public inbox for gentoo-commits@lists.gentoo.org
* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2025-07-24  9:18 Arisu Tachibana
  0 siblings, 0 replies; 82+ messages in thread
From: Arisu Tachibana @ 2025-07-24  9:18 UTC (permalink / raw)
  To: gentoo-commits

commit:     6f827a42cd3aeec48ca7d4f56535dfb60325b256
Author:     Arisu Tachibana <alicef <AT> gentoo <DOT> org>
AuthorDate: Thu Jul 24 09:17:54 2025 +0000
Commit:     Arisu Tachibana <alicef <AT> gentoo <DOT> org>
CommitDate: Thu Jul 24 09:17:54 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=6f827a42

Linux patch 6.12.40

Signed-off-by: Arisu Tachibana <alicef <AT> gentoo.org>

 0000_README              |    4 +
 1039_linux-6.12.40.patch | 5382 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 5386 insertions(+)

diff --git a/0000_README b/0000_README
index b596d7c2..229d421f 100644
--- a/0000_README
+++ b/0000_README
@@ -199,6 +199,10 @@ Patch:  1038_linux-6.12.39.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.12.39
 
+Patch:  1039_linux-6.12.40.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.12.40
+
 Patch:  1500_fortify-copy-size-value-range-tracking-fix.patch
 From:   https://git.kernel.org/
 Desc:   fortify: Hide run-time copy size from value range tracking

diff --git a/1039_linux-6.12.40.patch b/1039_linux-6.12.40.patch
new file mode 100644
index 00000000..78d45d71
--- /dev/null
+++ b/1039_linux-6.12.40.patch
@@ -0,0 +1,5382 @@
+diff --git a/Makefile b/Makefile
+index ba6054d96398dd..c891f51637d5bd 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 12
+-SUBLEVEL = 39
++SUBLEVEL = 40
+ EXTRAVERSION =
+ NAME = Baby Opossum Posse
+ 
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
+index 0baf256b44003f..983b2f0e87970a 100644
+--- a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
++++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
+@@ -687,11 +687,12 @@ lpuart5: serial@29a0000 {
+ 		};
+ 
+ 		wdog0: watchdog@2ad0000 {
+-			compatible = "fsl,imx21-wdt";
++			compatible = "fsl,ls1046a-wdt", "fsl,imx21-wdt";
+ 			reg = <0x0 0x2ad0000 0x0 0x10000>;
+ 			interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
+ 			clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ 					    QORIQ_CLK_PLL_DIV(2)>;
++			big-endian;
+ 		};
+ 
+ 		edma0: dma-controller@2c00000 {
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
+index d9b13c87f93bbe..c579a45273f0d0 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
+@@ -484,6 +484,7 @@ reg_vdd_phy: LDO4 {
+ 			};
+ 
+ 			reg_nvcc_sd: LDO5 {
++				regulator-always-on;
+ 				regulator-max-microvolt = <3300000>;
+ 				regulator-min-microvolt = <1800000>;
+ 				regulator-name = "On-module +V3.3_1.8_SD (LDO5)";
+diff --git a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw71xx.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw71xx.dtsi
+index 2f740d74707bdf..4bf818873fe3c5 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw71xx.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw71xx.dtsi
+@@ -70,7 +70,7 @@ &ecspi2 {
+ 	tpm@1 {
+ 		compatible = "atmel,attpm20p", "tcg,tpm_tis-spi";
+ 		reg = <0x1>;
+-		spi-max-frequency = <36000000>;
++		spi-max-frequency = <25000000>;
+ 	};
+ };
+ 
+diff --git a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw72xx.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw72xx.dtsi
+index 5ab3ffe9931d4a..cf747ec6fa16eb 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw72xx.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw72xx.dtsi
+@@ -110,7 +110,7 @@ &ecspi2 {
+ 	tpm@1 {
+ 		compatible = "atmel,attpm20p", "tcg,tpm_tis-spi";
+ 		reg = <0x1>;
+-		spi-max-frequency = <36000000>;
++		spi-max-frequency = <25000000>;
+ 	};
+ };
+ 
+diff --git a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw73xx.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw73xx.dtsi
+index e2b5e7ac3e465f..5eb114d2360a3b 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw73xx.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw73xx.dtsi
+@@ -122,7 +122,7 @@ &ecspi2 {
+ 	tpm@1 {
+ 		compatible = "atmel,attpm20p", "tcg,tpm_tis-spi";
+ 		reg = <0x1>;
+-		spi-max-frequency = <36000000>;
++		spi-max-frequency = <25000000>;
+ 	};
+ };
+ 
+diff --git a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts
+index d765b79728415e..c3647a059d1fba 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts
++++ b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts
+@@ -199,7 +199,7 @@ &ecspi1 {
+ 	tpm@0 {
+ 		compatible = "atmel,attpm20p", "tcg,tpm_tis-spi";
+ 		reg = <0x0>;
+-		spi-max-frequency = <36000000>;
++		spi-max-frequency = <25000000>;
+ 	};
+ };
+ 
+diff --git a/arch/arm64/boot/dts/freescale/imx95.dtsi b/arch/arm64/boot/dts/freescale/imx95.dtsi
+index f904d6b1c84bf0..7365d6538a733d 100644
+--- a/arch/arm64/boot/dts/freescale/imx95.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx95.dtsi
+@@ -1523,7 +1523,7 @@ pcie0_ep: pcie-ep@4c300000 {
+ 			      <0x9 0 1 0>;
+ 			reg-names = "dbi","atu", "dbi2", "app", "dma", "addr_space";
+ 			num-lanes = <1>;
+-			interrupts = <GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH>;
++			interrupts = <GIC_SPI 311 IRQ_TYPE_LEVEL_HIGH>;
+ 			interrupt-names = "dma";
+ 			clocks = <&scmi_clk IMX95_CLK_HSIO>,
+ 				 <&scmi_clk IMX95_CLK_HSIOPLL>,
+diff --git a/arch/arm64/boot/dts/rockchip/px30-ringneck.dtsi b/arch/arm64/boot/dts/rockchip/px30-ringneck.dtsi
+index f743aaf78359d2..c17c2f40194f2b 100644
+--- a/arch/arm64/boot/dts/rockchip/px30-ringneck.dtsi
++++ b/arch/arm64/boot/dts/rockchip/px30-ringneck.dtsi
+@@ -344,6 +344,18 @@ pmic_int: pmic-int {
+ 				<0 RK_PA7 RK_FUNC_GPIO &pcfg_pull_up>;
+ 		};
+ 	};
++
++	spi1 {
++		spi1_csn0_gpio_pin: spi1-csn0-gpio-pin {
++			rockchip,pins =
++				<3 RK_PB1 RK_FUNC_GPIO &pcfg_pull_up_4ma>;
++		};
++
++		spi1_csn1_gpio_pin: spi1-csn1-gpio-pin {
++			rockchip,pins =
++				<3 RK_PB2 RK_FUNC_GPIO &pcfg_pull_up_4ma>;
++		};
++	};
+ };
+ 
+ &pmu_io_domains {
+@@ -361,6 +373,17 @@ &sdmmc {
+ 	vqmmc-supply = <&vccio_sd>;
+ };
+ 
++&spi1 {
++	/*
++	 * Hardware CS has a very slow rise time of about 6us,
++	 * causing transmission errors.
++	 * With cs-gpios we have a rise time of about 20ns.
++	 */
++	cs-gpios = <&gpio3 RK_PB1 GPIO_ACTIVE_LOW>, <&gpio3 RK_PB2 GPIO_ACTIVE_LOW>;
++	pinctrl-names = "default";
++	pinctrl-0 = <&spi1_clk &spi1_csn0_gpio_pin &spi1_csn1_gpio_pin &spi1_miso &spi1_mosi>;
++};
++
+ &tsadc {
+ 	status = "okay";
+ };
+diff --git a/arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5.dtsi
+index fde8b228f2c7c9..5825141d200767 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5.dtsi
+@@ -317,6 +317,7 @@ &sdmmc {
+ 	bus-width = <4>;
+ 	cap-mmc-highspeed;
+ 	cap-sd-highspeed;
++	cd-gpios = <&gpio0 RK_PA4 GPIO_ACTIVE_LOW>;
+ 	disable-wp;
+ 	max-frequency = <150000000>;
+ 	no-sdio;
+diff --git a/arch/arm64/boot/dts/rockchip/rk3588s-coolpi-4b.dts b/arch/arm64/boot/dts/rockchip/rk3588s-coolpi-4b.dts
+index 074c316a9a694f..9713f05f92e9c4 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3588s-coolpi-4b.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3588s-coolpi-4b.dts
+@@ -438,6 +438,7 @@ &sdmmc {
+ 	bus-width = <4>;
+ 	cap-mmc-highspeed;
+ 	cap-sd-highspeed;
++	cd-gpios = <&gpio0 RK_PA4 GPIO_ACTIVE_LOW>;
+ 	disable-wp;
+ 	max-frequency = <150000000>;
+ 	no-sdio;
+diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
+index 9c83848797a78b..80230de167def3 100644
+--- a/arch/riscv/kernel/traps.c
++++ b/arch/riscv/kernel/traps.c
+@@ -6,6 +6,7 @@
+ #include <linux/cpu.h>
+ #include <linux/kernel.h>
+ #include <linux/init.h>
++#include <linux/irqflags.h>
+ #include <linux/randomize_kstack.h>
+ #include <linux/sched.h>
+ #include <linux/sched/debug.h>
+@@ -151,7 +152,9 @@ asmlinkage __visible __trap_section void name(struct pt_regs *regs)		\
+ {										\
+ 	if (user_mode(regs)) {							\
+ 		irqentry_enter_from_user_mode(regs);				\
++		local_irq_enable();						\
+ 		do_trap_error(regs, signo, code, regs->epc, "Oops - " str);	\
++		local_irq_disable();						\
+ 		irqentry_exit_to_user_mode(regs);				\
+ 	} else {								\
+ 		irqentry_state_t state = irqentry_nmi_enter(regs);		\
+@@ -173,17 +176,14 @@ asmlinkage __visible __trap_section void do_trap_insn_illegal(struct pt_regs *re
+ 
+ 	if (user_mode(regs)) {
+ 		irqentry_enter_from_user_mode(regs);
+-
+ 		local_irq_enable();
+ 
+ 		handled = riscv_v_first_use_handler(regs);
+-
+-		local_irq_disable();
+-
+ 		if (!handled)
+ 			do_trap_error(regs, SIGILL, ILL_ILLOPC, regs->epc,
+ 				      "Oops - illegal instruction");
+ 
++		local_irq_disable();
+ 		irqentry_exit_to_user_mode(regs);
+ 	} else {
+ 		irqentry_state_t state = irqentry_nmi_enter(regs);
+@@ -308,9 +308,11 @@ asmlinkage __visible __trap_section void do_trap_break(struct pt_regs *regs)
+ {
+ 	if (user_mode(regs)) {
+ 		irqentry_enter_from_user_mode(regs);
++		local_irq_enable();
+ 
+ 		handle_break(regs);
+ 
++		local_irq_disable();
+ 		irqentry_exit_to_user_mode(regs);
+ 	} else {
+ 		irqentry_state_t state = irqentry_nmi_enter(regs);
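
The traps.c hunks above all follow one pattern: the user-mode paths now run their handler bodies between local_irq_enable()/local_irq_disable(), presumably because the handlers can sleep (e.g. while delivering a signal) and because irqentry_exit_to_user_mode() expects interrupts off. A minimal userspace sketch of that bracketing, with printf stubs standing in for the kernel functions:

#include <stdio.h>

static void local_irq_enable(void)  { puts("irqs on"); }
static void local_irq_disable(void) { puts("irqs off"); }
static void do_trap_error(void)     { puts("handle trap (may sleep)"); }

static void handle_user_trap(void)
{
	/* irqentry_enter_from_user_mode() would run here */
	local_irq_enable();   /* handler body may fault or sleep */
	do_trap_error();
	local_irq_disable();  /* exit path expects interrupts off */
	/* irqentry_exit_to_user_mode() would run here */
}

int main(void)
{
	handle_user_trap();
	return 0;
}
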
+diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c
+index d14bfc23e315b0..4128aa5e0c7632 100644
+--- a/arch/riscv/kernel/traps_misaligned.c
++++ b/arch/riscv/kernel/traps_misaligned.c
+@@ -436,7 +436,7 @@ int handle_misaligned_load(struct pt_regs *regs)
+ 	}
+ 
+ 	if (!fp)
+-		SET_RD(insn, regs, val.data_ulong << shift >> shift);
++		SET_RD(insn, regs, (long)(val.data_ulong << shift) >> shift);
+ 	else if (len == 8)
+ 		set_f64_rd(insn, regs, val.data_u64);
+ 	else
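
The traps_misaligned.c change is a sign-extension fix: val.data_ulong is unsigned, so "val.data_ulong << shift >> shift" zero-fills the high bits and a misaligned signed load (lh/lw) would never be sign-extended. Casting the shifted value to long makes the right shift arithmetic. A runnable illustration, assuming an LP64 target where signed right shift is arithmetic (as the kernel does):

#include <stdio.h>

int main(void)
{
	/* emulate a misaligned 16-bit load of 0xfff0 (-16) on LP64 */
	unsigned long val = 0xfff0;
	int shift = 64 - 16;

	unsigned long zext = val << shift >> shift;    /* old code: zero-extends */
	long sext = (long)(val << shift) >> shift;     /* fixed code: sign-extends */

	printf("zero-extended: %#lx\n", zext);  /* 0xfff0 */
	printf("sign-extended: %ld\n", sext);   /* -16 */
	return 0;
}
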
+diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
+index 64bb8b71013ae4..ead8d9ba9032c5 100644
+--- a/arch/s390/net/bpf_jit_comp.c
++++ b/arch/s390/net/bpf_jit_comp.c
+@@ -544,7 +544,15 @@ static void bpf_jit_plt(struct bpf_plt *plt, void *ret, void *target)
+ {
+ 	memcpy(plt, &bpf_plt, sizeof(*plt));
+ 	plt->ret = ret;
+-	plt->target = target;
++	/*
++	 * (target == NULL) implies that the branch to this PLT entry was
++	 * patched and became a no-op. However, some CPU could have jumped
++	 * to this PLT entry before patching and may be still executing it.
++	 *
++	 * Since the intention in this case is to make the PLT entry a no-op,
++	 * make the target point to the return label instead of NULL.
++	 */
++	plt->target = target ?: ret;
+ }
+ 
+ /*
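
"target ?: ret" is the GNU C conditional-with-omitted-middle ("elvis") operator: it yields target when target is non-NULL and ret otherwise, so a patched-out PLT entry now branches to its own return label rather than to NULL. The operator in isolation (GNU extension, so gcc/clang only):

#include <stdio.h>

int main(void)
{
	const char *ret = "return-label";
	const char *target = NULL;

	/* GNU extension: a ?: b is a ? a : b with 'a' evaluated once */
	const char *dest = target ?: ret;

	printf("branch goes to: %s\n", dest);  /* "return-label" */
	return 0;
}
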
+diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c
+index 759cc3e9c0fac7..1fc2035df404fd 100644
+--- a/arch/x86/kvm/xen.c
++++ b/arch/x86/kvm/xen.c
+@@ -1472,7 +1472,7 @@ static bool kvm_xen_schedop_poll(struct kvm_vcpu *vcpu, bool longmode,
+ 	if (kvm_read_guest_virt(vcpu, (gva_t)sched_poll.ports, ports,
+ 				sched_poll.nr_ports * sizeof(*ports), &e)) {
+ 		*r = -EFAULT;
+-		return true;
++		goto out;
+ 	}
+ 
+ 	for (i = 0; i < sched_poll.nr_ports; i++) {
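
Replacing "return true" with "goto out" sends the error path through the function's cleanup label; the label itself is outside the hunk, so the usual motive is assumed here: a buffer allocated earlier in the function that a direct return would leak. The idiom in miniature (copy_from_guest is a hypothetical stub, not KVM's):

#include <stdlib.h>

/* hypothetical stand-in: nonzero means the guest copy failed */
static int copy_from_guest(unsigned int *dst, size_t n)
{
	(void)dst;
	return n > 8;
}

static int poll_ports(size_t nr_ports)
{
	int r = 0;
	unsigned int *ports = malloc(nr_ports * sizeof(*ports));

	if (!ports)
		return -1;

	if (copy_from_guest(ports, nr_ports)) {
		r = -2;
		goto out;   /* a bare 'return' here would leak ports */
	}
	/* ... poll the ports ... */
out:
	free(ports);
	return r;
}

int main(void)
{
	return poll_ports(4) ? 1 : 0;
}
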
+diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
+index 0e2520d929e1db..6a38f312e385c3 100644
+--- a/block/blk-sysfs.c
++++ b/block/blk-sysfs.c
+@@ -868,4 +868,5 @@ void blk_unregister_queue(struct gendisk *disk)
+ 	mutex_unlock(&q->sysfs_dir_lock);
+ 
+ 	blk_debugfs_remove(disk);
++	kobject_put(&disk->queue_kobj);
+ }
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index e9a197474b9d8b..2f42d164461846 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -323,14 +323,13 @@ static void lo_complete_rq(struct request *rq)
+ static void lo_rw_aio_do_completion(struct loop_cmd *cmd)
+ {
+ 	struct request *rq = blk_mq_rq_from_pdu(cmd);
+-	struct loop_device *lo = rq->q->queuedata;
+ 
+ 	if (!atomic_dec_and_test(&cmd->ref))
+ 		return;
+ 	kfree(cmd->bvec);
+ 	cmd->bvec = NULL;
+ 	if (req_op(rq) == REQ_OP_WRITE)
+-		file_end_write(lo->lo_backing_file);
++		kiocb_end_write(&cmd->iocb);
+ 	if (likely(!blk_should_fake_timeout(rq->q)))
+ 		blk_mq_complete_request(rq);
+ }
+@@ -406,7 +405,7 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
+ 	}
+ 
+ 	if (rw == ITER_SOURCE) {
+-		file_start_write(lo->lo_backing_file);
++		kiocb_start_write(&cmd->iocb);
+ 		ret = file->f_op->write_iter(&cmd->iocb, &iter);
+ 	} else
+ 		ret = file->f_op->read_iter(&cmd->iocb, &iter);
+diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
+index 51d6d91ed4041b..85df941afb6cf4 100644
+--- a/drivers/bluetooth/btintel.c
++++ b/drivers/bluetooth/btintel.c
+@@ -2656,7 +2656,7 @@ static u8 btintel_classify_pkt_type(struct hci_dev *hdev, struct sk_buff *skb)
+	 * Distinguish ISO data packets from ACL data packets
+ 	 * based on their connection handle value range.
+ 	 */
+-	if (hci_skb_pkt_type(skb) == HCI_ACLDATA_PKT) {
++	if (iso_capable(hdev) && hci_skb_pkt_type(skb) == HCI_ACLDATA_PKT) {
+ 		__u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle);
+ 
+ 		if (hci_handle(handle) >= BTINTEL_ISODATA_HANDLE_BASE)
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index aa63852060500c..72b5297573735c 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -3194,6 +3194,32 @@ static const struct qca_device_info qca_devices_table[] = {
+ 	{ 0x00190200, 40, 4, 16 }, /* WCN785x 2.0 */
+ };
+ 
++static u16 qca_extract_board_id(const struct qca_version *ver)
++{
++	u16 flag = le16_to_cpu(ver->flag);
++	u16 board_id = 0;
++
++	if (((flag >> 8) & 0xff) == QCA_FLAG_MULTI_NVM) {
++		/* The board_id should be split into two bytes
++		 * The 1st byte is chip ID, and the 2nd byte is platform ID
++		 * For example, board ID 0x010A, 0x01 is platform ID. 0x0A is chip ID
++		 * we have several platforms, and platform IDs are continuously added
++		 * Platform ID:
++		 * 0x00 is for Mobile
++		 * 0x01 is for X86
++		 * 0x02 is for Automotive
++		 * 0x03 is for Consumer electronic
++		 */
++		board_id = (ver->chip_id << 8) + ver->platform_id;
++	}
++
++	/* Take 0xffff as invalid board ID */
++	if (board_id == 0xffff)
++		board_id = 0;
++
++	return board_id;
++}
++
+ static int btusb_qca_send_vendor_req(struct usb_device *udev, u8 request,
+ 				     void *data, u16 size)
+ {
+@@ -3350,44 +3376,28 @@ static void btusb_generate_qca_nvm_name(char *fwname, size_t max_size,
+ 					const struct qca_version *ver)
+ {
+ 	u32 rom_version = le32_to_cpu(ver->rom_version);
+-	u16 flag = le16_to_cpu(ver->flag);
++	const char *variant;
++	int len;
++	u16 board_id;
+ 
+-	if (((flag >> 8) & 0xff) == QCA_FLAG_MULTI_NVM) {
+-		/* The board_id should be split into two bytes
+-		 * The 1st byte is chip ID, and the 2nd byte is platform ID
+-		 * For example, board ID 0x010A, 0x01 is platform ID. 0x0A is chip ID
+-		 * we have several platforms, and platform IDs are continuously added
+-		 * Platform ID:
+-		 * 0x00 is for Mobile
+-		 * 0x01 is for X86
+-		 * 0x02 is for Automotive
+-		 * 0x03 is for Consumer electronic
+-		 */
+-		u16 board_id = (ver->chip_id << 8) + ver->platform_id;
+-		const char *variant;
++	board_id = qca_extract_board_id(ver);
+ 
+-		switch (le32_to_cpu(ver->ram_version)) {
+-		case WCN6855_2_0_RAM_VERSION_GF:
+-		case WCN6855_2_1_RAM_VERSION_GF:
+-			variant = "_gf";
+-			break;
+-		default:
+-			variant = "";
+-			break;
+-		}
+-
+-		if (board_id == 0) {
+-			snprintf(fwname, max_size, "qca/nvm_usb_%08x%s.bin",
+-				rom_version, variant);
+-		} else {
+-			snprintf(fwname, max_size, "qca/nvm_usb_%08x%s_%04x.bin",
+-				rom_version, variant, board_id);
+-		}
+-	} else {
+-		snprintf(fwname, max_size, "qca/nvm_usb_%08x.bin",
+-			rom_version);
++	switch (le32_to_cpu(ver->ram_version)) {
++	case WCN6855_2_0_RAM_VERSION_GF:
++	case WCN6855_2_1_RAM_VERSION_GF:
++		variant = "_gf";
++		break;
++	default:
++		variant = NULL;
++		break;
+ 	}
+ 
++	len = snprintf(fwname, max_size, "qca/nvm_usb_%08x", rom_version);
++	if (variant)
++		len += snprintf(fwname + len, max_size - len, "%s", variant);
++	if (board_id)
++		len += snprintf(fwname + len, max_size - len, "_%04x", board_id);
++	len += snprintf(fwname + len, max_size - len, ".bin");
+ }
+ 
+ static int btusb_setup_qca_load_nvm(struct hci_dev *hdev,
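
The btusb refactor splits the board-ID computation into qca_extract_board_id() and then builds the firmware path with incremental snprintf() calls, appending the variant and board-ID suffixes only when present. Both pieces, reduced to a standalone sketch with made-up field values:

#include <stdio.h>
#include <stdint.h>

static uint16_t extract_board_id(uint8_t chip_id, uint8_t platform_id)
{
	/* chip ID in the high byte, platform ID in the low byte */
	uint16_t board_id = ((uint16_t)chip_id << 8) + platform_id;

	return board_id == 0xffff ? 0 : board_id;  /* 0xffff is invalid */
}

int main(void)
{
	char fwname[64];
	uint32_t rom_version = 0x00130201;           /* illustrative values */
	const char *variant = "_gf";                 /* NULL when absent */
	uint16_t board_id = extract_board_id(0x0a, 0x01);
	int len;

	len = snprintf(fwname, sizeof(fwname), "qca/nvm_usb_%08x", rom_version);
	if (variant)
		len += snprintf(fwname + len, sizeof(fwname) - len, "%s", variant);
	if (board_id)
		len += snprintf(fwname + len, sizeof(fwname) - len, "_%04x", board_id);
	len += snprintf(fwname + len, sizeof(fwname) - len, ".bin");

	printf("%s (%d bytes)\n", fwname, len);
	/* qca/nvm_usb_00130201_gf_0a01.bin */
	return 0;
}
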
+diff --git a/drivers/comedi/comedi_fops.c b/drivers/comedi/comedi_fops.c
+index b9df9b19d4bd97..07bc81a706b4d3 100644
+--- a/drivers/comedi/comedi_fops.c
++++ b/drivers/comedi/comedi_fops.c
+@@ -1556,21 +1556,27 @@ static int do_insnlist_ioctl(struct comedi_device *dev,
+ 	}
+ 
+ 	for (i = 0; i < n_insns; ++i) {
++		unsigned int n = insns[i].n;
++
+ 		if (insns[i].insn & INSN_MASK_WRITE) {
+ 			if (copy_from_user(data, insns[i].data,
+-					   insns[i].n * sizeof(unsigned int))) {
++					   n * sizeof(unsigned int))) {
+ 				dev_dbg(dev->class_dev,
+ 					"copy_from_user failed\n");
+ 				ret = -EFAULT;
+ 				goto error;
+ 			}
++			if (n < MIN_SAMPLES) {
++				memset(&data[n], 0, (MIN_SAMPLES - n) *
++						    sizeof(unsigned int));
++			}
+ 		}
+ 		ret = parse_insn(dev, insns + i, data, file);
+ 		if (ret < 0)
+ 			goto error;
+ 		if (insns[i].insn & INSN_MASK_READ) {
+ 			if (copy_to_user(insns[i].data, data,
+-					 insns[i].n * sizeof(unsigned int))) {
++					 n * sizeof(unsigned int))) {
+ 				dev_dbg(dev->class_dev,
+ 					"copy_to_user failed\n");
+ 				ret = -EFAULT;
+@@ -1589,6 +1595,16 @@ static int do_insnlist_ioctl(struct comedi_device *dev,
+ 	return i;
+ }
+ 
++#define MAX_INSNS   MAX_SAMPLES
++static int check_insnlist_len(struct comedi_device *dev, unsigned int n_insns)
++{
++	if (n_insns > MAX_INSNS) {
++		dev_dbg(dev->class_dev, "insnlist length too large\n");
++		return -EINVAL;
++	}
++	return 0;
++}
++
+ /*
+  * COMEDI_INSN ioctl
+  * synchronous instruction
+@@ -1633,6 +1649,10 @@ static int do_insn_ioctl(struct comedi_device *dev,
+ 			ret = -EFAULT;
+ 			goto error;
+ 		}
++		if (insn->n < MIN_SAMPLES) {
++			memset(&data[insn->n], 0,
++			       (MIN_SAMPLES - insn->n) * sizeof(unsigned int));
++		}
+ 	}
+ 	ret = parse_insn(dev, insn, data, file);
+ 	if (ret < 0)
+@@ -2239,6 +2259,9 @@ static long comedi_unlocked_ioctl(struct file *file, unsigned int cmd,
+ 			rc = -EFAULT;
+ 			break;
+ 		}
++		rc = check_insnlist_len(dev, insnlist.n_insns);
++		if (rc)
++			break;
+ 		insns = kcalloc(insnlist.n_insns, sizeof(*insns), GFP_KERNEL);
+ 		if (!insns) {
+ 			rc = -ENOMEM;
+@@ -3090,6 +3113,9 @@ static int compat_insnlist(struct file *file, unsigned long arg)
+ 	if (copy_from_user(&insnlist32, compat_ptr(arg), sizeof(insnlist32)))
+ 		return -EFAULT;
+ 
++	rc = check_insnlist_len(dev, insnlist32.n_insns);
++	if (rc)
++		return rc;
+ 	insns = kcalloc(insnlist32.n_insns, sizeof(*insns), GFP_KERNEL);
+ 	if (!insns)
+ 		return -ENOMEM;
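
Both comedi ioctl paths now zero-fill the sample buffer from the user-supplied count n up to MIN_SAMPLES, so drivers that unconditionally read data[0] or data[1] see zeros rather than stale heap contents. The padding arithmetic in isolation (the MIN_SAMPLES value here is illustrative, not comedi's):

#include <stdio.h>
#include <string.h>

#define MIN_SAMPLES 16   /* illustrative; comedi defines its own value */

int main(void)
{
	unsigned int data[MIN_SAMPLES];
	unsigned int n = 2;               /* what userspace actually sent */

	data[0] = 0xdead;
	data[1] = 0xbeef;

	if (n < MIN_SAMPLES)
		memset(&data[n], 0, (MIN_SAMPLES - n) * sizeof(unsigned int));

	printf("data[1]=%#x data[2]=%#x\n", data[1], data[2]); /* 0xbeef 0 */
	return 0;
}
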
+diff --git a/drivers/comedi/drivers.c b/drivers/comedi/drivers.c
+index 376130bfba8a2c..9e4b7c840a8f5a 100644
+--- a/drivers/comedi/drivers.c
++++ b/drivers/comedi/drivers.c
+@@ -339,10 +339,10 @@ int comedi_dio_insn_config(struct comedi_device *dev,
+ 			   unsigned int *data,
+ 			   unsigned int mask)
+ {
+-	unsigned int chan_mask = 1 << CR_CHAN(insn->chanspec);
++	unsigned int chan = CR_CHAN(insn->chanspec);
+ 
+-	if (!mask)
+-		mask = chan_mask;
++	if (!mask && chan < 32)
++		mask = 1U << chan;
+ 
+ 	switch (data[0]) {
+ 	case INSN_CONFIG_DIO_INPUT:
+@@ -382,7 +382,7 @@ EXPORT_SYMBOL_GPL(comedi_dio_insn_config);
+ unsigned int comedi_dio_update_state(struct comedi_subdevice *s,
+ 				     unsigned int *data)
+ {
+-	unsigned int chanmask = (s->n_chan < 32) ? ((1 << s->n_chan) - 1)
++	unsigned int chanmask = (s->n_chan < 32) ? ((1U << s->n_chan) - 1)
+ 						 : 0xffffffff;
+ 	unsigned int mask = data[0] & chanmask;
+ 	unsigned int bits = data[1];
+@@ -615,6 +615,9 @@ static int insn_rw_emulate_bits(struct comedi_device *dev,
+ 	unsigned int _data[2];
+ 	int ret;
+ 
++	if (insn->n == 0)
++		return 0;
++
+ 	memset(_data, 0, sizeof(_data));
+ 	memset(&_insn, 0, sizeof(_insn));
+ 	_insn.insn = INSN_BITS;
+@@ -625,8 +628,8 @@ static int insn_rw_emulate_bits(struct comedi_device *dev,
+ 	if (insn->insn == INSN_WRITE) {
+ 		if (!(s->subdev_flags & SDF_WRITABLE))
+ 			return -EINVAL;
+-		_data[0] = 1 << (chan - base_chan);		    /* mask */
+-		_data[1] = data[0] ? (1 << (chan - base_chan)) : 0; /* bits */
++		_data[0] = 1U << (chan - base_chan);		     /* mask */
++		_data[1] = data[0] ? (1U << (chan - base_chan)) : 0; /* bits */
+ 	}
+ 
+ 	ret = s->insn_bits(dev, s, &_insn, _data);
+@@ -709,7 +712,7 @@ static int __comedi_device_postconfig(struct comedi_device *dev)
+ 
+ 		if (s->type == COMEDI_SUBD_DO) {
+ 			if (s->n_chan < 32)
+-				s->io_bits = (1 << s->n_chan) - 1;
++				s->io_bits = (1U << s->n_chan) - 1;
+ 			else
+ 				s->io_bits = 0xffffffff;
+ 		}
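
The "1 << ..." to "1U << ..." conversions above, together with the new "chan < 32" guard, avoid two kinds of undefined behaviour: shifting a signed 1 into bit 31, and any shift by 32 or more. The safe form as a standalone helper:

#include <stdio.h>

/* returns a single-channel mask, or 0 for out-of-range channels */
static unsigned int chan_mask(unsigned int chan)
{
	if (chan >= 32)
		return 0;       /* 1U << 32 would be undefined behaviour */
	return 1U << chan;      /* 1 << 31 on signed int is UB; 1U << 31 is fine */
}

int main(void)
{
	printf("%#x %#x %#x\n", chan_mask(0), chan_mask(31), chan_mask(32));
	/* 0x1 0x80000000 0 */
	return 0;
}
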
+diff --git a/drivers/comedi/drivers/aio_iiro_16.c b/drivers/comedi/drivers/aio_iiro_16.c
+index b00fab0b89d4c4..739cc4db52ac7e 100644
+--- a/drivers/comedi/drivers/aio_iiro_16.c
++++ b/drivers/comedi/drivers/aio_iiro_16.c
+@@ -177,7 +177,8 @@ static int aio_iiro_16_attach(struct comedi_device *dev,
+ 	 * Digital input change of state interrupts are optionally supported
+ 	 * using IRQ 2-7, 10-12, 14, or 15.
+ 	 */
+-	if ((1 << it->options[1]) & 0xdcfc) {
++	if (it->options[1] > 0 && it->options[1] < 16 &&
++	    (1 << it->options[1]) & 0xdcfc) {
+ 		ret = request_irq(it->options[1], aio_iiro_16_cos, 0,
+ 				  dev->board_name, dev);
+ 		if (ret == 0)
+diff --git a/drivers/comedi/drivers/das16m1.c b/drivers/comedi/drivers/das16m1.c
+index b8ea737ad3d14c..1b638f5b5a4fb9 100644
+--- a/drivers/comedi/drivers/das16m1.c
++++ b/drivers/comedi/drivers/das16m1.c
+@@ -522,7 +522,8 @@ static int das16m1_attach(struct comedi_device *dev,
+ 	devpriv->extra_iobase = dev->iobase + DAS16M1_8255_IOBASE;
+ 
+ 	/* only irqs 2, 3, 4, 5, 6, 7, 10, 11, 12, 14, and 15 are valid */
+-	if ((1 << it->options[1]) & 0xdcfc) {
++	if (it->options[1] >= 2 && it->options[1] <= 15 &&
++	    (1 << it->options[1]) & 0xdcfc) {
+ 		ret = request_irq(it->options[1], das16m1_interrupt, 0,
+ 				  dev->board_name, dev);
+ 		if (ret == 0)
+diff --git a/drivers/comedi/drivers/das6402.c b/drivers/comedi/drivers/das6402.c
+index 68f95330de45fd..7660487e563c56 100644
+--- a/drivers/comedi/drivers/das6402.c
++++ b/drivers/comedi/drivers/das6402.c
+@@ -567,7 +567,8 @@ static int das6402_attach(struct comedi_device *dev,
+ 	das6402_reset(dev);
+ 
+ 	/* IRQs 2,3,5,6,7, 10,11,15 are valid for "enhanced" mode */
+-	if ((1 << it->options[1]) & 0x8cec) {
++	if (it->options[1] > 0 && it->options[1] < 16 &&
++	    (1 << it->options[1]) & 0x8cec) {
+ 		ret = request_irq(it->options[1], das6402_interrupt, 0,
+ 				  dev->board_name, dev);
+ 		if (ret == 0) {
+diff --git a/drivers/comedi/drivers/pcl812.c b/drivers/comedi/drivers/pcl812.c
+index 0df639c6a595e5..abca61a72cf7ea 100644
+--- a/drivers/comedi/drivers/pcl812.c
++++ b/drivers/comedi/drivers/pcl812.c
+@@ -1149,7 +1149,8 @@ static int pcl812_attach(struct comedi_device *dev, struct comedi_devconfig *it)
+ 		if (IS_ERR(dev->pacer))
+ 			return PTR_ERR(dev->pacer);
+ 
+-		if ((1 << it->options[1]) & board->irq_bits) {
++		if (it->options[1] > 0 && it->options[1] < 16 &&
++		    (1 << it->options[1]) & board->irq_bits) {
+ 			ret = request_irq(it->options[1], pcl812_interrupt, 0,
+ 					  dev->board_name, dev);
+ 			if (ret == 0)
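
All four attach() fixes above share one shape: it->options[1] comes from userspace and was fed to "1 << it->options[1]" before being tested against the board's valid-IRQ bitmask, so an out-of-range value caused an undefined shift. The guarded test, using the 0xdcfc mask from the aio_iiro_16 hunk:

#include <stdio.h>
#include <stdbool.h>

/* IRQs 2-7, 10-12, 14, 15 valid: exactly the bits set in 0xdcfc */
static bool irq_is_valid(int irq)
{
	return irq > 0 && irq < 16 && ((1 << irq) & 0xdcfc);
}

int main(void)
{
	/* 35 would have been an out-of-range (undefined) shift before */
	printf("%d %d %d\n", irq_is_valid(5), irq_is_valid(8), irq_is_valid(35));
	/* 1 0 0 */
	return 0;
}
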
+diff --git a/drivers/cpuidle/cpuidle-psci.c b/drivers/cpuidle/cpuidle-psci.c
+index 2562dc001fc1de..f3c30037dc8efc 100644
+--- a/drivers/cpuidle/cpuidle-psci.c
++++ b/drivers/cpuidle/cpuidle-psci.c
+@@ -38,7 +38,6 @@ struct psci_cpuidle_data {
+ static DEFINE_PER_CPU_READ_MOSTLY(struct psci_cpuidle_data, psci_cpuidle_data);
+ static DEFINE_PER_CPU(u32, domain_state);
+ static bool psci_cpuidle_use_syscore;
+-static bool psci_cpuidle_use_cpuhp;
+ 
+ void psci_set_domain_state(u32 state)
+ {
+@@ -105,8 +104,12 @@ static int psci_idle_cpuhp_up(unsigned int cpu)
+ {
+ 	struct device *pd_dev = __this_cpu_read(psci_cpuidle_data.dev);
+ 
+-	if (pd_dev)
+-		pm_runtime_get_sync(pd_dev);
++	if (pd_dev) {
++		if (!IS_ENABLED(CONFIG_PREEMPT_RT))
++			pm_runtime_get_sync(pd_dev);
++		else
++			dev_pm_genpd_resume(pd_dev);
++	}
+ 
+ 	return 0;
+ }
+@@ -116,7 +119,11 @@ static int psci_idle_cpuhp_down(unsigned int cpu)
+ 	struct device *pd_dev = __this_cpu_read(psci_cpuidle_data.dev);
+ 
+ 	if (pd_dev) {
+-		pm_runtime_put_sync(pd_dev);
++		if (!IS_ENABLED(CONFIG_PREEMPT_RT))
++			pm_runtime_put_sync(pd_dev);
++		else
++			dev_pm_genpd_suspend(pd_dev);
++
+ 		/* Clear domain state to start fresh at next online. */
+ 		psci_set_domain_state(0);
+ 	}
+@@ -177,9 +184,6 @@ static void psci_idle_init_cpuhp(void)
+ {
+ 	int err;
+ 
+-	if (!psci_cpuidle_use_cpuhp)
+-		return;
+-
+ 	err = cpuhp_setup_state_nocalls(CPUHP_AP_CPU_PM_STARTING,
+ 					"cpuidle/psci:online",
+ 					psci_idle_cpuhp_up,
+@@ -240,10 +244,8 @@ static int psci_dt_cpu_init_topology(struct cpuidle_driver *drv,
+ 	 * s2ram and s2idle.
+ 	 */
+ 	drv->states[state_count - 1].enter_s2idle = psci_enter_s2idle_domain_idle_state;
+-	if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
++	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ 		drv->states[state_count - 1].enter = psci_enter_domain_idle_state;
+-		psci_cpuidle_use_cpuhp = true;
+-	}
+ 
+ 	return 0;
+ }
+@@ -320,7 +322,6 @@ static void psci_cpu_deinit_idle(int cpu)
+ 
+ 	dt_idle_detach_cpu(data->dev);
+ 	psci_cpuidle_use_syscore = false;
+-	psci_cpuidle_use_cpuhp = false;
+ }
+ 
+ static int psci_idle_init_cpu(struct device *dev, int cpu)
+diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c
+index 3b011a91d48ec7..5f5d6242427e18 100644
+--- a/drivers/dma/nbpfaxi.c
++++ b/drivers/dma/nbpfaxi.c
+@@ -1351,7 +1351,7 @@ static int nbpf_probe(struct platform_device *pdev)
+ 	if (irqs == 1) {
+ 		eirq = irqbuf[0];
+ 
+-		for (i = 0; i <= num_channels; i++)
++		for (i = 0; i < num_channels; i++)
+ 			nbpf->chan[i].irq = irqbuf[0];
+ 	} else {
+ 		eirq = platform_get_irq_byname(pdev, "error");
+@@ -1361,16 +1361,15 @@ static int nbpf_probe(struct platform_device *pdev)
+ 		if (irqs == num_channels + 1) {
+ 			struct nbpf_channel *chan;
+ 
+-			for (i = 0, chan = nbpf->chan; i <= num_channels;
++			for (i = 0, chan = nbpf->chan; i < num_channels;
+ 			     i++, chan++) {
+ 				/* Skip the error IRQ */
+ 				if (irqbuf[i] == eirq)
+ 					i++;
++				if (i >= ARRAY_SIZE(irqbuf))
++					return -EINVAL;
+ 				chan->irq = irqbuf[i];
+ 			}
+-
+-			if (chan != nbpf->chan + num_channels)
+-				return -EINVAL;
+ 		} else {
+ 			/* 2 IRQs and more than one channel */
+ 			if (irqbuf[0] == eirq)
+@@ -1378,7 +1377,7 @@ static int nbpf_probe(struct platform_device *pdev)
+ 			else
+ 				irq = irqbuf[0];
+ 
+-			for (i = 0; i <= num_channels; i++)
++			for (i = 0; i < num_channels; i++)
+ 				nbpf->chan[i].irq = irq;
+ 		}
+ 	}
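
The nbpfaxi changes are off-by-one fixes: nbpf->chan holds num_channels entries, so "for (i = 0; i <= num_channels; i++)" wrote one element past the end of the array, and the new in-loop check keeps the error-IRQ skip from walking past irqbuf. The corrected loop bound in miniature:

#include <stdio.h>

#define NUM_CHANNELS 4

int main(void)
{
	int chan_irq[NUM_CHANNELS];
	int irq = 42;

	/* '<=' here would write chan_irq[4], one past the end */
	for (int i = 0; i < NUM_CHANNELS; i++)
		chan_irq[i] = irq;

	for (int i = 0; i < NUM_CHANNELS; i++)
		printf("chan %d -> irq %d\n", i, chan_irq[i]);
	return 0;
}
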
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+index 690976665cf699..10da6e550d7683 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+@@ -439,6 +439,7 @@ bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
+ {
+ 	unsigned long flags;
+ 	ktime_t deadline;
++	bool ret;
+ 
+ 	if (unlikely(ring->adev->debug_disable_soft_recovery))
+ 		return false;
+@@ -453,12 +454,16 @@ bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
+ 		dma_fence_set_error(fence, -ENODATA);
+ 	spin_unlock_irqrestore(fence->lock, flags);
+ 
+-	atomic_inc(&ring->adev->gpu_reset_counter);
+ 	while (!dma_fence_is_signaled(fence) &&
+ 	       ktime_to_ns(ktime_sub(deadline, ktime_get())) > 0)
+ 		ring->funcs->soft_recovery(ring, vmid);
+ 
+-	return dma_fence_is_signaled(fence);
++	ret = dma_fence_is_signaled(fence);
++	/* increment the counter only if soft reset worked */
++	if (ret)
++		atomic_inc(&ring->adev->gpu_reset_counter);
++
++	return ret;
+ }
+ 
+ /*
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+index 9d741695ca07d6..1f675d67a1a785 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+@@ -4652,6 +4652,7 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
+ 			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation));
+ 		/* reset ring buffer */
+ 		ring->wptr = 0;
++		atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0);
+ 		amdgpu_ring_clear_ring(ring);
+ 	}
+ 	return 0;
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+index 2ac56e79df05e6..9a31e5da368792 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+@@ -731,7 +731,16 @@ int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
+ 	 * support programmable degamma anywhere.
+ 	 */
+ 	is_dcn = dm->adev->dm.dc->caps.color.dpp.dcn_arch;
+-	drm_crtc_enable_color_mgmt(&acrtc->base, is_dcn ? MAX_COLOR_LUT_ENTRIES : 0,
++	/* Don't enable DRM CRTC degamma property for DCN401 since the
++	 * pre-blending degamma LUT doesn't apply to cursor, and therefore
++	 * can't work similar to a post-blending degamma LUT as in other hw
++	 * versions.
++	 * TODO: revisit it once KMS plane color API is merged.
++	 */
++	drm_crtc_enable_color_mgmt(&acrtc->base,
++				   (is_dcn &&
++				    dm->adev->dm.dc->ctx->dce_version != DCN_VERSION_4_01) ?
++				     MAX_COLOR_LUT_ENTRIES : 0,
+ 				   true, MAX_COLOR_LUT_ENTRIES);
+ 
+ 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
+index 313e52997596a0..2ee034879f9ff5 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
+@@ -1700,7 +1700,7 @@ struct clk_mgr_internal *dcn401_clk_mgr_construct(
+ 	clk_mgr->base.bw_params = kzalloc(sizeof(*clk_mgr->base.bw_params), GFP_KERNEL);
+ 	if (!clk_mgr->base.bw_params) {
+ 		BREAK_TO_DEBUGGER();
+-		kfree(clk_mgr);
++		kfree(clk_mgr401);
+ 		return NULL;
+ 	}
+ 
+@@ -1711,6 +1711,7 @@ struct clk_mgr_internal *dcn401_clk_mgr_construct(
+ 	if (!clk_mgr->wm_range_table) {
+ 		BREAK_TO_DEBUGGER();
+ 		kfree(clk_mgr->base.bw_params);
++		kfree(clk_mgr401);
+ 		return NULL;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/mediatek/mtk_crtc.c b/drivers/gpu/drm/mediatek/mtk_crtc.c
+index 8f6fba4217ece5..bc7527542fdc6f 100644
+--- a/drivers/gpu/drm/mediatek/mtk_crtc.c
++++ b/drivers/gpu/drm/mediatek/mtk_crtc.c
+@@ -719,6 +719,39 @@ int mtk_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane,
+ 	return 0;
+ }
+ 
++void mtk_crtc_plane_disable(struct drm_crtc *crtc, struct drm_plane *plane)
++{
++#if IS_REACHABLE(CONFIG_MTK_CMDQ)
++	struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc);
++	struct mtk_plane_state *plane_state = to_mtk_plane_state(plane->state);
++	int i;
++
++	/* no need to wait for disabling the plane by CPU */
++	if (!mtk_crtc->cmdq_client.chan)
++		return;
++
++	if (!mtk_crtc->enabled)
++		return;
++
++	/* set pending plane state to disabled */
++	for (i = 0; i < mtk_crtc->layer_nr; i++) {
++		struct drm_plane *mtk_plane = &mtk_crtc->planes[i];
++		struct mtk_plane_state *mtk_plane_state = to_mtk_plane_state(mtk_plane->state);
++
++		if (mtk_plane->index == plane->index) {
++			memcpy(mtk_plane_state, plane_state, sizeof(*plane_state));
++			break;
++		}
++	}
++	mtk_crtc_update_config(mtk_crtc, false);
++
++	/* wait for planes to be disabled by CMDQ */
++	wait_event_timeout(mtk_crtc->cb_blocking_queue,
++			   mtk_crtc->cmdq_vblank_cnt == 0,
++			   msecs_to_jiffies(500));
++#endif
++}
++
+ void mtk_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane,
+ 			   struct drm_atomic_state *state)
+ {
+@@ -930,7 +963,8 @@ static int mtk_crtc_init_comp_planes(struct drm_device *drm_dev,
+ 				mtk_ddp_comp_supported_rotations(comp),
+ 				mtk_ddp_comp_get_blend_modes(comp),
+ 				mtk_ddp_comp_get_formats(comp),
+-				mtk_ddp_comp_get_num_formats(comp), i);
++				mtk_ddp_comp_get_num_formats(comp),
++				mtk_ddp_comp_is_afbc_supported(comp), i);
+ 		if (ret)
+ 			return ret;
+ 
+diff --git a/drivers/gpu/drm/mediatek/mtk_crtc.h b/drivers/gpu/drm/mediatek/mtk_crtc.h
+index 388e900b6f4ded..828f109b83e78f 100644
+--- a/drivers/gpu/drm/mediatek/mtk_crtc.h
++++ b/drivers/gpu/drm/mediatek/mtk_crtc.h
+@@ -21,6 +21,7 @@ int mtk_crtc_create(struct drm_device *drm_dev, const unsigned int *path,
+ 		    unsigned int num_conn_routes);
+ int mtk_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane,
+ 			 struct mtk_plane_state *state);
++void mtk_crtc_plane_disable(struct drm_crtc *crtc, struct drm_plane *plane);
+ void mtk_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane,
+ 			   struct drm_atomic_state *plane_state);
+ struct device *mtk_crtc_dma_dev_get(struct drm_crtc *crtc);
+diff --git a/drivers/gpu/drm/mediatek/mtk_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_ddp_comp.c
+index edc6417639e642..ac6620e10262e3 100644
+--- a/drivers/gpu/drm/mediatek/mtk_ddp_comp.c
++++ b/drivers/gpu/drm/mediatek/mtk_ddp_comp.c
+@@ -366,6 +366,7 @@ static const struct mtk_ddp_comp_funcs ddp_ovl = {
+ 	.get_blend_modes = mtk_ovl_get_blend_modes,
+ 	.get_formats = mtk_ovl_get_formats,
+ 	.get_num_formats = mtk_ovl_get_num_formats,
++	.is_afbc_supported = mtk_ovl_is_afbc_supported,
+ };
+ 
+ static const struct mtk_ddp_comp_funcs ddp_postmask = {
+diff --git a/drivers/gpu/drm/mediatek/mtk_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_ddp_comp.h
+index 39720b27f4e9ed..7289b3dcf22f22 100644
+--- a/drivers/gpu/drm/mediatek/mtk_ddp_comp.h
++++ b/drivers/gpu/drm/mediatek/mtk_ddp_comp.h
+@@ -83,6 +83,7 @@ struct mtk_ddp_comp_funcs {
+ 	u32 (*get_blend_modes)(struct device *dev);
+ 	const u32 *(*get_formats)(struct device *dev);
+ 	size_t (*get_num_formats)(struct device *dev);
++	bool (*is_afbc_supported)(struct device *dev);
+ 	void (*connect)(struct device *dev, struct device *mmsys_dev, unsigned int next);
+ 	void (*disconnect)(struct device *dev, struct device *mmsys_dev, unsigned int next);
+ 	void (*add)(struct device *dev, struct mtk_mutex *mutex);
+@@ -294,6 +295,14 @@ size_t mtk_ddp_comp_get_num_formats(struct mtk_ddp_comp *comp)
+ 	return 0;
+ }
+ 
++static inline bool mtk_ddp_comp_is_afbc_supported(struct mtk_ddp_comp *comp)
++{
++	if (comp->funcs && comp->funcs->is_afbc_supported)
++		return comp->funcs->is_afbc_supported(comp->dev);
++
++	return false;
++}
++
+ static inline bool mtk_ddp_comp_add(struct mtk_ddp_comp *comp, struct mtk_mutex *mutex)
+ {
+ 	if (comp->funcs && comp->funcs->add) {
+diff --git a/drivers/gpu/drm/mediatek/mtk_disp_drv.h b/drivers/gpu/drm/mediatek/mtk_disp_drv.h
+index 04154db9085c08..c0f7f77e057460 100644
+--- a/drivers/gpu/drm/mediatek/mtk_disp_drv.h
++++ b/drivers/gpu/drm/mediatek/mtk_disp_drv.h
+@@ -106,6 +106,7 @@ void mtk_ovl_disable_vblank(struct device *dev);
+ u32 mtk_ovl_get_blend_modes(struct device *dev);
+ const u32 *mtk_ovl_get_formats(struct device *dev);
+ size_t mtk_ovl_get_num_formats(struct device *dev);
++bool mtk_ovl_is_afbc_supported(struct device *dev);
+ 
+ void mtk_ovl_adaptor_add_comp(struct device *dev, struct mtk_mutex *mutex);
+ void mtk_ovl_adaptor_remove_comp(struct device *dev, struct mtk_mutex *mutex);
+diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+index 19b0d508398198..ca4a9a60b8904e 100644
+--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
++++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+@@ -236,6 +236,13 @@ size_t mtk_ovl_get_num_formats(struct device *dev)
+ 	return ovl->data->num_formats;
+ }
+ 
++bool mtk_ovl_is_afbc_supported(struct device *dev)
++{
++	struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
++
++	return ovl->data->supports_afbc;
++}
++
+ int mtk_ovl_clk_enable(struct device *dev)
+ {
+ 	struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
+diff --git a/drivers/gpu/drm/mediatek/mtk_plane.c b/drivers/gpu/drm/mediatek/mtk_plane.c
+index 8a48b3b0a95676..74c2704efb6642 100644
+--- a/drivers/gpu/drm/mediatek/mtk_plane.c
++++ b/drivers/gpu/drm/mediatek/mtk_plane.c
+@@ -285,9 +285,14 @@ static void mtk_plane_atomic_disable(struct drm_plane *plane,
+ 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ 									   plane);
+ 	struct mtk_plane_state *mtk_plane_state = to_mtk_plane_state(new_state);
++	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
++									   plane);
++
+ 	mtk_plane_state->pending.enable = false;
+ 	wmb(); /* Make sure the above parameter is set before update */
+ 	mtk_plane_state->pending.dirty = true;
++
++	mtk_crtc_plane_disable(old_state->crtc, plane);
+ }
+ 
+ static void mtk_plane_atomic_update(struct drm_plane *plane,
+@@ -321,7 +326,8 @@ static const struct drm_plane_helper_funcs mtk_plane_helper_funcs = {
+ int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane,
+ 		   unsigned long possible_crtcs, enum drm_plane_type type,
+ 		   unsigned int supported_rotations, const u32 blend_modes,
+-		   const u32 *formats, size_t num_formats, unsigned int plane_idx)
++		   const u32 *formats, size_t num_formats,
++		   bool supports_afbc, unsigned int plane_idx)
+ {
+ 	int err;
+ 
+@@ -332,7 +338,9 @@ int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane,
+ 
+ 	err = drm_universal_plane_init(dev, plane, possible_crtcs,
+ 				       &mtk_plane_funcs, formats,
+-				       num_formats, modifiers, type, NULL);
++				       num_formats,
++				       supports_afbc ? modifiers : NULL,
++				       type, NULL);
+ 	if (err) {
+ 		DRM_ERROR("failed to initialize plane\n");
+ 		return err;
+diff --git a/drivers/gpu/drm/mediatek/mtk_plane.h b/drivers/gpu/drm/mediatek/mtk_plane.h
+index 3b13b89989c7e4..95c5fa5295d8ac 100644
+--- a/drivers/gpu/drm/mediatek/mtk_plane.h
++++ b/drivers/gpu/drm/mediatek/mtk_plane.h
+@@ -49,5 +49,6 @@ to_mtk_plane_state(struct drm_plane_state *state)
+ int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane,
+ 		   unsigned long possible_crtcs, enum drm_plane_type type,
+ 		   unsigned int supported_rotations, const u32 blend_modes,
+-		   const u32 *formats, size_t num_formats, unsigned int plane_idx);
++		   const u32 *formats, size_t num_formats,
++		   bool supports_afbc, unsigned int plane_idx);
+ #endif
+diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
+index 231ed53cf907c6..30ec13cb5b6d8e 100644
+--- a/drivers/gpu/drm/xe/xe_gt.c
++++ b/drivers/gpu/drm/xe/xe_gt.c
+@@ -389,6 +389,8 @@ int xe_gt_init_early(struct xe_gt *gt)
+ 	if (err)
+ 		return err;
+ 
++	xe_mocs_init_early(gt);
++
+ 	return 0;
+ }
+ 
+@@ -592,17 +594,15 @@ int xe_gt_init(struct xe_gt *gt)
+ 		xe_hw_fence_irq_init(&gt->fence_irq[i]);
+ 	}
+ 
+-	err = xe_gt_pagefault_init(gt);
++	err = xe_gt_sysfs_init(gt);
+ 	if (err)
+ 		return err;
+ 
+-	xe_mocs_init_early(gt);
+-
+-	err = xe_gt_sysfs_init(gt);
++	err = gt_fw_domain_init(gt);
+ 	if (err)
+ 		return err;
+ 
+-	err = gt_fw_domain_init(gt);
++	err = xe_gt_pagefault_init(gt);
+ 	if (err)
+ 		return err;
+ 
+@@ -773,6 +773,9 @@ static int gt_reset(struct xe_gt *gt)
+ 		goto err_out;
+ 	}
+ 
++	if (IS_SRIOV_PF(gt_to_xe(gt)))
++		xe_gt_sriov_pf_stop_prepare(gt);
++
+ 	xe_uc_gucrc_disable(&gt->uc);
+ 	xe_uc_stop_prepare(&gt->uc);
+ 	xe_gt_pagefault_reset(gt);
+diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
+index 905f409db74b08..57e9eddc092e14 100644
+--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
++++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
+@@ -5,14 +5,20 @@
+ 
+ #include <drm/drm_managed.h>
+ 
++#include "regs/xe_guc_regs.h"
+ #include "regs/xe_regs.h"
+ 
++#include "xe_gt.h"
+ #include "xe_gt_sriov_pf.h"
+ #include "xe_gt_sriov_pf_config.h"
+ #include "xe_gt_sriov_pf_control.h"
+ #include "xe_gt_sriov_pf_helpers.h"
+ #include "xe_gt_sriov_pf_service.h"
++#include "xe_gt_sriov_printk.h"
+ #include "xe_mmio.h"
++#include "xe_pm.h"
++
++static void pf_worker_restart_func(struct work_struct *w);
+ 
+ /*
+  * VF's metadata is maintained in the flexible array where:
+@@ -38,6 +44,11 @@ static int pf_alloc_metadata(struct xe_gt *gt)
+ 	return 0;
+ }
+ 
++static void pf_init_workers(struct xe_gt *gt)
++{
++	INIT_WORK(&gt->sriov.pf.workers.restart, pf_worker_restart_func);
++}
++
+ /**
+  * xe_gt_sriov_pf_init_early - Prepare SR-IOV PF data structures on PF.
+  * @gt: the &xe_gt to initialize
+@@ -62,6 +73,8 @@ int xe_gt_sriov_pf_init_early(struct xe_gt *gt)
+ 	if (err)
+ 		return err;
+ 
++	pf_init_workers(gt);
++
+ 	return 0;
+ }
+ 
+@@ -89,14 +102,111 @@ void xe_gt_sriov_pf_init_hw(struct xe_gt *gt)
+ 	xe_gt_sriov_pf_service_update(gt);
+ }
+ 
++static u32 pf_get_vf_regs_stride(struct xe_device *xe)
++{
++	return GRAPHICS_VERx100(xe) > 1200 ? 0x400 : 0x1000;
++}
++
++static struct xe_reg xe_reg_vf_to_pf(struct xe_reg vf_reg, unsigned int vfid, u32 stride)
++{
++	struct xe_reg pf_reg = vf_reg;
++
++	pf_reg.vf = 0;
++	pf_reg.addr += stride * vfid;
++
++	return pf_reg;
++}
++
++static void pf_clear_vf_scratch_regs(struct xe_gt *gt, unsigned int vfid)
++{
++	u32 stride = pf_get_vf_regs_stride(gt_to_xe(gt));
++	struct xe_reg scratch;
++	int n, count;
++
++	if (xe_gt_is_media_type(gt)) {
++		count = MED_VF_SW_FLAG_COUNT;
++		for (n = 0; n < count; n++) {
++			scratch = xe_reg_vf_to_pf(MED_VF_SW_FLAG(n), vfid, stride);
++			xe_mmio_write32(gt, scratch, 0);
++		}
++	} else {
++		count = VF_SW_FLAG_COUNT;
++		for (n = 0; n < count; n++) {
++			scratch = xe_reg_vf_to_pf(VF_SW_FLAG(n), vfid, stride);
++			xe_mmio_write32(gt, scratch, 0);
++		}
++	}
++}
++
+ /**
+- * xe_gt_sriov_pf_restart - Restart SR-IOV support after a GT reset.
++ * xe_gt_sriov_pf_sanitize_hw() - Reset hardware state related to a VF.
+  * @gt: the &xe_gt
++ * @vfid: the VF identifier
+  *
+  * This function can only be called on PF.
+  */
+-void xe_gt_sriov_pf_restart(struct xe_gt *gt)
++void xe_gt_sriov_pf_sanitize_hw(struct xe_gt *gt, unsigned int vfid)
++{
++	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
++
++	pf_clear_vf_scratch_regs(gt, vfid);
++}
++
++static void pf_cancel_restart(struct xe_gt *gt)
++{
++	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
++
++	if (cancel_work_sync(&gt->sriov.pf.workers.restart))
++		xe_gt_sriov_dbg_verbose(gt, "pending restart canceled!\n");
++}
++
++/**
++ * xe_gt_sriov_pf_stop_prepare() - Prepare to stop SR-IOV support.
++ * @gt: the &xe_gt
++ *
++ * This function can only be called on the PF.
++ */
++void xe_gt_sriov_pf_stop_prepare(struct xe_gt *gt)
+ {
++	pf_cancel_restart(gt);
++}
++
++static void pf_restart(struct xe_gt *gt)
++{
++	struct xe_device *xe = gt_to_xe(gt);
++
++	xe_pm_runtime_get(xe);
+ 	xe_gt_sriov_pf_config_restart(gt);
+ 	xe_gt_sriov_pf_control_restart(gt);
++	xe_pm_runtime_put(xe);
++
++	xe_gt_sriov_dbg(gt, "restart completed\n");
++}
++
++static void pf_worker_restart_func(struct work_struct *w)
++{
++	struct xe_gt *gt = container_of(w, typeof(*gt), sriov.pf.workers.restart);
++
++	pf_restart(gt);
++}
++
++static void pf_queue_restart(struct xe_gt *gt)
++{
++	struct xe_device *xe = gt_to_xe(gt);
++
++	xe_gt_assert(gt, IS_SRIOV_PF(xe));
++
++	if (!queue_work(xe->sriov.wq, &gt->sriov.pf.workers.restart))
++		xe_gt_sriov_dbg(gt, "restart already in queue!\n");
++}
++
++/**
++ * xe_gt_sriov_pf_restart - Restart SR-IOV support after a GT reset.
++ * @gt: the &xe_gt
++ *
++ * This function can only be called on PF.
++ */
++void xe_gt_sriov_pf_restart(struct xe_gt *gt)
++{
++	pf_queue_restart(gt);
+ }
+diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf.h
+index f0cb726a6919f1..165ba31d039134 100644
+--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf.h
++++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf.h
+@@ -11,6 +11,8 @@ struct xe_gt;
+ #ifdef CONFIG_PCI_IOV
+ int xe_gt_sriov_pf_init_early(struct xe_gt *gt);
+ void xe_gt_sriov_pf_init_hw(struct xe_gt *gt);
++void xe_gt_sriov_pf_sanitize_hw(struct xe_gt *gt, unsigned int vfid);
++void xe_gt_sriov_pf_stop_prepare(struct xe_gt *gt);
+ void xe_gt_sriov_pf_restart(struct xe_gt *gt);
+ #else
+ static inline int xe_gt_sriov_pf_init_early(struct xe_gt *gt)
+@@ -22,6 +24,10 @@ static inline void xe_gt_sriov_pf_init_hw(struct xe_gt *gt)
+ {
+ }
+ 
++static inline void xe_gt_sriov_pf_stop_prepare(struct xe_gt *gt)
++{
++}
++
+ static inline void xe_gt_sriov_pf_restart(struct xe_gt *gt)
+ {
+ }
+diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c
+index 02f7328bd6ceaa..b4fd5a81aff1f9 100644
+--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c
++++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c
+@@ -9,6 +9,7 @@
+ 
+ #include "xe_device.h"
+ #include "xe_gt.h"
++#include "xe_gt_sriov_pf.h"
+ #include "xe_gt_sriov_pf_config.h"
+ #include "xe_gt_sriov_pf_control.h"
+ #include "xe_gt_sriov_pf_helpers.h"
+@@ -1008,7 +1009,7 @@ static bool pf_exit_vf_flr_reset_mmio(struct xe_gt *gt, unsigned int vfid)
+ 	if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_RESET_MMIO))
+ 		return false;
+ 
+-	/* XXX: placeholder */
++	xe_gt_sriov_pf_sanitize_hw(gt, vfid);
+ 
+ 	pf_enter_vf_flr_send_finish(gt, vfid);
+ 	return true;
+diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h
+index 28e1b130bf87c9..a69d128c4f45a4 100644
+--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h
++++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h
+@@ -31,8 +31,17 @@ struct xe_gt_sriov_metadata {
+ 	struct xe_gt_sriov_pf_service_version version;
+ };
+ 
++/**
++ * struct xe_gt_sriov_pf_workers - GT level workers used by the PF.
++ */
++struct xe_gt_sriov_pf_workers {
++	/** @restart: worker that executes actions post GT reset */
++	struct work_struct restart;
++};
++
+ /**
+  * struct xe_gt_sriov_pf - GT level PF virtualization data.
++ * @workers: workers data.
+  * @service: service data.
+  * @control: control data.
+  * @policy: policy data.
+@@ -40,6 +49,7 @@ struct xe_gt_sriov_metadata {
+  * @vfs: metadata for all VFs.
+  */
+ struct xe_gt_sriov_pf {
++	struct xe_gt_sriov_pf_workers workers;
+ 	struct xe_gt_sriov_pf_service service;
+ 	struct xe_gt_sriov_pf_control control;
+ 	struct xe_gt_sriov_pf_policy policy;
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index 155deef867ac09..c2783d04c6e050 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -1873,9 +1873,12 @@ u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags)
+ 	/*
+ 	 * 7 extra bytes are necessary to achieve proper functionality
+ 	 * of implement() working on 8 byte chunks
++	 * 1 extra byte for the report ID if it is null (not used) so
++	 * we can reserve that extra byte in the first position of the buffer
++	 * when sending it to .raw_request()
+ 	 */
+ 
+-	u32 len = hid_report_len(report) + 7;
++	u32 len = hid_report_len(report) + 7 + (report->id == 0);
+ 
+ 	return kzalloc(len, flags);
+ }
+@@ -1963,7 +1966,7 @@ static struct hid_report *hid_get_report(struct hid_report_enum *report_enum,
+ int __hid_request(struct hid_device *hid, struct hid_report *report,
+ 		enum hid_class_request reqtype)
+ {
+-	char *buf;
++	char *buf, *data_buf;
+ 	int ret;
+ 	u32 len;
+ 
+@@ -1971,13 +1974,19 @@ int __hid_request(struct hid_device *hid, struct hid_report *report,
+ 	if (!buf)
+ 		return -ENOMEM;
+ 
++	data_buf = buf;
+ 	len = hid_report_len(report);
+ 
++	if (report->id == 0) {
++		/* reserve the first byte for the report ID */
++		data_buf++;
++		len++;
++	}
++
+ 	if (reqtype == HID_REQ_SET_REPORT)
+-		hid_output_report(report, buf);
++		hid_output_report(report, data_buf);
+ 
+-	ret = hid->ll_driver->raw_request(hid, report->id, buf, len,
+-					  report->type, reqtype);
++	ret = hid_hw_raw_request(hid, report->id, buf, len, report->type, reqtype);
+ 	if (ret < 0) {
+ 		dbg_hid("unable to complete request: %d\n", ret);
+ 		goto out;
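
The two hid-core hunks cooperate: for reports without a report ID (report->id == 0), hid_alloc_report_buf() now allocates one extra byte and __hid_request() shifts the payload up by one, so byte 0 can carry the null report ID that the raw_request path expects at the front of the buffer. The sizing logic, with hypothetical stand-ins for the report fields:

#include <stdio.h>

/* hypothetical stand-ins for the hid_report fields that matter here */
struct report { unsigned id, size_bits, count; };

static unsigned report_len(const struct report *r)
{
	return (r->size_bits * r->count + 7) / 8;   /* payload bytes, rounded up */
}

int main(void)
{
	struct report unnumbered = { .id = 0, .size_bits = 8, .count = 4 };

	/* 7 extra bytes for implement()'s 8-byte chunks, plus 1 to
	 * reserve a leading byte for the null report ID */
	unsigned alloc = report_len(&unnumbered) + 7 + (unnumbered.id == 0);

	printf("payload %u, allocation %u\n", report_len(&unnumbered), alloc);
	/* payload 4, allocation 12 */
	return 0;
}
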
+diff --git a/drivers/hwmon/corsair-cpro.c b/drivers/hwmon/corsair-cpro.c
+index e1a7f7aa7f8048..b7b911f8359c7f 100644
+--- a/drivers/hwmon/corsair-cpro.c
++++ b/drivers/hwmon/corsair-cpro.c
+@@ -89,6 +89,7 @@ struct ccp_device {
+ 	struct mutex mutex; /* whenever buffer is used, lock before send_usb_cmd */
+ 	u8 *cmd_buffer;
+ 	u8 *buffer;
++	int buffer_recv_size; /* number of received bytes in buffer */
+ 	int target[6];
+ 	DECLARE_BITMAP(temp_cnct, NUM_TEMP_SENSORS);
+ 	DECLARE_BITMAP(fan_cnct, NUM_FANS);
+@@ -146,6 +147,9 @@ static int send_usb_cmd(struct ccp_device *ccp, u8 command, u8 byte1, u8 byte2,
+ 	if (!t)
+ 		return -ETIMEDOUT;
+ 
++	if (ccp->buffer_recv_size != IN_BUFFER_SIZE)
++		return -EPROTO;
++
+ 	return ccp_get_errno(ccp);
+ }
+ 
+@@ -157,6 +161,7 @@ static int ccp_raw_event(struct hid_device *hdev, struct hid_report *report, u8
+ 	spin_lock(&ccp->wait_input_report_lock);
+ 	if (!completion_done(&ccp->wait_input_report)) {
+ 		memcpy(ccp->buffer, data, min(IN_BUFFER_SIZE, size));
++		ccp->buffer_recv_size = size;
+ 		complete_all(&ccp->wait_input_report);
+ 	}
+ 	spin_unlock(&ccp->wait_input_report_lock);
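
The corsair-cpro fix records how many bytes the raw-event callback actually received and rejects anything that is not a full IN_BUFFER_SIZE report with -EPROTO before the reply is parsed, so a truncated USB transfer can no longer be read as a valid reply. The validation pattern reduced to its essentials (constants are illustrative):

#include <stdio.h>
#include <string.h>

#define IN_BUFFER_SIZE 64
#define EPROTO 71        /* illustrative; normally from <errno.h> */

static unsigned char buffer[IN_BUFFER_SIZE];
static int buffer_recv_size;

/* analogue of ccp_raw_event(): copy at most the buffer, record the real size */
static void on_raw_event(const unsigned char *data, int size)
{
	memcpy(buffer, data, size < IN_BUFFER_SIZE ? size : IN_BUFFER_SIZE);
	buffer_recv_size = size;
}

static int check_reply(void)
{
	if (buffer_recv_size != IN_BUFFER_SIZE)
		return -EPROTO;         /* short transfer: don't parse it */
	return 0;
}

int main(void)
{
	unsigned char pkt[16] = { 0 };

	on_raw_event(pkt, sizeof(pkt));                 /* truncated report */
	printf("short reply -> %d\n", check_reply());   /* -71 */
	return 0;
}
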
+diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
+index 2254abda5c46c9..0e679cc5014882 100644
+--- a/drivers/i2c/busses/Kconfig
++++ b/drivers/i2c/busses/Kconfig
+@@ -937,6 +937,7 @@ config I2C_OMAP
+ 	tristate "OMAP I2C adapter"
+ 	depends on ARCH_OMAP || ARCH_K3 || COMPILE_TEST
+ 	default MACH_OMAP_OSK
++	select MULTIPLEXER
+ 	help
+ 	  If you say yes to this option, support will be included for the
+ 	  I2C interface on the Texas Instruments OMAP1/2 family of processors.
+diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
+index 8c9cf08ad45e22..0bdee43dc134b0 100644
+--- a/drivers/i2c/busses/i2c-omap.c
++++ b/drivers/i2c/busses/i2c-omap.c
+@@ -24,6 +24,7 @@
+ #include <linux/platform_device.h>
+ #include <linux/clk.h>
+ #include <linux/io.h>
++#include <linux/mux/consumer.h>
+ #include <linux/of.h>
+ #include <linux/slab.h>
+ #include <linux/platform_data/i2c-omap.h>
+@@ -211,6 +212,7 @@ struct omap_i2c_dev {
+ 	u16			syscstate;
+ 	u16			westate;
+ 	u16			errata;
++	struct mux_state	*mux_state;
+ };
+ 
+ static const u8 reg_map_ip_v1[] = {
+@@ -1452,8 +1454,27 @@ omap_i2c_probe(struct platform_device *pdev)
+ 				       (1000 * omap->speed / 8);
+ 	}
+ 
++	if (of_property_present(node, "mux-states")) {
++		struct mux_state *mux_state;
++
++		mux_state = devm_mux_state_get(&pdev->dev, NULL);
++		if (IS_ERR(mux_state)) {
++			r = PTR_ERR(mux_state);
++			dev_dbg(&pdev->dev, "failed to get I2C mux: %d\n", r);
++			goto err_put_pm;
++		}
++		omap->mux_state = mux_state;
++		r = mux_state_select(omap->mux_state);
++		if (r) {
++			dev_err(&pdev->dev, "failed to select I2C mux: %d\n", r);
++			goto err_put_pm;
++		}
++	}
++
+ 	/* reset ASAP, clearing any IRQs */
+-	omap_i2c_init(omap);
++	r = omap_i2c_init(omap);
++	if (r)
++		goto err_mux_state_deselect;
+ 
+ 	if (omap->rev < OMAP_I2C_OMAP1_REV_2)
+ 		r = devm_request_irq(&pdev->dev, omap->irq, omap_i2c_omap1_isr,
+@@ -1496,6 +1517,10 @@ omap_i2c_probe(struct platform_device *pdev)
+ 
+ err_unuse_clocks:
+ 	omap_i2c_write_reg(omap, OMAP_I2C_CON_REG, 0);
++err_mux_state_deselect:
++	if (omap->mux_state)
++		mux_state_deselect(omap->mux_state);
++err_put_pm:
+ 	pm_runtime_dont_use_autosuspend(omap->dev);
+ 	pm_runtime_put_sync(omap->dev);
+ err_disable_pm:
+@@ -1511,6 +1536,9 @@ static void omap_i2c_remove(struct platform_device *pdev)
+ 
+ 	i2c_del_adapter(&omap->adapter);
+ 
++	if (omap->mux_state)
++		mux_state_deselect(omap->mux_state);
++
+ 	ret = pm_runtime_get_sync(&pdev->dev);
+ 	if (ret < 0)
+ 		dev_err(omap->dev, "Failed to resume hardware, skip disable\n");
+diff --git a/drivers/i2c/busses/i2c-stm32.c b/drivers/i2c/busses/i2c-stm32.c
+index 157c64e27d0bd3..f84ec056e36dfe 100644
+--- a/drivers/i2c/busses/i2c-stm32.c
++++ b/drivers/i2c/busses/i2c-stm32.c
+@@ -102,7 +102,6 @@ int stm32_i2c_prep_dma_xfer(struct device *dev, struct stm32_i2c_dma *dma,
+ 			    void *dma_async_param)
+ {
+ 	struct dma_async_tx_descriptor *txdesc;
+-	struct device *chan_dev;
+ 	int ret;
+ 
+ 	if (rd_wr) {
+@@ -116,11 +115,10 @@ int stm32_i2c_prep_dma_xfer(struct device *dev, struct stm32_i2c_dma *dma,
+ 	}
+ 
+ 	dma->dma_len = len;
+-	chan_dev = dma->chan_using->device->dev;
+ 
+-	dma->dma_buf = dma_map_single(chan_dev, buf, dma->dma_len,
++	dma->dma_buf = dma_map_single(dev, buf, dma->dma_len,
+ 				      dma->dma_data_dir);
+-	if (dma_mapping_error(chan_dev, dma->dma_buf)) {
++	if (dma_mapping_error(dev, dma->dma_buf)) {
+ 		dev_err(dev, "DMA mapping failed\n");
+ 		return -EINVAL;
+ 	}
+@@ -150,7 +148,7 @@ int stm32_i2c_prep_dma_xfer(struct device *dev, struct stm32_i2c_dma *dma,
+ 	return 0;
+ 
+ err:
+-	dma_unmap_single(chan_dev, dma->dma_buf, dma->dma_len,
++	dma_unmap_single(dev, dma->dma_buf, dma->dma_len,
+ 			 dma->dma_data_dir);
+ 	return ret;
+ }
+diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
+index 0174ead99de6c1..a4587f281216a4 100644
+--- a/drivers/i2c/busses/i2c-stm32f7.c
++++ b/drivers/i2c/busses/i2c-stm32f7.c
+@@ -739,12 +739,13 @@ static void stm32f7_i2c_disable_dma_req(struct stm32f7_i2c_dev *i2c_dev)
+ 
+ static void stm32f7_i2c_dma_callback(void *arg)
+ {
+-	struct stm32f7_i2c_dev *i2c_dev = (struct stm32f7_i2c_dev *)arg;
++	struct stm32f7_i2c_dev *i2c_dev = arg;
+ 	struct stm32_i2c_dma *dma = i2c_dev->dma;
+-	struct device *dev = dma->chan_using->device->dev;
+ 
+ 	stm32f7_i2c_disable_dma_req(i2c_dev);
+-	dma_unmap_single(dev, dma->dma_buf, dma->dma_len, dma->dma_data_dir);
++	dmaengine_terminate_async(dma->chan_using);
++	dma_unmap_single(i2c_dev->dev, dma->dma_buf, dma->dma_len,
++			 dma->dma_data_dir);
+ 	complete(&dma->dma_complete);
+ }
+ 
+@@ -1510,7 +1511,6 @@ static irqreturn_t stm32f7_i2c_handle_isr_errs(struct stm32f7_i2c_dev *i2c_dev,
+ 	u16 addr = f7_msg->addr;
+ 	void __iomem *base = i2c_dev->base;
+ 	struct device *dev = i2c_dev->dev;
+-	struct stm32_i2c_dma *dma = i2c_dev->dma;
+ 
+ 	/* Bus error */
+ 	if (status & STM32F7_I2C_ISR_BERR) {
+@@ -1551,10 +1551,8 @@ static irqreturn_t stm32f7_i2c_handle_isr_errs(struct stm32f7_i2c_dev *i2c_dev,
+ 	}
+ 
+ 	/* Disable dma */
+-	if (i2c_dev->use_dma) {
+-		stm32f7_i2c_disable_dma_req(i2c_dev);
+-		dmaengine_terminate_async(dma->chan_using);
+-	}
++	if (i2c_dev->use_dma)
++		stm32f7_i2c_dma_callback(i2c_dev);
+ 
+ 	i2c_dev->master_mode = false;
+ 	complete(&i2c_dev->complete);
+@@ -1600,7 +1598,6 @@ static irqreturn_t stm32f7_i2c_isr_event_thread(int irq, void *data)
+ {
+ 	struct stm32f7_i2c_dev *i2c_dev = data;
+ 	struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg;
+-	struct stm32_i2c_dma *dma = i2c_dev->dma;
+ 	void __iomem *base = i2c_dev->base;
+ 	u32 status, mask;
+ 	int ret;
+@@ -1619,10 +1616,8 @@ static irqreturn_t stm32f7_i2c_isr_event_thread(int irq, void *data)
+ 		dev_dbg(i2c_dev->dev, "<%s>: Receive NACK (addr %x)\n",
+ 			__func__, f7_msg->addr);
+ 		writel_relaxed(STM32F7_I2C_ICR_NACKCF, base + STM32F7_I2C_ICR);
+-		if (i2c_dev->use_dma) {
+-			stm32f7_i2c_disable_dma_req(i2c_dev);
+-			dmaengine_terminate_async(dma->chan_using);
+-		}
++		if (i2c_dev->use_dma)
++			stm32f7_i2c_dma_callback(i2c_dev);
+ 		f7_msg->result = -ENXIO;
+ 	}
+ 
+@@ -1640,8 +1635,7 @@ static irqreturn_t stm32f7_i2c_isr_event_thread(int irq, void *data)
+ 			ret = wait_for_completion_timeout(&i2c_dev->dma->dma_complete, HZ);
+ 			if (!ret) {
+ 				dev_dbg(i2c_dev->dev, "<%s>: Timed out\n", __func__);
+-				stm32f7_i2c_disable_dma_req(i2c_dev);
+-				dmaengine_terminate_async(dma->chan_using);
++				stm32f7_i2c_dma_callback(i2c_dev);
+ 				f7_msg->result = -ETIMEDOUT;
+ 			}
+ 		}
+diff --git a/drivers/iio/accel/fxls8962af-core.c b/drivers/iio/accel/fxls8962af-core.c
+index 5e17c1e6d2c717..a4e4e7964a1aa5 100644
+--- a/drivers/iio/accel/fxls8962af-core.c
++++ b/drivers/iio/accel/fxls8962af-core.c
+@@ -865,6 +865,8 @@ static int fxls8962af_buffer_predisable(struct iio_dev *indio_dev)
+ 	if (ret)
+ 		return ret;
+ 
++	synchronize_irq(data->irq);
++
+ 	ret = __fxls8962af_fifo_set_mode(data, false);
+ 
+ 	if (data->enable_event)
+diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
+index 0e371efbda705f..7394ea72948b17 100644
+--- a/drivers/iio/accel/st_accel_core.c
++++ b/drivers/iio/accel/st_accel_core.c
+@@ -1353,6 +1353,7 @@ static int apply_acpi_orientation(struct iio_dev *indio_dev)
+ 	union acpi_object *ont;
+ 	union acpi_object *elements;
+ 	acpi_status status;
++	struct device *parent = indio_dev->dev.parent;
+ 	int ret = -EINVAL;
+ 	unsigned int val;
+ 	int i, j;
+@@ -1371,7 +1372,7 @@ static int apply_acpi_orientation(struct iio_dev *indio_dev)
+ 	};
+ 
+ 
+-	adev = ACPI_COMPANION(indio_dev->dev.parent);
++	adev = ACPI_COMPANION(parent);
+ 	if (!adev)
+ 		return -ENXIO;
+ 
+@@ -1380,8 +1381,7 @@ static int apply_acpi_orientation(struct iio_dev *indio_dev)
+ 	if (status == AE_NOT_FOUND) {
+ 		return -ENXIO;
+ 	} else if (ACPI_FAILURE(status)) {
+-		dev_warn(&indio_dev->dev, "failed to execute _ONT: %d\n",
+-			 status);
++		dev_warn(parent, "failed to execute _ONT: %d\n", status);
+ 		return status;
+ 	}
+ 
+@@ -1457,12 +1457,12 @@ static int apply_acpi_orientation(struct iio_dev *indio_dev)
+ 	}
+ 
+ 	ret = 0;
+-	dev_info(&indio_dev->dev, "computed mount matrix from ACPI\n");
++	dev_info(parent, "computed mount matrix from ACPI\n");
+ 
+ out:
+ 	kfree(buffer.pointer);
+ 	if (ret)
+-		dev_dbg(&indio_dev->dev,
++		dev_dbg(parent,
+ 			"failed to apply ACPI orientation data: %d\n", ret);
+ 
+ 	return ret;
+diff --git a/drivers/iio/adc/axp20x_adc.c b/drivers/iio/adc/axp20x_adc.c
+index 6c1a5d1b0a83d4..0226dfbcf4aed1 100644
+--- a/drivers/iio/adc/axp20x_adc.c
++++ b/drivers/iio/adc/axp20x_adc.c
+@@ -217,6 +217,7 @@ static struct iio_map axp717_maps[] = {
+ 		.consumer_channel = "batt_chrg_i",
+ 		.adc_channel_label = "batt_chrg_i",
+ 	},
++	{ }
+ };
+ 
+ /*
+diff --git a/drivers/iio/adc/max1363.c b/drivers/iio/adc/max1363.c
+index d0c6e94f7204ee..c9d531f233ebaf 100644
+--- a/drivers/iio/adc/max1363.c
++++ b/drivers/iio/adc/max1363.c
+@@ -504,10 +504,10 @@ static const struct iio_event_spec max1363_events[] = {
+ 	MAX1363_CHAN_U(1, _s1, 1, bits, ev_spec, num_ev_spec),		\
+ 	MAX1363_CHAN_U(2, _s2, 2, bits, ev_spec, num_ev_spec),		\
+ 	MAX1363_CHAN_U(3, _s3, 3, bits, ev_spec, num_ev_spec),		\
+-	MAX1363_CHAN_B(0, 1, d0m1, 4, bits, ev_spec, num_ev_spec),	\
+-	MAX1363_CHAN_B(2, 3, d2m3, 5, bits, ev_spec, num_ev_spec),	\
+-	MAX1363_CHAN_B(1, 0, d1m0, 6, bits, ev_spec, num_ev_spec),	\
+-	MAX1363_CHAN_B(3, 2, d3m2, 7, bits, ev_spec, num_ev_spec),	\
++	MAX1363_CHAN_B(0, 1, d0m1, 12, bits, ev_spec, num_ev_spec),	\
++	MAX1363_CHAN_B(2, 3, d2m3, 13, bits, ev_spec, num_ev_spec),	\
++	MAX1363_CHAN_B(1, 0, d1m0, 18, bits, ev_spec, num_ev_spec),	\
++	MAX1363_CHAN_B(3, 2, d3m2, 19, bits, ev_spec, num_ev_spec),	\
+ 	IIO_CHAN_SOFT_TIMESTAMP(8)					\
+ 	}
+ 
+@@ -525,23 +525,23 @@ static const struct iio_chan_spec max1363_channels[] =
+ /* Applies to max1236, max1237 */
+ static const enum max1363_modes max1236_mode_list[] = {
+ 	_s0, _s1, _s2, _s3,
+-	s0to1, s0to2, s0to3,
++	s0to1, s0to2, s2to3, s0to3,
+ 	d0m1, d2m3, d1m0, d3m2,
+ 	d0m1to2m3, d1m0to3m2,
+-	s2to3,
+ };
+ 
+ /* Applies to max1238, max1239 */
+ static const enum max1363_modes max1238_mode_list[] = {
+ 	_s0, _s1, _s2, _s3, _s4, _s5, _s6, _s7, _s8, _s9, _s10, _s11,
+ 	s0to1, s0to2, s0to3, s0to4, s0to5, s0to6,
++	s6to7, s6to8, s6to9, s6to10, s6to11,
+ 	s0to7, s0to8, s0to9, s0to10, s0to11,
+ 	d0m1, d2m3, d4m5, d6m7, d8m9, d10m11,
+ 	d1m0, d3m2, d5m4, d7m6, d9m8, d11m10,
+-	d0m1to2m3, d0m1to4m5, d0m1to6m7, d0m1to8m9, d0m1to10m11,
+-	d1m0to3m2, d1m0to5m4, d1m0to7m6, d1m0to9m8, d1m0to11m10,
+-	s6to7, s6to8, s6to9, s6to10, s6to11,
+-	d6m7to8m9, d6m7to10m11, d7m6to9m8, d7m6to11m10,
++	d0m1to2m3, d0m1to4m5, d0m1to6m7, d6m7to8m9,
++	d0m1to8m9, d6m7to10m11, d0m1to10m11, d1m0to3m2,
++	d1m0to5m4, d1m0to7m6, d7m6to9m8, d1m0to9m8,
++	d7m6to11m10, d1m0to11m10,
+ };
+ 
+ #define MAX1363_12X_CHANS(bits) {				\
+@@ -577,16 +577,15 @@ static const struct iio_chan_spec max1238_channels[] = MAX1363_12X_CHANS(12);
+ 
+ static const enum max1363_modes max11607_mode_list[] = {
+ 	_s0, _s1, _s2, _s3,
+-	s0to1, s0to2, s0to3,
+-	s2to3,
++	s0to1, s0to2, s2to3,
++	s0to3,
+ 	d0m1, d2m3, d1m0, d3m2,
+ 	d0m1to2m3, d1m0to3m2,
+ };
+ 
+ static const enum max1363_modes max11608_mode_list[] = {
+ 	_s0, _s1, _s2, _s3, _s4, _s5, _s6, _s7,
+-	s0to1, s0to2, s0to3, s0to4, s0to5, s0to6, s0to7,
+-	s6to7,
++	s0to1, s0to2, s0to3, s0to4, s0to5, s0to6, s6to7, s0to7,
+ 	d0m1, d2m3, d4m5, d6m7,
+ 	d1m0, d3m2, d5m4, d7m6,
+ 	d0m1to2m3, d0m1to4m5, d0m1to6m7,
+@@ -602,14 +601,14 @@ static const enum max1363_modes max11608_mode_list[] = {
+ 	MAX1363_CHAN_U(5, _s5, 5, bits, NULL, 0),	\
+ 	MAX1363_CHAN_U(6, _s6, 6, bits, NULL, 0),	\
+ 	MAX1363_CHAN_U(7, _s7, 7, bits, NULL, 0),	\
+-	MAX1363_CHAN_B(0, 1, d0m1, 8, bits, NULL, 0),	\
+-	MAX1363_CHAN_B(2, 3, d2m3, 9, bits, NULL, 0),	\
+-	MAX1363_CHAN_B(4, 5, d4m5, 10, bits, NULL, 0),	\
+-	MAX1363_CHAN_B(6, 7, d6m7, 11, bits, NULL, 0),	\
+-	MAX1363_CHAN_B(1, 0, d1m0, 12, bits, NULL, 0),	\
+-	MAX1363_CHAN_B(3, 2, d3m2, 13, bits, NULL, 0),	\
+-	MAX1363_CHAN_B(5, 4, d5m4, 14, bits, NULL, 0),	\
+-	MAX1363_CHAN_B(7, 6, d7m6, 15, bits, NULL, 0),	\
++	MAX1363_CHAN_B(0, 1, d0m1, 12, bits, NULL, 0),	\
++	MAX1363_CHAN_B(2, 3, d2m3, 13, bits, NULL, 0),	\
++	MAX1363_CHAN_B(4, 5, d4m5, 14, bits, NULL, 0),	\
++	MAX1363_CHAN_B(6, 7, d6m7, 15, bits, NULL, 0),	\
++	MAX1363_CHAN_B(1, 0, d1m0, 18, bits, NULL, 0),	\
++	MAX1363_CHAN_B(3, 2, d3m2, 19, bits, NULL, 0),	\
++	MAX1363_CHAN_B(5, 4, d5m4, 20, bits, NULL, 0),	\
++	MAX1363_CHAN_B(7, 6, d7m6, 21, bits, NULL, 0),	\
+ 	IIO_CHAN_SOFT_TIMESTAMP(16)			\
+ }
+ static const struct iio_chan_spec max11602_channels[] = MAX1363_8X_CHANS(8);
+diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c
+index 616dd729666aa1..97ea15cba9f7a9 100644
+--- a/drivers/iio/adc/stm32-adc-core.c
++++ b/drivers/iio/adc/stm32-adc-core.c
+@@ -429,10 +429,9 @@ static int stm32_adc_irq_probe(struct platform_device *pdev,
+ 		return -ENOMEM;
+ 	}
+ 
+-	for (i = 0; i < priv->cfg->num_irqs; i++) {
+-		irq_set_chained_handler(priv->irq[i], stm32_adc_irq_handler);
+-		irq_set_handler_data(priv->irq[i], priv);
+-	}
++	for (i = 0; i < priv->cfg->num_irqs; i++)
++		irq_set_chained_handler_and_data(priv->irq[i],
++						 stm32_adc_irq_handler, priv);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
+index 1b4287991d00ae..48a194b8e06014 100644
+--- a/drivers/iio/common/st_sensors/st_sensors_core.c
++++ b/drivers/iio/common/st_sensors/st_sensors_core.c
+@@ -154,7 +154,7 @@ static int st_sensors_set_fullscale(struct iio_dev *indio_dev, unsigned int fs)
+ 	return err;
+ 
+ st_accel_set_fullscale_error:
+-	dev_err(&indio_dev->dev, "failed to set new fullscale.\n");
++	dev_err(indio_dev->dev.parent, "failed to set new fullscale.\n");
+ 	return err;
+ }
+ 
+@@ -231,8 +231,7 @@ int st_sensors_power_enable(struct iio_dev *indio_dev)
+ 					     ARRAY_SIZE(regulator_names),
+ 					     regulator_names);
+ 	if (err)
+-		return dev_err_probe(&indio_dev->dev, err,
+-				     "unable to enable supplies\n");
++		return dev_err_probe(parent, err, "unable to enable supplies\n");
+ 
+ 	return 0;
+ }
+@@ -241,13 +240,14 @@ EXPORT_SYMBOL_NS(st_sensors_power_enable, IIO_ST_SENSORS);
+ static int st_sensors_set_drdy_int_pin(struct iio_dev *indio_dev,
+ 					struct st_sensors_platform_data *pdata)
+ {
++	struct device *parent = indio_dev->dev.parent;
+ 	struct st_sensor_data *sdata = iio_priv(indio_dev);
+ 
+ 	/* Sensor does not support interrupts */
+ 	if (!sdata->sensor_settings->drdy_irq.int1.addr &&
+ 	    !sdata->sensor_settings->drdy_irq.int2.addr) {
+ 		if (pdata->drdy_int_pin)
+-			dev_info(&indio_dev->dev,
++			dev_info(parent,
+ 				 "DRDY on pin INT%d specified, but sensor does not support interrupts\n",
+ 				 pdata->drdy_int_pin);
+ 		return 0;
+@@ -256,29 +256,27 @@ static int st_sensors_set_drdy_int_pin(struct iio_dev *indio_dev,
+ 	switch (pdata->drdy_int_pin) {
+ 	case 1:
+ 		if (!sdata->sensor_settings->drdy_irq.int1.mask) {
+-			dev_err(&indio_dev->dev,
+-					"DRDY on INT1 not available.\n");
++			dev_err(parent, "DRDY on INT1 not available.\n");
+ 			return -EINVAL;
+ 		}
+ 		sdata->drdy_int_pin = 1;
+ 		break;
+ 	case 2:
+ 		if (!sdata->sensor_settings->drdy_irq.int2.mask) {
+-			dev_err(&indio_dev->dev,
+-					"DRDY on INT2 not available.\n");
++			dev_err(parent, "DRDY on INT2 not available.\n");
+ 			return -EINVAL;
+ 		}
+ 		sdata->drdy_int_pin = 2;
+ 		break;
+ 	default:
+-		dev_err(&indio_dev->dev, "DRDY on pdata not valid.\n");
++		dev_err(parent, "DRDY on pdata not valid.\n");
+ 		return -EINVAL;
+ 	}
+ 
+ 	if (pdata->open_drain) {
+ 		if (!sdata->sensor_settings->drdy_irq.int1.addr_od &&
+ 		    !sdata->sensor_settings->drdy_irq.int2.addr_od)
+-			dev_err(&indio_dev->dev,
++			dev_err(parent,
+ 				"open drain requested but unsupported.\n");
+ 		else
+ 			sdata->int_pin_open_drain = true;
+@@ -336,6 +334,7 @@ EXPORT_SYMBOL_NS(st_sensors_dev_name_probe, IIO_ST_SENSORS);
+ int st_sensors_init_sensor(struct iio_dev *indio_dev,
+ 					struct st_sensors_platform_data *pdata)
+ {
++	struct device *parent = indio_dev->dev.parent;
+ 	struct st_sensor_data *sdata = iio_priv(indio_dev);
+ 	struct st_sensors_platform_data *of_pdata;
+ 	int err = 0;
+@@ -343,7 +342,7 @@ int st_sensors_init_sensor(struct iio_dev *indio_dev,
+ 	mutex_init(&sdata->odr_lock);
+ 
+ 	/* If OF/DT pdata exists, it will take precedence of anything else */
+-	of_pdata = st_sensors_dev_probe(indio_dev->dev.parent, pdata);
++	of_pdata = st_sensors_dev_probe(parent, pdata);
+ 	if (IS_ERR(of_pdata))
+ 		return PTR_ERR(of_pdata);
+ 	if (of_pdata)
+@@ -370,7 +369,7 @@ int st_sensors_init_sensor(struct iio_dev *indio_dev,
+ 		if (err < 0)
+ 			return err;
+ 	} else
+-		dev_info(&indio_dev->dev, "Full-scale not possible\n");
++		dev_info(parent, "Full-scale not possible\n");
+ 
+ 	err = st_sensors_set_odr(indio_dev, sdata->odr);
+ 	if (err < 0)
+@@ -405,7 +404,7 @@ int st_sensors_init_sensor(struct iio_dev *indio_dev,
+ 			mask = sdata->sensor_settings->drdy_irq.int2.mask_od;
+ 		}
+ 
+-		dev_info(&indio_dev->dev,
++		dev_info(parent,
+ 			 "set interrupt line to open drain mode on pin %d\n",
+ 			 sdata->drdy_int_pin);
+ 		err = st_sensors_write_data_with_mask(indio_dev, addr,
+@@ -594,21 +593,20 @@ EXPORT_SYMBOL_NS(st_sensors_get_settings_index, IIO_ST_SENSORS);
+ int st_sensors_verify_id(struct iio_dev *indio_dev)
+ {
+ 	struct st_sensor_data *sdata = iio_priv(indio_dev);
++	struct device *parent = indio_dev->dev.parent;
+ 	int wai, err;
+ 
+ 	if (sdata->sensor_settings->wai_addr) {
+ 		err = regmap_read(sdata->regmap,
+ 				  sdata->sensor_settings->wai_addr, &wai);
+ 		if (err < 0) {
+-			dev_err(&indio_dev->dev,
+-				"failed to read Who-Am-I register.\n");
+-			return err;
++			return dev_err_probe(parent, err,
++					     "failed to read Who-Am-I register.\n");
+ 		}
+ 
+ 		if (sdata->sensor_settings->wai != wai) {
+-			dev_warn(&indio_dev->dev,
+-				"%s: WhoAmI mismatch (0x%x).\n",
+-				indio_dev->name, wai);
++			dev_warn(parent, "%s: WhoAmI mismatch (0x%x).\n",
++				 indio_dev->name, wai);
+ 		}
+ 	}
+ 
+diff --git a/drivers/iio/common/st_sensors/st_sensors_trigger.c b/drivers/iio/common/st_sensors/st_sensors_trigger.c
+index a0df9250a69ff6..b900acd471bd4a 100644
+--- a/drivers/iio/common/st_sensors/st_sensors_trigger.c
++++ b/drivers/iio/common/st_sensors/st_sensors_trigger.c
+@@ -127,7 +127,7 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
+ 	sdata->trig = devm_iio_trigger_alloc(parent, "%s-trigger",
+ 					     indio_dev->name);
+ 	if (sdata->trig == NULL) {
+-		dev_err(&indio_dev->dev, "failed to allocate iio trigger.\n");
++		dev_err(parent, "failed to allocate iio trigger.\n");
+ 		return -ENOMEM;
+ 	}
+ 
+@@ -143,7 +143,7 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
+ 	case IRQF_TRIGGER_FALLING:
+ 	case IRQF_TRIGGER_LOW:
+ 		if (!sdata->sensor_settings->drdy_irq.addr_ihl) {
+-			dev_err(&indio_dev->dev,
++			dev_err(parent,
+ 				"falling/low specified for IRQ but hardware supports only rising/high: will request rising/high\n");
+ 			if (irq_trig == IRQF_TRIGGER_FALLING)
+ 				irq_trig = IRQF_TRIGGER_RISING;
+@@ -156,21 +156,19 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
+ 				sdata->sensor_settings->drdy_irq.mask_ihl, 1);
+ 			if (err < 0)
+ 				return err;
+-			dev_info(&indio_dev->dev,
++			dev_info(parent,
+ 				 "interrupts on the falling edge or active low level\n");
+ 		}
+ 		break;
+ 	case IRQF_TRIGGER_RISING:
+-		dev_info(&indio_dev->dev,
+-			 "interrupts on the rising edge\n");
++		dev_info(parent, "interrupts on the rising edge\n");
+ 		break;
+ 	case IRQF_TRIGGER_HIGH:
+-		dev_info(&indio_dev->dev,
+-			 "interrupts active high level\n");
++		dev_info(parent, "interrupts active high level\n");
+ 		break;
+ 	default:
+ 		/* This is the most preferred mode, if possible */
+-		dev_err(&indio_dev->dev,
++		dev_err(parent,
+ 			"unsupported IRQ trigger specified (%lx), enforce rising edge\n", irq_trig);
+ 		irq_trig = IRQF_TRIGGER_RISING;
+ 	}
+@@ -179,7 +177,7 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
+ 	if (irq_trig == IRQF_TRIGGER_FALLING ||
+ 	    irq_trig == IRQF_TRIGGER_RISING) {
+ 		if (!sdata->sensor_settings->drdy_irq.stat_drdy.addr) {
+-			dev_err(&indio_dev->dev,
++			dev_err(parent,
+ 				"edge IRQ not supported w/o stat register.\n");
+ 			return -EOPNOTSUPP;
+ 		}
+@@ -214,13 +212,13 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
+ 					sdata->trig->name,
+ 					sdata->trig);
+ 	if (err) {
+-		dev_err(&indio_dev->dev, "failed to request trigger IRQ.\n");
++		dev_err(parent, "failed to request trigger IRQ.\n");
+ 		return err;
+ 	}
+ 
+ 	err = devm_iio_trigger_register(parent, sdata->trig);
+ 	if (err < 0) {
+-		dev_err(&indio_dev->dev, "failed to register iio trigger.\n");
++		dev_err(parent, "failed to register iio trigger.\n");
+ 		return err;
+ 	}
+ 	indio_dev->trig = iio_trigger_get(sdata->trig);
+diff --git a/drivers/iio/industrialio-backend.c b/drivers/iio/industrialio-backend.c
+index 42e0ee683ef6b2..a3abcdeb6281ef 100644
+--- a/drivers/iio/industrialio-backend.c
++++ b/drivers/iio/industrialio-backend.c
+@@ -155,11 +155,14 @@ static ssize_t iio_backend_debugfs_write_reg(struct file *file,
+ 	ssize_t rc;
+ 	int ret;
+ 
++	if (count >= sizeof(buf))
++		return -ENOSPC;
++
+ 	rc = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, userbuf, count);
+ 	if (rc < 0)
+ 		return rc;
+ 
+-	buf[count] = '\0';
++	buf[rc] = '\0';
+ 
+ 	ret = sscanf(buf, "%i %i", &back->cached_reg_addr, &val);
+ 
+diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
+index 6d679e235af6cc..f0cab6870404fa 100644
+--- a/drivers/input/joystick/xpad.c
++++ b/drivers/input/joystick/xpad.c
+@@ -169,12 +169,12 @@ static const struct xpad_device {
+ 	{ 0x046d, 0xca88, "Logitech Compact Controller for Xbox", 0, XTYPE_XBOX },
+ 	{ 0x046d, 0xca8a, "Logitech Precision Vibration Feedback Wheel", 0, XTYPE_XBOX },
+ 	{ 0x046d, 0xcaa3, "Logitech DriveFx Racing Wheel", 0, XTYPE_XBOX360 },
++	{ 0x0502, 0x1305, "Acer NGR200", 0, XTYPE_XBOX360 },
+ 	{ 0x056e, 0x2004, "Elecom JC-U3613M", 0, XTYPE_XBOX360 },
+ 	{ 0x05fd, 0x1007, "Mad Catz Controller (unverified)", 0, XTYPE_XBOX },
+ 	{ 0x05fd, 0x107a, "InterAct 'PowerPad Pro' X-Box pad (Germany)", 0, XTYPE_XBOX },
+ 	{ 0x05fe, 0x3030, "Chic Controller", 0, XTYPE_XBOX },
+ 	{ 0x05fe, 0x3031, "Chic Controller", 0, XTYPE_XBOX },
+-	{ 0x0502, 0x1305, "Acer NGR200", 0, XTYPE_XBOX },
+ 	{ 0x062a, 0x0020, "Logic3 Xbox GamePad", 0, XTYPE_XBOX },
+ 	{ 0x062a, 0x0033, "Competition Pro Steering Wheel", 0, XTYPE_XBOX },
+ 	{ 0x06a3, 0x0200, "Saitek Racing Wheel", 0, XTYPE_XBOX },
+diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
+index 56e9f125cda9a0..af4e6c1e55db6f 100644
+--- a/drivers/iommu/intel/iommu.c
++++ b/drivers/iommu/intel/iommu.c
+@@ -4414,9 +4414,6 @@ static int device_set_dirty_tracking(struct list_head *devices, bool enable)
+ 			break;
+ 	}
+ 
+-	if (!ret)
+-		info->domain_attached = true;
+-
+ 	return ret;
+ }
+ 
+@@ -4600,6 +4597,9 @@ static int identity_domain_attach_dev(struct iommu_domain *domain, struct device
+ 		ret = device_setup_pass_through(dev);
+ 	}
+ 
++	if (!ret)
++		info->domain_attached = true;
++
+ 	return ret;
+ }
+ 
+diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
+index ca60ef209df837..aaa21fe295f2d7 100644
+--- a/drivers/md/dm-bufio.c
++++ b/drivers/md/dm-bufio.c
+@@ -2741,7 +2741,11 @@ static unsigned long __evict_many(struct dm_bufio_client *c,
+ 		__make_buffer_clean(b);
+ 		__free_buffer_wake(b);
+ 
+-		cond_resched();
++		if (need_resched()) {
++			dm_bufio_unlock(c);
++			cond_resched();
++			dm_bufio_lock(c);
++		}
+ 	}
+ 
+ 	return count;
+diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c
+index 9a3a784054cc6c..e6801ad14318b9 100644
+--- a/drivers/memstick/core/memstick.c
++++ b/drivers/memstick/core/memstick.c
+@@ -322,7 +322,7 @@ EXPORT_SYMBOL(memstick_init_req);
+ static int h_memstick_read_dev_id(struct memstick_dev *card,
+ 				  struct memstick_request **mrq)
+ {
+-	struct ms_id_register id_reg;
++	struct ms_id_register id_reg = {};
+ 
+ 	if (!(*mrq)) {
+ 		memstick_init_req(&card->current_mrq, MS_TPC_READ_REG, &id_reg,
+diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c
+index 35d8fdea668b91..f923447ed2ce23 100644
+--- a/drivers/mmc/host/bcm2835.c
++++ b/drivers/mmc/host/bcm2835.c
+@@ -502,7 +502,8 @@ void bcm2835_prepare_dma(struct bcm2835_host *host, struct mmc_data *data)
+ 				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ 
+ 	if (!desc) {
+-		dma_unmap_sg(dma_chan->device->dev, data->sg, sg_len, dir_data);
++		dma_unmap_sg(dma_chan->device->dev, data->sg, data->sg_len,
++			     dir_data);
+ 		return;
+ 	}
+ 
+diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
+index b0b1d403f35276..76ea0e892d4e44 100644
+--- a/drivers/mmc/host/sdhci-pci-core.c
++++ b/drivers/mmc/host/sdhci-pci-core.c
+@@ -912,7 +912,8 @@ static bool glk_broken_cqhci(struct sdhci_pci_slot *slot)
+ {
+ 	return slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_EMMC &&
+ 	       (dmi_match(DMI_BIOS_VENDOR, "LENOVO") ||
+-		dmi_match(DMI_SYS_VENDOR, "IRBIS"));
++		dmi_match(DMI_SYS_VENDOR, "IRBIS") ||
++		dmi_match(DMI_SYS_VENDOR, "Positivo Tecnologia SA"));
+ }
+ 
+ static bool jsl_broken_hs400es(struct sdhci_pci_slot *slot)
+diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c
+index 0aa3c40ea6ed8e..8e0eb0acf4428a 100644
+--- a/drivers/mmc/host/sdhci_am654.c
++++ b/drivers/mmc/host/sdhci_am654.c
+@@ -588,7 +588,8 @@ static const struct sdhci_ops sdhci_am654_ops = {
+ static const struct sdhci_pltfm_data sdhci_am654_pdata = {
+ 	.ops = &sdhci_am654_ops,
+ 	.quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
+-	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
++	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
++		   SDHCI_QUIRK2_DISABLE_HW_TIMEOUT,
+ };
+ 
+ static const struct sdhci_am654_driver_data sdhci_am654_sr1_drvdata = {
+@@ -618,7 +619,8 @@ static const struct sdhci_ops sdhci_j721e_8bit_ops = {
+ static const struct sdhci_pltfm_data sdhci_j721e_8bit_pdata = {
+ 	.ops = &sdhci_j721e_8bit_ops,
+ 	.quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
+-	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
++	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
++		   SDHCI_QUIRK2_DISABLE_HW_TIMEOUT,
+ };
+ 
+ static const struct sdhci_am654_driver_data sdhci_j721e_8bit_drvdata = {
+@@ -642,7 +644,8 @@ static const struct sdhci_ops sdhci_j721e_4bit_ops = {
+ static const struct sdhci_pltfm_data sdhci_j721e_4bit_pdata = {
+ 	.ops = &sdhci_j721e_4bit_ops,
+ 	.quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
+-	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
++	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
++		   SDHCI_QUIRK2_DISABLE_HW_TIMEOUT,
+ };
+ 
+ static const struct sdhci_am654_driver_data sdhci_j721e_4bit_drvdata = {
+diff --git a/drivers/net/can/m_can/tcan4x5x-core.c b/drivers/net/can/m_can/tcan4x5x-core.c
+index b6c5c8bab7390f..e8995738cf9967 100644
+--- a/drivers/net/can/m_can/tcan4x5x-core.c
++++ b/drivers/net/can/m_can/tcan4x5x-core.c
+@@ -92,6 +92,8 @@
+ #define TCAN4X5X_MODE_STANDBY BIT(6)
+ #define TCAN4X5X_MODE_NORMAL BIT(7)
+ 
++#define TCAN4X5X_NWKRQ_VOLTAGE_VIO BIT(19)
++
+ #define TCAN4X5X_DISABLE_WAKE_MSK	(BIT(31) | BIT(30))
+ #define TCAN4X5X_DISABLE_INH_MSK	BIT(9)
+ 
+@@ -267,6 +269,13 @@ static int tcan4x5x_init(struct m_can_classdev *cdev)
+ 	if (ret)
+ 		return ret;
+ 
++	if (tcan4x5x->nwkrq_voltage_vio) {
++		ret = regmap_set_bits(tcan4x5x->regmap, TCAN4X5X_CONFIG,
++				      TCAN4X5X_NWKRQ_VOLTAGE_VIO);
++		if (ret)
++			return ret;
++	}
++
+ 	return ret;
+ }
+ 
+@@ -318,21 +327,27 @@ static const struct tcan4x5x_version_info
+ 	return &tcan4x5x_versions[TCAN4X5X];
+ }
+ 
+-static int tcan4x5x_get_gpios(struct m_can_classdev *cdev,
+-			      const struct tcan4x5x_version_info *version_info)
++static void tcan4x5x_get_dt_data(struct m_can_classdev *cdev)
++{
++	struct tcan4x5x_priv *tcan4x5x = cdev_to_priv(cdev);
++
++	tcan4x5x->nwkrq_voltage_vio =
++		of_property_read_bool(cdev->dev->of_node, "ti,nwkrq-voltage-vio");
++}
++
++static int tcan4x5x_get_gpios(struct m_can_classdev *cdev)
+ {
+ 	struct tcan4x5x_priv *tcan4x5x = cdev_to_priv(cdev);
+ 	int ret;
+ 
+-	if (version_info->has_wake_pin) {
+-		tcan4x5x->device_wake_gpio = devm_gpiod_get(cdev->dev, "device-wake",
+-							    GPIOD_OUT_HIGH);
+-		if (IS_ERR(tcan4x5x->device_wake_gpio)) {
+-			if (PTR_ERR(tcan4x5x->device_wake_gpio) == -EPROBE_DEFER)
+-				return -EPROBE_DEFER;
++	tcan4x5x->device_wake_gpio = devm_gpiod_get_optional(cdev->dev,
++							     "device-wake",
++							     GPIOD_OUT_HIGH);
++	if (IS_ERR(tcan4x5x->device_wake_gpio)) {
++		if (PTR_ERR(tcan4x5x->device_wake_gpio) == -EPROBE_DEFER)
++			return -EPROBE_DEFER;
+ 
+-			tcan4x5x_disable_wake(cdev);
+-		}
++		tcan4x5x->device_wake_gpio = NULL;
+ 	}
+ 
+ 	tcan4x5x->reset_gpio = devm_gpiod_get_optional(cdev->dev, "reset",
+@@ -344,14 +359,31 @@ static int tcan4x5x_get_gpios(struct m_can_classdev *cdev,
+ 	if (ret)
+ 		return ret;
+ 
+-	if (version_info->has_state_pin) {
+-		tcan4x5x->device_state_gpio = devm_gpiod_get_optional(cdev->dev,
+-								      "device-state",
+-								      GPIOD_IN);
+-		if (IS_ERR(tcan4x5x->device_state_gpio)) {
+-			tcan4x5x->device_state_gpio = NULL;
+-			tcan4x5x_disable_state(cdev);
+-		}
++	tcan4x5x->device_state_gpio = devm_gpiod_get_optional(cdev->dev,
++							      "device-state",
++							      GPIOD_IN);
++	if (IS_ERR(tcan4x5x->device_state_gpio))
++		tcan4x5x->device_state_gpio = NULL;
++
++	return 0;
++}
++
++static int tcan4x5x_check_gpios(struct m_can_classdev *cdev,
++				const struct tcan4x5x_version_info *version_info)
++{
++	struct tcan4x5x_priv *tcan4x5x = cdev_to_priv(cdev);
++	int ret;
++
++	if (version_info->has_wake_pin && !tcan4x5x->device_wake_gpio) {
++		ret = tcan4x5x_disable_wake(cdev);
++		if (ret)
++			return ret;
++	}
++
++	if (version_info->has_state_pin && !tcan4x5x->device_state_gpio) {
++		ret = tcan4x5x_disable_state(cdev);
++		if (ret)
++			return ret;
+ 	}
+ 
+ 	return 0;
+@@ -442,18 +474,26 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
+ 		goto out_m_can_class_free_dev;
+ 	}
+ 
++	ret = tcan4x5x_get_gpios(mcan_class);
++	if (ret) {
++		dev_err(&spi->dev, "Getting gpios failed %pe\n", ERR_PTR(ret));
++		goto out_power;
++	}
++
+ 	version_info = tcan4x5x_find_version(priv);
+ 	if (IS_ERR(version_info)) {
+ 		ret = PTR_ERR(version_info);
+ 		goto out_power;
+ 	}
+ 
+-	ret = tcan4x5x_get_gpios(mcan_class, version_info);
++	ret = tcan4x5x_check_gpios(mcan_class, version_info);
+ 	if (ret) {
+-		dev_err(&spi->dev, "Getting gpios failed %pe\n", ERR_PTR(ret));
++		dev_err(&spi->dev, "Checking gpios failed %pe\n", ERR_PTR(ret));
+ 		goto out_power;
+ 	}
+ 
++	tcan4x5x_get_dt_data(mcan_class);
++
+ 	tcan4x5x_check_wake(priv);
+ 
+ 	ret = tcan4x5x_write_tcan_reg(mcan_class, TCAN4X5X_INT_EN, 0);
+diff --git a/drivers/net/can/m_can/tcan4x5x.h b/drivers/net/can/m_can/tcan4x5x.h
+index e62c030d3e1e5a..203399d5e8ccf3 100644
+--- a/drivers/net/can/m_can/tcan4x5x.h
++++ b/drivers/net/can/m_can/tcan4x5x.h
+@@ -42,6 +42,8 @@ struct tcan4x5x_priv {
+ 
+ 	struct tcan4x5x_map_buf map_buf_rx;
+ 	struct tcan4x5x_map_buf map_buf_tx;
++
++	bool nwkrq_voltage_vio;
+ };
+ 
+ static inline void
+diff --git a/drivers/net/ethernet/intel/ice/ice_debugfs.c b/drivers/net/ethernet/intel/ice/ice_debugfs.c
+index 9fc0fd95a13d8f..cb71eca6a85bf6 100644
+--- a/drivers/net/ethernet/intel/ice/ice_debugfs.c
++++ b/drivers/net/ethernet/intel/ice/ice_debugfs.c
+@@ -606,7 +606,7 @@ void ice_debugfs_fwlog_init(struct ice_pf *pf)
+ 
+ 	pf->ice_debugfs_pf_fwlog = debugfs_create_dir("fwlog",
+ 						      pf->ice_debugfs_pf);
+-	if (IS_ERR(pf->ice_debugfs_pf))
++	if (IS_ERR(pf->ice_debugfs_pf_fwlog))
+ 		goto err_create_module_files;
+ 
+ 	fw_modules_dir = debugfs_create_dir("modules",
+diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c
+index 2410aee59fb2d5..d132eb4775513c 100644
+--- a/drivers/net/ethernet/intel/ice/ice_lag.c
++++ b/drivers/net/ethernet/intel/ice/ice_lag.c
+@@ -2226,7 +2226,8 @@ bool ice_lag_is_switchdev_running(struct ice_pf *pf)
+ 	struct ice_lag *lag = pf->lag;
+ 	struct net_device *tmp_nd;
+ 
+-	if (!ice_is_feature_supported(pf, ICE_F_SRIOV_LAG) || !lag)
++	if (!ice_is_feature_supported(pf, ICE_F_SRIOV_LAG) ||
++	    !lag || !lag->upper_netdev)
+ 		return false;
+ 
+ 	rcu_read_lock();
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+index 8e24ba96c779ae..8ed47e7a7515b8 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+@@ -1156,8 +1156,9 @@ static void mlx5e_lro_update_tcp_hdr(struct mlx5_cqe64 *cqe, struct tcphdr *tcp)
+ 	}
+ }
+ 
+-static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
+-				 u32 cqe_bcnt)
++static unsigned int mlx5e_lro_update_hdr(struct sk_buff *skb,
++					 struct mlx5_cqe64 *cqe,
++					 u32 cqe_bcnt)
+ {
+ 	struct ethhdr	*eth = (struct ethhdr *)(skb->data);
+ 	struct tcphdr	*tcp;
+@@ -1207,6 +1208,8 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
+ 		tcp->check = tcp_v6_check(payload_len, &ipv6->saddr,
+ 					  &ipv6->daddr, check);
+ 	}
++
++	return (unsigned int)((unsigned char *)tcp + tcp->doff * 4 - skb->data);
+ }
+ 
+ static void *mlx5e_shampo_get_packet_hd(struct mlx5e_rq *rq, u16 header_index)
+@@ -1563,8 +1566,9 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
+ 		mlx5e_macsec_offload_handle_rx_skb(netdev, skb, cqe);
+ 
+ 	if (lro_num_seg > 1) {
+-		mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
+-		skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
++		unsigned int hdrlen = mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
++
++		skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt - hdrlen, lro_num_seg);
+ 		/* Subtract one since we already counted this as one
+ 		 * "regular" packet in mlx5e_complete_rx_cqe()
+ 		 */
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+index 220a9ac75c8ba0..5bc947f703b5ea 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+@@ -2241,6 +2241,7 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
+ 	{ PCI_VDEVICE(MELLANOX, 0x1021) },			/* ConnectX-7 */
+ 	{ PCI_VDEVICE(MELLANOX, 0x1023) },			/* ConnectX-8 */
+ 	{ PCI_VDEVICE(MELLANOX, 0x1025) },			/* ConnectX-9 */
++	{ PCI_VDEVICE(MELLANOX, 0x1027) },			/* ConnectX-10 */
+ 	{ PCI_VDEVICE(MELLANOX, 0xa2d2) },			/* BlueField integrated ConnectX-5 network controller */
+ 	{ PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF},	/* BlueField integrated ConnectX-5 network controller VF */
+ 	{ PCI_VDEVICE(MELLANOX, 0xa2d6) },			/* BlueField-2 integrated ConnectX-6 Dx network controller */
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+index 83ad7c7935e31e..23d9ece46d9c02 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+@@ -379,6 +379,12 @@ static int intel_crosststamp(ktime_t *device,
+ 		return -ETIMEDOUT;
+ 	}
+ 
++	*system = (struct system_counterval_t) {
++		.cycles = 0,
++		.cs_id = CSID_X86_ART,
++		.use_nsecs = false,
++	};
++
+ 	num_snapshot = (readl(ioaddr + GMAC_TIMESTAMP_STATUS) &
+ 			GMAC_TIMESTAMP_ATSNS_MASK) >>
+ 			GMAC_TIMESTAMP_ATSNS_SHIFT;
+@@ -394,7 +400,7 @@ static int intel_crosststamp(ktime_t *device,
+ 	}
+ 
+ 	system->cycles *= intel_priv->crossts_adj;
+-	system->cs_id = CSID_X86_ART;
++
+ 	priv->plat->flags &= ~STMMAC_FLAG_INT_SNAPSHOT_EN;
+ 
+ 	return 0;
+diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
+index deaf670c160ebf..e79220cb725b09 100644
+--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c
++++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
+@@ -1531,7 +1531,6 @@ static void wx_configure_rx_ring(struct wx *wx,
+ 				 struct wx_ring *ring)
+ {
+ 	u16 reg_idx = ring->reg_idx;
+-	union wx_rx_desc *rx_desc;
+ 	u64 rdba = ring->dma;
+ 	u32 rxdctl;
+ 
+@@ -1561,9 +1560,9 @@ static void wx_configure_rx_ring(struct wx *wx,
+ 	memset(ring->rx_buffer_info, 0,
+ 	       sizeof(struct wx_rx_buffer) * ring->count);
+ 
+-	/* initialize Rx descriptor 0 */
+-	rx_desc = WX_RX_DESC(ring, 0);
+-	rx_desc->wb.upper.length = 0;
++	/* reset ntu and ntc to place SW in sync with hardware */
++	ring->next_to_clean = 0;
++	ring->next_to_use = 0;
+ 
+ 	/* enable receive descriptor ring */
+ 	wr32m(wx, WX_PX_RR_CFG(reg_idx),
+@@ -2356,6 +2355,8 @@ void wx_update_stats(struct wx *wx)
+ 		hwstats->fdirmiss += rd32(wx, WX_RDB_FDIR_MISS);
+ 	}
+ 
++	/* qmprc is not cleared on read, manually reset it */
++	hwstats->qmprc = 0;
+ 	for (i = 0; i < wx->mac.max_rx_queues; i++)
+ 		hwstats->qmprc += rd32(wx, WX_PX_MPRC(i));
+ }
+diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
+index e711797a3a8cff..4c203f4afd6899 100644
+--- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c
++++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
+@@ -172,10 +172,6 @@ static void wx_dma_sync_frag(struct wx_ring *rx_ring,
+ 				      skb_frag_off(frag),
+ 				      skb_frag_size(frag),
+ 				      DMA_FROM_DEVICE);
+-
+-	/* If the page was released, just unmap it. */
+-	if (unlikely(WX_CB(skb)->page_released))
+-		page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false);
+ }
+ 
+ static struct wx_rx_buffer *wx_get_rx_buffer(struct wx_ring *rx_ring,
+@@ -225,10 +221,6 @@ static void wx_put_rx_buffer(struct wx_ring *rx_ring,
+ 			     struct sk_buff *skb,
+ 			     int rx_buffer_pgcnt)
+ {
+-	if (!IS_ERR(skb) && WX_CB(skb)->dma == rx_buffer->dma)
+-		/* the page has been released from the ring */
+-		WX_CB(skb)->page_released = true;
+-
+ 	/* clear contents of rx_buffer */
+ 	rx_buffer->page = NULL;
+ 	rx_buffer->skb = NULL;
+@@ -313,7 +305,7 @@ static bool wx_alloc_mapped_page(struct wx_ring *rx_ring,
+ 		return false;
+ 	dma = page_pool_get_dma_addr(page);
+ 
+-	bi->page_dma = dma;
++	bi->dma = dma;
+ 	bi->page = page;
+ 	bi->page_offset = 0;
+ 
+@@ -350,7 +342,7 @@ void wx_alloc_rx_buffers(struct wx_ring *rx_ring, u16 cleaned_count)
+ 						 DMA_FROM_DEVICE);
+ 
+ 		rx_desc->read.pkt_addr =
+-			cpu_to_le64(bi->page_dma + bi->page_offset);
++			cpu_to_le64(bi->dma + bi->page_offset);
+ 
+ 		rx_desc++;
+ 		bi++;
+@@ -363,6 +355,8 @@ void wx_alloc_rx_buffers(struct wx_ring *rx_ring, u16 cleaned_count)
+ 
+ 		/* clear the status bits for the next_to_use descriptor */
+ 		rx_desc->wb.upper.status_error = 0;
++		/* clear the length for the next_to_use descriptor */
++		rx_desc->wb.upper.length = 0;
+ 
+ 		cleaned_count--;
+ 	} while (cleaned_count);
+@@ -2219,9 +2213,6 @@ static void wx_clean_rx_ring(struct wx_ring *rx_ring)
+ 		if (rx_buffer->skb) {
+ 			struct sk_buff *skb = rx_buffer->skb;
+ 
+-			if (WX_CB(skb)->page_released)
+-				page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false);
+-
+ 			dev_kfree_skb(skb);
+ 		}
+ 
+@@ -2245,6 +2236,9 @@ static void wx_clean_rx_ring(struct wx_ring *rx_ring)
+ 		}
+ 	}
+ 
++	/* Zero out the descriptor ring */
++	memset(rx_ring->desc, 0, rx_ring->size);
++
+ 	rx_ring->next_to_alloc = 0;
+ 	rx_ring->next_to_clean = 0;
+ 	rx_ring->next_to_use = 0;
+diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h
+index dbac133eacfc58..950cacaf095a92 100644
+--- a/drivers/net/ethernet/wangxun/libwx/wx_type.h
++++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h
+@@ -787,7 +787,6 @@ enum wx_reset_type {
+ struct wx_cb {
+ 	dma_addr_t dma;
+ 	u16     append_cnt;      /* number of skb's appended */
+-	bool    page_released;
+ 	bool    dma_released;
+ };
+ 
+@@ -875,7 +874,6 @@ struct wx_tx_buffer {
+ struct wx_rx_buffer {
+ 	struct sk_buff *skb;
+ 	dma_addr_t dma;
+-	dma_addr_t page_dma;
+ 	struct page *page;
+ 	unsigned int page_offset;
+ };
+diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+index 940452d0a4d2a5..258096543b08ab 100644
+--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
++++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+@@ -285,7 +285,7 @@ static void xemaclite_aligned_read(u32 *src_ptr, u8 *dest_ptr,
+ 
+ 		/* Read the remaining data */
+ 		for (; length > 0; length--)
+-			*to_u8_ptr = *from_u8_ptr;
++			*to_u8_ptr++ = *from_u8_ptr++;
+ 	}
+ }
+ 
+diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
+index 8ec497023224a4..4376e116eb9f0f 100644
+--- a/drivers/net/hyperv/netvsc_drv.c
++++ b/drivers/net/hyperv/netvsc_drv.c
+@@ -2316,8 +2316,11 @@ static int netvsc_prepare_bonding(struct net_device *vf_netdev)
+ 	if (!ndev)
+ 		return NOTIFY_DONE;
+ 
+-	/* set slave flag before open to prevent IPv6 addrconf */
++	/* Set slave flag and no addrconf flag before open
++	 * to prevent IPv6 addrconf.
++	 */
+ 	vf_netdev->flags |= IFF_SLAVE;
++	vf_netdev->priv_flags |= IFF_NO_ADDRCONF;
+ 	return NOTIFY_DONE;
+ }
+ 
+diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
+index 13dea33d86ffa5..834624a61060ea 100644
+--- a/drivers/net/phy/phy_device.c
++++ b/drivers/net/phy/phy_device.c
+@@ -3663,7 +3663,8 @@ static int phy_probe(struct device *dev)
+ 	/* Get the LEDs from the device tree, and instantiate standard
+ 	 * LEDs for them.
+ 	 */
+-	if (IS_ENABLED(CONFIG_PHYLIB_LEDS))
++	if (IS_ENABLED(CONFIG_PHYLIB_LEDS) && !phy_driver_is_genphy(phydev) &&
++	    !phy_driver_is_genphy_10g(phydev))
+ 		err = of_phy_leds(phydev);
+ 
+ out:
+@@ -3680,7 +3681,8 @@ static int phy_remove(struct device *dev)
+ 
+ 	cancel_delayed_work_sync(&phydev->state_queue);
+ 
+-	if (IS_ENABLED(CONFIG_PHYLIB_LEDS))
++	if (IS_ENABLED(CONFIG_PHYLIB_LEDS) && !phy_driver_is_genphy(phydev) &&
++	    !phy_driver_is_genphy_10g(phydev))
+ 		phy_leds_unregister(phydev);
+ 
+ 	phydev->state = PHY_DOWN;
+diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
+index 3d239b8d1a1bcb..52e9fd8116f98e 100644
+--- a/drivers/net/usb/sierra_net.c
++++ b/drivers/net/usb/sierra_net.c
+@@ -689,6 +689,10 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
+ 			status);
+ 		return -ENODEV;
+ 	}
++	if (!dev->status) {
++		dev_err(&dev->udev->dev, "No status endpoint found\n");
++		return -ENODEV;
++	}
+ 	/* Initialize sierra private data */
+ 	priv = kzalloc(sizeof *priv, GFP_KERNEL);
+ 	if (!priv)
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index 54c5d9a14c6724..0408c21bb1220a 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -6802,7 +6802,7 @@ static int virtnet_probe(struct virtio_device *vdev)
+ 	   otherwise get link status from config. */
+ 	netif_carrier_off(dev);
+ 	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
+-		virtnet_config_changed_work(&vi->config_work);
++		virtio_config_changed(vi->vdev);
+ 	} else {
+ 		vi->status = VIRTIO_NET_S_LINK_UP;
+ 		virtnet_update_settings(vi);
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index abd42598fc78b6..9e223574db7f77 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -375,12 +375,12 @@ static void nvme_log_err_passthru(struct request *req)
+ 		nr->status & NVME_SC_MASK,	/* Status Code */
+ 		nr->status & NVME_STATUS_MORE ? "MORE " : "",
+ 		nr->status & NVME_STATUS_DNR  ? "DNR "  : "",
+-		nr->cmd->common.cdw10,
+-		nr->cmd->common.cdw11,
+-		nr->cmd->common.cdw12,
+-		nr->cmd->common.cdw13,
+-		nr->cmd->common.cdw14,
+-		nr->cmd->common.cdw15);
++		le32_to_cpu(nr->cmd->common.cdw10),
++		le32_to_cpu(nr->cmd->common.cdw11),
++		le32_to_cpu(nr->cmd->common.cdw12),
++		le32_to_cpu(nr->cmd->common.cdw13),
++		le32_to_cpu(nr->cmd->common.cdw14),
++		le32_to_cpu(nr->cmd->common.cdw15));
+ }
+ 
+ enum nvme_disposition {
+@@ -757,6 +757,10 @@ blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
+ 	    !test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags) &&
+ 	    !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
+ 		return BLK_STS_RESOURCE;
++
++	if (!(rq->rq_flags & RQF_DONTPREP))
++		nvme_clear_nvme_request(rq);
++
+ 	return nvme_host_path_error(rq);
+ }
+ EXPORT_SYMBOL_GPL(nvme_fail_nonready_command);
+@@ -3854,7 +3858,7 @@ static void nvme_ns_add_to_ctrl_list(struct nvme_ns *ns)
+ 			return;
+ 		}
+ 	}
+-	list_add(&ns->list, &ns->ctrl->namespaces);
++	list_add_rcu(&ns->list, &ns->ctrl->namespaces);
+ }
+ 
+ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info)
+diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
+index 259ad77c03c50f..6268b18d24569b 100644
+--- a/drivers/nvme/target/tcp.c
++++ b/drivers/nvme/target/tcp.c
+@@ -1941,10 +1941,10 @@ static void nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
+ 		struct sock *sk = queue->sock->sk;
+ 
+ 		/* Restore the default callbacks before starting upcall */
+-		read_lock_bh(&sk->sk_callback_lock);
++		write_lock_bh(&sk->sk_callback_lock);
+ 		sk->sk_user_data = NULL;
+ 		sk->sk_data_ready = port->data_ready;
+-		read_unlock_bh(&sk->sk_callback_lock);
++		write_unlock_bh(&sk->sk_callback_lock);
+ 		if (!nvmet_tcp_try_peek_pdu(queue)) {
+ 			if (!nvmet_tcp_tls_handshake(queue))
+ 				return;
+diff --git a/drivers/nvmem/imx-ocotp-ele.c b/drivers/nvmem/imx-ocotp-ele.c
+index ca6dd71d8a2e29..7807ec0e2d18dc 100644
+--- a/drivers/nvmem/imx-ocotp-ele.c
++++ b/drivers/nvmem/imx-ocotp-ele.c
+@@ -12,6 +12,7 @@
+ #include <linux/of.h>
+ #include <linux/platform_device.h>
+ #include <linux/slab.h>
++#include <linux/if_ether.h>	/* ETH_ALEN */
+ 
+ enum fuse_type {
+ 	FUSE_FSB = BIT(0),
+@@ -118,9 +119,11 @@ static int imx_ocotp_cell_pp(void *context, const char *id, int index,
+ 	int i;
+ 
+ 	/* Deal with some post processing of nvmem cell data */
+-	if (id && !strcmp(id, "mac-address"))
++	if (id && !strcmp(id, "mac-address")) {
++		bytes = min(bytes, ETH_ALEN);
+ 		for (i = 0; i < bytes / 2; i++)
+ 			swap(buf[i], buf[bytes - i - 1]);
++	}
+ 
+ 	return 0;
+ }
+diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c
+index 79dd4fda03295a..7bf7656d4f9631 100644
+--- a/drivers/nvmem/imx-ocotp.c
++++ b/drivers/nvmem/imx-ocotp.c
+@@ -23,6 +23,7 @@
+ #include <linux/platform_device.h>
+ #include <linux/slab.h>
+ #include <linux/delay.h>
++#include <linux/if_ether.h>	/* ETH_ALEN */
+ 
+ #define IMX_OCOTP_OFFSET_B0W0		0x400 /* Offset from base address of the
+ 					       * OTP Bank0 Word0
+@@ -227,9 +228,11 @@ static int imx_ocotp_cell_pp(void *context, const char *id, int index,
+ 	int i;
+ 
+ 	/* Deal with some post processing of nvmem cell data */
+-	if (id && !strcmp(id, "mac-address"))
++	if (id && !strcmp(id, "mac-address")) {
++		bytes = min(bytes, ETH_ALEN);
+ 		for (i = 0; i < bytes / 2; i++)
+ 			swap(buf[i], buf[bytes - i - 1]);
++	}
+ 
+ 	return 0;
+ }
+diff --git a/drivers/nvmem/layouts/u-boot-env.c b/drivers/nvmem/layouts/u-boot-env.c
+index 731e6f4f12b2bf..21f6dcf905dd9f 100644
+--- a/drivers/nvmem/layouts/u-boot-env.c
++++ b/drivers/nvmem/layouts/u-boot-env.c
+@@ -92,7 +92,7 @@ int u_boot_env_parse(struct device *dev, struct nvmem_device *nvmem,
+ 	size_t crc32_data_offset;
+ 	size_t crc32_data_len;
+ 	size_t crc32_offset;
+-	__le32 *crc32_addr;
++	uint32_t *crc32_addr;
+ 	size_t data_offset;
+ 	size_t data_len;
+ 	size_t dev_size;
+@@ -143,8 +143,8 @@ int u_boot_env_parse(struct device *dev, struct nvmem_device *nvmem,
+ 		goto err_kfree;
+ 	}
+ 
+-	crc32_addr = (__le32 *)(buf + crc32_offset);
+-	crc32 = le32_to_cpu(*crc32_addr);
++	crc32_addr = (uint32_t *)(buf + crc32_offset);
++	crc32 = *crc32_addr;
+ 	crc32_data_len = dev_size - crc32_data_offset;
+ 	data_len = dev_size - data_offset;
+ 
+diff --git a/drivers/phy/tegra/xusb-tegra186.c b/drivers/phy/tegra/xusb-tegra186.c
+index 23a23f2d64e586..e818f6c3980e6b 100644
+--- a/drivers/phy/tegra/xusb-tegra186.c
++++ b/drivers/phy/tegra/xusb-tegra186.c
+@@ -648,14 +648,15 @@ static void tegra186_utmi_bias_pad_power_on(struct tegra_xusb_padctl *padctl)
+ 		udelay(100);
+ 	}
+ 
+-	if (padctl->soc->trk_hw_mode) {
+-		value = padctl_readl(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL2);
+-		value |= USB2_TRK_HW_MODE;
++	value = padctl_readl(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL2);
++	if (padctl->soc->trk_update_on_idle)
+ 		value &= ~CYA_TRK_CODE_UPDATE_ON_IDLE;
+-		padctl_writel(padctl, value, XUSB_PADCTL_USB2_BIAS_PAD_CTL2);
+-	} else {
++	if (padctl->soc->trk_hw_mode)
++		value |= USB2_TRK_HW_MODE;
++	padctl_writel(padctl, value, XUSB_PADCTL_USB2_BIAS_PAD_CTL2);
++
++	if (!padctl->soc->trk_hw_mode)
+ 		clk_disable_unprepare(priv->usb2_trk_clk);
+-	}
+ }
+ 
+ static void tegra186_utmi_bias_pad_power_off(struct tegra_xusb_padctl *padctl)
+@@ -782,13 +783,15 @@ static int tegra186_xusb_padctl_vbus_override(struct tegra_xusb_padctl *padctl,
+ }
+ 
+ static int tegra186_xusb_padctl_id_override(struct tegra_xusb_padctl *padctl,
+-					    bool status)
++					    struct tegra_xusb_usb2_port *port, bool status)
+ {
+-	u32 value;
++	u32 value, id_override;
++	int err = 0;
+ 
+ 	dev_dbg(padctl->dev, "%s id override\n", status ? "set" : "clear");
+ 
+ 	value = padctl_readl(padctl, USB2_VBUS_ID);
++	id_override = value & ID_OVERRIDE(~0);
+ 
+ 	if (status) {
+ 		if (value & VBUS_OVERRIDE) {
+@@ -799,15 +802,35 @@ static int tegra186_xusb_padctl_id_override(struct tegra_xusb_padctl *padctl,
+ 			value = padctl_readl(padctl, USB2_VBUS_ID);
+ 		}
+ 
+-		value &= ~ID_OVERRIDE(~0);
+-		value |= ID_OVERRIDE_GROUNDED;
++		if (id_override != ID_OVERRIDE_GROUNDED) {
++			value &= ~ID_OVERRIDE(~0);
++			value |= ID_OVERRIDE_GROUNDED;
++			padctl_writel(padctl, value, USB2_VBUS_ID);
++
++			err = regulator_enable(port->supply);
++			if (err) {
++				dev_err(padctl->dev, "Failed to enable regulator: %d\n", err);
++				return err;
++			}
++		}
+ 	} else {
+-		value &= ~ID_OVERRIDE(~0);
+-		value |= ID_OVERRIDE_FLOATING;
++		if (id_override == ID_OVERRIDE_GROUNDED) {
++			/*
++			 * The regulator is disabled only when the role transitions
++			 * from USB_ROLE_HOST to USB_ROLE_NONE.
++			 */
++			err = regulator_disable(port->supply);
++			if (err) {
++				dev_err(padctl->dev, "Failed to disable regulator: %d\n", err);
++				return err;
++			}
++
++			value &= ~ID_OVERRIDE(~0);
++			value |= ID_OVERRIDE_FLOATING;
++			padctl_writel(padctl, value, USB2_VBUS_ID);
++		}
+ 	}
+ 
+-	padctl_writel(padctl, value, USB2_VBUS_ID);
+-
+ 	return 0;
+ }
+ 
+@@ -826,27 +849,20 @@ static int tegra186_utmi_phy_set_mode(struct phy *phy, enum phy_mode mode,
+ 
+ 	if (mode == PHY_MODE_USB_OTG) {
+ 		if (submode == USB_ROLE_HOST) {
+-			tegra186_xusb_padctl_id_override(padctl, true);
+-
+-			err = regulator_enable(port->supply);
++			err = tegra186_xusb_padctl_id_override(padctl, port, true);
++			if (err)
++				goto out;
+ 		} else if (submode == USB_ROLE_DEVICE) {
+ 			tegra186_xusb_padctl_vbus_override(padctl, true);
+ 		} else if (submode == USB_ROLE_NONE) {
+-			/*
+-			 * When port is peripheral only or role transitions to
+-			 * USB_ROLE_NONE from USB_ROLE_DEVICE, regulator is not
+-			 * enabled.
+-			 */
+-			if (regulator_is_enabled(port->supply))
+-				regulator_disable(port->supply);
+-
+-			tegra186_xusb_padctl_id_override(padctl, false);
++			err = tegra186_xusb_padctl_id_override(padctl, port, false);
++			if (err)
++				goto out;
+ 			tegra186_xusb_padctl_vbus_override(padctl, false);
+ 		}
+ 	}
+-
++out:
+ 	mutex_unlock(&padctl->lock);
+-
+ 	return err;
+ }
+ 
+@@ -1710,7 +1726,8 @@ const struct tegra_xusb_padctl_soc tegra234_xusb_padctl_soc = {
+ 	.num_supplies = ARRAY_SIZE(tegra194_xusb_padctl_supply_names),
+ 	.supports_gen2 = true,
+ 	.poll_trk_completed = true,
+-	.trk_hw_mode = true,
++	.trk_hw_mode = false,
++	.trk_update_on_idle = true,
+ 	.supports_lp_cfg_en = true,
+ };
+ EXPORT_SYMBOL_GPL(tegra234_xusb_padctl_soc);
+diff --git a/drivers/phy/tegra/xusb.h b/drivers/phy/tegra/xusb.h
+index 6e45d194c68947..d2b5f95651324a 100644
+--- a/drivers/phy/tegra/xusb.h
++++ b/drivers/phy/tegra/xusb.h
+@@ -434,6 +434,7 @@ struct tegra_xusb_padctl_soc {
+ 	bool need_fake_usb3_port;
+ 	bool poll_trk_completed;
+ 	bool trk_hw_mode;
++	bool trk_update_on_idle;
+ 	bool supports_lp_cfg_en;
+ };
+ 
+diff --git a/drivers/pmdomain/governor.c b/drivers/pmdomain/governor.c
+index d1a10eeebd1616..600592f19669f7 100644
+--- a/drivers/pmdomain/governor.c
++++ b/drivers/pmdomain/governor.c
+@@ -8,6 +8,7 @@
+ #include <linux/pm_domain.h>
+ #include <linux/pm_qos.h>
+ #include <linux/hrtimer.h>
++#include <linux/cpu.h>
+ #include <linux/cpuidle.h>
+ #include <linux/cpumask.h>
+ #include <linux/ktime.h>
+@@ -349,6 +350,8 @@ static bool cpu_power_down_ok(struct dev_pm_domain *pd)
+ 	struct cpuidle_device *dev;
+ 	ktime_t domain_wakeup, next_hrtimer;
+ 	ktime_t now = ktime_get();
++	struct device *cpu_dev;
++	s64 cpu_constraint, global_constraint;
+ 	s64 idle_duration_ns;
+ 	int cpu, i;
+ 
+@@ -359,6 +362,7 @@ static bool cpu_power_down_ok(struct dev_pm_domain *pd)
+ 	if (!(genpd->flags & GENPD_FLAG_CPU_DOMAIN))
+ 		return true;
+ 
++	global_constraint = cpu_latency_qos_limit();
+ 	/*
+ 	 * Find the next wakeup for any of the online CPUs within the PM domain
+ 	 * and its subdomains. Note, we only need the genpd->cpus, as it already
+@@ -372,8 +376,16 @@ static bool cpu_power_down_ok(struct dev_pm_domain *pd)
+ 			if (ktime_before(next_hrtimer, domain_wakeup))
+ 				domain_wakeup = next_hrtimer;
+ 		}
++
++		cpu_dev = get_cpu_device(cpu);
++		if (cpu_dev) {
++			cpu_constraint = dev_pm_qos_raw_resume_latency(cpu_dev);
++			if (cpu_constraint < global_constraint)
++				global_constraint = cpu_constraint;
++		}
+ 	}
+ 
++	global_constraint *= NSEC_PER_USEC;
+ 	/* The minimum idle duration is from now - until the next wakeup. */
+ 	idle_duration_ns = ktime_to_ns(ktime_sub(domain_wakeup, now));
+ 	if (idle_duration_ns <= 0)
+@@ -389,8 +401,10 @@ static bool cpu_power_down_ok(struct dev_pm_domain *pd)
+ 	 */
+ 	i = genpd->state_idx;
+ 	do {
+-		if (idle_duration_ns >= (genpd->states[i].residency_ns +
+-		    genpd->states[i].power_off_latency_ns)) {
++		if ((idle_duration_ns >= (genpd->states[i].residency_ns +
++		    genpd->states[i].power_off_latency_ns)) &&
++		    (global_constraint >= (genpd->states[i].power_on_latency_ns +
++		    genpd->states[i].power_off_latency_ns))) {
+ 			genpd->state_idx = i;
+ 			return true;
+ 		}
+diff --git a/drivers/soc/aspeed/aspeed-lpc-snoop.c b/drivers/soc/aspeed/aspeed-lpc-snoop.c
+index d2e63277f0aa9a..54db2abc2e2a72 100644
+--- a/drivers/soc/aspeed/aspeed-lpc-snoop.c
++++ b/drivers/soc/aspeed/aspeed-lpc-snoop.c
+@@ -58,6 +58,7 @@ struct aspeed_lpc_snoop_model_data {
+ };
+ 
+ struct aspeed_lpc_snoop_channel {
++	bool enabled;
+ 	struct kfifo		fifo;
+ 	wait_queue_head_t	wq;
+ 	struct miscdevice	miscdev;
+@@ -190,6 +191,9 @@ static int aspeed_lpc_enable_snoop(struct aspeed_lpc_snoop *lpc_snoop,
+ 	const struct aspeed_lpc_snoop_model_data *model_data =
+ 		of_device_get_match_data(dev);
+ 
++	if (WARN_ON(lpc_snoop->chan[channel].enabled))
++		return -EBUSY;
++
+ 	init_waitqueue_head(&lpc_snoop->chan[channel].wq);
+ 	/* Create FIFO datastructure */
+ 	rc = kfifo_alloc(&lpc_snoop->chan[channel].fifo,
+@@ -236,6 +240,8 @@ static int aspeed_lpc_enable_snoop(struct aspeed_lpc_snoop *lpc_snoop,
+ 		regmap_update_bits(lpc_snoop->regmap, HICRB,
+ 				hicrb_en, hicrb_en);
+ 
++	lpc_snoop->chan[channel].enabled = true;
++
+ 	return 0;
+ 
+ err_misc_deregister:
+@@ -248,6 +254,9 @@ static int aspeed_lpc_enable_snoop(struct aspeed_lpc_snoop *lpc_snoop,
+ static void aspeed_lpc_disable_snoop(struct aspeed_lpc_snoop *lpc_snoop,
+ 				     int channel)
+ {
++	if (!lpc_snoop->chan[channel].enabled)
++		return;
++
+ 	switch (channel) {
+ 	case 0:
+ 		regmap_update_bits(lpc_snoop->regmap, HICR5,
+@@ -263,8 +272,10 @@ static void aspeed_lpc_disable_snoop(struct aspeed_lpc_snoop *lpc_snoop,
+ 		return;
+ 	}
+ 
+-	kfifo_free(&lpc_snoop->chan[channel].fifo);
++	lpc_snoop->chan[channel].enabled = false;
++	/* Consider improving safety wrt concurrent reader(s) */
+ 	misc_deregister(&lpc_snoop->chan[channel].miscdev);
++	kfifo_free(&lpc_snoop->chan[channel].fifo);
+ }
+ 
+ static int aspeed_lpc_snoop_probe(struct platform_device *pdev)
+diff --git a/drivers/soundwire/amd_manager.c b/drivers/soundwire/amd_manager.c
+index e3d5e6c1d582c3..1895fba5e70bbc 100644
+--- a/drivers/soundwire/amd_manager.c
++++ b/drivers/soundwire/amd_manager.c
+@@ -187,7 +187,7 @@ static u64 amd_sdw_send_cmd_get_resp(struct amd_sdw_manager *amd_manager, u32 lo
+ 
+ 	if (sts & AMD_SDW_IMM_RES_VALID) {
+ 		dev_err(amd_manager->dev, "SDW%x manager is in bad state\n", amd_manager->instance);
+-		writel(0x00, amd_manager->mmio + ACP_SW_IMM_CMD_STS);
++		writel(AMD_SDW_IMM_RES_VALID, amd_manager->mmio + ACP_SW_IMM_CMD_STS);
+ 	}
+ 	writel(upper_data, amd_manager->mmio + ACP_SW_IMM_CMD_UPPER_WORD);
+ 	writel(lower_data, amd_manager->mmio + ACP_SW_IMM_CMD_LOWER_QWORD);
+@@ -1107,9 +1107,11 @@ static int __maybe_unused amd_suspend(struct device *dev)
+ 	}
+ 
+ 	if (amd_manager->power_mode_mask & AMD_SDW_CLK_STOP_MODE) {
++		cancel_work_sync(&amd_manager->amd_sdw_work);
+ 		amd_sdw_wake_enable(amd_manager, false);
+ 		return amd_sdw_clock_stop(amd_manager);
+ 	} else if (amd_manager->power_mode_mask & AMD_SDW_POWER_OFF_MODE) {
++		cancel_work_sync(&amd_manager->amd_sdw_work);
+ 		amd_sdw_wake_enable(amd_manager, false);
+ 		/*
+ 		 * As per hardware programming sequence on AMD platforms,
+diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
+index 0f3e6e2c24743c..8d6341b0d8668c 100644
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -4141,10 +4141,13 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
+ 				xfer->tx_nbits != SPI_NBITS_OCTAL)
+ 				return -EINVAL;
+ 			if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
+-				!(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
++				!(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL)))
+ 				return -EINVAL;
+ 			if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
+-				!(spi->mode & SPI_TX_QUAD))
++				!(spi->mode & (SPI_TX_QUAD | SPI_TX_OCTAL)))
++				return -EINVAL;
++			if ((xfer->tx_nbits == SPI_NBITS_OCTAL) &&
++				!(spi->mode & SPI_TX_OCTAL))
+ 				return -EINVAL;
+ 		}
+ 		/* Check transfer rx_nbits */
+@@ -4157,10 +4160,13 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
+ 				xfer->rx_nbits != SPI_NBITS_OCTAL)
+ 				return -EINVAL;
+ 			if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
+-				!(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
++				!(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))
+ 				return -EINVAL;
+ 			if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
+-				!(spi->mode & SPI_RX_QUAD))
++				!(spi->mode & (SPI_RX_QUAD | SPI_RX_OCTAL)))
++				return -EINVAL;
++			if ((xfer->rx_nbits == SPI_NBITS_OCTAL) &&
++				!(spi->mode & SPI_RX_OCTAL))
+ 				return -EINVAL;
+ 		}
+ 
+diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+index 1a9432646b70ae..97787002080a18 100644
+--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
++++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+@@ -588,6 +588,29 @@ static int vchiq_platform_init(struct platform_device *pdev, struct vchiq_state
+ 	return 0;
+ }
+ 
++int
++vchiq_platform_init_state(struct vchiq_state *state)
++{
++	struct vchiq_arm_state *platform_state;
++
++	platform_state = devm_kzalloc(state->dev, sizeof(*platform_state), GFP_KERNEL);
++	if (!platform_state)
++		return -ENOMEM;
++
++	rwlock_init(&platform_state->susp_res_lock);
++
++	init_completion(&platform_state->ka_evt);
++	atomic_set(&platform_state->ka_use_count, 0);
++	atomic_set(&platform_state->ka_use_ack_count, 0);
++	atomic_set(&platform_state->ka_release_count, 0);
++
++	platform_state->state = state;
++
++	state->platform_state = (struct opaque_platform_state *)platform_state;
++
++	return 0;
++}
++
+ static struct vchiq_arm_state *vchiq_platform_get_arm_state(struct vchiq_state *state)
+ {
+ 	return (struct vchiq_arm_state *)state->platform_state;
+@@ -1335,39 +1358,6 @@ vchiq_keepalive_thread_func(void *v)
+ 	return 0;
+ }
+ 
+-int
+-vchiq_platform_init_state(struct vchiq_state *state)
+-{
+-	struct vchiq_arm_state *platform_state;
+-	char threadname[16];
+-
+-	platform_state = devm_kzalloc(state->dev, sizeof(*platform_state), GFP_KERNEL);
+-	if (!platform_state)
+-		return -ENOMEM;
+-
+-	snprintf(threadname, sizeof(threadname), "vchiq-keep/%d",
+-		 state->id);
+-	platform_state->ka_thread = kthread_create(&vchiq_keepalive_thread_func,
+-						   (void *)state, threadname);
+-	if (IS_ERR(platform_state->ka_thread)) {
+-		dev_err(state->dev, "couldn't create thread %s\n", threadname);
+-		return PTR_ERR(platform_state->ka_thread);
+-	}
+-
+-	rwlock_init(&platform_state->susp_res_lock);
+-
+-	init_completion(&platform_state->ka_evt);
+-	atomic_set(&platform_state->ka_use_count, 0);
+-	atomic_set(&platform_state->ka_use_ack_count, 0);
+-	atomic_set(&platform_state->ka_release_count, 0);
+-
+-	platform_state->state = state;
+-
+-	state->platform_state = (struct opaque_platform_state *)platform_state;
+-
+-	return 0;
+-}
+-
+ int
+ vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
+ 		   enum USE_TYPE_E use_type)
+@@ -1688,6 +1678,7 @@ void vchiq_platform_conn_state_changed(struct vchiq_state *state,
+ 				       enum vchiq_connstate newstate)
+ {
+ 	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
++	char threadname[16];
+ 
+ 	dev_dbg(state->dev, "suspend: %d: %s->%s\n",
+ 		state->id, get_conn_state_name(oldstate), get_conn_state_name(newstate));
+@@ -1702,7 +1693,17 @@ void vchiq_platform_conn_state_changed(struct vchiq_state *state,
+ 
+ 	arm_state->first_connect = 1;
+ 	write_unlock_bh(&arm_state->susp_res_lock);
+-	wake_up_process(arm_state->ka_thread);
++	snprintf(threadname, sizeof(threadname), "vchiq-keep/%d",
++		 state->id);
++	arm_state->ka_thread = kthread_create(&vchiq_keepalive_thread_func,
++					      (void *)state,
++					      threadname);
++	if (IS_ERR(arm_state->ka_thread)) {
++		dev_err(state->dev, "suspend: Couldn't create thread %s\n",
++			threadname);
++	} else {
++		wake_up_process(arm_state->ka_thread);
++	}
+ }
+ 
+ static const struct of_device_id vchiq_of_match[] = {
+diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
+index 6a2116cbb06f92..60818c1bec4831 100644
+--- a/drivers/thunderbolt/switch.c
++++ b/drivers/thunderbolt/switch.c
+@@ -1450,7 +1450,7 @@ int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
+ 		return ret;
+ 
+ 	data[0] &= ~ADP_DP_CS_0_VIDEO_HOPID_MASK;
+-	data[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK;
++	data[1] &= ~ADP_DP_CS_1_AUX_TX_HOPID_MASK;
+ 	data[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK;
+ 
+ 	data[0] |= (video << ADP_DP_CS_0_VIDEO_HOPID_SHIFT) &
+@@ -3437,7 +3437,7 @@ void tb_sw_set_unplugged(struct tb_switch *sw)
+ 	}
+ }
+ 
+-static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags)
++static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags, bool runtime)
+ {
+ 	if (flags)
+ 		tb_sw_dbg(sw, "enabling wakeup: %#x\n", flags);
+@@ -3445,7 +3445,7 @@ static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags)
+ 		tb_sw_dbg(sw, "disabling wakeup\n");
+ 
+ 	if (tb_switch_is_usb4(sw))
+-		return usb4_switch_set_wake(sw, flags);
++		return usb4_switch_set_wake(sw, flags, runtime);
+ 	return tb_lc_set_wake(sw, flags);
+ }
+ 
+@@ -3521,7 +3521,7 @@ int tb_switch_resume(struct tb_switch *sw, bool runtime)
+ 		tb_switch_check_wakes(sw);
+ 
+ 	/* Disable wakes */
+-	tb_switch_set_wake(sw, 0);
++	tb_switch_set_wake(sw, 0, true);
+ 
+ 	err = tb_switch_tmu_init(sw);
+ 	if (err)
+@@ -3602,7 +3602,7 @@ void tb_switch_suspend(struct tb_switch *sw, bool runtime)
+ 		flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE;
+ 	}
+ 
+-	tb_switch_set_wake(sw, flags);
++	tb_switch_set_wake(sw, flags, runtime);
+ 
+ 	if (tb_switch_is_usb4(sw))
+ 		usb4_switch_set_sleep(sw);
+diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
+index 6737188f258157..2a701f94af1293 100644
+--- a/drivers/thunderbolt/tb.h
++++ b/drivers/thunderbolt/tb.h
+@@ -1299,7 +1299,7 @@ int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid);
+ int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf,
+ 			  size_t size);
+ bool usb4_switch_lane_bonding_possible(struct tb_switch *sw);
+-int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags);
++int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags, bool runtime);
+ int usb4_switch_set_sleep(struct tb_switch *sw);
+ int usb4_switch_nvm_sector_size(struct tb_switch *sw);
+ int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
+diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c
+index 57821b6f4e4682..9eacde552f81d2 100644
+--- a/drivers/thunderbolt/usb4.c
++++ b/drivers/thunderbolt/usb4.c
+@@ -403,12 +403,12 @@ bool usb4_switch_lane_bonding_possible(struct tb_switch *sw)
+  * usb4_switch_set_wake() - Enabled/disable wake
+  * @sw: USB4 router
+  * @flags: Wakeup flags (%0 to disable)
++ * @runtime: Wake is being programmed during system runtime
+  *
+  * Enables/disables router to wake up from sleep.
+  */
+-int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags)
++int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags, bool runtime)
+ {
+-	struct usb4_port *usb4;
+ 	struct tb_port *port;
+ 	u64 route = tb_route(sw);
+ 	u32 val;
+@@ -438,13 +438,11 @@ int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags)
+ 			val |= PORT_CS_19_WOU4;
+ 		} else {
+ 			bool configured = val & PORT_CS_19_PC;
+-			usb4 = port->usb4;
++			bool wakeup = runtime || device_may_wakeup(&port->usb4->dev);
+ 
+-			if (((flags & TB_WAKE_ON_CONNECT) &&
+-			      device_may_wakeup(&usb4->dev)) && !configured)
++			if ((flags & TB_WAKE_ON_CONNECT) && wakeup && !configured)
+ 				val |= PORT_CS_19_WOC;
+-			if (((flags & TB_WAKE_ON_DISCONNECT) &&
+-			      device_may_wakeup(&usb4->dev)) && configured)
++			if ((flags & TB_WAKE_ON_DISCONNECT) && wakeup && configured)
+ 				val |= PORT_CS_19_WOD;
+ 			if ((flags & TB_WAKE_ON_USB4) && configured)
+ 				val |= PORT_CS_19_WOU4;
+diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
+index c7cee5fee60372..70676e3247ab33 100644
+--- a/drivers/tty/serial/pch_uart.c
++++ b/drivers/tty/serial/pch_uart.c
+@@ -954,7 +954,7 @@ static unsigned int dma_handle_tx(struct eg20t_port *priv)
+ 			__func__);
+ 		return 0;
+ 	}
+-	dma_sync_sg_for_device(port->dev, priv->sg_tx_p, nent, DMA_TO_DEVICE);
++	dma_sync_sg_for_device(port->dev, priv->sg_tx_p, num, DMA_TO_DEVICE);
+ 	priv->desc_tx = desc;
+ 	desc->callback = pch_dma_tx_complete;
+ 	desc->callback_param = priv;
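(Note on the pch_uart hunk: it restores a core DMA-API rule. dma_map_sg() may coalesce entries and returns the mapped count, but dma_sync_sg_for_device() and dma_unmap_sg() must be passed the original nents that was given to dma_map_sg(), never the returned value. A hedged sketch of the correct shape, with illustrative names:)

	int mapped = dma_map_sg(dev, sgl, num, DMA_TO_DEVICE);

	if (mapped == 0)
		return -EIO;
	/* program the device descriptor using 'mapped' entries ... */

	/* sync and unmap take the ORIGINAL count, not the returned one */
	dma_sync_sg_for_device(dev, sgl, num, DMA_TO_DEVICE);
	dma_unmap_sg(dev, sgl, num, DMA_TO_DEVICE);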
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index da6da5ec42372f..090b3a75711241 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -67,6 +67,12 @@
+  */
+ #define USB_SHORT_SET_ADDRESS_REQ_TIMEOUT	500  /* ms */
+ 
++/*
++ * Give SS hubs 200ms time after wake to train downstream links before
++ * assuming no port activity and allowing hub to runtime suspend back.
++ */
++#define USB_SS_PORT_U0_WAKE_TIME	200  /* ms */
++
+ /* Protect struct usb_device->state and ->children members
+  * Note: Both are also protected by ->dev.sem, except that ->state can
+  * change to USB_STATE_NOTATTACHED even when the semaphore isn't held. */
+@@ -1094,6 +1100,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
+ 			goto init2;
+ 		goto init3;
+ 	}
++
+ 	hub_get(hub);
+ 
+ 	/* The superspeed hub except for root hub has to use Hub Depth
+@@ -1342,6 +1349,17 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
+ 		device_unlock(&hdev->dev);
+ 	}
+ 
++	if (type == HUB_RESUME && hub_is_superspeed(hub->hdev)) {
++		/* give usb3 downstream links training time after hub resume */
++		usb_autopm_get_interface_no_resume(
++			to_usb_interface(hub->intfdev));
++
++		queue_delayed_work(system_power_efficient_wq,
++				   &hub->post_resume_work,
++				   msecs_to_jiffies(USB_SS_PORT_U0_WAKE_TIME));
++		return;
++	}
++
+ 	hub_put(hub);
+ }
+ 
+@@ -1360,6 +1378,14 @@ static void hub_init_func3(struct work_struct *ws)
+ 	hub_activate(hub, HUB_INIT3);
+ }
+ 
++static void hub_post_resume(struct work_struct *ws)
++{
++	struct usb_hub *hub = container_of(ws, struct usb_hub, post_resume_work.work);
++
++	usb_autopm_put_interface_async(to_usb_interface(hub->intfdev));
++	hub_put(hub);
++}
++
+ enum hub_quiescing_type {
+ 	HUB_DISCONNECT, HUB_PRE_RESET, HUB_SUSPEND
+ };
+@@ -1385,6 +1411,7 @@ static void hub_quiesce(struct usb_hub *hub, enum hub_quiescing_type type)
+ 
+ 	/* Stop hub_wq and related activity */
+ 	del_timer_sync(&hub->irq_urb_retry);
++	flush_delayed_work(&hub->post_resume_work);
+ 	usb_kill_urb(hub->urb);
+ 	if (hub->has_indicators)
+ 		cancel_delayed_work_sync(&hub->leds);
+@@ -1943,6 +1970,7 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id)
+ 	hub->hdev = hdev;
+ 	INIT_DELAYED_WORK(&hub->leds, led_work);
+ 	INIT_DELAYED_WORK(&hub->init_work, NULL);
++	INIT_DELAYED_WORK(&hub->post_resume_work, hub_post_resume);
+ 	INIT_WORK(&hub->events, hub_event);
+ 	INIT_LIST_HEAD(&hub->onboard_devs);
+ 	spin_lock_init(&hub->irq_urb_lock);
+@@ -5721,6 +5749,7 @@ static void port_event(struct usb_hub *hub, int port1)
+ 	struct usb_device *hdev = hub->hdev;
+ 	u16 portstatus, portchange;
+ 	int i = 0;
++	int err;
+ 
+ 	connect_change = test_bit(port1, hub->change_bits);
+ 	clear_bit(port1, hub->event_bits);
+@@ -5817,8 +5846,11 @@ static void port_event(struct usb_hub *hub, int port1)
+ 		} else if (!udev || !(portstatus & USB_PORT_STAT_CONNECTION)
+ 				|| udev->state == USB_STATE_NOTATTACHED) {
+ 			dev_dbg(&port_dev->dev, "do warm reset, port only\n");
+-			if (hub_port_reset(hub, port1, NULL,
+-					HUB_BH_RESET_TIME, true) < 0)
++			err = hub_port_reset(hub, port1, NULL,
++					     HUB_BH_RESET_TIME, true);
++			if (!udev && err == -ENOTCONN)
++				connect_change = 0;
++			else if (err < 0)
+ 				hub_port_disable(hub, port1, 1);
+ 		} else {
+ 			dev_dbg(&port_dev->dev, "do warm reset, full device\n");
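(Note on the hub.c hunks: the resume path pins the hub interface's runtime-PM usage count across a 200 ms link-training grace period and releases it from delayed work, so a SuperSpeed hub cannot runtime-suspend again before its downstream links retrain. A minimal sketch of that hold-then-release pattern; my_dev/grace_work are illustrative names:)

	static void my_grace_expired(struct work_struct *ws)
	{
		struct my_dev *md = container_of(ws, struct my_dev,
						 grace_work.work);

		/* drop the usage count taken before queueing */
		usb_autopm_put_interface_async(md->intf);
	}

	static void my_after_resume(struct my_dev *md)
	{
		/* pin the interface without forcing a resume */
		usb_autopm_get_interface_no_resume(md->intf);
		queue_delayed_work(system_power_efficient_wq, &md->grace_work,
				   msecs_to_jiffies(200));
	}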
+diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h
+index e6ae73f8a95dc8..9ebc5ef54a325d 100644
+--- a/drivers/usb/core/hub.h
++++ b/drivers/usb/core/hub.h
+@@ -70,6 +70,7 @@ struct usb_hub {
+ 	u8			indicator[USB_MAXCHILDREN];
+ 	struct delayed_work	leds;
+ 	struct delayed_work	init_work;
++	struct delayed_work	post_resume_work;
+ 	struct work_struct      events;
+ 	spinlock_t		irq_urb_lock;
+ 	struct timer_list	irq_urb_retry;
+diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
+index d3d0d75ab1f594..834fc02610a2dc 100644
+--- a/drivers/usb/dwc2/gadget.c
++++ b/drivers/usb/dwc2/gadget.c
+@@ -5352,20 +5352,34 @@ int dwc2_gadget_enter_hibernation(struct dwc2_hsotg *hsotg)
+ 	if (gusbcfg & GUSBCFG_ULPI_UTMI_SEL) {
+ 		/* ULPI interface */
+ 		gpwrdn |= GPWRDN_ULPI_LATCH_EN_DURING_HIB_ENTRY;
+-	}
+-	dwc2_writel(hsotg, gpwrdn, GPWRDN);
+-	udelay(10);
++		dwc2_writel(hsotg, gpwrdn, GPWRDN);
++		udelay(10);
+ 
+-	/* Suspend the Phy Clock */
+-	pcgcctl = dwc2_readl(hsotg, PCGCTL);
+-	pcgcctl |= PCGCTL_STOPPCLK;
+-	dwc2_writel(hsotg, pcgcctl, PCGCTL);
+-	udelay(10);
++		/* Suspend the Phy Clock */
++		pcgcctl = dwc2_readl(hsotg, PCGCTL);
++		pcgcctl |= PCGCTL_STOPPCLK;
++		dwc2_writel(hsotg, pcgcctl, PCGCTL);
++		udelay(10);
+ 
+-	gpwrdn = dwc2_readl(hsotg, GPWRDN);
+-	gpwrdn |= GPWRDN_PMUACTV;
+-	dwc2_writel(hsotg, gpwrdn, GPWRDN);
+-	udelay(10);
++		gpwrdn = dwc2_readl(hsotg, GPWRDN);
++		gpwrdn |= GPWRDN_PMUACTV;
++		dwc2_writel(hsotg, gpwrdn, GPWRDN);
++		udelay(10);
++	} else {
++		/* UTMI+ Interface */
++		dwc2_writel(hsotg, gpwrdn, GPWRDN);
++		udelay(10);
++
++		gpwrdn = dwc2_readl(hsotg, GPWRDN);
++		gpwrdn |= GPWRDN_PMUACTV;
++		dwc2_writel(hsotg, gpwrdn, GPWRDN);
++		udelay(10);
++
++		pcgcctl = dwc2_readl(hsotg, PCGCTL);
++		pcgcctl |= PCGCTL_STOPPCLK;
++		dwc2_writel(hsotg, pcgcctl, PCGCTL);
++		udelay(10);
++	}
+ 
+ 	/* Set flag to indicate that we are in hibernation */
+ 	hsotg->hibernated = 1;
+diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c
+index c1d4b52f25b063..6c79303891d118 100644
+--- a/drivers/usb/dwc3/dwc3-qcom.c
++++ b/drivers/usb/dwc3/dwc3-qcom.c
+@@ -763,13 +763,13 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
+ 	ret = reset_control_deassert(qcom->resets);
+ 	if (ret) {
+ 		dev_err(&pdev->dev, "failed to deassert resets, err=%d\n", ret);
+-		goto reset_assert;
++		return ret;
+ 	}
+ 
+ 	ret = dwc3_qcom_clk_init(qcom, of_clk_get_parent_count(np));
+ 	if (ret) {
+ 		dev_err_probe(dev, ret, "failed to get clocks\n");
+-		goto reset_assert;
++		return ret;
+ 	}
+ 
+ 	qcom->qscratch_base = devm_platform_ioremap_resource(pdev, 0);
+@@ -835,8 +835,6 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
+ 		clk_disable_unprepare(qcom->clks[i]);
+ 		clk_put(qcom->clks[i]);
+ 	}
+-reset_assert:
+-	reset_control_assert(qcom->resets);
+ 
+ 	return ret;
+ }
+@@ -857,8 +855,6 @@ static void dwc3_qcom_remove(struct platform_device *pdev)
+ 	qcom->num_clocks = 0;
+ 
+ 	dwc3_qcom_interconnect_exit(qcom);
+-	reset_control_assert(qcom->resets);
+-
+ 	pm_runtime_allow(dev);
+ 	pm_runtime_disable(dev);
+ }
+diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
+index 29390d573e2346..1b4d0056f1d082 100644
+--- a/drivers/usb/gadget/configfs.c
++++ b/drivers/usb/gadget/configfs.c
+@@ -1065,6 +1065,8 @@ static ssize_t webusb_landingPage_store(struct config_item *item, const char *pa
+ 	unsigned int bytes_to_strip = 0;
+ 	int l = len;
+ 
++	if (!len)
++		return len;
+ 	if (page[l - 1] == '\n') {
+ 		--l;
+ 		++bytes_to_strip;
+@@ -1188,6 +1190,8 @@ static ssize_t os_desc_qw_sign_store(struct config_item *item, const char *page,
+ 	struct gadget_info *gi = os_desc_item_to_gadget_info(item);
+ 	int res, l;
+ 
++	if (!len)
++		return len;
+ 	l = min((int)len, OS_STRING_QW_SIGN_LEN >> 1);
+ 	if (page[l - 1] == '\n')
+ 		--l;
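(Note on the configfs hunks: both guards fix the same out-of-bounds read, since a zero-length write makes page[l - 1] index one byte before the buffer. A standalone userspace analogue of the bug and the guard; names are illustrative:)

	#include <stdio.h>

	static size_t strip_trailing_nl(const char *page, size_t len)
	{
		if (!len)
			return 0;		/* the added guard */
		if (page[len - 1] == '\n')	/* OOB read when len == 0 */
			--len;
		return len;
	}

	int main(void)
	{
		printf("%zu\n", strip_trailing_nl("abc\n", 4));	/* 3 */
		printf("%zu\n", strip_trailing_nl("", 0));	/* 0, no OOB */
		return 0;
	}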
+diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
+index c6076df0d50cc7..da2b864fdadfc8 100644
+--- a/drivers/usb/musb/musb_gadget.c
++++ b/drivers/usb/musb/musb_gadget.c
+@@ -1912,6 +1912,7 @@ static int musb_gadget_stop(struct usb_gadget *g)
+ 	 * gadget driver here and have everything work;
+ 	 * that currently misbehaves.
+ 	 */
++	usb_gadget_set_state(g, USB_STATE_NOTATTACHED);
+ 
+ 	/* Force check of devctl register for PM runtime */
+ 	pm_runtime_mark_last_busy(musb->controller);
+@@ -2018,6 +2019,7 @@ void musb_g_disconnect(struct musb *musb)
+ 	case OTG_STATE_B_PERIPHERAL:
+ 	case OTG_STATE_B_IDLE:
+ 		musb_set_state(musb, OTG_STATE_B_IDLE);
++		usb_gadget_set_state(&musb->g, USB_STATE_NOTATTACHED);
+ 		break;
+ 	case OTG_STATE_B_SRP_INIT:
+ 		break;
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index eef614be7db579..4f21d75f587770 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -803,6 +803,8 @@ static const struct usb_device_id id_table_combined[] = {
+ 		.driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_NDI_AURORA_SCU_PID),
+ 		.driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk },
++	{ USB_DEVICE(FTDI_NDI_VID, FTDI_NDI_EMGUIDE_GEMINI_PID),
++		.driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk },
+ 	{ USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) },
+ 	{ USB_DEVICE(NOVITUS_VID, NOVITUS_BONO_E_PID) },
+ 	{ USB_DEVICE(FTDI_VID, RTSYSTEMS_USB_VX8_PID) },
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 9acb6f83732763..4cc1fae8acb970 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -204,6 +204,9 @@
+ #define FTDI_NDI_FUTURE_3_PID		0xDA73	/* NDI future device #3 */
+ #define FTDI_NDI_AURORA_SCU_PID		0xDA74	/* NDI Aurora SCU */
+ 
++#define FTDI_NDI_VID			0x23F2
++#define FTDI_NDI_EMGUIDE_GEMINI_PID	0x0003	/* NDI Emguide Gemini */
++
+ /*
+  * ChamSys Limited (www.chamsys.co.uk) USB wing/interface product IDs
+  */
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 27879cc575365c..147ca50c94beec 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -1415,6 +1415,9 @@ static const struct usb_device_id option_ids[] = {
+ 	  .driver_info = NCTRL(5) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d0, 0xff, 0xff, 0x40) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d0, 0xff, 0xff, 0x60) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10c7, 0xff, 0xff, 0x30),	/* Telit FE910C04 (ECM) */
++	  .driver_info = NCTRL(4) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10c7, 0xff, 0xff, 0x40) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d1, 0xff, 0xff, 0x30),	/* Telit FN990B (MBIM) */
+ 	  .driver_info = NCTRL(6) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d1, 0xff, 0xff, 0x40) },
+@@ -2343,6 +2346,8 @@ static const struct usb_device_id option_ids[] = {
+ 	  .driver_info = RSVD(3) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe145, 0xff),			/* Foxconn T99W651 RNDIS */
+ 	  .driver_info = RSVD(5) | RSVD(6) },
++	{ USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe167, 0xff),			/* Foxconn T99W640 MBIM */
++	  .driver_info = RSVD(3) },
+ 	{ USB_DEVICE(0x1508, 0x1001),						/* Fibocom NL668 (IOT version) */
+ 	  .driver_info = RSVD(4) | RSVD(5) | RSVD(6) },
+ 	{ USB_DEVICE(0x1782, 0x4d10) },						/* Fibocom L610 (AT mode) */
+diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
+index aa8656c8b7e7e7..dd35e29d80824c 100644
+--- a/fs/btrfs/block-group.c
++++ b/fs/btrfs/block-group.c
+@@ -2780,8 +2780,11 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans)
+ 		/* Already aborted the transaction if it failed. */
+ next:
+ 		btrfs_dec_delayed_refs_rsv_bg_inserts(fs_info);
++
++		spin_lock(&fs_info->unused_bgs_lock);
+ 		list_del_init(&block_group->bg_list);
+ 		clear_bit(BLOCK_GROUP_FLAG_NEW, &block_group->runtime_flags);
++		spin_unlock(&fs_info->unused_bgs_lock);
+ 
+ 		/*
+ 		 * If the block group is still unused, add it to the list of
+diff --git a/fs/cachefiles/io.c b/fs/cachefiles/io.c
+index 6a821a959b59e6..6c378b230de202 100644
+--- a/fs/cachefiles/io.c
++++ b/fs/cachefiles/io.c
+@@ -346,8 +346,6 @@ int __cachefiles_write(struct cachefiles_object *object,
+ 	default:
+ 		ki->was_async = false;
+ 		cachefiles_write_complete(&ki->iocb, ret);
+-		if (ret > 0)
+-			ret = 0;
+ 		break;
+ 	}
+ 
+diff --git a/fs/cachefiles/ondemand.c b/fs/cachefiles/ondemand.c
+index fe3de9ad57bf6d..00e1f2471b9e23 100644
+--- a/fs/cachefiles/ondemand.c
++++ b/fs/cachefiles/ondemand.c
+@@ -83,10 +83,8 @@ static ssize_t cachefiles_ondemand_fd_write_iter(struct kiocb *kiocb,
+ 
+ 	trace_cachefiles_ondemand_fd_write(object, file_inode(file), pos, len);
+ 	ret = __cachefiles_write(object, file, pos, iter, NULL, NULL);
+-	if (!ret) {
+-		ret = len;
++	if (ret > 0)
+ 		kiocb->ki_pos += ret;
+-	}
+ 
+ out:
+ 	fput(file);
+diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c
+index beba15673be8d3..11ebddc57bc73a 100644
+--- a/fs/efivarfs/super.c
++++ b/fs/efivarfs/super.c
+@@ -354,10 +354,16 @@ static int efivarfs_reconfigure(struct fs_context *fc)
+ 	return 0;
+ }
+ 
++static void efivarfs_free(struct fs_context *fc)
++{
++	kfree(fc->s_fs_info);
++}
++
+ static const struct fs_context_operations efivarfs_context_ops = {
+ 	.get_tree	= efivarfs_get_tree,
+ 	.parse_param	= efivarfs_parse_param,
+ 	.reconfigure	= efivarfs_reconfigure,
++	.free		= efivarfs_free,
+ };
+ 
+ static int efivarfs_init_fs_context(struct fs_context *fc)
+diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
+index d5da9817df9b36..33e6a620c103e0 100644
+--- a/fs/isofs/inode.c
++++ b/fs/isofs/inode.c
+@@ -1440,9 +1440,16 @@ static int isofs_read_inode(struct inode *inode, int relocated)
+ 		inode->i_op = &page_symlink_inode_operations;
+ 		inode_nohighmem(inode);
+ 		inode->i_data.a_ops = &isofs_symlink_aops;
+-	} else
++	} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
++		   S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
+ 		/* XXX - parse_rock_ridge_inode() had already set i_rdev. */
+ 		init_special_inode(inode, inode->i_mode, inode->i_rdev);
++	} else {
++		printk(KERN_DEBUG "ISOFS: Invalid file type 0%04o for inode %lu.\n",
++			inode->i_mode, inode->i_ino);
++		ret = -EIO;
++		goto fail;
++	}
+ 
+ 	ret = 0;
+ out:
+diff --git a/fs/namespace.c b/fs/namespace.c
+index b5c5cf01d0c40a..bb1560b0d25c33 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -2263,6 +2263,11 @@ struct vfsmount *clone_private_mount(const struct path *path)
+ 	if (!check_mnt(old_mnt))
+ 		goto invalid;
+ 
++	if (!ns_capable(old_mnt->mnt_ns->user_ns, CAP_SYS_ADMIN)) {
++		up_read(&namespace_sem);
++		return ERR_PTR(-EPERM);
++	}
++
+ 	if (has_locked_children(old_mnt, path->dentry))
+ 		goto invalid;
+ 
+diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c
+index d5dbef7f5c95bb..0539c2a328c731 100644
+--- a/fs/notify/dnotify/dnotify.c
++++ b/fs/notify/dnotify/dnotify.c
+@@ -309,6 +309,10 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned int arg)
+ 		goto out_err;
+ 	}
+ 
++	error = file_f_owner_allocate(filp);
++	if (error)
++		goto out_err;
++
+ 	/* new fsnotify mark, we expect most fcntl calls to add a new mark */
+ 	new_dn_mark = kmem_cache_alloc(dnotify_mark_cache, GFP_KERNEL);
+ 	if (!new_dn_mark) {
+@@ -316,10 +320,6 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned int arg)
+ 		goto out_err;
+ 	}
+ 
+-	error = file_f_owner_allocate(filp);
+-	if (error)
+-		goto out_err;
+-
+ 	/* set up the new_fsn_mark and new_dn_mark */
+ 	new_fsn_mark = &new_dn_mark->fsn_mark;
+ 	fsnotify_init_mark(new_fsn_mark, dnotify_group);
+diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c
+index 0f6fec042f6a03..166dc8fd06c027 100644
+--- a/fs/smb/client/file.c
++++ b/fs/smb/client/file.c
+@@ -3076,7 +3076,8 @@ void cifs_oplock_break(struct work_struct *work)
+ 	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
+ 						  oplock_break);
+ 	struct inode *inode = d_inode(cfile->dentry);
+-	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
++	struct super_block *sb = inode->i_sb;
++	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
+ 	struct cifsInodeInfo *cinode = CIFS_I(inode);
+ 	struct cifs_tcon *tcon;
+ 	struct TCP_Server_Info *server;
+@@ -3086,6 +3087,12 @@ void cifs_oplock_break(struct work_struct *work)
+ 	__u64 persistent_fid, volatile_fid;
+ 	__u16 net_fid;
+ 
++	/*
++	 * Hold a reference to the superblock to prevent it and its inodes from
++	 * being freed while we are accessing cinode. Otherwise, _cifsFileInfo_put()
++	 * may release the last reference to the sb and trigger inode eviction.
++	 */
++	cifs_sb_active(sb);
+ 	wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
+ 			TASK_UNINTERRUPTIBLE);
+ 
+@@ -3158,6 +3165,7 @@ void cifs_oplock_break(struct work_struct *work)
+ 	cifs_put_tlink(tlink);
+ out:
+ 	cifs_done_oplock_break(cinode);
++	cifs_sb_deactive(sb);
+ }
+ 
+ static int cifs_swap_activate(struct swap_info_struct *sis,
+diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
+index e596bc4837b68f..78a546ef69e889 100644
+--- a/fs/smb/client/smb2ops.c
++++ b/fs/smb/client/smb2ops.c
+@@ -4342,6 +4342,7 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
+ 	u8 key[SMB3_ENC_DEC_KEY_SIZE];
+ 	struct aead_request *req;
+ 	u8 *iv;
++	DECLARE_CRYPTO_WAIT(wait);
+ 	unsigned int crypt_len = le32_to_cpu(tr_hdr->OriginalMessageSize);
+ 	void *creq;
+ 	size_t sensitive_size;
+@@ -4392,7 +4393,11 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
+ 	aead_request_set_crypt(req, sg, sg, crypt_len, iv);
+ 	aead_request_set_ad(req, assoc_data_len);
+ 
+-	rc = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
++	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
++				  crypto_req_done, &wait);
++
++	rc = crypto_wait_req(enc ? crypto_aead_encrypt(req)
++				: crypto_aead_decrypt(req), &wait);
+ 
+ 	if (!rc && enc)
+ 		memcpy(&tr_hdr->Signature, sign, SMB2_SIGNATURE_SIZE);
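(Note on the crypt_message() hunk: an AEAD transform may be backed by an async implementation, so crypto_aead_encrypt()/decrypt() can return -EINPROGRESS or -EBUSY; without a completion callback the caller would treat that as a final error, or free the request while the engine still owns it. DECLARE_CRYPTO_WAIT()/crypto_wait_req() from <linux/crypto.h> are the standard synchronous-wait helpers:)

	DECLARE_CRYPTO_WAIT(wait);

	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &wait);
	/* sleeps on -EINPROGRESS/-EBUSY until crypto_req_done() fires,
	 * then returns the real completion status */
	err = crypto_wait_req(crypto_aead_encrypt(req), &wait);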
+diff --git a/fs/smb/client/smbdirect.c b/fs/smb/client/smbdirect.c
+index ac06f2617f3468..754e94a0e07f50 100644
+--- a/fs/smb/client/smbdirect.c
++++ b/fs/smb/client/smbdirect.c
+@@ -907,8 +907,10 @@ static int smbd_post_send_iter(struct smbd_connection *info,
+ 			.local_dma_lkey	= sc->ib.pd->local_dma_lkey,
+ 			.direction	= DMA_TO_DEVICE,
+ 		};
++		size_t payload_len = umin(*_remaining_data_length,
++					  sp->max_send_size - sizeof(*packet));
+ 
+-		rc = smb_extract_iter_to_rdma(iter, *_remaining_data_length,
++		rc = smb_extract_iter_to_rdma(iter, payload_len,
+ 					      &extract);
+ 		if (rc < 0)
+ 			goto err_dma;
+@@ -1013,6 +1015,27 @@ static int smbd_post_send_empty(struct smbd_connection *info)
+ 	return smbd_post_send_iter(info, NULL, &remaining_data_length);
+ }
+ 
++static int smbd_post_send_full_iter(struct smbd_connection *info,
++				    struct iov_iter *iter,
++				    int *_remaining_data_length)
++{
++	int rc = 0;
++
++	/*
++	 * smbd_post_send_iter() respects the
++	 * negotiated max_send_size, so we need to
++	 * loop until the full iter is posted
++	 */
++
++	while (iov_iter_count(iter) > 0) {
++		rc = smbd_post_send_iter(info, iter, _remaining_data_length);
++		if (rc < 0)
++			break;
++	}
++
++	return rc;
++}
++
+ /*
+  * Post a receive request to the transport
+  * The remote peer can only send data when a receive request is posted
+@@ -1962,14 +1985,14 @@ int smbd_send(struct TCP_Server_Info *server,
+ 			klen += rqst->rq_iov[i].iov_len;
+ 		iov_iter_kvec(&iter, ITER_SOURCE, rqst->rq_iov, rqst->rq_nvec, klen);
+ 
+-		rc = smbd_post_send_iter(info, &iter, &remaining_data_length);
++		rc = smbd_post_send_full_iter(info, &iter, &remaining_data_length);
+ 		if (rc < 0)
+ 			break;
+ 
+ 		if (iov_iter_count(&rqst->rq_iter) > 0) {
+ 			/* And then the data pages if there are any */
+-			rc = smbd_post_send_iter(info, &rqst->rq_iter,
+-						 &remaining_data_length);
++			rc = smbd_post_send_full_iter(info, &rqst->rq_iter,
++						      &remaining_data_length);
+ 			if (rc < 0)
+ 				break;
+ 		}
+diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
+index 730aa0245aef93..3d1d7296aed911 100644
+--- a/include/net/bluetooth/hci_core.h
++++ b/include/net/bluetooth/hci_core.h
+@@ -817,20 +817,20 @@ extern struct mutex hci_cb_list_lock;
+ #define hci_dev_test_and_clear_flag(hdev, nr)  test_and_clear_bit((nr), (hdev)->dev_flags)
+ #define hci_dev_test_and_change_flag(hdev, nr) test_and_change_bit((nr), (hdev)->dev_flags)
+ 
+-#define hci_dev_clear_volatile_flags(hdev)			\
+-	do {							\
+-		hci_dev_clear_flag(hdev, HCI_LE_SCAN);		\
+-		hci_dev_clear_flag(hdev, HCI_LE_ADV);		\
+-		hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);\
+-		hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);	\
+-		hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);	\
++#define hci_dev_clear_volatile_flags(hdev)				\
++	do {								\
++		hci_dev_clear_flag((hdev), HCI_LE_SCAN);		\
++		hci_dev_clear_flag((hdev), HCI_LE_ADV);			\
++		hci_dev_clear_flag((hdev), HCI_LL_RPA_RESOLUTION);	\
++		hci_dev_clear_flag((hdev), HCI_PERIODIC_INQ);		\
++		hci_dev_clear_flag((hdev), HCI_QUALITY_REPORT);		\
+ 	} while (0)
+ 
+ #define hci_dev_le_state_simultaneous(hdev) \
+-	(!test_bit(HCI_QUIRK_BROKEN_LE_STATES, &hdev->quirks) && \
+-	 (hdev->le_states[4] & 0x08) &&	/* Central */ \
+-	 (hdev->le_states[4] & 0x40) &&	/* Peripheral */ \
+-	 (hdev->le_states[3] & 0x10))	/* Simultaneous */
++	(!test_bit(HCI_QUIRK_BROKEN_LE_STATES, &(hdev)->quirks) && \
++	 ((hdev)->le_states[4] & 0x08) &&	/* Central */ \
++	 ((hdev)->le_states[4] & 0x40) &&	/* Peripheral */ \
++	 ((hdev)->le_states[3] & 0x10))		/* Simultaneous */
+ 
+ /* ----- HCI interface to upper protocols ----- */
+ int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr);
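(Note on the hci_core.h hunk: it is pure macro hygiene; every use of a macro parameter gains parentheses so that an argument containing operators still expands correctly. A standalone illustration of why that matters:)

	#include <stdio.h>

	#define BAD_DOUBLE(x)	x * 2		/* parameter unparenthesized */
	#define GOOD_DOUBLE(x)	((x) * 2)

	int main(void)
	{
		printf("%d\n", BAD_DOUBLE(1 + 2));	/* 1 + 2*2 = 5 */
		printf("%d\n", GOOD_DOUBLE(1 + 2));	/* (1+2)*2 = 6 */
		return 0;
	}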
+diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
+index 8a712ca73f2b07..bb1862536f9ca5 100644
+--- a/include/net/cfg80211.h
++++ b/include/net/cfg80211.h
+@@ -2710,7 +2710,7 @@ struct cfg80211_scan_request {
+ 	s8 tsf_report_link_id;
+ 
+ 	/* keep last */
+-	struct ieee80211_channel *channels[] __counted_by(n_channels);
++	struct ieee80211_channel *channels[];
+ };
+ 
+ static inline void get_random_mask_addr(u8 *buf, const u8 *addr, const u8 *mask)
+diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
+index cba3ccf03fcc84..8cb70e7485e2f1 100644
+--- a/include/net/netfilter/nf_conntrack.h
++++ b/include/net/netfilter/nf_conntrack.h
+@@ -308,8 +308,19 @@ static inline bool nf_ct_is_expired(const struct nf_conn *ct)
+ /* use after obtaining a reference count */
+ static inline bool nf_ct_should_gc(const struct nf_conn *ct)
+ {
+-	return nf_ct_is_expired(ct) && nf_ct_is_confirmed(ct) &&
+-	       !nf_ct_is_dying(ct);
++	if (!nf_ct_is_confirmed(ct))
++		return false;
++
++	/* load ct->timeout after is_confirmed() test.
++	 * Pairs with __nf_conntrack_confirm() which:
++	 * 1. Increases ct->timeout value
++	 * 2. Inserts ct into rcu hlist
++	 * 3. Sets the confirmed bit
++	 * 4. Unlocks the hlist lock
++	 */
++	smp_acquire__after_ctrl_dep();
++
++	return nf_ct_is_expired(ct) && !nf_ct_is_dying(ct);
+ }
+ 
+ #define	NF_CT_DAY	(86400 * HZ)
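(Note on the nf_ct_should_gc() hunk: the combined test is split into "confirmed first, then timeout", with smp_acquire__after_ctrl_dep() upgrading the control dependency into an acquire so the ct->timeout load cannot be hoisted before the confirmed-bit test. It pairs with the writer-side ordering added to __nf_conntrack_confirm() later in this patch. The generic shape, with illustrative names:)

	if (!test_bit(OBJ_PUBLISHED_BIT, &obj->status))
		return false;
	/* order all later loads after the successful bit test */
	smp_acquire__after_ctrl_dep();
	return time_after(jiffies, READ_ONCE(obj->timeout));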
+diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
+index e1a37e9c2d42d5..eea3769765ac0c 100644
+--- a/include/trace/events/rxrpc.h
++++ b/include/trace/events/rxrpc.h
+@@ -282,12 +282,15 @@
+ 	EM(rxrpc_call_put_userid,		"PUT user-id ") \
+ 	EM(rxrpc_call_see_accept,		"SEE accept  ") \
+ 	EM(rxrpc_call_see_activate_client,	"SEE act-clnt") \
++	EM(rxrpc_call_see_already_released,	"SEE alrdy-rl") \
+ 	EM(rxrpc_call_see_connect_failed,	"SEE con-fail") \
+ 	EM(rxrpc_call_see_connected,		"SEE connect ") \
+ 	EM(rxrpc_call_see_conn_abort,		"SEE conn-abt") \
++	EM(rxrpc_call_see_discard,		"SEE discard ") \
+ 	EM(rxrpc_call_see_disconnected,		"SEE disconn ") \
+ 	EM(rxrpc_call_see_distribute_error,	"SEE dist-err") \
+ 	EM(rxrpc_call_see_input,		"SEE input   ") \
++	EM(rxrpc_call_see_recvmsg,		"SEE recvmsg ") \
+ 	EM(rxrpc_call_see_release,		"SEE release ") \
+ 	EM(rxrpc_call_see_userid_exists,	"SEE u-exists") \
+ 	EM(rxrpc_call_see_waiting_call,		"SEE q-conn  ") \
+diff --git a/io_uring/net.c b/io_uring/net.c
+index 0116cfaec84881..356f95c33aa281 100644
+--- a/io_uring/net.c
++++ b/io_uring/net.c
+@@ -1735,9 +1735,11 @@ int io_connect(struct io_kiocb *req, unsigned int issue_flags)
+ 	int ret;
+ 	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
+ 
+-	if (unlikely(req->flags & REQ_F_FAIL)) {
+-		ret = -ECONNRESET;
+-		goto out;
++	if (connect->in_progress) {
++		struct poll_table_struct pt = { ._key = EPOLLERR };
++
++		if (vfs_poll(req->file, &pt) & EPOLLERR)
++			goto get_sock_err;
+ 	}
+ 
+ 	file_flags = force_nonblock ? O_NONBLOCK : 0;
+@@ -1762,8 +1764,10 @@ int io_connect(struct io_kiocb *req, unsigned int issue_flags)
+ 		 * which means the previous result is good. For both of these,
+ 		 * grab the sock_error() and use that for the completion.
+ 		 */
+-		if (ret == -EBADFD || ret == -EISCONN)
++		if (ret == -EBADFD || ret == -EISCONN) {
++get_sock_err:
+ 			ret = sock_error(sock_from_file(req->file)->sk);
++		}
+ 	}
+ 	if (ret == -ERESTARTSYS)
+ 		ret = -EINTR;
+diff --git a/io_uring/poll.c b/io_uring/poll.c
+index b93e9ebdd87c8f..17dea8aa09c9b3 100644
+--- a/io_uring/poll.c
++++ b/io_uring/poll.c
+@@ -315,8 +315,6 @@ static int io_poll_check_events(struct io_kiocb *req, struct io_tw_state *ts)
+ 				return IOU_POLL_REISSUE;
+ 			}
+ 		}
+-		if (unlikely(req->cqe.res & EPOLLERR))
+-			req_set_fail(req);
+ 		if (req->apoll_events & EPOLLONESHOT)
+ 			return IOU_POLL_DONE;
+ 
+diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
+index 9173d107758d45..6cf165c55bdacc 100644
+--- a/kernel/bpf/helpers.c
++++ b/kernel/bpf/helpers.c
+@@ -883,6 +883,13 @@ int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
+ 		if (fmt[i] == 'p') {
+ 			sizeof_cur_arg = sizeof(long);
+ 
++			if (fmt[i + 1] == 0 || isspace(fmt[i + 1]) ||
++			    ispunct(fmt[i + 1])) {
++				if (tmp_buf)
++					cur_arg = raw_args[num_spec];
++				goto nocopy_fmt;
++			}
++
+ 			if ((fmt[i + 1] == 'k' || fmt[i + 1] == 'u') &&
+ 			    fmt[i + 2] == 's') {
+ 				fmt_ptype = fmt[i + 1];
+@@ -890,11 +897,9 @@ int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
+ 				goto fmt_str;
+ 			}
+ 
+-			if (fmt[i + 1] == 0 || isspace(fmt[i + 1]) ||
+-			    ispunct(fmt[i + 1]) || fmt[i + 1] == 'K' ||
++			if (fmt[i + 1] == 'K' ||
+ 			    fmt[i + 1] == 'x' || fmt[i + 1] == 's' ||
+ 			    fmt[i + 1] == 'S') {
+-				/* just kernel pointers */
+ 				if (tmp_buf)
+ 					cur_arg = raw_args[num_spec];
+ 				i++;
+diff --git a/kernel/cgroup/legacy_freezer.c b/kernel/cgroup/legacy_freezer.c
+index 01c02d116e8e16..c37888b7d25afd 100644
+--- a/kernel/cgroup/legacy_freezer.c
++++ b/kernel/cgroup/legacy_freezer.c
+@@ -66,15 +66,9 @@ static struct freezer *parent_freezer(struct freezer *freezer)
+ bool cgroup_freezing(struct task_struct *task)
+ {
+ 	bool ret;
+-	unsigned int state;
+ 
+ 	rcu_read_lock();
+-	/* Check if the cgroup is still FREEZING, but not FROZEN. The extra
+-	 * !FROZEN check is required, because the FREEZING bit is not cleared
+-	 * when the state FROZEN is reached.
+-	 */
+-	state = task_freezer(task)->state;
+-	ret = (state & CGROUP_FREEZING) && !(state & CGROUP_FROZEN);
++	ret = task_freezer(task)->state & CGROUP_FREEZING;
+ 	rcu_read_unlock();
+ 
+ 	return ret;
+diff --git a/kernel/freezer.c b/kernel/freezer.c
+index 8d530d0949ff69..6a96149aede9f5 100644
+--- a/kernel/freezer.c
++++ b/kernel/freezer.c
+@@ -201,18 +201,9 @@ static int __restore_freezer_state(struct task_struct *p, void *arg)
+ 
+ void __thaw_task(struct task_struct *p)
+ {
+-	unsigned long flags;
+-
+-	spin_lock_irqsave(&freezer_lock, flags);
+-	if (WARN_ON_ONCE(freezing(p)))
+-		goto unlock;
+-
+-	if (!frozen(p) || task_call_func(p, __restore_freezer_state, NULL))
+-		goto unlock;
+-
+-	wake_up_state(p, TASK_FROZEN);
+-unlock:
+-	spin_unlock_irqrestore(&freezer_lock, flags);
++	guard(spinlock_irqsave)(&freezer_lock);
++	if (frozen(p) && !task_call_func(p, __restore_freezer_state, NULL))
++		wake_up_state(p, TASK_FROZEN);
+ }
+ 
+ /**
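(Note on the __thaw_task() hunk: it converts to the scope-based guard() helper from <linux/cleanup.h>, which drops the spinlock automatically when the enclosing scope ends and removes the goto-unlock boilerplate on every return path. A minimal sketch of the idiom; struct foo and its fields are illustrative:)

	#include <linux/cleanup.h>
	#include <linux/spinlock.h>

	struct foo {
		spinlock_t lock;
		bool ready;
		unsigned long count;
	};

	static void locked_op(struct foo *f)
	{
		guard(spinlock_irqsave)(&f->lock);

		if (!f->ready)
			return;		/* lock released automatically */
		f->count++;
	}				/* ...and on the normal exit too */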
+diff --git a/kernel/sched/loadavg.c b/kernel/sched/loadavg.c
+index c48900b856a2aa..52ca8e268cfc56 100644
+--- a/kernel/sched/loadavg.c
++++ b/kernel/sched/loadavg.c
+@@ -80,7 +80,7 @@ long calc_load_fold_active(struct rq *this_rq, long adjust)
+ 	long nr_active, delta = 0;
+ 
+ 	nr_active = this_rq->nr_running - adjust;
+-	nr_active += (int)this_rq->nr_uninterruptible;
++	nr_active += (long)this_rq->nr_uninterruptible;
+ 
+ 	if (nr_active != this_rq->calc_load_active) {
+ 		delta = nr_active - this_rq->calc_load_active;
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index e7f5ab21221c48..a441990fe808d8 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -1156,7 +1156,7 @@ struct rq {
+ 	 * one CPU and if it got migrated afterwards it may decrease
+ 	 * it on another CPU. Always updated under the runqueue lock:
+ 	 */
+-	unsigned int		nr_uninterruptible;
++	unsigned long		nr_uninterruptible;
+ 
+ 	struct task_struct __rcu	*curr;
+ 	struct sched_dl_entity	*dl_server;
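(Note on the loadavg/sched.h pair: nr_uninterruptible and its fold are widened to long. The per-runqueue counters only make sense when summed across runqueues, so an individual runqueue can legitimately hold a value far outside the int range, and folding it through 32 bits wraps. A standalone illustration; the magnitude is hypothetical:)

	#include <stdio.h>

	int main(void)
	{
		unsigned long per_rq = 0x100000001UL;	/* > 2^32 */

		printf("as int:  %d\n", (int)per_rq);	/* truncates to 1 */
		printf("as long: %ld\n", (long)per_rq);	/* kept on LP64 */
		return 0;
	}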
+diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
+index 15fb255733fb63..dbea76058863b8 100644
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -2879,7 +2879,10 @@ __register_event(struct trace_event_call *call, struct module *mod)
+ 	if (ret < 0)
+ 		return ret;
+ 
++	down_write(&trace_event_sem);
+ 	list_add(&call->list, &ftrace_events);
++	up_write(&trace_event_sem);
++
+ 	if (call->flags & TRACE_EVENT_FL_DYNAMIC)
+ 		atomic_set(&call->refcnt, 0);
+ 	else
+@@ -3471,6 +3474,8 @@ __trace_add_event_dirs(struct trace_array *tr)
+ 	struct trace_event_call *call;
+ 	int ret;
+ 
++	lockdep_assert_held(&trace_event_sem);
++
+ 	list_for_each_entry(call, &ftrace_events, list) {
+ 		ret = __trace_add_new_event(call, tr);
+ 		if (ret < 0)
+diff --git a/kernel/trace/trace_osnoise.c b/kernel/trace/trace_osnoise.c
+index a94790f5cda727..216247913980ed 100644
+--- a/kernel/trace/trace_osnoise.c
++++ b/kernel/trace/trace_osnoise.c
+@@ -665,8 +665,8 @@ __timerlat_dump_stack(struct trace_buffer *buffer, struct trace_stack *fstack, u
+ 
+ 	entry = ring_buffer_event_data(event);
+ 
+-	memcpy(&entry->caller, fstack->calls, size);
+ 	entry->size = fstack->nr_entries;
++	memcpy(&entry->caller, fstack->calls, size);
+ 
+ 	if (!call_filter_check_discard(call, entry, buffer, event))
+ 		trace_buffer_unlock_commit_nostack(buffer, event);
+diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
+index ae20ad7f746162..055f5164bd96f7 100644
+--- a/kernel/trace/trace_probe.c
++++ b/kernel/trace/trace_probe.c
+@@ -657,7 +657,7 @@ static int parse_btf_arg(char *varname,
+ 		ret = query_btf_context(ctx);
+ 		if (ret < 0 || ctx->nr_params == 0) {
+ 			trace_probe_log_err(ctx->offset, NO_BTF_ENTRY);
+-			return PTR_ERR(params);
++			return -ENOENT;
+ 		}
+ 	}
+ 	params = ctx->params;
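(Note on the trace_probe hunk: in that error path `params` has not been assigned from an ERR_PTR()-encoded value, so PTR_ERR(params) decoded garbage. PTR_ERR() is only valid on pointers produced by ERR_PTR(); any other failure wants an explicit errno. Illustrative shape, names assumed:)

	p = lookup_thing(name);
	if (IS_ERR(p))
		return PTR_ERR(p);	/* fine: p came from ERR_PTR(-E...) */
	if (!p->nr_entries)
		return -ENOENT;		/* explicit errno, not PTR_ERR(p) */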
+diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
+index 41be38264493df..49a6d49c23dc59 100644
+--- a/net/8021q/vlan.c
++++ b/net/8021q/vlan.c
+@@ -358,6 +358,35 @@ static int __vlan_device_event(struct net_device *dev, unsigned long event)
+ 	return err;
+ }
+ 
++static void vlan_vid0_add(struct net_device *dev)
++{
++	struct vlan_info *vlan_info;
++	int err;
++
++	if (!(dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
++		return;
++
++	pr_info("adding VLAN 0 to HW filter on device %s\n", dev->name);
++
++	err = vlan_vid_add(dev, htons(ETH_P_8021Q), 0);
++	if (err)
++		return;
++
++	vlan_info = rtnl_dereference(dev->vlan_info);
++	vlan_info->auto_vid0 = true;
++}
++
++static void vlan_vid0_del(struct net_device *dev)
++{
++	struct vlan_info *vlan_info = rtnl_dereference(dev->vlan_info);
++
++	if (!vlan_info || !vlan_info->auto_vid0)
++		return;
++
++	vlan_info->auto_vid0 = false;
++	vlan_vid_del(dev, htons(ETH_P_8021Q), 0);
++}
++
+ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
+ 			     void *ptr)
+ {
+@@ -379,15 +408,10 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
+ 			return notifier_from_errno(err);
+ 	}
+ 
+-	if ((event == NETDEV_UP) &&
+-	    (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) {
+-		pr_info("adding VLAN 0 to HW filter on device %s\n",
+-			dev->name);
+-		vlan_vid_add(dev, htons(ETH_P_8021Q), 0);
+-	}
+-	if (event == NETDEV_DOWN &&
+-	    (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
+-		vlan_vid_del(dev, htons(ETH_P_8021Q), 0);
++	if (event == NETDEV_UP)
++		vlan_vid0_add(dev);
++	else if (event == NETDEV_DOWN)
++		vlan_vid0_del(dev);
+ 
+ 	vlan_info = rtnl_dereference(dev->vlan_info);
+ 	if (!vlan_info)
+diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
+index 5eaf38875554b0..c7ffe591d59366 100644
+--- a/net/8021q/vlan.h
++++ b/net/8021q/vlan.h
+@@ -33,6 +33,7 @@ struct vlan_info {
+ 	struct vlan_group	grp;
+ 	struct list_head	vid_list;
+ 	unsigned int		nr_vids;
++	bool			auto_vid0;
+ 	struct rcu_head		rcu;
+ };
+ 
+diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
+index bc01135e43f3ea..bbd809414b2f2c 100644
+--- a/net/bluetooth/hci_sync.c
++++ b/net/bluetooth/hci_sync.c
+@@ -6789,8 +6789,8 @@ int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
+ 		return 0;
+ 	}
+ 
+-	/* No privacy so use a public address. */
+-	*own_addr_type = ADDR_LE_DEV_PUBLIC;
++	/* No privacy, use the current address */
++	hci_copy_identity_address(hdev, rand_addr, own_addr_type);
+ 
+ 	return 0;
+ }
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index 0628fedc0e29b6..7dafc3e0a15aaf 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -3485,12 +3485,28 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data
+ 		/* Configure output options and let the other side know
+ 		 * which ones we don't like. */
+ 
+-		/* If MTU is not provided in configure request, use the most recently
+-		 * explicitly or implicitly accepted value for the other direction,
+-		 * or the default value.
++		/* If MTU is not provided in configure request, try adjusting it
++		 * to the current output MTU if it has been set
++		 *
++		 * Bluetooth Core 6.1, Vol 3, Part A, Section 4.5
++		 *
++		 * Each configuration parameter value (if any is present) in an
++		 * L2CAP_CONFIGURATION_RSP packet reflects an ‘adjustment’ to a
++		 * configuration parameter value that has been sent (or, in case
++		 * of default values, implied) in the corresponding
++		 * L2CAP_CONFIGURATION_REQ packet.
+ 		 */
+-		if (mtu == 0)
+-			mtu = chan->imtu ? chan->imtu : L2CAP_DEFAULT_MTU;
++		if (!mtu) {
++			/* Only adjust for ERTM channels as for older modes the
++			 * remote stack may not be able to detect that the
++			 * adjustment causing it to silently drop packets.
++			 */
++			if (chan->mode == L2CAP_MODE_ERTM &&
++			    chan->omtu && chan->omtu != L2CAP_DEFAULT_MTU)
++				mtu = chan->omtu;
++			else
++				mtu = L2CAP_DEFAULT_MTU;
++		}
+ 
+ 		if (mtu < L2CAP_DEFAULT_MIN_MTU)
+ 			result = L2CAP_CONF_UNACCEPT;
+diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
+index acd11b268b98ad..615c18e290ab92 100644
+--- a/net/bluetooth/l2cap_sock.c
++++ b/net/bluetooth/l2cap_sock.c
+@@ -1690,6 +1690,9 @@ static void l2cap_sock_resume_cb(struct l2cap_chan *chan)
+ {
+ 	struct sock *sk = chan->data;
+ 
++	if (!sk)
++		return;
++
+ 	if (test_and_clear_bit(FLAG_PENDING_SECURITY, &chan->flags)) {
+ 		sk->sk_state = BT_CONNECTED;
+ 		chan->state = BT_CONNECTED;
+diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
+index 8b9724fd752a1a..a31971fe2fd7e8 100644
+--- a/net/bluetooth/smp.c
++++ b/net/bluetooth/smp.c
+@@ -1379,7 +1379,7 @@ static void smp_timeout(struct work_struct *work)
+ 
+ 	bt_dev_dbg(conn->hcon->hdev, "conn %p", conn);
+ 
+-	hci_disconnect(conn->hcon, HCI_ERROR_REMOTE_USER_TERM);
++	hci_disconnect(conn->hcon, HCI_ERROR_AUTH_FAILURE);
+ }
+ 
+ static struct smp_chan *smp_chan_create(struct l2cap_conn *conn)
+@@ -2977,8 +2977,25 @@ static int smp_sig_channel(struct l2cap_chan *chan, struct sk_buff *skb)
+ 	if (code > SMP_CMD_MAX)
+ 		goto drop;
+ 
+-	if (smp && !test_and_clear_bit(code, &smp->allow_cmd))
++	if (smp && !test_and_clear_bit(code, &smp->allow_cmd)) {
++		/* If there is a context and the command is not allowed consider
++		 * it a failure so the session is cleanup properly.
++		 */
++		switch (code) {
++		case SMP_CMD_IDENT_INFO:
++		case SMP_CMD_IDENT_ADDR_INFO:
++		case SMP_CMD_SIGN_INFO:
++			/* 3.6.1. Key distribution and generation
++			 *
++			 * A device may reject a distributed key by sending the
++			 * Pairing Failed command with the reason set to
++			 * "Key Rejected".
++			 */
++			smp_failure(conn, SMP_KEY_REJECTED);
++			break;
++		}
+ 		goto drop;
++	}
+ 
+ 	/* If we don't have a context the only allowed commands are
+ 	 * pairing request and security request.
+diff --git a/net/bluetooth/smp.h b/net/bluetooth/smp.h
+index 87a59ec2c9f02b..c5da53dfab04f2 100644
+--- a/net/bluetooth/smp.h
++++ b/net/bluetooth/smp.h
+@@ -138,6 +138,7 @@ struct smp_cmd_keypress_notify {
+ #define SMP_NUMERIC_COMP_FAILED		0x0c
+ #define SMP_BREDR_PAIRING_IN_PROGRESS	0x0d
+ #define SMP_CROSS_TRANSP_NOT_ALLOWED	0x0e
++#define SMP_KEY_REJECTED		0x0f
+ 
+ #define SMP_MIN_ENC_KEY_SIZE		7
+ #define SMP_MAX_ENC_KEY_SIZE		16
+diff --git a/net/bridge/br_switchdev.c b/net/bridge/br_switchdev.c
+index 7b41ee8740cbba..f10bd6a233dcf9 100644
+--- a/net/bridge/br_switchdev.c
++++ b/net/bridge/br_switchdev.c
+@@ -17,6 +17,9 @@ static bool nbp_switchdev_can_offload_tx_fwd(const struct net_bridge_port *p,
+ 	if (!static_branch_unlikely(&br_switchdev_tx_fwd_offload))
+ 		return false;
+ 
++	if (br_multicast_igmp_type(skb))
++		return false;
++
+ 	return (p->flags & BR_TX_FWD_OFFLOAD) &&
+ 	       (p->hwdom != BR_INPUT_SKB_CB(skb)->src_hwdom);
+ }
+diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
+index e04ebe651c3347..3a9c5c14c310ed 100644
+--- a/net/ipv4/tcp_offload.c
++++ b/net/ipv4/tcp_offload.c
+@@ -355,6 +355,7 @@ struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb,
+ 		flush |= skb->ip_summed != p->ip_summed;
+ 		flush |= skb->csum_level != p->csum_level;
+ 		flush |= NAPI_GRO_CB(p)->count >= 64;
++		skb_set_network_header(skb, skb_gro_receive_network_offset(skb));
+ 
+ 		if (flush || skb_gro_receive_list(p, skb))
+ 			mss = 1;
+diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
+index 845730184c5d31..5de47dd5e9093d 100644
+--- a/net/ipv4/udp_offload.c
++++ b/net/ipv4/udp_offload.c
+@@ -604,6 +604,7 @@ static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
+ 					NAPI_GRO_CB(skb)->flush = 1;
+ 					return NULL;
+ 				}
++				skb_set_network_header(skb, skb_gro_receive_network_offset(skb));
+ 				ret = skb_gro_receive_list(p, skb);
+ 			} else {
+ 				skb_gro_postpull_rcsum(skb, uh,
+diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
+index b7b62e5a562e5d..9949554e3211b5 100644
+--- a/net/ipv6/mcast.c
++++ b/net/ipv6/mcast.c
+@@ -804,8 +804,8 @@ static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
+ 		} else {
+ 			im->mca_crcount = idev->mc_qrv;
+ 		}
+-		in6_dev_put(pmc->idev);
+ 		ip6_mc_clear_src(pmc);
++		in6_dev_put(pmc->idev);
+ 		kfree_rcu(pmc, rcu);
+ 	}
+ }
+diff --git a/net/ipv6/rpl_iptunnel.c b/net/ipv6/rpl_iptunnel.c
+index 7c05ac846646f3..eccfa4203e96b4 100644
+--- a/net/ipv6/rpl_iptunnel.c
++++ b/net/ipv6/rpl_iptunnel.c
+@@ -129,13 +129,13 @@ static int rpl_do_srh_inline(struct sk_buff *skb, const struct rpl_lwt *rlwt,
+ 			     struct dst_entry *cache_dst)
+ {
+ 	struct ipv6_rpl_sr_hdr *isrh, *csrh;
+-	const struct ipv6hdr *oldhdr;
++	struct ipv6hdr oldhdr;
+ 	struct ipv6hdr *hdr;
+ 	unsigned char *buf;
+ 	size_t hdrlen;
+ 	int err;
+ 
+-	oldhdr = ipv6_hdr(skb);
++	memcpy(&oldhdr, ipv6_hdr(skb), sizeof(oldhdr));
+ 
+ 	buf = kcalloc(struct_size(srh, segments.addr, srh->segments_left), 2, GFP_ATOMIC);
+ 	if (!buf)
+@@ -147,7 +147,7 @@ static int rpl_do_srh_inline(struct sk_buff *skb, const struct rpl_lwt *rlwt,
+ 	memcpy(isrh, srh, sizeof(*isrh));
+ 	memcpy(isrh->rpl_segaddr, &srh->rpl_segaddr[1],
+ 	       (srh->segments_left - 1) * 16);
+-	isrh->rpl_segaddr[srh->segments_left - 1] = oldhdr->daddr;
++	isrh->rpl_segaddr[srh->segments_left - 1] = oldhdr.daddr;
+ 
+ 	ipv6_rpl_srh_compress(csrh, isrh, &srh->rpl_segaddr[0],
+ 			      isrh->segments_left - 1);
+@@ -169,7 +169,7 @@ static int rpl_do_srh_inline(struct sk_buff *skb, const struct rpl_lwt *rlwt,
+ 	skb_mac_header_rebuild(skb);
+ 
+ 	hdr = ipv6_hdr(skb);
+-	memmove(hdr, oldhdr, sizeof(*hdr));
++	memmove(hdr, &oldhdr, sizeof(*hdr));
+ 	isrh = (void *)hdr + sizeof(*hdr);
+ 	memcpy(isrh, csrh, hdrlen);
+ 
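(Note on the rpl_iptunnel hunk: the IPv6 header is now snapshotted by value because the old `oldhdr` pointer aimed into skb data that the subsequent header expansion can relocate, leaving it dangling. A userspace analogue of the bug and the fix:)

	#include <stdio.h>
	#include <stdlib.h>

	struct hdr { int daddr; };

	int main(void)
	{
		char *buf = malloc(64), *grown;
		struct hdr *old;
		struct hdr snap;

		if (!buf)
			return 1;
		old = (struct hdr *)buf;	/* points into buf */
		old->daddr = 42;
		snap = *old;			/* copy by value BEFORE resizing */

		grown = realloc(buf, 4096);	/* may move; 'old' dangles */
		if (!grown)
			return 1;
		printf("%d\n", snap.daddr);	/* safe; old->daddr is not */
		free(grown);
		return 0;
	}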
+diff --git a/net/mptcp/options.c b/net/mptcp/options.c
+index 23949ae2a3a8db..a97505b786712e 100644
+--- a/net/mptcp/options.c
++++ b/net/mptcp/options.c
+@@ -979,8 +979,9 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *ssk,
+ 		if (subflow->mp_join)
+ 			goto reset;
+ 		subflow->mp_capable = 0;
++		if (!mptcp_try_fallback(ssk))
++			goto reset;
+ 		pr_fallback(msk);
+-		mptcp_do_fallback(ssk);
+ 		return false;
+ 	}
+ 
+diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c
+index 620264c75dc2e3..2c8815daf5b04a 100644
+--- a/net/mptcp/pm.c
++++ b/net/mptcp/pm.c
+@@ -303,8 +303,14 @@ void mptcp_pm_mp_fail_received(struct sock *sk, u64 fail_seq)
+ 
+ 	pr_debug("fail_seq=%llu\n", fail_seq);
+ 
+-	if (!READ_ONCE(msk->allow_infinite_fallback))
++	/* After accepting the fail, we can't create any other subflows */
++	spin_lock_bh(&msk->fallback_lock);
++	if (!msk->allow_infinite_fallback) {
++		spin_unlock_bh(&msk->fallback_lock);
+ 		return;
++	}
++	msk->allow_subflows = false;
++	spin_unlock_bh(&msk->fallback_lock);
+ 
+ 	if (!subflow->fail_tout) {
+ 		pr_debug("send MP_FAIL response and infinite map\n");
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index 42b239d9b2b3cf..d865d08a0c5eda 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -623,10 +623,9 @@ static bool mptcp_check_data_fin(struct sock *sk)
+ 
+ static void mptcp_dss_corruption(struct mptcp_sock *msk, struct sock *ssk)
+ {
+-	if (READ_ONCE(msk->allow_infinite_fallback)) {
++	if (mptcp_try_fallback(ssk)) {
+ 		MPTCP_INC_STATS(sock_net(ssk),
+ 				MPTCP_MIB_DSSCORRUPTIONFALLBACK);
+-		mptcp_do_fallback(ssk);
+ 	} else {
+ 		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSCORRUPTIONRESET);
+ 		mptcp_subflow_reset(ssk);
+@@ -878,7 +877,7 @@ void mptcp_data_ready(struct sock *sk, struct sock *ssk)
+ static void mptcp_subflow_joined(struct mptcp_sock *msk, struct sock *ssk)
+ {
+ 	mptcp_subflow_ctx(ssk)->map_seq = READ_ONCE(msk->ack_seq);
+-	WRITE_ONCE(msk->allow_infinite_fallback, false);
++	msk->allow_infinite_fallback = false;
+ 	mptcp_event(MPTCP_EVENT_SUB_ESTABLISHED, msk, ssk, GFP_ATOMIC);
+ }
+ 
+@@ -889,6 +888,14 @@ static bool __mptcp_finish_join(struct mptcp_sock *msk, struct sock *ssk)
+ 	if (sk->sk_state != TCP_ESTABLISHED)
+ 		return false;
+ 
++	spin_lock_bh(&msk->fallback_lock);
++	if (!msk->allow_subflows) {
++		spin_unlock_bh(&msk->fallback_lock);
++		return false;
++	}
++	mptcp_subflow_joined(msk, ssk);
++	spin_unlock_bh(&msk->fallback_lock);
++
+ 	/* attach to msk socket only after we are sure we will deal with it
+ 	 * at close time
+ 	 */
+@@ -897,7 +904,6 @@ static bool __mptcp_finish_join(struct mptcp_sock *msk, struct sock *ssk)
+ 
+ 	mptcp_subflow_ctx(ssk)->subflow_id = msk->subflow_id++;
+ 	mptcp_sockopt_sync_locked(msk, ssk);
+-	mptcp_subflow_joined(msk, ssk);
+ 	mptcp_stop_tout_timer(sk);
+ 	__mptcp_propagate_sndbuf(sk, ssk);
+ 	return true;
+@@ -1236,10 +1242,14 @@ static void mptcp_update_infinite_map(struct mptcp_sock *msk,
+ 	mpext->infinite_map = 1;
+ 	mpext->data_len = 0;
+ 
++	if (!mptcp_try_fallback(ssk)) {
++		mptcp_subflow_reset(ssk);
++		return;
++	}
++
+ 	MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPTX);
+ 	mptcp_subflow_ctx(ssk)->send_infinite_map = 0;
+ 	pr_fallback(msk);
+-	mptcp_do_fallback(ssk);
+ }
+ 
+ #define MPTCP_MAX_GSO_SIZE (GSO_LEGACY_MAX_SIZE - (MAX_TCP_HEADER + 1))
+@@ -2643,9 +2653,9 @@ static void mptcp_check_fastclose(struct mptcp_sock *msk)
+ 
+ static void __mptcp_retrans(struct sock *sk)
+ {
++	struct mptcp_sendmsg_info info = { .data_lock_held = true, };
+ 	struct mptcp_sock *msk = mptcp_sk(sk);
+ 	struct mptcp_subflow_context *subflow;
+-	struct mptcp_sendmsg_info info = {};
+ 	struct mptcp_data_frag *dfrag;
+ 	struct sock *ssk;
+ 	int ret, err;
+@@ -2690,6 +2700,18 @@ static void __mptcp_retrans(struct sock *sk)
+ 			info.sent = 0;
+ 			info.limit = READ_ONCE(msk->csum_enabled) ? dfrag->data_len :
+ 								    dfrag->already_sent;
++
++			/*
++			 * make the whole retrans decision, xmit and
++			 * fallback disabling sequence atomic
++			 */
++			spin_lock_bh(&msk->fallback_lock);
++			if (__mptcp_check_fallback(msk)) {
++				spin_unlock_bh(&msk->fallback_lock);
++				release_sock(ssk);
++				return;
++			}
++
+ 			while (info.sent < info.limit) {
+ 				ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info);
+ 				if (ret <= 0)
+@@ -2703,8 +2725,9 @@ static void __mptcp_retrans(struct sock *sk)
+ 				len = max(copied, len);
+ 				tcp_push(ssk, 0, info.mss_now, tcp_sk(ssk)->nonagle,
+ 					 info.size_goal);
+-				WRITE_ONCE(msk->allow_infinite_fallback, false);
++				msk->allow_infinite_fallback = false;
+ 			}
++			spin_unlock_bh(&msk->fallback_lock);
+ 
+ 			release_sock(ssk);
+ 		}
+@@ -2833,7 +2856,8 @@ static void __mptcp_init_sock(struct sock *sk)
+ 	WRITE_ONCE(msk->first, NULL);
+ 	inet_csk(sk)->icsk_sync_mss = mptcp_sync_mss;
+ 	WRITE_ONCE(msk->csum_enabled, mptcp_is_checksum_enabled(sock_net(sk)));
+-	WRITE_ONCE(msk->allow_infinite_fallback, true);
++	msk->allow_infinite_fallback = true;
++	msk->allow_subflows = true;
+ 	msk->recovery = false;
+ 	msk->subflow_id = 1;
+ 	msk->last_data_sent = tcp_jiffies32;
+@@ -2841,6 +2865,7 @@ static void __mptcp_init_sock(struct sock *sk)
+ 	msk->last_ack_recv = tcp_jiffies32;
+ 
+ 	mptcp_pm_data_init(msk);
++	spin_lock_init(&msk->fallback_lock);
+ 
+ 	/* re-use the csk retrans timer for MPTCP-level retrans */
+ 	timer_setup(&msk->sk.icsk_retransmit_timer, mptcp_retransmit_timer, 0);
+@@ -3224,7 +3249,16 @@ static int mptcp_disconnect(struct sock *sk, int flags)
+ 	 * subflow
+ 	 */
+ 	mptcp_destroy_common(msk, MPTCP_CF_FASTCLOSE);
++
++	/* The first subflow is already in TCP_CLOSE status, the following
++	 * can't overlap with a fallback anymore
++	 */
++	spin_lock_bh(&msk->fallback_lock);
++	msk->allow_subflows = true;
++	msk->allow_infinite_fallback = true;
+ 	WRITE_ONCE(msk->flags, 0);
++	spin_unlock_bh(&msk->fallback_lock);
++
+ 	msk->cb_flags = 0;
+ 	msk->recovery = false;
+ 	WRITE_ONCE(msk->can_ack, false);
+@@ -3637,7 +3671,13 @@ bool mptcp_finish_join(struct sock *ssk)
+ 
+ 	/* active subflow, already present inside the conn_list */
+ 	if (!list_empty(&subflow->node)) {
++		spin_lock_bh(&msk->fallback_lock);
++		if (!msk->allow_subflows) {
++			spin_unlock_bh(&msk->fallback_lock);
++			return false;
++		}
+ 		mptcp_subflow_joined(msk, ssk);
++		spin_unlock_bh(&msk->fallback_lock);
+ 		mptcp_propagate_sndbuf(parent, ssk);
+ 		return true;
+ 	}
+diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
+index 7e2f70f22b05b6..6f191b12597883 100644
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -342,10 +342,16 @@ struct mptcp_sock {
+ 		u64	rtt_us; /* last maximum rtt of subflows */
+ 	} rcvq_space;
+ 	u8		scaling_ratio;
++	bool		allow_subflows;
+ 
+ 	u32		subflow_id;
+ 	u32		setsockopt_seq;
+ 	char		ca_name[TCP_CA_NAME_MAX];
++
++	spinlock_t	fallback_lock;	/* protects fallback,
++					 * allow_infinite_fallback and
++					 * allow_join
++					 */
+ };
+ 
+ #define mptcp_data_lock(sk) spin_lock_bh(&(sk)->sk_lock.slock)
+@@ -1188,15 +1194,22 @@ static inline bool mptcp_check_fallback(const struct sock *sk)
+ 	return __mptcp_check_fallback(msk);
+ }
+ 
+-static inline void __mptcp_do_fallback(struct mptcp_sock *msk)
++static inline bool __mptcp_try_fallback(struct mptcp_sock *msk)
+ {
+ 	if (__mptcp_check_fallback(msk)) {
+ 		pr_debug("TCP fallback already done (msk=%p)\n", msk);
+-		return;
++		return true;
+ 	}
+-	if (WARN_ON_ONCE(!READ_ONCE(msk->allow_infinite_fallback)))
+-		return;
++	spin_lock_bh(&msk->fallback_lock);
++	if (!msk->allow_infinite_fallback) {
++		spin_unlock_bh(&msk->fallback_lock);
++		return false;
++	}
++
++	msk->allow_subflows = false;
+ 	set_bit(MPTCP_FALLBACK_DONE, &msk->flags);
++	spin_unlock_bh(&msk->fallback_lock);
++	return true;
+ }
+ 
+ static inline bool __mptcp_has_initial_subflow(const struct mptcp_sock *msk)
+@@ -1208,14 +1221,15 @@ static inline bool __mptcp_has_initial_subflow(const struct mptcp_sock *msk)
+ 			TCPF_SYN_RECV | TCPF_LISTEN));
+ }
+ 
+-static inline void mptcp_do_fallback(struct sock *ssk)
++static inline bool mptcp_try_fallback(struct sock *ssk)
+ {
+ 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
+ 	struct sock *sk = subflow->conn;
+ 	struct mptcp_sock *msk;
+ 
+ 	msk = mptcp_sk(sk);
+-	__mptcp_do_fallback(msk);
++	if (!__mptcp_try_fallback(msk))
++		return false;
+ 	if (READ_ONCE(msk->snd_data_fin_enable) && !(ssk->sk_shutdown & SEND_SHUTDOWN)) {
+ 		gfp_t saved_allocation = ssk->sk_allocation;
+ 
+@@ -1227,6 +1241,7 @@ static inline void mptcp_do_fallback(struct sock *ssk)
+ 		tcp_shutdown(ssk, SEND_SHUTDOWN);
+ 		ssk->sk_allocation = saved_allocation;
+ 	}
++	return true;
+ }
+ 
+ #define pr_fallback(a) pr_debug("%s:fallback to TCP (msk=%p)\n", __func__, a)
+@@ -1236,7 +1251,7 @@ static inline void mptcp_subflow_early_fallback(struct mptcp_sock *msk,
+ {
+ 	pr_fallback(msk);
+ 	subflow->request_mptcp = 0;
+-	__mptcp_do_fallback(msk);
++	WARN_ON_ONCE(!__mptcp_try_fallback(msk));
+ }
+ 
+ static inline bool mptcp_check_infinite_map(struct sk_buff *skb)
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index 4c2aa45c466d93..0253a863a621c8 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -543,9 +543,11 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
+ 	mptcp_get_options(skb, &mp_opt);
+ 	if (subflow->request_mptcp) {
+ 		if (!(mp_opt.suboptions & OPTION_MPTCP_MPC_SYNACK)) {
++			if (!mptcp_try_fallback(sk))
++				goto do_reset;
++
+ 			MPTCP_INC_STATS(sock_net(sk),
+ 					MPTCP_MIB_MPCAPABLEACTIVEFALLBACK);
+-			mptcp_do_fallback(sk);
+ 			pr_fallback(msk);
+ 			goto fallback;
+ 		}
+@@ -1288,20 +1290,29 @@ static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ss
+ 		mptcp_schedule_work(sk);
+ }
+ 
+-static void mptcp_subflow_fail(struct mptcp_sock *msk, struct sock *ssk)
++static bool mptcp_subflow_fail(struct mptcp_sock *msk, struct sock *ssk)
+ {
+ 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
+ 	unsigned long fail_tout;
+ 
++	/* we are really failing, prevent any later subflow join */
++	spin_lock_bh(&msk->fallback_lock);
++	if (!msk->allow_infinite_fallback) {
++		spin_unlock_bh(&msk->fallback_lock);
++		return false;
++	}
++	msk->allow_subflows = false;
++	spin_unlock_bh(&msk->fallback_lock);
++
+ 	/* graceful failure can happen only on the MPC subflow */
+ 	if (WARN_ON_ONCE(ssk != READ_ONCE(msk->first)))
+-		return;
++		return false;
+ 
+ 	/* since the close timeout take precedence on the fail one,
+ 	 * no need to start the latter when the first is already set
+ 	 */
+ 	if (sock_flag((struct sock *)msk, SOCK_DEAD))
+-		return;
++		return true;
+ 
+ 	/* we don't need extreme accuracy here, use a zero fail_tout as special
+ 	 * value meaning no fail timeout at all;
+@@ -1313,6 +1324,7 @@ static void mptcp_subflow_fail(struct mptcp_sock *msk, struct sock *ssk)
+ 	tcp_send_ack(ssk);
+ 
+ 	mptcp_reset_tout_timer(msk, subflow->fail_tout);
++	return true;
+ }
+ 
+ static bool subflow_check_data_avail(struct sock *ssk)
+@@ -1373,17 +1385,16 @@ static bool subflow_check_data_avail(struct sock *ssk)
+ 		    (subflow->mp_join || subflow->valid_csum_seen)) {
+ 			subflow->send_mp_fail = 1;
+ 
+-			if (!READ_ONCE(msk->allow_infinite_fallback)) {
++			if (!mptcp_subflow_fail(msk, ssk)) {
+ 				subflow->reset_transient = 0;
+ 				subflow->reset_reason = MPTCP_RST_EMIDDLEBOX;
+ 				goto reset;
+ 			}
+-			mptcp_subflow_fail(msk, ssk);
+ 			WRITE_ONCE(subflow->data_avail, true);
+ 			return true;
+ 		}
+ 
+-		if (!READ_ONCE(msk->allow_infinite_fallback)) {
++		if (!mptcp_try_fallback(ssk)) {
+ 			/* fatal protocol error, close the socket.
+ 			 * subflow_error_report() will introduce the appropriate barriers
+ 			 */
+@@ -1399,8 +1410,6 @@ static bool subflow_check_data_avail(struct sock *ssk)
+ 			WRITE_ONCE(subflow->data_avail, false);
+ 			return false;
+ 		}
+-
+-		mptcp_do_fallback(ssk);
+ 	}
+ 
+ 	skb = skb_peek(&ssk->sk_receive_queue);
+@@ -1665,7 +1674,6 @@ int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_pm_local *local,
+ 	/* discard the subflow socket */
+ 	mptcp_sock_graft(ssk, sk->sk_socket);
+ 	iput(SOCK_INODE(sf));
+-	WRITE_ONCE(msk->allow_infinite_fallback, false);
+ 	mptcp_stop_tout_timer(sk);
+ 	return 0;
+ 
+@@ -1845,7 +1853,7 @@ static void subflow_state_change(struct sock *sk)
+ 
+ 	msk = mptcp_sk(parent);
+ 	if (subflow_simultaneous_connect(sk)) {
+-		mptcp_do_fallback(sk);
++		WARN_ON_ONCE(!mptcp_try_fallback(sk));
+ 		pr_fallback(msk);
+ 		subflow->conn_finished = 1;
+ 		mptcp_propagate_state(parent, sk, subflow, NULL);
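(Note on the mptcp series above: it closes a family of races by making "may we still fall back / may a subflow still join" a state transition taken under the new msk->fallback_lock. Every fallback path now goes through mptcp_try_fallback(), which can fail, and every join path re-checks allow_subflows under the same lock. The core test-and-transition, condensed from __mptcp_try_fallback() above:)

	spin_lock_bh(&msk->fallback_lock);
	if (!msk->allow_infinite_fallback) {
		spin_unlock_bh(&msk->fallback_lock);
		return false;		/* a subflow joined first */
	}
	msk->allow_subflows = false;	/* no new subflows from now on */
	set_bit(MPTCP_FALLBACK_DONE, &msk->flags);
	spin_unlock_bh(&msk->fallback_lock);
	return true;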
+diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
+index 456446d7af200e..f5bde4f13958e1 100644
+--- a/net/netfilter/nf_conntrack_core.c
++++ b/net/netfilter/nf_conntrack_core.c
+@@ -1121,6 +1121,12 @@ static int nf_ct_resolve_clash_harder(struct sk_buff *skb, u32 repl_idx)
+ 
+ 	hlist_nulls_add_head_rcu(&loser_ct->tuplehash[IP_CT_DIR_REPLY].hnnode,
+ 				 &nf_conntrack_hash[repl_idx]);
++	/* confirmed bit must be set after hlist add, not before:
++	 * loser_ct can still be visible to other cpu due to
++	 * SLAB_TYPESAFE_BY_RCU.
++	 */
++	smp_mb__before_atomic();
++	set_bit(IPS_CONFIRMED_BIT, &loser_ct->status);
+ 
+ 	NF_CT_STAT_INC(net, clash_resolve);
+ 	return NF_ACCEPT;
+@@ -1257,8 +1263,6 @@ __nf_conntrack_confirm(struct sk_buff *skb)
+ 	 * user context, else we insert an already 'dead' hash, blocking
+ 	 * further use of that particular connection -JM.
+ 	 */
+-	ct->status |= IPS_CONFIRMED;
+-
+ 	if (unlikely(nf_ct_is_dying(ct))) {
+ 		NF_CT_STAT_INC(net, insert_failed);
+ 		goto dying;
+@@ -1290,7 +1294,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
+ 		}
+ 	}
+ 
+-	/* Timer relative to confirmation time, not original
++	/* Timeout is relative to confirmation time, not original
+ 	   setting time, otherwise we'd get timer wrap in
+ 	   weird delay cases. */
+ 	ct->timeout += nfct_time_stamp;
+@@ -1298,11 +1302,21 @@ __nf_conntrack_confirm(struct sk_buff *skb)
+ 	__nf_conntrack_insert_prepare(ct);
+ 
+ 	/* Since the lookup is lockless, hash insertion must be done after
+-	 * starting the timer and setting the CONFIRMED bit. The RCU barriers
+-	 * guarantee that no other CPU can find the conntrack before the above
+-	 * stores are visible.
++	 * setting ct->timeout. The RCU barriers guarantee that no other CPU
++	 * can find the conntrack before the above stores are visible.
+ 	 */
+ 	__nf_conntrack_hash_insert(ct, hash, reply_hash);
++
++	/* IPS_CONFIRMED unset means 'ct not (yet) in hash', conntrack lookups
++	 * skip entries that lack this bit.  This happens when a CPU is looking
++	 * at a stale entry that is being recycled due to SLAB_TYPESAFE_BY_RCU
++	 * or when another CPU encounters this entry right after the insertion
++	 * but before the set-confirm-bit below.  This bit must not be set until
++	 * after __nf_conntrack_hash_insert().
++	 */
++	smp_mb__before_atomic();
++	set_bit(IPS_CONFIRMED_BIT, &ct->status);
++
+ 	nf_conntrack_double_unlock(hash, reply_hash);
+ 	local_bh_enable();
+ 
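
The conntrack hunks above defer setting IPS_CONFIRMED until after the hash insertion, with smp_mb__before_atomic() ordering the two. A userspace analogue of that publish order, using C11 acquire/release atomics; this mirrors the idea only, not the actual conntrack code:

#include <stdatomic.h>
#include <stddef.h>

struct entry {
	int key;
	atomic_int confirmed;
	struct entry *_Atomic next;
};

static struct entry *_Atomic table_head;

void publish(struct entry *e)
{
	e->next = atomic_load(&table_head);
	atomic_store(&table_head, e);			/* 1: make reachable */
	atomic_store_explicit(&e->confirmed, 1,
			      memory_order_release);	/* 2: then confirm */
}

struct entry *lookup(int key)
{
	for (struct entry *e = atomic_load(&table_head); e;
	     e = atomic_load(&e->next)) {
		/* Skip entries not confirmed yet (or being recycled),
		 * the same lookup rule the comment above describes.
		 */
		if (!atomic_load_explicit(&e->confirmed,
					  memory_order_acquire))
			continue;
		if (e->key == key)
			return e;
	}
	return NULL;
}
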
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index f3cecb3e4bcb18..19c4c1f27e586c 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -2784,7 +2784,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
+ 	int len_sum = 0;
+ 	int status = TP_STATUS_AVAILABLE;
+ 	int hlen, tlen, copylen = 0;
+-	long timeo = 0;
++	long timeo;
+ 
+ 	mutex_lock(&po->pg_vec_lock);
+ 
+@@ -2838,22 +2838,28 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
+ 	if ((size_max > dev->mtu + reserve + VLAN_HLEN) && !vnet_hdr_sz)
+ 		size_max = dev->mtu + reserve + VLAN_HLEN;
+ 
++	timeo = sock_sndtimeo(&po->sk, msg->msg_flags & MSG_DONTWAIT);
+ 	reinit_completion(&po->skb_completion);
+ 
+ 	do {
+ 		ph = packet_current_frame(po, &po->tx_ring,
+ 					  TP_STATUS_SEND_REQUEST);
+ 		if (unlikely(ph == NULL)) {
+-			if (need_wait && skb) {
+-				timeo = sock_sndtimeo(&po->sk, msg->msg_flags & MSG_DONTWAIT);
++			/* Note: packet_read_pending() might be slow if we
++			 * have to call it, as it reads a per-CPU variable;
++			 * the fast path never calls it, though: only when
++			 * ph is NULL do we need to check pending_refcnt.
++			 */
++			if (need_wait && packet_read_pending(&po->tx_ring)) {
+ 				timeo = wait_for_completion_interruptible_timeout(&po->skb_completion, timeo);
+ 				if (timeo <= 0) {
+ 					err = !timeo ? -ETIMEDOUT : -ERESTARTSYS;
+ 					goto out_put;
+ 				}
+-			}
+-			/* check for additional frames */
+-			continue;
++				/* check for additional frames */
++				continue;
++			} else
++				break;
+ 		}
+ 
+ 		skb = NULL;
+@@ -2942,14 +2948,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
+ 		}
+ 		packet_increment_head(&po->tx_ring);
+ 		len_sum += tp_len;
+-	} while (likely((ph != NULL) ||
+-		/* Note: packet_read_pending() might be slow if we have
+-		 * to call it as it's per_cpu variable, but in fast-path
+-		 * we already short-circuit the loop with the first
+-		 * condition, and luckily don't have to go that path
+-		 * anyway.
+-		 */
+-		 (need_wait && packet_read_pending(&po->tx_ring))));
++	} while (1);
+ 
+ 	err = len_sum;
+ 	goto out_put;
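
Note the effect of moving sock_sndtimeo() above the loop: the timeout is fetched once and the remaining budget (what wait_for_completion_interruptible_timeout() returns) is carried across iterations, so the total wait is bounded by the original timeout instead of being re-armed in full on every pass. A runnable sketch of the carried-budget pattern; wait_event() here is a stand-in, not a real API:

#include <stdio.h>

/* Pretend wait: consumes `used` ms of budget, returns time left (>0)
 * or 0 on timeout, the same convention as the kernel helper.
 */
static long wait_event(long timeo_ms, long used)
{
	return timeo_ms > used ? timeo_ms - used : 0;
}

int main(void)
{
	long timeo = 100;	/* fetched once, like sock_sndtimeo() */

	for (int i = 0; i < 5; i++) {
		timeo = wait_event(timeo, 30);	/* remainder carried over */
		if (timeo <= 0) {
			puts("timed out");	/* bounded total wait */
			return 1;
		}
		printf("woke, %ld ms budget left\n", timeo);
	}
	return 0;
}
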
+diff --git a/net/phonet/pep.c b/net/phonet/pep.c
+index 53a858478e22f0..62527e1ebb883d 100644
+--- a/net/phonet/pep.c
++++ b/net/phonet/pep.c
+@@ -826,6 +826,7 @@ static struct sock *pep_sock_accept(struct sock *sk,
+ 	}
+ 
+ 	/* Check for duplicate pipe handle */
++	pn_skb_get_dst_sockaddr(skb, &dst);
+ 	newsk = pep_find_pipe(&pn->hlist, &dst, pipe_handle);
+ 	if (unlikely(newsk)) {
+ 		__sock_put(newsk);
+@@ -850,7 +851,6 @@ static struct sock *pep_sock_accept(struct sock *sk,
+ 	newsk->sk_destruct = pipe_destruct;
+ 
+ 	newpn = pep_sk(newsk);
+-	pn_skb_get_dst_sockaddr(skb, &dst);
+ 	pn_skb_get_src_sockaddr(skb, &src);
+ 	newpn->pn_sk.sobject = pn_sockaddr_get_object(&dst);
+ 	newpn->pn_sk.dobject = pn_sockaddr_get_object(&src);
+diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c
+index 773bdb2e37dafd..37ac8a66567866 100644
+--- a/net/rxrpc/call_accept.c
++++ b/net/rxrpc/call_accept.c
+@@ -219,6 +219,7 @@ void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
+ 	tail = b->call_backlog_tail;
+ 	while (CIRC_CNT(head, tail, size) > 0) {
+ 		struct rxrpc_call *call = b->call_backlog[tail];
++		rxrpc_see_call(call, rxrpc_call_see_discard);
+ 		rcu_assign_pointer(call->socket, rx);
+ 		if (rx->discard_new_call) {
+ 			_debug("discard %lx", call->user_call_ID);
+diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
+index 5ea9601efd05a4..ccfae607c9bb7f 100644
+--- a/net/rxrpc/output.c
++++ b/net/rxrpc/output.c
+@@ -590,6 +590,9 @@ void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb)
+ 	__be32 code;
+ 	int ret, ioc;
+ 
++	if (sp->hdr.type == RXRPC_PACKET_TYPE_ABORT)
++		return; /* Never abort an abort. */
++
+ 	rxrpc_see_skb(skb, rxrpc_skb_see_reject);
+ 
+ 	iov[0].iov_base = &whdr;
+diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c
+index a482f88c5fc5b6..e24a44bae9a32c 100644
+--- a/net/rxrpc/recvmsg.c
++++ b/net/rxrpc/recvmsg.c
+@@ -351,6 +351,16 @@ int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ 		goto try_again;
+ 	}
+ 
++	rxrpc_see_call(call, rxrpc_call_see_recvmsg);
++	if (test_bit(RXRPC_CALL_RELEASED, &call->flags)) {
++		rxrpc_see_call(call, rxrpc_call_see_already_released);
++		list_del_init(&call->recvmsg_link);
++		spin_unlock_irq(&rx->recvmsg_lock);
++		release_sock(&rx->sk);
++		trace_rxrpc_recvmsg(call->debug_id, rxrpc_recvmsg_unqueue, 0);
++		rxrpc_put_call(call, rxrpc_call_put_recvmsg);
++		goto try_again;
++	}
+ 	if (!(flags & MSG_PEEK))
+ 		list_del_init(&call->recvmsg_link);
+ 	else
+@@ -374,8 +384,13 @@ int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ 
+ 	release_sock(&rx->sk);
+ 
+-	if (test_bit(RXRPC_CALL_RELEASED, &call->flags))
+-		BUG();
++	if (test_bit(RXRPC_CALL_RELEASED, &call->flags)) {
++		rxrpc_see_call(call, rxrpc_call_see_already_released);
++		mutex_unlock(&call->user_mutex);
++		if (!(flags & MSG_PEEK))
++			rxrpc_put_call(call, rxrpc_call_put_recvmsg);
++		goto try_again;
++	}
+ 
+ 	if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
+ 		if (flags & MSG_CMSG_COMPAT) {
+diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
+index b2494d24a54253..1021681a571822 100644
+--- a/net/sched/sch_htb.c
++++ b/net/sched/sch_htb.c
+@@ -821,7 +821,9 @@ static struct htb_class *htb_lookup_leaf(struct htb_prio *hprio, const int prio)
+ 		u32 *pid;
+ 	} stk[TC_HTB_MAXDEPTH], *sp = stk;
+ 
+-	BUG_ON(!hprio->row.rb_node);
++	if (unlikely(!hprio->row.rb_node))
++		return NULL;
++
+ 	sp->root = hprio->row.rb_node;
+ 	sp->pptr = &hprio->ptr;
+ 	sp->pid = &hprio->last_ptr_id;
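
The htb change above is a hardening pattern: returning NULL for an empty row lets the dequeue path treat it as "no packet" instead of crashing the machine on a BUG_ON(). A tiny sketch of the shape, assuming the caller checks the result:

#include <stddef.h>

struct node { struct node *left, *right; };

struct node *lookup_leaf(struct node *root)
{
	if (!root)		/* was: BUG_ON(!root) */
		return NULL;	/* caller treats this as "nothing to do" */
	while (root->left)
		root = root->left;
	return root;
}
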
+diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
+index aa4fbd2fae29eb..8e60fb5a7083bc 100644
+--- a/net/sched/sch_qfq.c
++++ b/net/sched/sch_qfq.c
+@@ -412,7 +412,7 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
+ 	bool existing = false;
+ 	struct nlattr *tb[TCA_QFQ_MAX + 1];
+ 	struct qfq_aggregate *new_agg = NULL;
+-	u32 weight, lmax, inv_w;
++	u32 weight, lmax, inv_w, old_weight, old_lmax;
+ 	int err;
+ 	int delta_w;
+ 
+@@ -446,12 +446,16 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
+ 	inv_w = ONE_FP / weight;
+ 	weight = ONE_FP / inv_w;
+ 
+-	if (cl != NULL &&
+-	    lmax == cl->agg->lmax &&
+-	    weight == cl->agg->class_weight)
+-		return 0; /* nothing to change */
++	if (cl != NULL) {
++		sch_tree_lock(sch);
++		old_weight = cl->agg->class_weight;
++		old_lmax   = cl->agg->lmax;
++		sch_tree_unlock(sch);
++		if (lmax == old_lmax && weight == old_weight)
++			return 0; /* nothing to change */
++	}
+ 
+-	delta_w = weight - (cl ? cl->agg->class_weight : 0);
++	delta_w = weight - (cl ? old_weight : 0);
+ 
+ 	if (q->wsum + delta_w > QFQ_MAX_WSUM) {
+ 		NL_SET_ERR_MSG_FMT_MOD(extack,
+@@ -558,10 +562,10 @@ static int qfq_delete_class(struct Qdisc *sch, unsigned long arg,
+ 
+ 	qdisc_purge_queue(cl->qdisc);
+ 	qdisc_class_hash_remove(&q->clhash, &cl->common);
++	qfq_destroy_class(sch, cl);
+ 
+ 	sch_tree_unlock(sch);
+ 
+-	qfq_destroy_class(sch, cl);
+ 	return 0;
+ }
+ 
+@@ -628,6 +632,7 @@ static int qfq_dump_class(struct Qdisc *sch, unsigned long arg,
+ {
+ 	struct qfq_class *cl = (struct qfq_class *)arg;
+ 	struct nlattr *nest;
++	u32 class_weight, lmax;
+ 
+ 	tcm->tcm_parent	= TC_H_ROOT;
+ 	tcm->tcm_handle	= cl->common.classid;
+@@ -636,8 +641,13 @@ static int qfq_dump_class(struct Qdisc *sch, unsigned long arg,
+ 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
+ 	if (nest == NULL)
+ 		goto nla_put_failure;
+-	if (nla_put_u32(skb, TCA_QFQ_WEIGHT, cl->agg->class_weight) ||
+-	    nla_put_u32(skb, TCA_QFQ_LMAX, cl->agg->lmax))
++
++	sch_tree_lock(sch);
++	class_weight	= cl->agg->class_weight;
++	lmax		= cl->agg->lmax;
++	sch_tree_unlock(sch);
++	if (nla_put_u32(skb, TCA_QFQ_WEIGHT, class_weight) ||
++	    nla_put_u32(skb, TCA_QFQ_LMAX, lmax))
+ 		goto nla_put_failure;
+ 	return nla_nest_end(skb, nest);
+ 
+@@ -654,8 +664,10 @@ static int qfq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
+ 
+ 	memset(&xstats, 0, sizeof(xstats));
+ 
++	sch_tree_lock(sch);
+ 	xstats.weight = cl->agg->class_weight;
+ 	xstats.lmax = cl->agg->lmax;
++	sch_tree_unlock(sch);
+ 
+ 	if (gnet_stats_copy_basic(d, NULL, &cl->bstats, true) < 0 ||
+ 	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
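
The qfq hunks all follow one rule: cl->agg can be replaced by a concurrent class change, so its fields are snapshotted under sch_tree_lock() and only the local copies are used afterwards. A userspace sketch of the snapshot-under-lock pattern with pthreads; field and lock names are illustrative:

#include <pthread.h>
#include <stdint.h>

struct agg {
	uint32_t class_weight;
	uint32_t lmax;
};

struct class {
	pthread_mutex_t lock;	/* stands in for sch_tree_lock() */
	struct agg *agg;	/* may be swapped by a concurrent change */
};

void dump_class(struct class *cl, uint32_t *weight, uint32_t *lmax)
{
	pthread_mutex_lock(&cl->lock);
	*weight = cl->agg->class_weight;	/* consistent pair */
	*lmax   = cl->agg->lmax;
	pthread_mutex_unlock(&cl->lock);
	/* use *weight / *lmax freely; they can no longer tear */
}
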
+diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
+index 78b0e6dba0a2b7..3c43239f09d367 100644
+--- a/net/smc/af_smc.c
++++ b/net/smc/af_smc.c
+@@ -30,6 +30,10 @@
+ #include <linux/splice.h>
+ 
+ #include <net/sock.h>
++#include <net/inet_common.h>
++#if IS_ENABLED(CONFIG_IPV6)
++#include <net/ipv6.h>
++#endif
+ #include <net/tcp.h>
+ #include <net/smc.h>
+ #include <asm/ioctls.h>
+@@ -360,6 +364,16 @@ static void smc_destruct(struct sock *sk)
+ 		return;
+ 	if (!sock_flag(sk, SOCK_DEAD))
+ 		return;
++	switch (sk->sk_family) {
++	case AF_INET:
++		inet_sock_destruct(sk);
++		break;
++#if IS_ENABLED(CONFIG_IPV6)
++	case AF_INET6:
++		inet6_sock_destruct(sk);
++		break;
++#endif
++	}
+ }
+ 
+ static struct lock_class_key smc_key;
+diff --git a/net/smc/smc.h b/net/smc/smc.h
+index ad77d6b6b8d3ae..7579f9622e0104 100644
+--- a/net/smc/smc.h
++++ b/net/smc/smc.h
+@@ -283,10 +283,10 @@ struct smc_connection {
+ };
+ 
+ struct smc_sock {				/* smc sock container */
+-	struct sock		sk;
+-#if IS_ENABLED(CONFIG_IPV6)
+-	struct ipv6_pinfo	*pinet6;
+-#endif
++	union {
++		struct sock		sk;
++		struct inet_sock	icsk_inet;
++	};
+ 	struct socket		*clcsock;	/* internal tcp socket */
+ 	void			(*clcsk_state_change)(struct sock *sk);
+ 						/* original stat_change fct. */
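
The smc.h change embeds struct sock and struct inet_sock in an anonymous union, pinning both views at offset 0 so inet code can operate on an smc_sock directly (which is what the new smc_destruct() switch relies on). A compilable sketch of the overlay with toy types; the real definitions live in the kernel headers:

#include <assert.h>
#include <stddef.h>

struct sock      { int state; };
struct inet_sock { struct sock sk; int inet_extra; };

struct smc_sock {
	union {
		struct sock      sk;
		struct inet_sock icsk_inet;	/* larger, shares the head */
	};
	int clc_state;	/* smc-private fields follow the union */
};

static_assert(offsetof(struct smc_sock, sk) == 0, "sk must be first");
static_assert(offsetof(struct smc_sock, icsk_inet.sk) == 0,
	      "inet view must alias sk");
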
+diff --git a/net/tls/tls_strp.c b/net/tls/tls_strp.c
+index 65b0da6fdf6a79..095cf31bae0ba9 100644
+--- a/net/tls/tls_strp.c
++++ b/net/tls/tls_strp.c
+@@ -512,9 +512,8 @@ static int tls_strp_read_sock(struct tls_strparser *strp)
+ 	if (inq < strp->stm.full_len)
+ 		return tls_strp_read_copy(strp, true);
+ 
++	tls_strp_load_anchor_with_queue(strp, inq);
+ 	if (!strp->stm.full_len) {
+-		tls_strp_load_anchor_with_queue(strp, inq);
+-
+ 		sz = tls_rx_msg_size(strp, strp->anchor);
+ 		if (sz < 0) {
+ 			tls_strp_abort_strp(strp, sz);
+diff --git a/rust/Makefile b/rust/Makefile
+index b8b7f817c48e42..17491d8229a430 100644
+--- a/rust/Makefile
++++ b/rust/Makefile
+@@ -157,6 +157,7 @@ quiet_cmd_rustdoc_test = RUSTDOC T $<
+       cmd_rustdoc_test = \
+ 	OBJTREE=$(abspath $(objtree)) \
+ 	$(RUSTDOC) --test $(rust_common_flags) \
++		-Zcrate-attr='feature(used_with_arg)' \
+ 		@$(objtree)/include/generated/rustc_cfg \
+ 		$(rustc_target_flags) $(rustdoc_test_target_flags) \
+ 		$(rustdoc_test_quiet) \
+diff --git a/rust/kernel/lib.rs b/rust/kernel/lib.rs
+index 904d241604db91..889ddcb1a2ddc3 100644
+--- a/rust/kernel/lib.rs
++++ b/rust/kernel/lib.rs
+@@ -18,6 +18,7 @@
+ #![feature(inline_const)]
+ #![feature(lint_reasons)]
+ #![feature(unsize)]
++#![feature(used_with_arg)]
+ 
+ // Ensure conditional compilation based on the kernel configuration works;
+ // otherwise we may silently break things like initcall handling.
+diff --git a/rust/macros/module.rs b/rust/macros/module.rs
+index a5ea5850e307a0..edb23b28f4467e 100644
+--- a/rust/macros/module.rs
++++ b/rust/macros/module.rs
+@@ -57,7 +57,7 @@ fn emit_base(&mut self, field: &str, content: &str, builtin: bool) {
+                 {cfg}
+                 #[doc(hidden)]
+                 #[link_section = \".modinfo\"]
+-                #[used]
++                #[used(compiler)]
+                 pub static __{module}_{counter}: [u8; {length}] = *{string};
+             ",
+             cfg = if builtin {
+@@ -230,7 +230,7 @@ mod __module_init {{
+                     // key or a new section. For the moment, keep it simple.
+                     #[cfg(MODULE)]
+                     #[doc(hidden)]
+-                    #[used]
++                    #[used(compiler)]
+                     static __IS_RUST_MODULE: () = ();
+ 
+                     static mut __MOD: Option<{type_}> = None;
+@@ -253,7 +253,7 @@ mod __module_init {{
+ 
+                     #[cfg(MODULE)]
+                     #[doc(hidden)]
+-                    #[used]
++                    #[used(compiler)]
+                     #[link_section = \".init.data\"]
+                     static __UNIQUE_ID___addressable_init_module: unsafe extern \"C\" fn() -> i32 = init_module;
+ 
+@@ -273,7 +273,7 @@ mod __module_init {{
+ 
+                     #[cfg(MODULE)]
+                     #[doc(hidden)]
+-                    #[used]
++                    #[used(compiler)]
+                     #[link_section = \".exit.data\"]
+                     static __UNIQUE_ID___addressable_cleanup_module: extern \"C\" fn() = cleanup_module;
+ 
+@@ -283,7 +283,7 @@ mod __module_init {{
+                     #[cfg(not(CONFIG_HAVE_ARCH_PREL32_RELOCATIONS))]
+                     #[doc(hidden)]
+                     #[link_section = \"{initcall_section}\"]
+-                    #[used]
++                    #[used(compiler)]
+                     pub static __{name}_initcall: extern \"C\" fn() -> kernel::ffi::c_int = __{name}_init;
+ 
+                     #[cfg(not(MODULE))]
+diff --git a/scripts/Makefile.build b/scripts/Makefile.build
+index 2bba59e790b8a4..2c5c1a214f3b85 100644
+--- a/scripts/Makefile.build
++++ b/scripts/Makefile.build
+@@ -248,7 +248,7 @@ $(obj)/%.lst: $(obj)/%.c FORCE
+ # Compile Rust sources (.rs)
+ # ---------------------------------------------------------------------------
+ 
+-rust_allowed_features := arbitrary_self_types,lint_reasons
++rust_allowed_features := arbitrary_self_types,lint_reasons,used_with_arg
+ 
+ # `--out-dir` is required to avoid temporaries being created by `rustc` in the
+ # current working directory, which may be not accessible in the out-of-tree
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index e98823bd3634f8..f033214bf77fd6 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -10731,6 +10731,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8b97, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ 	SND_PCI_QUIRK(0x103c, 0x8bb3, "HP Slim OMEN", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x103c, 0x8bb4, "HP Slim OMEN", ALC287_FIXUP_CS35L41_I2C_2),
++	SND_PCI_QUIRK(0x103c, 0x8bbe, "HP Victus 16-r0xxx (MB 8BBE)", ALC245_FIXUP_HP_MUTE_LED_COEFBIT),
+ 	SND_PCI_QUIRK(0x103c, 0x8bc8, "HP Victus 15-fa1xxx", ALC245_FIXUP_HP_MUTE_LED_COEFBIT),
+ 	SND_PCI_QUIRK(0x103c, 0x8bcd, "HP Omen 16-xd0xxx", ALC245_FIXUP_HP_MUTE_LED_V1_COEFBIT),
+ 	SND_PCI_QUIRK(0x103c, 0x8bdd, "HP Envy 17", ALC287_FIXUP_CS35L41_I2C_2),
+@@ -10912,6 +10913,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
+ 	SND_PCI_QUIRK(0x1043, 0x1a63, "ASUS UX3405MA", ALC245_FIXUP_CS35L41_SPI_2),
+ 	SND_PCI_QUIRK(0x1043, 0x1a83, "ASUS UM5302LA", ALC294_FIXUP_CS35L41_I2C_2),
++	SND_PCI_QUIRK(0x1043, 0x1a8e, "ASUS G712LWS", ALC294_FIXUP_LENOVO_MIC_LOCATION),
+ 	SND_PCI_QUIRK(0x1043, 0x1a8f, "ASUS UX582ZS", ALC245_FIXUP_CS35L41_SPI_2),
+ 	SND_PCI_QUIRK(0x1043, 0x1b11, "ASUS UX431DA", ALC294_FIXUP_ASUS_COEF_1B),
+ 	SND_PCI_QUIRK(0x1043, 0x1b13, "ASUS U41SV/GA403U", ALC285_FIXUP_ASUS_GA403U_HEADSET_MIC),
+diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
+index 36e341b4b77bf2..747cef47e685bc 100644
+--- a/tools/lib/bpf/libbpf.c
++++ b/tools/lib/bpf/libbpf.c
+@@ -726,7 +726,7 @@ struct bpf_object {
+ 
+ 	struct usdt_manager *usdt_man;
+ 
+-	struct bpf_map *arena_map;
++	int arena_map_idx;
+ 	void *arena_data;
+ 	size_t arena_data_sz;
+ 
+@@ -1494,6 +1494,7 @@ static struct bpf_object *bpf_object__new(const char *path,
+ 	obj->efile.obj_buf_sz = obj_buf_sz;
+ 	obj->efile.btf_maps_shndx = -1;
+ 	obj->kconfig_map_idx = -1;
++	obj->arena_map_idx = -1;
+ 
+ 	obj->kern_version = get_kernel_version();
+ 	obj->loaded = false;
+@@ -2935,7 +2936,7 @@ static int init_arena_map_data(struct bpf_object *obj, struct bpf_map *map,
+ 	const long page_sz = sysconf(_SC_PAGE_SIZE);
+ 	size_t mmap_sz;
+ 
+-	mmap_sz = bpf_map_mmap_sz(obj->arena_map);
++	mmap_sz = bpf_map_mmap_sz(map);
+ 	if (roundup(data_sz, page_sz) > mmap_sz) {
+ 		pr_warn("elf: sec '%s': declared ARENA map size (%zu) is too small to hold global __arena variables of size %zu\n",
+ 			sec_name, mmap_sz, data_sz);
+@@ -3009,12 +3010,12 @@ static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
+ 		if (map->def.type != BPF_MAP_TYPE_ARENA)
+ 			continue;
+ 
+-		if (obj->arena_map) {
++		if (obj->arena_map_idx >= 0) {
+ 			pr_warn("map '%s': only single ARENA map is supported (map '%s' is also ARENA)\n",
+-				map->name, obj->arena_map->name);
++				map->name, obj->maps[obj->arena_map_idx].name);
+ 			return -EINVAL;
+ 		}
+-		obj->arena_map = map;
++		obj->arena_map_idx = i;
+ 
+ 		if (obj->efile.arena_data) {
+ 			err = init_arena_map_data(obj, map, ARENA_SEC, obj->efile.arena_data_shndx,
+@@ -3024,7 +3025,7 @@ static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
+ 				return err;
+ 		}
+ 	}
+-	if (obj->efile.arena_data && !obj->arena_map) {
++	if (obj->efile.arena_data && obj->arena_map_idx < 0) {
+ 		pr_warn("elf: sec '%s': to use global __arena variables the ARENA map should be explicitly declared in SEC(\".maps\")\n",
+ 			ARENA_SEC);
+ 		return -ENOENT;
+@@ -4547,8 +4548,13 @@ static int bpf_program__record_reloc(struct bpf_program *prog,
+ 	if (shdr_idx == obj->efile.arena_data_shndx) {
+ 		reloc_desc->type = RELO_DATA;
+ 		reloc_desc->insn_idx = insn_idx;
+-		reloc_desc->map_idx = obj->arena_map - obj->maps;
++		reloc_desc->map_idx = obj->arena_map_idx;
+ 		reloc_desc->sym_off = sym->st_value;
++
++		map = &obj->maps[obj->arena_map_idx];
++		pr_debug("prog '%s': found arena map %d (%s, sec %d, off %zu) for insn %u\n",
++			 prog->name, obj->arena_map_idx, map->name, map->sec_idx,
++			 map->sec_offset, insn_idx);
+ 		return 0;
+ 	}
+ 
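
The libbpf fix is a classic bug class: obj->maps is a growable array, so a cached struct bpf_map * dangles whenever the array is reallocated, while an index stays valid. A minimal reproduction of the pattern, error handling elided for brevity:

#include <stdlib.h>
#include <string.h>

struct map { char name[16]; };

struct obj {
	struct map *maps;
	size_t nr_maps;
	int arena_map_idx;	/* store the index, not &maps[i] */
};

static struct map *add_map(struct obj *o, const char *name)
{
	o->maps = realloc(o->maps, (o->nr_maps + 1) * sizeof(*o->maps));
	/* realloc may move the whole array: any struct map * saved
	 * before this call is now invalid.
	 */
	strncpy(o->maps[o->nr_maps].name, name, sizeof(o->maps[0].name) - 1);
	o->maps[o->nr_maps].name[15] = '\0';
	return &o->maps[o->nr_maps++];
}

static struct map *arena_map(struct obj *o)
{
	return o->arena_map_idx >= 0 ? &o->maps[o->arena_map_idx] : NULL;
}
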
+diff --git a/tools/objtool/check.c b/tools/objtool/check.c
+index a737286de75926..d4d82bb9b5511d 100644
+--- a/tools/objtool/check.c
++++ b/tools/objtool/check.c
+@@ -216,6 +216,7 @@ static bool is_rust_noreturn(const struct symbol *func)
+ 	       str_ends_with(func->name, "_4core9panicking14panic_explicit")				||
+ 	       str_ends_with(func->name, "_4core9panicking14panic_nounwind")				||
+ 	       str_ends_with(func->name, "_4core9panicking18panic_bounds_check")			||
++	       str_ends_with(func->name, "_4core9panicking18panic_nounwind_fmt")			||
+ 	       str_ends_with(func->name, "_4core9panicking19assert_failed_inner")			||
+ 	       str_ends_with(func->name, "_4core9panicking30panic_null_pointer_dereference")		||
+ 	       str_ends_with(func->name, "_4core9panicking36panic_misaligned_pointer_dereference")	||
+diff --git a/tools/testing/selftests/bpf/prog_tests/token.c b/tools/testing/selftests/bpf/prog_tests/token.c
+index fe86e4fdb89c80..c3ab9b6fb0694c 100644
+--- a/tools/testing/selftests/bpf/prog_tests/token.c
++++ b/tools/testing/selftests/bpf/prog_tests/token.c
+@@ -828,8 +828,12 @@ static int userns_obj_priv_btf_success(int mnt_fd, struct token_lsm *lsm_skel)
+ 	return validate_struct_ops_load(mnt_fd, true /* should succeed */);
+ }
+ 
++static const char *token_bpffs_custom_dir()
++{
++	return getenv("BPF_SELFTESTS_BPF_TOKEN_DIR") ?: "/tmp/bpf-token-fs";
++}
++
+ #define TOKEN_ENVVAR "LIBBPF_BPF_TOKEN_PATH"
+-#define TOKEN_BPFFS_CUSTOM "/bpf-token-fs"
+ 
+ static int userns_obj_priv_implicit_token(int mnt_fd, struct token_lsm *lsm_skel)
+ {
+@@ -892,6 +896,7 @@ static int userns_obj_priv_implicit_token(int mnt_fd, struct token_lsm *lsm_skel
+ 
+ static int userns_obj_priv_implicit_token_envvar(int mnt_fd, struct token_lsm *lsm_skel)
+ {
++	const char *custom_dir = token_bpffs_custom_dir();
+ 	LIBBPF_OPTS(bpf_object_open_opts, opts);
+ 	struct dummy_st_ops_success *skel;
+ 	int err;
+@@ -909,10 +914,10 @@ static int userns_obj_priv_implicit_token_envvar(int mnt_fd, struct token_lsm *l
+ 	 * BPF token implicitly, unless pointed to it through
+ 	 * LIBBPF_BPF_TOKEN_PATH envvar
+ 	 */
+-	rmdir(TOKEN_BPFFS_CUSTOM);
+-	if (!ASSERT_OK(mkdir(TOKEN_BPFFS_CUSTOM, 0777), "mkdir_bpffs_custom"))
++	rmdir(custom_dir);
++	if (!ASSERT_OK(mkdir(custom_dir, 0777), "mkdir_bpffs_custom"))
+ 		goto err_out;
+-	err = sys_move_mount(mnt_fd, "", AT_FDCWD, TOKEN_BPFFS_CUSTOM, MOVE_MOUNT_F_EMPTY_PATH);
++	err = sys_move_mount(mnt_fd, "", AT_FDCWD, custom_dir, MOVE_MOUNT_F_EMPTY_PATH);
+ 	if (!ASSERT_OK(err, "move_mount_bpffs"))
+ 		goto err_out;
+ 
+@@ -925,7 +930,7 @@ static int userns_obj_priv_implicit_token_envvar(int mnt_fd, struct token_lsm *l
+ 		goto err_out;
+ 	}
+ 
+-	err = setenv(TOKEN_ENVVAR, TOKEN_BPFFS_CUSTOM, 1 /*overwrite*/);
++	err = setenv(TOKEN_ENVVAR, custom_dir, 1 /*overwrite*/);
+ 	if (!ASSERT_OK(err, "setenv_token_path"))
+ 		goto err_out;
+ 
+@@ -951,11 +956,11 @@ static int userns_obj_priv_implicit_token_envvar(int mnt_fd, struct token_lsm *l
+ 	if (!ASSERT_ERR(err, "obj_empty_token_path_load"))
+ 		goto err_out;
+ 
+-	rmdir(TOKEN_BPFFS_CUSTOM);
++	rmdir(custom_dir);
+ 	unsetenv(TOKEN_ENVVAR);
+ 	return 0;
+ err_out:
+-	rmdir(TOKEN_BPFFS_CUSTOM);
++	rmdir(custom_dir);
+ 	unsetenv(TOKEN_ENVVAR);
+ 	return -EINVAL;
+ }
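
The token.c change swaps a hardcoded directory at the filesystem root for an environment-overridable one defaulting under /tmp. The selftest spells it with the GNU `a ?: b` shorthand; the same logic in portable C, with the variable name taken from the patch:

#include <stdlib.h>

static const char *token_dir(void)
{
	const char *dir = getenv("BPF_SELFTESTS_BPF_TOKEN_DIR");

	return dir ? dir : "/tmp/bpf-token-fs";
}
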
+diff --git a/tools/testing/selftests/net/udpgro.sh b/tools/testing/selftests/net/udpgro.sh
+index d5ffd8c9172e1d..799dbc2b4b01c9 100755
+--- a/tools/testing/selftests/net/udpgro.sh
++++ b/tools/testing/selftests/net/udpgro.sh
+@@ -48,7 +48,7 @@ run_one() {
+ 
+ 	cfg_veth
+ 
+-	ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 1000 -R 10 ${rx_args} &
++	ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 1000 -R 100 ${rx_args} &
+ 	local PID1=$!
+ 
+ 	wait_local_port_listen ${PEER_NS} 8000 udp
+@@ -95,7 +95,7 @@ run_one_nat() {
+ 	# will land on the 'plain' one
+ 	ip netns exec "${PEER_NS}" ./udpgso_bench_rx -G ${family} -b ${addr1} -n 0 &
+ 	local PID1=$!
+-	ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 1000 -R 10 ${family} -b ${addr2%/*} ${rx_args} &
++	ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 1000 -R 100 ${family} -b ${addr2%/*} ${rx_args} &
+ 	local PID2=$!
+ 
+ 	wait_local_port_listen "${PEER_NS}" 8000 udp
+@@ -117,9 +117,9 @@ run_one_2sock() {
+ 
+ 	cfg_veth
+ 
+-	ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 1000 -R 10 ${rx_args} -p 12345 &
++	ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 1000 -R 100 ${rx_args} -p 12345 &
+ 	local PID1=$!
+-	ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 2000 -R 10 ${rx_args} &
++	ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 2000 -R 100 ${rx_args} &
+ 	local PID2=$!
+ 
+ 	wait_local_port_listen "${PEER_NS}" 12345 udp
+diff --git a/tools/testing/selftests/sched_ext/exit.c b/tools/testing/selftests/sched_ext/exit.c
+index 31bcd06e21cd3d..2c084ded296808 100644
+--- a/tools/testing/selftests/sched_ext/exit.c
++++ b/tools/testing/selftests/sched_ext/exit.c
+@@ -22,6 +22,14 @@ static enum scx_test_status run(void *ctx)
+ 		struct bpf_link *link;
+ 		char buf[16];
+ 
++		/*
++		 * On single-CPU systems, ops.select_cpu() is never
++		 * invoked, so skip this test to avoid getting stuck
++		 * indefinitely.
++		 */
++		if (tc == EXIT_SELECT_CPU && libbpf_num_possible_cpus() == 1)
++			continue;
++
+ 		skel = exit__open();
+ 		skel->rodata->exit_point = tc;
+ 		exit__load(skel);


* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2025-10-24  9:09 Arisu Tachibana
  0 siblings, 0 replies; 82+ messages in thread
From: Arisu Tachibana @ 2025-10-24  9:09 UTC (permalink / raw
  To: gentoo-commits

commit:     3cff75ffc1aa5e90c741183d55a846164df5eef3
Author:     Arisu Tachibana <alicef <AT> gentoo <DOT> org>
AuthorDate: Fri Oct 24 09:08:47 2025 +0000
Commit:     Arisu Tachibana <alicef <AT> gentoo <DOT> org>
CommitDate: Fri Oct 24 09:08:47 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=3cff75ff

Linux patch 6.12.55

Signed-off-by: Arisu Tachibana <alicef <AT> gentoo.org>

 0000_README              |    4 +
 1054_linux-6.12.55.patch | 6854 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 6858 insertions(+)

diff --git a/0000_README b/0000_README
index ad7dc827..7adc9490 100644
--- a/0000_README
+++ b/0000_README
@@ -259,6 +259,10 @@ Patch:  1053_linux-6.12.54.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.12.54
 
+Patch:  1054_linux-6.12.55.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.12.55
+
 Patch:  1500_fortify-copy-size-value-range-tracking-fix.patch
 From:   https://git.kernel.org/
 Desc:   fortify: Hide run-time copy size from value range tracking

diff --git a/1054_linux-6.12.55.patch b/1054_linux-6.12.55.patch
new file mode 100644
index 00000000..df8a45e9
--- /dev/null
+++ b/1054_linux-6.12.55.patch
@@ -0,0 +1,6854 @@
+diff --git a/Documentation/arch/arm64/silicon-errata.rst b/Documentation/arch/arm64/silicon-errata.rst
+index b42fea07c5cec8..b6dacd012539a4 100644
+--- a/Documentation/arch/arm64/silicon-errata.rst
++++ b/Documentation/arch/arm64/silicon-errata.rst
+@@ -198,6 +198,8 @@ stable kernels.
+ +----------------+-----------------+-----------------+-----------------------------+
+ | ARM            | Neoverse-V3     | #3312417        | ARM64_ERRATUM_3194386       |
+ +----------------+-----------------+-----------------+-----------------------------+
++| ARM            | Neoverse-V3AE   | #3312417        | ARM64_ERRATUM_3194386       |
+++----------------+-----------------+-----------------+-----------------------------+
+ | ARM            | MMU-500         | #841119,826419  | N/A                         |
+ +----------------+-----------------+-----------------+-----------------------------+
+ | ARM            | MMU-600         | #1076982,1209401| N/A                         |
+diff --git a/Documentation/networking/seg6-sysctl.rst b/Documentation/networking/seg6-sysctl.rst
+index 07c20e470bafe6..1b6af4779be114 100644
+--- a/Documentation/networking/seg6-sysctl.rst
++++ b/Documentation/networking/seg6-sysctl.rst
+@@ -25,6 +25,9 @@ seg6_require_hmac - INTEGER
+ 
+ 	Default is 0.
+ 
++/proc/sys/net/ipv6/seg6_* variables:
++====================================
++
+ seg6_flowlabel - INTEGER
+ 	Controls the behaviour of computing the flowlabel of outer
+ 	IPv6 header in case of SR T.encaps
+diff --git a/Makefile b/Makefile
+index 0c6deb33c23935..d4c679b2d4bcfc 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 12
+-SUBLEVEL = 54
++SUBLEVEL = 55
+ EXTRAVERSION =
+ NAME = Baby Opossum Posse
+ 
+diff --git a/arch/Kconfig b/arch/Kconfig
+index bd9f095d69fa03..593452b43dd499 100644
+--- a/arch/Kconfig
++++ b/arch/Kconfig
+@@ -861,6 +861,7 @@ config HAVE_CFI_ICALL_NORMALIZE_INTEGERS_RUSTC
+ 	def_bool y
+ 	depends on HAVE_CFI_ICALL_NORMALIZE_INTEGERS_CLANG
+ 	depends on RUSTC_VERSION >= 107900
++	depends on ARM64 || X86_64
+ 	# With GCOV/KASAN we need this fix: https://github.com/rust-lang/rust/pull/129373
+ 	depends on (RUSTC_LLVM_VERSION >= 190103 && RUSTC_VERSION >= 108200) || \
+ 		(!GCOV_KERNEL && !KASAN_GENERIC && !KASAN_SW_TAGS)
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index 7887d18cce3e45..40ae4dd961b152 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -1111,6 +1111,7 @@ config ARM64_ERRATUM_3194386
+ 	  * ARM Neoverse-V1 erratum 3324341
+ 	  * ARM Neoverse V2 erratum 3324336
+ 	  * ARM Neoverse-V3 erratum 3312417
++	  * ARM Neoverse-V3AE erratum 3312417
+ 
+ 	  On affected cores "MSR SSBS, #0" instructions may not affect
+ 	  subsequent speculative instructions, which may permit unexepected
+diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
+index d92a0203e5a93d..c279a0a9b3660e 100644
+--- a/arch/arm64/include/asm/cputype.h
++++ b/arch/arm64/include/asm/cputype.h
+@@ -93,6 +93,7 @@
+ #define ARM_CPU_PART_NEOVERSE_V2	0xD4F
+ #define ARM_CPU_PART_CORTEX_A720	0xD81
+ #define ARM_CPU_PART_CORTEX_X4		0xD82
++#define ARM_CPU_PART_NEOVERSE_V3AE	0xD83
+ #define ARM_CPU_PART_NEOVERSE_V3	0xD84
+ #define ARM_CPU_PART_CORTEX_X925	0xD85
+ #define ARM_CPU_PART_CORTEX_A725	0xD87
+@@ -180,6 +181,7 @@
+ #define MIDR_NEOVERSE_V2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V2)
+ #define MIDR_CORTEX_A720 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A720)
+ #define MIDR_CORTEX_X4 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X4)
++#define MIDR_NEOVERSE_V3AE	MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V3AE)
+ #define MIDR_NEOVERSE_V3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V3)
+ #define MIDR_CORTEX_X925 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X925)
+ #define MIDR_CORTEX_A725 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A725)
+diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
+index a78f247029aec3..3f675ae57d09ac 100644
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -455,6 +455,7 @@ static const struct midr_range erratum_spec_ssbs_list[] = {
+ 	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
+ 	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V2),
+ 	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V3),
++	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V3AE),
+ 	{}
+ };
+ #endif
+diff --git a/arch/riscv/kernel/probes/kprobes.c b/arch/riscv/kernel/probes/kprobes.c
+index d2dacea1aedd9e..0daba93c1a81e2 100644
+--- a/arch/riscv/kernel/probes/kprobes.c
++++ b/arch/riscv/kernel/probes/kprobes.c
+@@ -49,10 +49,15 @@ static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs)
+ 	post_kprobe_handler(p, kcb, regs);
+ }
+ 
+-static bool __kprobes arch_check_kprobe(struct kprobe *p)
++static bool __kprobes arch_check_kprobe(unsigned long addr)
+ {
+-	unsigned long tmp  = (unsigned long)p->addr - p->offset;
+-	unsigned long addr = (unsigned long)p->addr;
++	unsigned long tmp, offset;
++
++	/* start iterating at the closest preceding symbol */
++	if (!kallsyms_lookup_size_offset(addr, NULL, &offset))
++		return false;
++
++	tmp = addr - offset;
+ 
+ 	while (tmp <= addr) {
+ 		if (tmp == addr)
+@@ -71,7 +76,7 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
+ 	if ((unsigned long)insn & 0x1)
+ 		return -EILSEQ;
+ 
+-	if (!arch_check_kprobe(p))
++	if (!arch_check_kprobe((unsigned long)p->addr))
+ 		return -EILSEQ;
+ 
+ 	/* copy instruction */
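
The riscv kprobes fix derives the walk's starting point from kallsyms (the closest preceding symbol) rather than trusting p->offset, then steps instruction by instruction to confirm the probe address lands on a boundary. A userspace sketch of that boundary walk, assuming the RISC-V C extension length rule (low two opcode bits == 0b11 means a 4-byte instruction, anything else 2 bytes) and no longer encodings; sym_start/sym_end stand in for the kallsyms lookup:

#include <stdbool.h>
#include <stdint.h>

static bool on_insn_boundary(const uint16_t *base, uintptr_t sym_start,
			     uintptr_t sym_end, uintptr_t addr)
{
	uintptr_t pc = sym_start;

	while (pc < sym_end && pc <= addr) {
		if (pc == addr)
			return true;
		/* the 16-bit parcel at pc decides this insn's length */
		pc += (base[(pc - sym_start) / 2] & 0x3) == 0x3 ? 4 : 2;
	}
	return false;
}
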
+diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c
+index 851b561850e0c2..c117fba4f8da92 100644
+--- a/arch/x86/kernel/cpu/resctrl/monitor.c
++++ b/arch/x86/kernel/cpu/resctrl/monitor.c
+@@ -312,15 +312,35 @@ static u64 mbm_overflow_count(u64 prev_msr, u64 cur_msr, unsigned int width)
+ 	return chunks >> shift;
+ }
+ 
++static u64 get_corrected_val(struct rdt_resource *r, struct rdt_mon_domain *d,
++			     u32 rmid, enum resctrl_event_id eventid, u64 msr_val)
++{
++	struct rdt_hw_mon_domain *hw_dom = resctrl_to_arch_mon_dom(d);
++	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
++	struct arch_mbm_state *am;
++	u64 chunks;
++
++	am = get_arch_mbm_state(hw_dom, rmid, eventid);
++	if (am) {
++		am->chunks += mbm_overflow_count(am->prev_msr, msr_val,
++						 hw_res->mbm_width);
++		chunks = get_corrected_mbm_count(rmid, am->chunks);
++		am->prev_msr = msr_val;
++	} else {
++		chunks = msr_val;
++	}
++
++	return chunks * hw_res->mon_scale;
++}
++
+ int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_mon_domain *d,
+ 			   u32 unused, u32 rmid, enum resctrl_event_id eventid,
+ 			   u64 *val, void *ignored)
+ {
+ 	struct rdt_hw_mon_domain *hw_dom = resctrl_to_arch_mon_dom(d);
+-	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
+ 	int cpu = cpumask_any(&d->hdr.cpu_mask);
+ 	struct arch_mbm_state *am;
+-	u64 msr_val, chunks;
++	u64 msr_val;
+ 	u32 prmid;
+ 	int ret;
+ 
+@@ -328,22 +348,16 @@ int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_mon_domain *d,
+ 
+ 	prmid = logical_rmid_to_physical_rmid(cpu, rmid);
+ 	ret = __rmid_read_phys(prmid, eventid, &msr_val);
+-	if (ret)
+-		return ret;
+ 
+-	am = get_arch_mbm_state(hw_dom, rmid, eventid);
+-	if (am) {
+-		am->chunks += mbm_overflow_count(am->prev_msr, msr_val,
+-						 hw_res->mbm_width);
+-		chunks = get_corrected_mbm_count(rmid, am->chunks);
+-		am->prev_msr = msr_val;
+-	} else {
+-		chunks = msr_val;
++	if (!ret) {
++		*val = get_corrected_val(r, d, rmid, eventid, msr_val);
++	} else if (ret == -EINVAL) {
++		am = get_arch_mbm_state(hw_dom, rmid, eventid);
++		if (am)
++			am->prev_msr = 0;
+ 	}
+ 
+-	*val = chunks * hw_res->mon_scale;
+-
+-	return 0;
++	return ret;
+ }
+ 
+ static void limbo_release_entry(struct rmid_entry *entry)
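
The refactored helper above accumulates a width-limited hardware counter; mbm_overflow_count() is the classic subtract-and-mask delta, wrap-safe as long as at most one full wrap happens between reads. The trick in isolation:

#include <stdint.h>

static uint64_t counter_delta(uint64_t prev, uint64_t cur, unsigned width)
{
	uint64_t mask = (width == 64) ? ~0ULL : (1ULL << width) - 1;

	return (cur - prev) & mask;	/* correct even when cur wrapped */
}

Accumulation then mirrors the patched code: chunks += delta, remember cur as the new prev, and on a read error (-EINVAL above) reset prev so the next good read does not produce a bogus delta.
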
+diff --git a/drivers/accel/qaic/qaic.h b/drivers/accel/qaic/qaic.h
+index 02561b6cecc64b..2d7b3af09e2846 100644
+--- a/drivers/accel/qaic/qaic.h
++++ b/drivers/accel/qaic/qaic.h
+@@ -91,6 +91,8 @@ struct dma_bridge_chan {
+ 	 * response queue's head and tail pointer of this DBC.
+ 	 */
+ 	void __iomem		*dbc_base;
++	/* Synchronizes access to Request queue's head and tail pointer */
++	struct mutex		req_lock;
+ 	/* Head of list where each node is a memory handle queued in request queue */
+ 	struct list_head	xfer_list;
+ 	/* Synchronizes DBC readers during cleanup */
+diff --git a/drivers/accel/qaic/qaic_control.c b/drivers/accel/qaic/qaic_control.c
+index d8bdab69f80095..b86a8e48e731b7 100644
+--- a/drivers/accel/qaic/qaic_control.c
++++ b/drivers/accel/qaic/qaic_control.c
+@@ -407,7 +407,7 @@ static int find_and_map_user_pages(struct qaic_device *qdev,
+ 		return -EINVAL;
+ 	remaining = in_trans->size - resources->xferred_dma_size;
+ 	if (remaining == 0)
+-		return 0;
++		return -EINVAL;
+ 
+ 	if (check_add_overflow(xfer_start_addr, remaining, &end))
+ 		return -EINVAL;
+diff --git a/drivers/accel/qaic/qaic_data.c b/drivers/accel/qaic/qaic_data.c
+index 43aba57b48f05f..265eeb4e156fc6 100644
+--- a/drivers/accel/qaic/qaic_data.c
++++ b/drivers/accel/qaic/qaic_data.c
+@@ -1357,13 +1357,17 @@ static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct dr
+ 		goto release_ch_rcu;
+ 	}
+ 
++	ret = mutex_lock_interruptible(&dbc->req_lock);
++	if (ret)
++		goto release_ch_rcu;
++
+ 	head = readl(dbc->dbc_base + REQHP_OFF);
+ 	tail = readl(dbc->dbc_base + REQTP_OFF);
+ 
+ 	if (head == U32_MAX || tail == U32_MAX) {
+ 		/* PCI link error */
+ 		ret = -ENODEV;
+-		goto release_ch_rcu;
++		goto unlock_req_lock;
+ 	}
+ 
+ 	queue_level = head <= tail ? tail - head : dbc->nelem - (head - tail);
+@@ -1371,11 +1375,12 @@ static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct dr
+ 	ret = send_bo_list_to_device(qdev, file_priv, exec, args->hdr.count, is_partial, dbc,
+ 				     head, &tail);
+ 	if (ret)
+-		goto release_ch_rcu;
++		goto unlock_req_lock;
+ 
+ 	/* Finalize commit to hardware */
+ 	submit_ts = ktime_get_ns();
+ 	writel(tail, dbc->dbc_base + REQTP_OFF);
++	mutex_unlock(&dbc->req_lock);
+ 
+ 	update_profiling_data(file_priv, exec, args->hdr.count, is_partial, received_ts,
+ 			      submit_ts, queue_level);
+@@ -1383,6 +1388,9 @@ static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct dr
+ 	if (datapath_polling)
+ 		schedule_work(&dbc->poll_work);
+ 
++unlock_req_lock:
++	if (ret)
++		mutex_unlock(&dbc->req_lock);
+ release_ch_rcu:
+ 	srcu_read_unlock(&dbc->ch_lock, rcu_id);
+ unlock_dev_srcu:
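
The new req_lock serializes the read-modify-write of the request ring's head/tail pointers (read head/tail, fill entries, write the new tail), which was previously racy between submitters; the interruptible lock keeps a stuck submitter killable. A toy sketch of the serialized ring commit with pthreads; the kernel uses mutex_lock_interruptible() and the real queue-level math from the hunk:

#include <pthread.h>
#include <stdint.h>

struct ring {
	pthread_mutex_t lock;
	uint32_t head, tail, nelem;
};

static int submit(struct ring *r, uint32_t count)
{
	int ret = 0;

	pthread_mutex_lock(&r->lock);	/* one writer at a time */
	uint32_t level = r->head <= r->tail ? r->tail - r->head
					    : r->nelem - (r->head - r->tail);
	if (level + count >= r->nelem) {
		ret = -1;		/* would overflow the queue */
		goto out;
	}
	r->tail = (r->tail + count) % r->nelem;	/* commit new tail */
out:
	pthread_mutex_unlock(&r->lock);
	return ret;
}
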
+diff --git a/drivers/accel/qaic/qaic_debugfs.c b/drivers/accel/qaic/qaic_debugfs.c
+index 20b653d99e524a..5ed49daaf541f4 100644
+--- a/drivers/accel/qaic/qaic_debugfs.c
++++ b/drivers/accel/qaic/qaic_debugfs.c
+@@ -251,6 +251,9 @@ static int qaic_bootlog_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_d
+ 	if (ret)
+ 		goto destroy_workqueue;
+ 
++	dev_set_drvdata(&mhi_dev->dev, qdev);
++	qdev->bootlog_ch = mhi_dev;
++
+ 	for (i = 0; i < BOOTLOG_POOL_SIZE; i++) {
+ 		msg = devm_kzalloc(&qdev->pdev->dev, sizeof(*msg), GFP_KERNEL);
+ 		if (!msg) {
+@@ -266,8 +269,6 @@ static int qaic_bootlog_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_d
+ 			goto mhi_unprepare;
+ 	}
+ 
+-	dev_set_drvdata(&mhi_dev->dev, qdev);
+-	qdev->bootlog_ch = mhi_dev;
+ 	return 0;
+ 
+ mhi_unprepare:
+diff --git a/drivers/accel/qaic/qaic_drv.c b/drivers/accel/qaic/qaic_drv.c
+index 10e711c96a6706..cb606c4bb85116 100644
+--- a/drivers/accel/qaic/qaic_drv.c
++++ b/drivers/accel/qaic/qaic_drv.c
+@@ -422,6 +422,9 @@ static struct qaic_device *create_qdev(struct pci_dev *pdev, const struct pci_de
+ 			return NULL;
+ 		init_waitqueue_head(&qdev->dbc[i].dbc_release);
+ 		INIT_LIST_HEAD(&qdev->dbc[i].bo_lists);
++		ret = drmm_mutex_init(drm, &qdev->dbc[i].req_lock);
++		if (ret)
++			return NULL;
+ 	}
+ 
+ 	return qdev;
+diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
+index c7ec69597a955f..d8aaa5b61628b8 100644
+--- a/drivers/base/power/runtime.c
++++ b/drivers/base/power/runtime.c
+@@ -1554,6 +1554,32 @@ void pm_runtime_enable(struct device *dev)
+ }
+ EXPORT_SYMBOL_GPL(pm_runtime_enable);
+ 
++static void pm_runtime_set_suspended_action(void *data)
++{
++	pm_runtime_set_suspended(data);
++}
++
++/**
++ * devm_pm_runtime_set_active_enabled - set_active version of devm_pm_runtime_enable.
++ *
++ * @dev: Device to handle.
++ */
++int devm_pm_runtime_set_active_enabled(struct device *dev)
++{
++	int err;
++
++	err = pm_runtime_set_active(dev);
++	if (err)
++		return err;
++
++	err = devm_add_action_or_reset(dev, pm_runtime_set_suspended_action, dev);
++	if (err)
++		return err;
++
++	return devm_pm_runtime_enable(dev);
++}
++EXPORT_SYMBOL_GPL(devm_pm_runtime_set_active_enabled);
++
+ static void pm_runtime_disable_action(void *data)
+ {
+ 	pm_runtime_dont_use_autosuspend(data);
+@@ -1576,6 +1602,24 @@ int devm_pm_runtime_enable(struct device *dev)
+ }
+ EXPORT_SYMBOL_GPL(devm_pm_runtime_enable);
+ 
++static void pm_runtime_put_noidle_action(void *data)
++{
++	pm_runtime_put_noidle(data);
++}
++
++/**
++ * devm_pm_runtime_get_noresume - devres-enabled version of pm_runtime_get_noresume.
++ *
++ * @dev: Device to handle.
++ */
++int devm_pm_runtime_get_noresume(struct device *dev)
++{
++	pm_runtime_get_noresume(dev);
++
++	return devm_add_action_or_reset(dev, pm_runtime_put_noidle_action, dev);
++}
++EXPORT_SYMBOL_GPL(devm_pm_runtime_get_noresume);
++
+ /**
+  * pm_runtime_forbid - Block runtime PM of a device.
+  * @dev: Device to handle.
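
Both new helpers follow the devres "action" pattern: perform the operation, then register an undo callback that the device core runs in reverse order at detach, with the "_or_reset" variant undoing immediately if registration itself fails. A toy model of that shape; this is a simplified stack, not the kernel's devres implementation:

#include <stdlib.h>

struct action { void (*undo)(void *); void *data; struct action *next; };
struct device { struct action *actions; };

static int add_action_or_reset(struct device *dev,
			       void (*undo)(void *), void *data)
{
	struct action *a = malloc(sizeof(*a));

	if (!a) {
		undo(data);	/* "or_reset": undo right away on failure */
		return -1;
	}
	a->undo = undo;
	a->data = data;
	a->next = dev->actions;
	dev->actions = a;	/* LIFO: undone in reverse order */
	return 0;
}

static void release_all(struct device *dev)	/* at detach time */
{
	while (dev->actions) {
		struct action *a = dev->actions;

		dev->actions = a->next;
		a->undo(a->data);
		free(a);
	}
}
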
+diff --git a/drivers/cdx/cdx_msi.c b/drivers/cdx/cdx_msi.c
+index e55f1716cfcb20..d7bade143998e9 100644
+--- a/drivers/cdx/cdx_msi.c
++++ b/drivers/cdx/cdx_msi.c
+@@ -165,7 +165,7 @@ struct irq_domain *cdx_msi_domain_init(struct device *dev)
+ 	struct device_node *parent_node;
+ 	struct irq_domain *parent;
+ 
+-	fwnode_handle = of_node_to_fwnode(np);
++	fwnode_handle = of_fwnode_handle(np);
+ 
+ 	parent_node = of_parse_phandle(np, "msi-map", 1);
+ 	if (!parent_node) {
+@@ -173,7 +173,8 @@ struct irq_domain *cdx_msi_domain_init(struct device *dev)
+ 		return NULL;
+ 	}
+ 
+-	parent = irq_find_matching_fwnode(of_node_to_fwnode(parent_node), DOMAIN_BUS_NEXUS);
++	parent = irq_find_matching_fwnode(of_fwnode_handle(parent_node), DOMAIN_BUS_NEXUS);
++	of_node_put(parent_node);
+ 	if (!parent || !msi_get_domain_info(parent)) {
+ 		dev_err(dev, "unable to locate ITS domain\n");
+ 		return NULL;
+diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
+index 8d5279c21e6cfe..1abedcae50b265 100644
+--- a/drivers/cpufreq/cppc_cpufreq.c
++++ b/drivers/cpufreq/cppc_cpufreq.c
+@@ -339,6 +339,16 @@ static int cppc_verify_policy(struct cpufreq_policy_data *policy)
+ 	return 0;
+ }
+ 
++static unsigned int __cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
++{
++	unsigned int transition_latency_ns = cppc_get_transition_latency(cpu);
++
++	if (transition_latency_ns == CPUFREQ_ETERNAL)
++		return CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS / NSEC_PER_USEC;
++
++	return transition_latency_ns / NSEC_PER_USEC;
++}
++
+ /*
+  * The PCC subspace describes the rate at which platform can accept commands
+  * on the shared PCC channel (including READs which do not count towards freq
+@@ -361,12 +371,12 @@ static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
+ 			return 10000;
+ 		}
+ 	}
+-	return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
++	return __cppc_cpufreq_get_transition_delay_us(cpu);
+ }
+ #else
+ static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
+ {
+-	return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
++	return __cppc_cpufreq_get_transition_delay_us(cpu);
+ }
+ #endif
+ 
+diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
+index 74a83203181d6f..e55136bb525e2c 100644
+--- a/drivers/dma/idxd/init.c
++++ b/drivers/dma/idxd/init.c
+@@ -923,6 +923,8 @@ static void idxd_remove(struct pci_dev *pdev)
+ 	idxd_cleanup_interrupts(idxd);
+ 	if (device_pasid_enabled(idxd))
+ 		idxd_disable_system_pasid(idxd);
++	if (device_user_pasid_enabled(idxd))
++		idxd_disable_sva(idxd->pdev);
+ 	pci_iounmap(pdev, idxd->reg_base);
+ 	put_device(idxd_confdev(idxd));
+ 	pci_disable_device(pdev);
+diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
+index c7b18c52825d67..784651269ec551 100644
+--- a/drivers/gpu/drm/amd/amdgpu/Makefile
++++ b/drivers/gpu/drm/amd/amdgpu/Makefile
+@@ -83,7 +83,8 @@ amdgpu-y += \
+ 	vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o arct_reg_init.o mxgpu_nv.o \
+ 	nbio_v7_2.o hdp_v4_0.o hdp_v5_0.o aldebaran_reg_init.o aldebaran.o soc21.o soc24.o \
+ 	sienna_cichlid.o smu_v13_0_10.o nbio_v4_3.o hdp_v6_0.o nbio_v7_7.o hdp_v5_2.o lsdma_v6_0.o \
+-	nbio_v7_9.o aqua_vanjaram.o nbio_v7_11.o lsdma_v7_0.o hdp_v7_0.o nbif_v6_3_1.o
++	nbio_v7_9.o aqua_vanjaram.o nbio_v7_11.o lsdma_v7_0.o hdp_v7_0.o nbif_v6_3_1.o \
++	cyan_skillfish_reg_init.o
+ 
+ # add DF block
+ amdgpu-y += \
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+index 1465b3adacb0af..d349a4816e5375 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+@@ -2353,10 +2353,9 @@ void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_mem *mem)
+ int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct amdgpu_device *adev,
+ 					  struct kfd_vm_fault_info *mem)
+ {
+-	if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
++	if (atomic_read_acquire(&adev->gmc.vm_fault_info_updated) == 1) {
+ 		*mem = *adev->gmc.vm_fault_info;
+-		mb(); /* make sure read happened */
+-		atomic_set(&adev->gmc.vm_fault_info_updated, 0);
++		atomic_set_release(&adev->gmc.vm_fault_info_updated, 0);
+ 	}
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+index 6042956cd5c3c1..e00b5e45423472 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+@@ -1016,7 +1016,9 @@ static uint8_t amdgpu_discovery_get_harvest_info(struct amdgpu_device *adev,
+ 	/* Until a uniform way is figured, get mask based on hwid */
+ 	switch (hw_id) {
+ 	case VCN_HWID:
+-		harvest = ((1 << inst) & adev->vcn.inst_mask) == 0;
++		/* VCN vs UVD+VCE */
++		if (!amdgpu_ip_version(adev, VCE_HWIP, 0))
++			harvest = ((1 << inst) & adev->vcn.inst_mask) == 0;
+ 		break;
+ 	case DMU_HWID:
+ 		if (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK)
+@@ -2462,7 +2464,9 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
+ 		amdgpu_discovery_init(adev);
+ 		vega10_reg_base_init(adev);
+ 		adev->sdma.num_instances = 2;
++		adev->sdma.sdma_mask = 3;
+ 		adev->gmc.num_umc = 4;
++		adev->gfx.xcc_mask = 1;
+ 		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 0, 0);
+ 		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 0, 0);
+ 		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 0);
+@@ -2489,7 +2493,9 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
+ 		amdgpu_discovery_init(adev);
+ 		vega10_reg_base_init(adev);
+ 		adev->sdma.num_instances = 2;
++		adev->sdma.sdma_mask = 3;
+ 		adev->gmc.num_umc = 4;
++		adev->gfx.xcc_mask = 1;
+ 		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 3, 0);
+ 		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 3, 0);
+ 		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 1);
+@@ -2516,8 +2522,10 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
+ 		amdgpu_discovery_init(adev);
+ 		vega10_reg_base_init(adev);
+ 		adev->sdma.num_instances = 1;
++		adev->sdma.sdma_mask = 1;
+ 		adev->vcn.num_vcn_inst = 1;
+ 		adev->gmc.num_umc = 2;
++		adev->gfx.xcc_mask = 1;
+ 		if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
+ 			adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 2, 0);
+ 			adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 2, 0);
+@@ -2560,7 +2568,9 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
+ 		amdgpu_discovery_init(adev);
+ 		vega20_reg_base_init(adev);
+ 		adev->sdma.num_instances = 2;
++		adev->sdma.sdma_mask = 3;
+ 		adev->gmc.num_umc = 8;
++		adev->gfx.xcc_mask = 1;
+ 		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 0);
+ 		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 0);
+ 		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 0);
+@@ -2588,8 +2598,10 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
+ 		amdgpu_discovery_init(adev);
+ 		arct_reg_base_init(adev);
+ 		adev->sdma.num_instances = 8;
++		adev->sdma.sdma_mask = 0xff;
+ 		adev->vcn.num_vcn_inst = 2;
+ 		adev->gmc.num_umc = 8;
++		adev->gfx.xcc_mask = 1;
+ 		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 1);
+ 		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 1);
+ 		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 1);
+@@ -2621,8 +2633,10 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
+ 		amdgpu_discovery_init(adev);
+ 		aldebaran_reg_base_init(adev);
+ 		adev->sdma.num_instances = 5;
++		adev->sdma.sdma_mask = 0x1f;
+ 		adev->vcn.num_vcn_inst = 2;
+ 		adev->gmc.num_umc = 4;
++		adev->gfx.xcc_mask = 1;
+ 		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 2);
+ 		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 2);
+ 		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 4, 0);
+@@ -2644,6 +2658,38 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
+ 		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 6, 0);
+ 		adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0);
+ 		break;
++	case CHIP_CYAN_SKILLFISH:
++		if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) {
++			r = amdgpu_discovery_reg_base_init(adev);
++			if (r)
++				return -EINVAL;
++
++			amdgpu_discovery_harvest_ip(adev);
++			amdgpu_discovery_get_gfx_info(adev);
++			amdgpu_discovery_get_mall_info(adev);
++			amdgpu_discovery_get_vcn_info(adev);
++		} else {
++			cyan_skillfish_reg_base_init(adev);
++			adev->sdma.num_instances = 2;
++			adev->sdma.sdma_mask = 3;
++			adev->gfx.xcc_mask = 1;
++			adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(2, 0, 3);
++			adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(2, 0, 3);
++			adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(5, 0, 1);
++			adev->ip_versions[HDP_HWIP][0] = IP_VERSION(5, 0, 1);
++			adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(5, 0, 1);
++			adev->ip_versions[SDMA1_HWIP][1] = IP_VERSION(5, 0, 1);
++			adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 5, 0);
++			adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(2, 1, 1);
++			adev->ip_versions[UMC_HWIP][0] = IP_VERSION(8, 1, 1);
++			adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 8);
++			adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 8);
++			adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 1);
++			adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 8);
++			adev->ip_versions[GC_HWIP][0] = IP_VERSION(10, 1, 3);
++			adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 0, 3);
++		}
++		break;
+ 	default:
+ 		r = amdgpu_discovery_reg_base_init(adev);
+ 		if (r) {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+index 8553ac4c0ad3f1..a8358d1d1acbca 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+@@ -2171,7 +2171,7 @@ static int psp_securedisplay_initialize(struct psp_context *psp)
+ 	}
+ 
+ 	ret = psp_ta_load(psp, &psp->securedisplay_context.context);
+-	if (!ret) {
++	if (!ret && !psp->securedisplay_context.context.resp_status) {
+ 		psp->securedisplay_context.context.initialized = true;
+ 		mutex_init(&psp->securedisplay_context.mutex);
+ 	} else
+diff --git a/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c b/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c
+new file mode 100644
+index 00000000000000..96616a865aac71
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c
+@@ -0,0 +1,56 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * Copyright 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++#include "amdgpu.h"
++#include "nv.h"
++
++#include "soc15_common.h"
++#include "soc15_hw_ip.h"
++#include "cyan_skillfish_ip_offset.h"
++
++int cyan_skillfish_reg_base_init(struct amdgpu_device *adev)
++{
++	/* HW has more IP blocks, only initialize the blocks needed by the driver */
++	uint32_t i;
++
++	adev->gfx.xcc_mask = 1;
++	for (i = 0 ; i < MAX_INSTANCE ; ++i) {
++		adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
++		adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i]));
++		adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i]));
++		adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i]));
++		adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i]));
++		adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i]));
++		adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i]));
++		adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(UVD0_BASE.instance[i]));
++		adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i]));
++		adev->reg_offset[DCE_HWIP][i] = (uint32_t *)(&(DMU_BASE.instance[i]));
++		adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i]));
++		adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
++		adev->reg_offset[SDMA1_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
++		adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i]));
++		adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i]));
++		adev->reg_offset[CLK_HWIP][i] = (uint32_t *)(&(CLK_BASE.instance[i]));
++	}
++	return 0;
++}
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+index 994432fb57eafa..8e2f7312565049 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+@@ -1055,7 +1055,7 @@ static int gmc_v7_0_sw_init(void *handle)
+ 					GFP_KERNEL);
+ 	if (!adev->gmc.vm_fault_info)
+ 		return -ENOMEM;
+-	atomic_set(&adev->gmc.vm_fault_info_updated, 0);
++	atomic_set_release(&adev->gmc.vm_fault_info_updated, 0);
+ 
+ 	return 0;
+ }
+@@ -1287,7 +1287,7 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
+ 	vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
+ 			     VMID);
+ 	if (amdgpu_amdkfd_is_kfd_vmid(adev, vmid)
+-		&& !atomic_read(&adev->gmc.vm_fault_info_updated)) {
++		&& !atomic_read_acquire(&adev->gmc.vm_fault_info_updated)) {
+ 		struct kfd_vm_fault_info *info = adev->gmc.vm_fault_info;
+ 		u32 protections = REG_GET_FIELD(status,
+ 					VM_CONTEXT1_PROTECTION_FAULT_STATUS,
+@@ -1303,8 +1303,7 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
+ 		info->prot_read = protections & 0x8 ? true : false;
+ 		info->prot_write = protections & 0x10 ? true : false;
+ 		info->prot_exec = protections & 0x20 ? true : false;
+-		mb();
+-		atomic_set(&adev->gmc.vm_fault_info_updated, 1);
++		atomic_set_release(&adev->gmc.vm_fault_info_updated, 1);
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+index 86488c052f8224..5248832c04adf0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+@@ -1168,7 +1168,7 @@ static int gmc_v8_0_sw_init(void *handle)
+ 					GFP_KERNEL);
+ 	if (!adev->gmc.vm_fault_info)
+ 		return -ENOMEM;
+-	atomic_set(&adev->gmc.vm_fault_info_updated, 0);
++	atomic_set_release(&adev->gmc.vm_fault_info_updated, 0);
+ 
+ 	return 0;
+ }
+@@ -1468,7 +1468,7 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
+ 	vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
+ 			     VMID);
+ 	if (amdgpu_amdkfd_is_kfd_vmid(adev, vmid)
+-		&& !atomic_read(&adev->gmc.vm_fault_info_updated)) {
++		&& !atomic_read_acquire(&adev->gmc.vm_fault_info_updated)) {
+ 		struct kfd_vm_fault_info *info = adev->gmc.vm_fault_info;
+ 		u32 protections = REG_GET_FIELD(status,
+ 					VM_CONTEXT1_PROTECTION_FAULT_STATUS,
+@@ -1484,8 +1484,7 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
+ 		info->prot_read = protections & 0x8 ? true : false;
+ 		info->prot_write = protections & 0x10 ? true : false;
+ 		info->prot_exec = protections & 0x20 ? true : false;
+-		mb();
+-		atomic_set(&adev->gmc.vm_fault_info_updated, 1);
++		atomic_set_release(&adev->gmc.vm_fault_info_updated, 1);
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
+index e3f4f5fbbd6e75..19cbf80fa32144 100644
+--- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
+@@ -224,7 +224,12 @@ static int mes_v12_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
+ 			pipe, x_pkt->header.opcode);
+ 
+ 	r = amdgpu_fence_wait_polling(ring, seq, timeout);
+-	if (r < 1 || !*status_ptr) {
++
++	/*
++	 * status_ptr[31:0] == 0 (fail) or status_ptr[63:0] == 1 (success).
++	 * If status_ptr[31:0] == 0 then status_ptr[63:32] will have debug error information.
++	 */
++	if (r < 1 || !(lower_32_bits(*status_ptr))) {
+ 
+ 		if (misc_op_str)
+ 			dev_err(adev->dev, "MES(%d) failed to respond to msg=%s (%s)\n",
+diff --git a/drivers/gpu/drm/amd/amdgpu/nv.h b/drivers/gpu/drm/amd/amdgpu/nv.h
+index 83e9782aef39d6..8f4817404f10d0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/nv.h
++++ b/drivers/gpu/drm/amd/amdgpu/nv.h
+@@ -31,5 +31,6 @@ extern const struct amdgpu_ip_block_version nv_common_ip_block;
+ void nv_grbm_select(struct amdgpu_device *adev,
+ 		    u32 me, u32 pipe, u32 queue, u32 vmid);
+ void nv_set_virt_ops(struct amdgpu_device *adev);
++int cyan_skillfish_reg_base_init(struct amdgpu_device *adev);
+ 
+ #endif
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+index 632a25957477ee..3018e294673a5b 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+@@ -5444,8 +5444,7 @@ static int smu7_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
+ 		thermal_data->max = table_info->cac_dtp_table->usSoftwareShutdownTemp *
+ 			PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ 	else if (hwmgr->pp_table_version == PP_TABLE_V0)
+-		thermal_data->max = data->thermal_temp_setting.temperature_shutdown *
+-			PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
++		thermal_data->max = data->thermal_temp_setting.temperature_shutdown;
+ 
+ 	thermal_data->sw_ctf_threshold = thermal_data->max;
+ 
+diff --git a/drivers/gpu/drm/bridge/lontium-lt9211.c b/drivers/gpu/drm/bridge/lontium-lt9211.c
+index c8881796fba4c6..4014375f06ea13 100644
+--- a/drivers/gpu/drm/bridge/lontium-lt9211.c
++++ b/drivers/gpu/drm/bridge/lontium-lt9211.c
+@@ -120,8 +120,7 @@ static int lt9211_read_chipid(struct lt9211 *ctx)
+ 	}
+ 
+ 	/* Test for known Chip ID. */
+-	if (chipid[0] != REG_CHIPID0_VALUE || chipid[1] != REG_CHIPID1_VALUE ||
+-	    chipid[2] != REG_CHIPID2_VALUE) {
++	if (chipid[0] != REG_CHIPID0_VALUE || chipid[1] != REG_CHIPID1_VALUE) {
+ 		dev_err(ctx->dev, "Unknown Chip ID: 0x%02x 0x%02x 0x%02x\n",
+ 			chipid[0], chipid[1], chipid[2]);
+ 		return -EINVAL;
+diff --git a/drivers/gpu/drm/drm_draw.c b/drivers/gpu/drm/drm_draw.c
+index d41f8ae1c14833..b61ebc5bdd5cef 100644
+--- a/drivers/gpu/drm/drm_draw.c
++++ b/drivers/gpu/drm/drm_draw.c
+@@ -125,7 +125,7 @@ EXPORT_SYMBOL(drm_draw_fill16);
+ 
+ void drm_draw_fill24(struct iosys_map *dmap, unsigned int dpitch,
+ 		     unsigned int height, unsigned int width,
+-		     u16 color)
++		     u32 color)
+ {
+ 	unsigned int y, x;
+ 
+diff --git a/drivers/gpu/drm/drm_draw_internal.h b/drivers/gpu/drm/drm_draw_internal.h
+index f121ee7339dc11..20cb404e23ea62 100644
+--- a/drivers/gpu/drm/drm_draw_internal.h
++++ b/drivers/gpu/drm/drm_draw_internal.h
+@@ -47,7 +47,7 @@ void drm_draw_fill16(struct iosys_map *dmap, unsigned int dpitch,
+ 
+ void drm_draw_fill24(struct iosys_map *dmap, unsigned int dpitch,
+ 		     unsigned int height, unsigned int width,
+-		     u16 color);
++		     u32 color);
+ 
+ void drm_draw_fill32(struct iosys_map *dmap, unsigned int dpitch,
+ 		     unsigned int height, unsigned int width,
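/*
 * Aside: the prototype fix above widens the fill color from u16 to u32
 * because a 24-bit RGB value has 24 significant bits and a u16 silently
 * truncates the top byte.  Sketch of a 3-bytes-per-pixel store showing
 * what goes missing (layout and names are illustrative):
 */
#include <stdint.h>

static void fill24(uint8_t *dst, unsigned int pixels, uint32_t color)
{
	for (unsigned int x = 0; x < pixels; x++) {
		dst[3 * x + 0] = color & 0xff;		/* blue */
		dst[3 * x + 1] = (color >> 8) & 0xff;	/* green */
		dst[3 * x + 2] = (color >> 16) & 0xff;	/* red: always 0 with a u16 color */
	}
}

int main(void)
{
	uint8_t line[12];

	fill24(line, 4, 0x00ff8040);	/* a u16 would have clipped this to 0x8040 */
	return 0;
}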
+diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+index 9eeba254cf45df..2a218d20584284 100644
+--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
++++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+@@ -51,7 +51,6 @@ struct decon_context {
+ 	void __iomem			*regs;
+ 	unsigned long			irq_flags;
+ 	bool				i80_if;
+-	bool				suspended;
+ 	wait_queue_head_t		wait_vsync_queue;
+ 	atomic_t			wait_vsync_event;
+ 
+@@ -81,13 +80,30 @@ static const enum drm_plane_type decon_win_types[WINDOWS_NR] = {
+ 	DRM_PLANE_TYPE_CURSOR,
+ };
+ 
+-static void decon_wait_for_vblank(struct exynos_drm_crtc *crtc)
++/**
++ * decon_shadow_protect_win() - disable updating values from shadow registers at vsync
++ *
++ * @ctx: display and enhancement controller context
++ * @win: window to protect registers for
++ * @protect: 1 to protect (disable updates)
++ */
++static void decon_shadow_protect_win(struct decon_context *ctx,
++				     unsigned int win, bool protect)
+ {
+-	struct decon_context *ctx = crtc->ctx;
++	u32 bits, val;
+ 
+-	if (ctx->suspended)
+-		return;
++	bits = SHADOWCON_WINx_PROTECT(win);
++
++	val = readl(ctx->regs + SHADOWCON);
++	if (protect)
++		val |= bits;
++	else
++		val &= ~bits;
++	writel(val, ctx->regs + SHADOWCON);
++}
+ 
++static void decon_wait_for_vblank(struct decon_context *ctx)
++{
+ 	atomic_set(&ctx->wait_vsync_event, 1);
+ 
+ 	/*
+@@ -100,25 +116,33 @@ static void decon_wait_for_vblank(struct exynos_drm_crtc *crtc)
+ 		DRM_DEV_DEBUG_KMS(ctx->dev, "vblank wait timed out.\n");
+ }
+ 
+-static void decon_clear_channels(struct exynos_drm_crtc *crtc)
++static void decon_clear_channels(struct decon_context *ctx)
+ {
+-	struct decon_context *ctx = crtc->ctx;
+ 	unsigned int win, ch_enabled = 0;
++	u32 val;
+ 
+ 	/* Check if any channel is enabled. */
+ 	for (win = 0; win < WINDOWS_NR; win++) {
+-		u32 val = readl(ctx->regs + WINCON(win));
++		val = readl(ctx->regs + WINCON(win));
+ 
+ 		if (val & WINCONx_ENWIN) {
++			decon_shadow_protect_win(ctx, win, true);
++
+ 			val &= ~WINCONx_ENWIN;
+ 			writel(val, ctx->regs + WINCON(win));
+ 			ch_enabled = 1;
++
++			decon_shadow_protect_win(ctx, win, false);
+ 		}
+ 	}
+ 
++	val = readl(ctx->regs + DECON_UPDATE);
++	val |= DECON_UPDATE_STANDALONE_F;
++	writel(val, ctx->regs + DECON_UPDATE);
++
+ 	/* Wait for vsync, as disable channel takes effect at next vsync */
+ 	if (ch_enabled)
+-		decon_wait_for_vblank(ctx->crtc);
++		decon_wait_for_vblank(ctx);
+ }
+ 
+ static int decon_ctx_initialize(struct decon_context *ctx,
+@@ -126,7 +150,7 @@ static int decon_ctx_initialize(struct decon_context *ctx,
+ {
+ 	ctx->drm_dev = drm_dev;
+ 
+-	decon_clear_channels(ctx->crtc);
++	decon_clear_channels(ctx);
+ 
+ 	return exynos_drm_register_dma(drm_dev, ctx->dev, &ctx->dma_priv);
+ }
+@@ -155,9 +179,6 @@ static void decon_commit(struct exynos_drm_crtc *crtc)
+ 	struct drm_display_mode *mode = &crtc->base.state->adjusted_mode;
+ 	u32 val, clkdiv;
+ 
+-	if (ctx->suspended)
+-		return;
+-
+ 	/* nothing to do if we haven't set the mode yet */
+ 	if (mode->htotal == 0 || mode->vtotal == 0)
+ 		return;
+@@ -219,9 +240,6 @@ static int decon_enable_vblank(struct exynos_drm_crtc *crtc)
+ 	struct decon_context *ctx = crtc->ctx;
+ 	u32 val;
+ 
+-	if (ctx->suspended)
+-		return -EPERM;
+-
+ 	if (!test_and_set_bit(0, &ctx->irq_flags)) {
+ 		val = readl(ctx->regs + VIDINTCON0);
+ 
+@@ -244,9 +262,6 @@ static void decon_disable_vblank(struct exynos_drm_crtc *crtc)
+ 	struct decon_context *ctx = crtc->ctx;
+ 	u32 val;
+ 
+-	if (ctx->suspended)
+-		return;
+-
+ 	if (test_and_clear_bit(0, &ctx->irq_flags)) {
+ 		val = readl(ctx->regs + VIDINTCON0);
+ 
+@@ -343,36 +358,11 @@ static void decon_win_set_colkey(struct decon_context *ctx, unsigned int win)
+ 	writel(keycon1, ctx->regs + WKEYCON1_BASE(win));
+ }
+ 
+-/**
+- * decon_shadow_protect_win() - disable updating values from shadow registers at vsync
+- *
+- * @ctx: display and enhancement controller context
+- * @win: window to protect registers for
+- * @protect: 1 to protect (disable updates)
+- */
+-static void decon_shadow_protect_win(struct decon_context *ctx,
+-				     unsigned int win, bool protect)
+-{
+-	u32 bits, val;
+-
+-	bits = SHADOWCON_WINx_PROTECT(win);
+-
+-	val = readl(ctx->regs + SHADOWCON);
+-	if (protect)
+-		val |= bits;
+-	else
+-		val &= ~bits;
+-	writel(val, ctx->regs + SHADOWCON);
+-}
+-
+ static void decon_atomic_begin(struct exynos_drm_crtc *crtc)
+ {
+ 	struct decon_context *ctx = crtc->ctx;
+ 	int i;
+ 
+-	if (ctx->suspended)
+-		return;
+-
+ 	for (i = 0; i < WINDOWS_NR; i++)
+ 		decon_shadow_protect_win(ctx, i, true);
+ }
+@@ -392,9 +382,6 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
+ 	unsigned int cpp = fb->format->cpp[0];
+ 	unsigned int pitch = fb->pitches[0];
+ 
+-	if (ctx->suspended)
+-		return;
+-
+ 	/*
+ 	 * SHADOWCON/PRTCON register is used for enabling timing.
+ 	 *
+@@ -482,9 +469,6 @@ static void decon_disable_plane(struct exynos_drm_crtc *crtc,
+ 	unsigned int win = plane->index;
+ 	u32 val;
+ 
+-	if (ctx->suspended)
+-		return;
+-
+ 	/* protect windows */
+ 	decon_shadow_protect_win(ctx, win, true);
+ 
+@@ -503,9 +487,6 @@ static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
+ 	struct decon_context *ctx = crtc->ctx;
+ 	int i;
+ 
+-	if (ctx->suspended)
+-		return;
+-
+ 	for (i = 0; i < WINDOWS_NR; i++)
+ 		decon_shadow_protect_win(ctx, i, false);
+ 	exynos_crtc_handle_event(crtc);
+@@ -533,9 +514,6 @@ static void decon_atomic_enable(struct exynos_drm_crtc *crtc)
+ 	struct decon_context *ctx = crtc->ctx;
+ 	int ret;
+ 
+-	if (!ctx->suspended)
+-		return;
+-
+ 	ret = pm_runtime_resume_and_get(ctx->dev);
+ 	if (ret < 0) {
+ 		DRM_DEV_ERROR(ctx->dev, "failed to enable DECON device.\n");
+@@ -549,8 +527,6 @@ static void decon_atomic_enable(struct exynos_drm_crtc *crtc)
+ 		decon_enable_vblank(ctx->crtc);
+ 
+ 	decon_commit(ctx->crtc);
+-
+-	ctx->suspended = false;
+ }
+ 
+ static void decon_atomic_disable(struct exynos_drm_crtc *crtc)
+@@ -558,9 +534,6 @@ static void decon_atomic_disable(struct exynos_drm_crtc *crtc)
+ 	struct decon_context *ctx = crtc->ctx;
+ 	int i;
+ 
+-	if (ctx->suspended)
+-		return;
+-
+ 	/*
+ 	 * We need to make sure that all windows are disabled before we
+ 	 * suspend that connector. Otherwise we might try to scan from
+@@ -570,8 +543,6 @@ static void decon_atomic_disable(struct exynos_drm_crtc *crtc)
+ 		decon_disable_plane(crtc, &ctx->planes[i]);
+ 
+ 	pm_runtime_put_sync(ctx->dev);
+-
+-	ctx->suspended = true;
+ }
+ 
+ static const struct exynos_drm_crtc_ops decon_crtc_ops = {
+@@ -692,7 +663,6 @@ static int decon_probe(struct platform_device *pdev)
+ 		return -ENOMEM;
+ 
+ 	ctx->dev = dev;
+-	ctx->suspended = true;
+ 
+ 	i80_if_timings = of_get_child_by_name(dev->of_node, "i80-if-timings");
+ 	if (i80_if_timings)
+diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
+index 0d5197c0824a91..5cf3a516ccfb38 100644
+--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
++++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
+@@ -1324,9 +1324,16 @@ static int ct_receive(struct intel_guc_ct *ct)
+ 
+ static void ct_try_receive_message(struct intel_guc_ct *ct)
+ {
++	struct intel_guc *guc = ct_to_guc(ct);
+ 	int ret;
+ 
+-	if (GEM_WARN_ON(!ct->enabled))
++	if (!ct->enabled) {
++		GEM_WARN_ON(!guc_to_gt(guc)->uc.reset_in_progress);
++		return;
++	}
++
++	/* When interrupts are disabled, message handling is not expected */

++	if (!guc->interrupts.enabled)
+ 		return;
+ 
+ 	ret = ct_receive(ct);
+diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+index 67fa528f546d33..8609fa38058ea0 100644
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+@@ -236,6 +236,8 @@ static int a6xx_gmu_start(struct a6xx_gmu *gmu)
+ 	if (ret)
+ 		DRM_DEV_ERROR(gmu->dev, "GMU firmware initialization timed out\n");
+ 
++	set_bit(GMU_STATUS_FW_START, &gmu->status);
++
+ 	return ret;
+ }
+ 
+@@ -482,6 +484,9 @@ static int a6xx_rpmh_start(struct a6xx_gmu *gmu)
+ 	int ret;
+ 	u32 val;
+ 
++	if (!test_and_clear_bit(GMU_STATUS_PDC_SLEEP, &gmu->status))
++		return 0;
++
+ 	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, BIT(1));
+ 
+ 	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_RSCC_CONTROL_ACK, val,
+@@ -509,6 +514,9 @@ static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
+ 	int ret;
+ 	u32 val;
+ 
++	if (test_and_clear_bit(GMU_STATUS_FW_START, &gmu->status))
++		return;
++
+ 	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1);
+ 
+ 	ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0,
+@@ -517,6 +525,8 @@ static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
+ 		DRM_DEV_ERROR(gmu->dev, "Unable to power off the GPU RSC\n");
+ 
+ 	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
++
++	set_bit(GMU_STATUS_PDC_SLEEP, &gmu->status);
+ }
+ 
+ static inline void pdc_write(void __iomem *ptr, u32 offset, u32 value)
+@@ -645,8 +655,6 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
+ 	/* ensure no writes happen before the uCode is fully written */
+ 	wmb();
+ 
+-	a6xx_rpmh_stop(gmu);
+-
+ err:
+ 	if (!IS_ERR_OR_NULL(pdcptr))
+ 		iounmap(pdcptr);
+@@ -799,19 +807,15 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
+ 	else
+ 		gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1);
+ 
+-	if (state == GMU_WARM_BOOT) {
+-		ret = a6xx_rpmh_start(gmu);
+-		if (ret)
+-			return ret;
+-	} else {
++	ret = a6xx_rpmh_start(gmu);
++	if (ret)
++		return ret;
++
++	if (state == GMU_COLD_BOOT) {
+ 		if (WARN(!adreno_gpu->fw[ADRENO_FW_GMU],
+ 			"GMU firmware is not loaded\n"))
+ 			return -ENOENT;
+ 
+-		ret = a6xx_rpmh_start(gmu);
+-		if (ret)
+-			return ret;
+-
+ 		ret = a6xx_gmu_fw_load(gmu);
+ 		if (ret)
+ 			return ret;
+@@ -980,6 +984,8 @@ static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
+ 
+ 	/* Reset GPU core blocks */
+ 	a6xx_gpu_sw_reset(gpu, true);
++
++	a6xx_rpmh_stop(gmu);
+ }
+ 
+ static void a6xx_gmu_set_initial_freq(struct msm_gpu *gpu, struct a6xx_gmu *gmu)
+diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
+index 94b6c5cab6f435..db5b3b13e7435a 100644
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
+@@ -99,6 +99,12 @@ struct a6xx_gmu {
+ 	struct completion pd_gate;
+ 
+ 	struct qmp *qmp;
++
++/* To check if we can trigger sleep seq at PDC. Cleared in a6xx_rpmh_stop() */
++#define GMU_STATUS_FW_START	0
++/* To track if PDC sleep seq was done */
++#define GMU_STATUS_PDC_SLEEP	1
++	unsigned long status;
+ };
+ 
+ static inline u32 gmu_read(struct a6xx_gmu *gmu, u32 offset)
+diff --git a/drivers/gpu/drm/panthor/panthor_fw.c b/drivers/gpu/drm/panthor/panthor_fw.c
+index 4e2d3a02ea0689..cdd6e1c08cebdb 100644
+--- a/drivers/gpu/drm/panthor/panthor_fw.c
++++ b/drivers/gpu/drm/panthor/panthor_fw.c
+@@ -1057,6 +1057,7 @@ void panthor_fw_pre_reset(struct panthor_device *ptdev, bool on_hang)
+ 	}
+ 
+ 	panthor_job_irq_suspend(&ptdev->fw->irq);
++	panthor_fw_stop(ptdev);
+ }
+ 
+ /**
+diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+index 5d7df4c3b08c47..a551458ad43404 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
++++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+@@ -1118,7 +1118,7 @@ static int vop2_plane_atomic_check(struct drm_plane *plane,
+ 		return format;
+ 
+ 	if (drm_rect_width(src) >> 16 < 4 || drm_rect_height(src) >> 16 < 4 ||
+-	    drm_rect_width(dest) < 4 || drm_rect_width(dest) < 4) {
++	    drm_rect_width(dest) < 4 || drm_rect_height(dest) < 4) {
+ 		drm_err(vop2->drm, "Invalid size: %dx%d->%dx%d, min size is 4x4\n",
+ 			drm_rect_width(src) >> 16, drm_rect_height(src) >> 16,
+ 			drm_rect_width(dest), drm_rect_height(dest));
+diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
+index 416590ea0dc3d6..d5260cb1ed0ec6 100644
+--- a/drivers/gpu/drm/scheduler/sched_main.c
++++ b/drivers/gpu/drm/scheduler/sched_main.c
+@@ -952,13 +952,14 @@ int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job,
+ 	dma_resv_assert_held(resv);
+ 
+ 	dma_resv_for_each_fence(&cursor, resv, usage, fence) {
+-		/* Make sure to grab an additional ref on the added fence */
+-		dma_fence_get(fence);
+-		ret = drm_sched_job_add_dependency(job, fence);
+-		if (ret) {
+-			dma_fence_put(fence);
++		/*
++		 * As drm_sched_job_add_dependency always consumes the fence
++		 * reference (even when it fails), and dma_resv_for_each_fence
++		 * is not obtaining one, we need to grab one before calling.
++		 */
++		ret = drm_sched_job_add_dependency(job, dma_fence_get(fence));
++		if (ret)
+ 			return ret;
+-		}
+ 	}
+ 	return 0;
+ }
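/*
 * Aside: the rewritten loop relies on drm_sched_job_add_dependency()
 * consuming the fence reference even when it fails, so the caller takes
 * exactly one extra reference up front and never double-puts.  A
 * userspace sketch of the "callee always consumes" convention (the
 * refcounting here is illustrative, not the kernel's dma_fence API):
 */
#include <stdlib.h>

struct fence { int refcount; };

static struct fence *fence_get(struct fence *f) { f->refcount++; return f; }
static void fence_put(struct fence *f) { if (--f->refcount == 0) free(f); }

/* Consumes the caller's reference whether it succeeds or fails. */
static int add_dependency(struct fence *f, int fail)
{
	if (fail) {
		fence_put(f);
		return -1;
	}
	/* ...the job would own this reference and drop it later... */
	fence_put(f);	/* dropped here so the sketch does not leak */
	return 0;
}

int main(void)
{
	struct fence *f = calloc(1, sizeof(*f));

	if (!f)
		return 1;
	f->refcount = 1;

	/* grab one extra reference before handing the fence to the callee */
	if (add_dependency(fence_get(f), 0))
		return 1;
	fence_put(f);	/* drop the caller's own reference */
	return 0;
}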
+diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
+index cf6946424fc357..03d674e9e80753 100644
+--- a/drivers/gpu/drm/xe/xe_guc_submit.c
++++ b/drivers/gpu/drm/xe/xe_guc_submit.c
+@@ -41,6 +41,7 @@
+ #include "xe_ring_ops_types.h"
+ #include "xe_sched_job.h"
+ #include "xe_trace.h"
++#include "xe_uc_fw.h"
+ #include "xe_vm.h"
+ 
+ static struct xe_guc *
+@@ -1285,7 +1286,17 @@ static void __guc_exec_queue_process_msg_cleanup(struct xe_sched_msg *msg)
+ 	xe_assert(xe, !(q->flags & EXEC_QUEUE_FLAG_PERMANENT));
+ 	trace_xe_exec_queue_cleanup_entity(q);
+ 
+-	if (exec_queue_registered(q))
++	/*
++	 * Expected state transitions for cleanup:
++	 * - If the exec queue is registered and GuC firmware is running, we must first
++	 *   disable scheduling and deregister the queue to ensure proper teardown and
++	 *   resource release in the GuC, then destroy the exec queue on driver side.
++	 * - If the GuC is already stopped (e.g., during driver unload or GPU reset),
++	 *   we cannot expect a response for the deregister request. In this case,
++	 *   it is safe to directly destroy the exec queue on driver side, as the GuC
++	 *   will not process further requests and all resources must be cleaned up locally.
++	 */
++	if (exec_queue_registered(q) && xe_uc_fw_is_running(&guc->fw))
+ 		disable_scheduling_deregister(guc, q);
+ 	else
+ 		__guc_exec_queue_fini(guc, q);
+diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
+index f5c217ac4bfaa7..f073d5621050a1 100644
+--- a/drivers/hid/hid-input.c
++++ b/drivers/hid/hid-input.c
+@@ -622,7 +622,10 @@ static void hidinput_update_battery(struct hid_device *dev, unsigned int usage,
+ 		return;
+ 	}
+ 
+-	if (value == 0 || value < dev->battery_min || value > dev->battery_max)
++	if ((usage & HID_USAGE_PAGE) == HID_UP_DIGITIZER && value == 0)
++		return;
++
++	if (value < dev->battery_min || value > dev->battery_max)
+ 		return;
+ 
+ 	capacity = hidinput_scale_battery_capacity(dev, value);
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index 5c424010bc025c..0e4cb0e668eb5c 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -83,9 +83,8 @@ enum latency_mode {
+ 	HID_LATENCY_HIGH = 1,
+ };
+ 
+-#define MT_IO_FLAGS_RUNNING		0
+-#define MT_IO_FLAGS_ACTIVE_SLOTS	1
+-#define MT_IO_FLAGS_PENDING_SLOTS	2
++#define MT_IO_SLOTS_MASK		GENMASK(7, 0) /* reserve first 8 bits for slot tracking */
++#define MT_IO_FLAGS_RUNNING		32
+ 
+ static const bool mtrue = true;		/* default for true */
+ static const bool mfalse;		/* default for false */
+@@ -160,7 +159,11 @@ struct mt_device {
+ 	struct mt_class mtclass;	/* our mt device class */
+ 	struct timer_list release_timer;	/* to release sticky fingers */
+ 	struct hid_device *hdev;	/* hid_device we're attached to */
+-	unsigned long mt_io_flags;	/* mt flags (MT_IO_FLAGS_*) */
++	unsigned long mt_io_flags;	/* mt flags (MT_IO_FLAGS_RUNNING)
++					 * first 8 bits are reserved for keeping the slot
++					 * states, this is fine because we only support up
++					 * to 250 slots (MT_MAX_MAXCONTACT)
++					 */
+ 	__u8 inputmode_value;	/* InputMode HID feature value */
+ 	__u8 maxcontacts;
+ 	bool is_buttonpad;	/* is this device a button pad? */
+@@ -941,6 +944,7 @@ static void mt_release_pending_palms(struct mt_device *td,
+ 
+ 	for_each_set_bit(slotnum, app->pending_palm_slots, td->maxcontacts) {
+ 		clear_bit(slotnum, app->pending_palm_slots);
++		clear_bit(slotnum, &td->mt_io_flags);
+ 
+ 		input_mt_slot(input, slotnum);
+ 		input_mt_report_slot_inactive(input);
+@@ -972,12 +976,6 @@ static void mt_sync_frame(struct mt_device *td, struct mt_application *app,
+ 
+ 	app->num_received = 0;
+ 	app->left_button_state = 0;
+-
+-	if (test_bit(MT_IO_FLAGS_ACTIVE_SLOTS, &td->mt_io_flags))
+-		set_bit(MT_IO_FLAGS_PENDING_SLOTS, &td->mt_io_flags);
+-	else
+-		clear_bit(MT_IO_FLAGS_PENDING_SLOTS, &td->mt_io_flags);
+-	clear_bit(MT_IO_FLAGS_ACTIVE_SLOTS, &td->mt_io_flags);
+ }
+ 
+ static int mt_compute_timestamp(struct mt_application *app, __s32 value)
+@@ -1152,7 +1150,9 @@ static int mt_process_slot(struct mt_device *td, struct input_dev *input,
+ 		input_event(input, EV_ABS, ABS_MT_TOUCH_MAJOR, major);
+ 		input_event(input, EV_ABS, ABS_MT_TOUCH_MINOR, minor);
+ 
+-		set_bit(MT_IO_FLAGS_ACTIVE_SLOTS, &td->mt_io_flags);
++		set_bit(slotnum, &td->mt_io_flags);
++	} else {
++		clear_bit(slotnum, &td->mt_io_flags);
+ 	}
+ 
+ 	return 0;
+@@ -1287,7 +1287,7 @@ static void mt_touch_report(struct hid_device *hid,
+ 	 * defect.
+ 	 */
+ 	if (app->quirks & MT_QUIRK_STICKY_FINGERS) {
+-		if (test_bit(MT_IO_FLAGS_PENDING_SLOTS, &td->mt_io_flags))
++		if (td->mt_io_flags & MT_IO_SLOTS_MASK)
+ 			mod_timer(&td->release_timer,
+ 				  jiffies + msecs_to_jiffies(100));
+ 		else
+@@ -1663,6 +1663,7 @@ static int mt_input_configured(struct hid_device *hdev, struct hid_input *hi)
+ 	case HID_CP_CONSUMER_CONTROL:
+ 	case HID_GD_WIRELESS_RADIO_CTLS:
+ 	case HID_GD_SYSTEM_MULTIAXIS:
++	case HID_DG_PEN:
+ 		/* already handled by hid core */
+ 		break;
+ 	case HID_DG_TOUCHSCREEN:
+@@ -1734,6 +1735,7 @@ static void mt_release_contacts(struct hid_device *hid)
+ 			for (i = 0; i < mt->num_slots; i++) {
+ 				input_mt_slot(input_dev, i);
+ 				input_mt_report_slot_inactive(input_dev);
++				clear_bit(i, &td->mt_io_flags);
+ 			}
+ 			input_mt_sync_frame(input_dev);
+ 			input_sync(input_dev);
+@@ -1756,7 +1758,7 @@ static void mt_expired_timeout(struct timer_list *t)
+ 	 */
+ 	if (test_and_set_bit_lock(MT_IO_FLAGS_RUNNING, &td->mt_io_flags))
+ 		return;
+-	if (test_bit(MT_IO_FLAGS_PENDING_SLOTS, &td->mt_io_flags))
++	if (td->mt_io_flags & MT_IO_SLOTS_MASK)
+ 		mt_release_contacts(hdev);
+ 	clear_bit_unlock(MT_IO_FLAGS_RUNNING, &td->mt_io_flags);
+ }
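/*
 * Aside: the multitouch rework folds per-slot contact state into the
 * low bits of mt_io_flags (MT_IO_SLOTS_MASK) and parks the RUNNING flag
 * at bit 32, so "any contact still down" is a single mask test.  A
 * compilable sketch of that layout (a 64-bit flags word is assumed
 * here; the macros mirror the patch, the rest is illustrative):
 */
#include <stdio.h>

#define SLOTS_MASK	0xffULL		/* bits 0..7: one bit per slot */
#define FLAG_RUNNING	(1ULL << 32)	/* kept clear of the slot bits */

int main(void)
{
	unsigned long long flags = 0;

	flags |= 1ULL << 2;		/* slot 2 touches down */
	flags |= FLAG_RUNNING;

	if (flags & SLOTS_MASK)
		printf("arm the sticky-fingers release timer\n");

	flags &= ~(1ULL << 2);		/* slot 2 released */
	if (!(flags & SLOTS_MASK))
		printf("no active slots left\n");
	return 0;
}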
+diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
+index 790bc8fbf21da3..9f88d8ca6d1b1f 100644
+--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
++++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
+@@ -667,20 +667,12 @@ static void inv_icm42600_disable_vdd_reg(void *_data)
+ static void inv_icm42600_disable_vddio_reg(void *_data)
+ {
+ 	struct inv_icm42600_state *st = _data;
+-	const struct device *dev = regmap_get_device(st->map);
+-	int ret;
+-
+-	ret = regulator_disable(st->vddio_supply);
+-	if (ret)
+-		dev_err(dev, "failed to disable vddio error %d\n", ret);
+-}
++	struct device *dev = regmap_get_device(st->map);
+ 
+-static void inv_icm42600_disable_pm(void *_data)
+-{
+-	struct device *dev = _data;
++	if (pm_runtime_status_suspended(dev))
++		return;
+ 
+-	pm_runtime_put_sync(dev);
+-	pm_runtime_disable(dev);
++	regulator_disable(st->vddio_supply);
+ }
+ 
+ int inv_icm42600_core_probe(struct regmap *regmap, int chip, int irq,
+@@ -777,16 +769,14 @@ int inv_icm42600_core_probe(struct regmap *regmap, int chip, int irq,
+ 		return ret;
+ 
+ 	/* setup runtime power management */
+-	ret = pm_runtime_set_active(dev);
++	ret = devm_pm_runtime_set_active_enabled(dev);
+ 	if (ret)
+ 		return ret;
+-	pm_runtime_get_noresume(dev);
+-	pm_runtime_enable(dev);
++
+ 	pm_runtime_set_autosuspend_delay(dev, INV_ICM42600_SUSPEND_DELAY_MS);
+ 	pm_runtime_use_autosuspend(dev);
+-	pm_runtime_put(dev);
+ 
+-	return devm_add_action_or_reset(dev, inv_icm42600_disable_pm, dev);
++	return ret;
+ }
+ EXPORT_SYMBOL_NS_GPL(inv_icm42600_core_probe, IIO_ICM42600);
+ 
+@@ -797,17 +787,15 @@ EXPORT_SYMBOL_NS_GPL(inv_icm42600_core_probe, IIO_ICM42600);
+ static int inv_icm42600_suspend(struct device *dev)
+ {
+ 	struct inv_icm42600_state *st = dev_get_drvdata(dev);
+-	int ret;
++	int ret = 0;
+ 
+ 	mutex_lock(&st->lock);
+ 
+ 	st->suspended.gyro = st->conf.gyro.mode;
+ 	st->suspended.accel = st->conf.accel.mode;
+ 	st->suspended.temp = st->conf.temp_en;
+-	if (pm_runtime_suspended(dev)) {
+-		ret = 0;
++	if (pm_runtime_suspended(dev))
+ 		goto out_unlock;
+-	}
+ 
+ 	/* disable FIFO data streaming */
+ 	if (st->fifo.on) {
+@@ -839,10 +827,13 @@ static int inv_icm42600_resume(struct device *dev)
+ 	struct inv_icm42600_state *st = dev_get_drvdata(dev);
+ 	struct inv_icm42600_sensor_state *gyro_st = iio_priv(st->indio_gyro);
+ 	struct inv_icm42600_sensor_state *accel_st = iio_priv(st->indio_accel);
+-	int ret;
++	int ret = 0;
+ 
+ 	mutex_lock(&st->lock);
+ 
++	if (pm_runtime_suspended(dev))
++		goto out_unlock;
++
+ 	ret = inv_icm42600_enable_regulator_vddio(st);
+ 	if (ret)
+ 		goto out_unlock;
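/*
 * Aside: the probe path above swaps manual pm_runtime enable/put and a
 * hand-rolled disable action for devm_pm_runtime_set_active_enabled(),
 * which registers the matching teardown at setup time.  A loose
 * userspace analogy with atexit() standing in for the devm action
 * (illustrative only, not the runtime-PM API):
 */
#include <stdio.h>
#include <stdlib.h>

static void pm_disable(void)
{
	printf("runtime PM torn down automatically\n");
}

static int probe(void)
{
	/* setup and teardown registration happen in one step */
	if (atexit(pm_disable))
		return -1;
	printf("runtime PM active and enabled\n");
	return 0;
}

int main(void)
{
	return probe() ? 1 : 0;
}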
+diff --git a/drivers/md/md-linear.c b/drivers/md/md-linear.c
+index 369aed044b409f..d733ebee624b7e 100644
+--- a/drivers/md/md-linear.c
++++ b/drivers/md/md-linear.c
+@@ -267,6 +267,7 @@ static bool linear_make_request(struct mddev *mddev, struct bio *bio)
+ 		}
+ 
+ 		bio_chain(split, bio);
++		trace_block_split(split, bio->bi_iter.bi_sector);
+ 		submit_bio_noacct(bio);
+ 		bio = split;
+ 	}
+diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
+index 31bea72bcb01ad..db1ab214250f91 100644
+--- a/drivers/md/raid0.c
++++ b/drivers/md/raid0.c
+@@ -464,7 +464,15 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
+ 		struct bio *split = bio_split(bio,
+ 			zone->zone_end - bio->bi_iter.bi_sector, GFP_NOIO,
+ 			&mddev->bio_set);
++
++		if (IS_ERR(split)) {
++			bio->bi_status = errno_to_blk_status(PTR_ERR(split));
++			bio_endio(bio);
++			return;
++		}
++
+ 		bio_chain(split, bio);
++		trace_block_split(split, bio->bi_iter.bi_sector);
+ 		submit_bio_noacct(bio);
+ 		bio = split;
+ 		end = zone->zone_end;
+@@ -606,7 +614,15 @@ static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
+ 	if (sectors < bio_sectors(bio)) {
+ 		struct bio *split = bio_split(bio, sectors, GFP_NOIO,
+ 					      &mddev->bio_set);
++
++		if (IS_ERR(split)) {
++			bio->bi_status = errno_to_blk_status(PTR_ERR(split));
++			bio_endio(bio);
++			return true;
++		}
++
+ 		bio_chain(split, bio);
++		trace_block_split(split, bio->bi_iter.bi_sector);
+ 		raid0_map_submit_bio(mddev, bio);
+ 		bio = split;
+ 	}
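/*
 * Aside: every split site in these md fixes now checks IS_ERR(split)
 * and completes the parent bio with the translated errno instead of
 * dereferencing an error pointer.  A compilable rendition of the
 * pointer-or-errno encoding behind that pattern (simplified from the
 * kernel's ERR_PTR helpers):
 */
#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

static void *split_bio(int should_fail)
{
	static int real_split;		/* stands in for the new bio */

	return should_fail ? ERR_PTR(-ENOMEM) : &real_split;
}

int main(void)
{
	void *split = split_bio(1);

	if (IS_ERR(split)) {
		printf("split failed: %ld, end the parent bio\n", PTR_ERR(split));
		return 0;
	}
	return 1;
}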
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index faccf7344ef933..4c6b1bd6da9bb1 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -1317,7 +1317,7 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
+ 	struct raid1_info *mirror;
+ 	struct bio *read_bio;
+ 	int max_sectors;
+-	int rdisk;
++	int rdisk, error;
+ 	bool r1bio_existed = !!r1_bio;
+ 
+ 	/*
+@@ -1378,7 +1378,14 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
+ 	if (max_sectors < bio_sectors(bio)) {
+ 		struct bio *split = bio_split(bio, max_sectors,
+ 					      gfp, &conf->bio_split);
++
++		if (IS_ERR(split)) {
++			error = PTR_ERR(split);
++			goto err_handle;
++		}
++
+ 		bio_chain(split, bio);
++		trace_block_split(split, bio->bi_iter.bi_sector);
+ 		submit_bio_noacct(bio);
+ 		bio = split;
+ 		r1_bio->master_bio = bio;
+@@ -1404,6 +1411,13 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
+ 	read_bio->bi_private = r1_bio;
+ 	mddev_trace_remap(mddev, read_bio, r1_bio->sector);
+ 	submit_bio_noacct(read_bio);
++	return;
++
++err_handle:
++	atomic_dec(&mirror->rdev->nr_pending);
++	bio->bi_status = errno_to_blk_status(error);
++	set_bit(R1BIO_Uptodate, &r1_bio->state);
++	raid_end_bio_io(r1_bio);
+ }
+ 
+ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
+@@ -1411,7 +1425,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
+ {
+ 	struct r1conf *conf = mddev->private;
+ 	struct r1bio *r1_bio;
+-	int i, disks;
++	int i, disks, k, error;
+ 	unsigned long flags;
+ 	struct md_rdev *blocked_rdev;
+ 	int first_clone;
+@@ -1557,7 +1571,14 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
+ 	if (max_sectors < bio_sectors(bio)) {
+ 		struct bio *split = bio_split(bio, max_sectors,
+ 					      GFP_NOIO, &conf->bio_split);
++
++		if (IS_ERR(split)) {
++			error = PTR_ERR(split);
++			goto err_handle;
++		}
++
+ 		bio_chain(split, bio);
++		trace_block_split(split, bio->bi_iter.bi_sector);
+ 		submit_bio_noacct(bio);
+ 		bio = split;
+ 		r1_bio->master_bio = bio;
+@@ -1640,6 +1661,18 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
+ 
+ 	/* In case raid1d snuck in to freeze_array */
+ 	wake_up_barrier(conf);
++	return;
++err_handle:
++	for (k = 0; k < i; k++) {
++		if (r1_bio->bios[k]) {
++			rdev_dec_pending(conf->mirrors[k].rdev, mddev);
++			r1_bio->bios[k] = NULL;
++		}
++	}
++
++	bio->bi_status = errno_to_blk_status(error);
++	set_bit(R1BIO_Uptodate, &r1_bio->state);
++	raid_end_bio_io(r1_bio);
+ }
+ 
+ static bool raid1_make_request(struct mddev *mddev, struct bio *bio)
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index 6579bbb6a39a5d..b0062ad9b1d95f 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -1153,6 +1153,7 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
+ 	int slot = r10_bio->read_slot;
+ 	struct md_rdev *err_rdev = NULL;
+ 	gfp_t gfp = GFP_NOIO;
++	int error;
+ 
+ 	if (slot >= 0 && r10_bio->devs[slot].rdev) {
+ 		/*
+@@ -1203,7 +1204,13 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
+ 	if (max_sectors < bio_sectors(bio)) {
+ 		struct bio *split = bio_split(bio, max_sectors,
+ 					      gfp, &conf->bio_split);
++		if (IS_ERR(split)) {
++			error = PTR_ERR(split);
++			goto err_handle;
++		}
++
+ 		bio_chain(split, bio);
++		trace_block_split(split, bio->bi_iter.bi_sector);
+ 		allow_barrier(conf);
+ 		submit_bio_noacct(bio);
+ 		wait_barrier(conf, false);
+@@ -1233,6 +1240,11 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
+ 	mddev_trace_remap(mddev, read_bio, r10_bio->sector);
+ 	submit_bio_noacct(read_bio);
+ 	return;
++err_handle:
++	atomic_dec(&rdev->nr_pending);
++	bio->bi_status = errno_to_blk_status(error);
++	set_bit(R10BIO_Uptodate, &r10_bio->state);
++	raid_end_bio_io(r10_bio);
+ }
+ 
+ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
+@@ -1341,9 +1353,10 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
+ 				 struct r10bio *r10_bio)
+ {
+ 	struct r10conf *conf = mddev->private;
+-	int i;
++	int i, k;
+ 	sector_t sectors;
+ 	int max_sectors;
++	int error;
+ 
+ 	if ((mddev_is_clustered(mddev) &&
+ 	     md_cluster_ops->area_resyncing(mddev, WRITE,
+@@ -1469,7 +1482,13 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
+ 	if (r10_bio->sectors < bio_sectors(bio)) {
+ 		struct bio *split = bio_split(bio, r10_bio->sectors,
+ 					      GFP_NOIO, &conf->bio_split);
++		if (IS_ERR(split)) {
++			error = PTR_ERR(split);
++			goto err_handle;
++		}
++
+ 		bio_chain(split, bio);
++		trace_block_split(split, bio->bi_iter.bi_sector);
+ 		allow_barrier(conf);
+ 		submit_bio_noacct(bio);
+ 		wait_barrier(conf, false);
+@@ -1488,6 +1507,26 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
+ 			raid10_write_one_disk(mddev, r10_bio, bio, true, i);
+ 	}
+ 	one_write_done(r10_bio);
++	return;
++err_handle:
++	for (k = 0; k < i; k++) {
++		int d = r10_bio->devs[k].devnum;
++		struct md_rdev *rdev = conf->mirrors[d].rdev;
++		struct md_rdev *rrdev = conf->mirrors[d].replacement;
++
++		if (r10_bio->devs[k].bio) {
++			rdev_dec_pending(rdev, mddev);
++			r10_bio->devs[k].bio = NULL;
++		}
++		if (r10_bio->devs[k].repl_bio) {
++			rdev_dec_pending(rrdev, mddev);
++			r10_bio->devs[k].repl_bio = NULL;
++		}
++	}
++
++	bio->bi_status = errno_to_blk_status(error);
++	set_bit(R10BIO_Uptodate, &r10_bio->state);
++	raid_end_bio_io(r10_bio);
+ }
+ 
+ static void __make_request(struct mddev *mddev, struct bio *bio, int sectors)
+@@ -1629,7 +1668,14 @@ static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
+ 	if (remainder) {
+ 		split_size = stripe_size - remainder;
+ 		split = bio_split(bio, split_size, GFP_NOIO, &conf->bio_split);
++		if (IS_ERR(split)) {
++			bio->bi_status = errno_to_blk_status(PTR_ERR(split));
++			bio_endio(bio);
++			return 0;
++		}
++
+ 		bio_chain(split, bio);
++		trace_block_split(split, bio->bi_iter.bi_sector);
+ 		allow_barrier(conf);
+ 		/* Resend the first split part */
+ 		submit_bio_noacct(split);
+@@ -1639,7 +1685,14 @@ static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
+ 	if (remainder) {
+ 		split_size = bio_sectors(bio) - remainder;
+ 		split = bio_split(bio, split_size, GFP_NOIO, &conf->bio_split);
++		if (IS_ERR(split)) {
++			bio->bi_status = errno_to_blk_status(PTR_ERR(split));
++			bio_endio(bio);
++			return 0;
++		}
++
+ 		bio_chain(split, bio);
++		trace_block_split(split, bio->bi_iter.bi_sector);
+ 		allow_barrier(conf);
+ 		/* Resend the second split part */
+ 		submit_bio_noacct(bio);
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index 39e7596e78c0b0..4fae8ade24090f 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -5484,8 +5484,10 @@ static struct bio *chunk_aligned_read(struct mddev *mddev, struct bio *raid_bio)
+ 
+ 	if (sectors < bio_sectors(raid_bio)) {
+ 		struct r5conf *conf = mddev->private;
++
+ 		split = bio_split(raid_bio, sectors, GFP_NOIO, &conf->bio_split);
+ 		bio_chain(split, raid_bio);
++		trace_block_split(split, raid_bio->bi_iter.bi_sector);
+ 		submit_bio_noacct(raid_bio);
+ 		raid_bio = split;
+ 	}
+diff --git a/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.h b/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.h
+index 2810ebe9b5f75c..5a4676d5207935 100644
+--- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.h
++++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.h
+@@ -361,7 +361,7 @@ void mxc_isi_channel_get(struct mxc_isi_pipe *pipe);
+ void mxc_isi_channel_put(struct mxc_isi_pipe *pipe);
+ void mxc_isi_channel_enable(struct mxc_isi_pipe *pipe);
+ void mxc_isi_channel_disable(struct mxc_isi_pipe *pipe);
+-int mxc_isi_channel_chain(struct mxc_isi_pipe *pipe, bool bypass);
++int mxc_isi_channel_chain(struct mxc_isi_pipe *pipe);
+ void mxc_isi_channel_unchain(struct mxc_isi_pipe *pipe);
+ 
+ void mxc_isi_channel_config(struct mxc_isi_pipe *pipe,
+diff --git a/drivers/media/platform/nxp/imx8-isi/imx8-isi-hw.c b/drivers/media/platform/nxp/imx8-isi/imx8-isi-hw.c
+index 5623914f95e649..9225a7ac1c3ee7 100644
+--- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-hw.c
++++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-hw.c
+@@ -587,7 +587,7 @@ void mxc_isi_channel_release(struct mxc_isi_pipe *pipe)
+  *
+  * TODO: Support secondary line buffer for downscaling YUV420 images.
+  */
+-int mxc_isi_channel_chain(struct mxc_isi_pipe *pipe, bool bypass)
++int mxc_isi_channel_chain(struct mxc_isi_pipe *pipe)
+ {
+ 	/* Channel chaining requires both line and output buffer. */
+ 	const u8 resources = MXC_ISI_CHANNEL_RES_OUTPUT_BUF
+diff --git a/drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c b/drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c
+index cd6c52e9d158a7..81223d28ee56e8 100644
+--- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c
++++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c
+@@ -43,7 +43,6 @@ struct mxc_isi_m2m_ctx_queue_data {
+ 	struct v4l2_pix_format_mplane format;
+ 	const struct mxc_isi_format_info *info;
+ 	u32 sequence;
+-	bool streaming;
+ };
+ 
+ struct mxc_isi_m2m_ctx {
+@@ -236,6 +235,65 @@ static void mxc_isi_m2m_vb2_buffer_queue(struct vb2_buffer *vb2)
+ 	v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+ }
+ 
++static int mxc_isi_m2m_vb2_prepare_streaming(struct vb2_queue *q)
++{
++	struct mxc_isi_m2m_ctx *ctx = vb2_get_drv_priv(q);
++	const struct v4l2_pix_format_mplane *out_pix = &ctx->queues.out.format;
++	const struct v4l2_pix_format_mplane *cap_pix = &ctx->queues.cap.format;
++	const struct mxc_isi_format_info *cap_info = ctx->queues.cap.info;
++	const struct mxc_isi_format_info *out_info = ctx->queues.out.info;
++	struct mxc_isi_m2m *m2m = ctx->m2m;
++	int ret;
++
++	guard(mutex)(&m2m->lock);
++
++	if (m2m->usage_count == INT_MAX)
++		return -EOVERFLOW;
++
++	/*
++	 * Acquire the pipe and initialize the channel with the first user of
++	 * the M2M device.
++	 */
++	if (m2m->usage_count == 0) {
++		bool bypass = cap_pix->width == out_pix->width &&
++			      cap_pix->height == out_pix->height &&
++			      cap_info->encoding == out_info->encoding;
++
++		ret = mxc_isi_channel_acquire(m2m->pipe,
++					      &mxc_isi_m2m_frame_write_done,
++					      bypass);
++		if (ret)
++			return ret;
++
++		mxc_isi_channel_get(m2m->pipe);
++	}
++
++	m2m->usage_count++;
++
++	/*
++	 * Allocate resources for the channel, counting how many users require
++	 * buffer chaining.
++	 */
++	if (!ctx->chained && out_pix->width > MXC_ISI_MAX_WIDTH_UNCHAINED) {
++		ret = mxc_isi_channel_chain(m2m->pipe);
++		if (ret)
++			goto err_deinit;
++
++		m2m->chained_count++;
++		ctx->chained = true;
++	}
++
++	return 0;
++
++err_deinit:
++	if (--m2m->usage_count == 0) {
++		mxc_isi_channel_put(m2m->pipe);
++		mxc_isi_channel_release(m2m->pipe);
++	}
++
++	return ret;
++}
++
+ static int mxc_isi_m2m_vb2_start_streaming(struct vb2_queue *q,
+ 					   unsigned int count)
+ {
+@@ -265,6 +323,35 @@ static void mxc_isi_m2m_vb2_stop_streaming(struct vb2_queue *q)
+ 	}
+ }
+ 
++static void mxc_isi_m2m_vb2_unprepare_streaming(struct vb2_queue *q)
++{
++	struct mxc_isi_m2m_ctx *ctx = vb2_get_drv_priv(q);
++	struct mxc_isi_m2m *m2m = ctx->m2m;
++
++	guard(mutex)(&m2m->lock);
++
++	/*
++	 * If the last context is this one, reset it to make sure the device
++	 * will be reconfigured when streaming is restarted.
++	 */
++	if (m2m->last_ctx == ctx)
++		m2m->last_ctx = NULL;
++
++	/* Free the channel resources if this is the last chained context. */
++	if (ctx->chained && --m2m->chained_count == 0)
++		mxc_isi_channel_unchain(m2m->pipe);
++	ctx->chained = false;
++
++	/* Turn off the light with the last user. */
++	if (--m2m->usage_count == 0) {
++		mxc_isi_channel_disable(m2m->pipe);
++		mxc_isi_channel_put(m2m->pipe);
++		mxc_isi_channel_release(m2m->pipe);
++	}
++
++	WARN_ON(m2m->usage_count < 0);
++}
++
+ static const struct vb2_ops mxc_isi_m2m_vb2_qops = {
+ 	.queue_setup		= mxc_isi_m2m_vb2_queue_setup,
+ 	.buf_init		= mxc_isi_m2m_vb2_buffer_init,
+@@ -272,8 +359,10 @@ static const struct vb2_ops mxc_isi_m2m_vb2_qops = {
+ 	.buf_queue		= mxc_isi_m2m_vb2_buffer_queue,
+ 	.wait_prepare		= vb2_ops_wait_prepare,
+ 	.wait_finish		= vb2_ops_wait_finish,
++	.prepare_streaming	= mxc_isi_m2m_vb2_prepare_streaming,
+ 	.start_streaming	= mxc_isi_m2m_vb2_start_streaming,
+ 	.stop_streaming		= mxc_isi_m2m_vb2_stop_streaming,
++	.unprepare_streaming	= mxc_isi_m2m_vb2_unprepare_streaming,
+ };
+ 
+ static int mxc_isi_m2m_queue_init(void *priv, struct vb2_queue *src_vq,
+@@ -483,136 +572,6 @@ static int mxc_isi_m2m_s_fmt_vid(struct file *file, void *fh,
+ 	return 0;
+ }
+ 
+-static int mxc_isi_m2m_streamon(struct file *file, void *fh,
+-				enum v4l2_buf_type type)
+-{
+-	struct mxc_isi_m2m_ctx *ctx = to_isi_m2m_ctx(fh);
+-	struct mxc_isi_m2m_ctx_queue_data *q = mxc_isi_m2m_ctx_qdata(ctx, type);
+-	const struct v4l2_pix_format_mplane *out_pix = &ctx->queues.out.format;
+-	const struct v4l2_pix_format_mplane *cap_pix = &ctx->queues.cap.format;
+-	const struct mxc_isi_format_info *cap_info = ctx->queues.cap.info;
+-	const struct mxc_isi_format_info *out_info = ctx->queues.out.info;
+-	struct mxc_isi_m2m *m2m = ctx->m2m;
+-	bool bypass;
+-	int ret;
+-
+-	if (q->streaming)
+-		return 0;
+-
+-	mutex_lock(&m2m->lock);
+-
+-	if (m2m->usage_count == INT_MAX) {
+-		ret = -EOVERFLOW;
+-		goto unlock;
+-	}
+-
+-	bypass = cap_pix->width == out_pix->width &&
+-		 cap_pix->height == out_pix->height &&
+-		 cap_info->encoding == out_info->encoding;
+-
+-	/*
+-	 * Acquire the pipe and initialize the channel with the first user of
+-	 * the M2M device.
+-	 */
+-	if (m2m->usage_count == 0) {
+-		ret = mxc_isi_channel_acquire(m2m->pipe,
+-					      &mxc_isi_m2m_frame_write_done,
+-					      bypass);
+-		if (ret)
+-			goto unlock;
+-
+-		mxc_isi_channel_get(m2m->pipe);
+-	}
+-
+-	m2m->usage_count++;
+-
+-	/*
+-	 * Allocate resources for the channel, counting how many users require
+-	 * buffer chaining.
+-	 */
+-	if (!ctx->chained && out_pix->width > MXC_ISI_MAX_WIDTH_UNCHAINED) {
+-		ret = mxc_isi_channel_chain(m2m->pipe, bypass);
+-		if (ret)
+-			goto deinit;
+-
+-		m2m->chained_count++;
+-		ctx->chained = true;
+-	}
+-
+-	/*
+-	 * Drop the lock to start the stream, as the .device_run() operation
+-	 * needs to acquire it.
+-	 */
+-	mutex_unlock(&m2m->lock);
+-	ret = v4l2_m2m_ioctl_streamon(file, fh, type);
+-	if (ret) {
+-		/* Reacquire the lock for the cleanup path. */
+-		mutex_lock(&m2m->lock);
+-		goto unchain;
+-	}
+-
+-	q->streaming = true;
+-
+-	return 0;
+-
+-unchain:
+-	if (ctx->chained && --m2m->chained_count == 0)
+-		mxc_isi_channel_unchain(m2m->pipe);
+-	ctx->chained = false;
+-
+-deinit:
+-	if (--m2m->usage_count == 0) {
+-		mxc_isi_channel_put(m2m->pipe);
+-		mxc_isi_channel_release(m2m->pipe);
+-	}
+-
+-unlock:
+-	mutex_unlock(&m2m->lock);
+-	return ret;
+-}
+-
+-static int mxc_isi_m2m_streamoff(struct file *file, void *fh,
+-				 enum v4l2_buf_type type)
+-{
+-	struct mxc_isi_m2m_ctx *ctx = to_isi_m2m_ctx(fh);
+-	struct mxc_isi_m2m_ctx_queue_data *q = mxc_isi_m2m_ctx_qdata(ctx, type);
+-	struct mxc_isi_m2m *m2m = ctx->m2m;
+-
+-	v4l2_m2m_ioctl_streamoff(file, fh, type);
+-
+-	if (!q->streaming)
+-		return 0;
+-
+-	mutex_lock(&m2m->lock);
+-
+-	/*
+-	 * If the last context is this one, reset it to make sure the device
+-	 * will be reconfigured when streaming is restarted.
+-	 */
+-	if (m2m->last_ctx == ctx)
+-		m2m->last_ctx = NULL;
+-
+-	/* Free the channel resources if this is the last chained context. */
+-	if (ctx->chained && --m2m->chained_count == 0)
+-		mxc_isi_channel_unchain(m2m->pipe);
+-	ctx->chained = false;
+-
+-	/* Turn off the light with the last user. */
+-	if (--m2m->usage_count == 0) {
+-		mxc_isi_channel_disable(m2m->pipe);
+-		mxc_isi_channel_put(m2m->pipe);
+-		mxc_isi_channel_release(m2m->pipe);
+-	}
+-
+-	WARN_ON(m2m->usage_count < 0);
+-
+-	mutex_unlock(&m2m->lock);
+-
+-	q->streaming = false;
+-
+-	return 0;
+-}
+-
+ static const struct v4l2_ioctl_ops mxc_isi_m2m_ioctl_ops = {
+ 	.vidioc_querycap		= mxc_isi_m2m_querycap,
+ 
+@@ -633,8 +592,8 @@ static const struct v4l2_ioctl_ops mxc_isi_m2m_ioctl_ops = {
+ 	.vidioc_prepare_buf		= v4l2_m2m_ioctl_prepare_buf,
+ 	.vidioc_create_bufs		= v4l2_m2m_ioctl_create_bufs,
+ 
+-	.vidioc_streamon		= mxc_isi_m2m_streamon,
+-	.vidioc_streamoff		= mxc_isi_m2m_streamoff,
++	.vidioc_streamon		= v4l2_m2m_ioctl_streamon,
++	.vidioc_streamoff		= v4l2_m2m_ioctl_streamoff,
+ 
+ 	.vidioc_subscribe_event		= v4l2_ctrl_subscribe_event,
+ 	.vidioc_unsubscribe_event	= v4l2_event_unsubscribe,
+diff --git a/drivers/media/platform/nxp/imx8-isi/imx8-isi-pipe.c b/drivers/media/platform/nxp/imx8-isi/imx8-isi-pipe.c
+index d76eb58deb096b..a41c51dd9ce0f2 100644
+--- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-pipe.c
++++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-pipe.c
+@@ -855,7 +855,7 @@ int mxc_isi_pipe_acquire(struct mxc_isi_pipe *pipe,
+ 
+ 	/* Chain the channel if needed for wide resolutions. */
+ 	if (sink_fmt->width > MXC_ISI_MAX_WIDTH_UNCHAINED) {
+-		ret = mxc_isi_channel_chain(pipe, bypass);
++		ret = mxc_isi_channel_chain(pipe);
+ 		if (ret)
+ 			mxc_isi_channel_release(pipe);
+ 	}
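/*
 * Aside: the new prepare/unprepare handlers use guard(mutex), which
 * unlocks automatically on every return path, including the early
 * error returns.  That is built on the compiler's cleanup attribute; a
 * standalone GCC/Clang sketch with a pthread mutex (macro and names
 * are illustrative, not the kernel's guard() implementation):
 */
#include <pthread.h>

static void unlock_cleanup(pthread_mutex_t **m)
{
	pthread_mutex_unlock(*m);
}

#define scoped_guard_mutex(lock) \
	pthread_mutex_t *__guard __attribute__((cleanup(unlock_cleanup), unused)) = \
		(pthread_mutex_lock(lock), (lock))

static pthread_mutex_t m2m_lock = PTHREAD_MUTEX_INITIALIZER;

static int prepare(int usage_count)
{
	scoped_guard_mutex(&m2m_lock);

	if (usage_count < 0)
		return -1;	/* early return: the mutex is still released */
	return 0;
}

int main(void)
{
	return prepare(0);
}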
+diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
+index dbcf17fb3ef256..f31a91ec7a6d08 100644
+--- a/drivers/net/can/m_can/m_can.c
++++ b/drivers/net/can/m_can/m_can.c
+@@ -812,6 +812,9 @@ static int m_can_handle_state_change(struct net_device *dev,
+ 	u32 timestamp = 0;
+ 
+ 	switch (new_state) {
++	case CAN_STATE_ERROR_ACTIVE:
++		cdev->can.state = CAN_STATE_ERROR_ACTIVE;
++		break;
+ 	case CAN_STATE_ERROR_WARNING:
+ 		/* error warning state */
+ 		cdev->can.can_stats.error_warning++;
+@@ -841,6 +844,12 @@ static int m_can_handle_state_change(struct net_device *dev,
+ 	__m_can_get_berr_counter(dev, &bec);
+ 
+ 	switch (new_state) {
++	case CAN_STATE_ERROR_ACTIVE:
++		cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
++		cf->data[1] = CAN_ERR_CRTL_ACTIVE;
++		cf->data[6] = bec.txerr;
++		cf->data[7] = bec.rxerr;
++		break;
+ 	case CAN_STATE_ERROR_WARNING:
+ 		/* error warning state */
+ 		cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
+@@ -877,30 +886,33 @@ static int m_can_handle_state_change(struct net_device *dev,
+ 	return 1;
+ }
+ 
+-static int m_can_handle_state_errors(struct net_device *dev, u32 psr)
++static enum can_state
++m_can_state_get_by_psr(struct m_can_classdev *cdev)
+ {
+-	struct m_can_classdev *cdev = netdev_priv(dev);
+-	int work_done = 0;
++	u32 reg_psr;
+ 
+-	if (psr & PSR_EW && cdev->can.state != CAN_STATE_ERROR_WARNING) {
+-		netdev_dbg(dev, "entered error warning state\n");
+-		work_done += m_can_handle_state_change(dev,
+-						       CAN_STATE_ERROR_WARNING);
+-	}
++	reg_psr = m_can_read(cdev, M_CAN_PSR);
+ 
+-	if (psr & PSR_EP && cdev->can.state != CAN_STATE_ERROR_PASSIVE) {
+-		netdev_dbg(dev, "entered error passive state\n");
+-		work_done += m_can_handle_state_change(dev,
+-						       CAN_STATE_ERROR_PASSIVE);
+-	}
++	if (reg_psr & PSR_BO)
++		return CAN_STATE_BUS_OFF;
++	if (reg_psr & PSR_EP)
++		return CAN_STATE_ERROR_PASSIVE;
++	if (reg_psr & PSR_EW)
++		return CAN_STATE_ERROR_WARNING;
+ 
+-	if (psr & PSR_BO && cdev->can.state != CAN_STATE_BUS_OFF) {
+-		netdev_dbg(dev, "entered error bus off state\n");
+-		work_done += m_can_handle_state_change(dev,
+-						       CAN_STATE_BUS_OFF);
+-	}
++	return CAN_STATE_ERROR_ACTIVE;
++}
+ 
+-	return work_done;
++static int m_can_handle_state_errors(struct net_device *dev)
++{
++	struct m_can_classdev *cdev = netdev_priv(dev);
++	enum can_state new_state;
++
++	new_state = m_can_state_get_by_psr(cdev);
++	if (new_state == cdev->can.state)
++		return 0;
++
++	return m_can_handle_state_change(dev, new_state);
+ }
+ 
+ static void m_can_handle_other_err(struct net_device *dev, u32 irqstatus)
+@@ -1031,8 +1043,7 @@ static int m_can_rx_handler(struct net_device *dev, int quota, u32 irqstatus)
+ 	}
+ 
+ 	if (irqstatus & IR_ERR_STATE)
+-		work_done += m_can_handle_state_errors(dev,
+-						       m_can_read(cdev, M_CAN_PSR));
++		work_done += m_can_handle_state_errors(dev);
+ 
+ 	if (irqstatus & IR_ERR_BUS_30X)
+ 		work_done += m_can_handle_bus_errors(dev, irqstatus,
+@@ -1606,7 +1617,7 @@ static int m_can_start(struct net_device *dev)
+ 	netdev_queue_set_dql_min_limit(netdev_get_tx_queue(cdev->net, 0),
+ 				       cdev->tx_max_coalesced_frames);
+ 
+-	cdev->can.state = CAN_STATE_ERROR_ACTIVE;
++	cdev->can.state = m_can_state_get_by_psr(cdev);
+ 
+ 	m_can_enable_all_interrupts(cdev);
+ 
+@@ -1785,6 +1796,13 @@ static void m_can_stop(struct net_device *dev)
+ 
+ 	/* set the state as STOPPED */
+ 	cdev->can.state = CAN_STATE_STOPPED;
++
++	if (cdev->ops->deinit) {
++		ret = cdev->ops->deinit(cdev);
++		if (ret)
++			netdev_err(dev, "failed to deinitialize: %pe\n",
++				   ERR_PTR(ret));
++	}
+ }
+ 
+ static int m_can_close(struct net_device *dev)
+@@ -2467,6 +2485,7 @@ int m_can_class_suspend(struct device *dev)
+ {
+ 	struct m_can_classdev *cdev = dev_get_drvdata(dev);
+ 	struct net_device *ndev = cdev->net;
++	int ret = 0;
+ 
+ 	if (netif_running(ndev)) {
+ 		netif_stop_queue(ndev);
+@@ -2479,18 +2498,20 @@ int m_can_class_suspend(struct device *dev)
+ 		if (cdev->pm_wake_source) {
+ 			hrtimer_cancel(&cdev->hrtimer);
+ 			m_can_write(cdev, M_CAN_IE, IR_RF0N);
++
++			if (cdev->ops->deinit)
++				ret = cdev->ops->deinit(cdev);
+ 		} else {
+ 			m_can_stop(ndev);
+ 		}
+ 
+ 		m_can_clk_stop(cdev);
++		cdev->can.state = CAN_STATE_SLEEPING;
+ 	}
+ 
+ 	pinctrl_pm_select_sleep_state(dev);
+ 
+-	cdev->can.state = CAN_STATE_SLEEPING;
+-
+-	return 0;
++	return ret;
+ }
+ EXPORT_SYMBOL_GPL(m_can_class_suspend);
+ 
+@@ -2498,14 +2519,11 @@ int m_can_class_resume(struct device *dev)
+ {
+ 	struct m_can_classdev *cdev = dev_get_drvdata(dev);
+ 	struct net_device *ndev = cdev->net;
++	int ret = 0;
+ 
+ 	pinctrl_pm_select_default_state(dev);
+ 
+-	cdev->can.state = CAN_STATE_ERROR_ACTIVE;
+-
+ 	if (netif_running(ndev)) {
+-		int ret;
+-
+ 		ret = m_can_clk_start(cdev);
+ 		if (ret)
+ 			return ret;
+@@ -2518,6 +2536,12 @@ int m_can_class_resume(struct device *dev)
+ 			 * again.
+ 			 */
+ 			cdev->active_interrupts |= IR_RF0N | IR_TEFN;
++
++			if (cdev->ops->init)
++				ret = cdev->ops->init(cdev);
++
++			cdev->can.state = m_can_state_get_by_psr(cdev);
++
+ 			m_can_write(cdev, M_CAN_IE, cdev->active_interrupts);
+ 		} else {
+ 			ret  = m_can_start(ndev);
+@@ -2531,7 +2555,7 @@ int m_can_class_resume(struct device *dev)
+ 		netif_start_queue(ndev);
+ 	}
+ 
+-	return 0;
++	return ret;
+ }
+ EXPORT_SYMBOL_GPL(m_can_class_resume);
+ 
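/*
 * Aside: m_can_state_get_by_psr() above derives the CAN state from PSR
 * with a strict severity order: bus-off outranks error-passive, which
 * outranks error-warning.  A compilable sketch of that priority decode
 * (the bit positions are placeholders, not the real register layout):
 */
#include <stdio.h>

#define PSR_EW	(1u << 0)
#define PSR_EP	(1u << 1)
#define PSR_BO	(1u << 2)

enum can_state { ERROR_ACTIVE, ERROR_WARNING, ERROR_PASSIVE, BUS_OFF };

static enum can_state state_from_psr(unsigned int psr)
{
	if (psr & PSR_BO)
		return BUS_OFF;		/* most severe condition wins */
	if (psr & PSR_EP)
		return ERROR_PASSIVE;
	if (psr & PSR_EW)
		return ERROR_WARNING;
	return ERROR_ACTIVE;
}

int main(void)
{
	/* EP and EW both set: passive must win over warning */
	printf("%d\n", state_from_psr(PSR_EP | PSR_EW));
	return 0;
}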
+diff --git a/drivers/net/can/m_can/m_can.h b/drivers/net/can/m_can/m_can.h
+index ef39e8e527ab67..bd4746c63af3f0 100644
+--- a/drivers/net/can/m_can/m_can.h
++++ b/drivers/net/can/m_can/m_can.h
+@@ -68,6 +68,7 @@ struct m_can_ops {
+ 	int (*write_fifo)(struct m_can_classdev *cdev, int addr_offset,
+ 			  const void *val, size_t val_count);
+ 	int (*init)(struct m_can_classdev *cdev);
++	int (*deinit)(struct m_can_classdev *cdev);
+ };
+ 
+ struct m_can_tx_op {
+diff --git a/drivers/net/can/m_can/m_can_platform.c b/drivers/net/can/m_can/m_can_platform.c
+index b832566efda042..057eaa7b8b4b29 100644
+--- a/drivers/net/can/m_can/m_can_platform.c
++++ b/drivers/net/can/m_can/m_can_platform.c
+@@ -180,7 +180,7 @@ static void m_can_plat_remove(struct platform_device *pdev)
+ 	struct m_can_classdev *mcan_class = &priv->cdev;
+ 
+ 	m_can_class_unregister(mcan_class);
+-
++	pm_runtime_disable(mcan_class->dev);
+ 	m_can_class_free_dev(mcan_class->net);
+ }
+ 
+diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
+index b6f4de375df75d..fb904dc28b8eea 100644
+--- a/drivers/net/can/usb/gs_usb.c
++++ b/drivers/net/can/usb/gs_usb.c
+@@ -286,11 +286,6 @@ struct gs_host_frame {
+ #define GS_MAX_RX_URBS 30
+ #define GS_NAPI_WEIGHT 32
+ 
+-/* Maximum number of interfaces the driver supports per device.
+- * Current hardware only supports 3 interfaces. The future may vary.
+- */
+-#define GS_MAX_INTF 3
+-
+ struct gs_tx_context {
+ 	struct gs_can *dev;
+ 	unsigned int echo_id;
+@@ -321,7 +316,6 @@ struct gs_can {
+ 
+ /* usb interface struct */
+ struct gs_usb {
+-	struct gs_can *canch[GS_MAX_INTF];
+ 	struct usb_anchor rx_submitted;
+ 	struct usb_device *udev;
+ 
+@@ -333,9 +327,11 @@ struct gs_usb {
+ 
+ 	unsigned int hf_size_rx;
+ 	u8 active_channels;
++	u8 channel_cnt;
+ 
+ 	unsigned int pipe_in;
+ 	unsigned int pipe_out;
++	struct gs_can *canch[] __counted_by(channel_cnt);
+ };
+ 
+ /* 'allocate' a tx context.
+@@ -596,7 +592,7 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
+ 	}
+ 
+ 	/* device reports out of range channel id */
+-	if (hf->channel >= GS_MAX_INTF)
++	if (hf->channel >= parent->channel_cnt)
+ 		goto device_detach;
+ 
+ 	dev = parent->canch[hf->channel];
+@@ -696,7 +692,7 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
+ 	/* USB failure take down all interfaces */
+ 	if (rc == -ENODEV) {
+ device_detach:
+-		for (rc = 0; rc < GS_MAX_INTF; rc++) {
++		for (rc = 0; rc < parent->channel_cnt; rc++) {
+ 			if (parent->canch[rc])
+ 				netif_device_detach(parent->canch[rc]->netdev);
+ 		}
+@@ -1246,6 +1242,7 @@ static struct gs_can *gs_make_candev(unsigned int channel,
+ 
+ 	netdev->flags |= IFF_ECHO; /* we support full roundtrip echo */
+ 	netdev->dev_id = channel;
++	netdev->dev_port = channel;
+ 
+ 	/* dev setup */
+ 	strcpy(dev->bt_const.name, KBUILD_MODNAME);
+@@ -1457,17 +1454,19 @@ static int gs_usb_probe(struct usb_interface *intf,
+ 	icount = dconf.icount + 1;
+ 	dev_info(&intf->dev, "Configuring for %u interfaces\n", icount);
+ 
+-	if (icount > GS_MAX_INTF) {
++	if (icount > type_max(parent->channel_cnt)) {
+ 		dev_err(&intf->dev,
+ 			"Driver cannot handle more that %u CAN interfaces\n",
+-			GS_MAX_INTF);
++			type_max(parent->channel_cnt));
+ 		return -EINVAL;
+ 	}
+ 
+-	parent = kzalloc(sizeof(*parent), GFP_KERNEL);
++	parent = kzalloc(struct_size(parent, canch, icount), GFP_KERNEL);
+ 	if (!parent)
+ 		return -ENOMEM;
+ 
++	parent->channel_cnt = icount;
++
+ 	init_usb_anchor(&parent->rx_submitted);
+ 
+ 	usb_set_intfdata(intf, parent);
+@@ -1528,7 +1527,7 @@ static void gs_usb_disconnect(struct usb_interface *intf)
+ 		return;
+ 	}
+ 
+-	for (i = 0; i < GS_MAX_INTF; i++)
++	for (i = 0; i < parent->channel_cnt; i++)
+ 		if (parent->canch[i])
+ 			gs_destroy_candev(parent->canch[i]);
+ 
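/*
 * Aside: gs_usb now sizes the channel array from the device-reported
 * count via struct_size() and annotates it with __counted_by() for
 * fortified bounds checking.  A userspace approximation of that
 * flexible-array allocation (this struct_size() is a simplified
 * stand-in; the kernel version also checks for overflow):
 */
#include <stdlib.h>

struct chan { int id; };

struct parent {
	unsigned char channel_cnt;
	struct chan *canch[];	/* flexible array: channel_cnt entries */
};

#define struct_size(p, member, n) \
	(sizeof(*(p)) + sizeof((p)->member[0]) * (size_t)(n))

int main(void)
{
	unsigned int icount = 3;
	struct parent *parent;

	parent = calloc(1, struct_size(parent, canch, icount));
	if (!parent)
		return 1;
	parent->channel_cnt = icount;

	for (unsigned int i = 0; i < parent->channel_cnt; i++)
		parent->canch[i] = NULL;

	free(parent);
	return 0;
}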
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+index 8bc49259d71af1..32a6d52614242c 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+@@ -1172,7 +1172,6 @@ static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
+ 
+ static int xgbe_phy_reset(struct xgbe_prv_data *pdata)
+ {
+-	pdata->phy_link = -1;
+ 	pdata->phy_speed = SPEED_UNKNOWN;
+ 
+ 	return pdata->phy_if.phy_reset(pdata);
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+index ed76a8df6ec6ed..75e9cb3fc7aa66 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+@@ -1664,6 +1664,7 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
+ 		pdata->phy.duplex = DUPLEX_FULL;
+ 	}
+ 
++	pdata->phy_link = 0;
+ 	pdata->phy.link = 0;
+ 
+ 	pdata->phy.pause_autoneg = pdata->pause_autoneg;
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index 717e110d23c914..dc170feee8ad7c 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -5803,7 +5803,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
+ 	u32 current_speed = SPEED_UNKNOWN;
+ 	u8 current_duplex = DUPLEX_UNKNOWN;
+ 	bool current_link_up = false;
+-	u32 local_adv, remote_adv, sgsr;
++	u32 local_adv = 0, remote_adv = 0, sgsr;
+ 
+ 	if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
+ 	     tg3_asic_rev(tp) == ASIC_REV_5720) &&
+@@ -5944,9 +5944,6 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
+ 		else
+ 			current_duplex = DUPLEX_HALF;
+ 
+-		local_adv = 0;
+-		remote_adv = 0;
+-
+ 		if (bmcr & BMCR_ANENABLE) {
+ 			u32 common;
+ 
+diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
+index 92856cf387c76f..7c9658a4ec4b5f 100644
+--- a/drivers/net/ethernet/dlink/dl2k.c
++++ b/drivers/net/ethernet/dlink/dl2k.c
+@@ -498,25 +498,34 @@ static int alloc_list(struct net_device *dev)
+ 	for (i = 0; i < RX_RING_SIZE; i++) {
+ 		/* Allocate a fixed-size skbuff */
+ 		struct sk_buff *skb;
++		dma_addr_t addr;
+ 
+ 		skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
+ 		np->rx_skbuff[i] = skb;
+-		if (!skb) {
+-			free_list(dev);
+-			return -ENOMEM;
+-		}
++		if (!skb)
++			goto err_free_list;
++
++		addr = dma_map_single(&np->pdev->dev, skb->data,
++				      np->rx_buf_sz, DMA_FROM_DEVICE);
++		if (dma_mapping_error(&np->pdev->dev, addr))
++			goto err_kfree_skb;
+ 
+ 		np->rx_ring[i].next_desc = cpu_to_le64(np->rx_ring_dma +
+ 						((i + 1) % RX_RING_SIZE) *
+ 						sizeof(struct netdev_desc));
+ 		/* Rubicon now supports 40 bits of addressing space. */
+-		np->rx_ring[i].fraginfo =
+-		    cpu_to_le64(dma_map_single(&np->pdev->dev, skb->data,
+-					       np->rx_buf_sz, DMA_FROM_DEVICE));
++		np->rx_ring[i].fraginfo = cpu_to_le64(addr);
+ 		np->rx_ring[i].fraginfo |= cpu_to_le64((u64)np->rx_buf_sz << 48);
+ 	}
+ 
+ 	return 0;
++
++err_kfree_skb:
++	dev_kfree_skb(np->rx_skbuff[i]);
++	np->rx_skbuff[i] = NULL;
++err_free_list:
++	free_list(dev);
++	return -ENOMEM;
+ }
+ 
+ static void rio_hw_init(struct net_device *dev)
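/*
 * Aside: alloc_list() above gains a dma_mapping_error() check and
 * unwinds in two stages: drop the buffer that failed to map, then tear
 * down everything built before it.  A generic sketch of that staged
 * goto unwind, with malloc() standing in for the skb and the mapping
 * (illustrative only):
 */
#include <stdlib.h>

#define RING_SIZE 8

static void *ring[RING_SIZE];

static int alloc_ring(int fail_at)
{
	int i;

	for (i = 0; i < RING_SIZE; i++) {
		ring[i] = malloc(64);		/* stands in for the skb */
		if (!ring[i])
			goto err_free_list;
		if (i == fail_at)		/* injected "mapping failed" */
			goto err_free_buf;
	}
	return 0;

err_free_buf:
	free(ring[i]);				/* the unmapped buffer goes first */
	ring[i] = NULL;
err_free_list:
	while (--i >= 0) {			/* then everything before it */
		free(ring[i]);
		ring[i] = NULL;
	}
	return -1;
}

int main(void)
{
	return alloc_ring(5) == -1 ? 0 : 1;	/* injected failure must unwind */
}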
+diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
+index 5f08779c0e4e31..e177d1d58696aa 100644
+--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
++++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
+@@ -1,5 +1,5 @@
+ /* SPDX-License-Identifier: GPL-2.0 */
+-/* Copyright(c) 1999 - 2018 Intel Corporation. */
++/* Copyright(c) 1999 - 2024 Intel Corporation. */
+ 
+ #ifndef _IXGBEVF_DEFINES_H_
+ #define _IXGBEVF_DEFINES_H_
+@@ -16,6 +16,9 @@
+ #define IXGBE_DEV_ID_X550_VF_HV		0x1564
+ #define IXGBE_DEV_ID_X550EM_X_VF_HV	0x15A9
+ 
++#define IXGBE_DEV_ID_E610_VF		0x57AD
++#define IXGBE_SUBDEV_ID_E610_VF_HV	0x00FF
++
+ #define IXGBE_VF_IRQ_CLEAR_MASK		7
+ #define IXGBE_VF_MAX_TX_QUEUES		8
+ #define IXGBE_VF_MAX_RX_QUEUES		8
+@@ -25,6 +28,7 @@
+ 
+ /* Link speed */
+ typedef u32 ixgbe_link_speed;
++#define IXGBE_LINK_SPEED_UNKNOWN	0
+ #define IXGBE_LINK_SPEED_1GB_FULL	0x0020
+ #define IXGBE_LINK_SPEED_10GB_FULL	0x0080
+ #define IXGBE_LINK_SPEED_100_FULL	0x0008
+diff --git a/drivers/net/ethernet/intel/ixgbevf/ipsec.c b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
+index f804b35d79c726..83b3243f172c72 100644
+--- a/drivers/net/ethernet/intel/ixgbevf/ipsec.c
++++ b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
+@@ -271,6 +271,9 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs,
+ 	adapter = netdev_priv(dev);
+ 	ipsec = adapter->ipsec;
+ 
++	if (!(adapter->pf_features & IXGBEVF_PF_SUP_IPSEC))
++		return -EOPNOTSUPP;
++
+ 	if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) {
+ 		NL_SET_ERR_MSG_MOD(extack, "Unsupported protocol for IPsec offload");
+ 		return -EINVAL;
+@@ -400,6 +403,9 @@ static void ixgbevf_ipsec_del_sa(struct xfrm_state *xs)
+ 	adapter = netdev_priv(dev);
+ 	ipsec = adapter->ipsec;
+ 
++	if (!(adapter->pf_features & IXGBEVF_PF_SUP_IPSEC))
++		return;
++
+ 	if (xs->xso.dir == XFRM_DEV_OFFLOAD_IN) {
+ 		sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_RX_INDEX;
+ 
+@@ -628,6 +634,10 @@ void ixgbevf_init_ipsec_offload(struct ixgbevf_adapter *adapter)
+ 	size_t size;
+ 
+ 	switch (adapter->hw.api_version) {
++	case ixgbe_mbox_api_17:
++		if (!(adapter->pf_features & IXGBEVF_PF_SUP_IPSEC))
++			return;
++		break;
+ 	case ixgbe_mbox_api_14:
+ 		break;
+ 	default:
+diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+index 130cb868774c40..f31068e24e867f 100644
+--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
++++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+@@ -1,5 +1,5 @@
+ /* SPDX-License-Identifier: GPL-2.0 */
+-/* Copyright(c) 1999 - 2018 Intel Corporation. */
++/* Copyright(c) 1999 - 2024 Intel Corporation. */
+ 
+ #ifndef _IXGBEVF_H_
+ #define _IXGBEVF_H_
+@@ -366,6 +366,13 @@ struct ixgbevf_adapter {
+ 	/* Interrupt Throttle Rate */
+ 	u32 eitr_param;
+ 
++	u32 pf_features;
++#define IXGBEVF_PF_SUP_IPSEC		BIT(0)
++#define IXGBEVF_PF_SUP_ESX_MBX		BIT(1)
++
++#define IXGBEVF_SUPPORTED_FEATURES	(IXGBEVF_PF_SUP_IPSEC | \
++					IXGBEVF_PF_SUP_ESX_MBX)
++
+ 	struct ixgbevf_hw_stats stats;
+ 
+ 	unsigned long state;
+@@ -418,6 +425,8 @@ enum ixgbevf_boards {
+ 	board_X550EM_x_vf,
+ 	board_X550EM_x_vf_hv,
+ 	board_x550em_a_vf,
++	board_e610_vf,
++	board_e610_vf_hv,
+ };
+ 
+ enum ixgbevf_xcast_modes {
+@@ -434,11 +443,13 @@ extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_info;
+ extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops;
+ extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops_legacy;
+ extern const struct ixgbevf_info ixgbevf_x550em_a_vf_info;
++extern const struct ixgbevf_info ixgbevf_e610_vf_info;
+ 
+ extern const struct ixgbevf_info ixgbevf_82599_vf_hv_info;
+ extern const struct ixgbevf_info ixgbevf_X540_vf_hv_info;
+ extern const struct ixgbevf_info ixgbevf_X550_vf_hv_info;
+ extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_hv_info;
++extern const struct ixgbevf_info ixgbevf_e610_vf_hv_info;
+ extern const struct ixgbe_mbx_operations ixgbevf_hv_mbx_ops;
+ 
+ /* needed by ethtool.c */
+diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+index 149911e3002a22..a2a7cb6d8ea181 100644
+--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
++++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+@@ -1,5 +1,5 @@
+ // SPDX-License-Identifier: GPL-2.0
+-/* Copyright(c) 1999 - 2018 Intel Corporation. */
++/* Copyright(c) 1999 - 2024 Intel Corporation. */
+ 
+ /******************************************************************************
+  Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
+@@ -39,7 +39,7 @@ static const char ixgbevf_driver_string[] =
+ 	"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
+ 
+ static char ixgbevf_copyright[] =
+-	"Copyright (c) 2009 - 2018 Intel Corporation.";
++	"Copyright (c) 2009 - 2024 Intel Corporation.";
+ 
+ static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
+ 	[board_82599_vf]	= &ixgbevf_82599_vf_info,
+@@ -51,6 +51,8 @@ static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
+ 	[board_X550EM_x_vf]	= &ixgbevf_X550EM_x_vf_info,
+ 	[board_X550EM_x_vf_hv]	= &ixgbevf_X550EM_x_vf_hv_info,
+ 	[board_x550em_a_vf]	= &ixgbevf_x550em_a_vf_info,
++	[board_e610_vf]		= &ixgbevf_e610_vf_info,
++	[board_e610_vf_hv]	= &ixgbevf_e610_vf_hv_info,
+ };
+ 
+ /* ixgbevf_pci_tbl - PCI Device ID Table
+@@ -71,6 +73,9 @@ static const struct pci_device_id ixgbevf_pci_tbl[] = {
+ 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF), board_X550EM_x_vf },
+ 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF_HV), board_X550EM_x_vf_hv},
+ 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_VF), board_x550em_a_vf },
++	{PCI_VDEVICE_SUB(INTEL, IXGBE_DEV_ID_E610_VF, PCI_ANY_ID,
++			 IXGBE_SUBDEV_ID_E610_VF_HV), board_e610_vf_hv},
++	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_E610_VF), board_e610_vf},
+ 	/* required last entry */
+ 	{0, }
+ };
+@@ -2269,10 +2274,36 @@ static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
+ 	adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
+ }
+ 
++/**
++ * ixgbevf_set_features - Set features supported by PF
++ * @adapter: pointer to the adapter struct
++ *
++ * Negotiate supported features with the PF and set pf_features accordingly.
++ */
++static void ixgbevf_set_features(struct ixgbevf_adapter *adapter)
++{
++	u32 *pf_features = &adapter->pf_features;
++	struct ixgbe_hw *hw = &adapter->hw;
++	int err;
++
++	err = hw->mac.ops.negotiate_features(hw, pf_features);
++	if (err && err != -EOPNOTSUPP)
++		netdev_dbg(adapter->netdev,
++			   "PF feature negotiation failed.\n");
++
++	/* Also handle the pre-1.7 API cases */
++	if (hw->api_version == ixgbe_mbox_api_14)
++		*pf_features |= IXGBEVF_PF_SUP_IPSEC;
++	else if (hw->api_version == ixgbe_mbox_api_15)
++		*pf_features |= IXGBEVF_PF_SUP_ESX_MBX;
++}
++
+ static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
+ {
+ 	struct ixgbe_hw *hw = &adapter->hw;
+ 	static const int api[] = {
++		ixgbe_mbox_api_17,
++		ixgbe_mbox_api_16,
+ 		ixgbe_mbox_api_15,
+ 		ixgbe_mbox_api_14,
+ 		ixgbe_mbox_api_13,
+@@ -2292,7 +2323,9 @@ static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
+ 		idx++;
+ 	}
+ 
+-	if (hw->api_version >= ixgbe_mbox_api_15) {
++	ixgbevf_set_features(adapter);
++
++	if (adapter->pf_features & IXGBEVF_PF_SUP_ESX_MBX) {
+ 		hw->mbx.ops.init_params(hw);
+ 		memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
+ 		       sizeof(struct ixgbe_mbx_operations));
+@@ -2649,6 +2682,8 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
+ 		case ixgbe_mbox_api_13:
+ 		case ixgbe_mbox_api_14:
+ 		case ixgbe_mbox_api_15:
++		case ixgbe_mbox_api_16:
++		case ixgbe_mbox_api_17:
+ 			if (adapter->xdp_prog &&
+ 			    hw->mac.max_tx_queues == rss)
+ 				rss = rss > 3 ? 2 : 1;
+@@ -4643,6 +4678,8 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	case ixgbe_mbox_api_13:
+ 	case ixgbe_mbox_api_14:
+ 	case ixgbe_mbox_api_15:
++	case ixgbe_mbox_api_16:
++	case ixgbe_mbox_api_17:
+ 		netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE -
+ 				  (ETH_HLEN + ETH_FCS_LEN);
+ 		break;
+@@ -4693,6 +4730,9 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	case ixgbe_mac_X540_vf:
+ 		dev_info(&pdev->dev, "Intel(R) X540 Virtual Function\n");
+ 		break;
++	case ixgbe_mac_e610_vf:
++		dev_info(&pdev->dev, "Intel(R) E610 Virtual Function\n");
++		break;
+ 	case ixgbe_mac_82599_vf:
+ 	default:
+ 		dev_info(&pdev->dev, "Intel(R) 82599 Virtual Function\n");
+diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.h b/drivers/net/ethernet/intel/ixgbevf/mbx.h
+index 835bbcc5cc8e63..a8ed23ee66aa84 100644
+--- a/drivers/net/ethernet/intel/ixgbevf/mbx.h
++++ b/drivers/net/ethernet/intel/ixgbevf/mbx.h
+@@ -66,6 +66,8 @@ enum ixgbe_pfvf_api_rev {
+ 	ixgbe_mbox_api_13,	/* API version 1.3, linux/freebsd VF driver */
+ 	ixgbe_mbox_api_14,	/* API version 1.4, linux/freebsd VF driver */
+ 	ixgbe_mbox_api_15,	/* API version 1.5, linux/freebsd VF driver */
++	ixgbe_mbox_api_16,	/* API version 1.6, linux/freebsd VF driver */
++	ixgbe_mbox_api_17,	/* API version 1.7, linux/freebsd VF driver */
+ 	/* This value should always be last */
+ 	ixgbe_mbox_api_unknown,	/* indicates that API version is not known */
+ };
+@@ -102,6 +104,12 @@ enum ixgbe_pfvf_api_rev {
+ 
+ #define IXGBE_VF_GET_LINK_STATE 0x10 /* get vf link state */
+ 
++/* mailbox API, version 1.6 VF requests */
++#define IXGBE_VF_GET_PF_LINK_STATE	0x11 /* request PF to send link info */
++
++/* mailbox API, version 1.7 VF requests */
++#define IXGBE_VF_FEATURES_NEGOTIATE	0x12 /* get features supported by PF */
++
+ /* length of permanent address message returned from PF */
+ #define IXGBE_VF_PERMADDR_MSG_LEN	4
+ /* word in permanent address message with the current multicast type */
+diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
+index 1641d00d8ed35c..65257107dfc8a4 100644
+--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
++++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
+@@ -1,5 +1,5 @@
+ // SPDX-License-Identifier: GPL-2.0
+-/* Copyright(c) 1999 - 2018 Intel Corporation. */
++/* Copyright(c) 1999 - 2024 Intel Corporation. */
+ 
+ #include "vf.h"
+ #include "ixgbevf.h"
+@@ -313,6 +313,8 @@ int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues)
+ 	 * is not supported for this device type.
+ 	 */
+ 	switch (hw->api_version) {
++	case ixgbe_mbox_api_17:
++	case ixgbe_mbox_api_16:
+ 	case ixgbe_mbox_api_15:
+ 	case ixgbe_mbox_api_14:
+ 	case ixgbe_mbox_api_13:
+@@ -382,6 +384,8 @@ int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key)
+ 	 * or if the operation is not supported for this device type.
+ 	 */
+ 	switch (hw->api_version) {
++	case ixgbe_mbox_api_17:
++	case ixgbe_mbox_api_16:
+ 	case ixgbe_mbox_api_15:
+ 	case ixgbe_mbox_api_14:
+ 	case ixgbe_mbox_api_13:
+@@ -552,6 +556,8 @@ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
+ 	case ixgbe_mbox_api_13:
+ 	case ixgbe_mbox_api_14:
+ 	case ixgbe_mbox_api_15:
++	case ixgbe_mbox_api_16:
++	case ixgbe_mbox_api_17:
+ 		break;
+ 	default:
+ 		return -EOPNOTSUPP;
+@@ -624,6 +630,85 @@ static s32 ixgbevf_hv_get_link_state_vf(struct ixgbe_hw *hw, bool *link_state)
+ 	return -EOPNOTSUPP;
+ }
+ 
++/**
++ * ixgbevf_get_pf_link_state - Get PF's link status
++ * @hw: pointer to the HW structure
++ * @speed: link speed
++ * @link_up: indicate if link is up/down
++ *
++ * Ask PF to provide link_up state and speed of the link.
++ *
++ * Return: IXGBE_ERR_MBX in the case of mailbox error,
++ * -EOPNOTSUPP if the op is not supported or 0 on success.
++ */
++static int ixgbevf_get_pf_link_state(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
++				     bool *link_up)
++{
++	u32 msgbuf[3] = {};
++	int err;
++
++	switch (hw->api_version) {
++	case ixgbe_mbox_api_16:
++	case ixgbe_mbox_api_17:
++		break;
++	default:
++		return -EOPNOTSUPP;
++	}
++
++	msgbuf[0] = IXGBE_VF_GET_PF_LINK_STATE;
++
++	err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
++					 ARRAY_SIZE(msgbuf));
++	if (err || (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE)) {
++		err = IXGBE_ERR_MBX;
++		*speed = IXGBE_LINK_SPEED_UNKNOWN;
++		/* No need to set @link_up to false as it will be done by
++		 * ixgbevf_check_mac_link_vf().
++		 */
++	} else {
++		*speed = msgbuf[1];
++		*link_up = msgbuf[2];
++	}
++
++	return err;
++}
++
++/**
++ * ixgbevf_negotiate_features_vf - negotiate supported features with PF driver
++ * @hw: pointer to the HW structure
++ * @pf_features: bitmask of features supported by PF
++ *
++ * Return: IXGBE_ERR_MBX in the case of mailbox error,
++ * -EOPNOTSUPP if the op is not supported or 0 on success.
++ */
++static int ixgbevf_negotiate_features_vf(struct ixgbe_hw *hw, u32 *pf_features)
++{
++	u32 msgbuf[2] = {};
++	int err;
++
++	switch (hw->api_version) {
++	case ixgbe_mbox_api_17:
++		break;
++	default:
++		return -EOPNOTSUPP;
++	}
++
++	msgbuf[0] = IXGBE_VF_FEATURES_NEGOTIATE;
++	msgbuf[1] = IXGBEVF_SUPPORTED_FEATURES;
++
++	err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
++					 ARRAY_SIZE(msgbuf));
++
++	if (err || (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE)) {
++		err = IXGBE_ERR_MBX;
++		*pf_features = 0x0;
++	} else {
++		*pf_features = msgbuf[1];
++	}
++
++	return err;
++}
++
+ /**
+  *  ixgbevf_set_vfta_vf - Set/Unset VLAN filter table address
+  *  @hw: pointer to the HW structure
+@@ -658,6 +743,58 @@ static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+ 	return err;
+ }
+ 
++/**
++ * ixgbe_read_vflinks - Read VFLINKS register
++ * @hw: pointer to the HW structure
++ * @speed: link speed
++ * @link_up: indicate if link is up/down
++ *
++ * Get linkup status and link speed from the VFLINKS register.
++ */
++static void ixgbe_read_vflinks(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
++			       bool *link_up)
++{
++	u32 vflinks = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
++
++	/* if link status is down no point in checking to see if PF is up */
++	if (!(vflinks & IXGBE_LINKS_UP)) {
++		*link_up = false;
++		return;
++	}
++
++	/* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
++	 * before the link status is correct
++	 */
++	if (hw->mac.type == ixgbe_mac_82599_vf) {
++		for (int i = 0; i < 5; i++) {
++			udelay(100);
++			vflinks = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
++
++			if (!(vflinks & IXGBE_LINKS_UP)) {
++				*link_up = false;
++				return;
++			}
++		}
++	}
++
++	/* We reached this point so there's link */
++	*link_up = true;
++
++	switch (vflinks & IXGBE_LINKS_SPEED_82599) {
++	case IXGBE_LINKS_SPEED_10G_82599:
++		*speed = IXGBE_LINK_SPEED_10GB_FULL;
++		break;
++	case IXGBE_LINKS_SPEED_1G_82599:
++		*speed = IXGBE_LINK_SPEED_1GB_FULL;
++		break;
++	case IXGBE_LINKS_SPEED_100_82599:
++		*speed = IXGBE_LINK_SPEED_100_FULL;
++		break;
++	default:
++		*speed = IXGBE_LINK_SPEED_UNKNOWN;
++	}
++}
++
+ /**
+  * ixgbevf_hv_set_vfta_vf - * Hyper-V variant - just a stub.
+  * @hw: unused
+@@ -702,10 +839,10 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
+ 				     bool *link_up,
+ 				     bool autoneg_wait_to_complete)
+ {
++	struct ixgbevf_adapter *adapter = hw->back;
+ 	struct ixgbe_mbx_info *mbx = &hw->mbx;
+ 	struct ixgbe_mac_info *mac = &hw->mac;
+ 	s32 ret_val = 0;
+-	u32 links_reg;
+ 	u32 in_msg = 0;
+ 
+ 	/* If we were hit with a reset drop the link */
+@@ -715,43 +852,21 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
+ 	if (!mac->get_link_status)
+ 		goto out;
+ 
+-	/* if link status is down no point in checking to see if pf is up */
+-	links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+-	if (!(links_reg & IXGBE_LINKS_UP))
+-		goto out;
+-
+-	/* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
+-	 * before the link status is correct
+-	 */
+-	if (mac->type == ixgbe_mac_82599_vf) {
+-		int i;
+-
+-		for (i = 0; i < 5; i++) {
+-			udelay(100);
+-			links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+-
+-			if (!(links_reg & IXGBE_LINKS_UP))
+-				goto out;
+-		}
+-	}
+-
+-	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
+-	case IXGBE_LINKS_SPEED_10G_82599:
+-		*speed = IXGBE_LINK_SPEED_10GB_FULL;
+-		break;
+-	case IXGBE_LINKS_SPEED_1G_82599:
+-		*speed = IXGBE_LINK_SPEED_1GB_FULL;
+-		break;
+-	case IXGBE_LINKS_SPEED_100_82599:
+-		*speed = IXGBE_LINK_SPEED_100_FULL;
+-		break;
++	if (hw->mac.type == ixgbe_mac_e610_vf) {
++		ret_val = ixgbevf_get_pf_link_state(hw, speed, link_up);
++		if (ret_val)
++			goto out;
++	} else {
++		ixgbe_read_vflinks(hw, speed, link_up);
++		if (!*link_up)
++			goto out;
+ 	}
+ 
+ 	/* if the read failed it could just be a mailbox collision, best wait
+ 	 * until we are called again and don't report an error
+ 	 */
+ 	if (mbx->ops.read(hw, &in_msg, 1)) {
+-		if (hw->api_version >= ixgbe_mbox_api_15)
++		if (adapter->pf_features & IXGBEVF_PF_SUP_ESX_MBX)
+ 			mac->get_link_status = false;
+ 		goto out;
+ 	}
+@@ -951,6 +1066,8 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
+ 	case ixgbe_mbox_api_13:
+ 	case ixgbe_mbox_api_14:
+ 	case ixgbe_mbox_api_15:
++	case ixgbe_mbox_api_16:
++	case ixgbe_mbox_api_17:
+ 		break;
+ 	default:
+ 		return 0;
+@@ -1005,6 +1122,7 @@ static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
+ 	.setup_link		= ixgbevf_setup_mac_link_vf,
+ 	.check_link		= ixgbevf_check_mac_link_vf,
+ 	.negotiate_api_version	= ixgbevf_negotiate_api_version_vf,
++	.negotiate_features	= ixgbevf_negotiate_features_vf,
+ 	.set_rar		= ixgbevf_set_rar_vf,
+ 	.update_mc_addr_list	= ixgbevf_update_mc_addr_list_vf,
+ 	.update_xcast_mode	= ixgbevf_update_xcast_mode,
+@@ -1076,3 +1194,13 @@ const struct ixgbevf_info ixgbevf_x550em_a_vf_info = {
+ 	.mac = ixgbe_mac_x550em_a_vf,
+ 	.mac_ops = &ixgbevf_mac_ops,
+ };
++
++const struct ixgbevf_info ixgbevf_e610_vf_info = {
++	.mac                    = ixgbe_mac_e610_vf,
++	.mac_ops                = &ixgbevf_mac_ops,
++};
++
++const struct ixgbevf_info ixgbevf_e610_vf_hv_info = {
++	.mac            = ixgbe_mac_e610_vf,
++	.mac_ops        = &ixgbevf_hv_mac_ops,
++};
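API 1.7 feature negotiation is a single mailbox round trip: the VF advertises the bitmap it supports and treats the reply as the granted subset, falling back to an empty mask on failure. The PF side is not part of this patch; a plausible handler simply masks the request, along these lines (the names and the ack constant are illustrative, not the ixgbe PF code):

#include <linux/types.h>

#define MSGTYPE_SUCCESS	0x80000000	/* stand-in for the mailbox ack flag */

static void pf_negotiate_features(u32 *msgbuf, u32 pf_supported)
{
	msgbuf[1] &= pf_supported;	/* granted subset, echoed to the VF */
	msgbuf[0] |= MSGTYPE_SUCCESS;	/* otherwise the VF sees FAILURE */
}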
+diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
+index b4eef5b6c172bd..4f19b8900c29a3 100644
+--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
++++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
+@@ -1,5 +1,5 @@
+ /* SPDX-License-Identifier: GPL-2.0 */
+-/* Copyright(c) 1999 - 2018 Intel Corporation. */
++/* Copyright(c) 1999 - 2024 Intel Corporation. */
+ 
+ #ifndef __IXGBE_VF_H__
+ #define __IXGBE_VF_H__
+@@ -26,6 +26,7 @@ struct ixgbe_mac_operations {
+ 	s32 (*stop_adapter)(struct ixgbe_hw *);
+ 	s32 (*get_bus_info)(struct ixgbe_hw *);
+ 	s32 (*negotiate_api_version)(struct ixgbe_hw *hw, int api);
++	int (*negotiate_features)(struct ixgbe_hw *hw, u32 *pf_features);
+ 
+ 	/* Link */
+ 	s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool);
+@@ -54,6 +55,8 @@ enum ixgbe_mac_type {
+ 	ixgbe_mac_X550_vf,
+ 	ixgbe_mac_X550EM_x_vf,
+ 	ixgbe_mac_x550em_a_vf,
++	ixgbe_mac_e610,
++	ixgbe_mac_e610_vf,
+ 	ixgbe_num_macs
+ };
+ 
+diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
+index 7b82779e4cd5d2..80b5262d0d5727 100644
+--- a/drivers/net/ethernet/realtek/r8169_main.c
++++ b/drivers/net/ethernet/realtek/r8169_main.c
+@@ -5060,8 +5060,9 @@ static int rtl8169_resume(struct device *device)
+ 	if (!device_may_wakeup(tp_to_dev(tp)))
+ 		clk_prepare_enable(tp->clk);
+ 
+-	/* Reportedly at least Asus X453MA truncates packets otherwise */
+-	if (tp->mac_version == RTL_GIGA_MAC_VER_37)
++	/* Some chip versions may truncate packets without this initialization */
++	if (tp->mac_version == RTL_GIGA_MAC_VER_37 ||
++	    tp->mac_version == RTL_GIGA_MAC_VER_46)
+ 		rtl_init_rxcfg(tp);
+ 
+ 	return rtl8169_runtime_resume(device);
+diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
+index ee2a7b2f6268de..e2d92295ad33b2 100644
+--- a/drivers/net/netdevsim/netdev.c
++++ b/drivers/net/netdevsim/netdev.c
+@@ -433,6 +433,7 @@ static void nsim_enable_napi(struct netdevsim *ns)
+ static int nsim_open(struct net_device *dev)
+ {
+ 	struct netdevsim *ns = netdev_priv(dev);
++	struct netdevsim *peer;
+ 	int err;
+ 
+ 	err = nsim_init_napi(ns);
+@@ -441,6 +442,12 @@ static int nsim_open(struct net_device *dev)
+ 
+ 	nsim_enable_napi(ns);
+ 
++	peer = rtnl_dereference(ns->peer);
++	if (peer && netif_running(peer->netdev)) {
++		netif_carrier_on(dev);
++		netif_carrier_on(peer->netdev);
++	}
++
+ 	return 0;
+ }
+ 
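The netdevsim fix models a point-to-point cable: carrier should be asserted only while both ends of the pair are up. A sketch of that rule as a standalone helper (hypothetical; the driver open-codes the one-sided half of it in nsim_open()):

#include <linux/netdevice.h>

static void update_pair_carrier(struct net_device *a, struct net_device *b)
{
	if (netif_running(a) && netif_running(b)) {
		netif_carrier_on(a);
		netif_carrier_on(b);
	} else {
		netif_carrier_off(a);
		netif_carrier_off(b);
	}
}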
+diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
+index 2f8637224b69e3..2da67814556f99 100644
+--- a/drivers/net/usb/lan78xx.c
++++ b/drivers/net/usb/lan78xx.c
+@@ -1920,13 +1920,19 @@ static const struct ethtool_ops lan78xx_ethtool_ops = {
+ 	.get_regs	= lan78xx_get_regs,
+ };
+ 
+-static void lan78xx_init_mac_address(struct lan78xx_net *dev)
++static int lan78xx_init_mac_address(struct lan78xx_net *dev)
+ {
+ 	u32 addr_lo, addr_hi;
+ 	u8 addr[6];
++	int ret;
++
++	ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
++	if (ret < 0)
++		return ret;
+ 
+-	lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
+-	lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
++	ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
++	if (ret < 0)
++		return ret;
+ 
+ 	addr[0] = addr_lo & 0xFF;
+ 	addr[1] = (addr_lo >> 8) & 0xFF;
+@@ -1959,14 +1965,26 @@ static void lan78xx_init_mac_address(struct lan78xx_net *dev)
+ 			  (addr[2] << 16) | (addr[3] << 24);
+ 		addr_hi = addr[4] | (addr[5] << 8);
+ 
+-		lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
+-		lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
++		ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
++		if (ret < 0)
++			return ret;
++
++		ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
++		if (ret < 0)
++			return ret;
+ 	}
+ 
+-	lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
+-	lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
++	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
++	if (ret < 0)
++		return ret;
++
++	ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
++	if (ret < 0)
++		return ret;
+ 
+ 	eth_hw_addr_set(dev->net, addr);
++
++	return 0;
+ }
+ 
+ /* MDIO read and write wrappers for phylib */
+@@ -2905,8 +2923,6 @@ static int lan78xx_reset(struct lan78xx_net *dev)
+ 		}
+ 	} while (buf & HW_CFG_LRST_);
+ 
+-	lan78xx_init_mac_address(dev);
+-
+ 	/* save DEVID for later usage */
+ 	ret = lan78xx_read_reg(dev, ID_REV, &buf);
+ 	if (ret < 0)
+@@ -2915,6 +2931,10 @@ static int lan78xx_reset(struct lan78xx_net *dev)
+ 	dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
+ 	dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;
+ 
++	ret = lan78xx_init_mac_address(dev);
++	if (ret < 0)
++		return ret;
++
+ 	/* Respond to the IN token with a NAK */
+ 	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
+ 	if (ret < 0)
+diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
+index 2cab046749a922..3fcd2b736c5e3e 100644
+--- a/drivers/net/usb/r8152.c
++++ b/drivers/net/usb/r8152.c
+@@ -10151,7 +10151,12 @@ static int __init rtl8152_driver_init(void)
+ 	ret = usb_register_device_driver(&rtl8152_cfgselector_driver, THIS_MODULE);
+ 	if (ret)
+ 		return ret;
+-	return usb_register(&rtl8152_driver);
++
++	ret = usb_register(&rtl8152_driver);
++	if (ret)
++		usb_deregister_device_driver(&rtl8152_cfgselector_driver);
++
++	return ret;
+ }
+ 
+ static void __exit rtl8152_driver_exit(void)
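The r8152 fix is the usual rule for module_init() functions that register more than one thing: if a later step fails, unregister the earlier ones in reverse order, or the module load fails while leaving a driver registered. The shape of the pattern, with placeholder driver objects:

#include <linux/module.h>
#include <linux/usb.h>

static struct usb_device_driver first_driver;	/* placeholder */
static struct usb_driver second_driver;		/* placeholder */

static int __init two_driver_init(void)
{
	int ret;

	ret = usb_register_device_driver(&first_driver, THIS_MODULE);
	if (ret)
		return ret;

	ret = usb_register(&second_driver);
	if (ret)
		usb_deregister_device_driver(&first_driver); /* roll back */

	return ret;
}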
+diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c
+index 99711a4fb85df8..df8fad7a2aea63 100644
+--- a/drivers/net/wireless/realtek/rtw89/core.c
++++ b/drivers/net/wireless/realtek/rtw89/core.c
+@@ -978,25 +978,14 @@ void rtw89_core_tx_kick_off(struct rtw89_dev *rtwdev, u8 qsel)
+ }
+ 
+ int rtw89_core_tx_kick_off_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb,
+-				    int qsel, unsigned int timeout)
++				    struct rtw89_tx_wait_info *wait, int qsel,
++				    unsigned int timeout)
+ {
+-	struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb);
+-	struct rtw89_tx_wait_info *wait;
+ 	unsigned long time_left;
+ 	int ret = 0;
+ 
+ 	lockdep_assert_wiphy(rtwdev->hw->wiphy);
+ 
+-	wait = kzalloc(sizeof(*wait), GFP_KERNEL);
+-	if (!wait) {
+-		rtw89_core_tx_kick_off(rtwdev, qsel);
+-		return 0;
+-	}
+-
+-	init_completion(&wait->completion);
+-	wait->skb = skb;
+-	rcu_assign_pointer(skb_data->wait, wait);
+-
+ 	rtw89_core_tx_kick_off(rtwdev, qsel);
+ 	time_left = wait_for_completion_timeout(&wait->completion,
+ 						msecs_to_jiffies(timeout));
+@@ -1057,10 +1046,12 @@ int rtw89_h2c_tx(struct rtw89_dev *rtwdev,
+ }
+ 
+ int rtw89_core_tx_write(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
+-			struct ieee80211_sta *sta, struct sk_buff *skb, int *qsel)
++			struct ieee80211_sta *sta, struct sk_buff *skb, int *qsel,
++			struct rtw89_tx_wait_info *wait)
+ {
+ 	struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
+ 	struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
++	struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb);
+ 	struct rtw89_core_tx_request tx_req = {0};
+ 	struct rtw89_sta_link *rtwsta_link = NULL;
+ 	struct rtw89_vif_link *rtwvif_link;
+@@ -1093,6 +1084,8 @@ int rtw89_core_tx_write(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
+ 	rtw89_core_tx_update_desc_info(rtwdev, &tx_req);
+ 	rtw89_core_tx_wake(rtwdev, &tx_req);
+ 
++	rcu_assign_pointer(skb_data->wait, wait);
++
+ 	ret = rtw89_hci_tx_write(rtwdev, &tx_req);
+ 	if (ret) {
+ 		rtw89_err(rtwdev, "failed to transmit skb to HCI\n");
+@@ -2908,7 +2901,7 @@ static void rtw89_core_txq_push(struct rtw89_dev *rtwdev,
+ 			goto out;
+ 		}
+ 		rtw89_core_txq_check_agg(rtwdev, rtwtxq, skb);
+-		ret = rtw89_core_tx_write(rtwdev, vif, sta, skb, NULL);
++		ret = rtw89_core_tx_write(rtwdev, vif, sta, skb, NULL, NULL);
+ 		if (ret) {
+ 			rtw89_err(rtwdev, "failed to push txq: %d\n", ret);
+ 			ieee80211_free_txskb(rtwdev->hw, skb);
+@@ -3084,7 +3077,7 @@ static void rtw89_core_sta_pending_tx_iter(void *data,
+ 	skb_queue_walk_safe(&rtwsta->roc_queue, skb, tmp) {
+ 		skb_unlink(skb, &rtwsta->roc_queue);
+ 
+-		ret = rtw89_core_tx_write(rtwdev, vif, sta, skb, &qsel);
++		ret = rtw89_core_tx_write(rtwdev, vif, sta, skb, &qsel, NULL);
+ 		if (ret) {
+ 			rtw89_warn(rtwdev, "pending tx failed with %d\n", ret);
+ 			dev_kfree_skb_any(skb);
+@@ -3106,6 +3099,7 @@ static int rtw89_core_send_nullfunc(struct rtw89_dev *rtwdev,
+ 				    struct rtw89_vif_link *rtwvif_link, bool qos, bool ps)
+ {
+ 	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
++	struct rtw89_tx_wait_info *wait;
+ 	struct ieee80211_sta *sta;
+ 	struct ieee80211_hdr *hdr;
+ 	struct sk_buff *skb;
+@@ -3114,6 +3108,12 @@ static int rtw89_core_send_nullfunc(struct rtw89_dev *rtwdev,
+ 	if (vif->type != NL80211_IFTYPE_STATION || !vif->cfg.assoc)
+ 		return 0;
+ 
++	wait = kzalloc(sizeof(*wait), GFP_KERNEL);
++	if (!wait)
++		return -ENOMEM;
++
++	init_completion(&wait->completion);
++
+ 	rcu_read_lock();
+ 	sta = ieee80211_find_sta(vif, vif->cfg.ap_addr);
+ 	if (!sta) {
+@@ -3127,11 +3127,13 @@ static int rtw89_core_send_nullfunc(struct rtw89_dev *rtwdev,
+ 		goto out;
+ 	}
+ 
++	wait->skb = skb;
++
+ 	hdr = (struct ieee80211_hdr *)skb->data;
+ 	if (ps)
+ 		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
+ 
+-	ret = rtw89_core_tx_write(rtwdev, vif, sta, skb, &qsel);
++	ret = rtw89_core_tx_write(rtwdev, vif, sta, skb, &qsel, wait);
+ 	if (ret) {
+ 		rtw89_warn(rtwdev, "nullfunc transmit failed: %d\n", ret);
+ 		dev_kfree_skb_any(skb);
+@@ -3140,10 +3142,11 @@ static int rtw89_core_send_nullfunc(struct rtw89_dev *rtwdev,
+ 
+ 	rcu_read_unlock();
+ 
+-	return rtw89_core_tx_kick_off_and_wait(rtwdev, skb, qsel,
++	return rtw89_core_tx_kick_off_and_wait(rtwdev, skb, wait, qsel,
+ 					       RTW89_ROC_TX_TIMEOUT);
+ out:
+ 	rcu_read_unlock();
++	kfree(wait);
+ 
+ 	return ret;
+ }
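The rtw89 refactor moves ownership of the wait object to the caller: it is allocated and its completion initialized before the frame is queued, attached to the skb inside rtw89_core_tx_write(), and freed again if queueing never happened. That closes the window where the old code attached a half-set-up object after the descriptor was already submitted. A reduced sketch of the caller-owned completion pattern (success-path ownership is simplified; the real driver hands the object to the completer through an RCU pointer):

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/slab.h>

struct tx_wait {
	struct completion done;
	bool tx_done;
};

static int send_and_wait(int (*queue_frame)(struct tx_wait *w),
			 unsigned int timeout_ms)
{
	struct tx_wait *w = kzalloc(sizeof(*w), GFP_KERNEL);
	int ret;

	if (!w)
		return -ENOMEM;

	init_completion(&w->done);	/* fully initialized before queueing */

	ret = queue_frame(w);
	if (ret) {
		kfree(w);		/* never queued: safe to free here */
		return ret;
	}

	if (!wait_for_completion_timeout(&w->done,
					 msecs_to_jiffies(timeout_ms)))
		return -ETIMEDOUT;	/* completer still owns w */

	return w->tx_done ? 0 : -EAGAIN; /* freeing of w on completion elided */
}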
+diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h
+index cb703588e3a4ff..7be32b83d4d64c 100644
+--- a/drivers/net/wireless/realtek/rtw89/core.h
++++ b/drivers/net/wireless/realtek/rtw89/core.h
+@@ -6818,12 +6818,14 @@ static inline bool rtw89_is_rtl885xb(struct rtw89_dev *rtwdev)
+ }
+ 
+ int rtw89_core_tx_write(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
+-			struct ieee80211_sta *sta, struct sk_buff *skb, int *qsel);
++			struct ieee80211_sta *sta, struct sk_buff *skb, int *qsel,
++			struct rtw89_tx_wait_info *wait);
+ int rtw89_h2c_tx(struct rtw89_dev *rtwdev,
+ 		 struct sk_buff *skb, bool fwdl);
+ void rtw89_core_tx_kick_off(struct rtw89_dev *rtwdev, u8 qsel);
+ int rtw89_core_tx_kick_off_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb,
+-				    int qsel, unsigned int timeout);
++				    struct rtw89_tx_wait_info *wait, int qsel,
++				    unsigned int timeout);
+ void rtw89_core_fill_txdesc(struct rtw89_dev *rtwdev,
+ 			    struct rtw89_tx_desc_info *desc_info,
+ 			    void *txdesc);
+diff --git a/drivers/net/wireless/realtek/rtw89/mac80211.c b/drivers/net/wireless/realtek/rtw89/mac80211.c
+index 3a1a2b243adf0e..d1f9bd41f8b5d0 100644
+--- a/drivers/net/wireless/realtek/rtw89/mac80211.c
++++ b/drivers/net/wireless/realtek/rtw89/mac80211.c
+@@ -36,7 +36,7 @@ static void rtw89_ops_tx(struct ieee80211_hw *hw,
+ 		return;
+ 	}
+ 
+-	ret = rtw89_core_tx_write(rtwdev, vif, sta, skb, &qsel);
++	ret = rtw89_core_tx_write(rtwdev, vif, sta, skb, &qsel, NULL);
+ 	if (ret) {
+ 		rtw89_err(rtwdev, "failed to transmit skb: %d\n", ret);
+ 		ieee80211_free_txskb(hw, skb);
+diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c
+index 5fd5fe88e6b083..a87e1778a0d419 100644
+--- a/drivers/net/wireless/realtek/rtw89/pci.c
++++ b/drivers/net/wireless/realtek/rtw89/pci.c
+@@ -1366,7 +1366,6 @@ static int rtw89_pci_txwd_submit(struct rtw89_dev *rtwdev,
+ 	struct pci_dev *pdev = rtwpci->pdev;
+ 	struct sk_buff *skb = tx_req->skb;
+ 	struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb);
+-	struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb);
+ 	bool en_wd_info = desc_info->en_wd_info;
+ 	u32 txwd_len;
+ 	u32 txwp_len;
+@@ -1382,7 +1381,6 @@ static int rtw89_pci_txwd_submit(struct rtw89_dev *rtwdev,
+ 	}
+ 
+ 	tx_data->dma = dma;
+-	rcu_assign_pointer(skb_data->wait, NULL);
+ 
+ 	txwp_len = sizeof(*txwp_info);
+ 	txwd_len = chip->txwd_body_size;
+diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
+index 561dd08022c061..24cff8b0449236 100644
+--- a/drivers/nvme/host/multipath.c
++++ b/drivers/nvme/host/multipath.c
+@@ -131,12 +131,14 @@ void nvme_mpath_start_request(struct request *rq)
+ 	struct nvme_ns *ns = rq->q->queuedata;
+ 	struct gendisk *disk = ns->head->disk;
+ 
+-	if (READ_ONCE(ns->head->subsys->iopolicy) == NVME_IOPOLICY_QD) {
++	if ((READ_ONCE(ns->head->subsys->iopolicy) == NVME_IOPOLICY_QD) &&
++	    !(nvme_req(rq)->flags & NVME_MPATH_CNT_ACTIVE)) {
+ 		atomic_inc(&ns->ctrl->nr_active);
+ 		nvme_req(rq)->flags |= NVME_MPATH_CNT_ACTIVE;
+ 	}
+ 
+-	if (!blk_queue_io_stat(disk->queue) || blk_rq_is_passthrough(rq))
++	if (!blk_queue_io_stat(disk->queue) || blk_rq_is_passthrough(rq) ||
++	    (nvme_req(rq)->flags & NVME_MPATH_IO_STATS))
+ 		return;
+ 
+ 	nvme_req(rq)->flags |= NVME_MPATH_IO_STATS;
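The multipath change makes the queue-depth accounting idempotent: a request that is restarted after a path failover must not bump nr_active a second time, so the increment is gated on a per-request flag, just like the existing NVME_MPATH_IO_STATS guard in the same function. The generic shape of flag-guarded counting:

#include <linux/atomic.h>
#include <linux/bits.h>

#define REQ_COUNTED	BIT(0)	/* generic stand-in for the nvme flag */

static void count_start(atomic_t *nr_active, unsigned long *req_flags)
{
	if (!(*req_flags & REQ_COUNTED)) {
		atomic_inc(nr_active);
		*req_flags |= REQ_COUNTED;
	}
}

static void count_end(atomic_t *nr_active, unsigned long *req_flags)
{
	if (*req_flags & REQ_COUNTED) {
		atomic_dec(nr_active);
		*req_flags &= ~REQ_COUNTED;
	}
}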
+diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
+index 83a6b18b01ada0..77df3432dfb78e 100644
+--- a/drivers/nvme/host/tcp.c
++++ b/drivers/nvme/host/tcp.c
+@@ -1075,6 +1075,9 @@ static void nvme_tcp_write_space(struct sock *sk)
+ 	queue = sk->sk_user_data;
+ 	if (likely(queue && sk_stream_is_writeable(sk))) {
+ 		clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
++		/* Ensure pending TLS partial records are retried */
++		if (nvme_tcp_queue_tls(queue))
++			queue->write_space(sk);
+ 		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
+ 	}
+ 	read_unlock_bh(&sk->sk_callback_lock);
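queue->write_space holds the socket's original sk_write_space callback, saved when the queue took over the socket. For TLS the record layer buffers partial records and only its own handler retries them, so the replacement hook has to chain to the saved one before kicking io_work. The save-and-chain convention in schematic form (struct my_queue and needs_chain are illustrative names):

#include <net/sock.h>

struct my_queue {
	void (*saved_write_space)(struct sock *sk);
	bool needs_chain;
};

static void my_write_space(struct sock *sk)
{
	struct my_queue *q;

	read_lock_bh(&sk->sk_callback_lock);
	q = sk->sk_user_data;
	if (q && q->needs_chain)
		q->saved_write_space(sk);	/* let the lower layer retry */
	/* ... then schedule this layer's own work ... */
	read_unlock_bh(&sk->sk_callback_lock);
}

static void take_over_socket(struct my_queue *q, struct sock *sk)
{
	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_user_data = q;
	q->saved_write_space = sk->sk_write_space;	/* save original */
	sk->sk_write_space = my_write_space;		/* install ours */
	write_unlock_bh(&sk->sk_callback_lock);
}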
+diff --git a/drivers/phy/cadence/cdns-dphy.c b/drivers/phy/cadence/cdns-dphy.c
+index dddb66de6dba15..8d93a830ab8bff 100644
+--- a/drivers/phy/cadence/cdns-dphy.c
++++ b/drivers/phy/cadence/cdns-dphy.c
+@@ -30,6 +30,7 @@
+ 
+ #define DPHY_CMN_SSM			DPHY_PMA_CMN(0x20)
+ #define DPHY_CMN_SSM_EN			BIT(0)
++#define DPHY_CMN_SSM_CAL_WAIT_TIME	GENMASK(8, 1)
+ #define DPHY_CMN_TX_MODE_EN		BIT(9)
+ 
+ #define DPHY_CMN_PWM			DPHY_PMA_CMN(0x40)
+@@ -79,6 +80,7 @@ struct cdns_dphy_cfg {
+ 	u8 pll_ipdiv;
+ 	u8 pll_opdiv;
+ 	u16 pll_fbdiv;
++	u32 hs_clk_rate;
+ 	unsigned int nlanes;
+ };
+ 
+@@ -99,6 +101,8 @@ struct cdns_dphy_ops {
+ 	void (*set_pll_cfg)(struct cdns_dphy *dphy,
+ 			    const struct cdns_dphy_cfg *cfg);
+ 	unsigned long (*get_wakeup_time_ns)(struct cdns_dphy *dphy);
++	int (*wait_for_pll_lock)(struct cdns_dphy *dphy);
++	int (*wait_for_cmn_ready)(struct cdns_dphy *dphy);
+ };
+ 
+ struct cdns_dphy {
+@@ -108,6 +112,8 @@ struct cdns_dphy {
+ 	struct clk *pll_ref_clk;
+ 	const struct cdns_dphy_ops *ops;
+ 	struct phy *phy;
++	bool is_configured;
++	bool is_powered;
+ };
+ 
+ /* Order of bands is important since the index is the band number. */
+@@ -154,6 +160,9 @@ static int cdns_dsi_get_dphy_pll_cfg(struct cdns_dphy *dphy,
+ 					  cfg->pll_ipdiv,
+ 					  pll_ref_hz);
+ 
++	cfg->hs_clk_rate = div_u64((u64)pll_ref_hz * cfg->pll_fbdiv,
++				   2 * cfg->pll_opdiv * cfg->pll_ipdiv);
++
+ 	return 0;
+ }
+ 
+@@ -191,6 +200,16 @@ static unsigned long cdns_dphy_get_wakeup_time_ns(struct cdns_dphy *dphy)
+ 	return dphy->ops->get_wakeup_time_ns(dphy);
+ }
+ 
++static int cdns_dphy_wait_for_pll_lock(struct cdns_dphy *dphy)
++{
++	return dphy->ops->wait_for_pll_lock ? dphy->ops->wait_for_pll_lock(dphy) : 0;
++}
++
++static int cdns_dphy_wait_for_cmn_ready(struct cdns_dphy *dphy)
++{
++	return dphy->ops->wait_for_cmn_ready ? dphy->ops->wait_for_cmn_ready(dphy) : 0;
++}
++
+ static unsigned long cdns_dphy_ref_get_wakeup_time_ns(struct cdns_dphy *dphy)
+ {
+ 	/* Default wakeup time is 800 ns (in a simulated environment). */
+@@ -232,7 +251,6 @@ static unsigned long cdns_dphy_j721e_get_wakeup_time_ns(struct cdns_dphy *dphy)
+ static void cdns_dphy_j721e_set_pll_cfg(struct cdns_dphy *dphy,
+ 					const struct cdns_dphy_cfg *cfg)
+ {
+-	u32 status;
+ 
+ 	/*
+ 	 * set the PWM and PLL Byteclk divider settings to recommended values
+@@ -249,13 +267,6 @@ static void cdns_dphy_j721e_set_pll_cfg(struct cdns_dphy *dphy,
+ 
+ 	writel(DPHY_TX_J721E_WIZ_LANE_RSTB,
+ 	       dphy->regs + DPHY_TX_J721E_WIZ_RST_CTRL);
+-
+-	readl_poll_timeout(dphy->regs + DPHY_TX_J721E_WIZ_PLL_CTRL, status,
+-			   (status & DPHY_TX_WIZ_PLL_LOCK), 0, POLL_TIMEOUT_US);
+-
+-	readl_poll_timeout(dphy->regs + DPHY_TX_J721E_WIZ_STATUS, status,
+-			   (status & DPHY_TX_WIZ_O_CMN_READY), 0,
+-			   POLL_TIMEOUT_US);
+ }
+ 
+ static void cdns_dphy_j721e_set_psm_div(struct cdns_dphy *dphy, u8 div)
+@@ -263,6 +274,23 @@ static void cdns_dphy_j721e_set_psm_div(struct cdns_dphy *dphy, u8 div)
+ 	writel(div, dphy->regs + DPHY_TX_J721E_WIZ_PSM_FREQ);
+ }
+ 
++static int cdns_dphy_j721e_wait_for_pll_lock(struct cdns_dphy *dphy)
++{
++	u32 status;
++
++	return readl_poll_timeout(dphy->regs + DPHY_TX_J721E_WIZ_PLL_CTRL, status,
++			       status & DPHY_TX_WIZ_PLL_LOCK, 0, POLL_TIMEOUT_US);
++}
++
++static int cdns_dphy_j721e_wait_for_cmn_ready(struct cdns_dphy *dphy)
++{
++	u32 status;
++
++	return readl_poll_timeout(dphy->regs + DPHY_TX_J721E_WIZ_STATUS, status,
++			       status & DPHY_TX_WIZ_O_CMN_READY, 0,
++			       POLL_TIMEOUT_US);
++}
++
+ /*
+  * This is the reference implementation of DPHY hooks. Specific integration of
+  * this IP may have to re-implement some of them depending on how they decided
+@@ -278,6 +306,8 @@ static const struct cdns_dphy_ops j721e_dphy_ops = {
+ 	.get_wakeup_time_ns = cdns_dphy_j721e_get_wakeup_time_ns,
+ 	.set_pll_cfg = cdns_dphy_j721e_set_pll_cfg,
+ 	.set_psm_div = cdns_dphy_j721e_set_psm_div,
++	.wait_for_pll_lock = cdns_dphy_j721e_wait_for_pll_lock,
++	.wait_for_cmn_ready = cdns_dphy_j721e_wait_for_cmn_ready,
+ };
+ 
+ static int cdns_dphy_config_from_opts(struct phy *phy,
+@@ -297,6 +327,7 @@ static int cdns_dphy_config_from_opts(struct phy *phy,
+ 	if (ret)
+ 		return ret;
+ 
++	opts->hs_clk_rate = cfg->hs_clk_rate;
+ 	opts->wakeup = cdns_dphy_get_wakeup_time_ns(dphy) / 1000;
+ 
+ 	return 0;
+@@ -334,21 +365,36 @@ static int cdns_dphy_validate(struct phy *phy, enum phy_mode mode, int submode,
+ static int cdns_dphy_configure(struct phy *phy, union phy_configure_opts *opts)
+ {
+ 	struct cdns_dphy *dphy = phy_get_drvdata(phy);
+-	struct cdns_dphy_cfg cfg = { 0 };
+-	int ret, band_ctrl;
+-	unsigned int reg;
++	int ret;
+ 
+-	ret = cdns_dphy_config_from_opts(phy, &opts->mipi_dphy, &cfg);
+-	if (ret)
+-		return ret;
++	ret = cdns_dphy_config_from_opts(phy, &opts->mipi_dphy, &dphy->cfg);
++	if (!ret)
++		dphy->is_configured = true;
++
++	return ret;
++}
++
++static int cdns_dphy_power_on(struct phy *phy)
++{
++	struct cdns_dphy *dphy = phy_get_drvdata(phy);
++	int ret;
++	u32 reg;
++
++	if (!dphy->is_configured || dphy->is_powered)
++		return -EINVAL;
++
++	clk_prepare_enable(dphy->psm_clk);
++	clk_prepare_enable(dphy->pll_ref_clk);
+ 
+ 	/*
+ 	 * Configure the internal PSM clk divider so that the DPHY has a
+ 	 * 1MHz clk (or something close).
+ 	 */
+ 	ret = cdns_dphy_setup_psm(dphy);
+-	if (ret)
+-		return ret;
++	if (ret) {
++		dev_err(&dphy->phy->dev, "Failed to setup PSM with error %d\n", ret);
++		goto err_power_on;
++	}
+ 
+ 	/*
+ 	 * Configure attach clk lanes to data lanes: the DPHY has 2 clk lanes
+@@ -363,40 +409,61 @@ static int cdns_dphy_configure(struct phy *phy, union phy_configure_opts *opts)
+ 	 * Configure the DPHY PLL that will be used to generate the TX byte
+ 	 * clk.
+ 	 */
+-	cdns_dphy_set_pll_cfg(dphy, &cfg);
++	cdns_dphy_set_pll_cfg(dphy, &dphy->cfg);
+ 
+-	band_ctrl = cdns_dphy_tx_get_band_ctrl(opts->mipi_dphy.hs_clk_rate);
+-	if (band_ctrl < 0)
+-		return band_ctrl;
++	ret = cdns_dphy_tx_get_band_ctrl(dphy->cfg.hs_clk_rate);
++	if (ret < 0) {
++		dev_err(&dphy->phy->dev, "Failed to get band control value with error %d\n", ret);
++		goto err_power_on;
++	}
+ 
+-	reg = FIELD_PREP(DPHY_BAND_CFG_LEFT_BAND, band_ctrl) |
+-	      FIELD_PREP(DPHY_BAND_CFG_RIGHT_BAND, band_ctrl);
++	reg = FIELD_PREP(DPHY_BAND_CFG_LEFT_BAND, ret) |
++	      FIELD_PREP(DPHY_BAND_CFG_RIGHT_BAND, ret);
+ 	writel(reg, dphy->regs + DPHY_BAND_CFG);
+ 
+-	return 0;
+-}
++	/* Start TX state machine. */
++	reg = readl(dphy->regs + DPHY_CMN_SSM);
++	writel((reg & DPHY_CMN_SSM_CAL_WAIT_TIME) | DPHY_CMN_SSM_EN | DPHY_CMN_TX_MODE_EN,
++	       dphy->regs + DPHY_CMN_SSM);
+ 
+-static int cdns_dphy_power_on(struct phy *phy)
+-{
+-	struct cdns_dphy *dphy = phy_get_drvdata(phy);
++	ret = cdns_dphy_wait_for_pll_lock(dphy);
++	if (ret) {
++		dev_err(&dphy->phy->dev, "Failed to lock PLL with error %d\n", ret);
++		goto err_power_on;
++	}
+ 
+-	clk_prepare_enable(dphy->psm_clk);
+-	clk_prepare_enable(dphy->pll_ref_clk);
++	ret = cdns_dphy_wait_for_cmn_ready(dphy);
++	if (ret) {
++		dev_err(&dphy->phy->dev, "O_CMN_READY signal failed to assert with error %d\n",
++			ret);
++		goto err_power_on;
++	}
+ 
+-	/* Start TX state machine. */
+-	writel(DPHY_CMN_SSM_EN | DPHY_CMN_TX_MODE_EN,
+-	       dphy->regs + DPHY_CMN_SSM);
++	dphy->is_powered = true;
+ 
+ 	return 0;
++
++err_power_on:
++	clk_disable_unprepare(dphy->pll_ref_clk);
++	clk_disable_unprepare(dphy->psm_clk);
++
++	return ret;
+ }
+ 
+ static int cdns_dphy_power_off(struct phy *phy)
+ {
+ 	struct cdns_dphy *dphy = phy_get_drvdata(phy);
++	u32 reg;
+ 
+ 	clk_disable_unprepare(dphy->pll_ref_clk);
+ 	clk_disable_unprepare(dphy->psm_clk);
+ 
++	/* Stop TX state machine. */
++	reg = readl(dphy->regs + DPHY_CMN_SSM);
++	writel(reg & ~DPHY_CMN_SSM_EN, dphy->regs + DPHY_CMN_SSM);
++
++	dphy->is_powered = false;
++
+ 	return 0;
+ }
+ 
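The J721E hooks now poll for PLL lock and O_CMN_READY from power-on, after the state machine has been started, instead of from set_pll_cfg() where the SSM was not yet enabled and the polls could never succeed. readl_poll_timeout() is the standard helper here: it re-reads a register until a condition holds or a timeout elapses, returning 0 or -ETIMEDOUT. A usage sketch with placeholder register and bit names:

#include <linux/bits.h>
#include <linux/iopoll.h>

#define PLL_CTRL	0x10		/* placeholder offset */
#define PLL_LOCK	BIT(0)		/* placeholder status bit */

static int wait_pll_locked(void __iomem *regs)
{
	u32 status;

	/* Re-check every 10 us, give up after 1000 us. */
	return readl_poll_timeout(regs + PLL_CTRL, status,
				  status & PLL_LOCK, 10, 1000);
}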
+diff --git a/drivers/usb/gadget/function/f_acm.c b/drivers/usb/gadget/function/f_acm.c
+index 7061720b9732e4..106046e17c4e11 100644
+--- a/drivers/usb/gadget/function/f_acm.c
++++ b/drivers/usb/gadget/function/f_acm.c
+@@ -11,12 +11,15 @@
+ 
+ /* #define VERBOSE_DEBUG */
+ 
++#include <linux/cleanup.h>
+ #include <linux/slab.h>
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/device.h>
+ #include <linux/err.h>
+ 
++#include <linux/usb/gadget.h>
++
+ #include "u_serial.h"
+ 
+ 
+@@ -613,6 +616,7 @@ acm_bind(struct usb_configuration *c, struct usb_function *f)
+ 	struct usb_string	*us;
+ 	int			status;
+ 	struct usb_ep		*ep;
++	struct usb_request	*request __free(free_usb_request) = NULL;
+ 
+ 	/* REVISIT might want instance-specific strings to help
+ 	 * distinguish instances ...
+@@ -630,7 +634,7 @@ acm_bind(struct usb_configuration *c, struct usb_function *f)
+ 	/* allocate instance-specific interface IDs, and patch descriptors */
+ 	status = usb_interface_id(c, f);
+ 	if (status < 0)
+-		goto fail;
++		return status;
+ 	acm->ctrl_id = status;
+ 	acm_iad_descriptor.bFirstInterface = status;
+ 
+@@ -639,43 +643,41 @@ acm_bind(struct usb_configuration *c, struct usb_function *f)
+ 
+ 	status = usb_interface_id(c, f);
+ 	if (status < 0)
+-		goto fail;
++		return status;
+ 	acm->data_id = status;
+ 
+ 	acm_data_interface_desc.bInterfaceNumber = status;
+ 	acm_union_desc.bSlaveInterface0 = status;
+ 	acm_call_mgmt_descriptor.bDataInterface = status;
+ 
+-	status = -ENODEV;
+-
+ 	/* allocate instance-specific endpoints */
+ 	ep = usb_ep_autoconfig(cdev->gadget, &acm_fs_in_desc);
+ 	if (!ep)
+-		goto fail;
++		return -ENODEV;
+ 	acm->port.in = ep;
+ 
+ 	ep = usb_ep_autoconfig(cdev->gadget, &acm_fs_out_desc);
+ 	if (!ep)
+-		goto fail;
++		return -ENODEV;
+ 	acm->port.out = ep;
+ 
+ 	ep = usb_ep_autoconfig(cdev->gadget, &acm_fs_notify_desc);
+ 	if (!ep)
+-		goto fail;
++		return -ENODEV;
+ 	acm->notify = ep;
+ 
+ 	acm_iad_descriptor.bFunctionProtocol = acm->bInterfaceProtocol;
+ 	acm_control_interface_desc.bInterfaceProtocol = acm->bInterfaceProtocol;
+ 
+ 	/* allocate notification */
+-	acm->notify_req = gs_alloc_req(ep,
+-			sizeof(struct usb_cdc_notification) + 2,
+-			GFP_KERNEL);
+-	if (!acm->notify_req)
+-		goto fail;
++	request = gs_alloc_req(ep,
++			       sizeof(struct usb_cdc_notification) + 2,
++			       GFP_KERNEL);
++	if (!request)
++		return -ENODEV;
+ 
+-	acm->notify_req->complete = acm_cdc_notify_complete;
+-	acm->notify_req->context = acm;
++	request->complete = acm_cdc_notify_complete;
++	request->context = acm;
+ 
+ 	/* support all relevant hardware speeds... we expect that when
+ 	 * hardware is dual speed, all bulk-capable endpoints work at
+@@ -692,7 +694,9 @@ acm_bind(struct usb_configuration *c, struct usb_function *f)
+ 	status = usb_assign_descriptors(f, acm_fs_function, acm_hs_function,
+ 			acm_ss_function, acm_ss_function);
+ 	if (status)
+-		goto fail;
++		return status;
++
++	acm->notify_req = no_free_ptr(request);
+ 
+ 	dev_dbg(&cdev->gadget->dev,
+ 		"acm ttyGS%d: IN/%s OUT/%s NOTIFY/%s\n",
+@@ -700,14 +704,6 @@ acm_bind(struct usb_configuration *c, struct usb_function *f)
+ 		acm->port.in->name, acm->port.out->name,
+ 		acm->notify->name);
+ 	return 0;
+-
+-fail:
+-	if (acm->notify_req)
+-		gs_free_req(acm->notify, acm->notify_req);
+-
+-	ERROR(cdev, "%s/%p: can't bind, err %d\n", f->name, f, status);
+-
+-	return status;
+ }
+ 
+ static void acm_unbind(struct usb_configuration *c, struct usb_function *f)
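All four gadget functions converted in this patch move their bind() error handling to scope-based cleanup from <linux/cleanup.h>: a local annotated with __free() is released automatically on any early return, and no_free_ptr() transfers ownership out of the guard once the last failure point is behind. The free_usb_request guard itself is assumed to come with this series (see the udc/core.c hunk further down); the idiom with the stock kfree guard looks like this:

#include <linux/cleanup.h>
#include <linux/slab.h>

struct widget { int cfg; };

static int widget_setup(struct widget **out)
{
	struct widget *w __free(kfree) = kzalloc(sizeof(*w), GFP_KERNEL);

	if (!w)
		return -ENOMEM;

	w->cfg = 1;
	if (w->cfg < 0)		/* any early return frees w automatically */
		return -EINVAL;

	*out = no_free_ptr(w);	/* success: ownership passes to the caller */
	return 0;
}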
+diff --git a/drivers/usb/gadget/function/f_ecm.c b/drivers/usb/gadget/function/f_ecm.c
+index 549efc84dd8321..123c03a0850995 100644
+--- a/drivers/usb/gadget/function/f_ecm.c
++++ b/drivers/usb/gadget/function/f_ecm.c
+@@ -8,12 +8,15 @@
+ 
+ /* #define VERBOSE_DEBUG */
+ 
++#include <linux/cleanup.h>
+ #include <linux/slab.h>
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/device.h>
+ #include <linux/etherdevice.h>
+ 
++#include <linux/usb/gadget.h>
++
+ #include "u_ether.h"
+ #include "u_ether_configfs.h"
+ #include "u_ecm.h"
+@@ -678,6 +681,7 @@ ecm_bind(struct usb_configuration *c, struct usb_function *f)
+ 	struct usb_ep		*ep;
+ 
+ 	struct f_ecm_opts	*ecm_opts;
++	struct usb_request	*request __free(free_usb_request) = NULL;
+ 
+ 	if (!can_support_ecm(cdev->gadget))
+ 		return -EINVAL;
+@@ -711,7 +715,7 @@ ecm_bind(struct usb_configuration *c, struct usb_function *f)
+ 	/* allocate instance-specific interface IDs */
+ 	status = usb_interface_id(c, f);
+ 	if (status < 0)
+-		goto fail;
++		return status;
+ 	ecm->ctrl_id = status;
+ 	ecm_iad_descriptor.bFirstInterface = status;
+ 
+@@ -720,24 +724,22 @@ ecm_bind(struct usb_configuration *c, struct usb_function *f)
+ 
+ 	status = usb_interface_id(c, f);
+ 	if (status < 0)
+-		goto fail;
++		return status;
+ 	ecm->data_id = status;
+ 
+ 	ecm_data_nop_intf.bInterfaceNumber = status;
+ 	ecm_data_intf.bInterfaceNumber = status;
+ 	ecm_union_desc.bSlaveInterface0 = status;
+ 
+-	status = -ENODEV;
+-
+ 	/* allocate instance-specific endpoints */
+ 	ep = usb_ep_autoconfig(cdev->gadget, &fs_ecm_in_desc);
+ 	if (!ep)
+-		goto fail;
++		return -ENODEV;
+ 	ecm->port.in_ep = ep;
+ 
+ 	ep = usb_ep_autoconfig(cdev->gadget, &fs_ecm_out_desc);
+ 	if (!ep)
+-		goto fail;
++		return -ENODEV;
+ 	ecm->port.out_ep = ep;
+ 
+ 	/* NOTE:  a status/notification endpoint is *OPTIONAL* but we
+@@ -746,20 +748,18 @@ ecm_bind(struct usb_configuration *c, struct usb_function *f)
+ 	 */
+ 	ep = usb_ep_autoconfig(cdev->gadget, &fs_ecm_notify_desc);
+ 	if (!ep)
+-		goto fail;
++		return -ENODEV;
+ 	ecm->notify = ep;
+ 
+-	status = -ENOMEM;
+-
+ 	/* allocate notification request and buffer */
+-	ecm->notify_req = usb_ep_alloc_request(ep, GFP_KERNEL);
+-	if (!ecm->notify_req)
+-		goto fail;
+-	ecm->notify_req->buf = kmalloc(ECM_STATUS_BYTECOUNT, GFP_KERNEL);
+-	if (!ecm->notify_req->buf)
+-		goto fail;
+-	ecm->notify_req->context = ecm;
+-	ecm->notify_req->complete = ecm_notify_complete;
++	request = usb_ep_alloc_request(ep, GFP_KERNEL);
++	if (!request)
++		return -ENOMEM;
++	request->buf = kmalloc(ECM_STATUS_BYTECOUNT, GFP_KERNEL);
++	if (!request->buf)
++		return -ENOMEM;
++	request->context = ecm;
++	request->complete = ecm_notify_complete;
+ 
+ 	/* support all relevant hardware speeds... we expect that when
+ 	 * hardware is dual speed, all bulk-capable endpoints work at
+@@ -778,7 +778,7 @@ ecm_bind(struct usb_configuration *c, struct usb_function *f)
+ 	status = usb_assign_descriptors(f, ecm_fs_function, ecm_hs_function,
+ 			ecm_ss_function, ecm_ss_function);
+ 	if (status)
+-		goto fail;
++		return status;
+ 
+ 	/* NOTE:  all that is done without knowing or caring about
+ 	 * the network link ... which is unavailable to this code
+@@ -788,20 +788,12 @@ ecm_bind(struct usb_configuration *c, struct usb_function *f)
+ 	ecm->port.open = ecm_open;
+ 	ecm->port.close = ecm_close;
+ 
++	ecm->notify_req = no_free_ptr(request);
++
+ 	DBG(cdev, "CDC Ethernet: IN/%s OUT/%s NOTIFY/%s\n",
+ 			ecm->port.in_ep->name, ecm->port.out_ep->name,
+ 			ecm->notify->name);
+ 	return 0;
+-
+-fail:
+-	if (ecm->notify_req) {
+-		kfree(ecm->notify_req->buf);
+-		usb_ep_free_request(ecm->notify, ecm->notify_req);
+-	}
+-
+-	ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);
+-
+-	return status;
+ }
+ 
+ static inline struct f_ecm_opts *to_f_ecm_opts(struct config_item *item)
+diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
+index 8e761249d672c7..3afc9a622086c2 100644
+--- a/drivers/usb/gadget/function/f_ncm.c
++++ b/drivers/usb/gadget/function/f_ncm.c
+@@ -11,6 +11,7 @@
+  * Copyright (C) 2008 Nokia Corporation
+  */
+ 
++#include <linux/cleanup.h>
+ #include <linux/kernel.h>
+ #include <linux/interrupt.h>
+ #include <linux/module.h>
+@@ -19,6 +20,7 @@
+ #include <linux/crc32.h>
+ 
+ #include <linux/usb/cdc.h>
++#include <linux/usb/gadget.h>
+ 
+ #include "u_ether.h"
+ #include "u_ether_configfs.h"
+@@ -1435,18 +1437,18 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
+ 	struct usb_ep		*ep;
+ 	struct f_ncm_opts	*ncm_opts;
+ 
++	struct usb_os_desc_table	*os_desc_table __free(kfree) = NULL;
++	struct usb_request		*request __free(free_usb_request) = NULL;
++
+ 	if (!can_support_ecm(cdev->gadget))
+ 		return -EINVAL;
+ 
+ 	ncm_opts = container_of(f->fi, struct f_ncm_opts, func_inst);
+ 
+ 	if (cdev->use_os_string) {
+-		f->os_desc_table = kzalloc(sizeof(*f->os_desc_table),
+-					   GFP_KERNEL);
+-		if (!f->os_desc_table)
++		os_desc_table = kzalloc(sizeof(*os_desc_table), GFP_KERNEL);
++		if (!os_desc_table)
+ 			return -ENOMEM;
+-		f->os_desc_n = 1;
+-		f->os_desc_table[0].os_desc = &ncm_opts->ncm_os_desc;
+ 	}
+ 
+ 	mutex_lock(&ncm_opts->lock);
+@@ -1458,16 +1460,15 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
+ 	mutex_unlock(&ncm_opts->lock);
+ 
+ 	if (status)
+-		goto fail;
++		return status;
+ 
+ 	ncm_opts->bound = true;
+ 
+ 	us = usb_gstrings_attach(cdev, ncm_strings,
+ 				 ARRAY_SIZE(ncm_string_defs));
+-	if (IS_ERR(us)) {
+-		status = PTR_ERR(us);
+-		goto fail;
+-	}
++	if (IS_ERR(us))
++		return PTR_ERR(us);
++
+ 	ncm_control_intf.iInterface = us[STRING_CTRL_IDX].id;
+ 	ncm_data_nop_intf.iInterface = us[STRING_DATA_IDX].id;
+ 	ncm_data_intf.iInterface = us[STRING_DATA_IDX].id;
+@@ -1477,20 +1478,16 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
+ 	/* allocate instance-specific interface IDs */
+ 	status = usb_interface_id(c, f);
+ 	if (status < 0)
+-		goto fail;
++		return status;
+ 	ncm->ctrl_id = status;
+ 	ncm_iad_desc.bFirstInterface = status;
+ 
+ 	ncm_control_intf.bInterfaceNumber = status;
+ 	ncm_union_desc.bMasterInterface0 = status;
+ 
+-	if (cdev->use_os_string)
+-		f->os_desc_table[0].if_id =
+-			ncm_iad_desc.bFirstInterface;
+-
+ 	status = usb_interface_id(c, f);
+ 	if (status < 0)
+-		goto fail;
++		return status;
+ 	ncm->data_id = status;
+ 
+ 	ncm_data_nop_intf.bInterfaceNumber = status;
+@@ -1499,35 +1496,31 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
+ 
+ 	ecm_desc.wMaxSegmentSize = cpu_to_le16(ncm_opts->max_segment_size);
+ 
+-	status = -ENODEV;
+-
+ 	/* allocate instance-specific endpoints */
+ 	ep = usb_ep_autoconfig(cdev->gadget, &fs_ncm_in_desc);
+ 	if (!ep)
+-		goto fail;
++		return -ENODEV;
+ 	ncm->port.in_ep = ep;
+ 
+ 	ep = usb_ep_autoconfig(cdev->gadget, &fs_ncm_out_desc);
+ 	if (!ep)
+-		goto fail;
++		return -ENODEV;
+ 	ncm->port.out_ep = ep;
+ 
+ 	ep = usb_ep_autoconfig(cdev->gadget, &fs_ncm_notify_desc);
+ 	if (!ep)
+-		goto fail;
++		return -ENODEV;
+ 	ncm->notify = ep;
+ 
+-	status = -ENOMEM;
+-
+ 	/* allocate notification request and buffer */
+-	ncm->notify_req = usb_ep_alloc_request(ep, GFP_KERNEL);
+-	if (!ncm->notify_req)
+-		goto fail;
+-	ncm->notify_req->buf = kmalloc(NCM_STATUS_BYTECOUNT, GFP_KERNEL);
+-	if (!ncm->notify_req->buf)
+-		goto fail;
+-	ncm->notify_req->context = ncm;
+-	ncm->notify_req->complete = ncm_notify_complete;
++	request = usb_ep_alloc_request(ep, GFP_KERNEL);
++	if (!request)
++		return -ENOMEM;
++	request->buf = kmalloc(NCM_STATUS_BYTECOUNT, GFP_KERNEL);
++	if (!request->buf)
++		return -ENOMEM;
++	request->context = ncm;
++	request->complete = ncm_notify_complete;
+ 
+ 	/*
+ 	 * support all relevant hardware speeds... we expect that when
+@@ -1547,7 +1540,7 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
+ 	status = usb_assign_descriptors(f, ncm_fs_function, ncm_hs_function,
+ 			ncm_ss_function, ncm_ss_function);
+ 	if (status)
+-		goto fail;
++		return status;
+ 
+ 	/*
+ 	 * NOTE:  all that is done without knowing or caring about
+@@ -1561,23 +1554,18 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
+ 	hrtimer_init(&ncm->task_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
+ 	ncm->task_timer.function = ncm_tx_timeout;
+ 
++	if (cdev->use_os_string) {
++		os_desc_table[0].os_desc = &ncm_opts->ncm_os_desc;
++		os_desc_table[0].if_id = ncm_iad_desc.bFirstInterface;
++		f->os_desc_table = no_free_ptr(os_desc_table);
++		f->os_desc_n = 1;
++	}
++	ncm->notify_req = no_free_ptr(request);
++
+ 	DBG(cdev, "CDC Network: IN/%s OUT/%s NOTIFY/%s\n",
+ 			ncm->port.in_ep->name, ncm->port.out_ep->name,
+ 			ncm->notify->name);
+ 	return 0;
+-
+-fail:
+-	kfree(f->os_desc_table);
+-	f->os_desc_n = 0;
+-
+-	if (ncm->notify_req) {
+-		kfree(ncm->notify_req->buf);
+-		usb_ep_free_request(ncm->notify, ncm->notify_req);
+-	}
+-
+-	ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);
+-
+-	return status;
+ }
+ 
+ static inline struct f_ncm_opts *to_f_ncm_opts(struct config_item *item)
+diff --git a/drivers/usb/gadget/function/f_rndis.c b/drivers/usb/gadget/function/f_rndis.c
+index 7cec19d65fb534..7451e7cb7a8523 100644
+--- a/drivers/usb/gadget/function/f_rndis.c
++++ b/drivers/usb/gadget/function/f_rndis.c
+@@ -19,6 +19,8 @@
+ 
+ #include <linux/atomic.h>
+ 
++#include <linux/usb/gadget.h>
++
+ #include "u_ether.h"
+ #include "u_ether_configfs.h"
+ #include "u_rndis.h"
+@@ -662,6 +664,8 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f)
+ 	struct usb_ep		*ep;
+ 
+ 	struct f_rndis_opts *rndis_opts;
++	struct usb_os_desc_table        *os_desc_table __free(kfree) = NULL;
++	struct usb_request		*request __free(free_usb_request) = NULL;
+ 
+ 	if (!can_support_rndis(c))
+ 		return -EINVAL;
+@@ -669,12 +673,9 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f)
+ 	rndis_opts = container_of(f->fi, struct f_rndis_opts, func_inst);
+ 
+ 	if (cdev->use_os_string) {
+-		f->os_desc_table = kzalloc(sizeof(*f->os_desc_table),
+-					   GFP_KERNEL);
+-		if (!f->os_desc_table)
++		os_desc_table = kzalloc(sizeof(*os_desc_table), GFP_KERNEL);
++		if (!os_desc_table)
+ 			return -ENOMEM;
+-		f->os_desc_n = 1;
+-		f->os_desc_table[0].os_desc = &rndis_opts->rndis_os_desc;
+ 	}
+ 
+ 	rndis_iad_descriptor.bFunctionClass = rndis_opts->class;
+@@ -692,16 +693,14 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f)
+ 		gether_set_gadget(rndis_opts->net, cdev->gadget);
+ 		status = gether_register_netdev(rndis_opts->net);
+ 		if (status)
+-			goto fail;
++			return status;
+ 		rndis_opts->bound = true;
+ 	}
+ 
+ 	us = usb_gstrings_attach(cdev, rndis_strings,
+ 				 ARRAY_SIZE(rndis_string_defs));
+-	if (IS_ERR(us)) {
+-		status = PTR_ERR(us);
+-		goto fail;
+-	}
++	if (IS_ERR(us))
++		return PTR_ERR(us);
+ 	rndis_control_intf.iInterface = us[0].id;
+ 	rndis_data_intf.iInterface = us[1].id;
+ 	rndis_iad_descriptor.iFunction = us[2].id;
+@@ -709,36 +708,30 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f)
+ 	/* allocate instance-specific interface IDs */
+ 	status = usb_interface_id(c, f);
+ 	if (status < 0)
+-		goto fail;
++		return status;
+ 	rndis->ctrl_id = status;
+ 	rndis_iad_descriptor.bFirstInterface = status;
+ 
+ 	rndis_control_intf.bInterfaceNumber = status;
+ 	rndis_union_desc.bMasterInterface0 = status;
+ 
+-	if (cdev->use_os_string)
+-		f->os_desc_table[0].if_id =
+-			rndis_iad_descriptor.bFirstInterface;
+-
+ 	status = usb_interface_id(c, f);
+ 	if (status < 0)
+-		goto fail;
++		return status;
+ 	rndis->data_id = status;
+ 
+ 	rndis_data_intf.bInterfaceNumber = status;
+ 	rndis_union_desc.bSlaveInterface0 = status;
+ 
+-	status = -ENODEV;
+-
+ 	/* allocate instance-specific endpoints */
+ 	ep = usb_ep_autoconfig(cdev->gadget, &fs_in_desc);
+ 	if (!ep)
+-		goto fail;
++		return -ENODEV;
+ 	rndis->port.in_ep = ep;
+ 
+ 	ep = usb_ep_autoconfig(cdev->gadget, &fs_out_desc);
+ 	if (!ep)
+-		goto fail;
++		return -ENODEV;
+ 	rndis->port.out_ep = ep;
+ 
+ 	/* NOTE:  a status/notification endpoint is, strictly speaking,
+@@ -747,21 +740,19 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f)
+ 	 */
+ 	ep = usb_ep_autoconfig(cdev->gadget, &fs_notify_desc);
+ 	if (!ep)
+-		goto fail;
++		return -ENODEV;
+ 	rndis->notify = ep;
+ 
+-	status = -ENOMEM;
+-
+ 	/* allocate notification request and buffer */
+-	rndis->notify_req = usb_ep_alloc_request(ep, GFP_KERNEL);
+-	if (!rndis->notify_req)
+-		goto fail;
+-	rndis->notify_req->buf = kmalloc(STATUS_BYTECOUNT, GFP_KERNEL);
+-	if (!rndis->notify_req->buf)
+-		goto fail;
+-	rndis->notify_req->length = STATUS_BYTECOUNT;
+-	rndis->notify_req->context = rndis;
+-	rndis->notify_req->complete = rndis_response_complete;
++	request = usb_ep_alloc_request(ep, GFP_KERNEL);
++	if (!request)
++		return -ENOMEM;
++	request->buf = kmalloc(STATUS_BYTECOUNT, GFP_KERNEL);
++	if (!request->buf)
++		return -ENOMEM;
++	request->length = STATUS_BYTECOUNT;
++	request->context = rndis;
++	request->complete = rndis_response_complete;
+ 
+ 	/* support all relevant hardware speeds... we expect that when
+ 	 * hardware is dual speed, all bulk-capable endpoints work at
+@@ -778,7 +769,7 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f)
+ 	status = usb_assign_descriptors(f, eth_fs_function, eth_hs_function,
+ 			eth_ss_function, eth_ss_function);
+ 	if (status)
+-		goto fail;
++		return status;
+ 
+ 	rndis->port.open = rndis_open;
+ 	rndis->port.close = rndis_close;
+@@ -789,9 +780,18 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f)
+ 	if (rndis->manufacturer && rndis->vendorID &&
+ 			rndis_set_param_vendor(rndis->params, rndis->vendorID,
+ 					       rndis->manufacturer)) {
+-		status = -EINVAL;
+-		goto fail_free_descs;
++		usb_free_all_descriptors(f);
++		return -EINVAL;
++	}
++
++	if (cdev->use_os_string) {
++		os_desc_table[0].os_desc = &rndis_opts->rndis_os_desc;
++		os_desc_table[0].if_id = rndis_iad_descriptor.bFirstInterface;
++		f->os_desc_table = no_free_ptr(os_desc_table);
++		f->os_desc_n = 1;
++
+ 	}
++	rndis->notify_req = no_free_ptr(request);
+ 
+ 	/* NOTE:  all that is done without knowing or caring about
+ 	 * the network link ... which is unavailable to this code
+@@ -802,21 +802,6 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f)
+ 			rndis->port.in_ep->name, rndis->port.out_ep->name,
+ 			rndis->notify->name);
+ 	return 0;
+-
+-fail_free_descs:
+-	usb_free_all_descriptors(f);
+-fail:
+-	kfree(f->os_desc_table);
+-	f->os_desc_n = 0;
+-
+-	if (rndis->notify_req) {
+-		kfree(rndis->notify_req->buf);
+-		usb_ep_free_request(rndis->notify, rndis->notify_req);
+-	}
+-
+-	ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);
+-
+-	return status;
+ }
+ 
+ void rndis_borrow_net(struct usb_function_instance *f, struct net_device *net)
+diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
+index d709e24c1fd422..e3d63b8fa0f4c1 100644
+--- a/drivers/usb/gadget/udc/core.c
++++ b/drivers/usb/gadget/udc/core.c
+@@ -194,6 +194,9 @@ struct usb_request *usb_ep_alloc_request(struct usb_ep *ep,
+ 
+ 	req = ep->ops->alloc_request(ep, gfp_flags);
+ 
++	if (req)
++		req->ep = ep;
++
+ 	trace_usb_ep_alloc_request(ep, req, req ? 0 : -ENOMEM);
+ 
+ 	return req;
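Storing the owning endpoint in the request is what makes a generic cleanup guard possible: the free path can then be derived from the pointer alone. Assuming this series also adds the ep back-pointer to struct usb_request, the matching guard is expected to be a one-liner along these lines (a sketch, not necessarily the exact upstream definition):

#include <linux/cleanup.h>
#include <linux/usb/gadget.h>

DEFINE_FREE(free_usb_request, struct usb_request *,
	    if (_T) usb_ep_free_request(_T->ep, _T))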
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index b310a07a84adfa..86a76efb21f637 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -962,7 +962,7 @@ static void btrfs_readahead_expand(struct readahead_control *ractl,
+ {
+ 	const u64 ra_pos = readahead_pos(ractl);
+ 	const u64 ra_end = ra_pos + readahead_length(ractl);
+-	const u64 em_end = em->start + em->ram_bytes;
++	const u64 em_end = em->start + em->len;
+ 
+ 	/* No expansion for holes and inline extents. */
+ 	if (em->disk_bytenr > EXTENT_MAP_LAST_BYTE)
+diff --git a/fs/btrfs/free-space-tree.c b/fs/btrfs/free-space-tree.c
+index 51f286d5d00ab3..8e31af036e75e1 100644
+--- a/fs/btrfs/free-space-tree.c
++++ b/fs/btrfs/free-space-tree.c
+@@ -1108,14 +1108,15 @@ static int populate_free_space_tree(struct btrfs_trans_handle *trans,
+ 	 * If ret is 1 (no key found), it means this is an empty block group,
+ 	 * without any extents allocated from it and there's no block group
+ 	 * item (key BTRFS_BLOCK_GROUP_ITEM_KEY) located in the extent tree
+-	 * because we are using the block group tree feature, so block group
+-	 * items are stored in the block group tree. It also means there are no
+-	 * extents allocated for block groups with a start offset beyond this
+-	 * block group's end offset (this is the last, highest, block group).
++	 * because we are using the block group tree feature (so block group
++	 * items are stored in the block group tree) or this is a new block
++	 * group created in the current transaction and its block group item
++	 * was not yet inserted in the extent tree (that happens in
++	 * btrfs_create_pending_block_groups() -> insert_block_group_item()).
++	 * It also means there are no extents allocated for block groups with a
++	 * start offset beyond this block group's end offset (this is the last,
++	 * highest, block group).
+ 	 */
+-	if (!btrfs_fs_compat_ro(trans->fs_info, BLOCK_GROUP_TREE))
+-		ASSERT(ret == 0);
+-
+ 	start = block_group->start;
+ 	end = block_group->start + block_group->length;
+ 	while (ret == 0) {
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 1706f6d9b12e68..03c3b5d0abbe4f 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -3852,7 +3852,7 @@ static long btrfs_ioctl_qgroup_assign(struct file *file, void __user *arg)
+ 		prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL);
+ 		if (!prealloc) {
+ 			ret = -ENOMEM;
+-			goto drop_write;
++			goto out;
+ 		}
+ 	}
+ 
+diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
+index 79eb984041dd69..0d5a3846811ad3 100644
+--- a/fs/btrfs/relocation.c
++++ b/fs/btrfs/relocation.c
+@@ -3906,6 +3906,7 @@ static noinline_for_stack struct inode *create_reloc_inode(
+ /*
+  * Mark start of chunk relocation that is cancellable. Check if the cancellation
+  * has been requested meanwhile and don't start in that case.
++ * NOTE: if this returns an error, reloc_chunk_end() must not be called.
+  *
+  * Return:
+  *   0             success
+@@ -3922,10 +3923,8 @@ static int reloc_chunk_start(struct btrfs_fs_info *fs_info)
+ 
+ 	if (atomic_read(&fs_info->reloc_cancel_req) > 0) {
+ 		btrfs_info(fs_info, "chunk relocation canceled on start");
+-		/*
+-		 * On cancel, clear all requests but let the caller mark
+-		 * the end after cleanup operations.
+-		 */
++		/* On cancel, clear all requests. */
++		clear_and_wake_up_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags);
+ 		atomic_set(&fs_info->reloc_cancel_req, 0);
+ 		return -ECANCELED;
+ 	}
+@@ -3934,9 +3933,11 @@ static int reloc_chunk_start(struct btrfs_fs_info *fs_info)
+ 
+ /*
+  * Mark end of chunk relocation that is cancellable and wake any waiters.
++ * NOTE: call only if a previous call to reloc_chunk_start() succeeded.
+  */
+ static void reloc_chunk_end(struct btrfs_fs_info *fs_info)
+ {
++	ASSERT(test_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags));
+ 	/* Requested after start, clear bit first so any waiters can continue */
+ 	if (atomic_read(&fs_info->reloc_cancel_req) > 0)
+ 		btrfs_info(fs_info, "chunk relocation canceled during operation");
+@@ -4145,9 +4146,9 @@ int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
+ 	if (err && rw)
+ 		btrfs_dec_block_group_ro(rc->block_group);
+ 	iput(rc->data_inode);
++	reloc_chunk_end(fs_info);
+ out_put_bg:
+ 	btrfs_put_block_group(bg);
+-	reloc_chunk_end(fs_info);
+ 	free_reloc_control(rc);
+ 	return err;
+ }
+@@ -4331,8 +4332,8 @@ int btrfs_recover_relocation(struct btrfs_fs_info *fs_info)
+ 		ret = ret2;
+ out_unset:
+ 	unset_reloc_control(rc);
+-out_end:
+ 	reloc_chunk_end(fs_info);
++out_end:
+ 	free_reloc_control(rc);
+ out:
+ 	free_reloc_roots(&reloc_roots);
+diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
+index 8e8edfe0c6190e..4966b4f5a7d245 100644
+--- a/fs/btrfs/zoned.c
++++ b/fs/btrfs/zoned.c
+@@ -1664,7 +1664,7 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
+ 	    !fs_info->stripe_root) {
+ 		btrfs_err(fs_info, "zoned: data %s needs raid-stripe-tree",
+ 			  btrfs_bg_type_to_raid_name(map->type));
+-		return -EINVAL;
++		ret = -EINVAL;
+ 	}
+ 
+ 	if (cache->alloc_offset > cache->zone_capacity) {
+diff --git a/fs/dax.c b/fs/dax.c
+index 21b47402b3dca4..756400f2a62573 100644
+--- a/fs/dax.c
++++ b/fs/dax.c
+@@ -1578,7 +1578,7 @@ dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
+ 	if (iov_iter_rw(iter) == WRITE) {
+ 		lockdep_assert_held_write(&iomi.inode->i_rwsem);
+ 		iomi.flags |= IOMAP_WRITE;
+-	} else {
++	} else if (!sb_rdonly(iomi.inode->i_sb)) {
+ 		lockdep_assert_held(&iomi.inode->i_rwsem);
+ 	}
+ 
+diff --git a/fs/dcache.c b/fs/dcache.c
+index 0f6b16ba30d082..d7814142ba7db3 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -2475,13 +2475,21 @@ struct dentry *d_alloc_parallel(struct dentry *parent,
+ 	unsigned int hash = name->hash;
+ 	struct hlist_bl_head *b = in_lookup_hash(parent, hash);
+ 	struct hlist_bl_node *node;
+-	struct dentry *new = d_alloc(parent, name);
++	struct dentry *new = __d_alloc(parent->d_sb, name);
+ 	struct dentry *dentry;
+ 	unsigned seq, r_seq, d_seq;
+ 
+ 	if (unlikely(!new))
+ 		return ERR_PTR(-ENOMEM);
+ 
++	new->d_flags |= DCACHE_PAR_LOOKUP;
++	spin_lock(&parent->d_lock);
++	new->d_parent = dget_dlock(parent);
++	hlist_add_head(&new->d_sib, &parent->d_children);
++	if (parent->d_flags & DCACHE_DISCONNECTED)
++		new->d_flags |= DCACHE_DISCONNECTED;
++	spin_unlock(&parent->d_lock);
++
+ retry:
+ 	rcu_read_lock();
+ 	seq = smp_load_acquire(&parent->d_inode->i_dir_seq);
+@@ -2565,8 +2573,6 @@ struct dentry *d_alloc_parallel(struct dentry *parent,
+ 		return dentry;
+ 	}
+ 	rcu_read_unlock();
+-	/* we can't take ->d_lock here; it's OK, though. */
+-	new->d_flags |= DCACHE_PAR_LOOKUP;
+ 	new->d_wait = wq;
+ 	hlist_bl_add_head(&new->d_u.d_in_lookup_hash, b);
+ 	hlist_bl_unlock(b);
+diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
+index da4a8245638364..9e7275dd901f49 100644
+--- a/fs/ext4/ext4_jbd2.c
++++ b/fs/ext4/ext4_jbd2.c
+@@ -276,9 +276,16 @@ int __ext4_forget(const char *where, unsigned int line, handle_t *handle,
+ 		  bh, is_metadata, inode->i_mode,
+ 		  test_opt(inode->i_sb, DATA_FLAGS));
+ 
+-	/* In the no journal case, we can just do a bforget and return */
++	/*
++	 * In the no journal case, we should wait for any ongoing buffer
++	 * I/O to complete and then do a forget.
++	 */
+ 	if (!ext4_handle_valid(handle)) {
+-		bforget(bh);
++		if (bh) {
++			clear_buffer_dirty(bh);
++			wait_on_buffer(bh);
++			__bforget(bh);
++		}
+ 		return 0;
+ 	}
+ 
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 6af57242559658..4ad34eba00a770 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -4904,6 +4904,14 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
+ 	}
+ 	ei->i_flags = le32_to_cpu(raw_inode->i_flags);
+ 	ext4_set_inode_flags(inode, true);
++	/* Detect invalid flag combination - can't have both inline data and extents */
++	if (ext4_test_inode_flag(inode, EXT4_INODE_INLINE_DATA) &&
++	    ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
++		ext4_error_inode(inode, function, line, 0,
++			"inode has both inline data and extents flags");
++		ret = -EFSCORRUPTED;
++		goto bad_inode;
++	}
+ 	inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
+ 	ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
+ 	if (ext4_has_feature_64bit(sb))
+diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
+index 040c06dfb8c033..bbb29b6024382a 100644
+--- a/fs/f2fs/data.c
++++ b/fs/f2fs/data.c
+@@ -1508,8 +1508,8 @@ static bool f2fs_map_blocks_cached(struct inode *inode,
+ 		struct f2fs_dev_info *dev = &sbi->devs[bidx];
+ 
+ 		map->m_bdev = dev->bdev;
+-		map->m_pblk -= dev->start_blk;
+ 		map->m_len = min(map->m_len, dev->end_blk + 1 - map->m_pblk);
++		map->m_pblk -= dev->start_blk;
+ 	} else {
+ 		map->m_bdev = inode->i_sb->s_bdev;
+ 	}
+diff --git a/fs/hfsplus/unicode.c b/fs/hfsplus/unicode.c
+index 36b6cf2a3abba4..ebd326799f35ac 100644
+--- a/fs/hfsplus/unicode.c
++++ b/fs/hfsplus/unicode.c
+@@ -40,6 +40,18 @@ int hfsplus_strcasecmp(const struct hfsplus_unistr *s1,
+ 	p1 = s1->unicode;
+ 	p2 = s2->unicode;
+ 
++	if (len1 > HFSPLUS_MAX_STRLEN) {
++		len1 = HFSPLUS_MAX_STRLEN;
++		pr_err("invalid length %u has been corrected to %d\n",
++			be16_to_cpu(s1->length), len1);
++	}
++
++	if (len2 > HFSPLUS_MAX_STRLEN) {
++		len2 = HFSPLUS_MAX_STRLEN;
++		pr_err("invalid length %u has been corrected to %d\n",
++			be16_to_cpu(s2->length), len2);
++	}
++
+ 	while (1) {
+ 		c1 = c2 = 0;
+ 
+@@ -74,6 +86,18 @@ int hfsplus_strcmp(const struct hfsplus_unistr *s1,
+ 	p1 = s1->unicode;
+ 	p2 = s2->unicode;
+ 
++	if (len1 > HFSPLUS_MAX_STRLEN) {
++		len1 = HFSPLUS_MAX_STRLEN;
++		pr_err("invalid length %u has been corrected to %d\n",
++			be16_to_cpu(s1->length), len1);
++	}
++
++	if (len2 > HFSPLUS_MAX_STRLEN) {
++		len2 = HFSPLUS_MAX_STRLEN;
++		pr_err("invalid length %u has been corrected to %d\n",
++			be16_to_cpu(s2->length), len2);
++	}
++
+ 	for (len = min(len1, len2); len > 0; len--) {
+ 		c1 = be16_to_cpu(*p1);
+ 		c2 = be16_to_cpu(*p2);
+diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
+index f440110df93a9b..ae43920ce395c1 100644
+--- a/fs/jbd2/transaction.c
++++ b/fs/jbd2/transaction.c
+@@ -1663,6 +1663,7 @@ int jbd2_journal_forget(handle_t *handle, struct buffer_head *bh)
+ 	int drop_reserve = 0;
+ 	int err = 0;
+ 	int was_modified = 0;
++	int wait_for_writeback = 0;
+ 
+ 	if (is_handle_aborted(handle))
+ 		return -EROFS;
+@@ -1786,18 +1787,22 @@ int jbd2_journal_forget(handle_t *handle, struct buffer_head *bh)
+ 		}
+ 
+ 		/*
+-		 * The buffer is still not written to disk, we should
+-		 * attach this buffer to current transaction so that the
+-		 * buffer can be checkpointed only after the current
+-		 * transaction commits.
++		 * The buffer has not yet been written to disk. We should
++		 * either clear the buffer or ensure that the ongoing I/O
++		 * has completed, and attach this buffer to the current
++		 * transaction so that the buffer can be checkpointed only
++		 * after the current transaction commits.
+ 		 */
+ 		clear_buffer_dirty(bh);
++		wait_for_writeback = 1;
+ 		__jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
+ 		spin_unlock(&journal->j_list_lock);
+ 	}
+ drop:
+ 	__brelse(bh);
+ 	spin_unlock(&jh->b_state_lock);
++	if (wait_for_writeback)
++		wait_on_buffer(bh);
+ 	jbd2_journal_put_journal_head(jh);
+ 	if (drop_reserve) {
+ 		/* no need to reserve log space for this block -bzzz */
+diff --git a/fs/nfsd/blocklayout.c b/fs/nfsd/blocklayout.c
+index 08a20e5bcf7fee..0822d8a119c6fa 100644
+--- a/fs/nfsd/blocklayout.c
++++ b/fs/nfsd/blocklayout.c
+@@ -118,7 +118,6 @@ nfsd4_block_commit_blocks(struct inode *inode, struct nfsd4_layoutcommit *lcp,
+ 		struct iomap *iomaps, int nr_iomaps)
+ {
+ 	struct timespec64 mtime = inode_get_mtime(inode);
+-	loff_t new_size = lcp->lc_last_wr + 1;
+ 	struct iattr iattr = { .ia_valid = 0 };
+ 	int error;
+ 
+@@ -128,9 +127,9 @@ nfsd4_block_commit_blocks(struct inode *inode, struct nfsd4_layoutcommit *lcp,
+ 	iattr.ia_valid |= ATTR_ATIME | ATTR_CTIME | ATTR_MTIME;
+ 	iattr.ia_atime = iattr.ia_ctime = iattr.ia_mtime = lcp->lc_mtime;
+ 
+-	if (new_size > i_size_read(inode)) {
++	if (lcp->lc_size_chg) {
+ 		iattr.ia_valid |= ATTR_SIZE;
+-		iattr.ia_size = new_size;
++		iattr.ia_size = lcp->lc_newsize;
+ 	}
+ 
+ 	error = inode->i_sb->s_export_op->commit_blocks(inode, iomaps,
+@@ -173,16 +172,20 @@ nfsd4_block_proc_getdeviceinfo(struct super_block *sb,
+ }
+ 
+ static __be32
+-nfsd4_block_proc_layoutcommit(struct inode *inode,
++nfsd4_block_proc_layoutcommit(struct inode *inode, struct svc_rqst *rqstp,
+ 		struct nfsd4_layoutcommit *lcp)
+ {
+ 	struct iomap *iomaps;
+ 	int nr_iomaps;
++	__be32 nfserr;
+ 
+-	nr_iomaps = nfsd4_block_decode_layoutupdate(lcp->lc_up_layout,
+-			lcp->lc_up_len, &iomaps, i_blocksize(inode));
+-	if (nr_iomaps < 0)
+-		return nfserrno(nr_iomaps);
++	rqstp->rq_arg = lcp->lc_up_layout;
++	svcxdr_init_decode(rqstp);
++
++	nfserr = nfsd4_block_decode_layoutupdate(&rqstp->rq_arg_stream,
++			&iomaps, &nr_iomaps, i_blocksize(inode));
++	if (nfserr != nfs_ok)
++		return nfserr;
+ 
+ 	return nfsd4_block_commit_blocks(inode, lcp, iomaps, nr_iomaps);
+ }
+@@ -311,16 +314,20 @@ nfsd4_scsi_proc_getdeviceinfo(struct super_block *sb,
+ 	return nfserrno(nfsd4_block_get_device_info_scsi(sb, clp, gdp));
+ }
+ static __be32
+-nfsd4_scsi_proc_layoutcommit(struct inode *inode,
++nfsd4_scsi_proc_layoutcommit(struct inode *inode, struct svc_rqst *rqstp,
+ 		struct nfsd4_layoutcommit *lcp)
+ {
+ 	struct iomap *iomaps;
+ 	int nr_iomaps;
++	__be32 nfserr;
++
++	rqstp->rq_arg = lcp->lc_up_layout;
++	svcxdr_init_decode(rqstp);
+ 
+-	nr_iomaps = nfsd4_scsi_decode_layoutupdate(lcp->lc_up_layout,
+-			lcp->lc_up_len, &iomaps, i_blocksize(inode));
+-	if (nr_iomaps < 0)
+-		return nfserrno(nr_iomaps);
++	nfserr = nfsd4_scsi_decode_layoutupdate(&rqstp->rq_arg_stream,
++			&iomaps, &nr_iomaps, i_blocksize(inode));
++	if (nfserr != nfs_ok)
++		return nfserr;
+ 
+ 	return nfsd4_block_commit_blocks(inode, lcp, iomaps, nr_iomaps);
+ }
+diff --git a/fs/nfsd/blocklayoutxdr.c b/fs/nfsd/blocklayoutxdr.c
+index ce78f74715eead..e50afe34073719 100644
+--- a/fs/nfsd/blocklayoutxdr.c
++++ b/fs/nfsd/blocklayoutxdr.c
+@@ -29,8 +29,7 @@ nfsd4_block_encode_layoutget(struct xdr_stream *xdr,
+ 	*p++ = cpu_to_be32(len);
+ 	*p++ = cpu_to_be32(1);		/* we always return a single extent */
+ 
+-	p = xdr_encode_opaque_fixed(p, &b->vol_id,
+-			sizeof(struct nfsd4_deviceid));
++	p = svcxdr_encode_deviceid4(p, &b->vol_id);
+ 	p = xdr_encode_hyper(p, b->foff);
+ 	p = xdr_encode_hyper(p, b->len);
+ 	p = xdr_encode_hyper(p, b->soff);
+@@ -112,64 +111,86 @@ nfsd4_block_encode_getdeviceinfo(struct xdr_stream *xdr,
+ 	return 0;
+ }
+ 
+-int
+-nfsd4_block_decode_layoutupdate(__be32 *p, u32 len, struct iomap **iomapp,
+-		u32 block_size)
++/**
++ * nfsd4_block_decode_layoutupdate - decode the block layout extent array
++ * @xdr: subbuf set to the encoded array
++ * @iomapp: pointer to store the decoded extent array
++ * @nr_iomapsp: pointer to store the number of extents
++ * @block_size: alignment of extent offset and length
++ *
++ * This function decodes the opaque field of the layoutupdate4 structure
++ * in a layoutcommit request for the block layout driver. The field is
++ * actually an array of extents sent by the client. It also checks that
++ * the file offset, storage offset and length of each extent are aligned
++ * to @block_size.
++ *
++ * Return values:
++ *   %nfs_ok: Successful decoding, @iomapp and @nr_iomapsp are valid
++ *   %nfserr_bad_xdr: The encoded array in @xdr is invalid
++ *   %nfserr_inval: An unaligned extent found
++ *   %nfserr_delay: Failed to allocate memory for @iomapp
++ */
++__be32
++nfsd4_block_decode_layoutupdate(struct xdr_stream *xdr, struct iomap **iomapp,
++		int *nr_iomapsp, u32 block_size)
+ {
+ 	struct iomap *iomaps;
+-	u32 nr_iomaps, i;
++	u32 nr_iomaps, expected, len, i;
++	__be32 nfserr;
+ 
+-	if (len < sizeof(u32)) {
+-		dprintk("%s: extent array too small: %u\n", __func__, len);
+-		return -EINVAL;
+-	}
+-	len -= sizeof(u32);
+-	if (len % PNFS_BLOCK_EXTENT_SIZE) {
+-		dprintk("%s: extent array invalid: %u\n", __func__, len);
+-		return -EINVAL;
+-	}
++	if (xdr_stream_decode_u32(xdr, &nr_iomaps))
++		return nfserr_bad_xdr;
+ 
+-	nr_iomaps = be32_to_cpup(p++);
+-	if (nr_iomaps != len / PNFS_BLOCK_EXTENT_SIZE) {
+-		dprintk("%s: extent array size mismatch: %u/%u\n",
+-			__func__, len, nr_iomaps);
+-		return -EINVAL;
+-	}
++	len = sizeof(__be32) + xdr_stream_remaining(xdr);
++	expected = sizeof(__be32) + nr_iomaps * PNFS_BLOCK_EXTENT_SIZE;
++	if (len != expected)
++		return nfserr_bad_xdr;
+ 
+ 	iomaps = kcalloc(nr_iomaps, sizeof(*iomaps), GFP_KERNEL);
+-	if (!iomaps) {
+-		dprintk("%s: failed to allocate extent array\n", __func__);
+-		return -ENOMEM;
+-	}
++	if (!iomaps)
++		return nfserr_delay;
+ 
+ 	for (i = 0; i < nr_iomaps; i++) {
+ 		struct pnfs_block_extent bex;
+ 
+-		memcpy(&bex.vol_id, p, sizeof(struct nfsd4_deviceid));
+-		p += XDR_QUADLEN(sizeof(struct nfsd4_deviceid));
++		if (nfsd4_decode_deviceid4(xdr, &bex.vol_id)) {
++			nfserr = nfserr_bad_xdr;
++			goto fail;
++		}
+ 
+-		p = xdr_decode_hyper(p, &bex.foff);
++		if (xdr_stream_decode_u64(xdr, &bex.foff)) {
++			nfserr = nfserr_bad_xdr;
++			goto fail;
++		}
+ 		if (bex.foff & (block_size - 1)) {
+-			dprintk("%s: unaligned offset 0x%llx\n",
+-				__func__, bex.foff);
++			nfserr = nfserr_inval;
++			goto fail;
++		}
++
++		if (xdr_stream_decode_u64(xdr, &bex.len)) {
++			nfserr = nfserr_bad_xdr;
+ 			goto fail;
+ 		}
+-		p = xdr_decode_hyper(p, &bex.len);
+ 		if (bex.len & (block_size - 1)) {
+-			dprintk("%s: unaligned length 0x%llx\n",
+-				__func__, bex.foff);
++			nfserr = nfserr_inval;
++			goto fail;
++		}
++
++		if (xdr_stream_decode_u64(xdr, &bex.soff)) {
++			nfserr = nfserr_bad_xdr;
+ 			goto fail;
+ 		}
+-		p = xdr_decode_hyper(p, &bex.soff);
+ 		if (bex.soff & (block_size - 1)) {
+-			dprintk("%s: unaligned disk offset 0x%llx\n",
+-				__func__, bex.soff);
++			nfserr = nfserr_inval;
++			goto fail;
++		}
++
++		if (xdr_stream_decode_u32(xdr, &bex.es)) {
++			nfserr = nfserr_bad_xdr;
+ 			goto fail;
+ 		}
+-		bex.es = be32_to_cpup(p++);
+ 		if (bex.es != PNFS_BLOCK_READWRITE_DATA) {
+-			dprintk("%s: incorrect extent state %d\n",
+-				__func__, bex.es);
++			nfserr = nfserr_inval;
+ 			goto fail;
+ 		}
+ 
+@@ -178,59 +199,79 @@ nfsd4_block_decode_layoutupdate(__be32 *p, u32 len, struct iomap **iomapp,
+ 	}
+ 
+ 	*iomapp = iomaps;
+-	return nr_iomaps;
++	*nr_iomapsp = nr_iomaps;
++	return nfs_ok;
+ fail:
+ 	kfree(iomaps);
+-	return -EINVAL;
++	return nfserr;
+ }
+ 
+-int
+-nfsd4_scsi_decode_layoutupdate(__be32 *p, u32 len, struct iomap **iomapp,
+-		u32 block_size)
++/**
++ * nfsd4_scsi_decode_layoutupdate - decode the scsi layout extent array
++ * @xdr: subbuf set to the encoded array
++ * @iomapp: pointer to store the decoded extent array
++ * @nr_iomapsp: pointer to store the number of extents
++ * @block_size: alignment of extent offset and length
++ *
++ * This function decodes the opaque field of the layoutupdate4 structure
++ * in a layoutcommit request for the scsi layout driver. The field is
++ * actually an array of extents sent by the client. It also checks that
++ * the offset and length of each extent are aligned to @block_size.
++ *
++ * Return values:
++ *   %nfs_ok: Successful decoding, @iomapp and @nr_iomapsp are valid
++ *   %nfserr_bad_xdr: The encoded array in @xdr is invalid
++ *   %nfserr_inval: An unaligned extent found
++ *   %nfserr_delay: Failed to allocate memory for @iomapp
++ */
++__be32
++nfsd4_scsi_decode_layoutupdate(struct xdr_stream *xdr, struct iomap **iomapp,
++		int *nr_iomapsp, u32 block_size)
+ {
+ 	struct iomap *iomaps;
+-	u32 nr_iomaps, expected, i;
++	u32 nr_iomaps, expected, len, i;
++	__be32 nfserr;
+ 
+-	if (len < sizeof(u32)) {
+-		dprintk("%s: extent array too small: %u\n", __func__, len);
+-		return -EINVAL;
+-	}
++	if (xdr_stream_decode_u32(xdr, &nr_iomaps))
++		return nfserr_bad_xdr;
+ 
+-	nr_iomaps = be32_to_cpup(p++);
++	len = sizeof(__be32) + xdr_stream_remaining(xdr);
+ 	expected = sizeof(__be32) + nr_iomaps * PNFS_SCSI_RANGE_SIZE;
+-	if (len != expected) {
+-		dprintk("%s: extent array size mismatch: %u/%u\n",
+-			__func__, len, expected);
+-		return -EINVAL;
+-	}
++	if (len != expected)
++		return nfserr_bad_xdr;
+ 
+ 	iomaps = kcalloc(nr_iomaps, sizeof(*iomaps), GFP_KERNEL);
+-	if (!iomaps) {
+-		dprintk("%s: failed to allocate extent array\n", __func__);
+-		return -ENOMEM;
+-	}
++	if (!iomaps)
++		return nfserr_delay;
+ 
+ 	for (i = 0; i < nr_iomaps; i++) {
+ 		u64 val;
+ 
+-		p = xdr_decode_hyper(p, &val);
++		if (xdr_stream_decode_u64(xdr, &val)) {
++			nfserr = nfserr_bad_xdr;
++			goto fail;
++		}
+ 		if (val & (block_size - 1)) {
+-			dprintk("%s: unaligned offset 0x%llx\n", __func__, val);
++			nfserr = nfserr_inval;
+ 			goto fail;
+ 		}
+ 		iomaps[i].offset = val;
+ 
+-		p = xdr_decode_hyper(p, &val);
++		if (xdr_stream_decode_u64(xdr, &val)) {
++			nfserr = nfserr_bad_xdr;
++			goto fail;
++		}
+ 		if (val & (block_size - 1)) {
+-			dprintk("%s: unaligned length 0x%llx\n", __func__, val);
++			nfserr = nfserr_inval;
+ 			goto fail;
+ 		}
+ 		iomaps[i].length = val;
+ 	}
+ 
+ 	*iomapp = iomaps;
+-	return nr_iomaps;
++	*nr_iomapsp = nr_iomaps;
++	return nfs_ok;
+ fail:
+ 	kfree(iomaps);
+-	return -EINVAL;
++	return nfserr;
+ }
+diff --git a/fs/nfsd/blocklayoutxdr.h b/fs/nfsd/blocklayoutxdr.h
+index 4e28ac8f112797..7d25ef689671f7 100644
+--- a/fs/nfsd/blocklayoutxdr.h
++++ b/fs/nfsd/blocklayoutxdr.h
+@@ -54,9 +54,9 @@ __be32 nfsd4_block_encode_getdeviceinfo(struct xdr_stream *xdr,
+ 		const struct nfsd4_getdeviceinfo *gdp);
+ __be32 nfsd4_block_encode_layoutget(struct xdr_stream *xdr,
+ 		const struct nfsd4_layoutget *lgp);
+-int nfsd4_block_decode_layoutupdate(__be32 *p, u32 len, struct iomap **iomapp,
+-		u32 block_size);
+-int nfsd4_scsi_decode_layoutupdate(__be32 *p, u32 len, struct iomap **iomapp,
+-		u32 block_size);
++__be32 nfsd4_block_decode_layoutupdate(struct xdr_stream *xdr,
++		struct iomap **iomapp, int *nr_iomapsp, u32 block_size);
++__be32 nfsd4_scsi_decode_layoutupdate(struct xdr_stream *xdr,
++		struct iomap **iomapp, int *nr_iomapsp, u32 block_size);
+ 
+ #endif /* _NFSD_BLOCKLAYOUTXDR_H */
+diff --git a/fs/nfsd/flexfilelayout.c b/fs/nfsd/flexfilelayout.c
+index 3ca5304440ff0a..3c4419da5e24c2 100644
+--- a/fs/nfsd/flexfilelayout.c
++++ b/fs/nfsd/flexfilelayout.c
+@@ -125,6 +125,13 @@ nfsd4_ff_proc_getdeviceinfo(struct super_block *sb, struct svc_rqst *rqstp,
+ 	return 0;
+ }
+ 
++static __be32
++nfsd4_ff_proc_layoutcommit(struct inode *inode, struct svc_rqst *rqstp,
++		struct nfsd4_layoutcommit *lcp)
++{
++	return nfs_ok;
++}
++
+ const struct nfsd4_layout_ops ff_layout_ops = {
+ 	.notify_types		=
+ 			NOTIFY_DEVICEID4_DELETE | NOTIFY_DEVICEID4_CHANGE,
+@@ -133,4 +140,5 @@ const struct nfsd4_layout_ops ff_layout_ops = {
+ 	.encode_getdeviceinfo	= nfsd4_ff_encode_getdeviceinfo,
+ 	.proc_layoutget		= nfsd4_ff_proc_layoutget,
+ 	.encode_layoutget	= nfsd4_ff_encode_layoutget,
++	.proc_layoutcommit	= nfsd4_ff_proc_layoutcommit,
+ };
+diff --git a/fs/nfsd/flexfilelayoutxdr.c b/fs/nfsd/flexfilelayoutxdr.c
+index aeb71c10ff1b96..f9f7e38cba13fb 100644
+--- a/fs/nfsd/flexfilelayoutxdr.c
++++ b/fs/nfsd/flexfilelayoutxdr.c
+@@ -54,8 +54,7 @@ nfsd4_ff_encode_layoutget(struct xdr_stream *xdr,
+ 	*p++ = cpu_to_be32(1);			/* single mirror */
+ 	*p++ = cpu_to_be32(1);			/* single data server */
+ 
+-	p = xdr_encode_opaque_fixed(p, &fl->deviceid,
+-			sizeof(struct nfsd4_deviceid));
++	p = svcxdr_encode_deviceid4(p, &fl->deviceid);
+ 
+ 	*p++ = cpu_to_be32(1);			/* efficiency */
+ 
+diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c
+index fbfddd3c4c943c..fc5e82eddaa1af 100644
+--- a/fs/nfsd/nfs4layouts.c
++++ b/fs/nfsd/nfs4layouts.c
+@@ -120,7 +120,6 @@ nfsd4_set_deviceid(struct nfsd4_deviceid *id, const struct svc_fh *fhp,
+ 
+ 	id->fsid_idx = fhp->fh_export->ex_devid_map->idx;
+ 	id->generation = device_generation;
+-	id->pad = 0;
+ 	return 0;
+ }
+ 
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index 8f2dc7eb4fc451..6040de0923f851 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -2362,7 +2362,6 @@ nfsd4_layoutcommit(struct svc_rqst *rqstp,
+ 	const struct nfsd4_layout_seg *seg = &lcp->lc_seg;
+ 	struct svc_fh *current_fh = &cstate->current_fh;
+ 	const struct nfsd4_layout_ops *ops;
+-	loff_t new_size = lcp->lc_last_wr + 1;
+ 	struct inode *inode;
+ 	struct nfs4_layout_stateid *ls;
+ 	__be32 nfserr;
+@@ -2378,18 +2377,20 @@ nfsd4_layoutcommit(struct svc_rqst *rqstp,
+ 		goto out;
+ 	inode = d_inode(current_fh->fh_dentry);
+ 
+-	nfserr = nfserr_inval;
+-	if (new_size <= seg->offset) {
+-		dprintk("pnfsd: last write before layout segment\n");
+-		goto out;
+-	}
+-	if (new_size > seg->offset + seg->length) {
+-		dprintk("pnfsd: last write beyond layout segment\n");
+-		goto out;
+-	}
+-	if (!lcp->lc_newoffset && new_size > i_size_read(inode)) {
+-		dprintk("pnfsd: layoutcommit beyond EOF\n");
+-		goto out;
++	lcp->lc_size_chg = false;
++	if (lcp->lc_newoffset) {
++		loff_t new_size = lcp->lc_last_wr + 1;
++
++		nfserr = nfserr_inval;
++		if (new_size <= seg->offset)
++			goto out;
++		if (new_size > seg->offset + seg->length)
++			goto out;
++
++		if (new_size > i_size_read(inode)) {
++			lcp->lc_size_chg = true;
++			lcp->lc_newsize = new_size;
++		}
+ 	}
+ 
+ 	nfserr = nfsd4_preprocess_layout_stateid(rqstp, cstate, &lcp->lc_sid,
+@@ -2406,14 +2407,7 @@ nfsd4_layoutcommit(struct svc_rqst *rqstp,
+ 	/* LAYOUTCOMMIT does not require any serialization */
+ 	mutex_unlock(&ls->ls_mutex);
+ 
+-	if (new_size > i_size_read(inode)) {
+-		lcp->lc_size_chg = true;
+-		lcp->lc_newsize = new_size;
+-	} else {
+-		lcp->lc_size_chg = false;
+-	}
+-
+-	nfserr = ops->proc_layoutcommit(inode, lcp);
++	nfserr = ops->proc_layoutcommit(inode, rqstp, lcp);
+ 	nfs4_put_stid(&ls->ls_stid);
+ out:
+ 	return nfserr;
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index 90db900b346ce9..66383eeeed15a0 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -566,23 +566,13 @@ nfsd4_decode_state_owner4(struct nfsd4_compoundargs *argp,
+ }
+ 
+ #ifdef CONFIG_NFSD_PNFS
+-static __be32
+-nfsd4_decode_deviceid4(struct nfsd4_compoundargs *argp,
+-		       struct nfsd4_deviceid *devid)
+-{
+-	__be32 *p;
+-
+-	p = xdr_inline_decode(argp->xdr, NFS4_DEVICEID4_SIZE);
+-	if (!p)
+-		return nfserr_bad_xdr;
+-	memcpy(devid, p, sizeof(*devid));
+-	return nfs_ok;
+-}
+ 
+ static __be32
+ nfsd4_decode_layoutupdate4(struct nfsd4_compoundargs *argp,
+ 			   struct nfsd4_layoutcommit *lcp)
+ {
++	u32 len;
++
+ 	if (xdr_stream_decode_u32(argp->xdr, &lcp->lc_layout_type) < 0)
+ 		return nfserr_bad_xdr;
+ 	if (lcp->lc_layout_type < LAYOUT_NFSV4_1_FILES)
+@@ -590,13 +580,10 @@ nfsd4_decode_layoutupdate4(struct nfsd4_compoundargs *argp,
+ 	if (lcp->lc_layout_type >= LAYOUT_TYPE_MAX)
+ 		return nfserr_bad_xdr;
+ 
+-	if (xdr_stream_decode_u32(argp->xdr, &lcp->lc_up_len) < 0)
++	if (xdr_stream_decode_u32(argp->xdr, &len) < 0)
++		return nfserr_bad_xdr;
++	if (!xdr_stream_subsegment(argp->xdr, &lcp->lc_up_layout, len))
+ 		return nfserr_bad_xdr;
+-	if (lcp->lc_up_len > 0) {
+-		lcp->lc_up_layout = xdr_inline_decode(argp->xdr, lcp->lc_up_len);
+-		if (!lcp->lc_up_layout)
+-			return nfserr_bad_xdr;
+-	}
+ 
+ 	return nfs_ok;
+ }
+@@ -1762,7 +1749,7 @@ nfsd4_decode_getdeviceinfo(struct nfsd4_compoundargs *argp,
+ 	__be32 status;
+ 
+ 	memset(gdev, 0, sizeof(*gdev));
+-	status = nfsd4_decode_deviceid4(argp, &gdev->gd_devid);
++	status = nfsd4_decode_deviceid4(argp->xdr, &gdev->gd_devid);
+ 	if (status)
+ 		return status;
+ 	if (xdr_stream_decode_u32(argp->xdr, &gdev->gd_layout_type) < 0)
+diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
+index 4b56ba1e8e48d0..ae435444e8b3b1 100644
+--- a/fs/nfsd/nfsd.h
++++ b/fs/nfsd/nfsd.h
+@@ -286,6 +286,7 @@ void		nfsd_lockd_shutdown(void);
+ #define	nfserr_cb_path_down	cpu_to_be32(NFSERR_CB_PATH_DOWN)
+ #define	nfserr_locked		cpu_to_be32(NFSERR_LOCKED)
+ #define	nfserr_wrongsec		cpu_to_be32(NFSERR_WRONGSEC)
++#define nfserr_delay			cpu_to_be32(NFS4ERR_DELAY)
+ #define nfserr_badiomode		cpu_to_be32(NFS4ERR_BADIOMODE)
+ #define nfserr_badlayout		cpu_to_be32(NFS4ERR_BADLAYOUT)
+ #define nfserr_bad_session_digest	cpu_to_be32(NFS4ERR_BAD_SESSION_DIGEST)
+diff --git a/fs/nfsd/pnfs.h b/fs/nfsd/pnfs.h
+index 925817f669176c..dfd411d1f363fd 100644
+--- a/fs/nfsd/pnfs.h
++++ b/fs/nfsd/pnfs.h
+@@ -35,6 +35,7 @@ struct nfsd4_layout_ops {
+ 			const struct nfsd4_layoutget *lgp);
+ 
+ 	__be32 (*proc_layoutcommit)(struct inode *inode,
++			struct svc_rqst *rqstp,
+ 			struct nfsd4_layoutcommit *lcp);
+ 
+ 	void (*fence_client)(struct nfs4_layout_stateid *ls,
+diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h
+index 2a21a7662e030c..c75b295df206ad 100644
+--- a/fs/nfsd/xdr4.h
++++ b/fs/nfsd/xdr4.h
+@@ -596,9 +596,43 @@ struct nfsd4_reclaim_complete {
+ struct nfsd4_deviceid {
+ 	u64			fsid_idx;
+ 	u32			generation;
+-	u32			pad;
+ };
+ 
++static inline __be32 *
++svcxdr_encode_deviceid4(__be32 *p, const struct nfsd4_deviceid *devid)
++{
++	__be64 *q = (__be64 *)p;
++
++	*q = (__force __be64)devid->fsid_idx;
++	p += 2;
++	*p++ = (__force __be32)devid->generation;
++	*p++ = xdr_zero;
++	return p;
++}
++
++static inline __be32 *
++svcxdr_decode_deviceid4(__be32 *p, struct nfsd4_deviceid *devid)
++{
++	__be64 *q = (__be64 *)p;
++
++	devid->fsid_idx = (__force u64)(*q);
++	p += 2;
++	devid->generation = (__force u32)(*p++);
++	p++; /* NFSD does not use the remaining octets */
++	return p;
++}
++
++static inline __be32
++nfsd4_decode_deviceid4(struct xdr_stream *xdr, struct nfsd4_deviceid *devid)
++{
++	__be32 *p = xdr_inline_decode(xdr, NFS4_DEVICEID4_SIZE);
++
++	if (unlikely(!p))
++		return nfserr_bad_xdr;
++	svcxdr_decode_deviceid4(p, devid);
++	return nfs_ok;
++}
++
+ struct nfsd4_layout_seg {
+ 	u32			iomode;
+ 	u64			offset;
+@@ -631,8 +665,7 @@ struct nfsd4_layoutcommit {
+ 	u64			lc_last_wr;	/* request */
+ 	struct timespec64	lc_mtime;	/* request */
+ 	u32			lc_layout_type;	/* request */
+-	u32			lc_up_len;	/* layout length */
+-	void			*lc_up_layout;	/* decoded by callback */
++	struct xdr_buf		lc_up_layout;	/* decoded by callback */
+ 	bool			lc_size_chg;	/* response */
+ 	u64			lc_newsize;	/* response */
+ };
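
The helpers above fix the deviceid4 wire layout without the old in-memory
pad field. A short annotated layout, as read from svcxdr_encode_deviceid4()
(the host-byte-order choice is deliberate: the __force casts skip swapping
because the value is opaque to clients and only NFSD ever decodes it):

	/*
	 * deviceid4 on the wire: 16 octets (NFS4_DEVICEID4_SIZE)
	 *
	 *   octets  0-7   fsid_idx    (host byte order, opaque to clients)
	 *   octets  8-11  generation
	 *   octets 12-15  zero        (xdr_zero; replaces the old pad member)
	 */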
+diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c
+index e06d02b68c5387..4862a9518a3215 100644
+--- a/fs/smb/client/inode.c
++++ b/fs/smb/client/inode.c
+@@ -2381,8 +2381,10 @@ cifs_do_rename(const unsigned int xid, struct dentry *from_dentry,
+ 	tcon = tlink_tcon(tlink);
+ 	server = tcon->ses->server;
+ 
+-	if (!server->ops->rename)
+-		return -ENOSYS;
++	if (!server->ops->rename) {
++		rc = -ENOSYS;
++		goto do_rename_exit;
++	}
+ 
+ 	/* try path-based rename first */
+ 	rc = server->ops->rename(xid, tcon, from_dentry,
+diff --git a/fs/smb/client/misc.c b/fs/smb/client/misc.c
+index 499f791df77998..3f3f184f7fb97b 100644
+--- a/fs/smb/client/misc.c
++++ b/fs/smb/client/misc.c
+@@ -913,6 +913,14 @@ parse_dfs_referrals(struct get_dfs_referral_rsp *rsp, u32 rsp_size,
+ 	char *data_end;
+ 	struct dfs_referral_level_3 *ref;
+ 
++	if (rsp_size < sizeof(*rsp)) {
++		cifs_dbg(VFS | ONCE,
++			 "%s: header is malformed (size is %u, must be %zu)\n",
++			 __func__, rsp_size, sizeof(*rsp));
++		rc = -EINVAL;
++		goto parse_DFS_referrals_exit;
++	}
++
+ 	*num_of_nodes = le16_to_cpu(rsp->NumberOfReferrals);
+ 
+ 	if (*num_of_nodes < 1) {
+@@ -922,6 +930,15 @@ parse_dfs_referrals(struct get_dfs_referral_rsp *rsp, u32 rsp_size,
+ 		goto parse_DFS_referrals_exit;
+ 	}
+ 
++	if (sizeof(*rsp) + *num_of_nodes * sizeof(REFERRAL3) > rsp_size) {
++		cifs_dbg(VFS | ONCE,
++			 "%s: malformed buffer (size is %u, must be at least %zu)\n",
++			 __func__, rsp_size,
++			 sizeof(*rsp) + *num_of_nodes * sizeof(REFERRAL3));
++		rc = -EINVAL;
++		goto parse_DFS_referrals_exit;
++	}
++
+ 	ref = (struct dfs_referral_level_3 *) &(rsp->referrals);
+ 	if (ref->VersionNumber != cpu_to_le16(3)) {
+ 		cifs_dbg(VFS, "Referrals of V%d version are not supported, should be V3\n",
+diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
+index 1b30035d02bc51..35299967737f11 100644
+--- a/fs/smb/client/smb2ops.c
++++ b/fs/smb/client/smb2ops.c
+@@ -3134,8 +3134,7 @@ get_smb2_acl_by_path(struct cifs_sb_info *cifs_sb,
+ 	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
+ 	if (!utf16_path) {
+ 		rc = -ENOMEM;
+-		free_xid(xid);
+-		return ERR_PTR(rc);
++		goto put_tlink;
+ 	}
+ 
+ 	oparms = (struct cifs_open_parms) {
+@@ -3167,6 +3166,7 @@ get_smb2_acl_by_path(struct cifs_sb_info *cifs_sb,
+ 		SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
+ 	}
+ 
++put_tlink:
+ 	cifs_put_tlink(tlink);
+ 	free_xid(xid);
+ 
+@@ -3207,8 +3207,7 @@ set_smb2_acl(struct smb_ntsd *pnntsd, __u32 acllen,
+ 	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
+ 	if (!utf16_path) {
+ 		rc = -ENOMEM;
+-		free_xid(xid);
+-		return rc;
++		goto put_tlink;
+ 	}
+ 
+ 	oparms = (struct cifs_open_parms) {
+@@ -3229,6 +3228,7 @@ set_smb2_acl(struct smb_ntsd *pnntsd, __u32 acllen,
+ 		SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
+ 	}
+ 
++put_tlink:
+ 	cifs_put_tlink(tlink);
+ 	free_xid(xid);
+ 	return rc;
+diff --git a/fs/smb/server/mgmt/user_session.c b/fs/smb/server/mgmt/user_session.c
+index b36d0676dbe584..00805aed0b07d9 100644
+--- a/fs/smb/server/mgmt/user_session.c
++++ b/fs/smb/server/mgmt/user_session.c
+@@ -147,14 +147,11 @@ void ksmbd_session_rpc_close(struct ksmbd_session *sess, int id)
+ int ksmbd_session_rpc_method(struct ksmbd_session *sess, int id)
+ {
+ 	struct ksmbd_session_rpc *entry;
+-	int method;
+ 
+-	down_read(&sess->rpc_lock);
++	lockdep_assert_held(&sess->rpc_lock);
+ 	entry = xa_load(&sess->rpc_handle_list, id);
+-	method = entry ? entry->method : 0;
+-	up_read(&sess->rpc_lock);
+ 
+-	return method;
++	return entry ? entry->method : 0;
+ }
+ 
+ void ksmbd_session_destroy(struct ksmbd_session *sess)
+diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
+index d2182477566a64..796235cb956771 100644
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -4623,8 +4623,15 @@ static int smb2_get_info_file_pipe(struct ksmbd_session *sess,
+ 	 * pipe without opening it, checking error condition here
+ 	 */
+ 	id = req->VolatileFileId;
+-	if (!ksmbd_session_rpc_method(sess, id))
++
++	lockdep_assert_not_held(&sess->rpc_lock);
++
++	down_read(&sess->rpc_lock);
++	if (!ksmbd_session_rpc_method(sess, id)) {
++		up_read(&sess->rpc_lock);
+ 		return -ENOENT;
++	}
++	up_read(&sess->rpc_lock);
+ 
+ 	ksmbd_debug(SMB, "FileInfoClass %u, FileId 0x%llx\n",
+ 		    req->FileInfoClass, req->VolatileFileId);
+diff --git a/fs/smb/server/transport_ipc.c b/fs/smb/server/transport_ipc.c
+index 4454bbe3c7107d..816f136ce5c4e1 100644
+--- a/fs/smb/server/transport_ipc.c
++++ b/fs/smb/server/transport_ipc.c
+@@ -825,6 +825,9 @@ struct ksmbd_rpc_command *ksmbd_rpc_write(struct ksmbd_session *sess, int handle
+ 	if (!msg)
+ 		return NULL;
+ 
++	lockdep_assert_not_held(&sess->rpc_lock);
++
++	down_read(&sess->rpc_lock);
+ 	msg->type = KSMBD_EVENT_RPC_REQUEST;
+ 	req = (struct ksmbd_rpc_command *)msg->payload;
+ 	req->handle = handle;
+@@ -833,6 +836,7 @@ struct ksmbd_rpc_command *ksmbd_rpc_write(struct ksmbd_session *sess, int handle
+ 	req->flags |= KSMBD_RPC_WRITE_METHOD;
+ 	req->payload_sz = payload_sz;
+ 	memcpy(req->payload, payload, payload_sz);
++	up_read(&sess->rpc_lock);
+ 
+ 	resp = ipc_msg_send_request(msg, req->handle);
+ 	ipc_msg_free(msg);
+@@ -849,6 +853,9 @@ struct ksmbd_rpc_command *ksmbd_rpc_read(struct ksmbd_session *sess, int handle)
+ 	if (!msg)
+ 		return NULL;
+ 
++	lockdep_assert_not_held(&sess->rpc_lock);
++
++	down_read(&sess->rpc_lock);
+ 	msg->type = KSMBD_EVENT_RPC_REQUEST;
+ 	req = (struct ksmbd_rpc_command *)msg->payload;
+ 	req->handle = handle;
+@@ -856,6 +863,7 @@ struct ksmbd_rpc_command *ksmbd_rpc_read(struct ksmbd_session *sess, int handle)
+ 	req->flags |= rpc_context_flags(sess);
+ 	req->flags |= KSMBD_RPC_READ_METHOD;
+ 	req->payload_sz = 0;
++	up_read(&sess->rpc_lock);
+ 
+ 	resp = ipc_msg_send_request(msg, req->handle);
+ 	ipc_msg_free(msg);
+@@ -876,6 +884,9 @@ struct ksmbd_rpc_command *ksmbd_rpc_ioctl(struct ksmbd_session *sess, int handle
+ 	if (!msg)
+ 		return NULL;
+ 
++	lockdep_assert_not_held(&sess->rpc_lock);
++
++	down_read(&sess->rpc_lock);
+ 	msg->type = KSMBD_EVENT_RPC_REQUEST;
+ 	req = (struct ksmbd_rpc_command *)msg->payload;
+ 	req->handle = handle;
+@@ -884,6 +895,7 @@ struct ksmbd_rpc_command *ksmbd_rpc_ioctl(struct ksmbd_session *sess, int handle
+ 	req->flags |= KSMBD_RPC_IOCTL_METHOD;
+ 	req->payload_sz = payload_sz;
+ 	memcpy(req->payload, payload, payload_sz);
++	up_read(&sess->rpc_lock);
+ 
+ 	resp = ipc_msg_send_request(msg, req->handle);
+ 	ipc_msg_free(msg);
+diff --git a/fs/xfs/libxfs/xfs_log_format.h b/fs/xfs/libxfs/xfs_log_format.h
+index 3e6682ed656b30..a151ea6c0f7215 100644
+--- a/fs/xfs/libxfs/xfs_log_format.h
++++ b/fs/xfs/libxfs/xfs_log_format.h
+@@ -174,12 +174,40 @@ typedef struct xlog_rec_header {
+ 	__be32	  h_prev_block; /* block number to previous LR		:  4 */
+ 	__be32	  h_num_logops;	/* number of log operations in this LR	:  4 */
+ 	__be32	  h_cycle_data[XLOG_HEADER_CYCLE_SIZE / BBSIZE];
+-	/* new fields */
++
++	/* fields added by the Linux port: */
+ 	__be32    h_fmt;        /* format of log record                 :  4 */
+ 	uuid_t	  h_fs_uuid;    /* uuid of FS                           : 16 */
++
++	/* fields added for log v2: */
+ 	__be32	  h_size;	/* iclog size				:  4 */
++
++	/*
++	 * When h_size was added for log v2 support, it caused the structure
++	 * to have a different size on i386 vs all other architectures because
++	 * the sum of the sizes of the members is not a multiple of the
++	 * alignment of the largest __be64-sized member, and i386 has really
++	 * odd struct alignment rules.
++	 *
++	 * Due to the way the log headers are laid out on disk, that alone is
++	 * not a problem because the xlog_rec_header always sits alone in a
++	 * BBSIZE-sized area, and the rest of that area is padded with zeroes.
++	 * But xlog_cksum used to calculate the checksum based on the structure
++	 * size, and thus gives different checksums for i386 vs the rest.
++	 * We now do two checksum validation passes for both sizes to allow
++	 * moving v5 file systems with unclean logs between i386 and other
++	 * (little-endian) architectures.
++	 */
++	__u32	  h_pad0;
+ } xlog_rec_header_t;
+ 
++#ifdef __i386__
++#define XLOG_REC_SIZE		offsetofend(struct xlog_rec_header, h_size)
++#define XLOG_REC_SIZE_OTHER	sizeof(struct xlog_rec_header)
++#else
++#define XLOG_REC_SIZE		sizeof(struct xlog_rec_header)
++#define XLOG_REC_SIZE_OTHER	offsetofend(struct xlog_rec_header, h_size)
++#endif /* __i386__ */
++
+ typedef struct xlog_rec_ext_header {
+ 	__be32	  xh_cycle;	/* write cycle of log			: 4 */
+ 	__be32	  xh_cycle_data[XLOG_HEADER_CYCLE_SIZE / BBSIZE]; /*	: 256 */
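
A rough size work-up, assuming the usual field list of struct
xlog_rec_header in xfs_log_format.h: the members through h_size sum to 324
bytes, so offsetofend(..., h_size) is 324 everywhere, while sizeof() used
to be 324 on i386 (where __be64 only forces 4-byte alignment) but 328
elsewhere. With h_pad0 the structure is 328 bytes on all architectures,
matching the XFS_CHECK_STRUCT_SIZE addition below. An illustrative check
of the macro selection (not part of the patch):

	#ifdef __i386__
	_Static_assert(XLOG_REC_SIZE == 324, "local/historical CRC length");
	_Static_assert(XLOG_REC_SIZE_OTHER == 328, "foreign CRC length");
	#else
	_Static_assert(XLOG_REC_SIZE == 328, "local/historical CRC length");
	_Static_assert(XLOG_REC_SIZE_OTHER == 324, "foreign CRC length");
	#endif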
+diff --git a/fs/xfs/libxfs/xfs_ondisk.h b/fs/xfs/libxfs/xfs_ondisk.h
+index 23c133fd36f5bb..01b65d87745338 100644
+--- a/fs/xfs/libxfs/xfs_ondisk.h
++++ b/fs/xfs/libxfs/xfs_ondisk.h
+@@ -149,6 +149,8 @@ xfs_check_ondisk_structs(void)
+ 	XFS_CHECK_STRUCT_SIZE(struct xfs_rud_log_format,	16);
+ 	XFS_CHECK_STRUCT_SIZE(struct xfs_map_extent,		32);
+ 	XFS_CHECK_STRUCT_SIZE(struct xfs_phys_extent,		16);
++	XFS_CHECK_STRUCT_SIZE(struct xlog_rec_header,		328);
++	XFS_CHECK_STRUCT_SIZE(struct xlog_rec_ext_header,	260);
+ 
+ 	XFS_CHECK_OFFSET(struct xfs_bui_log_format, bui_extents,	16);
+ 	XFS_CHECK_OFFSET(struct xfs_cui_log_format, cui_extents,	16);
+diff --git a/fs/xfs/scrub/reap.c b/fs/xfs/scrub/reap.c
+index 53697f3c5e1b0b..8688edec587554 100644
+--- a/fs/xfs/scrub/reap.c
++++ b/fs/xfs/scrub/reap.c
+@@ -409,8 +409,6 @@ xreap_agextent_iter(
+ 	if (crosslinked) {
+ 		trace_xreap_dispose_unmap_extent(sc->sa.pag, agbno, *aglenp);
+ 
+-		rs->force_roll = true;
+-
+ 		if (rs->oinfo == &XFS_RMAP_OINFO_COW) {
+ 			/*
+ 			 * If we're unmapping CoW staging extents, remove the
+@@ -418,11 +416,14 @@ xreap_agextent_iter(
+ 			 * rmap record as well.
+ 			 */
+ 			xfs_refcount_free_cow_extent(sc->tp, fsbno, *aglenp);
++			rs->force_roll = true;
+ 			return 0;
+ 		}
+ 
+-		return xfs_rmap_free(sc->tp, sc->sa.agf_bp, sc->sa.pag, agbno,
+-				*aglenp, rs->oinfo);
++		xfs_rmap_free_extent(sc->tp, sc->sa.pag->pag_agno, agbno,
++				*aglenp, rs->oinfo->oi_owner);
++		rs->deferred++;
++		return 0;
+ 	}
+ 
+ 	trace_xreap_dispose_free_extent(sc->sa.pag, agbno, *aglenp);
+diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
+index 26b2f5887b8819..a4c00decd97ba8 100644
+--- a/fs/xfs/xfs_log.c
++++ b/fs/xfs/xfs_log.c
+@@ -1567,13 +1567,13 @@ xlog_cksum(
+ 	struct xlog		*log,
+ 	struct xlog_rec_header	*rhead,
+ 	char			*dp,
+-	int			size)
++	unsigned int		hdrsize,
++	unsigned int		size)
+ {
+ 	uint32_t		crc;
+ 
+ 	/* first generate the crc for the record header ... */
+-	crc = xfs_start_cksum_update((char *)rhead,
+-			      sizeof(struct xlog_rec_header),
++	crc = xfs_start_cksum_update((char *)rhead, hdrsize,
+ 			      offsetof(struct xlog_rec_header, h_crc));
+ 
+ 	/* ... then for additional cycle data for v2 logs ... */
+@@ -1837,7 +1837,7 @@ xlog_sync(
+ 
+ 	/* calculate the checksum */
+ 	iclog->ic_header.h_crc = xlog_cksum(log, &iclog->ic_header,
+-					    iclog->ic_datap, size);
++			iclog->ic_datap, XLOG_REC_SIZE, size);
+ 	/*
+ 	 * Intentionally corrupt the log record CRC based on the error injection
+ 	 * frequency, if defined. This facilitates testing log recovery in the
+diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
+index b8778a4fd6b64e..89a75b7cbd6a47 100644
+--- a/fs/xfs/xfs_log_priv.h
++++ b/fs/xfs/xfs_log_priv.h
+@@ -498,8 +498,8 @@ xlog_recover_finish(
+ extern void
+ xlog_recover_cancel(struct xlog *);
+ 
+-extern __le32	 xlog_cksum(struct xlog *log, struct xlog_rec_header *rhead,
+-			    char *dp, int size);
++__le32	 xlog_cksum(struct xlog *log, struct xlog_rec_header *rhead,
++		char *dp, unsigned int hdrsize, unsigned int size);
+ 
+ extern struct kmem_cache *xfs_log_ticket_cache;
+ struct xlog_ticket *xlog_ticket_alloc(struct xlog *log, int unit_bytes,
+diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
+index 704aaadb61cf29..64338ce501c996 100644
+--- a/fs/xfs/xfs_log_recover.c
++++ b/fs/xfs/xfs_log_recover.c
+@@ -2890,20 +2890,34 @@ xlog_recover_process(
+ 	int			pass,
+ 	struct list_head	*buffer_list)
+ {
+-	__le32			old_crc = rhead->h_crc;
+-	__le32			crc;
++	__le32			expected_crc = rhead->h_crc, crc, other_crc;
+ 
+-	crc = xlog_cksum(log, rhead, dp, be32_to_cpu(rhead->h_len));
++	crc = xlog_cksum(log, rhead, dp, XLOG_REC_SIZE,
++			be32_to_cpu(rhead->h_len));
++
++	/*
++	 * Look at the end of the struct xlog_rec_header definition in
++	 * xfs_log_format.h for the gory details.
++	 */
++	if (expected_crc && crc != expected_crc) {
++		other_crc = xlog_cksum(log, rhead, dp, XLOG_REC_SIZE_OTHER,
++				be32_to_cpu(rhead->h_len));
++		if (other_crc == expected_crc) {
++			xfs_notice_once(log->l_mp,
++	"Fixing up incorrect CRC due to padding.");
++			crc = other_crc;
++		}
++	}
+ 
+ 	/*
+ 	 * Nothing else to do if this is a CRC verification pass. Just return
+ 	 * if this a record with a non-zero crc. Unfortunately, mkfs always
+-	 * sets old_crc to 0 so we must consider this valid even on v5 supers.
+-	 * Otherwise, return EFSBADCRC on failure so the callers up the stack
+-	 * know precisely what failed.
++	 * sets expected_crc to 0 so we must consider this valid even on v5
++	 * supers.  Otherwise, return EFSBADCRC on failure so the callers up the
++	 * stack know precisely what failed.
+ 	 */
+ 	if (pass == XLOG_RECOVER_CRCPASS) {
+-		if (old_crc && crc != old_crc)
++		if (expected_crc && crc != expected_crc)
+ 			return -EFSBADCRC;
+ 		return 0;
+ 	}
+@@ -2914,11 +2928,11 @@ xlog_recover_process(
+ 	 * zero CRC check prevents warnings from being emitted when upgrading
+ 	 * the kernel from one that does not add CRCs by default.
+ 	 */
+-	if (crc != old_crc) {
+-		if (old_crc || xfs_has_crc(log->l_mp)) {
++	if (crc != expected_crc) {
++		if (expected_crc || xfs_has_crc(log->l_mp)) {
+ 			xfs_alert(log->l_mp,
+ 		"log record CRC mismatch: found 0x%x, expected 0x%x.",
+-					le32_to_cpu(old_crc),
++					le32_to_cpu(expected_crc),
+ 					le32_to_cpu(crc));
+ 			xfs_hex_dump(dp, 32);
+ 		}
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index f0fa8404957dbf..13b4bd7355c14c 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -320,7 +320,7 @@ extern unsigned int kobjsize(const void *objp);
+ #define VM_MIXEDMAP	0x10000000	/* Can contain "struct page" and pure PFN pages */
+ #define VM_HUGEPAGE	0x20000000	/* MADV_HUGEPAGE marked this vma */
+ #define VM_NOHUGEPAGE	0x40000000	/* MADV_NOHUGEPAGE marked this vma */
+-#define VM_MERGEABLE	0x80000000	/* KSM may merge identical pages */
++#define VM_MERGEABLE	BIT(31)		/* KSM may merge identical pages */
+ 
+ #ifdef CONFIG_ARCH_USES_HIGH_VMA_FLAGS
+ #define VM_HIGH_ARCH_BIT_0	32	/* bit only usable on 64-bit architectures */
+diff --git a/include/linux/pci.h b/include/linux/pci.h
+index 6b3fef24d60e72..452a3dca28eaa0 100644
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -1066,6 +1066,20 @@ struct pci_driver {
+ 	.vendor = PCI_VENDOR_ID_##vend, .device = (dev), \
+ 	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0
+ 
++/**
++ * PCI_VDEVICE_SUB - describe a specific PCI device/subdevice in a short form
++ * @vend: the vendor name
++ * @dev: the 16 bit PCI Device ID
++ * @subvend: the 16 bit PCI Subvendor ID
++ * @subdev: the 16 bit PCI Subdevice ID
++ *
++ * Generate the pci_device_id struct layout for the specific PCI
++ * device/subdevice. Private data may follow the output.
++ */
++#define PCI_VDEVICE_SUB(vend, dev, subvend, subdev) \
++	.vendor = PCI_VENDOR_ID_##vend, .device = (dev), \
++	.subvendor = (subvend), .subdevice = (subdev), 0, 0
++
+ /**
+  * PCI_DEVICE_DATA - macro used to describe a specific PCI device in very short form
+  * @vend: the vendor name (without PCI_VENDOR_ID_ prefix)
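
For context, the new macro drops into an ID table like its PCI_VDEVICE
sibling; a minimal sketch (the device, subvendor and subdevice IDs below
are invented for illustration):

	static const struct pci_device_id demo_ids[] = {
		/* match device 0x1234 only with subsystem 0xabcd:0x5678 */
		{ PCI_VDEVICE_SUB(REDHAT, 0x1234, 0xabcd, 0x5678) },
		{ }	/* terminating all-zero entry */
	};
	MODULE_DEVICE_TABLE(pci, demo_ids);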
+diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
+index d0b29cd1fd204e..142ab39a3b386d 100644
+--- a/include/linux/pm_runtime.h
++++ b/include/linux/pm_runtime.h
+@@ -94,7 +94,9 @@ extern void pm_runtime_new_link(struct device *dev);
+ extern void pm_runtime_drop_link(struct device_link *link);
+ extern void pm_runtime_release_supplier(struct device_link *link);
+ 
++int devm_pm_runtime_set_active_enabled(struct device *dev);
+ extern int devm_pm_runtime_enable(struct device *dev);
++int devm_pm_runtime_get_noresume(struct device *dev);
+ 
+ /**
+  * pm_suspend_ignore_children - Set runtime PM behavior regarding children.
+@@ -278,7 +280,9 @@ static inline void __pm_runtime_disable(struct device *dev, bool c) {}
+ static inline void pm_runtime_allow(struct device *dev) {}
+ static inline void pm_runtime_forbid(struct device *dev) {}
+ 
++static inline int devm_pm_runtime_set_active_enabled(struct device *dev) { return 0; }
+ static inline int devm_pm_runtime_enable(struct device *dev) { return 0; }
++static inline int devm_pm_runtime_get_noresume(struct device *dev) { return 0; }
+ 
+ static inline void pm_suspend_ignore_children(struct device *dev, bool enable) {}
+ static inline void pm_runtime_get_noresume(struct device *dev) {}
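
Both devres helpers pair an existing runtime-PM call with automatic undo
at driver detach. One plausible probe-time use, not taken from this patch
(demo_probe and its device are hypothetical):

	static int demo_probe(struct platform_device *pdev)
	{
		int ret;

		/* pin the device before enabling; dropped automatically */
		ret = devm_pm_runtime_get_noresume(&pdev->dev);
		if (ret)
			return ret;

		/* mark active and enable; undone again on unbind */
		return devm_pm_runtime_set_active_enabled(&pdev->dev);
	}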
+diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
+index df33333650a0d9..da4309f3cea3f8 100644
+--- a/include/linux/usb/gadget.h
++++ b/include/linux/usb/gadget.h
+@@ -15,6 +15,7 @@
+ #ifndef __LINUX_USB_GADGET_H
+ #define __LINUX_USB_GADGET_H
+ 
++#include <linux/cleanup.h>
+ #include <linux/configfs.h>
+ #include <linux/device.h>
+ #include <linux/errno.h>
+@@ -32,6 +33,7 @@ struct usb_ep;
+ 
+ /**
+  * struct usb_request - describes one i/o request
++ * @ep: The associated endpoint set by usb_ep_alloc_request().
+  * @buf: Buffer used for data.  Always provide this; some controllers
+  *	only use PIO, or don't use DMA for some endpoints.
+  * @dma: DMA address corresponding to 'buf'.  If you don't set this
+@@ -98,6 +100,7 @@ struct usb_ep;
+  */
+ 
+ struct usb_request {
++	struct usb_ep		*ep;
+ 	void			*buf;
+ 	unsigned		length;
+ 	dma_addr_t		dma;
+@@ -291,6 +294,28 @@ static inline void usb_ep_fifo_flush(struct usb_ep *ep)
+ 
+ /*-------------------------------------------------------------------------*/
+ 
++/**
++ * free_usb_request - frees a usb_request object and its buffer
++ * @req: the request being freed
++ *
++ * This helper function frees both the request's buffer and the request object
++ * itself by calling usb_ep_free_request(). Its signature is designed to be used
++ * with DEFINE_FREE() to enable automatic, scope-based cleanup for usb_request
++ * pointers.
++ */
++static inline void free_usb_request(struct usb_request *req)
++{
++	if (!req)
++		return;
++
++	kfree(req->buf);
++	usb_ep_free_request(req->ep, req);
++}
++
++DEFINE_FREE(free_usb_request, struct usb_request *, free_usb_request(_T))
++
++/*-------------------------------------------------------------------------*/
++
+ struct usb_dcd_config_params {
+ 	__u8  bU1devExitLat;	/* U1 Device exit Latency */
+ #define USB_DEFAULT_U1_DEV_EXIT_LAT	0x01	/* Less than 1 microsec */
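
Together with the req->ep backpointer now set in usb_ep_alloc_request(),
this is what lets the f_rndis conversion above replace its goto-based
unwind with plain returns. A condensed sketch of the pattern (the demo
function, endpoint and STATUS_SIZE are placeholders):

	static int demo_alloc_notify(struct usb_ep *ep,
				     struct usb_request **out)
	{
		struct usb_request *req __free(free_usb_request) =
			usb_ep_alloc_request(ep, GFP_KERNEL);

		if (!req)
			return -ENOMEM;
		req->buf = kmalloc(STATUS_SIZE, GFP_KERNEL);
		if (!req->buf)
			return -ENOMEM;	/* req auto-freed on return */

		*out = no_free_ptr(req);	/* success: keep ownership */
		return 0;
	}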
+diff --git a/include/net/dst.h b/include/net/dst.h
+index e18826cd055952..e5c9ea18838381 100644
+--- a/include/net/dst.h
++++ b/include/net/dst.h
+@@ -561,6 +561,38 @@ static inline void skb_dst_update_pmtu_no_confirm(struct sk_buff *skb, u32 mtu)
+ 		dst->ops->update_pmtu(dst, NULL, skb, mtu, false);
+ }
+ 
++static inline struct net_device *dst_dev(const struct dst_entry *dst)
++{
++	return READ_ONCE(dst->dev);
++}
++
++static inline struct net_device *dst_dev_rcu(const struct dst_entry *dst)
++{
++	/* In the future, use rcu_dereference(dst->dev) */
++	WARN_ON_ONCE(!rcu_read_lock_held());
++	return READ_ONCE(dst->dev);
++}
++
++static inline struct net_device *skb_dst_dev(const struct sk_buff *skb)
++{
++	return dst_dev(skb_dst(skb));
++}
++
++static inline struct net_device *skb_dst_dev_rcu(const struct sk_buff *skb)
++{
++	return dst_dev_rcu(skb_dst(skb));
++}
++
++static inline struct net *skb_dst_dev_net(const struct sk_buff *skb)
++{
++	return dev_net(skb_dst_dev(skb));
++}
++
++static inline struct net *skb_dst_dev_net_rcu(const struct sk_buff *skb)
++{
++	return dev_net_rcu(skb_dst_dev(skb));
++}
++
+ struct dst_entry *dst_blackhole_check(struct dst_entry *dst, u32 cookie);
+ void dst_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
+ 			       struct sk_buff *skb, u32 mtu, bool confirm_neigh);
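
The accessors centralize READ_ONCE() on dst->dev so a later switch to
rcu_dereference() only has to touch one place. A sketch of a reader under
RCU (demo_path_mtu is illustrative, not from this patch):

	static unsigned int demo_path_mtu(const struct sk_buff *skb)
	{
		unsigned int mtu;

		rcu_read_lock();
		/* skb_dst_dev_rcu() warns unless rcu_read_lock is held */
		mtu = READ_ONCE(skb_dst_dev_rcu(skb)->mtu);
		rcu_read_unlock();

		return mtu;
	}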
+diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h
+index 74dd90ff5f129f..c32878c69179da 100644
+--- a/include/net/inet6_hashtables.h
++++ b/include/net/inet6_hashtables.h
+@@ -150,7 +150,7 @@ static inline struct sock *__inet6_lookup_skb(struct inet_hashinfo *hashinfo,
+ 					      int iif, int sdif,
+ 					      bool *refcounted)
+ {
+-	struct net *net = dev_net(skb_dst(skb)->dev);
++	struct net *net = dev_net_rcu(skb_dst(skb)->dev);
+ 	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
+ 	struct sock *sk;
+ 
+diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
+index 4bd93571e6c1b5..bcc138ff087bd6 100644
+--- a/include/net/inet_connection_sock.h
++++ b/include/net/inet_connection_sock.h
+@@ -116,7 +116,8 @@ struct inet_connection_sock {
+ 		#define ATO_BITS 8
+ 		__u32		  ato:ATO_BITS,	 /* Predicted tick of soft clock	   */
+ 				  lrcv_flowlabel:20, /* last received ipv6 flowlabel	   */
+-				  unused:4;
++				  dst_quick_ack:1, /* cache dst RTAX_QUICKACK		   */
++				  unused:3;
+ 		unsigned long	  timeout;	 /* Currently scheduled timeout		   */
+ 		__u32		  lrcvtime;	 /* timestamp of last received data packet */
+ 		__u16		  last_seg_size; /* Size of last incoming segment	   */
+diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
+index 5eea47f135a421..3c4118c63cfe1b 100644
+--- a/include/net/inet_hashtables.h
++++ b/include/net/inet_hashtables.h
+@@ -492,7 +492,7 @@ static inline struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo,
+ 					     const int sdif,
+ 					     bool *refcounted)
+ {
+-	struct net *net = dev_net(skb_dst(skb)->dev);
++	struct net *net = skb_dst_dev_net_rcu(skb);
+ 	const struct iphdr *iph = ip_hdr(skb);
+ 	struct sock *sk;
+ 
+diff --git a/include/net/ip.h b/include/net/ip.h
+index bd201278c55a58..5f0f1215d2f923 100644
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -475,7 +475,7 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
+ 
+ 	rcu_read_lock();
+ 
+-	net = dev_net_rcu(dst->dev);
++	net = dev_net_rcu(dst_dev(dst));
+ 	if (READ_ONCE(net->ipv4.sysctl_ip_fwd_use_pmtu) ||
+ 	    ip_mtu_locked(dst) ||
+ 	    !forwarding) {
+@@ -489,7 +489,7 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
+ 	if (mtu)
+ 		goto out;
+ 
+-	mtu = READ_ONCE(dst->dev->mtu);
++	mtu = READ_ONCE(dst_dev(dst)->mtu);
+ 
+ 	if (unlikely(ip_mtu_locked(dst))) {
+ 		if (rt->rt_uses_gateway && mtu > 576)
+@@ -509,16 +509,17 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
+ static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
+ 					  const struct sk_buff *skb)
+ {
++	const struct dst_entry *dst = skb_dst(skb);
+ 	unsigned int mtu;
+ 
+ 	if (!sk || !sk_fullsock(sk) || ip_sk_use_pmtu(sk)) {
+ 		bool forwarding = IPCB(skb)->flags & IPSKB_FORWARDED;
+ 
+-		return ip_dst_mtu_maybe_forward(skb_dst(skb), forwarding);
++		return ip_dst_mtu_maybe_forward(dst, forwarding);
+ 	}
+ 
+-	mtu = min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU);
+-	return mtu - lwtunnel_headroom(skb_dst(skb)->lwtstate, mtu);
++	mtu = min(READ_ONCE(dst_dev(dst)->mtu), IP_MAX_MTU);
++	return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
+ }
+ 
+ struct dst_metrics *ip_fib_metrics_init(struct nlattr *fc_mx, int fc_mx_len,
+diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
+index ae83a969ae64b2..01fcf952b05de9 100644
+--- a/include/net/ip_tunnels.h
++++ b/include/net/ip_tunnels.h
+@@ -605,6 +605,21 @@ struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
+ int skb_tunnel_check_pmtu(struct sk_buff *skb, struct dst_entry *encap_dst,
+ 			  int headroom, bool reply);
+ 
++static inline void ip_tunnel_adj_headroom(struct net_device *dev,
++					  unsigned int headroom)
++{
++	/* we must cap headroom to some upper limit, else pskb_expand_head
++	 * will overflow header offsets in skb_headers_offset_update().
++	 */
++	const unsigned int max_allowed = 512;
++
++	if (headroom > max_allowed)
++		headroom = max_allowed;
++
++	if (headroom > READ_ONCE(dev->needed_headroom))
++		WRITE_ONCE(dev->needed_headroom, headroom);
++}
++
+ int iptunnel_handle_offloads(struct sk_buff *skb, int gso_type_mask);
+ 
+ static inline int iptunnel_pull_offloads(struct sk_buff *skb)
+diff --git a/include/net/route.h b/include/net/route.h
+index 8a11d19f897bb2..232b7bf55ba221 100644
+--- a/include/net/route.h
++++ b/include/net/route.h
+@@ -369,7 +369,7 @@ static inline int ip4_dst_hoplimit(const struct dst_entry *dst)
+ 		const struct net *net;
+ 
+ 		rcu_read_lock();
+-		net = dev_net_rcu(dst->dev);
++		net = dev_net_rcu(dst_dev(dst));
+ 		hoplimit = READ_ONCE(net->ipv4.sysctl_ip_default_ttl);
+ 		rcu_read_unlock();
+ 	}
+diff --git a/io_uring/rw.c b/io_uring/rw.c
+index 3ad104cf1e7d83..996cd4bec4821a 100644
+--- a/io_uring/rw.c
++++ b/io_uring/rw.c
+@@ -477,7 +477,7 @@ static void io_req_io_end(struct io_kiocb *req)
+ static bool __io_complete_rw_common(struct io_kiocb *req, long res)
+ {
+ 	if (unlikely(res != req->cqe.res)) {
+-		if (res == -EAGAIN && io_rw_should_reissue(req)) {
++		if ((res == -EOPNOTSUPP || res == -EAGAIN) && io_rw_should_reissue(req)) {
+ 			/*
+ 			 * Reissue will start accounting again, finish the
+ 			 * current cycle.
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 3cc06ffb60c1bc..d60d48d482b014 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -8997,7 +8997,7 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
+ 		flags |= MAP_HUGETLB;
+ 
+ 	if (file) {
+-		struct inode *inode;
++		const struct inode *inode;
+ 		dev_t dev;
+ 
+ 		buf = kmalloc(PATH_MAX, GFP_KERNEL);
+@@ -9010,12 +9010,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
+ 		 * need to add enough zero bytes after the string to handle
+ 		 * the 64bit alignment we do later.
+ 		 */
+-		name = file_path(file, buf, PATH_MAX - sizeof(u64));
++		name = d_path(file_user_path(file), buf, PATH_MAX - sizeof(u64));
+ 		if (IS_ERR(name)) {
+ 			name = "//toolong";
+ 			goto cpy_name;
+ 		}
+-		inode = file_inode(vma->vm_file);
++		inode = file_user_inode(vma->vm_file);
+ 		dev = inode->i_sb->s_dev;
+ 		ino = inode->i_ino;
+ 		gen = inode->i_generation;
+@@ -9086,7 +9086,7 @@ static bool perf_addr_filter_match(struct perf_addr_filter *filter,
+ 	if (!filter->path.dentry)
+ 		return false;
+ 
+-	if (d_inode(filter->path.dentry) != file_inode(file))
++	if (d_inode(filter->path.dentry) != file_user_inode(file))
+ 		return false;
+ 
+ 	if (filter->offset > offset + size)
+diff --git a/kernel/padata.c b/kernel/padata.c
+index 3e0ef0753e73e1..c3810f5bd71563 100644
+--- a/kernel/padata.c
++++ b/kernel/padata.c
+@@ -290,7 +290,11 @@ static struct padata_priv *padata_find_next(struct parallel_data *pd,
+ 	if (remove_object) {
+ 		list_del_init(&padata->list);
+ 		++pd->processed;
+-		pd->cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1, false);
++		/* When sequence wraps around, reset to the first CPU. */
++		if (unlikely(pd->processed == 0))
++			pd->cpu = cpumask_first(pd->cpumask.pcpu);
++		else
++			pd->cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1, false);
+ 	}
+ 
+ 	spin_unlock(&reorder->lock);
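
/*
 * Toy model of the wrap-around handling added to padata_find_next()
 * above: a round-robin cursor must be reset to the first CPU when its
 * sequence counter wraps to zero. The 8-bit counter and plain array are
 * deliberate simplifications; the kernel uses a u32 counter and cpumask
 * helpers.
 */
#include <stdio.h>

#define NCPUS 3	/* hypothetical number of online CPUs */

int main(void)
{
	unsigned char processed = 0;	/* wraps at 256, as pd->processed does at 2^32 */
	unsigned int cpu = 0;

	for (int i = 0; i < 600; i++) {
		processed++;
		if (processed == 0)
			cpu = 0;			/* wrapped: cpumask_first() */
		else
			cpu = (cpu + 1) % NCPUS;	/* cpumask_next_wrap() */
	}
	/* 256 % 3 != 0, so skipping the reset would leave cpu out of step */
	printf("processed=%u cpu=%u\n", processed, cpu);
	return 0;
}
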
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index b3d9826e25b035..8bdcb5df0d4616 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -9059,21 +9059,21 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf
+ 	return p;
+ 
+ idle:
+-	if (!rf)
+-		return NULL;
+-
+-	new_tasks = sched_balance_newidle(rq, rf);
++	if (rf) {
++		new_tasks = sched_balance_newidle(rq, rf);
+ 
+-	/*
+-	 * Because sched_balance_newidle() releases (and re-acquires) rq->lock, it is
+-	 * possible for any higher priority task to appear. In that case we
+-	 * must re-start the pick_next_entity() loop.
+-	 */
+-	if (new_tasks < 0)
+-		return RETRY_TASK;
++		/*
++		 * Because sched_balance_newidle() releases (and re-acquires)
++		 * rq->lock, it is possible for any higher priority task to
++		 * appear. In that case we must re-start the pick_next_entity()
++		 * loop.
++		 */
++		if (new_tasks < 0)
++			return RETRY_TASK;
+ 
+-	if (new_tasks > 0)
+-		goto again;
++		if (new_tasks > 0)
++			goto again;
++	}
+ 
+ 	/*
+ 	 * rq is about to be idle, check if we need to update the
+diff --git a/mm/slub.c b/mm/slub.c
+index b75b50ad6748f0..24e65d7048ba23 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -2038,8 +2038,15 @@ static inline void free_slab_obj_exts(struct slab *slab)
+ 	struct slabobj_ext *obj_exts;
+ 
+ 	obj_exts = slab_obj_exts(slab);
+-	if (!obj_exts)
++	if (!obj_exts) {
++		/*
++		 * If obj_exts allocation failed, slab->obj_exts is set to
++		 * OBJEXTS_ALLOC_FAIL. In this case, we end up here and should
++		 * clear the flag.
++		 */
++		slab->obj_exts = 0;
+ 		return;
++	}
+ 
+ 	/*
+ 	 * obj_exts was created with __GFP_NO_OBJ_EXT flag, therefore its
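
/*
 * The mm/slub.c hunk above is the classic "failure sentinel" cleanup
 * rule: if an allocation failure is recorded as a flag in the same field
 * that normally holds a pointer, the free path must clear that flag too.
 * This reduction is hypothetical; it does not mirror the kernel's real
 * OBJEXTS_* bit layout.
 */
#include <assert.h>
#include <stdint.h>

#define ALLOC_FAIL 0x1u	/* invented failure flag kept in the field */

struct slab_like {
	uintptr_t obj_exts;	/* pointer bits, or just ALLOC_FAIL on failure */
};

static void free_obj_exts(struct slab_like *s)
{
	void *vec = (void *)(s->obj_exts & ~(uintptr_t)ALLOC_FAIL);

	if (!vec) {
		s->obj_exts = 0;	/* the fix: drop the stale flag */
		return;
	}
	/* a real extension vector would be freed here */
	s->obj_exts = 0;
}

int main(void)
{
	struct slab_like s = { ALLOC_FAIL };	/* allocation had failed */

	free_obj_exts(&s);
	assert(s.obj_exts == 0);	/* nothing leaks into the slab's reuse */
	return 0;
}
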
+diff --git a/net/core/dst.c b/net/core/dst.c
+index cc990706b64515..9a0ddef8bee430 100644
+--- a/net/core/dst.c
++++ b/net/core/dst.c
+@@ -150,7 +150,7 @@ void dst_dev_put(struct dst_entry *dst)
+ 		dst->ops->ifdown(dst, dev);
+ 	WRITE_ONCE(dst->input, dst_discard);
+ 	WRITE_ONCE(dst->output, dst_discard_out);
+-	dst->dev = blackhole_netdev;
++	WRITE_ONCE(dst->dev, blackhole_netdev);
+ 	netdev_ref_replace(dev, blackhole_netdev, &dst->dev_tracker,
+ 			   GFP_ATOMIC);
+ }
+@@ -263,7 +263,7 @@ unsigned int dst_blackhole_mtu(const struct dst_entry *dst)
+ {
+ 	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
+ 
+-	return mtu ? : dst->dev->mtu;
++	return mtu ? : dst_dev(dst)->mtu;
+ }
+ EXPORT_SYMBOL_GPL(dst_blackhole_mtu);
+ 
+diff --git a/net/core/sock.c b/net/core/sock.c
+index d392cb37a864f7..a5f248a9140426 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -2534,8 +2534,8 @@ static u32 sk_dst_gso_max_size(struct sock *sk, struct dst_entry *dst)
+ 		   !ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr));
+ #endif
+ 	/* pairs with the WRITE_ONCE() in netif_set_gso(_ipv4)_max_size() */
+-	max_size = is_ipv6 ? READ_ONCE(dst->dev->gso_max_size) :
+-			READ_ONCE(dst->dev->gso_ipv4_max_size);
++	max_size = is_ipv6 ? READ_ONCE(dst_dev(dst)->gso_max_size) :
++			READ_ONCE(dst_dev(dst)->gso_ipv4_max_size);
+ 	if (max_size > GSO_LEGACY_MAX_SIZE && !sk_is_tcp(sk))
+ 		max_size = GSO_LEGACY_MAX_SIZE;
+ 
+@@ -2546,9 +2546,13 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
+ {
+ 	u32 max_segs = 1;
+ 
+-	sk->sk_route_caps = dst->dev->features;
+-	if (sk_is_tcp(sk))
++	sk->sk_route_caps = dst_dev(dst)->features;
++	if (sk_is_tcp(sk)) {
++		struct inet_connection_sock *icsk = inet_csk(sk);
++
+ 		sk->sk_route_caps |= NETIF_F_GSO;
++		icsk->icsk_ack.dst_quick_ack = dst_metric(dst, RTAX_QUICKACK);
++	}
+ 	if (sk->sk_route_caps & NETIF_F_GSO)
+ 		sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
+ 	if (unlikely(sk->sk_gso_disabled))
+@@ -2560,7 +2564,7 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
+ 			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
+ 			sk->sk_gso_max_size = sk_dst_gso_max_size(sk, dst);
+ 			/* pairs with the WRITE_ONCE() in netif_set_gso_max_segs() */
+-			max_segs = max_t(u32, READ_ONCE(dst->dev->gso_max_segs), 1);
++			max_segs = max_t(u32, READ_ONCE(dst_dev(dst)->gso_max_segs), 1);
+ 		}
+ 	}
+ 	sk->sk_gso_max_segs = max_segs;
+diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
+index 8f11870b77377d..508b23204edc5b 100644
+--- a/net/ipv4/icmp.c
++++ b/net/ipv4/icmp.c
+@@ -311,18 +311,20 @@ static bool icmpv4_xrlim_allow(struct net *net, struct rtable *rt,
+ {
+ 	struct dst_entry *dst = &rt->dst;
+ 	struct inet_peer *peer;
++	struct net_device *dev;
+ 	bool rc = true;
+ 
+ 	if (!apply_ratelimit)
+ 		return true;
+ 
+ 	/* No rate limit on loopback */
+-	if (dst->dev && (dst->dev->flags&IFF_LOOPBACK))
++	dev = dst_dev(dst);
++	if (dev && (dev->flags & IFF_LOOPBACK))
+ 		goto out;
+ 
+ 	rcu_read_lock();
+ 	peer = inet_getpeer_v4(net->ipv4.peers, fl4->daddr,
+-			       l3mdev_master_ifindex_rcu(dst->dev));
++			       l3mdev_master_ifindex_rcu(dev));
+ 	rc = inet_peer_xrlim_allow(peer,
+ 				   READ_ONCE(net->ipv4.sysctl_icmp_ratelimit));
+ 	rcu_read_unlock();
+@@ -468,13 +470,13 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
+  */
+ static struct net_device *icmp_get_route_lookup_dev(struct sk_buff *skb)
+ {
+-	struct net_device *route_lookup_dev = NULL;
++	struct net_device *dev = skb->dev;
++	const struct dst_entry *dst;
+ 
+-	if (skb->dev)
+-		route_lookup_dev = skb->dev;
+-	else if (skb_dst(skb))
+-		route_lookup_dev = skb_dst(skb)->dev;
+-	return route_lookup_dev;
++	if (dev)
++		return dev;
++	dst = skb_dst(skb);
++	return dst ? dst_dev(dst) : NULL;
+ }
+ 
+ static struct rtable *icmp_route_lookup(struct net *net, struct flowi4 *fl4,
+@@ -873,7 +875,7 @@ static enum skb_drop_reason icmp_unreach(struct sk_buff *skb)
+ 	struct net *net;
+ 	u32 info = 0;
+ 
+-	net = dev_net_rcu(skb_dst(skb)->dev);
++	net = skb_dst_dev_net_rcu(skb);
+ 
+ 	/*
+ 	 *	Incomplete header ?
+@@ -1016,7 +1018,7 @@ static enum skb_drop_reason icmp_echo(struct sk_buff *skb)
+ 	struct icmp_bxm icmp_param;
+ 	struct net *net;
+ 
+-	net = dev_net_rcu(skb_dst(skb)->dev);
++	net = skb_dst_dev_net_rcu(skb);
+ 	/* should there be an ICMP stat for ignored echos? */
+ 	if (READ_ONCE(net->ipv4.sysctl_icmp_echo_ignore_all))
+ 		return SKB_NOT_DROPPED_YET;
+@@ -1186,7 +1188,7 @@ static enum skb_drop_reason icmp_timestamp(struct sk_buff *skb)
+ 	return SKB_NOT_DROPPED_YET;
+ 
+ out_err:
+-	__ICMP_INC_STATS(dev_net_rcu(skb_dst(skb)->dev), ICMP_MIB_INERRORS);
++	__ICMP_INC_STATS(skb_dst_dev_net_rcu(skb), ICMP_MIB_INERRORS);
+ 	return SKB_DROP_REASON_PKT_TOO_SMALL;
+ }
+ 
+diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
+index 9bf09de6a2e77c..f4a87b90351e94 100644
+--- a/net/ipv4/igmp.c
++++ b/net/ipv4/igmp.c
+@@ -424,7 +424,7 @@ static int igmpv3_sendpack(struct sk_buff *skb)
+ 
+ 	pig->csum = ip_compute_csum(igmp_hdr(skb), igmplen);
+ 
+-	return ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
++	return ip_local_out(skb_dst_dev_net(skb), skb->sk, skb);
+ }
+ 
+ static int grec_size(struct ip_mc_list *pmc, int type, int gdel, int sdel)
+diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
+index 9ca0a183a55ffa..183856b0b74094 100644
+--- a/net/ipv4/ip_fragment.c
++++ b/net/ipv4/ip_fragment.c
+@@ -488,7 +488,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
+ /* Process an incoming IP datagram fragment. */
+ int ip_defrag(struct net *net, struct sk_buff *skb, u32 user)
+ {
+-	struct net_device *dev = skb->dev ? : skb_dst(skb)->dev;
++	struct net_device *dev = skb->dev ? : skb_dst_dev(skb);
+ 	int vif = l3mdev_master_ifindex_rcu(dev);
+ 	struct ipq *qp;
+ 
+diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
+index 49811c9281d424..0bda561aa8a89f 100644
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -117,7 +117,7 @@ int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
+ 	skb->protocol = htons(ETH_P_IP);
+ 
+ 	return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT,
+-		       net, sk, skb, NULL, skb_dst(skb)->dev,
++		       net, sk, skb, NULL, skb_dst_dev(skb),
+ 		       dst_output);
+ }
+ 
+@@ -200,7 +200,7 @@ static int ip_finish_output2(struct net *net, struct sock *sk, struct sk_buff *s
+ {
+ 	struct dst_entry *dst = skb_dst(skb);
+ 	struct rtable *rt = dst_rtable(dst);
+-	struct net_device *dev = dst->dev;
++	struct net_device *dev = dst_dev(dst);
+ 	unsigned int hh_len = LL_RESERVED_SPACE(dev);
+ 	struct neighbour *neigh;
+ 	bool is_v6gw = false;
+@@ -426,15 +426,20 @@ int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+ 
+ int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+ {
+-	struct net_device *dev = skb_dst(skb)->dev, *indev = skb->dev;
++	struct net_device *dev, *indev = skb->dev;
++	int ret_val;
+ 
++	rcu_read_lock();
++	dev = skb_dst_dev_rcu(skb);
+ 	skb->dev = dev;
+ 	skb->protocol = htons(ETH_P_IP);
+ 
+-	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
+-			    net, sk, skb, indev, dev,
+-			    ip_finish_output,
+-			    !(IPCB(skb)->flags & IPSKB_REROUTED));
++	ret_val = NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
++				net, sk, skb, indev, dev,
++				ip_finish_output,
++				!(IPCB(skb)->flags & IPSKB_REROUTED));
++	rcu_read_unlock();
++	return ret_val;
+ }
+ EXPORT_SYMBOL(ip_output);
+ 
+diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
+index 09b73acf037ae2..7c77d06372d195 100644
+--- a/net/ipv4/ip_tunnel.c
++++ b/net/ipv4/ip_tunnel.c
+@@ -567,20 +567,6 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
+ 	return 0;
+ }
+ 
+-static void ip_tunnel_adj_headroom(struct net_device *dev, unsigned int headroom)
+-{
+-	/* we must cap headroom to some upperlimit, else pskb_expand_head
+-	 * will overflow header offsets in skb_headers_offset_update().
+-	 */
+-	static const unsigned int max_allowed = 512;
+-
+-	if (headroom > max_allowed)
+-		headroom = max_allowed;
+-
+-	if (headroom > READ_ONCE(dev->needed_headroom))
+-		WRITE_ONCE(dev->needed_headroom, headroom);
+-}
+-
+ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
+ 		       u8 proto, int tunnel_hlen)
+ {
+diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
+index f0b4419cef3493..fc951616630711 100644
+--- a/net/ipv4/ip_vti.c
++++ b/net/ipv4/ip_vti.c
+@@ -229,7 +229,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
+ 		goto tx_error_icmp;
+ 	}
+ 
+-	tdev = dst->dev;
++	tdev = dst_dev(dst);
+ 
+ 	if (tdev == dev) {
+ 		dst_release(dst);
+@@ -259,7 +259,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
+ xmit:
+ 	skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(dev)));
+ 	skb_dst_set(skb, dst);
+-	skb->dev = skb_dst(skb)->dev;
++	skb->dev = skb_dst_dev(skb);
+ 
+ 	err = dst_output(tunnel->net, skb->sk, skb);
+ 	if (net_xmit_eval(err) == 0)
+diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
+index e0aab66cd92511..dff06b9eb6607f 100644
+--- a/net/ipv4/netfilter.c
++++ b/net/ipv4/netfilter.c
+@@ -20,12 +20,12 @@
+ /* route_me_harder function, used by iptable_nat, iptable_mangle + ip_queue */
+ int ip_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb, unsigned int addr_type)
+ {
++	struct net_device *dev = skb_dst_dev(skb);
+ 	const struct iphdr *iph = ip_hdr(skb);
+ 	struct rtable *rt;
+ 	struct flowi4 fl4 = {};
+ 	__be32 saddr = iph->saddr;
+ 	__u8 flags;
+-	struct net_device *dev = skb_dst(skb)->dev;
+ 	struct flow_keys flkeys;
+ 	unsigned int hh_len;
+ 
+@@ -74,7 +74,7 @@ int ip_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb, un
+ #endif
+ 
+ 	/* Change in oif may mean change in hh_len. */
+-	hh_len = skb_dst(skb)->dev->hard_header_len;
++	hh_len = skb_dst_dev(skb)->hard_header_len;
+ 	if (skb_headroom(skb) < hh_len &&
+ 	    pskb_expand_head(skb, HH_DATA_ALIGN(hh_len - skb_headroom(skb)),
+ 				0, GFP_ATOMIC))
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 261ddb6542a40f..7d04df4fc66084 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -413,7 +413,7 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
+ 					   const void *daddr)
+ {
+ 	const struct rtable *rt = container_of(dst, struct rtable, dst);
+-	struct net_device *dev = dst->dev;
++	struct net_device *dev = dst_dev(dst);
+ 	struct neighbour *n;
+ 
+ 	rcu_read_lock();
+@@ -440,7 +440,7 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
+ static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr)
+ {
+ 	const struct rtable *rt = container_of(dst, struct rtable, dst);
+-	struct net_device *dev = dst->dev;
++	struct net_device *dev = dst_dev(dst);
+ 	const __be32 *pkey = daddr;
+ 
+ 	if (rt->rt_gw_family == AF_INET) {
+@@ -1025,7 +1025,7 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
+ 		return;
+ 
+ 	rcu_read_lock();
+-	net = dev_net_rcu(dst->dev);
++	net = dev_net_rcu(dst_dev(dst));
+ 	if (mtu < net->ipv4.ip_rt_min_pmtu) {
+ 		lock = true;
+ 		mtu = min(old_mtu, net->ipv4.ip_rt_min_pmtu);
+@@ -1323,7 +1323,7 @@ static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
+ 	struct net *net;
+ 
+ 	rcu_read_lock();
+-	net = dev_net_rcu(dst->dev);
++	net = dev_net_rcu(dst_dev(dst));
+ 	advmss = max_t(unsigned int, ipv4_mtu(dst) - header_size,
+ 				   net->ipv4.ip_rt_min_advmss);
+ 	rcu_read_unlock();
+diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
+index 408985eb74eefa..86c995dc1c5e5b 100644
+--- a/net/ipv4/tcp_fastopen.c
++++ b/net/ipv4/tcp_fastopen.c
+@@ -558,6 +558,7 @@ bool tcp_fastopen_active_should_disable(struct sock *sk)
+ void tcp_fastopen_active_disable_ofo_check(struct sock *sk)
+ {
+ 	struct tcp_sock *tp = tcp_sk(sk);
++	struct net_device *dev;
+ 	struct dst_entry *dst;
+ 	struct sk_buff *skb;
+ 
+@@ -575,7 +576,8 @@ void tcp_fastopen_active_disable_ofo_check(struct sock *sk)
+ 	} else if (tp->syn_fastopen_ch &&
+ 		   atomic_read(&sock_net(sk)->ipv4.tfo_active_disable_times)) {
+ 		dst = sk_dst_get(sk);
+-		if (!(dst && dst->dev && (dst->dev->flags & IFF_LOOPBACK)))
++		dev = dst ? dst_dev(dst) : NULL;
++		if (!(dev && (dev->flags & IFF_LOOPBACK)))
+ 			atomic_set(&sock_net(sk)->ipv4.tfo_active_disable_times, 0);
+ 		dst_release(dst);
+ 	}
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 4c8d84fc27ca35..1d9e93a04930b8 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -331,9 +331,8 @@ static void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks)
+ static bool tcp_in_quickack_mode(struct sock *sk)
+ {
+ 	const struct inet_connection_sock *icsk = inet_csk(sk);
+-	const struct dst_entry *dst = __sk_dst_get(sk);
+ 
+-	return (dst && dst_metric(dst, RTAX_QUICKACK)) ||
++	return icsk->icsk_ack.dst_quick_ack ||
+ 		(icsk->icsk_ack.quick && !inet_csk_in_pingpong_mode(sk));
+ }
+ 
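
/*
 * Shape of the quick-ACK change, read together with the sk_setup_caps()
 * hunk in net/core/sock.c above: the RTAX_QUICKACK route metric is copied
 * onto the socket once, when the route is attached, so the per-ACK hot
 * path no longer dereferences the dst. The structs here are hypothetical
 * simplifications, not the kernel's.
 */
#include <stdbool.h>
#include <stdio.h>

struct route { bool quickack_metric; };	/* RTAX_QUICKACK stand-in */

struct conn {
	bool dst_quick_ack;	/* cached once per route change */
	bool quick;
	bool pingpong;
};

/* cold path, like sk_setup_caps() */
static void attach_route(struct conn *c, const struct route *rt)
{
	c->dst_quick_ack = rt->quickack_metric;
}

/* hot path, like the new tcp_in_quickack_mode() */
static bool in_quickack_mode(const struct conn *c)
{
	return c->dst_quick_ack || (c->quick && !c->pingpong);
}

int main(void)
{
	struct route rt = { true };
	struct conn c = { false, false, true };

	attach_route(&c, &rt);
	printf("%d\n", in_quickack_mode(&c));	/* 1: the cached metric decides */
	return 0;
}
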
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index 824048679e1b8f..1572562b0498c9 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -494,14 +494,14 @@ int tcp_v4_err(struct sk_buff *skb, u32 info)
+ {
+ 	const struct iphdr *iph = (const struct iphdr *)skb->data;
+ 	struct tcphdr *th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
+-	struct tcp_sock *tp;
++	struct net *net = dev_net_rcu(skb->dev);
+ 	const int type = icmp_hdr(skb)->type;
+ 	const int code = icmp_hdr(skb)->code;
+-	struct sock *sk;
+ 	struct request_sock *fastopen;
++	struct tcp_sock *tp;
+ 	u32 seq, snd_una;
++	struct sock *sk;
+ 	int err;
+-	struct net *net = dev_net(skb->dev);
+ 
+ 	sk = __inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
+ 				       iph->daddr, th->dest, iph->saddr,
+@@ -786,7 +786,7 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb,
+ 	arg.iov[0].iov_base = (unsigned char *)&rep;
+ 	arg.iov[0].iov_len  = sizeof(rep.th);
+ 
+-	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
++	net = sk ? sock_net(sk) : skb_dst_dev_net_rcu(skb);
+ 
+ 	/* Invalid TCP option size or twice included auth */
+ 	if (tcp_parse_auth_options(tcp_hdr(skb), &md5_hash_location, &aoh))
+@@ -1965,7 +1965,7 @@ EXPORT_SYMBOL(tcp_v4_do_rcv);
+ 
+ int tcp_v4_early_demux(struct sk_buff *skb)
+ {
+-	struct net *net = dev_net(skb->dev);
++	struct net *net = dev_net_rcu(skb->dev);
+ 	const struct iphdr *iph;
+ 	const struct tcphdr *th;
+ 	struct sock *sk;
+@@ -2176,7 +2176,7 @@ static void tcp_v4_fill_cb(struct sk_buff *skb, const struct iphdr *iph,
+ 
+ int tcp_v4_rcv(struct sk_buff *skb)
+ {
+-	struct net *net = dev_net(skb->dev);
++	struct net *net = dev_net_rcu(skb->dev);
+ 	enum skb_drop_reason drop_reason;
+ 	int sdif = inet_sdif(skb);
+ 	int dif = inet_iif(skb);
+diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
+index 95669935494ef8..03c068ea27b6ad 100644
+--- a/net/ipv4/tcp_metrics.c
++++ b/net/ipv4/tcp_metrics.c
+@@ -166,11 +166,11 @@ static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
+ 					  unsigned int hash)
+ {
+ 	struct tcp_metrics_block *tm;
+-	struct net *net;
+ 	bool reclaim = false;
++	struct net *net;
+ 
+ 	spin_lock_bh(&tcp_metrics_lock);
+-	net = dev_net(dst->dev);
++	net = dev_net_rcu(dst_dev(dst));
+ 
+ 	/* While waiting for the spin-lock the cache might have been populated
+ 	 * with this entry and so we have to check again.
+@@ -273,7 +273,7 @@ static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
+ 		return NULL;
+ 	}
+ 
+-	net = dev_net(dst->dev);
++	net = dev_net_rcu(dst_dev(dst));
+ 	hash ^= net_hash_mix(net);
+ 	hash = hash_32(hash, tcp_metrics_hash_log);
+ 
+@@ -318,7 +318,7 @@ static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
+ 	else
+ 		return NULL;
+ 
+-	net = dev_net(dst->dev);
++	net = dev_net_rcu(dst_dev(dst));
+ 	hash ^= net_hash_mix(net);
+ 	hash = hash_32(hash, tcp_metrics_hash_log);
+ 
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 6d5387811c32ad..5e37dc45639db1 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -2217,7 +2217,8 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
+ 				 u32 max_segs)
+ {
+ 	const struct inet_connection_sock *icsk = inet_csk(sk);
+-	u32 send_win, cong_win, limit, in_flight;
++	u32 send_win, cong_win, limit, in_flight, threshold;
++	u64 srtt_in_ns, expected_ack, how_far_is_the_ack;
+ 	struct tcp_sock *tp = tcp_sk(sk);
+ 	struct sk_buff *head;
+ 	int win_divisor;
+@@ -2279,9 +2280,19 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
+ 	head = tcp_rtx_queue_head(sk);
+ 	if (!head)
+ 		goto send_now;
+-	delta = tp->tcp_clock_cache - head->tstamp;
+-	/* If next ACK is likely to come too late (half srtt), do not defer */
+-	if ((s64)(delta - (u64)NSEC_PER_USEC * (tp->srtt_us >> 4)) < 0)
++
++	srtt_in_ns = (u64)(NSEC_PER_USEC >> 3) * tp->srtt_us;
++	/* When is the ACK expected ? */
++	expected_ack = head->tstamp + srtt_in_ns;
++	/* How far from now is the ACK expected ? */
++	how_far_is_the_ack = expected_ack - tp->tcp_clock_cache;
++
++	/* If next ACK is likely to come too late,
++	 * i.e. in more than min(1ms, half srtt), do not defer.
++	 */
++	threshold = min(srtt_in_ns >> 1, NSEC_PER_MSEC);
++
++	if ((s64)(how_far_is_the_ack - threshold) > 0)
+ 		goto send_now;
+ 
+ 	/* Ok, it looks like it is advisable to defer.
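
/*
 * Worked numbers for the new deferral cutoff above. tp->srtt_us stores
 * 8 * (smoothed RTT in microseconds), so NSEC_PER_USEC >> 3, i.e. 125,
 * converts it directly to nanoseconds. The RTT value below is made up
 * for illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_USEC 1000ULL
#define NSEC_PER_MSEC 1000000ULL

int main(void)
{
	uint32_t srtt_us = 8 * 5000;	/* pretend the smoothed RTT is 5 ms */
	uint64_t srtt_in_ns = (NSEC_PER_USEC >> 3) * srtt_us;	/* 5,000,000 ns */
	uint64_t threshold = srtt_in_ns / 2 < NSEC_PER_MSEC
			   ? srtt_in_ns / 2 : NSEC_PER_MSEC;

	/* half srtt would be 2.5 ms, so the new 1 ms cap applies here */
	printf("srtt=%llu ns threshold=%llu ns\n",
	       (unsigned long long)srtt_in_ns,
	       (unsigned long long)threshold);
	return 0;
}
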
+diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c
+index 3cff51ba72bb01..0ae67d537499a2 100644
+--- a/net/ipv4/xfrm4_output.c
++++ b/net/ipv4/xfrm4_output.c
+@@ -31,7 +31,7 @@ static int __xfrm4_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+ int xfrm4_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+ {
+ 	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
+-			    net, sk, skb, skb->dev, skb_dst(skb)->dev,
++			    net, sk, skb, skb->dev, skb_dst_dev(skb),
+ 			    __xfrm4_output,
+ 			    !(IPCB(skb)->flags & IPSKB_REROUTED));
+ }
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index 5350c9bb2319bf..b72ca103490686 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -1257,8 +1257,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
+ 	 */
+ 	max_headroom = LL_RESERVED_SPACE(dst->dev) + sizeof(struct ipv6hdr)
+ 			+ dst->header_len + t->hlen;
+-	if (max_headroom > READ_ONCE(dev->needed_headroom))
+-		WRITE_ONCE(dev->needed_headroom, max_headroom);
++	ip_tunnel_adj_headroom(dev, max_headroom);
+ 
+ 	err = ip6_tnl_encap(skb, t, &proto, fl6);
+ 	if (err)
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index 882ce5444572ea..06edfe0b2db904 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -376,7 +376,7 @@ static int tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ {
+ 	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
+ 	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
+-	struct net *net = dev_net(skb->dev);
++	struct net *net = dev_net_rcu(skb->dev);
+ 	struct request_sock *fastopen;
+ 	struct ipv6_pinfo *np;
+ 	struct tcp_sock *tp;
+@@ -864,16 +864,16 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32
+ 				 int oif, int rst, u8 tclass, __be32 label,
+ 				 u32 priority, u32 txhash, struct tcp_key *key)
+ {
+-	const struct tcphdr *th = tcp_hdr(skb);
+-	struct tcphdr *t1;
+-	struct sk_buff *buff;
+-	struct flowi6 fl6;
+-	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
+-	struct sock *ctl_sk = net->ipv6.tcp_sk;
++	struct net *net = sk ? sock_net(sk) : dev_net_rcu(skb_dst(skb)->dev);
+ 	unsigned int tot_len = sizeof(struct tcphdr);
++	struct sock *ctl_sk = net->ipv6.tcp_sk;
++	const struct tcphdr *th = tcp_hdr(skb);
+ 	__be32 mrst = 0, *topt;
+ 	struct dst_entry *dst;
+-	__u32 mark = 0;
++	struct sk_buff *buff;
++	struct tcphdr *t1;
++	struct flowi6 fl6;
++	u32 mark = 0;
+ 
+ 	if (tsecr)
+ 		tot_len += TCPOLEN_TSTAMP_ALIGNED;
+@@ -1036,7 +1036,7 @@ static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb,
+ 	if (!sk && !ipv6_unicast_destination(skb))
+ 		return;
+ 
+-	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
++	net = sk ? sock_net(sk) : dev_net_rcu(skb_dst(skb)->dev);
+ 	/* Invalid TCP option size or twice included auth */
+ 	if (tcp_parse_auth_options(th, &md5_hash_location, &aoh))
+ 		return;
+@@ -1739,6 +1739,7 @@ static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
+ 
+ INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb)
+ {
++	struct net *net = dev_net_rcu(skb->dev);
+ 	enum skb_drop_reason drop_reason;
+ 	int sdif = inet6_sdif(skb);
+ 	int dif = inet6_iif(skb);
+@@ -1748,7 +1749,6 @@ INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb)
+ 	bool refcounted;
+ 	int ret;
+ 	u32 isn;
+-	struct net *net = dev_net(skb->dev);
+ 
+ 	drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
+ 	if (skb->pkt_type != PACKET_HOST)
+@@ -1999,7 +1999,7 @@ INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb)
+ 
+ void tcp_v6_early_demux(struct sk_buff *skb)
+ {
+-	struct net *net = dev_net(skb->dev);
++	struct net *net = dev_net_rcu(skb->dev);
+ 	const struct ipv6hdr *hdr;
+ 	const struct tcphdr *th;
+ 	struct sock *sk;
+diff --git a/net/mptcp/ctrl.c b/net/mptcp/ctrl.c
+index dd595d9b5e50c7..0d60556cfefabc 100644
+--- a/net/mptcp/ctrl.c
++++ b/net/mptcp/ctrl.c
+@@ -381,10 +381,15 @@ void mptcp_active_enable(struct sock *sk)
+ 	struct mptcp_pernet *pernet = mptcp_get_pernet(sock_net(sk));
+ 
+ 	if (atomic_read(&pernet->active_disable_times)) {
+-		struct dst_entry *dst = sk_dst_get(sk);
++		struct net_device *dev;
++		struct dst_entry *dst;
+ 
+-		if (dst && dst->dev && (dst->dev->flags & IFF_LOOPBACK))
++		rcu_read_lock();
++		dst = __sk_dst_get(sk);
++		dev = dst ? dst_dev_rcu(dst) : NULL;
++		if (!(dev && (dev->flags & IFF_LOOPBACK)))
+ 			atomic_set(&pernet->active_disable_times, 0);
++		rcu_read_unlock();
+ 	}
+ }
+ 
+diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
+index 0acf313deb01ff..e52e4c91c90911 100644
+--- a/net/tls/tls_main.c
++++ b/net/tls/tls_main.c
+@@ -255,12 +255,9 @@ int tls_process_cmsg(struct sock *sk, struct msghdr *msg,
+ 			if (msg->msg_flags & MSG_MORE)
+ 				return -EINVAL;
+ 
+-			rc = tls_handle_open_record(sk, msg->msg_flags);
+-			if (rc)
+-				return rc;
+-
+ 			*record_type = *(unsigned char *)CMSG_DATA(cmsg);
+-			rc = 0;
++
++			rc = tls_handle_open_record(sk, msg->msg_flags);
+ 			break;
+ 		default:
+ 			return -EINVAL;
+diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
+index f46550b96061ea..1ff0d01bdadf0d 100644
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -1054,7 +1054,7 @@ static int tls_sw_sendmsg_locked(struct sock *sk, struct msghdr *msg,
+ 			if (ret == -EINPROGRESS)
+ 				num_async++;
+ 			else if (ret != -EAGAIN)
+-				goto send_end;
++				goto end;
+ 		}
+ 	}
+ 
+@@ -1112,8 +1112,11 @@ static int tls_sw_sendmsg_locked(struct sock *sk, struct msghdr *msg,
+ 				goto send_end;
+ 			tls_ctx->pending_open_record_frags = true;
+ 
+-			if (sk_msg_full(msg_pl))
++			if (sk_msg_full(msg_pl)) {
+ 				full_record = true;
++				sk_msg_trim(sk, msg_en,
++					    msg_pl->sg.size + prot->overhead_size);
++			}
+ 
+ 			if (full_record || eor)
+ 				goto copied;
+@@ -1149,6 +1152,13 @@ static int tls_sw_sendmsg_locked(struct sock *sk, struct msghdr *msg,
+ 				} else if (ret != -EAGAIN)
+ 					goto send_end;
+ 			}
++
++			/* Transmit if any encryptions have completed */
++			if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
++				cancel_delayed_work(&ctx->tx_work.work);
++				tls_tx_records(sk, msg->msg_flags);
++			}
++
+ 			continue;
+ rollback_iter:
+ 			copied -= try_to_copy;
+@@ -1204,6 +1214,12 @@ static int tls_sw_sendmsg_locked(struct sock *sk, struct msghdr *msg,
+ 					goto send_end;
+ 				}
+ 			}
++
++			/* Transmit if any encryptions have completed */
++			if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
++				cancel_delayed_work(&ctx->tx_work.work);
++				tls_tx_records(sk, msg->msg_flags);
++			}
+ 		}
+ 
+ 		continue;
+@@ -1223,8 +1239,9 @@ static int tls_sw_sendmsg_locked(struct sock *sk, struct msghdr *msg,
+ 			goto alloc_encrypted;
+ 	}
+ 
++send_end:
+ 	if (!num_async) {
+-		goto send_end;
++		goto end;
+ 	} else if (num_zc || eor) {
+ 		int err;
+ 
+@@ -1242,7 +1259,7 @@ static int tls_sw_sendmsg_locked(struct sock *sk, struct msghdr *msg,
+ 		tls_tx_records(sk, msg->msg_flags);
+ 	}
+ 
+-send_end:
++end:
+ 	ret = sk_stream_error(sk, msg->msg_flags, ret);
+ 	return copied > 0 ? copied : ret;
+ }
+@@ -1633,8 +1650,10 @@ static int tls_decrypt_sg(struct sock *sk, struct iov_iter *out_iov,
+ 
+ 	if (unlikely(darg->async)) {
+ 		err = tls_strp_msg_hold(&ctx->strp, &ctx->async_hold);
+-		if (err)
+-			__skb_queue_tail(&ctx->async_hold, darg->skb);
++		if (err) {
++			err = tls_decrypt_async_wait(ctx);
++			darg->async = false;
++		}
+ 		return err;
+ 	}
+ 
+diff --git a/rust/bindings/bindings_helper.h b/rust/bindings/bindings_helper.h
+index a80783fcbe042a..8b97919a86e2d7 100644
+--- a/rust/bindings/bindings_helper.h
++++ b/rust/bindings/bindings_helper.h
+@@ -33,3 +33,4 @@ const gfp_t RUST_CONST_HELPER___GFP_ZERO = __GFP_ZERO;
+ const gfp_t RUST_CONST_HELPER___GFP_HIGHMEM = ___GFP_HIGHMEM;
+ const gfp_t RUST_CONST_HELPER___GFP_NOWARN = ___GFP_NOWARN;
+ const blk_features_t RUST_CONST_HELPER_BLK_FEAT_ROTATIONAL = BLK_FEAT_ROTATIONAL;
++const vm_flags_t RUST_CONST_HELPER_VM_MERGEABLE = VM_MERGEABLE;
+diff --git a/sound/firewire/amdtp-stream.h b/sound/firewire/amdtp-stream.h
+index 775db3fc4959f5..ec10270c2cce3d 100644
+--- a/sound/firewire/amdtp-stream.h
++++ b/sound/firewire/amdtp-stream.h
+@@ -32,7 +32,7 @@
+  *	allows 5 times as large as IEC 61883-6 defines.
+  * @CIP_HEADER_WITHOUT_EOH: Only for in-stream. CIP Header doesn't include
+  *	valid EOH.
+- * @CIP_NO_HEADERS: a lack of headers in packets
++ * @CIP_NO_HEADER: a lack of headers in packets
+  * @CIP_UNALIGHED_DBC: Only for in-stream. The value of dbc is not alighed to
+  *	the value of current SYT_INTERVAL; e.g. initial value is not zero.
+  * @CIP_UNAWARE_SYT: For outgoing packet, the value in SYT field of CIP is 0xffff.
+diff --git a/sound/soc/amd/acp/acp-sdw-sof-mach.c b/sound/soc/amd/acp/acp-sdw-sof-mach.c
+index 99a244f495bd30..876f0b7fcd3dee 100644
+--- a/sound/soc/amd/acp/acp-sdw-sof-mach.c
++++ b/sound/soc/amd/acp/acp-sdw-sof-mach.c
+@@ -216,9 +216,9 @@ static int create_sdw_dailink(struct snd_soc_card *card,
+ 			cpus->dai_name = devm_kasprintf(dev, GFP_KERNEL,
+ 							"SDW%d Pin%d",
+ 							link_num, cpu_pin_id);
+-			dev_dbg(dev, "cpu->dai_name:%s\n", cpus->dai_name);
+ 			if (!cpus->dai_name)
+ 				return -ENOMEM;
++			dev_dbg(dev, "cpu->dai_name:%s\n", cpus->dai_name);
+ 
+ 			codec_maps[j].cpu = 0;
+ 			codec_maps[j].codec = j;
+diff --git a/sound/soc/codecs/idt821034.c b/sound/soc/codecs/idt821034.c
+index cb7a68c799f8fc..401d0897b8ab45 100644
+--- a/sound/soc/codecs/idt821034.c
++++ b/sound/soc/codecs/idt821034.c
+@@ -548,14 +548,14 @@ static int idt821034_kctrl_mute_put(struct snd_kcontrol *kcontrol,
+ 	return ret;
+ }
+ 
+-static const DECLARE_TLV_DB_LINEAR(idt821034_gain_in, -6520, 1306);
+-#define IDT821034_GAIN_IN_MIN_RAW	1 /* -65.20 dB -> 10^(-65.2/20.0) * 1820 = 1 */
+-#define IDT821034_GAIN_IN_MAX_RAW	8191 /* 13.06 dB -> 10^(13.06/20.0) * 1820 = 8191 */
++static const DECLARE_TLV_DB_LINEAR(idt821034_gain_in, -300, 1300);
++#define IDT821034_GAIN_IN_MIN_RAW	1288 /* -3.0 dB -> 10^(-3.0/20.0) * 1820 = 1288 */
++#define IDT821034_GAIN_IN_MAX_RAW	8130 /* 13.0 dB -> 10^(13.0/20.0) * 1820 = 8130 */
+ #define IDT821034_GAIN_IN_INIT_RAW	1820 /* 0dB -> 10^(0/20) * 1820 = 1820 */
+ 
+-static const DECLARE_TLV_DB_LINEAR(idt821034_gain_out, -6798, 1029);
+-#define IDT821034_GAIN_OUT_MIN_RAW	1 /* -67.98 dB -> 10^(-67.98/20.0) * 2506 = 1*/
+-#define IDT821034_GAIN_OUT_MAX_RAW	8191 /* 10.29 dB -> 10^(10.29/20.0) * 2506 = 8191 */
++static const DECLARE_TLV_DB_LINEAR(idt821034_gain_out, -1300, 300);
++#define IDT821034_GAIN_OUT_MIN_RAW	561 /* -13.0 dB -> 10^(-13.0/20.0) * 2506 = 561 */
++#define IDT821034_GAIN_OUT_MAX_RAW	3540 /* 3.0 dB -> 10^(3.0/20.0) * 2506 = 3540 */
+ #define IDT821034_GAIN_OUT_INIT_RAW	2506 /* 0dB -> 10^(0/20) * 2506 = 2506 */
+ 
+ static const struct snd_kcontrol_new idt821034_controls[] = {
+diff --git a/sound/soc/codecs/nau8821.c b/sound/soc/codecs/nau8821.c
+index de5c4db05c8f8b..bfb719ca4c2cf2 100644
+--- a/sound/soc/codecs/nau8821.c
++++ b/sound/soc/codecs/nau8821.c
+@@ -26,7 +26,8 @@
+ #include <sound/tlv.h>
+ #include "nau8821.h"
+ 
+-#define NAU8821_JD_ACTIVE_HIGH			BIT(0)
++#define NAU8821_QUIRK_JD_ACTIVE_HIGH			BIT(0)
++#define NAU8821_QUIRK_JD_DB_BYPASS			BIT(1)
+ 
+ static int nau8821_quirk;
+ static int quirk_override = -1;
+@@ -1022,12 +1023,17 @@ static bool nau8821_is_jack_inserted(struct regmap *regmap)
+ 	return active_high == is_high;
+ }
+ 
+-static void nau8821_int_status_clear_all(struct regmap *regmap)
++static void nau8821_irq_status_clear(struct regmap *regmap, int active_irq)
+ {
+-	int active_irq, clear_irq, i;
++	int clear_irq, i;
+ 
+-	/* Reset the intrruption status from rightmost bit if the corres-
+-	 * ponding irq event occurs.
++	if (active_irq) {
++		regmap_write(regmap, NAU8821_R11_INT_CLR_KEY_STATUS, active_irq);
++		return;
++	}
++
++	/* Reset the interruption status from rightmost bit if the
++	 * corresponding irq event occurs.
+ 	 */
+ 	regmap_read(regmap, NAU8821_R10_IRQ_STATUS, &active_irq);
+ 	for (i = 0; i < NAU8821_REG_DATA_LEN; i++) {
+@@ -1054,7 +1060,7 @@ static void nau8821_eject_jack(struct nau8821 *nau8821)
+ 	snd_soc_dapm_sync(dapm);
+ 
+ 	/* Clear all interruption status */
+-	nau8821_int_status_clear_all(regmap);
++	nau8821_irq_status_clear(regmap, 0);
+ 
+ 	/* Enable the insertion interruption, disable the ejection inter-
+ 	 * ruption, and then bypass de-bounce circuit.
+@@ -1161,9 +1167,10 @@ static void nau8821_setup_inserted_irq(struct nau8821 *nau8821)
+ 	regmap_update_bits(regmap, NAU8821_R1D_I2S_PCM_CTRL2,
+ 		NAU8821_I2S_MS_MASK, NAU8821_I2S_MS_SLAVE);
+ 
+-	/* Not bypass de-bounce circuit */
+-	regmap_update_bits(regmap, NAU8821_R0D_JACK_DET_CTRL,
+-		NAU8821_JACK_DET_DB_BYPASS, 0);
++	/* Do not bypass de-bounce circuit */
++	if (!(nau8821_quirk & NAU8821_QUIRK_JD_DB_BYPASS))
++		regmap_update_bits(regmap, NAU8821_R0D_JACK_DET_CTRL,
++				   NAU8821_JACK_DET_DB_BYPASS, 0);
+ 
+ 	regmap_update_bits(regmap, NAU8821_R0F_INTERRUPT_MASK,
+ 		NAU8821_IRQ_EJECT_EN, 0);
+@@ -1186,6 +1193,7 @@ static irqreturn_t nau8821_interrupt(int irq, void *data)
+ 
+ 	if ((active_irq & NAU8821_JACK_EJECT_IRQ_MASK) ==
+ 		NAU8821_JACK_EJECT_DETECTED) {
++		cancel_work_sync(&nau8821->jdet_work);
+ 		regmap_update_bits(regmap, NAU8821_R71_ANALOG_ADC_1,
+ 			NAU8821_MICDET_MASK, NAU8821_MICDET_DIS);
+ 		nau8821_eject_jack(nau8821);
+@@ -1200,11 +1208,11 @@ static irqreturn_t nau8821_interrupt(int irq, void *data)
+ 		clear_irq = NAU8821_KEY_RELEASE_IRQ;
+ 	} else if ((active_irq & NAU8821_JACK_INSERT_IRQ_MASK) ==
+ 		NAU8821_JACK_INSERT_DETECTED) {
++		cancel_work_sync(&nau8821->jdet_work);
+ 		regmap_update_bits(regmap, NAU8821_R71_ANALOG_ADC_1,
+ 			NAU8821_MICDET_MASK, NAU8821_MICDET_EN);
+ 		if (nau8821_is_jack_inserted(regmap)) {
+ 			/* detect microphone and jack type */
+-			cancel_work_sync(&nau8821->jdet_work);
+ 			schedule_work(&nau8821->jdet_work);
+ 			/* Turn off insertion interruption at manual mode */
+ 			regmap_update_bits(regmap,
+@@ -1522,7 +1530,7 @@ static int nau8821_resume_setup(struct nau8821 *nau8821)
+ 	nau8821_configure_sysclk(nau8821, NAU8821_CLK_DIS, 0);
+ 	if (nau8821->irq) {
+ 		/* Clear all interruption status */
+-		nau8821_int_status_clear_all(regmap);
++		nau8821_irq_status_clear(regmap, 0);
+ 
+ 		/* Enable both insertion and ejection interruptions, and then
+ 		 * bypass de-bounce circuit.
+@@ -1857,7 +1865,23 @@ static const struct dmi_system_id nau8821_quirk_table[] = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "Positivo Tecnologia SA"),
+ 			DMI_MATCH(DMI_BOARD_NAME, "CW14Q01P-V2"),
+ 		},
+-		.driver_data = (void *)(NAU8821_JD_ACTIVE_HIGH),
++		.driver_data = (void *)(NAU8821_QUIRK_JD_ACTIVE_HIGH),
++	},
++	{
++		/* Valve Steam Deck LCD */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Valve"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Jupiter"),
++		},
++		.driver_data = (void *)(NAU8821_QUIRK_JD_DB_BYPASS),
++	},
++	{
++		/* Valve Steam Deck OLED */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Valve"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Galileo"),
++		},
++		.driver_data = (void *)(NAU8821_QUIRK_JD_DB_BYPASS),
+ 	},
+ 	{}
+ };
+@@ -1899,9 +1923,12 @@ static int nau8821_i2c_probe(struct i2c_client *i2c)
+ 
+ 	nau8821_check_quirks();
+ 
+-	if (nau8821_quirk & NAU8821_JD_ACTIVE_HIGH)
++	if (nau8821_quirk & NAU8821_QUIRK_JD_ACTIVE_HIGH)
+ 		nau8821->jkdet_polarity = 0;
+ 
++	if (nau8821_quirk & NAU8821_QUIRK_JD_DB_BYPASS)
++		dev_dbg(dev, "Force bypassing jack detection debounce circuit\n");
++
+ 	nau8821_print_device_properties(nau8821);
+ 
+ 	nau8821_reset_chip(nau8821->regmap);
+diff --git a/sound/usb/card.c b/sound/usb/card.c
+index 9c411b82a218dc..d0a42859208aaa 100644
+--- a/sound/usb/card.c
++++ b/sound/usb/card.c
+@@ -760,10 +760,16 @@ get_alias_quirk(struct usb_device *dev, unsigned int id)
+  */
+ static int try_to_register_card(struct snd_usb_audio *chip, int ifnum)
+ {
++	struct usb_interface *iface;
++
+ 	if (check_delayed_register_option(chip) == ifnum ||
+-	    chip->last_iface == ifnum ||
+-	    usb_interface_claimed(usb_ifnum_to_if(chip->dev, chip->last_iface)))
++	    chip->last_iface == ifnum)
++		return snd_card_register(chip->card);
++
++	iface = usb_ifnum_to_if(chip->dev, chip->last_iface);
++	if (iface && usb_interface_claimed(iface))
+ 		return snd_card_register(chip->card);
++
+ 	return 0;
+ }
+ 
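
/*
 * The sound/usb hunk above is a plain NULL-check split: the lookup that
 * may fail is separated from the predicate that dereferences its result.
 * Same shape as a self-contained sketch; the table and names are invented
 * for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

struct iface { bool claimed; };

static struct iface table[2] = { { true }, { false } };

static struct iface *lookup(int num)	/* like usb_ifnum_to_if(): may be NULL */
{
	return (num >= 0 && num < 2) ? &table[num] : NULL;
}

static bool should_register(int last_iface)
{
	struct iface *i = lookup(last_iface);

	/* pre-fix code dereferenced lookup()'s result unconditionally */
	return i && i->claimed;
}

int main(void)
{
	printf("%d %d %d\n", should_register(0), should_register(1),
	       should_register(7));	/* 1 0 0 - the bad index no longer crashes */
	return 0;
}
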
+diff --git a/tools/testing/selftests/bpf/prog_tests/arg_parsing.c b/tools/testing/selftests/bpf/prog_tests/arg_parsing.c
+index bb143de68875cc..e27d66b75fb1fc 100644
+--- a/tools/testing/selftests/bpf/prog_tests/arg_parsing.c
++++ b/tools/testing/selftests/bpf/prog_tests/arg_parsing.c
+@@ -144,11 +144,17 @@ static void test_parse_test_list_file(void)
+ 	if (!ASSERT_OK(ferror(fp), "prepare tmp"))
+ 		goto out_fclose;
+ 
++	if (!ASSERT_OK(fsync(fileno(fp)), "fsync tmp"))
++		goto out_fclose;
++
+ 	init_test_filter_set(&set);
+ 
+-	ASSERT_OK(parse_test_list_file(tmpfile, &set, true), "parse file");
++	if (!ASSERT_OK(parse_test_list_file(tmpfile, &set, true), "parse file"))
++		goto out_fclose;
++
++	if (!ASSERT_EQ(set.cnt, 4, "test  count"))
++		goto out_free_set;
+ 
+-	ASSERT_EQ(set.cnt, 4, "test  count");
+ 	ASSERT_OK(strcmp("test_with_spaces", set.tests[0].name), "test 0 name");
+ 	ASSERT_EQ(set.tests[0].subtest_cnt, 0, "test 0 subtest count");
+ 	ASSERT_OK(strcmp("testA", set.tests[1].name), "test 1 name");
+@@ -158,8 +164,8 @@ static void test_parse_test_list_file(void)
+ 	ASSERT_OK(strcmp("testB", set.tests[2].name), "test 2 name");
+ 	ASSERT_OK(strcmp("testC_no_eof_newline", set.tests[3].name), "test 3 name");
+ 
++out_free_set:
+ 	free_test_filter_set(&set);
+-
+ out_fclose:
+ 	fclose(fp);
+ out_remove:


* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2025-10-20  5:30 Arisu Tachibana
  0 siblings, 0 replies; 82+ messages in thread
From: Arisu Tachibana @ 2025-10-20  5:30 UTC (permalink / raw
  To: gentoo-commits

commit:     79b56a9c5986ad16c9d0220059616c0a11252fd2
Author:     Arisu Tachibana <alicef <AT> gentoo <DOT> org>
AuthorDate: Mon Oct 20 05:30:07 2025 +0000
Commit:     Arisu Tachibana <alicef <AT> gentoo <DOT> org>
CommitDate: Mon Oct 20 05:30:07 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=79b56a9c

Linux patch 6.12.54

Signed-off-by: Arisu Tachibana <alicef <AT> gentoo.org>

 0000_README              |     4 +
 1053_linux-6.12.54.patch | 10707 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 10711 insertions(+)

diff --git a/0000_README b/0000_README
index 8f91dc7a..ad7dc827 100644
--- a/0000_README
+++ b/0000_README
@@ -255,6 +255,10 @@ Patch:  1052_linux-6.12.53.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.12.53
 
+Patch:  1053_linux-6.12.54.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.12.54
+
 Patch:  1500_fortify-copy-size-value-range-tracking-fix.patch
 From:   https://git.kernel.org/
 Desc:   fortify: Hide run-time copy size from value range tracking

diff --git a/1053_linux-6.12.54.patch b/1053_linux-6.12.54.patch
new file mode 100644
index 00000000..8b2be273
--- /dev/null
+++ b/1053_linux-6.12.54.patch
@@ -0,0 +1,10707 @@
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index 8724c2c580b887..e88505e945d528 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -5923,6 +5923,9 @@
+ 
+ 	rootflags=	[KNL] Set root filesystem mount option string
+ 
++	initramfs_options= [KNL]
++                        Specify mount options for the initramfs mount.
++
+ 	rootfstype=	[KNL] Set root filesystem type
+ 
+ 	rootwait	[KNL] Wait (indefinitely) for root device to show up.
+diff --git a/Documentation/devicetree/bindings/phy/rockchip-inno-csi-dphy.yaml b/Documentation/devicetree/bindings/phy/rockchip-inno-csi-dphy.yaml
+index 5ac994b3c0aa15..b304bc5a08c402 100644
+--- a/Documentation/devicetree/bindings/phy/rockchip-inno-csi-dphy.yaml
++++ b/Documentation/devicetree/bindings/phy/rockchip-inno-csi-dphy.yaml
+@@ -57,11 +57,24 @@ required:
+   - clocks
+   - clock-names
+   - '#phy-cells'
+-  - power-domains
+   - resets
+   - reset-names
+   - rockchip,grf
+ 
++allOf:
++  - if:
++      properties:
++        compatible:
++          contains:
++            enum:
++              - rockchip,px30-csi-dphy
++              - rockchip,rk1808-csi-dphy
++              - rockchip,rk3326-csi-dphy
++              - rockchip,rk3368-csi-dphy
++    then:
++      required:
++        - power-domains
++
+ additionalProperties: false
+ 
+ examples:
+diff --git a/Makefile b/Makefile
+index a4a2228276e67d..0c6deb33c23935 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 12
+-SUBLEVEL = 53
++SUBLEVEL = 54
+ EXTRAVERSION =
+ NAME = Baby Opossum Posse
+ 
+diff --git a/arch/arm/mach-omap2/am33xx-restart.c b/arch/arm/mach-omap2/am33xx-restart.c
+index fcf3d557aa7866..3cdf223addcc28 100644
+--- a/arch/arm/mach-omap2/am33xx-restart.c
++++ b/arch/arm/mach-omap2/am33xx-restart.c
+@@ -2,12 +2,46 @@
+ /*
+  * am33xx-restart.c - Code common to all AM33xx machines.
+  */
++#include <dt-bindings/pinctrl/am33xx.h>
++#include <linux/delay.h>
+ #include <linux/kernel.h>
+ #include <linux/reboot.h>
+ 
+ #include "common.h"
++#include "control.h"
+ #include "prm.h"
+ 
++/*
++ * Advisory 1.0.36 EMU0 and EMU1: Terminals Must be Pulled High Before
++ * ICEPick Samples
++ *
++ * If EMU0/EMU1 pins have been used as GPIO outputs and actively driving low
++ * level, the device might not reboot in normal mode. We are in a bad position
++ * to override GPIO state here, so just switch the pins into EMU input mode
++ * (that's what reset will do anyway) and wait a bit, because the state will be
++ * latched 190 ns after reset.
++ */
++static void am33xx_advisory_1_0_36(void)
++{
++	u32 emu0 = omap_ctrl_readl(AM335X_PIN_EMU0);
++	u32 emu1 = omap_ctrl_readl(AM335X_PIN_EMU1);
++
++	/* If both pins are in EMU mode, nothing to do */
++	if (!(emu0 & 7) && !(emu1 & 7))
++		return;
++
++	/* Switch GPIO3_7/GPIO3_8 into EMU0/EMU1 modes respectively */
++	omap_ctrl_writel(emu0 & ~7, AM335X_PIN_EMU0);
++	omap_ctrl_writel(emu1 & ~7, AM335X_PIN_EMU1);
++
++	/*
++	 * Give pull-ups time to load the pin/PCB trace capacitance.
++	 * 5 ms should be enough to load 1 uF (which would be a huge
++	 * capacitance for these pins) with TI-recommended 4k7 external
++	 * pull-ups.
++	 */
++	mdelay(5);
++}
++
+ /**
+  * am33xx_restart - trigger a software restart of the SoC
+  * @mode: the "reboot mode", see arch/arm/kernel/{setup,process}.c
+@@ -18,6 +52,8 @@
+  */
+ void am33xx_restart(enum reboot_mode mode, const char *cmd)
+ {
++	am33xx_advisory_1_0_36();
++
+ 	/* TODO: Handle cmd if necessary */
+ 	prm_reboot_mode = mode;
+ 
+diff --git a/arch/arm/mach-omap2/pm33xx-core.c b/arch/arm/mach-omap2/pm33xx-core.c
+index c907478be196ed..4abb86dc98fdac 100644
+--- a/arch/arm/mach-omap2/pm33xx-core.c
++++ b/arch/arm/mach-omap2/pm33xx-core.c
+@@ -388,12 +388,15 @@ static int __init amx3_idle_init(struct device_node *cpu_node, int cpu)
+ 		if (!state_node)
+ 			break;
+ 
+-		if (!of_device_is_available(state_node))
++		if (!of_device_is_available(state_node)) {
++			of_node_put(state_node);
+ 			continue;
++		}
+ 
+ 		if (i == CPUIDLE_STATE_MAX) {
+ 			pr_warn("%s: cpuidle states reached max possible\n",
+ 				__func__);
++			of_node_put(state_node);
+ 			break;
+ 		}
+ 
+@@ -403,6 +406,7 @@ static int __init amx3_idle_init(struct device_node *cpu_node, int cpu)
+ 			states[state_count].wfi_flags |= WFI_FLAG_WAKE_M3 |
+ 							 WFI_FLAG_FLUSH_CACHE;
+ 
++		of_node_put(state_node);
+ 		state_count++;
+ 	}
+ 
+diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi
+index 800bfe83dbf837..b0f1c1b2e6f30b 100644
+--- a/arch/arm64/boot/dts/qcom/msm8916.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi
+@@ -1540,6 +1540,8 @@ mdss: display-subsystem@1a00000 {
+ 
+ 			interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>;
+ 
++			resets = <&gcc GCC_MDSS_BCR>;
++
+ 			interrupt-controller;
+ 			#interrupt-cells = <1>;
+ 
+diff --git a/arch/arm64/boot/dts/qcom/msm8939.dtsi b/arch/arm64/boot/dts/qcom/msm8939.dtsi
+index effa3aaeb25054..39493f75c0dced 100644
+--- a/arch/arm64/boot/dts/qcom/msm8939.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8939.dtsi
+@@ -1218,6 +1218,8 @@ mdss: display-subsystem@1a00000 {
+ 
+ 			power-domains = <&gcc MDSS_GDSC>;
+ 
++			resets = <&gcc GCC_MDSS_BCR>;
++
+ 			#address-cells = <1>;
+ 			#size-cells = <1>;
+ 			#interrupt-cells = <1>;
+diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
+index 9bf7a405a964c2..54956ae1f67459 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
++++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
+@@ -5365,11 +5365,11 @@ slimbam: dma-controller@17184000 {
+ 			compatible = "qcom,bam-v1.7.4", "qcom,bam-v1.7.0";
+ 			qcom,controlled-remotely;
+ 			reg = <0 0x17184000 0 0x2a000>;
+-			num-channels = <31>;
++			num-channels = <23>;
+ 			interrupts = <GIC_SPI 164 IRQ_TYPE_LEVEL_HIGH>;
+ 			#dma-cells = <1>;
+ 			qcom,ee = <1>;
+-			qcom,num-ees = <2>;
++			qcom,num-ees = <4>;
+ 			iommus = <&apps_smmu 0x1806 0x0>;
+ 		};
+ 
+diff --git a/arch/arm64/boot/dts/qcom/x1e80100-pmics.dtsi b/arch/arm64/boot/dts/qcom/x1e80100-pmics.dtsi
+index 5b54ee79f048e3..9986a10ae0068f 100644
+--- a/arch/arm64/boot/dts/qcom/x1e80100-pmics.dtsi
++++ b/arch/arm64/boot/dts/qcom/x1e80100-pmics.dtsi
+@@ -475,6 +475,8 @@ pm8010: pmic@c {
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+ 
++		status = "disabled";
++
+ 		pm8010_temp_alarm: temp-alarm@2400 {
+ 			compatible = "qcom,spmi-temp-alarm";
+ 			reg = <0x2400>;
+diff --git a/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi b/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi
+index 45d68a0d1b5931..c4857bfbeb14ee 100644
+--- a/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi
+@@ -258,7 +258,7 @@ secure_proxy_sa3: mailbox@43600000 {
+ 
+ 	main_pmx0: pinctrl@f4000 {
+ 		compatible = "pinctrl-single";
+-		reg = <0x00 0xf4000 0x00 0x2ac>;
++		reg = <0x00 0xf4000 0x00 0x25c>;
+ 		#pinctrl-cells = <1>;
+ 		pinctrl-single,register-width = <32>;
+ 		pinctrl-single,function-mask = <0xffffffff>;
+diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
+index 9ca5ffd8d817f7..5e68d65e675e52 100644
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -2279,17 +2279,21 @@ static void bti_enable(const struct arm64_cpu_capabilities *__unused)
+ #ifdef CONFIG_ARM64_MTE
+ static void cpu_enable_mte(struct arm64_cpu_capabilities const *cap)
+ {
++	static bool cleared_zero_page = false;
++
+ 	sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_ATA | SCTLR_EL1_ATA0);
+ 
+ 	mte_cpu_setup();
+ 
+ 	/*
+ 	 * Clear the tags in the zero page. This needs to be done via the
+-	 * linear map which has the Tagged attribute.
++	 * linear map which has the Tagged attribute. Since this page is
++	 * always mapped as pte_special(), set_pte_at() will not attempt to
++	 * clear the tags or set PG_mte_tagged.
+ 	 */
+-	if (try_page_mte_tagging(ZERO_PAGE(0))) {
++	if (!cleared_zero_page) {
++		cleared_zero_page = true;
+ 		mte_clear_page_tags(lm_alias(empty_zero_page));
+-		set_page_mte_tagged(ZERO_PAGE(0));
+ 	}
+ 
+ 	kasan_init_hw_tags_cpu();
+diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
+index 6174671be7c18d..5d63ca9667370e 100644
+--- a/arch/arm64/kernel/mte.c
++++ b/arch/arm64/kernel/mte.c
+@@ -428,7 +428,8 @@ static int __access_remote_tags(struct mm_struct *mm, unsigned long addr,
+ 			put_page(page);
+ 			break;
+ 		}
+-		WARN_ON_ONCE(!page_mte_tagged(page));
++
++		WARN_ON_ONCE(!page_mte_tagged(page) && !is_zero_page(page));
+ 
+ 		/* limit access to the end of the page */
+ 		offset = offset_in_page(addr);
+diff --git a/arch/arm64/kernel/pi/map_kernel.c b/arch/arm64/kernel/pi/map_kernel.c
+index e57b043f324b51..adec61b8b278a8 100644
+--- a/arch/arm64/kernel/pi/map_kernel.c
++++ b/arch/arm64/kernel/pi/map_kernel.c
+@@ -78,6 +78,12 @@ static void __init map_kernel(u64 kaslr_offset, u64 va_offset, int root_level)
+ 	twopass |= enable_scs;
+ 	prot = twopass ? data_prot : text_prot;
+ 
++	/*
++	 * [_text, _stext) isn't executed after boot and contains some
++	 * non-executable, unpredictable data, so map it non-executable.
++	 */
++	map_segment(init_pg_dir, &pgdp, va_offset, _text, _stext, data_prot,
++		    false, root_level);
+ 	map_segment(init_pg_dir, &pgdp, va_offset, _stext, _etext, prot,
+ 		    !twopass, root_level);
+ 	map_segment(init_pg_dir, &pgdp, va_offset, __start_rodata,
+diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
+index 4268678d0e86cc..6e397d8dcd4c26 100644
+--- a/arch/arm64/kernel/probes/kprobes.c
++++ b/arch/arm64/kernel/probes/kprobes.c
+@@ -10,6 +10,7 @@
+ 
+ #define pr_fmt(fmt) "kprobes: " fmt
+ 
++#include <linux/execmem.h>
+ #include <linux/extable.h>
+ #include <linux/kasan.h>
+ #include <linux/kernel.h>
+@@ -41,6 +42,17 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
+ static void __kprobes
+ post_kprobe_handler(struct kprobe *, struct kprobe_ctlblk *, struct pt_regs *);
+ 
++void *alloc_insn_page(void)
++{
++	void *addr;
++
++	addr = execmem_alloc(EXECMEM_KPROBES, PAGE_SIZE);
++	if (!addr)
++		return NULL;
++	set_memory_rox((unsigned long)addr, 1);
++	return addr;
++}
++
+ static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
+ {
+ 	kprobe_opcode_t *addr = p->ainsn.api.insn;
+diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
+index 87f61fd6783c20..8185979f0f116d 100644
+--- a/arch/arm64/kernel/setup.c
++++ b/arch/arm64/kernel/setup.c
+@@ -213,7 +213,7 @@ static void __init request_standard_resources(void)
+ 	unsigned long i = 0;
+ 	size_t res_size;
+ 
+-	kernel_code.start   = __pa_symbol(_stext);
++	kernel_code.start   = __pa_symbol(_text);
+ 	kernel_code.end     = __pa_symbol(__init_begin - 1);
+ 	kernel_data.start   = __pa_symbol(_sdata);
+ 	kernel_data.end     = __pa_symbol(_end - 1);
+@@ -281,7 +281,7 @@ u64 cpu_logical_map(unsigned int cpu)
+ 
+ void __init __no_sanitize_address setup_arch(char **cmdline_p)
+ {
+-	setup_initial_init_mm(_stext, _etext, _edata, _end);
++	setup_initial_init_mm(_text, _etext, _edata, _end);
+ 
+ 	*cmdline_p = boot_command_line;
+ 
+diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
+index 93ba66de160ce4..c59aa6df14db73 100644
+--- a/arch/arm64/mm/init.c
++++ b/arch/arm64/mm/init.c
+@@ -300,7 +300,7 @@ void __init arm64_memblock_init(void)
+ 	 * Register the kernel text, kernel data, initrd, and initial
+ 	 * pagetables with memblock.
+ 	 */
+-	memblock_reserve(__pa_symbol(_stext), _end - _stext);
++	memblock_reserve(__pa_symbol(_text), _end - _text);
+ 	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && phys_initrd_size) {
+ 		/* the generic initrd code expects virtual addresses */
+ 		initrd_start = __phys_to_virt(phys_initrd_start);
+diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
+index aed8d32979d9c7..ea80e271301ed5 100644
+--- a/arch/arm64/mm/mmu.c
++++ b/arch/arm64/mm/mmu.c
+@@ -561,8 +561,8 @@ void __init mark_linear_text_alias_ro(void)
+ 	/*
+ 	 * Remove the write permissions from the linear alias of .text/.rodata
+ 	 */
+-	update_mapping_prot(__pa_symbol(_stext), (unsigned long)lm_alias(_stext),
+-			    (unsigned long)__init_begin - (unsigned long)_stext,
++	update_mapping_prot(__pa_symbol(_text), (unsigned long)lm_alias(_text),
++			    (unsigned long)__init_begin - (unsigned long)_text,
+ 			    PAGE_KERNEL_RO);
+ }
+ 
+@@ -623,7 +623,7 @@ static inline void arm64_kfence_map_pool(phys_addr_t kfence_pool, pgd_t *pgdp) {
+ static void __init map_mem(pgd_t *pgdp)
+ {
+ 	static const u64 direct_map_end = _PAGE_END(VA_BITS_MIN);
+-	phys_addr_t kernel_start = __pa_symbol(_stext);
++	phys_addr_t kernel_start = __pa_symbol(_text);
+ 	phys_addr_t kernel_end = __pa_symbol(__init_begin);
+ 	phys_addr_t start, end;
+ 	phys_addr_t early_kfence_pool;
+@@ -670,7 +670,7 @@ static void __init map_mem(pgd_t *pgdp)
+ 	}
+ 
+ 	/*
+-	 * Map the linear alias of the [_stext, __init_begin) interval
++	 * Map the linear alias of the [_text, __init_begin) interval
+ 	 * as non-executable now, and remove the write permission in
+ 	 * mark_linear_text_alias_ro() below (which will be called after
+ 	 * alternative patching has completed). This makes the contents
+@@ -697,6 +697,10 @@ void mark_rodata_ro(void)
+ 	WRITE_ONCE(rodata_is_rw, false);
+ 	update_mapping_prot(__pa_symbol(__start_rodata), (unsigned long)__start_rodata,
+ 			    section_size, PAGE_KERNEL_RO);
++	/* mark the range between _text and _stext as read only. */
++	update_mapping_prot(__pa_symbol(_text), (unsigned long)_text,
++			    (unsigned long)_stext - (unsigned long)_text,
++			    PAGE_KERNEL_RO);
+ }
+ 
+ static void __init declare_vma(struct vm_struct *vma,
+@@ -767,7 +771,7 @@ static void __init declare_kernel_vmas(void)
+ {
+ 	static struct vm_struct vmlinux_seg[KERNEL_SEGMENT_COUNT];
+ 
+-	declare_vma(&vmlinux_seg[0], _stext, _etext, VM_NO_GUARD);
++	declare_vma(&vmlinux_seg[0], _text, _etext, VM_NO_GUARD);
+ 	declare_vma(&vmlinux_seg[1], __start_rodata, __inittext_begin, VM_NO_GUARD);
+ 	declare_vma(&vmlinux_seg[2], __inittext_begin, __inittext_end, VM_NO_GUARD);
+ 	declare_vma(&vmlinux_seg[3], __initdata_begin, __initdata_end, VM_NO_GUARD);
+diff --git a/arch/loongarch/Makefile b/arch/loongarch/Makefile
+index 567bd122a9ee47..c14ae21075c5fc 100644
+--- a/arch/loongarch/Makefile
++++ b/arch/loongarch/Makefile
+@@ -114,7 +114,7 @@ KBUILD_RUSTFLAGS_KERNEL		+= -Crelocation-model=pie
+ LDFLAGS_vmlinux			+= -static -pie --no-dynamic-linker -z notext $(call ld-option, --apply-dynamic-relocs)
+ endif
+ 
+-cflags-y += $(call cc-option, -mno-check-zero-division)
++cflags-y += $(call cc-option, -mno-check-zero-division -fno-isolate-erroneous-paths-dereference)
+ 
+ ifndef CONFIG_KASAN
+ cflags-y += -fno-builtin-memcpy -fno-builtin-memmove -fno-builtin-memset
+diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c
+index 1fa6a604734ef2..2ceb198ae8c808 100644
+--- a/arch/loongarch/kernel/setup.c
++++ b/arch/loongarch/kernel/setup.c
+@@ -354,6 +354,7 @@ void __init platform_init(void)
+ 
+ #ifdef CONFIG_ACPI
+ 	acpi_table_upgrade();
++	acpi_gbl_use_global_lock = false;
+ 	acpi_gbl_use_default_register_widths = false;
+ 	acpi_boot_table_init();
+ #endif
+diff --git a/arch/parisc/include/uapi/asm/ioctls.h b/arch/parisc/include/uapi/asm/ioctls.h
+index 82d1148c6379a5..74b4027a4e8083 100644
+--- a/arch/parisc/include/uapi/asm/ioctls.h
++++ b/arch/parisc/include/uapi/asm/ioctls.h
+@@ -10,10 +10,10 @@
+ #define TCSETS		_IOW('T', 17, struct termios) /* TCSETATTR */
+ #define TCSETSW		_IOW('T', 18, struct termios) /* TCSETATTRD */
+ #define TCSETSF		_IOW('T', 19, struct termios) /* TCSETATTRF */
+-#define TCGETA		_IOR('T', 1, struct termio)
+-#define TCSETA		_IOW('T', 2, struct termio)
+-#define TCSETAW		_IOW('T', 3, struct termio)
+-#define TCSETAF		_IOW('T', 4, struct termio)
++#define TCGETA          0x40125401
++#define TCSETA          0x80125402
++#define TCSETAW         0x80125403
++#define TCSETAF         0x80125404
+ #define TCSBRK		_IO('T', 5)
+ #define TCXONC		_IO('T', 6)
+ #define TCFLSH		_IO('T', 7)
+diff --git a/arch/parisc/lib/memcpy.c b/arch/parisc/lib/memcpy.c
+index 69d65ffab31263..03165c82dfdbd9 100644
+--- a/arch/parisc/lib/memcpy.c
++++ b/arch/parisc/lib/memcpy.c
+@@ -41,7 +41,6 @@ unsigned long raw_copy_from_user(void *dst, const void __user *src,
+ 	mtsp(get_kernel_space(), SR_TEMP2);
+ 
+ 	/* Check region is user accessible */
+-	if (start)
+ 	while (start < end) {
+ 		if (!prober_user(SR_TEMP1, start)) {
+ 			newlen = (start - (unsigned long) src);
+diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
+index b0a14e48175c69..4d938c1bfdd38c 100644
+--- a/arch/powerpc/platforms/powernv/pci-ioda.c
++++ b/arch/powerpc/platforms/powernv/pci-ioda.c
+@@ -1897,7 +1897,7 @@ static int pnv_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ 	return 0;
+ 
+ out:
+-	irq_domain_free_irqs_parent(domain, virq, i - 1);
++	irq_domain_free_irqs_parent(domain, virq, i);
+ 	msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, nr_irqs);
+ 	return ret;
+ }
+diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c
+index ba98a680a12e67..169859df1aa911 100644
+--- a/arch/powerpc/platforms/pseries/msi.c
++++ b/arch/powerpc/platforms/pseries/msi.c
+@@ -592,7 +592,7 @@ static int pseries_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
+ 
+ out:
+ 	/* TODO: handle RTAS cleanup in ->msi_finish() ? */
+-	irq_domain_free_irqs_parent(domain, virq, i - 1);
++	irq_domain_free_irqs_parent(domain, virq, i);
+ 	return ret;
+ }
+ 
+diff --git a/arch/s390/Makefile b/arch/s390/Makefile
+index 5b97af31170928..f949f7e1000cfa 100644
+--- a/arch/s390/Makefile
++++ b/arch/s390/Makefile
+@@ -25,6 +25,7 @@ endif
+ KBUILD_CFLAGS_DECOMPRESSOR := $(CLANG_FLAGS) -m64 -O2 -mpacked-stack -std=gnu11
+ KBUILD_CFLAGS_DECOMPRESSOR += -DDISABLE_BRANCH_PROFILING -D__NO_FORTIFY
+ KBUILD_CFLAGS_DECOMPRESSOR += -D__DECOMPRESSOR
++KBUILD_CFLAGS_DECOMPRESSOR += -Wno-pointer-sign
+ KBUILD_CFLAGS_DECOMPRESSOR += -fno-delete-null-pointer-checks -msoft-float -mbackchain
+ KBUILD_CFLAGS_DECOMPRESSOR += -fno-asynchronous-unwind-tables
+ KBUILD_CFLAGS_DECOMPRESSOR += -ffreestanding
+diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
+index ff1ddba96352a1..0d18d3267bc4f8 100644
+--- a/arch/s390/kernel/vmlinux.lds.S
++++ b/arch/s390/kernel/vmlinux.lds.S
+@@ -202,6 +202,33 @@ SECTIONS
+ 	. = ALIGN(PAGE_SIZE);
+ 	_end = . ;
+ 
++	/* Debugging sections.	*/
++	STABS_DEBUG
++	DWARF_DEBUG
++	ELF_DETAILS
++
++	/*
++	 * Make sure that the .got.plt is either completely empty or it
++	 * contains only the three reserved double words.
++	 */
++	.got.plt : {
++		*(.got.plt)
++	}
++	ASSERT(SIZEOF(.got.plt) == 0 || SIZEOF(.got.plt) == 0x18, "Unexpected GOT/PLT entries detected!")
++
++	/*
++	 * Sections that should stay zero sized, which is safer to
++	 * explicitly check instead of blindly discarding.
++	 */
++	.plt : {
++		*(.plt) *(.plt.*) *(.iplt) *(.igot .igot.plt)
++	}
++	ASSERT(SIZEOF(.plt) == 0, "Unexpected run-time procedure linkages detected!")
++	.rela.dyn : {
++		*(.rela.*) *(.rela_*)
++	}
++	ASSERT(SIZEOF(.rela.dyn) == 0, "Unexpected run-time relocations (.rela) detected!")
++
+ 	/*
+ 	 * uncompressed image info used by the decompressor
+ 	 * it should match struct vmlinux_info
+@@ -232,33 +259,6 @@ SECTIONS
+ #endif
+ 	} :NONE
+ 
+-	/* Debugging sections.	*/
+-	STABS_DEBUG
+-	DWARF_DEBUG
+-	ELF_DETAILS
+-
+-	/*
+-	 * Make sure that the .got.plt is either completely empty or it
+-	 * contains only the three reserved double words.
+-	 */
+-	.got.plt : {
+-		*(.got.plt)
+-	}
+-	ASSERT(SIZEOF(.got.plt) == 0 || SIZEOF(.got.plt) == 0x18, "Unexpected GOT/PLT entries detected!")
+-
+-	/*
+-	 * Sections that should stay zero sized, which is safer to
+-	 * explicitly check instead of blindly discarding.
+-	 */
+-	.plt : {
+-		*(.plt) *(.plt.*) *(.iplt) *(.igot .igot.plt)
+-	}
+-	ASSERT(SIZEOF(.plt) == 0, "Unexpected run-time procedure linkages detected!")
+-	.rela.dyn : {
+-		*(.rela.*) *(.rela_*)
+-	}
+-	ASSERT(SIZEOF(.rela.dyn) == 0, "Unexpected run-time relocations (.rela) detected!")
+-
+ 	/* Sections to be discarded */
+ 	DISCARDS
+ 	/DISCARD/ : {
+diff --git a/arch/s390/net/bpf_jit.h b/arch/s390/net/bpf_jit.h
+deleted file mode 100644
+index 7822ea92e54afd..00000000000000
+--- a/arch/s390/net/bpf_jit.h
++++ /dev/null
+@@ -1,55 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-/*
+- * BPF Jit compiler defines
+- *
+- * Copyright IBM Corp. 2012,2015
+- *
+- * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+- *	      Michael Holzheu <holzheu@linux.vnet.ibm.com>
+- */
+-
+-#ifndef __ARCH_S390_NET_BPF_JIT_H
+-#define __ARCH_S390_NET_BPF_JIT_H
+-
+-#ifndef __ASSEMBLY__
+-
+-#include <linux/filter.h>
+-#include <linux/types.h>
+-
+-#endif /* __ASSEMBLY__ */
+-
+-/*
+- * Stackframe layout (packed stack):
+- *
+- *				    ^ high
+- *	      +---------------+     |
+- *	      | old backchain |     |
+- *	      +---------------+     |
+- *	      |   r15 - r6    |     |
+- *	      +---------------+     |
+- *	      | 4 byte align  |     |
+- *	      | tail_call_cnt |     |
+- * BFP	   -> +===============+     |
+- *	      |		      |     |
+- *	      |   BPF stack   |     |
+- *	      |		      |     |
+- * R15+160 -> +---------------+     |
+- *	      | new backchain |     |
+- * R15+152 -> +---------------+     |
+- *	      | + 152 byte SA |     |
+- * R15	   -> +---------------+     + low
+- *
+- * We get 160 bytes stack space from calling function, but only use
+- * 12 * 8 byte for old backchain, r15..r6, and tail_call_cnt.
+- *
+- * The stack size used by the BPF program ("BPF stack" above) is passed
+- * via "aux->stack_depth".
+- */
+-#define STK_SPACE_ADD	(160)
+-#define STK_160_UNUSED	(160 - 12 * 8)
+-#define STK_OFF		(STK_SPACE_ADD - STK_160_UNUSED)
+-
+-#define STK_OFF_R6	(160 - 11 * 8)	/* Offset of r6 on stack */
+-#define STK_OFF_TCCNT	(160 - 12 * 8)	/* Offset of tail_call_cnt on stack */
+-
+-#endif /* __ARCH_S390_NET_BPF_JIT_H */
+diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
+index ead8d9ba9032c5..f305cb42070df7 100644
+--- a/arch/s390/net/bpf_jit_comp.c
++++ b/arch/s390/net/bpf_jit_comp.c
+@@ -32,7 +32,6 @@
+ #include <asm/set_memory.h>
+ #include <asm/text-patching.h>
+ #include <asm/unwind.h>
+-#include "bpf_jit.h"
+ 
+ struct bpf_jit {
+ 	u32 seen;		/* Flags to remember seen eBPF instructions */
+@@ -56,6 +55,7 @@ struct bpf_jit {
+ 	int prologue_plt;	/* Start of prologue hotpatch PLT */
+ 	int kern_arena;		/* Pool offset of kernel arena address */
+ 	u64 user_arena;		/* User arena address */
++	u32 frame_off;		/* Offset of struct bpf_prog from %r15 */
+ };
+ 
+ #define SEEN_MEM	BIT(0)		/* use mem[] for temporary storage */
+@@ -403,12 +403,26 @@ static void jit_fill_hole(void *area, unsigned int size)
+ 	memset(area, 0, size);
+ }
+ 
++/*
++ * Caller-allocated part of the frame.
++ * Thanks to packed stack, its otherwise unused initial part can be used for
++ * the BPF stack and for the next frame.
++ */
++struct prog_frame {
++	u64 unused[8];
++	/* BPF stack starts here and grows towards 0 */
++	u32 tail_call_cnt;
++	u32 pad;
++	u64 r6[10];  /* r6 - r15 */
++	u64 backchain;
++} __packed;
++
+ /*
+  * Save registers from "rs" (register start) to "re" (register end) on stack
+  */
+ static void save_regs(struct bpf_jit *jit, u32 rs, u32 re)
+ {
+-	u32 off = STK_OFF_R6 + (rs - 6) * 8;
++	u32 off = offsetof(struct prog_frame, r6) + (rs - 6) * 8;
+ 
+ 	if (rs == re)
+ 		/* stg %rs,off(%r15) */
+@@ -421,12 +435,9 @@ static void save_regs(struct bpf_jit *jit, u32 rs, u32 re)
+ /*
+  * Restore registers from "rs" (register start) to "re" (register end) on stack
+  */
+-static void restore_regs(struct bpf_jit *jit, u32 rs, u32 re, u32 stack_depth)
++static void restore_regs(struct bpf_jit *jit, u32 rs, u32 re)
+ {
+-	u32 off = STK_OFF_R6 + (rs - 6) * 8;
+-
+-	if (jit->seen & SEEN_STACK)
+-		off += STK_OFF + stack_depth;
++	u32 off = jit->frame_off + offsetof(struct prog_frame, r6) + (rs - 6) * 8;
+ 
+ 	if (rs == re)
+ 		/* lg %rs,off(%r15) */
+@@ -470,8 +481,7 @@ static int get_end(u16 seen_regs, int start)
+  * Save and restore clobbered registers (6-15) on stack.
+  * We save/restore registers in chunks with gap >= 2 registers.
+  */
+-static void save_restore_regs(struct bpf_jit *jit, int op, u32 stack_depth,
+-			      u16 extra_regs)
++static void save_restore_regs(struct bpf_jit *jit, int op, u16 extra_regs)
+ {
+ 	u16 seen_regs = jit->seen_regs | extra_regs;
+ 	const int last = 15, save_restore_size = 6;
+@@ -494,7 +504,7 @@ static void save_restore_regs(struct bpf_jit *jit, int op, u32 stack_depth,
+ 		if (op == REGS_SAVE)
+ 			save_regs(jit, rs, re);
+ 		else
+-			restore_regs(jit, rs, re, stack_depth);
++			restore_regs(jit, rs, re);
+ 		re++;
+ 	} while (re <= last);
+ }
+@@ -559,11 +569,12 @@ static void bpf_jit_plt(struct bpf_plt *plt, void *ret, void *target)
+  * Emit function prologue
+  *
+  * Save registers and create stack frame if necessary.
+- * See stack frame layout description in "bpf_jit.h"!
++ * Stack frame layout is described by struct prog_frame.
+  */
+-static void bpf_jit_prologue(struct bpf_jit *jit, struct bpf_prog *fp,
+-			     u32 stack_depth)
++static void bpf_jit_prologue(struct bpf_jit *jit, struct bpf_prog *fp)
+ {
++	BUILD_BUG_ON(sizeof(struct prog_frame) != STACK_FRAME_OVERHEAD);
++
+ 	/* No-op for hotpatching */
+ 	/* brcl 0,prologue_plt */
+ 	EMIT6_PCREL_RILC(0xc0040000, 0, jit->prologue_plt);
+@@ -571,8 +582,9 @@ static void bpf_jit_prologue(struct bpf_jit *jit, struct bpf_prog *fp,
+ 
+ 	if (!bpf_is_subprog(fp)) {
+ 		/* Initialize the tail call counter in the main program. */
+-		/* xc STK_OFF_TCCNT(4,%r15),STK_OFF_TCCNT(%r15) */
+-		_EMIT6(0xd703f000 | STK_OFF_TCCNT, 0xf000 | STK_OFF_TCCNT);
++		/* xc tail_call_cnt(4,%r15),tail_call_cnt(%r15) */
++		_EMIT6(0xd703f000 | offsetof(struct prog_frame, tail_call_cnt),
++		       0xf000 | offsetof(struct prog_frame, tail_call_cnt));
+ 	} else {
+ 		/*
+ 		 * Skip the tail call counter initialization in subprograms.
+@@ -595,7 +607,7 @@ static void bpf_jit_prologue(struct bpf_jit *jit, struct bpf_prog *fp,
+ 		jit->seen_regs |= NVREGS;
+ 	} else {
+ 		/* Save registers */
+-		save_restore_regs(jit, REGS_SAVE, stack_depth,
++		save_restore_regs(jit, REGS_SAVE,
+ 				  fp->aux->exception_boundary ? NVREGS : 0);
+ 	}
+ 	/* Setup literal pool */
+@@ -615,13 +627,15 @@ static void bpf_jit_prologue(struct bpf_jit *jit, struct bpf_prog *fp,
+ 	if (is_first_pass(jit) || (jit->seen & SEEN_STACK)) {
+ 		/* lgr %w1,%r15 (backchain) */
+ 		EMIT4(0xb9040000, REG_W1, REG_15);
+-		/* la %bfp,STK_160_UNUSED(%r15) (BPF frame pointer) */
+-		EMIT4_DISP(0x41000000, BPF_REG_FP, REG_15, STK_160_UNUSED);
+-		/* aghi %r15,-STK_OFF */
+-		EMIT4_IMM(0xa70b0000, REG_15, -(STK_OFF + stack_depth));
+-		/* stg %w1,152(%r15) (backchain) */
++		/* la %bfp,unused_end(%r15) (BPF frame pointer) */
++		EMIT4_DISP(0x41000000, BPF_REG_FP, REG_15,
++			   offsetofend(struct prog_frame, unused));
++		/* aghi %r15,-frame_off */
++		EMIT4_IMM(0xa70b0000, REG_15, -jit->frame_off);
++		/* stg %w1,backchain(%r15) */
+ 		EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0,
+-			      REG_15, 152);
++			      REG_15,
++			      offsetof(struct prog_frame, backchain));
+ 	}
+ }
+ 
+@@ -665,13 +679,13 @@ static void call_r1(struct bpf_jit *jit)
+ /*
+  * Function epilogue
+  */
+-static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth)
++static void bpf_jit_epilogue(struct bpf_jit *jit)
+ {
+ 	jit->exit_ip = jit->prg;
+ 	/* Load exit code: lgr %r2,%b0 */
+ 	EMIT4(0xb9040000, REG_2, BPF_REG_0);
+ 	/* Restore registers */
+-	save_restore_regs(jit, REGS_RESTORE, stack_depth, 0);
++	save_restore_regs(jit, REGS_RESTORE, 0);
+ 	if (nospec_uses_trampoline()) {
+ 		jit->r14_thunk_ip = jit->prg;
+ 		/* Generate __s390_indirect_jump_r14 thunk */
+@@ -862,7 +876,7 @@ static int sign_extend(struct bpf_jit *jit, int r, u8 size, u8 flags)
+  * stack space for the large switch statement.
+  */
+ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
+-				 int i, bool extra_pass, u32 stack_depth)
++				 int i, bool extra_pass)
+ {
+ 	struct bpf_insn *insn = &fp->insnsi[i];
+ 	s32 branch_oc_off = insn->off;
+@@ -1775,17 +1789,11 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
+ 		jit->seen |= SEEN_FUNC;
+ 		/*
+ 		 * Copy the tail call counter to where the callee expects it.
+-		 *
+-		 * Note 1: The callee can increment the tail call counter, but
+-		 * we do not load it back, since the x86 JIT does not do this
+-		 * either.
+-		 *
+-		 * Note 2: We assume that the verifier does not let us call the
+-		 * main program, which clears the tail call counter on entry.
+ 		 */
+-		/* mvc STK_OFF_TCCNT(4,%r15),N(%r15) */
+-		_EMIT6(0xd203f000 | STK_OFF_TCCNT,
+-		       0xf000 | (STK_OFF_TCCNT + STK_OFF + stack_depth));
++		/* mvc tail_call_cnt(4,%r15),frame_off+tail_call_cnt(%r15) */
++		_EMIT6(0xd203f000 | offsetof(struct prog_frame, tail_call_cnt),
++		       0xf000 | (jit->frame_off +
++				 offsetof(struct prog_frame, tail_call_cnt)));
+ 
+ 		/* Sign-extend the kfunc arguments. */
+ 		if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) {
+@@ -1807,6 +1815,22 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
+ 		call_r1(jit);
+ 		/* lgr %b0,%r2: load return value into %b0 */
+ 		EMIT4(0xb9040000, BPF_REG_0, REG_2);
++
++		/*
++		 * Copy the potentially updated tail call counter back.
++		 */
++
++		if (insn->src_reg == BPF_PSEUDO_CALL)
++			/*
+			 * mvc frame_off+tail_call_cnt(4,%r15),
+			 *     tail_call_cnt(%r15)
++			 */
++			_EMIT6(0xd203f000 | (jit->frame_off +
++					     offsetof(struct prog_frame,
++						      tail_call_cnt)),
++			       0xf000 | offsetof(struct prog_frame,
++						 tail_call_cnt));
++
+ 		break;
+ 	}
+ 	case BPF_JMP | BPF_TAIL_CALL: {
+@@ -1836,10 +1860,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
+ 		 *         goto out;
+ 		 */
+ 
+-		if (jit->seen & SEEN_STACK)
+-			off = STK_OFF_TCCNT + STK_OFF + stack_depth;
+-		else
+-			off = STK_OFF_TCCNT;
++		off = jit->frame_off +
++		      offsetof(struct prog_frame, tail_call_cnt);
+ 		/* lhi %w0,1 */
+ 		EMIT4_IMM(0xa7080000, REG_W0, 1);
+ 		/* laal %w1,%w0,off(%r15) */
+@@ -1869,7 +1891,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
+ 		/*
+ 		 * Restore registers before calling function
+ 		 */
+-		save_restore_regs(jit, REGS_RESTORE, stack_depth, 0);
++		save_restore_regs(jit, REGS_RESTORE, 0);
+ 
+ 		/*
+ 		 * goto *(prog->bpf_func + tail_call_start);
+@@ -2161,7 +2183,7 @@ static int bpf_set_addr(struct bpf_jit *jit, int i)
+  * Compile eBPF program into s390x code
+  */
+ static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp,
+-			bool extra_pass, u32 stack_depth)
++			bool extra_pass)
+ {
+ 	int i, insn_count, lit32_size, lit64_size;
+ 	u64 kern_arena;
+@@ -2170,24 +2192,30 @@ static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp,
+ 	jit->lit64 = jit->lit64_start;
+ 	jit->prg = 0;
+ 	jit->excnt = 0;
++	if (is_first_pass(jit) || (jit->seen & SEEN_STACK))
++		jit->frame_off = sizeof(struct prog_frame) -
++				 offsetofend(struct prog_frame, unused) +
++				 round_up(fp->aux->stack_depth, 8);
++	else
++		jit->frame_off = 0;
+ 
+ 	kern_arena = bpf_arena_get_kern_vm_start(fp->aux->arena);
+ 	if (kern_arena)
+ 		jit->kern_arena = _EMIT_CONST_U64(kern_arena);
+ 	jit->user_arena = bpf_arena_get_user_vm_start(fp->aux->arena);
+ 
+-	bpf_jit_prologue(jit, fp, stack_depth);
++	bpf_jit_prologue(jit, fp);
+ 	if (bpf_set_addr(jit, 0) < 0)
+ 		return -1;
+ 	for (i = 0; i < fp->len; i += insn_count) {
+-		insn_count = bpf_jit_insn(jit, fp, i, extra_pass, stack_depth);
++		insn_count = bpf_jit_insn(jit, fp, i, extra_pass);
+ 		if (insn_count < 0)
+ 			return -1;
+ 		/* Next instruction address */
+ 		if (bpf_set_addr(jit, i + insn_count) < 0)
+ 			return -1;
+ 	}
+-	bpf_jit_epilogue(jit, stack_depth);
++	bpf_jit_epilogue(jit);
+ 
+ 	lit32_size = jit->lit32 - jit->lit32_start;
+ 	lit64_size = jit->lit64 - jit->lit64_start;
+@@ -2263,7 +2291,6 @@ static struct bpf_binary_header *bpf_jit_alloc(struct bpf_jit *jit,
+  */
+ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
+ {
+-	u32 stack_depth = round_up(fp->aux->stack_depth, 8);
+ 	struct bpf_prog *tmp, *orig_fp = fp;
+ 	struct bpf_binary_header *header;
+ 	struct s390_jit_data *jit_data;
+@@ -2316,7 +2343,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
+ 	 *   - 3:   Calculate program size and addrs array
+ 	 */
+ 	for (pass = 1; pass <= 3; pass++) {
+-		if (bpf_jit_prog(&jit, fp, extra_pass, stack_depth)) {
++		if (bpf_jit_prog(&jit, fp, extra_pass)) {
+ 			fp = orig_fp;
+ 			goto free_addrs;
+ 		}
+@@ -2330,7 +2357,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
+ 		goto free_addrs;
+ 	}
+ skip_init_ctx:
+-	if (bpf_jit_prog(&jit, fp, extra_pass, stack_depth)) {
++	if (bpf_jit_prog(&jit, fp, extra_pass)) {
+ 		bpf_jit_binary_free(header);
+ 		fp = orig_fp;
+ 		goto free_addrs;
+@@ -2651,9 +2678,10 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
+ 	/* stg %r1,backchain_off(%r15) */
+ 	EMIT6_DISP_LH(0xe3000000, 0x0024, REG_1, REG_0, REG_15,
+ 		      tjit->backchain_off);
+-	/* mvc tccnt_off(4,%r15),stack_size+STK_OFF_TCCNT(%r15) */
++	/* mvc tccnt_off(4,%r15),stack_size+tail_call_cnt(%r15) */
+ 	_EMIT6(0xd203f000 | tjit->tccnt_off,
+-	       0xf000 | (tjit->stack_size + STK_OFF_TCCNT));
++	       0xf000 | (tjit->stack_size +
++			 offsetof(struct prog_frame, tail_call_cnt)));
+ 	/* stmg %r2,%rN,fwd_reg_args_off(%r15) */
+ 	if (nr_reg_args)
+ 		EMIT6_DISP_LH(0xeb000000, 0x0024, REG_2,
+@@ -2790,8 +2818,9 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
+ 				       (nr_stack_args * sizeof(u64) - 1) << 16 |
+ 				       tjit->stack_args_off,
+ 			       0xf000 | tjit->orig_stack_args_off);
+-		/* mvc STK_OFF_TCCNT(4,%r15),tccnt_off(%r15) */
+-		_EMIT6(0xd203f000 | STK_OFF_TCCNT, 0xf000 | tjit->tccnt_off);
++		/* mvc tail_call_cnt(4,%r15),tccnt_off(%r15) */
++		_EMIT6(0xd203f000 | offsetof(struct prog_frame, tail_call_cnt),
++		       0xf000 | tjit->tccnt_off);
+ 		/* lgr %r1,%r8 */
+ 		EMIT4(0xb9040000, REG_1, REG_8);
+ 		/* %r1() */
+@@ -2799,6 +2828,9 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
+ 		/* stg %r2,retval_off(%r15) */
+ 		EMIT6_DISP_LH(0xe3000000, 0x0024, REG_2, REG_0, REG_15,
+ 			      tjit->retval_off);
+		/* mvc tccnt_off(4,%r15),tail_call_cnt(%r15) */
++		_EMIT6(0xd203f000 | tjit->tccnt_off,
++		       0xf000 | offsetof(struct prog_frame, tail_call_cnt));
+ 
+ 		im->ip_after_call = jit->prg_buf + jit->prg;
+ 
+@@ -2848,8 +2880,9 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
+ 	if (flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET))
+ 		EMIT6_DISP_LH(0xe3000000, 0x0004, REG_2, REG_0, REG_15,
+ 			      tjit->retval_off);
+-	/* mvc stack_size+STK_OFF_TCCNT(4,%r15),tccnt_off(%r15) */
+-	_EMIT6(0xd203f000 | (tjit->stack_size + STK_OFF_TCCNT),
++	/* mvc stack_size+tail_call_cnt(4,%r15),tccnt_off(%r15) */
++	_EMIT6(0xd203f000 | (tjit->stack_size +
++			     offsetof(struct prog_frame, tail_call_cnt)),
+ 	       0xf000 | tjit->tccnt_off);
+ 	/* aghi %r15,stack_size */
+ 	EMIT4_IMM(0xa70b0000, REG_15, tjit->stack_size);
+diff --git a/arch/sparc/kernel/of_device_32.c b/arch/sparc/kernel/of_device_32.c
+index 06012e68bdcaec..284a4cafa4324c 100644
+--- a/arch/sparc/kernel/of_device_32.c
++++ b/arch/sparc/kernel/of_device_32.c
+@@ -387,6 +387,7 @@ static struct platform_device * __init scan_one_device(struct device_node *dp,
+ 
+ 	if (of_device_register(op)) {
+ 		printk("%pOF: Could not register of device.\n", dp);
++		put_device(&op->dev);
+ 		kfree(op);
+ 		op = NULL;
+ 	}
+diff --git a/arch/sparc/kernel/of_device_64.c b/arch/sparc/kernel/of_device_64.c
+index f98c2901f3357a..f53092b07b9e7d 100644
+--- a/arch/sparc/kernel/of_device_64.c
++++ b/arch/sparc/kernel/of_device_64.c
+@@ -677,6 +677,7 @@ static struct platform_device * __init scan_one_device(struct device_node *dp,
+ 
+ 	if (of_device_register(op)) {
+ 		printk("%pOF: Could not register of device.\n", dp);
++		put_device(&op->dev);
+ 		kfree(op);
+ 		op = NULL;
+ 	}
+diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
+index c276d70a747995..305f191ba81b1d 100644
+--- a/arch/sparc/mm/hugetlbpage.c
++++ b/arch/sparc/mm/hugetlbpage.c
+@@ -130,6 +130,26 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+ 
+ static pte_t sun4u_hugepage_shift_to_tte(pte_t entry, unsigned int shift)
+ {
++	unsigned long hugepage_size = _PAGE_SZ4MB_4U;
++
++	pte_val(entry) = pte_val(entry) & ~_PAGE_SZALL_4U;
++
++	switch (shift) {
++	case HPAGE_256MB_SHIFT:
++		hugepage_size = _PAGE_SZ256MB_4U;
++		pte_val(entry) |= _PAGE_PMD_HUGE;
++		break;
++	case HPAGE_SHIFT:
++		pte_val(entry) |= _PAGE_PMD_HUGE;
++		break;
++	case HPAGE_64K_SHIFT:
++		hugepage_size = _PAGE_SZ64K_4U;
++		break;
++	default:
++		WARN_ONCE(1, "unsupported hugepage shift=%u\n", shift);
++	}
++
++	pte_val(entry) = pte_val(entry) | hugepage_size;
+ 	return entry;
+ }
+ 
+diff --git a/arch/x86/entry/entry_64_fred.S b/arch/x86/entry/entry_64_fred.S
+index a02bc6f3d2e6a4..0ca9db540b16d3 100644
+--- a/arch/x86/entry/entry_64_fred.S
++++ b/arch/x86/entry/entry_64_fred.S
+@@ -16,7 +16,7 @@
+ 
+ .macro FRED_ENTER
+ 	UNWIND_HINT_END_OF_STACK
+-	ENDBR
++	ANNOTATE_NOENDBR
+ 	PUSH_AND_CLEAR_REGS
+ 	movq	%rsp, %rdi	/* %rdi -> pt_regs */
+ .endm
+diff --git a/arch/x86/hyperv/ivm.c b/arch/x86/hyperv/ivm.c
+index af87f440bc2aca..8c345ad458415e 100644
+--- a/arch/x86/hyperv/ivm.c
++++ b/arch/x86/hyperv/ivm.c
+@@ -681,7 +681,7 @@ void __init hv_vtom_init(void)
+ 	x86_platform.guest.enc_status_change_finish = hv_vtom_set_host_visibility;
+ 
+ 	/* Set WB as the default cache mode. */
+-	mtrr_overwrite_state(NULL, 0, MTRR_TYPE_WRBACK);
++	guest_force_mtrr_state(NULL, 0, MTRR_TYPE_WRBACK);
+ }
+ 
+ #endif /* defined(CONFIG_AMD_MEM_ENCRYPT) || defined(CONFIG_INTEL_TDX_GUEST) */
+diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
+index 21d07aa9400c7a..b67280d761f630 100644
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -722,6 +722,7 @@
+ #define MSR_AMD64_PERF_CNTR_GLOBAL_STATUS	0xc0000300
+ #define MSR_AMD64_PERF_CNTR_GLOBAL_CTL		0xc0000301
+ #define MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR	0xc0000302
++#define MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_SET	0xc0000303
+ 
+ /* AMD Last Branch Record MSRs */
+ #define MSR_AMD64_LBR_SELECT			0xc000010e
+diff --git a/arch/x86/include/asm/mtrr.h b/arch/x86/include/asm/mtrr.h
+index 4218248083d985..c69e269937c567 100644
+--- a/arch/x86/include/asm/mtrr.h
++++ b/arch/x86/include/asm/mtrr.h
+@@ -58,8 +58,8 @@ struct mtrr_state_type {
+  */
+ # ifdef CONFIG_MTRR
+ void mtrr_bp_init(void);
+-void mtrr_overwrite_state(struct mtrr_var_range *var, unsigned int num_var,
+-			  mtrr_type def_type);
++void guest_force_mtrr_state(struct mtrr_var_range *var, unsigned int num_var,
++			    mtrr_type def_type);
+ extern u8 mtrr_type_lookup(u64 addr, u64 end, u8 *uniform);
+ extern void mtrr_save_fixed_ranges(void *);
+ extern void mtrr_save_state(void);
+@@ -75,9 +75,9 @@ void mtrr_disable(void);
+ void mtrr_enable(void);
+ void mtrr_generic_set_state(void);
+ #  else
+-static inline void mtrr_overwrite_state(struct mtrr_var_range *var,
+-					unsigned int num_var,
+-					mtrr_type def_type)
++static inline void guest_force_mtrr_state(struct mtrr_var_range *var,
++					  unsigned int num_var,
++					  mtrr_type def_type)
+ {
+ }
+ 
+diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
+index 1ececfce7a46a6..4b1e2d14e5aac3 100644
+--- a/arch/x86/kernel/cpu/mtrr/generic.c
++++ b/arch/x86/kernel/cpu/mtrr/generic.c
+@@ -423,7 +423,7 @@ void __init mtrr_copy_map(void)
+ }
+ 
+ /**
+- * mtrr_overwrite_state - set static MTRR state
++ * guest_force_mtrr_state - set static MTRR state for a guest
+  *
+  * Used to set MTRR state via different means (e.g. with data obtained from
+  * a hypervisor).
+@@ -436,8 +436,8 @@ void __init mtrr_copy_map(void)
+  * @num_var: length of the @var array
+  * @def_type: default caching type
+  */
+-void mtrr_overwrite_state(struct mtrr_var_range *var, unsigned int num_var,
+-			  mtrr_type def_type)
++void guest_force_mtrr_state(struct mtrr_var_range *var, unsigned int num_var,
++			    mtrr_type def_type)
+ {
+ 	unsigned int i;
+ 
+diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.c b/arch/x86/kernel/cpu/mtrr/mtrr.c
+index 989d368be04fcc..ecbda0341a8a32 100644
+--- a/arch/x86/kernel/cpu/mtrr/mtrr.c
++++ b/arch/x86/kernel/cpu/mtrr/mtrr.c
+@@ -625,7 +625,7 @@ void mtrr_save_state(void)
+ static int __init mtrr_init_finalize(void)
+ {
+ 	/*
+-	 * Map might exist if mtrr_overwrite_state() has been called or if
++	 * Map might exist if guest_force_mtrr_state() has been called or if
+ 	 * mtrr_enabled() returns true.
+ 	 */
+ 	mtrr_copy_map();
+diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
+index 21e9e484535415..bd21c568bc2ad1 100644
+--- a/arch/x86/kernel/kvm.c
++++ b/arch/x86/kernel/kvm.c
+@@ -933,6 +933,19 @@ static void kvm_sev_hc_page_enc_status(unsigned long pfn, int npages, bool enc)
+ 
+ static void __init kvm_init_platform(void)
+ {
++	u64 tolud = PFN_PHYS(e820__end_of_low_ram_pfn());
++	/*
++	 * Note, hardware requires variable MTRR ranges to be power-of-2 sized
++	 * and naturally aligned.  But when forcing guest MTRR state, Linux
++	 * doesn't program the forced ranges into hardware.  Don't bother doing
++	 * the math to generate a technically-legal range.
++	 */
++	struct mtrr_var_range pci_hole = {
++		.base_lo = tolud | X86_MEMTYPE_UC,
++		.mask_lo = (u32)(~(SZ_4G - tolud - 1)) | MTRR_PHYSMASK_V,
++		.mask_hi = (BIT_ULL(boot_cpu_data.x86_phys_bits) - 1) >> 32,
++	};
++
+ 	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT) &&
+ 	    kvm_para_has_feature(KVM_FEATURE_MIGRATION_CONTROL)) {
+ 		unsigned long nr_pages;
+@@ -982,8 +995,12 @@ static void __init kvm_init_platform(void)
+ 	kvmclock_init();
+ 	x86_platform.apic_post_init = kvm_apic_init;
+ 
+-	/* Set WB as the default cache mode for SEV-SNP and TDX */
+-	mtrr_overwrite_state(NULL, 0, MTRR_TYPE_WRBACK);
++	/*
++	 * Set WB as the default cache mode for SEV-SNP and TDX, with a single
++	 * UC range for the legacy PCI hole, e.g. so that devices that expect
++	 * to get UC/WC mappings don't get surprised with WB.
++	 */
++	guest_force_mtrr_state(&pci_hole, 1, MTRR_TYPE_WRBACK);
+ }
+ 
+ #if defined(CONFIG_AMD_MEM_ENCRYPT)
+diff --git a/arch/x86/kernel/umip.c b/arch/x86/kernel/umip.c
+index 5a4b21389b1d98..d432f3824f0c29 100644
+--- a/arch/x86/kernel/umip.c
++++ b/arch/x86/kernel/umip.c
+@@ -156,15 +156,26 @@ static int identify_insn(struct insn *insn)
+ 	if (!insn->modrm.nbytes)
+ 		return -EINVAL;
+ 
+-	/* All the instructions of interest start with 0x0f. */
+-	if (insn->opcode.bytes[0] != 0xf)
++	/* The instructions of interest have 2-byte opcodes: 0F 00 or 0F 01. */
++	if (insn->opcode.nbytes < 2 || insn->opcode.bytes[0] != 0xf)
+ 		return -EINVAL;
+ 
+ 	if (insn->opcode.bytes[1] == 0x1) {
+ 		switch (X86_MODRM_REG(insn->modrm.value)) {
+ 		case 0:
++			/* The reg form of 0F 01 /0 encodes VMX instructions. */
++			if (X86_MODRM_MOD(insn->modrm.value) == 3)
++				return -EINVAL;
++
+ 			return UMIP_INST_SGDT;
+ 		case 1:
++			/*
++			 * The reg form of 0F 01 /1 encodes MONITOR/MWAIT,
++			 * STAC/CLAC, and ENCLS.
++			 */
++			if (X86_MODRM_MOD(insn->modrm.value) == 3)
++				return -EINVAL;
++
+ 			return UMIP_INST_SIDT;
+ 		case 4:
+ 			return UMIP_INST_SMSW;
+diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
+index 8f587c5bb6bc4e..a7a1486ce27041 100644
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -816,7 +816,7 @@ void kvm_set_cpu_caps(void)
+ 		F(NO_NESTED_DATA_BP) | F(LFENCE_RDTSC) | 0 /* SmmPgCfgLock */ |
+ 		F(VERW_CLEAR) |
+ 		F(NULL_SEL_CLR_BASE) | F(AUTOIBRS) | 0 /* PrefetchCtlMsr */ |
+-		F(WRMSR_XX_BASE_NS)
++		F(WRMSR_XX_BASE_NS) | F(SRSO_USER_KERNEL_NO)
+ 	);
+ 
+ 	kvm_cpu_cap_check_and_set(X86_FEATURE_SBPB);
+diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
+index 47a46283c86677..a26e8f5ad79013 100644
+--- a/arch/x86/kvm/pmu.c
++++ b/arch/x86/kvm/pmu.c
+@@ -650,6 +650,7 @@ int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ 		msr_info->data = pmu->global_ctrl;
+ 		break;
+ 	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR:
++	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_SET:
+ 	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
+ 		msr_info->data = 0;
+ 		break;
+@@ -711,6 +712,10 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ 		if (!msr_info->host_initiated)
+ 			pmu->global_status &= ~data;
+ 		break;
++	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_SET:
++		if (!msr_info->host_initiated)
++			pmu->global_status |= data & ~pmu->global_status_rsvd;
++		break;
+ 	default:
+ 		kvm_pmu_mark_pmc_in_use(vcpu, msr_info->index);
+ 		return kvm_pmu_call(set_msr)(vcpu, msr_info);
+diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
+index 22d5a65b410c9c..dbca6453ec4d7f 100644
+--- a/arch/x86/kvm/svm/pmu.c
++++ b/arch/x86/kvm/svm/pmu.c
+@@ -113,6 +113,7 @@ static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
+ 	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS:
+ 	case MSR_AMD64_PERF_CNTR_GLOBAL_CTL:
+ 	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR:
++	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_SET:
+ 		return pmu->version > 1;
+ 	default:
+ 		if (msr > MSR_F15H_PERF_CTR5 &&
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 86cabeca6265a2..20f89bceaeae9a 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -364,6 +364,7 @@ static const u32 msrs_to_save_pmu[] = {
+ 	MSR_AMD64_PERF_CNTR_GLOBAL_CTL,
+ 	MSR_AMD64_PERF_CNTR_GLOBAL_STATUS,
+ 	MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR,
++	MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_SET,
+ };
+ 
+ static u32 msrs_to_save[ARRAY_SIZE(msrs_to_save_base) +
+@@ -7449,6 +7450,7 @@ static void kvm_probe_msr_to_save(u32 msr_index)
+ 	case MSR_AMD64_PERF_CNTR_GLOBAL_CTL:
+ 	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS:
+ 	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR:
++	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_SET:
+ 		if (!kvm_cpu_cap_has(X86_FEATURE_PERFMON_V2))
+ 			return;
+ 		break;
+diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
+index a8eb7e0c473cf6..e033d559426594 100644
+--- a/arch/x86/xen/enlighten_pv.c
++++ b/arch/x86/xen/enlighten_pv.c
+@@ -171,7 +171,7 @@ static void __init xen_set_mtrr_data(void)
+ 
+ 	/* Only overwrite MTRR state if any MTRR could be got from Xen. */
+ 	if (reg)
+-		mtrr_overwrite_state(var, reg, MTRR_TYPE_UNCACHABLE);
++		guest_force_mtrr_state(var, reg, MTRR_TYPE_UNCACHABLE);
+ #endif
+ }
+ 
+@@ -195,7 +195,7 @@ static void __init xen_pv_init_platform(void)
+ 	if (xen_initial_domain())
+ 		xen_set_mtrr_data();
+ 	else
+-		mtrr_overwrite_state(NULL, 0, MTRR_TYPE_WRBACK);
++		guest_force_mtrr_state(NULL, 0, MTRR_TYPE_WRBACK);
+ 
+ 	/* Adjust nr_cpu_ids before "enumeration" happens */
+ 	xen_smp_count_cpus();
+diff --git a/arch/xtensa/platforms/iss/simdisk.c b/arch/xtensa/platforms/iss/simdisk.c
+index d6d2b533a5744d..f5add569ca8308 100644
+--- a/arch/xtensa/platforms/iss/simdisk.c
++++ b/arch/xtensa/platforms/iss/simdisk.c
+@@ -230,10 +230,14 @@ static ssize_t proc_read_simdisk(struct file *file, char __user *buf,
+ static ssize_t proc_write_simdisk(struct file *file, const char __user *buf,
+ 			size_t count, loff_t *ppos)
+ {
+-	char *tmp = memdup_user_nul(buf, count);
++	char *tmp;
+ 	struct simdisk *dev = pde_data(file_inode(file));
+ 	int err;
+ 
++	if (count == 0 || count > PAGE_SIZE)
++		return -EINVAL;
++
++	tmp = memdup_user_nul(buf, count);
+ 	if (IS_ERR(tmp))
+ 		return PTR_ERR(tmp);
+ 
+diff --git a/block/blk-crypto-fallback.c b/block/blk-crypto-fallback.c
+index b1e7415f8439c4..d04192ae3dce9a 100644
+--- a/block/blk-crypto-fallback.c
++++ b/block/blk-crypto-fallback.c
+@@ -18,6 +18,7 @@
+ #include <linux/module.h>
+ #include <linux/random.h>
+ #include <linux/scatterlist.h>
++#include <trace/events/block.h>
+ 
+ #include "blk-cgroup.h"
+ #include "blk-crypto-internal.h"
+@@ -230,7 +231,9 @@ static bool blk_crypto_fallback_split_bio_if_needed(struct bio **bio_ptr)
+ 			bio->bi_status = BLK_STS_RESOURCE;
+ 			return false;
+ 		}
++
+ 		bio_chain(split_bio, bio);
++		trace_block_split(split_bio, bio->bi_iter.bi_sector);
+ 		submit_bio_noacct(bio);
+ 		*bio_ptr = split_bio;
+ 	}
+diff --git a/crypto/essiv.c b/crypto/essiv.c
+index e63fc6442e3201..aa7ebc1235274e 100644
+--- a/crypto/essiv.c
++++ b/crypto/essiv.c
+@@ -186,9 +186,14 @@ static int essiv_aead_crypt(struct aead_request *req, bool enc)
+ 	const struct essiv_tfm_ctx *tctx = crypto_aead_ctx(tfm);
+ 	struct essiv_aead_request_ctx *rctx = aead_request_ctx(req);
+ 	struct aead_request *subreq = &rctx->aead_req;
++	int ivsize = crypto_aead_ivsize(tfm);
++	int ssize = req->assoclen - ivsize;
+ 	struct scatterlist *src = req->src;
+ 	int err;
+ 
++	if (ssize < 0)
++		return -EINVAL;
++
+ 	crypto_cipher_encrypt_one(tctx->essiv_cipher, req->iv, req->iv);
+ 
+ 	/*
+@@ -198,19 +203,12 @@ static int essiv_aead_crypt(struct aead_request *req, bool enc)
+ 	 */
+ 	rctx->assoc = NULL;
+ 	if (req->src == req->dst || !enc) {
+-		scatterwalk_map_and_copy(req->iv, req->dst,
+-					 req->assoclen - crypto_aead_ivsize(tfm),
+-					 crypto_aead_ivsize(tfm), 1);
++		scatterwalk_map_and_copy(req->iv, req->dst, ssize, ivsize, 1);
+ 	} else {
+ 		u8 *iv = (u8 *)aead_request_ctx(req) + tctx->ivoffset;
+-		int ivsize = crypto_aead_ivsize(tfm);
+-		int ssize = req->assoclen - ivsize;
+ 		struct scatterlist *sg;
+ 		int nents;
+ 
+-		if (ssize < 0)
+-			return -EINVAL;
+-
+ 		nents = sg_nents_for_len(req->src, ssize);
+ 		if (nents < 0)
+ 			return -EINVAL;
+diff --git a/drivers/acpi/acpi_dbg.c b/drivers/acpi/acpi_dbg.c
+index d50261d05f3a1a..515b20d0b698a4 100644
+--- a/drivers/acpi/acpi_dbg.c
++++ b/drivers/acpi/acpi_dbg.c
+@@ -569,11 +569,11 @@ static int acpi_aml_release(struct inode *inode, struct file *file)
+ 	return 0;
+ }
+ 
+-static int acpi_aml_read_user(char __user *buf, int len)
++static ssize_t acpi_aml_read_user(char __user *buf, size_t len)
+ {
+-	int ret;
+ 	struct circ_buf *crc = &acpi_aml_io.out_crc;
+-	int n;
++	ssize_t ret;
++	size_t n;
+ 	char *p;
+ 
+ 	ret = acpi_aml_lock_read(crc, ACPI_AML_OUT_USER);
+@@ -582,7 +582,7 @@ static int acpi_aml_read_user(char __user *buf, int len)
+ 	/* sync head before removing logs */
+ 	smp_rmb();
+ 	p = &crc->buf[crc->tail];
+-	n = min(len, circ_count_to_end(crc));
++	n = min_t(size_t, len, circ_count_to_end(crc));
+ 	if (copy_to_user(buf, p, n)) {
+ 		ret = -EFAULT;
+ 		goto out;
+@@ -599,8 +599,8 @@ static int acpi_aml_read_user(char __user *buf, int len)
+ static ssize_t acpi_aml_read(struct file *file, char __user *buf,
+ 			     size_t count, loff_t *ppos)
+ {
+-	int ret = 0;
+-	int size = 0;
++	ssize_t ret = 0;
++	ssize_t size = 0;
+ 
+ 	if (!count)
+ 		return 0;
+@@ -639,11 +639,11 @@ static ssize_t acpi_aml_read(struct file *file, char __user *buf,
+ 	return size > 0 ? size : ret;
+ }
+ 
+-static int acpi_aml_write_user(const char __user *buf, int len)
++static ssize_t acpi_aml_write_user(const char __user *buf, size_t len)
+ {
+-	int ret;
+ 	struct circ_buf *crc = &acpi_aml_io.in_crc;
+-	int n;
++	ssize_t ret;
++	size_t n;
+ 	char *p;
+ 
+ 	ret = acpi_aml_lock_write(crc, ACPI_AML_IN_USER);
+@@ -652,7 +652,7 @@ static int acpi_aml_write_user(const char __user *buf, int len)
+ 	/* sync tail before inserting cmds */
+ 	smp_mb();
+ 	p = &crc->buf[crc->head];
+-	n = min(len, circ_space_to_end(crc));
++	n = min_t(size_t, len, circ_space_to_end(crc));
+ 	if (copy_from_user(p, buf, n)) {
+ 		ret = -EFAULT;
+ 		goto out;
+@@ -663,14 +663,14 @@ static int acpi_aml_write_user(const char __user *buf, int len)
+ 	ret = n;
+ out:
+ 	acpi_aml_unlock_fifo(ACPI_AML_IN_USER, ret >= 0);
+-	return n;
++	return ret;
+ }
+ 
+ static ssize_t acpi_aml_write(struct file *file, const char __user *buf,
+ 			      size_t count, loff_t *ppos)
+ {
+-	int ret = 0;
+-	int size = 0;
++	ssize_t ret = 0;
++	ssize_t size = 0;
+ 
+ 	if (!count)
+ 		return 0;
+diff --git a/drivers/acpi/acpi_tad.c b/drivers/acpi/acpi_tad.c
+index b831cb8e53dc31..b75ceaab716e80 100644
+--- a/drivers/acpi/acpi_tad.c
++++ b/drivers/acpi/acpi_tad.c
+@@ -565,6 +565,9 @@ static void acpi_tad_remove(struct platform_device *pdev)
+ 
+ 	pm_runtime_get_sync(dev);
+ 
++	if (dd->capabilities & ACPI_TAD_RT)
++		sysfs_remove_group(&dev->kobj, &acpi_tad_time_attr_group);
++
+ 	if (dd->capabilities & ACPI_TAD_DC_WAKE)
+ 		sysfs_remove_group(&dev->kobj, &acpi_tad_dc_attr_group);
+ 
+diff --git a/drivers/acpi/acpica/evglock.c b/drivers/acpi/acpica/evglock.c
+index 989dc01af03fbb..bc205b3309043b 100644
+--- a/drivers/acpi/acpica/evglock.c
++++ b/drivers/acpi/acpica/evglock.c
+@@ -42,6 +42,10 @@ acpi_status acpi_ev_init_global_lock_handler(void)
+ 		return_ACPI_STATUS(AE_OK);
+ 	}
+ 
++	if (!acpi_gbl_use_global_lock) {
++		return_ACPI_STATUS(AE_OK);
++	}
++
+ 	/* Attempt installation of the global lock handler */
+ 
+ 	status = acpi_install_fixed_event_handler(ACPI_EVENT_GLOBAL,
+diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
+index 65fa3444367a13..11c7e35fafa254 100644
+--- a/drivers/acpi/battery.c
++++ b/drivers/acpi/battery.c
+@@ -92,7 +92,7 @@ enum {
+ 
+ struct acpi_battery {
+ 	struct mutex lock;
+-	struct mutex sysfs_lock;
++	struct mutex update_lock;
+ 	struct power_supply *bat;
+ 	struct power_supply_desc bat_desc;
+ 	struct acpi_device *device;
+@@ -903,15 +903,12 @@ static int sysfs_add_battery(struct acpi_battery *battery)
+ 
+ static void sysfs_remove_battery(struct acpi_battery *battery)
+ {
+-	mutex_lock(&battery->sysfs_lock);
+-	if (!battery->bat) {
+-		mutex_unlock(&battery->sysfs_lock);
++	if (!battery->bat)
+ 		return;
+-	}
++
+ 	battery_hook_remove_battery(battery);
+ 	power_supply_unregister(battery->bat);
+ 	battery->bat = NULL;
+-	mutex_unlock(&battery->sysfs_lock);
+ }
+ 
+ static void find_battery(const struct dmi_header *dm, void *private)
+@@ -1071,6 +1068,9 @@ static void acpi_battery_notify(acpi_handle handle, u32 event, void *data)
+ 
+ 	if (!battery)
+ 		return;
++
++	guard(mutex)(&battery->update_lock);
++
+ 	old = battery->bat;
+ 	/*
+ 	 * On Acer Aspire V5-573G notifications are sometimes triggered too
+@@ -1093,21 +1093,22 @@ static void acpi_battery_notify(acpi_handle handle, u32 event, void *data)
+ }
+ 
+ static int battery_notify(struct notifier_block *nb,
+-			       unsigned long mode, void *_unused)
++			  unsigned long mode, void *_unused)
+ {
+ 	struct acpi_battery *battery = container_of(nb, struct acpi_battery,
+ 						    pm_nb);
+-	int result;
+ 
+-	switch (mode) {
+-	case PM_POST_HIBERNATION:
+-	case PM_POST_SUSPEND:
++	if (mode == PM_POST_SUSPEND || mode == PM_POST_HIBERNATION) {
++		guard(mutex)(&battery->update_lock);
++
+ 		if (!acpi_battery_present(battery))
+ 			return 0;
+ 
+ 		if (battery->bat) {
+ 			acpi_battery_refresh(battery);
+ 		} else {
++			int result;
++
+ 			result = acpi_battery_get_info(battery);
+ 			if (result)
+ 				return result;
+@@ -1119,7 +1120,6 @@ static int battery_notify(struct notifier_block *nb,
+ 
+ 		acpi_battery_init_alarm(battery);
+ 		acpi_battery_get_state(battery);
+-		break;
+ 	}
+ 
+ 	return 0;
+@@ -1197,6 +1197,8 @@ static int acpi_battery_update_retry(struct acpi_battery *battery)
+ {
+ 	int retry, ret;
+ 
++	guard(mutex)(&battery->update_lock);
++
+ 	for (retry = 5; retry; retry--) {
+ 		ret = acpi_battery_update(battery, false);
+ 		if (!ret)
+@@ -1207,6 +1209,13 @@ static int acpi_battery_update_retry(struct acpi_battery *battery)
+ 	return ret;
+ }
+ 
++static void sysfs_battery_cleanup(struct acpi_battery *battery)
++{
++	guard(mutex)(&battery->update_lock);
++
++	sysfs_remove_battery(battery);
++}
++
+ static int acpi_battery_add(struct acpi_device *device)
+ {
+ 	int result = 0;
+@@ -1218,15 +1227,21 @@ static int acpi_battery_add(struct acpi_device *device)
+ 	if (device->dep_unmet)
+ 		return -EPROBE_DEFER;
+ 
+-	battery = kzalloc(sizeof(struct acpi_battery), GFP_KERNEL);
++	battery = devm_kzalloc(&device->dev, sizeof(*battery), GFP_KERNEL);
+ 	if (!battery)
+ 		return -ENOMEM;
+ 	battery->device = device;
+ 	strscpy(acpi_device_name(device), ACPI_BATTERY_DEVICE_NAME);
+ 	strscpy(acpi_device_class(device), ACPI_BATTERY_CLASS);
+ 	device->driver_data = battery;
+-	mutex_init(&battery->lock);
+-	mutex_init(&battery->sysfs_lock);
++	result = devm_mutex_init(&device->dev, &battery->lock);
++	if (result)
++		return result;
++
++	result = devm_mutex_init(&device->dev, &battery->update_lock);
++	if (result)
++		return result;
++
+ 	if (acpi_has_method(battery->device->handle, "_BIX"))
+ 		set_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags);
+ 
+@@ -1253,10 +1268,7 @@ static int acpi_battery_add(struct acpi_device *device)
+ 	device_init_wakeup(&device->dev, 0);
+ 	unregister_pm_notifier(&battery->pm_nb);
+ fail:
+-	sysfs_remove_battery(battery);
+-	mutex_destroy(&battery->lock);
+-	mutex_destroy(&battery->sysfs_lock);
+-	kfree(battery);
++	sysfs_battery_cleanup(battery);
+ 
+ 	return result;
+ }
+@@ -1275,11 +1287,10 @@ static void acpi_battery_remove(struct acpi_device *device)
+ 
+ 	device_init_wakeup(&device->dev, 0);
+ 	unregister_pm_notifier(&battery->pm_nb);
+-	sysfs_remove_battery(battery);
+ 
+-	mutex_destroy(&battery->lock);
+-	mutex_destroy(&battery->sysfs_lock);
+-	kfree(battery);
++	guard(mutex)(&battery->update_lock);
++
++	sysfs_remove_battery(battery);
+ }
+ 
+ #ifdef CONFIG_PM_SLEEP
+@@ -1296,6 +1307,9 @@ static int acpi_battery_resume(struct device *dev)
+ 		return -EINVAL;
+ 
+ 	battery->update_time = 0;
++
++	guard(mutex)(&battery->update_lock);
++
+ 	acpi_battery_update(battery, true);
+ 	return 0;
+ }
+diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
+index e9186339f6e6bb..342e7cef723cc2 100644
+--- a/drivers/acpi/property.c
++++ b/drivers/acpi/property.c
+@@ -83,6 +83,7 @@ static bool acpi_nondev_subnode_extract(union acpi_object *desc,
+ 					struct fwnode_handle *parent)
+ {
+ 	struct acpi_data_node *dn;
++	acpi_handle scope = NULL;
+ 	bool result;
+ 
+ 	if (acpi_graph_ignore_port(handle))
+@@ -98,29 +99,35 @@ static bool acpi_nondev_subnode_extract(union acpi_object *desc,
+ 	INIT_LIST_HEAD(&dn->data.properties);
+ 	INIT_LIST_HEAD(&dn->data.subnodes);
+ 
+-	result = acpi_extract_properties(handle, desc, &dn->data);
+-
+-	if (handle) {
+-		acpi_handle scope;
+-		acpi_status status;
++	/*
++	 * The scope for the completion of relative pathname segments and
++	 * subnode object lookup is the one of the namespace node (device)
++	 * containing the object that has returned the package.  That is, it's
++	 * the scope of that object's parent device.
++	 */
++	if (handle)
++		acpi_get_parent(handle, &scope);
+ 
+-		/*
+-		 * The scope for the subnode object lookup is the one of the
+-		 * namespace node (device) containing the object that has
+-		 * returned the package.  That is, it's the scope of that
+-		 * object's parent.
+-		 */
+-		status = acpi_get_parent(handle, &scope);
+-		if (ACPI_SUCCESS(status)
+-		    && acpi_enumerate_nondev_subnodes(scope, desc, &dn->data,
+-						      &dn->fwnode))
+-			result = true;
+-	} else if (acpi_enumerate_nondev_subnodes(NULL, desc, &dn->data,
+-						  &dn->fwnode)) {
++	/*
++	 * Extract properties from the _DSD-equivalent package pointed to by
++	 * desc and use scope (if not NULL) for the completion of relative
++	 * pathname segments.
++	 *
++	 * The extracted properties will be held in the new data node dn.
++	 */
++	result = acpi_extract_properties(scope, desc, &dn->data);
++	/*
++	 * Look for subnodes in the _DSD-equivalent package pointed to by desc
++	 * and create child nodes of dn if there are any.
++	 */
++	if (acpi_enumerate_nondev_subnodes(scope, desc, &dn->data, &dn->fwnode))
+ 		result = true;
+-	}
+ 
+ 	if (result) {
++		/*
++		 * This will be NULL if the desc package is embedded in an outer
++		 * _DSD-equivalent package and its scope cannot be determined.
++		 */
+ 		dn->handle = handle;
+ 		dn->data.pointer = desc;
+ 		list_add_tail(&dn->sibling, list);
+@@ -132,35 +139,21 @@ static bool acpi_nondev_subnode_extract(union acpi_object *desc,
+ 	return false;
+ }
+ 
+-static bool acpi_nondev_subnode_data_ok(acpi_handle handle,
+-					const union acpi_object *link,
+-					struct list_head *list,
+-					struct fwnode_handle *parent)
+-{
+-	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
+-	acpi_status status;
+-
+-	status = acpi_evaluate_object_typed(handle, NULL, NULL, &buf,
+-					    ACPI_TYPE_PACKAGE);
+-	if (ACPI_FAILURE(status))
+-		return false;
+-
+-	if (acpi_nondev_subnode_extract(buf.pointer, handle, link, list,
+-					parent))
+-		return true;
+-
+-	ACPI_FREE(buf.pointer);
+-	return false;
+-}
+-
+ static bool acpi_nondev_subnode_ok(acpi_handle scope,
+ 				   const union acpi_object *link,
+ 				   struct list_head *list,
+ 				   struct fwnode_handle *parent)
+ {
++	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
+ 	acpi_handle handle;
+ 	acpi_status status;
+ 
++	/*
++	 * If the scope is unknown, the _DSD-equivalent package being parsed
++	 * was embedded in an outer _DSD-equivalent package as a result of
++	 * direct evaluation of an object pointed to by a reference.  In that
++	 * case, using a pathname as the target object pointer is invalid.
++	 */
+ 	if (!scope)
+ 		return false;
+ 
+@@ -169,7 +162,17 @@ static bool acpi_nondev_subnode_ok(acpi_handle scope,
+ 	if (ACPI_FAILURE(status))
+ 		return false;
+ 
+-	return acpi_nondev_subnode_data_ok(handle, link, list, parent);
++	status = acpi_evaluate_object_typed(handle, NULL, NULL, &buf,
++					    ACPI_TYPE_PACKAGE);
++	if (ACPI_FAILURE(status))
++		return false;
++
++	if (acpi_nondev_subnode_extract(buf.pointer, handle, link, list,
++					parent))
++		return true;
++
++	ACPI_FREE(buf.pointer);
++	return false;
+ }
+ 
+ static bool acpi_add_nondev_subnodes(acpi_handle scope,
+@@ -180,9 +183,12 @@ static bool acpi_add_nondev_subnodes(acpi_handle scope,
+ 	bool ret = false;
+ 	int i;
+ 
++	/*
++	 * Every element in the links package is expected to represent a link
++	 * to a non-device node in a tree containing device-specific data.
++	 */
+ 	for (i = 0; i < links->package.count; i++) {
+ 		union acpi_object *link, *desc;
+-		acpi_handle handle;
+ 		bool result;
+ 
+ 		link = &links->package.elements[i];
+@@ -190,26 +196,53 @@ static bool acpi_add_nondev_subnodes(acpi_handle scope,
+ 		if (link->package.count != 2)
+ 			continue;
+ 
+-		/* The first one must be a string. */
++		/* The first one (the key) must be a string. */
+ 		if (link->package.elements[0].type != ACPI_TYPE_STRING)
+ 			continue;
+ 
+-		/* The second one may be a string, a reference or a package. */
++		/* The second one (the target) may be a string or a package. */
+ 		switch (link->package.elements[1].type) {
+ 		case ACPI_TYPE_STRING:
++			/*
++			 * The string is expected to be a full pathname or a
++			 * pathname segment relative to the given scope.  That
++			 * pathname is expected to point to an object returning
++			 * a package that contains _DSD-equivalent information.
++			 */
+ 			result = acpi_nondev_subnode_ok(scope, link, list,
+ 							 parent);
+ 			break;
+-		case ACPI_TYPE_LOCAL_REFERENCE:
+-			handle = link->package.elements[1].reference.handle;
+-			result = acpi_nondev_subnode_data_ok(handle, link, list,
+-							     parent);
+-			break;
+ 		case ACPI_TYPE_PACKAGE:
++			/*
++			 * This happens when a reference is used in AML to
++			 * point to the target.  Since the target is expected
++			 * to be a named object, a reference to it will cause it
+			 * to be evaluated in place and its return package will
++			 * be embedded in the links package at the location of
++			 * the reference.
++			 *
++			 * The target package is expected to contain _DSD-
++			 * equivalent information, but the scope in which it
++			 * is located in the original AML is unknown.  Thus
++			 * it cannot contain pathname segments represented as
++			 * strings because there is no way to build full
++			 * pathnames out of them.
++			 */
++			acpi_handle_debug(scope, "subnode %s: Unknown scope\n",
++					  link->package.elements[0].string.pointer);
+ 			desc = &link->package.elements[1];
+ 			result = acpi_nondev_subnode_extract(desc, NULL, link,
+ 							     list, parent);
+ 			break;
++		case ACPI_TYPE_LOCAL_REFERENCE:
++			/*
++			 * It is not expected to see any local references in
++			 * the links package because referencing a named object
++			 * should cause it to be evaluated in place.
++			 */
++			acpi_handle_info(scope, "subnode %s: Unexpected reference\n",
++					 link->package.elements[0].string.pointer);
++			fallthrough;
+ 		default:
+ 			result = false;
+ 			break;
+@@ -369,6 +402,9 @@ static void acpi_untie_nondev_subnodes(struct acpi_device_data *data)
+ 	struct acpi_data_node *dn;
+ 
+ 	list_for_each_entry(dn, &data->subnodes, sibling) {
++		if (!dn->handle)
++			continue;
++
+ 		acpi_detach_data(dn->handle, acpi_nondev_subnode_tag);
+ 
+ 		acpi_untie_nondev_subnodes(&dn->data);
+@@ -383,6 +419,9 @@ static bool acpi_tie_nondev_subnodes(struct acpi_device_data *data)
+ 		acpi_status status;
+ 		bool ret;
+ 
++		if (!dn->handle)
++			continue;
++
+ 		status = acpi_attach_data(dn->handle, acpi_nondev_subnode_tag, dn);
+ 		if (ACPI_FAILURE(status) && status != AE_ALREADY_EXISTS) {
+ 			acpi_handle_err(dn->handle, "Can't tag data node\n");
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index db9b5164cccaf3..dd411719220198 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -536,8 +536,10 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
+ 		return -EBADF;
+ 
+ 	error = loop_check_backing_file(file);
+-	if (error)
++	if (error) {
++		fput(file);
+ 		return error;
++	}
+ 
+ 	/* suppress uevents while reconfiguring the device */
+ 	dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 1);
+@@ -973,8 +975,10 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
+ 		return -EBADF;
+ 
+ 	error = loop_check_backing_file(file);
+-	if (error)
++	if (error) {
++		fput(file);
+ 		return error;
++	}
+ 
+ 	is_loop = is_loop_device(file);
+ 
+diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c
+index b3eafcf2a2c50d..cdea24e9291959 100644
+--- a/drivers/bus/mhi/ep/main.c
++++ b/drivers/bus/mhi/ep/main.c
+@@ -403,17 +403,13 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
+ {
+ 	struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id];
+ 	struct device *dev = &mhi_cntrl->mhi_dev->dev;
+-	size_t tr_len, read_offset, write_offset;
++	size_t tr_len, read_offset;
+ 	struct mhi_ep_buf_info buf_info = {};
+ 	u32 len = MHI_EP_DEFAULT_MTU;
+ 	struct mhi_ring_element *el;
+-	bool tr_done = false;
+ 	void *buf_addr;
+-	u32 buf_left;
+ 	int ret;
+ 
+-	buf_left = len;
+-
+ 	do {
+ 		/* Don't process the transfer ring if the channel is not in RUNNING state */
+ 		if (mhi_chan->state != MHI_CH_STATE_RUNNING) {
+@@ -426,24 +422,23 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
+ 		/* Check if there is data pending to be read from previous read operation */
+ 		if (mhi_chan->tre_bytes_left) {
+ 			dev_dbg(dev, "TRE bytes remaining: %u\n", mhi_chan->tre_bytes_left);
+-			tr_len = min(buf_left, mhi_chan->tre_bytes_left);
++			tr_len = min(len, mhi_chan->tre_bytes_left);
+ 		} else {
+ 			mhi_chan->tre_loc = MHI_TRE_DATA_GET_PTR(el);
+ 			mhi_chan->tre_size = MHI_TRE_DATA_GET_LEN(el);
+ 			mhi_chan->tre_bytes_left = mhi_chan->tre_size;
+ 
+-			tr_len = min(buf_left, mhi_chan->tre_size);
++			tr_len = min(len, mhi_chan->tre_size);
+ 		}
+ 
+ 		read_offset = mhi_chan->tre_size - mhi_chan->tre_bytes_left;
+-		write_offset = len - buf_left;
+ 
+ 		buf_addr = kmem_cache_zalloc(mhi_cntrl->tre_buf_cache, GFP_KERNEL);
+ 		if (!buf_addr)
+ 			return -ENOMEM;
+ 
+ 		buf_info.host_addr = mhi_chan->tre_loc + read_offset;
+-		buf_info.dev_addr = buf_addr + write_offset;
++		buf_info.dev_addr = buf_addr;
+ 		buf_info.size = tr_len;
+ 		buf_info.cb = mhi_ep_read_completion;
+ 		buf_info.cb_buf = buf_addr;
+@@ -459,16 +454,12 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
+ 			goto err_free_buf_addr;
+ 		}
+ 
+-		buf_left -= tr_len;
+ 		mhi_chan->tre_bytes_left -= tr_len;
+ 
+-		if (!mhi_chan->tre_bytes_left) {
+-			if (MHI_TRE_DATA_GET_IEOT(el))
+-				tr_done = true;
+-
++		if (!mhi_chan->tre_bytes_left)
+ 			mhi_chan->rd_offset = (mhi_chan->rd_offset + 1) % ring->ring_size;
+-		}
+-	} while (buf_left && !tr_done);
+	/* Read until the transfer ring becomes empty */
++	} while (!mhi_ep_queue_is_empty(mhi_chan->mhi_dev, DMA_TO_DEVICE));
+ 
+ 	return 0;
+ 
+@@ -502,15 +493,11 @@ static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring)
+ 		mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+ 	} else {
+ 		/* UL channel */
+-		do {
+-			ret = mhi_ep_read_channel(mhi_cntrl, ring);
+-			if (ret < 0) {
+-				dev_err(&mhi_chan->mhi_dev->dev, "Failed to read channel\n");
+-				return ret;
+-			}
+-
+-			/* Read until the ring becomes empty */
+-		} while (!mhi_ep_queue_is_empty(mhi_chan->mhi_dev, DMA_TO_DEVICE));
++		ret = mhi_ep_read_channel(mhi_cntrl, ring);
++		if (ret < 0) {
++			dev_err(&mhi_chan->mhi_dev->dev, "Failed to read channel\n");
++			return ret;
++		}
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/bus/mhi/host/init.c b/drivers/bus/mhi/host/init.c
+index a9b1f8beee7bc9..223fc54c45b3e8 100644
+--- a/drivers/bus/mhi/host/init.c
++++ b/drivers/bus/mhi/host/init.c
+@@ -194,7 +194,6 @@ void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl)
+ int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
+ {
+ 	struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
+-	struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ 	unsigned long irq_flags = IRQF_SHARED | IRQF_NO_SUSPEND;
+ 	int i, ret;
+ 
+@@ -221,7 +220,7 @@ int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
+ 			continue;
+ 
+ 		if (mhi_event->irq >= mhi_cntrl->nr_irqs) {
+-			dev_err(dev, "irq %d not available for event ring\n",
++			dev_err(mhi_cntrl->cntrl_dev, "irq %d not available for event ring\n",
+ 				mhi_event->irq);
+ 			ret = -EINVAL;
+ 			goto error_request;
+@@ -232,7 +231,7 @@ int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
+ 				  irq_flags,
+ 				  "mhi", mhi_event);
+ 		if (ret) {
+-			dev_err(dev, "Error requesting irq:%d for ev:%d\n",
++			dev_err(mhi_cntrl->cntrl_dev, "Error requesting irq:%d for ev:%d\n",
+ 				mhi_cntrl->irq[mhi_event->irq], i);
+ 			goto error_request;
+ 		}
+diff --git a/drivers/char/ipmi/ipmi_kcs_sm.c b/drivers/char/ipmi/ipmi_kcs_sm.c
+index ecfcb50302f6ce..efda90dcf5b3d0 100644
+--- a/drivers/char/ipmi/ipmi_kcs_sm.c
++++ b/drivers/char/ipmi/ipmi_kcs_sm.c
+@@ -122,10 +122,10 @@ struct si_sm_data {
+ 	unsigned long  error0_timeout;
+ };
+ 
+-static unsigned int init_kcs_data_with_state(struct si_sm_data *kcs,
+-				  struct si_sm_io *io, enum kcs_states state)
++static unsigned int init_kcs_data(struct si_sm_data *kcs,
++				  struct si_sm_io *io)
+ {
+-	kcs->state = state;
++	kcs->state = KCS_IDLE;
+ 	kcs->io = io;
+ 	kcs->write_pos = 0;
+ 	kcs->write_count = 0;
+@@ -140,12 +140,6 @@ static unsigned int init_kcs_data_with_state(struct si_sm_data *kcs,
+ 	return 2;
+ }
+ 
+-static unsigned int init_kcs_data(struct si_sm_data *kcs,
+-				  struct si_sm_io *io)
+-{
+-	return init_kcs_data_with_state(kcs, io, KCS_IDLE);
+-}
+-
+ static inline unsigned char read_status(struct si_sm_data *kcs)
+ {
+ 	return kcs->io->inputb(kcs->io, 1);
+@@ -276,7 +270,7 @@ static int start_kcs_transaction(struct si_sm_data *kcs, unsigned char *data,
+ 	if (size > MAX_KCS_WRITE_SIZE)
+ 		return IPMI_REQ_LEN_EXCEEDED_ERR;
+ 
+-	if (kcs->state != KCS_IDLE) {
++	if ((kcs->state != KCS_IDLE) && (kcs->state != KCS_HOSED)) {
+ 		dev_warn(kcs->io->dev, "KCS in invalid state %d\n", kcs->state);
+ 		return IPMI_NOT_IN_MY_STATE_ERR;
+ 	}
+@@ -501,7 +495,7 @@ static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time)
+ 	}
+ 
+ 	if (kcs->state == KCS_HOSED) {
+-		init_kcs_data_with_state(kcs, kcs->io, KCS_ERROR0);
++		init_kcs_data(kcs, kcs->io);
+ 		return SI_SM_HOSED;
+ 	}
+ 
+diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
+index 09405668ebb378..99fe0132197120 100644
+--- a/drivers/char/ipmi/ipmi_msghandler.c
++++ b/drivers/char/ipmi/ipmi_msghandler.c
+@@ -39,7 +39,9 @@
+ 
+ #define IPMI_DRIVER_VERSION "39.2"
+ 
+-static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
++static struct ipmi_recv_msg *ipmi_alloc_recv_msg(struct ipmi_user *user);
++static void ipmi_set_recv_msg_user(struct ipmi_recv_msg *msg,
++				   struct ipmi_user *user);
+ static int ipmi_init_msghandler(void);
+ static void smi_recv_work(struct work_struct *t);
+ static void handle_new_recv_msgs(struct ipmi_smi *intf);
+@@ -939,13 +941,11 @@ static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
+ 		 * risk.  At this moment, simply skip it in that case.
+ 		 */
+ 		ipmi_free_recv_msg(msg);
+-		atomic_dec(&msg->user->nr_msgs);
+ 	} else {
+ 		int index;
+ 		struct ipmi_user *user = acquire_ipmi_user(msg->user, &index);
+ 
+ 		if (user) {
+-			atomic_dec(&user->nr_msgs);
+ 			user->handler->ipmi_recv_hndl(msg, user->handler_data);
+ 			release_ipmi_user(user, index);
+ 		} else {
+@@ -1634,8 +1634,7 @@ int ipmi_set_gets_events(struct ipmi_user *user, bool val)
+ 		spin_unlock_irqrestore(&intf->events_lock, flags);
+ 
+ 		list_for_each_entry_safe(msg, msg2, &msgs, link) {
+-			msg->user = user;
+-			kref_get(&user->refcount);
++			ipmi_set_recv_msg_user(msg, user);
+ 			deliver_local_response(intf, msg);
+ 		}
+ 
+@@ -2309,22 +2308,18 @@ static int i_ipmi_request(struct ipmi_user     *user,
+ 	struct ipmi_recv_msg *recv_msg;
+ 	int rv = 0;
+ 
+-	if (user) {
+-		if (atomic_add_return(1, &user->nr_msgs) > max_msgs_per_user) {
+-			/* Decrement will happen at the end of the routine. */
+-			rv = -EBUSY;
+-			goto out;
+-		}
+-	}
+-
+-	if (supplied_recv)
++	if (supplied_recv) {
+ 		recv_msg = supplied_recv;
+-	else {
+-		recv_msg = ipmi_alloc_recv_msg();
+-		if (recv_msg == NULL) {
+-			rv = -ENOMEM;
+-			goto out;
++		recv_msg->user = user;
++		if (user) {
++			atomic_inc(&user->nr_msgs);
++			/* The put happens when the message is freed. */
++			kref_get(&user->refcount);
+ 		}
++	} else {
++		recv_msg = ipmi_alloc_recv_msg(user);
++		if (IS_ERR(recv_msg))
++			return PTR_ERR(recv_msg);
+ 	}
+ 	recv_msg->user_msg_data = user_msg_data;
+ 
+@@ -2335,8 +2330,7 @@ static int i_ipmi_request(struct ipmi_user     *user,
+ 		if (smi_msg == NULL) {
+ 			if (!supplied_recv)
+ 				ipmi_free_recv_msg(recv_msg);
+-			rv = -ENOMEM;
+-			goto out;
++			return -ENOMEM;
+ 		}
+ 	}
+ 
+@@ -2346,10 +2340,6 @@ static int i_ipmi_request(struct ipmi_user     *user,
+ 		goto out_err;
+ 	}
+ 
+-	recv_msg->user = user;
+-	if (user)
+-		/* The put happens when the message is freed. */
+-		kref_get(&user->refcount);
+ 	recv_msg->msgid = msgid;
+ 	/*
+ 	 * Store the message to send in the receive message so timeout
+@@ -2378,8 +2368,10 @@ static int i_ipmi_request(struct ipmi_user     *user,
+ 
+ 	if (rv) {
+ out_err:
+-		ipmi_free_smi_msg(smi_msg);
+-		ipmi_free_recv_msg(recv_msg);
++		if (!supplied_smi)
++			ipmi_free_smi_msg(smi_msg);
++		if (!supplied_recv)
++			ipmi_free_recv_msg(recv_msg);
+ 	} else {
+ 		dev_dbg(intf->si_dev, "Send: %*ph\n",
+ 			smi_msg->data_size, smi_msg->data);
+@@ -2388,9 +2380,6 @@ static int i_ipmi_request(struct ipmi_user     *user,
+ 	}
+ 	rcu_read_unlock();
+ 
+-out:
+-	if (rv && user)
+-		atomic_dec(&user->nr_msgs);
+ 	return rv;
+ }
+ 
+@@ -3882,7 +3871,7 @@ static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf,
+ 	unsigned char            chan;
+ 	struct ipmi_user         *user = NULL;
+ 	struct ipmi_ipmb_addr    *ipmb_addr;
+-	struct ipmi_recv_msg     *recv_msg;
++	struct ipmi_recv_msg     *recv_msg = NULL;
+ 
+ 	if (msg->rsp_size < 10) {
+ 		/* Message not big enough, just ignore it. */
+@@ -3903,9 +3892,8 @@ static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf,
+ 	rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
+ 	if (rcvr) {
+ 		user = rcvr->user;
+-		kref_get(&user->refcount);
+-	} else
+-		user = NULL;
++		recv_msg = ipmi_alloc_recv_msg(user);
++	}
+ 	rcu_read_unlock();
+ 
+ 	if (user == NULL) {
+@@ -3940,47 +3928,41 @@ static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf,
+ 			rv = -1;
+ 		}
+ 		rcu_read_unlock();
+-	} else {
+-		recv_msg = ipmi_alloc_recv_msg();
+-		if (!recv_msg) {
+-			/*
+-			 * We couldn't allocate memory for the
+-			 * message, so requeue it for handling
+-			 * later.
+-			 */
+-			rv = 1;
+-			kref_put(&user->refcount, free_user);
+-		} else {
+-			/* Extract the source address from the data. */
+-			ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
+-			ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
+-			ipmb_addr->slave_addr = msg->rsp[6];
+-			ipmb_addr->lun = msg->rsp[7] & 3;
+-			ipmb_addr->channel = msg->rsp[3] & 0xf;
++	} else if (!IS_ERR(recv_msg)) {
++		/* Extract the source address from the data. */
++		ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
++		ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
++		ipmb_addr->slave_addr = msg->rsp[6];
++		ipmb_addr->lun = msg->rsp[7] & 3;
++		ipmb_addr->channel = msg->rsp[3] & 0xf;
+ 
+-			/*
+-			 * Extract the rest of the message information
+-			 * from the IPMB header.
+-			 */
+-			recv_msg->user = user;
+-			recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
+-			recv_msg->msgid = msg->rsp[7] >> 2;
+-			recv_msg->msg.netfn = msg->rsp[4] >> 2;
+-			recv_msg->msg.cmd = msg->rsp[8];
+-			recv_msg->msg.data = recv_msg->msg_data;
++		/*
++		 * Extract the rest of the message information
++		 * from the IPMB header.
++		 */
++		recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
++		recv_msg->msgid = msg->rsp[7] >> 2;
++		recv_msg->msg.netfn = msg->rsp[4] >> 2;
++		recv_msg->msg.cmd = msg->rsp[8];
++		recv_msg->msg.data = recv_msg->msg_data;
+ 
+-			/*
+-			 * We chop off 10, not 9 bytes because the checksum
+-			 * at the end also needs to be removed.
+-			 */
+-			recv_msg->msg.data_len = msg->rsp_size - 10;
+-			memcpy(recv_msg->msg_data, &msg->rsp[9],
+-			       msg->rsp_size - 10);
+-			if (deliver_response(intf, recv_msg))
+-				ipmi_inc_stat(intf, unhandled_commands);
+-			else
+-				ipmi_inc_stat(intf, handled_commands);
+-		}
++		/*
++		 * We chop off 10, not 9 bytes because the checksum
++		 * at the end also needs to be removed.
++		 */
++		recv_msg->msg.data_len = msg->rsp_size - 10;
++		memcpy(recv_msg->msg_data, &msg->rsp[9],
++		       msg->rsp_size - 10);
++		if (deliver_response(intf, recv_msg))
++			ipmi_inc_stat(intf, unhandled_commands);
++		else
++			ipmi_inc_stat(intf, handled_commands);
++	} else {
++		/*
++		 * We couldn't allocate memory for the message, so
++		 * requeue it for handling later.
++		 */
++		rv = 1;
+ 	}
+ 
+ 	return rv;
+@@ -3993,7 +3975,7 @@ static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf,
+ 	int                      rv = 0;
+ 	struct ipmi_user         *user = NULL;
+ 	struct ipmi_ipmb_direct_addr *daddr;
+-	struct ipmi_recv_msg     *recv_msg;
++	struct ipmi_recv_msg     *recv_msg = NULL;
+ 	unsigned char netfn = msg->rsp[0] >> 2;
+ 	unsigned char cmd = msg->rsp[3];
+ 
+@@ -4002,9 +3984,8 @@ static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf,
+ 	rcvr = find_cmd_rcvr(intf, netfn, cmd, 0);
+ 	if (rcvr) {
+ 		user = rcvr->user;
+-		kref_get(&user->refcount);
+-	} else
+-		user = NULL;
++		recv_msg = ipmi_alloc_recv_msg(user);
++	}
+ 	rcu_read_unlock();
+ 
+ 	if (user == NULL) {
+@@ -4031,44 +4012,38 @@ static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf,
+ 			rv = -1;
+ 		}
+ 		rcu_read_unlock();
+-	} else {
+-		recv_msg = ipmi_alloc_recv_msg();
+-		if (!recv_msg) {
+-			/*
+-			 * We couldn't allocate memory for the
+-			 * message, so requeue it for handling
+-			 * later.
+-			 */
+-			rv = 1;
+-			kref_put(&user->refcount, free_user);
+-		} else {
+-			/* Extract the source address from the data. */
+-			daddr = (struct ipmi_ipmb_direct_addr *)&recv_msg->addr;
+-			daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE;
+-			daddr->channel = 0;
+-			daddr->slave_addr = msg->rsp[1];
+-			daddr->rs_lun = msg->rsp[0] & 3;
+-			daddr->rq_lun = msg->rsp[2] & 3;
++	} else if (!IS_ERR(recv_msg)) {
++		/* Extract the source address from the data. */
++		daddr = (struct ipmi_ipmb_direct_addr *)&recv_msg->addr;
++		daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE;
++		daddr->channel = 0;
++		daddr->slave_addr = msg->rsp[1];
++		daddr->rs_lun = msg->rsp[0] & 3;
++		daddr->rq_lun = msg->rsp[2] & 3;
+ 
+-			/*
+-			 * Extract the rest of the message information
+-			 * from the IPMB header.
+-			 */
+-			recv_msg->user = user;
+-			recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
+-			recv_msg->msgid = (msg->rsp[2] >> 2);
+-			recv_msg->msg.netfn = msg->rsp[0] >> 2;
+-			recv_msg->msg.cmd = msg->rsp[3];
+-			recv_msg->msg.data = recv_msg->msg_data;
+-
+-			recv_msg->msg.data_len = msg->rsp_size - 4;
+-			memcpy(recv_msg->msg_data, msg->rsp + 4,
+-			       msg->rsp_size - 4);
+-			if (deliver_response(intf, recv_msg))
+-				ipmi_inc_stat(intf, unhandled_commands);
+-			else
+-				ipmi_inc_stat(intf, handled_commands);
+-		}
++		/*
++		 * Extract the rest of the message information
++		 * from the IPMB header.
++		 */
++		recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
++		recv_msg->msgid = (msg->rsp[2] >> 2);
++		recv_msg->msg.netfn = msg->rsp[0] >> 2;
++		recv_msg->msg.cmd = msg->rsp[3];
++		recv_msg->msg.data = recv_msg->msg_data;
++
++		recv_msg->msg.data_len = msg->rsp_size - 4;
++		memcpy(recv_msg->msg_data, msg->rsp + 4,
++		       msg->rsp_size - 4);
++		if (deliver_response(intf, recv_msg))
++			ipmi_inc_stat(intf, unhandled_commands);
++		else
++			ipmi_inc_stat(intf, handled_commands);
++	} else {
++		/*
++		 * We couldn't allocate memory for the message, so
++		 * requeue it for handling later.
++		 */
++		rv = 1;
+ 	}
+ 
+ 	return rv;
+@@ -4182,7 +4157,7 @@ static int handle_lan_get_msg_cmd(struct ipmi_smi *intf,
+ 	unsigned char            chan;
+ 	struct ipmi_user         *user = NULL;
+ 	struct ipmi_lan_addr     *lan_addr;
+-	struct ipmi_recv_msg     *recv_msg;
++	struct ipmi_recv_msg     *recv_msg = NULL;
+ 
+ 	if (msg->rsp_size < 12) {
+ 		/* Message not big enough, just ignore it. */
+@@ -4203,9 +4178,8 @@ static int handle_lan_get_msg_cmd(struct ipmi_smi *intf,
+ 	rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
+ 	if (rcvr) {
+ 		user = rcvr->user;
+-		kref_get(&user->refcount);
+-	} else
+-		user = NULL;
++		recv_msg = ipmi_alloc_recv_msg(user);
++	}
+ 	rcu_read_unlock();
+ 
+ 	if (user == NULL) {
+@@ -4217,49 +4191,44 @@ static int handle_lan_get_msg_cmd(struct ipmi_smi *intf,
+ 		 * them to be freed.
+ 		 */
+ 		rv = 0;
+-	} else {
+-		recv_msg = ipmi_alloc_recv_msg();
+-		if (!recv_msg) {
+-			/*
+-			 * We couldn't allocate memory for the
+-			 * message, so requeue it for handling later.
+-			 */
+-			rv = 1;
+-			kref_put(&user->refcount, free_user);
+-		} else {
+-			/* Extract the source address from the data. */
+-			lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
+-			lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
+-			lan_addr->session_handle = msg->rsp[4];
+-			lan_addr->remote_SWID = msg->rsp[8];
+-			lan_addr->local_SWID = msg->rsp[5];
+-			lan_addr->lun = msg->rsp[9] & 3;
+-			lan_addr->channel = msg->rsp[3] & 0xf;
+-			lan_addr->privilege = msg->rsp[3] >> 4;
++	} else if (!IS_ERR(recv_msg)) {
++		/* Extract the source address from the data. */
++		lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
++		lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
++		lan_addr->session_handle = msg->rsp[4];
++		lan_addr->remote_SWID = msg->rsp[8];
++		lan_addr->local_SWID = msg->rsp[5];
++		lan_addr->lun = msg->rsp[9] & 3;
++		lan_addr->channel = msg->rsp[3] & 0xf;
++		lan_addr->privilege = msg->rsp[3] >> 4;
+ 
+-			/*
+-			 * Extract the rest of the message information
+-			 * from the IPMB header.
+-			 */
+-			recv_msg->user = user;
+-			recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
+-			recv_msg->msgid = msg->rsp[9] >> 2;
+-			recv_msg->msg.netfn = msg->rsp[6] >> 2;
+-			recv_msg->msg.cmd = msg->rsp[10];
+-			recv_msg->msg.data = recv_msg->msg_data;
++		/*
++		 * Extract the rest of the message information
++		 * from the IPMB header.
++		 */
++		recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
++		recv_msg->msgid = msg->rsp[9] >> 2;
++		recv_msg->msg.netfn = msg->rsp[6] >> 2;
++		recv_msg->msg.cmd = msg->rsp[10];
++		recv_msg->msg.data = recv_msg->msg_data;
+ 
+-			/*
+-			 * We chop off 12, not 11 bytes because the checksum
+-			 * at the end also needs to be removed.
+-			 */
+-			recv_msg->msg.data_len = msg->rsp_size - 12;
+-			memcpy(recv_msg->msg_data, &msg->rsp[11],
+-			       msg->rsp_size - 12);
+-			if (deliver_response(intf, recv_msg))
+-				ipmi_inc_stat(intf, unhandled_commands);
+-			else
+-				ipmi_inc_stat(intf, handled_commands);
+-		}
++		/*
++		 * We chop off 12, not 11 bytes because the checksum
++		 * at the end also needs to be removed.
++		 */
++		recv_msg->msg.data_len = msg->rsp_size - 12;
++		memcpy(recv_msg->msg_data, &msg->rsp[11],
++		       msg->rsp_size - 12);
++		if (deliver_response(intf, recv_msg))
++			ipmi_inc_stat(intf, unhandled_commands);
++		else
++			ipmi_inc_stat(intf, handled_commands);
++	} else {
++		/*
++		 * We couldn't allocate memory for the message, so
++		 * requeue it for handling later.
++		 */
++		rv = 1;
+ 	}
+ 
+ 	return rv;
+@@ -4281,7 +4250,7 @@ static int handle_oem_get_msg_cmd(struct ipmi_smi *intf,
+ 	unsigned char         chan;
+ 	struct ipmi_user *user = NULL;
+ 	struct ipmi_system_interface_addr *smi_addr;
+-	struct ipmi_recv_msg  *recv_msg;
++	struct ipmi_recv_msg  *recv_msg = NULL;
+ 
+ 	/*
+ 	 * We expect the OEM SW to perform error checking
+@@ -4310,9 +4279,8 @@ static int handle_oem_get_msg_cmd(struct ipmi_smi *intf,
+ 	rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
+ 	if (rcvr) {
+ 		user = rcvr->user;
+-		kref_get(&user->refcount);
+-	} else
+-		user = NULL;
++		recv_msg = ipmi_alloc_recv_msg(user);
++	}
+ 	rcu_read_unlock();
+ 
+ 	if (user == NULL) {
+@@ -4325,48 +4293,42 @@ static int handle_oem_get_msg_cmd(struct ipmi_smi *intf,
+ 		 */
+ 
+ 		rv = 0;
+-	} else {
+-		recv_msg = ipmi_alloc_recv_msg();
+-		if (!recv_msg) {
+-			/*
+-			 * We couldn't allocate memory for the
+-			 * message, so requeue it for handling
+-			 * later.
+-			 */
+-			rv = 1;
+-			kref_put(&user->refcount, free_user);
+-		} else {
+-			/*
+-			 * OEM Messages are expected to be delivered via
+-			 * the system interface to SMS software.  We might
+-			 * need to visit this again depending on OEM
+-			 * requirements
+-			 */
+-			smi_addr = ((struct ipmi_system_interface_addr *)
+-				    &recv_msg->addr);
+-			smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+-			smi_addr->channel = IPMI_BMC_CHANNEL;
+-			smi_addr->lun = msg->rsp[0] & 3;
+-
+-			recv_msg->user = user;
+-			recv_msg->user_msg_data = NULL;
+-			recv_msg->recv_type = IPMI_OEM_RECV_TYPE;
+-			recv_msg->msg.netfn = msg->rsp[0] >> 2;
+-			recv_msg->msg.cmd = msg->rsp[1];
+-			recv_msg->msg.data = recv_msg->msg_data;
++	} else if (!IS_ERR(recv_msg)) {
++		/*
++		 * OEM Messages are expected to be delivered via
++		 * the system interface to SMS software.  We might
++		 * need to visit this again depending on OEM
++		 * requirements
++		 */
++		smi_addr = ((struct ipmi_system_interface_addr *)
++			    &recv_msg->addr);
++		smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
++		smi_addr->channel = IPMI_BMC_CHANNEL;
++		smi_addr->lun = msg->rsp[0] & 3;
++
++		recv_msg->user_msg_data = NULL;
++		recv_msg->recv_type = IPMI_OEM_RECV_TYPE;
++		recv_msg->msg.netfn = msg->rsp[0] >> 2;
++		recv_msg->msg.cmd = msg->rsp[1];
++		recv_msg->msg.data = recv_msg->msg_data;
+ 
+-			/*
+-			 * The message starts at byte 4 which follows the
+-			 * Channel Byte in the "GET MESSAGE" command
+-			 */
+-			recv_msg->msg.data_len = msg->rsp_size - 4;
+-			memcpy(recv_msg->msg_data, &msg->rsp[4],
+-			       msg->rsp_size - 4);
+-			if (deliver_response(intf, recv_msg))
+-				ipmi_inc_stat(intf, unhandled_commands);
+-			else
+-				ipmi_inc_stat(intf, handled_commands);
+-		}
++		/*
++		 * The message starts at byte 4 which follows the
++		 * Channel Byte in the "GET MESSAGE" command
++		 */
++		recv_msg->msg.data_len = msg->rsp_size - 4;
++		memcpy(recv_msg->msg_data, &msg->rsp[4],
++		       msg->rsp_size - 4);
++		if (deliver_response(intf, recv_msg))
++			ipmi_inc_stat(intf, unhandled_commands);
++		else
++			ipmi_inc_stat(intf, handled_commands);
++	} else {
++		/*
++		 * We couldn't allocate memory for the message, so
++		 * requeue it for handling later.
++		 */
++		rv = 1;
+ 	}
+ 
+ 	return rv;
+@@ -4425,8 +4387,8 @@ static int handle_read_event_rsp(struct ipmi_smi *intf,
+ 		if (!user->gets_events)
+ 			continue;
+ 
+-		recv_msg = ipmi_alloc_recv_msg();
+-		if (!recv_msg) {
++		recv_msg = ipmi_alloc_recv_msg(user);
++		if (IS_ERR(recv_msg)) {
+ 			rcu_read_unlock();
+ 			list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
+ 						 link) {
+@@ -4445,8 +4407,6 @@ static int handle_read_event_rsp(struct ipmi_smi *intf,
+ 		deliver_count++;
+ 
+ 		copy_event_into_recv_msg(recv_msg, msg);
+-		recv_msg->user = user;
+-		kref_get(&user->refcount);
+ 		list_add_tail(&recv_msg->link, &msgs);
+ 	}
+ 	srcu_read_unlock(&intf->users_srcu, index);
+@@ -4462,8 +4422,8 @@ static int handle_read_event_rsp(struct ipmi_smi *intf,
+ 		 * No one to receive the message, put it in queue if there's
+ 		 * not already too many things in the queue.
+ 		 */
+-		recv_msg = ipmi_alloc_recv_msg();
+-		if (!recv_msg) {
++		recv_msg = ipmi_alloc_recv_msg(NULL);
++		if (IS_ERR(recv_msg)) {
+ 			/*
+ 			 * We couldn't allocate memory for the
+ 			 * message, so requeue it for handling
+@@ -5155,27 +5115,51 @@ static void free_recv_msg(struct ipmi_recv_msg *msg)
+ 		kfree(msg);
+ }
+ 
+-static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
++static struct ipmi_recv_msg *ipmi_alloc_recv_msg(struct ipmi_user *user)
+ {
+ 	struct ipmi_recv_msg *rv;
+ 
++	if (user) {
++		if (atomic_add_return(1, &user->nr_msgs) > max_msgs_per_user) {
++			atomic_dec(&user->nr_msgs);
++			return ERR_PTR(-EBUSY);
++		}
++	}
++
+ 	rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
+-	if (rv) {
+-		rv->user = NULL;
+-		rv->done = free_recv_msg;
+-		atomic_inc(&recv_msg_inuse_count);
++	if (!rv) {
++		if (user)
++			atomic_dec(&user->nr_msgs);
++		return ERR_PTR(-ENOMEM);
+ 	}
++
++	rv->user = user;
++	rv->done = free_recv_msg;
++	if (user)
++		kref_get(&user->refcount);
++	atomic_inc(&recv_msg_inuse_count);
+ 	return rv;
+ }
+ 
+ void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
+ {
+-	if (msg->user && !oops_in_progress)
++	if (msg->user && !oops_in_progress) {
++		atomic_dec(&msg->user->nr_msgs);
+ 		kref_put(&msg->user->refcount, free_user);
++	}
+ 	msg->done(msg);
+ }
+ EXPORT_SYMBOL(ipmi_free_recv_msg);
+ 
++static void ipmi_set_recv_msg_user(struct ipmi_recv_msg *msg,
++				   struct ipmi_user *user)
++{
++	WARN_ON_ONCE(msg->user); /* User should not be set. */
++	msg->user = user;
++	atomic_inc(&user->nr_msgs);
++	kref_get(&user->refcount);
++}
++
+ static atomic_t panic_done_count = ATOMIC_INIT(0);
+ 
+ static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
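
The msghandler rework above moves the per-user message quota and the refcount into the allocator itself: ipmi_alloc_recv_msg() now returns ERR_PTR() on failure and, on success, hands back a message that already owns its user reference, which ipmi_free_recv_msg() later drops. A self-contained sketch of that ownership model, with a toy ERR_PTR and plain counters standing in for kref/atomic_t:

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Toy ERR_PTR machinery standing in for the kernel's. */
    #define ERR_PTR(e)  ((void *)(intptr_t)(e))
    #define PTR_ERR(p)  ((int)(intptr_t)(p))
    #define IS_ERR(p)   ((uintptr_t)(p) >= (uintptr_t)-4095)

    struct user { int refcount; int nr_msgs; int max_msgs; };
    struct msg  { struct user *user; };

    /* On success the message owns one user ref and one quota slot;
     * every failure path undoes its own bookkeeping. */
    static struct msg *alloc_msg(struct user *u)
    {
        struct msg *m;

        if (u && ++u->nr_msgs > u->max_msgs) {
            u->nr_msgs--;
            return ERR_PTR(-EBUSY);
        }
        m = malloc(sizeof(*m));
        if (!m) {
            if (u)
                u->nr_msgs--;
            return ERR_PTR(-ENOMEM);
        }
        m->user = u;
        if (u)
            u->refcount++;
        return m;
    }

    static void free_msg(struct msg *m)
    {
        if (m->user) {
            m->user->nr_msgs--;
            m->user->refcount--;
        }
        free(m);
    }

    int main(void)
    {
        struct user u = { .refcount = 1, .max_msgs = 1 };
        struct msg *a = alloc_msg(&u), *b = alloc_msg(&u);

        printf("second alloc: %d\n", IS_ERR(b) ? PTR_ERR(b) : 0); /* -EBUSY */
        if (!IS_ERR(a))
            free_msg(a);
        printf("refs=%d msgs=%d\n", u.refcount, u.nr_msgs);       /* 1, 0 */
        return 0;
    }
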
+diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
+index ed0d3d8449b306..59e992dc65c4cd 100644
+--- a/drivers/char/tpm/tpm_tis_core.c
++++ b/drivers/char/tpm/tpm_tis_core.c
+@@ -977,8 +977,8 @@ static int tpm_tis_probe_irq_single(struct tpm_chip *chip, u32 intmask,
+ 	 * will call disable_irq which undoes all of the above.
+ 	 */
+ 	if (!(chip->flags & TPM_CHIP_FLAG_IRQ)) {
+-		tpm_tis_write8(priv, original_int_vec,
+-			       TPM_INT_VECTOR(priv->locality));
++		tpm_tis_write8(priv, TPM_INT_VECTOR(priv->locality),
++			       original_int_vec);
+ 		rc = -1;
+ 	}
+ 
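
The tpm_tis fix above is a pure argument-order bug: tpm_tis_write8() takes the register offset before the value, and the interrupt-vector restore path had them swapped, which the compiler cannot flag when both parameters are plain integers. A tiny illustration of the hazard; write8() and the offset used here are made up:

    #include <stdio.h>

    /* Same shape as the fixed call: (device, register, value). */
    static void write8(const char *dev, unsigned int reg, unsigned char val)
    {
        printf("%s: reg 0x%x <- 0x%02x\n", dev, reg, val);
    }

    int main(void)
    {
        unsigned int int_vector_reg = 0x0c;  /* hypothetical offset */
        unsigned char original_vec = 0x05;

        /* Both orders compile; only one is right. */
        write8("tpm", int_vector_reg, original_vec);      /* correct */
        /* write8("tpm", original_vec, int_vector_reg);      silent bug */
        return 0;
    }
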
+diff --git a/drivers/clk/at91/clk-peripheral.c b/drivers/clk/at91/clk-peripheral.c
+index c173a44c800aa8..629f050a855aae 100644
+--- a/drivers/clk/at91/clk-peripheral.c
++++ b/drivers/clk/at91/clk-peripheral.c
+@@ -279,8 +279,11 @@ static int clk_sam9x5_peripheral_determine_rate(struct clk_hw *hw,
+ 	long best_diff = LONG_MIN;
+ 	u32 shift;
+ 
+-	if (periph->id < PERIPHERAL_ID_MIN || !periph->range.max)
+-		return parent_rate;
++	if (periph->id < PERIPHERAL_ID_MIN || !periph->range.max) {
++		req->rate = parent_rate;
++
++		return 0;
++	}
+ 
+ 	/* First step: check the available dividers. */
+ 	for (shift = 0; shift <= PERIPHERAL_MAX_SHIFT; shift++) {
+diff --git a/drivers/clk/mediatek/clk-mt8195-infra_ao.c b/drivers/clk/mediatek/clk-mt8195-infra_ao.c
+index bb648a88e43afd..ad47fdb2346075 100644
+--- a/drivers/clk/mediatek/clk-mt8195-infra_ao.c
++++ b/drivers/clk/mediatek/clk-mt8195-infra_ao.c
+@@ -103,7 +103,7 @@ static const struct mtk_gate infra_ao_clks[] = {
+ 	GATE_INFRA_AO0(CLK_INFRA_AO_CQ_DMA_FPC, "infra_ao_cq_dma_fpc", "fpc", 28),
+ 	GATE_INFRA_AO0(CLK_INFRA_AO_UART5, "infra_ao_uart5", "top_uart", 29),
+ 	/* INFRA_AO1 */
+-	GATE_INFRA_AO1(CLK_INFRA_AO_HDMI_26M, "infra_ao_hdmi_26m", "clk26m", 0),
++	GATE_INFRA_AO1(CLK_INFRA_AO_HDMI_26M, "infra_ao_hdmi_26m", "top_hdmi_xtal", 0),
+ 	GATE_INFRA_AO1(CLK_INFRA_AO_SPI0, "infra_ao_spi0", "top_spi", 1),
+ 	GATE_INFRA_AO1(CLK_INFRA_AO_MSDC0, "infra_ao_msdc0", "top_msdc50_0_hclk", 2),
+ 	GATE_INFRA_AO1(CLK_INFRA_AO_MSDC1, "infra_ao_msdc1", "top_axi", 4),
+diff --git a/drivers/clk/mediatek/clk-mux.c b/drivers/clk/mediatek/clk-mux.c
+index 60990296450bbb..9a12e58230bed8 100644
+--- a/drivers/clk/mediatek/clk-mux.c
++++ b/drivers/clk/mediatek/clk-mux.c
+@@ -146,9 +146,7 @@ static int mtk_clk_mux_set_parent_setclr_lock(struct clk_hw *hw, u8 index)
+ static int mtk_clk_mux_determine_rate(struct clk_hw *hw,
+ 				      struct clk_rate_request *req)
+ {
+-	struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
+-
+-	return clk_mux_determine_rate_flags(hw, req, mux->data->flags);
++	return clk_mux_determine_rate_flags(hw, req, 0);
+ }
+ 
+ const struct clk_ops mtk_mux_clr_set_upd_ops = {
+diff --git a/drivers/clk/nxp/clk-lpc18xx-cgu.c b/drivers/clk/nxp/clk-lpc18xx-cgu.c
+index 81efa885069b2a..b9e204d63a9722 100644
+--- a/drivers/clk/nxp/clk-lpc18xx-cgu.c
++++ b/drivers/clk/nxp/clk-lpc18xx-cgu.c
+@@ -370,23 +370,25 @@ static unsigned long lpc18xx_pll0_recalc_rate(struct clk_hw *hw,
+ 	return 0;
+ }
+ 
+-static long lpc18xx_pll0_round_rate(struct clk_hw *hw, unsigned long rate,
+-				    unsigned long *prate)
++static int lpc18xx_pll0_determine_rate(struct clk_hw *hw,
++				       struct clk_rate_request *req)
+ {
+ 	unsigned long m;
+ 
+-	if (*prate < rate) {
++	if (req->best_parent_rate < req->rate) {
+ 		pr_warn("%s: pll dividers not supported\n", __func__);
+ 		return -EINVAL;
+ 	}
+ 
+-	m = DIV_ROUND_UP_ULL(*prate, rate * 2);
+-	if (m <= 0 && m > LPC18XX_PLL0_MSEL_MAX) {
+-		pr_warn("%s: unable to support rate %lu\n", __func__, rate);
++	m = DIV_ROUND_UP_ULL(req->best_parent_rate, req->rate * 2);
++	if (m == 0 || m > LPC18XX_PLL0_MSEL_MAX) {
++		pr_warn("%s: unable to support rate %lu\n", __func__, req->rate);
+ 		return -EINVAL;
+ 	}
+ 
+-	return 2 * *prate * m;
++	req->rate = 2 * req->best_parent_rate * m;
++
++	return 0;
+ }
+ 
+ static int lpc18xx_pll0_set_rate(struct clk_hw *hw, unsigned long rate,
+@@ -402,7 +404,7 @@ static int lpc18xx_pll0_set_rate(struct clk_hw *hw, unsigned long rate,
+ 	}
+ 
+ 	m = DIV_ROUND_UP_ULL(parent_rate, rate * 2);
+-	if (m <= 0 && m > LPC18XX_PLL0_MSEL_MAX) {
++	if (m == 0 || m > LPC18XX_PLL0_MSEL_MAX) {
+ 		pr_warn("%s: unable to support rate %lu\n", __func__, rate);
+ 		return -EINVAL;
+ 	}
+@@ -443,7 +445,7 @@ static int lpc18xx_pll0_set_rate(struct clk_hw *hw, unsigned long rate,
+ 
+ static const struct clk_ops lpc18xx_pll0_ops = {
+ 	.recalc_rate	= lpc18xx_pll0_recalc_rate,
+-	.round_rate	= lpc18xx_pll0_round_rate,
++	.determine_rate = lpc18xx_pll0_determine_rate,
+ 	.set_rate	= lpc18xx_pll0_set_rate,
+ };
+ 
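
Alongside the .round_rate to .determine_rate conversion, the lpc18xx hunks fix a guard that could never fire: for an unsigned m, the condition m <= 0 && m > LPC18XX_PLL0_MSEL_MAX is always false, so out-of-range multipliers slipped through; the corrected form is m == 0 || m > MAX. A compilable demonstration with a stand-in MAX value:

    #include <stdio.h>

    #define MSEL_MAX 65536UL  /* stand-in limit for the demo */

    static int check_old(unsigned long m) { return m <= 0 && m > MSEL_MAX; }
    static int check_new(unsigned long m) { return m == 0 || m > MSEL_MAX; }

    int main(void)
    {
        unsigned long bad[] = { 0, MSEL_MAX + 1 };

        for (int i = 0; i < 2; i++)
            printf("m=%lu old=%d new=%d\n", bad[i],
                   check_old(bad[i]), check_new(bad[i]));
        /* old=0 for both out-of-range values: the old guard never fired */
        return 0;
    }
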
+diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c
+index 33cc1f73c69d1f..cab2dbb7a8d499 100644
+--- a/drivers/clk/qcom/common.c
++++ b/drivers/clk/qcom/common.c
+@@ -273,8 +273,8 @@ static int qcom_cc_icc_register(struct device *dev,
+ 		icd[i].slave_id = desc->icc_hws[i].slave_id;
+ 		hws = &desc->clks[desc->icc_hws[i].clk_id]->hw;
+ 		icd[i].clk = devm_clk_hw_get_clk(dev, hws, "icc");
+-		if (!icd[i].clk)
+-			return dev_err_probe(dev, -ENOENT,
++		if (IS_ERR(icd[i].clk))
++			return dev_err_probe(dev, PTR_ERR(icd[i].clk),
+ 					     "(%d) clock entry is null\n", i);
+ 		icd[i].name = clk_hw_get_name(hws);
+ 	}
+diff --git a/drivers/clk/qcom/tcsrcc-x1e80100.c b/drivers/clk/qcom/tcsrcc-x1e80100.c
+index ff61769a08077e..a367e1f55622d9 100644
+--- a/drivers/clk/qcom/tcsrcc-x1e80100.c
++++ b/drivers/clk/qcom/tcsrcc-x1e80100.c
+@@ -29,6 +29,10 @@ static struct clk_branch tcsr_edp_clkref_en = {
+ 		.enable_mask = BIT(0),
+ 		.hw.init = &(const struct clk_init_data) {
+ 			.name = "tcsr_edp_clkref_en",
++			.parent_data = &(const struct clk_parent_data){
++				.index = DT_BI_TCXO_PAD,
++			},
++			.num_parents = 1,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
+index 0f27c33192e10d..112ed81f648eec 100644
+--- a/drivers/clk/renesas/renesas-cpg-mssr.c
++++ b/drivers/clk/renesas/renesas-cpg-mssr.c
+@@ -1012,6 +1012,7 @@ static int __init cpg_mssr_reserved_init(struct cpg_mssr_priv *priv,
+ 
+ 		of_for_each_phandle(&it, rc, node, "clocks", "#clock-cells", -1) {
+ 			int idx;
++			unsigned int *new_ids;
+ 
+ 			if (it.node != priv->np)
+ 				continue;
+@@ -1022,11 +1023,13 @@ static int __init cpg_mssr_reserved_init(struct cpg_mssr_priv *priv,
+ 			if (args[0] != CPG_MOD)
+ 				continue;
+ 
+-			ids = krealloc_array(ids, (num + 1), sizeof(*ids), GFP_KERNEL);
+-			if (!ids) {
++			new_ids = krealloc_array(ids, (num + 1), sizeof(*ids), GFP_KERNEL);
++			if (!new_ids) {
+ 				of_node_put(it.node);
++				kfree(ids);
+ 				return -ENOMEM;
+ 			}
++			ids = new_ids;
+ 
+ 			if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A)
+ 				idx = MOD_CLK_PACK_10(args[1]);	/* for DEF_MOD_STB() */
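
The renesas-cpg-mssr hunk above fixes the classic realloc leak: assigning the result of krealloc_array() straight back to ids drops the only pointer to the old buffer when the allocation fails. The same rule in portable C, using realloc():

    #include <stdlib.h>

    /* Append one id, preserving the old array if the resize fails. */
    static int push_id(unsigned int **ids, size_t *num, unsigned int id)
    {
        unsigned int *tmp = realloc(*ids, (*num + 1) * sizeof(**ids));

        if (!tmp)           /* *ids is still valid; caller may free it */
            return -1;
        *ids = tmp;
        (*ids)[(*num)++] = id;
        return 0;
    }

    int main(void)
    {
        unsigned int *ids = NULL;
        size_t num = 0;

        for (unsigned int i = 0; i < 4; i++)
            if (push_id(&ids, &num, i))
                break;
        free(ids);
        return 0;
    }
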
+diff --git a/drivers/clk/tegra/clk-bpmp.c b/drivers/clk/tegra/clk-bpmp.c
+index 7bfba0afd77831..4ec408c3a26aa4 100644
+--- a/drivers/clk/tegra/clk-bpmp.c
++++ b/drivers/clk/tegra/clk-bpmp.c
+@@ -635,7 +635,7 @@ static int tegra_bpmp_register_clocks(struct tegra_bpmp *bpmp,
+ 
+ 	bpmp->num_clocks = count;
+ 
+-	bpmp->clocks = devm_kcalloc(bpmp->dev, count, sizeof(struct tegra_bpmp_clk), GFP_KERNEL);
++	bpmp->clocks = devm_kcalloc(bpmp->dev, count, sizeof(*bpmp->clocks), GFP_KERNEL);
+ 	if (!bpmp->clocks)
+ 		return -ENOMEM;
+ 
+diff --git a/drivers/clocksource/clps711x-timer.c b/drivers/clocksource/clps711x-timer.c
+index e95fdc49c2269c..bbceb0289d457a 100644
+--- a/drivers/clocksource/clps711x-timer.c
++++ b/drivers/clocksource/clps711x-timer.c
+@@ -78,24 +78,33 @@ static int __init clps711x_timer_init(struct device_node *np)
+ 	unsigned int irq = irq_of_parse_and_map(np, 0);
+ 	struct clk *clock = of_clk_get(np, 0);
+ 	void __iomem *base = of_iomap(np, 0);
++	int ret = 0;
+ 
+ 	if (!base)
+ 		return -ENOMEM;
+-	if (!irq)
+-		return -EINVAL;
+-	if (IS_ERR(clock))
+-		return PTR_ERR(clock);
++	if (!irq) {
++		ret = -EINVAL;
++		goto unmap_io;
++	}
++	if (IS_ERR(clock)) {
++		ret = PTR_ERR(clock);
++		goto unmap_io;
++	}
+ 
+ 	switch (of_alias_get_id(np, "timer")) {
+ 	case CLPS711X_CLKSRC_CLOCKSOURCE:
+ 		clps711x_clksrc_init(clock, base);
+ 		break;
+ 	case CLPS711X_CLKSRC_CLOCKEVENT:
+-		return _clps711x_clkevt_init(clock, base, irq);
++		ret = _clps711x_clkevt_init(clock, base, irq);
++		break;
+ 	default:
+-		return -EINVAL;
++		ret = -EINVAL;
++		break;
+ 	}
+ 
+-	return 0;
++unmap_io:
++	iounmap(base);
++	return ret;
+ }
+ TIMER_OF_DECLARE(clps711x, "cirrus,ep7209-timer", clps711x_timer_init);
+diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
+index 983443396f8f22..30e1b64d0558f2 100644
+--- a/drivers/cpufreq/cpufreq-dt.c
++++ b/drivers/cpufreq/cpufreq-dt.c
+@@ -110,7 +110,7 @@ static int cpufreq_init(struct cpufreq_policy *policy)
+ 
+ 	transition_latency = dev_pm_opp_get_max_transition_latency(cpu_dev);
+ 	if (!transition_latency)
+-		transition_latency = CPUFREQ_ETERNAL;
++		transition_latency = CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS;
+ 
+ 	cpumask_copy(policy->cpus, priv->cpus);
+ 	policy->driver_data = priv;
+diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
+index c20d3ecc5a81ea..33c1df7f683e40 100644
+--- a/drivers/cpufreq/imx6q-cpufreq.c
++++ b/drivers/cpufreq/imx6q-cpufreq.c
+@@ -443,7 +443,7 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	if (of_property_read_u32(np, "clock-latency", &transition_latency))
+-		transition_latency = CPUFREQ_ETERNAL;
++		transition_latency = CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS;
+ 
+ 	/*
+ 	 * Calculate the ramp time for max voltage change in the
+diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
+index e9087109203895..d0f4f7c2ae4d94 100644
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -1582,10 +1582,10 @@ static void update_qos_request(enum freq_qos_req_type type)
+ 			continue;
+ 
+ 		req = policy->driver_data;
+-		cpufreq_cpu_put(policy);
+-
+-		if (!req)
++		if (!req) {
++			cpufreq_cpu_put(policy);
+ 			continue;
++		}
+ 
+ 		if (hwp_active)
+ 			intel_pstate_get_hwp_cap(cpu);
+@@ -1601,6 +1601,8 @@ static void update_qos_request(enum freq_qos_req_type type)
+ 
+ 		if (freq_qos_update_request(req, freq) < 0)
+ 			pr_warn("Failed to update freq constraint: CPU%d\n", i);
++
++		cpufreq_cpu_put(policy);
+ 	}
+ }
+ 
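
The intel_pstate change above reorders a reference drop: req points into policy->driver_data, so cpufreq_cpu_put() must come after the last use of req, not before it. A minimal sketch of that get/use/put ordering, with a hand-rolled counter in place of the real refcounting:

    #include <stdio.h>

    struct policy { int refs; int data; };

    static struct policy *policy_get(struct policy *p) { p->refs++; return p; }
    static void policy_put(struct policy *p) { p->refs--; }

    static void update(struct policy *p)
    {
        struct policy *ref = policy_get(p);
        int *req = &ref->data;  /* borrowed from the refcounted object */

        *req += 1;              /* ... long-running work using req ... */
        policy_put(ref);        /* drop only after the last use of req */
    }

    int main(void)
    {
        struct policy p = { .refs = 1 };

        update(&p);
        printf("refs=%d data=%d\n", p.refs, p.data);  /* 1 1 */
        return 0;
    }
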
+diff --git a/drivers/cpufreq/mediatek-cpufreq-hw.c b/drivers/cpufreq/mediatek-cpufreq-hw.c
+index aeb5e63045421b..1f1ec43aad7ca3 100644
+--- a/drivers/cpufreq/mediatek-cpufreq-hw.c
++++ b/drivers/cpufreq/mediatek-cpufreq-hw.c
+@@ -238,7 +238,7 @@ static int mtk_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
+ 
+ 	latency = readl_relaxed(data->reg_bases[REG_FREQ_LATENCY]) * 1000;
+ 	if (!latency)
+-		latency = CPUFREQ_ETERNAL;
++		latency = CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS;
+ 
+ 	policy->cpuinfo.transition_latency = latency;
+ 	policy->fast_switch_possible = true;
+diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
+index a2ec1addafc931..bb265541671a58 100644
+--- a/drivers/cpufreq/scmi-cpufreq.c
++++ b/drivers/cpufreq/scmi-cpufreq.c
+@@ -280,7 +280,7 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
+ 
+ 	latency = perf_ops->transition_latency_get(ph, domain);
+ 	if (!latency)
+-		latency = CPUFREQ_ETERNAL;
++		latency = CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS;
+ 
+ 	policy->cpuinfo.transition_latency = latency;
+ 
+diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
+index a191d9bdf667ad..bdbc3923629e7b 100644
+--- a/drivers/cpufreq/scpi-cpufreq.c
++++ b/drivers/cpufreq/scpi-cpufreq.c
+@@ -157,7 +157,7 @@ static int scpi_cpufreq_init(struct cpufreq_policy *policy)
+ 
+ 	latency = scpi_ops->get_transition_latency(cpu_dev);
+ 	if (!latency)
+-		latency = CPUFREQ_ETERNAL;
++		latency = CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS;
+ 
+ 	policy->cpuinfo.transition_latency = latency;
+ 
+diff --git a/drivers/cpufreq/spear-cpufreq.c b/drivers/cpufreq/spear-cpufreq.c
+index d8ab5b01d46d04..c032dd5d12d3cc 100644
+--- a/drivers/cpufreq/spear-cpufreq.c
++++ b/drivers/cpufreq/spear-cpufreq.c
+@@ -183,7 +183,7 @@ static int spear_cpufreq_probe(struct platform_device *pdev)
+ 
+ 	if (of_property_read_u32(np, "clock-latency",
+ 				&spear_cpufreq.transition_latency))
+-		spear_cpufreq.transition_latency = CPUFREQ_ETERNAL;
++		spear_cpufreq.transition_latency = CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS;
+ 
+ 	cnt = of_property_count_u32_elems(np, "cpufreq_tbl");
+ 	if (cnt <= 0) {
+diff --git a/drivers/cpufreq/tegra186-cpufreq.c b/drivers/cpufreq/tegra186-cpufreq.c
+index 7b8fcfa55038bc..39186008afbfdf 100644
+--- a/drivers/cpufreq/tegra186-cpufreq.c
++++ b/drivers/cpufreq/tegra186-cpufreq.c
+@@ -86,10 +86,14 @@ static int tegra186_cpufreq_set_target(struct cpufreq_policy *policy,
+ {
+ 	struct tegra186_cpufreq_data *data = cpufreq_get_driver_data();
+ 	struct cpufreq_frequency_table *tbl = policy->freq_table + index;
+-	unsigned int edvd_offset = data->cpus[policy->cpu].edvd_offset;
++	unsigned int edvd_offset;
+ 	u32 edvd_val = tbl->driver_data;
++	u32 cpu;
+ 
+-	writel(edvd_val, data->regs + edvd_offset);
++	for_each_cpu(cpu, policy->cpus) {
++		edvd_offset = data->cpus[cpu].edvd_offset;
++		writel(edvd_val, data->regs + edvd_offset);
++	}
+ 
+ 	return 0;
+ }
+diff --git a/drivers/crypto/aspeed/aspeed-hace-crypto.c b/drivers/crypto/aspeed/aspeed-hace-crypto.c
+index a72dfebc53ffc2..fa201dae1f81b4 100644
+--- a/drivers/crypto/aspeed/aspeed-hace-crypto.c
++++ b/drivers/crypto/aspeed/aspeed-hace-crypto.c
+@@ -346,7 +346,7 @@ static int aspeed_sk_start_sg(struct aspeed_hace_dev *hace_dev)
+ 
+ 	} else {
+ 		dma_unmap_sg(hace_dev->dev, req->dst, rctx->dst_nents,
+-			     DMA_TO_DEVICE);
++			     DMA_FROM_DEVICE);
+ 		dma_unmap_sg(hace_dev->dev, req->src, rctx->src_nents,
+ 			     DMA_TO_DEVICE);
+ 	}
+diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
+index dcc2380a5889f5..d15b2e943447ef 100644
+--- a/drivers/crypto/atmel-tdes.c
++++ b/drivers/crypto/atmel-tdes.c
+@@ -512,7 +512,7 @@ static int atmel_tdes_crypt_start(struct atmel_tdes_dev *dd)
+ 
+ 	if (err && (dd->flags & TDES_FLAGS_FAST)) {
+ 		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
+-		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_TO_DEVICE);
++		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
+ 	}
+ 
+ 	return err;
+diff --git a/drivers/crypto/rockchip/rk3288_crypto_ahash.c b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
+index 69d6019d8abcf0..ba26b4db7a6084 100644
+--- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c
++++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
+@@ -252,7 +252,7 @@ static void rk_hash_unprepare(struct crypto_engine *engine, void *breq)
+ 	struct rk_ahash_rctx *rctx = ahash_request_ctx(areq);
+ 	struct rk_crypto_info *rkc = rctx->dev;
+ 
+-	dma_unmap_sg(rkc->dev, areq->src, rctx->nrsg, DMA_TO_DEVICE);
++	dma_unmap_sg(rkc->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE);
+ }
+ 
+ static int rk_hash_run(struct crypto_engine *engine, void *breq)
+diff --git a/drivers/firmware/meson/meson_sm.c b/drivers/firmware/meson/meson_sm.c
+index f25a9746249b60..3ab67aaa9e5da9 100644
+--- a/drivers/firmware/meson/meson_sm.c
++++ b/drivers/firmware/meson/meson_sm.c
+@@ -232,11 +232,16 @@ EXPORT_SYMBOL(meson_sm_call_write);
+ struct meson_sm_firmware *meson_sm_get(struct device_node *sm_node)
+ {
+ 	struct platform_device *pdev = of_find_device_by_node(sm_node);
++	struct meson_sm_firmware *fw;
+ 
+ 	if (!pdev)
+ 		return NULL;
+ 
+-	return platform_get_drvdata(pdev);
++	fw = platform_get_drvdata(pdev);
++
++	put_device(&pdev->dev);
++
++	return fw;
+ }
+ EXPORT_SYMBOL_GPL(meson_sm_get);
+ 
+diff --git a/drivers/gpio/gpio-wcd934x.c b/drivers/gpio/gpio-wcd934x.c
+index cfa7b0a50c8e33..03b16b8f639ad0 100644
+--- a/drivers/gpio/gpio-wcd934x.c
++++ b/drivers/gpio/gpio-wcd934x.c
+@@ -102,7 +102,7 @@ static int wcd_gpio_probe(struct platform_device *pdev)
+ 	chip->base = -1;
+ 	chip->ngpio = WCD934X_NPINS;
+ 	chip->label = dev_name(dev);
+-	chip->can_sleep = false;
++	chip->can_sleep = true;
+ 
+ 	return devm_gpiochip_add_data(dev, chip, data);
+ }
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index b02ff92bae0b1b..ffa0d7483ffc14 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -2027,6 +2027,10 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
+ 
+ 	init_data.flags.disable_ips_in_vpb = 0;
+ 
++	/* DCN35 and above supports dynamic DTBCLK switch */
++	if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 5, 0))
++		init_data.flags.allow_0_dtb_clk = true;
++
+ 	/* Enable DWB for tested platforms only */
+ 	if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0))
+ 		init_data.num_virtual_links = 1;
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
+index 2b1673d69ea83f..1ab5ae9b5ea515 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
+@@ -154,10 +154,13 @@ static bool dce60_setup_scaling_configuration(
+ 	REG_SET(SCL_BYPASS_CONTROL, 0, SCL_BYPASS_MODE, 0);
+ 
+ 	if (data->taps.h_taps + data->taps.v_taps <= 2) {
+-		/* Set bypass */
+-
+-		/* DCE6 has no SCL_MODE register, skip scale mode programming */
++		/* Disable scaler functionality */
++		REG_WRITE(SCL_SCALER_ENABLE, 0);
+ 
++		/* Clear registers that can cause glitches even when the scaler is off */
++		REG_WRITE(SCL_TAP_CONTROL, 0);
++		REG_WRITE(SCL_AUTOMATIC_MODE_CONTROL, 0);
++		REG_WRITE(SCL_F_SHARP_CONTROL, 0);
+ 		return false;
+ 	}
+ 
+@@ -165,7 +168,7 @@ static bool dce60_setup_scaling_configuration(
+ 			SCL_H_NUM_OF_TAPS, data->taps.h_taps - 1,
+ 			SCL_V_NUM_OF_TAPS, data->taps.v_taps - 1);
+ 
+-	/* DCE6 has no SCL_MODE register, skip scale mode programming */
++	REG_WRITE(SCL_SCALER_ENABLE, 1);
+ 
+ 	/* DCE6 has no SCL_BOUNDARY_MODE bit, skip replace out of bound pixels */
+ 
+@@ -502,6 +505,8 @@ static void dce60_transform_set_scaler(
+ 	REG_SET(DC_LB_MEM_SIZE, 0,
+ 		DC_LB_MEM_SIZE, xfm_dce->lb_memory_size);
+ 
++	REG_WRITE(SCL_UPDATE, 0x00010000);
++
+ 	/* Clear SCL_F_SHARP_CONTROL value to 0 */
+ 	REG_WRITE(SCL_F_SHARP_CONTROL, 0);
+ 
+@@ -527,8 +532,7 @@ static void dce60_transform_set_scaler(
+ 		if (coeffs_v != xfm_dce->filter_v || coeffs_h != xfm_dce->filter_h) {
+ 			/* 4. Program vertical filters */
+ 			if (xfm_dce->filter_v == NULL)
+-				REG_SET(SCL_VERT_FILTER_CONTROL, 0,
+-						SCL_V_2TAP_HARDCODE_COEF_EN, 0);
++				REG_WRITE(SCL_VERT_FILTER_CONTROL, 0);
+ 			program_multi_taps_filter(
+ 					xfm_dce,
+ 					data->taps.v_taps,
+@@ -542,8 +546,7 @@ static void dce60_transform_set_scaler(
+ 
+ 			/* 5. Program horizontal filters */
+ 			if (xfm_dce->filter_h == NULL)
+-				REG_SET(SCL_HORZ_FILTER_CONTROL, 0,
+-						SCL_H_2TAP_HARDCODE_COEF_EN, 0);
++				REG_WRITE(SCL_HORZ_FILTER_CONTROL, 0);
+ 			program_multi_taps_filter(
+ 					xfm_dce,
+ 					data->taps.h_taps,
+@@ -566,6 +569,8 @@ static void dce60_transform_set_scaler(
+ 	/* DCE6 has no SCL_COEF_UPDATE_COMPLETE bit to flip to new coefficient memory */
+ 
+ 	/* DCE6 DATA_FORMAT register does not support ALPHA_EN */
++
++	REG_WRITE(SCL_UPDATE, 0);
+ }
+ #endif
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h
+index cbce194ec7b82b..eb716e8337e236 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h
+@@ -155,6 +155,9 @@
+ 	SRI(SCL_COEF_RAM_TAP_DATA, SCL, id), \
+ 	SRI(VIEWPORT_START, SCL, id), \
+ 	SRI(VIEWPORT_SIZE, SCL, id), \
++	SRI(SCL_SCALER_ENABLE, SCL, id), \
++	SRI(SCL_HORZ_FILTER_INIT_RGB_LUMA, SCL, id), \
++	SRI(SCL_HORZ_FILTER_INIT_CHROMA, SCL, id), \
+ 	SRI(SCL_HORZ_FILTER_SCALE_RATIO, SCL, id), \
+ 	SRI(SCL_VERT_FILTER_SCALE_RATIO, SCL, id), \
+ 	SRI(SCL_VERT_FILTER_INIT, SCL, id), \
+@@ -590,6 +593,7 @@ struct dce_transform_registers {
+ 	uint32_t SCL_VERT_FILTER_SCALE_RATIO;
+ 	uint32_t SCL_HORZ_FILTER_INIT;
+ #if defined(CONFIG_DRM_AMD_DC_SI)
++	uint32_t SCL_SCALER_ENABLE;
+ 	uint32_t SCL_HORZ_FILTER_INIT_RGB_LUMA;
+ 	uint32_t SCL_HORZ_FILTER_INIT_CHROMA;
+ #endif
+diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h
+index 9de01ae574c035..067eddd9c62d80 100644
+--- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h
++++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h
+@@ -4115,6 +4115,7 @@
+ #define mmSCL0_SCL_COEF_RAM_CONFLICT_STATUS 0x1B55
+ #define mmSCL0_SCL_COEF_RAM_SELECT 0x1B40
+ #define mmSCL0_SCL_COEF_RAM_TAP_DATA 0x1B41
++#define mmSCL0_SCL_SCALER_ENABLE 0x1B42
+ #define mmSCL0_SCL_CONTROL 0x1B44
+ #define mmSCL0_SCL_DEBUG 0x1B6A
+ #define mmSCL0_SCL_DEBUG2 0x1B69
+@@ -4144,6 +4145,7 @@
+ #define mmSCL1_SCL_COEF_RAM_CONFLICT_STATUS 0x1E55
+ #define mmSCL1_SCL_COEF_RAM_SELECT 0x1E40
+ #define mmSCL1_SCL_COEF_RAM_TAP_DATA 0x1E41
++#define mmSCL1_SCL_SCALER_ENABLE 0x1E42
+ #define mmSCL1_SCL_CONTROL 0x1E44
+ #define mmSCL1_SCL_DEBUG 0x1E6A
+ #define mmSCL1_SCL_DEBUG2 0x1E69
+@@ -4173,6 +4175,7 @@
+ #define mmSCL2_SCL_COEF_RAM_CONFLICT_STATUS 0x4155
+ #define mmSCL2_SCL_COEF_RAM_SELECT 0x4140
+ #define mmSCL2_SCL_COEF_RAM_TAP_DATA 0x4141
++#define mmSCL2_SCL_SCALER_ENABLE 0x4142
+ #define mmSCL2_SCL_CONTROL 0x4144
+ #define mmSCL2_SCL_DEBUG 0x416A
+ #define mmSCL2_SCL_DEBUG2 0x4169
+@@ -4202,6 +4205,7 @@
+ #define mmSCL3_SCL_COEF_RAM_CONFLICT_STATUS 0x4455
+ #define mmSCL3_SCL_COEF_RAM_SELECT 0x4440
+ #define mmSCL3_SCL_COEF_RAM_TAP_DATA 0x4441
++#define mmSCL3_SCL_SCALER_ENABLE 0x4442
+ #define mmSCL3_SCL_CONTROL 0x4444
+ #define mmSCL3_SCL_DEBUG 0x446A
+ #define mmSCL3_SCL_DEBUG2 0x4469
+@@ -4231,6 +4235,7 @@
+ #define mmSCL4_SCL_COEF_RAM_CONFLICT_STATUS 0x4755
+ #define mmSCL4_SCL_COEF_RAM_SELECT 0x4740
+ #define mmSCL4_SCL_COEF_RAM_TAP_DATA 0x4741
++#define mmSCL4_SCL_SCALER_ENABLE 0x4742
+ #define mmSCL4_SCL_CONTROL 0x4744
+ #define mmSCL4_SCL_DEBUG 0x476A
+ #define mmSCL4_SCL_DEBUG2 0x4769
+@@ -4260,6 +4265,7 @@
+ #define mmSCL5_SCL_COEF_RAM_CONFLICT_STATUS 0x4A55
+ #define mmSCL5_SCL_COEF_RAM_SELECT 0x4A40
+ #define mmSCL5_SCL_COEF_RAM_TAP_DATA 0x4A41
++#define mmSCL5_SCL_SCALER_ENABLE 0x4A42
+ #define mmSCL5_SCL_CONTROL 0x4A44
+ #define mmSCL5_SCL_DEBUG 0x4A6A
+ #define mmSCL5_SCL_DEBUG2 0x4A69
+@@ -4287,6 +4293,7 @@
+ #define mmSCL_COEF_RAM_CONFLICT_STATUS 0x1B55
+ #define mmSCL_COEF_RAM_SELECT 0x1B40
+ #define mmSCL_COEF_RAM_TAP_DATA 0x1B41
++#define mmSCL_SCALER_ENABLE 0x1B42
+ #define mmSCL_CONTROL 0x1B44
+ #define mmSCL_DEBUG 0x1B6A
+ #define mmSCL_DEBUG2 0x1B69
+diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h
+index bd8085ec54ed57..da5596fbfdcb31 100644
+--- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h
++++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h
+@@ -8648,6 +8648,8 @@
+ #define REGAMMA_LUT_INDEX__REGAMMA_LUT_INDEX__SHIFT 0x00000000
+ #define REGAMMA_LUT_WRITE_EN_MASK__REGAMMA_LUT_WRITE_EN_MASK_MASK 0x00000007L
+ #define REGAMMA_LUT_WRITE_EN_MASK__REGAMMA_LUT_WRITE_EN_MASK__SHIFT 0x00000000
++#define SCL_SCALER_ENABLE__SCL_SCALE_EN_MASK 0x00000001L
++#define SCL_SCALER_ENABLE__SCL_SCALE_EN__SHIFT 0x00000000
+ #define SCL_ALU_CONTROL__SCL_ALU_DISABLE_MASK 0x00000001L
+ #define SCL_ALU_CONTROL__SCL_ALU_DISABLE__SHIFT 0x00000000
+ #define SCL_BYPASS_CONTROL__SCL_BYPASS_MODE_MASK 0x00000003L
+diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
+index 2016c1e7242fe3..fc433f39c127f6 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
++++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
+@@ -852,7 +852,7 @@ nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
+ 		nvif_vmm_put(vmm, &old_mem->vma[1]);
+ 		nvif_vmm_put(vmm, &old_mem->vma[0]);
+ 	}
+-	return 0;
++	return ret;
+ }
+ 
+ static int
+diff --git a/drivers/gpu/drm/panthor/panthor_drv.c b/drivers/gpu/drm/panthor/panthor_drv.c
+index 03eb7d52209a2f..9360d486e52e5b 100644
+--- a/drivers/gpu/drm/panthor/panthor_drv.c
++++ b/drivers/gpu/drm/panthor/panthor_drv.c
+@@ -1032,14 +1032,15 @@ static int panthor_ioctl_group_create(struct drm_device *ddev, void *data,
+ 
+ 	ret = group_priority_permit(file, args->priority);
+ 	if (ret)
+-		return ret;
++		goto out;
+ 
+ 	ret = panthor_group_create(pfile, args, queue_args);
+-	if (ret >= 0) {
+-		args->group_handle = ret;
+-		ret = 0;
+-	}
++	if (ret < 0)
++		goto out;
++	args->group_handle = ret;
++	ret = 0;
+ 
++out:
+ 	kvfree(queue_args);
+ 	return ret;
+ }
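
The panthor hunk converts early returns into a single out: label so queue_args is freed on every path, including the permission-check failure that previously leaked it. The shape of that idiom reduced to standard C; the names and stand-in checks are invented:

    #include <stdlib.h>

    static int do_ioctl(int prio)
    {
        int ret;
        void *args = malloc(64);

        if (!args)
            return -1;

        ret = (prio < 0) ? -1 : 0;  /* stand-in permission check */
        if (ret)
            goto out;

        ret = 42;                   /* stand-in create(): handle or <0 */
        if (ret < 0)
            goto out;
        /* publish the handle ... */
        ret = 0;
    out:
        free(args);                 /* one exit path frees everything */
        return ret;
    }

    int main(void) { return do_ioctl(1) ? 1 : 0; }
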
+diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
+index 92f4261305bd9d..f2ae5d17ea601e 100644
+--- a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
++++ b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
+@@ -576,7 +576,10 @@ static int rcar_mipi_dsi_startup(struct rcar_mipi_dsi *dsi,
+ 	udelay(10);
+ 	rcar_mipi_dsi_clr(dsi, CLOCKSET1, CLOCKSET1_UPDATEPLL);
+ 
+-	ppisetr = PPISETR_DLEN_3 | PPISETR_CLEN;
++	rcar_mipi_dsi_clr(dsi, TXSETR, TXSETR_LANECNT_MASK);
++	rcar_mipi_dsi_set(dsi, TXSETR, dsi->lanes - 1);
++
++	ppisetr = ((BIT(dsi->lanes) - 1) & PPISETR_DLEN_MASK) | PPISETR_CLEN;
+ 	rcar_mipi_dsi_write(dsi, PPISETR, ppisetr);
+ 
+ 	rcar_mipi_dsi_set(dsi, PHYSETUP, PHYSETUP_SHUTDOWNZ);
+diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi_regs.h b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi_regs.h
+index a6b276f1d6ee15..a54c7eb4113b93 100644
+--- a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi_regs.h
++++ b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi_regs.h
+@@ -12,6 +12,9 @@
+ #define LINKSR_LPBUSY			(1 << 1)
+ #define LINKSR_HSBUSY			(1 << 0)
+ 
++#define TXSETR				0x100
++#define TXSETR_LANECNT_MASK		(0x3 << 0)
++
+ /*
+  * Video Mode Register
+  */
+@@ -80,10 +83,7 @@
+  * PHY-Protocol Interface (PPI) Registers
+  */
+ #define PPISETR				0x700
+-#define PPISETR_DLEN_0			(0x1 << 0)
+-#define PPISETR_DLEN_1			(0x3 << 0)
+-#define PPISETR_DLEN_2			(0x7 << 0)
+-#define PPISETR_DLEN_3			(0xf << 0)
++#define PPISETR_DLEN_MASK		(0xf << 0)
+ #define PPISETR_CLEN			(1 << 8)
+ 
+ #define PPICLCR				0x710
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+index ea741bc4ac3fc7..8b72848bb25cd9 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+@@ -1515,6 +1515,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
+ 		       SVGA3dCmdHeader *header)
+ {
+ 	struct vmw_bo *vmw_bo = NULL;
++	struct vmw_resource *res;
+ 	struct vmw_surface *srf = NULL;
+ 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceDMA);
+ 	int ret;
+@@ -1550,18 +1551,24 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
+ 
+ 	dirty = (cmd->body.transfer == SVGA3D_WRITE_HOST_VRAM) ?
+ 		VMW_RES_DIRTY_SET : 0;
+-	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+-				dirty, user_surface_converter,
+-				&cmd->body.host.sid, NULL);
++	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, dirty,
++				user_surface_converter, &cmd->body.host.sid,
++				NULL);
+ 	if (unlikely(ret != 0)) {
+ 		if (unlikely(ret != -ERESTARTSYS))
+ 			VMW_DEBUG_USER("could not find surface for DMA.\n");
+ 		return ret;
+ 	}
+ 
+-	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
++	res = sw_context->res_cache[vmw_res_surface].res;
++	if (!res) {
++		VMW_DEBUG_USER("Invalid DMA surface.\n");
++		return -EINVAL;
++	}
+ 
+-	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->tbo, header);
++	srf = vmw_res_to_srf(res);
++	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->tbo,
++			     header);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
+index e7625b3f71e0ed..80e8bbc3021d1c 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
+@@ -309,8 +309,10 @@ int vmw_validation_add_resource(struct vmw_validation_context *ctx,
+ 		hash_add_rcu(ctx->sw_context->res_ht, &node->hash.head, node->hash.key);
+ 	}
+ 	node->res = vmw_resource_reference_unless_doomed(res);
+-	if (!node->res)
++	if (!node->res) {
++		hash_del_rcu(&node->hash.head);
+ 		return -ESRCH;
++	}
+ 
+ 	node->first_usage = 1;
+ 	if (!res->dev_priv->has_mob) {
+@@ -637,7 +639,7 @@ void vmw_validation_drop_ht(struct vmw_validation_context *ctx)
+ 		hash_del_rcu(&val->hash.head);
+ 
+ 	list_for_each_entry(val, &ctx->resource_ctx_list, head)
+-		hash_del_rcu(&entry->hash.head);
++		hash_del_rcu(&val->hash.head);
+ 
+ 	ctx->sw_context = NULL;
+ }
+diff --git a/drivers/gpu/drm/xe/xe_hw_engine_group.c b/drivers/gpu/drm/xe/xe_hw_engine_group.c
+index 82750520a90a59..f14a3cc7d11730 100644
+--- a/drivers/gpu/drm/xe/xe_hw_engine_group.c
++++ b/drivers/gpu/drm/xe/xe_hw_engine_group.c
+@@ -237,17 +237,13 @@ static int xe_hw_engine_group_suspend_faulting_lr_jobs(struct xe_hw_engine_group
+ 
+ 		err = q->ops->suspend_wait(q);
+ 		if (err)
+-			goto err_suspend;
++			return err;
+ 	}
+ 
+ 	if (need_resume)
+ 		xe_hw_engine_group_resume_faulting_lr_jobs(group);
+ 
+ 	return 0;
+-
+-err_suspend:
+-	up_write(&group->mode_sem);
+-	return err;
+ }
+ 
+ /**
+diff --git a/drivers/gpu/drm/xe/xe_query.c b/drivers/gpu/drm/xe/xe_query.c
+index 6e7c940d7e2275..71a5e852fbac78 100644
+--- a/drivers/gpu/drm/xe/xe_query.c
++++ b/drivers/gpu/drm/xe/xe_query.c
+@@ -277,8 +277,7 @@ static int query_mem_regions(struct xe_device *xe,
+ 	mem_regions->mem_regions[0].instance = 0;
+ 	mem_regions->mem_regions[0].min_page_size = PAGE_SIZE;
+ 	mem_regions->mem_regions[0].total_size = man->size << PAGE_SHIFT;
+-	if (perfmon_capable())
+-		mem_regions->mem_regions[0].used = ttm_resource_manager_usage(man);
++	mem_regions->mem_regions[0].used = ttm_resource_manager_usage(man);
+ 	mem_regions->num_mem_regions = 1;
+ 
+ 	for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) {
+@@ -294,13 +293,11 @@ static int query_mem_regions(struct xe_device *xe,
+ 			mem_regions->mem_regions[mem_regions->num_mem_regions].total_size =
+ 				man->size;
+ 
+-			if (perfmon_capable()) {
+-				xe_ttm_vram_get_used(man,
+-					&mem_regions->mem_regions
+-					[mem_regions->num_mem_regions].used,
+-					&mem_regions->mem_regions
+-					[mem_regions->num_mem_regions].cpu_visible_used);
+-			}
++			xe_ttm_vram_get_used(man,
++					     &mem_regions->mem_regions
++					     [mem_regions->num_mem_regions].used,
++					     &mem_regions->mem_regions
++					     [mem_regions->num_mem_regions].cpu_visible_used);
+ 
+ 			mem_regions->mem_regions[mem_regions->num_mem_regions].cpu_visible_size =
+ 				xe_ttm_vram_get_cpu_visible_size(man);
+diff --git a/drivers/iio/adc/pac1934.c b/drivers/iio/adc/pac1934.c
+index c3f9fa307b84c2..2db16c36d81449 100644
+--- a/drivers/iio/adc/pac1934.c
++++ b/drivers/iio/adc/pac1934.c
+@@ -88,6 +88,7 @@
+ #define PAC1934_VPOWER_3_ADDR			0x19
+ #define PAC1934_VPOWER_4_ADDR			0x1A
+ #define PAC1934_REFRESH_V_REG_ADDR		0x1F
++#define PAC1934_SLOW_REG_ADDR			0x20
+ #define PAC1934_CTRL_STAT_REGS_ADDR		0x1C
+ #define PAC1934_PID_REG_ADDR			0xFD
+ #define PAC1934_MID_REG_ADDR			0xFE
+@@ -1265,8 +1266,23 @@ static int pac1934_chip_configure(struct pac1934_chip_info *info)
+ 	/* no SLOW triggered REFRESH, clear POR */
+ 	regs[PAC1934_SLOW_REG_OFF] = 0;
+ 
+-	ret =  i2c_smbus_write_block_data(client, PAC1934_CTRL_STAT_REGS_ADDR,
+-					  ARRAY_SIZE(regs), (u8 *)regs);
++	/*
++	 * Write the three bytes sequentially, as the device does not support
++	 * block write.
++	 */
++	ret = i2c_smbus_write_byte_data(client, PAC1934_CTRL_STAT_REGS_ADDR,
++					regs[PAC1934_CHANNEL_DIS_REG_OFF]);
++	if (ret)
++		return ret;
++
++	ret = i2c_smbus_write_byte_data(client,
++					PAC1934_CTRL_STAT_REGS_ADDR + PAC1934_NEG_PWR_REG_OFF,
++					regs[PAC1934_NEG_PWR_REG_OFF]);
++	if (ret)
++		return ret;
++
++	ret = i2c_smbus_write_byte_data(client, PAC1934_SLOW_REG_ADDR,
++					regs[PAC1934_SLOW_REG_OFF]);
+ 	if (ret)
+ 		return ret;
+ 
+diff --git a/drivers/iio/adc/xilinx-ams.c b/drivers/iio/adc/xilinx-ams.c
+index ebc583b07e0c00..e12eb7137b06a3 100644
+--- a/drivers/iio/adc/xilinx-ams.c
++++ b/drivers/iio/adc/xilinx-ams.c
+@@ -118,7 +118,7 @@
+ #define AMS_ALARM_THRESHOLD_OFF_10	0x10
+ #define AMS_ALARM_THRESHOLD_OFF_20	0x20
+ 
+-#define AMS_ALARM_THR_DIRECT_MASK	BIT(1)
++#define AMS_ALARM_THR_DIRECT_MASK	BIT(0)
+ #define AMS_ALARM_THR_MIN		0x0000
+ #define AMS_ALARM_THR_MAX		(BIT(16) - 1)
+ 
+@@ -389,6 +389,29 @@ static void ams_update_pl_alarm(struct ams *ams, unsigned long alarm_mask)
+ 	ams_pl_update_reg(ams, AMS_REG_CONFIG3, AMS_REGCFG3_ALARM_MASK, cfg);
+ }
+ 
++static void ams_unmask(struct ams *ams)
++{
++	unsigned int status, unmask;
++
++	status = readl(ams->base + AMS_ISR_0);
++
++	/* Clear those bits which are not active anymore */
++	unmask = (ams->current_masked_alarm ^ status) & ams->current_masked_alarm;
++
++	/* Clear status of disabled alarm */
++	unmask |= ams->intr_mask;
++
++	ams->current_masked_alarm &= status;
++
++	/* Also clear those which are masked out anyway */
++	ams->current_masked_alarm &= ~ams->intr_mask;
++
++	/* Clear the interrupts before we unmask them */
++	writel(unmask, ams->base + AMS_ISR_0);
++
++	ams_update_intrmask(ams, ~AMS_ALARM_MASK, ~AMS_ALARM_MASK);
++}
++
+ static void ams_update_alarm(struct ams *ams, unsigned long alarm_mask)
+ {
+ 	unsigned long flags;
+@@ -401,6 +424,7 @@ static void ams_update_alarm(struct ams *ams, unsigned long alarm_mask)
+ 
+ 	spin_lock_irqsave(&ams->intr_lock, flags);
+ 	ams_update_intrmask(ams, AMS_ISR0_ALARM_MASK, ~alarm_mask);
++	ams_unmask(ams);
+ 	spin_unlock_irqrestore(&ams->intr_lock, flags);
+ }
+ 
+@@ -1035,28 +1059,9 @@ static void ams_handle_events(struct iio_dev *indio_dev, unsigned long events)
+ static void ams_unmask_worker(struct work_struct *work)
+ {
+ 	struct ams *ams = container_of(work, struct ams, ams_unmask_work.work);
+-	unsigned int status, unmask;
+ 
+ 	spin_lock_irq(&ams->intr_lock);
+-
+-	status = readl(ams->base + AMS_ISR_0);
+-
+-	/* Clear those bits which are not active anymore */
+-	unmask = (ams->current_masked_alarm ^ status) & ams->current_masked_alarm;
+-
+-	/* Clear status of disabled alarm */
+-	unmask |= ams->intr_mask;
+-
+-	ams->current_masked_alarm &= status;
+-
+-	/* Also clear those which are masked out anyway */
+-	ams->current_masked_alarm &= ~ams->intr_mask;
+-
+-	/* Clear the interrupts before we unmask them */
+-	writel(unmask, ams->base + AMS_ISR_0);
+-
+-	ams_update_intrmask(ams, ~AMS_ALARM_MASK, ~AMS_ALARM_MASK);
+-
++	ams_unmask(ams);
+ 	spin_unlock_irq(&ams->intr_lock);
+ 
+ 	/* If still pending some alarm re-trigger the timer */
+diff --git a/drivers/iio/dac/ad5360.c b/drivers/iio/dac/ad5360.c
+index e0b7f658d61196..cf9cf90cd6e276 100644
+--- a/drivers/iio/dac/ad5360.c
++++ b/drivers/iio/dac/ad5360.c
+@@ -262,7 +262,7 @@ static int ad5360_update_ctrl(struct iio_dev *indio_dev, unsigned int set,
+ 	unsigned int clr)
+ {
+ 	struct ad5360_state *st = iio_priv(indio_dev);
+-	unsigned int ret;
++	int ret;
+ 
+ 	mutex_lock(&st->lock);
+ 
+diff --git a/drivers/iio/dac/ad5421.c b/drivers/iio/dac/ad5421.c
+index 7644acfd879e04..9228e3cee1b850 100644
+--- a/drivers/iio/dac/ad5421.c
++++ b/drivers/iio/dac/ad5421.c
+@@ -186,7 +186,7 @@ static int ad5421_update_ctrl(struct iio_dev *indio_dev, unsigned int set,
+ 	unsigned int clr)
+ {
+ 	struct ad5421_state *st = iio_priv(indio_dev);
+-	unsigned int ret;
++	int ret;
+ 
+ 	mutex_lock(&st->lock);
+ 
+diff --git a/drivers/iio/frequency/adf4350.c b/drivers/iio/frequency/adf4350.c
+index e13e64a5164c14..f58d23c049ec3f 100644
+--- a/drivers/iio/frequency/adf4350.c
++++ b/drivers/iio/frequency/adf4350.c
+@@ -149,6 +149,19 @@ static int adf4350_set_freq(struct adf4350_state *st, unsigned long long freq)
+ 	if (freq > ADF4350_MAX_OUT_FREQ || freq < st->min_out_freq)
+ 		return -EINVAL;
+ 
++	st->r4_rf_div_sel = 0;
++
++	/*
++	 * !\TODO: The below computation is making sure we get a power of 2
++	 * shift (st->r4_rf_div_sel) so that freq becomes higher or equal to
++	 * ADF4350_MIN_VCO_FREQ. This might be simplified with fls()/fls_long()
++	 * and friends.
++	 */
++	while (freq < ADF4350_MIN_VCO_FREQ) {
++		freq <<= 1;
++		st->r4_rf_div_sel++;
++	}
++
+ 	if (freq > ADF4350_MAX_FREQ_45_PRESC) {
+ 		prescaler = ADF4350_REG1_PRESCALER;
+ 		mdiv = 75;
+@@ -157,13 +170,6 @@ static int adf4350_set_freq(struct adf4350_state *st, unsigned long long freq)
+ 		mdiv = 23;
+ 	}
+ 
+-	st->r4_rf_div_sel = 0;
+-
+-	while (freq < ADF4350_MIN_VCO_FREQ) {
+-		freq <<= 1;
+-		st->r4_rf_div_sel++;
+-	}
+-
+ 	/*
+ 	 * Allow a predefined reference division factor
+ 	 * if not set, compute our own
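
The adf4350 reorder matters because the 4/5-versus-8/9 prescaler threshold must be tested against the VCO frequency, that is, freq after the doubling loop, not the raw requested output. A sketch with ADF4350-like constants; the values here are illustrative only:

    #include <stdio.h>

    #define MIN_VCO   2200000000ULL  /* hypothetical, ADF4350-like */
    #define PRESC_THR 3000000000ULL

    int main(void)
    {
        unsigned long long freq = 400000000ULL;  /* requested output */
        unsigned int rf_div_sel = 0;

        /* Scale up into the VCO range first ... */
        while (freq < MIN_VCO) {
            freq <<= 1;
            rf_div_sel++;
        }
        /* ... then pick the prescaler from the VCO frequency. */
        printf("vco=%llu div=2^%u prescaler=%s\n",
               freq, rf_div_sel, freq > PRESC_THR ? "8/9" : "4/5");
        return 0;
    }

Testing 400 MHz before the shift would have chosen 4/5; the 3.2 GHz VCO frequency correctly selects 8/9.
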
+diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
+index 73aeddf53b767d..790bc8fbf21da3 100644
+--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
++++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
+@@ -847,10 +847,6 @@ static int inv_icm42600_resume(struct device *dev)
+ 	if (ret)
+ 		goto out_unlock;
+ 
+-	pm_runtime_disable(dev);
+-	pm_runtime_set_active(dev);
+-	pm_runtime_enable(dev);
+-
+ 	/* restore sensors state */
+ 	ret = inv_icm42600_set_pwr_mgmt0(st, st->suspended.gyro,
+ 					 st->suspended.accel,
+diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
+index 32d6710e264a4e..667407974e23a7 100644
+--- a/drivers/iommu/intel/iommu.c
++++ b/drivers/iommu/intel/iommu.c
+@@ -3952,7 +3952,7 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
+ 			}
+ 
+ 			if (info->ats_supported && ecap_prs(iommu->ecap) &&
+-			    pci_pri_supported(pdev))
++			    ecap_pds(iommu->ecap) && pci_pri_supported(pdev))
+ 				info->pri_supported = 1;
+ 		}
+ 	}
+diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
+index 36dbcf2d728a54..9c4af7d5884631 100644
+--- a/drivers/irqchip/irq-sifive-plic.c
++++ b/drivers/irqchip/irq-sifive-plic.c
+@@ -252,11 +252,11 @@ static int plic_irq_suspend(void)
+ 
+ 	priv = per_cpu_ptr(&plic_handlers, smp_processor_id())->priv;
+ 
+-	for (i = 0; i < priv->nr_irqs; i++)
+-		if (readl(priv->regs + PRIORITY_BASE + i * PRIORITY_PER_ID))
+-			__set_bit(i, priv->prio_save);
+-		else
+-			__clear_bit(i, priv->prio_save);
++	/* irq ID 0 is reserved */
++	for (i = 1; i < priv->nr_irqs; i++) {
++		__assign_bit(i, priv->prio_save,
++			     readl(priv->regs + PRIORITY_BASE + i * PRIORITY_PER_ID));
++	}
+ 
+ 	for_each_cpu(cpu, cpu_present_mask) {
+ 		struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);
+@@ -284,7 +284,8 @@ static void plic_irq_resume(void)
+ 
+ 	priv = per_cpu_ptr(&plic_handlers, smp_processor_id())->priv;
+ 
+-	for (i = 0; i < priv->nr_irqs; i++) {
++	/* irq ID 0 is reserved */
++	for (i = 1; i < priv->nr_irqs; i++) {
+ 		index = BIT_WORD(i);
+ 		writel((priv->prio_save[index] & BIT_MASK(i)) ? 1 : 0,
+ 		       priv->regs + PRIORITY_BASE + i * PRIORITY_PER_ID);
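
The PLIC suspend/resume hunks above skip source 0, which is reserved, and collapse the open-coded set/clear branches into __assign_bit(). A userspace stand-in for that helper:

    #include <stdio.h>

    #define BITS_PER_LONG (8 * sizeof(unsigned long))

    /* Minimal stand-in for the kernel's __assign_bit(). */
    static void assign_bit(unsigned int nr, unsigned long *map, int set)
    {
        if (set)
            map[nr / BITS_PER_LONG] |=  1UL << (nr % BITS_PER_LONG);
        else
            map[nr / BITS_PER_LONG] &= ~(1UL << (nr % BITS_PER_LONG));
    }

    int main(void)
    {
        unsigned long save[2] = { 0 };
        unsigned int prio[8] = { 0, 7, 0, 1, 0, 0, 3, 0 };

        for (unsigned int i = 1; i < 8; i++)  /* source 0 is reserved */
            assign_bit(i, save, prio[i] != 0);
        printf("saved mask: 0x%lx\n", save[0]);  /* 0x4a */
        return 0;
    }
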
+diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c
+index d24f71819c3d65..38ab35157c85f9 100644
+--- a/drivers/mailbox/mtk-cmdq-mailbox.c
++++ b/drivers/mailbox/mtk-cmdq-mailbox.c
+@@ -379,20 +379,13 @@ static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
+ 	struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
+ 	struct cmdq_task *task;
+ 	unsigned long curr_pa, end_pa;
+-	int ret;
+ 
+ 	/* Client should not flush new tasks if suspended. */
+ 	WARN_ON(cmdq->suspended);
+ 
+-	ret = pm_runtime_get_sync(cmdq->mbox.dev);
+-	if (ret < 0)
+-		return ret;
+-
+ 	task = kzalloc(sizeof(*task), GFP_ATOMIC);
+-	if (!task) {
+-		pm_runtime_put_autosuspend(cmdq->mbox.dev);
++	if (!task)
+ 		return -ENOMEM;
+-	}
+ 
+ 	task->cmdq = cmdq;
+ 	INIT_LIST_HEAD(&task->list_entry);
+@@ -439,9 +432,6 @@ static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
+ 	}
+ 	list_move_tail(&task->list_entry, &thread->task_busy_list);
+ 
+-	pm_runtime_mark_last_busy(cmdq->mbox.dev);
+-	pm_runtime_put_autosuspend(cmdq->mbox.dev);
+-
+ 	return 0;
+ }
+ 
+diff --git a/drivers/mailbox/zynqmp-ipi-mailbox.c b/drivers/mailbox/zynqmp-ipi-mailbox.c
+index d59fcb74b34794..5bf81b60e9e67f 100644
+--- a/drivers/mailbox/zynqmp-ipi-mailbox.c
++++ b/drivers/mailbox/zynqmp-ipi-mailbox.c
+@@ -62,7 +62,8 @@
+ #define DST_BIT_POS	9U
+ #define SRC_BITMASK	GENMASK(11, 8)
+ 
+-#define MAX_SGI 16
++/* Macro to represent SGI type for IPI IRQs */
++#define IPI_IRQ_TYPE_SGI	2
+ 
+ /*
+  * Module parameters
+@@ -121,6 +122,7 @@ struct zynqmp_ipi_mbox {
+  * @dev:                  device pointer corresponding to the Xilinx ZynqMP
+  *                        IPI agent
+  * @irq:                  IPI agent interrupt ID
++ * @irq_type:             IPI SGI or SPI IRQ type
+  * @method:               IPI SMC or HVC is going to be used
+  * @local_id:             local IPI agent ID
+  * @virq_sgi:             IRQ number mapped to SGI
+@@ -130,6 +132,7 @@ struct zynqmp_ipi_mbox {
+ struct zynqmp_ipi_pdata {
+ 	struct device *dev;
+ 	int irq;
++	unsigned int irq_type;
+ 	unsigned int method;
+ 	u32 local_id;
+ 	int virq_sgi;
+@@ -887,17 +890,14 @@ static void zynqmp_ipi_free_mboxes(struct zynqmp_ipi_pdata *pdata)
+ 	struct zynqmp_ipi_mbox *ipi_mbox;
+ 	int i;
+ 
+-	if (pdata->irq < MAX_SGI)
++	if (pdata->irq_type == IPI_IRQ_TYPE_SGI)
+ 		xlnx_mbox_cleanup_sgi(pdata);
+ 
+-	i = pdata->num_mboxes;
++	i = pdata->num_mboxes - 1;
+ 	for (; i >= 0; i--) {
+ 		ipi_mbox = &pdata->ipi_mboxes[i];
+-		if (ipi_mbox->dev.parent) {
+-			mbox_controller_unregister(&ipi_mbox->mbox);
+-			if (device_is_registered(&ipi_mbox->dev))
+-				device_unregister(&ipi_mbox->dev);
+-		}
++		if (device_is_registered(&ipi_mbox->dev))
++			device_unregister(&ipi_mbox->dev);
+ 	}
+ }
+ 
+@@ -959,14 +959,16 @@ static int zynqmp_ipi_probe(struct platform_device *pdev)
+ 		dev_err(dev, "failed to parse interrupts\n");
+ 		goto free_mbox_dev;
+ 	}
+-	ret = out_irq.args[1];
++
++	/* Use interrupt type to distinguish SGI and SPI interrupts */
++	pdata->irq_type = out_irq.args[0];
+ 
+ 	/*
+ 	 * If Interrupt number is in SGI range, then request SGI else request
+ 	 * IPI system IRQ.
+ 	 */
+-	if (ret < MAX_SGI) {
+-		pdata->irq = ret;
++	if (pdata->irq_type == IPI_IRQ_TYPE_SGI) {
++		pdata->irq = out_irq.args[1];
+ 		ret = xlnx_mbox_init_sgi(pdev, pdata->irq, pdata);
+ 		if (ret)
+ 			goto free_mbox_dev;
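
Besides keying the SGI check off the interrupt-type cell instead of the IRQ
number, the cleanup loop above fixes an off-by-one: starting at num_mboxes
would index one element past the array. The usual reverse-iteration shape,
with n, arr and cleanup() as placeholders:

        /* visit arr[n-1] .. arr[0]; an initial i = n would overrun */
        for (i = n - 1; i >= 0; i--)
                cleanup(&arr[i]);
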
+diff --git a/drivers/media/cec/usb/extron-da-hd-4k-plus/Makefile b/drivers/media/cec/usb/extron-da-hd-4k-plus/Makefile
+index 2e8f7f60263f1c..08d58524419f7d 100644
+--- a/drivers/media/cec/usb/extron-da-hd-4k-plus/Makefile
++++ b/drivers/media/cec/usb/extron-da-hd-4k-plus/Makefile
+@@ -1,8 +1,2 @@
+ extron-da-hd-4k-plus-cec-objs := extron-da-hd-4k-plus.o cec-splitter.o
+ obj-$(CONFIG_USB_EXTRON_DA_HD_4K_PLUS_CEC) := extron-da-hd-4k-plus-cec.o
+-
+-all:
+-	$(MAKE) -C $(KDIR) M=$(shell pwd) modules
+-
+-install:
+-	$(MAKE) -C $(KDIR) M=$(shell pwd) modules_install
+diff --git a/drivers/media/i2c/mt9v111.c b/drivers/media/i2c/mt9v111.c
+index 723fe138e7bcc0..8bf06a763a2519 100644
+--- a/drivers/media/i2c/mt9v111.c
++++ b/drivers/media/i2c/mt9v111.c
+@@ -532,8 +532,8 @@ static int mt9v111_calc_frame_rate(struct mt9v111_dev *mt9v111,
+ static int mt9v111_hw_config(struct mt9v111_dev *mt9v111)
+ {
+ 	struct i2c_client *c = mt9v111->client;
+-	unsigned int ret;
+ 	u16 outfmtctrl2;
++	int ret;
+ 
+ 	/* Force device reset. */
+ 	ret = __mt9v111_hw_reset(mt9v111);
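
The mt9v111 type change matters because kernel error codes are negative: with
an unsigned ret, a check like "ret < 0" can never fire. A tiny standalone
illustration, not driver code:

        unsigned int ret = -EINVAL;     /* stored as 4294967274 */

        if (ret < 0)                    /* never true for an unsigned type */
                return ret;             /* error silently ignored */
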
+diff --git a/drivers/media/mc/mc-devnode.c b/drivers/media/mc/mc-devnode.c
+index 56444edaf13651..6daa7aa9944226 100644
+--- a/drivers/media/mc/mc-devnode.c
++++ b/drivers/media/mc/mc-devnode.c
+@@ -50,11 +50,6 @@ static void media_devnode_release(struct device *cd)
+ {
+ 	struct media_devnode *devnode = to_media_devnode(cd);
+ 
+-	mutex_lock(&media_devnode_lock);
+-	/* Mark device node number as free */
+-	clear_bit(devnode->minor, media_devnode_nums);
+-	mutex_unlock(&media_devnode_lock);
+-
+ 	/* Release media_devnode and perform other cleanups as needed. */
+ 	if (devnode->release)
+ 		devnode->release(devnode);
+@@ -281,6 +276,7 @@ void media_devnode_unregister(struct media_devnode *devnode)
+ 	/* Delete the cdev on this minor as well */
+ 	cdev_device_del(&devnode->cdev, &devnode->dev);
+ 	devnode->media_dev = NULL;
++	clear_bit(devnode->minor, media_devnode_nums);
+ 	mutex_unlock(&media_devnode_lock);
+ 
+ 	put_device(&devnode->dev);
+diff --git a/drivers/media/mc/mc-entity.c b/drivers/media/mc/mc-entity.c
+index 96dd0f6ccd0d0a..b3761839a5218f 100644
+--- a/drivers/media/mc/mc-entity.c
++++ b/drivers/media/mc/mc-entity.c
+@@ -691,7 +691,7 @@ static int media_pipeline_explore_next_link(struct media_pipeline *pipe,
+ 		 * (already discovered through iterating over links) and pads
+ 		 * not internally connected.
+ 		 */
+-		if (origin == local || !local->num_links ||
++		if (origin == local || local->num_links ||
+ 		    !media_entity_has_pad_interdep(origin->entity, origin->index,
+ 						   local->index))
+ 			continue;
+diff --git a/drivers/media/pci/cx18/cx18-queue.c b/drivers/media/pci/cx18/cx18-queue.c
+index 013694bfcb1c1b..7cbb2d5869320b 100644
+--- a/drivers/media/pci/cx18/cx18-queue.c
++++ b/drivers/media/pci/cx18/cx18-queue.c
+@@ -379,15 +379,22 @@ int cx18_stream_alloc(struct cx18_stream *s)
+ 			break;
+ 		}
+ 
++		buf->dma_handle = dma_map_single(&s->cx->pci_dev->dev,
++						 buf->buf, s->buf_size,
++						 s->dma);
++		if (dma_mapping_error(&s->cx->pci_dev->dev, buf->dma_handle)) {
++			kfree(buf->buf);
++			kfree(mdl);
++			kfree(buf);
++			break;
++		}
++
+ 		INIT_LIST_HEAD(&mdl->list);
+ 		INIT_LIST_HEAD(&mdl->buf_list);
+ 		mdl->id = s->mdl_base_idx; /* a somewhat safe value */
+ 		cx18_enqueue(s, mdl, &s->q_idle);
+ 
+ 		INIT_LIST_HEAD(&buf->list);
+-		buf->dma_handle = dma_map_single(&s->cx->pci_dev->dev,
+-						 buf->buf, s->buf_size,
+-						 s->dma);
+ 		cx18_buf_sync_for_cpu(s, buf);
+ 		list_add_tail(&buf->list, &s->buf_pool);
+ 	}
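
The cx18 change (and the ivtv-yuv hunk further below) applies the DMA-API rule
that every dma_map_single() result must be checked with dma_mapping_error()
before use. The general shape, with dev/buf/size/dir as placeholders:

        dma_addr_t handle = dma_map_single(dev, buf, size, dir);

        if (dma_mapping_error(dev, handle)) {
                /* handle is unusable; undo earlier allocations */
                kfree(buf);
                return -ENOMEM;
        }
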
+diff --git a/drivers/media/pci/ivtv/ivtv-irq.c b/drivers/media/pci/ivtv/ivtv-irq.c
+index b7aaa8b4a7841d..e39bf64c5c715b 100644
+--- a/drivers/media/pci/ivtv/ivtv-irq.c
++++ b/drivers/media/pci/ivtv/ivtv-irq.c
+@@ -351,7 +351,7 @@ void ivtv_dma_stream_dec_prepare(struct ivtv_stream *s, u32 offset, int lock)
+ 
+ 	/* Insert buffer block for YUV if needed */
+ 	if (s->type == IVTV_DEC_STREAM_TYPE_YUV && f->offset_y) {
+-		if (yi->blanking_dmaptr) {
++		if (yi->blanking_ptr) {
+ 			s->sg_pending[idx].src = yi->blanking_dmaptr;
+ 			s->sg_pending[idx].dst = offset;
+ 			s->sg_pending[idx].size = 720 * 16;
+diff --git a/drivers/media/pci/ivtv/ivtv-yuv.c b/drivers/media/pci/ivtv/ivtv-yuv.c
+index 2d9274537725af..71f0401066471a 100644
+--- a/drivers/media/pci/ivtv/ivtv-yuv.c
++++ b/drivers/media/pci/ivtv/ivtv-yuv.c
+@@ -125,7 +125,7 @@ static int ivtv_yuv_prep_user_dma(struct ivtv *itv, struct ivtv_user_dma *dma,
+ 	ivtv_udma_fill_sg_array(dma, y_buffer_offset, uv_buffer_offset, y_size);
+ 
+ 	/* If we've offset the y plane, ensure top area is blanked */
+-	if (f->offset_y && yi->blanking_dmaptr) {
++	if (f->offset_y && yi->blanking_ptr) {
+ 		dma->SGarray[dma->SG_length].size = cpu_to_le32(720*16);
+ 		dma->SGarray[dma->SG_length].src = cpu_to_le32(yi->blanking_dmaptr);
+ 		dma->SGarray[dma->SG_length].dst = cpu_to_le32(IVTV_DECODER_OFFSET + yuv_offset[frame]);
+@@ -929,6 +929,12 @@ static void ivtv_yuv_init(struct ivtv *itv)
+ 		yi->blanking_dmaptr = dma_map_single(&itv->pdev->dev,
+ 						     yi->blanking_ptr,
+ 						     720 * 16, DMA_TO_DEVICE);
++		if (dma_mapping_error(&itv->pdev->dev, yi->blanking_dmaptr)) {
++			kfree(yi->blanking_ptr);
++			yi->blanking_ptr = NULL;
++			yi->blanking_dmaptr = 0;
++			IVTV_DEBUG_WARN("Failed to dma_map yuv blanking buffer\n");
++		}
+ 	} else {
+ 		yi->blanking_dmaptr = 0;
+ 		IVTV_DEBUG_WARN("Failed to allocate yuv blanking buffer\n");
+diff --git a/drivers/media/pci/mgb4/mgb4_trigger.c b/drivers/media/pci/mgb4/mgb4_trigger.c
+index 923650d53d4c82..d7dddc5c8728e8 100644
+--- a/drivers/media/pci/mgb4/mgb4_trigger.c
++++ b/drivers/media/pci/mgb4/mgb4_trigger.c
+@@ -91,7 +91,7 @@ static irqreturn_t trigger_handler(int irq, void *p)
+ 	struct {
+ 		u32 data;
+ 		s64 ts __aligned(8);
+-	} scan;
++	} scan = { };
+ 
+ 	scan.data = mgb4_read_reg(&st->mgbdev->video, 0xA0);
+ 	mgb4_write_reg(&st->mgbdev->video, 0xA0, scan.data);
+diff --git a/drivers/media/platform/qcom/venus/firmware.c b/drivers/media/platform/qcom/venus/firmware.c
+index 66a18830e66dac..4e2636b0536693 100644
+--- a/drivers/media/platform/qcom/venus/firmware.c
++++ b/drivers/media/platform/qcom/venus/firmware.c
+@@ -30,7 +30,7 @@ static void venus_reset_cpu(struct venus_core *core)
+ 	u32 fw_size = core->fw.mapped_mem_size;
+ 	void __iomem *wrapper_base;
+ 
+-	if (IS_IRIS2_1(core))
++	if (IS_IRIS2(core) || IS_IRIS2_1(core))
+ 		wrapper_base = core->wrapper_tz_base;
+ 	else
+ 		wrapper_base = core->wrapper_base;
+@@ -42,7 +42,7 @@ static void venus_reset_cpu(struct venus_core *core)
+ 	writel(fw_size, wrapper_base + WRAPPER_NONPIX_START_ADDR);
+ 	writel(fw_size, wrapper_base + WRAPPER_NONPIX_END_ADDR);
+ 
+-	if (IS_IRIS2_1(core)) {
++	if (IS_IRIS2(core) || IS_IRIS2_1(core)) {
+ 		/* Bring XTSS out of reset */
+ 		writel(0, wrapper_base + WRAPPER_TZ_XTSS_SW_RESET);
+ 	} else {
+@@ -68,7 +68,7 @@ int venus_set_hw_state(struct venus_core *core, bool resume)
+ 	if (resume) {
+ 		venus_reset_cpu(core);
+ 	} else {
+-		if (IS_IRIS2_1(core))
++		if (IS_IRIS2(core) || IS_IRIS2_1(core))
+ 			writel(WRAPPER_XTSS_SW_RESET_BIT,
+ 			       core->wrapper_tz_base + WRAPPER_TZ_XTSS_SW_RESET);
+ 		else
+@@ -181,7 +181,7 @@ static int venus_shutdown_no_tz(struct venus_core *core)
+ 	void __iomem *wrapper_base = core->wrapper_base;
+ 	void __iomem *wrapper_tz_base = core->wrapper_tz_base;
+ 
+-	if (IS_IRIS2_1(core)) {
++	if (IS_IRIS2(core) || IS_IRIS2_1(core)) {
+ 		/* Assert the reset to XTSS */
+ 		reg = readl(wrapper_tz_base + WRAPPER_TZ_XTSS_SW_RESET);
+ 		reg |= WRAPPER_XTSS_SW_RESET_BIT;
+diff --git a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd_v6.c b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd_v6.c
+index 47bc3014b5d8b8..f7c682fca64595 100644
+--- a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd_v6.c
++++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd_v6.c
+@@ -14,8 +14,7 @@
+ #include "s5p_mfc_opr.h"
+ #include "s5p_mfc_cmd_v6.h"
+ 
+-static int s5p_mfc_cmd_host2risc_v6(struct s5p_mfc_dev *dev, int cmd,
+-				    const struct s5p_mfc_cmd_args *args)
++static int s5p_mfc_cmd_host2risc_v6(struct s5p_mfc_dev *dev, int cmd)
+ {
+ 	mfc_debug(2, "Issue the command: %d\n", cmd);
+ 
+@@ -31,7 +30,6 @@ static int s5p_mfc_cmd_host2risc_v6(struct s5p_mfc_dev *dev, int cmd,
+ 
+ static int s5p_mfc_sys_init_cmd_v6(struct s5p_mfc_dev *dev)
+ {
+-	struct s5p_mfc_cmd_args h2r_args;
+ 	const struct s5p_mfc_buf_size_v6 *buf_size = dev->variant->buf_size->priv;
+ 	int ret;
+ 
+@@ -41,33 +39,23 @@ static int s5p_mfc_sys_init_cmd_v6(struct s5p_mfc_dev *dev)
+ 
+ 	mfc_write(dev, dev->ctx_buf.dma, S5P_FIMV_CONTEXT_MEM_ADDR_V6);
+ 	mfc_write(dev, buf_size->dev_ctx, S5P_FIMV_CONTEXT_MEM_SIZE_V6);
+-	return s5p_mfc_cmd_host2risc_v6(dev, S5P_FIMV_H2R_CMD_SYS_INIT_V6,
+-					&h2r_args);
++	return s5p_mfc_cmd_host2risc_v6(dev, S5P_FIMV_H2R_CMD_SYS_INIT_V6);
+ }
+ 
+ static int s5p_mfc_sleep_cmd_v6(struct s5p_mfc_dev *dev)
+ {
+-	struct s5p_mfc_cmd_args h2r_args;
+-
+-	memset(&h2r_args, 0, sizeof(struct s5p_mfc_cmd_args));
+-	return s5p_mfc_cmd_host2risc_v6(dev, S5P_FIMV_H2R_CMD_SLEEP_V6,
+-			&h2r_args);
++	return s5p_mfc_cmd_host2risc_v6(dev, S5P_FIMV_H2R_CMD_SLEEP_V6);
+ }
+ 
+ static int s5p_mfc_wakeup_cmd_v6(struct s5p_mfc_dev *dev)
+ {
+-	struct s5p_mfc_cmd_args h2r_args;
+-
+-	memset(&h2r_args, 0, sizeof(struct s5p_mfc_cmd_args));
+-	return s5p_mfc_cmd_host2risc_v6(dev, S5P_FIMV_H2R_CMD_WAKEUP_V6,
+-					&h2r_args);
++	return s5p_mfc_cmd_host2risc_v6(dev, S5P_FIMV_H2R_CMD_WAKEUP_V6);
+ }
+ 
+ /* Open a new instance and get its number */
+ static int s5p_mfc_open_inst_cmd_v6(struct s5p_mfc_ctx *ctx)
+ {
+ 	struct s5p_mfc_dev *dev = ctx->dev;
+-	struct s5p_mfc_cmd_args h2r_args;
+ 	int codec_type;
+ 
+ 	mfc_debug(2, "Requested codec mode: %d\n", ctx->codec_mode);
+@@ -129,23 +117,20 @@ static int s5p_mfc_open_inst_cmd_v6(struct s5p_mfc_ctx *ctx)
+ 	mfc_write(dev, ctx->ctx.size, S5P_FIMV_CONTEXT_MEM_SIZE_V6);
+ 	mfc_write(dev, 0, S5P_FIMV_D_CRC_CTRL_V6); /* no crc */
+ 
+-	return s5p_mfc_cmd_host2risc_v6(dev, S5P_FIMV_H2R_CMD_OPEN_INSTANCE_V6,
+-					&h2r_args);
++	return s5p_mfc_cmd_host2risc_v6(dev, S5P_FIMV_H2R_CMD_OPEN_INSTANCE_V6);
+ }
+ 
+ /* Close instance */
+ static int s5p_mfc_close_inst_cmd_v6(struct s5p_mfc_ctx *ctx)
+ {
+ 	struct s5p_mfc_dev *dev = ctx->dev;
+-	struct s5p_mfc_cmd_args h2r_args;
+ 	int ret = 0;
+ 
+ 	dev->curr_ctx = ctx->num;
+ 	if (ctx->state != MFCINST_FREE) {
+ 		mfc_write(dev, ctx->inst_no, S5P_FIMV_INSTANCE_ID_V6);
+ 		ret = s5p_mfc_cmd_host2risc_v6(dev,
+-					S5P_FIMV_H2R_CMD_CLOSE_INSTANCE_V6,
+-					&h2r_args);
++					S5P_FIMV_H2R_CMD_CLOSE_INSTANCE_V6);
+ 	} else {
+ 		ret = -EINVAL;
+ 	}
+@@ -153,9 +138,15 @@ static int s5p_mfc_close_inst_cmd_v6(struct s5p_mfc_ctx *ctx)
+ 	return ret;
+ }
+ 
++static int s5p_mfc_cmd_host2risc_v6_args(struct s5p_mfc_dev *dev, int cmd,
++				    const struct s5p_mfc_cmd_args *ignored)
++{
++	return s5p_mfc_cmd_host2risc_v6(dev, cmd);
++}
++
+ /* Initialize cmd function pointers for MFC v6 */
+ static const struct s5p_mfc_hw_cmds s5p_mfc_cmds_v6 = {
+-	.cmd_host2risc = s5p_mfc_cmd_host2risc_v6,
++	.cmd_host2risc = s5p_mfc_cmd_host2risc_v6_args,
+ 	.sys_init_cmd = s5p_mfc_sys_init_cmd_v6,
+ 	.sleep_cmd = s5p_mfc_sleep_cmd_v6,
+ 	.wakeup_cmd = s5p_mfc_wakeup_cmd_v6,
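
The s5p-mfc refactor drops the never-used argument from the workhorse function
and keeps the ops-table signature stable through a thin adapter. A generic
sketch of that pattern, with illustrative names only:

        static int do_cmd(struct dev *d, int cmd)
        {
                return issue(d, cmd);   /* real work, no unused argument */
        }

        /* adapter preserves the function-pointer type the ops table expects */
        static int do_cmd_args(struct dev *d, int cmd, const struct args *ignored)
        {
                return do_cmd(d, cmd);
        }
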
+diff --git a/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c b/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c
+index 3853245fcf6e58..f8cb93ce9459fa 100644
+--- a/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c
++++ b/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c
+@@ -52,6 +52,8 @@
+ #define DRAIN_TIMEOUT_MS		50
+ #define DRAIN_BUFFER_SIZE		SZ_32K
+ 
++#define CSI2RX_BRIDGE_SOURCE_PAD	1
++
+ struct ti_csi2rx_fmt {
+ 	u32				fourcc;	/* Four character code. */
+ 	u32				code;	/* Mbus code. */
+@@ -426,8 +428,9 @@ static int csi_async_notifier_complete(struct v4l2_async_notifier *notifier)
+ 	if (ret)
+ 		return ret;
+ 
+-	ret = v4l2_create_fwnode_links_to_pad(csi->source, &csi->pad,
+-					      MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
++	ret = media_create_pad_link(&csi->source->entity, CSI2RX_BRIDGE_SOURCE_PAD,
++				    &vdev->entity, csi->pad.index,
++				    MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
+ 
+ 	if (ret) {
+ 		video_unregister_device(vdev);
+@@ -1121,7 +1124,7 @@ static int ti_csi2rx_probe(struct platform_device *pdev)
+ 	if (ret)
+ 		goto err_vb2q;
+ 
+-	ret = of_platform_populate(csi->dev->of_node, NULL, NULL, csi->dev);
++	ret = devm_of_platform_populate(csi->dev);
+ 	if (ret) {
+ 		dev_err(csi->dev, "Failed to create children: %d\n", ret);
+ 		goto err_subdev;
+diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
+index f042f3f14afa9d..314f64420f6292 100644
+--- a/drivers/media/rc/lirc_dev.c
++++ b/drivers/media/rc/lirc_dev.c
+@@ -736,11 +736,11 @@ int lirc_register(struct rc_dev *dev)
+ 
+ 	cdev_init(&dev->lirc_cdev, &lirc_fops);
+ 
++	get_device(&dev->dev);
++
+ 	err = cdev_device_add(&dev->lirc_cdev, &dev->lirc_dev);
+ 	if (err)
+-		goto out_ida;
+-
+-	get_device(&dev->dev);
++		goto out_put_device;
+ 
+ 	switch (dev->driver_type) {
+ 	case RC_DRIVER_SCANCODE:
+@@ -764,7 +764,8 @@ int lirc_register(struct rc_dev *dev)
+ 
+ 	return 0;
+ 
+-out_ida:
++out_put_device:
++	put_device(&dev->lirc_dev);
+ 	ida_free(&lirc_ida, minor);
+ 	return err;
+ }
+diff --git a/drivers/media/test-drivers/vivid/vivid-cec.c b/drivers/media/test-drivers/vivid/vivid-cec.c
+index 356a988dd6a135..2d15fdd5d999e0 100644
+--- a/drivers/media/test-drivers/vivid/vivid-cec.c
++++ b/drivers/media/test-drivers/vivid/vivid-cec.c
+@@ -327,7 +327,7 @@ static int vivid_received(struct cec_adapter *adap, struct cec_msg *msg)
+ 		char osd[14];
+ 
+ 		if (!cec_is_sink(adap))
+-			return -ENOMSG;
++			break;
+ 		cec_ops_set_osd_string(msg, &disp_ctl, osd);
+ 		switch (disp_ctl) {
+ 		case CEC_OP_DISP_CTL_DEFAULT:
+@@ -348,7 +348,7 @@ static int vivid_received(struct cec_adapter *adap, struct cec_msg *msg)
+ 			cec_transmit_msg(adap, &reply, false);
+ 			break;
+ 		}
+-		break;
++		return 0;
+ 	}
+ 	case CEC_MSG_VENDOR_COMMAND_WITH_ID: {
+ 		u32 vendor_id;
+@@ -379,7 +379,7 @@ static int vivid_received(struct cec_adapter *adap, struct cec_msg *msg)
+ 		if (size == 1) {
+ 			// Ignore even op values
+ 			if (!(vendor_cmd[0] & 1))
+-				break;
++				return 0;
+ 			reply.len = msg->len;
+ 			memcpy(reply.msg + 1, msg->msg + 1, msg->len - 1);
+ 			reply.msg[msg->len - 1]++;
+@@ -388,12 +388,10 @@ static int vivid_received(struct cec_adapter *adap, struct cec_msg *msg)
+ 					      CEC_OP_ABORT_INVALID_OP);
+ 		}
+ 		cec_transmit_msg(adap, &reply, false);
+-		break;
++		return 0;
+ 	}
+-	default:
+-		return -ENOMSG;
+ 	}
+-	return 0;
++	return -ENOMSG;
+ }
+ 
+ static const struct cec_adap_ops vivid_cec_adap_ops = {
+diff --git a/drivers/memory/samsung/exynos-srom.c b/drivers/memory/samsung/exynos-srom.c
+index e73dd330af477d..d913fb901973f0 100644
+--- a/drivers/memory/samsung/exynos-srom.c
++++ b/drivers/memory/samsung/exynos-srom.c
+@@ -121,20 +121,18 @@ static int exynos_srom_probe(struct platform_device *pdev)
+ 		return -ENOMEM;
+ 
+ 	srom->dev = dev;
+-	srom->reg_base = of_iomap(np, 0);
+-	if (!srom->reg_base) {
++	srom->reg_base = devm_platform_ioremap_resource(pdev, 0);
++	if (IS_ERR(srom->reg_base)) {
+ 		dev_err(&pdev->dev, "iomap of exynos srom controller failed\n");
+-		return -ENOMEM;
++		return PTR_ERR(srom->reg_base);
+ 	}
+ 
+ 	platform_set_drvdata(pdev, srom);
+ 
+ 	srom->reg_offset = exynos_srom_alloc_reg_dump(exynos_srom_offsets,
+ 						      ARRAY_SIZE(exynos_srom_offsets));
+-	if (!srom->reg_offset) {
+-		iounmap(srom->reg_base);
++	if (!srom->reg_offset)
+ 		return -ENOMEM;
+-	}
+ 
+ 	for_each_child_of_node(np, child) {
+ 		if (exynos_srom_configure_bank(srom, child)) {
+diff --git a/drivers/mfd/intel_soc_pmic_chtdc_ti.c b/drivers/mfd/intel_soc_pmic_chtdc_ti.c
+index 992855bfda3e47..6daf33e07ea0a8 100644
+--- a/drivers/mfd/intel_soc_pmic_chtdc_ti.c
++++ b/drivers/mfd/intel_soc_pmic_chtdc_ti.c
+@@ -81,8 +81,9 @@ static struct mfd_cell chtdc_ti_dev[] = {
+ static const struct regmap_config chtdc_ti_regmap_config = {
+ 	.reg_bits = 8,
+ 	.val_bits = 8,
+-	.max_register = 128,
+-	.cache_type = REGCACHE_NONE,
++	.max_register = 0xff,
++	/* The hardware does not support reading multiple registers at once */
++	.use_single_read = true,
+ };
+ 
+ static const struct regmap_irq chtdc_ti_irqs[] = {
+diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
+index 4b19b8a16b0968..2e37700ae36e73 100644
+--- a/drivers/mmc/core/sdio.c
++++ b/drivers/mmc/core/sdio.c
+@@ -945,7 +945,11 @@ static void mmc_sdio_remove(struct mmc_host *host)
+  */
+ static int mmc_sdio_alive(struct mmc_host *host)
+ {
+-	return mmc_select_card(host->card);
++	if (!mmc_host_is_spi(host))
++		return mmc_select_card(host->card);
++	else
++		return mmc_io_rw_direct(host->card, 0, 0, SDIO_CCCR_CCCR, 0,
++					NULL);
+ }
+ 
+ /*
+diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
+index 47443fb5eb3362..f42d5f9c48c1ba 100644
+--- a/drivers/mmc/host/mmc_spi.c
++++ b/drivers/mmc/host/mmc_spi.c
+@@ -563,7 +563,7 @@ mmc_spi_setup_data_message(struct mmc_spi_host *host, bool multiple, bool write)
+ 	 * the next token (next data block, or STOP_TRAN).  We can try to
+ 	 * minimize I/O ops by using a single read to collect end-of-busy.
+ 	 */
+-	if (multiple || write) {
++	if (write) {
+ 		t = &host->early_status;
+ 		memset(t, 0, sizeof(*t));
+ 		t->len = write ? sizeof(scratch->status) : 1;
+diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c
+index fe5912d31beea4..b0a70badf3eb77 100644
+--- a/drivers/mtd/nand/raw/fsmc_nand.c
++++ b/drivers/mtd/nand/raw/fsmc_nand.c
+@@ -876,10 +876,14 @@ static int fsmc_nand_probe_config_dt(struct platform_device *pdev,
+ 	if (!of_property_read_u32(np, "bank-width", &val)) {
+ 		if (val == 2) {
+ 			nand->options |= NAND_BUSWIDTH_16;
+-		} else if (val != 1) {
++		} else if (val == 1) {
++			nand->options |= NAND_BUSWIDTH_AUTO;
++		} else {
+ 			dev_err(&pdev->dev, "invalid bank-width %u\n", val);
+ 			return -EINVAL;
+ 		}
++	} else {
++		nand->options |= NAND_BUSWIDTH_AUTO;
+ 	}
+ 
+ 	if (of_property_read_bool(np, "nand-skip-bbtscan"))
+diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+index 026f7270a54de8..fb5d12a87872ea 100644
+--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
++++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+@@ -479,10 +479,12 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
+ 					"missing 'reg' property in node %pOF\n",
+ 					tbi);
+ 				err = -EBUSY;
++				of_node_put(tbi);
+ 				goto error;
+ 			}
+ 			set_tbipa(*prop, pdev,
+ 				  data->get_tbipa, priv->map, &res);
++			of_node_put(tbi);
+ 		}
+ 	}
+ 
+diff --git a/drivers/net/ethernet/intel/ice/ice_adapter.c b/drivers/net/ethernet/intel/ice/ice_adapter.c
+index 10285995c9eddd..cc4798026182e3 100644
+--- a/drivers/net/ethernet/intel/ice/ice_adapter.c
++++ b/drivers/net/ethernet/intel/ice/ice_adapter.c
+@@ -98,19 +98,21 @@ struct ice_adapter *ice_adapter_get(struct pci_dev *pdev)
+ 
+ 	index = ice_adapter_xa_index(pdev);
+ 	scoped_guard(mutex, &ice_adapters_mutex) {
+-		err = xa_insert(&ice_adapters, index, NULL, GFP_KERNEL);
+-		if (err == -EBUSY) {
+-			adapter = xa_load(&ice_adapters, index);
++		adapter = xa_load(&ice_adapters, index);
++		if (adapter) {
+ 			refcount_inc(&adapter->refcount);
+ 			WARN_ON_ONCE(adapter->index != ice_adapter_index(pdev));
+ 			return adapter;
+ 		}
++		err = xa_reserve(&ice_adapters, index, GFP_KERNEL);
+ 		if (err)
+ 			return ERR_PTR(err);
+ 
+ 		adapter = ice_adapter_new(pdev);
+-		if (!adapter)
++		if (!adapter) {
++			xa_release(&ice_adapters, index);
+ 			return ERR_PTR(-ENOMEM);
++		}
+ 		xa_store(&ice_adapters, index, adapter, GFP_KERNEL);
+ 	}
+ 	return adapter;
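
The ice_adapter fix turns the insert-then-load dance into load-then-reserve
under the mutex, and drops the reservation again if allocation fails so no
stale slot is left behind. The lookup-or-create shape it implements, roughly
(make_object() is a placeholder):

        obj = xa_load(&xa, index);              /* NULL if absent */
        if (!obj) {
                err = xa_reserve(&xa, index, GFP_KERNEL);
                if (err)
                        return ERR_PTR(err);
                obj = make_object();
                if (!obj) {
                        xa_release(&xa, index); /* undo the reservation */
                        return ERR_PTR(-ENOMEM);
                }
                xa_store(&xa, index, obj, GFP_KERNEL);
        }
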
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+index 281b34af0bb48f..3160a1a2d2ab30 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+@@ -1180,9 +1180,9 @@ static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
+ 				mlx4_unregister_mac(mdev->dev, priv->port, mac);
+ 
+ 				hlist_del_rcu(&entry->hlist);
+-				kfree_rcu(entry, rcu);
+ 				en_dbg(DRV, priv, "Removed MAC %pM on port:%d\n",
+ 				       entry->mac, priv->port);
++				kfree_rcu(entry, rcu);
+ 				++removed;
+ 			}
+ 		}
+diff --git a/drivers/net/ethernet/mscc/ocelot_stats.c b/drivers/net/ethernet/mscc/ocelot_stats.c
+index c018783757fb2f..858d4cbc83d3f1 100644
+--- a/drivers/net/ethernet/mscc/ocelot_stats.c
++++ b/drivers/net/ethernet/mscc/ocelot_stats.c
+@@ -984,6 +984,6 @@ int ocelot_stats_init(struct ocelot *ocelot)
+ 
+ void ocelot_stats_deinit(struct ocelot *ocelot)
+ {
+-	cancel_delayed_work(&ocelot->stats_work);
++	disable_delayed_work_sync(&ocelot->stats_work);
+ 	destroy_workqueue(ocelot->stats_queue);
+ }
+diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
+index bb46ef986b2a48..afac4a1e9a1db9 100644
+--- a/drivers/net/wireless/ath/ath11k/core.c
++++ b/drivers/net/wireless/ath/ath11k/core.c
+@@ -1936,14 +1936,10 @@ static int ath11k_core_reconfigure_on_crash(struct ath11k_base *ab)
+ 	mutex_unlock(&ab->core_lock);
+ 
+ 	ath11k_dp_free(ab);
+-	ath11k_hal_srng_deinit(ab);
++	ath11k_hal_srng_clear(ab);
+ 
+ 	ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS(ab))) - 1;
+ 
+-	ret = ath11k_hal_srng_init(ab);
+-	if (ret)
+-		return ret;
+-
+ 	clear_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags);
+ 
+ 	ret = ath11k_core_qmi_firmware_ready(ab);
+diff --git a/drivers/net/wireless/ath/ath11k/hal.c b/drivers/net/wireless/ath/ath11k/hal.c
+index 65e52ab742b417..e47774b6d99266 100644
+--- a/drivers/net/wireless/ath/ath11k/hal.c
++++ b/drivers/net/wireless/ath/ath11k/hal.c
+@@ -1383,6 +1383,22 @@ void ath11k_hal_srng_deinit(struct ath11k_base *ab)
+ }
+ EXPORT_SYMBOL(ath11k_hal_srng_deinit);
+ 
++void ath11k_hal_srng_clear(struct ath11k_base *ab)
++{
++	/* No need to memset rdp and wrp memory since each individual
++	 * segment would get cleared in ath11k_hal_srng_src_hw_init()
++	 * and ath11k_hal_srng_dst_hw_init().
++	 */
++	memset(ab->hal.srng_list, 0,
++	       sizeof(ab->hal.srng_list));
++	memset(ab->hal.shadow_reg_addr, 0,
++	       sizeof(ab->hal.shadow_reg_addr));
++	ab->hal.avail_blk_resource = 0;
++	ab->hal.current_blk_index = 0;
++	ab->hal.num_shadow_reg_configured = 0;
++}
++EXPORT_SYMBOL(ath11k_hal_srng_clear);
++
+ void ath11k_hal_dump_srng_stats(struct ath11k_base *ab)
+ {
+ 	struct hal_srng *srng;
+diff --git a/drivers/net/wireless/ath/ath11k/hal.h b/drivers/net/wireless/ath/ath11k/hal.h
+index dc8bbe0730170e..0f725668b819d4 100644
+--- a/drivers/net/wireless/ath/ath11k/hal.h
++++ b/drivers/net/wireless/ath/ath11k/hal.h
+@@ -965,6 +965,7 @@ int ath11k_hal_srng_setup(struct ath11k_base *ab, enum hal_ring_type type,
+ 			  struct hal_srng_params *params);
+ int ath11k_hal_srng_init(struct ath11k_base *ath11k);
+ void ath11k_hal_srng_deinit(struct ath11k_base *ath11k);
++void ath11k_hal_srng_clear(struct ath11k_base *ab);
+ void ath11k_hal_dump_srng_stats(struct ath11k_base *ab);
+ void ath11k_hal_srng_get_shadow_config(struct ath11k_base *ab,
+ 				       u32 **cfg, u32 *len);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
+index e3459295ad884e..5be0edb2fe9aad 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
+@@ -21,6 +21,9 @@ static const struct usb_device_id mt7921u_device_table[] = {
+ 	/* Netgear, Inc. [A8000,AXE3000] */
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0x9060, 0xff, 0xff, 0xff),
+ 		.driver_info = (kernel_ulong_t)MT7921_FIRMWARE_WM },
++	/* Netgear, Inc. A7500 */
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0x9065, 0xff, 0xff, 0xff),
++		.driver_info = (kernel_ulong_t)MT7921_FIRMWARE_WM },
+ 	/* TP-Link TXE50UH */
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x35bc, 0x0107, 0xff, 0xff, 0xff),
+ 		.driver_info = (kernel_ulong_t)MT7921_FIRMWARE_WM },
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/usb.c b/drivers/net/wireless/mediatek/mt76/mt7925/usb.c
+index 682db1bab21c6a..a63ff630eaba9f 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7925/usb.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7925/usb.c
+@@ -12,6 +12,9 @@
+ static const struct usb_device_id mt7925u_device_table[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x0e8d, 0x7925, 0xff, 0xff, 0xff),
+ 		.driver_info = (kernel_ulong_t)MT7925_FIRMWARE_WM },
++	/* Netgear, Inc. A9000 */
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0x9072, 0xff, 0xff, 0xff),
++		.driver_info = (kernel_ulong_t)MT7925_FIRMWARE_WM },
+ 	{ },
+ };
+ 
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index fdc9f1df0578b7..ff004350dc2c33 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -3140,10 +3140,12 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
+ 		 * Exclude Samsung 990 Evo from NVME_QUIRK_SIMPLE_SUSPEND
+ 		 * because of high power consumption (> 2 Watt) in s2idle
+ 		 * sleep. Only some boards with Intel CPU are affected.
++		 * (Note for testing: Samsung 990 Evo Plus has same PCI ID)
+ 		 */
+ 		if (dmi_match(DMI_BOARD_NAME, "DN50Z-140HC-YD") ||
+ 		    dmi_match(DMI_BOARD_NAME, "GMxPXxx") ||
+ 		    dmi_match(DMI_BOARD_NAME, "GXxMRXx") ||
++		    dmi_match(DMI_BOARD_NAME, "NS5X_NS7XAU") ||
+ 		    dmi_match(DMI_BOARD_NAME, "PH4PG31") ||
+ 		    dmi_match(DMI_BOARD_NAME, "PH4PRX1_PH6PRX1") ||
+ 		    dmi_match(DMI_BOARD_NAME, "PH6PG01_PH6PG71"))
+diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
+index 9a72f75e5c2d8d..63b5b435bd3aea 100644
+--- a/drivers/of/unittest.c
++++ b/drivers/of/unittest.c
+@@ -4191,6 +4191,7 @@ static int of_unittest_pci_node_verify(struct pci_dev *pdev, bool add)
+ 		unittest(!np, "Child device tree node is not removed\n");
+ 		child_dev = device_find_any_child(&pdev->dev);
+ 		unittest(!child_dev, "Child device is not removed\n");
++		put_device(child_dev);
+ 	}
+ 
+ failed:
+diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c
+index eb772c18d44ed9..c014220d2b24ba 100644
+--- a/drivers/pci/controller/cadence/pci-j721e.c
++++ b/drivers/pci/controller/cadence/pci-j721e.c
+@@ -277,6 +277,25 @@ static int j721e_pcie_ctrl_init(struct j721e_pcie *pcie)
+ 	if (!ret)
+ 		offset = args.args[0];
+ 
++	/*
++	 * The PCIe Controller's registers have different "reset-values"
++	 * depending on the "strap" settings programmed into the PCIEn_CTRL
++	 * register within the CTRL_MMR memory-mapped register space.
++	 * The registers latch onto a "reset-value" based on the "strap"
++	 * settings sampled after the PCIe Controller is powered on.
++	 * To ensure that the "reset-values" are sampled accurately, power
++	 * off the PCIe Controller before programming the "strap" settings
++	 * and power it on after that. The runtime PM APIs, namely
++	 * pm_runtime_put_sync() and pm_runtime_get_sync(), decrement and
++	 * increment the usage counter respectively, causing GENPD to power off
++	 * and power on the PCIe Controller.
++	 */
++	ret = pm_runtime_put_sync(dev);
++	if (ret < 0) {
++		dev_err(dev, "Failed to power off PCIe Controller\n");
++		return ret;
++	}
++
+ 	ret = j721e_pcie_set_mode(pcie, syscon, offset);
+ 	if (ret < 0) {
+ 		dev_err(dev, "Failed to set pci mode\n");
+@@ -295,6 +314,12 @@ static int j721e_pcie_ctrl_init(struct j721e_pcie *pcie)
+ 		return ret;
+ 	}
+ 
++	ret = pm_runtime_get_sync(dev);
++	if (ret < 0) {
++		dev_err(dev, "Failed to power on PCIe Controller\n");
++		return ret;
++	}
++
+ 	/* Enable ACSPCIE refclk output if the optional property exists */
+ 	syscon = syscon_regmap_lookup_by_phandle_optional(node,
+ 						"ti,syscon-acspcie-proxy-ctrl");
+diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
+index 44b34559de1ac5..4f75d13fe1dee9 100644
+--- a/drivers/pci/controller/dwc/pci-keystone.c
++++ b/drivers/pci/controller/dwc/pci-keystone.c
+@@ -1202,8 +1202,8 @@ static int ks_pcie_probe(struct platform_device *pdev)
+ 	if (irq < 0)
+ 		return irq;
+ 
+-	ret = request_irq(irq, ks_pcie_err_irq_handler, IRQF_SHARED,
+-			  "ks-pcie-error-irq", ks_pcie);
++	ret = devm_request_irq(dev, irq, ks_pcie_err_irq_handler, IRQF_SHARED,
++			       "ks-pcie-error-irq", ks_pcie);
+ 	if (ret < 0) {
+ 		dev_err(dev, "failed to request error IRQ %d\n",
+ 			irq);
+diff --git a/drivers/pci/controller/dwc/pcie-rcar-gen4.c b/drivers/pci/controller/dwc/pcie-rcar-gen4.c
+index 14f69efa243c38..397c2f9477a157 100644
+--- a/drivers/pci/controller/dwc/pcie-rcar-gen4.c
++++ b/drivers/pci/controller/dwc/pcie-rcar-gen4.c
+@@ -723,7 +723,7 @@ static int rcar_gen4_pcie_ltssm_control(struct rcar_gen4_pcie *rcar, bool enable
+ 	rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x148, GENMASK(23, 22), BIT(22));
+ 	rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x148, GENMASK(18, 16), GENMASK(17, 16));
+ 	rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x148, GENMASK(7, 6), BIT(6));
+-	rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x148, GENMASK(2, 0), GENMASK(11, 0));
++	rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x148, GENMASK(2, 0), GENMASK(1, 0));
+ 	rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x1d4, GENMASK(16, 15), GENMASK(16, 15));
+ 	rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x514, BIT(26), BIT(26));
+ 	rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x0f8, BIT(16), 0);
+diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
+index 815599ef72db8c..c2d626b090e3ce 100644
+--- a/drivers/pci/controller/dwc/pcie-tegra194.c
++++ b/drivers/pci/controller/dwc/pcie-tegra194.c
+@@ -1205,6 +1205,7 @@ static int tegra_pcie_bpmp_set_ctrl_state(struct tegra_pcie_dw *pcie,
+ 	struct mrq_uphy_response resp;
+ 	struct tegra_bpmp_message msg;
+ 	struct mrq_uphy_request req;
++	int err;
+ 
+ 	/*
+ 	 * Controller-5 doesn't need to have its state set by BPMP-FW in
+@@ -1227,7 +1228,13 @@ static int tegra_pcie_bpmp_set_ctrl_state(struct tegra_pcie_dw *pcie,
+ 	msg.rx.data = &resp;
+ 	msg.rx.size = sizeof(resp);
+ 
+-	return tegra_bpmp_transfer(pcie->bpmp, &msg);
++	err = tegra_bpmp_transfer(pcie->bpmp, &msg);
++	if (err)
++		return err;
++	if (msg.rx.ret)
++		return -EINVAL;
++
++	return 0;
+ }
+ 
+ static int tegra_pcie_bpmp_set_pll_state(struct tegra_pcie_dw *pcie,
+@@ -1236,6 +1243,7 @@ static int tegra_pcie_bpmp_set_pll_state(struct tegra_pcie_dw *pcie,
+ 	struct mrq_uphy_response resp;
+ 	struct tegra_bpmp_message msg;
+ 	struct mrq_uphy_request req;
++	int err;
+ 
+ 	memset(&req, 0, sizeof(req));
+ 	memset(&resp, 0, sizeof(resp));
+@@ -1255,7 +1263,13 @@ static int tegra_pcie_bpmp_set_pll_state(struct tegra_pcie_dw *pcie,
+ 	msg.rx.data = &resp;
+ 	msg.rx.size = sizeof(resp);
+ 
+-	return tegra_bpmp_transfer(pcie->bpmp, &msg);
++	err = tegra_bpmp_transfer(pcie->bpmp, &msg);
++	if (err)
++		return err;
++	if (msg.rx.ret)
++		return -EINVAL;
++
++	return 0;
+ }
+ 
+ static void tegra_pcie_downstream_dev_to_D0(struct tegra_pcie_dw *pcie)
+@@ -1940,6 +1954,15 @@ static irqreturn_t tegra_pcie_ep_pex_rst_irq(int irq, void *arg)
+ 	return IRQ_HANDLED;
+ }
+ 
++static void tegra_pcie_ep_init(struct dw_pcie_ep *ep)
++{
++	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
++	enum pci_barno bar;
++
++	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
++		dw_pcie_ep_reset_bar(pci, bar);
++}
++
+ static int tegra_pcie_ep_raise_intx_irq(struct tegra_pcie_dw *pcie, u16 irq)
+ {
+ 	/* Tegra194 supports only INTA */
+@@ -1954,10 +1977,10 @@ static int tegra_pcie_ep_raise_intx_irq(struct tegra_pcie_dw *pcie, u16 irq)
+ 
+ static int tegra_pcie_ep_raise_msi_irq(struct tegra_pcie_dw *pcie, u16 irq)
+ {
+-	if (unlikely(irq > 31))
++	if (unlikely(irq > 32))
+ 		return -EINVAL;
+ 
+-	appl_writel(pcie, BIT(irq), APPL_MSI_CTRL_1);
++	appl_writel(pcie, BIT(irq - 1), APPL_MSI_CTRL_1);
+ 
+ 	return 0;
+ }
+@@ -2016,6 +2039,7 @@ tegra_pcie_ep_get_features(struct dw_pcie_ep *ep)
+ }
+ 
+ static const struct dw_pcie_ep_ops pcie_ep_ops = {
++	.init = tegra_pcie_ep_init,
+ 	.raise_irq = tegra_pcie_ep_raise_irq,
+ 	.get_features = tegra_pcie_ep_get_features,
+ };
+diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c
+index 4f70b7f2ded9cb..927ef421c4fc99 100644
+--- a/drivers/pci/controller/pci-tegra.c
++++ b/drivers/pci/controller/pci-tegra.c
+@@ -14,6 +14,7 @@
+  */
+ 
+ #include <linux/clk.h>
++#include <linux/cleanup.h>
+ #include <linux/debugfs.h>
+ #include <linux/delay.h>
+ #include <linux/export.h>
+@@ -269,7 +270,7 @@ struct tegra_msi {
+ 	DECLARE_BITMAP(used, INT_PCI_MSI_NR);
+ 	struct irq_domain *domain;
+ 	struct mutex map_lock;
+-	spinlock_t mask_lock;
++	raw_spinlock_t mask_lock;
+ 	void *virt;
+ 	dma_addr_t phys;
+ 	int irq;
+@@ -1604,14 +1605,13 @@ static void tegra_msi_irq_mask(struct irq_data *d)
+ 	struct tegra_msi *msi = irq_data_get_irq_chip_data(d);
+ 	struct tegra_pcie *pcie = msi_to_pcie(msi);
+ 	unsigned int index = d->hwirq / 32;
+-	unsigned long flags;
+ 	u32 value;
+ 
+-	spin_lock_irqsave(&msi->mask_lock, flags);
+-	value = afi_readl(pcie, AFI_MSI_EN_VEC(index));
+-	value &= ~BIT(d->hwirq % 32);
+-	afi_writel(pcie, value, AFI_MSI_EN_VEC(index));
+-	spin_unlock_irqrestore(&msi->mask_lock, flags);
++	scoped_guard(raw_spinlock_irqsave, &msi->mask_lock) {
++		value = afi_readl(pcie, AFI_MSI_EN_VEC(index));
++		value &= ~BIT(d->hwirq % 32);
++		afi_writel(pcie, value, AFI_MSI_EN_VEC(index));
++	}
+ }
+ 
+ static void tegra_msi_irq_unmask(struct irq_data *d)
+@@ -1619,14 +1619,13 @@ static void tegra_msi_irq_unmask(struct irq_data *d)
+ 	struct tegra_msi *msi = irq_data_get_irq_chip_data(d);
+ 	struct tegra_pcie *pcie = msi_to_pcie(msi);
+ 	unsigned int index = d->hwirq / 32;
+-	unsigned long flags;
+ 	u32 value;
+ 
+-	spin_lock_irqsave(&msi->mask_lock, flags);
+-	value = afi_readl(pcie, AFI_MSI_EN_VEC(index));
+-	value |= BIT(d->hwirq % 32);
+-	afi_writel(pcie, value, AFI_MSI_EN_VEC(index));
+-	spin_unlock_irqrestore(&msi->mask_lock, flags);
++	scoped_guard(raw_spinlock_irqsave, &msi->mask_lock) {
++		value = afi_readl(pcie, AFI_MSI_EN_VEC(index));
++		value |= BIT(d->hwirq % 32);
++		afi_writel(pcie, value, AFI_MSI_EN_VEC(index));
++	}
+ }
+ 
+ static void tegra_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+@@ -1736,7 +1735,7 @@ static int tegra_pcie_msi_setup(struct tegra_pcie *pcie)
+ 	int err;
+ 
+ 	mutex_init(&msi->map_lock);
+-	spin_lock_init(&msi->mask_lock);
++	raw_spin_lock_init(&msi->mask_lock);
+ 
+ 	if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ 		err = tegra_allocate_domains(msi);
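
This driver and the pcie-rcar-host hunk below make the same two changes: the
MSI mask lock becomes a raw_spinlock_t (safe to take from irqchip callbacks on
PREEMPT_RT) and the open-coded save/restore pairs become scoped_guard() from
linux/cleanup.h, which releases the lock automatically when the braced scope
ends. The pattern in isolation, with lock/reg/mask as placeholders:

        scoped_guard(raw_spinlock_irqsave, &lock) {
                u32 value = readl(reg);

                value |= mask;          /* read-modify-write under the lock */
                writel(value, reg);
        }                               /* lock dropped, irq flags restored */
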
+diff --git a/drivers/pci/controller/pcie-rcar-host.c b/drivers/pci/controller/pcie-rcar-host.c
+index 3dd653f3d78414..a90beedd8e24ee 100644
+--- a/drivers/pci/controller/pcie-rcar-host.c
++++ b/drivers/pci/controller/pcie-rcar-host.c
+@@ -12,6 +12,7 @@
+  */
+ 
+ #include <linux/bitops.h>
++#include <linux/cleanup.h>
+ #include <linux/clk.h>
+ #include <linux/clk-provider.h>
+ #include <linux/delay.h>
+@@ -37,7 +38,7 @@ struct rcar_msi {
+ 	DECLARE_BITMAP(used, INT_PCI_MSI_NR);
+ 	struct irq_domain *domain;
+ 	struct mutex map_lock;
+-	spinlock_t mask_lock;
++	raw_spinlock_t mask_lock;
+ 	int irq1;
+ 	int irq2;
+ };
+@@ -51,20 +52,13 @@ struct rcar_pcie_host {
+ 	int			(*phy_init_fn)(struct rcar_pcie_host *host);
+ };
+ 
+-static DEFINE_SPINLOCK(pmsr_lock);
+-
+ static int rcar_pcie_wakeup(struct device *pcie_dev, void __iomem *pcie_base)
+ {
+-	unsigned long flags;
+ 	u32 pmsr, val;
+ 	int ret = 0;
+ 
+-	spin_lock_irqsave(&pmsr_lock, flags);
+-
+-	if (!pcie_base || pm_runtime_suspended(pcie_dev)) {
+-		ret = -EINVAL;
+-		goto unlock_exit;
+-	}
++	if (!pcie_base || pm_runtime_suspended(pcie_dev))
++		return -EINVAL;
+ 
+ 	pmsr = readl(pcie_base + PMSR);
+ 
+@@ -86,8 +80,6 @@ static int rcar_pcie_wakeup(struct device *pcie_dev, void __iomem *pcie_base)
+ 		writel(L1FAEG | PMEL1RX, pcie_base + PMSR);
+ 	}
+ 
+-unlock_exit:
+-	spin_unlock_irqrestore(&pmsr_lock, flags);
+ 	return ret;
+ }
+ 
+@@ -634,28 +626,26 @@ static void rcar_msi_irq_mask(struct irq_data *d)
+ {
+ 	struct rcar_msi *msi = irq_data_get_irq_chip_data(d);
+ 	struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;
+-	unsigned long flags;
+ 	u32 value;
+ 
+-	spin_lock_irqsave(&msi->mask_lock, flags);
+-	value = rcar_pci_read_reg(pcie, PCIEMSIIER);
+-	value &= ~BIT(d->hwirq);
+-	rcar_pci_write_reg(pcie, value, PCIEMSIIER);
+-	spin_unlock_irqrestore(&msi->mask_lock, flags);
++	scoped_guard(raw_spinlock_irqsave, &msi->mask_lock) {
++		value = rcar_pci_read_reg(pcie, PCIEMSIIER);
++		value &= ~BIT(d->hwirq);
++		rcar_pci_write_reg(pcie, value, PCIEMSIIER);
++	}
+ }
+ 
+ static void rcar_msi_irq_unmask(struct irq_data *d)
+ {
+ 	struct rcar_msi *msi = irq_data_get_irq_chip_data(d);
+ 	struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;
+-	unsigned long flags;
+ 	u32 value;
+ 
+-	spin_lock_irqsave(&msi->mask_lock, flags);
+-	value = rcar_pci_read_reg(pcie, PCIEMSIIER);
+-	value |= BIT(d->hwirq);
+-	rcar_pci_write_reg(pcie, value, PCIEMSIIER);
+-	spin_unlock_irqrestore(&msi->mask_lock, flags);
++	scoped_guard(raw_spinlock_irqsave, &msi->mask_lock) {
++		value = rcar_pci_read_reg(pcie, PCIEMSIIER);
++		value |= BIT(d->hwirq);
++		rcar_pci_write_reg(pcie, value, PCIEMSIIER);
++	}
+ }
+ 
+ static void rcar_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+@@ -765,7 +755,7 @@ static int rcar_pcie_enable_msi(struct rcar_pcie_host *host)
+ 	int err;
+ 
+ 	mutex_init(&msi->map_lock);
+-	spin_lock_init(&msi->mask_lock);
++	raw_spin_lock_init(&msi->mask_lock);
+ 
+ 	err = of_address_to_resource(dev->of_node, 0, &res);
+ 	if (err)
+diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c
+index a8ae14474dd0a4..2e7356c28b5e40 100644
+--- a/drivers/pci/controller/pcie-xilinx-nwl.c
++++ b/drivers/pci/controller/pcie-xilinx-nwl.c
+@@ -721,9 +721,10 @@ static int nwl_pcie_bridge_init(struct nwl_pcie *pcie)
+ 	nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, E_ECAM_CONTROL) |
+ 			  E_ECAM_CR_ENABLE, E_ECAM_CONTROL);
+ 
+-	nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, E_ECAM_CONTROL) |
+-			  (NWL_ECAM_MAX_SIZE << E_ECAM_SIZE_SHIFT),
+-			  E_ECAM_CONTROL);
++	ecam_val = nwl_bridge_readl(pcie, E_ECAM_CONTROL);
++	ecam_val &= ~E_ECAM_SIZE_LOC;
++	ecam_val |= NWL_ECAM_MAX_SIZE << E_ECAM_SIZE_SHIFT;
++	nwl_bridge_writel(pcie, ecam_val, E_ECAM_CONTROL);
+ 
+ 	nwl_bridge_writel(pcie, lower_32_bits(pcie->phys_ecam_base),
+ 			  E_ECAM_BASE_LO);
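
The nwl change converts a plain OR into a proper read-modify-write: the size
field has to be cleared before the new value is OR-ed in, otherwise stale bits
survive. Generic shape, with the FIELD_* names as placeholders:

        val = readl(reg);
        val &= ~FIELD_MASK;                     /* clear the old field */
        val |= new_size << FIELD_SHIFT;         /* then set the new one */
        writel(val, reg);
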
+diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
+index 21aa3709e25776..eeb7fbc2d67a5e 100644
+--- a/drivers/pci/endpoint/functions/pci-epf-test.c
++++ b/drivers/pci/endpoint/functions/pci-epf-test.c
+@@ -282,17 +282,20 @@ static void pci_epf_test_clean_dma_chan(struct pci_epf_test *epf_test)
+ 	if (!epf_test->dma_supported)
+ 		return;
+ 
+-	dma_release_channel(epf_test->dma_chan_tx);
+-	if (epf_test->dma_chan_tx == epf_test->dma_chan_rx) {
++	if (epf_test->dma_chan_tx) {
++		dma_release_channel(epf_test->dma_chan_tx);
++		if (epf_test->dma_chan_tx == epf_test->dma_chan_rx) {
++			epf_test->dma_chan_tx = NULL;
++			epf_test->dma_chan_rx = NULL;
++			return;
++		}
+ 		epf_test->dma_chan_tx = NULL;
+-		epf_test->dma_chan_rx = NULL;
+-		return;
+ 	}
+ 
+-	dma_release_channel(epf_test->dma_chan_rx);
+-	epf_test->dma_chan_rx = NULL;
+-
+-	return;
++	if (epf_test->dma_chan_rx) {
++		dma_release_channel(epf_test->dma_chan_rx);
++		epf_test->dma_chan_rx = NULL;
++	}
+ }
+ 
+ static void pci_epf_test_print_rate(struct pci_epf_test *epf_test,
+diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
+index aaa33e8dc4c977..44a33df3c69c2d 100644
+--- a/drivers/pci/iov.c
++++ b/drivers/pci/iov.c
+@@ -581,15 +581,18 @@ static int sriov_add_vfs(struct pci_dev *dev, u16 num_vfs)
+ 	if (dev->no_vf_scan)
+ 		return 0;
+ 
++	pci_lock_rescan_remove();
+ 	for (i = 0; i < num_vfs; i++) {
+ 		rc = pci_iov_add_virtfn(dev, i);
+ 		if (rc)
+ 			goto failed;
+ 	}
++	pci_unlock_rescan_remove();
+ 	return 0;
+ failed:
+ 	while (i--)
+ 		pci_iov_remove_virtfn(dev, i);
++	pci_unlock_rescan_remove();
+ 
+ 	return rc;
+ }
+@@ -709,8 +712,10 @@ static void sriov_del_vfs(struct pci_dev *dev)
+ 	struct pci_sriov *iov = dev->sriov;
+ 	int i;
+ 
++	pci_lock_rescan_remove();
+ 	for (i = 0; i < iov->num_VFs; i++)
+ 		pci_iov_remove_virtfn(dev, i);
++	pci_unlock_rescan_remove();
+ }
+ 
+ static void sriov_disable(struct pci_dev *dev)
+diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
+index 35270172c83318..0c3aa91d1aee0f 100644
+--- a/drivers/pci/pci-driver.c
++++ b/drivers/pci/pci-driver.c
+@@ -1600,6 +1600,7 @@ void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type)
+ 	switch (err_type) {
+ 	case PCI_ERS_RESULT_NONE:
+ 	case PCI_ERS_RESULT_CAN_RECOVER:
++	case PCI_ERS_RESULT_NEED_RESET:
+ 		envp[idx++] = "ERROR_EVENT=BEGIN_RECOVERY";
+ 		envp[idx++] = "DEVICE_ONLINE=0";
+ 		break;
+diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
+index 5af4a804a4f896..96f9cf9f8d643b 100644
+--- a/drivers/pci/pci-sysfs.c
++++ b/drivers/pci/pci-sysfs.c
+@@ -200,8 +200,14 @@ static ssize_t max_link_width_show(struct device *dev,
+ 				   struct device_attribute *attr, char *buf)
+ {
+ 	struct pci_dev *pdev = to_pci_dev(dev);
++	ssize_t ret;
+ 
+-	return sysfs_emit(buf, "%u\n", pcie_get_width_cap(pdev));
++	/* We read PCI_EXP_LNKCAP, so we need the device to be accessible. */
++	pci_config_pm_runtime_get(pdev);
++	ret = sysfs_emit(buf, "%u\n", pcie_get_width_cap(pdev));
++	pci_config_pm_runtime_put(pdev);
++
++	return ret;
+ }
+ static DEVICE_ATTR_RO(max_link_width);
+ 
+@@ -213,7 +219,10 @@ static ssize_t current_link_speed_show(struct device *dev,
+ 	int err;
+ 	enum pci_bus_speed speed;
+ 
++	pci_config_pm_runtime_get(pci_dev);
+ 	err = pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &linkstat);
++	pci_config_pm_runtime_put(pci_dev);
++
+ 	if (err)
+ 		return -EINVAL;
+ 
+@@ -230,7 +239,10 @@ static ssize_t current_link_width_show(struct device *dev,
+ 	u16 linkstat;
+ 	int err;
+ 
++	pci_config_pm_runtime_get(pci_dev);
+ 	err = pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &linkstat);
++	pci_config_pm_runtime_put(pci_dev);
++
+ 	if (err)
+ 		return -EINVAL;
+ 
+@@ -246,7 +258,10 @@ static ssize_t secondary_bus_number_show(struct device *dev,
+ 	u8 sec_bus;
+ 	int err;
+ 
++	pci_config_pm_runtime_get(pci_dev);
+ 	err = pci_read_config_byte(pci_dev, PCI_SECONDARY_BUS, &sec_bus);
++	pci_config_pm_runtime_put(pci_dev);
++
+ 	if (err)
+ 		return -EINVAL;
+ 
+@@ -262,7 +277,10 @@ static ssize_t subordinate_bus_number_show(struct device *dev,
+ 	u8 sub_bus;
+ 	int err;
+ 
++	pci_config_pm_runtime_get(pci_dev);
+ 	err = pci_read_config_byte(pci_dev, PCI_SUBORDINATE_BUS, &sub_bus);
++	pci_config_pm_runtime_put(pci_dev);
++
+ 	if (err)
+ 		return -EINVAL;
+ 
+diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
+index 13b8586924ead1..e5cbea3a4968bd 100644
+--- a/drivers/pci/pcie/aer.c
++++ b/drivers/pci/pcie/aer.c
+@@ -38,7 +38,7 @@
+ #define AER_ERROR_SOURCES_MAX		128
+ 
+ #define AER_MAX_TYPEOF_COR_ERRS		16	/* as per PCI_ERR_COR_STATUS */
+-#define AER_MAX_TYPEOF_UNCOR_ERRS	27	/* as per PCI_ERR_UNCOR_STATUS*/
++#define AER_MAX_TYPEOF_UNCOR_ERRS	32	/* as per PCI_ERR_UNCOR_STATUS */
+ 
+ struct aer_err_source {
+ 	u32 status;			/* PCI_ERR_ROOT_STATUS */
+@@ -510,11 +510,11 @@ static const char *aer_uncorrectable_error_string[] = {
+ 	"AtomicOpBlocked",		/* Bit Position 24	*/
+ 	"TLPBlockedErr",		/* Bit Position 25	*/
+ 	"PoisonTLPBlocked",		/* Bit Position 26	*/
+-	NULL,				/* Bit Position 27	*/
+-	NULL,				/* Bit Position 28	*/
+-	NULL,				/* Bit Position 29	*/
+-	NULL,				/* Bit Position 30	*/
+-	NULL,				/* Bit Position 31	*/
++	"DMWrReqBlocked",		/* Bit Position 27	*/
++	"IDECheck",			/* Bit Position 28	*/
++	"MisIDETLP",			/* Bit Position 29	*/
++	"PCRC_CHECK",			/* Bit Position 30	*/
++	"TLPXlatBlocked",		/* Bit Position 31	*/
+ };
+ 
+ static const char *aer_agent_string[] = {
+diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c
+index 31090770fffcc9..628d192de90b27 100644
+--- a/drivers/pci/pcie/err.c
++++ b/drivers/pci/pcie/err.c
+@@ -108,6 +108,12 @@ static int report_normal_detected(struct pci_dev *dev, void *data)
+ 	return report_error_detected(dev, pci_channel_io_normal, data);
+ }
+ 
++static int report_perm_failure_detected(struct pci_dev *dev, void *data)
++{
++	pci_uevent_ers(dev, PCI_ERS_RESULT_DISCONNECT);
++	return 0;
++}
++
+ static int report_mmio_enabled(struct pci_dev *dev, void *data)
+ {
+ 	struct pci_driver *pdrv;
+@@ -269,7 +275,7 @@ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev,
+ failed:
+ 	pci_walk_bridge(bridge, pci_pm_runtime_put, NULL);
+ 
+-	pci_uevent_ers(bridge, PCI_ERS_RESULT_DISCONNECT);
++	pci_walk_bridge(bridge, report_perm_failure_detected, NULL);
+ 
+ 	/* TODO: Should kernel panic here? */
+ 	pci_info(bridge, "device recovery failed\n");
+diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
+index 978b239ec10bde..e6b0c8ec9c1fa1 100644
+--- a/drivers/perf/arm-cmn.c
++++ b/drivers/perf/arm-cmn.c
+@@ -65,7 +65,7 @@
+ /* PMU registers occupy the 3rd 4KB page of each node's region */
+ #define CMN_PMU_OFFSET			0x2000
+ /* ...except when they don't :( */
+-#define CMN_S3_DTM_OFFSET		0xa000
++#define CMN_S3_R1_DTM_OFFSET		0xa000
+ #define CMN_S3_PMU_OFFSET		0xd900
+ 
+ /* For most nodes, this is all there is */
+@@ -233,6 +233,9 @@ enum cmn_revision {
+ 	REV_CMN700_R1P0,
+ 	REV_CMN700_R2P0,
+ 	REV_CMN700_R3P0,
++	REV_CMNS3_R0P0 = 0,
++	REV_CMNS3_R0P1,
++	REV_CMNS3_R1P0,
+ 	REV_CI700_R0P0 = 0,
+ 	REV_CI700_R1P0,
+ 	REV_CI700_R2P0,
+@@ -425,8 +428,8 @@ static enum cmn_model arm_cmn_model(const struct arm_cmn *cmn)
+ static int arm_cmn_pmu_offset(const struct arm_cmn *cmn, const struct arm_cmn_node *dn)
+ {
+ 	if (cmn->part == PART_CMN_S3) {
+-		if (dn->type == CMN_TYPE_XP)
+-			return CMN_S3_DTM_OFFSET;
++		if (cmn->rev >= REV_CMNS3_R1P0 && dn->type == CMN_TYPE_XP)
++			return CMN_S3_R1_DTM_OFFSET;
+ 		return CMN_S3_PMU_OFFSET;
+ 	}
+ 	return CMN_PMU_OFFSET;
+diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.h b/drivers/pinctrl/samsung/pinctrl-samsung.h
+index 7ffd2e193e4256..02b53ae9d9aaf3 100644
+--- a/drivers/pinctrl/samsung/pinctrl-samsung.h
++++ b/drivers/pinctrl/samsung/pinctrl-samsung.h
+@@ -393,10 +393,6 @@ extern const struct samsung_pinctrl_of_match_data exynosautov920_of_data;
+ extern const struct samsung_pinctrl_of_match_data fsd_of_data;
+ extern const struct samsung_pinctrl_of_match_data gs101_of_data;
+ extern const struct samsung_pinctrl_of_match_data s3c64xx_of_data;
+-extern const struct samsung_pinctrl_of_match_data s3c2412_of_data;
+-extern const struct samsung_pinctrl_of_match_data s3c2416_of_data;
+-extern const struct samsung_pinctrl_of_match_data s3c2440_of_data;
+-extern const struct samsung_pinctrl_of_match_data s3c2450_of_data;
+ extern const struct samsung_pinctrl_of_match_data s5pv210_of_data;
+ 
+ #endif /* __PINCTRL_SAMSUNG_H */
+diff --git a/drivers/power/supply/max77976_charger.c b/drivers/power/supply/max77976_charger.c
+index d7e520da768864..e2424efb266df8 100644
+--- a/drivers/power/supply/max77976_charger.c
++++ b/drivers/power/supply/max77976_charger.c
+@@ -292,10 +292,10 @@ static int max77976_get_property(struct power_supply *psy,
+ 	case POWER_SUPPLY_PROP_ONLINE:
+ 		err = max77976_get_online(chg, &val->intval);
+ 		break;
+-	case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX:
++	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+ 		val->intval = MAX77976_CHG_CC_MAX;
+ 		break;
+-	case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT:
++	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+ 		err = max77976_get_integer(chg, CHG_CC,
+ 					   MAX77976_CHG_CC_MIN,
+ 					   MAX77976_CHG_CC_MAX,
+@@ -330,7 +330,7 @@ static int max77976_set_property(struct power_supply *psy,
+ 	int err = 0;
+ 
+ 	switch (psp) {
+-	case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT:
++	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+ 		err = max77976_set_integer(chg, CHG_CC,
+ 					   MAX77976_CHG_CC_MIN,
+ 					   MAX77976_CHG_CC_MAX,
+@@ -355,7 +355,7 @@ static int max77976_property_is_writeable(struct power_supply *psy,
+ 					  enum power_supply_property psp)
+ {
+ 	switch (psp) {
+-	case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT:
++	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+ 	case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ 		return true;
+ 	default:
+@@ -368,8 +368,8 @@ static enum power_supply_property max77976_psy_props[] = {
+ 	POWER_SUPPLY_PROP_CHARGE_TYPE,
+ 	POWER_SUPPLY_PROP_HEALTH,
+ 	POWER_SUPPLY_PROP_ONLINE,
+-	POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT,
+-	POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX,
++	POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT,
++	POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+ 	POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT,
+ 	POWER_SUPPLY_PROP_MODEL_NAME,
+ 	POWER_SUPPLY_PROP_MANUFACTURER,
+diff --git a/drivers/pwm/pwm-berlin.c b/drivers/pwm/pwm-berlin.c
+index 831aed228cafcb..858d369913742c 100644
+--- a/drivers/pwm/pwm-berlin.c
++++ b/drivers/pwm/pwm-berlin.c
+@@ -234,7 +234,7 @@ static int berlin_pwm_suspend(struct device *dev)
+ 	for (i = 0; i < chip->npwm; i++) {
+ 		struct berlin_pwm_channel *channel = &bpc->channel[i];
+ 
+-		channel->enable = berlin_pwm_readl(bpc, i, BERLIN_PWM_ENABLE);
++		channel->enable = berlin_pwm_readl(bpc, i, BERLIN_PWM_EN);
+ 		channel->ctrl = berlin_pwm_readl(bpc, i, BERLIN_PWM_CONTROL);
+ 		channel->duty = berlin_pwm_readl(bpc, i, BERLIN_PWM_DUTY);
+ 		channel->tcnt = berlin_pwm_readl(bpc, i, BERLIN_PWM_TCNT);
+@@ -262,7 +262,7 @@ static int berlin_pwm_resume(struct device *dev)
+ 		berlin_pwm_writel(bpc, i, channel->ctrl, BERLIN_PWM_CONTROL);
+ 		berlin_pwm_writel(bpc, i, channel->duty, BERLIN_PWM_DUTY);
+ 		berlin_pwm_writel(bpc, i, channel->tcnt, BERLIN_PWM_TCNT);
+-		berlin_pwm_writel(bpc, i, channel->enable, BERLIN_PWM_ENABLE);
++		berlin_pwm_writel(bpc, i, channel->enable, BERLIN_PWM_EN);
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
+index aaf76406cd7d7d..39db12f267cc62 100644
+--- a/drivers/rtc/interface.c
++++ b/drivers/rtc/interface.c
+@@ -443,6 +443,29 @@ static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
+ 	else
+ 		err = rtc->ops->set_alarm(rtc->dev.parent, alarm);
+ 
++	/*
++	 * Check for the potential race described above: if we are waiting
++	 * for the next second and it just ticked since the check above, either
++	 *
++	 * 1) It ticked after the alarm was set, and an alarm irq should be
++	 *    generated.
++	 *
++	 * 2) It ticked before the alarm was set, and an alarm irq most
++	 *    likely will not be generated.
++	 *
++	 * While we cannot easily check which of these two scenarios we
++	 * are in, we can return -ETIME to signal that the timer has already
++	 * expired, which is true in both cases.
++	 */
++	if ((scheduled - now) <= 1) {
++		err = __rtc_read_time(rtc, &tm);
++		if (err)
++			return err;
++		now = rtc_tm_to_time64(&tm);
++		if (scheduled <= now)
++			return -ETIME;
++	}
++
+ 	trace_rtc_set_alarm(rtc_tm_to_time64(&alarm->time), err);
+ 	return err;
+ }
+@@ -594,6 +617,10 @@ int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled)
+ 		rtc->uie_rtctimer.node.expires = ktime_add(now, onesec);
+ 		rtc->uie_rtctimer.period = ktime_set(1, 0);
+ 		err = rtc_timer_enqueue(rtc, &rtc->uie_rtctimer);
++		if (!err && rtc->ops && rtc->ops->alarm_irq_enable)
++			err = rtc->ops->alarm_irq_enable(rtc->dev.parent, 1);
++		if (err)
++			goto out;
+ 	} else {
+ 		rtc_timer_remove(rtc, &rtc->uie_rtctimer);
+ 	}
+diff --git a/drivers/rtc/rtc-optee.c b/drivers/rtc/rtc-optee.c
+index 9f8b5d4a8f6b65..6b77c122fdc109 100644
+--- a/drivers/rtc/rtc-optee.c
++++ b/drivers/rtc/rtc-optee.c
+@@ -320,6 +320,7 @@ static int optee_rtc_remove(struct device *dev)
+ {
+ 	struct optee_rtc *priv = dev_get_drvdata(dev);
+ 
++	tee_shm_free(priv->shm);
+ 	tee_client_close_session(priv->ctx, priv->session_id);
+ 	tee_client_close_context(priv->ctx);
+ 
+diff --git a/drivers/rtc/rtc-x1205.c b/drivers/rtc/rtc-x1205.c
+index 4bcd7ca32f27bf..b8a0fccef14e03 100644
+--- a/drivers/rtc/rtc-x1205.c
++++ b/drivers/rtc/rtc-x1205.c
+@@ -669,7 +669,7 @@ static const struct i2c_device_id x1205_id[] = {
+ MODULE_DEVICE_TABLE(i2c, x1205_id);
+ 
+ static const struct of_device_id x1205_dt_ids[] = {
+-	{ .compatible = "xircom,x1205", },
++	{ .compatible = "xicor,x1205", },
+ 	{},
+ };
+ MODULE_DEVICE_TABLE(of, x1205_dt_ids);
+diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
+index 42a4a996defbe1..93f346d313e982 100644
+--- a/drivers/s390/block/dasd.c
++++ b/drivers/s390/block/dasd.c
+@@ -332,6 +332,11 @@ static int dasd_state_basic_to_ready(struct dasd_device *device)
+ 	lim.max_dev_sectors = device->discipline->max_sectors(block);
+ 	lim.max_hw_sectors = lim.max_dev_sectors;
+ 	lim.logical_block_size = block->bp_block;
++	/*
++	 * Adjust dma_alignment to match block_size - 1
++	 * to ensure proper buffer alignment checks in the block layer.
++	 */
++	lim.dma_alignment = lim.logical_block_size - 1;
+ 
+ 	if (device->discipline->has_discard) {
+ 		unsigned int max_bytes;
+@@ -3112,12 +3117,14 @@ static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx,
+ 		    PTR_ERR(cqr) == -ENOMEM ||
+ 		    PTR_ERR(cqr) == -EAGAIN) {
+ 			rc = BLK_STS_RESOURCE;
+-			goto out;
++		} else if (PTR_ERR(cqr) == -EINVAL) {
++			rc = BLK_STS_INVAL;
++		} else {
++			DBF_DEV_EVENT(DBF_ERR, basedev,
++				      "CCW creation failed (rc=%ld) on request %p",
++				      PTR_ERR(cqr), req);
++			rc = BLK_STS_IOERR;
+ 		}
+-		DBF_DEV_EVENT(DBF_ERR, basedev,
+-			      "CCW creation failed (rc=%ld) on request %p",
+-			      PTR_ERR(cqr), req);
+-		rc = BLK_STS_IOERR;
+ 		goto out;
+ 	}
+ 	/*
+diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
+index 9498825d9c7a5c..858a9e171351b7 100644
+--- a/drivers/s390/cio/device.c
++++ b/drivers/s390/cio/device.c
+@@ -1318,23 +1318,34 @@ void ccw_device_schedule_recovery(void)
+ 	spin_unlock_irqrestore(&recovery_lock, flags);
+ }
+ 
+-static int purge_fn(struct device *dev, void *data)
++static int purge_fn(struct subchannel *sch, void *data)
+ {
+-	struct ccw_device *cdev = to_ccwdev(dev);
+-	struct ccw_dev_id *id = &cdev->private->dev_id;
+-	struct subchannel *sch = to_subchannel(cdev->dev.parent);
++	struct ccw_device *cdev;
+ 
+-	spin_lock_irq(cdev->ccwlock);
+-	if (is_blacklisted(id->ssid, id->devno) &&
+-	    (cdev->private->state == DEV_STATE_OFFLINE) &&
+-	    (atomic_cmpxchg(&cdev->private->onoff, 0, 1) == 0)) {
+-		CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", id->ssid,
+-			      id->devno);
++	spin_lock_irq(&sch->lock);
++	if (sch->st != SUBCHANNEL_TYPE_IO || !sch->schib.pmcw.dnv)
++		goto unlock;
++
++	if (!is_blacklisted(sch->schid.ssid, sch->schib.pmcw.dev))
++		goto unlock;
++
++	cdev = sch_get_cdev(sch);
++	if (cdev) {
++		if (cdev->private->state != DEV_STATE_OFFLINE)
++			goto unlock;
++
++		if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
++			goto unlock;
+ 		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
+-		css_sched_sch_todo(sch, SCH_TODO_UNREG);
+ 		atomic_set(&cdev->private->onoff, 0);
+ 	}
+-	spin_unlock_irq(cdev->ccwlock);
++
++	css_sched_sch_todo(sch, SCH_TODO_UNREG);
++	CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x%s\n", sch->schid.ssid,
++		      sch->schib.pmcw.dev, cdev ? "" : " (no cdev)");
++
++unlock:
++	spin_unlock_irq(&sch->lock);
+ 	/* Abort loop in case of pending signal. */
+ 	if (signal_pending(current))
+ 		return -EINTR;
+@@ -1350,7 +1361,7 @@ static int purge_fn(struct device *dev, void *data)
+ int ccw_purge_blacklisted(void)
+ {
+ 	CIO_MSG_EVENT(2, "ccw: purging blacklisted devices\n");
+-	bus_for_each_dev(&ccw_bus_type, NULL, NULL, purge_fn);
++	for_each_subchannel_staged(purge_fn, NULL, NULL);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index 0c49414c1f350b..6cb6586f4790bc 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -6528,18 +6528,21 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h,
+ 	while (left) {
+ 		sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
+ 		buff_size[sg_used] = sz;
+-		buff[sg_used] = kmalloc(sz, GFP_KERNEL);
+-		if (buff[sg_used] == NULL) {
+-			status = -ENOMEM;
+-			goto cleanup1;
+-		}
++
+ 		if (ioc->Request.Type.Direction & XFER_WRITE) {
+-			if (copy_from_user(buff[sg_used], data_ptr, sz)) {
+-				status = -EFAULT;
++			buff[sg_used] = memdup_user(data_ptr, sz);
++			if (IS_ERR(buff[sg_used])) {
++				status = PTR_ERR(buff[sg_used]);
+ 				goto cleanup1;
+ 			}
+-		} else
+-			memset(buff[sg_used], 0, sz);
++		} else {
++			buff[sg_used] = kzalloc(sz, GFP_KERNEL);
++			if (!buff[sg_used]) {
++				status = -ENOMEM;
++				goto cleanup1;
++			}
++		}
++
+ 		left -= sz;
+ 		data_ptr += sz;
+ 		sg_used++;
+diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
+index 020037cbf0d91d..655cca5fe7ccbb 100644
+--- a/drivers/scsi/mvsas/mv_init.c
++++ b/drivers/scsi/mvsas/mv_init.c
+@@ -124,7 +124,7 @@ static void mvs_free(struct mvs_info *mvi)
+ 	if (mvi->shost)
+ 		scsi_host_put(mvi->shost);
+ 	list_for_each_entry(mwq, &mvi->wq_list, entry)
+-		cancel_delayed_work(&mwq->work_q);
++		cancel_delayed_work_sync(&mwq->work_q);
+ 	kfree(mvi->rsvd_tags);
+ 	kfree(mvi);
+ }
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index ee1d5dec3bc605..3745cf8569171b 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -3695,10 +3695,10 @@ static int sd_revalidate_disk(struct gendisk *disk)
+ 	struct scsi_disk *sdkp = scsi_disk(disk);
+ 	struct scsi_device *sdp = sdkp->device;
+ 	sector_t old_capacity = sdkp->capacity;
+-	struct queue_limits lim;
+-	unsigned char *buffer;
++	struct queue_limits *lim = NULL;
++	unsigned char *buffer = NULL;
+ 	unsigned int dev_max;
+-	int err;
++	int err = 0;
+ 
+ 	SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp,
+ 				      "sd_revalidate_disk\n"));
+@@ -3710,6 +3710,10 @@ static int sd_revalidate_disk(struct gendisk *disk)
+ 	if (!scsi_device_online(sdp))
+ 		goto out;
+ 
++	lim = kmalloc(sizeof(*lim), GFP_KERNEL);
++	if (!lim)
++		goto out;
++
+ 	buffer = kmalloc(SD_BUF_SIZE, GFP_KERNEL);
+ 	if (!buffer) {
+ 		sd_printk(KERN_WARNING, sdkp, "sd_revalidate_disk: Memory "
+@@ -3719,14 +3723,14 @@ static int sd_revalidate_disk(struct gendisk *disk)
+ 
+ 	sd_spinup_disk(sdkp);
+ 
+-	lim = queue_limits_start_update(sdkp->disk->queue);
++	*lim = queue_limits_start_update(sdkp->disk->queue);
+ 
+ 	/*
+ 	 * Without media there is no reason to ask; moreover, some devices
+ 	 * react badly if we do.
+ 	 */
+ 	if (sdkp->media_present) {
+-		sd_read_capacity(sdkp, &lim, buffer);
++		sd_read_capacity(sdkp, lim, buffer);
+ 		/*
+ 		 * Some USB/UAS devices return generic values for mode pages
+ 		 * until the media has been accessed. Trigger a READ operation
+@@ -3740,17 +3744,17 @@ static int sd_revalidate_disk(struct gendisk *disk)
+ 		 * cause this to be updated correctly and any device which
+ 		 * doesn't support it should be treated as rotational.
+ 		 */
+-		lim.features |= (BLK_FEAT_ROTATIONAL | BLK_FEAT_ADD_RANDOM);
++		lim->features |= (BLK_FEAT_ROTATIONAL | BLK_FEAT_ADD_RANDOM);
+ 
+ 		if (scsi_device_supports_vpd(sdp)) {
+ 			sd_read_block_provisioning(sdkp);
+-			sd_read_block_limits(sdkp, &lim);
++			sd_read_block_limits(sdkp, lim);
+ 			sd_read_block_limits_ext(sdkp);
+-			sd_read_block_characteristics(sdkp, &lim);
+-			sd_zbc_read_zones(sdkp, &lim, buffer);
++			sd_read_block_characteristics(sdkp, lim);
++			sd_zbc_read_zones(sdkp, lim, buffer);
+ 		}
+ 
+-		sd_config_discard(sdkp, &lim, sd_discard_mode(sdkp));
++		sd_config_discard(sdkp, lim, sd_discard_mode(sdkp));
+ 
+ 		sd_print_capacity(sdkp, old_capacity);
+ 
+@@ -3760,47 +3764,46 @@ static int sd_revalidate_disk(struct gendisk *disk)
+ 		sd_read_app_tag_own(sdkp, buffer);
+ 		sd_read_write_same(sdkp, buffer);
+ 		sd_read_security(sdkp, buffer);
+-		sd_config_protection(sdkp, &lim);
++		sd_config_protection(sdkp, lim);
+ 	}
+ 
+ 	/*
+ 	 * We now have all cache related info, determine how we deal
+ 	 * with flush requests.
+ 	 */
+-	sd_set_flush_flag(sdkp, &lim);
++	sd_set_flush_flag(sdkp, lim);
+ 
+ 	/* Initial block count limit based on CDB TRANSFER LENGTH field size. */
+ 	dev_max = sdp->use_16_for_rw ? SD_MAX_XFER_BLOCKS : SD_DEF_XFER_BLOCKS;
+ 
+ 	/* Some devices report a maximum block count for READ/WRITE requests. */
+ 	dev_max = min_not_zero(dev_max, sdkp->max_xfer_blocks);
+-	lim.max_dev_sectors = logical_to_sectors(sdp, dev_max);
++	lim->max_dev_sectors = logical_to_sectors(sdp, dev_max);
+ 
+ 	if (sd_validate_min_xfer_size(sdkp))
+-		lim.io_min = logical_to_bytes(sdp, sdkp->min_xfer_blocks);
++		lim->io_min = logical_to_bytes(sdp, sdkp->min_xfer_blocks);
+ 	else
+-		lim.io_min = 0;
++		lim->io_min = 0;
+ 
+ 	/*
+ 	 * Limit default to SCSI host optimal sector limit if set. There may be
+ 	 * an impact on performance for when the size of a request exceeds this
+ 	 * host limit.
+ 	 */
+-	lim.io_opt = sdp->host->opt_sectors << SECTOR_SHIFT;
++	lim->io_opt = sdp->host->opt_sectors << SECTOR_SHIFT;
+ 	if (sd_validate_opt_xfer_size(sdkp, dev_max)) {
+-		lim.io_opt = min_not_zero(lim.io_opt,
++		lim->io_opt = min_not_zero(lim->io_opt,
+ 				logical_to_bytes(sdp, sdkp->opt_xfer_blocks));
+ 	}
+ 
+ 	sdkp->first_scan = 0;
+ 
+ 	set_capacity_and_notify(disk, logical_to_sectors(sdp, sdkp->capacity));
+-	sd_config_write_same(sdkp, &lim);
+-	kfree(buffer);
++	sd_config_write_same(sdkp, lim);
+ 
+-	err = queue_limits_commit_update_frozen(sdkp->disk->queue, &lim);
++	err = queue_limits_commit_update_frozen(sdkp->disk->queue, lim);
+ 	if (err)
+-		return err;
++		goto out;
+ 
+ 	/*
+ 	 * Query concurrent positioning ranges after
+@@ -3819,7 +3822,10 @@ static int sd_revalidate_disk(struct gendisk *disk)
+ 		set_capacity_and_notify(disk, 0);
+ 
+  out:
+-	return 0;
++	kfree(buffer);
++	kfree(lim);
++
++	return err;
+ }
+ 
+ /**
+diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
+index 56be0b6901a879..06e43b184d85de 100644
+--- a/drivers/spi/spi-cadence-quadspi.c
++++ b/drivers/spi/spi-cadence-quadspi.c
+@@ -712,6 +712,7 @@ static int cqspi_read_setup(struct cqspi_flash_pdata *f_pdata,
+ 	reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
+ 	reg |= (op->addr.nbytes - 1);
+ 	writel(reg, reg_base + CQSPI_REG_SIZE);
++	readl(reg_base + CQSPI_REG_SIZE); /* Flush posted write. */
+ 	return 0;
+ }
+ 
+@@ -754,6 +755,7 @@ static int cqspi_indirect_read_execute(struct cqspi_flash_pdata *f_pdata,
+ 	reinit_completion(&cqspi->transfer_complete);
+ 	writel(CQSPI_REG_INDIRECTRD_START_MASK,
+ 	       reg_base + CQSPI_REG_INDIRECTRD);
++	readl(reg_base + CQSPI_REG_INDIRECTRD); /* Flush posted write. */
+ 
+ 	while (remaining > 0) {
+ 		if (use_irq &&
+@@ -1033,6 +1035,7 @@ static int cqspi_write_setup(struct cqspi_flash_pdata *f_pdata,
+ 	reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
+ 	reg |= (op->addr.nbytes - 1);
+ 	writel(reg, reg_base + CQSPI_REG_SIZE);
++	readl(reg_base + CQSPI_REG_SIZE); /* Flush posted write. */
+ 	return 0;
+ }
+ 
+@@ -1058,6 +1061,8 @@ static int cqspi_indirect_write_execute(struct cqspi_flash_pdata *f_pdata,
+ 	reinit_completion(&cqspi->transfer_complete);
+ 	writel(CQSPI_REG_INDIRECTWR_START_MASK,
+ 	       reg_base + CQSPI_REG_INDIRECTWR);
++	readl(reg_base + CQSPI_REG_INDIRECTWR); /* Flush posted write. */
++
+ 	/*
+ 	 * As per 66AK2G02 TRM SPRUHY8F section 11.15.5.3 Indirect Access
+ 	 * Controller programming sequence, couple of cycles of
+@@ -1668,12 +1673,10 @@ static const struct spi_controller_mem_caps cqspi_mem_caps = {
+ 
+ static int cqspi_setup_flash(struct cqspi_st *cqspi)
+ {
+-	unsigned int max_cs = cqspi->num_chipselect - 1;
+ 	struct platform_device *pdev = cqspi->pdev;
+ 	struct device *dev = &pdev->dev;
+ 	struct cqspi_flash_pdata *f_pdata;
+-	unsigned int cs;
+-	int ret;
++	int ret, cs, max_cs = -1;
+ 
+ 	/* Get flash device data */
+ 	for_each_available_child_of_node_scoped(dev->of_node, np) {
+@@ -1686,10 +1689,10 @@ static int cqspi_setup_flash(struct cqspi_st *cqspi)
+ 		if (cs >= cqspi->num_chipselect) {
+ 			dev_err(dev, "Chip select %d out of range.\n", cs);
+ 			return -EINVAL;
+-		} else if (cs < max_cs) {
+-			max_cs = cs;
+ 		}
+ 
++		max_cs = max_t(int, cs, max_cs);
++
+ 		f_pdata = &cqspi->f_pdata[cs];
+ 		f_pdata->cqspi = cqspi;
+ 		f_pdata->cs = cs;
+@@ -1699,6 +1702,11 @@ static int cqspi_setup_flash(struct cqspi_st *cqspi)
+ 			return ret;
+ 	}
+ 
++	if (max_cs < 0) {
++		dev_err(dev, "No flash device declared\n");
++		return -ENODEV;
++	}
++
+ 	cqspi->num_chipselect = max_cs + 1;
+ 	return 0;
+ }
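+/*
+ * Editor's example (illustrative, not part of the upstream patch):
+ * with flash nodes at cs=0 and cs=2 on a 4-CS controller, the old code
+ * started from max_cs = 3 and *lowered* it on every child it saw,
+ * ending with num_chipselect = 1; the rework tracks the true maximum
+ * (num_chipselect = 3) and returns -ENODEV when no flash node exists.
+ */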
+diff --git a/drivers/video/fbdev/core/fb_cmdline.c b/drivers/video/fbdev/core/fb_cmdline.c
+index 4d1634c492ec4d..594b60424d1c64 100644
+--- a/drivers/video/fbdev/core/fb_cmdline.c
++++ b/drivers/video/fbdev/core/fb_cmdline.c
+@@ -40,7 +40,7 @@ int fb_get_options(const char *name, char **option)
+ 	bool enabled;
+ 
+ 	if (name)
+-		is_of = strncmp(name, "offb", 4);
++		is_of = !strncmp(name, "offb", 4);
+ 
+ 	enabled = __video_get_options(name, &options, is_of);
+ 
+diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
+index 81effbd53dc52b..ecd3ddda6ccbea 100644
+--- a/drivers/xen/events/events_base.c
++++ b/drivers/xen/events/events_base.c
+@@ -1320,14 +1320,17 @@ int bind_interdomain_evtchn_to_irq_lateeoi(struct xenbus_device *dev,
+ }
+ EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irq_lateeoi);
+ 
+-static int find_virq(unsigned int virq, unsigned int cpu, evtchn_port_t *evtchn)
++static int find_virq(unsigned int virq, unsigned int cpu, evtchn_port_t *evtchn,
++		     bool percpu)
+ {
+ 	struct evtchn_status status;
+ 	evtchn_port_t port;
+-	int rc = -ENOENT;
++	bool exists = false;
+ 
+ 	memset(&status, 0, sizeof(status));
+ 	for (port = 0; port < xen_evtchn_max_channels(); port++) {
++		int rc;
++
+ 		status.dom = DOMID_SELF;
+ 		status.port = port;
+ 		rc = HYPERVISOR_event_channel_op(EVTCHNOP_status, &status);
+@@ -1335,12 +1338,16 @@ static int find_virq(unsigned int virq, unsigned int cpu, evtchn_port_t *evtchn)
+ 			continue;
+ 		if (status.status != EVTCHNSTAT_virq)
+ 			continue;
+-		if (status.u.virq == virq && status.vcpu == xen_vcpu_nr(cpu)) {
++		if (status.u.virq != virq)
++			continue;
++		if (status.vcpu == xen_vcpu_nr(cpu)) {
+ 			*evtchn = port;
+-			break;
++			return 0;
++		} else if (!percpu) {
++			exists = true;
+ 		}
+ 	}
+-	return rc;
++	return exists ? -EEXIST : -ENOENT;
+ }
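+/*
+ * Editor's note (illustrative, not part of the upstream patch): the
+ * return contract of find_virq() after this change, as consumed by
+ * bind_virq_to_irq() below:
+ *
+ *	0	matching VIRQ already bound to @cpu; *evtchn is valid
+ *	-EEXIST	@virq exists, but on another vcpu and @percpu is false
+ *	-ENOENT	no event channel is bound to @virq at all
+ */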
+ 
+ /**
+@@ -1387,8 +1394,11 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu)
+ 			evtchn = bind_virq.port;
+ 		else {
+ 			if (ret == -EEXIST)
+-				ret = find_virq(virq, cpu, &evtchn);
+-			BUG_ON(ret < 0);
++				ret = find_virq(virq, cpu, &evtchn, percpu);
++			if (ret) {
++				__unbind_from_irq(info, info->irq);
++				goto out;
++			}
+ 		}
+ 
+ 		ret = xen_irq_info_virq_setup(info, cpu, evtchn, virq);
+@@ -1793,9 +1803,20 @@ static int xen_rebind_evtchn_to_cpu(struct irq_info *info, unsigned int tcpu)
+ 	 * virq or IPI channel, which don't actually need to be rebound. Ignore
+ 	 * it, but don't do the xenlinux-level rebind in that case.
+ 	 */
+-	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
++	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0) {
++		int old_cpu = info->cpu;
++
+ 		bind_evtchn_to_cpu(info, tcpu, false);
+ 
++		if (info->type == IRQT_VIRQ) {
++			int virq = info->u.virq;
++			int irq = per_cpu(virq_to_irq, old_cpu)[virq];
++
++			per_cpu(virq_to_irq, old_cpu)[virq] = -1;
++			per_cpu(virq_to_irq, tcpu)[virq] = irq;
++		}
++	}
++
+ 	do_unmask(info, EVT_MASK_REASON_TEMPORARY);
+ 
+ 	return 0;
+diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
+index b4b4ebed68daf5..8f543c8163478d 100644
+--- a/drivers/xen/manage.c
++++ b/drivers/xen/manage.c
+@@ -116,7 +116,7 @@ static void do_suspend(void)
+ 	err = dpm_suspend_start(PMSG_FREEZE);
+ 	if (err) {
+ 		pr_err("%s: dpm_suspend_start %d\n", __func__, err);
+-		goto out_thaw;
++		goto out_resume_end;
+ 	}
+ 
+ 	printk(KERN_DEBUG "suspending xenstore...\n");
+@@ -156,6 +156,7 @@ static void do_suspend(void)
+ 	else
+ 		xs_suspend_cancel();
+ 
++out_resume_end:
+ 	dpm_resume_end(si.cancelled ? PMSG_THAW : PMSG_RESTORE);
+ 
+ out_thaw:
+diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c
+index e2b22bea348a35..c403117ac01361 100644
+--- a/fs/btrfs/export.c
++++ b/fs/btrfs/export.c
+@@ -23,7 +23,11 @@ static int btrfs_encode_fh(struct inode *inode, u32 *fh, int *max_len,
+ 	int type;
+ 
+ 	if (parent && (len < BTRFS_FID_SIZE_CONNECTABLE)) {
+-		*max_len = BTRFS_FID_SIZE_CONNECTABLE;
++		if (btrfs_root_id(BTRFS_I(inode)->root) !=
++		    btrfs_root_id(BTRFS_I(parent)->root))
++			*max_len = BTRFS_FID_SIZE_CONNECTABLE_ROOT;
++		else
++			*max_len = BTRFS_FID_SIZE_CONNECTABLE;
+ 		return FILEID_INVALID;
+ 	} else if (len < BTRFS_FID_SIZE_NON_CONNECTABLE) {
+ 		*max_len = BTRFS_FID_SIZE_NON_CONNECTABLE;
+@@ -45,6 +49,8 @@ static int btrfs_encode_fh(struct inode *inode, u32 *fh, int *max_len,
+ 		parent_root_id = btrfs_root_id(BTRFS_I(parent)->root);
+ 
+ 		if (parent_root_id != fid->root_objectid) {
++			if (*max_len < BTRFS_FID_SIZE_CONNECTABLE_ROOT)
++				return FILEID_INVALID;
+ 			fid->parent_root_objectid = parent_root_id;
+ 			len = BTRFS_FID_SIZE_CONNECTABLE_ROOT;
+ 			type = FILEID_BTRFS_WITH_PARENT_ROOT;
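+/*
+ * Editor's sketch (illustrative, not part of the upstream patch): the
+ * exportfs contract these hunks rely on - if the caller's buffer is too
+ * small, ->encode_fh() stores the required length (in 32-bit words) in
+ * *max_len and returns FILEID_INVALID so the caller can retry with a
+ * bigger buffer; the fix makes btrfs request the larger
+ * connectable-with-root size up front whenever the parent and child
+ * live in different subvolumes.
+ */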
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index 60fe155b1ce05d..b310a07a84adfa 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -355,6 +355,13 @@ noinline_for_stack bool find_lock_delalloc_range(struct inode *inode,
+ 	/* step one, find a bunch of delalloc bytes starting at start */
+ 	delalloc_start = *start;
+ 	delalloc_end = 0;
++
++	/*
++	 * If @max_bytes is smaller than a block, btrfs_find_delalloc_range() can
++	 * return early without handling any dirty ranges.
++	 */
++	ASSERT(max_bytes >= fs_info->sectorsize);
++
+ 	found = btrfs_find_delalloc_range(tree, &delalloc_start, &delalloc_end,
+ 					  max_bytes, &cached_state);
+ 	if (!found || delalloc_end <= *start || delalloc_start > orig_end) {
+@@ -385,13 +392,14 @@ noinline_for_stack bool find_lock_delalloc_range(struct inode *inode,
+ 				   delalloc_end);
+ 	ASSERT(!ret || ret == -EAGAIN);
+ 	if (ret == -EAGAIN) {
+-		/* some of the folios are gone, lets avoid looping by
+-		 * shortening the size of the delalloc range we're searching
++		/*
++		 * Some of the folios are gone, let's avoid looping by
++		 * shortening the size of the delalloc range we're searching.
+ 		 */
+ 		free_extent_state(cached_state);
+ 		cached_state = NULL;
+ 		if (!loops) {
+-			max_bytes = PAGE_SIZE;
++			max_bytes = fs_info->sectorsize;
+ 			loops = 1;
+ 			goto again;
+ 		} else {
+diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
+index b84d1747a0205a..e7d192f7ab3b4c 100644
+--- a/fs/cramfs/inode.c
++++ b/fs/cramfs/inode.c
+@@ -117,9 +117,18 @@ static struct inode *get_cramfs_inode(struct super_block *sb,
+ 		inode_nohighmem(inode);
+ 		inode->i_data.a_ops = &cramfs_aops;
+ 		break;
+-	default:
++	case S_IFCHR:
++	case S_IFBLK:
++	case S_IFIFO:
++	case S_IFSOCK:
+ 		init_special_inode(inode, cramfs_inode->mode,
+ 				old_decode_dev(cramfs_inode->size));
++		break;
++	default:
++		printk(KERN_DEBUG "CRAMFS: Invalid file type 0%04o for inode %lu.\n",
++		       inode->i_mode, inode->i_ino);
++		iget_failed(inode);
++		return ERR_PTR(-EIO);
+ 	}
+ 
+ 	inode->i_mode = cramfs_inode->mode;
+diff --git a/fs/eventpoll.c b/fs/eventpoll.c
+index 075fee4ba29bcd..8711fac20804c7 100644
+--- a/fs/eventpoll.c
++++ b/fs/eventpoll.c
+@@ -46,10 +46,10 @@
+  *
+  * 1) epnested_mutex (mutex)
+  * 2) ep->mtx (mutex)
+- * 3) ep->lock (rwlock)
++ * 3) ep->lock (spinlock)
+  *
+  * The acquire order is the one listed above, from 1 to 3.
+- * We need a rwlock (ep->lock) because we manipulate objects
++ * We need a spinlock (ep->lock) because we manipulate objects
+  * from inside the poll callback, that might be triggered from
+  * a wake_up() that in turn might be called from IRQ context.
+  * So we can't sleep inside the poll callback and hence we need
+@@ -195,7 +195,7 @@ struct eventpoll {
+ 	struct list_head rdllist;
+ 
+ 	/* Lock which protects rdllist and ovflist */
+-	rwlock_t lock;
++	spinlock_t lock;
+ 
+ 	/* RB tree root used to store monitored fd structs */
+ 	struct rb_root_cached rbr;
+@@ -713,10 +713,10 @@ static void ep_start_scan(struct eventpoll *ep, struct list_head *txlist)
+ 	 * in a lockless way.
+ 	 */
+ 	lockdep_assert_irqs_enabled();
+-	write_lock_irq(&ep->lock);
++	spin_lock_irq(&ep->lock);
+ 	list_splice_init(&ep->rdllist, txlist);
+ 	WRITE_ONCE(ep->ovflist, NULL);
+-	write_unlock_irq(&ep->lock);
++	spin_unlock_irq(&ep->lock);
+ }
+ 
+ static void ep_done_scan(struct eventpoll *ep,
+@@ -724,7 +724,7 @@ static void ep_done_scan(struct eventpoll *ep,
+ {
+ 	struct epitem *epi, *nepi;
+ 
+-	write_lock_irq(&ep->lock);
++	spin_lock_irq(&ep->lock);
+ 	/*
+ 	 * During the time we spent inside the "sproc" callback, some
+ 	 * other events might have been queued by the poll callback.
+@@ -765,7 +765,7 @@ static void ep_done_scan(struct eventpoll *ep,
+ 			wake_up(&ep->wq);
+ 	}
+ 
+-	write_unlock_irq(&ep->lock);
++	spin_unlock_irq(&ep->lock);
+ }
+ 
+ static void ep_get(struct eventpoll *ep)
+@@ -839,10 +839,10 @@ static bool __ep_remove(struct eventpoll *ep, struct epitem *epi, bool force)
+ 
+ 	rb_erase_cached(&epi->rbn, &ep->rbr);
+ 
+-	write_lock_irq(&ep->lock);
++	spin_lock_irq(&ep->lock);
+ 	if (ep_is_linked(epi))
+ 		list_del_init(&epi->rdllink);
+-	write_unlock_irq(&ep->lock);
++	spin_unlock_irq(&ep->lock);
+ 
+ 	wakeup_source_unregister(ep_wakeup_source(epi));
+ 	/*
+@@ -1123,7 +1123,7 @@ static int ep_alloc(struct eventpoll **pep)
+ 		return -ENOMEM;
+ 
+ 	mutex_init(&ep->mtx);
+-	rwlock_init(&ep->lock);
++	spin_lock_init(&ep->lock);
+ 	init_waitqueue_head(&ep->wq);
+ 	init_waitqueue_head(&ep->poll_wait);
+ 	INIT_LIST_HEAD(&ep->rdllist);
+@@ -1210,100 +1210,10 @@ struct file *get_epoll_tfile_raw_ptr(struct file *file, int tfd,
+ }
+ #endif /* CONFIG_KCMP */
+ 
+-/*
+- * Adds a new entry to the tail of the list in a lockless way, i.e.
+- * multiple CPUs are allowed to call this function concurrently.
+- *
+- * Beware: it is necessary to prevent any other modifications of the
+- *         existing list until all changes are completed, in other words
+- *         concurrent list_add_tail_lockless() calls should be protected
+- *         with a read lock, where write lock acts as a barrier which
+- *         makes sure all list_add_tail_lockless() calls are fully
+- *         completed.
+- *
+- *        Also an element can be locklessly added to the list only in one
+- *        direction i.e. either to the tail or to the head, otherwise
+- *        concurrent access will corrupt the list.
+- *
+- * Return: %false if element has been already added to the list, %true
+- * otherwise.
+- */
+-static inline bool list_add_tail_lockless(struct list_head *new,
+-					  struct list_head *head)
+-{
+-	struct list_head *prev;
+-
+-	/*
+-	 * This is simple 'new->next = head' operation, but cmpxchg()
+-	 * is used in order to detect that same element has been just
+-	 * added to the list from another CPU: the winner observes
+-	 * new->next == new.
+-	 */
+-	if (!try_cmpxchg(&new->next, &new, head))
+-		return false;
+-
+-	/*
+-	 * Initially ->next of a new element must be updated with the head
+-	 * (we are inserting to the tail) and only then pointers are atomically
+-	 * exchanged.  XCHG guarantees memory ordering, thus ->next should be
+-	 * updated before pointers are actually swapped and pointers are
+-	 * swapped before prev->next is updated.
+-	 */
+-
+-	prev = xchg(&head->prev, new);
+-
+-	/*
+-	 * It is safe to modify prev->next and new->prev, because a new element
+-	 * is added only to the tail and new->next is updated before XCHG.
+-	 */
+-
+-	prev->next = new;
+-	new->prev = prev;
+-
+-	return true;
+-}
+-
+-/*
+- * Chains a new epi entry to the tail of the ep->ovflist in a lockless way,
+- * i.e. multiple CPUs are allowed to call this function concurrently.
+- *
+- * Return: %false if epi element has been already chained, %true otherwise.
+- */
+-static inline bool chain_epi_lockless(struct epitem *epi)
+-{
+-	struct eventpoll *ep = epi->ep;
+-
+-	/* Fast preliminary check */
+-	if (epi->next != EP_UNACTIVE_PTR)
+-		return false;
+-
+-	/* Check that the same epi has not been just chained from another CPU */
+-	if (cmpxchg(&epi->next, EP_UNACTIVE_PTR, NULL) != EP_UNACTIVE_PTR)
+-		return false;
+-
+-	/* Atomically exchange tail */
+-	epi->next = xchg(&ep->ovflist, epi);
+-
+-	return true;
+-}
+-
+ /*
+  * This is the callback that is passed to the wait queue wakeup
+  * mechanism. It is called by the stored file descriptors when they
+  * have events to report.
+- *
+- * This callback takes a read lock in order not to contend with concurrent
+- * events from another file descriptor, thus all modifications to ->rdllist
+- * or ->ovflist are lockless.  Read lock is paired with the write lock from
+- * ep_start/done_scan(), which stops all list modifications and guarantees
+- * that lists state is seen correctly.
+- *
+- * Another thing worth to mention is that ep_poll_callback() can be called
+- * concurrently for the same @epi from different CPUs if poll table was inited
+- * with several wait queues entries.  Plural wakeup from different CPUs of a
+- * single wait queue is serialized by wq.lock, but the case when multiple wait
+- * queues are used should be detected accordingly.  This is detected using
+- * cmpxchg() operation.
+  */
+ static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
+ {
+@@ -1314,7 +1224,7 @@ static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, v
+ 	unsigned long flags;
+ 	int ewake = 0;
+ 
+-	read_lock_irqsave(&ep->lock, flags);
++	spin_lock_irqsave(&ep->lock, flags);
+ 
+ 	ep_set_busy_poll_napi_id(epi);
+ 
+@@ -1343,12 +1253,15 @@ static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, v
+ 	 * chained in ep->ovflist and requeued later on.
+ 	 */
+ 	if (READ_ONCE(ep->ovflist) != EP_UNACTIVE_PTR) {
+-		if (chain_epi_lockless(epi))
++		if (epi->next == EP_UNACTIVE_PTR) {
++			epi->next = READ_ONCE(ep->ovflist);
++			WRITE_ONCE(ep->ovflist, epi);
+ 			ep_pm_stay_awake_rcu(epi);
++		}
+ 	} else if (!ep_is_linked(epi)) {
+ 		/* In the usual case, add event to ready list. */
+-		if (list_add_tail_lockless(&epi->rdllink, &ep->rdllist))
+-			ep_pm_stay_awake_rcu(epi);
++		list_add_tail(&epi->rdllink, &ep->rdllist);
++		ep_pm_stay_awake_rcu(epi);
+ 	}
+ 
+ 	/*
+@@ -1381,7 +1294,7 @@ static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, v
+ 		pwake++;
+ 
+ out_unlock:
+-	read_unlock_irqrestore(&ep->lock, flags);
++	spin_unlock_irqrestore(&ep->lock, flags);
+ 
+ 	/* We have to call this outside the lock */
+ 	if (pwake)
+@@ -1716,7 +1629,7 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
+ 	}
+ 
+ 	/* We have to drop the new item inside our item list to keep track of it */
+-	write_lock_irq(&ep->lock);
++	spin_lock_irq(&ep->lock);
+ 
+ 	/* record NAPI ID of new item if present */
+ 	ep_set_busy_poll_napi_id(epi);
+@@ -1733,7 +1646,7 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
+ 			pwake++;
+ 	}
+ 
+-	write_unlock_irq(&ep->lock);
++	spin_unlock_irq(&ep->lock);
+ 
+ 	/* We have to call this outside the lock */
+ 	if (pwake)
+@@ -1797,7 +1710,7 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi,
+ 	 * list, push it inside.
+ 	 */
+ 	if (ep_item_poll(epi, &pt, 1)) {
+-		write_lock_irq(&ep->lock);
++		spin_lock_irq(&ep->lock);
+ 		if (!ep_is_linked(epi)) {
+ 			list_add_tail(&epi->rdllink, &ep->rdllist);
+ 			ep_pm_stay_awake(epi);
+@@ -1808,7 +1721,7 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi,
+ 			if (waitqueue_active(&ep->poll_wait))
+ 				pwake++;
+ 		}
+-		write_unlock_irq(&ep->lock);
++		spin_unlock_irq(&ep->lock);
+ 	}
+ 
+ 	/* We have to call this outside the lock */
+@@ -2041,7 +1954,7 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
+ 		init_wait(&wait);
+ 		wait.func = ep_autoremove_wake_function;
+ 
+-		write_lock_irq(&ep->lock);
++		spin_lock_irq(&ep->lock);
+ 		/*
+ 		 * Barrierless variant, waitqueue_active() is called under
+ 		 * the same lock on wakeup ep_poll_callback() side, so it
+@@ -2060,7 +1973,7 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
+ 		if (!eavail)
+ 			__add_wait_queue_exclusive(&ep->wq, &wait);
+ 
+-		write_unlock_irq(&ep->lock);
++		spin_unlock_irq(&ep->lock);
+ 
+ 		if (!eavail)
+ 			timed_out = !schedule_hrtimeout_range(to, slack,
+@@ -2075,7 +1988,7 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
+ 		eavail = 1;
+ 
+ 		if (!list_empty_careful(&wait.entry)) {
+-			write_lock_irq(&ep->lock);
++			spin_lock_irq(&ep->lock);
+ 			/*
+ 			 * If the thread timed out and is not on the wait queue,
+ 			 * it means that the thread was woken up after its
+@@ -2086,7 +1999,7 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
+ 			if (timed_out)
+ 				eavail = list_empty(&wait.entry);
+ 			__remove_wait_queue(&ep->wq, &wait);
+-			write_unlock_irq(&ep->lock);
++			spin_unlock_irq(&ep->lock);
+ 		}
+ 	}
+ }
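+/*
+ * Editor's note (illustrative, not part of the upstream patch): with
+ * ep->lock a plain spinlock held across ep_poll_callback(), the
+ * lockless cmpxchg/xchg helpers deleted above reduce to an ordinary
+ * singly-linked push, which the new callback inlines:
+ *
+ *	if (epi->next == EP_UNACTIVE_PTR) {	/* not yet on ovflist */
+ *		epi->next = READ_ONCE(ep->ovflist);
+ *		WRITE_ONCE(ep->ovflist, epi);	/* push new head */
+ *	}
+ */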
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 37e18e654f715b..d8a059ec1ad622 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -3107,6 +3107,8 @@ extern struct buffer_head *ext4_sb_bread(struct super_block *sb,
+ 					 sector_t block, blk_opf_t op_flags);
+ extern struct buffer_head *ext4_sb_bread_unmovable(struct super_block *sb,
+ 						   sector_t block);
++extern struct buffer_head *ext4_sb_bread_nofail(struct super_block *sb,
++						sector_t block);
+ extern void ext4_read_bh_nowait(struct buffer_head *bh, blk_opf_t op_flags,
+ 				bh_end_io_t *end_io, bool simu_fail);
+ extern int ext4_read_bh(struct buffer_head *bh, blk_opf_t op_flags,
+diff --git a/fs/ext4/fsmap.c b/fs/ext4/fsmap.c
+index 91185c40f755a5..22fc333244ef73 100644
+--- a/fs/ext4/fsmap.c
++++ b/fs/ext4/fsmap.c
+@@ -74,7 +74,8 @@ static int ext4_getfsmap_dev_compare(const void *p1, const void *p2)
+ static bool ext4_getfsmap_rec_before_low_key(struct ext4_getfsmap_info *info,
+ 					     struct ext4_fsmap *rec)
+ {
+-	return rec->fmr_physical < info->gfi_low.fmr_physical;
++	return rec->fmr_physical + rec->fmr_length <=
++	       info->gfi_low.fmr_physical;
+ }
+ 
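+/*
+ * Editor's example (illustrative, not part of the upstream patch): a
+ * record at physical 100 with length 50 covers [100, 150). For a low
+ * key of 120 the old test (100 < 120) wrongly classified it as "before
+ * the low key"; the new test (100 + 50 <= 120) is false, so the
+ * partially overlapping record is now reported.
+ */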
+ /*
+@@ -200,15 +201,18 @@ static int ext4_getfsmap_meta_helper(struct super_block *sb,
+ 			  ext4_group_first_block_no(sb, agno));
+ 	fs_end = fs_start + EXT4_C2B(sbi, len);
+ 
+-	/* Return relevant extents from the meta_list */
++	/*
++	 * Return relevant extents from the meta_list. We emit all extents that
++	 * partially or fully overlap with the query range.
++	 */
+ 	list_for_each_entry_safe(p, tmp, &info->gfi_meta_list, fmr_list) {
+-		if (p->fmr_physical < info->gfi_next_fsblk) {
++		if (p->fmr_physical + p->fmr_length <= info->gfi_next_fsblk) {
+ 			list_del(&p->fmr_list);
+ 			kfree(p);
+ 			continue;
+ 		}
+-		if (p->fmr_physical <= fs_start ||
+-		    p->fmr_physical + p->fmr_length <= fs_end) {
++		if (p->fmr_physical <= fs_end &&
++		    p->fmr_physical + p->fmr_length > fs_start) {
+ 			/* Emit the retained free extent record if present */
+ 			if (info->gfi_lastfree.fmr_owner) {
+ 				error = ext4_getfsmap_helper(sb, info,
+diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
+index d45124318200d8..da76353b3a5750 100644
+--- a/fs/ext4/indirect.c
++++ b/fs/ext4/indirect.c
+@@ -1025,7 +1025,7 @@ static void ext4_free_branches(handle_t *handle, struct inode *inode,
+ 			}
+ 
+ 			/* Go read the buffer for the next level down */
+-			bh = ext4_sb_bread(inode->i_sb, nr, 0);
++			bh = ext4_sb_bread_nofail(inode->i_sb, nr);
+ 
+ 			/*
+ 			 * A read failure? Report error and clear slot
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 558a585c5df513..6af57242559658 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -3859,7 +3859,11 @@ int ext4_can_truncate(struct inode *inode)
+  * We have to make sure i_disksize gets properly updated before we truncate
+  * page cache due to hole punching or zero range. Otherwise i_disksize update
+  * can get lost as it may have been postponed to submission of writeback but
+- * that will never happen after we truncate page cache.
++ * that will never happen if we remove the folio containing i_size from the
++ * page cache. Also, if we punch a hole within i_size but above i_disksize,
++ * a following ext4_page_mkwrite() may mistakenly allocate written blocks
++ * over the hole and thus introduce allocated blocks beyond i_disksize,
++ * which is not allowed (e2fsck would complain in case of a crash).
+  */
+ int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
+ 				      loff_t len)
+@@ -3870,9 +3874,11 @@ int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
+ 	loff_t size = i_size_read(inode);
+ 
+ 	WARN_ON(!inode_is_locked(inode));
+-	if (offset > size || offset + len < size)
++	if (offset > size)
+ 		return 0;
+ 
++	if (offset + len < size)
++		size = offset + len;
+ 	if (EXT4_I(inode)->i_disksize >= size)
+ 		return 0;
+ 
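+/*
+ * Editor's example (illustrative, not part of the upstream patch):
+ * with i_disksize = 4k and i_size = 16k, punching the hole [8k, 12k)
+ * now raises i_disksize to offset + len = 12k before the page cache is
+ * truncated, so a later ext4_page_mkwrite() in that range cannot
+ * allocate written blocks beyond i_disksize.
+ */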
+diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
+index 898443e98efc9e..a4c94eabc78ec6 100644
+--- a/fs/ext4/move_extent.c
++++ b/fs/ext4/move_extent.c
+@@ -225,7 +225,7 @@ static int mext_page_mkuptodate(struct folio *folio, size_t from, size_t to)
+ 	do {
+ 		if (bh_offset(bh) + blocksize <= from)
+ 			continue;
+-		if (bh_offset(bh) > to)
++		if (bh_offset(bh) >= to)
+ 			break;
+ 		wait_on_buffer(bh);
+ 		if (buffer_uptodate(bh))
+diff --git a/fs/ext4/orphan.c b/fs/ext4/orphan.c
+index c53918768cb256..05997b4d012038 100644
+--- a/fs/ext4/orphan.c
++++ b/fs/ext4/orphan.c
+@@ -513,7 +513,7 @@ void ext4_release_orphan_info(struct super_block *sb)
+ 		return;
+ 	for (i = 0; i < oi->of_blocks; i++)
+ 		brelse(oi->of_binfo[i].ob_bh);
+-	kfree(oi->of_binfo);
++	kvfree(oi->of_binfo);
+ }
+ 
+ static struct ext4_orphan_block_tail *ext4_orphan_block_tail(
+@@ -584,9 +584,20 @@ int ext4_init_orphan_info(struct super_block *sb)
+ 		ext4_msg(sb, KERN_ERR, "get orphan inode failed");
+ 		return PTR_ERR(inode);
+ 	}
++	/*
++	 * This is just an artificial limit to prevent a corrupted fs from
++	 * consuming absurd amounts of memory when pinning blocks of the
++	 * orphan file in memory.
++	 */
++	if (inode->i_size > 8 << 20) {
++		ext4_msg(sb, KERN_ERR, "orphan file too big: %llu",
++			 (unsigned long long)inode->i_size);
++		ret = -EFSCORRUPTED;
++		goto out_put;
++	}
+ 	oi->of_blocks = inode->i_size >> sb->s_blocksize_bits;
+ 	oi->of_csum_seed = EXT4_I(inode)->i_csum_seed;
+-	oi->of_binfo = kmalloc_array(oi->of_blocks,
++	oi->of_binfo = kvmalloc_array(oi->of_blocks,
+ 				     sizeof(struct ext4_orphan_block),
+ 				     GFP_KERNEL);
+ 	if (!oi->of_binfo) {
+@@ -627,7 +638,7 @@ int ext4_init_orphan_info(struct super_block *sb)
+ out_free:
+ 	for (i--; i >= 0; i--)
+ 		brelse(oi->of_binfo[i].ob_bh);
+-	kfree(oi->of_binfo);
++	kvfree(oi->of_binfo);
+ out_put:
+ 	iput(inode);
+ 	return ret;
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index cbb65e61c4926f..78ecdedadb076a 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -266,6 +266,15 @@ struct buffer_head *ext4_sb_bread_unmovable(struct super_block *sb,
+ 	return __ext4_sb_bread_gfp(sb, block, 0, gfp);
+ }
+ 
++struct buffer_head *ext4_sb_bread_nofail(struct super_block *sb,
++					 sector_t block)
++{
++	gfp_t gfp = mapping_gfp_constraint(sb->s_bdev->bd_mapping,
++			~__GFP_FS) | __GFP_MOVABLE | __GFP_NOFAIL;
++
++	return __ext4_sb_bread_gfp(sb, block, 0, gfp);
++}
++
+ void ext4_sb_breadahead_unmovable(struct super_block *sb, sector_t block)
+ {
+ 	struct buffer_head *bh = bdev_getblk(sb->s_bdev, block,
+@@ -2484,7 +2493,7 @@ static int parse_apply_sb_mount_options(struct super_block *sb,
+ 					struct ext4_fs_context *m_ctx)
+ {
+ 	struct ext4_sb_info *sbi = EXT4_SB(sb);
+-	char *s_mount_opts = NULL;
++	char s_mount_opts[65];
+ 	struct ext4_fs_context *s_ctx = NULL;
+ 	struct fs_context *fc = NULL;
+ 	int ret = -ENOMEM;
+@@ -2492,15 +2501,11 @@ static int parse_apply_sb_mount_options(struct super_block *sb,
+ 	if (!sbi->s_es->s_mount_opts[0])
+ 		return 0;
+ 
+-	s_mount_opts = kstrndup(sbi->s_es->s_mount_opts,
+-				sizeof(sbi->s_es->s_mount_opts),
+-				GFP_KERNEL);
+-	if (!s_mount_opts)
+-		return ret;
++	strscpy_pad(s_mount_opts, sbi->s_es->s_mount_opts);
+ 
+ 	fc = kzalloc(sizeof(struct fs_context), GFP_KERNEL);
+ 	if (!fc)
+-		goto out_free;
++		return -ENOMEM;
+ 
+ 	s_ctx = kzalloc(sizeof(struct ext4_fs_context), GFP_KERNEL);
+ 	if (!s_ctx)
+@@ -2532,11 +2537,8 @@ static int parse_apply_sb_mount_options(struct super_block *sb,
+ 	ret = 0;
+ 
+ out_free:
+-	if (fc) {
+-		ext4_fc_free(fc);
+-		kfree(fc);
+-	}
+-	kfree(s_mount_opts);
++	ext4_fc_free(fc);
++	kfree(fc);
+ 	return ret;
+ }
+ 
+diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
+index 6ff94cdf1515c5..5ddfa4801bb300 100644
+--- a/fs/ext4/xattr.c
++++ b/fs/ext4/xattr.c
+@@ -251,6 +251,10 @@ check_xattrs(struct inode *inode, struct buffer_head *bh,
+ 			err_str = "invalid ea_ino";
+ 			goto errout;
+ 		}
++		if (ea_ino && !size) {
++			err_str = "invalid size in ea xattr";
++			goto errout;
++		}
+ 		if (size > EXT4_XATTR_SIZE_MAX) {
+ 			err_str = "e_value size too large";
+ 			goto errout;
+@@ -1036,7 +1040,7 @@ static int ext4_xattr_inode_update_ref(handle_t *handle, struct inode *ea_inode,
+ 				       int ref_change)
+ {
+ 	struct ext4_iloc iloc;
+-	s64 ref_count;
++	u64 ref_count;
+ 	int ret;
+ 
+ 	inode_lock_nested(ea_inode, I_MUTEX_XATTR);
+@@ -1046,13 +1050,17 @@ static int ext4_xattr_inode_update_ref(handle_t *handle, struct inode *ea_inode,
+ 		goto out;
+ 
+ 	ref_count = ext4_xattr_inode_get_ref(ea_inode);
++	if ((ref_count == 0 && ref_change < 0) || (ref_count == U64_MAX && ref_change > 0)) {
++		ext4_error_inode(ea_inode, __func__, __LINE__, 0,
++			"EA inode %lu ref wraparound: ref_count=%lld ref_change=%d",
++			ea_inode->i_ino, ref_count, ref_change);
++		ret = -EFSCORRUPTED;
++		goto out;
++	}
+ 	ref_count += ref_change;
+ 	ext4_xattr_inode_set_ref(ea_inode, ref_count);
+ 
+ 	if (ref_change > 0) {
+-		WARN_ONCE(ref_count <= 0, "EA inode %lu ref_count=%lld",
+-			  ea_inode->i_ino, ref_count);
+-
+ 		if (ref_count == 1) {
+ 			WARN_ONCE(ea_inode->i_nlink, "EA inode %lu i_nlink=%u",
+ 				  ea_inode->i_ino, ea_inode->i_nlink);
+@@ -1061,9 +1069,6 @@ static int ext4_xattr_inode_update_ref(handle_t *handle, struct inode *ea_inode,
+ 			ext4_orphan_del(handle, ea_inode);
+ 		}
+ 	} else {
+-		WARN_ONCE(ref_count < 0, "EA inode %lu ref_count=%lld",
+-			  ea_inode->i_ino, ref_count);
+-
+ 		if (ref_count == 0) {
+ 			WARN_ONCE(ea_inode->i_nlink != 1,
+ 				  "EA inode %lu i_nlink=%u",
+diff --git a/fs/file.c b/fs/file.c
+index bfc9eb9e722984..68c1bcc6b7e970 100644
+--- a/fs/file.c
++++ b/fs/file.c
+@@ -1262,7 +1262,10 @@ int replace_fd(unsigned fd, struct file *file, unsigned flags)
+ 	err = expand_files(files, fd);
+ 	if (unlikely(err < 0))
+ 		goto out_unlock;
+-	return do_dup2(files, file, fd, flags);
++	err = do_dup2(files, file, fd, flags);
++	if (err < 0)
++		return err;
++	return 0;
+ 
+ out_unlock:
+ 	spin_unlock(&files->file_lock);
+diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
+index 4ae226778d646c..28edfad85c6280 100644
+--- a/fs/fs-writeback.c
++++ b/fs/fs-writeback.c
+@@ -446,22 +446,23 @@ static bool inode_do_switch_wbs(struct inode *inode,
+ 	 * Transfer to @new_wb's IO list if necessary.  If the @inode is dirty,
+ 	 * the specific list @inode was on is ignored and the @inode is put on
+ 	 * ->b_dirty which is always correct including from ->b_dirty_time.
+-	 * The transfer preserves @inode->dirtied_when ordering.  If the @inode
+-	 * was clean, it means it was on the b_attached list, so move it onto
+-	 * the b_attached list of @new_wb.
++	 * If the @inode was clean, it means it was on the b_attached list, so
++	 * move it onto the b_attached list of @new_wb.
+ 	 */
+ 	if (!list_empty(&inode->i_io_list)) {
+ 		inode->i_wb = new_wb;
+ 
+ 		if (inode->i_state & I_DIRTY_ALL) {
+-			struct inode *pos;
+-
+-			list_for_each_entry(pos, &new_wb->b_dirty, i_io_list)
+-				if (time_after_eq(inode->dirtied_when,
+-						  pos->dirtied_when))
+-					break;
++			/*
++			 * We need to keep the b_dirty list sorted by
++			 * dirtied_time_when. However, properly sorting the
++			 * inode into the list gets too expensive when
++			 * switching many inodes. So just attach the inode at
++			 * the end of the dirty list and clobber its
++			 * dirtied_time_when.
++			 */
++			inode->dirtied_time_when = jiffies;
+ 			inode_io_list_move_locked(inode, new_wb,
+-						  pos->i_io_list.prev);
++						  &new_wb->b_dirty);
+ 		} else {
+ 			inode_cgwb_move_to_attached(inode, new_wb);
+ 		}
+@@ -503,6 +504,7 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
+ 	 */
+ 	down_read(&bdi->wb_switch_rwsem);
+ 
++	inodep = isw->inodes;
+ 	/*
+ 	 * By the time control reaches here, RCU grace period has passed
+ 	 * since I_WB_SWITCH assertion and all wb stat update transactions
+@@ -513,6 +515,7 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
+ 	 * gives us exclusion against all wb related operations on @inode
+ 	 * including IO list manipulations and stat updates.
+ 	 */
++relock:
+ 	if (old_wb < new_wb) {
+ 		spin_lock(&old_wb->list_lock);
+ 		spin_lock_nested(&new_wb->list_lock, SINGLE_DEPTH_NESTING);
+@@ -521,10 +524,17 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
+ 		spin_lock_nested(&old_wb->list_lock, SINGLE_DEPTH_NESTING);
+ 	}
+ 
+-	for (inodep = isw->inodes; *inodep; inodep++) {
++	while (*inodep) {
+ 		WARN_ON_ONCE((*inodep)->i_wb != old_wb);
+ 		if (inode_do_switch_wbs(*inodep, old_wb, new_wb))
+ 			nr_switched++;
++		inodep++;
++		if (*inodep && need_resched()) {
++			spin_unlock(&new_wb->list_lock);
++			spin_unlock(&old_wb->list_lock);
++			cond_resched();
++			goto relock;
++		}
+ 	}
+ 
+ 	spin_unlock(&new_wb->list_lock);
+diff --git a/fs/fsopen.c b/fs/fsopen.c
+index 6cef3deccdedfa..8903d2c23db4e9 100644
+--- a/fs/fsopen.c
++++ b/fs/fsopen.c
+@@ -18,50 +18,56 @@
+ #include "internal.h"
+ #include "mount.h"
+ 
++static inline const char *fetch_message_locked(struct fc_log *log, size_t len,
++					       bool *need_free)
++{
++	const char *p;
++	int index;
++
++	if (unlikely(log->head == log->tail))
++		return ERR_PTR(-ENODATA);
++
++	index = log->tail & (ARRAY_SIZE(log->buffer) - 1);
++	p = log->buffer[index];
++	if (unlikely(strlen(p) > len))
++		return ERR_PTR(-EMSGSIZE);
++
++	log->buffer[index] = NULL;
++	*need_free = log->need_free & (1 << index);
++	log->need_free &= ~(1 << index);
++	log->tail++;
++
++	return p;
++}
++
+ /*
+  * Allow the user to read back any error, warning or informational messages.
++ * Only one message is returned for each read(2) call.
+  */
+ static ssize_t fscontext_read(struct file *file,
+ 			      char __user *_buf, size_t len, loff_t *pos)
+ {
+ 	struct fs_context *fc = file->private_data;
+-	struct fc_log *log = fc->log.log;
+-	unsigned int logsize = ARRAY_SIZE(log->buffer);
+-	ssize_t ret;
+-	char *p;
++	ssize_t err;
++	const char *p __free(kfree) = NULL, *message;
+ 	bool need_free;
+-	int index, n;
++	int n;
+ 
+-	ret = mutex_lock_interruptible(&fc->uapi_mutex);
+-	if (ret < 0)
+-		return ret;
+-
+-	if (log->head == log->tail) {
+-		mutex_unlock(&fc->uapi_mutex);
+-		return -ENODATA;
+-	}
+-
+-	index = log->tail & (logsize - 1);
+-	p = log->buffer[index];
+-	need_free = log->need_free & (1 << index);
+-	log->buffer[index] = NULL;
+-	log->need_free &= ~(1 << index);
+-	log->tail++;
++	err = mutex_lock_interruptible(&fc->uapi_mutex);
++	if (err < 0)
++		return err;
++	message = fetch_message_locked(fc->log.log, len, &need_free);
+ 	mutex_unlock(&fc->uapi_mutex);
++	if (IS_ERR(message))
++		return PTR_ERR(message);
+ 
+-	ret = -EMSGSIZE;
+-	n = strlen(p);
+-	if (n > len)
+-		goto err_free;
+-	ret = -EFAULT;
+-	if (copy_to_user(_buf, p, n) != 0)
+-		goto err_free;
+-	ret = n;
+-
+-err_free:
+ 	if (need_free)
+-		kfree(p);
+-	return ret;
++		p = message;
++
++	n = strlen(message);
++	if (copy_to_user(_buf, message, n))
++		return -EFAULT;
++	return n;
+ }
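+/*
+ * Editor's sketch (illustrative, not part of the upstream patch): a
+ * typical user-space consumer; each read(2) on an fsopen(2) fd now
+ * returns exactly one log message, and -ENODATA ends the loop
+ * (fs_fd is hypothetical, a descriptor previously returned by fsopen(2)):
+ *
+ *	char buf[256];
+ *	ssize_t n;
+ *
+ *	while ((n = read(fs_fd, buf, sizeof(buf))) > 0)
+ *		fprintf(stderr, "%.*s\n", (int)n, buf);
+ */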
+ 
+ static int fscontext_release(struct inode *inode, struct file *file)
+diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
+index 1f64ae6d7a69e5..8207855f9af257 100644
+--- a/fs/fuse/dev.c
++++ b/fs/fuse/dev.c
+@@ -1989,7 +1989,7 @@ static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
+ 	 */
+ 	if (!oh.unique) {
+ 		err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);
+-		goto out;
++		goto copy_finish;
+ 	}
+ 
+ 	err = -EINVAL;
+diff --git a/fs/fuse/file.c b/fs/fuse/file.c
+index 49659d1b293218..a8218a3bc0b4a0 100644
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -355,8 +355,14 @@ void fuse_file_release(struct inode *inode, struct fuse_file *ff,
+ 	 * Make the release synchronous if this is a fuseblk mount,
+ 	 * synchronous RELEASE is allowed (and desirable) in this case
+ 	 * because the server can be trusted not to screw up.
++	 *
++	 * Always use the asynchronous file put because the current thread
++	 * might be the fuse server.  This can happen if a process starts some
++	 * aio and closes the fd before the aio completes.  Since aio takes its
++	 * own ref to the file, the IO completion has to drop the ref, which is
++	 * how the fuse server can end up closing its clients' files.
+ 	 */
+-	fuse_file_put(ff, ff->fm->fc->destroy);
++	fuse_file_put(ff, false);
+ }
+ 
+ void fuse_release_common(struct file *file, bool isdir)
+diff --git a/fs/minix/inode.c b/fs/minix/inode.c
+index f007e389d5d29b..fc01f9dc8c3917 100644
+--- a/fs/minix/inode.c
++++ b/fs/minix/inode.c
+@@ -491,8 +491,14 @@ void minix_set_inode(struct inode *inode, dev_t rdev)
+ 		inode->i_op = &minix_symlink_inode_operations;
+ 		inode_nohighmem(inode);
+ 		inode->i_mapping->a_ops = &minix_aops;
+-	} else
++	} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
++		   S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
+ 		init_special_inode(inode, inode->i_mode, rdev);
++	} else {
++		printk(KERN_DEBUG "MINIX-fs: Invalid file type 0%04o for inode %lu.\n",
++		       inode->i_mode, inode->i_ino);
++		make_bad_inode(inode);
++	}
+ }
+ 
+ /*
+diff --git a/fs/namei.c b/fs/namei.c
+index 6795600c5738a5..1eb20cb5e58f9e 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -1388,6 +1388,10 @@ static int follow_automount(struct path *path, int *count, unsigned lookup_flags
+ 	    dentry->d_inode)
+ 		return -EISDIR;
+ 
++	/* No need to trigger automounts if mountpoint crossing is disabled. */
++	if (lookup_flags & LOOKUP_NO_XDEV)
++		return -EXDEV;
++
+ 	if (count && (*count)++ >= MAXSYMLINKS)
+ 		return -ELOOP;
+ 
+@@ -1411,6 +1415,10 @@ static int __traverse_mounts(struct path *path, unsigned flags, bool *jumped,
+ 		/* Allow the filesystem to manage the transit without i_mutex
+ 		 * being held. */
+ 		if (flags & DCACHE_MANAGE_TRANSIT) {
++			if (lookup_flags & LOOKUP_NO_XDEV) {
++				ret = -EXDEV;
++				break;
++			}
+ 			ret = path->dentry->d_op->d_manage(path, false);
+ 			flags = smp_load_acquire(&path->dentry->d_flags);
+ 			if (ret < 0)
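+/*
+ * Editor's sketch (illustrative, not part of the upstream patch):
+ * LOOKUP_NO_XDEV is what openat2(2) sets for RESOLVE_NO_XDEV, so after
+ * this change an automount point fails the walk instead of being
+ * triggered (the path below is hypothetical):
+ *
+ *	struct open_how how = {
+ *		.flags = O_RDONLY,
+ *		.resolve = RESOLVE_NO_XDEV,
+ *	};
+ *	fd = syscall(SYS_openat2, AT_FDCWD, "/net/host/share",
+ *		     &how, sizeof(how));
+ *	// fails with -EXDEV rather than triggering the automount
+ */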
+diff --git a/fs/namespace.c b/fs/namespace.c
+index c8519302f58240..cc4926d53e7dea 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -65,6 +65,15 @@ static int __init set_mphash_entries(char *str)
+ }
+ __setup("mphash_entries=", set_mphash_entries);
+ 
++static char * __initdata initramfs_options;
++static int __init initramfs_options_setup(char *str)
++{
++	initramfs_options = str;
++	return 1;
++}
++
++__setup("initramfs_options=", initramfs_options_setup);
++
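+/*
+ * Editor's note (illustrative, not part of the upstream patch): this
+ * exposes the rootfs (tmpfs) mount options on the kernel command line;
+ * init_mount_tree() below passes the string straight to
+ * vfs_kern_mount(), so the usual tmpfs options apply, e.g.:
+ *
+ *	initramfs_options=size=50%,nr_inodes=1048576
+ */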
+ static u64 event;
+ static DEFINE_IDA(mnt_id_ida);
+ static DEFINE_IDA(mnt_group_ida);
+@@ -144,7 +153,7 @@ static void mnt_ns_release(struct mnt_namespace *ns)
+ 	lockdep_assert_not_held(&mnt_ns_tree_lock);
+ 
+ 	/* keep alive for {list,stat}mount() */
+-	if (refcount_dec_and_test(&ns->passive)) {
++	if (ns && refcount_dec_and_test(&ns->passive)) {
+ 		put_user_ns(ns->user_ns);
+ 		kfree(ns);
+ 	}
+@@ -5200,7 +5209,6 @@ static int grab_requested_root(struct mnt_namespace *ns, struct path *root)
+ static int do_statmount(struct kstatmount *s, u64 mnt_id, u64 mnt_ns_id,
+ 			struct mnt_namespace *ns)
+ {
+-	struct path root __free(path_put) = {};
+ 	struct mount *m;
+ 	int err;
+ 
+@@ -5212,7 +5220,7 @@ static int do_statmount(struct kstatmount *s, u64 mnt_id, u64 mnt_ns_id,
+ 	if (!s->mnt)
+ 		return -ENOENT;
+ 
+-	err = grab_requested_root(ns, &root);
++	err = grab_requested_root(ns, &s->root);
+ 	if (err)
+ 		return err;
+ 
+@@ -5221,15 +5229,13 @@ static int do_statmount(struct kstatmount *s, u64 mnt_id, u64 mnt_ns_id,
+ 	 * mounts to show users.
+ 	 */
+ 	m = real_mount(s->mnt);
+-	if (!is_path_reachable(m, m->mnt.mnt_root, &root) &&
++	if (!is_path_reachable(m, m->mnt.mnt_root, &s->root) &&
+ 	    !ns_capable_noaudit(ns->user_ns, CAP_SYS_ADMIN))
+ 		return -EPERM;
+ 
+ 	err = security_sb_statfs(s->mnt->mnt_root);
+ 	if (err)
+ 		return err;
+-
+-	s->root = root;
+ 	if (s->mask & STATMOUNT_SB_BASIC)
+ 		statmount_sb_basic(s);
+ 
+@@ -5406,28 +5412,40 @@ SYSCALL_DEFINE4(statmount, const struct mnt_id_req __user *, req,
+ 	if (!ret)
+ 		ret = copy_statmount_to_user(ks);
+ 	kvfree(ks->seq.buf);
++	path_put(&ks->root);
+ 	if (retry_statmount(ret, &seq_size))
+ 		goto retry;
+ 	return ret;
+ }
+ 
+-static ssize_t do_listmount(struct mnt_namespace *ns, u64 mnt_parent_id,
+-			    u64 last_mnt_id, u64 *mnt_ids, size_t nr_mnt_ids,
+-			    bool reverse)
++struct klistmount {
++	u64 last_mnt_id;
++	u64 mnt_parent_id;
++	u64 *kmnt_ids;
++	u32 nr_mnt_ids;
++	struct mnt_namespace *ns;
++	struct path root;
++};
++
++static ssize_t do_listmount(struct klistmount *kls, bool reverse)
+ {
+-	struct path root __free(path_put) = {};
++	struct mnt_namespace *ns = kls->ns;
++	u64 mnt_parent_id = kls->mnt_parent_id;
++	u64 last_mnt_id = kls->last_mnt_id;
++	u64 *mnt_ids = kls->kmnt_ids;
++	size_t nr_mnt_ids = kls->nr_mnt_ids;
+ 	struct path orig;
+ 	struct mount *r, *first;
+ 	ssize_t ret;
+ 
+ 	rwsem_assert_held(&namespace_sem);
+ 
+-	ret = grab_requested_root(ns, &root);
++	ret = grab_requested_root(ns, &kls->root);
+ 	if (ret)
+ 		return ret;
+ 
+ 	if (mnt_parent_id == LSMT_ROOT) {
+-		orig = root;
++		orig = kls->root;
+ 	} else {
+ 		orig.mnt = lookup_mnt_in_ns(mnt_parent_id, ns);
+ 		if (!orig.mnt)
+@@ -5439,7 +5457,7 @@ static ssize_t do_listmount(struct mnt_namespace *ns, u64 mnt_parent_id,
+ 	 * Don't trigger audit denials. We just want to determine what
+ 	 * mounts to show users.
+ 	 */
+-	if (!is_path_reachable(real_mount(orig.mnt), orig.dentry, &root) &&
++	if (!is_path_reachable(real_mount(orig.mnt), orig.dentry, &kls->root) &&
+ 	    !ns_capable_noaudit(ns->user_ns, CAP_SYS_ADMIN))
+ 		return -EPERM;
+ 
+@@ -5472,14 +5490,45 @@ static ssize_t do_listmount(struct mnt_namespace *ns, u64 mnt_parent_id,
+ 	return ret;
+ }
+ 
++static void __free_klistmount_free(const struct klistmount *kls)
++{
++	path_put(&kls->root);
++	kvfree(kls->kmnt_ids);
++	mnt_ns_release(kls->ns);
++}
++
++static inline int prepare_klistmount(struct klistmount *kls, struct mnt_id_req *kreq,
++				     size_t nr_mnt_ids)
++{
++	u64 last_mnt_id = kreq->param;
++
++	/* The first valid unique mount id is MNT_UNIQUE_ID_OFFSET + 1. */
++	if (last_mnt_id != 0 && last_mnt_id <= MNT_UNIQUE_ID_OFFSET)
++		return -EINVAL;
++
++	kls->last_mnt_id = last_mnt_id;
++
++	kls->nr_mnt_ids = nr_mnt_ids;
++	kls->kmnt_ids = kvmalloc_array(nr_mnt_ids, sizeof(*kls->kmnt_ids),
++				       GFP_KERNEL_ACCOUNT);
++	if (!kls->kmnt_ids)
++		return -ENOMEM;
++
++	kls->ns = grab_requested_mnt_ns(kreq);
++	if (!kls->ns)
++		return -ENOENT;
++
++	kls->mnt_parent_id = kreq->mnt_id;
++	return 0;
++}
++
+ SYSCALL_DEFINE4(listmount, const struct mnt_id_req __user *, req,
+ 		u64 __user *, mnt_ids, size_t, nr_mnt_ids, unsigned int, flags)
+ {
+-	u64 *kmnt_ids __free(kvfree) = NULL;
++	struct klistmount kls __free(klistmount_free) = {};
+ 	const size_t maxcount = 1000000;
+-	struct mnt_namespace *ns __free(mnt_ns_release) = NULL;
+ 	struct mnt_id_req kreq;
+-	u64 last_mnt_id;
+ 	ssize_t ret;
+ 
+ 	if (flags & ~LISTMOUNT_REVERSE)
+@@ -5500,31 +5549,20 @@ SYSCALL_DEFINE4(listmount, const struct mnt_id_req __user *, req,
+ 	if (ret)
+ 		return ret;
+ 
+-	last_mnt_id = kreq.param;
+-	/* The first valid unique mount id is MNT_UNIQUE_ID_OFFSET + 1. */
+-	if (last_mnt_id != 0 && last_mnt_id <= MNT_UNIQUE_ID_OFFSET)
+-		return -EINVAL;
+-
+-	kmnt_ids = kvmalloc_array(nr_mnt_ids, sizeof(*kmnt_ids),
+-				  GFP_KERNEL_ACCOUNT);
+-	if (!kmnt_ids)
+-		return -ENOMEM;
+-
+-	ns = grab_requested_mnt_ns(&kreq);
+-	if (!ns)
+-		return -ENOENT;
++	ret = prepare_klistmount(&kls, &kreq, nr_mnt_ids);
++	if (ret)
++		return ret;
+ 
+-	if (kreq.mnt_ns_id && (ns != current->nsproxy->mnt_ns) &&
+-	    !ns_capable_noaudit(ns->user_ns, CAP_SYS_ADMIN))
++	if (kreq.mnt_ns_id && (kls.ns != current->nsproxy->mnt_ns) &&
++	    !ns_capable_noaudit(kls.ns->user_ns, CAP_SYS_ADMIN))
+ 		return -ENOENT;
+ 
+ 	scoped_guard(rwsem_read, &namespace_sem)
+-		ret = do_listmount(ns, kreq.mnt_id, last_mnt_id, kmnt_ids,
+-				   nr_mnt_ids, (flags & LISTMOUNT_REVERSE));
++		ret = do_listmount(&kls, (flags & LISTMOUNT_REVERSE));
+ 	if (ret <= 0)
+ 		return ret;
+ 
+-	if (copy_to_user(mnt_ids, kmnt_ids, ret * sizeof(*mnt_ids)))
++	if (copy_to_user(mnt_ids, kls.kmnt_ids, ret * sizeof(*mnt_ids)))
+ 		return -EFAULT;
+ 
+ 	return ret;
+@@ -5537,7 +5575,7 @@ static void __init init_mount_tree(void)
+ 	struct mnt_namespace *ns;
+ 	struct path root;
+ 
+-	mnt = vfs_kern_mount(&rootfs_fs_type, 0, "rootfs", NULL);
++	mnt = vfs_kern_mount(&rootfs_fs_type, 0, "rootfs", initramfs_options);
+ 	if (IS_ERR(mnt))
+ 		panic("Can't create rootfs");
+ 
+diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
+index 6cf92498a5ac6f..86bdc7d23fb907 100644
+--- a/fs/nfs/callback.c
++++ b/fs/nfs/callback.c
+@@ -211,10 +211,6 @@ static struct svc_serv *nfs_callback_create_svc(int minorversion)
+ 		return ERR_PTR(-ENOMEM);
+ 	}
+ 	cb_info->serv = serv;
+-	/* As there is only one thread we need to over-ride the
+-	 * default maximum of 80 connections
+-	 */
+-	serv->sv_maxconn = 1024;
+ 	dprintk("nfs_callback_create_svc: service created\n");
+ 	return serv;
+ }
+diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
+index fdeb0b34a3d39b..4254ba3ee7c572 100644
+--- a/fs/nfs/callback_xdr.c
++++ b/fs/nfs/callback_xdr.c
+@@ -984,6 +984,7 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp)
+ 			nfs_put_client(cps.clp);
+ 			goto out_invalidcred;
+ 		}
++		svc_xprt_set_valid(rqstp->rq_xprt);
+ 	}
+ 
+ 	cps.minorversion = hdr_arg.minorversion;
+diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
+index 49aede376d8668..3216416ca04261 100644
+--- a/fs/nfsd/export.c
++++ b/fs/nfsd/export.c
+@@ -1075,47 +1075,62 @@ static struct svc_export *exp_find(struct cache_detail *cd,
+ }
+ 
+ /**
+- * check_nfsd_access - check if access to export is allowed.
++ * check_xprtsec_policy - check if access to export is allowed by the
++ *			  xprtsec policy
+  * @exp: svc_export that is being accessed.
+- * @rqstp: svc_rqst attempting to access @exp (will be NULL for LOCALIO).
++ * @rqstp: svc_rqst attempting to access @exp.
++ *
++ * Helper function for check_nfsd_access().  Note that callers should be
++ * using check_nfsd_access() instead of calling this function directly.  The
++ * one exception is __fh_verify() since it has logic that may result in one
++ * or both of the helpers being skipped.
+  *
+  * Return values:
+  *   %nfs_ok if access is granted, or
+  *   %nfserr_wrongsec if access is denied
+  */
+-__be32 check_nfsd_access(struct svc_export *exp, struct svc_rqst *rqstp)
++__be32 check_xprtsec_policy(struct svc_export *exp, struct svc_rqst *rqstp)
+ {
+-	struct exp_flavor_info *f, *end = exp->ex_flavors + exp->ex_nflavors;
+-	struct svc_xprt *xprt;
+-
+-	/*
+-	 * If rqstp is NULL, this is a LOCALIO request which will only
+-	 * ever use a filehandle/credential pair for which access has
+-	 * been affirmed (by ACCESS or OPEN NFS requests) over the
+-	 * wire. So there is no need for further checks here.
+-	 */
+-	if (!rqstp)
+-		return nfs_ok;
+-
+-	xprt = rqstp->rq_xprt;
++	struct svc_xprt *xprt = rqstp->rq_xprt;
+ 
+ 	if (exp->ex_xprtsec_modes & NFSEXP_XPRTSEC_NONE) {
+ 		if (!test_bit(XPT_TLS_SESSION, &xprt->xpt_flags))
+-			goto ok;
++			return nfs_ok;
+ 	}
+ 	if (exp->ex_xprtsec_modes & NFSEXP_XPRTSEC_TLS) {
+ 		if (test_bit(XPT_TLS_SESSION, &xprt->xpt_flags) &&
+ 		    !test_bit(XPT_PEER_AUTH, &xprt->xpt_flags))
+-			goto ok;
++			return nfs_ok;
+ 	}
+ 	if (exp->ex_xprtsec_modes & NFSEXP_XPRTSEC_MTLS) {
+ 		if (test_bit(XPT_TLS_SESSION, &xprt->xpt_flags) &&
+ 		    test_bit(XPT_PEER_AUTH, &xprt->xpt_flags))
+-			goto ok;
++			return nfs_ok;
+ 	}
+-	goto denied;
++	return nfserr_wrongsec;
++}
++
++/**
++ * check_security_flavor - check if access to export is allowed by the
++ *			   security flavor
++ * @exp: svc_export that is being accessed.
++ * @rqstp: svc_rqst attempting to access @exp.
++ * @may_bypass_gss: reduce strictness of authorization check
++ *
++ * Helper function for check_nfsd_access().  Note that callers should be
++ * using check_nfsd_access() instead of calling this function directly.  The
++ * one exception is __fh_verify() since it has logic that may result in one
++ * or both of the helpers being skipped.
++ *
++ * Return values:
++ *   %nfs_ok if access is granted, or
++ *   %nfserr_wrongsec if access is denied
++ */
++__be32 check_security_flavor(struct svc_export *exp, struct svc_rqst *rqstp,
++			     bool may_bypass_gss)
++{
++	struct exp_flavor_info *f, *end = exp->ex_flavors + exp->ex_nflavors;
+ 
+-ok:
+ 	/* legacy gss-only clients are always OK: */
+ 	if (exp->ex_client == rqstp->rq_gssclient)
+ 		return nfs_ok;
+@@ -1140,10 +1155,47 @@ __be32 check_nfsd_access(struct svc_export *exp, struct svc_rqst *rqstp)
+ 	if (nfsd4_spo_must_allow(rqstp))
+ 		return nfs_ok;
+ 
+-denied:
++	/* Some calls may be processed without authentication
++	 * on GSS exports. For example, NFS2/3 calls on the root
++	 * directory; see section 2.3.2 of RFC 2623.
++	 * For "may_bypass_gss", check that the export has really
++	 * enabled some flavor with authentication (GSS or any
++	 * other) and also that the auth flavor actually used is
++	 * one without authentication (none or sys).
++	 */
++	if (may_bypass_gss && (
++	     rqstp->rq_cred.cr_flavor == RPC_AUTH_NULL ||
++	     rqstp->rq_cred.cr_flavor == RPC_AUTH_UNIX)) {
++		for (f = exp->ex_flavors; f < end; f++) {
++			if (f->pseudoflavor >= RPC_AUTH_DES)
++				return 0;
++		}
++	}
++
+ 	return nfserr_wrongsec;
+ }
+ 
++/**
++ * check_nfsd_access - check if access to export is allowed.
++ * @exp: svc_export that is being accessed.
++ * @rqstp: svc_rqst attempting to access @exp.
++ * @may_bypass_gss: reduce strictness of authorization check
++ *
++ * Return values:
++ *   %nfs_ok if access is granted, or
++ *   %nfserr_wrongsec if access is denied
++ */
++__be32 check_nfsd_access(struct svc_export *exp, struct svc_rqst *rqstp,
++			 bool may_bypass_gss)
++{
++	__be32 status;
++
++	status = check_xprtsec_policy(exp, rqstp);
++	if (status != nfs_ok)
++		return status;
++	return check_security_flavor(exp, rqstp, may_bypass_gss);
++}
++
+ /*
+  * Uses rq_client and rq_gssclient to find an export; uses rq_client (an
+  * auth_unix client) if it's available and has secinfo information;
+diff --git a/fs/nfsd/export.h b/fs/nfsd/export.h
+index 3794ae253a7016..0f4d34943e2c0e 100644
+--- a/fs/nfsd/export.h
++++ b/fs/nfsd/export.h
+@@ -101,7 +101,11 @@ struct svc_expkey {
+ 
+ struct svc_cred;
+ int nfsexp_flags(struct svc_cred *cred, struct svc_export *exp);
+-__be32 check_nfsd_access(struct svc_export *exp, struct svc_rqst *rqstp);
++__be32 check_xprtsec_policy(struct svc_export *exp, struct svc_rqst *rqstp);
++__be32 check_security_flavor(struct svc_export *exp, struct svc_rqst *rqstp,
++			     bool may_bypass_gss);
++__be32 check_nfsd_access(struct svc_export *exp, struct svc_rqst *rqstp,
++			 bool may_bypass_gss);
+ 
+ /*
+  * Function declarations
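
check_nfsd_access() is now a thin wrapper over the two helpers declared above, which is what lets __fh_verify() apply them selectively. A minimal sketch, not part of the patch, of a caller using the split API; example_verify is a hypothetical name and only the symbols introduced above are assumed:

	static __be32 example_verify(struct svc_export *exp, struct svc_rqst *rqstp,
				     int access)
	{
		__be32 err;

		/* NLM traffic skips the transport-security check because
		 * lockd does not support xprtsec (see __fh_verify below). */
		if (!(access & NFSD_MAY_NLM)) {
			err = check_xprtsec_policy(exp, rqstp);
			if (err != nfs_ok)
				return err;
		}
		/* The flavor check may be relaxed for AUTH_NULL/AUTH_UNIX. */
		return check_security_flavor(exp, rqstp,
					     !!(access & NFSD_MAY_BYPASS_GSS));
	}
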
+diff --git a/fs/nfsd/lockd.c b/fs/nfsd/lockd.c
+index 46a7f9b813e527..6b042218668b82 100644
+--- a/fs/nfsd/lockd.c
++++ b/fs/nfsd/lockd.c
+@@ -38,16 +38,40 @@ nlm_fopen(struct svc_rqst *rqstp, struct nfs_fh *f, struct file **filp,
+ 	memcpy(&fh.fh_handle.fh_raw, f->data, f->size);
+ 	fh.fh_export = NULL;
+ 
++	/*
++	 * Allow BYPASS_GSS as some client implementations use AUTH_SYS
++	 * for NLM even when GSS is used for NFS.
++	 * Allow OWNER_OVERRIDE as permission might have been changed
++	 * after the file was opened.
++	 * Pass MAY_NLM so that authentication can be completely bypassed
++	 * if NFSEXP_NOAUTHNLM is set.  Some older clients use AUTH_NULL
++	 * for NLM requests.
++	 */
+ 	access = (mode == O_WRONLY) ? NFSD_MAY_WRITE : NFSD_MAY_READ;
+-	access |= NFSD_MAY_LOCK;
++	access |= NFSD_MAY_NLM | NFSD_MAY_OWNER_OVERRIDE | NFSD_MAY_BYPASS_GSS;
+ 	nfserr = nfsd_open(rqstp, &fh, S_IFREG, access, filp);
+ 	fh_put(&fh);
+- 	/* We return nlm error codes as nlm doesn't know
++	/* We return nlm error codes as nlm doesn't know
+ 	 * about nfsd, but nfsd does know about nlm..
+ 	 */
+ 	switch (nfserr) {
+ 	case nfs_ok:
+ 		return 0;
++	case nfserr_jukebox:
++		/* this error can indicate the presence of a conflicting
++		 * delegation to an NLM lock request.  Options are:
++		 * (1) For now, drop this request and make the client
++		 * retry.  When the delegation is returned, the client's
++		 * lock retry will complete.
++		 * (2) NLM4_DENIED, per the "spec", signals to the client
++		 * that the lock is unavailable now but the client can retry.
++		 * The Linux client implementation does not: it treats
++		 * NLM4_DENIED the same as NLM4_FAILED and errors the request.
++		 * (3) For the future, treat this as a blocked lock and try
++		 * to callback when the delegation is returned, but we might
++		 * not have a proper lock request to block on.
++		 */
++		fallthrough;
+ 	case nfserr_dropit:
+ 		return nlm_drop_reply;
+ 	case nfserr_stale:
+diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h
+index 26f7b34d1a0305..a05a45bb197811 100644
+--- a/fs/nfsd/netns.h
++++ b/fs/nfsd/netns.h
+@@ -129,8 +129,8 @@ struct nfsd_net {
+ 	unsigned char writeverf[8];
+ 
+ 	/*
+-	 * Max number of connections this nfsd container will allow. Defaults
+-	 * to '0' which is means that it bases this on the number of threads.
++	 * Max number of non-validated connections this nfsd container
++	 * will allow.  Defaults to '0', which gets mapped to 64.
+ 	 */
+ 	unsigned int max_connections;
+ 
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index 02c9f3b312a0e8..8f2dc7eb4fc451 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -1372,7 +1372,7 @@ static __be32 nfsd4_ssc_setup_dul(struct nfsd_net *nn, char *ipaddr,
+ 		return 0;
+ 	}
+ 	if (work) {
+-		strscpy(work->nsui_ipaddr, ipaddr, sizeof(work->nsui_ipaddr) - 1);
++		strscpy(work->nsui_ipaddr, ipaddr, sizeof(work->nsui_ipaddr));
+ 		refcount_set(&work->nsui_refcnt, 2);
+ 		work->nsui_busy = true;
+ 		list_add_tail(&work->nsui_list, &nn->nfsd_ssc_mount_list);
+@@ -2799,7 +2799,7 @@ nfsd4_proc_compound(struct svc_rqst *rqstp)
+ 
+ 			if (current_fh->fh_export &&
+ 					need_wrongsec_check(rqstp))
+-				op->status = check_nfsd_access(current_fh->fh_export, rqstp);
++				op->status = check_nfsd_access(current_fh->fh_export, rqstp, false);
+ 		}
+ encode_op:
+ 		if (op->status == nfserr_replay_me) {
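
On the strscpy() hunk above: unlike the old strlcpy(), strscpy() takes the full destination size and always NUL-terminates within it, so passing sizeof(buf) - 1 needlessly gives up one byte. A small illustration (buf and src are placeholders):

	char buf[16];

	/* Copies at most sizeof(buf) - 1 characters and writes the
	 * terminating NUL itself; returns -E2BIG on truncation. */
	strscpy(buf, src, sizeof(buf));
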
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index bcb44400e24398..7b0fabf8c657a1 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -7998,11 +7998,9 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ 	if (check_lock_length(lock->lk_offset, lock->lk_length))
+ 		 return nfserr_inval;
+ 
+-	if ((status = fh_verify(rqstp, &cstate->current_fh,
+-				S_IFREG, NFSD_MAY_LOCK))) {
+-		dprintk("NFSD: nfsd4_lock: permission denied!\n");
++	status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
++	if (status != nfs_ok)
+ 		return status;
+-	}
+ 	sb = cstate->current_fh.fh_dentry->d_sb;
+ 
+ 	if (lock->lk_is_new) {
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index 6edeb3bdf81b50..90db900b346ce9 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -3784,7 +3784,7 @@ nfsd4_encode_entry4_fattr(struct nfsd4_readdir *cd, const char *name,
+ 			nfserr = nfserrno(err);
+ 			goto out_put;
+ 		}
+-		nfserr = check_nfsd_access(exp, cd->rd_rqstp);
++		nfserr = check_nfsd_access(exp, cd->rd_rqstp, false);
+ 		if (nfserr)
+ 			goto out_put;
+ 
+diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
+index 96e19c50a5d7ee..854dcdc36b2ce6 100644
+--- a/fs/nfsd/nfsfh.c
++++ b/fs/nfsd/nfsfh.c
+@@ -320,6 +320,7 @@ __fh_verify(struct svc_rqst *rqstp,
+ {
+ 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ 	struct svc_export *exp = NULL;
++	bool may_bypass_gss = false;
+ 	struct dentry	*dentry;
+ 	__be32		error;
+ 
+@@ -363,12 +364,31 @@ __fh_verify(struct svc_rqst *rqstp,
+ 		goto out;
+ 
+ 	/*
+-	 * pseudoflavor restrictions are not enforced on NLM,
+-	 * which clients virtually always use auth_sys for,
+-	 * even while using RPCSEC_GSS for NFS.
++	 * If rqstp is NULL, this is a LOCALIO request which will only
++	 * ever use a filehandle/credential pair for which access has
++	 * been affirmed (by ACCESS or OPEN NFS requests) over the
++	 * wire.  Skip both the xprtsec policy and the security flavor
++	 * checks.
+ 	 */
+-	if (access & NFSD_MAY_LOCK || access & NFSD_MAY_BYPASS_GSS)
+-		goto skip_pseudoflavor_check;
++	if (!rqstp)
++		goto check_permissions;
++
++	if ((access & NFSD_MAY_NLM) && (exp->ex_flags & NFSEXP_NOAUTHNLM))
++		/* NLM is allowed to fully bypass authentication */
++		goto out;
++
++	/*
++	 * NLM is allowed to bypass the xprtsec policy check because lockd
++	 * doesn't support xprtsec.
++	 */
++	if (!(access & NFSD_MAY_NLM)) {
++		error = check_xprtsec_policy(exp, rqstp);
++		if (error)
++			goto out;
++	}
++
++	if (access & NFSD_MAY_BYPASS_GSS)
++		may_bypass_gss = true;
+ 	/*
+ 	 * Clients may expect to be able to use auth_sys during mount,
+ 	 * even if they use gss for everything else; see section 2.3.2
+@@ -376,13 +396,17 @@ __fh_verify(struct svc_rqst *rqstp,
+ 	 */
+ 	if (access & NFSD_MAY_BYPASS_GSS_ON_ROOT
+ 			&& exp->ex_path.dentry == dentry)
+-		goto skip_pseudoflavor_check;
++		may_bypass_gss = true;
+ 
+-	error = check_nfsd_access(exp, rqstp);
++	error = check_security_flavor(exp, rqstp, may_bypass_gss);
+ 	if (error)
+ 		goto out;
+ 
+-skip_pseudoflavor_check:
++	/* For LOCALIO, fh_verify() is called with a NULL rqstp */
++	if (rqstp)
++		svc_xprt_set_valid(rqstp->rq_xprt);
++
++check_permissions:
+ 	/* Finally, check access permissions. */
+ 	error = nfsd_permission(cred, exp, dentry, access);
+ out:
+diff --git a/fs/nfsd/trace.h b/fs/nfsd/trace.h
+index b8470d4cbe99e9..3448e444d4100f 100644
+--- a/fs/nfsd/trace.h
++++ b/fs/nfsd/trace.h
+@@ -79,7 +79,7 @@ DEFINE_NFSD_XDR_ERR_EVENT(cant_encode);
+ 		{ NFSD_MAY_READ,		"READ" },		\
+ 		{ NFSD_MAY_SATTR,		"SATTR" },		\
+ 		{ NFSD_MAY_TRUNC,		"TRUNC" },		\
+-		{ NFSD_MAY_LOCK,		"LOCK" },		\
++		{ NFSD_MAY_NLM,			"NLM" },		\
+ 		{ NFSD_MAY_OWNER_OVERRIDE,	"OWNER_OVERRIDE" },	\
+ 		{ NFSD_MAY_LOCAL_ACCESS,	"LOCAL_ACCESS" },	\
+ 		{ NFSD_MAY_BYPASS_GSS_ON_ROOT,	"BYPASS_GSS_ON_ROOT" },	\
+diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
+index ca29a5e1600fd9..8c4f4e2f9cee0b 100644
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -321,7 +321,7 @@ nfsd_lookup(struct svc_rqst *rqstp, struct svc_fh *fhp, const char *name,
+ 	err = nfsd_lookup_dentry(rqstp, fhp, name, len, &exp, &dentry);
+ 	if (err)
+ 		return err;
+-	err = check_nfsd_access(exp, rqstp);
++	err = check_nfsd_access(exp, rqstp, false);
+ 	if (err)
+ 		goto out;
+ 	/*
+@@ -2519,7 +2519,7 @@ nfsd_permission(struct svc_cred *cred, struct svc_export *exp,
+ 		(acc & NFSD_MAY_EXEC)?	" exec"  : "",
+ 		(acc & NFSD_MAY_SATTR)?	" sattr" : "",
+ 		(acc & NFSD_MAY_TRUNC)?	" trunc" : "",
+-		(acc & NFSD_MAY_LOCK)?	" lock"  : "",
++		(acc & NFSD_MAY_NLM)?	" nlm"  : "",
+ 		(acc & NFSD_MAY_OWNER_OVERRIDE)? " owneroverride" : "",
+ 		inode->i_mode,
+ 		IS_IMMUTABLE(inode)?	" immut" : "",
+@@ -2544,16 +2544,6 @@ nfsd_permission(struct svc_cred *cred, struct svc_export *exp,
+ 	if ((acc & NFSD_MAY_TRUNC) && IS_APPEND(inode))
+ 		return nfserr_perm;
+ 
+-	if (acc & NFSD_MAY_LOCK) {
+-		/* If we cannot rely on authentication in NLM requests,
+-		 * just allow locks, otherwise require read permission, or
+-		 * ownership
+-		 */
+-		if (exp->ex_flags & NFSEXP_NOAUTHNLM)
+-			return 0;
+-		else
+-			acc = NFSD_MAY_READ | NFSD_MAY_OWNER_OVERRIDE;
+-	}
+ 	/*
+ 	 * The file owner always gets access permission for accesses that
+ 	 * would normally be checked at open time. This is to make
+diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h
+index 3ff14652255696..a61ada4fd9203c 100644
+--- a/fs/nfsd/vfs.h
++++ b/fs/nfsd/vfs.h
+@@ -20,7 +20,7 @@
+ #define NFSD_MAY_READ			0x004 /* == MAY_READ */
+ #define NFSD_MAY_SATTR			0x008
+ #define NFSD_MAY_TRUNC			0x010
+-#define NFSD_MAY_LOCK			0x020
++#define NFSD_MAY_NLM			0x020 /* request is from lockd */
+ #define NFSD_MAY_MASK			0x03f
+ 
+ /* extra hints to permission and open routines: */
+diff --git a/fs/ntfs3/bitmap.c b/fs/ntfs3/bitmap.c
+index cf4fe21a50399b..29b585f443f3eb 100644
+--- a/fs/ntfs3/bitmap.c
++++ b/fs/ntfs3/bitmap.c
+@@ -1399,6 +1399,7 @@ int wnd_extend(struct wnd_bitmap *wnd, size_t new_bits)
+ 		mark_buffer_dirty(bh);
+ 		unlock_buffer(bh);
+ 		/* err = sync_dirty_buffer(bh); */
++		put_bh(bh);
+ 
+ 		b0 = 0;
+ 		bits -= op;
+diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
+index 71c0ce31a4c4db..94825180385ab6 100644
+--- a/fs/quota/dquot.c
++++ b/fs/quota/dquot.c
+@@ -163,6 +163,9 @@ static struct quota_module_name module_names[] = INIT_QUOTA_MODULE_NAMES;
+ /* SLAB cache for dquot structures */
+ static struct kmem_cache *dquot_cachep;
+ 
++/* workqueue for the quota_release_work work item */
++static struct workqueue_struct *quota_unbound_wq;
++
+ void register_quota_format(struct quota_format_type *fmt)
+ {
+ 	spin_lock(&dq_list_lock);
+@@ -882,7 +885,7 @@ void dqput(struct dquot *dquot)
+ 	put_releasing_dquots(dquot);
+ 	atomic_dec(&dquot->dq_count);
+ 	spin_unlock(&dq_list_lock);
+-	queue_delayed_work(system_unbound_wq, &quota_release_work, 1);
++	queue_delayed_work(quota_unbound_wq, &quota_release_work, 1);
+ }
+ EXPORT_SYMBOL(dqput);
+ 
+@@ -3042,6 +3045,11 @@ static int __init dquot_init(void)
+ 
+ 	shrinker_register(dqcache_shrinker);
+ 
++	quota_unbound_wq = alloc_workqueue("quota_events_unbound",
++					   WQ_UNBOUND | WQ_MEM_RECLAIM, WQ_MAX_ACTIVE);
++	if (!quota_unbound_wq)
++		panic("Cannot create quota_unbound_wq\n");
++
+ 	return 0;
+ }
+ fs_initcall(dquot_init);
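
The dquot.c hunks move quota_release_work from system_unbound_wq to a dedicated queue; the property that matters is WQ_MEM_RECLAIM, which guarantees a rescuer thread so the release work can make forward progress under memory pressure. The same pattern in isolation, with hypothetical names (example_wq, example_work, example_fn):

	static void example_fn(struct work_struct *work)
	{
		/* release resources here */
	}
	static DECLARE_DELAYED_WORK(example_work, example_fn);
	static struct workqueue_struct *example_wq;

	static int __init example_init(void)
	{
		/* WQ_UNBOUND: any CPU may run it; WQ_MEM_RECLAIM: keep a
		 * rescuer so the work cannot stall memory reclaim. */
		example_wq = alloc_workqueue("example_unbound",
					     WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
		if (!example_wq)
			return -ENOMEM;
		queue_delayed_work(example_wq, &example_work, 1);
		return 0;
	}
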
+diff --git a/fs/read_write.c b/fs/read_write.c
+index befec0b5c537a7..46408bab92385e 100644
+--- a/fs/read_write.c
++++ b/fs/read_write.c
+@@ -1600,6 +1600,13 @@ ssize_t vfs_copy_file_range(struct file *file_in, loff_t pos_in,
+ 	if (len == 0)
+ 		return 0;
+ 
++	/*
++	 * Make sure return value doesn't overflow in 32bit compat mode.  Also
++	 * limit the size for all cases except when calling ->copy_file_range().
++	 */
++	if (splice || !file_out->f_op->copy_file_range || in_compat_syscall())
++		len = min_t(size_t, MAX_RW_COUNT, len);
++
+ 	file_start_write(file_out);
+ 
+ 	/*
+@@ -1613,9 +1620,7 @@ ssize_t vfs_copy_file_range(struct file *file_in, loff_t pos_in,
+ 						      len, flags);
+ 	} else if (!splice && file_in->f_op->remap_file_range && samesb) {
+ 		ret = file_in->f_op->remap_file_range(file_in, pos_in,
+-				file_out, pos_out,
+-				min_t(loff_t, MAX_RW_COUNT, len),
+-				REMAP_FILE_CAN_SHORTEN);
++				file_out, pos_out, len, REMAP_FILE_CAN_SHORTEN);
+ 		/* fallback to splice */
+ 		if (ret <= 0)
+ 			splice = true;
+@@ -1648,8 +1653,7 @@ ssize_t vfs_copy_file_range(struct file *file_in, loff_t pos_in,
+ 	 * to splicing from input file, while file_start_write() is held on
+ 	 * the output file on a different sb.
+ 	 */
+-	ret = do_splice_direct(file_in, &pos_in, file_out, &pos_out,
+-			       min_t(size_t, len, MAX_RW_COUNT), 0);
++	ret = do_splice_direct(file_in, &pos_in, file_out, &pos_out, len, 0);
+ done:
+ 	if (ret > 0) {
+ 		fsnotify_access(file_in);
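
The vfs_copy_file_range() change consolidates the length clamping: a 32-bit (compat) caller's ssize_t return value cannot represent more than MAX_RW_COUNT (INT_MAX rounded down to a page boundary), and the splice and remap fallbacks were previously clamped individually. The rule in isolation, as a hypothetical helper:

	static inline size_t clamp_copy_len(struct file *file_out, size_t len,
					    bool splice)
	{
		/* Only a filesystem's own ->copy_file_range() may see a
		 * request larger than MAX_RW_COUNT. */
		if (splice || !file_out->f_op->copy_file_range ||
		    in_compat_syscall())
			len = min_t(size_t, MAX_RW_COUNT, len);
		return len;
	}
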
+diff --git a/fs/smb/client/smb1ops.c b/fs/smb/client/smb1ops.c
+index 0385a514f59e9f..4670c3e451a755 100644
+--- a/fs/smb/client/smb1ops.c
++++ b/fs/smb/client/smb1ops.c
+@@ -671,14 +671,72 @@ static int cifs_query_path_info(const unsigned int xid,
+ 	}
+ 
+ #ifdef CONFIG_CIFS_XATTR
++	/*
++	 * For non-symlink WSL reparse points it is required to fetch
++	 * EA $LXMOD, whose S_DT part contains the mandatory file type.
++	 */
++	if (!rc && data->reparse_point) {
++		struct smb2_file_full_ea_info *ea;
++		u32 next = 0;
++
++		ea = (struct smb2_file_full_ea_info *)data->wsl.eas;
++		do {
++			ea = (void *)((u8 *)ea + next);
++			next = le32_to_cpu(ea->next_entry_offset);
++		} while (next);
++		if (le16_to_cpu(ea->ea_value_length)) {
++			ea->next_entry_offset = cpu_to_le32(ALIGN(sizeof(*ea) +
++						ea->ea_name_length + 1 +
++						le16_to_cpu(ea->ea_value_length), 4));
++			ea = (void *)((u8 *)ea + le32_to_cpu(ea->next_entry_offset));
++		}
++
++		rc = CIFSSMBQAllEAs(xid, tcon, full_path, SMB2_WSL_XATTR_MODE,
++				    &ea->ea_data[SMB2_WSL_XATTR_NAME_LEN + 1],
++				    SMB2_WSL_XATTR_MODE_SIZE, cifs_sb);
++		if (rc == SMB2_WSL_XATTR_MODE_SIZE) {
++			ea->next_entry_offset = cpu_to_le32(0);
++			ea->flags = 0;
++			ea->ea_name_length = SMB2_WSL_XATTR_NAME_LEN;
++			ea->ea_value_length = cpu_to_le16(SMB2_WSL_XATTR_MODE_SIZE);
++			memcpy(&ea->ea_data[0], SMB2_WSL_XATTR_MODE, SMB2_WSL_XATTR_NAME_LEN + 1);
++			data->wsl.eas_len += ALIGN(sizeof(*ea) + SMB2_WSL_XATTR_NAME_LEN + 1 +
++						   SMB2_WSL_XATTR_MODE_SIZE, 4);
++			rc = 0;
++		} else if (rc >= 0) {
++			/* It is an error if EA $LXMOD has wrong size. */
++			rc = -EINVAL;
++		} else {
++			/*
++			 * In all other cases ignore error if fetching
++			 * In all other cases ignore the error if fetching
++			 * EA $LXMOD failed.  It is needed only for
++			 * non-symlink WSL reparse points, and wsl_to_fattr()
++			 * handles the case when the EA is missing.
++			rc = 0;
++		}
++	}
++
+ 	/*
+ 	 * For WSL CHR and BLK reparse points it is required to fetch
+ 	 * EA $LXDEV which contains major and minor device numbers.
+ 	 */
+ 	if (!rc && data->reparse_point) {
+ 		struct smb2_file_full_ea_info *ea;
++		u32 next = 0;
+ 
+ 		ea = (struct smb2_file_full_ea_info *)data->wsl.eas;
++		do {
++			ea = (void *)((u8 *)ea + next);
++			next = le32_to_cpu(ea->next_entry_offset);
++		} while (next);
++		if (le16_to_cpu(ea->ea_value_length)) {
++			ea->next_entry_offset = cpu_to_le32(ALIGN(sizeof(*ea) +
++						ea->ea_name_length + 1 +
++						le16_to_cpu(ea->ea_value_length), 4));
++			ea = (void *)((u8 *)ea + le32_to_cpu(ea->next_entry_offset));
++		}
++
+ 		rc = CIFSSMBQAllEAs(xid, tcon, full_path, SMB2_WSL_XATTR_DEV,
+ 				    &ea->ea_data[SMB2_WSL_XATTR_NAME_LEN + 1],
+ 				    SMB2_WSL_XATTR_DEV_SIZE, cifs_sb);
+@@ -688,8 +746,8 @@ static int cifs_query_path_info(const unsigned int xid,
+ 			ea->ea_name_length = SMB2_WSL_XATTR_NAME_LEN;
+ 			ea->ea_value_length = cpu_to_le16(SMB2_WSL_XATTR_DEV_SIZE);
+ 			memcpy(&ea->ea_data[0], SMB2_WSL_XATTR_DEV, SMB2_WSL_XATTR_NAME_LEN + 1);
+-			data->wsl.eas_len = sizeof(*ea) + SMB2_WSL_XATTR_NAME_LEN + 1 +
+-					    SMB2_WSL_XATTR_DEV_SIZE;
++			data->wsl.eas_len += ALIGN(sizeof(*ea) + SMB2_WSL_XATTR_NAME_LEN + 1 +
++						   SMB2_WSL_XATTR_MODE_SIZE, 4);
+ 			rc = 0;
+ 		} else if (rc >= 0) {
+ 			/* It is an error if EA $LXDEV has wrong size. */
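
Both smb1ops.c hunks append a synthesized EA record to data->wsl.eas, so each first walks the FILE_FULL_EA_INFORMATION chain to its tail. The walk in isolation (buf stands for the start of the EA buffer):

	struct smb2_file_full_ea_info *ea = (struct smb2_file_full_ea_info *)buf;
	u32 next = 0;

	/* next_entry_offset is the byte distance to the following record;
	 * zero marks the final record in the chain. */
	do {
		ea = (void *)((u8 *)ea + next);
		next = le32_to_cpu(ea->next_entry_offset);
	} while (next);
	/* ea now points at the last record; a new one is appended after
	 * it at a 4-byte-aligned offset. */
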
+diff --git a/fs/smb/client/smb2inode.c b/fs/smb/client/smb2inode.c
+index 104a563dc317fe..cb049bc70e0cb7 100644
+--- a/fs/smb/client/smb2inode.c
++++ b/fs/smb/client/smb2inode.c
+@@ -1220,31 +1220,33 @@ int
+ smb2_set_file_info(struct inode *inode, const char *full_path,
+ 		   FILE_BASIC_INFO *buf, const unsigned int xid)
+ {
+-	struct cifs_open_parms oparms;
++	struct kvec in_iov = { .iov_base = buf, .iov_len = sizeof(*buf), };
+ 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
++	struct cifsFileInfo *cfile = NULL;
++	struct cifs_open_parms oparms;
+ 	struct tcon_link *tlink;
+ 	struct cifs_tcon *tcon;
+-	struct cifsFileInfo *cfile;
+-	struct kvec in_iov = { .iov_base = buf, .iov_len = sizeof(*buf), };
+-	int rc;
+-
+-	if ((buf->CreationTime == 0) && (buf->LastAccessTime == 0) &&
+-	    (buf->LastWriteTime == 0) && (buf->ChangeTime == 0) &&
+-	    (buf->Attributes == 0))
+-		return 0; /* would be a no op, no sense sending this */
++	int rc = 0;
+ 
+ 	tlink = cifs_sb_tlink(cifs_sb);
+ 	if (IS_ERR(tlink))
+ 		return PTR_ERR(tlink);
+ 	tcon = tlink_tcon(tlink);
+ 
+-	cifs_get_writable_path(tcon, full_path, FIND_WR_ANY, &cfile);
++	if ((buf->CreationTime == 0) && (buf->LastAccessTime == 0) &&
++	    (buf->LastWriteTime == 0) && (buf->ChangeTime == 0)) {
++		if (buf->Attributes == 0)
++			goto out; /* would be a no op, no sense sending this */
++		cifs_get_writable_path(tcon, full_path, FIND_WR_ANY, &cfile);
++	}
++
+ 	oparms = CIFS_OPARMS(cifs_sb, tcon, full_path, FILE_WRITE_ATTRIBUTES,
+ 			     FILE_OPEN, 0, ACL_NO_MODE);
+ 	rc = smb2_compound_op(xid, tcon, cifs_sb,
+ 			      full_path, &oparms, &in_iov,
+ 			      &(int){SMB2_OP_SET_INFO}, 1,
+ 			      cfile, NULL, NULL, NULL);
++out:
+ 	cifs_put_tlink(tlink);
+ 	return rc;
+ }
+diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
+index c946c3a09245c6..1b30035d02bc51 100644
+--- a/fs/smb/client/smb2ops.c
++++ b/fs/smb/client/smb2ops.c
+@@ -4657,7 +4657,7 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
+ 	unsigned int pad_len;
+ 	struct cifs_io_subrequest *rdata = mid->callback_data;
+ 	struct smb2_hdr *shdr = (struct smb2_hdr *)buf;
+-	int length;
++	size_t copied;
+ 	bool use_rdma_mr = false;
+ 
+ 	if (shdr->Command != SMB2_READ) {
+@@ -4770,10 +4770,10 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
+ 	} else if (buf_len >= data_offset + data_len) {
+ 		/* read response payload is in buf */
+ 		WARN_ONCE(buffer, "read data can be either in buf or in buffer");
+-		length = copy_to_iter(buf + data_offset, data_len, &rdata->subreq.io_iter);
+-		if (length < 0)
+-			return length;
+-		rdata->got_bytes = data_len;
++		copied = copy_to_iter(buf + data_offset, data_len, &rdata->subreq.io_iter);
++		if (copied == 0)
++			return -EIO;
++		rdata->got_bytes = copied;
+ 	} else {
+ 		/* read response payload cannot be in both buf and pages */
+ 		WARN_ONCE(1, "buf can not contain only a part of read data");
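
Background for the handle_read_data() fix: copy_to_iter() returns a size_t count of bytes actually copied, never a negative errno, so the old "length < 0" test could not fire and a short copy was recorded as a full one. The corrected idiom:

	size_t copied;

	copied = copy_to_iter(buf + data_offset, data_len,
			      &rdata->subreq.io_iter);
	if (copied == 0)
		return -EIO;		/* no room at all in the iterator */
	rdata->got_bytes = copied;	/* may legitimately be < data_len */
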
+diff --git a/fs/squashfs/inode.c b/fs/squashfs/inode.c
+index 53104f25de5116..f5dcb8353f862f 100644
+--- a/fs/squashfs/inode.c
++++ b/fs/squashfs/inode.c
+@@ -140,8 +140,17 @@ int squashfs_read_inode(struct inode *inode, long long ino)
+ 		if (err < 0)
+ 			goto failed_read;
+ 
++		inode->i_size = le32_to_cpu(sqsh_ino->file_size);
+ 		frag = le32_to_cpu(sqsh_ino->fragment);
+ 		if (frag != SQUASHFS_INVALID_FRAG) {
++			/*
++			 * the file cannot both have a fragment (tail end) and
++			 * a file size that is a multiple of the block size
++			 */
++			if ((inode->i_size & (msblk->block_size - 1)) == 0) {
++				err = -EINVAL;
++				goto failed_read;
++			}
+ 			frag_offset = le32_to_cpu(sqsh_ino->offset);
+ 			frag_size = squashfs_frag_lookup(sb, frag, &frag_blk);
+ 			if (frag_size < 0) {
+@@ -155,7 +164,6 @@ int squashfs_read_inode(struct inode *inode, long long ino)
+ 		}
+ 
+ 		set_nlink(inode, 1);
+-		inode->i_size = le32_to_cpu(sqsh_ino->file_size);
+ 		inode->i_fop = &generic_ro_fops;
+ 		inode->i_mode |= S_IFREG;
+ 		inode->i_blocks = ((inode->i_size - 1) >> 9) + 1;
+@@ -184,8 +192,21 @@ int squashfs_read_inode(struct inode *inode, long long ino)
+ 		if (err < 0)
+ 			goto failed_read;
+ 
++		inode->i_size = le64_to_cpu(sqsh_ino->file_size);
++		if (inode->i_size < 0) {
++			err = -EINVAL;
++			goto failed_read;
++		}
+ 		frag = le32_to_cpu(sqsh_ino->fragment);
+ 		if (frag != SQUASHFS_INVALID_FRAG) {
++			/*
++			 * the file cannot both have a fragment (tail end) and
++			 * a file size that is a multiple of the block size
++			 */
++			if ((inode->i_size & (msblk->block_size - 1)) == 0) {
++				err = -EINVAL;
++				goto failed_read;
++			}
+ 			frag_offset = le32_to_cpu(sqsh_ino->offset);
+ 			frag_size = squashfs_frag_lookup(sb, frag, &frag_blk);
+ 			if (frag_size < 0) {
+@@ -200,7 +221,6 @@ int squashfs_read_inode(struct inode *inode, long long ino)
+ 
+ 		xattr_id = le32_to_cpu(sqsh_ino->xattr);
+ 		set_nlink(inode, le32_to_cpu(sqsh_ino->nlink));
+-		inode->i_size = le64_to_cpu(sqsh_ino->file_size);
+ 		inode->i_op = &squashfs_inode_ops;
+ 		inode->i_fop = &generic_ro_fops;
+ 		inode->i_mode |= S_IFREG;
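
The two squashfs checks encode an on-disk invariant: a tail-end fragment holds the final partial block of a file, so a file whose size is an exact multiple of the block size cannot legitimately have one. As a standalone predicate (hypothetical helper name):

	static bool squashfs_frag_size_ok(u64 i_size, u32 block_size)
	{
		/* block_size is a power of two, so the mask test asks
		 * whether i_size is an exact multiple of block_size. */
		return (i_size & (block_size - 1)) != 0;
	}

With a 128 KiB block size, for example, a 262144-byte file (exactly two blocks) that still claims a fragment is rejected as corrupt.
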
+diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
+index 78b24b09048885..f35b29f5009729 100644
+--- a/include/acpi/acpixf.h
++++ b/include/acpi/acpixf.h
+@@ -213,6 +213,12 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_osi_data, 0);
+  */
+ ACPI_INIT_GLOBAL(u8, acpi_gbl_reduced_hardware, FALSE);
+ 
++/*
++ * The ACPI Global Lock is mainly used on systems with SMM, so SMM-less
++ * systems (such as loong_arch) may neither have nor use the Global Lock.
++ */
++ACPI_INIT_GLOBAL(u8, acpi_gbl_use_global_lock, TRUE);
++
+ /*
+  * Maximum timeout for While() loop iterations before forced method abort.
+  * This mechanism is intended to prevent infinite loops during interpreter
+diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
+index 80de699bf6af4b..6862982c5f1a98 100644
+--- a/include/asm-generic/io.h
++++ b/include/asm-generic/io.h
+@@ -75,6 +75,7 @@
+ #if IS_ENABLED(CONFIG_TRACE_MMIO_ACCESS) && !(defined(__DISABLE_TRACE_MMIO__))
+ #include <linux/tracepoint-defs.h>
+ 
++#define rwmmio_tracepoint_enabled(tracepoint) tracepoint_enabled(tracepoint)
+ DECLARE_TRACEPOINT(rwmmio_write);
+ DECLARE_TRACEPOINT(rwmmio_post_write);
+ DECLARE_TRACEPOINT(rwmmio_read);
+@@ -91,6 +92,7 @@ void log_post_read_mmio(u64 val, u8 width, const volatile void __iomem *addr,
+ 
+ #else
+ 
++#define rwmmio_tracepoint_enabled(tracepoint) false
+ static inline void log_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
+ 				  unsigned long caller_addr, unsigned long caller_addr0) {}
+ static inline void log_post_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
+@@ -189,11 +191,13 @@ static inline u8 readb(const volatile void __iomem *addr)
+ {
+ 	u8 val;
+ 
+-	log_read_mmio(8, addr, _THIS_IP_, _RET_IP_);
++	if (rwmmio_tracepoint_enabled(rwmmio_read))
++		log_read_mmio(8, addr, _THIS_IP_, _RET_IP_);
+ 	__io_br();
+ 	val = __raw_readb(addr);
+ 	__io_ar(val);
+-	log_post_read_mmio(val, 8, addr, _THIS_IP_, _RET_IP_);
++	if (rwmmio_tracepoint_enabled(rwmmio_post_read))
++		log_post_read_mmio(val, 8, addr, _THIS_IP_, _RET_IP_);
+ 	return val;
+ }
+ #endif
+@@ -204,11 +208,13 @@ static inline u16 readw(const volatile void __iomem *addr)
+ {
+ 	u16 val;
+ 
+-	log_read_mmio(16, addr, _THIS_IP_, _RET_IP_);
++	if (rwmmio_tracepoint_enabled(rwmmio_read))
++		log_read_mmio(16, addr, _THIS_IP_, _RET_IP_);
+ 	__io_br();
+ 	val = __le16_to_cpu((__le16 __force)__raw_readw(addr));
+ 	__io_ar(val);
+-	log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_);
++	if (rwmmio_tracepoint_enabled(rwmmio_post_read))
++		log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_);
+ 	return val;
+ }
+ #endif
+@@ -219,11 +225,13 @@ static inline u32 readl(const volatile void __iomem *addr)
+ {
+ 	u32 val;
+ 
+-	log_read_mmio(32, addr, _THIS_IP_, _RET_IP_);
++	if (rwmmio_tracepoint_enabled(rwmmio_read))
++		log_read_mmio(32, addr, _THIS_IP_, _RET_IP_);
+ 	__io_br();
+ 	val = __le32_to_cpu((__le32 __force)__raw_readl(addr));
+ 	__io_ar(val);
+-	log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_);
++	if (rwmmio_tracepoint_enabled(rwmmio_post_read))
++		log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_);
+ 	return val;
+ }
+ #endif
+@@ -235,11 +243,13 @@ static inline u64 readq(const volatile void __iomem *addr)
+ {
+ 	u64 val;
+ 
+-	log_read_mmio(64, addr, _THIS_IP_, _RET_IP_);
++	if (rwmmio_tracepoint_enabled(rwmmio_read))
++		log_read_mmio(64, addr, _THIS_IP_, _RET_IP_);
+ 	__io_br();
+ 	val = __le64_to_cpu((__le64 __force)__raw_readq(addr));
+ 	__io_ar(val);
+-	log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_);
++	if (rwmmio_tracepoint_enabled(rwmmio_post_read))
++		log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_);
+ 	return val;
+ }
+ #endif
+@@ -249,11 +259,13 @@ static inline u64 readq(const volatile void __iomem *addr)
+ #define writeb writeb
+ static inline void writeb(u8 value, volatile void __iomem *addr)
+ {
+-	log_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
++	if (rwmmio_tracepoint_enabled(rwmmio_write))
++		log_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
+ 	__io_bw();
+ 	__raw_writeb(value, addr);
+ 	__io_aw();
+-	log_post_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
++	if (rwmmio_tracepoint_enabled(rwmmio_post_write))
++		log_post_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
+ }
+ #endif
+ 
+@@ -261,11 +273,13 @@ static inline void writeb(u8 value, volatile void __iomem *addr)
+ #define writew writew
+ static inline void writew(u16 value, volatile void __iomem *addr)
+ {
+-	log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
++	if (rwmmio_tracepoint_enabled(rwmmio_write))
++		log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
+ 	__io_bw();
+ 	__raw_writew((u16 __force)cpu_to_le16(value), addr);
+ 	__io_aw();
+-	log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
++	if (rwmmio_tracepoint_enabled(rwmmio_post_write))
++		log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
+ }
+ #endif
+ 
+@@ -273,11 +287,13 @@ static inline void writew(u16 value, volatile void __iomem *addr)
+ #define writel writel
+ static inline void writel(u32 value, volatile void __iomem *addr)
+ {
+-	log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
++	if (rwmmio_tracepoint_enabled(rwmmio_write))
++		log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
+ 	__io_bw();
+ 	__raw_writel((u32 __force)__cpu_to_le32(value), addr);
+ 	__io_aw();
+-	log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
++	if (rwmmio_tracepoint_enabled(rwmmio_post_write))
++		log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
+ }
+ #endif
+ 
+@@ -286,11 +302,13 @@ static inline void writel(u32 value, volatile void __iomem *addr)
+ #define writeq writeq
+ static inline void writeq(u64 value, volatile void __iomem *addr)
+ {
+-	log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
++	if (rwmmio_tracepoint_enabled(rwmmio_write))
++		log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
+ 	__io_bw();
+ 	__raw_writeq((u64 __force)__cpu_to_le64(value), addr);
+ 	__io_aw();
+-	log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
++	if (rwmmio_tracepoint_enabled(rwmmio_post_write))
++		log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
+ }
+ #endif
+ #endif /* CONFIG_64BIT */
+@@ -306,9 +324,11 @@ static inline u8 readb_relaxed(const volatile void __iomem *addr)
+ {
+ 	u8 val;
+ 
+-	log_read_mmio(8, addr, _THIS_IP_, _RET_IP_);
++	if (rwmmio_tracepoint_enabled(rwmmio_read))
++		log_read_mmio(8, addr, _THIS_IP_, _RET_IP_);
+ 	val = __raw_readb(addr);
+-	log_post_read_mmio(val, 8, addr, _THIS_IP_, _RET_IP_);
++	if (rwmmio_tracepoint_enabled(rwmmio_post_read))
++		log_post_read_mmio(val, 8, addr, _THIS_IP_, _RET_IP_);
+ 	return val;
+ }
+ #endif
+@@ -319,9 +339,11 @@ static inline u16 readw_relaxed(const volatile void __iomem *addr)
+ {
+ 	u16 val;
+ 
+-	log_read_mmio(16, addr, _THIS_IP_, _RET_IP_);
++	if (rwmmio_tracepoint_enabled(rwmmio_read))
++		log_read_mmio(16, addr, _THIS_IP_, _RET_IP_);
+ 	val = __le16_to_cpu((__le16 __force)__raw_readw(addr));
+-	log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_);
++	if (rwmmio_tracepoint_enabled(rwmmio_post_read))
++		log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_);
+ 	return val;
+ }
+ #endif
+@@ -332,9 +354,11 @@ static inline u32 readl_relaxed(const volatile void __iomem *addr)
+ {
+ 	u32 val;
+ 
+-	log_read_mmio(32, addr, _THIS_IP_, _RET_IP_);
++	if (rwmmio_tracepoint_enabled(rwmmio_read))
++		log_read_mmio(32, addr, _THIS_IP_, _RET_IP_);
+ 	val = __le32_to_cpu((__le32 __force)__raw_readl(addr));
+-	log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_);
++	if (rwmmio_tracepoint_enabled(rwmmio_post_read))
++		log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_);
+ 	return val;
+ }
+ #endif
+@@ -345,9 +369,11 @@ static inline u64 readq_relaxed(const volatile void __iomem *addr)
+ {
+ 	u64 val;
+ 
+-	log_read_mmio(64, addr, _THIS_IP_, _RET_IP_);
++	if (rwmmio_tracepoint_enabled(rwmmio_read))
++		log_read_mmio(64, addr, _THIS_IP_, _RET_IP_);
+ 	val = __le64_to_cpu((__le64 __force)__raw_readq(addr));
+-	log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_);
++	if (rwmmio_tracepoint_enabled(rwmmio_post_read))
++		log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_);
+ 	return val;
+ }
+ #endif
+@@ -356,9 +382,11 @@ static inline u64 readq_relaxed(const volatile void __iomem *addr)
+ #define writeb_relaxed writeb_relaxed
+ static inline void writeb_relaxed(u8 value, volatile void __iomem *addr)
+ {
+-	log_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
++	if (rwmmio_tracepoint_enabled(rwmmio_write))
++		log_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
+ 	__raw_writeb(value, addr);
+-	log_post_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
++	if (rwmmio_tracepoint_enabled(rwmmio_post_write))
++		log_post_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
+ }
+ #endif
+ 
+@@ -366,9 +394,11 @@ static inline void writeb_relaxed(u8 value, volatile void __iomem *addr)
+ #define writew_relaxed writew_relaxed
+ static inline void writew_relaxed(u16 value, volatile void __iomem *addr)
+ {
+-	log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
++	if (rwmmio_tracepoint_enabled(rwmmio_write))
++		log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
+ 	__raw_writew((u16 __force)cpu_to_le16(value), addr);
+-	log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
++	if (rwmmio_tracepoint_enabled(rwmmio_post_write))
++		log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
+ }
+ #endif
+ 
+@@ -376,9 +406,11 @@ static inline void writew_relaxed(u16 value, volatile void __iomem *addr)
+ #define writel_relaxed writel_relaxed
+ static inline void writel_relaxed(u32 value, volatile void __iomem *addr)
+ {
+-	log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
++	if (rwmmio_tracepoint_enabled(rwmmio_write))
++		log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
+ 	__raw_writel((u32 __force)__cpu_to_le32(value), addr);
+-	log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
++	if (rwmmio_tracepoint_enabled(rwmmio_post_write))
++		log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
+ }
+ #endif
+ 
+@@ -386,9 +418,11 @@ static inline void writel_relaxed(u32 value, volatile void __iomem *addr)
+ #define writeq_relaxed writeq_relaxed
+ static inline void writeq_relaxed(u64 value, volatile void __iomem *addr)
+ {
+-	log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
++	if (rwmmio_tracepoint_enabled(rwmmio_write))
++		log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
+ 	__raw_writeq((u64 __force)__cpu_to_le64(value), addr);
+-	log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
++	if (rwmmio_tracepoint_enabled(rwmmio_post_write))
++		log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
+ }
+ #endif
+ 
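
All of the io.h hunks instantiate one pattern: tracepoint_enabled() compiles down to a static-branch test, so the out-of-line log_*_mmio() helpers cost nothing unless the rwmmio tracepoints are live. The pattern in miniature, with a hypothetical tracepoint and helper:

	#include <linux/tracepoint-defs.h>

	DECLARE_TRACEPOINT(example_event);
	void do_trace_example_event(unsigned long addr);	/* out of line */

	static inline void example_accessor(unsigned long addr)
	{
		/* Patched-out branch while the tracepoint is disabled. */
		if (tracepoint_enabled(example_event))
			do_trace_example_event(addr);
	}
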
+diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
+index a604c54ae44dad..794e38320f5685 100644
+--- a/include/linux/cpufreq.h
++++ b/include/linux/cpufreq.h
+@@ -32,6 +32,9 @@
+  */
+ 
+ #define CPUFREQ_ETERNAL			(-1)
++
++#define CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS	NSEC_PER_MSEC
++
+ #define CPUFREQ_NAME_LEN		16
+ /* Print length for names. Extra 1 space for accommodating '\n' in prints */
+ #define CPUFREQ_NAME_PLEN		(CPUFREQ_NAME_LEN + 1)
+diff --git a/include/linux/iio/frequency/adf4350.h b/include/linux/iio/frequency/adf4350.h
+index de45cf2ee1e4f8..ce2086f97e3fcf 100644
+--- a/include/linux/iio/frequency/adf4350.h
++++ b/include/linux/iio/frequency/adf4350.h
+@@ -51,7 +51,7 @@
+ 
+ /* REG3 Bit Definitions */
+ #define ADF4350_REG3_12BIT_CLKDIV(x)		((x) << 3)
+-#define ADF4350_REG3_12BIT_CLKDIV_MODE(x)	((x) << 16)
++#define ADF4350_REG3_12BIT_CLKDIV_MODE(x)	((x) << 15)
+ #define ADF4350_REG3_12BIT_CSR_EN		(1 << 18)
+ #define ADF4351_REG3_CHARGE_CANCELLATION_EN	(1 << 21)
+ #define ADF4351_REG3_ANTI_BACKLASH_3ns_EN	(1 << 22)
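
On the adf4350.h one-liner: in register 3 the 12-bit clock divider value occupies DB[14:3] and the two clock divider mode bits occupy DB[16:15] (field positions as given in the ADF4350 datasheet), so a 2-bit mode value must be shifted by 15; shifting by 16 placed its low bit in the wrong field. Illustrative use:

	/* Mode is a 2-bit field; with the corrected shift it lands in
	 * DB[16:15], directly above the DB[14:3] divider value. */
	u32 reg3 = ADF4350_REG3_12BIT_CLKDIV(150) |	/* DB[14:3] */
		   ADF4350_REG3_12BIT_CLKDIV_MODE(2);	/* DB[16:15] */
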
+diff --git a/include/linux/ksm.h b/include/linux/ksm.h
+index ec9c05044d4fed..af303641819a37 100644
+--- a/include/linux/ksm.h
++++ b/include/linux/ksm.h
+@@ -57,8 +57,14 @@ static inline long mm_ksm_zero_pages(struct mm_struct *mm)
+ static inline void ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+ {
+ 	/* Adding mm to ksm is best effort on fork. */
+-	if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
++	if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags)) {
++		long nr_ksm_zero_pages = atomic_long_read(&mm->ksm_zero_pages);
++
++		mm->ksm_merging_pages = 0;
++		mm->ksm_rmap_items = 0;
++		atomic_long_add(nr_ksm_zero_pages, &ksm_zero_pages);
+ 		__ksm_enter(mm);
++	}
+ }
+ 
+ static inline int ksm_execve(struct mm_struct *mm)
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index 41f5c88bdf3bc9..f0fa8404957dbf 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -4318,14 +4318,13 @@ static inline void pgalloc_tag_copy(struct folio *new, struct folio *old)
+  * since this value becomes part of PP_SIGNATURE; meaning we can just use the
+  * space between the PP_SIGNATURE value (without POISON_POINTER_DELTA), and the
+  * lowest bits of POISON_POINTER_DELTA. On arches where POISON_POINTER_DELTA is
+- * 0, we make sure that we leave the two topmost bits empty, as that guarantees
+- * we won't mistake a valid kernel pointer for a value we set, regardless of the
+- * VMSPLIT setting.
++ * 0, we use the lowest bit of PAGE_OFFSET as the boundary if that value is
++ * known at compile-time.
+  *
+- * Altogether, this means that the number of bits available is constrained by
+- * the size of an unsigned long (at the upper end, subtracting two bits per the
+- * above), and the definition of PP_SIGNATURE (with or without
+- * POISON_POINTER_DELTA).
++ * If the value of PAGE_OFFSET is not known at compile time, or if it is too
++ * small to leave at least 8 bits available above PP_SIGNATURE, we define the
++ * number of bits to be 0, which turns off the DMA index tracking altogether
++ * (see page_pool_register_dma_index()).
+  */
+ #define PP_DMA_INDEX_SHIFT (1 + __fls(PP_SIGNATURE - POISON_POINTER_DELTA))
+ #if POISON_POINTER_DELTA > 0
+@@ -4334,8 +4333,13 @@ static inline void pgalloc_tag_copy(struct folio *new, struct folio *old)
+  */
+ #define PP_DMA_INDEX_BITS MIN(32, __ffs(POISON_POINTER_DELTA) - PP_DMA_INDEX_SHIFT)
+ #else
+-/* Always leave out the topmost two; see above. */
+-#define PP_DMA_INDEX_BITS MIN(32, BITS_PER_LONG - PP_DMA_INDEX_SHIFT - 2)
++/* Use the lowest bit of PAGE_OFFSET if there's at least 8 bits available; see above */
++#define PP_DMA_INDEX_MIN_OFFSET (1 << (PP_DMA_INDEX_SHIFT + 8))
++#define PP_DMA_INDEX_BITS ((__builtin_constant_p(PAGE_OFFSET) && \
++			    PAGE_OFFSET >= PP_DMA_INDEX_MIN_OFFSET && \
++			    !(PAGE_OFFSET & (PP_DMA_INDEX_MIN_OFFSET - 1))) ? \
++			      MIN(32, __ffs(PAGE_OFFSET) - PP_DMA_INDEX_SHIFT) : 0)
++
+ #endif
+ 
+ #define PP_DMA_INDEX_MASK GENMASK(PP_DMA_INDEX_BITS + PP_DMA_INDEX_SHIFT - 1, \
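
A worked example of the new arithmetic, under stated assumptions (POISON_POINTER_DELTA == 0, so PP_SIGNATURE == 0x40, and a classic 32-bit VMSPLIT_3G layout with PAGE_OFFSET == 0xC0000000):

	/*
	 * PP_DMA_INDEX_SHIFT      = 1 + __fls(0x40)       = 7
	 * PP_DMA_INDEX_MIN_OFFSET = 1 << (7 + 8)          = 0x8000
	 * 0xC0000000 is a compile-time constant, >= 0x8000, and a
	 * multiple of it, so:
	 * PP_DMA_INDEX_BITS       = MIN(32, __ffs(0xC0000000) - 7)
	 *                         = MIN(32, 30 - 7)       = 23
	 * i.e. 23 index bits fit between PP_SIGNATURE and the lowest set
	 * bit of PAGE_OFFSET.  If PAGE_OFFSET were not a compile-time
	 * constant, PP_DMA_INDEX_BITS would evaluate to 0 and DMA index
	 * tracking would be disabled.
	 */
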
+diff --git a/include/linux/rseq.h b/include/linux/rseq.h
+index bc8af3eb559876..1fbeb61babeb8b 100644
+--- a/include/linux/rseq.h
++++ b/include/linux/rseq.h
+@@ -7,6 +7,12 @@
+ #include <linux/preempt.h>
+ #include <linux/sched.h>
+ 
++#ifdef CONFIG_MEMBARRIER
++# define RSEQ_EVENT_GUARD	irq
++#else
++# define RSEQ_EVENT_GUARD	preempt
++#endif
++
+ /*
+  * Map the event mask on the user-space ABI enum rseq_cs_flags
+  * for direct mask checks.
+@@ -41,9 +47,8 @@ static inline void rseq_handle_notify_resume(struct ksignal *ksig,
+ static inline void rseq_signal_deliver(struct ksignal *ksig,
+ 				       struct pt_regs *regs)
+ {
+-	preempt_disable();
+-	__set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
+-	preempt_enable();
++	scoped_guard(RSEQ_EVENT_GUARD)
++		__set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
+ 	rseq_handle_notify_resume(ksig, regs);
+ }
+ 
+diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
+index e68fecf6eab5b6..617ebfff2f304f 100644
+--- a/include/linux/sunrpc/svc.h
++++ b/include/linux/sunrpc/svc.h
+@@ -81,7 +81,7 @@ struct svc_serv {
+ 	unsigned int		sv_xdrsize;	/* XDR buffer size */
+ 	struct list_head	sv_permsocks;	/* all permanent sockets */
+ 	struct list_head	sv_tempsocks;	/* all temporary sockets */
+-	int			sv_tmpcnt;	/* count of temporary sockets */
++	int			sv_tmpcnt;	/* count of temporary "valid" sockets */
+ 	struct timer_list	sv_temptimer;	/* timer for aging temporary sockets */
+ 
+ 	char *			sv_name;	/* service name */
+diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
+index 0981e35a9fedae..72d1a08f482821 100644
+--- a/include/linux/sunrpc/svc_xprt.h
++++ b/include/linux/sunrpc/svc_xprt.h
+@@ -99,8 +99,27 @@ enum {
+ 	XPT_HANDSHAKE,		/* xprt requests a handshake */
+ 	XPT_TLS_SESSION,	/* transport-layer security established */
+ 	XPT_PEER_AUTH,		/* peer has been authenticated */
++	XPT_PEER_VALID,		/* peer has presented a filehandle that
++				 * it has access to; such a transport is
++				 * NOT counted in ->sv_tmpcnt.
++				 */
++	XPT_RPCB_UNREG,		/* transport that needs unregistering
++				 * with rpcbind (TCP, UDP) on destroy
++				 */
+ };
+ 
++static inline void svc_xprt_set_valid(struct svc_xprt *xpt)
++{
++	if (test_bit(XPT_TEMP, &xpt->xpt_flags) &&
++	    !test_and_set_bit(XPT_PEER_VALID, &xpt->xpt_flags)) {
++		struct svc_serv *serv = xpt->xpt_server;
++
++		spin_lock(&serv->sv_lock);
++		serv->sv_tmpcnt -= 1;
++		spin_unlock(&serv->sv_lock);
++	}
++}
++
+ static inline void unregister_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u)
+ {
+ 	spin_lock(&xpt->xpt_lock);
+diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
+index 43343f1586d138..d0746e2c869ad9 100644
+--- a/include/media/v4l2-subdev.h
++++ b/include/media/v4l2-subdev.h
+@@ -1952,19 +1952,23 @@ extern const struct v4l2_subdev_ops v4l2_subdev_call_wrappers;
+  *
+  * Note: only legacy non-MC drivers may need this macro.
+  */
+-#define v4l2_subdev_call_state_try(sd, o, f, args...)                 \
+-	({                                                            \
+-		int __result;                                         \
+-		static struct lock_class_key __key;                   \
+-		const char *name = KBUILD_BASENAME                    \
+-			":" __stringify(__LINE__) ":state->lock";     \
+-		struct v4l2_subdev_state *state =                     \
+-			__v4l2_subdev_state_alloc(sd, name, &__key);  \
+-		v4l2_subdev_lock_state(state);                        \
+-		__result = v4l2_subdev_call(sd, o, f, state, ##args); \
+-		v4l2_subdev_unlock_state(state);                      \
+-		__v4l2_subdev_state_free(state);                      \
+-		__result;                                             \
++#define v4l2_subdev_call_state_try(sd, o, f, args...)                         \
++	({                                                                    \
++		int __result;                                                 \
++		static struct lock_class_key __key;                           \
++		const char *name = KBUILD_BASENAME                            \
++			":" __stringify(__LINE__) ":state->lock";             \
++		struct v4l2_subdev_state *state =                             \
++			__v4l2_subdev_state_alloc(sd, name, &__key);          \
++		if (IS_ERR(state)) {                                          \
++			__result = PTR_ERR(state);                            \
++		} else {                                                      \
++			v4l2_subdev_lock_state(state);                        \
++			__result = v4l2_subdev_call(sd, o, f, state, ##args); \
++			v4l2_subdev_unlock_state(state);                      \
++			__v4l2_subdev_state_free(state);                      \
++		}                                                             \
++		__result;                                                     \
+ 	})
+ 
+ /**
+diff --git a/include/trace/events/dma.h b/include/trace/events/dma.h
+index 63b55ccc4f00cd..e5187144c91b7a 100644
+--- a/include/trace/events/dma.h
++++ b/include/trace/events/dma.h
+@@ -136,6 +136,7 @@ DECLARE_EVENT_CLASS(dma_alloc_class,
+ 		__entry->dma_addr = dma_addr;
+ 		__entry->size = size;
+ 		__entry->flags = flags;
++		__entry->dir = dir;
+ 		__entry->attrs = attrs;
+ 	),
+ 
+diff --git a/init/main.c b/init/main.c
+index c4778edae7972f..821df1f05e9c02 100644
+--- a/init/main.c
++++ b/init/main.c
+@@ -543,6 +543,12 @@ static int __init unknown_bootoption(char *param, char *val,
+ 				     const char *unused, void *arg)
+ {
+ 	size_t len = strlen(param);
++	/*
++	 * Well-known bootloader identifiers:
++	 * 1. LILO/Grub pass "BOOT_IMAGE=...";
++	 * 2. kexec/kdump (kexec-tools) pass "kexec".
++	 */
++	const char *bootloader[] = { "BOOT_IMAGE=", "kexec", NULL };
+ 
+ 	/* Handle params aliased to sysctls */
+ 	if (sysctl_is_alias(param))
+@@ -550,6 +556,12 @@ static int __init unknown_bootoption(char *param, char *val,
+ 
+ 	repair_env_string(param, val);
+ 
++	/* Handle bootloader identifier */
++	for (int i = 0; bootloader[i]; i++) {
++		if (strstarts(param, bootloader[i]))
++			return 0;
++	}
++
+ 	/* Handle obsolete-style parameters */
+ 	if (obsolete_checksetup(param))
+ 		return 0;
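
On the unknown_bootoption() change: strstarts() is a plain prefix test, so both "BOOT_IMAGE=/boot/vmlinuz-6.12" and a bare "kexec" token are swallowed before the unknown-parameter handling runs. The matching step as a standalone helper (hypothetical name):

	static bool __init is_bootloader_ident(const char *param)
	{
		static const char *const ids[] = { "BOOT_IMAGE=", "kexec", NULL };

		for (int i = 0; ids[i]; i++)
			if (strstarts(param, ids[i]))
				return true;
		return false;
	}
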
+diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
+index 9aaf5124648bda..746b5644d9a19f 100644
+--- a/kernel/bpf/inode.c
++++ b/kernel/bpf/inode.c
+@@ -775,7 +775,7 @@ static int bpf_show_options(struct seq_file *m, struct dentry *root)
+ 	return 0;
+ }
+ 
+-static void bpf_free_inode(struct inode *inode)
++static void bpf_destroy_inode(struct inode *inode)
+ {
+ 	enum bpf_type type;
+ 
+@@ -790,7 +790,7 @@ const struct super_operations bpf_super_ops = {
+ 	.statfs		= simple_statfs,
+ 	.drop_inode	= generic_delete_inode,
+ 	.show_options	= bpf_show_options,
+-	.free_inode	= bpf_free_inode,
++	.destroy_inode	= bpf_destroy_inode,
+ };
+ 
+ enum {
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 97c9afe3efc38d..e5ec098a6f61e9 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -1807,7 +1807,7 @@ static int copy_files(unsigned long clone_flags, struct task_struct *tsk,
+ 	return 0;
+ }
+ 
+-static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
++static int copy_sighand(u64 clone_flags, struct task_struct *tsk)
+ {
+ 	struct sighand_struct *sig;
+ 
+diff --git a/kernel/pid.c b/kernel/pid.c
+index 2715afb77eab8f..b80c3bfb58d07f 100644
+--- a/kernel/pid.c
++++ b/kernel/pid.c
+@@ -487,7 +487,7 @@ pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
+ 	struct upid *upid;
+ 	pid_t nr = 0;
+ 
+-	if (pid && ns->level <= pid->level) {
++	if (pid && ns && ns->level <= pid->level) {
+ 		upid = &pid->numbers[ns->level];
+ 		if (upid->ns == ns)
+ 			nr = upid->nr;
+diff --git a/kernel/rseq.c b/kernel/rseq.c
+index 23894ba8250cf9..810005f927d7cc 100644
+--- a/kernel/rseq.c
++++ b/kernel/rseq.c
+@@ -255,12 +255,12 @@ static int rseq_need_restart(struct task_struct *t, u32 cs_flags)
+ 
+ 	/*
+ 	 * Load and clear event mask atomically with respect to
+-	 * scheduler preemption.
++	 * scheduler preemption and membarrier IPIs.
+ 	 */
+-	preempt_disable();
+-	event_mask = t->rseq_event_mask;
+-	t->rseq_event_mask = 0;
+-	preempt_enable();
++	scoped_guard(RSEQ_EVENT_GUARD) {
++		event_mask = t->rseq_event_mask;
++		t->rseq_event_mask = 0;
++	}
+ 
+ 	return !!event_mask;
+ }
+diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
+index 53e3670fbb1e05..6ec66fef3f91ea 100644
+--- a/kernel/sched/deadline.c
++++ b/kernel/sched/deadline.c
+@@ -2617,6 +2617,25 @@ static int find_later_rq(struct task_struct *task)
+ 	return -1;
+ }
+ 
++static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
++{
++	struct task_struct *p;
++
++	if (!has_pushable_dl_tasks(rq))
++		return NULL;
++
++	p = __node_2_pdl(rb_first_cached(&rq->dl.pushable_dl_tasks_root));
++
++	WARN_ON_ONCE(rq->cpu != task_cpu(p));
++	WARN_ON_ONCE(task_current(rq, p));
++	WARN_ON_ONCE(p->nr_cpus_allowed <= 1);
++
++	WARN_ON_ONCE(!task_on_rq_queued(p));
++	WARN_ON_ONCE(!dl_task(p));
++
++	return p;
++}
++
+ /* Locks the rq it finds */
+ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
+ {
+@@ -2644,12 +2663,37 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
+ 
+ 		/* Retry if something changed. */
+ 		if (double_lock_balance(rq, later_rq)) {
+-			if (unlikely(task_rq(task) != rq ||
++			/*
++			 * double_lock_balance had to release rq->lock; in the
++			 * meantime, the task may no longer be fit to be migrated.
++			 * Check the following to ensure that the task is
++			 * still suitable for migration:
++			 * 1. It is possible the task was scheduled,
++			 *    migrate_disabled was set and then got preempted,
++			 *    so we must check the task migration disable
++			 *    flag.
++			 * 2. The CPU picked is in the task's affinity.
++			 * 3. For throttled task (dl_task_offline_migration),
++			 *    check the following:
++			 *    - the task is not on the rq anymore (it was
++			 *      migrated)
++			 *    - the task is not on CPU anymore
++			 *    - the task is still a dl task
++			 *    - the task is not queued on the rq anymore
++			 * 4. For the non-throttled task (push_dl_task), the
++			 *    check to ensure that this task is still at the
++			 *    head of the pushable tasks list is enough.
++			 */
++			if (unlikely(is_migration_disabled(task) ||
+ 				     !cpumask_test_cpu(later_rq->cpu, &task->cpus_mask) ||
+-				     task_on_cpu(rq, task) ||
+-				     !dl_task(task) ||
+-				     is_migration_disabled(task) ||
+-				     !task_on_rq_queued(task))) {
++				     (task->dl.dl_throttled &&
++				      (task_rq(task) != rq ||
++				       task_on_cpu(rq, task) ||
++				       !dl_task(task) ||
++				       !task_on_rq_queued(task))) ||
++				     (!task->dl.dl_throttled &&
++				      task != pick_next_pushable_dl_task(rq)))) {
++
+ 				double_unlock_balance(rq, later_rq);
+ 				later_rq = NULL;
+ 				break;
+@@ -2672,25 +2716,6 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
+ 	return later_rq;
+ }
+ 
+-static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
+-{
+-	struct task_struct *p;
+-
+-	if (!has_pushable_dl_tasks(rq))
+-		return NULL;
+-
+-	p = __node_2_pdl(rb_first_cached(&rq->dl.pushable_dl_tasks_root));
+-
+-	WARN_ON_ONCE(rq->cpu != task_cpu(p));
+-	WARN_ON_ONCE(task_current(rq, p));
+-	WARN_ON_ONCE(p->nr_cpus_allowed <= 1);
+-
+-	WARN_ON_ONCE(!task_on_rq_queued(p));
+-	WARN_ON_ONCE(!dl_task(p));
+-
+-	return p;
+-}
+-
+ /*
+  * See if the non running -deadline tasks on this rq
+  * can be sent to some other CPU where they can preempt
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index af61769b1d5020..b3d9826e25b035 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -7187,6 +7187,7 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
+ 	int h_nr_delayed = 0;
+ 	struct cfs_rq *cfs_rq;
+ 	u64 slice = 0;
++	int ret = 0;
+ 
+ 	if (entity_is_task(se)) {
+ 		p = task_of(se);
+@@ -7218,7 +7219,7 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
+ 
+ 		/* end evaluation on encountering a throttled cfs_rq */
+ 		if (cfs_rq_throttled(cfs_rq))
+-			return 0;
++			goto out;
+ 
+ 		/* Don't dequeue parent if it has other entities besides us */
+ 		if (cfs_rq->load.weight) {
+@@ -7261,7 +7262,7 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
+ 
+ 		/* end evaluation on encountering a throttled cfs_rq */
+ 		if (cfs_rq_throttled(cfs_rq))
+-			return 0;
++			goto out;
+ 	}
+ 
+ 	sub_nr_running(rq, h_nr_queued);
+@@ -7273,6 +7274,8 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
+ 	if (unlikely(!was_sched_idle && sched_idle_rq(rq)))
+ 		rq->next_balance = jiffies;
+ 
++	ret = 1;
++out:
+ 	if (p && task_delayed) {
+ 		SCHED_WARN_ON(!task_sleep);
+ 		SCHED_WARN_ON(p->on_rq != 1);
+@@ -7288,7 +7291,7 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
+ 		__block_task(rq, p);
+ 	}
+ 
+-	return 1;
++	return ret;
+ }
+ 
+ /*
+diff --git a/kernel/sys.c b/kernel/sys.c
+index 4da31f28fda81f..35990f0796bcab 100644
+--- a/kernel/sys.c
++++ b/kernel/sys.c
+@@ -1698,6 +1698,7 @@ SYSCALL_DEFINE4(prlimit64, pid_t, pid, unsigned int, resource,
+ 	struct rlimit old, new;
+ 	struct task_struct *tsk;
+ 	unsigned int checkflags = 0;
++	bool need_tasklist;
+ 	int ret;
+ 
+ 	if (old_rlim)
+@@ -1724,8 +1725,25 @@ SYSCALL_DEFINE4(prlimit64, pid_t, pid, unsigned int, resource,
+ 	get_task_struct(tsk);
+ 	rcu_read_unlock();
+ 
+-	ret = do_prlimit(tsk, resource, new_rlim ? &new : NULL,
+-			old_rlim ? &old : NULL);
++	need_tasklist = !same_thread_group(tsk, current);
++	if (need_tasklist) {
++		/*
++		 * Ensure we can't race with group exit or de_thread(),
++		 * so tsk->group_leader can't be freed or changed until
++		 * read_unlock(tasklist_lock) below.
++		 */
++		read_lock(&tasklist_lock);
++		if (!pid_alive(tsk))
++			ret = -ESRCH;
++	}
++
++	if (!ret) {
++		ret = do_prlimit(tsk, resource, new_rlim ? &new : NULL,
++				old_rlim ? &old : NULL);
++	}
++
++	if (need_tasklist)
++		read_unlock(&tasklist_lock);
+ 
+ 	if (!ret && old_rlim) {
+ 		rlim_to_rlim64(&old, &old64);
+diff --git a/kernel/trace/trace_fprobe.c b/kernel/trace/trace_fprobe.c
+index af7d6e2060d9d9..440dbfa6bbfd5b 100644
+--- a/kernel/trace/trace_fprobe.c
++++ b/kernel/trace/trace_fprobe.c
+@@ -343,12 +343,14 @@ static int fentry_dispatcher(struct fprobe *fp, unsigned long entry_ip,
+ 			     void *entry_data)
+ {
+ 	struct trace_fprobe *tf = container_of(fp, struct trace_fprobe, fp);
++	unsigned int flags = trace_probe_load_flag(&tf->tp);
+ 	int ret = 0;
+ 
+-	if (trace_probe_test_flag(&tf->tp, TP_FLAG_TRACE))
++	if (flags & TP_FLAG_TRACE)
+ 		fentry_trace_func(tf, entry_ip, regs);
++
+ #ifdef CONFIG_PERF_EVENTS
+-	if (trace_probe_test_flag(&tf->tp, TP_FLAG_PROFILE))
++	if (flags & TP_FLAG_PROFILE)
+ 		ret = fentry_perf_func(tf, entry_ip, regs);
+ #endif
+ 	return ret;
+@@ -360,11 +362,12 @@ static void fexit_dispatcher(struct fprobe *fp, unsigned long entry_ip,
+ 			     void *entry_data)
+ {
+ 	struct trace_fprobe *tf = container_of(fp, struct trace_fprobe, fp);
++	unsigned int flags = trace_probe_load_flag(&tf->tp);
+ 
+-	if (trace_probe_test_flag(&tf->tp, TP_FLAG_TRACE))
++	if (flags & TP_FLAG_TRACE)
+ 		fexit_trace_func(tf, entry_ip, ret_ip, regs, entry_data);
+ #ifdef CONFIG_PERF_EVENTS
+-	if (trace_probe_test_flag(&tf->tp, TP_FLAG_PROFILE))
++	if (flags & TP_FLAG_PROFILE)
+ 		fexit_perf_func(tf, entry_ip, ret_ip, regs, entry_data);
+ #endif
+ }
+diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
+index 6b9c3f3f870f4f..b273611c5026c2 100644
+--- a/kernel/trace/trace_kprobe.c
++++ b/kernel/trace/trace_kprobe.c
+@@ -1799,14 +1799,15 @@ static int kprobe_register(struct trace_event_call *event,
+ static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
+ {
+ 	struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);
++	unsigned int flags = trace_probe_load_flag(&tk->tp);
+ 	int ret = 0;
+ 
+ 	raw_cpu_inc(*tk->nhit);
+ 
+-	if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE))
++	if (flags & TP_FLAG_TRACE)
+ 		kprobe_trace_func(tk, regs);
+ #ifdef CONFIG_PERF_EVENTS
+-	if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE))
++	if (flags & TP_FLAG_PROFILE)
+ 		ret = kprobe_perf_func(tk, regs);
+ #endif
+ 	return ret;
+@@ -1818,6 +1819,7 @@ kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
+ {
+ 	struct kretprobe *rp = get_kretprobe(ri);
+ 	struct trace_kprobe *tk;
++	unsigned int flags;
+ 
+ 	/*
+ 	 * There is a small chance that get_kretprobe(ri) returns NULL when
+@@ -1830,10 +1832,11 @@ kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
+ 	tk = container_of(rp, struct trace_kprobe, rp);
+ 	raw_cpu_inc(*tk->nhit);
+ 
+-	if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE))
++	flags = trace_probe_load_flag(&tk->tp);
++	if (flags & TP_FLAG_TRACE)
+ 		kretprobe_trace_func(tk, ri, regs);
+ #ifdef CONFIG_PERF_EVENTS
+-	if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE))
++	if (flags & TP_FLAG_PROFILE)
+ 		kretprobe_perf_func(tk, ri, regs);
+ #endif
+ 	return 0;	/* We don't tweak kernel, so just return 0 */
+diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h
+index 8a6797c2278d90..4f54f7935d5dbe 100644
+--- a/kernel/trace/trace_probe.h
++++ b/kernel/trace/trace_probe.h
+@@ -269,16 +269,21 @@ struct event_file_link {
+ 	struct list_head		list;
+ };
+ 
++static inline unsigned int trace_probe_load_flag(struct trace_probe *tp)
++{
++	return smp_load_acquire(&tp->event->flags);
++}
++
+ static inline bool trace_probe_test_flag(struct trace_probe *tp,
+ 					 unsigned int flag)
+ {
+-	return !!(tp->event->flags & flag);
++	return !!(trace_probe_load_flag(tp) & flag);
+ }
+ 
+ static inline void trace_probe_set_flag(struct trace_probe *tp,
+ 					unsigned int flag)
+ {
+-	tp->event->flags |= flag;
++	smp_store_release(&tp->event->flags, tp->event->flags | flag);
+ }
+ 
+ static inline void trace_probe_clear_flag(struct trace_probe *tp,
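
The trace_probe.h change pairs smp_store_release() in the setter with smp_load_acquire() in the new trace_probe_load_flag(), and the dispatchers in trace_fprobe.c and trace_kprobe.c above (and trace_uprobe.c below) now take a single snapshot of the flags. The point of the snapshot, using the kprobe dispatcher's own names:

	/* One acquire-load, then test the local copy: TP_FLAG_TRACE and
	 * TP_FLAG_PROFILE are evaluated against the same value instead of
	 * re-reading ->flags once per test while it may be changing. */
	unsigned int flags = trace_probe_load_flag(&tk->tp);

	if (flags & TP_FLAG_TRACE)
		kprobe_trace_func(tk, regs);
	if (flags & TP_FLAG_PROFILE)
		ret = kprobe_perf_func(tk, regs);
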
+diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
+index 9916677acf24e4..f210e71bc15502 100644
+--- a/kernel/trace/trace_uprobe.c
++++ b/kernel/trace/trace_uprobe.c
+@@ -1531,6 +1531,7 @@ static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
+ 	struct trace_uprobe *tu;
+ 	struct uprobe_dispatch_data udd;
+ 	struct uprobe_cpu_buffer *ucb = NULL;
++	unsigned int flags;
+ 	int ret = 0;
+ 
+ 	tu = container_of(con, struct trace_uprobe, consumer);
+@@ -1545,11 +1546,12 @@ static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
+ 	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
+ 		return 0;
+ 
+-	if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
++	flags = trace_probe_load_flag(&tu->tp);
++	if (flags & TP_FLAG_TRACE)
+ 		ret |= uprobe_trace_func(tu, regs, &ucb);
+ 
+ #ifdef CONFIG_PERF_EVENTS
+-	if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
++	if (flags & TP_FLAG_PROFILE)
+ 		ret |= uprobe_perf_func(tu, regs, &ucb);
+ #endif
+ 	uprobe_buffer_put(ucb);
+@@ -1562,6 +1564,7 @@ static int uretprobe_dispatcher(struct uprobe_consumer *con,
+ 	struct trace_uprobe *tu;
+ 	struct uprobe_dispatch_data udd;
+ 	struct uprobe_cpu_buffer *ucb = NULL;
++	unsigned int flags;
+ 
+ 	tu = container_of(con, struct trace_uprobe, consumer);
+ 
+@@ -1573,11 +1576,12 @@ static int uretprobe_dispatcher(struct uprobe_consumer *con,
+ 	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
+ 		return 0;
+ 
+-	if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
++	flags = trace_probe_load_flag(&tu->tp);
++	if (flags & TP_FLAG_TRACE)
+ 		uretprobe_trace_func(tu, func, regs, &ucb);
+ 
+ #ifdef CONFIG_PERF_EVENTS
+-	if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
++	if (flags & TP_FLAG_PROFILE)
+ 		uretprobe_perf_func(tu, func, regs, &ucb);
+ #endif
+ 	uprobe_buffer_put(ucb);
+diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile
+index 969baab8c805f2..7ccc18accb235a 100644
+--- a/lib/crypto/Makefile
++++ b/lib/crypto/Makefile
+@@ -33,6 +33,10 @@ obj-$(CONFIG_CRYPTO_LIB_CURVE25519_GENERIC)	+= libcurve25519-generic.o
+ libcurve25519-generic-y				:= curve25519-fiat32.o
+ libcurve25519-generic-$(CONFIG_ARCH_SUPPORTS_INT128)	:= curve25519-hacl64.o
+ libcurve25519-generic-y				+= curve25519-generic.o
++# clang versions prior to 18 may blow out the stack with KASAN
++ifeq ($(call clang-min-version, 180000),)
++KASAN_SANITIZE_curve25519-hacl64.o := n
++endif
+ 
+ obj-$(CONFIG_CRYPTO_LIB_CURVE25519)		+= libcurve25519.o
+ libcurve25519-y					+= curve25519.o
+diff --git a/lib/genalloc.c b/lib/genalloc.c
+index 4fa5635bf81bd6..841f2978383334 100644
+--- a/lib/genalloc.c
++++ b/lib/genalloc.c
+@@ -899,8 +899,11 @@ struct gen_pool *of_gen_pool_get(struct device_node *np,
+ 		if (!name)
+ 			name = of_node_full_name(np_pool);
+ 	}
+-	if (pdev)
++	if (pdev) {
+ 		pool = gen_pool_get(&pdev->dev, name);
++		put_device(&pdev->dev);
++	}
++
+ 	of_node_put(np_pool);
+ 
+ 	return pool;
+diff --git a/mm/damon/lru_sort.c b/mm/damon/lru_sort.c
+index c2b4f0b0714727..5654e31a198a43 100644
+--- a/mm/damon/lru_sort.c
++++ b/mm/damon/lru_sort.c
+@@ -203,7 +203,7 @@ static int damon_lru_sort_apply_parameters(void)
+ 		goto out;
+ 	}
+ 
+-	err = damon_set_attrs(ctx, &damon_lru_sort_mon_attrs);
++	err = damon_set_attrs(param_ctx, &damon_lru_sort_mon_attrs);
+ 	if (err)
+ 		goto out;
+ 
+diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c
+index dba3b2f4d75813..52e5c244bd0215 100644
+--- a/mm/damon/vaddr.c
++++ b/mm/damon/vaddr.c
+@@ -324,10 +324,8 @@ static int damon_mkold_pmd_entry(pmd_t *pmd, unsigned long addr,
+ 	}
+ 
+ 	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
+-	if (!pte) {
+-		walk->action = ACTION_AGAIN;
++	if (!pte)
+ 		return 0;
+-	}
+ 	if (!pte_present(ptep_get(pte)))
+ 		goto out;
+ 	damon_ptep_mkold(pte, walk->vma, addr);
+@@ -479,10 +477,8 @@ static int damon_young_pmd_entry(pmd_t *pmd, unsigned long addr,
+ #endif	/* CONFIG_TRANSPARENT_HUGEPAGE */
+ 
+ 	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
+-	if (!pte) {
+-		walk->action = ACTION_AGAIN;
++	if (!pte)
+ 		return 0;
+-	}
+ 	ptent = ptep_get(pte);
+ 	if (!pte_present(ptent))
+ 		goto out;
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index f94a9d41358555..029b67d48d30bb 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -3715,32 +3715,23 @@ static unsigned long deferred_split_count(struct shrinker *shrink,
+ static bool thp_underused(struct folio *folio)
+ {
+ 	int num_zero_pages = 0, num_filled_pages = 0;
+-	void *kaddr;
+ 	int i;
+ 
+ 	if (khugepaged_max_ptes_none == HPAGE_PMD_NR - 1)
+ 		return false;
+ 
+ 	for (i = 0; i < folio_nr_pages(folio); i++) {
+-		kaddr = kmap_local_folio(folio, i * PAGE_SIZE);
+-		if (!memchr_inv(kaddr, 0, PAGE_SIZE)) {
+-			num_zero_pages++;
+-			if (num_zero_pages > khugepaged_max_ptes_none) {
+-				kunmap_local(kaddr);
++		if (pages_identical(folio_page(folio, i), ZERO_PAGE(0))) {
++			if (++num_zero_pages > khugepaged_max_ptes_none)
+ 				return true;
+-			}
+ 		} else {
+ 			/*
+ 			 * Another path for early exit once the number
+ 			 * of non-zero filled pages exceeds threshold.
+ 			 */
+-			num_filled_pages++;
+-			if (num_filled_pages >= HPAGE_PMD_NR - khugepaged_max_ptes_none) {
+-				kunmap_local(kaddr);
++			if (++num_filled_pages >= HPAGE_PMD_NR - khugepaged_max_ptes_none)
+ 				return false;
+-			}
+ 		}
+-		kunmap_local(kaddr);
+ 	}
+ 	return false;
+ }
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index fff6edb1174d59..d404532ae41e2a 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -3453,6 +3453,9 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
+ 		initialized = true;
+ 	}
+ 
++	if (!h->max_huge_pages)
++		return;
++
+ 	/* do node specific alloc */
+ 	if (hugetlb_hstate_alloc_pages_specific_nodes(h))
+ 		return;
+diff --git a/mm/migrate.c b/mm/migrate.c
+index 8619aa884eaa87..2bcfc41b7e4c3b 100644
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -198,19 +198,16 @@ bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
+ }
+ 
+ static bool try_to_map_unused_to_zeropage(struct page_vma_mapped_walk *pvmw,
+-					  struct folio *folio,
+-					  unsigned long idx)
++		struct folio *folio, pte_t old_pte, unsigned long idx)
+ {
+ 	struct page *page = folio_page(folio, idx);
+-	bool contains_data;
+ 	pte_t newpte;
+-	void *addr;
+ 
+ 	if (PageCompound(page))
+ 		return false;
+ 	VM_BUG_ON_PAGE(!PageAnon(page), page);
+ 	VM_BUG_ON_PAGE(!PageLocked(page), page);
+-	VM_BUG_ON_PAGE(pte_present(*pvmw->pte), page);
++	VM_BUG_ON_PAGE(pte_present(old_pte), page);
+ 
+ 	if (folio_test_mlocked(folio) || (pvmw->vma->vm_flags & VM_LOCKED) ||
+ 	    mm_forbids_zeropage(pvmw->vma->vm_mm))
+@@ -221,15 +218,17 @@ static bool try_to_map_unused_to_zeropage(struct page_vma_mapped_walk *pvmw,
+ 	 * this subpage has been non present. If the subpage is only zero-filled
+ 	 * then map it to the shared zeropage.
+ 	 */
+-	addr = kmap_local_page(page);
+-	contains_data = memchr_inv(addr, 0, PAGE_SIZE);
+-	kunmap_local(addr);
+-
+-	if (contains_data)
++	if (!pages_identical(page, ZERO_PAGE(0)))
+ 		return false;
+ 
+ 	newpte = pte_mkspecial(pfn_pte(my_zero_pfn(pvmw->address),
+ 					pvmw->vma->vm_page_prot));
++
++	if (pte_swp_soft_dirty(old_pte))
++		newpte = pte_mksoft_dirty(newpte);
++	if (pte_swp_uffd_wp(old_pte))
++		newpte = pte_mkuffd_wp(newpte);
++
+ 	set_pte_at(pvmw->vma->vm_mm, pvmw->address, pvmw->pte, newpte);
+ 
+ 	dec_mm_counter(pvmw->vma->vm_mm, mm_counter(folio));
+@@ -272,13 +271,13 @@ static bool remove_migration_pte(struct folio *folio,
+ 			continue;
+ 		}
+ #endif
++		old_pte = ptep_get(pvmw.pte);
+ 		if (rmap_walk_arg->map_unused_to_zeropage &&
+-		    try_to_map_unused_to_zeropage(&pvmw, folio, idx))
++		    try_to_map_unused_to_zeropage(&pvmw, folio, old_pte, idx))
+ 			continue;
+ 
+ 		folio_get(folio);
+ 		pte = mk_pte(new, READ_ONCE(vma->vm_page_prot));
+-		old_pte = ptep_get(pvmw.pte);
+ 
+ 		entry = pte_to_swp_entry(old_pte);
+ 		if (!is_migration_entry_young(entry))
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 752576749db9d0..765c890e6a843a 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -4052,7 +4052,7 @@ gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order)
+ 		if (!(gfp_mask & __GFP_NOMEMALLOC)) {
+ 			alloc_flags |= ALLOC_NON_BLOCK;
+ 
+-			if (order > 0)
++			if (order > 0 && (alloc_flags & ALLOC_MIN_RESERVE))
+ 				alloc_flags |= ALLOC_HIGHATOMIC;
+ 		}
+ 
+diff --git a/mm/slab.h b/mm/slab.h
+index 92ca5ff2037534..b65d2462b3fdbd 100644
+--- a/mm/slab.h
++++ b/mm/slab.h
+@@ -572,8 +572,12 @@ static inline struct slabobj_ext *slab_obj_exts(struct slab *slab)
+ 	unsigned long obj_exts = READ_ONCE(slab->obj_exts);
+ 
+ #ifdef CONFIG_MEMCG
+-	VM_BUG_ON_PAGE(obj_exts && !(obj_exts & MEMCG_DATA_OBJEXTS),
+-							slab_page(slab));
++	/*
++	 * obj_exts should be either NULL, a valid pointer with
++	 * MEMCG_DATA_OBJEXTS bit set or be equal to OBJEXTS_ALLOC_FAIL.
++	 */
++	VM_BUG_ON_PAGE(obj_exts && !(obj_exts & MEMCG_DATA_OBJEXTS) &&
++		       obj_exts != OBJEXTS_ALLOC_FAIL, slab_page(slab));
+ 	VM_BUG_ON_PAGE(obj_exts & MEMCG_DATA_KMEM, slab_page(slab));
+ #endif
+ 	return (struct slabobj_ext *)(obj_exts & ~OBJEXTS_FLAGS_MASK);
+diff --git a/mm/slub.c b/mm/slub.c
+index 7fbba36f7aac52..b75b50ad6748f0 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -1999,8 +1999,7 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
+ 			   slab_nid(slab));
+ 	if (!vec) {
+ 		/* Mark vectors which failed to allocate */
+-		if (new_slab)
+-			mark_failed_objexts_alloc(slab);
++		mark_failed_objexts_alloc(slab);
+ 
+ 		return -ENOMEM;
+ 	}
+diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
+index f2efb58d152bc2..13d6c3f51c29d0 100644
+--- a/net/bridge/br_vlan.c
++++ b/net/bridge/br_vlan.c
+@@ -1455,7 +1455,7 @@ void br_vlan_fill_forward_path_pvid(struct net_bridge *br,
+ 	if (!br_opt_get(br, BROPT_VLAN_ENABLED))
+ 		return;
+ 
+-	vg = br_vlan_group(br);
++	vg = br_vlan_group_rcu(br);
+ 
+ 	if (idx >= 0 &&
+ 	    ctx->vlan[idx].proto == br->vlan_proto) {
+diff --git a/net/core/filter.c b/net/core/filter.c
+index c850e5d6cbd876..fef4d85fee0082 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -2289,6 +2289,7 @@ static int __bpf_redirect_neigh_v6(struct sk_buff *skb, struct net_device *dev,
+ 		if (IS_ERR(dst))
+ 			goto out_drop;
+ 
++		skb_dst_drop(skb);
+ 		skb_dst_set(skb, dst);
+ 	} else if (nh->nh_family != AF_INET6) {
+ 		goto out_drop;
+@@ -2397,6 +2398,7 @@ static int __bpf_redirect_neigh_v4(struct sk_buff *skb, struct net_device *dev,
+ 			goto out_drop;
+ 		}
+ 
++		skb_dst_drop(skb);
+ 		skb_dst_set(skb, &rt->dst);
+ 	}
+ 
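
Both __bpf_redirect_neigh hunks insert skb_dst_drop() before skb_dst_set(): attaching a new dst without releasing the one already on the skb would leak a reference and keep the old routing entry pinned. A toy refcount model of the pattern follows; struct dst, struct sk_buff and the helpers are deliberately simplified stand-ins for the kernel's refcounted dst_entry handling, not its API.

#include <stdio.h>

struct dst { int refcnt; };

static void dst_release(struct dst *d)
{
	if (--d->refcnt == 0)
		printf("dst %p freed\n", (void *)d);
}

struct sk_buff { struct dst *dst; };

/* the pattern the hunks add: release the old entry before overwriting */
static void skb_dst_drop(struct sk_buff *skb)
{
	if (skb->dst) {
		dst_release(skb->dst);
		skb->dst = NULL;
	}
}

int main(void)
{
	struct dst old = { .refcnt = 1 }, new_dst = { .refcnt = 1 };
	struct sk_buff skb = { .dst = &old };

	/* without the drop, &old would keep refcnt 1 forever: a leak */
	skb_dst_drop(&skb);
	skb.dst = &new_dst;             /* analogous to skb_dst_set() */

	printf("old.refcnt=%d\n", old.refcnt);  /* 0: reference returned */
	return 0;
}
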
+diff --git a/net/core/page_pool.c b/net/core/page_pool.c
+index b1c3e0ad6dbf48..6a7d740b396f67 100644
+--- a/net/core/page_pool.c
++++ b/net/core/page_pool.c
+@@ -462,11 +462,60 @@ page_pool_dma_sync_for_device(const struct page_pool *pool,
+ 	}
+ }
+ 
++static int page_pool_register_dma_index(struct page_pool *pool,
++					netmem_ref netmem, gfp_t gfp)
++{
++	int err = 0;
++	u32 id;
++
++	if (unlikely(!PP_DMA_INDEX_BITS))
++		goto out;
++
++	if (in_softirq())
++		err = xa_alloc(&pool->dma_mapped, &id, netmem_to_page(netmem),
++			       PP_DMA_INDEX_LIMIT, gfp);
++	else
++		err = xa_alloc_bh(&pool->dma_mapped, &id, netmem_to_page(netmem),
++				  PP_DMA_INDEX_LIMIT, gfp);
++	if (err) {
++		WARN_ONCE(err != -ENOMEM, "couldn't track DMA mapping, please report to netdev@");
++		goto out;
++	}
++
++	netmem_set_dma_index(netmem, id);
++out:
++	return err;
++}
++
++static int page_pool_release_dma_index(struct page_pool *pool,
++				       netmem_ref netmem)
++{
++	struct page *old, *page = netmem_to_page(netmem);
++	unsigned long id;
++
++	if (unlikely(!PP_DMA_INDEX_BITS))
++		return 0;
++
++	id = netmem_get_dma_index(netmem);
++	if (!id)
++		return -1;
++
++	if (in_softirq())
++		old = xa_cmpxchg(&pool->dma_mapped, id, page, NULL, 0);
++	else
++		old = xa_cmpxchg_bh(&pool->dma_mapped, id, page, NULL, 0);
++	if (old != page)
++		return -1;
++
++	netmem_set_dma_index(netmem, 0);
++
++	return 0;
++}
++
+ static bool page_pool_dma_map(struct page_pool *pool, netmem_ref netmem, gfp_t gfp)
+ {
+ 	dma_addr_t dma;
+ 	int err;
+-	u32 id;
+ 
+ 	/* Setup DMA mapping: use 'struct page' area for storing DMA-addr
+ 	 * since dma_addr_t can be either 32 or 64 bits and does not always fit
+@@ -485,18 +534,10 @@ static bool page_pool_dma_map(struct page_pool *pool, netmem_ref netmem, gfp_t g
+ 		goto unmap_failed;
+ 	}
+ 
+-	if (in_softirq())
+-		err = xa_alloc(&pool->dma_mapped, &id, netmem_to_page(netmem),
+-			       PP_DMA_INDEX_LIMIT, gfp);
+-	else
+-		err = xa_alloc_bh(&pool->dma_mapped, &id, netmem_to_page(netmem),
+-				  PP_DMA_INDEX_LIMIT, gfp);
+-	if (err) {
+-		WARN_ONCE(err != -ENOMEM, "couldn't track DMA mapping, please report to netdev@");
++	err = page_pool_register_dma_index(pool, netmem, gfp);
++	if (err)
+ 		goto unset_failed;
+-	}
+ 
+-	netmem_set_dma_index(netmem, id);
+ 	page_pool_dma_sync_for_device(pool, netmem, pool->p.max_len);
+ 
+ 	return true;
+@@ -669,8 +710,6 @@ void page_pool_clear_pp_info(netmem_ref netmem)
+ static __always_inline void __page_pool_release_page_dma(struct page_pool *pool,
+ 							 netmem_ref netmem)
+ {
+-	struct page *old, *page = netmem_to_page(netmem);
+-	unsigned long id;
+ 	dma_addr_t dma;
+ 
+ 	if (!pool->dma_map)
+@@ -679,15 +718,7 @@ static __always_inline void __page_pool_release_page_dma(struct page_pool *pool,
+ 		 */
+ 		return;
+ 
+-	id = netmem_get_dma_index(netmem);
+-	if (!id)
+-		return;
+-
+-	if (in_softirq())
+-		old = xa_cmpxchg(&pool->dma_mapped, id, page, NULL, 0);
+-	else
+-		old = xa_cmpxchg_bh(&pool->dma_mapped, id, page, NULL, 0);
+-	if (old != page)
++	if (page_pool_release_dma_index(pool, netmem))
+ 		return;
+ 
+ 	dma = page_pool_get_dma_addr_netmem(netmem);
+@@ -697,7 +728,6 @@ static __always_inline void __page_pool_release_page_dma(struct page_pool *pool,
+ 			     PAGE_SIZE << pool->p.order, pool->p.dma_dir,
+ 			     DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
+ 	page_pool_set_dma_addr_netmem(netmem, 0);
+-	netmem_set_dma_index(netmem, 0);
+ }
+ 
+ /* Disconnects a page (from a page_pool).  API users can have a need
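
The page_pool.c refactor above moves the xa_alloc()/xa_cmpxchg() id bookkeeping into a symmetric register/release helper pair, so the unmap path bails out cleanly when it loses the compare-exchange race and only the winner performs the DMA unmap. The xarray API is kernel-only; the fixed slot table below is an invented stand-in that demonstrates the same register-once/release-once discipline.

#include <stdatomic.h>
#include <stdio.h>

#define NSLOTS 8

static _Atomic(void *) slots[NSLOTS];   /* stand-in for pool->dma_mapped */

/* like page_pool_register_dma_index(): find a free id for @p */
static int register_index(void *p)
{
	for (int id = 1; id < NSLOTS; id++) {   /* id 0 means "untracked" */
		void *expected = NULL;

		if (atomic_compare_exchange_strong(&slots[id], &expected, p))
			return id;
	}
	return -1;                              /* table full, like -ENOMEM */
}

/* like page_pool_release_dma_index(): only the current owner succeeds */
static int release_index(int id, void *p)
{
	void *expected = p;

	if (id <= 0 || id >= NSLOTS)
		return -1;
	if (!atomic_compare_exchange_strong(&slots[id], &expected, NULL))
		return -1;                      /* lost the race: skip unmap */
	return 0;
}

int main(void)
{
	int page;                               /* any object will do */
	int id = register_index(&page);

	printf("registered id %d\n", id);
	printf("first release:  %d\n", release_index(id, &page));   /* 0 */
	printf("second release: %d\n", release_index(id, &page));   /* -1 */
	return 0;
}
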
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 739931aabb4e31..795ffa62cc0e69 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -1735,6 +1735,7 @@ EXPORT_SYMBOL(tcp_peek_len);
+ /* Make sure sk_rcvbuf is big enough to satisfy SO_RCVLOWAT hint */
+ int tcp_set_rcvlowat(struct sock *sk, int val)
+ {
++	struct tcp_sock *tp = tcp_sk(sk);
+ 	int space, cap;
+ 
+ 	if (sk->sk_userlocks & SOCK_RCVBUF_LOCK)
+@@ -1753,7 +1754,9 @@ int tcp_set_rcvlowat(struct sock *sk, int val)
+ 	space = tcp_space_from_win(sk, val);
+ 	if (space > sk->sk_rcvbuf) {
+ 		WRITE_ONCE(sk->sk_rcvbuf, space);
+-		WRITE_ONCE(tcp_sk(sk)->window_clamp, val);
++
++		if (tp->window_clamp && tp->window_clamp < val)
++			WRITE_ONCE(tp->window_clamp, val);
+ 	}
+ 	return 0;
+ }
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 30f4375f8431b8..4c8d84fc27ca35 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -7338,7 +7338,6 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
+ 				    &foc, TCP_SYNACK_FASTOPEN, skb);
+ 		/* Add the child socket directly into the accept queue */
+ 		if (!inet_csk_reqsk_queue_add(sk, req, fastopen_sk)) {
+-			reqsk_fastopen_remove(fastopen_sk, req, false);
+ 			bh_unlock_sock(fastopen_sk);
+ 			sock_put(fastopen_sk);
+ 			goto drop_and_free;
+diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c
+index 2c8815daf5b04a..1b7541206a70b9 100644
+--- a/net/mptcp/pm.c
++++ b/net/mptcp/pm.c
+@@ -226,9 +226,12 @@ void mptcp_pm_add_addr_received(const struct sock *ssk,
+ 		} else {
+ 			__MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_ADDADDRDROP);
+ 		}
+-	/* id0 should not have a different address */
++	/* - id0 should not have a different address
++	 * - special case for C-flag: linked to fill_local_addresses_vec()
++	 */
+ 	} else if ((addr->id == 0 && !mptcp_pm_nl_is_init_remote_addr(msk, addr)) ||
+-		   (addr->id > 0 && !READ_ONCE(pm->accept_addr))) {
++		   (addr->id > 0 && !READ_ONCE(pm->accept_addr) &&
++		    !mptcp_pm_add_addr_c_flag_case(msk))) {
+ 		mptcp_pm_announce_addr(msk, addr, true);
+ 		mptcp_pm_add_addr_send_ack(msk);
+ 	} else if (mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_RECEIVED)) {
+diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
+index 463c2e7956d52e..8d5406515c304d 100644
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -674,10 +674,12 @@ static unsigned int fill_local_addresses_vec(struct mptcp_sock *msk,
+ 	struct mptcp_addr_info mpc_addr;
+ 	struct pm_nl_pernet *pernet;
+ 	unsigned int subflows_max;
++	bool c_flag_case;
+ 	int i = 0;
+ 
+ 	pernet = pm_nl_get_pernet_from_msk(msk);
+ 	subflows_max = mptcp_pm_get_subflows_max(msk);
++	c_flag_case = remote->id && mptcp_pm_add_addr_c_flag_case(msk);
+ 
+ 	mptcp_local_address((struct sock_common *)msk, &mpc_addr);
+ 
+@@ -690,12 +692,27 @@ static unsigned int fill_local_addresses_vec(struct mptcp_sock *msk,
+ 			continue;
+ 
+ 		if (msk->pm.subflows < subflows_max) {
++			bool is_id0;
++
+ 			locals[i].addr = entry->addr;
+ 			locals[i].flags = entry->flags;
+ 			locals[i].ifindex = entry->ifindex;
+ 
++			is_id0 = mptcp_addresses_equal(&locals[i].addr,
++						       &mpc_addr,
++						       locals[i].addr.port);
++
++			if (c_flag_case &&
++			    (entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW)) {
++				__clear_bit(locals[i].addr.id,
++					    msk->pm.id_avail_bitmap);
++
++				if (!is_id0)
++					msk->pm.local_addr_used++;
++			}
++
+ 			/* Special case for ID0: set the correct ID */
+-			if (mptcp_addresses_equal(&locals[i].addr, &mpc_addr, locals[i].addr.port))
++			if (is_id0)
+ 				locals[i].addr.id = 0;
+ 
+ 			msk->pm.subflows++;
+@@ -704,6 +721,37 @@ static unsigned int fill_local_addresses_vec(struct mptcp_sock *msk,
+ 	}
+ 	rcu_read_unlock();
+ 
++	/* Special case: peer sets the C flag, accept one ADD_ADDR if default
++	 * limits are used -- accepting no ADD_ADDR -- and use subflow endpoints
++	 */
++	if (!i && c_flag_case) {
++		unsigned int local_addr_max = mptcp_pm_get_local_addr_max(msk);
++
++		while (msk->pm.local_addr_used < local_addr_max &&
++		       msk->pm.subflows < subflows_max) {
++			struct mptcp_pm_local *local = &locals[i];
++
++			if (!select_local_address(pernet, msk, local))
++				break;
++
++			__clear_bit(local->addr.id, msk->pm.id_avail_bitmap);
++
++			if (!mptcp_pm_addr_families_match(sk, &local->addr,
++							  remote))
++				continue;
++
++			if (mptcp_addresses_equal(&local->addr, &mpc_addr,
++						  local->addr.port))
++				continue;
++
++			msk->pm.local_addr_used++;
++			msk->pm.subflows++;
++			i++;
++		}
++
++		return i;
++	}
++
+ 	/* If the array is empty, fill in the single
+ 	 * 'IPADDRANY' local address
+ 	 */
+diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
+index 6f191b12597883..9653fee227ab2e 100644
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -1172,6 +1172,14 @@ static inline void mptcp_pm_close_subflow(struct mptcp_sock *msk)
+ 	spin_unlock_bh(&msk->pm.lock);
+ }
+ 
++static inline bool mptcp_pm_add_addr_c_flag_case(struct mptcp_sock *msk)
++{
++	return READ_ONCE(msk->pm.remote_deny_join_id0) &&
++	       msk->pm.local_addr_used == 0 &&
++	       mptcp_pm_get_add_addr_accept_max(msk) == 0 &&
++	       msk->pm.subflows < mptcp_pm_get_subflows_max(msk);
++}
++
+ void mptcp_sockopt_sync_locked(struct mptcp_sock *msk, struct sock *ssk);
+ 
+ static inline struct mptcp_ext *mptcp_get_ext(const struct sk_buff *skb)
+diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c
+index 8ee66a86c3bc75..1a62e384766a76 100644
+--- a/net/netfilter/nft_objref.c
++++ b/net/netfilter/nft_objref.c
+@@ -22,6 +22,35 @@ void nft_objref_eval(const struct nft_expr *expr,
+ 	obj->ops->eval(obj, regs, pkt);
+ }
+ 
++static int nft_objref_validate_obj_type(const struct nft_ctx *ctx, u32 type)
++{
++	unsigned int hooks;
++
++	switch (type) {
++	case NFT_OBJECT_SYNPROXY:
++		if (ctx->family != NFPROTO_IPV4 &&
++		    ctx->family != NFPROTO_IPV6 &&
++		    ctx->family != NFPROTO_INET)
++			return -EOPNOTSUPP;
++
++		hooks = (1 << NF_INET_LOCAL_IN) | (1 << NF_INET_FORWARD);
++
++		return nft_chain_validate_hooks(ctx->chain, hooks);
++	default:
++		break;
++	}
++
++	return 0;
++}
++
++static int nft_objref_validate(const struct nft_ctx *ctx,
++			       const struct nft_expr *expr)
++{
++	struct nft_object *obj = nft_objref_priv(expr);
++
++	return nft_objref_validate_obj_type(ctx, obj->ops->type->type);
++}
++
+ static int nft_objref_init(const struct nft_ctx *ctx,
+ 			   const struct nft_expr *expr,
+ 			   const struct nlattr * const tb[])
+@@ -93,6 +122,7 @@ static const struct nft_expr_ops nft_objref_ops = {
+ 	.activate	= nft_objref_activate,
+ 	.deactivate	= nft_objref_deactivate,
+ 	.dump		= nft_objref_dump,
++	.validate	= nft_objref_validate,
+ 	.reduce		= NFT_REDUCE_READONLY,
+ };
+ 
+@@ -197,6 +227,14 @@ static void nft_objref_map_destroy(const struct nft_ctx *ctx,
+ 	nf_tables_destroy_set(ctx, priv->set);
+ }
+ 
++static int nft_objref_map_validate(const struct nft_ctx *ctx,
++				   const struct nft_expr *expr)
++{
++	const struct nft_objref_map *priv = nft_expr_priv(expr);
++
++	return nft_objref_validate_obj_type(ctx, priv->set->objtype);
++}
++
+ static const struct nft_expr_ops nft_objref_map_ops = {
+ 	.type		= &nft_objref_type,
+ 	.size		= NFT_EXPR_SIZE(sizeof(struct nft_objref_map)),
+@@ -206,6 +244,7 @@ static const struct nft_expr_ops nft_objref_map_ops = {
+ 	.deactivate	= nft_objref_map_deactivate,
+ 	.destroy	= nft_objref_map_destroy,
+ 	.dump		= nft_objref_map_dump,
++	.validate	= nft_objref_map_validate,
+ 	.reduce		= NFT_REDUCE_READONLY,
+ };
+ 
+diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
+index f80208edd6a5c6..96ca400120e608 100644
+--- a/net/sctp/sm_make_chunk.c
++++ b/net/sctp/sm_make_chunk.c
+@@ -31,6 +31,7 @@
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+ 
+ #include <crypto/hash.h>
++#include <crypto/utils.h>
+ #include <linux/types.h>
+ #include <linux/kernel.h>
+ #include <linux/ip.h>
+@@ -1796,7 +1797,7 @@ struct sctp_association *sctp_unpack_cookie(
+ 		}
+ 	}
+ 
+-	if (memcmp(digest, cookie->signature, SCTP_SIGNATURE_SIZE)) {
++	if (crypto_memneq(digest, cookie->signature, SCTP_SIGNATURE_SIZE)) {
+ 		*error = -SCTP_IERROR_BAD_SIG;
+ 		goto fail;
+ 	}
+diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
+index a0524ba8d78781..dc66dff33d6d46 100644
+--- a/net/sctp/sm_statefuns.c
++++ b/net/sctp/sm_statefuns.c
+@@ -30,6 +30,7 @@
+ 
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+ 
++#include <crypto/utils.h>
+ #include <linux/types.h>
+ #include <linux/kernel.h>
+ #include <linux/ip.h>
+@@ -885,7 +886,8 @@ enum sctp_disposition sctp_sf_do_5_1D_ce(struct net *net,
+ 	return SCTP_DISPOSITION_CONSUME;
+ 
+ nomem_authev:
+-	sctp_ulpevent_free(ai_ev);
++	if (ai_ev)
++		sctp_ulpevent_free(ai_ev);
+ nomem_aiev:
+ 	sctp_ulpevent_free(ev);
+ nomem_ev:
+@@ -4416,7 +4418,7 @@ static enum sctp_ierror sctp_sf_authenticate(
+ 				 sh_key, GFP_ATOMIC);
+ 
+ 	/* Discard the packet if the digests do not match */
+-	if (memcmp(save_digest, digest, sig_len)) {
++	if (crypto_memneq(save_digest, digest, sig_len)) {
+ 		kfree(save_digest);
+ 		return SCTP_IERROR_BAD_SIG;
+ 	}
+diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
+index 43c57124de52f4..67474470320cba 100644
+--- a/net/sunrpc/svc_xprt.c
++++ b/net/sunrpc/svc_xprt.c
+@@ -606,7 +606,8 @@ int svc_port_is_privileged(struct sockaddr *sin)
+ }
+ 
+ /*
+- * Make sure that we don't have too many active connections. If we have,
++ * Make sure that we don't have too many connections that have not yet
++ * demonstrated that they have access to the NFS server. If we have,
+  * something must be dropped. It's not clear what will happen if we allow
+  * "too many" connections, but when dealing with network-facing software,
+  * we have to code defensively. Here we do that by imposing hard limits.
+@@ -625,27 +626,25 @@ int svc_port_is_privileged(struct sockaddr *sin)
+  */
+ static void svc_check_conn_limits(struct svc_serv *serv)
+ {
+-	unsigned int limit = serv->sv_maxconn ? serv->sv_maxconn :
+-				(serv->sv_nrthreads+3) * 20;
++	unsigned int limit = serv->sv_maxconn ? serv->sv_maxconn : 64;
+ 
+ 	if (serv->sv_tmpcnt > limit) {
+-		struct svc_xprt *xprt = NULL;
++		struct svc_xprt *xprt = NULL, *xprti;
+ 		spin_lock_bh(&serv->sv_lock);
+ 		if (!list_empty(&serv->sv_tempsocks)) {
+-			/* Try to help the admin */
+-			net_notice_ratelimited("%s: too many open connections, consider increasing the %s\n",
+-					       serv->sv_name, serv->sv_maxconn ?
+-					       "max number of connections" :
+-					       "number of threads");
+ 			/*
+ 			 * Always select the oldest connection. It's not fair,
+-			 * but so is life
++			 * but nor is life.
+ 			 */
+-			xprt = list_entry(serv->sv_tempsocks.prev,
+-					  struct svc_xprt,
+-					  xpt_list);
+-			set_bit(XPT_CLOSE, &xprt->xpt_flags);
+-			svc_xprt_get(xprt);
++			list_for_each_entry_reverse(xprti, &serv->sv_tempsocks,
++						    xpt_list) {
++				if (!test_bit(XPT_PEER_VALID, &xprti->xpt_flags)) {
++					xprt = xprti;
++					set_bit(XPT_CLOSE, &xprt->xpt_flags);
++					svc_xprt_get(xprt);
++					break;
++				}
++			}
+ 		}
+ 		spin_unlock_bh(&serv->sv_lock);
+ 
+@@ -1029,6 +1028,19 @@ static void svc_delete_xprt(struct svc_xprt *xprt)
+ 	struct svc_serv	*serv = xprt->xpt_server;
+ 	struct svc_deferred_req *dr;
+ 
++	/* unregister with rpcbind for when transport type is TCP or UDP.
++	 */
++	if (test_bit(XPT_RPCB_UNREG, &xprt->xpt_flags)) {
++		struct svc_sock *svsk = container_of(xprt, struct svc_sock,
++						     sk_xprt);
++		struct socket *sock = svsk->sk_sock;
++
++		if (svc_register(serv, xprt->xpt_net, sock->sk->sk_family,
++				 sock->sk->sk_protocol, 0) < 0)
++			pr_warn("failed to unregister %s with rpcbind\n",
++				xprt->xpt_class->xcl_name);
++	}
++
+ 	if (test_and_set_bit(XPT_DEAD, &xprt->xpt_flags))
+ 		return;
+ 
+@@ -1039,7 +1051,8 @@ static void svc_delete_xprt(struct svc_xprt *xprt)
+ 
+ 	spin_lock_bh(&serv->sv_lock);
+ 	list_del_init(&xprt->xpt_list);
+-	if (test_bit(XPT_TEMP, &xprt->xpt_flags))
++	if (test_bit(XPT_TEMP, &xprt->xpt_flags) &&
++	    !test_bit(XPT_PEER_VALID, &xprt->xpt_flags))
+ 		serv->sv_tmpcnt--;
+ 	spin_unlock_bh(&serv->sv_lock);
+ 
+diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
+index e61e945760582a..443d8390ebf1c8 100644
+--- a/net/sunrpc/svcsock.c
++++ b/net/sunrpc/svcsock.c
+@@ -837,6 +837,7 @@ static void svc_udp_init(struct svc_sock *svsk, struct svc_serv *serv)
+ 	/* data might have come in before data_ready set up */
+ 	set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
+ 	set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);
++	set_bit(XPT_RPCB_UNREG, &svsk->sk_xprt.xpt_flags);
+ 
+ 	/* make sure we get destination address info */
+ 	switch (svsk->sk_sk->sk_family) {
+@@ -1357,6 +1358,7 @@ static void svc_tcp_init(struct svc_sock *svsk, struct svc_serv *serv)
+ 	if (sk->sk_state == TCP_LISTEN) {
+ 		strcpy(svsk->sk_xprt.xpt_remotebuf, "listener");
+ 		set_bit(XPT_LISTENER, &svsk->sk_xprt.xpt_flags);
++		set_bit(XPT_RPCB_UNREG, &svsk->sk_xprt.xpt_flags);
+ 		sk->sk_data_ready = svc_tcp_listen_data_ready;
+ 		set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags);
+ 	} else {
+diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
+index 406b20dfee8d47..9351e1298ef404 100644
+--- a/net/xdp/xsk_queue.h
++++ b/net/xdp/xsk_queue.h
+@@ -143,14 +143,24 @@ static inline bool xp_unused_options_set(u32 options)
+ static inline bool xp_aligned_validate_desc(struct xsk_buff_pool *pool,
+ 					    struct xdp_desc *desc)
+ {
+-	u64 addr = desc->addr - pool->tx_metadata_len;
+-	u64 len = desc->len + pool->tx_metadata_len;
+-	u64 offset = addr & (pool->chunk_size - 1);
++	u64 len = desc->len;
++	u64 addr, offset;
+ 
+-	if (!desc->len)
++	if (!len)
+ 		return false;
+ 
+-	if (offset + len > pool->chunk_size)
++	/* Can overflow if desc->addr < pool->tx_metadata_len */
++	if (check_sub_overflow(desc->addr, pool->tx_metadata_len, &addr))
++		return false;
++
++	offset = addr & (pool->chunk_size - 1);
++
++	/*
++	 * Can't overflow: @offset is guaranteed to be < ``U32_MAX``
++	 * (pool->chunk_size is ``u32``), @len is guaranteed
++	 * to be <= ``U32_MAX``.
++	 */
++	if (offset + len + pool->tx_metadata_len > pool->chunk_size)
+ 		return false;
+ 
+ 	if (addr >= pool->addrs_cnt)
+@@ -158,27 +168,42 @@ static inline bool xp_aligned_validate_desc(struct xsk_buff_pool *pool,
+ 
+ 	if (xp_unused_options_set(desc->options))
+ 		return false;
++
+ 	return true;
+ }
+ 
+ static inline bool xp_unaligned_validate_desc(struct xsk_buff_pool *pool,
+ 					      struct xdp_desc *desc)
+ {
+-	u64 addr = xp_unaligned_add_offset_to_addr(desc->addr) - pool->tx_metadata_len;
+-	u64 len = desc->len + pool->tx_metadata_len;
++	u64 len = desc->len;
++	u64 addr, end;
+ 
+-	if (!desc->len)
++	if (!len)
+ 		return false;
+ 
++	/* Can't overflow: @len is guaranteed to be <= ``U32_MAX`` */
++	len += pool->tx_metadata_len;
+ 	if (len > pool->chunk_size)
+ 		return false;
+ 
+-	if (addr >= pool->addrs_cnt || addr + len > pool->addrs_cnt ||
+-	    xp_desc_crosses_non_contig_pg(pool, addr, len))
++	/* Can overflow if desc->addr is close to 0 */
++	if (check_sub_overflow(xp_unaligned_add_offset_to_addr(desc->addr),
++			       pool->tx_metadata_len, &addr))
++		return false;
++
++	if (addr >= pool->addrs_cnt)
++		return false;
++
++	/* Can overflow if pool->addrs_cnt is high enough */
++	if (check_add_overflow(addr, len, &end) || end > pool->addrs_cnt)
++		return false;
++
++	if (xp_desc_crosses_non_contig_pg(pool, addr, len))
+ 		return false;
+ 
+ 	if (xp_unused_options_set(desc->options))
+ 		return false;
++
+ 	return true;
+ }
+ 
+diff --git a/security/keys/trusted-keys/trusted_tpm1.c b/security/keys/trusted-keys/trusted_tpm1.c
+index 89c9798d180071..e73f2c6c817a07 100644
+--- a/security/keys/trusted-keys/trusted_tpm1.c
++++ b/security/keys/trusted-keys/trusted_tpm1.c
+@@ -7,6 +7,7 @@
+  */
+ 
+ #include <crypto/hash_info.h>
++#include <crypto/utils.h>
+ #include <linux/init.h>
+ #include <linux/slab.h>
+ #include <linux/parser.h>
+@@ -241,7 +242,7 @@ int TSS_checkhmac1(unsigned char *buffer,
+ 	if (ret < 0)
+ 		goto out;
+ 
+-	if (memcmp(testhmac, authdata, SHA1_DIGEST_SIZE))
++	if (crypto_memneq(testhmac, authdata, SHA1_DIGEST_SIZE))
+ 		ret = -EINVAL;
+ out:
+ 	kfree_sensitive(sdesc);
+@@ -334,7 +335,7 @@ static int TSS_checkhmac2(unsigned char *buffer,
+ 			  TPM_NONCE_SIZE, ononce, 1, continueflag1, 0, 0);
+ 	if (ret < 0)
+ 		goto out;
+-	if (memcmp(testhmac1, authdata1, SHA1_DIGEST_SIZE)) {
++	if (crypto_memneq(testhmac1, authdata1, SHA1_DIGEST_SIZE)) {
+ 		ret = -EINVAL;
+ 		goto out;
+ 	}
+@@ -343,7 +344,7 @@ static int TSS_checkhmac2(unsigned char *buffer,
+ 			  TPM_NONCE_SIZE, ononce, 1, continueflag2, 0, 0);
+ 	if (ret < 0)
+ 		goto out;
+-	if (memcmp(testhmac2, authdata2, SHA1_DIGEST_SIZE))
++	if (crypto_memneq(testhmac2, authdata2, SHA1_DIGEST_SIZE))
+ 		ret = -EINVAL;
+ out:
+ 	kfree_sensitive(sdesc);
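
The SCTP and trusted-keys hunks replace memcmp() on authentication digests with crypto_memneq(), whose run time does not depend on where the first differing byte sits, closing a timing side channel when an attacker controls one of the inputs. A sketch of the idea follows; it is not the kernel's implementation, which additionally defends against compilers reordering or short-circuiting the loop.

#include <stddef.h>
#include <stdio.h>

/* returns nonzero if the buffers differ; no data-dependent early exit */
static unsigned long memneq(const void *a, const void *b, size_t n)
{
	const volatile unsigned char *pa = a, *pb = b;
	unsigned long neq = 0;

	for (size_t i = 0; i < n; i++)
		neq |= pa[i] ^ pb[i];   /* accumulate, never branch */
	return neq;
}

int main(void)
{
	unsigned char sig[20]  = { 1, 2, 3 };
	unsigned char good[20] = { 1, 2, 3 };
	unsigned char bad[20]  = { 9, 2, 3 };

	printf("match: %lu\n", memneq(sig, good, sizeof(sig)));  /* 0 */
	printf("diff:  %lu\n", memneq(sig, bad, sizeof(sig)));   /* nonzero */
	return 0;
}
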
+diff --git a/sound/soc/sof/intel/hda-pcm.c b/sound/soc/sof/intel/hda-pcm.c
+index f6e24edd7adbe9..898e4fcde2dde0 100644
+--- a/sound/soc/sof/intel/hda-pcm.c
++++ b/sound/soc/sof/intel/hda-pcm.c
+@@ -29,6 +29,8 @@
+ #define SDnFMT_BITS(x)	((x) << 4)
+ #define SDnFMT_CHAN(x)	((x) << 0)
+ 
++#define HDA_MAX_PERIOD_TIME_HEADROOM	10
++
+ static bool hda_always_enable_dmi_l1;
+ module_param_named(always_enable_dmi_l1, hda_always_enable_dmi_l1, bool, 0444);
+ MODULE_PARM_DESC(always_enable_dmi_l1, "SOF HDA always enable DMI l1");
+@@ -276,19 +278,30 @@ int hda_dsp_pcm_open(struct snd_sof_dev *sdev,
+ 	 * On playback start the DMA will transfer dsp_max_burst_size_in_ms
+ 	 * amount of data in one initial burst to fill up the host DMA buffer.
+ 	 * Consequent DMA burst sizes are shorter and their length can vary.
+-	 * To make sure that userspace allocate large enough ALSA buffer we need
+-	 * to place a constraint on the buffer time.
++	 * To avoid immediate xrun by the initial burst we need to place
++	 * constraint on the period size (via PERIOD_TIME) to cover the size of
++	 * the host buffer.
++	 * We need to add headroom of max 10ms as the firmware needs time to
++	 * settle to the 1ms pacing and initially it can run faster for few
++	 * internal periods.
+ 	 *
+ 	 * On capture the DMA will transfer 1ms chunks.
+-	 *
+-	 * Exact dsp_max_burst_size_in_ms constraint is racy, so set the
+-	 * constraint to a minimum of 2x dsp_max_burst_size_in_ms.
+ 	 */
+-	if (spcm->stream[direction].dsp_max_burst_size_in_ms)
++	if (spcm->stream[direction].dsp_max_burst_size_in_ms) {
++		unsigned int period_time = spcm->stream[direction].dsp_max_burst_size_in_ms;
++
++		/*
++		 * add headroom over the maximum burst size to cover the time
++		 * needed for the DMA pace to settle.
++		 * Limit the headroom time to HDA_MAX_PERIOD_TIME_HEADROOM
++		 */
++		period_time += min(period_time, HDA_MAX_PERIOD_TIME_HEADROOM);
++
+ 		snd_pcm_hw_constraint_minmax(substream->runtime,
+-			SNDRV_PCM_HW_PARAM_BUFFER_TIME,
+-			spcm->stream[direction].dsp_max_burst_size_in_ms * USEC_PER_MSEC * 2,
++			SNDRV_PCM_HW_PARAM_PERIOD_TIME,
++			period_time * USEC_PER_MSEC,
+ 			UINT_MAX);
++	}
+ 
+ 	/* binding pcm substream to hda stream */
+ 	substream->runtime->private_data = &dsp_stream->hstream;
+diff --git a/sound/soc/sof/intel/hda-stream.c b/sound/soc/sof/intel/hda-stream.c
+index 24f3cc7676142b..2be0d02f9cf9b7 100644
+--- a/sound/soc/sof/intel/hda-stream.c
++++ b/sound/soc/sof/intel/hda-stream.c
+@@ -1103,10 +1103,35 @@ u64 hda_dsp_get_stream_llp(struct snd_sof_dev *sdev,
+ 			   struct snd_soc_component *component,
+ 			   struct snd_pcm_substream *substream)
+ {
+-	struct hdac_stream *hstream = substream->runtime->private_data;
+-	struct hdac_ext_stream *hext_stream = stream_to_hdac_ext_stream(hstream);
++	struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
++	struct snd_soc_pcm_runtime *be_rtd = NULL;
++	struct hdac_ext_stream *hext_stream;
++	struct snd_soc_dai *cpu_dai;
++	struct snd_soc_dpcm *dpcm;
+ 	u32 llp_l, llp_u;
+ 
++	/*
++	 * The LLP needs to be read from the Link DMA used for this FE as it is
++	 * allowed to use any combination of Link and Host channels
++	 */
++	for_each_dpcm_be(rtd, substream->stream, dpcm) {
++		if (dpcm->fe != rtd)
++			continue;
++
++		be_rtd = dpcm->be;
++	}
++
++	if (!be_rtd)
++		return 0;
++
++	cpu_dai = snd_soc_rtd_to_cpu(be_rtd, 0);
++	if (!cpu_dai)
++		return 0;
++
++	hext_stream = snd_soc_dai_get_dma_data(cpu_dai, substream);
++	if (!hext_stream)
++		return 0;
++
+ 	/*
+ 	 * The pplc_addr have been calculated during probe in
+ 	 * hda_dsp_stream_init():
+diff --git a/sound/soc/sof/ipc4-pcm.c b/sound/soc/sof/ipc4-pcm.c
+index 9db2cdb3212822..b11279f9d3c978 100644
+--- a/sound/soc/sof/ipc4-pcm.c
++++ b/sound/soc/sof/ipc4-pcm.c
+@@ -19,12 +19,14 @@
+  * struct sof_ipc4_timestamp_info - IPC4 timestamp info
+  * @host_copier: the host copier of the pcm stream
+  * @dai_copier: the dai copier of the pcm stream
+- * @stream_start_offset: reported by fw in memory window (converted to frames)
+- * @stream_end_offset: reported by fw in memory window (converted to frames)
++ * @stream_start_offset: reported by fw in memory window (converted to
++ *                       frames at host_copier sampling rate)
++ * @stream_end_offset: reported by fw in memory window (converted to
++ *                     frames at host_copier sampling rate)
+  * @llp_offset: llp offset in memory window
+- * @boundary: wrap boundary should be used for the LLP frame counter
+  * @delay: Calculated and stored in pointer callback. The stored value is
+- *	   returned in the delay callback.
++ *         returned in the delay callback. Expressed in frames at host copier
++ *         sampling rate.
+  */
+ struct sof_ipc4_timestamp_info {
+ 	struct sof_ipc4_copier *host_copier;
+@@ -33,7 +35,6 @@ struct sof_ipc4_timestamp_info {
+ 	u64 stream_end_offset;
+ 	u32 llp_offset;
+ 
+-	u64 boundary;
+ 	snd_pcm_sframes_t delay;
+ };
+ 
+@@ -48,6 +49,16 @@ struct sof_ipc4_pcm_stream_priv {
+ 	bool chain_dma_allocated;
+ };
+ 
++/*
++ * Modulus to use to compare host and link position counters. The sampling
++ * rates may be different, so the raw hardware counters will wrap
++ * around at different times. To calculate differences, use
++ * DELAY_BOUNDARY as a common modulus. This value must be smaller than
++ * the wrap-around point of any hardware counter, and larger than any
++ * valid delay measurement.
++ */
++#define DELAY_BOUNDARY		U32_MAX
++
+ static inline struct sof_ipc4_timestamp_info *
+ sof_ipc4_sps_to_time_info(struct snd_sof_pcm_stream *sps)
+ {
+@@ -409,9 +420,33 @@ static int sof_ipc4_trigger_pipelines(struct snd_soc_component *component,
+ 	 * If use_chain_dma attribute is set we proceed to chained DMA
+ 	 * trigger function that handles the rest for the substream.
+ 	 */
+-	if (pipeline->use_chain_dma)
+-		return sof_ipc4_chain_dma_trigger(sdev, spcm, substream->stream,
+-						  pipeline_list, state, cmd);
++	if (pipeline->use_chain_dma) {
++		struct sof_ipc4_timestamp_info *time_info;
++
++		time_info = sof_ipc4_sps_to_time_info(&spcm->stream[substream->stream]);
++
++		ret = sof_ipc4_chain_dma_trigger(sdev, spcm, substream->stream,
++						 pipeline_list, state, cmd);
++		if (ret || !time_info)
++			return ret;
++
++		if (state == SOF_IPC4_PIPE_PAUSED) {
++			/*
++			 * Record the DAI position for delay reporting
++			 * To handle multiple pause/resume/xrun we need to add
++			 * the positions to simulate how the firmware behaves
++			 */
++			u64 pos = snd_sof_pcm_get_dai_frame_counter(sdev, component,
++								    substream);
++
++			time_info->stream_end_offset += pos;
++		} else if (state == SOF_IPC4_PIPE_RESET) {
++			/* Reset the end offset as the stream is stopped */
++			time_info->stream_end_offset = 0;
++		}
++
++		return 0;
++	}
+ 
+ 	/* allocate memory for the pipeline data */
+ 	trigger_list = kzalloc(struct_size(trigger_list, pipeline_instance_ids,
+@@ -909,6 +944,35 @@ static int sof_ipc4_pcm_hw_params(struct snd_soc_component *component,
+ 	return 0;
+ }
+ 
++static u64 sof_ipc4_frames_dai_to_host(struct sof_ipc4_timestamp_info *time_info, u64 value)
++{
++	u64 dai_rate, host_rate;
++
++	if (!time_info->dai_copier || !time_info->host_copier)
++		return value;
++
++	/*
++	 * copiers do not change sampling rate, so we can use the
++	 * out_format independently of stream direction
++	 */
++	dai_rate = time_info->dai_copier->data.out_format.sampling_frequency;
++	host_rate = time_info->host_copier->data.out_format.sampling_frequency;
++
++	if (!dai_rate || !host_rate || dai_rate == host_rate)
++		return value;
++
++	/* take care not to overflow u64, rates can be up to 768000 */
++	if (value > U32_MAX) {
++		value = div64_u64(value, dai_rate);
++		value *= host_rate;
++	} else {
++		value *= host_rate;
++		value = div64_u64(value, dai_rate);
++	}
++
++	return value;
++}
++
+ static int sof_ipc4_get_stream_start_offset(struct snd_sof_dev *sdev,
+ 					    struct snd_pcm_substream *substream,
+ 					    struct snd_sof_pcm_stream *sps,
+@@ -924,8 +988,30 @@ static int sof_ipc4_get_stream_start_offset(struct snd_sof_dev *sdev,
+ 	if (!host_copier || !dai_copier)
+ 		return -EINVAL;
+ 
+-	if (host_copier->data.gtw_cfg.node_id == SOF_IPC4_INVALID_NODE_ID)
++	if (host_copier->data.gtw_cfg.node_id == SOF_IPC4_INVALID_NODE_ID) {
+ 		return -EINVAL;
++	} else if (host_copier->data.gtw_cfg.node_id == SOF_IPC4_CHAIN_DMA_NODE_ID) {
++		/*
++		 * While the firmware does not support time_info reporting for
++		 * streams using ChainDMA, it is granted that ChainDMA can only
++		 * be used on Host+Link pairs where the link position is
++		 * accessible from the host side.
++		 *
++		 * Enable delay calculation in case of ChainDMA via host
++		 * accessible registers.
++		 *
++		 * The ChainDMA prefills the link DMA with a preamble
++		 * of zero samples. Set the stream start offset based
++		 * on size of the preamble (driver provided fifo size
++		 * multiplied by 2.5). We add 1ms of margin as the FW
++		 * will align the buffer size to DMA hardware
++		 * alignment that is not known to host.
++		 */
++		int pre_ms = SOF_IPC4_CHAIN_DMA_BUF_SIZE_MS * 5 / 2 + 1;
++
++		time_info->stream_start_offset = pre_ms * substream->runtime->rate / MSEC_PER_SEC;
++		goto out;
++	}
+ 
+ 	node_index = SOF_IPC4_NODE_INDEX(host_copier->data.gtw_cfg.node_id);
+ 	offset = offsetof(struct sof_ipc4_fw_registers, pipeline_regs) + node_index * sizeof(ppl_reg);
+@@ -943,13 +1029,13 @@ static int sof_ipc4_get_stream_start_offset(struct snd_sof_dev *sdev,
+ 	time_info->stream_end_offset = ppl_reg.stream_end_offset;
+ 	do_div(time_info->stream_end_offset, dai_sample_size);
+ 
+-	/*
+-	 * Calculate the wrap boundary need to be used for delay calculation
+-	 * The host counter is in bytes, it will wrap earlier than the frames
+-	 * based link counter.
+-	 */
+-	time_info->boundary = div64_u64(~((u64)0),
+-					frames_to_bytes(substream->runtime, 1));
++	/* convert to host frame time */
++	time_info->stream_start_offset =
++		sof_ipc4_frames_dai_to_host(time_info, time_info->stream_start_offset);
++	time_info->stream_end_offset =
++		sof_ipc4_frames_dai_to_host(time_info, time_info->stream_end_offset);
++
++out:
+ 	/* Initialize the delay value to 0 (no delay) */
+ 	time_info->delay = 0;
+ 
+@@ -992,6 +1078,8 @@ static int sof_ipc4_pcm_pointer(struct snd_soc_component *component,
+ 
+ 	/* For delay calculation we need the host counter */
+ 	host_cnt = snd_sof_pcm_get_host_byte_counter(sdev, component, substream);
++
++	/* Store the original value to host_ptr */
+ 	host_ptr = host_cnt;
+ 
+ 	/* convert the host_cnt to frames */
+@@ -1010,6 +1098,8 @@ static int sof_ipc4_pcm_pointer(struct snd_soc_component *component,
+ 		sof_mailbox_read(sdev, time_info->llp_offset, &llp, sizeof(llp));
+ 		dai_cnt = ((u64)llp.reading.llp_u << 32) | llp.reading.llp_l;
+ 	}
++
++	dai_cnt = sof_ipc4_frames_dai_to_host(time_info, dai_cnt);
+ 	dai_cnt += time_info->stream_end_offset;
+ 
+ 	/* In two cases dai dma counter is not accurate
+@@ -1043,8 +1133,9 @@ static int sof_ipc4_pcm_pointer(struct snd_soc_component *component,
+ 		dai_cnt -= time_info->stream_start_offset;
+ 	}
+ 
+-	/* Wrap the dai counter at the boundary where the host counter wraps */
+-	div64_u64_rem(dai_cnt, time_info->boundary, &dai_cnt);
++	/* Convert to a common base before comparisons */
++	dai_cnt &= DELAY_BOUNDARY;
++	host_cnt &= DELAY_BOUNDARY;
+ 
+ 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ 		head_cnt = host_cnt;
+@@ -1054,14 +1145,11 @@ static int sof_ipc4_pcm_pointer(struct snd_soc_component *component,
+ 		tail_cnt = host_cnt;
+ 	}
+ 
+-	if (head_cnt < tail_cnt) {
+-		time_info->delay = time_info->boundary - tail_cnt + head_cnt;
+-		goto out;
+-	}
+-
+-	time_info->delay =  head_cnt - tail_cnt;
++	if (unlikely(head_cnt < tail_cnt))
++		time_info->delay = DELAY_BOUNDARY - tail_cnt + head_cnt;
++	else
++		time_info->delay = head_cnt - tail_cnt;
+ 
+-out:
+ 	/*
+ 	 * Convert the host byte counter to PCM pointer which wraps in buffer
+ 	 * and it is in frames
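
Two pieces of the ipc4-pcm.c change above are easier to see with numbers. sof_ipc4_frames_dai_to_host() rescales a frame count between DAI and host sampling rates and divides first whenever the count exceeds U32_MAX, because with rates up to 768000 the multiply-first order could overflow a u64 for large counts; and the delay path masks both counters to the common DELAY_BOUNDARY before subtracting, so a head counter that has wrapped still yields a small, sane delay. A userspace sketch with invented rates and counter values:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define DELAY_BOUNDARY UINT32_MAX

static uint64_t frames_dai_to_host(uint64_t v, uint64_t dai_rate,
				   uint64_t host_rate)
{
	if (!dai_rate || !host_rate || dai_rate == host_rate)
		return v;
	if (v > UINT32_MAX) {           /* divide first: v * rate could wrap */
		v /= dai_rate;
		v *= host_rate;
	} else {                        /* multiply first: better precision */
		v *= host_rate;
		v /= dai_rate;
	}
	return v;
}

static uint64_t delay(uint64_t head_cnt, uint64_t tail_cnt)
{
	head_cnt &= DELAY_BOUNDARY;     /* reduce both to a common modulus */
	tail_cnt &= DELAY_BOUNDARY;
	if (head_cnt < tail_cnt)        /* head wrapped past the boundary */
		return DELAY_BOUNDARY - tail_cnt + head_cnt;
	return head_cnt - tail_cnt;
}

int main(void)
{
	/* 48 kHz link feeding a 192 kHz host copier: 48000 -> 192000 */
	printf("%" PRIu64 "\n", frames_dai_to_host(48000, 48000, 192000));

	/* head has wrapped; result stays small instead of a bogus huge value */
	printf("%" PRIu64 "\n", delay((uint64_t)DELAY_BOUNDARY + 100,
				      (uint64_t)DELAY_BOUNDARY - 20));
	return 0;
}
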
+diff --git a/sound/soc/sof/ipc4-topology.c b/sound/soc/sof/ipc4-topology.c
+index f82db7f2a6b7e7..4849a3ce02dca3 100644
+--- a/sound/soc/sof/ipc4-topology.c
++++ b/sound/soc/sof/ipc4-topology.c
+@@ -32,7 +32,6 @@ MODULE_PARM_DESC(ipc4_ignore_cpc,
+ 
+ #define SOF_IPC4_GAIN_PARAM_ID  0
+ #define SOF_IPC4_TPLG_ABI_SIZE 6
+-#define SOF_IPC4_CHAIN_DMA_BUF_SIZE_MS 2
+ 
+ static DEFINE_IDA(alh_group_ida);
+ static DEFINE_IDA(pipeline_ida);
+@@ -519,8 +518,13 @@ static int sof_ipc4_widget_setup_pcm(struct snd_sof_widget *swidget)
+ 				      swidget->tuples,
+ 				      swidget->num_tuples, sizeof(u32), 1);
+ 		/* Set default DMA buffer size if it is not specified in topology */
+-		if (!sps->dsp_max_burst_size_in_ms)
+-			sps->dsp_max_burst_size_in_ms = SOF_IPC4_MIN_DMA_BUFFER_SIZE;
++		if (!sps->dsp_max_burst_size_in_ms) {
++			struct snd_sof_widget *pipe_widget = swidget->spipe->pipe_widget;
++			struct sof_ipc4_pipeline *pipeline = pipe_widget->private;
++
++			sps->dsp_max_burst_size_in_ms = pipeline->use_chain_dma ?
++				SOF_IPC4_CHAIN_DMA_BUFFER_SIZE : SOF_IPC4_MIN_DMA_BUFFER_SIZE;
++		}
+ 	} else {
+ 		/* Capture data is copied from DSP to host in 1ms bursts */
+ 		spcm->stream[dir].dsp_max_burst_size_in_ms = 1;
+@@ -1777,10 +1781,10 @@ sof_ipc4_prepare_copier_module(struct snd_sof_widget *swidget,
+ 			pipeline->msg.extension |= SOF_IPC4_GLB_EXT_CHAIN_DMA_FIFO_SIZE(fifo_size);
+ 
+ 			/*
+-			 * Chain DMA does not support stream timestamping, set node_id to invalid
+-			 * to skip the code in sof_ipc4_get_stream_start_offset().
++			 * Chain DMA does not support stream timestamping, but it
++			 * can use the host side registers for delay calculation.
+ 			 */
+-			copier_data->gtw_cfg.node_id = SOF_IPC4_INVALID_NODE_ID;
++			copier_data->gtw_cfg.node_id = SOF_IPC4_CHAIN_DMA_NODE_ID;
+ 
+ 			return 0;
+ 		}
+diff --git a/sound/soc/sof/ipc4-topology.h b/sound/soc/sof/ipc4-topology.h
+index f4dc499c0ffe55..da9592430b1520 100644
+--- a/sound/soc/sof/ipc4-topology.h
++++ b/sound/soc/sof/ipc4-topology.h
+@@ -58,10 +58,14 @@
+ 
+ #define SOF_IPC4_DMA_DEVICE_MAX_COUNT 16
+ 
++#define SOF_IPC4_CHAIN_DMA_NODE_ID	0x7fffffff
+ #define SOF_IPC4_INVALID_NODE_ID	0xffffffff
+ 
+-/* FW requires minimum 2ms DMA buffer size */
+-#define SOF_IPC4_MIN_DMA_BUFFER_SIZE	2
++/* FW requires minimum 4ms DMA buffer size */
++#define SOF_IPC4_MIN_DMA_BUFFER_SIZE	4
++
++/* ChainDMA in fw uses 5ms DMA buffer */
++#define SOF_IPC4_CHAIN_DMA_BUFFER_SIZE	5
+ 
+ /*
+  * The base of multi-gateways. Multi-gateways addressing starts from
+@@ -246,6 +250,8 @@ struct sof_ipc4_dma_stream_ch_map {
+ #define SOF_IPC4_DMA_METHOD_HDA   1
+ #define SOF_IPC4_DMA_METHOD_GPDMA 2 /* defined for consistency but not used */
+ 
++#define SOF_IPC4_CHAIN_DMA_BUF_SIZE_MS 2
++
+ /**
+  * struct sof_ipc4_dma_config: DMA configuration
+  * @dma_method: HDAudio or GPDMA
+diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile
+index 1658596188bf85..592ca17b4b74a5 100644
+--- a/tools/build/feature/Makefile
++++ b/tools/build/feature/Makefile
+@@ -327,10 +327,10 @@ $(OUTPUT)test-libcapstone.bin:
+ 	$(BUILD) # -lcapstone provided by $(FEATURE_CHECK_LDFLAGS-libcapstone)
+ 
+ $(OUTPUT)test-compile-32.bin:
+-	$(CC) -m32 -o $@ test-compile.c
++	$(CC) -m32 -Wall -Werror -o $@ test-compile.c
+ 
+ $(OUTPUT)test-compile-x32.bin:
+-	$(CC) -mx32 -o $@ test-compile.c
++	$(CC) -mx32 -Wall -Werror -o $@ test-compile.c
+ 
+ $(OUTPUT)test-zlib.bin:
+ 	$(BUILD) -lz
+diff --git a/tools/lib/perf/include/perf/event.h b/tools/lib/perf/include/perf/event.h
+index 37bb7771d91434..32b75c0326c931 100644
+--- a/tools/lib/perf/include/perf/event.h
++++ b/tools/lib/perf/include/perf/event.h
+@@ -291,6 +291,7 @@ struct perf_record_header_event_type {
+ struct perf_record_header_tracing_data {
+ 	struct perf_event_header header;
+ 	__u32			 size;
++	__u32			 pad;
+ };
+ 
+ #define PERF_RECORD_MISC_BUILD_ID_SIZE (1 << 15)
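
The new pad member matters because the tracing-data record is handled as a fixed-size C struct: without it sizeof() is 12, while the padded layout is 16 bytes, which -- as the fix implies -- is the size of the record that actually precedes the tracing data payload in the perf.data stream. A quick layout check; the struct names are local stand-ins for the libperf types.

#include <stdint.h>
#include <stdio.h>

struct perf_event_header {          /* 8 bytes: u32 + u16 + u16 */
	uint32_t type;
	uint16_t misc;
	uint16_t size;
};

struct tracing_data_old {           /* before the patch: 12 bytes */
	struct perf_event_header header;
	uint32_t size;
};

struct tracing_data_new {           /* after the patch: 16 bytes */
	struct perf_event_header header;
	uint32_t size;
	uint32_t pad;
};

int main(void)
{
	/* only the padded layout matches a 16-byte on-file record */
	printf("old: %zu new: %zu\n",
	       sizeof(struct tracing_data_old),
	       sizeof(struct tracing_data_new));
	return 0;
}
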
+diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
+index 628c61397d2d38..b578930ed76a42 100644
+--- a/tools/perf/builtin-stat.c
++++ b/tools/perf/builtin-stat.c
+@@ -639,8 +639,7 @@ static enum counter_recovery stat_handle_error(struct evsel *counter)
+ 	 * (behavior changed with commit b0a873e).
+ 	 */
+ 	if (errno == EINVAL || errno == ENOSYS ||
+-	    errno == ENOENT || errno == EOPNOTSUPP ||
+-	    errno == ENXIO) {
++	    errno == ENOENT || errno == ENXIO) {
+ 		if (verbose > 0)
+ 			ui__warning("%s event is not supported by the kernel.\n",
+ 				    evsel__name(counter));
+@@ -658,7 +657,7 @@ static enum counter_recovery stat_handle_error(struct evsel *counter)
+ 		if (verbose > 0)
+ 			ui__warning("%s\n", msg);
+ 		return COUNTER_RETRY;
+-	} else if (target__has_per_thread(&target) &&
++	} else if (target__has_per_thread(&target) && errno != EOPNOTSUPP &&
+ 		   evsel_list->core.threads &&
+ 		   evsel_list->core.threads->err_thread != -1) {
+ 		/*
+@@ -679,6 +678,19 @@ static enum counter_recovery stat_handle_error(struct evsel *counter)
+ 		return COUNTER_SKIP;
+ 	}
+ 
++	if (errno == EOPNOTSUPP) {
++		if (verbose > 0) {
++			ui__warning("%s event is not supported by the kernel.\n",
++				    evsel__name(counter));
++		}
++		counter->supported = false;
++		counter->errored = true;
++
++		if ((evsel__leader(counter) != counter) ||
++		    !(counter->core.leader->nr_members > 1))
++			return COUNTER_SKIP;
++	}
++
+ 	evsel__open_strerror(counter, &target, errno, msg, sizeof(msg));
+ 	ui__error("%s\n", msg);
+ 
+diff --git a/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/metrics.json b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/metrics.json
+index 5228f94a793f95..6817cac149e0bc 100644
+--- a/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/metrics.json
++++ b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/metrics.json
+@@ -113,7 +113,7 @@
+     {
+         "MetricName": "load_store_spec_rate",
+         "MetricExpr": "LDST_SPEC / INST_SPEC",
+-        "BriefDescription": "The rate of load or store instructions speculatively executed to overall instructions speclatively executed",
++        "BriefDescription": "The rate of load or store instructions speculatively executed to overall instructions speculatively executed",
+         "MetricGroup": "Operation_Mix",
+         "ScaleUnit": "100percent of operations"
+     },
+@@ -132,7 +132,7 @@
+     {
+         "MetricName": "pc_write_spec_rate",
+         "MetricExpr": "PC_WRITE_SPEC / INST_SPEC",
+-        "BriefDescription": "The rate of software change of the PC speculatively executed to overall instructions speclatively executed",
++        "BriefDescription": "The rate of software change of the PC speculatively executed to overall instructions speculatively executed",
+         "MetricGroup": "Operation_Mix",
+         "ScaleUnit": "100percent of operations"
+     },
+@@ -195,14 +195,14 @@
+     {
+         "MetricName": "stall_frontend_cache_rate",
+         "MetricExpr": "STALL_FRONTEND_CACHE / CPU_CYCLES",
+-        "BriefDescription": "Proportion of cycles stalled and no ops delivered from frontend and cache miss",
++        "BriefDescription": "Proportion of cycles stalled and no operations delivered from frontend and cache miss",
+         "MetricGroup": "Stall",
+         "ScaleUnit": "100percent of cycles"
+     },
+     {
+         "MetricName": "stall_frontend_tlb_rate",
+         "MetricExpr": "STALL_FRONTEND_TLB / CPU_CYCLES",
+-        "BriefDescription": "Proportion of cycles stalled and no ops delivered from frontend and TLB miss",
++        "BriefDescription": "Proportion of cycles stalled and no operations delivered from frontend and TLB miss",
+         "MetricGroup": "Stall",
+         "ScaleUnit": "100percent of cycles"
+     },
+@@ -391,7 +391,7 @@
+         "ScaleUnit": "100percent of cache acceses"
+     },
+     {
+-        "MetricName": "l1d_cache_access_prefetces",
++        "MetricName": "l1d_cache_access_prefetches",
+         "MetricExpr": "L1D_CACHE_PRFM / L1D_CACHE",
+         "BriefDescription": "L1D cache access - prefetch",
+         "MetricGroup": "Cache",
+diff --git a/tools/perf/tests/perf-record.c b/tools/perf/tests/perf-record.c
+index 1c4feec1adff11..6e7f053006b4ff 100644
+--- a/tools/perf/tests/perf-record.c
++++ b/tools/perf/tests/perf-record.c
+@@ -115,6 +115,7 @@ static int test__PERF_RECORD(struct test_suite *test __maybe_unused, int subtest
+ 	if (err < 0) {
+ 		pr_debug("sched__get_first_possible_cpu: %s\n",
+ 			 str_error_r(errno, sbuf, sizeof(sbuf)));
++		evlist__cancel_workload(evlist);
+ 		goto out_delete_evlist;
+ 	}
+ 
+@@ -126,6 +127,7 @@ static int test__PERF_RECORD(struct test_suite *test __maybe_unused, int subtest
+ 	if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, &cpu_mask) < 0) {
+ 		pr_debug("sched_setaffinity: %s\n",
+ 			 str_error_r(errno, sbuf, sizeof(sbuf)));
++		evlist__cancel_workload(evlist);
+ 		goto out_delete_evlist;
+ 	}
+ 
+@@ -137,6 +139,7 @@ static int test__PERF_RECORD(struct test_suite *test __maybe_unused, int subtest
+ 	if (err < 0) {
+ 		pr_debug("perf_evlist__open: %s\n",
+ 			 str_error_r(errno, sbuf, sizeof(sbuf)));
++		evlist__cancel_workload(evlist);
+ 		goto out_delete_evlist;
+ 	}
+ 
+@@ -149,6 +152,7 @@ static int test__PERF_RECORD(struct test_suite *test __maybe_unused, int subtest
+ 	if (err < 0) {
+ 		pr_debug("evlist__mmap: %s\n",
+ 			 str_error_r(errno, sbuf, sizeof(sbuf)));
++		evlist__cancel_workload(evlist);
+ 		goto out_delete_evlist;
+ 	}
+ 
+diff --git a/tools/perf/tests/shell/record_lbr.sh b/tools/perf/tests/shell/record_lbr.sh
+index 32314641217e6d..5984ca9b78f8a4 100755
+--- a/tools/perf/tests/shell/record_lbr.sh
++++ b/tools/perf/tests/shell/record_lbr.sh
+@@ -4,7 +4,12 @@
+ 
+ set -e
+ 
+-if [ ! -f /sys/devices/cpu/caps/branches ] && [ ! -f /sys/devices/cpu_core/caps/branches ]
++ParanoidAndNotRoot() {
++  [ "$(id -u)" != 0 ] && [ "$(cat /proc/sys/kernel/perf_event_paranoid)" -gt $1 ]
++}
++
++if [ ! -f /sys/bus/event_source/devices/cpu/caps/branches ] &&
++   [ ! -f /sys/bus/event_source/devices/cpu_core/caps/branches ]
+ then
+   echo "Skip: only x86 CPUs support LBR"
+   exit 2
+@@ -22,6 +27,7 @@ cleanup() {
+ }
+ 
+ trap_cleanup() {
++  echo "Unexpected signal in ${FUNCNAME[1]}"
+   cleanup
+   exit 1
+ }
+@@ -122,8 +128,11 @@ lbr_test "-j ind_call" "any indirect call" 2
+ lbr_test "-j ind_jmp" "any indirect jump" 100
+ lbr_test "-j call" "direct calls" 2
+ lbr_test "-j ind_call,u" "any indirect user call" 100
+-lbr_test "-a -b" "system wide any branch" 2
+-lbr_test "-a -j any_call" "system wide any call" 2
++if ! ParanoidAndNotRoot 1
++then
++  lbr_test "-a -b" "system wide any branch" 2
++  lbr_test "-a -j any_call" "system wide any call" 2
++fi
+ 
+ # Parallel
+ parallel_lbr_test "-b" "parallel any branch" 100 &
+@@ -140,10 +149,16 @@ parallel_lbr_test "-j call" "parallel direct calls" 100 &
+ pid6=$!
+ parallel_lbr_test "-j ind_call,u" "parallel any indirect user call" 100 &
+ pid7=$!
+-parallel_lbr_test "-a -b" "parallel system wide any branch" 100 &
+-pid8=$!
+-parallel_lbr_test "-a -j any_call" "parallel system wide any call" 100 &
+-pid9=$!
++if ParanoidAndNotRoot 1
++then
++  pid8=
++  pid9=
++else
++  parallel_lbr_test "-a -b" "parallel system wide any branch" 100 &
++  pid8=$!
++  parallel_lbr_test "-a -j any_call" "parallel system wide any call" 100 &
++  pid9=$!
++fi
+ 
+ for pid in $pid1 $pid2 $pid3 $pid4 $pid5 $pid6 $pid7 $pid8 $pid9
+ do
+diff --git a/tools/perf/tests/shell/stat.sh b/tools/perf/tests/shell/stat.sh
+index 3f1e67795490a0..62f13dfeae8e4d 100755
+--- a/tools/perf/tests/shell/stat.sh
++++ b/tools/perf/tests/shell/stat.sh
+@@ -146,6 +146,34 @@ test_cputype() {
+   echo "cputype test [Success]"
+ }
+ 
++test_hybrid() {
++  # Test the default stat command on hybrid devices opens one cycles event for
++  # each CPU type.
++  echo "hybrid test"
++
++  # Count the number of core PMUs, assume minimum of 1
++  pmus=$(ls /sys/bus/event_source/devices/*/cpus 2>/dev/null | wc -l)
++  if [ "$pmus" -lt 1 ]
++  then
++    pmus=1
++  fi
++
++  # Run default Perf stat
++  cycles_events=$(perf stat -- true 2>&1 | grep -E "/cycles/[uH]*|  cycles[:uH]*  " -c)
++
++  # The expectation is that default output will have a cycles events on each
++  # hybrid PMU. In situations with no cycles PMU events, like virtualized, this
++  # can fall back to task-clock and so the end count may be 0. Fail if neither
++  # condition holds.
++  if [ "$pmus" -ne "$cycles_events" ] && [ "0" -ne "$cycles_events" ]
++  then
++    echo "hybrid test [Found $pmus PMUs but $cycles_events cycles events. Failed]"
++    err=1
++    return
++  fi
++  echo "hybrid test [Success]"
++}
++
+ test_default_stat
+ test_stat_record_report
+ test_stat_record_script
+@@ -153,4 +181,5 @@ test_stat_repeat_weak_groups
+ test_topdown_groups
+ test_topdown_weak_groups
+ test_cputype
++test_hybrid
+ exit $err
+diff --git a/tools/perf/tests/shell/trace_btf_enum.sh b/tools/perf/tests/shell/trace_btf_enum.sh
+index 8d1e6bbeac9068..1447d7425f381c 100755
+--- a/tools/perf/tests/shell/trace_btf_enum.sh
++++ b/tools/perf/tests/shell/trace_btf_enum.sh
+@@ -23,6 +23,14 @@ check_vmlinux() {
+   fi
+ }
+ 
++check_permissions() {
++  if perf trace -e $syscall $TESTPROG 2>&1 | grep -q "Operation not permitted"
++  then
++    echo "trace+enum test [Skipped permissions]"
++    err=2
++  fi
++}
++
+ trace_landlock() {
+   echo "Tracing syscall ${syscall}"
+ 
+@@ -54,6 +62,9 @@ trace_non_syscall() {
+ }
+ 
+ check_vmlinux
++if [ $err = 0 ]; then
++  check_permissions
++fi
+ 
+ if [ $err = 0 ]; then
+   trace_landlock
+diff --git a/tools/perf/util/arm-spe-decoder/arm-spe-decoder.h b/tools/perf/util/arm-spe-decoder/arm-spe-decoder.h
+index 1443c28545a946..358c611eeddbb3 100644
+--- a/tools/perf/util/arm-spe-decoder/arm-spe-decoder.h
++++ b/tools/perf/util/arm-spe-decoder/arm-spe-decoder.h
+@@ -56,15 +56,15 @@ enum arm_spe_op_type {
+ 	ARM_SPE_OP_BR_INDIRECT	= 1 << 17,
+ };
+ 
+-enum arm_spe_neoverse_data_source {
+-	ARM_SPE_NV_L1D		 = 0x0,
+-	ARM_SPE_NV_L2		 = 0x8,
+-	ARM_SPE_NV_PEER_CORE	 = 0x9,
+-	ARM_SPE_NV_LOCAL_CLUSTER = 0xa,
+-	ARM_SPE_NV_SYS_CACHE	 = 0xb,
+-	ARM_SPE_NV_PEER_CLUSTER	 = 0xc,
+-	ARM_SPE_NV_REMOTE	 = 0xd,
+-	ARM_SPE_NV_DRAM		 = 0xe,
++enum arm_spe_common_data_source {
++	ARM_SPE_COMMON_DS_L1D		= 0x0,
++	ARM_SPE_COMMON_DS_L2		= 0x8,
++	ARM_SPE_COMMON_DS_PEER_CORE	= 0x9,
++	ARM_SPE_COMMON_DS_LOCAL_CLUSTER = 0xa,
++	ARM_SPE_COMMON_DS_SYS_CACHE	= 0xb,
++	ARM_SPE_COMMON_DS_PEER_CLUSTER	= 0xc,
++	ARM_SPE_COMMON_DS_REMOTE	= 0xd,
++	ARM_SPE_COMMON_DS_DRAM		= 0xe,
+ };
+ 
+ struct arm_spe_record {
+diff --git a/tools/perf/util/arm-spe.c b/tools/perf/util/arm-spe.c
+index 2c06f2a85400e1..9890b17241c34a 100644
+--- a/tools/perf/util/arm-spe.c
++++ b/tools/perf/util/arm-spe.c
+@@ -411,15 +411,15 @@ static int arm_spe__synth_instruction_sample(struct arm_spe_queue *speq,
+ 	return arm_spe_deliver_synth_event(spe, speq, event, &sample);
+ }
+ 
+-static const struct midr_range neoverse_spe[] = {
++static const struct midr_range common_ds_encoding_cpus[] = {
+ 	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
+ 	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
+ 	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
+ 	{},
+ };
+ 
+-static void arm_spe__synth_data_source_neoverse(const struct arm_spe_record *record,
+-						union perf_mem_data_src *data_src)
++static void arm_spe__synth_data_source_common(const struct arm_spe_record *record,
++					      union perf_mem_data_src *data_src)
+ {
+ 	/*
+ 	 * Even though four levels of cache hierarchy are possible, no known
+@@ -441,17 +441,17 @@ static void arm_spe__synth_data_source_neoverse(const struct arm_spe_record *rec
+ 	}
+ 
+ 	switch (record->source) {
+-	case ARM_SPE_NV_L1D:
++	case ARM_SPE_COMMON_DS_L1D:
+ 		data_src->mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_HIT;
+ 		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L1;
+ 		data_src->mem_snoop = PERF_MEM_SNOOP_NONE;
+ 		break;
+-	case ARM_SPE_NV_L2:
++	case ARM_SPE_COMMON_DS_L2:
+ 		data_src->mem_lvl = PERF_MEM_LVL_L2 | PERF_MEM_LVL_HIT;
+ 		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L2;
+ 		data_src->mem_snoop = PERF_MEM_SNOOP_NONE;
+ 		break;
+-	case ARM_SPE_NV_PEER_CORE:
++	case ARM_SPE_COMMON_DS_PEER_CORE:
+ 		data_src->mem_lvl = PERF_MEM_LVL_L2 | PERF_MEM_LVL_HIT;
+ 		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L2;
+ 		data_src->mem_snoopx = PERF_MEM_SNOOPX_PEER;
+@@ -460,8 +460,8 @@ static void arm_spe__synth_data_source_neoverse(const struct arm_spe_record *rec
+ 	 * We don't know if this is L1, L2 but we do know it was a cache-2-cache
+ 	 * transfer, so set SNOOPX_PEER
+ 	 */
+-	case ARM_SPE_NV_LOCAL_CLUSTER:
+-	case ARM_SPE_NV_PEER_CLUSTER:
++	case ARM_SPE_COMMON_DS_LOCAL_CLUSTER:
++	case ARM_SPE_COMMON_DS_PEER_CLUSTER:
+ 		data_src->mem_lvl = PERF_MEM_LVL_L3 | PERF_MEM_LVL_HIT;
+ 		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L3;
+ 		data_src->mem_snoopx = PERF_MEM_SNOOPX_PEER;
+@@ -469,7 +469,7 @@ static void arm_spe__synth_data_source_neoverse(const struct arm_spe_record *rec
+ 	/*
+ 	 * System cache is assumed to be L3
+ 	 */
+-	case ARM_SPE_NV_SYS_CACHE:
++	case ARM_SPE_COMMON_DS_SYS_CACHE:
+ 		data_src->mem_lvl = PERF_MEM_LVL_L3 | PERF_MEM_LVL_HIT;
+ 		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L3;
+ 		data_src->mem_snoop = PERF_MEM_SNOOP_HIT;
+@@ -478,13 +478,13 @@ static void arm_spe__synth_data_source_neoverse(const struct arm_spe_record *rec
+ 	 * We don't know what level it hit in, except it came from the other
+ 	 * socket
+ 	 */
+-	case ARM_SPE_NV_REMOTE:
+-		data_src->mem_lvl = PERF_MEM_LVL_REM_CCE1;
+-		data_src->mem_lvl_num = PERF_MEM_LVLNUM_ANY_CACHE;
++	case ARM_SPE_COMMON_DS_REMOTE:
++		data_src->mem_lvl = PERF_MEM_LVL_NA;
++		data_src->mem_lvl_num = PERF_MEM_LVLNUM_NA;
+ 		data_src->mem_remote = PERF_MEM_REMOTE_REMOTE;
+ 		data_src->mem_snoopx = PERF_MEM_SNOOPX_PEER;
+ 		break;
+-	case ARM_SPE_NV_DRAM:
++	case ARM_SPE_COMMON_DS_DRAM:
+ 		data_src->mem_lvl = PERF_MEM_LVL_LOC_RAM | PERF_MEM_LVL_HIT;
+ 		data_src->mem_lvl_num = PERF_MEM_LVLNUM_RAM;
+ 		data_src->mem_snoop = PERF_MEM_SNOOP_NONE;
+@@ -514,13 +514,13 @@ static void arm_spe__synth_data_source_generic(const struct arm_spe_record *reco
+ 	}
+ 
+ 	if (record->type & ARM_SPE_REMOTE_ACCESS)
+-		data_src->mem_lvl |= PERF_MEM_LVL_REM_CCE1;
++		data_src->mem_remote = PERF_MEM_REMOTE_REMOTE;
+ }
+ 
+ static u64 arm_spe__synth_data_source(const struct arm_spe_record *record, u64 midr)
+ {
+ 	union perf_mem_data_src	data_src = { .mem_op = PERF_MEM_OP_NA };
+-	bool is_neoverse = is_midr_in_range_list(midr, neoverse_spe);
++	bool is_common = is_midr_in_range_list(midr, common_ds_encoding_cpus);
+ 
+ 	/* Only synthesize data source for LDST operations */
+ 	if (!is_ldst_op(record->op))
+@@ -533,8 +533,8 @@ static u64 arm_spe__synth_data_source(const struct arm_spe_record *record, u64 m
+ 	else
+ 		return 0;
+ 
+-	if (is_neoverse)
+-		arm_spe__synth_data_source_neoverse(record, &data_src);
++	if (is_common)
++		arm_spe__synth_data_source_common(record, &data_src);
+ 	else
+ 		arm_spe__synth_data_source_generic(record, &data_src);
+ 
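
The switch above fills the UAPI union perf_mem_data_src. As a hedged
illustration, the ARM_SPE_COMMON_DS_DRAM case packs into a single u64
like this (field and constant names from <linux/perf_event.h>):

#include <linux/perf_event.h>

static __u64 dram_load_data_src(void)
{
	union perf_mem_data_src ds = { .mem_op = PERF_MEM_OP_LOAD };

	/* Local DRAM hit, no snoop involved. */
	ds.mem_lvl     = PERF_MEM_LVL_LOC_RAM | PERF_MEM_LVL_HIT;
	ds.mem_lvl_num = PERF_MEM_LVLNUM_RAM;
	ds.mem_snoop   = PERF_MEM_SNOOP_NONE;
	return ds.val;
}
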
+diff --git a/tools/perf/util/disasm.c b/tools/perf/util/disasm.c
+index 648e8d87ef1945..a228a7ba30caa7 100644
+--- a/tools/perf/util/disasm.c
++++ b/tools/perf/util/disasm.c
+@@ -389,13 +389,16 @@ static int jump__parse(struct arch *arch, struct ins_operands *ops, struct map_s
+ 	 * skip over possible up to 2 operands to get to address, e.g.:
+ 	 * tbnz	 w0, #26, ffff0000083cd190 <security_file_permission+0xd0>
+ 	 */
+-	if (c++ != NULL) {
++	if (c != NULL) {
++		c++;
+ 		ops->target.addr = strtoull(c, NULL, 16);
+ 		if (!ops->target.addr) {
+ 			c = strchr(c, ',');
+ 			c = validate_comma(c, ops);
+-			if (c++ != NULL)
++			if (c != NULL) {
++				c++;
+ 				ops->target.addr = strtoull(c, NULL, 16);
++			}
+ 		}
+ 	} else {
+ 		ops->target.addr = strtoull(ops->raw, NULL, 16);
+diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
+index 6d7249cc1a993a..dda107b12b8c68 100644
+--- a/tools/perf/util/evsel.c
++++ b/tools/perf/util/evsel.c
+@@ -3237,7 +3237,7 @@ bool evsel__fallback(struct evsel *evsel, struct target *target, int err,
+ 
+ 		/* If event has exclude user then don't exclude kernel. */
+ 		if (evsel->core.attr.exclude_user)
+-			return false;
++			goto no_fallback;
+ 
+ 		/* Is there already the separator in the name. */
+ 		if (strchr(name, '/') ||
+@@ -3245,7 +3245,7 @@ bool evsel__fallback(struct evsel *evsel, struct target *target, int err,
+ 			sep = "";
+ 
+ 		if (asprintf(&new_name, "%s%su", name, sep) < 0)
+-			return false;
++			goto no_fallback;
+ 
+ 		free(evsel->name);
+ 		evsel->name = new_name;
+@@ -3256,8 +3256,31 @@ bool evsel__fallback(struct evsel *evsel, struct target *target, int err,
+ 		evsel->core.attr.exclude_hv     = 1;
+ 
+ 		return true;
+-	}
++	} else if (err == EOPNOTSUPP && !evsel->core.attr.exclude_guest &&
++		   !evsel->exclude_GH) {
++		const char *name = evsel__name(evsel);
++		char *new_name;
++		const char *sep = ":";
++
++		/* Is there already the separator in the name. */
++		if (strchr(name, '/') ||
++		    (strchr(name, ':') && !evsel->is_libpfm_event))
++			sep = "";
++
++		if (asprintf(&new_name, "%s%sH", name, sep) < 0)
++			goto no_fallback;
+ 
++		free(evsel->name);
++		evsel->name = new_name;
++		/* Apple M1 requires exclude_guest */
++		scnprintf(msg, msgsize, "Trying to fall back to excluding guest samples");
++		evsel->core.attr.exclude_guest = 1;
++
++		return true;
++	}
++no_fallback:
++	scnprintf(msg, msgsize, "No fallback found for '%s' for error %d",
++		  evsel__name(evsel), err);
+ 	return false;
+ }
+ 
+@@ -3497,6 +3520,8 @@ bool evsel__is_hybrid(const struct evsel *evsel)
+ 
+ struct evsel *evsel__leader(const struct evsel *evsel)
+ {
++	if (evsel->core.leader == NULL)
++		return NULL;
+ 	return container_of(evsel->core.leader, struct evsel, core);
+ }
+ 
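
The fallback above rewrites the event name by appending a modifier
("cycles" becomes "cycles:u" or "cycles:H") unless one is already
present. A simplified sketch of that rewrite (it drops the libpfm
special case; the helper name is illustrative):

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>

static char *with_modifier(const char *name, char mod)
{
	/* Skip the separator if the name already carries one. */
	const char *sep = (strchr(name, '/') || strchr(name, ':')) ? "" : ":";
	char *new_name;

	if (asprintf(&new_name, "%s%s%c", name, sep, mod) < 0)
		return NULL;
	return new_name;	/* e.g. "cycles" -> "cycles:H" */
}
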
+diff --git a/tools/perf/util/lzma.c b/tools/perf/util/lzma.c
+index af9a97612f9df3..f61574d1581e3c 100644
+--- a/tools/perf/util/lzma.c
++++ b/tools/perf/util/lzma.c
+@@ -113,7 +113,7 @@ bool lzma_is_compressed(const char *input)
+ 	ssize_t rc;
+ 
+ 	if (fd < 0)
+-		return -1;
++		return false;
+ 
+ 	rc = read(fd, buf, sizeof(buf));
+ 	close(fd);
+diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
+index dbaf07bf6c5fb8..89e5354fa094c9 100644
+--- a/tools/perf/util/session.c
++++ b/tools/perf/util/session.c
+@@ -1371,7 +1371,7 @@ static s64 perf_session__process_user_event(struct perf_session *session,
+ 	const struct perf_tool *tool = session->tool;
+ 	struct perf_sample sample = { .time = 0, };
+ 	int fd = perf_data__fd(session->data);
+-	int err;
++	s64 err;
+ 
+ 	if (event->header.type != PERF_RECORD_COMPRESSED || perf_tool__compressed_is_stub(tool))
+ 		dump_event(session->evlist, event, file_offset, &sample, file_path);
+diff --git a/tools/perf/util/setup.py b/tools/perf/util/setup.py
+index 649550e9b7aa8c..abb567de3e8a99 100644
+--- a/tools/perf/util/setup.py
++++ b/tools/perf/util/setup.py
+@@ -1,6 +1,7 @@
+ from os import getenv, path
+ from subprocess import Popen, PIPE
+ from re import sub
++import shlex
+ 
+ cc = getenv("CC")
+ 
+@@ -16,7 +17,9 @@ cc_is_clang = b"clang version" in Popen([cc, "-v"], stderr=PIPE).stderr.readline
+ src_feature_tests  = getenv('srctree') + '/tools/build/feature'
+ 
+ def clang_has_option(option):
+-    cc_output = Popen([cc, cc_options + option, path.join(src_feature_tests, "test-hello.c") ], stderr=PIPE).stderr.readlines()
++    cmd = shlex.split(f"{cc} {cc_options} {option}")
++    cmd.append(path.join(src_feature_tests, "test-hello.c"))
++    cc_output = Popen(cmd, stderr=PIPE).stderr.readlines()
+     return [o for o in cc_output if ((b"unknown argument" in o) or (b"is not supported" in o) or (b"unknown warning option" in o))] == [ ]
+ 
+ if cc_is_clang:
+diff --git a/tools/perf/util/zlib.c b/tools/perf/util/zlib.c
+index 78d2297c1b6746..1f7c065230599d 100644
+--- a/tools/perf/util/zlib.c
++++ b/tools/perf/util/zlib.c
+@@ -88,7 +88,7 @@ bool gzip_is_compressed(const char *input)
+ 	ssize_t rc;
+ 
+ 	if (fd < 0)
+-		return -1;
++		return false;
+ 
+ 	rc = read(fd, buf, sizeof(buf));
+ 	close(fd);
+diff --git a/tools/testing/selftests/mm/madv_populate.c b/tools/testing/selftests/mm/madv_populate.c
+index ef7d911da13e01..16f5754b8b1f73 100644
+--- a/tools/testing/selftests/mm/madv_populate.c
++++ b/tools/testing/selftests/mm/madv_populate.c
+@@ -264,23 +264,6 @@ static void test_softdirty(void)
+ 	munmap(addr, SIZE);
+ }
+ 
+-static int system_has_softdirty(void)
+-{
+-	/*
+-	 * There is no way to check if the kernel supports soft-dirty, other
+-	 * than by writing to a page and seeing if the bit was set. But the
+-	 * tests are intended to check that the bit gets set when it should, so
+-	 * doing that check would turn a potentially legitimate fail into a
+-	 * skip. Fortunately, we know for sure that arm64 does not support
+-	 * soft-dirty. So for now, let's just use the arch as a corse guide.
+-	 */
+-#if defined(__aarch64__)
+-	return 0;
+-#else
+-	return 1;
+-#endif
+-}
+-
+ int main(int argc, char **argv)
+ {
+ 	int nr_tests = 16;
+@@ -288,7 +271,7 @@ int main(int argc, char **argv)
+ 
+ 	pagesize = getpagesize();
+ 
+-	if (system_has_softdirty())
++	if (softdirty_supported())
+ 		nr_tests += 5;
+ 
+ 	ksft_print_header();
+@@ -300,7 +283,7 @@ int main(int argc, char **argv)
+ 	test_holes();
+ 	test_populate_read();
+ 	test_populate_write();
+-	if (system_has_softdirty())
++	if (softdirty_supported())
+ 		test_softdirty();
+ 
+ 	err = ksft_get_fail_cnt();
+diff --git a/tools/testing/selftests/mm/soft-dirty.c b/tools/testing/selftests/mm/soft-dirty.c
+index bdfa5d085f0099..7b91df12ce5b9f 100644
+--- a/tools/testing/selftests/mm/soft-dirty.c
++++ b/tools/testing/selftests/mm/soft-dirty.c
+@@ -193,8 +193,11 @@ int main(int argc, char **argv)
+ 	int pagesize;
+ 
+ 	ksft_print_header();
+-	ksft_set_plan(15);
+ 
++	if (!softdirty_supported())
++		ksft_exit_skip("soft-dirty is not supported\n");
++
++	ksft_set_plan(15);
+ 	pagemap_fd = open(PAGEMAP_FILE_PATH, O_RDONLY);
+ 	if (pagemap_fd < 0)
+ 		ksft_exit_fail_msg("Failed to open %s\n", PAGEMAP_FILE_PATH);
+diff --git a/tools/testing/selftests/mm/vm_util.c b/tools/testing/selftests/mm/vm_util.c
+index d8d0cf04bb57fd..a4a2805d3d3e7c 100644
+--- a/tools/testing/selftests/mm/vm_util.c
++++ b/tools/testing/selftests/mm/vm_util.c
+@@ -193,6 +193,42 @@ unsigned long rss_anon(void)
+ 	return rss_anon;
+ }
+ 
++char *__get_smap_entry(void *addr, const char *pattern, char *buf, size_t len)
++{
++	int ret;
++	FILE *fp;
++	char *entry = NULL;
++	char addr_pattern[MAX_LINE_LENGTH];
++
++	ret = snprintf(addr_pattern, MAX_LINE_LENGTH, "%08lx-",
++		       (unsigned long)addr);
++	if (ret >= MAX_LINE_LENGTH)
++		ksft_exit_fail_msg("%s: Pattern is too long\n", __func__);
++
++	fp = fopen(SMAP_FILE_PATH, "r");
++	if (!fp)
++		ksft_exit_fail_msg("%s: Failed to open file %s\n", __func__,
++				   SMAP_FILE_PATH);
++
++	if (!check_for_pattern(fp, addr_pattern, buf, len))
++		goto err_out;
++
++	/* Fetch the pattern in the same block */
++	if (!check_for_pattern(fp, pattern, buf, len))
++		goto err_out;
++
++	/* Trim trailing newline */
++	entry = strchr(buf, '\n');
++	if (entry)
++		*entry = '\0';
++
++	entry = buf + strlen(pattern);
++
++err_out:
++	fclose(fp);
++	return entry;
++}
++
+ bool __check_huge(void *addr, char *pattern, int nr_hpages,
+ 		  uint64_t hpage_size)
+ {
+@@ -384,3 +420,44 @@ unsigned long get_free_hugepages(void)
+ 	fclose(f);
+ 	return fhp;
+ }
++
++static bool check_vmflag(void *addr, const char *flag)
++{
++	char buffer[MAX_LINE_LENGTH];
++	const char *flags;
++	size_t flaglen;
++
++	flags = __get_smap_entry(addr, "VmFlags:", buffer, sizeof(buffer));
++	if (!flags)
++		ksft_exit_fail_msg("%s: No VmFlags for %p\n", __func__, addr);
++
++	while (true) {
++		flags += strspn(flags, " ");
++
++		flaglen = strcspn(flags, " ");
++		if (!flaglen)
++			return false;
++
++		if (flaglen == strlen(flag) && !memcmp(flags, flag, flaglen))
++			return true;
++
++		flags += flaglen;
++	}
++}
++
++bool softdirty_supported(void)
++{
++	char *addr;
++	bool supported = false;
++	const size_t pagesize = getpagesize();
++
++	/* New mappings are expected to be marked with VM_SOFTDIRTY (sd). */
++	addr = mmap(0, pagesize, PROT_READ | PROT_WRITE,
++		    MAP_ANONYMOUS | MAP_PRIVATE, 0, 0);
++	if (!addr)
++		ksft_exit_fail_msg("mmap failed\n");
++
++	supported = check_vmflag(addr, "sd");
++	munmap(addr, pagesize);
++	return supported;
++}
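
check_vmflag() relies on VmFlags being space-separated tokens, e.g.
"VmFlags: rd wr mr mw me ac sd" in smaps. A standalone sketch of the
same strspn/strcspn token scan:

#include <stdbool.h>
#include <string.h>

static bool has_flag(const char *flags, const char *flag)
{
	size_t len;

	for (;;) {
		flags += strspn(flags, " ");	/* skip separators */
		len = strcspn(flags, " ");	/* next token length */
		if (!len)
			return false;		/* end of line */
		if (len == strlen(flag) && !memcmp(flags, flag, len))
			return true;
		flags += len;
	}
}
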
+diff --git a/tools/testing/selftests/mm/vm_util.h b/tools/testing/selftests/mm/vm_util.h
+index 2eaed82099255e..823d07d84ad01f 100644
+--- a/tools/testing/selftests/mm/vm_util.h
++++ b/tools/testing/selftests/mm/vm_util.h
+@@ -53,6 +53,7 @@ int uffd_unregister(int uffd, void *addr, uint64_t len);
+ int uffd_register_with_ioctls(int uffd, void *addr, uint64_t len,
+ 			      bool miss, bool wp, bool minor, uint64_t *ioctls);
+ unsigned long get_free_hugepages(void);
++bool softdirty_supported(void);
+ 
+ /*
+  * On ppc64 this will only work with radix 2M hugepage size
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+index c07e2bd3a315aa..6b22b8c7374248 100755
+--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+@@ -3160,6 +3160,17 @@ deny_join_id0_tests()
+ 		run_tests $ns1 $ns2 10.0.1.1
+ 		chk_join_nr 1 1 1
+ 	fi
++
++	# default limits, server deny join id 0 + signal
++	if reset_with_allow_join_id0 "default limits, server deny join id 0" 0 1; then
++		pm_nl_set_limits $ns1 0 2
++		pm_nl_set_limits $ns2 0 2
++		pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
++		pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
++		pm_nl_add_endpoint $ns2 10.0.4.2 flags subflow
++		run_tests $ns1 $ns2 10.0.1.1
++		chk_join_nr 2 2 2
++	fi
+ }
+ 
+ fullmesh_tests()
+diff --git a/tools/testing/selftests/net/netfilter/nf_nat_edemux.sh b/tools/testing/selftests/net/netfilter/nf_nat_edemux.sh
+index 1014551dd76945..6731fe1eaf2e99 100755
+--- a/tools/testing/selftests/net/netfilter/nf_nat_edemux.sh
++++ b/tools/testing/selftests/net/netfilter/nf_nat_edemux.sh
+@@ -17,9 +17,31 @@ cleanup()
+ 
+ checktool "socat -h" "run test without socat"
+ checktool "iptables --version" "run test without iptables"
++checktool "conntrack --version" "run test without conntrack"
+ 
+ trap cleanup EXIT
+ 
++connect_done()
++{
++	local ns="$1"
++	local port="$2"
++
++	ip netns exec "$ns" ss -nt -o state established "dport = :$port" | grep -q "$port"
++}
++
++check_ctstate()
++{
++	local ns="$1"
++	local dp="$2"
++
++	if ! ip netns exec "$ns" conntrack --get -s 192.168.1.2 -d 192.168.1.1 -p tcp \
++	     --sport 10000 --dport "$dp" --state ESTABLISHED > /dev/null 2>&1;then
++		echo "FAIL: Did not find expected state for dport $2"
++		ip netns exec "$ns" bash -c 'conntrack -L; conntrack -S; ss -nt'
++		ret=1
++	fi
++}
++
+ setup_ns ns1 ns2
+ 
+ # Connect the namespaces using a veth pair
+@@ -44,15 +66,18 @@ socatpid=$!
+ ip netns exec "$ns2" sysctl -q net.ipv4.ip_local_port_range="10000 10000"
+ 
+ # add a virtual IP using DNAT
+-ip netns exec "$ns2" iptables -t nat -A OUTPUT -d 10.96.0.1/32 -p tcp --dport 443 -j DNAT --to-destination 192.168.1.1:5201
++ip netns exec "$ns2" iptables -t nat -A OUTPUT -d 10.96.0.1/32 -p tcp --dport 443 -j DNAT --to-destination 192.168.1.1:5201 || exit 1
+ 
+ # ... and route it to the other namespace
+ ip netns exec "$ns2" ip route add 10.96.0.1 via 192.168.1.1
+ 
+-# add a persistent connection from the other namespace
+-ip netns exec "$ns2" socat -t 10 - TCP:192.168.1.1:5201 > /dev/null &
++# listener should be up by now, wait if it isn't yet.
++wait_local_port_listen "$ns1" 5201 tcp
+ 
+-sleep 1
++# add a persistent connection from the other namespace
++sleep 10 | ip netns exec "$ns2" socat -t 10 - TCP:192.168.1.1:5201 > /dev/null &
++cpid0=$!
++busywait "$BUSYWAIT_TIMEOUT" connect_done "$ns2" "5201"
+ 
+ # ip daddr:dport will be rewritten to 192.168.1.1 5201
+ # NAT must reallocate source port 10000 because
+@@ -71,26 +96,25 @@ fi
+ ip netns exec "$ns1" iptables -t nat -A PREROUTING -p tcp --dport 5202 -j REDIRECT --to-ports 5201
+ ip netns exec "$ns1" iptables -t nat -A PREROUTING -p tcp --dport 5203 -j REDIRECT --to-ports 5201
+ 
+-sleep 5 | ip netns exec "$ns2" socat -t 5 -u STDIN TCP:192.168.1.1:5202,connect-timeout=5 >/dev/null &
++sleep 5 | ip netns exec "$ns2" socat -T 5 -u STDIN TCP:192.168.1.1:5202,connect-timeout=5 >/dev/null &
++cpid1=$!
+ 
+-# if connect succeeds, client closes instantly due to EOF on stdin.
+-# if connect hangs, it will time out after 5s.
+-echo | ip netns exec "$ns2" socat -t 3 -u STDIN TCP:192.168.1.1:5203,connect-timeout=5 >/dev/null &
++sleep 5 | ip netns exec "$ns2" socat -T 5 -u STDIN TCP:192.168.1.1:5203,connect-timeout=5 >/dev/null &
+ cpid2=$!
+ 
+-time_then=$(date +%s)
+-wait $cpid2
+-rv=$?
+-time_now=$(date +%s)
++busywait "$BUSYWAIT_TIMEOUT" connect_done "$ns2" 5202
++busywait "$BUSYWAIT_TIMEOUT" connect_done "$ns2" 5203
+ 
+-# Check how much time has elapsed, expectation is for
+-# 'cpid2' to connect and then exit (and no connect delay).
+-delta=$((time_now - time_then))
++check_ctstate "$ns1" 5202
++check_ctstate "$ns1" 5203
+ 
+-if [ $delta -lt 2 ] && [ $rv -eq 0 ]; then
++kill $socatpid $cpid0 $cpid1 $cpid2
++socatpid=0
++
++if [ $ret -eq 0 ]; then
+ 	echo "PASS: could connect to service via redirected ports"
+ else
+-	echo "FAIL: socat cannot connect to service via redirect ($delta seconds elapsed, returned $rv)"
++	echo "FAIL: socat cannot connect to service via redirect"
+ 	ret=1
+ fi
+ 
+diff --git a/tools/testing/selftests/rseq/rseq.c b/tools/testing/selftests/rseq/rseq.c
+index f6156790c3b4df..05dc77fd527b3b 100644
+--- a/tools/testing/selftests/rseq/rseq.c
++++ b/tools/testing/selftests/rseq/rseq.c
+@@ -40,9 +40,9 @@
+  * Define weak versions to play nice with binaries that are statically linked
+  * against a libc that doesn't support registering its own rseq.
+  */
+-__weak ptrdiff_t __rseq_offset;
+-__weak unsigned int __rseq_size;
+-__weak unsigned int __rseq_flags;
++extern __weak ptrdiff_t __rseq_offset;
++extern __weak unsigned int __rseq_size;
++extern __weak unsigned int __rseq_flags;
+ 
+ static const ptrdiff_t *libc_rseq_offset_p = &__rseq_offset;
+ static const unsigned int *libc_rseq_size_p = &__rseq_size;
+@@ -198,7 +198,7 @@ void rseq_init(void)
+ 	 * libc not having registered a restartable sequence.  Try to find the
+ 	 * symbols if that's the case.
+ 	 */
+-	if (!*libc_rseq_size_p) {
++	if (!libc_rseq_size_p || !*libc_rseq_size_p) {
+ 		libc_rseq_offset_p = dlsym(RTLD_NEXT, "__rseq_offset");
+ 		libc_rseq_size_p = dlsym(RTLD_NEXT, "__rseq_size");
+ 		libc_rseq_flags_p = dlsym(RTLD_NEXT, "__rseq_flags");


^ permalink raw reply related	[flat|nested] 82+ messages in thread
* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2025-10-15 17:30 Arisu Tachibana
  0 siblings, 0 replies; 82+ messages in thread
From: Arisu Tachibana @ 2025-10-15 17:30 UTC (permalink / raw
  To: gentoo-commits

commit:     3ace481ff1bde18e58ac350599b7b0ee9016698c
Author:     Arisu Tachibana <alicef <AT> gentoo <DOT> org>
AuthorDate: Wed Oct 15 17:29:48 2025 +0000
Commit:     Arisu Tachibana <alicef <AT> gentoo <DOT> org>
CommitDate: Wed Oct 15 17:29:48 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=3ace481f

Linux patch 6.12.53

Signed-off-by: Arisu Tachibana <alicef <AT> gentoo.org>

 0000_README              |    4 +
 1052_linux-6.12.53.patch | 8669 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 8673 insertions(+)

diff --git a/0000_README b/0000_README
index 9624f9e2..8f91dc7a 100644
--- a/0000_README
+++ b/0000_README
@@ -251,6 +251,10 @@ Patch:  1051_linux-6.12.52.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.12.52
 
+Patch:  1052_linux-6.12.53.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.12.53
+
 Patch:  1500_fortify-copy-size-value-range-tracking-fix.patch
 From:   https://git.kernel.org/
 Desc:   fortify: Hide run-time copy size from value range tracking

diff --git a/1052_linux-6.12.53.patch b/1052_linux-6.12.53.patch
new file mode 100644
index 00000000..14e06a14
--- /dev/null
+++ b/1052_linux-6.12.53.patch
@@ -0,0 +1,8669 @@
+diff --git a/Documentation/trace/histogram-design.rst b/Documentation/trace/histogram-design.rst
+index 5765eb3e9efa78..a30f4bed11b4ee 100644
+--- a/Documentation/trace/histogram-design.rst
++++ b/Documentation/trace/histogram-design.rst
+@@ -380,7 +380,9 @@ entry, ts0, corresponding to the ts0 variable in the sched_waking
+ trigger above.
+ 
+ sched_waking histogram
+-----------------------::
++----------------------
++
++.. code-block::
+ 
+   +------------------+
+   | hist_data        |<-------------------------------------------------------+
+diff --git a/Makefile b/Makefile
+index 3345d6257350d7..a4a2228276e67d 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 12
+-SUBLEVEL = 52
++SUBLEVEL = 53
+ EXTRAVERSION =
+ NAME = Baby Opossum Posse
+ 
+diff --git a/arch/arm/boot/dts/renesas/r8a7791-porter.dts b/arch/arm/boot/dts/renesas/r8a7791-porter.dts
+index 93c86e92164555..b255eb228dd74e 100644
+--- a/arch/arm/boot/dts/renesas/r8a7791-porter.dts
++++ b/arch/arm/boot/dts/renesas/r8a7791-porter.dts
+@@ -290,7 +290,7 @@ vin0_pins: vin0 {
+ 	};
+ 
+ 	can0_pins: can0 {
+-		groups = "can0_data";
++		groups = "can0_data_b";
+ 		function = "can0";
+ 	};
+ 
+diff --git a/arch/arm/boot/dts/ti/omap/am335x-baltos.dtsi b/arch/arm/boot/dts/ti/omap/am335x-baltos.dtsi
+index a4beb718559c42..9ee9e7a1343c47 100644
+--- a/arch/arm/boot/dts/ti/omap/am335x-baltos.dtsi
++++ b/arch/arm/boot/dts/ti/omap/am335x-baltos.dtsi
+@@ -270,7 +270,7 @@ &tps {
+ 	vcc7-supply = <&vbat>;
+ 	vccio-supply = <&vbat>;
+ 
+-	ti,en-ck32k-xtal = <1>;
++	ti,en-ck32k-xtal;
+ 
+ 	regulators {
+ 		vrtc_reg: regulator@0 {
+diff --git a/arch/arm/boot/dts/ti/omap/am335x-cm-t335.dts b/arch/arm/boot/dts/ti/omap/am335x-cm-t335.dts
+index 06767ea164b598..ece7f7854f6aae 100644
+--- a/arch/arm/boot/dts/ti/omap/am335x-cm-t335.dts
++++ b/arch/arm/boot/dts/ti/omap/am335x-cm-t335.dts
+@@ -483,8 +483,6 @@ &mcasp1 {
+ 
+ 		op-mode = <0>;          /* MCASP_IIS_MODE */
+ 		tdm-slots = <2>;
+-		/* 16 serializers */
+-		num-serializer = <16>;
+ 		serial-dir = <  /* 0: INACTIVE, 1: TX, 2: RX */
+ 			0 0 2 1 0 0 0 0 0 0 0 0 0 0 0 0
+ 		>;
+diff --git a/arch/arm/boot/dts/ti/omap/omap3-devkit8000-lcd-common.dtsi b/arch/arm/boot/dts/ti/omap/omap3-devkit8000-lcd-common.dtsi
+index a7f99ae0c1fe9a..78c657429f6410 100644
+--- a/arch/arm/boot/dts/ti/omap/omap3-devkit8000-lcd-common.dtsi
++++ b/arch/arm/boot/dts/ti/omap/omap3-devkit8000-lcd-common.dtsi
+@@ -65,7 +65,7 @@ ads7846@0 {
+ 		ti,debounce-max = /bits/ 16 <10>;
+ 		ti,debounce-tol = /bits/ 16 <5>;
+ 		ti,debounce-rep = /bits/ 16 <1>;
+-		ti,keep-vref-on = <1>;
++		ti,keep-vref-on;
+ 		ti,settle-delay-usec = /bits/ 16 <150>;
+ 
+ 		wakeup-source;
+diff --git a/arch/arm/mach-at91/pm_suspend.S b/arch/arm/mach-at91/pm_suspend.S
+index e5869cca5e7916..94dece1839af34 100644
+--- a/arch/arm/mach-at91/pm_suspend.S
++++ b/arch/arm/mach-at91/pm_suspend.S
+@@ -872,7 +872,7 @@ e_done:
+ /**
+  * at91_mckx_ps_restore: restore MCK1..4 settings
+  *
+- * Side effects: overwrites tmp1, tmp2
++ * Side effects: overwrites tmp1, tmp2 and tmp3
+  */
+ .macro at91_mckx_ps_restore
+ #ifdef CONFIG_SOC_SAMA7
+@@ -916,7 +916,7 @@ r_ps:
+ 	bic	tmp3, tmp3, #AT91_PMC_MCR_V2_ID_MSK
+ 	orr	tmp3, tmp3, tmp1
+ 	orr	tmp3, tmp3, #AT91_PMC_MCR_V2_CMD
+-	str	tmp2, [pmc, #AT91_PMC_MCR_V2]
++	str	tmp3, [pmc, #AT91_PMC_MCR_V2]
+ 
+ 	wait_mckrdy tmp1
+ 
+diff --git a/arch/arm64/boot/dts/apple/t8103-j457.dts b/arch/arm64/boot/dts/apple/t8103-j457.dts
+index 152f95fd49a211..7089ccf3ce5566 100644
+--- a/arch/arm64/boot/dts/apple/t8103-j457.dts
++++ b/arch/arm64/boot/dts/apple/t8103-j457.dts
+@@ -21,6 +21,14 @@ aliases {
+ 	};
+ };
+ 
++/*
++ * Adjust pcie0's iommu-map to account for the disabled port01.
++ */
++&pcie0 {
++	iommu-map = <0x100 &pcie0_dart_0 1 1>,
++			<0x200 &pcie0_dart_2 1 1>;
++};
++
+ &bluetooth0 {
+ 	brcm,board-type = "apple,santorini";
+ };
+@@ -36,10 +44,10 @@ &wifi0 {
+  */
+ 
+ &port02 {
+-	bus-range = <3 3>;
++	bus-range = <2 2>;
+ 	status = "okay";
+ 	ethernet0: ethernet@0,0 {
+-		reg = <0x30000 0x0 0x0 0x0 0x0>;
++		reg = <0x20000 0x0 0x0 0x0 0x0>;
+ 		/* To be filled by the loader */
+ 		local-mac-address = [00 10 18 00 00 00];
+ 	};
+diff --git a/arch/arm64/boot/dts/freescale/imx93-kontron-bl-osm-s.dts b/arch/arm64/boot/dts/freescale/imx93-kontron-bl-osm-s.dts
+index 89e97c604bd3e4..c3d2ddd887fdf0 100644
+--- a/arch/arm64/boot/dts/freescale/imx93-kontron-bl-osm-s.dts
++++ b/arch/arm64/boot/dts/freescale/imx93-kontron-bl-osm-s.dts
+@@ -33,7 +33,9 @@ pwm-beeper {
+ 
+ 	reg_vcc_panel: regulator-vcc-panel {
+ 		compatible = "regulator-fixed";
+-		gpio = <&gpio4 3 GPIO_ACTIVE_HIGH>;
++		pinctrl-names = "default";
++		pinctrl-0 = <&pinctrl_reg_vcc_panel>;
++		gpio = <&gpio2 21 GPIO_ACTIVE_HIGH>;
+ 		enable-active-high;
+ 		regulator-max-microvolt = <3300000>;
+ 		regulator-min-microvolt = <3300000>;
+@@ -135,6 +137,16 @@ &tpm6 {
+ };
+ 
+ &usbotg1 {
++	adp-disable;
++	hnp-disable;
++	srp-disable;
++	disable-over-current;
++	dr_mode = "otg";
++	usb-role-switch;
++	status = "okay";
++};
++
++&usbotg2 {
+ 	#address-cells = <1>;
+ 	#size-cells = <0>;
+ 	disable-over-current;
+@@ -147,17 +159,15 @@ usb1@1 {
+ 	};
+ };
+ 
+-&usbotg2 {
+-	adp-disable;
+-	hnp-disable;
+-	srp-disable;
+-	disable-over-current;
+-	dr_mode = "otg";
+-	usb-role-switch;
+-	status = "okay";
+-};
+-
+ &usdhc2 {
+ 	vmmc-supply = <&reg_vdd_3v3>;
+ 	status = "okay";
+ };
++
++&iomuxc {
++	pinctrl_reg_vcc_panel: regvccpanelgrp {
++		fsl,pins = <
++			MX93_PAD_GPIO_IO21__GPIO2_IO21		0x31e /* PWM_2 */
++		>;
++	};
++};
+diff --git a/arch/arm64/boot/dts/freescale/imx95.dtsi b/arch/arm64/boot/dts/freescale/imx95.dtsi
+index 7365d6538a733d..ddbc94c375e0c8 100644
+--- a/arch/arm64/boot/dts/freescale/imx95.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx95.dtsi
+@@ -822,7 +822,7 @@ lpuart7: serial@42690000 {
+ 				interrupts = <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>;
+ 				clocks = <&scmi_clk IMX95_CLK_LPUART7>;
+ 				clock-names = "ipg";
+-				dmas = <&edma2 26 0 FSL_EDMA_RX>, <&edma2 25 0 0>;
++				dmas = <&edma2 88 0 FSL_EDMA_RX>, <&edma2 87 0 0>;
+ 				dma-names = "rx", "tx";
+ 				status = "disabled";
+ 			};
+@@ -834,7 +834,7 @@ lpuart8: serial@426a0000 {
+ 				interrupts = <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
+ 				clocks = <&scmi_clk IMX95_CLK_LPUART8>;
+ 				clock-names = "ipg";
+-				dmas = <&edma2 28 0 FSL_EDMA_RX>, <&edma2 27 0 0>;
++				dmas = <&edma2 90 0 FSL_EDMA_RX>, <&edma2 89 0 0>;
+ 				dma-names = "rx", "tx";
+ 				status = "disabled";
+ 			};
+diff --git a/arch/arm64/boot/dts/mediatek/mt6331.dtsi b/arch/arm64/boot/dts/mediatek/mt6331.dtsi
+index d89858c73ab1b0..243afbffa21fd7 100644
+--- a/arch/arm64/boot/dts/mediatek/mt6331.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt6331.dtsi
+@@ -6,12 +6,12 @@
+ #include <dt-bindings/input/input.h>
+ 
+ &pwrap {
+-	pmic: mt6331 {
++	pmic: pmic {
+ 		compatible = "mediatek,mt6331";
+ 		interrupt-controller;
+ 		#interrupt-cells = <2>;
+ 
+-		mt6331regulator: mt6331regulator {
++		mt6331regulator: regulators {
+ 			compatible = "mediatek,mt6331-regulator";
+ 
+ 			mt6331_vdvfs11_reg: buck-vdvfs11 {
+@@ -258,7 +258,7 @@ mt6331_vrtc_reg: ldo-vrtc {
+ 			};
+ 
+ 			mt6331_vdig18_reg: ldo-vdig18 {
+-				regulator-name = "dvdd18_dig";
++				regulator-name = "vdig18";
+ 				regulator-min-microvolt = <1800000>;
+ 				regulator-max-microvolt = <1800000>;
+ 				regulator-ramp-delay = <0>;
+@@ -266,11 +266,11 @@ mt6331_vdig18_reg: ldo-vdig18 {
+ 			};
+ 		};
+ 
+-		mt6331rtc: mt6331rtc {
++		mt6331rtc: rtc {
+ 			compatible = "mediatek,mt6331-rtc";
+ 		};
+ 
+-		mt6331keys: mt6331keys {
++		mt6331keys: keys {
+ 			compatible = "mediatek,mt6331-keys";
+ 			power {
+ 				linux,keycodes = <KEY_POWER>;
+diff --git a/arch/arm64/boot/dts/mediatek/mt6795-sony-xperia-m5.dts b/arch/arm64/boot/dts/mediatek/mt6795-sony-xperia-m5.dts
+index 91de920c224571..03cc48321a3f48 100644
+--- a/arch/arm64/boot/dts/mediatek/mt6795-sony-xperia-m5.dts
++++ b/arch/arm64/boot/dts/mediatek/mt6795-sony-xperia-m5.dts
+@@ -212,7 +212,7 @@ proximity@48 {
+ 
+ &mmc0 {
+ 	/* eMMC controller */
+-	mediatek,latch-ck = <0x14>; /* hs400 */
++	mediatek,latch-ck = <4>; /* hs400 */
+ 	mediatek,hs200-cmd-int-delay = <1>;
+ 	mediatek,hs400-cmd-int-delay = <1>;
+ 	mediatek,hs400-ds-dly3 = <0x1a>;
+diff --git a/arch/arm64/boot/dts/mediatek/mt8186-corsola-krabby.dtsi b/arch/arm64/boot/dts/mediatek/mt8186-corsola-krabby.dtsi
+index 7c971198fa9561..72a2a2bff0a93f 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8186-corsola-krabby.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8186-corsola-krabby.dtsi
+@@ -71,14 +71,14 @@ &i2c1 {
+ 	i2c-scl-internal-delay-ns = <10000>;
+ 
+ 	touchscreen: touchscreen@10 {
+-		compatible = "hid-over-i2c";
++		compatible = "elan,ekth6915";
+ 		reg = <0x10>;
+ 		interrupts-extended = <&pio 12 IRQ_TYPE_LEVEL_LOW>;
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&touchscreen_pins>;
+-		post-power-on-delay-ms = <10>;
+-		hid-descr-addr = <0x0001>;
+-		vdd-supply = <&pp3300_s3>;
++		reset-gpios = <&pio 60 GPIO_ACTIVE_LOW>;
++		vcc33-supply = <&pp3300_s3>;
++		no-reset-on-power-off;
+ 	};
+ };
+ 
+diff --git a/arch/arm64/boot/dts/mediatek/mt8186-corsola-tentacruel-sku262144.dts b/arch/arm64/boot/dts/mediatek/mt8186-corsola-tentacruel-sku262144.dts
+index 26d3451a5e47c0..24d9ede63eaa21 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8186-corsola-tentacruel-sku262144.dts
++++ b/arch/arm64/boot/dts/mediatek/mt8186-corsola-tentacruel-sku262144.dts
+@@ -42,3 +42,7 @@ MATRIX_KEY(0x00, 0x04, KEY_VOLUMEUP)
+ 		CROS_STD_MAIN_KEYMAP
+ 	>;
+ };
++
++&touchscreen {
++	compatible = "elan,ekth6a12nay";
++};
+diff --git a/arch/arm64/boot/dts/mediatek/mt8195.dtsi b/arch/arm64/boot/dts/mediatek/mt8195.dtsi
+index 2e138b54f55639..451aa278bef502 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8195.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8195.dtsi
+@@ -1563,9 +1563,6 @@ pcie0: pcie@112f0000 {
+ 
+ 			power-domains = <&spm MT8195_POWER_DOMAIN_PCIE_MAC_P0>;
+ 
+-			resets = <&infracfg_ao MT8195_INFRA_RST2_PCIE_P0_SWRST>;
+-			reset-names = "mac";
+-
+ 			#interrupt-cells = <1>;
+ 			interrupt-map-mask = <0 0 0 7>;
+ 			interrupt-map = <0 0 0 1 &pcie_intc0 0>,
+diff --git a/arch/arm64/boot/dts/mediatek/mt8395-kontron-3-5-sbc-i1200.dts b/arch/arm64/boot/dts/mediatek/mt8395-kontron-3-5-sbc-i1200.dts
+index e2e75b8ff91880..9ab4fee769e405 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8395-kontron-3-5-sbc-i1200.dts
++++ b/arch/arm64/boot/dts/mediatek/mt8395-kontron-3-5-sbc-i1200.dts
+@@ -351,7 +351,7 @@ regulator {
+ 			LDO_VIN2-supply = <&vsys>;
+ 			LDO_VIN3-supply = <&vsys>;
+ 
+-			mt6360_buck1: BUCK1 {
++			mt6360_buck1: buck1 {
+ 				regulator-name = "emi_vdd2";
+ 				regulator-min-microvolt = <600000>;
+ 				regulator-max-microvolt = <1800000>;
+@@ -361,7 +361,7 @@ MT6360_OPMODE_LP
+ 				regulator-always-on;
+ 			};
+ 
+-			mt6360_buck2: BUCK2 {
++			mt6360_buck2: buck2 {
+ 				regulator-name = "emi_vddq";
+ 				regulator-min-microvolt = <300000>;
+ 				regulator-max-microvolt = <1300000>;
+@@ -371,7 +371,7 @@ MT6360_OPMODE_LP
+ 				regulator-always-on;
+ 			};
+ 
+-			mt6360_ldo1: LDO1 {
++			mt6360_ldo1: ldo1 {
+ 				regulator-name = "mt6360_ldo1"; /* Test point */
+ 				regulator-min-microvolt = <1200000>;
+ 				regulator-max-microvolt = <3600000>;
+@@ -379,7 +379,7 @@ mt6360_ldo1: LDO1 {
+ 							   MT6360_OPMODE_LP>;
+ 			};
+ 
+-			mt6360_ldo2: LDO2 {
++			mt6360_ldo2: ldo2 {
+ 				regulator-name = "panel1_p1v8";
+ 				regulator-min-microvolt = <1800000>;
+ 				regulator-max-microvolt = <1800000>;
+@@ -387,7 +387,7 @@ mt6360_ldo2: LDO2 {
+ 							   MT6360_OPMODE_LP>;
+ 			};
+ 
+-			mt6360_ldo3: LDO3 {
++			mt6360_ldo3: ldo3 {
+ 				regulator-name = "vmc_pmu";
+ 				regulator-min-microvolt = <1800000>;
+ 				regulator-max-microvolt = <3300000>;
+@@ -395,7 +395,7 @@ mt6360_ldo3: LDO3 {
+ 							   MT6360_OPMODE_LP>;
+ 			};
+ 
+-			mt6360_ldo5: LDO5 {
++			mt6360_ldo5: ldo5 {
+ 				regulator-name = "vmch_pmu";
+ 				regulator-min-microvolt = <3300000>;
+ 				regulator-max-microvolt = <3300000>;
+@@ -403,7 +403,7 @@ mt6360_ldo5: LDO5 {
+ 							   MT6360_OPMODE_LP>;
+ 			};
+ 
+-			mt6360_ldo6: LDO6 {
++			mt6360_ldo6: ldo6 {
+ 				regulator-name = "mt6360_ldo6"; /* Test point */
+ 				regulator-min-microvolt = <500000>;
+ 				regulator-max-microvolt = <2100000>;
+@@ -411,7 +411,7 @@ mt6360_ldo6: LDO6 {
+ 							   MT6360_OPMODE_LP>;
+ 			};
+ 
+-			mt6360_ldo7: LDO7 {
++			mt6360_ldo7: ldo7 {
+ 				regulator-name = "emi_vmddr_en";
+ 				regulator-min-microvolt = <1800000>;
+ 				regulator-max-microvolt = <1800000>;
+diff --git a/arch/arm64/boot/dts/mediatek/mt8516-pumpkin.dts b/arch/arm64/boot/dts/mediatek/mt8516-pumpkin.dts
+index cce642c5381280..3d3db33a64dc66 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8516-pumpkin.dts
++++ b/arch/arm64/boot/dts/mediatek/mt8516-pumpkin.dts
+@@ -11,7 +11,7 @@
+ 
+ / {
+ 	model = "Pumpkin MT8516";
+-	compatible = "mediatek,mt8516";
++	compatible = "mediatek,mt8516-pumpkin", "mediatek,mt8516";
+ 
+ 	memory@40000000 {
+ 		device_type = "memory";
+diff --git a/arch/arm64/boot/dts/qcom/qcm2290.dtsi b/arch/arm64/boot/dts/qcom/qcm2290.dtsi
+index 2cfdf5bd5fd9be..e75e6354b2d52a 100644
+--- a/arch/arm64/boot/dts/qcom/qcm2290.dtsi
++++ b/arch/arm64/boot/dts/qcom/qcm2290.dtsi
+@@ -1405,6 +1405,7 @@ usb_dwc3: usb@4e00000 {
+ 				snps,has-lpm-erratum;
+ 				snps,hird-threshold = /bits/ 8 <0x10>;
+ 				snps,usb3_lpm_capable;
++				snps,parkmode-disable-ss-quirk;
+ 				maximum-speed = "super-speed";
+ 				dr_mode = "otg";
+ 				usb-role-switch;
+diff --git a/arch/arm64/boot/dts/renesas/rzg2lc-smarc.dtsi b/arch/arm64/boot/dts/renesas/rzg2lc-smarc.dtsi
+index 377849cbb462ea..5785a934c28bf1 100644
+--- a/arch/arm64/boot/dts/renesas/rzg2lc-smarc.dtsi
++++ b/arch/arm64/boot/dts/renesas/rzg2lc-smarc.dtsi
+@@ -48,7 +48,10 @@ sound_card {
+ #if (SW_SCIF_CAN || SW_RSPI_CAN)
+ &canfd {
+ 	pinctrl-0 = <&can1_pins>;
+-	/delete-node/ channel@0;
++
++	channel0 {
++		status = "disabled";
++	};
+ };
+ #else
+ &canfd {
+diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
+index 5553508c364402..ca6d002a6f1373 100644
+--- a/arch/arm64/net/bpf_jit_comp.c
++++ b/arch/arm64/net/bpf_jit_comp.c
+@@ -2754,8 +2754,7 @@ void bpf_jit_free(struct bpf_prog *prog)
+ 		 * before freeing it.
+ 		 */
+ 		if (jit_data) {
+-			bpf_arch_text_copy(&jit_data->ro_header->size, &jit_data->header->size,
+-					   sizeof(jit_data->header->size));
++			bpf_jit_binary_pack_finalize(jit_data->ro_header, jit_data->header);
+ 			kfree(jit_data);
+ 		}
+ 		hdr = bpf_jit_binary_pack_hdr(prog);
+diff --git a/arch/loongarch/kernel/relocate.c b/arch/loongarch/kernel/relocate.c
+index 50c469067f3aa3..b5e2312a2fca51 100644
+--- a/arch/loongarch/kernel/relocate.c
++++ b/arch/loongarch/kernel/relocate.c
+@@ -166,6 +166,10 @@ static inline __init bool kaslr_disabled(void)
+ 		return true;
+ #endif
+ 
++	str = strstr(boot_command_line, "kexec_file");
++	if (str == boot_command_line || (str > boot_command_line && *(str - 1) == ' '))
++		return true;
++
+ 	return false;
+ }
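
The added check accepts "kexec_file" only at a word start, so that
e.g. "my_kexec_file" on the command line does not match. A minimal
sketch of that boundary test (the function name is illustrative):

#include <stdbool.h>
#include <string.h>

static bool cmdline_starts_word(const char *cmdline, const char *word)
{
	const char *p = strstr(cmdline, word);

	/* Match at the start of the line or right after a space. */
	return p && (p == cmdline || p[-1] == ' ');
}
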
+ 
+diff --git a/arch/powerpc/include/asm/book3s/32/pgalloc.h b/arch/powerpc/include/asm/book3s/32/pgalloc.h
+index dd4eb306317581..f4390704d5ba29 100644
+--- a/arch/powerpc/include/asm/book3s/32/pgalloc.h
++++ b/arch/powerpc/include/asm/book3s/32/pgalloc.h
+@@ -7,8 +7,14 @@
+ 
+ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+ {
+-	return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
+-			pgtable_gfp_flags(mm, GFP_KERNEL));
++	pgd_t *pgd = kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
++				      pgtable_gfp_flags(mm, GFP_KERNEL));
++
++#ifdef CONFIG_PPC_BOOK3S_603
++	memcpy(pgd + USER_PTRS_PER_PGD, swapper_pg_dir + USER_PTRS_PER_PGD,
++	       (MAX_PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
++#endif
++	return pgd;
+ }
+ 
+ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+diff --git a/arch/powerpc/include/asm/nohash/pgalloc.h b/arch/powerpc/include/asm/nohash/pgalloc.h
+index bb5f3e8ea912df..4ef780b291bc31 100644
+--- a/arch/powerpc/include/asm/nohash/pgalloc.h
++++ b/arch/powerpc/include/asm/nohash/pgalloc.h
+@@ -22,7 +22,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+ 	pgd_t *pgd = kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
+ 			pgtable_gfp_flags(mm, GFP_KERNEL));
+ 
+-#if defined(CONFIG_PPC_8xx) || defined(CONFIG_PPC_BOOK3S_603)
++#ifdef CONFIG_PPC_8xx
+ 	memcpy(pgd + USER_PTRS_PER_PGD, swapper_pg_dir + USER_PTRS_PER_PGD,
+ 	       (MAX_PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+ #endif
+diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
+index 56c5ebe21b99a4..613606400ee999 100644
+--- a/arch/powerpc/kernel/head_8xx.S
++++ b/arch/powerpc/kernel/head_8xx.S
+@@ -162,7 +162,7 @@ instruction_counter:
+  * For the MPC8xx, this is a software tablewalk to load the instruction
+  * TLB.  The task switch loads the M_TWB register with the pointer to the first
+  * level table.
+- * If we discover there is no second level table (value is zero) or if there
++ * If there is no second level table (value is zero) or if there
+  * is an invalid pte, we load that into the TLB, which causes another fault
+  * into the TLB Error interrupt where we can handle such problems.
+  * We have to use the MD_xxx registers for the tablewalk because the
+@@ -183,9 +183,6 @@ instruction_counter:
+ 	mtspr	SPRN_SPRG_SCRATCH2, r10
+ 	mtspr	SPRN_M_TW, r11
+ 
+-	/* If we are faulting a kernel address, we have to use the
+-	 * kernel page tables.
+-	 */
+ 	mfspr	r10, SPRN_SRR0	/* Get effective address of fault */
+ 	INVALIDATE_ADJACENT_PAGES_CPU15(r10, r11)
+ 	mtspr	SPRN_MD_EPN, r10
+@@ -228,10 +225,6 @@ instruction_counter:
+ 	mtspr	SPRN_SPRG_SCRATCH2, r10
+ 	mtspr	SPRN_M_TW, r11
+ 
+-	/* If we are faulting a kernel address, we have to use the
+-	 * kernel page tables.
+-	 */
+-	mfspr	r10, SPRN_MD_EPN
+ 	mfspr	r10, SPRN_M_TWB	/* Get level 1 table */
+ 	lwz	r11, (swapper_pg_dir-PAGE_OFFSET)@l(r10)	/* Get level 1 entry */
+ 
+diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
+index 563425b4963c98..497945aa3e2c49 100644
+--- a/arch/riscv/net/bpf_jit_comp64.c
++++ b/arch/riscv/net/bpf_jit_comp64.c
+@@ -559,6 +559,39 @@ static void emit_atomic(u8 rd, u8 rs, s16 off, s32 imm, bool is64,
+ 	}
+ }
+ 
++/*
++ * Sign-extend the register if necessary
++ */
++static int sign_extend(u8 rd, u8 rs, u8 sz, bool sign, struct rv_jit_context *ctx)
++{
++	if (!sign && (sz == 1 || sz == 2)) {
++		if (rd != rs)
++			emit_mv(rd, rs, ctx);
++		return 0;
++	}
++
++	switch (sz) {
++	case 1:
++		emit_sextb(rd, rs, ctx);
++		break;
++	case 2:
++		emit_sexth(rd, rs, ctx);
++		break;
++	case 4:
++		emit_sextw(rd, rs, ctx);
++		break;
++	case 8:
++		if (rd != rs)
++			emit_mv(rd, rs, ctx);
++		break;
++	default:
++		pr_err("bpf-jit: invalid size %d for sign_extend\n", sz);
++		return -EINVAL;
++	}
++
++	return 0;
++}
++
+ #define BPF_FIXUP_OFFSET_MASK   GENMASK(26, 0)
+ #define BPF_FIXUP_REG_MASK      GENMASK(31, 27)
+ #define REG_DONT_CLEAR_MARKER	0	/* RV_REG_ZERO unused in pt_regmap */
+@@ -1020,8 +1053,15 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
+ 		restore_args(min_t(int, nr_arg_slots, RV_MAX_REG_ARGS), args_off, ctx);
+ 
+ 	if (save_ret) {
+-		emit_ld(RV_REG_A0, -retval_off, RV_REG_FP, ctx);
+ 		emit_ld(regmap[BPF_REG_0], -(retval_off - 8), RV_REG_FP, ctx);
++		if (is_struct_ops) {
++			ret = sign_extend(RV_REG_A0, regmap[BPF_REG_0], m->ret_size,
++					  m->ret_flags & BTF_FMODEL_SIGNED_ARG, ctx);
++			if (ret)
++				goto out;
++		} else {
++			emit_ld(RV_REG_A0, -retval_off, RV_REG_FP, ctx);
++		}
+ 	}
+ 
+ 	emit_ld(RV_REG_S1, -sreg_off, RV_REG_FP, ctx);
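
For clarity, the C equivalent of what sign_extend() emits per return
size, as a sketch rather than JIT code:

#include <stdint.h>

static int64_t sext(uint64_t v, int sz)
{
	switch (sz) {
	case 1: return (int8_t)v;	/* emit_sextb */
	case 2: return (int16_t)v;	/* emit_sexth */
	case 4: return (int32_t)v;	/* emit_sextw */
	default: return (int64_t)v;	/* plain move for 8 bytes */
	}
}
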
+diff --git a/arch/sparc/lib/M7memcpy.S b/arch/sparc/lib/M7memcpy.S
+index cbd42ea7c3f7c2..99357bfa8e82ad 100644
+--- a/arch/sparc/lib/M7memcpy.S
++++ b/arch/sparc/lib/M7memcpy.S
+@@ -696,16 +696,16 @@ FUNC_NAME:
+ 	EX_LD_FP(LOAD(ldd, %o4+40, %f26), memcpy_retl_o2_plus_o5_plus_40)
+ 	faligndata %f24, %f26, %f10
+ 	EX_ST_FP(STORE(std, %f6, %o0+24), memcpy_retl_o2_plus_o5_plus_40)
+-	EX_LD_FP(LOAD(ldd, %o4+48, %f28), memcpy_retl_o2_plus_o5_plus_40)
++	EX_LD_FP(LOAD(ldd, %o4+48, %f28), memcpy_retl_o2_plus_o5_plus_32)
+ 	faligndata %f26, %f28, %f12
+-	EX_ST_FP(STORE(std, %f8, %o0+32), memcpy_retl_o2_plus_o5_plus_40)
++	EX_ST_FP(STORE(std, %f8, %o0+32), memcpy_retl_o2_plus_o5_plus_32)
+ 	add	%o4, 64, %o4
+-	EX_LD_FP(LOAD(ldd, %o4-8, %f30), memcpy_retl_o2_plus_o5_plus_40)
++	EX_LD_FP(LOAD(ldd, %o4-8, %f30), memcpy_retl_o2_plus_o5_plus_24)
+ 	faligndata %f28, %f30, %f14
+-	EX_ST_FP(STORE(std, %f10, %o0+40), memcpy_retl_o2_plus_o5_plus_40)
+-	EX_ST_FP(STORE(std, %f12, %o0+48), memcpy_retl_o2_plus_o5_plus_40)
++	EX_ST_FP(STORE(std, %f10, %o0+40), memcpy_retl_o2_plus_o5_plus_24)
++	EX_ST_FP(STORE(std, %f12, %o0+48), memcpy_retl_o2_plus_o5_plus_16)
+ 	add	%o0, 64, %o0
+-	EX_ST_FP(STORE(std, %f14, %o0-8), memcpy_retl_o2_plus_o5_plus_40)
++	EX_ST_FP(STORE(std, %f14, %o0-8), memcpy_retl_o2_plus_o5_plus_8)
+ 	fsrc2	%f30, %f14
+ 	bgu,pt	%xcc, .Lunalign_sloop
+ 	 prefetch [%o4 + (8 * BLOCK_SIZE)], 20
+@@ -728,7 +728,7 @@ FUNC_NAME:
+ 	add	%o4, 8, %o4
+ 	faligndata %f0, %f2, %f16
+ 	subcc	%o5, 8, %o5
+-	EX_ST_FP(STORE(std, %f16, %o0), memcpy_retl_o2_plus_o5)
++	EX_ST_FP(STORE(std, %f16, %o0), memcpy_retl_o2_plus_o5_plus_8)
+ 	fsrc2	%f2, %f0
+ 	bgu,pt	%xcc, .Lunalign_by8
+ 	 add	%o0, 8, %o0
+@@ -772,7 +772,7 @@ FUNC_NAME:
+ 	subcc	%o5, 0x20, %o5
+ 	EX_ST(STORE(stx, %o3, %o0 + 0x00), memcpy_retl_o2_plus_o5_plus_32)
+ 	EX_ST(STORE(stx, %g2, %o0 + 0x08), memcpy_retl_o2_plus_o5_plus_24)
+-	EX_ST(STORE(stx, %g7, %o0 + 0x10), memcpy_retl_o2_plus_o5_plus_24)
++	EX_ST(STORE(stx, %g7, %o0 + 0x10), memcpy_retl_o2_plus_o5_plus_16)
+ 	EX_ST(STORE(stx, %o4, %o0 + 0x18), memcpy_retl_o2_plus_o5_plus_8)
+ 	bne,pt	%xcc, 1b
+ 	 add	%o0, 0x20, %o0
+@@ -804,12 +804,12 @@ FUNC_NAME:
+ 	brz,pt	%o3, 2f
+ 	 sub	%o2, %o3, %o2
+ 
+-1:	EX_LD(LOAD(ldub, %o1 + 0x00, %g2), memcpy_retl_o2_plus_g1)
++1:	EX_LD(LOAD(ldub, %o1 + 0x00, %g2), memcpy_retl_o2_plus_o3)
+ 	add	%o1, 1, %o1
+ 	subcc	%o3, 1, %o3
+ 	add	%o0, 1, %o0
+ 	bne,pt	%xcc, 1b
+-	 EX_ST(STORE(stb, %g2, %o0 - 0x01), memcpy_retl_o2_plus_g1_plus_1)
++	 EX_ST(STORE(stb, %g2, %o0 - 0x01), memcpy_retl_o2_plus_o3_plus_1)
+ 2:
+ 	and	%o1, 0x7, %o3
+ 	brz,pn	%o3, .Lmedium_noprefetch_cp
+diff --git a/arch/sparc/lib/Memcpy_utils.S b/arch/sparc/lib/Memcpy_utils.S
+index 64fbac28b3db18..207343367bb2da 100644
+--- a/arch/sparc/lib/Memcpy_utils.S
++++ b/arch/sparc/lib/Memcpy_utils.S
+@@ -137,6 +137,15 @@ ENTRY(memcpy_retl_o2_plus_63_8)
+ 	ba,pt	%xcc, __restore_asi
+ 	 add	%o2, 8, %o0
+ ENDPROC(memcpy_retl_o2_plus_63_8)
++ENTRY(memcpy_retl_o2_plus_o3)
++	ba,pt	%xcc, __restore_asi
++	 add	%o2, %o3, %o0
++ENDPROC(memcpy_retl_o2_plus_o3)
++ENTRY(memcpy_retl_o2_plus_o3_plus_1)
++	add	%o3, 1, %o3
++	ba,pt	%xcc, __restore_asi
++	 add	%o2, %o3, %o0
++ENDPROC(memcpy_retl_o2_plus_o3_plus_1)
+ ENTRY(memcpy_retl_o2_plus_o5)
+ 	ba,pt	%xcc, __restore_asi
+ 	 add	%o2, %o5, %o0
+diff --git a/arch/sparc/lib/NG4memcpy.S b/arch/sparc/lib/NG4memcpy.S
+index 7ad58ebe0d0096..df0ec1bd194892 100644
+--- a/arch/sparc/lib/NG4memcpy.S
++++ b/arch/sparc/lib/NG4memcpy.S
+@@ -281,7 +281,7 @@ FUNC_NAME:	/* %o0=dst, %o1=src, %o2=len */
+ 	subcc		%o5, 0x20, %o5
+ 	EX_ST(STORE(stx, %g1, %o0 + 0x00), memcpy_retl_o2_plus_o5_plus_32)
+ 	EX_ST(STORE(stx, %g2, %o0 + 0x08), memcpy_retl_o2_plus_o5_plus_24)
+-	EX_ST(STORE(stx, GLOBAL_SPARE, %o0 + 0x10), memcpy_retl_o2_plus_o5_plus_24)
++	EX_ST(STORE(stx, GLOBAL_SPARE, %o0 + 0x10), memcpy_retl_o2_plus_o5_plus_16)
+ 	EX_ST(STORE(stx, %o4, %o0 + 0x18), memcpy_retl_o2_plus_o5_plus_8)
+ 	bne,pt		%icc, 1b
+ 	 add		%o0, 0x20, %o0
+diff --git a/arch/sparc/lib/NGmemcpy.S b/arch/sparc/lib/NGmemcpy.S
+index ee51c12306894e..bbd3ea0a64822c 100644
+--- a/arch/sparc/lib/NGmemcpy.S
++++ b/arch/sparc/lib/NGmemcpy.S
+@@ -79,8 +79,8 @@
+ #ifndef EX_RETVAL
+ #define EX_RETVAL(x)	x
+ __restore_asi:
+-	ret
+ 	wr	%g0, ASI_AIUS, %asi
++	ret
+ 	 restore
+ ENTRY(NG_ret_i2_plus_i4_plus_1)
+ 	ba,pt	%xcc, __restore_asi
+@@ -125,15 +125,16 @@ ENTRY(NG_ret_i2_plus_g1_minus_56)
+ 	ba,pt	%xcc, __restore_asi
+ 	 add	%i2, %g1, %i0
+ ENDPROC(NG_ret_i2_plus_g1_minus_56)
+-ENTRY(NG_ret_i2_plus_i4)
++ENTRY(NG_ret_i2_plus_i4_plus_16)
++	add	%i4, 16, %i4
+ 	ba,pt	%xcc, __restore_asi
+ 	 add	%i2, %i4, %i0
+-ENDPROC(NG_ret_i2_plus_i4)
+-ENTRY(NG_ret_i2_plus_i4_minus_8)
+-	sub	%i4, 8, %i4
++ENDPROC(NG_ret_i2_plus_i4_plus_16)
++ENTRY(NG_ret_i2_plus_i4_plus_8)
++	add	%i4, 8, %i4
+ 	ba,pt	%xcc, __restore_asi
+ 	 add	%i2, %i4, %i0
+-ENDPROC(NG_ret_i2_plus_i4_minus_8)
++ENDPROC(NG_ret_i2_plus_i4_plus_8)
+ ENTRY(NG_ret_i2_plus_8)
+ 	ba,pt	%xcc, __restore_asi
+ 	 add	%i2, 8, %i0
+@@ -160,6 +161,12 @@ ENTRY(NG_ret_i2_and_7_plus_i4)
+ 	ba,pt	%xcc, __restore_asi
+ 	 add	%i2, %i4, %i0
+ ENDPROC(NG_ret_i2_and_7_plus_i4)
++ENTRY(NG_ret_i2_and_7_plus_i4_plus_8)
++	and	%i2, 7, %i2
++	add	%i4, 8, %i4
++	ba,pt	%xcc, __restore_asi
++	 add	%i2, %i4, %i0
++ENDPROC(NG_ret_i2_and_7_plus_i4_plus_8)
+ #endif
+ 
+ 	.align		64
+@@ -405,13 +412,13 @@ FUNC_NAME:	/* %i0=dst, %i1=src, %i2=len */
+ 	andn		%i2, 0xf, %i4
+ 	and		%i2, 0xf, %i2
+ 1:	subcc		%i4, 0x10, %i4
+-	EX_LD(LOAD(ldx, %i1, %o4), NG_ret_i2_plus_i4)
++	EX_LD(LOAD(ldx, %i1, %o4), NG_ret_i2_plus_i4_plus_16)
+ 	add		%i1, 0x08, %i1
+-	EX_LD(LOAD(ldx, %i1, %g1), NG_ret_i2_plus_i4)
++	EX_LD(LOAD(ldx, %i1, %g1), NG_ret_i2_plus_i4_plus_16)
+ 	sub		%i1, 0x08, %i1
+-	EX_ST(STORE(stx, %o4, %i1 + %i3), NG_ret_i2_plus_i4)
++	EX_ST(STORE(stx, %o4, %i1 + %i3), NG_ret_i2_plus_i4_plus_16)
+ 	add		%i1, 0x8, %i1
+-	EX_ST(STORE(stx, %g1, %i1 + %i3), NG_ret_i2_plus_i4_minus_8)
++	EX_ST(STORE(stx, %g1, %i1 + %i3), NG_ret_i2_plus_i4_plus_8)
+ 	bgu,pt		%XCC, 1b
+ 	 add		%i1, 0x8, %i1
+ 73:	andcc		%i2, 0x8, %g0
+@@ -468,7 +475,7 @@ FUNC_NAME:	/* %i0=dst, %i1=src, %i2=len */
+ 	subcc		%i4, 0x8, %i4
+ 	srlx		%g3, %i3, %i5
+ 	or		%i5, %g2, %i5
+-	EX_ST(STORE(stx, %i5, %o0), NG_ret_i2_and_7_plus_i4)
++	EX_ST(STORE(stx, %i5, %o0), NG_ret_i2_and_7_plus_i4_plus_8)
+ 	add		%o0, 0x8, %o0
+ 	bgu,pt		%icc, 1b
+ 	 sllx		%g3, %g1, %g2
+diff --git a/arch/sparc/lib/U1memcpy.S b/arch/sparc/lib/U1memcpy.S
+index 635398ec7540ee..154fbd35400ca8 100644
+--- a/arch/sparc/lib/U1memcpy.S
++++ b/arch/sparc/lib/U1memcpy.S
+@@ -164,17 +164,18 @@ ENTRY(U1_gs_40_fp)
+ 	retl
+ 	 add		%o0, %o2, %o0
+ ENDPROC(U1_gs_40_fp)
+-ENTRY(U1_g3_0_fp)
+-	VISExitHalf
+-	retl
+-	 add		%g3, %o2, %o0
+-ENDPROC(U1_g3_0_fp)
+ ENTRY(U1_g3_8_fp)
+ 	VISExitHalf
+ 	add		%g3, 8, %g3
+ 	retl
+ 	 add		%g3, %o2, %o0
+ ENDPROC(U1_g3_8_fp)
++ENTRY(U1_g3_16_fp)
++	VISExitHalf
++	add		%g3, 16, %g3
++	retl
++	 add		%g3, %o2, %o0
++ENDPROC(U1_g3_16_fp)
+ ENTRY(U1_o2_0_fp)
+ 	VISExitHalf
+ 	retl
+@@ -547,18 +548,18 @@ FUNC_NAME:		/* %o0=dst, %o1=src, %o2=len */
+ 62:	FINISH_VISCHUNK(o0, f44, f46)
+ 63:	UNEVEN_VISCHUNK_LAST(o0, f46, f0)
+ 
+-93:	EX_LD_FP(LOAD(ldd, %o1, %f2), U1_g3_0_fp)
++93:	EX_LD_FP(LOAD(ldd, %o1, %f2), U1_g3_8_fp)
+ 	add		%o1, 8, %o1
+ 	subcc		%g3, 8, %g3
+ 	faligndata	%f0, %f2, %f8
+-	EX_ST_FP(STORE(std, %f8, %o0), U1_g3_8_fp)
++	EX_ST_FP(STORE(std, %f8, %o0), U1_g3_16_fp)
+ 	bl,pn		%xcc, 95f
+ 	 add		%o0, 8, %o0
+-	EX_LD_FP(LOAD(ldd, %o1, %f0), U1_g3_0_fp)
++	EX_LD_FP(LOAD(ldd, %o1, %f0), U1_g3_8_fp)
+ 	add		%o1, 8, %o1
+ 	subcc		%g3, 8, %g3
+ 	faligndata	%f2, %f0, %f8
+-	EX_ST_FP(STORE(std, %f8, %o0), U1_g3_8_fp)
++	EX_ST_FP(STORE(std, %f8, %o0), U1_g3_16_fp)
+ 	bge,pt		%xcc, 93b
+ 	 add		%o0, 8, %o0
+ 
+diff --git a/arch/sparc/lib/U3memcpy.S b/arch/sparc/lib/U3memcpy.S
+index 9248d59c734ce2..bace3a18f836f1 100644
+--- a/arch/sparc/lib/U3memcpy.S
++++ b/arch/sparc/lib/U3memcpy.S
+@@ -267,6 +267,7 @@ FUNC_NAME:	/* %o0=dst, %o1=src, %o2=len */
+ 	faligndata	%f10, %f12, %f26
+ 	EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0), U3_retl_o2)
+ 
++	and		%o2, 0x3f, %o2
+ 	subcc		GLOBAL_SPARE, 0x80, GLOBAL_SPARE
+ 	add		%o1, 0x40, %o1
+ 	bgu,pt		%XCC, 1f
+@@ -336,7 +337,6 @@ FUNC_NAME:	/* %o0=dst, %o1=src, %o2=len */
+ 	 * Also notice how this code is careful not to perform a
+ 	 * load past the end of the src buffer.
+ 	 */
+-	and		%o2, 0x3f, %o2
+ 	andcc		%o2, 0x38, %g2
+ 	be,pn		%XCC, 2f
+ 	 subcc		%g2, 0x8, %g2
+diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
+index 9d6411c6592050..00cefbb59fa98c 100644
+--- a/arch/x86/include/asm/segment.h
++++ b/arch/x86/include/asm/segment.h
+@@ -244,7 +244,7 @@ static inline unsigned long vdso_encode_cpunode(int cpu, unsigned long node)
+ 
+ static inline void vdso_read_cpunode(unsigned *cpu, unsigned *node)
+ {
+-	unsigned int p;
++	unsigned long p;
+ 
+ 	/*
+ 	 * Load CPU and node number from the GDT.  LSL is faster than RDTSCP
+@@ -254,10 +254,10 @@ static inline void vdso_read_cpunode(unsigned *cpu, unsigned *node)
+ 	 *
+ 	 * If RDPID is available, use it.
+ 	 */
+-	alternative_io ("lsl %[seg],%[p]",
+-			".byte 0xf3,0x0f,0xc7,0xf8", /* RDPID %eax/rax */
++	alternative_io ("lsl %[seg],%k[p]",
++			"rdpid %[p]",
+ 			X86_FEATURE_RDPID,
+-			[p] "=a" (p), [seg] "r" (__CPUNODE_SEG));
++			[p] "=r" (p), [seg] "r" (__CPUNODE_SEG));
+ 
+ 	if (cpu)
+ 		*cpu = (p & VDSO_CPUNODE_MASK);
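
The value read via LSL or RDPID packs the NUMA node above the CPU
number. Assuming the kernel's 12-bit CPU field (VDSO_CPUNODE_BITS),
the decode reduces to a mask and a shift:

#define CPUNODE_BITS	12
#define CPUNODE_MASK	((1UL << CPUNODE_BITS) - 1)

static void decode_cpunode(unsigned long p, unsigned *cpu, unsigned *node)
{
	*cpu  = p & CPUNODE_MASK;	/* low bits: CPU number */
	*node = p >> CPUNODE_BITS;	/* high bits: NUMA node */
}
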
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
+index 9170a9e127b7a8..c48a466db0c373 100644
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -4237,13 +4237,21 @@ static int svm_vcpu_pre_run(struct kvm_vcpu *vcpu)
+ static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
+ {
+ 	struct vcpu_svm *svm = to_svm(vcpu);
++	struct vmcb_control_area *control = &svm->vmcb->control;
++
++	/*
++	 * Next RIP must be provided as IRQs are disabled, and accessing guest
++	 * memory to decode the instruction might fault, i.e. might sleep.
++	 */
++	if (!nrips || !control->next_rip)
++		return EXIT_FASTPATH_NONE;
+ 
+ 	if (is_guest_mode(vcpu))
+ 		return EXIT_FASTPATH_NONE;
+ 
+-	switch (svm->vmcb->control.exit_code) {
++	switch (control->exit_code) {
+ 	case SVM_EXIT_MSR:
+-		if (!svm->vmcb->control.exit_info_1)
++		if (!control->exit_info_1)
+ 			break;
+ 		return handle_fastpath_set_msr_irqoff(vcpu);
+ 	case SVM_EXIT_HLT:
+diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
+index 156e9bb07abf1a..2fb234ab467b16 100644
+--- a/block/blk-mq-sysfs.c
++++ b/block/blk-mq-sysfs.c
+@@ -150,9 +150,11 @@ static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
+ 		return;
+ 
+ 	hctx_for_each_ctx(hctx, ctx, i)
+-		kobject_del(&ctx->kobj);
++		if (ctx->kobj.state_in_sysfs)
++			kobject_del(&ctx->kobj);
+ 
+-	kobject_del(&hctx->kobj);
++	if (hctx->kobj.state_in_sysfs)
++		kobject_del(&hctx->kobj);
+ }
+ 
+ static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
+diff --git a/block/blk-settings.c b/block/blk-settings.c
+index f24fffdb6c294c..d72a283401c3a1 100644
+--- a/block/blk-settings.c
++++ b/block/blk-settings.c
+@@ -552,7 +552,8 @@ static unsigned int blk_round_down_sectors(unsigned int sectors, unsigned int lb
+ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
+ 		     sector_t start)
+ {
+-	unsigned int top, bottom, alignment, ret = 0;
++	unsigned int top, bottom, alignment;
++	int ret = 0;
+ 
+ 	t->features |= (b->features & BLK_FEAT_INHERIT_MASK);
+ 
+diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c
+index ee2fdab42334f1..7e0ce7bf68c999 100644
+--- a/crypto/asymmetric_keys/x509_cert_parser.c
++++ b/crypto/asymmetric_keys/x509_cert_parser.c
+@@ -611,11 +611,14 @@ int x509_process_extension(void *context, size_t hdrlen,
+ 		/*
+ 		 * Get hold of the basicConstraints
+ 		 * v[1] is the encoding size
+-		 *	(Expect 0x2 or greater, making it 1 or more bytes)
++		 *	(Expect 0x00 for empty SEQUENCE with CA:FALSE, or
++		 *	0x03 or greater for non-empty SEQUENCE)
+ 		 * v[2] is the encoding type
+ 		 *	(Expect an ASN1_BOOL for the CA)
+-		 * v[3] is the contents of the ASN1_BOOL
+-		 *      (Expect 1 if the CA is TRUE)
++		 * v[3] is the length of the ASN1_BOOL
++		 *	(Expect 1 for a single byte boolean)
++		 * v[4] is the contents of the ASN1_BOOL
++		 *	(Expect 0xFF if the CA is TRUE)
+ 		 * vlen should match the entire extension size
+ 		 */
+ 		if (v[0] != (ASN1_CONS_BIT | ASN1_SEQ))
+@@ -624,8 +627,13 @@ int x509_process_extension(void *context, size_t hdrlen,
+ 			return -EBADMSG;
+ 		if (v[1] != vlen - 2)
+ 			return -EBADMSG;
+-		if (vlen >= 4 && v[1] != 0 && v[2] == ASN1_BOOL && v[3] == 1)
++		/* Empty SEQUENCE means CA:FALSE (default value omitted per DER) */
++		if (v[1] == 0)
++			return 0;
++		if (vlen >= 5 && v[2] == ASN1_BOOL && v[3] == 1 && v[4] == 0xFF)
+ 			ctx->cert->pub->key_eflags |= 1 << KEY_EFLAG_CA;
++		else
++			return -EBADMSG;
+ 		return 0;
+ 	}
+ 
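
Concretely, the two DER encodings the tightened check accepts are the
empty SEQUENCE 30 00 (CA:FALSE omitted per DER) and 30 03 01 01 FF
(BOOLEAN TRUE). A simplified sketch of the accept logic, collapsing
the patch's -EBADMSG paths to false:

#include <stdbool.h>
#include <stddef.h>

static bool basic_constraints_ca(const unsigned char *v, size_t vlen)
{
	if (vlen < 2 || v[0] != 0x30 || v[1] != vlen - 2)
		return false;	/* not a well-formed SEQUENCE */
	if (v[1] == 0)
		return false;	/* empty SEQUENCE: CA defaults to FALSE */
	/* Single-byte BOOLEAN that must be DER TRUE (0xFF). */
	return vlen >= 5 && v[2] == 0x01 && v[3] == 1 && v[4] == 0xFF;
}
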
+diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
+index 6f4fe47c955bd0..35460c2072a4ac 100644
+--- a/drivers/acpi/acpica/aclocal.h
++++ b/drivers/acpi/acpica/aclocal.h
+@@ -1141,7 +1141,7 @@ struct acpi_port_info {
+ #define ACPI_RESOURCE_NAME_PIN_GROUP_FUNCTION   0x91
+ #define ACPI_RESOURCE_NAME_PIN_GROUP_CONFIG     0x92
+ #define ACPI_RESOURCE_NAME_CLOCK_INPUT          0x93
+-#define ACPI_RESOURCE_NAME_LARGE_MAX            0x94
++#define ACPI_RESOURCE_NAME_LARGE_MAX            0x93
+ 
+ /*****************************************************************************
+  *
+diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
+index ae035b93da0878..3eb56b77cb6d93 100644
+--- a/drivers/acpi/nfit/core.c
++++ b/drivers/acpi/nfit/core.c
+@@ -2637,7 +2637,7 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
+ 	if (ndr_desc->target_node == NUMA_NO_NODE) {
+ 		ndr_desc->target_node = phys_to_target_node(spa->address);
+ 		dev_info(acpi_desc->dev, "changing target node from %d to %d for nfit region [%pa-%pa]",
+-			NUMA_NO_NODE, ndr_desc->numa_node, &res.start, &res.end);
++			NUMA_NO_NODE, ndr_desc->target_node, &res.start, &res.end);
+ 	}
+ 
+ 	/*
+diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
+index 0888e4d618d53a..b524cf27213d4f 100644
+--- a/drivers/acpi/processor_idle.c
++++ b/drivers/acpi/processor_idle.c
+@@ -1410,6 +1410,9 @@ int acpi_processor_power_init(struct acpi_processor *pr)
+ 		if (retval) {
+ 			if (acpi_processor_registered == 0)
+ 				cpuidle_unregister_driver(&acpi_idle_driver);
++
++			per_cpu(acpi_cpuidle_device, pr->id) = NULL;
++			kfree(dev);
+ 			return retval;
+ 		}
+ 		acpi_processor_registered++;
+diff --git a/drivers/base/node.c b/drivers/base/node.c
+index eb72580288e627..deccfe68214ec2 100644
+--- a/drivers/base/node.c
++++ b/drivers/base/node.c
+@@ -879,6 +879,10 @@ int __register_one_node(int nid)
+ 	node_devices[nid] = node;
+ 
+ 	error = register_node(node_devices[nid], nid);
++	if (error) {
++		node_devices[nid] = NULL;
++		return error;
++	}
+ 
+ 	/* link cpu under this node */
+ 	for_each_present_cpu(cpu) {
+diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
+index faf4cdec23f04c..abb16a5bb79671 100644
+--- a/drivers/base/power/main.c
++++ b/drivers/base/power/main.c
+@@ -628,8 +628,20 @@ static void device_resume_noirq(struct device *dev, pm_message_t state, bool asy
+ 	if (dev->power.syscore || dev->power.direct_complete)
+ 		goto Out;
+ 
+-	if (!dev->power.is_noirq_suspended)
++	if (!dev->power.is_noirq_suspended) {
++		/*
++		 * This means that system suspend has been aborted in the noirq
++		 * phase before invoking the noirq suspend callback for the
++		 * device, so if device_suspend_late() has left it in suspend,
++		 * device_resume_early() should leave it in suspend too, in
++		 * case its early resume depends on the noirq resume that
++		 * has not run.
++		 */
++		if (dev_pm_skip_suspend(dev))
++			dev->power.must_resume = false;
++
+ 		goto Out;
++	}
+ 
+ 	if (!dpm_wait_for_superior(dev, async))
+ 		goto Out;
+diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
+index de4e2f3db942a4..66b3840bd96e35 100644
+--- a/drivers/base/regmap/regmap.c
++++ b/drivers/base/regmap/regmap.c
+@@ -828,7 +828,7 @@ struct regmap *__regmap_init(struct device *dev,
+ 		map->read_flag_mask = bus->read_flag_mask;
+ 	}
+ 
+-	if (config && config->read && config->write) {
++	if (config->read && config->write) {
+ 		map->reg_read  = _regmap_bus_read;
+ 		if (config->reg_update_bits)
+ 			map->reg_update_bits = config->reg_update_bits;
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index c705acc4d6f4b9..de692eed987402 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -1156,6 +1156,14 @@ static struct socket *nbd_get_socket(struct nbd_device *nbd, unsigned long fd,
+ 	if (!sock)
+ 		return NULL;
+ 
++	if (!sk_is_tcp(sock->sk) &&
++	    !sk_is_stream_unix(sock->sk)) {
++		dev_err(disk_to_dev(nbd->disk), "Unsupported socket: should be TCP or UNIX.\n");
++		*err = -EINVAL;
++		sockfd_put(sock);
++		return NULL;
++	}
++
+ 	if (sock->ops->shutdown == sock_no_shutdown) {
+ 		dev_err(disk_to_dev(nbd->disk), "Unsupported socket: shutdown callout must be supported.\n");
+ 		*err = -EINVAL;
+diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
+index f10369ad90f768..ceb7aeca5d9bd4 100644
+--- a/drivers/block/null_blk/main.c
++++ b/drivers/block/null_blk/main.c
+@@ -223,7 +223,7 @@ MODULE_PARM_DESC(discard, "Support discard operations (requires memory-backed nu
+ 
+ static unsigned long g_cache_size;
+ module_param_named(cache_size, g_cache_size, ulong, 0444);
+-MODULE_PARM_DESC(mbps, "Cache size in MiB for memory-backed device. Default: 0 (none)");
++MODULE_PARM_DESC(cache_size, "Cache size in MiB for memory-backed device. Default: 0 (none)");
+ 
+ static bool g_fua = true;
+ module_param_named(fua, g_fua, bool, 0444);
+diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
+index 4575d9a4e5ed6f..dbc8d8f14ce7d6 100644
+--- a/drivers/bus/fsl-mc/fsl-mc-bus.c
++++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
+@@ -1103,6 +1103,9 @@ static int fsl_mc_bus_probe(struct platform_device *pdev)
+ 	 * Get physical address of MC portal for the root DPRC:
+ 	 */
+ 	plat_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++	if (!plat_res)
++		return -EINVAL;
++
+ 	mc_portal_phys_addr = plat_res->start;
+ 	mc_portal_size = resource_size(plat_res);
+ 	mc_portal_base_phys_addr = mc_portal_phys_addr & ~0x3ffffff;
+diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
+index b51d9e243f3512..f0dde77f50b423 100644
+--- a/drivers/char/hw_random/Kconfig
++++ b/drivers/char/hw_random/Kconfig
+@@ -286,6 +286,7 @@ config HW_RANDOM_INGENIC_TRNG
+ config HW_RANDOM_NOMADIK
+ 	tristate "ST-Ericsson Nomadik Random Number Generator support"
+ 	depends on ARCH_NOMADIK || COMPILE_TEST
++	depends on ARM_AMBA
+ 	default HW_RANDOM
+ 	help
+ 	  This driver provides kernel-side support for the Random Number
+diff --git a/drivers/char/hw_random/ks-sa-rng.c b/drivers/char/hw_random/ks-sa-rng.c
+index 36c34252b4f631..3c514b4fbc8aec 100644
+--- a/drivers/char/hw_random/ks-sa-rng.c
++++ b/drivers/char/hw_random/ks-sa-rng.c
+@@ -231,6 +231,10 @@ static int ks_sa_rng_probe(struct platform_device *pdev)
+ 	if (IS_ERR(ks_sa_rng->regmap_cfg))
+ 		return dev_err_probe(dev, -EINVAL, "syscon_node_to_regmap failed\n");
+ 
++	ks_sa_rng->clk = devm_clk_get_enabled(dev, NULL);
++	if (IS_ERR(ks_sa_rng->clk))
++		return dev_err_probe(dev, PTR_ERR(ks_sa_rng->clk), "Failed to get clock\n");
++
+ 	pm_runtime_enable(dev);
+ 	ret = pm_runtime_resume_and_get(dev);
+ 	if (ret < 0) {
+diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
+index cf0be8a7939de4..db41301e63f28f 100644
+--- a/drivers/char/tpm/Kconfig
++++ b/drivers/char/tpm/Kconfig
+@@ -29,7 +29,7 @@ if TCG_TPM
+ 
+ config TCG_TPM2_HMAC
+ 	bool "Use HMAC and encrypted transactions on the TPM bus"
+-	default X86_64
++	default n
+ 	select CRYPTO_ECDH
+ 	select CRYPTO_LIB_AESCFB
+ 	select CRYPTO_LIB_SHA256
+diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
+index beb660ca240cc8..a2ec1addafc931 100644
+--- a/drivers/cpufreq/scmi-cpufreq.c
++++ b/drivers/cpufreq/scmi-cpufreq.c
+@@ -15,6 +15,7 @@
+ #include <linux/energy_model.h>
+ #include <linux/export.h>
+ #include <linux/module.h>
++#include <linux/of.h>
+ #include <linux/pm_opp.h>
+ #include <linux/slab.h>
+ #include <linux/scmi_protocol.h>
+@@ -398,6 +399,15 @@ static bool scmi_dev_used_by_cpus(struct device *scmi_dev)
+ 			return true;
+ 	}
+ 
++	/*
++	 * Older Broadcom STB chips had a "clocks" property for CPU node(s)
++	 * that did not match the SCMI performance protocol node. If we get
++	 * here, we are running with such an older Device Tree, so return
++	 * true to preserve backwards compatibility.
++	 */
++	if (of_machine_is_compatible("brcm,brcmstb"))
++		return true;
++
+ 	return false;
+ }
+ 
+diff --git a/drivers/cpuidle/cpuidle-qcom-spm.c b/drivers/cpuidle/cpuidle-qcom-spm.c
+index 1fc9968eae1996..b6b06a510fd866 100644
+--- a/drivers/cpuidle/cpuidle-qcom-spm.c
++++ b/drivers/cpuidle/cpuidle-qcom-spm.c
+@@ -96,20 +96,23 @@ static int spm_cpuidle_register(struct device *cpuidle_dev, int cpu)
+ 		return -ENODEV;
+ 
+ 	saw_node = of_parse_phandle(cpu_node, "qcom,saw", 0);
++	of_node_put(cpu_node);
+ 	if (!saw_node)
+ 		return -ENODEV;
+ 
+ 	pdev = of_find_device_by_node(saw_node);
+ 	of_node_put(saw_node);
+-	of_node_put(cpu_node);
+ 	if (!pdev)
+ 		return -ENODEV;
+ 
+ 	data = devm_kzalloc(cpuidle_dev, sizeof(*data), GFP_KERNEL);
+-	if (!data)
++	if (!data) {
++		put_device(&pdev->dev);
+ 		return -ENOMEM;
++	}
+ 
+ 	data->spm = dev_get_drvdata(&pdev->dev);
++	put_device(&pdev->dev);
+ 	if (!data->spm)
+ 		return -EINVAL;
+ 
+diff --git a/drivers/crypto/hisilicon/debugfs.c b/drivers/crypto/hisilicon/debugfs.c
+index 45e130b901eb5e..17eb236e9ee4d5 100644
+--- a/drivers/crypto/hisilicon/debugfs.c
++++ b/drivers/crypto/hisilicon/debugfs.c
+@@ -888,6 +888,7 @@ static int qm_diff_regs_init(struct hisi_qm *qm,
+ 		dfx_regs_uninit(qm, qm->debug.qm_diff_regs, ARRAY_SIZE(qm_diff_regs));
+ 		ret = PTR_ERR(qm->debug.acc_diff_regs);
+ 		qm->debug.acc_diff_regs = NULL;
++		qm->debug.qm_diff_regs = NULL;
+ 		return ret;
+ 	}
+ 
+diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
+index 34d30b78381343..a11fe5e276773a 100644
+--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
++++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
+@@ -690,6 +690,7 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
+ 
+ 	/* Config data buffer pasid needed by Kunpeng 920 */
+ 	hpre_config_pasid(qm);
++	hpre_open_sva_prefetch(qm);
+ 
+ 	hpre_enable_clock_gate(qm);
+ 
+@@ -1366,8 +1367,6 @@ static int hpre_pf_probe_init(struct hpre *hpre)
+ 	if (ret)
+ 		return ret;
+ 
+-	hpre_open_sva_prefetch(qm);
+-
+ 	hisi_qm_dev_err_init(qm);
+ 	ret = hpre_show_last_regs_init(qm);
+ 	if (ret)
+diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
+index b18692ee7fd563..a9550a05dfbd3e 100644
+--- a/drivers/crypto/hisilicon/qm.c
++++ b/drivers/crypto/hisilicon/qm.c
+@@ -3657,6 +3657,10 @@ static ssize_t qm_get_qos_value(struct hisi_qm *qm, const char *buf,
+ 	}
+ 
+ 	pdev = container_of(dev, struct pci_dev, dev);
++	if (pci_physfn(pdev) != qm->pdev) {
++		pci_err(qm->pdev, "the pdev input does not match the pf!\n");
++		return -EINVAL;
++	}
+ 
+ 	*fun_index = pdev->devfn;
+ 
+@@ -4272,9 +4276,6 @@ static void qm_restart_prepare(struct hisi_qm *qm)
+ {
+ 	u32 value;
+ 
+-	if (qm->err_ini->open_sva_prefetch)
+-		qm->err_ini->open_sva_prefetch(qm);
+-
+ 	if (qm->ver >= QM_HW_V3)
+ 		return;
+ 
+diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
+index 75c25f0d5f2b82..9014cc36705ffe 100644
+--- a/drivers/crypto/hisilicon/sec2/sec_main.c
++++ b/drivers/crypto/hisilicon/sec2/sec_main.c
+@@ -441,6 +441,45 @@ static void sec_set_endian(struct hisi_qm *qm)
+ 	writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);
+ }
+ 
++static void sec_close_sva_prefetch(struct hisi_qm *qm)
++{
++	u32 val;
++	int ret;
++
++	if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
++		return;
++
++	val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG);
++	val |= SEC_PREFETCH_DISABLE;
++	writel(val, qm->io_base + SEC_PREFETCH_CFG);
++
++	ret = readl_relaxed_poll_timeout(qm->io_base + SEC_SVA_TRANS,
++					 val, !(val & SEC_SVA_DISABLE_READY),
++					 SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US);
++	if (ret)
++		pci_err(qm->pdev, "failed to close sva prefetch\n");
++}
++
++static void sec_open_sva_prefetch(struct hisi_qm *qm)
++{
++	u32 val;
++	int ret;
++
++	if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
++		return;
++
++	/* Enable prefetch */
++	val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG);
++	val &= SEC_PREFETCH_ENABLE;
++	writel(val, qm->io_base + SEC_PREFETCH_CFG);
++
++	ret = readl_relaxed_poll_timeout(qm->io_base + SEC_PREFETCH_CFG,
++					 val, !(val & SEC_PREFETCH_DISABLE),
++					 SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US);
++	if (ret)
++		pci_err(qm->pdev, "failed to open sva prefetch\n");
++}
++
+ static void sec_engine_sva_config(struct hisi_qm *qm)
+ {
+ 	u32 reg;
+@@ -474,45 +513,7 @@ static void sec_engine_sva_config(struct hisi_qm *qm)
+ 		writel_relaxed(reg, qm->io_base +
+ 				SEC_INTERFACE_USER_CTRL1_REG);
+ 	}
+-}
+-
+-static void sec_open_sva_prefetch(struct hisi_qm *qm)
+-{
+-	u32 val;
+-	int ret;
+-
+-	if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
+-		return;
+-
+-	/* Enable prefetch */
+-	val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG);
+-	val &= SEC_PREFETCH_ENABLE;
+-	writel(val, qm->io_base + SEC_PREFETCH_CFG);
+-
+-	ret = readl_relaxed_poll_timeout(qm->io_base + SEC_PREFETCH_CFG,
+-					 val, !(val & SEC_PREFETCH_DISABLE),
+-					 SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US);
+-	if (ret)
+-		pci_err(qm->pdev, "failed to open sva prefetch\n");
+-}
+-
+-static void sec_close_sva_prefetch(struct hisi_qm *qm)
+-{
+-	u32 val;
+-	int ret;
+-
+-	if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
+-		return;
+-
+-	val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG);
+-	val |= SEC_PREFETCH_DISABLE;
+-	writel(val, qm->io_base + SEC_PREFETCH_CFG);
+-
+-	ret = readl_relaxed_poll_timeout(qm->io_base + SEC_SVA_TRANS,
+-					 val, !(val & SEC_SVA_DISABLE_READY),
+-					 SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US);
+-	if (ret)
+-		pci_err(qm->pdev, "failed to close sva prefetch\n");
++	sec_open_sva_prefetch(qm);
+ }
+ 
+ static void sec_enable_clock_gate(struct hisi_qm *qm)
+@@ -1094,7 +1095,6 @@ static int sec_pf_probe_init(struct sec_dev *sec)
+ 	if (ret)
+ 		return ret;
+ 
+-	sec_open_sva_prefetch(qm);
+ 	hisi_qm_dev_err_init(qm);
+ 	sec_debug_regs_clear(qm);
+ 	ret = sec_show_last_regs_init(qm);
+diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
+index 80c2fcb1d26dcf..323f53217f0c0d 100644
+--- a/drivers/crypto/hisilicon/zip/zip_main.c
++++ b/drivers/crypto/hisilicon/zip/zip_main.c
+@@ -449,10 +449,9 @@ bool hisi_zip_alg_support(struct hisi_qm *qm, u32 alg)
+ 	return false;
+ }
+ 
+-static int hisi_zip_set_high_perf(struct hisi_qm *qm)
++static void hisi_zip_set_high_perf(struct hisi_qm *qm)
+ {
+ 	u32 val;
+-	int ret;
+ 
+ 	val = readl_relaxed(qm->io_base + HZIP_HIGH_PERF_OFFSET);
+ 	if (perf_mode == HZIP_HIGH_COMP_PERF)
+@@ -462,13 +461,6 @@ static int hisi_zip_set_high_perf(struct hisi_qm *qm)
+ 
+ 	/* Set perf mode */
+ 	writel(val, qm->io_base + HZIP_HIGH_PERF_OFFSET);
+-	ret = readl_relaxed_poll_timeout(qm->io_base + HZIP_HIGH_PERF_OFFSET,
+-					 val, val == perf_mode, HZIP_DELAY_1_US,
+-					 HZIP_POLL_TIMEOUT_US);
+-	if (ret)
+-		pci_err(qm->pdev, "failed to set perf mode\n");
+-
+-	return ret;
+ }
+ 
+ static void hisi_zip_open_sva_prefetch(struct hisi_qm *qm)
+@@ -565,6 +557,7 @@ static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
+ 		writel(AXUSER_BASE, base + HZIP_DATA_WUSER_32_63);
+ 		writel(AXUSER_BASE, base + HZIP_SGL_RUSER_32_63);
+ 	}
++	hisi_zip_open_sva_prefetch(qm);
+ 
+ 	/* let's open all compression/decompression cores */
+ 	dcomp_bm = qm->cap_tables.dev_cap_table[ZIP_DECOMP_ENABLE_BITMAP_IDX].cap_val;
+@@ -576,6 +569,7 @@ static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
+ 	       CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) |
+ 	       FIELD_PREP(CQC_CACHE_WB_THRD, 1), base + QM_CACHE_CTL);
+ 
++	hisi_zip_set_high_perf(qm);
+ 	hisi_zip_enable_clock_gate(qm);
+ 
+ 	return 0;
+@@ -1171,11 +1165,6 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
+ 	if (ret)
+ 		return ret;
+ 
+-	ret = hisi_zip_set_high_perf(qm);
+-	if (ret)
+-		return ret;
+-
+-	hisi_zip_open_sva_prefetch(qm);
+ 	hisi_qm_dev_err_init(qm);
+ 	hisi_zip_debug_regs_clear(qm);
+ 
+diff --git a/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c b/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c
+index fdeca861933cb5..903f31dc663e9d 100644
+--- a/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c
++++ b/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c
+@@ -232,7 +232,7 @@ static int kmb_ocs_dma_prepare(struct ahash_request *req)
+ 	struct device *dev = rctx->hcu_dev->dev;
+ 	unsigned int remainder = 0;
+ 	unsigned int total;
+-	size_t nents;
++	int nents;
+ 	size_t count;
+ 	int rc;
+ 	int i;
+@@ -253,6 +253,9 @@ static int kmb_ocs_dma_prepare(struct ahash_request *req)
+ 	/* Determine the number of scatter gather list entries to process. */
+ 	nents = sg_nents_for_len(req->src, rctx->sg_data_total - remainder);
+ 
++	if (nents < 0)
++		return nents;
++
+ 	/* If there are entries to process, map them. */
+ 	if (nents) {
+ 		rctx->sg_dma_nents = dma_map_sg(dev, req->src, nents,
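The type change above matters because sg_nents_for_len() returns a negative errno on failure; stored into a size_t, that error value becomes a huge positive count and a `nents < 0` test could never fire. A standalone model of the bug (stub function and values are illustrative):

#include <stdio.h>
#include <stddef.h>

/* Stand-in for sg_nents_for_len(): returns -EINVAL (-22) on failure. */
static int sg_nents_stub(int fail)
{
	return fail ? -22 : 3;
}

int main(void)
{
	size_t nents_wrong = sg_nents_stub(1);	/* error wraps to SIZE_MAX - 21 */
	int    nents_right = sg_nents_stub(1);

	printf("as size_t: %zu (negative check impossible)\n", nents_wrong);
	printf("as int:    %d (error propagated: %s)\n",
	       nents_right, nents_right < 0 ? "yes" : "no");
	return 0;
}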
+diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
+index 1493a373baf71e..cf68b213c3e62f 100644
+--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
++++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
+@@ -1614,7 +1614,7 @@ int otx2_cpt_dl_custom_egrp_create(struct otx2_cptpf_dev *cptpf,
+ 		return -EINVAL;
+ 	}
+ 	err_msg = "Invalid engine group format";
+-	strscpy(tmp_buf, ctx->val.vstr, strlen(ctx->val.vstr) + 1);
++	strscpy(tmp_buf, ctx->val.vstr);
+ 	start = tmp_buf;
+ 
+ 	has_se = has_ie = has_ae = false;
+diff --git a/drivers/devfreq/event/rockchip-dfi.c b/drivers/devfreq/event/rockchip-dfi.c
+index e2a1e4463b6f5d..712df781436cec 100644
+--- a/drivers/devfreq/event/rockchip-dfi.c
++++ b/drivers/devfreq/event/rockchip-dfi.c
+@@ -116,6 +116,7 @@ struct rockchip_dfi {
+ 	int buswidth[DMC_MAX_CHANNELS];
+ 	int ddrmon_stride;
+ 	bool ddrmon_ctrl_single;
++	unsigned int count_multiplier;	/* number of data clocks per count */
+ };
+ 
+ static int rockchip_dfi_enable(struct rockchip_dfi *dfi)
+@@ -435,7 +436,7 @@ static u64 rockchip_ddr_perf_event_get_count(struct perf_event *event)
+ 
+ 	switch (event->attr.config) {
+ 	case PERF_EVENT_CYCLES:
+-		count = total.c[0].clock_cycles;
++		count = total.c[0].clock_cycles * dfi->count_multiplier;
+ 		break;
+ 	case PERF_EVENT_READ_BYTES:
+ 		for (i = 0; i < dfi->max_channels; i++)
+@@ -656,6 +657,9 @@ static int rockchip_ddr_perf_init(struct rockchip_dfi *dfi)
+ 		break;
+ 	}
+ 
++	if (!dfi->count_multiplier)
++		dfi->count_multiplier = 1;
++
+ 	ret = perf_pmu_register(pmu, "rockchip_ddr", -1);
+ 	if (ret)
+ 		return ret;
+@@ -752,6 +756,7 @@ static int rk3588_dfi_init(struct rockchip_dfi *dfi)
+ 	dfi->max_channels = 4;
+ 
+ 	dfi->ddrmon_stride = 0x4000;
++	dfi->count_multiplier = 2;
+ 
+ 	return 0;
+ };
+diff --git a/drivers/devfreq/mtk-cci-devfreq.c b/drivers/devfreq/mtk-cci-devfreq.c
+index 7ad5225b0381d2..6e608d92f7e66a 100644
+--- a/drivers/devfreq/mtk-cci-devfreq.c
++++ b/drivers/devfreq/mtk-cci-devfreq.c
+@@ -386,7 +386,8 @@ static int mtk_ccifreq_probe(struct platform_device *pdev)
+ out_free_resources:
+ 	if (regulator_is_enabled(drv->proc_reg))
+ 		regulator_disable(drv->proc_reg);
+-	if (drv->sram_reg && regulator_is_enabled(drv->sram_reg))
++	if (!IS_ERR_OR_NULL(drv->sram_reg) &&
++	    regulator_is_enabled(drv->sram_reg))
+ 		regulator_disable(drv->sram_reg);
+ 
+ 	return ret;
+diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c
+index ac4b3d95531c5d..d8cd12d906a726 100644
+--- a/drivers/edac/i10nm_base.c
++++ b/drivers/edac/i10nm_base.c
+@@ -967,6 +967,15 @@ static bool i10nm_check_ecc(struct skx_imc *imc, int chan)
+ 	return !!GET_BITFIELD(mcmtr, 2, 2);
+ }
+ 
++static bool i10nm_channel_disabled(struct skx_imc *imc, int chan)
++{
++	u32 mcmtr = I10NM_GET_MCMTR(imc, chan);
++
++	edac_dbg(1, "mc%d ch%d mcmtr reg %x\n", imc->mc, chan, mcmtr);
++
++	return (mcmtr == ~0 || GET_BITFIELD(mcmtr, 18, 18));
++}
++
+ static int i10nm_get_dimm_config(struct mem_ctl_info *mci,
+ 				 struct res_config *cfg)
+ {
+@@ -980,6 +989,11 @@ static int i10nm_get_dimm_config(struct mem_ctl_info *mci,
+ 		if (!imc->mbase)
+ 			continue;
+ 
++		if (i10nm_channel_disabled(imc, i)) {
++			edac_dbg(1, "mc%d ch%d is disabled.\n", imc->mc, i);
++			continue;
++		}
++
+ 		ndimms = 0;
+ 
+ 		if (res_cfg->type != GNR)
+diff --git a/drivers/firmware/arm_scmi/transports/virtio.c b/drivers/firmware/arm_scmi/transports/virtio.c
+index d349766bc0b267..f78b87f3340373 100644
+--- a/drivers/firmware/arm_scmi/transports/virtio.c
++++ b/drivers/firmware/arm_scmi/transports/virtio.c
+@@ -870,6 +870,9 @@ static int scmi_vio_probe(struct virtio_device *vdev)
+ 	/* Ensure initialized scmi_vdev is visible */
+ 	smp_store_mb(scmi_vdev, vdev);
+ 
++	/* Set device ready */
++	virtio_device_ready(vdev);
++
+ 	ret = platform_driver_register(&scmi_virtio_driver);
+ 	if (ret) {
+ 		vdev->priv = NULL;
+diff --git a/drivers/firmware/meson/Kconfig b/drivers/firmware/meson/Kconfig
+index f2fdd375664822..179f5d46d8ddff 100644
+--- a/drivers/firmware/meson/Kconfig
++++ b/drivers/firmware/meson/Kconfig
+@@ -5,7 +5,7 @@
+ config MESON_SM
+ 	tristate "Amlogic Secure Monitor driver"
+ 	depends on ARCH_MESON || COMPILE_TEST
+-	default y
++	default ARCH_MESON
+ 	depends on ARM64_4K_PAGES
+ 	help
+ 	  Say y here to enable the Amlogic secure monitor driver
+diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
+index 805d6662c88b6d..21376d98ee4982 100644
+--- a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
+@@ -623,7 +623,22 @@ static void uvd_v3_1_enable_mgcg(struct amdgpu_device *adev,
+  *
+  * @handle: handle used to pass amdgpu_device pointer
+  *
+- * Initialize the hardware, boot up the VCPU and do some testing
++ * Initialize the hardware, boot up the VCPU and do some testing.
++ *
++ * On SI, the UVD is meant to be used in a specific power state,
++ * or alternatively the driver can manually enable its clock.
++ * In amdgpu we use the dedicated UVD power state when DPM is enabled.
++ * Calling amdgpu_dpm_enable_uvd makes DPM select the UVD power state
++ * for the SMU and afterwards enables the UVD clock.
++ * This is automatically done by amdgpu_uvd_ring_begin_use when work
++ * is submitted to the UVD ring. Here, we have to call it manually
++ * in order to power up UVD before firmware validation.
++ *
++ * Note that we must not disable the UVD clock here, as that would
++ * cause the ring test to fail. However, UVD is powered off
++ * automatically after the ring test: amdgpu_uvd_ring_end_use calls
++ * the UVD idle work handler which will disable the UVD clock when
++ * all fences are signalled.
+  */
+ static int uvd_v3_1_hw_init(void *handle)
+ {
+@@ -633,6 +648,15 @@ static int uvd_v3_1_hw_init(void *handle)
+ 	int r;
+ 
+ 	uvd_v3_1_mc_resume(adev);
++	uvd_v3_1_enable_mgcg(adev, true);
++
++	/* Make sure UVD is powered during FW validation.
++	 * It's going to be automatically powered off after the ring test.
++	 */
++	if (adev->pm.dpm_enabled)
++		amdgpu_dpm_enable_uvd(adev, true);
++	else
++		amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
+ 
+ 	r = uvd_v3_1_fw_validate(adev);
+ 	if (r) {
+@@ -640,9 +664,6 @@ static int uvd_v3_1_hw_init(void *handle)
+ 		return r;
+ 	}
+ 
+-	uvd_v3_1_enable_mgcg(adev, true);
+-	amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
+-
+ 	uvd_v3_1_start(adev);
+ 
+ 	r = amdgpu_ring_test_helper(ring);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+index 3e9e0f36cd3f47..a8d4b3a3e77afb 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+@@ -4231,7 +4231,7 @@ svm_ioctl(struct kfd_process *p, enum kfd_ioctl_svm_op op, uint64_t start,
+ 		r = svm_range_get_attr(p, mm, start, size, nattrs, attrs);
+ 		break;
+ 	default:
+-		r = EINVAL;
++		r = -EINVAL;
+ 		break;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c
+index 9ba6cb67655f4a..6c75aa82327ac1 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c
+@@ -139,7 +139,6 @@ void dml32_rq_dlg_get_rq_reg(display_rq_regs_st *rq_regs,
+ 	if (dual_plane) {
+ 		unsigned int p1_pte_row_height_linear = get_dpte_row_height_linear_c(mode_lib, e2e_pipe_param,
+ 				num_pipes, pipe_idx);
+-		;
+ 		if (src->sw_mode == dm_sw_linear)
+ 			ASSERT(p1_pte_row_height_linear >= 8);
+ 
+diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c
+index 42efe838fa85c5..2d2d2d5e676341 100644
+--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c
++++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c
+@@ -66,6 +66,13 @@ u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
+ 					(amdgpu_crtc->v_border * 2));
+ 
+ 				vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock;
++
++				/* we have issues with mclk switching with
++				 * refresh rates over 120 hz on the non-DC code.
++				 */
++				if (drm_mode_vrefresh(&amdgpu_crtc->hw_mode) > 120)
++					vblank_time_us = 0;
++
+ 				break;
+ 			}
+ 		}
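For scale, a worked example of the computation above with assumed CEA 1080p60 timings (not taken from the patch): 45 blanking lines of 2200 pixels at a 148.5 MHz pixel clock give roughly 666 us of vblank, comfortably above the 450 us GDDR5 switch limit used by si_dpm_vblank_too_short(); the blanking interval shrinks roughly in proportion to the refresh rate, which is why rates above 120 Hz now force vblank_time_us to 0.

#include <stdio.h>

/* Simplified form of the vblank-time computation (borders omitted).
 * clock_khz is the pixel clock in kHz; the result is in microseconds. */
static unsigned int vblank_time_us(unsigned int htotal, unsigned int vtotal,
				   unsigned int vdisplay, unsigned int clock_khz)
{
	unsigned int vblank_in_pixels = htotal * (vtotal - vdisplay);

	return vblank_in_pixels * 1000 / clock_khz;
}

int main(void)
{
	/* Assumed CEA 1080p60 timings: 2200x1125 total, 148.5 MHz clock. */
	printf("%u us\n", vblank_time_us(2200, 1125, 1080, 148500)); /* ~666 us */
	return 0;
}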
+diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
+index a5ad1b60597e61..82167eca26683c 100644
+--- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
++++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
+@@ -3066,7 +3066,13 @@ static bool si_dpm_vblank_too_short(void *handle)
+ 	/* we never hit the non-gddr5 limit so disable it */
+ 	u32 switch_limit = adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 0;
+ 
+-	if (vblank_time < switch_limit)
++	/* Consider zero vblank time too short and disable MCLK switching.
++	 * Note that the vblank time is set to maximum when no displays are attached,
++	 * so we'll still enable MCLK switching in that case.
++	 */
++	if (vblank_time == 0)
++		return true;
++	else if (vblank_time < switch_limit)
+ 		return true;
+ 	else
+ 		return false;
+@@ -3424,12 +3430,14 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
+ {
+ 	struct  si_ps *ps = si_get_ps(rps);
+ 	struct amdgpu_clock_and_voltage_limits *max_limits;
++	struct amdgpu_connector *conn;
+ 	bool disable_mclk_switching = false;
+ 	bool disable_sclk_switching = false;
+ 	u32 mclk, sclk;
+ 	u16 vddc, vddci, min_vce_voltage = 0;
+ 	u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
+ 	u32 max_sclk = 0, max_mclk = 0;
++	u32 high_pixelclock_count = 0;
+ 	int i;
+ 
+ 	if (adev->asic_type == CHIP_HAINAN) {
+@@ -3457,6 +3465,35 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
+ 		}
+ 	}
+ 
++	/* We define "high pixelclock" for SI as higher than necessary for 4K 30Hz.
++	 * For example, 4K 60Hz and 1080p 144Hz fall into this category.
++	 * Count how many such displays are connected.
++	 */
++	for (i = 0; i < adev->mode_info.num_crtc; i++) {
++		if (!(adev->pm.dpm.new_active_crtcs & (1 << i)) ||
++			!adev->mode_info.crtcs[i]->enabled)
++			continue;
++
++		conn = to_amdgpu_connector(adev->mode_info.crtcs[i]->connector);
++
++		if (conn->pixelclock_for_modeset > 297000)
++			high_pixelclock_count++;
++	}
++
++	/* These are some ad-hoc fixes to some issues observed with SI GPUs.
++	 * They are necessary because we don't have something like dce_calcs
++	 * for these GPUs to calculate bandwidth requirements.
++	 */
++	if (high_pixelclock_count) {
++		/* On Oland, we observe some flickering when two 4K 60Hz
++		 * displays are connected, possibly because voltage is too low.
++		 * Raise the voltage by requiring a higher SCLK.
++		 * (Voltage cannot be adjusted independently of SCLK.)
++		 */
++		if (high_pixelclock_count > 1 && adev->asic_type == CHIP_OLAND)
++			disable_sclk_switching = true;
++	}
++
+ 	if (rps->vce_active) {
+ 		rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
+ 		rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk;
+@@ -5613,14 +5650,10 @@ static int si_populate_smc_t(struct amdgpu_device *adev,
+ 
+ static int si_disable_ulv(struct amdgpu_device *adev)
+ {
+-	struct si_power_info *si_pi = si_get_pi(adev);
+-	struct si_ulv_param *ulv = &si_pi->ulv;
+-
+-	if (ulv->supported)
+-		return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
+-			0 : -EINVAL;
++	PPSMC_Result r;
+ 
+-	return 0;
++	r = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableULV);
++	return (r == PPSMC_Result_OK) ? 0 : -EINVAL;
+ }
+ 
+ static bool si_is_state_ulv_compatible(struct amdgpu_device *adev,
+@@ -5793,9 +5826,9 @@ static int si_upload_smc_data(struct amdgpu_device *adev)
+ {
+ 	struct amdgpu_crtc *amdgpu_crtc = NULL;
+ 	int i;
+-
+-	if (adev->pm.dpm.new_active_crtc_count == 0)
+-		return 0;
++	u32 crtc_index = 0;
++	u32 mclk_change_block_cp_min = 0;
++	u32 mclk_change_block_cp_max = 0;
+ 
+ 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
+ 		if (adev->pm.dpm.new_active_crtcs & (1 << i)) {
+@@ -5804,26 +5837,31 @@ static int si_upload_smc_data(struct amdgpu_device *adev)
+ 		}
+ 	}
+ 
+-	if (amdgpu_crtc == NULL)
+-		return 0;
++	/* When a display is plugged in, program these so that the SMC
++	 * performs MCLK switching when it doesn't cause flickering.
++	 * When no display is plugged in, there is no need to restrict
++	 * MCLK switching, so program them to zero.
++	 */
++	if (adev->pm.dpm.new_active_crtc_count && amdgpu_crtc) {
++		crtc_index = amdgpu_crtc->crtc_id;
+ 
+-	if (amdgpu_crtc->line_time <= 0)
+-		return 0;
++		if (amdgpu_crtc->line_time) {
++			mclk_change_block_cp_min = 200 / amdgpu_crtc->line_time;
++			mclk_change_block_cp_max = 100 / amdgpu_crtc->line_time;
++		}
++	}
+ 
+-	if (si_write_smc_soft_register(adev,
+-				       SI_SMC_SOFT_REGISTER_crtc_index,
+-				       amdgpu_crtc->crtc_id) != PPSMC_Result_OK)
+-		return 0;
++	si_write_smc_soft_register(adev,
++		SI_SMC_SOFT_REGISTER_crtc_index,
++		crtc_index);
+ 
+-	if (si_write_smc_soft_register(adev,
+-				       SI_SMC_SOFT_REGISTER_mclk_change_block_cp_min,
+-				       amdgpu_crtc->wm_high / amdgpu_crtc->line_time) != PPSMC_Result_OK)
+-		return 0;
++	si_write_smc_soft_register(adev,
++		SI_SMC_SOFT_REGISTER_mclk_change_block_cp_min,
++		mclk_change_block_cp_min);
+ 
+-	if (si_write_smc_soft_register(adev,
+-				       SI_SMC_SOFT_REGISTER_mclk_change_block_cp_max,
+-				       amdgpu_crtc->wm_low / amdgpu_crtc->line_time) != PPSMC_Result_OK)
+-		return 0;
++	si_write_smc_soft_register(adev,
++		SI_SMC_SOFT_REGISTER_mclk_change_block_cp_max,
++		mclk_change_block_cp_max);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
+index 3eb955333c809e..66369b6a023f02 100644
+--- a/drivers/gpu/drm/bridge/Kconfig
++++ b/drivers/gpu/drm/bridge/Kconfig
+@@ -101,6 +101,7 @@ config DRM_ITE_IT6505
+ 	select EXTCON
+ 	select CRYPTO
+ 	select CRYPTO_HASH
++	select REGMAP_I2C
+ 	help
+ 	  ITE IT6505 DisplayPort bridge chip driver.
+ 
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
+index 07035ab77b792e..4597fdb653588f 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
+@@ -445,7 +445,7 @@ static void _dpu_encoder_phys_wb_handle_wbdone_timeout(
+ static int dpu_encoder_phys_wb_wait_for_commit_done(
+ 		struct dpu_encoder_phys *phys_enc)
+ {
+-	unsigned long ret;
++	int ret;
+ 	struct dpu_encoder_wait_info wait_info;
+ 	struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
+ 
+diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35560.c b/drivers/gpu/drm/panel/panel-novatek-nt35560.c
+index 5bbea734123bc0..ee04c55175bb85 100644
+--- a/drivers/gpu/drm/panel/panel-novatek-nt35560.c
++++ b/drivers/gpu/drm/panel/panel-novatek-nt35560.c
+@@ -161,7 +161,7 @@ static int nt35560_set_brightness(struct backlight_device *bl)
+ 		par = 0x00;
+ 		ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY,
+ 					 &par, 1);
+-		if (ret) {
++		if (ret < 0) {
+ 			dev_err(nt->dev, "failed to disable display backlight (%d)\n", ret);
+ 			return ret;
+ 		}
+diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
+index ac77d1246b9453..811265648a5828 100644
+--- a/drivers/gpu/drm/radeon/r600_cs.c
++++ b/drivers/gpu/drm/radeon/r600_cs.c
+@@ -1408,7 +1408,7 @@ static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel,
+ 			      unsigned block_align, unsigned height_align, unsigned base_align,
+ 			      unsigned *l0_size, unsigned *mipmap_size)
+ {
+-	unsigned offset, i, level;
++	unsigned offset, i;
+ 	unsigned width, height, depth, size;
+ 	unsigned blocksize;
+ 	unsigned nbx, nby;
+@@ -1420,7 +1420,7 @@ static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel,
+ 	w0 = r600_mip_minify(w0, 0);
+ 	h0 = r600_mip_minify(h0, 0);
+ 	d0 = r600_mip_minify(d0, 0);
+-	for(i = 0, offset = 0, level = blevel; i < nlevels; i++, level++) {
++	for (i = 0, offset = 0; i < nlevels; i++) {
+ 		width = r600_mip_minify(w0, i);
+ 		nbx = r600_fmt_get_nblocksx(format, width);
+ 
+diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
+index c887f48756f4be..bbd6f23bce7895 100644
+--- a/drivers/hid/hidraw.c
++++ b/drivers/hid/hidraw.c
+@@ -394,27 +394,15 @@ static int hidraw_revoke(struct hidraw_list *list)
+ 	return 0;
+ }
+ 
+-static long hidraw_ioctl(struct file *file, unsigned int cmd,
+-							unsigned long arg)
++static long hidraw_fixed_size_ioctl(struct file *file, struct hidraw *dev, unsigned int cmd,
++				    void __user *arg)
+ {
+-	struct inode *inode = file_inode(file);
+-	unsigned int minor = iminor(inode);
+-	long ret = 0;
+-	struct hidraw *dev;
+-	struct hidraw_list *list = file->private_data;
+-	void __user *user_arg = (void __user*) arg;
+-
+-	down_read(&minors_rwsem);
+-	dev = hidraw_table[minor];
+-	if (!dev || !dev->exist || hidraw_is_revoked(list)) {
+-		ret = -ENODEV;
+-		goto out;
+-	}
++	struct hid_device *hid = dev->hid;
+ 
+ 	switch (cmd) {
+ 		case HIDIOCGRDESCSIZE:
+-			if (put_user(dev->hid->rsize, (int __user *)arg))
+-				ret = -EFAULT;
++			if (put_user(hid->rsize, (int __user *)arg))
++				return -EFAULT;
+ 			break;
+ 
+ 		case HIDIOCGRDESC:
+@@ -422,113 +410,145 @@ static long hidraw_ioctl(struct file *file, unsigned int cmd,
+ 				__u32 len;
+ 
+ 				if (get_user(len, (int __user *)arg))
+-					ret = -EFAULT;
+-				else if (len > HID_MAX_DESCRIPTOR_SIZE - 1)
+-					ret = -EINVAL;
+-				else if (copy_to_user(user_arg + offsetof(
+-					struct hidraw_report_descriptor,
+-					value[0]),
+-					dev->hid->rdesc,
+-					min(dev->hid->rsize, len)))
+-					ret = -EFAULT;
++					return -EFAULT;
++
++				if (len > HID_MAX_DESCRIPTOR_SIZE - 1)
++					return -EINVAL;
++
++				if (copy_to_user(arg + offsetof(
++				    struct hidraw_report_descriptor,
++				    value[0]),
++				    hid->rdesc,
++				    min(hid->rsize, len)))
++					return -EFAULT;
++
+ 				break;
+ 			}
+ 		case HIDIOCGRAWINFO:
+ 			{
+ 				struct hidraw_devinfo dinfo;
+ 
+-				dinfo.bustype = dev->hid->bus;
+-				dinfo.vendor = dev->hid->vendor;
+-				dinfo.product = dev->hid->product;
+-				if (copy_to_user(user_arg, &dinfo, sizeof(dinfo)))
+-					ret = -EFAULT;
++				dinfo.bustype = hid->bus;
++				dinfo.vendor = hid->vendor;
++				dinfo.product = hid->product;
++				if (copy_to_user(arg, &dinfo, sizeof(dinfo)))
++					return -EFAULT;
+ 				break;
+ 			}
+ 		case HIDIOCREVOKE:
+ 			{
+-				if (user_arg)
+-					ret = -EINVAL;
+-				else
+-					ret = hidraw_revoke(list);
+-				break;
++				struct hidraw_list *list = file->private_data;
++
++				if (arg)
++					return -EINVAL;
++
++				return hidraw_revoke(list);
+ 			}
+ 		default:
+-			{
+-				struct hid_device *hid = dev->hid;
+-				if (_IOC_TYPE(cmd) != 'H') {
+-					ret = -EINVAL;
+-					break;
+-				}
++			/*
++			 * None of the above ioctls can return -EAGAIN, so
++			 * use it as a marker that we need to check the
++			 * variable-length ioctls.
++			 */
++			return -EAGAIN;
++	}
+ 
+-				if (_IOC_NR(cmd) == _IOC_NR(HIDIOCSFEATURE(0))) {
+-					int len = _IOC_SIZE(cmd);
+-					ret = hidraw_send_report(file, user_arg, len, HID_FEATURE_REPORT);
+-					break;
+-				}
+-				if (_IOC_NR(cmd) == _IOC_NR(HIDIOCGFEATURE(0))) {
+-					int len = _IOC_SIZE(cmd);
+-					ret = hidraw_get_report(file, user_arg, len, HID_FEATURE_REPORT);
+-					break;
+-				}
++	return 0;
++}
+ 
+-				if (_IOC_NR(cmd) == _IOC_NR(HIDIOCSINPUT(0))) {
+-					int len = _IOC_SIZE(cmd);
+-					ret = hidraw_send_report(file, user_arg, len, HID_INPUT_REPORT);
+-					break;
+-				}
+-				if (_IOC_NR(cmd) == _IOC_NR(HIDIOCGINPUT(0))) {
+-					int len = _IOC_SIZE(cmd);
+-					ret = hidraw_get_report(file, user_arg, len, HID_INPUT_REPORT);
+-					break;
+-				}
++static long hidraw_rw_variable_size_ioctl(struct file *file, struct hidraw *dev, unsigned int cmd,
++					  void __user *user_arg)
++{
++	int len = _IOC_SIZE(cmd);
++
++	switch (cmd & ~IOCSIZE_MASK) {
++	case HIDIOCSFEATURE(0):
++		return hidraw_send_report(file, user_arg, len, HID_FEATURE_REPORT);
++	case HIDIOCGFEATURE(0):
++		return hidraw_get_report(file, user_arg, len, HID_FEATURE_REPORT);
++	case HIDIOCSINPUT(0):
++		return hidraw_send_report(file, user_arg, len, HID_INPUT_REPORT);
++	case HIDIOCGINPUT(0):
++		return hidraw_get_report(file, user_arg, len, HID_INPUT_REPORT);
++	case HIDIOCSOUTPUT(0):
++		return hidraw_send_report(file, user_arg, len, HID_OUTPUT_REPORT);
++	case HIDIOCGOUTPUT(0):
++		return hidraw_get_report(file, user_arg, len, HID_OUTPUT_REPORT);
++	}
+ 
+-				if (_IOC_NR(cmd) == _IOC_NR(HIDIOCSOUTPUT(0))) {
+-					int len = _IOC_SIZE(cmd);
+-					ret = hidraw_send_report(file, user_arg, len, HID_OUTPUT_REPORT);
+-					break;
+-				}
+-				if (_IOC_NR(cmd) == _IOC_NR(HIDIOCGOUTPUT(0))) {
+-					int len = _IOC_SIZE(cmd);
+-					ret = hidraw_get_report(file, user_arg, len, HID_OUTPUT_REPORT);
+-					break;
+-				}
++	return -EINVAL;
++}
+ 
+-				/* Begin Read-only ioctls. */
+-				if (_IOC_DIR(cmd) != _IOC_READ) {
+-					ret = -EINVAL;
+-					break;
+-				}
++static long hidraw_ro_variable_size_ioctl(struct file *file, struct hidraw *dev, unsigned int cmd,
++					  void __user *user_arg)
++{
++	struct hid_device *hid = dev->hid;
++	int len = _IOC_SIZE(cmd);
++	int field_len;
++
++	switch (cmd & ~IOCSIZE_MASK) {
++	case HIDIOCGRAWNAME(0):
++		field_len = strlen(hid->name) + 1;
++		if (len > field_len)
++			len = field_len;
++		return copy_to_user(user_arg, hid->name, len) ? -EFAULT : len;
++	case HIDIOCGRAWPHYS(0):
++		field_len = strlen(hid->phys) + 1;
++		if (len > field_len)
++			len = field_len;
++		return copy_to_user(user_arg, hid->phys, len) ? -EFAULT : len;
++	case HIDIOCGRAWUNIQ(0):
++		field_len = strlen(hid->uniq) + 1;
++		if (len > field_len)
++			len = field_len;
++		return copy_to_user(user_arg, hid->uniq, len) ? -EFAULT : len;
++	}
+ 
+-				if (_IOC_NR(cmd) == _IOC_NR(HIDIOCGRAWNAME(0))) {
+-					int len = strlen(hid->name) + 1;
+-					if (len > _IOC_SIZE(cmd))
+-						len = _IOC_SIZE(cmd);
+-					ret = copy_to_user(user_arg, hid->name, len) ?
+-						-EFAULT : len;
+-					break;
+-				}
++	return -EINVAL;
++}
+ 
+-				if (_IOC_NR(cmd) == _IOC_NR(HIDIOCGRAWPHYS(0))) {
+-					int len = strlen(hid->phys) + 1;
+-					if (len > _IOC_SIZE(cmd))
+-						len = _IOC_SIZE(cmd);
+-					ret = copy_to_user(user_arg, hid->phys, len) ?
+-						-EFAULT : len;
+-					break;
+-				}
++static long hidraw_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
++{
++	struct inode *inode = file_inode(file);
++	unsigned int minor = iminor(inode);
++	struct hidraw *dev;
++	struct hidraw_list *list = file->private_data;
++	void __user *user_arg = (void __user *)arg;
++	int ret;
+ 
+-				if (_IOC_NR(cmd) == _IOC_NR(HIDIOCGRAWUNIQ(0))) {
+-					int len = strlen(hid->uniq) + 1;
+-					if (len > _IOC_SIZE(cmd))
+-						len = _IOC_SIZE(cmd);
+-					ret = copy_to_user(user_arg, hid->uniq, len) ?
+-						-EFAULT : len;
+-					break;
+-				}
+-			}
++	down_read(&minors_rwsem);
++	dev = hidraw_table[minor];
++	if (!dev || !dev->exist || hidraw_is_revoked(list)) {
++		ret = -ENODEV;
++		goto out;
++	}
++
++	if (_IOC_TYPE(cmd) != 'H') {
++		ret = -EINVAL;
++		goto out;
++	}
+ 
++	if (_IOC_NR(cmd) > HIDIOCTL_LAST || _IOC_NR(cmd) == 0) {
+ 		ret = -ENOTTY;
++		goto out;
+ 	}
++
++	ret = hidraw_fixed_size_ioctl(file, dev, cmd, user_arg);
++	if (ret != -EAGAIN)
++		goto out;
++
++	switch (_IOC_DIR(cmd)) {
++	case (_IOC_READ | _IOC_WRITE):
++		ret = hidraw_rw_variable_size_ioctl(file, dev, cmd, user_arg);
++		break;
++	case _IOC_READ:
++		ret = hidraw_ro_variable_size_ioctl(file, dev, cmd, user_arg);
++		break;
++	default:
++		/* Any other IOC_DIR is wrong */
++		ret = -EINVAL;
++	}
++
+ out:
+ 	up_read(&minors_rwsem);
+ 	return ret;
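The dispatch above relies on how Linux packs direction, type ('H'), number, and size into a single ioctl word: masking off the size field with ~IOCSIZE_MASK lets one case label match every length of a variable-size ioctl such as HIDIOCSFEATURE(len). A userspace sketch using the standard uapi headers:

#include <stdio.h>
#include <linux/ioctl.h>
#include <linux/hidraw.h>

int main(void)
{
	unsigned int a = HIDIOCSFEATURE(8);	/* 8-byte feature report */
	unsigned int b = HIDIOCSFEATURE(64);	/* 64-byte feature report */

	/* Different sizes, same command once the size field is masked off. */
	printf("%#x %#x -> %s\n", a, b,
	       (a & ~IOCSIZE_MASK) == (b & ~IOCSIZE_MASK) ? "match" : "differ");

	/* The fields the new dispatch code keys on: */
	printf("type=%c nr=%u dir=%u size=%u\n",
	       _IOC_TYPE(a), _IOC_NR(a), _IOC_DIR(a), _IOC_SIZE(a));
	return 0;
}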
+diff --git a/drivers/hwmon/mlxreg-fan.c b/drivers/hwmon/mlxreg-fan.c
+index c25a54d5b39ad5..0ba9195c9d713e 100644
+--- a/drivers/hwmon/mlxreg-fan.c
++++ b/drivers/hwmon/mlxreg-fan.c
+@@ -113,8 +113,8 @@ struct mlxreg_fan {
+ 	int divider;
+ };
+ 
+-static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
+-				    unsigned long state);
++static int _mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
++				     unsigned long state, bool thermal);
+ 
+ static int
+ mlxreg_fan_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+@@ -224,8 +224,9 @@ mlxreg_fan_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ 				 * last thermal state.
+ 				 */
+ 				if (pwm->last_hwmon_state >= pwm->last_thermal_state)
+-					return mlxreg_fan_set_cur_state(pwm->cdev,
+-									pwm->last_hwmon_state);
++					return _mlxreg_fan_set_cur_state(pwm->cdev,
++									 pwm->last_hwmon_state,
++									 false);
+ 				return 0;
+ 			}
+ 			return regmap_write(fan->regmap, pwm->reg, val);
+@@ -357,9 +358,8 @@ static int mlxreg_fan_get_cur_state(struct thermal_cooling_device *cdev,
+ 	return 0;
+ }
+ 
+-static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
+-				    unsigned long state)
+-
++static int _mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
++				     unsigned long state, bool thermal)
+ {
+ 	struct mlxreg_fan_pwm *pwm = cdev->devdata;
+ 	struct mlxreg_fan *fan = pwm->fan;
+@@ -369,7 +369,8 @@ static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
+ 		return -EINVAL;
+ 
+ 	/* Save thermal state. */
+-	pwm->last_thermal_state = state;
++	if (thermal)
++		pwm->last_thermal_state = state;
+ 
+ 	state = max_t(unsigned long, state, pwm->last_hwmon_state);
+ 	err = regmap_write(fan->regmap, pwm->reg,
+@@ -381,6 +382,13 @@ static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
+ 	return 0;
+ }
+ 
++static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
++				    unsigned long state)
++
++{
++	return _mlxreg_fan_set_cur_state(cdev, state, true);
++}
++
+ static const struct thermal_cooling_device_ops mlxreg_fan_cooling_ops = {
+ 	.get_max_state	= mlxreg_fan_get_max_state,
+ 	.get_cur_state	= mlxreg_fan_get_cur_state,
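A compact model of the cooperation the new thermal flag restores (a sketch with illustrative states, not driver code): the fan always runs at the max of the thermal-core request and the hwmon request, and only genuine thermal-core calls update last_thermal_state, so a hwmon-triggered restore no longer pollutes the thermal history.

#include <stdio.h>

static unsigned long last_thermal_state;
static unsigned long last_hwmon_state = 4;	/* userspace asked for 4 */

static unsigned long set_cur_state(unsigned long state, int thermal)
{
	if (thermal)
		last_thermal_state = state;
	/* Never drop below what hwmon (userspace) requested. */
	return state > last_hwmon_state ? state : last_hwmon_state;
}

int main(void)
{
	/* Thermal core escalates to 7: applied as-is. */
	printf("pwm state=%lu\n", set_cur_state(7, 1));
	/* Hwmon restore of its own state: thermal history stays at 7. */
	printf("pwm state=%lu thermal=%lu\n",
	       set_cur_state(last_hwmon_state, 0), last_thermal_state);
	return 0;
}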
+diff --git a/drivers/hwtracing/coresight/coresight-catu.c b/drivers/hwtracing/coresight/coresight-catu.c
+index 25fd02955c38d6..abfff42b20c931 100644
+--- a/drivers/hwtracing/coresight/coresight-catu.c
++++ b/drivers/hwtracing/coresight/coresight-catu.c
+@@ -521,6 +521,10 @@ static int __catu_probe(struct device *dev, struct resource *res)
+ 	struct coresight_platform_data *pdata = NULL;
+ 	void __iomem *base;
+ 
++	drvdata->atclk = devm_clk_get_optional_enabled(dev, "atclk");
++	if (IS_ERR(drvdata->atclk))
++		return PTR_ERR(drvdata->atclk);
++
+ 	catu_desc.name = coresight_alloc_device_name(&catu_devs, dev);
+ 	if (!catu_desc.name)
+ 		return -ENOMEM;
+@@ -668,18 +672,26 @@ static int catu_runtime_suspend(struct device *dev)
+ {
+ 	struct catu_drvdata *drvdata = dev_get_drvdata(dev);
+ 
+-	if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk))
+-		clk_disable_unprepare(drvdata->pclk);
++	clk_disable_unprepare(drvdata->atclk);
++	clk_disable_unprepare(drvdata->pclk);
++
+ 	return 0;
+ }
+ 
+ static int catu_runtime_resume(struct device *dev)
+ {
+ 	struct catu_drvdata *drvdata = dev_get_drvdata(dev);
++	int ret;
+ 
+-	if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk))
+-		clk_prepare_enable(drvdata->pclk);
+-	return 0;
++	ret = clk_prepare_enable(drvdata->pclk);
++	if (ret)
++		return ret;
++
++	ret = clk_prepare_enable(drvdata->atclk);
++	if (ret)
++		clk_disable_unprepare(drvdata->pclk);
++
++	return ret;
+ }
+ #endif
+ 
+diff --git a/drivers/hwtracing/coresight/coresight-catu.h b/drivers/hwtracing/coresight/coresight-catu.h
+index 755776cd19c5bb..6e6b7aac206dca 100644
+--- a/drivers/hwtracing/coresight/coresight-catu.h
++++ b/drivers/hwtracing/coresight/coresight-catu.h
+@@ -62,6 +62,7 @@
+ 
+ struct catu_drvdata {
+ 	struct clk *pclk;
++	struct clk *atclk;
+ 	void __iomem *base;
+ 	struct coresight_device *csdev;
+ 	int irq;
+diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c
+index b7941d8abbfe7a..f20d4cab8f1dfb 100644
+--- a/drivers/hwtracing/coresight/coresight-core.c
++++ b/drivers/hwtracing/coresight/coresight-core.c
+@@ -1200,8 +1200,9 @@ struct coresight_device *coresight_register(struct coresight_desc *desc)
+ 		goto out_unlock;
+ 	}
+ 
+-	if (csdev->type == CORESIGHT_DEV_TYPE_SINK ||
+-	    csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) {
++	if ((csdev->type == CORESIGHT_DEV_TYPE_SINK ||
++	     csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) &&
++	    sink_ops(csdev)->alloc_buffer) {
+ 		ret = etm_perf_add_symlink_sink(csdev);
+ 
+ 		if (ret) {
+diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c
+index be8b46f26ddc83..7b9eaeb115d21f 100644
+--- a/drivers/hwtracing/coresight/coresight-etm4x-core.c
++++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c
+@@ -481,7 +481,8 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
+ 		etm4x_relaxed_write32(csa, config->seq_rst, TRCSEQRSTEVR);
+ 		etm4x_relaxed_write32(csa, config->seq_state, TRCSEQSTR);
+ 	}
+-	etm4x_relaxed_write32(csa, config->ext_inp, TRCEXTINSELR);
++	if (drvdata->numextinsel)
++		etm4x_relaxed_write32(csa, config->ext_inp, TRCEXTINSELR);
+ 	for (i = 0; i < drvdata->nr_cntr; i++) {
+ 		etm4x_relaxed_write32(csa, config->cntrldvr[i], TRCCNTRLDVRn(i));
+ 		etm4x_relaxed_write32(csa, config->cntr_ctrl[i], TRCCNTCTLRn(i));
+@@ -1362,6 +1363,7 @@ static void etm4_init_arch_data(void *info)
+ 	etmidr5 = etm4x_relaxed_read32(csa, TRCIDR5);
+ 	/* NUMEXTIN, bits[8:0] number of external inputs implemented */
+ 	drvdata->nr_ext_inp = FIELD_GET(TRCIDR5_NUMEXTIN_MASK, etmidr5);
++	drvdata->numextinsel = FIELD_GET(TRCIDR5_NUMEXTINSEL_MASK, etmidr5);
+ 	/* TRACEIDSIZE, bits[21:16] indicates the trace ID width */
+ 	drvdata->trcid_size = FIELD_GET(TRCIDR5_TRACEIDSIZE_MASK, etmidr5);
+ 	/* ATBTRIG, bit[22] implementation can support ATB triggers? */
+@@ -1789,7 +1791,9 @@ static int __etm4_cpu_save(struct etmv4_drvdata *drvdata)
+ 		state->trcseqrstevr = etm4x_read32(csa, TRCSEQRSTEVR);
+ 		state->trcseqstr = etm4x_read32(csa, TRCSEQSTR);
+ 	}
+-	state->trcextinselr = etm4x_read32(csa, TRCEXTINSELR);
++
++	if (drvdata->numextinsel)
++		state->trcextinselr = etm4x_read32(csa, TRCEXTINSELR);
+ 
+ 	for (i = 0; i < drvdata->nr_cntr; i++) {
+ 		state->trccntrldvr[i] = etm4x_read32(csa, TRCCNTRLDVRn(i));
+@@ -1921,7 +1925,8 @@ static void __etm4_cpu_restore(struct etmv4_drvdata *drvdata)
+ 		etm4x_relaxed_write32(csa, state->trcseqrstevr, TRCSEQRSTEVR);
+ 		etm4x_relaxed_write32(csa, state->trcseqstr, TRCSEQSTR);
+ 	}
+-	etm4x_relaxed_write32(csa, state->trcextinselr, TRCEXTINSELR);
++	if (drvdata->numextinsel)
++		etm4x_relaxed_write32(csa, state->trcextinselr, TRCEXTINSELR);
+ 
+ 	for (i = 0; i < drvdata->nr_cntr; i++) {
+ 		etm4x_relaxed_write32(csa, state->trccntrldvr[i], TRCCNTRLDVRn(i));
+@@ -2152,6 +2157,10 @@ static int etm4_probe(struct device *dev)
+ 	if (WARN_ON(!drvdata))
+ 		return -ENOMEM;
+ 
++	drvdata->atclk = devm_clk_get_optional_enabled(dev, "atclk");
++	if (IS_ERR(drvdata->atclk))
++		return PTR_ERR(drvdata->atclk);
++
+ 	if (pm_save_enable == PARAM_PM_SAVE_FIRMWARE)
+ 		pm_save_enable = coresight_loses_context_with_cpu(dev) ?
+ 			       PARAM_PM_SAVE_SELF_HOSTED : PARAM_PM_SAVE_NEVER;
+@@ -2400,8 +2409,8 @@ static int etm4_runtime_suspend(struct device *dev)
+ {
+ 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev);
+ 
+-	if (drvdata->pclk && !IS_ERR(drvdata->pclk))
+-		clk_disable_unprepare(drvdata->pclk);
++	clk_disable_unprepare(drvdata->atclk);
++	clk_disable_unprepare(drvdata->pclk);
+ 
+ 	return 0;
+ }
+@@ -2409,11 +2418,17 @@ static int etm4_runtime_suspend(struct device *dev)
+ static int etm4_runtime_resume(struct device *dev)
+ {
+ 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev);
++	int ret;
+ 
+-	if (drvdata->pclk && !IS_ERR(drvdata->pclk))
+-		clk_prepare_enable(drvdata->pclk);
++	ret = clk_prepare_enable(drvdata->pclk);
++	if (ret)
++		return ret;
+ 
+-	return 0;
++	ret = clk_prepare_enable(drvdata->atclk);
++	if (ret)
++		clk_disable_unprepare(drvdata->pclk);
++
++	return ret;
+ }
+ #endif
+ 
+diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h
+index 9e9165f62e81fe..3683966bd06039 100644
+--- a/drivers/hwtracing/coresight/coresight-etm4x.h
++++ b/drivers/hwtracing/coresight/coresight-etm4x.h
+@@ -162,6 +162,7 @@
+ #define TRCIDR4_NUMVMIDC_MASK			GENMASK(31, 28)
+ 
+ #define TRCIDR5_NUMEXTIN_MASK			GENMASK(8, 0)
++#define TRCIDR5_NUMEXTINSEL_MASK               GENMASK(11, 9)
+ #define TRCIDR5_TRACEIDSIZE_MASK		GENMASK(21, 16)
+ #define TRCIDR5_ATBTRIG				BIT(22)
+ #define TRCIDR5_LPOVERRIDE			BIT(23)
+@@ -919,7 +920,8 @@ struct etmv4_save_state {
+ 
+ /**
+  * struct etm4_drvdata - specifics associated to an ETM component
+- * @pclk        APB clock if present, otherwise NULL
++ * @pclk:       APB clock if present, otherwise NULL
++ * @atclk:      Optional clock for the core parts of the ETMv4.
+  * @base:       Memory mapped base address for this component.
+  * @csdev:      Component vitals needed by the framework.
+  * @spinlock:   Only one at a time pls.
+@@ -987,6 +989,7 @@ struct etmv4_save_state {
+  */
+ struct etmv4_drvdata {
+ 	struct clk			*pclk;
++	struct clk			*atclk;
+ 	void __iomem			*base;
+ 	struct coresight_device		*csdev;
+ 	spinlock_t			spinlock;
+@@ -998,6 +1001,7 @@ struct etmv4_drvdata {
+ 	u8				nr_cntr;
+ 	u8				nr_ext_inp;
+ 	u8				numcidc;
++	u8				numextinsel;
+ 	u8				numvmidc;
+ 	u8				nrseqstate;
+ 	u8				nr_event;
+diff --git a/drivers/hwtracing/coresight/coresight-tmc-core.c b/drivers/hwtracing/coresight/coresight-tmc-core.c
+index 475fa4bb6813b9..96ef2517dd4393 100644
+--- a/drivers/hwtracing/coresight/coresight-tmc-core.c
++++ b/drivers/hwtracing/coresight/coresight-tmc-core.c
+@@ -480,6 +480,10 @@ static int __tmc_probe(struct device *dev, struct resource *res)
+ 	struct coresight_desc desc = { 0 };
+ 	struct coresight_dev_list *dev_list = NULL;
+ 
++	drvdata->atclk = devm_clk_get_optional_enabled(dev, "atclk");
++	if (IS_ERR(drvdata->atclk))
++		return PTR_ERR(drvdata->atclk);
++
+ 	ret = -ENOMEM;
+ 
+ 	/* Validity for the resource is already checked by the AMBA core */
+@@ -700,18 +704,26 @@ static int tmc_runtime_suspend(struct device *dev)
+ {
+ 	struct tmc_drvdata *drvdata = dev_get_drvdata(dev);
+ 
+-	if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk))
+-		clk_disable_unprepare(drvdata->pclk);
++	clk_disable_unprepare(drvdata->atclk);
++	clk_disable_unprepare(drvdata->pclk);
++
+ 	return 0;
+ }
+ 
+ static int tmc_runtime_resume(struct device *dev)
+ {
+ 	struct tmc_drvdata *drvdata = dev_get_drvdata(dev);
++	int ret;
+ 
+-	if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk))
+-		clk_prepare_enable(drvdata->pclk);
+-	return 0;
++	ret = clk_prepare_enable(drvdata->pclk);
++	if (ret)
++		return ret;
++
++	ret = clk_prepare_enable(drvdata->atclk);
++	if (ret)
++		clk_disable_unprepare(drvdata->pclk);
++
++	return ret;
+ }
+ #endif
+ 
+diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h
+index 2671926be62a37..2a53acbb5990be 100644
+--- a/drivers/hwtracing/coresight/coresight-tmc.h
++++ b/drivers/hwtracing/coresight/coresight-tmc.h
+@@ -166,6 +166,7 @@ struct etr_buf {
+ 
+ /**
+  * struct tmc_drvdata - specifics associated to an TMC component
++ * @atclk:	optional clock for the core parts of the TMC.
+  * @pclk:	APB clock if present, otherwise NULL
+  * @base:	memory mapped base address for this component.
+  * @csdev:	component vitals needed by the framework.
+@@ -191,6 +192,7 @@ struct etr_buf {
+  * @perf_buf:	PERF buffer for ETR.
+  */
+ struct tmc_drvdata {
++	struct clk		*atclk;
+ 	struct clk		*pclk;
+ 	void __iomem		*base;
+ 	struct coresight_device	*csdev;
+diff --git a/drivers/hwtracing/coresight/coresight-tpda.c b/drivers/hwtracing/coresight/coresight-tpda.c
+index bfca103f9f8476..865fd6273e5e46 100644
+--- a/drivers/hwtracing/coresight/coresight-tpda.c
++++ b/drivers/hwtracing/coresight/coresight-tpda.c
+@@ -71,12 +71,15 @@ static int tpdm_read_element_size(struct tpda_drvdata *drvdata,
+ 	if (tpdm_has_dsb_dataset(tpdm_data)) {
+ 		rc = fwnode_property_read_u32(dev_fwnode(csdev->dev.parent),
+ 				"qcom,dsb-element-bits", &drvdata->dsb_esize);
++		if (rc)
++			goto out;
+ 	}
+ 	if (tpdm_has_cmb_dataset(tpdm_data)) {
+ 		rc = fwnode_property_read_u32(dev_fwnode(csdev->dev.parent),
+ 				"qcom,cmb-element-bits", &drvdata->cmb_esize);
+ 	}
+ 
++out:
+ 	if (rc)
+ 		dev_warn_once(&csdev->dev,
+ 			"Failed to read TPDM Element size: %d\n", rc);
+diff --git a/drivers/hwtracing/coresight/coresight-trbe.c b/drivers/hwtracing/coresight/coresight-trbe.c
+index 96a32b21366994..d771980a278dcb 100644
+--- a/drivers/hwtracing/coresight/coresight-trbe.c
++++ b/drivers/hwtracing/coresight/coresight-trbe.c
+@@ -22,7 +22,8 @@
+ #include "coresight-self-hosted-trace.h"
+ #include "coresight-trbe.h"
+ 
+-#define PERF_IDX2OFF(idx, buf) ((idx) % ((buf)->nr_pages << PAGE_SHIFT))
++#define PERF_IDX2OFF(idx, buf) \
++	((idx) % ((unsigned long)(buf)->nr_pages << PAGE_SHIFT))
+ 
+ /*
+  * A padding packet that will help the user space tools
+@@ -744,12 +745,12 @@ static void *arm_trbe_alloc_buffer(struct coresight_device *csdev,
+ 
+ 	buf = kzalloc_node(sizeof(*buf), GFP_KERNEL, trbe_alloc_node(event));
+ 	if (!buf)
+-		return ERR_PTR(-ENOMEM);
++		return NULL;
+ 
+ 	pglist = kcalloc(nr_pages, sizeof(*pglist), GFP_KERNEL);
+ 	if (!pglist) {
+ 		kfree(buf);
+-		return ERR_PTR(-ENOMEM);
++		return NULL;
+ 	}
+ 
+ 	for (i = 0; i < nr_pages; i++)
+@@ -759,7 +760,7 @@ static void *arm_trbe_alloc_buffer(struct coresight_device *csdev,
+ 	if (!buf->trbe_base) {
+ 		kfree(pglist);
+ 		kfree(buf);
+-		return ERR_PTR(-ENOMEM);
++		return NULL;
+ 	}
+ 	buf->trbe_limit = buf->trbe_base + nr_pages * PAGE_SIZE;
+ 	buf->trbe_write = buf->trbe_base;
+@@ -1266,7 +1267,7 @@ static void arm_trbe_register_coresight_cpu(struct trbe_drvdata *drvdata, int cp
+ 	 * into the device for that purpose.
+ 	 */
+ 	desc.pdata = devm_kzalloc(dev, sizeof(*desc.pdata), GFP_KERNEL);
+-	if (IS_ERR(desc.pdata))
++	if (!desc.pdata)
+ 		goto cpu_clear;
+ 
+ 	desc.type = CORESIGHT_DEV_TYPE_SINK;
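The PERF_IDX2OFF change at the top of this file fixes a 32-bit overflow: nr_pages is a 32-bit quantity, so shifting it by PAGE_SHIFT before widening wraps once the AUX buffer reaches 4 GiB. A standalone illustration (hypothetical sizes, 64-bit host assumed):

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed 4 KiB pages */

int main(void)
{
	unsigned int nr_pages = 1u << 20;	/* a 4 GiB AUX buffer */

	/* Shift first, widen later: wraps to 0 in 32 bits. */
	printf("wrapped: %#lx\n", (unsigned long)(nr_pages << PAGE_SHIFT));

	/* Widen first, as the fixed macro does: correct 4 GiB size. */
	printf("correct: %#lx\n", (unsigned long)nr_pages << PAGE_SHIFT);
	return 0;
}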
+diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
+index ef9bed2f2dccb9..24c0ada72f6a56 100644
+--- a/drivers/i2c/busses/i2c-designware-platdrv.c
++++ b/drivers/i2c/busses/i2c-designware-platdrv.c
+@@ -311,6 +311,7 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
+ 
+ exit_probe:
+ 	dw_i2c_plat_pm_cleanup(dev);
++	i2c_dw_prepare_clk(dev, false);
+ exit_reset:
+ 	reset_control_assert(dev->rst);
+ 	return ret;
+@@ -328,9 +329,11 @@ static void dw_i2c_plat_remove(struct platform_device *pdev)
+ 	i2c_dw_disable(dev);
+ 
+ 	pm_runtime_dont_use_autosuspend(device);
+-	pm_runtime_put_sync(device);
++	pm_runtime_put_noidle(device);
+ 	dw_i2c_plat_pm_cleanup(dev);
+ 
++	i2c_dw_prepare_clk(dev, false);
++
+ 	i2c_dw_remove_lock_support(dev);
+ 
+ 	reset_control_assert(dev->rst);
+diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
+index e0ba653dec2d67..8beef73960c74a 100644
+--- a/drivers/i2c/busses/i2c-mt65xx.c
++++ b/drivers/i2c/busses/i2c-mt65xx.c
+@@ -1243,6 +1243,7 @@ static int mtk_i2c_transfer(struct i2c_adapter *adap,
+ {
+ 	int ret;
+ 	int left_num = num;
++	bool write_then_read_en = false;
+ 	struct mtk_i2c *i2c = i2c_get_adapdata(adap);
+ 
+ 	ret = clk_bulk_enable(I2C_MT65XX_CLK_MAX, i2c->clocks);
+@@ -1256,6 +1257,7 @@ static int mtk_i2c_transfer(struct i2c_adapter *adap,
+ 		if (!(msgs[0].flags & I2C_M_RD) && (msgs[1].flags & I2C_M_RD) &&
+ 		    msgs[0].addr == msgs[1].addr) {
+ 			i2c->auto_restart = 0;
++			write_then_read_en = true;
+ 		}
+ 	}
+ 
+@@ -1280,12 +1282,10 @@ static int mtk_i2c_transfer(struct i2c_adapter *adap,
+ 		else
+ 			i2c->op = I2C_MASTER_WR;
+ 
+-		if (!i2c->auto_restart) {
+-			if (num > 1) {
+-				/* combined two messages into one transaction */
+-				i2c->op = I2C_MASTER_WRRD;
+-				left_num--;
+-			}
++		if (write_then_read_en) {
++			/* combined two messages into one transaction */
++			i2c->op = I2C_MASTER_WRRD;
++			left_num--;
+ 		}
+ 
+ 		/* always use DMA mode. */
+@@ -1293,7 +1293,10 @@ static int mtk_i2c_transfer(struct i2c_adapter *adap,
+ 		if (ret < 0)
+ 			goto err_exit;
+ 
+-		msgs++;
++		if (i2c->op == I2C_MASTER_WRRD)
++			msgs += 2;
++		else
++			msgs++;
+ 	}
+ 	/* the return value is number of executed messages */
+ 	ret = num;
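The transfer shape that now sets write_then_read_en is exactly one write followed by one read to the same address, i.e. a typical register read. A userspace sketch via i2c-dev (bus number and device address are hypothetical):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>

int main(void)
{
	unsigned char reg = 0x10, val = 0;
	struct i2c_msg msgs[2] = {
		{ .addr = 0x50, .flags = 0,        .len = 1, .buf = &reg },
		{ .addr = 0x50, .flags = I2C_M_RD, .len = 1, .buf = &val },
	};
	struct i2c_rdwr_ioctl_data xfer = { .msgs = msgs, .nmsgs = 2 };
	int fd = open("/dev/i2c-0", O_RDWR);	/* hypothetical bus */

	if (fd < 0 || ioctl(fd, I2C_RDWR, &xfer) < 0)
		perror("i2c write-then-read");
	else
		printf("reg 0x%02x = 0x%02x\n", reg, val);
	if (fd >= 0)
		close(fd);
	return 0;
}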
+diff --git a/drivers/i3c/master/svc-i3c-master.c b/drivers/i3c/master/svc-i3c-master.c
+index 474a96ebda2265..a1945bf9ef19e9 100644
+--- a/drivers/i3c/master/svc-i3c-master.c
++++ b/drivers/i3c/master/svc-i3c-master.c
+@@ -377,6 +377,7 @@ static int svc_i3c_master_handle_ibi(struct svc_i3c_master *master,
+ 						SVC_I3C_MSTATUS_COMPLETE(val), 0, 1000);
+ 	if (ret) {
+ 		dev_err(master->dev, "Timeout when polling for COMPLETE\n");
++		i3c_generic_ibi_recycle_slot(data->ibi_pool, slot);
+ 		return ret;
+ 	}
+ 
+@@ -438,9 +439,24 @@ static void svc_i3c_master_ibi_work(struct work_struct *work)
+ 	 */
+ 	writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);
+ 
+-	/* Acknowledge the incoming interrupt with the AUTOIBI mechanism */
+-	writel(SVC_I3C_MCTRL_REQUEST_AUTO_IBI |
+-	       SVC_I3C_MCTRL_IBIRESP_AUTO,
++	/*
++	 * Write REQUEST_START_ADDR request to emit broadcast address for arbitration,
++	 * instead of using AUTO_IBI.
++	 *
++	 * Using an AutoIBI request may cause the controller to remain in AutoIBI
++	 * state when there is a glitch on the SDA line (high->low->high):
++	 * 1. SDA high->low, raising an interrupt to execute the IBI isr.
++	 * 2. SDA low->high.
++	 * 3. The IBI isr writes an AutoIBI request.
++	 * 4. The controller will not start the AutoIBI process because SDA is not low.
++	 * 5. IBIWON polling times out.
++	 * 6. The controller remains in AutoIBI state and rejects the EmitStop request.
++	 */
++	writel(SVC_I3C_MCTRL_REQUEST_START_ADDR |
++	       SVC_I3C_MCTRL_TYPE_I3C |
++	       SVC_I3C_MCTRL_IBIRESP_MANUAL |
++	       SVC_I3C_MCTRL_DIR(SVC_I3C_MCTRL_DIR_WRITE) |
++	       SVC_I3C_MCTRL_ADDR(I3C_BROADCAST_ADDR),
+ 	       master->regs + SVC_I3C_MCTRL);
+ 
+ 	/* Wait for IBIWON, should take approximately 100us */
+@@ -460,10 +476,15 @@ static void svc_i3c_master_ibi_work(struct work_struct *work)
+ 	switch (ibitype) {
+ 	case SVC_I3C_MSTATUS_IBITYPE_IBI:
+ 		dev = svc_i3c_master_dev_from_addr(master, ibiaddr);
+-		if (!dev || !is_events_enabled(master, SVC_I3C_EVENT_IBI))
++		if (!dev || !is_events_enabled(master, SVC_I3C_EVENT_IBI)) {
+ 			svc_i3c_master_nack_ibi(master);
+-		else
++		} else {
++			if (dev->info.bcr & I3C_BCR_IBI_PAYLOAD)
++				svc_i3c_master_ack_ibi(master, true);
++			else
++				svc_i3c_master_ack_ibi(master, false);
+ 			svc_i3c_master_handle_ibi(master, dev);
++		}
+ 		break;
+ 	case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN:
+ 		if (is_events_enabled(master, SVC_I3C_EVENT_HOTJOIN))
+diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
+index 1155487f7aeac8..85ba80c57d08f7 100644
+--- a/drivers/iio/inkern.c
++++ b/drivers/iio/inkern.c
+@@ -10,6 +10,7 @@
+ #include <linux/mutex.h>
+ #include <linux/property.h>
+ #include <linux/slab.h>
++#include <linux/units.h>
+ 
+ #include <linux/iio/iio.h>
+ #include <linux/iio/iio-opaque.h>
+@@ -602,7 +603,7 @@ static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
+ {
+ 	int scale_type, scale_val, scale_val2;
+ 	int offset_type, offset_val, offset_val2;
+-	s64 raw64 = raw;
++	s64 denominator, raw64 = raw;
+ 
+ 	offset_type = iio_channel_read(chan, &offset_val, &offset_val2,
+ 				       IIO_CHAN_INFO_OFFSET);
+@@ -637,7 +638,7 @@ static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
+ 		 * If no channel scaling is available apply consumer scale to
+ 		 * raw value and return.
+ 		 */
+-		*processed = raw * scale;
++		*processed = raw64 * scale;
+ 		return 0;
+ 	}
+ 
+@@ -646,20 +647,19 @@ static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
+ 		*processed = raw64 * scale_val * scale;
+ 		break;
+ 	case IIO_VAL_INT_PLUS_MICRO:
+-		if (scale_val2 < 0)
+-			*processed = -raw64 * scale_val * scale;
+-		else
+-			*processed = raw64 * scale_val * scale;
+-		*processed += div_s64(raw64 * (s64)scale_val2 * scale,
+-				      1000000LL);
+-		break;
+ 	case IIO_VAL_INT_PLUS_NANO:
+-		if (scale_val2 < 0)
+-			*processed = -raw64 * scale_val * scale;
+-		else
+-			*processed = raw64 * scale_val * scale;
+-		*processed += div_s64(raw64 * (s64)scale_val2 * scale,
+-				      1000000000LL);
++		switch (scale_type) {
++		case IIO_VAL_INT_PLUS_MICRO:
++			denominator = MICRO;
++			break;
++		case IIO_VAL_INT_PLUS_NANO:
++			denominator = NANO;
++			break;
++		}
++		*processed = raw64 * scale * abs(scale_val);
++		*processed += div_s64(raw64 * scale * abs(scale_val2), denominator);
++		if (scale_val < 0 || scale_val2 < 0)
++			*processed *= -1;
+ 		break;
+ 	case IIO_VAL_FRACTIONAL:
+ 		*processed = div_s64(raw64 * (s64)scale_val * scale,
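
The rewrite above folds the INT_PLUS_MICRO and INT_PLUS_NANO cases into
one expression: integer and fractional parts are accumulated from
absolute values and the sign is applied once at the end. A worked example
with assumed values, raw = 100, consumer scale = 1000, and a channel
scale of 2.5 encoded as IIO_VAL_INT_PLUS_MICRO (scale_val = 2,
scale_val2 = 500000):

	processed = 100 * 1000 * 2                  /* = 200000 */
	          + 100 * 1000 * 500000 / 1000000   /* =  50000 */
	          = 250000                          /* 100 * 2.5 * 1000 */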
+diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
+index be0743dac3fff3..929e89841c12a6 100644
+--- a/drivers/infiniband/core/addr.c
++++ b/drivers/infiniband/core/addr.c
+@@ -454,14 +454,10 @@ static int addr_resolve_neigh(const struct dst_entry *dst,
+ {
+ 	int ret = 0;
+ 
+-	if (ndev_flags & IFF_LOOPBACK) {
++	if (ndev_flags & IFF_LOOPBACK)
+ 		memcpy(addr->dst_dev_addr, addr->src_dev_addr, MAX_ADDR_LEN);
+-	} else {
+-		if (!(ndev_flags & IFF_NOARP)) {
+-			/* If the device doesn't do ARP internally */
+-			ret = fetch_ha(dst, addr, dst_in, seq);
+-		}
+-	}
++	else
++		ret = fetch_ha(dst, addr, dst_in, seq);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
+index d45e3909dafe1d..50bb3c43f40bf6 100644
+--- a/drivers/infiniband/core/cm.c
++++ b/drivers/infiniband/core/cm.c
+@@ -1032,8 +1032,8 @@ static noinline void cm_destroy_id_wait_timeout(struct ib_cm_id *cm_id,
+ 	struct cm_id_private *cm_id_priv;
+ 
+ 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
+-	pr_err("%s: cm_id=%p timed out. state %d -> %d, refcnt=%d\n", __func__,
+-	       cm_id, old_state, cm_id->state, refcount_read(&cm_id_priv->refcount));
++	pr_err_ratelimited("%s: cm_id=%p timed out. state %d -> %d, refcnt=%d\n", __func__,
++			   cm_id, old_state, cm_id->state, refcount_read(&cm_id_priv->refcount));
+ }
+ 
+ static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
+diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
+index 53571e6b3162ca..66df5bed6a5627 100644
+--- a/drivers/infiniband/core/sa_query.c
++++ b/drivers/infiniband/core/sa_query.c
+@@ -1013,6 +1013,8 @@ int ib_nl_handle_set_timeout(struct sk_buff *skb,
+ 	if (timeout > IB_SA_LOCAL_SVC_TIMEOUT_MAX)
+ 		timeout = IB_SA_LOCAL_SVC_TIMEOUT_MAX;
+ 
++	spin_lock_irqsave(&ib_nl_request_lock, flags);
++
+ 	delta = timeout - sa_local_svc_timeout_ms;
+ 	if (delta < 0)
+ 		abs_delta = -delta;
+@@ -1020,7 +1022,6 @@ int ib_nl_handle_set_timeout(struct sk_buff *skb,
+ 		abs_delta = delta;
+ 
+ 	if (delta != 0) {
+-		spin_lock_irqsave(&ib_nl_request_lock, flags);
+ 		sa_local_svc_timeout_ms = timeout;
+ 		list_for_each_entry(query, &ib_nl_request_list, list) {
+ 			if (delta < 0 && abs_delta > query->timeout)
+@@ -1038,9 +1039,10 @@ int ib_nl_handle_set_timeout(struct sk_buff *skb,
+ 		if (delay)
+ 			mod_delayed_work(ib_nl_wq, &ib_nl_timed_work,
+ 					 (unsigned long)delay);
+-		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
+ 	}
+ 
++	spin_unlock_irqrestore(&ib_nl_request_lock, flags);
++
+ settimeout_out:
+ 	return 0;
+ }
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index 435c456a4fd5b4..f3e58797705d72 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -13,6 +13,7 @@
+ #include <linux/dma-mapping.h>
+ #include <linux/slab.h>
+ #include <linux/bitmap.h>
++#include <linux/log2.h>
+ #include <linux/sched.h>
+ #include <linux/sched/mm.h>
+ #include <linux/sched/task.h>
+@@ -865,6 +866,51 @@ static void fill_esw_mgr_reg_c0(struct mlx5_core_dev *mdev,
+ 	resp->reg_c0.mask = mlx5_eswitch_get_vport_metadata_mask();
+ }
+ 
++/*
++ * Calculate maximum SQ overhead across all QP types.
++ * Other QP types (REG_UMR, UC, RC, UD/SMI/GSI, XRC_TGT)
++ * have smaller overhead than the types calculated below,
++ * so they are implicitly included.
++ */
++static u32 mlx5_ib_calc_max_sq_overhead(void)
++{
++	u32 max_overhead_xrc, overhead_ud_lso, a, b;
++
++	/* XRC_INI */
++	max_overhead_xrc = sizeof(struct mlx5_wqe_xrc_seg);
++	max_overhead_xrc += sizeof(struct mlx5_wqe_ctrl_seg);
++	a = sizeof(struct mlx5_wqe_atomic_seg) +
++	    sizeof(struct mlx5_wqe_raddr_seg);
++	b = sizeof(struct mlx5_wqe_umr_ctrl_seg) +
++	    sizeof(struct mlx5_mkey_seg) +
++	    MLX5_IB_SQ_UMR_INLINE_THRESHOLD / MLX5_IB_UMR_OCTOWORD;
++	max_overhead_xrc += max(a, b);
++
++	/* UD with LSO */
++	overhead_ud_lso = sizeof(struct mlx5_wqe_ctrl_seg);
++	overhead_ud_lso += sizeof(struct mlx5_wqe_eth_pad);
++	overhead_ud_lso += sizeof(struct mlx5_wqe_eth_seg);
++	overhead_ud_lso += sizeof(struct mlx5_wqe_datagram_seg);
++
++	return max(max_overhead_xrc, overhead_ud_lso);
++}
++
++static u32 mlx5_ib_calc_max_qp_wr(struct mlx5_ib_dev *dev)
++{
++	struct mlx5_core_dev *mdev = dev->mdev;
++	u32 max_wqe_bb_units = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
++	u32 max_wqe_size;
++	/* max QP overhead + 1 SGE, no inline, no special features */
++	max_wqe_size = mlx5_ib_calc_max_sq_overhead() +
++		       sizeof(struct mlx5_wqe_data_seg);
++
++	max_wqe_size = roundup_pow_of_two(max_wqe_size);
++
++	max_wqe_size = ALIGN(max_wqe_size, MLX5_SEND_WQE_BB);
++
++	return (max_wqe_bb_units * MLX5_SEND_WQE_BB) / max_wqe_size;
++}
++
+ static int mlx5_ib_query_device(struct ib_device *ibdev,
+ 				struct ib_device_attr *props,
+ 				struct ib_udata *uhw)
+@@ -1023,7 +1069,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
+ 	props->max_mr_size	   = ~0ull;
+ 	props->page_size_cap	   = ~(min_page_size - 1);
+ 	props->max_qp		   = 1 << MLX5_CAP_GEN(mdev, log_max_qp);
+-	props->max_qp_wr	   = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
++	props->max_qp_wr = mlx5_ib_calc_max_qp_wr(dev);
+ 	max_rq_sg =  MLX5_CAP_GEN(mdev, max_wqe_sz_rq) /
+ 		     sizeof(struct mlx5_wqe_data_seg);
+ 	max_sq_desc = min_t(int, MLX5_CAP_GEN(mdev, max_wqe_sz_sq), 512);
+@@ -1767,7 +1813,8 @@ static void deallocate_uars(struct mlx5_ib_dev *dev,
+ }
+ 
+ static int mlx5_ib_enable_lb_mp(struct mlx5_core_dev *master,
+-				struct mlx5_core_dev *slave)
++				struct mlx5_core_dev *slave,
++				struct mlx5_ib_lb_state *lb_state)
+ {
+ 	int err;
+ 
+@@ -1779,6 +1826,7 @@ static int mlx5_ib_enable_lb_mp(struct mlx5_core_dev *master,
+ 	if (err)
+ 		goto out;
+ 
++	lb_state->force_enable = true;
+ 	return 0;
+ 
+ out:
+@@ -1787,16 +1835,22 @@ static int mlx5_ib_enable_lb_mp(struct mlx5_core_dev *master,
+ }
+ 
+ static void mlx5_ib_disable_lb_mp(struct mlx5_core_dev *master,
+-				  struct mlx5_core_dev *slave)
++				  struct mlx5_core_dev *slave,
++				  struct mlx5_ib_lb_state *lb_state)
+ {
+ 	mlx5_nic_vport_update_local_lb(slave, false);
+ 	mlx5_nic_vport_update_local_lb(master, false);
++
++	lb_state->force_enable = false;
+ }
+ 
+ int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
+ {
+ 	int err = 0;
+ 
++	if (dev->lb.force_enable)
++		return 0;
++
+ 	mutex_lock(&dev->lb.mutex);
+ 	if (td)
+ 		dev->lb.user_td++;
+@@ -1818,6 +1872,9 @@ int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
+ 
+ void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
+ {
++	if (dev->lb.force_enable)
++		return;
++
+ 	mutex_lock(&dev->lb.mutex);
+ 	if (td)
+ 		dev->lb.user_td--;
+@@ -3475,7 +3532,7 @@ static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
+ 
+ 	lockdep_assert_held(&mlx5_ib_multiport_mutex);
+ 
+-	mlx5_ib_disable_lb_mp(ibdev->mdev, mpi->mdev);
++	mlx5_ib_disable_lb_mp(ibdev->mdev, mpi->mdev, &ibdev->lb);
+ 
+ 	mlx5_core_mp_event_replay(ibdev->mdev,
+ 				  MLX5_DRIVER_EVENT_AFFILIATION_REMOVED,
+@@ -3572,7 +3629,7 @@ static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev,
+ 				  MLX5_DRIVER_EVENT_AFFILIATION_DONE,
+ 				  &key);
+ 
+-	err = mlx5_ib_enable_lb_mp(ibdev->mdev, mpi->mdev);
++	err = mlx5_ib_enable_lb_mp(ibdev->mdev, mpi->mdev, &ibdev->lb);
+ 	if (err)
+ 		goto unbind;
+ 
+diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
+index 29bde64ea1eac9..f49cb588a856d5 100644
+--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
++++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
+@@ -1083,6 +1083,7 @@ struct mlx5_ib_lb_state {
+ 	u32			user_td;
+ 	int			qps;
+ 	bool			enabled;
++	bool			force_enable;
+ };
+ 
+ struct mlx5_ib_pf_eq {
+diff --git a/drivers/infiniband/sw/rxe/rxe_task.c b/drivers/infiniband/sw/rxe/rxe_task.c
+index 80332638d9e3ac..be6cd8ce4d97ec 100644
+--- a/drivers/infiniband/sw/rxe/rxe_task.c
++++ b/drivers/infiniband/sw/rxe/rxe_task.c
+@@ -132,8 +132,12 @@ static void do_task(struct rxe_task *task)
+ 		 * yield the cpu and reschedule the task
+ 		 */
+ 		if (!ret) {
+-			task->state = TASK_STATE_IDLE;
+-			resched = 1;
++			if (task->state != TASK_STATE_DRAINING) {
++				task->state = TASK_STATE_IDLE;
++				resched = 1;
++			} else {
++				cont = 1;
++			}
+ 			goto exit;
+ 		}
+ 
+diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
+index 7ca0297d68a4a7..d0c0cde09f1186 100644
+--- a/drivers/infiniband/sw/siw/siw_verbs.c
++++ b/drivers/infiniband/sw/siw/siw_verbs.c
+@@ -773,7 +773,7 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
+ 	struct siw_wqe *wqe = tx_wqe(qp);
+ 
+ 	unsigned long flags;
+-	int rv = 0;
++	int rv = 0, imm_err = 0;
+ 
+ 	if (wr && !rdma_is_kernel_res(&qp->base_qp.res)) {
+ 		siw_dbg_qp(qp, "wr must be empty for user mapped sq\n");
+@@ -959,9 +959,17 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
+ 	 * Send directly if SQ processing is not in progress.
+ 	 * Eventual immediate errors (rv < 0) do not affect the involved
+ 	 * RI resources (Verbs, 8.3.1) and thus do not prevent from SQ
+-	 * processing, if new work is already pending. But rv must be passed
+-	 * to caller.
++	 * processing, if new work is already pending. But rv and a pointer
++	 * to failed work request must be passed to caller.
+ 	 */
++	if (unlikely(rv < 0)) {
++		/*
++		 * Immediate error
++		 */
++		siw_dbg_qp(qp, "Immediate error %d\n", rv);
++		imm_err = rv;
++		*bad_wr = wr;
++	}
+ 	if (wqe->wr_status != SIW_WR_IDLE) {
+ 		spin_unlock_irqrestore(&qp->sq_lock, flags);
+ 		goto skip_direct_sending;
+@@ -986,15 +994,10 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
+ 
+ 	up_read(&qp->state_lock);
+ 
+-	if (rv >= 0)
+-		return 0;
+-	/*
+-	 * Immediate error
+-	 */
+-	siw_dbg_qp(qp, "error %d\n", rv);
++	if (unlikely(imm_err))
++		return imm_err;
+ 
+-	*bad_wr = wr;
+-	return rv;
++	return (rv >= 0) ? 0 : rv;
+ }
+ 
+ /*
+diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
+index 2c51ea9d01d777..13336a2fd49c8a 100644
+--- a/drivers/input/misc/uinput.c
++++ b/drivers/input/misc/uinput.c
+@@ -775,6 +775,7 @@ static int uinput_ff_upload_to_user(char __user *buffer,
+ 	if (in_compat_syscall()) {
+ 		struct uinput_ff_upload_compat ff_up_compat;
+ 
++		memset(&ff_up_compat, 0, sizeof(ff_up_compat));
+ 		ff_up_compat.request_id = ff_up->request_id;
+ 		ff_up_compat.retval = ff_up->retval;
+ 		/*
+diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
+index 3ddabc5a2c999b..d7496d47eabe8e 100644
+--- a/drivers/input/touchscreen/atmel_mxt_ts.c
++++ b/drivers/input/touchscreen/atmel_mxt_ts.c
+@@ -3319,7 +3319,7 @@ static int mxt_probe(struct i2c_client *client)
+ 	if (data->reset_gpio) {
+ 		/* Wait a while and then de-assert the RESET GPIO line */
+ 		msleep(MXT_RESET_GPIO_TIME);
+-		gpiod_set_value(data->reset_gpio, 0);
++		gpiod_set_value_cansleep(data->reset_gpio, 0);
+ 		msleep(MXT_RESET_INVALID_CHG);
+ 	}
+ 
+diff --git a/drivers/iommu/intel/debugfs.c b/drivers/iommu/intel/debugfs.c
+index affbf4a1558dee..5aa7f46a420b58 100644
+--- a/drivers/iommu/intel/debugfs.c
++++ b/drivers/iommu/intel/debugfs.c
+@@ -435,8 +435,21 @@ static int domain_translation_struct_show(struct seq_file *m,
+ 			}
+ 			pgd &= VTD_PAGE_MASK;
+ 		} else { /* legacy mode */
+-			pgd = context->lo & VTD_PAGE_MASK;
+-			agaw = context->hi & 7;
++			u8 tt = (u8)(context->lo & GENMASK_ULL(3, 2)) >> 2;
++
++			/*
++			 * According to the Translation Type (TT),
++			 * get the page table pointer (SSPTPTR).
++			 */
++			switch (tt) {
++			case CONTEXT_TT_MULTI_LEVEL:
++			case CONTEXT_TT_DEV_IOTLB:
++				pgd = context->lo & VTD_PAGE_MASK;
++				agaw = context->hi & 7;
++				break;
++			default:
++				goto iommu_unlock;
++			}
+ 		}
+ 
+ 		seq_printf(m, "Device %04x:%02x:%02x.%x ",
+diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
+index f521155fb793b8..df24a62e8ca40d 100644
+--- a/drivers/iommu/intel/iommu.h
++++ b/drivers/iommu/intel/iommu.h
+@@ -541,7 +541,8 @@ enum {
+ #define pasid_supported(iommu)	(sm_supported(iommu) &&			\
+ 				 ecap_pasid((iommu)->ecap))
+ #define ssads_supported(iommu) (sm_supported(iommu) &&                 \
+-				ecap_slads((iommu)->ecap))
++				ecap_slads((iommu)->ecap) &&           \
++				ecap_smpwc(iommu->ecap))
+ #define nested_supported(iommu)	(sm_supported(iommu) &&			\
+ 				 ecap_nest((iommu)->ecap))
+ 
+diff --git a/drivers/leds/flash/leds-qcom-flash.c b/drivers/leds/flash/leds-qcom-flash.c
+index 07a83bb2dfdf62..bb00097b1ae59b 100644
+--- a/drivers/leds/flash/leds-qcom-flash.c
++++ b/drivers/leds/flash/leds-qcom-flash.c
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0-only
+ /*
+- * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2022, 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+  */
+ 
+ #include <linux/bitfield.h>
+@@ -114,36 +114,39 @@ enum {
+ 	REG_THERM_THRSH1,
+ 	REG_THERM_THRSH2,
+ 	REG_THERM_THRSH3,
++	REG_TORCH_CLAMP,
+ 	REG_MAX_COUNT,
+ };
+ 
+ static const struct reg_field mvflash_3ch_regs[REG_MAX_COUNT] = {
+-	REG_FIELD(0x08, 0, 7),			/* status1	*/
+-	REG_FIELD(0x09, 0, 7),                  /* status2	*/
+-	REG_FIELD(0x0a, 0, 7),                  /* status3	*/
+-	REG_FIELD_ID(0x40, 0, 7, 3, 1),         /* chan_timer	*/
+-	REG_FIELD_ID(0x43, 0, 6, 3, 1),         /* itarget	*/
+-	REG_FIELD(0x46, 7, 7),                  /* module_en	*/
+-	REG_FIELD(0x47, 0, 5),                  /* iresolution	*/
+-	REG_FIELD_ID(0x49, 0, 2, 3, 1),         /* chan_strobe	*/
+-	REG_FIELD(0x4c, 0, 2),                  /* chan_en	*/
+-	REG_FIELD(0x56, 0, 2),			/* therm_thrsh1 */
+-	REG_FIELD(0x57, 0, 2),			/* therm_thrsh2 */
+-	REG_FIELD(0x58, 0, 2),			/* therm_thrsh3 */
++	[REG_STATUS1]		= REG_FIELD(0x08, 0, 7),
++	[REG_STATUS2]		= REG_FIELD(0x09, 0, 7),
++	[REG_STATUS3]		= REG_FIELD(0x0a, 0, 7),
++	[REG_CHAN_TIMER]	= REG_FIELD_ID(0x40, 0, 7, 3, 1),
++	[REG_ITARGET]		= REG_FIELD_ID(0x43, 0, 6, 3, 1),
++	[REG_MODULE_EN]		= REG_FIELD(0x46, 7, 7),
++	[REG_IRESOLUTION]	= REG_FIELD(0x47, 0, 5),
++	[REG_CHAN_STROBE]	= REG_FIELD_ID(0x49, 0, 2, 3, 1),
++	[REG_CHAN_EN]		= REG_FIELD(0x4c, 0, 2),
++	[REG_THERM_THRSH1]	= REG_FIELD(0x56, 0, 2),
++	[REG_THERM_THRSH2]	= REG_FIELD(0x57, 0, 2),
++	[REG_THERM_THRSH3]	= REG_FIELD(0x58, 0, 2),
++	[REG_TORCH_CLAMP]	= REG_FIELD(0xec, 0, 6),
+ };
+ 
+ static const struct reg_field mvflash_4ch_regs[REG_MAX_COUNT] = {
+-	REG_FIELD(0x06, 0, 7),			/* status1	*/
+-	REG_FIELD(0x07, 0, 6),			/* status2	*/
+-	REG_FIELD(0x09, 0, 7),			/* status3	*/
+-	REG_FIELD_ID(0x3e, 0, 7, 4, 1),		/* chan_timer	*/
+-	REG_FIELD_ID(0x42, 0, 6, 4, 1),		/* itarget	*/
+-	REG_FIELD(0x46, 7, 7),			/* module_en	*/
+-	REG_FIELD(0x49, 0, 3),			/* iresolution	*/
+-	REG_FIELD_ID(0x4a, 0, 6, 4, 1),		/* chan_strobe	*/
+-	REG_FIELD(0x4e, 0, 3),			/* chan_en	*/
+-	REG_FIELD(0x7a, 0, 2),			/* therm_thrsh1 */
+-	REG_FIELD(0x78, 0, 2),			/* therm_thrsh2 */
++	[REG_STATUS1]		= REG_FIELD(0x06, 0, 7),
++	[REG_STATUS2]		= REG_FIELD(0x07, 0, 6),
++	[REG_STATUS3]		= REG_FIELD(0x09, 0, 7),
++	[REG_CHAN_TIMER]	= REG_FIELD_ID(0x3e, 0, 7, 4, 1),
++	[REG_ITARGET]		= REG_FIELD_ID(0x42, 0, 6, 4, 1),
++	[REG_MODULE_EN]		= REG_FIELD(0x46, 7, 7),
++	[REG_IRESOLUTION]	= REG_FIELD(0x49, 0, 3),
++	[REG_CHAN_STROBE]	= REG_FIELD_ID(0x4a, 0, 6, 4, 1),
++	[REG_CHAN_EN]		= REG_FIELD(0x4e, 0, 3),
++	[REG_THERM_THRSH1]	= REG_FIELD(0x7a, 0, 2),
++	[REG_THERM_THRSH2]	= REG_FIELD(0x78, 0, 2),
++	[REG_TORCH_CLAMP]	= REG_FIELD(0xed, 0, 6),
+ };
+ 
+ struct qcom_flash_data {
+@@ -156,6 +159,7 @@ struct qcom_flash_data {
+ 	u8			max_channels;
+ 	u8			chan_en_bits;
+ 	u8			revision;
++	u8			torch_clamp;
+ };
+ 
+ struct qcom_flash_led {
+@@ -702,6 +706,7 @@ static int qcom_flash_register_led_device(struct device *dev,
+ 	u32 current_ua, timeout_us;
+ 	u32 channels[4];
+ 	int i, rc, count;
++	u8 torch_clamp;
+ 
+ 	count = fwnode_property_count_u32(node, "led-sources");
+ 	if (count <= 0) {
+@@ -751,6 +756,12 @@ static int qcom_flash_register_led_device(struct device *dev,
+ 	current_ua = min_t(u32, current_ua, TORCH_CURRENT_MAX_UA * led->chan_count);
+ 	led->max_torch_current_ma = current_ua / UA_PER_MA;
+ 
++	torch_clamp = (current_ua / led->chan_count) / TORCH_IRES_UA;
++	if (torch_clamp != 0)
++		torch_clamp--;
++
++	flash_data->torch_clamp = max_t(u8, flash_data->torch_clamp, torch_clamp);
++
+ 	if (fwnode_property_present(node, "flash-max-microamp")) {
+ 		flash->led_cdev.flags |= LED_DEV_CAP_FLASH;
+ 
+@@ -918,8 +929,7 @@ static int qcom_flash_led_probe(struct platform_device *pdev)
+ 		flash_data->leds_count++;
+ 	}
+ 
+-	return 0;
+-
++	return regmap_field_write(flash_data->r_fields[REG_TORCH_CLAMP], flash_data->torch_clamp);
+ release:
+ 	fwnode_handle_put(child);
+ 	while (flash_data->v4l2_flash[flash_data->leds_count] && flash_data->leds_count)
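
For the torch-clamp computation above, assume purely for illustration
that TORCH_IRES_UA is 5000 (5 mA per step) and that a node declares
500000 uA across two channels:

	torch_clamp = (500000 / 2) / 5000 = 50
	torch_clamp--;	/* register field is zero-based -> 49 */

The probe path then programs REG_TORCH_CLAMP with the maximum clamp found
across all registered LEDs.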
+diff --git a/drivers/leds/leds-lp55xx-common.c b/drivers/leds/leds-lp55xx-common.c
+index e71456a56ab8da..fd447eb7eb15e2 100644
+--- a/drivers/leds/leds-lp55xx-common.c
++++ b/drivers/leds/leds-lp55xx-common.c
+@@ -212,7 +212,7 @@ int lp55xx_update_program_memory(struct lp55xx_chip *chip,
+ 	 * For LED chip that support page, PAGE is already set in load_engine.
+ 	 */
+ 	if (!cfg->pages_per_engine)
+-		start_addr += LP55xx_BYTES_PER_PAGE * idx;
++		start_addr += LP55xx_BYTES_PER_PAGE * (idx - 1);
+ 
+ 	for (page = 0; page < program_length / LP55xx_BYTES_PER_PAGE; page++) {
+ 		/* Write to the next page each 32 bytes (if supported) */
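
The (idx - 1) above corrects for the engine index being 1-based. Assuming
LP55xx_BYTES_PER_PAGE is 32, as the surrounding comment suggests, the
start addresses for chips without page support become:

	idx = 1  ->  start_addr + 32 * 0
	idx = 2  ->  start_addr + 32 * 1
	idx = 3  ->  start_addr + 32 * 2

whereas the old code offset every engine one page too far, pushing the
last engine past its program memory.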
+diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
+index f3a3f2ef632261..391c1df19a7647 100644
+--- a/drivers/md/dm-core.h
++++ b/drivers/md/dm-core.h
+@@ -162,6 +162,7 @@ struct mapped_device {
+ #define DMF_SUSPENDED_INTERNALLY 7
+ #define DMF_POST_SUSPENDING 8
+ #define DMF_EMULATE_ZONE_APPEND 9
++#define DMF_QUEUE_STOPPED 10
+ 
+ void disable_discard(struct mapped_device *md);
+ void disable_write_zeroes(struct mapped_device *md);
+diff --git a/drivers/md/dm-vdo/indexer/volume-index.c b/drivers/md/dm-vdo/indexer/volume-index.c
+index 12f954a0c5325d..afb062e1f1fb48 100644
+--- a/drivers/md/dm-vdo/indexer/volume-index.c
++++ b/drivers/md/dm-vdo/indexer/volume-index.c
+@@ -836,7 +836,7 @@ static int start_restoring_volume_sub_index(struct volume_sub_index *sub_index,
+ 				    "%zu bytes decoded of %zu expected", offset,
+ 				    sizeof(buffer));
+ 		if (result != VDO_SUCCESS)
+-			result = UDS_CORRUPT_DATA;
++			return UDS_CORRUPT_DATA;
+ 
+ 		if (memcmp(header.magic, MAGIC_START_5, MAGIC_SIZE) != 0) {
+ 			return vdo_log_warning_strerror(UDS_CORRUPT_DATA,
+@@ -928,7 +928,7 @@ static int start_restoring_volume_index(struct volume_index *volume_index,
+ 				    "%zu bytes decoded of %zu expected", offset,
+ 				    sizeof(buffer));
+ 		if (result != VDO_SUCCESS)
+-			result = UDS_CORRUPT_DATA;
++			return UDS_CORRUPT_DATA;
+ 
+ 		if (memcmp(header.magic, MAGIC_START_6, MAGIC_SIZE) != 0)
+ 			return vdo_log_warning_strerror(UDS_CORRUPT_DATA,
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index a7deeda59a55a7..fd84a126f63fb7 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -2918,7 +2918,7 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
+ {
+ 	bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
+ 	bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG;
+-	int r;
++	int r = 0;
+ 
+ 	lockdep_assert_held(&md->suspend_lock);
+ 
+@@ -2970,8 +2970,10 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
+ 	 * Stop md->queue before flushing md->wq in case request-based
+ 	 * dm defers requests to md->wq from md->queue.
+ 	 */
+-	if (dm_request_based(md))
++	if (map && dm_request_based(md)) {
+ 		dm_stop_queue(md->queue);
++		set_bit(DMF_QUEUE_STOPPED, &md->flags);
++	}
+ 
+ 	flush_workqueue(md->wq);
+ 
+@@ -2980,7 +2982,8 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
+ 	 * We call dm_wait_for_completion to wait for all existing requests
+ 	 * to finish.
+ 	 */
+-	r = dm_wait_for_completion(md, task_state);
++	if (map)
++		r = dm_wait_for_completion(md, task_state);
+ 	if (!r)
+ 		set_bit(dmf_suspended_flag, &md->flags);
+ 
+@@ -2993,7 +2996,7 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
+ 	if (r < 0) {
+ 		dm_queue_flush(md);
+ 
+-		if (dm_request_based(md))
++		if (test_and_clear_bit(DMF_QUEUE_STOPPED, &md->flags))
+ 			dm_start_queue(md->queue);
+ 
+ 		unlock_fs(md);
+@@ -3077,7 +3080,7 @@ static int __dm_resume(struct mapped_device *md, struct dm_table *map)
+ 	 * so that mapping of targets can work correctly.
+ 	 * Request-based dm is queueing the deferred I/Os in its request_queue.
+ 	 */
+-	if (dm_request_based(md))
++	if (test_and_clear_bit(DMF_QUEUE_STOPPED, &md->flags))
+ 		dm_start_queue(md->queue);
+ 
+ 	unlock_fs(md);
+diff --git a/drivers/media/i2c/rj54n1cb0c.c b/drivers/media/i2c/rj54n1cb0c.c
+index b7ca39f63dba84..6dfc912168510f 100644
+--- a/drivers/media/i2c/rj54n1cb0c.c
++++ b/drivers/media/i2c/rj54n1cb0c.c
+@@ -1329,10 +1329,13 @@ static int rj54n1_probe(struct i2c_client *client)
+ 			V4L2_CID_GAIN, 0, 127, 1, 66);
+ 	v4l2_ctrl_new_std(&rj54n1->hdl, &rj54n1_ctrl_ops,
+ 			V4L2_CID_AUTO_WHITE_BALANCE, 0, 1, 1, 1);
+-	rj54n1->subdev.ctrl_handler = &rj54n1->hdl;
+-	if (rj54n1->hdl.error)
+-		return rj54n1->hdl.error;
+ 
++	if (rj54n1->hdl.error) {
++		ret = rj54n1->hdl.error;
++		goto err_free_ctrl;
++	}
++
++	rj54n1->subdev.ctrl_handler = &rj54n1->hdl;
+ 	rj54n1->clk_div		= clk_div;
+ 	rj54n1->rect.left	= RJ54N1_COLUMN_SKIP;
+ 	rj54n1->rect.top	= RJ54N1_ROW_SKIP;
+diff --git a/drivers/media/pci/zoran/zoran.h b/drivers/media/pci/zoran/zoran.h
+index 1cd990468d3de9..d05e222b392156 100644
+--- a/drivers/media/pci/zoran/zoran.h
++++ b/drivers/media/pci/zoran/zoran.h
+@@ -154,12 +154,6 @@ struct zoran_jpg_settings {
+ 
+ struct zoran;
+ 
+-/* zoran_fh contains per-open() settings */
+-struct zoran_fh {
+-	struct v4l2_fh fh;
+-	struct zoran *zr;
+-};
+-
+ struct card_info {
+ 	enum card_type type;
+ 	char name[32];
+diff --git a/drivers/media/pci/zoran/zoran_driver.c b/drivers/media/pci/zoran/zoran_driver.c
+index 5c05e64c71a905..80377992a6073f 100644
+--- a/drivers/media/pci/zoran/zoran_driver.c
++++ b/drivers/media/pci/zoran/zoran_driver.c
+@@ -511,12 +511,11 @@ static int zoran_s_fmt_vid_cap(struct file *file, void *__fh,
+ 			       struct v4l2_format *fmt)
+ {
+ 	struct zoran *zr = video_drvdata(file);
+-	struct zoran_fh *fh = __fh;
+ 	int i;
+ 	int res = 0;
+ 
+ 	if (fmt->fmt.pix.pixelformat == V4L2_PIX_FMT_MJPEG)
+-		return zoran_s_fmt_vid_out(file, fh, fmt);
++		return zoran_s_fmt_vid_out(file, __fh, fmt);
+ 
+ 	for (i = 0; i < NUM_FORMATS; i++)
+ 		if (fmt->fmt.pix.pixelformat == zoran_formats[i].fourcc)
+diff --git a/drivers/media/platform/st/sti/delta/delta-mjpeg-dec.c b/drivers/media/platform/st/sti/delta/delta-mjpeg-dec.c
+index 0533d4a083d249..a078f1107300ee 100644
+--- a/drivers/media/platform/st/sti/delta/delta-mjpeg-dec.c
++++ b/drivers/media/platform/st/sti/delta/delta-mjpeg-dec.c
+@@ -239,7 +239,7 @@ static int delta_mjpeg_ipc_open(struct delta_ctx *pctx)
+ 	return 0;
+ }
+ 
+-static int delta_mjpeg_ipc_decode(struct delta_ctx *pctx, struct delta_au *au)
++static int delta_mjpeg_ipc_decode(struct delta_ctx *pctx, dma_addr_t pstart, dma_addr_t pend)
+ {
+ 	struct delta_dev *delta = pctx->dev;
+ 	struct delta_mjpeg_ctx *ctx = to_ctx(pctx);
+@@ -256,8 +256,8 @@ static int delta_mjpeg_ipc_decode(struct delta_ctx *pctx, struct delta_au *au)
+ 
+ 	memset(params, 0, sizeof(*params));
+ 
+-	params->picture_start_addr_p = (u32)(au->paddr);
+-	params->picture_end_addr_p = (u32)(au->paddr + au->size - 1);
++	params->picture_start_addr_p = pstart;
++	params->picture_end_addr_p = pend;
+ 
+ 	/*
+ 	 * !WARNING!
+@@ -374,12 +374,14 @@ static int delta_mjpeg_decode(struct delta_ctx *pctx, struct delta_au *pau)
+ 	struct delta_dev *delta = pctx->dev;
+ 	struct delta_mjpeg_ctx *ctx = to_ctx(pctx);
+ 	int ret;
+-	struct delta_au au = *pau;
++	void *au_vaddr = pau->vaddr;
++	dma_addr_t au_dma = pau->paddr;
++	size_t au_size = pau->size;
+ 	unsigned int data_offset = 0;
+ 	struct mjpeg_header *header = &ctx->header_struct;
+ 
+ 	if (!ctx->header) {
+-		ret = delta_mjpeg_read_header(pctx, au.vaddr, au.size,
++		ret = delta_mjpeg_read_header(pctx, au_vaddr, au_size,
+ 					      header, &data_offset);
+ 		if (ret) {
+ 			pctx->stream_errors++;
+@@ -405,17 +407,17 @@ static int delta_mjpeg_decode(struct delta_ctx *pctx, struct delta_au *pau)
+ 			goto err;
+ 	}
+ 
+-	ret = delta_mjpeg_read_header(pctx, au.vaddr, au.size,
++	ret = delta_mjpeg_read_header(pctx, au_vaddr, au_size,
+ 				      ctx->header, &data_offset);
+ 	if (ret) {
+ 		pctx->stream_errors++;
+ 		goto err;
+ 	}
+ 
+-	au.paddr += data_offset;
+-	au.vaddr += data_offset;
++	au_dma += data_offset;
++	au_vaddr += data_offset;
+ 
+-	ret = delta_mjpeg_ipc_decode(pctx, &au);
++	ret = delta_mjpeg_ipc_decode(pctx, au_dma, au_dma + au_size - 1);
+ 	if (ret)
+ 		goto err;
+ 
+diff --git a/drivers/mfd/rz-mtu3.c b/drivers/mfd/rz-mtu3.c
+index f3dac4a29a8324..9cdfef610398f3 100644
+--- a/drivers/mfd/rz-mtu3.c
++++ b/drivers/mfd/rz-mtu3.c
+@@ -32,7 +32,7 @@ static const unsigned long rz_mtu3_8bit_ch_reg_offs[][13] = {
+ 	[RZ_MTU3_CHAN_2] = MTU_8BIT_CH_1_2(0x204, 0x092, 0x205, 0x200, 0x20c, 0x201, 0x202),
+ 	[RZ_MTU3_CHAN_3] = MTU_8BIT_CH_3_4_6_7(0x008, 0x093, 0x02c, 0x000, 0x04c, 0x002, 0x004, 0x005, 0x038),
+ 	[RZ_MTU3_CHAN_4] = MTU_8BIT_CH_3_4_6_7(0x009, 0x094, 0x02d, 0x001, 0x04d, 0x003, 0x006, 0x007, 0x039),
+-	[RZ_MTU3_CHAN_5] = MTU_8BIT_CH_5(0xab2, 0x1eb, 0xab4, 0xab6, 0xa84, 0xa85, 0xa86, 0xa94, 0xa95, 0xa96, 0xaa4, 0xaa5, 0xaa6),
++	[RZ_MTU3_CHAN_5] = MTU_8BIT_CH_5(0xab2, 0x895, 0xab4, 0xab6, 0xa84, 0xa85, 0xa86, 0xa94, 0xa95, 0xa96, 0xaa4, 0xaa5, 0xaa6),
+ 	[RZ_MTU3_CHAN_6] = MTU_8BIT_CH_3_4_6_7(0x808, 0x893, 0x82c, 0x800, 0x84c, 0x802, 0x804, 0x805, 0x838),
+ 	[RZ_MTU3_CHAN_7] = MTU_8BIT_CH_3_4_6_7(0x809, 0x894, 0x82d, 0x801, 0x84d, 0x803, 0x806, 0x807, 0x839),
+ 	[RZ_MTU3_CHAN_8] = MTU_8BIT_CH_8(0x404, 0x098, 0x400, 0x406, 0x401, 0x402, 0x403)
+diff --git a/drivers/mfd/vexpress-sysreg.c b/drivers/mfd/vexpress-sysreg.c
+index d34d58ce46db2a..5f26ef733fd0d0 100644
+--- a/drivers/mfd/vexpress-sysreg.c
++++ b/drivers/mfd/vexpress-sysreg.c
+@@ -90,6 +90,7 @@ static int vexpress_sysreg_probe(struct platform_device *pdev)
+ 	struct resource *mem;
+ 	void __iomem *base;
+ 	struct gpio_chip *mmc_gpio_chip;
++	int ret;
+ 
+ 	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ 	if (!mem)
+@@ -110,7 +111,10 @@ static int vexpress_sysreg_probe(struct platform_device *pdev)
+ 	bgpio_init(mmc_gpio_chip, &pdev->dev, 0x4, base + SYS_MCI,
+ 			NULL, NULL, NULL, NULL, 0);
+ 	mmc_gpio_chip->ngpio = 2;
+-	devm_gpiochip_add_data(&pdev->dev, mmc_gpio_chip, NULL);
++
++	ret = devm_gpiochip_add_data(&pdev->dev, mmc_gpio_chip, NULL);
++	if (ret)
++		return ret;
+ 
+ 	return devm_mfd_add_devices(&pdev->dev, PLATFORM_DEVID_AUTO,
+ 			vexpress_sysreg_cells,
+diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
+index e567a36275afc5..9d8e51351ff88b 100644
+--- a/drivers/misc/fastrpc.c
++++ b/drivers/misc/fastrpc.c
+@@ -323,11 +323,11 @@ static void fastrpc_free_map(struct kref *ref)
+ 
+ 			perm.vmid = QCOM_SCM_VMID_HLOS;
+ 			perm.perm = QCOM_SCM_PERM_RWX;
+-			err = qcom_scm_assign_mem(map->phys, map->size,
++			err = qcom_scm_assign_mem(map->phys, map->len,
+ 				&src_perms, &perm, 1);
+ 			if (err) {
+ 				dev_err(map->fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d\n",
+-						map->phys, map->size, err);
++						map->phys, map->len, err);
+ 				return;
+ 			}
+ 		}
+@@ -363,26 +363,21 @@ static int fastrpc_map_get(struct fastrpc_map *map)
+ 
+ 
+ static int fastrpc_map_lookup(struct fastrpc_user *fl, int fd,
+-			    struct fastrpc_map **ppmap, bool take_ref)
++			    struct fastrpc_map **ppmap)
+ {
+-	struct fastrpc_session_ctx *sess = fl->sctx;
+ 	struct fastrpc_map *map = NULL;
++	struct dma_buf *buf;
+ 	int ret = -ENOENT;
+ 
++	buf = dma_buf_get(fd);
++	if (IS_ERR(buf))
++		return PTR_ERR(buf);
++
+ 	spin_lock(&fl->lock);
+ 	list_for_each_entry(map, &fl->maps, node) {
+-		if (map->fd != fd)
++		if (map->fd != fd || map->buf != buf)
+ 			continue;
+ 
+-		if (take_ref) {
+-			ret = fastrpc_map_get(map);
+-			if (ret) {
+-				dev_dbg(sess->dev, "%s: Failed to get map fd=%d ret=%d\n",
+-					__func__, fd, ret);
+-				break;
+-			}
+-		}
+-
+ 		*ppmap = map;
+ 		ret = 0;
+ 		break;
+@@ -752,16 +747,14 @@ static const struct dma_buf_ops fastrpc_dma_buf_ops = {
+ 	.release = fastrpc_release,
+ };
+ 
+-static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
++static int fastrpc_map_attach(struct fastrpc_user *fl, int fd,
+ 			      u64 len, u32 attr, struct fastrpc_map **ppmap)
+ {
+ 	struct fastrpc_session_ctx *sess = fl->sctx;
+ 	struct fastrpc_map *map = NULL;
+ 	struct sg_table *table;
+-	int err = 0;
+-
+-	if (!fastrpc_map_lookup(fl, fd, ppmap, true))
+-		return 0;
++	struct scatterlist *sgl = NULL;
++	int err = 0, sgl_index = 0;
+ 
+ 	map = kzalloc(sizeof(*map), GFP_KERNEL);
+ 	if (!map)
+@@ -798,7 +791,15 @@ static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
+ 		map->phys = sg_dma_address(map->table->sgl);
+ 		map->phys += ((u64)fl->sctx->sid << 32);
+ 	}
+-	map->size = len;
++	for_each_sg(map->table->sgl, sgl, map->table->nents,
++		sgl_index)
++		map->size += sg_dma_len(sgl);
++	if (len > map->size) {
++		dev_dbg(sess->dev, "Bad size passed len 0x%llx map size 0x%llx\n",
++				len, map->size);
++		err = -EINVAL;
++		goto map_err;
++	}
+ 	map->va = sg_virt(map->table->sgl);
+ 	map->len = len;
+ 
+@@ -815,10 +816,10 @@ static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
+ 		dst_perms[1].vmid = fl->cctx->vmperms[0].vmid;
+ 		dst_perms[1].perm = QCOM_SCM_PERM_RWX;
+ 		map->attr = attr;
+-		err = qcom_scm_assign_mem(map->phys, (u64)map->size, &src_perms, dst_perms, 2);
++		err = qcom_scm_assign_mem(map->phys, (u64)map->len, &src_perms, dst_perms, 2);
+ 		if (err) {
+ 			dev_err(sess->dev, "Failed to assign memory with phys 0x%llx size 0x%llx err %d\n",
+-					map->phys, map->size, err);
++					map->phys, map->len, err);
+ 			goto map_err;
+ 		}
+ 	}
+@@ -839,6 +840,24 @@ static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
+ 	return err;
+ }
+ 
++static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
++			      u64 len, u32 attr, struct fastrpc_map **ppmap)
++{
++	struct fastrpc_session_ctx *sess = fl->sctx;
++	int err = 0;
++
++	if (!fastrpc_map_lookup(fl, fd, ppmap)) {
++		if (!fastrpc_map_get(*ppmap))
++			return 0;
++		dev_dbg(sess->dev, "%s: Failed to get map fd=%d\n",
++			__func__, fd);
++	}
++
++	err = fastrpc_map_attach(fl, fd, len, attr, ppmap);
++
++	return err;
++}
++
+ /*
+  * Fastrpc payload buffer with metadata looks like:
+  *
+@@ -911,8 +930,12 @@ static int fastrpc_create_maps(struct fastrpc_invoke_ctx *ctx)
+ 		    ctx->args[i].length == 0)
+ 			continue;
+ 
+-		err = fastrpc_map_create(ctx->fl, ctx->args[i].fd,
+-			 ctx->args[i].length, ctx->args[i].attr, &ctx->maps[i]);
++		if (i < ctx->nbufs)
++			err = fastrpc_map_create(ctx->fl, ctx->args[i].fd,
++				 ctx->args[i].length, ctx->args[i].attr, &ctx->maps[i]);
++		else
++			err = fastrpc_map_attach(ctx->fl, ctx->args[i].fd,
++				 ctx->args[i].length, ctx->args[i].attr, &ctx->maps[i]);
+ 		if (err) {
+ 			dev_err(dev, "Error Creating map %d\n", err);
+ 			return -EINVAL;
+@@ -1071,6 +1094,7 @@ static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx,
+ 	struct fastrpc_phy_page *pages;
+ 	u64 *fdlist;
+ 	int i, inbufs, outbufs, handles;
++	int ret = 0;
+ 
+ 	inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
+ 	outbufs = REMOTE_SCALARS_OUTBUFS(ctx->sc);
+@@ -1086,23 +1110,26 @@ static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx,
+ 			u64 len = rpra[i].buf.len;
+ 
+ 			if (!kernel) {
+-				if (copy_to_user((void __user *)dst, src, len))
+-					return -EFAULT;
++				if (copy_to_user((void __user *)dst, src, len)) {
++					ret = -EFAULT;
++					goto cleanup_fdlist;
++				}
+ 			} else {
+ 				memcpy(dst, src, len);
+ 			}
+ 		}
+ 	}
+ 
++cleanup_fdlist:
+ 	/* Clean up fdlist which is updated by DSP */
+ 	for (i = 0; i < FASTRPC_MAX_FDLIST; i++) {
+ 		if (!fdlist[i])
+ 			break;
+-		if (!fastrpc_map_lookup(fl, (int)fdlist[i], &mmap, false))
++		if (!fastrpc_map_lookup(fl, (int)fdlist[i], &mmap))
+ 			fastrpc_map_put(mmap);
+ 	}
+ 
+-	return 0;
++	return ret;
+ }
+ 
+ static int fastrpc_invoke_send(struct fastrpc_session_ctx *sctx,
+@@ -2044,7 +2071,7 @@ static int fastrpc_req_mem_map(struct fastrpc_user *fl, char __user *argp)
+ 	args[0].length = sizeof(req_msg);
+ 
+ 	pages.addr = map->phys;
+-	pages.size = map->size;
++	pages.size = map->len;
+ 
+ 	args[1].ptr = (u64) (uintptr_t) &pages;
+ 	args[1].length = sizeof(pages);
+@@ -2059,7 +2086,7 @@ static int fastrpc_req_mem_map(struct fastrpc_user *fl, char __user *argp)
+ 	err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc, &args[0]);
+ 	if (err) {
+ 		dev_err(dev, "mem mmap error, fd %d, vaddr %llx, size %lld\n",
+-			req.fd, req.vaddrin, map->size);
++			req.fd, req.vaddrin, map->len);
+ 		goto err_invoke;
+ 	}
+ 
+@@ -2072,7 +2099,7 @@ static int fastrpc_req_mem_map(struct fastrpc_user *fl, char __user *argp)
+ 	if (copy_to_user((void __user *)argp, &req, sizeof(req))) {
+ 		/* unmap the memory and release the buffer */
+ 		req_unmap.vaddr = (uintptr_t) rsp_msg.vaddr;
+-		req_unmap.length = map->size;
++		req_unmap.length = map->len;
+ 		fastrpc_req_mem_unmap_impl(fl, &req_unmap);
+ 		return -EFAULT;
+ 	}
+diff --git a/drivers/misc/genwqe/card_ddcb.c b/drivers/misc/genwqe/card_ddcb.c
+index 500b1feaf1f6f5..fd7d5cd50d3966 100644
+--- a/drivers/misc/genwqe/card_ddcb.c
++++ b/drivers/misc/genwqe/card_ddcb.c
+@@ -923,7 +923,7 @@ int __genwqe_execute_raw_ddcb(struct genwqe_dev *cd,
+ 	}
+ 	if (cmd->asv_length > DDCB_ASV_LENGTH) {
+ 		dev_err(&pci_dev->dev, "[%s] err: wrong asv_length of %d\n",
+-			__func__, cmd->asiv_length);
++			__func__, cmd->asv_length);
+ 		return -EINVAL;
+ 	}
+ 	rc = __genwqe_enqueue_ddcb(cd, req, f_flags);
+diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
+index 1d08009f2bd83f..08b8276e1da93d 100644
+--- a/drivers/mmc/core/block.c
++++ b/drivers/mmc/core/block.c
+@@ -2876,15 +2876,15 @@ static int mmc_route_rpmb_frames(struct device *dev, u8 *req,
+ 		return -ENOMEM;
+ 
+ 	if (write) {
+-		struct rpmb_frame *frm = (struct rpmb_frame *)resp;
++		struct rpmb_frame *resp_frm = (struct rpmb_frame *)resp;
+ 
+ 		/* Send write request frame(s) */
+ 		set_idata(idata[0], MMC_WRITE_MULTIPLE_BLOCK,
+ 			  1 | MMC_CMD23_ARG_REL_WR, req, req_len);
+ 
+ 		/* Send result request frame */
+-		memset(frm, 0, sizeof(*frm));
+-		frm->req_resp = cpu_to_be16(RPMB_RESULT_READ);
++		memset(resp_frm, 0, sizeof(*resp_frm));
++		resp_frm->req_resp = cpu_to_be16(RPMB_RESULT_READ);
+ 		set_idata(idata[1], MMC_WRITE_MULTIPLE_BLOCK, 1, resp,
+ 			  resp_len);
+ 
+diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c
+index 5b02119d8ba23f..543a0be9dc645e 100644
+--- a/drivers/mtd/nand/raw/atmel/nand-controller.c
++++ b/drivers/mtd/nand/raw/atmel/nand-controller.c
+@@ -1858,7 +1858,7 @@ atmel_nand_controller_legacy_add_nands(struct atmel_nand_controller *nc)
+ 
+ static int atmel_nand_controller_add_nands(struct atmel_nand_controller *nc)
+ {
+-	struct device_node *np, *nand_np;
++	struct device_node *np;
+ 	struct device *dev = nc->dev;
+ 	int ret, reg_cells;
+ 	u32 val;
+@@ -1885,7 +1885,7 @@ static int atmel_nand_controller_add_nands(struct atmel_nand_controller *nc)
+ 
+ 	reg_cells += val;
+ 
+-	for_each_child_of_node(np, nand_np) {
++	for_each_child_of_node_scoped(np, nand_np) {
+ 		struct atmel_nand *nand;
+ 
+ 		nand = atmel_nand_create(nc, nand_np, reg_cells);
+diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+index 60fb35ec4b15ac..0b2e257b591f03 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
++++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+@@ -869,7 +869,10 @@ static u32 ena_get_rxfh_indir_size(struct net_device *netdev)
+ 
+ static u32 ena_get_rxfh_key_size(struct net_device *netdev)
+ {
+-	return ENA_HASH_KEY_SIZE;
++	struct ena_adapter *adapter = netdev_priv(netdev);
++	struct ena_rss *rss = &adapter->ena_dev->rss;
++
++	return rss->hash_key ? ENA_HASH_KEY_SIZE : 0;
+ }
+ 
+ static int ena_indirection_table_set(struct ena_adapter *adapter,
+diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
+index 2c1b551e144231..92856cf387c76f 100644
+--- a/drivers/net/ethernet/dlink/dl2k.c
++++ b/drivers/net/ethernet/dlink/dl2k.c
+@@ -953,15 +953,18 @@ receive_packet (struct net_device *dev)
+ 		} else {
+ 			struct sk_buff *skb;
+ 
++			skb = NULL;
+ 			/* Small skbuffs for short packets */
+-			if (pkt_len > copy_thresh) {
++			if (pkt_len <= copy_thresh)
++				skb = netdev_alloc_skb_ip_align(dev, pkt_len);
++			if (!skb) {
+ 				dma_unmap_single(&np->pdev->dev,
+ 						 desc_to_dma(desc),
+ 						 np->rx_buf_sz,
+ 						 DMA_FROM_DEVICE);
+ 				skb_put (skb = np->rx_skbuff[entry], pkt_len);
+ 				np->rx_skbuff[entry] = NULL;
+-			} else if ((skb = netdev_alloc_skb_ip_align(dev, pkt_len))) {
++			} else {
+ 				dma_sync_single_for_cpu(&np->pdev->dev,
+ 							desc_to_dma(desc),
+ 							np->rx_buf_sz,
+diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+index 4086a6ef352e59..087a3077d54812 100644
+--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
++++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+@@ -3216,18 +3216,14 @@ static int idpf_rx_splitq_clean(struct idpf_rx_queue *rxq, int budget)
+ 		/* get the Rx desc from Rx queue based on 'next_to_clean' */
+ 		rx_desc = &rxq->rx[ntc].flex_adv_nic_3_wb;
+ 
+-		/* This memory barrier is needed to keep us from reading
+-		 * any other fields out of the rx_desc
+-		 */
+-		dma_rmb();
+-
+ 		/* if the descriptor isn't done, no work yet to do */
+ 		gen_id = le16_get_bits(rx_desc->pktlen_gen_bufq_id,
+ 				       VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M);
+-
+ 		if (idpf_queue_has(GEN_CHK, rxq) != gen_id)
+ 			break;
+ 
++		dma_rmb();
++
+ 		rxdid = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_RXDID_M,
+ 				  rx_desc->rxdid_ucast);
+ 		if (rxdid != VIRTCHNL2_RXDID_2_FLEX_SPLITQ) {
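
Moving dma_rmb() below the GEN-bit check means the barrier is paid only
for descriptors that are actually ready; reading the gen field first is
fine because it is the very field the ownership check consumes. The
generic shape of the pattern, with hypothetical helper names:

	if (!rx_desc_done(rx_desc))	/* e.g. GEN/DD ownership bit */
		break;
	dma_rmb();			/* order reads of the remaining fields */
	handle_rx(rx_desc);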
+diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
+index f27a8cf3816db3..d1f374da009817 100644
+--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
++++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
+@@ -727,9 +727,9 @@ int idpf_recv_mb_msg(struct idpf_adapter *adapter)
+ 		/* If post failed clear the only buffer we supplied */
+ 		if (post_err) {
+ 			if (dma_mem)
+-				dmam_free_coherent(&adapter->pdev->dev,
+-						   dma_mem->size, dma_mem->va,
+-						   dma_mem->pa);
++				dma_free_coherent(&adapter->pdev->dev,
++						  dma_mem->size, dma_mem->va,
++						  dma_mem->pa);
+ 			break;
+ 		}
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+index 5bb4940da59d48..b51c006277598a 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+@@ -289,6 +289,10 @@ static void poll_timeout(struct mlx5_cmd_work_ent *ent)
+ 			return;
+ 		}
+ 		cond_resched();
++		if (mlx5_cmd_is_down(dev)) {
++			ent->ret = -ENXIO;
++			return;
++		}
+ 	} while (time_before(jiffies, poll_end));
+ 
+ 	ent->ret = -ETIMEDOUT;
+@@ -1066,7 +1070,7 @@ static void cmd_work_handler(struct work_struct *work)
+ 		poll_timeout(ent);
+ 		/* make sure we read the descriptor after ownership is SW */
+ 		rmb();
+-		mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, (ent->ret == -ETIMEDOUT));
++		mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, !!ent->ret);
+ 	}
+ }
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h
+index 66d276a1be836a..f4a19ffbb641c0 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h
+@@ -66,23 +66,11 @@ struct mlx5e_port_buffer {
+ 	struct mlx5e_bufferx_reg  buffer[MLX5E_MAX_NETWORK_BUFFER];
+ };
+ 
+-#ifdef CONFIG_MLX5_CORE_EN_DCB
+ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
+ 				    u32 change, unsigned int mtu,
+ 				    struct ieee_pfc *pfc,
+ 				    u32 *buffer_size,
+ 				    u8 *prio2buffer);
+-#else
+-static inline int
+-mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
+-				u32 change, unsigned int mtu,
+-				void *pfc,
+-				u32 *buffer_size,
+-				u8 *prio2buffer)
+-{
+-	return 0;
+-}
+-#endif
+ 
+ int mlx5e_port_query_buffer(struct mlx5e_priv *priv,
+ 			    struct mlx5e_port_buffer *port_buffer);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index de2327ffb0f788..4a2f58a9d70660 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -47,7 +47,6 @@
+ #include "en.h"
+ #include "en/dim.h"
+ #include "en/txrx.h"
+-#include "en/port_buffer.h"
+ #include "en_tc.h"
+ #include "en_rep.h"
+ #include "en_accel/ipsec.h"
+@@ -2918,11 +2917,9 @@ int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv)
+ 	struct mlx5e_params *params = &priv->channels.params;
+ 	struct net_device *netdev = priv->netdev;
+ 	struct mlx5_core_dev *mdev = priv->mdev;
+-	u16 mtu, prev_mtu;
++	u16 mtu;
+ 	int err;
+ 
+-	mlx5e_query_mtu(mdev, params, &prev_mtu);
+-
+ 	err = mlx5e_set_mtu(mdev, params, params->sw_mtu);
+ 	if (err)
+ 		return err;
+@@ -2932,18 +2929,6 @@ int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv)
+ 		netdev_warn(netdev, "%s: VPort MTU %d is different than netdev mtu %d\n",
+ 			    __func__, mtu, params->sw_mtu);
+ 
+-	if (mtu != prev_mtu && MLX5_BUFFER_SUPPORTED(mdev)) {
+-		err = mlx5e_port_manual_buffer_config(priv, 0, mtu,
+-						      NULL, NULL, NULL);
+-		if (err) {
+-			netdev_warn(netdev, "%s: Failed to set Xon/Xoff values with MTU %d (err %d), setting back to previous MTU %d\n",
+-				    __func__, mtu, err, prev_mtu);
+-
+-			mlx5e_set_mtu(mdev, params, prev_mtu);
+-			return err;
+-		}
+-	}
+-
+ 	params->sw_mtu = mtu;
+ 	return 0;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+index 516df7f1997ebc..35d2fe08c0fb59 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+@@ -27,6 +27,7 @@ struct mlx5_fw_reset {
+ 	struct work_struct reset_reload_work;
+ 	struct work_struct reset_now_work;
+ 	struct work_struct reset_abort_work;
++	struct delayed_work reset_timeout_work;
+ 	unsigned long reset_flags;
+ 	u8 reset_method;
+ 	struct timer_list timer;
+@@ -258,6 +259,8 @@ static int mlx5_sync_reset_clear_reset_requested(struct mlx5_core_dev *dev, bool
+ 		return -EALREADY;
+ 	}
+ 
++	if (current_work() != &fw_reset->reset_timeout_work.work)
++		cancel_delayed_work(&fw_reset->reset_timeout_work);
+ 	mlx5_stop_sync_reset_poll(dev);
+ 	if (poll_health)
+ 		mlx5_start_health_poll(dev);
+@@ -328,6 +331,11 @@ static int mlx5_sync_reset_set_reset_requested(struct mlx5_core_dev *dev)
+ 	}
+ 	mlx5_stop_health_poll(dev, true);
+ 	mlx5_start_sync_reset_poll(dev);
++
++	if (!test_bit(MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS,
++		      &fw_reset->reset_flags))
++		schedule_delayed_work(&fw_reset->reset_timeout_work,
++			msecs_to_jiffies(mlx5_tout_ms(dev, PCI_SYNC_UPDATE)));
+ 	return 0;
+ }
+ 
+@@ -728,6 +736,19 @@ static void mlx5_sync_reset_events_handle(struct mlx5_fw_reset *fw_reset, struct
+ 	}
+ }
+ 
++static void mlx5_sync_reset_timeout_work(struct work_struct *work)
++{
++	struct delayed_work *dwork = container_of(work, struct delayed_work,
++						  work);
++	struct mlx5_fw_reset *fw_reset =
++		container_of(dwork, struct mlx5_fw_reset, reset_timeout_work);
++	struct mlx5_core_dev *dev = fw_reset->dev;
++
++	if (mlx5_sync_reset_clear_reset_requested(dev, true))
++		return;
++	mlx5_core_warn(dev, "PCI Sync FW Update Reset Timeout.\n");
++}
++
+ static int fw_reset_event_notifier(struct notifier_block *nb, unsigned long action, void *data)
+ {
+ 	struct mlx5_fw_reset *fw_reset = mlx5_nb_cof(nb, struct mlx5_fw_reset, nb);
+@@ -811,6 +832,7 @@ void mlx5_drain_fw_reset(struct mlx5_core_dev *dev)
+ 	cancel_work_sync(&fw_reset->reset_reload_work);
+ 	cancel_work_sync(&fw_reset->reset_now_work);
+ 	cancel_work_sync(&fw_reset->reset_abort_work);
++	cancel_delayed_work(&fw_reset->reset_timeout_work);
+ }
+ 
+ static const struct devlink_param mlx5_fw_reset_devlink_params[] = {
+@@ -854,6 +876,8 @@ int mlx5_fw_reset_init(struct mlx5_core_dev *dev)
+ 	INIT_WORK(&fw_reset->reset_reload_work, mlx5_sync_reset_reload_work);
+ 	INIT_WORK(&fw_reset->reset_now_work, mlx5_sync_reset_now_event);
+ 	INIT_WORK(&fw_reset->reset_abort_work, mlx5_sync_reset_abort_event);
++	INIT_DELAYED_WORK(&fw_reset->reset_timeout_work,
++			  mlx5_sync_reset_timeout_work);
+ 
+ 	init_completion(&fw_reset->done);
+ 	return 0;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+index 9bc9bd83c2324c..cd68c4b2c0bf91 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+@@ -489,9 +489,12 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
+ 	u32 func_id;
+ 	u32 npages;
+ 	u32 i = 0;
++	int err;
+ 
+-	if (!mlx5_cmd_is_down(dev))
+-		return mlx5_cmd_do(dev, in, in_size, out, out_size);
++	err = mlx5_cmd_do(dev, in, in_size, out, out_size);
++	/* If FW is gone (-ENXIO), proceed to forceful reclaim */
++	if (err != -ENXIO)
++		return err;
+ 
+ 	/* No hard feelings, we want our pages back! */
+ 	npages = MLX5_GET(manage_pages_in, in, input_num_entries);
+diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+index fbca8d0efd858c..37a46596268a08 100644
+--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
++++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+@@ -1789,7 +1789,7 @@ static u32 nfp_net_get_rxfh_key_size(struct net_device *netdev)
+ 	struct nfp_net *nn = netdev_priv(netdev);
+ 
+ 	if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
+-		return -EOPNOTSUPP;
++		return 0;
+ 
+ 	return nfp_net_rss_key_sz(nn);
+ }
+diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
+index 792ddda1ad493d..85bd5d845409b9 100644
+--- a/drivers/net/usb/asix_devices.c
++++ b/drivers/net/usb/asix_devices.c
+@@ -625,6 +625,21 @@ static void ax88772_suspend(struct usbnet *dev)
+ 		   asix_read_medium_status(dev, 1));
+ }
+ 
++/* Notes on PM callbacks and locking context:
++ *
++ * - asix_suspend()/asix_resume() are invoked for both runtime PM and
++ *   system-wide suspend/resume. For struct usb_driver the ->resume()
++ *   callback does not receive pm_message_t, so the resume type cannot
++ *   be distinguished here.
++ *
++ * - The MAC driver must hold RTNL when calling phylink interfaces such as
++ *   phylink_suspend()/resume(). Those calls will also perform MDIO I/O.
++ *
++ * - Taking RTNL and doing MDIO from a runtime-PM resume callback (while
++ *   the USB PM lock is held) is fragile. Since autosuspend brings no
++ *   measurable power saving here, we block it by holding a PM usage
++ *   reference in ax88772_bind().
++ */
+ static int asix_suspend(struct usb_interface *intf, pm_message_t message)
+ {
+ 	struct usbnet *dev = usb_get_intfdata(intf);
+@@ -919,6 +934,13 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
+ 	if (ret)
+ 		goto initphy_err;
+ 
++	/* Keep this interface runtime-PM active by taking a usage ref.
++	 * Prevents runtime suspend while bound and avoids resume paths
++	 * that could deadlock (autoresume under RTNL while USB PM lock
++	 * is held, phylink/MDIO wants RTNL).
++	 */
++	pm_runtime_get_noresume(&intf->dev);
++
+ 	return 0;
+ 
+ initphy_err:
+@@ -948,6 +970,8 @@ static void ax88772_unbind(struct usbnet *dev, struct usb_interface *intf)
+ 	phylink_destroy(priv->phylink);
+ 	ax88772_mdio_unregister(priv);
+ 	asix_rx_fixup_common_free(dev->driver_priv);
++	/* Drop the PM usage ref taken in bind() */
++	pm_runtime_put(&intf->dev);
+ }
+ 
+ static void ax88178_unbind(struct usbnet *dev, struct usb_interface *intf)
+@@ -1600,6 +1624,11 @@ static struct usb_driver asix_driver = {
+ 	.resume =	asix_resume,
+ 	.reset_resume =	asix_resume,
+ 	.disconnect =	usbnet_disconnect,
++	/* usbnet enables autosuspend by default (supports_autosuspend=1).
++	 * We keep runtime-PM active for AX88772* by taking a PM usage
++	 * reference in ax88772_bind() (pm_runtime_get_noresume()) and
++	 * dropping it in unbind(), which effectively blocks autosuspend.
++	 */
+ 	.supports_autosuspend = 1,
+ 	.disable_hub_initiated_lpm = 1,
+ };
+diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
+index ddff6f19ff98eb..92add3daadbb18 100644
+--- a/drivers/net/usb/rtl8150.c
++++ b/drivers/net/usb/rtl8150.c
+@@ -664,7 +664,6 @@ static void rtl8150_set_multicast(struct net_device *netdev)
+ 	rtl8150_t *dev = netdev_priv(netdev);
+ 	u16 rx_creg = 0x9e;
+ 
+-	netif_stop_queue(netdev);
+ 	if (netdev->flags & IFF_PROMISC) {
+ 		rx_creg |= 0x0001;
+ 		dev_info(&netdev->dev, "%s: promiscuous mode\n", netdev->name);
+@@ -678,7 +677,6 @@ static void rtl8150_set_multicast(struct net_device *netdev)
+ 		rx_creg &= 0x00fc;
+ 	}
+ 	async_set_registers(dev, RCR, sizeof(rx_creg), rx_creg);
+-	netif_wake_queue(netdev);
+ }
+ 
+ static netdev_tx_t rtl8150_start_xmit(struct sk_buff *skb,
+diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
+index 09066e6aca4025..fdab67a56e438c 100644
+--- a/drivers/net/wireless/ath/ath10k/wmi.c
++++ b/drivers/net/wireless/ath/ath10k/wmi.c
+@@ -1764,33 +1764,32 @@ void ath10k_wmi_put_wmi_channel(struct ath10k *ar, struct wmi_channel *ch,
+ 
+ int ath10k_wmi_wait_for_service_ready(struct ath10k *ar)
+ {
++	unsigned long timeout = jiffies + WMI_SERVICE_READY_TIMEOUT_HZ;
+ 	unsigned long time_left, i;
+ 
+-	time_left = wait_for_completion_timeout(&ar->wmi.service_ready,
+-						WMI_SERVICE_READY_TIMEOUT_HZ);
+-	if (!time_left) {
+-		/* Sometimes the PCI HIF doesn't receive interrupt
+-		 * for the service ready message even if the buffer
+-		 * was completed. PCIe sniffer shows that it's
+-		 * because the corresponding CE ring doesn't fires
+-		 * it. Workaround here by polling CE rings once.
+-		 */
+-		ath10k_warn(ar, "failed to receive service ready completion, polling..\n");
+-
++	/* Sometimes the PCI HIF doesn't receive interrupt
++	 * for the service ready message even if the buffer
++	 * was completed. PCIe sniffer shows that it's
++	 * because the corresponding CE ring doesn't fire
++	 * it. Work around this here by polling CE rings. Since
++	 * the message could arrive at any time, continue
++	 * polling until timeout.
++	 */
++	do {
+ 		for (i = 0; i < CE_COUNT; i++)
+ 			ath10k_hif_send_complete_check(ar, i, 1);
+ 
++		/* The 100 ms granularity is a tradeoff between scheduler
++		 * overhead and response latency
++		 */
+ 		time_left = wait_for_completion_timeout(&ar->wmi.service_ready,
+-							WMI_SERVICE_READY_TIMEOUT_HZ);
+-		if (!time_left) {
+-			ath10k_warn(ar, "polling timed out\n");
+-			return -ETIMEDOUT;
+-		}
+-
+-		ath10k_warn(ar, "service ready completion received, continuing normally\n");
+-	}
++							msecs_to_jiffies(100));
++		if (time_left)
++			return 0;
++	} while (time_before(jiffies, timeout));
+ 
+-	return 0;
++	ath10k_warn(ar, "failed to receive service ready completion\n");
++	return -ETIMEDOUT;
+ }
+ 
+ int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar)
+diff --git a/drivers/net/wireless/ath/ath12k/ce.c b/drivers/net/wireless/ath/ath12k/ce.c
+index b66d23d6b2bd9e..bd21e8fe9c90b3 100644
+--- a/drivers/net/wireless/ath/ath12k/ce.c
++++ b/drivers/net/wireless/ath/ath12k/ce.c
+@@ -388,7 +388,7 @@ static void ath12k_ce_recv_process_cb(struct ath12k_ce_pipe *pipe)
+ 	}
+ 
+ 	while ((skb = __skb_dequeue(&list))) {
+-		ath12k_dbg(ab, ATH12K_DBG_AHB, "rx ce pipe %d len %d\n",
++		ath12k_dbg(ab, ATH12K_DBG_CE, "rx ce pipe %d len %d\n",
+ 			   pipe->pipe_num, skb->len);
+ 		pipe->recv_cb(ab, skb);
+ 	}
+diff --git a/drivers/net/wireless/ath/ath12k/debug.h b/drivers/net/wireless/ath/ath12k/debug.h
+index f7005917362c62..ea711e02ca03cc 100644
+--- a/drivers/net/wireless/ath/ath12k/debug.h
++++ b/drivers/net/wireless/ath/ath12k/debug.h
+@@ -26,6 +26,7 @@ enum ath12k_debug_mask {
+ 	ATH12K_DBG_DP_TX	= 0x00002000,
+ 	ATH12K_DBG_DP_RX	= 0x00004000,
+ 	ATH12K_DBG_WOW		= 0x00008000,
++	ATH12K_DBG_CE		= 0x00010000,
+ 	ATH12K_DBG_ANY		= 0xffffffff,
+ };
+ 
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h
+index 81787501d4a4f0..11704163876b8e 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h
++++ b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h
+@@ -12,7 +12,6 @@
+ #include "fw/api/phy.h"
+ #include "fw/api/config.h"
+ #include "fw/api/nvm-reg.h"
+-#include "fw/img.h"
+ #include "iwl-trans.h"
+ 
+ #define BIOS_SAR_MAX_PROFILE_NUM	4
+diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+index 59bea82eab2947..8801b93eacd426 100644
+--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
++++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+@@ -684,10 +684,9 @@ static void mwifiex_reg_notifier(struct wiphy *wiphy,
+ 		return;
+ 	}
+ 
+-	/* Don't send world or same regdom info to firmware */
+-	if (strncmp(request->alpha2, "00", 2) &&
+-	    strncmp(request->alpha2, adapter->country_code,
+-		    sizeof(request->alpha2))) {
++	/* Don't send the same regdom info to the firmware */
++	if (strncmp(request->alpha2, adapter->country_code,
++		    sizeof(request->alpha2)) != 0) {
+ 		memcpy(adapter->country_code, request->alpha2,
+ 		       sizeof(request->alpha2));
+ 		mwifiex_send_domain_info_cmd_fw(wiphy);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/soc.c b/drivers/net/wireless/mediatek/mt76/mt7603/soc.c
+index ec02148a7f1f74..2ee8a6e1e310e5 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7603/soc.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7603/soc.c
+@@ -48,7 +48,7 @@ mt76_wmac_probe(struct platform_device *pdev)
+ 
+ 	return 0;
+ error:
+-	ieee80211_free_hw(mt76_hw(dev));
++	mt76_free_device(mdev);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
+index 509fb43d8a688a..2908e8113e48a3 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
+@@ -50,9 +50,9 @@ enum mt7915_eeprom_field {
+ #define MT_EE_CAL_GROUP_SIZE_7975		(54 * MT_EE_CAL_UNIT + 16)
+ #define MT_EE_CAL_GROUP_SIZE_7976		(94 * MT_EE_CAL_UNIT + 16)
+ #define MT_EE_CAL_GROUP_SIZE_7916_6G		(94 * MT_EE_CAL_UNIT + 16)
++#define MT_EE_CAL_GROUP_SIZE_7981		(144 * MT_EE_CAL_UNIT + 16)
+ #define MT_EE_CAL_DPD_SIZE_V1			(54 * MT_EE_CAL_UNIT)
+ #define MT_EE_CAL_DPD_SIZE_V2			(300 * MT_EE_CAL_UNIT)
+-#define MT_EE_CAL_DPD_SIZE_V2_7981		(102 * MT_EE_CAL_UNIT)	/* no 6g dpd data */
+ 
+ #define MT_EE_WIFI_CONF0_TX_PATH		GENMASK(2, 0)
+ #define MT_EE_WIFI_CONF0_BAND_SEL		GENMASK(7, 6)
+@@ -179,6 +179,8 @@ mt7915_get_cal_group_size(struct mt7915_dev *dev)
+ 		val = FIELD_GET(MT_EE_WIFI_CONF0_BAND_SEL, val);
+ 		return (val == MT_EE_V2_BAND_SEL_6GHZ) ? MT_EE_CAL_GROUP_SIZE_7916_6G :
+ 							 MT_EE_CAL_GROUP_SIZE_7916;
++	} else if (is_mt7981(&dev->mt76)) {
++		return MT_EE_CAL_GROUP_SIZE_7981;
+ 	} else if (mt7915_check_adie(dev, false)) {
+ 		return MT_EE_CAL_GROUP_SIZE_7976;
+ 	} else {
+@@ -191,8 +193,6 @@ mt7915_get_cal_dpd_size(struct mt7915_dev *dev)
+ {
+ 	if (is_mt7915(&dev->mt76))
+ 		return MT_EE_CAL_DPD_SIZE_V1;
+-	else if (is_mt7981(&dev->mt76))
+-		return MT_EE_CAL_DPD_SIZE_V2_7981;
+ 	else
+ 		return MT_EE_CAL_DPD_SIZE_V2;
+ }
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+index 3398c25cb03c0d..7b481aea76b6cc 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+@@ -3004,30 +3004,15 @@ static int mt7915_dpd_freq_idx(struct mt7915_dev *dev, u16 freq, u8 bw)
+ 		/* 5G BW160 */
+ 		5250, 5570, 5815
+ 	};
+-	static const u16 freq_list_v2_7981[] = {
+-		/* 5G BW20 */
+-		5180, 5200, 5220, 5240,
+-		5260, 5280, 5300, 5320,
+-		5500, 5520, 5540, 5560,
+-		5580, 5600, 5620, 5640,
+-		5660, 5680, 5700, 5720,
+-		5745, 5765, 5785, 5805,
+-		5825, 5845, 5865, 5885,
+-		/* 5G BW160 */
+-		5250, 5570, 5815
+-	};
+-	const u16 *freq_list = freq_list_v1;
+-	int n_freqs = ARRAY_SIZE(freq_list_v1);
+-	int idx;
++	const u16 *freq_list;
++	int idx, n_freqs;
+ 
+ 	if (!is_mt7915(&dev->mt76)) {
+-		if (is_mt7981(&dev->mt76)) {
+-			freq_list = freq_list_v2_7981;
+-			n_freqs = ARRAY_SIZE(freq_list_v2_7981);
+-		} else {
+-			freq_list = freq_list_v2;
+-			n_freqs = ARRAY_SIZE(freq_list_v2);
+-		}
++		freq_list = freq_list_v2;
++		n_freqs = ARRAY_SIZE(freq_list_v2);
++	} else {
++		freq_list = freq_list_v1;
++		n_freqs = ARRAY_SIZE(freq_list_v1);
+ 	}
+ 
+ 	if (freq < 4000) {
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/init.c b/drivers/net/wireless/mediatek/mt76/mt7996/init.c
+index c5503855411436..91b7d35bdb4316 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/init.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/init.c
+@@ -679,6 +679,7 @@ void mt7996_wfsys_reset(struct mt7996_dev *dev)
+ static int mt7996_wed_rro_init(struct mt7996_dev *dev)
+ {
+ #ifdef CONFIG_NET_MEDIATEK_SOC_WED
++	u32 val = FIELD_PREP(WED_RRO_ADDR_SIGNATURE_MASK, 0xff);
+ 	struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
+ 	u32 reg = MT_RRO_ADDR_ELEM_SEG_ADDR0;
+ 	struct mt7996_wed_rro_addr *addr;
+@@ -718,7 +719,7 @@ static int mt7996_wed_rro_init(struct mt7996_dev *dev)
+ 
+ 		addr = dev->wed_rro.addr_elem[i].ptr;
+ 		for (j = 0; j < MT7996_RRO_WINDOW_MAX_SIZE; j++) {
+-			addr->signature = 0xff;
++			addr->data = cpu_to_le32(val);
+ 			addr++;
+ 		}
+ 
+@@ -736,7 +737,7 @@ static int mt7996_wed_rro_init(struct mt7996_dev *dev)
+ 	dev->wed_rro.session.ptr = ptr;
+ 	addr = dev->wed_rro.session.ptr;
+ 	for (i = 0; i < MT7996_RRO_WINDOW_MAX_LEN; i++) {
+-		addr->signature = 0xff;
++		addr->data = cpu_to_le32(val);
+ 		addr++;
+ 	}
+ 
+@@ -836,6 +837,7 @@ static void mt7996_wed_rro_free(struct mt7996_dev *dev)
+ static void mt7996_wed_rro_work(struct work_struct *work)
+ {
+ #ifdef CONFIG_NET_MEDIATEK_SOC_WED
++	u32 val = FIELD_PREP(WED_RRO_ADDR_SIGNATURE_MASK, 0xff);
+ 	struct mt7996_dev *dev;
+ 	LIST_HEAD(list);
+ 
+@@ -872,7 +874,7 @@ static void mt7996_wed_rro_work(struct work_struct *work)
+ 				MT7996_RRO_WINDOW_MAX_LEN;
+ reset:
+ 			elem = ptr + elem_id * sizeof(*elem);
+-			elem->signature = 0xff;
++			elem->data |= cpu_to_le32(val);
+ 		}
+ 		mt7996_mcu_wed_rro_reset_sessions(dev, e->id);
+ out:
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
+index 425fd030bee001..29e7289c3a169e 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
+@@ -194,13 +194,12 @@ struct mt7996_hif {
+ 	int irq;
+ };
+ 
++#define WED_RRO_ADDR_SIGNATURE_MASK	GENMASK(31, 24)
++#define WED_RRO_ADDR_COUNT_MASK		GENMASK(14, 4)
++#define WED_RRO_ADDR_HEAD_HIGH_MASK	GENMASK(3, 0)
+ struct mt7996_wed_rro_addr {
+-	u32 head_low;
+-	u32 head_high : 4;
+-	u32 count: 11;
+-	u32 oor: 1;
+-	u32 rsv : 8;
+-	u32 signature : 8;
++	__le32 head_low;
++	__le32 data;
+ };
+ 
+ struct mt7996_wed_rro_session_id {
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/pci.c b/drivers/net/wireless/mediatek/mt76/mt7996/pci.c
+index 04056181368a69..dbd05612f2e4a7 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/pci.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/pci.c
+@@ -132,6 +132,7 @@ static int mt7996_pci_probe(struct pci_dev *pdev,
+ 	mdev = &dev->mt76;
+ 	mt7996_wfsys_reset(dev);
+ 	hif2 = mt7996_pci_init_hif2(pdev);
++	dev->hif2 = hif2;
+ 
+ 	ret = mt7996_mmio_wed_init(dev, pdev, false, &irq);
+ 	if (ret < 0)
+@@ -156,7 +157,6 @@ static int mt7996_pci_probe(struct pci_dev *pdev,
+ 
+ 	if (hif2) {
+ 		hif2_dev = container_of(hif2->dev, struct pci_dev, dev);
+-		dev->hif2 = hif2;
+ 
+ 		ret = mt7996_mmio_wed_init(dev, hif2_dev, true, &hif2_irq);
+ 		if (ret < 0)
+diff --git a/drivers/net/wireless/realtek/rtw89/ser.c b/drivers/net/wireless/realtek/rtw89/ser.c
+index c0f0e3d71f5f54..2a303a758e2769 100644
+--- a/drivers/net/wireless/realtek/rtw89/ser.c
++++ b/drivers/net/wireless/realtek/rtw89/ser.c
+@@ -207,7 +207,6 @@ static void rtw89_ser_hdl_work(struct work_struct *work)
+ 
+ static int ser_send_msg(struct rtw89_ser *ser, u8 event)
+ {
+-	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
+ 	struct ser_msg *msg = NULL;
+ 
+ 	if (test_bit(RTW89_SER_DRV_STOP_RUN, ser->flags))
+@@ -223,7 +222,7 @@ static int ser_send_msg(struct rtw89_ser *ser, u8 event)
+ 	list_add(&msg->list, &ser->msg_q);
+ 	spin_unlock_irq(&ser->msg_q_lock);
+ 
+-	ieee80211_queue_work(rtwdev->hw, &ser->ser_hdl_work);
++	schedule_work(&ser->ser_hdl_work);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
+index ef8c5961e10c89..0ade23610ae640 100644
+--- a/drivers/nvme/target/fc.c
++++ b/drivers/nvme/target/fc.c
+@@ -54,6 +54,8 @@ struct nvmet_fc_ls_req_op {		/* for an LS RQST XMT */
+ 	int				ls_error;
+ 	struct list_head		lsreq_list; /* tgtport->ls_req_list */
+ 	bool				req_queued;
++
++	struct work_struct		put_work;
+ };
+ 
+ 
+@@ -111,8 +113,6 @@ struct nvmet_fc_tgtport {
+ 	struct nvmet_fc_port_entry	*pe;
+ 	struct kref			ref;
+ 	u32				max_sg_cnt;
+-
+-	struct work_struct		put_work;
+ };
+ 
+ struct nvmet_fc_port_entry {
+@@ -235,12 +235,13 @@ static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
+ static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
+ static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
+ static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
+-static void nvmet_fc_put_tgtport_work(struct work_struct *work)
++static void nvmet_fc_put_lsop_work(struct work_struct *work)
+ {
+-	struct nvmet_fc_tgtport *tgtport =
+-		container_of(work, struct nvmet_fc_tgtport, put_work);
++	struct nvmet_fc_ls_req_op *lsop =
++		container_of(work, struct nvmet_fc_ls_req_op, put_work);
+ 
+-	nvmet_fc_tgtport_put(tgtport);
++	nvmet_fc_tgtport_put(lsop->tgtport);
++	kfree(lsop);
+ }
+ static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
+ static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
+@@ -367,7 +368,7 @@ __nvmet_fc_finish_ls_req(struct nvmet_fc_ls_req_op *lsop)
+ 				  DMA_BIDIRECTIONAL);
+ 
+ out_putwork:
+-	queue_work(nvmet_wq, &tgtport->put_work);
++	queue_work(nvmet_wq, &lsop->put_work);
+ }
+ 
+ static int
+@@ -388,6 +389,7 @@ __nvmet_fc_send_ls_req(struct nvmet_fc_tgtport *tgtport,
+ 	lsreq->done = done;
+ 	lsop->req_queued = false;
+ 	INIT_LIST_HEAD(&lsop->lsreq_list);
++	INIT_WORK(&lsop->put_work, nvmet_fc_put_lsop_work);
+ 
+ 	lsreq->rqstdma = fc_dma_map_single(tgtport->dev, lsreq->rqstaddr,
+ 				  lsreq->rqstlen + lsreq->rsplen,
+@@ -447,8 +449,6 @@ nvmet_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
+ 	__nvmet_fc_finish_ls_req(lsop);
+ 
+ 	/* fc-nvme target doesn't care about success or failure of cmd */
+-
+-	kfree(lsop);
+ }
+ 
+ /*
+@@ -1412,7 +1412,6 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
+ 	kref_init(&newrec->ref);
+ 	ida_init(&newrec->assoc_cnt);
+ 	newrec->max_sg_cnt = template->max_sgl_segments;
+-	INIT_WORK(&newrec->put_work, nvmet_fc_put_tgtport_work);
+ 
+ 	ret = nvmet_fc_alloc_ls_iodlist(newrec);
+ 	if (ret) {
+diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c
+index bae829ac759e12..eb772c18d44ed9 100644
+--- a/drivers/pci/controller/cadence/pci-j721e.c
++++ b/drivers/pci/controller/cadence/pci-j721e.c
+@@ -531,7 +531,7 @@ static int j721e_pcie_probe(struct platform_device *pdev)
+ 
+ 	ret = j721e_pcie_ctrl_init(pcie);
+ 	if (ret < 0) {
+-		dev_err_probe(dev, ret, "pm_runtime_get_sync failed\n");
++		dev_err_probe(dev, ret, "j721e_pcie_ctrl_init failed\n");
+ 		goto err_get_sync;
+ 	}
+ 
+diff --git a/drivers/pci/controller/dwc/pcie-rcar-gen4.c b/drivers/pci/controller/dwc/pcie-rcar-gen4.c
+index 5d77a01648606c..14f69efa243c38 100644
+--- a/drivers/pci/controller/dwc/pcie-rcar-gen4.c
++++ b/drivers/pci/controller/dwc/pcie-rcar-gen4.c
+@@ -182,8 +182,17 @@ static int rcar_gen4_pcie_common_init(struct rcar_gen4_pcie *rcar)
+ 		return ret;
+ 	}
+ 
+-	if (!reset_control_status(dw->core_rsts[DW_PCIE_PWR_RST].rstc))
++	if (!reset_control_status(dw->core_rsts[DW_PCIE_PWR_RST].rstc)) {
+ 		reset_control_assert(dw->core_rsts[DW_PCIE_PWR_RST].rstc);
++		/*
++		 * R-Car V4H Reference Manual R19UH0186EJ0130 Rev.1.30 Apr.
++		 * 21, 2025 page 585 Figure 9.3.2 Software Reset flow (B)
++		 * indicates that for peripherals in HSC domain, after
++		 * reset has been asserted by writing a matching reset bit
++		 * into register SRCR, it is mandatory to wait 1ms.
++		 */
++		fsleep(1000);
++	}
+ 
+ 	val = readl(rcar->base + PCIEMSR0);
+ 	if (rcar->drvdata->mode == DW_PCIE_RC_TYPE) {
+@@ -204,6 +213,19 @@ static int rcar_gen4_pcie_common_init(struct rcar_gen4_pcie *rcar)
+ 	if (ret)
+ 		goto err_unprepare;
+ 
++	/*
++	 * Assure the reset is latched and the core is ready for DBI access.
++	 * On R-Car V4H, the PCIe reset is asynchronous and does not take
++	 * effect immediately, but needs a short time to complete. In case
++	 * DBI access happens in that short time, that access generates an
++	 * SError. To make sure that condition can never happen, read back the
++	 * state of the reset, which should turn the asynchronous reset into
++	 * synchronous one, and wait a little over 1ms to add additional
++	 * safety margin.
++	 */
++	reset_control_status(dw->core_rsts[DW_PCIE_PWR_RST].rstc);
++	fsleep(1000);
++
+ 	if (rcar->drvdata->additional_common_init)
+ 		rcar->drvdata->additional_common_init(rcar);
+ 
+@@ -711,7 +733,7 @@ static int rcar_gen4_pcie_ltssm_control(struct rcar_gen4_pcie *rcar, bool enable
+ 	val &= ~APP_HOLD_PHY_RST;
+ 	writel(val, rcar->base + PCIERSTCTRL1);
+ 
+-	ret = readl_poll_timeout(rcar->phy_base + 0x0f8, val, !(val & BIT(18)), 100, 10000);
++	ret = readl_poll_timeout(rcar->phy_base + 0x0f8, val, val & BIT(18), 100, 10000);
+ 	if (ret < 0)
+ 		return ret;
+ 
+diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
+index ced3b7e7bdaded..815599ef72db8c 100644
+--- a/drivers/pci/controller/dwc/pcie-tegra194.c
++++ b/drivers/pci/controller/dwc/pcie-tegra194.c
+@@ -1721,9 +1721,9 @@ static void pex_ep_event_pex_rst_assert(struct tegra_pcie_dw *pcie)
+ 				ret);
+ 	}
+ 
+-	ret = tegra_pcie_bpmp_set_pll_state(pcie, false);
++	ret = tegra_pcie_bpmp_set_ctrl_state(pcie, false);
+ 	if (ret)
+-		dev_err(pcie->dev, "Failed to turn off UPHY: %d\n", ret);
++		dev_err(pcie->dev, "Failed to disable controller: %d\n", ret);
+ 
+ 	pcie->ep_state = EP_STATE_DISABLED;
+ 	dev_dbg(pcie->dev, "Uninitialization of endpoint is completed\n");
+diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c
+index d7517c3976e7f1..4f70b7f2ded9cb 100644
+--- a/drivers/pci/controller/pci-tegra.c
++++ b/drivers/pci/controller/pci-tegra.c
+@@ -1343,7 +1343,7 @@ static int tegra_pcie_port_get_phys(struct tegra_pcie_port *port)
+ 	unsigned int i;
+ 	int err;
+ 
+-	port->phys = devm_kcalloc(dev, sizeof(phy), port->lanes, GFP_KERNEL);
++	port->phys = devm_kcalloc(dev, port->lanes, sizeof(phy), GFP_KERNEL);
+ 	if (!port->phys)
+ 		return -ENOMEM;
+ 
+diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
+index 99c58ee09fbb0b..0cd8a75e225800 100644
+--- a/drivers/pci/pci-acpi.c
++++ b/drivers/pci/pci-acpi.c
+@@ -122,6 +122,8 @@ phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle)
+ 
+ bool pci_acpi_preserve_config(struct pci_host_bridge *host_bridge)
+ {
++	bool ret = false;
++
+ 	if (ACPI_HANDLE(&host_bridge->dev)) {
+ 		union acpi_object *obj;
+ 
+@@ -135,11 +137,11 @@ bool pci_acpi_preserve_config(struct pci_host_bridge *host_bridge)
+ 					      1, DSM_PCI_PRESERVE_BOOT_CONFIG,
+ 					      NULL, ACPI_TYPE_INTEGER);
+ 		if (obj && obj->integer.value == 0)
+-			return true;
++			ret = true;
+ 		ACPI_FREE(obj);
+ 	}
+ 
+-	return false;
++	return ret;
+ }
+ 
+ /* _HPX PCI Setting Record (Type 0); same as _HPP */
+diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c
+index 3569050f9cf375..abd23430dc0335 100644
+--- a/drivers/perf/arm_spe_pmu.c
++++ b/drivers/perf/arm_spe_pmu.c
+@@ -96,7 +96,8 @@ struct arm_spe_pmu {
+ #define to_spe_pmu(p) (container_of(p, struct arm_spe_pmu, pmu))
+ 
+ /* Convert a free-running index from perf into an SPE buffer offset */
+-#define PERF_IDX2OFF(idx, buf)	((idx) % ((buf)->nr_pages << PAGE_SHIFT))
++#define PERF_IDX2OFF(idx, buf) \
++	((idx) % ((unsigned long)(buf)->nr_pages << PAGE_SHIFT))
+ 
+ /* Keep track of our dynamic hotplug state */
+ static enum cpuhp_state arm_spe_pmu_online;
+diff --git a/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c b/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
+index 1ef6d9630f7e09..fbaeb7ca600d16 100644
+--- a/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
++++ b/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
+@@ -122,6 +122,8 @@ struct rockchip_combphy_grfcfg {
+ 	struct combphy_reg pipe_xpcs_phy_ready;
+ 	struct combphy_reg pipe_pcie1l0_sel;
+ 	struct combphy_reg pipe_pcie1l1_sel;
++	struct combphy_reg u3otg0_port_en;
++	struct combphy_reg u3otg1_port_en;
+ };
+ 
+ struct rockchip_combphy_cfg {
+@@ -431,6 +433,14 @@ static int rk3568_combphy_cfg(struct rockchip_combphy_priv *priv)
+ 		rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_txcomp_sel, false);
+ 		rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_txelec_sel, false);
+ 		rockchip_combphy_param_write(priv->phy_grf, &cfg->usb_mode_set, true);
++		switch (priv->id) {
++		case 0:
++			rockchip_combphy_param_write(priv->pipe_grf, &cfg->u3otg0_port_en, true);
++			break;
++		case 1:
++			rockchip_combphy_param_write(priv->pipe_grf, &cfg->u3otg1_port_en, true);
++			break;
++		}
+ 		break;
+ 
+ 	case PHY_TYPE_SATA:
+@@ -574,6 +584,8 @@ static const struct rockchip_combphy_grfcfg rk3568_combphy_grfcfgs = {
+ 	/* pipe-grf */
+ 	.pipe_con0_for_sata	= { 0x0000, 15, 0, 0x00, 0x2220 },
+ 	.pipe_xpcs_phy_ready	= { 0x0040, 2, 2, 0x00, 0x01 },
++	.u3otg0_port_en		= { 0x0104, 15, 0, 0x0181, 0x1100 },
++	.u3otg1_port_en		= { 0x0144, 15, 0, 0x0181, 0x1100 },
+ };
+ 
+ static const struct rockchip_combphy_cfg rk3568_combphy_cfgs = {
+diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxl.c b/drivers/pinctrl/meson/pinctrl-meson-gxl.c
+index 9171de657f9780..a75762e4d26418 100644
+--- a/drivers/pinctrl/meson/pinctrl-meson-gxl.c
++++ b/drivers/pinctrl/meson/pinctrl-meson-gxl.c
+@@ -187,6 +187,9 @@ static const unsigned int i2c_sda_c_pins[]	= { GPIODV_28 };
+ static const unsigned int i2c_sck_c_dv19_pins[] = { GPIODV_19 };
+ static const unsigned int i2c_sda_c_dv18_pins[] = { GPIODV_18 };
+ 
++static const unsigned int i2c_sck_d_pins[]	= { GPIOX_11 };
++static const unsigned int i2c_sda_d_pins[]	= { GPIOX_10 };
++
+ static const unsigned int eth_mdio_pins[]	= { GPIOZ_0 };
+ static const unsigned int eth_mdc_pins[]	= { GPIOZ_1 };
+ static const unsigned int eth_clk_rx_clk_pins[] = { GPIOZ_2 };
+@@ -411,6 +414,8 @@ static const struct meson_pmx_group meson_gxl_periphs_groups[] = {
+ 	GPIO_GROUP(GPIO_TEST_N),
+ 
+ 	/* Bank X */
++	GROUP(i2c_sda_d,	5,	5),
++	GROUP(i2c_sck_d,	5,	4),
+ 	GROUP(sdio_d0,		5,	31),
+ 	GROUP(sdio_d1,		5,	30),
+ 	GROUP(sdio_d2,		5,	29),
+@@ -651,6 +656,10 @@ static const char * const i2c_c_groups[] = {
+ 	"i2c_sck_c", "i2c_sda_c", "i2c_sda_c_dv18", "i2c_sck_c_dv19",
+ };
+ 
++static const char * const i2c_d_groups[] = {
++	"i2c_sck_d", "i2c_sda_d",
++};
++
+ static const char * const eth_groups[] = {
+ 	"eth_mdio", "eth_mdc", "eth_clk_rx_clk", "eth_rx_dv",
+ 	"eth_rxd0", "eth_rxd1", "eth_rxd2", "eth_rxd3",
+@@ -777,6 +786,7 @@ static const struct meson_pmx_func meson_gxl_periphs_functions[] = {
+ 	FUNCTION(i2c_a),
+ 	FUNCTION(i2c_b),
+ 	FUNCTION(i2c_c),
++	FUNCTION(i2c_d),
+ 	FUNCTION(eth),
+ 	FUNCTION(pwm_a),
+ 	FUNCTION(pwm_b),
+diff --git a/drivers/pinctrl/pinmux.c b/drivers/pinctrl/pinmux.c
+index 2c31e7f2a27a86..56e14193d678c5 100644
+--- a/drivers/pinctrl/pinmux.c
++++ b/drivers/pinctrl/pinmux.c
+@@ -337,7 +337,7 @@ static int pinmux_func_name_to_selector(struct pinctrl_dev *pctldev,
+ 	while (selector < nfuncs) {
+ 		const char *fname = ops->get_function_name(pctldev, selector);
+ 
+-		if (!strcmp(function, fname))
++		if (fname && !strcmp(function, fname))
+ 			return selector;
+ 
+ 		selector++;
+diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+index bde58f5a743cb9..698ab8cc970a63 100644
+--- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c
++++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+@@ -1074,7 +1074,7 @@ static u32 rzg3s_oen_read(struct rzg2l_pinctrl *pctrl, unsigned int _pin)
+ 
+ 	bit = rzg3s_pin_to_oen_bit(pctrl, _pin);
+ 	if (bit < 0)
+-		return bit;
++		return 0;
+ 
+ 	return !(readb(pctrl->base + ETH_MODE) & BIT(bit));
+ }
+diff --git a/drivers/pinctrl/renesas/pinctrl.c b/drivers/pinctrl/renesas/pinctrl.c
+index 29d16c9c1bd194..3a742f74ecd1dc 100644
+--- a/drivers/pinctrl/renesas/pinctrl.c
++++ b/drivers/pinctrl/renesas/pinctrl.c
+@@ -726,7 +726,8 @@ static int sh_pfc_pinconf_group_set(struct pinctrl_dev *pctldev, unsigned group,
+ 	struct sh_pfc_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
+ 	const unsigned int *pins;
+ 	unsigned int num_pins;
+-	unsigned int i, ret;
++	unsigned int i;
++	int ret;
+ 
+ 	pins = pmx->pfc->info->groups[group].pins;
+ 	num_pins = pmx->pfc->info->groups[group].nr_pins;
+diff --git a/drivers/power/supply/cw2015_battery.c b/drivers/power/supply/cw2015_battery.c
+index f63c3c41045155..382dff8805c623 100644
+--- a/drivers/power/supply/cw2015_battery.c
++++ b/drivers/power/supply/cw2015_battery.c
+@@ -702,8 +702,7 @@ static int cw_bat_probe(struct i2c_client *client)
+ 	if (!cw_bat->battery_workqueue)
+ 		return -ENOMEM;
+ 
+-	devm_delayed_work_autocancel(&client->dev,
+-							  &cw_bat->battery_delay_work, cw_bat_work);
++	devm_delayed_work_autocancel(&client->dev, &cw_bat->battery_delay_work, cw_bat_work);
+ 	queue_delayed_work(cw_bat->battery_workqueue,
+ 			   &cw_bat->battery_delay_work, msecs_to_jiffies(10));
+ 	return 0;
+diff --git a/drivers/pps/kapi.c b/drivers/pps/kapi.c
+index 92d1b62ea239d7..e9389876229eaa 100644
+--- a/drivers/pps/kapi.c
++++ b/drivers/pps/kapi.c
+@@ -109,16 +109,13 @@ struct pps_device *pps_register_source(struct pps_source_info *info,
+ 	if (err < 0) {
+ 		pr_err("%s: unable to create char device\n",
+ 					info->name);
+-		goto kfree_pps;
++		goto pps_register_source_exit;
+ 	}
+ 
+ 	dev_dbg(&pps->dev, "new PPS source %s\n", info->name);
+ 
+ 	return pps;
+ 
+-kfree_pps:
+-	kfree(pps);
+-
+ pps_register_source_exit:
+ 	pr_err("%s: unable to register source\n", info->name);
+ 
+diff --git a/drivers/pps/pps.c b/drivers/pps/pps.c
+index 9463232af8d2e6..c6b8b647827611 100644
+--- a/drivers/pps/pps.c
++++ b/drivers/pps/pps.c
+@@ -374,6 +374,7 @@ int pps_register_cdev(struct pps_device *pps)
+ 			       pps->info.name);
+ 			err = -EBUSY;
+ 		}
++		kfree(pps);
+ 		goto out_unlock;
+ 	}
+ 	pps->id = err;
+@@ -383,13 +384,11 @@ int pps_register_cdev(struct pps_device *pps)
+ 	pps->dev.devt = MKDEV(pps_major, pps->id);
+ 	dev_set_drvdata(&pps->dev, pps);
+ 	dev_set_name(&pps->dev, "pps%d", pps->id);
++	pps->dev.release = pps_device_destruct;
+ 	err = device_register(&pps->dev);
+ 	if (err)
+ 		goto free_idr;
+ 
+-	/* Override the release function with our own */
+-	pps->dev.release = pps_device_destruct;
+-
+ 	pr_debug("source %s got cdev (%d:%d)\n", pps->info.name, pps_major,
+ 		 pps->id);
+ 
+diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
+index b352df4cd3f972..f329263f33aa12 100644
+--- a/drivers/ptp/ptp_private.h
++++ b/drivers/ptp/ptp_private.h
+@@ -22,6 +22,7 @@
+ #define PTP_MAX_TIMESTAMPS 128
+ #define PTP_BUF_TIMESTAMPS 30
+ #define PTP_DEFAULT_MAX_VCLOCKS 20
++#define PTP_MAX_VCLOCKS_LIMIT (KMALLOC_MAX_SIZE/(sizeof(int)))
+ #define PTP_MAX_CHANNELS 2048
+ 
+ enum {
+diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c
+index 6b1b8f57cd9510..200eaf50069681 100644
+--- a/drivers/ptp/ptp_sysfs.c
++++ b/drivers/ptp/ptp_sysfs.c
+@@ -284,7 +284,7 @@ static ssize_t max_vclocks_store(struct device *dev,
+ 	size_t size;
+ 	u32 max;
+ 
+-	if (kstrtou32(buf, 0, &max) || max == 0)
++	if (kstrtou32(buf, 0, &max) || max == 0 || max > PTP_MAX_VCLOCKS_LIMIT)
+ 		return -EINVAL;
+ 
+ 	if (max == ptp->max_vclocks)
+diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c
+index 0125e73b98dfb4..7a86cb090f76f1 100644
+--- a/drivers/pwm/pwm-tiehrpwm.c
++++ b/drivers/pwm/pwm-tiehrpwm.c
+@@ -36,7 +36,7 @@
+ 
+ #define CLKDIV_MAX		7
+ #define HSPCLKDIV_MAX		7
+-#define PERIOD_MAX		0xFFFF
++#define PERIOD_MAX		0x10000
+ 
+ /* compare module registers */
+ #define CMPA			0x12
+@@ -65,14 +65,10 @@
+ #define AQCTL_ZRO_FRCHIGH	BIT(1)
+ #define AQCTL_ZRO_FRCTOGGLE	(BIT(1) | BIT(0))
+ 
+-#define AQCTL_CHANA_POLNORMAL	(AQCTL_CAU_FRCLOW | AQCTL_PRD_FRCHIGH | \
+-				AQCTL_ZRO_FRCHIGH)
+-#define AQCTL_CHANA_POLINVERSED	(AQCTL_CAU_FRCHIGH | AQCTL_PRD_FRCLOW | \
+-				AQCTL_ZRO_FRCLOW)
+-#define AQCTL_CHANB_POLNORMAL	(AQCTL_CBU_FRCLOW | AQCTL_PRD_FRCHIGH | \
+-				AQCTL_ZRO_FRCHIGH)
+-#define AQCTL_CHANB_POLINVERSED	(AQCTL_CBU_FRCHIGH | AQCTL_PRD_FRCLOW | \
+-				AQCTL_ZRO_FRCLOW)
++#define AQCTL_CHANA_POLNORMAL	(AQCTL_CAU_FRCLOW | AQCTL_ZRO_FRCHIGH)
++#define AQCTL_CHANA_POLINVERSED	(AQCTL_CAU_FRCHIGH | AQCTL_ZRO_FRCLOW)
++#define AQCTL_CHANB_POLNORMAL	(AQCTL_CBU_FRCLOW | AQCTL_ZRO_FRCHIGH)
++#define AQCTL_CHANB_POLINVERSED	(AQCTL_CBU_FRCHIGH | AQCTL_ZRO_FRCLOW)
+ 
+ #define AQSFRC_RLDCSF_MASK	(BIT(7) | BIT(6))
+ #define AQSFRC_RLDCSF_ZRO	0
+@@ -108,7 +104,6 @@ struct ehrpwm_pwm_chip {
+ 	unsigned long clk_rate;
+ 	void __iomem *mmio_base;
+ 	unsigned long period_cycles[NUM_PWM_CHANNEL];
+-	enum pwm_polarity polarity[NUM_PWM_CHANNEL];
+ 	struct clk *tbclk;
+ 	struct ehrpwm_context ctx;
+ };
+@@ -166,7 +161,7 @@ static int set_prescale_div(unsigned long rqst_prescaler, u16 *prescale_div,
+ 
+ 			*prescale_div = (1 << clkdiv) *
+ 					(hspclkdiv ? (hspclkdiv * 2) : 1);
+-			if (*prescale_div > rqst_prescaler) {
++			if (*prescale_div >= rqst_prescaler) {
+ 				*tb_clk_div = (clkdiv << TBCTL_CLKDIV_SHIFT) |
+ 					(hspclkdiv << TBCTL_HSPCLKDIV_SHIFT);
+ 				return 0;
+@@ -177,51 +172,20 @@ static int set_prescale_div(unsigned long rqst_prescaler, u16 *prescale_div,
+ 	return 1;
+ }
+ 
+-static void configure_polarity(struct ehrpwm_pwm_chip *pc, int chan)
+-{
+-	u16 aqctl_val, aqctl_mask;
+-	unsigned int aqctl_reg;
+-
+-	/*
+-	 * Configure PWM output to HIGH/LOW level on counter
+-	 * reaches compare register value and LOW/HIGH level
+-	 * on counter value reaches period register value and
+-	 * zero value on counter
+-	 */
+-	if (chan == 1) {
+-		aqctl_reg = AQCTLB;
+-		aqctl_mask = AQCTL_CBU_MASK;
+-
+-		if (pc->polarity[chan] == PWM_POLARITY_INVERSED)
+-			aqctl_val = AQCTL_CHANB_POLINVERSED;
+-		else
+-			aqctl_val = AQCTL_CHANB_POLNORMAL;
+-	} else {
+-		aqctl_reg = AQCTLA;
+-		aqctl_mask = AQCTL_CAU_MASK;
+-
+-		if (pc->polarity[chan] == PWM_POLARITY_INVERSED)
+-			aqctl_val = AQCTL_CHANA_POLINVERSED;
+-		else
+-			aqctl_val = AQCTL_CHANA_POLNORMAL;
+-	}
+-
+-	aqctl_mask |= AQCTL_PRD_MASK | AQCTL_ZRO_MASK;
+-	ehrpwm_modify(pc->mmio_base, aqctl_reg, aqctl_mask, aqctl_val);
+-}
+-
+ /*
+  * period_ns = 10^9 * (ps_divval * period_cycles) / PWM_CLK_RATE
+  * duty_ns   = 10^9 * (ps_divval * duty_cycles) / PWM_CLK_RATE
+  */
+ static int ehrpwm_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
+-			     u64 duty_ns, u64 period_ns)
++			     u64 duty_ns, u64 period_ns, enum pwm_polarity polarity)
+ {
+ 	struct ehrpwm_pwm_chip *pc = to_ehrpwm_pwm_chip(chip);
+ 	u32 period_cycles, duty_cycles;
+ 	u16 ps_divval, tb_divval;
+ 	unsigned int i, cmp_reg;
+ 	unsigned long long c;
++	u16 aqctl_val, aqctl_mask;
++	unsigned int aqctl_reg;
+ 
+ 	if (period_ns > NSEC_PER_SEC)
+ 		return -ERANGE;
+@@ -231,15 +195,10 @@ static int ehrpwm_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ 	do_div(c, NSEC_PER_SEC);
+ 	period_cycles = (unsigned long)c;
+ 
+-	if (period_cycles < 1) {
+-		period_cycles = 1;
+-		duty_cycles = 1;
+-	} else {
+-		c = pc->clk_rate;
+-		c = c * duty_ns;
+-		do_div(c, NSEC_PER_SEC);
+-		duty_cycles = (unsigned long)c;
+-	}
++	c = pc->clk_rate;
++	c = c * duty_ns;
++	do_div(c, NSEC_PER_SEC);
++	duty_cycles = (unsigned long)c;
+ 
+ 	/*
+ 	 * Period values should be same for multiple PWM channels as IP uses
+@@ -265,52 +224,73 @@ static int ehrpwm_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ 	pc->period_cycles[pwm->hwpwm] = period_cycles;
+ 
+ 	/* Configure clock prescaler to support Low frequency PWM wave */
+-	if (set_prescale_div(period_cycles/PERIOD_MAX, &ps_divval,
++	if (set_prescale_div(DIV_ROUND_UP(period_cycles, PERIOD_MAX), &ps_divval,
+ 			     &tb_divval)) {
+ 		dev_err(pwmchip_parent(chip), "Unsupported values\n");
+ 		return -EINVAL;
+ 	}
+ 
+-	pm_runtime_get_sync(pwmchip_parent(chip));
+-
+-	/* Update clock prescaler values */
+-	ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_CLKDIV_MASK, tb_divval);
+-
+ 	/* Update period & duty cycle with prescaler division */
+ 	period_cycles = period_cycles / ps_divval;
+ 	duty_cycles = duty_cycles / ps_divval;
+ 
+-	/* Configure shadow loading on Period register */
+-	ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_PRDLD_MASK, TBCTL_PRDLD_SHDW);
++	if (period_cycles < 1)
++		period_cycles = 1;
+ 
+-	ehrpwm_write(pc->mmio_base, TBPRD, period_cycles);
++	pm_runtime_get_sync(pwmchip_parent(chip));
+ 
+-	/* Configure ehrpwm counter for up-count mode */
+-	ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_CTRMODE_MASK,
+-		      TBCTL_CTRMODE_UP);
++	/* Update clock prescaler values */
++	ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_CLKDIV_MASK, tb_divval);
+ 
+-	if (pwm->hwpwm == 1)
++	if (pwm->hwpwm == 1) {
+ 		/* Channel 1 configured with compare B register */
+ 		cmp_reg = CMPB;
+-	else
++
++		aqctl_reg = AQCTLB;
++		aqctl_mask = AQCTL_CBU_MASK;
++
++		if (polarity == PWM_POLARITY_INVERSED)
++			aqctl_val = AQCTL_CHANB_POLINVERSED;
++		else
++			aqctl_val = AQCTL_CHANB_POLNORMAL;
++
++		/* if duty_cycle is big, don't toggle on CBU */
++		if (duty_cycles > period_cycles)
++			aqctl_val &= ~AQCTL_CBU_MASK;
++
++	} else {
+ 		/* Channel 0 configured with compare A register */
+ 		cmp_reg = CMPA;
+ 
+-	ehrpwm_write(pc->mmio_base, cmp_reg, duty_cycles);
++		aqctl_reg = AQCTLA;
++		aqctl_mask = AQCTL_CAU_MASK;
+ 
+-	pm_runtime_put_sync(pwmchip_parent(chip));
++		if (polarity == PWM_POLARITY_INVERSED)
++			aqctl_val = AQCTL_CHANA_POLINVERSED;
++		else
++			aqctl_val = AQCTL_CHANA_POLNORMAL;
+ 
+-	return 0;
+-}
++		/* if duty_cycle is big, don't toggle on CAU */
++		if (duty_cycles > period_cycles)
++			aqctl_val &= ~AQCTL_CAU_MASK;
++	}
+ 
+-static int ehrpwm_pwm_set_polarity(struct pwm_chip *chip,
+-				   struct pwm_device *pwm,
+-				   enum pwm_polarity polarity)
+-{
+-	struct ehrpwm_pwm_chip *pc = to_ehrpwm_pwm_chip(chip);
++	aqctl_mask |= AQCTL_PRD_MASK | AQCTL_ZRO_MASK;
++	ehrpwm_modify(pc->mmio_base, aqctl_reg, aqctl_mask, aqctl_val);
++
++	/* Configure shadow loading on Period register */
++	ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_PRDLD_MASK, TBCTL_PRDLD_SHDW);
++
++	ehrpwm_write(pc->mmio_base, TBPRD, period_cycles - 1);
+ 
+-	/* Configuration of polarity in hardware delayed, do at enable */
+-	pc->polarity[pwm->hwpwm] = polarity;
++	/* Configure ehrpwm counter for up-count mode */
++	ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_CTRMODE_MASK,
++		      TBCTL_CTRMODE_UP);
++
++	if (!(duty_cycles > period_cycles))
++		ehrpwm_write(pc->mmio_base, cmp_reg, duty_cycles);
++
++	pm_runtime_put_sync(pwmchip_parent(chip));
+ 
+ 	return 0;
+ }
+@@ -339,9 +319,6 @@ static int ehrpwm_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+ 
+ 	ehrpwm_modify(pc->mmio_base, AQCSFRC, aqcsfrc_mask, aqcsfrc_val);
+ 
+-	/* Channels polarity can be configured from action qualifier module */
+-	configure_polarity(pc, pwm->hwpwm);
+-
+ 	/* Enable TBCLK */
+ 	ret = clk_enable(pc->tbclk);
+ 	if (ret) {
+@@ -391,12 +368,7 @@ static void ehrpwm_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
+ {
+ 	struct ehrpwm_pwm_chip *pc = to_ehrpwm_pwm_chip(chip);
+ 
+-	if (pwm_is_enabled(pwm)) {
+-		dev_warn(pwmchip_parent(chip), "Removing PWM device without disabling\n");
+-		pm_runtime_put_sync(pwmchip_parent(chip));
+-	}
+-
+-	/* set period value to zero on free */
++	/* Don't let a pwm without consumer block requests to the other channel */
+ 	pc->period_cycles[pwm->hwpwm] = 0;
+ }
+ 
+@@ -411,10 +383,6 @@ static int ehrpwm_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ 			ehrpwm_pwm_disable(chip, pwm);
+ 			enabled = false;
+ 		}
+-
+-		err = ehrpwm_pwm_set_polarity(chip, pwm, state->polarity);
+-		if (err)
+-			return err;
+ 	}
+ 
+ 	if (!state->enabled) {
+@@ -423,7 +391,7 @@ static int ehrpwm_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ 		return 0;
+ 	}
+ 
+-	err = ehrpwm_pwm_config(chip, pwm, state->duty_cycle, state->period);
++	err = ehrpwm_pwm_config(chip, pwm, state->duty_cycle, state->period, state->polarity);
+ 	if (err)
+ 		return err;
+ 
+diff --git a/drivers/regulator/scmi-regulator.c b/drivers/regulator/scmi-regulator.c
+index 9df726f10ad121..6d609c42e4793b 100644
+--- a/drivers/regulator/scmi-regulator.c
++++ b/drivers/regulator/scmi-regulator.c
+@@ -257,7 +257,8 @@ static int process_scmi_regulator_of_node(struct scmi_device *sdev,
+ 					  struct device_node *np,
+ 					  struct scmi_regulator_info *rinfo)
+ {
+-	u32 dom, ret;
++	u32 dom;
++	int ret;
+ 
+ 	ret = of_property_read_u32(np, "reg", &dom);
+ 	if (ret)
+diff --git a/drivers/remoteproc/pru_rproc.c b/drivers/remoteproc/pru_rproc.c
+index 327f0c7ee3d6ba..7ffbae209a31fb 100644
+--- a/drivers/remoteproc/pru_rproc.c
++++ b/drivers/remoteproc/pru_rproc.c
+@@ -340,7 +340,7 @@ EXPORT_SYMBOL_GPL(pru_rproc_put);
+  */
+ int pru_rproc_set_ctable(struct rproc *rproc, enum pru_ctable_idx c, u32 addr)
+ {
+-	struct pru_rproc *pru = rproc->priv;
++	struct pru_rproc *pru;
+ 	unsigned int reg;
+ 	u32 mask, set;
+ 	u16 idx;
+@@ -352,6 +352,7 @@ int pru_rproc_set_ctable(struct rproc *rproc, enum pru_ctable_idx c, u32 addr)
+ 	if (!rproc->dev.parent || !is_pru_rproc(rproc->dev.parent))
+ 		return -ENODEV;
+ 
++	pru = rproc->priv;
+ 	/* pointer is 16 bit and index is 8-bit so mask out the rest */
+ 	idx_mask = (c >= PRU_C28) ? 0xFFFF : 0xFF;
+ 
+diff --git a/drivers/remoteproc/qcom_q6v5.c b/drivers/remoteproc/qcom_q6v5.c
+index 4ee5e67a9f03f5..769c6d6d6a7316 100644
+--- a/drivers/remoteproc/qcom_q6v5.c
++++ b/drivers/remoteproc/qcom_q6v5.c
+@@ -156,9 +156,6 @@ int qcom_q6v5_wait_for_start(struct qcom_q6v5 *q6v5, int timeout)
+ 	int ret;
+ 
+ 	ret = wait_for_completion_timeout(&q6v5->start_done, timeout);
+-	if (!ret)
+-		disable_irq(q6v5->handover_irq);
+-
+ 	return !ret ? -ETIMEDOUT : 0;
+ }
+ EXPORT_SYMBOL_GPL(qcom_q6v5_wait_for_start);
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
+index d84413b77d8499..421db8996927b6 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
+@@ -987,11 +987,9 @@ mpt3sas_transport_port_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
+ 	list_for_each_entry_safe(mpt3sas_phy, next_phy,
+ 	    &mpt3sas_port->phy_list, port_siblings) {
+ 		if ((ioc->logging_level & MPT_DEBUG_TRANSPORT))
+-			dev_printk(KERN_INFO, &mpt3sas_port->port->dev,
+-			    "remove: sas_addr(0x%016llx), phy(%d)\n",
+-			    (unsigned long long)
+-			    mpt3sas_port->remote_identify.sas_address,
+-			    mpt3sas_phy->phy_id);
++			ioc_info(ioc, "remove: sas_addr(0x%016llx), phy(%d)\n",
++				(unsigned long long) mpt3sas_port->remote_identify.sas_address,
++					mpt3sas_phy->phy_id);
+ 		mpt3sas_phy->phy_belongs_to_port = 0;
+ 		if (!ioc->remove_host)
+ 			sas_port_delete_phy(mpt3sas_port->port,
+diff --git a/drivers/scsi/myrs.c b/drivers/scsi/myrs.c
+index 1469d0c54e4558..5a02fd3bc6c9ef 100644
+--- a/drivers/scsi/myrs.c
++++ b/drivers/scsi/myrs.c
+@@ -498,14 +498,14 @@ static bool myrs_enable_mmio_mbox(struct myrs_hba *cs,
+ 	/* Temporary dma mapping, used only in the scope of this function */
+ 	mbox = dma_alloc_coherent(&pdev->dev, sizeof(union myrs_cmd_mbox),
+ 				  &mbox_addr, GFP_KERNEL);
+-	if (dma_mapping_error(&pdev->dev, mbox_addr))
++	if (!mbox)
+ 		return false;
+ 
+ 	/* These are the base addresses for the command memory mailbox array */
+ 	cs->cmd_mbox_size = MYRS_MAX_CMD_MBOX * sizeof(union myrs_cmd_mbox);
+ 	cmd_mbox = dma_alloc_coherent(&pdev->dev, cs->cmd_mbox_size,
+ 				      &cs->cmd_mbox_addr, GFP_KERNEL);
+-	if (dma_mapping_error(&pdev->dev, cs->cmd_mbox_addr)) {
++	if (!cmd_mbox) {
+ 		dev_err(&pdev->dev, "Failed to map command mailbox\n");
+ 		goto out_free;
+ 	}
+@@ -520,7 +520,7 @@ static bool myrs_enable_mmio_mbox(struct myrs_hba *cs,
+ 	cs->stat_mbox_size = MYRS_MAX_STAT_MBOX * sizeof(struct myrs_stat_mbox);
+ 	stat_mbox = dma_alloc_coherent(&pdev->dev, cs->stat_mbox_size,
+ 				       &cs->stat_mbox_addr, GFP_KERNEL);
+-	if (dma_mapping_error(&pdev->dev, cs->stat_mbox_addr)) {
++	if (!stat_mbox) {
+ 		dev_err(&pdev->dev, "Failed to map status mailbox\n");
+ 		goto out_free;
+ 	}
+@@ -533,7 +533,7 @@ static bool myrs_enable_mmio_mbox(struct myrs_hba *cs,
+ 	cs->fwstat_buf = dma_alloc_coherent(&pdev->dev,
+ 					    sizeof(struct myrs_fwstat),
+ 					    &cs->fwstat_addr, GFP_KERNEL);
+-	if (dma_mapping_error(&pdev->dev, cs->fwstat_addr)) {
++	if (!cs->fwstat_buf) {
+ 		dev_err(&pdev->dev, "Failed to map firmware health buffer\n");
+ 		cs->fwstat_buf = NULL;
+ 		goto out_free;
+diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
+index a9d6dac4133466..4daab8b6d67527 100644
+--- a/drivers/scsi/pm8001/pm8001_sas.c
++++ b/drivers/scsi/pm8001/pm8001_sas.c
+@@ -703,6 +703,7 @@ static void pm8001_dev_gone_notify(struct domain_device *dev)
+ 	unsigned long flags = 0;
+ 	struct pm8001_hba_info *pm8001_ha;
+ 	struct pm8001_device *pm8001_dev = dev->lldd_dev;
++	struct domain_device *parent_dev = dev->parent;
+ 
+ 	pm8001_ha = pm8001_find_ha_by_dev(dev);
+ 	spin_lock_irqsave(&pm8001_ha->lock, flags);
+@@ -719,7 +720,13 @@ static void pm8001_dev_gone_notify(struct domain_device *dev)
+ 			spin_lock_irqsave(&pm8001_ha->lock, flags);
+ 		}
+ 		PM8001_CHIP_DISP->dereg_dev_req(pm8001_ha, device_id);
+-		pm8001_ha->phy[pm8001_dev->attached_phy].phy_attached = 0;
++
++		/*
++		 * The phy array only contains local phys. Thus, we cannot clear
++		 * phy_attached for a device behind an expander.
++		 */
++		if (!(parent_dev && dev_is_expander(parent_dev->dev_type)))
++			pm8001_ha->phy[pm8001_dev->attached_phy].phy_attached = 0;
+ 		pm8001_free_dev(pm8001_dev);
+ 	} else {
+ 		pm8001_dbg(pm8001_ha, DISC, "Found dev has gone.\n");
+diff --git a/drivers/scsi/qla2xxx/qla_edif.c b/drivers/scsi/qla2xxx/qla_edif.c
+index dcde55c8ee5dea..be20e2c457b8ea 100644
+--- a/drivers/scsi/qla2xxx/qla_edif.c
++++ b/drivers/scsi/qla2xxx/qla_edif.c
+@@ -1797,7 +1797,7 @@ qla24xx_sadb_update(struct bsg_job *bsg_job)
+ 	switch (rval) {
+ 	case QLA_SUCCESS:
+ 		break;
+-	case EAGAIN:
++	case -EAGAIN:
+ 		msleep(EDIF_MSLEEP_INTERVAL);
+ 		cnt++;
+ 		if (cnt < EDIF_RETRY_COUNT)
+@@ -3648,7 +3648,7 @@ int qla_edif_process_els(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
+ 		       p->e.extra_rx_xchg_address, p->e.extra_control_flags,
+ 		       sp->handle, sp->remap.req.len, bsg_job);
+ 		break;
+-	case EAGAIN:
++	case -EAGAIN:
+ 		msleep(EDIF_MSLEEP_INTERVAL);
+ 		cnt++;
+ 		if (cnt < EDIF_RETRY_COUNT)
+diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
+index 79cdfec2bca356..8bd4aa935e22b3 100644
+--- a/drivers/scsi/qla2xxx/qla_init.c
++++ b/drivers/scsi/qla2xxx/qla_init.c
+@@ -2059,11 +2059,11 @@ static void qla_marker_sp_done(srb_t *sp, int res)
+ 	int cnt = 5; \
+ 	do { \
+ 		if (_chip_gen != sp->vha->hw->chip_reset || _login_gen != sp->fcport->login_gen) {\
+-			_rval = EINVAL; \
++			_rval = -EINVAL; \
+ 			break; \
+ 		} \
+ 		_rval = qla2x00_start_sp(_sp); \
+-		if (_rval == EAGAIN) \
++		if (_rval == -EAGAIN) \
+ 			msleep(1); \
+ 		else \
+ 			break; \
+diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
+index 8ee2e337c9e1b7..316594aa40cc5a 100644
+--- a/drivers/scsi/qla2xxx/qla_nvme.c
++++ b/drivers/scsi/qla2xxx/qla_nvme.c
+@@ -419,7 +419,7 @@ static int qla_nvme_xmt_ls_rsp(struct nvme_fc_local_port *lport,
+ 	switch (rval) {
+ 	case QLA_SUCCESS:
+ 		break;
+-	case EAGAIN:
++	case -EAGAIN:
+ 		msleep(PURLS_MSLEEP_INTERVAL);
+ 		cnt++;
+ 		if (cnt < PURLS_RETRY_COUNT)
+diff --git a/drivers/soc/mediatek/mtk-svs.c b/drivers/soc/mediatek/mtk-svs.c
+index 9a91298c125397..4cb8169aec6b55 100644
+--- a/drivers/soc/mediatek/mtk-svs.c
++++ b/drivers/soc/mediatek/mtk-svs.c
+@@ -2167,10 +2167,18 @@ static struct device *svs_add_device_link(struct svs_platform *svsp,
+ 	return dev;
+ }
+ 
++static void svs_put_device(void *_dev)
++{
++	struct device *dev = _dev;
++
++	put_device(dev);
++}
++
+ static int svs_mt8192_platform_probe(struct svs_platform *svsp)
+ {
+ 	struct device *dev;
+ 	u32 idx;
++	int ret;
+ 
+ 	svsp->rst = devm_reset_control_get_optional(svsp->dev, "svs_rst");
+ 	if (IS_ERR(svsp->rst))
+@@ -2181,6 +2189,7 @@ static int svs_mt8192_platform_probe(struct svs_platform *svsp)
+ 	if (IS_ERR(dev))
+ 		return dev_err_probe(svsp->dev, PTR_ERR(dev),
+ 				     "failed to get lvts device\n");
++	put_device(dev);
+ 
+ 	for (idx = 0; idx < svsp->bank_max; idx++) {
+ 		struct svs_bank *svsb = &svsp->banks[idx];
+@@ -2190,6 +2199,7 @@ static int svs_mt8192_platform_probe(struct svs_platform *svsp)
+ 		case SVSB_SWID_CPU_LITTLE:
+ 		case SVSB_SWID_CPU_BIG:
+ 			svsb->opp_dev = get_cpu_device(bdata->cpu_id);
++			get_device(svsb->opp_dev);
+ 			break;
+ 		case SVSB_SWID_CCI:
+ 			svsb->opp_dev = svs_add_device_link(svsp, "cci");
+@@ -2209,6 +2219,11 @@ static int svs_mt8192_platform_probe(struct svs_platform *svsp)
+ 			return dev_err_probe(svsp->dev, PTR_ERR(svsb->opp_dev),
+ 					     "failed to get OPP device for bank %d\n",
+ 					     idx);
++
++		ret = devm_add_action_or_reset(svsp->dev, svs_put_device,
++					       svsb->opp_dev);
++		if (ret)
++			return ret;
+ 	}
+ 
+ 	return 0;
+@@ -2218,11 +2233,13 @@ static int svs_mt8183_platform_probe(struct svs_platform *svsp)
+ {
+ 	struct device *dev;
+ 	u32 idx;
++	int ret;
+ 
+ 	dev = svs_add_device_link(svsp, "thermal-sensor");
+ 	if (IS_ERR(dev))
+ 		return dev_err_probe(svsp->dev, PTR_ERR(dev),
+ 				     "failed to get thermal device\n");
++	put_device(dev);
+ 
+ 	for (idx = 0; idx < svsp->bank_max; idx++) {
+ 		struct svs_bank *svsb = &svsp->banks[idx];
+@@ -2232,6 +2249,7 @@ static int svs_mt8183_platform_probe(struct svs_platform *svsp)
+ 		case SVSB_SWID_CPU_LITTLE:
+ 		case SVSB_SWID_CPU_BIG:
+ 			svsb->opp_dev = get_cpu_device(bdata->cpu_id);
++			get_device(svsb->opp_dev);
+ 			break;
+ 		case SVSB_SWID_CCI:
+ 			svsb->opp_dev = svs_add_device_link(svsp, "cci");
+@@ -2248,6 +2266,11 @@ static int svs_mt8183_platform_probe(struct svs_platform *svsp)
+ 			return dev_err_probe(svsp->dev, PTR_ERR(svsb->opp_dev),
+ 					     "failed to get OPP device for bank %d\n",
+ 					     idx);
++
++		ret = devm_add_action_or_reset(svsp->dev, svs_put_device,
++					       svsb->opp_dev);
++		if (ret)
++			return ret;
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c
+index 641f29a98cbd28..cc72a31a450e41 100644
+--- a/drivers/soc/qcom/rpmh-rsc.c
++++ b/drivers/soc/qcom/rpmh-rsc.c
+@@ -453,13 +453,10 @@ static irqreturn_t tcs_tx_done(int irq, void *p)
+ 
+ 		trace_rpmh_tx_done(drv, i, req);
+ 
+-		/*
+-		 * If wake tcs was re-purposed for sending active
+-		 * votes, clear AMC trigger & enable modes and
++		/* Clear AMC trigger & enable modes and
+ 		 * disable interrupt for this TCS
+ 		 */
+-		if (!drv->tcs[ACTIVE_TCS].num_tcs)
+-			__tcs_set_trigger(drv, i, false);
++		__tcs_set_trigger(drv, i, false);
+ skip:
+ 		/* Reclaim the TCS */
+ 		write_tcs_reg(drv, drv->regs[RSC_DRV_CMD_ENABLE], i, 0);
+diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
+index 8d6341b0d8668c..5ad9f4a2148fac 100644
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -2462,7 +2462,7 @@ static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
+ 	if (rc > ctlr->num_chipselect) {
+ 		dev_err(&ctlr->dev, "%pOF has number of CS > ctlr->num_chipselect (%d)\n",
+ 			nc, rc);
+-		return rc;
++		return -EINVAL;
+ 	}
+ 	if ((of_property_read_bool(nc, "parallel-memories")) &&
+ 	    (!(ctlr->flags & SPI_CONTROLLER_MULTI_CS))) {
+diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c
+index 2a7d253d9c554c..8e50476eb71fbc 100644
+--- a/drivers/tee/tee_shm.c
++++ b/drivers/tee/tee_shm.c
+@@ -321,6 +321,14 @@ register_shm_helper(struct tee_context *ctx, struct iov_iter *iter, u32 flags,
+ 	if (unlikely(len <= 0)) {
+ 		ret = len ? ERR_PTR(len) : ERR_PTR(-ENOMEM);
+ 		goto err_free_shm_pages;
++	} else if (DIV_ROUND_UP(len + off, PAGE_SIZE) != num_pages) {
++		/*
++		 * If we only got a few pages, update to release the
++		 * correct amount below.
++		 */
++		shm->num_pages = len / PAGE_SIZE;
++		ret = ERR_PTR(-ENOMEM);
++		goto err_put_shm_pages;
+ 	}
+ 
+ 	/*
+diff --git a/drivers/thermal/qcom/Kconfig b/drivers/thermal/qcom/Kconfig
+index 2c7f3f9a26ebbb..a6bb01082ec697 100644
+--- a/drivers/thermal/qcom/Kconfig
++++ b/drivers/thermal/qcom/Kconfig
+@@ -34,7 +34,8 @@ config QCOM_SPMI_TEMP_ALARM
+ 
+ config QCOM_LMH
+ 	tristate "Qualcomm Limits Management Hardware"
+-	depends on ARCH_QCOM && QCOM_SCM
++	depends on ARCH_QCOM || COMPILE_TEST
++	select QCOM_SCM
+ 	help
+ 	  This enables initialization of Qualcomm limits management
+ 	  hardware(LMh). LMh allows for hardware-enforced mitigation for cpus based on
+diff --git a/drivers/thermal/qcom/lmh.c b/drivers/thermal/qcom/lmh.c
+index d2d49264cf83a4..7c299184c59b1c 100644
+--- a/drivers/thermal/qcom/lmh.c
++++ b/drivers/thermal/qcom/lmh.c
+@@ -5,6 +5,8 @@
+  */
+ #include <linux/module.h>
+ #include <linux/interrupt.h>
++#include <linux/irq.h>
++#include <linux/irqdesc.h>
+ #include <linux/irqdomain.h>
+ #include <linux/err.h>
+ #include <linux/platform_device.h>
+diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
+index 252849910588f6..c917fc20b469a7 100644
+--- a/drivers/tty/n_gsm.c
++++ b/drivers/tty/n_gsm.c
+@@ -461,6 +461,7 @@ static int gsm_send_packet(struct gsm_mux *gsm, struct gsm_msg *msg);
+ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr);
+ static void gsmld_write_trigger(struct gsm_mux *gsm);
+ static void gsmld_write_task(struct work_struct *work);
++static int gsm_modem_send_initial_msc(struct gsm_dlci *dlci);
+ 
+ /**
+  *	gsm_fcs_add	-	update FCS
+@@ -2174,7 +2175,7 @@ static void gsm_dlci_open(struct gsm_dlci *dlci)
+ 		pr_debug("DLCI %d goes open.\n", dlci->addr);
+ 	/* Send current modem state */
+ 	if (dlci->addr) {
+-		gsm_modem_update(dlci, 0);
++		gsm_modem_send_initial_msc(dlci);
+ 	} else {
+ 		/* Start keep-alive control */
+ 		gsm->ka_num = 0;
+@@ -4156,6 +4157,28 @@ static int gsm_modem_upd_via_msc(struct gsm_dlci *dlci, u8 brk)
+ 	return gsm_control_wait(dlci->gsm, ctrl);
+ }
+ 
++/**
++ * gsm_modem_send_initial_msc - Send initial modem status message
++ *
++ * @dlci: channel
++ *
++ * Send an initial MSC message after DLCI open to set the initial
++ * modem status lines. This is only done for basic mode.
++ * Does not wait for a response as we cannot block the input queue
++ * processing.
++ */
++static int gsm_modem_send_initial_msc(struct gsm_dlci *dlci)
++{
++	u8 modembits[2];
++
++	if (dlci->adaption != 1 || dlci->gsm->encoding != GSM_BASIC_OPT)
++		return 0;
++
++	modembits[0] = (dlci->addr << 2) | 2 | EA; /* DLCI, Valid, EA */
++	modembits[1] = (gsm_encode_modem(dlci) << 1) | EA;
++	return gsm_control_command(dlci->gsm, CMD_MSC, (const u8 *)&modembits, 2);
++}
++
+ /**
+  *	gsm_modem_update	-	send modem status line state
+  *	@dlci: channel
+diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
+index 35369a2f77b29e..2f8e3ea4fe1287 100644
+--- a/drivers/tty/serial/max310x.c
++++ b/drivers/tty/serial/max310x.c
+@@ -1641,6 +1641,8 @@ static int max310x_i2c_probe(struct i2c_client *client)
+ 		port_client = devm_i2c_new_dummy_device(&client->dev,
+ 							client->adapter,
+ 							port_addr);
++		if (IS_ERR(port_client))
++			return PTR_ERR(port_client);
+ 
+ 		regcfg_i2c.name = max310x_regmap_name(i);
+ 		regmaps[i] = devm_regmap_init_i2c(port_client, &regcfg_i2c);
+diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c
+index a6551a795f7440..0b414d1168dd53 100644
+--- a/drivers/uio/uio_hv_generic.c
++++ b/drivers/uio/uio_hv_generic.c
+@@ -98,7 +98,6 @@ static void hv_uio_channel_cb(void *context)
+ 	struct hv_device *hv_dev = chan->device_obj;
+ 	struct hv_uio_private_data *pdata = hv_get_drvdata(hv_dev);
+ 
+-	chan->inbound.ring_buffer->interrupt_mask = 1;
+ 	virt_mb();
+ 
+ 	uio_event_notify(&pdata->info);
+@@ -163,8 +162,6 @@ hv_uio_new_channel(struct vmbus_channel *new_sc)
+ 		return;
+ 	}
+ 
+-	/* Disable interrupts on sub channel */
+-	new_sc->inbound.ring_buffer->interrupt_mask = 1;
+ 	set_channel_read_mode(new_sc, HV_CALL_ISR);
+ 	ret = hv_create_ring_sysfs(new_sc, hv_uio_ring_mmap);
+ 	if (ret) {
+@@ -207,9 +204,7 @@ hv_uio_open(struct uio_info *info, struct inode *inode)
+ 
+ 	ret = vmbus_connect_ring(dev->channel,
+ 				 hv_uio_channel_cb, dev->channel);
+-	if (ret == 0)
+-		dev->channel->inbound.ring_buffer->interrupt_mask = 1;
+-	else
++	if (ret)
+ 		atomic_dec(&pdata->refcnt);
+ 
+ 	return ret;
+diff --git a/drivers/usb/cdns3/cdnsp-pci.c b/drivers/usb/cdns3/cdnsp-pci.c
+index 36781ea60f6aa4..4a9eff7fdb12f9 100644
+--- a/drivers/usb/cdns3/cdnsp-pci.c
++++ b/drivers/usb/cdns3/cdnsp-pci.c
+@@ -91,7 +91,7 @@ static int cdnsp_pci_probe(struct pci_dev *pdev,
+ 		cdnsp = kzalloc(sizeof(*cdnsp), GFP_KERNEL);
+ 		if (!cdnsp) {
+ 			ret = -ENOMEM;
+-			goto disable_pci;
++			goto put_pci;
+ 		}
+ 	}
+ 
+@@ -174,9 +174,6 @@ static int cdnsp_pci_probe(struct pci_dev *pdev,
+ 	if (!pci_is_enabled(func))
+ 		kfree(cdnsp);
+ 
+-disable_pci:
+-	pci_disable_device(pdev);
+-
+ put_pci:
+ 	pci_dev_put(func);
+ 
+diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
+index 1b4d0056f1d082..82282373c786b7 100644
+--- a/drivers/usb/gadget/configfs.c
++++ b/drivers/usb/gadget/configfs.c
+@@ -1750,6 +1750,8 @@ static int configfs_composite_bind(struct usb_gadget *gadget,
+ 		cdev->use_os_string = true;
+ 		cdev->b_vendor_code = gi->b_vendor_code;
+ 		memcpy(cdev->qw_sign, gi->qw_sign, OS_STRING_QW_SIGN_LEN);
++	} else {
++		cdev->use_os_string = false;
+ 	}
+ 
+ 	if (gadget_is_otg(gadget) && !otg_desc[0]) {
+diff --git a/drivers/usb/host/max3421-hcd.c b/drivers/usb/host/max3421-hcd.c
+index dcf31a592f5d11..4b5f03f683f775 100644
+--- a/drivers/usb/host/max3421-hcd.c
++++ b/drivers/usb/host/max3421-hcd.c
+@@ -1916,7 +1916,7 @@ max3421_probe(struct spi_device *spi)
+ 	if (hcd) {
+ 		kfree(max3421_hcd->tx);
+ 		kfree(max3421_hcd->rx);
+-		if (max3421_hcd->spi_thread)
++		if (!IS_ERR_OR_NULL(max3421_hcd->spi_thread))
+ 			kthread_stop(max3421_hcd->spi_thread);
+ 		usb_put_hcd(hcd);
+ 	}
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 1002fa51a25aa2..f377725a12128e 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -1199,19 +1199,16 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
+ 			 * Stopped state, but it will soon change to Running.
+ 			 *
+ 			 * Assume this bug on unexpected Stop Endpoint failures.
+-			 * Keep retrying until the EP starts and stops again.
++			 * Keep retrying until the EP starts and stops again, on
++			 * chips where this is known to help. Wait for 100ms.
+ 			 */
++			if (time_is_before_jiffies(ep->stop_time + msecs_to_jiffies(100)))
++				break;
+ 			fallthrough;
+ 		case EP_STATE_RUNNING:
+ 			/* Race, HW handled stop ep cmd before ep was running */
+ 			xhci_dbg(xhci, "Stop ep completion ctx error, ctx_state %d\n",
+ 					GET_EP_CTX_STATE(ep_ctx));
+-			/*
+-			 * Don't retry forever if we guessed wrong or a defective HC never starts
+-			 * the EP or says 'Running' but fails the command. We must give back TDs.
+-			 */
+-			if (time_is_before_jiffies(ep->stop_time + msecs_to_jiffies(100)))
+-				break;
+ 
+ 			command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
+ 			if (!command) {
+diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig
+index 6497c4e81e951a..9bf8fc6247baca 100644
+--- a/drivers/usb/misc/Kconfig
++++ b/drivers/usb/misc/Kconfig
+@@ -147,6 +147,7 @@ config USB_APPLEDISPLAY
+ config USB_QCOM_EUD
+ 	tristate "QCOM Embedded USB Debugger(EUD) Driver"
+ 	depends on ARCH_QCOM || COMPILE_TEST
++	select QCOM_SCM
+ 	select USB_ROLE_SWITCH
+ 	help
+ 	  This module enables support for Qualcomm Technologies, Inc.
+diff --git a/drivers/usb/misc/qcom_eud.c b/drivers/usb/misc/qcom_eud.c
+index 19906301a4eb8a..012e3b9d9bcc8e 100644
+--- a/drivers/usb/misc/qcom_eud.c
++++ b/drivers/usb/misc/qcom_eud.c
+@@ -15,6 +15,7 @@
+ #include <linux/slab.h>
+ #include <linux/sysfs.h>
+ #include <linux/usb/role.h>
++#include <linux/firmware/qcom/qcom_scm.h>
+ 
+ #define EUD_REG_INT1_EN_MASK	0x0024
+ #define EUD_REG_INT_STATUS_1	0x0044
+@@ -34,7 +35,7 @@ struct eud_chip {
+ 	struct device			*dev;
+ 	struct usb_role_switch		*role_sw;
+ 	void __iomem			*base;
+-	void __iomem			*mode_mgr;
++	phys_addr_t			mode_mgr;
+ 	unsigned int			int_status;
+ 	int				irq;
+ 	bool				enabled;
+@@ -43,18 +44,29 @@ struct eud_chip {
+ 
+ static int enable_eud(struct eud_chip *priv)
+ {
++	int ret;
++
++	ret = qcom_scm_io_writel(priv->mode_mgr + EUD_REG_EUD_EN2, 1);
++	if (ret)
++		return ret;
++
+ 	writel(EUD_ENABLE, priv->base + EUD_REG_CSR_EUD_EN);
+ 	writel(EUD_INT_VBUS | EUD_INT_SAFE_MODE,
+ 			priv->base + EUD_REG_INT1_EN_MASK);
+-	writel(1, priv->mode_mgr + EUD_REG_EUD_EN2);
+ 
+ 	return usb_role_switch_set_role(priv->role_sw, USB_ROLE_DEVICE);
+ }
+ 
+-static void disable_eud(struct eud_chip *priv)
++static int disable_eud(struct eud_chip *priv)
+ {
++	int ret;
++
++	ret = qcom_scm_io_writel(priv->mode_mgr + EUD_REG_EUD_EN2, 0);
++	if (ret)
++		return ret;
++
+ 	writel(0, priv->base + EUD_REG_CSR_EUD_EN);
+-	writel(0, priv->mode_mgr + EUD_REG_EUD_EN2);
++	return 0;
+ }
+ 
+ static ssize_t enable_show(struct device *dev,
+@@ -82,11 +94,12 @@ static ssize_t enable_store(struct device *dev,
+ 			chip->enabled = enable;
+ 		else
+ 			disable_eud(chip);
++
+ 	} else {
+-		disable_eud(chip);
++		ret = disable_eud(chip);
+ 	}
+ 
+-	return count;
++	return ret < 0 ? ret : count;
+ }
+ 
+ static DEVICE_ATTR_RW(enable);
+@@ -178,6 +191,7 @@ static void eud_role_switch_release(void *data)
+ static int eud_probe(struct platform_device *pdev)
+ {
+ 	struct eud_chip *chip;
++	struct resource *res;
+ 	int ret;
+ 
+ 	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+@@ -200,9 +214,10 @@ static int eud_probe(struct platform_device *pdev)
+ 	if (IS_ERR(chip->base))
+ 		return PTR_ERR(chip->base);
+ 
+-	chip->mode_mgr = devm_platform_ioremap_resource(pdev, 1);
+-	if (IS_ERR(chip->mode_mgr))
+-		return PTR_ERR(chip->mode_mgr);
++	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
++	if (!res)
++		return -ENODEV;
++	chip->mode_mgr = res->start;
+ 
+ 	chip->irq = platform_get_irq(pdev, 0);
+ 	if (chip->irq < 0)
+diff --git a/drivers/usb/phy/phy-twl6030-usb.c b/drivers/usb/phy/phy-twl6030-usb.c
+index da09cff55abcec..0e732cd53b629a 100644
+--- a/drivers/usb/phy/phy-twl6030-usb.c
++++ b/drivers/usb/phy/phy-twl6030-usb.c
+@@ -328,9 +328,8 @@ static int twl6030_set_vbus(struct phy_companion *comparator, bool enabled)
+ 
+ static int twl6030_usb_probe(struct platform_device *pdev)
+ {
+-	u32 ret;
+ 	struct twl6030_usb	*twl;
+-	int			status, err;
++	int			status, err, ret;
+ 	struct device_node	*np = pdev->dev.of_node;
+ 	struct device		*dev = &pdev->dev;
+ 
+diff --git a/drivers/usb/typec/tipd/core.c b/drivers/usb/typec/tipd/core.c
+index 7ee721a877c12d..1bdf6f5538f441 100644
+--- a/drivers/usb/typec/tipd/core.c
++++ b/drivers/usb/typec/tipd/core.c
+@@ -545,24 +545,23 @@ static irqreturn_t cd321x_interrupt(int irq, void *data)
+ 	if (!event)
+ 		goto err_unlock;
+ 
++	tps6598x_write64(tps, TPS_REG_INT_CLEAR1, event);
++
+ 	if (!tps6598x_read_status(tps, &status))
+-		goto err_clear_ints;
++		goto err_unlock;
+ 
+ 	if (event & APPLE_CD_REG_INT_POWER_STATUS_UPDATE)
+ 		if (!tps6598x_read_power_status(tps))
+-			goto err_clear_ints;
++			goto err_unlock;
+ 
+ 	if (event & APPLE_CD_REG_INT_DATA_STATUS_UPDATE)
+ 		if (!tps6598x_read_data_status(tps))
+-			goto err_clear_ints;
++			goto err_unlock;
+ 
+ 	/* Handle plug insert or removal */
+ 	if (event & APPLE_CD_REG_INT_PLUG_EVENT)
+ 		tps6598x_handle_plug_event(tps, status);
+ 
+-err_clear_ints:
+-	tps6598x_write64(tps, TPS_REG_INT_CLEAR1, event);
+-
+ err_unlock:
+ 	mutex_unlock(&tps->lock);
+ 
+@@ -668,25 +667,24 @@ static irqreturn_t tps6598x_interrupt(int irq, void *data)
+ 	if (!(event1[0] | event1[1] | event2[0] | event2[1]))
+ 		goto err_unlock;
+ 
++	tps6598x_block_write(tps, TPS_REG_INT_CLEAR1, event1, intev_len);
++	tps6598x_block_write(tps, TPS_REG_INT_CLEAR2, event2, intev_len);
++
+ 	if (!tps6598x_read_status(tps, &status))
+-		goto err_clear_ints;
++		goto err_unlock;
+ 
+ 	if ((event1[0] | event2[0]) & TPS_REG_INT_POWER_STATUS_UPDATE)
+ 		if (!tps6598x_read_power_status(tps))
+-			goto err_clear_ints;
++			goto err_unlock;
+ 
+ 	if ((event1[0] | event2[0]) & TPS_REG_INT_DATA_STATUS_UPDATE)
+ 		if (!tps6598x_read_data_status(tps))
+-			goto err_clear_ints;
++			goto err_unlock;
+ 
+ 	/* Handle plug insert or removal */
+ 	if ((event1[0] | event2[0]) & TPS_REG_INT_PLUG_EVENT)
+ 		tps6598x_handle_plug_event(tps, status);
+ 
+-err_clear_ints:
+-	tps6598x_block_write(tps, TPS_REG_INT_CLEAR1, event1, intev_len);
+-	tps6598x_block_write(tps, TPS_REG_INT_CLEAR2, event2, intev_len);
+-
+ err_unlock:
+ 	mutex_unlock(&tps->lock);
+ 
+diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
+index 8dac1edc74d4ee..a793e30d46b789 100644
+--- a/drivers/usb/usbip/vhci_hcd.c
++++ b/drivers/usb/usbip/vhci_hcd.c
+@@ -764,6 +764,17 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag
+ 				 ctrlreq->wValue, vdev->rhport);
+ 
+ 			vdev->udev = usb_get_dev(urb->dev);
++			/*
++			 * NOTE: A similar operation has been done via
++			 * the USB_REQ_GET_DESCRIPTOR handler below, which is
++			 * supposed to always precede USB_REQ_SET_ADDRESS.
++			 *
++			 * It's not entirely clear whether operating on a
++			 * different usb_device instance here is a real
++			 * possibility; if it is not, this call and the
++			 * vdev->udev assignment above should be dropped.
++			 */
++			dev_pm_syscore_device(&vdev->udev->dev, true);
+ 			usb_put_dev(old);
+ 
+ 			spin_lock(&vdev->ud.lock);
+@@ -784,6 +795,17 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag
+ 					"Not yet?:Get_Descriptor to device 0 (get max pipe size)\n");
+ 
+ 			vdev->udev = usb_get_dev(urb->dev);
++			/*
++			 * Set syscore PM flag for the virtually attached
++			 * devices to ensure they will not enter suspend on
++			 * the client side.
++			 *
++			 * Note this doesn't have any impact on the physical
++			 * devices attached to the host system on the server
++			 * side, hence there is no need to undo the operation
++			 * on disconnect.
++			 */
++			dev_pm_syscore_device(&vdev->udev->dev, true);
+ 			usb_put_dev(old);
+ 			goto out;
+ 
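
dev_pm_syscore_device() (declared in <linux/pm.h>) tells the PM core to leave a device alone during system suspend and resume. The two insertions above set it on the virtual usb_device created for each imported port, so the client kernel never tries to suspend a device whose real state lives on the server side. A sketch of the call, in a hypothetical attach helper:

#include <linux/pm.h>
#include <linux/usb.h>

/* Hypothetical attach path: mark a virtual device as syscore so the
 * PM core skips it during system-wide suspend/resume transitions.
 */
static void my_virtual_attach(struct usb_device *udev)
{
	dev_pm_syscore_device(&udev->dev, true);
}
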
+diff --git a/drivers/vfio/pci/pds/dirty.c b/drivers/vfio/pci/pds/dirty.c
+index c51f5e4c3dd6d2..481992142f7901 100644
+--- a/drivers/vfio/pci/pds/dirty.c
++++ b/drivers/vfio/pci/pds/dirty.c
+@@ -82,7 +82,7 @@ static int pds_vfio_dirty_alloc_bitmaps(struct pds_vfio_region *region,
+ 
+ 	host_ack_bmp = vzalloc(bytes);
+ 	if (!host_ack_bmp) {
+-		bitmap_free(host_seq_bmp);
++		vfree(host_seq_bmp);
+ 		return -ENOMEM;
+ 	}
+ 
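
host_seq_bmp was allocated a few lines earlier with vzalloc(), so the error path has to release it with vfree(); bitmap_free() is a kfree() wrapper and only pairs with bitmap_alloc()/bitmap_zalloc(). The matched pairs, as a sketch:

#include <linux/vmalloc.h>

/* vzalloc() memory must go back through vfree(); mixing in kfree()-based
 * helpers such as bitmap_free() corrupts allocator state.
 */
static int alloc_bitmap_pair(unsigned long **seq, unsigned long **ack,
			     size_t bytes)
{
	*seq = vzalloc(bytes);
	if (!*seq)
		return -ENOMEM;

	*ack = vzalloc(bytes);
	if (!*ack) {
		vfree(*seq);	/* matches vzalloc() above */
		return -ENOMEM;
	}
	return 0;
}
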
+diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
+index 73e153f9b4495e..781731eb95cfe4 100644
+--- a/drivers/vhost/vringh.c
++++ b/drivers/vhost/vringh.c
+@@ -1191,6 +1191,7 @@ static inline int copy_from_iotlb(const struct vringh *vrh, void *dst,
+ 		struct iov_iter iter;
+ 		u64 translated;
+ 		int ret;
++		size_t size;
+ 
+ 		ret = iotlb_translate(vrh, (u64)(uintptr_t)src,
+ 				      len - total_translated, &translated,
+@@ -1208,9 +1209,9 @@ static inline int copy_from_iotlb(const struct vringh *vrh, void *dst,
+ 				      translated);
+ 		}
+ 
+-		ret = copy_from_iter(dst, translated, &iter);
+-		if (ret < 0)
+-			return ret;
++		size = copy_from_iter(dst, translated, &iter);
++		if (size != translated)
++			return -EFAULT;
+ 
+ 		src += translated;
+ 		dst += translated;
+@@ -1237,6 +1238,7 @@ static inline int copy_to_iotlb(const struct vringh *vrh, void *dst,
+ 		struct iov_iter iter;
+ 		u64 translated;
+ 		int ret;
++		size_t size;
+ 
+ 		ret = iotlb_translate(vrh, (u64)(uintptr_t)dst,
+ 				      len - total_translated, &translated,
+@@ -1254,9 +1256,9 @@ static inline int copy_to_iotlb(const struct vringh *vrh, void *dst,
+ 				      translated);
+ 		}
+ 
+-		ret = copy_to_iter(src, translated, &iter);
+-		if (ret < 0)
+-			return ret;
++		size = copy_to_iter(src, translated, &iter);
++		if (size != translated)
++			return -EFAULT;
+ 
+ 		src += translated;
+ 		dst += translated;
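
copy_from_iter() and copy_to_iter() return the number of bytes copied as a size_t; they never return a negative errno, so the old `ret < 0` tests could not fire and a short copy was silently treated as success. The fix compares the result against the requested length and maps any shortfall to -EFAULT. The pattern in isolation:

#include <linux/uio.h>

/* A short copy is the only failure signal from copy_from_iter(), so
 * compare the size_t result against the requested length.
 */
static int copy_all_or_efault(void *dst, size_t len, struct iov_iter *iter)
{
	size_t copied = copy_from_iter(dst, len, iter);

	return copied == len ? 0 : -EFAULT;
}
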
+diff --git a/drivers/video/fbdev/simplefb.c b/drivers/video/fbdev/simplefb.c
+index be95fcddce4c8c..85bc40aa8b2c62 100644
+--- a/drivers/video/fbdev/simplefb.c
++++ b/drivers/video/fbdev/simplefb.c
+@@ -93,6 +93,7 @@ struct simplefb_par {
+ 
+ static void simplefb_clocks_destroy(struct simplefb_par *par);
+ static void simplefb_regulators_destroy(struct simplefb_par *par);
++static void simplefb_detach_genpds(void *res);
+ 
+ /*
+  * fb_ops.fb_destroy is called by the last put_fb_info() call at the end
+@@ -105,6 +106,7 @@ static void simplefb_destroy(struct fb_info *info)
+ 
+ 	simplefb_regulators_destroy(info->par);
+ 	simplefb_clocks_destroy(info->par);
++	simplefb_detach_genpds(info->par);
+ 	if (info->screen_base)
+ 		iounmap(info->screen_base);
+ 
+@@ -454,13 +456,14 @@ static void simplefb_detach_genpds(void *res)
+ 		if (!IS_ERR_OR_NULL(par->genpds[i]))
+ 			dev_pm_domain_detach(par->genpds[i], true);
+ 	}
++	par->num_genpds = 0;
+ }
+ 
+ static int simplefb_attach_genpds(struct simplefb_par *par,
+ 				  struct platform_device *pdev)
+ {
+ 	struct device *dev = &pdev->dev;
+-	unsigned int i;
++	unsigned int i, num_genpds;
+ 	int err;
+ 
+ 	err = of_count_phandle_with_args(dev->of_node, "power-domains",
+@@ -474,26 +477,35 @@ static int simplefb_attach_genpds(struct simplefb_par *par,
+ 		return err;
+ 	}
+ 
+-	par->num_genpds = err;
++	num_genpds = err;
+ 
+ 	/*
+ 	 * Single power-domain devices are handled by the driver core, so
+ 	 * nothing to do here.
+ 	 */
+-	if (par->num_genpds <= 1)
++	if (num_genpds <= 1) {
++		par->num_genpds = num_genpds;
+ 		return 0;
++	}
+ 
+-	par->genpds = devm_kcalloc(dev, par->num_genpds, sizeof(*par->genpds),
++	par->genpds = devm_kcalloc(dev, num_genpds, sizeof(*par->genpds),
+ 				   GFP_KERNEL);
+ 	if (!par->genpds)
+ 		return -ENOMEM;
+ 
+-	par->genpd_links = devm_kcalloc(dev, par->num_genpds,
++	par->genpd_links = devm_kcalloc(dev, num_genpds,
+ 					sizeof(*par->genpd_links),
+ 					GFP_KERNEL);
+ 	if (!par->genpd_links)
+ 		return -ENOMEM;
+ 
++	/*
++	 * Set par->num_genpds only after genpds and genpd_links are allocated,
++	 * so that simplefb_detach_genpds() exits early if initialisation
++	 * never completed.
++	 */
++	par->num_genpds = num_genpds;
++
+ 	for (i = 0; i < par->num_genpds; i++) {
+ 		par->genpds[i] = dev_pm_domain_attach_by_id(dev, i);
+ 		if (IS_ERR(par->genpds[i])) {
+@@ -515,9 +527,10 @@ static int simplefb_attach_genpds(struct simplefb_par *par,
+ 			dev_warn(dev, "failed to link power-domain %u\n", i);
+ 	}
+ 
+-	return devm_add_action_or_reset(dev, simplefb_detach_genpds, par);
++	return 0;
+ }
+ #else
++static void simplefb_detach_genpds(void *res) { }
+ static int simplefb_attach_genpds(struct simplefb_par *par,
+ 				  struct platform_device *pdev)
+ {
+@@ -631,18 +644,20 @@ static int simplefb_probe(struct platform_device *pdev)
+ 	ret = devm_aperture_acquire_for_platform_device(pdev, par->base, par->size);
+ 	if (ret) {
+ 		dev_err(&pdev->dev, "Unable to acquire aperture: %d\n", ret);
+-		goto error_regulators;
++		goto error_genpds;
+ 	}
+ 	ret = register_framebuffer(info);
+ 	if (ret < 0) {
+ 		dev_err(&pdev->dev, "Unable to register simplefb: %d\n", ret);
+-		goto error_regulators;
++		goto error_genpds;
+ 	}
+ 
+ 	dev_info(&pdev->dev, "fb%d: simplefb registered!\n", info->node);
+ 
+ 	return 0;
+ 
++error_genpds:
++	simplefb_detach_genpds(par);
+ error_regulators:
+ 	simplefb_regulators_destroy(par);
+ error_clocks:
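
The thread running through these simplefb hunks is ordering: par->num_genpds is published only once the genpds and genpd_links arrays exist, simplefb_detach_genpds() resets it to zero, and the error paths and simplefb_destroy() may now call the detach helper unconditionally instead of relying on a devm action. A generic sketch of the publish-the-count-last idea; the struct and names are hypothetical:

#include <linux/device.h>
#include <linux/slab.h>

/* Publish a count only after the storage it guards is allocated; any
 * teardown loop bounded by the count then never touches invalid memory.
 */
struct item_set {
	unsigned int count;	/* stays 0 until items[] is valid */
	void **items;
};

static int item_set_init(struct device *dev, struct item_set *s,
			 unsigned int n)
{
	s->items = devm_kcalloc(dev, n, sizeof(*s->items), GFP_KERNEL);
	if (!s->items)
		return -ENOMEM;	/* count still 0: teardown is a no-op */

	s->count = n;		/* published last */
	return 0;
}
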
+diff --git a/drivers/watchdog/mpc8xxx_wdt.c b/drivers/watchdog/mpc8xxx_wdt.c
+index 867f9f31137971..a4b497ecfa2051 100644
+--- a/drivers/watchdog/mpc8xxx_wdt.c
++++ b/drivers/watchdog/mpc8xxx_wdt.c
+@@ -100,6 +100,8 @@ static int mpc8xxx_wdt_start(struct watchdog_device *w)
+ 	ddata->swtc = tmp >> 16;
+ 	set_bit(WDOG_HW_RUNNING, &ddata->wdd.status);
+ 
++	mpc8xxx_wdt_keepalive(ddata);
++
+ 	return 0;
+ }
+ 
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index afebc91882befd..60fe155b1ce05d 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -1479,7 +1479,7 @@ static noinline_for_stack int extent_writepage_io(struct btrfs_inode *inode,
+ 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ 	unsigned long range_bitmap = 0;
+ 	bool submitted_io = false;
+-	bool error = false;
++	int found_error = 0;
+ 	const u64 folio_start = folio_pos(folio);
+ 	u64 cur;
+ 	int bit;
+@@ -1536,7 +1536,8 @@ static noinline_for_stack int extent_writepage_io(struct btrfs_inode *inode,
+ 			 */
+ 			btrfs_mark_ordered_io_finished(inode, folio, cur,
+ 						       fs_info->sectorsize, false);
+-			error = true;
++			if (!found_error)
++				found_error = ret;
+ 			continue;
+ 		}
+ 		submitted_io = true;
+@@ -1553,11 +1554,11 @@ static noinline_for_stack int extent_writepage_io(struct btrfs_inode *inode,
+ 	 * If we hit any error, the corresponding sector will still be dirty
+ 	 * thus no need to clear PAGECACHE_TAG_DIRTY.
+ 	 */
+-	if (!submitted_io && !error) {
++	if (!submitted_io && !found_error) {
+ 		btrfs_folio_set_writeback(fs_info, folio, start, len);
+ 		btrfs_folio_clear_writeback(fs_info, folio, start, len);
+ 	}
+-	return ret;
++	return found_error;
+ }
+ 
+ /*
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index d8120b88fa00e9..37e18e654f715b 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -1969,6 +1969,16 @@ static inline bool ext4_verity_in_progress(struct inode *inode)
+ 
+ #define NEXT_ORPHAN(inode) EXT4_I(inode)->i_dtime
+ 
++/*
++ * Check whether the inode is tracked as orphan (either in orphan file or
++ * orphan list).
++ */
++static inline bool ext4_inode_orphan_tracked(struct inode *inode)
++{
++	return ext4_test_inode_state(inode, EXT4_STATE_ORPHAN_FILE) ||
++		!list_empty(&EXT4_I(inode)->i_orphan);
++}
++
+ /*
+  * Codes for operating systems
+  */
+diff --git a/fs/ext4/file.c b/fs/ext4/file.c
+index 6c692151b0d6c5..7d949ed0ab5fac 100644
+--- a/fs/ext4/file.c
++++ b/fs/ext4/file.c
+@@ -354,7 +354,7 @@ static void ext4_inode_extension_cleanup(struct inode *inode, bool need_trunc)
+ 	 * to cleanup the orphan list in ext4_handle_inode_extension(). Do it
+ 	 * now.
+ 	 */
+-	if (!list_empty(&EXT4_I(inode)->i_orphan) && inode->i_nlink) {
++	if (ext4_inode_orphan_tracked(inode) && inode->i_nlink) {
+ 		handle_t *handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
+ 
+ 		if (IS_ERR(handle)) {
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 7923602271ad0a..558a585c5df513 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -4330,7 +4330,7 @@ static int ext4_fill_raw_inode(struct inode *inode, struct ext4_inode *raw_inode
+ 		 * old inodes get re-used with the upper 16 bits of the
+ 		 * uid/gid intact.
+ 		 */
+-		if (ei->i_dtime && list_empty(&ei->i_orphan)) {
++		if (ei->i_dtime && !ext4_inode_orphan_tracked(inode)) {
+ 			raw_inode->i_uid_high = 0;
+ 			raw_inode->i_gid_high = 0;
+ 		} else {
+diff --git a/fs/ext4/orphan.c b/fs/ext4/orphan.c
+index a23b0c01f8096d..c53918768cb256 100644
+--- a/fs/ext4/orphan.c
++++ b/fs/ext4/orphan.c
+@@ -109,11 +109,7 @@ int ext4_orphan_add(handle_t *handle, struct inode *inode)
+ 
+ 	WARN_ON_ONCE(!(inode->i_state & (I_NEW | I_FREEING)) &&
+ 		     !inode_is_locked(inode));
+-	/*
+-	 * Inode orphaned in orphan file or in orphan list?
+-	 */
+-	if (ext4_test_inode_state(inode, EXT4_STATE_ORPHAN_FILE) ||
+-	    !list_empty(&EXT4_I(inode)->i_orphan))
++	if (ext4_inode_orphan_tracked(inode))
+ 		return 0;
+ 
+ 	/*
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 58d125ad237143..cbb65e61c4926f 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -1461,9 +1461,9 @@ static void ext4_free_in_core_inode(struct inode *inode)
+ 
+ static void ext4_destroy_inode(struct inode *inode)
+ {
+-	if (!list_empty(&(EXT4_I(inode)->i_orphan))) {
++	if (ext4_inode_orphan_tracked(inode)) {
+ 		ext4_msg(inode->i_sb, KERN_ERR,
+-			 "Inode %lu (%p): orphan list check failed!",
++			 "Inode %lu (%p): inode tracked as orphan!",
+ 			 inode->i_ino, EXT4_I(inode));
+ 		print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 4,
+ 				EXT4_I(inode), sizeof(struct ext4_inode_info),
+diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
+index efc30626760a6e..040c06dfb8c033 100644
+--- a/fs/f2fs/data.c
++++ b/fs/f2fs/data.c
+@@ -1781,12 +1781,13 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, int flag)
+ 		if (map->m_flags & F2FS_MAP_MAPPED) {
+ 			unsigned int ofs = start_pgofs - map->m_lblk;
+ 
+-			f2fs_update_read_extent_cache_range(&dn,
+-				start_pgofs, map->m_pblk + ofs,
+-				map->m_len - ofs);
++			if (map->m_len > ofs)
++				f2fs_update_read_extent_cache_range(&dn,
++					start_pgofs, map->m_pblk + ofs,
++					map->m_len - ofs);
+ 		}
+ 		if (map->m_next_extent)
+-			*map->m_next_extent = pgofs + 1;
++			*map->m_next_extent = is_hole ? pgofs + 1 : pgofs;
+ 	}
+ 	f2fs_put_dnode(&dn);
+ unlock_out:
+diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
+index 2dec22f2ea639b..0d3ef487f72aca 100644
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -2331,8 +2331,6 @@ static inline bool __allow_reserved_blocks(struct f2fs_sb_info *sbi,
+ {
+ 	if (!inode)
+ 		return true;
+-	if (!test_opt(sbi, RESERVE_ROOT))
+-		return false;
+ 	if (IS_NOQUOTA(inode))
+ 		return true;
+ 	if (uid_eq(F2FS_OPTION(sbi).s_resuid, current_fsuid()))
+@@ -2353,7 +2351,7 @@ static inline unsigned int get_available_block_count(struct f2fs_sb_info *sbi,
+ 	avail_user_block_count = sbi->user_block_count -
+ 					sbi->current_reserved_blocks;
+ 
+-	if (!__allow_reserved_blocks(sbi, inode, cap))
++	if (test_opt(sbi, RESERVE_ROOT) && !__allow_reserved_blocks(sbi, inode, cap))
+ 		avail_user_block_count -= F2FS_OPTION(sbi).root_reserved_blocks;
+ 
+ 	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
+diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
+index fa77841f3e2cca..2a108c561e8bc0 100644
+--- a/fs/f2fs/file.c
++++ b/fs/f2fs/file.c
+@@ -35,15 +35,23 @@
+ #include <trace/events/f2fs.h>
+ #include <uapi/linux/f2fs.h>
+ 
+-static void f2fs_zero_post_eof_page(struct inode *inode, loff_t new_size)
++static void f2fs_zero_post_eof_page(struct inode *inode,
++					loff_t new_size, bool lock)
+ {
+ 	loff_t old_size = i_size_read(inode);
+ 
+ 	if (old_size >= new_size)
+ 		return;
+ 
++	if (mapping_empty(inode->i_mapping))
++		return;
++
++	if (lock)
++		filemap_invalidate_lock(inode->i_mapping);
+ 	/* zero or drop pages only in range of [old_size, new_size] */
+-	truncate_pagecache(inode, old_size);
++	truncate_inode_pages_range(inode->i_mapping, old_size, new_size);
++	if (lock)
++		filemap_invalidate_unlock(inode->i_mapping);
+ }
+ 
+ static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
+@@ -114,9 +122,7 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
+ 
+ 	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));
+ 
+-	filemap_invalidate_lock(inode->i_mapping);
+-	f2fs_zero_post_eof_page(inode, (folio->index + 1) << PAGE_SHIFT);
+-	filemap_invalidate_unlock(inode->i_mapping);
++	f2fs_zero_post_eof_page(inode, (folio->index + 1) << PAGE_SHIFT, true);
+ 
+ 	file_update_time(vmf->vma->vm_file);
+ 	filemap_invalidate_lock_shared(inode->i_mapping);
+@@ -856,8 +862,16 @@ int f2fs_truncate(struct inode *inode)
+ 	/* we should check inline_data size */
+ 	if (!f2fs_may_inline_data(inode)) {
+ 		err = f2fs_convert_inline_inode(inode);
+-		if (err)
++		if (err) {
++			/*
++			 * Always truncate page #0 to avoid a page cache
++			 * leak in the evict() path.
++			 */
++			truncate_inode_pages_range(inode->i_mapping,
++					F2FS_BLK_TO_BYTES(0),
++					F2FS_BLK_END_BYTES(0));
+ 			return err;
++		}
+ 	}
+ 
+ 	err = f2fs_truncate_blocks(inode, i_size_read(inode), true);
+@@ -1081,7 +1095,7 @@ int f2fs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
+ 		filemap_invalidate_lock(inode->i_mapping);
+ 
+ 		if (attr->ia_size > old_size)
+-			f2fs_zero_post_eof_page(inode, attr->ia_size);
++			f2fs_zero_post_eof_page(inode, attr->ia_size, false);
+ 		truncate_setsize(inode, attr->ia_size);
+ 
+ 		if (attr->ia_size <= old_size)
+@@ -1200,9 +1214,7 @@ static int f2fs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
+ 	if (ret)
+ 		return ret;
+ 
+-	filemap_invalidate_lock(inode->i_mapping);
+-	f2fs_zero_post_eof_page(inode, offset + len);
+-	filemap_invalidate_unlock(inode->i_mapping);
++	f2fs_zero_post_eof_page(inode, offset + len, true);
+ 
+ 	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
+ 	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
+@@ -1487,7 +1499,7 @@ static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
+ 	f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ 	filemap_invalidate_lock(inode->i_mapping);
+ 
+-	f2fs_zero_post_eof_page(inode, offset + len);
++	f2fs_zero_post_eof_page(inode, offset + len, false);
+ 
+ 	f2fs_lock_op(sbi);
+ 	f2fs_drop_extent_tree(inode);
+@@ -1610,9 +1622,7 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
+ 	if (ret)
+ 		return ret;
+ 
+-	filemap_invalidate_lock(mapping);
+-	f2fs_zero_post_eof_page(inode, offset + len);
+-	filemap_invalidate_unlock(mapping);
++	f2fs_zero_post_eof_page(inode, offset + len, true);
+ 
+ 	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
+ 	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
+@@ -1746,7 +1756,7 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
+ 	f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ 	filemap_invalidate_lock(mapping);
+ 
+-	f2fs_zero_post_eof_page(inode, offset + len);
++	f2fs_zero_post_eof_page(inode, offset + len, false);
+ 	truncate_pagecache(inode, offset);
+ 
+ 	while (!ret && idx > pg_start) {
+@@ -1804,9 +1814,7 @@ static int f2fs_expand_inode_data(struct inode *inode, loff_t offset,
+ 	if (err)
+ 		return err;
+ 
+-	filemap_invalidate_lock(inode->i_mapping);
+-	f2fs_zero_post_eof_page(inode, offset + len);
+-	filemap_invalidate_unlock(inode->i_mapping);
++	f2fs_zero_post_eof_page(inode, offset + len, true);
+ 
+ 	f2fs_balance_fs(sbi, true);
+ 
+@@ -4751,9 +4759,8 @@ static ssize_t f2fs_write_checks(struct kiocb *iocb, struct iov_iter *from)
+ 	if (err)
+ 		return err;
+ 
+-	filemap_invalidate_lock(inode->i_mapping);
+-	f2fs_zero_post_eof_page(inode, iocb->ki_pos + iov_iter_count(from));
+-	filemap_invalidate_unlock(inode->i_mapping);
++	f2fs_zero_post_eof_page(inode,
++		iocb->ki_pos + iov_iter_count(from), true);
+ 	return count;
+ }
+ 
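
The recurring change in this file is mechanical: f2fs_zero_post_eof_page() grows a `lock` parameter and takes filemap_invalidate_lock() itself when asked, so call sites that used to wrap it in their own lock/unlock pairs now pass true, while callers already holding the lock (setattr, collapse, insert) pass false. A stripped-down sketch of the conditional-locking helper shape:

#include <linux/fs.h>

/* 'lock' says whether the helper owns the invalidate lock; callers that
 * already hold it pass false to avoid a self-deadlock.
 */
static void zero_tail(struct inode *inode, loff_t new_size, bool lock)
{
	if (lock)
		filemap_invalidate_lock(inode->i_mapping);

	/* ... truncate/zero page cache beyond the old EOF ... */

	if (lock)
		filemap_invalidate_unlock(inode->i_mapping);
}
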
+diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
+index 161fc76ed5b0ed..e5558e63e2cba7 100644
+--- a/fs/gfs2/glock.c
++++ b/fs/gfs2/glock.c
+@@ -801,8 +801,6 @@ __acquires(&gl->gl_lockref.lock)
+ 			clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
+ 			gfs2_glock_queue_work(gl, GL_GLOCK_DFT_HOLD);
+ 			return;
+-		} else {
+-			clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
+ 		}
+ 	}
+ 
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index ea92483d5e71ec..c21e63027fc0ea 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -9393,7 +9393,7 @@ static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args
+ 		goto out;
+ 	if (rcvd->max_rqst_sz > sent->max_rqst_sz)
+ 		return -EINVAL;
+-	if (rcvd->max_resp_sz < sent->max_resp_sz)
++	if (rcvd->max_resp_sz > sent->max_resp_sz)
+ 		return -EINVAL;
+ 	if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached)
+ 		return -EINVAL;
+diff --git a/fs/ntfs3/index.c b/fs/ntfs3/index.c
+index 1bf2a6593dec66..6d1bf890929d92 100644
+--- a/fs/ntfs3/index.c
++++ b/fs/ntfs3/index.c
+@@ -1508,6 +1508,16 @@ static int indx_add_allocate(struct ntfs_index *indx, struct ntfs_inode *ni,
+ 			bmp_size = bmp_size_v = le32_to_cpu(bmp->res.data_size);
+ 		}
+ 
++		/*
++		 * Index blocks exist, but $BITMAP has zero valid bits.
++		 * This implies on-disk corruption and must be rejected.
++		 */
++		if (in->name == I30_NAME &&
++		    unlikely(bmp_size_v == 0 && indx->alloc_run.count)) {
++			err = -EINVAL;
++			goto out1;
++		}
++
+ 		bit = bmp_size << 3;
+ 	}
+ 
+diff --git a/fs/ntfs3/run.c b/fs/ntfs3/run.c
+index 48566dff0dc92b..662add939da78b 100644
+--- a/fs/ntfs3/run.c
++++ b/fs/ntfs3/run.c
+@@ -9,6 +9,7 @@
+ #include <linux/blkdev.h>
+ #include <linux/fs.h>
+ #include <linux/log2.h>
++#include <linux/overflow.h>
+ 
+ #include "debug.h"
+ #include "ntfs.h"
+@@ -982,14 +983,18 @@ int run_unpack(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
+ 
+ 			if (!dlcn)
+ 				return -EINVAL;
+-			lcn = prev_lcn + dlcn;
++
++			if (check_add_overflow(prev_lcn, dlcn, &lcn))
++				return -EINVAL;
+ 			prev_lcn = lcn;
+ 		} else {
+ 			/* The size of 'dlcn' can't be > 8. */
+ 			return -EINVAL;
+ 		}
+ 
+-		next_vcn = vcn64 + len;
++		if (check_add_overflow(vcn64, len, &next_vcn))
++			return -EINVAL;
++
+ 		/* Check boundary. */
+ 		if (next_vcn > evcn + 1)
+ 			return -EINVAL;
+@@ -1153,7 +1158,8 @@ int run_get_highest_vcn(CLST vcn, const u8 *run_buf, u64 *highest_vcn)
+ 			return -EINVAL;
+ 
+ 		run_buf += size_size + offset_size;
+-		vcn64 += len;
++		if (check_add_overflow(vcn64, len, &vcn64))
++			return -EINVAL;
+ 
+ #ifndef CONFIG_NTFS3_64BIT_CLUSTER
+ 		if (vcn64 > 0x100000000ull)
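
check_add_overflow() from <linux/overflow.h> performs the addition and returns true if the result wrapped, storing the (wrapped) sum through the third argument. With attacker-controlled run lists, an unchecked `prev_lcn + dlcn` or `vcn64 + len` could wrap past the `next_vcn > evcn + 1` boundary test; the checked form rejects such input outright:

#include <linux/overflow.h>

/* Returns 0 and stores vcn + len in *next, or -EINVAL if the addition
 * wrapped (a sign of a corrupt or hostile run list).
 */
static int advance_vcn(u64 vcn, u64 len, u64 *next)
{
	if (check_add_overflow(vcn, len, next))
		return -EINVAL;
	return 0;
}
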
+diff --git a/fs/ocfs2/stack_user.c b/fs/ocfs2/stack_user.c
+index 77edcd70f72c21..c5236b3ed168f6 100644
+--- a/fs/ocfs2/stack_user.c
++++ b/fs/ocfs2/stack_user.c
+@@ -1018,6 +1018,7 @@ static int user_cluster_connect(struct ocfs2_cluster_connection *conn)
+ 			printk(KERN_ERR "ocfs2: Could not determine"
+ 					" locking version\n");
+ 			user_cluster_disconnect(conn);
++			lc = NULL;
+ 			goto out;
+ 		}
+ 		wait_event(lc->oc_wait, (atomic_read(&lc->oc_this_node) > 0));
+diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
+index ab911a9672465c..c946c3a09245c6 100644
+--- a/fs/smb/client/smb2ops.c
++++ b/fs/smb/client/smb2ops.c
+@@ -4223,7 +4223,7 @@ fill_transform_hdr(struct smb2_transform_hdr *tr_hdr, unsigned int orig_len,
+ static void *smb2_aead_req_alloc(struct crypto_aead *tfm, const struct smb_rqst *rqst,
+ 				 int num_rqst, const u8 *sig, u8 **iv,
+ 				 struct aead_request **req, struct sg_table *sgt,
+-				 unsigned int *num_sgs, size_t *sensitive_size)
++				 unsigned int *num_sgs)
+ {
+ 	unsigned int req_size = sizeof(**req) + crypto_aead_reqsize(tfm);
+ 	unsigned int iv_size = crypto_aead_ivsize(tfm);
+@@ -4240,9 +4240,8 @@ static void *smb2_aead_req_alloc(struct crypto_aead *tfm, const struct smb_rqst
+ 	len += req_size;
+ 	len = ALIGN(len, __alignof__(struct scatterlist));
+ 	len += array_size(*num_sgs, sizeof(struct scatterlist));
+-	*sensitive_size = len;
+ 
+-	p = kvzalloc(len, GFP_NOFS);
++	p = kzalloc(len, GFP_NOFS);
+ 	if (!p)
+ 		return ERR_PTR(-ENOMEM);
+ 
+@@ -4256,16 +4255,14 @@ static void *smb2_aead_req_alloc(struct crypto_aead *tfm, const struct smb_rqst
+ 
+ static void *smb2_get_aead_req(struct crypto_aead *tfm, struct smb_rqst *rqst,
+ 			       int num_rqst, const u8 *sig, u8 **iv,
+-			       struct aead_request **req, struct scatterlist **sgl,
+-			       size_t *sensitive_size)
++			       struct aead_request **req, struct scatterlist **sgl)
+ {
+ 	struct sg_table sgtable = {};
+ 	unsigned int skip, num_sgs, i, j;
+ 	ssize_t rc;
+ 	void *p;
+ 
+-	p = smb2_aead_req_alloc(tfm, rqst, num_rqst, sig, iv, req, &sgtable,
+-				&num_sgs, sensitive_size);
++	p = smb2_aead_req_alloc(tfm, rqst, num_rqst, sig, iv, req, &sgtable, &num_sgs);
+ 	if (IS_ERR(p))
+ 		return ERR_CAST(p);
+ 
+@@ -4354,7 +4351,6 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
+ 	DECLARE_CRYPTO_WAIT(wait);
+ 	unsigned int crypt_len = le32_to_cpu(tr_hdr->OriginalMessageSize);
+ 	void *creq;
+-	size_t sensitive_size;
+ 
+ 	rc = smb2_get_enc_key(server, le64_to_cpu(tr_hdr->SessionId), enc, key);
+ 	if (rc) {
+@@ -4380,8 +4376,7 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
+ 		return rc;
+ 	}
+ 
+-	creq = smb2_get_aead_req(tfm, rqst, num_rqst, sign, &iv, &req, &sg,
+-				 &sensitive_size);
++	creq = smb2_get_aead_req(tfm, rqst, num_rqst, sign, &iv, &req, &sg);
+ 	if (IS_ERR(creq))
+ 		return PTR_ERR(creq);
+ 
+@@ -4411,7 +4406,7 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
+ 	if (!rc && enc)
+ 		memcpy(&tr_hdr->Signature, sign, SMB2_SIGNATURE_SIZE);
+ 
+-	kvfree_sensitive(creq, sensitive_size);
++	kfree_sensitive(creq);
+ 	return rc;
+ }
+ 
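
The buffer assembled here carries AEAD request state derived from session keys, so it is released with kfree_sensitive(), which zeroes the allocation before freeing it. Unlike kvfree_sensitive(), kfree_sensitive() learns the length from the slab allocator, which is why the separately threaded sensitive_size parameter can be dropped once the allocation switches from kvzalloc() to kzalloc(). The pairing, in miniature:

#include <linux/slab.h>

static int with_key_material(size_t len)
{
	u8 *buf = kzalloc(len, GFP_NOFS);

	if (!buf)
		return -ENOMEM;

	/* ... fill buf with key-derived state and use it ... */

	kfree_sensitive(buf);	/* wipes the allocation, then frees it */
	return 0;
}
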
+diff --git a/fs/smb/server/ksmbd_netlink.h b/fs/smb/server/ksmbd_netlink.h
+index 3f07a612c05b40..8ccd57fd904bc2 100644
+--- a/fs/smb/server/ksmbd_netlink.h
++++ b/fs/smb/server/ksmbd_netlink.h
+@@ -112,10 +112,11 @@ struct ksmbd_startup_request {
+ 	__u32	smbd_max_io_size;	/* smbd read write size */
+ 	__u32	max_connections;	/* Number of maximum simultaneous connections */
+ 	__s8	bind_interfaces_only;
+-	__s8	reserved[503];		/* Reserved room */
++	__u32	max_ip_connections;	/* Maximum number of connections per IP address */
++	__s8	reserved[499];		/* Reserved room */
+ 	__u32	ifc_list_sz;		/* interfaces list size */
+ 	__s8	____payload[];
+-};
++} __packed;
+ 
+ #define KSMBD_STARTUP_CONFIG_INTERFACES(s)	((s)->____payload)
+ 
+diff --git a/fs/smb/server/mgmt/user_session.c b/fs/smb/server/mgmt/user_session.c
+index 9dec4c2940bc04..b36d0676dbe584 100644
+--- a/fs/smb/server/mgmt/user_session.c
++++ b/fs/smb/server/mgmt/user_session.c
+@@ -104,29 +104,32 @@ int ksmbd_session_rpc_open(struct ksmbd_session *sess, char *rpc_name)
+ 	if (!entry)
+ 		return -ENOMEM;
+ 
+-	down_read(&sess->rpc_lock);
+ 	entry->method = method;
+ 	entry->id = id = ksmbd_ipc_id_alloc();
+ 	if (id < 0)
+ 		goto free_entry;
++
++	down_write(&sess->rpc_lock);
+ 	old = xa_store(&sess->rpc_handle_list, id, entry, KSMBD_DEFAULT_GFP);
+-	if (xa_is_err(old))
++	if (xa_is_err(old)) {
++		up_write(&sess->rpc_lock);
+ 		goto free_id;
++	}
+ 
+ 	resp = ksmbd_rpc_open(sess, id);
+-	if (!resp)
+-		goto erase_xa;
++	if (!resp) {
++		xa_erase(&sess->rpc_handle_list, entry->id);
++		up_write(&sess->rpc_lock);
++		goto free_id;
++	}
+ 
+-	up_read(&sess->rpc_lock);
++	up_write(&sess->rpc_lock);
+ 	kvfree(resp);
+ 	return id;
+-erase_xa:
+-	xa_erase(&sess->rpc_handle_list, entry->id);
+ free_id:
+ 	ksmbd_rpc_id_free(entry->id);
+ free_entry:
+ 	kfree(entry);
+-	up_read(&sess->rpc_lock);
+ 	return -EINVAL;
+ }
+ 
+@@ -144,9 +147,14 @@ void ksmbd_session_rpc_close(struct ksmbd_session *sess, int id)
+ int ksmbd_session_rpc_method(struct ksmbd_session *sess, int id)
+ {
+ 	struct ksmbd_session_rpc *entry;
++	int method;
+ 
++	down_read(&sess->rpc_lock);
+ 	entry = xa_load(&sess->rpc_handle_list, id);
+-	return entry ? entry->method : 0;
++	method = entry ? entry->method : 0;
++	up_read(&sess->rpc_lock);
++
++	return method;
+ }
+ 
+ void ksmbd_session_destroy(struct ksmbd_session *sess)
+diff --git a/fs/smb/server/server.h b/fs/smb/server/server.h
+index 995555febe7d16..b8a7317be86b4e 100644
+--- a/fs/smb/server/server.h
++++ b/fs/smb/server/server.h
+@@ -43,6 +43,7 @@ struct ksmbd_server_config {
+ 	unsigned int		auth_mechs;
+ 	unsigned int		max_connections;
+ 	unsigned int		max_inflight_req;
++	unsigned int		max_ip_connections;
+ 
+ 	char			*conf[SERVER_CONF_WORK_GROUP + 1];
+ 	struct task_struct	*dh_task;
+diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
+index 6dafc2fbac2585..d2182477566a64 100644
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -5600,7 +5600,8 @@ static int smb2_get_info_filesystem(struct ksmbd_work *work,
+ 
+ 		if (!work->tcon->posix_extensions) {
+ 			pr_err("client doesn't negotiate with SMB3.1.1 POSIX Extensions\n");
+-			rc = -EOPNOTSUPP;
++			path_put(&path);
++			return -EOPNOTSUPP;
+ 		} else {
+ 			info = (struct filesystem_posix_info *)(rsp->Buffer);
+ 			info->OptimalTransferSize = cpu_to_le32(stfs.f_bsize);
+diff --git a/fs/smb/server/transport_ipc.c b/fs/smb/server/transport_ipc.c
+index 2da2a5f6b983a5..4454bbe3c7107d 100644
+--- a/fs/smb/server/transport_ipc.c
++++ b/fs/smb/server/transport_ipc.c
+@@ -335,6 +335,9 @@ static int ipc_server_config_on_startup(struct ksmbd_startup_request *req)
+ 	if (req->max_connections)
+ 		server_conf.max_connections = req->max_connections;
+ 
++	if (req->max_ip_connections)
++		server_conf.max_ip_connections = req->max_ip_connections;
++
+ 	ret = ksmbd_set_netbios_name(req->netbios_name);
+ 	ret |= ksmbd_set_server_string(req->server_string);
+ 	ret |= ksmbd_set_work_group(req->work_group);
+diff --git a/fs/smb/server/transport_rdma.c b/fs/smb/server/transport_rdma.c
+index d059c890d14280..05dfef7ad67f5c 100644
+--- a/fs/smb/server/transport_rdma.c
++++ b/fs/smb/server/transport_rdma.c
+@@ -152,6 +152,10 @@ struct smb_direct_transport {
+ 	struct work_struct	disconnect_work;
+ 
+ 	bool			negotiation_requested;
++
++	bool			legacy_iwarp;
++	u8			initiator_depth;
++	u8			responder_resources;
+ };
+ 
+ #define KSMBD_TRANS(t) ((struct ksmbd_transport *)&((t)->transport))
+@@ -346,6 +350,9 @@ static struct smb_direct_transport *alloc_transport(struct rdma_cm_id *cm_id)
+ 	t->cm_id = cm_id;
+ 	cm_id->context = t;
+ 
++	t->initiator_depth = SMB_DIRECT_CM_INITIATOR_DEPTH;
++	t->responder_resources = 1;
++
+ 	t->status = SMB_DIRECT_CS_NEW;
+ 	init_waitqueue_head(&t->wait_status);
+ 
+@@ -1623,21 +1630,21 @@ static int smb_direct_send_negotiate_response(struct smb_direct_transport *t,
+ static int smb_direct_accept_client(struct smb_direct_transport *t)
+ {
+ 	struct rdma_conn_param conn_param;
+-	struct ib_port_immutable port_immutable;
+-	u32 ird_ord_hdr[2];
++	__be32 ird_ord_hdr[2];
+ 	int ret;
+ 
++	/*
++	 * smb_direct_handle_connect_request()
++	 * already negotiated t->initiator_depth
++	 * and t->responder_resources
++	 */
+ 	memset(&conn_param, 0, sizeof(conn_param));
+-	conn_param.initiator_depth = min_t(u8, t->cm_id->device->attrs.max_qp_rd_atom,
+-					   SMB_DIRECT_CM_INITIATOR_DEPTH);
+-	conn_param.responder_resources = 0;
+-
+-	t->cm_id->device->ops.get_port_immutable(t->cm_id->device,
+-						 t->cm_id->port_num,
+-						 &port_immutable);
+-	if (port_immutable.core_cap_flags & RDMA_CORE_PORT_IWARP) {
+-		ird_ord_hdr[0] = conn_param.responder_resources;
+-		ird_ord_hdr[1] = 1;
++	conn_param.initiator_depth = t->initiator_depth;
++	conn_param.responder_resources = t->responder_resources;
++
++	if (t->legacy_iwarp) {
++		ird_ord_hdr[0] = cpu_to_be32(conn_param.responder_resources);
++		ird_ord_hdr[1] = cpu_to_be32(conn_param.initiator_depth);
+ 		conn_param.private_data = ird_ord_hdr;
+ 		conn_param.private_data_len = sizeof(ird_ord_hdr);
+ 	} else {
+@@ -2023,10 +2030,13 @@ static bool rdma_frwr_is_supported(struct ib_device_attr *attrs)
+ 	return true;
+ }
+ 
+-static int smb_direct_handle_connect_request(struct rdma_cm_id *new_cm_id)
++static int smb_direct_handle_connect_request(struct rdma_cm_id *new_cm_id,
++					     struct rdma_cm_event *event)
+ {
+ 	struct smb_direct_transport *t;
+ 	struct task_struct *handler;
++	u8 peer_initiator_depth;
++	u8 peer_responder_resources;
+ 	int ret;
+ 
+ 	if (!rdma_frwr_is_supported(&new_cm_id->device->attrs)) {
+@@ -2040,6 +2050,67 @@ static int smb_direct_handle_connect_request(struct rdma_cm_id *new_cm_id)
+ 	if (!t)
+ 		return -ENOMEM;
+ 
++	peer_initiator_depth = event->param.conn.initiator_depth;
++	peer_responder_resources = event->param.conn.responder_resources;
++	if (rdma_protocol_iwarp(new_cm_id->device, new_cm_id->port_num) &&
++	    event->param.conn.private_data_len == 8) {
++		/*
++		 * Legacy clients with only iWarp MPA v1 support
++		 * need a private blob in order to negotiate
++		 * the IRD/ORD values.
++		 */
++		const __be32 *ird_ord_hdr = event->param.conn.private_data;
++		u32 ird32 = be32_to_cpu(ird_ord_hdr[0]);
++		u32 ord32 = be32_to_cpu(ird_ord_hdr[1]);
++
++		/*
++		 * cifs.ko sends the legacy IRD/ORD negotiation
++		 * event if iWarp MPA v2 was used.
++		 *
++		 * Here we check that the values match and only
++		 * mark the client as legacy if they don't match.
++		 */
++		if ((u32)event->param.conn.initiator_depth != ird32 ||
++		    (u32)event->param.conn.responder_resources != ord32) {
++			/*
++			 * There are broken clients (old cifs.ko)
++			 * using little endian and also
++			 * struct rdma_conn_param only uses u8
++			 * that use little endian, and
++			 * so we truncate the value to U8_MAX.
++			 *
++			 * smb_direct_accept_client() will then
++			 * do the real negotiation in order to
++			 * select the minimum between client and
++			 * server.
++			 */
++			ird32 = min_t(u32, ird32, U8_MAX);
++			ord32 = min_t(u32, ord32, U8_MAX);
++
++			t->legacy_iwarp = true;
++			peer_initiator_depth = (u8)ird32;
++			peer_responder_resources = (u8)ord32;
++		}
++	}
++
++	/*
++	 * First set what we as the server are able to support
++	 */
++	t->initiator_depth = min_t(u8, t->initiator_depth,
++				   new_cm_id->device->attrs.max_qp_rd_atom);
++
++	/*
++	 * Negotiate the value by using the minimum
++	 * between client and server if the client provided
++	 * non-zero values.
++	 */
++	if (peer_initiator_depth != 0)
++		t->initiator_depth = min_t(u8, t->initiator_depth,
++					   peer_initiator_depth);
++	if (peer_responder_resources != 0)
++		t->responder_resources = min_t(u8, t->responder_resources,
++					       peer_responder_resources);
++
+ 	ret = smb_direct_connect(t);
+ 	if (ret)
+ 		goto out_err;
+@@ -2064,7 +2135,7 @@ static int smb_direct_listen_handler(struct rdma_cm_id *cm_id,
+ {
+ 	switch (event->event) {
+ 	case RDMA_CM_EVENT_CONNECT_REQUEST: {
+-		int ret = smb_direct_handle_connect_request(cm_id);
++		int ret = smb_direct_handle_connect_request(cm_id, event);
+ 
+ 		if (ret) {
+ 			pr_err("Can't create transport: %d\n", ret);
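
The negotiation rule implemented above: start from the server's own capability (bounded by the device's max_qp_rd_atom) and lower it toward the peer's advertised IRD/ORD, treating a peer value of 0 as "no preference". Expressed as a helper sketch:

#include <linux/minmax.h>

/* Peer value 0 means "no preference"; otherwise take the minimum of
 * the local capability and what the peer advertised.
 */
static u8 negotiate_rd_atom(u8 local_cap, u8 peer)
{
	return peer ? min(local_cap, peer) : local_cap;
}
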
+diff --git a/fs/smb/server/transport_tcp.c b/fs/smb/server/transport_tcp.c
+index 756833c91b140b..b51ccc16abe114 100644
+--- a/fs/smb/server/transport_tcp.c
++++ b/fs/smb/server/transport_tcp.c
+@@ -240,6 +240,7 @@ static int ksmbd_kthread_fn(void *p)
+ 	struct interface *iface = (struct interface *)p;
+ 	struct ksmbd_conn *conn;
+ 	int ret;
++	unsigned int max_ip_conns;
+ 
+ 	while (!kthread_should_stop()) {
+ 		mutex_lock(&iface->sock_release_lock);
+@@ -257,34 +258,38 @@ static int ksmbd_kthread_fn(void *p)
+ 			continue;
+ 		}
+ 
++		if (!server_conf.max_ip_connections)
++			goto skip_max_ip_conns_limit;
++
+ 		/*
+ 		 * Limits repeated connections from clients with the same IP.
+ 		 */
++		max_ip_conns = 0;
+ 		down_read(&conn_list_lock);
+-		list_for_each_entry(conn, &conn_list, conns_list)
++		list_for_each_entry(conn, &conn_list, conns_list) {
+ #if IS_ENABLED(CONFIG_IPV6)
+ 			if (client_sk->sk->sk_family == AF_INET6) {
+ 				if (memcmp(&client_sk->sk->sk_v6_daddr,
+-					   &conn->inet6_addr, 16) == 0) {
+-					ret = -EAGAIN;
+-					break;
+-				}
++					   &conn->inet6_addr, 16) == 0)
++					max_ip_conns++;
+ 			} else if (inet_sk(client_sk->sk)->inet_daddr ==
+-				 conn->inet_addr) {
+-				ret = -EAGAIN;
+-				break;
+-			}
++				 conn->inet_addr)
++				max_ip_conns++;
+ #else
+ 			if (inet_sk(client_sk->sk)->inet_daddr ==
+-			    conn->inet_addr) {
++			    conn->inet_addr)
++				max_ip_conns++;
++#endif
++			if (server_conf.max_ip_connections <= max_ip_conns) {
+ 				ret = -EAGAIN;
+ 				break;
+ 			}
+-#endif
++		}
+ 		up_read(&conn_list_lock);
+ 		if (ret == -EAGAIN)
+ 			continue;
+ 
++skip_max_ip_conns_limit:
+ 		if (server_conf.max_connections &&
+ 		    atomic_inc_return(&active_num_conn) >= server_conf.max_connections) {
+ 			pr_info_ratelimited("Limit the maximum number of connections(%u)\n",
+diff --git a/fs/squashfs/inode.c b/fs/squashfs/inode.c
+index d5918eba27e371..53104f25de5116 100644
+--- a/fs/squashfs/inode.c
++++ b/fs/squashfs/inode.c
+@@ -165,6 +165,7 @@ int squashfs_read_inode(struct inode *inode, long long ino)
+ 		squashfs_i(inode)->start = le32_to_cpu(sqsh_ino->start_block);
+ 		squashfs_i(inode)->block_list_start = block;
+ 		squashfs_i(inode)->offset = offset;
++		squashfs_i(inode)->parent = 0;
+ 		inode->i_data.a_ops = &squashfs_aops;
+ 
+ 		TRACE("File inode %x:%x, start_block %llx, block_list_start "
+@@ -212,6 +213,7 @@ int squashfs_read_inode(struct inode *inode, long long ino)
+ 		squashfs_i(inode)->start = le64_to_cpu(sqsh_ino->start_block);
+ 		squashfs_i(inode)->block_list_start = block;
+ 		squashfs_i(inode)->offset = offset;
++		squashfs_i(inode)->parent = 0;
+ 		inode->i_data.a_ops = &squashfs_aops;
+ 
+ 		TRACE("File inode %x:%x, start_block %llx, block_list_start "
+@@ -292,6 +294,7 @@ int squashfs_read_inode(struct inode *inode, long long ino)
+ 		inode->i_mode |= S_IFLNK;
+ 		squashfs_i(inode)->start = block;
+ 		squashfs_i(inode)->offset = offset;
++		squashfs_i(inode)->parent = 0;
+ 
+ 		if (type == SQUASHFS_LSYMLINK_TYPE) {
+ 			__le32 xattr;
+@@ -329,6 +332,7 @@ int squashfs_read_inode(struct inode *inode, long long ino)
+ 		set_nlink(inode, le32_to_cpu(sqsh_ino->nlink));
+ 		rdev = le32_to_cpu(sqsh_ino->rdev);
+ 		init_special_inode(inode, inode->i_mode, new_decode_dev(rdev));
++		squashfs_i(inode)->parent = 0;
+ 
+ 		TRACE("Device inode %x:%x, rdev %x\n",
+ 				SQUASHFS_INODE_BLK(ino), offset, rdev);
+@@ -353,6 +357,7 @@ int squashfs_read_inode(struct inode *inode, long long ino)
+ 		set_nlink(inode, le32_to_cpu(sqsh_ino->nlink));
+ 		rdev = le32_to_cpu(sqsh_ino->rdev);
+ 		init_special_inode(inode, inode->i_mode, new_decode_dev(rdev));
++		squashfs_i(inode)->parent = 0;
+ 
+ 		TRACE("Device inode %x:%x, rdev %x\n",
+ 				SQUASHFS_INODE_BLK(ino), offset, rdev);
+@@ -373,6 +378,7 @@ int squashfs_read_inode(struct inode *inode, long long ino)
+ 			inode->i_mode |= S_IFSOCK;
+ 		set_nlink(inode, le32_to_cpu(sqsh_ino->nlink));
+ 		init_special_inode(inode, inode->i_mode, 0);
++		squashfs_i(inode)->parent = 0;
+ 		break;
+ 	}
+ 	case SQUASHFS_LFIFO_TYPE:
+@@ -392,6 +398,7 @@ int squashfs_read_inode(struct inode *inode, long long ino)
+ 		inode->i_op = &squashfs_inode_ops;
+ 		set_nlink(inode, le32_to_cpu(sqsh_ino->nlink));
+ 		init_special_inode(inode, inode->i_mode, 0);
++		squashfs_i(inode)->parent = 0;
+ 		break;
+ 	}
+ 	default:
+diff --git a/fs/squashfs/squashfs_fs_i.h b/fs/squashfs/squashfs_fs_i.h
+index 2c82d6f2a4561b..8e497ac07b9a83 100644
+--- a/fs/squashfs/squashfs_fs_i.h
++++ b/fs/squashfs/squashfs_fs_i.h
+@@ -16,6 +16,7 @@ struct squashfs_inode_info {
+ 	u64		xattr;
+ 	unsigned int	xattr_size;
+ 	int		xattr_count;
++	int		parent;
+ 	union {
+ 		struct {
+ 			u64		fragment_block;
+@@ -27,7 +28,6 @@ struct squashfs_inode_info {
+ 			u64		dir_idx_start;
+ 			int		dir_idx_offset;
+ 			int		dir_idx_cnt;
+-			int		parent;
+ 		};
+ 	};
+ 	struct inode	vfs_inode;
+diff --git a/fs/udf/inode.c b/fs/udf/inode.c
+index 4386dd845e4009..0f97850a165ad4 100644
+--- a/fs/udf/inode.c
++++ b/fs/udf/inode.c
+@@ -2265,6 +2265,9 @@ int udf_current_aext(struct inode *inode, struct extent_position *epos,
+ 		if (check_add_overflow(sizeof(struct allocExtDesc),
+ 				le32_to_cpu(header->lengthAllocDescs), &alen))
+ 			return -1;
++
++		if (alen > epos->bh->b_size)
++			return -1;
+ 	}
+ 
+ 	switch (iinfo->i_alloc_type) {
+diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
+index 23b358a1271cd9..3c75199947f096 100644
+--- a/include/asm-generic/vmlinux.lds.h
++++ b/include/asm-generic/vmlinux.lds.h
+@@ -354,6 +354,7 @@
+ 	__start_once = .;						\
+ 	*(.data..once)							\
+ 	__end_once = .;							\
++	*(.data..do_once)						\
+ 	STRUCT_ALIGN();							\
+ 	*(__tracepoints)						\
+ 	/* implement dynamic printk debug */				\
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index 6db72c66de91d6..e8d9803cc6756f 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -281,6 +281,7 @@ struct bpf_map_owner {
+ 	bool xdp_has_frags;
+ 	u64 storage_cookie[MAX_BPF_CGROUP_STORAGE_TYPE];
+ 	const struct btf_type *attach_func_proto;
++	enum bpf_attach_type expected_attach_type;
+ };
+ 
+ struct bpf_map {
+diff --git a/include/linux/btf.h b/include/linux/btf.h
+index d99178ce01d21d..e473bbfe412862 100644
+--- a/include/linux/btf.h
++++ b/include/linux/btf.h
+@@ -82,7 +82,7 @@
+  * as to avoid issues such as the compiler inlining or eliding either a static
+  * kfunc, or a global kfunc in an LTO build.
+  */
+-#define __bpf_kfunc __used __retain noinline
++#define __bpf_kfunc __used __retain __noclone noinline
+ 
+ #define __bpf_kfunc_start_defs()					       \
+ 	__diag_push();							       \
+diff --git a/include/linux/once.h b/include/linux/once.h
+index 30346fcdc7995d..449a0e34ad5ad9 100644
+--- a/include/linux/once.h
++++ b/include/linux/once.h
+@@ -46,7 +46,7 @@ void __do_once_sleepable_done(bool *done, struct static_key_true *once_key,
+ #define DO_ONCE(func, ...)						     \
+ 	({								     \
+ 		bool ___ret = false;					     \
+-		static bool __section(".data..once") ___done = false;	     \
++		static bool __section(".data..do_once") ___done = false;     \
+ 		static DEFINE_STATIC_KEY_TRUE(___once_key);		     \
+ 		if (static_branch_unlikely(&___once_key)) {		     \
+ 			unsigned long ___flags;				     \
+@@ -64,7 +64,7 @@ void __do_once_sleepable_done(bool *done, struct static_key_true *once_key,
+ #define DO_ONCE_SLEEPABLE(func, ...)						\
+ 	({									\
+ 		bool ___ret = false;						\
+-		static bool __section(".data..once") ___done = false;		\
++		static bool __section(".data..do_once") ___done = false;	\
+ 		static DEFINE_STATIC_KEY_TRUE(___once_key);			\
+ 		if (static_branch_unlikely(&___once_key)) {			\
+ 			___ret = __do_once_sleepable_start(&___done);		\
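
Together with the vmlinux.lds.h hunk earlier, this moves the writable ___done flags of DO_ONCE()/DO_ONCE_SLEEPABLE into their own .data..do_once section, placed after the __end_once marker. That keeps them out of the __start_once..__end_once range that the clear_warn_once debugfs file resets wholesale; clearing a DO_ONCE flag without re-enabling its static key would leave the two out of sync. Typical DO_ONCE() usage, for reference:

#include <linux/once.h>
#include <linux/random.h>

static u32 hash_seed;

static u32 hash_pkt(u32 v)
{
	/* get_random_bytes() runs once ever; afterwards the static key
	 * patches the check out of the fast path entirely.
	 */
	DO_ONCE(get_random_bytes, &hash_seed, sizeof(hash_seed));
	return v ^ hash_seed;
}
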
+diff --git a/include/trace/events/filelock.h b/include/trace/events/filelock.h
+index b8d1e00a7982c9..2dfeb158e848a5 100644
+--- a/include/trace/events/filelock.h
++++ b/include/trace/events/filelock.h
+@@ -27,7 +27,8 @@
+ 		{ FL_SLEEP,		"FL_SLEEP" },			\
+ 		{ FL_DOWNGRADE_PENDING,	"FL_DOWNGRADE_PENDING" },	\
+ 		{ FL_UNLOCK_PENDING,	"FL_UNLOCK_PENDING" },		\
+-		{ FL_OFDLCK,		"FL_OFDLCK" })
++		{ FL_OFDLCK,		"FL_OFDLCK" },			\
++		{ FL_RECLAIM,		"FL_RECLAIM"})
+ 
+ #define show_fl_type(val)				\
+ 	__print_symbolic(val,				\
+diff --git a/include/uapi/linux/hidraw.h b/include/uapi/linux/hidraw.h
+index d5ee269864e07f..ebd701b3c18d9d 100644
+--- a/include/uapi/linux/hidraw.h
++++ b/include/uapi/linux/hidraw.h
+@@ -48,6 +48,8 @@ struct hidraw_devinfo {
+ #define HIDIOCGOUTPUT(len)    _IOC(_IOC_WRITE|_IOC_READ, 'H', 0x0C, len)
+ #define HIDIOCREVOKE	      _IOW('H', 0x0D, int) /* Revoke device access */
+ 
++#define HIDIOCTL_LAST		_IOC_NR(HIDIOCREVOKE)
++
+ #define HIDRAW_FIRST_MINOR 0
+ #define HIDRAW_MAX_DEVICES 64
+ /* number of reports to buffer */
+diff --git a/include/vdso/gettime.h b/include/vdso/gettime.h
+index c50d152e7b3e06..9ac161866653a0 100644
+--- a/include/vdso/gettime.h
++++ b/include/vdso/gettime.h
+@@ -5,6 +5,7 @@
+ #include <linux/types.h>
+ 
+ struct __kernel_timespec;
++struct __kernel_old_timeval;
+ struct timezone;
+ 
+ #if !defined(CONFIG_64BIT) || defined(BUILD_VDSO32_64)
+diff --git a/init/Kconfig b/init/Kconfig
+index 45990792cb4a68..219ccdb0af732e 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -1440,6 +1440,7 @@ config BOOT_CONFIG_EMBED_FILE
+ 
+ config INITRAMFS_PRESERVE_MTIME
+ 	bool "Preserve cpio archive mtimes in initramfs"
++	depends on BLK_DEV_INITRD
+ 	default y
+ 	help
+ 	  Each entry in an initramfs cpio archive carries an mtime value. When
+diff --git a/io_uring/waitid.c b/io_uring/waitid.c
+index 2f7b5eeab845e9..ecaa358d0ad87b 100644
+--- a/io_uring/waitid.c
++++ b/io_uring/waitid.c
+@@ -272,13 +272,14 @@ static int io_waitid_wait(struct wait_queue_entry *wait, unsigned mode,
+ 	if (!pid_child_should_wake(wo, p))
+ 		return 0;
+ 
++	list_del_init(&wait->entry);
++
+ 	/* cancel is in progress */
+ 	if (atomic_fetch_inc(&iw->refs) & IO_WAITID_REF_MASK)
+ 		return 1;
+ 
+ 	req->io_task_work.func = io_waitid_cb;
+ 	io_req_task_work_add(req);
+-	list_del_init(&wait->entry);
+ 	return 1;
+ }
+ 
+diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
+index 1f51c8f20722ed..08bdb623f4f91b 100644
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -2326,6 +2326,7 @@ static bool __bpf_prog_map_compatible(struct bpf_map *map,
+ 		map->owner->type  = prog_type;
+ 		map->owner->jited = fp->jited;
+ 		map->owner->xdp_has_frags = aux->xdp_has_frags;
++		map->owner->expected_attach_type = fp->expected_attach_type;
+ 		map->owner->attach_func_proto = aux->attach_func_proto;
+ 		for_each_cgroup_storage_type(i) {
+ 			map->owner->storage_cookie[i] =
+@@ -2337,6 +2338,10 @@ static bool __bpf_prog_map_compatible(struct bpf_map *map,
+ 		ret = map->owner->type  == prog_type &&
+ 		      map->owner->jited == fp->jited &&
+ 		      map->owner->xdp_has_frags == aux->xdp_has_frags;
++		if (ret &&
++		    map->map_type == BPF_MAP_TYPE_PROG_ARRAY &&
++		    map->owner->expected_attach_type != fp->expected_attach_type)
++			ret = false;
+ 		for_each_cgroup_storage_type(i) {
+ 			if (!ret)
+ 				break;
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 1829f62a74a9e3..96640a80fd9c4e 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -14545,7 +14545,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
+ 	} else {	/* all other ALU ops: and, sub, xor, add, ... */
+ 
+ 		if (BPF_SRC(insn->code) == BPF_X) {
+-			if (insn->imm != 0 || insn->off > 1 ||
++			if (insn->imm != 0 || (insn->off != 0 && insn->off != 1) ||
+ 			    (insn->off == 1 && opcode != BPF_MOD && opcode != BPF_DIV)) {
+ 				verbose(env, "BPF_ALU uses reserved fields\n");
+ 				return -EINVAL;
+@@ -14555,7 +14555,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
+ 			if (err)
+ 				return err;
+ 		} else {
+-			if (insn->src_reg != BPF_REG_0 || insn->off > 1 ||
++			if (insn->src_reg != BPF_REG_0 || (insn->off != 0 && insn->off != 1) ||
+ 			    (insn->off == 1 && opcode != BPF_MOD && opcode != BPF_DIV)) {
+ 				verbose(env, "BPF_ALU uses reserved fields\n");
+ 				return -EINVAL;
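
insn->off is a signed 16-bit field, so the old `insn->off > 1` test let every negative offset through its first clause. The rewritten condition accepts exactly off == 0 (a plain ALU operation) and off == 1 (the signed BPF_DIV/BPF_MOD variants) and rejects everything else. The intended predicate, factored out as a sketch:

#include <linux/bpf.h>

/* off is s16: 'off > 1' misses negative encodings, so whitelist the
 * two legal values explicitly.
 */
static bool alu_off_is_valid(s16 off, u8 opcode)
{
	if (off == 0)
		return true;	/* ordinary ALU op */
	return off == 1 &&
	       (opcode == BPF_DIV || opcode == BPF_MOD); /* signed div/mod */
}
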
+diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
+index e60f5e71e35df7..c00981cc6fe5b4 100644
+--- a/kernel/events/uprobes.c
++++ b/kernel/events/uprobes.c
+@@ -114,7 +114,7 @@ struct xol_area {
+ 
+ static void uprobe_warn(struct task_struct *t, const char *msg)
+ {
+-	pr_warn("uprobe: %s:%d failed to %s\n", current->comm, current->pid, msg);
++	pr_warn("uprobe: %s:%d failed to %s\n", t->comm, t->pid, msg);
+ }
+ 
+ /*
+diff --git a/kernel/seccomp.c b/kernel/seccomp.c
+index 0cd1f8b5a102ee..267b00005eaf2b 100644
+--- a/kernel/seccomp.c
++++ b/kernel/seccomp.c
+@@ -1124,7 +1124,7 @@ static void seccomp_handle_addfd(struct seccomp_kaddfd *addfd, struct seccomp_kn
+ static bool should_sleep_killable(struct seccomp_filter *match,
+ 				  struct seccomp_knotif *n)
+ {
+-	return match->wait_killable_recv && n->state == SECCOMP_NOTIFY_SENT;
++	return match->wait_killable_recv && n->state >= SECCOMP_NOTIFY_SENT;
+ }
+ 
+ static int seccomp_do_user_notification(int this_syscall,
+@@ -1171,13 +1171,11 @@ static int seccomp_do_user_notification(int this_syscall,
+ 
+ 		if (err != 0) {
+ 			/*
+-			 * Check to see if the notifcation got picked up and
+-			 * whether we should switch to wait killable.
++			 * Check to see whether we should switch to wait
++			 * killable. Only return the interrupted error if not.
+ 			 */
+-			if (!wait_killable && should_sleep_killable(match, &n))
+-				continue;
+-
+-			goto interrupted;
++			if (!(!wait_killable && should_sleep_killable(match, &n)))
++				goto interrupted;
+ 		}
+ 
+ 		addfd = list_first_entry_or_null(&n.addfd,
+diff --git a/kernel/smp.c b/kernel/smp.c
+index f25e20617b7eb7..fa6faf50fb43bd 100644
+--- a/kernel/smp.c
++++ b/kernel/smp.c
+@@ -891,16 +891,15 @@ static void smp_call_function_many_cond(const struct cpumask *mask,
+  * @mask: The set of cpus to run on (only runs on online subset).
+  * @func: The function to run. This must be fast and non-blocking.
+  * @info: An arbitrary pointer to pass to the function.
+- * @wait: Bitmask that controls the operation. If %SCF_WAIT is set, wait
+- *        (atomically) until function has completed on other CPUs. If
+- *        %SCF_RUN_LOCAL is set, the function will also be run locally
+- *        if the local CPU is set in the @cpumask.
+- *
+- * If @wait is true, then returns once @func has returned.
++ * @wait: If true, wait (atomically) until function has completed
++ *        on other CPUs.
+  *
+  * You must not call this function with disabled interrupts or from a
+  * hardware interrupt handler or from a bottom half handler. Preemption
+  * must be disabled when calling this function.
++ *
++ * @func is not called on the local CPU even if @mask contains it.  Consider
++ * using on_each_cpu_cond_mask() instead if this is not desirable.
+  */
+ void smp_call_function_many(const struct cpumask *mask,
+ 			    smp_call_func_t func, void *info, bool wait)
+diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
+index 3ec7df7dbeec4e..4a44451efbcc67 100644
+--- a/kernel/trace/bpf_trace.c
++++ b/kernel/trace/bpf_trace.c
+@@ -2759,19 +2759,24 @@ kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link *link,
+ 	struct bpf_run_ctx *old_run_ctx;
+ 	int err;
+ 
++	/*
++	 * The graph tracer framework ensures we won't migrate, so there is no
++	 * need to use migrate_disable for bpf_prog_run again. The check here is
++	 * just for __this_cpu_inc_return.
++	 */
++	cant_sleep();
++
+ 	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
+ 		bpf_prog_inc_misses_counter(link->link.prog);
+ 		err = 0;
+ 		goto out;
+ 	}
+ 
+-	migrate_disable();
+ 	rcu_read_lock();
+ 	old_run_ctx = bpf_set_run_ctx(&run_ctx.session_ctx.run_ctx);
+ 	err = bpf_prog_run(link->link.prog, regs);
+ 	bpf_reset_run_ctx(old_run_ctx);
+ 	rcu_read_unlock();
+-	migrate_enable();
+ 
+  out:
+ 	__this_cpu_dec(bpf_prog_active);
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index f116af53a93922..fff6edb1174d59 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -6886,6 +6886,8 @@ long hugetlb_change_protection(struct vm_area_struct *vma,
+ 						psize);
+ 		}
+ 		spin_unlock(ptl);
++
++		cond_resched();
+ 	}
+ 	/*
+ 	 * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
+diff --git a/net/9p/trans_usbg.c b/net/9p/trans_usbg.c
+index 6b694f117aef29..468f7e8f0277b9 100644
+--- a/net/9p/trans_usbg.c
++++ b/net/9p/trans_usbg.c
+@@ -231,6 +231,8 @@ static void usb9pfs_rx_complete(struct usb_ep *ep, struct usb_request *req)
+ 	struct f_usb9pfs *usb9pfs = ep->driver_data;
+ 	struct usb_composite_dev *cdev = usb9pfs->function.config->cdev;
+ 	struct p9_req_t *p9_rx_req;
++	unsigned int req_size = req->actual;
++	int status = REQ_STATUS_RCVD;
+ 
+ 	if (req->status) {
+ 		dev_err(&cdev->gadget->dev, "%s usb9pfs complete --> %d, %d/%d\n",
+@@ -242,11 +244,19 @@ static void usb9pfs_rx_complete(struct usb_ep *ep, struct usb_request *req)
+ 	if (!p9_rx_req)
+ 		return;
+ 
+-	memcpy(p9_rx_req->rc.sdata, req->buf, req->actual);
++	if (req_size > p9_rx_req->rc.capacity) {
++		dev_err(&cdev->gadget->dev,
++			"%s received data size %u exceeds buffer capacity %zu\n",
++			ep->name, req_size, p9_rx_req->rc.capacity);
++		req_size = 0;
++		status = REQ_STATUS_ERROR;
++	}
++
++	memcpy(p9_rx_req->rc.sdata, req->buf, req_size);
+ 
+-	p9_rx_req->rc.size = req->actual;
++	p9_rx_req->rc.size = req_size;
+ 
+-	p9_client_cb(usb9pfs->client, p9_rx_req, REQ_STATUS_RCVD);
++	p9_client_cb(usb9pfs->client, p9_rx_req, status);
+ 	p9_req_put(usb9pfs->client, p9_rx_req);
+ 
+ 	complete(&usb9pfs->received);
+diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
+index 333f32a9fd2191..853acfa8e9433a 100644
+--- a/net/bluetooth/hci_sync.c
++++ b/net/bluetooth/hci_sync.c
+@@ -1325,7 +1325,7 @@ int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance)
+ {
+ 	struct hci_cp_le_set_ext_adv_params cp;
+ 	struct hci_rp_le_set_ext_adv_params rp;
+-	bool connectable;
++	bool connectable, require_privacy;
+ 	u32 flags;
+ 	bdaddr_t random_addr;
+ 	u8 own_addr_type;
+@@ -1363,10 +1363,12 @@ int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance)
+ 		return -EPERM;
+ 
+ 	/* Set require_privacy to true only when non-connectable
+-	 * advertising is used. In that case it is fine to use a
+-	 * non-resolvable private address.
++	 * advertising is used and it is not periodic.
++	 * In that case it is fine to use a non-resolvable private address.
+ 	 */
+-	err = hci_get_random_address(hdev, !connectable,
++	require_privacy = !connectable && !(adv && adv->periodic);
++
++	err = hci_get_random_address(hdev, require_privacy,
+ 				     adv_use_rpa(hdev, flags), adv,
+ 				     &own_addr_type, &random_addr);
+ 	if (err < 0)
+diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c
+index a08a0f3d5003cc..2cd0b963c96bd0 100644
+--- a/net/bluetooth/iso.c
++++ b/net/bluetooth/iso.c
+@@ -111,6 +111,8 @@ static void iso_conn_free(struct kref *ref)
+ 	/* Ensure no more work items will run since hci_conn has been dropped */
+ 	disable_delayed_work_sync(&conn->timeout_work);
+ 
++	kfree_skb(conn->rx_skb);
++
+ 	kfree(conn);
+ }
+ 
+@@ -743,6 +745,13 @@ static void iso_sock_kill(struct sock *sk)
+ 
+ 	BT_DBG("sk %p state %d", sk, sk->sk_state);
+ 
++	/* Sock is dead, so set conn->sk to NULL to avoid possible UAF */
++	if (iso_pi(sk)->conn) {
++		iso_conn_lock(iso_pi(sk)->conn);
++		iso_pi(sk)->conn->sk = NULL;
++		iso_conn_unlock(iso_pi(sk)->conn);
++	}
++
+ 	/* Kill poor orphan */
+ 	bt_sock_unlink(&iso_sk_list, sk);
+ 	sock_set_flag(sk, SOCK_DEAD);
+@@ -2295,7 +2304,7 @@ void iso_recv(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
+ 		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
+ 					  skb->len);
+ 		conn->rx_len -= skb->len;
+-		return;
++		break;
+ 
+ 	case ISO_END:
+ 		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
+diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
+index 8b75647076baec..563cae4f76b0d1 100644
+--- a/net/bluetooth/mgmt.c
++++ b/net/bluetooth/mgmt.c
+@@ -4412,13 +4412,11 @@ static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
+ 		return -ENOMEM;
+ 
+ #ifdef CONFIG_BT_FEATURE_DEBUG
+-	if (!hdev) {
+-		flags = bt_dbg_get() ? BIT(0) : 0;
++	flags = bt_dbg_get() ? BIT(0) : 0;
+ 
+-		memcpy(rp->features[idx].uuid, debug_uuid, 16);
+-		rp->features[idx].flags = cpu_to_le32(flags);
+-		idx++;
+-	}
++	memcpy(rp->features[idx].uuid, debug_uuid, 16);
++	rp->features[idx].flags = cpu_to_le32(flags);
++	idx++;
+ #endif
+ 
+ 	if (hdev && hci_dev_le_state_simultaneous(hdev)) {
+diff --git a/net/core/filter.c b/net/core/filter.c
+index 02fedc404d7f7d..c850e5d6cbd876 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -9233,13 +9233,17 @@ static bool sock_addr_is_valid_access(int off, int size,
+ 			return false;
+ 		info->reg_type = PTR_TO_SOCKET;
+ 		break;
+-	default:
+-		if (type == BPF_READ) {
+-			if (size != size_default)
+-				return false;
+-		} else {
++	case bpf_ctx_range(struct bpf_sock_addr, user_family):
++	case bpf_ctx_range(struct bpf_sock_addr, family):
++	case bpf_ctx_range(struct bpf_sock_addr, type):
++	case bpf_ctx_range(struct bpf_sock_addr, protocol):
++		if (type != BPF_READ)
+ 			return false;
+-		}
++		if (size != size_default)
++			return false;
++		break;
++	default:
++		return false;
+ 	}
+ 
+ 	return true;
+diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
+index 619ddc087957fa..37a3fa98d904fe 100644
+--- a/net/ipv4/ping.c
++++ b/net/ipv4/ping.c
+@@ -77,6 +77,7 @@ static inline struct hlist_head *ping_hashslot(struct ping_table *table,
+ 
+ int ping_get_port(struct sock *sk, unsigned short ident)
+ {
++	struct net *net = sock_net(sk);
+ 	struct inet_sock *isk, *isk2;
+ 	struct hlist_head *hlist;
+ 	struct sock *sk2 = NULL;
+@@ -90,9 +91,10 @@ int ping_get_port(struct sock *sk, unsigned short ident)
+ 		for (i = 0; i < (1L << 16); i++, result++) {
+ 			if (!result)
+ 				result++; /* avoid zero */
+-			hlist = ping_hashslot(&ping_table, sock_net(sk),
+-					    result);
++			hlist = ping_hashslot(&ping_table, net, result);
+ 			sk_for_each(sk2, hlist) {
++				if (!net_eq(sock_net(sk2), net))
++					continue;
+ 				isk2 = inet_sk(sk2);
+ 
+ 				if (isk2->inet_num == result)
+@@ -108,8 +110,10 @@ int ping_get_port(struct sock *sk, unsigned short ident)
+ 		if (i >= (1L << 16))
+ 			goto fail;
+ 	} else {
+-		hlist = ping_hashslot(&ping_table, sock_net(sk), ident);
++		hlist = ping_hashslot(&ping_table, net, ident);
+ 		sk_for_each(sk2, hlist) {
++			if (!net_eq(sock_net(sk2), net))
++				continue;
+ 			isk2 = inet_sk(sk2);
+ 
+ 			/* BUG? Why is this reuse and not reuseaddr? ping.c
+@@ -129,7 +133,7 @@ int ping_get_port(struct sock *sk, unsigned short ident)
+ 		pr_debug("was not hashed\n");
+ 		sk_add_node_rcu(sk, hlist);
+ 		sock_set_flag(sk, SOCK_RCU_FREE);
+-		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
++		sock_prot_inuse_add(net, sk->sk_prot, 1);
+ 	}
+ 	spin_unlock(&ping_table.lock);
+ 	return 0;
+@@ -188,6 +192,8 @@ static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident)
+ 	}
+ 
+ 	sk_for_each_rcu(sk, hslot) {
++		if (!net_eq(sock_net(sk), net))
++			continue;
+ 		isk = inet_sk(sk);
+ 
+ 		pr_debug("iterate\n");
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 988992ff898b3d..739931aabb4e31 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -3058,8 +3058,8 @@ bool tcp_check_oom(const struct sock *sk, int shift)
+ 
+ void __tcp_close(struct sock *sk, long timeout)
+ {
++	bool data_was_unread = false;
+ 	struct sk_buff *skb;
+-	int data_was_unread = 0;
+ 	int state;
+ 
+ 	WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK);
+@@ -3078,11 +3078,12 @@ void __tcp_close(struct sock *sk, long timeout)
+ 	 *  reader process may not have drained the data yet!
+ 	 */
+ 	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
+-		u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq;
++		u32 end_seq = TCP_SKB_CB(skb)->end_seq;
+ 
+ 		if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
+-			len--;
+-		data_was_unread += len;
++			end_seq--;
++		if (after(end_seq, tcp_sk(sk)->copied_seq))
++			data_was_unread = true;
+ 		__kfree_skb(skb);
+ 	}
+ 
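
The old accounting summed the raw sequence length of every queued skb, which over-reports when skbs the reader has already consumed are still sitting on the receive queue; close would then send an RST for data that was in fact read. Comparing each skb's end_seq against tp->copied_seq with the wrap-safe after() macro from <net/tcp.h> asks the right question: does the skb end beyond what the reader copied? The core of the test, as a sketch:

#include <net/tcp.h>

/* Sequence numbers wrap, so after() compares modulo 2^32. A FIN
 * consumes one sequence number and is not user data.
 */
static bool skb_leaves_unread_data(u32 end_seq, u32 copied_seq, bool has_fin)
{
	if (has_fin)
		end_seq--;
	return after(end_seq, copied_seq);
}
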
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index 8c0d91dfd7e2b3..538c6eea645f20 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -5280,12 +5280,20 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
+ 			}
+ 
+ 			rx.sdata = prev_sta->sdata;
++			if (!status->link_valid && prev_sta->sta.mlo) {
++				struct link_sta_info *link_sta;
++
++				link_sta = link_sta_info_get_bss(rx.sdata,
++								 hdr->addr2);
++				if (!link_sta)
++					continue;
++
++				link_id = link_sta->link_id;
++			}
++
+ 			if (!ieee80211_rx_data_set_sta(&rx, prev_sta, link_id))
+ 				goto out;
+ 
+-			if (!status->link_valid && prev_sta->sta.mlo)
+-				continue;
+-
+ 			ieee80211_prepare_and_rx_handle(&rx, skb, false);
+ 
+ 			prev_sta = sta;
+@@ -5293,10 +5301,18 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
+ 
+ 		if (prev_sta) {
+ 			rx.sdata = prev_sta->sdata;
+-			if (!ieee80211_rx_data_set_sta(&rx, prev_sta, link_id))
+-				goto out;
++			if (!status->link_valid && prev_sta->sta.mlo) {
++				struct link_sta_info *link_sta;
++
++				link_sta = link_sta_info_get_bss(rx.sdata,
++								 hdr->addr2);
++				if (!link_sta)
++					goto out;
+ 
+-			if (!status->link_valid && prev_sta->sta.mlo)
++				link_id = link_sta->link_id;
++			}
++
++			if (!ieee80211_rx_data_set_sta(&rx, prev_sta, link_id))
+ 				goto out;
+ 
+ 			if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
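
For a multi-link (MLO) station the hardware may not tag the frame with a valid link ID, so both paths above now resolve it from the transmitter address before binding the rx data to the station, instead of bailing out after the fact. The shared shape, as a sketch (the error action is continue or goto out depending on the path):

        if (!status->link_valid && sta->sta.mlo) {
                struct link_sta_info *link_sta =
                        link_sta_info_get_bss(rx.sdata, hdr->addr2);

                if (!link_sta)
                        return;         /* no matching link, drop frame */
                link_id = link_sta->link_id;
        }
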
+diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h
+index 5251524b96afac..5e4453e9ef8e73 100644
+--- a/net/netfilter/ipset/ip_set_hash_gen.h
++++ b/net/netfilter/ipset/ip_set_hash_gen.h
+@@ -63,7 +63,7 @@ struct hbucket {
+ 		: jhash_size((htable_bits) - HTABLE_REGION_BITS))
+ #define ahash_sizeof_regions(htable_bits)		\
+ 	(ahash_numof_locks(htable_bits) * sizeof(struct ip_set_region))
+-#define ahash_region(n, htable_bits)		\
++#define ahash_region(n)		\
+ 	((n) / jhash_size(HTABLE_REGION_BITS))
+ #define ahash_bucket_start(h,  htable_bits)	\
+ 	((htable_bits) < HTABLE_REGION_BITS ? 0	\
+@@ -702,7 +702,7 @@ mtype_resize(struct ip_set *set, bool retried)
+ #endif
+ 				key = HKEY(data, h->initval, htable_bits);
+ 				m = __ipset_dereference(hbucket(t, key));
+-				nr = ahash_region(key, htable_bits);
++				nr = ahash_region(key);
+ 				if (!m) {
+ 					m = kzalloc(sizeof(*m) +
+ 					    AHASH_INIT_SIZE * dsize,
+@@ -852,7 +852,7 @@ mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
+ 	rcu_read_lock_bh();
+ 	t = rcu_dereference_bh(h->table);
+ 	key = HKEY(value, h->initval, t->htable_bits);
+-	r = ahash_region(key, t->htable_bits);
++	r = ahash_region(key);
+ 	atomic_inc(&t->uref);
+ 	elements = t->hregion[r].elements;
+ 	maxelem = t->maxelem;
+@@ -1050,7 +1050,7 @@ mtype_del(struct ip_set *set, void *value, const struct ip_set_ext *ext,
+ 	rcu_read_lock_bh();
+ 	t = rcu_dereference_bh(h->table);
+ 	key = HKEY(value, h->initval, t->htable_bits);
+-	r = ahash_region(key, t->htable_bits);
++	r = ahash_region(key);
+ 	atomic_inc(&t->uref);
+ 	rcu_read_unlock_bh();
+ 
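
The ipset change above is mechanical: the region index never depended on the table size, only on the hash key, so the htable_bits argument was dead weight. Spelled out with jhash_size() expanded (the region-bits value here is illustrative, not copied from the header):

        /* Each lock region covers 2^HTABLE_REGION_BITS buckets, so
         * the region of bucket n is n divided by the region size. */
        #define HTABLE_REGION_BITS      10      /* illustrative */
        #define ahash_region(n)         ((n) >> HTABLE_REGION_BITS)
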
+diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
+index c0289f83f96df8..327baa17882a84 100644
+--- a/net/netfilter/ipvs/ip_vs_conn.c
++++ b/net/netfilter/ipvs/ip_vs_conn.c
+@@ -885,7 +885,7 @@ static void ip_vs_conn_expire(struct timer_list *t)
+ 			 * conntrack cleanup for the net.
+ 			 */
+ 			smp_rmb();
+-			if (ipvs->enable)
++			if (READ_ONCE(ipvs->enable))
+ 				ip_vs_conn_drop_conntrack(cp);
+ 		}
+ 
+@@ -1433,7 +1433,7 @@ void ip_vs_expire_nodest_conn_flush(struct netns_ipvs *ipvs)
+ 		cond_resched_rcu();
+ 
+ 		/* netns clean up started, abort delayed work */
+-		if (!ipvs->enable)
++		if (!READ_ONCE(ipvs->enable))
+ 			break;
+ 	}
+ 	rcu_read_unlock();
+diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
+index c7a8a08b730891..5ea7ab8bf4dcc2 100644
+--- a/net/netfilter/ipvs/ip_vs_core.c
++++ b/net/netfilter/ipvs/ip_vs_core.c
+@@ -1353,9 +1353,6 @@ ip_vs_out_hook(void *priv, struct sk_buff *skb, const struct nf_hook_state *stat
+ 	if (unlikely(!skb_dst(skb)))
+ 		return NF_ACCEPT;
+ 
+-	if (!ipvs->enable)
+-		return NF_ACCEPT;
+-
+ 	ip_vs_fill_iph_skb(af, skb, false, &iph);
+ #ifdef CONFIG_IP_VS_IPV6
+ 	if (af == AF_INET6) {
+@@ -1940,7 +1937,7 @@ ip_vs_in_hook(void *priv, struct sk_buff *skb, const struct nf_hook_state *state
+ 		return NF_ACCEPT;
+ 	}
+ 	/* ipvs enabled in this netns ? */
+-	if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
++	if (unlikely(sysctl_backup_only(ipvs)))
+ 		return NF_ACCEPT;
+ 
+ 	ip_vs_fill_iph_skb(af, skb, false, &iph);
+@@ -2108,7 +2105,7 @@ ip_vs_forward_icmp(void *priv, struct sk_buff *skb,
+ 	int r;
+ 
+ 	/* ipvs enabled in this netns ? */
+-	if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
++	if (unlikely(sysctl_backup_only(ipvs)))
+ 		return NF_ACCEPT;
+ 
+ 	if (state->pf == NFPROTO_IPV4) {
+@@ -2295,7 +2292,7 @@ static int __net_init __ip_vs_init(struct net *net)
+ 		return -ENOMEM;
+ 
+ 	/* Hold the beast until a service is registered */
+-	ipvs->enable = 0;
++	WRITE_ONCE(ipvs->enable, 0);
+ 	ipvs->net = net;
+ 	/* Counters used for creating unique names */
+ 	ipvs->gen = atomic_read(&ipvs_netns_cnt);
+@@ -2367,7 +2364,7 @@ static void __net_exit __ip_vs_dev_cleanup_batch(struct list_head *net_list)
+ 		ipvs = net_ipvs(net);
+ 		ip_vs_unregister_hooks(ipvs, AF_INET);
+ 		ip_vs_unregister_hooks(ipvs, AF_INET6);
+-		ipvs->enable = 0;	/* Disable packet reception */
++		WRITE_ONCE(ipvs->enable, 0);	/* Disable packet reception */
+ 		smp_wmb();
+ 		ip_vs_sync_net_cleanup(ipvs);
+ 	}
+diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
+index 3224f6e17e7361..3219338feca4df 100644
+--- a/net/netfilter/ipvs/ip_vs_ctl.c
++++ b/net/netfilter/ipvs/ip_vs_ctl.c
+@@ -256,7 +256,7 @@ static void est_reload_work_handler(struct work_struct *work)
+ 		struct ip_vs_est_kt_data *kd = ipvs->est_kt_arr[id];
+ 
+ 		/* netns clean up started, abort delayed work */
+-		if (!ipvs->enable)
++		if (!READ_ONCE(ipvs->enable))
+ 			goto unlock;
+ 		if (!kd)
+ 			continue;
+@@ -1482,9 +1482,9 @@ ip_vs_add_service(struct netns_ipvs *ipvs, struct ip_vs_service_user_kern *u,
+ 
+ 	*svc_p = svc;
+ 
+-	if (!ipvs->enable) {
++	if (!READ_ONCE(ipvs->enable)) {
+ 		/* Now there is a service - full throttle */
+-		ipvs->enable = 1;
++		WRITE_ONCE(ipvs->enable, 1);
+ 
+ 		/* Start estimation for first time */
+ 		ip_vs_est_reload_start(ipvs);
+diff --git a/net/netfilter/ipvs/ip_vs_est.c b/net/netfilter/ipvs/ip_vs_est.c
+index f821ad2e19b35e..3492108bb3b97e 100644
+--- a/net/netfilter/ipvs/ip_vs_est.c
++++ b/net/netfilter/ipvs/ip_vs_est.c
+@@ -231,7 +231,7 @@ static int ip_vs_estimation_kthread(void *data)
+ void ip_vs_est_reload_start(struct netns_ipvs *ipvs)
+ {
+ 	/* Ignore reloads before first service is added */
+-	if (!ipvs->enable)
++	if (!READ_ONCE(ipvs->enable))
+ 		return;
+ 	ip_vs_est_stopped_recalc(ipvs);
+ 	/* Bump the kthread configuration genid */
+@@ -305,7 +305,7 @@ static int ip_vs_est_add_kthread(struct netns_ipvs *ipvs)
+ 	int i;
+ 
+ 	if ((unsigned long)ipvs->est_kt_count >= ipvs->est_max_threads &&
+-	    ipvs->enable && ipvs->est_max_threads)
++	    READ_ONCE(ipvs->enable) && ipvs->est_max_threads)
+ 		return -EINVAL;
+ 
+ 	mutex_lock(&ipvs->est_mutex);
+@@ -342,7 +342,7 @@ static int ip_vs_est_add_kthread(struct netns_ipvs *ipvs)
+ 	}
+ 
+ 	/* Start kthread tasks only when services are present */
+-	if (ipvs->enable && !ip_vs_est_stopped(ipvs)) {
++	if (READ_ONCE(ipvs->enable) && !ip_vs_est_stopped(ipvs)) {
+ 		ret = ip_vs_est_kthread_start(ipvs, kd);
+ 		if (ret < 0)
+ 			goto out;
+@@ -485,7 +485,7 @@ int ip_vs_start_estimator(struct netns_ipvs *ipvs, struct ip_vs_stats *stats)
+ 	struct ip_vs_estimator *est = &stats->est;
+ 	int ret;
+ 
+-	if (!ipvs->est_max_threads && ipvs->enable)
++	if (!ipvs->est_max_threads && READ_ONCE(ipvs->enable))
+ 		ipvs->est_max_threads = ip_vs_est_max_threads(ipvs);
+ 
+ 	est->ktid = -1;
+@@ -662,7 +662,7 @@ static int ip_vs_est_calc_limits(struct netns_ipvs *ipvs, int *chain_max)
+ 			/* Wait for cpufreq frequency transition */
+ 			wait_event_idle_timeout(wq, kthread_should_stop(),
+ 						HZ / 50);
+-			if (!ipvs->enable || kthread_should_stop())
++			if (!READ_ONCE(ipvs->enable) || kthread_should_stop())
+ 				goto stop;
+ 		}
+ 
+@@ -680,7 +680,7 @@ static int ip_vs_est_calc_limits(struct netns_ipvs *ipvs, int *chain_max)
+ 		rcu_read_unlock();
+ 		local_bh_enable();
+ 
+-		if (!ipvs->enable || kthread_should_stop())
++		if (!READ_ONCE(ipvs->enable) || kthread_should_stop())
+ 			goto stop;
+ 		cond_resched();
+ 
+@@ -756,7 +756,7 @@ static void ip_vs_est_calc_phase(struct netns_ipvs *ipvs)
+ 	mutex_lock(&ipvs->est_mutex);
+ 	for (id = 1; id < ipvs->est_kt_count; id++) {
+ 		/* netns clean up started, abort */
+-		if (!ipvs->enable)
++		if (!READ_ONCE(ipvs->enable))
+ 			goto unlock2;
+ 		kd = ipvs->est_kt_arr[id];
+ 		if (!kd)
+@@ -786,7 +786,7 @@ static void ip_vs_est_calc_phase(struct netns_ipvs *ipvs)
+ 	id = ipvs->est_kt_count;
+ 
+ next_kt:
+-	if (!ipvs->enable || kthread_should_stop())
++	if (!READ_ONCE(ipvs->enable) || kthread_should_stop())
+ 		goto unlock;
+ 	id--;
+ 	if (id < 0)
+diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c
+index d8a284999544b0..206c6700e2006e 100644
+--- a/net/netfilter/ipvs/ip_vs_ftp.c
++++ b/net/netfilter/ipvs/ip_vs_ftp.c
+@@ -53,6 +53,7 @@ enum {
+ 	IP_VS_FTP_EPSV,
+ };
+ 
++static bool exiting_module;
+ /*
+  * List of ports (up to IP_VS_APP_MAX_PORTS) to be handled by helper
+  * First port is set to the default port.
+@@ -605,7 +606,7 @@ static void __ip_vs_ftp_exit(struct net *net)
+ {
+ 	struct netns_ipvs *ipvs = net_ipvs(net);
+ 
+-	if (!ipvs)
++	if (!ipvs || !exiting_module)
+ 		return;
+ 
+ 	unregister_ip_vs_app(ipvs, &ip_vs_ftp);
+@@ -627,6 +628,7 @@ static int __init ip_vs_ftp_init(void)
+  */
+ static void __exit ip_vs_ftp_exit(void)
+ {
++	exiting_module = true;
+ 	unregister_pernet_subsys(&ip_vs_ftp_ops);
+ 	/* rcu_barrier() is called by netns */
+ }
+diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
+index 7784ec094097bc..f12d0d229aaa53 100644
+--- a/net/netfilter/nfnetlink.c
++++ b/net/netfilter/nfnetlink.c
+@@ -376,6 +376,7 @@ static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
+ 	const struct nfnetlink_subsystem *ss;
+ 	const struct nfnl_callback *nc;
+ 	struct netlink_ext_ack extack;
++	struct nlmsghdr *onlh = nlh;
+ 	LIST_HEAD(err_list);
+ 	u32 status;
+ 	int err;
+@@ -386,6 +387,7 @@ static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
+ 	status = 0;
+ replay_abort:
+ 	skb = netlink_skb_clone(oskb, GFP_KERNEL);
++	nlh = onlh;
+ 	if (!skb)
+ 		return netlink_ack(oskb, nlh, -ENOMEM, NULL);
+ 
+diff --git a/net/nfc/nci/ntf.c b/net/nfc/nci/ntf.c
+index 994a0a1efb5890..cb2a672105dc1a 100644
+--- a/net/nfc/nci/ntf.c
++++ b/net/nfc/nci/ntf.c
+@@ -27,11 +27,16 @@
+ 
+ /* Handle NCI Notification packets */
+ 
+-static void nci_core_reset_ntf_packet(struct nci_dev *ndev,
+-				      const struct sk_buff *skb)
++static int nci_core_reset_ntf_packet(struct nci_dev *ndev,
++				     const struct sk_buff *skb)
+ {
+ 	/* Handle NCI 2.x core reset notification */
+-	const struct nci_core_reset_ntf *ntf = (void *)skb->data;
++	const struct nci_core_reset_ntf *ntf;
++
++	if (skb->len < sizeof(struct nci_core_reset_ntf))
++		return -EINVAL;
++
++	ntf = (struct nci_core_reset_ntf *)skb->data;
+ 
+ 	ndev->nci_ver = ntf->nci_ver;
+ 	pr_debug("nci_ver 0x%x, config_status 0x%x\n",
+@@ -42,15 +47,22 @@ static void nci_core_reset_ntf_packet(struct nci_dev *ndev,
+ 		__le32_to_cpu(ntf->manufact_specific_info);
+ 
+ 	nci_req_complete(ndev, NCI_STATUS_OK);
++
++	return 0;
+ }
+ 
+-static void nci_core_conn_credits_ntf_packet(struct nci_dev *ndev,
+-					     struct sk_buff *skb)
++static int nci_core_conn_credits_ntf_packet(struct nci_dev *ndev,
++					    struct sk_buff *skb)
+ {
+-	struct nci_core_conn_credit_ntf *ntf = (void *) skb->data;
++	struct nci_core_conn_credit_ntf *ntf;
+ 	struct nci_conn_info *conn_info;
+ 	int i;
+ 
++	if (skb->len < sizeof(struct nci_core_conn_credit_ntf))
++		return -EINVAL;
++
++	ntf = (struct nci_core_conn_credit_ntf *)skb->data;
++
+ 	pr_debug("num_entries %d\n", ntf->num_entries);
+ 
+ 	if (ntf->num_entries > NCI_MAX_NUM_CONN)
+@@ -68,7 +80,7 @@ static void nci_core_conn_credits_ntf_packet(struct nci_dev *ndev,
+ 		conn_info = nci_get_conn_info_by_conn_id(ndev,
+ 							 ntf->conn_entries[i].conn_id);
+ 		if (!conn_info)
+-			return;
++			return 0;
+ 
+ 		atomic_add(ntf->conn_entries[i].credits,
+ 			   &conn_info->credits_cnt);
+@@ -77,12 +89,19 @@ static void nci_core_conn_credits_ntf_packet(struct nci_dev *ndev,
+ 	/* trigger the next tx */
+ 	if (!skb_queue_empty(&ndev->tx_q))
+ 		queue_work(ndev->tx_wq, &ndev->tx_work);
++
++	return 0;
+ }
+ 
+-static void nci_core_generic_error_ntf_packet(struct nci_dev *ndev,
+-					      const struct sk_buff *skb)
++static int nci_core_generic_error_ntf_packet(struct nci_dev *ndev,
++					     const struct sk_buff *skb)
+ {
+-	__u8 status = skb->data[0];
++	__u8 status;
++
++	if (skb->len < 1)
++		return -EINVAL;
++
++	status = skb->data[0];
+ 
+ 	pr_debug("status 0x%x\n", status);
+ 
+@@ -91,12 +110,19 @@ static void nci_core_generic_error_ntf_packet(struct nci_dev *ndev,
+ 		   (the state remains the same) */
+ 		nci_req_complete(ndev, status);
+ 	}
++
++	return 0;
+ }
+ 
+-static void nci_core_conn_intf_error_ntf_packet(struct nci_dev *ndev,
+-						struct sk_buff *skb)
++static int nci_core_conn_intf_error_ntf_packet(struct nci_dev *ndev,
++					       struct sk_buff *skb)
+ {
+-	struct nci_core_intf_error_ntf *ntf = (void *) skb->data;
++	struct nci_core_intf_error_ntf *ntf;
++
++	if (skb->len < sizeof(struct nci_core_intf_error_ntf))
++		return -EINVAL;
++
++	ntf = (struct nci_core_intf_error_ntf *)skb->data;
+ 
+ 	ntf->conn_id = nci_conn_id(&ntf->conn_id);
+ 
+@@ -105,6 +131,8 @@ static void nci_core_conn_intf_error_ntf_packet(struct nci_dev *ndev,
+ 	/* complete the data exchange transaction, if exists */
+ 	if (test_bit(NCI_DATA_EXCHANGE, &ndev->flags))
+ 		nci_data_exchange_complete(ndev, NULL, ntf->conn_id, -EIO);
++
++	return 0;
+ }
+ 
+ static const __u8 *
+@@ -329,13 +357,18 @@ void nci_clear_target_list(struct nci_dev *ndev)
+ 	ndev->n_targets = 0;
+ }
+ 
+-static void nci_rf_discover_ntf_packet(struct nci_dev *ndev,
+-				       const struct sk_buff *skb)
++static int nci_rf_discover_ntf_packet(struct nci_dev *ndev,
++				      const struct sk_buff *skb)
+ {
+ 	struct nci_rf_discover_ntf ntf;
+-	const __u8 *data = skb->data;
++	const __u8 *data;
+ 	bool add_target = true;
+ 
++	if (skb->len < sizeof(struct nci_rf_discover_ntf))
++		return -EINVAL;
++
++	data = skb->data;
++
+ 	ntf.rf_discovery_id = *data++;
+ 	ntf.rf_protocol = *data++;
+ 	ntf.rf_tech_and_mode = *data++;
+@@ -390,6 +423,8 @@ static void nci_rf_discover_ntf_packet(struct nci_dev *ndev,
+ 		nfc_targets_found(ndev->nfc_dev, ndev->targets,
+ 				  ndev->n_targets);
+ 	}
++
++	return 0;
+ }
+ 
+ static int nci_extract_activation_params_iso_dep(struct nci_dev *ndev,
+@@ -531,14 +566,19 @@ static int nci_store_general_bytes_nfc_dep(struct nci_dev *ndev,
+ 	return NCI_STATUS_OK;
+ }
+ 
+-static void nci_rf_intf_activated_ntf_packet(struct nci_dev *ndev,
+-					     const struct sk_buff *skb)
++static int nci_rf_intf_activated_ntf_packet(struct nci_dev *ndev,
++					    const struct sk_buff *skb)
+ {
+ 	struct nci_conn_info *conn_info;
+ 	struct nci_rf_intf_activated_ntf ntf;
+-	const __u8 *data = skb->data;
++	const __u8 *data;
+ 	int err = NCI_STATUS_OK;
+ 
++	if (skb->len < sizeof(struct nci_rf_intf_activated_ntf))
++		return -EINVAL;
++
++	data = skb->data;
++
+ 	ntf.rf_discovery_id = *data++;
+ 	ntf.rf_interface = *data++;
+ 	ntf.rf_protocol = *data++;
+@@ -645,7 +685,7 @@ static void nci_rf_intf_activated_ntf_packet(struct nci_dev *ndev,
+ 	if (err == NCI_STATUS_OK) {
+ 		conn_info = ndev->rf_conn_info;
+ 		if (!conn_info)
+-			return;
++			return 0;
+ 
+ 		conn_info->max_pkt_payload_len = ntf.max_data_pkt_payload_size;
+ 		conn_info->initial_num_credits = ntf.initial_num_credits;
+@@ -691,19 +731,26 @@ static void nci_rf_intf_activated_ntf_packet(struct nci_dev *ndev,
+ 				pr_err("error when signaling tm activation\n");
+ 		}
+ 	}
++
++	return 0;
+ }
+ 
+-static void nci_rf_deactivate_ntf_packet(struct nci_dev *ndev,
+-					 const struct sk_buff *skb)
++static int nci_rf_deactivate_ntf_packet(struct nci_dev *ndev,
++					const struct sk_buff *skb)
+ {
+ 	const struct nci_conn_info *conn_info;
+-	const struct nci_rf_deactivate_ntf *ntf = (void *)skb->data;
++	const struct nci_rf_deactivate_ntf *ntf;
++
++	if (skb->len < sizeof(struct nci_rf_deactivate_ntf))
++		return -EINVAL;
++
++	ntf = (struct nci_rf_deactivate_ntf *)skb->data;
+ 
+ 	pr_debug("entry, type 0x%x, reason 0x%x\n", ntf->type, ntf->reason);
+ 
+ 	conn_info = ndev->rf_conn_info;
+ 	if (!conn_info)
+-		return;
++		return 0;
+ 
+ 	/* drop tx data queue */
+ 	skb_queue_purge(&ndev->tx_q);
+@@ -735,14 +782,20 @@ static void nci_rf_deactivate_ntf_packet(struct nci_dev *ndev,
+ 	}
+ 
+ 	nci_req_complete(ndev, NCI_STATUS_OK);
++
++	return 0;
+ }
+ 
+-static void nci_nfcee_discover_ntf_packet(struct nci_dev *ndev,
+-					  const struct sk_buff *skb)
++static int nci_nfcee_discover_ntf_packet(struct nci_dev *ndev,
++					 const struct sk_buff *skb)
+ {
+ 	u8 status = NCI_STATUS_OK;
+-	const struct nci_nfcee_discover_ntf *nfcee_ntf =
+-				(struct nci_nfcee_discover_ntf *)skb->data;
++	const struct nci_nfcee_discover_ntf *nfcee_ntf;
++
++	if (skb->len < sizeof(struct nci_nfcee_discover_ntf))
++		return -EINVAL;
++
++	nfcee_ntf = (struct nci_nfcee_discover_ntf *)skb->data;
+ 
+ 	/* NFCForum NCI 9.2.1 HCI Network Specific Handling
+ 	 * If the NFCC supports the HCI Network, it SHALL return one,
+@@ -753,6 +806,8 @@ static void nci_nfcee_discover_ntf_packet(struct nci_dev *ndev,
+ 	ndev->cur_params.id = nfcee_ntf->nfcee_id;
+ 
+ 	nci_req_complete(ndev, status);
++
++	return 0;
+ }
+ 
+ void nci_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb)
+@@ -779,35 +834,43 @@ void nci_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb)
+ 
+ 	switch (ntf_opcode) {
+ 	case NCI_OP_CORE_RESET_NTF:
+-		nci_core_reset_ntf_packet(ndev, skb);
++		if (nci_core_reset_ntf_packet(ndev, skb))
++			goto end;
+ 		break;
+ 
+ 	case NCI_OP_CORE_CONN_CREDITS_NTF:
+-		nci_core_conn_credits_ntf_packet(ndev, skb);
++		if (nci_core_conn_credits_ntf_packet(ndev, skb))
++			goto end;
+ 		break;
+ 
+ 	case NCI_OP_CORE_GENERIC_ERROR_NTF:
+-		nci_core_generic_error_ntf_packet(ndev, skb);
++		if (nci_core_generic_error_ntf_packet(ndev, skb))
++			goto end;
+ 		break;
+ 
+ 	case NCI_OP_CORE_INTF_ERROR_NTF:
+-		nci_core_conn_intf_error_ntf_packet(ndev, skb);
++		if (nci_core_conn_intf_error_ntf_packet(ndev, skb))
++			goto end;
+ 		break;
+ 
+ 	case NCI_OP_RF_DISCOVER_NTF:
+-		nci_rf_discover_ntf_packet(ndev, skb);
++		if (nci_rf_discover_ntf_packet(ndev, skb))
++			goto end;
+ 		break;
+ 
+ 	case NCI_OP_RF_INTF_ACTIVATED_NTF:
+-		nci_rf_intf_activated_ntf_packet(ndev, skb);
++		if (nci_rf_intf_activated_ntf_packet(ndev, skb))
++			goto end;
+ 		break;
+ 
+ 	case NCI_OP_RF_DEACTIVATE_NTF:
+-		nci_rf_deactivate_ntf_packet(ndev, skb);
++		if (nci_rf_deactivate_ntf_packet(ndev, skb))
++			goto end;
+ 		break;
+ 
+ 	case NCI_OP_NFCEE_DISCOVER_NTF:
+-		nci_nfcee_discover_ntf_packet(ndev, skb);
++		if (nci_nfcee_discover_ntf_packet(ndev, skb))
++			goto end;
+ 		break;
+ 
+ 	case NCI_OP_RF_NFCEE_ACTION_NTF:
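
Every notification handler converted above now has the same defensive shape: check skb->len against the expected struct size before casting skb->data, and return -EINVAL so the dispatcher can skip the handler body for short packets. As a sketch (the struct name is illustrative):

        static int handle_ntf(struct nci_dev *ndev, const struct sk_buff *skb)
        {
                const struct some_ntf *ntf;     /* illustrative type */

                if (skb->len < sizeof(*ntf))
                        return -EINVAL;         /* truncated packet */

                ntf = (const struct some_ntf *)skb->data;
                /* fields of *ntf are now known to be in bounds */
                return 0;
        }
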
+diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
+index 73a90ad873fb9d..2d5ac2b3d5269d 100644
+--- a/net/sunrpc/auth_gss/svcauth_gss.c
++++ b/net/sunrpc/auth_gss/svcauth_gss.c
+@@ -724,7 +724,7 @@ svcauth_gss_verify_header(struct svc_rqst *rqstp, struct rsc *rsci,
+ 		rqstp->rq_auth_stat = rpc_autherr_badverf;
+ 		return SVC_DENIED;
+ 	}
+-	if (flavor != RPC_AUTH_GSS) {
++	if (flavor != RPC_AUTH_GSS || checksum.len < XDR_UNIT) {
+ 		rqstp->rq_auth_stat = rpc_autherr_badverf;
+ 		return SVC_DENIED;
+ 	}
+diff --git a/security/Kconfig b/security/Kconfig
+index 28e685f53bd1a1..ce9f1a651ccc33 100644
+--- a/security/Kconfig
++++ b/security/Kconfig
+@@ -264,6 +264,7 @@ endchoice
+ 
+ config LSM
+ 	string "Ordered list of enabled LSMs"
++	depends on SECURITY
+ 	default "landlock,lockdown,yama,loadpin,safesetid,smack,selinux,tomoyo,apparmor,ipe,bpf" if DEFAULT_SECURITY_SMACK
+ 	default "landlock,lockdown,yama,loadpin,safesetid,apparmor,selinux,smack,tomoyo,ipe,bpf" if DEFAULT_SECURITY_APPARMOR
+ 	default "landlock,lockdown,yama,loadpin,safesetid,tomoyo,ipe,bpf" if DEFAULT_SECURITY_TOMOYO
+diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
+index 9b91f68b3fff07..d15de21f6ebf09 100644
+--- a/sound/core/pcm_native.c
++++ b/sound/core/pcm_native.c
+@@ -84,19 +84,24 @@ void snd_pcm_group_init(struct snd_pcm_group *group)
+ }
+ 
+ /* define group lock helpers */
+-#define DEFINE_PCM_GROUP_LOCK(action, mutex_action) \
++#define DEFINE_PCM_GROUP_LOCK(action, bh_lock, bh_unlock, mutex_action) \
+ static void snd_pcm_group_ ## action(struct snd_pcm_group *group, bool nonatomic) \
+ { \
+-	if (nonatomic) \
++	if (nonatomic) { \
+ 		mutex_ ## mutex_action(&group->mutex); \
+-	else \
+-		spin_ ## action(&group->lock); \
+-}
+-
+-DEFINE_PCM_GROUP_LOCK(lock, lock);
+-DEFINE_PCM_GROUP_LOCK(unlock, unlock);
+-DEFINE_PCM_GROUP_LOCK(lock_irq, lock);
+-DEFINE_PCM_GROUP_LOCK(unlock_irq, unlock);
++	} else { \
++		if (IS_ENABLED(CONFIG_PREEMPT_RT) && bh_lock)   \
++			local_bh_disable();			\
++		spin_ ## action(&group->lock);			\
++		if (IS_ENABLED(CONFIG_PREEMPT_RT) && bh_unlock) \
++			local_bh_enable();                      \
++	}							\
++}
++
++DEFINE_PCM_GROUP_LOCK(lock, false, false, lock);
++DEFINE_PCM_GROUP_LOCK(unlock, false, false, unlock);
++DEFINE_PCM_GROUP_LOCK(lock_irq, true, false, lock);
++DEFINE_PCM_GROUP_LOCK(unlock_irq, false, true, unlock);
+ 
+ /**
+  * snd_pcm_stream_lock - Lock the PCM stream
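
The new bh_lock/bh_unlock parameters above only matter on PREEMPT_RT, where spinlocks are sleeping locks and spin_lock_irq() no longer excludes softirq context; wrapping the lock in local_bh_disable()/local_bh_enable() restores that exclusion. Expanding the macro by hand for the lock_irq case gives, roughly:

        static void snd_pcm_group_lock_irq(struct snd_pcm_group *group,
                                           bool nonatomic)
        {
                if (nonatomic) {
                        mutex_lock(&group->mutex);
                } else {
                        if (IS_ENABLED(CONFIG_PREEMPT_RT))
                                local_bh_disable();  /* bh_lock == true */
                        spin_lock_irq(&group->lock);
                }
        }
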
+diff --git a/sound/pci/lx6464es/lx_core.c b/sound/pci/lx6464es/lx_core.c
+index 9d95ecb299aed8..a99acd1125e74f 100644
+--- a/sound/pci/lx6464es/lx_core.c
++++ b/sound/pci/lx6464es/lx_core.c
+@@ -316,7 +316,7 @@ static int lx_message_send_atomic(struct lx6464es *chip, struct lx_rmh *rmh)
+ /* low-level dsp access */
+ int lx_dsp_get_version(struct lx6464es *chip, u32 *rdsp_version)
+ {
+-	u16 ret;
++	int ret;
+ 
+ 	mutex_lock(&chip->msg_lock);
+ 
+@@ -330,10 +330,10 @@ int lx_dsp_get_version(struct lx6464es *chip, u32 *rdsp_version)
+ 
+ int lx_dsp_get_clock_frequency(struct lx6464es *chip, u32 *rfreq)
+ {
+-	u16 ret = 0;
+ 	u32 freq_raw = 0;
+ 	u32 freq = 0;
+ 	u32 frequency = 0;
++	int ret;
+ 
+ 	mutex_lock(&chip->msg_lock);
+ 
+diff --git a/sound/soc/codecs/wcd934x.c b/sound/soc/codecs/wcd934x.c
+index c7f1b28f3b2302..01f8d1296b183d 100644
+--- a/sound/soc/codecs/wcd934x.c
++++ b/sound/soc/codecs/wcd934x.c
+@@ -5845,6 +5845,13 @@ static const struct snd_soc_component_driver wcd934x_component_drv = {
+ 	.endianness = 1,
+ };
+ 
++static void wcd934x_put_device_action(void *data)
++{
++	struct device *dev = data;
++
++	put_device(dev);
++}
++
+ static int wcd934x_codec_parse_data(struct wcd934x_codec *wcd)
+ {
+ 	struct device *dev = &wcd->sdev->dev;
+@@ -5861,11 +5868,13 @@ static int wcd934x_codec_parse_data(struct wcd934x_codec *wcd)
+ 		return dev_err_probe(dev, -EINVAL, "Unable to get SLIM Interface device\n");
+ 
+ 	slim_get_logical_addr(wcd->sidev);
+-	wcd->if_regmap = regmap_init_slimbus(wcd->sidev,
++	wcd->if_regmap = devm_regmap_init_slimbus(wcd->sidev,
+ 				  &wcd934x_ifc_regmap_config);
+-	if (IS_ERR(wcd->if_regmap))
++	if (IS_ERR(wcd->if_regmap)) {
++		put_device(&wcd->sidev->dev);
+ 		return dev_err_probe(dev, PTR_ERR(wcd->if_regmap),
+ 				     "Failed to allocate ifc register map\n");
++	}
+ 
+ 	of_property_read_u32(dev->parent->of_node, "qcom,dmic-sample-rate",
+ 			     &wcd->dmic_sample_rate);
+@@ -5907,6 +5916,10 @@ static int wcd934x_codec_probe(struct platform_device *pdev)
+ 	if (ret)
+ 		return ret;
+ 
++	ret = devm_add_action_or_reset(dev, wcd934x_put_device_action, &wcd->sidev->dev);
++	if (ret)
++		return ret;
++
+ 	/* set default rate 9P6MHz */
+ 	regmap_update_bits(wcd->regmap, WCD934X_CODEC_RPM_CLK_MCLK_CFG,
+ 			   WCD934X_CODEC_RPM_CLK_MCLK_CFG_MCLK_MASK,
+diff --git a/sound/soc/codecs/wcd937x.c b/sound/soc/codecs/wcd937x.c
+index 1df827a084cace..17ddd91eac5fcb 100644
+--- a/sound/soc/codecs/wcd937x.c
++++ b/sound/soc/codecs/wcd937x.c
+@@ -2039,9 +2039,9 @@ static const struct snd_kcontrol_new wcd937x_snd_controls[] = {
+ 	SOC_ENUM_EXT("RX HPH Mode", rx_hph_mode_mux_enum,
+ 		     wcd937x_rx_hph_mode_get, wcd937x_rx_hph_mode_put),
+ 
+-	SOC_SINGLE_EXT("HPHL_COMP Switch", SND_SOC_NOPM, 0, 1, 0,
++	SOC_SINGLE_EXT("HPHL_COMP Switch", WCD937X_COMP_L, 0, 1, 0,
+ 		       wcd937x_get_compander, wcd937x_set_compander),
+-	SOC_SINGLE_EXT("HPHR_COMP Switch", SND_SOC_NOPM, 1, 1, 0,
++	SOC_SINGLE_EXT("HPHR_COMP Switch", WCD937X_COMP_R, 1, 1, 0,
+ 		       wcd937x_get_compander, wcd937x_set_compander),
+ 
+ 	SOC_SINGLE_TLV("HPHL Volume", WCD937X_HPH_L_EN, 0, 20, 1, line_gain),
+diff --git a/sound/soc/codecs/wcd937x.h b/sound/soc/codecs/wcd937x.h
+index 4afa48dcaf7431..7bfd0f630fcdd1 100644
+--- a/sound/soc/codecs/wcd937x.h
++++ b/sound/soc/codecs/wcd937x.h
+@@ -548,21 +548,21 @@ int wcd937x_sdw_hw_params(struct wcd937x_sdw_priv *wcd,
+ struct device *wcd937x_sdw_device_get(struct device_node *np);
+ 
+ #else
+-int wcd937x_sdw_free(struct wcd937x_sdw_priv *wcd,
++static inline int wcd937x_sdw_free(struct wcd937x_sdw_priv *wcd,
+ 		     struct snd_pcm_substream *substream,
+ 		     struct snd_soc_dai *dai)
+ {
+ 	return -EOPNOTSUPP;
+ }
+ 
+-int wcd937x_sdw_set_sdw_stream(struct wcd937x_sdw_priv *wcd,
++static inline int wcd937x_sdw_set_sdw_stream(struct wcd937x_sdw_priv *wcd,
+ 			       struct snd_soc_dai *dai,
+ 			       void *stream, int direction)
+ {
+ 	return -EOPNOTSUPP;
+ }
+ 
+-int wcd937x_sdw_hw_params(struct wcd937x_sdw_priv *wcd,
++static inline int wcd937x_sdw_hw_params(struct wcd937x_sdw_priv *wcd,
+ 			  struct snd_pcm_substream *substream,
+ 			  struct snd_pcm_hw_params *params,
+ 			  struct snd_soc_dai *dai)
+diff --git a/sound/soc/intel/boards/bytcht_es8316.c b/sound/soc/intel/boards/bytcht_es8316.c
+index d3327bc237b5f4..7975dc0ceb3518 100644
+--- a/sound/soc/intel/boards/bytcht_es8316.c
++++ b/sound/soc/intel/boards/bytcht_es8316.c
+@@ -47,7 +47,8 @@ enum {
+ 	BYT_CHT_ES8316_INTMIC_IN2_MAP,
+ };
+ 
+-#define BYT_CHT_ES8316_MAP(quirk)		((quirk) & GENMASK(3, 0))
++#define BYT_CHT_ES8316_MAP_MASK			GENMASK(3, 0)
++#define BYT_CHT_ES8316_MAP(quirk)		((quirk) & BYT_CHT_ES8316_MAP_MASK)
+ #define BYT_CHT_ES8316_SSP0			BIT(16)
+ #define BYT_CHT_ES8316_MONO_SPEAKER		BIT(17)
+ #define BYT_CHT_ES8316_JD_INVERTED		BIT(18)
+@@ -60,10 +61,23 @@ MODULE_PARM_DESC(quirk, "Board-specific quirk override");
+ 
+ static void log_quirks(struct device *dev)
+ {
+-	if (BYT_CHT_ES8316_MAP(quirk) == BYT_CHT_ES8316_INTMIC_IN1_MAP)
++	int map;
++
++	map = BYT_CHT_ES8316_MAP(quirk);
++	switch (map) {
++	case BYT_CHT_ES8316_INTMIC_IN1_MAP:
+ 		dev_info(dev, "quirk IN1_MAP enabled");
+-	if (BYT_CHT_ES8316_MAP(quirk) == BYT_CHT_ES8316_INTMIC_IN2_MAP)
++		break;
++	case BYT_CHT_ES8316_INTMIC_IN2_MAP:
+ 		dev_info(dev, "quirk IN2_MAP enabled");
++		break;
++	default:
++		dev_warn_once(dev, "quirk sets invalid input map: 0x%x, default to INTMIC_IN1_MAP\n", map);
++		quirk &= ~BYT_CHT_ES8316_MAP_MASK;
++		quirk |= BYT_CHT_ES8316_INTMIC_IN1_MAP;
++		break;
++	}
++
+ 	if (quirk & BYT_CHT_ES8316_SSP0)
+ 		dev_info(dev, "quirk SSP0 enabled");
+ 	if (quirk & BYT_CHT_ES8316_MONO_SPEAKER)
+diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
+index b6434b4731261b..d6991864c5a491 100644
+--- a/sound/soc/intel/boards/bytcr_rt5640.c
++++ b/sound/soc/intel/boards/bytcr_rt5640.c
+@@ -68,7 +68,8 @@ enum {
+ 	BYT_RT5640_OVCD_SF_1P5		= (RT5640_OVCD_SF_1P5 << 13),
+ };
+ 
+-#define BYT_RT5640_MAP(quirk)		((quirk) &  GENMASK(3, 0))
++#define BYT_RT5640_MAP_MASK		GENMASK(3, 0)
++#define BYT_RT5640_MAP(quirk)		((quirk) & BYT_RT5640_MAP_MASK)
+ #define BYT_RT5640_JDSRC(quirk)		(((quirk) & GENMASK(7, 4)) >> 4)
+ #define BYT_RT5640_OVCD_TH(quirk)	(((quirk) & GENMASK(12, 8)) >> 8)
+ #define BYT_RT5640_OVCD_SF(quirk)	(((quirk) & GENMASK(14, 13)) >> 13)
+@@ -140,7 +141,9 @@ static void log_quirks(struct device *dev)
+ 		dev_info(dev, "quirk NO_INTERNAL_MIC_MAP enabled\n");
+ 		break;
+ 	default:
+-		dev_err(dev, "quirk map 0x%x is not supported, microphone input will not work\n", map);
++		dev_warn_once(dev, "quirk sets invalid input map: 0x%x, default to DMIC1_MAP\n", map);
++		byt_rt5640_quirk &= ~BYT_RT5640_MAP_MASK;
++		byt_rt5640_quirk |= BYT_RT5640_DMIC1_MAP;
+ 		break;
+ 	}
+ 	if (byt_rt5640_quirk & BYT_RT5640_HSMIC2_ON_IN1)
+diff --git a/sound/soc/intel/boards/bytcr_rt5651.c b/sound/soc/intel/boards/bytcr_rt5651.c
+index 8e4b821efe9277..6860ac41e6b320 100644
+--- a/sound/soc/intel/boards/bytcr_rt5651.c
++++ b/sound/soc/intel/boards/bytcr_rt5651.c
+@@ -58,7 +58,8 @@ enum {
+ 	BYT_RT5651_OVCD_SF_1P5	= (RT5651_OVCD_SF_1P5 << 13),
+ };
+ 
+-#define BYT_RT5651_MAP(quirk)		((quirk) & GENMASK(3, 0))
++#define BYT_RT5651_MAP_MASK		GENMASK(3, 0)
++#define BYT_RT5651_MAP(quirk)		((quirk) & BYT_RT5651_MAP_MASK)
+ #define BYT_RT5651_JDSRC(quirk)		(((quirk) & GENMASK(7, 4)) >> 4)
+ #define BYT_RT5651_OVCD_TH(quirk)	(((quirk) & GENMASK(12, 8)) >> 8)
+ #define BYT_RT5651_OVCD_SF(quirk)	(((quirk) & GENMASK(14, 13)) >> 13)
+@@ -100,14 +101,29 @@ MODULE_PARM_DESC(quirk, "Board-specific quirk override");
+ 
+ static void log_quirks(struct device *dev)
+ {
+-	if (BYT_RT5651_MAP(byt_rt5651_quirk) == BYT_RT5651_DMIC_MAP)
++	int map;
++
++	map = BYT_RT5651_MAP(byt_rt5651_quirk);
++	switch (map) {
++	case BYT_RT5651_DMIC_MAP:
+ 		dev_info(dev, "quirk DMIC_MAP enabled");
+-	if (BYT_RT5651_MAP(byt_rt5651_quirk) == BYT_RT5651_IN1_MAP)
++		break;
++	case BYT_RT5651_IN1_MAP:
+ 		dev_info(dev, "quirk IN1_MAP enabled");
+-	if (BYT_RT5651_MAP(byt_rt5651_quirk) == BYT_RT5651_IN2_MAP)
++		break;
++	case BYT_RT5651_IN2_MAP:
+ 		dev_info(dev, "quirk IN2_MAP enabled");
+-	if (BYT_RT5651_MAP(byt_rt5651_quirk) == BYT_RT5651_IN1_IN2_MAP)
++		break;
++	case BYT_RT5651_IN1_IN2_MAP:
+ 		dev_info(dev, "quirk IN1_IN2_MAP enabled");
++		break;
++	default:
++		dev_warn_once(dev, "quirk sets invalid input map: 0x%x, default to DMIC_MAP\n", map);
++		byt_rt5651_quirk &= ~BYT_RT5651_MAP_MASK;
++		byt_rt5651_quirk |= BYT_RT5651_DMIC_MAP;
++		break;
++	}
++
+ 	if (BYT_RT5651_JDSRC(byt_rt5651_quirk)) {
+ 		dev_info(dev, "quirk realtek,jack-detect-source %ld\n",
+ 			 BYT_RT5651_JDSRC(byt_rt5651_quirk));
+diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
+index 5911a055865160..00d840d5e585c1 100644
+--- a/sound/soc/intel/boards/sof_sdw.c
++++ b/sound/soc/intel/boards/sof_sdw.c
+@@ -741,7 +741,7 @@ static int create_sdw_dailink(struct snd_soc_card *card,
+ 			(*codec_conf)++;
+ 		}
+ 
+-		if (sof_end->include_sidecar) {
++		if (sof_end->include_sidecar && sof_end->codec_info->add_sidecar) {
+ 			ret = sof_end->codec_info->add_sidecar(card, dai_links, codec_conf);
+ 			if (ret)
+ 				return ret;
+diff --git a/sound/soc/sof/ipc3-topology.c b/sound/soc/sof/ipc3-topology.c
+index e98b53b67d12b9..19bbae725d838e 100644
+--- a/sound/soc/sof/ipc3-topology.c
++++ b/sound/soc/sof/ipc3-topology.c
+@@ -2485,11 +2485,6 @@ static int sof_ipc3_tear_down_all_pipelines(struct snd_sof_dev *sdev, bool verif
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	/* free all the scheduler widgets now */
+-	ret = sof_ipc3_free_widgets_in_list(sdev, true, &dyn_widgets, verify);
+-	if (ret < 0)
+-		return ret;
+-
+ 	/*
+ 	 * Tear down all pipelines associated with PCMs that did not get suspended
+ 	 * and unset the prepare flag so that they can be set up again during resume.
+@@ -2505,6 +2500,11 @@ static int sof_ipc3_tear_down_all_pipelines(struct snd_sof_dev *sdev, bool verif
+ 		}
+ 	}
+ 
++	/* free all the scheduler widgets now. This will also power down the secondary cores */
++	ret = sof_ipc3_free_widgets_in_list(sdev, true, &dyn_widgets, verify);
++	if (ret < 0)
++		return ret;
++
+ 	list_for_each_entry(sroute, &sdev->route_list, list)
+ 		sroute->setup = false;
+ 
+diff --git a/tools/include/nolibc/std.h b/tools/include/nolibc/std.h
+index a9d8b5b51f37f8..f24953f8b949c1 100644
+--- a/tools/include/nolibc/std.h
++++ b/tools/include/nolibc/std.h
+@@ -33,6 +33,6 @@ typedef unsigned long       nlink_t;
+ typedef   signed long         off_t;
+ typedef   signed long     blksize_t;
+ typedef   signed long      blkcnt_t;
+-typedef __kernel_old_time_t  time_t;
++typedef __kernel_time_t      time_t;
+ 
+ #endif /* _NOLIBC_STD_H */
+diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
+index e33cf3caf8b645..060aecf60b76b1 100644
+--- a/tools/lib/bpf/libbpf.c
++++ b/tools/lib/bpf/libbpf.c
+@@ -990,35 +990,33 @@ find_struct_ops_kern_types(struct bpf_object *obj, const char *tname_raw,
+ 	const struct btf_member *kern_data_member;
+ 	struct btf *btf = NULL;
+ 	__s32 kern_vtype_id, kern_type_id;
+-	char tname[256];
++	char tname[192], stname[256];
+ 	__u32 i;
+ 
+ 	snprintf(tname, sizeof(tname), "%.*s",
+ 		 (int)bpf_core_essential_name_len(tname_raw), tname_raw);
+ 
+-	kern_type_id = find_ksym_btf_id(obj, tname, BTF_KIND_STRUCT,
+-					&btf, mod_btf);
+-	if (kern_type_id < 0) {
+-		pr_warn("struct_ops init_kern: struct %s is not found in kernel BTF\n",
+-			tname);
+-		return kern_type_id;
+-	}
+-	kern_type = btf__type_by_id(btf, kern_type_id);
++	snprintf(stname, sizeof(stname), "%s%s", STRUCT_OPS_VALUE_PREFIX, tname);
+ 
+-	/* Find the corresponding "map_value" type that will be used
+-	 * in map_update(BPF_MAP_TYPE_STRUCT_OPS).  For example,
+-	 * find "struct bpf_struct_ops_tcp_congestion_ops" from the
+-	 * btf_vmlinux.
++	/* Look for the corresponding "map_value" type that will be used
++	 * in map_update(BPF_MAP_TYPE_STRUCT_OPS) first, figure out the btf
++	 * and the mod_btf.
++	 * For example, find "struct bpf_struct_ops_tcp_congestion_ops".
+ 	 */
+-	kern_vtype_id = find_btf_by_prefix_kind(btf, STRUCT_OPS_VALUE_PREFIX,
+-						tname, BTF_KIND_STRUCT);
++	kern_vtype_id = find_ksym_btf_id(obj, stname, BTF_KIND_STRUCT, &btf, mod_btf);
+ 	if (kern_vtype_id < 0) {
+-		pr_warn("struct_ops init_kern: struct %s%s is not found in kernel BTF\n",
+-			STRUCT_OPS_VALUE_PREFIX, tname);
++		pr_warn("struct_ops init_kern: struct %s is not found in kernel BTF\n", stname);
+ 		return kern_vtype_id;
+ 	}
+ 	kern_vtype = btf__type_by_id(btf, kern_vtype_id);
+ 
++	kern_type_id = btf__find_by_name_kind(btf, tname, BTF_KIND_STRUCT);
++	if (kern_type_id < 0) {
++		pr_warn("struct_ops init_kern: struct %s is not found in kernel BTF\n", tname);
++		return kern_type_id;
++	}
++	kern_type = btf__type_by_id(btf, kern_type_id);
++
+ 	/* Find "struct tcp_congestion_ops" from
+ 	 * struct bpf_struct_ops_tcp_congestion_ops {
+ 	 *	[ ... ]
+@@ -1031,8 +1029,8 @@ find_struct_ops_kern_types(struct bpf_object *obj, const char *tname_raw,
+ 			break;
+ 	}
+ 	if (i == btf_vlen(kern_vtype)) {
+-		pr_warn("struct_ops init_kern: struct %s data is not found in struct %s%s\n",
+-			tname, STRUCT_OPS_VALUE_PREFIX, tname);
++		pr_warn("struct_ops init_kern: struct %s data is not found in struct %s\n",
++			tname, stname);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -5056,6 +5054,16 @@ static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd)
+ 		return false;
+ 	}
+ 
++	/*
++	 * bpf_get_map_info_by_fd() for DEVMAP will always return flags with
++	 * BPF_F_RDONLY_PROG set, but it generally is not set at map creation time.
++	 * Thus, ignore the BPF_F_RDONLY_PROG flag in the flags returned from
++	 * bpf_get_map_info_by_fd() when checking for compatibility with an
++	 * existing DEVMAP.
++	 */
++	if (map->def.type == BPF_MAP_TYPE_DEVMAP || map->def.type == BPF_MAP_TYPE_DEVMAP_HASH)
++		map_info.map_flags &= ~BPF_F_RDONLY_PROG;
++
+ 	return (map_info.type == map->def.type &&
+ 		map_info.key_size == map->def.key_size &&
+ 		map_info.value_size == map->def.value_size &&
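
The DEVMAP carve-out above exists because the kernel reports BPF_F_RDONLY_PROG on device maps even when the flag was never requested at creation time, so an exact flags comparison would spuriously reject reuse of a pinned map. The normalization in isolation:

        /* ignore the kernel-injected flag before comparing */
        if (map->def.type == BPF_MAP_TYPE_DEVMAP ||
            map->def.type == BPF_MAP_TYPE_DEVMAP_HASH)
                map_info.map_flags &= ~BPF_F_RDONLY_PROG;
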
+diff --git a/tools/testing/nvdimm/test/ndtest.c b/tools/testing/nvdimm/test/ndtest.c
+index 892e990c034ab7..22ee6309c53c1f 100644
+--- a/tools/testing/nvdimm/test/ndtest.c
++++ b/tools/testing/nvdimm/test/ndtest.c
+@@ -850,11 +850,22 @@ static int ndtest_probe(struct platform_device *pdev)
+ 
+ 	p->dcr_dma = devm_kcalloc(&p->pdev.dev, NUM_DCR,
+ 				 sizeof(dma_addr_t), GFP_KERNEL);
++	if (!p->dcr_dma) {
++		rc = -ENOMEM;
++		goto err;
++	}
+ 	p->label_dma = devm_kcalloc(&p->pdev.dev, NUM_DCR,
+ 				   sizeof(dma_addr_t), GFP_KERNEL);
++	if (!p->label_dma) {
++		rc = -ENOMEM;
++		goto err;
++	}
+ 	p->dimm_dma = devm_kcalloc(&p->pdev.dev, NUM_DCR,
+ 				  sizeof(dma_addr_t), GFP_KERNEL);
+-
++	if (!p->dimm_dma) {
++		rc = -ENOMEM;
++		goto err;
++	}
+ 	rc = ndtest_nvdimm_init(p);
+ 	if (rc)
+ 		goto err;
+diff --git a/tools/testing/selftests/arm64/pauth/exec_target.c b/tools/testing/selftests/arm64/pauth/exec_target.c
+index 4435600ca400dd..e597861b26d6bf 100644
+--- a/tools/testing/selftests/arm64/pauth/exec_target.c
++++ b/tools/testing/selftests/arm64/pauth/exec_target.c
+@@ -13,7 +13,12 @@ int main(void)
+ 	unsigned long hwcaps;
+ 	size_t val;
+ 
+-	fread(&val, sizeof(size_t), 1, stdin);
++	size_t size = fread(&val, sizeof(size_t), 1, stdin);
++
++	if (size != 1) {
++		fprintf(stderr, "Could not read input from stdin\n");
++		return EXIT_FAILURE;
++	}
+ 
+ 	/* don't try to execute illegal (unimplemented) instructions) caller
+ 	 * should have checked this and keep worker simple
+diff --git a/tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c b/tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c
+index 540181c115a85a..ef00d38b0a8d24 100644
+--- a/tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c
++++ b/tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c
+@@ -23,7 +23,6 @@ struct {
+ 
+ struct {
+ 	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
+-	__uint(max_entries, 2);
+ 	__type(key, int);
+ 	__type(value, __u32);
+ } perf_event_map SEC(".maps");
+diff --git a/tools/testing/selftests/bpf/test_tcpnotify_user.c b/tools/testing/selftests/bpf/test_tcpnotify_user.c
+index 595194453ff8f8..35b4893ccdf8ae 100644
+--- a/tools/testing/selftests/bpf/test_tcpnotify_user.c
++++ b/tools/testing/selftests/bpf/test_tcpnotify_user.c
+@@ -15,20 +15,18 @@
+ #include <bpf/libbpf.h>
+ #include <sys/ioctl.h>
+ #include <linux/rtnetlink.h>
+-#include <signal.h>
+ #include <linux/perf_event.h>
+-#include <linux/err.h>
+ 
+-#include "bpf_util.h"
+ #include "cgroup_helpers.h"
+ 
+ #include "test_tcpnotify.h"
+-#include "trace_helpers.h"
+ #include "testing_helpers.h"
+ 
+ #define SOCKET_BUFFER_SIZE (getpagesize() < 8192L ? getpagesize() : 8192L)
+ 
+ pthread_t tid;
++static bool exit_thread;
++
+ int rx_callbacks;
+ 
+ static void dummyfn(void *ctx, int cpu, void *data, __u32 size)
+@@ -45,7 +43,7 @@ void tcp_notifier_poller(struct perf_buffer *pb)
+ {
+ 	int err;
+ 
+-	while (1) {
++	while (!exit_thread) {
+ 		err = perf_buffer__poll(pb, 100);
+ 		if (err < 0 && err != -EINTR) {
+ 			printf("failed perf_buffer__poll: %d\n", err);
+@@ -78,15 +76,10 @@ int main(int argc, char **argv)
+ 	int error = EXIT_FAILURE;
+ 	struct bpf_object *obj;
+ 	char test_script[80];
+-	cpu_set_t cpuset;
+ 	__u32 key = 0;
+ 
+ 	libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
+ 
+-	CPU_ZERO(&cpuset);
+-	CPU_SET(0, &cpuset);
+-	pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);
+-
+ 	cg_fd = cgroup_setup_and_join(cg_path);
+ 	if (cg_fd < 0)
+ 		goto err;
+@@ -151,6 +144,13 @@ int main(int argc, char **argv)
+ 
+ 	sleep(10);
+ 
++	exit_thread = true;
++	int ret = pthread_join(tid, NULL);
++	if (ret) {
++		printf("FAILED: pthread_join\n");
++		goto err;
++	}
++
+ 	if (verify_result(&g)) {
+ 		printf("FAILED: Wrong stats Expected %d calls, got %d\n",
+ 			g.ncalls, rx_callbacks);
+diff --git a/tools/testing/selftests/nolibc/nolibc-test.c b/tools/testing/selftests/nolibc/nolibc-test.c
+index 6fba7025c5e3c0..d73bff23c61e76 100644
+--- a/tools/testing/selftests/nolibc/nolibc-test.c
++++ b/tools/testing/selftests/nolibc/nolibc-test.c
+@@ -196,8 +196,8 @@ int expect_zr(int expr, int llen)
+ }
+ 
+ 
+-#define EXPECT_NZ(cond, expr, val)			\
+-	do { if (!(cond)) result(llen, SKIPPED); else ret += expect_nz(expr, llen; } while (0)
++#define EXPECT_NZ(cond, expr)				\
++	do { if (!(cond)) result(llen, SKIPPED); else ret += expect_nz(expr, llen); } while (0)
+ 
+ static __attribute__((unused))
+ int expect_nz(int expr, int llen)
+diff --git a/tools/testing/selftests/vDSO/vdso_call.h b/tools/testing/selftests/vDSO/vdso_call.h
+index bb237d771051bd..e7205584cbdca5 100644
+--- a/tools/testing/selftests/vDSO/vdso_call.h
++++ b/tools/testing/selftests/vDSO/vdso_call.h
+@@ -44,7 +44,6 @@
+ 	register long _r6 asm ("r6");					\
+ 	register long _r7 asm ("r7");					\
+ 	register long _r8 asm ("r8");					\
+-	register long _rval asm ("r3");					\
+ 									\
+ 	LOADARGS_##nr(fn, args);					\
+ 									\
+@@ -54,13 +53,13 @@
+ 		"	bns+	1f\n"					\
+ 		"	neg	3, 3\n"					\
+ 		"1:"							\
+-		: "+r" (_r0), "=r" (_r3), "+r" (_r4), "+r" (_r5),	\
++		: "+r" (_r0), "+r" (_r3), "+r" (_r4), "+r" (_r5),	\
+ 		  "+r" (_r6), "+r" (_r7), "+r" (_r8)			\
+-		: "r" (_rval)						\
++		:							\
+ 		: "r9", "r10", "r11", "r12", "cr0", "cr1", "cr5",	\
+ 		  "cr6", "cr7", "xer", "lr", "ctr", "memory"		\
+ 	);								\
+-	_rval;								\
++	_r3;								\
+ })
+ 
+ #else
+diff --git a/tools/testing/selftests/vDSO/vdso_test_abi.c b/tools/testing/selftests/vDSO/vdso_test_abi.c
+index a54424e2336f45..67cbfc56e4e1b0 100644
+--- a/tools/testing/selftests/vDSO/vdso_test_abi.c
++++ b/tools/testing/selftests/vDSO/vdso_test_abi.c
+@@ -182,12 +182,11 @@ int main(int argc, char **argv)
+ 	unsigned long sysinfo_ehdr = getauxval(AT_SYSINFO_EHDR);
+ 
+ 	ksft_print_header();
+-	ksft_set_plan(VDSO_TEST_PLAN);
+ 
+-	if (!sysinfo_ehdr) {
+-		ksft_print_msg("AT_SYSINFO_EHDR is not present!\n");
+-		return KSFT_SKIP;
+-	}
++	if (!sysinfo_ehdr)
++		ksft_exit_skip("AT_SYSINFO_EHDR is not present!\n");
++
++	ksft_set_plan(VDSO_TEST_PLAN);
+ 
+ 	version = versions[VDSO_VERSION];
+ 	name = (const char **)&names[VDSO_NAMES];
+diff --git a/tools/testing/selftests/watchdog/watchdog-test.c b/tools/testing/selftests/watchdog/watchdog-test.c
+index a1f506ba557864..4f09c5db0c7f30 100644
+--- a/tools/testing/selftests/watchdog/watchdog-test.c
++++ b/tools/testing/selftests/watchdog/watchdog-test.c
+@@ -332,6 +332,12 @@ int main(int argc, char *argv[])
+ 	if (oneshot)
+ 		goto end;
+ 
++	/* Check if WDIOF_KEEPALIVEPING is supported */
++	if (!(info.options & WDIOF_KEEPALIVEPING)) {
++		printf("WDIOC_KEEPALIVE not supported by this device\n");
++		goto end;
++	}
++
+ 	printf("Watchdog Ticking Away!\n");
+ 
+ 	/*


^ permalink raw reply related	[flat|nested] 82+ messages in thread
* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2025-10-13 11:56 Arisu Tachibana
  0 siblings, 0 replies; 82+ messages in thread
From: Arisu Tachibana @ 2025-10-13 11:56 UTC (permalink / raw
  To: gentoo-commits

commit:     967f8300e507aa4742fbfae4e6d6253341831730
Author:     Arisu Tachibana <alicef <AT> gentoo <DOT> org>
AuthorDate: Mon Oct 13 11:56:32 2025 +0000
Commit:     Arisu Tachibana <alicef <AT> gentoo <DOT> org>
CommitDate: Mon Oct 13 11:56:32 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=967f8300

Linux patch 6.12.52

Signed-off-by: Arisu Tachibana <alicef <AT> gentoo.org>

 0000_README              |    4 +
 1051_linux-6.12.52.patch | 1264 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1268 insertions(+)

diff --git a/0000_README b/0000_README
index b7557830..9624f9e2 100644
--- a/0000_README
+++ b/0000_README
@@ -247,6 +247,10 @@ Patch:  1050_linux-6.12.51.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.12.51
 
+Patch:  1051_linux-6.12.52.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.12.52
+
 Patch:  1500_fortify-copy-size-value-range-tracking-fix.patch
 From:   https://git.kernel.org/
 Desc:   fortify: Hide run-time copy size from value range tracking

diff --git a/1051_linux-6.12.52.patch b/1051_linux-6.12.52.patch
new file mode 100644
index 00000000..3a00722c
--- /dev/null
+++ b/1051_linux-6.12.52.patch
@@ -0,0 +1,1264 @@
+diff --git a/Makefile b/Makefile
+index 05b7983b56eda7..3345d6257350d7 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 12
+-SUBLEVEL = 51
++SUBLEVEL = 52
+ EXTRAVERSION =
+ NAME = Baby Opossum Posse
+ 
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index 60986f67c35a88..4b43944141034e 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -5104,12 +5104,11 @@ void init_decode_cache(struct x86_emulate_ctxt *ctxt)
+ 	ctxt->mem_read.end = 0;
+ }
+ 
+-int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
++int x86_emulate_insn(struct x86_emulate_ctxt *ctxt, bool check_intercepts)
+ {
+ 	const struct x86_emulate_ops *ops = ctxt->ops;
+ 	int rc = X86EMUL_CONTINUE;
+ 	int saved_dst_type = ctxt->dst.type;
+-	bool is_guest_mode = ctxt->ops->is_guest_mode(ctxt);
+ 
+ 	ctxt->mem_read.pos = 0;
+ 
+@@ -5157,7 +5156,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
+ 				fetch_possible_mmx_operand(&ctxt->dst);
+ 		}
+ 
+-		if (unlikely(is_guest_mode) && ctxt->intercept) {
++		if (unlikely(check_intercepts) && ctxt->intercept) {
+ 			rc = emulator_check_intercept(ctxt, ctxt->intercept,
+ 						      X86_ICPT_PRE_EXCEPT);
+ 			if (rc != X86EMUL_CONTINUE)
+@@ -5186,7 +5185,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
+ 				goto done;
+ 		}
+ 
+-		if (unlikely(is_guest_mode) && (ctxt->d & Intercept)) {
++		if (unlikely(check_intercepts) && (ctxt->d & Intercept)) {
+ 			rc = emulator_check_intercept(ctxt, ctxt->intercept,
+ 						      X86_ICPT_POST_EXCEPT);
+ 			if (rc != X86EMUL_CONTINUE)
+@@ -5240,7 +5239,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
+ 
+ special_insn:
+ 
+-	if (unlikely(is_guest_mode) && (ctxt->d & Intercept)) {
++	if (unlikely(check_intercepts) && (ctxt->d & Intercept)) {
+ 		rc = emulator_check_intercept(ctxt, ctxt->intercept,
+ 					      X86_ICPT_POST_MEMACCESS);
+ 		if (rc != X86EMUL_CONTINUE)
+diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h
+index 10495fffb8905c..1bede46b67c3fb 100644
+--- a/arch/x86/kvm/kvm_emulate.h
++++ b/arch/x86/kvm/kvm_emulate.h
+@@ -230,7 +230,6 @@ struct x86_emulate_ops {
+ 	void (*set_nmi_mask)(struct x86_emulate_ctxt *ctxt, bool masked);
+ 
+ 	bool (*is_smm)(struct x86_emulate_ctxt *ctxt);
+-	bool (*is_guest_mode)(struct x86_emulate_ctxt *ctxt);
+ 	int (*leave_smm)(struct x86_emulate_ctxt *ctxt);
+ 	void (*triple_fault)(struct x86_emulate_ctxt *ctxt);
+ 	int (*set_xcr)(struct x86_emulate_ctxt *ctxt, u32 index, u64 xcr);
+@@ -514,7 +513,7 @@ bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt);
+ #define EMULATION_RESTART 1
+ #define EMULATION_INTERCEPTED 2
+ void init_decode_cache(struct x86_emulate_ctxt *ctxt);
+-int x86_emulate_insn(struct x86_emulate_ctxt *ctxt);
++int x86_emulate_insn(struct x86_emulate_ctxt *ctxt, bool check_intercepts);
+ int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
+ 			 u16 tss_selector, int idt_index, int reason,
+ 			 bool has_error_code, u32 error_code);
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 13ab13d2e9d67c..86cabeca6265a2 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -8567,11 +8567,6 @@ static bool emulator_is_smm(struct x86_emulate_ctxt *ctxt)
+ 	return is_smm(emul_to_vcpu(ctxt));
+ }
+ 
+-static bool emulator_is_guest_mode(struct x86_emulate_ctxt *ctxt)
+-{
+-	return is_guest_mode(emul_to_vcpu(ctxt));
+-}
+-
+ #ifndef CONFIG_KVM_SMM
+ static int emulator_leave_smm(struct x86_emulate_ctxt *ctxt)
+ {
+@@ -8655,7 +8650,6 @@ static const struct x86_emulate_ops emulate_ops = {
+ 	.guest_cpuid_is_intel_compatible = emulator_guest_cpuid_is_intel_compatible,
+ 	.set_nmi_mask        = emulator_set_nmi_mask,
+ 	.is_smm              = emulator_is_smm,
+-	.is_guest_mode       = emulator_is_guest_mode,
+ 	.leave_smm           = emulator_leave_smm,
+ 	.triple_fault        = emulator_triple_fault,
+ 	.set_xcr             = emulator_set_xcr,
+@@ -9209,7 +9203,14 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
+ 		ctxt->exception.address = 0;
+ 	}
+ 
+-	r = x86_emulate_insn(ctxt);
++	/*
++	 * Check L1's instruction intercepts when emulating instructions for
++	 * L2, unless KVM is re-emulating a previously decoded instruction,
++	 * e.g. to complete userspace I/O, in which case KVM has already
++	 * checked the intercepts.
++	 */
++	r = x86_emulate_insn(ctxt, is_guest_mode(vcpu) &&
++				   !(emulation_type & EMULTYPE_NO_DECODE));
+ 
+ 	if (r == EMULATION_INTERCEPTED)
+ 		return 1;
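
The new second argument threaded through x86_emulate_insn() above encodes one rule: L1's instruction intercepts are checked only on the first emulation pass of an L2 instruction, because a re-emulation with EMULTYPE_NO_DECODE (e.g. completing userspace I/O) already went through the intercept checks. Reduced to its essentials:

        bool check_intercepts = is_guest_mode(vcpu) &&
                                !(emulation_type & EMULTYPE_NO_DECODE);

        r = x86_emulate_insn(ctxt, check_intercepts);
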
+diff --git a/crypto/rng.c b/crypto/rng.c
+index 9d8804e464226d..72da96fdfb5ddc 100644
+--- a/crypto/rng.c
++++ b/crypto/rng.c
+@@ -167,6 +167,11 @@ int crypto_del_default_rng(void)
+ EXPORT_SYMBOL_GPL(crypto_del_default_rng);
+ #endif
+ 
++static void rng_default_set_ent(struct crypto_rng *tfm, const u8 *data,
++				unsigned int len)
++{
++}
++
+ int crypto_register_rng(struct rng_alg *alg)
+ {
+ 	struct crypto_alg *base = &alg->base;
+@@ -178,6 +183,9 @@ int crypto_register_rng(struct rng_alg *alg)
+ 	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
+ 	base->cra_flags |= CRYPTO_ALG_TYPE_RNG;
+ 
++	if (!alg->set_ent)
++		alg->set_ent = rng_default_set_ent;
++
+ 	return crypto_register_alg(base);
+ }
+ EXPORT_SYMBOL_GPL(crypto_register_rng);
+diff --git a/drivers/android/dbitmap.h b/drivers/android/dbitmap.h
+index 956f1bd087d1c5..c7299ce8b37413 100644
+--- a/drivers/android/dbitmap.h
++++ b/drivers/android/dbitmap.h
+@@ -37,6 +37,7 @@ static inline void dbitmap_free(struct dbitmap *dmap)
+ {
+ 	dmap->nbits = 0;
+ 	kfree(dmap->map);
++	dmap->map = NULL;
+ }
+ 
+ /* Returns the nbits that a dbitmap can shrink to, 0 if not possible. */
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index d7aaaeb4fe326e..c07d57eaca3bf0 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -514,6 +514,8 @@ static const struct usb_device_id quirks_table[] = {
+ 	/* Realtek 8851BU Bluetooth devices */
+ 	{ USB_DEVICE(0x3625, 0x010b), .driver_info = BTUSB_REALTEK |
+ 						     BTUSB_WIDEBAND_SPEECH },
++	{ USB_DEVICE(0x2001, 0x332a), .driver_info = BTUSB_REALTEK |
++						     BTUSB_WIDEBAND_SPEECH },
+ 
+ 	/* Realtek 8852AE Bluetooth devices */
+ 	{ USB_DEVICE(0x0bda, 0x2852), .driver_info = BTUSB_REALTEK |
+diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+index 49113df8baefd9..25b175f23312c6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+@@ -677,6 +677,12 @@ static int mes_v11_0_set_hw_resources(struct amdgpu_mes *mes)
+ 	mes_set_hw_res_pkt.enable_reg_active_poll = 1;
+ 	mes_set_hw_res_pkt.enable_level_process_quantum_check = 1;
+ 	mes_set_hw_res_pkt.oversubscription_timer = 50;
++	if ((mes->adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x7f)
++		mes_set_hw_res_pkt.enable_lr_compute_wa = 1;
++	else
++		dev_info_once(mes->adev->dev,
++			      "MES FW version must be >= 0x7f to enable LR compute workaround.\n");
++
+ 	if (amdgpu_mes_log_enable) {
+ 		mes_set_hw_res_pkt.enable_mes_event_int_logging = 1;
+ 		mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr =
+diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
+index 459f7b8d72b4d1..e3f4f5fbbd6e75 100644
+--- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
+@@ -610,6 +610,11 @@ static int mes_v12_0_set_hw_resources(struct amdgpu_mes *mes, int pipe)
+ 	mes_set_hw_res_pkt.use_different_vmid_compute = 1;
+ 	mes_set_hw_res_pkt.enable_reg_active_poll = 1;
+ 	mes_set_hw_res_pkt.enable_level_process_quantum_check = 1;
++	if ((mes->adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x82)
++		mes_set_hw_res_pkt.enable_lr_compute_wa = 1;
++	else
++		dev_info_once(adev->dev,
++			      "MES FW version must be >= 0x82 to enable LR compute workaround.\n");
+ 
+ 	/*
+ 	 * Keep oversubscribe timer for sdma . When we have unmapped doorbell
+diff --git a/drivers/gpu/drm/amd/include/mes_v11_api_def.h b/drivers/gpu/drm/amd/include/mes_v11_api_def.h
+index 21ceafce1f9b27..ab1cfc92dbeb1b 100644
+--- a/drivers/gpu/drm/amd/include/mes_v11_api_def.h
++++ b/drivers/gpu/drm/amd/include/mes_v11_api_def.h
+@@ -230,13 +230,24 @@ union MESAPI_SET_HW_RESOURCES {
+ 				uint32_t disable_add_queue_wptr_mc_addr : 1;
+ 				uint32_t enable_mes_event_int_logging : 1;
+ 				uint32_t enable_reg_active_poll : 1;
+-				uint32_t reserved	: 21;
++				uint32_t use_disable_queue_in_legacy_uq_preemption : 1;
++				uint32_t send_write_data : 1;
++				uint32_t os_tdr_timeout_override : 1;
++				uint32_t use_rs64mem_for_proc_gang_ctx : 1;
++				uint32_t use_add_queue_unmap_flag_addr : 1;
++				uint32_t enable_mes_sch_stb_log : 1;
++				uint32_t limit_single_process : 1;
++				uint32_t is_strix_tmz_wa_enabled  :1;
++				uint32_t enable_lr_compute_wa : 1;
++				uint32_t reserved : 12;
+ 			};
+ 			uint32_t	uint32_t_all;
+ 		};
+ 		uint32_t	oversubscription_timer;
+ 		uint64_t        doorbell_info;
+ 		uint64_t        event_intr_history_gpu_mc_ptr;
++		uint64_t	timestamp;
++		uint32_t	os_tdr_timeout_in_sec;
+ 	};
+ 
+ 	uint32_t	max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS];
+@@ -256,7 +267,8 @@ union MESAPI_SET_HW_RESOURCES_1 {
+ 		};
+ 		uint64_t							mes_info_ctx_mc_addr;
+ 		uint32_t							mes_info_ctx_size;
+-		uint32_t							mes_kiq_unmap_timeout; // unit is 100ms
++		uint64_t							reserved1;
++		uint64_t							cleaner_shader_fence_mc_addr;
+ 	};
+ 
+ 	uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS];
+@@ -563,6 +575,11 @@ enum MESAPI_MISC_OPCODE {
+ 	MESAPI_MISC__READ_REG,
+ 	MESAPI_MISC__WAIT_REG_MEM,
+ 	MESAPI_MISC__SET_SHADER_DEBUGGER,
++	MESAPI_MISC__NOTIFY_WORK_ON_UNMAPPED_QUEUE,
++	MESAPI_MISC__NOTIFY_TO_UNMAP_PROCESSES,
++	MESAPI_MISC__CHANGE_CONFIG,
++	MESAPI_MISC__LAUNCH_CLEANER_SHADER,
++
+ 	MESAPI_MISC__MAX,
+ };
+ 
+@@ -617,6 +634,31 @@ struct SET_SHADER_DEBUGGER {
+ 	uint32_t trap_en;
+ };
+ 
++enum MESAPI_MISC__CHANGE_CONFIG_OPTION {
++	MESAPI_MISC__CHANGE_CONFIG_OPTION_LIMIT_SINGLE_PROCESS = 0,
++	MESAPI_MISC__CHANGE_CONFIG_OPTION_ENABLE_HWS_LOGGING_BUFFER = 1,
++	MESAPI_MISC__CHANGE_CONFIG_OPTION_CHANGE_TDR_CONFIG    = 2,
++
++	MESAPI_MISC__CHANGE_CONFIG_OPTION_MAX = 0x1F
++};
++
++struct CHANGE_CONFIG {
++	enum MESAPI_MISC__CHANGE_CONFIG_OPTION opcode;
++	union {
++		struct {
++			uint32_t limit_single_process : 1;
++			uint32_t enable_hws_logging_buffer : 1;
++			uint32_t reserved : 31;
++		} bits;
++		uint32_t all;
++	} option;
++
++	struct {
++		uint32_t tdr_level;
++		uint32_t tdr_delay;
++	} tdr_config;
++};
++
+ union MESAPI__MISC {
+ 	struct {
+ 		union MES_API_HEADER	header;
+@@ -631,6 +673,7 @@ union MESAPI__MISC {
+ 			struct          WAIT_REG_MEM wait_reg_mem;
+ 			struct		SET_SHADER_DEBUGGER set_shader_debugger;
+ 			enum MES_AMD_PRIORITY_LEVEL queue_sch_level;
++			struct		CHANGE_CONFIG change_config;
+ 
+ 			uint32_t	data[MISC_DATA_MAX_SIZE_IN_DWORDS];
+ 		};
+diff --git a/drivers/gpu/drm/amd/include/mes_v12_api_def.h b/drivers/gpu/drm/amd/include/mes_v12_api_def.h
+index 101e2fe962c6a6..a402974939d63c 100644
+--- a/drivers/gpu/drm/amd/include/mes_v12_api_def.h
++++ b/drivers/gpu/drm/amd/include/mes_v12_api_def.h
+@@ -105,6 +105,43 @@ struct MES_API_STATUS {
+ 	uint64_t api_completion_fence_value;
+ };
+ 
++/*
++ * MES will set api_completion_fence_value in api_completion_fence_addr
++ * when it can successfully process the API. MES will also trigger the
++ * following interrupt when it finishes processing the API, whether it
++ * succeeded or failed:
++ *     Interrupt source id 181 (EOP) with context ID (DW 6 in the int
++ *     cookie) set to 0xb1 and context type set to 8. The driver side
++ *     needs to enable TIME_STAMP_INT_ENABLE in CPC_INT_CNTL for the MES
++ *     pipe to catch this interrupt.
++ *     The driver side also needs to set enable_mes_fence_int = 1 in the
++ *     set_HW_resource package to enable this fence interrupt.
++ * When the API process failed:
++ *     lower 32 bits set to 0.
++ *     higher 32 bits set as follows (bit shift within high 32)
++ *         bit 0  -  7    API specific error code.
++ *         bit 8  - 15    API OPCODE.
++ *         bit 16 - 23    MISC OPCODE if any
++ *         bit 24 - 30    ERROR category (API_ERROR_XXX)
++ *         bit 31         Set to 1 to indicate error status
++ *
++ */
++enum { MES_SCH_ERROR_CODE_HEADER_SHIFT_12 = 8 };
++enum { MES_SCH_ERROR_CODE_MISC_OP_SHIFT_12 = 16 };
++enum { MES_ERROR_CATEGORY_SHIFT_12 = 24 };
++enum { MES_API_STATUS_ERROR_SHIFT_12 = 31 };
++
++enum MES_ERROR_CATEGORY_CODE_12 {
++	MES_ERROR_API                = 1,
++	MES_ERROR_SCHEDULING         = 2,
++	MES_ERROR_UNKNOWN            = 3,
++};
++
++#define MES_ERR_CODE(api_err, opcode, misc_op, category) \
++			((uint64) (api_err | opcode << MES_SCH_ERROR_CODE_HEADER_SHIFT_12 | \
++			misc_op << MES_SCH_ERROR_CODE_MISC_OP_SHIFT_12 | \
++			category << MES_ERROR_CATEGORY_SHIFT_12 | \
++			1 << MES_API_STATUS_ERROR_SHIFT_12) << 32)
+ 
+ enum { MAX_COMPUTE_PIPES = 8 };
+ enum { MAX_GFX_PIPES	 = 2 };
+@@ -248,7 +285,9 @@ union MESAPI_SET_HW_RESOURCES {
+ 				uint32_t enable_mes_sch_stb_log : 1;
+ 				uint32_t limit_single_process : 1;
+ 				uint32_t unmapped_doorbell_handling: 2;
+-				uint32_t reserved : 11;
++				uint32_t enable_mes_fence_int: 1;
++				uint32_t enable_lr_compute_wa : 1;
++				uint32_t reserved : 9;
+ 			};
+ 			uint32_t uint32_all;
+ 		};
+@@ -278,6 +317,8 @@ union MESAPI_SET_HW_RESOURCES_1 {
+ 		uint32_t                            mes_debug_ctx_size;
+ 		/* unit is 100ms */
+ 		uint32_t                            mes_kiq_unmap_timeout;
++		uint64_t                            reserved1;
++		uint64_t                            cleaner_shader_fence_mc_addr;
+ 	};
+ 
+ 	uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS];
+@@ -643,6 +684,10 @@ enum MESAPI_MISC_OPCODE {
+ 	MESAPI_MISC__SET_SHADER_DEBUGGER,
+ 	MESAPI_MISC__NOTIFY_WORK_ON_UNMAPPED_QUEUE,
+ 	MESAPI_MISC__NOTIFY_TO_UNMAP_PROCESSES,
++	MESAPI_MISC__QUERY_HUNG_ENGINE_ID,
++	MESAPI_MISC__CHANGE_CONFIG,
++	MESAPI_MISC__LAUNCH_CLEANER_SHADER,
++	MESAPI_MISC__SETUP_MES_DBGEXT,
+ 
+ 	MESAPI_MISC__MAX,
+ };
+@@ -713,6 +758,31 @@ struct SET_GANG_SUBMIT {
+ 	uint32_t slave_gang_context_array_index;
+ };
+ 
++enum MESAPI_MISC__CHANGE_CONFIG_OPTION {
++	MESAPI_MISC__CHANGE_CONFIG_OPTION_LIMIT_SINGLE_PROCESS = 0,
++	MESAPI_MISC__CHANGE_CONFIG_OPTION_ENABLE_HWS_LOGGING_BUFFER = 1,
++	MESAPI_MISC__CHANGE_CONFIG_OPTION_CHANGE_TDR_CONFIG    = 2,
++
++	MESAPI_MISC__CHANGE_CONFIG_OPTION_MAX = 0x1F
++};
++
++struct CHANGE_CONFIG {
++	enum MESAPI_MISC__CHANGE_CONFIG_OPTION opcode;
++	union {
++		struct  {
++			uint32_t limit_single_process : 1;
++			uint32_t enable_hws_logging_buffer : 1;
++			uint32_t reserved : 30;
++		} bits;
++		uint32_t all;
++	} option;
++
++	struct {
++		uint32_t tdr_level;
++		uint32_t tdr_delay;
++	} tdr_config;
++};
++
+ union MESAPI__MISC {
+ 	struct {
+ 		union MES_API_HEADER	header;
+@@ -726,7 +796,7 @@ union MESAPI__MISC {
+ 			struct WAIT_REG_MEM wait_reg_mem;
+ 			struct SET_SHADER_DEBUGGER set_shader_debugger;
+ 			enum MES_AMD_PRIORITY_LEVEL queue_sch_level;
+-
++			struct CHANGE_CONFIG change_config;
+ 			uint32_t data[MISC_DATA_MAX_SIZE_IN_DWORDS];
+ 		};
+ 		uint64_t		timestamp;
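
The comment block above fully specifies the failure encoding of the 64-bit
completion fence. As a minimal sketch of a decoder for that layout (the
helper names are hypothetical; only the MES_*_SHIFT_12 constants come from
the header patched above, and are assumed to be in scope):

/* Hypothetical decode of the failure value written by MES. */
#include <stdint.h>

static inline int mes_fence_is_error(uint64_t fence)
{
	return ((fence >> 32) >> MES_API_STATUS_ERROR_SHIFT_12) & 0x1;
}

static inline uint32_t mes_fence_api_error(uint64_t fence)
{
	return (fence >> 32) & 0xff;	/* bits 0-7: API error code */
}

static inline uint32_t mes_fence_opcode(uint64_t fence)
{
	return ((fence >> 32) >> MES_SCH_ERROR_CODE_HEADER_SHIFT_12) & 0xff;
}

static inline uint32_t mes_fence_category(uint64_t fence)
{
	return ((fence >> 32) >> MES_ERROR_CATEGORY_SHIFT_12) & 0x7f;
}
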
+diff --git a/drivers/hid/hid-mcp2221.c b/drivers/hid/hid-mcp2221.c
+index 0f93c22a479f33..83941b916cd6b2 100644
+--- a/drivers/hid/hid-mcp2221.c
++++ b/drivers/hid/hid-mcp2221.c
+@@ -814,6 +814,10 @@ static int mcp2221_raw_event(struct hid_device *hdev,
+ 			}
+ 			if (data[2] == MCP2221_I2C_READ_COMPL ||
+ 			    data[2] == MCP2221_I2C_READ_PARTIAL) {
++				if (!mcp->rxbuf || mcp->rxbuf_idx < 0 || data[3] > 60) {
++					mcp->status = -EINVAL;
++					break;
++				}
+ 				buf = mcp->rxbuf;
+ 				memcpy(&buf[mcp->rxbuf_idx], &data[4], data[3]);
+ 				mcp->rxbuf_idx = mcp->rxbuf_idx + data[3];
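
The hid-mcp2221 hunk above stops trusting the device-reported chunk length
before the memcpy(). A standalone sketch of the same validation, with
hypothetical names; the 60-byte cap reflects the usable payload of the
64-byte report after its 4-byte header:

#include <errno.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical helper: reject a bogus device-reported length before
 * copying. chunk_len is capped at 60 (the report payload) and the
 * write cursor is checked against the destination size. */
static int copy_report_chunk(uint8_t *dst, size_t dst_size, size_t *dst_idx,
			     const uint8_t *report, uint8_t chunk_len)
{
	if (!dst || chunk_len > 60 || *dst_idx + chunk_len > dst_size)
		return -EINVAL;

	memcpy(dst + *dst_idx, &report[4], chunk_len);
	*dst_idx += chunk_len;
	return 0;
}
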
+diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
+index 450e1a7e7bac7a..444cf35feebf4d 100644
+--- a/drivers/md/dm-integrity.c
++++ b/drivers/md/dm-integrity.c
+@@ -133,7 +133,7 @@ struct journal_sector {
+ 	commit_id_t commit_id;
+ };
+ 
+-#define MAX_TAG_SIZE			(JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR - offsetof(struct journal_entry, last_bytes[MAX_SECTORS_PER_BLOCK]))
++#define MAX_TAG_SIZE			255
+ 
+ #define METADATA_PADDING_SECTORS	8
+ 
+diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
+index d1306f39fa135d..6a023fe22635ad 100644
+--- a/drivers/media/i2c/tc358743.c
++++ b/drivers/media/i2c/tc358743.c
+@@ -2189,10 +2189,10 @@ static int tc358743_probe(struct i2c_client *client)
+ err_work_queues:
+ 	cec_unregister_adapter(state->cec_adap);
+ 	if (!state->i2c_client->irq) {
+-		del_timer(&state->timer);
++		timer_delete_sync(&state->timer);
+ 		flush_work(&state->work_i2c_poll);
+ 	}
+-	cancel_delayed_work(&state->delayed_work_enable_hotplug);
++	cancel_delayed_work_sync(&state->delayed_work_enable_hotplug);
+ 	mutex_destroy(&state->confctl_mutex);
+ err_hdl:
+ 	media_entity_cleanup(&sd->entity);
+diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
+index aa3df0d05b853b..24dd15c917228b 100644
+--- a/drivers/net/can/rcar/rcar_canfd.c
++++ b/drivers/net/can/rcar/rcar_canfd.c
+@@ -732,9 +732,6 @@ static int rcar_canfd_reset_controller(struct rcar_canfd_global *gpriv)
+ 	/* Reset Global error flags */
+ 	rcar_canfd_write(gpriv->base, RCANFD_GERFL, 0x0);
+ 
+-	/* Set the controller into appropriate mode */
+-	rcar_canfd_set_mode(gpriv);
+-
+ 	/* Transition all Channels to reset mode */
+ 	for_each_set_bit(ch, &gpriv->channels_mask, gpriv->info->max_channels) {
+ 		rcar_canfd_clear_bit(gpriv->base,
+@@ -754,6 +751,10 @@ static int rcar_canfd_reset_controller(struct rcar_canfd_global *gpriv)
+ 			return err;
+ 		}
+ 	}
++
++	/* Set the controller into appropriate mode */
++	rcar_canfd_set_mode(gpriv);
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c
+index ff39afc77d7d23..c9eba1d37b0eb2 100644
+--- a/drivers/net/can/spi/hi311x.c
++++ b/drivers/net/can/spi/hi311x.c
+@@ -545,8 +545,6 @@ static int hi3110_stop(struct net_device *net)
+ 
+ 	priv->force_quit = 1;
+ 	free_irq(spi->irq, priv);
+-	destroy_workqueue(priv->wq);
+-	priv->wq = NULL;
+ 
+ 	mutex_lock(&priv->hi3110_lock);
+ 
+@@ -771,34 +769,23 @@ static int hi3110_open(struct net_device *net)
+ 		goto out_close;
+ 	}
+ 
+-	priv->wq = alloc_workqueue("hi3110_wq", WQ_FREEZABLE | WQ_MEM_RECLAIM,
+-				   0);
+-	if (!priv->wq) {
+-		ret = -ENOMEM;
+-		goto out_free_irq;
+-	}
+-	INIT_WORK(&priv->tx_work, hi3110_tx_work_handler);
+-	INIT_WORK(&priv->restart_work, hi3110_restart_work_handler);
+-
+ 	ret = hi3110_hw_reset(spi);
+ 	if (ret)
+-		goto out_free_wq;
++		goto out_free_irq;
+ 
+ 	ret = hi3110_setup(net);
+ 	if (ret)
+-		goto out_free_wq;
++		goto out_free_irq;
+ 
+ 	ret = hi3110_set_normal_mode(spi);
+ 	if (ret)
+-		goto out_free_wq;
++		goto out_free_irq;
+ 
+ 	netif_wake_queue(net);
+ 	mutex_unlock(&priv->hi3110_lock);
+ 
+ 	return 0;
+ 
+- out_free_wq:
+-	destroy_workqueue(priv->wq);
+  out_free_irq:
+ 	free_irq(spi->irq, priv);
+ 	hi3110_hw_sleep(spi);
+@@ -910,6 +897,15 @@ static int hi3110_can_probe(struct spi_device *spi)
+ 	if (ret)
+ 		goto out_clk;
+ 
++	priv->wq = alloc_workqueue("hi3110_wq", WQ_FREEZABLE | WQ_MEM_RECLAIM,
++				   0);
++	if (!priv->wq) {
++		ret = -ENOMEM;
++		goto out_clk;
++	}
++	INIT_WORK(&priv->tx_work, hi3110_tx_work_handler);
++	INIT_WORK(&priv->restart_work, hi3110_restart_work_handler);
++
+ 	priv->spi = spi;
+ 	mutex_init(&priv->hi3110_lock);
+ 
+@@ -945,6 +941,8 @@ static int hi3110_can_probe(struct spi_device *spi)
+ 	return 0;
+ 
+  error_probe:
++	destroy_workqueue(priv->wq);
++	priv->wq = NULL;
+ 	hi3110_power_enable(priv->power, 0);
+ 
+  out_clk:
+@@ -965,6 +963,9 @@ static void hi3110_can_remove(struct spi_device *spi)
+ 
+ 	hi3110_power_enable(priv->power, 0);
+ 
++	destroy_workqueue(priv->wq);
++	priv->wq = NULL;
++
+ 	clk_disable_unprepare(priv->clk);
+ 
+ 	free_candev(net);
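
The hi3110 hunks move the TX/restart workqueue from ndo_open/ndo_stop to
probe/remove, so queued work can never outlive the queue it runs on. A
hedged sketch of the resulting lifetime pattern (all names here are
placeholders, not the driver's code):

#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_priv {
	struct workqueue_struct *wq;
	struct work_struct tx_work;
};

static void demo_tx_work_handler(struct work_struct *work)
{
	/* transmit path would run here */
}

/* Allocate once, for the whole lifetime of the device... */
static int demo_probe(struct demo_priv *priv)
{
	priv->wq = alloc_workqueue("demo_wq",
				   WQ_FREEZABLE | WQ_MEM_RECLAIM, 0);
	if (!priv->wq)
		return -ENOMEM;
	INIT_WORK(&priv->tx_work, demo_tx_work_handler);
	return 0;
}

/* ...and tear it down only when the device goes away. open()/stop()
 * no longer touch the queue's lifetime at all. */
static void demo_remove(struct demo_priv *priv)
{
	destroy_workqueue(priv->wq);	/* drains queued work first */
	priv->wq = NULL;
}
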
+diff --git a/drivers/net/wireless/realtek/rtl8xxxu/core.c b/drivers/net/wireless/realtek/rtl8xxxu/core.c
+index c6f69d87c38d41..d07f0f75d23f2f 100644
+--- a/drivers/net/wireless/realtek/rtl8xxxu/core.c
++++ b/drivers/net/wireless/realtek/rtl8xxxu/core.c
+@@ -8170,8 +8170,6 @@ static const struct usb_device_id dev_table[] = {
+ 	.driver_info = (unsigned long)&rtl8192cu_fops},
+ {USB_DEVICE_AND_INTERFACE_INFO(0x06f8, 0xe033, 0xff, 0xff, 0xff),
+ 	.driver_info = (unsigned long)&rtl8192cu_fops},
+-{USB_DEVICE_AND_INTERFACE_INFO(0x07b8, 0x8188, 0xff, 0xff, 0xff),
+-	.driver_info = (unsigned long)&rtl8192cu_fops},
+ {USB_DEVICE_AND_INTERFACE_INFO(0x07b8, 0x8189, 0xff, 0xff, 0xff),
+ 	.driver_info = (unsigned long)&rtl8192cu_fops},
+ {USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0x9041, 0xff, 0xff, 0xff),
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
+index c9b9e2bc90cc49..1d75d8ec001660 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
+@@ -291,7 +291,6 @@ static const struct usb_device_id rtl8192c_usb_ids[] = {
+ 	{RTL_USB_DEVICE(0x050d, 0x1102, rtl92cu_hal_cfg)}, /*Belkin - Edimax*/
+ 	{RTL_USB_DEVICE(0x050d, 0x11f2, rtl92cu_hal_cfg)}, /*Belkin - ISY*/
+ 	{RTL_USB_DEVICE(0x06f8, 0xe033, rtl92cu_hal_cfg)}, /*Hercules - Edimax*/
+-	{RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
+ 	{RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
+ 	{RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/
+ 	{RTL_USB_DEVICE(0x0846, 0x9043, rtl92cu_hal_cfg)}, /*NG WNA1000Mv2*/
+diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c
+index c336c66ac8e354..99711a4fb85df8 100644
+--- a/drivers/net/wireless/realtek/rtw89/core.c
++++ b/drivers/net/wireless/realtek/rtw89/core.c
+@@ -960,6 +960,14 @@ rtw89_core_tx_update_desc_info(struct rtw89_dev *rtwdev,
+ 	}
+ }
+ 
++static void rtw89_tx_wait_work(struct wiphy *wiphy, struct wiphy_work *work)
++{
++	struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
++						tx_wait_work.work);
++
++	rtw89_tx_wait_list_clear(rtwdev);
++}
++
+ void rtw89_core_tx_kick_off(struct rtw89_dev *rtwdev, u8 qsel)
+ {
+ 	u8 ch_dma;
+@@ -977,6 +985,8 @@ int rtw89_core_tx_kick_off_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *sk
+ 	unsigned long time_left;
+ 	int ret = 0;
+ 
++	lockdep_assert_wiphy(rtwdev->hw->wiphy);
++
+ 	wait = kzalloc(sizeof(*wait), GFP_KERNEL);
+ 	if (!wait) {
+ 		rtw89_core_tx_kick_off(rtwdev, qsel);
+@@ -984,18 +994,23 @@ int rtw89_core_tx_kick_off_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *sk
+ 	}
+ 
+ 	init_completion(&wait->completion);
++	wait->skb = skb;
+ 	rcu_assign_pointer(skb_data->wait, wait);
+ 
+ 	rtw89_core_tx_kick_off(rtwdev, qsel);
+ 	time_left = wait_for_completion_timeout(&wait->completion,
+ 						msecs_to_jiffies(timeout));
+-	if (time_left == 0)
+-		ret = -ETIMEDOUT;
+-	else if (!wait->tx_done)
+-		ret = -EAGAIN;
+ 
+-	rcu_assign_pointer(skb_data->wait, NULL);
+-	kfree_rcu(wait, rcu_head);
++	if (time_left == 0) {
++		ret = -ETIMEDOUT;
++		list_add_tail(&wait->list, &rtwdev->tx_waits);
++		wiphy_delayed_work_queue(rtwdev->hw->wiphy, &rtwdev->tx_wait_work,
++					 RTW89_TX_WAIT_WORK_TIMEOUT);
++	} else {
++		if (!wait->tx_done)
++			ret = -EAGAIN;
++		rtw89_tx_wait_release(wait);
++	}
+ 
+ 	return ret;
+ }
+@@ -4419,6 +4434,7 @@ int rtw89_core_start(struct rtw89_dev *rtwdev)
+ void rtw89_core_stop(struct rtw89_dev *rtwdev)
+ {
+ 	struct rtw89_btc *btc = &rtwdev->btc;
++	struct wiphy *wiphy = rtwdev->hw->wiphy;
+ 
+ 	/* Prevent stopping twice; enter_ips and ops_stop */
+ 	if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags))
+@@ -4437,6 +4453,7 @@ void rtw89_core_stop(struct rtw89_dev *rtwdev)
+ 	cancel_work_sync(&btc->dhcp_notify_work);
+ 	cancel_work_sync(&btc->icmp_notify_work);
+ 	cancel_delayed_work_sync(&rtwdev->txq_reinvoke_work);
++	wiphy_delayed_work_cancel(wiphy, &rtwdev->tx_wait_work);
+ 	cancel_delayed_work_sync(&rtwdev->track_work);
+ 	cancel_delayed_work_sync(&rtwdev->chanctx_work);
+ 	cancel_delayed_work_sync(&rtwdev->coex_act1_work);
+@@ -4657,6 +4674,7 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
+ 			continue;
+ 		INIT_LIST_HEAD(&rtwdev->scan_info.pkt_list[band]);
+ 	}
++	INIT_LIST_HEAD(&rtwdev->tx_waits);
+ 	INIT_WORK(&rtwdev->ba_work, rtw89_core_ba_work);
+ 	INIT_WORK(&rtwdev->txq_work, rtw89_core_txq_work);
+ 	INIT_DELAYED_WORK(&rtwdev->txq_reinvoke_work, rtw89_core_txq_reinvoke_work);
+@@ -4666,6 +4684,7 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
+ 	INIT_DELAYED_WORK(&rtwdev->coex_bt_devinfo_work, rtw89_coex_bt_devinfo_work);
+ 	INIT_DELAYED_WORK(&rtwdev->coex_rfk_chk_work, rtw89_coex_rfk_chk_work);
+ 	INIT_DELAYED_WORK(&rtwdev->cfo_track_work, rtw89_phy_cfo_track_work);
++	wiphy_delayed_work_init(&rtwdev->tx_wait_work, rtw89_tx_wait_work);
+ 	INIT_DELAYED_WORK(&rtwdev->forbid_ba_work, rtw89_forbid_ba_work);
+ 	INIT_DELAYED_WORK(&rtwdev->antdiv_work, rtw89_phy_antdiv_work);
+ 	rtwdev->txq_wq = alloc_workqueue("rtw89_tx_wq", WQ_UNBOUND | WQ_HIGHPRI, 0);
+diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h
+index 4f64ea392e6c9e..cb703588e3a4ff 100644
+--- a/drivers/net/wireless/realtek/rtw89/core.h
++++ b/drivers/net/wireless/realtek/rtw89/core.h
+@@ -3406,9 +3406,12 @@ struct rtw89_phy_rate_pattern {
+ 	bool enable;
+ };
+ 
++#define RTW89_TX_WAIT_WORK_TIMEOUT msecs_to_jiffies(500)
+ struct rtw89_tx_wait_info {
+ 	struct rcu_head rcu_head;
++	struct list_head list;
+ 	struct completion completion;
++	struct sk_buff *skb;
+ 	bool tx_done;
+ };
+ 
+@@ -5539,6 +5542,9 @@ struct rtw89_dev {
+ 	/* used to protect rpwm */
+ 	spinlock_t rpwm_lock;
+ 
++	struct list_head tx_waits;
++	struct wiphy_delayed_work tx_wait_work;
++
+ 	struct rtw89_cam_info cam_info;
+ 
+ 	struct sk_buff_head c2h_queue;
+@@ -5735,6 +5741,26 @@ u8 rtw89_sta_link_inst_get_index(struct rtw89_sta_link *rtwsta_link)
+ 	return rtwsta_link - rtwsta->links_inst;
+ }
+ 
++static inline void rtw89_tx_wait_release(struct rtw89_tx_wait_info *wait)
++{
++	dev_kfree_skb_any(wait->skb);
++	kfree_rcu(wait, rcu_head);
++}
++
++static inline void rtw89_tx_wait_list_clear(struct rtw89_dev *rtwdev)
++{
++	struct rtw89_tx_wait_info *wait, *tmp;
++
++	lockdep_assert_wiphy(rtwdev->hw->wiphy);
++
++	list_for_each_entry_safe(wait, tmp, &rtwdev->tx_waits, list) {
++		if (!completion_done(&wait->completion))
++			continue;
++		list_del(&wait->list);
++		rtw89_tx_wait_release(wait);
++	}
++}
++
+ static inline int rtw89_hci_tx_write(struct rtw89_dev *rtwdev,
+ 				     struct rtw89_core_tx_request *tx_req)
+ {
+@@ -5744,6 +5770,7 @@ static inline int rtw89_hci_tx_write(struct rtw89_dev *rtwdev,
+ static inline void rtw89_hci_reset(struct rtw89_dev *rtwdev)
+ {
+ 	rtwdev->hci.ops->reset(rtwdev);
++	rtw89_tx_wait_list_clear(rtwdev);
+ }
+ 
+ static inline int rtw89_hci_start(struct rtw89_dev *rtwdev)
+@@ -6745,11 +6772,12 @@ static inline struct sk_buff *rtw89_alloc_skb_for_rx(struct rtw89_dev *rtwdev,
+ 	return dev_alloc_skb(length);
+ }
+ 
+-static inline void rtw89_core_tx_wait_complete(struct rtw89_dev *rtwdev,
++static inline bool rtw89_core_tx_wait_complete(struct rtw89_dev *rtwdev,
+ 					       struct rtw89_tx_skb_data *skb_data,
+ 					       bool tx_done)
+ {
+ 	struct rtw89_tx_wait_info *wait;
++	bool ret = false;
+ 
+ 	rcu_read_lock();
+ 
+@@ -6757,11 +6785,14 @@ static inline void rtw89_core_tx_wait_complete(struct rtw89_dev *rtwdev,
+ 	if (!wait)
+ 		goto out;
+ 
++	ret = true;
+ 	wait->tx_done = tx_done;
+-	complete(&wait->completion);
++	/* Don't access skb anymore after completion */
++	complete_all(&wait->completion);
+ 
+ out:
+ 	rcu_read_unlock();
++	return ret;
+ }
+ 
+ static inline bool rtw89_is_mlo_1_1(struct rtw89_dev *rtwdev)
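
On timeout the waiter above no longer frees the wait object; ownership
moves to rtwdev->tx_waits and the delayed work reaps entries once their
completion has fired. A simplified sketch of that handoff (names are ours;
the driver itself uses kfree_rcu() and a wiphy delayed work as the reaper):

#include <linux/completion.h>
#include <linux/list.h>
#include <linux/slab.h>

struct demo_wait {
	struct list_head list;
	struct completion done;
};

/* A waiter that gives up parks the object on a reap list instead of
 * freeing it while the completion path may still be touching it. */
static int demo_wait_for_tx(struct demo_wait *wait,
			    struct list_head *reap_list,
			    unsigned long timeout)
{
	if (!wait_for_completion_timeout(&wait->done, timeout)) {
		list_add_tail(&wait->list, reap_list);	/* reaper frees it */
		return -ETIMEDOUT;
	}

	kfree(wait);	/* completed: safe to release right away */
	return 0;
}

/* The reaper only frees entries whose completion has since fired. */
static void demo_reap(struct list_head *reap_list)
{
	struct demo_wait *wait, *tmp;

	list_for_each_entry_safe(wait, tmp, reap_list, list) {
		if (!completion_done(&wait->done))
			continue;
		list_del(&wait->list);
		kfree(wait);
	}
}
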
+diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c
+index e203d3b2a82749..5fd5fe88e6b083 100644
+--- a/drivers/net/wireless/realtek/rtw89/pci.c
++++ b/drivers/net/wireless/realtek/rtw89/pci.c
+@@ -458,7 +458,8 @@ static void rtw89_pci_tx_status(struct rtw89_dev *rtwdev,
+ 	struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb);
+ 	struct ieee80211_tx_info *info;
+ 
+-	rtw89_core_tx_wait_complete(rtwdev, skb_data, tx_status == RTW89_TX_DONE);
++	if (rtw89_core_tx_wait_complete(rtwdev, skb_data, tx_status == RTW89_TX_DONE))
++		return;
+ 
+ 	info = IEEE80211_SKB_CB(skb);
+ 	ieee80211_tx_info_clear_status(info);
+diff --git a/drivers/net/wireless/realtek/rtw89/ser.c b/drivers/net/wireless/realtek/rtw89/ser.c
+index 02c2ac12f197a5..c0f0e3d71f5f54 100644
+--- a/drivers/net/wireless/realtek/rtw89/ser.c
++++ b/drivers/net/wireless/realtek/rtw89/ser.c
+@@ -484,6 +484,7 @@ static void ser_l1_reset_pre_st_hdl(struct rtw89_ser *ser, u8 evt)
+ static void ser_reset_trx_st_hdl(struct rtw89_ser *ser, u8 evt)
+ {
+ 	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
++	struct wiphy *wiphy = rtwdev->hw->wiphy;
+ 
+ 	switch (evt) {
+ 	case SER_EV_STATE_IN:
+@@ -496,7 +497,9 @@ static void ser_reset_trx_st_hdl(struct rtw89_ser *ser, u8 evt)
+ 		}
+ 
+ 		drv_stop_rx(ser);
++		wiphy_lock(wiphy);
+ 		drv_trx_reset(ser);
++		wiphy_unlock(wiphy);
+ 
+ 		/* wait m3 */
+ 		hal_send_m2_event(ser);
+diff --git a/drivers/nvmem/layouts.c b/drivers/nvmem/layouts.c
+index 65d39e19f6eca4..f381ce1e84bd37 100644
+--- a/drivers/nvmem/layouts.c
++++ b/drivers/nvmem/layouts.c
+@@ -45,11 +45,24 @@ static void nvmem_layout_bus_remove(struct device *dev)
+ 	return drv->remove(layout);
+ }
+ 
++static int nvmem_layout_bus_uevent(const struct device *dev,
++				   struct kobj_uevent_env *env)
++{
++	int ret;
++
++	ret = of_device_uevent_modalias(dev, env);
++	if (ret != -ENODEV)
++		return ret;
++
++	return 0;
++}
++
+ static const struct bus_type nvmem_layout_bus_type = {
+ 	.name		= "nvmem-layout",
+ 	.match		= nvmem_layout_bus_match,
+ 	.probe		= nvmem_layout_bus_probe,
+ 	.remove		= nvmem_layout_bus_remove,
++	.uevent		= nvmem_layout_bus_uevent,
+ };
+ 
+ int __nvmem_layout_driver_register(struct nvmem_layout_driver *drv,
+diff --git a/drivers/platform/x86/amd/pmc/pmc-quirks.c b/drivers/platform/x86/amd/pmc/pmc-quirks.c
+index 6f5437d210a617..9fd2829ee2ab48 100644
+--- a/drivers/platform/x86/amd/pmc/pmc-quirks.c
++++ b/drivers/platform/x86/amd/pmc/pmc-quirks.c
+@@ -233,6 +233,14 @@ static const struct dmi_system_id fwbug_list[] = {
+ 			DMI_MATCH(DMI_BOARD_NAME, "WUJIE14-GX4HRXL"),
+ 		}
+ 	},
++	{
++		.ident = "MECHREVO Yilong15Pro Series GM5HG7A",
++		.driver_data = &quirk_spurious_8042,
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "MECHREVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Yilong15Pro Series GM5HG7A"),
++		}
++	},
+ 	/* https://bugzilla.kernel.org/show_bug.cgi?id=220116 */
+ 	{
+ 		.ident = "PCSpecialist Lafite Pro V 14M",
+@@ -242,6 +250,13 @@ static const struct dmi_system_id fwbug_list[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "Lafite Pro V 14M"),
+ 		}
+ 	},
++	{
++		.ident = "TUXEDO Stellaris Slim 15 AMD Gen6",
++		.driver_data = &quirk_spurious_8042,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_NAME, "GMxHGxx"),
++		}
++	},
+ 	{
+ 		.ident = "TUXEDO InfinityBook Pro 14/15 AMD Gen10",
+ 		.driver_data = &quirk_spurious_8042,
+diff --git a/drivers/platform/x86/amd/pmf/core.c b/drivers/platform/x86/amd/pmf/core.c
+index 719caa2a00f056..8a1e2268d301a3 100644
+--- a/drivers/platform/x86/amd/pmf/core.c
++++ b/drivers/platform/x86/amd/pmf/core.c
+@@ -406,6 +406,7 @@ static const struct acpi_device_id amd_pmf_acpi_ids[] = {
+ 	{"AMDI0103", 0},
+ 	{"AMDI0105", 0},
+ 	{"AMDI0107", 0},
++	{"AMDI0108", 0},
+ 	{ }
+ };
+ MODULE_DEVICE_TABLE(acpi, amd_pmf_acpi_ids);
+diff --git a/drivers/staging/axis-fifo/axis-fifo.c b/drivers/staging/axis-fifo/axis-fifo.c
+index 6769f066b0b4e8..028ed5d0ecbe75 100644
+--- a/drivers/staging/axis-fifo/axis-fifo.c
++++ b/drivers/staging/axis-fifo/axis-fifo.c
+@@ -42,7 +42,6 @@
+ #define DRIVER_NAME "axis_fifo"
+ 
+ #define READ_BUF_SIZE 128U /* read buffer length in words */
+-#define WRITE_BUF_SIZE 128U /* write buffer length in words */
+ 
+ /* ----------------------------
+  *     IP register offsets
+@@ -392,6 +391,7 @@ static ssize_t axis_fifo_read(struct file *f, char __user *buf,
+ 	}
+ 
+ 	bytes_available = ioread32(fifo->base_addr + XLLF_RLR_OFFSET);
++	words_available = bytes_available / sizeof(u32);
+ 	if (!bytes_available) {
+ 		dev_err(fifo->dt_device, "received a packet of length 0\n");
+ 		ret = -EIO;
+@@ -402,7 +402,7 @@ static ssize_t axis_fifo_read(struct file *f, char __user *buf,
+ 		dev_err(fifo->dt_device, "user read buffer too small (available bytes=%zu user buffer bytes=%zu)\n",
+ 			bytes_available, len);
+ 		ret = -EINVAL;
+-		goto end_unlock;
++		goto err_flush_rx;
+ 	}
+ 
+ 	if (bytes_available % sizeof(u32)) {
+@@ -411,11 +411,9 @@ static ssize_t axis_fifo_read(struct file *f, char __user *buf,
+ 		 */
+ 		dev_err(fifo->dt_device, "received a packet that isn't word-aligned\n");
+ 		ret = -EIO;
+-		goto end_unlock;
++		goto err_flush_rx;
+ 	}
+ 
+-	words_available = bytes_available / sizeof(u32);
+-
+ 	/* read data into an intermediate buffer, copying the contents
+ 	 * to userspace when the buffer is full
+ 	 */
+@@ -427,18 +425,23 @@ static ssize_t axis_fifo_read(struct file *f, char __user *buf,
+ 			tmp_buf[i] = ioread32(fifo->base_addr +
+ 					      XLLF_RDFD_OFFSET);
+ 		}
++		words_available -= copy;
+ 
+ 		if (copy_to_user(buf + copied * sizeof(u32), tmp_buf,
+ 				 copy * sizeof(u32))) {
+ 			ret = -EFAULT;
+-			goto end_unlock;
++			goto err_flush_rx;
+ 		}
+ 
+ 		copied += copy;
+-		words_available -= copy;
+ 	}
++	mutex_unlock(&fifo->read_lock);
++
++	return bytes_available;
+ 
+-	ret = bytes_available;
++err_flush_rx:
++	while (words_available--)
++		ioread32(fifo->base_addr + XLLF_RDFD_OFFSET);
+ 
+ end_unlock:
+ 	mutex_unlock(&fifo->read_lock);
+@@ -466,11 +469,8 @@ static ssize_t axis_fifo_write(struct file *f, const char __user *buf,
+ {
+ 	struct axis_fifo *fifo = (struct axis_fifo *)f->private_data;
+ 	unsigned int words_to_write;
+-	unsigned int copied;
+-	unsigned int copy;
+-	unsigned int i;
++	u32 *txbuf;
+ 	int ret;
+-	u32 tmp_buf[WRITE_BUF_SIZE];
+ 
+ 	if (len % sizeof(u32)) {
+ 		dev_err(fifo->dt_device,
+@@ -486,11 +486,17 @@ static ssize_t axis_fifo_write(struct file *f, const char __user *buf,
+ 		return -EINVAL;
+ 	}
+ 
+-	if (words_to_write > fifo->tx_fifo_depth) {
+-		dev_err(fifo->dt_device, "tried to write more words [%u] than slots in the fifo buffer [%u]\n",
+-			words_to_write, fifo->tx_fifo_depth);
++	/*
++	 * In 'Store-and-Forward' mode, the maximum packet that can be
++	 * transmitted is limited by the size of the FIFO, which is
++	 * (C_TX_FIFO_DEPTH-4)*(data interface width/8) bytes.
++	 *
++	 * Do not attempt to send a packet larger than 'tx_fifo_depth - 4',
++	 * otherwise a 'Transmit Packet Overrun Error' interrupt will be
++	 * raised, which requires a reset of the TX circuit to recover.
++	 */
++	if (words_to_write > (fifo->tx_fifo_depth - 4))
+ 		return -EINVAL;
+-	}
+ 
+ 	if (fifo->write_flags & O_NONBLOCK) {
+ 		/*
+@@ -529,32 +535,20 @@ static ssize_t axis_fifo_write(struct file *f, const char __user *buf,
+ 		}
+ 	}
+ 
+-	/* write data from an intermediate buffer into the fifo IP, refilling
+-	 * the buffer with userspace data as needed
+-	 */
+-	copied = 0;
+-	while (words_to_write > 0) {
+-		copy = min(words_to_write, WRITE_BUF_SIZE);
+-
+-		if (copy_from_user(tmp_buf, buf + copied * sizeof(u32),
+-				   copy * sizeof(u32))) {
+-			ret = -EFAULT;
+-			goto end_unlock;
+-		}
+-
+-		for (i = 0; i < copy; i++)
+-			iowrite32(tmp_buf[i], fifo->base_addr +
+-				  XLLF_TDFD_OFFSET);
+-
+-		copied += copy;
+-		words_to_write -= copy;
++	txbuf = vmemdup_user(buf, len);
++	if (IS_ERR(txbuf)) {
++		ret = PTR_ERR(txbuf);
++		goto end_unlock;
+ 	}
+ 
+-	ret = copied * sizeof(u32);
++	for (int i = 0; i < words_to_write; ++i)
++		iowrite32(txbuf[i], fifo->base_addr + XLLF_TDFD_OFFSET);
+ 
+ 	/* write packet size to fifo */
+-	iowrite32(ret, fifo->base_addr + XLLF_TLR_OFFSET);
++	iowrite32(len, fifo->base_addr + XLLF_TLR_OFFSET);
+ 
++	ret = len;
++	kvfree(txbuf);
+ end_unlock:
+ 	mutex_unlock(&fifo->write_lock);
+ 
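
The rewritten write path above copies the whole user buffer in one
vmemdup_user() call instead of staging it through a 128-word stack buffer.
A minimal sketch of that pattern, with hypothetical names:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

/* Hypothetical write helper: duplicate the user buffer in one call
 * (kmalloc- or vmalloc-backed, hence kvfree) and push it word by word. */
static ssize_t demo_write(void __iomem *fifo_reg,
			  const char __user *buf, size_t len)
{
	u32 *kbuf;
	size_t i;

	kbuf = vmemdup_user(buf, len);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);

	for (i = 0; i < len / sizeof(u32); i++)
		iowrite32(kbuf[i], fifo_reg);

	kvfree(kbuf);
	return len;
}
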
+diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
+index 28e4beeabf8f37..4fd789a77a13b5 100644
+--- a/drivers/tty/serial/Kconfig
++++ b/drivers/tty/serial/Kconfig
+@@ -1401,7 +1401,7 @@ config SERIAL_STM32
+ 
+ config SERIAL_STM32_CONSOLE
+ 	bool "Support for console on STM32"
+-	depends on SERIAL_STM32=y
++	depends on SERIAL_STM32
+ 	select SERIAL_CORE_CONSOLE
+ 	select SERIAL_EARLYCON
+ 
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index fc869b7f803f04..62e984d20e5982 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -2114,6 +2114,12 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9003, 0xff) },	/* Simcom SIM7500/SIM7600 MBIM mode */
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9011, 0xff),	/* Simcom SIM7500/SIM7600 RNDIS mode */
+ 	  .driver_info = RSVD(7) },
++	{ USB_DEVICE(0x1e0e, 0x9071),				/* Simcom SIM8230 RMNET mode */
++	  .driver_info = RSVD(3) | RSVD(4) },
++	{ USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9078, 0xff),	/* Simcom SIM8230 ECM mode */
++	  .driver_info = RSVD(5) },
++	{ USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x907b, 0xff),	/* Simcom SIM8230 RNDIS mode */
++	  .driver_info = RSVD(5) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9205, 0xff) },	/* Simcom SIM7070/SIM7080/SIM7090 AT+ECM mode */
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9206, 0xff) },	/* Simcom SIM7070/SIM7080/SIM7090 AT-only mode */
+ 	{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
+diff --git a/fs/btrfs/ref-verify.c b/fs/btrfs/ref-verify.c
+index 2928abf7eb8271..fc46190d26c8e9 100644
+--- a/fs/btrfs/ref-verify.c
++++ b/fs/btrfs/ref-verify.c
+@@ -998,11 +998,18 @@ int btrfs_build_ref_tree(struct btrfs_fs_info *fs_info)
+ 	if (!btrfs_test_opt(fs_info, REF_VERIFY))
+ 		return 0;
+ 
++	extent_root = btrfs_extent_root(fs_info, 0);
++	/* If the extent tree is damaged we cannot ignore it (IGNOREBADROOTS). */
++	if (IS_ERR(extent_root)) {
++		btrfs_warn(fs_info, "ref-verify: extent tree not available, disabling");
++		btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY);
++		return 0;
++	}
++
+ 	path = btrfs_alloc_path();
+ 	if (!path)
+ 		return -ENOMEM;
+ 
+-	extent_root = btrfs_extent_root(fs_info, 0);
+ 	eb = btrfs_read_lock_root_node(extent_root);
+ 	level = btrfs_header_level(eb);
+ 	path->nodes[level] = eb;
+diff --git a/fs/netfs/buffered_write.c b/fs/netfs/buffered_write.c
+index 896d1d4219ed9f..be77a137cc8711 100644
+--- a/fs/netfs/buffered_write.c
++++ b/fs/netfs/buffered_write.c
+@@ -340,7 +340,7 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
+ 		folio_put(folio);
+ 		ret = filemap_write_and_wait_range(mapping, fpos, fpos + flen - 1);
+ 		if (ret < 0)
+-			goto error_folio_unlock;
++			goto out;
+ 		continue;
+ 
+ 	copied:
+diff --git a/include/linux/device.h b/include/linux/device.h
+index 39120b172992ed..1f6130e13620de 100644
+--- a/include/linux/device.h
++++ b/include/linux/device.h
+@@ -873,6 +873,9 @@ static inline bool device_pm_not_required(struct device *dev)
+ static inline void device_set_pm_not_required(struct device *dev)
+ {
+ 	dev->power.no_pm = true;
++#ifdef CONFIG_PM
++	dev->power.no_callbacks = true;
++#endif
+ }
+ 
+ static inline void dev_pm_syscore_device(struct device *dev, bool val)
+diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
+index 791e4868f2d4e1..7e9d731c459760 100644
+--- a/net/9p/trans_fd.c
++++ b/net/9p/trans_fd.c
+@@ -725,10 +725,10 @@ static int p9_fd_cancelled(struct p9_client *client, struct p9_req_t *req)
+ 	p9_debug(P9_DEBUG_TRANS, "client %p req %p\n", client, req);
+ 
+ 	spin_lock(&m->req_lock);
+-	/* Ignore cancelled request if message has been received
+-	 * before lock.
+-	 */
+-	if (req->status == REQ_STATUS_RCVD) {
++	/* Ignore the cancelled request if its status changed since the
++	 * request was processed in p9_client_flush().
++	 */
++	if (req->status != REQ_STATUS_SENT) {
+ 		spin_unlock(&m->req_lock);
+ 		return 0;
+ 	}
+diff --git a/rust/kernel/block/mq/gen_disk.rs b/rust/kernel/block/mq/gen_disk.rs
+index c6df153ebb8860..8cd47ddd1dbb5f 100644
+--- a/rust/kernel/block/mq/gen_disk.rs
++++ b/rust/kernel/block/mq/gen_disk.rs
+@@ -3,7 +3,7 @@
+ //! Generic disk abstraction.
+ //!
+ //! C header: [`include/linux/blkdev.h`](srctree/include/linux/blkdev.h)
+-//! C header: [`include/linux/blk_mq.h`](srctree/include/linux/blk_mq.h)
++//! C header: [`include/linux/blk-mq.h`](srctree/include/linux/blk-mq.h)
+ 
+ use crate::block::mq::{raw_writer::RawWriter, Operations, TagSet};
+ use crate::{bindings, error::from_err_ptr, error::Result, sync::Arc};
+diff --git a/sound/soc/amd/acp/amd.h b/sound/soc/amd/acp/amd.h
+index 854269fea875f1..aa0aa64202fe42 100644
+--- a/sound/soc/amd/acp/amd.h
++++ b/sound/soc/amd/acp/amd.h
+@@ -135,7 +135,7 @@
+ #define PDM_DMA_INTR_MASK       0x10000
+ #define PDM_DEC_64              0x2
+ #define PDM_CLK_FREQ_MASK       0x07
+-#define PDM_MISC_CTRL_MASK      0x10
++#define PDM_MISC_CTRL_MASK      0x18
+ #define PDM_ENABLE              0x01
+ #define PDM_DISABLE             0x00
+ #define DMA_EN_MASK             0x02
+diff --git a/sound/soc/codecs/rt5682s.c b/sound/soc/codecs/rt5682s.c
+index ce2e88e066f3e5..d773c96e2543c9 100644
+--- a/sound/soc/codecs/rt5682s.c
++++ b/sound/soc/codecs/rt5682s.c
+@@ -653,14 +653,15 @@ static void rt5682s_sar_power_mode(struct snd_soc_component *component, int mode
+ 	switch (mode) {
+ 	case SAR_PWR_SAVING:
+ 		snd_soc_component_update_bits(component, RT5682S_CBJ_CTRL_3,
+-			RT5682S_CBJ_IN_BUF_MASK, RT5682S_CBJ_IN_BUF_DIS);
++			RT5682S_CBJ_IN_BUF_MASK, RT5682S_CBJ_IN_BUF_EN);
+ 		snd_soc_component_update_bits(component, RT5682S_CBJ_CTRL_1,
+-			RT5682S_MB1_PATH_MASK | RT5682S_MB2_PATH_MASK,
+-			RT5682S_CTRL_MB1_REG | RT5682S_CTRL_MB2_REG);
++			RT5682S_MB1_PATH_MASK | RT5682S_MB2_PATH_MASK |
++			RT5682S_VREF_POW_MASK, RT5682S_CTRL_MB1_FSM |
++			RT5682S_CTRL_MB2_FSM | RT5682S_VREF_POW_FSM);
+ 		snd_soc_component_update_bits(component, RT5682S_SAR_IL_CMD_1,
+ 			RT5682S_SAR_BUTDET_MASK | RT5682S_SAR_BUTDET_POW_MASK |
+ 			RT5682S_SAR_SEL_MB1_2_CTL_MASK, RT5682S_SAR_BUTDET_DIS |
+-			RT5682S_SAR_BUTDET_POW_SAV | RT5682S_SAR_SEL_MB1_2_MANU);
++			RT5682S_SAR_BUTDET_POW_NORM | RT5682S_SAR_SEL_MB1_2_MANU);
+ 		usleep_range(5000, 5500);
+ 		snd_soc_component_update_bits(component, RT5682S_SAR_IL_CMD_1,
+ 			RT5682S_SAR_BUTDET_MASK, RT5682S_SAR_BUTDET_EN);
+@@ -688,7 +689,7 @@ static void rt5682s_sar_power_mode(struct snd_soc_component *component, int mode
+ 		snd_soc_component_update_bits(component, RT5682S_SAR_IL_CMD_1,
+ 			RT5682S_SAR_BUTDET_MASK | RT5682S_SAR_BUTDET_POW_MASK |
+ 			RT5682S_SAR_SEL_MB1_2_CTL_MASK, RT5682S_SAR_BUTDET_DIS |
+-			RT5682S_SAR_BUTDET_POW_SAV | RT5682S_SAR_SEL_MB1_2_MANU);
++			RT5682S_SAR_BUTDET_POW_NORM | RT5682S_SAR_SEL_MB1_2_MANU);
+ 		break;
+ 	default:
+ 		dev_err(component->dev, "Invalid SAR Power mode: %d\n", mode);
+@@ -725,7 +726,7 @@ static void rt5682s_disable_push_button_irq(struct snd_soc_component *component)
+ 	snd_soc_component_update_bits(component, RT5682S_SAR_IL_CMD_1,
+ 		RT5682S_SAR_BUTDET_MASK | RT5682S_SAR_BUTDET_POW_MASK |
+ 		RT5682S_SAR_SEL_MB1_2_CTL_MASK, RT5682S_SAR_BUTDET_DIS |
+-		RT5682S_SAR_BUTDET_POW_SAV | RT5682S_SAR_SEL_MB1_2_MANU);
++		RT5682S_SAR_BUTDET_POW_NORM | RT5682S_SAR_SEL_MB1_2_MANU);
+ }
+ 
+ /**
+@@ -786,7 +787,7 @@ static int rt5682s_headset_detect(struct snd_soc_component *component, int jack_
+ 			jack_type = SND_JACK_HEADSET;
+ 			snd_soc_component_write(component, RT5682S_SAR_IL_CMD_3, 0x024c);
+ 			snd_soc_component_update_bits(component, RT5682S_CBJ_CTRL_1,
+-				RT5682S_FAST_OFF_MASK, RT5682S_FAST_OFF_EN);
++				RT5682S_FAST_OFF_MASK, RT5682S_FAST_OFF_DIS);
+ 			snd_soc_component_update_bits(component, RT5682S_SAR_IL_CMD_1,
+ 				RT5682S_SAR_SEL_MB1_2_MASK, val << RT5682S_SAR_SEL_MB1_2_SFT);
+ 			rt5682s_enable_push_button_irq(component);
+@@ -966,7 +967,7 @@ static int rt5682s_set_jack_detect(struct snd_soc_component *component,
+ 			RT5682S_EMB_JD_MASK | RT5682S_DET_TYPE |
+ 			RT5682S_POL_FAST_OFF_MASK | RT5682S_MIC_CAP_MASK,
+ 			RT5682S_EMB_JD_EN | RT5682S_DET_TYPE |
+-			RT5682S_POL_FAST_OFF_HIGH | RT5682S_MIC_CAP_HS);
++			RT5682S_POL_FAST_OFF_LOW | RT5682S_MIC_CAP_HS);
+ 		regmap_update_bits(rt5682s->regmap, RT5682S_SAR_IL_CMD_1,
+ 			RT5682S_SAR_POW_MASK, RT5682S_SAR_POW_EN);
+ 		regmap_update_bits(rt5682s->regmap, RT5682S_GPIO_CTRL_1,
+diff --git a/sound/usb/midi.c b/sound/usb/midi.c
+index a792ada18863ac..461e183680daa0 100644
+--- a/sound/usb/midi.c
++++ b/sound/usb/midi.c
+@@ -1522,12 +1522,12 @@ static void snd_usbmidi_free(struct snd_usb_midi *umidi)
+ {
+ 	int i;
+ 
++	if (!umidi->disconnected)
++		snd_usbmidi_disconnect(&umidi->list);
++
+ 	for (i = 0; i < MIDI_MAX_ENDPOINTS; ++i) {
+ 		struct snd_usb_midi_endpoint *ep = &umidi->endpoints[i];
+-		if (ep->out)
+-			snd_usbmidi_out_endpoint_delete(ep->out);
+-		if (ep->in)
+-			snd_usbmidi_in_endpoint_delete(ep->in);
++		kfree(ep->out);
+ 	}
+ 	mutex_destroy(&umidi->mutex);
+ 	kfree(umidi);
+@@ -1553,7 +1553,7 @@ void snd_usbmidi_disconnect(struct list_head *p)
+ 	spin_unlock_irq(&umidi->disc_lock);
+ 	up_write(&umidi->disc_rwsem);
+ 
+-	del_timer_sync(&umidi->error_timer);
++	timer_shutdown_sync(&umidi->error_timer);
+ 
+ 	for (i = 0; i < MIDI_MAX_ENDPOINTS; ++i) {
+ 		struct snd_usb_midi_endpoint *ep = &umidi->endpoints[i];
+diff --git a/tools/lib/subcmd/help.c b/tools/lib/subcmd/help.c
+index 9ef569492560ef..ddaeb4eb3e2497 100644
+--- a/tools/lib/subcmd/help.c
++++ b/tools/lib/subcmd/help.c
+@@ -75,6 +75,9 @@ void exclude_cmds(struct cmdnames *cmds, struct cmdnames *excludes)
+ 	size_t ci, cj, ei;
+ 	int cmp;
+ 
++	if (!excludes->cnt)
++		return;
++
+ 	ci = cj = ei = 0;
+ 	while (ci < cmds->cnt && ei < excludes->cnt) {
+ 		cmp = strcmp(cmds->names[ci]->name, excludes->names[ei]->name);
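
exclude_cmds() removes one sorted name list from another with a two-pointer
merge, and the added guard simply skips the walk when there is nothing to
exclude. A compact standalone rendition of the idea (simplified, not the
libsubcmd code):

#include <string.h>

/* Two-pointer set difference over sorted string arrays: keep the
 * entries of cmds[] that do not appear in ex[]; returns the new count. */
static size_t demo_exclude(const char **cmds, size_t nc,
			   const char *const *ex, size_t ne)
{
	size_t ci = 0, cj = 0, ei = 0;

	if (!ne)		/* the guard added above: nothing to drop */
		return nc;

	while (ci < nc && ei < ne) {
		int cmp = strcmp(cmds[ci], ex[ei]);

		if (cmp < 0) {
			cmds[cj++] = cmds[ci++];	/* keep it */
		} else if (cmp == 0) {
			ci++;				/* drop the match */
			ei++;
		} else {
			ei++;				/* unused exclude */
		}
	}
	while (ci < nc)
		cmds[cj++] = cmds[ci++];

	return cj;
}
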


^ permalink raw reply related	[flat|nested] 82+ messages in thread
* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2025-10-06 11:06 Arisu Tachibana
  0 siblings, 0 replies; 82+ messages in thread
From: Arisu Tachibana @ 2025-10-06 11:06 UTC (permalink / raw
  To: gentoo-commits

commit:     da5eb372e048eb0af130d2ac6ecdf1542c8627e3
Author:     Arisu Tachibana <alicef <AT> gentoo <DOT> org>
AuthorDate: Mon Oct  6 11:06:47 2025 +0000
Commit:     Arisu Tachibana <alicef <AT> gentoo <DOT> org>
CommitDate: Mon Oct  6 11:06:47 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=da5eb372

Linux patch 6.12.51

Signed-off-by: Arisu Tachibana <alicef <AT> gentoo.org>

 0000_README              |   4 +
 1050_linux-6.12.51.patch | 374 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 378 insertions(+)

diff --git a/0000_README b/0000_README
index faa9f43e..b7557830 100644
--- a/0000_README
+++ b/0000_README
@@ -243,6 +243,10 @@ Patch:  1049_linux-6.12.50.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.12.50
 
+Patch:  1050_linux-6.12.51.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.12.51
+
 Patch:  1500_fortify-copy-size-value-range-tracking-fix.patch
 From:   https://git.kernel.org/
 Desc:   fortify: Hide run-time copy size from value range tracking

diff --git a/1050_linux-6.12.51.patch b/1050_linux-6.12.51.patch
new file mode 100644
index 00000000..a3006d3d
--- /dev/null
+++ b/1050_linux-6.12.51.patch
@@ -0,0 +1,374 @@
+diff --git a/Makefile b/Makefile
+index 7b0a94828fdb1c..05b7983b56eda7 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 12
+-SUBLEVEL = 50
++SUBLEVEL = 51
+ EXTRAVERSION =
+ NAME = Baby Opossum Posse
+ 
+diff --git a/drivers/media/pci/b2c2/flexcop-pci.c b/drivers/media/pci/b2c2/flexcop-pci.c
+index 486c8ec0fa60d9..ab53c5b02c48df 100644
+--- a/drivers/media/pci/b2c2/flexcop-pci.c
++++ b/drivers/media/pci/b2c2/flexcop-pci.c
+@@ -411,7 +411,7 @@ static void flexcop_pci_remove(struct pci_dev *pdev)
+ 	struct flexcop_pci *fc_pci = pci_get_drvdata(pdev);
+ 
+ 	if (irq_chk_intv > 0)
+-		cancel_delayed_work(&fc_pci->irq_check_work);
++		cancel_delayed_work_sync(&fc_pci->irq_check_work);
+ 
+ 	flexcop_pci_dma_exit(fc_pci);
+ 	flexcop_device_exit(fc_pci->fc_dev);
+diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
+index 8f1361bcce3a60..1e7f800701331e 100644
+--- a/drivers/media/rc/imon.c
++++ b/drivers/media/rc/imon.c
+@@ -536,7 +536,9 @@ static int display_open(struct inode *inode, struct file *file)
+ 
+ 	mutex_lock(&ictx->lock);
+ 
+-	if (!ictx->display_supported) {
++	if (ictx->disconnected) {
++		retval = -ENODEV;
++	} else if (!ictx->display_supported) {
+ 		pr_err("display not supported by device\n");
+ 		retval = -ENODEV;
+ 	} else if (ictx->display_isopen) {
+@@ -598,6 +600,9 @@ static int send_packet(struct imon_context *ictx)
+ 	int retval = 0;
+ 	struct usb_ctrlrequest *control_req = NULL;
+ 
++	if (ictx->disconnected)
++		return -ENODEV;
++
+ 	/* Check if we need to use control or interrupt urb */
+ 	if (!ictx->tx_control) {
+ 		pipe = usb_sndintpipe(ictx->usbdev_intf0,
+@@ -949,12 +954,14 @@ static ssize_t vfd_write(struct file *file, const char __user *buf,
+ 	static const unsigned char vfd_packet6[] = {
+ 		0x01, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF };
+ 
+-	if (ictx->disconnected)
+-		return -ENODEV;
+-
+ 	if (mutex_lock_interruptible(&ictx->lock))
+ 		return -ERESTARTSYS;
+ 
++	if (ictx->disconnected) {
++		retval = -ENODEV;
++		goto exit;
++	}
++
+ 	if (!ictx->dev_present_intf0) {
+ 		pr_err_ratelimited("no iMON device present\n");
+ 		retval = -ENODEV;
+@@ -1029,11 +1036,13 @@ static ssize_t lcd_write(struct file *file, const char __user *buf,
+ 	int retval = 0;
+ 	struct imon_context *ictx = file->private_data;
+ 
+-	if (ictx->disconnected)
+-		return -ENODEV;
+-
+ 	mutex_lock(&ictx->lock);
+ 
++	if (ictx->disconnected) {
++		retval = -ENODEV;
++		goto exit;
++	}
++
+ 	if (!ictx->display_supported) {
+ 		pr_err_ratelimited("no iMON display present\n");
+ 		retval = -ENODEV;
+@@ -2499,7 +2508,11 @@ static void imon_disconnect(struct usb_interface *interface)
+ 	int ifnum;
+ 
+ 	ictx = usb_get_intfdata(interface);
++
++	mutex_lock(&ictx->lock);
+ 	ictx->disconnected = true;
++	mutex_unlock(&ictx->lock);
++
+ 	dev = ictx->dev;
+ 	ifnum = interface->cur_altsetting->desc.bInterfaceNumber;
+ 
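
The imon hunks make the disconnected flag be set and tested under the same
mutex, closing the race between a concurrent write() and disconnect(). A
hedged sketch of that locking discipline (names are placeholders):

#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/types.h>

struct demo_ctx {
	struct mutex lock;
	bool disconnected;
};

/* Writers test the flag under the lock, so they either observe the
 * disconnect or finish their I/O before teardown proceeds. */
static int demo_write(struct demo_ctx *ctx)
{
	int ret = 0;

	mutex_lock(&ctx->lock);
	if (ctx->disconnected) {
		ret = -ENODEV;
		goto out;
	}
	/* ... device I/O, still under the lock ... */
out:
	mutex_unlock(&ctx->lock);
	return ret;
}

static void demo_disconnect(struct demo_ctx *ctx)
{
	mutex_lock(&ctx->lock);
	ctx->disconnected = true;	/* later writers get -ENODEV */
	mutex_unlock(&ctx->lock);
}
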
+diff --git a/drivers/media/tuners/xc5000.c b/drivers/media/tuners/xc5000.c
+index 30aa4ee958bdea..ec9a3cd4784e1f 100644
+--- a/drivers/media/tuners/xc5000.c
++++ b/drivers/media/tuners/xc5000.c
+@@ -1304,7 +1304,7 @@ static void xc5000_release(struct dvb_frontend *fe)
+ 	mutex_lock(&xc5000_list_mutex);
+ 
+ 	if (priv) {
+-		cancel_delayed_work(&priv->timer_sleep);
++		cancel_delayed_work_sync(&priv->timer_sleep);
+ 		hybrid_tuner_release_state(priv);
+ 	}
+ 
+diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
+index fde5cc70bf79c5..9c2dd64be6d383 100644
+--- a/drivers/media/usb/uvc/uvc_driver.c
++++ b/drivers/media/usb/uvc/uvc_driver.c
+@@ -135,6 +135,9 @@ struct uvc_entity *uvc_entity_by_id(struct uvc_device *dev, int id)
+ {
+ 	struct uvc_entity *entity;
+ 
++	if (id == UVC_INVALID_ENTITY_ID)
++		return NULL;
++
+ 	list_for_each_entry(entity, &dev->entities, list) {
+ 		if (entity->id == id)
+ 			return entity;
+@@ -778,14 +781,27 @@ static const u8 uvc_media_transport_input_guid[16] =
+ 	UVC_GUID_UVC_MEDIA_TRANSPORT_INPUT;
+ static const u8 uvc_processing_guid[16] = UVC_GUID_UVC_PROCESSING;
+ 
+-static struct uvc_entity *uvc_alloc_entity(u16 type, u16 id,
+-		unsigned int num_pads, unsigned int extra_size)
++static struct uvc_entity *uvc_alloc_new_entity(struct uvc_device *dev, u16 type,
++					       u16 id, unsigned int num_pads,
++					       unsigned int extra_size)
+ {
+ 	struct uvc_entity *entity;
+ 	unsigned int num_inputs;
+ 	unsigned int size;
+ 	unsigned int i;
+ 
++	/* Per UVC 1.1+ spec 3.7.2, the ID should be non-zero. */
++	if (id == 0) {
++		dev_err(&dev->intf->dev, "Found Unit with invalid ID 0\n");
++		id = UVC_INVALID_ENTITY_ID;
++	}
++
++	/* Per UVC 1.1+ spec 3.7.2, the ID is unique. */
++	if (uvc_entity_by_id(dev, id)) {
++		dev_err(&dev->intf->dev, "Found multiple Units with ID %u\n", id);
++		id = UVC_INVALID_ENTITY_ID;
++	}
++
+ 	extra_size = roundup(extra_size, sizeof(*entity->pads));
+ 	if (num_pads)
+ 		num_inputs = type & UVC_TERM_OUTPUT ? num_pads : num_pads - 1;
+@@ -795,7 +811,7 @@ static struct uvc_entity *uvc_alloc_entity(u16 type, u16 id,
+ 	     + num_inputs;
+ 	entity = kzalloc(size, GFP_KERNEL);
+ 	if (entity == NULL)
+-		return NULL;
++		return ERR_PTR(-ENOMEM);
+ 
+ 	entity->id = id;
+ 	entity->type = type;
+@@ -907,10 +923,10 @@ static int uvc_parse_vendor_control(struct uvc_device *dev,
+ 			break;
+ 		}
+ 
+-		unit = uvc_alloc_entity(UVC_VC_EXTENSION_UNIT, buffer[3],
+-					p + 1, 2*n);
+-		if (unit == NULL)
+-			return -ENOMEM;
++		unit = uvc_alloc_new_entity(dev, UVC_VC_EXTENSION_UNIT,
++					    buffer[3], p + 1, 2 * n);
++		if (IS_ERR(unit))
++			return PTR_ERR(unit);
+ 
+ 		memcpy(unit->guid, &buffer[4], 16);
+ 		unit->extension.bNumControls = buffer[20];
+@@ -1019,10 +1035,10 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
+ 			return -EINVAL;
+ 		}
+ 
+-		term = uvc_alloc_entity(type | UVC_TERM_INPUT, buffer[3],
+-					1, n + p);
+-		if (term == NULL)
+-			return -ENOMEM;
++		term = uvc_alloc_new_entity(dev, type | UVC_TERM_INPUT,
++					    buffer[3], 1, n + p);
++		if (IS_ERR(term))
++			return PTR_ERR(term);
+ 
+ 		if (UVC_ENTITY_TYPE(term) == UVC_ITT_CAMERA) {
+ 			term->camera.bControlSize = n;
+@@ -1078,10 +1094,10 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
+ 			return 0;
+ 		}
+ 
+-		term = uvc_alloc_entity(type | UVC_TERM_OUTPUT, buffer[3],
+-					1, 0);
+-		if (term == NULL)
+-			return -ENOMEM;
++		term = uvc_alloc_new_entity(dev, type | UVC_TERM_OUTPUT,
++					    buffer[3], 1, 0);
++		if (IS_ERR(term))
++			return PTR_ERR(term);
+ 
+ 		memcpy(term->baSourceID, &buffer[7], 1);
+ 
+@@ -1100,9 +1116,10 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
+ 			return -EINVAL;
+ 		}
+ 
+-		unit = uvc_alloc_entity(buffer[2], buffer[3], p + 1, 0);
+-		if (unit == NULL)
+-			return -ENOMEM;
++		unit = uvc_alloc_new_entity(dev, buffer[2], buffer[3],
++					    p + 1, 0);
++		if (IS_ERR(unit))
++			return PTR_ERR(unit);
+ 
+ 		memcpy(unit->baSourceID, &buffer[5], p);
+ 
+@@ -1122,9 +1139,9 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
+ 			return -EINVAL;
+ 		}
+ 
+-		unit = uvc_alloc_entity(buffer[2], buffer[3], 2, n);
+-		if (unit == NULL)
+-			return -ENOMEM;
++		unit = uvc_alloc_new_entity(dev, buffer[2], buffer[3], 2, n);
++		if (IS_ERR(unit))
++			return PTR_ERR(unit);
+ 
+ 		memcpy(unit->baSourceID, &buffer[4], 1);
+ 		unit->processing.wMaxMultiplier =
+@@ -1151,9 +1168,10 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
+ 			return -EINVAL;
+ 		}
+ 
+-		unit = uvc_alloc_entity(buffer[2], buffer[3], p + 1, n);
+-		if (unit == NULL)
+-			return -ENOMEM;
++		unit = uvc_alloc_new_entity(dev, buffer[2], buffer[3],
++					    p + 1, n);
++		if (IS_ERR(unit))
++			return PTR_ERR(unit);
+ 
+ 		memcpy(unit->guid, &buffer[4], 16);
+ 		unit->extension.bNumControls = buffer[20];
+@@ -1293,9 +1311,10 @@ static int uvc_gpio_parse(struct uvc_device *dev)
+ 		return dev_err_probe(&dev->intf->dev, irq,
+ 				     "No IRQ for privacy GPIO\n");
+ 
+-	unit = uvc_alloc_entity(UVC_EXT_GPIO_UNIT, UVC_EXT_GPIO_UNIT_ID, 0, 1);
+-	if (!unit)
+-		return -ENOMEM;
++	unit = uvc_alloc_new_entity(dev, UVC_EXT_GPIO_UNIT,
++				    UVC_EXT_GPIO_UNIT_ID, 0, 1);
++	if (IS_ERR(unit))
++		return PTR_ERR(unit);
+ 
+ 	unit->gpio.gpio_privacy = gpio_privacy;
+ 	unit->gpio.irq = irq;
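
uvc_alloc_new_entity() above demotes zero or duplicate descriptor IDs to a
sentinel instead of failing the probe. A small sketch of just that
sanitization step; demo_sanitize_entity_id() is hypothetical, while
uvc_entity_by_id() and UVC_INVALID_ENTITY_ID are the symbols patched above:

#include <linux/types.h>

static u16 demo_sanitize_entity_id(struct uvc_device *dev, u16 id)
{
	/* Per UVC 1.1+ 3.7.2 an ID must be non-zero and unique; remap
	 * offenders to a sentinel no lookup will ever match, so the
	 * device keeps probing instead of being rejected outright. */
	if (id == 0 || uvc_entity_by_id(dev, id))
		return UVC_INVALID_ENTITY_ID;

	return id;
}
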
+diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
+index 74ac2106f08e2c..62d6129c88ece5 100644
+--- a/drivers/media/usb/uvc/uvcvideo.h
++++ b/drivers/media/usb/uvc/uvcvideo.h
+@@ -41,6 +41,8 @@
+ #define UVC_EXT_GPIO_UNIT		0x7ffe
+ #define UVC_EXT_GPIO_UNIT_ID		0x100
+ 
++#define UVC_INVALID_ENTITY_ID          0xffff
++
+ /* ------------------------------------------------------------------------
+  * Driver specific constants.
+  */
+diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c
+index a5555c959dec96..3ffb7723b67318 100644
+--- a/drivers/net/wireless/ath/ath11k/qmi.c
++++ b/drivers/net/wireless/ath/ath11k/qmi.c
+@@ -2550,7 +2550,7 @@ static int ath11k_qmi_m3_load(struct ath11k_base *ab)
+ 					   GFP_KERNEL);
+ 	if (!m3_mem->vaddr) {
+ 		ath11k_err(ab, "failed to allocate memory for M3 with size %zu\n",
+-			   fw->size);
++			   m3_len);
+ 		ret = -ENOMEM;
+ 		goto out;
+ 	}
+diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
+index c40217f44b1bc5..3188bca17e1b94 100644
+--- a/drivers/target/target_core_configfs.c
++++ b/drivers/target/target_core_configfs.c
+@@ -2776,7 +2776,7 @@ static ssize_t target_lu_gp_members_show(struct config_item *item, char *page)
+ 			config_item_name(&dev->dev_group.cg_item));
+ 		cur_len++; /* Extra byte for NULL terminator */
+ 
+-		if ((cur_len + len) > PAGE_SIZE) {
++		if ((cur_len + len) > PAGE_SIZE || cur_len > LU_GROUP_NAME_BUF) {
+ 			pr_warn("Ran out of lu_gp_show_attr"
+ 				"_members buffer\n");
+ 			break;
+diff --git a/include/crypto/sha256_base.h b/include/crypto/sha256_base.h
+index e0418818d63c84..e3e610cfe8d300 100644
+--- a/include/crypto/sha256_base.h
++++ b/include/crypto/sha256_base.h
+@@ -44,7 +44,7 @@ static inline int lib_sha256_base_do_update(struct sha256_state *sctx,
+ 	sctx->count += len;
+ 
+ 	if (unlikely((partial + len) >= SHA256_BLOCK_SIZE)) {
+-		int blocks;
++		unsigned int blocks;
+ 
+ 		if (partial) {
+ 			int p = SHA256_BLOCK_SIZE - partial;
+diff --git a/mm/swapfile.c b/mm/swapfile.c
+index c02493d9c7bee5..883333a87a45f3 100644
+--- a/mm/swapfile.c
++++ b/mm/swapfile.c
+@@ -2337,6 +2337,8 @@ static int unuse_mm(struct mm_struct *mm, unsigned int type)
+ 	VMA_ITERATOR(vmi, mm, 0);
+ 
+ 	mmap_read_lock(mm);
++	if (check_stable_address_space(mm))
++		goto unlock;
+ 	for_each_vma(vmi, vma) {
+ 		if (vma->anon_vma && !is_vm_hugetlb_page(vma)) {
+ 			ret = unuse_vma(vma, type);
+@@ -2346,6 +2348,7 @@ static int unuse_mm(struct mm_struct *mm, unsigned int type)
+ 
+ 		cond_resched();
+ 	}
++unlock:
+ 	mmap_read_unlock(mm);
+ 	return ret;
+ }
+diff --git a/scripts/gcc-plugins/gcc-common.h b/scripts/gcc-plugins/gcc-common.h
+index ef12c8f929eda3..70c67061cc448b 100644
+--- a/scripts/gcc-plugins/gcc-common.h
++++ b/scripts/gcc-plugins/gcc-common.h
+@@ -191,10 +191,17 @@ inline bool is_a_helper<const gassign *>::test(const_gimple gs)
+ }
+ #endif
+ 
++#if BUILDING_GCC_VERSION < 16000
+ #define TODO_verify_ssa TODO_verify_il
+ #define TODO_verify_flow TODO_verify_il
+ #define TODO_verify_stmts TODO_verify_il
+ #define TODO_verify_rtl_sharing TODO_verify_il
++#else
++#define TODO_verify_ssa 0
++#define TODO_verify_flow 0
++#define TODO_verify_stmts 0
++#define TODO_verify_rtl_sharing 0
++#endif
+ 
+ #define INSN_DELETED_P(insn) (insn)->deleted()
+ 
+diff --git a/sound/soc/qcom/qdsp6/topology.c b/sound/soc/qcom/qdsp6/topology.c
+index 83319a928f2917..01bb1bdee5cec1 100644
+--- a/sound/soc/qcom/qdsp6/topology.c
++++ b/sound/soc/qcom/qdsp6/topology.c
+@@ -587,8 +587,8 @@ static int audioreach_widget_load_module_common(struct snd_soc_component *compon
+ 		return PTR_ERR(cont);
+ 
+ 	mod = audioreach_parse_common_tokens(apm, cont, &tplg_w->priv, w);
+-	if (IS_ERR(mod))
+-		return PTR_ERR(mod);
++	if (IS_ERR_OR_NULL(mod))
++		return mod ? PTR_ERR(mod) : -ENODEV;
+ 
+ 	dobj = &w->dobj;
+ 	dobj->private = mod;
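
The topology fix above handles a helper that may return a valid pointer, an
ERR_PTR() value, or NULL, folding all three into a single errno path. A
minimal sketch of the idiom (the struct and function are placeholders):

#include <linux/err.h>
#include <linux/errno.h>

struct demo_module;	/* placeholder type */

/* mod may be valid, ERR_PTR()-encoded, or NULL; IS_ERR_OR_NULL()
 * catches both failure shapes, and the ternary picks the errno. */
static int demo_attach(struct demo_module *mod)
{
	if (IS_ERR_OR_NULL(mod))
		return mod ? PTR_ERR(mod) : -ENODEV;

	/* mod is safe to dereference from here on */
	return 0;
}
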


^ permalink raw reply related	[flat|nested] 82+ messages in thread
* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2025-10-02 13:25 Arisu Tachibana
  0 siblings, 0 replies; 82+ messages in thread
From: Arisu Tachibana @ 2025-10-02 13:25 UTC (permalink / raw
  To: gentoo-commits

commit:     50f803d7b786eaa890e222aeead78ad4b56e2c32
Author:     Arisu Tachibana <alicef <AT> gentoo <DOT> org>
AuthorDate: Thu Oct  2 13:25:15 2025 +0000
Commit:     Arisu Tachibana <alicef <AT> gentoo <DOT> org>
CommitDate: Thu Oct  2 13:25:15 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=50f803d7

Linux patch 6.12.50

Signed-off-by: Arisu Tachibana <alicef <AT> gentoo.org>

 0000_README              |    4 +
 1049_linux-6.12.50.patch | 3459 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3463 insertions(+)

diff --git a/0000_README b/0000_README
index b6db0425..faa9f43e 100644
--- a/0000_README
+++ b/0000_README
@@ -239,6 +239,10 @@ Patch:  1048_linux-6.12.49.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.12.49
 
+Patch:  1049_linux-6.12.50.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.12.50
+
 Patch:  1500_fortify-copy-size-value-range-tracking-fix.patch
 From:   https://git.kernel.org/
 Desc:   fortify: Hide run-time copy size from value range tracking

diff --git a/1049_linux-6.12.50.patch b/1049_linux-6.12.50.patch
new file mode 100644
index 00000000..32c3e577
--- /dev/null
+++ b/1049_linux-6.12.50.patch
@@ -0,0 +1,3459 @@
+diff --git a/Documentation/admin-guide/laptops/lg-laptop.rst b/Documentation/admin-guide/laptops/lg-laptop.rst
+index 67fd6932cef4ff..c4dd534f91edd1 100644
+--- a/Documentation/admin-guide/laptops/lg-laptop.rst
++++ b/Documentation/admin-guide/laptops/lg-laptop.rst
+@@ -48,8 +48,8 @@ This value is reset to 100 when the kernel boots.
+ Fan mode
+ --------
+ 
+-Writing 1/0 to /sys/devices/platform/lg-laptop/fan_mode disables/enables
+-the fan silent mode.
++Writing 0/1/2 to /sys/devices/platform/lg-laptop/fan_mode sets fan mode to
++Optimal/Silent/Performance respectively.
+ 
+ 
+ USB charge
+diff --git a/Makefile b/Makefile
+index 66ae67c52da819..7b0a94828fdb1c 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 12
+-SUBLEVEL = 49
++SUBLEVEL = 50
+ EXTRAVERSION =
+ NAME = Baby Opossum Posse
+ 
+diff --git a/arch/arm/boot/dts/intel/socfpga/socfpga_cyclone5_sodia.dts b/arch/arm/boot/dts/intel/socfpga/socfpga_cyclone5_sodia.dts
+index ce0d6514eeb571..e4794ccb8e413f 100644
+--- a/arch/arm/boot/dts/intel/socfpga/socfpga_cyclone5_sodia.dts
++++ b/arch/arm/boot/dts/intel/socfpga/socfpga_cyclone5_sodia.dts
+@@ -66,8 +66,10 @@ &gmac1 {
+ 	mdio0 {
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+-		phy0: ethernet-phy@0 {
+-			reg = <0>;
++		compatible = "snps,dwmac-mdio";
++
++		phy0: ethernet-phy@4 {
++			reg = <4>;
+ 			rxd0-skew-ps = <0>;
+ 			rxd1-skew-ps = <0>;
+ 			rxd2-skew-ps = <0>;
+diff --git a/arch/arm/boot/dts/marvell/kirkwood-openrd-client.dts b/arch/arm/boot/dts/marvell/kirkwood-openrd-client.dts
+index d4e0b8150a84ce..cf26e2ceaaa074 100644
+--- a/arch/arm/boot/dts/marvell/kirkwood-openrd-client.dts
++++ b/arch/arm/boot/dts/marvell/kirkwood-openrd-client.dts
+@@ -38,7 +38,7 @@ sound {
+ 		simple-audio-card,mclk-fs = <256>;
+ 
+ 		simple-audio-card,cpu {
+-			sound-dai = <&audio0 0>;
++			sound-dai = <&audio0>;
+ 		};
+ 
+ 		simple-audio-card,codec {
+diff --git a/arch/arm64/boot/dts/freescale/imx8mp.dtsi b/arch/arm64/boot/dts/freescale/imx8mp.dtsi
+index 40e847bc0b7f81..62cf525ab714b4 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mp.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mp.dtsi
+@@ -283,7 +283,7 @@ thermal-zones {
+ 		cpu-thermal {
+ 			polling-delay-passive = <250>;
+ 			polling-delay = <2000>;
+-			thermal-sensors = <&tmu 0>;
++			thermal-sensors = <&tmu 1>;
+ 			trips {
+ 				cpu_alert0: trip0 {
+ 					temperature = <85000>;
+@@ -313,7 +313,7 @@ map0 {
+ 		soc-thermal {
+ 			polling-delay-passive = <250>;
+ 			polling-delay = <2000>;
+-			thermal-sensors = <&tmu 1>;
++			thermal-sensors = <&tmu 0>;
+ 			trips {
+ 				soc_alert0: trip0 {
+ 					temperature = <85000>;
+diff --git a/arch/arm64/boot/dts/marvell/cn9132-clearfog.dts b/arch/arm64/boot/dts/marvell/cn9132-clearfog.dts
+index 0f53745a6fa0d8..41166f865f87cb 100644
+--- a/arch/arm64/boot/dts/marvell/cn9132-clearfog.dts
++++ b/arch/arm64/boot/dts/marvell/cn9132-clearfog.dts
+@@ -413,7 +413,13 @@ fixed-link {
+ /* SRDS #0,#1,#2,#3 - PCIe */
+ &cp0_pcie0 {
+ 	num-lanes = <4>;
+-	phys = <&cp0_comphy0 0>, <&cp0_comphy1 0>, <&cp0_comphy2 0>, <&cp0_comphy3 0>;
++	/*
++	 * The mvebu-comphy driver does not currently know how to pass correct
++	 * lane-count to ATF while configuring the serdes lanes.
++	 * Rely on bootloader configuration only.
++	 *
++	 * phys = <&cp0_comphy0 0>, <&cp0_comphy1 0>, <&cp0_comphy2 0>, <&cp0_comphy3 0>;
++	 */
+ 	status = "okay";
+ };
+ 
+@@ -475,7 +481,13 @@ &cp1_eth0 {
+ /* SRDS #0,#1 - PCIe */
+ &cp1_pcie0 {
+ 	num-lanes = <2>;
+-	phys = <&cp1_comphy0 0>, <&cp1_comphy1 0>;
++	/*
++	 * The mvebu-comphy driver does not currently know how to pass correct
++	 * lane-count to ATF while configuring the serdes lanes.
++	 * Rely on bootloader configuration only.
++	 *
++	 * phys = <&cp1_comphy0 0>, <&cp1_comphy1 0>;
++	 */
+ 	status = "okay";
+ };
+ 
+diff --git a/arch/arm64/boot/dts/marvell/cn9132-sr-cex7.dtsi b/arch/arm64/boot/dts/marvell/cn9132-sr-cex7.dtsi
+index afc041c1c448c3..bb2bb47fd77c12 100644
+--- a/arch/arm64/boot/dts/marvell/cn9132-sr-cex7.dtsi
++++ b/arch/arm64/boot/dts/marvell/cn9132-sr-cex7.dtsi
+@@ -137,6 +137,14 @@ &ap_sdhci0 {
+ 	pinctrl-0 = <&ap_mmc0_pins>;
+ 	pinctrl-names = "default";
+ 	vqmmc-supply = <&v_1_8>;
++	/*
++	 * Not stable in HS modes - phy needs "more calibration", so disable
++	 * UHS (by preventing voltage switch), SDR104, SDR50 and DDR50 modes.
++	 */
++	no-1-8-v;
++	no-sd;
++	no-sdio;
++	non-removable;
+ 	status = "okay";
+ };
+ 
+diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
+index bd55c235630350..9600a96f911760 100644
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -2973,6 +2973,15 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
+ 			goto err_null_driver;
+ 	}
+ 
++	/*
++	 * Mark support for the scheduler's frequency invariance engine for
++	 * drivers that implement target(), target_index() or fast_switch().
++	 */
++	if (!cpufreq_driver->setpolicy) {
++		static_branch_enable_cpuslocked(&cpufreq_freq_invariance);
++		pr_debug("cpufreq: supports frequency invariance\n");
++	}
++
+ 	ret = subsys_interface_register(&cpufreq_interface);
+ 	if (ret)
+ 		goto err_boost_unreg;
+@@ -2994,21 +3003,14 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
+ 	hp_online = ret;
+ 	ret = 0;
+ 
+-	/*
+-	 * Mark support for the scheduler's frequency invariance engine for
+-	 * drivers that implement target(), target_index() or fast_switch().
+-	 */
+-	if (!cpufreq_driver->setpolicy) {
+-		static_branch_enable_cpuslocked(&cpufreq_freq_invariance);
+-		pr_debug("supports frequency invariance");
+-	}
+-
+ 	pr_debug("driver %s up and running\n", driver_data->name);
+ 	goto out;
+ 
+ err_if_unreg:
+ 	subsys_interface_unregister(&cpufreq_interface);
+ err_boost_unreg:
++	if (!cpufreq_driver->setpolicy)
++		static_branch_disable_cpuslocked(&cpufreq_freq_invariance);
+ 	remove_boost_sysfs_file();
+ err_null_driver:
+ 	write_lock_irqsave(&cpufreq_driver_lock, flags);
+diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
+index b360dca2c69e85..cc9731c3616c1d 100644
+--- a/drivers/firewire/core-cdev.c
++++ b/drivers/firewire/core-cdev.c
+@@ -41,7 +41,7 @@
+ /*
+  * ABI version history is documented in linux/firewire-cdev.h.
+  */
+-#define FW_CDEV_KERNEL_VERSION			5
++#define FW_CDEV_KERNEL_VERSION			6
+ #define FW_CDEV_VERSION_EVENT_REQUEST2		4
+ #define FW_CDEV_VERSION_ALLOCATE_REGION_END	4
+ #define FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW	5
+diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
+index 209871c219d697..e5d0d2b0d7989b 100644
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -4317,6 +4317,23 @@ static struct gpio_desc *gpiod_find_by_fwnode(struct fwnode_handle *fwnode,
+ 	return desc;
+ }
+ 
++static struct gpio_desc *gpiod_fwnode_lookup(struct fwnode_handle *fwnode,
++					     struct device *consumer,
++					     const char *con_id,
++					     unsigned int idx,
++					     enum gpiod_flags *flags,
++					     unsigned long *lookupflags)
++{
++	struct gpio_desc *desc;
++
++	desc = gpiod_find_by_fwnode(fwnode, consumer, con_id, idx, flags, lookupflags);
++	if (gpiod_not_found(desc) && !IS_ERR_OR_NULL(fwnode))
++		desc = gpiod_find_by_fwnode(fwnode->secondary, consumer, con_id,
++					    idx, flags, lookupflags);
++
++	return desc;
++}
++
+ struct gpio_desc *gpiod_find_and_request(struct device *consumer,
+ 					 struct fwnode_handle *fwnode,
+ 					 const char *con_id,
+@@ -4335,8 +4352,8 @@ struct gpio_desc *gpiod_find_and_request(struct device *consumer,
+ 	int ret = 0;
+ 
+ 	scoped_guard(srcu, &gpio_devices_srcu) {
+-		desc = gpiod_find_by_fwnode(fwnode, consumer, con_id, idx,
+-					    &flags, &lookupflags);
++		desc = gpiod_fwnode_lookup(fwnode, consumer, con_id, idx,
++					   &flags, &lookupflags);
+ 		if (gpiod_not_found(desc) && platform_lookup_allowed) {
+ 			/*
+ 			 * Either we are not using DT or ACPI, or their lookup
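
gpiod_fwnode_lookup() above retries the lookup on the fwnode's
software-node secondary, but only when the primary lookup reported "not
found" rather than a hard error. A generic sketch of that two-stage
fallback (the helper is hypothetical):

#include <linux/err.h>
#include <linux/fwnode.h>

/* Retry on the secondary only for "not found" (-ENOENT), never for a
 * real error, and only if the primary fwnode actually exists. */
static void *demo_two_stage_lookup(struct fwnode_handle *fwnode,
				   void *(*find)(struct fwnode_handle *))
{
	void *res = find(fwnode);

	if (PTR_ERR(res) == -ENOENT && !IS_ERR_OR_NULL(fwnode))
		res = find(fwnode->secondary);

	return res;
}
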
+diff --git a/drivers/gpu/drm/ast/ast_dp.c b/drivers/gpu/drm/ast/ast_dp.c
+index 5dadc895e7f26b..5e87ca2320a42d 100644
+--- a/drivers/gpu/drm/ast/ast_dp.c
++++ b/drivers/gpu/drm/ast/ast_dp.c
+@@ -79,7 +79,7 @@ static int ast_astdp_read_edid_block(void *data, u8 *buf, unsigned int block, si
+ 			 * 3. The delays are often a lot longer when the system resumes from S3/S4.
+ 			 */
+ 			if (j)
+-				mdelay(j + 1);
++				msleep(j + 1);
+ 
+ 			/* Wait for EDID offset to show up in mirror register */
+ 			vgacrd7 = ast_get_index_reg(ast, AST_IO_VGACRI, 0xd7);
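
The ast_dp change swaps mdelay() for msleep() in a retry loop that runs in sleepable context: mdelay() busy-waits and burns CPU for the whole interval, while msleep() schedules away (at the cost of possible oversleep). A hedged rule-of-thumb sketch (a demo helper, not driver code):

#include <linux/delay.h>

static void demo_wait_ms(unsigned int ms, bool atomic_ctx)
{
	if (atomic_ctx)
		mdelay(ms);		/* spins; the only choice in atomic context */
	else if (ms < 20)
		usleep_range(ms * 1000, ms * 1500); /* hrtimer-backed, fine-grained */
	else
		msleep(ms);		/* jiffy-based; may oversleep a little */
}
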
+diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+index ed8626c73541c1..f0ae675581d9a5 100644
+--- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c
++++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+@@ -726,8 +726,8 @@ void oaktrail_hdmi_teardown(struct drm_device *dev)
+ 
+ 	if (hdmi_dev) {
+ 		pdev = hdmi_dev->dev;
+-		pci_set_drvdata(pdev, NULL);
+ 		oaktrail_hdmi_i2c_exit(pdev);
++		pci_set_drvdata(pdev, NULL);
+ 		iounmap(hdmi_dev->regs);
+ 		kfree(hdmi_dev);
+ 		pci_dev_put(pdev);
+diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c
+index 9e05745d797d19..4d93781f2dd763 100644
+--- a/drivers/gpu/drm/i915/display/intel_backlight.c
++++ b/drivers/gpu/drm/i915/display/intel_backlight.c
+@@ -40,8 +40,9 @@ static u32 scale(u32 source_val,
+ {
+ 	u64 target_val;
+ 
+-	WARN_ON(source_min > source_max);
+-	WARN_ON(target_min > target_max);
++	if (WARN_ON(source_min >= source_max) ||
++	    WARN_ON(target_min > target_max))
++		return target_min;
+ 
+ 	/* defensive */
+ 	source_val = clamp(source_val, source_min, source_max);
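
The intel_backlight guard turns the old advisory WARN_ON() into a real bail-out, since source_min == source_max would otherwise divide by zero below. The function itself is an ordinary linear rescale; a self-contained userspace sketch of the same arithmetic (illustrative names):

#include <stdint.h>

static uint32_t demo_scale(uint32_t v, uint32_t smin, uint32_t smax,
			   uint32_t tmin, uint32_t tmax)
{
	uint64_t span;

	if (smin >= smax || tmin > tmax)	/* degenerate ranges: see the fix above */
		return tmin;

	if (v < smin)				/* defensive clamp, as in the driver */
		v = smin;
	if (v > smax)
		v = smax;

	/* 64-bit intermediate so (v - smin) * (tmax - tmin) cannot overflow */
	span = (uint64_t)(v - smin) * (tmax - tmin);
	return tmin + (uint32_t)(span / (smax - smin));
}
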
+diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
+index 20135a9bc026ed..0bc5b69ec636b4 100644
+--- a/drivers/gpu/drm/panthor/panthor_sched.c
++++ b/drivers/gpu/drm/panthor/panthor_sched.c
+@@ -865,8 +865,7 @@ static void group_free_queue(struct panthor_group *group, struct panthor_queue *
+ 	if (IS_ERR_OR_NULL(queue))
+ 		return;
+ 
+-	if (queue->entity.fence_context)
+-		drm_sched_entity_destroy(&queue->entity);
++	drm_sched_entity_destroy(&queue->entity);
+ 
+ 	if (queue->scheduler.ops)
+ 		drm_sched_fini(&queue->scheduler);
+@@ -3458,11 +3457,6 @@ int panthor_group_destroy(struct panthor_file *pfile, u32 group_handle)
+ 	if (!group)
+ 		return -EINVAL;
+ 
+-	for (u32 i = 0; i < group->queue_count; i++) {
+-		if (group->queues[i])
+-			drm_sched_entity_destroy(&group->queues[i]->entity);
+-	}
+-
+ 	mutex_lock(&sched->reset.lock);
+ 	mutex_lock(&sched->lock);
+ 	group->destroyed = true;
+diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_client.c b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
+index 3438d392920fad..8dae9a77668536 100644
+--- a/drivers/hid/amd-sfh-hid/amd_sfh_client.c
++++ b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
+@@ -39,8 +39,12 @@ int amd_sfh_get_report(struct hid_device *hid, int report_id, int report_type)
+ 	struct amdtp_hid_data *hid_data = hid->driver_data;
+ 	struct amdtp_cl_data *cli_data = hid_data->cli_data;
+ 	struct request_list *req_list = &cli_data->req_list;
++	struct amd_input_data *in_data = cli_data->in_data;
++	struct amd_mp2_dev *mp2;
+ 	int i;
+ 
++	mp2 = container_of(in_data, struct amd_mp2_dev, in_data);
++	guard(mutex)(&mp2->lock);
+ 	for (i = 0; i < cli_data->num_hid_devices; i++) {
+ 		if (cli_data->hid_sensor_hubs[i] == hid) {
+ 			struct request_list *new = kzalloc(sizeof(*new), GFP_KERNEL);
+@@ -75,6 +79,8 @@ void amd_sfh_work(struct work_struct *work)
+ 	u8 report_id, node_type;
+ 	u8 report_size = 0;
+ 
++	mp2 = container_of(in_data, struct amd_mp2_dev, in_data);
++	guard(mutex)(&mp2->lock);
+ 	req_node = list_last_entry(&req_list->list, struct request_list, list);
+ 	list_del(&req_node->list);
+ 	current_index = req_node->current_index;
+@@ -83,7 +89,6 @@ void amd_sfh_work(struct work_struct *work)
+ 	node_type = req_node->report_type;
+ 	kfree(req_node);
+ 
+-	mp2 = container_of(in_data, struct amd_mp2_dev, in_data);
+ 	mp2_ops = mp2->mp2_ops;
+ 	if (node_type == HID_FEATURE_REPORT) {
+ 		report_size = mp2_ops->get_feat_rep(sensor_index, report_id,
+@@ -107,6 +112,8 @@ void amd_sfh_work(struct work_struct *work)
+ 	cli_data->cur_hid_dev = current_index;
+ 	cli_data->sensor_requested_cnt[current_index] = 0;
+ 	amdtp_hid_wakeup(cli_data->hid_sensor_hubs[current_index]);
++	if (!list_empty(&req_list->list))
++		schedule_delayed_work(&cli_data->work, 0);
+ }
+ 
+ void amd_sfh_work_buffer(struct work_struct *work)
+@@ -117,9 +124,10 @@ void amd_sfh_work_buffer(struct work_struct *work)
+ 	u8 report_size;
+ 	int i;
+ 
++	mp2 = container_of(in_data, struct amd_mp2_dev, in_data);
++	guard(mutex)(&mp2->lock);
+ 	for (i = 0; i < cli_data->num_hid_devices; i++) {
+ 		if (cli_data->sensor_sts[i] == SENSOR_ENABLED) {
+-			mp2 = container_of(in_data, struct amd_mp2_dev, in_data);
+ 			report_size = mp2->mp2_ops->get_in_rep(i, cli_data->sensor_idx[i],
+ 							       cli_data->report_id[i], in_data);
+ 			hid_input_report(cli_data->hid_sensor_hubs[i], HID_INPUT_REPORT,
+diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_common.h b/drivers/hid/amd-sfh-hid/amd_sfh_common.h
+index e5620d7db5690e..00308d8998d4dd 100644
+--- a/drivers/hid/amd-sfh-hid/amd_sfh_common.h
++++ b/drivers/hid/amd-sfh-hid/amd_sfh_common.h
+@@ -10,6 +10,7 @@
+ #ifndef AMD_SFH_COMMON_H
+ #define AMD_SFH_COMMON_H
+ 
++#include <linux/mutex.h>
+ #include <linux/pci.h>
+ #include "amd_sfh_hid.h"
+ 
+@@ -57,6 +58,8 @@ struct amd_mp2_dev {
+ 	u32 mp2_acs;
+ 	struct sfh_dev_status dev_en;
+ 	struct work_struct work;
++	/* lock to protect mp2 data */
++	struct mutex lock;
+ 	u8 init_done;
+ 	u8 rver;
+ };
+diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
+index 0c28ca349bcd37..9739f66e925c00 100644
+--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
++++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
+@@ -405,6 +405,10 @@ static int amd_mp2_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i
+ 	if (!privdata->cl_data)
+ 		return -ENOMEM;
+ 
++	rc = devm_mutex_init(&pdev->dev, &privdata->lock);
++	if (rc)
++		return rc;
++
+ 	privdata->sfh1_1_ops = (const struct amd_sfh1_1_ops *)id->driver_data;
+ 	if (privdata->sfh1_1_ops) {
+ 		if (boot_cpu_data.x86 >= 0x1A)
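
The amd_sfh hunks serialize the get-report, work, and buffer paths with one devm-managed mutex, taken via guard(mutex)(...) from <linux/cleanup.h>; the lock is released automatically when the scope exits, on every return path. A minimal sketch of the idiom (demo_* names are illustrative):

#include <linux/cleanup.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_lock);
static int demo_counter;	/* hypothetical state protected by demo_lock */

static int demo_bump(bool allowed)
{
	guard(mutex)(&demo_lock);	/* locked here, unlocked at every exit */

	if (!allowed)
		return -EPERM;		/* no explicit mutex_unlock() needed */

	demo_counter++;
	return 0;
}
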
+diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
+index 6b90d2c03e889a..dd989b519f86e7 100644
+--- a/drivers/hid/hid-asus.c
++++ b/drivers/hid/hid-asus.c
+@@ -971,7 +971,10 @@ static int asus_input_mapping(struct hid_device *hdev,
+ 		case 0xc4: asus_map_key_clear(KEY_KBDILLUMUP);		break;
+ 		case 0xc5: asus_map_key_clear(KEY_KBDILLUMDOWN);		break;
+ 		case 0xc7: asus_map_key_clear(KEY_KBDILLUMTOGGLE);	break;
++		case 0x4e: asus_map_key_clear(KEY_FN_ESC);		break;
++		case 0x7e: asus_map_key_clear(KEY_EMOJI_PICKER);	break;
+ 
++		case 0x8b: asus_map_key_clear(KEY_PROG1);	break; /* ProArt Creator Hub key */
+ 		case 0x6b: asus_map_key_clear(KEY_F21);		break; /* ASUS touchpad toggle */
+ 		case 0x38: asus_map_key_clear(KEY_PROG1);	break; /* ROG key */
+ 		case 0xba: asus_map_key_clear(KEY_PROG2);	break; /* Fn+C ASUS Splendid */
+diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
+index a3e86930bf4186..ef9bed2f2dccb9 100644
+--- a/drivers/i2c/busses/i2c-designware-platdrv.c
++++ b/drivers/i2c/busses/i2c-designware-platdrv.c
+@@ -101,7 +101,7 @@ static int bt1_i2c_request_regs(struct dw_i2c_dev *dev)
+ }
+ #endif
+ 
+-static int txgbe_i2c_request_regs(struct dw_i2c_dev *dev)
++static int dw_i2c_get_parent_regmap(struct dw_i2c_dev *dev)
+ {
+ 	dev->map = dev_get_regmap(dev->dev->parent, NULL);
+ 	if (!dev->map)
+@@ -123,12 +123,15 @@ static int dw_i2c_plat_request_regs(struct dw_i2c_dev *dev)
+ 	struct platform_device *pdev = to_platform_device(dev->dev);
+ 	int ret;
+ 
++	if (device_is_compatible(dev->dev, "intel,xe-i2c"))
++		return dw_i2c_get_parent_regmap(dev);
++
+ 	switch (dev->flags & MODEL_MASK) {
+ 	case MODEL_BAIKAL_BT1:
+ 		ret = bt1_i2c_request_regs(dev);
+ 		break;
+ 	case MODEL_WANGXUN_SP:
+-		ret = txgbe_i2c_request_regs(dev);
++		ret = dw_i2c_get_parent_regmap(dev);
+ 		break;
+ 	default:
+ 		dev->base = devm_platform_ioremap_resource(pdev, 0);
+diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
+index f49f78b69ab9c8..e74273bc078be5 100644
+--- a/drivers/infiniband/hw/mlx5/devx.c
++++ b/drivers/infiniband/hw/mlx5/devx.c
+@@ -191,6 +191,7 @@ static u16 get_legacy_obj_type(u16 opcode)
+ {
+ 	switch (opcode) {
+ 	case MLX5_CMD_OP_CREATE_RQ:
++	case MLX5_CMD_OP_CREATE_RMP:
+ 		return MLX5_EVENT_QUEUE_TYPE_RQ;
+ 	case MLX5_CMD_OP_CREATE_QP:
+ 		return MLX5_EVENT_QUEUE_TYPE_QP;
+diff --git a/drivers/iommu/iommufd/fault.c b/drivers/iommu/iommufd/fault.c
+index 1b0812f8bf840a..af39b2379d5340 100644
+--- a/drivers/iommu/iommufd/fault.c
++++ b/drivers/iommu/iommufd/fault.c
+@@ -415,7 +415,7 @@ int iommufd_fault_alloc(struct iommufd_ucmd *ucmd)
+ 	fdno = get_unused_fd_flags(O_CLOEXEC);
+ 	if (fdno < 0) {
+ 		rc = fdno;
+-		goto out_fput;
++		goto out_abort;
+ 	}
+ 
+ 	cmd->out_fault_id = fault->obj.id;
+@@ -431,8 +431,6 @@ int iommufd_fault_alloc(struct iommufd_ucmd *ucmd)
+ 	return 0;
+ out_put_fdno:
+ 	put_unused_fd(fdno);
+-out_fput:
+-	fput(filep);
+ out_abort:
+ 	iommufd_object_abort_and_destroy(ucmd->ictx, &fault->obj);
+ 
+diff --git a/drivers/iommu/iommufd/main.c b/drivers/iommu/iommufd/main.c
+index 649fe79d0f0cc6..6add737a670842 100644
+--- a/drivers/iommu/iommufd/main.c
++++ b/drivers/iommu/iommufd/main.c
+@@ -23,6 +23,7 @@
+ #include "iommufd_test.h"
+ 
+ struct iommufd_object_ops {
++	size_t file_offset;
+ 	void (*destroy)(struct iommufd_object *obj);
+ 	void (*abort)(struct iommufd_object *obj);
+ };
+@@ -97,10 +98,30 @@ void iommufd_object_abort(struct iommufd_ctx *ictx, struct iommufd_object *obj)
+ void iommufd_object_abort_and_destroy(struct iommufd_ctx *ictx,
+ 				      struct iommufd_object *obj)
+ {
+-	if (iommufd_object_ops[obj->type].abort)
+-		iommufd_object_ops[obj->type].abort(obj);
++	const struct iommufd_object_ops *ops = &iommufd_object_ops[obj->type];
++
++	if (ops->file_offset) {
++		struct file **filep = ((void *)obj) + ops->file_offset;
++
++		/*
++		 * A file should hold a users refcount while the file is open
++		 * and put it back in its release. The file should hold a
++		 * pointer to obj in its private data. Normal fput() is
++		 * deferred to a workqueue and can get out of order with the
++		 * following kfree(obj). Using the sync version ensures the
++		 * release happens immediately. During abort we require the file
++		 * refcount is one at this point - meaning the object alloc
++		 * function cannot do anything to allow another thread to take a
++		 * refcount prior to a guaranteed success.
++		 */
++		if (*filep)
++			__fput_sync(*filep);
++	}
++
++	if (ops->abort)
++		ops->abort(obj);
+ 	else
+-		iommufd_object_ops[obj->type].destroy(obj);
++		ops->destroy(obj);
+ 	iommufd_object_abort(ictx, obj);
+ }
+ 
+@@ -498,6 +519,12 @@ void iommufd_ctx_put(struct iommufd_ctx *ictx)
+ }
+ EXPORT_SYMBOL_NS_GPL(iommufd_ctx_put, IOMMUFD);
+ 
++#define IOMMUFD_FILE_OFFSET(_struct, _filep, _obj)                           \
++	.file_offset = (offsetof(_struct, _filep) +                          \
++			BUILD_BUG_ON_ZERO(!__same_type(                      \
++				struct file *, ((_struct *)NULL)->_filep)) + \
++			BUILD_BUG_ON_ZERO(offsetof(_struct, _obj)))
++
+ static const struct iommufd_object_ops iommufd_object_ops[] = {
+ 	[IOMMUFD_OBJ_ACCESS] = {
+ 		.destroy = iommufd_access_destroy_object,
+@@ -518,6 +545,7 @@ static const struct iommufd_object_ops iommufd_object_ops[] = {
+ 	},
+ 	[IOMMUFD_OBJ_FAULT] = {
+ 		.destroy = iommufd_fault_destroy,
++		IOMMUFD_FILE_OFFSET(struct iommufd_fault, filep, obj),
+ 	},
+ #ifdef CONFIG_IOMMUFD_TEST
+ 	[IOMMUFD_OBJ_SELFTEST] = {
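
The IOMMUFD_FILE_OFFSET() macro above records where the struct file * lives inside each object and uses BUILD_BUG_ON_ZERO() so the table entry only compiles if the member really has that type and the object header sits at offset zero. The same compile-time-checked-offset trick in isolation (demo_* names are illustrative):

#include <linux/build_bug.h>
#include <linux/stddef.h>
#include <linux/types.h>

struct file;	/* pointer use only; the full definition is not needed */

struct demo_obj {
	long header;		/* must stay first, like iommufd_object */
	struct file *filep;
};

/*
 * Evaluates to offsetof(_s, _m), but only compiles if _m has type
 * struct file * and _hdr is the first member (offset 0).
 */
#define DEMO_FILE_OFFSET(_s, _m, _hdr)                                       \
	(offsetof(_s, _m) +                                                  \
	 BUILD_BUG_ON_ZERO(!__same_type(struct file *, ((_s *)NULL)->_m)) +  \
	 BUILD_BUG_ON_ZERO(offsetof(_s, _hdr)))

static const size_t demo_off = DEMO_FILE_OFFSET(struct demo_obj, filep, header);
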
+diff --git a/drivers/mmc/host/sdhci-cadence.c b/drivers/mmc/host/sdhci-cadence.c
+index be1505e8c536e8..7759531ccca70f 100644
+--- a/drivers/mmc/host/sdhci-cadence.c
++++ b/drivers/mmc/host/sdhci-cadence.c
+@@ -433,6 +433,13 @@ static const struct sdhci_cdns_drv_data sdhci_elba_drv_data = {
+ 	},
+ };
+ 
++static const struct sdhci_cdns_drv_data sdhci_eyeq_drv_data = {
++	.pltfm_data = {
++		.ops = &sdhci_cdns_ops,
++		.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
++	},
++};
++
+ static const struct sdhci_cdns_drv_data sdhci_cdns_drv_data = {
+ 	.pltfm_data = {
+ 		.ops = &sdhci_cdns_ops,
+@@ -595,6 +602,10 @@ static const struct of_device_id sdhci_cdns_match[] = {
+ 		.compatible = "amd,pensando-elba-sd4hc",
+ 		.data = &sdhci_elba_drv_data,
+ 	},
++	{
++		.compatible = "mobileye,eyeq-sd4hc",
++		.data = &sdhci_eyeq_drv_data,
++	},
+ 	{ .compatible = "cdns,sd4hc" },
+ 	{ /* sentinel */ }
+ };
+diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c
+index 2b7dd359f27b7d..8569178b66df7d 100644
+--- a/drivers/net/can/rcar/rcar_can.c
++++ b/drivers/net/can/rcar/rcar_can.c
+@@ -861,7 +861,6 @@ static int __maybe_unused rcar_can_resume(struct device *dev)
+ {
+ 	struct net_device *ndev = dev_get_drvdata(dev);
+ 	struct rcar_can_priv *priv = netdev_priv(ndev);
+-	u16 ctlr;
+ 	int err;
+ 
+ 	if (!netif_running(ndev))
+@@ -873,12 +872,7 @@ static int __maybe_unused rcar_can_resume(struct device *dev)
+ 		return err;
+ 	}
+ 
+-	ctlr = readw(&priv->regs->ctlr);
+-	ctlr &= ~RCAR_CAN_CTLR_SLPM;
+-	writew(ctlr, &priv->regs->ctlr);
+-	ctlr &= ~RCAR_CAN_CTLR_CANM;
+-	writew(ctlr, &priv->regs->ctlr);
+-	priv->can.state = CAN_STATE_ERROR_ACTIVE;
++	rcar_can_start(ndev);
+ 
+ 	netif_device_attach(ndev);
+ 	netif_start_queue(ndev);
+diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c
+index 1b9501ee10deb5..ff39afc77d7d23 100644
+--- a/drivers/net/can/spi/hi311x.c
++++ b/drivers/net/can/spi/hi311x.c
+@@ -813,6 +813,7 @@ static const struct net_device_ops hi3110_netdev_ops = {
+ 	.ndo_open = hi3110_open,
+ 	.ndo_stop = hi3110_stop,
+ 	.ndo_start_xmit = hi3110_hard_start_xmit,
++	.ndo_change_mtu = can_change_mtu,
+ };
+ 
+ static const struct ethtool_ops hi3110_ethtool_ops = {
+diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c
+index 4311c1f0eafd8d..30b30fdbcae9c0 100644
+--- a/drivers/net/can/sun4i_can.c
++++ b/drivers/net/can/sun4i_can.c
+@@ -768,6 +768,7 @@ static const struct net_device_ops sun4ican_netdev_ops = {
+ 	.ndo_open = sun4ican_open,
+ 	.ndo_stop = sun4ican_close,
+ 	.ndo_start_xmit = sun4ican_start_xmit,
++	.ndo_change_mtu = can_change_mtu,
+ };
+ 
+ static const struct ethtool_ops sun4ican_ethtool_ops = {
+diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.c b/drivers/net/can/usb/etas_es58x/es58x_core.c
+index 71f24dc0a92711..4fc9bed0d2e1eb 100644
+--- a/drivers/net/can/usb/etas_es58x/es58x_core.c
++++ b/drivers/net/can/usb/etas_es58x/es58x_core.c
+@@ -7,7 +7,7 @@
+  *
+  * Copyright (c) 2019 Robert Bosch Engineering and Business Solutions. All rights reserved.
+  * Copyright (c) 2020 ETAS K.K.. All rights reserved.
+- * Copyright (c) 2020-2022 Vincent Mailhol <mailhol.vincent@wanadoo.fr>
++ * Copyright (c) 2020-2025 Vincent Mailhol <mailhol@kernel.org>
+  */
+ 
+ #include <linux/unaligned.h>
+@@ -1977,6 +1977,7 @@ static const struct net_device_ops es58x_netdev_ops = {
+ 	.ndo_stop = es58x_stop,
+ 	.ndo_start_xmit = es58x_start_xmit,
+ 	.ndo_eth_ioctl = can_eth_ioctl_hwts,
++	.ndo_change_mtu = can_change_mtu,
+ };
+ 
+ static const struct ethtool_ops es58x_ethtool_ops = {
+diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c
+index 41c0a1c399bf36..1f9b915094e64d 100644
+--- a/drivers/net/can/usb/mcba_usb.c
++++ b/drivers/net/can/usb/mcba_usb.c
+@@ -761,6 +761,7 @@ static const struct net_device_ops mcba_netdev_ops = {
+ 	.ndo_open = mcba_usb_open,
+ 	.ndo_stop = mcba_usb_close,
+ 	.ndo_start_xmit = mcba_usb_start_xmit,
++	.ndo_change_mtu = can_change_mtu,
+ };
+ 
+ static const struct ethtool_ops mcba_ethtool_ops = {
+diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+index 59f7cd8ceb397f..69c44f675ff1c6 100644
+--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
++++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+@@ -111,7 +111,7 @@ void peak_usb_update_ts_now(struct peak_time_ref *time_ref, u32 ts_now)
+ 		u32 delta_ts = time_ref->ts_dev_2 - time_ref->ts_dev_1;
+ 
+ 		if (time_ref->ts_dev_2 < time_ref->ts_dev_1)
+-			delta_ts &= (1 << time_ref->adapter->ts_used_bits) - 1;
++			delta_ts &= (1ULL << time_ref->adapter->ts_used_bits) - 1;
+ 
+ 		time_ref->ts_total += delta_ts;
+ 	}
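
The peak_usb one-liner fixes an undefined shift: the literal 1 is a 32-bit int, so 1 << 32 is undefined behaviour when ts_used_bits is 32, whereas 1ULL performs the shift in 64 bits and the mask then truncates as intended. A standalone illustration (hypothetical helper):

#include <stdint.h>
#include <stdio.h>

static uint32_t rollover_mask(unsigned int used_bits)
{
	/* (1 << 32) is UB: '1' is a 32-bit int. 1ULL makes the shift 64-bit. */
	return (uint32_t)((1ULL << used_bits) - 1);
}

int main(void)
{
	printf("24 bits -> 0x%08x\n", rollover_mask(24)); /* 0x00ffffff */
	printf("32 bits -> 0x%08x\n", rollover_mask(32)); /* 0xffffffff */
	return 0;
}
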
+diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
+index fcd4505f49252f..b1f18a840da773 100644
+--- a/drivers/net/dsa/lantiq_gswip.c
++++ b/drivers/net/dsa/lantiq_gswip.c
+@@ -685,18 +685,27 @@ static int gswip_add_single_port_br(struct gswip_priv *priv, int port, bool add)
+ 	return 0;
+ }
+ 
+-static int gswip_port_enable(struct dsa_switch *ds, int port,
+-			     struct phy_device *phydev)
++static int gswip_port_setup(struct dsa_switch *ds, int port)
+ {
+ 	struct gswip_priv *priv = ds->priv;
+ 	int err;
+ 
+ 	if (!dsa_is_cpu_port(ds, port)) {
+-		u32 mdio_phy = 0;
+-
+ 		err = gswip_add_single_port_br(priv, port, true);
+ 		if (err)
+ 			return err;
++	}
++
++	return 0;
++}
++
++static int gswip_port_enable(struct dsa_switch *ds, int port,
++			     struct phy_device *phydev)
++{
++	struct gswip_priv *priv = ds->priv;
++
++	if (!dsa_is_cpu_port(ds, port)) {
++		u32 mdio_phy = 0;
+ 
+ 		if (phydev)
+ 			mdio_phy = phydev->mdio.addr & GSWIP_MDIO_PHY_ADDR_MASK;
+@@ -1359,8 +1368,9 @@ static int gswip_port_fdb(struct dsa_switch *ds, int port,
+ 	int i;
+ 	int err;
+ 
++	/* Operation not supported on the CPU port, don't throw errors */
+ 	if (!bridge)
+-		return -EINVAL;
++		return 0;
+ 
+ 	for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
+ 		if (priv->vlans[i].bridge == bridge) {
+@@ -1829,6 +1839,7 @@ static const struct phylink_mac_ops gswip_phylink_mac_ops = {
+ static const struct dsa_switch_ops gswip_xrx200_switch_ops = {
+ 	.get_tag_protocol	= gswip_get_tag_protocol,
+ 	.setup			= gswip_setup,
++	.port_setup		= gswip_port_setup,
+ 	.port_enable		= gswip_port_enable,
+ 	.port_disable		= gswip_port_disable,
+ 	.port_bridge_join	= gswip_port_bridge_join,
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+index d2ca90407cce76..8057350236c5ef 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+@@ -244,7 +244,7 @@ bnxt_tc_parse_pedit(struct bnxt *bp, struct bnxt_tc_actions *actions,
+ 			   offset < offset_of_ip6_daddr + 16) {
+ 			actions->nat.src_xlate = false;
+ 			idx = (offset - offset_of_ip6_daddr) / 4;
+-			actions->nat.l3.ipv6.saddr.s6_addr32[idx] = htonl(val);
++			actions->nat.l3.ipv6.daddr.s6_addr32[idx] = htonl(val);
+ 		} else {
+ 			netdev_err(bp->dev,
+ 				   "%s: IPv6_hdr: Invalid pedit field\n",
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index 0bd814251d56ef..d144494f97e913 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -131,7 +131,7 @@ static const struct fec_devinfo fec_mvf600_info = {
+ 		  FEC_QUIRK_HAS_MDIO_C45,
+ };
+ 
+-static const struct fec_devinfo fec_imx6x_info = {
++static const struct fec_devinfo fec_imx6sx_info = {
+ 	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
+ 		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
+ 		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
+@@ -196,7 +196,7 @@ static const struct of_device_id fec_dt_ids[] = {
+ 	{ .compatible = "fsl,imx28-fec", .data = &fec_imx28_info, },
+ 	{ .compatible = "fsl,imx6q-fec", .data = &fec_imx6q_info, },
+ 	{ .compatible = "fsl,mvf600-fec", .data = &fec_mvf600_info, },
+-	{ .compatible = "fsl,imx6sx-fec", .data = &fec_imx6x_info, },
++	{ .compatible = "fsl,imx6sx-fec", .data = &fec_imx6sx_info, },
+ 	{ .compatible = "fsl,imx6ul-fec", .data = &fec_imx6ul_info, },
+ 	{ .compatible = "fsl,imx8mq-fec", .data = &fec_imx8mq_info, },
+ 	{ .compatible = "fsl,imx8qm-fec", .data = &fec_imx8qm_info, },
+diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
+index b22bb0ae9b9d18..b8de97343ad3ea 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e.h
++++ b/drivers/net/ethernet/intel/i40e/i40e.h
+@@ -1277,7 +1277,8 @@ struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
+ 					    const u8 *macaddr);
+ int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr);
+ bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi);
+-int i40e_count_filters(struct i40e_vsi *vsi);
++int i40e_count_all_filters(struct i40e_vsi *vsi);
++int i40e_count_active_filters(struct i40e_vsi *vsi);
+ struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr);
+ void i40e_vlan_stripping_enable(struct i40e_vsi *vsi);
+ static inline bool i40e_is_sw_dcb(struct i40e_pf *pf)
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index 037c1a0cbd6a80..eae5923104f793 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -1241,12 +1241,30 @@ void i40e_update_stats(struct i40e_vsi *vsi)
+ }
+ 
+ /**
+- * i40e_count_filters - counts VSI mac filters
++ * i40e_count_all_filters - counts VSI MAC filters
+  * @vsi: the VSI to be searched
+  *
+- * Returns count of mac filters
+- **/
+-int i40e_count_filters(struct i40e_vsi *vsi)
++ * Return: count of MAC filters in any state.
++ */
++int i40e_count_all_filters(struct i40e_vsi *vsi)
++{
++	struct i40e_mac_filter *f;
++	struct hlist_node *h;
++	int bkt, cnt = 0;
++
++	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
++		cnt++;
++
++	return cnt;
++}
++
++/**
++ * i40e_count_active_filters - counts VSI MAC filters
++ * @vsi: the VSI to be searched
++ *
++ * Return: count of active MAC filters.
++ */
++int i40e_count_active_filters(struct i40e_vsi *vsi)
+ {
+ 	struct i40e_mac_filter *f;
+ 	struct hlist_node *h;
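
The i40e split above replaces one counter with two plain walks over the VSI's MAC-filter hashtable, using the _safe iterator so entries may be unlinked mid-walk. The generic <linux/hashtable.h> shape looks roughly like this (demo_* names are illustrative):

#include <linux/hashtable.h>

struct demo_filter {
	int state;		/* e.g. active vs. to-be-removed */
	struct hlist_node hlist;
};

static DEFINE_HASHTABLE(demo_hash, 8);	/* 2^8 buckets */

static int demo_count_filters(int wanted_state)
{
	struct demo_filter *f;
	struct hlist_node *h;
	int bkt, cnt = 0;

	hash_for_each_safe(demo_hash, bkt, h, f, hlist)
		if (wanted_state < 0 || f->state == wanted_state)
			cnt++;

	return cnt;
}
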
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+index 97f32a0c68d09e..646e394f519038 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+@@ -448,7 +448,7 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
+ 		    (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
+ 		    (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
+ 		    BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
+-		    (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
++		    FIELD_PREP(I40E_QINT_RQCTL_ITR_INDX_MASK, itr_idx);
+ 		wr32(hw, reg_idx, reg);
+ 	}
+ 
+@@ -653,6 +653,13 @@ static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
+ 
+ 	/* only set the required fields */
+ 	tx_ctx.base = info->dma_ring_addr / 128;
++
++	/* ring_len has to be multiple of 8 */
++	if (!IS_ALIGNED(info->ring_len, 8) ||
++	    info->ring_len > I40E_MAX_NUM_DESCRIPTORS_XL710) {
++		ret = -EINVAL;
++		goto error_context;
++	}
+ 	tx_ctx.qlen = info->ring_len;
+ 	tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]);
+ 	tx_ctx.rdylist_act = 0;
+@@ -716,6 +723,13 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
+ 
+ 	/* only set the required fields */
+ 	rx_ctx.base = info->dma_ring_addr / 128;
++
++	/* ring_len has to be multiple of 32 */
++	if (!IS_ALIGNED(info->ring_len, 32) ||
++	    info->ring_len > I40E_MAX_NUM_DESCRIPTORS_XL710) {
++		ret = -EINVAL;
++		goto error_param;
++	}
+ 	rx_ctx.qlen = info->ring_len;
+ 
+ 	if (info->splithdr_enabled) {
+@@ -1453,6 +1467,7 @@ static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
+ 	 * functions that may still be running at this point.
+ 	 */
+ 	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);
++	clear_bit(I40E_VF_STATE_RESOURCES_LOADED, &vf->vf_states);
+ 
+ 	/* In the case of a VFLR, the HW has already reset the VF and we
+ 	 * just need to clean up, so don't hit the VFRTRIG register.
+@@ -2119,7 +2134,10 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
+ 	size_t len = 0;
+ 	int ret;
+ 
+-	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_INIT)) {
++	i40e_sync_vf_state(vf, I40E_VF_STATE_INIT);
++
++	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) ||
++	    test_bit(I40E_VF_STATE_RESOURCES_LOADED, &vf->vf_states)) {
+ 		aq_ret = -EINVAL;
+ 		goto err;
+ 	}
+@@ -2222,6 +2240,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
+ 				vf->default_lan_addr.addr);
+ 	}
+ 	set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
++	set_bit(I40E_VF_STATE_RESOURCES_LOADED, &vf->vf_states);
+ 
+ err:
+ 	/* send the response back to the VF */
+@@ -2384,7 +2403,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
+ 		}
+ 
+ 		if (vf->adq_enabled) {
+-			if (idx >= ARRAY_SIZE(vf->ch)) {
++			if (idx >= vf->num_tc) {
+ 				aq_ret = -ENODEV;
+ 				goto error_param;
+ 			}
+@@ -2405,7 +2424,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
+ 		 * to its appropriate VSIs based on TC mapping
+ 		 */
+ 		if (vf->adq_enabled) {
+-			if (idx >= ARRAY_SIZE(vf->ch)) {
++			if (idx >= vf->num_tc) {
+ 				aq_ret = -ENODEV;
+ 				goto error_param;
+ 			}
+@@ -2455,8 +2474,10 @@ static int i40e_validate_queue_map(struct i40e_vf *vf, u16 vsi_id,
+ 	u16 vsi_queue_id, queue_id;
+ 
+ 	for_each_set_bit(vsi_queue_id, &queuemap, I40E_MAX_VSI_QP) {
+-		if (vf->adq_enabled) {
+-			vsi_id = vf->ch[vsi_queue_id / I40E_MAX_VF_VSI].vsi_id;
++		u16 idx = vsi_queue_id / I40E_MAX_VF_VSI;
++
++		if (vf->adq_enabled && idx < vf->num_tc) {
++			vsi_id = vf->ch[idx].vsi_id;
+ 			queue_id = (vsi_queue_id % I40E_DEFAULT_QUEUES_PER_VF);
+ 		} else {
+ 			queue_id = vsi_queue_id;
+@@ -2844,24 +2865,6 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg)
+ 				      (u8 *)&stats, sizeof(stats));
+ }
+ 
+-/**
+- * i40e_can_vf_change_mac
+- * @vf: pointer to the VF info
+- *
+- * Return true if the VF is allowed to change its MAC filters, false otherwise
+- */
+-static bool i40e_can_vf_change_mac(struct i40e_vf *vf)
+-{
+-	/* If the VF MAC address has been set administratively (via the
+-	 * ndo_set_vf_mac command), then deny permission to the VF to
+-	 * add/delete unicast MAC addresses, unless the VF is trusted
+-	 */
+-	if (vf->pf_set_mac && !vf->trusted)
+-		return false;
+-
+-	return true;
+-}
+-
+ #define I40E_MAX_MACVLAN_PER_HW 3072
+ #define I40E_MAX_MACVLAN_PER_PF(num_ports) (I40E_MAX_MACVLAN_PER_HW /	\
+ 	(num_ports))
+@@ -2900,8 +2903,10 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
+ 	struct i40e_pf *pf = vf->pf;
+ 	struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
+ 	struct i40e_hw *hw = &pf->hw;
+-	int mac2add_cnt = 0;
+-	int i;
++	int i, mac_add_max, mac_add_cnt = 0;
++	bool vf_trusted;
++
++	vf_trusted = test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
+ 
+ 	for (i = 0; i < al->num_elements; i++) {
+ 		struct i40e_mac_filter *f;
+@@ -2921,9 +2926,8 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
+ 		 * The VF may request to set the MAC address filter already
+ 		 * assigned to it so do not return an error in that case.
+ 		 */
+-		if (!i40e_can_vf_change_mac(vf) &&
+-		    !is_multicast_ether_addr(addr) &&
+-		    !ether_addr_equal(addr, vf->default_lan_addr.addr)) {
++		if (!vf_trusted && !is_multicast_ether_addr(addr) &&
++		    vf->pf_set_mac && !ether_addr_equal(addr, vf->default_lan_addr.addr)) {
+ 			dev_err(&pf->pdev->dev,
+ 				"VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
+ 			return -EPERM;
+@@ -2932,29 +2936,33 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
+ 		/*count filters that really will be added*/
+ 		f = i40e_find_mac(vsi, addr);
+ 		if (!f)
+-			++mac2add_cnt;
++			++mac_add_cnt;
+ 	}
+ 
+ 	/* If this VF is not privileged, then we can't add more than a limited
+-	 * number of addresses. Check to make sure that the additions do not
+-	 * push us over the limit.
+-	 */
+-	if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
+-		if ((i40e_count_filters(vsi) + mac2add_cnt) >
+-		    I40E_VC_MAX_MAC_ADDR_PER_VF) {
+-			dev_err(&pf->pdev->dev,
+-				"Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n");
+-			return -EPERM;
+-		}
+-	/* If this VF is trusted, it can use more resources than untrusted.
++	 * number of addresses.
++	 *
++	 * If this VF is trusted, it can use more resources than untrusted.
+ 	 * However to ensure that every trusted VF has appropriate number of
+ 	 * resources, divide whole pool of resources per port and then across
+ 	 * all VFs.
+ 	 */
+-	} else {
+-		if ((i40e_count_filters(vsi) + mac2add_cnt) >
+-		    I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(pf->num_alloc_vfs,
+-						       hw->num_ports)) {
++	if (!vf_trusted)
++		mac_add_max = I40E_VC_MAX_MAC_ADDR_PER_VF;
++	else
++		mac_add_max = I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(pf->num_alloc_vfs, hw->num_ports);
++
++	/* VF can replace all its filters in one step; in this case mac_add_max
++	 * will be added as active and another mac_add_max will be in
++	 * a to-be-removed state. Account for that.
++	 */
++	if ((i40e_count_active_filters(vsi) + mac_add_cnt) > mac_add_max ||
++	    (i40e_count_all_filters(vsi) + mac_add_cnt) > 2 * mac_add_max) {
++		if (!vf_trusted) {
++			dev_err(&pf->pdev->dev,
++				"Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n");
++			return -EPERM;
++		} else {
+ 			dev_err(&pf->pdev->dev,
+ 				"Cannot add more MAC addresses, trusted VF exhausted it's resources\n");
+ 			return -EPERM;
+@@ -3589,7 +3597,7 @@ static int i40e_validate_cloud_filter(struct i40e_vf *vf,
+ 
+ 	/* action_meta is TC number here to which the filter is applied */
+ 	if (!tc_filter->action_meta ||
+-	    tc_filter->action_meta > vf->num_tc) {
++	    tc_filter->action_meta >= vf->num_tc) {
+ 		dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n",
+ 			 vf->vf_id, tc_filter->action_meta);
+ 		goto err;
+@@ -3887,6 +3895,8 @@ static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
+ 				       aq_ret);
+ }
+ 
++#define I40E_MAX_VF_CLOUD_FILTER 0xFF00
++
+ /**
+  * i40e_vc_add_cloud_filter
+  * @vf: pointer to the VF info
+@@ -3926,6 +3936,14 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
+ 		goto err_out;
+ 	}
+ 
++	if (vf->num_cloud_filters >= I40E_MAX_VF_CLOUD_FILTER) {
++		dev_warn(&pf->pdev->dev,
++			 "VF %d: Max number of filters reached, can't apply cloud filter\n",
++			 vf->vf_id);
++		aq_ret = -ENOSPC;
++		goto err_out;
++	}
++
+ 	cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL);
+ 	if (!cfilter) {
+ 		aq_ret = -ENOMEM;
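
Several hunks above validate VF-supplied queue parameters before they reach hardware: Tx ring lengths must be a multiple of 8, Rx ring lengths a multiple of 32, and both are capped. A sketch of that validation shape (the cap below is illustrative, not the real i40e limit):

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>

#define DEMO_MAX_DESCRIPTORS	8160	/* illustrative cap only */

static int demo_check_ring_len(u16 ring_len, unsigned int multiple)
{
	/* Reject lengths the hardware cannot express */
	if (!IS_ALIGNED(ring_len, multiple) || ring_len > DEMO_MAX_DESCRIPTORS)
		return -EINVAL;

	return 0;
}

/* Tx: demo_check_ring_len(len, 8);  Rx: demo_check_ring_len(len, 32); */
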
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+index 66f95e2f3146a8..e0e797fea138ad 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+@@ -41,7 +41,8 @@ enum i40e_vf_states {
+ 	I40E_VF_STATE_MC_PROMISC,
+ 	I40E_VF_STATE_UC_PROMISC,
+ 	I40E_VF_STATE_PRE_ENABLE,
+-	I40E_VF_STATE_RESETTING
++	I40E_VF_STATE_RESETTING,
++	I40E_VF_STATE_RESOURCES_LOADED,
+ };
+ 
+ /* VF capabilities */
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+index 971993586fb49d..ca74bf68433695 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+@@ -21,8 +21,7 @@
+ #include "rvu.h"
+ #include "lmac_common.h"
+ 
+-#define DRV_NAME	"Marvell-CGX/RPM"
+-#define DRV_STRING      "Marvell CGX/RPM Driver"
++#define DRV_NAME	"Marvell-CGX-RPM"
+ 
+ #define CGX_RX_STAT_GLOBAL_INDEX	9
+ 
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
+index e63cc1eb6d8917..ed041c3f714f26 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
+@@ -1319,7 +1319,6 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
+ 
+ free_leaf:
+ 	otx2_tc_del_from_flow_list(flow_cfg, new_node);
+-	kfree_rcu(new_node, rcu);
+ 	if (new_node->is_act_police) {
+ 		mutex_lock(&nic->mbox.lock);
+ 
+@@ -1339,6 +1338,7 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
+ 
+ 		mutex_unlock(&nic->mbox.lock);
+ 	}
++	kfree_rcu(new_node, rcu);
+ 
+ 	return rc;
+ }
+diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
+index 7b33993f7001e4..f1827a1bd7a596 100644
+--- a/drivers/net/phy/sfp.c
++++ b/drivers/net/phy/sfp.c
+@@ -360,6 +360,11 @@ static void sfp_fixup_ignore_tx_fault(struct sfp *sfp)
+ 	sfp->state_ignore_mask |= SFP_F_TX_FAULT;
+ }
+ 
++static void sfp_fixup_ignore_hw(struct sfp *sfp, unsigned int mask)
++{
++	sfp->state_hw_mask &= ~mask;
++}
++
+ static void sfp_fixup_nokia(struct sfp *sfp)
+ {
+ 	sfp_fixup_long_startup(sfp);
+@@ -408,7 +413,19 @@ static void sfp_fixup_halny_gsfp(struct sfp *sfp)
+ 	 * these are possibly used for other purposes on this
+ 	 * module, e.g. a serial port.
+ 	 */
+-	sfp->state_hw_mask &= ~(SFP_F_TX_FAULT | SFP_F_LOS);
++	sfp_fixup_ignore_hw(sfp, SFP_F_TX_FAULT | SFP_F_LOS);
++}
++
++static void sfp_fixup_potron(struct sfp *sfp)
++{
++	/*
++	 * The TX_FAULT and LOS pins on this device are used for serial
++	 * communication, so ignore them. Additionally, provide extra
++	 * time for this device to fully start up.
++	 */
++
++	sfp_fixup_long_startup(sfp);
++	sfp_fixup_ignore_hw(sfp, SFP_F_TX_FAULT | SFP_F_LOS);
+ }
+ 
+ static void sfp_fixup_rollball_cc(struct sfp *sfp)
+@@ -474,6 +491,9 @@ static const struct sfp_quirk sfp_quirks[] = {
+ 	SFP_QUIRK("ALCATELLUCENT", "3FE46541AA", sfp_quirk_2500basex,
+ 		  sfp_fixup_nokia),
+ 
++	// FLYPRO SFP-10GT-CS-30M uses Rollball protocol to talk to the PHY.
++	SFP_QUIRK_F("FLYPRO", "SFP-10GT-CS-30M", sfp_fixup_rollball),
++
+ 	// Fiberstore SFP-10G-T doesn't identify as copper, uses the Rollball
+ 	// protocol to talk to the PHY and needs 4 sec wait before probing the
+ 	// PHY.
+@@ -511,6 +531,8 @@ static const struct sfp_quirk sfp_quirks[] = {
+ 	SFP_QUIRK_F("Walsun", "HXSX-ATRC-1", sfp_fixup_fs_10gt),
+ 	SFP_QUIRK_F("Walsun", "HXSX-ATRI-1", sfp_fixup_fs_10gt),
+ 
++	SFP_QUIRK_F("YV", "SFP+ONU-XGSPON", sfp_fixup_potron),
++
+ 	// OEM SFP-GE-T is a 1000Base-T module with broken TX_FAULT indicator
+ 	SFP_QUIRK_F("OEM", "SFP-GE-T", sfp_fixup_ignore_tx_fault),
+ 
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index fae1a0ab36bdfe..fb9d425eff8c1b 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -1932,6 +1932,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
+ 				local_bh_enable();
+ 				goto unlock_frags;
+ 			}
++
++			if (frags && skb != tfile->napi.skb)
++				tfile->napi.skb = skb;
+ 		}
+ 		rcu_read_unlock();
+ 		local_bh_enable();
+diff --git a/drivers/net/wireless/virtual/virt_wifi.c b/drivers/net/wireless/virtual/virt_wifi.c
+index 4ee3740804667b..a77a27c36bdbee 100644
+--- a/drivers/net/wireless/virtual/virt_wifi.c
++++ b/drivers/net/wireless/virtual/virt_wifi.c
+@@ -277,7 +277,9 @@ static void virt_wifi_connect_complete(struct work_struct *work)
+ 		priv->is_connected = true;
+ 
+ 	/* Schedules an event that acquires the rtnl lock. */
+-	cfg80211_connect_result(priv->upperdev, requested_bss, NULL, 0, NULL, 0,
++	cfg80211_connect_result(priv->upperdev,
++				priv->is_connected ? fake_router_bssid : NULL,
++				NULL, 0, NULL, 0,
+ 				status, GFP_KERNEL);
+ 	netif_carrier_on(priv->upperdev);
+ }
+diff --git a/drivers/platform/x86/lg-laptop.c b/drivers/platform/x86/lg-laptop.c
+index 4b57102c7f6270..6af6cf477c5b5b 100644
+--- a/drivers/platform/x86/lg-laptop.c
++++ b/drivers/platform/x86/lg-laptop.c
+@@ -8,6 +8,7 @@
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+ 
+ #include <linux/acpi.h>
++#include <linux/bitfield.h>
+ #include <linux/bits.h>
+ #include <linux/device.h>
+ #include <linux/dev_printk.h>
+@@ -75,6 +76,9 @@ MODULE_PARM_DESC(fw_debug, "Enable printing of firmware debug messages");
+ #define WMBB_USB_CHARGE 0x10B
+ #define WMBB_BATT_LIMIT 0x10C
+ 
++#define FAN_MODE_LOWER GENMASK(1, 0)
++#define FAN_MODE_UPPER GENMASK(5, 4)
++
+ #define PLATFORM_NAME   "lg-laptop"
+ 
+ MODULE_ALIAS("wmi:" WMI_EVENT_GUID0);
+@@ -274,29 +278,19 @@ static ssize_t fan_mode_store(struct device *dev,
+ 			      struct device_attribute *attr,
+ 			      const char *buffer, size_t count)
+ {
+-	bool value;
++	unsigned long value;
+ 	union acpi_object *r;
+-	u32 m;
+ 	int ret;
+ 
+-	ret = kstrtobool(buffer, &value);
++	ret = kstrtoul(buffer, 10, &value);
+ 	if (ret)
+ 		return ret;
++	if (value >= 3)
++		return -EINVAL;
+ 
+-	r = lg_wmab(dev, WM_FAN_MODE, WM_GET, 0);
+-	if (!r)
+-		return -EIO;
+-
+-	if (r->type != ACPI_TYPE_INTEGER) {
+-		kfree(r);
+-		return -EIO;
+-	}
+-
+-	m = r->integer.value;
+-	kfree(r);
+-	r = lg_wmab(dev, WM_FAN_MODE, WM_SET, (m & 0xffffff0f) | (value << 4));
+-	kfree(r);
+-	r = lg_wmab(dev, WM_FAN_MODE, WM_SET, (m & 0xfffffff0) | value);
++	r = lg_wmab(dev, WM_FAN_MODE, WM_SET,
++		FIELD_PREP(FAN_MODE_LOWER, value) |
++		FIELD_PREP(FAN_MODE_UPPER, value));
+ 	kfree(r);
+ 
+ 	return count;
+@@ -305,7 +299,7 @@ static ssize_t fan_mode_store(struct device *dev,
+ static ssize_t fan_mode_show(struct device *dev,
+ 			     struct device_attribute *attr, char *buffer)
+ {
+-	unsigned int status;
++	unsigned int mode;
+ 	union acpi_object *r;
+ 
+ 	r = lg_wmab(dev, WM_FAN_MODE, WM_GET, 0);
+@@ -317,10 +311,10 @@ static ssize_t fan_mode_show(struct device *dev,
+ 		return -EIO;
+ 	}
+ 
+-	status = r->integer.value & 0x01;
++	mode = FIELD_GET(FAN_MODE_LOWER, r->integer.value);
+ 	kfree(r);
+ 
+-	return sysfs_emit(buffer, "%d\n", status);
++	return sysfs_emit(buffer, "%d\n", mode);
+ }
+ 
+ static ssize_t usb_charge_store(struct device *dev,
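
The lg-laptop rewrite drops the read-modify-write of magic masks in favour of GENMASK()/FIELD_PREP()/FIELD_GET(), which derive the shift from the mask at compile time. The trio in isolation (demo_* names mirror the fan-mode fields but are illustrative):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define DEMO_MODE_LOWER	GENMASK(1, 0)	/* bits 1:0 */
#define DEMO_MODE_UPPER	GENMASK(5, 4)	/* bits 5:4 */

static u32 demo_pack(u32 mode)
{
	/* Write the same 2-bit mode into both fields, as the fan fix does */
	return FIELD_PREP(DEMO_MODE_LOWER, mode) |
	       FIELD_PREP(DEMO_MODE_UPPER, mode);
}

static u32 demo_unpack(u32 reg)
{
	return FIELD_GET(DEMO_MODE_LOWER, reg);	/* shifted back down to 0..3 */
}
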
+diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
+index 420e943bb73a7a..5e6197a6af5e29 100644
+--- a/drivers/ufs/core/ufs-mcq.c
++++ b/drivers/ufs/core/ufs-mcq.c
+@@ -243,7 +243,7 @@ int ufshcd_mcq_memory_alloc(struct ufs_hba *hba)
+ 		hwq->sqe_base_addr = dmam_alloc_coherent(hba->dev, utrdl_size,
+ 							 &hwq->sqe_dma_addr,
+ 							 GFP_KERNEL);
+-		if (!hwq->sqe_dma_addr) {
++		if (!hwq->sqe_base_addr) {
+ 			dev_err(hba->dev, "SQE allocation failed\n");
+ 			return -ENOMEM;
+ 		}
+@@ -252,7 +252,7 @@ int ufshcd_mcq_memory_alloc(struct ufs_hba *hba)
+ 		hwq->cqe_base_addr = dmam_alloc_coherent(hba->dev, cqe_size,
+ 							 &hwq->cqe_dma_addr,
+ 							 GFP_KERNEL);
+-		if (!hwq->cqe_dma_addr) {
++		if (!hwq->cqe_base_addr) {
+ 			dev_err(hba->dev, "CQE allocation failed\n");
+ 			return -ENOMEM;
+ 		}
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index bfd97cad8aa4d7..c0fd8ab3fe8fc2 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -734,7 +734,7 @@ void usb_detect_quirks(struct usb_device *udev)
+ 	udev->quirks ^= usb_detect_dynamic_quirks(udev);
+ 
+ 	if (udev->quirks)
+-		dev_dbg(&udev->dev, "USB quirks for this device: %x\n",
++		dev_dbg(&udev->dev, "USB quirks for this device: 0x%x\n",
+ 			udev->quirks);
+ 
+ #ifdef CONFIG_USB_DEFAULT_PERSIST
+diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c
+index 1fcc9348dd439d..123506681ef0a4 100644
+--- a/drivers/usb/host/xhci-dbgcap.c
++++ b/drivers/usb/host/xhci-dbgcap.c
+@@ -458,7 +458,7 @@ static void xhci_dbc_ring_init(struct xhci_ring *ring)
+ 		trb->link.segment_ptr = cpu_to_le64(ring->first_seg->dma);
+ 		trb->link.control = cpu_to_le32(LINK_TOGGLE | TRB_TYPE(TRB_LINK));
+ 	}
+-	xhci_initialize_ring_info(ring);
++	xhci_initialize_ring_info(ring, 1);
+ }
+ 
+ static int xhci_dbc_reinit_ep_rings(struct xhci_dbc *dbc)
+diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
+index c9694526b157dd..f0ed38da6a0c89 100644
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -27,12 +27,14 @@
+  * "All components of all Command and Transfer TRBs shall be initialized to '0'"
+  */
+ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
++					       unsigned int cycle_state,
+ 					       unsigned int max_packet,
+ 					       unsigned int num,
+ 					       gfp_t flags)
+ {
+ 	struct xhci_segment *seg;
+ 	dma_addr_t	dma;
++	int		i;
+ 	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
+ 
+ 	seg = kzalloc_node(sizeof(*seg), flags, dev_to_node(dev));
+@@ -54,6 +56,11 @@ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
+ 			return NULL;
+ 		}
+ 	}
++	/* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */
++	if (cycle_state == 0) {
++		for (i = 0; i < TRBS_PER_SEGMENT; i++)
++			seg->trbs[i].link.control = cpu_to_le32(TRB_CYCLE);
++	}
+ 	seg->num = num;
+ 	seg->dma = dma;
+ 	seg->next = NULL;
+@@ -131,14 +138,6 @@ static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *ring,
+ 
+ 	chain_links = xhci_link_chain_quirk(xhci, ring->type);
+ 
+-	/* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */
+-	if (ring->cycle_state == 0) {
+-		xhci_for_each_ring_seg(ring->first_seg, seg) {
+-			for (int i = 0; i < TRBS_PER_SEGMENT; i++)
+-				seg->trbs[i].link.control |= cpu_to_le32(TRB_CYCLE);
+-		}
+-	}
+-
+ 	next = ring->enq_seg->next;
+ 	xhci_link_segments(ring->enq_seg, first, ring->type, chain_links);
+ 	xhci_link_segments(last, next, ring->type, chain_links);
+@@ -288,7 +287,8 @@ void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
+ 	kfree(ring);
+ }
+ 
+-void xhci_initialize_ring_info(struct xhci_ring *ring)
++void xhci_initialize_ring_info(struct xhci_ring *ring,
++			       unsigned int cycle_state)
+ {
+ 	/* The ring is empty, so the enqueue pointer == dequeue pointer */
+ 	ring->enqueue = ring->first_seg->trbs;
+@@ -302,7 +302,7 @@ void xhci_initialize_ring_info(struct xhci_ring *ring)
+ 	 * New rings are initialized with cycle state equal to 1; if we are
+ 	 * handling ring expansion, set the cycle state equal to the old ring.
+ 	 */
+-	ring->cycle_state = 1;
++	ring->cycle_state = cycle_state;
+ 
+ 	/*
+ 	 * Each segment has a link TRB, and leave an extra TRB for SW
+@@ -317,6 +317,7 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
+ 					struct xhci_segment **first,
+ 					struct xhci_segment **last,
+ 					unsigned int num_segs,
++					unsigned int cycle_state,
+ 					enum xhci_ring_type type,
+ 					unsigned int max_packet,
+ 					gfp_t flags)
+@@ -327,7 +328,7 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
+ 
+ 	chain_links = xhci_link_chain_quirk(xhci, type);
+ 
+-	prev = xhci_segment_alloc(xhci, max_packet, num, flags);
++	prev = xhci_segment_alloc(xhci, cycle_state, max_packet, num, flags);
+ 	if (!prev)
+ 		return -ENOMEM;
+ 	num++;
+@@ -336,7 +337,8 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
+ 	while (num < num_segs) {
+ 		struct xhci_segment	*next;
+ 
+-		next = xhci_segment_alloc(xhci, max_packet, num, flags);
++		next = xhci_segment_alloc(xhci, cycle_state, max_packet, num,
++					  flags);
+ 		if (!next)
+ 			goto free_segments;
+ 
+@@ -361,8 +363,9 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
+  * Set the end flag and the cycle toggle bit on the last segment.
+  * See section 4.9.1 and figures 15 and 16.
+  */
+-struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, unsigned int num_segs,
+-				  enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
++struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
++		unsigned int num_segs, unsigned int cycle_state,
++		enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
+ {
+ 	struct xhci_ring	*ring;
+ 	int ret;
+@@ -380,7 +383,7 @@ struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, unsigned int num_segs,
+ 		return ring;
+ 
+ 	ret = xhci_alloc_segments_for_ring(xhci, &ring->first_seg, &ring->last_seg, num_segs,
+-					   type, max_packet, flags);
++					   cycle_state, type, max_packet, flags);
+ 	if (ret)
+ 		goto fail;
+ 
+@@ -390,7 +393,7 @@ struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, unsigned int num_segs,
+ 		ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |=
+ 			cpu_to_le32(LINK_TOGGLE);
+ 	}
+-	xhci_initialize_ring_info(ring);
++	xhci_initialize_ring_info(ring, cycle_state);
+ 	trace_xhci_ring_alloc(ring);
+ 	return ring;
+ 
+@@ -418,8 +421,8 @@ int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
+ 	struct xhci_segment	*last;
+ 	int			ret;
+ 
+-	ret = xhci_alloc_segments_for_ring(xhci, &first, &last, num_new_segs, ring->type,
+-					   ring->bounce_buf_len, flags);
++	ret = xhci_alloc_segments_for_ring(xhci, &first, &last, num_new_segs, ring->cycle_state,
++					   ring->type, ring->bounce_buf_len, flags);
+ 	if (ret)
+ 		return -ENOMEM;
+ 
+@@ -629,7 +632,8 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
+ 
+ 	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
+ 		stream_info->stream_rings[cur_stream] =
+-			xhci_ring_alloc(xhci, 2, TYPE_STREAM, max_packet, mem_flags);
++			xhci_ring_alloc(xhci, 2, 1, TYPE_STREAM, max_packet,
++					mem_flags);
+ 		cur_ring = stream_info->stream_rings[cur_stream];
+ 		if (!cur_ring)
+ 			goto cleanup_rings;
+@@ -970,7 +974,7 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
+ 	}
+ 
+ 	/* Allocate endpoint 0 ring */
+-	dev->eps[0].ring = xhci_ring_alloc(xhci, 2, TYPE_CTRL, 0, flags);
++	dev->eps[0].ring = xhci_ring_alloc(xhci, 2, 1, TYPE_CTRL, 0, flags);
+ 	if (!dev->eps[0].ring)
+ 		goto fail;
+ 
+@@ -1453,7 +1457,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
+ 
+ 	/* Set up the endpoint ring */
+ 	virt_dev->eps[ep_index].new_ring =
+-		xhci_ring_alloc(xhci, 2, ring_type, max_packet, mem_flags);
++		xhci_ring_alloc(xhci, 2, 1, ring_type, max_packet, mem_flags);
+ 	if (!virt_dev->eps[ep_index].new_ring)
+ 		return -ENOMEM;
+ 
+@@ -2262,7 +2266,7 @@ xhci_alloc_interrupter(struct xhci_hcd *xhci, unsigned int segs, gfp_t flags)
+ 	if (!ir)
+ 		return NULL;
+ 
+-	ir->event_ring = xhci_ring_alloc(xhci, segs, TYPE_EVENT, 0, flags);
++	ir->event_ring = xhci_ring_alloc(xhci, segs, 1, TYPE_EVENT, 0, flags);
+ 	if (!ir->event_ring) {
+ 		xhci_warn(xhci, "Failed to allocate interrupter event ring\n");
+ 		kfree(ir);
+@@ -2468,7 +2472,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
+ 		goto fail;
+ 
+ 	/* Set up the command ring to have one segments for now. */
+-	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, TYPE_COMMAND, 0, flags);
++	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, 0, flags);
+ 	if (!xhci->cmd_ring)
+ 		goto fail;
+ 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 3970ec831b8ca9..abbf89e82d01a3 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -769,7 +769,7 @@ static void xhci_clear_command_ring(struct xhci_hcd *xhci)
+ 		seg->trbs[TRBS_PER_SEGMENT - 1].link.control &= cpu_to_le32(~TRB_CYCLE);
+ 	}
+ 
+-	xhci_initialize_ring_info(ring);
++	xhci_initialize_ring_info(ring, 1);
+ 	/*
+ 	 * Reset the hardware dequeue pointer.
+ 	 * Yes, this will need to be re-written after resume, but we're paranoid
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index b2aeb444daaf5e..b4fa8e7e437639 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -1803,12 +1803,14 @@ void xhci_slot_copy(struct xhci_hcd *xhci,
+ int xhci_endpoint_init(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev,
+ 		struct usb_device *udev, struct usb_host_endpoint *ep,
+ 		gfp_t mem_flags);
+-struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, unsigned int num_segs,
++struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
++		unsigned int num_segs, unsigned int cycle_state,
+ 		enum xhci_ring_type type, unsigned int max_packet, gfp_t flags);
+ void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring);
+ int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
+ 		unsigned int num_trbs, gfp_t flags);
+-void xhci_initialize_ring_info(struct xhci_ring *ring);
++void xhci_initialize_ring_info(struct xhci_ring *ring,
++			unsigned int cycle_state);
+ void xhci_free_endpoint_ring(struct xhci_hcd *xhci,
+ 		struct xhci_virt_device *virt_dev,
+ 		unsigned int ep_index);
+diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
+index 893fd66b5269c7..469f81f880f025 100644
+--- a/drivers/video/fbdev/core/fbcon.c
++++ b/drivers/video/fbdev/core/fbcon.c
+@@ -2492,7 +2492,7 @@ static int fbcon_set_font(struct vc_data *vc, const struct console_font *font,
+ 	unsigned charcount = font->charcount;
+ 	int w = font->width;
+ 	int h = font->height;
+-	int size;
++	int size, alloc_size;
+ 	int i, csum;
+ 	u8 *new_data, *data = font->data;
+ 	int pitch = PITCH(font->width);
+@@ -2519,9 +2519,16 @@ static int fbcon_set_font(struct vc_data *vc, const struct console_font *font,
+ 	if (fbcon_invalid_charcount(info, charcount))
+ 		return -EINVAL;
+ 
+-	size = CALC_FONTSZ(h, pitch, charcount);
++	/* Check for integer overflow in font size calculation */
++	if (check_mul_overflow(h, pitch, &size) ||
++	    check_mul_overflow(size, charcount, &size))
++		return -EINVAL;
++
++	/* Check for overflow in allocation size calculation */
++	if (check_add_overflow(FONT_EXTRA_WORDS * sizeof(int), size, &alloc_size))
++		return -EINVAL;
+ 
+-	new_data = kmalloc(FONT_EXTRA_WORDS * sizeof(int) + size, GFP_USER);
++	new_data = kmalloc(alloc_size, GFP_USER);
+ 
+ 	if (!new_data)
+ 		return -ENOMEM;
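
The fbcon fix replaces the unchecked h * pitch * charcount computation with the <linux/overflow.h> helpers, which report when a multiplication or addition wraps instead of silently under-allocating. The idiom on its own (a sketch with illustrative parameters):

#include <linux/overflow.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/slab.h>

static void *demo_alloc_font(int h, int pitch, int charcount, size_t extra)
{
	int size;
	size_t total;

	if (check_mul_overflow(h, pitch, &size) ||
	    check_mul_overflow(size, charcount, &size))
		return ERR_PTR(-EINVAL);	/* h * pitch * charcount wrapped */

	if (check_add_overflow(extra, (size_t)size, &total))
		return ERR_PTR(-EINVAL);

	return kmalloc(total, GFP_USER);
}
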
+diff --git a/fs/afs/server.c b/fs/afs/server.c
+index 4504e16b458cc1..5e40e1332259b4 100644
+--- a/fs/afs/server.c
++++ b/fs/afs/server.c
+@@ -394,13 +394,14 @@ struct afs_server *afs_use_server(struct afs_server *server, enum afs_server_tra
+ void afs_put_server(struct afs_net *net, struct afs_server *server,
+ 		    enum afs_server_trace reason)
+ {
+-	unsigned int a, debug_id = server->debug_id;
++	unsigned int a, debug_id;
+ 	bool zero;
+ 	int r;
+ 
+ 	if (!server)
+ 		return;
+ 
++	debug_id = server->debug_id;
+ 	a = atomic_read(&server->active);
+ 	zero = __refcount_dec_and_test(&server->ref, &r);
+ 	trace_afs_server(debug_id, r - 1, a, reason);
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index 58e0cac5779dd5..ce991a8390466a 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -2699,6 +2699,11 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
+ 		goto error;
+ 	}
+ 
++	if (bdev_nr_bytes(file_bdev(bdev_file)) <= BTRFS_DEVICE_RANGE_RESERVED) {
++		ret = -EINVAL;
++		goto error;
++	}
++
+ 	if (fs_devices->seeding) {
+ 		seeding_dev = true;
+ 		down_write(&sb->s_umount);
+diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
+index c0856585bb6386..4aa9a1428dd583 100644
+--- a/fs/hugetlbfs/inode.c
++++ b/fs/hugetlbfs/inode.c
+@@ -594,14 +594,16 @@ static bool remove_inode_single_folio(struct hstate *h, struct inode *inode,
+ 
+ 	/*
+ 	 * If folio is mapped, it was faulted in after being
+-	 * unmapped in caller.  Unmap (again) while holding
+-	 * the fault mutex.  The mutex will prevent faults
+-	 * until we finish removing the folio.
++	 * unmapped in caller or hugetlb_vmdelete_list() skips
++	 * unmapping it due to fail to grab lock.  Unmap (again)
++	 * while holding the fault mutex.  The mutex will prevent
++	 * faults until we finish removing the folio.  Hold folio
++	 * lock to guarantee no concurrent migration.
+ 	 */
++	folio_lock(folio);
+ 	if (unlikely(folio_mapped(folio)))
+ 		hugetlb_unmap_file_folio(h, mapping, folio, index);
+ 
+-	folio_lock(folio);
+ 	/*
+ 	 * We must remove the folio from page cache before removing
+ 	 * the region/ reserve map (hugetlb_unreserve_pages).  In
+diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
+index 2257bf52fb2a49..8f5ad591d76256 100644
+--- a/fs/proc/task_mmu.c
++++ b/fs/proc/task_mmu.c
+@@ -2259,6 +2259,9 @@ static void pagemap_scan_backout_range(struct pagemap_scan_private *p,
+ {
+ 	struct page_region *cur_buf = &p->vec_buf[p->vec_buf_index];
+ 
++	if (!p->vec_buf)
++		return;
++
+ 	if (cur_buf->start != addr)
+ 		cur_buf->end = addr;
+ 	else
+diff --git a/fs/smb/client/smb2inode.c b/fs/smb/client/smb2inode.c
+index b51ccfb8843941..104a563dc317fe 100644
+--- a/fs/smb/client/smb2inode.c
++++ b/fs/smb/client/smb2inode.c
+@@ -641,7 +641,7 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ 
+ 	tmp_rc = rc;
+ 	for (i = 0; i < num_cmds; i++) {
+-		char *buf = rsp_iov[i + i].iov_base;
++		char *buf = rsp_iov[i + 1].iov_base;
+ 
+ 		if (buf && resp_buftype[i + 1] != CIFS_NO_BUFFER)
+ 			rc = server->ops->map_error(buf, false);
+diff --git a/fs/smb/server/transport_rdma.c b/fs/smb/server/transport_rdma.c
+index 2fc689f99997e8..d059c890d14280 100644
+--- a/fs/smb/server/transport_rdma.c
++++ b/fs/smb/server/transport_rdma.c
+@@ -147,7 +147,7 @@ struct smb_direct_transport {
+ 	wait_queue_head_t	wait_send_pending;
+ 	atomic_t		send_pending;
+ 
+-	struct delayed_work	post_recv_credits_work;
++	struct work_struct	post_recv_credits_work;
+ 	struct work_struct	send_immediate_work;
+ 	struct work_struct	disconnect_work;
+ 
+@@ -366,8 +366,8 @@ static struct smb_direct_transport *alloc_transport(struct rdma_cm_id *cm_id)
+ 
+ 	spin_lock_init(&t->lock_new_recv_credits);
+ 
+-	INIT_DELAYED_WORK(&t->post_recv_credits_work,
+-			  smb_direct_post_recv_credits);
++	INIT_WORK(&t->post_recv_credits_work,
++		  smb_direct_post_recv_credits);
+ 	INIT_WORK(&t->send_immediate_work, smb_direct_send_immediate_work);
+ 	INIT_WORK(&t->disconnect_work, smb_direct_disconnect_rdma_work);
+ 
+@@ -398,9 +398,9 @@ static void free_transport(struct smb_direct_transport *t)
+ 	wait_event(t->wait_send_pending,
+ 		   atomic_read(&t->send_pending) == 0);
+ 
+-	cancel_work_sync(&t->disconnect_work);
+-	cancel_delayed_work_sync(&t->post_recv_credits_work);
+-	cancel_work_sync(&t->send_immediate_work);
++	disable_work_sync(&t->disconnect_work);
++	disable_work_sync(&t->post_recv_credits_work);
++	disable_work_sync(&t->send_immediate_work);
+ 
+ 	if (t->qp) {
+ 		ib_drain_qp(t->qp);
+@@ -614,8 +614,7 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
+ 			wake_up_interruptible(&t->wait_send_credits);
+ 
+ 		if (is_receive_credit_post_required(receive_credits, avail_recvmsg_count))
+-			mod_delayed_work(smb_direct_wq,
+-					 &t->post_recv_credits_work, 0);
++			queue_work(smb_direct_wq, &t->post_recv_credits_work);
+ 
+ 		if (data_length) {
+ 			enqueue_reassembly(t, recvmsg, (int)data_length);
+@@ -772,8 +771,7 @@ static int smb_direct_read(struct ksmbd_transport *t, char *buf,
+ 		st->count_avail_recvmsg += queue_removed;
+ 		if (is_receive_credit_post_required(st->recv_credits, st->count_avail_recvmsg)) {
+ 			spin_unlock(&st->receive_credit_lock);
+-			mod_delayed_work(smb_direct_wq,
+-					 &st->post_recv_credits_work, 0);
++			queue_work(smb_direct_wq, &st->post_recv_credits_work);
+ 		} else {
+ 			spin_unlock(&st->receive_credit_lock);
+ 		}
+@@ -800,7 +798,7 @@ static int smb_direct_read(struct ksmbd_transport *t, char *buf,
+ static void smb_direct_post_recv_credits(struct work_struct *work)
+ {
+ 	struct smb_direct_transport *t = container_of(work,
+-		struct smb_direct_transport, post_recv_credits_work.work);
++		struct smb_direct_transport, post_recv_credits_work);
+ 	struct smb_direct_recvmsg *recvmsg;
+ 	int receive_credits, credits = 0;
+ 	int ret;
+@@ -1681,7 +1679,7 @@ static int smb_direct_prepare_negotiation(struct smb_direct_transport *t)
+ 		goto out_err;
+ 	}
+ 
+-	smb_direct_post_recv_credits(&t->post_recv_credits_work.work);
++	smb_direct_post_recv_credits(&t->post_recv_credits_work);
+ 	return 0;
+ out_err:
+ 	put_recvmsg(t, recvmsg);
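
The ksmbd change demotes post_recv_credits_work from a delayed work to a plain work item, since every caller queued it with a zero delay anyway, and switches teardown to disable_work_sync() so the items cannot be re-queued afterwards. Note the container_of() difference between the two flavours, sketched below with demo_* names:

#include <linux/workqueue.h>
#include <linux/printk.h>

struct demo_transport {
	struct work_struct credits_work;	/* plain: queue_work() */
	struct delayed_work retry_work;		/* delayed: queue_delayed_work() */
};

static void demo_credits_fn(struct work_struct *work)
{
	struct demo_transport *t =
		container_of(work, struct demo_transport, credits_work);

	pr_debug("reposting credits for %p\n", t);
}

static void demo_retry_fn(struct work_struct *work)
{
	struct demo_transport *t =
		container_of(work, struct demo_transport, retry_work.work);

	pr_debug("retrying %p\n", t);	/* note the extra .work hop */
}

static void demo_init(struct demo_transport *t)
{
	INIT_WORK(&t->credits_work, demo_credits_fn);
	INIT_DELAYED_WORK(&t->retry_work, demo_retry_fn);
}
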
+diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
+index 0c70f3a5557505..107b797c33ecf7 100644
+--- a/include/crypto/if_alg.h
++++ b/include/crypto/if_alg.h
+@@ -152,7 +152,7 @@ struct af_alg_ctx {
+ 	size_t used;
+ 	atomic_t rcvused;
+ 
+-	u32		more:1,
++	bool		more:1,
+ 			merge:1,
+ 			enc:1,
+ 			write:1,
+diff --git a/include/linux/firmware/imx/sm.h b/include/linux/firmware/imx/sm.h
+index 9b85a3f028d1b0..61f7a02b050096 100644
+--- a/include/linux/firmware/imx/sm.h
++++ b/include/linux/firmware/imx/sm.h
+@@ -17,7 +17,19 @@
+ #define SCMI_IMX_CTRL_SAI4_MCLK		4	/* WAKE SAI4 MCLK */
+ #define SCMI_IMX_CTRL_SAI5_MCLK		5	/* WAKE SAI5 MCLK */
+ 
++#if IS_ENABLED(CONFIG_IMX_SCMI_MISC_DRV)
+ int scmi_imx_misc_ctrl_get(u32 id, u32 *num, u32 *val);
+ int scmi_imx_misc_ctrl_set(u32 id, u32 val);
++#else
++static inline int scmi_imx_misc_ctrl_get(u32 id, u32 *num, u32 *val)
++{
++	return -EOPNOTSUPP;
++}
++
++static inline int scmi_imx_misc_ctrl_set(u32 id, u32 val)
++{
++	return -EOPNOTSUPP;
++}
++#endif
+ 
+ #endif
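The sm.h change follows the common config-gated stub idiom: when the backing driver is compiled out, callers still build and link, and get a well-defined -EOPNOTSUPP at runtime. A hedged userspace sketch, with CONFIG_DEMO_DRV standing in for CONFIG_IMX_SCMI_MISC_DRV and demo_ctrl_get() for the SCMI call:

/* Config-gated stub: with CONFIG_DEMO_DRV undefined the static inline
 * fallback is used, so there is no unresolved symbol and the caller
 * sees -EOPNOTSUPP.
 */
#include <errno.h>
#include <stdio.h>

#ifdef CONFIG_DEMO_DRV
int demo_ctrl_get(unsigned int id, unsigned int *val);
#else
static inline int demo_ctrl_get(unsigned int id, unsigned int *val)
{
	(void)id;
	(void)val;
	return -EOPNOTSUPP;
}
#endif

int main(void)
{
	unsigned int v;
	int ret = demo_ctrl_get(1, &v);

	if (ret == -EOPNOTSUPP)
		printf("control interface not built in\n");
	return 0;
}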
+diff --git a/include/linux/swap.h b/include/linux/swap.h
+index f3e0ac20c2e8c9..63f85b3fee2388 100644
+--- a/include/linux/swap.h
++++ b/include/linux/swap.h
+@@ -382,6 +382,16 @@ void folio_add_lru_vma(struct folio *, struct vm_area_struct *);
+ void mark_page_accessed(struct page *);
+ void folio_mark_accessed(struct folio *);
+ 
++static inline bool folio_may_be_lru_cached(struct folio *folio)
++{
++	/*
++	 * Holding PMD-sized folios in per-CPU LRU cache unbalances accounting.
++	 * Holding small numbers of low-order mTHP folios in per-CPU LRU cache
++	 * would be sensible, but nobody has implemented and tested that yet.
++	 */
++	return !folio_test_large(folio);
++}
++
+ extern atomic_t lru_disable_count;
+ 
+ static inline bool lru_cache_disabled(void)
+diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
+index df4af45f8603cd..69a1d8b12beff3 100644
+--- a/include/net/bluetooth/hci_core.h
++++ b/include/net/bluetooth/hci_core.h
+@@ -1203,6 +1203,27 @@ static inline struct hci_conn *hci_conn_hash_lookup_ba(struct hci_dev *hdev,
+ 	return NULL;
+ }
+ 
++static inline struct hci_conn *hci_conn_hash_lookup_role(struct hci_dev *hdev,
++							 __u8 type, __u8 role,
++							 bdaddr_t *ba)
++{
++	struct hci_conn_hash *h = &hdev->conn_hash;
++	struct hci_conn  *c;
++
++	rcu_read_lock();
++
++	list_for_each_entry_rcu(c, &h->list, list) {
++		if (c->type == type && c->role == role && !bacmp(&c->dst, ba)) {
++			rcu_read_unlock();
++			return c;
++		}
++	}
++
++	rcu_read_unlock();
++
++	return NULL;
++}
++
+ static inline struct hci_conn *hci_conn_hash_lookup_le(struct hci_dev *hdev,
+ 						       bdaddr_t *ba,
+ 						       __u8 ba_type)
+diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
+index 9380e0fd5e4af0..1f51c8f20722ed 100644
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -2953,7 +2953,10 @@ EXPORT_SYMBOL_GPL(bpf_event_output);
+ 
+ /* Always built-in helper functions. */
+ const struct bpf_func_proto bpf_tail_call_proto = {
+-	.func		= NULL,
++	/* func is unused for tail_call; we set it to pass the
++	 * get_helper_proto check
++	 */
++	.func		= BPF_PTR_POISON,
+ 	.gpl_only	= false,
+ 	.ret_type	= RET_VOID,
+ 	.arg1_type	= ARG_PTR_TO_CTX,
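BPF_PTR_POISON here is a non-NULL sentinel: it satisfies the verifier's new `*ptr && (*ptr)->func` presence check (see the verifier hunk below) while still faulting loudly if anything ever calls through it. A small illustrative sketch of the idea (the poison value below is arbitrary, not the kernel's):

/* Poison-sentinel pattern: distinguishable from NULL, never meant to
 * be dereferenced.
 */
#include <stdio.h>

#define PTR_POISON ((void *)0xdead4ead)

struct proto { int (*func)(void); };

static int slot_is_usable(const struct proto *p)
{
	/* Mirrors the verifier check: both the proto and its func
	 * pointer must be non-NULL.
	 */
	return p && p->func;
}

int main(void)
{
	struct proto tail_call = { .func = PTR_POISON };
	struct proto missing = { .func = NULL };

	printf("tail_call usable: %d\n", slot_is_usable(&tail_call));
	printf("missing usable:   %d\n", slot_is_usable(&missing));
	return 0;
}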
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 24ae8f33e5d76d..1829f62a74a9e3 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -7799,6 +7799,10 @@ static int process_timer_func(struct bpf_verifier_env *env, int regno,
+ 		verbose(env, "verifier bug. Two map pointers in a timer helper\n");
+ 		return -EFAULT;
+ 	}
++	if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
++		verbose(env, "bpf_timer cannot be used for PREEMPT_RT.\n");
++		return -EOPNOTSUPP;
++	}
+ 	meta->map_uid = reg->map_uid;
+ 	meta->map_ptr = map;
+ 	return 0;
+@@ -10465,7 +10469,7 @@ static int get_helper_proto(struct bpf_verifier_env *env, int func_id,
+ 		return -EINVAL;
+ 
+ 	*ptr = env->ops->get_func_proto(func_id, env->prog);
+-	return *ptr ? 0 : -EINVAL;
++	return *ptr && (*ptr)->func ? 0 : -EINVAL;
+ }
+ 
+ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
+diff --git a/kernel/futex/requeue.c b/kernel/futex/requeue.c
+index b47bb764b35201..559aae55792c6a 100644
+--- a/kernel/futex/requeue.c
++++ b/kernel/futex/requeue.c
+@@ -225,18 +225,20 @@ static inline
+ void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
+ 			   struct futex_hash_bucket *hb)
+ {
+-	q->key = *key;
++	struct task_struct *task;
+ 
++	q->key = *key;
+ 	__futex_unqueue(q);
+ 
+ 	WARN_ON(!q->rt_waiter);
+ 	q->rt_waiter = NULL;
+ 
+ 	q->lock_ptr = &hb->lock;
++	task = READ_ONCE(q->task);
+ 
+ 	/* Signal locked state to the waiter */
+ 	futex_requeue_pi_complete(q, 1);
+-	wake_up_state(q->task, TASK_NORMAL);
++	wake_up_state(task, TASK_NORMAL);
+ }
+ 
+ /**
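The futex hunk snapshots q->task before futex_requeue_pi_complete() publishes the locked state, since the waiter may exit and invalidate q->task once it observes completion. A C11 sketch of the snapshot-before-publish ordering, under the assumption that the release store is what lets the other side proceed:

/* Read the field you still need *before* the release store that lets
 * the other side tear the object down; operate on the local copy.
 */
#include <stdatomic.h>
#include <stdio.h>

struct waiter {
	void *task;              /* may be invalidated after 'done' */
	atomic_int done;
};

static void *wake_prepare(struct waiter *q)
{
	/* snapshot first ... */
	void *task = q->task;

	/* ... then publish completion with release semantics */
	atomic_store_explicit(&q->done, 1, memory_order_release);
	return task;             /* safe local copy */
}

int main(void)
{
	int dummy;
	struct waiter q = { .task = &dummy };

	atomic_init(&q.done, 0);
	printf("snapshotted %p\n", wake_prepare(&q));
	return 0;
}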
+diff --git a/kernel/trace/trace_dynevent.c b/kernel/trace/trace_dynevent.c
+index c9b0533407edeb..76737492e750e3 100644
+--- a/kernel/trace/trace_dynevent.c
++++ b/kernel/trace/trace_dynevent.c
+@@ -239,6 +239,10 @@ static int dyn_event_open(struct inode *inode, struct file *file)
+ {
+ 	int ret;
+ 
++	ret = security_locked_down(LOCKDOWN_TRACEFS);
++	if (ret)
++		return ret;
++
+ 	ret = tracing_check_open_get_tr(NULL);
+ 	if (ret)
+ 		return ret;
+diff --git a/kernel/vhost_task.c b/kernel/vhost_task.c
+index 2f844c279a3e01..7f24ccc896c649 100644
+--- a/kernel/vhost_task.c
++++ b/kernel/vhost_task.c
+@@ -100,6 +100,7 @@ void vhost_task_stop(struct vhost_task *vtsk)
+ 	 * freeing it below.
+ 	 */
+ 	wait_for_completion(&vtsk->exited);
++	put_task_struct(vtsk->task);
+ 	kfree(vtsk);
+ }
+ EXPORT_SYMBOL_GPL(vhost_task_stop);
+@@ -148,7 +149,7 @@ struct vhost_task *vhost_task_create(bool (*fn)(void *),
+ 		return ERR_PTR(PTR_ERR(tsk));
+ 	}
+ 
+-	vtsk->task = tsk;
++	vtsk->task = get_task_struct(tsk);
+ 	return vtsk;
+ }
+ EXPORT_SYMBOL_GPL(vhost_task_create);
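The vhost_task fix is a straight reference-count pairing: the creator pins the task with get_task_struct() so the pointer stays valid until vhost_task_stop() drops it, even if the task exits first. An illustrative userspace refcount sketch:

/* Two references: the task's own, dropped when it exits, and the
 * creator's, dropped by the stop path; whichever drop reaches zero
 * frees the object, so neither side can see a dangling pointer.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct task { atomic_int refs; };

static struct task *get_task(struct task *t)
{
	atomic_fetch_add(&t->refs, 1);
	return t;
}

static void put_task(struct task *t)
{
	if (atomic_fetch_sub(&t->refs, 1) == 1) {
		printf("freeing task\n");
		free(t);
	}
}

int main(void)
{
	struct task *t = malloc(sizeof(*t));
	struct task *mine;

	atomic_init(&t->refs, 1);        /* task's own reference */
	mine = get_task(t);              /* creator's reference   */
	put_task(t);                     /* task exits            */
	put_task(mine);                  /* vhost_task_stop()     */
	return 0;
}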
+diff --git a/mm/gup.c b/mm/gup.c
+index e9be7c49542a0f..d105817a0c9aab 100644
+--- a/mm/gup.c
++++ b/mm/gup.c
+@@ -2356,8 +2356,8 @@ static unsigned long collect_longterm_unpinnable_folios(
+ 		struct pages_or_folios *pofs)
+ {
+ 	unsigned long collected = 0;
+-	bool drain_allow = true;
+ 	struct folio *folio;
++	int drained = 0;
+ 	long i = 0;
+ 
+ 	for (folio = pofs_get_folio(pofs, i); folio;
+@@ -2376,10 +2376,17 @@ static unsigned long collect_longterm_unpinnable_folios(
+ 			continue;
+ 		}
+ 
+-		if (drain_allow && folio_ref_count(folio) !=
+-				   folio_expected_ref_count(folio) + 1) {
++		if (drained == 0 && folio_may_be_lru_cached(folio) &&
++				folio_ref_count(folio) !=
++				folio_expected_ref_count(folio) + 1) {
++			lru_add_drain();
++			drained = 1;
++		}
++		if (drained == 1 && folio_may_be_lru_cached(folio) &&
++				folio_ref_count(folio) !=
++				folio_expected_ref_count(folio) + 1) {
+ 			lru_add_drain_all();
+-			drain_allow = false;
++			drained = 2;
+ 		}
+ 
+ 		if (!folio_isolate_lru(folio))
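The gup.c rework replaces the single drain_allow flag with a staged counter: try the cheap per-CPU drain once, escalate to the system-wide drain once, and never repeat either regardless of how many folios still fail the check. A compact sketch of that escalation (needs_drain() is a stand-in for the ref-count comparison):

/* Staged remediation: local remedy at most once, global remedy at
 * most once, in that order.
 */
#include <stdio.h>

static int needs_drain(int item) { return item % 2; }   /* stand-in check */

int main(void)
{
	int items[] = { 1, 3, 4, 5 };
	int drained = 0;                 /* 0 = none, 1 = local, 2 = global */

	for (unsigned i = 0; i < sizeof(items) / sizeof(items[0]); i++) {
		if (drained == 0 && needs_drain(items[i])) {
			printf("local drain\n");   /* lru_add_drain()     */
			drained = 1;
		}
		if (drained == 1 && needs_drain(items[i])) {
			printf("global drain\n");  /* lru_add_drain_all() */
			drained = 2;
		}
	}
	return 0;
}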
+diff --git a/mm/kmsan/core.c b/mm/kmsan/core.c
+index a495debf143632..abb79a6c076910 100644
+--- a/mm/kmsan/core.c
++++ b/mm/kmsan/core.c
+@@ -195,7 +195,8 @@ void kmsan_internal_set_shadow_origin(void *addr, size_t size, int b,
+ 				      u32 origin, bool checked)
+ {
+ 	u64 address = (u64)addr;
+-	u32 *shadow_start, *origin_start;
++	void *shadow_start;
++	u32 *aligned_shadow, *origin_start;
+ 	size_t pad = 0;
+ 
+ 	KMSAN_WARN_ON(!kmsan_metadata_is_contiguous(addr, size));
+@@ -214,9 +215,12 @@ void kmsan_internal_set_shadow_origin(void *addr, size_t size, int b,
+ 	}
+ 	__memset(shadow_start, b, size);
+ 
+-	if (!IS_ALIGNED(address, KMSAN_ORIGIN_SIZE)) {
++	if (IS_ALIGNED(address, KMSAN_ORIGIN_SIZE)) {
++		aligned_shadow = shadow_start;
++	} else {
+ 		pad = address % KMSAN_ORIGIN_SIZE;
+ 		address -= pad;
++		aligned_shadow = shadow_start - pad;
+ 		size += pad;
+ 	}
+ 	size = ALIGN(size, KMSAN_ORIGIN_SIZE);
+@@ -230,7 +234,7 @@ void kmsan_internal_set_shadow_origin(void *addr, size_t size, int b,
+ 	 * corresponding shadow slot is zero.
+ 	 */
+ 	for (int i = 0; i < size / KMSAN_ORIGIN_SIZE; i++) {
+-		if (origin || !shadow_start[i])
++		if (origin || !aligned_shadow[i])
+ 			origin_start[i] = origin;
+ 	}
+ }
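The kmsan fix keeps two pointers: shadow_start for the byte-wise memset, and aligned_shadow, backed up by the alignment padding, for the origin walk, so the loop no longer indexes past the mapped shadow. A userspace sketch of the align-down arithmetic (addresses and sizes illustrative):

/* When the start address is not origin-aligned, index from the
 * aligned-down base rather than from the shadow pointer for the
 * address itself.
 */
#include <stdint.h>
#include <stdio.h>

#define ORIGIN_SIZE 4

int main(void)
{
	uint8_t shadow[16] = { 0 };
	uintptr_t addr = 0x1002;               /* deliberately misaligned */
	uint8_t *shadow_start = &shadow[addr % sizeof(shadow)];
	uint8_t *aligned_shadow = shadow_start;
	size_t pad = 0;

	if (addr % ORIGIN_SIZE) {
		pad = addr % ORIGIN_SIZE;
		aligned_shadow = shadow_start - pad;  /* back up to alignment */
	}
	printf("pad=%zu, backed up %td bytes\n",
	       pad, shadow_start - aligned_shadow);
	return 0;
}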
+diff --git a/mm/kmsan/kmsan_test.c b/mm/kmsan/kmsan_test.c
+index 13236d579ebaa2..c95a8e72e49670 100644
+--- a/mm/kmsan/kmsan_test.c
++++ b/mm/kmsan/kmsan_test.c
+@@ -556,6 +556,21 @@ DEFINE_TEST_MEMSETXX(16)
+ DEFINE_TEST_MEMSETXX(32)
+ DEFINE_TEST_MEMSETXX(64)
+ 
++/* Test case: ensure that KMSAN does not access shadow memory out of bounds. */
++static void test_memset_on_guarded_buffer(struct kunit *test)
++{
++	void *buf = vmalloc(PAGE_SIZE);
++
++	kunit_info(test,
++		   "memset() on ends of guarded buffer should not crash\n");
++
++	for (size_t size = 0; size <= 128; size++) {
++		memset(buf, 0xff, size);
++		memset(buf + PAGE_SIZE - size, 0xff, size);
++	}
++	vfree(buf);
++}
++
+ static noinline void fibonacci(int *array, int size, int start)
+ {
+ 	if (start < 2 || (start == size))
+@@ -661,6 +676,7 @@ static struct kunit_case kmsan_test_cases[] = {
+ 	KUNIT_CASE(test_memset16),
+ 	KUNIT_CASE(test_memset32),
+ 	KUNIT_CASE(test_memset64),
++	KUNIT_CASE(test_memset_on_guarded_buffer),
+ 	KUNIT_CASE(test_long_origin_chain),
+ 	KUNIT_CASE(test_stackdepot_roundtrip),
+ 	KUNIT_CASE(test_unpoison_memory),
+diff --git a/mm/mlock.c b/mm/mlock.c
+index cde076fa7d5e5a..8c8d522efdd59c 100644
+--- a/mm/mlock.c
++++ b/mm/mlock.c
+@@ -255,7 +255,7 @@ void mlock_folio(struct folio *folio)
+ 
+ 	folio_get(folio);
+ 	if (!folio_batch_add(fbatch, mlock_lru(folio)) ||
+-	    folio_test_large(folio) || lru_cache_disabled())
++	    !folio_may_be_lru_cached(folio) || lru_cache_disabled())
+ 		mlock_folio_batch(fbatch);
+ 	local_unlock(&mlock_fbatch.lock);
+ }
+@@ -278,7 +278,7 @@ void mlock_new_folio(struct folio *folio)
+ 
+ 	folio_get(folio);
+ 	if (!folio_batch_add(fbatch, mlock_new(folio)) ||
+-	    folio_test_large(folio) || lru_cache_disabled())
++	    !folio_may_be_lru_cached(folio) || lru_cache_disabled())
+ 		mlock_folio_batch(fbatch);
+ 	local_unlock(&mlock_fbatch.lock);
+ }
+@@ -299,7 +299,7 @@ void munlock_folio(struct folio *folio)
+ 	 */
+ 	folio_get(folio);
+ 	if (!folio_batch_add(fbatch, folio) ||
+-	    folio_test_large(folio) || lru_cache_disabled())
++	    !folio_may_be_lru_cached(folio) || lru_cache_disabled())
+ 		mlock_folio_batch(fbatch);
+ 	local_unlock(&mlock_fbatch.lock);
+ }
+diff --git a/mm/swap.c b/mm/swap.c
+index 59f30a981c6f96..ff846915db4544 100644
+--- a/mm/swap.c
++++ b/mm/swap.c
+@@ -195,6 +195,10 @@ static void folio_batch_move_lru(struct folio_batch *fbatch, move_fn_t move_fn)
+ 	for (i = 0; i < folio_batch_count(fbatch); i++) {
+ 		struct folio *folio = fbatch->folios[i];
+ 
++		/* block memcg migration while the folio moves between lru */
++		if (move_fn != lru_add && !folio_test_clear_lru(folio))
++			continue;
++
+ 		folio_lruvec_relock_irqsave(folio, &lruvec, &flags);
+ 		move_fn(lruvec, folio);
+ 
+@@ -207,14 +211,10 @@ static void folio_batch_move_lru(struct folio_batch *fbatch, move_fn_t move_fn)
+ }
+ 
+ static void __folio_batch_add_and_move(struct folio_batch __percpu *fbatch,
+-		struct folio *folio, move_fn_t move_fn,
+-		bool on_lru, bool disable_irq)
++		struct folio *folio, move_fn_t move_fn, bool disable_irq)
+ {
+ 	unsigned long flags;
+ 
+-	if (on_lru && !folio_test_clear_lru(folio))
+-		return;
+-
+ 	folio_get(folio);
+ 
+ 	if (disable_irq)
+@@ -222,8 +222,8 @@ static void __folio_batch_add_and_move(struct folio_batch __percpu *fbatch,
+ 	else
+ 		local_lock(&cpu_fbatches.lock);
+ 
+-	if (!folio_batch_add(this_cpu_ptr(fbatch), folio) || folio_test_large(folio) ||
+-	    lru_cache_disabled())
++	if (!folio_batch_add(this_cpu_ptr(fbatch), folio) ||
++			!folio_may_be_lru_cached(folio) || lru_cache_disabled())
+ 		folio_batch_move_lru(this_cpu_ptr(fbatch), move_fn);
+ 
+ 	if (disable_irq)
+@@ -232,13 +232,13 @@ static void __folio_batch_add_and_move(struct folio_batch __percpu *fbatch,
+ 		local_unlock(&cpu_fbatches.lock);
+ }
+ 
+-#define folio_batch_add_and_move(folio, op, on_lru)						\
+-	__folio_batch_add_and_move(								\
+-		&cpu_fbatches.op,								\
+-		folio,										\
+-		op,										\
+-		on_lru,										\
+-		offsetof(struct cpu_fbatches, op) >= offsetof(struct cpu_fbatches, lock_irq)	\
++#define folio_batch_add_and_move(folio, op)		\
++	__folio_batch_add_and_move(			\
++		&cpu_fbatches.op,			\
++		folio,					\
++		op,					\
++		offsetof(struct cpu_fbatches, op) >=	\
++		offsetof(struct cpu_fbatches, lock_irq)	\
+ 	)
+ 
+ static void lru_move_tail(struct lruvec *lruvec, struct folio *folio)
+@@ -262,10 +262,10 @@ static void lru_move_tail(struct lruvec *lruvec, struct folio *folio)
+ void folio_rotate_reclaimable(struct folio *folio)
+ {
+ 	if (folio_test_locked(folio) || folio_test_dirty(folio) ||
+-	    folio_test_unevictable(folio))
++	    folio_test_unevictable(folio) || !folio_test_lru(folio))
+ 		return;
+ 
+-	folio_batch_add_and_move(folio, lru_move_tail, true);
++	folio_batch_add_and_move(folio, lru_move_tail);
+ }
+ 
+ void lru_note_cost(struct lruvec *lruvec, bool file,
+@@ -354,10 +354,11 @@ static void folio_activate_drain(int cpu)
+ 
+ void folio_activate(struct folio *folio)
+ {
+-	if (folio_test_active(folio) || folio_test_unevictable(folio))
++	if (folio_test_active(folio) || folio_test_unevictable(folio) ||
++	    !folio_test_lru(folio))
+ 		return;
+ 
+-	folio_batch_add_and_move(folio, lru_activate, true);
++	folio_batch_add_and_move(folio, lru_activate);
+ }
+ 
+ #else
+@@ -510,7 +511,7 @@ void folio_add_lru(struct folio *folio)
+ 	    lru_gen_in_fault() && !(current->flags & PF_MEMALLOC))
+ 		folio_set_active(folio);
+ 
+-	folio_batch_add_and_move(folio, lru_add, false);
++	folio_batch_add_and_move(folio, lru_add);
+ }
+ EXPORT_SYMBOL(folio_add_lru);
+ 
+@@ -685,10 +686,10 @@ void lru_add_drain_cpu(int cpu)
+ void deactivate_file_folio(struct folio *folio)
+ {
+ 	/* Deactivating an unevictable folio will not accelerate reclaim */
+-	if (folio_test_unevictable(folio))
++	if (folio_test_unevictable(folio) || !folio_test_lru(folio))
+ 		return;
+ 
+-	folio_batch_add_and_move(folio, lru_deactivate_file, true);
++	folio_batch_add_and_move(folio, lru_deactivate_file);
+ }
+ 
+ /*
+@@ -701,10 +702,11 @@ void deactivate_file_folio(struct folio *folio)
+  */
+ void folio_deactivate(struct folio *folio)
+ {
+-	if (folio_test_unevictable(folio) || !(folio_test_active(folio) || lru_gen_enabled()))
++	if (folio_test_unevictable(folio) || !folio_test_lru(folio) ||
++	    !(folio_test_active(folio) || lru_gen_enabled()))
+ 		return;
+ 
+-	folio_batch_add_and_move(folio, lru_deactivate, true);
++	folio_batch_add_and_move(folio, lru_deactivate);
+ }
+ 
+ /**
+@@ -717,10 +719,11 @@ void folio_deactivate(struct folio *folio)
+ void folio_mark_lazyfree(struct folio *folio)
+ {
+ 	if (!folio_test_anon(folio) || !folio_test_swapbacked(folio) ||
++	    !folio_test_lru(folio) ||
+ 	    folio_test_swapcache(folio) || folio_test_unevictable(folio))
+ 		return;
+ 
+-	folio_batch_add_and_move(folio, lru_lazyfree, true);
++	folio_batch_add_and_move(folio, lru_lazyfree);
+ }
+ 
+ void lru_add_drain(void)
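After the swap.c rework, callers do only a cheap racy folio_test_lru() peek to bail out early, while folio_batch_move_lru() performs the authoritative test-and-clear before operating on the folio. A minimal C11 sketch of that two-level check:

/* Racy peek for the fast path; atomic exchange for the real claim. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct folio { atomic_bool lru; };

static bool peek_lru(struct folio *f)          /* folio_test_lru() */
{
	return atomic_load(&f->lru);
}

static bool claim_lru(struct folio *f)         /* folio_test_clear_lru() */
{
	return atomic_exchange(&f->lru, false);
}

int main(void)
{
	struct folio f;

	atomic_init(&f.lru, true);
	if (!peek_lru(&f))
		return 0;                      /* cheap early bail-out */
	if (claim_lru(&f))
		printf("claimed folio for LRU move\n");
	return 0;
}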
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index 262ff30261d67d..1e537ed83ba4b2 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -3050,8 +3050,18 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
+ 
+ 	hci_dev_lock(hdev);
+ 
++	/* Check for existing connection:
++	 *
++	 * 1. If it doesn't exist then it must be receiver/slave role.
++	 * 2. If it does exist, confirm that it is connecting/BT_CONNECT in case
++	 *    of initiator/master role since there could be a collision where
++	 *    either side is attempting to connect or something like fuzz
++	 *    testing is trying to play tricks to destroy the hcon object before
++	 *    it even attempts to connect (e.g. hcon->state == BT_OPEN).
++	 */
+ 	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
+-	if (!conn) {
++	if (!conn ||
++	    (conn->role == HCI_ROLE_MASTER && conn->state != BT_CONNECT)) {
+ 		/* In case of error status and there is no connection pending
+ 		 * just unlock as there is nothing to cleanup.
+ 		 */
+@@ -5618,8 +5628,18 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
+ 	 */
+ 	hci_dev_clear_flag(hdev, HCI_LE_ADV);
+ 
+-	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr);
+-	if (!conn) {
++	/* Check for existing connection:
++	 *
++	 * 1. If it doesn't exist then use the role to create a new object.
++	 * 2. If it does exist, confirm that it is connecting/BT_CONNECT in case
++	 *    of initiator/master role since there could be a collision where
++	 *    either side is attempting to connect or something like fuzz
++	 *    testing is trying to play tricks to destroy the hcon object before
++	 *    it even attempts to connect (e.g. hcon->state == BT_OPEN).
++	 */
++	conn = hci_conn_hash_lookup_role(hdev, LE_LINK, role, bdaddr);
++	if (!conn ||
++	    (conn->role == HCI_ROLE_MASTER && conn->state != BT_CONNECT)) {
+ 		/* In case of error status and there is no connection pending
+ 		 * just unlock as there is nothing to cleanup.
+ 		 */
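The stricter lookup matches on address and role together, then rejects a master-role entry that is not in BT_CONNECT, so a stale or fuzz-created hcon can no longer be mistaken for the pending connection. An illustrative flat-array sketch of the filter (the kernel walks an RCU-protected list instead):

/* Match on both keys, then validate the state of what was found. */
#include <stdio.h>
#include <string.h>

enum { ROLE_MASTER, ROLE_SLAVE };
enum { BT_OPEN, BT_CONNECT };

struct conn { const char *addr; int role; int state; };

static struct conn *lookup(struct conn *tbl, int n,
			   const char *addr, int role)
{
	for (int i = 0; i < n; i++)
		if (tbl[i].role == role && !strcmp(tbl[i].addr, addr))
			return &tbl[i];
	return NULL;
}

int main(void)
{
	struct conn tbl[] = {
		{ "aa:bb", ROLE_MASTER, BT_OPEN },    /* stale/fuzzed hcon */
	};
	struct conn *c = lookup(tbl, 1, "aa:bb", ROLE_MASTER);

	if (!c || (c->role == ROLE_MASTER && c->state != BT_CONNECT))
		printf("reject: no valid pending connection\n");
	return 0;
}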
+diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
+index 5f5137764b80af..333f32a9fd2191 100644
+--- a/net/bluetooth/hci_sync.c
++++ b/net/bluetooth/hci_sync.c
+@@ -2604,6 +2604,13 @@ static int hci_resume_advertising_sync(struct hci_dev *hdev)
+ 			hci_remove_ext_adv_instance_sync(hdev, adv->instance,
+ 							 NULL);
+ 		}
++
++		/* If current advertising instance is set to instance 0x00
++		 * then we need to re-enable it.
++		 */
++		if (!hdev->cur_adv_instance)
++			err = hci_enable_ext_advertising_sync(hdev,
++							      hdev->cur_adv_instance);
+ 	} else {
+ 		/* Schedule for most recent instance to be restarted and begin
+ 		 * the software rotation loop
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index cf54593149ccee..6a92c03ee6f426 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -6603,7 +6603,7 @@ struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
+ 		return NULL;
+ 
+ 	while (data_len) {
+-		if (nr_frags == MAX_SKB_FRAGS - 1)
++		if (nr_frags == MAX_SKB_FRAGS)
+ 			goto failure;
+ 		while (order && PAGE_ALIGN(data_len) < (PAGE_SIZE << order))
+ 			order--;
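The skbuff change fixes an off-by-one: bailing out at MAX_SKB_FRAGS - 1 left the final frag slot unusable. A tiny sketch of the corrected bound:

/* With MAX_FRAGS slots, fail only once all of them are filled. */
#include <stdio.h>

#define MAX_FRAGS 4

int main(void)
{
	int nr_frags = 0;
	int data_len = 5;                /* needs 5 frags -> must fail */

	while (data_len) {
		if (nr_frags == MAX_FRAGS) {     /* was: MAX_FRAGS - 1 */
			printf("failure after filling %d frags\n", nr_frags);
			return 1;
		}
		nr_frags++;
		data_len--;
	}
	return 0;
}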
+diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c
+index 93aaea0006ba7e..c52ff9364ae8d6 100644
+--- a/net/ipv4/nexthop.c
++++ b/net/ipv4/nexthop.c
+@@ -2375,6 +2375,13 @@ static int replace_nexthop_single(struct net *net, struct nexthop *old,
+ 		return -EINVAL;
+ 	}
+ 
++	if (!list_empty(&old->grp_list) &&
++	    rtnl_dereference(new->nh_info)->fdb_nh !=
++	    rtnl_dereference(old->nh_info)->fdb_nh) {
++		NL_SET_ERR_MSG(extack, "Cannot change nexthop FDB status while in a group");
++		return -EINVAL;
++	}
++
+ 	err = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, new, extack);
+ 	if (err)
+ 		return err;
+diff --git a/net/smc/smc_loopback.c b/net/smc/smc_loopback.c
+index 3c5f64ca41153f..85f0b7853b1737 100644
+--- a/net/smc/smc_loopback.c
++++ b/net/smc/smc_loopback.c
+@@ -56,6 +56,7 @@ static int smc_lo_register_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb,
+ {
+ 	struct smc_lo_dmb_node *dmb_node, *tmp_node;
+ 	struct smc_lo_dev *ldev = smcd->priv;
++	struct folio *folio;
+ 	int sba_idx, rc;
+ 
+ 	/* check space for new dmb */
+@@ -74,13 +75,16 @@ static int smc_lo_register_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb,
+ 
+ 	dmb_node->sba_idx = sba_idx;
+ 	dmb_node->len = dmb->dmb_len;
+-	dmb_node->cpu_addr = kzalloc(dmb_node->len, GFP_KERNEL |
+-				     __GFP_NOWARN | __GFP_NORETRY |
+-				     __GFP_NOMEMALLOC);
+-	if (!dmb_node->cpu_addr) {
++
++	/* not critical; fail under memory pressure and fall back to TCP */
++	folio = folio_alloc(GFP_KERNEL | __GFP_NOWARN | __GFP_NOMEMALLOC |
++			    __GFP_NORETRY | __GFP_ZERO,
++			    get_order(dmb_node->len));
++	if (!folio) {
+ 		rc = -ENOMEM;
+ 		goto err_node;
+ 	}
++	dmb_node->cpu_addr = folio_address(folio);
+ 	dmb_node->dma_addr = SMC_DMA_ADDR_INVALID;
+ 	refcount_set(&dmb_node->refcnt, 1);
+ 
+@@ -122,7 +126,7 @@ static void __smc_lo_unregister_dmb(struct smc_lo_dev *ldev,
+ 	write_unlock_bh(&ldev->dmb_ht_lock);
+ 
+ 	clear_bit(dmb_node->sba_idx, ldev->sba_idx_mask);
+-	kvfree(dmb_node->cpu_addr);
++	folio_put(virt_to_folio(dmb_node->cpu_addr));
+ 	kfree(dmb_node);
+ 
+ 	if (atomic_dec_and_test(&ldev->dmb_cnt))
+diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
+index 6f99fd2d966c65..1e2f5ecd63248b 100644
+--- a/net/xfrm/xfrm_state.c
++++ b/net/xfrm/xfrm_state.c
+@@ -2502,6 +2502,8 @@ int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high,
+ 
+ 	for (h = 0; h < range; h++) {
+ 		u32 spi = (low == high) ? low : get_random_u32_inclusive(low, high);
++		if (spi == 0)
++			goto next;
+ 		newspi = htonl(spi);
+ 
+ 		spin_lock_bh(&net->xfrm.xfrm_state_lock);
+@@ -2517,6 +2519,7 @@ int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high,
+ 		xfrm_state_put(x0);
+ 		spin_unlock_bh(&net->xfrm.xfrm_state_lock);
+ 
++next:
+ 		if (signal_pending(current)) {
+ 			err = -ERESTARTSYS;
+ 			goto unlock;
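Zero is not a valid SPI, so a randomly drawn zero is now skipped instead of being installed. A small sketch of the skip-and-retry loop (pick() stands in for get_random_u32_inclusive()):

/* Skip the reserved value and draw again on the next iteration. */
#include <stdio.h>
#include <stdlib.h>

static unsigned int pick(unsigned int low, unsigned int high)
{
	return low == high ? low : low + rand() % (high - low + 1);
}

int main(void)
{
	for (int h = 0; h < 8; h++) {
		unsigned int spi = pick(0, 3);

		if (spi == 0)
			continue;        /* 0 is not a valid SPI */
		printf("candidate spi %u\n", spi);
	}
	return 0;
}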
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 5f061d2d9fc969..a41df821e15f7d 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -7272,6 +7272,11 @@ static void cs35l41_fixup_spi_two(struct hda_codec *codec, const struct hda_fixu
+ 	comp_generic_fixup(codec, action, "spi", "CSC3551", "-%s:00-cs35l41-hda.%d", 2);
+ }
+ 
++static void cs35l41_fixup_spi_one(struct hda_codec *codec, const struct hda_fixup *fix, int action)
++{
++	comp_generic_fixup(codec, action, "spi", "CSC3551", "-%s:00-cs35l41-hda.%d", 1);
++}
++
+ static void cs35l41_fixup_spi_four(struct hda_codec *codec, const struct hda_fixup *fix, int action)
+ {
+ 	comp_generic_fixup(codec, action, "spi", "CSC3551", "-%s:00-cs35l41-hda.%d", 4);
+@@ -7956,6 +7961,7 @@ enum {
+ 	ALC287_FIXUP_CS35L41_I2C_2,
+ 	ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED,
+ 	ALC287_FIXUP_CS35L41_I2C_4,
++	ALC245_FIXUP_CS35L41_SPI_1,
+ 	ALC245_FIXUP_CS35L41_SPI_2,
+ 	ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED,
+ 	ALC245_FIXUP_CS35L41_SPI_4,
+@@ -10067,6 +10073,10 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = cs35l41_fixup_spi_two,
+ 	},
++	[ALC245_FIXUP_CS35L41_SPI_1] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = cs35l41_fixup_spi_one,
++	},
+ 	[ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = cs35l41_fixup_spi_two,
+@@ -11001,6 +11011,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1043, 0x8398, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC),
+ 	SND_PCI_QUIRK(0x1043, 0x83ce, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC),
+ 	SND_PCI_QUIRK(0x1043, 0x8516, "ASUS X101CH", ALC269_FIXUP_ASUS_X101),
++	SND_PCI_QUIRK(0x1043, 0x88f4, "ASUS NUC14LNS", ALC245_FIXUP_CS35L41_SPI_1),
+ 	SND_PCI_QUIRK(0x104d, 0x9073, "Sony VAIO", ALC275_FIXUP_SONY_VAIO_GPIO2),
+ 	SND_PCI_QUIRK(0x104d, 0x907b, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
+ 	SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
+diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
+index 7bd87193c6177b..b663764644cd86 100644
+--- a/sound/usb/mixer_quirks.c
++++ b/sound/usb/mixer_quirks.c
+@@ -17,6 +17,7 @@
+ #include <linux/bitfield.h>
+ #include <linux/hid.h>
+ #include <linux/init.h>
++#include <linux/input.h>
+ #include <linux/math64.h>
+ #include <linux/slab.h>
+ #include <linux/usb.h>
+@@ -54,13 +55,13 @@ struct std_mono_table {
+  * version, we keep it mono for simplicity.
+  */
+ static int snd_create_std_mono_ctl_offset(struct usb_mixer_interface *mixer,
+-				unsigned int unitid,
+-				unsigned int control,
+-				unsigned int cmask,
+-				int val_type,
+-				unsigned int idx_off,
+-				const char *name,
+-				snd_kcontrol_tlv_rw_t *tlv_callback)
++					  unsigned int unitid,
++					  unsigned int control,
++					  unsigned int cmask,
++					  int val_type,
++					  unsigned int idx_off,
++					  const char *name,
++					  snd_kcontrol_tlv_rw_t *tlv_callback)
+ {
+ 	struct usb_mixer_elem_info *cval;
+ 	struct snd_kcontrol *kctl;
+@@ -77,7 +78,8 @@ static int snd_create_std_mono_ctl_offset(struct usb_mixer_interface *mixer,
+ 	cval->idx_off = idx_off;
+ 
+ 	/* get_min_max() is called only for integer volumes later,
+-	 * so provide a short-cut for booleans */
++	 * so provide a short-cut for booleans
++	 */
+ 	cval->min = 0;
+ 	cval->max = 1;
+ 	cval->res = 0;
+@@ -107,15 +109,16 @@ static int snd_create_std_mono_ctl_offset(struct usb_mixer_interface *mixer,
+ }
+ 
+ static int snd_create_std_mono_ctl(struct usb_mixer_interface *mixer,
+-				unsigned int unitid,
+-				unsigned int control,
+-				unsigned int cmask,
+-				int val_type,
+-				const char *name,
+-				snd_kcontrol_tlv_rw_t *tlv_callback)
++				   unsigned int unitid,
++				   unsigned int control,
++				   unsigned int cmask,
++				   int val_type,
++				   const char *name,
++				   snd_kcontrol_tlv_rw_t *tlv_callback)
+ {
+ 	return snd_create_std_mono_ctl_offset(mixer, unitid, control, cmask,
+-		val_type, 0 /* Offset */, name, tlv_callback);
++					      val_type, 0 /* Offset */,
++					      name, tlv_callback);
+ }
+ 
+ /*
+@@ -126,9 +129,10 @@ static int snd_create_std_mono_table(struct usb_mixer_interface *mixer,
+ {
+ 	int err;
+ 
+-	while (t->name != NULL) {
++	while (t->name) {
+ 		err = snd_create_std_mono_ctl(mixer, t->unitid, t->control,
+-				t->cmask, t->val_type, t->name, t->tlv_callback);
++					      t->cmask, t->val_type, t->name,
++					      t->tlv_callback);
+ 		if (err < 0)
+ 			return err;
+ 		t++;
+@@ -208,12 +212,11 @@ static void snd_usb_soundblaster_remote_complete(struct urb *urb)
+ 	if (code == rc->mute_code)
+ 		snd_usb_mixer_notify_id(mixer, rc->mute_mixer_id);
+ 	mixer->rc_code = code;
+-	wmb();
+ 	wake_up(&mixer->rc_waitq);
+ }
+ 
+ static long snd_usb_sbrc_hwdep_read(struct snd_hwdep *hw, char __user *buf,
+-				     long count, loff_t *offset)
++				    long count, loff_t *offset)
+ {
+ 	struct usb_mixer_interface *mixer = hw->private_data;
+ 	int err;
+@@ -233,7 +236,7 @@ static long snd_usb_sbrc_hwdep_read(struct snd_hwdep *hw, char __user *buf,
+ }
+ 
+ static __poll_t snd_usb_sbrc_hwdep_poll(struct snd_hwdep *hw, struct file *file,
+-					    poll_table *wait)
++					poll_table *wait)
+ {
+ 	struct usb_mixer_interface *mixer = hw->private_data;
+ 
+@@ -309,20 +312,20 @@ static int snd_audigy2nx_led_update(struct usb_mixer_interface *mixer,
+ 
+ 	if (chip->usb_id == USB_ID(0x041e, 0x3042))
+ 		err = snd_usb_ctl_msg(chip->dev,
+-			      usb_sndctrlpipe(chip->dev, 0), 0x24,
+-			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER,
+-			      !value, 0, NULL, 0);
++				      usb_sndctrlpipe(chip->dev, 0), 0x24,
++				      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER,
++				      !value, 0, NULL, 0);
+ 	/* USB X-Fi S51 Pro */
+ 	if (chip->usb_id == USB_ID(0x041e, 0x30df))
+ 		err = snd_usb_ctl_msg(chip->dev,
+-			      usb_sndctrlpipe(chip->dev, 0), 0x24,
+-			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER,
+-			      !value, 0, NULL, 0);
++				      usb_sndctrlpipe(chip->dev, 0), 0x24,
++				      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER,
++				      !value, 0, NULL, 0);
+ 	else
+ 		err = snd_usb_ctl_msg(chip->dev,
+-			      usb_sndctrlpipe(chip->dev, 0), 0x24,
+-			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER,
+-			      value, index + 2, NULL, 0);
++				      usb_sndctrlpipe(chip->dev, 0), 0x24,
++				      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER,
++				      value, index + 2, NULL, 0);
+ 	snd_usb_unlock_shutdown(chip);
+ 	return err;
+ }
+@@ -376,10 +379,10 @@ static int snd_audigy2nx_controls_create(struct usb_mixer_interface *mixer)
+ 		struct snd_kcontrol_new knew;
+ 
+ 		/* USB X-Fi S51 doesn't have a CMSS LED */
+-		if ((mixer->chip->usb_id == USB_ID(0x041e, 0x3042)) && i == 0)
++		if (mixer->chip->usb_id == USB_ID(0x041e, 0x3042) && i == 0)
+ 			continue;
+ 		/* USB X-Fi S51 Pro doesn't have one either */
+-		if ((mixer->chip->usb_id == USB_ID(0x041e, 0x30df)) && i == 0)
++		if (mixer->chip->usb_id == USB_ID(0x041e, 0x30df) && i == 0)
+ 			continue;
+ 		if (i > 1 && /* Live24ext has 2 LEDs only */
+ 			(mixer->chip->usb_id == USB_ID(0x041e, 0x3040) ||
+@@ -480,9 +483,9 @@ static int snd_emu0204_ch_switch_update(struct usb_mixer_interface *mixer,
+ 	buf[0] = 0x01;
+ 	buf[1] = value ? 0x02 : 0x01;
+ 	err = snd_usb_ctl_msg(chip->dev,
+-		      usb_sndctrlpipe(chip->dev, 0), UAC_SET_CUR,
+-		      USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_OUT,
+-		      0x0400, 0x0e00, buf, 2);
++			      usb_sndctrlpipe(chip->dev, 0), UAC_SET_CUR,
++			      USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_OUT,
++			      0x0400, 0x0e00, buf, 2);
+ 	snd_usb_unlock_shutdown(chip);
+ 	return err;
+ }
+@@ -528,6 +531,265 @@ static int snd_emu0204_controls_create(struct usb_mixer_interface *mixer)
+ 					  &snd_emu0204_control, NULL);
+ }
+ 
++#if IS_REACHABLE(CONFIG_INPUT)
++/*
++ * Sony DualSense controller (PS5) jack detection
++ *
++ * Since this is a UAC 1 device, it doesn't support jack detection.
++ * However, the controller hid-playstation driver reports HP & MIC
++ * insert events through a dedicated input device.
++ */
++
++#define SND_DUALSENSE_JACK_OUT_TERM_ID 3
++#define SND_DUALSENSE_JACK_IN_TERM_ID 4
++
++struct dualsense_mixer_elem_info {
++	struct usb_mixer_elem_info info;
++	struct input_handler ih;
++	struct input_device_id id_table[2];
++	bool connected;
++};
++
++static void snd_dualsense_ih_event(struct input_handle *handle,
++				   unsigned int type, unsigned int code,
++				   int value)
++{
++	struct dualsense_mixer_elem_info *mei;
++	struct usb_mixer_elem_list *me;
++
++	if (type != EV_SW)
++		return;
++
++	mei = container_of(handle->handler, struct dualsense_mixer_elem_info, ih);
++	me = &mei->info.head;
++
++	if ((me->id == SND_DUALSENSE_JACK_OUT_TERM_ID && code == SW_HEADPHONE_INSERT) ||
++	    (me->id == SND_DUALSENSE_JACK_IN_TERM_ID && code == SW_MICROPHONE_INSERT)) {
++		mei->connected = !!value;
++		snd_ctl_notify(me->mixer->chip->card, SNDRV_CTL_EVENT_MASK_VALUE,
++			       &me->kctl->id);
++	}
++}
++
++static bool snd_dualsense_ih_match(struct input_handler *handler,
++				   struct input_dev *dev)
++{
++	struct dualsense_mixer_elem_info *mei;
++	struct usb_device *snd_dev;
++	char *input_dev_path, *usb_dev_path;
++	size_t usb_dev_path_len;
++	bool match = false;
++
++	mei = container_of(handler, struct dualsense_mixer_elem_info, ih);
++	snd_dev = mei->info.head.mixer->chip->dev;
++
++	input_dev_path = kobject_get_path(&dev->dev.kobj, GFP_KERNEL);
++	if (!input_dev_path) {
++		dev_warn(&snd_dev->dev, "Failed to get input dev path\n");
++		return false;
++	}
++
++	usb_dev_path = kobject_get_path(&snd_dev->dev.kobj, GFP_KERNEL);
++	if (!usb_dev_path) {
++		dev_warn(&snd_dev->dev, "Failed to get USB dev path\n");
++		goto free_paths;
++	}
++
++	/*
++	 * Ensure the VID:PID matched input device supposedly owned by the
++	 * hid-playstation driver belongs to the actual hardware handled by
++	 * the current USB audio device, which implies input_dev_path being
++	 * a subpath of usb_dev_path.
++	 *
++	 * This verification is necessary when there is more than one identical
++	 * controller attached to the host system.
++	 */
++	usb_dev_path_len = strlen(usb_dev_path);
++	if (usb_dev_path_len >= strlen(input_dev_path))
++		goto free_paths;
++
++	usb_dev_path[usb_dev_path_len] = '/';
++	match = !memcmp(input_dev_path, usb_dev_path, usb_dev_path_len + 1);
++
++free_paths:
++	kfree(input_dev_path);
++	kfree(usb_dev_path);
++
++	return match;
++}
++
++static int snd_dualsense_ih_connect(struct input_handler *handler,
++				    struct input_dev *dev,
++				    const struct input_device_id *id)
++{
++	struct input_handle *handle;
++	int err;
++
++	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
++	if (!handle)
++		return -ENOMEM;
++
++	handle->dev = dev;
++	handle->handler = handler;
++	handle->name = handler->name;
++
++	err = input_register_handle(handle);
++	if (err)
++		goto err_free;
++
++	err = input_open_device(handle);
++	if (err)
++		goto err_unregister;
++
++	return 0;
++
++err_unregister:
++	input_unregister_handle(handle);
++err_free:
++	kfree(handle);
++	return err;
++}
++
++static void snd_dualsense_ih_disconnect(struct input_handle *handle)
++{
++	input_close_device(handle);
++	input_unregister_handle(handle);
++	kfree(handle);
++}
++
++static void snd_dualsense_ih_start(struct input_handle *handle)
++{
++	struct dualsense_mixer_elem_info *mei;
++	struct usb_mixer_elem_list *me;
++	int status = -1;
++
++	mei = container_of(handle->handler, struct dualsense_mixer_elem_info, ih);
++	me = &mei->info.head;
++
++	if (me->id == SND_DUALSENSE_JACK_OUT_TERM_ID &&
++	    test_bit(SW_HEADPHONE_INSERT, handle->dev->swbit))
++		status = test_bit(SW_HEADPHONE_INSERT, handle->dev->sw);
++	else if (me->id == SND_DUALSENSE_JACK_IN_TERM_ID &&
++		 test_bit(SW_MICROPHONE_INSERT, handle->dev->swbit))
++		status = test_bit(SW_MICROPHONE_INSERT, handle->dev->sw);
++
++	if (status >= 0) {
++		mei->connected = !!status;
++		snd_ctl_notify(me->mixer->chip->card, SNDRV_CTL_EVENT_MASK_VALUE,
++			       &me->kctl->id);
++	}
++}
++
++static int snd_dualsense_jack_get(struct snd_kcontrol *kctl,
++				  struct snd_ctl_elem_value *ucontrol)
++{
++	struct dualsense_mixer_elem_info *mei = snd_kcontrol_chip(kctl);
++
++	ucontrol->value.integer.value[0] = mei->connected;
++
++	return 0;
++}
++
++static const struct snd_kcontrol_new snd_dualsense_jack_control = {
++	.iface = SNDRV_CTL_ELEM_IFACE_CARD,
++	.access = SNDRV_CTL_ELEM_ACCESS_READ,
++	.info = snd_ctl_boolean_mono_info,
++	.get = snd_dualsense_jack_get,
++};
++
++static int snd_dualsense_resume_jack(struct usb_mixer_elem_list *list)
++{
++	snd_ctl_notify(list->mixer->chip->card, SNDRV_CTL_EVENT_MASK_VALUE,
++		       &list->kctl->id);
++	return 0;
++}
++
++static void snd_dualsense_mixer_elem_free(struct snd_kcontrol *kctl)
++{
++	struct dualsense_mixer_elem_info *mei = snd_kcontrol_chip(kctl);
++
++	if (mei->ih.event)
++		input_unregister_handler(&mei->ih);
++
++	snd_usb_mixer_elem_free(kctl);
++}
++
++static int snd_dualsense_jack_create(struct usb_mixer_interface *mixer,
++				     const char *name, bool is_output)
++{
++	struct dualsense_mixer_elem_info *mei;
++	struct input_device_id *idev_id;
++	struct snd_kcontrol *kctl;
++	int err;
++
++	mei = kzalloc(sizeof(*mei), GFP_KERNEL);
++	if (!mei)
++		return -ENOMEM;
++
++	snd_usb_mixer_elem_init_std(&mei->info.head, mixer,
++				    is_output ? SND_DUALSENSE_JACK_OUT_TERM_ID :
++						SND_DUALSENSE_JACK_IN_TERM_ID);
++
++	mei->info.head.resume = snd_dualsense_resume_jack;
++	mei->info.val_type = USB_MIXER_BOOLEAN;
++	mei->info.channels = 1;
++	mei->info.min = 0;
++	mei->info.max = 1;
++
++	kctl = snd_ctl_new1(&snd_dualsense_jack_control, mei);
++	if (!kctl) {
++		kfree(mei);
++		return -ENOMEM;
++	}
++
++	strscpy(kctl->id.name, name, sizeof(kctl->id.name));
++	kctl->private_free = snd_dualsense_mixer_elem_free;
++
++	err = snd_usb_mixer_add_control(&mei->info.head, kctl);
++	if (err)
++		return err;
++
++	idev_id = &mei->id_table[0];
++	idev_id->flags = INPUT_DEVICE_ID_MATCH_VENDOR | INPUT_DEVICE_ID_MATCH_PRODUCT |
++			 INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_SWBIT;
++	idev_id->vendor = USB_ID_VENDOR(mixer->chip->usb_id);
++	idev_id->product = USB_ID_PRODUCT(mixer->chip->usb_id);
++	idev_id->evbit[BIT_WORD(EV_SW)] = BIT_MASK(EV_SW);
++	if (is_output)
++		idev_id->swbit[BIT_WORD(SW_HEADPHONE_INSERT)] = BIT_MASK(SW_HEADPHONE_INSERT);
++	else
++		idev_id->swbit[BIT_WORD(SW_MICROPHONE_INSERT)] = BIT_MASK(SW_MICROPHONE_INSERT);
++
++	mei->ih.event = snd_dualsense_ih_event;
++	mei->ih.match = snd_dualsense_ih_match;
++	mei->ih.connect = snd_dualsense_ih_connect;
++	mei->ih.disconnect = snd_dualsense_ih_disconnect;
++	mei->ih.start = snd_dualsense_ih_start;
++	mei->ih.name = name;
++	mei->ih.id_table = mei->id_table;
++
++	err = input_register_handler(&mei->ih);
++	if (err) {
++		dev_warn(&mixer->chip->dev->dev,
++			 "Could not register input handler: %d\n", err);
++		mei->ih.event = NULL;
++	}
++
++	return 0;
++}
++
++static int snd_dualsense_controls_create(struct usb_mixer_interface *mixer)
++{
++	int err;
++
++	err = snd_dualsense_jack_create(mixer, "Headphone Jack", true);
++	if (err < 0)
++		return err;
++
++	return snd_dualsense_jack_create(mixer, "Headset Mic Jack", false);
++}
++#endif /* IS_REACHABLE(CONFIG_INPUT) */
++
+ /* ASUS Xonar U1 / U3 controls */
+ 
+ static int snd_xonar_u1_switch_get(struct snd_kcontrol *kcontrol,
+@@ -1020,7 +1282,7 @@ static int snd_nativeinstruments_create_mixer(struct usb_mixer_interface *mixer,
+ /* M-Audio FastTrack Ultra quirks */
+ /* FTU Effect switch (also used by C400/C600) */
+ static int snd_ftu_eff_switch_info(struct snd_kcontrol *kcontrol,
+-					struct snd_ctl_elem_info *uinfo)
++				   struct snd_ctl_elem_info *uinfo)
+ {
+ 	static const char *const texts[8] = {
+ 		"Room 1", "Room 2", "Room 3", "Hall 1",
+@@ -1054,7 +1316,7 @@ static int snd_ftu_eff_switch_init(struct usb_mixer_interface *mixer,
+ }
+ 
+ static int snd_ftu_eff_switch_get(struct snd_kcontrol *kctl,
+-					struct snd_ctl_elem_value *ucontrol)
++				  struct snd_ctl_elem_value *ucontrol)
+ {
+ 	ucontrol->value.enumerated.item[0] = kctl->private_value >> 24;
+ 	return 0;
+@@ -1085,7 +1347,7 @@ static int snd_ftu_eff_switch_update(struct usb_mixer_elem_list *list)
+ }
+ 
+ static int snd_ftu_eff_switch_put(struct snd_kcontrol *kctl,
+-					struct snd_ctl_elem_value *ucontrol)
++				  struct snd_ctl_elem_value *ucontrol)
+ {
+ 	struct usb_mixer_elem_list *list = snd_kcontrol_chip(kctl);
+ 	unsigned int pval = list->kctl->private_value;
+@@ -1103,7 +1365,7 @@ static int snd_ftu_eff_switch_put(struct snd_kcontrol *kctl,
+ }
+ 
+ static int snd_ftu_create_effect_switch(struct usb_mixer_interface *mixer,
+-	int validx, int bUnitID)
++					int validx, int bUnitID)
+ {
+ 	static struct snd_kcontrol_new template = {
+ 		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+@@ -1142,22 +1404,22 @@ static int snd_ftu_create_volume_ctls(struct usb_mixer_interface *mixer)
+ 		for (in = 0; in < 8; in++) {
+ 			cmask = BIT(in);
+ 			snprintf(name, sizeof(name),
+-				"AIn%d - Out%d Capture Volume",
+-				in  + 1, out + 1);
++				 "AIn%d - Out%d Capture Volume",
++				 in  + 1, out + 1);
+ 			err = snd_create_std_mono_ctl(mixer, id, control,
+-							cmask, val_type, name,
+-							&snd_usb_mixer_vol_tlv);
++						      cmask, val_type, name,
++						      &snd_usb_mixer_vol_tlv);
+ 			if (err < 0)
+ 				return err;
+ 		}
+ 		for (in = 8; in < 16; in++) {
+ 			cmask = BIT(in);
+ 			snprintf(name, sizeof(name),
+-				"DIn%d - Out%d Playback Volume",
+-				in - 7, out + 1);
++				 "DIn%d - Out%d Playback Volume",
++				 in - 7, out + 1);
+ 			err = snd_create_std_mono_ctl(mixer, id, control,
+-							cmask, val_type, name,
+-							&snd_usb_mixer_vol_tlv);
++						      cmask, val_type, name,
++						      &snd_usb_mixer_vol_tlv);
+ 			if (err < 0)
+ 				return err;
+ 		}
+@@ -1218,10 +1480,10 @@ static int snd_ftu_create_effect_return_ctls(struct usb_mixer_interface *mixer)
+ 	for (ch = 0; ch < 4; ++ch) {
+ 		cmask = BIT(ch);
+ 		snprintf(name, sizeof(name),
+-			"Effect Return %d Volume", ch + 1);
++			 "Effect Return %d Volume", ch + 1);
+ 		err = snd_create_std_mono_ctl(mixer, id, control,
+-						cmask, val_type, name,
+-						snd_usb_mixer_vol_tlv);
++					      cmask, val_type, name,
++					      snd_usb_mixer_vol_tlv);
+ 		if (err < 0)
+ 			return err;
+ 	}
+@@ -1242,20 +1504,20 @@ static int snd_ftu_create_effect_send_ctls(struct usb_mixer_interface *mixer)
+ 	for (ch = 0; ch < 8; ++ch) {
+ 		cmask = BIT(ch);
+ 		snprintf(name, sizeof(name),
+-			"Effect Send AIn%d Volume", ch + 1);
++			 "Effect Send AIn%d Volume", ch + 1);
+ 		err = snd_create_std_mono_ctl(mixer, id, control, cmask,
+-						val_type, name,
+-						snd_usb_mixer_vol_tlv);
++					      val_type, name,
++					      snd_usb_mixer_vol_tlv);
+ 		if (err < 0)
+ 			return err;
+ 	}
+ 	for (ch = 8; ch < 16; ++ch) {
+ 		cmask = BIT(ch);
+ 		snprintf(name, sizeof(name),
+-			"Effect Send DIn%d Volume", ch - 7);
++			 "Effect Send DIn%d Volume", ch - 7);
+ 		err = snd_create_std_mono_ctl(mixer, id, control, cmask,
+-						val_type, name,
+-						snd_usb_mixer_vol_tlv);
++					      val_type, name,
++					      snd_usb_mixer_vol_tlv);
+ 		if (err < 0)
+ 			return err;
+ 	}
+@@ -1345,19 +1607,19 @@ static int snd_c400_create_vol_ctls(struct usb_mixer_interface *mixer)
+ 		for (out = 0; out < num_outs; out++) {
+ 			if (chan < num_outs) {
+ 				snprintf(name, sizeof(name),
+-					"PCM%d-Out%d Playback Volume",
+-					chan + 1, out + 1);
++					 "PCM%d-Out%d Playback Volume",
++					 chan + 1, out + 1);
+ 			} else {
+ 				snprintf(name, sizeof(name),
+-					"In%d-Out%d Playback Volume",
+-					chan - num_outs + 1, out + 1);
++					 "In%d-Out%d Playback Volume",
++					 chan - num_outs + 1, out + 1);
+ 			}
+ 
+ 			cmask = (out == 0) ? 0 : BIT(out - 1);
+ 			offset = chan * num_outs;
+ 			err = snd_create_std_mono_ctl_offset(mixer, id, control,
+-						cmask, val_type, offset, name,
+-						&snd_usb_mixer_vol_tlv);
++							     cmask, val_type, offset, name,
++							     &snd_usb_mixer_vol_tlv);
+ 			if (err < 0)
+ 				return err;
+ 		}
+@@ -1376,7 +1638,7 @@ static int snd_c400_create_effect_volume_ctl(struct usb_mixer_interface *mixer)
+ 	const unsigned int cmask = 0;
+ 
+ 	return snd_create_std_mono_ctl(mixer, id, control, cmask, val_type,
+-					name, snd_usb_mixer_vol_tlv);
++				       name, snd_usb_mixer_vol_tlv);
+ }
+ 
+ /* This control needs a volume quirk, see mixer.c */
+@@ -1389,7 +1651,7 @@ static int snd_c400_create_effect_duration_ctl(struct usb_mixer_interface *mixer
+ 	const unsigned int cmask = 0;
+ 
+ 	return snd_create_std_mono_ctl(mixer, id, control, cmask, val_type,
+-					name, snd_usb_mixer_vol_tlv);
++				       name, snd_usb_mixer_vol_tlv);
+ }
+ 
+ /* This control needs a volume quirk, see mixer.c */
+@@ -1402,7 +1664,7 @@ static int snd_c400_create_effect_feedback_ctl(struct usb_mixer_interface *mixer
+ 	const unsigned int cmask = 0;
+ 
+ 	return snd_create_std_mono_ctl(mixer, id, control, cmask, val_type,
+-					name, NULL);
++				       name, NULL);
+ }
+ 
+ static int snd_c400_create_effect_vol_ctls(struct usb_mixer_interface *mixer)
+@@ -1431,18 +1693,18 @@ static int snd_c400_create_effect_vol_ctls(struct usb_mixer_interface *mixer)
+ 	for (chan = 0; chan < num_outs + num_ins; chan++) {
+ 		if (chan < num_outs) {
+ 			snprintf(name, sizeof(name),
+-				"Effect Send DOut%d",
+-				chan + 1);
++				 "Effect Send DOut%d",
++				 chan + 1);
+ 		} else {
+ 			snprintf(name, sizeof(name),
+-				"Effect Send AIn%d",
+-				chan - num_outs + 1);
++				 "Effect Send AIn%d",
++				 chan - num_outs + 1);
+ 		}
+ 
+ 		cmask = (chan == 0) ? 0 : BIT(chan - 1);
+ 		err = snd_create_std_mono_ctl(mixer, id, control,
+-						cmask, val_type, name,
+-						&snd_usb_mixer_vol_tlv);
++					      cmask, val_type, name,
++					      &snd_usb_mixer_vol_tlv);
+ 		if (err < 0)
+ 			return err;
+ 	}
+@@ -1477,14 +1739,14 @@ static int snd_c400_create_effect_ret_vol_ctls(struct usb_mixer_interface *mixer
+ 
+ 	for (chan = 0; chan < num_outs; chan++) {
+ 		snprintf(name, sizeof(name),
+-			"Effect Return %d",
+-			chan + 1);
++			 "Effect Return %d",
++			 chan + 1);
+ 
+ 		cmask = (chan == 0) ? 0 :
+ 			BIT(chan + (chan % 2) * num_outs - 1);
+ 		err = snd_create_std_mono_ctl_offset(mixer, id, control,
+-						cmask, val_type, offset, name,
+-						&snd_usb_mixer_vol_tlv);
++						     cmask, val_type, offset, name,
++						     &snd_usb_mixer_vol_tlv);
+ 		if (err < 0)
+ 			return err;
+ 	}
+@@ -1625,7 +1887,7 @@ static const struct std_mono_table ebox44_table[] = {
+  *
+  */
+ static int snd_microii_spdif_info(struct snd_kcontrol *kcontrol,
+-	struct snd_ctl_elem_info *uinfo)
++				  struct snd_ctl_elem_info *uinfo)
+ {
+ 	uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
+ 	uinfo->count = 1;
+@@ -1633,7 +1895,7 @@ static int snd_microii_spdif_info(struct snd_kcontrol *kcontrol,
+ }
+ 
+ static int snd_microii_spdif_default_get(struct snd_kcontrol *kcontrol,
+-	struct snd_ctl_elem_value *ucontrol)
++					 struct snd_ctl_elem_value *ucontrol)
+ {
+ 	struct usb_mixer_elem_list *list = snd_kcontrol_chip(kcontrol);
+ 	struct snd_usb_audio *chip = list->mixer->chip;
+@@ -1666,13 +1928,13 @@ static int snd_microii_spdif_default_get(struct snd_kcontrol *kcontrol,
+ 	ep = get_endpoint(alts, 0)->bEndpointAddress;
+ 
+ 	err = snd_usb_ctl_msg(chip->dev,
+-			usb_rcvctrlpipe(chip->dev, 0),
+-			UAC_GET_CUR,
+-			USB_TYPE_CLASS | USB_RECIP_ENDPOINT | USB_DIR_IN,
+-			UAC_EP_CS_ATTR_SAMPLE_RATE << 8,
+-			ep,
+-			data,
+-			sizeof(data));
++			      usb_rcvctrlpipe(chip->dev, 0),
++			      UAC_GET_CUR,
++			      USB_TYPE_CLASS | USB_RECIP_ENDPOINT | USB_DIR_IN,
++			      UAC_EP_CS_ATTR_SAMPLE_RATE << 8,
++			      ep,
++			      data,
++			      sizeof(data));
+ 	if (err < 0)
+ 		goto end;
+ 
+@@ -1699,26 +1961,26 @@ static int snd_microii_spdif_default_update(struct usb_mixer_elem_list *list)
+ 
+ 	reg = ((pval >> 4) & 0xf0) | (pval & 0x0f);
+ 	err = snd_usb_ctl_msg(chip->dev,
+-			usb_sndctrlpipe(chip->dev, 0),
+-			UAC_SET_CUR,
+-			USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER,
+-			reg,
+-			2,
+-			NULL,
+-			0);
++			      usb_sndctrlpipe(chip->dev, 0),
++			      UAC_SET_CUR,
++			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER,
++			      reg,
++			      2,
++			      NULL,
++			      0);
+ 	if (err < 0)
+ 		goto end;
+ 
+ 	reg = (pval & IEC958_AES0_NONAUDIO) ? 0xa0 : 0x20;
+ 	reg |= (pval >> 12) & 0x0f;
+ 	err = snd_usb_ctl_msg(chip->dev,
+-			usb_sndctrlpipe(chip->dev, 0),
+-			UAC_SET_CUR,
+-			USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER,
+-			reg,
+-			3,
+-			NULL,
+-			0);
++			      usb_sndctrlpipe(chip->dev, 0),
++			      UAC_SET_CUR,
++			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER,
++			      reg,
++			      3,
++			      NULL,
++			      0);
+ 	if (err < 0)
+ 		goto end;
+ 
+@@ -1728,13 +1990,14 @@ static int snd_microii_spdif_default_update(struct usb_mixer_elem_list *list)
+ }
+ 
+ static int snd_microii_spdif_default_put(struct snd_kcontrol *kcontrol,
+-	struct snd_ctl_elem_value *ucontrol)
++					 struct snd_ctl_elem_value *ucontrol)
+ {
+ 	struct usb_mixer_elem_list *list = snd_kcontrol_chip(kcontrol);
+ 	unsigned int pval, pval_old;
+ 	int err;
+ 
+-	pval = pval_old = kcontrol->private_value;
++	pval = kcontrol->private_value;
++	pval_old = pval;
+ 	pval &= 0xfffff0f0;
+ 	pval |= (ucontrol->value.iec958.status[1] & 0x0f) << 8;
+ 	pval |= (ucontrol->value.iec958.status[0] & 0x0f);
+@@ -1755,7 +2018,7 @@ static int snd_microii_spdif_default_put(struct snd_kcontrol *kcontrol,
+ }
+ 
+ static int snd_microii_spdif_mask_get(struct snd_kcontrol *kcontrol,
+-	struct snd_ctl_elem_value *ucontrol)
++				      struct snd_ctl_elem_value *ucontrol)
+ {
+ 	ucontrol->value.iec958.status[0] = 0x0f;
+ 	ucontrol->value.iec958.status[1] = 0xff;
+@@ -1766,7 +2029,7 @@ static int snd_microii_spdif_mask_get(struct snd_kcontrol *kcontrol,
+ }
+ 
+ static int snd_microii_spdif_switch_get(struct snd_kcontrol *kcontrol,
+-	struct snd_ctl_elem_value *ucontrol)
++					struct snd_ctl_elem_value *ucontrol)
+ {
+ 	ucontrol->value.integer.value[0] = !(kcontrol->private_value & 0x02);
+ 
+@@ -1784,20 +2047,20 @@ static int snd_microii_spdif_switch_update(struct usb_mixer_elem_list *list)
+ 		return err;
+ 
+ 	err = snd_usb_ctl_msg(chip->dev,
+-			usb_sndctrlpipe(chip->dev, 0),
+-			UAC_SET_CUR,
+-			USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER,
+-			reg,
+-			9,
+-			NULL,
+-			0);
++			      usb_sndctrlpipe(chip->dev, 0),
++			      UAC_SET_CUR,
++			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER,
++			      reg,
++			      9,
++			      NULL,
++			      0);
+ 
+ 	snd_usb_unlock_shutdown(chip);
+ 	return err;
+ }
+ 
+ static int snd_microii_spdif_switch_put(struct snd_kcontrol *kcontrol,
+-	struct snd_ctl_elem_value *ucontrol)
++					struct snd_ctl_elem_value *ucontrol)
+ {
+ 	struct usb_mixer_elem_list *list = snd_kcontrol_chip(kcontrol);
+ 	u8 reg;
+@@ -1882,9 +2145,9 @@ static int snd_soundblaster_e1_switch_update(struct usb_mixer_interface *mixer,
+ 	if (err < 0)
+ 		return err;
+ 	err = snd_usb_ctl_msg(chip->dev,
+-			usb_sndctrlpipe(chip->dev, 0), HID_REQ_SET_REPORT,
+-			USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
+-			0x0202, 3, buff, 2);
++			      usb_sndctrlpipe(chip->dev, 0), HID_REQ_SET_REPORT,
++			      USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
++			      0x0202, 3, buff, 2);
+ 	snd_usb_unlock_shutdown(chip);
+ 	return err;
+ }
+@@ -3234,7 +3497,7 @@ static int snd_rme_digiface_enum_put(struct snd_kcontrol *kcontrol,
+ }
+ 
+ static int snd_rme_digiface_current_sync_get(struct snd_kcontrol *kcontrol,
+-				     struct snd_ctl_elem_value *ucontrol)
++					     struct snd_ctl_elem_value *ucontrol)
+ {
+ 	int ret = snd_rme_digiface_enum_get(kcontrol, ucontrol);
+ 
+@@ -3806,7 +4069,7 @@ static const struct snd_djm_device snd_djm_devices[] = {
+ 
+ 
+ static int snd_djm_controls_info(struct snd_kcontrol *kctl,
+-				struct snd_ctl_elem_info *info)
++				 struct snd_ctl_elem_info *info)
+ {
+ 	unsigned long private_value = kctl->private_value;
+ 	u8 device_idx = (private_value & SND_DJM_DEVICE_MASK) >> SND_DJM_DEVICE_SHIFT;
+@@ -3825,8 +4088,8 @@ static int snd_djm_controls_info(struct snd_kcontrol *kctl,
+ 		info->value.enumerated.item = noptions - 1;
+ 
+ 	name = snd_djm_get_label(device_idx,
+-				ctl->options[info->value.enumerated.item],
+-				ctl->wIndex);
++				 ctl->options[info->value.enumerated.item],
++				 ctl->wIndex);
+ 	if (!name)
+ 		return -EINVAL;
+ 
+@@ -3838,25 +4101,25 @@ static int snd_djm_controls_info(struct snd_kcontrol *kctl,
+ }
+ 
+ static int snd_djm_controls_update(struct usb_mixer_interface *mixer,
+-				u8 device_idx, u8 group, u16 value)
++				   u8 device_idx, u8 group, u16 value)
+ {
+ 	int err;
+ 	const struct snd_djm_device *device = &snd_djm_devices[device_idx];
+ 
+-	if ((group >= device->ncontrols) || value >= device->controls[group].noptions)
++	if (group >= device->ncontrols || value >= device->controls[group].noptions)
+ 		return -EINVAL;
+ 
+ 	err = snd_usb_lock_shutdown(mixer->chip);
+ 	if (err)
+ 		return err;
+ 
+-	err = snd_usb_ctl_msg(
+-		mixer->chip->dev, usb_sndctrlpipe(mixer->chip->dev, 0),
+-		USB_REQ_SET_FEATURE,
+-		USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+-		device->controls[group].options[value],
+-		device->controls[group].wIndex,
+-		NULL, 0);
++	err = snd_usb_ctl_msg(mixer->chip->dev,
++			      usb_sndctrlpipe(mixer->chip->dev, 0),
++			      USB_REQ_SET_FEATURE,
++			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
++			      device->controls[group].options[value],
++			      device->controls[group].wIndex,
++			      NULL, 0);
+ 
+ 	snd_usb_unlock_shutdown(mixer->chip);
+ 	return err;
+@@ -3897,7 +4160,7 @@ static int snd_djm_controls_resume(struct usb_mixer_elem_list *list)
+ }
+ 
+ static int snd_djm_controls_create(struct usb_mixer_interface *mixer,
+-		const u8 device_idx)
++				   const u8 device_idx)
+ {
+ 	int err, i;
+ 	u16 value;
+@@ -3916,10 +4179,10 @@ static int snd_djm_controls_create(struct usb_mixer_interface *mixer,
+ 	for (i = 0; i < device->ncontrols; i++) {
+ 		value = device->controls[i].default_value;
+ 		knew.name = device->controls[i].name;
+-		knew.private_value = (
++		knew.private_value =
+ 			((unsigned long)device_idx << SND_DJM_DEVICE_SHIFT) |
+ 			(i << SND_DJM_GROUP_SHIFT) |
+-			value);
++			value;
+ 		err = snd_djm_controls_update(mixer, device_idx, i, value);
+ 		if (err)
+ 			return err;
+@@ -3961,6 +4224,13 @@ int snd_usb_mixer_apply_create_quirk(struct usb_mixer_interface *mixer)
+ 		err = snd_emu0204_controls_create(mixer);
+ 		break;
+ 
++#if IS_REACHABLE(CONFIG_INPUT)
++	case USB_ID(0x054c, 0x0ce6): /* Sony DualSense controller (PS5) */
++	case USB_ID(0x054c, 0x0df2): /* Sony DualSense Edge controller (PS5) */
++		err = snd_dualsense_controls_create(mixer);
++		break;
++#endif /* IS_REACHABLE(CONFIG_INPUT) */
++
+ 	case USB_ID(0x0763, 0x2030): /* M-Audio Fast Track C400 */
+ 	case USB_ID(0x0763, 0x2031): /* M-Audio Fast Track C400 */
+ 		err = snd_c400_create_mixer(mixer);
+@@ -3986,13 +4256,15 @@ int snd_usb_mixer_apply_create_quirk(struct usb_mixer_interface *mixer)
+ 		break;
+ 
+ 	case USB_ID(0x17cc, 0x1011): /* Traktor Audio 6 */
+-		err = snd_nativeinstruments_create_mixer(mixer,
++		err = snd_nativeinstruments_create_mixer(/* checkpatch hack */
++				mixer,
+ 				snd_nativeinstruments_ta6_mixers,
+ 				ARRAY_SIZE(snd_nativeinstruments_ta6_mixers));
+ 		break;
+ 
+ 	case USB_ID(0x17cc, 0x1021): /* Traktor Audio 10 */
+-		err = snd_nativeinstruments_create_mixer(mixer,
++		err = snd_nativeinstruments_create_mixer(/* checkpatch hack */
++				mixer,
+ 				snd_nativeinstruments_ta10_mixers,
+ 				ARRAY_SIZE(snd_nativeinstruments_ta10_mixers));
+ 		break;
+@@ -4127,7 +4399,8 @@ static void snd_dragonfly_quirk_db_scale(struct usb_mixer_interface *mixer,
+ 					 struct snd_kcontrol *kctl)
+ {
+ 	/* Approximation using 10 ranges based on output measurement on hw v1.2.
+-	 * This seems close to the cubic mapping e.g. alsamixer uses. */
++	 * This seems close to the cubic mapping e.g. alsamixer uses.
++	 */
+ 	static const DECLARE_TLV_DB_RANGE(scale,
+ 		 0,  1, TLV_DB_MINMAX_ITEM(-5300, -4970),
+ 		 2,  5, TLV_DB_MINMAX_ITEM(-4710, -4160),
+@@ -4211,16 +4484,12 @@ void snd_usb_mixer_fu_apply_quirk(struct usb_mixer_interface *mixer,
+ 		if (unitid == 7 && cval->control == UAC_FU_VOLUME)
+ 			snd_dragonfly_quirk_db_scale(mixer, cval, kctl);
+ 		break;
++	}
++
+ 	/* lowest playback value is muted on some devices */
+-	case USB_ID(0x0572, 0x1b09): /* Conexant Systems (Rockwell), Inc. */
+-	case USB_ID(0x0d8c, 0x000c): /* C-Media */
+-	case USB_ID(0x0d8c, 0x0014): /* C-Media */
+-	case USB_ID(0x19f7, 0x0003): /* RODE NT-USB */
+-	case USB_ID(0x2d99, 0x0026): /* HECATE G2 GAMING HEADSET */
++	if (mixer->chip->quirk_flags & QUIRK_FLAG_MIXER_MIN_MUTE)
+ 		if (strstr(kctl->id.name, "Playback"))
+ 			cval->min_mute = 1;
+-		break;
+-	}
+ 
+ 	/* ALSA-ify some Plantronics headset control names */
+ 	if (USB_ID_VENDOR(mixer->chip->usb_id) == 0x047f &&
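The DualSense quirk's match callback decides ownership by sysfs path containment: the USB device path plus a trailing '/' must be a strict prefix of the input device path, which keeps two identical controllers attached to the same host apart. A userspace sketch of that prefix test (paths illustrative):

/* Strict-prefix check: the '/' terminator prevents "1-1" from
 * matching "1-10".
 */
#include <stdio.h>
#include <string.h>

static int path_owns(const char *usb_path, const char *input_path)
{
	size_t n = strlen(usb_path);

	if (n >= strlen(input_path))
		return 0;
	return !strncmp(input_path, usb_path, n) && input_path[n] == '/';
}

int main(void)
{
	const char *usb = "/devices/usb1/1-1";

	printf("%d\n", path_owns(usb, "/devices/usb1/1-1/input/input7"));  /* 1 */
	printf("%d\n", path_owns(usb, "/devices/usb1/1-10/input/input9")); /* 0 */
	return 0;
}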
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 0da4ee9757c018..8a20508e055a39 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -2196,6 +2196,10 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ 		   QUIRK_FLAG_SET_IFACE_FIRST),
+ 	DEVICE_FLG(0x0556, 0x0014, /* Phoenix Audio TMX320VC */
+ 		   QUIRK_FLAG_GET_SAMPLE_RATE),
++	DEVICE_FLG(0x0572, 0x1b08, /* Conexant Systems (Rockwell), Inc. */
++		   QUIRK_FLAG_MIXER_MIN_MUTE),
++	DEVICE_FLG(0x0572, 0x1b09, /* Conexant Systems (Rockwell), Inc. */
++		   QUIRK_FLAG_MIXER_MIN_MUTE),
+ 	DEVICE_FLG(0x05a3, 0x9420, /* ELP HD USB Camera */
+ 		   QUIRK_FLAG_GET_SAMPLE_RATE),
+ 	DEVICE_FLG(0x05a7, 0x1020, /* Bose Companion 5 */
+@@ -2238,12 +2242,16 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ 		   QUIRK_FLAG_CTL_MSG_DELAY_1M),
+ 	DEVICE_FLG(0x0b0e, 0x0349, /* Jabra 550a */
+ 		   QUIRK_FLAG_CTL_MSG_DELAY_1M),
++	DEVICE_FLG(0x0bda, 0x498a, /* Realtek Semiconductor Corp. */
++		   QUIRK_FLAG_MIXER_MIN_MUTE),
+ 	DEVICE_FLG(0x0c45, 0x6340, /* Sonix HD USB Camera */
+ 		   QUIRK_FLAG_GET_SAMPLE_RATE),
+ 	DEVICE_FLG(0x0c45, 0x636b, /* Microdia JP001 USB Camera */
+ 		   QUIRK_FLAG_GET_SAMPLE_RATE),
+-	DEVICE_FLG(0x0d8c, 0x0014, /* USB Audio Device */
+-		   QUIRK_FLAG_CTL_MSG_DELAY_1M),
++	DEVICE_FLG(0x0d8c, 0x000c, /* C-Media */
++		   QUIRK_FLAG_MIXER_MIN_MUTE),
++	DEVICE_FLG(0x0d8c, 0x0014, /* C-Media */
++		   QUIRK_FLAG_CTL_MSG_DELAY_1M | QUIRK_FLAG_MIXER_MIN_MUTE),
+ 	DEVICE_FLG(0x0ecb, 0x205c, /* JBL Quantum610 Wireless */
+ 		   QUIRK_FLAG_FIXED_RATE),
+ 	DEVICE_FLG(0x0ecb, 0x2069, /* JBL Quantum810 Wireless */
+@@ -2252,6 +2260,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ 		   QUIRK_FLAG_SHARE_MEDIA_DEVICE | QUIRK_FLAG_ALIGN_TRANSFER),
+ 	DEVICE_FLG(0x1101, 0x0003, /* Audioengine D1 */
+ 		   QUIRK_FLAG_GET_SAMPLE_RATE),
++	DEVICE_FLG(0x12d1, 0x3a07, /* Huawei Technologies Co., Ltd. */
++		   QUIRK_FLAG_MIXER_MIN_MUTE),
+ 	DEVICE_FLG(0x1224, 0x2a25, /* Jieli Technology USB PHY 2.0 */
+ 		   QUIRK_FLAG_GET_SAMPLE_RATE | QUIRK_FLAG_MIC_RES_16),
+ 	DEVICE_FLG(0x1395, 0x740a, /* Sennheiser DECT */
+@@ -2290,6 +2300,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ 		   QUIRK_FLAG_ITF_USB_DSD_DAC | QUIRK_FLAG_CTL_MSG_DELAY),
+ 	DEVICE_FLG(0x1901, 0x0191, /* GE B850V3 CP2114 audio interface */
+ 		   QUIRK_FLAG_GET_SAMPLE_RATE),
++	DEVICE_FLG(0x19f7, 0x0003, /* RODE NT-USB */
++		   QUIRK_FLAG_MIXER_MIN_MUTE),
+ 	DEVICE_FLG(0x19f7, 0x0035, /* RODE NT-USB+ */
+ 		   QUIRK_FLAG_GET_SAMPLE_RATE),
+ 	DEVICE_FLG(0x1bcf, 0x2281, /* HD Webcam */
+@@ -2340,6 +2352,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ 		   QUIRK_FLAG_IGNORE_CTL_ERROR),
+ 	DEVICE_FLG(0x2912, 0x30c8, /* Audioengine D1 */
+ 		   QUIRK_FLAG_GET_SAMPLE_RATE),
++	DEVICE_FLG(0x2a70, 0x1881, /* OnePlus Technology (Shenzhen) Co., Ltd. BE02T */
++		   QUIRK_FLAG_MIXER_MIN_MUTE),
+ 	DEVICE_FLG(0x2b53, 0x0023, /* Fiero SC-01 (firmware v1.0.0 @ 48 kHz) */
+ 		   QUIRK_FLAG_GENERIC_IMPLICIT_FB),
+ 	DEVICE_FLG(0x2b53, 0x0024, /* Fiero SC-01 (firmware v1.0.0 @ 96 kHz) */
+@@ -2350,10 +2364,14 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ 		   QUIRK_FLAG_CTL_MSG_DELAY_1M),
+ 	DEVICE_FLG(0x2d95, 0x8021, /* VIVO USB-C-XE710 HEADSET */
+ 		   QUIRK_FLAG_CTL_MSG_DELAY_1M),
++	DEVICE_FLG(0x2d99, 0x0026, /* HECATE G2 GAMING HEADSET */
++		   QUIRK_FLAG_MIXER_MIN_MUTE),
+ 	DEVICE_FLG(0x2fc6, 0xf0b7, /* iBasso DC07 Pro */
+ 		   QUIRK_FLAG_CTL_MSG_DELAY_1M),
+ 	DEVICE_FLG(0x30be, 0x0101, /* Schiit Hel */
+ 		   QUIRK_FLAG_IGNORE_CTL_ERROR),
++	DEVICE_FLG(0x339b, 0x3a07, /* Synaptics HONOR USB-C HEADSET */
++		   QUIRK_FLAG_MIXER_MIN_MUTE),
+ 	DEVICE_FLG(0x413c, 0xa506, /* Dell AE515 sound bar */
+ 		   QUIRK_FLAG_GET_SAMPLE_RATE),
+ 	DEVICE_FLG(0x534d, 0x0021, /* MacroSilicon MS2100/MS2106 */
+@@ -2405,6 +2423,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ 		   QUIRK_FLAG_DSD_RAW),
+ 	VENDOR_FLG(0x2d87, /* Cayin device */
+ 		   QUIRK_FLAG_DSD_RAW),
++	VENDOR_FLG(0x2fc6, /* Comture-inc devices */
++		   QUIRK_FLAG_DSD_RAW),
+ 	VENDOR_FLG(0x3336, /* HEM devices */
+ 		   QUIRK_FLAG_DSD_RAW),
+ 	VENDOR_FLG(0x3353, /* Khadas devices */
+diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
+index 158ec053dc44dd..1ef4d39978df36 100644
+--- a/sound/usb/usbaudio.h
++++ b/sound/usb/usbaudio.h
+@@ -196,6 +196,9 @@ extern bool snd_usb_skip_validation;
+  *  for the given endpoint.
+  * QUIRK_FLAG_MIC_RES_16 and QUIRK_FLAG_MIC_RES_384
+  *  Set the fixed resolution for Mic Capture Volume (mostly for webcams)
++ * QUIRK_FLAG_MIXER_MIN_MUTE
++ *  Set minimum volume control value as mute for devices where the lowest
++ *  playback value represents muted state instead of minimum audible volume
+  */
+ 
+ #define QUIRK_FLAG_GET_SAMPLE_RATE	(1U << 0)
+@@ -222,5 +225,6 @@ extern bool snd_usb_skip_validation;
+ #define QUIRK_FLAG_FIXED_RATE		(1U << 21)
+ #define QUIRK_FLAG_MIC_RES_16		(1U << 22)
+ #define QUIRK_FLAG_MIC_RES_384		(1U << 23)
++#define QUIRK_FLAG_MIXER_MIN_MUTE	(1U << 24)
+ 
+ #endif /* __USBAUDIO_H */
+diff --git a/tools/testing/selftests/net/fib_nexthops.sh b/tools/testing/selftests/net/fib_nexthops.sh
+index 77c83d9508d3b5..6845da99028180 100755
+--- a/tools/testing/selftests/net/fib_nexthops.sh
++++ b/tools/testing/selftests/net/fib_nexthops.sh
+@@ -465,8 +465,8 @@ ipv6_fdb_grp_fcnal()
+ 	log_test $? 0 "Get Fdb nexthop group by id"
+ 
+ 	# fdb nexthop group can only contain fdb nexthops
+-	run_cmd "$IP nexthop add id 63 via 2001:db8:91::4"
+-	run_cmd "$IP nexthop add id 64 via 2001:db8:91::5"
++	run_cmd "$IP nexthop add id 63 via 2001:db8:91::4 dev veth1"
++	run_cmd "$IP nexthop add id 64 via 2001:db8:91::5 dev veth1"
+ 	run_cmd "$IP nexthop add id 103 group 63/64 fdb"
+ 	log_test $? 2 "Fdb Nexthop group with non-fdb nexthops"
+ 
+@@ -545,15 +545,15 @@ ipv4_fdb_grp_fcnal()
+ 	log_test $? 0 "Get Fdb nexthop group by id"
+ 
+ 	# fdb nexthop group can only contain fdb nexthops
+-	run_cmd "$IP nexthop add id 14 via 172.16.1.2"
+-	run_cmd "$IP nexthop add id 15 via 172.16.1.3"
++	run_cmd "$IP nexthop add id 14 via 172.16.1.2 dev veth1"
++	run_cmd "$IP nexthop add id 15 via 172.16.1.3 dev veth1"
+ 	run_cmd "$IP nexthop add id 103 group 14/15 fdb"
+ 	log_test $? 2 "Fdb Nexthop group with non-fdb nexthops"
+ 
+ 	# Non fdb nexthop group can not contain fdb nexthops
+ 	run_cmd "$IP nexthop add id 16 via 172.16.1.2 fdb"
+ 	run_cmd "$IP nexthop add id 17 via 172.16.1.3 fdb"
+-	run_cmd "$IP nexthop add id 104 group 14/15"
++	run_cmd "$IP nexthop add id 104 group 16/17"
+ 	log_test $? 2 "Non-Fdb Nexthop group with fdb nexthops"
+ 
+ 	# fdb nexthop cannot have blackhole
+@@ -580,7 +580,7 @@ ipv4_fdb_grp_fcnal()
+ 	run_cmd "$BRIDGE fdb add 02:02:00:00:00:14 dev vx10 nhid 12 self"
+ 	log_test $? 255 "Fdb mac add with nexthop"
+ 
+-	run_cmd "$IP ro add 172.16.0.0/22 nhid 15"
++	run_cmd "$IP ro add 172.16.0.0/22 nhid 16"
+ 	log_test $? 2 "Route add with fdb nexthop"
+ 
+ 	run_cmd "$IP ro add 172.16.0.0/22 nhid 103"


* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2025-09-25 12:02 Arisu Tachibana
  0 siblings, 0 replies; 82+ messages in thread
From: Arisu Tachibana @ 2025-09-25 12:02 UTC (permalink / raw
  To: gentoo-commits

commit:     58abc47e9289b91e71cb97f5a3d77336d0fc9948
Author:     Arisu Tachibana <alicef <AT> gentoo <DOT> org>
AuthorDate: Thu Sep 25 12:02:26 2025 +0000
Commit:     Arisu Tachibana <alicef <AT> gentoo <DOT> org>
CommitDate: Thu Sep 25 12:02:26 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=58abc47e

Linux patch 6.12.49

Signed-off-by: Arisu Tachibana <alicef <AT> gentoo.org>

 0000_README              |    4 +
 1048_linux-6.12.49.patch | 4133 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 4137 insertions(+)

diff --git a/0000_README b/0000_README
index 737863c6..b6db0425 100644
--- a/0000_README
+++ b/0000_README
@@ -235,6 +235,10 @@ Patch:  1047_linux-6.12.48.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.12.48
 
+Patch:  1048_linux-6.12.49.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.12.49
+
 Patch:  1500_fortify-copy-size-value-range-tracking-fix.patch
 From:   https://git.kernel.org/
 Desc:   fortify: Hide run-time copy size from value range tracking

diff --git a/1048_linux-6.12.49.patch b/1048_linux-6.12.49.patch
new file mode 100644
index 00000000..4cb7b1bc
--- /dev/null
+++ b/1048_linux-6.12.49.patch
@@ -0,0 +1,4133 @@
+diff --git a/Documentation/admin-guide/hw-vuln/srso.rst b/Documentation/admin-guide/hw-vuln/srso.rst
+index 2ad1c05b8c8839..66af95251a3d1b 100644
+--- a/Documentation/admin-guide/hw-vuln/srso.rst
++++ b/Documentation/admin-guide/hw-vuln/srso.rst
+@@ -104,7 +104,20 @@ The possible values in this file are:
+ 
+    (spec_rstack_overflow=ibpb-vmexit)
+ 
++ * 'Mitigation: Reduced Speculation':
+ 
++   This mitigation gets automatically enabled when the above one "IBPB on
++   VMEXIT" has been selected and the CPU supports the BpSpecReduce bit.
++
++   It gets automatically enabled on machines which have the
++   SRSO_USER_KERNEL_NO=1 CPUID bit. In that case, the code logic is to switch
++   to the above =ibpb-vmexit mitigation because the user/kernel boundary is
++   not affected anymore and thus "safe RET" is not needed.
++
++   After enabling the IBPB on VMEXIT mitigation option, the BpSpecReduce bit
++   is detected (functionality present on all such machines) and that
++   practically overrides IBPB on VMEXIT as it has a lot less performance
++   impact and takes care of the guest->host attack vector too.
+ 
+ In order to exploit vulnerability, an attacker needs to:
+ 
+diff --git a/Documentation/netlink/specs/mptcp_pm.yaml b/Documentation/netlink/specs/mptcp_pm.yaml
+index 7e295bad8b2923..a670a9bbe01bb1 100644
+--- a/Documentation/netlink/specs/mptcp_pm.yaml
++++ b/Documentation/netlink/specs/mptcp_pm.yaml
+@@ -28,13 +28,13 @@ definitions:
+         traffic-patterns it can take a long time until the
+         MPTCP_EVENT_ESTABLISHED is sent.
+         Attributes: token, family, saddr4 | saddr6, daddr4 | daddr6, sport,
+-        dport, server-side.
++        dport, server-side, [flags].
+      -
+       name: established
+       doc: >-
+         A MPTCP connection is established (can start new subflows).
+         Attributes: token, family, saddr4 | saddr6, daddr4 | daddr6, sport,
+-        dport, server-side.
++        dport, server-side, [flags].
+      -
+       name: closed
+       doc: >-
+diff --git a/Makefile b/Makefile
+index ede8c04ea112bc..66ae67c52da819 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 12
+-SUBLEVEL = 48
++SUBLEVEL = 49
+ EXTRAVERSION =
+ NAME = Baby Opossum Posse
+ 
+diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
+index a7a1f15bcc6724..5f35a8bd8996ec 100644
+--- a/arch/loongarch/Kconfig
++++ b/arch/loongarch/Kconfig
+@@ -540,10 +540,14 @@ config ARCH_STRICT_ALIGN
+ 	  -mstrict-align build parameter to prevent unaligned accesses.
+ 
+ 	  CPUs with h/w unaligned access support:
+-	  Loongson-2K2000/2K3000/3A5000/3C5000/3D5000.
++	  Loongson-2K2000/2K3000 and all of Loongson-3 series processors
++	  based on LoongArch.
+ 
+ 	  CPUs without h/w unaligned access support:
+-	  Loongson-2K500/2K1000.
++	  Loongson-2K0300/2K0500/2K1000.
++
++	  If you want to make sure whether to support unaligned memory access
++	  on your hardware, please read the bit 20 (UAL) of CPUCFG1 register.
+ 
+ 	  This option is enabled by default to make the kernel be able to run
+ 	  on all LoongArch systems. But you can disable it manually if you want
+diff --git a/arch/loongarch/include/asm/acenv.h b/arch/loongarch/include/asm/acenv.h
+index 52f298f7293bab..483c955f2ae50d 100644
+--- a/arch/loongarch/include/asm/acenv.h
++++ b/arch/loongarch/include/asm/acenv.h
+@@ -10,9 +10,8 @@
+ #ifndef _ASM_LOONGARCH_ACENV_H
+ #define _ASM_LOONGARCH_ACENV_H
+ 
+-/*
+- * This header is required by ACPI core, but we have nothing to fill in
+- * right now. Will be updated later when needed.
+- */
++#ifdef CONFIG_ARCH_STRICT_ALIGN
++#define ACPI_MISALIGNMENT_NOT_SUPPORTED
++#endif /* CONFIG_ARCH_STRICT_ALIGN */
+ 
+ #endif /* _ASM_LOONGARCH_ACENV_H */
+diff --git a/arch/loongarch/kernel/env.c b/arch/loongarch/kernel/env.c
+index c0a5dc9aeae287..be309a71f20491 100644
+--- a/arch/loongarch/kernel/env.c
++++ b/arch/loongarch/kernel/env.c
+@@ -109,6 +109,8 @@ static int __init boardinfo_init(void)
+ 	struct kobject *loongson_kobj;
+ 
+ 	loongson_kobj = kobject_create_and_add("loongson", firmware_kobj);
++	if (!loongson_kobj)
++		return -ENOMEM;
+ 
+ 	return sysfs_create_file(loongson_kobj, &boardinfo_attr.attr);
+ }
+diff --git a/arch/loongarch/kernel/stacktrace.c b/arch/loongarch/kernel/stacktrace.c
+index 9a038d1070d73b..387dc4d3c4868f 100644
+--- a/arch/loongarch/kernel/stacktrace.c
++++ b/arch/loongarch/kernel/stacktrace.c
+@@ -51,12 +51,13 @@ int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
+ 	if (task == current) {
+ 		regs->regs[3] = (unsigned long)__builtin_frame_address(0);
+ 		regs->csr_era = (unsigned long)__builtin_return_address(0);
++		regs->regs[22] = 0;
+ 	} else {
+ 		regs->regs[3] = thread_saved_fp(task);
+ 		regs->csr_era = thread_saved_ra(task);
++		regs->regs[22] = task->thread.reg22;
+ 	}
+ 	regs->regs[1] = 0;
+-	regs->regs[22] = 0;
+ 
+ 	for (unwind_start(&state, task, regs);
+ 	     !unwind_done(&state) && !unwind_error(&state); unwind_next_frame(&state)) {
+diff --git a/arch/loongarch/kernel/vdso.c b/arch/loongarch/kernel/vdso.c
+index 2c0d852ca5366b..0e8793617a2e95 100644
+--- a/arch/loongarch/kernel/vdso.c
++++ b/arch/loongarch/kernel/vdso.c
+@@ -108,6 +108,9 @@ static int __init init_vdso(void)
+ 	vdso_info.code_mapping.pages =
+ 		kcalloc(vdso_info.size / PAGE_SIZE, sizeof(struct page *), GFP_KERNEL);
+ 
++	if (!vdso_info.code_mapping.pages)
++		return -ENOMEM;
++
+ 	pfn = __phys_to_pfn(__pa_symbol(vdso_info.vdso));
+ 	for (i = 0; i < vdso_info.size / PAGE_SIZE; i++)
+ 		vdso_info.code_mapping.pages[i] = pfn_to_page(pfn + i);
+diff --git a/arch/um/drivers/virtio_uml.c b/arch/um/drivers/virtio_uml.c
+index 2b6e701776b6b8..e2cba5117fd25b 100644
+--- a/arch/um/drivers/virtio_uml.c
++++ b/arch/um/drivers/virtio_uml.c
+@@ -1231,10 +1231,12 @@ static int virtio_uml_probe(struct platform_device *pdev)
+ 	device_set_wakeup_capable(&vu_dev->vdev.dev, true);
+ 
+ 	rc = register_virtio_device(&vu_dev->vdev);
+-	if (rc)
++	if (rc) {
+ 		put_device(&vu_dev->vdev.dev);
++		return rc;
++	}
+ 	vu_dev->registered = 1;
+-	return rc;
++	return 0;
+ 
+ error_init:
+ 	os_close_file(vu_dev->sock);
+diff --git a/arch/um/os-Linux/file.c b/arch/um/os-Linux/file.c
+index f1d03cf3957fe8..62c176a2c1ac48 100644
+--- a/arch/um/os-Linux/file.c
++++ b/arch/um/os-Linux/file.c
+@@ -556,7 +556,7 @@ ssize_t os_rcv_fd_msg(int fd, int *fds, unsigned int n_fds,
+ 	    cmsg->cmsg_type != SCM_RIGHTS)
+ 		return n;
+ 
+-	memcpy(fds, CMSG_DATA(cmsg), cmsg->cmsg_len);
++	memcpy(fds, CMSG_DATA(cmsg), cmsg->cmsg_len - CMSG_LEN(0));
+ 	return n;
+ }
+ 
+diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
+index 5e43d390f7a3dd..36d8404f406dec 100644
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -2793,7 +2793,7 @@ static void intel_pmu_read_event(struct perf_event *event)
+ 		if (pmu_enabled)
+ 			intel_pmu_disable_all();
+ 
+-		if (is_topdown_event(event))
++		if (is_topdown_count(event))
+ 			static_call(intel_pmu_update_topdown_event)(event);
+ 		else
+ 			intel_pmu_drain_pebs_buffer();
+diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
+index 90f1f2f9d31400..16a8c1f3ff6558 100644
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -464,6 +464,11 @@
+ #define X86_FEATURE_SBPB		(20*32+27) /* Selective Branch Prediction Barrier */
+ #define X86_FEATURE_IBPB_BRTYPE		(20*32+28) /* MSR_PRED_CMD[IBPB] flushes all branch type predictions */
+ #define X86_FEATURE_SRSO_NO		(20*32+29) /* CPU is not affected by SRSO */
++#define X86_FEATURE_SRSO_USER_KERNEL_NO	(20*32+30) /* CPU is not affected by SRSO across user/kernel boundaries */
++#define X86_FEATURE_SRSO_BP_SPEC_REDUCE	(20*32+31) /*
++						    * BP_CFG[BpSpecReduce] can be used to mitigate SRSO for VMs.
++						    * (SRSO_MSR_FIX in the official doc).
++						    */
+ 
+ /*
+  * Extended auxiliary flags: Linux defined - for features scattered in various
+diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
+index 2b6e3127ef4e2d..21d07aa9400c7a 100644
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -728,6 +728,7 @@
+ 
+ /* Zen4 */
+ #define MSR_ZEN4_BP_CFG                 0xc001102e
++#define MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT 4
+ #define MSR_ZEN4_BP_CFG_SHARED_BTB_FIX_BIT 5
+ 
+ /* Fam 19h MSRs */
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 06bbc297c26c03..f3cb559a598df5 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -2718,6 +2718,7 @@ enum srso_mitigation {
+ 	SRSO_MITIGATION_SAFE_RET,
+ 	SRSO_MITIGATION_IBPB,
+ 	SRSO_MITIGATION_IBPB_ON_VMEXIT,
++	SRSO_MITIGATION_BP_SPEC_REDUCE,
+ };
+ 
+ enum srso_mitigation_cmd {
+@@ -2735,7 +2736,8 @@ static const char * const srso_strings[] = {
+ 	[SRSO_MITIGATION_MICROCODE]		= "Vulnerable: Microcode, no safe RET",
+ 	[SRSO_MITIGATION_SAFE_RET]		= "Mitigation: Safe RET",
+ 	[SRSO_MITIGATION_IBPB]			= "Mitigation: IBPB",
+-	[SRSO_MITIGATION_IBPB_ON_VMEXIT]	= "Mitigation: IBPB on VMEXIT only"
++	[SRSO_MITIGATION_IBPB_ON_VMEXIT]	= "Mitigation: IBPB on VMEXIT only",
++	[SRSO_MITIGATION_BP_SPEC_REDUCE]	= "Mitigation: Reduced Speculation"
+ };
+ 
+ static enum srso_mitigation srso_mitigation __ro_after_init = SRSO_MITIGATION_NONE;
+@@ -2774,7 +2776,7 @@ static void __init srso_select_mitigation(void)
+ 	    srso_cmd == SRSO_CMD_OFF) {
+ 		if (boot_cpu_has(X86_FEATURE_SBPB))
+ 			x86_pred_cmd = PRED_CMD_SBPB;
+-		return;
++		goto out;
+ 	}
+ 
+ 	if (has_microcode) {
+@@ -2786,7 +2788,7 @@ static void __init srso_select_mitigation(void)
+ 		 */
+ 		if (boot_cpu_data.x86 < 0x19 && !cpu_smt_possible()) {
+ 			setup_force_cpu_cap(X86_FEATURE_SRSO_NO);
+-			return;
++			goto out;
+ 		}
+ 
+ 		if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
+@@ -2810,6 +2812,9 @@ static void __init srso_select_mitigation(void)
+ 		break;
+ 
+ 	case SRSO_CMD_SAFE_RET:
++		if (boot_cpu_has(X86_FEATURE_SRSO_USER_KERNEL_NO))
++			goto ibpb_on_vmexit;
++
+ 		if (IS_ENABLED(CONFIG_MITIGATION_SRSO)) {
+ 			/*
+ 			 * Enable the return thunk for generated code
+@@ -2861,7 +2866,14 @@ static void __init srso_select_mitigation(void)
+ 		}
+ 		break;
+ 
++ibpb_on_vmexit:
+ 	case SRSO_CMD_IBPB_ON_VMEXIT:
++		if (boot_cpu_has(X86_FEATURE_SRSO_BP_SPEC_REDUCE)) {
++			pr_notice("Reducing speculation to address VM/HV SRSO attack vector.\n");
++			srso_mitigation = SRSO_MITIGATION_BP_SPEC_REDUCE;
++			break;
++		}
++
+ 		if (IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) {
+ 			if (has_microcode) {
+ 				setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
+@@ -2883,7 +2895,15 @@ static void __init srso_select_mitigation(void)
+ 	}
+ 
+ out:
+-	pr_info("%s\n", srso_strings[srso_mitigation]);
++	/*
++	 * Clear the feature flag if this mitigation is not selected as that
++	 * feature flag controls the BpSpecReduce MSR bit toggling in KVM.
++	 */
++	if (srso_mitigation != SRSO_MITIGATION_BP_SPEC_REDUCE)
++		setup_clear_cpu_cap(X86_FEATURE_SRSO_BP_SPEC_REDUCE);
++
++	if (srso_mitigation != SRSO_MITIGATION_NONE)
++		pr_info("%s\n", srso_strings[srso_mitigation]);
+ }
+ 
+ #undef pr_fmt
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
+index 800f781475c028..9170a9e127b7a8 100644
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -1507,6 +1507,63 @@ static void svm_vcpu_free(struct kvm_vcpu *vcpu)
+ 	__free_pages(virt_to_page(svm->msrpm), get_order(MSRPM_SIZE));
+ }
+ 
++#ifdef CONFIG_CPU_MITIGATIONS
++static DEFINE_SPINLOCK(srso_lock);
++static atomic_t srso_nr_vms;
++
++static void svm_srso_clear_bp_spec_reduce(void *ign)
++{
++	struct svm_cpu_data *sd = this_cpu_ptr(&svm_data);
++
++	if (!sd->bp_spec_reduce_set)
++		return;
++
++	msr_clear_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT);
++	sd->bp_spec_reduce_set = false;
++}
++
++static void svm_srso_vm_destroy(void)
++{
++	if (!cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE))
++		return;
++
++	if (atomic_dec_return(&srso_nr_vms))
++		return;
++
++	guard(spinlock)(&srso_lock);
++
++	/*
++	 * Verify a new VM didn't come along, acquire the lock, and increment
++	 * the count before this task acquired the lock.
++	 */
++	if (atomic_read(&srso_nr_vms))
++		return;
++
++	on_each_cpu(svm_srso_clear_bp_spec_reduce, NULL, 1);
++}
++
++static void svm_srso_vm_init(void)
++{
++	if (!cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE))
++		return;
++
++	/*
++	 * Acquire the lock on 0 => 1 transitions to ensure a potential 1 => 0
++	 * transition, i.e. destroying the last VM, is fully complete, e.g. so
++	 * that a delayed IPI doesn't clear BP_SPEC_REDUCE after a vCPU runs.
++	 */
++	if (atomic_inc_not_zero(&srso_nr_vms))
++		return;
++
++	guard(spinlock)(&srso_lock);
++
++	atomic_inc(&srso_nr_vms);
++}
++#else
++static void svm_srso_vm_init(void) { }
++static void svm_srso_vm_destroy(void) { }
++#endif
++
+ static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
+ {
+ 	struct vcpu_svm *svm = to_svm(vcpu);
+@@ -1539,6 +1596,11 @@ static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
+ 	    (!boot_cpu_has(X86_FEATURE_V_TSC_AUX) || !sev_es_guest(vcpu->kvm)))
+ 		kvm_set_user_return_msr(tsc_aux_uret_slot, svm->tsc_aux, -1ull);
+ 
++	if (cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE) &&
++	    !sd->bp_spec_reduce_set) {
++		sd->bp_spec_reduce_set = true;
++		msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT);
++	}
+ 	svm->guest_state_loaded = true;
+ }
+ 
+@@ -4044,8 +4106,7 @@ static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
+ 	struct vcpu_svm *svm = to_svm(vcpu);
+ 	u64 cr8;
+ 
+-	if (nested_svm_virtualize_tpr(vcpu) ||
+-	    kvm_vcpu_apicv_active(vcpu))
++	if (nested_svm_virtualize_tpr(vcpu))
+ 		return;
+ 
+ 	cr8 = kvm_get_cr8(vcpu);
+@@ -5005,6 +5066,8 @@ static void svm_vm_destroy(struct kvm *kvm)
+ {
+ 	avic_vm_destroy(kvm);
+ 	sev_vm_destroy(kvm);
++
++	svm_srso_vm_destroy();
+ }
+ 
+ static int svm_vm_init(struct kvm *kvm)
+@@ -5030,6 +5093,7 @@ static int svm_vm_init(struct kvm *kvm)
+ 			return ret;
+ 	}
+ 
++	svm_srso_vm_init();
+ 	return 0;
+ }
+ 
+diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
+index d114efac7af78d..1aa9b1e468cb44 100644
+--- a/arch/x86/kvm/svm/svm.h
++++ b/arch/x86/kvm/svm/svm.h
+@@ -335,6 +335,8 @@ struct svm_cpu_data {
+ 	u32 next_asid;
+ 	u32 min_asid;
+ 
++	bool bp_spec_reduce_set;
++
+ 	struct vmcb *save_area;
+ 	unsigned long save_area_pa;
+ 
+diff --git a/arch/x86/lib/msr.c b/arch/x86/lib/msr.c
+index 4bf4fad5b148ef..5a18ecc04a6c35 100644
+--- a/arch/x86/lib/msr.c
++++ b/arch/x86/lib/msr.c
+@@ -103,6 +103,7 @@ int msr_set_bit(u32 msr, u8 bit)
+ {
+ 	return __flip_bit(msr, bit, true);
+ }
++EXPORT_SYMBOL_GPL(msr_set_bit);
+ 
+ /**
+  * msr_clear_bit - Clear @bit in a MSR @msr.
+@@ -118,6 +119,7 @@ int msr_clear_bit(u32 msr, u8 bit)
+ {
+ 	return __flip_bit(msr, bit, false);
+ }
++EXPORT_SYMBOL_GPL(msr_clear_bit);
+ 
+ #ifdef CONFIG_TRACEPOINTS
+ void do_trace_write_msr(unsigned int msr, u64 val, int failed)
+diff --git a/crypto/af_alg.c b/crypto/af_alg.c
+index 0da7c1ac778a0e..ca6fdcc6c54aca 100644
+--- a/crypto/af_alg.c
++++ b/crypto/af_alg.c
+@@ -970,6 +970,12 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
+ 	}
+ 
+ 	lock_sock(sk);
++	if (ctx->write) {
++		release_sock(sk);
++		return -EBUSY;
++	}
++	ctx->write = true;
++
+ 	if (ctx->init && !ctx->more) {
+ 		if (ctx->used) {
+ 			err = -EINVAL;
+@@ -1019,6 +1025,8 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
+ 			continue;
+ 		}
+ 
++		ctx->merge = 0;
++
+ 		if (!af_alg_writable(sk)) {
+ 			err = af_alg_wait_for_wmem(sk, msg->msg_flags);
+ 			if (err)
+@@ -1058,7 +1066,6 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
+ 			ctx->used += plen;
+ 			copied += plen;
+ 			size -= plen;
+-			ctx->merge = 0;
+ 		} else {
+ 			do {
+ 				struct page *pg;
+@@ -1104,6 +1111,7 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
+ 
+ unlock:
+ 	af_alg_data_wakeup(sk);
++	ctx->write = false;
+ 	release_sock(sk);
+ 
+ 	return copied ?: err;
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index b585c321d3454c..b02ff92bae0b1b 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -8462,7 +8462,16 @@ static int amdgpu_dm_encoder_init(struct drm_device *dev,
+ static void manage_dm_interrupts(struct amdgpu_device *adev,
+ 				 struct amdgpu_crtc *acrtc,
+ 				 struct dm_crtc_state *acrtc_state)
+-{
++{	/*
++	 * We cannot be sure that the frontend index maps to the same
++	 * backend index - some even map to more than one.
++	 * So we have to go through the CRTC to find the right IRQ.
++	 */
++	int irq_type = amdgpu_display_crtc_idx_to_irq_type(
++			adev,
++			acrtc->crtc_id);
++	struct drm_device *dev = adev_to_drm(adev);
++
+ 	struct drm_vblank_crtc_config config = {0};
+ 	struct dc_crtc_timing *timing;
+ 	int offdelay;
+@@ -8515,7 +8524,35 @@ static void manage_dm_interrupts(struct amdgpu_device *adev,
+ 
+ 		drm_crtc_vblank_on_config(&acrtc->base,
+ 					  &config);
++		/* Allow RX6xxx, RX7700, RX7800 GPUs to call amdgpu_irq_get.*/
++		switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
++		case IP_VERSION(3, 0, 0):
++		case IP_VERSION(3, 0, 2):
++		case IP_VERSION(3, 0, 3):
++		case IP_VERSION(3, 2, 0):
++			if (amdgpu_irq_get(adev, &adev->pageflip_irq, irq_type))
++				drm_err(dev, "DM_IRQ: Cannot get pageflip irq!\n");
++#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
++			if (amdgpu_irq_get(adev, &adev->vline0_irq, irq_type))
++				drm_err(dev, "DM_IRQ: Cannot get vline0 irq!\n");
++#endif
++		}
++
+ 	} else {
++		/* Allow RX6xxx, RX7700, RX7800 GPUs to call amdgpu_irq_put.*/
++		switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
++		case IP_VERSION(3, 0, 0):
++		case IP_VERSION(3, 0, 2):
++		case IP_VERSION(3, 0, 3):
++		case IP_VERSION(3, 2, 0):
++#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
++			if (amdgpu_irq_put(adev, &adev->vline0_irq, irq_type))
++				drm_err(dev, "DM_IRQ: Cannot put vline0 irq!\n");
++#endif
++			if (amdgpu_irq_put(adev, &adev->pageflip_irq, irq_type))
++				drm_err(dev, "DM_IRQ: Cannot put pageflip irq!\n");
++		}
++
+ 		drm_crtc_vblank_off(&acrtc->base);
+ 	}
+ }
+diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
+index c036bbc92ba96e..7244c3abb7f992 100644
+--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
++++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
+@@ -2683,7 +2683,7 @@ static int anx7625_i2c_probe(struct i2c_client *client)
+ 		ret = devm_request_threaded_irq(dev, platform->pdata.intp_irq,
+ 						NULL, anx7625_intr_hpd_isr,
+ 						IRQF_TRIGGER_FALLING |
+-						IRQF_ONESHOT,
++						IRQF_ONESHOT | IRQF_NO_AUTOEN,
+ 						"anx7625-intp", platform);
+ 		if (ret) {
+ 			DRM_DEV_ERROR(dev, "fail to request irq\n");
+@@ -2753,8 +2753,10 @@ static int anx7625_i2c_probe(struct i2c_client *client)
+ 	}
+ 
+ 	/* Add work function */
+-	if (platform->pdata.intp_irq)
++	if (platform->pdata.intp_irq) {
++		enable_irq(platform->pdata.intp_irq);
+ 		queue_work(platform->workqueue, &platform->work);
++	}
+ 
+ 	if (platform->pdata.audio_en)
+ 		anx7625_register_audio(dev, platform);
+diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
+index 9ba2a667a1f3a1..b18bdb2daddf8b 100644
+--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
++++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
+@@ -2054,8 +2054,10 @@ static void cdns_mhdp_atomic_enable(struct drm_bridge *bridge,
+ 	mhdp_state = to_cdns_mhdp_bridge_state(new_state);
+ 
+ 	mhdp_state->current_mode = drm_mode_duplicate(bridge->dev, mode);
+-	if (!mhdp_state->current_mode)
+-		return;
++	if (!mhdp_state->current_mode) {
++		ret = -EINVAL;
++		goto out;
++	}
+ 
+ 	drm_mode_set_name(mhdp_state->current_mode);
+ 
+diff --git a/drivers/gpu/drm/xe/xe_tile_sysfs.c b/drivers/gpu/drm/xe/xe_tile_sysfs.c
+index b804234a655160..9e1236a9ec6734 100644
+--- a/drivers/gpu/drm/xe/xe_tile_sysfs.c
++++ b/drivers/gpu/drm/xe/xe_tile_sysfs.c
+@@ -44,16 +44,18 @@ int xe_tile_sysfs_init(struct xe_tile *tile)
+ 	kt->tile = tile;
+ 
+ 	err = kobject_add(&kt->base, &dev->kobj, "tile%d", tile->id);
+-	if (err) {
+-		kobject_put(&kt->base);
+-		return err;
+-	}
++	if (err)
++		goto err_object;
+ 
+ 	tile->sysfs = &kt->base;
+ 
+ 	err = xe_vram_freq_sysfs_init(tile);
+ 	if (err)
+-		return err;
++		goto err_object;
+ 
+ 	return devm_add_action_or_reset(xe->drm.dev, tile_sysfs_fini, tile);
++
++err_object:
++	kobject_put(&kt->base);
++	return err;
+ }
+diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
+index a4845d4213b006..fc5f0e1351932a 100644
+--- a/drivers/gpu/drm/xe/xe_vm.c
++++ b/drivers/gpu/drm/xe/xe_vm.c
+@@ -239,8 +239,8 @@ int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
+ 
+ 	pfence = xe_preempt_fence_create(q, q->lr.context,
+ 					 ++q->lr.seqno);
+-	if (!pfence) {
+-		err = -ENOMEM;
++	if (IS_ERR(pfence)) {
++		err = PTR_ERR(pfence);
+ 		goto out_fini;
+ 	}
+ 
+diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
+index 6fb2f2919ab1ff..a14ee649d3da3b 100644
+--- a/drivers/iommu/amd/amd_iommu_types.h
++++ b/drivers/iommu/amd/amd_iommu_types.h
+@@ -545,6 +545,7 @@ struct gcr3_tbl_info {
+ };
+ 
+ struct amd_io_pgtable {
++	seqcount_t		seqcount;	/* Protects root/mode update */
+ 	struct io_pgtable	pgtbl;
+ 	int			mode;
+ 	u64			*root;
+diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c
+index f3399087859fd1..91cc1e0c663dbf 100644
+--- a/drivers/iommu/amd/io_pgtable.c
++++ b/drivers/iommu/amd/io_pgtable.c
+@@ -17,6 +17,7 @@
+ #include <linux/slab.h>
+ #include <linux/types.h>
+ #include <linux/dma-mapping.h>
++#include <linux/seqlock.h>
+ 
+ #include <asm/barrier.h>
+ 
+@@ -144,8 +145,11 @@ static bool increase_address_space(struct amd_io_pgtable *pgtable,
+ 
+ 	*pte = PM_LEVEL_PDE(pgtable->mode, iommu_virt_to_phys(pgtable->root));
+ 
++	write_seqcount_begin(&pgtable->seqcount);
+ 	pgtable->root  = pte;
+ 	pgtable->mode += 1;
++	write_seqcount_end(&pgtable->seqcount);
++
+ 	amd_iommu_update_and_flush_device_table(domain);
+ 
+ 	pte = NULL;
+@@ -167,6 +171,7 @@ static u64 *alloc_pte(struct amd_io_pgtable *pgtable,
+ {
+ 	unsigned long last_addr = address + (page_size - 1);
+ 	struct io_pgtable_cfg *cfg = &pgtable->pgtbl.cfg;
++	unsigned int seqcount;
+ 	int level, end_lvl;
+ 	u64 *pte, *page;
+ 
+@@ -184,8 +189,14 @@ static u64 *alloc_pte(struct amd_io_pgtable *pgtable,
+ 	}
+ 
+ 
+-	level   = pgtable->mode - 1;
+-	pte     = &pgtable->root[PM_LEVEL_INDEX(level, address)];
++	do {
++		seqcount = read_seqcount_begin(&pgtable->seqcount);
++
++		level   = pgtable->mode - 1;
++		pte     = &pgtable->root[PM_LEVEL_INDEX(level, address)];
++	} while (read_seqcount_retry(&pgtable->seqcount, seqcount));
++
++
+ 	address = PAGE_SIZE_ALIGN(address, page_size);
+ 	end_lvl = PAGE_SIZE_LEVEL(page_size);
+ 
+@@ -262,15 +273,20 @@ static u64 *fetch_pte(struct amd_io_pgtable *pgtable,
+ 		      unsigned long *page_size)
+ {
+ 	int level;
++	unsigned int seqcount;
+ 	u64 *pte;
+ 
+ 	*page_size = 0;
+@@ -269,8 +281,12 @@ static u64 *fetch_pte(struct amd_io_pgtable *pgtable,
+ 	if (address > PM_LEVEL_SIZE(pgtable->mode))
+ 		return NULL;
+ 
+-	level	   =  pgtable->mode - 1;
+-	pte	   = &pgtable->root[PM_LEVEL_INDEX(level, address)];
++	do {
++		seqcount = read_seqcount_begin(&pgtable->seqcount);
++		level	   =  pgtable->mode - 1;
++		pte	   = &pgtable->root[PM_LEVEL_INDEX(level, address)];
++	} while (read_seqcount_retry(&pgtable->seqcount, seqcount));
++
+ 	*page_size =  PTE_LEVEL_PAGE_SIZE(level);
+ 
+ 	while (level > 0) {
+@@ -552,6 +568,7 @@ static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *coo
+ 	if (!pgtable->root)
+ 		return NULL;
+ 	pgtable->mode = PAGE_MODE_3_LEVEL;
++	seqcount_init(&pgtable->seqcount);
+ 
+ 	cfg->pgsize_bitmap  = amd_iommu_pgsize_bitmap;
+ 	cfg->ias            = IOMMU_IN_ADDR_BIT_SIZE;
+diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
+index b300f72cf01e68..32d6710e264a4e 100644
+--- a/drivers/iommu/intel/iommu.c
++++ b/drivers/iommu/intel/iommu.c
+@@ -1768,6 +1768,10 @@ static void switch_to_super_page(struct dmar_domain *domain,
+ 	unsigned long lvl_pages = lvl_to_nr_pages(level);
+ 	struct dma_pte *pte = NULL;
+ 
++	if (WARN_ON(!IS_ALIGNED(start_pfn, lvl_pages) ||
++		    !IS_ALIGNED(end_pfn + 1, lvl_pages)))
++		return;
++
+ 	while (start_pfn <= end_pfn) {
+ 		if (!pte)
+ 			pte = pfn_to_dma_pte(domain, start_pfn, &level,
+@@ -1844,7 +1848,8 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
+ 				unsigned long pages_to_remove;
+ 
+ 				pteval |= DMA_PTE_LARGE_PAGE;
+-				pages_to_remove = min_t(unsigned long, nr_pages,
++				pages_to_remove = min_t(unsigned long,
++							round_down(nr_pages, lvl_pages),
+ 							nr_pte_to_next_page(pte) * lvl_pages);
+ 				end_pfn = iov_pfn + pages_to_remove - 1;
+ 				switch_to_super_page(domain, iov_pfn, end_pfn, largepage_lvl);
+diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
+index 163a5bbd485f97..c69696d2540ab8 100644
+--- a/drivers/md/dm-raid.c
++++ b/drivers/md/dm-raid.c
+@@ -3811,8 +3811,10 @@ static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
+ 	struct raid_set *rs = ti->private;
+ 	unsigned int chunk_size_bytes = to_bytes(rs->md.chunk_sectors);
+ 
+-	limits->io_min = chunk_size_bytes;
+-	limits->io_opt = chunk_size_bytes * mddev_data_stripes(rs);
++	if (chunk_size_bytes) {
++		limits->io_min = chunk_size_bytes;
++		limits->io_opt = chunk_size_bytes * mddev_data_stripes(rs);
++	}
+ }
+ 
+ static void raid_presuspend(struct dm_target *ti)
+diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
+index c68dc1653cfd1b..cbf4c102c4d0c0 100644
+--- a/drivers/md/dm-stripe.c
++++ b/drivers/md/dm-stripe.c
+@@ -457,11 +457,15 @@ static void stripe_io_hints(struct dm_target *ti,
+ 			    struct queue_limits *limits)
+ {
+ 	struct stripe_c *sc = ti->private;
+-	unsigned int chunk_size = sc->chunk_size << SECTOR_SHIFT;
++	unsigned int io_min, io_opt;
+ 
+ 	limits->chunk_sectors = sc->chunk_size;
+-	limits->io_min = chunk_size;
+-	limits->io_opt = chunk_size * sc->stripes;
++
++	if (!check_shl_overflow(sc->chunk_size, SECTOR_SHIFT, &io_min) &&
++	    !check_mul_overflow(io_min, sc->stripes, &io_opt)) {
++		limits->io_min = io_min;
++		limits->io_opt = io_opt;
++	}
+ }
+ 
+ static struct target_type stripe_target = {
+diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
+index 12df4ff9eeee53..878fda43b7f73b 100644
+--- a/drivers/mmc/host/mvsdio.c
++++ b/drivers/mmc/host/mvsdio.c
+@@ -292,7 +292,7 @@ static u32 mvsd_finish_data(struct mvsd_host *host, struct mmc_data *data,
+ 		host->pio_ptr = NULL;
+ 		host->pio_size = 0;
+ 	} else {
+-		dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_frags,
++		dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+ 			     mmc_get_dma_dir(data));
+ 	}
+ 
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 52ff0f9e04e079..00204e42de2e77 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -2117,6 +2117,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
+ 		memcpy(ss.__data, bond_dev->dev_addr, bond_dev->addr_len);
+ 	} else if (bond->params.fail_over_mac == BOND_FOM_FOLLOW &&
+ 		   BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP &&
++		   bond_has_slaves(bond) &&
+ 		   memcmp(slave_dev->dev_addr, bond_dev->dev_addr, bond_dev->addr_len) == 0) {
+ 		/* Set slave to random address to avoid duplicate mac
+ 		 * address in later fail over.
+@@ -3337,7 +3338,6 @@ static void bond_ns_send_all(struct bonding *bond, struct slave *slave)
+ 		/* Find out through which dev should the packet go */
+ 		memset(&fl6, 0, sizeof(struct flowi6));
+ 		fl6.daddr = targets[i];
+-		fl6.flowi6_oif = bond->dev->ifindex;
+ 
+ 		dst = ip6_route_output(dev_net(bond->dev), NULL, &fl6);
+ 		if (dst->error) {
+diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
+index a9040c42d2ff97..6e97a5a7daaf9c 100644
+--- a/drivers/net/ethernet/broadcom/cnic.c
++++ b/drivers/net/ethernet/broadcom/cnic.c
+@@ -4230,8 +4230,7 @@ static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
+ 
+ 	cnic_bnx2x_delete_wait(dev, 0);
+ 
+-	cancel_delayed_work(&cp->delete_task);
+-	flush_workqueue(cnic_wq);
++	cancel_delayed_work_sync(&cp->delete_task);
+ 
+ 	if (atomic_read(&cp->iscsi_conn) != 0)
+ 		netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n",
+diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
+index de8a6ce86ad7e2..12105ffb5dac6d 100644
+--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c
++++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
+@@ -126,7 +126,7 @@ int octeon_init_instr_queue(struct octeon_device *oct,
+ 	oct->io_qmask.iq |= BIT_ULL(iq_no);
+ 
+ 	/* Set the 32B/64B mode for each input queue */
+-	oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no);
++	oct->io_qmask.iq64B |= ((u64)(conf->instr_type == 64) << iq_no);
+ 	iq->iqcmd_64B = (conf->instr_type == 64);
+ 
+ 	oct->fn_list.setup_iq_regs(oct, iq_no);
+diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+index cbd3859ea475bf..980daecab8ea3d 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+@@ -2735,7 +2735,7 @@ static int dpaa2_switch_setup_dpbp(struct ethsw_core *ethsw)
+ 		dev_err(dev, "dpsw_ctrl_if_set_pools() failed\n");
+ 		goto err_get_attr;
+ 	}
+-	ethsw->bpid = dpbp_attrs.id;
++	ethsw->bpid = dpbp_attrs.bpid;
+ 
+ 	return 0;
+ 
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+index c006f716a3bdbe..ca7517a68a2c32 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+@@ -947,9 +947,6 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
+ 		if (!eop_desc)
+ 			break;
+ 
+-		/* prevent any other reads prior to eop_desc */
+-		smp_rmb();
+-
+ 		i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
+ 		/* we have caught up to head, no work left to do */
+ 		if (tx_head == tx_desc)
+diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
+index 2960709f6b62ca..0e699a0432c5b2 100644
+--- a/drivers/net/ethernet/intel/ice/ice.h
++++ b/drivers/net/ethernet/intel/ice/ice.h
+@@ -372,9 +372,6 @@ struct ice_vsi {
+ 	spinlock_t arfs_lock;	/* protects aRFS hash table and filter state */
+ 	atomic_t *arfs_last_fltr_id;
+ 
+-	u16 max_frame;
+-	u16 rx_buf_len;
+-
+ 	struct ice_aqc_vsi_props info;	 /* VSI properties */
+ 	struct ice_vsi_vlan_info vlan_info;	/* vlan config to be restored */
+ 
+diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
+index 4a9a6899fc453c..98c3764fed396e 100644
+--- a/drivers/net/ethernet/intel/ice/ice_base.c
++++ b/drivers/net/ethernet/intel/ice/ice_base.c
+@@ -445,7 +445,7 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
+ 	/* Max packet size for this queue - must not be set to a larger value
+ 	 * than 5 x DBUF
+ 	 */
+-	rlan_ctx.rxmax = min_t(u32, vsi->max_frame,
++	rlan_ctx.rxmax = min_t(u32, ring->max_frame,
+ 			       ICE_MAX_CHAINED_RX_BUFS * ring->rx_buf_len);
+ 
+ 	/* Rx queue threshold in units of 64 */
+@@ -541,8 +541,6 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
+ 	u32 num_bufs = ICE_RX_DESC_UNUSED(ring);
+ 	int err;
+ 
+-	ring->rx_buf_len = ring->vsi->rx_buf_len;
+-
+ 	if (ring->vsi->type == ICE_VSI_PF || ring->vsi->type == ICE_VSI_SF) {
+ 		if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
+ 			err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+@@ -641,21 +639,25 @@ int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx)
+ /**
+  * ice_vsi_cfg_frame_size - setup max frame size and Rx buffer length
+  * @vsi: VSI
++ * @ring: Rx ring to configure
++ *
++ * Determine the maximum frame size and Rx buffer length to use for a PF VSI.
++ * Set these in the associated Rx ring structure.
+  */
+-static void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)
++static void ice_vsi_cfg_frame_size(struct ice_vsi *vsi, struct ice_rx_ring *ring)
+ {
+ 	if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) {
+-		vsi->max_frame = ICE_MAX_FRAME_LEGACY_RX;
+-		vsi->rx_buf_len = ICE_RXBUF_1664;
++		ring->max_frame = ICE_MAX_FRAME_LEGACY_RX;
++		ring->rx_buf_len = ICE_RXBUF_1664;
+ #if (PAGE_SIZE < 8192)
+ 	} else if (!ICE_2K_TOO_SMALL_WITH_PADDING &&
+ 		   (vsi->netdev->mtu <= ETH_DATA_LEN)) {
+-		vsi->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN;
+-		vsi->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN;
++		ring->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN;
++		ring->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN;
+ #endif
+ 	} else {
+-		vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
+-		vsi->rx_buf_len = ICE_RXBUF_3072;
++		ring->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
++		ring->rx_buf_len = ICE_RXBUF_3072;
+ 	}
+ }
+ 
+@@ -670,15 +672,15 @@ int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
+ {
+ 	u16 i;
+ 
+-	if (vsi->type == ICE_VSI_VF)
+-		goto setup_rings;
+-
+-	ice_vsi_cfg_frame_size(vsi);
+-setup_rings:
+ 	/* set up individual rings */
+ 	ice_for_each_rxq(vsi, i) {
+-		int err = ice_vsi_cfg_rxq(vsi->rx_rings[i]);
++		struct ice_rx_ring *ring = vsi->rx_rings[i];
++		int err;
++
++		if (vsi->type != ICE_VSI_VF)
++			ice_vsi_cfg_frame_size(vsi, ring);
+ 
++		err = ice_vsi_cfg_rxq(ring);
+ 		if (err)
+ 			return err;
+ 	}
+diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
+index cde69f56866562..431a6ed498a4ed 100644
+--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
++++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
+@@ -865,10 +865,6 @@ ice_add_xdp_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
+ 	__skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, rx_buf->page,
+ 				   rx_buf->page_offset, size);
+ 	sinfo->xdp_frags_size += size;
+-	/* remember frag count before XDP prog execution; bpf_xdp_adjust_tail()
+-	 * can pop off frags but driver has to handle it on its own
+-	 */
+-	rx_ring->nr_frags = sinfo->nr_frags;
+ 
+ 	if (page_is_pfmemalloc(rx_buf->page))
+ 		xdp_buff_set_frag_pfmemalloc(xdp);
+@@ -939,20 +935,20 @@ ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size,
+ /**
+  * ice_get_pgcnts - grab page_count() for gathered fragments
+  * @rx_ring: Rx descriptor ring to store the page counts on
++ * @ntc: the next to clean element (not included in this frame!)
+  *
+  * This function is intended to be called right before running XDP
+  * program so that the page recycling mechanism will be able to take
+  * a correct decision regarding underlying pages; this is done in such
+  * way as XDP program can change the refcount of page
+  */
+-static void ice_get_pgcnts(struct ice_rx_ring *rx_ring)
++static void ice_get_pgcnts(struct ice_rx_ring *rx_ring, unsigned int ntc)
+ {
+-	u32 nr_frags = rx_ring->nr_frags + 1;
+ 	u32 idx = rx_ring->first_desc;
+ 	struct ice_rx_buf *rx_buf;
+ 	u32 cnt = rx_ring->count;
+ 
+-	for (int i = 0; i < nr_frags; i++) {
++	while (idx != ntc) {
+ 		rx_buf = &rx_ring->rx_buf[idx];
+ 		rx_buf->pgcnt = page_count(rx_buf->page);
+ 
+@@ -1125,62 +1121,51 @@ ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf)
+ }
+ 
+ /**
+- * ice_put_rx_mbuf - ice_put_rx_buf() caller, for all frame frags
++ * ice_put_rx_mbuf - ice_put_rx_buf() caller, for all buffers in frame
+  * @rx_ring: Rx ring with all the auxiliary data
+  * @xdp: XDP buffer carrying linear + frags part
+- * @xdp_xmit: XDP_TX/XDP_REDIRECT verdict storage
+- * @ntc: a current next_to_clean value to be stored at rx_ring
++ * @ntc: the next to clean element (not included in this frame!)
+  * @verdict: return code from XDP program execution
+  *
+- * Walk through gathered fragments and satisfy internal page
+- * recycle mechanism; we take here an action related to verdict
+- * returned by XDP program;
++ * Called after XDP program is completed, or on error with verdict set to
++ * ICE_XDP_CONSUMED.
++ *
++ * Walk through buffers from first_desc to the end of the frame, releasing
++ * buffers and satisfying internal page recycle mechanism. The action depends
++ * on verdict from XDP program.
+  */
+ static void ice_put_rx_mbuf(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
+-			    u32 *xdp_xmit, u32 ntc, u32 verdict)
++			    u32 ntc, u32 verdict)
+ {
+-	u32 nr_frags = rx_ring->nr_frags + 1;
+ 	u32 idx = rx_ring->first_desc;
+ 	u32 cnt = rx_ring->count;
+-	u32 post_xdp_frags = 1;
+ 	struct ice_rx_buf *buf;
+-	int i;
++	u32 xdp_frags = 0;
++	int i = 0;
+ 
+ 	if (unlikely(xdp_buff_has_frags(xdp)))
+-		post_xdp_frags += xdp_get_shared_info_from_buff(xdp)->nr_frags;
++		xdp_frags = xdp_get_shared_info_from_buff(xdp)->nr_frags;
+ 
+-	for (i = 0; i < post_xdp_frags; i++) {
++	while (idx != ntc) {
+ 		buf = &rx_ring->rx_buf[idx];
++		if (++idx == cnt)
++			idx = 0;
+ 
+-		if (verdict & (ICE_XDP_TX | ICE_XDP_REDIR)) {
++		/* An XDP program could release fragments from the end of the
++		 * buffer. For these, we need to keep the pagecnt_bias as-is.
++		 * To do this, only adjust pagecnt_bias for fragments up to
++		 * the total remaining after the XDP program has run.
++		 */
++		if (verdict != ICE_XDP_CONSUMED)
+ 			ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
+-			*xdp_xmit |= verdict;
+-		} else if (verdict & ICE_XDP_CONSUMED) {
++		else if (i++ <= xdp_frags)
+ 			buf->pagecnt_bias++;
+-		} else if (verdict == ICE_XDP_PASS) {
+-			ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
+-		}
+ 
+ 		ice_put_rx_buf(rx_ring, buf);
+-
+-		if (++idx == cnt)
+-			idx = 0;
+-	}
+-	/* handle buffers that represented frags released by XDP prog;
+-	 * for these we keep pagecnt_bias as-is; refcount from struct page
+-	 * has been decremented within XDP prog and we do not have to increase
+-	 * the biased refcnt
+-	 */
+-	for (; i < nr_frags; i++) {
+-		buf = &rx_ring->rx_buf[idx];
+-		ice_put_rx_buf(rx_ring, buf);
+-		if (++idx == cnt)
+-			idx = 0;
+ 	}
+ 
+ 	xdp->data = NULL;
+ 	rx_ring->first_desc = ntc;
+-	rx_ring->nr_frags = 0;
+ }
+ 
+ /**
+@@ -1260,6 +1245,10 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
+ 		/* retrieve a buffer from the ring */
+ 		rx_buf = ice_get_rx_buf(rx_ring, size, ntc);
+ 
++		/* Increment ntc before calls to ice_put_rx_mbuf() */
++		if (++ntc == cnt)
++			ntc = 0;
++
+ 		if (!xdp->data) {
+ 			void *hard_start;
+ 
+@@ -1268,24 +1257,23 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
+ 			xdp_prepare_buff(xdp, hard_start, offset, size, !!offset);
+ 			xdp_buff_clear_frags_flag(xdp);
+ 		} else if (ice_add_xdp_frag(rx_ring, xdp, rx_buf, size)) {
+-			ice_put_rx_mbuf(rx_ring, xdp, NULL, ntc, ICE_XDP_CONSUMED);
++			ice_put_rx_mbuf(rx_ring, xdp, ntc, ICE_XDP_CONSUMED);
+ 			break;
+ 		}
+-		if (++ntc == cnt)
+-			ntc = 0;
+ 
+ 		/* skip if it is NOP desc */
+ 		if (ice_is_non_eop(rx_ring, rx_desc))
+ 			continue;
+ 
+-		ice_get_pgcnts(rx_ring);
++		ice_get_pgcnts(rx_ring, ntc);
+ 		xdp_verdict = ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_desc);
+ 		if (xdp_verdict == ICE_XDP_PASS)
+ 			goto construct_skb;
+ 		total_rx_bytes += xdp_get_buff_len(xdp);
+ 		total_rx_pkts++;
+ 
+-		ice_put_rx_mbuf(rx_ring, xdp, &xdp_xmit, ntc, xdp_verdict);
++		ice_put_rx_mbuf(rx_ring, xdp, ntc, xdp_verdict);
++		xdp_xmit |= xdp_verdict & (ICE_XDP_TX | ICE_XDP_REDIR);
+ 
+ 		continue;
+ construct_skb:
+@@ -1298,7 +1286,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
+ 			rx_ring->ring_stats->rx_stats.alloc_buf_failed++;
+ 			xdp_verdict = ICE_XDP_CONSUMED;
+ 		}
+-		ice_put_rx_mbuf(rx_ring, xdp, &xdp_xmit, ntc, xdp_verdict);
++		ice_put_rx_mbuf(rx_ring, xdp, ntc, xdp_verdict);
+ 
+ 		if (!skb)
+ 			break;
+diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
+index 7130992d417798..a13531c21d4ad8 100644
+--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
++++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
+@@ -357,9 +357,9 @@ struct ice_rx_ring {
+ 	struct ice_tx_ring *xdp_ring;
+ 	struct ice_rx_ring *next;	/* pointer to next ring in q_vector */
+ 	struct xsk_buff_pool *xsk_pool;
+-	u32 nr_frags;
+-	dma_addr_t dma;			/* physical address of ring */
++	u16 max_frame;
+ 	u16 rx_buf_len;
++	dma_addr_t dma;			/* physical address of ring */
+ 	u8 dcb_tc;			/* Traffic class of ring */
+ 	u8 ptp_rx;
+ #define ICE_RX_FLAGS_RING_BUILD_SKB	BIT(1)
+diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+index 87ffd25b268a2e..471d64d202b760 100644
+--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
++++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+@@ -1748,19 +1748,18 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
+ 			    (qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
+ 			     qpi->rxq.databuffer_size < 1024))
+ 				goto error_param;
+-			vsi->rx_buf_len = qpi->rxq.databuffer_size;
+-			ring->rx_buf_len = vsi->rx_buf_len;
++			ring->rx_buf_len = qpi->rxq.databuffer_size;
+ 			if (qpi->rxq.max_pkt_size > max_frame_size ||
+ 			    qpi->rxq.max_pkt_size < 64)
+ 				goto error_param;
+ 
+-			vsi->max_frame = qpi->rxq.max_pkt_size;
++			ring->max_frame = qpi->rxq.max_pkt_size;
+ 			/* add space for the port VLAN since the VF driver is
+ 			 * not expected to account for it in the MTU
+ 			 * calculation
+ 			 */
+ 			if (ice_vf_is_port_vlan_ena(vf))
+-				vsi->max_frame += VLAN_HLEN;
++				ring->max_frame += VLAN_HLEN;
+ 
+ 			if (ice_vsi_cfg_single_rxq(vsi, q_idx)) {
+ 				dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure RX queue %d\n",
+diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
+index 323db1e2be3886..79d5fc5ac4fcec 100644
+--- a/drivers/net/ethernet/intel/igc/igc.h
++++ b/drivers/net/ethernet/intel/igc/igc.h
+@@ -336,6 +336,7 @@ struct igc_adapter {
+ 	/* LEDs */
+ 	struct mutex led_mutex;
+ 	struct igc_led_classdev *leds;
++	bool leds_available;
+ };
+ 
+ void igc_up(struct igc_adapter *adapter);
+diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
+index aadc0667fa04a4..9ba41a427e141c 100644
+--- a/drivers/net/ethernet/intel/igc/igc_main.c
++++ b/drivers/net/ethernet/intel/igc/igc_main.c
+@@ -7169,8 +7169,14 @@ static int igc_probe(struct pci_dev *pdev,
+ 
+ 	if (IS_ENABLED(CONFIG_IGC_LEDS)) {
+ 		err = igc_led_setup(adapter);
+-		if (err)
+-			goto err_register;
++		if (err) {
++			netdev_warn_once(netdev,
++					 "LED init failed (%d); continuing without LED support\n",
++					 err);
++			adapter->leds_available = false;
++		} else {
++			adapter->leds_available = true;
++		}
+ 	}
+ 
+ 	return 0;
+@@ -7226,7 +7232,7 @@ static void igc_remove(struct pci_dev *pdev)
+ 	cancel_work_sync(&adapter->watchdog_task);
+ 	hrtimer_cancel(&adapter->hrtimer);
+ 
+-	if (IS_ENABLED(CONFIG_IGC_LEDS))
++	if (IS_ENABLED(CONFIG_IGC_LEDS) && adapter->leds_available)
+ 		igc_led_free(adapter);
+ 
+ 	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
+diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c
+index e6eb98d70f3c42..6334b68f28d796 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c
++++ b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c
+@@ -177,6 +177,7 @@ static void octep_pfvf_get_mac_addr(struct octep_device *oct,  u32 vf_id,
+ 		dev_err(&oct->pdev->dev, "Get VF MAC address failed via host control Mbox\n");
+ 		return;
+ 	}
++	ether_addr_copy(oct->vf_info[vf_id].mac_addr, rsp->s_set_mac.mac_addr);
+ 	rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK;
+ }
+ 
+@@ -186,6 +187,8 @@ static void octep_pfvf_dev_remove(struct octep_device *oct,  u32 vf_id,
+ {
+ 	int err;
+ 
++	/* Reset VF-specific information maintained by the PF */
++	memset(&oct->vf_info[vf_id], 0, sizeof(struct octep_pfvf_info));
+ 	err = octep_ctrl_net_dev_remove(oct, vf_id);
+ 	if (err) {
+ 		rsp->s.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK;
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
+index 63130ba37e9df1..69b435ed8fbbe9 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
+@@ -491,7 +491,7 @@ void otx2_ptp_destroy(struct otx2_nic *pfvf)
+ 	if (!ptp)
+ 		return;
+ 
+-	cancel_delayed_work(&pfvf->ptp->synctstamp_work);
++	cancel_delayed_work_sync(&pfvf->ptp->synctstamp_work);
+ 
+ 	ptp_clock_unregister(ptp->ptp_clock);
+ 	kfree(ptp);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 6176457b846bc1..de2327ffb0f788 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -135,8 +135,6 @@ void mlx5e_update_carrier(struct mlx5e_priv *priv)
+ 	if (up) {
+ 		netdev_info(priv->netdev, "Link up\n");
+ 		netif_carrier_on(priv->netdev);
+-		mlx5e_port_manual_buffer_config(priv, 0, priv->netdev->mtu,
+-						NULL, NULL, NULL);
+ 	} else {
+ 		netdev_info(priv->netdev, "Link down\n");
+ 		netif_carrier_off(priv->netdev);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+index 18ec392d17404c..b561358474c4ff 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+@@ -1497,12 +1497,21 @@ static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
+ static int
+ mlx5e_vport_uplink_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
+ {
+-	struct mlx5e_priv *priv = netdev_priv(mlx5_uplink_netdev_get(dev));
+ 	struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
++	struct net_device *netdev;
++	struct mlx5e_priv *priv;
++	int err;
++
++	netdev = mlx5_uplink_netdev_get(dev);
++	if (!netdev)
++		return 0;
+ 
++	priv = netdev_priv(netdev);
+ 	rpriv->netdev = priv->netdev;
+-	return mlx5e_netdev_change_profile(priv, &mlx5e_uplink_rep_profile,
+-					   rpriv);
++	err = mlx5e_netdev_change_profile(priv, &mlx5e_uplink_rep_profile,
++					  rpriv);
++	mlx5_uplink_netdev_put(dev, netdev);
++	return err;
+ }
+ 
+ static void
+@@ -1629,8 +1638,16 @@ mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
+ {
+ 	struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
+ 	struct net_device *netdev = rpriv->netdev;
+-	struct mlx5e_priv *priv = netdev_priv(netdev);
+-	void *ppriv = priv->ppriv;
++	struct mlx5e_priv *priv;
++	void *ppriv;
++
++	if (!netdev) {
++		ppriv = rpriv;
++		goto free_ppriv;
++	}
++
++	priv = netdev_priv(netdev);
++	ppriv = priv->ppriv;
+ 
+ 	if (rep->vport == MLX5_VPORT_UPLINK) {
+ 		mlx5e_vport_uplink_rep_unload(rpriv);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+index 02a3563f51ad26..d8c304427e2ab8 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+@@ -733,6 +733,7 @@ static u32 mlx5_esw_qos_lag_link_speed_get_locked(struct mlx5_core_dev *mdev)
+ 		speed = lksettings.base.speed;
+ 
+ out:
++	mlx5_uplink_netdev_put(mdev, slave);
+ 	return speed;
+ }
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
+index 37d5f445598c7b..a7486e6d0d5eff 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
+@@ -52,7 +52,20 @@ static inline struct net *mlx5_core_net(struct mlx5_core_dev *dev)
+ 
+ static inline struct net_device *mlx5_uplink_netdev_get(struct mlx5_core_dev *mdev)
+ {
+-	return mdev->mlx5e_res.uplink_netdev;
++	struct mlx5e_resources *mlx5e_res = &mdev->mlx5e_res;
++	struct net_device *netdev;
++
++	mutex_lock(&mlx5e_res->uplink_netdev_lock);
++	netdev = mlx5e_res->uplink_netdev;
++	netdev_hold(netdev, &mlx5e_res->tracker, GFP_KERNEL);
++	mutex_unlock(&mlx5e_res->uplink_netdev_lock);
++	return netdev;
++}
++
++static inline void mlx5_uplink_netdev_put(struct mlx5_core_dev *mdev,
++					  struct net_device *netdev)
++{
++	netdev_put(netdev, &mdev->mlx5e_res.tracker);
+ }
+ 
+ struct mlx5_sd;
+diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
+index 998586872599b3..c692d2e878b2e3 100644
+--- a/drivers/net/ethernet/natsemi/ns83820.c
++++ b/drivers/net/ethernet/natsemi/ns83820.c
+@@ -820,7 +820,7 @@ static void rx_irq(struct net_device *ndev)
+ 	struct ns83820 *dev = PRIV(ndev);
+ 	struct rx_info *info = &dev->rx_info;
+ 	unsigned next_rx;
+-	int rx_rc, len;
++	int len;
+ 	u32 cmdsts;
+ 	__le32 *desc;
+ 	unsigned long flags;
+@@ -881,8 +881,10 @@ static void rx_irq(struct net_device *ndev)
+ 		if (likely(CMDSTS_OK & cmdsts)) {
+ #endif
+ 			skb_put(skb, len);
+-			if (unlikely(!skb))
++			if (unlikely(!skb)) {
++				ndev->stats.rx_dropped++;
+ 				goto netdev_mangle_me_harder_failed;
++			}
+ 			if (cmdsts & CMDSTS_DEST_MULTI)
+ 				ndev->stats.multicast++;
+ 			ndev->stats.rx_packets++;
+@@ -901,15 +903,12 @@ static void rx_irq(struct net_device *ndev)
+ 				__vlan_hwaccel_put_tag(skb, htons(ETH_P_IPV6), tag);
+ 			}
+ #endif
+-			rx_rc = netif_rx(skb);
+-			if (NET_RX_DROP == rx_rc) {
+-netdev_mangle_me_harder_failed:
+-				ndev->stats.rx_dropped++;
+-			}
++			netif_rx(skb);
+ 		} else {
+ 			dev_kfree_skb_irq(skb);
+ 		}
+ 
++netdev_mangle_me_harder_failed:
+ 		nr++;
+ 		next_rx = info->next_rx;
+ 		desc = info->descs + (DESC_SIZE * next_rx);
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
+index f67be4b8ad4351..523cbd91baf491 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
+@@ -4461,10 +4461,11 @@ static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
+ 		goto out;
+ 	}
+ 
+-	/* Add override window info to buffer */
++	/* Add override window info to buffer, preventing buffer overflow */
+ 	override_window_dwords =
+-		qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
+-		PROTECTION_OVERRIDE_ELEMENT_DWORDS;
++		min(qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
++		PROTECTION_OVERRIDE_ELEMENT_DWORDS,
++		PROTECTION_OVERRIDE_DEPTH_DWORDS);
+ 	if (override_window_dwords) {
+ 		addr = BYTES_TO_DWORDS(GRC_REG_PROTECTION_OVERRIDE_WINDOW);
+ 		offset += qed_grc_dump_addr_range(p_hwfn,
+diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
+index c48c2de6f961f7..e1e472b3a301ef 100644
+--- a/drivers/net/vmxnet3/vmxnet3_drv.c
++++ b/drivers/net/vmxnet3/vmxnet3_drv.c
+@@ -2051,6 +2051,11 @@ vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
+ 
+ 	rq->comp_ring.gen = VMXNET3_INIT_GEN;
+ 	rq->comp_ring.next2proc = 0;
++
++	if (xdp_rxq_info_is_reg(&rq->xdp_rxq))
++		xdp_rxq_info_unreg(&rq->xdp_rxq);
++	page_pool_destroy(rq->page_pool);
++	rq->page_pool = NULL;
+ }
+ 
+ 
+@@ -2091,11 +2096,6 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
+ 		}
+ 	}
+ 
+-	if (xdp_rxq_info_is_reg(&rq->xdp_rxq))
+-		xdp_rxq_info_unreg(&rq->xdp_rxq);
+-	page_pool_destroy(rq->page_pool);
+-	rq->page_pool = NULL;
+-
+ 	if (rq->data_ring.base) {
+ 		dma_free_coherent(&adapter->pdev->dev,
+ 				  rq->rx_ring[0].size * rq->data_ring.desc_size,
+diff --git a/drivers/net/wireless/microchip/wilc1000/wlan_cfg.c b/drivers/net/wireless/microchip/wilc1000/wlan_cfg.c
+index 131388886acbfa..cfabd5aebb5400 100644
+--- a/drivers/net/wireless/microchip/wilc1000/wlan_cfg.c
++++ b/drivers/net/wireless/microchip/wilc1000/wlan_cfg.c
+@@ -41,10 +41,10 @@ static const struct wilc_cfg_word g_cfg_word[] = {
+ };
+ 
+ static const struct wilc_cfg_str g_cfg_str[] = {
+-	{WID_FIRMWARE_VERSION, NULL},
+-	{WID_MAC_ADDR, NULL},
+-	{WID_ASSOC_RES_INFO, NULL},
+-	{WID_NIL, NULL}
++	{WID_FIRMWARE_VERSION, 0, NULL},
++	{WID_MAC_ADDR, 0, NULL},
++	{WID_ASSOC_RES_INFO, 0, NULL},
++	{WID_NIL, 0, NULL}
+ };
+ 
+ #define WILC_RESP_MSG_TYPE_CONFIG_REPLY		'R'
+@@ -147,44 +147,58 @@ static void wilc_wlan_parse_response_frame(struct wilc *wl, u8 *info, int size)
+ 
+ 		switch (FIELD_GET(WILC_WID_TYPE, wid)) {
+ 		case WID_CHAR:
++			len = 3;
++			if (len + 2  > size)
++				return;
++
+ 			while (cfg->b[i].id != WID_NIL && cfg->b[i].id != wid)
+ 				i++;
+ 
+ 			if (cfg->b[i].id == wid)
+ 				cfg->b[i].val = info[4];
+ 
+-			len = 3;
+ 			break;
+ 
+ 		case WID_SHORT:
++			len = 4;
++			if (len + 2  > size)
++				return;
++
+ 			while (cfg->hw[i].id != WID_NIL && cfg->hw[i].id != wid)
+ 				i++;
+ 
+ 			if (cfg->hw[i].id == wid)
+ 				cfg->hw[i].val = get_unaligned_le16(&info[4]);
+ 
+-			len = 4;
+ 			break;
+ 
+ 		case WID_INT:
++			len = 6;
++			if (len + 2  > size)
++				return;
++
+ 			while (cfg->w[i].id != WID_NIL && cfg->w[i].id != wid)
+ 				i++;
+ 
+ 			if (cfg->w[i].id == wid)
+ 				cfg->w[i].val = get_unaligned_le32(&info[4]);
+ 
+-			len = 6;
+ 			break;
+ 
+ 		case WID_STR:
++			len = 2 + get_unaligned_le16(&info[2]);
++
+ 			while (cfg->s[i].id != WID_NIL && cfg->s[i].id != wid)
+ 				i++;
+ 
+-			if (cfg->s[i].id == wid)
++			if (cfg->s[i].id == wid) {
++				if (len > cfg->s[i].len || (len + 2  > size))
++					return;
++
+ 				memcpy(cfg->s[i].str, &info[2],
+-				       get_unaligned_le16(&info[2]) + 2);
++				       len);
++			}
+ 
+-			len = 2 + get_unaligned_le16(&info[2]);
+ 			break;
+ 
+ 		default:
+@@ -384,12 +398,15 @@ int wilc_wlan_cfg_init(struct wilc *wl)
+ 	/* store the string cfg parameters */
+ 	wl->cfg.s[i].id = WID_FIRMWARE_VERSION;
+ 	wl->cfg.s[i].str = str_vals->firmware_version;
++	wl->cfg.s[i].len = sizeof(str_vals->firmware_version);
+ 	i++;
+ 	wl->cfg.s[i].id = WID_MAC_ADDR;
+ 	wl->cfg.s[i].str = str_vals->mac_address;
++	wl->cfg.s[i].len = sizeof(str_vals->mac_address);
+ 	i++;
+ 	wl->cfg.s[i].id = WID_ASSOC_RES_INFO;
+ 	wl->cfg.s[i].str = str_vals->assoc_rsp;
++	wl->cfg.s[i].len = sizeof(str_vals->assoc_rsp);
+ 	i++;
+ 	wl->cfg.s[i].id = WID_NIL;
+ 	wl->cfg.s[i].str = NULL;
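The wlan_cfg.c changes above make the response parser defensive: each fixed-size WID record now verifies that len + 2 bytes actually remain in the frame before touching the payload, and the WID_STR case checks the peer-supplied length against both the frame and the destination buffer, whose capacity the driver starts recording through the new `len` member added in wlan_cfg.h just below. A simplified sketch of a bounds-checked, length-prefixed copy; the field layout is illustrative, not the exact WILC wire format:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Copy one length-prefixed value out of buf, refusing to run past either
 * the source frame or the destination capacity. */
static int copy_tlv_value(const uint8_t *buf, size_t size,
			  uint8_t *dst, size_t dst_cap)
{
	uint16_t len;

	if (size < 2)
		return -1;			/* no room for the length field */
	len = buf[0] | (buf[1] << 8);		/* little-endian length */
	if (len > dst_cap || (size_t)len + 2 > size)
		return -1;			/* would overflow dst or src */
	memcpy(dst, buf + 2, len);
	return len;
}

int main(void)
{
	const uint8_t resp[] = { 0x03, 0x00, 'a', 'b', 'c' };
	uint8_t out[4];
	int n = copy_tlv_value(resp, sizeof(resp), out, sizeof(out));

	printf("copied %d bytes\n", n);		/* copied 3 bytes */
	return 0;
}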
+diff --git a/drivers/net/wireless/microchip/wilc1000/wlan_cfg.h b/drivers/net/wireless/microchip/wilc1000/wlan_cfg.h
+index 7038b74f8e8ff6..5ae74bced7d748 100644
+--- a/drivers/net/wireless/microchip/wilc1000/wlan_cfg.h
++++ b/drivers/net/wireless/microchip/wilc1000/wlan_cfg.h
+@@ -24,12 +24,13 @@ struct wilc_cfg_word {
+ 
+ struct wilc_cfg_str {
+ 	u16 id;
++	u16 len;
+ 	u8 *str;
+ };
+ 
+ struct wilc_cfg_str_vals {
+-	u8 mac_address[7];
+-	u8 firmware_version[129];
++	u8 mac_address[8];
++	u8 firmware_version[130];
+ 	u8 assoc_rsp[WILC_MAX_ASSOC_RESP_FRAME_SIZE];
+ };
+ 
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 9e223574db7f77..24d82d35041b54 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -890,6 +890,15 @@ static void nvme_set_ref_tag(struct nvme_ns *ns, struct nvme_command *cmnd,
+ 	u32 upper, lower;
+ 	u64 ref48;
+ 
++	/* only type1 and type2 PI formats have a reftag */
++	switch (ns->head->pi_type) {
++	case NVME_NS_DPS_PI_TYPE1:
++	case NVME_NS_DPS_PI_TYPE2:
++		break;
++	default:
++		return;
++	}
++
+ 	/* both rw and write zeroes share the same reftag format */
+ 	switch (ns->head->guard_type) {
+ 	case NVME_NVM_NS_16B_GUARD:
+@@ -929,13 +938,7 @@ static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
+ 
+ 	if (nvme_ns_has_pi(ns->head)) {
+ 		cmnd->write_zeroes.control |= cpu_to_le16(NVME_RW_PRINFO_PRACT);
+-
+-		switch (ns->head->pi_type) {
+-		case NVME_NS_DPS_PI_TYPE1:
+-		case NVME_NS_DPS_PI_TYPE2:
+-			nvme_set_ref_tag(ns, cmnd, req);
+-			break;
+-		}
++		nvme_set_ref_tag(ns, cmnd, req);
+ 	}
+ 
+ 	return BLK_STS_OK;
+@@ -1014,6 +1017,7 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
+ 			if (WARN_ON_ONCE(!nvme_ns_has_pi(ns->head)))
+ 				return BLK_STS_NOTSUPP;
+ 			control |= NVME_RW_PRINFO_PRACT;
++			nvme_set_ref_tag(ns, cmnd, req);
+ 		}
+ 
+ 		switch (ns->head->pi_type) {
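In the nvme hunks, the type1/type2 test moves into nvme_set_ref_tag() itself, making the helper a no-op for PI formats without a reftag; new call sites, like the PRACT branch in nvme_setup_rw(), then cannot forget the check. A small model of the guard-clause pattern:

#include <stdio.h>

enum pi_type { PI_NONE, PI_TYPE1, PI_TYPE2, PI_TYPE3 };

/* The helper validates its own precondition instead of trusting callers. */
static void set_ref_tag(enum pi_type t, unsigned long long *reftag)
{
	switch (t) {
	case PI_TYPE1:
	case PI_TYPE2:
		break;			/* only these formats carry a reftag */
	default:
		return;			/* silently a no-op elsewhere */
	}
	*reftag = 0x1234;		/* placeholder tag value */
}

int main(void)
{
	unsigned long long tag = 0;

	set_ref_tag(PI_TYPE3, &tag);
	printf("tag=%llx\n", tag);	/* stays 0 for type3 */
	return 0;
}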
+diff --git a/drivers/pcmcia/omap_cf.c b/drivers/pcmcia/omap_cf.c
+index 5b639c942f17a9..a2f50db2b8bab2 100644
+--- a/drivers/pcmcia/omap_cf.c
++++ b/drivers/pcmcia/omap_cf.c
+@@ -304,7 +304,13 @@ static void __exit omap_cf_remove(struct platform_device *pdev)
+ 	kfree(cf);
+ }
+ 
+-static struct platform_driver omap_cf_driver = {
++/*
++ * omap_cf_remove() lives in .exit.text. For drivers registered via
++ * platform_driver_probe() this is ok because they cannot get unbound at
++ * runtime. So mark the driver struct with __refdata to prevent modpost
++ * triggering a section mismatch warning.
++ */
++static struct platform_driver omap_cf_driver __refdata = {
+ 	.driver = {
+ 		.name	= driver_name,
+ 	},
+diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
+index 8f06adf828361a..1955495ea95f4a 100644
+--- a/drivers/platform/x86/asus-nb-wmi.c
++++ b/drivers/platform/x86/asus-nb-wmi.c
+@@ -146,7 +146,12 @@ static struct quirk_entry quirk_asus_ignore_fan = {
+ };
+ 
+ static struct quirk_entry quirk_asus_zenbook_duo_kbd = {
+-	.ignore_key_wlan = true,
++	.key_wlan_event = ASUS_WMI_KEY_IGNORE,
++};
++
++static struct quirk_entry quirk_asus_z13 = {
++	.key_wlan_event = ASUS_WMI_KEY_ARMOURY,
++	.tablet_switch_mode = asus_wmi_kbd_dock_devid,
+ };
+ 
+ static int dmi_matched(const struct dmi_system_id *dmi)
+@@ -538,6 +543,15 @@ static const struct dmi_system_id asus_quirks[] = {
+ 		},
+ 		.driver_data = &quirk_asus_zenbook_duo_kbd,
+ 	},
++	{
++		.callback = dmi_matched,
++		.ident = "ASUS ROG Z13",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "ROG Flow Z13"),
++		},
++		.driver_data = &quirk_asus_z13,
++	},
+ 	{},
+ };
+ 
+@@ -635,6 +649,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
+ 	{ KE_IGNORE, 0xCF, },	/* AC mode */
+ 	{ KE_KEY, 0xFA, { KEY_PROG2 } },           /* Lid flip action */
+ 	{ KE_KEY, 0xBD, { KEY_PROG2 } },           /* Lid flip action on ROG xflow laptops */
++	{ KE_KEY, ASUS_WMI_KEY_ARMOURY, { KEY_PROG3 } },
+ 	{ KE_END, 0},
+ };
+ 
+@@ -654,9 +669,11 @@ static void asus_nb_wmi_key_filter(struct asus_wmi_driver *asus_wmi, int *code,
+ 		if (atkbd_reports_vol_keys)
+ 			*code = ASUS_WMI_KEY_IGNORE;
+ 		break;
+-	case 0x5F: /* Wireless console Disable */
+-		if (quirks->ignore_key_wlan)
+-			*code = ASUS_WMI_KEY_IGNORE;
++	case 0x5D: /* Wireless console Toggle */
++	case 0x5E: /* Wireless console Enable / Keyboard Attach, Detach */
++	case 0x5F: /* Wireless console Disable / Special Key */
++		if (quirks->key_wlan_event)
++			*code = quirks->key_wlan_event;
+ 		break;
+ 	}
+ }
+diff --git a/drivers/platform/x86/asus-wmi.h b/drivers/platform/x86/asus-wmi.h
+index d02f15fd3482fa..56fe5d8a1196ed 100644
+--- a/drivers/platform/x86/asus-wmi.h
++++ b/drivers/platform/x86/asus-wmi.h
+@@ -18,6 +18,7 @@
+ #include <linux/i8042.h>
+ 
+ #define ASUS_WMI_KEY_IGNORE (-1)
++#define ASUS_WMI_KEY_ARMOURY	0xffff01
+ #define ASUS_WMI_BRN_DOWN	0x2e
+ #define ASUS_WMI_BRN_UP		0x2f
+ 
+@@ -40,7 +41,7 @@ struct quirk_entry {
+ 	bool wmi_force_als_set;
+ 	bool wmi_ignore_fan;
+ 	bool filter_i8042_e1_extended_codes;
+-	bool ignore_key_wlan;
++	int key_wlan_event;
+ 	enum asus_wmi_tablet_switch_mode tablet_switch_mode;
+ 	int wapf;
+ 	/*
+diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c
+index 871f03d160c53a..14be797e89c3d1 100644
+--- a/drivers/power/supply/bq27xxx_battery.c
++++ b/drivers/power/supply/bq27xxx_battery.c
+@@ -1909,8 +1909,8 @@ static void bq27xxx_battery_update_unlocked(struct bq27xxx_device_info *di)
+ 	bool has_singe_flag = di->opts & BQ27XXX_O_ZERO;
+ 
+ 	cache.flags = bq27xxx_read(di, BQ27XXX_REG_FLAGS, has_singe_flag);
+-	if ((cache.flags & 0xff) == 0xff)
+-		cache.flags = -1; /* read error */
++	if (di->chip == BQ27000 && (cache.flags & 0xff) == 0xff)
++		cache.flags = -ENODEV; /* bq27000 hdq read error */
+ 	if (cache.flags >= 0) {
+ 		cache.capacity = bq27xxx_battery_read_soc(di);
+ 
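Two things change in the bq27xxx hunk: an all-ones low flag byte is treated as a read failure only on the HDQ-attached bq27000, where 0xff genuinely means the bus returned nothing (other chips can legitimately report 0xff), and the sentinel becomes a real errno rather than a bare -1. A toy version of the distinction:

#include <errno.h>
#include <stdio.h>

#define CHIP_BQ27000	0
#define CHIP_OTHER	1

/* Only the HDQ-attached bq27000 reports a failed read as all-ones. */
static int read_flags(int chip, int raw)
{
	if (chip == CHIP_BQ27000 && (raw & 0xff) == 0xff)
		return -ENODEV;		/* a real errno, not a bare -1 */
	return raw;
}

int main(void)
{
	printf("%d %d\n", read_flags(CHIP_BQ27000, 0xff),
	       read_flags(CHIP_OTHER, 0xff));	/* typically -19 255 */
	return 0;
}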
+diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c
+index fc079b9dcf7192..502571f0c203fa 100644
+--- a/drivers/rtc/rtc-pcf2127.c
++++ b/drivers/rtc/rtc-pcf2127.c
+@@ -1383,11 +1383,6 @@ static int pcf2127_i2c_probe(struct i2c_client *client)
+ 		variant = &pcf21xx_cfg[type];
+ 	}
+ 
+-	if (variant->type == PCF2131) {
+-		config.read_flag_mask = 0x0;
+-		config.write_flag_mask = 0x0;
+-	}
+-
+ 	config.max_register = variant->max_register,
+ 
+ 	regmap = devm_regmap_init(&client->dev, &pcf2127_i2c_regmap,
+@@ -1461,6 +1456,11 @@ static int pcf2127_spi_probe(struct spi_device *spi)
+ 		variant = &pcf21xx_cfg[type];
+ 	}
+ 
++	if (variant->type == PCF2131) {
++		config.read_flag_mask = 0x0;
++		config.write_flag_mask = 0x0;
++	}
++
+ 	config.max_register = variant->max_register;
+ 
+ 	regmap = devm_regmap_init_spi(spi, &config);
+diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c
+index bdc664ad6a934c..1fcc9348dd439d 100644
+--- a/drivers/usb/host/xhci-dbgcap.c
++++ b/drivers/usb/host/xhci-dbgcap.c
+@@ -101,13 +101,34 @@ static u32 xhci_dbc_populate_strings(struct dbc_str_descs *strings)
+ 	return string_length;
+ }
+ 
++static void xhci_dbc_init_ep_contexts(struct xhci_dbc *dbc)
++{
++	struct xhci_ep_ctx      *ep_ctx;
++	unsigned int		max_burst;
++	dma_addr_t		deq;
++
++	max_burst               = DBC_CTRL_MAXBURST(readl(&dbc->regs->control));
++
++	/* Populate bulk out endpoint context: */
++	ep_ctx                  = dbc_bulkout_ctx(dbc);
++	deq                     = dbc_bulkout_enq(dbc);
++	ep_ctx->ep_info         = 0;
++	ep_ctx->ep_info2        = dbc_epctx_info2(BULK_OUT_EP, 1024, max_burst);
++	ep_ctx->deq             = cpu_to_le64(deq | dbc->ring_out->cycle_state);
++
++	/* Populate bulk in endpoint context: */
++	ep_ctx                  = dbc_bulkin_ctx(dbc);
++	deq                     = dbc_bulkin_enq(dbc);
++	ep_ctx->ep_info         = 0;
++	ep_ctx->ep_info2        = dbc_epctx_info2(BULK_IN_EP, 1024, max_burst);
++	ep_ctx->deq             = cpu_to_le64(deq | dbc->ring_in->cycle_state);
++}
++
+ static void xhci_dbc_init_contexts(struct xhci_dbc *dbc, u32 string_length)
+ {
+ 	struct dbc_info_context	*info;
+-	struct xhci_ep_ctx	*ep_ctx;
+ 	u32			dev_info;
+-	dma_addr_t		deq, dma;
+-	unsigned int		max_burst;
++	dma_addr_t		dma;
+ 
+ 	if (!dbc)
+ 		return;
+@@ -121,20 +142,8 @@ static void xhci_dbc_init_contexts(struct xhci_dbc *dbc, u32 string_length)
+ 	info->serial		= cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 3);
+ 	info->length		= cpu_to_le32(string_length);
+ 
+-	/* Populate bulk out endpoint context: */
+-	ep_ctx			= dbc_bulkout_ctx(dbc);
+-	max_burst		= DBC_CTRL_MAXBURST(readl(&dbc->regs->control));
+-	deq			= dbc_bulkout_enq(dbc);
+-	ep_ctx->ep_info		= 0;
+-	ep_ctx->ep_info2	= dbc_epctx_info2(BULK_OUT_EP, 1024, max_burst);
+-	ep_ctx->deq		= cpu_to_le64(deq | dbc->ring_out->cycle_state);
+-
+-	/* Populate bulk in endpoint context: */
+-	ep_ctx			= dbc_bulkin_ctx(dbc);
+-	deq			= dbc_bulkin_enq(dbc);
+-	ep_ctx->ep_info		= 0;
+-	ep_ctx->ep_info2	= dbc_epctx_info2(BULK_IN_EP, 1024, max_burst);
+-	ep_ctx->deq		= cpu_to_le64(deq | dbc->ring_in->cycle_state);
++	/* Populate bulk in and out endpoint contexts: */
++	xhci_dbc_init_ep_contexts(dbc);
+ 
+ 	/* Set DbC context and info registers: */
+ 	lo_hi_writeq(dbc->ctx->dma, &dbc->regs->dccp);
+@@ -435,6 +444,42 @@ dbc_alloc_ctx(struct device *dev, gfp_t flags)
+ 	return ctx;
+ }
+ 
++static void xhci_dbc_ring_init(struct xhci_ring *ring)
++{
++	struct xhci_segment *seg = ring->first_seg;
++
++	/* clear all trbs on ring in case of old ring */
++	memset(seg->trbs, 0, TRB_SEGMENT_SIZE);
++
++	/* Only event ring does not use link TRB */
++	if (ring->type != TYPE_EVENT) {
++		union xhci_trb *trb = &seg->trbs[TRBS_PER_SEGMENT - 1];
++
++		trb->link.segment_ptr = cpu_to_le64(ring->first_seg->dma);
++		trb->link.control = cpu_to_le32(LINK_TOGGLE | TRB_TYPE(TRB_LINK));
++	}
++	xhci_initialize_ring_info(ring);
++}
++
++static int xhci_dbc_reinit_ep_rings(struct xhci_dbc *dbc)
++{
++	struct xhci_ring *in_ring = dbc->eps[BULK_IN].ring;
++	struct xhci_ring *out_ring = dbc->eps[BULK_OUT].ring;
++
++	if (!in_ring || !out_ring || !dbc->ctx) {
++		dev_warn(dbc->dev, "Can't re-init unallocated endpoints\n");
++		return -ENODEV;
++	}
++
++	xhci_dbc_ring_init(in_ring);
++	xhci_dbc_ring_init(out_ring);
++
++	/* set ep context enqueue, dequeue, and cycle to initial values */
++	xhci_dbc_init_ep_contexts(dbc);
++
++	return 0;
++}
++
+ static struct xhci_ring *
+ xhci_dbc_ring_alloc(struct device *dev, enum xhci_ring_type type, gfp_t flags)
+ {
+@@ -463,15 +508,10 @@ xhci_dbc_ring_alloc(struct device *dev, enum xhci_ring_type type, gfp_t flags)
+ 
+ 	seg->dma = dma;
+ 
+-	/* Only event ring does not use link TRB */
+-	if (type != TYPE_EVENT) {
+-		union xhci_trb *trb = &seg->trbs[TRBS_PER_SEGMENT - 1];
+-
+-		trb->link.segment_ptr = cpu_to_le64(dma);
+-		trb->link.control = cpu_to_le32(LINK_TOGGLE | TRB_TYPE(TRB_LINK));
+-	}
+ 	INIT_LIST_HEAD(&ring->td_list);
+-	xhci_initialize_ring_info(ring, 1);
++
++	xhci_dbc_ring_init(ring);
++
+ 	return ring;
+ dma_fail:
+ 	kfree(seg);
+@@ -863,7 +903,7 @@ static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
+ 			dev_info(dbc->dev, "DbC cable unplugged\n");
+ 			dbc->state = DS_ENABLED;
+ 			xhci_dbc_flush_requests(dbc);
+-
++			xhci_dbc_reinit_ep_rings(dbc);
+ 			return EVT_DISC;
+ 		}
+ 
+@@ -873,7 +913,7 @@ static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
+ 			writel(portsc, &dbc->regs->portsc);
+ 			dbc->state = DS_ENABLED;
+ 			xhci_dbc_flush_requests(dbc);
+-
++			xhci_dbc_reinit_ep_rings(dbc);
+ 			return EVT_DISC;
+ 		}
+ 
+diff --git a/drivers/usb/host/xhci-debugfs.c b/drivers/usb/host/xhci-debugfs.c
+index f8ba15e7c225c2..570210e8a8e874 100644
+--- a/drivers/usb/host/xhci-debugfs.c
++++ b/drivers/usb/host/xhci-debugfs.c
+@@ -214,14 +214,11 @@ static void xhci_ring_dump_segment(struct seq_file *s,
+ 
+ static int xhci_ring_trb_show(struct seq_file *s, void *unused)
+ {
+-	int			i;
+ 	struct xhci_ring	*ring = *(struct xhci_ring **)s->private;
+ 	struct xhci_segment	*seg = ring->first_seg;
+ 
+-	for (i = 0; i < ring->num_segs; i++) {
++	xhci_for_each_ring_seg(ring->first_seg, seg)
+ 		xhci_ring_dump_segment(s, seg);
+-		seg = seg->next;
+-	}
+ 
+ 	return 0;
+ }
+diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
+index 91b47f9573cd7e..c9694526b157dd 100644
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -27,14 +27,12 @@
+  * "All components of all Command and Transfer TRBs shall be initialized to '0'"
+  */
+ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
+-					       unsigned int cycle_state,
+ 					       unsigned int max_packet,
+ 					       unsigned int num,
+ 					       gfp_t flags)
+ {
+ 	struct xhci_segment *seg;
+ 	dma_addr_t	dma;
+-	int		i;
+ 	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
+ 
+ 	seg = kzalloc_node(sizeof(*seg), flags, dev_to_node(dev));
+@@ -56,11 +54,6 @@ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
+ 			return NULL;
+ 		}
+ 	}
+-	/* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */
+-	if (cycle_state == 0) {
+-		for (i = 0; i < TRBS_PER_SEGMENT; i++)
+-			seg->trbs[i].link.control = cpu_to_le32(TRB_CYCLE);
+-	}
+ 	seg->num = num;
+ 	seg->dma = dma;
+ 	seg->next = NULL;
+@@ -138,6 +131,14 @@ static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *ring,
+ 
+ 	chain_links = xhci_link_chain_quirk(xhci, ring->type);
+ 
++	/* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */
++	if (ring->cycle_state == 0) {
++		xhci_for_each_ring_seg(ring->first_seg, seg) {
++			for (int i = 0; i < TRBS_PER_SEGMENT; i++)
++				seg->trbs[i].link.control |= cpu_to_le32(TRB_CYCLE);
++		}
++	}
++
+ 	next = ring->enq_seg->next;
+ 	xhci_link_segments(ring->enq_seg, first, ring->type, chain_links);
+ 	xhci_link_segments(last, next, ring->type, chain_links);
+@@ -224,7 +225,6 @@ static int xhci_update_stream_segment_mapping(
+ 		struct radix_tree_root *trb_address_map,
+ 		struct xhci_ring *ring,
+ 		struct xhci_segment *first_seg,
+-		struct xhci_segment *last_seg,
+ 		gfp_t mem_flags)
+ {
+ 	struct xhci_segment *seg;
+@@ -234,28 +234,22 @@ static int xhci_update_stream_segment_mapping(
+ 	if (WARN_ON_ONCE(trb_address_map == NULL))
+ 		return 0;
+ 
+-	seg = first_seg;
+-	do {
++	xhci_for_each_ring_seg(first_seg, seg) {
+ 		ret = xhci_insert_segment_mapping(trb_address_map,
+ 				ring, seg, mem_flags);
+ 		if (ret)
+ 			goto remove_streams;
+-		if (seg == last_seg)
+-			return 0;
+-		seg = seg->next;
+-	} while (seg != first_seg);
++	}
+ 
+ 	return 0;
+ 
+ remove_streams:
+ 	failed_seg = seg;
+-	seg = first_seg;
+-	do {
++	xhci_for_each_ring_seg(first_seg, seg) {
+ 		xhci_remove_segment_mapping(trb_address_map, seg);
+ 		if (seg == failed_seg)
+ 			return ret;
+-		seg = seg->next;
+-	} while (seg != first_seg);
++	}
+ 
+ 	return ret;
+ }
+@@ -267,17 +261,14 @@ static void xhci_remove_stream_mapping(struct xhci_ring *ring)
+ 	if (WARN_ON_ONCE(ring->trb_address_map == NULL))
+ 		return;
+ 
+-	seg = ring->first_seg;
+-	do {
++	xhci_for_each_ring_seg(ring->first_seg, seg)
+ 		xhci_remove_segment_mapping(ring->trb_address_map, seg);
+-		seg = seg->next;
+-	} while (seg != ring->first_seg);
+ }
+ 
+ static int xhci_update_stream_mapping(struct xhci_ring *ring, gfp_t mem_flags)
+ {
+ 	return xhci_update_stream_segment_mapping(ring->trb_address_map, ring,
+-			ring->first_seg, ring->last_seg, mem_flags);
++			ring->first_seg, mem_flags);
+ }
+ 
+ /* XXX: Do we need the hcd structure in all these functions? */
+@@ -297,8 +288,7 @@ void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
+ 	kfree(ring);
+ }
+ 
+-void xhci_initialize_ring_info(struct xhci_ring *ring,
+-			       unsigned int cycle_state)
++void xhci_initialize_ring_info(struct xhci_ring *ring)
+ {
+ 	/* The ring is empty, so the enqueue pointer == dequeue pointer */
+ 	ring->enqueue = ring->first_seg->trbs;
+@@ -312,7 +302,7 @@ void xhci_initialize_ring_info(struct xhci_ring *ring,
+ 	 * New rings are initialized with cycle state equal to 1; if we are
+ 	 * handling ring expansion, set the cycle state equal to the old ring.
+ 	 */
+-	ring->cycle_state = cycle_state;
++	ring->cycle_state = 1;
+ 
+ 	/*
+ 	 * Each segment has a link TRB, and leave an extra TRB for SW
+@@ -327,7 +317,6 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
+ 					struct xhci_segment **first,
+ 					struct xhci_segment **last,
+ 					unsigned int num_segs,
+-					unsigned int cycle_state,
+ 					enum xhci_ring_type type,
+ 					unsigned int max_packet,
+ 					gfp_t flags)
+@@ -338,7 +327,7 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
+ 
+ 	chain_links = xhci_link_chain_quirk(xhci, type);
+ 
+-	prev = xhci_segment_alloc(xhci, cycle_state, max_packet, num, flags);
++	prev = xhci_segment_alloc(xhci, max_packet, num, flags);
+ 	if (!prev)
+ 		return -ENOMEM;
+ 	num++;
+@@ -347,8 +336,7 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
+ 	while (num < num_segs) {
+ 		struct xhci_segment	*next;
+ 
+-		next = xhci_segment_alloc(xhci, cycle_state, max_packet, num,
+-					  flags);
++		next = xhci_segment_alloc(xhci, max_packet, num, flags);
+ 		if (!next)
+ 			goto free_segments;
+ 
+@@ -373,9 +361,8 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
+  * Set the end flag and the cycle toggle bit on the last segment.
+  * See section 4.9.1 and figures 15 and 16.
+  */
+-struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
+-		unsigned int num_segs, unsigned int cycle_state,
+-		enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
++struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, unsigned int num_segs,
++				  enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
+ {
+ 	struct xhci_ring	*ring;
+ 	int ret;
+@@ -393,7 +380,7 @@ struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
+ 		return ring;
+ 
+ 	ret = xhci_alloc_segments_for_ring(xhci, &ring->first_seg, &ring->last_seg, num_segs,
+-					   cycle_state, type, max_packet, flags);
++					   type, max_packet, flags);
+ 	if (ret)
+ 		goto fail;
+ 
+@@ -403,7 +390,7 @@ struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
+ 		ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |=
+ 			cpu_to_le32(LINK_TOGGLE);
+ 	}
+-	xhci_initialize_ring_info(ring, cycle_state);
++	xhci_initialize_ring_info(ring);
+ 	trace_xhci_ring_alloc(ring);
+ 	return ring;
+ 
+@@ -431,14 +418,14 @@ int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
+ 	struct xhci_segment	*last;
+ 	int			ret;
+ 
+-	ret = xhci_alloc_segments_for_ring(xhci, &first, &last, num_new_segs, ring->cycle_state,
+-					   ring->type, ring->bounce_buf_len, flags);
++	ret = xhci_alloc_segments_for_ring(xhci, &first, &last, num_new_segs, ring->type,
++					   ring->bounce_buf_len, flags);
+ 	if (ret)
+ 		return -ENOMEM;
+ 
+ 	if (ring->type == TYPE_STREAM) {
+ 		ret = xhci_update_stream_segment_mapping(ring->trb_address_map,
+-						ring, first, last, flags);
++						ring, first, flags);
+ 		if (ret)
+ 			goto free_segments;
+ 	}
+@@ -642,8 +629,7 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
+ 
+ 	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
+ 		stream_info->stream_rings[cur_stream] =
+-			xhci_ring_alloc(xhci, 2, 1, TYPE_STREAM, max_packet,
+-					mem_flags);
++			xhci_ring_alloc(xhci, 2, TYPE_STREAM, max_packet, mem_flags);
+ 		cur_ring = stream_info->stream_rings[cur_stream];
+ 		if (!cur_ring)
+ 			goto cleanup_rings;
+@@ -984,7 +970,7 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
+ 	}
+ 
+ 	/* Allocate endpoint 0 ring */
+-	dev->eps[0].ring = xhci_ring_alloc(xhci, 2, 1, TYPE_CTRL, 0, flags);
++	dev->eps[0].ring = xhci_ring_alloc(xhci, 2, TYPE_CTRL, 0, flags);
+ 	if (!dev->eps[0].ring)
+ 		goto fail;
+ 
+@@ -1467,7 +1453,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
+ 
+ 	/* Set up the endpoint ring */
+ 	virt_dev->eps[ep_index].new_ring =
+-		xhci_ring_alloc(xhci, 2, 1, ring_type, max_packet, mem_flags);
++		xhci_ring_alloc(xhci, 2, ring_type, max_packet, mem_flags);
+ 	if (!virt_dev->eps[ep_index].new_ring)
+ 		return -ENOMEM;
+ 
+@@ -2276,7 +2262,7 @@ xhci_alloc_interrupter(struct xhci_hcd *xhci, unsigned int segs, gfp_t flags)
+ 	if (!ir)
+ 		return NULL;
+ 
+-	ir->event_ring = xhci_ring_alloc(xhci, segs, 1, TYPE_EVENT, 0, flags);
++	ir->event_ring = xhci_ring_alloc(xhci, segs, TYPE_EVENT, 0, flags);
+ 	if (!ir->event_ring) {
+ 		xhci_warn(xhci, "Failed to allocate interrupter event ring\n");
+ 		kfree(ir);
+@@ -2482,7 +2468,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
+ 		goto fail;
+ 
+ 	/* Set up the command ring to have one segments for now. */
+-	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, 0, flags);
++	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, TYPE_COMMAND, 0, flags);
+ 	if (!xhci->cmd_ring)
+ 		goto fail;
+ 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index d5bcd5475b72b1..3970ec831b8ca9 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -41,15 +41,15 @@ MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default");
+ 
+ static bool td_on_ring(struct xhci_td *td, struct xhci_ring *ring)
+ {
+-	struct xhci_segment *seg = ring->first_seg;
++	struct xhci_segment *seg;
+ 
+ 	if (!td || !td->start_seg)
+ 		return false;
+-	do {
++
++	xhci_for_each_ring_seg(ring->first_seg, seg) {
+ 		if (seg == td->start_seg)
+ 			return true;
+-		seg = seg->next;
+-	} while (seg && seg != ring->first_seg);
++	}
+ 
+ 	return false;
+ }
+@@ -764,16 +764,12 @@ static void xhci_clear_command_ring(struct xhci_hcd *xhci)
+ 	struct xhci_segment *seg;
+ 
+ 	ring = xhci->cmd_ring;
+-	seg = ring->deq_seg;
+-	do {
+-		memset(seg->trbs, 0,
+-			sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
+-		seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
+-			cpu_to_le32(~TRB_CYCLE);
+-		seg = seg->next;
+-	} while (seg != ring->deq_seg);
+-
+-	xhci_initialize_ring_info(ring, 1);
++	xhci_for_each_ring_seg(ring->deq_seg, seg) {
++		memset(seg->trbs, 0, sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
++		seg->trbs[TRBS_PER_SEGMENT - 1].link.control &= cpu_to_le32(~TRB_CYCLE);
++	}
++
++	xhci_initialize_ring_info(ring);
+ 	/*
+ 	 * Reset the hardware dequeue pointer.
+ 	 * Yes, this will need to be re-written after resume, but we're paranoid
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index 67ee2e04994330..b2aeb444daaf5e 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -1263,6 +1263,9 @@ static inline const char *xhci_trb_type_string(u8 type)
+ #define AVOID_BEI_INTERVAL_MIN	8
+ #define AVOID_BEI_INTERVAL_MAX	32
+ 
++#define xhci_for_each_ring_seg(head, seg) \
++	for (seg = head; seg != NULL; seg = (seg->next != head ? seg->next : NULL))
++
+ struct xhci_segment {
+ 	union xhci_trb		*trbs;
+ 	/* private to HCD */
+@@ -1800,14 +1803,12 @@ void xhci_slot_copy(struct xhci_hcd *xhci,
+ int xhci_endpoint_init(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev,
+ 		struct usb_device *udev, struct usb_host_endpoint *ep,
+ 		gfp_t mem_flags);
+-struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
+-		unsigned int num_segs, unsigned int cycle_state,
++struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, unsigned int num_segs,
+ 		enum xhci_ring_type type, unsigned int max_packet, gfp_t flags);
+ void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring);
+ int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
+ 		unsigned int num_trbs, gfp_t flags);
+-void xhci_initialize_ring_info(struct xhci_ring *ring,
+-			unsigned int cycle_state);
++void xhci_initialize_ring_info(struct xhci_ring *ring);
+ void xhci_free_endpoint_ring(struct xhci_hcd *xhci,
+ 		struct xhci_virt_device *virt_dev,
+ 		unsigned int ep_index);
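The new xhci_for_each_ring_seg(), defined in the xhci.h hunk above, linearizes the circular segment list: it starts at head and stops once the next pointer wraps back around, which is what replaces the open-coded do/while loops converted in xhci-debugfs.c, xhci-mem.c and xhci.c. A standalone model of the idiom (extra parentheses added here for macro hygiene):

#include <stdio.h>

struct seg { int num; struct seg *next; };

/* Visit every node of a circular singly-linked list exactly once. */
#define for_each_ring_seg(head, seg) \
	for (seg = (head); seg != NULL; \
	     seg = ((seg)->next != (head) ? (seg)->next : NULL))

int main(void)
{
	struct seg a = { 0, NULL }, b = { 1, NULL }, c = { 2, NULL };
	struct seg *s;

	a.next = &b; b.next = &c; c.next = &a;	/* ring of three */
	for_each_ring_seg(&a, s)
		printf("seg %d\n", s->num);	/* seg 0, seg 1, seg 2 */
	return 0;
}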
+diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
+index ffa5b83d3a4a3a..14f96d217e6e1a 100644
+--- a/fs/btrfs/tree-checker.c
++++ b/fs/btrfs/tree-checker.c
+@@ -1744,10 +1744,10 @@ static int check_inode_ref(struct extent_buffer *leaf,
+ 	while (ptr < end) {
+ 		u16 namelen;
+ 
+-		if (unlikely(ptr + sizeof(iref) > end)) {
++		if (unlikely(ptr + sizeof(*iref) > end)) {
+ 			inode_ref_err(leaf, slot,
+ 			"inode ref overflow, ptr %lu end %lu inode_ref_size %zu",
+-				ptr, end, sizeof(iref));
++				ptr, end, sizeof(*iref));
+ 			return -EUCLEAN;
+ 		}
+ 
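The tree-checker fix above is the classic sizeof(ptr)-versus-sizeof(*ptr) bug: the overflow check compared against the size of the pointer variable instead of the size of the btrfs_inode_ref item it guards, so undersized items could pass. Two lines make the difference obvious; the struct layout here is illustrative:

#include <stdio.h>

struct inode_ref {
	unsigned long long index;
	unsigned short name_len;
} __attribute__((packed));

int main(void)
{
	struct inode_ref *iref = 0;

	/* sizeof(iref) is the pointer size; sizeof(*iref) is the struct. */
	printf("sizeof(iref)=%zu sizeof(*iref)=%zu\n",
	       sizeof(iref), sizeof(*iref));	/* 8 vs 10 on 64-bit */
	return 0;
}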
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index f917fdae7e672b..0022ad003791f4 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -1946,7 +1946,7 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
+ 
+ 	search_key.objectid = log_key.objectid;
+ 	search_key.type = BTRFS_INODE_EXTREF_KEY;
+-	search_key.offset = key->objectid;
++	search_key.offset = btrfs_extref_hash(key->objectid, name.name, name.len);
+ 	ret = backref_in_log(root->log_root, &search_key, key->objectid, &name);
+ 	if (ret < 0) {
+ 		goto out;
+diff --git a/fs/nilfs2/sysfs.c b/fs/nilfs2/sysfs.c
+index 14868a3dd592ca..bc52afbfc5c739 100644
+--- a/fs/nilfs2/sysfs.c
++++ b/fs/nilfs2/sysfs.c
+@@ -1075,7 +1075,7 @@ void nilfs_sysfs_delete_device_group(struct the_nilfs *nilfs)
+  ************************************************************************/
+ 
+ static ssize_t nilfs_feature_revision_show(struct kobject *kobj,
+-					    struct attribute *attr, char *buf)
++					    struct kobj_attribute *attr, char *buf)
+ {
+ 	return sysfs_emit(buf, "%d.%d\n",
+ 			NILFS_CURRENT_REV, NILFS_MINOR_REV);
+@@ -1087,7 +1087,7 @@ static const char features_readme_str[] =
+ 	"(1) revision\n\tshow current revision of NILFS file system driver.\n";
+ 
+ static ssize_t nilfs_feature_README_show(struct kobject *kobj,
+-					 struct attribute *attr,
++					 struct kobj_attribute *attr,
+ 					 char *buf)
+ {
+ 	return sysfs_emit(buf, features_readme_str);
+diff --git a/fs/nilfs2/sysfs.h b/fs/nilfs2/sysfs.h
+index 78a87a016928b7..d370cd5cce3f5d 100644
+--- a/fs/nilfs2/sysfs.h
++++ b/fs/nilfs2/sysfs.h
+@@ -50,16 +50,16 @@ struct nilfs_sysfs_dev_subgroups {
+ 	struct completion sg_segments_kobj_unregister;
+ };
+ 
+-#define NILFS_COMMON_ATTR_STRUCT(name) \
++#define NILFS_KOBJ_ATTR_STRUCT(name) \
+ struct nilfs_##name##_attr { \
+ 	struct attribute attr; \
+-	ssize_t (*show)(struct kobject *, struct attribute *, \
++	ssize_t (*show)(struct kobject *, struct kobj_attribute *, \
+ 			char *); \
+-	ssize_t (*store)(struct kobject *, struct attribute *, \
++	ssize_t (*store)(struct kobject *, struct kobj_attribute *, \
+ 			 const char *, size_t); \
+ }
+ 
+-NILFS_COMMON_ATTR_STRUCT(feature);
++NILFS_KOBJ_ATTR_STRUCT(feature);
+ 
+ #define NILFS_DEV_ATTR_STRUCT(name) \
+ struct nilfs_##name##_attr { \
+diff --git a/fs/smb/client/cifsproto.h b/fs/smb/client/cifsproto.h
+index fee7bc9848a36a..b59647291363b6 100644
+--- a/fs/smb/client/cifsproto.h
++++ b/fs/smb/client/cifsproto.h
+@@ -298,8 +298,8 @@ extern void cifs_close_deferred_file(struct cifsInodeInfo *cifs_inode);
+ 
+ extern void cifs_close_all_deferred_files(struct cifs_tcon *cifs_tcon);
+ 
+-extern void cifs_close_deferred_file_under_dentry(struct cifs_tcon *cifs_tcon,
+-				const char *path);
++void cifs_close_deferred_file_under_dentry(struct cifs_tcon *cifs_tcon,
++					   struct dentry *dentry);
+ 
+ extern void cifs_mark_open_handles_for_deleted_file(struct inode *inode,
+ 				const char *path);
+diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c
+index c0df2c1841243e..e06d02b68c5387 100644
+--- a/fs/smb/client/inode.c
++++ b/fs/smb/client/inode.c
+@@ -1958,7 +1958,7 @@ int cifs_unlink(struct inode *dir, struct dentry *dentry)
+ 	}
+ 
+ 	netfs_wait_for_outstanding_io(inode);
+-	cifs_close_deferred_file_under_dentry(tcon, full_path);
++	cifs_close_deferred_file_under_dentry(tcon, dentry);
+ #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+ 	if (cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
+ 				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
+@@ -2489,10 +2489,10 @@ cifs_rename2(struct mnt_idmap *idmap, struct inode *source_dir,
+ 		goto cifs_rename_exit;
+ 	}
+ 
+-	cifs_close_deferred_file_under_dentry(tcon, from_name);
++	cifs_close_deferred_file_under_dentry(tcon, source_dentry);
+ 	if (d_inode(target_dentry) != NULL) {
+ 		netfs_wait_for_outstanding_io(d_inode(target_dentry));
+-		cifs_close_deferred_file_under_dentry(tcon, to_name);
++		cifs_close_deferred_file_under_dentry(tcon, target_dentry);
+ 	}
+ 
+ 	rc = cifs_do_rename(xid, source_dentry, from_name, target_dentry,
+diff --git a/fs/smb/client/misc.c b/fs/smb/client/misc.c
+index 57b6b191293eea..499f791df77998 100644
+--- a/fs/smb/client/misc.c
++++ b/fs/smb/client/misc.c
+@@ -829,33 +829,28 @@ cifs_close_all_deferred_files(struct cifs_tcon *tcon)
+ 		kfree(tmp_list);
+ 	}
+ }
+-void
+-cifs_close_deferred_file_under_dentry(struct cifs_tcon *tcon, const char *path)
++
++void cifs_close_deferred_file_under_dentry(struct cifs_tcon *tcon,
++					   struct dentry *dentry)
+ {
+-	struct cifsFileInfo *cfile;
+ 	struct file_list *tmp_list, *tmp_next_list;
+-	void *page;
+-	const char *full_path;
++	struct cifsFileInfo *cfile;
+ 	LIST_HEAD(file_head);
+ 
+-	page = alloc_dentry_path();
+ 	spin_lock(&tcon->open_file_lock);
+ 	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
+-		full_path = build_path_from_dentry(cfile->dentry, page);
+-		if (strstr(full_path, path)) {
+-			if (delayed_work_pending(&cfile->deferred)) {
+-				if (cancel_delayed_work(&cfile->deferred)) {
+-					spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
+-					cifs_del_deferred_close(cfile);
+-					spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
+-
+-					tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
+-					if (tmp_list == NULL)
+-						break;
+-					tmp_list->cfile = cfile;
+-					list_add_tail(&tmp_list->list, &file_head);
+-				}
+-			}
++		if ((cfile->dentry == dentry) &&
++		    delayed_work_pending(&cfile->deferred) &&
++		    cancel_delayed_work(&cfile->deferred)) {
++			spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
++			cifs_del_deferred_close(cfile);
++			spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
++
++			tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
++			if (tmp_list == NULL)
++				break;
++			tmp_list->cfile = cfile;
++			list_add_tail(&tmp_list->list, &file_head);
+ 		}
+ 	}
+ 	spin_unlock(&tcon->open_file_lock);
+@@ -865,7 +860,6 @@ cifs_close_deferred_file_under_dentry(struct cifs_tcon *tcon, const char *path)
+ 		list_del(&tmp_list->list);
+ 		kfree(tmp_list);
+ 	}
+-	free_dentry_path(page);
+ }
+ 
+ /*
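The misc.c rewrite above replaces a strstr() match on a freshly built path with a direct cfile->dentry == dentry comparison: the substring test could sweep up deferred handles on unrelated files whose paths merely embed the target, and it needed alloc_dentry_path()/build_path_from_dentry() inside the locked list walk. A two-line demonstration of the false positive:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *open_file = "/dir/report-final.txt";
	const char *target    = "/dir/report";

	/* substring match wrongly treats the open file as the target */
	printf("strstr match: %s\n",
	       strstr(open_file, target) ? "yes (false positive)" : "no");
	printf("exact match:  %s\n",
	       strcmp(open_file, target) == 0 ? "yes" : "no");
	return 0;
}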
+diff --git a/fs/smb/client/smbdirect.c b/fs/smb/client/smbdirect.c
+index b9bb531717a651..b1548269c308a1 100644
+--- a/fs/smb/client/smbdirect.c
++++ b/fs/smb/client/smbdirect.c
+@@ -1075,8 +1075,10 @@ static int smbd_negotiate(struct smbd_connection *info)
+ 	log_rdma_event(INFO, "smbd_post_recv rc=%d iov.addr=0x%llx iov.length=%u iov.lkey=0x%x\n",
+ 		       rc, response->sge.addr,
+ 		       response->sge.length, response->sge.lkey);
+-	if (rc)
++	if (rc) {
++		put_receive_buffer(info, response);
+ 		return rc;
++	}
+ 
+ 	init_completion(&info->negotiate_completion);
+ 	info->negotiate_done = false;
+@@ -1308,6 +1310,9 @@ void smbd_destroy(struct TCP_Server_Info *server)
+ 			sc->status == SMBDIRECT_SOCKET_DISCONNECTED);
+ 	}
+ 
++	log_rdma_event(INFO, "cancelling post_send_credits_work\n");
++	disable_work_sync(&info->post_send_credits_work);
++
+ 	log_rdma_event(INFO, "destroying qp\n");
+ 	ib_drain_qp(sc->ib.qp);
+ 	rdma_destroy_qp(sc->rdma.cm_id);
+diff --git a/fs/smb/server/transport_rdma.c b/fs/smb/server/transport_rdma.c
+index 67c989e5ddaa79..2fc689f99997e8 100644
+--- a/fs/smb/server/transport_rdma.c
++++ b/fs/smb/server/transport_rdma.c
+@@ -553,7 +553,7 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
+ 	case SMB_DIRECT_MSG_DATA_TRANSFER: {
+ 		struct smb_direct_data_transfer *data_transfer =
+ 			(struct smb_direct_data_transfer *)recvmsg->packet;
+-		unsigned int data_length;
++		u32 remaining_data_length, data_offset, data_length;
+ 		int avail_recvmsg_count, receive_credits;
+ 
+ 		if (wc->byte_len <
+@@ -563,15 +563,25 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
+ 			return;
+ 		}
+ 
++		remaining_data_length = le32_to_cpu(data_transfer->remaining_data_length);
+ 		data_length = le32_to_cpu(data_transfer->data_length);
+-		if (data_length) {
+-			if (wc->byte_len < sizeof(struct smb_direct_data_transfer) +
+-			    (u64)data_length) {
+-				put_recvmsg(t, recvmsg);
+-				smb_direct_disconnect_rdma_connection(t);
+-				return;
+-			}
++		data_offset = le32_to_cpu(data_transfer->data_offset);
++		if (wc->byte_len < data_offset ||
++		    wc->byte_len < (u64)data_offset + data_length) {
++			put_recvmsg(t, recvmsg);
++			smb_direct_disconnect_rdma_connection(t);
++			return;
++		}
++		if (remaining_data_length > t->max_fragmented_recv_size ||
++		    data_length > t->max_fragmented_recv_size ||
++		    (u64)remaining_data_length + (u64)data_length >
++		    (u64)t->max_fragmented_recv_size) {
++			put_recvmsg(t, recvmsg);
++			smb_direct_disconnect_rdma_connection(t);
++			return;
++		}
+ 
++		if (data_length) {
+ 			if (t->full_packet_received)
+ 				recvmsg->first_segment = true;
+ 
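The recv_done() hunk validates every peer-controlled field of a data-transfer PDU before use: data_offset and data_length must fit inside the bytes the completion actually delivered, remaining_data_length is checked against the negotiated receive window, and the sums are widened to u64 so a crafted pair of 32-bit values cannot wrap past the test. A userspace model of the same checks:

#include <stdint.h>
#include <stdio.h>

/* Reject a fragment whose offset/length don't fit the received bytes.
 * Sums are widened to 64 bits so 32-bit wraparound can't defeat them. */
static int fragment_ok(uint32_t byte_len, uint32_t off, uint32_t len,
		       uint32_t remaining, uint32_t max_recv)
{
	if (byte_len < off || (uint64_t)off + len > byte_len)
		return 0;
	if (remaining > max_recv || len > max_recv ||
	    (uint64_t)remaining + len > max_recv)
		return 0;
	return 1;
}

int main(void)
{
	/* 0xfffffff0 + 0x20 wraps to 0x10 in 32 bits; u64 math catches it */
	printf("%d\n", fragment_ok(4096, 0xfffffff0u, 0x20, 0, 1 << 20));
	printf("%d\n", fragment_ok(4096, 24, 1000, 0, 1 << 20));	/* 0 then 1 */
	return 0;
}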
+diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
+index f7b3b93f3a49a7..0c70f3a5557505 100644
+--- a/include/crypto/if_alg.h
++++ b/include/crypto/if_alg.h
+@@ -135,6 +135,7 @@ struct af_alg_async_req {
+  *			SG?
+  * @enc:		Cryptographic operation to be performed when
+  *			recvmsg is invoked.
++ * @write:		True if we are in the middle of a write.
+  * @init:		True if metadata has been sent.
+  * @len:		Length of memory allocated for this data structure.
+  * @inflight:		Non-zero when AIO requests are in flight.
+@@ -151,10 +152,11 @@ struct af_alg_ctx {
+ 	size_t used;
+ 	atomic_t rcvused;
+ 
+-	bool more;
+-	bool merge;
+-	bool enc;
+-	bool init;
++	u32		more:1,
++			merge:1,
++			enc:1,
++			write:1,
++			init:1;
+ 
+ 	unsigned int len;
+ 
+diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
+index 61675ea95e0b98..f18f50d39598ff 100644
+--- a/include/linux/io_uring_types.h
++++ b/include/linux/io_uring_types.h
+@@ -37,6 +37,7 @@ enum io_uring_cmd_flags {
+ 	/* set when uring wants to cancel a previously issued command */
+ 	IO_URING_F_CANCEL		= (1 << 11),
+ 	IO_URING_F_COMPAT		= (1 << 12),
++	IO_URING_F_TASK_DEAD		= (1 << 13),
+ };
+ 
+ struct io_wq_work_node {
+@@ -399,9 +400,6 @@ struct io_ring_ctx {
+ 	struct callback_head		poll_wq_task_work;
+ 	struct list_head		defer_list;
+ 
+-	struct io_alloc_cache		msg_cache;
+-	spinlock_t			msg_lock;
+-
+ #ifdef CONFIG_NET_RX_BUSY_POLL
+ 	struct list_head	napi_list;	/* track busy poll napi_id */
+ 	spinlock_t		napi_lock;	/* napi_list lock */
+diff --git a/include/linux/minmax.h b/include/linux/minmax.h
+index 98008dd92153db..eaaf5c008e4d05 100644
+--- a/include/linux/minmax.h
++++ b/include/linux/minmax.h
+@@ -8,13 +8,10 @@
+ #include <linux/types.h>
+ 
+ /*
+- * min()/max()/clamp() macros must accomplish three things:
++ * min()/max()/clamp() macros must accomplish several things:
+  *
+  * - Avoid multiple evaluations of the arguments (so side-effects like
+  *   "x++" happen only once) when non-constant.
+- * - Retain result as a constant expressions when called with only
+- *   constant expressions (to avoid tripping VLA warnings in stack
+- *   allocation usage).
+  * - Perform signed v unsigned type-checking (to generate compile
+  *   errors instead of nasty runtime surprises).
+  * - Unsigned char/short are always promoted to signed int and can be
+@@ -31,58 +28,54 @@
+  *   bit #0 set if ok for unsigned comparisons
+  *   bit #1 set if ok for signed comparisons
+  *
+- * In particular, statically non-negative signed integer
+- * expressions are ok for both.
++ * In particular, statically non-negative signed integer expressions
++ * are ok for both.
+  *
+- * NOTE! Unsigned types smaller than 'int' are implicitly
+- * converted to 'int' in expressions, and are accepted for
+- * signed conversions for now. This is debatable.
++ * NOTE! Unsigned types smaller than 'int' are implicitly converted to 'int'
++ * in expressions, and are accepted for signed conversions for now.
++ * This is debatable.
+  *
+- * Note that 'x' is the original expression, and 'ux' is
+- * the unique variable that contains the value.
++ * Note that 'x' is the original expression, and 'ux' is the unique variable
++ * that contains the value.
+  *
+- * We use 'ux' for pure type checking, and 'x' for when
+- * we need to look at the value (but without evaluating
+- * it for side effects! Careful to only ever evaluate it
+- * with sizeof() or __builtin_constant_p() etc).
++ * We use 'ux' for pure type checking, and 'x' for when we need to look at the
++ * value (but without evaluating it for side effects!
++ * Careful to only ever evaluate it with sizeof() or __builtin_constant_p() etc).
+  *
+- * Pointers end up being checked by the normal C type
+- * rules at the actual comparison, and these expressions
+- * only need to be careful to not cause warnings for
+- * pointer use.
++ * Pointers end up being checked by the normal C type rules at the actual
++ * comparison, and these expressions only need to be careful to not cause
++ * warnings for pointer use.
+  */
+-#define __signed_type_use(x,ux) (2+__is_nonneg(x,ux))
+-#define __unsigned_type_use(x,ux) (1+2*(sizeof(ux)<4))
+-#define __sign_use(x,ux) (is_signed_type(typeof(ux))? \
+-	__signed_type_use(x,ux):__unsigned_type_use(x,ux))
++#define __sign_use(ux) (is_signed_type(typeof(ux)) ? \
++	(2 + __is_nonneg(ux)) : (1 + 2 * (sizeof(ux) < 4)))
+ 
+ /*
+- * To avoid warnings about casting pointers to integers
+- * of different sizes, we need that special sign type.
++ * Check whether a signed value is always non-negative.
+  *
+- * On 64-bit we can just always use 'long', since any
+- * integer or pointer type can just be cast to that.
++ * A cast is needed to avoid any warnings from values that aren't signed
++ * integer types (in which case the result doesn't matter).
+  *
+- * This does not work for 128-bit signed integers since
+- * the cast would truncate them, but we do not use s128
+- * types in the kernel (we do use 'u128', but they will
+- * be handled by the !is_signed_type() case).
++ * On 64-bit any integer or pointer type can safely be cast to 'long long'.
++ * But on 32-bit we need to avoid warnings about casting pointers to integers
++ * of different sizes without truncating 64-bit values so 'long' or 'long long'
++ * must be used depending on the size of the value.
+  *
+- * NOTE! The cast is there only to avoid any warnings
+- * from when values that aren't signed integer types.
++ * This does not work for 128-bit signed integers since the cast would truncate
++ * them, but we do not use s128 types in the kernel (we do use 'u128',
++ * but they are handled by the !is_signed_type() case).
+  */
+-#ifdef CONFIG_64BIT
+-  #define __signed_type(ux) long
++#if __SIZEOF_POINTER__ == __SIZEOF_LONG_LONG__
++#define __is_nonneg(ux) statically_true((long long)(ux) >= 0)
+ #else
+-  #define __signed_type(ux) typeof(__builtin_choose_expr(sizeof(ux)>4,1LL,1L))
++#define __is_nonneg(ux) statically_true( \
++	(typeof(__builtin_choose_expr(sizeof(ux) > 4, 1LL, 1L)))(ux) >= 0)
+ #endif
+-#define __is_nonneg(x,ux) statically_true((__signed_type(ux))(x)>=0)
+ 
+-#define __types_ok(x,y,ux,uy) \
+-	(__sign_use(x,ux) & __sign_use(y,uy))
++#define __types_ok(ux, uy) \
++	(__sign_use(ux) & __sign_use(uy))
+ 
+-#define __types_ok3(x,y,z,ux,uy,uz) \
+-	(__sign_use(x,ux) & __sign_use(y,uy) & __sign_use(z,uz))
++#define __types_ok3(ux, uy, uz) \
++	(__sign_use(ux) & __sign_use(uy) & __sign_use(uz))
+ 
+ #define __cmp_op_min <
+ #define __cmp_op_max >
+@@ -97,30 +90,13 @@
+ 
+ #define __careful_cmp_once(op, x, y, ux, uy) ({		\
+ 	__auto_type ux = (x); __auto_type uy = (y);	\
+-	BUILD_BUG_ON_MSG(!__types_ok(x,y,ux,uy),	\
++	BUILD_BUG_ON_MSG(!__types_ok(ux, uy),		\
+ 		#op"("#x", "#y") signedness error");	\
+ 	__cmp(op, ux, uy); })
+ 
+ #define __careful_cmp(op, x, y) \
+ 	__careful_cmp_once(op, x, y, __UNIQUE_ID(x_), __UNIQUE_ID(y_))
+ 
+-#define __clamp(val, lo, hi)	\
+-	((val) >= (hi) ? (hi) : ((val) <= (lo) ? (lo) : (val)))
+-
+-#define __clamp_once(val, lo, hi, uval, ulo, uhi) ({				\
+-	__auto_type uval = (val);						\
+-	__auto_type ulo = (lo);							\
+-	__auto_type uhi = (hi);							\
+-	static_assert(__builtin_choose_expr(__is_constexpr((lo) > (hi)), 	\
+-			(lo) <= (hi), true),					\
+-		"clamp() low limit " #lo " greater than high limit " #hi);	\
+-	BUILD_BUG_ON_MSG(!__types_ok3(val,lo,hi,uval,ulo,uhi),			\
+-		"clamp("#val", "#lo", "#hi") signedness error");		\
+-	__clamp(uval, ulo, uhi); })
+-
+-#define __careful_clamp(val, lo, hi) \
+-	__clamp_once(val, lo, hi, __UNIQUE_ID(v_), __UNIQUE_ID(l_), __UNIQUE_ID(h_))
+-
+ /**
+  * min - return minimum of two values of the same or compatible types
+  * @x: first value
+@@ -154,7 +130,7 @@
+ 
+ #define __careful_op3(op, x, y, z, ux, uy, uz) ({			\
+ 	__auto_type ux = (x); __auto_type uy = (y);__auto_type uz = (z);\
+-	BUILD_BUG_ON_MSG(!__types_ok3(x,y,z,ux,uy,uz),			\
++	BUILD_BUG_ON_MSG(!__types_ok3(ux, uy, uz),			\
+ 		#op"3("#x", "#y", "#z") signedness error");		\
+ 	__cmp(op, ux, __cmp(op, uy, uz)); })
+ 
+@@ -176,6 +152,22 @@
+ #define max3(x, y, z) \
+ 	__careful_op3(max, x, y, z, __UNIQUE_ID(x_), __UNIQUE_ID(y_), __UNIQUE_ID(z_))
+ 
++/**
++ * min_t - return minimum of two values, using the specified type
++ * @type: data type to use
++ * @x: first value
++ * @y: second value
++ */
++#define min_t(type, x, y) __cmp_once(min, type, x, y)
++
++/**
++ * max_t - return maximum of two values, using the specified type
++ * @type: data type to use
++ * @x: first value
++ * @y: second value
++ */
++#define max_t(type, x, y) __cmp_once(max, type, x, y)
++
+ /**
+  * min_not_zero - return the minimum that is _not_ zero, unless both are zero
+  * @x: value1
+@@ -186,39 +178,57 @@
+ 	typeof(y) __y = (y);			\
+ 	__x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); })
+ 
++#define __clamp(val, lo, hi)	\
++	((val) >= (hi) ? (hi) : ((val) <= (lo) ? (lo) : (val)))
++
++#define __clamp_once(type, val, lo, hi, uval, ulo, uhi) ({			\
++	type uval = (val);							\
++	type ulo = (lo);							\
++	type uhi = (hi);							\
++	BUILD_BUG_ON_MSG(statically_true(ulo > uhi),				\
++		"clamp() low limit " #lo " greater than high limit " #hi);	\
++	BUILD_BUG_ON_MSG(!__types_ok3(uval, ulo, uhi),				\
++		"clamp("#val", "#lo", "#hi") signedness error");		\
++	__clamp(uval, ulo, uhi); })
++
++#define __careful_clamp(type, val, lo, hi) \
++	__clamp_once(type, val, lo, hi, __UNIQUE_ID(v_), __UNIQUE_ID(l_), __UNIQUE_ID(h_))
++
+ /**
+- * clamp - return a value clamped to a given range with strict typechecking
++ * clamp - return a value clamped to a given range with typechecking
+  * @val: current value
+  * @lo: lowest allowable value
+  * @hi: highest allowable value
+  *
+- * This macro does strict typechecking of @lo/@hi to make sure they are of the
+- * same type as @val.  See the unnecessary pointer comparisons.
+- */
+-#define clamp(val, lo, hi) __careful_clamp(val, lo, hi)
+-
+-/*
+- * ..and if you can't take the strict
+- * types, you can specify one yourself.
+- *
+- * Or not use min/max/clamp at all, of course.
++ * This macro checks @val/@lo/@hi to make sure they have compatible
++ * signedness.
+  */
++#define clamp(val, lo, hi) __careful_clamp(__auto_type, val, lo, hi)
+ 
+ /**
+- * min_t - return minimum of two values, using the specified type
+- * @type: data type to use
+- * @x: first value
+- * @y: second value
++ * clamp_t - return a value clamped to a given range using a given type
++ * @type: the type of variable to use
++ * @val: current value
++ * @lo: minimum allowable value
++ * @hi: maximum allowable value
++ *
++ * This macro does no typechecking and uses temporary variables of type
++ * @type to make all the comparisons.
+  */
+-#define min_t(type, x, y) __cmp_once(min, type, x, y)
++#define clamp_t(type, val, lo, hi) __careful_clamp(type, val, lo, hi)
+ 
+ /**
+- * max_t - return maximum of two values, using the specified type
+- * @type: data type to use
+- * @x: first value
+- * @y: second value
++ * clamp_val - return a value clamped to a given range using val's type
++ * @val: current value
++ * @lo: minimum allowable value
++ * @hi: maximum allowable value
++ *
++ * This macro does no typechecking and uses temporary variables of whatever
++ * type the input argument @val is.  This is useful when @val is an unsigned
++ * type and @lo and @hi are literals that will otherwise be assigned a signed
++ * integer type.
+  */
+-#define max_t(type, x, y) __cmp_once(max, type, x, y)
++#define clamp_val(val, lo, hi) __careful_clamp(typeof(val), val, lo, hi)
+ 
+ /*
+  * Do not check the array parameter using __must_be_array().
+@@ -263,31 +273,6 @@
+  */
+ #define max_array(array, len) __minmax_array(max, array, len)
+ 
+-/**
+- * clamp_t - return a value clamped to a given range using a given type
+- * @type: the type of variable to use
+- * @val: current value
+- * @lo: minimum allowable value
+- * @hi: maximum allowable value
+- *
+- * This macro does no typechecking and uses temporary variables of type
+- * @type to make all the comparisons.
+- */
+-#define clamp_t(type, val, lo, hi) __careful_clamp((type)(val), (type)(lo), (type)(hi))
+-
+-/**
+- * clamp_val - return a value clamped to a given range using val's type
+- * @val: current value
+- * @lo: minimum allowable value
+- * @hi: maximum allowable value
+- *
+- * This macro does no typechecking and uses temporary variables of whatever
+- * type the input argument @val is.  This is useful when @val is an unsigned
+- * type and @lo and @hi are literals that will otherwise be assigned a signed
+- * integer type.
+- */
+-#define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi)
+-
+ static inline bool in_range64(u64 val, u64 start, u64 len)
+ {
+ 	return (val - start) < len;
+@@ -326,9 +311,9 @@ static inline bool in_range32(u32 val, u32 start, u32 len)
+  * Use these carefully: no type checking, and uses the arguments
+  * multiple times. Use for obvious constants only.
+  */
+-#define MIN(a,b) __cmp(min,a,b)
+-#define MAX(a,b) __cmp(max,a,b)
+-#define MIN_T(type,a,b) __cmp(min,(type)(a),(type)(b))
+-#define MAX_T(type,a,b) __cmp(max,(type)(a),(type)(b))
++#define MIN(a, b) __cmp(min, a, b)
++#define MAX(a, b) __cmp(max, a, b)
++#define MIN_T(type, a, b) __cmp(min, (type)(a), (type)(b))
++#define MAX_T(type, a, b) __cmp(max, (type)(a), (type)(b))
+ 
+ #endif	/* _LINUX_MINMAX_H */
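The minmax.h rework above is largely mechanical: clamp(), clamp_t() and clamp_val() now funnel into one __careful_clamp(type, ...) implementation, the lo <= hi sanity check fires on any statically provable violation rather than only on constant expressions, and __types_ok()/__is_nonneg() inspect just the unique temporaries. The value semantics are unchanged; a quick refresher on what __clamp() computes, with the type-checking machinery deliberately omitted:

#include <stdio.h>

/* Same value logic as the kernel's __clamp(): hi wins, then lo. */
#define clamp(val, lo, hi) \
	((val) >= (hi) ? (hi) : ((val) <= (lo) ? (lo) : (val)))

int main(void)
{
	printf("%d %d %d\n",
	       clamp(5, 0, 10),		/* 5: already in range */
	       clamp(-3, 0, 10),	/* 0: raised to lo */
	       clamp(42, 0, 10));	/* 10: capped at hi */
	return 0;
}

Unlike the kernel macro, this sketch evaluates its arguments more than once, which is exactly the hazard the __clamp_once() temporaries exist to avoid.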
+diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
+index da9749739abde9..9a8eb644f6707d 100644
+--- a/include/linux/mlx5/driver.h
++++ b/include/linux/mlx5/driver.h
+@@ -689,6 +689,7 @@ struct mlx5e_resources {
+ 		bool			   tisn_valid;
+ 	} hw_objs;
+ 	struct net_device *uplink_netdev;
++	netdevice_tracker tracker;
+ 	struct mutex uplink_netdev_lock;
+ 	struct mlx5_crypto_dek_priv *dek_priv;
+ };
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index deeb535f920c8a..41f5c88bdf3bc9 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -2200,6 +2200,61 @@ static inline bool folio_likely_mapped_shared(struct folio *folio)
+ 	return atomic_read(&folio->_mapcount) > 0;
+ }
+ 
++/**
++ * folio_expected_ref_count - calculate the expected folio refcount
++ * @folio: the folio
++ *
++ * Calculate the expected folio refcount, taking references from the pagecache,
++ * swapcache, PG_private and page table mappings into account. Useful in
++ * combination with folio_ref_count() to detect unexpected references (e.g.,
++ * GUP or other temporary references).
++ *
++ * Does currently not consider references from the LRU cache. If the folio
++ * was isolated from the LRU (which is the case during migration or split),
++ * the LRU cache does not apply.
++ *
++ * Calling this function on an unmapped folio -- !folio_mapped() -- that is
++ * locked will return a stable result.
++ *
++ * Calling this function on a mapped folio will not result in a stable result,
++ * because nothing stops additional page table mappings from coming (e.g.,
++ * fork()) or going (e.g., munmap()).
++ *
++ * Calling this function without the folio lock will also not result in a
++ * stable result: for example, the folio might get dropped from the swapcache
++ * concurrently.
++ *
++ * However, even when called without the folio lock or on a mapped folio,
++ * this function can be used to detect unexpected references early (for example,
++ * if it makes sense to even lock the folio and unmap it).
++ *
++ * The caller must add any reference (e.g., from folio_try_get()) it might be
++ * holding itself to the result.
++ *
++ * Returns the expected folio refcount.
++ */
++static inline int folio_expected_ref_count(const struct folio *folio)
++{
++	const int order = folio_order(folio);
++	int ref_count = 0;
++
++	if (WARN_ON_ONCE(folio_test_slab(folio)))
++		return 0;
++
++	if (folio_test_anon(folio)) {
++		/* One reference per page from the swapcache. */
++		ref_count += folio_test_swapcache(folio) << order;
++	} else if (!((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS)) {
++		/* One reference per page from the pagecache. */
++		ref_count += !!folio->mapping << order;
++		/* One reference from PG_private. */
++		ref_count += folio_test_private(folio);
++	}
++
++	/* One reference per page table mapping. */
++	return ref_count + folio_mapcount(folio);
++}
++
+ #ifndef HAVE_ARCH_MAKE_FOLIO_ACCESSIBLE
+ static inline int arch_make_folio_accessible(struct folio *folio)
+ {
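folio_expected_ref_count(), added above, totals the references a folio should legitimately hold: anon folios get one per page from the swapcache, file folios one per page from the pagecache plus one for PG_private, and every folio one per page-table mapping; the << order shifts account for a large folio spanning 2^order pages. A userspace model of just the arithmetic, with hypothetical flags standing in for the real page flags:

#include <stdio.h>

/* Hypothetical stand-ins for folio state, for this sketch only. */
struct folio_model {
	int order;		/* folio spans 1 << order pages */
	int anon, swapcache, has_mapping, private_flag;
	int mapcount;		/* page-table mappings */
};

static int expected_ref_count(const struct folio_model *f)
{
	int ref = 0;

	if (f->anon)
		ref += f->swapcache << f->order;	/* one per page */
	else {
		ref += f->has_mapping << f->order;	/* pagecache refs */
		ref += f->private_flag;			/* PG_private */
	}
	return ref + f->mapcount;
}

int main(void)
{
	struct folio_model f = { .order = 2, .has_mapping = 1,
				 .private_flag = 1, .mapcount = 3 };

	/* 4 pagecache refs + 1 private + 3 mappings = 8 */
	printf("%d\n", expected_ref_count(&f));
	return 0;
}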
+diff --git a/include/uapi/linux/mptcp.h b/include/uapi/linux/mptcp.h
+index 67d015df8893cc..5fd5b4cf75ca1e 100644
+--- a/include/uapi/linux/mptcp.h
++++ b/include/uapi/linux/mptcp.h
+@@ -31,6 +31,8 @@
+ #define MPTCP_INFO_FLAG_FALLBACK		_BITUL(0)
+ #define MPTCP_INFO_FLAG_REMOTE_KEY_RECEIVED	_BITUL(1)
+ 
++#define MPTCP_PM_EV_FLAG_DENY_JOIN_ID0		_BITUL(0)
++
+ #define MPTCP_PM_ADDR_FLAG_SIGNAL                      (1 << 0)
+ #define MPTCP_PM_ADDR_FLAG_SUBFLOW                     (1 << 1)
+ #define MPTCP_PM_ADDR_FLAG_BACKUP                      (1 << 2)
+diff --git a/include/uapi/linux/mptcp_pm.h b/include/uapi/linux/mptcp_pm.h
+index 6ac84b2f636ca2..7359d34da446b9 100644
+--- a/include/uapi/linux/mptcp_pm.h
++++ b/include/uapi/linux/mptcp_pm.h
+@@ -16,10 +16,10 @@
+  *   good time to allocate memory and send ADD_ADDR if needed. Depending on the
+  *   traffic-patterns it can take a long time until the MPTCP_EVENT_ESTABLISHED
+  *   is sent. Attributes: token, family, saddr4 | saddr6, daddr4 | daddr6,
+- *   sport, dport, server-side.
++ *   sport, dport, server-side, [flags].
+  * @MPTCP_EVENT_ESTABLISHED: A MPTCP connection is established (can start new
+  *   subflows). Attributes: token, family, saddr4 | saddr6, daddr4 | daddr6,
+- *   sport, dport, server-side.
++ *   sport, dport, server-side, [flags].
+  * @MPTCP_EVENT_CLOSED: A MPTCP connection has stopped. Attribute: token.
+  * @MPTCP_EVENT_ANNOUNCED: A new address has been announced by the peer.
+  *   Attributes: token, rem_id, family, daddr4 | daddr6 [, dport].
+diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
+index 52ada466bf98f3..68439eb0dc8f35 100644
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -316,9 +316,6 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
+ 			    sizeof(struct io_async_rw));
+ 	ret |= io_alloc_cache_init(&ctx->uring_cache, IO_ALLOC_CACHE_MAX,
+ 			    sizeof(struct uring_cache));
+-	spin_lock_init(&ctx->msg_lock);
+-	ret |= io_alloc_cache_init(&ctx->msg_cache, IO_ALLOC_CACHE_MAX,
+-			    sizeof(struct io_kiocb));
+ 	ret |= io_futex_cache_init(ctx);
+ 	if (ret)
+ 		goto free_ref;
+@@ -358,7 +355,6 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
+ 	io_alloc_cache_free(&ctx->netmsg_cache, io_netmsg_cache_free);
+ 	io_alloc_cache_free(&ctx->rw_cache, io_rw_cache_free);
+ 	io_alloc_cache_free(&ctx->uring_cache, kfree);
+-	io_alloc_cache_free(&ctx->msg_cache, io_msg_cache_free);
+ 	io_futex_cache_free(ctx);
+ 	kfree(ctx->cancel_table.hbs);
+ 	kfree(ctx->cancel_table_locked.hbs);
+@@ -1358,9 +1354,10 @@ static void io_req_task_cancel(struct io_kiocb *req, struct io_tw_state *ts)
+ 
+ void io_req_task_submit(struct io_kiocb *req, struct io_tw_state *ts)
+ {
+-	io_tw_lock(req->ctx, ts);
+-	/* req->task == current here, checking PF_EXITING is safe */
+-	if (unlikely(req->task->flags & PF_EXITING))
++	struct io_ring_ctx *ctx = req->ctx;
++
++	io_tw_lock(ctx, ts);
++	if (unlikely(io_should_terminate_tw(ctx)))
+ 		io_req_defer_failed(req, -EFAULT);
+ 	else if (req->flags & REQ_F_FORCE_ASYNC)
+ 		io_queue_iowq(req);
+@@ -2742,7 +2739,6 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
+ 	io_alloc_cache_free(&ctx->netmsg_cache, io_netmsg_cache_free);
+ 	io_alloc_cache_free(&ctx->rw_cache, io_rw_cache_free);
+ 	io_alloc_cache_free(&ctx->uring_cache, kfree);
+-	io_alloc_cache_free(&ctx->msg_cache, io_msg_cache_free);
+ 	io_futex_cache_free(ctx);
+ 	io_destroy_buffers(ctx);
+ 	mutex_unlock(&ctx->uring_lock);
+diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
+index 70b6675941ff76..e8a3b75bc6c683 100644
+--- a/io_uring/io_uring.h
++++ b/io_uring/io_uring.h
+@@ -421,6 +421,19 @@ static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx)
+ 		      ctx->submitter_task == current);
+ }
+ 
++/*
++ * Terminate the request if either of these conditions are true:
++ *
++ * 1) It's being executed by the original task, but that task is marked
++ *    with PF_EXITING as it's exiting.
++ * 2) PF_KTHREAD is set, in which case the invoker of the task_work is
++ *    our fallback task_work.
++ */
++static inline bool io_should_terminate_tw(struct io_ring_ctx *ctx)
++{
++	return (current->flags & (PF_KTHREAD | PF_EXITING)) || percpu_ref_is_dying(&ctx->refs);
++}
++
+ static inline void io_req_queue_tw_complete(struct io_kiocb *req, s32 res)
+ {
+ 	io_req_set_res(req, res, 0);
+diff --git a/io_uring/kbuf.h b/io_uring/kbuf.h
+index 2586a292dfb914..a3ad8aea45c8a8 100644
+--- a/io_uring/kbuf.h
++++ b/io_uring/kbuf.h
+@@ -143,7 +143,7 @@ static inline bool io_kbuf_commit(struct io_kiocb *req,
+ 		struct io_uring_buf *buf;
+ 
+ 		buf = io_ring_head_to_buf(bl->buf_ring, bl->head, bl->mask);
+-		if (WARN_ON_ONCE(len > buf->len))
++		if (len > buf->len)
+ 			len = buf->len;
+ 		buf->len -= len;
+ 		if (buf->len) {
+diff --git a/io_uring/msg_ring.c b/io_uring/msg_ring.c
+index b68e009bce2180..97708e5132bc4f 100644
+--- a/io_uring/msg_ring.c
++++ b/io_uring/msg_ring.c
+@@ -11,7 +11,6 @@
+ #include "io_uring.h"
+ #include "rsrc.h"
+ #include "filetable.h"
+-#include "alloc_cache.h"
+ #include "msg_ring.h"
+ 
+ /* All valid masks for MSG_RING */
+@@ -76,13 +75,7 @@ static void io_msg_tw_complete(struct io_kiocb *req, struct io_tw_state *ts)
+ 	struct io_ring_ctx *ctx = req->ctx;
+ 
+ 	io_add_aux_cqe(ctx, req->cqe.user_data, req->cqe.res, req->cqe.flags);
+-	if (spin_trylock(&ctx->msg_lock)) {
+-		if (io_alloc_cache_put(&ctx->msg_cache, req))
+-			req = NULL;
+-		spin_unlock(&ctx->msg_lock);
+-	}
+-	if (req)
+-		kfree_rcu(req, rcu_head);
++	kfree_rcu(req, rcu_head);
+ 	percpu_ref_put(&ctx->refs);
+ }
+ 
+@@ -104,19 +97,6 @@ static int io_msg_remote_post(struct io_ring_ctx *ctx, struct io_kiocb *req,
+ 	return 0;
+ }
+ 
+-static struct io_kiocb *io_msg_get_kiocb(struct io_ring_ctx *ctx)
+-{
+-	struct io_kiocb *req = NULL;
+-
+-	if (spin_trylock(&ctx->msg_lock)) {
+-		req = io_alloc_cache_get(&ctx->msg_cache);
+-		spin_unlock(&ctx->msg_lock);
+-		if (req)
+-			return req;
+-	}
+-	return kmem_cache_alloc(req_cachep, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
+-}
+-
+ static int io_msg_data_remote(struct io_kiocb *req)
+ {
+ 	struct io_ring_ctx *target_ctx = req->file->private_data;
+@@ -124,7 +104,7 @@ static int io_msg_data_remote(struct io_kiocb *req)
+ 	struct io_kiocb *target;
+ 	u32 flags = 0;
+ 
+-	target = io_msg_get_kiocb(req->ctx);
++	target = kmem_cache_alloc(req_cachep, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
+ 	if (unlikely(!target))
+ 		return -ENOMEM;
+ 
+diff --git a/io_uring/notif.c b/io_uring/notif.c
+index 28859ae3ee6eb9..d4cf5a1328e639 100644
+--- a/io_uring/notif.c
++++ b/io_uring/notif.c
+@@ -85,7 +85,7 @@ static int io_link_skb(struct sk_buff *skb, struct ubuf_info *uarg)
+ 		return -EEXIST;
+ 
+ 	prev_nd = container_of(prev_uarg, struct io_notif_data, uarg);
+-	prev_notif = cmd_to_io_kiocb(nd);
++	prev_notif = cmd_to_io_kiocb(prev_nd);
+ 
+ 	/* make sure all noifications can be finished in the same task_work */
+ 	if (unlikely(notif->ctx != prev_notif->ctx ||
+diff --git a/io_uring/poll.c b/io_uring/poll.c
+index 17dea8aa09c9b3..bfdb537572f7ff 100644
+--- a/io_uring/poll.c
++++ b/io_uring/poll.c
+@@ -265,8 +265,7 @@ static int io_poll_check_events(struct io_kiocb *req, struct io_tw_state *ts)
+ {
+ 	int v;
+ 
+-	/* req->task == current here, checking PF_EXITING is safe */
+-	if (unlikely(req->task->flags & PF_EXITING))
++	if (unlikely(io_should_terminate_tw(req->ctx)))
+ 		return -ECANCELED;
+ 
+ 	do {
+diff --git a/io_uring/timeout.c b/io_uring/timeout.c
+index 21c4bfea79f1c9..b215b2fbddd01d 100644
+--- a/io_uring/timeout.c
++++ b/io_uring/timeout.c
+@@ -303,7 +303,7 @@ static void io_req_task_link_timeout(struct io_kiocb *req, struct io_tw_state *t
+ 	int ret = -ENOENT;
+ 
+ 	if (prev) {
+-		if (!(req->task->flags & PF_EXITING)) {
++		if (!io_should_terminate_tw(req->ctx)) {
+ 			struct io_cancel_data cd = {
+ 				.ctx		= req->ctx,
+ 				.data		= prev->cqe.user_data,
+diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c
+index b2ce4b56100271..f927844c8ada79 100644
+--- a/io_uring/uring_cmd.c
++++ b/io_uring/uring_cmd.c
+@@ -116,9 +116,13 @@ EXPORT_SYMBOL_GPL(io_uring_cmd_mark_cancelable);
+ static void io_uring_cmd_work(struct io_kiocb *req, struct io_tw_state *ts)
+ {
+ 	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
++	unsigned int flags = IO_URING_F_COMPLETE_DEFER;
++
++	if (io_should_terminate_tw(req->ctx))
++		flags |= IO_URING_F_TASK_DEAD;
+ 
+ 	/* task_work executor checks the deffered list completion */
+-	ioucmd->task_work_cb(ioucmd, IO_URING_F_COMPLETE_DEFER);
++	ioucmd->task_work_cb(ioucmd, flags);
+ }
+ 
+ void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
+diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
+index 62933468aaf46c..5fc2801ee921cc 100644
+--- a/kernel/cgroup/cgroup.c
++++ b/kernel/cgroup/cgroup.c
+@@ -123,8 +123,31 @@ DEFINE_PERCPU_RWSEM(cgroup_threadgroup_rwsem);
+  * of concurrent destructions.  Use a separate workqueue so that cgroup
+  * destruction work items don't end up filling up max_active of system_wq
+  * which may lead to deadlock.
++ *
++ * A cgroup destruction should enqueue work sequentially to:
++ * cgroup_offline_wq: use for css offline work
++ * cgroup_release_wq: use for css release work
++ * cgroup_free_wq: use for free work
++ *
++ * Rationale for using separate workqueues:
++ * The cgroup root free work may depend on completion of other css offline
++ * operations. If all tasks were enqueued to a single workqueue, this could
++ * create a deadlock scenario where:
++ * - Free work waits for other css offline work to complete.
++ * - But other css offline work is queued after free work in the same queue.
++ *
++ * Example deadlock scenario with single workqueue (cgroup_destroy_wq):
++ * 1. umount net_prio
++ * 2. net_prio root destruction enqueues work to cgroup_destroy_wq (CPUx)
++ * 3. perf_event CSS A offline enqueues work to same cgroup_destroy_wq (CPUx)
++ * 4. net_prio cgroup_destroy_root->cgroup_lock_and_drain_offline.
++ * 5. net_prio root destruction blocks waiting for perf_event CSS A offline,
++ *    which can never complete as it's behind in the same queue and
++ *    workqueue's max_active is 1.
+  */
+-static struct workqueue_struct *cgroup_destroy_wq;
++static struct workqueue_struct *cgroup_offline_wq;
++static struct workqueue_struct *cgroup_release_wq;
++static struct workqueue_struct *cgroup_free_wq;
+ 
+ /* generate an array of cgroup subsystem pointers */
+ #define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys,
+@@ -5541,7 +5564,7 @@ static void css_release_work_fn(struct work_struct *work)
+ 	cgroup_unlock();
+ 
+ 	INIT_RCU_WORK(&css->destroy_rwork, css_free_rwork_fn);
+-	queue_rcu_work(cgroup_destroy_wq, &css->destroy_rwork);
++	queue_rcu_work(cgroup_free_wq, &css->destroy_rwork);
+ }
+ 
+ static void css_release(struct percpu_ref *ref)
+@@ -5550,7 +5573,7 @@ static void css_release(struct percpu_ref *ref)
+ 		container_of(ref, struct cgroup_subsys_state, refcnt);
+ 
+ 	INIT_WORK(&css->destroy_work, css_release_work_fn);
+-	queue_work(cgroup_destroy_wq, &css->destroy_work);
++	queue_work(cgroup_release_wq, &css->destroy_work);
+ }
+ 
+ static void init_and_link_css(struct cgroup_subsys_state *css,
+@@ -5685,7 +5708,7 @@ static struct cgroup_subsys_state *css_create(struct cgroup *cgrp,
+ err_free_css:
+ 	list_del_rcu(&css->rstat_css_node);
+ 	INIT_RCU_WORK(&css->destroy_rwork, css_free_rwork_fn);
+-	queue_rcu_work(cgroup_destroy_wq, &css->destroy_rwork);
++	queue_rcu_work(cgroup_free_wq, &css->destroy_rwork);
+ 	return ERR_PTR(err);
+ }
+ 
+@@ -5918,7 +5941,7 @@ static void css_killed_ref_fn(struct percpu_ref *ref)
+ 
+ 	if (atomic_dec_and_test(&css->online_cnt)) {
+ 		INIT_WORK(&css->destroy_work, css_killed_work_fn);
+-		queue_work(cgroup_destroy_wq, &css->destroy_work);
++		queue_work(cgroup_offline_wq, &css->destroy_work);
+ 	}
+ }
+ 
+@@ -6296,8 +6319,14 @@ static int __init cgroup_wq_init(void)
+ 	 * We would prefer to do this in cgroup_init() above, but that
+ 	 * is called before init_workqueues(): so leave this until after.
+ 	 */
+-	cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
+-	BUG_ON(!cgroup_destroy_wq);
++	cgroup_offline_wq = alloc_workqueue("cgroup_offline", 0, 1);
++	BUG_ON(!cgroup_offline_wq);
++
++	cgroup_release_wq = alloc_workqueue("cgroup_release", 0, 1);
++	BUG_ON(!cgroup_release_wq);
++
++	cgroup_free_wq = alloc_workqueue("cgroup_free", 0, 1);
++	BUG_ON(!cgroup_free_wq);
+ 	return 0;
+ }
+ core_initcall(cgroup_wq_init);
+diff --git a/mm/gup.c b/mm/gup.c
+index e323843cc5dd80..e9be7c49542a0f 100644
+--- a/mm/gup.c
++++ b/mm/gup.c
+@@ -2323,6 +2323,31 @@ static void pofs_unpin(struct pages_or_folios *pofs)
+ 		unpin_user_pages(pofs->pages, pofs->nr_entries);
+ }
+ 
++static struct folio *pofs_next_folio(struct folio *folio,
++		struct pages_or_folios *pofs, long *index_ptr)
++{
++	long i = *index_ptr + 1;
++
++	if (!pofs->has_folios && folio_test_large(folio)) {
++		const unsigned long start_pfn = folio_pfn(folio);
++		const unsigned long end_pfn = start_pfn + folio_nr_pages(folio);
++
++		for (; i < pofs->nr_entries; i++) {
++			unsigned long pfn = page_to_pfn(pofs->pages[i]);
++
++			/* Is this page part of this folio? */
++			if (pfn < start_pfn || pfn >= end_pfn)
++				break;
++		}
++	}
++
++	if (unlikely(i == pofs->nr_entries))
++		return NULL;
++	*index_ptr = i;
++
++	return pofs_get_folio(pofs, i);
++}
++
+ /*
+  * Returns the number of collected folios. Return value is always >= 0.
+  */
+@@ -2330,16 +2355,13 @@ static unsigned long collect_longterm_unpinnable_folios(
+ 		struct list_head *movable_folio_list,
+ 		struct pages_or_folios *pofs)
+ {
+-	unsigned long i, collected = 0;
+-	struct folio *prev_folio = NULL;
++	unsigned long collected = 0;
+ 	bool drain_allow = true;
++	struct folio *folio;
++	long i = 0;
+ 
+-	for (i = 0; i < pofs->nr_entries; i++) {
+-		struct folio *folio = pofs_get_folio(pofs, i);
+-
+-		if (folio == prev_folio)
+-			continue;
+-		prev_folio = folio;
++	for (folio = pofs_get_folio(pofs, i); folio;
++	     folio = pofs_next_folio(folio, pofs, &i)) {
+ 
+ 		if (folio_is_longterm_pinnable(folio))
+ 			continue;
+@@ -2354,7 +2376,8 @@ static unsigned long collect_longterm_unpinnable_folios(
+ 			continue;
+ 		}
+ 
+-		if (!folio_test_lru(folio) && drain_allow) {
++		if (drain_allow && folio_ref_count(folio) !=
++				   folio_expected_ref_count(folio) + 1) {
+ 			lru_add_drain_all();
+ 			drain_allow = false;
+ 		}
+diff --git a/mm/migrate.c b/mm/migrate.c
+index 25e7438af968a4..8619aa884eaa87 100644
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -453,20 +453,6 @@ void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
+ }
+ #endif
+ 
+-static int folio_expected_refs(struct address_space *mapping,
+-		struct folio *folio)
+-{
+-	int refs = 1;
+-	if (!mapping)
+-		return refs;
+-
+-	refs += folio_nr_pages(folio);
+-	if (folio_test_private(folio))
+-		refs++;
+-
+-	return refs;
+-}
+-
+ /*
+  * Replace the folio in the mapping.
+  *
+@@ -609,7 +595,7 @@ static int __folio_migrate_mapping(struct address_space *mapping,
+ int folio_migrate_mapping(struct address_space *mapping,
+ 		struct folio *newfolio, struct folio *folio, int extra_count)
+ {
+-	int expected_count = folio_expected_refs(mapping, folio) + extra_count;
++	int expected_count = folio_expected_ref_count(folio) + extra_count + 1;
+ 
+ 	if (folio_ref_count(folio) != expected_count)
+ 		return -EAGAIN;
+@@ -626,7 +612,7 @@ int migrate_huge_page_move_mapping(struct address_space *mapping,
+ 				   struct folio *dst, struct folio *src)
+ {
+ 	XA_STATE(xas, &mapping->i_pages, folio_index(src));
+-	int rc, expected_count = folio_expected_refs(mapping, src);
++	int rc, expected_count = folio_expected_ref_count(src) + 1;
+ 
+ 	if (folio_ref_count(src) != expected_count)
+ 		return -EAGAIN;
+@@ -756,7 +742,7 @@ static int __migrate_folio(struct address_space *mapping, struct folio *dst,
+ 			   struct folio *src, void *src_private,
+ 			   enum migrate_mode mode)
+ {
+-	int rc, expected_count = folio_expected_refs(mapping, src);
++	int rc, expected_count = folio_expected_ref_count(src) + 1;
+ 
+ 	/* Check whether src does not have extra refs before we do more work */
+ 	if (folio_ref_count(src) != expected_count)
+@@ -844,7 +830,7 @@ static int __buffer_migrate_folio(struct address_space *mapping,
+ 		return migrate_folio(mapping, dst, src, mode);
+ 
+ 	/* Check whether page does not have extra refs before we do more work */
+-	expected_count = folio_expected_refs(mapping, src);
++	expected_count = folio_expected_ref_count(src) + 1;
+ 	if (folio_ref_count(src) != expected_count)
+ 		return -EAGAIN;
+ 
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index e3c1e2e1560d75..0ceed77af0fbdf 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -4352,7 +4352,7 @@ static bool sort_folio(struct lruvec *lruvec, struct folio *folio, struct scan_c
+ 	}
+ 
+ 	/* ineligible */
+-	if (!folio_test_lru(folio) || zone > sc->reclaim_idx) {
++	if (zone > sc->reclaim_idx) {
+ 		gen = folio_inc_gen(lruvec, folio, false);
+ 		list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]);
+ 		return true;
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 156da81bce068e..988992ff898b3d 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -3286,6 +3286,7 @@ int tcp_disconnect(struct sock *sk, int flags)
+ 	struct inet_connection_sock *icsk = inet_csk(sk);
+ 	struct tcp_sock *tp = tcp_sk(sk);
+ 	int old_state = sk->sk_state;
++	struct request_sock *req;
+ 	u32 seq;
+ 
+ 	if (old_state != TCP_CLOSE)
+@@ -3400,6 +3401,10 @@ int tcp_disconnect(struct sock *sk, int flags)
+ 
+ 
+ 	/* Clean up fastopen related fields */
++	req = rcu_dereference_protected(tp->fastopen_rsk,
++					lockdep_sock_is_held(sk));
++	if (req)
++		reqsk_fastopen_remove(sk, req, false);
+ 	tcp_free_fastopen_req(tp);
+ 	inet_clear_bit(DEFER_CONNECT, sk);
+ 	tp->fastopen_client_fail = 0;
+diff --git a/net/ipv4/tcp_ao.c b/net/ipv4/tcp_ao.c
+index bbb8d5f0eae7d3..3338b6cc85c487 100644
+--- a/net/ipv4/tcp_ao.c
++++ b/net/ipv4/tcp_ao.c
+@@ -1178,7 +1178,9 @@ void tcp_ao_finish_connect(struct sock *sk, struct sk_buff *skb)
+ 	if (!ao)
+ 		return;
+ 
+-	WRITE_ONCE(ao->risn, tcp_hdr(skb)->seq);
++	/* sk with TCP_REPAIR_ON does not have skb in tcp_finish_connect */
++	if (skb)
++		WRITE_ONCE(ao->risn, tcp_hdr(skb)->seq);
+ 	ao->rcv_sne = 0;
+ 
+ 	hlist_for_each_entry_rcu(key, &ao->head, node, lockdep_sock_is_held(sk))
+diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
+index d1c10f5f951604..69de9fdd779f28 100644
+--- a/net/mac80211/driver-ops.h
++++ b/net/mac80211/driver-ops.h
+@@ -1388,7 +1388,7 @@ drv_get_ftm_responder_stats(struct ieee80211_local *local,
+ 			    struct ieee80211_sub_if_data *sdata,
+ 			    struct cfg80211_ftm_responder_stats *ftm_stats)
+ {
+-	u32 ret = -EOPNOTSUPP;
++	int ret = -EOPNOTSUPP;
+ 
+ 	might_sleep();
+ 	lockdep_assert_wiphy(local->hw.wiphy);
+diff --git a/net/mac80211/main.c b/net/mac80211/main.c
+index caedc939eea19e..c745de0aae7762 100644
+--- a/net/mac80211/main.c
++++ b/net/mac80211/main.c
+@@ -1120,7 +1120,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
+ 	int result, i;
+ 	enum nl80211_band band;
+ 	int channels, max_bitrates;
+-	bool supp_ht, supp_vht, supp_he, supp_eht;
++	bool supp_ht, supp_vht, supp_he, supp_eht, supp_s1g;
+ 	struct cfg80211_chan_def dflt_chandef = {};
+ 
+ 	if (ieee80211_hw_check(hw, QUEUE_CONTROL) &&
+@@ -1236,6 +1236,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
+ 	supp_vht = false;
+ 	supp_he = false;
+ 	supp_eht = false;
++	supp_s1g = false;
+ 	for (band = 0; band < NUM_NL80211_BANDS; band++) {
+ 		const struct ieee80211_sband_iftype_data *iftd;
+ 		struct ieee80211_supported_band *sband;
+@@ -1283,6 +1284,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
+ 			max_bitrates = sband->n_bitrates;
+ 		supp_ht = supp_ht || sband->ht_cap.ht_supported;
+ 		supp_vht = supp_vht || sband->vht_cap.vht_supported;
++		supp_s1g = supp_s1g || sband->s1g_cap.s1g;
+ 
+ 		for_each_sband_iftype_data(sband, i, iftd) {
+ 			u8 he_40_mhz_cap;
+@@ -1411,6 +1413,9 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
+ 		local->scan_ies_len +=
+ 			2 + sizeof(struct ieee80211_vht_cap);
+ 
++	if (supp_s1g)
++		local->scan_ies_len += 2 + sizeof(struct ieee80211_s1g_cap);
++
+ 	/*
+ 	 * HE cap element is variable in size - set len to allow max size */
+ 	if (supp_he) {
+diff --git a/net/mptcp/options.c b/net/mptcp/options.c
+index 7d4718a57bdccd..479a3bfa87aa2e 100644
+--- a/net/mptcp/options.c
++++ b/net/mptcp/options.c
+@@ -985,13 +985,13 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *ssk,
+ 		return false;
+ 	}
+ 
+-	if (mp_opt->deny_join_id0)
+-		WRITE_ONCE(msk->pm.remote_deny_join_id0, true);
+-
+ 	if (unlikely(!READ_ONCE(msk->pm.server_side)))
+ 		pr_warn_once("bogus mpc option on established client sk");
+ 
+ set_fully_established:
++	if (mp_opt->deny_join_id0)
++		WRITE_ONCE(msk->pm.remote_deny_join_id0, true);
++
+ 	mptcp_data_lock((struct sock *)msk);
+ 	__mptcp_subflow_fully_established(msk, subflow, mp_opt);
+ 	mptcp_data_unlock((struct sock *)msk);
+diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
+index b763729b85e0a6..463c2e7956d52e 100644
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -2211,6 +2211,7 @@ static int mptcp_event_created(struct sk_buff *skb,
+ 			       const struct sock *ssk)
+ {
+ 	int err = nla_put_u32(skb, MPTCP_ATTR_TOKEN, READ_ONCE(msk->token));
++	u16 flags = 0;
+ 
+ 	if (err)
+ 		return err;
+@@ -2218,6 +2219,12 @@ static int mptcp_event_created(struct sk_buff *skb,
+ 	if (nla_put_u8(skb, MPTCP_ATTR_SERVER_SIDE, READ_ONCE(msk->pm.server_side)))
+ 		return -EMSGSIZE;
+ 
++	if (READ_ONCE(msk->pm.remote_deny_join_id0))
++		flags |= MPTCP_PM_EV_FLAG_DENY_JOIN_ID0;
++
++	if (flags && nla_put_u16(skb, MPTCP_ATTR_FLAGS, flags))
++		return -EMSGSIZE;
++
+ 	return mptcp_event_add_subflow(skb, ssk);
+ }
+ 
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index d865d08a0c5eda..dde097502230d2 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -413,6 +413,20 @@ static void mptcp_close_wake_up(struct sock *sk)
+ 		sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
+ }
+ 
++static void mptcp_shutdown_subflows(struct mptcp_sock *msk)
++{
++	struct mptcp_subflow_context *subflow;
++
++	mptcp_for_each_subflow(msk, subflow) {
++		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
++		bool slow;
++
++		slow = lock_sock_fast(ssk);
++		tcp_shutdown(ssk, SEND_SHUTDOWN);
++		unlock_sock_fast(ssk, slow);
++	}
++}
++
+ /* called under the msk socket lock */
+ static bool mptcp_pending_data_fin_ack(struct sock *sk)
+ {
+@@ -437,6 +451,7 @@ static void mptcp_check_data_fin_ack(struct sock *sk)
+ 			break;
+ 		case TCP_CLOSING:
+ 		case TCP_LAST_ACK:
++			mptcp_shutdown_subflows(msk);
+ 			mptcp_set_state(sk, TCP_CLOSE);
+ 			break;
+ 		}
+@@ -605,6 +620,7 @@ static bool mptcp_check_data_fin(struct sock *sk)
+ 			mptcp_set_state(sk, TCP_CLOSING);
+ 			break;
+ 		case TCP_FIN_WAIT2:
++			mptcp_shutdown_subflows(msk);
+ 			mptcp_set_state(sk, TCP_CLOSE);
+ 			break;
+ 		default:
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index a05f201d194c52..17d1a9d8b0e98b 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -888,6 +888,10 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
+ 
+ 			ctx->subflow_id = 1;
+ 			owner = mptcp_sk(ctx->conn);
++
++			if (mp_opt.deny_join_id0)
++				WRITE_ONCE(owner->pm.remote_deny_join_id0, true);
++
+ 			mptcp_pm_new_connection(owner, child, 1);
+ 
+ 			/* with OoO packets we can reach here without ingress
+diff --git a/net/rds/ib_frmr.c b/net/rds/ib_frmr.c
+index 28c1b00221780f..bd861191157b54 100644
+--- a/net/rds/ib_frmr.c
++++ b/net/rds/ib_frmr.c
+@@ -133,12 +133,15 @@ static int rds_ib_post_reg_frmr(struct rds_ib_mr *ibmr)
+ 
+ 	ret = ib_map_mr_sg_zbva(frmr->mr, ibmr->sg, ibmr->sg_dma_len,
+ 				&off, PAGE_SIZE);
+-	if (unlikely(ret != ibmr->sg_dma_len))
+-		return ret < 0 ? ret : -EINVAL;
++	if (unlikely(ret != ibmr->sg_dma_len)) {
++		ret = ret < 0 ? ret : -EINVAL;
++		goto out_inc;
++	}
+ 
+-	if (cmpxchg(&frmr->fr_state,
+-		    FRMR_IS_FREE, FRMR_IS_INUSE) != FRMR_IS_FREE)
+-		return -EBUSY;
++	if (cmpxchg(&frmr->fr_state, FRMR_IS_FREE, FRMR_IS_INUSE) != FRMR_IS_FREE) {
++		ret = -EBUSY;
++		goto out_inc;
++	}
+ 
+ 	atomic_inc(&ibmr->ic->i_fastreg_inuse_count);
+ 
+@@ -166,11 +169,10 @@ static int rds_ib_post_reg_frmr(struct rds_ib_mr *ibmr)
+ 		/* Failure here can be because of -ENOMEM as well */
+ 		rds_transition_frwr_state(ibmr, FRMR_IS_INUSE, FRMR_IS_STALE);
+ 
+-		atomic_inc(&ibmr->ic->i_fastreg_wrs);
+ 		if (printk_ratelimit())
+ 			pr_warn("RDS/IB: %s returned error(%d)\n",
+ 				__func__, ret);
+-		goto out;
++		goto out_inc;
+ 	}
+ 
+ 	/* Wait for the registration to complete in order to prevent an invalid
+@@ -179,8 +181,10 @@ static int rds_ib_post_reg_frmr(struct rds_ib_mr *ibmr)
+ 	 */
+ 	wait_event(frmr->fr_reg_done, !frmr->fr_reg);
+ 
+-out:
++	return ret;
+ 
++out_inc:
++	atomic_inc(&ibmr->ic->i_fastreg_wrs);
+ 	return ret;
+ }
+ 
+diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c
+index a8e21060112ffd..2b27d4fe9d3632 100644
+--- a/net/rfkill/rfkill-gpio.c
++++ b/net/rfkill/rfkill-gpio.c
+@@ -94,10 +94,10 @@ static const struct dmi_system_id rfkill_gpio_deny_table[] = {
+ static int rfkill_gpio_probe(struct platform_device *pdev)
+ {
+ 	struct rfkill_gpio_data *rfkill;
+-	struct gpio_desc *gpio;
++	const char *type_name = NULL;
+ 	const char *name_property;
+ 	const char *type_property;
+-	const char *type_name;
++	struct gpio_desc *gpio;
+ 	int ret;
+ 
+ 	if (dmi_check_system(rfkill_gpio_deny_table))
+diff --git a/net/tls/tls.h b/net/tls/tls.h
+index e1eaf12b374264..fca0c0e1700478 100644
+--- a/net/tls/tls.h
++++ b/net/tls/tls.h
+@@ -141,6 +141,7 @@ void update_sk_prot(struct sock *sk, struct tls_context *ctx);
+ 
+ int wait_on_pending_writer(struct sock *sk, long *timeo);
+ void tls_err_abort(struct sock *sk, int err);
++void tls_strp_abort_strp(struct tls_strparser *strp, int err);
+ 
+ int init_prot_info(struct tls_prot_info *prot,
+ 		   const struct tls_crypto_info *crypto_info,
+diff --git a/net/tls/tls_strp.c b/net/tls/tls_strp.c
+index d71643b494a1ae..98e12f0ff57e51 100644
+--- a/net/tls/tls_strp.c
++++ b/net/tls/tls_strp.c
+@@ -13,7 +13,7 @@
+ 
+ static struct workqueue_struct *tls_strp_wq;
+ 
+-static void tls_strp_abort_strp(struct tls_strparser *strp, int err)
++void tls_strp_abort_strp(struct tls_strparser *strp, int err)
+ {
+ 	if (strp->stopped)
+ 		return;
+@@ -211,11 +211,17 @@ static int tls_strp_copyin_frag(struct tls_strparser *strp, struct sk_buff *skb,
+ 				struct sk_buff *in_skb, unsigned int offset,
+ 				size_t in_len)
+ {
++	unsigned int nfrag = skb->len / PAGE_SIZE;
+ 	size_t len, chunk;
+ 	skb_frag_t *frag;
+ 	int sz;
+ 
+-	frag = &skb_shinfo(skb)->frags[skb->len / PAGE_SIZE];
++	if (unlikely(nfrag >= skb_shinfo(skb)->nr_frags)) {
++		DEBUG_NET_WARN_ON_ONCE(1);
++		return -EMSGSIZE;
++	}
++
++	frag = &skb_shinfo(skb)->frags[nfrag];
+ 
+ 	len = in_len;
+ 	/* First make sure we got the header */
+@@ -520,10 +526,8 @@ static int tls_strp_read_sock(struct tls_strparser *strp)
+ 	tls_strp_load_anchor_with_queue(strp, inq);
+ 	if (!strp->stm.full_len) {
+ 		sz = tls_rx_msg_size(strp, strp->anchor);
+-		if (sz < 0) {
+-			tls_strp_abort_strp(strp, sz);
++		if (sz < 0)
+ 			return sz;
+-		}
+ 
+ 		strp->stm.full_len = sz;
+ 
+diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
+index ee92ce3255f936..f46550b96061ea 100644
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -2440,8 +2440,7 @@ int tls_rx_msg_size(struct tls_strparser *strp, struct sk_buff *skb)
+ 	return data_len + TLS_HEADER_SIZE;
+ 
+ read_failure:
+-	tls_err_abort(strp->sk, ret);
+-
++	tls_strp_abort_strp(strp, ret);
+ 	return ret;
+ }
+ 
+diff --git a/sound/firewire/motu/motu-hwdep.c b/sound/firewire/motu/motu-hwdep.c
+index 88d1f4b56e4be4..a220ac0c8eb831 100644
+--- a/sound/firewire/motu/motu-hwdep.c
++++ b/sound/firewire/motu/motu-hwdep.c
+@@ -111,7 +111,7 @@ static __poll_t hwdep_poll(struct snd_hwdep *hwdep, struct file *file,
+ 		events = 0;
+ 	spin_unlock_irq(&motu->lock);
+ 
+-	return events | EPOLLOUT;
++	return events;
+ }
+ 
+ static int hwdep_get_info(struct snd_motu *motu, void __user *arg)
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index eb0e404c178411..5f061d2d9fc969 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -10666,6 +10666,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8992, "HP EliteBook 845 G9", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x103c, 0x8994, "HP EliteBook 855 G9", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8995, "HP EliteBook 855 G9", ALC287_FIXUP_CS35L41_I2C_2),
++	SND_PCI_QUIRK(0x103c, 0x89a0, "HP Laptop 15-dw4xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
+ 	SND_PCI_QUIRK(0x103c, 0x89a4, "HP ProBook 440 G9", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x89a6, "HP ProBook 450 G9", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x89aa, "HP EliteBook 630 G9", ALC236_FIXUP_HP_GPIO_LED),
+diff --git a/sound/soc/codecs/wm8940.c b/sound/soc/codecs/wm8940.c
+index 8a532f7d750c8b..808a4d4b6f80b3 100644
+--- a/sound/soc/codecs/wm8940.c
++++ b/sound/soc/codecs/wm8940.c
+@@ -220,7 +220,7 @@ static const struct snd_kcontrol_new wm8940_snd_controls[] = {
+ 	SOC_SINGLE_TLV("Digital Capture Volume", WM8940_ADCVOL,
+ 		       0, 255, 0, wm8940_adc_tlv),
+ 	SOC_ENUM("Mic Bias Level", wm8940_mic_bias_level_enum),
+-	SOC_SINGLE_TLV("Capture Boost Volue", WM8940_ADCBOOST,
++	SOC_SINGLE_TLV("Capture Boost Volume", WM8940_ADCBOOST,
+ 		       8, 1, 0, wm8940_capture_boost_vol_tlv),
+ 	SOC_SINGLE_TLV("Speaker Playback Volume", WM8940_SPKVOL,
+ 		       0, 63, 0, wm8940_spk_vol_tlv),
+@@ -693,7 +693,12 @@ static int wm8940_update_clocks(struct snd_soc_dai *dai)
+ 	f = wm8940_get_mclkdiv(priv->mclk, fs256, &mclkdiv);
+ 	if (f != priv->mclk) {
+ 		/* The PLL performs best around 90MHz */
+-		fpll = wm8940_get_mclkdiv(22500000, fs256, &mclkdiv);
++		if (fs256 % 8000)
++			f = 22579200;
++		else
++			f = 24576000;
++
++		fpll = wm8940_get_mclkdiv(f, fs256, &mclkdiv);
+ 	}
+ 
+ 	wm8940_set_dai_pll(dai, 0, 0, priv->mclk, fpll);
+diff --git a/sound/soc/codecs/wm8974.c b/sound/soc/codecs/wm8974.c
+index 0ee3655cad01fb..c0a8fc867301c2 100644
+--- a/sound/soc/codecs/wm8974.c
++++ b/sound/soc/codecs/wm8974.c
+@@ -419,10 +419,14 @@ static int wm8974_update_clocks(struct snd_soc_dai *dai)
+ 	fs256 = 256 * priv->fs;
+ 
+ 	f = wm8974_get_mclkdiv(priv->mclk, fs256, &mclkdiv);
+-
+ 	if (f != priv->mclk) {
+ 		/* The PLL performs best around 90MHz */
+-		fpll = wm8974_get_mclkdiv(22500000, fs256, &mclkdiv);
++		if (fs256 % 8000)
++			f = 22579200;
++		else
++			f = 24576000;
++
++		fpll = wm8974_get_mclkdiv(f, fs256, &mclkdiv);
+ 	}
+ 
+ 	wm8974_set_dai_pll(dai, 0, 0, priv->mclk, fpll);
+diff --git a/sound/soc/intel/catpt/pcm.c b/sound/soc/intel/catpt/pcm.c
+index 81a2f0339e0552..ff1fa01acb85b2 100644
+--- a/sound/soc/intel/catpt/pcm.c
++++ b/sound/soc/intel/catpt/pcm.c
+@@ -568,8 +568,9 @@ static const struct snd_pcm_hardware catpt_pcm_hardware = {
+ 				  SNDRV_PCM_INFO_RESUME |
+ 				  SNDRV_PCM_INFO_NO_PERIOD_WAKEUP,
+ 	.formats		= SNDRV_PCM_FMTBIT_S16_LE |
+-				  SNDRV_PCM_FMTBIT_S24_LE |
+ 				  SNDRV_PCM_FMTBIT_S32_LE,
++	.subformats		= SNDRV_PCM_SUBFMTBIT_MSBITS_24 |
++				  SNDRV_PCM_SUBFMTBIT_MSBITS_MAX,
+ 	.period_bytes_min	= PAGE_SIZE,
+ 	.period_bytes_max	= CATPT_BUFFER_MAX_SIZE / CATPT_PCM_PERIODS_MIN,
+ 	.periods_min		= CATPT_PCM_PERIODS_MIN,
+@@ -699,14 +700,18 @@ static struct snd_soc_dai_driver dai_drivers[] = {
+ 		.channels_min = 2,
+ 		.channels_max = 2,
+ 		.rates = SNDRV_PCM_RATE_48000,
+-		.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
++		.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE,
++		.subformats = SNDRV_PCM_SUBFMTBIT_MSBITS_24 |
++			      SNDRV_PCM_SUBFMTBIT_MSBITS_MAX,
+ 	},
+ 	.capture = {
+ 		.stream_name = "Analog Capture",
+ 		.channels_min = 2,
+ 		.channels_max = 4,
+ 		.rates = SNDRV_PCM_RATE_48000,
+-		.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
++		.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE,
++		.subformats = SNDRV_PCM_SUBFMTBIT_MSBITS_24 |
++			      SNDRV_PCM_SUBFMTBIT_MSBITS_MAX,
+ 	},
+ },
+ {
+@@ -718,7 +723,9 @@ static struct snd_soc_dai_driver dai_drivers[] = {
+ 		.channels_min = 2,
+ 		.channels_max = 2,
+ 		.rates = SNDRV_PCM_RATE_8000_192000,
+-		.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
++		.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE,
++		.subformats = SNDRV_PCM_SUBFMTBIT_MSBITS_24 |
++			      SNDRV_PCM_SUBFMTBIT_MSBITS_MAX,
+ 	},
+ },
+ {
+@@ -730,7 +737,9 @@ static struct snd_soc_dai_driver dai_drivers[] = {
+ 		.channels_min = 2,
+ 		.channels_max = 2,
+ 		.rates = SNDRV_PCM_RATE_8000_192000,
+-		.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
++		.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE,
++		.subformats = SNDRV_PCM_SUBFMTBIT_MSBITS_24 |
++			      SNDRV_PCM_SUBFMTBIT_MSBITS_MAX,
+ 	},
+ },
+ {
+@@ -742,7 +751,9 @@ static struct snd_soc_dai_driver dai_drivers[] = {
+ 		.channels_min = 2,
+ 		.channels_max = 2,
+ 		.rates = SNDRV_PCM_RATE_48000,
+-		.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
++		.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE,
++		.subformats = SNDRV_PCM_SUBFMTBIT_MSBITS_24 |
++			      SNDRV_PCM_SUBFMTBIT_MSBITS_MAX,
+ 	},
+ },
+ {
+diff --git a/sound/soc/qcom/qdsp6/audioreach.c b/sound/soc/qcom/qdsp6/audioreach.c
+index 4ebaaf736fb98a..3f5eed5afce55e 100644
+--- a/sound/soc/qcom/qdsp6/audioreach.c
++++ b/sound/soc/qcom/qdsp6/audioreach.c
+@@ -971,6 +971,7 @@ static int audioreach_i2s_set_media_format(struct q6apm_graph *graph,
+ 	param_data->param_id = PARAM_ID_I2S_INTF_CFG;
+ 	param_data->param_size = ic_sz - APM_MODULE_PARAM_DATA_SIZE;
+ 
++	intf_cfg->cfg.lpaif_type = module->hw_interface_type;
+ 	intf_cfg->cfg.intf_idx = module->hw_interface_idx;
+ 	intf_cfg->cfg.sd_line_idx = module->sd_line_idx;
+ 
+diff --git a/sound/soc/qcom/qdsp6/q6apm-lpass-dais.c b/sound/soc/qcom/qdsp6/q6apm-lpass-dais.c
+index 9c98a35ad09945..b46aff1110e143 100644
+--- a/sound/soc/qcom/qdsp6/q6apm-lpass-dais.c
++++ b/sound/soc/qcom/qdsp6/q6apm-lpass-dais.c
+@@ -213,8 +213,10 @@ static int q6apm_lpass_dai_prepare(struct snd_pcm_substream *substream, struct s
+ 
+ 	return 0;
+ err:
+-	q6apm_graph_close(dai_data->graph[dai->id]);
+-	dai_data->graph[dai->id] = NULL;
++	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
++		q6apm_graph_close(dai_data->graph[dai->id]);
++		dai_data->graph[dai->id] = NULL;
++	}
+ 	return rc;
+ }
+ 
+@@ -260,6 +262,7 @@ static const struct snd_soc_dai_ops q6i2s_ops = {
+ 	.shutdown	= q6apm_lpass_dai_shutdown,
+ 	.set_channel_map  = q6dma_set_channel_map,
+ 	.hw_params        = q6dma_hw_params,
++	.set_fmt	= q6i2s_set_fmt,
+ };
+ 
+ static const struct snd_soc_dai_ops q6hdmi_ops = {
+diff --git a/sound/soc/sof/intel/hda-stream.c b/sound/soc/sof/intel/hda-stream.c
+index 3ac63ce67ab1ce..24f3cc7676142b 100644
+--- a/sound/soc/sof/intel/hda-stream.c
++++ b/sound/soc/sof/intel/hda-stream.c
+@@ -864,7 +864,7 @@ int hda_dsp_stream_init(struct snd_sof_dev *sdev)
+ 
+ 	if (num_capture >= SOF_HDA_CAPTURE_STREAMS) {
+ 		dev_err(sdev->dev, "error: too many capture streams %d\n",
+-			num_playback);
++			num_capture);
+ 		return -EINVAL;
+ 	}
+ 
+diff --git a/tools/arch/loongarch/include/asm/inst.h b/tools/arch/loongarch/include/asm/inst.h
+index c25b5853181dba..d68fad63c8b732 100644
+--- a/tools/arch/loongarch/include/asm/inst.h
++++ b/tools/arch/loongarch/include/asm/inst.h
+@@ -51,6 +51,10 @@ enum reg2i16_op {
+ 	bgeu_op		= 0x1b,
+ };
+ 
++enum reg3_op {
++	amswapw_op	= 0x70c0,
++};
++
+ struct reg0i15_format {
+ 	unsigned int immediate : 15;
+ 	unsigned int opcode : 17;
+@@ -96,6 +100,13 @@ struct reg2i16_format {
+ 	unsigned int opcode : 6;
+ };
+ 
++struct reg3_format {
++	unsigned int rd : 5;
++	unsigned int rj : 5;
++	unsigned int rk : 5;
++	unsigned int opcode : 17;
++};
++
+ union loongarch_instruction {
+ 	unsigned int word;
+ 	struct reg0i15_format	reg0i15_format;
+@@ -105,6 +116,7 @@ union loongarch_instruction {
+ 	struct reg2i12_format	reg2i12_format;
+ 	struct reg2i14_format	reg2i14_format;
+ 	struct reg2i16_format	reg2i16_format;
++	struct reg3_format	reg3_format;
+ };
+ 
+ #define LOONGARCH_INSN_SIZE	sizeof(union loongarch_instruction)
+diff --git a/tools/objtool/arch/loongarch/decode.c b/tools/objtool/arch/loongarch/decode.c
+index 69b66994f2a155..5687c4996517c8 100644
+--- a/tools/objtool/arch/loongarch/decode.c
++++ b/tools/objtool/arch/loongarch/decode.c
+@@ -281,6 +281,25 @@ static bool decode_insn_reg2i16_fomat(union loongarch_instruction inst,
+ 	return true;
+ }
+ 
++static bool decode_insn_reg3_fomat(union loongarch_instruction inst,
++				   struct instruction *insn)
++{
++	switch (inst.reg3_format.opcode) {
++	case amswapw_op:
++		if (inst.reg3_format.rd == LOONGARCH_GPR_ZERO &&
++		    inst.reg3_format.rk == LOONGARCH_GPR_RA &&
++		    inst.reg3_format.rj == LOONGARCH_GPR_ZERO) {
++			/* amswap.w $zero, $ra, $zero */
++			insn->type = INSN_BUG;
++		}
++		break;
++	default:
++		return false;
++	}
++
++	return true;
++}
++
+ int arch_decode_instruction(struct objtool_file *file, const struct section *sec,
+ 			    unsigned long offset, unsigned int maxlen,
+ 			    struct instruction *insn)
+@@ -312,11 +331,19 @@ int arch_decode_instruction(struct objtool_file *file, const struct section *sec
+ 		return 0;
+ 	if (decode_insn_reg2i16_fomat(inst, insn))
+ 		return 0;
++	if (decode_insn_reg3_fomat(inst, insn))
++		return 0;
+ 
+-	if (inst.word == 0)
++	if (inst.word == 0) {
++		/* andi $zero, $zero, 0x0 */
+ 		insn->type = INSN_NOP;
+-	else if (inst.reg0i15_format.opcode == break_op) {
+-		/* break */
++	} else if (inst.reg0i15_format.opcode == break_op &&
++		   inst.reg0i15_format.immediate == 0x0) {
++		/* break 0x0 */
++		insn->type = INSN_TRAP;
++	} else if (inst.reg0i15_format.opcode == break_op &&
++		   inst.reg0i15_format.immediate == 0x1) {
++		/* break 0x1 */
+ 		insn->type = INSN_BUG;
+ 	} else if (inst.reg2_format.opcode == ertn_op) {
+ 		/* ertn */
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.c b/tools/testing/selftests/net/mptcp/mptcp_connect.c
+index c83a8b47bbdfa5..fc9eff0e89e226 100644
+--- a/tools/testing/selftests/net/mptcp/mptcp_connect.c
++++ b/tools/testing/selftests/net/mptcp/mptcp_connect.c
+@@ -1079,6 +1079,7 @@ int main_loop_s(int listensock)
+ 	struct pollfd polls;
+ 	socklen_t salen;
+ 	int remotesock;
++	int err = 0;
+ 	int fd = 0;
+ 
+ again:
+@@ -1111,7 +1112,7 @@ int main_loop_s(int listensock)
+ 		SOCK_TEST_TCPULP(remotesock, 0);
+ 
+ 		memset(&winfo, 0, sizeof(winfo));
+-		copyfd_io(fd, remotesock, 1, true, &winfo);
++		err = copyfd_io(fd, remotesock, 1, true, &winfo);
+ 	} else {
+ 		perror("accept");
+ 		return 1;
+@@ -1120,10 +1121,10 @@ int main_loop_s(int listensock)
+ 	if (cfg_input)
+ 		close(fd);
+ 
+-	if (--cfg_repeat > 0)
++	if (!err && --cfg_repeat > 0)
+ 		goto again;
+ 
+-	return 0;
++	return err;
+ }
+ 
+ static void init_rng(void)
+@@ -1233,7 +1234,7 @@ void xdisconnect(int fd)
+ 	else
+ 		xerror("bad family");
+ 
+-	strcpy(cmd, "ss -M | grep -q ");
++	strcpy(cmd, "ss -Mnt | grep -q ");
+ 	cmdlen = strlen(cmd);
+ 	if (!inet_ntop(addr.ss_family, raw_addr, &cmd[cmdlen],
+ 		       sizeof(cmd) - cmdlen))
+@@ -1243,7 +1244,7 @@ void xdisconnect(int fd)
+ 
+ 	/*
+ 	 * wait until the pending data is completely flushed and all
+-	 * the MPTCP sockets reached the closed status.
++	 * the sockets reached the closed status.
+ 	 * disconnect will bypass/ignore/drop any pending data.
+ 	 */
+ 	for (i = 0; ; i += msec_sleep) {
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_sockopt.c b/tools/testing/selftests/net/mptcp/mptcp_sockopt.c
+index 926b0be87c9905..1dc2bd6ee4a50e 100644
+--- a/tools/testing/selftests/net/mptcp/mptcp_sockopt.c
++++ b/tools/testing/selftests/net/mptcp/mptcp_sockopt.c
+@@ -658,22 +658,26 @@ static void process_one_client(int fd, int pipefd)
+ 
+ 	do_getsockopts(&s, fd, ret, ret2);
+ 	if (s.mptcpi_rcv_delta != (uint64_t)ret + 1)
+-		xerror("mptcpi_rcv_delta %" PRIu64 ", expect %" PRIu64, s.mptcpi_rcv_delta, ret + 1, s.mptcpi_rcv_delta - ret);
++		xerror("mptcpi_rcv_delta %" PRIu64 ", expect %" PRIu64 ", diff %" PRId64,
++		       s.mptcpi_rcv_delta, ret + 1, s.mptcpi_rcv_delta - (ret + 1));
+ 
+ 	/* be nice when running on top of older kernel */
+ 	if (s.pkt_stats_avail) {
+ 		if (s.last_sample.mptcpi_bytes_sent != ret2)
+-			xerror("mptcpi_bytes_sent %" PRIu64 ", expect %" PRIu64,
++			xerror("mptcpi_bytes_sent %" PRIu64 ", expect %" PRIu64
++			       ", diff %" PRId64,
+ 			       s.last_sample.mptcpi_bytes_sent, ret2,
+ 			       s.last_sample.mptcpi_bytes_sent - ret2);
+ 		if (s.last_sample.mptcpi_bytes_received != ret)
+-			xerror("mptcpi_bytes_received %" PRIu64 ", expect %" PRIu64,
++			xerror("mptcpi_bytes_received %" PRIu64 ", expect %" PRIu64
++			       ", diff %" PRId64,
+ 			       s.last_sample.mptcpi_bytes_received, ret,
+ 			       s.last_sample.mptcpi_bytes_received - ret);
+ 		if (s.last_sample.mptcpi_bytes_acked != ret)
+-			xerror("mptcpi_bytes_acked %" PRIu64 ", expect %" PRIu64,
+-			       s.last_sample.mptcpi_bytes_acked, ret2,
+-			       s.last_sample.mptcpi_bytes_acked - ret2);
++			xerror("mptcpi_bytes_acked %" PRIu64 ", expect %" PRIu64
++			       ", diff %" PRId64,
++			       s.last_sample.mptcpi_bytes_acked, ret,
++			       s.last_sample.mptcpi_bytes_acked - ret);
+ 	}
+ 
+ 	close(fd);
+diff --git a/tools/testing/selftests/net/mptcp/pm_nl_ctl.c b/tools/testing/selftests/net/mptcp/pm_nl_ctl.c
+index 994a556f46c151..93fea3442216c8 100644
+--- a/tools/testing/selftests/net/mptcp/pm_nl_ctl.c
++++ b/tools/testing/selftests/net/mptcp/pm_nl_ctl.c
+@@ -188,6 +188,13 @@ static int capture_events(int fd, int event_group)
+ 					fprintf(stderr, ",error:%u", *(__u8 *)RTA_DATA(attrs));
+ 				else if (attrs->rta_type == MPTCP_ATTR_SERVER_SIDE)
+ 					fprintf(stderr, ",server_side:%u", *(__u8 *)RTA_DATA(attrs));
++				else if (attrs->rta_type == MPTCP_ATTR_FLAGS) {
++					__u16 flags = *(__u16 *)RTA_DATA(attrs);
++
++					/* only print when present, easier */
++					if (flags & MPTCP_PM_EV_FLAG_DENY_JOIN_ID0)
++						fprintf(stderr, ",deny_join_id0:1");
++				}
+ 
+ 				attrs = RTA_NEXT(attrs, msg_len);
+ 			}
+diff --git a/tools/testing/selftests/net/mptcp/userspace_pm.sh b/tools/testing/selftests/net/mptcp/userspace_pm.sh
+index 3651f73451cf8b..cc682bf675b2b6 100755
+--- a/tools/testing/selftests/net/mptcp/userspace_pm.sh
++++ b/tools/testing/selftests/net/mptcp/userspace_pm.sh
+@@ -173,6 +173,9 @@ make_connection()
+ 		is_v6="v4"
+ 	fi
+ 
++	# set this on the client side only: will not affect the rest
++	ip netns exec "$ns2" sysctl -q net.mptcp.allow_join_initial_addr_port=0
++
+ 	:>"$client_evts"
+ 	:>"$server_evts"
+ 
+@@ -195,23 +198,28 @@ make_connection()
+ 	local client_token
+ 	local client_port
+ 	local client_serverside
++	local client_nojoin
+ 	local server_token
+ 	local server_serverside
++	local server_nojoin
+ 
+ 	client_token=$(mptcp_lib_evts_get_info token "$client_evts")
+ 	client_port=$(mptcp_lib_evts_get_info sport "$client_evts")
+ 	client_serverside=$(mptcp_lib_evts_get_info server_side "$client_evts")
++	client_nojoin=$(mptcp_lib_evts_get_info deny_join_id0 "$client_evts")
+ 	server_token=$(mptcp_lib_evts_get_info token "$server_evts")
+ 	server_serverside=$(mptcp_lib_evts_get_info server_side "$server_evts")
++	server_nojoin=$(mptcp_lib_evts_get_info deny_join_id0 "$server_evts")
+ 
+ 	print_test "Established IP${is_v6} MPTCP Connection ns2 => ns1"
+-	if [ "$client_token" != "" ] && [ "$server_token" != "" ] && [ "$client_serverside" = 0 ] &&
+-		   [ "$server_serverside" = 1 ]
++	if [ "${client_token}" != "" ] && [ "${server_token}" != "" ] &&
++	   [ "${client_serverside}" = 0 ] && [ "${server_serverside}" = 1 ] &&
++	   [ "${client_nojoin:-0}" = 0 ] && [ "${server_nojoin:-0}" = 1 ]
+ 	then
+ 		test_pass
+ 		print_title "Connection info: ${client_addr}:${client_port} -> ${connect_addr}:${app_port}"
+ 	else
+-		test_fail "Expected tokens (c:${client_token} - s:${server_token}) and server (c:${client_serverside} - s:${server_serverside})"
++		test_fail "Expected tokens (c:${client_token} - s:${server_token}), server (c:${client_serverside} - s:${server_serverside}), nojoin (c:${client_nojoin} - s:${server_nojoin})"
+ 		mptcp_lib_result_print_all_tap
+ 		exit ${KSFT_FAIL}
+ 	fi
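
A note on the mm/gup.c hunk above, since the loop restructuring is the
subtlest part of this patch: the old code deduplicated only *adjacent*
entries that resolved to the same folio, whereas the new pofs_next_folio()
jumps over every entry whose pfn falls inside the current large folio's
range. A minimal userspace sketch of that skip logic follows; it is an
illustration only, with struct folio, pfn_to_folio() and the sample pfn
array invented for the example, and the kernel's has_folios /
folio_test_large() preconditions omitted.

#include <stdio.h>

struct folio { unsigned long start_pfn, nr_pages; };

/* Toy lookup standing in for page_folio(): find the folio covering a pfn. */
static const struct folio *pfn_to_folio(const struct folio *folios,
					int nfolios, unsigned long pfn)
{
	for (int i = 0; i < nfolios; i++) {
		unsigned long start = folios[i].start_pfn;

		if (pfn >= start && pfn < start + folios[i].nr_pages)
			return &folios[i];
	}
	return NULL;
}

/* Mirrors the shape of pofs_next_folio(): advance *i past every entry that
 * still falls inside the current folio's pfn range, then return the folio
 * of the first entry outside it (or NULL at the end of the array). */
static const struct folio *next_folio(const struct folio *cur,
				      const unsigned long *pfns, long n,
				      long *i, const struct folio *folios,
				      int nfolios)
{
	const unsigned long start = cur->start_pfn;
	const unsigned long end = start + cur->nr_pages;
	long j = *i + 1;

	for (; j < n; j++) {
		/* Is this page part of the current folio? */
		if (pfns[j] < start || pfns[j] >= end)
			break;
	}
	if (j == n)
		return NULL;
	*i = j;
	return pfn_to_folio(folios, nfolios, pfns[j]);
}

int main(void)
{
	const struct folio folios[] = { { 100, 4 }, { 200, 1 } };
	const unsigned long pfns[] = { 100, 101, 102, 103, 200 };
	long i = 0;

	for (const struct folio *f = pfn_to_folio(folios, 2, pfns[0]); f;
	     f = next_folio(f, pfns, 5, &i, folios, 2))
		printf("visiting folio at pfn %lu (entry %ld)\n",
		       f->start_pfn, i);
	return 0;
}

Built with any C99 compiler it prints one line per folio (entries 0 and 4),
the visit-each-folio-once property the kernel change establishes without
the old prev_folio comparison.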


* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2025-09-20  6:14 Arisu Tachibana
  0 siblings, 0 replies; 82+ messages in thread
From: Arisu Tachibana @ 2025-09-20  6:14 UTC (permalink / raw
  To: gentoo-commits

commit:     757e310886c9069a06c656e1462bec40104764c8
Author:     Arisu Tachibana <alicef <AT> gentoo <DOT> org>
AuthorDate: Sat Sep 20 06:13:44 2025 +0000
Commit:     Arisu Tachibana <alicef <AT> gentoo <DOT> org>
CommitDate: Sat Sep 20 06:13:44 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=757e3108

Remove 1801_proc_fix_type_confusion_in_pde_set_flags.patch

Signed-off-by: Arisu Tachibana <alicef <AT> gentoo.org>

 0000_README                                        |  4 ---
 ..._proc_fix_type_confusion_in_pde_set_flags.patch | 40 ----------------------
 2 files changed, 44 deletions(-)

diff --git a/0000_README b/0000_README
index d95bc841..737863c6 100644
--- a/0000_README
+++ b/0000_README
@@ -255,10 +255,6 @@ Patch:  1730_parisc-Disable-prctl.patch
 From:   https://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux.git
 Desc:   prctl: Temporarily disable prctl(PR_SET_MDWE) on parisc
 
-Patch:  1801_proc_fix_type_confusion_in_pde_set_flags.patch
-From:   https://lore.kernel.org/linux-fsdevel/20250904135715.3972782-1-wangzijie1@honor.com/
-Desc:   proc: fix type confusion in pde_set_flags()
-
 Patch:  2000_BT-Check-key-sizes-only-if-Secure-Simple-Pairing-enabled.patch
 From:   https://lore.kernel.org/linux-bluetooth/20190522070540.48895-1-marcel@holtmann.org/raw
 Desc:   Bluetooth: Check key sizes only when Secure Simple Pairing is enabled. See bug #686758

diff --git a/1801_proc_fix_type_confusion_in_pde_set_flags.patch b/1801_proc_fix_type_confusion_in_pde_set_flags.patch
deleted file mode 100644
index 4777dbdc..00000000
--- a/1801_proc_fix_type_confusion_in_pde_set_flags.patch
+++ /dev/null
@@ -1,40 +0,0 @@
-Subject: [PATCH] proc: fix type confusion in pde_set_flags()
-
-Commit 2ce3d282bd50 ("proc: fix missing pde_set_flags() for net proc files")
-missed a key part in the definition of proc_dir_entry:
-
-union {
-	const struct proc_ops *proc_ops;
-	const struct file_operations *proc_dir_ops;
-};
-
-So dereference of ->proc_ops assumes it is a proc_ops structure results in
-type confusion and make NULL check for 'proc_ops' not work for proc dir.
-
-Add !S_ISDIR(dp->mode) test before calling pde_set_flags() to fix it.
-
-Fixes: 2ce3d282bd50 ("proc: fix missing pde_set_flags() for net proc files")
-Reported-by: Brad Spengler <spender@grsecurity.net>
-Signed-off-by: wangzijie <wangzijie1@honor.com>
----
- fs/proc/generic.c | 3 ++-
- 1 file changed, 2 insertions(+), 1 deletion(-)
-
-diff --git a/fs/proc/generic.c b/fs/proc/generic.c
-index bd0c099cf..176281112 100644
---- a/fs/proc/generic.c
-+++ b/fs/proc/generic.c
-@@ -393,7 +393,8 @@ struct proc_dir_entry *proc_register(struct proc_dir_entry *dir,
- 	if (proc_alloc_inum(&dp->low_ino))
- 		goto out_free_entry;
- 
--	pde_set_flags(dp);
-+	if (!S_ISDIR(dp->mode))
-+		pde_set_flags(dp);
- 
- 	write_lock(&proc_subdir_lock);
- 	dp->parent = dir;
--- 
-2.25.1
-
-
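
For readers unfamiliar with the bug class behind the patch removed above:
proc_dir_entry keeps ->proc_ops and ->proc_dir_ops in a single anonymous
union, so a NULL check on ->proc_ops also passes for directories, where
that slot actually holds a pointer of the other type, and any field read
through ->proc_ops then interprets foreign memory. A small userspace
analogy of the pattern, and of the S_ISDIR()-style guard the fix adds,
follows; the struct and function names are invented for illustration and
are not the kernel's.

#include <stdio.h>

struct file_ops_like { unsigned int flags; };	/* what file entries store */
struct dir_ops_like  { void (*lookup)(void); };	/* what directories store */

struct entry {
	int is_dir;
	union {			/* one slot, two interpretations */
		const struct file_ops_like *proc_ops;
		const struct dir_ops_like  *dir_ops;
	};
};

static void pde_set_flags_like(struct entry *e)
{
	/* The NULL check passes for directories too: it reads the shared
	 * union slot, which holds a valid but differently typed pointer,
	 * so the field access below is type confusion for a directory. */
	if (e->proc_ops)
		printf("reading flags=%u from foreign storage\n",
		       e->proc_ops->flags);
}

int main(void)
{
	static const struct dir_ops_like dops = { NULL };
	struct entry dir = { .is_dir = 1, .dir_ops = &dops };

	if (!dir.is_dir)	/* the fix: never call this for directories */
		pde_set_flags_like(&dir);
	else
		puts("directory: pde_set_flags() skipped");
	return 0;
}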


* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2025-09-20  5:26 Arisu Tachibana
  0 siblings, 0 replies; 82+ messages in thread
From: Arisu Tachibana @ 2025-09-20  5:26 UTC (permalink / raw
  To: gentoo-commits

commit:     0dedd8172dae29400c4809cfa407918e5761e495
Author:     Arisu Tachibana <alicef <AT> gentoo <DOT> org>
AuthorDate: Sat Sep 20 05:25:57 2025 +0000
Commit:     Arisu Tachibana <alicef <AT> gentoo <DOT> org>
CommitDate: Sat Sep 20 05:25:57 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=0dedd817

Linux patch 6.12.48

Signed-off-by: Arisu Tachibana <alicef <AT> gentoo.org>

 0000_README              |    4 +
 1047_linux-6.12.48.patch | 6907 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 6911 insertions(+)

diff --git a/0000_README b/0000_README
index faedd16f..d95bc841 100644
--- a/0000_README
+++ b/0000_README
@@ -231,6 +231,10 @@ Patch:  1046_linux-6.12.47.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.12.47
 
+Patch:  1047_linux-6.12.48.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.12.48
+
 Patch:  1500_fortify-copy-size-value-range-tracking-fix.patch
 From:   https://git.kernel.org/
 Desc:   fortify: Hide run-time copy size from value range tracking

diff --git a/1047_linux-6.12.48.patch b/1047_linux-6.12.48.patch
new file mode 100644
index 00000000..8ac710e3
--- /dev/null
+++ b/1047_linux-6.12.48.patch
@@ -0,0 +1,6907 @@
+diff --git a/Documentation/devicetree/bindings/serial/brcm,bcm7271-uart.yaml b/Documentation/devicetree/bindings/serial/brcm,bcm7271-uart.yaml
+index 89c462653e2d33..8cc848ae11cb73 100644
+--- a/Documentation/devicetree/bindings/serial/brcm,bcm7271-uart.yaml
++++ b/Documentation/devicetree/bindings/serial/brcm,bcm7271-uart.yaml
+@@ -41,7 +41,7 @@ properties:
+           - const: dma_intr2
+ 
+   clocks:
+-    minItems: 1
++    maxItems: 1
+ 
+   clock-names:
+     const: sw_baud
+diff --git a/Documentation/filesystems/nfs/localio.rst b/Documentation/filesystems/nfs/localio.rst
+index bd1967e2eab329..20fc901a08f4d3 100644
+--- a/Documentation/filesystems/nfs/localio.rst
++++ b/Documentation/filesystems/nfs/localio.rst
+@@ -306,6 +306,19 @@ is issuing IO to the underlying local filesystem that it is sharing with
+ the NFS server. See: fs/nfs/localio.c:nfs_local_doio() and
+ fs/nfs/localio.c:nfs_local_commit().
+ 
++With normal NFS that makes use of RPC to issue IO to the server, if an
++application uses O_DIRECT the NFS client will bypass the pagecache but
++the NFS server will not. Because the NFS server's use of buffered IO
++affords applications to be less precise with their alignment when
++issuing IO to the NFS client. LOCALIO can be configured to use O_DIRECT
++semantics by setting the 'localio_O_DIRECT_semantics' nfs module
++parameter to Y, e.g.:
++
++  echo Y > /sys/module/nfs/parameters/localio_O_DIRECT_semantics
++
++Once enabled, it will cause LOCALIO to use O_DIRECT semantics (this may
++cause IO to fail if applications do not properly align their IO).
++
+ Security
+ ========
+ 
+diff --git a/Documentation/netlink/specs/mptcp_pm.yaml b/Documentation/netlink/specs/mptcp_pm.yaml
+index dc190bf838fec6..7e295bad8b2923 100644
+--- a/Documentation/netlink/specs/mptcp_pm.yaml
++++ b/Documentation/netlink/specs/mptcp_pm.yaml
+@@ -22,65 +22,67 @@ definitions:
+       doc: unused event
+      -
+       name: created
+-      doc:
+-        token, family, saddr4 | saddr6, daddr4 | daddr6, sport, dport
++      doc: >-
+         A new MPTCP connection has been created. It is the good time to
+         allocate memory and send ADD_ADDR if needed. Depending on the
+         traffic-patterns it can take a long time until the
+         MPTCP_EVENT_ESTABLISHED is sent.
++        Attributes: token, family, saddr4 | saddr6, daddr4 | daddr6, sport,
++        dport, server-side.
+      -
+       name: established
+-      doc:
+-        token, family, saddr4 | saddr6, daddr4 | daddr6, sport, dport
++      doc: >-
+         A MPTCP connection is established (can start new subflows).
++        Attributes: token, family, saddr4 | saddr6, daddr4 | daddr6, sport,
++        dport, server-side.
+      -
+       name: closed
+-      doc:
+-        token
++      doc: >-
+         A MPTCP connection has stopped.
++        Attribute: token.
+      -
+       name: announced
+       value: 6
+-      doc:
+-        token, rem_id, family, daddr4 | daddr6 [, dport]
++      doc: >-
+         A new address has been announced by the peer.
++        Attributes: token, rem_id, family, daddr4 | daddr6 [, dport].
+      -
+       name: removed
+-      doc:
+-        token, rem_id
++      doc: >-
+         An address has been lost by the peer.
++        Attributes: token, rem_id.
+      -
+       name: sub-established
+       value: 10
+-      doc:
+-        token, family, loc_id, rem_id, saddr4 | saddr6, daddr4 | daddr6, sport,
+-        dport, backup, if_idx [, error]
++      doc: >-
+         A new subflow has been established. 'error' should not be set.
++        Attributes: token, family, loc_id, rem_id, saddr4 | saddr6, daddr4 |
++        daddr6, sport, dport, backup, if-idx [, error].
+      -
+       name: sub-closed
+-      doc:
+-        token, family, loc_id, rem_id, saddr4 | saddr6, daddr4 | daddr6, sport,
+-        dport, backup, if_idx [, error]
++      doc: >-
+         A subflow has been closed. An error (copy of sk_err) could be set if an
+         error has been detected for this subflow.
++        Attributes: token, family, loc_id, rem_id, saddr4 | saddr6, daddr4 |
++        daddr6, sport, dport, backup, if-idx [, error].
+      -
+       name: sub-priority
+       value: 13
+-      doc:
+-        token, family, loc_id, rem_id, saddr4 | saddr6, daddr4 | daddr6, sport,
+-        dport, backup, if_idx [, error]
++      doc: >-
+         The priority of a subflow has changed. 'error' should not be set.
++        Attributes: token, family, loc_id, rem_id, saddr4 | saddr6, daddr4 |
++        daddr6, sport, dport, backup, if-idx [, error].
+      -
+       name: listener-created
+       value: 15
+-      doc:
+-        family, sport, saddr4 | saddr6
++      doc: >-
+         A new PM listener is created.
++        Attributes: family, sport, saddr4 | saddr6.
+      -
+       name: listener-closed
+-      doc:
+-        family, sport, saddr4 | saddr6
++      doc: >-
+         A PM listener is closed.
++        Attributes: family, sport, saddr4 | saddr6.
+ 
+ attribute-sets:
+   -
+@@ -253,8 +255,8 @@ attribute-sets:
+         name: timeout
+         type: u32
+       -
+-        name: if_idx
+-        type: u32
++        name: if-idx
++        type: s32
+       -
+         name: reset-reason
+         type: u32
+diff --git a/Documentation/networking/can.rst b/Documentation/networking/can.rst
+index 62519d38c58bad..58cc609e8669bf 100644
+--- a/Documentation/networking/can.rst
++++ b/Documentation/networking/can.rst
+@@ -742,7 +742,7 @@ The broadcast manager sends responses to user space in the same form:
+             struct timeval ival1, ival2;    /* count and subsequent interval */
+             canid_t can_id;                 /* unique can_id for task */
+             __u32 nframes;                  /* number of can_frames following */
+-            struct can_frame frames[0];
++            struct can_frame frames[];
+     };
+ 
+ The aligned payload 'frames' uses the same basic CAN frame structure defined
+diff --git a/Makefile b/Makefile
+index 2500f343c6c8a2..ede8c04ea112bc 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 12
+-SUBLEVEL = 47
++SUBLEVEL = 48
+ EXTRAVERSION =
+ NAME = Baby Opossum Posse
+ 
+diff --git a/arch/riscv/include/asm/compat.h b/arch/riscv/include/asm/compat.h
+index aa103530a5c83a..6081327e55f5b6 100644
+--- a/arch/riscv/include/asm/compat.h
++++ b/arch/riscv/include/asm/compat.h
+@@ -9,7 +9,6 @@
+  */
+ #include <linux/types.h>
+ #include <linux/sched.h>
+-#include <linux/sched/task_stack.h>
+ #include <asm-generic/compat.h>
+ 
+ static inline int is_compat_task(void)
+diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
+index 6d6b057b562fda..b017db3344cb52 100644
+--- a/arch/s390/kernel/perf_cpum_cf.c
++++ b/arch/s390/kernel/perf_cpum_cf.c
+@@ -761,8 +761,6 @@ static int __hw_perf_event_init(struct perf_event *event, unsigned int type)
+ 		break;
+ 
+ 	case PERF_TYPE_HARDWARE:
+-		if (is_sampling_event(event))	/* No sampling support */
+-			return -ENOENT;
+ 		ev = attr->config;
+ 		if (!attr->exclude_user && attr->exclude_kernel) {
+ 			/*
+@@ -860,6 +858,8 @@ static int cpumf_pmu_event_init(struct perf_event *event)
+ 	unsigned int type = event->attr.type;
+ 	int err = -ENOENT;
+ 
++	if (is_sampling_event(event))	/* No sampling support */
++		return err;
+ 	if (type == PERF_TYPE_HARDWARE || type == PERF_TYPE_RAW)
+ 		err = __hw_perf_event_init(event, type);
+ 	else if (event->pmu->type == type)
+diff --git a/arch/s390/kernel/perf_pai_crypto.c b/arch/s390/kernel/perf_pai_crypto.c
+index 10725f5a6f0fd1..11200880a96c18 100644
+--- a/arch/s390/kernel/perf_pai_crypto.c
++++ b/arch/s390/kernel/perf_pai_crypto.c
+@@ -286,10 +286,10 @@ static int paicrypt_event_init(struct perf_event *event)
+ 	/* PAI crypto PMU registered as PERF_TYPE_RAW, check event type */
+ 	if (a->type != PERF_TYPE_RAW && event->pmu->type != a->type)
+ 		return -ENOENT;
+-	/* PAI crypto event must be in valid range */
++	/* PAI crypto event must be in valid range, try others if not */
+ 	if (a->config < PAI_CRYPTO_BASE ||
+ 	    a->config > PAI_CRYPTO_BASE + paicrypt_cnt)
+-		return -EINVAL;
++		return -ENOENT;
+ 	/* Allow only CRYPTO_ALL for sampling */
+ 	if (a->sample_period && a->config != PAI_CRYPTO_BASE)
+ 		return -EINVAL;
+diff --git a/arch/s390/kernel/perf_pai_ext.c b/arch/s390/kernel/perf_pai_ext.c
+index a8f0bad99cf04f..28398e313b58d1 100644
+--- a/arch/s390/kernel/perf_pai_ext.c
++++ b/arch/s390/kernel/perf_pai_ext.c
+@@ -266,7 +266,7 @@ static int paiext_event_valid(struct perf_event *event)
+ 		event->hw.config_base = offsetof(struct paiext_cb, acc);
+ 		return 0;
+ 	}
+-	return -EINVAL;
++	return -ENOENT;
+ }
+ 
+ /* Might be called on different CPU than the one the event is intended for. */
+diff --git a/arch/x86/kernel/cpu/topology_amd.c b/arch/x86/kernel/cpu/topology_amd.c
+index 0fab130a8249ef..78d05a068af4f2 100644
+--- a/arch/x86/kernel/cpu/topology_amd.c
++++ b/arch/x86/kernel/cpu/topology_amd.c
+@@ -174,24 +174,27 @@ static void topoext_fixup(struct topo_scan *tscan)
+ 
+ static void parse_topology_amd(struct topo_scan *tscan)
+ {
+-	bool has_topoext = false;
+-
+ 	/*
+-	 * If the extended topology leaf 0x8000_001e is available
+-	 * try to get SMT, CORE, TILE, and DIE shifts from extended
++	 * Try to get SMT, CORE, TILE, and DIE shifts from extended
+ 	 * CPUID leaf 0x8000_0026 on supported processors first. If
+ 	 * extended CPUID leaf 0x8000_0026 is not supported, try to
+-	 * get SMT and CORE shift from leaf 0xb first, then try to
+-	 * get the CORE shift from leaf 0x8000_0008.
++	 * get SMT and CORE shift from leaf 0xb. If either leaf is
++	 * available, cpu_parse_topology_ext() will return true.
+ 	 */
+-	if (cpu_feature_enabled(X86_FEATURE_TOPOEXT))
+-		has_topoext = cpu_parse_topology_ext(tscan);
++	bool has_xtopology = cpu_parse_topology_ext(tscan);
+ 
+-	if (!has_topoext && !parse_8000_0008(tscan))
++	/*
++	 * If XTOPOLOGY leaves (0x26/0xb) are not available, try to
++	 * get the CORE shift from leaf 0x8000_0008 first.
++	 */
++	if (!has_xtopology && !parse_8000_0008(tscan))
+ 		return;
+ 
+-	/* Prefer leaf 0x8000001e if available */
+-	if (parse_8000_001e(tscan, has_topoext))
++	/*
++	 * Prefer leaf 0x8000001e if available to get the SMT shift and
++	 * the initial APIC ID if XTOPOLOGY leaves are not available.
++	 */
++	if (parse_8000_001e(tscan, has_xtopology))
+ 		return;
+ 
+ 	/* Try the NODEID MSR */
+diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
+index e2567c8f6bac5d..26449cd74c26d9 100644
+--- a/arch/x86/kernel/vmlinux.lds.S
++++ b/arch/x86/kernel/vmlinux.lds.S
+@@ -486,10 +486,18 @@ SECTIONS
+ }
+ 
+ /*
+- * The ASSERT() sink to . is intentional, for binutils 2.14 compatibility:
++ * COMPILE_TEST kernels can be large - CONFIG_KASAN, for example, can cause
++ * this.  Let's assume that nobody will be running a COMPILE_TEST kernel and
++ * let's assert that fuller build coverage is more valuable than being able to
++ * run a COMPILE_TEST kernel.
++ */
++#ifndef CONFIG_COMPILE_TEST
++/*
++ * The ASSERT() sync to . is intentional, for binutils 2.14 compatibility:
+  */
+ . = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
+ 	   "kernel image bigger than KERNEL_IMAGE_SIZE");
++#endif
+ 
+ /* needed for Clang - see arch/x86/entry/entry.S */
+ PROVIDE(__ref_stack_chk_guard = __stack_chk_guard);
+diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
+index fee04fdb08220c..b46eb8a552d7be 100644
+--- a/drivers/dma-buf/Kconfig
++++ b/drivers/dma-buf/Kconfig
+@@ -36,7 +36,6 @@ config UDMABUF
+ 	depends on DMA_SHARED_BUFFER
+ 	depends on MEMFD_CREATE || COMPILE_TEST
+ 	depends on MMU
+-	select VMAP_PFN
+ 	help
+ 	  A driver to let userspace turn memfd regions into dma-bufs.
+ 	  Qemu can use this to create host dmabufs for guest framebuffers.
+diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
+index 959f690b12260d..0e127a9109e752 100644
+--- a/drivers/dma-buf/udmabuf.c
++++ b/drivers/dma-buf/udmabuf.c
+@@ -74,29 +74,21 @@ static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
+ static int vmap_udmabuf(struct dma_buf *buf, struct iosys_map *map)
+ {
+ 	struct udmabuf *ubuf = buf->priv;
+-	unsigned long *pfns;
++	struct page **pages;
+ 	void *vaddr;
+ 	pgoff_t pg;
+ 
+ 	dma_resv_assert_held(buf->resv);
+ 
+-	/**
+-	 * HVO may free tail pages, so just use pfn to map each folio
+-	 * into vmalloc area.
+-	 */
+-	pfns = kvmalloc_array(ubuf->pagecount, sizeof(*pfns), GFP_KERNEL);
+-	if (!pfns)
++	pages = kvmalloc_array(ubuf->pagecount, sizeof(*pages), GFP_KERNEL);
++	if (!pages)
+ 		return -ENOMEM;
+ 
+-	for (pg = 0; pg < ubuf->pagecount; pg++) {
+-		unsigned long pfn = folio_pfn(ubuf->folios[pg]);
+-
+-		pfn += ubuf->offsets[pg] >> PAGE_SHIFT;
+-		pfns[pg] = pfn;
+-	}
++	for (pg = 0; pg < ubuf->pagecount; pg++)
++		pages[pg] = &ubuf->folios[pg]->page;
+ 
+-	vaddr = vmap_pfn(pfns, ubuf->pagecount, PAGE_KERNEL);
+-	kvfree(pfns);
++	vaddr = vm_map_ram(pages, ubuf->pagecount, -1);
++	kvfree(pages);
+ 	if (!vaddr)
+ 		return -EINVAL;
+ 
+diff --git a/drivers/dma/dw/rzn1-dmamux.c b/drivers/dma/dw/rzn1-dmamux.c
+index 4fb8508419dbd8..deadf135681b67 100644
+--- a/drivers/dma/dw/rzn1-dmamux.c
++++ b/drivers/dma/dw/rzn1-dmamux.c
+@@ -48,12 +48,16 @@ static void *rzn1_dmamux_route_allocate(struct of_phandle_args *dma_spec,
+ 	u32 mask;
+ 	int ret;
+ 
+-	if (dma_spec->args_count != RNZ1_DMAMUX_NCELLS)
+-		return ERR_PTR(-EINVAL);
++	if (dma_spec->args_count != RNZ1_DMAMUX_NCELLS) {
++		ret = -EINVAL;
++		goto put_device;
++	}
+ 
+ 	map = kzalloc(sizeof(*map), GFP_KERNEL);
+-	if (!map)
+-		return ERR_PTR(-ENOMEM);
++	if (!map) {
++		ret = -ENOMEM;
++		goto put_device;
++	}
+ 
+ 	chan = dma_spec->args[0];
+ 	map->req_idx = dma_spec->args[4];
+@@ -94,12 +98,15 @@ static void *rzn1_dmamux_route_allocate(struct of_phandle_args *dma_spec,
+ 	if (ret)
+ 		goto clear_bitmap;
+ 
++	put_device(&pdev->dev);
+ 	return map;
+ 
+ clear_bitmap:
+ 	clear_bit(map->req_idx, dmamux->used_chans);
+ free_map:
+ 	kfree(map);
++put_device:
++	put_device(&pdev->dev);
+ 
+ 	return ERR_PTR(ret);
+ }
+diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
+index 18997f80bdc97a..74a83203181d6f 100644
+--- a/drivers/dma/idxd/init.c
++++ b/drivers/dma/idxd/init.c
+@@ -187,27 +187,30 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
+ 	idxd->wq_enable_map = bitmap_zalloc_node(idxd->max_wqs, GFP_KERNEL, dev_to_node(dev));
+ 	if (!idxd->wq_enable_map) {
+ 		rc = -ENOMEM;
+-		goto err_bitmap;
++		goto err_free_wqs;
+ 	}
+ 
+ 	for (i = 0; i < idxd->max_wqs; i++) {
+ 		wq = kzalloc_node(sizeof(*wq), GFP_KERNEL, dev_to_node(dev));
+ 		if (!wq) {
+ 			rc = -ENOMEM;
+-			goto err;
++			goto err_unwind;
+ 		}
+ 
+ 		idxd_dev_set_type(&wq->idxd_dev, IDXD_DEV_WQ);
+ 		conf_dev = wq_confdev(wq);
+ 		wq->id = i;
+ 		wq->idxd = idxd;
+-		device_initialize(wq_confdev(wq));
++		device_initialize(conf_dev);
+ 		conf_dev->parent = idxd_confdev(idxd);
+ 		conf_dev->bus = &dsa_bus_type;
+ 		conf_dev->type = &idxd_wq_device_type;
+ 		rc = dev_set_name(conf_dev, "wq%d.%d", idxd->id, wq->id);
+-		if (rc < 0)
+-			goto err;
++		if (rc < 0) {
++			put_device(conf_dev);
++			kfree(wq);
++			goto err_unwind;
++		}
+ 
+ 		mutex_init(&wq->wq_lock);
+ 		init_waitqueue_head(&wq->err_queue);
+@@ -218,15 +221,20 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
+ 		wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES;
+ 		wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev));
+ 		if (!wq->wqcfg) {
++			put_device(conf_dev);
++			kfree(wq);
+ 			rc = -ENOMEM;
+-			goto err;
++			goto err_unwind;
+ 		}
+ 
+ 		if (idxd->hw.wq_cap.op_config) {
+ 			wq->opcap_bmap = bitmap_zalloc(IDXD_MAX_OPCAP_BITS, GFP_KERNEL);
+ 			if (!wq->opcap_bmap) {
++				kfree(wq->wqcfg);
++				put_device(conf_dev);
++				kfree(wq);
+ 				rc = -ENOMEM;
+-				goto err_opcap_bmap;
++				goto err_unwind;
+ 			}
+ 			bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, IDXD_MAX_OPCAP_BITS);
+ 		}
+@@ -237,13 +245,7 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
+ 
+ 	return 0;
+ 
+-err_opcap_bmap:
+-	kfree(wq->wqcfg);
+-
+-err:
+-	put_device(conf_dev);
+-	kfree(wq);
+-
++err_unwind:
+ 	while (--i >= 0) {
+ 		wq = idxd->wqs[i];
+ 		if (idxd->hw.wq_cap.op_config)
+@@ -252,11 +254,10 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
+ 		conf_dev = wq_confdev(wq);
+ 		put_device(conf_dev);
+ 		kfree(wq);
+-
+ 	}
+ 	bitmap_free(idxd->wq_enable_map);
+ 
+-err_bitmap:
++err_free_wqs:
+ 	kfree(idxd->wqs);
+ 
+ 	return rc;
+@@ -918,10 +919,12 @@ static void idxd_remove(struct pci_dev *pdev)
+ 	device_unregister(idxd_confdev(idxd));
+ 	idxd_shutdown(pdev);
+ 	idxd_device_remove_debugfs(idxd);
+-	idxd_cleanup(idxd);
++	perfmon_pmu_remove(idxd);
++	idxd_cleanup_interrupts(idxd);
++	if (device_pasid_enabled(idxd))
++		idxd_disable_system_pasid(idxd);
+ 	pci_iounmap(pdev, idxd->reg_base);
+ 	put_device(idxd_confdev(idxd));
+-	idxd_free(idxd);
+ 	pci_disable_device(pdev);
+ }
+ 
+diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
+index d43a881e43b904..348bb9a5c87bdd 100644
+--- a/drivers/dma/qcom/bam_dma.c
++++ b/drivers/dma/qcom/bam_dma.c
+@@ -1283,13 +1283,17 @@ static int bam_dma_probe(struct platform_device *pdev)
+ 	if (!bdev->bamclk) {
+ 		ret = of_property_read_u32(pdev->dev.of_node, "num-channels",
+ 					   &bdev->num_channels);
+-		if (ret)
++		if (ret) {
+ 			dev_err(bdev->dev, "num-channels unspecified in dt\n");
++			return ret;
++		}
+ 
+ 		ret = of_property_read_u32(pdev->dev.of_node, "qcom,num-ees",
+ 					   &bdev->num_ees);
+-		if (ret)
++		if (ret) {
+ 			dev_err(bdev->dev, "num-ees unspecified in dt\n");
++			return ret;
++		}
+ 	}
+ 
+ 	ret = clk_prepare_enable(bdev->bamclk);
+diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
+index 7f861fb07cb837..6333426b4c96c5 100644
+--- a/drivers/dma/ti/edma.c
++++ b/drivers/dma/ti/edma.c
+@@ -2063,8 +2063,8 @@ static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata,
+ 	 * priority. So Q0 is the highest priority queue and the last queue has
+ 	 * the lowest priority.
+ 	 */
+-	queue_priority_map = devm_kcalloc(dev, ecc->num_tc + 1, sizeof(s8),
+-					  GFP_KERNEL);
++	queue_priority_map = devm_kcalloc(dev, ecc->num_tc + 1,
++					  sizeof(*queue_priority_map), GFP_KERNEL);
+ 	if (!queue_priority_map)
+ 		return -ENOMEM;
+ 
+diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
+index a059964b97f8cf..605493b5080671 100644
+--- a/drivers/edac/altera_edac.c
++++ b/drivers/edac/altera_edac.c
+@@ -128,7 +128,6 @@ static ssize_t altr_sdr_mc_err_inject_write(struct file *file,
+ 
+ 	ptemp = dma_alloc_coherent(mci->pdev, 16, &dma_handle, GFP_KERNEL);
+ 	if (!ptemp) {
+-		dma_free_coherent(mci->pdev, 16, ptemp, dma_handle);
+ 		edac_printk(KERN_ERR, EDAC_MC,
+ 			    "Inject: Buffer Allocation error\n");
+ 		return -ENOMEM;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+index 10da6e550d7683..bc86a445509081 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+@@ -400,9 +400,6 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
+ 	dma_fence_put(ring->vmid_wait);
+ 	ring->vmid_wait = NULL;
+ 	ring->me = 0;
+-
+-	if (!ring->is_mes_queue)
+-		ring->adev->rings[ring->idx] = NULL;
+ }
+ 
+ /**
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+index be86f86b49e974..97004145209635 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+@@ -1813,15 +1813,19 @@ static int vcn_v3_0_limit_sched(struct amdgpu_cs_parser *p,
+ 				struct amdgpu_job *job)
+ {
+ 	struct drm_gpu_scheduler **scheds;
+-
+-	/* The create msg must be in the first IB submitted */
+-	if (atomic_read(&job->base.entity->fence_seq))
+-		return -EINVAL;
++	struct dma_fence *fence;
+ 
+ 	/* if VCN0 is harvested, we can't support AV1 */
+ 	if (p->adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0)
+ 		return -EINVAL;
+ 
++	/* wait for all jobs to finish before switching to instance 0 */
++	fence = amdgpu_ctx_get_fence(p->ctx, job->base.entity, ~0ull);
++	if (fence) {
++		dma_fence_wait(fence, false);
++		dma_fence_put(fence);
++	}
++
+ 	scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_DEC]
+ 		[AMDGPU_RING_PRIO_DEFAULT].sched;
+ 	drm_sched_entity_modify_sched(job->base.entity, scheds, 1);
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+index f391f0c54043db..33d413444a46ac 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+@@ -1737,15 +1737,19 @@ static int vcn_v4_0_limit_sched(struct amdgpu_cs_parser *p,
+ 				struct amdgpu_job *job)
+ {
+ 	struct drm_gpu_scheduler **scheds;
+-
+-	/* The create msg must be in the first IB submitted */
+-	if (atomic_read(&job->base.entity->fence_seq))
+-		return -EINVAL;
++	struct dma_fence *fence;
+ 
+ 	/* if VCN0 is harvested, we can't support AV1 */
+ 	if (p->adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0)
+ 		return -EINVAL;
+ 
++	/* wait for all jobs to finish before switching to instance 0 */
++	fence = amdgpu_ctx_get_fence(p->ctx, job->base.entity, ~0ull);
++	if (fence) {
++		dma_fence_wait(fence, false);
++		dma_fence_put(fence);
++	}
++
+ 	scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_ENC]
+ 		[AMDGPU_RING_PRIO_0].sched;
+ 	drm_sched_entity_modify_sched(job->base.entity, scheds, 1);
+@@ -1836,22 +1840,16 @@ static int vcn_v4_0_dec_msg(struct amdgpu_cs_parser *p, struct amdgpu_job *job,
+ 
+ #define RADEON_VCN_ENGINE_TYPE_ENCODE			(0x00000002)
+ #define RADEON_VCN_ENGINE_TYPE_DECODE			(0x00000003)
+-
+ #define RADEON_VCN_ENGINE_INFO				(0x30000001)
+-#define RADEON_VCN_ENGINE_INFO_MAX_OFFSET		16
+-
+ #define RENCODE_ENCODE_STANDARD_AV1			2
+ #define RENCODE_IB_PARAM_SESSION_INIT			0x00000003
+-#define RENCODE_IB_PARAM_SESSION_INIT_MAX_OFFSET	64
+ 
+-/* return the offset in ib if id is found, -1 otherwise
+- * to speed up the searching we only search upto max_offset
+- */
+-static int vcn_v4_0_enc_find_ib_param(struct amdgpu_ib *ib, uint32_t id, int max_offset)
++/* return the offset in ib if id is found, -1 otherwise */
++static int vcn_v4_0_enc_find_ib_param(struct amdgpu_ib *ib, uint32_t id, int start)
+ {
+ 	int i;
+ 
+-	for (i = 0; i < ib->length_dw && i < max_offset && ib->ptr[i] >= 8; i += ib->ptr[i]/4) {
++	for (i = start; i < ib->length_dw && ib->ptr[i] >= 8; i += ib->ptr[i] / 4) {
+ 		if (ib->ptr[i + 1] == id)
+ 			return i;
+ 	}
+@@ -1866,33 +1864,29 @@ static int vcn_v4_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
+ 	struct amdgpu_vcn_decode_buffer *decode_buffer;
+ 	uint64_t addr;
+ 	uint32_t val;
+-	int idx;
++	int idx = 0, sidx;
+ 
+ 	/* The first instance can decode anything */
+ 	if (!ring->me)
+ 		return 0;
+ 
+-	/* RADEON_VCN_ENGINE_INFO is at the top of ib block */
+-	idx = vcn_v4_0_enc_find_ib_param(ib, RADEON_VCN_ENGINE_INFO,
+-			RADEON_VCN_ENGINE_INFO_MAX_OFFSET);
+-	if (idx < 0) /* engine info is missing */
+-		return 0;
+-
+-	val = amdgpu_ib_get_value(ib, idx + 2); /* RADEON_VCN_ENGINE_TYPE */
+-	if (val == RADEON_VCN_ENGINE_TYPE_DECODE) {
+-		decode_buffer = (struct amdgpu_vcn_decode_buffer *)&ib->ptr[idx + 6];
+-
+-		if (!(decode_buffer->valid_buf_flag  & 0x1))
+-			return 0;
+-
+-		addr = ((u64)decode_buffer->msg_buffer_address_hi) << 32 |
+-			decode_buffer->msg_buffer_address_lo;
+-		return vcn_v4_0_dec_msg(p, job, addr);
+-	} else if (val == RADEON_VCN_ENGINE_TYPE_ENCODE) {
+-		idx = vcn_v4_0_enc_find_ib_param(ib, RENCODE_IB_PARAM_SESSION_INIT,
+-			RENCODE_IB_PARAM_SESSION_INIT_MAX_OFFSET);
+-		if (idx >= 0 && ib->ptr[idx + 2] == RENCODE_ENCODE_STANDARD_AV1)
+-			return vcn_v4_0_limit_sched(p, job);
++	while ((idx = vcn_v4_0_enc_find_ib_param(ib, RADEON_VCN_ENGINE_INFO, idx)) >= 0) {
++		val = amdgpu_ib_get_value(ib, idx + 2); /* RADEON_VCN_ENGINE_TYPE */
++		if (val == RADEON_VCN_ENGINE_TYPE_DECODE) {
++			decode_buffer = (struct amdgpu_vcn_decode_buffer *)&ib->ptr[idx + 6];
++
++			if (!(decode_buffer->valid_buf_flag & 0x1))
++				return 0;
++
++			addr = ((u64)decode_buffer->msg_buffer_address_hi) << 32 |
++				decode_buffer->msg_buffer_address_lo;
++			return vcn_v4_0_dec_msg(p, job, addr);
++		} else if (val == RADEON_VCN_ENGINE_TYPE_ENCODE) {
++			sidx = vcn_v4_0_enc_find_ib_param(ib, RENCODE_IB_PARAM_SESSION_INIT, idx);
++			if (sidx >= 0 && ib->ptr[sidx + 2] == RENCODE_ENCODE_STANDARD_AV1)
++				return vcn_v4_0_limit_sched(p, job);
++		}
++		idx += ib->ptr[idx] / 4;
+ 	}
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
+index 48ab93e715c823..6e4f9c6108f602 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vi.c
++++ b/drivers/gpu/drm/amd/amdgpu/vi.c
+@@ -239,6 +239,13 @@ static const struct amdgpu_video_codec_info cz_video_codecs_decode_array[] =
+ 		.max_pixels_per_frame = 4096 * 4096,
+ 		.max_level = 186,
+ 	},
++	{
++		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
++		.max_width = 4096,
++		.max_height = 4096,
++		.max_pixels_per_frame = 4096 * 4096,
++		.max_level = 0,
++	},
+ };
+ 
+ static const struct amdgpu_video_codecs cz_video_codecs_decode =
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 9763752cf5cded..b585c321d3454c 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -11483,6 +11483,11 @@ static bool amdgpu_dm_crtc_mem_type_changed(struct drm_device *dev,
+ 		new_plane_state = drm_atomic_get_plane_state(state, plane);
+ 		old_plane_state = drm_atomic_get_plane_state(state, plane);
+ 
++		if (IS_ERR(new_plane_state) || IS_ERR(old_plane_state)) {
++			DRM_ERROR("Failed to get plane state for plane %s\n", plane->name);
++			return false;
++		}
++
+ 		if (old_plane_state->fb && new_plane_state->fb &&
+ 		    get_mem_type(old_plane_state->fb) != get_mem_type(new_plane_state->fb))
+ 			return true;
+diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c
+index 01480a04f85ef5..9a3be1dd352b6a 100644
+--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c
++++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c
+@@ -483,11 +483,10 @@ void dpp1_set_cursor_position(
+ 	if (src_y_offset + cursor_height <= 0)
+ 		cur_en = 0;  /* not visible beyond top edge*/
+ 
+-	if (dpp_base->pos.cur0_ctl.bits.cur0_enable != cur_en) {
+-		REG_UPDATE(CURSOR0_CONTROL, CUR0_ENABLE, cur_en);
++	REG_UPDATE(CURSOR0_CONTROL,
++			CUR0_ENABLE, cur_en);
+ 
+-		dpp_base->pos.cur0_ctl.bits.cur0_enable = cur_en;
+-	}
++	dpp_base->pos.cur0_ctl.bits.cur0_enable = cur_en;
+ }
+ 
+ void dpp1_cnv_set_optional_cursor_attributes(
+diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c
+index 712aff7e17f7a0..92b34fe47f7400 100644
+--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c
++++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c
+@@ -155,11 +155,9 @@ void dpp401_set_cursor_position(
+ 	struct dcn401_dpp *dpp = TO_DCN401_DPP(dpp_base);
+ 	uint32_t cur_en = pos->enable ? 1 : 0;
+ 
+-	if (dpp_base->pos.cur0_ctl.bits.cur0_enable != cur_en) {
+-		REG_UPDATE(CURSOR0_CONTROL, CUR0_ENABLE, cur_en);
++	REG_UPDATE(CURSOR0_CONTROL, CUR0_ENABLE, cur_en);
+ 
+-		dpp_base->pos.cur0_ctl.bits.cur0_enable = cur_en;
+-	}
++	dpp_base->pos.cur0_ctl.bits.cur0_enable = cur_en;
+ }
+ 
+ void dpp401_set_optional_cursor_attributes(
+diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c
+index c74ee2d50a699a..b405fa22f87a9e 100644
+--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c
++++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c
+@@ -1044,13 +1044,11 @@ void hubp2_cursor_set_position(
+ 	if (src_y_offset + cursor_height <= 0)
+ 		cur_en = 0;  /* not visible beyond top edge*/
+ 
+-	if (hubp->pos.cur_ctl.bits.cur_enable != cur_en) {
+-		if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0)
+-			hubp->funcs->set_cursor_attributes(hubp, &hubp->curs_attr);
++	if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0)
++		hubp->funcs->set_cursor_attributes(hubp, &hubp->curs_attr);
+ 
+-		REG_UPDATE(CURSOR_CONTROL,
++	REG_UPDATE(CURSOR_CONTROL,
+ 			CURSOR_ENABLE, cur_en);
+-	}
+ 
+ 	REG_SET_2(CURSOR_POSITION, 0,
+ 			CURSOR_X_POSITION, pos->x,
+diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
+index 7013c124efcff8..2d52100510f05f 100644
+--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
++++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
+@@ -718,13 +718,11 @@ void hubp401_cursor_set_position(
+ 			dc_fixpt_from_int(dst_x_offset),
+ 			param->h_scale_ratio));
+ 
+-	if (hubp->pos.cur_ctl.bits.cur_enable != cur_en) {
+-		if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0)
+-			hubp->funcs->set_cursor_attributes(hubp, &hubp->curs_attr);
++	if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0)
++		hubp->funcs->set_cursor_attributes(hubp, &hubp->curs_attr);
+ 
+-		REG_UPDATE(CURSOR_CONTROL,
+-			CURSOR_ENABLE, cur_en);
+-	}
++	REG_UPDATE(CURSOR_CONTROL,
++		CURSOR_ENABLE, cur_en);
+ 
+ 	REG_SET_2(CURSOR_POSITION, 0,
+ 		CURSOR_X_POSITION, x_pos,
+diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
+index 08fc2a2c399f60..d96f52a551940d 100644
+--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
+@@ -945,7 +945,7 @@ enum dc_status dcn20_enable_stream_timing(
+ 		return DC_ERROR_UNEXPECTED;
+ 	}
+ 
+-	fsleep(stream->timing.v_total * (stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz));
++	udelay(stream->timing.v_total * (stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz));
+ 
+ 	params.vertical_total_min = stream->adjust.v_total_min;
+ 	params.vertical_total_max = stream->adjust.v_total_max;
+diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
+index ef2fdbf973460d..85335d8dfec115 100644
+--- a/drivers/gpu/drm/i915/display/intel_display_power.c
++++ b/drivers/gpu/drm/i915/display/intel_display_power.c
+@@ -1150,7 +1150,7 @@ static void icl_mbus_init(struct drm_i915_private *dev_priv)
+ 	if (DISPLAY_VER(dev_priv) == 12)
+ 		abox_regs |= BIT(0);
+ 
+-	for_each_set_bit(i, &abox_regs, sizeof(abox_regs))
++	for_each_set_bit(i, &abox_regs, BITS_PER_TYPE(abox_regs))
+ 		intel_de_rmw(dev_priv, MBUS_ABOX_CTL(i), mask, val);
+ }
+ 
+@@ -1603,11 +1603,11 @@ static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
+ 	if (table[config].page_mask == 0) {
+ 		drm_dbg(&dev_priv->drm,
+ 			"Unknown memory configuration; disabling address buddy logic.\n");
+-		for_each_set_bit(i, &abox_mask, sizeof(abox_mask))
++		for_each_set_bit(i, &abox_mask, BITS_PER_TYPE(abox_mask))
+ 			intel_de_write(dev_priv, BW_BUDDY_CTL(i),
+ 				       BW_BUDDY_DISABLE);
+ 	} else {
+-		for_each_set_bit(i, &abox_mask, sizeof(abox_mask)) {
++		for_each_set_bit(i, &abox_mask, BITS_PER_TYPE(abox_mask)) {
+ 			intel_de_write(dev_priv, BW_BUDDY_PAGE_MASK(i),
+ 				       table[config].page_mask);
+ 
+diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+index b48373b1667793..355a21eb484430 100644
+--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
++++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+@@ -1469,6 +1469,19 @@ static void __reset_guc_busyness_stats(struct intel_guc *guc)
+ 	spin_unlock_irqrestore(&guc->timestamp.lock, flags);
+ }
+ 
++static void __update_guc_busyness_running_state(struct intel_guc *guc)
++{
++	struct intel_gt *gt = guc_to_gt(guc);
++	struct intel_engine_cs *engine;
++	enum intel_engine_id id;
++	unsigned long flags;
++
++	spin_lock_irqsave(&guc->timestamp.lock, flags);
++	for_each_engine(engine, gt, id)
++		engine->stats.guc.running = false;
++	spin_unlock_irqrestore(&guc->timestamp.lock, flags);
++}
++
+ static void __update_guc_busyness_stats(struct intel_guc *guc)
+ {
+ 	struct intel_gt *gt = guc_to_gt(guc);
+@@ -1619,6 +1632,9 @@ void intel_guc_busyness_park(struct intel_gt *gt)
+ 	if (!guc_submission_initialized(guc))
+ 		return;
+ 
++	/* Assume no engines are running and set running state to false */
++	__update_guc_busyness_running_state(guc);
++
+ 	/*
+ 	 * There is a race with suspend flow where the worker runs after suspend
+ 	 * and causes an unclaimed register access warning. Cancel the worker
+diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+index 2508e9e9431dcd..b089219025681c 100644
+--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
++++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+@@ -381,11 +381,11 @@ static bool mtk_drm_get_all_drm_priv(struct device *dev)
+ 
+ 		of_id = of_match_node(mtk_drm_of_ids, node);
+ 		if (!of_id)
+-			goto next_put_node;
++			continue;
+ 
+ 		pdev = of_find_device_by_node(node);
+ 		if (!pdev)
+-			goto next_put_node;
++			continue;
+ 
+ 		drm_dev = device_find_child(&pdev->dev, NULL, mtk_drm_match);
+ 		if (!drm_dev)
+@@ -411,11 +411,10 @@ static bool mtk_drm_get_all_drm_priv(struct device *dev)
+ next_put_device_pdev_dev:
+ 		put_device(&pdev->dev);
+ 
+-next_put_node:
+-		of_node_put(node);
+-
+-		if (cnt == MAX_CRTC)
++		if (cnt == MAX_CRTC) {
++			of_node_put(node);
+ 			break;
++		}
+ 	}
+ 
+ 	if (drm_priv->data->mmsys_dev_num == cnt) {
+diff --git a/drivers/gpu/drm/panthor/panthor_drv.c b/drivers/gpu/drm/panthor/panthor_drv.c
+index c520f156e2d73f..03eb7d52209a2f 100644
+--- a/drivers/gpu/drm/panthor/panthor_drv.c
++++ b/drivers/gpu/drm/panthor/panthor_drv.c
+@@ -1023,7 +1023,7 @@ static int panthor_ioctl_group_create(struct drm_device *ddev, void *data,
+ 	struct drm_panthor_queue_create *queue_args;
+ 	int ret;
+ 
+-	if (!args->queues.count)
++	if (!args->queues.count || args->queues.count > MAX_CS_PER_CSG)
+ 		return -EINVAL;
+ 
+ 	ret = PANTHOR_UOBJ_GET_ARRAY(queue_args, &args->queues);
+diff --git a/drivers/gpu/drm/xe/tests/xe_bo.c b/drivers/gpu/drm/xe/tests/xe_bo.c
+index 8dac069483e8fd..754df8e9d38a19 100644
+--- a/drivers/gpu/drm/xe/tests/xe_bo.c
++++ b/drivers/gpu/drm/xe/tests/xe_bo.c
+@@ -222,7 +222,7 @@ static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struc
+ 		}
+ 
+ 		xe_bo_lock(external, false);
+-		err = xe_bo_pin_external(external);
++		err = xe_bo_pin_external(external, false);
+ 		xe_bo_unlock(external);
+ 		if (err) {
+ 			KUNIT_FAIL(test, "external bo pin err=%pe\n",
+diff --git a/drivers/gpu/drm/xe/tests/xe_dma_buf.c b/drivers/gpu/drm/xe/tests/xe_dma_buf.c
+index cedd3e88a6fb2c..5a6e0206989de2 100644
+--- a/drivers/gpu/drm/xe/tests/xe_dma_buf.c
++++ b/drivers/gpu/drm/xe/tests/xe_dma_buf.c
+@@ -89,15 +89,7 @@ static void check_residency(struct kunit *test, struct xe_bo *exported,
+ 		return;
+ 	}
+ 
+-	/*
+-	 * If on different devices, the exporter is kept in system  if
+-	 * possible, saving a migration step as the transfer is just
+- * as fast from system memory.
+-	 */
+-	if (params->mem_mask & XE_BO_FLAG_SYSTEM)
+-		KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(exported, XE_PL_TT));
+-	else
+-		KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(exported, mem_type));
++	KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(exported, mem_type));
+ 
+ 	if (params->force_different_devices)
+ 		KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(imported, XE_PL_TT));
+diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
+index 445bbe0299b08f..b5058a35c4513a 100644
+--- a/drivers/gpu/drm/xe/xe_bo.c
++++ b/drivers/gpu/drm/xe/xe_bo.c
+@@ -157,6 +157,8 @@ static void try_add_system(struct xe_device *xe, struct xe_bo *bo,
+ 
+ 		bo->placements[*c] = (struct ttm_place) {
+ 			.mem_type = XE_PL_TT,
++			.flags = (bo_flags & XE_BO_FLAG_VRAM_MASK) ?
++			TTM_PL_FLAG_FALLBACK : 0,
+ 		};
+ 		*c += 1;
+ 	}
+@@ -1743,6 +1745,7 @@ uint64_t vram_region_gpu_offset(struct ttm_resource *res)
+ /**
+  * xe_bo_pin_external - pin an external BO
+  * @bo: buffer object to be pinned
++ * @in_place: Pin in current placement, don't attempt to migrate.
+  *
+  * Pin an external (not tied to a VM, can be exported via dma-buf / prime FD)
+  * BO. Unique call compared to xe_bo_pin as this function has it own set of
+@@ -1750,7 +1753,7 @@ uint64_t vram_region_gpu_offset(struct ttm_resource *res)
+  *
+  * Returns 0 for success, negative error code otherwise.
+  */
+-int xe_bo_pin_external(struct xe_bo *bo)
++int xe_bo_pin_external(struct xe_bo *bo, bool in_place)
+ {
+ 	struct xe_device *xe = xe_bo_device(bo);
+ 	int err;
+@@ -1759,9 +1762,11 @@ int xe_bo_pin_external(struct xe_bo *bo)
+ 	xe_assert(xe, xe_bo_is_user(bo));
+ 
+ 	if (!xe_bo_is_pinned(bo)) {
+-		err = xe_bo_validate(bo, NULL, false);
+-		if (err)
+-			return err;
++		if (!in_place) {
++			err = xe_bo_validate(bo, NULL, false);
++			if (err)
++				return err;
++		}
+ 
+ 		if (xe_bo_is_vram(bo)) {
+ 			spin_lock(&xe->pinned.lock);
+@@ -1913,6 +1918,9 @@ int xe_bo_validate(struct xe_bo *bo, struct xe_vm *vm, bool allow_res_evict)
+ 		.no_wait_gpu = false,
+ 	};
+ 
++	if (xe_bo_is_pinned(bo))
++		return 0;
++
+ 	if (vm) {
+ 		lockdep_assert_held(&vm->lock);
+ 		xe_vm_assert_held(vm);
+diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h
+index d04159c598465a..5e982be66c7431 100644
+--- a/drivers/gpu/drm/xe/xe_bo.h
++++ b/drivers/gpu/drm/xe/xe_bo.h
+@@ -173,7 +173,7 @@ static inline void xe_bo_unlock_vm_held(struct xe_bo *bo)
+ 	}
+ }
+ 
+-int xe_bo_pin_external(struct xe_bo *bo);
++int xe_bo_pin_external(struct xe_bo *bo, bool in_place);
+ int xe_bo_pin(struct xe_bo *bo);
+ void xe_bo_unpin_external(struct xe_bo *bo);
+ void xe_bo_unpin(struct xe_bo *bo);
+diff --git a/drivers/gpu/drm/xe/xe_dma_buf.c b/drivers/gpu/drm/xe/xe_dma_buf.c
+index 78204578443f46..a41f453bab5919 100644
+--- a/drivers/gpu/drm/xe/xe_dma_buf.c
++++ b/drivers/gpu/drm/xe/xe_dma_buf.c
+@@ -72,7 +72,7 @@ static int xe_dma_buf_pin(struct dma_buf_attachment *attach)
+ 		return ret;
+ 	}
+ 
+-	ret = xe_bo_pin_external(bo);
++	ret = xe_bo_pin_external(bo, true);
+ 	xe_assert(xe, !ret);
+ 
+ 	return 0;
+diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
+index 75dab01d43a750..be7ca6a0ebeb8a 100644
+--- a/drivers/i2c/busses/i2c-i801.c
++++ b/drivers/i2c/busses/i2c-i801.c
+@@ -1057,7 +1057,7 @@ static const struct pci_device_id i801_ids[] = {
+ 	{ PCI_DEVICE_DATA(INTEL, METEOR_LAKE_P_SMBUS,		FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ 	{ PCI_DEVICE_DATA(INTEL, METEOR_LAKE_SOC_S_SMBUS,	FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ 	{ PCI_DEVICE_DATA(INTEL, METEOR_LAKE_PCH_S_SMBUS,	FEATURES_ICH5 | FEATURE_TCO_CNL) },
+-	{ PCI_DEVICE_DATA(INTEL, BIRCH_STREAM_SMBUS,		FEATURES_ICH5 | FEATURE_TCO_CNL) },
++	{ PCI_DEVICE_DATA(INTEL, BIRCH_STREAM_SMBUS,		FEATURES_ICH5)			 },
+ 	{ PCI_DEVICE_DATA(INTEL, ARROW_LAKE_H_SMBUS,		FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ 	{ PCI_DEVICE_DATA(INTEL, PANTHER_LAKE_H_SMBUS,		FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ 	{ PCI_DEVICE_DATA(INTEL, PANTHER_LAKE_P_SMBUS,		FEATURES_ICH5 | FEATURE_TCO_CNL) },
+diff --git a/drivers/input/misc/iqs7222.c b/drivers/input/misc/iqs7222.c
+index 846aac9a5c9df2..7a2e3494985498 100644
+--- a/drivers/input/misc/iqs7222.c
++++ b/drivers/input/misc/iqs7222.c
+@@ -2430,6 +2430,9 @@ static int iqs7222_parse_chan(struct iqs7222_private *iqs7222,
+ 		if (error)
+ 			return error;
+ 
++		if (!iqs7222->kp_type[chan_index][i])
++			continue;
++
+ 		if (!dev_desc->event_offset)
+ 			continue;
+ 
+diff --git a/drivers/input/serio/i8042-acpipnpio.h b/drivers/input/serio/i8042-acpipnpio.h
+index 8813db7eec3978..630cdd5a132831 100644
+--- a/drivers/input/serio/i8042-acpipnpio.h
++++ b/drivers/input/serio/i8042-acpipnpio.h
+@@ -1155,6 +1155,20 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
+ 		.driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ 					SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ 	},
++	{
++		.matches = {
++			DMI_MATCH(DMI_BOARD_NAME, "XxHP4NAx"),
++		},
++		.driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
++					SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
++	},
++	{
++		.matches = {
++			DMI_MATCH(DMI_BOARD_NAME, "XxKK4NAx_XxSP4NAx"),
++		},
++		.driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
++					SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
++	},
+ 	/*
+ 	 * A lot of modern Clevo barebones have touchpad and/or keyboard issues
+ 	 * after suspend fixable with the forcenorestore quirk.
+diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c
+index 6f8eb7fa4c59fc..5b02119d8ba23f 100644
+--- a/drivers/mtd/nand/raw/atmel/nand-controller.c
++++ b/drivers/mtd/nand/raw/atmel/nand-controller.c
+@@ -1377,14 +1377,24 @@ static int atmel_smc_nand_prepare_smcconf(struct atmel_nand *nand,
+ 	if (ret)
+ 		return ret;
+ 
++	/*
++	 * Read setup timing depends on the operation done on the NAND:
++	 *
++	 * NRD_SETUP = max(tAR, tCLR)
++	 */
++	timeps = max(conf->timings.sdr.tAR_min, conf->timings.sdr.tCLR_min);
++	ncycles = DIV_ROUND_UP(timeps, mckperiodps);
++	totalcycles += ncycles;
++	ret = atmel_smc_cs_conf_set_setup(smcconf, ATMEL_SMC_NRD_SHIFT, ncycles);
++	if (ret)
++		return ret;
++
+ 	/*
+ 	 * The read cycle timing is directly matching tRC, but is also
+ 	 * dependent on the setup and hold timings we calculated earlier,
+ 	 * which gives:
+ 	 *
+-	 * NRD_CYCLE = max(tRC, NRD_PULSE + NRD_HOLD)
+-	 *
+-	 * NRD_SETUP is always 0.
++	 * NRD_CYCLE = max(tRC, NRD_SETUP + NRD_PULSE + NRD_HOLD)
+ 	 */
+ 	ncycles = DIV_ROUND_UP(conf->timings.sdr.tRC_min, mckperiodps);
+ 	ncycles = max(totalcycles, ncycles);
+diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
+index 0f67e96cc24020..7488a496950291 100644
+--- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c
++++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
+@@ -272,6 +272,7 @@ struct stm32_fmc2_nfc {
+ 	struct sg_table dma_data_sg;
+ 	struct sg_table dma_ecc_sg;
+ 	u8 *ecc_buf;
++	dma_addr_t dma_ecc_addr;
+ 	int dma_ecc_len;
+ 	u32 tx_dma_max_burst;
+ 	u32 rx_dma_max_burst;
+@@ -902,17 +903,10 @@ static int stm32_fmc2_nfc_xfer(struct nand_chip *chip, const u8 *buf,
+ 
+ 	if (!write_data && !raw) {
+ 		/* Configure DMA ECC status */
+-		p = nfc->ecc_buf;
+ 		for_each_sg(nfc->dma_ecc_sg.sgl, sg, eccsteps, s) {
+-			sg_set_buf(sg, p, nfc->dma_ecc_len);
+-			p += nfc->dma_ecc_len;
+-		}
+-
+-		ret = dma_map_sg(nfc->dev, nfc->dma_ecc_sg.sgl,
+-				 eccsteps, dma_data_dir);
+-		if (!ret) {
+-			ret = -EIO;
+-			goto err_unmap_data;
++			sg_dma_address(sg) = nfc->dma_ecc_addr +
++					     s * nfc->dma_ecc_len;
++			sg_dma_len(sg) = nfc->dma_ecc_len;
+ 		}
+ 
+ 		desc_ecc = dmaengine_prep_slave_sg(nfc->dma_ecc_ch,
+@@ -921,7 +915,7 @@ static int stm32_fmc2_nfc_xfer(struct nand_chip *chip, const u8 *buf,
+ 						   DMA_PREP_INTERRUPT);
+ 		if (!desc_ecc) {
+ 			ret = -ENOMEM;
+-			goto err_unmap_ecc;
++			goto err_unmap_data;
+ 		}
+ 
+ 		reinit_completion(&nfc->dma_ecc_complete);
+@@ -929,7 +923,7 @@ static int stm32_fmc2_nfc_xfer(struct nand_chip *chip, const u8 *buf,
+ 		desc_ecc->callback_param = &nfc->dma_ecc_complete;
+ 		ret = dma_submit_error(dmaengine_submit(desc_ecc));
+ 		if (ret)
+-			goto err_unmap_ecc;
++			goto err_unmap_data;
+ 
+ 		dma_async_issue_pending(nfc->dma_ecc_ch);
+ 	}
+@@ -949,7 +943,7 @@ static int stm32_fmc2_nfc_xfer(struct nand_chip *chip, const u8 *buf,
+ 		if (!write_data && !raw)
+ 			dmaengine_terminate_all(nfc->dma_ecc_ch);
+ 		ret = -ETIMEDOUT;
+-		goto err_unmap_ecc;
++		goto err_unmap_data;
+ 	}
+ 
+ 	/* Wait DMA data transfer completion */
+@@ -969,11 +963,6 @@ static int stm32_fmc2_nfc_xfer(struct nand_chip *chip, const u8 *buf,
+ 		}
+ 	}
+ 
+-err_unmap_ecc:
+-	if (!write_data && !raw)
+-		dma_unmap_sg(nfc->dev, nfc->dma_ecc_sg.sgl,
+-			     eccsteps, dma_data_dir);
+-
+ err_unmap_data:
+ 	dma_unmap_sg(nfc->dev, nfc->dma_data_sg.sgl, eccsteps, dma_data_dir);
+ 
+@@ -996,9 +985,21 @@ static int stm32_fmc2_nfc_seq_write(struct nand_chip *chip, const u8 *buf,
+ 
+ 	/* Write oob */
+ 	if (oob_required) {
+-		ret = nand_change_write_column_op(chip, mtd->writesize,
+-						  chip->oob_poi, mtd->oobsize,
+-						  false);
++		unsigned int offset_in_page = mtd->writesize;
++		const void *buf = chip->oob_poi;
++		unsigned int len = mtd->oobsize;
++
++		if (!raw) {
++			struct mtd_oob_region oob_free;
++
++			mtd_ooblayout_free(mtd, 0, &oob_free);
++			offset_in_page += oob_free.offset;
++			buf += oob_free.offset;
++			len = oob_free.length;
++		}
++
++		ret = nand_change_write_column_op(chip, offset_in_page,
++						  buf, len, false);
+ 		if (ret)
+ 			return ret;
+ 	}
+@@ -1610,7 +1611,8 @@ static int stm32_fmc2_nfc_dma_setup(struct stm32_fmc2_nfc *nfc)
+ 		return ret;
+ 
+ 	/* Allocate a buffer to store ECC status registers */
+-	nfc->ecc_buf = devm_kzalloc(nfc->dev, FMC2_MAX_ECC_BUF_LEN, GFP_KERNEL);
++	nfc->ecc_buf = dmam_alloc_coherent(nfc->dev, FMC2_MAX_ECC_BUF_LEN,
++					   &nfc->dma_ecc_addr, GFP_KERNEL);
+ 	if (!nfc->ecc_buf)
+ 		return -ENOMEM;
+ 
+diff --git a/drivers/mtd/nand/spi/winbond.c b/drivers/mtd/nand/spi/winbond.c
+index a33ad04e99cc8e..d1666b31518173 100644
+--- a/drivers/mtd/nand/spi/winbond.c
++++ b/drivers/mtd/nand/spi/winbond.c
+@@ -122,6 +122,41 @@ static const struct mtd_ooblayout_ops w25n02kv_ooblayout = {
+ 	.free = w25n02kv_ooblayout_free,
+ };
+ 
++static int w25n01jw_ooblayout_ecc(struct mtd_info *mtd, int section,
++				  struct mtd_oob_region *region)
++{
++	if (section > 3)
++		return -ERANGE;
++
++	region->offset = (16 * section) + 12;
++	region->length = 4;
++
++	return 0;
++}
++
++static int w25n01jw_ooblayout_free(struct mtd_info *mtd, int section,
++				   struct mtd_oob_region *region)
++{
++	if (section > 3)
++		return -ERANGE;
++
++	region->offset = (16 * section);
++	region->length = 12;
++
++	/* Extract BBM */
++	if (!section) {
++		region->offset += 2;
++		region->length -= 2;
++	}
++
++	return 0;
++}
++
++static const struct mtd_ooblayout_ops w25n01jw_ooblayout = {
++	.ecc = w25n01jw_ooblayout_ecc,
++	.free = w25n01jw_ooblayout_free,
++};
++
+ static int w25n02kv_ecc_get_status(struct spinand_device *spinand,
+ 				   u8 status)
+ {
+@@ -206,7 +241,7 @@ static const struct spinand_info winbond_spinand_table[] = {
+ 					      &write_cache_variants,
+ 					      &update_cache_variants),
+ 		     0,
+-		     SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL)),
++		     SPINAND_ECCINFO(&w25n01jw_ooblayout, NULL)),
+ 	SPINAND_INFO("W25N02JWZEIF",
+ 		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xbf, 0x22),
+ 		     NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 2, 1),
+diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
+index 436c0e4b0344c7..91382225f11405 100644
+--- a/drivers/net/can/xilinx_can.c
++++ b/drivers/net/can/xilinx_can.c
+@@ -690,14 +690,6 @@ static void xcan_write_frame(struct net_device *ndev, struct sk_buff *skb,
+ 		dlc |= XCAN_DLCR_EDL_MASK;
+ 	}
+ 
+-	if (!(priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES) &&
+-	    (priv->devtype.flags & XCAN_FLAG_TXFEMP))
+-		can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max, 0);
+-	else
+-		can_put_echo_skb(skb, ndev, 0, 0);
+-
+-	priv->tx_head++;
+-
+ 	priv->write_reg(priv, XCAN_FRAME_ID_OFFSET(frame_offset), id);
+ 	/* If the CAN frame is RTR frame this write triggers transmission
+ 	 * (not on CAN FD)
+@@ -730,6 +722,14 @@ static void xcan_write_frame(struct net_device *ndev, struct sk_buff *skb,
+ 					data[1]);
+ 		}
+ 	}
++
++	if (!(priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES) &&
++	    (priv->devtype.flags & XCAN_FLAG_TXFEMP))
++		can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max, 0);
++	else
++		can_put_echo_skb(skb, ndev, 0, 0);
++
++	priv->tx_head++;
+ }
+ 
+ /**
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index a1cc338cf20f38..0bd814251d56ef 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -2356,7 +2356,8 @@ static void fec_enet_phy_reset_after_clk_enable(struct net_device *ndev)
+ 		 */
+ 		phy_dev = of_phy_find_device(fep->phy_node);
+ 		phy_reset_after_clk_enable(phy_dev);
+-		put_device(&phy_dev->mdio.dev);
++		if (phy_dev)
++			put_device(&phy_dev->mdio.dev);
+ 	}
+ }
+ 
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index 55fb362eb50816..037c1a0cbd6a80 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -4206,7 +4206,7 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
+ 		irq_num = pf->msix_entries[base + vector].vector;
+ 		irq_set_affinity_notifier(irq_num, NULL);
+ 		irq_update_affinity_hint(irq_num, NULL);
+-		free_irq(irq_num, &vsi->q_vectors[vector]);
++		free_irq(irq_num, vsi->q_vectors[vector]);
+ 	}
+ 	return err;
+ }
+diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
+index ca6ccbc139548b..6412c84e2d17db 100644
+--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
++++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
+@@ -2081,11 +2081,8 @@ static void igb_diag_test(struct net_device *netdev,
+ 	} else {
+ 		dev_info(&adapter->pdev->dev, "online testing starting\n");
+ 
+-		/* PHY is powered down when interface is down */
+-		if (if_running && igb_link_test(adapter, &data[TEST_LINK]))
++		if (igb_link_test(adapter, &data[TEST_LINK]))
+ 			eth_test->flags |= ETH_TEST_FL_FAILED;
+-		else
+-			data[TEST_LINK] = 0;
+ 
+ 		/* Online tests aren't run; pass by default */
+ 		data[TEST_REG] = 0;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.c
+index 61a1155d4b4fdb..ce541c60c5b49f 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.c
+@@ -165,14 +165,14 @@ static int hws_matcher_disconnect(struct mlx5hws_matcher *matcher)
+ 						    next->match_ste.rtc_0_id,
+ 						    next->match_ste.rtc_1_id);
+ 		if (ret) {
+-			mlx5hws_err(tbl->ctx, "Failed to disconnect matcher\n");
+-			goto matcher_reconnect;
++			mlx5hws_err(tbl->ctx, "Fatal error, failed to disconnect matcher\n");
++			return ret;
+ 		}
+ 	} else {
+ 		ret = mlx5hws_table_connect_to_miss_table(tbl, tbl->default_miss.miss_tbl);
+ 		if (ret) {
+-			mlx5hws_err(tbl->ctx, "Failed to disconnect last matcher\n");
+-			goto matcher_reconnect;
++			mlx5hws_err(tbl->ctx, "Fatal error, failed to disconnect last matcher\n");
++			return ret;
+ 		}
+ 	}
+ 
+@@ -180,27 +180,19 @@ static int hws_matcher_disconnect(struct mlx5hws_matcher *matcher)
+ 	if (prev_ft_id == tbl->ft_id) {
+ 		ret = mlx5hws_table_update_connected_miss_tables(tbl);
+ 		if (ret) {
+-			mlx5hws_err(tbl->ctx, "Fatal error, failed to update connected miss table\n");
+-			goto matcher_reconnect;
++			mlx5hws_err(tbl->ctx,
++				    "Fatal error, failed to update connected miss table\n");
++			return ret;
+ 		}
+ 	}
+ 
+ 	ret = mlx5hws_table_ft_set_default_next_ft(tbl, prev_ft_id);
+ 	if (ret) {
+ 		mlx5hws_err(tbl->ctx, "Fatal error, failed to restore matcher ft default miss\n");
+-		goto matcher_reconnect;
++		return ret;
+ 	}
+ 
+ 	return 0;
+-
+-matcher_reconnect:
+-	if (list_empty(&tbl->matchers_list) || !prev)
+-		list_add(&matcher->list_node, &tbl->matchers_list);
+-	else
+-		/* insert after prev matcher */
+-		list_add(&matcher->list_node, &prev->list_node);
+-
+-	return ret;
+ }
+ 
+ static void hws_matcher_set_rtc_attr_sz(struct mlx5hws_matcher *matcher,
+diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
+index 591e8fd33d8ea6..a508cd81cd4ed9 100644
+--- a/drivers/net/phy/mdio_bus.c
++++ b/drivers/net/phy/mdio_bus.c
+@@ -97,6 +97,7 @@ int mdiobus_unregister_device(struct mdio_device *mdiodev)
+ 	if (mdiodev->bus->mdio_map[mdiodev->addr] != mdiodev)
+ 		return -EINVAL;
+ 
++	gpiod_put(mdiodev->reset_gpio);
+ 	reset_control_put(mdiodev->reset_ctrl);
+ 
+ 	mdiodev->bus->mdio_map[mdiodev->addr] = NULL;
+@@ -814,9 +815,6 @@ void mdiobus_unregister(struct mii_bus *bus)
+ 		if (!mdiodev)
+ 			continue;
+ 
+-		if (mdiodev->reset_gpio)
+-			gpiod_put(mdiodev->reset_gpio);
+-
+ 		mdiodev->device_remove(mdiodev);
+ 		mdiodev->device_free(mdiodev);
+ 	}
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index 2bddc9f60fecc9..fdc9f1df0578b7 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -989,6 +989,9 @@ static void nvme_submit_cmds(struct nvme_queue *nvmeq, struct rq_list *rqlist)
+ {
+ 	struct request *req;
+ 
++	if (rq_list_empty(rqlist))
++		return;
++
+ 	spin_lock(&nvmeq->sq_lock);
+ 	while ((req = rq_list_pop(rqlist))) {
+ 		struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+diff --git a/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c b/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c
+index 163950e16dbe13..c173c6244d9e56 100644
+--- a/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c
++++ b/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c
+@@ -127,13 +127,13 @@ static int eusb2_repeater_init(struct phy *phy)
+ 			     rptr->cfg->init_tbl[i].value);
+ 
+ 	/* Override registers from devicetree values */
+-	if (!of_property_read_u8(np, "qcom,tune-usb2-amplitude", &val))
++	if (!of_property_read_u8(np, "qcom,tune-usb2-preem", &val))
+ 		regmap_write(regmap, base + EUSB2_TUNE_USB2_PREEM, val);
+ 
+ 	if (!of_property_read_u8(np, "qcom,tune-usb2-disc-thres", &val))
+ 		regmap_write(regmap, base + EUSB2_TUNE_HSDISC, val);
+ 
+-	if (!of_property_read_u8(np, "qcom,tune-usb2-preem", &val))
++	if (!of_property_read_u8(np, "qcom,tune-usb2-amplitude", &val))
+ 		regmap_write(regmap, base + EUSB2_TUNE_IUSB2, val);
+ 
+ 	/* Wait for status OK */
+diff --git a/drivers/phy/tegra/xusb-tegra210.c b/drivers/phy/tegra/xusb-tegra210.c
+index ebc8a7e21a3181..3409924498e9cf 100644
+--- a/drivers/phy/tegra/xusb-tegra210.c
++++ b/drivers/phy/tegra/xusb-tegra210.c
+@@ -3164,18 +3164,22 @@ tegra210_xusb_padctl_probe(struct device *dev,
+ 	}
+ 
+ 	pdev = of_find_device_by_node(np);
++	of_node_put(np);
+ 	if (!pdev) {
+ 		dev_warn(dev, "PMC device is not available\n");
+ 		goto out;
+ 	}
+ 
+-	if (!platform_get_drvdata(pdev))
++	if (!platform_get_drvdata(pdev)) {
++		put_device(&pdev->dev);
+ 		return ERR_PTR(-EPROBE_DEFER);
++	}
+ 
+ 	padctl->regmap = dev_get_regmap(&pdev->dev, "usb_sleepwalk");
+ 	if (!padctl->regmap)
+ 		dev_info(dev, "failed to find PMC regmap\n");
+ 
++	put_device(&pdev->dev);
+ out:
+ 	return &padctl->base;
+ }
+diff --git a/drivers/phy/ti/phy-omap-usb2.c b/drivers/phy/ti/phy-omap-usb2.c
+index 78e19b128962a9..0fea766a98d756 100644
+--- a/drivers/phy/ti/phy-omap-usb2.c
++++ b/drivers/phy/ti/phy-omap-usb2.c
+@@ -363,6 +363,13 @@ static void omap_usb2_init_errata(struct omap_usb *phy)
+ 		phy->flags |= OMAP_USB2_DISABLE_CHRG_DET;
+ }
+ 
++static void omap_usb2_put_device(void *_dev)
++{
++	struct device *dev = _dev;
++
++	put_device(dev);
++}
++
+ static int omap_usb2_probe(struct platform_device *pdev)
+ {
+ 	struct omap_usb	*phy;
+@@ -373,6 +380,7 @@ static int omap_usb2_probe(struct platform_device *pdev)
+ 	struct device_node *control_node;
+ 	struct platform_device *control_pdev;
+ 	const struct usb_phy_data *phy_data;
++	int ret;
+ 
+ 	phy_data = device_get_match_data(&pdev->dev);
+ 	if (!phy_data)
+@@ -423,6 +431,11 @@ static int omap_usb2_probe(struct platform_device *pdev)
+ 			return -EINVAL;
+ 		}
+ 		phy->control_dev = &control_pdev->dev;
++
++		ret = devm_add_action_or_reset(&pdev->dev, omap_usb2_put_device,
++					       phy->control_dev);
++		if (ret)
++			return ret;
+ 	} else {
+ 		if (of_property_read_u32_index(node,
+ 					       "syscon-phy-power", 1,
+diff --git a/drivers/phy/ti/phy-ti-pipe3.c b/drivers/phy/ti/phy-ti-pipe3.c
+index 874c1a25ce36e6..8e94d2c6e266a0 100644
+--- a/drivers/phy/ti/phy-ti-pipe3.c
++++ b/drivers/phy/ti/phy-ti-pipe3.c
+@@ -667,12 +667,20 @@ static int ti_pipe3_get_clk(struct ti_pipe3 *phy)
+ 	return 0;
+ }
+ 
++static void ti_pipe3_put_device(void *_dev)
++{
++	struct device *dev = _dev;
++
++	put_device(dev);
++}
++
+ static int ti_pipe3_get_sysctrl(struct ti_pipe3 *phy)
+ {
+ 	struct device *dev = phy->dev;
+ 	struct device_node *node = dev->of_node;
+ 	struct device_node *control_node;
+ 	struct platform_device *control_pdev;
++	int ret;
+ 
+ 	phy->phy_power_syscon = syscon_regmap_lookup_by_phandle(node,
+ 							"syscon-phy-power");
+@@ -704,6 +712,11 @@ static int ti_pipe3_get_sysctrl(struct ti_pipe3 *phy)
+ 		}
+ 
+ 		phy->control_dev = &control_pdev->dev;
++
++		ret = devm_add_action_or_reset(dev, ti_pipe3_put_device,
++					       phy->control_dev);
++		if (ret)
++			return ret;
+ 	}
+ 
+ 	if (phy->mode == PIPE3_MODE_PCIE) {
+diff --git a/drivers/regulator/sy7636a-regulator.c b/drivers/regulator/sy7636a-regulator.c
+index d1e7ba1fb3e1af..27e3d939b7bb9e 100644
+--- a/drivers/regulator/sy7636a-regulator.c
++++ b/drivers/regulator/sy7636a-regulator.c
+@@ -83,9 +83,11 @@ static int sy7636a_regulator_probe(struct platform_device *pdev)
+ 	if (!regmap)
+ 		return -EPROBE_DEFER;
+ 
+-	gdp = devm_gpiod_get(pdev->dev.parent, "epd-pwr-good", GPIOD_IN);
++	device_set_of_node_from_dev(&pdev->dev, pdev->dev.parent);
++
++	gdp = devm_gpiod_get(&pdev->dev, "epd-pwr-good", GPIOD_IN);
+ 	if (IS_ERR(gdp)) {
+-		dev_err(pdev->dev.parent, "Power good GPIO fault %ld\n", PTR_ERR(gdp));
++		dev_err(&pdev->dev, "Power good GPIO fault %ld\n", PTR_ERR(gdp));
+ 		return PTR_ERR(gdp);
+ 	}
+ 
+@@ -105,7 +107,6 @@ static int sy7636a_regulator_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	config.dev = &pdev->dev;
+-	config.dev->of_node = pdev->dev.parent->of_node;
+ 	config.regmap = regmap;
+ 
+ 	rdev = devm_regulator_register(&pdev->dev, &desc, &config);
+diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
+index cd1f657f782df2..13c663a154c4e8 100644
+--- a/drivers/tty/hvc/hvc_console.c
++++ b/drivers/tty/hvc/hvc_console.c
+@@ -543,10 +543,10 @@ static ssize_t hvc_write(struct tty_struct *tty, const u8 *buf, size_t count)
+ 	}
+ 
+ 	/*
+-	 * Racy, but harmless, kick thread if there is still pending data.
++	 * Kick thread to flush if there's still pending data
++	 * or to wakeup the write queue.
+ 	 */
+-	if (hp->n_outbuf)
+-		hvc_kick();
++	hvc_kick();
+ 
+ 	return written;
+ }
+diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
+index 6a0a1cce3a897f..835bd453c0e882 100644
+--- a/drivers/tty/serial/sc16is7xx.c
++++ b/drivers/tty/serial/sc16is7xx.c
+@@ -1161,17 +1161,6 @@ static int sc16is7xx_startup(struct uart_port *port)
+ 	sc16is7xx_port_write(port, SC16IS7XX_FCR_REG,
+ 			     SC16IS7XX_FCR_FIFO_BIT);
+ 
+-	/* Enable EFR */
+-	sc16is7xx_port_write(port, SC16IS7XX_LCR_REG,
+-			     SC16IS7XX_LCR_CONF_MODE_B);
+-
+-	regcache_cache_bypass(one->regmap, true);
+-
+-	/* Enable write access to enhanced features and internal clock div */
+-	sc16is7xx_port_update(port, SC16IS7XX_EFR_REG,
+-			      SC16IS7XX_EFR_ENABLE_BIT,
+-			      SC16IS7XX_EFR_ENABLE_BIT);
+-
+ 	/* Enable TCR/TLR */
+ 	sc16is7xx_port_update(port, SC16IS7XX_MCR_REG,
+ 			      SC16IS7XX_MCR_TCRTLR_BIT,
+@@ -1183,7 +1172,8 @@ static int sc16is7xx_startup(struct uart_port *port)
+ 			     SC16IS7XX_TCR_RX_RESUME(24) |
+ 			     SC16IS7XX_TCR_RX_HALT(48));
+ 
+-	regcache_cache_bypass(one->regmap, false);
++	/* Disable TCR/TLR access */
++	sc16is7xx_port_update(port, SC16IS7XX_MCR_REG, SC16IS7XX_MCR_TCRTLR_BIT, 0);
+ 
+ 	/* Now, initialize the UART */
+ 	sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, SC16IS7XX_LCR_WORD_LEN_8);
+diff --git a/drivers/usb/gadget/function/f_midi2.c b/drivers/usb/gadget/function/f_midi2.c
+index 0c45936f51b3d7..cbb4481b9da8be 100644
+--- a/drivers/usb/gadget/function/f_midi2.c
++++ b/drivers/usb/gadget/function/f_midi2.c
+@@ -1601,6 +1601,7 @@ static int f_midi2_create_card(struct f_midi2 *midi2)
+ 			strscpy(fb->info.name, ump_fb_name(b),
+ 				sizeof(fb->info.name));
+ 		}
++		snd_ump_update_group_attrs(ump);
+ 	}
+ 
+ 	for (i = 0; i < midi2->num_eps; i++) {
+@@ -1738,9 +1739,12 @@ static int f_midi2_create_usb_configs(struct f_midi2 *midi2,
+ 	case USB_SPEED_HIGH:
+ 		midi2_midi1_ep_out_desc.wMaxPacketSize = cpu_to_le16(512);
+ 		midi2_midi1_ep_in_desc.wMaxPacketSize = cpu_to_le16(512);
+-		for (i = 0; i < midi2->num_eps; i++)
++		for (i = 0; i < midi2->num_eps; i++) {
+ 			midi2_midi2_ep_out_desc[i].wMaxPacketSize =
+ 				cpu_to_le16(512);
++			midi2_midi2_ep_in_desc[i].wMaxPacketSize =
++				cpu_to_le16(512);
++		}
+ 		fallthrough;
+ 	case USB_SPEED_FULL:
+ 		midi1_in_eps = midi2_midi1_ep_in_descs;
+@@ -1749,9 +1753,12 @@ static int f_midi2_create_usb_configs(struct f_midi2 *midi2,
+ 	case USB_SPEED_SUPER:
+ 		midi2_midi1_ep_out_desc.wMaxPacketSize = cpu_to_le16(1024);
+ 		midi2_midi1_ep_in_desc.wMaxPacketSize = cpu_to_le16(1024);
+-		for (i = 0; i < midi2->num_eps; i++)
++		for (i = 0; i < midi2->num_eps; i++) {
+ 			midi2_midi2_ep_out_desc[i].wMaxPacketSize =
+ 				cpu_to_le16(1024);
++			midi2_midi2_ep_in_desc[i].wMaxPacketSize =
++				cpu_to_le16(1024);
++		}
+ 		midi1_in_eps = midi2_midi1_ep_in_ss_descs;
+ 		midi1_out_eps = midi2_midi1_ep_out_ss_descs;
+ 		break;
+diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
+index 081ac7683c0b35..fbb101c37594df 100644
+--- a/drivers/usb/gadget/udc/dummy_hcd.c
++++ b/drivers/usb/gadget/udc/dummy_hcd.c
+@@ -764,8 +764,7 @@ static int dummy_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+ 	if (!dum->driver)
+ 		return -ESHUTDOWN;
+ 
+-	local_irq_save(flags);
+-	spin_lock(&dum->lock);
++	spin_lock_irqsave(&dum->lock, flags);
+ 	list_for_each_entry(iter, &ep->queue, queue) {
+ 		if (&iter->req != _req)
+ 			continue;
+@@ -775,15 +774,16 @@ static int dummy_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+ 		retval = 0;
+ 		break;
+ 	}
+-	spin_unlock(&dum->lock);
+ 
+ 	if (retval == 0) {
+ 		dev_dbg(udc_dev(dum),
+ 				"dequeued req %p from %s, len %d buf %p\n",
+ 				req, _ep->name, _req->length, _req->buf);
++		spin_unlock(&dum->lock);
+ 		usb_gadget_giveback_request(_ep, _req);
++		spin_lock(&dum->lock);
+ 	}
+-	local_irq_restore(flags);
++	spin_unlock_irqrestore(&dum->lock, flags);
+ 	return retval;
+ }
+ 
+diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
+index 69188afa526660..91b47f9573cd7e 100644
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -939,7 +939,7 @@ static void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_i
+ out:
+ 	/* we are now at a leaf device */
+ 	xhci_debugfs_remove_slot(xhci, slot_id);
+-	xhci_free_virt_device(xhci, vdev, slot_id);
++	xhci_free_virt_device(xhci, xhci->devs[slot_id], slot_id);
+ }
+ 
+ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index e5cd3309342364..fc869b7f803f04 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -1322,7 +1322,18 @@ static const struct usb_device_id option_ids[] = {
+ 	 .driver_info = NCTRL(0) | RSVD(3) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1033, 0xff),	/* Telit LE910C1-EUX (ECM) */
+ 	 .driver_info = NCTRL(0) },
++	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1034, 0xff),	/* Telit LE910C4-WWX (rmnet) */
++	 .driver_info = RSVD(2) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1035, 0xff) }, /* Telit LE910C4-WWX (ECM) */
++	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1036, 0xff) },  /* Telit LE910C4-WWX */
++	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1037, 0xff),	/* Telit LE910C4-WWX (rmnet) */
++	 .driver_info = NCTRL(0) | NCTRL(1) | RSVD(4) },
++	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1038, 0xff),	/* Telit LE910C4-WWX (rmnet) */
++	 .driver_info = NCTRL(0) | RSVD(3) },
++	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x103b, 0xff),	/* Telit LE910C4-WWX */
++	 .driver_info = NCTRL(0) | NCTRL(1) },
++	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x103c, 0xff),	/* Telit LE910C4-WWX */
++	 .driver_info = NCTRL(0) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG0),
+ 	  .driver_info = RSVD(0) | RSVD(1) | NCTRL(2) | RSVD(3) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG1),
+@@ -1369,6 +1380,12 @@ static const struct usb_device_id option_ids[] = {
+ 	  .driver_info = NCTRL(0) | RSVD(1) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1075, 0xff),	/* Telit FN990A (PCIe) */
+ 	  .driver_info = RSVD(0) },
++	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1077, 0xff),	/* Telit FN990A (rmnet + audio) */
++	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
++	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1078, 0xff),	/* Telit FN990A (MBIM + audio) */
++	  .driver_info = NCTRL(0) | RSVD(1) },
++	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1079, 0xff),	/* Telit FN990A (RNDIS + audio) */
++	  .driver_info = NCTRL(2) | RSVD(3) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1080, 0xff),	/* Telit FE990A (rmnet) */
+ 	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1081, 0xff),	/* Telit FE990A (MBIM) */
+diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
+index 43e3dac5129fa5..92ce01b7d049fa 100644
+--- a/drivers/usb/typec/tcpm/tcpm.c
++++ b/drivers/usb/typec/tcpm/tcpm.c
+@@ -2375,17 +2375,21 @@ static void tcpm_handle_vdm_request(struct tcpm_port *port,
+ 		case ADEV_NONE:
+ 			break;
+ 		case ADEV_NOTIFY_USB_AND_QUEUE_VDM:
+-			WARN_ON(typec_altmode_notify(adev, TYPEC_STATE_USB, NULL));
+-			typec_altmode_vdm(adev, p[0], &p[1], cnt);
++			if (rx_sop_type == TCPC_TX_SOP_PRIME) {
++				typec_cable_altmode_vdm(adev, TYPEC_PLUG_SOP_P, p[0], &p[1], cnt);
++			} else {
++				WARN_ON(typec_altmode_notify(adev, TYPEC_STATE_USB, NULL));
++				typec_altmode_vdm(adev, p[0], &p[1], cnt);
++			}
+ 			break;
+ 		case ADEV_QUEUE_VDM:
+-			if (response_tx_sop_type == TCPC_TX_SOP_PRIME)
++			if (rx_sop_type == TCPC_TX_SOP_PRIME)
+ 				typec_cable_altmode_vdm(adev, TYPEC_PLUG_SOP_P, p[0], &p[1], cnt);
+ 			else
+ 				typec_altmode_vdm(adev, p[0], &p[1], cnt);
+ 			break;
+ 		case ADEV_QUEUE_VDM_SEND_EXIT_MODE_ON_FAIL:
+-			if (response_tx_sop_type == TCPC_TX_SOP_PRIME) {
++			if (rx_sop_type == TCPC_TX_SOP_PRIME) {
+ 				if (typec_cable_altmode_vdm(adev, TYPEC_PLUG_SOP_P,
+ 							    p[0], &p[1], cnt)) {
+ 					int svdm_version = typec_get_cable_svdm_version(
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index d322cf82783f9d..afebc91882befd 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -108,6 +108,25 @@ struct btrfs_bio_ctrl {
+ 	 * This is to avoid touching ranges covered by compression/inline.
+ 	 */
+ 	unsigned long submit_bitmap;
++	struct readahead_control *ractl;
++
++	/*
++	 * The start offset of the last used extent map by a read operation.
++	 *
++	 * This is for proper compressed read merge.
++	 * U64_MAX means we are starting the read and have made no progress yet.
++	 *
++	 * The current btrfs_bio_is_contig() only uses disk_bytenr as
++	 * the condition to check if the read can be merged with previous
++	 * bio, which is not correct. E.g. two file extents pointing to the
++	 * same extent but with different offset.
++	 *
++	 * So here we need to do extra checks to only merge reads that are
++	 * covered by the same extent map.
++	 * Just extent_map::start will be enough, as they are unique
++	 * inside the same inode.
++	 */
++	u64 last_em_start;
+ };
+ 
+ static void submit_one_bio(struct btrfs_bio_ctrl *bio_ctrl)
+@@ -929,6 +948,23 @@ static struct extent_map *get_extent_map(struct btrfs_inode *inode,
+ 
+ 	return em;
+ }
++
++static void btrfs_readahead_expand(struct readahead_control *ractl,
++				   const struct extent_map *em)
++{
++	const u64 ra_pos = readahead_pos(ractl);
++	const u64 ra_end = ra_pos + readahead_length(ractl);
++	const u64 em_end = em->start + em->ram_bytes;
++
++	/* No expansion for holes and inline extents. */
++	if (em->disk_bytenr > EXTENT_MAP_LAST_BYTE)
++		return;
++
++	ASSERT(em_end >= ra_pos);
++	if (em_end > ra_end)
++		readahead_expand(ractl, ra_pos, em_end - ra_pos);
++}
++
+ /*
+  * basic readpage implementation.  Locked extent state structs are inserted
+  * into the tree that are removed when the IO is done (by the end_io
+@@ -937,7 +973,7 @@ static struct extent_map *get_extent_map(struct btrfs_inode *inode,
+  * return 0 on success, otherwise return error
+  */
+ static int btrfs_do_readpage(struct folio *folio, struct extent_map **em_cached,
+-		      struct btrfs_bio_ctrl *bio_ctrl, u64 *prev_em_start)
++			     struct btrfs_bio_ctrl *bio_ctrl)
+ {
+ 	struct inode *inode = folio->mapping->host;
+ 	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
+@@ -994,6 +1030,17 @@ static int btrfs_do_readpage(struct folio *folio, struct extent_map **em_cached,
+ 
+ 		iosize = min(extent_map_end(em) - cur, end - cur + 1);
+ 		iosize = ALIGN(iosize, blocksize);
++
++		/*
++		 * Only expand readahead for extents which are already creating
++		 * the pages anyway in add_ra_bio_pages, which is compressed
++		 * extents in the non subpage case.
++		 */
++		if (bio_ctrl->ractl &&
++		    !btrfs_is_subpage(fs_info, folio->mapping) &&
++		    compress_type != BTRFS_COMPRESS_NONE)
++			btrfs_readahead_expand(bio_ctrl->ractl, em);
++
+ 		if (compress_type != BTRFS_COMPRESS_NONE)
+ 			disk_bytenr = em->disk_bytenr;
+ 		else
+@@ -1037,12 +1084,11 @@ static int btrfs_do_readpage(struct folio *folio, struct extent_map **em_cached,
+ 		 * non-optimal behavior (submitting 2 bios for the same extent).
+ 		 */
+ 		if (compress_type != BTRFS_COMPRESS_NONE &&
+-		    prev_em_start && *prev_em_start != (u64)-1 &&
+-		    *prev_em_start != em->start)
++		    bio_ctrl->last_em_start != U64_MAX &&
++		    bio_ctrl->last_em_start != em->start)
+ 			force_bio_submit = true;
+ 
+-		if (prev_em_start)
+-			*prev_em_start = em->start;
++		bio_ctrl->last_em_start = em->start;
+ 
+ 		free_extent_map(em);
+ 		em = NULL;
+@@ -1086,12 +1132,15 @@ int btrfs_read_folio(struct file *file, struct folio *folio)
+ 	const u64 start = folio_pos(folio);
+ 	const u64 end = start + folio_size(folio) - 1;
+ 	struct extent_state *cached_state = NULL;
+-	struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ };
++	struct btrfs_bio_ctrl bio_ctrl = {
++		.opf = REQ_OP_READ,
++		.last_em_start = U64_MAX,
++	};
+ 	struct extent_map *em_cached = NULL;
+ 	int ret;
+ 
+ 	btrfs_lock_and_flush_ordered_range(inode, start, end, &cached_state);
+-	ret = btrfs_do_readpage(folio, &em_cached, &bio_ctrl, NULL);
++	ret = btrfs_do_readpage(folio, &em_cached, &bio_ctrl);
+ 	unlock_extent(&inode->io_tree, start, end, &cached_state);
+ 
+ 	free_extent_map(em_cached);
+@@ -2360,19 +2409,22 @@ int btrfs_writepages(struct address_space *mapping, struct writeback_control *wb
+ 
+ void btrfs_readahead(struct readahead_control *rac)
+ {
+-	struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ | REQ_RAHEAD };
++	struct btrfs_bio_ctrl bio_ctrl = {
++		.opf = REQ_OP_READ | REQ_RAHEAD,
++		.ractl = rac,
++		.last_em_start = U64_MAX,
++	};
+ 	struct folio *folio;
+ 	struct btrfs_inode *inode = BTRFS_I(rac->mapping->host);
+ 	const u64 start = readahead_pos(rac);
+ 	const u64 end = start + readahead_length(rac) - 1;
+ 	struct extent_state *cached_state = NULL;
+ 	struct extent_map *em_cached = NULL;
+-	u64 prev_em_start = (u64)-1;
+ 
+ 	btrfs_lock_and_flush_ordered_range(inode, start, end, &cached_state);
+ 
+ 	while ((folio = readahead_folio(rac)) != NULL)
+-		btrfs_do_readpage(folio, &em_cached, &bio_ctrl, &prev_em_start);
++		btrfs_do_readpage(folio, &em_cached, &bio_ctrl);
+ 
+ 	unlock_extent(&inode->io_tree, start, end, &cached_state);
+ 
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 98d087a14be5eb..19c0ec9c327c15 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -5634,7 +5634,17 @@ static void btrfs_del_inode_from_root(struct btrfs_inode *inode)
+ 	bool empty = false;
+ 
+ 	xa_lock(&root->inodes);
+-	entry = __xa_erase(&root->inodes, btrfs_ino(inode));
++	/*
++	 * This btrfs_inode is being freed and has already been unhashed at this
++	 * point. It's possible that another btrfs_inode has already been
++	 * allocated for the same inode and inserted itself into the root, so
++	 * don't delete it in that case.
++	 *
++	 * Note that this shouldn't need to allocate memory, so the gfp flags
++	 * don't really matter.
++	 */
++	entry = __xa_cmpxchg(&root->inodes, btrfs_ino(inode), inode, NULL,
++			     GFP_ATOMIC);
+ 	if (entry == inode)
+ 		empty = xa_empty(&root->inodes);
+ 	xa_unlock(&root->inodes);
+diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
+index 530a2bab6ada00..2c9b38ae40da29 100644
+--- a/fs/btrfs/qgroup.c
++++ b/fs/btrfs/qgroup.c
+@@ -1501,6 +1501,7 @@ static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info, u64 ref_root,
+ 	struct btrfs_qgroup *qgroup;
+ 	LIST_HEAD(qgroup_list);
+ 	u64 num_bytes = src->excl;
++	u64 num_bytes_cmpr = src->excl_cmpr;
+ 	int ret = 0;
+ 
+ 	qgroup = find_qgroup_rb(fs_info, ref_root);
+@@ -1512,11 +1513,12 @@ static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info, u64 ref_root,
+ 		struct btrfs_qgroup_list *glist;
+ 
+ 		qgroup->rfer += sign * num_bytes;
+-		qgroup->rfer_cmpr += sign * num_bytes;
++		qgroup->rfer_cmpr += sign * num_bytes_cmpr;
+ 
+ 		WARN_ON(sign < 0 && qgroup->excl < num_bytes);
++		WARN_ON(sign < 0 && qgroup->excl_cmpr < num_bytes_cmpr);
+ 		qgroup->excl += sign * num_bytes;
+-		qgroup->excl_cmpr += sign * num_bytes;
++		qgroup->excl_cmpr += sign * num_bytes_cmpr;
+ 
+ 		if (sign > 0)
+ 			qgroup_rsv_add_by_qgroup(fs_info, qgroup, src);
+diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c
+index 24c08078f5aa3e..74297fd2a3654e 100644
+--- a/fs/ceph/debugfs.c
++++ b/fs/ceph/debugfs.c
+@@ -55,8 +55,6 @@ static int mdsc_show(struct seq_file *s, void *p)
+ 	struct ceph_mds_client *mdsc = fsc->mdsc;
+ 	struct ceph_mds_request *req;
+ 	struct rb_node *rp;
+-	int pathlen = 0;
+-	u64 pathbase;
+ 	char *path;
+ 
+ 	mutex_lock(&mdsc->mutex);
+@@ -81,8 +79,8 @@ static int mdsc_show(struct seq_file *s, void *p)
+ 		if (req->r_inode) {
+ 			seq_printf(s, " #%llx", ceph_ino(req->r_inode));
+ 		} else if (req->r_dentry) {
+-			path = ceph_mdsc_build_path(mdsc, req->r_dentry, &pathlen,
+-						    &pathbase, 0);
++			struct ceph_path_info path_info;
++			path = ceph_mdsc_build_path(mdsc, req->r_dentry, &path_info, 0);
+ 			if (IS_ERR(path))
+ 				path = NULL;
+ 			spin_lock(&req->r_dentry->d_lock);
+@@ -91,7 +89,7 @@ static int mdsc_show(struct seq_file *s, void *p)
+ 				   req->r_dentry,
+ 				   path ? path : "");
+ 			spin_unlock(&req->r_dentry->d_lock);
+-			ceph_mdsc_free_path(path, pathlen);
++			ceph_mdsc_free_path_info(&path_info);
+ 		} else if (req->r_path1) {
+ 			seq_printf(s, " #%llx/%s", req->r_ino1.ino,
+ 				   req->r_path1);
+@@ -100,8 +98,8 @@ static int mdsc_show(struct seq_file *s, void *p)
+ 		}
+ 
+ 		if (req->r_old_dentry) {
+-			path = ceph_mdsc_build_path(mdsc, req->r_old_dentry, &pathlen,
+-						    &pathbase, 0);
++			struct ceph_path_info path_info;
++			path = ceph_mdsc_build_path(mdsc, req->r_old_dentry, &path_info, 0);
+ 			if (IS_ERR(path))
+ 				path = NULL;
+ 			spin_lock(&req->r_old_dentry->d_lock);
+@@ -111,7 +109,7 @@ static int mdsc_show(struct seq_file *s, void *p)
+ 				   req->r_old_dentry,
+ 				   path ? path : "");
+ 			spin_unlock(&req->r_old_dentry->d_lock);
+-			ceph_mdsc_free_path(path, pathlen);
++			ceph_mdsc_free_path_info(&path_info);
+ 		} else if (req->r_path2 && req->r_op != CEPH_MDS_OP_SYMLINK) {
+ 			if (req->r_ino2.ino)
+ 				seq_printf(s, " #%llx/%s", req->r_ino2.ino,
+diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
+index 952109292d6912..16c25c48465e5e 100644
+--- a/fs/ceph/dir.c
++++ b/fs/ceph/dir.c
+@@ -1263,10 +1263,8 @@ static void ceph_async_unlink_cb(struct ceph_mds_client *mdsc,
+ 
+ 	/* If op failed, mark everyone involved for errors */
+ 	if (result) {
+-		int pathlen = 0;
+-		u64 base = 0;
+-		char *path = ceph_mdsc_build_path(mdsc, dentry, &pathlen,
+-						  &base, 0);
++		struct ceph_path_info path_info = {0};
++		char *path = ceph_mdsc_build_path(mdsc, dentry, &path_info, 0);
+ 
+ 		/* mark error on parent + clear complete */
+ 		mapping_set_error(req->r_parent->i_mapping, result);
+@@ -1280,8 +1278,8 @@ static void ceph_async_unlink_cb(struct ceph_mds_client *mdsc,
+ 		mapping_set_error(req->r_old_inode->i_mapping, result);
+ 
+ 		pr_warn_client(cl, "failure path=(%llx)%s result=%d!\n",
+-			       base, IS_ERR(path) ? "<<bad>>" : path, result);
+-		ceph_mdsc_free_path(path, pathlen);
++			       path_info.vino.ino, IS_ERR(path) ? "<<bad>>" : path, result);
++		ceph_mdsc_free_path_info(&path_info);
+ 	}
+ out:
+ 	iput(req->r_old_inode);
+@@ -1339,8 +1337,6 @@ static int ceph_unlink(struct inode *dir, struct dentry *dentry)
+ 	int err = -EROFS;
+ 	int op;
+ 	char *path;
+-	int pathlen;
+-	u64 pathbase;
+ 
+ 	if (ceph_snap(dir) == CEPH_SNAPDIR) {
+ 		/* rmdir .snap/foo is RMSNAP */
+@@ -1359,14 +1355,15 @@ static int ceph_unlink(struct inode *dir, struct dentry *dentry)
+ 	if (!dn) {
+ 		try_async = false;
+ 	} else {
+-		path = ceph_mdsc_build_path(mdsc, dn, &pathlen, &pathbase, 0);
++		struct ceph_path_info path_info;
++		path = ceph_mdsc_build_path(mdsc, dn, &path_info, 0);
+ 		if (IS_ERR(path)) {
+ 			try_async = false;
+ 			err = 0;
+ 		} else {
+ 			err = ceph_mds_check_access(mdsc, path, MAY_WRITE);
+ 		}
+-		ceph_mdsc_free_path(path, pathlen);
++		ceph_mdsc_free_path_info(&path_info);
+ 		dput(dn);
+ 
+ 		/* For none EACCES cases will let the MDS do the mds auth check */
+diff --git a/fs/ceph/file.c b/fs/ceph/file.c
+index a7254cab44cc2e..6587c2d5af1e08 100644
+--- a/fs/ceph/file.c
++++ b/fs/ceph/file.c
+@@ -368,8 +368,6 @@ int ceph_open(struct inode *inode, struct file *file)
+ 	int flags, fmode, wanted;
+ 	struct dentry *dentry;
+ 	char *path;
+-	int pathlen;
+-	u64 pathbase;
+ 	bool do_sync = false;
+ 	int mask = MAY_READ;
+ 
+@@ -399,14 +397,15 @@ int ceph_open(struct inode *inode, struct file *file)
+ 	if (!dentry) {
+ 		do_sync = true;
+ 	} else {
+-		path = ceph_mdsc_build_path(mdsc, dentry, &pathlen, &pathbase, 0);
++		struct ceph_path_info path_info;
++		path = ceph_mdsc_build_path(mdsc, dentry, &path_info, 0);
+ 		if (IS_ERR(path)) {
+ 			do_sync = true;
+ 			err = 0;
+ 		} else {
+ 			err = ceph_mds_check_access(mdsc, path, mask);
+ 		}
+-		ceph_mdsc_free_path(path, pathlen);
++		ceph_mdsc_free_path_info(&path_info);
+ 		dput(dentry);
+ 
+ 		/* For none EACCES cases will let the MDS do the mds auth check */
+@@ -614,15 +613,13 @@ static void ceph_async_create_cb(struct ceph_mds_client *mdsc,
+ 	mapping_set_error(req->r_parent->i_mapping, result);
+ 
+ 	if (result) {
+-		int pathlen = 0;
+-		u64 base = 0;
+-		char *path = ceph_mdsc_build_path(mdsc, req->r_dentry, &pathlen,
+-						  &base, 0);
++		struct ceph_path_info path_info = {0};
++		char *path = ceph_mdsc_build_path(mdsc, req->r_dentry, &path_info, 0);
+ 
+ 		pr_warn_client(cl,
+ 			"async create failure path=(%llx)%s result=%d!\n",
+-			base, IS_ERR(path) ? "<<bad>>" : path, result);
+-		ceph_mdsc_free_path(path, pathlen);
++			path_info.vino.ino, IS_ERR(path) ? "<<bad>>" : path, result);
++		ceph_mdsc_free_path_info(&path_info);
+ 
+ 		ceph_dir_clear_complete(req->r_parent);
+ 		if (!d_unhashed(dentry))
+@@ -791,8 +788,6 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
+ 	int mask;
+ 	int err;
+ 	char *path;
+-	int pathlen;
+-	u64 pathbase;
+ 
+ 	doutc(cl, "%p %llx.%llx dentry %p '%pd' %s flags %d mode 0%o\n",
+ 	      dir, ceph_vinop(dir), dentry, dentry,
+@@ -814,7 +809,8 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
+ 	if (!dn) {
+ 		try_async = false;
+ 	} else {
+-		path = ceph_mdsc_build_path(mdsc, dn, &pathlen, &pathbase, 0);
++		struct ceph_path_info path_info;
++		path = ceph_mdsc_build_path(mdsc, dn, &path_info, 0);
+ 		if (IS_ERR(path)) {
+ 			try_async = false;
+ 			err = 0;
+@@ -826,7 +822,7 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
+ 				mask |= MAY_WRITE;
+ 			err = ceph_mds_check_access(mdsc, path, mask);
+ 		}
+-		ceph_mdsc_free_path(path, pathlen);
++		ceph_mdsc_free_path_info(&path_info);
+ 		dput(dn);
+ 
+ 		/* For none EACCES cases will let the MDS do the mds auth check */
+diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
+index f7875e6f302902..ead51d9e019baa 100644
+--- a/fs/ceph/inode.c
++++ b/fs/ceph/inode.c
+@@ -55,6 +55,52 @@ static int ceph_set_ino_cb(struct inode *inode, void *data)
+ 	return 0;
+ }
+ 
++/*
++ * Check if the parent inode matches the vino from directory reply info
++ */
++static inline bool ceph_vino_matches_parent(struct inode *parent,
++					    struct ceph_vino vino)
++{
++	return ceph_ino(parent) == vino.ino && ceph_snap(parent) == vino.snap;
++}
++
++/*
++ * Validate that the directory inode referenced by @req->r_parent matches the
++ * inode number and snapshot id contained in the reply's directory record.  If
++ * they do not match – which can theoretically happen if the parent dentry was
++ * moved between the time the request was issued and the reply arrived – fall
++ * back to looking up the correct inode in the inode cache.
++ *
++ * A reference is *always* returned.  Callers that receive a different inode
++ * than the original @parent are responsible for dropping the extra reference
++ * once the reply has been processed.
++ */
++static struct inode *ceph_get_reply_dir(struct super_block *sb,
++					struct inode *parent,
++					struct ceph_mds_reply_info_parsed *rinfo)
++{
++	struct ceph_vino vino;
++
++	if (unlikely(!rinfo->diri.in))
++		return parent; /* nothing to compare against */
++
++	/* If we didn't have a cached parent inode to begin with, just bail out. */
++	if (!parent)
++		return NULL;
++
++	vino.ino  = le64_to_cpu(rinfo->diri.in->ino);
++	vino.snap = le64_to_cpu(rinfo->diri.in->snapid);
++
++	if (likely(ceph_vino_matches_parent(parent, vino)))
++		return parent; /* matches – use the original reference */
++
++	/* Mismatch – this should be rare.  Emit a WARN and obtain the correct inode. */
++	WARN_ONCE(1, "ceph: reply dir mismatch (parent valid %llx.%llx reply %llx.%llx)\n",
++		  ceph_ino(parent), ceph_snap(parent), vino.ino, vino.snap);
++
++	return ceph_get_inode(sb, vino, NULL);
++}
++
+ /**
+  * ceph_new_inode - allocate a new inode in advance of an expected create
+  * @dir: parent directory for new inode
+@@ -1523,6 +1569,7 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req)
+ 	struct ceph_vino tvino, dvino;
+ 	struct ceph_fs_client *fsc = ceph_sb_to_fs_client(sb);
+ 	struct ceph_client *cl = fsc->client;
++	struct inode *parent_dir = NULL;
+ 	int err = 0;
+ 
+ 	doutc(cl, "%p is_dentry %d is_target %d\n", req,
+@@ -1536,10 +1583,17 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req)
+ 	}
+ 
+ 	if (rinfo->head->is_dentry) {
+-		struct inode *dir = req->r_parent;
+-
+-		if (dir) {
+-			err = ceph_fill_inode(dir, NULL, &rinfo->diri,
++		/*
++		 * r_parent may be stale, in cases when R_PARENT_LOCKED is not set,
++		 * so we need to get the correct inode
++		 */
++		parent_dir = ceph_get_reply_dir(sb, req->r_parent, rinfo);
++		if (unlikely(IS_ERR(parent_dir))) {
++			err = PTR_ERR(parent_dir);
++			goto done;
++		}
++		if (parent_dir) {
++			err = ceph_fill_inode(parent_dir, NULL, &rinfo->diri,
+ 					      rinfo->dirfrag, session, -1,
+ 					      &req->r_caps_reservation);
+ 			if (err < 0)
+@@ -1548,14 +1602,14 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req)
+ 			WARN_ON_ONCE(1);
+ 		}
+ 
+-		if (dir && req->r_op == CEPH_MDS_OP_LOOKUPNAME &&
++		if (parent_dir && req->r_op == CEPH_MDS_OP_LOOKUPNAME &&
+ 		    test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags) &&
+ 		    !test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
+ 			bool is_nokey = false;
+ 			struct qstr dname;
+ 			struct dentry *dn, *parent;
+ 			struct fscrypt_str oname = FSTR_INIT(NULL, 0);
+-			struct ceph_fname fname = { .dir	= dir,
++			struct ceph_fname fname = { .dir	= parent_dir,
+ 						    .name	= rinfo->dname,
+ 						    .ctext	= rinfo->altname,
+ 						    .name_len	= rinfo->dname_len,
+@@ -1564,10 +1618,10 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req)
+ 			BUG_ON(!rinfo->head->is_target);
+ 			BUG_ON(req->r_dentry);
+ 
+-			parent = d_find_any_alias(dir);
++			parent = d_find_any_alias(parent_dir);
+ 			BUG_ON(!parent);
+ 
+-			err = ceph_fname_alloc_buffer(dir, &oname);
++			err = ceph_fname_alloc_buffer(parent_dir, &oname);
+ 			if (err < 0) {
+ 				dput(parent);
+ 				goto done;
+@@ -1576,7 +1630,7 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req)
+ 			err = ceph_fname_to_usr(&fname, NULL, &oname, &is_nokey);
+ 			if (err < 0) {
+ 				dput(parent);
+-				ceph_fname_free_buffer(dir, &oname);
++				ceph_fname_free_buffer(parent_dir, &oname);
+ 				goto done;
+ 			}
+ 			dname.name = oname.name;
+@@ -1595,7 +1649,7 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req)
+ 				      dname.len, dname.name, dn);
+ 				if (!dn) {
+ 					dput(parent);
+-					ceph_fname_free_buffer(dir, &oname);
++					ceph_fname_free_buffer(parent_dir, &oname);
+ 					err = -ENOMEM;
+ 					goto done;
+ 				}
+@@ -1610,12 +1664,12 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req)
+ 				    ceph_snap(d_inode(dn)) != tvino.snap)) {
+ 				doutc(cl, " dn %p points to wrong inode %p\n",
+ 				      dn, d_inode(dn));
+-				ceph_dir_clear_ordered(dir);
++				ceph_dir_clear_ordered(parent_dir);
+ 				d_delete(dn);
+ 				dput(dn);
+ 				goto retry_lookup;
+ 			}
+-			ceph_fname_free_buffer(dir, &oname);
++			ceph_fname_free_buffer(parent_dir, &oname);
+ 
+ 			req->r_dentry = dn;
+ 			dput(parent);
+@@ -1794,6 +1848,9 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req)
+ 					    &dvino, ptvino);
+ 	}
+ done:
++	/* Drop extra ref from ceph_get_reply_dir() if it returned a new inode */
++	if (unlikely(!IS_ERR_OR_NULL(parent_dir) && parent_dir != req->r_parent))
++		iput(parent_dir);
+ 	doutc(cl, "done err=%d\n", err);
+ 	return err;
+ }
+@@ -2483,22 +2540,21 @@ int __ceph_setattr(struct mnt_idmap *idmap, struct inode *inode,
+ 	int truncate_retry = 20; /* The RMW will take around 50ms */
+ 	struct dentry *dentry;
+ 	char *path;
+-	int pathlen;
+-	u64 pathbase;
+ 	bool do_sync = false;
+ 
+ 	dentry = d_find_alias(inode);
+ 	if (!dentry) {
+ 		do_sync = true;
+ 	} else {
+-		path = ceph_mdsc_build_path(mdsc, dentry, &pathlen, &pathbase, 0);
++		struct ceph_path_info path_info;
++		path = ceph_mdsc_build_path(mdsc, dentry, &path_info, 0);
+ 		if (IS_ERR(path)) {
+ 			do_sync = true;
+ 			err = 0;
+ 		} else {
+ 			err = ceph_mds_check_access(mdsc, path, MAY_WRITE);
+ 		}
+-		ceph_mdsc_free_path(path, pathlen);
++		ceph_mdsc_free_path_info(&path_info);
+ 		dput(dentry);
+ 
+ 		/* For none EACCES cases will let the MDS do the mds auth check */
+diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
+index f3af6330d74a7d..df89d45f33a1ff 100644
+--- a/fs/ceph/mds_client.c
++++ b/fs/ceph/mds_client.c
+@@ -2686,8 +2686,7 @@ static u8 *get_fscrypt_altname(const struct ceph_mds_request *req, u32 *plen)
+  * ceph_mdsc_build_path - build a path string to a given dentry
+  * @mdsc: mds client
+  * @dentry: dentry to which path should be built
+- * @plen: returned length of string
+- * @pbase: returned base inode number
++ * @path_info: output path, length, base ino+snap, and freepath ownership flag
+  * @for_wire: is this path going to be sent to the MDS?
+  *
+  * Build a string that represents the path to the dentry. This is mostly called
+@@ -2705,7 +2704,7 @@ static u8 *get_fscrypt_altname(const struct ceph_mds_request *req, u32 *plen)
+  *   foo/.snap/bar -> foo//bar
+  */
+ char *ceph_mdsc_build_path(struct ceph_mds_client *mdsc, struct dentry *dentry,
+-			   int *plen, u64 *pbase, int for_wire)
++			   struct ceph_path_info *path_info, int for_wire)
+ {
+ 	struct ceph_client *cl = mdsc->fsc->client;
+ 	struct dentry *cur;
+@@ -2815,16 +2814,28 @@ char *ceph_mdsc_build_path(struct ceph_mds_client *mdsc, struct dentry *dentry,
+ 		return ERR_PTR(-ENAMETOOLONG);
+ 	}
+ 
+-	*pbase = base;
+-	*plen = PATH_MAX - 1 - pos;
++	/* Initialize the output structure */
++	memset(path_info, 0, sizeof(*path_info));
++
++	path_info->vino.ino = base;
++	path_info->pathlen = PATH_MAX - 1 - pos;
++	path_info->path = path + pos;
++	path_info->freepath = true;
++
++	/* Set snap from dentry if available */
++	if (d_inode(dentry))
++		path_info->vino.snap = ceph_snap(d_inode(dentry));
++	else
++		path_info->vino.snap = CEPH_NOSNAP;
++
+ 	doutc(cl, "on %p %d built %llx '%.*s'\n", dentry, d_count(dentry),
+-	      base, *plen, path + pos);
++	      base, PATH_MAX - 1 - pos, path + pos);
+ 	return path + pos;
+ }
+ 
+ static int build_dentry_path(struct ceph_mds_client *mdsc, struct dentry *dentry,
+-			     struct inode *dir, const char **ppath, int *ppathlen,
+-			     u64 *pino, bool *pfreepath, bool parent_locked)
++			     struct inode *dir, struct ceph_path_info *path_info,
++			     bool parent_locked)
+ {
+ 	char *path;
+ 
+@@ -2833,41 +2844,47 @@ static int build_dentry_path(struct ceph_mds_client *mdsc, struct dentry *dentry
+ 		dir = d_inode_rcu(dentry->d_parent);
+ 	if (dir && parent_locked && ceph_snap(dir) == CEPH_NOSNAP &&
+ 	    !IS_ENCRYPTED(dir)) {
+-		*pino = ceph_ino(dir);
++		path_info->vino.ino = ceph_ino(dir);
++		path_info->vino.snap = ceph_snap(dir);
+ 		rcu_read_unlock();
+-		*ppath = dentry->d_name.name;
+-		*ppathlen = dentry->d_name.len;
++		path_info->path = dentry->d_name.name;
++		path_info->pathlen = dentry->d_name.len;
++		path_info->freepath = false;
+ 		return 0;
+ 	}
+ 	rcu_read_unlock();
+-	path = ceph_mdsc_build_path(mdsc, dentry, ppathlen, pino, 1);
++	path = ceph_mdsc_build_path(mdsc, dentry, path_info, 1);
+ 	if (IS_ERR(path))
+ 		return PTR_ERR(path);
+-	*ppath = path;
+-	*pfreepath = true;
++	/*
++	 * ceph_mdsc_build_path already fills path_info, including snap handling.
++	 */
+ 	return 0;
+ }
+ 
+-static int build_inode_path(struct inode *inode,
+-			    const char **ppath, int *ppathlen, u64 *pino,
+-			    bool *pfreepath)
++static int build_inode_path(struct inode *inode, struct ceph_path_info *path_info)
+ {
+ 	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
+ 	struct dentry *dentry;
+ 	char *path;
+ 
+ 	if (ceph_snap(inode) == CEPH_NOSNAP) {
+-		*pino = ceph_ino(inode);
+-		*ppathlen = 0;
++		path_info->vino.ino = ceph_ino(inode);
++		path_info->vino.snap = ceph_snap(inode);
++		path_info->pathlen = 0;
++		path_info->freepath = false;
+ 		return 0;
+ 	}
+ 	dentry = d_find_alias(inode);
+-	path = ceph_mdsc_build_path(mdsc, dentry, ppathlen, pino, 1);
++	path = ceph_mdsc_build_path(mdsc, dentry, path_info, 1);
+ 	dput(dentry);
+ 	if (IS_ERR(path))
+ 		return PTR_ERR(path);
+-	*ppath = path;
+-	*pfreepath = true;
++	/*
++	 * ceph_mdsc_build_path already fills path_info, including snap from dentry.
++	 * Override with inode's snap since that's what this function is for.
++	 */
++	path_info->vino.snap = ceph_snap(inode);
+ 	return 0;
+ }
+ 
+@@ -2877,26 +2894,32 @@ static int build_inode_path(struct inode *inode,
+  */
+ static int set_request_path_attr(struct ceph_mds_client *mdsc, struct inode *rinode,
+ 				 struct dentry *rdentry, struct inode *rdiri,
+-				 const char *rpath, u64 rino, const char **ppath,
+-				 int *pathlen, u64 *ino, bool *freepath,
++				 const char *rpath, u64 rino,
++				 struct ceph_path_info *path_info,
+ 				 bool parent_locked)
+ {
+ 	struct ceph_client *cl = mdsc->fsc->client;
+ 	int r = 0;
+ 
++	/* Initialize the output structure */
++	memset(path_info, 0, sizeof(*path_info));
++
+ 	if (rinode) {
+-		r = build_inode_path(rinode, ppath, pathlen, ino, freepath);
++		r = build_inode_path(rinode, path_info);
+ 		doutc(cl, " inode %p %llx.%llx\n", rinode, ceph_ino(rinode),
+ 		      ceph_snap(rinode));
+ 	} else if (rdentry) {
+-		r = build_dentry_path(mdsc, rdentry, rdiri, ppath, pathlen, ino,
+-					freepath, parent_locked);
+-		doutc(cl, " dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen, *ppath);
++		r = build_dentry_path(mdsc, rdentry, rdiri, path_info, parent_locked);
++		doutc(cl, " dentry %p %llx/%.*s\n", rdentry, path_info->vino.ino,
++		      path_info->pathlen, path_info->path);
+ 	} else if (rpath || rino) {
+-		*ino = rino;
+-		*ppath = rpath;
+-		*pathlen = rpath ? strlen(rpath) : 0;
+-		doutc(cl, " path %.*s\n", *pathlen, rpath);
++		path_info->vino.ino = rino;
++		path_info->vino.snap = CEPH_NOSNAP;
++		path_info->path = rpath;
++		path_info->pathlen = rpath ? strlen(rpath) : 0;
++		path_info->freepath = false;
++
++		doutc(cl, " path %.*s\n", path_info->pathlen, rpath);
+ 	}
+ 
+ 	return r;
+@@ -2973,11 +2996,8 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
+ 	struct ceph_client *cl = mdsc->fsc->client;
+ 	struct ceph_msg *msg;
+ 	struct ceph_mds_request_head_legacy *lhead;
+-	const char *path1 = NULL;
+-	const char *path2 = NULL;
+-	u64 ino1 = 0, ino2 = 0;
+-	int pathlen1 = 0, pathlen2 = 0;
+-	bool freepath1 = false, freepath2 = false;
++	struct ceph_path_info path_info1 = {0};
++	struct ceph_path_info path_info2 = {0};
+ 	struct dentry *old_dentry = NULL;
+ 	int len;
+ 	u16 releases;
+@@ -2987,25 +3007,49 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
+ 	u16 request_head_version = mds_supported_head_version(session);
+ 	kuid_t caller_fsuid = req->r_cred->fsuid;
+ 	kgid_t caller_fsgid = req->r_cred->fsgid;
++	bool parent_locked = test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
+ 
+ 	ret = set_request_path_attr(mdsc, req->r_inode, req->r_dentry,
+-			      req->r_parent, req->r_path1, req->r_ino1.ino,
+-			      &path1, &pathlen1, &ino1, &freepath1,
+-			      test_bit(CEPH_MDS_R_PARENT_LOCKED,
+-					&req->r_req_flags));
++				    req->r_parent, req->r_path1, req->r_ino1.ino,
++				    &path_info1, parent_locked);
+ 	if (ret < 0) {
+ 		msg = ERR_PTR(ret);
+ 		goto out;
+ 	}
+ 
++	/*
++	 * When the parent directory's i_rwsem is *not* locked, req->r_parent may
++	 * have become stale (e.g. after a concurrent rename) between the time the
++	 * dentry was looked up and now.  If we detect that the stored r_parent
++	 * does not match the inode number we just encoded for the request, switch
++	 * to the correct inode so that the MDS receives a valid parent reference.
++	 */
++	if (!parent_locked && req->r_parent && path_info1.vino.ino &&
++	    ceph_ino(req->r_parent) != path_info1.vino.ino) {
++		struct inode *old_parent = req->r_parent;
++		struct inode *correct_dir = ceph_get_inode(mdsc->fsc->sb, path_info1.vino, NULL);
++		if (!IS_ERR(correct_dir)) {
++			WARN_ONCE(1, "ceph: r_parent mismatch (had %llx wanted %llx) - updating\n",
++			          ceph_ino(old_parent), path_info1.vino.ino);
++			/*
++			 * Transfer CEPH_CAP_PIN from the old parent to the new one.
++			 * The pin was taken earlier in ceph_mdsc_submit_request().
++			 */
++			ceph_put_cap_refs(ceph_inode(old_parent), CEPH_CAP_PIN);
++			iput(old_parent);
++			req->r_parent = correct_dir;
++			ceph_get_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN);
++		}
++	}
++
+ 	/* If r_old_dentry is set, then assume that its parent is locked */
+ 	if (req->r_old_dentry &&
+ 	    !(req->r_old_dentry->d_flags & DCACHE_DISCONNECTED))
+ 		old_dentry = req->r_old_dentry;
+ 	ret = set_request_path_attr(mdsc, NULL, old_dentry,
+-			      req->r_old_dentry_dir,
+-			      req->r_path2, req->r_ino2.ino,
+-			      &path2, &pathlen2, &ino2, &freepath2, true);
++				    req->r_old_dentry_dir,
++				    req->r_path2, req->r_ino2.ino,
++				    &path_info2, true);
+ 	if (ret < 0) {
+ 		msg = ERR_PTR(ret);
+ 		goto out_free1;
+@@ -3036,7 +3080,7 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
+ 
+ 	/* filepaths */
+ 	len += 2 * (1 + sizeof(u32) + sizeof(u64));
+-	len += pathlen1 + pathlen2;
++	len += path_info1.pathlen + path_info2.pathlen;
+ 
+ 	/* cap releases */
+ 	len += sizeof(struct ceph_mds_request_release) *
+@@ -3044,9 +3088,9 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
+ 		 !!req->r_old_inode_drop + !!req->r_old_dentry_drop);
+ 
+ 	if (req->r_dentry_drop)
+-		len += pathlen1;
++		len += path_info1.pathlen;
+ 	if (req->r_old_dentry_drop)
+-		len += pathlen2;
++		len += path_info2.pathlen;
+ 
+ 	/* MClientRequest tail */
+ 
+@@ -3159,8 +3203,8 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
+ 	lhead->ino = cpu_to_le64(req->r_deleg_ino);
+ 	lhead->args = req->r_args;
+ 
+-	ceph_encode_filepath(&p, end, ino1, path1);
+-	ceph_encode_filepath(&p, end, ino2, path2);
++	ceph_encode_filepath(&p, end, path_info1.vino.ino, path_info1.path);
++	ceph_encode_filepath(&p, end, path_info2.vino.ino, path_info2.path);
+ 
+ 	/* make note of release offset, in case we need to replay */
+ 	req->r_request_release_offset = p - msg->front.iov_base;
+@@ -3223,11 +3267,9 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
+ 	msg->hdr.data_off = cpu_to_le16(0);
+ 
+ out_free2:
+-	if (freepath2)
+-		ceph_mdsc_free_path((char *)path2, pathlen2);
++	ceph_mdsc_free_path_info(&path_info2);
+ out_free1:
+-	if (freepath1)
+-		ceph_mdsc_free_path((char *)path1, pathlen1);
++	ceph_mdsc_free_path_info(&path_info1);
+ out:
+ 	return msg;
+ out_err:
+@@ -4584,24 +4626,20 @@ static int reconnect_caps_cb(struct inode *inode, int mds, void *arg)
+ 	struct ceph_pagelist *pagelist = recon_state->pagelist;
+ 	struct dentry *dentry;
+ 	struct ceph_cap *cap;
+-	char *path;
+-	int pathlen = 0, err;
+-	u64 pathbase;
++	struct ceph_path_info path_info = {0};
++	int err;
+ 	u64 snap_follows;
+ 
+ 	dentry = d_find_primary(inode);
+ 	if (dentry) {
+ 		/* set pathbase to parent dir when msg_version >= 2 */
+-		path = ceph_mdsc_build_path(mdsc, dentry, &pathlen, &pathbase,
++		char *path = ceph_mdsc_build_path(mdsc, dentry, &path_info,
+ 					    recon_state->msg_version >= 2);
+ 		dput(dentry);
+ 		if (IS_ERR(path)) {
+ 			err = PTR_ERR(path);
+ 			goto out_err;
+ 		}
+-	} else {
+-		path = NULL;
+-		pathbase = 0;
+ 	}
+ 
+ 	spin_lock(&ci->i_ceph_lock);
+@@ -4634,7 +4672,7 @@ static int reconnect_caps_cb(struct inode *inode, int mds, void *arg)
+ 		rec.v2.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
+ 		rec.v2.issued = cpu_to_le32(cap->issued);
+ 		rec.v2.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
+-		rec.v2.pathbase = cpu_to_le64(pathbase);
++		rec.v2.pathbase = cpu_to_le64(path_info.vino.ino);
+ 		rec.v2.flock_len = (__force __le32)
+ 			((ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK) ? 0 : 1);
+ 	} else {
+@@ -4649,7 +4687,7 @@ static int reconnect_caps_cb(struct inode *inode, int mds, void *arg)
+ 		ts = inode_get_atime(inode);
+ 		ceph_encode_timespec64(&rec.v1.atime, &ts);
+ 		rec.v1.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
+-		rec.v1.pathbase = cpu_to_le64(pathbase);
++		rec.v1.pathbase = cpu_to_le64(path_info.vino.ino);
+ 	}
+ 
+ 	if (list_empty(&ci->i_cap_snaps)) {
+@@ -4711,7 +4749,7 @@ static int reconnect_caps_cb(struct inode *inode, int mds, void *arg)
+ 			    sizeof(struct ceph_filelock);
+ 		rec.v2.flock_len = cpu_to_le32(struct_len);
+ 
+-		struct_len += sizeof(u32) + pathlen + sizeof(rec.v2);
++		struct_len += sizeof(u32) + path_info.pathlen + sizeof(rec.v2);
+ 
+ 		if (struct_v >= 2)
+ 			struct_len += sizeof(u64); /* snap_follows */
+@@ -4735,7 +4773,7 @@ static int reconnect_caps_cb(struct inode *inode, int mds, void *arg)
+ 			ceph_pagelist_encode_8(pagelist, 1);
+ 			ceph_pagelist_encode_32(pagelist, struct_len);
+ 		}
+-		ceph_pagelist_encode_string(pagelist, path, pathlen);
++		ceph_pagelist_encode_string(pagelist, (char *)path_info.path, path_info.pathlen);
+ 		ceph_pagelist_append(pagelist, &rec, sizeof(rec.v2));
+ 		ceph_locks_to_pagelist(flocks, pagelist,
+ 				       num_fcntl_locks, num_flock_locks);
+@@ -4746,17 +4784,17 @@ static int reconnect_caps_cb(struct inode *inode, int mds, void *arg)
+ 	} else {
+ 		err = ceph_pagelist_reserve(pagelist,
+ 					    sizeof(u64) + sizeof(u32) +
+-					    pathlen + sizeof(rec.v1));
++					    path_info.pathlen + sizeof(rec.v1));
+ 		if (err)
+ 			goto out_err;
+ 
+ 		ceph_pagelist_encode_64(pagelist, ceph_ino(inode));
+-		ceph_pagelist_encode_string(pagelist, path, pathlen);
++		ceph_pagelist_encode_string(pagelist, (char *)path_info.path, path_info.pathlen);
+ 		ceph_pagelist_append(pagelist, &rec, sizeof(rec.v1));
+ 	}
+ 
+ out_err:
+-	ceph_mdsc_free_path(path, pathlen);
++	ceph_mdsc_free_path_info(&path_info);
+ 	if (!err)
+ 		recon_state->nr_caps++;
+ 	return err;
+diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
+index 3dd54587944ac0..0a602080d8ef69 100644
+--- a/fs/ceph/mds_client.h
++++ b/fs/ceph/mds_client.h
+@@ -612,14 +612,24 @@ extern int ceph_mds_check_access(struct ceph_mds_client *mdsc, char *tpath,
+ 
+ extern void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc);
+ 
+-static inline void ceph_mdsc_free_path(char *path, int len)
++/*
++ * Structure to group path-related output parameters for build_*_path functions
++ */
++struct ceph_path_info {
++	const char *path;
++	int pathlen;
++	struct ceph_vino vino;
++	bool freepath;
++};
++
++static inline void ceph_mdsc_free_path_info(const struct ceph_path_info *path_info)
+ {
+-	if (!IS_ERR_OR_NULL(path))
+-		__putname(path - (PATH_MAX - 1 - len));
++	if (path_info && path_info->freepath && !IS_ERR_OR_NULL(path_info->path))
++		__putname((char *)path_info->path - (PATH_MAX - 1 - path_info->pathlen));
+ }
+ 
+ extern char *ceph_mdsc_build_path(struct ceph_mds_client *mdsc,
+-				  struct dentry *dentry, int *plen, u64 *base,
++				  struct dentry *dentry, struct ceph_path_info *path_info,
+ 				  int for_wire);
+ 
+ extern void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry);
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index 286f8fcb74cc9d..29c7c0b8295fb8 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -1462,7 +1462,8 @@ static bool ext4_match(struct inode *parent,
+ 		 * sure cf_name was properly initialized before
+ 		 * considering the calculated hash.
+ 		 */
+-		if (IS_ENCRYPTED(parent) && fname->cf_name.name &&
++		if (sb_no_casefold_compat_fallback(parent->i_sb) &&
++		    IS_ENCRYPTED(parent) && fname->cf_name.name &&
+ 		    (fname->hinfo.hash != EXT4_DIRENT_HASH(de) ||
+ 		     fname->hinfo.minor_hash != EXT4_DIRENT_MINOR_HASH(de)))
+ 			return false;
+@@ -1595,10 +1596,15 @@ static struct buffer_head *__ext4_find_entry(struct inode *dir,
+ 		 * return.  Otherwise, fall back to doing a search the
+ 		 * old fashioned way.
+ 		 */
+-		if (!IS_ERR(ret) || PTR_ERR(ret) != ERR_BAD_DX_DIR)
++		if (IS_ERR(ret) && PTR_ERR(ret) == ERR_BAD_DX_DIR)
++			dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, "
++				       "falling back\n"));
++		else if (!sb_no_casefold_compat_fallback(dir->i_sb) &&
++			 *res_dir == NULL && IS_CASEFOLDED(dir))
++			dxtrace(printk(KERN_DEBUG "ext4_find_entry: casefold "
++				       "failed, falling back\n"));
++		else
+ 			goto cleanup_and_exit;
+-		dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, "
+-			       "falling back\n"));
+ 		ret = NULL;
+ 	}
+ 	nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
+diff --git a/fs/fhandle.c b/fs/fhandle.c
+index 82df28d45cd70a..ff90f8203015ef 100644
+--- a/fs/fhandle.c
++++ b/fs/fhandle.c
+@@ -176,6 +176,14 @@ static int vfs_dentry_acceptable(void *context, struct dentry *dentry)
+ 	if (!ctx->flags)
+ 		return 1;
+ 
++	/*
++	 * Verify that the decoded dentry itself has a valid id mapping.
++	 * In case the decoded dentry is the mountfd root itself, this
++	 * verifies that the mountfd inode itself has a valid id mapping.
++	 */
++	if (!privileged_wrt_inode_uidgid(user_ns, idmap, d_inode(dentry)))
++		return 0;
++
+ 	/*
+ 	 * It's racy as we're not taking rename_lock but we're able to ignore
+ 	 * permissions and we just need an approximation whether we were able
+diff --git a/fs/fuse/file.c b/fs/fuse/file.c
+index f597f7e68e5014..49659d1b293218 100644
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -3229,7 +3229,7 @@ static ssize_t __fuse_copy_file_range(struct file *file_in, loff_t pos_in,
+ 		.nodeid_out = ff_out->nodeid,
+ 		.fh_out = ff_out->fh,
+ 		.off_out = pos_out,
+-		.len = len,
++		.len = min_t(size_t, len, UINT_MAX & PAGE_MASK),
+ 		.flags = flags
+ 	};
+ 	struct fuse_write_out outarg;
+@@ -3295,6 +3295,9 @@ static ssize_t __fuse_copy_file_range(struct file *file_in, loff_t pos_in,
+ 		fc->no_copy_file_range = 1;
+ 		err = -EOPNOTSUPP;
+ 	}
++	if (!err && outarg.size > len)
++		err = -EIO;
++
+ 	if (err)
+ 		goto out;
+ 
+diff --git a/fs/fuse/passthrough.c b/fs/fuse/passthrough.c
+index bbac547dfcb3c8..6bfd09dda9e3e6 100644
+--- a/fs/fuse/passthrough.c
++++ b/fs/fuse/passthrough.c
+@@ -233,6 +233,11 @@ int fuse_backing_open(struct fuse_conn *fc, struct fuse_backing_map *map)
+ 	if (!file)
+ 		goto out;
+ 
++	/* read/write/splice/mmap passthrough only relevant for regular files */
++	res = d_is_dir(file->f_path.dentry) ? -EISDIR : -EINVAL;
++	if (!d_is_reg(file->f_path.dentry))
++		goto out_fput;
++
+ 	backing_sb = file_inode(file)->i_sb;
+ 	res = -ELOOP;
+ 	if (backing_sb->s_stack_depth >= fc->max_stack_depth)
+diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
+index 2d9d5dfa19b87c..c81cffa72b1c00 100644
+--- a/fs/kernfs/file.c
++++ b/fs/kernfs/file.c
+@@ -70,6 +70,24 @@ static struct kernfs_open_node *of_on(struct kernfs_open_file *of)
+ 					 !list_empty(&of->list));
+ }
+ 
++/* Get active reference to kernfs node for an open file */
++static struct kernfs_open_file *kernfs_get_active_of(struct kernfs_open_file *of)
++{
++	/* Skip if file was already released */
++	if (unlikely(of->released))
++		return NULL;
++
++	if (!kernfs_get_active(of->kn))
++		return NULL;
++
++	return of;
++}
++
++static void kernfs_put_active_of(struct kernfs_open_file *of)
++{
++	return kernfs_put_active(of->kn);
++}
++
+ /**
+  * kernfs_deref_open_node_locked - Get kernfs_open_node corresponding to @kn
+  *
+@@ -139,7 +157,7 @@ static void kernfs_seq_stop_active(struct seq_file *sf, void *v)
+ 
+ 	if (ops->seq_stop)
+ 		ops->seq_stop(sf, v);
+-	kernfs_put_active(of->kn);
++	kernfs_put_active_of(of);
+ }
+ 
+ static void *kernfs_seq_start(struct seq_file *sf, loff_t *ppos)
+@@ -152,7 +170,7 @@ static void *kernfs_seq_start(struct seq_file *sf, loff_t *ppos)
+ 	 * the ops aren't called concurrently for the same open file.
+ 	 */
+ 	mutex_lock(&of->mutex);
+-	if (!kernfs_get_active(of->kn))
++	if (!kernfs_get_active_of(of))
+ 		return ERR_PTR(-ENODEV);
+ 
+ 	ops = kernfs_ops(of->kn);
+@@ -238,7 +256,7 @@ static ssize_t kernfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
+ 	 * the ops aren't called concurrently for the same open file.
+ 	 */
+ 	mutex_lock(&of->mutex);
+-	if (!kernfs_get_active(of->kn)) {
++	if (!kernfs_get_active_of(of)) {
+ 		len = -ENODEV;
+ 		mutex_unlock(&of->mutex);
+ 		goto out_free;
+@@ -252,7 +270,7 @@ static ssize_t kernfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
+ 	else
+ 		len = -EINVAL;
+ 
+-	kernfs_put_active(of->kn);
++	kernfs_put_active_of(of);
+ 	mutex_unlock(&of->mutex);
+ 
+ 	if (len < 0)
+@@ -323,7 +341,7 @@ static ssize_t kernfs_fop_write_iter(struct kiocb *iocb, struct iov_iter *iter)
+ 	 * the ops aren't called concurrently for the same open file.
+ 	 */
+ 	mutex_lock(&of->mutex);
+-	if (!kernfs_get_active(of->kn)) {
++	if (!kernfs_get_active_of(of)) {
+ 		mutex_unlock(&of->mutex);
+ 		len = -ENODEV;
+ 		goto out_free;
+@@ -335,7 +353,7 @@ static ssize_t kernfs_fop_write_iter(struct kiocb *iocb, struct iov_iter *iter)
+ 	else
+ 		len = -EINVAL;
+ 
+-	kernfs_put_active(of->kn);
++	kernfs_put_active_of(of);
+ 	mutex_unlock(&of->mutex);
+ 
+ 	if (len > 0)
+@@ -357,13 +375,13 @@ static void kernfs_vma_open(struct vm_area_struct *vma)
+ 	if (!of->vm_ops)
+ 		return;
+ 
+-	if (!kernfs_get_active(of->kn))
++	if (!kernfs_get_active_of(of))
+ 		return;
+ 
+ 	if (of->vm_ops->open)
+ 		of->vm_ops->open(vma);
+ 
+-	kernfs_put_active(of->kn);
++	kernfs_put_active_of(of);
+ }
+ 
+ static vm_fault_t kernfs_vma_fault(struct vm_fault *vmf)
+@@ -375,14 +393,14 @@ static vm_fault_t kernfs_vma_fault(struct vm_fault *vmf)
+ 	if (!of->vm_ops)
+ 		return VM_FAULT_SIGBUS;
+ 
+-	if (!kernfs_get_active(of->kn))
++	if (!kernfs_get_active_of(of))
+ 		return VM_FAULT_SIGBUS;
+ 
+ 	ret = VM_FAULT_SIGBUS;
+ 	if (of->vm_ops->fault)
+ 		ret = of->vm_ops->fault(vmf);
+ 
+-	kernfs_put_active(of->kn);
++	kernfs_put_active_of(of);
+ 	return ret;
+ }
+ 
+@@ -395,7 +413,7 @@ static vm_fault_t kernfs_vma_page_mkwrite(struct vm_fault *vmf)
+ 	if (!of->vm_ops)
+ 		return VM_FAULT_SIGBUS;
+ 
+-	if (!kernfs_get_active(of->kn))
++	if (!kernfs_get_active_of(of))
+ 		return VM_FAULT_SIGBUS;
+ 
+ 	ret = 0;
+@@ -404,7 +422,7 @@ static vm_fault_t kernfs_vma_page_mkwrite(struct vm_fault *vmf)
+ 	else
+ 		file_update_time(file);
+ 
+-	kernfs_put_active(of->kn);
++	kernfs_put_active_of(of);
+ 	return ret;
+ }
+ 
+@@ -418,14 +436,14 @@ static int kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
+ 	if (!of->vm_ops)
+ 		return -EINVAL;
+ 
+-	if (!kernfs_get_active(of->kn))
++	if (!kernfs_get_active_of(of))
+ 		return -EINVAL;
+ 
+ 	ret = -EINVAL;
+ 	if (of->vm_ops->access)
+ 		ret = of->vm_ops->access(vma, addr, buf, len, write);
+ 
+-	kernfs_put_active(of->kn);
++	kernfs_put_active_of(of);
+ 	return ret;
+ }
+ 
+@@ -455,7 +473,7 @@ static int kernfs_fop_mmap(struct file *file, struct vm_area_struct *vma)
+ 	mutex_lock(&of->mutex);
+ 
+ 	rc = -ENODEV;
+-	if (!kernfs_get_active(of->kn))
++	if (!kernfs_get_active_of(of))
+ 		goto out_unlock;
+ 
+ 	ops = kernfs_ops(of->kn);
+@@ -490,7 +508,7 @@ static int kernfs_fop_mmap(struct file *file, struct vm_area_struct *vma)
+ 	}
+ 	vma->vm_ops = &kernfs_vm_ops;
+ out_put:
+-	kernfs_put_active(of->kn);
++	kernfs_put_active_of(of);
+ out_unlock:
+ 	mutex_unlock(&of->mutex);
+ 
+@@ -852,7 +870,7 @@ static __poll_t kernfs_fop_poll(struct file *filp, poll_table *wait)
+ 	struct kernfs_node *kn = kernfs_dentry_node(filp->f_path.dentry);
+ 	__poll_t ret;
+ 
+-	if (!kernfs_get_active(kn))
++	if (!kernfs_get_active_of(of))
+ 		return DEFAULT_POLLMASK|EPOLLERR|EPOLLPRI;
+ 
+ 	if (kn->attr.ops->poll)
+@@ -860,7 +878,7 @@ static __poll_t kernfs_fop_poll(struct file *filp, poll_table *wait)
+ 	else
+ 		ret = kernfs_generic_poll(of, wait);
+ 
+-	kernfs_put_active(kn);
++	kernfs_put_active_of(of);
+ 	return ret;
+ }
+ 
+@@ -875,7 +893,7 @@ static loff_t kernfs_fop_llseek(struct file *file, loff_t offset, int whence)
+ 	 * the ops aren't called concurrently for the same open file.
+ 	 */
+ 	mutex_lock(&of->mutex);
+-	if (!kernfs_get_active(of->kn)) {
++	if (!kernfs_get_active_of(of)) {
+ 		mutex_unlock(&of->mutex);
+ 		return -ENODEV;
+ 	}
+@@ -886,7 +904,7 @@ static loff_t kernfs_fop_llseek(struct file *file, loff_t offset, int whence)
+ 	else
+ 		ret = generic_file_llseek(file, offset, whence);
+ 
+-	kernfs_put_active(of->kn);
++	kernfs_put_active_of(of);
+ 	mutex_unlock(&of->mutex);
+ 	return ret;
+ }
+diff --git a/fs/nfs/client.c b/fs/nfs/client.c
+index 17edc124d03f22..035474f3fb8f36 100644
+--- a/fs/nfs/client.c
++++ b/fs/nfs/client.c
+@@ -881,6 +881,8 @@ static void nfs_server_set_fsinfo(struct nfs_server *server,
+ 
+ 	if (fsinfo->xattr_support)
+ 		server->caps |= NFS_CAP_XATTR;
++	else
++		server->caps &= ~NFS_CAP_XATTR;
+ #endif
+ }
+ 
+diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
+index c1f1b826888c98..f32f8d7c9122bf 100644
+--- a/fs/nfs/direct.c
++++ b/fs/nfs/direct.c
+@@ -320,6 +320,7 @@ static void nfs_read_sync_pgio_error(struct list_head *head, int error)
+ static void nfs_direct_pgio_init(struct nfs_pgio_header *hdr)
+ {
+ 	get_dreq(hdr->dreq);
++	set_bit(NFS_IOHDR_ODIRECT, &hdr->flags);
+ }
+ 
+ static const struct nfs_pgio_completion_ops nfs_direct_read_completion_ops = {
+@@ -471,8 +472,16 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter,
+ 	if (user_backed_iter(iter))
+ 		dreq->flags = NFS_ODIRECT_SHOULD_DIRTY;
+ 
+-	if (!swap)
+-		nfs_start_io_direct(inode);
++	if (!swap) {
++		result = nfs_start_io_direct(inode);
++		if (result) {
++			/* release the reference that would usually be
++			 * consumed by nfs_direct_read_schedule_iovec()
++			 */
++			nfs_direct_req_release(dreq);
++			goto out_release;
++		}
++	}
+ 
+ 	NFS_I(inode)->read_io += count;
+ 	requested = nfs_direct_read_schedule_iovec(dreq, iter, iocb->ki_pos);
+@@ -1030,7 +1039,14 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter,
+ 		requested = nfs_direct_write_schedule_iovec(dreq, iter, pos,
+ 							    FLUSH_STABLE);
+ 	} else {
+-		nfs_start_io_direct(inode);
++		result = nfs_start_io_direct(inode);
++		if (result) {
++			/* release the reference that would usually be
++			 * consumed by nfs_direct_write_schedule_iovec()
++			 */
++			nfs_direct_req_release(dreq);
++			goto out_release;
++		}
+ 
+ 		requested = nfs_direct_write_schedule_iovec(dreq, iter, pos,
+ 							    FLUSH_COND_STABLE);
+diff --git a/fs/nfs/file.c b/fs/nfs/file.c
+index 153d25d4b810c5..a16a619fb8c33b 100644
+--- a/fs/nfs/file.c
++++ b/fs/nfs/file.c
+@@ -167,7 +167,10 @@ nfs_file_read(struct kiocb *iocb, struct iov_iter *to)
+ 		iocb->ki_filp,
+ 		iov_iter_count(to), (unsigned long) iocb->ki_pos);
+ 
+-	nfs_start_io_read(inode);
++	result = nfs_start_io_read(inode);
++	if (result)
++		return result;
++
+ 	result = nfs_revalidate_mapping(inode, iocb->ki_filp->f_mapping);
+ 	if (!result) {
+ 		result = generic_file_read_iter(iocb, to);
+@@ -188,7 +191,10 @@ nfs_file_splice_read(struct file *in, loff_t *ppos, struct pipe_inode_info *pipe
+ 
+ 	dprintk("NFS: splice_read(%pD2, %zu@%llu)\n", in, len, *ppos);
+ 
+-	nfs_start_io_read(inode);
++	result = nfs_start_io_read(inode);
++	if (result)
++		return result;
++
+ 	result = nfs_revalidate_mapping(inode, in->f_mapping);
+ 	if (!result) {
+ 		result = filemap_splice_read(in, ppos, pipe, len, flags);
+@@ -431,10 +437,11 @@ static void nfs_invalidate_folio(struct folio *folio, size_t offset,
+ 	dfprintk(PAGECACHE, "NFS: invalidate_folio(%lu, %zu, %zu)\n",
+ 		 folio->index, offset, length);
+ 
+-	if (offset != 0 || length < folio_size(folio))
+-		return;
+ 	/* Cancel any unstarted writes on this page */
+-	nfs_wb_folio_cancel(inode, folio);
++	if (offset != 0 || length < folio_size(folio))
++		nfs_wb_folio(inode, folio);
++	else
++		nfs_wb_folio_cancel(inode, folio);
+ 	folio_wait_private_2(folio); /* [DEPRECATED] */
+ 	trace_nfs_invalidate_folio(inode, folio_pos(folio) + offset, length);
+ }
+@@ -669,7 +676,9 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from)
+ 	nfs_clear_invalid_mapping(file->f_mapping);
+ 
+ 	since = filemap_sample_wb_err(file->f_mapping);
+-	nfs_start_io_write(inode);
++	error = nfs_start_io_write(inode);
++	if (error)
++		return error;
+ 	result = generic_write_checks(iocb, from);
+ 	if (result > 0)
+ 		result = generic_perform_write(iocb, from);
+diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
+index b685e763ef11be..6469846971966b 100644
+--- a/fs/nfs/flexfilelayout/flexfilelayout.c
++++ b/fs/nfs/flexfilelayout/flexfilelayout.c
+@@ -292,7 +292,7 @@ ff_lseg_match_mirrors(struct pnfs_layout_segment *l1,
+ 		struct pnfs_layout_segment *l2)
+ {
+ 	const struct nfs4_ff_layout_segment *fl1 = FF_LAYOUT_LSEG(l1);
+-	const struct nfs4_ff_layout_segment *fl2 = FF_LAYOUT_LSEG(l1);
++	const struct nfs4_ff_layout_segment *fl2 = FF_LAYOUT_LSEG(l2);
+ 	u32 i;
+ 
+ 	if (fl1->mirror_array_cnt != fl2->mirror_array_cnt)
+@@ -772,8 +772,11 @@ ff_layout_choose_ds_for_read(struct pnfs_layout_segment *lseg,
+ 			continue;
+ 
+ 		if (check_device &&
+-		    nfs4_test_deviceid_unavailable(&mirror->mirror_ds->id_node))
++		    nfs4_test_deviceid_unavailable(&mirror->mirror_ds->id_node)) {
++			// reinitialize the error state in case if this is the last iteration
++			ds = ERR_PTR(-EINVAL);
+ 			continue;
++		}
+ 
+ 		*best_idx = idx;
+ 		break;
+@@ -803,7 +806,7 @@ ff_layout_choose_best_ds_for_read(struct pnfs_layout_segment *lseg,
+ 	struct nfs4_pnfs_ds *ds;
+ 
+ 	ds = ff_layout_choose_valid_ds_for_read(lseg, start_idx, best_idx);
+-	if (ds)
++	if (!IS_ERR(ds))
+ 		return ds;
+ 	return ff_layout_choose_any_ds_for_read(lseg, start_idx, best_idx);
+ }
+@@ -817,7 +820,7 @@ ff_layout_get_ds_for_read(struct nfs_pageio_descriptor *pgio,
+ 
+ 	ds = ff_layout_choose_best_ds_for_read(lseg, pgio->pg_mirror_idx,
+ 					       best_idx);
+-	if (ds || !pgio->pg_mirror_idx)
++	if (!IS_ERR(ds) || !pgio->pg_mirror_idx)
+ 		return ds;
+ 	return ff_layout_choose_best_ds_for_read(lseg, 0, best_idx);
+ }
+@@ -867,7 +870,7 @@ ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
+ 	req->wb_nio = 0;
+ 
+ 	ds = ff_layout_get_ds_for_read(pgio, &ds_idx);
+-	if (!ds) {
++	if (IS_ERR(ds)) {
+ 		if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
+ 			goto out_mds;
+ 		pnfs_generic_pg_cleanup(pgio);
+@@ -1071,11 +1074,13 @@ static void ff_layout_resend_pnfs_read(struct nfs_pgio_header *hdr)
+ {
+ 	u32 idx = hdr->pgio_mirror_idx + 1;
+ 	u32 new_idx = 0;
++	struct nfs4_pnfs_ds *ds;
+ 
+-	if (ff_layout_choose_any_ds_for_read(hdr->lseg, idx, &new_idx))
+-		ff_layout_send_layouterror(hdr->lseg);
+-	else
++	ds = ff_layout_choose_any_ds_for_read(hdr->lseg, idx, &new_idx);
++	if (IS_ERR(ds))
+ 		pnfs_error_mark_layout_for_return(hdr->inode, hdr->lseg);
++	else
++		ff_layout_send_layouterror(hdr->lseg);
+ 	pnfs_read_resend_pnfs(hdr, new_idx);
+ }
+ 
+diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
+index 8827cb00f86d52..5bf5fb5ddd34c0 100644
+--- a/fs/nfs/inode.c
++++ b/fs/nfs/inode.c
+@@ -761,8 +761,10 @@ nfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
+ 	trace_nfs_setattr_enter(inode);
+ 
+ 	/* Write all dirty data */
+-	if (S_ISREG(inode->i_mode))
++	if (S_ISREG(inode->i_mode)) {
++		nfs_file_block_o_direct(NFS_I(inode));
+ 		nfs_sync_inode(inode);
++	}
+ 
+ 	fattr = nfs_alloc_fattr_with_label(NFS_SERVER(inode));
+ 	if (fattr == NULL) {
+diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
+index 882d804089add1..456b4234028141 100644
+--- a/fs/nfs/internal.h
++++ b/fs/nfs/internal.h
+@@ -6,6 +6,7 @@
+ #include "nfs4_fs.h"
+ #include <linux/fs_context.h>
+ #include <linux/security.h>
++#include <linux/compiler_attributes.h>
+ #include <linux/crc32.h>
+ #include <linux/sunrpc/addr.h>
+ #include <linux/nfs_page.h>
+@@ -516,11 +517,11 @@ extern const struct netfs_request_ops nfs_netfs_ops;
+ #endif
+ 
+ /* io.c */
+-extern void nfs_start_io_read(struct inode *inode);
++extern __must_check int nfs_start_io_read(struct inode *inode);
+ extern void nfs_end_io_read(struct inode *inode);
+-extern void nfs_start_io_write(struct inode *inode);
++extern  __must_check int nfs_start_io_write(struct inode *inode);
+ extern void nfs_end_io_write(struct inode *inode);
+-extern void nfs_start_io_direct(struct inode *inode);
++extern __must_check int nfs_start_io_direct(struct inode *inode);
+ extern void nfs_end_io_direct(struct inode *inode);
+ 
+ static inline bool nfs_file_io_is_buffered(struct nfs_inode *nfsi)
+@@ -528,6 +529,16 @@ static inline bool nfs_file_io_is_buffered(struct nfs_inode *nfsi)
+ 	return test_bit(NFS_INO_ODIRECT, &nfsi->flags) == 0;
+ }
+ 
++/* Must be called with exclusively locked inode->i_rwsem */
++static inline void nfs_file_block_o_direct(struct nfs_inode *nfsi)
++{
++	if (test_bit(NFS_INO_ODIRECT, &nfsi->flags)) {
++		clear_bit(NFS_INO_ODIRECT, &nfsi->flags);
++		inode_dio_wait(&nfsi->vfs_inode);
++	}
++}
++
++
+ /* namespace.c */
+ #define NFS_PATH_CANONICAL 1
+ extern char *nfs_path(char **p, struct dentry *dentry,
+diff --git a/fs/nfs/io.c b/fs/nfs/io.c
+index b5551ed8f648bc..d275b0a250bf3b 100644
+--- a/fs/nfs/io.c
++++ b/fs/nfs/io.c
+@@ -14,15 +14,6 @@
+ 
+ #include "internal.h"
+ 
+-/* Call with exclusively locked inode->i_rwsem */
+-static void nfs_block_o_direct(struct nfs_inode *nfsi, struct inode *inode)
+-{
+-	if (test_bit(NFS_INO_ODIRECT, &nfsi->flags)) {
+-		clear_bit(NFS_INO_ODIRECT, &nfsi->flags);
+-		inode_dio_wait(inode);
+-	}
+-}
+-
+ /**
+  * nfs_start_io_read - declare the file is being used for buffered reads
+  * @inode: file inode
+@@ -39,19 +30,28 @@ static void nfs_block_o_direct(struct nfs_inode *nfsi, struct inode *inode)
+  * Note that buffered writes and truncates both take a write lock on
+  * inode->i_rwsem, meaning that those are serialised w.r.t. the reads.
+  */
+-void
++int
+ nfs_start_io_read(struct inode *inode)
+ {
+ 	struct nfs_inode *nfsi = NFS_I(inode);
++	int err;
++
+ 	/* Be an optimist! */
+-	down_read(&inode->i_rwsem);
++	err = down_read_killable(&inode->i_rwsem);
++	if (err)
++		return err;
+ 	if (test_bit(NFS_INO_ODIRECT, &nfsi->flags) == 0)
+-		return;
++		return 0;
+ 	up_read(&inode->i_rwsem);
++
+ 	/* Slow path.... */
+-	down_write(&inode->i_rwsem);
+-	nfs_block_o_direct(nfsi, inode);
++	err = down_write_killable(&inode->i_rwsem);
++	if (err)
++		return err;
++	nfs_file_block_o_direct(nfsi);
+ 	downgrade_write(&inode->i_rwsem);
++
++	return 0;
+ }
+ 
+ /**
+@@ -74,11 +74,15 @@ nfs_end_io_read(struct inode *inode)
+  * Declare that a buffered read operation is about to start, and ensure
+  * that we block all direct I/O.
+  */
+-void
++int
+ nfs_start_io_write(struct inode *inode)
+ {
+-	down_write(&inode->i_rwsem);
+-	nfs_block_o_direct(NFS_I(inode), inode);
++	int err;
++
++	err = down_write_killable(&inode->i_rwsem);
++	if (!err)
++		nfs_file_block_o_direct(NFS_I(inode));
++	return err;
+ }
+ 
+ /**
+@@ -119,19 +123,28 @@ static void nfs_block_buffered(struct nfs_inode *nfsi, struct inode *inode)
+  * Note that buffered writes and truncates both take a write lock on
+  * inode->i_rwsem, meaning that those are serialised w.r.t. O_DIRECT.
+  */
+-void
++int
+ nfs_start_io_direct(struct inode *inode)
+ {
+ 	struct nfs_inode *nfsi = NFS_I(inode);
++	int err;
++
+ 	/* Be an optimist! */
+-	down_read(&inode->i_rwsem);
++	err = down_read_killable(&inode->i_rwsem);
++	if (err)
++		return err;
+ 	if (test_bit(NFS_INO_ODIRECT, &nfsi->flags) != 0)
+-		return;
++		return 0;
+ 	up_read(&inode->i_rwsem);
++
+ 	/* Slow path.... */
+-	down_write(&inode->i_rwsem);
++	err = down_write_killable(&inode->i_rwsem);
++	if (err)
++		return err;
+ 	nfs_block_buffered(nfsi, inode);
+ 	downgrade_write(&inode->i_rwsem);
++
++	return 0;
+ }
+ 
+ /**
+diff --git a/fs/nfs/localio.c b/fs/nfs/localio.c
+index 21b2b38fae9f3a..82a053304ad594 100644
+--- a/fs/nfs/localio.c
++++ b/fs/nfs/localio.c
+@@ -35,6 +35,7 @@ struct nfs_local_kiocb {
+ 	struct bio_vec		*bvec;
+ 	struct nfs_pgio_header	*hdr;
+ 	struct work_struct	work;
++	void (*aio_complete_work)(struct work_struct *);
+ 	struct nfsd_file	*localio;
+ };
+ 
+@@ -50,6 +51,11 @@ static void nfs_local_fsync_work(struct work_struct *work);
+ static bool localio_enabled __read_mostly = true;
+ module_param(localio_enabled, bool, 0644);
+ 
++static bool localio_O_DIRECT_semantics __read_mostly = false;
++module_param(localio_O_DIRECT_semantics, bool, 0644);
++MODULE_PARM_DESC(localio_O_DIRECT_semantics,
++		 "LOCALIO will use O_DIRECT semantics to filesystem.");
++
+ static inline bool nfs_client_is_local(const struct nfs_client *clp)
+ {
+ 	return !!test_bit(NFS_CS_LOCAL_IO, &clp->cl_flags);
+@@ -274,7 +280,7 @@ nfs_local_iocb_free(struct nfs_local_kiocb *iocb)
+ 
+ static struct nfs_local_kiocb *
+ nfs_local_iocb_alloc(struct nfs_pgio_header *hdr,
+-		     struct nfsd_file *localio, gfp_t flags)
++		     struct file *file, gfp_t flags)
+ {
+ 	struct nfs_local_kiocb *iocb;
+ 
+@@ -287,11 +293,19 @@ nfs_local_iocb_alloc(struct nfs_pgio_header *hdr,
+ 		kfree(iocb);
+ 		return NULL;
+ 	}
+-	init_sync_kiocb(&iocb->kiocb, nfs_to->nfsd_file_file(localio));
++
++	if (localio_O_DIRECT_semantics &&
++	    test_bit(NFS_IOHDR_ODIRECT, &hdr->flags)) {
++		iocb->kiocb.ki_filp = file;
++		iocb->kiocb.ki_flags = IOCB_DIRECT;
++	} else
++		init_sync_kiocb(&iocb->kiocb, file);
++
+ 	iocb->kiocb.ki_pos = hdr->args.offset;
+-	iocb->localio = localio;
+ 	iocb->hdr = hdr;
+ 	iocb->kiocb.ki_flags &= ~IOCB_APPEND;
++	iocb->aio_complete_work = NULL;
++
+ 	return iocb;
+ }
+ 
+@@ -346,6 +360,18 @@ nfs_local_pgio_release(struct nfs_local_kiocb *iocb)
+ 	nfs_local_hdr_release(hdr, hdr->task.tk_ops);
+ }
+ 
++/*
++ * Complete the I/O from iocb->kiocb.ki_complete()
++ *
++ * Note that this function can be called from a bottom half context,
++ * hence we need to queue the rpc_call_done() etc to a workqueue
++ */
++static inline void nfs_local_pgio_aio_complete(struct nfs_local_kiocb *iocb)
++{
++	INIT_WORK(&iocb->work, iocb->aio_complete_work);
++	queue_work(nfsiod_workqueue, &iocb->work);
++}
++
+ static void
+ nfs_local_read_done(struct nfs_local_kiocb *iocb, long status)
+ {
+@@ -368,6 +394,23 @@ nfs_local_read_done(struct nfs_local_kiocb *iocb, long status)
+ 			status > 0 ? status : 0, hdr->res.eof);
+ }
+ 
++static void nfs_local_read_aio_complete_work(struct work_struct *work)
++{
++	struct nfs_local_kiocb *iocb =
++		container_of(work, struct nfs_local_kiocb, work);
++
++	nfs_local_pgio_release(iocb);
++}
++
++static void nfs_local_read_aio_complete(struct kiocb *kiocb, long ret)
++{
++	struct nfs_local_kiocb *iocb =
++		container_of(kiocb, struct nfs_local_kiocb, kiocb);
++
++	nfs_local_read_done(iocb, ret);
++	nfs_local_pgio_aio_complete(iocb); /* Calls nfs_local_read_aio_complete_work */
++}
++
+ static void nfs_local_call_read(struct work_struct *work)
+ {
+ 	struct nfs_local_kiocb *iocb =
+@@ -382,12 +425,13 @@ static void nfs_local_call_read(struct work_struct *work)
+ 	nfs_local_iter_init(&iter, iocb, READ);
+ 
+ 	status = filp->f_op->read_iter(&iocb->kiocb, &iter);
+-	WARN_ON_ONCE(status == -EIOCBQUEUED);
+-
+-	nfs_local_read_done(iocb, status);
+-	nfs_local_pgio_release(iocb);
+ 
+ 	revert_creds(save_cred);
++
++	if (status != -EIOCBQUEUED) {
++		nfs_local_read_done(iocb, status);
++		nfs_local_pgio_release(iocb);
++	}
+ }
+ 
+ static int
+@@ -396,17 +440,28 @@ nfs_do_local_read(struct nfs_pgio_header *hdr,
+ 		  const struct rpc_call_ops *call_ops)
+ {
+ 	struct nfs_local_kiocb *iocb;
++	struct file *file = nfs_to->nfsd_file_file(localio);
++
++	/* Don't support filesystems without read_iter */
++	if (!file->f_op->read_iter)
++		return -EAGAIN;
+ 
+ 	dprintk("%s: vfs_read count=%u pos=%llu\n",
+ 		__func__, hdr->args.count, hdr->args.offset);
+ 
+-	iocb = nfs_local_iocb_alloc(hdr, localio, GFP_KERNEL);
++	iocb = nfs_local_iocb_alloc(hdr, file, GFP_KERNEL);
+ 	if (iocb == NULL)
+ 		return -ENOMEM;
++	iocb->localio = localio;
+ 
+ 	nfs_local_pgio_init(hdr, call_ops);
+ 	hdr->res.eof = false;
+ 
++	if (iocb->kiocb.ki_flags & IOCB_DIRECT) {
++		iocb->kiocb.ki_complete = nfs_local_read_aio_complete;
++		iocb->aio_complete_work = nfs_local_read_aio_complete_work;
++	}
++
+ 	INIT_WORK(&iocb->work, nfs_local_call_read);
+ 	queue_work(nfslocaliod_workqueue, &iocb->work);
+ 
+@@ -536,6 +591,24 @@ nfs_local_write_done(struct nfs_local_kiocb *iocb, long status)
+ 	nfs_local_pgio_done(hdr, status);
+ }
+ 
++static void nfs_local_write_aio_complete_work(struct work_struct *work)
++{
++	struct nfs_local_kiocb *iocb =
++		container_of(work, struct nfs_local_kiocb, work);
++
++	nfs_local_vfs_getattr(iocb);
++	nfs_local_pgio_release(iocb);
++}
++
++static void nfs_local_write_aio_complete(struct kiocb *kiocb, long ret)
++{
++	struct nfs_local_kiocb *iocb =
++		container_of(kiocb, struct nfs_local_kiocb, kiocb);
++
++	nfs_local_write_done(iocb, ret);
++	nfs_local_pgio_aio_complete(iocb); /* Calls nfs_local_write_aio_complete_work */
++}
++
+ static void nfs_local_call_write(struct work_struct *work)
+ {
+ 	struct nfs_local_kiocb *iocb =
+@@ -554,14 +627,15 @@ static void nfs_local_call_write(struct work_struct *work)
+ 	file_start_write(filp);
+ 	status = filp->f_op->write_iter(&iocb->kiocb, &iter);
+ 	file_end_write(filp);
+-	WARN_ON_ONCE(status == -EIOCBQUEUED);
+-
+-	nfs_local_write_done(iocb, status);
+-	nfs_local_vfs_getattr(iocb);
+-	nfs_local_pgio_release(iocb);
+ 
+ 	revert_creds(save_cred);
+ 	current->flags = old_flags;
++
++	if (status != -EIOCBQUEUED) {
++		nfs_local_write_done(iocb, status);
++		nfs_local_vfs_getattr(iocb);
++		nfs_local_pgio_release(iocb);
++	}
+ }
+ 
+ static int
+@@ -570,14 +644,20 @@ nfs_do_local_write(struct nfs_pgio_header *hdr,
+ 		   const struct rpc_call_ops *call_ops)
+ {
+ 	struct nfs_local_kiocb *iocb;
++	struct file *file = nfs_to->nfsd_file_file(localio);
++
++	/* Don't support filesystems without write_iter */
++	if (!file->f_op->write_iter)
++		return -EAGAIN;
+ 
+ 	dprintk("%s: vfs_write count=%u pos=%llu %s\n",
+ 		__func__, hdr->args.count, hdr->args.offset,
+ 		(hdr->args.stable == NFS_UNSTABLE) ?  "unstable" : "stable");
+ 
+-	iocb = nfs_local_iocb_alloc(hdr, localio, GFP_NOIO);
++	iocb = nfs_local_iocb_alloc(hdr, file, GFP_NOIO);
+ 	if (iocb == NULL)
+ 		return -ENOMEM;
++	iocb->localio = localio;
+ 
+ 	switch (hdr->args.stable) {
+ 	default:
+@@ -588,10 +668,16 @@ nfs_do_local_write(struct nfs_pgio_header *hdr,
+ 	case NFS_FILE_SYNC:
+ 		iocb->kiocb.ki_flags |= IOCB_DSYNC|IOCB_SYNC;
+ 	}
++
+ 	nfs_local_pgio_init(hdr, call_ops);
+ 
+ 	nfs_set_local_verifier(hdr->inode, hdr->res.verf, hdr->args.stable);
+ 
++	if (iocb->kiocb.ki_flags & IOCB_DIRECT) {
++		iocb->kiocb.ki_complete = nfs_local_write_aio_complete;
++		iocb->aio_complete_work = nfs_local_write_aio_complete_work;
++	}
++
+ 	INIT_WORK(&iocb->work, nfs_local_call_write);
+ 	queue_work(nfslocaliod_workqueue, &iocb->work);
+ 
+@@ -603,16 +689,9 @@ int nfs_local_doio(struct nfs_client *clp, struct nfsd_file *localio,
+ 		   const struct rpc_call_ops *call_ops)
+ {
+ 	int status = 0;
+-	struct file *filp = nfs_to->nfsd_file_file(localio);
+ 
+ 	if (!hdr->args.count)
+ 		return 0;
+-	/* Don't support filesystems without read_iter/write_iter */
+-	if (!filp->f_op->read_iter || !filp->f_op->write_iter) {
+-		nfs_local_disable(clp);
+-		status = -EAGAIN;
+-		goto out;
+-	}
+ 
+ 	switch (hdr->rw_mode) {
+ 	case FMODE_READ:
+@@ -626,8 +705,10 @@ int nfs_local_doio(struct nfs_client *clp, struct nfsd_file *localio,
+ 			hdr->rw_mode);
+ 		status = -EINVAL;
+ 	}
+-out:
++
+ 	if (status != 0) {
++		if (status == -EAGAIN)
++			nfs_local_disable(clp);
+ 		nfs_to_nfsd_file_put_local(localio);
+ 		hdr->task.tk_status = status;
+ 		nfs_local_hdr_release(hdr, call_ops);
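
The completion plumbing added to localio above follows a standard kernel
pattern: a kiocb's ->ki_complete() callback may fire in bottom-half context, so
anything that can sleep (the getattr, releasing the pgio header) is deferred to
a workqueue through a work item embedded next to the kiocb. A self-contained
sketch of the pattern (the demo_* names are hypothetical; a real user would
install demo_aio_complete as iocb->ki_complete before submission):

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_kiocb {
	struct kiocb		kiocb;
	struct work_struct	work;
};

static void demo_complete_work(struct work_struct *work)
{
	struct demo_kiocb *io = container_of(work, struct demo_kiocb, work);

	/* process context: sleeping cleanup is safe here */
	kfree(io);
}

static void demo_aio_complete(struct kiocb *kiocb, long ret)
{
	struct demo_kiocb *io = container_of(kiocb, struct demo_kiocb, kiocb);

	/* possibly softirq context: do no sleeping work, just queue it */
	INIT_WORK(&io->work, demo_complete_work);
	schedule_work(&io->work);
}
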
+diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
+index 9f0d69e6526443..582cf8a469560b 100644
+--- a/fs/nfs/nfs42proc.c
++++ b/fs/nfs/nfs42proc.c
+@@ -112,6 +112,7 @@ static int nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
+ 	exception.inode = inode;
+ 	exception.state = lock->open_context->state;
+ 
++	nfs_file_block_o_direct(NFS_I(inode));
+ 	err = nfs_sync_inode(inode);
+ 	if (err)
+ 		goto out;
+@@ -355,6 +356,7 @@ static ssize_t _nfs42_proc_copy(struct file *src,
+ 		return status;
+ 	}
+ 
++	nfs_file_block_o_direct(NFS_I(dst_inode));
+ 	status = nfs_sync_inode(dst_inode);
+ 	if (status)
+ 		return status;
+diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
+index 1cd9652f3c2803..453d08a9c4b4dc 100644
+--- a/fs/nfs/nfs4file.c
++++ b/fs/nfs/nfs4file.c
+@@ -283,9 +283,11 @@ static loff_t nfs42_remap_file_range(struct file *src_file, loff_t src_off,
+ 
+ 	/* flush all pending writes on both src and dst so that server
+ 	 * has the latest data */
++	nfs_file_block_o_direct(NFS_I(src_inode));
+ 	ret = nfs_sync_inode(src_inode);
+ 	if (ret)
+ 		goto out_unlock;
++	nfs_file_block_o_direct(NFS_I(dst_inode));
+ 	ret = nfs_sync_inode(dst_inode);
+ 	if (ret)
+ 		goto out_unlock;
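
The nfs42proc.c and nfs4file.c hunks above share one pattern: before
nfs_sync_inode() flushes dirty pages for a server-side operation (fallocate,
copy, clone), the inode is switched out of O_DIRECT mode so no direct I/O can
run concurrently with that flush. Ordering sketch (assuming
nfs_file_block_o_direct() from this patch; error handling varies per call
site):

	/* caller typically holds i_rwsem at this point */
	nfs_file_block_o_direct(NFS_I(inode));	/* 1. stop O_DIRECT */
	ret = nfs_sync_inode(inode);		/* 2. only then flush */
	if (ret)
		return ret;
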
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index e6b7cbc06c9c8e..ea92483d5e71ec 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -3989,8 +3989,10 @@ static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *f
+ 				     res.attr_bitmask[2];
+ 		}
+ 		memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask));
+-		server->caps &= ~(NFS_CAP_ACLS | NFS_CAP_HARDLINKS |
+-				  NFS_CAP_SYMLINKS| NFS_CAP_SECURITY_LABEL);
++		server->caps &=
++			~(NFS_CAP_ACLS | NFS_CAP_HARDLINKS | NFS_CAP_SYMLINKS |
++			  NFS_CAP_SECURITY_LABEL | NFS_CAP_FS_LOCATIONS |
++			  NFS_CAP_OPEN_XOR | NFS_CAP_DELEGTIME);
+ 		server->fattr_valid = NFS_ATTR_FATTR_V4;
+ 		if (res.attr_bitmask[0] & FATTR4_WORD0_ACL &&
+ 				res.acl_bitmask & ACL4_SUPPORT_ALLOW_ACL)
+@@ -4064,7 +4066,6 @@ int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
+ 	};
+ 	int err;
+ 
+-	nfs_server_set_init_caps(server);
+ 	do {
+ 		err = nfs4_handle_exception(server,
+ 				_nfs4_server_capabilities(server, fhandle),
+diff --git a/fs/nfs/write.c b/fs/nfs/write.c
+index 2b6b3542405c30..fd86546fafd3f8 100644
+--- a/fs/nfs/write.c
++++ b/fs/nfs/write.c
+@@ -2058,6 +2058,7 @@ int nfs_wb_folio_cancel(struct inode *inode, struct folio *folio)
+ 		 * release it */
+ 		nfs_inode_remove_request(req);
+ 		nfs_unlock_and_release_request(req);
++		folio_cancel_dirty(folio);
+ 	}
+ 
+ 	return ret;
+diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c
+index f7672472fa8279..5e86c7e2c82125 100644
+--- a/fs/ocfs2/extent_map.c
++++ b/fs/ocfs2/extent_map.c
+@@ -696,6 +696,8 @@ int ocfs2_extent_map_get_blocks(struct inode *inode, u64 v_blkno, u64 *p_blkno,
+  * it not only handles the fiemap for inlined files, but also deals
+  * with the fast symlink, cause they have no difference for extent
+  * mapping per se.
++ *
++ * Must be called with ip_alloc_sem semaphore held.
+  */
+ static int ocfs2_fiemap_inline(struct inode *inode, struct buffer_head *di_bh,
+ 			       struct fiemap_extent_info *fieinfo,
+@@ -707,6 +709,7 @@ static int ocfs2_fiemap_inline(struct inode *inode, struct buffer_head *di_bh,
+ 	u64 phys;
+ 	u32 flags = FIEMAP_EXTENT_DATA_INLINE|FIEMAP_EXTENT_LAST;
+ 	struct ocfs2_inode_info *oi = OCFS2_I(inode);
++	lockdep_assert_held_read(&oi->ip_alloc_sem);
+ 
+ 	di = (struct ocfs2_dinode *)di_bh->b_data;
+ 	if (ocfs2_inode_is_fast_symlink(inode))
+@@ -722,8 +725,11 @@ static int ocfs2_fiemap_inline(struct inode *inode, struct buffer_head *di_bh,
+ 			phys += offsetof(struct ocfs2_dinode,
+ 					 id2.i_data.id_data);
+ 
++		/* Release the ip_alloc_sem to prevent deadlock on page fault */
++		up_read(&OCFS2_I(inode)->ip_alloc_sem);
+ 		ret = fiemap_fill_next_extent(fieinfo, 0, phys, id_count,
+ 					      flags);
++		down_read(&OCFS2_I(inode)->ip_alloc_sem);
+ 		if (ret < 0)
+ 			return ret;
+ 	}
+@@ -792,9 +798,11 @@ int ocfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+ 		len_bytes = (u64)le16_to_cpu(rec.e_leaf_clusters) << osb->s_clustersize_bits;
+ 		phys_bytes = le64_to_cpu(rec.e_blkno) << osb->sb->s_blocksize_bits;
+ 		virt_bytes = (u64)le32_to_cpu(rec.e_cpos) << osb->s_clustersize_bits;
+-
++		/* Release the ip_alloc_sem to prevent deadlock on page fault */
++		up_read(&OCFS2_I(inode)->ip_alloc_sem);
+ 		ret = fiemap_fill_next_extent(fieinfo, virt_bytes, phys_bytes,
+ 					      len_bytes, fe_flags);
++		down_read(&OCFS2_I(inode)->ip_alloc_sem);
+ 		if (ret)
+ 			break;
+ 
+diff --git a/fs/proc/generic.c b/fs/proc/generic.c
+index a87a9404e0d0c8..eb49beff69bcb8 100644
+--- a/fs/proc/generic.c
++++ b/fs/proc/generic.c
+@@ -388,7 +388,8 @@ struct proc_dir_entry *proc_register(struct proc_dir_entry *dir,
+ 	if (proc_alloc_inum(&dp->low_ino))
+ 		goto out_free_entry;
+ 
+-	pde_set_flags(dp);
++	if (!S_ISDIR(dp->mode))
++		pde_set_flags(dp);
+ 
+ 	write_lock(&proc_subdir_lock);
+ 	dp->parent = dir;
+diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
+index 4c1a39dcb624e9..c4e705b794c535 100644
+--- a/include/linux/compiler-clang.h
++++ b/include/linux/compiler-clang.h
+@@ -18,23 +18,42 @@
+ #define KASAN_ABI_VERSION 5
+ 
+ /*
++ * Clang 22 added preprocessor macros to match GCC, in hopes of eventually
++ * dropping __has_feature support for sanitizers:
++ * https://github.com/llvm/llvm-project/commit/568c23bbd3303518c5056d7f03444dae4fdc8a9c
++ * Create these macros for older versions of clang so that it is easy to clean
++ * up once the minimum supported version of LLVM for building the kernel always
++ * creates these macros.
++ *
+  * Note: Checking __has_feature(*_sanitizer) is only true if the feature is
+  * enabled. Therefore it is not required to additionally check defined(CONFIG_*)
+  * to avoid adding redundant attributes in other configurations.
+  */
++#if __has_feature(address_sanitizer) && !defined(__SANITIZE_ADDRESS__)
++#define __SANITIZE_ADDRESS__
++#endif
++#if __has_feature(hwaddress_sanitizer) && !defined(__SANITIZE_HWADDRESS__)
++#define __SANITIZE_HWADDRESS__
++#endif
++#if __has_feature(thread_sanitizer) && !defined(__SANITIZE_THREAD__)
++#define __SANITIZE_THREAD__
++#endif
+ 
+-#if __has_feature(address_sanitizer) || __has_feature(hwaddress_sanitizer)
+-/* Emulate GCC's __SANITIZE_ADDRESS__ flag */
++/*
++ * Treat __SANITIZE_HWADDRESS__ the same as __SANITIZE_ADDRESS__ in the kernel.
++ */
++#ifdef __SANITIZE_HWADDRESS__
+ #define __SANITIZE_ADDRESS__
++#endif
++
++#ifdef __SANITIZE_ADDRESS__
+ #define __no_sanitize_address \
+ 		__attribute__((no_sanitize("address", "hwaddress")))
+ #else
+ #define __no_sanitize_address
+ #endif
+ 
+-#if __has_feature(thread_sanitizer)
+-/* emulate gcc's __SANITIZE_THREAD__ flag */
+-#define __SANITIZE_THREAD__
++#ifdef __SANITIZE_THREAD__
+ #define __no_sanitize_thread \
+ 		__attribute__((no_sanitize("thread")))
+ #else
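
After the compiler-clang.h rework above, kernel code can test the GCC-style
__SANITIZE_*__ macros uniformly under both compilers instead of special-casing
__has_feature(). A hypothetical consumer:

#ifdef __SANITIZE_ADDRESS__
/* excluded from ASan/HWASan instrumentation on GCC and Clang alike */
static __no_sanitize_address void touch_unpoisoned(char *p)
{
	*p = 0;
}
#endif

#ifdef __SANITIZE_THREAD__
static __no_sanitize_thread void racy_stat_bump(unsigned long *ctr)
{
	(*ctr)++;	/* intentionally unsynchronized counter */
}
#endif
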
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index a6de8d93838d1c..37a01c9d965836 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -1197,11 +1197,19 @@ extern int send_sigurg(struct file *file);
+ #define SB_NOUSER       BIT(31)
+ 
+ /* These flags relate to encoding and casefolding */
+-#define SB_ENC_STRICT_MODE_FL	(1 << 0)
++#define SB_ENC_STRICT_MODE_FL		(1 << 0)
++#define SB_ENC_NO_COMPAT_FALLBACK_FL	(1 << 1)
+ 
+ #define sb_has_strict_encoding(sb) \
+ 	(sb->s_encoding_flags & SB_ENC_STRICT_MODE_FL)
+ 
++#if IS_ENABLED(CONFIG_UNICODE)
++#define sb_no_casefold_compat_fallback(sb) \
++	(sb->s_encoding_flags & SB_ENC_NO_COMPAT_FALLBACK_FL)
++#else
++#define sb_no_casefold_compat_fallback(sb) (1)
++#endif
++
+ /*
+  *	Umount options
+  */
+diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
+index 12d8e47bc5a388..b48d94f099657c 100644
+--- a/include/linux/nfs_xdr.h
++++ b/include/linux/nfs_xdr.h
+@@ -1637,6 +1637,7 @@ enum {
+ 	NFS_IOHDR_RESEND_PNFS,
+ 	NFS_IOHDR_RESEND_MDS,
+ 	NFS_IOHDR_UNSTABLE_WRITES,
++	NFS_IOHDR_ODIRECT,
+ };
+ 
+ struct nfs_io_completion;
+diff --git a/include/linux/pgalloc.h b/include/linux/pgalloc.h
+new file mode 100644
+index 00000000000000..9174fa59bbc54d
+--- /dev/null
++++ b/include/linux/pgalloc.h
+@@ -0,0 +1,29 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef _LINUX_PGALLOC_H
++#define _LINUX_PGALLOC_H
++
++#include <linux/pgtable.h>
++#include <asm/pgalloc.h>
++
++/*
++ * {pgd,p4d}_populate_kernel() are defined as macros to allow
++ * compile-time optimization based on the configured page table levels.
++ * Without this, linking may fail because callers (e.g., KASAN) may rely
++ * on calls to these functions being optimized away when passing symbols
++ * that exist only for certain page table levels.
++ */
++#define pgd_populate_kernel(addr, pgd, p4d)				\
++	do {								\
++		pgd_populate(&init_mm, pgd, p4d);			\
++		if (ARCH_PAGE_TABLE_SYNC_MASK & PGTBL_PGD_MODIFIED)	\
++			arch_sync_kernel_mappings(addr, addr);		\
++	} while (0)
++
++#define p4d_populate_kernel(addr, p4d, pud)				\
++	do {								\
++		p4d_populate(&init_mm, p4d, pud);			\
++		if (ARCH_PAGE_TABLE_SYNC_MASK & PGTBL_P4D_MODIFIED)	\
++			arch_sync_kernel_mappings(addr, addr);		\
++	} while (0)
++
++#endif /* _LINUX_PGALLOC_H */
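
For context on the new header: with the default ARCH_PAGE_TABLE_SYNC_MASK of 0,
the sync branch in both wrappers is compiled out and they reduce to plain
pgd_populate()/p4d_populate(). An architecture that must mirror top-level
kernel mappings into other page tables would opt in roughly like this
(illustrative sketch following the existing vmalloc convention, not part of
this patch):

/* in an arch pgtable header */
#define ARCH_PAGE_TABLE_SYNC_MASK	PGTBL_PGD_MODIFIED

/* called whenever a masked level of init_mm's tables changes, so the
 * architecture can propagate the new entries to other page tables */
void arch_sync_kernel_mappings(unsigned long start, unsigned long end);
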
+diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
+index 1ba6e32909f89d..d2ae79f7c55258 100644
+--- a/include/linux/pgtable.h
++++ b/include/linux/pgtable.h
+@@ -1699,8 +1699,8 @@ static inline int pmd_protnone(pmd_t pmd)
+ 
+ /*
+  * Architectures can set this mask to a combination of PGTBL_P?D_MODIFIED values
+- * and let generic vmalloc and ioremap code know when arch_sync_kernel_mappings()
+- * needs to be called.
++ * and let generic vmalloc, ioremap and page table update code know when
++ * arch_sync_kernel_mappings() needs to be called.
+  */
+ #ifndef ARCH_PAGE_TABLE_SYNC_MASK
+ #define ARCH_PAGE_TABLE_SYNC_MASK 0
+@@ -1833,10 +1833,11 @@ static inline bool arch_has_pfn_modify_check(void)
+ /*
+  * Page Table Modification bits for pgtbl_mod_mask.
+  *
+- * These are used by the p?d_alloc_track*() set of functions an in the generic
+- * vmalloc/ioremap code to track at which page-table levels entries have been
+- * modified. Based on that the code can better decide when vmalloc and ioremap
+- * mapping changes need to be synchronized to other page-tables in the system.
++ * These are used by the p?d_alloc_track*() and p*d_populate_kernel()
++ * functions in the generic vmalloc, ioremap and page table update code
++ * to track at which page-table levels entries have been modified.
++ * Based on that the code can better decide when page table changes need
++ * to be synchronized to other page-tables in the system.
+  */
+ #define		__PGTBL_PGD_MODIFIED	0
+ #define		__PGTBL_P4D_MODIFIED	1
+diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
+index 757abcb54d117d..ee550229d4ffad 100644
+--- a/include/net/netfilter/nf_tables.h
++++ b/include/net/netfilter/nf_tables.h
+@@ -459,19 +459,17 @@ struct nft_set_ext;
+  *	control plane functions.
+  */
+ struct nft_set_ops {
+-	bool				(*lookup)(const struct net *net,
++	const struct nft_set_ext *	(*lookup)(const struct net *net,
+ 						  const struct nft_set *set,
+-						  const u32 *key,
+-						  const struct nft_set_ext **ext);
+-	bool				(*update)(struct nft_set *set,
++						  const u32 *key);
++	const struct nft_set_ext *	(*update)(struct nft_set *set,
+ 						  const u32 *key,
+ 						  struct nft_elem_priv *
+ 							(*new)(struct nft_set *,
+ 							       const struct nft_expr *,
+ 							       struct nft_regs *),
+ 						  const struct nft_expr *expr,
+-						  struct nft_regs *regs,
+-						  const struct nft_set_ext **ext);
++						  struct nft_regs *regs);
+ 	bool				(*delete)(const struct nft_set *set,
+ 						  const u32 *key);
+ 
+@@ -1911,7 +1909,6 @@ struct nftables_pernet {
+ 	struct mutex		commit_mutex;
+ 	u64			table_handle;
+ 	u64			tstamp;
+-	unsigned int		base_seq;
+ 	unsigned int		gc_seq;
+ 	u8			validate_state;
+ 	struct work_struct	destroy_work;
+diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h
+index 03b6165756fc5d..04699eac5b5243 100644
+--- a/include/net/netfilter/nf_tables_core.h
++++ b/include/net/netfilter/nf_tables_core.h
+@@ -94,34 +94,35 @@ extern const struct nft_set_type nft_set_pipapo_type;
+ extern const struct nft_set_type nft_set_pipapo_avx2_type;
+ 
+ #ifdef CONFIG_MITIGATION_RETPOLINE
+-bool nft_rhash_lookup(const struct net *net, const struct nft_set *set,
+-		      const u32 *key, const struct nft_set_ext **ext);
+-bool nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
+-		       const u32 *key, const struct nft_set_ext **ext);
+-bool nft_bitmap_lookup(const struct net *net, const struct nft_set *set,
+-		       const u32 *key, const struct nft_set_ext **ext);
+-bool nft_hash_lookup_fast(const struct net *net,
+-			  const struct nft_set *set,
+-			  const u32 *key, const struct nft_set_ext **ext);
+-bool nft_hash_lookup(const struct net *net, const struct nft_set *set,
+-		     const u32 *key, const struct nft_set_ext **ext);
+-bool nft_set_do_lookup(const struct net *net, const struct nft_set *set,
+-		       const u32 *key, const struct nft_set_ext **ext);
+-#else
+-static inline bool
+-nft_set_do_lookup(const struct net *net, const struct nft_set *set,
+-		  const u32 *key, const struct nft_set_ext **ext)
+-{
+-	return set->ops->lookup(net, set, key, ext);
+-}
++const struct nft_set_ext *
++nft_rhash_lookup(const struct net *net, const struct nft_set *set,
++		 const u32 *key);
++const struct nft_set_ext *
++nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
++		  const u32 *key);
++const struct nft_set_ext *
++nft_bitmap_lookup(const struct net *net, const struct nft_set *set,
++		  const u32 *key);
++const struct nft_set_ext *
++nft_hash_lookup_fast(const struct net *net, const struct nft_set *set,
++		     const u32 *key);
++const struct nft_set_ext *
++nft_hash_lookup(const struct net *net, const struct nft_set *set,
++		const u32 *key);
+ #endif
+ 
++const struct nft_set_ext *
++nft_set_do_lookup(const struct net *net, const struct nft_set *set,
++		  const u32 *key);
++
+ /* called from nft_pipapo_avx2.c */
+-bool nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
+-		       const u32 *key, const struct nft_set_ext **ext);
++const struct nft_set_ext *
++nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
++		  const u32 *key);
+ /* called from nft_set_pipapo.c */
+-bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
+-			    const u32 *key, const struct nft_set_ext **ext);
++const struct nft_set_ext *
++nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
++			const u32 *key);
+ 
+ void nft_counter_init_seqcount(void);
+ 
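
The nft_set_ops change above replaces the bool-plus-out-parameter convention
with a direct return of the extension pointer, where NULL now signals a lookup
miss. Caller conversion sketch (comparative, not a literal hunk from this
patch):

	const struct nft_set_ext *ext;

	/* before: two-channel result */
	if (!set->ops->lookup(net, set, key, &ext))
		return false;	/* miss */

	/* after: one return value carries both outcomes */
	ext = nft_set_do_lookup(net, set, key);
	if (!ext)
		return false;	/* miss */
	/* ... use ext ... */
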
+diff --git a/include/net/netns/nftables.h b/include/net/netns/nftables.h
+index cc8060c017d5fb..99dd166c5d07c3 100644
+--- a/include/net/netns/nftables.h
++++ b/include/net/netns/nftables.h
+@@ -3,6 +3,7 @@
+ #define _NETNS_NFTABLES_H_
+ 
+ struct netns_nftables {
++	unsigned int		base_seq;
+ 	u8			gencursor;
+ };
+ 
+diff --git a/include/trace/events/dma.h b/include/trace/events/dma.h
+index b0f41265191c39..63b55ccc4f00cd 100644
+--- a/include/trace/events/dma.h
++++ b/include/trace/events/dma.h
+@@ -114,10 +114,11 @@ DEFINE_EVENT(dma_unmap, dma_unmap_resource,
+ 		 enum dma_data_direction dir, unsigned long attrs),
+ 	TP_ARGS(dev, addr, size, dir, attrs));
+ 
+-TRACE_EVENT(dma_alloc,
++DECLARE_EVENT_CLASS(dma_alloc_class,
+ 	TP_PROTO(struct device *dev, void *virt_addr, dma_addr_t dma_addr,
+-		 size_t size, gfp_t flags, unsigned long attrs),
+-	TP_ARGS(dev, virt_addr, dma_addr, size, flags, attrs),
++		 size_t size, enum dma_data_direction dir, gfp_t flags,
++		 unsigned long attrs),
++	TP_ARGS(dev, virt_addr, dma_addr, size, dir, flags, attrs),
+ 
+ 	TP_STRUCT__entry(
+ 		__string(device, dev_name(dev))
+@@ -125,6 +126,7 @@ TRACE_EVENT(dma_alloc,
+ 		__field(u64, dma_addr)
+ 		__field(size_t, size)
+ 		__field(gfp_t, flags)
++		__field(enum dma_data_direction, dir)
+ 		__field(unsigned long, attrs)
+ 	),
+ 
+@@ -137,8 +139,9 @@ TRACE_EVENT(dma_alloc,
+ 		__entry->attrs = attrs;
+ 	),
+ 
+-	TP_printk("%s dma_addr=%llx size=%zu virt_addr=%p flags=%s attrs=%s",
++	TP_printk("%s dir=%s dma_addr=%llx size=%zu virt_addr=%p flags=%s attrs=%s",
+ 		__get_str(device),
++		decode_dma_data_direction(__entry->dir),
+ 		__entry->dma_addr,
+ 		__entry->size,
+ 		__entry->virt_addr,
+@@ -146,16 +149,69 @@ TRACE_EVENT(dma_alloc,
+ 		decode_dma_attrs(__entry->attrs))
+ );
+ 
+-TRACE_EVENT(dma_free,
++#define DEFINE_ALLOC_EVENT(name) \
++DEFINE_EVENT(dma_alloc_class, name, \
++	TP_PROTO(struct device *dev, void *virt_addr, dma_addr_t dma_addr, \
++		 size_t size, enum dma_data_direction dir, gfp_t flags, \
++		 unsigned long attrs), \
++	TP_ARGS(dev, virt_addr, dma_addr, size, dir, flags, attrs))
++
++DEFINE_ALLOC_EVENT(dma_alloc);
++DEFINE_ALLOC_EVENT(dma_alloc_pages);
++DEFINE_ALLOC_EVENT(dma_alloc_sgt_err);
++
++TRACE_EVENT(dma_alloc_sgt,
++	TP_PROTO(struct device *dev, struct sg_table *sgt, size_t size,
++		 enum dma_data_direction dir, gfp_t flags, unsigned long attrs),
++	TP_ARGS(dev, sgt, size, dir, flags, attrs),
++
++	TP_STRUCT__entry(
++		__string(device, dev_name(dev))
++		__dynamic_array(u64, phys_addrs, sgt->orig_nents)
++		__field(u64, dma_addr)
++		__field(size_t, size)
++		__field(enum dma_data_direction, dir)
++		__field(gfp_t, flags)
++		__field(unsigned long, attrs)
++	),
++
++	TP_fast_assign(
++		struct scatterlist *sg;
++		int i;
++
++		__assign_str(device);
++		for_each_sg(sgt->sgl, sg, sgt->orig_nents, i)
++			((u64 *)__get_dynamic_array(phys_addrs))[i] = sg_phys(sg);
++		__entry->dma_addr = sg_dma_address(sgt->sgl);
++		__entry->size = size;
++		__entry->dir = dir;
++		__entry->flags = flags;
++		__entry->attrs = attrs;
++	),
++
++	TP_printk("%s dir=%s dma_addr=%llx size=%zu phys_addrs=%s flags=%s attrs=%s",
++		__get_str(device),
++		decode_dma_data_direction(__entry->dir),
++		__entry->dma_addr,
++		__entry->size,
++		__print_array(__get_dynamic_array(phys_addrs),
++			      __get_dynamic_array_len(phys_addrs) /
++				sizeof(u64), sizeof(u64)),
++		show_gfp_flags(__entry->flags),
++		decode_dma_attrs(__entry->attrs))
++);
++
++DECLARE_EVENT_CLASS(dma_free_class,
+ 	TP_PROTO(struct device *dev, void *virt_addr, dma_addr_t dma_addr,
+-		 size_t size, unsigned long attrs),
+-	TP_ARGS(dev, virt_addr, dma_addr, size, attrs),
++		 size_t size, enum dma_data_direction dir, unsigned long attrs),
++	TP_ARGS(dev, virt_addr, dma_addr, size, dir, attrs),
+ 
+ 	TP_STRUCT__entry(
+ 		__string(device, dev_name(dev))
+ 		__field(void *, virt_addr)
+ 		__field(u64, dma_addr)
+ 		__field(size_t, size)
++		__field(enum dma_data_direction, dir)
+ 		__field(unsigned long, attrs)
+ 	),
+ 
+@@ -164,17 +220,63 @@ TRACE_EVENT(dma_free,
+ 		__entry->virt_addr = virt_addr;
+ 		__entry->dma_addr = dma_addr;
+ 		__entry->size = size;
++		__entry->dir = dir;
+ 		__entry->attrs = attrs;
+ 	),
+ 
+-	TP_printk("%s dma_addr=%llx size=%zu virt_addr=%p attrs=%s",
++	TP_printk("%s dir=%s dma_addr=%llx size=%zu virt_addr=%p attrs=%s",
+ 		__get_str(device),
++		decode_dma_data_direction(__entry->dir),
+ 		__entry->dma_addr,
+ 		__entry->size,
+ 		__entry->virt_addr,
+ 		decode_dma_attrs(__entry->attrs))
+ );
+ 
++#define DEFINE_FREE_EVENT(name) \
++DEFINE_EVENT(dma_free_class, name, \
++	TP_PROTO(struct device *dev, void *virt_addr, dma_addr_t dma_addr, \
++		 size_t size, enum dma_data_direction dir, unsigned long attrs), \
++	TP_ARGS(dev, virt_addr, dma_addr, size, dir, attrs))
++
++DEFINE_FREE_EVENT(dma_free);
++DEFINE_FREE_EVENT(dma_free_pages);
++
++TRACE_EVENT(dma_free_sgt,
++	TP_PROTO(struct device *dev, struct sg_table *sgt, size_t size,
++		 enum dma_data_direction dir),
++	TP_ARGS(dev, sgt, size, dir),
++
++	TP_STRUCT__entry(
++		__string(device, dev_name(dev))
++		__dynamic_array(u64, phys_addrs, sgt->orig_nents)
++		__field(u64, dma_addr)
++		__field(size_t, size)
++		__field(enum dma_data_direction, dir)
++	),
++
++	TP_fast_assign(
++		struct scatterlist *sg;
++		int i;
++
++		__assign_str(device);
++		for_each_sg(sgt->sgl, sg, sgt->orig_nents, i)
++			((u64 *)__get_dynamic_array(phys_addrs))[i] = sg_phys(sg);
++		__entry->dma_addr = sg_dma_address(sgt->sgl);
++		__entry->size = size;
++		__entry->dir = dir;
++	),
++
++	TP_printk("%s dir=%s dma_addr=%llx size=%zu phys_addrs=%s",
++		__get_str(device),
++		decode_dma_data_direction(__entry->dir),
++		__entry->dma_addr,
++		__entry->size,
++		__print_array(__get_dynamic_array(phys_addrs),
++			      __get_dynamic_array_len(phys_addrs) /
++				sizeof(u64), sizeof(u64)))
++);
++
+ TRACE_EVENT(dma_map_sg,
+ 	TP_PROTO(struct device *dev, struct scatterlist *sgl, int nents,
+ 		 int ents, enum dma_data_direction dir, unsigned long attrs),
+@@ -221,6 +323,41 @@ TRACE_EVENT(dma_map_sg,
+ 		decode_dma_attrs(__entry->attrs))
+ );
+ 
++TRACE_EVENT(dma_map_sg_err,
++	TP_PROTO(struct device *dev, struct scatterlist *sgl, int nents,
++		 int err, enum dma_data_direction dir, unsigned long attrs),
++	TP_ARGS(dev, sgl, nents, err, dir, attrs),
++
++	TP_STRUCT__entry(
++		__string(device, dev_name(dev))
++		__dynamic_array(u64, phys_addrs, nents)
++		__field(int, err)
++		__field(enum dma_data_direction, dir)
++		__field(unsigned long, attrs)
++	),
++
++	TP_fast_assign(
++		struct scatterlist *sg;
++		int i;
++
++		__assign_str(device);
++		for_each_sg(sgl, sg, nents, i)
++			((u64 *)__get_dynamic_array(phys_addrs))[i] = sg_phys(sg);
++		__entry->err = err;
++		__entry->dir = dir;
++		__entry->attrs = attrs;
++	),
++
++	TP_printk("%s dir=%s dma_addrs=%s err=%d attrs=%s",
++		__get_str(device),
++		decode_dma_data_direction(__entry->dir),
++		__print_array(__get_dynamic_array(phys_addrs),
++			      __get_dynamic_array_len(phys_addrs) /
++				sizeof(u64), sizeof(u64)),
++		__entry->err,
++		decode_dma_attrs(__entry->attrs))
++);
++
+ TRACE_EVENT(dma_unmap_sg,
+ 	TP_PROTO(struct device *dev, struct scatterlist *sgl, int nents,
+ 		 enum dma_data_direction dir, unsigned long attrs),
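
The trace refactor above uses the usual tracepoint idiom: DECLARE_EVENT_CLASS()
defines the record layout, assignment logic and print format once, and each
DEFINE_EVENT() stamps out a named tracepoint sharing that class; this is how
dma_alloc, dma_alloc_pages and dma_alloc_sgt_err stay in lockstep. Minimal
shape of the idiom (hypothetical demo event; a real one lives inside a trace
header with the surrounding TRACE_SYSTEM boilerplate):

DECLARE_EVENT_CLASS(demo_class,
	TP_PROTO(struct device *dev, size_t size),
	TP_ARGS(dev, size),
	TP_STRUCT__entry(
		__string(device, dev_name(dev))
		__field(size_t, size)
	),
	TP_fast_assign(
		__assign_str(device);
		__entry->size = size;
	),
	TP_printk("%s size=%zu", __get_str(device), __entry->size)
);

DEFINE_EVENT(demo_class, demo_event,
	TP_PROTO(struct device *dev, size_t size),
	TP_ARGS(dev, size));
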
+diff --git a/include/uapi/linux/mptcp_pm.h b/include/uapi/linux/mptcp_pm.h
+index 50589e5dd6a38a..6ac84b2f636ca2 100644
+--- a/include/uapi/linux/mptcp_pm.h
++++ b/include/uapi/linux/mptcp_pm.h
+@@ -12,31 +12,33 @@
+ /**
+  * enum mptcp_event_type
+  * @MPTCP_EVENT_UNSPEC: unused event
+- * @MPTCP_EVENT_CREATED: token, family, saddr4 | saddr6, daddr4 | daddr6,
+- *   sport, dport A new MPTCP connection has been created. It is the good time
+- *   to allocate memory and send ADD_ADDR if needed. Depending on the
++ * @MPTCP_EVENT_CREATED: A new MPTCP connection has been created. It is the
++ *   good time to allocate memory and send ADD_ADDR if needed. Depending on the
+  *   traffic-patterns it can take a long time until the MPTCP_EVENT_ESTABLISHED
+- *   is sent.
+- * @MPTCP_EVENT_ESTABLISHED: token, family, saddr4 | saddr6, daddr4 | daddr6,
+- *   sport, dport A MPTCP connection is established (can start new subflows).
+- * @MPTCP_EVENT_CLOSED: token A MPTCP connection has stopped.
+- * @MPTCP_EVENT_ANNOUNCED: token, rem_id, family, daddr4 | daddr6 [, dport] A
+- *   new address has been announced by the peer.
+- * @MPTCP_EVENT_REMOVED: token, rem_id An address has been lost by the peer.
+- * @MPTCP_EVENT_SUB_ESTABLISHED: token, family, loc_id, rem_id, saddr4 |
+- *   saddr6, daddr4 | daddr6, sport, dport, backup, if_idx [, error] A new
+- *   subflow has been established. 'error' should not be set.
+- * @MPTCP_EVENT_SUB_CLOSED: token, family, loc_id, rem_id, saddr4 | saddr6,
+- *   daddr4 | daddr6, sport, dport, backup, if_idx [, error] A subflow has been
+- *   closed. An error (copy of sk_err) could be set if an error has been
+- *   detected for this subflow.
+- * @MPTCP_EVENT_SUB_PRIORITY: token, family, loc_id, rem_id, saddr4 | saddr6,
+- *   daddr4 | daddr6, sport, dport, backup, if_idx [, error] The priority of a
+- *   subflow has changed. 'error' should not be set.
+- * @MPTCP_EVENT_LISTENER_CREATED: family, sport, saddr4 | saddr6 A new PM
+- *   listener is created.
+- * @MPTCP_EVENT_LISTENER_CLOSED: family, sport, saddr4 | saddr6 A PM listener
+- *   is closed.
++ *   is sent. Attributes: token, family, saddr4 | saddr6, daddr4 | daddr6,
++ *   sport, dport, server-side.
++ * @MPTCP_EVENT_ESTABLISHED: A MPTCP connection is established (can start new
++ *   subflows). Attributes: token, family, saddr4 | saddr6, daddr4 | daddr6,
++ *   sport, dport, server-side.
++ * @MPTCP_EVENT_CLOSED: A MPTCP connection has stopped. Attribute: token.
++ * @MPTCP_EVENT_ANNOUNCED: A new address has been announced by the peer.
++ *   Attributes: token, rem_id, family, daddr4 | daddr6 [, dport].
++ * @MPTCP_EVENT_REMOVED: An address has been lost by the peer. Attributes:
++ *   token, rem_id.
++ * @MPTCP_EVENT_SUB_ESTABLISHED: A new subflow has been established. 'error'
++ *   should not be set. Attributes: token, family, loc_id, rem_id, saddr4 |
++ *   saddr6, daddr4 | daddr6, sport, dport, backup, if-idx [, error].
++ * @MPTCP_EVENT_SUB_CLOSED: A subflow has been closed. An error (copy of
++ *   sk_err) could be set if an error has been detected for this subflow.
++ *   Attributes: token, family, loc_id, rem_id, saddr4 | saddr6, daddr4 |
++ *   daddr6, sport, dport, backup, if-idx [, error].
++ * @MPTCP_EVENT_SUB_PRIORITY: The priority of a subflow has changed. 'error'
++ *   should not be set. Attributes: token, family, loc_id, rem_id, saddr4 |
++ *   saddr6, daddr4 | daddr6, sport, dport, backup, if-idx [, error].
++ * @MPTCP_EVENT_LISTENER_CREATED: A new PM listener is created. Attributes:
++ *   family, sport, saddr4 | saddr6.
++ * @MPTCP_EVENT_LISTENER_CLOSED: A PM listener is closed. Attributes: family,
++ *   sport, saddr4 | saddr6.
+  */
+ enum mptcp_event_type {
+ 	MPTCP_EVENT_UNSPEC,
+diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
+index 6f91e3a123e554..9380e0fd5e4af0 100644
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -2299,8 +2299,7 @@ static unsigned int __bpf_prog_ret0_warn(const void *ctx,
+ 					 const struct bpf_insn *insn)
+ {
+ 	/* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
+-	 * is not working properly, or interpreter is being used when
+-	 * prog->jit_requested is not 0, so warn about it!
++	 * is not working properly, so warn about it!
+ 	 */
+ 	WARN_ON_ONCE(1);
+ 	return 0;
+@@ -2401,8 +2400,9 @@ static int bpf_check_tail_call(const struct bpf_prog *fp)
+ 	return ret;
+ }
+ 
+-static void bpf_prog_select_func(struct bpf_prog *fp)
++static bool bpf_prog_select_interpreter(struct bpf_prog *fp)
+ {
++	bool select_interpreter = false;
+ #ifndef CONFIG_BPF_JIT_ALWAYS_ON
+ 	u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
+ 	u32 idx = (round_up(stack_depth, 32) / 32) - 1;
+@@ -2411,15 +2411,16 @@ static void bpf_prog_select_func(struct bpf_prog *fp)
+ 	 * But for non-JITed programs, we don't need bpf_func, so no bounds
+ 	 * check needed.
+ 	 */
+-	if (!fp->jit_requested &&
+-	    !WARN_ON_ONCE(idx >= ARRAY_SIZE(interpreters))) {
++	if (idx < ARRAY_SIZE(interpreters)) {
+ 		fp->bpf_func = interpreters[idx];
++		select_interpreter = true;
+ 	} else {
+ 		fp->bpf_func = __bpf_prog_ret0_warn;
+ 	}
+ #else
+ 	fp->bpf_func = __bpf_prog_ret0_warn;
+ #endif
++	return select_interpreter;
+ }
+ 
+ /**
+@@ -2438,7 +2439,7 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
+ 	/* In case of BPF to BPF calls, verifier did all the prep
+ 	 * work with regards to JITing, etc.
+ 	 */
+-	bool jit_needed = fp->jit_requested;
++	bool jit_needed = false;
+ 
+ 	if (fp->bpf_func)
+ 		goto finalize;
+@@ -2447,7 +2448,8 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
+ 	    bpf_prog_has_kfunc_call(fp))
+ 		jit_needed = true;
+ 
+-	bpf_prog_select_func(fp);
++	if (!bpf_prog_select_interpreter(fp))
++		jit_needed = true;
+ 
+ 	/* eBPF JITs can rewrite the program in case constant
+ 	 * blinding is active. However, in case of error during
+diff --git a/kernel/bpf/crypto.c b/kernel/bpf/crypto.c
+index 94854cd9c4cc32..83c4d9943084b9 100644
+--- a/kernel/bpf/crypto.c
++++ b/kernel/bpf/crypto.c
+@@ -278,7 +278,7 @@ static int bpf_crypto_crypt(const struct bpf_crypto_ctx *ctx,
+ 	siv_len = siv ? __bpf_dynptr_size(siv) : 0;
+ 	src_len = __bpf_dynptr_size(src);
+ 	dst_len = __bpf_dynptr_size(dst);
+-	if (!src_len || !dst_len)
++	if (!src_len || !dst_len || src_len > dst_len)
+ 		return -EINVAL;
+ 
+ 	if (siv_len != ctx->siv_len)
+diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
+index be4429463599f6..a0bf39b7359aae 100644
+--- a/kernel/bpf/helpers.c
++++ b/kernel/bpf/helpers.c
+@@ -1276,8 +1276,11 @@ static int __bpf_async_init(struct bpf_async_kern *async, struct bpf_map *map, u
+ 		goto out;
+ 	}
+ 
+-	/* allocate hrtimer via map_kmalloc to use memcg accounting */
+-	cb = bpf_map_kmalloc_node(map, size, GFP_ATOMIC, map->numa_node);
++	/* Allocate via bpf_map_kmalloc_node() for memcg accounting. Until
++	 * kmalloc_nolock() is available, avoid locking issues by using
++	 * __GFP_HIGH (GFP_ATOMIC & ~__GFP_RECLAIM).
++	 */
++	cb = bpf_map_kmalloc_node(map, size, __GFP_HIGH, map->numa_node);
+ 	if (!cb) {
+ 		ret = -ENOMEM;
+ 		goto out;
+diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
+index f6f0387761d05a..39972e834e7a13 100644
+--- a/kernel/dma/debug.c
++++ b/kernel/dma/debug.c
+@@ -39,6 +39,7 @@ enum {
+ 	dma_debug_sg,
+ 	dma_debug_coherent,
+ 	dma_debug_resource,
++	dma_debug_noncoherent,
+ };
+ 
+ enum map_err_types {
+@@ -59,8 +60,7 @@ enum map_err_types {
+  * @direction: enum dma_data_direction
+  * @sg_call_ents: 'nents' from dma_map_sg
+  * @sg_mapped_ents: 'mapped_ents' from dma_map_sg
+- * @pfn: page frame of the start address
+- * @offset: offset of mapping relative to pfn
++ * @paddr: physical start address of the mapping
+  * @map_err_type: track whether dma_mapping_error() was checked
+  * @stack_len: number of backtrace entries in @stack_entries
+  * @stack_entries: stack of backtrace history
+@@ -74,8 +74,7 @@ struct dma_debug_entry {
+ 	int              direction;
+ 	int		 sg_call_ents;
+ 	int		 sg_mapped_ents;
+-	unsigned long	 pfn;
+-	size_t		 offset;
++	phys_addr_t	 paddr;
+ 	enum map_err_types  map_err_type;
+ #ifdef CONFIG_STACKTRACE
+ 	unsigned int	stack_len;
+@@ -143,6 +142,7 @@ static const char *type2name[] = {
+ 	[dma_debug_sg] = "scatter-gather",
+ 	[dma_debug_coherent] = "coherent",
+ 	[dma_debug_resource] = "resource",
++	[dma_debug_noncoherent] = "noncoherent",
+ };
+ 
+ static const char *dir2name[] = {
+@@ -389,14 +389,6 @@ static void hash_bucket_del(struct dma_debug_entry *entry)
+ 	list_del(&entry->list);
+ }
+ 
+-static unsigned long long phys_addr(struct dma_debug_entry *entry)
+-{
+-	if (entry->type == dma_debug_resource)
+-		return __pfn_to_phys(entry->pfn) + entry->offset;
+-
+-	return page_to_phys(pfn_to_page(entry->pfn)) + entry->offset;
+-}
+-
+ /*
+  * For each mapping (initial cacheline in the case of
+  * dma_alloc_coherent/dma_map_page, initial cacheline in each page of a
+@@ -428,8 +420,8 @@ static DEFINE_SPINLOCK(radix_lock);
+ 
+ static phys_addr_t to_cacheline_number(struct dma_debug_entry *entry)
+ {
+-	return (entry->pfn << CACHELINE_PER_PAGE_SHIFT) +
+-		(entry->offset >> L1_CACHE_SHIFT);
++	return ((entry->paddr >> PAGE_SHIFT) << CACHELINE_PER_PAGE_SHIFT) +
++		(offset_in_page(entry->paddr) >> L1_CACHE_SHIFT);
+ }
+ 
+ static int active_cacheline_read_overlap(phys_addr_t cln)
+@@ -538,11 +530,11 @@ void debug_dma_dump_mappings(struct device *dev)
+ 			if (!dev || dev == entry->dev) {
+ 				cln = to_cacheline_number(entry);
+ 				dev_info(entry->dev,
+-					 "%s idx %d P=%llx N=%lx D=%llx L=%llx cln=%pa %s %s\n",
++					 "%s idx %d P=%pa D=%llx L=%llx cln=%pa %s %s\n",
+ 					 type2name[entry->type], idx,
+-					 phys_addr(entry), entry->pfn,
+-					 entry->dev_addr, entry->size,
+-					 &cln, dir2name[entry->direction],
++					 &entry->paddr, entry->dev_addr,
++					 entry->size, &cln,
++					 dir2name[entry->direction],
+ 					 maperr2str[entry->map_err_type]);
+ 			}
+ 		}
+@@ -569,13 +561,13 @@ static int dump_show(struct seq_file *seq, void *v)
+ 		list_for_each_entry(entry, &bucket->list, list) {
+ 			cln = to_cacheline_number(entry);
+ 			seq_printf(seq,
+-				   "%s %s %s idx %d P=%llx N=%lx D=%llx L=%llx cln=%pa %s %s\n",
++				   "%s %s %s idx %d P=%pa D=%llx L=%llx cln=%pa %s %s\n",
+ 				   dev_driver_string(entry->dev),
+ 				   dev_name(entry->dev),
+ 				   type2name[entry->type], idx,
+-				   phys_addr(entry), entry->pfn,
+-				   entry->dev_addr, entry->size,
+-				   &cln, dir2name[entry->direction],
++				   &entry->paddr, entry->dev_addr,
++				   entry->size, &cln,
++				   dir2name[entry->direction],
+ 				   maperr2str[entry->map_err_type]);
+ 		}
+ 		spin_unlock_irqrestore(&bucket->lock, flags);
+@@ -1003,16 +995,17 @@ static void check_unmap(struct dma_debug_entry *ref)
+ 			   "[mapped as %s] [unmapped as %s]\n",
+ 			   ref->dev_addr, ref->size,
+ 			   type2name[entry->type], type2name[ref->type]);
+-	} else if ((entry->type == dma_debug_coherent) &&
+-		   (phys_addr(ref) != phys_addr(entry))) {
++	} else if ((entry->type == dma_debug_coherent ||
++		    entry->type == dma_debug_noncoherent) &&
++		   ref->paddr != entry->paddr) {
+ 		err_printk(ref->dev, entry, "device driver frees "
+ 			   "DMA memory with different CPU address "
+ 			   "[device address=0x%016llx] [size=%llu bytes] "
+-			   "[cpu alloc address=0x%016llx] "
+-			   "[cpu free address=0x%016llx]",
++			   "[cpu alloc address=0x%pa] "
++			   "[cpu free address=0x%pa]",
+ 			   ref->dev_addr, ref->size,
+-			   phys_addr(entry),
+-			   phys_addr(ref));
++			   &entry->paddr,
++			   &ref->paddr);
+ 	}
+ 
+ 	if (ref->sg_call_ents && ref->type == dma_debug_sg &&
+@@ -1231,8 +1224,7 @@ void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
+ 
+ 	entry->dev       = dev;
+ 	entry->type      = dma_debug_single;
+-	entry->pfn	 = page_to_pfn(page);
+-	entry->offset	 = offset;
++	entry->paddr	 = page_to_phys(page) + offset;
+ 	entry->dev_addr  = dma_addr;
+ 	entry->size      = size;
+ 	entry->direction = direction;
+@@ -1327,8 +1319,7 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
+ 
+ 		entry->type           = dma_debug_sg;
+ 		entry->dev            = dev;
+-		entry->pfn	      = page_to_pfn(sg_page(s));
+-		entry->offset	      = s->offset;
++		entry->paddr	      = sg_phys(s);
+ 		entry->size           = sg_dma_len(s);
+ 		entry->dev_addr       = sg_dma_address(s);
+ 		entry->direction      = direction;
+@@ -1374,8 +1365,7 @@ void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
+ 		struct dma_debug_entry ref = {
+ 			.type           = dma_debug_sg,
+ 			.dev            = dev,
+-			.pfn		= page_to_pfn(sg_page(s)),
+-			.offset		= s->offset,
++			.paddr		= sg_phys(s),
+ 			.dev_addr       = sg_dma_address(s),
+ 			.size           = sg_dma_len(s),
+ 			.direction      = dir,
+@@ -1392,6 +1382,18 @@ void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
+ 	}
+ }
+ 
++static phys_addr_t virt_to_paddr(void *virt)
++{
++	struct page *page;
++
++	if (is_vmalloc_addr(virt))
++		page = vmalloc_to_page(virt);
++	else
++		page = virt_to_page(virt);
++
++	return page_to_phys(page) + offset_in_page(virt);
++}
++
+ void debug_dma_alloc_coherent(struct device *dev, size_t size,
+ 			      dma_addr_t dma_addr, void *virt,
+ 			      unsigned long attrs)
+@@ -1414,16 +1416,11 @@ void debug_dma_alloc_coherent(struct device *dev, size_t size,
+ 
+ 	entry->type      = dma_debug_coherent;
+ 	entry->dev       = dev;
+-	entry->offset	 = offset_in_page(virt);
++	entry->paddr	 = virt_to_paddr(virt);
+ 	entry->size      = size;
+ 	entry->dev_addr  = dma_addr;
+ 	entry->direction = DMA_BIDIRECTIONAL;
+ 
+-	if (is_vmalloc_addr(virt))
+-		entry->pfn = vmalloc_to_pfn(virt);
+-	else
+-		entry->pfn = page_to_pfn(virt_to_page(virt));
+-
+ 	add_dma_entry(entry, attrs);
+ }
+ 
+@@ -1433,7 +1430,6 @@ void debug_dma_free_coherent(struct device *dev, size_t size,
+ 	struct dma_debug_entry ref = {
+ 		.type           = dma_debug_coherent,
+ 		.dev            = dev,
+-		.offset		= offset_in_page(virt),
+ 		.dev_addr       = dma_addr,
+ 		.size           = size,
+ 		.direction      = DMA_BIDIRECTIONAL,
+@@ -1443,10 +1439,7 @@ void debug_dma_free_coherent(struct device *dev, size_t size,
+ 	if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
+ 		return;
+ 
+-	if (is_vmalloc_addr(virt))
+-		ref.pfn = vmalloc_to_pfn(virt);
+-	else
+-		ref.pfn = page_to_pfn(virt_to_page(virt));
++	ref.paddr = virt_to_paddr(virt);
+ 
+ 	if (unlikely(dma_debug_disabled()))
+ 		return;
+@@ -1469,8 +1462,7 @@ void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size,
+ 
+ 	entry->type		= dma_debug_resource;
+ 	entry->dev		= dev;
+-	entry->pfn		= PHYS_PFN(addr);
+-	entry->offset		= offset_in_page(addr);
++	entry->paddr		= addr;
+ 	entry->size		= size;
+ 	entry->dev_addr		= dma_addr;
+ 	entry->direction	= direction;
+@@ -1547,8 +1539,7 @@ void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+ 		struct dma_debug_entry ref = {
+ 			.type           = dma_debug_sg,
+ 			.dev            = dev,
+-			.pfn		= page_to_pfn(sg_page(s)),
+-			.offset		= s->offset,
++			.paddr		= sg_phys(s),
+ 			.dev_addr       = sg_dma_address(s),
+ 			.size           = sg_dma_len(s),
+ 			.direction      = direction,
+@@ -1579,8 +1570,7 @@ void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+ 		struct dma_debug_entry ref = {
+ 			.type           = dma_debug_sg,
+ 			.dev            = dev,
+-			.pfn		= page_to_pfn(sg_page(s)),
+-			.offset		= s->offset,
++			.paddr		= sg_phys(sg),
+ 			.dev_addr       = sg_dma_address(s),
+ 			.size           = sg_dma_len(s),
+ 			.direction      = direction,
+@@ -1596,6 +1586,49 @@ void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+ 	}
+ }
+ 
++void debug_dma_alloc_pages(struct device *dev, struct page *page,
++			   size_t size, int direction,
++			   dma_addr_t dma_addr,
++			   unsigned long attrs)
++{
++	struct dma_debug_entry *entry;
++
++	if (unlikely(dma_debug_disabled()))
++		return;
++
++	entry = dma_entry_alloc();
++	if (!entry)
++		return;
++
++	entry->type      = dma_debug_noncoherent;
++	entry->dev       = dev;
++	entry->paddr	 = page_to_phys(page);
++	entry->size      = size;
++	entry->dev_addr  = dma_addr;
++	entry->direction = direction;
++
++	add_dma_entry(entry, attrs);
++}
++
++void debug_dma_free_pages(struct device *dev, struct page *page,
++			  size_t size, int direction,
++			  dma_addr_t dma_addr)
++{
++	struct dma_debug_entry ref = {
++		.type           = dma_debug_noncoherent,
++		.dev            = dev,
++		.paddr		= page_to_phys(page),
++		.dev_addr       = dma_addr,
++		.size           = size,
++		.direction      = direction,
++	};
++
++	if (unlikely(dma_debug_disabled()))
++		return;
++
++	check_unmap(&ref);
++}
++
+ static int __init dma_debug_driver_setup(char *str)
+ {
+ 	int i;
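
On the dma-debug conversion above: storing one phys_addr_t instead of the old
pfn/offset pair changes no results. In to_cacheline_number(), with
paddr == (pfn << PAGE_SHIFT) + offset we have paddr >> PAGE_SHIFT == pfn and
offset_in_page(paddr) == offset, so:

	/* old: (pfn << CACHELINE_PER_PAGE_SHIFT)
	 *	+ (offset >> L1_CACHE_SHIFT)
	 * new: ((paddr >> PAGE_SHIFT) << CACHELINE_PER_PAGE_SHIFT)
	 *	+ (offset_in_page(paddr) >> L1_CACHE_SHIFT)
	 * -- the same cacheline index, just computed from paddr. */
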
+diff --git a/kernel/dma/debug.h b/kernel/dma/debug.h
+index f525197d3cae60..48757ca13f3140 100644
+--- a/kernel/dma/debug.h
++++ b/kernel/dma/debug.h
+@@ -54,6 +54,13 @@ extern void debug_dma_sync_sg_for_cpu(struct device *dev,
+ extern void debug_dma_sync_sg_for_device(struct device *dev,
+ 					 struct scatterlist *sg,
+ 					 int nelems, int direction);
++extern void debug_dma_alloc_pages(struct device *dev, struct page *page,
++				  size_t size, int direction,
++				  dma_addr_t dma_addr,
++				  unsigned long attrs);
++extern void debug_dma_free_pages(struct device *dev, struct page *page,
++				 size_t size, int direction,
++				 dma_addr_t dma_addr);
+ #else /* CONFIG_DMA_API_DEBUG */
+ static inline void debug_dma_map_page(struct device *dev, struct page *page,
+ 				      size_t offset, size_t size,
+@@ -126,5 +133,18 @@ static inline void debug_dma_sync_sg_for_device(struct device *dev,
+ 						int nelems, int direction)
+ {
+ }
++
++static inline void debug_dma_alloc_pages(struct device *dev, struct page *page,
++					 size_t size, int direction,
++					 dma_addr_t dma_addr,
++					 unsigned long attrs)
++{
++}
++
++static inline void debug_dma_free_pages(struct device *dev, struct page *page,
++					size_t size, int direction,
++					dma_addr_t dma_addr)
++{
++}
+ #endif /* CONFIG_DMA_API_DEBUG */
+ #endif /* _KERNEL_DMA_DEBUG_H */
+diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
+index 74d453ec750a11..c7cc4e33ec6e0e 100644
+--- a/kernel/dma/mapping.c
++++ b/kernel/dma/mapping.c
+@@ -223,6 +223,7 @@ static int __dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
+ 		debug_dma_map_sg(dev, sg, nents, ents, dir, attrs);
+ 	} else if (WARN_ON_ONCE(ents != -EINVAL && ents != -ENOMEM &&
+ 				ents != -EIO && ents != -EREMOTEIO)) {
++		trace_dma_map_sg_err(dev, sg, nents, ents, dir, attrs);
+ 		return -EIO;
+ 	}
+ 
+@@ -604,22 +605,29 @@ void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ 	if (WARN_ON_ONCE(flag & __GFP_COMP))
+ 		return NULL;
+ 
+-	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
++	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr)) {
++		trace_dma_alloc(dev, cpu_addr, *dma_handle, size,
++				DMA_BIDIRECTIONAL, flag, attrs);
+ 		return cpu_addr;
++	}
+ 
+ 	/* let the implementation decide on the zone to allocate from: */
+ 	flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
+ 
+-	if (dma_alloc_direct(dev, ops))
++	if (dma_alloc_direct(dev, ops)) {
+ 		cpu_addr = dma_direct_alloc(dev, size, dma_handle, flag, attrs);
+-	else if (use_dma_iommu(dev))
++	} else if (use_dma_iommu(dev)) {
+ 		cpu_addr = iommu_dma_alloc(dev, size, dma_handle, flag, attrs);
+-	else if (ops->alloc)
++	} else if (ops->alloc) {
+ 		cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
+-	else
++	} else {
++		trace_dma_alloc(dev, NULL, 0, size, DMA_BIDIRECTIONAL, flag,
++				attrs);
+ 		return NULL;
++	}
+ 
+-	trace_dma_alloc(dev, cpu_addr, *dma_handle, size, flag, attrs);
++	trace_dma_alloc(dev, cpu_addr, *dma_handle, size, DMA_BIDIRECTIONAL,
++			flag, attrs);
+ 	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr, attrs);
+ 	return cpu_addr;
+ }
+@@ -641,10 +649,11 @@ void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
+ 	 */
+ 	WARN_ON(irqs_disabled());
+ 
++	trace_dma_free(dev, cpu_addr, dma_handle, size, DMA_BIDIRECTIONAL,
++		       attrs);
+ 	if (!cpu_addr)
+ 		return;
+ 
+-	trace_dma_free(dev, cpu_addr, dma_handle, size, attrs);
+ 	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
+ 	if (dma_alloc_direct(dev, ops))
+ 		dma_direct_free(dev, size, cpu_addr, dma_handle, attrs);
+@@ -683,9 +692,11 @@ struct page *dma_alloc_pages(struct device *dev, size_t size,
+ 	struct page *page = __dma_alloc_pages(dev, size, dma_handle, dir, gfp);
+ 
+ 	if (page) {
+-		trace_dma_map_page(dev, page_to_phys(page), *dma_handle, size,
+-				   dir, 0);
+-		debug_dma_map_page(dev, page, 0, size, dir, *dma_handle, 0);
++		trace_dma_alloc_pages(dev, page_to_virt(page), *dma_handle,
++				      size, dir, gfp, 0);
++		debug_dma_alloc_pages(dev, page, size, dir, *dma_handle, 0);
++	} else {
++		trace_dma_alloc_pages(dev, NULL, 0, size, dir, gfp, 0);
+ 	}
+ 	return page;
+ }
+@@ -708,8 +719,8 @@ static void __dma_free_pages(struct device *dev, size_t size, struct page *page,
+ void dma_free_pages(struct device *dev, size_t size, struct page *page,
+ 		dma_addr_t dma_handle, enum dma_data_direction dir)
+ {
+-	trace_dma_unmap_page(dev, dma_handle, size, dir, 0);
+-	debug_dma_unmap_page(dev, dma_handle, size, dir);
++	trace_dma_free_pages(dev, page_to_virt(page), dma_handle, size, dir, 0);
++	debug_dma_free_pages(dev, page, size, dir, dma_handle);
+ 	__dma_free_pages(dev, size, page, dma_handle, dir);
+ }
+ EXPORT_SYMBOL_GPL(dma_free_pages);
+@@ -768,8 +779,10 @@ struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
+ 
+ 	if (sgt) {
+ 		sgt->nents = 1;
+-		trace_dma_map_sg(dev, sgt->sgl, sgt->orig_nents, 1, dir, attrs);
++		trace_dma_alloc_sgt(dev, sgt, size, dir, gfp, attrs);
+ 		debug_dma_map_sg(dev, sgt->sgl, sgt->orig_nents, 1, dir, attrs);
++	} else {
++		trace_dma_alloc_sgt_err(dev, NULL, 0, size, dir, gfp, attrs);
+ 	}
+ 	return sgt;
+ }
+@@ -787,7 +800,7 @@ static void free_single_sgt(struct device *dev, size_t size,
+ void dma_free_noncontiguous(struct device *dev, size_t size,
+ 		struct sg_table *sgt, enum dma_data_direction dir)
+ {
+-	trace_dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir, 0);
++	trace_dma_free_sgt(dev, sgt, size, dir);
+ 	debug_dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
+ 
+ 	if (use_dma_iommu(dev))
+diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
+index 06fbc226341fd0..cd9cb7ccb540c7 100644
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -798,10 +798,10 @@ static void retrigger_next_event(void *arg)
+ 	 * of the next expiring timer is enough. The return from the SMP
+ 	 * function call will take care of the reprogramming in case the
+ 	 * CPU was in a NOHZ idle sleep.
++	 *
++	 * In periodic low resolution mode, the next softirq expiration
++	 * must also be updated.
+ 	 */
+-	if (!hrtimer_hres_active(base) && !tick_nohz_active)
+-		return;
+-
+ 	raw_spin_lock(&base->lock);
+ 	hrtimer_update_base(base);
+ 	if (hrtimer_hres_active(base))
+@@ -2286,11 +2286,6 @@ int hrtimers_cpu_dying(unsigned int dying_cpu)
+ 				     &new_base->clock_base[i]);
+ 	}
+ 
+-	/*
+-	 * The migration might have changed the first expiring softirq
+-	 * timer on this CPU. Update it.
+-	 */
+-	__hrtimer_get_next_event(new_base, HRTIMER_ACTIVE_SOFT);
+ 	/* Tell the other CPU to retrigger the next event */
+ 	smp_call_function_single(ncpu, retrigger_next_event, NULL, 0);
+ 
+diff --git a/kernel/trace/fgraph.c b/kernel/trace/fgraph.c
+index 2eed8bc672f913..988a4c4ba97bc8 100644
+--- a/kernel/trace/fgraph.c
++++ b/kernel/trace/fgraph.c
+@@ -1316,7 +1316,8 @@ int register_ftrace_graph(struct fgraph_ops *gops)
+ 		ftrace_graph_active--;
+ 		gops->saved_func = NULL;
+ 		fgraph_lru_release_index(i);
+-		unregister_pm_notifier(&ftrace_suspend_notifier);
++		if (!ftrace_graph_active)
++			unregister_pm_notifier(&ftrace_suspend_notifier);
+ 	}
+ 	return ret;
+ }
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index ba3358eef34baa..91e6bf1b101a7f 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -787,7 +787,10 @@ int trace_pid_write(struct trace_pid_list *filtered_pids,
+ 		/* copy the current bits to the new max */
+ 		ret = trace_pid_list_first(filtered_pids, &pid);
+ 		while (!ret) {
+-			trace_pid_list_set(pid_list, pid);
++			ret = trace_pid_list_set(pid_list, pid);
++			if (ret < 0)
++				goto out;
++
+ 			ret = trace_pid_list_next(filtered_pids, pid + 1, &pid);
+ 			nr_pids++;
+ 		}
+@@ -824,6 +827,7 @@ int trace_pid_write(struct trace_pid_list *filtered_pids,
+ 		trace_parser_clear(&parser);
+ 		ret = 0;
+ 	}
++ out:
+ 	trace_parser_put(&parser);
+ 
+ 	if (ret < 0) {
+@@ -6949,7 +6953,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
+ 	entry = ring_buffer_event_data(event);
+ 	entry->ip = _THIS_IP_;
+ 
+-	len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
++	len = copy_from_user_nofault(&entry->buf, ubuf, cnt);
+ 	if (len) {
+ 		memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
+ 		cnt = FAULTED_SIZE;
+@@ -7020,7 +7024,7 @@ tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
+ 
+ 	entry = ring_buffer_event_data(event);
+ 
+-	len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
++	len = copy_from_user_nofault(&entry->id, ubuf, cnt);
+ 	if (len) {
+ 		entry->id = -1;
+ 		memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
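
On the tracing_mark_write()/tracing_mark_raw_write() change above: unlike
__copy_from_user_inatomic(), copy_from_user_nofault() validates the user range
itself and disables page faults around the copy, so it is safe without the
caller having arranged either. Its contract is roughly (sketch mirroring
mm/maccess.c, instrumentation hooks omitted):

	if (!access_ok(src, size))
		return -EFAULT;
	pagefault_disable();
	ret = __copy_from_user_inatomic(dst, src, size);
	pagefault_enable();
	return ret ? -EFAULT : 0;
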
+diff --git a/mm/Kconfig b/mm/Kconfig
+index 33fa51d608dc51..59c36bb9ce6b0b 100644
+--- a/mm/Kconfig
++++ b/mm/Kconfig
+@@ -244,7 +244,7 @@ config SLUB
+ 
+ config SLUB_TINY
+ 	bool "Configure for minimal memory footprint"
+-	depends on EXPERT
++	depends on EXPERT && !COMPILE_TEST
+ 	select SLAB_MERGE_DEFAULT
+ 	help
+ 	   Configures the slab allocator in a way to achieve minimal memory
+diff --git a/mm/damon/core.c b/mm/damon/core.c
+index 9689f542523832..ed2b750231810d 100644
+--- a/mm/damon/core.c
++++ b/mm/damon/core.c
+@@ -1596,6 +1596,10 @@ static void damos_adjust_quota(struct damon_ctx *c, struct damos *s)
+ 	if (!quota->ms && !quota->sz && list_empty(&quota->goals))
+ 		return;
+ 
++	/* First charge window */
++	if (!quota->total_charged_sz && !quota->charged_from)
++		quota->charged_from = jiffies;
++
+ 	/* New charge window starts */
+ 	if (time_after_eq(jiffies, quota->charged_from +
+ 				msecs_to_jiffies(quota->reset_interval))) {
+diff --git a/mm/damon/lru_sort.c b/mm/damon/lru_sort.c
+index 4af8fd4a390b66..c2b4f0b0714727 100644
+--- a/mm/damon/lru_sort.c
++++ b/mm/damon/lru_sort.c
+@@ -198,6 +198,11 @@ static int damon_lru_sort_apply_parameters(void)
+ 	if (err)
+ 		return err;
+ 
++	if (!damon_lru_sort_mon_attrs.sample_interval) {
++		err = -EINVAL;
++		goto out;
++	}
++
+ 	err = damon_set_attrs(ctx, &damon_lru_sort_mon_attrs);
+ 	if (err)
+ 		goto out;
+diff --git a/mm/damon/reclaim.c b/mm/damon/reclaim.c
+index 9e0077a9404e2c..65842e6854fd1d 100644
+--- a/mm/damon/reclaim.c
++++ b/mm/damon/reclaim.c
+@@ -194,6 +194,11 @@ static int damon_reclaim_apply_parameters(void)
+ 	if (err)
+ 		return err;
+ 
++	if (!damon_reclaim_mon_attrs.aggr_interval) {
++		err = -EINVAL;
++		goto out;
++	}
++
+ 	err = damon_set_attrs(ctx, &damon_reclaim_mon_attrs);
+ 	if (err)
+ 		goto out;
+diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c
+index 58145d59881dd8..9ce2abc64de400 100644
+--- a/mm/damon/sysfs.c
++++ b/mm/damon/sysfs.c
+@@ -1067,14 +1067,18 @@ static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
+ {
+ 	struct damon_sysfs_kdamond *kdamond = container_of(kobj,
+ 			struct damon_sysfs_kdamond, kobj);
+-	struct damon_ctx *ctx = kdamond->damon_ctx;
+-	bool running;
++	struct damon_ctx *ctx;
++	bool running = false;
+ 
+-	if (!ctx)
+-		running = false;
+-	else
++	if (!mutex_trylock(&damon_sysfs_lock))
++		return -EBUSY;
++
++	ctx = kdamond->damon_ctx;
++	if (ctx)
+ 		running = damon_sysfs_ctx_running(ctx);
+ 
++	mutex_unlock(&damon_sysfs_lock);
++
+ 	return sysfs_emit(buf, "%s\n", running ?
+ 			damon_sysfs_cmd_strs[DAMON_SYSFS_CMD_ON] :
+ 			damon_sysfs_cmd_strs[DAMON_SYSFS_CMD_OFF]);
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 9c6a4e855481af..f116af53a93922 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -5512,7 +5512,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
+ 	struct page *page;
+ 	struct hstate *h = hstate_vma(vma);
+ 	unsigned long sz = huge_page_size(h);
+-	bool adjust_reservation = false;
++	bool adjust_reservation;
+ 	unsigned long last_addr_mask;
+ 	bool force_flush = false;
+ 
+@@ -5604,6 +5604,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
+ 					sz);
+ 		hugetlb_count_sub(pages_per_huge_page(h), mm);
+ 		hugetlb_remove_rmap(page_folio(page));
++		spin_unlock(ptl);
+ 
+ 		/*
+ 		 * Restore the reservation for anonymous page, otherwise the
+@@ -5611,14 +5612,16 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
+ 		 * If there we are freeing a surplus, do not set the restore
+ 		 * reservation bit.
+ 		 */
++		adjust_reservation = false;
++
++		spin_lock_irq(&hugetlb_lock);
+ 		if (!h->surplus_huge_pages && __vma_private_lock(vma) &&
+ 		    folio_test_anon(page_folio(page))) {
+ 			folio_set_hugetlb_restore_reserve(page_folio(page));
+ 			/* Reservation to be adjusted after the spin lock */
+ 			adjust_reservation = true;
+ 		}
+-
+-		spin_unlock(ptl);
++		spin_unlock_irq(&hugetlb_lock);
+ 
+ 		/*
+ 		 * Adjust the reservation for the region that will have the
+diff --git a/mm/kasan/init.c b/mm/kasan/init.c
+index ac607c306292f4..d1810e624cfc7c 100644
+--- a/mm/kasan/init.c
++++ b/mm/kasan/init.c
+@@ -13,9 +13,9 @@
+ #include <linux/mm.h>
+ #include <linux/pfn.h>
+ #include <linux/slab.h>
++#include <linux/pgalloc.h>
+ 
+ #include <asm/page.h>
+-#include <asm/pgalloc.h>
+ 
+ #include "kasan.h"
+ 
+@@ -203,7 +203,7 @@ static int __ref zero_p4d_populate(pgd_t *pgd, unsigned long addr,
+ 			pud_t *pud;
+ 			pmd_t *pmd;
+ 
+-			p4d_populate(&init_mm, p4d,
++			p4d_populate_kernel(addr, p4d,
+ 					lm_alias(kasan_early_shadow_pud));
+ 			pud = pud_offset(p4d, addr);
+ 			pud_populate(&init_mm, pud,
+@@ -224,7 +224,7 @@ static int __ref zero_p4d_populate(pgd_t *pgd, unsigned long addr,
+ 			} else {
+ 				p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
+ 				pud_init(p);
+-				p4d_populate(&init_mm, p4d, p);
++				p4d_populate_kernel(addr, p4d, p);
+ 			}
+ 		}
+ 		zero_pud_populate(p4d, addr, next);
+@@ -263,10 +263,10 @@ int __ref kasan_populate_early_shadow(const void *shadow_start,
+ 			 * puds,pmds, so pgd_populate(), pud_populate()
+ 			 * is noops.
+ 			 */
+-			pgd_populate(&init_mm, pgd,
++			pgd_populate_kernel(addr, pgd,
+ 					lm_alias(kasan_early_shadow_p4d));
+ 			p4d = p4d_offset(pgd, addr);
+-			p4d_populate(&init_mm, p4d,
++			p4d_populate_kernel(addr, p4d,
+ 					lm_alias(kasan_early_shadow_pud));
+ 			pud = pud_offset(p4d, addr);
+ 			pud_populate(&init_mm, pud,
+@@ -285,7 +285,7 @@ int __ref kasan_populate_early_shadow(const void *shadow_start,
+ 				if (!p)
+ 					return -ENOMEM;
+ 			} else {
+-				pgd_populate(&init_mm, pgd,
++				pgd_populate_kernel(addr, pgd,
+ 					early_alloc(PAGE_SIZE, NUMA_NO_NODE));
+ 			}
+ 		}
+diff --git a/mm/kasan/kasan_test_c.c b/mm/kasan/kasan_test_c.c
+index d4ac26ad1d3e53..ed41987a34cc63 100644
+--- a/mm/kasan/kasan_test_c.c
++++ b/mm/kasan/kasan_test_c.c
+@@ -1500,6 +1500,7 @@ static void kasan_memchr(struct kunit *test)
+ 
+ 	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
+ 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
++	OPTIMIZER_HIDE_VAR(ptr);
+ 
+ 	OPTIMIZER_HIDE_VAR(ptr);
+ 	OPTIMIZER_HIDE_VAR(size);
+diff --git a/mm/khugepaged.c b/mm/khugepaged.c
+index abd5764e48642d..a97b20617869ae 100644
+--- a/mm/khugepaged.c
++++ b/mm/khugepaged.c
+@@ -1403,8 +1403,8 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
+ 		 */
+ 		if (cc->is_khugepaged &&
+ 		    (pte_young(pteval) || folio_test_young(folio) ||
+-		     folio_test_referenced(folio) || mmu_notifier_test_young(vma->vm_mm,
+-								     address)))
++		     folio_test_referenced(folio) ||
++		     mmu_notifier_test_young(vma->vm_mm, _address)))
+ 			referenced++;
+ 	}
+ 	if (!writable) {
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index 8c8d78d6d3062e..a51122a220fc8c 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -948,7 +948,7 @@ static const char * const action_page_types[] = {
+ 	[MF_MSG_BUDDY]			= "free buddy page",
+ 	[MF_MSG_DAX]			= "dax page",
+ 	[MF_MSG_UNSPLIT_THP]		= "unsplit thp",
+-	[MF_MSG_ALREADY_POISONED]	= "already poisoned",
++	[MF_MSG_ALREADY_POISONED]	= "already poisoned page",
+ 	[MF_MSG_UNKNOWN]		= "unknown page",
+ };
+ 
+@@ -1341,9 +1341,10 @@ static int action_result(unsigned long pfn, enum mf_action_page_type type,
+ {
+ 	trace_memory_failure_event(pfn, type, result);
+ 
+-	num_poisoned_pages_inc(pfn);
+-
+-	update_per_node_mf_stats(pfn, result);
++	if (type != MF_MSG_ALREADY_POISONED) {
++		num_poisoned_pages_inc(pfn);
++		update_per_node_mf_stats(pfn, result);
++	}
+ 
+ 	pr_err("%#lx: recovery action for %s: %s\n",
+ 		pfn, action_page_types[type], action_name[result]);
+@@ -2086,12 +2087,11 @@ static int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb
+ 		*hugetlb = 0;
+ 		return 0;
+ 	} else if (res == -EHWPOISON) {
+-		pr_err("%#lx: already hardware poisoned\n", pfn);
+ 		if (flags & MF_ACTION_REQUIRED) {
+ 			folio = page_folio(p);
+ 			res = kill_accessing_process(current, folio_pfn(folio), flags);
+-			action_result(pfn, MF_MSG_ALREADY_POISONED, MF_FAILED);
+ 		}
++		action_result(pfn, MF_MSG_ALREADY_POISONED, MF_FAILED);
+ 		return res;
+ 	} else if (res == -EBUSY) {
+ 		if (!(flags & MF_NO_RETRY)) {
+@@ -2273,7 +2273,6 @@ int memory_failure(unsigned long pfn, int flags)
+ 		goto unlock_mutex;
+ 
+ 	if (TestSetPageHWPoison(p)) {
+-		pr_err("%#lx: already hardware poisoned\n", pfn);
+ 		res = -EHWPOISON;
+ 		if (flags & MF_ACTION_REQUIRED)
+ 			res = kill_accessing_process(current, pfn, flags);
+@@ -2570,10 +2569,9 @@ int unpoison_memory(unsigned long pfn)
+ 	static DEFINE_RATELIMIT_STATE(unpoison_rs, DEFAULT_RATELIMIT_INTERVAL,
+ 					DEFAULT_RATELIMIT_BURST);
+ 
+-	if (!pfn_valid(pfn))
+-		return -ENXIO;
+-
+-	p = pfn_to_page(pfn);
++	p = pfn_to_online_page(pfn);
++	if (!p)
++		return -EIO;
+ 	folio = page_folio(p);
+ 
+ 	mutex_lock(&mf_mutex);
+diff --git a/mm/percpu.c b/mm/percpu.c
+index da21680ff294cb..fb0307723da695 100644
+--- a/mm/percpu.c
++++ b/mm/percpu.c
+@@ -3129,7 +3129,7 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
+ #endif /* BUILD_EMBED_FIRST_CHUNK */
+ 
+ #ifdef BUILD_PAGE_FIRST_CHUNK
+-#include <asm/pgalloc.h>
++#include <linux/pgalloc.h>
+ 
+ #ifndef P4D_TABLE_SIZE
+ #define P4D_TABLE_SIZE PAGE_SIZE
+@@ -3157,7 +3157,7 @@ void __init __weak pcpu_populate_pte(unsigned long addr)
+ 		p4d = memblock_alloc(P4D_TABLE_SIZE, P4D_TABLE_SIZE);
+ 		if (!p4d)
+ 			goto err_alloc;
+-		pgd_populate(&init_mm, pgd, p4d);
++		pgd_populate_kernel(addr, pgd, p4d);
+ 	}
+ 
+ 	p4d = p4d_offset(pgd, addr);
+@@ -3165,7 +3165,7 @@ void __init __weak pcpu_populate_pte(unsigned long addr)
+ 		pud = memblock_alloc(PUD_TABLE_SIZE, PUD_TABLE_SIZE);
+ 		if (!pud)
+ 			goto err_alloc;
+-		p4d_populate(&init_mm, p4d, pud);
++		p4d_populate_kernel(addr, p4d, pud);
+ 	}
+ 
+ 	pud = pud_offset(p4d, addr);
+diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
+index 2628fc02be08b9..c3353cd442a5d3 100644
+--- a/mm/sparse-vmemmap.c
++++ b/mm/sparse-vmemmap.c
+@@ -27,9 +27,9 @@
+ #include <linux/spinlock.h>
+ #include <linux/vmalloc.h>
+ #include <linux/sched.h>
++#include <linux/pgalloc.h>
+ 
+ #include <asm/dma.h>
+-#include <asm/pgalloc.h>
+ 
+ /*
+  * Allocate a block of memory to be used to back the virtual memory map
+@@ -230,7 +230,7 @@ p4d_t * __meminit vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node)
+ 		if (!p)
+ 			return NULL;
+ 		pud_init(p);
+-		p4d_populate(&init_mm, p4d, p);
++		p4d_populate_kernel(addr, p4d, p);
+ 	}
+ 	return p4d;
+ }
+@@ -242,7 +242,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
+ 		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
+ 		if (!p)
+ 			return NULL;
+-		pgd_populate(&init_mm, pgd, p);
++		pgd_populate_kernel(addr, pgd, p);
+ 	}
+ 	return pgd;
+ }
+diff --git a/net/bridge/br.c b/net/bridge/br.c
+index 2cab878e0a39c9..ed08717541fe74 100644
+--- a/net/bridge/br.c
++++ b/net/bridge/br.c
+@@ -312,6 +312,13 @@ int br_boolopt_multi_toggle(struct net_bridge *br,
+ 	int err = 0;
+ 	int opt_id;
+ 
++	opt_id = find_next_bit(&bitmap, BITS_PER_LONG, BR_BOOLOPT_MAX);
++	if (opt_id != BITS_PER_LONG) {
++		NL_SET_ERR_MSG_FMT_MOD(extack, "Unknown boolean option %d",
++				       opt_id);
++		return -EINVAL;
++	}
++
+ 	for_each_set_bit(opt_id, &bitmap, BR_BOOLOPT_MAX) {
+ 		bool on = !!(bm->optval & BIT(opt_id));
+ 
+diff --git a/net/can/j1939/bus.c b/net/can/j1939/bus.c
+index 48668790160211..e0b966c2517cf1 100644
+--- a/net/can/j1939/bus.c
++++ b/net/can/j1939/bus.c
+@@ -290,8 +290,11 @@ int j1939_local_ecu_get(struct j1939_priv *priv, name_t name, u8 sa)
+ 	if (!ecu)
+ 		ecu = j1939_ecu_create_locked(priv, name);
+ 	err = PTR_ERR_OR_ZERO(ecu);
+-	if (err)
++	if (err) {
++		if (j1939_address_is_unicast(sa))
++			priv->ents[sa].nusers--;
+ 		goto done;
++	}
+ 
+ 	ecu->nusers++;
+ 	/* TODO: do we care if ecu->addr != sa? */
+diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c
+index 17226b2341d03d..75c2b9b2339010 100644
+--- a/net/can/j1939/socket.c
++++ b/net/can/j1939/socket.c
+@@ -520,6 +520,9 @@ static int j1939_sk_bind(struct socket *sock, struct sockaddr *uaddr, int len)
+ 	ret = j1939_local_ecu_get(priv, jsk->addr.src_name, jsk->addr.sa);
+ 	if (ret) {
+ 		j1939_netdev_stop(priv);
++		jsk->priv = NULL;
++		synchronize_rcu();
++		j1939_priv_put(priv);
+ 		goto out_release_sock;
+ 	}
+ 
+diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
+index d1b5705dc0c648..9f6d860411cbd1 100644
+--- a/net/ceph/messenger.c
++++ b/net/ceph/messenger.c
+@@ -1524,7 +1524,7 @@ static void con_fault_finish(struct ceph_connection *con)
+ 	 * in case we faulted due to authentication, invalidate our
+ 	 * current tickets so that we can get new ones.
+ 	 */
+-	if (con->v1.auth_retry) {
++	if (!ceph_msgr2(from_msgr(con->msgr)) && con->v1.auth_retry) {
+ 		dout("auth_retry %d, invalidating\n", con->v1.auth_retry);
+ 		if (con->ops->invalidate_authorizer)
+ 			con->ops->invalidate_authorizer(con);
+@@ -1714,9 +1714,10 @@ static void clear_standby(struct ceph_connection *con)
+ {
+ 	/* come back from STANDBY? */
+ 	if (con->state == CEPH_CON_S_STANDBY) {
+-		dout("clear_standby %p and ++connect_seq\n", con);
++		dout("clear_standby %p\n", con);
+ 		con->state = CEPH_CON_S_PREOPEN;
+-		con->v1.connect_seq++;
++		if (!ceph_msgr2(from_msgr(con->msgr)))
++			con->v1.connect_seq++;
+ 		WARN_ON(ceph_con_flag_test(con, CEPH_CON_F_WRITE_PENDING));
+ 		WARN_ON(ceph_con_flag_test(con, CEPH_CON_F_KEEPALIVE_PENDING));
+ 	}
+diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
+index 9d0754b3642fde..d2ae9fbed9e30c 100644
+--- a/net/hsr/hsr_device.c
++++ b/net/hsr/hsr_device.c
+@@ -49,7 +49,7 @@ static bool hsr_check_carrier(struct hsr_port *master)
+ 
+ 	ASSERT_RTNL();
+ 
+-	hsr_for_each_port(master->hsr, port) {
++	hsr_for_each_port_rtnl(master->hsr, port) {
+ 		if (port->type != HSR_PT_MASTER && is_slave_up(port->dev)) {
+ 			netif_carrier_on(master->dev);
+ 			return true;
+@@ -105,7 +105,7 @@ int hsr_get_max_mtu(struct hsr_priv *hsr)
+ 	struct hsr_port *port;
+ 
+ 	mtu_max = ETH_DATA_LEN;
+-	hsr_for_each_port(hsr, port)
++	hsr_for_each_port_rtnl(hsr, port)
+ 		if (port->type != HSR_PT_MASTER)
+ 			mtu_max = min(port->dev->mtu, mtu_max);
+ 
+@@ -139,7 +139,7 @@ static int hsr_dev_open(struct net_device *dev)
+ 
+ 	hsr = netdev_priv(dev);
+ 
+-	hsr_for_each_port(hsr, port) {
++	hsr_for_each_port_rtnl(hsr, port) {
+ 		if (port->type == HSR_PT_MASTER)
+ 			continue;
+ 		switch (port->type) {
+@@ -172,7 +172,7 @@ static int hsr_dev_close(struct net_device *dev)
+ 	struct hsr_priv *hsr;
+ 
+ 	hsr = netdev_priv(dev);
+-	hsr_for_each_port(hsr, port) {
++	hsr_for_each_port_rtnl(hsr, port) {
+ 		if (port->type == HSR_PT_MASTER)
+ 			continue;
+ 		switch (port->type) {
+@@ -205,7 +205,7 @@ static netdev_features_t hsr_features_recompute(struct hsr_priv *hsr,
+ 	 * may become enabled.
+ 	 */
+ 	features &= ~NETIF_F_ONE_FOR_ALL;
+-	hsr_for_each_port(hsr, port)
++	hsr_for_each_port_rtnl(hsr, port)
+ 		features = netdev_increment_features(features,
+ 						     port->dev->features,
+ 						     mask);
+@@ -226,6 +226,7 @@ static netdev_tx_t hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+ 	struct hsr_priv *hsr = netdev_priv(dev);
+ 	struct hsr_port *master;
+ 
++	rcu_read_lock();
+ 	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
+ 	if (master) {
+ 		skb->dev = master->dev;
+@@ -238,6 +239,8 @@ static netdev_tx_t hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		dev_core_stats_tx_dropped_inc(dev);
+ 		dev_kfree_skb_any(skb);
+ 	}
++	rcu_read_unlock();
++
+ 	return NETDEV_TX_OK;
+ }
+ 
+@@ -483,7 +486,7 @@ static void hsr_set_rx_mode(struct net_device *dev)
+ 
+ 	hsr = netdev_priv(dev);
+ 
+-	hsr_for_each_port(hsr, port) {
++	hsr_for_each_port_rtnl(hsr, port) {
+ 		if (port->type == HSR_PT_MASTER)
+ 			continue;
+ 		switch (port->type) {
+@@ -505,7 +508,7 @@ static void hsr_change_rx_flags(struct net_device *dev, int change)
+ 
+ 	hsr = netdev_priv(dev);
+ 
+-	hsr_for_each_port(hsr, port) {
++	hsr_for_each_port_rtnl(hsr, port) {
+ 		if (port->type == HSR_PT_MASTER)
+ 			continue;
+ 		switch (port->type) {
+@@ -522,6 +525,77 @@ static void hsr_change_rx_flags(struct net_device *dev, int change)
+ 	}
+ }
+ 
++static int hsr_ndo_vlan_rx_add_vid(struct net_device *dev,
++				   __be16 proto, u16 vid)
++{
++	bool is_slave_a_added = false;
++	bool is_slave_b_added = false;
++	struct hsr_port *port;
++	struct hsr_priv *hsr;
++	int ret = 0;
++
++	hsr = netdev_priv(dev);
++
++	hsr_for_each_port_rtnl(hsr, port) {
++		if (port->type == HSR_PT_MASTER ||
++		    port->type == HSR_PT_INTERLINK)
++			continue;
++
++		ret = vlan_vid_add(port->dev, proto, vid);
++		switch (port->type) {
++		case HSR_PT_SLAVE_A:
++			if (ret) {
++				/* clean up Slave-B */
++				netdev_err(dev, "add vid failed for Slave-A\n");
++				if (is_slave_b_added)
++					vlan_vid_del(port->dev, proto, vid);
++				return ret;
++			}
++
++			is_slave_a_added = true;
++			break;
++
++		case HSR_PT_SLAVE_B:
++			if (ret) {
++				/* clean up Slave-A */
++				netdev_err(dev, "add vid failed for Slave-B\n");
++				if (is_slave_a_added)
++					vlan_vid_del(port->dev, proto, vid);
++				return ret;
++			}
++
++			is_slave_b_added = true;
++			break;
++		default:
++			break;
++		}
++	}
++
++	return 0;
++}
++
++static int hsr_ndo_vlan_rx_kill_vid(struct net_device *dev,
++				    __be16 proto, u16 vid)
++{
++	struct hsr_port *port;
++	struct hsr_priv *hsr;
++
++	hsr = netdev_priv(dev);
++
++	hsr_for_each_port_rtnl(hsr, port) {
++		switch (port->type) {
++		case HSR_PT_SLAVE_A:
++		case HSR_PT_SLAVE_B:
++			vlan_vid_del(port->dev, proto, vid);
++			break;
++		default:
++			break;
++		}
++	}
++
++	return 0;
++}
++
+ static const struct net_device_ops hsr_device_ops = {
+ 	.ndo_change_mtu = hsr_dev_change_mtu,
+ 	.ndo_open = hsr_dev_open,
+@@ -530,6 +604,8 @@ static const struct net_device_ops hsr_device_ops = {
+ 	.ndo_change_rx_flags = hsr_change_rx_flags,
+ 	.ndo_fix_features = hsr_fix_features,
+ 	.ndo_set_rx_mode = hsr_set_rx_mode,
++	.ndo_vlan_rx_add_vid = hsr_ndo_vlan_rx_add_vid,
++	.ndo_vlan_rx_kill_vid = hsr_ndo_vlan_rx_kill_vid,
+ };
+ 
+ static const struct device_type hsr_type = {
+@@ -578,7 +654,8 @@ void hsr_dev_setup(struct net_device *dev)
+ 
+ 	dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
+ 			   NETIF_F_GSO_MASK | NETIF_F_HW_CSUM |
+-			   NETIF_F_HW_VLAN_CTAG_TX;
++			   NETIF_F_HW_VLAN_CTAG_TX |
++			   NETIF_F_HW_VLAN_CTAG_FILTER;
+ 
+ 	dev->features = dev->hw_features;
+ 
+@@ -661,6 +738,10 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
+ 	    (slave[1]->features & NETIF_F_HW_HSR_FWD))
+ 		hsr->fwd_offloaded = true;
+ 
++	if ((slave[0]->features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
++	    (slave[1]->features & NETIF_F_HW_VLAN_CTAG_FILTER))
++		hsr_dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
++
+ 	res = register_netdevice(hsr_dev);
+ 	if (res)
+ 		goto err_unregister;
+diff --git a/net/hsr/hsr_main.c b/net/hsr/hsr_main.c
+index d7ae32473c41a6..9e4ce1ccc0229a 100644
+--- a/net/hsr/hsr_main.c
++++ b/net/hsr/hsr_main.c
+@@ -22,7 +22,7 @@ static bool hsr_slave_empty(struct hsr_priv *hsr)
+ {
+ 	struct hsr_port *port;
+ 
+-	hsr_for_each_port(hsr, port)
++	hsr_for_each_port_rtnl(hsr, port)
+ 		if (port->type != HSR_PT_MASTER)
+ 			return false;
+ 	return true;
+@@ -125,7 +125,7 @@ struct hsr_port *hsr_port_get_hsr(struct hsr_priv *hsr, enum hsr_port_type pt)
+ {
+ 	struct hsr_port *port;
+ 
+-	hsr_for_each_port(hsr, port)
++	hsr_for_each_port_rtnl(hsr, port)
+ 		if (port->type == pt)
+ 			return port;
+ 	return NULL;
+diff --git a/net/hsr/hsr_main.h b/net/hsr/hsr_main.h
+index e26244456f6396..f066c9c401c605 100644
+--- a/net/hsr/hsr_main.h
++++ b/net/hsr/hsr_main.h
+@@ -231,6 +231,9 @@ struct hsr_priv {
+ #define hsr_for_each_port(hsr, port) \
+ 	list_for_each_entry_rcu((port), &(hsr)->ports, port_list)
+ 
++#define hsr_for_each_port_rtnl(hsr, port) \
++	list_for_each_entry_rcu((port), &(hsr)->ports, port_list, lockdep_rtnl_is_held())
++
+ struct hsr_port *hsr_port_get_hsr(struct hsr_priv *hsr, enum hsr_port_type pt);
+ 
+ /* Caller must ensure skb is a valid HSR frame */
+diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
+index f65d2f7273813b..8392d304a72ebe 100644
+--- a/net/ipv4/ip_tunnel_core.c
++++ b/net/ipv4/ip_tunnel_core.c
+@@ -204,6 +204,9 @@ static int iptunnel_pmtud_build_icmp(struct sk_buff *skb, int mtu)
+ 	if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct iphdr)))
+ 		return -EINVAL;
+ 
++	if (skb_is_gso(skb))
++		skb_gso_reset(skb);
++
+ 	skb_copy_bits(skb, skb_mac_offset(skb), &eh, ETH_HLEN);
+ 	pskb_pull(skb, ETH_HLEN);
+ 	skb_reset_network_header(skb);
+@@ -298,6 +301,9 @@ static int iptunnel_pmtud_build_icmpv6(struct sk_buff *skb, int mtu)
+ 	if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct ipv6hdr)))
+ 		return -EINVAL;
+ 
++	if (skb_is_gso(skb))
++		skb_gso_reset(skb);
++
+ 	skb_copy_bits(skb, skb_mac_offset(skb), &eh, ETH_HLEN);
+ 	pskb_pull(skb, ETH_HLEN);
+ 	skb_reset_network_header(skb);
+diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
+index 22e8a2af5dd8b0..8372ca512a7552 100644
+--- a/net/ipv4/tcp_bpf.c
++++ b/net/ipv4/tcp_bpf.c
+@@ -408,8 +408,11 @@ static int tcp_bpf_send_verdict(struct sock *sk, struct sk_psock *psock,
+ 		if (!psock->cork) {
+ 			psock->cork = kzalloc(sizeof(*psock->cork),
+ 					      GFP_ATOMIC | __GFP_NOWARN);
+-			if (!psock->cork)
++			if (!psock->cork) {
++				sk_msg_free(sk, msg);
++				*copied = 0;
+ 				return -ENOMEM;
++			}
+ 		}
+ 		memcpy(psock->cork, msg, sizeof(*msg));
+ 		return 0;
+diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c
+index 3caa0a9d3b3885..25d2b65653cd40 100644
+--- a/net/mptcp/sockopt.c
++++ b/net/mptcp/sockopt.c
+@@ -1508,13 +1508,12 @@ static void sync_socket_options(struct mptcp_sock *msk, struct sock *ssk)
+ {
+ 	static const unsigned int tx_rx_locks = SOCK_RCVBUF_LOCK | SOCK_SNDBUF_LOCK;
+ 	struct sock *sk = (struct sock *)msk;
++	bool keep_open;
+ 
+-	if (ssk->sk_prot->keepalive) {
+-		if (sock_flag(sk, SOCK_KEEPOPEN))
+-			ssk->sk_prot->keepalive(ssk, 1);
+-		else
+-			ssk->sk_prot->keepalive(ssk, 0);
+-	}
++	keep_open = sock_flag(sk, SOCK_KEEPOPEN);
++	if (ssk->sk_prot->keepalive)
++		ssk->sk_prot->keepalive(ssk, keep_open);
++	sock_valbool_flag(ssk, SOCK_KEEPOPEN, keep_open);
+ 
+ 	ssk->sk_priority = sk->sk_priority;
+ 	ssk->sk_bound_dev_if = sk->sk_bound_dev_if;
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 3743e4249dc8c7..3028d388b29333 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -995,11 +995,14 @@ nf_tables_chain_type_lookup(struct net *net, const struct nlattr *nla,
+ 	return ERR_PTR(-ENOENT);
+ }
+ 
+-static __be16 nft_base_seq(const struct net *net)
++static unsigned int nft_base_seq(const struct net *net)
+ {
+-	struct nftables_pernet *nft_net = nft_pernet(net);
++	return READ_ONCE(net->nft.base_seq);
++}
+ 
+-	return htons(nft_net->base_seq & 0xffff);
++static __be16 nft_base_seq_be16(const struct net *net)
++{
++	return htons(nft_base_seq(net) & 0xffff);
+ }
+ 
+ static const struct nla_policy nft_table_policy[NFTA_TABLE_MAX + 1] = {
+@@ -1017,9 +1020,9 @@ static int nf_tables_fill_table_info(struct sk_buff *skb, struct net *net,
+ {
+ 	struct nlmsghdr *nlh;
+ 
+-	event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event);
+-	nlh = nfnl_msg_put(skb, portid, seq, event, flags, family,
+-			   NFNETLINK_V0, nft_base_seq(net));
++	nlh = nfnl_msg_put(skb, portid, seq,
++			   nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event),
++			   flags, family, NFNETLINK_V0, nft_base_seq_be16(net));
+ 	if (!nlh)
+ 		goto nla_put_failure;
+ 
+@@ -1029,6 +1032,12 @@ static int nf_tables_fill_table_info(struct sk_buff *skb, struct net *net,
+ 			 NFTA_TABLE_PAD))
+ 		goto nla_put_failure;
+ 
++	if (event == NFT_MSG_DELTABLE ||
++	    event == NFT_MSG_DESTROYTABLE) {
++		nlmsg_end(skb, nlh);
++		return 0;
++	}
++
+ 	if (nla_put_be32(skb, NFTA_TABLE_FLAGS,
+ 			 htonl(table->flags & NFT_TABLE_F_MASK)))
+ 		goto nla_put_failure;
+@@ -1106,7 +1115,7 @@ static int nf_tables_dump_tables(struct sk_buff *skb,
+ 
+ 	rcu_read_lock();
+ 	nft_net = nft_pernet(net);
+-	cb->seq = READ_ONCE(nft_net->base_seq);
++	cb->seq = nft_base_seq(net);
+ 
+ 	list_for_each_entry_rcu(table, &nft_net->tables, list) {
+ 		if (family != NFPROTO_UNSPEC && family != table->family)
+@@ -1872,9 +1881,9 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net,
+ {
+ 	struct nlmsghdr *nlh;
+ 
+-	event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event);
+-	nlh = nfnl_msg_put(skb, portid, seq, event, flags, family,
+-			   NFNETLINK_V0, nft_base_seq(net));
++	nlh = nfnl_msg_put(skb, portid, seq,
++			   nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event),
++			   flags, family, NFNETLINK_V0, nft_base_seq_be16(net));
+ 	if (!nlh)
+ 		goto nla_put_failure;
+ 
+@@ -1884,6 +1893,13 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net,
+ 			 NFTA_CHAIN_PAD))
+ 		goto nla_put_failure;
+ 
++	if (!hook_list &&
++	    (event == NFT_MSG_DELCHAIN ||
++	     event == NFT_MSG_DESTROYCHAIN)) {
++		nlmsg_end(skb, nlh);
++		return 0;
++	}
++
+ 	if (nft_is_base_chain(chain)) {
+ 		const struct nft_base_chain *basechain = nft_base_chain(chain);
+ 		struct nft_stats __percpu *stats;
+@@ -1970,7 +1986,7 @@ static int nf_tables_dump_chains(struct sk_buff *skb,
+ 
+ 	rcu_read_lock();
+ 	nft_net = nft_pernet(net);
+-	cb->seq = READ_ONCE(nft_net->base_seq);
++	cb->seq = nft_base_seq(net);
+ 
+ 	list_for_each_entry_rcu(table, &nft_net->tables, list) {
+ 		if (family != NFPROTO_UNSPEC && family != table->family)
+@@ -3467,7 +3483,7 @@ static int nf_tables_fill_rule_info(struct sk_buff *skb, struct net *net,
+ 	u16 type = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event);
+ 
+ 	nlh = nfnl_msg_put(skb, portid, seq, type, flags, family, NFNETLINK_V0,
+-			   nft_base_seq(net));
++			   nft_base_seq_be16(net));
+ 	if (!nlh)
+ 		goto nla_put_failure;
+ 
+@@ -3635,7 +3651,7 @@ static int nf_tables_dump_rules(struct sk_buff *skb,
+ 
+ 	rcu_read_lock();
+ 	nft_net = nft_pernet(net);
+-	cb->seq = READ_ONCE(nft_net->base_seq);
++	cb->seq = nft_base_seq(net);
+ 
+ 	list_for_each_entry_rcu(table, &nft_net->tables, list) {
+ 		if (family != NFPROTO_UNSPEC && family != table->family)
+@@ -3846,7 +3862,7 @@ static int nf_tables_getrule_reset(struct sk_buff *skb,
+ 	buf = kasprintf(GFP_ATOMIC, "%.*s:%u",
+ 			nla_len(nla[NFTA_RULE_TABLE]),
+ 			(char *)nla_data(nla[NFTA_RULE_TABLE]),
+-			nft_net->base_seq);
++			nft_base_seq(net));
+ 	audit_log_nfcfg(buf, info->nfmsg->nfgen_family, 1,
+ 			AUDIT_NFT_OP_RULE_RESET, GFP_ATOMIC);
+ 	kfree(buf);
+@@ -4654,9 +4670,10 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
+ 	u32 seq = ctx->seq;
+ 	int i;
+ 
+-	event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event);
+-	nlh = nfnl_msg_put(skb, portid, seq, event, flags, ctx->family,
+-			   NFNETLINK_V0, nft_base_seq(ctx->net));
++	nlh = nfnl_msg_put(skb, portid, seq,
++			   nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event),
++			   flags, ctx->family, NFNETLINK_V0,
++			   nft_base_seq_be16(ctx->net));
+ 	if (!nlh)
+ 		goto nla_put_failure;
+ 
+@@ -4668,6 +4685,12 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
+ 			 NFTA_SET_PAD))
+ 		goto nla_put_failure;
+ 
++	if (event == NFT_MSG_DELSET ||
++	    event == NFT_MSG_DESTROYSET) {
++		nlmsg_end(skb, nlh);
++		return 0;
++	}
++
+ 	if (set->flags != 0)
+ 		if (nla_put_be32(skb, NFTA_SET_FLAGS, htonl(set->flags)))
+ 			goto nla_put_failure;
+@@ -4792,7 +4815,7 @@ static int nf_tables_dump_sets(struct sk_buff *skb, struct netlink_callback *cb)
+ 
+ 	rcu_read_lock();
+ 	nft_net = nft_pernet(net);
+-	cb->seq = READ_ONCE(nft_net->base_seq);
++	cb->seq = nft_base_seq(net);
+ 
+ 	list_for_each_entry_rcu(table, &nft_net->tables, list) {
+ 		if (ctx->family != NFPROTO_UNSPEC &&
+@@ -5968,7 +5991,7 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
+ 
+ 	rcu_read_lock();
+ 	nft_net = nft_pernet(net);
+-	cb->seq = READ_ONCE(nft_net->base_seq);
++	cb->seq = nft_base_seq(net);
+ 
+ 	list_for_each_entry_rcu(table, &nft_net->tables, list) {
+ 		if (dump_ctx->ctx.family != NFPROTO_UNSPEC &&
+@@ -5997,7 +6020,7 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
+ 	seq    = cb->nlh->nlmsg_seq;
+ 
+ 	nlh = nfnl_msg_put(skb, portid, seq, event, NLM_F_MULTI,
+-			   table->family, NFNETLINK_V0, nft_base_seq(net));
++			   table->family, NFNETLINK_V0, nft_base_seq_be16(net));
+ 	if (!nlh)
+ 		goto nla_put_failure;
+ 
+@@ -6090,7 +6113,7 @@ static int nf_tables_fill_setelem_info(struct sk_buff *skb,
+ 
+ 	event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event);
+ 	nlh = nfnl_msg_put(skb, portid, seq, event, flags, ctx->family,
+-			   NFNETLINK_V0, nft_base_seq(ctx->net));
++			   NFNETLINK_V0, nft_base_seq_be16(ctx->net));
+ 	if (!nlh)
+ 		goto nla_put_failure;
+ 
+@@ -6389,7 +6412,7 @@ static int nf_tables_getsetelem_reset(struct sk_buff *skb,
+ 		}
+ 		nelems++;
+ 	}
+-	audit_log_nft_set_reset(dump_ctx.ctx.table, nft_net->base_seq, nelems);
++	audit_log_nft_set_reset(dump_ctx.ctx.table, nft_base_seq(info->net), nelems);
+ 
+ out_unlock:
+ 	rcu_read_unlock();
+@@ -7990,20 +8013,26 @@ static int nf_tables_fill_obj_info(struct sk_buff *skb, struct net *net,
+ {
+ 	struct nlmsghdr *nlh;
+ 
+-	event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event);
+-	nlh = nfnl_msg_put(skb, portid, seq, event, flags, family,
+-			   NFNETLINK_V0, nft_base_seq(net));
++	nlh = nfnl_msg_put(skb, portid, seq,
++			   nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event),
++			   flags, family, NFNETLINK_V0, nft_base_seq_be16(net));
+ 	if (!nlh)
+ 		goto nla_put_failure;
+ 
+ 	if (nla_put_string(skb, NFTA_OBJ_TABLE, table->name) ||
+ 	    nla_put_string(skb, NFTA_OBJ_NAME, obj->key.name) ||
++	    nla_put_be32(skb, NFTA_OBJ_TYPE, htonl(obj->ops->type->type)) ||
+ 	    nla_put_be64(skb, NFTA_OBJ_HANDLE, cpu_to_be64(obj->handle),
+ 			 NFTA_OBJ_PAD))
+ 		goto nla_put_failure;
+ 
+-	if (nla_put_be32(skb, NFTA_OBJ_TYPE, htonl(obj->ops->type->type)) ||
+-	    nla_put_be32(skb, NFTA_OBJ_USE, htonl(obj->use)) ||
++	if (event == NFT_MSG_DELOBJ ||
++	    event == NFT_MSG_DESTROYOBJ) {
++		nlmsg_end(skb, nlh);
++		return 0;
++	}
++
++	if (nla_put_be32(skb, NFTA_OBJ_USE, htonl(obj->use)) ||
+ 	    nft_object_dump(skb, NFTA_OBJ_DATA, obj, reset))
+ 		goto nla_put_failure;
+ 
+@@ -8051,7 +8080,7 @@ static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb)
+ 
+ 	rcu_read_lock();
+ 	nft_net = nft_pernet(net);
+-	cb->seq = READ_ONCE(nft_net->base_seq);
++	cb->seq = nft_base_seq(net);
+ 
+ 	list_for_each_entry_rcu(table, &nft_net->tables, list) {
+ 		if (family != NFPROTO_UNSPEC && family != table->family)
+@@ -8085,7 +8114,7 @@ static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb)
+ 			idx++;
+ 		}
+ 		if (ctx->reset && entries)
+-			audit_log_obj_reset(table, nft_net->base_seq, entries);
++			audit_log_obj_reset(table, nft_base_seq(net), entries);
+ 		if (rc < 0)
+ 			break;
+ 	}
+@@ -8254,7 +8283,7 @@ static int nf_tables_getobj_reset(struct sk_buff *skb,
+ 	buf = kasprintf(GFP_ATOMIC, "%.*s:%u",
+ 			nla_len(nla[NFTA_OBJ_TABLE]),
+ 			(char *)nla_data(nla[NFTA_OBJ_TABLE]),
+-			nft_net->base_seq);
++			nft_base_seq(net));
+ 	audit_log_nfcfg(buf, info->nfmsg->nfgen_family, 1,
+ 			AUDIT_NFT_OP_OBJ_RESET, GFP_ATOMIC);
+ 	kfree(buf);
+@@ -8359,9 +8388,8 @@ void nft_obj_notify(struct net *net, const struct nft_table *table,
+ 		    struct nft_object *obj, u32 portid, u32 seq, int event,
+ 		    u16 flags, int family, int report, gfp_t gfp)
+ {
+-	struct nftables_pernet *nft_net = nft_pernet(net);
+ 	char *buf = kasprintf(gfp, "%s:%u",
+-			      table->name, nft_net->base_seq);
++			      table->name, nft_base_seq(net));
+ 
+ 	audit_log_nfcfg(buf,
+ 			family,
+@@ -9008,9 +9036,9 @@ static int nf_tables_fill_flowtable_info(struct sk_buff *skb, struct net *net,
+ 	struct nft_hook *hook;
+ 	struct nlmsghdr *nlh;
+ 
+-	event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event);
+-	nlh = nfnl_msg_put(skb, portid, seq, event, flags, family,
+-			   NFNETLINK_V0, nft_base_seq(net));
++	nlh = nfnl_msg_put(skb, portid, seq,
++			   nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event),
++			   flags, family, NFNETLINK_V0, nft_base_seq_be16(net));
+ 	if (!nlh)
+ 		goto nla_put_failure;
+ 
+@@ -9020,6 +9048,13 @@ static int nf_tables_fill_flowtable_info(struct sk_buff *skb, struct net *net,
+ 			 NFTA_FLOWTABLE_PAD))
+ 		goto nla_put_failure;
+ 
++	if (!hook_list &&
++	    (event == NFT_MSG_DELFLOWTABLE ||
++	     event == NFT_MSG_DESTROYFLOWTABLE)) {
++		nlmsg_end(skb, nlh);
++		return 0;
++	}
++
+ 	if (nla_put_be32(skb, NFTA_FLOWTABLE_USE, htonl(flowtable->use)) ||
+ 	    nla_put_be32(skb, NFTA_FLOWTABLE_FLAGS, htonl(flowtable->data.flags)))
+ 		goto nla_put_failure;
+@@ -9071,7 +9106,7 @@ static int nf_tables_dump_flowtable(struct sk_buff *skb,
+ 
+ 	rcu_read_lock();
+ 	nft_net = nft_pernet(net);
+-	cb->seq = READ_ONCE(nft_net->base_seq);
++	cb->seq = nft_base_seq(net);
+ 
+ 	list_for_each_entry_rcu(table, &nft_net->tables, list) {
+ 		if (family != NFPROTO_UNSPEC && family != table->family)
+@@ -9256,17 +9291,16 @@ static void nf_tables_flowtable_destroy(struct nft_flowtable *flowtable)
+ static int nf_tables_fill_gen_info(struct sk_buff *skb, struct net *net,
+ 				   u32 portid, u32 seq)
+ {
+-	struct nftables_pernet *nft_net = nft_pernet(net);
+ 	struct nlmsghdr *nlh;
+ 	char buf[TASK_COMM_LEN];
+ 	int event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, NFT_MSG_NEWGEN);
+ 
+ 	nlh = nfnl_msg_put(skb, portid, seq, event, 0, AF_UNSPEC,
+-			   NFNETLINK_V0, nft_base_seq(net));
++			   NFNETLINK_V0, nft_base_seq_be16(net));
+ 	if (!nlh)
+ 		goto nla_put_failure;
+ 
+-	if (nla_put_be32(skb, NFTA_GEN_ID, htonl(nft_net->base_seq)) ||
++	if (nla_put_be32(skb, NFTA_GEN_ID, htonl(nft_base_seq(net))) ||
+ 	    nla_put_be32(skb, NFTA_GEN_PROC_PID, htonl(task_pid_nr(current))) ||
+ 	    nla_put_string(skb, NFTA_GEN_PROC_NAME, get_task_comm(buf, current)))
+ 		goto nla_put_failure;
+@@ -10429,11 +10463,12 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
+ 	 * Bump generation counter, invalidate any dump in progress.
+ 	 * Cannot fail after this point.
+ 	 */
+-	base_seq = READ_ONCE(nft_net->base_seq);
++	base_seq = nft_base_seq(net);
+ 	while (++base_seq == 0)
+ 		;
+ 
+-	WRITE_ONCE(nft_net->base_seq, base_seq);
++	/* pairs with smp_load_acquire in nft_lookup_eval */
++	smp_store_release(&net->nft.base_seq, base_seq);
+ 
+ 	gc_seq = nft_gc_seq_begin(nft_net);
+ 
+@@ -10665,7 +10700,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
+ 
+ 	nft_commit_notify(net, NETLINK_CB(skb).portid);
+ 	nf_tables_gen_notify(net, skb, NFT_MSG_NEWGEN);
+-	nf_tables_commit_audit_log(&adl, nft_net->base_seq);
++	nf_tables_commit_audit_log(&adl, nft_base_seq(net));
+ 
+ 	nft_gc_seq_end(nft_net, gc_seq);
+ 	nft_net->validate_state = NFT_VALIDATE_SKIP;
+@@ -10999,7 +11034,7 @@ static bool nf_tables_valid_genid(struct net *net, u32 genid)
+ 	mutex_lock(&nft_net->commit_mutex);
+ 	nft_net->tstamp = get_jiffies_64();
+ 
+-	genid_ok = genid == 0 || nft_net->base_seq == genid;
++	genid_ok = genid == 0 || nft_base_seq(net) == genid;
+ 	if (!genid_ok)
+ 		mutex_unlock(&nft_net->commit_mutex);
+ 
+@@ -11677,7 +11712,7 @@ static int __net_init nf_tables_init_net(struct net *net)
+ 	INIT_LIST_HEAD(&nft_net->module_list);
+ 	INIT_LIST_HEAD(&nft_net->notify_list);
+ 	mutex_init(&nft_net->commit_mutex);
+-	nft_net->base_seq = 1;
++	net->nft.base_seq = 1;
+ 	nft_net->gc_seq = 0;
+ 	nft_net->validate_state = NFT_VALIDATE_SKIP;
+ 	INIT_WORK(&nft_net->destroy_work, nf_tables_trans_destroy_work);
+diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
+index 88922e0e8e8377..e24493d9e77615 100644
+--- a/net/netfilter/nft_dynset.c
++++ b/net/netfilter/nft_dynset.c
+@@ -91,8 +91,9 @@ void nft_dynset_eval(const struct nft_expr *expr,
+ 		return;
+ 	}
+ 
+-	if (set->ops->update(set, &regs->data[priv->sreg_key], nft_dynset_new,
+-			     expr, regs, &ext)) {
++	ext = set->ops->update(set, &regs->data[priv->sreg_key], nft_dynset_new,
++			     expr, regs);
++	if (ext) {
+ 		if (priv->op == NFT_DYNSET_OP_UPDATE &&
+ 		    nft_set_ext_exists(ext, NFT_SET_EXT_TIMEOUT) &&
+ 		    READ_ONCE(nft_set_ext_timeout(ext)->timeout) != 0) {
+diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
+index 63ef832b8aa710..58c5b14889c474 100644
+--- a/net/netfilter/nft_lookup.c
++++ b/net/netfilter/nft_lookup.c
+@@ -24,36 +24,73 @@ struct nft_lookup {
+ 	struct nft_set_binding		binding;
+ };
+ 
+-#ifdef CONFIG_MITIGATION_RETPOLINE
+-bool nft_set_do_lookup(const struct net *net, const struct nft_set *set,
+-		       const u32 *key, const struct nft_set_ext **ext)
++static const struct nft_set_ext *
++__nft_set_do_lookup(const struct net *net, const struct nft_set *set,
++		    const u32 *key)
+ {
++#ifdef CONFIG_MITIGATION_RETPOLINE
+ 	if (set->ops == &nft_set_hash_fast_type.ops)
+-		return nft_hash_lookup_fast(net, set, key, ext);
++		return nft_hash_lookup_fast(net, set, key);
+ 	if (set->ops == &nft_set_hash_type.ops)
+-		return nft_hash_lookup(net, set, key, ext);
++		return nft_hash_lookup(net, set, key);
+ 
+ 	if (set->ops == &nft_set_rhash_type.ops)
+-		return nft_rhash_lookup(net, set, key, ext);
++		return nft_rhash_lookup(net, set, key);
+ 
+ 	if (set->ops == &nft_set_bitmap_type.ops)
+-		return nft_bitmap_lookup(net, set, key, ext);
++		return nft_bitmap_lookup(net, set, key);
+ 
+ 	if (set->ops == &nft_set_pipapo_type.ops)
+-		return nft_pipapo_lookup(net, set, key, ext);
++		return nft_pipapo_lookup(net, set, key);
+ #if defined(CONFIG_X86_64) && !defined(CONFIG_UML)
+ 	if (set->ops == &nft_set_pipapo_avx2_type.ops)
+-		return nft_pipapo_avx2_lookup(net, set, key, ext);
++		return nft_pipapo_avx2_lookup(net, set, key);
+ #endif
+ 
+ 	if (set->ops == &nft_set_rbtree_type.ops)
+-		return nft_rbtree_lookup(net, set, key, ext);
++		return nft_rbtree_lookup(net, set, key);
+ 
+ 	WARN_ON_ONCE(1);
+-	return set->ops->lookup(net, set, key, ext);
++#endif
++	return set->ops->lookup(net, set, key);
++}
++
++static unsigned int nft_base_seq(const struct net *net)
++{
++	/* pairs with smp_store_release() in nf_tables_commit() */
++	return smp_load_acquire(&net->nft.base_seq);
++}
++
++static bool nft_lookup_should_retry(const struct net *net, unsigned int seq)
++{
++	return unlikely(seq != nft_base_seq(net));
++}
++
++const struct nft_set_ext *
++nft_set_do_lookup(const struct net *net, const struct nft_set *set,
++		  const u32 *key)
++{
++	const struct nft_set_ext *ext;
++	unsigned int base_seq;
++
++	do {
++		base_seq = nft_base_seq(net);
++
++		ext = __nft_set_do_lookup(net, set, key);
++		if (ext)
++			break;
++		/* No match?  There is a small chance that lookup was
++		 * performed in the old generation, but nf_tables_commit()
++		 * already unlinked a (matching) element.
++		 *
++		 * We need to repeat the lookup to make sure that we didn't
++		 * miss a matching element in the new generation.
++		 */
++	} while (nft_lookup_should_retry(net, base_seq));
++
++	return ext;
+ }
+ EXPORT_SYMBOL_GPL(nft_set_do_lookup);
+-#endif
+ 
+ void nft_lookup_eval(const struct nft_expr *expr,
+ 		     struct nft_regs *regs,
+@@ -61,12 +98,12 @@ void nft_lookup_eval(const struct nft_expr *expr,
+ {
+ 	const struct nft_lookup *priv = nft_expr_priv(expr);
+ 	const struct nft_set *set = priv->set;
+-	const struct nft_set_ext *ext = NULL;
+ 	const struct net *net = nft_net(pkt);
++	const struct nft_set_ext *ext;
+ 	bool found;
+ 
+-	found =	nft_set_do_lookup(net, set, &regs->data[priv->sreg], &ext) ^
+-				  priv->invert;
++	ext = nft_set_do_lookup(net, set, &regs->data[priv->sreg]);
++	found = !!ext ^ priv->invert;
+ 	if (!found) {
+ 		ext = nft_set_catchall_lookup(net, set);
+ 		if (!ext) {
+diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c
+index 09da7a3f9f9677..8ee66a86c3bc75 100644
+--- a/net/netfilter/nft_objref.c
++++ b/net/netfilter/nft_objref.c
+@@ -111,10 +111,9 @@ void nft_objref_map_eval(const struct nft_expr *expr,
+ 	struct net *net = nft_net(pkt);
+ 	const struct nft_set_ext *ext;
+ 	struct nft_object *obj;
+-	bool found;
+ 
+-	found = nft_set_do_lookup(net, set, &regs->data[priv->sreg], &ext);
+-	if (!found) {
++	ext = nft_set_do_lookup(net, set, &regs->data[priv->sreg]);
++	if (!ext) {
+ 		ext = nft_set_catchall_lookup(net, set);
+ 		if (!ext) {
+ 			regs->verdict.code = NFT_BREAK;
+diff --git a/net/netfilter/nft_set_bitmap.c b/net/netfilter/nft_set_bitmap.c
+index 1caa04619dc6da..b4765fb92d727c 100644
+--- a/net/netfilter/nft_set_bitmap.c
++++ b/net/netfilter/nft_set_bitmap.c
+@@ -75,16 +75,21 @@ nft_bitmap_active(const u8 *bitmap, u32 idx, u32 off, u8 genmask)
+ }
+ 
+ INDIRECT_CALLABLE_SCOPE
+-bool nft_bitmap_lookup(const struct net *net, const struct nft_set *set,
+-		       const u32 *key, const struct nft_set_ext **ext)
++const struct nft_set_ext *
++nft_bitmap_lookup(const struct net *net, const struct nft_set *set,
++		  const u32 *key)
+ {
+ 	const struct nft_bitmap *priv = nft_set_priv(set);
++	static const struct nft_set_ext found;
+ 	u8 genmask = nft_genmask_cur(net);
+ 	u32 idx, off;
+ 
+ 	nft_bitmap_location(set, key, &idx, &off);
+ 
+-	return nft_bitmap_active(priv->bitmap, idx, off, genmask);
++	if (nft_bitmap_active(priv->bitmap, idx, off, genmask))
++		return &found;
++
++	return NULL;
+ }
+ 
+ static struct nft_bitmap_elem *
+diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c
+index 4b3452dff2ec08..900eddb93dcc8d 100644
+--- a/net/netfilter/nft_set_hash.c
++++ b/net/netfilter/nft_set_hash.c
+@@ -81,8 +81,9 @@ static const struct rhashtable_params nft_rhash_params = {
+ };
+ 
+ INDIRECT_CALLABLE_SCOPE
+-bool nft_rhash_lookup(const struct net *net, const struct nft_set *set,
+-		      const u32 *key, const struct nft_set_ext **ext)
++const struct nft_set_ext *
++nft_rhash_lookup(const struct net *net, const struct nft_set *set,
++		 const u32 *key)
+ {
+ 	struct nft_rhash *priv = nft_set_priv(set);
+ 	const struct nft_rhash_elem *he;
+@@ -95,9 +96,9 @@ bool nft_rhash_lookup(const struct net *net, const struct nft_set *set,
+ 
+ 	he = rhashtable_lookup(&priv->ht, &arg, nft_rhash_params);
+ 	if (he != NULL)
+-		*ext = &he->ext;
++		return &he->ext;
+ 
+-	return !!he;
++	return NULL;
+ }
+ 
+ static struct nft_elem_priv *
+@@ -120,14 +121,11 @@ nft_rhash_get(const struct net *net, const struct nft_set *set,
+ 	return ERR_PTR(-ENOENT);
+ }
+ 
+-static bool nft_rhash_update(struct nft_set *set, const u32 *key,
+-			     struct nft_elem_priv *
+-				   (*new)(struct nft_set *,
+-					  const struct nft_expr *,
+-					  struct nft_regs *regs),
+-			     const struct nft_expr *expr,
+-			     struct nft_regs *regs,
+-			     const struct nft_set_ext **ext)
++static const struct nft_set_ext *
++nft_rhash_update(struct nft_set *set, const u32 *key,
++		 struct nft_elem_priv *(*new)(struct nft_set *, const struct nft_expr *,
++		 struct nft_regs *regs),
++		 const struct nft_expr *expr, struct nft_regs *regs)
+ {
+ 	struct nft_rhash *priv = nft_set_priv(set);
+ 	struct nft_rhash_elem *he, *prev;
+@@ -161,14 +159,13 @@ static bool nft_rhash_update(struct nft_set *set, const u32 *key,
+ 	}
+ 
+ out:
+-	*ext = &he->ext;
+-	return true;
++	return &he->ext;
+ 
+ err2:
+ 	nft_set_elem_destroy(set, &he->priv, true);
+ 	atomic_dec(&set->nelems);
+ err1:
+-	return false;
++	return NULL;
+ }
+ 
+ static int nft_rhash_insert(const struct net *net, const struct nft_set *set,
+@@ -507,8 +504,9 @@ struct nft_hash_elem {
+ };
+ 
+ INDIRECT_CALLABLE_SCOPE
+-bool nft_hash_lookup(const struct net *net, const struct nft_set *set,
+-		     const u32 *key, const struct nft_set_ext **ext)
++const struct nft_set_ext *
++nft_hash_lookup(const struct net *net, const struct nft_set *set,
++		const u32 *key)
+ {
+ 	struct nft_hash *priv = nft_set_priv(set);
+ 	u8 genmask = nft_genmask_cur(net);
+@@ -519,12 +517,10 @@ bool nft_hash_lookup(const struct net *net, const struct nft_set *set,
+ 	hash = reciprocal_scale(hash, priv->buckets);
+ 	hlist_for_each_entry_rcu(he, &priv->table[hash], node) {
+ 		if (!memcmp(nft_set_ext_key(&he->ext), key, set->klen) &&
+-		    nft_set_elem_active(&he->ext, genmask)) {
+-			*ext = &he->ext;
+-			return true;
+-		}
++		    nft_set_elem_active(&he->ext, genmask))
++			return &he->ext;
+ 	}
+-	return false;
++	return NULL;
+ }
+ 
+ static struct nft_elem_priv *
+@@ -547,9 +543,9 @@ nft_hash_get(const struct net *net, const struct nft_set *set,
+ }
+ 
+ INDIRECT_CALLABLE_SCOPE
+-bool nft_hash_lookup_fast(const struct net *net,
+-			  const struct nft_set *set,
+-			  const u32 *key, const struct nft_set_ext **ext)
++const struct nft_set_ext *
++nft_hash_lookup_fast(const struct net *net, const struct nft_set *set,
++		     const u32 *key)
+ {
+ 	struct nft_hash *priv = nft_set_priv(set);
+ 	u8 genmask = nft_genmask_cur(net);
+@@ -562,12 +558,10 @@ bool nft_hash_lookup_fast(const struct net *net,
+ 	hlist_for_each_entry_rcu(he, &priv->table[hash], node) {
+ 		k2 = *(u32 *)nft_set_ext_key(&he->ext)->data;
+ 		if (k1 == k2 &&
+-		    nft_set_elem_active(&he->ext, genmask)) {
+-			*ext = &he->ext;
+-			return true;
+-		}
++		    nft_set_elem_active(&he->ext, genmask))
++			return &he->ext;
+ 	}
+-	return false;
++	return NULL;
+ }
+ 
+ static u32 nft_jhash(const struct nft_set *set, const struct nft_hash *priv,
+diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
+index 9e4e25f2458f99..793790d79d1384 100644
+--- a/net/netfilter/nft_set_pipapo.c
++++ b/net/netfilter/nft_set_pipapo.c
+@@ -397,37 +397,38 @@ int pipapo_refill(unsigned long *map, unsigned int len, unsigned int rules,
+ }
+ 
+ /**
+- * nft_pipapo_lookup() - Lookup function
+- * @net:	Network namespace
+- * @set:	nftables API set representation
+- * @key:	nftables API element representation containing key data
+- * @ext:	nftables API extension pointer, filled with matching reference
++ * pipapo_get() - Get matching element reference given key data
++ * @m:		storage containing the set elements
++ * @data:	Key data to be matched against existing elements
++ * @genmask:	If set, check that element is active in given genmask
++ * @tstamp:	timestamp to check for expired elements
+  *
+  * For more details, see DOC: Theory of Operation.
+  *
+- * Return: true on match, false otherwise.
++ * This is the main lookup function.  It matches key data against either
++ * the working match set or the uncommitted copy, depending on what the
++ * caller passed to us.
++ * nft_pipapo_get (lookup from userspace/control plane) and nft_pipapo_lookup
++ * (datapath lookup) pass the active copy.
++ * The insertion path will pass the uncommitted working copy.
++ *
++ * Return: pointer to &struct nft_pipapo_elem on match, NULL otherwise.
+  */
+-bool nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
+-		       const u32 *key, const struct nft_set_ext **ext)
++static struct nft_pipapo_elem *pipapo_get(const struct nft_pipapo_match *m,
++					  const u8 *data, u8 genmask,
++					  u64 tstamp)
+ {
+-	struct nft_pipapo *priv = nft_set_priv(set);
+ 	struct nft_pipapo_scratch *scratch;
+ 	unsigned long *res_map, *fill_map;
+-	u8 genmask = nft_genmask_cur(net);
+-	const struct nft_pipapo_match *m;
+ 	const struct nft_pipapo_field *f;
+-	const u8 *rp = (const u8 *)key;
+ 	bool map_index;
+ 	int i;
+ 
+ 	local_bh_disable();
+ 
+-	m = rcu_dereference(priv->match);
+-
+-	if (unlikely(!m || !*raw_cpu_ptr(m->scratch)))
+-		goto out;
+-
+ 	scratch = *raw_cpu_ptr(m->scratch);
++	if (unlikely(!scratch))
++		goto out;
+ 
+ 	map_index = scratch->map_index;
+ 
+@@ -444,12 +445,12 @@ bool nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
+ 		 * packet bytes value, then AND bucket value
+ 		 */
+ 		if (likely(f->bb == 8))
+-			pipapo_and_field_buckets_8bit(f, res_map, rp);
++			pipapo_and_field_buckets_8bit(f, res_map, data);
+ 		else
+-			pipapo_and_field_buckets_4bit(f, res_map, rp);
++			pipapo_and_field_buckets_4bit(f, res_map, data);
+ 		NFT_PIPAPO_GROUP_BITS_ARE_8_OR_4;
+ 
+-		rp += f->groups / NFT_PIPAPO_GROUPS_PER_BYTE(f);
++		data += f->groups / NFT_PIPAPO_GROUPS_PER_BYTE(f);
+ 
+ 		/* Now populate the bitmap for the next field, unless this is
+ 		 * the last field, in which case return the matched 'ext'
+@@ -465,13 +466,15 @@ bool nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
+ 			scratch->map_index = map_index;
+ 			local_bh_enable();
+ 
+-			return false;
++			return NULL;
+ 		}
+ 
+ 		if (last) {
+-			*ext = &f->mt[b].e->ext;
+-			if (unlikely(nft_set_elem_expired(*ext) ||
+-				     !nft_set_elem_active(*ext, genmask)))
++			struct nft_pipapo_elem *e;
++
++			e = f->mt[b].e;
++			if (unlikely(__nft_set_elem_expired(&e->ext, tstamp) ||
++				     !nft_set_elem_active(&e->ext, genmask)))
+ 				goto next_match;
+ 
+ 			/* Last field: we're just returning the key without
+@@ -481,8 +484,7 @@ bool nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
+ 			 */
+ 			scratch->map_index = map_index;
+ 			local_bh_enable();
+-
+-			return true;
++			return e;
+ 		}
+ 
+ 		/* Swap bitmap indices: res_map is the initial bitmap for the
+@@ -492,112 +494,54 @@ bool nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
+ 		map_index = !map_index;
+ 		swap(res_map, fill_map);
+ 
+-		rp += NFT_PIPAPO_GROUPS_PADDING(f);
++		data += NFT_PIPAPO_GROUPS_PADDING(f);
+ 	}
+ 
+ out:
+ 	local_bh_enable();
+-	return false;
++	return NULL;
+ }
+ 
+ /**
+- * pipapo_get() - Get matching element reference given key data
++ * nft_pipapo_lookup() - Dataplane frontend for the main lookup function
+  * @net:	Network namespace
+  * @set:	nftables API set representation
+- * @m:		storage containing active/existing elements
+- * @data:	Key data to be matched against existing elements
+- * @genmask:	If set, check that element is active in given genmask
+- * @tstamp:	timestamp to check for expired elements
+- * @gfp:	the type of memory to allocate (see kmalloc).
++ * @key:	pointer to nft registers containing key data
++ *
++ * This function is called from the data path.  It will search for
++ * an element matching the given key in the current active copy.
++ * Unlike other set types, this uses NFT_GENMASK_ANY instead of
++ * nft_genmask_cur().
+  *
+- * This is essentially the same as the lookup function, except that it matches
+- * key data against the uncommitted copy and doesn't use preallocated maps for
+- * bitmap results.
++ * This is because new (future) elements are not reachable from
++ * priv->match, they get added to priv->clone instead.
++ * When the commit phase flips the generation bitmask, the
++ * 'now old' entries are skipped but without the 'now current'
++ * elements becoming visible. Using nft_genmask_cur() thus creates
++ * inconsistent state: matching old entries get skipped but the
++ * newly matching entries are unreachable.
+  *
+- * Return: pointer to &struct nft_pipapo_elem on match, error pointer otherwise.
++ * NFT_GENMASK_ANY will still find the 'now old' entries, which ensures a consistent
++ * priv->match view.
++ *
++ * nft_pipapo_commit swaps ->clone and ->match shortly after the
++ * genbit flip.  As ->clone doesn't contain the old entries in the first
++ * place, lookup will only find the now-current ones.
++ *
++ * Return: nftables API extension pointer or NULL if no match.
+  */
+-static struct nft_pipapo_elem *pipapo_get(const struct net *net,
+-					  const struct nft_set *set,
+-					  const struct nft_pipapo_match *m,
+-					  const u8 *data, u8 genmask,
+-					  u64 tstamp, gfp_t gfp)
++const struct nft_set_ext *
++nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
++		  const u32 *key)
+ {
+-	struct nft_pipapo_elem *ret = ERR_PTR(-ENOENT);
+-	unsigned long *res_map, *fill_map = NULL;
+-	const struct nft_pipapo_field *f;
+-	int i;
+-
+-	if (m->bsize_max == 0)
+-		return ret;
+-
+-	res_map = kmalloc_array(m->bsize_max, sizeof(*res_map), gfp);
+-	if (!res_map) {
+-		ret = ERR_PTR(-ENOMEM);
+-		goto out;
+-	}
+-
+-	fill_map = kcalloc(m->bsize_max, sizeof(*res_map), gfp);
+-	if (!fill_map) {
+-		ret = ERR_PTR(-ENOMEM);
+-		goto out;
+-	}
+-
+-	pipapo_resmap_init(m, res_map);
+-
+-	nft_pipapo_for_each_field(f, i, m) {
+-		bool last = i == m->field_count - 1;
+-		int b;
+-
+-		/* For each bit group: select lookup table bucket depending on
+-		 * packet bytes value, then AND bucket value
+-		 */
+-		if (f->bb == 8)
+-			pipapo_and_field_buckets_8bit(f, res_map, data);
+-		else if (f->bb == 4)
+-			pipapo_and_field_buckets_4bit(f, res_map, data);
+-		else
+-			BUG();
+-
+-		data += f->groups / NFT_PIPAPO_GROUPS_PER_BYTE(f);
+-
+-		/* Now populate the bitmap for the next field, unless this is
+-		 * the last field, in which case return the matched 'ext'
+-		 * pointer if any.
+-		 *
+-		 * Now res_map contains the matching bitmap, and fill_map is the
+-		 * bitmap for the next field.
+-		 */
+-next_match:
+-		b = pipapo_refill(res_map, f->bsize, f->rules, fill_map, f->mt,
+-				  last);
+-		if (b < 0)
+-			goto out;
+-
+-		if (last) {
+-			if (__nft_set_elem_expired(&f->mt[b].e->ext, tstamp))
+-				goto next_match;
+-			if ((genmask &&
+-			     !nft_set_elem_active(&f->mt[b].e->ext, genmask)))
+-				goto next_match;
+-
+-			ret = f->mt[b].e;
+-			goto out;
+-		}
+-
+-		data += NFT_PIPAPO_GROUPS_PADDING(f);
++	struct nft_pipapo *priv = nft_set_priv(set);
++	const struct nft_pipapo_match *m;
++	const struct nft_pipapo_elem *e;
+ 
+-		/* Swap bitmap indices: fill_map will be the initial bitmap for
+-		 * the next field (i.e. the new res_map), and res_map is
+-		 * guaranteed to be all-zeroes at this point, ready to be filled
+-		 * according to the next mapping table.
+-		 */
+-		swap(res_map, fill_map);
+-	}
++	m = rcu_dereference(priv->match);
++	e = pipapo_get(m, (const u8 *)key, NFT_GENMASK_ANY, get_jiffies_64());
+ 
+-out:
+-	kfree(fill_map);
+-	kfree(res_map);
+-	return ret;
++	return e ? &e->ext : NULL;
+ }
+ 
+ /**
+@@ -606,6 +550,11 @@ static struct nft_pipapo_elem *pipapo_get(const struct net *net,
+  * @set:	nftables API set representation
+  * @elem:	nftables API element representation containing key data
+  * @flags:	Unused
++ *
++ * This function is called from the control plane path under
++ * RCU read lock.
++ *
++ * Return: set element private pointer or ERR_PTR(-ENOENT).
+  */
+ static struct nft_elem_priv *
+ nft_pipapo_get(const struct net *net, const struct nft_set *set,
+@@ -615,11 +564,10 @@ nft_pipapo_get(const struct net *net, const struct nft_set *set,
+ 	struct nft_pipapo_match *m = rcu_dereference(priv->match);
+ 	struct nft_pipapo_elem *e;
+ 
+-	e = pipapo_get(net, set, m, (const u8 *)elem->key.val.data,
+-		       nft_genmask_cur(net), get_jiffies_64(),
+-		       GFP_ATOMIC);
+-	if (IS_ERR(e))
+-		return ERR_CAST(e);
++	e = pipapo_get(m, (const u8 *)elem->key.val.data,
++		       nft_genmask_cur(net), get_jiffies_64());
++	if (!e)
++		return ERR_PTR(-ENOENT);
+ 
+ 	return &e->priv;
+ }
+@@ -1344,8 +1292,8 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set,
+ 	else
+ 		end = start;
+ 
+-	dup = pipapo_get(net, set, m, start, genmask, tstamp, GFP_KERNEL);
+-	if (!IS_ERR(dup)) {
++	dup = pipapo_get(m, start, genmask, tstamp);
++	if (dup) {
+ 		/* Check if we already have the same exact entry */
+ 		const struct nft_data *dup_key, *dup_end;
+ 
+@@ -1364,15 +1312,9 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set,
+ 		return -ENOTEMPTY;
+ 	}
+ 
+-	if (PTR_ERR(dup) == -ENOENT) {
+-		/* Look for partially overlapping entries */
+-		dup = pipapo_get(net, set, m, end, nft_genmask_next(net), tstamp,
+-				 GFP_KERNEL);
+-	}
+-
+-	if (PTR_ERR(dup) != -ENOENT) {
+-		if (IS_ERR(dup))
+-			return PTR_ERR(dup);
++	/* Look for partially overlapping entries */
++	dup = pipapo_get(m, end, nft_genmask_next(net), tstamp);
++	if (dup) {
+ 		*elem_priv = &dup->priv;
+ 		return -ENOTEMPTY;
+ 	}
+@@ -1913,9 +1855,9 @@ nft_pipapo_deactivate(const struct net *net, const struct nft_set *set,
+ 	if (!m)
+ 		return NULL;
+ 
+-	e = pipapo_get(net, set, m, (const u8 *)elem->key.val.data,
+-		       nft_genmask_next(net), nft_net_tstamp(net), GFP_KERNEL);
+-	if (IS_ERR(e))
++	e = pipapo_get(m, (const u8 *)elem->key.val.data,
++		       nft_genmask_next(net), nft_net_tstamp(net));
++	if (!e)
+ 		return NULL;
+ 
+ 	nft_set_elem_change_active(net, set, &e->ext);
+diff --git a/net/netfilter/nft_set_pipapo_avx2.c b/net/netfilter/nft_set_pipapo_avx2.c
+index be7c16c79f711e..39e356c9687a98 100644
+--- a/net/netfilter/nft_set_pipapo_avx2.c
++++ b/net/netfilter/nft_set_pipapo_avx2.c
+@@ -1146,26 +1146,27 @@ static inline void pipapo_resmap_init_avx2(const struct nft_pipapo_match *m, uns
+  *
+  * Return: true on match, false otherwise.
+  */
+-bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
+-			    const u32 *key, const struct nft_set_ext **ext)
++const struct nft_set_ext *
++nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
++		       const u32 *key)
+ {
+ 	struct nft_pipapo *priv = nft_set_priv(set);
++	const struct nft_set_ext *ext = NULL;
+ 	struct nft_pipapo_scratch *scratch;
+-	u8 genmask = nft_genmask_cur(net);
+ 	const struct nft_pipapo_match *m;
+ 	const struct nft_pipapo_field *f;
+ 	const u8 *rp = (const u8 *)key;
+ 	unsigned long *res, *fill;
+ 	bool map_index;
+-	int i, ret = 0;
++	int i;
+ 
+ 	local_bh_disable();
+ 
+ 	if (unlikely(!irq_fpu_usable())) {
+-		bool fallback_res = nft_pipapo_lookup(net, set, key, ext);
++		ext = nft_pipapo_lookup(net, set, key);
+ 
+ 		local_bh_enable();
+-		return fallback_res;
++		return ext;
+ 	}
+ 
+ 	m = rcu_dereference(priv->match);
+@@ -1182,7 +1183,7 @@ bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
+ 	if (unlikely(!scratch)) {
+ 		kernel_fpu_end();
+ 		local_bh_enable();
+-		return false;
++		return NULL;
+ 	}
+ 
+ 	map_index = scratch->map_index;
+@@ -1197,6 +1198,7 @@ bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
+ next_match:
+ 	nft_pipapo_for_each_field(f, i, m) {
+ 		bool last = i == m->field_count - 1, first = !i;
++		int ret = 0;
+ 
+ #define NFT_SET_PIPAPO_AVX2_LOOKUP(b, n)				\
+ 		(ret = nft_pipapo_avx2_lookup_##b##b_##n(res, fill, f,	\
+@@ -1244,13 +1246,12 @@ bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
+ 			goto out;
+ 
+ 		if (last) {
+-			*ext = &f->mt[ret].e->ext;
+-			if (unlikely(nft_set_elem_expired(*ext) ||
+-				     !nft_set_elem_active(*ext, genmask))) {
+-				ret = 0;
++			const struct nft_set_ext *e = &f->mt[ret].e->ext;
++
++			if (unlikely(nft_set_elem_expired(e)))
+ 				goto next_match;
+-			}
+ 
++			ext = e;
+ 			goto out;
+ 		}
+ 
+@@ -1264,5 +1265,5 @@ bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
+ 	kernel_fpu_end();
+ 	local_bh_enable();
+ 
+-	return ret >= 0;
++	return ext;
+ }
+diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
+index 2e8ef16ff191d4..b1f04168ec9377 100644
+--- a/net/netfilter/nft_set_rbtree.c
++++ b/net/netfilter/nft_set_rbtree.c
+@@ -52,9 +52,9 @@ static bool nft_rbtree_elem_expired(const struct nft_rbtree_elem *rbe)
+ 	return nft_set_elem_expired(&rbe->ext);
+ }
+ 
+-static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
+-				const u32 *key, const struct nft_set_ext **ext,
+-				unsigned int seq)
++static const struct nft_set_ext *
++__nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
++		    const u32 *key, unsigned int seq)
+ {
+ 	struct nft_rbtree *priv = nft_set_priv(set);
+ 	const struct nft_rbtree_elem *rbe, *interval = NULL;
+@@ -65,7 +65,7 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set
+ 	parent = rcu_dereference_raw(priv->root.rb_node);
+ 	while (parent != NULL) {
+ 		if (read_seqcount_retry(&priv->count, seq))
+-			return false;
++			return NULL;
+ 
+ 		rbe = rb_entry(parent, struct nft_rbtree_elem, node);
+ 
+@@ -77,7 +77,9 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set
+ 			    nft_rbtree_interval_end(rbe) &&
+ 			    nft_rbtree_interval_start(interval))
+ 				continue;
+-			interval = rbe;
++			if (nft_set_elem_active(&rbe->ext, genmask) &&
++			    !nft_rbtree_elem_expired(rbe))
++				interval = rbe;
+ 		} else if (d > 0)
+ 			parent = rcu_dereference_raw(parent->rb_right);
+ 		else {
+@@ -87,50 +89,46 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set
+ 			}
+ 
+ 			if (nft_rbtree_elem_expired(rbe))
+-				return false;
++				return NULL;
+ 
+ 			if (nft_rbtree_interval_end(rbe)) {
+ 				if (nft_set_is_anonymous(set))
+-					return false;
++					return NULL;
+ 				parent = rcu_dereference_raw(parent->rb_left);
+ 				interval = NULL;
+ 				continue;
+ 			}
+ 
+-			*ext = &rbe->ext;
+-			return true;
++			return &rbe->ext;
+ 		}
+ 	}
+ 
+ 	if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
+-	    nft_set_elem_active(&interval->ext, genmask) &&
+-	    !nft_rbtree_elem_expired(interval) &&
+-	    nft_rbtree_interval_start(interval)) {
+-		*ext = &interval->ext;
+-		return true;
+-	}
++	    nft_rbtree_interval_start(interval))
++		return &interval->ext;
+ 
+-	return false;
++	return NULL;
+ }
+ 
+ INDIRECT_CALLABLE_SCOPE
+-bool nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
+-		       const u32 *key, const struct nft_set_ext **ext)
++const struct nft_set_ext *
++nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
++		  const u32 *key)
+ {
+ 	struct nft_rbtree *priv = nft_set_priv(set);
+ 	unsigned int seq = read_seqcount_begin(&priv->count);
+-	bool ret;
++	const struct nft_set_ext *ext;
+ 
+-	ret = __nft_rbtree_lookup(net, set, key, ext, seq);
+-	if (ret || !read_seqcount_retry(&priv->count, seq))
+-		return ret;
++	ext = __nft_rbtree_lookup(net, set, key, seq);
++	if (ext || !read_seqcount_retry(&priv->count, seq))
++		return ext;
+ 
+ 	read_lock_bh(&priv->lock);
+ 	seq = read_seqcount_begin(&priv->count);
+-	ret = __nft_rbtree_lookup(net, set, key, ext, seq);
++	ext = __nft_rbtree_lookup(net, set, key, seq);
+ 	read_unlock_bh(&priv->lock);
+ 
+-	return ret;
++	return ext;
+ }
+ 
+ static bool __nft_rbtree_get(const struct net *net, const struct nft_set *set,
+diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
+index 07ad65774fe298..3327d84518141f 100644
+--- a/net/netlink/genetlink.c
++++ b/net/netlink/genetlink.c
+@@ -1836,6 +1836,9 @@ static int genl_bind(struct net *net, int group)
+ 		    !ns_capable(net->user_ns, CAP_SYS_ADMIN))
+ 			ret = -EPERM;
+ 
++		if (ret)
++			break;
++
+ 		if (family->bind)
+ 			family->bind(i);
+ 
+diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
+index 73bc39281ef5f5..9b45fbdc90cabe 100644
+--- a/net/sunrpc/sched.c
++++ b/net/sunrpc/sched.c
+@@ -276,8 +276,6 @@ EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);
+ 
+ static int rpc_wait_bit_killable(struct wait_bit_key *key, int mode)
+ {
+-	if (unlikely(current->flags & PF_EXITING))
+-		return -EINTR;
+ 	schedule();
+ 	if (signal_pending_state(mode, current))
+ 		return -ERESTARTSYS;
+diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
+index 92cec227215aea..b78f1aae9e8068 100644
+--- a/net/sunrpc/xprtsock.c
++++ b/net/sunrpc/xprtsock.c
+@@ -407,9 +407,9 @@ xs_sock_recv_cmsg(struct socket *sock, unsigned int *msg_flags, int flags)
+ 	iov_iter_kvec(&msg.msg_iter, ITER_DEST, &alert_kvec, 1,
+ 		      alert_kvec.iov_len);
+ 	ret = sock_recvmsg(sock, &msg, flags);
+-	if (ret > 0 &&
+-	    tls_get_record_type(sock->sk, &u.cmsg) == TLS_RECORD_TYPE_ALERT) {
+-		iov_iter_revert(&msg.msg_iter, ret);
++	if (ret > 0) {
++		if (tls_get_record_type(sock->sk, &u.cmsg) == TLS_RECORD_TYPE_ALERT)
++			iov_iter_revert(&msg.msg_iter, ret);
+ 		ret = xs_sock_process_cmsg(sock, &msg, msg_flags, &u.cmsg,
+ 					   -EAGAIN);
+ 	}
+diff --git a/samples/ftrace/ftrace-direct-modify.c b/samples/ftrace/ftrace-direct-modify.c
+index 81220390851a39..328c6e60f024bf 100644
+--- a/samples/ftrace/ftrace-direct-modify.c
++++ b/samples/ftrace/ftrace-direct-modify.c
+@@ -75,8 +75,8 @@ asm (
+ 	CALL_DEPTH_ACCOUNT
+ "	call my_direct_func1\n"
+ "	leave\n"
+-"	.size		my_tramp1, .-my_tramp1\n"
+ 	ASM_RET
++"	.size		my_tramp1, .-my_tramp1\n"
+ 
+ "	.type		my_tramp2, @function\n"
+ "	.globl		my_tramp2\n"
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index a28a59926adad1..eb0e404c178411 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -10857,6 +10857,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
+ 	SND_PCI_QUIRK(0x1043, 0x1054, "ASUS G614FH/FM/FP", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
++	SND_PCI_QUIRK(0x1043, 0x106f, "ASUS VivoBook X515UA", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1043, 0x1074, "ASUS G614PH/PM/PP", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x1043, 0x10a1, "ASUS UX391UA", ALC294_FIXUP_ASUS_SPK),
+ 	SND_PCI_QUIRK(0x1043, 0x10a4, "ASUS TP3407SA", ALC287_FIXUP_TAS2781_I2C),


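The netfilter hunks above all make the same interface change: the set-backend lookup helpers stop returning a bool plus an out-parameter and instead return the matched extension pointer directly, with NULL meaning no active match. A minimal standalone sketch of that calling-convention change, using illustrative names (ext, lookup_old, lookup_new) rather than the in-tree prototypes:

	/* Sketch of the API shape change in the nft_set_* hunks above.
	 * Names are illustrative, not kernel API. */
	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>

	struct ext { int data; };

	static struct ext table[] = { { 1 }, { 2 } };

	/* Old style: success flag plus out-parameter. */
	static bool lookup_old(int key, const struct ext **ext)
	{
		if (key < 0 || key > 1)
			return false;
		*ext = &table[key];
		return true;
	}

	/* New style: the match itself is the return value, NULL on miss. */
	static const struct ext *lookup_new(int key)
	{
		return (key >= 0 && key <= 1) ? &table[key] : NULL;
	}

	int main(void)
	{
		const struct ext *e;

		if (lookup_old(1, &e))
			printf("old: %d\n", e->data);

		e = lookup_new(1);
		if (e)
			printf("new: %d\n", e->data);
		return 0;
	}

Returning the pointer removes the window in which a true result could be paired with a stale *ext, which is also why the rbtree hunk above moves the genmask and expiry checks to the point where the candidate interval is recorded.
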
* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2025-09-12  3:56 Arisu Tachibana
  0 siblings, 0 replies; 82+ messages in thread
From: Arisu Tachibana @ 2025-09-12  3:56 UTC (permalink / raw
  To: gentoo-commits

commit:     e870b96b6d476dbdd1868df9dc367662275679b5
Author:     Arisu Tachibana <alicef <AT> gentoo <DOT> org>
AuthorDate: Fri Sep 12 03:56:44 2025 +0000
Commit:     Arisu Tachibana <alicef <AT> gentoo <DOT> org>
CommitDate: Fri Sep 12 03:56:44 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=e870b96b

Linux patch 6.12.47

Signed-off-by: Arisu Tachibana <alicef <AT> gentoo.org>

 0000_README              |   4 +
 1046_linux-6.12.47.patch | 748 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 752 insertions(+)

diff --git a/0000_README b/0000_README
index d7552396..faedd16f 100644
--- a/0000_README
+++ b/0000_README
@@ -227,6 +227,10 @@ Patch:  1045_linux-6.12.46.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.12.46
 
+Patch:  1046_linux-6.12.47.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.12.47
+
 Patch:  1500_fortify-copy-size-value-range-tracking-fix.patch
 From:   https://git.kernel.org/
 Desc:   fortify: Hide run-time copy size from value range tracking

diff --git a/1046_linux-6.12.47.patch b/1046_linux-6.12.47.patch
new file mode 100644
index 00000000..148e8236
--- /dev/null
+++ b/1046_linux-6.12.47.patch
@@ -0,0 +1,748 @@
+diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
+index 53755b2021ed01..28f062dc25e1f4 100644
+--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
++++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
+@@ -525,6 +525,7 @@ What:		/sys/devices/system/cpu/vulnerabilities
+ 		/sys/devices/system/cpu/vulnerabilities/srbds
+ 		/sys/devices/system/cpu/vulnerabilities/tsa
+ 		/sys/devices/system/cpu/vulnerabilities/tsx_async_abort
++		/sys/devices/system/cpu/vulnerabilities/vmscape
+ Date:		January 2018
+ Contact:	Linux kernel mailing list <linux-kernel@vger.kernel.org>
+ Description:	Information about CPU vulnerabilities
+diff --git a/Documentation/admin-guide/hw-vuln/index.rst b/Documentation/admin-guide/hw-vuln/index.rst
+index d2caa390395e5b..5d6c001b8a988c 100644
+--- a/Documentation/admin-guide/hw-vuln/index.rst
++++ b/Documentation/admin-guide/hw-vuln/index.rst
+@@ -23,3 +23,4 @@ are configurable at compile, boot or run time.
+    gather_data_sampling
+    reg-file-data-sampling
+    indirect-target-selection
++   vmscape
+diff --git a/Documentation/admin-guide/hw-vuln/vmscape.rst b/Documentation/admin-guide/hw-vuln/vmscape.rst
+new file mode 100644
+index 00000000000000..d9b9a2b6c114c0
+--- /dev/null
++++ b/Documentation/admin-guide/hw-vuln/vmscape.rst
+@@ -0,0 +1,110 @@
++.. SPDX-License-Identifier: GPL-2.0
++
++VMSCAPE
++=======
++
++VMSCAPE is a vulnerability that may allow a guest to influence the branch
++prediction in host userspace. It particularly affects hypervisors like QEMU.
++
++Even if a hypervisor does not have any sensitive data like disk encryption keys,
++guest userspace may be able to attack the guest kernel using the hypervisor as
++a confused deputy.
++
++Affected processors
++-------------------
++
++The following CPU families are affected by VMSCAPE:
++
++**Intel processors:**
++  - Skylake generation (Parts without Enhanced-IBRS)
++  - Cascade Lake generation (Parts affected by ITS guest/host separation)
++  - Alder Lake and newer (Parts affected by BHI)
++
++Note that BHI-affected parts that use the BHB-clearing software mitigation, e.g.
++Icelake, are not vulnerable to VMSCAPE.
++
++**AMD processors:**
++  - Zen series (families 0x17, 0x19, 0x1a)
++
++**Hygon processors:**
++  - Family 0x18
++
++Mitigation
++----------
++
++Conditional IBPB
++----------------
++
++The kernel tracks when a CPU has run a potentially malicious guest and issues an
++IBPB before the first exit to userspace after VM-exit. If userspace did not run
++between VM-exit and the next VM-entry, no IBPB is issued.
++
++Note that the existing userspace mitigations against Spectre-v2 are effective in
++protecting the userspace. They are insufficient to protect the userspace VMMs
++from a malicious guest. This is because Spectre-v2 mitigations are applied at
++context switch time, while the userspace VMM can run after a VM-exit without a
++context switch.
++
++Vulnerability enumeration and mitigation are not applied inside a guest. This is
++because nested hypervisors should already be deploying IBPB to isolate
++themselves from nested guests.
++
++SMT considerations
++------------------
++
++When Simultaneous Multi-Threading (SMT) is enabled, hypervisors can be
++vulnerable to cross-thread attacks. For complete protection against VMSCAPE
++attacks in SMT environments, STIBP should be enabled.
++
++The kernel will issue a warning if SMT is enabled without adequate STIBP
++protection. The warning is not issued when:
++
++- SMT is disabled
++- STIBP is enabled system-wide
++- Intel eIBRS is enabled (which implies STIBP protection)
++
++System information and options
++------------------------------
++
++The sysfs file showing VMSCAPE mitigation status is:
++
++  /sys/devices/system/cpu/vulnerabilities/vmscape
++
++The possible values in this file are:
++
++ * 'Not affected':
++
++   The processor is not vulnerable to VMSCAPE attacks.
++
++ * 'Vulnerable':
++
++   The processor is vulnerable and no mitigation has been applied.
++
++ * 'Mitigation: IBPB before exit to userspace':
++
++   Conditional IBPB mitigation is enabled. The kernel tracks when a CPU has
++   run a potentially malicious guest and issues an IBPB before the first
++   exit to userspace after VM-exit.
++
++ * 'Mitigation: IBPB on VMEXIT':
++
++   IBPB is issued on every VM-exit. This occurs when other mitigations like
++   RETBLEED or SRSO are already issuing IBPB on VM-exit.
++
++Mitigation control on the kernel command line
++----------------------------------------------
++
++The mitigation can be controlled via the ``vmscape=`` command line parameter:
++
++ * ``vmscape=off``:
++
++   Disable the VMSCAPE mitigation.
++
++ * ``vmscape=ibpb``:
++
++   Enable conditional IBPB mitigation (default when CONFIG_MITIGATION_VMSCAPE=y).
++
++ * ``vmscape=force``:
++
++   Force vulnerability detection and mitigation even on processors that are
++   not known to be affected.
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index f402bbaccc8aa3..8724c2c580b887 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -3548,6 +3548,7 @@
+ 					       srbds=off [X86,INTEL]
+ 					       ssbd=force-off [ARM64]
+ 					       tsx_async_abort=off [X86]
++					       vmscape=off [X86]
+ 
+ 				Exceptions:
+ 					       This does not have any effect on
+@@ -7425,6 +7426,16 @@
+ 	vmpoff=		[KNL,S390] Perform z/VM CP command after power off.
+ 			Format: <command>
+ 
++	vmscape=	[X86] Controls mitigation for VMscape attacks.
++			VMscape attacks can leak information from a userspace
++			hypervisor to a guest via speculative side-channels.
++
++			off		- disable the mitigation
++			ibpb		- use Indirect Branch Prediction Barrier
++					  (IBPB) mitigation (default)
++			force		- force vulnerability detection even on
++					  unaffected processors
++
+ 	vsyscall=	[X86-64,EARLY]
+ 			Controls the behavior of vsyscalls (i.e. calls to
+ 			fixed addresses of 0xffffffffff600x00 from legacy
+diff --git a/Makefile b/Makefile
+index 4a4c5e04417d6a..2500f343c6c8a2 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 12
+-SUBLEVEL = 46
++SUBLEVEL = 47
+ EXTRAVERSION =
+ NAME = Baby Opossum Posse
+ 
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index 2df0ae2a5e5d0e..df14d0e67ea0cf 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -2769,6 +2769,15 @@ config MITIGATION_TSA
+ 	  security vulnerability on AMD CPUs which can lead to forwarding of
+ 	  invalid info to subsequent instructions and thus can affect their
+ 	  timing and thereby cause a leakage.
++
++config MITIGATION_VMSCAPE
++	bool "Mitigate VMSCAPE"
++	depends on KVM
++	default y
++	help
++	  Enable mitigation for VMSCAPE attacks. VMSCAPE is a hardware security
++	  vulnerability on Intel and AMD CPUs that may allow a guest to do
++	  Spectre v2 style attacks on the userspace hypervisor.
+ endif
+ 
+ config ARCH_HAS_ADD_PAGES
+diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
+index 98e72c1391f240..90f1f2f9d31400 100644
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -482,6 +482,7 @@
+ #define X86_FEATURE_TSA_SQ_NO          (21*32+11) /* AMD CPU not vulnerable to TSA-SQ */
+ #define X86_FEATURE_TSA_L1_NO          (21*32+12) /* AMD CPU not vulnerable to TSA-L1 */
+ #define X86_FEATURE_CLEAR_CPU_BUF_VM   (21*32+13) /* Clear CPU buffers using VERW before VMRUN */
++#define X86_FEATURE_IBPB_EXIT_TO_USER  (21*32+14) /* Use IBPB on exit-to-userspace, see VMSCAPE bug */
+ 
+ /*
+  * BUG word(s)
+@@ -536,4 +537,5 @@
+ #define X86_BUG_ITS			X86_BUG(1*32 + 5) /* "its" CPU is affected by Indirect Target Selection */
+ #define X86_BUG_ITS_NATIVE_ONLY		X86_BUG(1*32 + 6) /* "its_native_only" CPU is affected by ITS, VMX is not affected */
+ #define X86_BUG_TSA			X86_BUG( 1*32+ 9) /* "tsa" CPU is affected by Transient Scheduler Attacks */
++#define X86_BUG_VMSCAPE			X86_BUG( 1*32+10) /* "vmscape" CPU is affected by VMSCAPE attacks from guests */
+ #endif /* _ASM_X86_CPUFEATURES_H */
+diff --git a/arch/x86/include/asm/entry-common.h b/arch/x86/include/asm/entry-common.h
+index 77d20555e04de5..f71290eec4b1f3 100644
+--- a/arch/x86/include/asm/entry-common.h
++++ b/arch/x86/include/asm/entry-common.h
+@@ -92,6 +92,13 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
+ 	 * 8 (ia32) bits.
+ 	 */
+ 	choose_random_kstack_offset(rdtsc());
++
++	/* Avoid unnecessary reads of 'x86_ibpb_exit_to_user' */
++	if (cpu_feature_enabled(X86_FEATURE_IBPB_EXIT_TO_USER) &&
++	    this_cpu_read(x86_ibpb_exit_to_user)) {
++		indirect_branch_prediction_barrier();
++		this_cpu_write(x86_ibpb_exit_to_user, false);
++	}
+ }
+ #define arch_exit_to_user_mode_prepare arch_exit_to_user_mode_prepare
+ 
+diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
+index 331f6a05535d4c..f2cc7754918c0d 100644
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -549,6 +549,8 @@ void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
+ 
+ extern u64 x86_pred_cmd;
+ 
++DECLARE_PER_CPU(bool, x86_ibpb_exit_to_user);
++
+ static inline void indirect_branch_prediction_barrier(void)
+ {
+ 	alternative_msr_write(MSR_IA32_PRED_CMD, x86_pred_cmd, X86_FEATURE_USE_IBPB);
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 31b4b73e54053b..06bbc297c26c03 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -51,6 +51,7 @@ static void __init srso_select_mitigation(void);
+ static void __init gds_select_mitigation(void);
+ static void __init its_select_mitigation(void);
+ static void __init tsa_select_mitigation(void);
++static void __init vmscape_select_mitigation(void);
+ 
+ /* The base value of the SPEC_CTRL MSR without task-specific bits set */
+ u64 x86_spec_ctrl_base;
+@@ -60,6 +61,14 @@ EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
+ DEFINE_PER_CPU(u64, x86_spec_ctrl_current);
+ EXPORT_PER_CPU_SYMBOL_GPL(x86_spec_ctrl_current);
+ 
++/*
++ * Set when the CPU has run a potentially malicious guest. An IBPB will
++ * be needed before running userspace. That IBPB will flush the branch
++ * predictor content.
++ */
++DEFINE_PER_CPU(bool, x86_ibpb_exit_to_user);
++EXPORT_PER_CPU_SYMBOL_GPL(x86_ibpb_exit_to_user);
++
+ u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB;
+ EXPORT_SYMBOL_GPL(x86_pred_cmd);
+ 
+@@ -186,6 +195,7 @@ void __init cpu_select_mitigations(void)
+ 	gds_select_mitigation();
+ 	its_select_mitigation();
+ 	tsa_select_mitigation();
++	vmscape_select_mitigation();
+ }
+ 
+ /*
+@@ -2192,80 +2202,6 @@ static void __init tsa_select_mitigation(void)
+ 	pr_info("%s\n", tsa_strings[tsa_mitigation]);
+ }
+ 
+-void cpu_bugs_smt_update(void)
+-{
+-	mutex_lock(&spec_ctrl_mutex);
+-
+-	if (sched_smt_active() && unprivileged_ebpf_enabled() &&
+-	    spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
+-		pr_warn_once(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
+-
+-	switch (spectre_v2_user_stibp) {
+-	case SPECTRE_V2_USER_NONE:
+-		break;
+-	case SPECTRE_V2_USER_STRICT:
+-	case SPECTRE_V2_USER_STRICT_PREFERRED:
+-		update_stibp_strict();
+-		break;
+-	case SPECTRE_V2_USER_PRCTL:
+-	case SPECTRE_V2_USER_SECCOMP:
+-		update_indir_branch_cond();
+-		break;
+-	}
+-
+-	switch (mds_mitigation) {
+-	case MDS_MITIGATION_FULL:
+-	case MDS_MITIGATION_VMWERV:
+-		if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY))
+-			pr_warn_once(MDS_MSG_SMT);
+-		update_mds_branch_idle();
+-		break;
+-	case MDS_MITIGATION_OFF:
+-		break;
+-	}
+-
+-	switch (taa_mitigation) {
+-	case TAA_MITIGATION_VERW:
+-	case TAA_MITIGATION_UCODE_NEEDED:
+-		if (sched_smt_active())
+-			pr_warn_once(TAA_MSG_SMT);
+-		break;
+-	case TAA_MITIGATION_TSX_DISABLED:
+-	case TAA_MITIGATION_OFF:
+-		break;
+-	}
+-
+-	switch (mmio_mitigation) {
+-	case MMIO_MITIGATION_VERW:
+-	case MMIO_MITIGATION_UCODE_NEEDED:
+-		if (sched_smt_active())
+-			pr_warn_once(MMIO_MSG_SMT);
+-		break;
+-	case MMIO_MITIGATION_OFF:
+-		break;
+-	}
+-
+-	switch (tsa_mitigation) {
+-	case TSA_MITIGATION_USER_KERNEL:
+-	case TSA_MITIGATION_VM:
+-	case TSA_MITIGATION_FULL:
+-	case TSA_MITIGATION_UCODE_NEEDED:
+-		/*
+-		 * TSA-SQ can potentially lead to info leakage between
+-		 * SMT threads.
+-		 */
+-		if (sched_smt_active())
+-			static_branch_enable(&cpu_buf_idle_clear);
+-		else
+-			static_branch_disable(&cpu_buf_idle_clear);
+-		break;
+-	case TSA_MITIGATION_NONE:
+-		break;
+-	}
+-
+-	mutex_unlock(&spec_ctrl_mutex);
+-}
+-
+ #undef pr_fmt
+ #define pr_fmt(fmt)	"Speculative Store Bypass: " fmt
+ 
+@@ -2950,9 +2886,169 @@ static void __init srso_select_mitigation(void)
+ 	pr_info("%s\n", srso_strings[srso_mitigation]);
+ }
+ 
++#undef pr_fmt
++#define pr_fmt(fmt)	"VMSCAPE: " fmt
++
++enum vmscape_mitigations {
++	VMSCAPE_MITIGATION_NONE,
++	VMSCAPE_MITIGATION_AUTO,
++	VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER,
++	VMSCAPE_MITIGATION_IBPB_ON_VMEXIT,
++};
++
++static const char * const vmscape_strings[] = {
++	[VMSCAPE_MITIGATION_NONE]		= "Vulnerable",
++	/* [VMSCAPE_MITIGATION_AUTO] */
++	[VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER]	= "Mitigation: IBPB before exit to userspace",
++	[VMSCAPE_MITIGATION_IBPB_ON_VMEXIT]	= "Mitigation: IBPB on VMEXIT",
++};
++
++static enum vmscape_mitigations vmscape_mitigation __ro_after_init =
++	IS_ENABLED(CONFIG_MITIGATION_VMSCAPE) ? VMSCAPE_MITIGATION_AUTO : VMSCAPE_MITIGATION_NONE;
++
++static int __init vmscape_parse_cmdline(char *str)
++{
++	if (!str)
++		return -EINVAL;
++
++	if (!strcmp(str, "off")) {
++		vmscape_mitigation = VMSCAPE_MITIGATION_NONE;
++	} else if (!strcmp(str, "ibpb")) {
++		vmscape_mitigation = VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER;
++	} else if (!strcmp(str, "force")) {
++		setup_force_cpu_bug(X86_BUG_VMSCAPE);
++		vmscape_mitigation = VMSCAPE_MITIGATION_AUTO;
++	} else {
++		pr_err("Ignoring unknown vmscape=%s option.\n", str);
++	}
++
++	return 0;
++}
++early_param("vmscape", vmscape_parse_cmdline);
++
++static void __init vmscape_select_mitigation(void)
++{
++	if (cpu_mitigations_off() ||
++	    !boot_cpu_has_bug(X86_BUG_VMSCAPE) ||
++	    !boot_cpu_has(X86_FEATURE_IBPB)) {
++		vmscape_mitigation = VMSCAPE_MITIGATION_NONE;
++		return;
++	}
++
++	if (vmscape_mitigation == VMSCAPE_MITIGATION_AUTO)
++		vmscape_mitigation = VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER;
++
++	if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB ||
++	    srso_mitigation == SRSO_MITIGATION_IBPB_ON_VMEXIT)
++		vmscape_mitigation = VMSCAPE_MITIGATION_IBPB_ON_VMEXIT;
++
++	if (vmscape_mitigation == VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER)
++		setup_force_cpu_cap(X86_FEATURE_IBPB_EXIT_TO_USER);
++
++	pr_info("%s\n", vmscape_strings[vmscape_mitigation]);
++}
++
+ #undef pr_fmt
+ #define pr_fmt(fmt) fmt
+ 
++#define VMSCAPE_MSG_SMT "VMSCAPE: SMT on, STIBP is required for full protection. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/vmscape.html for more details.\n"
++
++void cpu_bugs_smt_update(void)
++{
++	mutex_lock(&spec_ctrl_mutex);
++
++	if (sched_smt_active() && unprivileged_ebpf_enabled() &&
++	    spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
++		pr_warn_once(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
++
++	switch (spectre_v2_user_stibp) {
++	case SPECTRE_V2_USER_NONE:
++		break;
++	case SPECTRE_V2_USER_STRICT:
++	case SPECTRE_V2_USER_STRICT_PREFERRED:
++		update_stibp_strict();
++		break;
++	case SPECTRE_V2_USER_PRCTL:
++	case SPECTRE_V2_USER_SECCOMP:
++		update_indir_branch_cond();
++		break;
++	}
++
++	switch (mds_mitigation) {
++	case MDS_MITIGATION_FULL:
++	case MDS_MITIGATION_VMWERV:
++		if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY))
++			pr_warn_once(MDS_MSG_SMT);
++		update_mds_branch_idle();
++		break;
++	case MDS_MITIGATION_OFF:
++		break;
++	}
++
++	switch (taa_mitigation) {
++	case TAA_MITIGATION_VERW:
++	case TAA_MITIGATION_UCODE_NEEDED:
++		if (sched_smt_active())
++			pr_warn_once(TAA_MSG_SMT);
++		break;
++	case TAA_MITIGATION_TSX_DISABLED:
++	case TAA_MITIGATION_OFF:
++		break;
++	}
++
++	switch (mmio_mitigation) {
++	case MMIO_MITIGATION_VERW:
++	case MMIO_MITIGATION_UCODE_NEEDED:
++		if (sched_smt_active())
++			pr_warn_once(MMIO_MSG_SMT);
++		break;
++	case MMIO_MITIGATION_OFF:
++		break;
++	}
++
++	switch (tsa_mitigation) {
++	case TSA_MITIGATION_USER_KERNEL:
++	case TSA_MITIGATION_VM:
++	case TSA_MITIGATION_FULL:
++	case TSA_MITIGATION_UCODE_NEEDED:
++		/*
++		 * TSA-SQ can potentially lead to info leakage between
++		 * SMT threads.
++		 */
++		if (sched_smt_active())
++			static_branch_enable(&cpu_buf_idle_clear);
++		else
++			static_branch_disable(&cpu_buf_idle_clear);
++		break;
++	case TSA_MITIGATION_NONE:
++		break;
++	}
++
++	switch (vmscape_mitigation) {
++	case VMSCAPE_MITIGATION_NONE:
++	case VMSCAPE_MITIGATION_AUTO:
++		break;
++	case VMSCAPE_MITIGATION_IBPB_ON_VMEXIT:
++	case VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER:
++		/*
++		 * Hypervisors can be attacked across threads; warn for SMT when
++		 * STIBP is not already enabled system-wide.
++		 *
++		 * Intel eIBRS (!AUTOIBRS) implies STIBP on.
++		 */
++		if (!sched_smt_active() ||
++		    spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
++		    spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ||
++		    (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
++		     !boot_cpu_has(X86_FEATURE_AUTOIBRS)))
++			break;
++		pr_warn_once(VMSCAPE_MSG_SMT);
++		break;
++	}
++
++	mutex_unlock(&spec_ctrl_mutex);
++}
++
+ #ifdef CONFIG_SYSFS
+ 
+ #define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"
+@@ -3196,6 +3292,11 @@ static ssize_t tsa_show_state(char *buf)
+ 	return sysfs_emit(buf, "%s\n", tsa_strings[tsa_mitigation]);
+ }
+ 
++static ssize_t vmscape_show_state(char *buf)
++{
++	return sysfs_emit(buf, "%s\n", vmscape_strings[vmscape_mitigation]);
++}
++
+ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
+ 			       char *buf, unsigned int bug)
+ {
+@@ -3260,6 +3361,9 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
+ 	case X86_BUG_TSA:
+ 		return tsa_show_state(buf);
+ 
++	case X86_BUG_VMSCAPE:
++		return vmscape_show_state(buf);
++
+ 	default:
+ 		break;
+ 	}
+@@ -3349,6 +3453,11 @@ ssize_t cpu_show_tsa(struct device *dev, struct device_attribute *attr, char *bu
+ {
+ 	return cpu_show_common(dev, attr, buf, X86_BUG_TSA);
+ }
++
++ssize_t cpu_show_vmscape(struct device *dev, struct device_attribute *attr, char *buf)
++{
++	return cpu_show_common(dev, attr, buf, X86_BUG_VMSCAPE);
++}
+ #endif
+ 
+ void __warn_thunk(void)
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index 976545ec8fdcb3..bc51fccba4cb63 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -1235,54 +1235,70 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
+ #define ITS_NATIVE_ONLY	BIT(9)
+ /* CPU is affected by Transient Scheduler Attacks */
+ #define TSA		BIT(10)
++/* CPU is affected by VMSCAPE */
++#define VMSCAPE		BIT(11)
+ 
+ static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
+-	VULNBL_INTEL_STEPPINGS(INTEL_IVYBRIDGE,		X86_STEPPING_ANY,		SRBDS),
+-	VULNBL_INTEL_STEPPINGS(INTEL_HASWELL,		X86_STEPPING_ANY,		SRBDS),
+-	VULNBL_INTEL_STEPPINGS(INTEL_HASWELL_L,		X86_STEPPING_ANY,		SRBDS),
+-	VULNBL_INTEL_STEPPINGS(INTEL_HASWELL_G,		X86_STEPPING_ANY,		SRBDS),
+-	VULNBL_INTEL_STEPPINGS(INTEL_HASWELL_X,		X86_STEPPING_ANY,		MMIO),
+-	VULNBL_INTEL_STEPPINGS(INTEL_BROADWELL_D,	X86_STEPPING_ANY,		MMIO),
+-	VULNBL_INTEL_STEPPINGS(INTEL_BROADWELL_G,	X86_STEPPING_ANY,		SRBDS),
+-	VULNBL_INTEL_STEPPINGS(INTEL_BROADWELL_X,	X86_STEPPING_ANY,		MMIO),
+-	VULNBL_INTEL_STEPPINGS(INTEL_BROADWELL,		X86_STEPPING_ANY,		SRBDS),
+-	VULNBL_INTEL_STEPPINGS(INTEL_SKYLAKE_X,		X86_STEPPINGS(0x0, 0x5),	MMIO | RETBLEED | GDS),
+-	VULNBL_INTEL_STEPPINGS(INTEL_SKYLAKE_X,		X86_STEPPING_ANY,		MMIO | RETBLEED | GDS | ITS),
+-	VULNBL_INTEL_STEPPINGS(INTEL_SKYLAKE_L,		X86_STEPPING_ANY,		MMIO | RETBLEED | GDS | SRBDS),
+-	VULNBL_INTEL_STEPPINGS(INTEL_SKYLAKE,		X86_STEPPING_ANY,		MMIO | RETBLEED | GDS | SRBDS),
+-	VULNBL_INTEL_STEPPINGS(INTEL_KABYLAKE_L,	X86_STEPPINGS(0x0, 0xb),	MMIO | RETBLEED | GDS | SRBDS),
+-	VULNBL_INTEL_STEPPINGS(INTEL_KABYLAKE_L,	X86_STEPPING_ANY,		MMIO | RETBLEED | GDS | SRBDS | ITS),
+-	VULNBL_INTEL_STEPPINGS(INTEL_KABYLAKE,		X86_STEPPINGS(0x0, 0xc),	MMIO | RETBLEED | GDS | SRBDS),
+-	VULNBL_INTEL_STEPPINGS(INTEL_KABYLAKE,		X86_STEPPING_ANY,		MMIO | RETBLEED | GDS | SRBDS | ITS),
+-	VULNBL_INTEL_STEPPINGS(INTEL_CANNONLAKE_L,	X86_STEPPING_ANY,		RETBLEED),
++	VULNBL_INTEL_STEPPINGS(INTEL_SANDYBRIDGE_X,	X86_STEPPING_ANY,		VMSCAPE),
++	VULNBL_INTEL_STEPPINGS(INTEL_SANDYBRIDGE,	X86_STEPPING_ANY,		VMSCAPE),
++	VULNBL_INTEL_STEPPINGS(INTEL_IVYBRIDGE_X,	X86_STEPPING_ANY,		VMSCAPE),
++	VULNBL_INTEL_STEPPINGS(INTEL_IVYBRIDGE,		X86_STEPPING_ANY,		SRBDS | VMSCAPE),
++	VULNBL_INTEL_STEPPINGS(INTEL_HASWELL,		X86_STEPPING_ANY,		SRBDS | VMSCAPE),
++	VULNBL_INTEL_STEPPINGS(INTEL_HASWELL_L,		X86_STEPPING_ANY,		SRBDS | VMSCAPE),
++	VULNBL_INTEL_STEPPINGS(INTEL_HASWELL_G,		X86_STEPPING_ANY,		SRBDS | VMSCAPE),
++	VULNBL_INTEL_STEPPINGS(INTEL_HASWELL_X,		X86_STEPPING_ANY,		MMIO | VMSCAPE),
++	VULNBL_INTEL_STEPPINGS(INTEL_BROADWELL_D,	X86_STEPPING_ANY,		MMIO | VMSCAPE),
++	VULNBL_INTEL_STEPPINGS(INTEL_BROADWELL_X,	X86_STEPPING_ANY,		MMIO | VMSCAPE),
++	VULNBL_INTEL_STEPPINGS(INTEL_BROADWELL_G,	X86_STEPPING_ANY,		SRBDS | VMSCAPE),
++	VULNBL_INTEL_STEPPINGS(INTEL_BROADWELL,		X86_STEPPING_ANY,		SRBDS | VMSCAPE),
++	VULNBL_INTEL_STEPPINGS(INTEL_SKYLAKE_X,		X86_STEPPINGS(0x0, 0x5),	MMIO | RETBLEED | GDS | VMSCAPE),
++	VULNBL_INTEL_STEPPINGS(INTEL_SKYLAKE_X,		X86_STEPPING_ANY,		MMIO | RETBLEED | GDS | ITS | VMSCAPE),
++	VULNBL_INTEL_STEPPINGS(INTEL_SKYLAKE_L,		X86_STEPPING_ANY,		MMIO | RETBLEED | GDS | SRBDS | VMSCAPE),
++	VULNBL_INTEL_STEPPINGS(INTEL_SKYLAKE,		X86_STEPPING_ANY,		MMIO | RETBLEED | GDS | SRBDS | VMSCAPE),
++	VULNBL_INTEL_STEPPINGS(INTEL_KABYLAKE_L,	X86_STEPPINGS(0x0, 0xb),	MMIO | RETBLEED | GDS | SRBDS | VMSCAPE),
++	VULNBL_INTEL_STEPPINGS(INTEL_KABYLAKE_L,	X86_STEPPING_ANY,		MMIO | RETBLEED | GDS | SRBDS | ITS | VMSCAPE),
++	VULNBL_INTEL_STEPPINGS(INTEL_KABYLAKE,		X86_STEPPINGS(0x0, 0xc),	MMIO | RETBLEED | GDS | SRBDS | VMSCAPE),
++	VULNBL_INTEL_STEPPINGS(INTEL_KABYLAKE,		X86_STEPPING_ANY,		MMIO | RETBLEED | GDS | SRBDS | ITS | VMSCAPE),
++	VULNBL_INTEL_STEPPINGS(INTEL_CANNONLAKE_L,	X86_STEPPING_ANY,		RETBLEED | VMSCAPE),
+ 	VULNBL_INTEL_STEPPINGS(INTEL_ICELAKE_L,		X86_STEPPING_ANY,		MMIO | MMIO_SBDS | RETBLEED | GDS | ITS | ITS_NATIVE_ONLY),
+ 	VULNBL_INTEL_STEPPINGS(INTEL_ICELAKE_D,		X86_STEPPING_ANY,		MMIO | GDS | ITS | ITS_NATIVE_ONLY),
+ 	VULNBL_INTEL_STEPPINGS(INTEL_ICELAKE_X,		X86_STEPPING_ANY,		MMIO | GDS | ITS | ITS_NATIVE_ONLY),
+-	VULNBL_INTEL_STEPPINGS(INTEL_COMETLAKE,		X86_STEPPING_ANY,		MMIO | MMIO_SBDS | RETBLEED | GDS | ITS),
+-	VULNBL_INTEL_STEPPINGS(INTEL_COMETLAKE_L,	X86_STEPPINGS(0x0, 0x0),	MMIO | RETBLEED | ITS),
+-	VULNBL_INTEL_STEPPINGS(INTEL_COMETLAKE_L,	X86_STEPPING_ANY,		MMIO | MMIO_SBDS | RETBLEED | GDS | ITS),
++	VULNBL_INTEL_STEPPINGS(INTEL_COMETLAKE,		X86_STEPPING_ANY,		MMIO | MMIO_SBDS | RETBLEED | GDS | ITS | VMSCAPE),
++	VULNBL_INTEL_STEPPINGS(INTEL_COMETLAKE_L,	X86_STEPPINGS(0x0, 0x0),	MMIO | RETBLEED | ITS | VMSCAPE),
++	VULNBL_INTEL_STEPPINGS(INTEL_COMETLAKE_L,	X86_STEPPING_ANY,		MMIO | MMIO_SBDS | RETBLEED | GDS | ITS | VMSCAPE),
+ 	VULNBL_INTEL_STEPPINGS(INTEL_TIGERLAKE_L,	X86_STEPPING_ANY,		GDS | ITS | ITS_NATIVE_ONLY),
+ 	VULNBL_INTEL_STEPPINGS(INTEL_TIGERLAKE,		X86_STEPPING_ANY,		GDS | ITS | ITS_NATIVE_ONLY),
+ 	VULNBL_INTEL_STEPPINGS(INTEL_LAKEFIELD,		X86_STEPPING_ANY,		MMIO | MMIO_SBDS | RETBLEED),
+ 	VULNBL_INTEL_STEPPINGS(INTEL_ROCKETLAKE,	X86_STEPPING_ANY,		MMIO | RETBLEED | GDS | ITS | ITS_NATIVE_ONLY),
+-	VULNBL_INTEL_STEPPINGS(INTEL_ALDERLAKE,		X86_STEPPING_ANY,		RFDS),
+-	VULNBL_INTEL_STEPPINGS(INTEL_ALDERLAKE_L,	X86_STEPPING_ANY,		RFDS),
+-	VULNBL_INTEL_STEPPINGS(INTEL_RAPTORLAKE,	X86_STEPPING_ANY,		RFDS),
+-	VULNBL_INTEL_STEPPINGS(INTEL_RAPTORLAKE_P,	X86_STEPPING_ANY,		RFDS),
+-	VULNBL_INTEL_STEPPINGS(INTEL_RAPTORLAKE_S,	X86_STEPPING_ANY,		RFDS),
+-	VULNBL_INTEL_STEPPINGS(INTEL_ATOM_GRACEMONT,	X86_STEPPING_ANY,		RFDS),
++	VULNBL_INTEL_STEPPINGS(INTEL_ALDERLAKE,		X86_STEPPING_ANY,		RFDS | VMSCAPE),
++	VULNBL_INTEL_STEPPINGS(INTEL_ALDERLAKE_L,	X86_STEPPING_ANY,		RFDS | VMSCAPE),
++	VULNBL_INTEL_STEPPINGS(INTEL_RAPTORLAKE,	X86_STEPPING_ANY,		RFDS | VMSCAPE),
++	VULNBL_INTEL_STEPPINGS(INTEL_RAPTORLAKE_P,	X86_STEPPING_ANY,		RFDS | VMSCAPE),
++	VULNBL_INTEL_STEPPINGS(INTEL_RAPTORLAKE_S,	X86_STEPPING_ANY,		RFDS | VMSCAPE),
++	VULNBL_INTEL_STEPPINGS(INTEL_METEORLAKE_L,	X86_STEPPING_ANY,		VMSCAPE),
++	VULNBL_INTEL_STEPPINGS(INTEL_ARROWLAKE_H,	X86_STEPPING_ANY,		VMSCAPE),
++	VULNBL_INTEL_STEPPINGS(INTEL_ARROWLAKE,		X86_STEPPING_ANY,		VMSCAPE),
++	VULNBL_INTEL_STEPPINGS(INTEL_ARROWLAKE_U,	X86_STEPPING_ANY,		VMSCAPE),
++	VULNBL_INTEL_STEPPINGS(INTEL_LUNARLAKE_M,	X86_STEPPING_ANY,		VMSCAPE),
++	VULNBL_INTEL_STEPPINGS(INTEL_SAPPHIRERAPIDS_X,	X86_STEPPING_ANY,		VMSCAPE),
++	VULNBL_INTEL_STEPPINGS(INTEL_GRANITERAPIDS_X,	X86_STEPPING_ANY,		VMSCAPE),
++	VULNBL_INTEL_STEPPINGS(INTEL_EMERALDRAPIDS_X,	X86_STEPPING_ANY,		VMSCAPE),
++	VULNBL_INTEL_STEPPINGS(INTEL_ATOM_GRACEMONT,	X86_STEPPING_ANY,		RFDS | VMSCAPE),
+ 	VULNBL_INTEL_STEPPINGS(INTEL_ATOM_TREMONT,	X86_STEPPING_ANY,		MMIO | MMIO_SBDS | RFDS),
+ 	VULNBL_INTEL_STEPPINGS(INTEL_ATOM_TREMONT_D,	X86_STEPPING_ANY,		MMIO | RFDS),
+ 	VULNBL_INTEL_STEPPINGS(INTEL_ATOM_TREMONT_L,	X86_STEPPING_ANY,		MMIO | MMIO_SBDS | RFDS),
+ 	VULNBL_INTEL_STEPPINGS(INTEL_ATOM_GOLDMONT,	X86_STEPPING_ANY,		RFDS),
+ 	VULNBL_INTEL_STEPPINGS(INTEL_ATOM_GOLDMONT_D,	X86_STEPPING_ANY,		RFDS),
+ 	VULNBL_INTEL_STEPPINGS(INTEL_ATOM_GOLDMONT_PLUS, X86_STEPPING_ANY,		RFDS),
++	VULNBL_INTEL_STEPPINGS(INTEL_ATOM_CRESTMONT_X,	X86_STEPPING_ANY,		VMSCAPE),
+ 
+ 	VULNBL_AMD(0x15, RETBLEED),
+ 	VULNBL_AMD(0x16, RETBLEED),
+-	VULNBL_AMD(0x17, RETBLEED | SMT_RSB | SRSO),
+-	VULNBL_HYGON(0x18, RETBLEED | SMT_RSB | SRSO),
+-	VULNBL_AMD(0x19, SRSO | TSA),
++	VULNBL_AMD(0x17, RETBLEED | SMT_RSB | SRSO | VMSCAPE),
++	VULNBL_HYGON(0x18, RETBLEED | SMT_RSB | SRSO | VMSCAPE),
++	VULNBL_AMD(0x19, SRSO | TSA | VMSCAPE),
++	VULNBL_AMD(0x1a, SRSO | VMSCAPE),
++
+ 	{}
+ };
+ 
+@@ -1502,6 +1518,14 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+ 		}
+ 	}
+ 
++	/*
++	 * Set the bug only on bare-metal. A nested hypervisor should already be
++	 * deploying IBPB to isolate itself from nested guests.
++	 */
++	if (cpu_matches(cpu_vuln_blacklist, VMSCAPE) &&
++	    !boot_cpu_has(X86_FEATURE_HYPERVISOR))
++		setup_force_cpu_bug(X86_BUG_VMSCAPE);
++
+ 	if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
+ 		return;
+ 
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 17ec4c4a3d92e6..13ab13d2e9d67c 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -11069,6 +11069,15 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
+ 	if (vcpu->arch.guest_fpu.xfd_err)
+ 		wrmsrl(MSR_IA32_XFD_ERR, 0);
+ 
++	/*
++	 * Mark this CPU as needing a branch predictor flush before running
++	 * userspace. Must be done before enabling preemption to ensure it gets
++	 * set for the CPU that actually ran the guest, and not the CPU that it
++	 * may migrate to.
++	 */
++	if (cpu_feature_enabled(X86_FEATURE_IBPB_EXIT_TO_USER))
++		this_cpu_write(x86_ibpb_exit_to_user, true);
++
+ 	/*
+ 	 * Consume any pending interrupts, including the possible source of
+ 	 * VM-Exit on SVM and any ticks that occur between VM-Exit and now.
+diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
+index 02870e70ed5955..ee52b106a95534 100644
+--- a/drivers/base/cpu.c
++++ b/drivers/base/cpu.c
+@@ -601,6 +601,7 @@ CPU_SHOW_VULN_FALLBACK(gds);
+ CPU_SHOW_VULN_FALLBACK(reg_file_data_sampling);
+ CPU_SHOW_VULN_FALLBACK(indirect_target_selection);
+ CPU_SHOW_VULN_FALLBACK(tsa);
++CPU_SHOW_VULN_FALLBACK(vmscape);
+ 
+ static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
+ static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
+@@ -618,6 +619,7 @@ static DEVICE_ATTR(gather_data_sampling, 0444, cpu_show_gds, NULL);
+ static DEVICE_ATTR(reg_file_data_sampling, 0444, cpu_show_reg_file_data_sampling, NULL);
+ static DEVICE_ATTR(indirect_target_selection, 0444, cpu_show_indirect_target_selection, NULL);
+ static DEVICE_ATTR(tsa, 0444, cpu_show_tsa, NULL);
++static DEVICE_ATTR(vmscape, 0444, cpu_show_vmscape, NULL);
+ 
+ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
+ 	&dev_attr_meltdown.attr,
+@@ -636,6 +638,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
+ 	&dev_attr_reg_file_data_sampling.attr,
+ 	&dev_attr_indirect_target_selection.attr,
+ 	&dev_attr_tsa.attr,
++	&dev_attr_vmscape.attr,
+ 	NULL
+ };
+ 
+diff --git a/include/linux/cpu.h b/include/linux/cpu.h
+index 4342b569490952..e682c75a3bb025 100644
+--- a/include/linux/cpu.h
++++ b/include/linux/cpu.h
+@@ -80,6 +80,7 @@ extern ssize_t cpu_show_reg_file_data_sampling(struct device *dev,
+ extern ssize_t cpu_show_indirect_target_selection(struct device *dev,
+ 						  struct device_attribute *attr, char *buf);
+ extern ssize_t cpu_show_tsa(struct device *dev, struct device_attribute *attr, char *buf);
++extern ssize_t cpu_show_vmscape(struct device *dev, struct device_attribute *attr, char *buf);
+ 
+ extern __printf(4, 5)
+ struct device *cpu_device_create(struct device *parent, void *drvdata,


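The mitigation state added above is exported through sysfs like the other vulnerability entries, so it can be checked from userspace without any new API. A small reader sketch (not part of the patch; error handling kept minimal):

	/* Reads the VMSCAPE sysfs file introduced by the patch above. */
	#include <stdio.h>

	int main(void)
	{
		const char *path =
			"/sys/devices/system/cpu/vulnerabilities/vmscape";
		char buf[128];
		FILE *f = fopen(path, "r");

		if (!f) {
			perror(path);	/* likely a kernel without the patch */
			return 1;
		}
		if (fgets(buf, sizeof(buf), f))
			fputs(buf, stdout);
		fclose(f);
		return 0;
	}

Expected contents match the documentation above: "Not affected", "Vulnerable", "Mitigation: IBPB before exit to userspace", or "Mitigation: IBPB on VMEXIT".
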
* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2025-09-10  6:21 Arisu Tachibana
  0 siblings, 0 replies; 82+ messages in thread
From: Arisu Tachibana @ 2025-09-10  6:21 UTC (permalink / raw
  To: gentoo-commits

commit:     e043e693b6a0625ef5797ab34f54f0ffd4109be0
Author:     Arisu Tachibana <alicef <AT> gentoo <DOT> org>
AuthorDate: Wed Sep 10 06:20:39 2025 +0000
Commit:     Arisu Tachibana <alicef <AT> gentoo <DOT> org>
CommitDate: Wed Sep 10 06:20:39 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=e043e693

Remove 1800_proc_fix_missing_pde_set_flags_for_net_proc_files.patch

Signed-off-by: Arisu Tachibana <alicef <AT> gentoo.org>

 0000_README                                        |   4 -
 ..._missing_pde_set_flags_for_net_proc_files.patch | 164 ---------------------
 2 files changed, 168 deletions(-)

diff --git a/0000_README b/0000_README
index 32b80f93..d7552396 100644
--- a/0000_README
+++ b/0000_README
@@ -247,10 +247,6 @@ Patch:  1730_parisc-Disable-prctl.patch
 From:   https://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux.git
 Desc:   prctl: Temporarily disable prctl(PR_SET_MDWE) on parisc
 
-Patch:  1800_proc_fix_missing_pde_set_flags_for_net_proc_files.patch
-From:   https://lore.kernel.org/all/20250821105806.1453833-1-wangzijie1@honor.com/
-Desc:   proc: fix missing pde_set_flags() for net proc files
-
 Patch:  1801_proc_fix_type_confusion_in_pde_set_flags.patch
 From:   https://lore.kernel.org/linux-fsdevel/20250904135715.3972782-1-wangzijie1@honor.com/
 Desc:   proc: fix type confusion in pde_set_flags()

diff --git a/1800_proc_fix_missing_pde_set_flags_for_net_proc_files.patch b/1800_proc_fix_missing_pde_set_flags_for_net_proc_files.patch
deleted file mode 100644
index d42d9d07..00000000
--- a/1800_proc_fix_missing_pde_set_flags_for_net_proc_files.patch
+++ /dev/null
@@ -1,164 +0,0 @@
-From mboxrd@z Thu Jan  1 00:00:00 1970
-Received: from mta21.hihonor.com (mta21.honor.com [81.70.160.142])
-	(using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits))
-	(No client certificate requested)
-	by smtp.subspace.kernel.org (Postfix) with ESMTPS id 7D18E308F18
-	for <regressions@lists.linux.dev>; Thu, 21 Aug 2025 10:58:11 +0000 (UTC)
-Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=81.70.160.142
-ARC-Seal:i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116;
-	t=1755773896; cv=none; b=uf0SHAKs+B4CVnXckwKblCkTaQzofZ/iIeFqe1l9Igj3XxpGpi8FFZKzT1q5rOZYOSOY9gOoTGx+Z8Zy+I08SQQzgoJxttAoeEklbFCCKFPTRLRQthyO+J1EEf6vI2T1GLJZUhHTthgDrKCTPFbrIf2oGmPkSrJHH+STidm3krA=
-ARC-Message-Signature:i=1; a=rsa-sha256; d=subspace.kernel.org;
-	s=arc-20240116; t=1755773896; c=relaxed/simple;
-	bh=l3jMKIk6lYIIzI4SccsDJUmeup9tuX38D/XM4qedxQU=;
-	h=From:To:CC:Subject:Date:Message-ID:MIME-Version:Content-Type; b=k9YALQ1oS/qWu7MXOxgISf6DxjFqOtw8eXno5JFk+wgaTBuvUyqz5D0hD+ir4kHDBBIz6MHiHUQixuW774vBGzujNekFLweIbzdfYn7osOYNvr8bD80Qam+1D1B5DM0OtVAEBNN8YmkB0RY49sa91xx9G3U5fqJoKOHT+28M+/0=
-ARC-Authentication-Results:i=1; smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=honor.com; spf=pass smtp.mailfrom=honor.com; arc=none smtp.client-ip=81.70.160.142
-Authentication-Results: smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=honor.com
-Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=honor.com
-Received: from w011.hihonor.com (unknown [10.68.20.122])
-	by mta21.hihonor.com (SkyGuard) with ESMTPS id 4c70fC2v03zYl7h7;
-	Thu, 21 Aug 2025 18:57:55 +0800 (CST)
-Received: from a011.hihonor.com (10.68.31.243) by w011.hihonor.com
- (10.68.20.122) with Microsoft SMTP Server (version=TLS1_2,
- cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.1544.11; Thu, 21 Aug
- 2025 18:58:08 +0800
-Received: from localhost.localdomain (10.144.23.14) by a011.hihonor.com
- (10.68.31.243) with Microsoft SMTP Server (version=TLS1_2,
- cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.1544.11; Thu, 21 Aug
- 2025 18:58:07 +0800
-From: wangzijie <wangzijie1@honor.com>
-To: <akpm@linux-foundation.org>, <brauner@kernel.org>,
-	<viro@zeniv.linux.org.uk>, <adobriyan@gmail.com>,
-	<rick.p.edgecombe@intel.com>, <ast@kernel.org>, <k.shutemov@gmail.com>,
-	<jirislaby@kernel.org>, <linux-fsdevel@vger.kernel.org>
-CC: <polynomial-c@gmx.de>, <gregkh@linuxfoundation.org>,
-	<stable@vger.kernel.org>, <regressions@lists.linux.dev>, wangzijie
-	<wangzijie1@honor.com>
-Subject: [PATCH v3] proc: fix missing pde_set_flags() for net proc files
-Date: Thu, 21 Aug 2025 18:58:06 +0800
-Message-ID: <20250821105806.1453833-1-wangzijie1@honor.com>
-X-Mailer: git-send-email 2.25.1
-Precedence: bulk
-X-Mailing-List: regressions@lists.linux.dev
-List-Id: <regressions.lists.linux.dev>
-List-Subscribe: <mailto:regressions+subscribe@lists.linux.dev>
-List-Unsubscribe: <mailto:regressions+unsubscribe@lists.linux.dev>
-MIME-Version: 1.0
-Content-Transfer-Encoding: 8bit
-Content-Type: text/plain
-X-ClientProxiedBy: w002.hihonor.com (10.68.28.120) To a011.hihonor.com
- (10.68.31.243)
-
-To avoid potential UAF issues during module removal races, we use pde_set_flags()
-to save proc_ops flags in PDE itself before proc_register(), and then use
-pde_has_proc_*() helpers instead of directly dereferencing pde->proc_ops->*.
-
-However, the pde_set_flags() call was missing when creating net related proc files.
-This omission caused incorrect behavior in which FMODE_LSEEK was being cleared
-inappropriately in proc_reg_open() for net proc files. Lars reported it in this link[1].
-
-Fix this by ensuring pde_set_flags() is called when registering a proc entry, and
-add a NULL check for proc_ops in pde_set_flags().
-
-[1]: https://lore.kernel.org/all/20250815195616.64497967@chagall.paradoxon.rec/
-
-Fixes: ff7ec8dc1b64 ("proc: use the same treatment to check proc_lseek as ones for proc_read_iter et.al")
-Cc: stable@vger.kernel.org
-Reported-by: Lars Wendler <polynomial-c@gmx.de>
-Signed-off-by: wangzijie <wangzijie1@honor.com>
----
-v3:
-- followed by Christian's suggestion to stash pde->proc_ops in a local const variable
-v2:
-- followed by Jiri's suggestion to refactor code and reformat commit message
----
- fs/proc/generic.c | 38 +++++++++++++++++++++-----------------
- 1 file changed, 21 insertions(+), 17 deletions(-)
-
-diff --git a/fs/proc/generic.c b/fs/proc/generic.c
-index 76e800e38..bd0c099cf 100644
---- a/fs/proc/generic.c
-+++ b/fs/proc/generic.c
-@@ -367,6 +367,25 @@ static const struct inode_operations proc_dir_inode_operations = {
- 	.setattr	= proc_notify_change,
- };
- 
-+static void pde_set_flags(struct proc_dir_entry *pde)
-+{
-+	const struct proc_ops *proc_ops = pde->proc_ops;
-+
-+	if (!proc_ops)
-+		return;
-+
-+	if (proc_ops->proc_flags & PROC_ENTRY_PERMANENT)
-+		pde->flags |= PROC_ENTRY_PERMANENT;
-+	if (proc_ops->proc_read_iter)
-+		pde->flags |= PROC_ENTRY_proc_read_iter;
-+#ifdef CONFIG_COMPAT
-+	if (proc_ops->proc_compat_ioctl)
-+		pde->flags |= PROC_ENTRY_proc_compat_ioctl;
-+#endif
-+	if (proc_ops->proc_lseek)
-+		pde->flags |= PROC_ENTRY_proc_lseek;
-+}
-+
- /* returns the registered entry, or frees dp and returns NULL on failure */
- struct proc_dir_entry *proc_register(struct proc_dir_entry *dir,
- 		struct proc_dir_entry *dp)
-@@ -374,6 +393,8 @@ struct proc_dir_entry *proc_register(struct proc_dir_entry *dir,
- 	if (proc_alloc_inum(&dp->low_ino))
- 		goto out_free_entry;
- 
-+	pde_set_flags(dp);
-+
- 	write_lock(&proc_subdir_lock);
- 	dp->parent = dir;
- 	if (pde_subdir_insert(dir, dp) == false) {
-@@ -561,20 +582,6 @@ struct proc_dir_entry *proc_create_reg(const char *name, umode_t mode,
- 	return p;
- }
- 
--static void pde_set_flags(struct proc_dir_entry *pde)
--{
--	if (pde->proc_ops->proc_flags & PROC_ENTRY_PERMANENT)
--		pde->flags |= PROC_ENTRY_PERMANENT;
--	if (pde->proc_ops->proc_read_iter)
--		pde->flags |= PROC_ENTRY_proc_read_iter;
--#ifdef CONFIG_COMPAT
--	if (pde->proc_ops->proc_compat_ioctl)
--		pde->flags |= PROC_ENTRY_proc_compat_ioctl;
--#endif
--	if (pde->proc_ops->proc_lseek)
--		pde->flags |= PROC_ENTRY_proc_lseek;
--}
--
- struct proc_dir_entry *proc_create_data(const char *name, umode_t mode,
- 		struct proc_dir_entry *parent,
- 		const struct proc_ops *proc_ops, void *data)
-@@ -585,7 +592,6 @@ struct proc_dir_entry *proc_create_data(const char *name, umode_t mode,
- 	if (!p)
- 		return NULL;
- 	p->proc_ops = proc_ops;
--	pde_set_flags(p);
- 	return proc_register(parent, p);
- }
- EXPORT_SYMBOL(proc_create_data);
-@@ -636,7 +642,6 @@ struct proc_dir_entry *proc_create_seq_private(const char *name, umode_t mode,
- 	p->proc_ops = &proc_seq_ops;
- 	p->seq_ops = ops;
- 	p->state_size = state_size;
--	pde_set_flags(p);
- 	return proc_register(parent, p);
- }
- EXPORT_SYMBOL(proc_create_seq_private);
-@@ -667,7 +672,6 @@ struct proc_dir_entry *proc_create_single_data(const char *name, umode_t mode,
- 		return NULL;
- 	p->proc_ops = &proc_single_ops;
- 	p->single_show = show;
--	pde_set_flags(p);
- 	return proc_register(parent, p);
- }
- EXPORT_SYMBOL(proc_create_single_data);
--- 
-2.25.1
-
-


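The dropped patch followed a common hardening pattern: derive flag bits from the ops table once, in the single registration path, so that later fast paths test cached flags instead of dereferencing an ops pointer that may belong to an already-unloaded module. A standalone sketch of the pattern, with illustrative names (entry, ops) rather than the procfs types:

	/* Flag-caching pattern from the removed patch; names are
	 * illustrative, not the procfs API. */
	#include <stdbool.h>
	#include <stdio.h>

	struct ops {
		long (*lseek)(long off);
	};

	#define ENTRY_HAS_LSEEK 0x1u

	struct entry {
		unsigned int flags;
		const struct ops *ops;
	};

	/* Mirrors pde_set_flags(): called once from registration. The v3
	 * patch added a NULL check for entries that carry no ops table. */
	static void entry_cache_flags(struct entry *e)
	{
		if (!e->ops)
			return;
		if (e->ops->lseek)
			e->flags |= ENTRY_HAS_LSEEK;
	}

	/* Fast path: no e->ops dereference needed. */
	static bool entry_has_lseek(const struct entry *e)
	{
		return e->flags & ENTRY_HAS_LSEEK;
	}

	static long my_lseek(long off) { return off; }

	int main(void)
	{
		struct ops ops = { .lseek = my_lseek };
		struct entry e = { 0, &ops };

		entry_cache_flags(&e);
		printf("lseek cached: %d\n", entry_has_lseek(&e));
		return 0;
	}
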
* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2025-09-10  5:31 Arisu Tachibana
  0 siblings, 0 replies; 82+ messages in thread
From: Arisu Tachibana @ 2025-09-10  5:31 UTC (permalink / raw
  To: gentoo-commits

commit:     4785dbe1139f7cbd50e75b8854af281f2c31b685
Author:     Arisu Tachibana <alicef <AT> gentoo <DOT> org>
AuthorDate: Wed Sep 10 05:31:22 2025 +0000
Commit:     Arisu Tachibana <alicef <AT> gentoo <DOT> org>
CommitDate: Wed Sep 10 05:31:22 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=4785dbe1

Linux patch 6.12.46

Signed-off-by: Arisu Tachibana <alicef <AT> gentoo.org>

 0000_README              |    4 +
 1045_linux-6.12.46.patch | 7199 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 7203 insertions(+)

diff --git a/0000_README b/0000_README
index a7d5ef72..32b80f93 100644
--- a/0000_README
+++ b/0000_README
@@ -223,6 +223,10 @@ Patch:  1044_linux-6.12.45.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.12.45
 
+Patch:  1045_linux-6.12.46.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.12.46
+
 Patch:  1500_fortify-copy-size-value-range-tracking-fix.patch
 From:   https://git.kernel.org/
 Desc:   fortify: Hide run-time copy size from value range tracking

diff --git a/1045_linux-6.12.46.patch b/1045_linux-6.12.46.patch
new file mode 100644
index 00000000..37f5663c
--- /dev/null
+++ b/1045_linux-6.12.46.patch
@@ -0,0 +1,7199 @@
+diff --git a/Makefile b/Makefile
+index cc59990e379679..4a4c5e04417d6a 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 12
+-SUBLEVEL = 45
++SUBLEVEL = 46
+ EXTRAVERSION =
+ NAME = Baby Opossum Posse
+ 
+diff --git a/arch/arm64/boot/dts/freescale/imx8mp-data-modul-edm-sbc.dts b/arch/arm64/boot/dts/freescale/imx8mp-data-modul-edm-sbc.dts
+index d0fc5977258fbf..16078ff60ef08b 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mp-data-modul-edm-sbc.dts
++++ b/arch/arm64/boot/dts/freescale/imx8mp-data-modul-edm-sbc.dts
+@@ -555,6 +555,7 @@ &usdhc2 {
+ 	pinctrl-2 = <&pinctrl_usdhc2_200mhz>, <&pinctrl_usdhc2_gpio>;
+ 	cd-gpios = <&gpio2 12 GPIO_ACTIVE_LOW>;
+ 	vmmc-supply = <&reg_usdhc2_vmmc>;
++	vqmmc-supply = <&ldo5>;
+ 	bus-width = <4>;
+ 	status = "okay";
+ };
+diff --git a/arch/arm64/boot/dts/freescale/imx8mp-dhcom-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-dhcom-som.dtsi
+index a90e28c07e3f1d..6835f28c1e3c51 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mp-dhcom-som.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mp-dhcom-som.dtsi
+@@ -609,6 +609,7 @@ &usdhc2 {
+ 	pinctrl-2 = <&pinctrl_usdhc2_200mhz>, <&pinctrl_usdhc2_gpio>;
+ 	cd-gpios = <&gpio2 12 GPIO_ACTIVE_LOW>;
+ 	vmmc-supply = <&reg_usdhc2_vmmc>;
++	vqmmc-supply = <&ldo5>;
+ 	bus-width = <4>;
+ 	status = "okay";
+ };
+diff --git a/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql-mba8mp-ras314.dts b/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql-mba8mp-ras314.dts
+index d7fd9d36f8240e..f7346b3d35fe53 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql-mba8mp-ras314.dts
++++ b/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql-mba8mp-ras314.dts
+@@ -467,6 +467,10 @@ &pwm4 {
+ 	status = "okay";
+ };
+ 
++&reg_usdhc2_vqmmc {
++	status = "okay";
++};
++
+ &sai5 {
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&pinctrl_sai5>;
+@@ -876,8 +880,7 @@ pinctrl_usdhc2: usdhc2grp {
+ 			   <MX8MP_IOMUXC_SD2_DATA0__USDHC2_DATA0	0x1d2>,
+ 			   <MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1	0x1d2>,
+ 			   <MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2	0x1d2>,
+-			   <MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3	0x1d2>,
+-			   <MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT	0xc0>;
++			   <MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3	0x1d2>;
+ 	};
+ 
+ 	pinctrl_usdhc2_100mhz: usdhc2-100mhzgrp {
+@@ -886,8 +889,7 @@ pinctrl_usdhc2_100mhz: usdhc2-100mhzgrp {
+ 			   <MX8MP_IOMUXC_SD2_DATA0__USDHC2_DATA0	0x1d4>,
+ 			   <MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1	0x1d4>,
+ 			   <MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2	0x1d4>,
+-			   <MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3	0x1d4>,
+-			   <MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT	0xc0>;
++			   <MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3	0x1d4>;
+ 	};
+ 
+ 	pinctrl_usdhc2_200mhz: usdhc2-200mhzgrp {
+@@ -896,8 +898,7 @@ pinctrl_usdhc2_200mhz: usdhc2-200mhzgrp {
+ 			   <MX8MP_IOMUXC_SD2_DATA0__USDHC2_DATA0	0x1d4>,
+ 			   <MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1	0x1d4>,
+ 			   <MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2	0x1d4>,
+-			   <MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3	0x1d4>,
+-			   <MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT	0xc0>;
++			   <MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3	0x1d4>;
+ 	};
+ 
+ 	pinctrl_usdhc2_gpio: usdhc2-gpiogrp {
+diff --git a/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql-mba8mpxl.dts b/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql-mba8mpxl.dts
+index ae64731266f35e..e7c16a7ee6c26f 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql-mba8mpxl.dts
++++ b/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql-mba8mpxl.dts
+@@ -603,6 +603,10 @@ &pwm3 {
+ 	status = "okay";
+ };
+ 
++&reg_usdhc2_vqmmc {
++	status = "okay";
++};
++
+ &sai3 {
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&pinctrl_sai3>;
+@@ -982,8 +986,7 @@ pinctrl_usdhc2: usdhc2grp {
+ 			   <MX8MP_IOMUXC_SD2_DATA0__USDHC2_DATA0	0x1d2>,
+ 			   <MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1	0x1d2>,
+ 			   <MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2	0x1d2>,
+-			   <MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3	0x1d2>,
+-			   <MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT	0xc0>;
++			   <MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3	0x1d2>;
+ 	};
+ 
+ 	pinctrl_usdhc2_100mhz: usdhc2-100mhzgrp {
+@@ -992,8 +995,7 @@ pinctrl_usdhc2_100mhz: usdhc2-100mhzgrp {
+ 			   <MX8MP_IOMUXC_SD2_DATA0__USDHC2_DATA0	0x1d4>,
+ 			   <MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1	0x1d4>,
+ 			   <MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2	0x1d4>,
+-			   <MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3	0x1d4>,
+-			   <MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT	0xc0>;
++			   <MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3	0x1d4>;
+ 	};
+ 
+ 	pinctrl_usdhc2_200mhz: usdhc2-200mhzgrp {
+@@ -1002,8 +1004,7 @@ pinctrl_usdhc2_200mhz: usdhc2-200mhzgrp {
+ 			   <MX8MP_IOMUXC_SD2_DATA0__USDHC2_DATA0	0x1d4>,
+ 			   <MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1	0x1d4>,
+ 			   <MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2	0x1d4>,
+-			   <MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3	0x1d4>,
+-			   <MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT	0xc0>;
++			   <MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3	0x1d4>;
+ 	};
+ 
+ 	pinctrl_usdhc2_gpio: usdhc2-gpiogrp {
+diff --git a/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql.dtsi
+index 3ddc5aaa7c5f0c..9eac178ab2c20e 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql.dtsi
+@@ -24,6 +24,20 @@ reg_vcc3v3: regulator-vcc3v3 {
+ 		regulator-max-microvolt = <3300000>;
+ 		regulator-always-on;
+ 	};
++
++	reg_usdhc2_vqmmc: regulator-usdhc2-vqmmc {
++		compatible = "regulator-gpio";
++		pinctrl-names = "default";
++		pinctrl-0 = <&pinctrl_reg_usdhc2_vqmmc>;
++		regulator-name = "V_SD2";
++		regulator-min-microvolt = <1800000>;
++		regulator-max-microvolt = <3300000>;
++		gpios = <&gpio1 4 GPIO_ACTIVE_HIGH>;
++		states = <1800000 0x1>,
++			 <3300000 0x0>;
++		vin-supply = <&ldo5_reg>;
++		status = "disabled";
++	};
+ };
+ 
+ &A53_0 {
+@@ -179,6 +193,10 @@ m24c64: eeprom@57 {
+ 	};
+ };
+ 
++&usdhc2 {
++	vqmmc-supply = <&reg_usdhc2_vqmmc>;
++};
++
+ &usdhc3 {
+ 	pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ 	pinctrl-0 = <&pinctrl_usdhc3>;
+@@ -228,6 +246,10 @@ pinctrl_reg_usdhc2_vmmc: regusdhc2vmmcgrp {
+ 		fsl,pins = <MX8MP_IOMUXC_SD2_RESET_B__GPIO2_IO19	0x10>;
+ 	};
+ 
++	pinctrl_reg_usdhc2_vqmmc: regusdhc2vqmmcgrp {
++		fsl,pins = <MX8MP_IOMUXC_GPIO1_IO04__GPIO1_IO04		0xc0>;
++	};
++
+ 	pinctrl_usdhc3: usdhc3grp {
+ 		fsl,pins = <MX8MP_IOMUXC_NAND_WE_B__USDHC3_CLK		0x194>,
+ 			   <MX8MP_IOMUXC_NAND_WP_B__USDHC3_CMD		0x1d4>,
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts b/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts
+index a5a7e374bc5947..a7afc83d2f266e 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts
+@@ -966,6 +966,7 @@ spiflash: flash@0 {
+ 		reg = <0>;
+ 		m25p,fast-read;
+ 		spi-max-frequency = <10000000>;
++		vcc-supply = <&vcc_3v0>;
+ 	};
+ };
+ 
+diff --git a/arch/arm64/include/asm/module.h b/arch/arm64/include/asm/module.h
+index 79550b22ba19ce..fb9b88eebeb15a 100644
+--- a/arch/arm64/include/asm/module.h
++++ b/arch/arm64/include/asm/module.h
+@@ -19,6 +19,7 @@ struct mod_arch_specific {
+ 
+ 	/* for CONFIG_DYNAMIC_FTRACE */
+ 	struct plt_entry	*ftrace_trampolines;
++	struct plt_entry	*init_ftrace_trampolines;
+ };
+ 
+ u64 module_emit_plt_entry(struct module *mod, Elf64_Shdr *sechdrs,
+diff --git a/arch/arm64/include/asm/module.lds.h b/arch/arm64/include/asm/module.lds.h
+index b9ae8349e35dbb..fb944b46846dae 100644
+--- a/arch/arm64/include/asm/module.lds.h
++++ b/arch/arm64/include/asm/module.lds.h
+@@ -2,6 +2,7 @@ SECTIONS {
+ 	.plt 0 : { BYTE(0) }
+ 	.init.plt 0 : { BYTE(0) }
+ 	.text.ftrace_trampoline 0 : { BYTE(0) }
++	.init.text.ftrace_trampoline 0 : { BYTE(0) }
+ 
+ #ifdef CONFIG_KASAN_SW_TAGS
+ 	/*
+diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
+index a650f5e11fc5d8..b657f058bf4d50 100644
+--- a/arch/arm64/kernel/ftrace.c
++++ b/arch/arm64/kernel/ftrace.c
+@@ -195,10 +195,17 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
+ 	return ftrace_modify_code(pc, 0, new, false);
+ }
+ 
+-static struct plt_entry *get_ftrace_plt(struct module *mod)
++static struct plt_entry *get_ftrace_plt(struct module *mod, unsigned long addr)
+ {
+ #ifdef CONFIG_MODULES
+-	struct plt_entry *plt = mod->arch.ftrace_trampolines;
++	struct plt_entry *plt = NULL;
++
++	if (within_module_mem_type(addr, mod, MOD_INIT_TEXT))
++		plt = mod->arch.init_ftrace_trampolines;
++	else if (within_module_mem_type(addr, mod, MOD_TEXT))
++		plt = mod->arch.ftrace_trampolines;
++	else
++		return NULL;
+ 
+ 	return &plt[FTRACE_PLT_IDX];
+ #else
+@@ -270,7 +277,7 @@ static bool ftrace_find_callable_addr(struct dyn_ftrace *rec,
+ 	if (WARN_ON(!mod))
+ 		return false;
+ 
+-	plt = get_ftrace_plt(mod);
++	plt = get_ftrace_plt(mod, pc);
+ 	if (!plt) {
+ 		pr_err("ftrace: no module PLT for %ps\n", (void *)*addr);
+ 		return false;
+diff --git a/arch/arm64/kernel/module-plts.c b/arch/arm64/kernel/module-plts.c
+index bde32979c06afc..7afd370da9f48f 100644
+--- a/arch/arm64/kernel/module-plts.c
++++ b/arch/arm64/kernel/module-plts.c
+@@ -283,7 +283,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
+ 	unsigned long core_plts = 0;
+ 	unsigned long init_plts = 0;
+ 	Elf64_Sym *syms = NULL;
+-	Elf_Shdr *pltsec, *tramp = NULL;
++	Elf_Shdr *pltsec, *tramp = NULL, *init_tramp = NULL;
+ 	int i;
+ 
+ 	/*
+@@ -298,6 +298,9 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
+ 		else if (!strcmp(secstrings + sechdrs[i].sh_name,
+ 				 ".text.ftrace_trampoline"))
+ 			tramp = sechdrs + i;
++		else if (!strcmp(secstrings + sechdrs[i].sh_name,
++				 ".init.text.ftrace_trampoline"))
++			init_tramp = sechdrs + i;
+ 		else if (sechdrs[i].sh_type == SHT_SYMTAB)
+ 			syms = (Elf64_Sym *)sechdrs[i].sh_addr;
+ 	}
+@@ -363,5 +366,12 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
+ 		tramp->sh_size = NR_FTRACE_PLTS * sizeof(struct plt_entry);
+ 	}
+ 
++	if (init_tramp) {
++		init_tramp->sh_type = SHT_NOBITS;
++		init_tramp->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
++		init_tramp->sh_addralign = __alignof__(struct plt_entry);
++		init_tramp->sh_size = NR_FTRACE_PLTS * sizeof(struct plt_entry);
++	}
++
+ 	return 0;
+ }
+diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
+index 36b25af5632430..65764ec1cc1343 100644
+--- a/arch/arm64/kernel/module.c
++++ b/arch/arm64/kernel/module.c
+@@ -453,6 +453,17 @@ static int module_init_ftrace_plt(const Elf_Ehdr *hdr,
+ 	__init_plt(&plts[FTRACE_PLT_IDX], FTRACE_ADDR);
+ 
+ 	mod->arch.ftrace_trampolines = plts;
++
++	s = find_section(hdr, sechdrs, ".init.text.ftrace_trampoline");
++	if (!s)
++		return -ENOEXEC;
++
++	plts = (void *)s->sh_addr;
++
++	__init_plt(&plts[FTRACE_PLT_IDX], FTRACE_ADDR);
++
++	mod->arch.init_ftrace_trampolines = plts;
++
+ #endif
+ 	return 0;
+ }
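The arm64 hunks above give each module a second ftrace trampoline PLT placed in its init section, and get_ftrace_plt() now picks a PLT based on where the patched call site lives. The underlying constraint is branch reach; a minimal sketch (plain C, not kernel code) of the window an AArch64 BL instruction can cover, beyond which a PLT entry such as these trampolines is required:

/*
 * Hedged sketch: an AArch64 BL instruction encodes a signed 26-bit
 * word offset, so it can only reach targets within +/-128 MiB of the
 * call site. Anything farther away, such as the ftrace entry code from
 * a distantly loaded module, must bounce through a PLT entry.
 */
#include <stdbool.h>
#include <stdint.h>

#define BL_RANGE (128LL * 1024 * 1024)	/* +/-128 MiB */

static bool bl_can_reach(uint64_t pc, uint64_t target)
{
	int64_t offset = (int64_t)(target - pc);

	return offset >= -BL_RANGE && offset < BL_RANGE;
}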
+diff --git a/arch/loongarch/kernel/signal.c b/arch/loongarch/kernel/signal.c
+index 4740cb5b238898..c9f7ca778364ed 100644
+--- a/arch/loongarch/kernel/signal.c
++++ b/arch/loongarch/kernel/signal.c
+@@ -677,6 +677,11 @@ static int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
+ 	for (i = 1; i < 32; i++)
+ 		err |= __put_user(regs->regs[i], &sc->sc_regs[i]);
+ 
++#ifdef CONFIG_CPU_HAS_LBT
++	if (extctx->lbt.addr)
++		err |= protected_save_lbt_context(extctx);
++#endif
++
+ 	if (extctx->lasx.addr)
+ 		err |= protected_save_lasx_context(extctx);
+ 	else if (extctx->lsx.addr)
+@@ -684,11 +689,6 @@ static int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
+ 	else if (extctx->fpu.addr)
+ 		err |= protected_save_fpu_context(extctx);
+ 
+-#ifdef CONFIG_CPU_HAS_LBT
+-	if (extctx->lbt.addr)
+-		err |= protected_save_lbt_context(extctx);
+-#endif
+-
+ 	/* Set the "end" magic */
+ 	info = (struct sctx_info *)extctx->end.addr;
+ 	err |= __put_user(0, &info->magic);
+diff --git a/arch/loongarch/vdso/Makefile b/arch/loongarch/vdso/Makefile
+index fdde1bcd4e2663..49af37f781bbef 100644
+--- a/arch/loongarch/vdso/Makefile
++++ b/arch/loongarch/vdso/Makefile
+@@ -36,8 +36,7 @@ endif
+ 
+ # VDSO linker flags.
+ ldflags-y := -Bsymbolic --no-undefined -soname=linux-vdso.so.1 \
+-	$(filter -E%,$(KBUILD_CFLAGS)) -nostdlib -shared \
+-	--hash-style=sysv --build-id -T
++	$(filter -E%,$(KBUILD_CFLAGS)) -shared --build-id -T
+ 
+ #
+ # Shared build commands.
+diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
+index fa8f2da87a0a83..d160c3b830266f 100644
+--- a/arch/riscv/Kconfig
++++ b/arch/riscv/Kconfig
+@@ -61,7 +61,7 @@ config RISCV
+ 	select ARCH_SUPPORTS_DEBUG_PAGEALLOC if MMU
+ 	select ARCH_SUPPORTS_HUGETLBFS if MMU
+ 	# LLD >= 14: https://github.com/llvm/llvm-project/issues/50505
+-	select ARCH_SUPPORTS_LTO_CLANG if LLD_VERSION >= 140000
++	select ARCH_SUPPORTS_LTO_CLANG if LLD_VERSION >= 140000 && CMODEL_MEDANY
+ 	select ARCH_SUPPORTS_LTO_CLANG_THIN if LLD_VERSION >= 140000
+ 	select ARCH_SUPPORTS_PAGE_TABLE_CHECK if MMU
+ 	select ARCH_SUPPORTS_PER_VMA_LOCK if MMU
+diff --git a/arch/riscv/include/asm/asm.h b/arch/riscv/include/asm/asm.h
+index 776354895b81e7..96a52344b2fb55 100644
+--- a/arch/riscv/include/asm/asm.h
++++ b/arch/riscv/include/asm/asm.h
+@@ -90,7 +90,7 @@
+ #endif
+ 
+ .macro asm_per_cpu dst sym tmp
+-	REG_L \tmp, TASK_TI_CPU_NUM(tp)
++	lw    \tmp, TASK_TI_CPU_NUM(tp)
+ 	slli  \tmp, \tmp, PER_CPU_OFFSET_SHIFT
+ 	la    \dst, __per_cpu_offset
+ 	add   \dst, \dst, \tmp
+diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
+index 33a5a9f2a0d4e1..af483de85dfc9a 100644
+--- a/arch/riscv/kernel/entry.S
++++ b/arch/riscv/kernel/entry.S
+@@ -46,7 +46,7 @@
+ 	 * a0 = &new_vmalloc[BIT_WORD(cpu)]
+ 	 * a1 = BIT_MASK(cpu)
+ 	 */
+-	REG_L 	a2, TASK_TI_CPU(tp)
++	lw	a2, TASK_TI_CPU(tp)
+ 	/*
+ 	 * Compute the new_vmalloc element position:
+ 	 * (cpu / 64) * 8 = (cpu >> 6) << 3
+diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
+index 4cc631fa703913..563425b4963c98 100644
+--- a/arch/riscv/net/bpf_jit_comp64.c
++++ b/arch/riscv/net/bpf_jit_comp64.c
+@@ -1150,7 +1150,7 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
+ 				emit_mv(rd, rs, ctx);
+ #ifdef CONFIG_SMP
+ 			/* Load current CPU number in T1 */
+-			emit_ld(RV_REG_T1, offsetof(struct thread_info, cpu),
++			emit_lw(RV_REG_T1, offsetof(struct thread_info, cpu),
+ 				RV_REG_TP, ctx);
+ 			/* Load address of __per_cpu_offset array in T2 */
+ 			emit_addr(RV_REG_T2, (u64)&__per_cpu_offset, extra_pass, ctx);
+@@ -1557,7 +1557,7 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
+ 		 */
+ 		if (insn->src_reg == 0 && insn->imm == BPF_FUNC_get_smp_processor_id) {
+ 			/* Load current CPU number in R0 */
+-			emit_ld(bpf_to_rv_reg(BPF_REG_0, ctx), offsetof(struct thread_info, cpu),
++			emit_lw(bpf_to_rv_reg(BPF_REG_0, ctx), offsetof(struct thread_info, cpu),
+ 				RV_REG_TP, ctx);
+ 			break;
+ 		}
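All three RISC-V hunks fix the same class of bug: thread_info.cpu is a 32-bit field, but REG_L and emit_ld() issue a 64-bit load, so they also read whatever field follows. A hedged userspace illustration (the struct layout and names are invented for the demo, which assumes a little-endian host, as RISC-V is):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct thread_info_like {
	int32_t cpu;		/* the field the assembly wants */
	int32_t neighbour;	/* clobbers the upper half of an 'ld' */
};

int main(void)
{
	struct thread_info_like ti = { .cpu = 3, .neighbour = -1 };
	uint64_t wide;
	uint32_t narrow;

	memcpy(&wide, &ti, sizeof(wide));		/* what REG_L/emit_ld read */
	memcpy(&narrow, &ti.cpu, sizeof(narrow));	/* what lw/emit_lw read */

	printf("ld -> %#llx\n", (unsigned long long)wide);	/* 0xffffffff00000003 */
	printf("lw -> %u\n", narrow);				/* 3 */
	return 0;
}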
+diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
+index a98e53491a4e6e..746433efad50af 100644
+--- a/arch/x86/include/asm/pgtable_64_types.h
++++ b/arch/x86/include/asm/pgtable_64_types.h
+@@ -41,6 +41,9 @@ static inline bool pgtable_l5_enabled(void)
+ #define pgtable_l5_enabled() 0
+ #endif /* CONFIG_X86_5LEVEL */
+ 
++#define ARCH_PAGE_TABLE_SYNC_MASK \
++	(pgtable_l5_enabled() ? PGTBL_PGD_MODIFIED : PGTBL_P4D_MODIFIED)
++
+ extern unsigned int pgdir_shift;
+ extern unsigned int ptrs_per_p4d;
+ 
+diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
+index d8853afd314b24..63de8dbaa75ee8 100644
+--- a/arch/x86/mm/init_64.c
++++ b/arch/x86/mm/init_64.c
+@@ -223,6 +223,24 @@ static void sync_global_pgds(unsigned long start, unsigned long end)
+ 		sync_global_pgds_l4(start, end);
+ }
+ 
++/*
++ * Make kernel mappings visible in all page tables in the system.
++ * This is necessary except when the init task populates kernel mappings
++ * during the boot process. In that case, all processes originating from
++ * the init task copy the kernel mappings, so there is no issue.
++ * Otherwise, missing synchronization could lead to kernel crashes due
++ * to missing page table entries for certain kernel mappings.
++ *
++ * Synchronization is performed at the top level, which is the PGD in
++ * 5-level paging systems. In 4-level paging systems, however,
++ * pgd_populate() is a no-op, so synchronization is done at the P4D level.
++ * sync_global_pgds() handles this difference between paging levels.
++ */
++void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
++{
++	sync_global_pgds(start, end);
++}
++
+ /*
+  * NOTE: This function is marked __ref because it calls __init function
+  * (alloc_bootmem_pages). It's safe to do it ONLY when after_bootmem == 0.
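The define added to pgtable_64_types.h is what makes the new hook fire: generic page-table code accumulates PGTBL_*_MODIFIED bits while populating kernel mappings and calls arch_sync_kernel_mappings() only when they intersect ARCH_PAGE_TABLE_SYNC_MASK. A hedged kernel-style sketch of that consumer side (modelled on mm/vmalloc.c; populate_kernel_range() is a hypothetical stand-in, not a real kernel function):

static int vmap_example(unsigned long start, unsigned long end)
{
	pgtbl_mod_mask mask = 0;
	int err;

	err = populate_kernel_range(start, end, &mask);	/* hypothetical */

	/* Only sync when a level the arch cares about was modified. */
	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);

	return err;
}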
+diff --git a/block/blk-integrity.c b/block/blk-integrity.c
+index 83b696ba0cac31..3fe0681399f6e5 100644
+--- a/block/blk-integrity.c
++++ b/block/blk-integrity.c
+@@ -218,9 +218,7 @@ static ssize_t flag_store(struct device *dev, const char *page, size_t count,
+ 	else
+ 		lim.integrity.flags |= flag;
+ 
+-	blk_mq_freeze_queue(q);
+-	err = queue_limits_commit_update(q, &lim);
+-	blk_mq_unfreeze_queue(q);
++	err = queue_limits_commit_update_frozen(q, &lim);
+ 	if (err)
+ 		return err;
+ 	return count;
+diff --git a/block/blk-settings.c b/block/blk-settings.c
+index 9ae3eee4b5ae5f..f24fffdb6c294c 100644
+--- a/block/blk-settings.c
++++ b/block/blk-settings.c
+@@ -444,6 +444,30 @@ int queue_limits_commit_update(struct request_queue *q,
+ }
+ EXPORT_SYMBOL_GPL(queue_limits_commit_update);
+ 
++/**
++ * queue_limits_commit_update_frozen - commit an atomic update of queue limits
++ * @q:		queue to update
++ * @lim:	limits to apply
++ *
++ * Apply the limits in @lim that were obtained from queue_limits_start_update()
++ * and updated with the new values by the caller to @q.  Freezes the queue
++ * before the update and unfreezes it after.
++ *
++ * Returns 0 if successful, else a negative error code.
++ */
++int queue_limits_commit_update_frozen(struct request_queue *q,
++		struct queue_limits *lim)
++{
++	int ret;
++
++	blk_mq_freeze_queue(q);
++	ret = queue_limits_commit_update(q, lim);
++	blk_mq_unfreeze_queue(q);
++
++	return ret;
++}
++EXPORT_SYMBOL_GPL(queue_limits_commit_update_frozen);
++
+ /**
+  * queue_limits_set - apply queue limits to queue
+  * @q:		queue to update
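A hedged sketch of how a driver is expected to use the new helper, mirroring the call sites converted elsewhere in this patch (kernel-style, not standalone code; example_toggle_write_cache() is an invented caller, the queue_limits_*() calls are the real API):

static int example_toggle_write_cache(struct gendisk *disk, bool enable)
{
	struct queue_limits lim = queue_limits_start_update(disk->queue);

	if (enable)
		lim.features |= BLK_FEAT_WRITE_CACHE;
	else
		lim.features &= ~BLK_FEAT_WRITE_CACHE;

	/* Freezes the queue, commits the limits, and unfreezes in one call. */
	return queue_limits_commit_update_frozen(disk->queue, &lim);
}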
+diff --git a/block/blk-zoned.c b/block/blk-zoned.c
+index 5915fb98ffdcea..f1160cc2cf85d9 100644
+--- a/block/blk-zoned.c
++++ b/block/blk-zoned.c
+@@ -1510,7 +1510,6 @@ static int disk_update_zone_resources(struct gendisk *disk,
+ 	unsigned int nr_seq_zones, nr_conv_zones;
+ 	unsigned int pool_size;
+ 	struct queue_limits lim;
+-	int ret;
+ 
+ 	disk->nr_zones = args->nr_zones;
+ 	disk->zone_capacity = args->zone_capacity;
+@@ -1561,11 +1560,7 @@ static int disk_update_zone_resources(struct gendisk *disk,
+ 	}
+ 
+ commit:
+-	blk_mq_freeze_queue(q);
+-	ret = queue_limits_commit_update(q, &lim);
+-	blk_mq_unfreeze_queue(q);
+-
+-	return ret;
++	return queue_limits_commit_update_frozen(q, &lim);
+ }
+ 
+ static int blk_revalidate_conv_zone(struct blk_zone *zone, unsigned int idx,
+diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c
+index 00208c4a658073..ea2909c4745ab2 100644
+--- a/drivers/accel/ivpu/ivpu_drv.c
++++ b/drivers/accel/ivpu/ivpu_drv.c
+@@ -689,7 +689,7 @@ static void ivpu_bo_unbind_all_user_contexts(struct ivpu_device *vdev)
+ static void ivpu_dev_fini(struct ivpu_device *vdev)
+ {
+ 	ivpu_jobs_abort_all(vdev);
+-	ivpu_pm_cancel_recovery(vdev);
++	ivpu_pm_disable_recovery(vdev);
+ 	ivpu_pm_disable(vdev);
+ 	ivpu_prepare_for_reset(vdev);
+ 	ivpu_shutdown(vdev);
+diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c
+index 2269569bdee7bb..ad02b71c73bbfa 100644
+--- a/drivers/accel/ivpu/ivpu_pm.c
++++ b/drivers/accel/ivpu/ivpu_pm.c
+@@ -382,10 +382,10 @@ void ivpu_pm_init(struct ivpu_device *vdev)
+ 	ivpu_dbg(vdev, PM, "Autosuspend delay = %d\n", delay);
+ }
+ 
+-void ivpu_pm_cancel_recovery(struct ivpu_device *vdev)
++void ivpu_pm_disable_recovery(struct ivpu_device *vdev)
+ {
+ 	drm_WARN_ON(&vdev->drm, delayed_work_pending(&vdev->pm->job_timeout_work));
+-	cancel_work_sync(&vdev->pm->recovery_work);
++	disable_work_sync(&vdev->pm->recovery_work);
+ }
+ 
+ void ivpu_pm_enable(struct ivpu_device *vdev)
+diff --git a/drivers/accel/ivpu/ivpu_pm.h b/drivers/accel/ivpu/ivpu_pm.h
+index b70efe6c36e47f..ba5b9d567e7b4d 100644
+--- a/drivers/accel/ivpu/ivpu_pm.h
++++ b/drivers/accel/ivpu/ivpu_pm.h
+@@ -25,7 +25,7 @@ struct ivpu_pm_info {
+ void ivpu_pm_init(struct ivpu_device *vdev);
+ void ivpu_pm_enable(struct ivpu_device *vdev);
+ void ivpu_pm_disable(struct ivpu_device *vdev);
+-void ivpu_pm_cancel_recovery(struct ivpu_device *vdev);
++void ivpu_pm_disable_recovery(struct ivpu_device *vdev);
+ 
+ int ivpu_pm_suspend_cb(struct device *dev);
+ int ivpu_pm_resume_cb(struct device *dev);
+diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
+index bf3be532e0895d..64c4dda5d6dbbd 100644
+--- a/drivers/acpi/arm64/iort.c
++++ b/drivers/acpi/arm64/iort.c
+@@ -937,8 +937,10 @@ static u32 *iort_rmr_alloc_sids(u32 *sids, u32 count, u32 id_start,
+ 
+ 	new_sids = krealloc_array(sids, count + new_count,
+ 				  sizeof(*new_sids), GFP_KERNEL);
+-	if (!new_sids)
++	if (!new_sids) {
++		kfree(sids);
+ 		return NULL;
++	}
+ 
+ 	for (i = count; i < total_count; i++)
+ 		new_sids[i] = id_start++;
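The IORT fix addresses a classic realloc-family leak: on failure the original allocation is still live, and returning NULL through the only remaining pointer loses it. The same corrected pattern in portable C:

#include <stdlib.h>

static int *grow_or_free(int *arr, size_t new_count)
{
	int *tmp = realloc(arr, new_count * sizeof(*tmp));

	if (!tmp) {
		free(arr);	/* the old buffer is still ours to free */
		return NULL;
	}
	return tmp;
}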
+diff --git a/drivers/acpi/riscv/cppc.c b/drivers/acpi/riscv/cppc.c
+index 4cdff387deff6c..10a2b554b3ccf8 100644
+--- a/drivers/acpi/riscv/cppc.c
++++ b/drivers/acpi/riscv/cppc.c
+@@ -121,7 +121,7 @@ int cpc_read_ffh(int cpu, struct cpc_reg *reg, u64 *val)
+ 
+ 		*val = data.ret.value;
+ 
+-		return (data.ret.error) ? sbi_err_map_linux_errno(data.ret.error) : 0;
++		return data.ret.error;
+ 	}
+ 
+ 	return -EINVAL;
+@@ -150,7 +150,7 @@ int cpc_write_ffh(int cpu, struct cpc_reg *reg, u64 val)
+ 
+ 		smp_call_function_single(cpu, cppc_ffh_csr_write, &data, 1);
+ 
+-		return (data.ret.error) ? sbi_err_map_linux_errno(data.ret.error) : 0;
++		return data.ret.error;
+ 	}
+ 
+ 	return -EINVAL;
+diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
+index fd6c565f8a507c..4bedcb49a73a70 100644
+--- a/drivers/block/virtio_blk.c
++++ b/drivers/block/virtio_blk.c
+@@ -1106,9 +1106,7 @@ cache_type_store(struct device *dev, struct device_attribute *attr,
+ 		lim.features |= BLK_FEAT_WRITE_CACHE;
+ 	else
+ 		lim.features &= ~BLK_FEAT_WRITE_CACHE;
+-	blk_mq_freeze_queue(disk->queue);
+-	i = queue_limits_commit_update(disk->queue, &lim);
+-	blk_mq_unfreeze_queue(disk->queue);
++	i = queue_limits_commit_update_frozen(disk->queue, &lim);
+ 	if (i)
+ 		return i;
+ 	return count;
+diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
+index 9ac22e4a070bef..59872e73c1878f 100644
+--- a/drivers/bluetooth/hci_vhci.c
++++ b/drivers/bluetooth/hci_vhci.c
+@@ -380,6 +380,28 @@ static const struct file_operations force_devcoredump_fops = {
+ 	.write		= force_devcd_write,
+ };
+ 
++static void vhci_debugfs_init(struct vhci_data *data)
++{
++	struct hci_dev *hdev = data->hdev;
++
++	debugfs_create_file("force_suspend", 0644, hdev->debugfs, data,
++			    &force_suspend_fops);
++
++	debugfs_create_file("force_wakeup", 0644, hdev->debugfs, data,
++			    &force_wakeup_fops);
++
++	if (IS_ENABLED(CONFIG_BT_MSFTEXT))
++		debugfs_create_file("msft_opcode", 0644, hdev->debugfs, data,
++				    &msft_opcode_fops);
++
++	if (IS_ENABLED(CONFIG_BT_AOSPEXT))
++		debugfs_create_file("aosp_capable", 0644, hdev->debugfs, data,
++				    &aosp_capable_fops);
++
++	debugfs_create_file("force_devcoredump", 0644, hdev->debugfs, data,
++			    &force_devcoredump_fops);
++}
++
+ static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
+ {
+ 	struct hci_dev *hdev;
+@@ -433,22 +455,8 @@ static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
+ 		return -EBUSY;
+ 	}
+ 
+-	debugfs_create_file("force_suspend", 0644, hdev->debugfs, data,
+-			    &force_suspend_fops);
+-
+-	debugfs_create_file("force_wakeup", 0644, hdev->debugfs, data,
+-			    &force_wakeup_fops);
+-
+-	if (IS_ENABLED(CONFIG_BT_MSFTEXT))
+-		debugfs_create_file("msft_opcode", 0644, hdev->debugfs, data,
+-				    &msft_opcode_fops);
+-
+-	if (IS_ENABLED(CONFIG_BT_AOSPEXT))
+-		debugfs_create_file("aosp_capable", 0644, hdev->debugfs, data,
+-				    &aosp_capable_fops);
+-
+-	debugfs_create_file("force_devcoredump", 0644, hdev->debugfs, data,
+-			    &force_devcoredump_fops);
++	if (!IS_ERR_OR_NULL(hdev->debugfs))
++		vhci_debugfs_init(data);
+ 
+ 	hci_skb_pkt_type(skb) = HCI_VENDOR_PKT;
+ 
+@@ -650,6 +658,21 @@ static int vhci_open(struct inode *inode, struct file *file)
+ 	return 0;
+ }
+ 
++static void vhci_debugfs_remove(struct hci_dev *hdev)
++{
++	debugfs_lookup_and_remove("force_suspend", hdev->debugfs);
++
++	debugfs_lookup_and_remove("force_wakeup", hdev->debugfs);
++
++	if (IS_ENABLED(CONFIG_BT_MSFTEXT))
++		debugfs_lookup_and_remove("msft_opcode", hdev->debugfs);
++
++	if (IS_ENABLED(CONFIG_BT_AOSPEXT))
++		debugfs_lookup_and_remove("aosp_capable", hdev->debugfs);
++
++	debugfs_lookup_and_remove("force_devcoredump", hdev->debugfs);
++}
++
+ static int vhci_release(struct inode *inode, struct file *file)
+ {
+ 	struct vhci_data *data = file->private_data;
+@@ -661,6 +684,8 @@ static int vhci_release(struct inode *inode, struct file *file)
+ 	hdev = data->hdev;
+ 
+ 	if (hdev) {
++		if (!IS_ERR_OR_NULL(hdev->debugfs))
++			vhci_debugfs_remove(hdev);
+ 		hci_unregister_dev(hdev);
+ 		hci_free_dev(hdev);
+ 	}
+diff --git a/drivers/dma/mediatek/mtk-cqdma.c b/drivers/dma/mediatek/mtk-cqdma.c
+index b69eabf12a24fa..1bf4fc461a8ccd 100644
+--- a/drivers/dma/mediatek/mtk-cqdma.c
++++ b/drivers/dma/mediatek/mtk-cqdma.c
+@@ -420,15 +420,11 @@ static struct virt_dma_desc *mtk_cqdma_find_active_desc(struct dma_chan *c,
+ {
+ 	struct mtk_cqdma_vchan *cvc = to_cqdma_vchan(c);
+ 	struct virt_dma_desc *vd;
+-	unsigned long flags;
+ 
+-	spin_lock_irqsave(&cvc->pc->lock, flags);
+ 	list_for_each_entry(vd, &cvc->pc->queue, node)
+ 		if (vd->tx.cookie == cookie) {
+-			spin_unlock_irqrestore(&cvc->pc->lock, flags);
+ 			return vd;
+ 		}
+-	spin_unlock_irqrestore(&cvc->pc->lock, flags);
+ 
+ 	list_for_each_entry(vd, &cvc->vc.desc_issued, node)
+ 		if (vd->tx.cookie == cookie)
+@@ -452,9 +448,11 @@ static enum dma_status mtk_cqdma_tx_status(struct dma_chan *c,
+ 	if (ret == DMA_COMPLETE || !txstate)
+ 		return ret;
+ 
+-	spin_lock_irqsave(&cvc->vc.lock, flags);
++	spin_lock_irqsave(&cvc->pc->lock, flags);
++	spin_lock(&cvc->vc.lock);
+ 	vd = mtk_cqdma_find_active_desc(c, cookie);
+-	spin_unlock_irqrestore(&cvc->vc.lock, flags);
++	spin_unlock(&cvc->vc.lock);
++	spin_unlock_irqrestore(&cvc->pc->lock, flags);
+ 
+ 	if (vd) {
+ 		cvd = to_cqdma_vdesc(vd);
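The cqdma fix moves both acquisitions into the caller with a fixed order, pc->lock (outer, IRQ-saving) before vc.lock (inner), so the status query cannot deadlock against other paths that nest the two locks. A hedged userspace analogue of that ordering rule:

#include <pthread.h>

static pthread_mutex_t pc_lock = PTHREAD_MUTEX_INITIALIZER;	/* physical channel */
static pthread_mutex_t vc_lock = PTHREAD_MUTEX_INITIALIZER;	/* virtual channel  */

static void query_descriptor_status(void)
{
	pthread_mutex_lock(&pc_lock);	/* outer lock first */
	pthread_mutex_lock(&vc_lock);	/* then the inner one */

	/* ... walk both descriptor lists safely ... */

	pthread_mutex_unlock(&vc_lock);
	pthread_mutex_unlock(&pc_lock);
}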
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+index 3d42f6c3308ed3..8553ac4c0ad3f1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+@@ -433,7 +433,7 @@ static int psp_sw_init(void *handle)
+ 	psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+ 	if (!psp->cmd) {
+ 		dev_err(adev->dev, "Failed to allocate memory to command buffer!\n");
+-		ret = -ENOMEM;
++		return -ENOMEM;
+ 	}
+ 
+ 	adev->psp.xgmi_context.supports_extended_data =
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 0adb106e2c4256..37d53578825b33 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -2292,11 +2292,13 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
+  */
+ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
+ {
+-	timeout = drm_sched_entity_flush(&vm->immediate, timeout);
++	timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv,
++					DMA_RESV_USAGE_BOOKKEEP,
++					true, timeout);
+ 	if (timeout <= 0)
+ 		return timeout;
+ 
+-	return drm_sched_entity_flush(&vm->delayed, timeout);
++	return dma_fence_wait_timeout(vm->last_unlocked, true, timeout);
+ }
+ 
+ static void amdgpu_vm_destroy_task_info(struct kref *kref)
+diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+index 70c1399f738def..baafbb5c032af5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+@@ -1462,17 +1462,12 @@ static int dce_v10_0_audio_init(struct amdgpu_device *adev)
+ 
+ static void dce_v10_0_audio_fini(struct amdgpu_device *adev)
+ {
+-	int i;
+-
+ 	if (!amdgpu_audio)
+ 		return;
+ 
+ 	if (!adev->mode_info.audio.enabled)
+ 		return;
+ 
+-	for (i = 0; i < adev->mode_info.audio.num_pins; i++)
+-		dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
+-
+ 	adev->mode_info.audio.enabled = false;
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+index f154c24499c8a4..a67b6b20b677cd 100644
+--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+@@ -1511,17 +1511,12 @@ static int dce_v11_0_audio_init(struct amdgpu_device *adev)
+ 
+ static void dce_v11_0_audio_fini(struct amdgpu_device *adev)
+ {
+-	int i;
+-
+ 	if (!amdgpu_audio)
+ 		return;
+ 
+ 	if (!adev->mode_info.audio.enabled)
+ 		return;
+ 
+-	for (i = 0; i < adev->mode_info.audio.num_pins; i++)
+-		dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
+-
+ 	adev->mode_info.audio.enabled = false;
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+index a7fcb135827f8b..1036b7a3739032 100644
+--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+@@ -1394,17 +1394,12 @@ static int dce_v6_0_audio_init(struct amdgpu_device *adev)
+ 
+ static void dce_v6_0_audio_fini(struct amdgpu_device *adev)
+ {
+-	int i;
+-
+ 	if (!amdgpu_audio)
+ 		return;
+ 
+ 	if (!adev->mode_info.audio.enabled)
+ 		return;
+ 
+-	for (i = 0; i < adev->mode_info.audio.num_pins; i++)
+-		dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
+-
+ 	adev->mode_info.audio.enabled = false;
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+index 77ac3f114d2411..0b30b3ed9d4b96 100644
+--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+@@ -1443,17 +1443,12 @@ static int dce_v8_0_audio_init(struct amdgpu_device *adev)
+ 
+ static void dce_v8_0_audio_fini(struct amdgpu_device *adev)
+ {
+-	int i;
+-
+ 	if (!amdgpu_audio)
+ 		return;
+ 
+ 	if (!adev->mode_info.audio.enabled)
+ 		return;
+ 
+-	for (i = 0; i < adev->mode_info.audio.num_pins; i++)
+-		dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
+-
+ 	adev->mode_info.audio.enabled = false;
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+index 4a9d07c31bc5b1..0c50fe266c8a16 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+@@ -896,13 +896,13 @@ void dce110_link_encoder_construct(
+ 						enc110->base.id, &bp_cap_info);
+ 
+ 	/* Override features with DCE-specific values */
+-	if (BP_RESULT_OK == result) {
++	if (result == BP_RESULT_OK) {
+ 		enc110->base.features.flags.bits.IS_HBR2_CAPABLE =
+ 				bp_cap_info.DP_HBR2_EN;
+ 		enc110->base.features.flags.bits.IS_HBR3_CAPABLE =
+ 				bp_cap_info.DP_HBR3_EN;
+ 		enc110->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
+-	} else {
++	} else if (result != BP_RESULT_NORECORD) {
+ 		DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
+ 				__func__,
+ 				result);
+@@ -1798,13 +1798,13 @@ void dce60_link_encoder_construct(
+ 						enc110->base.id, &bp_cap_info);
+ 
+ 	/* Override features with DCE-specific values */
+-	if (BP_RESULT_OK == result) {
++	if (result == BP_RESULT_OK) {
+ 		enc110->base.features.flags.bits.IS_HBR2_CAPABLE =
+ 				bp_cap_info.DP_HBR2_EN;
+ 		enc110->base.features.flags.bits.IS_HBR3_CAPABLE =
+ 				bp_cap_info.DP_HBR3_EN;
+ 		enc110->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
+-	} else {
++	} else if (result != BP_RESULT_NORECORD) {
+ 		DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
+ 				__func__,
+ 				result);
+diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c
+index 75fb77bca83ba2..01480a04f85ef5 100644
+--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c
++++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c
+@@ -520,6 +520,15 @@ void dpp1_dppclk_control(
+ 		REG_UPDATE(DPP_CONTROL, DPP_CLOCK_ENABLE, 0);
+ }
+ 
++void dpp_force_disable_cursor(struct dpp *dpp_base)
++{
++	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
++
++	/* Force disable cursor */
++	REG_UPDATE(CURSOR0_CONTROL, CUR0_ENABLE, 0);
++	dpp_base->pos.cur0_ctl.bits.cur0_enable = 0;
++}
++
+ static const struct dpp_funcs dcn10_dpp_funcs = {
+ 		.dpp_read_state = dpp_read_state,
+ 		.dpp_reset = dpp_reset,
+diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h
+index c48139bed11f51..f466182963f756 100644
+--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h
++++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h
+@@ -1525,4 +1525,6 @@ void dpp1_construct(struct dcn10_dpp *dpp1,
+ 
+ void dpp1_cm_get_gamut_remap(struct dpp *dpp_base,
+ 			     struct dpp_grph_csc_adjustment *adjust);
++void dpp_force_disable_cursor(struct dpp *dpp_base);
++
+ #endif
+diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
+index abf439e743f233..6c3cae593ad549 100644
+--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
++++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
+@@ -1497,6 +1497,7 @@ static struct dpp_funcs dcn30_dpp_funcs = {
+ 	.dpp_dppclk_control		= dpp1_dppclk_control,
+ 	.dpp_set_hdr_multiplier		= dpp3_set_hdr_multiplier,
+ 	.dpp_get_gamut_remap		= dpp3_cm_get_gamut_remap,
++	.dpp_force_disable_cursor 	= dpp_force_disable_cursor,
+ };
+ 
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
+index efcc1a6b364c27..01137ec02f0849 100644
+--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
+@@ -502,3 +502,75 @@ void dcn314_disable_link_output(struct dc_link *link,
+ 
+ 	apply_symclk_on_tx_off_wa(link);
+ }
++
++/**
++ * dcn314_dpp_pg_control - DPP power gate control.
++ *
++ * @hws: dce_hwseq reference.
++ * @dpp_inst: DPP instance reference.
++ * @power_on: true to power the DPP instance up, false to power-gate it.
++ *
++ * Enable or disable the power gate of the specified DPP instance.
++ * If power gating is disabled, the cursor in the DPP instance is force-disabled instead.
++ */
++void dcn314_dpp_pg_control(
++		struct dce_hwseq *hws,
++		unsigned int dpp_inst,
++		bool power_on)
++{
++	uint32_t power_gate = power_on ? 0 : 1;
++	uint32_t pwr_status = power_on ? 0 : 2;
++
++
++	if (hws->ctx->dc->debug.disable_dpp_power_gate) {
++		/* Workaround for DCN314 with disabled power gating */
++		if (!power_on) {
++
++			/* Force disable cursor if power gating is disabled */
++			struct dpp *dpp = hws->ctx->dc->res_pool->dpps[dpp_inst];
++			if (dpp && dpp->funcs->dpp_force_disable_cursor)
++				dpp->funcs->dpp_force_disable_cursor(dpp);
++		}
++		return;
++	}
++	if (REG(DOMAIN1_PG_CONFIG) == 0)
++		return;
++
++	switch (dpp_inst) {
++	case 0: /* DPP0 */
++		REG_UPDATE(DOMAIN1_PG_CONFIG,
++				DOMAIN1_POWER_GATE, power_gate);
++
++		REG_WAIT(DOMAIN1_PG_STATUS,
++				DOMAIN1_PGFSM_PWR_STATUS, pwr_status,
++				1, 1000);
++		break;
++	case 1: /* DPP1 */
++		REG_UPDATE(DOMAIN3_PG_CONFIG,
++				DOMAIN3_POWER_GATE, power_gate);
++
++		REG_WAIT(DOMAIN3_PG_STATUS,
++				DOMAIN3_PGFSM_PWR_STATUS, pwr_status,
++				1, 1000);
++		break;
++	case 2: /* DPP2 */
++		REG_UPDATE(DOMAIN5_PG_CONFIG,
++				DOMAIN5_POWER_GATE, power_gate);
++
++		REG_WAIT(DOMAIN5_PG_STATUS,
++				DOMAIN5_PGFSM_PWR_STATUS, pwr_status,
++				1, 1000);
++		break;
++	case 3: /* DPP3 */
++		REG_UPDATE(DOMAIN7_PG_CONFIG,
++				DOMAIN7_POWER_GATE, power_gate);
++
++		REG_WAIT(DOMAIN7_PG_STATUS,
++				DOMAIN7_PGFSM_PWR_STATUS, pwr_status,
++				1, 1000);
++		break;
++	default:
++		BREAK_TO_DEBUGGER();
++		break;
++	}
++}
+diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.h
+index 2305ad282f218b..6c072d0274ea37 100644
+--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.h
++++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.h
+@@ -47,4 +47,6 @@ void dcn314_dpp_root_clock_control(struct dce_hwseq *hws, unsigned int dpp_inst,
+ 
+ void dcn314_disable_link_output(struct dc_link *link, const struct link_resource *link_res, enum signal_type signal);
+ 
++void dcn314_dpp_pg_control(struct dce_hwseq *hws, unsigned int dpp_inst, bool power_on);
++
+ #endif /* __DC_HWSS_DCN314_H__ */
+diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c
+index 68e6de6b5758d5..5251dde383bbd3 100644
+--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c
++++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c
+@@ -141,6 +141,7 @@ static const struct hwseq_private_funcs dcn314_private_funcs = {
+ 	.enable_power_gating_plane = dcn314_enable_power_gating_plane,
+ 	.dpp_root_clock_control = dcn314_dpp_root_clock_control,
+ 	.hubp_pg_control = dcn31_hubp_pg_control,
++	.dpp_pg_control = dcn314_dpp_pg_control,
+ 	.program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree,
+ 	.update_odm = dcn314_update_odm,
+ 	.dsc_pg_control = dcn314_dsc_pg_control,
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+index 0c5675d1c59368..1b7c085dc2cc1e 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+@@ -349,6 +349,9 @@ struct dpp_funcs {
+ 		struct dpp *dpp_base,
+ 		enum dc_color_space color_space,
+ 		struct dc_csc_transform cursor_csc_color_matrix);
++
++	void (*dpp_force_disable_cursor)(struct dpp *dpp_base);
++
+ };
+ 
+ 
+diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+index 4d17d1e1c38b4b..9859ec688e567a 100644
+--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
++++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+@@ -375,6 +375,17 @@ static int __maybe_unused ti_sn65dsi86_resume(struct device *dev)
+ 
+ 	gpiod_set_value_cansleep(pdata->enable_gpio, 1);
+ 
++	/*
++	 * After EN is deasserted and an external clock is detected, the bridge
++	 * will sample GPIO3:1 to determine its frequency. The driver will
++	 * overwrite this setting in ti_sn_bridge_set_refclk_freq(). But this is
++	 * racy. Thus we have to wait a couple of microseconds. According to the datasheet
++	 * the GPIO lines have to be stable for at least 5 us (td5), but it seems that
++	 * this is not enough and the refclk frequency value is still lost or
++	 * overwritten by the bridge itself. Waiting for 20us seems to work.
++	 */
++	usleep_range(20, 30);
++
+ 	/*
+ 	 * If we have a reference clock we can enable communication w/ the
+ 	 * panel (including the aux channel) w/out any need for an input clock
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
+index 22443fe4a39ff6..d0092b293090f8 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
+@@ -352,6 +352,8 @@ nvkm_fifo_dtor(struct nvkm_engine *engine)
+ 	mutex_destroy(&fifo->userd.mutex);
+ 
+ 	nvkm_event_fini(&fifo->nonstall.event);
++	if (fifo->func->nonstall_dtor)
++		fifo->func->nonstall_dtor(fifo);
+ 	mutex_destroy(&fifo->mutex);
+ 
+ 	if (fifo->func->dtor)
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga100.c
+index e74493a4569edb..6848a56f20c076 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga100.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga100.c
+@@ -517,19 +517,11 @@ ga100_fifo_nonstall_intr(struct nvkm_inth *inth)
+ static void
+ ga100_fifo_nonstall_block(struct nvkm_event *event, int type, int index)
+ {
+-	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), nonstall.event);
+-	struct nvkm_runl *runl = nvkm_runl_get(fifo, index, 0);
+-
+-	nvkm_inth_block(&runl->nonstall.inth);
+ }
+ 
+ static void
+ ga100_fifo_nonstall_allow(struct nvkm_event *event, int type, int index)
+ {
+-	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), nonstall.event);
+-	struct nvkm_runl *runl = nvkm_runl_get(fifo, index, 0);
+-
+-	nvkm_inth_allow(&runl->nonstall.inth);
+ }
+ 
+ const struct nvkm_event_func
+@@ -564,12 +556,26 @@ ga100_fifo_nonstall_ctor(struct nvkm_fifo *fifo)
+ 		if (ret)
+ 			return ret;
+ 
++		nvkm_inth_allow(&runl->nonstall.inth);
++
+ 		nr = max(nr, runl->id + 1);
+ 	}
+ 
+ 	return nr;
+ }
+ 
++void
++ga100_fifo_nonstall_dtor(struct nvkm_fifo *fifo)
++{
++	struct nvkm_runl *runl;
++
++	nvkm_runl_foreach(runl, fifo) {
++		if (runl->nonstall.vector < 0)
++			continue;
++		nvkm_inth_block(&runl->nonstall.inth);
++	}
++}
++
+ int
+ ga100_fifo_runl_ctor(struct nvkm_fifo *fifo)
+ {
+@@ -599,6 +605,7 @@ ga100_fifo = {
+ 	.runl_ctor = ga100_fifo_runl_ctor,
+ 	.mmu_fault = &tu102_fifo_mmu_fault,
+ 	.nonstall_ctor = ga100_fifo_nonstall_ctor,
++	.nonstall_dtor = ga100_fifo_nonstall_dtor,
+ 	.nonstall = &ga100_fifo_nonstall,
+ 	.runl = &ga100_runl,
+ 	.runq = &ga100_runq,
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga102.c
+index 755235f55b3aca..18a0b1f4eab76a 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga102.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga102.c
+@@ -30,6 +30,7 @@ ga102_fifo = {
+ 	.runl_ctor = ga100_fifo_runl_ctor,
+ 	.mmu_fault = &tu102_fifo_mmu_fault,
+ 	.nonstall_ctor = ga100_fifo_nonstall_ctor,
++	.nonstall_dtor = ga100_fifo_nonstall_dtor,
+ 	.nonstall = &ga100_fifo_nonstall,
+ 	.runl = &ga100_runl,
+ 	.runq = &ga100_runq,
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
+index a0f3277605a5cf..c5ecbcae29674a 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
+@@ -40,6 +40,7 @@ struct nvkm_fifo_func {
+ 	void (*start)(struct nvkm_fifo *, unsigned long *);
+ 
+ 	int (*nonstall_ctor)(struct nvkm_fifo *);
++	void (*nonstall_dtor)(struct nvkm_fifo *);
+ 	const struct nvkm_event_func *nonstall;
+ 
+ 	const struct nvkm_runl_func *runl;
+@@ -198,6 +199,7 @@ extern const struct nvkm_fifo_func_mmu_fault tu102_fifo_mmu_fault;
+ 
+ int ga100_fifo_runl_ctor(struct nvkm_fifo *);
+ int ga100_fifo_nonstall_ctor(struct nvkm_fifo *);
++void ga100_fifo_nonstall_dtor(struct nvkm_fifo *);
+ extern const struct nvkm_event_func ga100_fifo_nonstall;
+ extern const struct nvkm_runl_func ga100_runl;
+ extern const struct nvkm_runq_func ga100_runq;
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c
+index 3454c7d2950295..f978e49a4d546d 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c
+@@ -660,6 +660,7 @@ r535_fifo_new(const struct nvkm_fifo_func *hw, struct nvkm_device *device,
+ 	rm->chan.func = &r535_chan;
+ 	rm->nonstall = &ga100_fifo_nonstall;
+ 	rm->nonstall_ctor = ga100_fifo_nonstall_ctor;
++	rm->nonstall_dtor = ga100_fifo_nonstall_dtor;
+ 
+ 	return nvkm_fifo_new_(rm, device, type, inst, pfifo);
+ }
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index c2783d04c6e050..9910c6d3fef300 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -45,6 +45,38 @@ static int hid_ignore_special_drivers = 0;
+ module_param_named(ignore_special_drivers, hid_ignore_special_drivers, int, 0600);
+ MODULE_PARM_DESC(ignore_special_drivers, "Ignore any special drivers and handle all devices by generic driver");
+ 
++/*
++ * Convert a signed n-bit integer to a signed 32-bit integer.
++ */
++
++static s32 snto32(__u32 value, unsigned int n)
++{
++	if (!value || !n)
++		return 0;
++
++	if (n > 32)
++		n = 32;
++
++	return sign_extend32(value, n - 1);
++}
++
++/*
++ * Convert a signed 32-bit integer to a signed n-bit integer.
++ */
++
++static u32 s32ton(__s32 value, unsigned int n)
++{
++	s32 a;
++
++	if (!value || !n)
++		return 0;
++
++	a = value >> (n - 1);
++	if (a && a != -1)
++		return value < 0 ? 1 << (n - 1) : (1 << (n - 1)) - 1;
++	return value & ((1 << n) - 1);
++}
++
+ /*
+  * Register a new report for a device.
+  */
+@@ -425,7 +457,7 @@ static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
+ 		 * both this and the standard encoding. */
+ 		raw_value = item_sdata(item);
+ 		if (!(raw_value & 0xfffffff0))
+-			parser->global.unit_exponent = hid_snto32(raw_value, 4);
++			parser->global.unit_exponent = snto32(raw_value, 4);
+ 		else
+ 			parser->global.unit_exponent = raw_value;
+ 		return 0;
+@@ -1317,46 +1349,6 @@ int hid_open_report(struct hid_device *device)
+ }
+ EXPORT_SYMBOL_GPL(hid_open_report);
+ 
+-/*
+- * Convert a signed n-bit integer to signed 32-bit integer. Common
+- * cases are done through the compiler, the screwed things has to be
+- * done by hand.
+- */
+-
+-static s32 snto32(__u32 value, unsigned n)
+-{
+-	if (!value || !n)
+-		return 0;
+-
+-	if (n > 32)
+-		n = 32;
+-
+-	switch (n) {
+-	case 8:  return ((__s8)value);
+-	case 16: return ((__s16)value);
+-	case 32: return ((__s32)value);
+-	}
+-	return value & (1 << (n - 1)) ? value | (~0U << n) : value;
+-}
+-
+-s32 hid_snto32(__u32 value, unsigned n)
+-{
+-	return snto32(value, n);
+-}
+-EXPORT_SYMBOL_GPL(hid_snto32);
+-
+-/*
+- * Convert a signed 32-bit integer to a signed n-bit integer.
+- */
+-
+-static u32 s32ton(__s32 value, unsigned n)
+-{
+-	s32 a = value >> (n - 1);
+-	if (a && a != -1)
+-		return value < 0 ? 1 << (n - 1) : (1 << (n - 1)) - 1;
+-	return value & ((1 << n) - 1);
+-}
+-
+ /*
+  * Extract/implement a data field from/to a little endian report (bit array).
+  *
+diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
+index 234ddd4422d902..59f630962338d3 100644
+--- a/drivers/hid/hid-logitech-hidpp.c
++++ b/drivers/hid/hid-logitech-hidpp.c
+@@ -3296,13 +3296,13 @@ static int m560_raw_event(struct hid_device *hdev, u8 *data, int size)
+ 					 120);
+ 		}
+ 
+-		v = hid_snto32(hid_field_extract(hdev, data+3, 0, 12), 12);
++		v = sign_extend32(hid_field_extract(hdev, data + 3, 0, 12), 11);
+ 		input_report_rel(hidpp->input, REL_X, v);
+ 
+-		v = hid_snto32(hid_field_extract(hdev, data+3, 12, 12), 12);
++		v = sign_extend32(hid_field_extract(hdev, data + 3, 12, 12), 11);
+ 		input_report_rel(hidpp->input, REL_Y, v);
+ 
+-		v = hid_snto32(data[6], 8);
++		v = sign_extend32(data[6], 7);
+ 		if (v != 0)
+ 			hidpp_scroll_counter_handle_scroll(hidpp->input,
+ 					&hidpp->vertical_wheel_counter, v);
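Both the hid-core refactor and the hidpp conversion above lean on sign_extend32(). A standalone demo of its semantics, mirroring the kernel helper's definition in <linux/bitops.h> (like the kernel, it relies on arithmetic right shift of signed values):

#include <stdint.h>
#include <stdio.h>

static int32_t sign_extend32(uint32_t value, int index)
{
	uint8_t shift = 31 - index;

	return (int32_t)(value << shift) >> shift;
}

int main(void)
{
	/* 12-bit fields, as in the m560 report above: index = 12 - 1. */
	printf("%d\n", sign_extend32(0xFFF, 11));	/* -1    */
	printf("%d\n", sign_extend32(0x7FF, 11));	/* 2047  */
	printf("%d\n", sign_extend32(0x800, 11));	/* -2048 */
	return 0;
}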
+diff --git a/drivers/hwmon/mlxreg-fan.c b/drivers/hwmon/mlxreg-fan.c
+index a5f89aab3fb4d2..c25a54d5b39ad5 100644
+--- a/drivers/hwmon/mlxreg-fan.c
++++ b/drivers/hwmon/mlxreg-fan.c
+@@ -561,15 +561,14 @@ static int mlxreg_fan_cooling_config(struct device *dev, struct mlxreg_fan *fan)
+ 		if (!pwm->connected)
+ 			continue;
+ 		pwm->fan = fan;
++		/* Set minimal PWM speed. */
++		pwm->last_hwmon_state = MLXREG_FAN_PWM_DUTY2STATE(MLXREG_FAN_MIN_DUTY);
+ 		pwm->cdev = devm_thermal_of_cooling_device_register(dev, NULL, mlxreg_fan_name[i],
+ 								    pwm, &mlxreg_fan_cooling_ops);
+ 		if (IS_ERR(pwm->cdev)) {
+ 			dev_err(dev, "Failed to register cooling device\n");
+ 			return PTR_ERR(pwm->cdev);
+ 		}
+-
+-		/* Set minimal PWM speed. */
+-		pwm->last_hwmon_state = MLXREG_FAN_PWM_DUTY2STATE(MLXREG_FAN_MIN_DUTY);
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/isdn/mISDN/dsp_hwec.c b/drivers/isdn/mISDN/dsp_hwec.c
+index 0b3f29195330ac..0cd216e28f0090 100644
+--- a/drivers/isdn/mISDN/dsp_hwec.c
++++ b/drivers/isdn/mISDN/dsp_hwec.c
+@@ -51,14 +51,14 @@ void dsp_hwec_enable(struct dsp *dsp, const char *arg)
+ 		goto _do;
+ 
+ 	{
+-		char *dup, *tok, *name, *val;
++		char *dup, *next, *tok, *name, *val;
+ 		int tmp;
+ 
+-		dup = kstrdup(arg, GFP_ATOMIC);
++		dup = next = kstrdup(arg, GFP_ATOMIC);
+ 		if (!dup)
+ 			return;
+ 
+-		while ((tok = strsep(&dup, ","))) {
++		while ((tok = strsep(&next, ","))) {
+ 			if (!strlen(tok))
+ 				continue;
+ 			name = strsep(&tok, "=");
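The mISDN fix is the textbook strsep() pitfall: strsep() advances the pointer it is handed, so the pointer returned by kstrdup() must be kept aside or the buffer can no longer be freed. A hedged userspace version of the corrected pattern (strsep() is a BSD/glibc extension):

#define _DEFAULT_SOURCE		/* for strsep() on glibc */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void parse_csv(const char *arg)
{
	char *dup, *next, *tok;

	dup = next = strdup(arg);
	if (!dup)
		return;

	while ((tok = strsep(&next, ",")))	/* 'next' moves, 'dup' does not */
		if (*tok)
			printf("token: %s\n", tok);

	free(dup);	/* 'next' has been advanced (and is NULL here) */
}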
+diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
+index 0da1d0723f882e..e7e1833ff04b24 100644
+--- a/drivers/md/md-bitmap.c
++++ b/drivers/md/md-bitmap.c
+@@ -426,8 +426,8 @@ static int __write_sb_page(struct md_rdev *rdev, struct bitmap *bitmap,
+ 	struct block_device *bdev;
+ 	struct mddev *mddev = bitmap->mddev;
+ 	struct bitmap_storage *store = &bitmap->storage;
+-	unsigned int bitmap_limit = (bitmap->storage.file_pages - pg_index) <<
+-		PAGE_SHIFT;
++	unsigned long num_pages = bitmap->storage.file_pages;
++	unsigned int bitmap_limit = (num_pages - pg_index % num_pages) << PAGE_SHIFT;
+ 	loff_t sboff, offset = mddev->bitmap_info.offset;
+ 	sector_t ps = pg_index * PAGE_SIZE / SECTOR_SIZE;
+ 	unsigned int size = PAGE_SIZE;
+@@ -436,7 +436,7 @@ static int __write_sb_page(struct md_rdev *rdev, struct bitmap *bitmap,
+ 
+ 	bdev = (rdev->meta_bdev) ? rdev->meta_bdev : rdev->bdev;
+ 	/* we compare length (page numbers), not page offset. */
+-	if ((pg_index - store->sb_index) == store->file_pages - 1) {
++	if ((pg_index - store->sb_index) == num_pages - 1) {
+ 		unsigned int last_page_size = store->bytes & (PAGE_SIZE - 1);
+ 
+ 		if (last_page_size == 0)
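A hedged illustration of the bitmap_limit arithmetic above: once pg_index can run past the number of pages in one bitmap copy, the remaining-page count must be computed modulo the page count; the old plain subtraction underflows to a huge unsigned value instead.

#include <stdio.h>

int main(void)
{
	unsigned long num_pages = 4;	/* pages in one bitmap copy */

	for (unsigned long pg_index = 0; pg_index < 8; pg_index++) {
		unsigned int old = (unsigned int)(num_pages - pg_index);
		unsigned int new = (unsigned int)(num_pages - pg_index % num_pages);

		printf("pg_index=%lu old=%u new=%u\n", pg_index, old, new);
	}
	return 0;
}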
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 4b32917236703e..d2630764429247 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -8996,6 +8996,11 @@ void md_do_sync(struct md_thread *thread)
+ 	}
+ 
+ 	action = md_sync_action(mddev);
++	if (action == ACTION_FROZEN || action == ACTION_IDLE) {
++		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
++		goto skip;
++	}
++
+ 	desc = md_sync_action_name(action);
+ 	mddev->last_sync_action = action;
+ 
+diff --git a/drivers/md/raid1-10.c b/drivers/md/raid1-10.c
+index 4378d3250bd757..f3750ceaa58295 100644
+--- a/drivers/md/raid1-10.c
++++ b/drivers/md/raid1-10.c
+@@ -293,3 +293,13 @@ static inline bool raid1_should_read_first(struct mddev *mddev,
+ 
+ 	return false;
+ }
++
++/*
++ * A bio with REQ_RAHEAD or REQ_NOWAIT can fail at any time, even before it is
++ * submitted to the underlying disks; hence don't record badblocks or retry
++ * in this case.
++ */
++static inline bool raid1_should_handle_error(struct bio *bio)
++{
++	return !(bio->bi_opf & (REQ_RAHEAD | REQ_NOWAIT));
++}
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index fe1599db69c848..faccf7344ef933 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -371,14 +371,16 @@ static void raid1_end_read_request(struct bio *bio)
+ 	 */
+ 	update_head_pos(r1_bio->read_disk, r1_bio);
+ 
+-	if (uptodate)
++	if (uptodate) {
+ 		set_bit(R1BIO_Uptodate, &r1_bio->state);
+-	else if (test_bit(FailFast, &rdev->flags) &&
+-		 test_bit(R1BIO_FailFast, &r1_bio->state))
++	} else if (test_bit(FailFast, &rdev->flags) &&
++		 test_bit(R1BIO_FailFast, &r1_bio->state)) {
+ 		/* This was a fail-fast read so we definitely
+ 		 * want to retry */
+ 		;
+-	else {
++	} else if (!raid1_should_handle_error(bio)) {
++		uptodate = 1;
++	} else {
+ 		/* If all other devices have failed, we want to return
+ 		 * the error upwards rather than fail the last device.
+ 		 * Here we redefine "uptodate" to mean "Don't want to retry"
+@@ -449,16 +451,15 @@ static void raid1_end_write_request(struct bio *bio)
+ 	struct bio *to_put = NULL;
+ 	int mirror = find_bio_disk(r1_bio, bio);
+ 	struct md_rdev *rdev = conf->mirrors[mirror].rdev;
+-	bool discard_error;
+ 	sector_t lo = r1_bio->sector;
+ 	sector_t hi = r1_bio->sector + r1_bio->sectors;
+-
+-	discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD;
++	bool ignore_error = !raid1_should_handle_error(bio) ||
++		(bio->bi_status && bio_op(bio) == REQ_OP_DISCARD);
+ 
+ 	/*
+ 	 * 'one mirror IO has finished' event handler:
+ 	 */
+-	if (bio->bi_status && !discard_error) {
++	if (bio->bi_status && !ignore_error) {
+ 		set_bit(WriteErrorSeen,	&rdev->flags);
+ 		if (!test_and_set_bit(WantReplacement, &rdev->flags))
+ 			set_bit(MD_RECOVERY_NEEDED, &
+@@ -509,7 +510,7 @@ static void raid1_end_write_request(struct bio *bio)
+ 
+ 		/* Maybe we can clear some bad blocks. */
+ 		if (rdev_has_badblock(rdev, r1_bio->sector, r1_bio->sectors) &&
+-		    !discard_error) {
++		    !ignore_error) {
+ 			r1_bio->bios[mirror] = IO_MADE_GOOD;
+ 			set_bit(R1BIO_MadeGood, &r1_bio->state);
+ 		}
+@@ -1223,7 +1224,7 @@ static void alloc_behind_master_bio(struct r1bio *r1_bio,
+ 	int i = 0;
+ 	struct bio *behind_bio = NULL;
+ 
+-	behind_bio = bio_alloc_bioset(NULL, vcnt, 0, GFP_NOIO,
++	behind_bio = bio_alloc_bioset(NULL, vcnt, bio->bi_opf, GFP_NOIO,
+ 				      &r1_bio->mddev->bio_set);
+ 
+ 	/* discard op, we don't support writezero/writesame yet */
+@@ -1315,8 +1316,6 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
+ 	struct r1conf *conf = mddev->private;
+ 	struct raid1_info *mirror;
+ 	struct bio *read_bio;
+-	const enum req_op op = bio_op(bio);
+-	const blk_opf_t do_sync = bio->bi_opf & REQ_SYNC;
+ 	int max_sectors;
+ 	int rdisk;
+ 	bool r1bio_existed = !!r1_bio;
+@@ -1393,13 +1392,12 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
+ 	}
+ 	read_bio = bio_alloc_clone(mirror->rdev->bdev, bio, gfp,
+ 				   &mddev->bio_set);
+-
++	read_bio->bi_opf &= ~REQ_NOWAIT;
+ 	r1_bio->bios[rdisk] = read_bio;
+ 
+ 	read_bio->bi_iter.bi_sector = r1_bio->sector +
+ 		mirror->rdev->data_offset;
+ 	read_bio->bi_end_io = raid1_end_read_request;
+-	read_bio->bi_opf = op | do_sync;
+ 	if (test_bit(FailFast, &mirror->rdev->flags) &&
+ 	    test_bit(R1BIO_FailFast, &r1_bio->state))
+ 	        read_bio->bi_opf |= MD_FAILFAST;
+@@ -1615,11 +1613,11 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
+ 				wait_for_serialization(rdev, r1_bio);
+ 		}
+ 
++		mbio->bi_opf &= ~REQ_NOWAIT;
+ 		r1_bio->bios[i] = mbio;
+ 
+ 		mbio->bi_iter.bi_sector	= (r1_bio->sector + rdev->data_offset);
+ 		mbio->bi_end_io	= raid1_end_write_request;
+-		mbio->bi_opf = bio_op(bio) | (bio->bi_opf & (REQ_SYNC | REQ_FUA));
+ 		if (test_bit(FailFast, &rdev->flags) &&
+ 		    !test_bit(WriteMostly, &rdev->flags) &&
+ 		    conf->raid_disks - mddev->degraded > 1)
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index 7515a98001ca7f..6579bbb6a39a5d 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -398,6 +398,8 @@ static void raid10_end_read_request(struct bio *bio)
+ 		 * wait for the 'master' bio.
+ 		 */
+ 		set_bit(R10BIO_Uptodate, &r10_bio->state);
++	} else if (!raid1_should_handle_error(bio)) {
++		uptodate = 1;
+ 	} else {
+ 		/* If all other devices that store this block have
+ 		 * failed, we want to return the error upwards rather
+@@ -455,9 +457,8 @@ static void raid10_end_write_request(struct bio *bio)
+ 	int slot, repl;
+ 	struct md_rdev *rdev = NULL;
+ 	struct bio *to_put = NULL;
+-	bool discard_error;
+-
+-	discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD;
++	bool ignore_error = !raid1_should_handle_error(bio) ||
++		(bio->bi_status && bio_op(bio) == REQ_OP_DISCARD);
+ 
+ 	dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
+ 
+@@ -471,7 +472,7 @@ static void raid10_end_write_request(struct bio *bio)
+ 	/*
+ 	 * this branch is our 'one mirror IO has finished' event handler:
+ 	 */
+-	if (bio->bi_status && !discard_error) {
++	if (bio->bi_status && !ignore_error) {
+ 		if (repl)
+ 			/* Never record new bad blocks to replacement,
+ 			 * just fail it.
+@@ -526,7 +527,7 @@ static void raid10_end_write_request(struct bio *bio)
+ 		/* Maybe we can clear some bad blocks. */
+ 		if (rdev_has_badblock(rdev, r10_bio->devs[slot].addr,
+ 				      r10_bio->sectors) &&
+-		    !discard_error) {
++		    !ignore_error) {
+ 			bio_put(bio);
+ 			if (repl)
+ 				r10_bio->devs[slot].repl_bio = IO_MADE_GOOD;
+@@ -1146,8 +1147,6 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
+ {
+ 	struct r10conf *conf = mddev->private;
+ 	struct bio *read_bio;
+-	const enum req_op op = bio_op(bio);
+-	const blk_opf_t do_sync = bio->bi_opf & REQ_SYNC;
+ 	int max_sectors;
+ 	struct md_rdev *rdev;
+ 	char b[BDEVNAME_SIZE];
+@@ -1219,6 +1218,7 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
+ 		r10_bio->master_bio = bio;
+ 	}
+ 	read_bio = bio_alloc_clone(rdev->bdev, bio, gfp, &mddev->bio_set);
++	read_bio->bi_opf &= ~REQ_NOWAIT;
+ 
+ 	r10_bio->devs[slot].bio = read_bio;
+ 	r10_bio->devs[slot].rdev = rdev;
+@@ -1226,7 +1226,6 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
+ 	read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +
+ 		choose_data_offset(r10_bio, rdev);
+ 	read_bio->bi_end_io = raid10_end_read_request;
+-	read_bio->bi_opf = op | do_sync;
+ 	if (test_bit(FailFast, &rdev->flags) &&
+ 	    test_bit(R10BIO_FailFast, &r10_bio->state))
+ 	        read_bio->bi_opf |= MD_FAILFAST;
+@@ -1240,9 +1239,6 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
+ 				  struct bio *bio, bool replacement,
+ 				  int n_copy)
+ {
+-	const enum req_op op = bio_op(bio);
+-	const blk_opf_t do_sync = bio->bi_opf & REQ_SYNC;
+-	const blk_opf_t do_fua = bio->bi_opf & REQ_FUA;
+ 	unsigned long flags;
+ 	struct r10conf *conf = mddev->private;
+ 	struct md_rdev *rdev;
+@@ -1253,6 +1249,7 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
+ 			     conf->mirrors[devnum].rdev;
+ 
+ 	mbio = bio_alloc_clone(rdev->bdev, bio, GFP_NOIO, &mddev->bio_set);
++	mbio->bi_opf &= ~REQ_NOWAIT;
+ 	if (replacement)
+ 		r10_bio->devs[n_copy].repl_bio = mbio;
+ 	else
+@@ -1261,7 +1258,6 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
+ 	mbio->bi_iter.bi_sector	= (r10_bio->devs[n_copy].addr +
+ 				   choose_data_offset(r10_bio, rdev));
+ 	mbio->bi_end_io	= raid10_end_write_request;
+-	mbio->bi_opf = op | do_sync | do_fua;
+ 	if (!replacement && test_bit(FailFast,
+ 				     &conf->mirrors[devnum].rdev->flags)
+ 			 && enough(conf, devnum))
+diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
+index 5edd024347bd5d..30daa2db80b193 100644
+--- a/drivers/mmc/host/sdhci-of-arasan.c
++++ b/drivers/mmc/host/sdhci-of-arasan.c
+@@ -76,6 +76,8 @@
+ #define FREQSEL_225M_200M		0x7
+ #define PHY_DLL_TIMEOUT_MS		100
+ 
++#define SDHCI_HW_RST_EN		BIT(4)
++
+ /* Default settings for ZynqMP Clock Phases */
+ #define ZYNQMP_ICLK_PHASE {0, 63, 63, 0, 63,  0,   0, 183, 54,  0, 0}
+ #define ZYNQMP_OCLK_PHASE {0, 72, 60, 0, 60, 72, 135, 48, 72, 135, 0}
+@@ -97,6 +99,9 @@
+ #define HIWORD_UPDATE(val, mask, shift) \
+ 		((val) << (shift) | (mask) << ((shift) + 16))
+ 
++#define CD_STABLE_TIMEOUT_US		1000000
++#define CD_STABLE_MAX_SLEEP_US		10
++
+ /**
+  * struct sdhci_arasan_soc_ctl_field - Field used in sdhci_arasan_soc_ctl_map
+  *
+@@ -204,12 +209,15 @@ struct sdhci_arasan_data {
+  * 19MHz instead
+  */
+ #define SDHCI_ARASAN_QUIRK_CLOCK_25_BROKEN BIT(2)
++/* Enable CD stable check before power-up */
++#define SDHCI_ARASAN_QUIRK_ENSURE_CD_STABLE	BIT(3)
+ };
+ 
+ struct sdhci_arasan_of_data {
+ 	const struct sdhci_arasan_soc_ctl_map *soc_ctl_map;
+ 	const struct sdhci_pltfm_data *pdata;
+ 	const struct sdhci_arasan_clk_ops *clk_ops;
++	u32 quirks;
+ };
+ 
+ static const struct sdhci_arasan_soc_ctl_map rk3399_soc_ctl_map = {
+@@ -475,6 +483,21 @@ static void sdhci_arasan_reset(struct sdhci_host *host, u8 mask)
+ 	}
+ }
+ 
++static void sdhci_arasan_hw_reset(struct sdhci_host *host)
++{
++	u8 reg;
++
++	reg = sdhci_readb(host, SDHCI_POWER_CONTROL);
++	reg |= SDHCI_HW_RST_EN;
++	sdhci_writeb(host, reg, SDHCI_POWER_CONTROL);
++	/* As per the eMMC spec a minimum of 1us is required, but give it 2us for good measure */
++	usleep_range(2, 5);
++	reg &= ~SDHCI_HW_RST_EN;
++	sdhci_writeb(host, reg, SDHCI_POWER_CONTROL);
++	/* As per the eMMC spec a minimum of 200us is required, but give it 300us for good measure */
++	usleep_range(300, 500);
++}
++
+ static int sdhci_arasan_voltage_switch(struct mmc_host *mmc,
+ 				       struct mmc_ios *ios)
+ {
+@@ -497,6 +520,24 @@ static int sdhci_arasan_voltage_switch(struct mmc_host *mmc,
+ 	return -EINVAL;
+ }
+ 
++static void sdhci_arasan_set_power_and_bus_voltage(struct sdhci_host *host, unsigned char mode,
++						   unsigned short vdd)
++{
++	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
++	struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
++	u32 reg;
++
++	/*
++	 * Ensure that the card detect logic has stabilized before powering up; this is
++	 * necessary after a host controller reset.
++	 */
++	if (mode == MMC_POWER_UP && sdhci_arasan->quirks & SDHCI_ARASAN_QUIRK_ENSURE_CD_STABLE)
++		read_poll_timeout(sdhci_readl, reg, reg & SDHCI_CD_STABLE, CD_STABLE_MAX_SLEEP_US,
++				  CD_STABLE_TIMEOUT_US, false, host, SDHCI_PRESENT_STATE);
++
++	sdhci_set_power_and_bus_voltage(host, mode, vdd);
++}
++
+ static const struct sdhci_ops sdhci_arasan_ops = {
+ 	.set_clock = sdhci_arasan_set_clock,
+ 	.get_max_clock = sdhci_pltfm_clk_get_max_clock,
+@@ -504,7 +545,8 @@ static const struct sdhci_ops sdhci_arasan_ops = {
+ 	.set_bus_width = sdhci_set_bus_width,
+ 	.reset = sdhci_arasan_reset,
+ 	.set_uhs_signaling = sdhci_set_uhs_signaling,
+-	.set_power = sdhci_set_power_and_bus_voltage,
++	.set_power = sdhci_arasan_set_power_and_bus_voltage,
++	.hw_reset = sdhci_arasan_hw_reset,
+ };
+ 
+ static u32 sdhci_arasan_cqhci_irq(struct sdhci_host *host, u32 intmask)
+@@ -552,7 +594,7 @@ static const struct sdhci_ops sdhci_arasan_cqe_ops = {
+ 	.set_bus_width = sdhci_set_bus_width,
+ 	.reset = sdhci_arasan_reset,
+ 	.set_uhs_signaling = sdhci_set_uhs_signaling,
+-	.set_power = sdhci_set_power_and_bus_voltage,
++	.set_power = sdhci_arasan_set_power_and_bus_voltage,
+ 	.irq = sdhci_arasan_cqhci_irq,
+ };
+ 
+@@ -1429,6 +1471,7 @@ static const struct sdhci_arasan_clk_ops zynqmp_clk_ops = {
+ static struct sdhci_arasan_of_data sdhci_arasan_zynqmp_data = {
+ 	.pdata = &sdhci_arasan_zynqmp_pdata,
+ 	.clk_ops = &zynqmp_clk_ops,
++	.quirks = SDHCI_ARASAN_QUIRK_ENSURE_CD_STABLE,
+ };
+ 
+ static const struct sdhci_arasan_clk_ops versal_clk_ops = {
+@@ -1439,6 +1482,7 @@ static const struct sdhci_arasan_clk_ops versal_clk_ops = {
+ static struct sdhci_arasan_of_data sdhci_arasan_versal_data = {
+ 	.pdata = &sdhci_arasan_zynqmp_pdata,
+ 	.clk_ops = &versal_clk_ops,
++	.quirks = SDHCI_ARASAN_QUIRK_ENSURE_CD_STABLE,
+ };
+ 
+ static const struct sdhci_arasan_clk_ops versal_net_clk_ops = {
+@@ -1449,6 +1493,7 @@ static const struct sdhci_arasan_clk_ops versal_net_clk_ops = {
+ static struct sdhci_arasan_of_data sdhci_arasan_versal_net_data = {
+ 	.pdata = &sdhci_arasan_versal_net_pdata,
+ 	.clk_ops = &versal_net_clk_ops,
++	.quirks = SDHCI_ARASAN_QUIRK_ENSURE_CD_STABLE,
+ };
+ 
+ static struct sdhci_arasan_of_data intel_keembay_emmc_data = {
+@@ -1927,6 +1972,8 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
+ 	if (of_device_is_compatible(np, "rockchip,rk3399-sdhci-5.1"))
+ 		sdhci_arasan_update_clockmultiplier(host, 0x0);
+ 
++	sdhci_arasan->quirks |= data->quirks;
++
+ 	if (of_device_is_compatible(np, "intel,keembay-sdhci-5.1-emmc") ||
+ 	    of_device_is_compatible(np, "intel,keembay-sdhci-5.1-sd") ||
+ 	    of_device_is_compatible(np, "intel,keembay-sdhci-5.1-sdio")) {
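The new quirk polls SDHCI_PRESENT_STATE for the card-detect-stable bit via read_poll_timeout() before powering up. A hedged userspace analogue of that bounded-poll pattern (function and parameters invented for the demo):

#include <stdbool.h>
#include <unistd.h>

/* Poll cond() every sleep_us microseconds, for at most timeout_us. */
static bool poll_until(bool (*cond)(void), unsigned int sleep_us,
		       unsigned int timeout_us)
{
	unsigned int waited = 0;

	while (!cond()) {
		if (waited >= timeout_us)
			return false;	/* caller decides how to handle it */
		usleep(sleep_us);
		waited += sleep_us;
	}
	return true;
}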
+diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
+index 844cf2b8f72786..c903c6fcc6663b 100644
+--- a/drivers/net/dsa/b53/b53_common.c
++++ b/drivers/net/dsa/b53/b53_common.c
+@@ -2378,6 +2378,9 @@ int b53_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy)
+ {
+ 	int ret;
+ 
++	if (!b53_support_eee(ds, port))
++		return 0;
++
+ 	ret = phy_init_eee(phy, false);
+ 	if (ret)
+ 		return 0;
+@@ -2388,13 +2391,16 @@ int b53_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy)
+ }
+ EXPORT_SYMBOL(b53_eee_init);
+ 
+-int b53_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e)
++bool b53_support_eee(struct dsa_switch *ds, int port)
+ {
+ 	struct b53_device *dev = ds->priv;
+ 
+-	if (is5325(dev) || is5365(dev))
+-		return -EOPNOTSUPP;
++	return !is5325(dev) && !is5365(dev) && !is63xx(dev);
++}
++EXPORT_SYMBOL(b53_support_eee);
+ 
++int b53_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e)
++{
+ 	return 0;
+ }
+ EXPORT_SYMBOL(b53_get_mac_eee);
+@@ -2404,9 +2410,6 @@ int b53_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e)
+ 	struct b53_device *dev = ds->priv;
+ 	struct ethtool_keee *p = &dev->ports[port].eee;
+ 
+-	if (is5325(dev) || is5365(dev))
+-		return -EOPNOTSUPP;
+-
+ 	p->eee_enabled = e->eee_enabled;
+ 	b53_eee_enable_set(ds, port, e->eee_enabled);
+ 
+@@ -2463,6 +2466,7 @@ static const struct dsa_switch_ops b53_switch_ops = {
+ 	.port_setup		= b53_setup_port,
+ 	.port_enable		= b53_enable_port,
+ 	.port_disable		= b53_disable_port,
++	.support_eee		= b53_support_eee,
+ 	.get_mac_eee		= b53_get_mac_eee,
+ 	.set_mac_eee		= b53_set_mac_eee,
+ 	.port_bridge_join	= b53_br_join,
+diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
+index 4f8c97098d2a72..e908397e8b9a01 100644
+--- a/drivers/net/dsa/b53/b53_priv.h
++++ b/drivers/net/dsa/b53/b53_priv.h
+@@ -387,6 +387,7 @@ int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy);
+ void b53_disable_port(struct dsa_switch *ds, int port);
+ void b53_brcm_hdr_setup(struct dsa_switch *ds, int port);
+ int b53_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy);
++bool b53_support_eee(struct dsa_switch *ds, int port);
+ int b53_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e);
+ int b53_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e);
+ 
+diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
+index c4771a07878ea6..f1372830d5fa28 100644
+--- a/drivers/net/dsa/bcm_sf2.c
++++ b/drivers/net/dsa/bcm_sf2.c
+@@ -1233,6 +1233,7 @@ static const struct dsa_switch_ops bcm_sf2_ops = {
+ 	.port_setup		= b53_setup_port,
+ 	.port_enable		= bcm_sf2_port_setup,
+ 	.port_disable		= bcm_sf2_port_disable,
++	.support_eee		= b53_support_eee,
+ 	.get_mac_eee		= b53_get_mac_eee,
+ 	.set_mac_eee		= b53_set_mac_eee,
+ 	.port_bridge_join	= b53_br_join,
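
[Editor's note: rather than repeating chip checks in each EEE op, this refactor concentrates the capability test in one predicate that the DSA core (via the new .support_eee op) and the driver's own init path both consult; note the test now also excludes 63xx parts, and bcm_sf2 wires up the same helper. Condensed from the hunks above, the resulting control flow looks like:

  /* Single source of truth for "does this switch do EEE?" */
  bool b53_support_eee(struct dsa_switch *ds, int port)
  {
          struct b53_device *dev = ds->priv;

          return !is5325(dev) && !is5365(dev) && !is63xx(dev);
  }

  int b53_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy)
  {
          if (!b53_support_eee(ds, port))
                  return 0;       /* quietly skip on unsupported silicon */
          /* ... phy_init_eee() and b53_eee_enable_set() as before ... */
  }
]
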
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 08886c3a28c61c..8a6f3e230fce6b 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -4207,7 +4207,7 @@ static void bnxt_alloc_one_rx_ring_page(struct bnxt *bp,
+ 	for (i = 0; i < bp->rx_agg_ring_size; i++) {
+ 		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) {
+ 			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d pages only\n",
+-				    ring_nr, i, bp->rx_ring_size);
++				    ring_nr, i, bp->rx_agg_ring_size);
+ 			break;
+ 		}
+ 		prod = NEXT_RX_AGG(prod);
+diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
+index 6c2d69ef1a8dbd..f7e8c08d844159 100644
+--- a/drivers/net/ethernet/cadence/macb_main.c
++++ b/drivers/net/ethernet/cadence/macb_main.c
+@@ -1234,11 +1234,12 @@ static int macb_tx_complete(struct macb_queue *queue, int budget)
+ {
+ 	struct macb *bp = queue->bp;
+ 	u16 queue_index = queue - bp->queues;
++	unsigned long flags;
+ 	unsigned int tail;
+ 	unsigned int head;
+ 	int packets = 0;
+ 
+-	spin_lock(&queue->tx_ptr_lock);
++	spin_lock_irqsave(&queue->tx_ptr_lock, flags);
+ 	head = queue->tx_head;
+ 	for (tail = queue->tx_tail; tail != head && packets < budget; tail++) {
+ 		struct macb_tx_skb	*tx_skb;
+@@ -1297,7 +1298,7 @@ static int macb_tx_complete(struct macb_queue *queue, int budget)
+ 	    CIRC_CNT(queue->tx_head, queue->tx_tail,
+ 		     bp->tx_ring_size) <= MACB_TX_WAKEUP_THRESH(bp))
+ 		netif_wake_subqueue(bp->dev, queue_index);
+-	spin_unlock(&queue->tx_ptr_lock);
++	spin_unlock_irqrestore(&queue->tx_ptr_lock, flags);
+ 
+ 	return packets;
+ }
+@@ -1713,8 +1714,9 @@ static void macb_tx_restart(struct macb_queue *queue)
+ {
+ 	struct macb *bp = queue->bp;
+ 	unsigned int head_idx, tbqp;
++	unsigned long flags;
+ 
+-	spin_lock(&queue->tx_ptr_lock);
++	spin_lock_irqsave(&queue->tx_ptr_lock, flags);
+ 
+ 	if (queue->tx_head == queue->tx_tail)
+ 		goto out_tx_ptr_unlock;
+@@ -1726,19 +1728,20 @@ static void macb_tx_restart(struct macb_queue *queue)
+ 	if (tbqp == head_idx)
+ 		goto out_tx_ptr_unlock;
+ 
+-	spin_lock_irq(&bp->lock);
++	spin_lock(&bp->lock);
+ 	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
+-	spin_unlock_irq(&bp->lock);
++	spin_unlock(&bp->lock);
+ 
+ out_tx_ptr_unlock:
+-	spin_unlock(&queue->tx_ptr_lock);
++	spin_unlock_irqrestore(&queue->tx_ptr_lock, flags);
+ }
+ 
+ static bool macb_tx_complete_pending(struct macb_queue *queue)
+ {
+ 	bool retval = false;
++	unsigned long flags;
+ 
+-	spin_lock(&queue->tx_ptr_lock);
++	spin_lock_irqsave(&queue->tx_ptr_lock, flags);
+ 	if (queue->tx_head != queue->tx_tail) {
+ 		/* Make hw descriptor updates visible to CPU */
+ 		rmb();
+@@ -1746,7 +1749,7 @@ static bool macb_tx_complete_pending(struct macb_queue *queue)
+ 		if (macb_tx_desc(queue, queue->tx_tail)->ctrl & MACB_BIT(TX_USED))
+ 			retval = true;
+ 	}
+-	spin_unlock(&queue->tx_ptr_lock);
++	spin_unlock_irqrestore(&queue->tx_ptr_lock, flags);
+ 	return retval;
+ }
+ 
+@@ -2314,6 +2317,7 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ 	struct macb_queue *queue = &bp->queues[queue_index];
+ 	unsigned int desc_cnt, nr_frags, frag_size, f;
+ 	unsigned int hdrlen;
++	unsigned long flags;
+ 	bool is_lso;
+ 	netdev_tx_t ret = NETDEV_TX_OK;
+ 
+@@ -2374,7 +2378,7 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		desc_cnt += DIV_ROUND_UP(frag_size, bp->max_tx_length);
+ 	}
+ 
+-	spin_lock_bh(&queue->tx_ptr_lock);
++	spin_lock_irqsave(&queue->tx_ptr_lock, flags);
+ 
+ 	/* This is a hard error, log it. */
+ 	if (CIRC_SPACE(queue->tx_head, queue->tx_tail,
+@@ -2396,15 +2400,15 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ 	wmb();
+ 	skb_tx_timestamp(skb);
+ 
+-	spin_lock_irq(&bp->lock);
++	spin_lock(&bp->lock);
+ 	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
+-	spin_unlock_irq(&bp->lock);
++	spin_unlock(&bp->lock);
+ 
+ 	if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1)
+ 		netif_stop_subqueue(dev, queue_index);
+ 
+ unlock:
+-	spin_unlock_bh(&queue->tx_ptr_lock);
++	spin_unlock_irqrestore(&queue->tx_ptr_lock, flags);
+ 
+ 	return ret;
+ }
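
[Editor's note: all four tx_ptr_lock sites above move from BH-only or plain locking to spin_lock_irqsave(), making the lock safe to take from any context, including hard IRQ; the nested bp->lock correspondingly drops from spin_lock_irq() to plain spin_lock() because interrupts are already disabled by the outer lock. The general shape of the conversion, as a sketch:

  unsigned long flags;

  /* _irqsave disables local interrupts and remembers their prior
   * state, so this section cannot deadlock against the same lock
   * being taken from an interrupt handler. */
  spin_lock_irqsave(&queue->tx_ptr_lock, flags);

  /* Nested lock: IRQs are already off, so a plain spin_lock suffices.
   * spin_unlock_irq() here would unconditionally re-enable IRQs and
   * corrupt the outer section's saved state. */
  spin_lock(&bp->lock);
  /* ... kick the DMA engine ... */
  spin_unlock(&bp->lock);

  spin_unlock_irqrestore(&queue->tx_ptr_lock, flags);
]
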
+diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+index aa80c370223237..a360d3dffccd4a 100644
+--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
++++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+@@ -1493,13 +1493,17 @@ static int bgx_init_of_phy(struct bgx *bgx)
+ 		 * this cortina phy, for which there is no driver
+ 		 * support, ignore it.
+ 		 */
+-		if (phy_np &&
+-		    !of_device_is_compatible(phy_np, "cortina,cs4223-slice")) {
+-			/* Wait until the phy drivers are available */
+-			pd = of_phy_find_device(phy_np);
+-			if (!pd)
+-				goto defer;
+-			bgx->lmac[lmac].phydev = pd;
++		if (phy_np) {
++			if (!of_device_is_compatible(phy_np, "cortina,cs4223-slice")) {
++				/* Wait until the phy drivers are available */
++				pd = of_phy_find_device(phy_np);
++				if (!pd) {
++					of_node_put(phy_np);
++					goto defer;
++				}
++				bgx->lmac[lmac].phydev = pd;
++			}
++			of_node_put(phy_np);
+ 		}
+ 
+ 		lmac++;
+@@ -1515,11 +1519,11 @@ static int bgx_init_of_phy(struct bgx *bgx)
+ 	 * for phy devices we may have already found.
+ 	 */
+ 	while (lmac) {
++		lmac--;
+ 		if (bgx->lmac[lmac].phydev) {
+ 			put_device(&bgx->lmac[lmac].phydev->mdio.dev);
+ 			bgx->lmac[lmac].phydev = NULL;
+ 		}
+-		lmac--;
+ 	}
+ 	of_node_put(node);
+ 	return -EPROBE_DEFER;
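
[Editor's note: two reference-counting fixes above. First, the phandle lookup returns phy_np with an elevated refcount, so it must be put on every path, including the deferral exit. Second, the unwind loop decremented lmac after using it, so with n entries initialised it visited indices n .. 1, never releasing entry 0. The corrected generic unwind idiom, with release() and arr purely illustrative:

  /* Decrement before use so the loop visits n-1 .. 0; the old
   * post-decrement form visited n .. 1 and leaked entry 0. */
  while (n) {
          n--;
          release(&arr[n]);
  }
]
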
+diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
+index 9364bc2b4eb155..641a36dd0e604a 100644
+--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
++++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
+@@ -549,12 +549,12 @@ static int e1000_set_eeprom(struct net_device *netdev,
+ {
+ 	struct e1000_adapter *adapter = netdev_priv(netdev);
+ 	struct e1000_hw *hw = &adapter->hw;
++	size_t total_len, max_len;
+ 	u16 *eeprom_buff;
+-	void *ptr;
+-	int max_len;
++	int ret_val = 0;
+ 	int first_word;
+ 	int last_word;
+-	int ret_val = 0;
++	void *ptr;
+ 	u16 i;
+ 
+ 	if (eeprom->len == 0)
+@@ -569,6 +569,10 @@ static int e1000_set_eeprom(struct net_device *netdev,
+ 
+ 	max_len = hw->nvm.word_size * 2;
+ 
++	if (check_add_overflow(eeprom->offset, eeprom->len, &total_len) ||
++	    total_len > max_len)
++		return -EFBIG;
++
+ 	first_word = eeprom->offset >> 1;
+ 	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+ 	eeprom_buff = kmalloc(max_len, GFP_KERNEL);
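
[Editor's note: check_add_overflow() (from <linux/overflow.h>) folds two failure modes into one test: a wrapped eeprom->offset + eeprom->len and a sum past the NVM size. Previously a crafted offset/len pair could wrap and drive last_word out of range of the max_len buffer. A self-contained userspace sketch of the same guard, built on the __builtin_add_overflow() primitive the kernel helper wraps:

  #include <stdbool.h>
  #include <stddef.h>

  /* Returns true only when offset+len neither wraps nor exceeds the
   * device's addressable range. */
  static bool eeprom_range_ok(size_t offset, size_t len, size_t max_len)
  {
          size_t total;

          if (__builtin_add_overflow(offset, len, &total))
                  return false;           /* arithmetic wrapped */
          return total <= max_len;
  }
]
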
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
+index 59263551c3838f..0b099e5f48163d 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
+@@ -359,8 +359,8 @@ static void i40e_client_add_instance(struct i40e_pf *pf)
+ 	if (i40e_client_get_params(vsi, &cdev->lan_info.params))
+ 		goto free_cdev;
+ 
+-	mac = list_first_entry(&cdev->lan_info.netdev->dev_addrs.list,
+-			       struct netdev_hw_addr, list);
++	mac = list_first_entry_or_null(&cdev->lan_info.netdev->dev_addrs.list,
++				       struct netdev_hw_addr, list);
+ 	if (mac)
+ 		ether_addr_copy(cdev->lan_info.lanmac, mac->addr);
+ 	else
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+index 208c2f0857b61c..ded8f43fdf0683 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+@@ -40,48 +40,6 @@ static struct i40e_vsi *i40e_dbg_find_vsi(struct i40e_pf *pf, int seid)
+  * setup, adding or removing filters, or other things.  Many of
+  * these will be useful for some forms of unit testing.
+  **************************************************************/
+-static char i40e_dbg_command_buf[256] = "";
+-
+-/**
+- * i40e_dbg_command_read - read for command datum
+- * @filp: the opened file
+- * @buffer: where to write the data for the user to read
+- * @count: the size of the user's buffer
+- * @ppos: file position offset
+- **/
+-static ssize_t i40e_dbg_command_read(struct file *filp, char __user *buffer,
+-				     size_t count, loff_t *ppos)
+-{
+-	struct i40e_pf *pf = filp->private_data;
+-	struct i40e_vsi *main_vsi;
+-	int bytes_not_copied;
+-	int buf_size = 256;
+-	char *buf;
+-	int len;
+-
+-	/* don't allow partial reads */
+-	if (*ppos != 0)
+-		return 0;
+-	if (count < buf_size)
+-		return -ENOSPC;
+-
+-	buf = kzalloc(buf_size, GFP_KERNEL);
+-	if (!buf)
+-		return -ENOSPC;
+-
+-	main_vsi = i40e_pf_get_main_vsi(pf);
+-	len = snprintf(buf, buf_size, "%s: %s\n", main_vsi->netdev->name,
+-		       i40e_dbg_command_buf);
+-
+-	bytes_not_copied = copy_to_user(buffer, buf, len);
+-	kfree(buf);
+-
+-	if (bytes_not_copied)
+-		return -EFAULT;
+-
+-	*ppos = len;
+-	return len;
+-}
+ 
+ static char *i40e_filter_state_string[] = {
+ 	"INVALID",
+@@ -1621,7 +1579,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
+ static const struct file_operations i40e_dbg_command_fops = {
+ 	.owner = THIS_MODULE,
+ 	.open =  simple_open,
+-	.read =  i40e_dbg_command_read,
+ 	.write = i40e_dbg_command_write,
+ };
+ 
+@@ -1630,48 +1587,6 @@ static const struct file_operations i40e_dbg_command_fops = {
+  * The netdev_ops entry in debugfs is for giving the driver commands
+  * to be executed from the netdev operations.
+  **************************************************************/
+-static char i40e_dbg_netdev_ops_buf[256] = "";
+-
+-/**
+- * i40e_dbg_netdev_ops_read - read for netdev_ops datum
+- * @filp: the opened file
+- * @buffer: where to write the data for the user to read
+- * @count: the size of the user's buffer
+- * @ppos: file position offset
+- **/
+-static ssize_t i40e_dbg_netdev_ops_read(struct file *filp, char __user *buffer,
+-					size_t count, loff_t *ppos)
+-{
+-	struct i40e_pf *pf = filp->private_data;
+-	struct i40e_vsi *main_vsi;
+-	int bytes_not_copied;
+-	int buf_size = 256;
+-	char *buf;
+-	int len;
+-
+-	/* don't allow partal reads */
+-	if (*ppos != 0)
+-		return 0;
+-	if (count < buf_size)
+-		return -ENOSPC;
+-
+-	buf = kzalloc(buf_size, GFP_KERNEL);
+-	if (!buf)
+-		return -ENOSPC;
+-
+-	main_vsi = i40e_pf_get_main_vsi(pf);
+-	len = snprintf(buf, buf_size, "%s: %s\n", main_vsi->netdev->name,
+-		       i40e_dbg_netdev_ops_buf);
+-
+-	bytes_not_copied = copy_to_user(buffer, buf, len);
+-	kfree(buf);
+-
+-	if (bytes_not_copied)
+-		return -EFAULT;
+-
+-	*ppos = len;
+-	return len;
+-}
+ 
+ /**
+  * i40e_dbg_netdev_ops_write - write into netdev_ops datum
+@@ -1685,35 +1600,36 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
+ 					 size_t count, loff_t *ppos)
+ {
+ 	struct i40e_pf *pf = filp->private_data;
++	char *cmd_buf, *buf_tmp;
+ 	int bytes_not_copied;
+ 	struct i40e_vsi *vsi;
+-	char *buf_tmp;
+ 	int vsi_seid;
+ 	int i, cnt;
+ 
+ 	/* don't allow partial writes */
+ 	if (*ppos != 0)
+ 		return 0;
+-	if (count >= sizeof(i40e_dbg_netdev_ops_buf))
+-		return -ENOSPC;
+ 
+-	memset(i40e_dbg_netdev_ops_buf, 0, sizeof(i40e_dbg_netdev_ops_buf));
+-	bytes_not_copied = copy_from_user(i40e_dbg_netdev_ops_buf,
+-					  buffer, count);
+-	if (bytes_not_copied)
++	cmd_buf = kzalloc(count + 1, GFP_KERNEL);
++	if (!cmd_buf)
++		return count;
++	bytes_not_copied = copy_from_user(cmd_buf, buffer, count);
++	if (bytes_not_copied) {
++		kfree(cmd_buf);
+ 		return -EFAULT;
+-	i40e_dbg_netdev_ops_buf[count] = '\0';
++	}
++	cmd_buf[count] = '\0';
+ 
+-	buf_tmp = strchr(i40e_dbg_netdev_ops_buf, '\n');
++	buf_tmp = strchr(cmd_buf, '\n');
+ 	if (buf_tmp) {
+ 		*buf_tmp = '\0';
+-		count = buf_tmp - i40e_dbg_netdev_ops_buf + 1;
++		count = buf_tmp - cmd_buf + 1;
+ 	}
+ 
+-	if (strncmp(i40e_dbg_netdev_ops_buf, "change_mtu", 10) == 0) {
++	if (strncmp(cmd_buf, "change_mtu", 10) == 0) {
+ 		int mtu;
+ 
+-		cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i %i",
++		cnt = sscanf(&cmd_buf[11], "%i %i",
+ 			     &vsi_seid, &mtu);
+ 		if (cnt != 2) {
+ 			dev_info(&pf->pdev->dev, "change_mtu <vsi_seid> <mtu>\n");
+@@ -1735,8 +1651,8 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
+ 			dev_info(&pf->pdev->dev, "Could not acquire RTNL - please try again\n");
+ 		}
+ 
+-	} else if (strncmp(i40e_dbg_netdev_ops_buf, "set_rx_mode", 11) == 0) {
+-		cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i", &vsi_seid);
++	} else if (strncmp(cmd_buf, "set_rx_mode", 11) == 0) {
++		cnt = sscanf(&cmd_buf[11], "%i", &vsi_seid);
+ 		if (cnt != 1) {
+ 			dev_info(&pf->pdev->dev, "set_rx_mode <vsi_seid>\n");
+ 			goto netdev_ops_write_done;
+@@ -1756,8 +1672,8 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
+ 			dev_info(&pf->pdev->dev, "Could not acquire RTNL - please try again\n");
+ 		}
+ 
+-	} else if (strncmp(i40e_dbg_netdev_ops_buf, "napi", 4) == 0) {
+-		cnt = sscanf(&i40e_dbg_netdev_ops_buf[4], "%i", &vsi_seid);
++	} else if (strncmp(cmd_buf, "napi", 4) == 0) {
++		cnt = sscanf(&cmd_buf[4], "%i", &vsi_seid);
+ 		if (cnt != 1) {
+ 			dev_info(&pf->pdev->dev, "napi <vsi_seid>\n");
+ 			goto netdev_ops_write_done;
+@@ -1775,21 +1691,20 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
+ 			dev_info(&pf->pdev->dev, "napi called\n");
+ 		}
+ 	} else {
+-		dev_info(&pf->pdev->dev, "unknown command '%s'\n",
+-			 i40e_dbg_netdev_ops_buf);
++		dev_info(&pf->pdev->dev, "unknown command '%s'\n", cmd_buf);
+ 		dev_info(&pf->pdev->dev, "available commands\n");
+ 		dev_info(&pf->pdev->dev, "  change_mtu <vsi_seid> <mtu>\n");
+ 		dev_info(&pf->pdev->dev, "  set_rx_mode <vsi_seid>\n");
+ 		dev_info(&pf->pdev->dev, "  napi <vsi_seid>\n");
+ 	}
+ netdev_ops_write_done:
++	kfree(cmd_buf);
+ 	return count;
+ }
+ 
+ static const struct file_operations i40e_dbg_netdev_ops_fops = {
+ 	.owner = THIS_MODULE,
+ 	.open = simple_open,
+-	.read = i40e_dbg_netdev_ops_read,
+ 	.write = i40e_dbg_netdev_ops_write,
+ };
+ 
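
[Editor's note: both debugfs files lose their read handlers along with the fixed 256-byte static buffers, and the write path now sizes a heap buffer from the caller's count, removing the shared-global race and the arbitrary length cap. The resulting write-handler shape, condensed from the hunk (which deliberately returns count rather than an errno when allocation fails, keeping the file's old best-effort semantics):

  cmd_buf = kzalloc(count + 1, GFP_KERNEL);
  if (!cmd_buf)
          return count;                   /* best-effort, as in the hunk */
  if (copy_from_user(cmd_buf, buffer, count)) {
          kfree(cmd_buf);
          return -EFAULT;
  }
  cmd_buf[count] = '\0';                  /* sscanf/strncmp need a string */
  /* ... parse "change_mtu", "set_rx_mode", "napi" ... */
  kfree(cmd_buf);                         /* freed on every exit path */
  return count;
]
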
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index 74d4f2fde3e0f1..bd5db525f19395 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -3193,12 +3193,14 @@ static irqreturn_t ice_ll_ts_intr(int __always_unused irq, void *data)
+ 	hw = &pf->hw;
+ 	tx = &pf->ptp.port.tx;
+ 	spin_lock_irqsave(&tx->lock, flags);
+-	ice_ptp_complete_tx_single_tstamp(tx);
++	if (tx->init) {
++		ice_ptp_complete_tx_single_tstamp(tx);
+ 
+-	idx = find_next_bit_wrap(tx->in_use, tx->len,
+-				 tx->last_ll_ts_idx_read + 1);
+-	if (idx != tx->len)
+-		ice_ptp_req_tx_single_tstamp(tx, idx);
++		idx = find_next_bit_wrap(tx->in_use, tx->len,
++					 tx->last_ll_ts_idx_read + 1);
++		if (idx != tx->len)
++			ice_ptp_req_tx_single_tstamp(tx, idx);
++	}
+ 	spin_unlock_irqrestore(&tx->lock, flags);
+ 
+ 	val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
+diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
+index 1468a0f0df2bab..52d9caab2fcb2a 100644
+--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
++++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
+@@ -2278,6 +2278,7 @@ static int idpf_set_mac(struct net_device *netdev, void *p)
+ 	struct idpf_netdev_priv *np = netdev_priv(netdev);
+ 	struct idpf_vport_config *vport_config;
+ 	struct sockaddr *addr = p;
++	u8 old_mac_addr[ETH_ALEN];
+ 	struct idpf_vport *vport;
+ 	int err = 0;
+ 
+@@ -2301,17 +2302,19 @@ static int idpf_set_mac(struct net_device *netdev, void *p)
+ 	if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
+ 		goto unlock_mutex;
+ 
++	ether_addr_copy(old_mac_addr, vport->default_mac_addr);
++	ether_addr_copy(vport->default_mac_addr, addr->sa_data);
+ 	vport_config = vport->adapter->vport_config[vport->idx];
+ 	err = idpf_add_mac_filter(vport, np, addr->sa_data, false);
+ 	if (err) {
+ 		__idpf_del_mac_filter(vport_config, addr->sa_data);
++		ether_addr_copy(vport->default_mac_addr, netdev->dev_addr);
+ 		goto unlock_mutex;
+ 	}
+ 
+-	if (is_valid_ether_addr(vport->default_mac_addr))
+-		idpf_del_mac_filter(vport, np, vport->default_mac_addr, false);
++	if (is_valid_ether_addr(old_mac_addr))
++		__idpf_del_mac_filter(vport_config, old_mac_addr);
+ 
+-	ether_addr_copy(vport->default_mac_addr, addr->sa_data);
+ 	eth_hw_addr_set(netdev, addr->sa_data);
+ 
+ unlock_mutex:
+diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
+index 151beea20d3435..f27a8cf3816db3 100644
+--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
++++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
+@@ -3513,6 +3513,16 @@ u32 idpf_get_vport_id(struct idpf_vport *vport)
+ 	return le32_to_cpu(vport_msg->vport_id);
+ }
+ 
++static void idpf_set_mac_type(struct idpf_vport *vport,
++			      struct virtchnl2_mac_addr *mac_addr)
++{
++	bool is_primary;
++
++	is_primary = ether_addr_equal(vport->default_mac_addr, mac_addr->addr);
++	mac_addr->type = is_primary ? VIRTCHNL2_MAC_ADDR_PRIMARY :
++				      VIRTCHNL2_MAC_ADDR_EXTRA;
++}
++
+ /**
+  * idpf_mac_filter_async_handler - Async callback for mac filters
+  * @adapter: private data struct
+@@ -3642,6 +3652,7 @@ int idpf_add_del_mac_filters(struct idpf_vport *vport,
+ 			    list) {
+ 		if (add && f->add) {
+ 			ether_addr_copy(mac_addr[i].addr, f->macaddr);
++			idpf_set_mac_type(vport, &mac_addr[i]);
+ 			i++;
+ 			f->add = false;
+ 			if (i == total_filters)
+@@ -3649,6 +3660,7 @@ int idpf_add_del_mac_filters(struct idpf_vport *vport,
+ 		}
+ 		if (!add && f->remove) {
+ 			ether_addr_copy(mac_addr[i].addr, f->macaddr);
++			idpf_set_mac_type(vport, &mac_addr[i]);
+ 			i++;
+ 			f->remove = false;
+ 			if (i == total_filters)
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+index 9482e0cca8b7d2..0b9ecb10aa7cf9 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+@@ -3443,13 +3443,13 @@ ixgbe_get_eee_fw(struct ixgbe_adapter *adapter, struct ethtool_keee *edata)
+ 
+ 	for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) {
+ 		if (hw->phy.eee_speeds_supported & ixgbe_ls_map[i].mac_speed)
+-			linkmode_set_bit(ixgbe_lp_map[i].link_mode,
++			linkmode_set_bit(ixgbe_ls_map[i].link_mode,
+ 					 edata->supported);
+ 	}
+ 
+ 	for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) {
+ 		if (hw->phy.eee_speeds_advertised & ixgbe_ls_map[i].mac_speed)
+-			linkmode_set_bit(ixgbe_lp_map[i].link_mode,
++			linkmode_set_bit(ixgbe_ls_map[i].link_mode,
+ 					 edata->advertised);
+ 	}
+ 
+diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+index 272f178906d61f..64d86068b51eb4 100644
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -1606,6 +1606,13 @@ static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ 	bool gso = false;
+ 	int tx_num;
+ 
++	if (skb_vlan_tag_present(skb) &&
++	    !eth_proto_is_802_3(eth_hdr(skb)->h_proto)) {
++		skb = __vlan_hwaccel_push_inside(skb);
++		if (!skb)
++			goto dropped;
++	}
++
+ 	/* normally we can rely on the stack not calling this more than once,
+ 	 * however we have 2 queues running on the same ring so we need to lock
+ 	 * the ring access
+@@ -1651,8 +1658,9 @@ static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ 
+ drop:
+ 	spin_unlock(&eth->page_lock);
+-	stats->tx_dropped++;
+ 	dev_kfree_skb_any(skb);
++dropped:
++	stats->tx_dropped++;
+ 	return NETDEV_TX_OK;
+ }
+ 
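
[Editor's note: hardware VLAN insertion on this MAC only works for 802.3 frames; for anything else the accelerated tag is now folded back into the packet data before the descriptor is built. __vlan_hwaccel_push_inside() frees the skb and returns NULL on failure, which is why the drop accounting gets its own dropped label outside the ring lock:

  if (skb_vlan_tag_present(skb) &&
      !eth_proto_is_802_3(eth_hdr(skb)->h_proto)) {
          /* materialise the out-of-band tag into the payload */
          skb = __vlan_hwaccel_push_inside(skb);
          if (!skb) {
                  stats->tx_dropped++;    /* skb already freed */
                  return NETDEV_TX_OK;
          }
  }
]
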
+diff --git a/drivers/net/ethernet/microchip/lan865x/lan865x.c b/drivers/net/ethernet/microchip/lan865x/lan865x.c
+index 84c41f19356126..79b800d2b72c28 100644
+--- a/drivers/net/ethernet/microchip/lan865x/lan865x.c
++++ b/drivers/net/ethernet/microchip/lan865x/lan865x.c
+@@ -423,13 +423,16 @@ static void lan865x_remove(struct spi_device *spi)
+ 	free_netdev(priv->netdev);
+ }
+ 
+-static const struct spi_device_id spidev_spi_ids[] = {
++static const struct spi_device_id lan865x_ids[] = {
+ 	{ .name = "lan8650" },
++	{ .name = "lan8651" },
+ 	{},
+ };
++MODULE_DEVICE_TABLE(spi, lan865x_ids);
+ 
+ static const struct of_device_id lan865x_dt_ids[] = {
+ 	{ .compatible = "microchip,lan8650" },
++	{ .compatible = "microchip,lan8651" },
+ 	{ /* Sentinel */ }
+ };
+ MODULE_DEVICE_TABLE(of, lan865x_dt_ids);
+@@ -441,7 +444,7 @@ static struct spi_driver lan865x_driver = {
+ 	 },
+ 	.probe = lan865x_probe,
+ 	.remove = lan865x_remove,
+-	.id_table = spidev_spi_ids,
++	.id_table = lan865x_ids,
+ };
+ module_spi_driver(lan865x_driver);
+ 
+diff --git a/drivers/net/ethernet/oa_tc6.c b/drivers/net/ethernet/oa_tc6.c
+index db200e4ec284d7..91a906a7918a25 100644
+--- a/drivers/net/ethernet/oa_tc6.c
++++ b/drivers/net/ethernet/oa_tc6.c
+@@ -1249,7 +1249,8 @@ struct oa_tc6 *oa_tc6_init(struct spi_device *spi, struct net_device *netdev)
+ 
+ 	/* Set the SPI controller to pump at realtime priority */
+ 	tc6->spi->rt = true;
+-	spi_setup(tc6->spi);
++	if (spi_setup(tc6->spi) < 0)
++		return NULL;
+ 
+ 	tc6->spi_ctrl_tx_buf = devm_kzalloc(&tc6->spi->dev,
+ 					    OA_TC6_CTRL_SPI_BUF_SIZE,
+diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+index 1775e060d39d38..3339c5e1a57a9a 100644
+--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
++++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+@@ -1127,6 +1127,15 @@ static void axienet_dma_rx_cb(void *data, const struct dmaengine_result *result)
+ 						       &meta_max_len);
+ 	dma_unmap_single(lp->dev, skbuf_dma->dma_address, lp->max_frm_size,
+ 			 DMA_FROM_DEVICE);
++
++	if (IS_ERR(app_metadata)) {
++		if (net_ratelimit())
++			netdev_err(lp->ndev, "Failed to get RX metadata pointer\n");
++		dev_kfree_skb_any(skb);
++		lp->ndev->stats.rx_dropped++;
++		goto rx_submit;
++	}
++
+ 	/* TODO: Derive app word index programmatically */
+ 	rx_len = (app_metadata[LEN_APP] & 0xFFFF);
+ 	skb_put(skb, rx_len);
+@@ -1139,6 +1148,7 @@ static void axienet_dma_rx_cb(void *data, const struct dmaengine_result *result)
+ 	u64_stats_add(&lp->rx_bytes, rx_len);
+ 	u64_stats_update_end(&lp->rx_stat_sync);
+ 
++rx_submit:
+ 	for (i = 0; i < CIRC_SPACE(lp->rx_ring_head, lp->rx_ring_tail,
+ 				   RX_BUF_NUM_DEFAULT); i++)
+ 		axienet_rx_submit_desc(lp->ndev);
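
[Editor's note: dmaengine_desc_get_metadata_ptr() reports failure through the ERR_PTR convention, so the result must be screened with IS_ERR() before indexing; the old code read app_metadata[LEN_APP] unconditionally and could dereference an error cookie. The fix also recycles the RX descriptor via the new rx_submit label so the ring does not drain on errors. The generic ERR_PTR pattern, with the callee name hypothetical:

  ptr = some_call_returning_err_ptr();
  if (IS_ERR(ptr)) {
          err = PTR_ERR(ptr);     /* negative errno encoded in the pointer */
          /* handle the error; never dereference ptr */
  }
]
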
+diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c
+index a31d5d5e65936d..97e88886253f54 100644
+--- a/drivers/net/ethernet/xircom/xirc2ps_cs.c
++++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c
+@@ -1576,7 +1576,7 @@ do_reset(struct net_device *dev, int full)
+ 	    msleep(40);			/* wait 40 msec to let it complete */
+ 	}
+ 	if (full_duplex)
+-	    PutByte(XIRCREG1_ECR, GetByte(XIRCREG1_ECR | FullDuplex));
++	    PutByte(XIRCREG1_ECR, GetByte(XIRCREG1_ECR) | FullDuplex);
+     } else {  /* No MII */
+ 	SelectPage(0);
+ 	value = GetByte(XIRCREG_ESR);	 /* read the ESR */
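
[Editor's note: a long-standing read-modify-write slip. The duplex flag was OR'd into the register address argument, so the driver read some other register and wrote that value back. Before and after, side by side:

  PutByte(XIRCREG1_ECR, GetByte(XIRCREG1_ECR | FullDuplex)); /* wrong: flag ORs into the address */
  PutByte(XIRCREG1_ECR, GetByte(XIRCREG1_ECR) | FullDuplex); /* right: read, then set the bit */
]
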
+diff --git a/drivers/net/ipvlan/ipvlan_l3s.c b/drivers/net/ipvlan/ipvlan_l3s.c
+index d5b05e8032199a..ca35a50bb64053 100644
+--- a/drivers/net/ipvlan/ipvlan_l3s.c
++++ b/drivers/net/ipvlan/ipvlan_l3s.c
+@@ -224,5 +224,4 @@ void ipvlan_l3s_unregister(struct ipvl_port *port)
+ 
+ 	dev->priv_flags &= ~IFF_L3MDEV_RX_HANDLER;
+ 	ipvlan_unregister_nf_hook(read_pnet(&port->pnet));
+-	dev->l3mdev_ops = NULL;
+ }
+diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
+index 090a56a5e456ac..8b10112c30dc17 100644
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -1843,7 +1843,7 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
+ 
+ 	if (tb_sa[MACSEC_SA_ATTR_PN]) {
+ 		spin_lock_bh(&rx_sa->lock);
+-		rx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
++		rx_sa->next_pn = nla_get_uint(tb_sa[MACSEC_SA_ATTR_PN]);
+ 		spin_unlock_bh(&rx_sa->lock);
+ 	}
+ 
+@@ -2085,7 +2085,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
+ 	}
+ 
+ 	spin_lock_bh(&tx_sa->lock);
+-	tx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
++	tx_sa->next_pn = nla_get_uint(tb_sa[MACSEC_SA_ATTR_PN]);
+ 	spin_unlock_bh(&tx_sa->lock);
+ 
+ 	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
+@@ -2397,7 +2397,7 @@ static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
+ 
+ 		spin_lock_bh(&tx_sa->lock);
+ 		prev_pn = tx_sa->next_pn_halves;
+-		tx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
++		tx_sa->next_pn = nla_get_uint(tb_sa[MACSEC_SA_ATTR_PN]);
+ 		spin_unlock_bh(&tx_sa->lock);
+ 	}
+ 
+@@ -2495,7 +2495,7 @@ static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
+ 
+ 		spin_lock_bh(&rx_sa->lock);
+ 		prev_pn = rx_sa->next_pn_halves;
+-		rx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
++		rx_sa->next_pn = nla_get_uint(tb_sa[MACSEC_SA_ATTR_PN]);
+ 		spin_unlock_bh(&rx_sa->lock);
+ 	}
+ 
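
[Editor's note: the packet-number attribute may arrive from userspace as either a 4- or an 8-byte netlink payload; nla_get_u64() blindly reads eight bytes, whereas nla_get_uint() dispatches on the attribute's actual length. Roughly, the helper behaves like this sketch:

  static u64 get_uint_like(const struct nlattr *nla)
  {
          /* 4-byte payloads are decoded as u32, anything else as u64,
           * so a 32-bit PN from older userspace is read correctly
           * instead of slurping bytes from the adjacent attribute. */
          if (nla_len(nla) == sizeof(u32))
                  return nla_get_u32(nla);
          return nla_get_u64(nla);
  }
]
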
+diff --git a/drivers/net/pcs/pcs-rzn1-miic.c b/drivers/net/pcs/pcs-rzn1-miic.c
+index d0a722d43368f7..a7acbed14b0a9a 100644
+--- a/drivers/net/pcs/pcs-rzn1-miic.c
++++ b/drivers/net/pcs/pcs-rzn1-miic.c
+@@ -19,7 +19,7 @@
+ #define MIIC_PRCMD			0x0
+ #define MIIC_ESID_CODE			0x4
+ 
+-#define MIIC_MODCTRL			0x20
++#define MIIC_MODCTRL			0x8
+ #define MIIC_MODCTRL_SW_MODE		GENMASK(4, 0)
+ 
+ #define MIIC_CONVCTRL(port)		(0x100 + (port) * 4)
+diff --git a/drivers/net/phy/mscc/mscc_ptp.c b/drivers/net/phy/mscc/mscc_ptp.c
+index 920f35f8f84e71..fa24ba8f6bff0b 100644
+--- a/drivers/net/phy/mscc/mscc_ptp.c
++++ b/drivers/net/phy/mscc/mscc_ptp.c
+@@ -455,12 +455,12 @@ static void vsc85xx_dequeue_skb(struct vsc85xx_ptp *ptp)
+ 		*p++ = (reg >> 24) & 0xff;
+ 	}
+ 
+-	len = skb_queue_len(&ptp->tx_queue);
++	len = skb_queue_len_lockless(&ptp->tx_queue);
+ 	if (len < 1)
+ 		return;
+ 
+ 	while (len--) {
+-		skb = __skb_dequeue(&ptp->tx_queue);
++		skb = skb_dequeue(&ptp->tx_queue);
+ 		if (!skb)
+ 			return;
+ 
+@@ -485,7 +485,7 @@ static void vsc85xx_dequeue_skb(struct vsc85xx_ptp *ptp)
+ 		 * packet in the FIFO right now, reschedule it for later
+ 		 * packets.
+ 		 */
+-		__skb_queue_tail(&ptp->tx_queue, skb);
++		skb_queue_tail(&ptp->tx_queue, skb);
+ 	}
+ }
+ 
+@@ -1065,6 +1065,7 @@ static int vsc85xx_hwtstamp(struct mii_timestamper *mii_ts,
+ 	case HWTSTAMP_TX_ON:
+ 		break;
+ 	case HWTSTAMP_TX_OFF:
++		skb_queue_purge(&vsc8531->ptp->tx_queue);
+ 		break;
+ 	default:
+ 		return -ERANGE;
+@@ -1089,9 +1090,6 @@ static int vsc85xx_hwtstamp(struct mii_timestamper *mii_ts,
+ 
+ 	mutex_lock(&vsc8531->ts_lock);
+ 
+-	__skb_queue_purge(&vsc8531->ptp->tx_queue);
+-	__skb_queue_head_init(&vsc8531->ptp->tx_queue);
+-
+ 	/* Disable predictor while configuring the 1588 block */
+ 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
+ 				  MSCC_PHY_PTP_INGR_PREDICTOR);
+@@ -1177,9 +1175,7 @@ static void vsc85xx_txtstamp(struct mii_timestamper *mii_ts,
+ 
+ 	skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ 
+-	mutex_lock(&vsc8531->ts_lock);
+-	__skb_queue_tail(&vsc8531->ptp->tx_queue, skb);
+-	mutex_unlock(&vsc8531->ts_lock);
++	skb_queue_tail(&vsc8531->ptp->tx_queue, skb);
+ 	return;
+ 
+ out:
+@@ -1545,6 +1541,7 @@ void vsc8584_ptp_deinit(struct phy_device *phydev)
+ 	if (vsc8531->ptp->ptp_clock) {
+ 		ptp_clock_unregister(vsc8531->ptp->ptp_clock);
+ 		skb_queue_purge(&vsc8531->rx_skbs_list);
++		skb_queue_purge(&vsc8531->ptp->tx_queue);
+ 	}
+ }
+ 
+@@ -1568,7 +1565,7 @@ irqreturn_t vsc8584_handle_ts_interrupt(struct phy_device *phydev)
+ 	if (rc & VSC85XX_1588_INT_FIFO_ADD) {
+ 		vsc85xx_get_tx_ts(priv->ptp);
+ 	} else if (rc & VSC85XX_1588_INT_FIFO_OVERFLOW) {
+-		__skb_queue_purge(&priv->ptp->tx_queue);
++		skb_queue_purge(&priv->ptp->tx_queue);
+ 		vsc85xx_ts_reset_fifo(phydev);
+ 	}
+ 
+@@ -1588,6 +1585,7 @@ int vsc8584_ptp_probe(struct phy_device *phydev)
+ 	mutex_init(&vsc8531->phc_lock);
+ 	mutex_init(&vsc8531->ts_lock);
+ 	skb_queue_head_init(&vsc8531->rx_skbs_list);
++	skb_queue_head_init(&vsc8531->ptp->tx_queue);
+ 
+ 	/* Retrieve the shared load/save GPIO. Request it as non exclusive as
+ 	 * the same GPIO can be requested by all the PHYs of the same package.
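
[Editor's note: the TX timestamp queue moves from the unlocked __skb_queue_* helpers, which relied on ts_lock, to the locked variants that take the queue's own spinlock. The queue is touched from the timestamping hot path, the 1588 interrupt handler and teardown, and the internal lock serialises all three without the mutex; the new purge calls at TX_OFF and deinit also stop queued skbs from leaking. The split between the two families, for reference:

  skb_queue_tail(&q, skb);        /* takes q.lock internally; any context */
  skb = skb_dequeue(&q);          /* ditto */
  skb_queue_purge(&q);            /* frees every queued skb under q.lock */

  __skb_queue_tail(&q, skb);      /* no locking: caller must serialise */
]
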
+diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
+index 0553b0b356b308..afc1566488b32a 100644
+--- a/drivers/net/ppp/ppp_generic.c
++++ b/drivers/net/ppp/ppp_generic.c
+@@ -1753,7 +1753,6 @@ pad_compress_skb(struct ppp *ppp, struct sk_buff *skb)
+ 		 */
+ 		if (net_ratelimit())
+ 			netdev_err(ppp->dev, "ppp: compressor dropped pkt\n");
+-		kfree_skb(skb);
+ 		consume_skb(new_skb);
+ 		new_skb = NULL;
+ 	}
+@@ -1855,9 +1854,10 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
+ 					   "down - pkt dropped.\n");
+ 			goto drop;
+ 		}
+-		skb = pad_compress_skb(ppp, skb);
+-		if (!skb)
++		new_skb = pad_compress_skb(ppp, skb);
++		if (!new_skb)
+ 			goto drop;
++		skb = new_skb;
+ 	}
+ 
+ 	/*
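
[Editor's note: the crash fixed here was a double free. On the compressor-drop path pad_compress_skb() freed the skb it had been handed, yet the caller's drop: label freed it again. The new contract is that the callee never frees its input; it returns a replacement skb or NULL, and the caller owns skb until it explicitly drops it:

  new_skb = pad_compress_skb(ppp, skb);
  if (!new_skb)
          goto drop;      /* skb still valid; freed exactly once below */
  skb = new_skb;
]
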
+diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
+index 4abfdfcf0e289c..5c89e03f93d619 100644
+--- a/drivers/net/usb/cdc_ncm.c
++++ b/drivers/net/usb/cdc_ncm.c
+@@ -2088,6 +2088,13 @@ static const struct usb_device_id cdc_devs[] = {
+ 	  .driver_info = (unsigned long)&wwan_info,
+ 	},
+ 
++	/* Intel modem (label from OEM reads Fibocom L850-GL) */
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x8087, 0x095a,
++		USB_CLASS_COMM,
++		USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
++	  .driver_info = (unsigned long)&wwan_info,
++	},
++
+ 	/* DisplayLink docking stations */
+ 	{ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
+ 		| USB_DEVICE_ID_MATCH_VENDOR,
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 0a0f0e18762bb1..f04da733240c0b 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1363,8 +1363,9 @@ static const struct usb_device_id products[] = {
+ 	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)},	/* Telit FN980 */
+ 	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1057, 2)},	/* Telit FN980 */
+ 	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1060, 2)},	/* Telit LN920 */
+-	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1070, 2)},	/* Telit FN990 */
+-	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1080, 2)}, /* Telit FE990 */
++	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1070, 2)},	/* Telit FN990A */
++	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1077, 2)},	/* Telit FN990A w/audio */
++	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1080, 2)}, /* Telit FE990A */
+ 	{QMI_QUIRK_SET_DTR(0x1bc7, 0x10a0, 0)}, /* Telit FN920C04 */
+ 	{QMI_QUIRK_SET_DTR(0x1bc7, 0x10a4, 0)}, /* Telit FN920C04 */
+ 	{QMI_QUIRK_SET_DTR(0x1bc7, 0x10a9, 0)}, /* Telit FN920C04 */
+diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
+index 1a707709380017..bbfa4eed17559f 100644
+--- a/drivers/net/vxlan/vxlan_core.c
++++ b/drivers/net/vxlan/vxlan_core.c
+@@ -428,8 +428,8 @@ static struct vxlan_fdb *__vxlan_find_mac(struct vxlan_dev *vxlan,
+ 	return NULL;
+ }
+ 
+-static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
+-					const u8 *mac, __be32 vni)
++static struct vxlan_fdb *vxlan_find_mac_tx(struct vxlan_dev *vxlan,
++					   const u8 *mac, __be32 vni)
+ {
+ 	struct vxlan_fdb *f;
+ 
+@@ -1048,7 +1048,7 @@ static int vxlan_fdb_update_existing(struct vxlan_dev *vxlan,
+ 	}
+ 
+ 	if (ndm_flags & NTF_USE)
+-		WRITE_ONCE(f->used, jiffies);
++		WRITE_ONCE(f->updated, jiffies);
+ 
+ 	if (notify) {
+ 		if (rd == NULL)
+@@ -1292,7 +1292,7 @@ int __vxlan_fdb_delete(struct vxlan_dev *vxlan,
+ 	struct vxlan_fdb *f;
+ 	int err = -ENOENT;
+ 
+-	f = vxlan_find_mac(vxlan, addr, src_vni);
++	f = __vxlan_find_mac(vxlan, addr, src_vni);
+ 	if (!f)
+ 		return err;
+ 
+@@ -1437,9 +1437,10 @@ static int vxlan_fdb_get(struct sk_buff *skb,
+  * and Tunnel endpoint.
+  * Return true if packet is bogus and should be dropped.
+  */
+-static bool vxlan_snoop(struct net_device *dev,
+-			union vxlan_addr *src_ip, const u8 *src_mac,
+-			u32 src_ifindex, __be32 vni)
++static enum skb_drop_reason vxlan_snoop(struct net_device *dev,
++					union vxlan_addr *src_ip,
++					const u8 *src_mac, u32 src_ifindex,
++					__be32 vni)
+ {
+ 	struct vxlan_dev *vxlan = netdev_priv(dev);
+ 	struct vxlan_fdb *f;
+@@ -1447,7 +1448,7 @@ static bool vxlan_snoop(struct net_device *dev,
+ 
+ 	/* Ignore packets from invalid src-address */
+ 	if (!is_valid_ether_addr(src_mac))
+-		return true;
++		return SKB_DROP_REASON_MAC_INVALID_SOURCE;
+ 
+ #if IS_ENABLED(CONFIG_IPV6)
+ 	if (src_ip->sa.sa_family == AF_INET6 &&
+@@ -1455,21 +1456,21 @@ static bool vxlan_snoop(struct net_device *dev,
+ 		ifindex = src_ifindex;
+ #endif
+ 
+-	f = vxlan_find_mac(vxlan, src_mac, vni);
++	f = __vxlan_find_mac(vxlan, src_mac, vni);
+ 	if (likely(f)) {
+ 		struct vxlan_rdst *rdst = first_remote_rcu(f);
+ 
++		/* Don't override an fdb with nexthop with a learnt entry */
++		if (rcu_access_pointer(f->nh))
++			return SKB_DROP_REASON_VXLAN_ENTRY_EXISTS;
++
+ 		if (likely(vxlan_addr_equal(&rdst->remote_ip, src_ip) &&
+ 			   rdst->remote_ifindex == ifindex))
+-			return false;
++			return SKB_NOT_DROPPED_YET;
+ 
+ 		/* Don't migrate static entries, drop packets */
+ 		if (f->state & (NUD_PERMANENT | NUD_NOARP))
+-			return true;
+-
+-		/* Don't override an fdb with nexthop with a learnt entry */
+-		if (rcu_access_pointer(f->nh))
+-			return true;
++			return SKB_DROP_REASON_VXLAN_ENTRY_EXISTS;
+ 
+ 		if (net_ratelimit())
+ 			netdev_info(dev,
+@@ -1497,7 +1498,7 @@ static bool vxlan_snoop(struct net_device *dev,
+ 		spin_unlock(&vxlan->hash_lock[hash_index]);
+ 	}
+ 
+-	return false;
++	return SKB_NOT_DROPPED_YET;
+ }
+ 
+ static bool __vxlan_sock_release_prep(struct vxlan_sock *vs)
+@@ -1604,9 +1605,9 @@ static void vxlan_parse_gbp_hdr(struct vxlanhdr *unparsed,
+ 	unparsed->vx_flags &= ~VXLAN_GBP_USED_BITS;
+ }
+ 
+-static bool vxlan_set_mac(struct vxlan_dev *vxlan,
+-			  struct vxlan_sock *vs,
+-			  struct sk_buff *skb, __be32 vni)
++static enum skb_drop_reason vxlan_set_mac(struct vxlan_dev *vxlan,
++					  struct vxlan_sock *vs,
++					  struct sk_buff *skb, __be32 vni)
+ {
+ 	union vxlan_addr saddr;
+ 	u32 ifindex = skb->dev->ifindex;
+@@ -1617,7 +1618,7 @@ static bool vxlan_set_mac(struct vxlan_dev *vxlan,
+ 
+ 	/* Ignore packet loops (and multicast echo) */
+ 	if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
+-		return false;
++		return SKB_DROP_REASON_LOCAL_MAC;
+ 
+ 	/* Get address from the outer IP header */
+ 	if (vxlan_get_sk_family(vs) == AF_INET) {
+@@ -1630,11 +1631,11 @@ static bool vxlan_set_mac(struct vxlan_dev *vxlan,
+ #endif
+ 	}
+ 
+-	if ((vxlan->cfg.flags & VXLAN_F_LEARN) &&
+-	    vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source, ifindex, vni))
+-		return false;
++	if (!(vxlan->cfg.flags & VXLAN_F_LEARN))
++		return SKB_NOT_DROPPED_YET;
+ 
+-	return true;
++	return vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source,
++			   ifindex, vni);
+ }
+ 
+ static bool vxlan_ecn_decapsulate(struct vxlan_sock *vs, void *oiph,
+@@ -1671,13 +1672,15 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
+ 	struct vxlan_metadata _md;
+ 	struct vxlan_metadata *md = &_md;
+ 	__be16 protocol = htons(ETH_P_TEB);
++	enum skb_drop_reason reason;
+ 	bool raw_proto = false;
+ 	void *oiph;
+ 	__be32 vni = 0;
+ 	int nh;
+ 
+ 	/* Need UDP and VXLAN header to be present */
+-	if (!pskb_may_pull(skb, VXLAN_HLEN))
++	reason = pskb_may_pull_reason(skb, VXLAN_HLEN);
++	if (reason)
+ 		goto drop;
+ 
+ 	unparsed = *vxlan_hdr(skb);
+@@ -1686,6 +1689,7 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
+ 		netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
+ 			   ntohl(vxlan_hdr(skb)->vx_flags),
+ 			   ntohl(vxlan_hdr(skb)->vx_vni));
++		reason = SKB_DROP_REASON_VXLAN_INVALID_HDR;
+ 		/* Return non vxlan pkt */
+ 		goto drop;
+ 	}
+@@ -1699,8 +1703,10 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
+ 	vni = vxlan_vni(vxlan_hdr(skb)->vx_vni);
+ 
+ 	vxlan = vxlan_vs_find_vni(vs, skb->dev->ifindex, vni, &vninode);
+-	if (!vxlan)
++	if (!vxlan) {
++		reason = SKB_DROP_REASON_VXLAN_VNI_NOT_FOUND;
+ 		goto drop;
++	}
+ 
+ 	/* For backwards compatibility, only allow reserved fields to be
+ 	 * used by VXLAN extensions if explicitly requested.
+@@ -1713,8 +1719,10 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
+ 	}
+ 
+ 	if (__iptunnel_pull_header(skb, VXLAN_HLEN, protocol, raw_proto,
+-				   !net_eq(vxlan->net, dev_net(vxlan->dev))))
++				   !net_eq(vxlan->net, dev_net(vxlan->dev)))) {
++		reason = SKB_DROP_REASON_NOMEM;
+ 		goto drop;
++	}
+ 
+ 	if (vs->flags & VXLAN_F_REMCSUM_RX)
+ 		if (unlikely(!vxlan_remcsum(&unparsed, skb, vs->flags)))
+@@ -1728,8 +1736,10 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
+ 		tun_dst = udp_tun_rx_dst(skb, vxlan_get_sk_family(vs), flags,
+ 					 key32_to_tunnel_id(vni), sizeof(*md));
+ 
+-		if (!tun_dst)
++		if (!tun_dst) {
++			reason = SKB_DROP_REASON_NOMEM;
+ 			goto drop;
++		}
+ 
+ 		md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
+ 
+@@ -1753,11 +1763,13 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
+ 		 * is more robust and provides a little more security in
+ 		 * adding extensions to VXLAN.
+ 		 */
++		reason = SKB_DROP_REASON_VXLAN_INVALID_HDR;
+ 		goto drop;
+ 	}
+ 
+ 	if (!raw_proto) {
+-		if (!vxlan_set_mac(vxlan, vs, skb, vni))
++		reason = vxlan_set_mac(vxlan, vs, skb, vni);
++		if (reason)
+ 			goto drop;
+ 	} else {
+ 		skb_reset_mac_header(skb);
+@@ -1773,7 +1785,8 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
+ 
+ 	skb_reset_network_header(skb);
+ 
+-	if (!pskb_inet_may_pull(skb)) {
++	reason = pskb_inet_may_pull_reason(skb);
++	if (reason) {
+ 		DEV_STATS_INC(vxlan->dev, rx_length_errors);
+ 		DEV_STATS_INC(vxlan->dev, rx_errors);
+ 		vxlan_vnifilter_count(vxlan, vni, vninode,
+@@ -1785,6 +1798,7 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
+ 	oiph = skb->head + nh;
+ 
+ 	if (!vxlan_ecn_decapsulate(vs, oiph, skb)) {
++		reason = SKB_DROP_REASON_IP_TUNNEL_ECN;
+ 		DEV_STATS_INC(vxlan->dev, rx_frame_errors);
+ 		DEV_STATS_INC(vxlan->dev, rx_errors);
+ 		vxlan_vnifilter_count(vxlan, vni, vninode,
+@@ -1799,6 +1813,7 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
+ 		dev_core_stats_rx_dropped_inc(vxlan->dev);
+ 		vxlan_vnifilter_count(vxlan, vni, vninode,
+ 				      VXLAN_VNI_STATS_RX_DROPS, 0);
++		reason = SKB_DROP_REASON_DEV_READY;
+ 		goto drop;
+ 	}
+ 
+@@ -1811,8 +1826,9 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
+ 	return 0;
+ 
+ drop:
++	reason = reason ?: SKB_DROP_REASON_NOT_SPECIFIED;
+ 	/* Consume bad packet */
+-	kfree_skb(skb);
++	kfree_skb_reason(skb, reason);
+ 	return 0;
+ }
+ 
+@@ -1885,6 +1901,7 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
+ 	n = neigh_lookup(&arp_tbl, &tip, dev);
+ 
+ 	if (n) {
++		struct vxlan_rdst *rdst = NULL;
+ 		struct vxlan_fdb *f;
+ 		struct sk_buff	*reply;
+ 
+@@ -1893,12 +1910,17 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
+ 			goto out;
+ 		}
+ 
+-		f = vxlan_find_mac(vxlan, n->ha, vni);
+-		if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) {
++		rcu_read_lock();
++		f = vxlan_find_mac_tx(vxlan, n->ha, vni);
++		if (f)
++			rdst = first_remote_rcu(f);
++		if (rdst && vxlan_addr_any(&rdst->remote_ip)) {
+ 			/* bridge-local neighbor */
+ 			neigh_release(n);
++			rcu_read_unlock();
+ 			goto out;
+ 		}
++		rcu_read_unlock();
+ 
+ 		reply = arp_create(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
+ 				n->ha, sha);
+@@ -2049,6 +2071,7 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
+ 	n = neigh_lookup(ipv6_stub->nd_tbl, &msg->target, dev);
+ 
+ 	if (n) {
++		struct vxlan_rdst *rdst = NULL;
+ 		struct vxlan_fdb *f;
+ 		struct sk_buff *reply;
+ 
+@@ -2057,8 +2080,10 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
+ 			goto out;
+ 		}
+ 
+-		f = vxlan_find_mac(vxlan, n->ha, vni);
+-		if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) {
++		f = vxlan_find_mac_tx(vxlan, n->ha, vni);
++		if (f)
++			rdst = first_remote_rcu(f);
++		if (rdst && vxlan_addr_any(&rdst->remote_ip)) {
+ 			/* bridge-local neighbor */
+ 			neigh_release(n);
+ 			goto out;
+@@ -2616,14 +2641,10 @@ static void vxlan_xmit_nh(struct sk_buff *skb, struct net_device *dev,
+ 	memset(&nh_rdst, 0, sizeof(struct vxlan_rdst));
+ 	hash = skb_get_hash(skb);
+ 
+-	rcu_read_lock();
+ 	nh = rcu_dereference(f->nh);
+-	if (!nh) {
+-		rcu_read_unlock();
++	if (!nh)
+ 		goto drop;
+-	}
+ 	do_xmit = vxlan_fdb_nh_path_select(nh, hash, &nh_rdst);
+-	rcu_read_unlock();
+ 
+ 	if (likely(do_xmit))
+ 		vxlan_xmit_one(skb, dev, vni, &nh_rdst, did_rsc);
+@@ -2708,7 +2729,7 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
+ 			if (info && info->mode & IP_TUNNEL_INFO_TX)
+ 				vxlan_xmit_one(skb, dev, vni, NULL, false);
+ 			else
+-				kfree_skb(skb);
++				kfree_skb_reason(skb, SKB_DROP_REASON_TUNNEL_TXINFO);
+ 			return NETDEV_TX_OK;
+ 		}
+ 	}
+@@ -2750,7 +2771,8 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
+ 	}
+ 
+ 	eth = eth_hdr(skb);
+-	f = vxlan_find_mac(vxlan, eth->h_dest, vni);
++	rcu_read_lock();
++	f = vxlan_find_mac_tx(vxlan, eth->h_dest, vni);
+ 	did_rsc = false;
+ 
+ 	if (f && (f->flags & NTF_ROUTER) && (vxlan->cfg.flags & VXLAN_F_RSC) &&
+@@ -2758,11 +2780,11 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
+ 	     ntohs(eth->h_proto) == ETH_P_IPV6)) {
+ 		did_rsc = route_shortcircuit(dev, skb);
+ 		if (did_rsc)
+-			f = vxlan_find_mac(vxlan, eth->h_dest, vni);
++			f = vxlan_find_mac_tx(vxlan, eth->h_dest, vni);
+ 	}
+ 
+ 	if (f == NULL) {
+-		f = vxlan_find_mac(vxlan, all_zeros_mac, vni);
++		f = vxlan_find_mac_tx(vxlan, all_zeros_mac, vni);
+ 		if (f == NULL) {
+ 			if ((vxlan->cfg.flags & VXLAN_F_L2MISS) &&
+ 			    !is_multicast_ether_addr(eth->h_dest))
+@@ -2771,8 +2793,8 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
+ 			dev_core_stats_tx_dropped_inc(dev);
+ 			vxlan_vnifilter_count(vxlan, vni, NULL,
+ 					      VXLAN_VNI_STATS_TX_DROPS, 0);
+-			kfree_skb(skb);
+-			return NETDEV_TX_OK;
++			kfree_skb_reason(skb, SKB_DROP_REASON_NO_TX_TARGET);
++			goto out;
+ 		}
+ 	}
+ 
+@@ -2794,9 +2816,11 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		if (fdst)
+ 			vxlan_xmit_one(skb, dev, vni, fdst, did_rsc);
+ 		else
+-			kfree_skb(skb);
++			kfree_skb_reason(skb, SKB_DROP_REASON_NO_TX_TARGET);
+ 	}
+ 
++out:
++	rcu_read_unlock();
+ 	return NETDEV_TX_OK;
+ }
+ 
+@@ -4700,7 +4724,7 @@ vxlan_fdb_offloaded_set(struct net_device *dev,
+ 
+ 	spin_lock_bh(&vxlan->hash_lock[hash_index]);
+ 
+-	f = vxlan_find_mac(vxlan, fdb_info->eth_addr, fdb_info->vni);
++	f = __vxlan_find_mac(vxlan, fdb_info->eth_addr, fdb_info->vni);
+ 	if (!f)
+ 		goto out;
+ 
+@@ -4756,7 +4780,7 @@ vxlan_fdb_external_learn_del(struct net_device *dev,
+ 	hash_index = fdb_head_index(vxlan, fdb_info->eth_addr, fdb_info->vni);
+ 	spin_lock_bh(&vxlan->hash_lock[hash_index]);
+ 
+-	f = vxlan_find_mac(vxlan, fdb_info->eth_addr, fdb_info->vni);
++	f = __vxlan_find_mac(vxlan, fdb_info->eth_addr, fdb_info->vni);
+ 	if (!f)
+ 		err = -ENOENT;
+ 	else if (f->flags & NTF_EXT_LEARNED)
+diff --git a/drivers/net/vxlan/vxlan_mdb.c b/drivers/net/vxlan/vxlan_mdb.c
+index 60eb95a06d551c..ec86d1c024834d 100644
+--- a/drivers/net/vxlan/vxlan_mdb.c
++++ b/drivers/net/vxlan/vxlan_mdb.c
+@@ -1712,7 +1712,7 @@ netdev_tx_t vxlan_mdb_xmit(struct vxlan_dev *vxlan,
+ 		vxlan_xmit_one(skb, vxlan->dev, src_vni,
+ 			       rcu_dereference(fremote->rd), false);
+ 	else
+-		kfree_skb(skb);
++		kfree_skb_reason(skb, SKB_DROP_REASON_NO_TX_TARGET);
+ 
+ 	return NETDEV_TX_OK;
+ }
+diff --git a/drivers/net/vxlan/vxlan_private.h b/drivers/net/vxlan/vxlan_private.h
+index 76a351a997d510..c6279ef98a5c25 100644
+--- a/drivers/net/vxlan/vxlan_private.h
++++ b/drivers/net/vxlan/vxlan_private.h
+@@ -56,9 +56,7 @@ static inline struct hlist_head *vs_head(struct net *net, __be16 port)
+ 	return &vn->sock_list[hash_32(ntohs(port), PORT_HASH_BITS)];
+ }
+ 
+-/* First remote destination for a forwarding entry.
+- * Guaranteed to be non-NULL because remotes are never deleted.
+- */
++/* First remote destination for a forwarding entry. */
+ static inline struct vxlan_rdst *first_remote_rcu(struct vxlan_fdb *fdb)
+ {
+ 	if (rcu_access_pointer(fdb->nh))
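
[Editor's note: two threads run through this vxlan series. Every drop site now reports a specific enum skb_drop_reason instead of a bare kfree_skb(), and the FDB lookup is split: TX paths use vxlan_find_mac_tx() under an explicit rcu_read_lock(), while control paths that already hold the hash lock use __vxlan_find_mac(). The reasons are observable from userspace via the skb:kfree_skb tracepoint (e.g. with perf trace). The drop-reason shape, condensed:

  enum skb_drop_reason reason;

  reason = pskb_may_pull_reason(skb, VXLAN_HLEN); /* 0 == NOT_DROPPED_YET */
  if (reason)
          goto drop;
  /* ... each later failure assigns a specific reason ... */
  drop:
          /* feeds drop_monitor and the skb:kfree_skb tracepoint with a
           * machine-readable cause instead of NOT_SPECIFIED */
          kfree_skb_reason(skb, reason ?: SKB_DROP_REASON_NOT_SPECIFIED);
]
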
+diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
+index 2ec1771262fd97..bb46ef986b2a48 100644
+--- a/drivers/net/wireless/ath/ath11k/core.c
++++ b/drivers/net/wireless/ath/ath11k/core.c
+@@ -1972,6 +1972,7 @@ void ath11k_core_halt(struct ath11k *ar)
+ 	ath11k_mac_scan_finish(ar);
+ 	ath11k_mac_peer_cleanup_all(ar);
+ 	cancel_delayed_work_sync(&ar->scan.timeout);
++	cancel_work_sync(&ar->channel_update_work);
+ 	cancel_work_sync(&ar->regd_update_work);
+ 	cancel_work_sync(&ab->update_11d_work);
+ 
+diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h
+index 09fdb7be0e1971..cd9f9fb6ab68eb 100644
+--- a/drivers/net/wireless/ath/ath11k/core.h
++++ b/drivers/net/wireless/ath/ath11k/core.h
+@@ -409,6 +409,8 @@ struct ath11k_vif {
+ 	bool do_not_send_tmpl;
+ 	struct ath11k_arp_ns_offload arp_ns_offload;
+ 	struct ath11k_rekey_data rekey_data;
++	u32 num_stations;
++	bool reinstall_group_keys;
+ 
+ 	struct ath11k_reg_tpc_power_info reg_tpc_info;
+ 
+@@ -689,7 +691,7 @@ struct ath11k {
+ 	struct mutex conf_mutex;
+ 	/* protects the radio specific data like debug stats, ppdu_stats_info stats,
+ 	 * vdev_stop_status info, scan data, ath11k_sta info, ath11k_vif info,
+-	 * channel context data, survey info, test mode data.
++	 * channel context data, survey info, test mode data, channel_update_queue.
+ 	 */
+ 	spinlock_t data_lock;
+ 
+@@ -747,6 +749,9 @@ struct ath11k {
+ 	struct completion bss_survey_done;
+ 
+ 	struct work_struct regd_update_work;
++	struct work_struct channel_update_work;
++	/* protected with data_lock */
++	struct list_head channel_update_queue;
+ 
+ 	struct work_struct wmi_mgmt_tx_work;
+ 	struct sk_buff_head wmi_mgmt_tx_queue;
+diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
+index ddf4ec6b244b46..9db3369d32048d 100644
+--- a/drivers/net/wireless/ath/ath11k/mac.c
++++ b/drivers/net/wireless/ath/ath11k/mac.c
+@@ -4307,6 +4307,40 @@ static int ath11k_clear_peer_keys(struct ath11k_vif *arvif,
+ 	return first_errno;
+ }
+ 
++static int ath11k_set_group_keys(struct ath11k_vif *arvif)
++{
++	struct ath11k *ar = arvif->ar;
++	struct ath11k_base *ab = ar->ab;
++	const u8 *addr = arvif->bssid;
++	int i, ret, first_errno = 0;
++	struct ath11k_peer *peer;
++
++	spin_lock_bh(&ab->base_lock);
++	peer = ath11k_peer_find(ab, arvif->vdev_id, addr);
++	spin_unlock_bh(&ab->base_lock);
++
++	if (!peer)
++		return -ENOENT;
++
++	for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
++		struct ieee80211_key_conf *key = peer->keys[i];
++
++		if (!key || (key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
++			continue;
++
++		ret = ath11k_install_key(arvif, key, SET_KEY, addr,
++					 WMI_KEY_GROUP);
++		if (ret < 0 && first_errno == 0)
++			first_errno = ret;
++
++		if (ret < 0)
++			ath11k_warn(ab, "failed to set group key of idx %d for vdev %d: %d\n",
++				    i, arvif->vdev_id, ret);
++	}
++
++	return first_errno;
++}
++
+ static int ath11k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ 				 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ 				 struct ieee80211_key_conf *key)
+@@ -4316,6 +4350,7 @@ static int ath11k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ 	struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ 	struct ath11k_peer *peer;
+ 	struct ath11k_sta *arsta;
++	bool is_ap_with_no_sta;
+ 	const u8 *peer_addr;
+ 	int ret = 0;
+ 	u32 flags = 0;
+@@ -4376,16 +4411,57 @@ static int ath11k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ 	else
+ 		flags |= WMI_KEY_GROUP;
+ 
+-	ret = ath11k_install_key(arvif, key, cmd, peer_addr, flags);
+-	if (ret) {
+-		ath11k_warn(ab, "ath11k_install_key failed (%d)\n", ret);
+-		goto exit;
+-	}
++	ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
++		   "%s for peer %pM on vdev %d flags 0x%X, type = %d, num_sta %d\n",
++		   cmd == SET_KEY ? "SET_KEY" : "DEL_KEY", peer_addr, arvif->vdev_id,
++		   flags, arvif->vdev_type, arvif->num_stations);
++
++	/* Allow group key clearing only in AP mode when no stations are
++	 * associated. There is a known race condition in firmware where
++	 * group addressed packets may be dropped if the key is cleared
++	 * and immediately set again during rekey.
++	 *
++	 * During GTK rekey, mac80211 issues a clear key (if the old key
++	 * exists) followed by an install key operation for same key
++	 * index. This causes ath11k to send two WMI commands in quick
++	 * succession: one to clear the old key and another to install the
++	 * new key in the same slot.
++	 *
++	 * Under certain conditions—especially under high load or time
++	 * sensitive scenarios, firmware may process these commands
++	 * asynchronously in a way that firmware assumes the key is
++	 * cleared whereas hardware has a valid key. This inconsistency
++	 * between hardware and firmware leads to group addressed packet
++	 * drops after rekey.
++	 * Only setting the same key again can restore a valid key in
++	 * firmware and allow packets to be transmitted.
++	 *
++	 * There is a use case where an AP can transition from Secure mode
++	 * to open mode without a vdev restart by just deleting all
++	 * associated peers and clearing key, Hence allow clear key for
++	 * that case alone. Mark arvif->reinstall_group_keys in such cases
++	 * and reinstall the same key when the first peer is added,
++	 * allowing firmware to recover from the race if it had occurred.
++	 */
+ 
+-	ret = ath11k_dp_peer_rx_pn_replay_config(arvif, peer_addr, cmd, key);
+-	if (ret) {
+-		ath11k_warn(ab, "failed to offload PN replay detection %d\n", ret);
+-		goto exit;
++	is_ap_with_no_sta = (vif->type == NL80211_IFTYPE_AP &&
++			     !arvif->num_stations);
++	if ((flags & WMI_KEY_PAIRWISE) || cmd == SET_KEY || is_ap_with_no_sta) {
++		ret = ath11k_install_key(arvif, key, cmd, peer_addr, flags);
++		if (ret) {
++			ath11k_warn(ab, "ath11k_install_key failed (%d)\n", ret);
++			goto exit;
++		}
++
++		ret = ath11k_dp_peer_rx_pn_replay_config(arvif, peer_addr, cmd, key);
++		if (ret) {
++			ath11k_warn(ab, "failed to offload PN replay detection %d\n",
++				    ret);
++			goto exit;
++		}
++
++		if ((flags & WMI_KEY_GROUP) && cmd == SET_KEY && is_ap_with_no_sta)
++			arvif->reinstall_group_keys = true;
+ 	}
+ 
+ 	spin_lock_bh(&ab->base_lock);
+@@ -4984,6 +5060,7 @@ static int ath11k_mac_inc_num_stations(struct ath11k_vif *arvif,
+ 		return -ENOBUFS;
+ 
+ 	ar->num_stations++;
++	arvif->num_stations++;
+ 
+ 	return 0;
+ }
+@@ -4999,6 +5076,7 @@ static void ath11k_mac_dec_num_stations(struct ath11k_vif *arvif,
+ 		return;
+ 
+ 	ar->num_stations--;
++	arvif->num_stations--;
+ }
+ 
+ static u32 ath11k_mac_ieee80211_sta_bw_to_wmi(struct ath11k *ar,
+@@ -6289,6 +6367,7 @@ static void ath11k_mac_op_stop(struct ieee80211_hw *hw, bool suspend)
+ {
+ 	struct ath11k *ar = hw->priv;
+ 	struct htt_ppdu_stats_info *ppdu_stats, *tmp;
++	struct scan_chan_list_params *params;
+ 	int ret;
+ 
+ 	ath11k_mac_drain_tx(ar);
+@@ -6304,6 +6383,7 @@ static void ath11k_mac_op_stop(struct ieee80211_hw *hw, bool suspend)
+ 	mutex_unlock(&ar->conf_mutex);
+ 
+ 	cancel_delayed_work_sync(&ar->scan.timeout);
++	cancel_work_sync(&ar->channel_update_work);
+ 	cancel_work_sync(&ar->regd_update_work);
+ 	cancel_work_sync(&ar->ab->update_11d_work);
+ 
+@@ -6313,10 +6393,19 @@ static void ath11k_mac_op_stop(struct ieee80211_hw *hw, bool suspend)
+ 	}
+ 
+ 	spin_lock_bh(&ar->data_lock);
++
+ 	list_for_each_entry_safe(ppdu_stats, tmp, &ar->ppdu_stats_info, list) {
+ 		list_del(&ppdu_stats->list);
+ 		kfree(ppdu_stats);
+ 	}
++
++	while ((params = list_first_entry_or_null(&ar->channel_update_queue,
++						  struct scan_chan_list_params,
++						  list))) {
++		list_del(&params->list);
++		kfree(params);
++	}
++
+ 	spin_unlock_bh(&ar->data_lock);
+ 
+ 	rcu_assign_pointer(ar->ab->pdevs_active[ar->pdev_idx], NULL);
+@@ -9519,6 +9608,21 @@ static int ath11k_mac_station_add(struct ath11k *ar,
+ 		goto exit;
+ 	}
+ 
++	/* Driver allows the DEL KEY followed by SET KEY sequence for
++	 * group keys for only when there is no clients associated, if at
++	 * all firmware has entered the race during that window,
++	 * reinstalling the same key when the first sta connects will allow
++	 * firmware to recover from the race.
++	 */
++	if (arvif->num_stations == 1 && arvif->reinstall_group_keys) {
++		ath11k_dbg(ab, ATH11K_DBG_MAC, "set group keys on 1st station add for vdev %d\n",
++			   arvif->vdev_id);
++		ret = ath11k_set_group_keys(arvif);
++		if (ret)
++			goto dec_num_station;
++		arvif->reinstall_group_keys = false;
++	}
++
+ 	arsta->rx_stats = kzalloc(sizeof(*arsta->rx_stats), GFP_KERNEL);
+ 	if (!arsta->rx_stats) {
+ 		ret = -ENOMEM;
+@@ -10101,6 +10205,7 @@ static const struct wiphy_iftype_ext_capab ath11k_iftypes_ext_capa[] = {
+ 
+ static void __ath11k_mac_unregister(struct ath11k *ar)
+ {
++	cancel_work_sync(&ar->channel_update_work);
+ 	cancel_work_sync(&ar->regd_update_work);
+ 
+ 	ieee80211_unregister_hw(ar->hw);
+@@ -10500,6 +10605,8 @@ int ath11k_mac_allocate(struct ath11k_base *ab)
+ 		init_completion(&ar->thermal.wmi_sync);
+ 
+ 		INIT_DELAYED_WORK(&ar->scan.timeout, ath11k_scan_timeout_work);
++		INIT_WORK(&ar->channel_update_work, ath11k_regd_update_chan_list_work);
++		INIT_LIST_HEAD(&ar->channel_update_queue);
+ 		INIT_WORK(&ar->regd_update_work, ath11k_regd_update_work);
+ 
+ 		INIT_WORK(&ar->wmi_mgmt_tx_work, ath11k_mgmt_over_wmi_tx_work);
+diff --git a/drivers/net/wireless/ath/ath11k/reg.c b/drivers/net/wireless/ath/ath11k/reg.c
+index b0f289784dd3a2..d62a2014315a08 100644
+--- a/drivers/net/wireless/ath/ath11k/reg.c
++++ b/drivers/net/wireless/ath/ath11k/reg.c
+@@ -1,7 +1,7 @@
+ // SPDX-License-Identifier: BSD-3-Clause-Clear
+ /*
+  * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+  */
+ #include <linux/rtnetlink.h>
+ 
+@@ -55,6 +55,19 @@ ath11k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
+ 	ath11k_dbg(ar->ab, ATH11K_DBG_REG,
+ 		   "Regulatory Notification received for %s\n", wiphy_name(wiphy));
+ 
++	if (request->initiator == NL80211_REGDOM_SET_BY_DRIVER) {
++		ath11k_dbg(ar->ab, ATH11K_DBG_REG,
++			   "driver initiated regd update\n");
++		if (ar->state != ATH11K_STATE_ON)
++			return;
++
++		ret = ath11k_reg_update_chan_list(ar, true);
++		if (ret)
++			ath11k_warn(ar->ab, "failed to update channel list: %d\n", ret);
++
++		return;
++	}
++
+ 	/* Currently supporting only General User Hints. Cell base user
+ 	 * hints to be handled later.
+ 	 * Hints from other sources like Core, Beacons are not expected for
+@@ -111,32 +124,7 @@ int ath11k_reg_update_chan_list(struct ath11k *ar, bool wait)
+ 	struct channel_param *ch;
+ 	enum nl80211_band band;
+ 	int num_channels = 0;
+-	int i, ret, left;
+-
+-	if (wait && ar->state_11d != ATH11K_11D_IDLE) {
+-		left = wait_for_completion_timeout(&ar->completed_11d_scan,
+-						   ATH11K_SCAN_TIMEOUT_HZ);
+-		if (!left) {
+-			ath11k_dbg(ar->ab, ATH11K_DBG_REG,
+-				   "failed to receive 11d scan complete: timed out\n");
+-			ar->state_11d = ATH11K_11D_IDLE;
+-		}
+-		ath11k_dbg(ar->ab, ATH11K_DBG_REG,
+-			   "11d scan wait left time %d\n", left);
+-	}
+-
+-	if (wait &&
+-	    (ar->scan.state == ATH11K_SCAN_STARTING ||
+-	    ar->scan.state == ATH11K_SCAN_RUNNING)) {
+-		left = wait_for_completion_timeout(&ar->scan.completed,
+-						   ATH11K_SCAN_TIMEOUT_HZ);
+-		if (!left)
+-			ath11k_dbg(ar->ab, ATH11K_DBG_REG,
+-				   "failed to receive hw scan complete: timed out\n");
+-
+-		ath11k_dbg(ar->ab, ATH11K_DBG_REG,
+-			   "hw scan wait left time %d\n", left);
+-	}
++	int i, ret = 0;
+ 
+ 	if (ar->state == ATH11K_STATE_RESTARTING)
+ 		return 0;
+@@ -218,6 +206,16 @@ int ath11k_reg_update_chan_list(struct ath11k *ar, bool wait)
+ 		}
+ 	}
+ 
++	if (wait) {
++		spin_lock_bh(&ar->data_lock);
++		list_add_tail(&params->list, &ar->channel_update_queue);
++		spin_unlock_bh(&ar->data_lock);
++
++		queue_work(ar->ab->workqueue, &ar->channel_update_work);
++
++		return 0;
++	}
++
+ 	ret = ath11k_wmi_send_scan_chan_list_cmd(ar, params);
+ 	kfree(params);
+ 
+@@ -293,12 +291,6 @@ int ath11k_regd_update(struct ath11k *ar)
+ 	if (ret)
+ 		goto err;
+ 
+-	if (ar->state == ATH11K_STATE_ON) {
+-		ret = ath11k_reg_update_chan_list(ar, true);
+-		if (ret)
+-			goto err;
+-	}
+-
+ 	return 0;
+ err:
+ 	ath11k_warn(ab, "failed to perform regd update : %d\n", ret);
+@@ -804,6 +796,54 @@ ath11k_reg_build_regd(struct ath11k_base *ab,
+ 	return new_regd;
+ }
+ 
++void ath11k_regd_update_chan_list_work(struct work_struct *work)
++{
++	struct ath11k *ar = container_of(work, struct ath11k,
++					 channel_update_work);
++	struct scan_chan_list_params *params;
++	struct list_head local_update_list;
++	int left;
++
++	INIT_LIST_HEAD(&local_update_list);
++
++	spin_lock_bh(&ar->data_lock);
++	list_splice_tail_init(&ar->channel_update_queue, &local_update_list);
++	spin_unlock_bh(&ar->data_lock);
++
++	while ((params = list_first_entry_or_null(&local_update_list,
++						  struct scan_chan_list_params,
++						  list))) {
++		if (ar->state_11d != ATH11K_11D_IDLE) {
++			left = wait_for_completion_timeout(&ar->completed_11d_scan,
++							   ATH11K_SCAN_TIMEOUT_HZ);
++			if (!left) {
++				ath11k_dbg(ar->ab, ATH11K_DBG_REG,
++					   "failed to receive 11d scan complete: timed out\n");
++				ar->state_11d = ATH11K_11D_IDLE;
++			}
++
++			ath11k_dbg(ar->ab, ATH11K_DBG_REG,
++				   "reg 11d scan wait left time %d\n", left);
++		}
++
++		if ((ar->scan.state == ATH11K_SCAN_STARTING ||
++		     ar->scan.state == ATH11K_SCAN_RUNNING)) {
++			left = wait_for_completion_timeout(&ar->scan.completed,
++							   ATH11K_SCAN_TIMEOUT_HZ);
++			if (!left)
++				ath11k_dbg(ar->ab, ATH11K_DBG_REG,
++					   "failed to receive hw scan complete: timed out\n");
++
++			ath11k_dbg(ar->ab, ATH11K_DBG_REG,
++				   "reg hw scan wait left time %d\n", left);
++		}
++
++		ath11k_wmi_send_scan_chan_list_cmd(ar, params);
++		list_del(&params->list);
++		kfree(params);
++	}
++}
++
+ static bool ath11k_reg_is_world_alpha(char *alpha)
+ {
+ 	if (alpha[0] == '0' && alpha[1] == '0')
+@@ -977,6 +1017,7 @@ void ath11k_regd_update_work(struct work_struct *work)
+ void ath11k_reg_init(struct ath11k *ar)
+ {
+ 	ar->hw->wiphy->regulatory_flags = REGULATORY_WIPHY_SELF_MANAGED;
++	ar->hw->wiphy->flags |= WIPHY_FLAG_NOTIFY_REGDOM_BY_DRIVER;
+ 	ar->hw->wiphy->reg_notifier = ath11k_reg_notifier;
+ }
+ 
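
The reg.c change above is the heart of this ath11k fix: instead of blocking inside the regulatory notifier while 11d and hardware scans finish, ath11k_reg_update_chan_list() now queues the prepared channel parameters under ar->data_lock and lets channel_update_work do the waiting. The worker splices the whole queue onto a local list, so the spinlock is only held for list manipulation, never across the wait_for_completion_timeout() calls. A minimal userspace sketch of that queue-then-splice pattern follows; all names are illustrative, not the driver's.

    /* Queue-then-splice: producers enqueue cheaply, the worker drains
     * without holding the lock across slow work. Build: cc -pthread x.c
     */
    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct params {
        int pdev_id;
        struct params *next;
    };

    static pthread_mutex_t data_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct params *queue_head;

    static void enqueue(struct params *p)
    {
        pthread_mutex_lock(&data_lock);
        p->next = queue_head;   /* LIFO here; the driver keeps FIFO order
                                 * with list_splice_tail_init() */
        queue_head = p;
        pthread_mutex_unlock(&data_lock);
    }

    static void *worker(void *arg)
    {
        struct params *local, *p;

        (void)arg;
        pthread_mutex_lock(&data_lock);     /* splice under the lock... */
        local = queue_head;
        queue_head = NULL;
        pthread_mutex_unlock(&data_lock);

        while ((p = local)) {               /* ...process outside it */
            local = p->next;
            printf("updating channel list for pdev %d\n", p->pdev_id);
            free(p);                        /* the driver kfree()s too */
        }
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        for (int i = 0; i < 3; i++) {
            struct params *p = malloc(sizeof(*p));
            p->pdev_id = i;
            enqueue(p);
        }
        pthread_create(&t, NULL, worker, NULL);
        pthread_join(t, NULL);
        return 0;
    }
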
+diff --git a/drivers/net/wireless/ath/ath11k/reg.h b/drivers/net/wireless/ath/ath11k/reg.h
+index 263ea90619483e..72b48359401576 100644
+--- a/drivers/net/wireless/ath/ath11k/reg.h
++++ b/drivers/net/wireless/ath/ath11k/reg.h
+@@ -1,7 +1,7 @@
+ /* SPDX-License-Identifier: BSD-3-Clause-Clear */
+ /*
+  * Copyright (c) 2019 The Linux Foundation. All rights reserved.
+- * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+  */
+ 
+ #ifndef ATH11K_REG_H
+@@ -33,6 +33,7 @@ void ath11k_reg_init(struct ath11k *ar);
+ void ath11k_reg_reset_info(struct cur_regulatory_info *reg_info);
+ void ath11k_reg_free(struct ath11k_base *ab);
+ void ath11k_regd_update_work(struct work_struct *work);
++void ath11k_regd_update_chan_list_work(struct work_struct *work);
+ struct ieee80211_regdomain *
+ ath11k_reg_build_regd(struct ath11k_base *ab,
+ 		      struct cur_regulatory_info *reg_info, bool intersect,
+diff --git a/drivers/net/wireless/ath/ath11k/wmi.h b/drivers/net/wireless/ath/ath11k/wmi.h
+index 8982b909c821e6..30b4b0c1768269 100644
+--- a/drivers/net/wireless/ath/ath11k/wmi.h
++++ b/drivers/net/wireless/ath/ath11k/wmi.h
+@@ -3817,6 +3817,7 @@ struct wmi_stop_scan_cmd {
+ };
+ 
+ struct scan_chan_list_params {
++	struct list_head list;
+ 	u32 pdev_id;
+ 	u16 nallchans;
+ 	struct channel_param ch_param[];
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c
+index 1e8495f50c16ae..6531cff58ae9f2 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c
+@@ -392,10 +392,8 @@ void brcmf_btcoex_detach(struct brcmf_cfg80211_info *cfg)
+ 	if (!cfg->btcoex)
+ 		return;
+ 
+-	if (cfg->btcoex->timer_on) {
+-		cfg->btcoex->timer_on = false;
+-		timer_shutdown_sync(&cfg->btcoex->timer);
+-	}
++	timer_shutdown_sync(&cfg->btcoex->timer);
++	cfg->btcoex->timer_on = false;
+ 
+ 	cancel_work_sync(&cfg->btcoex->work);
+ 
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
+index 86d6286a153785..e5fbb5fcc4abc9 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
++++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
+@@ -741,6 +741,12 @@ int iwl_uefi_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func,
+ 		goto out;
+ 	}
+ 
++	if (!(data->functions[DSM_FUNC_QUERY] & BIT(func))) {
++		IWL_DEBUG_RADIO(fwrt, "DSM func %d not in 0x%x\n",
++				func, data->functions[DSM_FUNC_QUERY]);
++		goto out;
++	}
++
+ 	*value = data->functions[func];
+ 	ret = 0;
+ out:
+diff --git a/drivers/net/wireless/marvell/libertas/cfg.c b/drivers/net/wireless/marvell/libertas/cfg.c
+index afe9bcd3ad46d7..37bb788f83e36e 100644
+--- a/drivers/net/wireless/marvell/libertas/cfg.c
++++ b/drivers/net/wireless/marvell/libertas/cfg.c
+@@ -1150,10 +1150,13 @@ static int lbs_associate(struct lbs_private *priv,
+ 	/* add SSID TLV */
+ 	rcu_read_lock();
+ 	ssid_eid = ieee80211_bss_get_ie(bss, WLAN_EID_SSID);
+-	if (ssid_eid)
+-		pos += lbs_add_ssid_tlv(pos, ssid_eid + 2, ssid_eid[1]);
+-	else
++	if (ssid_eid) {
++		u32 ssid_len = min(ssid_eid[1], IEEE80211_MAX_SSID_LEN);
++
++		pos += lbs_add_ssid_tlv(pos, ssid_eid + 2, ssid_len);
++	} else {
+ 		lbs_deb_assoc("no SSID\n");
++	}
+ 	rcu_read_unlock();
+ 
+ 	/* add DS param TLV */
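
Both this libertas change and the matching cw1200 hunk later in the patch fix the same bug class: the length byte of a received SSID IE is attacker-controlled and can claim up to 255 bytes, so it must be clamped to the destination's capacity before the copy. A standalone sketch of the clamp (the 32-byte limit is IEEE80211_MAX_SSID_LEN; everything else is illustrative):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define MAX_SSID_LEN 32    /* IEEE80211_MAX_SSID_LEN */

    /* ie[0] = element id, ie[1] = claimed length, ie[2..] = payload.
     * ie[1] comes off the air and must not be trusted.
     */
    static size_t copy_ssid(uint8_t dst[MAX_SSID_LEN], const uint8_t *ie)
    {
        size_t len = ie[1];

        if (len > MAX_SSID_LEN)    /* the fix: clamp before the copy */
            len = MAX_SSID_LEN;
        memcpy(dst, &ie[2], len);
        return len;
    }

    int main(void)
    {
        uint8_t dst[MAX_SSID_LEN];
        uint8_t evil[2 + 255] = { 0x00, 0xff };  /* claims 255 bytes */

        printf("copied %zu bytes\n", copy_ssid(dst, evil));
        return 0;
    }

Without the clamp, join.ssid in cw1200 (a fixed 32-byte array) and the TLV built by lbs_add_ssid_tlv() could be overrun by a malicious beacon.
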
+diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+index fca3eea7ee8421..59bea82eab2947 100644
+--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
++++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+@@ -4680,8 +4680,9 @@ int mwifiex_init_channel_scan_gap(struct mwifiex_adapter *adapter)
+ 	 * additional active scan request for hidden SSIDs on passive channels.
+ 	 */
+ 	adapter->num_in_chan_stats = 2 * (n_channels_bg + n_channels_a);
+-	adapter->chan_stats = vmalloc(array_size(sizeof(*adapter->chan_stats),
+-						 adapter->num_in_chan_stats));
++	adapter->chan_stats = kcalloc(adapter->num_in_chan_stats,
++				      sizeof(*adapter->chan_stats),
++				      GFP_KERNEL);
+ 
+ 	if (!adapter->chan_stats)
+ 		return -ENOMEM;
+diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
+index 855019fe548582..2a9eae68d9ba18 100644
+--- a/drivers/net/wireless/marvell/mwifiex/main.c
++++ b/drivers/net/wireless/marvell/mwifiex/main.c
+@@ -667,7 +667,7 @@ static int _mwifiex_fw_dpc(const struct firmware *firmware, void *context)
+ 	goto done;
+ 
+ err_add_intf:
+-	vfree(adapter->chan_stats);
++	kfree(adapter->chan_stats);
+ err_init_chan_scan:
+ 	wiphy_unregister(adapter->wiphy);
+ 	wiphy_free(adapter->wiphy);
+@@ -1515,7 +1515,7 @@ static void mwifiex_uninit_sw(struct mwifiex_adapter *adapter)
+ 	wiphy_free(adapter->wiphy);
+ 	adapter->wiphy = NULL;
+ 
+-	vfree(adapter->chan_stats);
++	kfree(adapter->chan_stats);
+ 	mwifiex_free_cmd_buffers(adapter);
+ }
+ 
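
The mwifiex change swaps vmalloc(array_size(...)) for kcalloc(), getting a zeroed allocation plus a multiplication-overflow check in one call; the main.c hunks above switch the corresponding vfree() calls to kfree() because allocator and free routine must always pair. A userspace analogue of what kcalloc() guards against, using GCC/Clang overflow builtins (the function name is illustrative):

    #include <stdlib.h>
    #include <string.h>

    /* Like kcalloc(): zeroed array, NULL if nmemb * size would wrap. */
    void *alloc_array(size_t nmemb, size_t size)
    {
        size_t bytes;
        void *p;

        if (__builtin_mul_overflow(nmemb, size, &bytes))
            return NULL;          /* overflow: refuse, don't under-allocate */

        p = malloc(bytes);
        if (p)
            memset(p, 0, bytes);  /* calloc() would do both steps for us */
        return p;
    }
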
+diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
+index 0ca83f1a3e3ea2..5373f8c419b045 100644
+--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
++++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
+@@ -1586,6 +1586,10 @@ void mt76_wcid_cleanup(struct mt76_dev *dev, struct mt76_wcid *wcid)
+ 	skb_queue_splice_tail_init(&wcid->tx_pending, &list);
+ 	spin_unlock(&wcid->tx_pending.lock);
+ 
++	spin_lock(&wcid->tx_offchannel.lock);
++	skb_queue_splice_tail_init(&wcid->tx_offchannel, &list);
++	spin_unlock(&wcid->tx_offchannel.lock);
++
+ 	spin_unlock_bh(&phy->tx_lock);
+ 
+ 	while ((skb = __skb_dequeue(&list)) != NULL) {
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mac.c b/drivers/net/wireless/mediatek/mt76/mt7925/mac.c
+index a095fb31e391a1..f1bd0c174acf46 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7925/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7925/mac.c
+@@ -1459,7 +1459,7 @@ void mt7925_usb_sdio_tx_complete_skb(struct mt76_dev *mdev,
+ 	sta = wcid_to_sta(wcid);
+ 
+ 	if (sta && likely(e->skb->protocol != cpu_to_be16(ETH_P_PAE)))
+-		mt76_connac2_tx_check_aggr(sta, txwi);
++		mt7925_tx_check_aggr(sta, e->skb, wcid);
+ 
+ 	skb_pull(e->skb, headroom);
+ 	mt76_tx_complete_skb(mdev, e->wcid, e->skb);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/main.c b/drivers/net/wireless/mediatek/mt76/mt7925/main.c
+index a635b223dab18e..59d4357819edad 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7925/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7925/main.c
+@@ -1187,6 +1187,9 @@ mt7925_mac_sta_remove_links(struct mt792x_dev *dev, struct ieee80211_vif *vif,
+ 		struct mt792x_bss_conf *mconf;
+ 		struct mt792x_link_sta *mlink;
+ 
++		if (vif->type == NL80211_IFTYPE_AP)
++			break;
++
+ 		link_sta = mt792x_sta_to_link_sta(vif, sta, link_id);
+ 		if (!link_sta)
+ 			continue;
+@@ -2005,8 +2008,10 @@ mt7925_change_vif_links(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ 					     GFP_KERNEL);
+ 			mlink = devm_kzalloc(dev->mt76.dev, sizeof(*mlink),
+ 					     GFP_KERNEL);
+-			if (!mconf || !mlink)
++			if (!mconf || !mlink) {
++				mt792x_mutex_release(dev);
+ 				return -ENOMEM;
++			}
+ 		}
+ 
+ 		mconfs[link_id] = mconf;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
+index 265958f7b78711..5f5544b6214cd1 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
+@@ -1834,8 +1834,8 @@ mt7996_mcu_get_mmps_mode(enum ieee80211_smps_mode smps)
+ int mt7996_mcu_set_fixed_rate_ctrl(struct mt7996_dev *dev,
+ 				   void *data, u16 version)
+ {
++	struct uni_header hdr = {};
+ 	struct ra_fixed_rate *req;
+-	struct uni_header hdr;
+ 	struct sk_buff *skb;
+ 	struct tlv *tlv;
+ 	int len;
+@@ -3115,7 +3115,7 @@ int mt7996_mcu_set_hdr_trans(struct mt7996_dev *dev, bool hdr_trans)
+ {
+ 	struct {
+ 		u8 __rsv[4];
+-	} __packed hdr;
++	} __packed hdr = {};
+ 	struct hdr_trans_blacklist *req_blacklist;
+ 	struct hdr_trans_en *req_en;
+ 	struct sk_buff *skb;
+diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
+index 065a1e4537457a..634b6dacd1e0d2 100644
+--- a/drivers/net/wireless/mediatek/mt76/tx.c
++++ b/drivers/net/wireless/mediatek/mt76/tx.c
+@@ -334,6 +334,7 @@ mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
+ 	struct mt76_wcid *wcid, struct sk_buff *skb)
+ {
+ 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
++	struct ieee80211_hdr *hdr = (void *)skb->data;
+ 	struct sk_buff_head *head;
+ 
+ 	if (mt76_testmode_enabled(phy)) {
+@@ -351,7 +352,8 @@ mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
+ 	info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->band_idx);
+ 
+ 	if ((info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) ||
+-	    (info->control.flags & IEEE80211_TX_CTRL_DONT_USE_RATE_MASK))
++	    ((info->control.flags & IEEE80211_TX_CTRL_DONT_USE_RATE_MASK) &&
++	     ieee80211_is_probe_req(hdr->frame_control)))
+ 		head = &wcid->tx_offchannel;
+ 	else
+ 		head = &wcid->tx_pending;
+@@ -643,6 +645,7 @@ mt76_txq_schedule_pending_wcid(struct mt76_phy *phy, struct mt76_wcid *wcid,
+ static void mt76_txq_schedule_pending(struct mt76_phy *phy)
+ {
+ 	LIST_HEAD(tx_list);
++	int ret = 0;
+ 
+ 	if (list_empty(&phy->tx_list))
+ 		return;
+@@ -654,13 +657,13 @@ static void mt76_txq_schedule_pending(struct mt76_phy *phy)
+ 	list_splice_init(&phy->tx_list, &tx_list);
+ 	while (!list_empty(&tx_list)) {
+ 		struct mt76_wcid *wcid;
+-		int ret;
+ 
+ 		wcid = list_first_entry(&tx_list, struct mt76_wcid, tx_list);
+ 		list_del_init(&wcid->tx_list);
+ 
+ 		spin_unlock(&phy->tx_lock);
+-		ret = mt76_txq_schedule_pending_wcid(phy, wcid, &wcid->tx_offchannel);
++		if (ret >= 0)
++			ret = mt76_txq_schedule_pending_wcid(phy, wcid, &wcid->tx_offchannel);
+ 		if (ret >= 0 && !phy->offchannel)
+ 			ret = mt76_txq_schedule_pending_wcid(phy, wcid, &wcid->tx_pending);
+ 		spin_lock(&phy->tx_lock);
+@@ -669,9 +672,6 @@ static void mt76_txq_schedule_pending(struct mt76_phy *phy)
+ 		    !skb_queue_empty(&wcid->tx_offchannel) &&
+ 		    list_empty(&wcid->tx_list))
+ 			list_add_tail(&wcid->tx_list, &phy->tx_list);
+-
+-		if (ret < 0)
+-			break;
+ 	}
+ 	spin_unlock(&phy->tx_lock);
+ 
+diff --git a/drivers/net/wireless/st/cw1200/sta.c b/drivers/net/wireless/st/cw1200/sta.c
+index c259da8161e4d5..2bce867dd4acf6 100644
+--- a/drivers/net/wireless/st/cw1200/sta.c
++++ b/drivers/net/wireless/st/cw1200/sta.c
+@@ -1290,7 +1290,7 @@ static void cw1200_do_join(struct cw1200_common *priv)
+ 		rcu_read_lock();
+ 		ssidie = ieee80211_bss_get_ie(bss, WLAN_EID_SSID);
+ 		if (ssidie) {
+-			join.ssid_len = ssidie[1];
++			join.ssid_len = min(ssidie[1], IEEE80211_MAX_SSID_LEN);
+ 			memcpy(join.ssid, &ssidie[2], join.ssid_len);
+ 		}
+ 		rcu_read_unlock();
+diff --git a/drivers/of/of_numa.c b/drivers/of/of_numa.c
+index 2ec20886d176c5..dfb917c117fa75 100644
+--- a/drivers/of/of_numa.c
++++ b/drivers/of/of_numa.c
+@@ -62,8 +62,11 @@ static int __init of_numa_parse_memory_nodes(void)
+ 			r = -EINVAL;
+ 		}
+ 
+-		for (i = 0; !r && !of_address_to_resource(np, i, &rsrc); i++)
++		for (i = 0; !r && !of_address_to_resource(np, i, &rsrc); i++) {
+ 			r = numa_add_memblk(nid, rsrc.start, rsrc.end + 1);
++			if (!r)
++				node_set(nid, numa_nodes_parsed);
++		}
+ 
+ 		if (!i || r) {
+ 			of_node_put(np);
+diff --git a/drivers/pcmcia/omap_cf.c b/drivers/pcmcia/omap_cf.c
+index 80137c7afe0d9b..5b639c942f17a9 100644
+--- a/drivers/pcmcia/omap_cf.c
++++ b/drivers/pcmcia/omap_cf.c
+@@ -215,6 +215,8 @@ static int __init omap_cf_probe(struct platform_device *pdev)
+ 		return -EINVAL;
+ 
+ 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++	if (!res)
++		return -EINVAL;
+ 
+ 	cf = kzalloc(sizeof *cf, GFP_KERNEL);
+ 	if (!cf)
+diff --git a/drivers/pcmcia/rsrc_iodyn.c b/drivers/pcmcia/rsrc_iodyn.c
+index b04b16496b0c4b..2677b577c1f858 100644
+--- a/drivers/pcmcia/rsrc_iodyn.c
++++ b/drivers/pcmcia/rsrc_iodyn.c
+@@ -62,6 +62,9 @@ static struct resource *__iodyn_find_io_region(struct pcmcia_socket *s,
+ 	unsigned long min = base;
+ 	int ret;
+ 
++	if (!res)
++		return NULL;
++
+ 	data.mask = align - 1;
+ 	data.offset = base & data.mask;
+ 
+diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c
+index bf9d070a44966d..da494fe451baf0 100644
+--- a/drivers/pcmcia/rsrc_nonstatic.c
++++ b/drivers/pcmcia/rsrc_nonstatic.c
+@@ -375,7 +375,9 @@ static int do_validate_mem(struct pcmcia_socket *s,
+ 
+ 	if (validate && !s->fake_cis) {
+ 		/* move it to the validated data set */
+-		add_interval(&s_data->mem_db_valid, base, size);
++		ret = add_interval(&s_data->mem_db_valid, base, size);
++		if (ret)
++			return ret;
+ 		sub_interval(&s_data->mem_db, base, size);
+ 	}
+ 
+diff --git a/drivers/platform/x86/amd/pmc/pmc-quirks.c b/drivers/platform/x86/amd/pmc/pmc-quirks.c
+index 04686ae1e976bd..6f5437d210a617 100644
+--- a/drivers/platform/x86/amd/pmc/pmc-quirks.c
++++ b/drivers/platform/x86/amd/pmc/pmc-quirks.c
+@@ -242,6 +242,20 @@ static const struct dmi_system_id fwbug_list[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "Lafite Pro V 14M"),
+ 		}
+ 	},
++	{
++		.ident = "TUXEDO InfinityBook Pro 14/15 AMD Gen10",
++		.driver_data = &quirk_spurious_8042,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_NAME, "XxHP4NAx"),
++		}
++	},
++	{
++		.ident = "TUXEDO InfinityBook Pro 14/15 AMD Gen10",
++		.driver_data = &quirk_spurious_8042,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_NAME, "XxKK4NAx_XxSP4NAx"),
++		}
++	},
+ 	{}
+ };
+ 
+diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
+index 90ad0045fec5ff..8f06adf828361a 100644
+--- a/drivers/platform/x86/asus-nb-wmi.c
++++ b/drivers/platform/x86/asus-nb-wmi.c
+@@ -654,8 +654,6 @@ static void asus_nb_wmi_key_filter(struct asus_wmi_driver *asus_wmi, int *code,
+ 		if (atkbd_reports_vol_keys)
+ 			*code = ASUS_WMI_KEY_IGNORE;
+ 		break;
+-	case 0x5D: /* Wireless console Toggle */
+-	case 0x5E: /* Wireless console Enable */
+ 	case 0x5F: /* Wireless console Disable */
+ 		if (quirks->ignore_key_wlan)
+ 			*code = ASUS_WMI_KEY_IGNORE;
+diff --git a/drivers/platform/x86/intel/tpmi_power_domains.c b/drivers/platform/x86/intel/tpmi_power_domains.c
+index 12fb0943b5dc37..7e0b86535d02c1 100644
+--- a/drivers/platform/x86/intel/tpmi_power_domains.c
++++ b/drivers/platform/x86/intel/tpmi_power_domains.c
+@@ -167,7 +167,7 @@ static int tpmi_get_logical_id(unsigned int cpu, struct tpmi_cpu_info *info)
+ 
+ 	info->punit_thread_id = FIELD_GET(LP_ID_MASK, data);
+ 	info->punit_core_id = FIELD_GET(MODULE_ID_MASK, data);
+-	info->pkg_id = topology_physical_package_id(cpu);
++	info->pkg_id = topology_logical_package_id(cpu);
+ 	info->linux_cpu = cpu;
+ 
+ 	return 0;
+diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
+index e6c9112a886275..9e44406921b73b 100644
+--- a/drivers/scsi/lpfc/lpfc_nvmet.c
++++ b/drivers/scsi/lpfc/lpfc_nvmet.c
+@@ -1243,7 +1243,7 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
+ 	struct lpfc_nvmet_tgtport *tgtp;
+ 	struct lpfc_async_xchg_ctx *ctxp =
+ 		container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
+-	struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
++	struct rqb_dmabuf *nvmebuf;
+ 	struct lpfc_hba *phba = ctxp->phba;
+ 	unsigned long iflag;
+ 
+@@ -1251,13 +1251,18 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
+ 	lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
+ 			 ctxp->oxid, ctxp->size, raw_smp_processor_id());
+ 
++	spin_lock_irqsave(&ctxp->ctxlock, iflag);
++	nvmebuf = ctxp->rqb_buffer;
+ 	if (!nvmebuf) {
++		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
+ 		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
+ 				"6425 Defer rcv: no buffer oxid x%x: "
+ 				"flg %x ste %x\n",
+ 				ctxp->oxid, ctxp->flag, ctxp->state);
+ 		return;
+ 	}
++	ctxp->rqb_buffer = NULL;
++	spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
+ 
+ 	tgtp = phba->targetport->private;
+ 	if (tgtp)
+@@ -1265,9 +1270,6 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
+ 
+ 	/* Free the nvmebuf since a new buffer already replaced it */
+ 	nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
+-	spin_lock_irqsave(&ctxp->ctxlock, iflag);
+-	ctxp->rqb_buffer = NULL;
+-	spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
+ }
+ 
+ /**
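
The lpfc fix narrows a double-free window: previously the buffer pointer was read before taking ctxlock and cleared only after the buffer had already been handed back, so a racing path could observe and release the same nvmebuf. The corrected code loads and NULLs ctxp->rqb_buffer inside one critical section, guaranteeing a single winner. The claim pattern in isolation (types and names hypothetical):

    #include <pthread.h>
    #include <stddef.h>

    struct ctx {
        pthread_mutex_t lock;
        void *buf;              /* several paths may race to release it */
    };

    /* Returns the buffer if this caller won the race, NULL otherwise.
     * Load and clear happen atomically with respect to other claimers,
     * so the actual free can then safely happen outside the lock.
     */
    void *claim_buf(struct ctx *c)
    {
        void *buf;

        pthread_mutex_lock(&c->lock);
        buf = c->buf;
        c->buf = NULL;
        pthread_mutex_unlock(&c->lock);
        return buf;
    }
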
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index e1b06f803a94b1..ee1d5dec3bc605 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -177,9 +177,8 @@ cache_type_store(struct device *dev, struct device_attribute *attr,
+ 
+ 		lim = queue_limits_start_update(sdkp->disk->queue);
+ 		sd_set_flush_flag(sdkp, &lim);
+-		blk_mq_freeze_queue(sdkp->disk->queue);
+-		ret = queue_limits_commit_update(sdkp->disk->queue, &lim);
+-		blk_mq_unfreeze_queue(sdkp->disk->queue);
++		ret = queue_limits_commit_update_frozen(sdkp->disk->queue,
++				&lim);
+ 		if (ret)
+ 			return ret;
+ 		return count;
+@@ -483,9 +482,7 @@ provisioning_mode_store(struct device *dev, struct device_attribute *attr,
+ 
+ 	lim = queue_limits_start_update(sdkp->disk->queue);
+ 	sd_config_discard(sdkp, &lim, mode);
+-	blk_mq_freeze_queue(sdkp->disk->queue);
+-	err = queue_limits_commit_update(sdkp->disk->queue, &lim);
+-	blk_mq_unfreeze_queue(sdkp->disk->queue);
++	err = queue_limits_commit_update_frozen(sdkp->disk->queue, &lim);
+ 	if (err)
+ 		return err;
+ 	return count;
+@@ -594,9 +591,7 @@ max_write_same_blocks_store(struct device *dev, struct device_attribute *attr,
+ 
+ 	lim = queue_limits_start_update(sdkp->disk->queue);
+ 	sd_config_write_same(sdkp, &lim);
+-	blk_mq_freeze_queue(sdkp->disk->queue);
+-	err = queue_limits_commit_update(sdkp->disk->queue, &lim);
+-	blk_mq_unfreeze_queue(sdkp->disk->queue);
++	err = queue_limits_commit_update_frozen(sdkp->disk->queue, &lim);
+ 	if (err)
+ 		return err;
+ 	return count;
+@@ -3803,9 +3798,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
+ 	sd_config_write_same(sdkp, &lim);
+ 	kfree(buffer);
+ 
+-	blk_mq_freeze_queue(sdkp->disk->queue);
+-	err = queue_limits_commit_update(sdkp->disk->queue, &lim);
+-	blk_mq_unfreeze_queue(sdkp->disk->queue);
++	err = queue_limits_commit_update_frozen(sdkp->disk->queue, &lim);
+ 	if (err)
+ 		return err;
+ 
+diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
+index 198bec87bb8e7c..add13e30689838 100644
+--- a/drivers/scsi/sr.c
++++ b/drivers/scsi/sr.c
+@@ -475,13 +475,21 @@ static blk_status_t sr_init_command(struct scsi_cmnd *SCpnt)
+ 
+ static int sr_revalidate_disk(struct scsi_cd *cd)
+ {
++	struct request_queue *q = cd->device->request_queue;
+ 	struct scsi_sense_hdr sshdr;
++	struct queue_limits lim;
++	int sector_size;
+ 
+ 	/* if the unit is not ready, nothing more to do */
+ 	if (scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr))
+ 		return 0;
+ 	sr_cd_check(&cd->cdi);
+-	return get_sectorsize(cd);
++	sector_size = get_sectorsize(cd);
++
++	lim = queue_limits_start_update(q);
++	lim.logical_block_size = sector_size;
++	lim.features |= BLK_FEAT_ROTATIONAL;
++	return queue_limits_commit_update_frozen(q, &lim);
+ }
+ 
+ static int sr_block_open(struct gendisk *disk, blk_mode_t mode)
+@@ -721,10 +729,8 @@ static int sr_probe(struct device *dev)
+ 
+ static int get_sectorsize(struct scsi_cd *cd)
+ {
+-	struct request_queue *q = cd->device->request_queue;
+ 	static const u8 cmd[10] = { READ_CAPACITY };
+ 	unsigned char buffer[8] = { };
+-	struct queue_limits lim;
+ 	int err;
+ 	int sector_size;
+ 	struct scsi_failure failure_defs[] = {
+@@ -795,12 +801,7 @@ static int get_sectorsize(struct scsi_cd *cd)
+ 		set_capacity(cd->disk, cd->capacity);
+ 	}
+ 
+-	lim = queue_limits_start_update(q);
+-	lim.logical_block_size = sector_size;
+-	blk_mq_freeze_queue(q);
+-	err = queue_limits_commit_update(q, &lim);
+-	blk_mq_unfreeze_queue(q);
+-	return err;
++	return sector_size;
+ }
+ 
+ static int get_capabilities(struct scsi_cd *cd)
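
Every sd.c and sr.c hunk above collapses the same freeze/commit/unfreeze triple into queue_limits_commit_update_frozen(). The helper's definition is not part of this patch (only its declaration appears in the blkdev.h hunk further below); presumably it wraps exactly the three calls it replaces, along these lines:

    /* Sketch of the block-layer helper the hunks rely on; this mirrors
     * the open-coded sequence each caller used to carry. Kernel code,
     * not standalone-runnable.
     */
    int queue_limits_commit_update_frozen(struct request_queue *q,
                                          struct queue_limits *lim)
    {
        int ret;

        blk_mq_freeze_queue(q);
        ret = queue_limits_commit_update(q, lim);
        blk_mq_unfreeze_queue(q);

        return ret;
    }

Centralizing the freeze also lets sr_revalidate_disk() fold BLK_FEAT_ROTATIONAL and the logical block size into a single frozen update, instead of committing limits from inside get_sectorsize().
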
+diff --git a/drivers/soc/qcom/mdt_loader.c b/drivers/soc/qcom/mdt_loader.c
+index 64e0facc392e5d..29124aa6d03b5e 100644
+--- a/drivers/soc/qcom/mdt_loader.c
++++ b/drivers/soc/qcom/mdt_loader.c
+@@ -39,12 +39,14 @@ static bool mdt_header_valid(const struct firmware *fw)
+ 	if (phend > fw->size)
+ 		return false;
+ 
+-	if (ehdr->e_shentsize != sizeof(struct elf32_shdr))
+-		return false;
++	if (ehdr->e_shentsize || ehdr->e_shnum) {
++		if (ehdr->e_shentsize != sizeof(struct elf32_shdr))
++			return false;
+ 
+-	shend = size_add(size_mul(sizeof(struct elf32_shdr), ehdr->e_shnum), ehdr->e_shoff);
+-	if (shend > fw->size)
+-		return false;
++		shend = size_add(size_mul(sizeof(struct elf32_shdr), ehdr->e_shnum), ehdr->e_shoff);
++		if (shend > fw->size)
++			return false;
++	}
+ 
+ 	return true;
+ }
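
mdt_header_valid() computes e_shoff + e_shnum * e_shentsize with size_mul()/size_add(), whose saturating arithmetic turns any overflow into a value that fails the <= fw->size test, and the hunk additionally accepts firmware images that carry no section headers at all (e_shentsize and e_shnum both zero). A freestanding version of the same overflow-safe bounds check, substituting compiler builtins for the kernel's saturating helpers:

    #include <stdbool.h>
    #include <stddef.h>

    /* True if [off, off + count * entsize) fits inside total bytes,
     * treating any arithmetic wraparound as out of bounds.
     */
    bool range_ok(size_t off, size_t count, size_t entsize, size_t total)
    {
        size_t bytes, end;

        if (__builtin_mul_overflow(count, entsize, &bytes))
            return false;
        if (__builtin_add_overflow(off, bytes, &end))
            return false;
        return end <= total;
    }
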
+diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
+index f8cacb9c7408f3..5e96913fd94660 100644
+--- a/drivers/spi/spi-fsl-lpspi.c
++++ b/drivers/spi/spi-fsl-lpspi.c
+@@ -3,8 +3,9 @@
+ // Freescale i.MX7ULP LPSPI driver
+ //
+ // Copyright 2016 Freescale Semiconductor, Inc.
+-// Copyright 2018 NXP Semiconductors
++// Copyright 2018, 2023, 2025 NXP
+ 
++#include <linux/bitfield.h>
+ #include <linux/clk.h>
+ #include <linux/completion.h>
+ #include <linux/delay.h>
+@@ -70,7 +71,7 @@
+ #define DER_TDDE	BIT(0)
+ #define CFGR1_PCSCFG	BIT(27)
+ #define CFGR1_PINCFG	(BIT(24)|BIT(25))
+-#define CFGR1_PCSPOL	BIT(8)
++#define CFGR1_PCSPOL_MASK	GENMASK(11, 8)
+ #define CFGR1_NOSTALL	BIT(3)
+ #define CFGR1_HOST	BIT(0)
+ #define FSR_TXCOUNT	(0xFF)
+@@ -82,6 +83,8 @@
+ #define TCR_RXMSK	BIT(19)
+ #define TCR_TXMSK	BIT(18)
+ 
++#define SR_CLEAR_MASK	GENMASK(13, 8)
++
+ struct fsl_lpspi_devtype_data {
+ 	u8 prescale_max;
+ };
+@@ -420,7 +423,9 @@ static int fsl_lpspi_config(struct fsl_lpspi_data *fsl_lpspi)
+ 	else
+ 		temp = CFGR1_PINCFG;
+ 	if (fsl_lpspi->config.mode & SPI_CS_HIGH)
+-		temp |= CFGR1_PCSPOL;
++		temp |= FIELD_PREP(CFGR1_PCSPOL_MASK,
++				   BIT(fsl_lpspi->config.chip_select));
++
+ 	writel(temp, fsl_lpspi->base + IMX7ULP_CFGR1);
+ 
+ 	temp = readl(fsl_lpspi->base + IMX7ULP_CR);
+@@ -529,14 +534,13 @@ static int fsl_lpspi_reset(struct fsl_lpspi_data *fsl_lpspi)
+ 		fsl_lpspi_intctrl(fsl_lpspi, 0);
+ 	}
+ 
+-	/* W1C for all flags in SR */
+-	temp = 0x3F << 8;
+-	writel(temp, fsl_lpspi->base + IMX7ULP_SR);
+-
+ 	/* Clear FIFO and disable module */
+ 	temp = CR_RRF | CR_RTF;
+ 	writel(temp, fsl_lpspi->base + IMX7ULP_CR);
+ 
++	/* W1C for all flags in SR */
++	writel(SR_CLEAR_MASK, fsl_lpspi->base + IMX7ULP_SR);
++
+ 	return 0;
+ }
+ 
+@@ -727,12 +731,10 @@ static int fsl_lpspi_pio_transfer(struct spi_controller *controller,
+ 	fsl_lpspi_write_tx_fifo(fsl_lpspi);
+ 
+ 	ret = fsl_lpspi_wait_for_completion(controller);
+-	if (ret)
+-		return ret;
+ 
+ 	fsl_lpspi_reset(fsl_lpspi);
+ 
+-	return 0;
++	return ret;
+ }
+ 
+ static int fsl_lpspi_transfer_one(struct spi_controller *controller,
+@@ -780,7 +782,7 @@ static irqreturn_t fsl_lpspi_isr(int irq, void *dev_id)
+ 	if (temp_SR & SR_MBF ||
+ 	    readl(fsl_lpspi->base + IMX7ULP_FSR) & FSR_TXCOUNT) {
+ 		writel(SR_FCF, fsl_lpspi->base + IMX7ULP_SR);
+-		fsl_lpspi_intctrl(fsl_lpspi, IER_FCIE);
++		fsl_lpspi_intctrl(fsl_lpspi, IER_FCIE | (temp_IER & IER_TDIE));
+ 		return IRQ_HANDLED;
+ 	}
+ 
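
The lpspi change widens the chip-select polarity control from a single PCSPOL bit to the full four-bit field at CFGR1[11:8], one bit per chip select, placed with FIELD_PREP(CFGR1_PCSPOL_MASK, BIT(cs)). A userspace rendition of those macros shows where each chip select's bit lands; FIELD_PREP is simplified here, the kernel version adds compile-time mask checks:

    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n)          (1u << (n))
    #define GENMASK(h, l)   (((~0u) << (l)) & (~0u >> (31 - (h))))
    /* Shift a value to a mask's lowest set bit (simplified FIELD_PREP). */
    #define FIELD_PREP(mask, val) \
        (((val) << __builtin_ctz(mask)) & (mask))

    #define CFGR1_PCSPOL_MASK GENMASK(11, 8)

    int main(void)
    {
        for (int cs = 0; cs < 4; cs++)
            printf("cs%d -> 0x%08x\n", cs,
                   FIELD_PREP(CFGR1_PCSPOL_MASK, BIT(cs)));
        return 0;
    }

This prints 0x00000100 through 0x00000800, i.e. bits 8 to 11, so asserting SPI_CS_HIGH for one chip select no longer flips the polarity of chip select 0 unconditionally.
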
+diff --git a/drivers/tee/optee/ffa_abi.c b/drivers/tee/optee/ffa_abi.c
+index f9ef7d94cebd7a..a963eed70c1d4c 100644
+--- a/drivers/tee/optee/ffa_abi.c
++++ b/drivers/tee/optee/ffa_abi.c
+@@ -657,7 +657,7 @@ static int optee_ffa_do_call_with_arg(struct tee_context *ctx,
+  * with a matching configuration.
+  */
+ 
+-static bool optee_ffa_api_is_compatbile(struct ffa_device *ffa_dev,
++static bool optee_ffa_api_is_compatible(struct ffa_device *ffa_dev,
+ 					const struct ffa_ops *ops)
+ {
+ 	const struct ffa_msg_ops *msg_ops = ops->msg_ops;
+@@ -908,7 +908,7 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev)
+ 	ffa_ops = ffa_dev->ops;
+ 	notif_ops = ffa_ops->notifier_ops;
+ 
+-	if (!optee_ffa_api_is_compatbile(ffa_dev, ffa_ops))
++	if (!optee_ffa_api_is_compatible(ffa_dev, ffa_ops))
+ 		return -EINVAL;
+ 
+ 	if (!optee_ffa_exchange_caps(ffa_dev, ffa_ops, &sec_caps,
+diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c
+index daf6e5cfd59ae2..2a7d253d9c554c 100644
+--- a/drivers/tee/tee_shm.c
++++ b/drivers/tee/tee_shm.c
+@@ -230,7 +230,7 @@ int tee_dyn_shm_alloc_helper(struct tee_shm *shm, size_t size, size_t align,
+ 	pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
+ 	if (!pages) {
+ 		rc = -ENOMEM;
+-		goto err;
++		goto err_pages;
+ 	}
+ 
+ 	for (i = 0; i < nr_pages; i++)
+@@ -243,11 +243,13 @@ int tee_dyn_shm_alloc_helper(struct tee_shm *shm, size_t size, size_t align,
+ 		rc = shm_register(shm->ctx, shm, pages, nr_pages,
+ 				  (unsigned long)shm->kaddr);
+ 		if (rc)
+-			goto err;
++			goto err_kfree;
+ 	}
+ 
+ 	return 0;
+-err:
++err_kfree:
++	kfree(pages);
++err_pages:
+ 	free_pages_exact(shm->kaddr, shm->size);
+ 	shm->kaddr = NULL;
+ 	return rc;
+@@ -560,9 +562,13 @@ EXPORT_SYMBOL_GPL(tee_shm_get_from_id);
+  */
+ void tee_shm_put(struct tee_shm *shm)
+ {
+-	struct tee_device *teedev = shm->ctx->teedev;
++	struct tee_device *teedev;
+ 	bool do_release = false;
+ 
++	if (!shm || !shm->ctx || !shm->ctx->teedev)
++		return;
++
++	teedev = shm->ctx->teedev;
+ 	mutex_lock(&teedev->mutex);
+ 	if (refcount_dec_and_test(&shm->refcount)) {
+ 		/*
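
The tee_shm allocation fix is a textbook error-unwind bug: with a single err: label that only called free_pages_exact(), a failure in shm_register() leaked the pages array. Splitting the exit path into err_kfree/err_pages makes each label release exactly what is live at its jump sites. The shape of the pattern, reduced to two allocations (the fail parameter stands in for shm_register() failing):

    #include <stdlib.h>

    /* Two-stage setup with staged unwind labels; each label releases
     * only what was allocated before its jump sites.
     */
    int setup(void **out_buf, void ***out_pages, size_t n, int fail)
    {
        void *buf;
        void **pages;

        buf = malloc(4096);
        if (!buf)
            return -1;

        pages = calloc(n, sizeof(*pages));
        if (!pages)
            goto err_buf;        /* only buf is live */

        if (fail)                /* stand-in for shm_register() failing */
            goto err_pages;      /* both allocations are live */

        *out_buf = buf;
        *out_pages = pages;
        return 0;

    err_pages:
        free(pages);
    err_buf:
        free(buf);
        return -1;
    }
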
+diff --git a/drivers/thermal/mediatek/lvts_thermal.c b/drivers/thermal/mediatek/lvts_thermal.c
+index 017191b9f8645f..e6fe6cc35821d3 100644
+--- a/drivers/thermal/mediatek/lvts_thermal.c
++++ b/drivers/thermal/mediatek/lvts_thermal.c
+@@ -66,10 +66,14 @@
+ #define LVTS_TSSEL_CONF				0x13121110
+ #define LVTS_CALSCALE_CONF			0x300
+ 
+-#define LVTS_MONINT_OFFSET_SENSOR0		0xC
+-#define LVTS_MONINT_OFFSET_SENSOR1		0x180
+-#define LVTS_MONINT_OFFSET_SENSOR2		0x3000
+-#define LVTS_MONINT_OFFSET_SENSOR3		0x3000000
++#define LVTS_MONINT_OFFSET_HIGH_INTEN_SENSOR0		BIT(3)
++#define LVTS_MONINT_OFFSET_HIGH_INTEN_SENSOR1		BIT(8)
++#define LVTS_MONINT_OFFSET_HIGH_INTEN_SENSOR2		BIT(13)
++#define LVTS_MONINT_OFFSET_HIGH_INTEN_SENSOR3		BIT(25)
++#define LVTS_MONINT_OFFSET_LOW_INTEN_SENSOR0		BIT(2)
++#define LVTS_MONINT_OFFSET_LOW_INTEN_SENSOR1		BIT(7)
++#define LVTS_MONINT_OFFSET_LOW_INTEN_SENSOR2		BIT(12)
++#define LVTS_MONINT_OFFSET_LOW_INTEN_SENSOR3		BIT(24)
+ 
+ #define LVTS_INT_SENSOR0			0x0009001F
+ #define LVTS_INT_SENSOR1			0x001203E0
+@@ -329,23 +333,41 @@ static int lvts_get_temp(struct thermal_zone_device *tz, int *temp)
+ 
+ static void lvts_update_irq_mask(struct lvts_ctrl *lvts_ctrl)
+ {
+-	u32 masks[] = {
+-		LVTS_MONINT_OFFSET_SENSOR0,
+-		LVTS_MONINT_OFFSET_SENSOR1,
+-		LVTS_MONINT_OFFSET_SENSOR2,
+-		LVTS_MONINT_OFFSET_SENSOR3,
++	static const u32 high_offset_inten_masks[] = {
++		LVTS_MONINT_OFFSET_HIGH_INTEN_SENSOR0,
++		LVTS_MONINT_OFFSET_HIGH_INTEN_SENSOR1,
++		LVTS_MONINT_OFFSET_HIGH_INTEN_SENSOR2,
++		LVTS_MONINT_OFFSET_HIGH_INTEN_SENSOR3,
++	};
++	static const u32 low_offset_inten_masks[] = {
++		LVTS_MONINT_OFFSET_LOW_INTEN_SENSOR0,
++		LVTS_MONINT_OFFSET_LOW_INTEN_SENSOR1,
++		LVTS_MONINT_OFFSET_LOW_INTEN_SENSOR2,
++		LVTS_MONINT_OFFSET_LOW_INTEN_SENSOR3,
+ 	};
+ 	u32 value = 0;
+ 	int i;
+ 
+ 	value = readl(LVTS_MONINT(lvts_ctrl->base));
+ 
+-	for (i = 0; i < ARRAY_SIZE(masks); i++) {
++	for (i = 0; i < ARRAY_SIZE(high_offset_inten_masks); i++) {
+ 		if (lvts_ctrl->sensors[i].high_thresh == lvts_ctrl->high_thresh
+-		    && lvts_ctrl->sensors[i].low_thresh == lvts_ctrl->low_thresh)
+-			value |= masks[i];
+-		else
+-			value &= ~masks[i];
++		    && lvts_ctrl->sensors[i].low_thresh == lvts_ctrl->low_thresh) {
++			/*
++			 * The minimum threshold needs to be configured in the
++			 * OFFSETL register to get working interrupts, but we
++			 * don't actually want to generate interrupts when
++			 * crossing it.
++			 */
++			if (lvts_ctrl->low_thresh == -INT_MAX) {
++				value &= ~low_offset_inten_masks[i];
++				value |= high_offset_inten_masks[i];
++			} else {
++				value |= low_offset_inten_masks[i] | high_offset_inten_masks[i];
++			}
++		} else {
++			value &= ~(low_offset_inten_masks[i] | high_offset_inten_masks[i]);
++		}
+ 	}
+ 
+ 	writel(value, LVTS_MONINT(lvts_ctrl->base));
+diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
+index db53a3263fbd05..89f582612fb997 100644
+--- a/fs/btrfs/btrfs_inode.h
++++ b/fs/btrfs/btrfs_inode.h
+@@ -247,7 +247,7 @@ struct btrfs_inode {
+ 		u64 new_delalloc_bytes;
+ 		/*
+ 		 * The offset of the last dir index key that was logged.
+-		 * This is used only for directories.
++		 * This is used only for directories. Protected by 'log_mutex'.
+ 		 */
+ 		u64 last_dir_index_offset;
+ 	};
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index f84e3f9fad84aa..98d087a14be5eb 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -7767,6 +7767,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
+ 	ei->last_sub_trans = 0;
+ 	ei->logged_trans = 0;
+ 	ei->delalloc_bytes = 0;
++	/* new_delalloc_bytes and last_dir_index_offset are in a union. */
+ 	ei->new_delalloc_bytes = 0;
+ 	ei->defrag_bytes = 0;
+ 	ei->disk_i_size = 0;
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index 31adea5b0b96a3..f917fdae7e672b 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -3322,6 +3322,31 @@ int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
+ 	return 0;
+ }
+ 
++static bool mark_inode_as_not_logged(const struct btrfs_trans_handle *trans,
++				     struct btrfs_inode *inode)
++{
++	bool ret = false;
++
++	/*
++	 * Do this only if ->logged_trans is still 0 to prevent races with
++	 * concurrent logging as we may see the inode not logged when
++	 * inode_logged() is called but it gets logged after inode_logged() did
++	 * not find it in the log tree and we end up setting ->logged_trans to a
++	 * value less than trans->transid after the concurrent logging task has
++	 * set it to trans->transid. As a consequence, subsequent rename, unlink
++	 * and link operations may end up not logging new names and removing old
++	 * names from the log.
++	 */
++	spin_lock(&inode->lock);
++	if (inode->logged_trans == 0)
++		inode->logged_trans = trans->transid - 1;
++	else if (inode->logged_trans == trans->transid)
++		ret = true;
++	spin_unlock(&inode->lock);
++
++	return ret;
++}
++
+ /*
+  * Check if an inode was logged in the current transaction. This correctly deals
+  * with the case where the inode was logged but has a logged_trans of 0, which
+@@ -3339,15 +3364,32 @@ static int inode_logged(const struct btrfs_trans_handle *trans,
+ 	struct btrfs_key key;
+ 	int ret;
+ 
+-	if (inode->logged_trans == trans->transid)
++	/*
++	 * Quick lockless call, since once ->logged_trans is set to the current
++	 * transaction, we never set it to a lower value anywhere else.
++	 */
++	if (data_race(inode->logged_trans) == trans->transid)
+ 		return 1;
+ 
+ 	/*
+-	 * If logged_trans is not 0, then we know the inode logged was not logged
+-	 * in this transaction, so we can return false right away.
++	 * If logged_trans is not 0 and not trans->transid, then we know the
++	 * inode was not logged in this transaction, so we can return false
++	 * right away. We take the lock to avoid a race caused by load/store
++	 * tearing with a concurrent btrfs_log_inode() call or a concurrent task
++	 * in this function further below - an update to trans->transid can be
++	 * torn into two 32-bit updates for example, in which case we could
++	 * see a positive value that is not trans->transid and assume the inode
++	 * was not logged when it was.
+ 	 */
+-	if (inode->logged_trans > 0)
++	spin_lock(&inode->lock);
++	if (inode->logged_trans == trans->transid) {
++		spin_unlock(&inode->lock);
++		return 1;
++	} else if (inode->logged_trans > 0) {
++		spin_unlock(&inode->lock);
+ 		return 0;
++	}
++	spin_unlock(&inode->lock);
+ 
+ 	/*
+ 	 * If no log tree was created for this root in this transaction, then
+@@ -3356,10 +3398,8 @@ static int inode_logged(const struct btrfs_trans_handle *trans,
+ 	 * transaction's ID, to avoid the search below in a future call in case
+ 	 * a log tree gets created after this.
+ 	 */
+-	if (!test_bit(BTRFS_ROOT_HAS_LOG_TREE, &inode->root->state)) {
+-		inode->logged_trans = trans->transid - 1;
+-		return 0;
+-	}
++	if (!test_bit(BTRFS_ROOT_HAS_LOG_TREE, &inode->root->state))
++		return mark_inode_as_not_logged(trans, inode);
+ 
+ 	/*
+ 	 * We have a log tree and the inode's logged_trans is 0. We can't tell
+@@ -3413,8 +3453,7 @@ static int inode_logged(const struct btrfs_trans_handle *trans,
+ 		 * Set logged_trans to a value greater than 0 and less than the
+ 		 * current transaction to avoid doing the search in future calls.
+ 		 */
+-		inode->logged_trans = trans->transid - 1;
+-		return 0;
++		return mark_inode_as_not_logged(trans, inode);
+ 	}
+ 
+ 	/*
+@@ -3422,20 +3461,9 @@ static int inode_logged(const struct btrfs_trans_handle *trans,
+ 	 * the current transaction's ID, to avoid future tree searches as long as
+ 	 * the inode is not evicted again.
+ 	 */
++	spin_lock(&inode->lock);
+ 	inode->logged_trans = trans->transid;
+-
+-	/*
+-	 * If it's a directory, then we must set last_dir_index_offset to the
+-	 * maximum possible value, so that the next attempt to log the inode does
+-	 * not skip checking if dir index keys found in modified subvolume tree
+-	 * leaves have been logged before, otherwise it would result in attempts
+-	 * to insert duplicate dir index keys in the log tree. This must be done
+-	 * because last_dir_index_offset is an in-memory only field, not persisted
+-	 * in the inode item or any other on-disk structure, so its value is lost
+-	 * once the inode is evicted.
+-	 */
+-	if (S_ISDIR(inode->vfs_inode.i_mode))
+-		inode->last_dir_index_offset = (u64)-1;
++	spin_unlock(&inode->lock);
+ 
+ 	return 1;
+ }
+@@ -4028,7 +4056,7 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
+ 
+ /*
+  * If the inode was logged before and it was evicted, then its
+- * last_dir_index_offset is (u64)-1, so we don't the value of the last index
++ * last_dir_index_offset is 0, so we don't know the value of the last index
+  * key offset. If that's the case, search for it and update the inode. This
+  * is to avoid lookups in the log tree every time we try to insert a dir index
+  * key from a leaf changed in the current transaction, and to allow us to always
+@@ -4044,7 +4072,7 @@ static int update_last_dir_index_offset(struct btrfs_inode *inode,
+ 
+ 	lockdep_assert_held(&inode->log_mutex);
+ 
+-	if (inode->last_dir_index_offset != (u64)-1)
++	if (inode->last_dir_index_offset != 0)
+ 		return 0;
+ 
+ 	if (!ctx->logged_before) {
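
inode_logged() now layers a lockless fast path over a locked slow path: logged_trans is a plain u64 that another task may be storing concurrently, but once it equals trans->transid it is never lowered, so a racy read that answers "logged" is trustworthy, while every other answer must be re-checked under inode->lock (the data_race() annotation tells KCSAN the unlocked read is intentional). The same structure in C11 atomics, which is what userspace needs to make the unlocked read well-defined:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    static _Atomic uint64_t logged_trans;   /* 0 = never logged */
    static pthread_mutex_t inode_lock = PTHREAD_MUTEX_INITIALIZER;

    bool inode_logged(uint64_t transid)
    {
        bool ret;

        /* Fast path: a monotonic field that already equals transid
         * can be trusted without the lock.
         */
        if (atomic_load_explicit(&logged_trans,
                                 memory_order_relaxed) == transid)
            return true;

        /* Slow path: anything else may be an in-flight store, so
         * confirm under the lock before answering "not logged".
         */
        pthread_mutex_lock(&inode_lock);
        ret = (atomic_load(&logged_trans) == transid);
        pthread_mutex_unlock(&inode_lock);
        return ret;
    }
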
+diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
+index 2fdb2987c83ac2..8e8edfe0c6190e 100644
+--- a/fs/btrfs/zoned.c
++++ b/fs/btrfs/zoned.c
+@@ -2186,6 +2186,40 @@ static void wait_eb_writebacks(struct btrfs_block_group *block_group)
+ 	rcu_read_unlock();
+ }
+ 
++static int call_zone_finish(struct btrfs_block_group *block_group,
++			    struct btrfs_io_stripe *stripe)
++{
++	struct btrfs_device *device = stripe->dev;
++	const u64 physical = stripe->physical;
++	struct btrfs_zoned_device_info *zinfo = device->zone_info;
++	int ret;
++
++	if (!device->bdev)
++		return 0;
++
++	if (zinfo->max_active_zones == 0)
++		return 0;
++
++	if (btrfs_dev_is_sequential(device, physical)) {
++		unsigned int nofs_flags;
++
++		nofs_flags = memalloc_nofs_save();
++		ret = blkdev_zone_mgmt(device->bdev, REQ_OP_ZONE_FINISH,
++				       physical >> SECTOR_SHIFT,
++				       zinfo->zone_size >> SECTOR_SHIFT);
++		memalloc_nofs_restore(nofs_flags);
++
++		if (ret)
++			return ret;
++	}
++
++	if (!(block_group->flags & BTRFS_BLOCK_GROUP_DATA))
++		zinfo->reserved_active_zones++;
++	btrfs_dev_clear_active_zone(device, physical);
++
++	return 0;
++}
++
+ static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_written)
+ {
+ 	struct btrfs_fs_info *fs_info = block_group->fs_info;
+@@ -2270,31 +2304,12 @@ static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_writ
+ 	down_read(&dev_replace->rwsem);
+ 	map = block_group->physical_map;
+ 	for (i = 0; i < map->num_stripes; i++) {
+-		struct btrfs_device *device = map->stripes[i].dev;
+-		const u64 physical = map->stripes[i].physical;
+-		struct btrfs_zoned_device_info *zinfo = device->zone_info;
+-		unsigned int nofs_flags;
+-
+-		if (!device->bdev)
+-			continue;
+-
+-		if (zinfo->max_active_zones == 0)
+-			continue;
+-
+-		nofs_flags = memalloc_nofs_save();
+-		ret = blkdev_zone_mgmt(device->bdev, REQ_OP_ZONE_FINISH,
+-				       physical >> SECTOR_SHIFT,
+-				       zinfo->zone_size >> SECTOR_SHIFT);
+-		memalloc_nofs_restore(nofs_flags);
+ 
++		ret = call_zone_finish(block_group, &map->stripes[i]);
+ 		if (ret) {
+ 			up_read(&dev_replace->rwsem);
+ 			return ret;
+ 		}
+-
+-		if (!(block_group->flags & BTRFS_BLOCK_GROUP_DATA))
+-			zinfo->reserved_active_zones++;
+-		btrfs_dev_clear_active_zone(device, physical);
+ 	}
+ 	up_read(&dev_replace->rwsem);
+ 
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index a95525bfb99cf2..d8120b88fa00e9 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -1823,7 +1823,8 @@ static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino)
+  */
+ enum {
+ 	EXT4_MF_MNTDIR_SAMPLED,
+-	EXT4_MF_FC_INELIGIBLE	/* Fast commit ineligible */
++	EXT4_MF_FC_INELIGIBLE,	/* Fast commit ineligible */
++	EXT4_MF_JOURNAL_DESTROY	/* Journal is in process of destroying */
+ };
+ 
+ static inline void ext4_set_mount_flag(struct super_block *sb, int bit)
+diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h
+index 0c77697d5e90d0..ada46189b08603 100644
+--- a/fs/ext4/ext4_jbd2.h
++++ b/fs/ext4/ext4_jbd2.h
+@@ -513,4 +513,33 @@ static inline int ext4_should_dioread_nolock(struct inode *inode)
+ 	return 1;
+ }
+ 
++/*
++ * Pass journal explicitly as it may not be cached in the sbi->s_journal in some
++ * cases
++ */
++static inline int ext4_journal_destroy(struct ext4_sb_info *sbi, journal_t *journal)
++{
++	int err = 0;
++
++	/*
++	 * At this point only two things can be operating on the journal.
++	 * JBD2 thread performing transaction commit and s_sb_upd_work
++	 * issuing sb update through the journal. Once we set
++	 * EXT4_JOURNAL_DESTROY, new ext4_handle_error() calls will not
++	 * EXT4_MF_JOURNAL_DESTROY, new ext4_handle_error() calls will not
++	 * ext4_handle_error() calls from the running transaction commit are
++	 * finished. Hence no new s_sb_upd_work can be queued after we
++	 * flush it here.
++	 */
++	ext4_set_mount_flag(sbi->s_sb, EXT4_MF_JOURNAL_DESTROY);
++
++	ext4_force_commit(sbi->s_sb);
++	flush_work(&sbi->s_sb_upd_work);
++
++	err = jbd2_journal_destroy(journal);
++	sbi->s_journal = NULL;
++
++	return err;
++}
++
+ #endif	/* _EXT4_JBD2_H */
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 722ac723f49b6e..58d125ad237143 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -719,9 +719,13 @@ static void ext4_handle_error(struct super_block *sb, bool force_ro, int error,
+ 		 * In case the fs should keep running, we need to writeout
+ 		 * superblock through the journal. Due to lock ordering
+ 		 * constraints, it may not be safe to do it right here so we
+-		 * defer superblock flushing to a workqueue.
++		 * defer superblock flushing to a workqueue. We just need to be
++		 * careful when the journal is already shutting down. If we get
++		 * here in that case, just update the sb directly as the last
++		 * transaction won't commit anyway.
+ 		 */
+-		if (continue_fs && journal)
++		if (continue_fs && journal &&
++		    !ext4_test_mount_flag(sb, EXT4_MF_JOURNAL_DESTROY))
+ 			schedule_work(&EXT4_SB(sb)->s_sb_upd_work);
+ 		else
+ 			ext4_commit_super(sb);
+@@ -1306,18 +1310,17 @@ static void ext4_put_super(struct super_block *sb)
+ 	ext4_unregister_li_request(sb);
+ 	ext4_quotas_off(sb, EXT4_MAXQUOTAS);
+ 
+-	flush_work(&sbi->s_sb_upd_work);
+ 	destroy_workqueue(sbi->rsv_conversion_wq);
+ 	ext4_release_orphan_info(sb);
+ 
+ 	if (sbi->s_journal) {
+ 		aborted = is_journal_aborted(sbi->s_journal);
+-		err = jbd2_journal_destroy(sbi->s_journal);
+-		sbi->s_journal = NULL;
++		err = ext4_journal_destroy(sbi, sbi->s_journal);
+ 		if ((err < 0) && !aborted) {
+ 			ext4_abort(sb, -err, "Couldn't clean up the journal");
+ 		}
+-	}
++	} else
++		flush_work(&sbi->s_sb_upd_work);
+ 
+ 	ext4_es_unregister_shrinker(sbi);
+ 	timer_shutdown_sync(&sbi->s_err_report);
+@@ -4955,10 +4958,7 @@ static int ext4_load_and_init_journal(struct super_block *sb,
+ 	return 0;
+ 
+ out:
+-	/* flush s_sb_upd_work before destroying the journal. */
+-	flush_work(&sbi->s_sb_upd_work);
+-	jbd2_journal_destroy(sbi->s_journal);
+-	sbi->s_journal = NULL;
++	ext4_journal_destroy(sbi, sbi->s_journal);
+ 	return -EINVAL;
+ }
+ 
+@@ -5647,10 +5647,7 @@ failed_mount8: __maybe_unused
+ 	sbi->s_ea_block_cache = NULL;
+ 
+ 	if (sbi->s_journal) {
+-		/* flush s_sb_upd_work before journal destroy. */
+-		flush_work(&sbi->s_sb_upd_work);
+-		jbd2_journal_destroy(sbi->s_journal);
+-		sbi->s_journal = NULL;
++		ext4_journal_destroy(sbi, sbi->s_journal);
+ 	}
+ failed_mount3a:
+ 	ext4_es_unregister_shrinker(sbi);
+@@ -5958,7 +5955,7 @@ static journal_t *ext4_open_dev_journal(struct super_block *sb,
+ 	return journal;
+ 
+ out_journal:
+-	jbd2_journal_destroy(journal);
++	ext4_journal_destroy(EXT4_SB(sb), journal);
+ out_bdev:
+ 	bdev_fput(bdev_file);
+ 	return ERR_PTR(errno);
+@@ -6075,8 +6072,7 @@ static int ext4_load_journal(struct super_block *sb,
+ 	EXT4_SB(sb)->s_journal = journal;
+ 	err = ext4_clear_journal_err(sb, es);
+ 	if (err) {
+-		EXT4_SB(sb)->s_journal = NULL;
+-		jbd2_journal_destroy(journal);
++		ext4_journal_destroy(EXT4_SB(sb), journal);
+ 		return err;
+ 	}
+ 
+@@ -6094,7 +6090,7 @@ static int ext4_load_journal(struct super_block *sb,
+ 	return 0;
+ 
+ err_out:
+-	jbd2_journal_destroy(journal);
++	ext4_journal_destroy(EXT4_SB(sb), journal);
+ 	return err;
+ }
+ 
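
ext4_journal_destroy() is all about ordering: set EXT4_MF_JOURNAL_DESTROY first so ext4_handle_error() stops scheduling s_sb_upd_work, then force a commit so error handling inside the running transaction finishes, and only then flush the workqueue and tear the journal down. A single-threaded model of the flag-then-drain contract (the kernel gets the draining from flush_work(); these names are illustrative):

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_bool journal_destroying;
    static int pending_updates;         /* models the workqueue depth */

    /* Error path: refuses to defer new work once teardown has begun. */
    void handle_error(void)
    {
        if (atomic_load(&journal_destroying))
            return;             /* caller writes the sb directly instead */
        pending_updates++;      /* stand-in for schedule_work() */
    }

    void journal_destroy(void)
    {
        /* 1. close the gate */
        atomic_store(&journal_destroying, true);
        /* 2. (force_commit: let already-running handlers finish) */
        /* 3. drain what was queued before the gate closed */
        while (pending_updates)
            pending_updates--;  /* stand-in for flush_work() */
        /* 4. only now is it safe to free the journal */
    }
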
+diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
+index 2391b09f4cedec..4ae226778d646c 100644
+--- a/fs/fs-writeback.c
++++ b/fs/fs-writeback.c
+@@ -2572,10 +2572,6 @@ void __mark_inode_dirty(struct inode *inode, int flags)
+ 			wakeup_bdi = inode_io_list_move_locked(inode, wb,
+ 							       dirty_list);
+ 
+-			spin_unlock(&wb->list_lock);
+-			spin_unlock(&inode->i_lock);
+-			trace_writeback_dirty_inode_enqueue(inode);
+-
+ 			/*
+ 			 * If this is the first dirty inode for this bdi,
+ 			 * we have to wake-up the corresponding bdi thread
+@@ -2585,6 +2581,11 @@ void __mark_inode_dirty(struct inode *inode, int flags)
+ 			if (wakeup_bdi &&
+ 			    (wb->bdi->capabilities & BDI_CAP_WRITEBACK))
+ 				wb_wakeup_delayed(wb);
++
++			spin_unlock(&wb->list_lock);
++			spin_unlock(&inode->i_lock);
++			trace_writeback_dirty_inode_enqueue(inode);
++
+ 			return;
+ 		}
+ 	}
+diff --git a/fs/namespace.c b/fs/namespace.c
+index 962fda4fa2467e..c8519302f58240 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -2227,7 +2227,7 @@ void drop_collected_mounts(struct vfsmount *mnt)
+ 	namespace_unlock();
+ }
+ 
+-bool has_locked_children(struct mount *mnt, struct dentry *dentry)
++static bool __has_locked_children(struct mount *mnt, struct dentry *dentry)
+ {
+ 	struct mount *child;
+ 
+@@ -2241,6 +2241,16 @@ bool has_locked_children(struct mount *mnt, struct dentry *dentry)
+ 	return false;
+ }
+ 
++bool has_locked_children(struct mount *mnt, struct dentry *dentry)
++{
++	bool res;
++
++	read_seqlock_excl(&mount_lock);
++	res = __has_locked_children(mnt, dentry);
++	read_sequnlock_excl(&mount_lock);
++	return res;
++}
++
+ /**
+  * clone_private_mount - create a private clone of a path
+  * @path: path to clone
+@@ -2268,7 +2278,7 @@ struct vfsmount *clone_private_mount(const struct path *path)
+ 		return ERR_PTR(-EPERM);
+ 	}
+ 
+-	if (has_locked_children(old_mnt, path->dentry))
++	if (__has_locked_children(old_mnt, path->dentry))
+ 		goto invalid;
+ 
+ 	new_mnt = clone_mnt(old_mnt, path->dentry, CL_PRIVATE);
+@@ -2762,7 +2772,7 @@ static struct mount *__do_loopback(struct path *old_path, int recurse)
+ 	if (!check_mnt(old) && old_path->dentry->d_op != &ns_dentry_operations)
+ 		return mnt;
+ 
+-	if (!recurse && has_locked_children(old, old_path->dentry))
++	if (!recurse && __has_locked_children(old, old_path->dentry))
+ 		return mnt;
+ 
+ 	if (recurse)
+@@ -3152,7 +3162,7 @@ static int do_set_group(struct path *from_path, struct path *to_path)
+ 		goto out;
+ 
+ 	/* From mount should not have locked children in place of To's root */
+-	if (has_locked_children(from, to->mnt.mnt_root))
++	if (__has_locked_children(from, to->mnt.mnt_root))
+ 		goto out;
+ 
+ 	/* Setting sharing groups is only allowed on private mounts */
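
The namespace.c change splits has_locked_children() into the usual locked/unlocked pair: callers that already hold mount_lock (__do_loopback(), do_set_group(), clone_private_mount()) use the bare __has_locked_children(), while the exported name becomes a wrapper that takes the seqlock itself. The convention in miniature, with a hypothetical data structure and a plain mutex standing in for the seqlock:

    #include <pthread.h>

    struct mnt { int nchildren; };      /* stand-in for the mount tree */

    static pthread_mutex_t mount_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Double-underscore variant: caller must already hold mount_lock. */
    static int __count_children(const struct mnt *m)
    {
        return m->nchildren;
    }

    /* Exported variant: safe for callers that do not hold the lock. */
    int count_children(const struct mnt *m)
    {
        int n;

        pthread_mutex_lock(&mount_lock);
        n = __count_children(m);
        pthread_mutex_unlock(&mount_lock);
        return n;
    }
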
+diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
+index 2cc5c99fe94167..4a7509389cf38d 100644
+--- a/fs/ocfs2/inode.c
++++ b/fs/ocfs2/inode.c
+@@ -1205,6 +1205,9 @@ static void ocfs2_clear_inode(struct inode *inode)
+ 	 * the journal is flushed before journal shutdown. Thus it is safe to
+ 	 * have inodes get cleaned up after journal shutdown.
+ 	 */
++	if (!osb->journal)
++		return;
++
+ 	jbd2_journal_release_jbd_inode(osb->journal->j_journal,
+ 				       &oi->ip_jinode);
+ }
+diff --git a/fs/proc/generic.c b/fs/proc/generic.c
+index e21d99fa926322..a87a9404e0d0c8 100644
+--- a/fs/proc/generic.c
++++ b/fs/proc/generic.c
+@@ -362,6 +362,25 @@ static const struct inode_operations proc_dir_inode_operations = {
+ 	.setattr	= proc_notify_change,
+ };
+ 
++static void pde_set_flags(struct proc_dir_entry *pde)
++{
++	const struct proc_ops *proc_ops = pde->proc_ops;
++
++	if (!proc_ops)
++		return;
++
++	if (proc_ops->proc_flags & PROC_ENTRY_PERMANENT)
++		pde->flags |= PROC_ENTRY_PERMANENT;
++	if (proc_ops->proc_read_iter)
++		pde->flags |= PROC_ENTRY_proc_read_iter;
++#ifdef CONFIG_COMPAT
++	if (proc_ops->proc_compat_ioctl)
++		pde->flags |= PROC_ENTRY_proc_compat_ioctl;
++#endif
++	if (proc_ops->proc_lseek)
++		pde->flags |= PROC_ENTRY_proc_lseek;
++}
++
+ /* returns the registered entry, or frees dp and returns NULL on failure */
+ struct proc_dir_entry *proc_register(struct proc_dir_entry *dir,
+ 		struct proc_dir_entry *dp)
+@@ -369,6 +388,8 @@ struct proc_dir_entry *proc_register(struct proc_dir_entry *dir,
+ 	if (proc_alloc_inum(&dp->low_ino))
+ 		goto out_free_entry;
+ 
++	pde_set_flags(dp);
++
+ 	write_lock(&proc_subdir_lock);
+ 	dp->parent = dir;
+ 	if (pde_subdir_insert(dir, dp) == false) {
+@@ -557,20 +578,6 @@ struct proc_dir_entry *proc_create_reg(const char *name, umode_t mode,
+ 	return p;
+ }
+ 
+-static void pde_set_flags(struct proc_dir_entry *pde)
+-{
+-	if (pde->proc_ops->proc_flags & PROC_ENTRY_PERMANENT)
+-		pde->flags |= PROC_ENTRY_PERMANENT;
+-	if (pde->proc_ops->proc_read_iter)
+-		pde->flags |= PROC_ENTRY_proc_read_iter;
+-#ifdef CONFIG_COMPAT
+-	if (pde->proc_ops->proc_compat_ioctl)
+-		pde->flags |= PROC_ENTRY_proc_compat_ioctl;
+-#endif
+-	if (pde->proc_ops->proc_lseek)
+-		pde->flags |= PROC_ENTRY_proc_lseek;
+-}
+-
+ struct proc_dir_entry *proc_create_data(const char *name, umode_t mode,
+ 		struct proc_dir_entry *parent,
+ 		const struct proc_ops *proc_ops, void *data)
+@@ -581,7 +588,6 @@ struct proc_dir_entry *proc_create_data(const char *name, umode_t mode,
+ 	if (!p)
+ 		return NULL;
+ 	p->proc_ops = proc_ops;
+-	pde_set_flags(p);
+ 	return proc_register(parent, p);
+ }
+ EXPORT_SYMBOL(proc_create_data);
+@@ -632,7 +638,6 @@ struct proc_dir_entry *proc_create_seq_private(const char *name, umode_t mode,
+ 	p->proc_ops = &proc_seq_ops;
+ 	p->seq_ops = ops;
+ 	p->state_size = state_size;
+-	pde_set_flags(p);
+ 	return proc_register(parent, p);
+ }
+ EXPORT_SYMBOL(proc_create_seq_private);
+@@ -663,7 +668,6 @@ struct proc_dir_entry *proc_create_single_data(const char *name, umode_t mode,
+ 		return NULL;
+ 	p->proc_ops = &proc_single_ops;
+ 	p->single_show = show;
+-	pde_set_flags(p);
+ 	return proc_register(parent, p);
+ }
+ EXPORT_SYMBOL(proc_create_single_data);
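
Hoisting pde_set_flags() into proc_register() means the flag snapshot now happens on every creation path, not just the proc_create_* helpers shown above that each used to call it, and it happens before pde_subdir_insert() makes the entry reachable by lookups. The essence of snapshot-before-publish, with hypothetical structures:

    #include <stdbool.h>
    #include <stddef.h>

    struct ops   { unsigned flags; };
    struct entry {
        const struct ops *ops;      /* may be NULL for directories */
        unsigned flags;             /* cached copy, set once */
    };

    static struct entry *table[16]; /* stand-in for the subdir tree */

    static void set_flags(struct entry *e)
    {
        if (e->ops)                 /* the NULL check the hunk adds */
            e->flags = e->ops->flags;
    }

    static bool publish(struct entry *e)
    {
        for (int i = 0; i < 16; i++) {
            if (!table[i]) {
                table[i] = e;       /* entry becomes visible here */
                return true;
            }
        }
        return false;
    }

    bool register_entry(struct entry *e)
    {
        set_flags(e);               /* must precede visibility */
        return publish(e);
    }

The NULL check that the relocated pde_set_flags() grows is what makes the common registration path safe for directory entries, which have no proc_ops.
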
+diff --git a/fs/smb/client/cifs_unicode.c b/fs/smb/client/cifs_unicode.c
+index 4cc6e0896fad37..f8659d36793f17 100644
+--- a/fs/smb/client/cifs_unicode.c
++++ b/fs/smb/client/cifs_unicode.c
+@@ -629,6 +629,9 @@ cifs_strndup_to_utf16(const char *src, const int maxlen, int *utf16_len,
+ 	int len;
+ 	__le16 *dst;
+ 
++	if (!src)
++		return NULL;
++
+ 	len = cifs_local_to_utf16_bytes(src, maxlen, cp);
+ 	len += 2; /* NULL */
+ 	dst = kmalloc(len, GFP_KERNEL);
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index a901aed77141f0..cd9c97f6f9484b 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -990,6 +990,8 @@ queue_limits_start_update(struct request_queue *q)
+ 	mutex_lock(&q->limits_lock);
+ 	return q->limits;
+ }
++int queue_limits_commit_update_frozen(struct request_queue *q,
++		struct queue_limits *lim);
+ int queue_limits_commit_update(struct request_queue *q,
+ 		struct queue_limits *lim);
+ int queue_limits_set(struct request_queue *q, struct queue_limits *lim);
+diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
+index 7e029c82ae45f0..26ee360c345fa1 100644
+--- a/include/linux/bpf-cgroup.h
++++ b/include/linux/bpf-cgroup.h
+@@ -77,9 +77,6 @@ to_cgroup_bpf_attach_type(enum bpf_attach_type attach_type)
+ extern struct static_key_false cgroup_bpf_enabled_key[MAX_CGROUP_BPF_ATTACH_TYPE];
+ #define cgroup_bpf_enabled(atype) static_branch_unlikely(&cgroup_bpf_enabled_key[atype])
+ 
+-#define for_each_cgroup_storage_type(stype) \
+-	for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)
+-
+ struct bpf_cgroup_storage_map;
+ 
+ struct bpf_storage_buffer {
+@@ -518,8 +515,6 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
+ #define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \
+ 				       kernel_optval) ({ 0; })
+ 
+-#define for_each_cgroup_storage_type(stype) for (; false; )
+-
+ #endif /* CONFIG_CGROUP_BPF */
+ 
+ #endif /* _BPF_CGROUP_H */
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index 1150a595aa54c2..6db72c66de91d6 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -205,6 +205,20 @@ enum btf_field_type {
+ 	BPF_WORKQUEUE  = (1 << 10),
+ };
+ 
++enum bpf_cgroup_storage_type {
++	BPF_CGROUP_STORAGE_SHARED,
++	BPF_CGROUP_STORAGE_PERCPU,
++	__BPF_CGROUP_STORAGE_MAX
++#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX
++};
++
++#ifdef CONFIG_CGROUP_BPF
++# define for_each_cgroup_storage_type(stype) \
++	for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)
++#else
++# define for_each_cgroup_storage_type(stype) for (; false; )
++#endif /* CONFIG_CGROUP_BPF */
++
+ typedef void (*btf_dtor_kfunc_t)(void *);
+ 
+ struct btf_field_kptr {
+@@ -256,6 +270,19 @@ struct bpf_list_node_kern {
+ 	void *owner;
+ } __attribute__((aligned(8)));
+ 
++/* 'Ownership' of program-containing map is claimed by the first program
++ * that is going to use this map or by the first program which FD is
++ * stored in the map to make sure that all callers and callees have the
++ * same prog type, JITed flag and xdp_has_frags flag.
++ */
++struct bpf_map_owner {
++	enum bpf_prog_type type;
++	bool jited;
++	bool xdp_has_frags;
++	u64 storage_cookie[MAX_BPF_CGROUP_STORAGE_TYPE];
++	const struct btf_type *attach_func_proto;
++};
++
+ struct bpf_map {
+ 	const struct bpf_map_ops *ops;
+ 	struct bpf_map *inner_map_meta;
+@@ -288,24 +315,15 @@ struct bpf_map {
+ 		struct rcu_head rcu;
+ 	};
+ 	atomic64_t writecnt;
+-	/* 'Ownership' of program-containing map is claimed by the first program
+-	 * that is going to use this map or by the first program which FD is
+-	 * stored in the map to make sure that all callers and callees have the
+-	 * same prog type, JITed flag and xdp_has_frags flag.
+-	 */
+-	struct {
+-		const struct btf_type *attach_func_proto;
+-		spinlock_t lock;
+-		enum bpf_prog_type type;
+-		bool jited;
+-		bool xdp_has_frags;
+-	} owner;
++	spinlock_t owner_lock;
++	struct bpf_map_owner *owner;
+ 	bool bypass_spec_v1;
+ 	bool frozen; /* write-once; write-protected by freeze_mutex */
+ 	bool free_after_mult_rcu_gp;
+ 	bool free_after_rcu_gp;
+ 	atomic64_t sleepable_refcnt;
+ 	s64 __percpu *elem_count;
++	u64 cookie; /* write-once */
+ };
+ 
+ static inline const char *btf_field_type_name(enum btf_field_type type)
+@@ -1025,14 +1043,6 @@ struct bpf_prog_offload {
+ 	u32			jited_len;
+ };
+ 
+-enum bpf_cgroup_storage_type {
+-	BPF_CGROUP_STORAGE_SHARED,
+-	BPF_CGROUP_STORAGE_PERCPU,
+-	__BPF_CGROUP_STORAGE_MAX
+-};
+-
+-#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX
+-
+ /* The longest tracepoint has 12 args.
+  * See include/trace/bpf_probe.h
+  */
+@@ -1980,6 +1990,16 @@ static inline bool bpf_map_flags_access_ok(u32 access_flags)
+ 	       (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
+ }
+ 
++static inline struct bpf_map_owner *bpf_map_owner_alloc(struct bpf_map *map)
++{
++	return kzalloc(sizeof(*map->owner), GFP_ATOMIC);
++}
++
++static inline void bpf_map_owner_free(struct bpf_map *map)
++{
++	kfree(map->owner);
++}
++
+ struct bpf_event_entry {
+ 	struct perf_event *event;
+ 	struct file *perf_file;
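
The bpf.h reorganization turns the embedded owner struct into a pointer allocated on first use, shrinking every struct bpf_map that never becomes prog-containing; bpf_map_owner_alloc() uses GFP_ATOMIC presumably because callers hold the new owner_lock spinlock. Allocate-on-first-use under a lock, modeled in userspace:

    #include <pthread.h>
    #include <stdlib.h>

    struct owner { int prog_type; };

    struct map {
        pthread_mutex_t owner_lock;
        struct owner *owner;        /* NULL until a program claims the map */
    };

    /* Returns the owner, allocating it on first call; NULL on OOM.
     * The lock ensures exactly one allocation ever wins.
     */
    struct owner *map_get_owner(struct map *m)
    {
        struct owner *o;

        pthread_mutex_lock(&m->owner_lock);
        o = m->owner;
        if (!o) {
            o = calloc(1, sizeof(*o));  /* kzalloc(..., GFP_ATOMIC) */
            m->owner = o;
        }
        pthread_mutex_unlock(&m->owner_lock);
        return o;
    }
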
+diff --git a/include/linux/hid.h b/include/linux/hid.h
+index 017d31f1d27b8f..7d8d09318fa91d 100644
+--- a/include/linux/hid.h
++++ b/include/linux/hid.h
+@@ -978,7 +978,6 @@ const struct hid_device_id *hid_match_device(struct hid_device *hdev,
+ 					     struct hid_driver *hdrv);
+ bool hid_compare_device_paths(struct hid_device *hdev_a,
+ 			      struct hid_device *hdev_b, char separator);
+-s32 hid_snto32(__u32 value, unsigned n);
+ __u32 hid_field_extract(const struct hid_device *hid, __u8 *report,
+ 		     unsigned offset, unsigned n);
+ 
+diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
+index 5ce332fc6ff507..61675ea95e0b98 100644
+--- a/include/linux/io_uring_types.h
++++ b/include/linux/io_uring_types.h
+@@ -646,8 +646,16 @@ struct io_kiocb {
+ 	atomic_t			refs;
+ 	bool				cancel_seq_set;
+ 	struct io_task_work		io_task_work;
+-	/* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
+-	struct hlist_node		hash_node;
++	union {
++		/*
++		 * for polled requests, i.e. IORING_OP_POLL_ADD and async armed
++		 * poll
++		 */
++		struct hlist_node	hash_node;
++
++		/* for private io_kiocb freeing */
++		struct rcu_head		rcu_head;
++	};
+ 	/* internal polling, see IORING_FEAT_FAST_POLL */
+ 	struct async_poll		*apoll;
+ 	/* opcode allocated if it needs to store data for async defer */
+diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
+index be6ca84db4d85c..1ba6e32909f89d 100644
+--- a/include/linux/pgtable.h
++++ b/include/linux/pgtable.h
+@@ -1697,6 +1697,22 @@ static inline int pmd_protnone(pmd_t pmd)
+ }
+ #endif /* CONFIG_NUMA_BALANCING */
+ 
++/*
++ * Architectures can set this mask to a combination of PGTBL_P?D_MODIFIED values
++ * and let generic vmalloc and ioremap code know when arch_sync_kernel_mappings()
++ * needs to be called.
++ */
++#ifndef ARCH_PAGE_TABLE_SYNC_MASK
++#define ARCH_PAGE_TABLE_SYNC_MASK 0
++#endif
++
++/*
++ * There is no default implementation for arch_sync_kernel_mappings(). It is
++ * relied upon the compiler to optimize calls out if ARCH_PAGE_TABLE_SYNC_MASK
++ * is 0.
++ */
++void arch_sync_kernel_mappings(unsigned long start, unsigned long end);
++
+ #endif /* CONFIG_MMU */
+ 
+ #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index b2827fce5a2de7..314328ab0b8438 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -3153,9 +3153,15 @@ static inline int skb_inner_network_offset(const struct sk_buff *skb)
+ 	return skb_inner_network_header(skb) - skb->data;
+ }
+ 
++static inline enum skb_drop_reason
++pskb_network_may_pull_reason(struct sk_buff *skb, unsigned int len)
++{
++	return pskb_may_pull_reason(skb, skb_network_offset(skb) + len);
++}
++
+ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
+ {
+-	return pskb_may_pull(skb, skb_network_offset(skb) + len);
++	return pskb_network_may_pull_reason(skb, len) == SKB_NOT_DROPPED_YET;
+ }
+ 
+ /*
+diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
+index 2dcf7621913114..1fce4f60677066 100644
+--- a/include/linux/vmalloc.h
++++ b/include/linux/vmalloc.h
+@@ -209,22 +209,6 @@ extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
+ extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
+ 							unsigned long pgoff);
+ 
+-/*
+- * Architectures can set this mask to a combination of PGTBL_P?D_MODIFIED values
+- * and let generic vmalloc and ioremap code know when arch_sync_kernel_mappings()
+- * needs to be called.
+- */
+-#ifndef ARCH_PAGE_TABLE_SYNC_MASK
+-#define ARCH_PAGE_TABLE_SYNC_MASK 0
+-#endif
+-
+-/*
+- * There is no default implementation for arch_sync_kernel_mappings(). It is
+- * relied upon the compiler to optimize calls out if ARCH_PAGE_TABLE_SYNC_MASK
+- * is 0.
+- */
+-void arch_sync_kernel_mappings(unsigned long start, unsigned long end);
+-
+ /*
+  *	Lowlevel-APIs (not for driver use!)
+  */
+diff --git a/include/net/dropreason-core.h b/include/net/dropreason-core.h
+index 4748680e8c88ee..02e7be19b0428f 100644
+--- a/include/net/dropreason-core.h
++++ b/include/net/dropreason-core.h
+@@ -92,6 +92,14 @@
+ 	FN(PACKET_SOCK_ERROR)		\
+ 	FN(TC_CHAIN_NOTFOUND)		\
+ 	FN(TC_RECLASSIFY_LOOP)		\
++	FN(VXLAN_INVALID_HDR)		\
++	FN(VXLAN_VNI_NOT_FOUND)		\
++	FN(MAC_INVALID_SOURCE)		\
++	FN(VXLAN_ENTRY_EXISTS)		\
++	FN(NO_TX_TARGET)		\
++	FN(IP_TUNNEL_ECN)		\
++	FN(TUNNEL_TXINFO)		\
++	FN(LOCAL_MAC)			\
+ 	FNe(MAX)
+ 
+ /**
+@@ -418,6 +426,38 @@ enum skb_drop_reason {
+ 	 * iterations.
+ 	 */
+ 	SKB_DROP_REASON_TC_RECLASSIFY_LOOP,
++	/**
++	 * @SKB_DROP_REASON_VXLAN_INVALID_HDR: VXLAN header is invalid. E.g.:
++	 * 1) reserved fields are not zero
++	 * 2) "I" flag is not set
++	 */
++	SKB_DROP_REASON_VXLAN_INVALID_HDR,
++	/** @SKB_DROP_REASON_VXLAN_VNI_NOT_FOUND: no VXLAN device found for VNI */
++	SKB_DROP_REASON_VXLAN_VNI_NOT_FOUND,
++	/** @SKB_DROP_REASON_MAC_INVALID_SOURCE: source mac is invalid */
++	SKB_DROP_REASON_MAC_INVALID_SOURCE,
++	/**
++	 * @SKB_DROP_REASON_VXLAN_ENTRY_EXISTS: trying to migrate a static
++	 * entry or an entry pointing to a nexthop.
++	 */
++	SKB_DROP_REASON_VXLAN_ENTRY_EXISTS,
++	/** @SKB_DROP_REASON_NO_TX_TARGET: no target found for xmit */
++	SKB_DROP_REASON_NO_TX_TARGET,
++	/**
++	 * @SKB_DROP_REASON_IP_TUNNEL_ECN: skb is dropped according to
++	 * RFC 6040 4.2, see __INET_ECN_decapsulate() for detail.
++	 */
++	SKB_DROP_REASON_IP_TUNNEL_ECN,
++	/**
++	 * @SKB_DROP_REASON_TUNNEL_TXINFO: packet without necessary metadata
++	 * reached a device which is in "external" mode.
++	 */
++	SKB_DROP_REASON_TUNNEL_TXINFO,
++	/**
++	 * @SKB_DROP_REASON_LOCAL_MAC: the source MAC address is equal to
++	 * the MAC address of the local netdev.
++	 */
++	SKB_DROP_REASON_LOCAL_MAC,
+ 	/**
+ 	 * @SKB_DROP_REASON_MAX: the maximum of core drop reasons, which
+ 	 * shouldn't be used as a real 'reason' - only for tracing code gen
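Each new reason feeds the kfree_skb tracepoint, so a tunnel drop becomes
attributable without a packet capture. A hedged sketch of how a receive
path consumes one of the new codes (simplified from the pattern used in
drivers/net/vxlan):

    struct vxlanhdr *vxh = (struct vxlanhdr *)(udp_hdr(skb) + 1);

    if (!(vxh->vx_flags & VXLAN_HF_VNI)) {
            /* "I" flag missing: record the precise cause, not a bare drop */
            kfree_skb_reason(skb, SKB_DROP_REASON_VXLAN_INVALID_HDR);
            return 0;
    }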
+diff --git a/include/net/dsa.h b/include/net/dsa.h
+index d7a6c2930277ea..877f9b270cf6fe 100644
+--- a/include/net/dsa.h
++++ b/include/net/dsa.h
+@@ -1003,6 +1003,7 @@ struct dsa_switch_ops {
+ 	/*
+ 	 * Port's MAC EEE settings
+ 	 */
++	bool	(*support_eee)(struct dsa_switch *ds, int port);
+ 	int	(*set_mac_eee)(struct dsa_switch *ds, int port,
+ 			       struct ethtool_keee *e);
+ 	int	(*get_mac_eee)(struct dsa_switch *ds, int port,
+@@ -1398,5 +1399,6 @@ static inline bool dsa_user_dev_check(const struct net_device *dev)
+ 
+ netdev_tx_t dsa_enqueue_skb(struct sk_buff *skb, struct net_device *dev);
+ void dsa_port_phylink_mac_change(struct dsa_switch *ds, int port, bool up);
++bool dsa_supports_eee(struct dsa_switch *ds, int port);
+ 
+ #endif
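The new .support_eee hook lets DSA veto the ethtool EEE ops per port
before touching the PHY. Drivers whose user ports all handle EEE can
simply plug in the dsa_supports_eee() helper exported further down; a
hypothetical driver's ops table, for illustration only:

    static const struct dsa_switch_ops foo_switch_ops = {
            /* ... */
            .support_eee    = dsa_supports_eee,     /* every user port */
            .set_mac_eee    = foo_set_mac_eee,      /* hypothetical hooks */
            .get_mac_eee    = foo_get_mac_eee,
    };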
+diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
+index 6a070478254d84..ae83a969ae64b2 100644
+--- a/include/net/ip_tunnels.h
++++ b/include/net/ip_tunnels.h
+@@ -439,7 +439,8 @@ int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *op,
+ int ip_tunnel_encap_setup(struct ip_tunnel *t,
+ 			  struct ip_tunnel_encap *ipencap);
+ 
+-static inline bool pskb_inet_may_pull(struct sk_buff *skb)
++static inline enum skb_drop_reason
++pskb_inet_may_pull_reason(struct sk_buff *skb)
+ {
+ 	int nhlen;
+ 
+@@ -456,7 +457,12 @@ static inline bool pskb_inet_may_pull(struct sk_buff *skb)
+ 		nhlen = 0;
+ 	}
+ 
+-	return pskb_network_may_pull(skb, nhlen);
++	return pskb_network_may_pull_reason(skb, nhlen);
++}
++
++static inline bool pskb_inet_may_pull(struct sk_buff *skb)
++{
++	return pskb_inet_may_pull_reason(skb) == SKB_NOT_DROPPED_YET;
+ }
+ 
+ /* Variant of pskb_inet_may_pull().
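This hunk and the skbuff.h one above apply the same recipe: the helper
now returns an skb_drop_reason, and the legacy boolean API becomes a
wrapper comparing against SKB_NOT_DROPPED_YET. Callers that want the
detail can forward it, e.g. (a sketch of a tunnel xmit prologue):

    enum skb_drop_reason reason;

    reason = pskb_inet_may_pull_reason(skb);
    if (reason != SKB_NOT_DROPPED_YET) {
            kfree_skb_reason(skb, reason);
            return NETDEV_TX_OK;
    }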
+diff --git a/io_uring/msg_ring.c b/io_uring/msg_ring.c
+index 35b1b585e9cbe6..b68e009bce2180 100644
+--- a/io_uring/msg_ring.c
++++ b/io_uring/msg_ring.c
+@@ -82,7 +82,7 @@ static void io_msg_tw_complete(struct io_kiocb *req, struct io_tw_state *ts)
+ 		spin_unlock(&ctx->msg_lock);
+ 	}
+ 	if (req)
+-		kmem_cache_free(req_cachep, req);
++		kfree_rcu(req, rcu_head);
+ 	percpu_ref_put(&ctx->refs);
+ }
+ 
+@@ -91,7 +91,7 @@ static int io_msg_remote_post(struct io_ring_ctx *ctx, struct io_kiocb *req,
+ {
+ 	req->task = READ_ONCE(ctx->submitter_task);
+ 	if (!req->task) {
+-		kmem_cache_free(req_cachep, req);
++		kfree_rcu(req, rcu_head);
+ 		return -EOWNERDEAD;
+ 	}
+ 	req->opcode = IORING_OP_NOP;
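kfree_rcu() needs an rcu_head inside the object, which the
io_uring_types.h hunk above provides by overlaying one with hash_node, a
field these internally allocated requests never use. The overlay idiom
in generic form (a sketch, not io_uring code):

    struct foo {
            union {
                    struct hlist_node node;   /* while reachable */
                    struct rcu_head   rcu;    /* once unlinked */
            };
            /* payload ... */
    };

    static void foo_release(struct foo *f)
    {
            hlist_del_init(&f->node);  /* no new lookups can find it */
            kfree_rcu(f, rcu);         /* in-flight RCU readers stay safe */
    }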
+diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
+index 767dcb8471f63b..6f91e3a123e554 100644
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -2310,28 +2310,44 @@ static bool __bpf_prog_map_compatible(struct bpf_map *map,
+ 				      const struct bpf_prog *fp)
+ {
+ 	enum bpf_prog_type prog_type = resolve_prog_type(fp);
+-	bool ret;
+ 	struct bpf_prog_aux *aux = fp->aux;
++	enum bpf_cgroup_storage_type i;
++	bool ret = false;
++	u64 cookie;
+ 
+ 	if (fp->kprobe_override)
+-		return false;
++		return ret;
+ 
+-	spin_lock(&map->owner.lock);
+-	if (!map->owner.type) {
+-		/* There's no owner yet where we could check for
+-		 * compatibility.
+-		 */
+-		map->owner.type  = prog_type;
+-		map->owner.jited = fp->jited;
+-		map->owner.xdp_has_frags = aux->xdp_has_frags;
+-		map->owner.attach_func_proto = aux->attach_func_proto;
++	spin_lock(&map->owner_lock);
++	/* There's no owner yet where we could check for compatibility. */
++	if (!map->owner) {
++		map->owner = bpf_map_owner_alloc(map);
++		if (!map->owner)
++			goto err;
++		map->owner->type  = prog_type;
++		map->owner->jited = fp->jited;
++		map->owner->xdp_has_frags = aux->xdp_has_frags;
++		map->owner->attach_func_proto = aux->attach_func_proto;
++		for_each_cgroup_storage_type(i) {
++			map->owner->storage_cookie[i] =
++				aux->cgroup_storage[i] ?
++				aux->cgroup_storage[i]->cookie : 0;
++		}
+ 		ret = true;
+ 	} else {
+-		ret = map->owner.type  == prog_type &&
+-		      map->owner.jited == fp->jited &&
+-		      map->owner.xdp_has_frags == aux->xdp_has_frags;
++		ret = map->owner->type  == prog_type &&
++		      map->owner->jited == fp->jited &&
++		      map->owner->xdp_has_frags == aux->xdp_has_frags;
++		for_each_cgroup_storage_type(i) {
++			if (!ret)
++				break;
++			cookie = aux->cgroup_storage[i] ?
++				 aux->cgroup_storage[i]->cookie : 0;
++			ret = map->owner->storage_cookie[i] == cookie ||
++			      !cookie;
++		}
+ 		if (ret &&
+-		    map->owner.attach_func_proto != aux->attach_func_proto) {
++		    map->owner->attach_func_proto != aux->attach_func_proto) {
+ 			switch (prog_type) {
+ 			case BPF_PROG_TYPE_TRACING:
+ 			case BPF_PROG_TYPE_LSM:
+@@ -2344,8 +2360,8 @@ static bool __bpf_prog_map_compatible(struct bpf_map *map,
+ 			}
+ 		}
+ 	}
+-	spin_unlock(&map->owner.lock);
+-
++err:
++	spin_unlock(&map->owner_lock);
+ 	return ret;
+ }
+ 
+diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
+index ab74a226e3d6d9..ba4543e771a6ef 100644
+--- a/kernel/bpf/syscall.c
++++ b/kernel/bpf/syscall.c
+@@ -35,6 +35,7 @@
+ #include <linux/rcupdate_trace.h>
+ #include <linux/memcontrol.h>
+ #include <linux/trace_events.h>
++#include <linux/cookie.h>
+ 
+ #include <net/netfilter/nf_bpf_link.h>
+ #include <net/netkit.h>
+@@ -51,6 +52,7 @@
+ #define BPF_OBJ_FLAG_MASK   (BPF_F_RDONLY | BPF_F_WRONLY)
+ 
+ DEFINE_PER_CPU(int, bpf_prog_active);
++DEFINE_COOKIE(bpf_map_cookie);
+ static DEFINE_IDR(prog_idr);
+ static DEFINE_SPINLOCK(prog_idr_lock);
+ static DEFINE_IDR(map_idr);
+@@ -765,6 +767,7 @@ static void bpf_map_free_deferred(struct work_struct *work)
+ 
+ 	security_bpf_map_free(map);
+ 	bpf_map_release_memcg(map);
++	bpf_map_owner_free(map);
+ 	bpf_map_free(map);
+ }
+ 
+@@ -859,12 +862,12 @@ static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
+ 	struct bpf_map *map = filp->private_data;
+ 	u32 type = 0, jited = 0;
+ 
+-	if (map_type_contains_progs(map)) {
+-		spin_lock(&map->owner.lock);
+-		type  = map->owner.type;
+-		jited = map->owner.jited;
+-		spin_unlock(&map->owner.lock);
++	spin_lock(&map->owner_lock);
++	if (map->owner) {
++		type  = map->owner->type;
++		jited = map->owner->jited;
+ 	}
++	spin_unlock(&map->owner_lock);
+ 
+ 	seq_printf(m,
+ 		   "map_type:\t%u\n"
+@@ -1360,10 +1363,14 @@ static int map_create(union bpf_attr *attr)
+ 	if (err < 0)
+ 		goto free_map;
+ 
++	preempt_disable();
++	map->cookie = gen_cookie_next(&bpf_map_cookie);
++	preempt_enable();
++
+ 	atomic64_set(&map->refcnt, 1);
+ 	atomic64_set(&map->usercnt, 1);
+ 	mutex_init(&map->freeze_mutex);
+-	spin_lock_init(&map->owner.lock);
++	spin_lock_init(&map->owner_lock);
+ 
+ 	if (attr->btf_key_type_id || attr->btf_value_type_id ||
+ 	    /* Even the map's value is a kernel's struct,
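map->cookie gives every map a never-reused 64-bit identity, which the
core.c hunk compares instead of raw map pointers: a freed and
reallocated map could alias an old pointer, a cookie cannot. The
preempt_disable() bracket exists because gen_cookie_next() works on
per-CPU state; conceptually it behaves like this user-space model (an
illustrative analogue, not the linux/cookie.h implementation):

    #include <stdatomic.h>

    static _Atomic unsigned long long shared_last;
    static _Thread_local unsigned long long local_next, local_end;

    unsigned long long cookie_next(void)
    {
            if (local_next == local_end) {  /* refill a batch of 4096 */
                    local_end  = atomic_fetch_add(&shared_last, 4096) + 4096;
                    local_next = local_end - 4096;
            }
            return ++local_next;            /* first cookie is 1, never 0 */
    }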
+diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
+index 9748a4c8d66853..4bd825c24e2646 100644
+--- a/kernel/sched/topology.c
++++ b/kernel/sched/topology.c
+@@ -2174,6 +2174,8 @@ int sched_numa_find_nth_cpu(const struct cpumask *cpus, int cpu, int node)
+ 		goto unlock;
+ 
+ 	hop_masks = bsearch(&k, k.masks, sched_domains_numa_levels, sizeof(k.masks[0]), hop_cmp);
++	if (!hop_masks)
++		goto unlock;
+ 	hop = hop_masks	- k.masks;
+ 
+ 	ret = hop ?
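bsearch() returns NULL when no element compares equal, and the old code
fed that result straight into pointer arithmetic. A stand-alone
illustration of the failure mode and the added guard:

    #include <stdio.h>
    #include <stdlib.h>

    static int cmp_int(const void *key, const void *elem)
    {
            return *(const int *)key - *(const int *)elem;
    }

    int main(void)
    {
            int masks[] = { 1, 2, 4, 8 };
            int key = 3;    /* not present */
            int *hit = bsearch(&key, masks, 4, sizeof(*masks), cmp_int);

            if (!hit)       /* without this, hit - masks is undefined */
                    return 1;
            printf("index %td\n", hit - masks);
            return 0;
    }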
+diff --git a/mm/kasan/kasan_test_c.c b/mm/kasan/kasan_test_c.c
+index d8fb281e439d56..d4ac26ad1d3e53 100644
+--- a/mm/kasan/kasan_test_c.c
++++ b/mm/kasan/kasan_test_c.c
+@@ -1548,6 +1548,7 @@ static void kasan_strings(struct kunit *test)
+ 
+ 	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
+ 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
++	OPTIMIZER_HIDE_VAR(ptr);
+ 
+ 	kfree(ptr);
+ 
+diff --git a/mm/kmemleak.c b/mm/kmemleak.c
+index 91894fc54c645f..0aecd537645a54 100644
+--- a/mm/kmemleak.c
++++ b/mm/kmemleak.c
+@@ -432,9 +432,15 @@ static struct kmemleak_object *__lookup_object(unsigned long ptr, int alias,
+ 		else if (untagged_objp == untagged_ptr || alias)
+ 			return object;
+ 		else {
++			/*
++			 * Printk deferring due to the kmemleak_lock held.
++			 * This is done to avoid deadlock.
++			 */
++			printk_deferred_enter();
+ 			kmemleak_warn("Found object by alias at 0x%08lx\n",
+ 				      ptr);
+ 			dump_object_info(object);
++			printk_deferred_exit();
+ 			break;
+ 		}
+ 	}
+@@ -731,6 +737,11 @@ static int __link_object(struct kmemleak_object *object, unsigned long ptr,
+ 		else if (untagged_objp + parent->size <= untagged_ptr)
+ 			link = &parent->rb_node.rb_right;
+ 		else {
++			/*
++			 * Printk deferring due to the kmemleak_lock held.
++			 * This is done to avoid deadlock.
++			 */
++			printk_deferred_enter();
+ 			kmemleak_stop("Cannot insert 0x%lx into the object search tree (overlaps existing)\n",
+ 				      ptr);
+ 			/*
+@@ -738,6 +749,7 @@ static int __link_object(struct kmemleak_object *object, unsigned long ptr,
+ 			 * be freed while the kmemleak_lock is held.
+ 			 */
+ 			dump_object_info(parent);
++			printk_deferred_exit();
+ 			return -EEXIST;
+ 		}
+ 	}
+@@ -851,13 +863,8 @@ static void delete_object_part(unsigned long ptr, size_t size,
+ 
+ 	raw_spin_lock_irqsave(&kmemleak_lock, flags);
+ 	object = __find_and_remove_object(ptr, 1, objflags);
+-	if (!object) {
+-#ifdef DEBUG
+-		kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n",
+-			      ptr, size);
+-#endif
++	if (!object)
+ 		goto unlock;
+-	}
+ 
+ 	/*
+ 	 * Create one or two objects that may result from the memory block
+@@ -877,8 +884,14 @@ static void delete_object_part(unsigned long ptr, size_t size,
+ 
+ unlock:
+ 	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
+-	if (object)
++	if (object) {
+ 		__delete_object(object);
++	} else {
++#ifdef DEBUG
++		kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n",
++			      ptr, size);
++#endif
++	}
+ 
+ out:
+ 	if (object_l)
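kmemleak_lock is a raw spinlock taken on the object-tracking hot paths,
so printing synchronously while holding it can deadlock against locks
the printk/console code itself takes. printk_deferred_enter()/_exit()
queue the output for later instead. The bracket in sketch form:

    raw_spin_lock_irqsave(&kmemleak_lock, flags);
    /* ... tree manipulation detects an inconsistency ... */
    printk_deferred_enter();        /* no synchronous console work here */
    pr_warn("kmemleak: inconsistent object state\n");
    printk_deferred_exit();
    raw_spin_unlock_irqrestore(&kmemleak_lock, flags);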
+diff --git a/mm/slub.c b/mm/slub.c
+index dc527b59f5a98a..7fbba36f7aac52 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -935,19 +935,19 @@ static struct track *get_track(struct kmem_cache *s, void *object,
+ }
+ 
+ #ifdef CONFIG_STACKDEPOT
+-static noinline depot_stack_handle_t set_track_prepare(void)
++static noinline depot_stack_handle_t set_track_prepare(gfp_t gfp_flags)
+ {
+ 	depot_stack_handle_t handle;
+ 	unsigned long entries[TRACK_ADDRS_COUNT];
+ 	unsigned int nr_entries;
+ 
+ 	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 3);
+-	handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT);
++	handle = stack_depot_save(entries, nr_entries, gfp_flags);
+ 
+ 	return handle;
+ }
+ #else
+-static inline depot_stack_handle_t set_track_prepare(void)
++static inline depot_stack_handle_t set_track_prepare(gfp_t gfp_flags)
+ {
+ 	return 0;
+ }
+@@ -969,9 +969,9 @@ static void set_track_update(struct kmem_cache *s, void *object,
+ }
+ 
+ static __always_inline void set_track(struct kmem_cache *s, void *object,
+-				      enum track_item alloc, unsigned long addr)
++				      enum track_item alloc, unsigned long addr, gfp_t gfp_flags)
+ {
+-	depot_stack_handle_t handle = set_track_prepare();
++	depot_stack_handle_t handle = set_track_prepare(gfp_flags);
+ 
+ 	set_track_update(s, object, alloc, addr, handle);
+ }
+@@ -1027,22 +1027,31 @@ void skip_orig_size_check(struct kmem_cache *s, const void *object)
+ 	set_orig_size(s, (void *)object, s->object_size);
+ }
+ 
+-static void slab_bug(struct kmem_cache *s, char *fmt, ...)
++static void __slab_bug(struct kmem_cache *s, const char *fmt, va_list argsp)
+ {
+ 	struct va_format vaf;
+ 	va_list args;
+ 
+-	va_start(args, fmt);
++	va_copy(args, argsp);
+ 	vaf.fmt = fmt;
+ 	vaf.va = &args;
+ 	pr_err("=============================================================================\n");
+-	pr_err("BUG %s (%s): %pV\n", s->name, print_tainted(), &vaf);
++	pr_err("BUG %s (%s): %pV\n", s ? s->name : "<unknown>", print_tainted(), &vaf);
+ 	pr_err("-----------------------------------------------------------------------------\n\n");
+ 	va_end(args);
+ }
+ 
++static void slab_bug(struct kmem_cache *s, const char *fmt, ...)
++{
++	va_list args;
++
++	va_start(args, fmt);
++	__slab_bug(s, fmt, args);
++	va_end(args);
++}
++
+ __printf(2, 3)
+-static void slab_fix(struct kmem_cache *s, char *fmt, ...)
++static void slab_fix(struct kmem_cache *s, const char *fmt, ...)
+ {
+ 	struct va_format vaf;
+ 	va_list args;
+@@ -1095,19 +1104,24 @@ static void print_trailer(struct kmem_cache *s, struct slab *slab, u8 *p)
+ 		/* Beginning of the filler is the free pointer */
+ 		print_section(KERN_ERR, "Padding  ", p + off,
+ 			      size_from_object(s) - off);
+-
+-	dump_stack();
+ }
+ 
+ static void object_err(struct kmem_cache *s, struct slab *slab,
+-			u8 *object, char *reason)
++			u8 *object, const char *reason)
+ {
+ 	if (slab_add_kunit_errors())
+ 		return;
+ 
+-	slab_bug(s, "%s", reason);
+-	print_trailer(s, slab, object);
++	slab_bug(s, reason);
++	if (!object || !check_valid_pointer(s, slab, object)) {
++		print_slab_info(slab);
++		pr_err("Invalid pointer 0x%p\n", object);
++	} else {
++		print_trailer(s, slab, object);
++	}
+ 	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
++
++	WARN_ON(1);
+ }
+ 
+ static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab,
+@@ -1124,22 +1138,30 @@ static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab,
+ 	return false;
+ }
+ 
++static void __slab_err(struct slab *slab)
++{
++	if (slab_in_kunit_test())
++		return;
++
++	print_slab_info(slab);
++	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
++
++	WARN_ON(1);
++}
++
+ static __printf(3, 4) void slab_err(struct kmem_cache *s, struct slab *slab,
+ 			const char *fmt, ...)
+ {
+ 	va_list args;
+-	char buf[100];
+ 
+ 	if (slab_add_kunit_errors())
+ 		return;
+ 
+ 	va_start(args, fmt);
+-	vsnprintf(buf, sizeof(buf), fmt, args);
++	__slab_bug(s, fmt, args);
+ 	va_end(args);
+-	slab_bug(s, "%s", buf);
+-	print_slab_info(slab);
+-	dump_stack();
+-	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
++
++	__slab_err(slab);
+ }
+ 
+ static void init_object(struct kmem_cache *s, void *object, u8 val)
+@@ -1176,7 +1198,7 @@ static void init_object(struct kmem_cache *s, void *object, u8 val)
+ 					  s->inuse - poison_size);
+ }
+ 
+-static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
++static void restore_bytes(struct kmem_cache *s, const char *message, u8 data,
+ 						void *from, void *to)
+ {
+ 	slab_fix(s, "Restoring %s 0x%p-0x%p=0x%x", message, from, to - 1, data);
+@@ -1191,8 +1213,8 @@ static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
+ 
+ static pad_check_attributes int
+ check_bytes_and_report(struct kmem_cache *s, struct slab *slab,
+-		       u8 *object, char *what,
+-		       u8 *start, unsigned int value, unsigned int bytes)
++		       u8 *object, const char *what, u8 *start, unsigned int value,
++		       unsigned int bytes, bool slab_obj_print)
+ {
+ 	u8 *fault;
+ 	u8 *end;
+@@ -1211,10 +1233,11 @@ check_bytes_and_report(struct kmem_cache *s, struct slab *slab,
+ 	if (slab_add_kunit_errors())
+ 		goto skip_bug_print;
+ 
+-	slab_bug(s, "%s overwritten", what);
+-	pr_err("0x%p-0x%p @offset=%tu. First byte 0x%x instead of 0x%x\n",
+-					fault, end - 1, fault - addr,
+-					fault[0], value);
++	pr_err("[%s overwritten] 0x%p-0x%p @offset=%tu. First byte 0x%x instead of 0x%x\n",
++	       what, fault, end - 1, fault - addr, fault[0], value);
++
++	if (slab_obj_print)
++		object_err(s, slab, object, "Object corrupt");
+ 
+ skip_bug_print:
+ 	restore_bytes(s, what, value, fault, end);
+@@ -1278,7 +1301,7 @@ static int check_pad_bytes(struct kmem_cache *s, struct slab *slab, u8 *p)
+ 		return 1;
+ 
+ 	return check_bytes_and_report(s, slab, p, "Object padding",
+-			p + off, POISON_INUSE, size_from_object(s) - off);
++			p + off, POISON_INUSE, size_from_object(s) - off, true);
+ }
+ 
+ /* Check the pad bytes at the end of a slab page */
+@@ -1311,9 +1334,10 @@ slab_pad_check(struct kmem_cache *s, struct slab *slab)
+ 	while (end > fault && end[-1] == POISON_INUSE)
+ 		end--;
+ 
+-	slab_err(s, slab, "Padding overwritten. 0x%p-0x%p @offset=%tu",
+-			fault, end - 1, fault - start);
++	slab_bug(s, "Padding overwritten. 0x%p-0x%p @offset=%tu",
++		 fault, end - 1, fault - start);
+ 	print_section(KERN_ERR, "Padding ", pad, remainder);
++	__slab_err(slab);
+ 
+ 	restore_bytes(s, "slab padding", POISON_INUSE, fault, end);
+ }
+@@ -1328,11 +1352,11 @@ static int check_object(struct kmem_cache *s, struct slab *slab,
+ 
+ 	if (s->flags & SLAB_RED_ZONE) {
+ 		if (!check_bytes_and_report(s, slab, object, "Left Redzone",
+-			object - s->red_left_pad, val, s->red_left_pad))
++			object - s->red_left_pad, val, s->red_left_pad, ret))
+ 			ret = 0;
+ 
+ 		if (!check_bytes_and_report(s, slab, object, "Right Redzone",
+-			endobject, val, s->inuse - s->object_size))
++			endobject, val, s->inuse - s->object_size, ret))
+ 			ret = 0;
+ 
+ 		if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) {
+@@ -1341,7 +1365,7 @@ static int check_object(struct kmem_cache *s, struct slab *slab,
+ 			if (s->object_size > orig_size  &&
+ 				!check_bytes_and_report(s, slab, object,
+ 					"kmalloc Redzone", p + orig_size,
+-					val, s->object_size - orig_size)) {
++					val, s->object_size - orig_size, ret)) {
+ 				ret = 0;
+ 			}
+ 		}
+@@ -1349,7 +1373,7 @@ static int check_object(struct kmem_cache *s, struct slab *slab,
+ 		if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
+ 			if (!check_bytes_and_report(s, slab, p, "Alignment padding",
+ 				endobject, POISON_INUSE,
+-				s->inuse - s->object_size))
++				s->inuse - s->object_size, ret))
+ 				ret = 0;
+ 		}
+ 	}
+@@ -1365,11 +1389,11 @@ static int check_object(struct kmem_cache *s, struct slab *slab,
+ 			if (kasan_meta_size < s->object_size - 1 &&
+ 			    !check_bytes_and_report(s, slab, p, "Poison",
+ 					p + kasan_meta_size, POISON_FREE,
+-					s->object_size - kasan_meta_size - 1))
++					s->object_size - kasan_meta_size - 1, ret))
+ 				ret = 0;
+ 			if (kasan_meta_size < s->object_size &&
+ 			    !check_bytes_and_report(s, slab, p, "End Poison",
+-					p + s->object_size - 1, POISON_END, 1))
++					p + s->object_size - 1, POISON_END, 1, ret))
+ 				ret = 0;
+ 		}
+ 		/*
+@@ -1395,11 +1419,6 @@ static int check_object(struct kmem_cache *s, struct slab *slab,
+ 		ret = 0;
+ 	}
+ 
+-	if (!ret && !slab_in_kunit_test()) {
+-		print_trailer(s, slab, object);
+-		add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
+-	}
+-
+ 	return ret;
+ }
+ 
+@@ -1634,12 +1653,12 @@ static inline int free_consistency_checks(struct kmem_cache *s,
+ 			slab_err(s, slab, "Attempt to free object(0x%p) outside of slab",
+ 				 object);
+ 		} else if (!slab->slab_cache) {
+-			pr_err("SLUB <none>: no slab for object 0x%p.\n",
+-			       object);
+-			dump_stack();
+-		} else
++			slab_err(NULL, slab, "No slab cache for object 0x%p",
++				 object);
++		} else {
+ 			object_err(s, slab, object,
+-					"page slab pointer corrupt.");
++				   "page slab pointer corrupt.");
++		}
+ 		return 0;
+ 	}
+ 	return 1;
+@@ -1872,9 +1891,9 @@ static inline bool free_debug_processing(struct kmem_cache *s,
+ static inline void slab_pad_check(struct kmem_cache *s, struct slab *slab) {}
+ static inline int check_object(struct kmem_cache *s, struct slab *slab,
+ 			void *object, u8 val) { return 1; }
+-static inline depot_stack_handle_t set_track_prepare(void) { return 0; }
++static inline depot_stack_handle_t set_track_prepare(gfp_t gfp_flags) { return 0; }
+ static inline void set_track(struct kmem_cache *s, void *object,
+-			     enum track_item alloc, unsigned long addr) {}
++			     enum track_item alloc, unsigned long addr, gfp_t gfp_flags) {}
+ static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
+ 					struct slab *slab) {}
+ static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
+@@ -3822,9 +3841,14 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
+ 			 * For debug caches here we had to go through
+ 			 * alloc_single_from_partial() so just store the
+ 			 * tracking info and return the object.
++			 *
++			 * Due to disabled preemption we need to disallow
++			 * blocking. The flags are further adjusted by
++			 * gfp_nested_mask() in stack_depot itself.
+ 			 */
+ 			if (s->flags & SLAB_STORE_USER)
+-				set_track(s, freelist, TRACK_ALLOC, addr);
++				set_track(s, freelist, TRACK_ALLOC, addr,
++					  gfpflags & ~(__GFP_DIRECT_RECLAIM));
+ 
+ 			return freelist;
+ 		}
+@@ -3856,7 +3880,8 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
+ 			goto new_objects;
+ 
+ 		if (s->flags & SLAB_STORE_USER)
+-			set_track(s, freelist, TRACK_ALLOC, addr);
++			set_track(s, freelist, TRACK_ALLOC, addr,
++				  gfpflags & ~(__GFP_DIRECT_RECLAIM));
+ 
+ 		return freelist;
+ 	}
+@@ -4344,8 +4369,12 @@ static noinline void free_to_partial_list(
+ 	unsigned long flags;
+ 	depot_stack_handle_t handle = 0;
+ 
++	/*
++	 * We cannot use GFP_NOWAIT as there are callsites where waking up
++	 * kswapd could deadlock
++	 */
+ 	if (s->flags & SLAB_STORE_USER)
+-		handle = set_track_prepare();
++		handle = set_track_prepare(__GFP_NOWARN);
+ 
+ 	spin_lock_irqsave(&n->list_lock, flags);
+ 
+@@ -5444,14 +5473,14 @@ static int calculate_sizes(struct kmem_cache_args *args, struct kmem_cache *s)
+ 	return !!oo_objects(s->oo);
+ }
+ 
+-static void list_slab_objects(struct kmem_cache *s, struct slab *slab,
+-			      const char *text)
++static void list_slab_objects(struct kmem_cache *s, struct slab *slab)
+ {
+ #ifdef CONFIG_SLUB_DEBUG
+ 	void *addr = slab_address(slab);
+ 	void *p;
+ 
+-	slab_err(s, slab, text, s->name);
++	if (!slab_add_kunit_errors())
++		slab_bug(s, "Objects remaining on __kmem_cache_shutdown()");
+ 
+ 	spin_lock(&object_map_lock);
+ 	__fill_map(object_map, s, slab);
+@@ -5466,6 +5495,8 @@ static void list_slab_objects(struct kmem_cache *s, struct slab *slab,
+ 		}
+ 	}
+ 	spin_unlock(&object_map_lock);
++
++	__slab_err(slab);
+ #endif
+ }
+ 
+@@ -5486,8 +5517,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
+ 			remove_partial(n, slab);
+ 			list_add(&slab->slab_list, &discard);
+ 		} else {
+-			list_slab_objects(s, slab,
+-			  "Objects remaining in %s on __kmem_cache_shutdown()");
++			list_slab_objects(s, slab);
+ 		}
+ 	}
+ 	spin_unlock_irq(&n->list_lock);
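The set_track() chain above now threads the caller's allocation context
down to stack_depot_save() instead of hard-coding GFP_NOWAIT, stripping
the blocking bit where preemption is disabled. The essential step, in
isolation (a sketch):

    /* caller's context minus direct reclaim: safe with preemption off */
    set_track(s, object, TRACK_ALLOC, addr,
              gfpflags & ~__GFP_DIRECT_RECLAIM);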
+diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
+index c0388b2e959da6..2628fc02be08b9 100644
+--- a/mm/sparse-vmemmap.c
++++ b/mm/sparse-vmemmap.c
+@@ -474,10 +474,5 @@ struct page * __meminit __populate_section_memmap(unsigned long pfn,
+ 	if (r < 0)
+ 		return NULL;
+ 
+-	if (system_state == SYSTEM_BOOTING)
+-		memmap_boot_pages_add(DIV_ROUND_UP(end - start, PAGE_SIZE));
+-	else
+-		memmap_pages_add(DIV_ROUND_UP(end - start, PAGE_SIZE));
+-
+ 	return pfn_to_page(pfn);
+ }
+diff --git a/mm/sparse.c b/mm/sparse.c
+index dc38539f85603b..eb6c5cb27ed1ec 100644
+--- a/mm/sparse.c
++++ b/mm/sparse.c
+@@ -462,9 +462,6 @@ static void __init sparse_buffer_init(unsigned long size, int nid)
+ 	 */
+ 	sparsemap_buf = memmap_alloc(size, section_map_size(), addr, nid, true);
+ 	sparsemap_buf_end = sparsemap_buf + size;
+-#ifndef CONFIG_SPARSEMEM_VMEMMAP
+-	memmap_boot_pages_add(DIV_ROUND_UP(size, PAGE_SIZE));
+-#endif
+ }
+ 
+ static void __init sparse_buffer_fini(void)
+@@ -532,6 +529,8 @@ static void __init sparse_init_nid(int nid, unsigned long pnum_begin,
+ 			sparse_buffer_fini();
+ 			goto failed;
+ 		}
++		memmap_boot_pages_add(DIV_ROUND_UP(PAGES_PER_SECTION * sizeof(struct page),
++						   PAGE_SIZE));
+ 		check_usemap_section_nr(nid, usage);
+ 		sparse_init_one_section(__nr_to_section(pnum), pnum, map, usage,
+ 				SECTION_IS_EARLY);
+@@ -643,7 +642,6 @@ static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
+ 	unsigned long start = (unsigned long) pfn_to_page(pfn);
+ 	unsigned long end = start + nr_pages * sizeof(struct page);
+ 
+-	memmap_pages_add(-1L * (DIV_ROUND_UP(end - start, PAGE_SIZE)));
+ 	vmemmap_free(start, end, altmap);
+ }
+ static void free_map_bootmem(struct page *memmap)
+@@ -819,10 +817,14 @@ static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
+ 	 * The memmap of early sections is always fully populated. See
+ 	 * section_activate() and pfn_valid() .
+ 	 */
+-	if (!section_is_early)
++	if (!section_is_early) {
++		memmap_pages_add(-1L * (DIV_ROUND_UP(nr_pages * sizeof(struct page), PAGE_SIZE)));
+ 		depopulate_section_memmap(pfn, nr_pages, altmap);
+-	else if (memmap)
++	} else if (memmap) {
++		memmap_boot_pages_add(-1L * (DIV_ROUND_UP(nr_pages * sizeof(struct page),
++							  PAGE_SIZE)));
+ 		free_map_bootmem(memmap);
++	}
+ 
+ 	if (empty)
+ 		ms->section_mem_map = (unsigned long)NULL;
+@@ -867,6 +869,7 @@ static struct page * __meminit section_activate(int nid, unsigned long pfn,
+ 		section_deactivate(pfn, nr_pages, altmap);
+ 		return ERR_PTR(-ENOMEM);
+ 	}
++	memmap_pages_add(DIV_ROUND_UP(nr_pages * sizeof(struct page), PAGE_SIZE));
+ 
+ 	return memmap;
+ }
+diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
+index 8b0f2fbd6a759d..904095f69a6e31 100644
+--- a/mm/userfaultfd.c
++++ b/mm/userfaultfd.c
+@@ -1432,10 +1432,15 @@ static int move_pages_pte(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd,
+ 		folio_unlock(src_folio);
+ 		folio_put(src_folio);
+ 	}
+-	if (dst_pte)
+-		pte_unmap(dst_pte);
++	/*
++	 * Unmap in reverse order (LIFO) to maintain proper kmap_local
++	 * index ordering when CONFIG_HIGHPTE is enabled. We mapped dst_pte
++	 * first, then src_pte, so we must unmap src_pte first, then dst_pte.
++	 */
+ 	if (src_pte)
+ 		pte_unmap(src_pte);
++	if (dst_pte)
++		pte_unmap(dst_pte);
+ 	mmu_notifier_invalidate_range_end(&range);
+ 	if (si)
+ 		put_swap_device(si);
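kmap_local mappings are strictly stack-ordered: with CONFIG_HIGHPTE,
pte_unmap() pops a kmap_local slot, so releases must mirror the
acquisition order in reverse, as the new comment explains. The rule in
isolation (a sketch):

    void *a = kmap_local_page(page1);       /* slot 0 */
    void *b = kmap_local_page(page2);       /* slot 1 */
    /* ... use both mappings ... */
    kunmap_local(b);                        /* pop slot 1 first */
    kunmap_local(a);                        /* then slot 0 */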
+diff --git a/net/atm/resources.c b/net/atm/resources.c
+index b19d851e1f4439..7c6fdedbcf4e5c 100644
+--- a/net/atm/resources.c
++++ b/net/atm/resources.c
+@@ -112,7 +112,9 @@ struct atm_dev *atm_dev_register(const char *type, struct device *parent,
+ 
+ 	if (atm_proc_dev_register(dev) < 0) {
+ 		pr_err("atm_proc_dev_register failed for dev %s\n", type);
+-		goto out_fail;
++		mutex_unlock(&atm_dev_mutex);
++		kfree(dev);
++		return NULL;
+ 	}
+ 
+ 	if (atm_register_sysfs(dev, parent) < 0) {
+@@ -128,7 +130,7 @@ struct atm_dev *atm_dev_register(const char *type, struct device *parent,
+ 	return dev;
+ 
+ out_fail:
+-	kfree(dev);
++	put_device(&dev->class_dev);
+ 	dev = NULL;
+ 	goto out;
+ }
+diff --git a/net/ax25/ax25_in.c b/net/ax25/ax25_in.c
+index 1cac25aca63784..f2d66af8635957 100644
+--- a/net/ax25/ax25_in.c
++++ b/net/ax25/ax25_in.c
+@@ -433,6 +433,10 @@ static int ax25_rcv(struct sk_buff *skb, struct net_device *dev,
+ int ax25_kiss_rcv(struct sk_buff *skb, struct net_device *dev,
+ 		  struct packet_type *ptype, struct net_device *orig_dev)
+ {
++	skb = skb_share_check(skb, GFP_ATOMIC);
++	if (!skb)
++		return NET_RX_DROP;
++
+ 	skb_orphan(skb);
+ 
+ 	if (!net_eq(dev_net(dev), &init_net)) {
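A packet_type handler may be handed an skb that other taps still hold a
reference to; mutating it (skb_orphan() included) would corrupt those
users. skb_share_check() returns an exclusively owned skb, cloning if
needed, or NULL on allocation failure. The canonical prologue (a
sketch; proto_rcv is illustrative):

    static int proto_rcv(struct sk_buff *skb, struct net_device *dev,
                         struct packet_type *pt, struct net_device *orig_dev)
    {
            skb = skb_share_check(skb, GFP_ATOMIC);
            if (!skb)
                    return NET_RX_DROP;

            /* skb is now exclusively ours; safe to modify */
            consume_skb(skb);
            return NET_RX_SUCCESS;
    }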
+diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
+index 71ebd0284f95d2..0adc783fb83ca2 100644
+--- a/net/batman-adv/network-coding.c
++++ b/net/batman-adv/network-coding.c
+@@ -1687,7 +1687,12 @@ batadv_nc_skb_decode_packet(struct batadv_priv *bat_priv, struct sk_buff *skb,
+ 
+ 	coding_len = ntohs(coded_packet_tmp.coded_len);
+ 
+-	if (coding_len > skb->len)
++	/* ensure dst buffer is large enough (payload only) */
++	if (coding_len + h_size > skb->len)
++		return NULL;
++
++	/* ensure src buffer is large enough (payload only) */
++	if (coding_len + h_size > nc_packet->skb->len)
+ 		return NULL;
+ 
+ 	/* Here the magic is reversed:
+diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
+index bc2aa514b8c5d8..5f5137764b80af 100644
+--- a/net/bluetooth/hci_sync.c
++++ b/net/bluetooth/hci_sync.c
+@@ -3354,7 +3354,7 @@ static int hci_powered_update_adv_sync(struct hci_dev *hdev)
+ 	 * advertising data. This also applies to the case
+ 	 * where BR/EDR was toggled during the AUTO_OFF phase.
+ 	 */
+-	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
++	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
+ 	    list_empty(&hdev->adv_instances)) {
+ 		if (ext_adv_capable(hdev)) {
+ 			err = hci_setup_ext_adv_instance_sync(hdev, 0x00);
+diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
+index 615c18e290ab92..b35f1551b9be7d 100644
+--- a/net/bluetooth/l2cap_sock.c
++++ b/net/bluetooth/l2cap_sock.c
+@@ -1409,7 +1409,10 @@ static int l2cap_sock_release(struct socket *sock)
+ 	if (!sk)
+ 		return 0;
+ 
++	lock_sock_nested(sk, L2CAP_NESTING_PARENT);
+ 	l2cap_sock_cleanup_listen(sk);
++	release_sock(sk);
++
+ 	bt_sock_unlink(&l2cap_sk_list, sk);
+ 
+ 	err = l2cap_sock_shutdown(sock, SHUT_RDWR);
+diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
+index 17a5f5923d615d..5ad3f3ef4ca75f 100644
+--- a/net/bridge/br_netfilter_hooks.c
++++ b/net/bridge/br_netfilter_hooks.c
+@@ -653,9 +653,6 @@ static unsigned int br_nf_local_in(void *priv,
+ 		break;
+ 	}
+ 
+-	ct = container_of(nfct, struct nf_conn, ct_general);
+-	WARN_ON_ONCE(!nf_ct_is_confirmed(ct));
+-
+ 	return ret;
+ }
+ #endif
+diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
+index 412816076b8bc5..392f1cb5cc4791 100644
+--- a/net/core/gen_estimator.c
++++ b/net/core/gen_estimator.c
+@@ -90,10 +90,12 @@ static void est_timer(struct timer_list *t)
+ 	rate = (b_packets - est->last_packets) << (10 - est->intvl_log);
+ 	rate = (rate >> est->ewma_log) - (est->avpps >> est->ewma_log);
+ 
++	preempt_disable_nested();
+ 	write_seqcount_begin(&est->seq);
+ 	est->avbps += brate;
+ 	est->avpps += rate;
+ 	write_seqcount_end(&est->seq);
++	preempt_enable_nested();
+ 
+ 	est->last_bytes = b_bytes;
+ 	est->last_packets = b_packets;
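A seqcount writer must not be preempted mid-section: a reader on the
same CPU would spin in read_seqcount_retry() against an odd sequence
forever. On PREEMPT_RT the timer callback runs preemptibly, hence the
preempt_disable_nested() bracket, which compiles to a plain barrier on
!RT kernels where the context is already non-preemptible. The reader
side, for contrast (a sketch):

    unsigned int seq;
    u64 snapshot;

    do {
            seq = read_seqcount_begin(&est->seq);
            snapshot = est->avbps;
    } while (read_seqcount_retry(&est->seq, seq));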
+diff --git a/net/dsa/port.c b/net/dsa/port.c
+index 25258b33e59e01..9c77c80e8fe975 100644
+--- a/net/dsa/port.c
++++ b/net/dsa/port.c
+@@ -1589,6 +1589,22 @@ dsa_port_phylink_mac_select_pcs(struct phylink_config *config,
+ 	return pcs;
+ }
+ 
++/* dsa_supports_eee - indicate that EEE is supported
++ * @ds: pointer to &struct dsa_switch
++ * @port: port index
++ *
++ * A default implementation for the .support_eee() DSA operations member,
++ * which drivers can use to indicate that they support EEE on all of their
++ * user ports.
++ *
++ * Returns: true
++ */
++bool dsa_supports_eee(struct dsa_switch *ds, int port)
++{
++	return true;
++}
++EXPORT_SYMBOL_GPL(dsa_supports_eee);
++
+ static void dsa_port_phylink_mac_config(struct phylink_config *config,
+ 					unsigned int mode,
+ 					const struct phylink_link_state *state)
+diff --git a/net/dsa/user.c b/net/dsa/user.c
+index 64f660d2334b77..06267c526dc4e6 100644
+--- a/net/dsa/user.c
++++ b/net/dsa/user.c
+@@ -1231,6 +1231,10 @@ static int dsa_user_set_eee(struct net_device *dev, struct ethtool_keee *e)
+ 	struct dsa_switch *ds = dp->ds;
+ 	int ret;
+ 
++	/* Check whether the switch supports EEE */
++	if (ds->ops->support_eee && !ds->ops->support_eee(ds, dp->index))
++		return -EOPNOTSUPP;
++
+ 	/* Port's PHY and MAC both need to be EEE capable */
+ 	if (!dev->phydev || !dp->pl)
+ 		return -ENODEV;
+@@ -1251,6 +1255,10 @@ static int dsa_user_get_eee(struct net_device *dev, struct ethtool_keee *e)
+ 	struct dsa_switch *ds = dp->ds;
+ 	int ret;
+ 
++	/* Check whether the switch supports EEE */
++	if (ds->ops->support_eee && !ds->ops->support_eee(ds, dp->index))
++		return -EOPNOTSUPP;
++
+ 	/* Port's PHY and MAC both need to be EEE capable */
+ 	if (!dev->phydev || !dp->pl)
+ 		return -ENODEV;
+diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
+index a55e95046984da..46fa50576f5819 100644
+--- a/net/ipv4/devinet.c
++++ b/net/ipv4/devinet.c
+@@ -351,14 +351,13 @@ static void inetdev_destroy(struct in_device *in_dev)
+ 
+ static int __init inet_blackhole_dev_init(void)
+ {
+-	int err = 0;
++	struct in_device *in_dev;
+ 
+ 	rtnl_lock();
+-	if (!inetdev_init(blackhole_netdev))
+-		err = -ENOMEM;
++	in_dev = inetdev_init(blackhole_netdev);
+ 	rtnl_unlock();
+ 
+-	return err;
++	return PTR_ERR_OR_ZERO(in_dev);
+ }
+ late_initcall(inet_blackhole_dev_init);
+ 
+diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
+index b8111ec651b545..8f11870b77377d 100644
+--- a/net/ipv4/icmp.c
++++ b/net/ipv4/icmp.c
+@@ -799,11 +799,12 @@ void icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info)
+ 	struct sk_buff *cloned_skb = NULL;
+ 	struct ip_options opts = { 0 };
+ 	enum ip_conntrack_info ctinfo;
++	enum ip_conntrack_dir dir;
+ 	struct nf_conn *ct;
+ 	__be32 orig_ip;
+ 
+ 	ct = nf_ct_get(skb_in, &ctinfo);
+-	if (!ct || !(ct->status & IPS_SRC_NAT)) {
++	if (!ct || !(READ_ONCE(ct->status) & IPS_NAT_MASK)) {
+ 		__icmp_send(skb_in, type, code, info, &opts);
+ 		return;
+ 	}
+@@ -818,7 +819,8 @@ void icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info)
+ 		goto out;
+ 
+ 	orig_ip = ip_hdr(skb_in)->saddr;
+-	ip_hdr(skb_in)->saddr = ct->tuplehash[0].tuple.src.u3.ip;
++	dir = CTINFO2DIR(ctinfo);
++	ip_hdr(skb_in)->saddr = ct->tuplehash[dir].tuple.src.u3.ip;
+ 	__icmp_send(skb_in, type, code, info, &opts);
+ 	ip_hdr(skb_in)->saddr = orig_ip;
+ out:
+diff --git a/net/ipv6/ip6_icmp.c b/net/ipv6/ip6_icmp.c
+index 9e3574880cb03e..233914b63bdb82 100644
+--- a/net/ipv6/ip6_icmp.c
++++ b/net/ipv6/ip6_icmp.c
+@@ -54,11 +54,12 @@ void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info)
+ 	struct inet6_skb_parm parm = { 0 };
+ 	struct sk_buff *cloned_skb = NULL;
+ 	enum ip_conntrack_info ctinfo;
++	enum ip_conntrack_dir dir;
+ 	struct in6_addr orig_ip;
+ 	struct nf_conn *ct;
+ 
+ 	ct = nf_ct_get(skb_in, &ctinfo);
+-	if (!ct || !(ct->status & IPS_SRC_NAT)) {
++	if (!ct || !(READ_ONCE(ct->status) & IPS_NAT_MASK)) {
+ 		__icmpv6_send(skb_in, type, code, info, &parm);
+ 		return;
+ 	}
+@@ -73,7 +74,8 @@ void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info)
+ 		goto out;
+ 
+ 	orig_ip = ipv6_hdr(skb_in)->saddr;
+-	ipv6_hdr(skb_in)->saddr = ct->tuplehash[0].tuple.src.u3.in6;
++	dir = CTINFO2DIR(ctinfo);
++	ipv6_hdr(skb_in)->saddr = ct->tuplehash[dir].tuple.src.u3.in6;
+ 	__icmpv6_send(skb_in, type, code, info, &parm);
+ 	ipv6_hdr(skb_in)->saddr = orig_ip;
+ out:
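tuplehash[0] is always IP_CT_DIR_ORIGINAL, which is only the right
pre-NAT source for packets flowing in the original direction.
CTINFO2DIR(ctinfo) picks the tuple matching the direction of the skb at
hand, so reply-direction traffic is restored correctly too, and the
IPS_NAT_MASK test covers DNAT as well as SNAT. The core of both hunks,
condensed (saddr is illustrative):

    enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);

    /* pre-NAT source for this packet's direction, not always ORIGINAL */
    saddr = ct->tuplehash[dir].tuple.src.u3.ip;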
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index 59173f58ce9923..882ce5444572ea 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -1417,17 +1417,17 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
+ 	ireq = inet_rsk(req);
+ 
+ 	if (sk_acceptq_is_full(sk))
+-		goto out_overflow;
++		goto exit_overflow;
+ 
+ 	if (!dst) {
+ 		dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
+ 		if (!dst)
+-			goto out;
++			goto exit;
+ 	}
+ 
+ 	newsk = tcp_create_openreq_child(sk, req, skb);
+ 	if (!newsk)
+-		goto out_nonewsk;
++		goto exit_nonewsk;
+ 
+ 	/*
+ 	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
+@@ -1517,25 +1517,19 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
+ 			const union tcp_md5_addr *addr;
+ 
+ 			addr = (union tcp_md5_addr *)&newsk->sk_v6_daddr;
+-			if (tcp_md5_key_copy(newsk, addr, AF_INET6, 128, l3index, key)) {
+-				inet_csk_prepare_forced_close(newsk);
+-				tcp_done(newsk);
+-				goto out;
+-			}
++			if (tcp_md5_key_copy(newsk, addr, AF_INET6, 128, l3index, key))
++				goto put_and_exit;
+ 		}
+ 	}
+ #endif
+ #ifdef CONFIG_TCP_AO
+ 	/* Copy over tcp_ao_info if any */
+ 	if (tcp_ao_copy_all_matching(sk, newsk, req, skb, AF_INET6))
+-		goto out; /* OOM */
++		goto put_and_exit; /* OOM */
+ #endif
+ 
+-	if (__inet_inherit_port(sk, newsk) < 0) {
+-		inet_csk_prepare_forced_close(newsk);
+-		tcp_done(newsk);
+-		goto out;
+-	}
++	if (__inet_inherit_port(sk, newsk) < 0)
++		goto put_and_exit;
+ 	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash),
+ 				       &found_dup_sk);
+ 	if (*own_req) {
+@@ -1562,13 +1556,17 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
+ 
+ 	return newsk;
+ 
+-out_overflow:
++exit_overflow:
+ 	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
+-out_nonewsk:
++exit_nonewsk:
+ 	dst_release(dst);
+-out:
++exit:
+ 	tcp_listendrop(sk);
+ 	return NULL;
++put_and_exit:
++	inet_csk_prepare_forced_close(newsk);
++	tcp_done(newsk);
++	goto exit;
+ }
+ 
+ INDIRECT_CALLABLE_DECLARE(struct dst_entry *ipv4_dst_check(struct dst_entry *,
+diff --git a/net/mctp/af_mctp.c b/net/mctp/af_mctp.c
+index 70aeebfc4182e1..9a552569143bbf 100644
+--- a/net/mctp/af_mctp.c
++++ b/net/mctp/af_mctp.c
+@@ -346,7 +346,7 @@ static int mctp_getsockopt(struct socket *sock, int level, int optname,
+ 		return 0;
+ 	}
+ 
+-	return -EINVAL;
++	return -ENOPROTOOPT;
+ }
+ 
+ /* helpers for reading/writing the tag ioc, handling compatibility across the
+diff --git a/net/mctp/route.c b/net/mctp/route.c
+index d9c8e5a5f9ce9a..19ff259d7bc437 100644
+--- a/net/mctp/route.c
++++ b/net/mctp/route.c
+@@ -325,6 +325,7 @@ static void mctp_skb_set_flow(struct sk_buff *skb, struct mctp_sk_key *key) {}
+ static void mctp_flow_prepare_output(struct sk_buff *skb, struct mctp_dev *dev) {}
+ #endif
+ 
++/* takes ownership of skb, both in success and failure cases */
+ static int mctp_frag_queue(struct mctp_sk_key *key, struct sk_buff *skb)
+ {
+ 	struct mctp_hdr *hdr = mctp_hdr(skb);
+@@ -334,8 +335,10 @@ static int mctp_frag_queue(struct mctp_sk_key *key, struct sk_buff *skb)
+ 		& MCTP_HDR_SEQ_MASK;
+ 
+ 	if (!key->reasm_head) {
+-		/* Since we're manipulating the shared frag_list, ensure it isn't
+-		 * shared with any other SKBs.
++		/* Since we're manipulating the shared frag_list, ensure it
++		 * isn't shared with any other SKBs. In the cloned case,
++		 * this will free the skb; callers can no longer access it
++		 * safely.
+ 		 */
+ 		key->reasm_head = skb_unshare(skb, GFP_ATOMIC);
+ 		if (!key->reasm_head)
+@@ -349,10 +352,10 @@ static int mctp_frag_queue(struct mctp_sk_key *key, struct sk_buff *skb)
+ 	exp_seq = (key->last_seq + 1) & MCTP_HDR_SEQ_MASK;
+ 
+ 	if (this_seq != exp_seq)
+-		return -EINVAL;
++		goto err_free;
+ 
+ 	if (key->reasm_head->len + skb->len > mctp_message_maxlen)
+-		return -EINVAL;
++		goto err_free;
+ 
+ 	skb->next = NULL;
+ 	skb->sk = NULL;
+@@ -366,6 +369,10 @@ static int mctp_frag_queue(struct mctp_sk_key *key, struct sk_buff *skb)
+ 	key->reasm_head->truesize += skb->truesize;
+ 
+ 	return 0;
++
++err_free:
++	kfree_skb(skb);
++	return -EINVAL;
+ }
+ 
+ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
+@@ -476,18 +483,16 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
+ 			 * key isn't observable yet
+ 			 */
+ 			mctp_frag_queue(key, skb);
++			skb = NULL;
+ 
+ 			/* if the key_add fails, we've raced with another
+ 			 * SOM packet with the same src, dest and tag. There's
+ 			 * no way to distinguish future packets, so all we
+-			 * can do is drop; we'll free the skb on exit from
+-			 * this function.
++			 * can do is drop.
+ 			 */
+ 			rc = mctp_key_add(key, msk);
+-			if (!rc) {
++			if (!rc)
+ 				trace_mctp_key_acquire(key);
+-				skb = NULL;
+-			}
+ 
+ 			/* we don't need to release key->lock on exit, so
+ 			 * clean up here and suppress the unlock via
+@@ -505,8 +510,7 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
+ 				key = NULL;
+ 			} else {
+ 				rc = mctp_frag_queue(key, skb);
+-				if (!rc)
+-					skb = NULL;
++				skb = NULL;
+ 			}
+ 		}
+ 
+@@ -516,17 +520,16 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
+ 		 */
+ 
+ 		/* we need to be continuing an existing reassembly... */
+-		if (!key->reasm_head)
++		if (!key->reasm_head) {
+ 			rc = -EINVAL;
+-		else
++		} else {
+ 			rc = mctp_frag_queue(key, skb);
++			skb = NULL;
++		}
+ 
+ 		if (rc)
+ 			goto out_unlock;
+ 
+-		/* we've queued; the queue owns the skb now */
+-		skb = NULL;
+-
+ 		/* end of message? deliver to socket, and we're done with
+ 		 * the reassembly/response key
+ 		 */
+diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
+index 4ed5878cb25b16..ceb48c3ca0a439 100644
+--- a/net/netfilter/nf_conntrack_helper.c
++++ b/net/netfilter/nf_conntrack_helper.c
+@@ -368,7 +368,7 @@ int nf_conntrack_helper_register(struct nf_conntrack_helper *me)
+ 			    (cur->tuple.src.l3num == NFPROTO_UNSPEC ||
+ 			     cur->tuple.src.l3num == me->tuple.src.l3num) &&
+ 			    cur->tuple.dst.protonum == me->tuple.dst.protonum) {
+-				ret = -EEXIST;
++				ret = -EBUSY;
+ 				goto out;
+ 			}
+ 		}
+@@ -379,7 +379,7 @@ int nf_conntrack_helper_register(struct nf_conntrack_helper *me)
+ 		hlist_for_each_entry(cur, &nf_ct_helper_hash[h], hnode) {
+ 			if (nf_ct_tuple_src_mask_cmp(&cur->tuple, &me->tuple,
+ 						     &mask)) {
+-				ret = -EEXIST;
++				ret = -EBUSY;
+ 				goto out;
+ 			}
+ 		}
+diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
+index 521f5df80e10ca..8a794333e9927c 100644
+--- a/net/smc/smc_clc.c
++++ b/net/smc/smc_clc.c
+@@ -426,8 +426,6 @@ smc_clc_msg_decl_valid(struct smc_clc_msg_decline *dclc)
+ {
+ 	struct smc_clc_msg_hdr *hdr = &dclc->hdr;
+ 
+-	if (hdr->typev1 != SMC_TYPE_R && hdr->typev1 != SMC_TYPE_D)
+-		return false;
+ 	if (hdr->version == SMC_V1) {
+ 		if (ntohs(hdr->length) != sizeof(struct smc_clc_msg_decline))
+ 			return false;
+diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c
+index 9c563cdbea9086..fc07fc4ed99862 100644
+--- a/net/smc/smc_ib.c
++++ b/net/smc/smc_ib.c
+@@ -743,6 +743,9 @@ bool smc_ib_is_sg_need_sync(struct smc_link *lnk,
+ 	unsigned int i;
+ 	bool ret = false;
+ 
++	if (!lnk->smcibdev->ibdev->dma_device)
++		return ret;
++
+ 	/* for now there is just one DMA address */
+ 	for_each_sg(buf_slot->sgt[lnk->link_idx].sgl, sg,
+ 		    buf_slot->sgt[lnk->link_idx].nents, i) {
+diff --git a/net/wireless/scan.c b/net/wireless/scan.c
+index d80ab1725f28dd..f00ccc6d803be3 100644
+--- a/net/wireless/scan.c
++++ b/net/wireless/scan.c
+@@ -1868,7 +1868,8 @@ cfg80211_update_known_bss(struct cfg80211_registered_device *rdev,
+ 			 */
+ 
+ 			f = rcu_access_pointer(new->pub.beacon_ies);
+-			kfree_rcu((struct cfg80211_bss_ies *)f, rcu_head);
++			if (!new->pub.hidden_beacon_bss)
++				kfree_rcu((struct cfg80211_bss_ies *)f, rcu_head);
+ 			return false;
+ 		}
+ 
+diff --git a/net/wireless/sme.c b/net/wireless/sme.c
+index 26817160008766..e0d3c713538b5a 100644
+--- a/net/wireless/sme.c
++++ b/net/wireless/sme.c
+@@ -903,13 +903,16 @@ void __cfg80211_connect_result(struct net_device *dev,
+ 	if (!wdev->u.client.ssid_len) {
+ 		rcu_read_lock();
+ 		for_each_valid_link(cr, link) {
++			u32 ssid_len;
++
+ 			ssid = ieee80211_bss_get_elem(cr->links[link].bss,
+ 						      WLAN_EID_SSID);
+ 
+ 			if (!ssid || !ssid->datalen)
+ 				continue;
+ 
+-			memcpy(wdev->u.client.ssid, ssid->data, ssid->datalen);
++			ssid_len = min(ssid->datalen, IEEE80211_MAX_SSID_LEN);
++			memcpy(wdev->u.client.ssid, ssid->data, ssid_len);
+ 			wdev->u.client.ssid_len = ssid->datalen;
+ 			break;
+ 		}
+diff --git a/scripts/Makefile.kasan b/scripts/Makefile.kasan
+index 693dbbebebba10..0ba2aac3b8dc00 100644
+--- a/scripts/Makefile.kasan
++++ b/scripts/Makefile.kasan
+@@ -86,10 +86,14 @@ kasan_params += hwasan-instrument-stack=$(stack_enable) \
+ 		hwasan-use-short-granules=0 \
+ 		hwasan-inline-all-checks=0
+ 
+-# Instrument memcpy/memset/memmove calls by using instrumented __hwasan_mem*().
+-ifeq ($(call clang-min-version, 150000)$(call gcc-min-version, 130000),y)
+-	kasan_params += hwasan-kernel-mem-intrinsic-prefix=1
+-endif
++# Instrument memcpy/memset/memmove calls by using instrumented __(hw)asan_mem*().
++ifdef CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX
++	ifdef CONFIG_CC_IS_GCC
++		kasan_params += asan-kernel-mem-intrinsic-prefix=1
++	else
++		kasan_params += hwasan-kernel-mem-intrinsic-prefix=1
++	endif
++endif # CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX
+ 
+ endif # CONFIG_KASAN_SW_TAGS
+ 
+diff --git a/scripts/generate_rust_target.rs b/scripts/generate_rust_target.rs
+index 4fd6b6ab3e329d..32a4e6bfa047e9 100644
+--- a/scripts/generate_rust_target.rs
++++ b/scripts/generate_rust_target.rs
+@@ -223,7 +223,11 @@ fn main() {
+         ts.push("features", features);
+         ts.push("llvm-target", "x86_64-linux-gnu");
+         ts.push("supported-sanitizers", ["kcfi", "kernel-address"]);
+-        ts.push("target-pointer-width", "64");
++        if cfg.rustc_version_atleast(1, 91, 0) {
++            ts.push("target-pointer-width", 64);
++        } else {
++            ts.push("target-pointer-width", "64");
++        }
+     } else if cfg.has("X86_32") {
+         // This only works on UML, as i386 otherwise needs regparm support in rustc
+         if !cfg.has("UML") {
+@@ -243,7 +247,11 @@ fn main() {
+         }
+         ts.push("features", features);
+         ts.push("llvm-target", "i386-unknown-linux-gnu");
+-        ts.push("target-pointer-width", "32");
++        if cfg.rustc_version_atleast(1, 91, 0) {
++            ts.push("target-pointer-width", 32);
++        } else {
++            ts.push("target-pointer-width", "32");
++        }
+     } else if cfg.has("LOONGARCH") {
+         panic!("loongarch uses the builtin rustc loongarch64-unknown-none-softfloat target");
+     } else {
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index b05ef4bec6609f..70a90117361c59 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -1991,6 +1991,7 @@ static int hdmi_add_cvt(struct hda_codec *codec, hda_nid_t cvt_nid)
+ static const struct snd_pci_quirk force_connect_list[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x83e2, "HP EliteDesk 800 G4", 1),
+ 	SND_PCI_QUIRK(0x103c, 0x83ef, "HP MP9 G4 Retail System AMS", 1),
++	SND_PCI_QUIRK(0x103c, 0x845a, "HP EliteDesk 800 G4 DM 65W", 1),
+ 	SND_PCI_QUIRK(0x103c, 0x870f, "HP", 1),
+ 	SND_PCI_QUIRK(0x103c, 0x871a, "HP", 1),
+ 	SND_PCI_QUIRK(0x103c, 0x8711, "HP", 1),
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index b31b15cf453a5b..a28a59926adad1 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -11328,6 +11328,8 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1d05, 0x121b, "TongFang GMxAGxx", ALC269_FIXUP_NO_SHUTUP),
+ 	SND_PCI_QUIRK(0x1d05, 0x1387, "TongFang GMxIXxx", ALC2XX_FIXUP_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1d05, 0x1409, "TongFang GMxIXxx", ALC2XX_FIXUP_HEADSET_MIC),
++	SND_PCI_QUIRK(0x1d05, 0x300f, "TongFang X6AR5xxY", ALC2XX_FIXUP_HEADSET_MIC),
++	SND_PCI_QUIRK(0x1d05, 0x3019, "TongFang X6FR5xxY", ALC2XX_FIXUP_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1d17, 0x3288, "Haier Boyue G42", ALC269VC_FIXUP_ACER_VCOPPERBOX_PINS),
+ 	SND_PCI_QUIRK(0x1d72, 0x1602, "RedmiBook", ALC255_FIXUP_XIAOMI_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1d72, 0x1701, "XiaomiNotebook Pro", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE),
+diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
+index ef30d4aaf81a4f..7bd87193c6177b 100644
+--- a/sound/usb/mixer_quirks.c
++++ b/sound/usb/mixer_quirks.c
+@@ -4212,9 +4212,11 @@ void snd_usb_mixer_fu_apply_quirk(struct usb_mixer_interface *mixer,
+ 			snd_dragonfly_quirk_db_scale(mixer, cval, kctl);
+ 		break;
+ 	/* lowest playback value is muted on some devices */
++	case USB_ID(0x0572, 0x1b09): /* Conexant Systems (Rockwell), Inc. */
+ 	case USB_ID(0x0d8c, 0x000c): /* C-Media */
+ 	case USB_ID(0x0d8c, 0x0014): /* C-Media */
+ 	case USB_ID(0x19f7, 0x0003): /* RODE NT-USB */
++	case USB_ID(0x2d99, 0x0026): /* HECATE G2 GAMING HEADSET */
+ 		if (strstr(kctl->id.name, "Playback"))
+ 			cval->min_mute = 1;
+ 		break;
+diff --git a/tools/gpio/Makefile b/tools/gpio/Makefile
+index ed565eb52275f1..342e056c8c665a 100644
+--- a/tools/gpio/Makefile
++++ b/tools/gpio/Makefile
+@@ -77,7 +77,7 @@ $(OUTPUT)gpio-watch: $(GPIO_WATCH_IN)
+ 
+ clean:
+ 	rm -f $(ALL_PROGRAMS)
+-	rm -f $(OUTPUT)include/linux/gpio.h
++	rm -rf $(OUTPUT)include
+ 	find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete -o -name '\.*.cmd' -delete
+ 
+ install: $(ALL_PROGRAMS)
+diff --git a/tools/perf/util/bpf-event.c b/tools/perf/util/bpf-event.c
+index c81444059ad077..d9123c9637baaf 100644
+--- a/tools/perf/util/bpf-event.c
++++ b/tools/perf/util/bpf-event.c
+@@ -290,9 +290,15 @@ static int perf_event__synthesize_one_bpf_prog(struct perf_session *session,
+ 
+ 		info_node->info_linear = info_linear;
+ 		if (!perf_env__insert_bpf_prog_info(env, info_node)) {
+-			free(info_linear);
++			/*
++			 * Insert failed, likely because of a duplicate event
++			 * made by the sideband thread. Ignore synthesizing the
++			 * metadata.
++			 */
+ 			free(info_node);
++			goto out;
+ 		}
++		/* info_linear is now owned by info_node and shouldn't be freed below. */
+ 		info_linear = NULL;
+ 
+ 		/*
+@@ -451,18 +457,18 @@ int perf_event__synthesize_bpf_events(struct perf_session *session,
+ 	return err;
+ }
+ 
+-static void perf_env__add_bpf_info(struct perf_env *env, u32 id)
++static int perf_env__add_bpf_info(struct perf_env *env, u32 id)
+ {
+ 	struct bpf_prog_info_node *info_node;
+ 	struct perf_bpil *info_linear;
+ 	struct btf *btf = NULL;
+ 	u64 arrays;
+ 	u32 btf_id;
+-	int fd;
++	int fd, err = 0;
+ 
+ 	fd = bpf_prog_get_fd_by_id(id);
+ 	if (fd < 0)
+-		return;
++		return -EINVAL;
+ 
+ 	arrays = 1UL << PERF_BPIL_JITED_KSYMS;
+ 	arrays |= 1UL << PERF_BPIL_JITED_FUNC_LENS;
+@@ -475,6 +481,7 @@ static void perf_env__add_bpf_info(struct perf_env *env, u32 id)
+ 	info_linear = get_bpf_prog_info_linear(fd, arrays);
+ 	if (IS_ERR_OR_NULL(info_linear)) {
+ 		pr_debug("%s: failed to get BPF program info. aborting\n", __func__);
++		err = PTR_ERR(info_linear);
+ 		goto out;
+ 	}
+ 
+@@ -484,38 +491,46 @@ static void perf_env__add_bpf_info(struct perf_env *env, u32 id)
+ 	if (info_node) {
+ 		info_node->info_linear = info_linear;
+ 		if (!perf_env__insert_bpf_prog_info(env, info_node)) {
++			pr_debug("%s: duplicate add bpf info request for id %u\n",
++				 __func__, btf_id);
+ 			free(info_linear);
+ 			free(info_node);
++			goto out;
+ 		}
+-	} else
++	} else {
+ 		free(info_linear);
++		err = -ENOMEM;
++		goto out;
++	}
+ 
+ 	if (btf_id == 0)
+ 		goto out;
+ 
+ 	btf = btf__load_from_kernel_by_id(btf_id);
+-	if (libbpf_get_error(btf)) {
+-		pr_debug("%s: failed to get BTF of id %u, aborting\n",
+-			 __func__, btf_id);
+-		goto out;
++	if (!btf) {
++		err = -errno;
++		pr_debug("%s: failed to get BTF of id %u %d\n", __func__, btf_id, err);
++	} else {
++		perf_env__fetch_btf(env, btf_id, btf);
+ 	}
+-	perf_env__fetch_btf(env, btf_id, btf);
+ 
+ out:
+ 	btf__free(btf);
+ 	close(fd);
++	return err;
+ }
+ 
+ static int bpf_event__sb_cb(union perf_event *event, void *data)
+ {
+ 	struct perf_env *env = data;
++	int ret = 0;
+ 
+ 	if (event->header.type != PERF_RECORD_BPF_EVENT)
+ 		return -1;
+ 
+ 	switch (event->bpf.type) {
+ 	case PERF_BPF_EVENT_PROG_LOAD:
+-		perf_env__add_bpf_info(env, event->bpf.id);
++		ret = perf_env__add_bpf_info(env, event->bpf.id);
+ 
+ 	case PERF_BPF_EVENT_PROG_UNLOAD:
+ 		/*
+@@ -529,7 +544,7 @@ static int bpf_event__sb_cb(union perf_event *event, void *data)
+ 		break;
+ 	}
+ 
+-	return 0;
++	return ret;
+ }
+ 
+ int evlist__add_bpf_sb_event(struct evlist *evlist, struct perf_env *env)
+diff --git a/tools/perf/util/bpf-utils.c b/tools/perf/util/bpf-utils.c
+index 80b1d2b3729ba4..5a66dc8594aa88 100644
+--- a/tools/perf/util/bpf-utils.c
++++ b/tools/perf/util/bpf-utils.c
+@@ -20,7 +20,7 @@ struct bpil_array_desc {
+ 				 */
+ };
+ 
+-static struct bpil_array_desc bpil_array_desc[] = {
++static const struct bpil_array_desc bpil_array_desc[] = {
+ 	[PERF_BPIL_JITED_INSNS] = {
+ 		offsetof(struct bpf_prog_info, jited_prog_insns),
+ 		offsetof(struct bpf_prog_info, jited_prog_len),
+@@ -115,7 +115,7 @@ get_bpf_prog_info_linear(int fd, __u64 arrays)
+ 	__u32 info_len = sizeof(info);
+ 	__u32 data_len = 0;
+ 	int i, err;
+-	void *ptr;
++	__u8 *ptr;
+ 
+ 	if (arrays >> PERF_BPIL_LAST_ARRAY)
+ 		return ERR_PTR(-EINVAL);
+@@ -126,15 +126,15 @@ get_bpf_prog_info_linear(int fd, __u64 arrays)
+ 		pr_debug("can't get prog info: %s", strerror(errno));
+ 		return ERR_PTR(-EFAULT);
+ 	}
++	if (info.type >= __MAX_BPF_PROG_TYPE)
++		pr_debug("%s:%d: unexpected program type %u\n", __func__, __LINE__, info.type);
+ 
+ 	/* step 2: calculate total size of all arrays */
+ 	for (i = PERF_BPIL_FIRST_ARRAY; i < PERF_BPIL_LAST_ARRAY; ++i) {
++		const struct bpil_array_desc *desc = &bpil_array_desc[i];
+ 		bool include_array = (arrays & (1UL << i)) > 0;
+-		struct bpil_array_desc *desc;
+ 		__u32 count, size;
+ 
+-		desc = bpil_array_desc + i;
+-
+ 		/* kernel is too old to support this field */
+ 		if (info_len < desc->array_offset + sizeof(__u32) ||
+ 		    info_len < desc->count_offset + sizeof(__u32) ||
+@@ -163,19 +163,20 @@ get_bpf_prog_info_linear(int fd, __u64 arrays)
+ 	ptr = info_linear->data;
+ 
+ 	for (i = PERF_BPIL_FIRST_ARRAY; i < PERF_BPIL_LAST_ARRAY; ++i) {
+-		struct bpil_array_desc *desc;
++		const struct bpil_array_desc *desc = &bpil_array_desc[i];
+ 		__u32 count, size;
+ 
+ 		if ((arrays & (1UL << i)) == 0)
+ 			continue;
+ 
+-		desc  = bpil_array_desc + i;
+ 		count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
+ 		size  = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
+ 		bpf_prog_info_set_offset_u32(&info_linear->info,
+ 					     desc->count_offset, count);
+ 		bpf_prog_info_set_offset_u32(&info_linear->info,
+ 					     desc->size_offset, size);
++		assert(ptr >= info_linear->data);
++		assert(ptr < &info_linear->data[data_len]);
+ 		bpf_prog_info_set_offset_u64(&info_linear->info,
+ 					     desc->array_offset,
+ 					     ptr_to_u64(ptr));
+@@ -189,27 +190,45 @@ get_bpf_prog_info_linear(int fd, __u64 arrays)
+ 		free(info_linear);
+ 		return ERR_PTR(-EFAULT);
+ 	}
++	if (info_linear->info.type >= __MAX_BPF_PROG_TYPE) {
++		pr_debug("%s:%d: unexpected program type %u\n",
++			 __func__, __LINE__, info_linear->info.type);
++	}
+ 
+ 	/* step 6: verify the data */
++	ptr = info_linear->data;
+ 	for (i = PERF_BPIL_FIRST_ARRAY; i < PERF_BPIL_LAST_ARRAY; ++i) {
+-		struct bpil_array_desc *desc;
+-		__u32 v1, v2;
++		const struct bpil_array_desc *desc = &bpil_array_desc[i];
++		__u32 count1, count2, size1, size2;
++		__u64 ptr2;
+ 
+ 		if ((arrays & (1UL << i)) == 0)
+ 			continue;
+ 
+-		desc = bpil_array_desc + i;
+-		v1 = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
+-		v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
++		count1 = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
++		count2 = bpf_prog_info_read_offset_u32(&info_linear->info,
+ 						   desc->count_offset);
+-		if (v1 != v2)
+-			pr_warning("%s: mismatch in element count\n", __func__);
++		if (count1 != count2) {
++			pr_warning("%s: mismatch in element count %u vs %u\n", __func__, count1, count2);
++			free(info_linear);
++			return ERR_PTR(-ERANGE);
++		}
+ 
+-		v1 = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
+-		v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
++		size1 = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
++		size2 = bpf_prog_info_read_offset_u32(&info_linear->info,
+ 						   desc->size_offset);
+-		if (v1 != v2)
+-			pr_warning("%s: mismatch in rec size\n", __func__);
++		if (size1 != size2) {
++			pr_warning("%s: mismatch in rec size %u vs %u\n", __func__, size1, size2);
++			free(info_linear);
++			return ERR_PTR(-ERANGE);
++		}
++		ptr2 = bpf_prog_info_read_offset_u64(&info_linear->info, desc->array_offset);
++		if (ptr_to_u64(ptr) != ptr2) {
++			pr_warning("%s: mismatch in array %p vs %llx\n", __func__, ptr, ptr2);
++			free(info_linear);
++			return ERR_PTR(-ERANGE);
++		}
++		ptr += roundup(count1 * size1, sizeof(__u64));
+ 	}
+ 
+ 	/* step 7: update info_len and data_len */
+@@ -224,13 +243,12 @@ void bpil_addr_to_offs(struct perf_bpil *info_linear)
+ 	int i;
+ 
+ 	for (i = PERF_BPIL_FIRST_ARRAY; i < PERF_BPIL_LAST_ARRAY; ++i) {
+-		struct bpil_array_desc *desc;
++		const struct bpil_array_desc *desc = &bpil_array_desc[i];
+ 		__u64 addr, offs;
+ 
+ 		if ((info_linear->arrays & (1UL << i)) == 0)
+ 			continue;
+ 
+-		desc = bpil_array_desc + i;
+ 		addr = bpf_prog_info_read_offset_u64(&info_linear->info,
+ 						     desc->array_offset);
+ 		offs = addr - ptr_to_u64(info_linear->data);
+@@ -244,13 +262,12 @@ void bpil_offs_to_addr(struct perf_bpil *info_linear)
+ 	int i;
+ 
+ 	for (i = PERF_BPIL_FIRST_ARRAY; i < PERF_BPIL_LAST_ARRAY; ++i) {
+-		struct bpil_array_desc *desc;
++		const struct bpil_array_desc *desc = &bpil_array_desc[i];
+ 		__u64 addr, offs;
+ 
+ 		if ((info_linear->arrays & (1UL << i)) == 0)
+ 			continue;
+ 
+-		desc = bpil_array_desc + i;
+ 		offs = bpf_prog_info_read_offset_u64(&info_linear->info,
+ 						     desc->array_offset);
+ 		addr = offs + ptr_to_u64(info_linear->data);
+diff --git a/tools/power/cpupower/utils/cpupower-set.c b/tools/power/cpupower/utils/cpupower-set.c
+index 0677b58374abf1..59ace394cf3ef9 100644
+--- a/tools/power/cpupower/utils/cpupower-set.c
++++ b/tools/power/cpupower/utils/cpupower-set.c
+@@ -62,8 +62,8 @@ int cmd_set(int argc, char **argv)
+ 
+ 	params.params = 0;
+ 	/* parameter parsing */
+-	while ((ret = getopt_long(argc, argv, "b:e:m:",
+-						set_opts, NULL)) != -1) {
++	while ((ret = getopt_long(argc, argv, "b:e:m:t:",
++				  set_opts, NULL)) != -1) {
+ 		switch (ret) {
+ 		case 'b':
+ 			if (params.perf_bias)
+diff --git a/tools/testing/selftests/drivers/net/hw/csum.py b/tools/testing/selftests/drivers/net/hw/csum.py
+index cb40497faee441..b7e55be9bd9fd3 100755
+--- a/tools/testing/selftests/drivers/net/hw/csum.py
++++ b/tools/testing/selftests/drivers/net/hw/csum.py
+@@ -20,7 +20,7 @@ def test_receive(cfg, ipv4=False, extra_args=None):
+         ip_args = f"-6 -S {cfg.remote_v6} -D {cfg.v6}"
+ 
+     rx_cmd = f"{cfg.bin_local} -i {cfg.ifname} -n 100 {ip_args} -r 1 -R {extra_args}"
+-    tx_cmd = f"{cfg.bin_remote} -i {cfg.ifname} -n 100 {ip_args} -r 1 -T {extra_args}"
++    tx_cmd = f"{cfg.bin_remote} -i {cfg.remote_ifname} -n 100 {ip_args} -r 1 -T {extra_args}"
+ 
+     with bkg(rx_cmd, exit_wait=True):
+         wait_port_listen(34000, proto="udp")
+@@ -43,7 +43,7 @@ def test_transmit(cfg, ipv4=False, extra_args=None):
+     if extra_args != "-U -Z":
+         extra_args += " -r 1"
+ 
+-    rx_cmd = f"{cfg.bin_remote} -i {cfg.ifname} -L 1 -n 100 {ip_args} -R {extra_args}"
++    rx_cmd = f"{cfg.bin_remote} -i {cfg.remote_ifname} -L 1 -n 100 {ip_args} -R {extra_args}"
+     tx_cmd = f"{cfg.bin_local} -i {cfg.ifname} -L 1 -n 100 {ip_args} -T {extra_args}"
+ 
+     with bkg(rx_cmd, host=cfg.remote, exit_wait=True):
+diff --git a/tools/testing/selftests/net/bind_bhash.c b/tools/testing/selftests/net/bind_bhash.c
+index 57ff67a3751eb3..da04b0b19b73ca 100644
+--- a/tools/testing/selftests/net/bind_bhash.c
++++ b/tools/testing/selftests/net/bind_bhash.c
+@@ -75,7 +75,7 @@ static void *setup(void *arg)
+ 	int *array = (int *)arg;
+ 
+ 	for (i = 0; i < MAX_CONNECTIONS; i++) {
+-		sock_fd = bind_socket(SO_REUSEADDR | SO_REUSEPORT, setup_addr);
++		sock_fd = bind_socket(SO_REUSEPORT, setup_addr);
+ 		if (sock_fd < 0) {
+ 			ret = sock_fd;
+ 			pthread_exit(&ret);
+@@ -103,7 +103,7 @@ int main(int argc, const char *argv[])
+ 
+ 	setup_addr = use_v6 ? setup_addr_v6 : setup_addr_v4;
+ 
+-	listener_fd = bind_socket(SO_REUSEADDR | SO_REUSEPORT, setup_addr);
++	listener_fd = bind_socket(SO_REUSEPORT, setup_addr);
+ 	if (listen(listener_fd, 100) < 0) {
+ 		perror("listen failed");
+ 		return -1;
+diff --git a/tools/testing/selftests/net/netfilter/nft_flowtable.sh b/tools/testing/selftests/net/netfilter/nft_flowtable.sh
+index a4ee5496f2a17c..45832df982950c 100755
+--- a/tools/testing/selftests/net/netfilter/nft_flowtable.sh
++++ b/tools/testing/selftests/net/netfilter/nft_flowtable.sh
+@@ -20,6 +20,7 @@ ret=0
+ SOCAT_TIMEOUT=60
+ 
+ nsin=""
++nsin_small=""
+ ns1out=""
+ ns2out=""
+ 
+@@ -36,7 +37,7 @@ cleanup() {
+ 
+ 	cleanup_all_ns
+ 
+-	rm -f "$nsin" "$ns1out" "$ns2out"
++	rm -f "$nsin" "$nsin_small" "$ns1out" "$ns2out"
+ 
+ 	[ "$log_netns" -eq 0 ] && sysctl -q net.netfilter.nf_log_all_netns="$log_netns"
+ }
+@@ -72,6 +73,7 @@ lmtu=1500
+ rmtu=2000
+ 
+ filesize=$((2 * 1024 * 1024))
++filesize_small=$((filesize / 16))
+ 
+ usage(){
+ 	echo "nft_flowtable.sh [OPTIONS]"
+@@ -89,7 +91,10 @@ do
+ 		o) omtu=$OPTARG;;
+ 		l) lmtu=$OPTARG;;
+ 		r) rmtu=$OPTARG;;
+-		s) filesize=$OPTARG;;
++		s)
++			filesize=$OPTARG
++			filesize_small=$((OPTARG / 16))
++		;;
+ 		*) usage;;
+ 	esac
+ done
+@@ -215,6 +220,7 @@ if ! ip netns exec "$ns2" ping -c 1 -q 10.0.1.99 > /dev/null; then
+ fi
+ 
+ nsin=$(mktemp)
++nsin_small=$(mktemp)
+ ns1out=$(mktemp)
+ ns2out=$(mktemp)
+ 
+@@ -265,6 +271,7 @@ check_counters()
+ check_dscp()
+ {
+ 	local what=$1
++	local pmtud="$2"
+ 	local ok=1
+ 
+ 	local counter
+@@ -277,37 +284,39 @@ check_dscp()
+ 	local pc4z=${counter%*bytes*}
+ 	local pc4z=${pc4z#*packets}
+ 
++	local failmsg="FAIL: pmtu $pmtu: $what counters do not match, expected"
++
+ 	case "$what" in
+ 	"dscp_none")
+ 		if [ "$pc4" -gt 0 ] || [ "$pc4z" -eq 0 ]; then
+-			echo "FAIL: dscp counters do not match, expected dscp3 == 0, dscp0 > 0, but got $pc4,$pc4z" 1>&2
++			echo "$failmsg dscp3 == 0, dscp0 > 0, but got $pc4,$pc4z" 1>&2
+ 			ret=1
+ 			ok=0
+ 		fi
+ 		;;
+ 	"dscp_fwd")
+ 		if [ "$pc4" -eq 0 ] || [ "$pc4z" -eq 0 ]; then
+-			echo "FAIL: dscp counters do not match, expected dscp3 and dscp0 > 0 but got $pc4,$pc4z" 1>&2
++			echo "$failmsg dscp3 and dscp0 > 0 but got $pc4,$pc4z" 1>&2
+ 			ret=1
+ 			ok=0
+ 		fi
+ 		;;
+ 	"dscp_ingress")
+ 		if [ "$pc4" -eq 0 ] || [ "$pc4z" -gt 0 ]; then
+-			echo "FAIL: dscp counters do not match, expected dscp3 > 0, dscp0 == 0 but got $pc4,$pc4z" 1>&2
++			echo "$failmsg dscp3 > 0, dscp0 == 0 but got $pc4,$pc4z" 1>&2
+ 			ret=1
+ 			ok=0
+ 		fi
+ 		;;
+ 	"dscp_egress")
+ 		if [ "$pc4" -eq 0 ] || [ "$pc4z" -gt 0 ]; then
+-			echo "FAIL: dscp counters do not match, expected dscp3 > 0, dscp0 == 0 but got $pc4,$pc4z" 1>&2
++			echo "$failmsg dscp3 > 0, dscp0 == 0 but got $pc4,$pc4z" 1>&2
+ 			ret=1
+ 			ok=0
+ 		fi
+ 		;;
+ 	*)
+-		echo "FAIL: Unknown DSCP check" 1>&2
++		echo "$failmsg: Unknown DSCP check" 1>&2
+ 		ret=1
+ 		ok=0
+ 	esac
+@@ -319,9 +328,9 @@ check_dscp()
+ 
+ check_transfer()
+ {
+-	in=$1
+-	out=$2
+-	what=$3
++	local in=$1
++	local out=$2
++	local what=$3
+ 
+ 	if ! cmp "$in" "$out" > /dev/null 2>&1; then
+ 		echo "FAIL: file mismatch for $what" 1>&2
+@@ -342,25 +351,39 @@ test_tcp_forwarding_ip()
+ {
+ 	local nsa=$1
+ 	local nsb=$2
+-	local dstip=$3
+-	local dstport=$4
++	local pmtu=$3
++	local dstip=$4
++	local dstport=$5
+ 	local lret=0
++	local socatc
++	local socatl
++	local infile="$nsin"
++
++	if [ $pmtu -eq 0 ]; then
++		infile="$nsin_small"
++	fi
+ 
+-	timeout "$SOCAT_TIMEOUT" ip netns exec "$nsb" socat -4 TCP-LISTEN:12345,reuseaddr STDIO < "$nsin" > "$ns2out" &
++	timeout "$SOCAT_TIMEOUT" ip netns exec "$nsb" socat -4 TCP-LISTEN:12345,reuseaddr STDIO < "$infile" > "$ns2out" &
+ 	lpid=$!
+ 
+ 	busywait 1000 listener_ready
+ 
+-	timeout "$SOCAT_TIMEOUT" ip netns exec "$nsa" socat -4 TCP:"$dstip":"$dstport" STDIO < "$nsin" > "$ns1out"
++	timeout "$SOCAT_TIMEOUT" ip netns exec "$nsa" socat -4 TCP:"$dstip":"$dstport" STDIO < "$infile" > "$ns1out"
++	socatc=$?
+ 
+ 	wait $lpid
++	socatl=$?
+ 
+-	if ! check_transfer "$nsin" "$ns2out" "ns1 -> ns2"; then
++	if [ $socatl -ne 0 ] || [ $socatc -ne 0 ];then
++		rc=1
++	fi
++
++	if ! check_transfer "$infile" "$ns2out" "ns1 -> ns2"; then
+ 		lret=1
+ 		ret=1
+ 	fi
+ 
+-	if ! check_transfer "$nsin" "$ns1out" "ns1 <- ns2"; then
++	if ! check_transfer "$infile" "$ns1out" "ns1 <- ns2"; then
+ 		lret=1
+ 		ret=1
+ 	fi
+@@ -370,14 +393,16 @@ test_tcp_forwarding_ip()
+ 
+ test_tcp_forwarding()
+ {
+-	test_tcp_forwarding_ip "$1" "$2" 10.0.2.99 12345
++	local pmtu="$3"
++
++	test_tcp_forwarding_ip "$1" "$2" "$pmtu" 10.0.2.99 12345
+ 
+ 	return $?
+ }
+ 
+ test_tcp_forwarding_set_dscp()
+ {
+-	check_dscp "dscp_none"
++	local pmtu="$3"
+ 
+ ip netns exec "$nsr1" nft -f - <<EOF
+ table netdev dscpmangle {
+@@ -388,8 +413,8 @@ table netdev dscpmangle {
+ }
+ EOF
+ if [ $? -eq 0 ]; then
+-	test_tcp_forwarding_ip "$1" "$2"  10.0.2.99 12345
+-	check_dscp "dscp_ingress"
++	test_tcp_forwarding_ip "$1" "$2" "$3" 10.0.2.99 12345
++	check_dscp "dscp_ingress" "$pmtu"
+ 
+ 	ip netns exec "$nsr1" nft delete table netdev dscpmangle
+ else
+@@ -405,10 +430,10 @@ table netdev dscpmangle {
+ }
+ EOF
+ if [ $? -eq 0 ]; then
+-	test_tcp_forwarding_ip "$1" "$2"  10.0.2.99 12345
+-	check_dscp "dscp_egress"
++	test_tcp_forwarding_ip "$1" "$2" "$pmtu"  10.0.2.99 12345
++	check_dscp "dscp_egress" "$pmtu"
+ 
+-	ip netns exec "$nsr1" nft flush table netdev dscpmangle
++	ip netns exec "$nsr1" nft delete table netdev dscpmangle
+ else
+ 	echo "SKIP: Could not load netdev:egress for veth1"
+ fi
+@@ -416,48 +441,53 @@ fi
+ 	# partial.  If flowtable really works, then both dscp-is-0 and dscp-is-cs3
+ 	# counters should have seen packets (before and after ft offload kicks in).
+ 	ip netns exec "$nsr1" nft -a insert rule inet filter forward ip dscp set cs3
+-	test_tcp_forwarding_ip "$1" "$2"  10.0.2.99 12345
+-	check_dscp "dscp_fwd"
++	test_tcp_forwarding_ip "$1" "$2" "$pmtu"  10.0.2.99 12345
++	check_dscp "dscp_fwd" "$pmtu"
+ }
+ 
+ test_tcp_forwarding_nat()
+ {
++	local nsa="$1"
++	local nsb="$2"
++	local pmtu="$3"
++	local what="$4"
+ 	local lret
+-	local pmtu
+ 
+-	test_tcp_forwarding_ip "$1" "$2" 10.0.2.99 12345
+-	lret=$?
++	[ "$pmtu" -eq 0 ] && what="$what (pmtu disabled)"
+ 
+-	pmtu=$3
+-	what=$4
++	test_tcp_forwarding_ip "$nsa" "$nsb" "$pmtu" 10.0.2.99 12345
++	lret=$?
+ 
+ 	if [ "$lret" -eq 0 ] ; then
+ 		if [ "$pmtu" -eq 1 ] ;then
+-			check_counters "flow offload for ns1/ns2 with masquerade and pmtu discovery $what"
++			check_counters "flow offload for ns1/ns2 with masquerade $what"
+ 		else
+ 			echo "PASS: flow offload for ns1/ns2 with masquerade $what"
+ 		fi
+ 
+-		test_tcp_forwarding_ip "$1" "$2" 10.6.6.6 1666
++		test_tcp_forwarding_ip "$1" "$2" "$pmtu" 10.6.6.6 1666
+ 		lret=$?
+ 		if [ "$pmtu" -eq 1 ] ;then
+-			check_counters "flow offload for ns1/ns2 with dnat and pmtu discovery $what"
++			check_counters "flow offload for ns1/ns2 with dnat $what"
+ 		elif [ "$lret" -eq 0 ] ; then
+ 			echo "PASS: flow offload for ns1/ns2 with dnat $what"
+ 		fi
++	else
++		echo "FAIL: flow offload for ns1/ns2 with dnat $what"
+ 	fi
+ 
+ 	return $lret
+ }
+ 
+ make_file "$nsin" "$filesize"
++make_file "$nsin_small" "$filesize_small"
+ 
+ # First test:
+ # No PMTU discovery, nsr1 is expected to fragment packets from ns1 to ns2 as needed.
+ # Due to MTU mismatch in both directions, all packets (except small packets like pure
+ # acks) have to be handled by normal forwarding path.  Therefore, packet counters
+ # are not checked.
+-if test_tcp_forwarding "$ns1" "$ns2"; then
++if test_tcp_forwarding "$ns1" "$ns2" 0; then
+ 	echo "PASS: flow offloaded for ns1/ns2"
+ else
+ 	echo "FAIL: flow offload for ns1/ns2:" 1>&2
+@@ -489,8 +519,9 @@ table ip nat {
+ }
+ EOF
+ 
++check_dscp "dscp_none" "0"
+ if ! test_tcp_forwarding_set_dscp "$ns1" "$ns2" 0 ""; then
+-	echo "FAIL: flow offload for ns1/ns2 with dscp update" 1>&2
++	echo "FAIL: flow offload for ns1/ns2 with dscp update and no pmtu discovery" 1>&2
+ 	exit 0
+ fi
+ 
+@@ -512,6 +543,14 @@ ip netns exec "$ns2" sysctl net.ipv4.ip_no_pmtu_disc=0 > /dev/null
+ # are lower than file size and packets were forwarded via flowtable layer.
+ # For earlier tests (large mtus), packets cannot be handled via flowtable
+ # (except pure acks and other small packets).
++ip netns exec "$nsr1" nft reset counters table inet filter >/dev/null
++ip netns exec "$ns2"  nft reset counters table inet filter >/dev/null
++
++if ! test_tcp_forwarding_set_dscp "$ns1" "$ns2" 1 ""; then
++	echo "FAIL: flow offload for ns1/ns2 with dscp update and pmtu discovery" 1>&2
++	exit 0
++fi
++
+ ip netns exec "$nsr1" nft reset counters table inet filter >/dev/null
+ 
+ if ! test_tcp_forwarding_nat "$ns1" "$ns2" 1 ""; then
+@@ -644,7 +683,7 @@ ip -net "$ns2" route del 192.168.10.1 via 10.0.2.1
+ ip -net "$ns2" route add default via 10.0.2.1
+ ip -net "$ns2" route add default via dead:2::1
+ 
+-if test_tcp_forwarding "$ns1" "$ns2"; then
++if test_tcp_forwarding "$ns1" "$ns2" 1; then
+ 	check_counters "ipsec tunnel mode for ns1/ns2"
+ else
+ 	echo "FAIL: ipsec tunnel mode for ns1/ns2"
+@@ -668,7 +707,7 @@ if [ "$1" = "" ]; then
+ 	fi
+ 
+ 	echo "re-run with random mtus and file size: -o $o -l $l -r $r -s $filesize"
+-	$0 -o "$o" -l "$l" -r "$r" -s "$filesize"
++	$0 -o "$o" -l "$l" -r "$r" -s "$filesize" || ret=1
+ fi
+ 
+ exit $ret


* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2025-09-05 12:48 Arisu Tachibana
  0 siblings, 0 replies; 82+ messages in thread
From: Arisu Tachibana @ 2025-09-05 12:48 UTC
  To: gentoo-commits

commit:     e6e684a396fcef0da2e31379480ba316fee7013f
Author:     Arisu Tachibana <alicef <AT> gentoo <DOT> org>
AuthorDate: Fri Sep  5 12:47:48 2025 +0000
Commit:     Arisu Tachibana <alicef <AT> gentoo <DOT> org>
CommitDate: Fri Sep  5 12:48:34 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=e6e684a3

Add proc: fix type confusion in pde_set_flags()

Signed-off-by: Arisu Tachibana <alicef <AT> gentoo.org>

 0000_README                                        |  4 +++
 ..._proc_fix_type_confusion_in_pde_set_flags.patch | 40 ++++++++++++++++++++++
 2 files changed, 44 insertions(+)

diff --git a/0000_README b/0000_README
index c7ef2822..a7d5ef72 100644
--- a/0000_README
+++ b/0000_README
@@ -247,6 +247,10 @@ Patch:  1800_proc_fix_missing_pde_set_flags_for_net_proc_files.patch
 From:   https://lore.kernel.org/all/20250821105806.1453833-1-wangzijie1@honor.com/
 Desc:   proc: fix missing pde_set_flags() for net proc files
 
+Patch:  1801_proc_fix_type_confusion_in_pde_set_flags.patch
+From:   https://lore.kernel.org/linux-fsdevel/20250904135715.3972782-1-wangzijie1@honor.com/
+Desc:   proc: fix type confusion in pde_set_flags()
+
 Patch:  2000_BT-Check-key-sizes-only-if-Secure-Simple-Pairing-enabled.patch
 From:   https://lore.kernel.org/linux-bluetooth/20190522070540.48895-1-marcel@holtmann.org/raw
 Desc:   Bluetooth: Check key sizes only when Secure Simple Pairing is enabled. See bug #686758

diff --git a/1801_proc_fix_type_confusion_in_pde_set_flags.patch b/1801_proc_fix_type_confusion_in_pde_set_flags.patch
new file mode 100644
index 00000000..4777dbdc
--- /dev/null
+++ b/1801_proc_fix_type_confusion_in_pde_set_flags.patch
@@ -0,0 +1,40 @@
+Subject: [PATCH] proc: fix type confusion in pde_set_flags()
+
+Commit 2ce3d282bd50 ("proc: fix missing pde_set_flags() for net proc files")
+missed a key part in the definition of proc_dir_entry:
+
+union {
+	const struct proc_ops *proc_ops;
+	const struct file_operations *proc_dir_ops;
+};
+
+So dereferencing ->proc_ops as if it always pointed to a proc_ops
+structure results in type confusion, and the NULL check on 'proc_ops'
+does not work for proc directories.
+
+Add a !S_ISDIR(dp->mode) test before calling pde_set_flags() to fix it.
+
+Fixes: 2ce3d282bd50 ("proc: fix missing pde_set_flags() for net proc files")
+Reported-by: Brad Spengler <spender@grsecurity.net>
+Signed-off-by: wangzijie <wangzijie1@honor.com>
+---
+ fs/proc/generic.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/fs/proc/generic.c b/fs/proc/generic.c
+index bd0c099cf..176281112 100644
+--- a/fs/proc/generic.c
++++ b/fs/proc/generic.c
+@@ -393,7 +393,8 @@ struct proc_dir_entry *proc_register(struct proc_dir_entry *dir,
+ 	if (proc_alloc_inum(&dp->low_ino))
+ 		goto out_free_entry;
+ 
+-	pde_set_flags(dp);
++	if (!S_ISDIR(dp->mode))
++		pde_set_flags(dp);
+ 
+ 	write_lock(&proc_subdir_lock);
+ 	dp->parent = dir;
+-- 
+2.25.1
+
+
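
For context, a minimal userspace sketch of the union aliasing described
above. The structs are illustrative stand-ins, not the real
proc_dir_entry, and only the S_ISDIR() guard mirrors the actual fix:

#include <stdio.h>
#include <sys/stat.h>

struct proc_ops { unsigned int proc_flags; };
struct file_operations { int (*open)(void); };

struct pde {
	unsigned short mode;	/* S_IFREG or S_IFDIR bits */
	union {
		const struct proc_ops *proc_ops;		/* files */
		const struct file_operations *proc_dir_ops;	/* dirs */
	};
};

/* Only valid when the union really holds a proc_ops pointer. */
static void pde_set_flags(const struct pde *dp)
{
	if (dp->proc_ops && dp->proc_ops->proc_flags)
		printf("flags 0x%x\n", dp->proc_ops->proc_flags);
}

int main(void)
{
	static const struct file_operations dir_fops = { NULL };
	struct pde dir = { .mode = S_IFDIR, .proc_dir_ops = &dir_fops };

	/* The fix: a directory's union member is proc_dir_ops, so
	 * never interpret it as proc_ops. */
	if (!S_ISDIR(dir.mode))
		pde_set_flags(&dir);
	return 0;
}

Without the guard, pde_set_flags() would read dir_fops through the wrong
pointer type, which is exactly the confusion the patch closes.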


* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2025-09-04 15:48 Arisu Tachibana
  0 siblings, 0 replies; 82+ messages in thread
From: Arisu Tachibana @ 2025-09-04 15:48 UTC
  To: gentoo-commits

commit:     0d0dc789aca0a5a1d460327660d3fb89a342931b
Author:     Arisu Tachibana <alicef <AT> gentoo <DOT> org>
AuthorDate: Thu Sep  4 15:48:16 2025 +0000
Commit:     Arisu Tachibana <alicef <AT> gentoo <DOT> org>
CommitDate: Thu Sep  4 15:48:16 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=0d0dc789

Remove net/ipv4: Fix regression in local-broadcast routes

Signed-off-by: Arisu Tachibana <alicef <AT> gentoo.org>

 0000_README                                        |   4 -
 ..._fix_regression_in_local-broadcast_routes.patch | 134 ---------------------
 2 files changed, 138 deletions(-)

diff --git a/0000_README b/0000_README
index deb0a996..c7ef2822 100644
--- a/0000_README
+++ b/0000_README
@@ -251,10 +251,6 @@ Patch:  2000_BT-Check-key-sizes-only-if-Secure-Simple-Pairing-enabled.patch
 From:   https://lore.kernel.org/linux-bluetooth/20190522070540.48895-1-marcel@holtmann.org/raw
 Desc:   Bluetooth: Check key sizes only when Secure Simple Pairing is enabled. See bug #686758
 
-Patch:  2010_ipv4_fix_regression_in_local-broadcast_routes.patch
-From:   https://lore.kernel.org/regressions/20250826121750.8451-1-oscmaes92@gmail.com/
-Desc:   net: ipv4: fix regression in local-broadcast routes
-
 Patch:  2910_bfp-mark-get-entry-ip-as--maybe-unused.patch
 From:   https://www.spinics.net/lists/stable/msg604665.html
 Desc:   bpf: mark get_entry_ip as __maybe_unused

diff --git a/2010_ipv4_fix_regression_in_local-broadcast_routes.patch b/2010_ipv4_fix_regression_in_local-broadcast_routes.patch
deleted file mode 100644
index a306132d..00000000
--- a/2010_ipv4_fix_regression_in_local-broadcast_routes.patch
+++ /dev/null
@@ -1,134 +0,0 @@
-From mboxrd@z Thu Jan  1 00:00:00 1970
-Received: from mail-wm1-f48.google.com (mail-wm1-f48.google.com [209.85.128.48])
-	(using TLSv1.2 with cipher ECDHE-RSA-AES128-GCM-SHA256 (128/128 bits))
-	(No client certificate requested)
-	by smtp.subspace.kernel.org (Postfix) with ESMTPS id 35E81393DF2
-	for <regressions@lists.linux.dev>; Tue, 26 Aug 2025 12:18:24 +0000 (UTC)
-Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=209.85.128.48
-ARC-Seal:i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116;
-	t=1756210706; cv=none; b=Da/rRcCPW+gdgl9sh1AJU0E8vP05G0xfCEUnpWuqnQjaf8/mvVPUhzba4pXCTtFhHNsTTT3iEOPPiPqCzdNwRexxsZIkyL6JGG+hXkV8cn+i7XctZ961TmWYP8ACY74i8MLs7Iud+2gt8y4VrLoMeHXcE7ripzyOxa8THiVuFTc=
-ARC-Message-Signature:i=1; a=rsa-sha256; d=subspace.kernel.org;
-	s=arc-20240116; t=1756210706; c=relaxed/simple;
-	bh=WNRFfbyB1JScy1/30FZa+Ntq9RVZSUi/ijHlpcIjNBs=;
-	h=From:To:Cc:Subject:Date:Message-Id:In-Reply-To:References:
-	 MIME-Version; b=Y3iH3AFJjiR147yq3M5X/KlRR6baEAus+ZHb4N2PZZKa0T3Ln2c2/SnZLXQgRCa8rdr3MCFoXaoDuRUcx8k744Dh1j64HY9sRnYjM01rc0Kh+iaf3nZ0jYkC+zOL+8Wv5eWgNbDX5Qg+WzwUQMQhrC5xEQNjNorKTxd+SRFGpao=
-ARC-Authentication-Results:i=1; smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=gmail.com; spf=pass smtp.mailfrom=gmail.com; dkim=pass (2048-bit key) header.d=gmail.com header.i=@gmail.com header.b=dvEwPzW3; arc=none smtp.client-ip=209.85.128.48
-Authentication-Results: smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=gmail.com
-Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=gmail.com
-Authentication-Results: smtp.subspace.kernel.org;
-	dkim=pass (2048-bit key) header.d=gmail.com header.i=@gmail.com header.b="dvEwPzW3"
-Received: by mail-wm1-f48.google.com with SMTP id 5b1f17b1804b1-45a1b004954so43862245e9.0
-        for <regressions@lists.linux.dev>; Tue, 26 Aug 2025 05:18:23 -0700 (PDT)
-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed;
-        d=gmail.com; s=20230601; t=1756210702; x=1756815502; darn=lists.linux.dev;
-        h=content-transfer-encoding:mime-version:references:in-reply-to
-         :message-id:date:subject:cc:to:from:from:to:cc:subject:date
-         :message-id:reply-to;
-        bh=LChTYlNX7jpHHdvqfK7E+ehTE+2KqMA/oVeIigfrSAA=;
-        b=dvEwPzW3bP5r/IJF4+nyqmSsoFRE2/TxvBct7S3hXKOLTfxpExbkGfpZTxb/wRhBAJ
-         wQL8iEOoH47boqy/i72LQhH6bNLS72yU2FMpqZNVENRJqtwB6lq8PJlRvDn7gEVW4awK
-         8Phof2i45jLRu1288bEfZkYpSVK0hclcCXgP/5f7t0zNSdutKc/aOXCyLeoIeciLR4Zx
-         JmtIedPpVahUnw0oCgxmQbOkHd3yf1xoxAiEblYfya59tRXPty3gfMnh2Ox4gTYn29NF
-         kp+PqMg4GxVf0j4TZMuCnBqnjtYFkfDpGyqNr4HBBV4PzdZjjbuJ8bPNOVUNOzk14j+4
-         JE9Q==
-X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed;
-        d=1e100.net; s=20230601; t=1756210702; x=1756815502;
-        h=content-transfer-encoding:mime-version:references:in-reply-to
-         :message-id:date:subject:cc:to:from:x-gm-message-state:from:to:cc
-         :subject:date:message-id:reply-to;
-        bh=LChTYlNX7jpHHdvqfK7E+ehTE+2KqMA/oVeIigfrSAA=;
-        b=XlKRIeF/DtRxj+OA67VSgyB25oK9Z0gak5vT5pjoH+XFlP+Y6y9GSx70oRvvIgIZE0
-         apTakbKssvoFmeCLmAFQRStZfubuWoor6Ond1N/6K7j7VBU11eysPUkeo6jQSTzdSQMt
-         v9Jq11Lnaii0ms5s6kIaWWPR9lGAWFb++ZJNYkXt59iXnhEVlVW4dFssD6VR/VJnyX+e
-         A+eGOVoa1k3c4ae23Wmq55GQR1iKbviKO28+BXatjKRWcFjaTgedk1WATZrrwcRYdD2E
-         a3r6R5iTOkMNX/TOJ4v2X7s69ndC+qxxJQ0yLTAsmfV1EDGUp3kwwkdIVl3UDqUhHszh
-         N0+w==
-X-Forwarded-Encrypted: i=1; AJvYcCX+aV0s2nW7qE7ZH57rmDl4GNnOxwFmQdMvPxxvM100/HNXQPAUrKLUeYBdO5rTpnftaJQ4J3zRSge7XA==@lists.linux.dev
-X-Gm-Message-State: AOJu0Yy9j79mStVe7fYpUjVZm00DYURS6tKQYofu48lxIG03z+fJEMUq
-	NKggf5H7k0btf9k9VXff6yWYNoL6JnO/uWjuPcDWrTtpRme13iQ8weyk
-X-Gm-Gg: ASbGncv/CVsSHrFQyxd//IAOzxZbvxje250ZYi2TUZi9g/Gf4x/86XgM4MjXoZFeBZB
-	4c00kmZrQIKWk4ToI+ySCSydYzZQbrw+nGnrad6FqeWQESk5tqOBYnIYKTUT+rseRkG5dukKJdE
-	lNeFu0sfmmoAnvNyKtLNqG9VwFQtqSwODKIKH+CZ92mMBuWe4ePVv4JQpz/fUhIRN+eZBdfDwUZ
-	eWZScFkZRFJo2SrVq9Ku3CIOA8hD0ktkkBDaFj57r+4YoToeLSvbCzzZrcFGoj2E1zqyTSlUhbf
-	SsKS4HgBDjkhx2k41IZAVyT+pE/GfU2BgS6BmY/VUxh72VrmHWCvbCnGX1TsHixJSwCGJiilLTg
-	KuDu6j0RQCZjyzUt7t8/H5A==
-X-Google-Smtp-Source: AGHT+IGDspJcry+lZbYtZeVg4+4kmcTBPmZuyilfg0+W2o8HlDsbRsJZkF4781x4cl6MBUZul/po1A==
-X-Received: by 2002:a05:600c:3b2a:b0:453:2066:4a26 with SMTP id 5b1f17b1804b1-45b5179f3d1mr194717105e9.16.1756210702258;
-        Tue, 26 Aug 2025 05:18:22 -0700 (PDT)
-Received: from oscar-xps.. ([45.128.133.231])
-        by smtp.gmail.com with ESMTPSA id 5b1f17b1804b1-45b5758a0bfsm149513675e9.20.2025.08.26.05.18.18
-        (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256);
-        Tue, 26 Aug 2025 05:18:21 -0700 (PDT)
-From: Oscar Maes <oscmaes92@gmail.com>
-To: bacs@librecast.net,
-	brett@librecast.net,
-	kuba@kernel.org
-Cc: davem@davemloft.net,
-	dsahern@kernel.org,
-	netdev@vger.kernel.org,
-	regressions@lists.linux.dev,
-	stable@vger.kernel.org,
-	Oscar Maes <oscmaes92@gmail.com>
-Subject: [PATCH net v2 1/2] net: ipv4: fix regression in local-broadcast routes
-Date: Tue, 26 Aug 2025 14:17:49 +0200
-Message-Id: <20250826121750.8451-1-oscmaes92@gmail.com>
-X-Mailer: git-send-email 2.39.5
-In-Reply-To: <20250826121126-oscmaes92@gmail.com>
-References: <20250826121126-oscmaes92@gmail.com>
-Precedence: bulk
-X-Mailing-List: regressions@lists.linux.dev
-List-Id: <regressions.lists.linux.dev>
-List-Subscribe: <mailto:regressions+subscribe@lists.linux.dev>
-List-Unsubscribe: <mailto:regressions+unsubscribe@lists.linux.dev>
-MIME-Version: 1.0
-Content-Transfer-Encoding: 8bit
-
-Commit 9e30ecf23b1b ("net: ipv4: fix incorrect MTU in broadcast routes")
-introduced a regression where local-broadcast packets would have their
-gateway set in __mkroute_output, which was caused by fi = NULL being
-removed.
-
-Fix this by resetting the fib_info for local-broadcast packets. This
-preserves the intended changes for directed-broadcast packets.
-
-Cc: stable@vger.kernel.org
-Fixes: 9e30ecf23b1b ("net: ipv4: fix incorrect MTU in broadcast routes")
-Reported-by: Brett A C Sheffield <bacs@librecast.net>
-Closes: https://lore.kernel.org/regressions/20250822165231.4353-4-bacs@librecast.net
-Signed-off-by: Oscar Maes <oscmaes92@gmail.com>
----
-
-Thanks to Brett Sheffield for finding the regression and writing
-the initial fix!
----
- net/ipv4/route.c | 10 +++++++---
- 1 file changed, 7 insertions(+), 3 deletions(-)
-
-diff --git a/net/ipv4/route.c b/net/ipv4/route.c
-index 1f212b2ce4c6..24c898b7654f 100644
---- a/net/ipv4/route.c
-+++ b/net/ipv4/route.c
-@@ -2575,12 +2575,16 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
- 		    !netif_is_l3_master(dev_out))
- 			return ERR_PTR(-EINVAL);
- 
--	if (ipv4_is_lbcast(fl4->daddr))
-+	if (ipv4_is_lbcast(fl4->daddr)) {
- 		type = RTN_BROADCAST;
--	else if (ipv4_is_multicast(fl4->daddr))
-+
-+		/* reset fi to prevent gateway resolution */
-+		fi = NULL;
-+	} else if (ipv4_is_multicast(fl4->daddr)) {
- 		type = RTN_MULTICAST;
--	else if (ipv4_is_zeronet(fl4->daddr))
-+	} else if (ipv4_is_zeronet(fl4->daddr)) {
- 		return ERR_PTR(-EINVAL);
-+	}
- 
- 	if (dev_out->flags & IFF_LOOPBACK)
- 		flags |= RTCF_LOCAL;
--- 
-2.39.5
-
-
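
For reference, a minimal userspace sketch of the classification logic the
removed patch restored. The ipv4_is_* helpers approximate
include/linux/in.h, and fib_info here is an illustrative stand-in, not
the kernel structure:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

struct fib_info { const char *gateway; };

static int ipv4_is_lbcast(uint32_t daddr)    { return daddr == htonl(0xffffffff); }
static int ipv4_is_multicast(uint32_t daddr) { return (ntohl(daddr) & 0xf0000000) == 0xe0000000; }
static int ipv4_is_zeronet(uint32_t daddr)   { return (ntohl(daddr) & 0xff000000) == 0; }

static const char *classify(const struct fib_info **fi, uint32_t daddr)
{
	if (ipv4_is_lbcast(daddr)) {
		*fi = NULL;	/* reset fi: no gateway resolution for local broadcast */
		return "RTN_BROADCAST";
	}
	if (ipv4_is_multicast(daddr))
		return "RTN_MULTICAST";
	if (ipv4_is_zeronet(daddr))
		return "-EINVAL";
	return "RTN_UNICAST";
}

int main(void)
{
	struct fib_info info = { .gateway = "192.168.1.1" };
	const struct fib_info *fi = &info;
	const char *type = classify(&fi, htonl(0xffffffff));

	/* Prints "RTN_BROADCAST gw=(none)": 255.255.255.255 never
	 * picks up a gateway once fi is reset. */
	printf("%s gw=%s\n", type, fi ? fi->gateway : "(none)");
	return 0;
}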


* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2025-09-04 15:33 Arisu Tachibana
  0 siblings, 0 replies; 82+ messages in thread
From: Arisu Tachibana @ 2025-09-04 15:33 UTC
  To: gentoo-commits

commit:     48b8bf888cf34f1466dc95b4d64ca224564d1036
Author:     Arisu Tachibana <alicef <AT> gentoo <DOT> org>
AuthorDate: Thu Sep  4 15:33:46 2025 +0000
Commit:     Arisu Tachibana <alicef <AT> gentoo <DOT> org>
CommitDate: Thu Sep  4 15:33:46 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=48b8bf88

Linux patch 6.12.45

Signed-off-by: Arisu Tachibana <alicef <AT> gentoo.org>

 0000_README              |    4 +
 1044_linux-6.12.45.patch | 3752 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3756 insertions(+)

diff --git a/0000_README b/0000_README
index ab6b7b1d..deb0a996 100644
--- a/0000_README
+++ b/0000_README
@@ -219,6 +219,10 @@ Patch:  1043_linux-6.12.44.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.12.44
 
+Patch:  1044_linux-6.12.45.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.12.45
+
 Patch:  1500_fortify-copy-size-value-range-tracking-fix.patch
 From:   https://git.kernel.org/
 Desc:   fortify: Hide run-time copy size from value range tracking

diff --git a/1044_linux-6.12.45.patch b/1044_linux-6.12.45.patch
new file mode 100644
index 00000000..182f8678
--- /dev/null
+++ b/1044_linux-6.12.45.patch
@@ -0,0 +1,3752 @@
+diff --git a/Documentation/devicetree/bindings/display/msm/qcom,mdp5.yaml b/Documentation/devicetree/bindings/display/msm/qcom,mdp5.yaml
+index e153f8d26e7aae..2735c78b0b67af 100644
+--- a/Documentation/devicetree/bindings/display/msm/qcom,mdp5.yaml
++++ b/Documentation/devicetree/bindings/display/msm/qcom,mdp5.yaml
+@@ -60,7 +60,6 @@ properties:
+           - const: bus
+           - const: core
+           - const: vsync
+-          - const: lut
+           - const: tbu
+           - const: tbu_rt
+         # MSM8996 has additional iommu clock
+diff --git a/Makefile b/Makefile
+index 208a50953301b2..cc59990e379679 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 12
+-SUBLEVEL = 44
++SUBLEVEL = 45
+ EXTRAVERSION =
+ NAME = Baby Opossum Posse
+ 
+diff --git a/arch/mips/boot/dts/lantiq/danube_easy50712.dts b/arch/mips/boot/dts/lantiq/danube_easy50712.dts
+index 1ce20b7d05cb8c..c4d7aa5753b043 100644
+--- a/arch/mips/boot/dts/lantiq/danube_easy50712.dts
++++ b/arch/mips/boot/dts/lantiq/danube_easy50712.dts
+@@ -82,13 +82,16 @@ conf_out {
+ 			};
+ 		};
+ 
+-		etop@e180000 {
++		ethernet@e180000 {
+ 			compatible = "lantiq,etop-xway";
+ 			reg = <0xe180000 0x40000>;
+ 			interrupt-parent = <&icu0>;
+ 			interrupts = <73 78>;
++			interrupt-names = "tx", "rx";
+ 			phy-mode = "rmii";
+ 			mac-address = [ 00 11 22 33 44 55 ];
++			lantiq,rx-burst-length = <4>;
++			lantiq,tx-burst-length = <4>;
+ 		};
+ 
+ 		stp0: stp@e100bb0 {
+diff --git a/arch/mips/lantiq/xway/sysctrl.c b/arch/mips/lantiq/xway/sysctrl.c
+index 5a75283d17f10e..6031a0272d8743 100644
+--- a/arch/mips/lantiq/xway/sysctrl.c
++++ b/arch/mips/lantiq/xway/sysctrl.c
+@@ -497,7 +497,7 @@ void __init ltq_soc_init(void)
+ 		ifccr = CGU_IFCCR_VR9;
+ 		pcicr = CGU_PCICR_VR9;
+ 	} else {
+-		clkdev_add_pmu("1e180000.etop", NULL, 1, 0, PMU_PPE);
++		clkdev_add_pmu("1e180000.ethernet", NULL, 1, 0, PMU_PPE);
+ 	}
+ 
+ 	if (!of_machine_is_compatible("lantiq,ase"))
+@@ -531,9 +531,9 @@ void __init ltq_soc_init(void)
+ 						CLOCK_133M, CLOCK_133M);
+ 		clkdev_add_pmu("1e101000.usb", "otg", 1, 0, PMU_USB0);
+ 		clkdev_add_pmu("1f203018.usb2-phy", "phy", 1, 0, PMU_USB0_P);
+-		clkdev_add_pmu("1e180000.etop", "ppe", 1, 0, PMU_PPE);
+-		clkdev_add_cgu("1e180000.etop", "ephycgu", CGU_EPHY);
+-		clkdev_add_pmu("1e180000.etop", "ephy", 1, 0, PMU_EPHY);
++		clkdev_add_pmu("1e180000.ethernet", "ppe", 1, 0, PMU_PPE);
++		clkdev_add_cgu("1e180000.ethernet", "ephycgu", CGU_EPHY);
++		clkdev_add_pmu("1e180000.ethernet", "ephy", 1, 0, PMU_EPHY);
+ 		clkdev_add_pmu("1e103000.sdio", NULL, 1, 0, PMU_ASE_SDIO);
+ 		clkdev_add_pmu("1e116000.mei", "dfe", 1, 0, PMU_DFE);
+ 	} else if (of_machine_is_compatible("lantiq,grx390")) {
+@@ -592,7 +592,7 @@ void __init ltq_soc_init(void)
+ 		clkdev_add_pmu("1e101000.usb", "otg", 1, 0, PMU_USB0 | PMU_AHBM);
+ 		clkdev_add_pmu("1f203034.usb2-phy", "phy", 1, 0, PMU_USB1_P);
+ 		clkdev_add_pmu("1e106000.usb", "otg", 1, 0, PMU_USB1 | PMU_AHBM);
+-		clkdev_add_pmu("1e180000.etop", "switch", 1, 0, PMU_SWITCH);
++		clkdev_add_pmu("1e180000.ethernet", "switch", 1, 0, PMU_SWITCH);
+ 		clkdev_add_pmu("1e103000.sdio", NULL, 1, 0, PMU_SDIO);
+ 		clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU);
+ 		clkdev_add_pmu("1e116000.mei", "dfe", 1, 0, PMU_DFE);
+diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c
+index 5b3c093611baf1..7209d00a9c2576 100644
+--- a/arch/powerpc/kernel/kvm.c
++++ b/arch/powerpc/kernel/kvm.c
+@@ -632,19 +632,19 @@ static void __init kvm_check_ins(u32 *inst, u32 features)
+ #endif
+ 	}
+ 
+-	switch (inst_no_rt & ~KVM_MASK_RB) {
+ #ifdef CONFIG_PPC_BOOK3S_32
++	switch (inst_no_rt & ~KVM_MASK_RB) {
+ 	case KVM_INST_MTSRIN:
+ 		if (features & KVM_MAGIC_FEAT_SR) {
+ 			u32 inst_rb = _inst & KVM_MASK_RB;
+ 			kvm_patch_ins_mtsrin(inst, inst_rt, inst_rb);
+ 		}
+ 		break;
+-#endif
+ 	}
++#endif
+ 
+-	switch (_inst) {
+ #ifdef CONFIG_BOOKE
++	switch (_inst) {
+ 	case KVM_INST_WRTEEI_0:
+ 		kvm_patch_ins_wrteei_0(inst);
+ 		break;
+@@ -652,8 +652,8 @@ static void __init kvm_check_ins(u32 *inst, u32 features)
+ 	case KVM_INST_WRTEEI_1:
+ 		kvm_patch_ins_wrtee(inst, 0, 1);
+ 		break;
+-#endif
+ 	}
++#endif
+ }
+ 
+ extern u32 kvm_template_start[];
+diff --git a/arch/riscv/kvm/vcpu_vector.c b/arch/riscv/kvm/vcpu_vector.c
+index d92d1348045c8c..8454c1c3655a4c 100644
+--- a/arch/riscv/kvm/vcpu_vector.c
++++ b/arch/riscv/kvm/vcpu_vector.c
+@@ -181,6 +181,8 @@ int kvm_riscv_vcpu_set_reg_vector(struct kvm_vcpu *vcpu,
+ 		struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
+ 		unsigned long reg_val;
+ 
++		if (reg_size != sizeof(reg_val))
++			return -EINVAL;
+ 		if (copy_from_user(&reg_val, uaddr, reg_size))
+ 			return -EFAULT;
+ 		if (reg_val != cntx->vector.vlenb)
+diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
+index 765b4646648f7b..910accfeb78569 100644
+--- a/arch/x86/kernel/cpu/microcode/amd.c
++++ b/arch/x86/kernel/cpu/microcode/amd.c
+@@ -159,8 +159,28 @@ static int cmp_id(const void *key, const void *elem)
+ 		return 1;
+ }
+ 
++static u32 cpuid_to_ucode_rev(unsigned int val)
++{
++	union zen_patch_rev p = {};
++	union cpuid_1_eax c;
++
++	c.full = val;
++
++	p.stepping  = c.stepping;
++	p.model     = c.model;
++	p.ext_model = c.ext_model;
++	p.ext_fam   = c.ext_fam;
++
++	return p.ucode_rev;
++}
++
+ static bool need_sha_check(u32 cur_rev)
+ {
++	if (!cur_rev) {
++		cur_rev = cpuid_to_ucode_rev(bsp_cpuid_1_eax);
++		pr_info_once("No current revision, generating the lowest one: 0x%x\n", cur_rev);
++	}
++
+ 	switch (cur_rev >> 8) {
+ 	case 0x80012: return cur_rev <= 0x800126f; break;
+ 	case 0x80082: return cur_rev <= 0x800820f; break;
+@@ -741,8 +761,6 @@ static struct ucode_patch *cache_find_patch(struct ucode_cpu_info *uci, u16 equi
+ 	n.equiv_cpu = equiv_cpu;
+ 	n.patch_id  = uci->cpu_sig.rev;
+ 
+-	WARN_ON_ONCE(!n.patch_id);
+-
+ 	list_for_each_entry(p, &microcode_cache, plist)
+ 		if (patch_cpus_equivalent(p, &n, false))
+ 			return p;
+diff --git a/arch/x86/kernel/cpu/topology_amd.c b/arch/x86/kernel/cpu/topology_amd.c
+index 7d476fa697ca53..0fab130a8249ef 100644
+--- a/arch/x86/kernel/cpu/topology_amd.c
++++ b/arch/x86/kernel/cpu/topology_amd.c
+@@ -80,20 +80,25 @@ static bool parse_8000_001e(struct topo_scan *tscan, bool has_topoext)
+ 
+ 	cpuid_leaf(0x8000001e, &leaf);
+ 
+-	tscan->c->topo.initial_apicid = leaf.ext_apic_id;
+-
+ 	/*
+-	 * If leaf 0xb is available, then the domain shifts are set
+-	 * already and nothing to do here. Only valid for family >= 0x17.
++	 * If leaf 0xb/0x26 is available, then the APIC ID and the domain
++	 * shifts are set already.
+ 	 */
+-	if (!has_topoext && tscan->c->x86 >= 0x17) {
++	if (!has_topoext) {
++		tscan->c->topo.initial_apicid = leaf.ext_apic_id;
++
+ 		/*
+-		 * Leaf 0x80000008 set the CORE domain shift already.
+-		 * Update the SMT domain, but do not propagate it.
++		 * Leaf 0x8000008 sets the CORE domain shift but not the
++		 * SMT domain shift. On CPUs with family >= 0x17, there
++		 * might be hyperthreads.
+ 		 */
+-		unsigned int nthreads = leaf.core_nthreads + 1;
++		if (tscan->c->x86 >= 0x17) {
++			/* Update the SMT domain, but do not propagate it. */
++			unsigned int nthreads = leaf.core_nthreads + 1;
+ 
+-		topology_update_dom(tscan, TOPO_SMT_DOMAIN, get_count_order(nthreads), nthreads);
++			topology_update_dom(tscan, TOPO_SMT_DOMAIN,
++					    get_count_order(nthreads), nthreads);
++		}
+ 	}
+ 
+ 	store_node(tscan, leaf.nnodes_per_socket + 1, leaf.node_id);
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index 1a8148dec4afe9..33a6cb1ac6031e 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -860,6 +860,8 @@ static int __pv_send_ipi(unsigned long *ipi_bitmap, struct kvm_apic_map *map,
+ 	if (min > map->max_apic_id)
+ 		return 0;
+ 
++	min = array_index_nospec(min, map->max_apic_id + 1);
++
+ 	for_each_set_bit(i, ipi_bitmap,
+ 		min((u32)BITS_PER_LONG, (map->max_apic_id - min + 1))) {
+ 		if (map->phys_map[min + i]) {
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index dbd295ef3eba2e..17ec4c4a3d92e6 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -9969,8 +9969,11 @@ static void kvm_sched_yield(struct kvm_vcpu *vcpu, unsigned long dest_id)
+ 	rcu_read_lock();
+ 	map = rcu_dereference(vcpu->kvm->arch.apic_map);
+ 
+-	if (likely(map) && dest_id <= map->max_apic_id && map->phys_map[dest_id])
+-		target = map->phys_map[dest_id]->vcpu;
++	if (likely(map) && dest_id <= map->max_apic_id) {
++		dest_id = array_index_nospec(dest_id, map->max_apic_id + 1);
++		if (map->phys_map[dest_id])
++			target = map->phys_map[dest_id]->vcpu;
++	}
+ 
+ 	rcu_read_unlock();
+ 
+diff --git a/block/blk-zoned.c b/block/blk-zoned.c
+index 24c80078ca442e..5915fb98ffdcea 100644
+--- a/block/blk-zoned.c
++++ b/block/blk-zoned.c
+@@ -1281,14 +1281,14 @@ static void blk_zone_wplug_bio_work(struct work_struct *work)
+ 	struct block_device *bdev;
+ 	unsigned long flags;
+ 	struct bio *bio;
++	bool prepared;
+ 
+ 	/*
+ 	 * Submit the next plugged BIO. If we do not have any, clear
+ 	 * the plugged flag.
+ 	 */
+-	spin_lock_irqsave(&zwplug->lock, flags);
+-
+ again:
++	spin_lock_irqsave(&zwplug->lock, flags);
+ 	bio = bio_list_pop(&zwplug->bio_list);
+ 	if (!bio) {
+ 		zwplug->flags &= ~BLK_ZONE_WPLUG_PLUGGED;
+@@ -1296,13 +1296,14 @@ static void blk_zone_wplug_bio_work(struct work_struct *work)
+ 		goto put_zwplug;
+ 	}
+ 
+-	if (!blk_zone_wplug_prepare_bio(zwplug, bio)) {
++	prepared = blk_zone_wplug_prepare_bio(zwplug, bio);
++	spin_unlock_irqrestore(&zwplug->lock, flags);
++
++	if (!prepared) {
+ 		blk_zone_wplug_bio_io_error(zwplug, bio);
+ 		goto again;
+ 	}
+ 
+-	spin_unlock_irqrestore(&zwplug->lock, flags);
+-
+ 	bdev = bio->bi_bdev;
+ 
+ 	/*
+diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
+index e614e4bef9ea1b..a813bc97cf4258 100644
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -2329,6 +2329,12 @@ static const struct dmi_system_id acpi_ec_no_wakeup[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "83Q3"),
+ 		}
+ 	},
++	{
++		// TUXEDO InfinityBook Pro AMD Gen9
++		.matches = {
++			DMI_MATCH(DMI_BOARD_NAME, "GXxHRXx"),
++		},
++	},
+ 	{ },
+ };
+ 
+diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
+index eeae160c898d38..fa3c76a2b49d1f 100644
+--- a/drivers/atm/atmtcp.c
++++ b/drivers/atm/atmtcp.c
+@@ -279,6 +279,19 @@ static struct atm_vcc *find_vcc(struct atm_dev *dev, short vpi, int vci)
+         return NULL;
+ }
+ 
++static int atmtcp_c_pre_send(struct atm_vcc *vcc, struct sk_buff *skb)
++{
++	struct atmtcp_hdr *hdr;
++
++	if (skb->len < sizeof(struct atmtcp_hdr))
++		return -EINVAL;
++
++	hdr = (struct atmtcp_hdr *)skb->data;
++	if (hdr->length == ATMTCP_HDR_MAGIC)
++		return -EINVAL;
++
++	return 0;
++}
+ 
+ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
+ {
+@@ -288,9 +301,6 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
+ 	struct sk_buff *new_skb;
+ 	int result = 0;
+ 
+-	if (skb->len < sizeof(struct atmtcp_hdr))
+-		goto done;
+-
+ 	dev = vcc->dev_data;
+ 	hdr = (struct atmtcp_hdr *) skb->data;
+ 	if (hdr->length == ATMTCP_HDR_MAGIC) {
+@@ -347,6 +357,7 @@ static const struct atmdev_ops atmtcp_v_dev_ops = {
+ 
+ static const struct atmdev_ops atmtcp_c_dev_ops = {
+ 	.close		= atmtcp_c_close,
++	.pre_send	= atmtcp_c_pre_send,
+ 	.send		= atmtcp_c_send
+ };
+ 
+diff --git a/drivers/firmware/efi/stmm/tee_stmm_efi.c b/drivers/firmware/efi/stmm/tee_stmm_efi.c
+index f741ca279052bb..e15d11ed165eef 100644
+--- a/drivers/firmware/efi/stmm/tee_stmm_efi.c
++++ b/drivers/firmware/efi/stmm/tee_stmm_efi.c
+@@ -143,6 +143,10 @@ static efi_status_t mm_communicate(u8 *comm_buf, size_t payload_size)
+ 	return var_hdr->ret_status;
+ }
+ 
++#define COMM_BUF_SIZE(__payload_size)	(MM_COMMUNICATE_HEADER_SIZE + \
++					 MM_VARIABLE_COMMUNICATE_SIZE + \
++					 (__payload_size))
++
+ /**
+  * setup_mm_hdr() -	Allocate a buffer for StandAloneMM and initialize the
+  *			header data.
+@@ -173,9 +177,8 @@ static void *setup_mm_hdr(u8 **dptr, size_t payload_size, size_t func,
+ 		return NULL;
+ 	}
+ 
+-	comm_buf = kzalloc(MM_COMMUNICATE_HEADER_SIZE +
+-				   MM_VARIABLE_COMMUNICATE_SIZE + payload_size,
+-			   GFP_KERNEL);
++	comm_buf = alloc_pages_exact(COMM_BUF_SIZE(payload_size),
++				     GFP_KERNEL | __GFP_ZERO);
+ 	if (!comm_buf) {
+ 		*ret = EFI_OUT_OF_RESOURCES;
+ 		return NULL;
+@@ -239,7 +242,7 @@ static efi_status_t get_max_payload(size_t *size)
+ 	 */
+ 	*size -= 2;
+ out:
+-	kfree(comm_buf);
++	free_pages_exact(comm_buf, COMM_BUF_SIZE(payload_size));
+ 	return ret;
+ }
+ 
+@@ -282,7 +285,7 @@ static efi_status_t get_property_int(u16 *name, size_t name_size,
+ 	memcpy(var_property, &smm_property->property, sizeof(*var_property));
+ 
+ out:
+-	kfree(comm_buf);
++	free_pages_exact(comm_buf, COMM_BUF_SIZE(payload_size));
+ 	return ret;
+ }
+ 
+@@ -347,7 +350,7 @@ static efi_status_t tee_get_variable(u16 *name, efi_guid_t *vendor,
+ 	memcpy(data, (u8 *)var_acc->name + var_acc->name_size,
+ 	       var_acc->data_size);
+ out:
+-	kfree(comm_buf);
++	free_pages_exact(comm_buf, COMM_BUF_SIZE(payload_size));
+ 	return ret;
+ }
+ 
+@@ -404,7 +407,7 @@ static efi_status_t tee_get_next_variable(unsigned long *name_size,
+ 	memcpy(name, var_getnext->name, var_getnext->name_size);
+ 
+ out:
+-	kfree(comm_buf);
++	free_pages_exact(comm_buf, COMM_BUF_SIZE(payload_size));
+ 	return ret;
+ }
+ 
+@@ -467,7 +470,7 @@ static efi_status_t tee_set_variable(efi_char16_t *name, efi_guid_t *vendor,
+ 	ret = mm_communicate(comm_buf, payload_size);
+ 	dev_dbg(pvt_data.dev, "Set Variable %s %d %lx\n", __FILE__, __LINE__, ret);
+ out:
+-	kfree(comm_buf);
++	free_pages_exact(comm_buf, COMM_BUF_SIZE(payload_size));
+ 	return ret;
+ }
+ 
+@@ -507,7 +510,7 @@ static efi_status_t tee_query_variable_info(u32 attributes,
+ 	*max_variable_size = mm_query_info->max_variable_size;
+ 
+ out:
+-	kfree(comm_buf);
++	free_pages_exact(comm_buf, COMM_BUF_SIZE(payload_size));
+ 	return ret;
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
+index dfb6cfd8376069..02138aa557935e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
+@@ -88,8 +88,8 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ 	}
+ 
+ 	r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, size,
+-			     AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
+-			     AMDGPU_VM_PAGE_EXECUTABLE);
++			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
++			     AMDGPU_PTE_EXECUTABLE);
+ 
+ 	if (r) {
+ 		DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
+diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+index c4fdd82a00429c..57b5d90ca89b3f 100644
+--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
++++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+@@ -3668,14 +3668,16 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
+ 		effective_mode &= ~S_IWUSR;
+ 
+ 	/* not implemented yet for APUs other than GC 10.3.1 (vangogh) and 9.4.3 */
+-	if (((adev->family == AMDGPU_FAMILY_SI) ||
+-	     ((adev->flags & AMD_IS_APU) && (gc_ver != IP_VERSION(10, 3, 1)) &&
+-	      (gc_ver != IP_VERSION(9, 4, 3) && gc_ver != IP_VERSION(9, 4, 4)))) &&
+-	    (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
+-	     attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr ||
+-	     attr == &sensor_dev_attr_power1_cap.dev_attr.attr ||
+-	     attr == &sensor_dev_attr_power1_cap_default.dev_attr.attr))
+-		return 0;
++	if (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
++	    attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr ||
++	    attr == &sensor_dev_attr_power1_cap.dev_attr.attr ||
++	    attr == &sensor_dev_attr_power1_cap_default.dev_attr.attr) {
++		if (adev->family == AMDGPU_FAMILY_SI ||
++		    ((adev->flags & AMD_IS_APU) && gc_ver != IP_VERSION(10, 3, 1) &&
++		     (gc_ver != IP_VERSION(9, 4, 3) && gc_ver != IP_VERSION(9, 4, 4))) ||
++		    (amdgpu_sriov_vf(adev) && gc_ver == IP_VERSION(11, 0, 3)))
++			return 0;
++	}
+ 
+ 	/* not implemented yet for APUs having < GC 9.3.0 (Renoir) */
+ 	if (((adev->family == AMDGPU_FAMILY_SI) ||
+diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c
+index bb61bbdcce5b3c..9fa13da513d24e 100644
+--- a/drivers/gpu/drm/display/drm_dp_helper.c
++++ b/drivers/gpu/drm/display/drm_dp_helper.c
+@@ -664,7 +664,7 @@ ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
+ 	 * monitor doesn't power down exactly after the throw away read.
+ 	 */
+ 	if (!aux->is_remote) {
+-		ret = drm_dp_dpcd_probe(aux, DP_LANE0_1_STATUS);
++		ret = drm_dp_dpcd_probe(aux, DP_DPCD_REV);
+ 		if (ret < 0)
+ 			return ret;
+ 	}
+diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+index 42e62b0409612e..2508e9e9431dcd 100644
+--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
++++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+@@ -381,19 +381,19 @@ static bool mtk_drm_get_all_drm_priv(struct device *dev)
+ 
+ 		of_id = of_match_node(mtk_drm_of_ids, node);
+ 		if (!of_id)
+-			continue;
++			goto next_put_node;
+ 
+ 		pdev = of_find_device_by_node(node);
+ 		if (!pdev)
+-			continue;
++			goto next_put_node;
+ 
+ 		drm_dev = device_find_child(&pdev->dev, NULL, mtk_drm_match);
+ 		if (!drm_dev)
+-			continue;
++			goto next_put_device_pdev_dev;
+ 
+ 		temp_drm_priv = dev_get_drvdata(drm_dev);
+ 		if (!temp_drm_priv)
+-			continue;
++			goto next_put_device_drm_dev;
+ 
+ 		if (temp_drm_priv->data->main_len)
+ 			all_drm_priv[CRTC_MAIN] = temp_drm_priv;
+@@ -405,10 +405,17 @@ static bool mtk_drm_get_all_drm_priv(struct device *dev)
+ 		if (temp_drm_priv->mtk_drm_bound)
+ 			cnt++;
+ 
+-		if (cnt == MAX_CRTC) {
+-			of_node_put(node);
++next_put_device_drm_dev:
++		put_device(drm_dev);
++
++next_put_device_pdev_dev:
++		put_device(&pdev->dev);
++
++next_put_node:
++		of_node_put(node);
++
++		if (cnt == MAX_CRTC)
+ 			break;
+-		}
+ 	}
+ 
+ 	if (drm_priv->data->mmsys_dev_num == cnt) {
+diff --git a/drivers/gpu/drm/mediatek/mtk_plane.c b/drivers/gpu/drm/mediatek/mtk_plane.c
+index 74c2704efb6642..6e20f7037b5bbc 100644
+--- a/drivers/gpu/drm/mediatek/mtk_plane.c
++++ b/drivers/gpu/drm/mediatek/mtk_plane.c
+@@ -292,7 +292,8 @@ static void mtk_plane_atomic_disable(struct drm_plane *plane,
+ 	wmb(); /* Make sure the above parameter is set before update */
+ 	mtk_plane_state->pending.dirty = true;
+ 
+-	mtk_crtc_plane_disable(old_state->crtc, plane);
++	if (old_state && old_state->crtc)
++		mtk_crtc_plane_disable(old_state->crtc, plane);
+ }
+ 
+ static void mtk_plane_atomic_update(struct drm_plane *plane,
+diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
+index 4b3a8ee8e278f0..3eee6517541e3f 100644
+--- a/drivers/gpu/drm/msm/msm_gem_submit.c
++++ b/drivers/gpu/drm/msm/msm_gem_submit.c
+@@ -879,12 +879,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
+ 
+ 	if (ret == 0 && args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
+ 		sync_file = sync_file_create(submit->user_fence);
+-		if (!sync_file) {
++		if (!sync_file)
+ 			ret = -ENOMEM;
+-		} else {
+-			fd_install(out_fence_fd, sync_file->file);
+-			args->fence_fd = out_fence_fd;
+-		}
+ 	}
+ 
+ 	if (ret)
+@@ -912,10 +908,14 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
+ out_unlock:
+ 	mutex_unlock(&queue->lock);
+ out_post_unlock:
+-	if (ret && (out_fence_fd >= 0)) {
+-		put_unused_fd(out_fence_fd);
++	if (ret) {
++		if (out_fence_fd >= 0)
++			put_unused_fd(out_fence_fd);
+ 		if (sync_file)
+ 			fput(sync_file->file);
++	} else if (sync_file) {
++		fd_install(out_fence_fd, sync_file->file);
++		args->fence_fd = out_fence_fd;
+ 	}
+ 
+ 	if (!IS_ERR_OR_NULL(submit)) {
+diff --git a/drivers/gpu/drm/msm/msm_kms.c b/drivers/gpu/drm/msm/msm_kms.c
+index 6749f0fbca96d5..52464a1346f810 100644
+--- a/drivers/gpu/drm/msm/msm_kms.c
++++ b/drivers/gpu/drm/msm/msm_kms.c
+@@ -241,6 +241,12 @@ int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv)
+ 	if (ret)
+ 		return ret;
+ 
++	ret = msm_disp_snapshot_init(ddev);
++	if (ret) {
++		DRM_DEV_ERROR(dev, "msm_disp_snapshot_init failed ret = %d\n", ret);
++		return ret;
++	}
++
+ 	ret = priv->kms_init(ddev);
+ 	if (ret) {
+ 		DRM_DEV_ERROR(dev, "failed to load kms\n");
+@@ -293,10 +299,6 @@ int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv)
+ 		goto err_msm_uninit;
+ 	}
+ 
+-	ret = msm_disp_snapshot_init(ddev);
+-	if (ret)
+-		DRM_DEV_ERROR(dev, "msm_disp_snapshot_init failed ret = %d\n", ret);
+-
+ 	drm_mode_config_reset(ddev);
+ 
+ 	return 0;
+diff --git a/drivers/gpu/drm/msm/registers/display/dsi.xml b/drivers/gpu/drm/msm/registers/display/dsi.xml
+index 501ffc585a9f69..c7a7b633d747bc 100644
+--- a/drivers/gpu/drm/msm/registers/display/dsi.xml
++++ b/drivers/gpu/drm/msm/registers/display/dsi.xml
+@@ -159,28 +159,28 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
+ 		<bitfield name="RGB_SWAP" low="12" high="14" type="dsi_rgb_swap"/>
+ 	</reg32>
+ 	<reg32 offset="0x00020" name="ACTIVE_H">
+-		<bitfield name="START" low="0" high="11" type="uint"/>
+-		<bitfield name="END" low="16" high="27" type="uint"/>
++		<bitfield name="START" low="0" high="15" type="uint"/>
++		<bitfield name="END" low="16" high="31" type="uint"/>
+ 	</reg32>
+ 	<reg32 offset="0x00024" name="ACTIVE_V">
+-		<bitfield name="START" low="0" high="11" type="uint"/>
+-		<bitfield name="END" low="16" high="27" type="uint"/>
++		<bitfield name="START" low="0" high="15" type="uint"/>
++		<bitfield name="END" low="16" high="31" type="uint"/>
+ 	</reg32>
+ 	<reg32 offset="0x00028" name="TOTAL">
+-		<bitfield name="H_TOTAL" low="0" high="11" type="uint"/>
+-		<bitfield name="V_TOTAL" low="16" high="27" type="uint"/>
++		<bitfield name="H_TOTAL" low="0" high="15" type="uint"/>
++		<bitfield name="V_TOTAL" low="16" high="31" type="uint"/>
+ 	</reg32>
+ 	<reg32 offset="0x0002c" name="ACTIVE_HSYNC">
+-		<bitfield name="START" low="0" high="11" type="uint"/>
+-		<bitfield name="END" low="16" high="27" type="uint"/>
++		<bitfield name="START" low="0" high="15" type="uint"/>
++		<bitfield name="END" low="16" high="31" type="uint"/>
+ 	</reg32>
+ 	<reg32 offset="0x00030" name="ACTIVE_VSYNC_HPOS">
+-		<bitfield name="START" low="0" high="11" type="uint"/>
+-		<bitfield name="END" low="16" high="27" type="uint"/>
++		<bitfield name="START" low="0" high="15" type="uint"/>
++		<bitfield name="END" low="16" high="31" type="uint"/>
+ 	</reg32>
+ 	<reg32 offset="0x00034" name="ACTIVE_VSYNC_VPOS">
+-		<bitfield name="START" low="0" high="11" type="uint"/>
+-		<bitfield name="END" low="16" high="27" type="uint"/>
++		<bitfield name="START" low="0" high="15" type="uint"/>
++		<bitfield name="END" low="16" high="31" type="uint"/>
+ 	</reg32>
+ 
+ 	<reg32 offset="0x00038" name="CMD_DMA_CTRL">
+@@ -209,8 +209,8 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
+ 		<bitfield name="WORD_COUNT" low="16" high="31" type="uint"/>
+ 	</reg32>
+ 	<reg32 offset="0x00058" name="CMD_MDP_STREAM0_TOTAL">
+-		<bitfield name="H_TOTAL" low="0" high="11" type="uint"/>
+-		<bitfield name="V_TOTAL" low="16" high="27" type="uint"/>
++		<bitfield name="H_TOTAL" low="0" high="15" type="uint"/>
++		<bitfield name="V_TOTAL" low="16" high="31" type="uint"/>
+ 	</reg32>
+ 	<reg32 offset="0x0005c" name="CMD_MDP_STREAM1_CTRL">
+ 		<bitfield name="DATA_TYPE" low="0" high="5" type="uint"/>
+diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+index 7a2cceaee6e97f..1199dfc1194c80 100644
+--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
++++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+@@ -663,6 +663,10 @@ static bool nv50_plane_format_mod_supported(struct drm_plane *plane,
+ 	struct nouveau_drm *drm = nouveau_drm(plane->dev);
+ 	uint8_t i;
+ 
++	/* All chipsets can display all formats in linear layout */
++	if (modifier == DRM_FORMAT_MOD_LINEAR)
++		return true;
++
+ 	if (drm->client.device.info.chipset < 0xc0) {
+ 		const struct drm_format_info *info = drm_format_info(format);
+ 		const uint8_t kind = (modifier >> 12) & 0xff;
+diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/gm200.c b/drivers/gpu/drm/nouveau/nvkm/falcon/gm200.c
+index b7da3ab44c277d..7c43397c19e61d 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/falcon/gm200.c
++++ b/drivers/gpu/drm/nouveau/nvkm/falcon/gm200.c
+@@ -103,7 +103,7 @@ gm200_flcn_pio_imem_wr_init(struct nvkm_falcon *falcon, u8 port, bool sec, u32 i
+ static void
+ gm200_flcn_pio_imem_wr(struct nvkm_falcon *falcon, u8 port, const u8 *img, int len, u16 tag)
+ {
+-	nvkm_falcon_wr32(falcon, 0x188 + (port * 0x10), tag++);
++	nvkm_falcon_wr32(falcon, 0x188 + (port * 0x10), tag);
+ 	while (len >= 4) {
+ 		nvkm_falcon_wr32(falcon, 0x184 + (port * 0x10), *(u32 *)img);
+ 		img += 4;
+@@ -249,9 +249,11 @@ int
+ gm200_flcn_fw_load(struct nvkm_falcon_fw *fw)
+ {
+ 	struct nvkm_falcon *falcon = fw->falcon;
+-	int target, ret;
++	int ret;
+ 
+ 	if (fw->inst) {
++		int target;
++
+ 		nvkm_falcon_mask(falcon, 0x048, 0x00000001, 0x00000001);
+ 
+ 		switch (nvkm_memory_target(fw->inst)) {
+@@ -285,15 +287,6 @@ gm200_flcn_fw_load(struct nvkm_falcon_fw *fw)
+ 	}
+ 
+ 	if (fw->boot) {
+-		switch (nvkm_memory_target(&fw->fw.mem.memory)) {
+-		case NVKM_MEM_TARGET_VRAM: target = 4; break;
+-		case NVKM_MEM_TARGET_HOST: target = 5; break;
+-		case NVKM_MEM_TARGET_NCOH: target = 6; break;
+-		default:
+-			WARN_ON(1);
+-			return -EINVAL;
+-		}
+-
+ 		ret = nvkm_falcon_pio_wr(falcon, fw->boot, 0, 0,
+ 					 IMEM, falcon->code.limit - fw->boot_size, fw->boot_size,
+ 					 fw->boot_addr >> 8, false);
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c
+index 52412965fac107..5b721bd9d79949 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c
+@@ -209,11 +209,12 @@ nvkm_gsp_fwsec_v2(struct nvkm_gsp *gsp, const char *name,
+ 	fw->boot_addr = bld->start_tag << 8;
+ 	fw->boot_size = bld->code_size;
+ 	fw->boot = kmemdup(bl->data + hdr->data_offset + bld->code_off, fw->boot_size, GFP_KERNEL);
+-	if (!fw->boot)
+-		ret = -ENOMEM;
+ 
+ 	nvkm_firmware_put(bl);
+ 
++	if (!fw->boot)
++		return -ENOMEM;
++
+ 	/* Patch in interface data. */
+ 	return nvkm_gsp_fwsec_patch(gsp, fw, desc->InterfaceOffset, init_cmd);
+ }
+diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
+index 5f745d9ed6cc25..445bbe0299b08f 100644
+--- a/drivers/gpu/drm/xe/xe_bo.c
++++ b/drivers/gpu/drm/xe/xe_bo.c
+@@ -671,7 +671,8 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
+ 	}
+ 
+ 	if (ttm_bo->type == ttm_bo_type_sg) {
+-		ret = xe_bo_move_notify(bo, ctx);
++		if (new_mem->mem_type == XE_PL_SYSTEM)
++			ret = xe_bo_move_notify(bo, ctx);
+ 		if (!ret)
+ 			ret = xe_bo_move_dmabuf(ttm_bo, new_mem);
+ 		return ret;
+diff --git a/drivers/gpu/drm/xe/xe_sync.c b/drivers/gpu/drm/xe/xe_sync.c
+index b0684e6d2047b1..dd7bd766ae1845 100644
+--- a/drivers/gpu/drm/xe/xe_sync.c
++++ b/drivers/gpu/drm/xe/xe_sync.c
+@@ -77,6 +77,7 @@ static void user_fence_worker(struct work_struct *w)
+ {
+ 	struct xe_user_fence *ufence = container_of(w, struct xe_user_fence, worker);
+ 
++	WRITE_ONCE(ufence->signalled, 1);
+ 	if (mmget_not_zero(ufence->mm)) {
+ 		kthread_use_mm(ufence->mm);
+ 		if (copy_to_user(ufence->addr, &ufence->value, sizeof(ufence->value)))
+@@ -89,7 +90,6 @@ static void user_fence_worker(struct work_struct *w)
+ 	 * Wake up waiters only after updating the ufence state, allowing the UMD
+ 	 * to safely reuse the same ufence without encountering -EBUSY errors.
+ 	 */
+-	WRITE_ONCE(ufence->signalled, 1);
+ 	wake_up_all(&ufence->xe->ufence_wq);
+ 	user_fence_put(ufence);
+ }
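
[Editor's note] The xe_sync.c hunk keeps the signalled store strictly ahead of both the userspace value write and the wakeup. A hedged sketch of the matching waiter side — the struct and names below are stand-ins, not the actual xe waiter:

    #include <linux/wait.h>
    #include <linux/compiler.h>

    struct sig_state { unsigned int signalled; }; /* stand-in for the ufence */

    static int wait_for_signal(struct sig_state *s, struct wait_queue_head *wq)
    {
            /* Pairs with WRITE_ONCE(s->signalled, 1) + wake_up_all(wq)
             * in the worker: the flag is re-checked inside wait_event(),
             * so a wakeup is never consumed before the published state
             * is visible. */
            return wait_event_interruptible(*wq, READ_ONCE(s->signalled));
    }
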
+diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
+index 15fd497c920c8e..a4845d4213b006 100644
+--- a/drivers/gpu/drm/xe/xe_vm.c
++++ b/drivers/gpu/drm/xe/xe_vm.c
+@@ -1402,8 +1402,12 @@ static int xe_vm_create_scratch(struct xe_device *xe, struct xe_tile *tile,
+ 
+ 	for (i = MAX_HUGEPTE_LEVEL; i < vm->pt_root[id]->level; i++) {
+ 		vm->scratch_pt[id][i] = xe_pt_create(vm, tile, i);
+-		if (IS_ERR(vm->scratch_pt[id][i]))
+-			return PTR_ERR(vm->scratch_pt[id][i]);
++		if (IS_ERR(vm->scratch_pt[id][i])) {
++			int err = PTR_ERR(vm->scratch_pt[id][i]);
++
++			vm->scratch_pt[id][i] = NULL;
++			return err;
++		}
+ 
+ 		xe_pt_populate_empty(tile, vm, vm->scratch_pt[id][i]);
+ 	}
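
[Editor's note] The xe_vm.c hunk keeps an ERR_PTR() cookie out of the scratch_pt array, because the unwind path only distinguishes NULL from valid entries. The same pattern in isolation (types and constructor hypothetical):

    #include <linux/err.h>

    struct obj;
    struct obj *obj_create(int i);  /* hypothetical constructor */

    static int fill_slots(struct obj **slots, int n)
    {
            for (int i = 0; i < n; i++) {
                    slots[i] = obj_create(i);
                    if (IS_ERR(slots[i])) {
                            int err = PTR_ERR(slots[i]);

                            /* Clear the slot so cleanup never sees an
                             * error cookie where it expects a pointer. */
                            slots[i] = NULL;
                            return err;
                    }
            }
            return 0;
    }
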
+diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
+index c5bdf0f1b32f76..6b90d2c03e889a 100644
+--- a/drivers/hid/hid-asus.c
++++ b/drivers/hid/hid-asus.c
+@@ -1210,7 +1210,13 @@ static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ 		return ret;
+ 	}
+ 
+-	if (!drvdata->input) {
++	/*
++	 * Check that input registration succeeded. Checking that
++	 * HID_CLAIMED_INPUT is set prevents a UAF when all input devices
++	 * were freed during registration due to no usages being mapped,
++	 * leaving drvdata->input pointing to freed memory.
++	 */
++	if (!drvdata->input || !(hdev->claimed & HID_CLAIMED_INPUT)) {
+ 		hid_err(hdev, "Asus input not registered\n");
+ 		ret = -ENOMEM;
+ 		goto err_stop_hw;
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index b472140421f5af..18c4e5f143a77a 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -825,6 +825,8 @@
+ #define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6019	0x6019
+ #define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_602E	0x602e
+ #define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6093	0x6093
++#define USB_DEVICE_ID_LENOVO_LEGION_GO_DUAL_DINPUT	0x6184
++#define USB_DEVICE_ID_LENOVO_LEGION_GO2_DUAL_DINPUT	0x61ed
+ 
+ #define USB_VENDOR_ID_LETSKETCH		0x6161
+ #define USB_DEVICE_ID_WP9620N		0x4d15
+@@ -898,6 +900,7 @@
+ #define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_2		0xc534
+ #define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1	0xc539
+ #define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1_1	0xc53f
++#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1_2	0xc543
+ #define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_POWERPLAY	0xc53a
+ #define USB_DEVICE_ID_LOGITECH_BOLT_RECEIVER	0xc548
+ #define USB_DEVICE_ID_SPACETRAVELLER	0xc623
+diff --git a/drivers/hid/hid-input-test.c b/drivers/hid/hid-input-test.c
+index 77c2d45ac62a7f..6f5c71660d823b 100644
+--- a/drivers/hid/hid-input-test.c
++++ b/drivers/hid/hid-input-test.c
+@@ -7,7 +7,7 @@
+ 
+ #include <kunit/test.h>
+ 
+-static void hid_test_input_set_battery_charge_status(struct kunit *test)
++static void hid_test_input_update_battery_charge_status(struct kunit *test)
+ {
+ 	struct hid_device *dev;
+ 	bool handled;
+@@ -15,15 +15,15 @@ static void hid_test_input_set_battery_charge_status(struct kunit *test)
+ 	dev = kunit_kzalloc(test, sizeof(*dev), GFP_KERNEL);
+ 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+ 
+-	handled = hidinput_set_battery_charge_status(dev, HID_DG_HEIGHT, 0);
++	handled = hidinput_update_battery_charge_status(dev, HID_DG_HEIGHT, 0);
+ 	KUNIT_EXPECT_FALSE(test, handled);
+ 	KUNIT_EXPECT_EQ(test, dev->battery_charge_status, POWER_SUPPLY_STATUS_UNKNOWN);
+ 
+-	handled = hidinput_set_battery_charge_status(dev, HID_BAT_CHARGING, 0);
++	handled = hidinput_update_battery_charge_status(dev, HID_BAT_CHARGING, 0);
+ 	KUNIT_EXPECT_TRUE(test, handled);
+ 	KUNIT_EXPECT_EQ(test, dev->battery_charge_status, POWER_SUPPLY_STATUS_DISCHARGING);
+ 
+-	handled = hidinput_set_battery_charge_status(dev, HID_BAT_CHARGING, 1);
++	handled = hidinput_update_battery_charge_status(dev, HID_BAT_CHARGING, 1);
+ 	KUNIT_EXPECT_TRUE(test, handled);
+ 	KUNIT_EXPECT_EQ(test, dev->battery_charge_status, POWER_SUPPLY_STATUS_CHARGING);
+ }
+@@ -63,7 +63,7 @@ static void hid_test_input_get_battery_property(struct kunit *test)
+ }
+ 
+ static struct kunit_case hid_input_tests[] = {
+-	KUNIT_CASE(hid_test_input_set_battery_charge_status),
++	KUNIT_CASE(hid_test_input_update_battery_charge_status),
+ 	KUNIT_CASE(hid_test_input_get_battery_property),
+ 	{ }
+ };
+diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
+index 9d80635a91ebd8..f5c217ac4bfaa7 100644
+--- a/drivers/hid/hid-input.c
++++ b/drivers/hid/hid-input.c
+@@ -595,13 +595,33 @@ static void hidinput_cleanup_battery(struct hid_device *dev)
+ 	dev->battery = NULL;
+ }
+ 
+-static void hidinput_update_battery(struct hid_device *dev, int value)
++static bool hidinput_update_battery_charge_status(struct hid_device *dev,
++						  unsigned int usage, int value)
++{
++	switch (usage) {
++	case HID_BAT_CHARGING:
++		dev->battery_charge_status = value ?
++					     POWER_SUPPLY_STATUS_CHARGING :
++					     POWER_SUPPLY_STATUS_DISCHARGING;
++		return true;
++	}
++
++	return false;
++}
++
++static void hidinput_update_battery(struct hid_device *dev, unsigned int usage,
++				    int value)
+ {
+ 	int capacity;
+ 
+ 	if (!dev->battery)
+ 		return;
+ 
++	if (hidinput_update_battery_charge_status(dev, usage, value)) {
++		power_supply_changed(dev->battery);
++		return;
++	}
++
+ 	if (value == 0 || value < dev->battery_min || value > dev->battery_max)
+ 		return;
+ 
+@@ -617,20 +637,6 @@ static void hidinput_update_battery(struct hid_device *dev, int value)
+ 		power_supply_changed(dev->battery);
+ 	}
+ }
+-
+-static bool hidinput_set_battery_charge_status(struct hid_device *dev,
+-					       unsigned int usage, int value)
+-{
+-	switch (usage) {
+-	case HID_BAT_CHARGING:
+-		dev->battery_charge_status = value ?
+-					     POWER_SUPPLY_STATUS_CHARGING :
+-					     POWER_SUPPLY_STATUS_DISCHARGING;
+-		return true;
+-	}
+-
+-	return false;
+-}
+ #else  /* !CONFIG_HID_BATTERY_STRENGTH */
+ static int hidinput_setup_battery(struct hid_device *dev, unsigned report_type,
+ 				  struct hid_field *field, bool is_percentage)
+@@ -642,14 +648,9 @@ static void hidinput_cleanup_battery(struct hid_device *dev)
+ {
+ }
+ 
+-static void hidinput_update_battery(struct hid_device *dev, int value)
+-{
+-}
+-
+-static bool hidinput_set_battery_charge_status(struct hid_device *dev,
+-					       unsigned int usage, int value)
++static void hidinput_update_battery(struct hid_device *dev, unsigned int usage,
++				    int value)
+ {
+-	return false;
+ }
+ #endif	/* CONFIG_HID_BATTERY_STRENGTH */
+ 
+@@ -1515,11 +1516,7 @@ void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct
+ 		return;
+ 
+ 	if (usage->type == EV_PWR) {
+-		bool handled = hidinput_set_battery_charge_status(hid, usage->hid, value);
+-
+-		if (!handled)
+-			hidinput_update_battery(hid, value);
+-
++		hidinput_update_battery(hid, usage->hid, value);
+ 		return;
+ 	}
+ 
+diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
+index 34fa71ceec2b20..cce54dd9884a3e 100644
+--- a/drivers/hid/hid-logitech-dj.c
++++ b/drivers/hid/hid-logitech-dj.c
+@@ -1983,6 +1983,10 @@ static const struct hid_device_id logi_dj_receivers[] = {
+ 	  HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ 		USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1_1),
+ 	 .driver_data = recvr_type_gaming_hidpp},
++	{ /* Logitech lightspeed receiver (0xc543) */
++	  HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
++		USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1_2),
++	 .driver_data = recvr_type_gaming_hidpp},
+ 
+ 	{ /* Logitech 27 MHz HID++ 1.0 receiver (0xc513) */
+ 	  HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER),
+diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
+index cf7a6986cf2013..234ddd4422d902 100644
+--- a/drivers/hid/hid-logitech-hidpp.c
++++ b/drivers/hid/hid-logitech-hidpp.c
+@@ -4624,6 +4624,8 @@ static const struct hid_device_id hidpp_devices[] = {
+ 	  HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC094) },
+ 	{ /* Logitech G Pro X Superlight 2 Gaming Mouse over USB */
+ 	  HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC09b) },
++	{ /* Logitech G PRO 2 LIGHTSPEED Wireless Mouse over USB */
++	  HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xc09a) },
+ 
+ 	{ /* G935 Gaming Headset */
+ 	  HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0x0a87),
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index 641292cfdaa6f9..5c424010bc025c 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -1453,6 +1453,14 @@ static const __u8 *mt_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ 	if (hdev->vendor == I2C_VENDOR_ID_GOODIX &&
+ 	    (hdev->product == I2C_DEVICE_ID_GOODIX_01E8 ||
+ 	     hdev->product == I2C_DEVICE_ID_GOODIX_01E9)) {
++		if (*size < 608) {
++			dev_info(
++				&hdev->dev,
++				"GT7868Q fixup: report descriptor is only %u bytes, skipping\n",
++				*size);
++			return rdesc;
++		}
++
+ 		if (rdesc[607] == 0x15) {
+ 			rdesc[607] = 0x25;
+ 			dev_info(
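
[Editor's note] The hid-multitouch.c hunk guards the fixed-offset descriptor patch with a length check instead of trusting the device. Reduced to its shape (sketch):

    /* Sketch: validate the report-descriptor length before touching a
     * byte at a fixed offset. */
    static void fixup_goodix_desc(unsigned char *rdesc, unsigned int size)
    {
            if (size < 608)                 /* offset 607 must exist */
                    return;
            if (rdesc[607] == 0x15)         /* Logical Minimum tag */
                    rdesc[607] = 0x25;      /* rewrite to Logical Maximum */
    }
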
+diff --git a/drivers/hid/hid-ntrig.c b/drivers/hid/hid-ntrig.c
+index 2738ce947434f9..0f76e241e0afb4 100644
+--- a/drivers/hid/hid-ntrig.c
++++ b/drivers/hid/hid-ntrig.c
+@@ -144,6 +144,9 @@ static void ntrig_report_version(struct hid_device *hdev)
+ 	struct usb_device *usb_dev = hid_to_usb_dev(hdev);
+ 	unsigned char *data = kmalloc(8, GFP_KERNEL);
+ 
++	if (!hid_is_usb(hdev))
++		return;
++
+ 	if (!data)
+ 		goto err_free;
+ 
+diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
+index 80372342c176af..64f9728018b885 100644
+--- a/drivers/hid/hid-quirks.c
++++ b/drivers/hid/hid-quirks.c
+@@ -124,6 +124,8 @@ static const struct hid_device_id hid_quirks[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_V2), HID_QUIRK_MULTI_INPUT },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_PENSKETCH_T609A), HID_QUIRK_MULTI_INPUT },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_ODDOR_HANDBRAKE), HID_QUIRK_ALWAYS_POLL },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_LEGION_GO_DUAL_DINPUT), HID_QUIRK_MULTI_INPUT },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_LEGION_GO2_DUAL_DINPUT), HID_QUIRK_MULTI_INPUT },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_OPTICAL_USB_MOUSE_600E), HID_QUIRK_ALWAYS_POLL },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D), HID_QUIRK_ALWAYS_POLL },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6019), HID_QUIRK_ALWAYS_POLL },
+diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
+index c7033ffaba3919..a076dc0b60ee20 100644
+--- a/drivers/hid/wacom_wac.c
++++ b/drivers/hid/wacom_wac.c
+@@ -684,6 +684,7 @@ static bool wacom_is_art_pen(int tool_id)
+ 	case 0x885:	/* Intuos3 Marker Pen */
+ 	case 0x804:	/* Intuos4/5 13HD/24HD Marker Pen */
+ 	case 0x10804:	/* Intuos4/5 13HD/24HD Art Pen */
++	case 0x204:     /* Art Pen 2 */
+ 		is_art_pen = true;
+ 		break;
+ 	}
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index f4bafc71a7399b..08886c3a28c61c 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -7780,7 +7780,8 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
+ 	}
+ 	rx_rings = min_t(int, rx_rings, hwr.grp);
+ 	hwr.cp = min_t(int, hwr.cp, bp->cp_nr_rings);
+-	if (hwr.stat > bnxt_get_ulp_stat_ctxs(bp))
++	if (bnxt_ulp_registered(bp->edev) &&
++	    hwr.stat > bnxt_get_ulp_stat_ctxs(bp))
+ 		hwr.stat -= bnxt_get_ulp_stat_ctxs(bp);
+ 	hwr.cp = min_t(int, hwr.cp, hwr.stat);
+ 	rc = bnxt_trim_rings(bp, &rx_rings, &hwr.tx, hwr.cp, sh);
+@@ -7788,6 +7789,11 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
+ 		hwr.rx = rx_rings << 1;
+ 	tx_cp = bnxt_num_tx_to_cp(bp, hwr.tx);
+ 	hwr.cp = sh ? max_t(int, tx_cp, rx_rings) : tx_cp + rx_rings;
++	if (hwr.tx != bp->tx_nr_rings) {
++		netdev_warn(bp->dev,
++			    "Able to reserve only %d out of %d requested TX rings\n",
++			    hwr.tx, bp->tx_nr_rings);
++	}
+ 	bp->tx_nr_rings = hwr.tx;
+ 
+ 	/* If we cannot reserve all the RX rings, reset the RSS map only
+@@ -12241,6 +12247,17 @@ static int bnxt_set_xps_mapping(struct bnxt *bp)
+ 	return rc;
+ }
+ 
++static int bnxt_tx_nr_rings(struct bnxt *bp)
++{
++	return bp->num_tc ? bp->tx_nr_rings_per_tc * bp->num_tc :
++			    bp->tx_nr_rings_per_tc;
++}
++
++static int bnxt_tx_nr_rings_per_tc(struct bnxt *bp)
++{
++	return bp->num_tc ? bp->tx_nr_rings / bp->num_tc : bp->tx_nr_rings;
++}
++
+ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
+ {
+ 	int rc = 0;
+@@ -12258,6 +12275,13 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
+ 	if (rc)
+ 		return rc;
+ 
++	/* Make adjustments if reserved TX rings are less than requested */
++	bp->tx_nr_rings -= bp->tx_nr_rings_xdp;
++	bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp);
++	if (bp->tx_nr_rings_xdp) {
++		bp->tx_nr_rings_xdp = bp->tx_nr_rings_per_tc;
++		bp->tx_nr_rings += bp->tx_nr_rings_xdp;
++	}
+ 	rc = bnxt_alloc_mem(bp, irq_re_init);
+ 	if (rc) {
+ 		netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
+@@ -15676,7 +15700,7 @@ static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
+ 	bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
+ 	bp->rx_nr_rings = bp->cp_nr_rings;
+ 	bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
+-	bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
++	bp->tx_nr_rings = bnxt_tx_nr_rings(bp);
+ }
+ 
+ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
+@@ -15708,7 +15732,7 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
+ 		bnxt_trim_dflt_sh_rings(bp);
+ 	else
+ 		bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
+-	bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
++	bp->tx_nr_rings = bnxt_tx_nr_rings(bp);
+ 
+ 	avail_msix = bnxt_get_max_func_irqs(bp) - bp->cp_nr_rings;
+ 	if (avail_msix >= BNXT_MIN_ROCE_CP_RINGS) {
+@@ -15721,7 +15745,7 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
+ 	rc = __bnxt_reserve_rings(bp);
+ 	if (rc && rc != -ENODEV)
+ 		netdev_warn(bp->dev, "Unable to reserve tx rings\n");
+-	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
++	bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp);
+ 	if (sh)
+ 		bnxt_trim_dflt_sh_rings(bp);
+ 
+@@ -15730,7 +15754,7 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
+ 		rc = __bnxt_reserve_rings(bp);
+ 		if (rc && rc != -ENODEV)
+ 			netdev_warn(bp->dev, "2nd rings reservation failed.\n");
+-		bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
++		bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp);
+ 	}
+ 	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
+ 		bp->rx_nr_rings++;
+@@ -15764,7 +15788,7 @@ static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
+ 	if (rc)
+ 		goto init_dflt_ring_err;
+ 
+-	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
++	bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp);
+ 
+ 	bnxt_set_dflt_rfs(bp);
+ 
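
[Editor's note] The two bnxt helpers introduced above convert between the total TX ring count and the per-traffic-class count. A userspace restatement with a worked example (values made up):

    #include <assert.h>

    static int tx_nr_rings(int num_tc, int per_tc)
    {
            return num_tc ? per_tc * num_tc : per_tc;
    }

    static int tx_nr_rings_per_tc(int num_tc, int total)
    {
            return num_tc ? total / num_tc : total;
    }

    int main(void)
    {
            assert(tx_nr_rings(2, 4) == 8);         /* 2 TCs x 4 rings each */
            assert(tx_nr_rings_per_tc(2, 8) == 4);  /* inverse mapping */
            assert(tx_nr_rings(0, 4) == 4);         /* no TCs: identity */
            return 0;
    }
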
+diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
+index 3c2a7919b1289d..6c2d69ef1a8dbd 100644
+--- a/drivers/net/ethernet/cadence/macb_main.c
++++ b/drivers/net/ethernet/cadence/macb_main.c
+@@ -5225,19 +5225,16 @@ static void macb_remove(struct platform_device *pdev)
+ 
+ 	if (dev) {
+ 		bp = netdev_priv(dev);
++		unregister_netdev(dev);
+ 		phy_exit(bp->sgmii_phy);
+ 		mdiobus_unregister(bp->mii_bus);
+ 		mdiobus_free(bp->mii_bus);
+ 
+-		unregister_netdev(dev);
++		device_set_wakeup_enable(&bp->pdev->dev, 0);
+ 		cancel_work_sync(&bp->hresp_err_bh_work);
+ 		pm_runtime_disable(&pdev->dev);
+ 		pm_runtime_dont_use_autosuspend(&pdev->dev);
+-		if (!pm_runtime_suspended(&pdev->dev)) {
+-			macb_clks_disable(bp->pclk, bp->hclk, bp->tx_clk,
+-					  bp->rx_clk, bp->tsu_clk);
+-			pm_runtime_set_suspended(&pdev->dev);
+-		}
++		pm_runtime_set_suspended(&pdev->dev);
+ 		phylink_destroy(bp->phylink);
+ 		free_netdev(dev);
+ 	}
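
[Editor's note] The macb_remove() reordering follows the general teardown rule: make the netdev unreachable from userspace before freeing anything its callbacks might still use. Sketch of that ordering for a hypothetical driver (not macb's full teardown):

    #include <linux/netdevice.h>
    #include <linux/phy.h>

    struct drv_priv { struct mii_bus *mii_bus; };   /* hypothetical */

    static void drv_remove(struct net_device *dev, struct drv_priv *p)
    {
            unregister_netdev(dev);         /* no new ndo_* calls after this */
            mdiobus_unregister(p->mii_bus); /* now safe: no users remain */
            mdiobus_free(p->mii_bus);
            free_netdev(dev);               /* last, once all users are gone */
    }
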
+diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
+index 787218d60c6b16..2c1b551e144231 100644
+--- a/drivers/net/ethernet/dlink/dl2k.c
++++ b/drivers/net/ethernet/dlink/dl2k.c
+@@ -1091,7 +1091,7 @@ get_stats (struct net_device *dev)
+ 	dev->stats.rx_bytes += dr32(OctetRcvOk);
+ 	dev->stats.tx_bytes += dr32(OctetXmtOk);
+ 
+-	dev->stats.multicast = dr32(McstFramesRcvdOk);
++	dev->stats.multicast += dr32(McstFramesRcvdOk);
+ 	dev->stats.collisions += dr32(SingleColFrames)
+ 			     +  dr32(MultiColFrames);
+ 
+diff --git a/drivers/net/ethernet/intel/ice/ice_adapter.c b/drivers/net/ethernet/intel/ice/ice_adapter.c
+index 66e070095d1bbe..10285995c9eddd 100644
+--- a/drivers/net/ethernet/intel/ice/ice_adapter.c
++++ b/drivers/net/ethernet/intel/ice/ice_adapter.c
+@@ -13,16 +13,45 @@
+ static DEFINE_XARRAY(ice_adapters);
+ static DEFINE_MUTEX(ice_adapters_mutex);
+ 
+-static unsigned long ice_adapter_index(u64 dsn)
++#define ICE_ADAPTER_FIXED_INDEX	BIT_ULL(63)
++
++#define ICE_ADAPTER_INDEX_E825C	\
++	(ICE_DEV_ID_E825C_BACKPLANE | ICE_ADAPTER_FIXED_INDEX)
++
++static u64 ice_adapter_index(struct pci_dev *pdev)
+ {
++	switch (pdev->device) {
++	case ICE_DEV_ID_E825C_BACKPLANE:
++	case ICE_DEV_ID_E825C_QSFP:
++	case ICE_DEV_ID_E825C_SFP:
++	case ICE_DEV_ID_E825C_SGMII:
++		/* E825C devices have multiple NACs which are connected to the
++		 * same clock source, and which must share the same
++		 * ice_adapter structure. We can't use the serial number since
++		 * each NAC has its own NVM generated with its own unique
++		 * Device Serial Number. Instead, rely on the embedded nature
++		 * of the E825C devices, and use a fixed index. This relies on
++		 * the fact that all E825C physical functions in a given
++		 * system are part of the same overall device.
++		 */
++		return ICE_ADAPTER_INDEX_E825C;
++	default:
++		return pci_get_dsn(pdev) & ~ICE_ADAPTER_FIXED_INDEX;
++	}
++}
++
++static unsigned long ice_adapter_xa_index(struct pci_dev *pdev)
++{
++	u64 index = ice_adapter_index(pdev);
++
+ #if BITS_PER_LONG == 64
+-	return dsn;
++	return index;
+ #else
+-	return (u32)dsn ^ (u32)(dsn >> 32);
++	return (u32)index ^ (u32)(index >> 32);
+ #endif
+ }
+ 
+-static struct ice_adapter *ice_adapter_new(u64 dsn)
++static struct ice_adapter *ice_adapter_new(struct pci_dev *pdev)
+ {
+ 	struct ice_adapter *adapter;
+ 
+@@ -30,7 +59,7 @@ static struct ice_adapter *ice_adapter_new(u64 dsn)
+ 	if (!adapter)
+ 		return NULL;
+ 
+-	adapter->device_serial_number = dsn;
++	adapter->index = ice_adapter_index(pdev);
+ 	spin_lock_init(&adapter->ptp_gltsyn_time_lock);
+ 	refcount_set(&adapter->refcount, 1);
+ 
+@@ -63,24 +92,23 @@ static void ice_adapter_free(struct ice_adapter *adapter)
+  */
+ struct ice_adapter *ice_adapter_get(struct pci_dev *pdev)
+ {
+-	u64 dsn = pci_get_dsn(pdev);
+ 	struct ice_adapter *adapter;
+ 	unsigned long index;
+ 	int err;
+ 
+-	index = ice_adapter_index(dsn);
++	index = ice_adapter_xa_index(pdev);
+ 	scoped_guard(mutex, &ice_adapters_mutex) {
+ 		err = xa_insert(&ice_adapters, index, NULL, GFP_KERNEL);
+ 		if (err == -EBUSY) {
+ 			adapter = xa_load(&ice_adapters, index);
+ 			refcount_inc(&adapter->refcount);
+-			WARN_ON_ONCE(adapter->device_serial_number != dsn);
++			WARN_ON_ONCE(adapter->index != ice_adapter_index(pdev));
+ 			return adapter;
+ 		}
+ 		if (err)
+ 			return ERR_PTR(err);
+ 
+-		adapter = ice_adapter_new(dsn);
++		adapter = ice_adapter_new(pdev);
+ 		if (!adapter)
+ 			return ERR_PTR(-ENOMEM);
+ 		xa_store(&ice_adapters, index, adapter, GFP_KERNEL);
+@@ -99,11 +127,10 @@ struct ice_adapter *ice_adapter_get(struct pci_dev *pdev)
+  */
+ void ice_adapter_put(struct pci_dev *pdev)
+ {
+-	u64 dsn = pci_get_dsn(pdev);
+ 	struct ice_adapter *adapter;
+ 	unsigned long index;
+ 
+-	index = ice_adapter_index(dsn);
++	index = ice_adapter_xa_index(pdev);
+ 	scoped_guard(mutex, &ice_adapters_mutex) {
+ 		adapter = xa_load(&ice_adapters, index);
+ 		if (WARN_ON(!adapter))
+diff --git a/drivers/net/ethernet/intel/ice/ice_adapter.h b/drivers/net/ethernet/intel/ice/ice_adapter.h
+index ac15c0d2bc1a47..409467847c7536 100644
+--- a/drivers/net/ethernet/intel/ice/ice_adapter.h
++++ b/drivers/net/ethernet/intel/ice/ice_adapter.h
+@@ -32,7 +32,7 @@ struct ice_port_list {
+  * @refcount: Reference count. struct ice_pf objects hold the references.
+  * @ctrl_pf: Control PF of the adapter
+  * @ports: Ports list
+- * @device_serial_number: DSN cached for collision detection on 32bit systems
++ * @index: 64-bit index cached for collision detection on 32bit systems
+  */
+ struct ice_adapter {
+ 	refcount_t refcount;
+@@ -41,7 +41,7 @@ struct ice_adapter {
+ 
+ 	struct ice_pf *ctrl_pf;
+ 	struct ice_port_list ports;
+-	u64 device_serial_number;
++	u64 index;
+ };
+ 
+ struct ice_adapter *ice_adapter_get(struct pci_dev *pdev);
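
[Editor's note] The ice_adapter change keys E825C parts to a fixed index (device ID with bit 63 set) while other parts keep using the DSN with bit 63 cleared, so the two namespaces cannot collide; 32-bit kernels then XOR-fold the 64-bit index into an unsigned long. The fold in isolation (userspace sketch, DSN value invented):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t xa_index32(uint64_t index)
    {
            return (uint32_t)index ^ (uint32_t)(index >> 32);
    }

    int main(void)
    {
            uint64_t dsn = 0x00a0c9ffee000001ULL;   /* hypothetical DSN */
            uint64_t index = dsn & ~(1ULL << 63);   /* non-E825C: clear bit 63 */

            printf("xarray index on 32-bit: %#x\n", xa_index32(index));
            return 0;
    }
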
+diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.c b/drivers/net/ethernet/intel/ice/ice_ddp.c
+index e4c8cd12a41d13..04bec5d8e70841 100644
+--- a/drivers/net/ethernet/intel/ice/ice_ddp.c
++++ b/drivers/net/ethernet/intel/ice/ice_ddp.c
+@@ -2352,7 +2352,13 @@ ice_get_set_tx_topo(struct ice_hw *hw, u8 *buf, u16 buf_size,
+  * The function will apply the new Tx topology from the package buffer
+  * if available.
+  *
+- * Return: zero when update was successful, negative values otherwise.
++ * Return:
++ * * 0 - Successfully applied topology configuration.
++ * * -EBUSY - Failed to acquire global configuration lock.
++ * * -EEXIST - Topology configuration has already been applied.
++ * * -EIO - Unable to apply topology configuration.
++ * * -ENODEV - Failed to re-initialize device after applying configuration.
++ * * Other negative error codes indicate unexpected failures.
+  */
+ int ice_cfg_tx_topo(struct ice_hw *hw, const void *buf, u32 len)
+ {
+@@ -2385,7 +2391,7 @@ int ice_cfg_tx_topo(struct ice_hw *hw, const void *buf, u32 len)
+ 
+ 	if (status) {
+ 		ice_debug(hw, ICE_DBG_INIT, "Get current topology is failed\n");
+-		return status;
++		return -EIO;
+ 	}
+ 
+ 	/* Is default topology already applied ? */
+@@ -2472,31 +2478,45 @@ int ice_cfg_tx_topo(struct ice_hw *hw, const void *buf, u32 len)
+ 				 ICE_GLOBAL_CFG_LOCK_TIMEOUT);
+ 	if (status) {
+ 		ice_debug(hw, ICE_DBG_INIT, "Failed to acquire global lock\n");
+-		return status;
++		return -EBUSY;
+ 	}
+ 
+ 	/* Check if reset was triggered already. */
+ 	reg = rd32(hw, GLGEN_RSTAT);
+ 	if (reg & GLGEN_RSTAT_DEVSTATE_M) {
+-		/* Reset is in progress, re-init the HW again */
+ 		ice_debug(hw, ICE_DBG_INIT, "Reset is in progress. Layer topology might be applied already\n");
+ 		ice_check_reset(hw);
+-		return 0;
++		/* Reset is in progress, re-init the HW again */
++		goto reinit_hw;
+ 	}
+ 
+ 	/* Set new topology */
+ 	status = ice_get_set_tx_topo(hw, new_topo, size, NULL, NULL, true);
+ 	if (status) {
+-		ice_debug(hw, ICE_DBG_INIT, "Failed setting Tx topology\n");
+-		return status;
++		ice_debug(hw, ICE_DBG_INIT, "Failed to set Tx topology, status %pe\n",
++			  ERR_PTR(status));
++		/* only report -EIO here as the caller checks the error value
++		 * and reports an informational error message informing that
++		 * the driver failed to program Tx topology.
++		 */
++		status = -EIO;
+ 	}
+ 
+-	/* New topology is updated, delay 1 second before issuing the CORER */
++	/* Even if Tx topology config failed, we need to CORE reset here to
++	 * clear the global configuration lock. Delay 1 second to allow
++	 * hardware to settle then issue a CORER
++	 */
+ 	msleep(1000);
+ 	ice_reset(hw, ICE_RESET_CORER);
+-	/* CORER will clear the global lock, so no explicit call
+-	 * required for release.
+-	 */
++	ice_check_reset(hw);
++
++reinit_hw:
++	/* Since we triggered a CORER, re-initialize hardware */
++	ice_deinit_hw(hw);
++	if (ice_init_hw(hw)) {
++		ice_debug(hw, ICE_DBG_INIT, "Failed to re-init hardware after setting Tx topology\n");
++		return -ENODEV;
++	}
+ 
+-	return 0;
++	return status;
+ }
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index d1abd21cfc647c..74d4f2fde3e0f1 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -4559,17 +4559,23 @@ ice_init_tx_topology(struct ice_hw *hw, const struct firmware *firmware)
+ 			dev_info(dev, "Tx scheduling layers switching feature disabled\n");
+ 		else
+ 			dev_info(dev, "Tx scheduling layers switching feature enabled\n");
+-		/* if there was a change in topology ice_cfg_tx_topo triggered
+-		 * a CORER and we need to re-init hw
++		return 0;
++	} else if (err == -ENODEV) {
++		/* If we failed to re-initialize the device, we can no longer
++		 * continue loading.
+ 		 */
+-		ice_deinit_hw(hw);
+-		err = ice_init_hw(hw);
+-
++		dev_warn(dev, "Failed to initialize hardware after applying Tx scheduling configuration.\n");
+ 		return err;
+ 	} else if (err == -EIO) {
+ 		dev_info(dev, "DDP package does not support Tx scheduling layers switching feature - please update to the latest DDP package and try again\n");
++		return 0;
++	} else if (err == -EEXIST) {
++		return 0;
+ 	}
+ 
++	/* Do not treat this as a fatal error. */
++	dev_info(dev, "Failed to apply Tx scheduling configuration, err %pe\n",
++		 ERR_PTR(err));
+ 	return 0;
+ }
+ 
+diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
+index f522dd42093a9f..cde69f56866562 100644
+--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
++++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
+@@ -1295,7 +1295,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
+ 			skb = ice_construct_skb(rx_ring, xdp);
+ 		/* exit if we failed to retrieve a buffer */
+ 		if (!skb) {
+-			rx_ring->ring_stats->rx_stats.alloc_page_failed++;
++			rx_ring->ring_stats->rx_stats.alloc_buf_failed++;
+ 			xdp_verdict = ICE_XDP_CONSUMED;
+ 		}
+ 		ice_put_rx_mbuf(rx_ring, xdp, &xdp_xmit, ntc, xdp_verdict);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+index a2cf3e79693dd8..511b3ba2454207 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+@@ -107,7 +107,7 @@ static int mlx5_devlink_reload_fw_activate(struct devlink *devlink, struct netli
+ 	if (err)
+ 		return err;
+ 
+-	mlx5_unload_one_devl_locked(dev, true);
++	mlx5_sync_reset_unload_flow(dev, true);
+ 	err = mlx5_health_wait_pci_up(dev);
+ 	if (err)
+ 		NL_SET_ERR_MSG_MOD(extack, "FW activate aborted, PCI reads fail after reset");
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
+index 3efa8bf1d14ef4..4720523813b976 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
+@@ -575,7 +575,6 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
+ 		if (err)
+ 			return err;
+ 	}
+-	priv->dcbx.xoff = xoff;
+ 
+ 	/* Apply the settings */
+ 	if (update_buffer) {
+@@ -584,6 +583,8 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
+ 			return err;
+ 	}
+ 
++	priv->dcbx.xoff = xoff;
++
+ 	if (update_prio2buffer)
+ 		err = mlx5e_port_set_priority2buffer(priv->mdev, prio2buffer);
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h
+index f4a19ffbb641c0..66d276a1be836a 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h
+@@ -66,11 +66,23 @@ struct mlx5e_port_buffer {
+ 	struct mlx5e_bufferx_reg  buffer[MLX5E_MAX_NETWORK_BUFFER];
+ };
+ 
++#ifdef CONFIG_MLX5_CORE_EN_DCB
+ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
+ 				    u32 change, unsigned int mtu,
+ 				    struct ieee_pfc *pfc,
+ 				    u32 *buffer_size,
+ 				    u8 *prio2buffer);
++#else
++static inline int
++mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
++				u32 change, unsigned int mtu,
++				void *pfc,
++				u32 *buffer_size,
++				u8 *prio2buffer)
++{
++	return 0;
++}
++#endif
+ 
+ int mlx5e_port_query_buffer(struct mlx5e_priv *priv,
+ 			    struct mlx5e_port_buffer *port_buffer);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 4a2f58a9d70660..6176457b846bc1 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -47,6 +47,7 @@
+ #include "en.h"
+ #include "en/dim.h"
+ #include "en/txrx.h"
++#include "en/port_buffer.h"
+ #include "en_tc.h"
+ #include "en_rep.h"
+ #include "en_accel/ipsec.h"
+@@ -134,6 +135,8 @@ void mlx5e_update_carrier(struct mlx5e_priv *priv)
+ 	if (up) {
+ 		netdev_info(priv->netdev, "Link up\n");
+ 		netif_carrier_on(priv->netdev);
++		mlx5e_port_manual_buffer_config(priv, 0, priv->netdev->mtu,
++						NULL, NULL, NULL);
+ 	} else {
+ 		netdev_info(priv->netdev, "Link down\n");
+ 		netif_carrier_off(priv->netdev);
+@@ -2917,9 +2920,11 @@ int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv)
+ 	struct mlx5e_params *params = &priv->channels.params;
+ 	struct net_device *netdev = priv->netdev;
+ 	struct mlx5_core_dev *mdev = priv->mdev;
+-	u16 mtu;
++	u16 mtu, prev_mtu;
+ 	int err;
+ 
++	mlx5e_query_mtu(mdev, params, &prev_mtu);
++
+ 	err = mlx5e_set_mtu(mdev, params, params->sw_mtu);
+ 	if (err)
+ 		return err;
+@@ -2929,6 +2934,18 @@ int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv)
+ 		netdev_warn(netdev, "%s: VPort MTU %d is different than netdev mtu %d\n",
+ 			    __func__, mtu, params->sw_mtu);
+ 
++	if (mtu != prev_mtu && MLX5_BUFFER_SUPPORTED(mdev)) {
++		err = mlx5e_port_manual_buffer_config(priv, 0, mtu,
++						      NULL, NULL, NULL);
++		if (err) {
++			netdev_warn(netdev, "%s: Failed to set Xon/Xoff values with MTU %d (err %d), setting back to previous MTU %d\n",
++				    __func__, mtu, err, prev_mtu);
++
++			mlx5e_set_mtu(mdev, params, prev_mtu);
++			return err;
++		}
++	}
++
+ 	params->sw_mtu = mtu;
+ 	return 0;
+ }
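
[Editor's note] mlx5e_set_dev_port_mtu() now records the previous MTU and restores it if the buffer reconfiguration fails, so the port never runs with an MTU its Xon/Xoff thresholds were not sized for. The rollback shape, with a hypothetical API standing in for the mlx5 calls:

    struct dev;
    int dev_get_mtu(struct dev *d, int *mtu);       /* hypothetical API */
    int dev_set_mtu(struct dev *d, int mtu);
    int dev_config_buffers(struct dev *d, int mtu);

    static int set_mtu_checked(struct dev *d, int new_mtu)
    {
            int prev, err;

            err = dev_get_mtu(d, &prev);    /* capture state before changing it */
            if (err)
                    return err;

            err = dev_set_mtu(d, new_mtu);
            if (err)
                    return err;

            err = dev_config_buffers(d, new_mtu);
            if (err)
                    dev_set_mtu(d, prev);   /* best-effort rollback */
            return err;
    }
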
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+index 4f55e55ecb5513..516df7f1997ebc 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+@@ -6,13 +6,15 @@
+ #include "fw_reset.h"
+ #include "diag/fw_tracer.h"
+ #include "lib/tout.h"
++#include "sf/sf.h"
+ 
+ enum {
+ 	MLX5_FW_RESET_FLAGS_RESET_REQUESTED,
+ 	MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST,
+ 	MLX5_FW_RESET_FLAGS_PENDING_COMP,
+ 	MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS,
+-	MLX5_FW_RESET_FLAGS_RELOAD_REQUIRED
++	MLX5_FW_RESET_FLAGS_RELOAD_REQUIRED,
++	MLX5_FW_RESET_FLAGS_UNLOAD_EVENT,
+ };
+ 
+ struct mlx5_fw_reset {
+@@ -218,7 +220,7 @@ int mlx5_fw_reset_set_live_patch(struct mlx5_core_dev *dev)
+ 	return mlx5_reg_mfrl_set(dev, MLX5_MFRL_REG_RESET_LEVEL0, 0, 0, false);
+ }
+ 
+-static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev, bool unloaded)
++static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev)
+ {
+ 	struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+ 	struct devlink *devlink = priv_to_devlink(dev);
+@@ -227,8 +229,7 @@ static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev, bool unload
+ 	if (test_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags)) {
+ 		complete(&fw_reset->done);
+ 	} else {
+-		if (!unloaded)
+-			mlx5_unload_one(dev, false);
++		mlx5_sync_reset_unload_flow(dev, false);
+ 		if (mlx5_health_wait_pci_up(dev))
+ 			mlx5_core_err(dev, "reset reload flow aborted, PCI reads still not working\n");
+ 		else
+@@ -271,7 +272,7 @@ static void mlx5_sync_reset_reload_work(struct work_struct *work)
+ 
+ 	mlx5_sync_reset_clear_reset_requested(dev, false);
+ 	mlx5_enter_error_state(dev, true);
+-	mlx5_fw_reset_complete_reload(dev, false);
++	mlx5_fw_reset_complete_reload(dev);
+ }
+ 
+ #define MLX5_RESET_POLL_INTERVAL	(HZ / 10)
+@@ -423,6 +424,11 @@ static bool mlx5_is_reset_now_capable(struct mlx5_core_dev *dev,
+ 		return false;
+ 	}
+ 
++	if (!mlx5_core_is_ecpf(dev) && !mlx5_sf_table_empty(dev)) {
++		mlx5_core_warn(dev, "SFs should be removed before reset\n");
++		return false;
++	}
++
+ #if IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE)
+ 	if (reset_method != MLX5_MFRL_REG_PCI_RESET_METHOD_HOT_RESET) {
+ 		err = mlx5_check_hotplug_interrupt(dev);
+@@ -581,6 +587,59 @@ static int mlx5_sync_pci_reset(struct mlx5_core_dev *dev, u8 reset_method)
+ 	return err;
+ }
+ 
++void mlx5_sync_reset_unload_flow(struct mlx5_core_dev *dev, bool locked)
++{
++	struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
++	unsigned long timeout;
++	bool reset_action;
++	u8 rst_state;
++	int err;
++
++	if (locked)
++		mlx5_unload_one_devl_locked(dev, false);
++	else
++		mlx5_unload_one(dev, false);
++
++	if (!test_bit(MLX5_FW_RESET_FLAGS_UNLOAD_EVENT, &fw_reset->reset_flags))
++		return;
++
++	mlx5_set_fw_rst_ack(dev);
++	mlx5_core_warn(dev, "Sync Reset Unload done, device reset expected\n");
++
++	reset_action = false;
++	timeout = jiffies + msecs_to_jiffies(mlx5_tout_ms(dev, RESET_UNLOAD));
++	do {
++		rst_state = mlx5_get_fw_rst_state(dev);
++		if (rst_state == MLX5_FW_RST_STATE_TOGGLE_REQ ||
++		    rst_state == MLX5_FW_RST_STATE_IDLE) {
++			reset_action = true;
++			break;
++		}
++		msleep(20);
++	} while (!time_after(jiffies, timeout));
++
++	if (!reset_action) {
++		mlx5_core_err(dev, "Got timeout waiting for sync reset action, state = %u\n",
++			      rst_state);
++		fw_reset->ret = -ETIMEDOUT;
++		goto done;
++	}
++
++	mlx5_core_warn(dev, "Sync Reset, got reset action. rst_state = %u\n",
++		       rst_state);
++	if (rst_state == MLX5_FW_RST_STATE_TOGGLE_REQ) {
++		err = mlx5_sync_pci_reset(dev, fw_reset->reset_method);
++		if (err) {
++			mlx5_core_warn(dev, "mlx5_sync_pci_reset failed, err %d\n",
++				       err);
++			fw_reset->ret = err;
++		}
++	}
++
++done:
++	clear_bit(MLX5_FW_RESET_FLAGS_UNLOAD_EVENT, &fw_reset->reset_flags);
++}
++
+ static void mlx5_sync_reset_now_event(struct work_struct *work)
+ {
+ 	struct mlx5_fw_reset *fw_reset = container_of(work, struct mlx5_fw_reset,
+@@ -608,16 +667,13 @@ static void mlx5_sync_reset_now_event(struct work_struct *work)
+ 	mlx5_enter_error_state(dev, true);
+ done:
+ 	fw_reset->ret = err;
+-	mlx5_fw_reset_complete_reload(dev, false);
++	mlx5_fw_reset_complete_reload(dev);
+ }
+ 
+ static void mlx5_sync_reset_unload_event(struct work_struct *work)
+ {
+ 	struct mlx5_fw_reset *fw_reset;
+ 	struct mlx5_core_dev *dev;
+-	unsigned long timeout;
+-	bool reset_action;
+-	u8 rst_state;
+ 	int err;
+ 
+ 	fw_reset = container_of(work, struct mlx5_fw_reset, reset_unload_work);
+@@ -626,6 +682,7 @@ static void mlx5_sync_reset_unload_event(struct work_struct *work)
+ 	if (mlx5_sync_reset_clear_reset_requested(dev, false))
+ 		return;
+ 
++	set_bit(MLX5_FW_RESET_FLAGS_UNLOAD_EVENT, &fw_reset->reset_flags);
+ 	mlx5_core_warn(dev, "Sync Reset Unload. Function is forced down.\n");
+ 
+ 	err = mlx5_cmd_fast_teardown_hca(dev);
+@@ -634,44 +691,7 @@ static void mlx5_sync_reset_unload_event(struct work_struct *work)
+ 	else
+ 		mlx5_enter_error_state(dev, true);
+ 
+-	if (test_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags))
+-		mlx5_unload_one_devl_locked(dev, false);
+-	else
+-		mlx5_unload_one(dev, false);
+-
+-	mlx5_set_fw_rst_ack(dev);
+-	mlx5_core_warn(dev, "Sync Reset Unload done, device reset expected\n");
+-
+-	reset_action = false;
+-	timeout = jiffies + msecs_to_jiffies(mlx5_tout_ms(dev, RESET_UNLOAD));
+-	do {
+-		rst_state = mlx5_get_fw_rst_state(dev);
+-		if (rst_state == MLX5_FW_RST_STATE_TOGGLE_REQ ||
+-		    rst_state == MLX5_FW_RST_STATE_IDLE) {
+-			reset_action = true;
+-			break;
+-		}
+-		msleep(20);
+-	} while (!time_after(jiffies, timeout));
+-
+-	if (!reset_action) {
+-		mlx5_core_err(dev, "Got timeout waiting for sync reset action, state = %u\n",
+-			      rst_state);
+-		fw_reset->ret = -ETIMEDOUT;
+-		goto done;
+-	}
+-
+-	mlx5_core_warn(dev, "Sync Reset, got reset action. rst_state = %u\n", rst_state);
+-	if (rst_state == MLX5_FW_RST_STATE_TOGGLE_REQ) {
+-		err = mlx5_sync_pci_reset(dev, fw_reset->reset_method);
+-		if (err) {
+-			mlx5_core_warn(dev, "mlx5_sync_pci_reset failed, err %d\n", err);
+-			fw_reset->ret = err;
+-		}
+-	}
+-
+-done:
+-	mlx5_fw_reset_complete_reload(dev, true);
++	mlx5_fw_reset_complete_reload(dev);
+ }
+ 
+ static void mlx5_sync_reset_abort_event(struct work_struct *work)
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h
+index ea527d06a85f07..d5b28525c960dc 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h
+@@ -12,6 +12,7 @@ int mlx5_fw_reset_set_reset_sync(struct mlx5_core_dev *dev, u8 reset_type_sel,
+ int mlx5_fw_reset_set_live_patch(struct mlx5_core_dev *dev);
+ 
+ int mlx5_fw_reset_wait_reset_done(struct mlx5_core_dev *dev);
++void mlx5_sync_reset_unload_flow(struct mlx5_core_dev *dev, bool locked);
+ int mlx5_fw_reset_verify_fw_complete(struct mlx5_core_dev *dev,
+ 				     struct netlink_ext_ack *extack);
+ void mlx5_fw_reset_events_start(struct mlx5_core_dev *dev);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
+index b96909fbeb12de..bdac3db1bd61de 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
+@@ -518,3 +518,13 @@ void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev)
+ 	WARN_ON(!xa_empty(&table->function_ids));
+ 	kfree(table);
+ }
++
++bool mlx5_sf_table_empty(const struct mlx5_core_dev *dev)
++{
++	struct mlx5_sf_table *table = dev->priv.sf_table;
++
++	if (!table)
++		return true;
++
++	return xa_empty(&table->function_ids);
++}
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h
+index 860f9ddb7107b8..89559a37997ad6 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h
+@@ -17,6 +17,7 @@ void mlx5_sf_hw_table_destroy(struct mlx5_core_dev *dev);
+ 
+ int mlx5_sf_table_init(struct mlx5_core_dev *dev);
+ void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev);
++bool mlx5_sf_table_empty(const struct mlx5_core_dev *dev);
+ 
+ int mlx5_devlink_sf_port_new(struct devlink *devlink,
+ 			     const struct devlink_port_new_attrs *add_attr,
+@@ -61,6 +62,11 @@ static inline void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev)
+ {
+ }
+ 
++static inline bool mlx5_sf_table_empty(const struct mlx5_core_dev *dev)
++{
++	return true;
++}
++
+ #endif
+ 
+ #endif
+diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
+index 79e94632533c80..a8c95b1732f4d8 100644
+--- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
++++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
+@@ -53,6 +53,8 @@ int __fbnic_open(struct fbnic_net *fbn)
+ 	fbnic_bmc_rpc_init(fbd);
+ 	fbnic_rss_reinit(fbd, fbn);
+ 
++	phylink_resume(fbn->phylink);
++
+ 	return 0;
+ release_ownership:
+ 	fbnic_fw_xmit_ownership_msg(fbn->fbd, false);
+@@ -79,6 +81,8 @@ static int fbnic_stop(struct net_device *netdev)
+ {
+ 	struct fbnic_net *fbn = netdev_priv(netdev);
+ 
++	phylink_suspend(fbn->phylink, fbnic_bmc_present(fbn->fbd));
++
+ 	fbnic_down(fbn);
+ 	fbnic_pcs_irq_disable(fbn->fbd);
+ 
+diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
+index 268489b15616fd..72bdc6c76c0c5e 100644
+--- a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
++++ b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
+@@ -116,14 +116,12 @@ static void fbnic_service_task_start(struct fbnic_net *fbn)
+ 	struct fbnic_dev *fbd = fbn->fbd;
+ 
+ 	schedule_delayed_work(&fbd->service_task, HZ);
+-	phylink_resume(fbn->phylink);
+ }
+ 
+ static void fbnic_service_task_stop(struct fbnic_net *fbn)
+ {
+ 	struct fbnic_dev *fbd = fbn->fbd;
+ 
+-	phylink_suspend(fbn->phylink, fbnic_bmc_present(fbd));
+ 	cancel_delayed_work(&fbd->service_task);
+ }
+ 
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+index f519d43738b080..445259f2ee9353 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+@@ -47,6 +47,14 @@ static void dwxgmac2_core_init(struct mac_device_info *hw,
+ 	writel(XGMAC_INT_DEFAULT_EN, ioaddr + XGMAC_INT_EN);
+ }
+ 
++static void dwxgmac2_update_caps(struct stmmac_priv *priv)
++{
++	if (!priv->dma_cap.mbps_10_100)
++		priv->hw->link.caps &= ~(MAC_10 | MAC_100);
++	else if (!priv->dma_cap.half_duplex)
++		priv->hw->link.caps &= ~(MAC_10HD | MAC_100HD);
++}
++
+ static void dwxgmac2_set_mac(void __iomem *ioaddr, bool enable)
+ {
+ 	u32 tx = readl(ioaddr + XGMAC_TX_CONFIG);
+@@ -1532,6 +1540,7 @@ static void dwxgmac3_fpe_configure(void __iomem *ioaddr,
+ 
+ const struct stmmac_ops dwxgmac210_ops = {
+ 	.core_init = dwxgmac2_core_init,
++	.update_caps = dwxgmac2_update_caps,
+ 	.set_mac = dwxgmac2_set_mac,
+ 	.rx_ipc = dwxgmac2_rx_ipc,
+ 	.rx_queue_enable = dwxgmac2_rx_queue_enable,
+@@ -1646,8 +1655,8 @@ int dwxgmac2_setup(struct stmmac_priv *priv)
+ 		mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
+ 
+ 	mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+-			 MAC_1000FD | MAC_2500FD | MAC_5000FD |
+-			 MAC_10000FD;
++			 MAC_10 | MAC_100 | MAC_1000FD |
++			 MAC_2500FD | MAC_5000FD | MAC_10000FD;
+ 	mac->link.duplex = 0;
+ 	mac->link.speed10 = XGMAC_CONFIG_SS_10_MII;
+ 	mac->link.speed100 = XGMAC_CONFIG_SS_100_MII;
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
+index 5dcc95bc0ad28b..4d6bb995d8d84c 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
+@@ -203,10 +203,6 @@ static void dwxgmac2_dma_rx_mode(struct stmmac_priv *priv, void __iomem *ioaddr,
+ 	}
+ 
+ 	writel(value, ioaddr + XGMAC_MTL_RXQ_OPMODE(channel));
+-
+-	/* Enable MTL RX overflow */
+-	value = readl(ioaddr + XGMAC_MTL_QINTEN(channel));
+-	writel(value | XGMAC_RXOIE, ioaddr + XGMAC_MTL_QINTEN(channel));
+ }
+ 
+ static void dwxgmac2_dma_tx_mode(struct stmmac_priv *priv, void __iomem *ioaddr,
+@@ -386,8 +382,11 @@ static int dwxgmac2_dma_interrupt(struct stmmac_priv *priv,
+ static int dwxgmac2_get_hw_feature(void __iomem *ioaddr,
+ 				   struct dma_features *dma_cap)
+ {
++	struct stmmac_priv *priv;
+ 	u32 hw_cap;
+ 
++	priv = container_of(dma_cap, struct stmmac_priv, dma_cap);
++
+ 	/* MAC HW feature 0 */
+ 	hw_cap = readl(ioaddr + XGMAC_HW_FEATURE0);
+ 	dma_cap->edma = (hw_cap & XGMAC_HWFEAT_EDMA) >> 31;
+@@ -410,6 +409,8 @@ static int dwxgmac2_get_hw_feature(void __iomem *ioaddr,
+ 	dma_cap->vlhash = (hw_cap & XGMAC_HWFEAT_VLHASH) >> 4;
+ 	dma_cap->half_duplex = (hw_cap & XGMAC_HWFEAT_HDSEL) >> 3;
+ 	dma_cap->mbps_1000 = (hw_cap & XGMAC_HWFEAT_GMIISEL) >> 1;
++	if (dma_cap->mbps_1000 && priv->synopsys_id >= DWXGMAC_CORE_2_20)
++		dma_cap->mbps_10_100 = 1;
+ 
+ 	/* MAC HW feature 1 */
+ 	hw_cap = readl(ioaddr + XGMAC_HW_FEATURE1);
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 058cd9e9fd71dc..40d56ff66b6a82 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -2488,6 +2488,7 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
+ 	struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
+ 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
+ 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
++	bool csum = !priv->plat->tx_queues_cfg[queue].coe_unsupported;
+ 	struct xsk_buff_pool *pool = tx_q->xsk_pool;
+ 	unsigned int entry = tx_q->cur_tx;
+ 	struct dma_desc *tx_desc = NULL;
+@@ -2573,7 +2574,7 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
+ 		}
+ 
+ 		stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
+-				       true, priv->mode, true, true,
++				       csum, priv->mode, true, true,
+ 				       xdp_desc.len);
+ 
+ 		stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
+@@ -4902,6 +4903,7 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
+ {
+ 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
+ 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
++	bool csum = !priv->plat->tx_queues_cfg[queue].coe_unsupported;
+ 	unsigned int entry = tx_q->cur_tx;
+ 	struct dma_desc *tx_desc;
+ 	dma_addr_t dma_addr;
+@@ -4953,7 +4955,7 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
+ 	stmmac_set_desc_addr(priv, tx_desc, dma_addr);
+ 
+ 	stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
+-			       true, priv->mode, true, true,
++			       csum, priv->mode, true, true,
+ 			       xdpf->len);
+ 
+ 	tx_q->tx_count_frames++;
+diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
+index 807465dd4c8e34..5f14799b68c532 100644
+--- a/drivers/net/hyperv/netvsc.c
++++ b/drivers/net/hyperv/netvsc.c
+@@ -712,8 +712,13 @@ void netvsc_device_remove(struct hv_device *device)
+ 	for (i = 0; i < net_device->num_chn; i++) {
+ 		/* See also vmbus_reset_channel_cb(). */
+ 		/* only disable enabled NAPI channel */
+-		if (i < ndev->real_num_rx_queues)
++		if (i < ndev->real_num_rx_queues) {
++			netif_queue_set_napi(ndev, i, NETDEV_QUEUE_TYPE_TX,
++					     NULL);
++			netif_queue_set_napi(ndev, i, NETDEV_QUEUE_TYPE_RX,
++					     NULL);
+ 			napi_disable(&net_device->chan_table[i].napi);
++		}
+ 
+ 		netif_napi_del(&net_device->chan_table[i].napi);
+ 	}
+@@ -1806,6 +1811,11 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
+ 
+ 	/* Enable NAPI handler before init callbacks */
+ 	netif_napi_add(ndev, &net_device->chan_table[0].napi, netvsc_poll);
++	napi_enable(&net_device->chan_table[0].napi);
++	netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_RX,
++			     &net_device->chan_table[0].napi);
++	netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_TX,
++			     &net_device->chan_table[0].napi);
+ 
+ 	/* Open the channel */
+ 	device->channel->next_request_id_callback = vmbus_next_request_id;
+@@ -1825,8 +1835,6 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
+ 	/* Channel is opened */
+ 	netdev_dbg(ndev, "hv_netvsc channel opened successfully\n");
+ 
+-	napi_enable(&net_device->chan_table[0].napi);
+-
+ 	/* Connect with the NetVsp */
+ 	ret = netvsc_connect_vsp(device, net_device, device_info);
+ 	if (ret != 0) {
+@@ -1844,12 +1852,14 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
+ 
+ close:
+ 	RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);
+-	napi_disable(&net_device->chan_table[0].napi);
+ 
+ 	/* Now, we can close the channel safely */
+ 	vmbus_close(device->channel);
+ 
+ cleanup:
++	netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_TX, NULL);
++	netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_RX, NULL);
++	napi_disable(&net_device->chan_table[0].napi);
+ 	netif_napi_del(&net_device->chan_table[0].napi);
+ 
+ cleanup2:
+diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
+index e457f809fe3110..9a92552ee35c28 100644
+--- a/drivers/net/hyperv/rndis_filter.c
++++ b/drivers/net/hyperv/rndis_filter.c
+@@ -1252,13 +1252,27 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
+ 	new_sc->rqstor_size = netvsc_rqstor_size(netvsc_ring_bytes);
+ 	new_sc->max_pkt_size = NETVSC_MAX_PKT_SIZE;
+ 
++	/* Enable napi before opening the vmbus channel to avoid races
++	 * as the host placing data on the host->guest ring may be left
++	 * out if napi was not enabled.
++	 */
++	napi_enable(&nvchan->napi);
++	netif_queue_set_napi(ndev, chn_index, NETDEV_QUEUE_TYPE_RX,
++			     &nvchan->napi);
++	netif_queue_set_napi(ndev, chn_index, NETDEV_QUEUE_TYPE_TX,
++			     &nvchan->napi);
++
+ 	ret = vmbus_open(new_sc, netvsc_ring_bytes,
+ 			 netvsc_ring_bytes, NULL, 0,
+ 			 netvsc_channel_cb, nvchan);
+-	if (ret == 0)
+-		napi_enable(&nvchan->napi);
+-	else
++	if (ret != 0) {
+ 		netdev_notice(ndev, "sub channel open failed: %d\n", ret);
++		netif_queue_set_napi(ndev, chn_index, NETDEV_QUEUE_TYPE_TX,
++				     NULL);
++		netif_queue_set_napi(ndev, chn_index, NETDEV_QUEUE_TYPE_RX,
++				     NULL);
++		napi_disable(&nvchan->napi);
++	}
+ 
+ 	if (atomic_inc_return(&nvscdev->open_chn) == nvscdev->num_chn)
+ 		wake_up(&nvscdev->subchan_open);
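
[Editor's note] Both hyperv hunks above apply the rule the new comment states: enable NAPI (the consumer) before vmbus_open() lets the host post data, and unwind in exact reverse order when the open fails. Reduced sketch with a hypothetical channel API:

    struct chan;
    void consumer_enable(struct chan *c);   /* hypothetical */
    void consumer_disable(struct chan *c);
    int producer_open(struct chan *c);

    static int open_channel(struct chan *c)
    {
            int err;

            consumer_enable(c);     /* ready before any data can arrive */
            err = producer_open(c);
            if (err)
                    consumer_disable(c);    /* strict reverse of the enable */
            return err;
    }
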
+diff --git a/drivers/net/phy/mscc/mscc.h b/drivers/net/phy/mscc/mscc.h
+index 58c6d47fbe046d..2bfe314ef881c3 100644
+--- a/drivers/net/phy/mscc/mscc.h
++++ b/drivers/net/phy/mscc/mscc.h
+@@ -481,6 +481,7 @@ static inline void vsc8584_config_macsec_intr(struct phy_device *phydev)
+ void vsc85xx_link_change_notify(struct phy_device *phydev);
+ void vsc8584_config_ts_intr(struct phy_device *phydev);
+ int vsc8584_ptp_init(struct phy_device *phydev);
++void vsc8584_ptp_deinit(struct phy_device *phydev);
+ int vsc8584_ptp_probe_once(struct phy_device *phydev);
+ int vsc8584_ptp_probe(struct phy_device *phydev);
+ irqreturn_t vsc8584_handle_ts_interrupt(struct phy_device *phydev);
+@@ -495,6 +496,9 @@ static inline int vsc8584_ptp_init(struct phy_device *phydev)
+ {
+ 	return 0;
+ }
++static inline void vsc8584_ptp_deinit(struct phy_device *phydev)
++{
++}
+ static inline int vsc8584_ptp_probe_once(struct phy_device *phydev)
+ {
+ 	return 0;
+diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c
+index 42cafa68c40098..19983b206405c6 100644
+--- a/drivers/net/phy/mscc/mscc_main.c
++++ b/drivers/net/phy/mscc/mscc_main.c
+@@ -2337,9 +2337,7 @@ static int vsc85xx_probe(struct phy_device *phydev)
+ 
+ static void vsc85xx_remove(struct phy_device *phydev)
+ {
+-	struct vsc8531_private *priv = phydev->priv;
+-
+-	skb_queue_purge(&priv->rx_skbs_list);
++	vsc8584_ptp_deinit(phydev);
+ }
+ 
+ /* Microsemi VSC85xx PHYs */
+diff --git a/drivers/net/phy/mscc/mscc_ptp.c b/drivers/net/phy/mscc/mscc_ptp.c
+index 80992827a3bdd1..920f35f8f84e71 100644
+--- a/drivers/net/phy/mscc/mscc_ptp.c
++++ b/drivers/net/phy/mscc/mscc_ptp.c
+@@ -1295,7 +1295,6 @@ static void vsc8584_set_input_clk_configured(struct phy_device *phydev)
+ 
+ static int __vsc8584_init_ptp(struct phy_device *phydev)
+ {
+-	struct vsc8531_private *vsc8531 = phydev->priv;
+ 	static const u32 ltc_seq_e[] = { 0, 400000, 0, 0, 0 };
+ 	static const u8  ltc_seq_a[] = { 8, 6, 5, 4, 2 };
+ 	u32 val;
+@@ -1512,17 +1511,7 @@ static int __vsc8584_init_ptp(struct phy_device *phydev)
+ 
+ 	vsc85xx_ts_eth_cmp1_sig(phydev);
+ 
+-	vsc8531->mii_ts.rxtstamp = vsc85xx_rxtstamp;
+-	vsc8531->mii_ts.txtstamp = vsc85xx_txtstamp;
+-	vsc8531->mii_ts.hwtstamp = vsc85xx_hwtstamp;
+-	vsc8531->mii_ts.ts_info  = vsc85xx_ts_info;
+-	phydev->mii_ts = &vsc8531->mii_ts;
+-
+-	memcpy(&vsc8531->ptp->caps, &vsc85xx_clk_caps, sizeof(vsc85xx_clk_caps));
+-
+-	vsc8531->ptp->ptp_clock = ptp_clock_register(&vsc8531->ptp->caps,
+-						     &phydev->mdio.dev);
+-	return PTR_ERR_OR_ZERO(vsc8531->ptp->ptp_clock);
++	return 0;
+ }
+ 
+ void vsc8584_config_ts_intr(struct phy_device *phydev)
+@@ -1549,6 +1538,16 @@ int vsc8584_ptp_init(struct phy_device *phydev)
+ 	return 0;
+ }
+ 
++void vsc8584_ptp_deinit(struct phy_device *phydev)
++{
++	struct vsc8531_private *vsc8531 = phydev->priv;
++
++	if (vsc8531->ptp->ptp_clock) {
++		ptp_clock_unregister(vsc8531->ptp->ptp_clock);
++		skb_queue_purge(&vsc8531->rx_skbs_list);
++	}
++}
++
+ irqreturn_t vsc8584_handle_ts_interrupt(struct phy_device *phydev)
+ {
+ 	struct vsc8531_private *priv = phydev->priv;
+@@ -1609,7 +1608,16 @@ int vsc8584_ptp_probe(struct phy_device *phydev)
+ 
+ 	vsc8531->ptp->phydev = phydev;
+ 
+-	return 0;
++	vsc8531->mii_ts.rxtstamp = vsc85xx_rxtstamp;
++	vsc8531->mii_ts.txtstamp = vsc85xx_txtstamp;
++	vsc8531->mii_ts.hwtstamp = vsc85xx_hwtstamp;
++	vsc8531->mii_ts.ts_info  = vsc85xx_ts_info;
++	phydev->mii_ts = &vsc8531->mii_ts;
++
++	memcpy(&vsc8531->ptp->caps, &vsc85xx_clk_caps, sizeof(vsc85xx_clk_caps));
++	vsc8531->ptp->ptp_clock = ptp_clock_register(&vsc8531->ptp->caps,
++						     &phydev->mdio.dev);
++	return PTR_ERR_OR_ZERO(vsc8531->ptp->ptp_clock);
+ }
+ 
+ int vsc8584_ptp_probe_once(struct phy_device *phydev)
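
[Editor's note] The mscc_ptp.c move makes probe and remove symmetric: the PTP clock is registered in vsc8584_ptp_probe() and the new vsc8584_ptp_deinit() unregisters it and purges the timestamp queue. Skeleton of that pairing for a made-up driver (private struct invented for illustration):

    #include <linux/ptp_clock_kernel.h>
    #include <linux/skbuff.h>
    #include <linux/err.h>

    struct drv_priv {                       /* hypothetical private state */
            struct sk_buff_head rx_skbs;
            struct ptp_clock_info caps;
            struct ptp_clock *ptp_clock;
    };

    static int drv_ptp_probe(struct drv_priv *p, struct device *dev)
    {
            skb_queue_head_init(&p->rx_skbs);
            p->ptp_clock = ptp_clock_register(&p->caps, dev);
            return PTR_ERR_OR_ZERO(p->ptp_clock);
    }

    static void drv_ptp_deinit(struct drv_priv *p)
    {
            if (p->ptp_clock) {             /* registration may have failed */
                    ptp_clock_unregister(p->ptp_clock);
                    skb_queue_purge(&p->rx_skbs);
            }
    }
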
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 7e0608f5683531..0a0f0e18762bb1 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1355,6 +1355,9 @@ static const struct usb_device_id products[] = {
+ 	{QMI_FIXED_INTF(0x2357, 0x0201, 4)},	/* TP-LINK HSUPA Modem MA180 */
+ 	{QMI_FIXED_INTF(0x2357, 0x9000, 4)},	/* TP-LINK MA260 */
+ 	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1031, 3)}, /* Telit LE910C1-EUX */
++	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1034, 2)}, /* Telit LE910C4-WWX */
++	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1037, 4)}, /* Telit LE910C4-WWX */
++	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1038, 3)}, /* Telit LE910C4-WWX */
+ 	{QMI_QUIRK_SET_DTR(0x1bc7, 0x103a, 0)}, /* Telit LE910C4-WWX */
+ 	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)},	/* Telit LE922A */
+ 	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)},	/* Telit FN980 */
+diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
+index 110104a936d9c1..492f0354a79227 100644
+--- a/drivers/of/dynamic.c
++++ b/drivers/of/dynamic.c
+@@ -935,10 +935,15 @@ static int of_changeset_add_prop_helper(struct of_changeset *ocs,
+ 		return -ENOMEM;
+ 
+ 	ret = of_changeset_add_property(ocs, np, new_pp);
+-	if (ret)
++	if (ret) {
+ 		__of_prop_free(new_pp);
++		return ret;
++	}
+ 
+-	return ret;
++	new_pp->next = np->deadprops;
++	np->deadprops = new_pp;
++
++	return 0;
+ }
+ 
+ /**
+diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
+index 45445a1600a968..7b5d6562fe4a02 100644
+--- a/drivers/of/of_reserved_mem.c
++++ b/drivers/of/of_reserved_mem.c
+@@ -24,6 +24,7 @@
+ #include <linux/memblock.h>
+ #include <linux/kmemleak.h>
+ #include <linux/cma.h>
++#include <linux/dma-map-ops.h>
+ 
+ #include "of_private.h"
+ 
+@@ -128,13 +129,17 @@ static int __init __reserved_mem_reserve_reg(unsigned long node,
+ 		base = dt_mem_next_cell(dt_root_addr_cells, &prop);
+ 		size = dt_mem_next_cell(dt_root_size_cells, &prop);
+ 
+-		if (size &&
+-		    early_init_dt_reserve_memory(base, size, nomap) == 0)
++		if (size && early_init_dt_reserve_memory(base, size, nomap) == 0) {
++			/* Architecture specific contiguous memory fixup. */
++			if (of_flat_dt_is_compatible(node, "shared-dma-pool") &&
++			    of_get_flat_dt_prop(node, "reusable", NULL))
++				dma_contiguous_early_fixup(base, size);
+ 			pr_debug("Reserved memory: reserved region for node '%s': base %pa, size %lu MiB\n",
+ 				uname, &base, (unsigned long)(size / SZ_1M));
+-		else
++		} else {
+ 			pr_err("Reserved memory: failed to reserve memory for node '%s': base %pa, size %lu MiB\n",
+ 			       uname, &base, (unsigned long)(size / SZ_1M));
++		}
+ 
+ 		len -= t_len;
+ 	}
+@@ -417,7 +422,10 @@ static int __init __reserved_mem_alloc_size(unsigned long node, const char *unam
+ 		       uname, (unsigned long)(size / SZ_1M));
+ 		return -ENOMEM;
+ 	}
+-
++	/* Architecture specific contiguous memory fixup. */
++	if (of_flat_dt_is_compatible(node, "shared-dma-pool") &&
++	    of_get_flat_dt_prop(node, "reusable", NULL))
++		dma_contiguous_early_fixup(base, size);
+ 	/* Save region in the reserved_mem array */
+ 	fdt_reserved_mem_save_node(node, uname, base, size);
+ 	return 0;
+diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
+index d40afe74ddd1a3..f9473b8160778c 100644
+--- a/drivers/pci/controller/dwc/pcie-designware.c
++++ b/drivers/pci/controller/dwc/pcie-designware.c
+@@ -655,6 +655,14 @@ int dw_pcie_wait_for_link(struct dw_pcie *pci)
+ 		return -ETIMEDOUT;
+ 	}
+ 
++	/*
++	 * As per PCIe r6.0, sec 6.6.1, a Downstream Port that supports Link
++	 * speeds greater than 5.0 GT/s, software must wait a minimum of 100 ms
++	 * after Link training completes before sending a Configuration Request.
++	 */
++	if (pci->max_link_speed > 2)
++		msleep(PCIE_RESET_CONFIG_WAIT_MS);
++
+ 	offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ 	val = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA);
+ 
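
The `max_link_speed > 2` test above works because the stored value is the PCIe link speed encoding, not a rate: 1 and 2 mean 2.5 and 5.0 GT/s, and everything above 2 is the 8.0 GT/s-and-up range that PCIe r6.0 sec 6.6.1 subjects to the 100 ms post-training wait. A standalone sketch of that mapping (the helper name is invented for illustration):

    #include <stdio.h>

    /* PCIe link speed encoding (PCIe r6.0): index -> GT/s x 10 */
    static int pcie_speed_tenths(int encoding)
    {
            switch (encoding) {
            case 1: return 25;      /* 2.5 GT/s (Gen1) */
            case 2: return 50;      /* 5.0 GT/s (Gen2) */
            case 3: return 80;      /* 8.0 GT/s (Gen3) */
            case 4: return 160;     /* 16 GT/s  (Gen4) */
            case 5: return 320;     /* 32 GT/s  (Gen5) */
            case 6: return 640;     /* 64 GT/s  (Gen6) */
            default: return -1;
            }
    }

    int main(void)
    {
            for (int enc = 1; enc <= 6; enc++)
                    printf("encoding %d = %d.%d GT/s -> %s\n", enc,
                           pcie_speed_tenths(enc) / 10,
                           pcie_speed_tenths(enc) % 10,
                           enc > 2 ? "wait 100 ms after training" : "no extra wait");
            return 0;
    }
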
+diff --git a/drivers/pci/controller/plda/pcie-starfive.c b/drivers/pci/controller/plda/pcie-starfive.c
+index 0564fdce47c2a3..0a0b5a7d84d7ef 100644
+--- a/drivers/pci/controller/plda/pcie-starfive.c
++++ b/drivers/pci/controller/plda/pcie-starfive.c
+@@ -368,7 +368,7 @@ static int starfive_pcie_host_init(struct plda_pcie_rp *plda)
+ 	 * of 100ms following exit from a conventional reset before
+ 	 * sending a configuration request to the device.
+ 	 */
+-	msleep(PCIE_RESET_CONFIG_DEVICE_WAIT_MS);
++	msleep(PCIE_RESET_CONFIG_WAIT_MS);
+ 
+ 	if (starfive_pcie_host_wait_for_link(pcie))
+ 		dev_info(dev, "port link down\n");
+diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
+index b65868e7095179..c951f861a69b27 100644
+--- a/drivers/pci/pci.h
++++ b/drivers/pci/pci.h
+@@ -57,7 +57,7 @@
+  *    completes before sending a Configuration Request to the device
+  *    immediately below that Port."
+  */
+-#define PCIE_RESET_CONFIG_DEVICE_WAIT_MS	100
++#define PCIE_RESET_CONFIG_WAIT_MS	100
+ 
+ /* Message Routing (r[2:0]); PCIe r6.0, sec 2.2.8 */
+ #define PCIE_MSG_TYPE_R_RC	0
+diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
+index 354536de564b67..e05174e5efbc3a 100644
+--- a/drivers/pinctrl/Kconfig
++++ b/drivers/pinctrl/Kconfig
+@@ -504,6 +504,7 @@ config PINCTRL_STMFX
+ 	tristate "STMicroelectronics STMFX GPIO expander pinctrl driver"
+ 	depends on I2C
+ 	depends on OF_GPIO
++	depends on HAS_IOMEM
+ 	select GENERIC_PINCONF
+ 	select GPIOLIB_IRQCHIP
+ 	select MFD_STMFX
+diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
+index 32f94db6d6bf5d..e669768a7a5bf5 100644
+--- a/drivers/scsi/scsi_sysfs.c
++++ b/drivers/scsi/scsi_sysfs.c
+@@ -265,7 +265,7 @@ show_shost_supported_mode(struct device *dev, struct device_attribute *attr,
+ 	return show_shost_mode(supported_mode, buf);
+ }
+ 
+-static DEVICE_ATTR(supported_mode, S_IRUGO | S_IWUSR, show_shost_supported_mode, NULL);
++static DEVICE_ATTR(supported_mode, S_IRUGO, show_shost_supported_mode, NULL);
+ 
+ static ssize_t
+ show_shost_active_mode(struct device *dev,
+@@ -279,7 +279,7 @@ show_shost_active_mode(struct device *dev,
+ 		return show_shost_mode(shost->active_mode, buf);
+ }
+ 
+-static DEVICE_ATTR(active_mode, S_IRUGO | S_IWUSR, show_shost_active_mode, NULL);
++static DEVICE_ATTR(active_mode, S_IRUGO, show_shost_active_mode, NULL);
+ 
+ static int check_reset_type(const char *str)
+ {
+diff --git a/drivers/thermal/mediatek/lvts_thermal.c b/drivers/thermal/mediatek/lvts_thermal.c
+index ae063d1bc95f86..017191b9f8645f 100644
+--- a/drivers/thermal/mediatek/lvts_thermal.c
++++ b/drivers/thermal/mediatek/lvts_thermal.c
+@@ -121,7 +121,11 @@ struct lvts_ctrl_data {
+ 
+ struct lvts_data {
+ 	const struct lvts_ctrl_data *lvts_ctrl;
++	const u32 *conn_cmd;
++	const u32 *init_cmd;
+ 	int num_lvts_ctrl;
++	int num_conn_cmd;
++	int num_init_cmd;
+ 	int temp_factor;
+ 	int temp_offset;
+ 	int gt_calib_bit_offset;
+@@ -880,7 +884,7 @@ static void lvts_ctrl_monitor_enable(struct device *dev, struct lvts_ctrl *lvts_
+  * each write in the configuration register must be separated by a
+  * delay of 2 us.
+  */
+-static void lvts_write_config(struct lvts_ctrl *lvts_ctrl, u32 *cmds, int nr_cmds)
++static void lvts_write_config(struct lvts_ctrl *lvts_ctrl, const u32 *cmds, int nr_cmds)
+ {
+ 	int i;
+ 
+@@ -963,9 +967,10 @@ static int lvts_ctrl_set_enable(struct lvts_ctrl *lvts_ctrl, int enable)
+ 
+ static int lvts_ctrl_connect(struct device *dev, struct lvts_ctrl *lvts_ctrl)
+ {
+-	u32 id, cmds[] = { 0xC103FFFF, 0xC502FF55 };
++	const struct lvts_data *lvts_data = lvts_ctrl->lvts_data;
++	u32 id;
+ 
+-	lvts_write_config(lvts_ctrl, cmds, ARRAY_SIZE(cmds));
++	lvts_write_config(lvts_ctrl, lvts_data->conn_cmd, lvts_data->num_conn_cmd);
+ 
+ 	/*
+ 	 * LVTS_ID : Get ID and status of the thermal controller
+@@ -984,17 +989,9 @@ static int lvts_ctrl_connect(struct device *dev, struct lvts_ctrl *lvts_ctrl)
+ 
+ static int lvts_ctrl_initialize(struct device *dev, struct lvts_ctrl *lvts_ctrl)
+ {
+-	/*
+-	 * Write device mask: 0xC1030000
+-	 */
+-	u32 cmds[] = {
+-		0xC1030E01, 0xC1030CFC, 0xC1030A8C, 0xC103098D, 0xC10308F1,
+-		0xC10307A6, 0xC10306B8, 0xC1030500, 0xC1030420, 0xC1030300,
+-		0xC1030030, 0xC10300F6, 0xC1030050, 0xC1030060, 0xC10300AC,
+-		0xC10300FC, 0xC103009D, 0xC10300F1, 0xC10300E1
+-	};
++	const struct lvts_data *lvts_data = lvts_ctrl->lvts_data;
+ 
+-	lvts_write_config(lvts_ctrl, cmds, ARRAY_SIZE(cmds));
++	lvts_write_config(lvts_ctrl, lvts_data->init_cmd, lvts_data->num_init_cmd);
+ 
+ 	return 0;
+ }
+@@ -1423,6 +1420,25 @@ static int lvts_resume(struct device *dev)
+ 	return 0;
+ }
+ 
++static const u32 default_conn_cmds[] = { 0xC103FFFF, 0xC502FF55 };
++static const u32 mt7988_conn_cmds[] = { 0xC103FFFF, 0xC502FC55 };
++
++/*
++ * Write device mask: 0xC1030000
++ */
++static const u32 default_init_cmds[] = {
++	0xC1030E01, 0xC1030CFC, 0xC1030A8C, 0xC103098D, 0xC10308F1,
++	0xC10307A6, 0xC10306B8, 0xC1030500, 0xC1030420, 0xC1030300,
++	0xC1030030, 0xC10300F6, 0xC1030050, 0xC1030060, 0xC10300AC,
++	0xC10300FC, 0xC103009D, 0xC10300F1, 0xC10300E1
++};
++
++static const u32 mt7988_init_cmds[] = {
++	0xC1030300, 0xC1030420, 0xC1030500, 0xC10307A6, 0xC1030CFC,
++	0xC1030A8C, 0xC103098D, 0xC10308F1, 0xC1030B04, 0xC1030E01,
++	0xC10306B8
++};
++
+ /*
+  * The MT8186 calibration data is stored as packed 3-byte little-endian
+  * values using a weird layout that makes sense only when viewed as a 32-bit
+@@ -1717,7 +1733,11 @@ static const struct lvts_ctrl_data mt8195_lvts_ap_data_ctrl[] = {
+ 
+ static const struct lvts_data mt7988_lvts_ap_data = {
+ 	.lvts_ctrl	= mt7988_lvts_ap_data_ctrl,
++	.conn_cmd	= mt7988_conn_cmds,
++	.init_cmd	= mt7988_init_cmds,
+ 	.num_lvts_ctrl	= ARRAY_SIZE(mt7988_lvts_ap_data_ctrl),
++	.num_conn_cmd	= ARRAY_SIZE(mt7988_conn_cmds),
++	.num_init_cmd	= ARRAY_SIZE(mt7988_init_cmds),
+ 	.temp_factor	= LVTS_COEFF_A_MT7988,
+ 	.temp_offset	= LVTS_COEFF_B_MT7988,
+ 	.gt_calib_bit_offset = 24,
+@@ -1725,7 +1745,11 @@ static const struct lvts_data mt7988_lvts_ap_data = {
+ 
+ static const struct lvts_data mt8186_lvts_data = {
+ 	.lvts_ctrl	= mt8186_lvts_data_ctrl,
++	.conn_cmd	= default_conn_cmds,
++	.init_cmd	= default_init_cmds,
+ 	.num_lvts_ctrl	= ARRAY_SIZE(mt8186_lvts_data_ctrl),
++	.num_conn_cmd	= ARRAY_SIZE(default_conn_cmds),
++	.num_init_cmd	= ARRAY_SIZE(default_init_cmds),
+ 	.temp_factor	= LVTS_COEFF_A_MT7988,
+ 	.temp_offset	= LVTS_COEFF_B_MT7988,
+ 	.gt_calib_bit_offset = 24,
+@@ -1734,7 +1758,11 @@ static const struct lvts_data mt8186_lvts_data = {
+ 
+ static const struct lvts_data mt8188_lvts_mcu_data = {
+ 	.lvts_ctrl	= mt8188_lvts_mcu_data_ctrl,
++	.conn_cmd	= default_conn_cmds,
++	.init_cmd	= default_init_cmds,
+ 	.num_lvts_ctrl	= ARRAY_SIZE(mt8188_lvts_mcu_data_ctrl),
++	.num_conn_cmd	= ARRAY_SIZE(default_conn_cmds),
++	.num_init_cmd	= ARRAY_SIZE(default_init_cmds),
+ 	.temp_factor	= LVTS_COEFF_A_MT8195,
+ 	.temp_offset	= LVTS_COEFF_B_MT8195,
+ 	.gt_calib_bit_offset = 20,
+@@ -1743,7 +1771,11 @@ static const struct lvts_data mt8188_lvts_mcu_data = {
+ 
+ static const struct lvts_data mt8188_lvts_ap_data = {
+ 	.lvts_ctrl	= mt8188_lvts_ap_data_ctrl,
++	.conn_cmd	= default_conn_cmds,
++	.init_cmd	= default_init_cmds,
+ 	.num_lvts_ctrl	= ARRAY_SIZE(mt8188_lvts_ap_data_ctrl),
++	.num_conn_cmd	= ARRAY_SIZE(default_conn_cmds),
++	.num_init_cmd	= ARRAY_SIZE(default_init_cmds),
+ 	.temp_factor	= LVTS_COEFF_A_MT8195,
+ 	.temp_offset	= LVTS_COEFF_B_MT8195,
+ 	.gt_calib_bit_offset = 20,
+@@ -1752,7 +1784,11 @@ static const struct lvts_data mt8188_lvts_ap_data = {
+ 
+ static const struct lvts_data mt8192_lvts_mcu_data = {
+ 	.lvts_ctrl	= mt8192_lvts_mcu_data_ctrl,
++	.conn_cmd	= default_conn_cmds,
++	.init_cmd	= default_init_cmds,
+ 	.num_lvts_ctrl	= ARRAY_SIZE(mt8192_lvts_mcu_data_ctrl),
++	.num_conn_cmd	= ARRAY_SIZE(default_conn_cmds),
++	.num_init_cmd	= ARRAY_SIZE(default_init_cmds),
+ 	.temp_factor	= LVTS_COEFF_A_MT8195,
+ 	.temp_offset	= LVTS_COEFF_B_MT8195,
+ 	.gt_calib_bit_offset = 24,
+@@ -1761,7 +1797,11 @@ static const struct lvts_data mt8192_lvts_mcu_data = {
+ 
+ static const struct lvts_data mt8192_lvts_ap_data = {
+ 	.lvts_ctrl	= mt8192_lvts_ap_data_ctrl,
++	.conn_cmd	= default_conn_cmds,
++	.init_cmd	= default_init_cmds,
+ 	.num_lvts_ctrl	= ARRAY_SIZE(mt8192_lvts_ap_data_ctrl),
++	.num_conn_cmd	= ARRAY_SIZE(default_conn_cmds),
++	.num_init_cmd	= ARRAY_SIZE(default_init_cmds),
+ 	.temp_factor	= LVTS_COEFF_A_MT8195,
+ 	.temp_offset	= LVTS_COEFF_B_MT8195,
+ 	.gt_calib_bit_offset = 24,
+@@ -1770,7 +1810,11 @@ static const struct lvts_data mt8192_lvts_ap_data = {
+ 
+ static const struct lvts_data mt8195_lvts_mcu_data = {
+ 	.lvts_ctrl	= mt8195_lvts_mcu_data_ctrl,
++	.conn_cmd	= default_conn_cmds,
++	.init_cmd	= default_init_cmds,
+ 	.num_lvts_ctrl	= ARRAY_SIZE(mt8195_lvts_mcu_data_ctrl),
++	.num_conn_cmd	= ARRAY_SIZE(default_conn_cmds),
++	.num_init_cmd	= ARRAY_SIZE(default_init_cmds),
+ 	.temp_factor	= LVTS_COEFF_A_MT8195,
+ 	.temp_offset	= LVTS_COEFF_B_MT8195,
+ 	.gt_calib_bit_offset = 24,
+@@ -1779,7 +1823,11 @@ static const struct lvts_data mt8195_lvts_mcu_data = {
+ 
+ static const struct lvts_data mt8195_lvts_ap_data = {
+ 	.lvts_ctrl	= mt8195_lvts_ap_data_ctrl,
++	.conn_cmd	= default_conn_cmds,
++	.init_cmd	= default_init_cmds,
+ 	.num_lvts_ctrl	= ARRAY_SIZE(mt8195_lvts_ap_data_ctrl),
++	.num_conn_cmd	= ARRAY_SIZE(default_conn_cmds),
++	.num_init_cmd	= ARRAY_SIZE(default_init_cmds),
+ 	.temp_factor	= LVTS_COEFF_A_MT8195,
+ 	.temp_offset	= LVTS_COEFF_B_MT8195,
+ 	.gt_calib_bit_offset = 24,
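
The lvts rework above is the usual per-variant configuration-table idiom: the command sequences move out of the functions and into each SoC's match data as pointer-plus-count pairs, so one writer loop serves every chip. A generic standalone sketch of the shape (all names invented; the command values are just the first two entries of each kernel table, kept for flavour):

    #include <stddef.h>
    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    struct chip_data {
            const unsigned int *init_cmd;
            size_t num_init_cmd;
    };

    static const unsigned int default_init_cmds[] = { 0xC1030E01, 0xC1030CFC };
    static const unsigned int mt7988_init_cmds[]  = { 0xC1030300, 0xC1030420 };

    static const struct chip_data default_data = {
            .init_cmd     = default_init_cmds,
            .num_init_cmd = ARRAY_SIZE(default_init_cmds),
    };

    static const struct chip_data mt7988_data = {
            .init_cmd     = mt7988_init_cmds,
            .num_init_cmd = ARRAY_SIZE(mt7988_init_cmds),
    };

    /* One writer for every variant: the table varies, the code does not. */
    static void write_config(const struct chip_data *data)
    {
            for (size_t i = 0; i < data->num_init_cmd; i++)
                    printf("write %#x\n", data->init_cmd[i]);
    }

    int main(void)
    {
            write_config(&default_data);
            write_config(&mt7988_data);
            return 0;
    }
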
+diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
+index f16279351db56e..aff4ec78356287 100644
+--- a/drivers/vhost/net.c
++++ b/drivers/vhost/net.c
+@@ -96,6 +96,7 @@ struct vhost_net_ubuf_ref {
+ 	atomic_t refcount;
+ 	wait_queue_head_t wait;
+ 	struct vhost_virtqueue *vq;
++	struct rcu_head rcu;
+ };
+ 
+ #define VHOST_NET_BATCH 64
+@@ -247,9 +248,13 @@ vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy)
+ 
+ static int vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs)
+ {
+-	int r = atomic_sub_return(1, &ubufs->refcount);
++	int r;
++
++	rcu_read_lock();
++	r = atomic_sub_return(1, &ubufs->refcount);
+ 	if (unlikely(!r))
+ 		wake_up(&ubufs->wait);
++	rcu_read_unlock();
+ 	return r;
+ }
+ 
+@@ -262,7 +267,7 @@ static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs)
+ static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs)
+ {
+ 	vhost_net_ubuf_put_and_wait(ubufs);
+-	kfree(ubufs);
++	kfree_rcu(ubufs, rcu);
+ }
+ 
+ static void vhost_net_clear_ubuf_info(struct vhost_net *n)
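
The vhost change above closes a teardown race: the thread doing the final put can still be inside wake_up() on the ubufs object while the waiter, having seen the refcount hit zero, frees it. Wrapping the put in an RCU read section and freeing with kfree_rcu() keeps the memory alive until the waker is done. A minimal userspace analogue of the safe shape, with a mutex standing in for the role RCU plays in the kernel (names are illustrative, not from the driver; build with -pthread):

    #include <pthread.h>
    #include <stdlib.h>

    struct ref {
            pthread_mutex_t lock;
            pthread_cond_t zero;
            int count;
    };

    /* Drop a reference. Signalling happens under the same lock the
     * waiter takes before freeing, so the object cannot disappear
     * while we are still touching it. */
    static void ref_put(struct ref *r)
    {
            pthread_mutex_lock(&r->lock);
            if (--r->count == 0)
                    pthread_cond_signal(&r->zero);
            pthread_mutex_unlock(&r->lock);
    }

    /* Wait for the count to reach zero; only then is freeing safe. */
    static void ref_wait_and_free(struct ref *r)
    {
            pthread_mutex_lock(&r->lock);
            while (r->count)
                    pthread_cond_wait(&r->zero, &r->lock);
            pthread_mutex_unlock(&r->lock);
            pthread_cond_destroy(&r->zero);
            pthread_mutex_destroy(&r->lock);
            free(r);
    }

    static void *worker(void *arg)
    {
            ref_put(arg);   /* the last put wakes the owner */
            return NULL;
    }

    int main(void)
    {
            struct ref *r = malloc(sizeof(*r));
            pthread_t t;

            if (!r)
                    return 1;
            pthread_mutex_init(&r->lock, NULL);
            pthread_cond_init(&r->zero, NULL);
            r->count = 1;

            pthread_create(&t, NULL, worker, r);
            ref_wait_and_free(r);   /* blocks until the worker's put */
            pthread_join(t, NULL);
            return 0;
    }
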
+diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c
+index 11ebddc57bc73a..1831e015b2f26f 100644
+--- a/fs/efivarfs/super.c
++++ b/fs/efivarfs/super.c
+@@ -127,6 +127,10 @@ static int efivarfs_d_compare(const struct dentry *dentry,
+ {
+ 	int guid = len - EFI_VARIABLE_GUID_LEN;
+ 
++	/* Parallel lookups may produce a temporary invalid filename */
++	if (guid <= 0)
++		return 1;
++
+ 	if (name->len != len)
+ 		return 1;
+ 
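
The new guard matters because d_compare derives the GUID offset as len - EFI_VARIABLE_GUID_LEN (36, the textual GUID length), and a transient short name from a parallel lookup would make that offset zero or negative, sending the later comparisons out of bounds. The arithmetic, as a tiny standalone check:

    #include <stdio.h>
    #include <string.h>

    #define EFI_VARIABLE_GUID_LEN 36  /* "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" */

    /* Offset of the GUID suffix inside "Name-<guid>"; zero or negative
     * means the name cannot be a valid efivarfs entry. */
    static int guid_offset(const char *name)
    {
            return (int)strlen(name) - EFI_VARIABLE_GUID_LEN;
    }

    int main(void)
    {
            const char *valid = "Boot0000-8be4df61-93ca-11d2-aa0d-00e098032b8c";
            const char *bogus = "tmp";  /* transient name during a lookup */

            printf("%-46s offset %d\n", valid, guid_offset(valid));
            printf("%-46s offset %d (reject)\n", bogus, guid_offset(bogus));
            return 0;
    }
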
+diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
+index f35d2eb0ed11c5..63acd91d15aada 100644
+--- a/fs/erofs/zdata.c
++++ b/fs/erofs/zdata.c
+@@ -1410,6 +1410,16 @@ static void z_erofs_decompressqueue_kthread_work(struct kthread_work *work)
+ }
+ #endif
+ 
++/* Use (kthread_)work in atomic contexts to minimize scheduling overhead */
++static inline bool z_erofs_in_atomic(void)
++{
++	if (IS_ENABLED(CONFIG_PREEMPTION) && rcu_preempt_depth())
++		return true;
++	if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
++		return true;
++	return !preemptible();
++}
++
+ static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
+ 				       int bios)
+ {
+@@ -1424,8 +1434,7 @@ static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
+ 
+ 	if (atomic_add_return(bios, &io->pending_bios))
+ 		return;
+-	/* Use (kthread_)work and sync decompression for atomic contexts only */
+-	if (!in_task() || irqs_disabled() || rcu_read_lock_any_held()) {
++	if (z_erofs_in_atomic()) {
+ #ifdef CONFIG_EROFS_FS_PCPU_KTHREAD
+ 		struct kthread_worker *worker;
+ 
+diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c
+index 9d96b833015c82..64dc7ec045d87d 100644
+--- a/fs/smb/client/cifsfs.c
++++ b/fs/smb/client/cifsfs.c
+@@ -1348,6 +1348,20 @@ static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
+ 			truncate_setsize(target_inode, new_size);
+ 			fscache_resize_cookie(cifs_inode_cookie(target_inode),
+ 					      new_size);
++		} else if (rc == -EOPNOTSUPP) {
++			/*
++			 * copy_file_range syscall man page indicates EINVAL
++			 * is returned e.g when "fd_in and fd_out refer to the
++			 * same file and the source and target ranges overlap."
++			 * Test generic/157 was what showed these cases where
++			 * we need to remap EOPNOTSUPP to EINVAL
++			 */
++			if (off >= src_inode->i_size) {
++				rc = -EINVAL;
++			} else if (src_inode == target_inode) {
++				if (off + len > destoff)
++					rc = -EINVAL;
++			}
+ 		}
+ 		if (rc == 0 && new_size > target_cifsi->netfs.zero_point)
+ 			target_cifsi->netfs.zero_point = new_size;
+diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c
+index 31fce0a1b57191..c0df2c1841243e 100644
+--- a/fs/smb/client/inode.c
++++ b/fs/smb/client/inode.c
+@@ -1917,15 +1917,24 @@ int cifs_unlink(struct inode *dir, struct dentry *dentry)
+ 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
+ 	struct tcon_link *tlink;
+ 	struct cifs_tcon *tcon;
++	__u32 dosattr = 0, origattr = 0;
+ 	struct TCP_Server_Info *server;
+ 	struct iattr *attrs = NULL;
+-	__u32 dosattr = 0, origattr = 0;
++	bool rehash = false;
+ 
+ 	cifs_dbg(FYI, "cifs_unlink, dir=0x%p, dentry=0x%p\n", dir, dentry);
+ 
+ 	if (unlikely(cifs_forced_shutdown(cifs_sb)))
+ 		return -EIO;
+ 
++	/* Unhash dentry in advance to prevent any concurrent opens */
++	spin_lock(&dentry->d_lock);
++	if (!d_unhashed(dentry)) {
++		__d_drop(dentry);
++		rehash = true;
++	}
++	spin_unlock(&dentry->d_lock);
++
+ 	tlink = cifs_sb_tlink(cifs_sb);
+ 	if (IS_ERR(tlink))
+ 		return PTR_ERR(tlink);
+@@ -1977,7 +1986,8 @@ int cifs_unlink(struct inode *dir, struct dentry *dentry)
+ 			cifs_drop_nlink(inode);
+ 		}
+ 	} else if (rc == -ENOENT) {
+-		d_drop(dentry);
++		if (simple_positive(dentry))
++			d_delete(dentry);
+ 	} else if (rc == -EBUSY) {
+ 		if (server->ops->rename_pending_delete) {
+ 			rc = server->ops->rename_pending_delete(full_path,
+@@ -2030,6 +2040,8 @@ int cifs_unlink(struct inode *dir, struct dentry *dentry)
+ 	kfree(attrs);
+ 	free_xid(xid);
+ 	cifs_put_tlink(tlink);
++	if (rehash)
++		d_rehash(dentry);
+ 	return rc;
+ }
+ 
+@@ -2429,6 +2441,7 @@ cifs_rename2(struct mnt_idmap *idmap, struct inode *source_dir,
+ 	struct cifs_sb_info *cifs_sb;
+ 	struct tcon_link *tlink;
+ 	struct cifs_tcon *tcon;
++	bool rehash = false;
+ 	unsigned int xid;
+ 	int rc, tmprc;
+ 	int retry_count = 0;
+@@ -2444,6 +2457,17 @@ cifs_rename2(struct mnt_idmap *idmap, struct inode *source_dir,
+ 	if (unlikely(cifs_forced_shutdown(cifs_sb)))
+ 		return -EIO;
+ 
++	/*
++	 * Prevent any concurrent opens on the target by unhashing the dentry.
++	 * VFS already unhashes the target when renaming directories.
++	 */
++	if (d_is_positive(target_dentry) && !d_is_dir(target_dentry)) {
++		if (!d_unhashed(target_dentry)) {
++			d_drop(target_dentry);
++			rehash = true;
++		}
++	}
++
+ 	tlink = cifs_sb_tlink(cifs_sb);
+ 	if (IS_ERR(tlink))
+ 		return PTR_ERR(tlink);
+@@ -2485,6 +2509,8 @@ cifs_rename2(struct mnt_idmap *idmap, struct inode *source_dir,
+ 		}
+ 	}
+ 
++	if (!rc)
++		rehash = false;
+ 	/*
+ 	 * No-replace is the natural behavior for CIFS, so skip unlink hacks.
+ 	 */
+@@ -2543,12 +2569,16 @@ cifs_rename2(struct mnt_idmap *idmap, struct inode *source_dir,
+ 			goto cifs_rename_exit;
+ 		rc = cifs_do_rename(xid, source_dentry, from_name,
+ 				    target_dentry, to_name);
++		if (!rc)
++			rehash = false;
+ 	}
+ 
+ 	/* force revalidate to go get info when needed */
+ 	CIFS_I(source_dir)->time = CIFS_I(target_dir)->time = 0;
+ 
+ cifs_rename_exit:
++	if (rehash)
++		d_rehash(target_dentry);
+ 	kfree(info_buf_source);
+ 	free_dentry_path(page2);
+ 	free_dentry_path(page1);
+diff --git a/fs/smb/client/smb2inode.c b/fs/smb/client/smb2inode.c
+index 6048b3fed3e787..b51ccfb8843941 100644
+--- a/fs/smb/client/smb2inode.c
++++ b/fs/smb/client/smb2inode.c
+@@ -206,8 +206,10 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ 	server = cifs_pick_channel(ses);
+ 
+ 	vars = kzalloc(sizeof(*vars), GFP_ATOMIC);
+-	if (vars == NULL)
+-		return -ENOMEM;
++	if (vars == NULL) {
++		rc = -ENOMEM;
++		goto out;
++	}
+ 	rqst = &vars->rqst[0];
+ 	rsp_iov = &vars->rsp_iov[0];
+ 
+@@ -832,6 +834,7 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ 	    smb2_should_replay(tcon, &retries, &cur_sleep))
+ 		goto replay_again;
+ 
++out:
+ 	if (cfile)
+ 		cifsFileInfo_put(cfile);
+ 
+diff --git a/fs/xfs/libxfs/xfs_attr_remote.c b/fs/xfs/libxfs/xfs_attr_remote.c
+index 4c44ce1c8a644b..bff3dc226f8128 100644
+--- a/fs/xfs/libxfs/xfs_attr_remote.c
++++ b/fs/xfs/libxfs/xfs_attr_remote.c
+@@ -435,6 +435,13 @@ xfs_attr_rmtval_get(
+ 					0, &bp, &xfs_attr3_rmt_buf_ops);
+ 			if (xfs_metadata_is_sick(error))
+ 				xfs_dirattr_mark_sick(args->dp, XFS_ATTR_FORK);
++			/*
++			 * ENODATA from disk implies a disk medium failure;
++			 * ENODATA for xattrs means attribute not found, so
++			 * disambiguate that here.
++			 */
++			if (error == -ENODATA)
++				error = -EIO;
+ 			if (error)
+ 				return error;
+ 
+diff --git a/fs/xfs/libxfs/xfs_da_btree.c b/fs/xfs/libxfs/xfs_da_btree.c
+index 17d9e6154f1978..723a0643b8386c 100644
+--- a/fs/xfs/libxfs/xfs_da_btree.c
++++ b/fs/xfs/libxfs/xfs_da_btree.c
+@@ -2833,6 +2833,12 @@ xfs_da_read_buf(
+ 			&bp, ops);
+ 	if (xfs_metadata_is_sick(error))
+ 		xfs_dirattr_mark_sick(dp, whichfork);
++	/*
++	 * ENODATA from disk implies a disk medium failure; ENODATA for
++	 * xattrs means attribute not found, so disambiguate that here.
++	 */
++	if (error == -ENODATA && whichfork == XFS_ATTR_FORK)
++		error = -EIO;
+ 	if (error)
+ 		goto out_free;
+ 
+diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
+index 45f2f278b50a8a..70807c679f1abc 100644
+--- a/include/linux/atmdev.h
++++ b/include/linux/atmdev.h
+@@ -185,6 +185,7 @@ struct atmdev_ops { /* only send is required */
+ 	int (*compat_ioctl)(struct atm_dev *dev,unsigned int cmd,
+ 			    void __user *arg);
+ #endif
++	int (*pre_send)(struct atm_vcc *vcc, struct sk_buff *skb);
+ 	int (*send)(struct atm_vcc *vcc,struct sk_buff *skb);
+ 	int (*send_bh)(struct atm_vcc *vcc, struct sk_buff *skb);
+ 	int (*send_oam)(struct atm_vcc *vcc,void *cell,int flags);
+diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h
+index b7773201414c27..b42408a24ad13a 100644
+--- a/include/linux/dma-map-ops.h
++++ b/include/linux/dma-map-ops.h
+@@ -153,6 +153,9 @@ static inline void dma_free_contiguous(struct device *dev, struct page *page,
+ {
+ 	__free_pages(page, get_order(size));
+ }
++static inline void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
++{
++}
+ #endif /* CONFIG_DMA_CMA*/
+ 
+ #ifdef CONFIG_DMA_DECLARE_COHERENT
+diff --git a/include/net/bluetooth/hci_sync.h b/include/net/bluetooth/hci_sync.h
+index dbabc17b30cdfa..17e5112f7840e0 100644
+--- a/include/net/bluetooth/hci_sync.h
++++ b/include/net/bluetooth/hci_sync.h
+@@ -93,7 +93,7 @@ int hci_update_class_sync(struct hci_dev *hdev);
+ 
+ int hci_update_eir_sync(struct hci_dev *hdev);
+ int hci_update_class_sync(struct hci_dev *hdev);
+-int hci_update_name_sync(struct hci_dev *hdev);
++int hci_update_name_sync(struct hci_dev *hdev, const u8 *name);
+ int hci_write_ssp_mode_sync(struct hci_dev *hdev, u8 mode);
+ 
+ int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
+diff --git a/include/net/rose.h b/include/net/rose.h
+index 23267b4efcfa32..2b5491bbf39ab5 100644
+--- a/include/net/rose.h
++++ b/include/net/rose.h
+@@ -8,6 +8,7 @@
+ #ifndef _ROSE_H
+ #define _ROSE_H 
+ 
++#include <linux/refcount.h>
+ #include <linux/rose.h>
+ #include <net/ax25.h>
+ #include <net/sock.h>
+@@ -96,7 +97,7 @@ struct rose_neigh {
+ 	ax25_cb			*ax25;
+ 	struct net_device		*dev;
+ 	unsigned short		count;
+-	unsigned short		use;
++	refcount_t		use;
+ 	unsigned int		number;
+ 	char			restarted;
+ 	char			dce_mode;
+@@ -151,6 +152,21 @@ struct rose_sock {
+ 
+ #define rose_sk(sk) ((struct rose_sock *)(sk))
+ 
++static inline void rose_neigh_hold(struct rose_neigh *rose_neigh)
++{
++	refcount_inc(&rose_neigh->use);
++}
++
++static inline void rose_neigh_put(struct rose_neigh *rose_neigh)
++{
++	if (refcount_dec_and_test(&rose_neigh->use)) {
++		if (rose_neigh->ax25)
++			ax25_cb_put(rose_neigh->ax25);
++		kfree(rose_neigh->digipeat);
++		kfree(rose_neigh);
++	}
++}
++
+ /* af_rose.c */
+ extern ax25_address rose_callsign;
+ extern int  sysctl_rose_restart_request_timeout;
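
The rose series above converts the hand-rolled `unsigned short use` counter to refcount_t with hold/put helpers, so the last put, wherever it lands (timer, state machine, route teardown), frees the neighbour exactly once. The same hold/put discipline as a standalone C11 sketch (struct and field names are placeholders):

    #include <stdatomic.h>
    #include <stdlib.h>

    struct neigh {
            atomic_int use;         /* starts at 1 for the list's reference */
            char *digipeat;         /* owned memory, released on last put */
    };

    static void neigh_hold(struct neigh *n)
    {
            atomic_fetch_add_explicit(&n->use, 1, memory_order_relaxed);
    }

    static void neigh_put(struct neigh *n)
    {
            /* acq_rel so the freeing thread observes all writes made
             * under references dropped before this one */
            if (atomic_fetch_sub_explicit(&n->use, 1, memory_order_acq_rel) == 1) {
                    free(n->digipeat);
                    free(n);
            }
    }

    int main(void)
    {
            struct neigh *n = calloc(1, sizeof(*n));

            if (!n)
                    return 1;
            atomic_init(&n->use, 1);        /* list reference */
            neigh_hold(n);                  /* e.g. a route takes a reference */
            neigh_put(n);                   /* route drops it */
            neigh_put(n);                   /* list drops its reference: freed */
            return 0;
    }
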
+diff --git a/include/uapi/linux/vhost.h b/include/uapi/linux/vhost.h
+index 1c7e7035fc49d1..96b178f1bd5ca5 100644
+--- a/include/uapi/linux/vhost.h
++++ b/include/uapi/linux/vhost.h
+@@ -254,7 +254,7 @@
+  * When fork_owner is set to VHOST_FORK_OWNER_KTHREAD:
+  *   - Vhost will create vhost workers as kernel threads.
+  */
+-#define VHOST_SET_FORK_FROM_OWNER _IOW(VHOST_VIRTIO, 0x83, __u8)
++#define VHOST_SET_FORK_FROM_OWNER _IOW(VHOST_VIRTIO, 0x84, __u8)
+ 
+ /**
+  * VHOST_GET_FORK_OWNER - Get the current fork_owner flag for the vhost device.
+@@ -262,6 +262,6 @@
+  *
+  * @return: An 8-bit value indicating the current thread mode.
+  */
+-#define VHOST_GET_FORK_FROM_OWNER _IOR(VHOST_VIRTIO, 0x84, __u8)
++#define VHOST_GET_FORK_FROM_OWNER _IOR(VHOST_VIRTIO, 0x85, __u8)
+ 
+ #endif
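
The uapi change above bumps both new fork_owner ioctls up one nr, keeping the pair adjacent. Renumbering is the fix, rather than anything in the handler, because an ioctl command packs direction, size, type and nr into a single integer: two definitions in the same magic space with the same nr are the same command no matter what they are named. The encoding can be inspected from userspace (0xAF is the VHOST_VIRTIO magic from the uapi header):

    #include <stdio.h>
    #include <linux/ioctl.h>

    #define VHOST_VIRTIO 0xAF  /* include/uapi/linux/vhost.h */

    int main(void)
    {
            unsigned long set = _IOW(VHOST_VIRTIO, 0x84, unsigned char);
            unsigned long get = _IOR(VHOST_VIRTIO, 0x85, unsigned char);

            /* dir | size | type | nr, packed into one command value */
            printf("VHOST_SET_FORK_FROM_OWNER = %#lx\n", set);
            printf("VHOST_GET_FORK_FROM_OWNER = %#lx\n", get);
            return 0;
    }
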
+diff --git a/kernel/dma/contiguous.c b/kernel/dma/contiguous.c
+index 8df0dfaaca18ee..9e5d63efe7c577 100644
+--- a/kernel/dma/contiguous.c
++++ b/kernel/dma/contiguous.c
+@@ -480,8 +480,6 @@ static int __init rmem_cma_setup(struct reserved_mem *rmem)
+ 		pr_err("Reserved memory: unable to setup CMA region\n");
+ 		return err;
+ 	}
+-	/* Architecture specific contiguous memory fixup. */
+-	dma_contiguous_early_fixup(rmem->base, rmem->size);
+ 
+ 	if (default_cma)
+ 		dma_contiguous_default_area = cma;
+diff --git a/kernel/dma/pool.c b/kernel/dma/pool.c
+index 7b04f7575796b8..ee45dee33d4916 100644
+--- a/kernel/dma/pool.c
++++ b/kernel/dma/pool.c
+@@ -102,8 +102,8 @@ static int atomic_pool_expand(struct gen_pool *pool, size_t pool_size,
+ 
+ #ifdef CONFIG_DMA_DIRECT_REMAP
+ 	addr = dma_common_contiguous_remap(page, pool_size,
+-					   pgprot_dmacoherent(PAGE_KERNEL),
+-					   __builtin_return_address(0));
++			pgprot_decrypted(pgprot_dmacoherent(PAGE_KERNEL)),
++			__builtin_return_address(0));
+ 	if (!addr)
+ 		goto free_page;
+ #else
+diff --git a/kernel/trace/fgraph.c b/kernel/trace/fgraph.c
+index c12335499ec91e..2eed8bc672f913 100644
+--- a/kernel/trace/fgraph.c
++++ b/kernel/trace/fgraph.c
+@@ -1316,6 +1316,7 @@ int register_ftrace_graph(struct fgraph_ops *gops)
+ 		ftrace_graph_active--;
+ 		gops->saved_func = NULL;
+ 		fgraph_lru_release_index(i);
++		unregister_pm_notifier(&ftrace_suspend_notifier);
+ 	}
+ 	return ret;
+ }
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 2f662ca4d3ffd7..ba3358eef34baa 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -10130,10 +10130,10 @@ static void ftrace_dump_one(struct trace_array *tr, enum ftrace_dump_mode dump_m
+ 			ret = print_trace_line(&iter);
+ 			if (ret != TRACE_TYPE_NO_CONSUME)
+ 				trace_consume(&iter);
++
++			trace_printk_seq(&iter.seq);
+ 		}
+ 		touch_nmi_watchdog();
+-
+-		trace_printk_seq(&iter.seq);
+ 	}
+ 
+ 	if (!cnt)
+diff --git a/net/atm/common.c b/net/atm/common.c
+index d7f7976ea13ac6..881c7f259dbd46 100644
+--- a/net/atm/common.c
++++ b/net/atm/common.c
+@@ -635,18 +635,27 @@ int vcc_sendmsg(struct socket *sock, struct msghdr *m, size_t size)
+ 
+ 	skb->dev = NULL; /* for paths shared with net_device interfaces */
+ 	if (!copy_from_iter_full(skb_put(skb, size), size, &m->msg_iter)) {
+-		atm_return_tx(vcc, skb);
+-		kfree_skb(skb);
+ 		error = -EFAULT;
+-		goto out;
++		goto free_skb;
+ 	}
+ 	if (eff != size)
+ 		memset(skb->data + size, 0, eff-size);
++
++	if (vcc->dev->ops->pre_send) {
++		error = vcc->dev->ops->pre_send(vcc, skb);
++		if (error)
++			goto free_skb;
++	}
++
+ 	error = vcc->dev->ops->send(vcc, skb);
+ 	error = error ? error : size;
+ out:
+ 	release_sock(sk);
+ 	return error;
++free_skb:
++	atm_return_tx(vcc, skb);
++	kfree_skb(skb);
++	goto out;
+ }
+ 
+ __poll_t vcc_poll(struct file *file, struct socket *sock, poll_table *wait)
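
The atm change above introduces an optional pre_send hook and consolidates the unwinding: every failure after the skb is filled funnels through one free_skb label instead of duplicating the free-and-return sequence. The same shape in a small standalone sketch (the ops layout mirrors the idea, not the real atmdev_ops):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct buf {
            char *data;
    };

    struct dev_ops {
            int (*pre_send)(struct buf *b);  /* optional: may be NULL */
            int (*send)(struct buf *b);
    };

    static int checked_send(const struct dev_ops *ops, struct buf *b)
    {
            int err;

            if (ops->pre_send) {
                    err = ops->pre_send(b);
                    if (err)
                            goto free_buf;  /* single unwind path */
            }
            return ops->send(b);            /* send consumes the buffer */

    free_buf:
            free(b->data);
            free(b);
            return err;
    }

    static int always_ok(struct buf *b)
    {
            (void)b;
            return 0;
    }

    static int do_send(struct buf *b)
    {
            printf("sent: %s\n", b->data);
            free(b->data);
            free(b);
            return 0;
    }

    int main(void)
    {
            struct dev_ops ops = { .pre_send = always_ok, .send = do_send };
            struct buf *b = malloc(sizeof(*b));

            if (!b)
                    return 1;
            b->data = strdup("hello");
            if (!b->data) {
                    free(b);
                    return 1;
            }
            return checked_send(&ops, b);
    }
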
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index 768bd5fd808f2b..262ff30261d67d 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -2694,7 +2694,7 @@ static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
+ 	if (!conn)
+ 		goto unlock;
+ 
+-	if (status) {
++	if (status && status != HCI_ERROR_UNKNOWN_CONN_ID) {
+ 		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
+ 				       conn->dst_type, status);
+ 
+@@ -2709,6 +2709,12 @@ static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
+ 		goto done;
+ 	}
+ 
++	/* During suspend, mark connection as closed immediately
++	 * since we might not receive HCI_EV_DISCONN_COMPLETE
++	 */
++	if (hdev->suspended)
++		conn->state = BT_CLOSED;
++
+ 	mgmt_conn = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
+ 
+ 	if (conn->type == ACL_LINK) {
+@@ -4389,7 +4395,17 @@ static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data,
+ 		if (!conn)
+ 			continue;
+ 
+-		conn->sent -= count;
++		/* Check if there is really enough packets outstanding before
++		 * attempting to decrease the sent counter otherwise it could
++		 * underflow..
++		 */
++		if (conn->sent >= count) {
++			conn->sent -= count;
++		} else {
++			bt_dev_warn(hdev, "hcon %p sent %u < count %u",
++				    conn, conn->sent, count);
++			conn->sent = 0;
++		}
+ 
+ 		switch (conn->type) {
+ 		case ACL_LINK:
+diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
+index af86df9de941df..bc2aa514b8c5d8 100644
+--- a/net/bluetooth/hci_sync.c
++++ b/net/bluetooth/hci_sync.c
+@@ -3491,13 +3491,13 @@ int hci_update_scan_sync(struct hci_dev *hdev)
+ 	return hci_write_scan_enable_sync(hdev, scan);
+ }
+ 
+-int hci_update_name_sync(struct hci_dev *hdev)
++int hci_update_name_sync(struct hci_dev *hdev, const u8 *name)
+ {
+ 	struct hci_cp_write_local_name cp;
+ 
+ 	memset(&cp, 0, sizeof(cp));
+ 
+-	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
++	memcpy(cp.name, name, sizeof(cp.name));
+ 
+ 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LOCAL_NAME,
+ 					    sizeof(cp), &cp,
+@@ -3550,7 +3550,7 @@ int hci_powered_update_sync(struct hci_dev *hdev)
+ 			hci_write_fast_connectable_sync(hdev, false);
+ 		hci_update_scan_sync(hdev);
+ 		hci_update_class_sync(hdev);
+-		hci_update_name_sync(hdev);
++		hci_update_name_sync(hdev, hdev->dev_name);
+ 		hci_update_eir_sync(hdev);
+ 	}
+ 
+diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
+index ade93532db34b5..8b75647076baec 100644
+--- a/net/bluetooth/mgmt.c
++++ b/net/bluetooth/mgmt.c
+@@ -3826,8 +3826,11 @@ static void set_name_complete(struct hci_dev *hdev, void *data, int err)
+ 
+ static int set_name_sync(struct hci_dev *hdev, void *data)
+ {
++	struct mgmt_pending_cmd *cmd = data;
++	struct mgmt_cp_set_local_name *cp = cmd->param;
++
+ 	if (lmp_bredr_capable(hdev)) {
+-		hci_update_name_sync(hdev);
++		hci_update_name_sync(hdev, cp->name);
+ 		hci_update_eir_sync(hdev);
+ 	}
+ 
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 9a5c9497b3931d..261ddb6542a40f 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -2532,12 +2532,16 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
+ 		    !netif_is_l3_master(dev_out))
+ 			return ERR_PTR(-EINVAL);
+ 
+-	if (ipv4_is_lbcast(fl4->daddr))
++	if (ipv4_is_lbcast(fl4->daddr)) {
+ 		type = RTN_BROADCAST;
+-	else if (ipv4_is_multicast(fl4->daddr))
++
++		/* reset fi to prevent gateway resolution */
++		fi = NULL;
++	} else if (ipv4_is_multicast(fl4->daddr)) {
+ 		type = RTN_MULTICAST;
+-	else if (ipv4_is_zeronet(fl4->daddr))
++	} else if (ipv4_is_zeronet(fl4->daddr)) {
+ 		return ERR_PTR(-EINVAL);
++	}
+ 
+ 	if (dev_out->flags & IFF_LOOPBACK)
+ 		flags |= RTCF_LOCAL;
+diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
+index 53baf2dd5d5dab..16c514f628eaca 100644
+--- a/net/l2tp/l2tp_ppp.c
++++ b/net/l2tp/l2tp_ppp.c
+@@ -129,22 +129,12 @@ static const struct ppp_channel_ops pppol2tp_chan_ops = {
+ 
+ static const struct proto_ops pppol2tp_ops;
+ 
+-/* Retrieves the pppol2tp socket associated to a session.
+- * A reference is held on the returned socket, so this function must be paired
+- * with sock_put().
+- */
++/* Retrieves the pppol2tp socket associated to a session. */
+ static struct sock *pppol2tp_session_get_sock(struct l2tp_session *session)
+ {
+ 	struct pppol2tp_session *ps = l2tp_session_priv(session);
+-	struct sock *sk;
+-
+-	rcu_read_lock();
+-	sk = rcu_dereference(ps->sk);
+-	if (sk)
+-		sock_hold(sk);
+-	rcu_read_unlock();
+ 
+-	return sk;
++	return rcu_dereference(ps->sk);
+ }
+ 
+ /* Helpers to obtain tunnel/session contexts from sockets.
+@@ -206,14 +196,13 @@ static int pppol2tp_recvmsg(struct socket *sock, struct msghdr *msg,
+ 
+ static void pppol2tp_recv(struct l2tp_session *session, struct sk_buff *skb, int data_len)
+ {
+-	struct pppol2tp_session *ps = l2tp_session_priv(session);
+-	struct sock *sk = NULL;
++	struct sock *sk;
+ 
+ 	/* If the socket is bound, send it in to PPP's input queue. Otherwise
+ 	 * queue it on the session socket.
+ 	 */
+ 	rcu_read_lock();
+-	sk = rcu_dereference(ps->sk);
++	sk = pppol2tp_session_get_sock(session);
+ 	if (!sk)
+ 		goto no_sock;
+ 
+@@ -510,13 +499,14 @@ static void pppol2tp_show(struct seq_file *m, void *arg)
+ 	struct l2tp_session *session = arg;
+ 	struct sock *sk;
+ 
++	rcu_read_lock();
+ 	sk = pppol2tp_session_get_sock(session);
+ 	if (sk) {
+ 		struct pppox_sock *po = pppox_sk(sk);
+ 
+ 		seq_printf(m, "   interface %s\n", ppp_dev_name(&po->chan));
+-		sock_put(sk);
+ 	}
++	rcu_read_unlock();
+ }
+ 
+ static void pppol2tp_session_init(struct l2tp_session *session)
+@@ -1529,6 +1519,7 @@ static void pppol2tp_seq_session_show(struct seq_file *m, void *v)
+ 		port = ntohs(inet->inet_sport);
+ 	}
+ 
++	rcu_read_lock();
+ 	sk = pppol2tp_session_get_sock(session);
+ 	if (sk) {
+ 		state = sk->sk_state;
+@@ -1564,8 +1555,8 @@ static void pppol2tp_seq_session_show(struct seq_file *m, void *v)
+ 		struct pppox_sock *po = pppox_sk(sk);
+ 
+ 		seq_printf(m, "   interface %s\n", ppp_dev_name(&po->chan));
+-		sock_put(sk);
+ 	}
++	rcu_read_unlock();
+ }
+ 
+ static int pppol2tp_seq_show(struct seq_file *m, void *v)
+diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
+index a4a668b88a8f27..b8078b42f5de67 100644
+--- a/net/rose/af_rose.c
++++ b/net/rose/af_rose.c
+@@ -170,7 +170,7 @@ void rose_kill_by_neigh(struct rose_neigh *neigh)
+ 
+ 		if (rose->neighbour == neigh) {
+ 			rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0);
+-			rose->neighbour->use--;
++			rose_neigh_put(rose->neighbour);
+ 			rose->neighbour = NULL;
+ 		}
+ 	}
+@@ -212,7 +212,7 @@ static void rose_kill_by_device(struct net_device *dev)
+ 		if (rose->device == dev) {
+ 			rose_disconnect(sk, ENETUNREACH, ROSE_OUT_OF_ORDER, 0);
+ 			if (rose->neighbour)
+-				rose->neighbour->use--;
++				rose_neigh_put(rose->neighbour);
+ 			netdev_put(rose->device, &rose->dev_tracker);
+ 			rose->device = NULL;
+ 		}
+@@ -655,7 +655,7 @@ static int rose_release(struct socket *sock)
+ 		break;
+ 
+ 	case ROSE_STATE_2:
+-		rose->neighbour->use--;
++		rose_neigh_put(rose->neighbour);
+ 		release_sock(sk);
+ 		rose_disconnect(sk, 0, -1, -1);
+ 		lock_sock(sk);
+@@ -823,6 +823,7 @@ static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_le
+ 	rose->lci = rose_new_lci(rose->neighbour);
+ 	if (!rose->lci) {
+ 		err = -ENETUNREACH;
++		rose_neigh_put(rose->neighbour);
+ 		goto out_release;
+ 	}
+ 
+@@ -834,12 +835,14 @@ static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_le
+ 		dev = rose_dev_first();
+ 		if (!dev) {
+ 			err = -ENETUNREACH;
++			rose_neigh_put(rose->neighbour);
+ 			goto out_release;
+ 		}
+ 
+ 		user = ax25_findbyuid(current_euid());
+ 		if (!user) {
+ 			err = -EINVAL;
++			rose_neigh_put(rose->neighbour);
+ 			dev_put(dev);
+ 			goto out_release;
+ 		}
+@@ -874,8 +877,6 @@ static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_le
+ 
+ 	rose->state = ROSE_STATE_1;
+ 
+-	rose->neighbour->use++;
+-
+ 	rose_write_internal(sk, ROSE_CALL_REQUEST);
+ 	rose_start_heartbeat(sk);
+ 	rose_start_t1timer(sk);
+@@ -1077,7 +1078,7 @@ int rose_rx_call_request(struct sk_buff *skb, struct net_device *dev, struct ros
+ 			     GFP_ATOMIC);
+ 	make_rose->facilities    = facilities;
+ 
+-	make_rose->neighbour->use++;
++	rose_neigh_hold(make_rose->neighbour);
+ 
+ 	if (rose_sk(sk)->defer) {
+ 		make_rose->state = ROSE_STATE_5;
+diff --git a/net/rose/rose_in.c b/net/rose/rose_in.c
+index 4d67f36dce1b49..7caae93937ee9b 100644
+--- a/net/rose/rose_in.c
++++ b/net/rose/rose_in.c
+@@ -56,7 +56,7 @@ static int rose_state1_machine(struct sock *sk, struct sk_buff *skb, int framety
+ 	case ROSE_CLEAR_REQUEST:
+ 		rose_write_internal(sk, ROSE_CLEAR_CONFIRMATION);
+ 		rose_disconnect(sk, ECONNREFUSED, skb->data[3], skb->data[4]);
+-		rose->neighbour->use--;
++		rose_neigh_put(rose->neighbour);
+ 		break;
+ 
+ 	default:
+@@ -79,12 +79,12 @@ static int rose_state2_machine(struct sock *sk, struct sk_buff *skb, int framety
+ 	case ROSE_CLEAR_REQUEST:
+ 		rose_write_internal(sk, ROSE_CLEAR_CONFIRMATION);
+ 		rose_disconnect(sk, 0, skb->data[3], skb->data[4]);
+-		rose->neighbour->use--;
++		rose_neigh_put(rose->neighbour);
+ 		break;
+ 
+ 	case ROSE_CLEAR_CONFIRMATION:
+ 		rose_disconnect(sk, 0, -1, -1);
+-		rose->neighbour->use--;
++		rose_neigh_put(rose->neighbour);
+ 		break;
+ 
+ 	default:
+@@ -120,7 +120,7 @@ static int rose_state3_machine(struct sock *sk, struct sk_buff *skb, int framety
+ 	case ROSE_CLEAR_REQUEST:
+ 		rose_write_internal(sk, ROSE_CLEAR_CONFIRMATION);
+ 		rose_disconnect(sk, 0, skb->data[3], skb->data[4]);
+-		rose->neighbour->use--;
++		rose_neigh_put(rose->neighbour);
+ 		break;
+ 
+ 	case ROSE_RR:
+@@ -233,7 +233,7 @@ static int rose_state4_machine(struct sock *sk, struct sk_buff *skb, int framety
+ 	case ROSE_CLEAR_REQUEST:
+ 		rose_write_internal(sk, ROSE_CLEAR_CONFIRMATION);
+ 		rose_disconnect(sk, 0, skb->data[3], skb->data[4]);
+-		rose->neighbour->use--;
++		rose_neigh_put(rose->neighbour);
+ 		break;
+ 
+ 	default:
+@@ -253,7 +253,7 @@ static int rose_state5_machine(struct sock *sk, struct sk_buff *skb, int framety
+ 	if (frametype == ROSE_CLEAR_REQUEST) {
+ 		rose_write_internal(sk, ROSE_CLEAR_CONFIRMATION);
+ 		rose_disconnect(sk, 0, skb->data[3], skb->data[4]);
+-		rose_sk(sk)->neighbour->use--;
++		rose_neigh_put(rose_sk(sk)->neighbour);
+ 	}
+ 
+ 	return 0;
+diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
+index a7054546f52dfa..28746ae5a25828 100644
+--- a/net/rose/rose_route.c
++++ b/net/rose/rose_route.c
+@@ -93,11 +93,11 @@ static int __must_check rose_add_node(struct rose_route_struct *rose_route,
+ 		rose_neigh->ax25      = NULL;
+ 		rose_neigh->dev       = dev;
+ 		rose_neigh->count     = 0;
+-		rose_neigh->use       = 0;
+ 		rose_neigh->dce_mode  = 0;
+ 		rose_neigh->loopback  = 0;
+ 		rose_neigh->number    = rose_neigh_no++;
+ 		rose_neigh->restarted = 0;
++		refcount_set(&rose_neigh->use, 1);
+ 
+ 		skb_queue_head_init(&rose_neigh->queue);
+ 
+@@ -178,6 +178,7 @@ static int __must_check rose_add_node(struct rose_route_struct *rose_route,
+ 			}
+ 		}
+ 		rose_neigh->count++;
++		rose_neigh_hold(rose_neigh);
+ 
+ 		goto out;
+ 	}
+@@ -187,6 +188,7 @@ static int __must_check rose_add_node(struct rose_route_struct *rose_route,
+ 		rose_node->neighbour[rose_node->count] = rose_neigh;
+ 		rose_node->count++;
+ 		rose_neigh->count++;
++		rose_neigh_hold(rose_neigh);
+ 	}
+ 
+ out:
+@@ -234,20 +236,12 @@ static void rose_remove_neigh(struct rose_neigh *rose_neigh)
+ 
+ 	if ((s = rose_neigh_list) == rose_neigh) {
+ 		rose_neigh_list = rose_neigh->next;
+-		if (rose_neigh->ax25)
+-			ax25_cb_put(rose_neigh->ax25);
+-		kfree(rose_neigh->digipeat);
+-		kfree(rose_neigh);
+ 		return;
+ 	}
+ 
+ 	while (s != NULL && s->next != NULL) {
+ 		if (s->next == rose_neigh) {
+ 			s->next = rose_neigh->next;
+-			if (rose_neigh->ax25)
+-				ax25_cb_put(rose_neigh->ax25);
+-			kfree(rose_neigh->digipeat);
+-			kfree(rose_neigh);
+ 			return;
+ 		}
+ 
+@@ -263,10 +257,10 @@ static void rose_remove_route(struct rose_route *rose_route)
+ 	struct rose_route *s;
+ 
+ 	if (rose_route->neigh1 != NULL)
+-		rose_route->neigh1->use--;
++		rose_neigh_put(rose_route->neigh1);
+ 
+ 	if (rose_route->neigh2 != NULL)
+-		rose_route->neigh2->use--;
++		rose_neigh_put(rose_route->neigh2);
+ 
+ 	if ((s = rose_route_list) == rose_route) {
+ 		rose_route_list = rose_route->next;
+@@ -330,9 +324,12 @@ static int rose_del_node(struct rose_route_struct *rose_route,
+ 	for (i = 0; i < rose_node->count; i++) {
+ 		if (rose_node->neighbour[i] == rose_neigh) {
+ 			rose_neigh->count--;
++			rose_neigh_put(rose_neigh);
+ 
+-			if (rose_neigh->count == 0 && rose_neigh->use == 0)
++			if (rose_neigh->count == 0) {
+ 				rose_remove_neigh(rose_neigh);
++				rose_neigh_put(rose_neigh);
++			}
+ 
+ 			rose_node->count--;
+ 
+@@ -381,11 +378,11 @@ void rose_add_loopback_neigh(void)
+ 	sn->ax25      = NULL;
+ 	sn->dev       = NULL;
+ 	sn->count     = 0;
+-	sn->use       = 0;
+ 	sn->dce_mode  = 1;
+ 	sn->loopback  = 1;
+ 	sn->number    = rose_neigh_no++;
+ 	sn->restarted = 1;
++	refcount_set(&sn->use, 1);
+ 
+ 	skb_queue_head_init(&sn->queue);
+ 
+@@ -436,6 +433,7 @@ int rose_add_loopback_node(const rose_address *address)
+ 	rose_node_list  = rose_node;
+ 
+ 	rose_loopback_neigh->count++;
++	rose_neigh_hold(rose_loopback_neigh);
+ 
+ out:
+ 	spin_unlock_bh(&rose_node_list_lock);
+@@ -467,6 +465,7 @@ void rose_del_loopback_node(const rose_address *address)
+ 	rose_remove_node(rose_node);
+ 
+ 	rose_loopback_neigh->count--;
++	rose_neigh_put(rose_loopback_neigh);
+ 
+ out:
+ 	spin_unlock_bh(&rose_node_list_lock);
+@@ -506,6 +505,7 @@ void rose_rt_device_down(struct net_device *dev)
+ 				memmove(&t->neighbour[i], &t->neighbour[i + 1],
+ 					sizeof(t->neighbour[0]) *
+ 						(t->count - i));
++				rose_neigh_put(s);
+ 			}
+ 
+ 			if (t->count <= 0)
+@@ -513,6 +513,7 @@ void rose_rt_device_down(struct net_device *dev)
+ 		}
+ 
+ 		rose_remove_neigh(s);
++		rose_neigh_put(s);
+ 	}
+ 	spin_unlock_bh(&rose_neigh_list_lock);
+ 	spin_unlock_bh(&rose_node_list_lock);
+@@ -548,6 +549,7 @@ static int rose_clear_routes(void)
+ {
+ 	struct rose_neigh *s, *rose_neigh;
+ 	struct rose_node  *t, *rose_node;
++	int i;
+ 
+ 	spin_lock_bh(&rose_node_list_lock);
+ 	spin_lock_bh(&rose_neigh_list_lock);
+@@ -558,17 +560,21 @@ static int rose_clear_routes(void)
+ 	while (rose_node != NULL) {
+ 		t         = rose_node;
+ 		rose_node = rose_node->next;
+-		if (!t->loopback)
++
++		if (!t->loopback) {
++			for (i = 0; i < t->count; i++)
++				rose_neigh_put(t->neighbour[i]);
+ 			rose_remove_node(t);
++		}
+ 	}
+ 
+ 	while (rose_neigh != NULL) {
+ 		s          = rose_neigh;
+ 		rose_neigh = rose_neigh->next;
+ 
+-		if (s->use == 0 && !s->loopback) {
+-			s->count = 0;
++		if (!s->loopback) {
+ 			rose_remove_neigh(s);
++			rose_neigh_put(s);
+ 		}
+ 	}
+ 
+@@ -684,6 +690,7 @@ struct rose_neigh *rose_get_neigh(rose_address *addr, unsigned char *cause,
+ 			for (i = 0; i < node->count; i++) {
+ 				if (node->neighbour[i]->restarted) {
+ 					res = node->neighbour[i];
++					rose_neigh_hold(node->neighbour[i]);
+ 					goto out;
+ 				}
+ 			}
+@@ -695,6 +702,7 @@ struct rose_neigh *rose_get_neigh(rose_address *addr, unsigned char *cause,
+ 				for (i = 0; i < node->count; i++) {
+ 					if (!rose_ftimer_running(node->neighbour[i])) {
+ 						res = node->neighbour[i];
++						rose_neigh_hold(node->neighbour[i]);
+ 						goto out;
+ 					}
+ 					failed = 1;
+@@ -784,13 +792,13 @@ static void rose_del_route_by_neigh(struct rose_neigh *rose_neigh)
+ 		}
+ 
+ 		if (rose_route->neigh1 == rose_neigh) {
+-			rose_route->neigh1->use--;
++			rose_neigh_put(rose_route->neigh1);
+ 			rose_route->neigh1 = NULL;
+ 			rose_transmit_clear_request(rose_route->neigh2, rose_route->lci2, ROSE_OUT_OF_ORDER, 0);
+ 		}
+ 
+ 		if (rose_route->neigh2 == rose_neigh) {
+-			rose_route->neigh2->use--;
++			rose_neigh_put(rose_route->neigh2);
+ 			rose_route->neigh2 = NULL;
+ 			rose_transmit_clear_request(rose_route->neigh1, rose_route->lci1, ROSE_OUT_OF_ORDER, 0);
+ 		}
+@@ -919,7 +927,7 @@ int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25)
+ 			rose_clear_queues(sk);
+ 			rose->cause	 = ROSE_NETWORK_CONGESTION;
+ 			rose->diagnostic = 0;
+-			rose->neighbour->use--;
++			rose_neigh_put(rose->neighbour);
+ 			rose->neighbour	 = NULL;
+ 			rose->lci	 = 0;
+ 			rose->state	 = ROSE_STATE_0;
+@@ -1044,12 +1052,12 @@ int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25)
+ 
+ 	if ((new_lci = rose_new_lci(new_neigh)) == 0) {
+ 		rose_transmit_clear_request(rose_neigh, lci, ROSE_NETWORK_CONGESTION, 71);
+-		goto out;
++		goto put_neigh;
+ 	}
+ 
+ 	if ((rose_route = kmalloc(sizeof(*rose_route), GFP_ATOMIC)) == NULL) {
+ 		rose_transmit_clear_request(rose_neigh, lci, ROSE_NETWORK_CONGESTION, 120);
+-		goto out;
++		goto put_neigh;
+ 	}
+ 
+ 	rose_route->lci1      = lci;
+@@ -1062,8 +1070,8 @@ int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25)
+ 	rose_route->lci2      = new_lci;
+ 	rose_route->neigh2    = new_neigh;
+ 
+-	rose_route->neigh1->use++;
+-	rose_route->neigh2->use++;
++	rose_neigh_hold(rose_route->neigh1);
++	rose_neigh_hold(rose_route->neigh2);
+ 
+ 	rose_route->next = rose_route_list;
+ 	rose_route_list  = rose_route;
+@@ -1075,6 +1083,8 @@ int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25)
+ 	rose_transmit_link(skb, rose_route->neigh2);
+ 	res = 1;
+ 
++put_neigh:
++	rose_neigh_put(new_neigh);
+ out:
+ 	spin_unlock_bh(&rose_route_list_lock);
+ 	spin_unlock_bh(&rose_neigh_list_lock);
+@@ -1190,7 +1200,7 @@ static int rose_neigh_show(struct seq_file *seq, void *v)
+ 			   (rose_neigh->loopback) ? "RSLOOP-0" : ax2asc(buf, &rose_neigh->callsign),
+ 			   rose_neigh->dev ? rose_neigh->dev->name : "???",
+ 			   rose_neigh->count,
+-			   rose_neigh->use,
++			   refcount_read(&rose_neigh->use) - rose_neigh->count - 1,
+ 			   (rose_neigh->dce_mode) ? "DCE" : "DTE",
+ 			   (rose_neigh->restarted) ? "yes" : "no",
+ 			   ax25_display_timer(&rose_neigh->t0timer) / HZ,
+@@ -1295,18 +1305,22 @@ void __exit rose_rt_free(void)
+ 	struct rose_neigh *s, *rose_neigh = rose_neigh_list;
+ 	struct rose_node  *t, *rose_node  = rose_node_list;
+ 	struct rose_route *u, *rose_route = rose_route_list;
++	int i;
+ 
+ 	while (rose_neigh != NULL) {
+ 		s          = rose_neigh;
+ 		rose_neigh = rose_neigh->next;
+ 
+ 		rose_remove_neigh(s);
++		rose_neigh_put(s);
+ 	}
+ 
+ 	while (rose_node != NULL) {
+ 		t         = rose_node;
+ 		rose_node = rose_node->next;
+ 
++		for (i = 0; i < t->count; i++)
++			rose_neigh_put(t->neighbour[i]);
+ 		rose_remove_node(t);
+ 	}
+ 
+diff --git a/net/rose/rose_timer.c b/net/rose/rose_timer.c
+index 1525773e94aa17..c52d7d20c5199b 100644
+--- a/net/rose/rose_timer.c
++++ b/net/rose/rose_timer.c
+@@ -180,7 +180,7 @@ static void rose_timer_expiry(struct timer_list *t)
+ 		break;
+ 
+ 	case ROSE_STATE_2:	/* T3 */
+-		rose->neighbour->use--;
++		rose_neigh_put(rose->neighbour);
+ 		rose_disconnect(sk, ETIMEDOUT, -1, -1);
+ 		break;
+ 
+diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
+index 38e2fbdcbeac4b..9f835e674c599a 100644
+--- a/net/sctp/ipv6.c
++++ b/net/sctp/ipv6.c
+@@ -546,7 +546,9 @@ static void sctp_v6_from_sk(union sctp_addr *addr, struct sock *sk)
+ {
+ 	addr->v6.sin6_family = AF_INET6;
+ 	addr->v6.sin6_port = 0;
++	addr->v6.sin6_flowinfo = 0;
+ 	addr->v6.sin6_addr = sk->sk_v6_rcv_saddr;
++	addr->v6.sin6_scope_id = 0;
+ }
+ 
+ /* Initialize sk->sk_rcv_saddr from sctp_addr. */
+diff --git a/sound/soc/codecs/lpass-tx-macro.c b/sound/soc/codecs/lpass-tx-macro.c
+index a134584acf909e..74e69572796b58 100644
+--- a/sound/soc/codecs/lpass-tx-macro.c
++++ b/sound/soc/codecs/lpass-tx-macro.c
+@@ -2230,7 +2230,7 @@ static int tx_macro_register_mclk_output(struct tx_macro *tx)
+ }
+ 
+ static const struct snd_soc_component_driver tx_macro_component_drv = {
+-	.name = "RX-MACRO",
++	.name = "TX-MACRO",
+ 	.probe = tx_macro_component_probe,
+ 	.controls = tx_macro_snd_controls,
+ 	.num_controls = ARRAY_SIZE(tx_macro_snd_controls),
+diff --git a/tools/perf/util/symbol-minimal.c b/tools/perf/util/symbol-minimal.c
+index 36c1d3090689fc..f114f75ebeb989 100644
+--- a/tools/perf/util/symbol-minimal.c
++++ b/tools/perf/util/symbol-minimal.c
+@@ -4,7 +4,6 @@
+ 
+ #include <errno.h>
+ #include <unistd.h>
+-#include <stdio.h>
+ #include <fcntl.h>
+ #include <string.h>
+ #include <stdlib.h>
+@@ -88,11 +87,8 @@ int filename__read_debuglink(const char *filename __maybe_unused,
+  */
+ int filename__read_build_id(const char *filename, struct build_id *bid)
+ {
+-	FILE *fp;
+-	int ret = -1;
++	int fd, ret = -1;
+ 	bool need_swap = false, elf32;
+-	u8 e_ident[EI_NIDENT];
+-	int i;
+ 	union {
+ 		struct {
+ 			Elf32_Ehdr ehdr32;
+@@ -103,28 +99,27 @@ int filename__read_build_id(const char *filename, struct build_id *bid)
+ 			Elf64_Phdr *phdr64;
+ 		};
+ 	} hdrs;
+-	void *phdr;
+-	size_t phdr_size;
+-	void *buf = NULL;
+-	size_t buf_size = 0;
++	void *phdr, *buf = NULL;
++	ssize_t phdr_size, ehdr_size, buf_size = 0;
+ 
+-	fp = fopen(filename, "r");
+-	if (fp == NULL)
++	fd = open(filename, O_RDONLY);
++	if (fd < 0)
+ 		return -1;
+ 
+-	if (fread(e_ident, sizeof(e_ident), 1, fp) != 1)
++	if (read(fd, hdrs.ehdr32.e_ident, EI_NIDENT) != EI_NIDENT)
+ 		goto out;
+ 
+-	if (memcmp(e_ident, ELFMAG, SELFMAG) ||
+-	    e_ident[EI_VERSION] != EV_CURRENT)
++	if (memcmp(hdrs.ehdr32.e_ident, ELFMAG, SELFMAG) ||
++	    hdrs.ehdr32.e_ident[EI_VERSION] != EV_CURRENT)
+ 		goto out;
+ 
+-	need_swap = check_need_swap(e_ident[EI_DATA]);
+-	elf32 = e_ident[EI_CLASS] == ELFCLASS32;
++	need_swap = check_need_swap(hdrs.ehdr32.e_ident[EI_DATA]);
++	elf32 = hdrs.ehdr32.e_ident[EI_CLASS] == ELFCLASS32;
++	ehdr_size = (elf32 ? sizeof(hdrs.ehdr32) : sizeof(hdrs.ehdr64)) - EI_NIDENT;
+ 
+-	if (fread(elf32 ? (void *)&hdrs.ehdr32 : (void *)&hdrs.ehdr64,
+-		  elf32 ? sizeof(hdrs.ehdr32) : sizeof(hdrs.ehdr64),
+-		  1, fp) != 1)
++	if (read(fd,
++		 (elf32 ? (void *)&hdrs.ehdr32 : (void *)&hdrs.ehdr64) + EI_NIDENT,
++		 ehdr_size) != ehdr_size)
+ 		goto out;
+ 
+ 	if (need_swap) {
+@@ -138,14 +133,18 @@ int filename__read_build_id(const char *filename, struct build_id *bid)
+ 			hdrs.ehdr64.e_phnum = bswap_16(hdrs.ehdr64.e_phnum);
+ 		}
+ 	}
+-	phdr_size = elf32 ? hdrs.ehdr32.e_phentsize * hdrs.ehdr32.e_phnum
+-			  : hdrs.ehdr64.e_phentsize * hdrs.ehdr64.e_phnum;
++	if ((elf32 && hdrs.ehdr32.e_phentsize != sizeof(Elf32_Phdr)) ||
++	    (!elf32 && hdrs.ehdr64.e_phentsize != sizeof(Elf64_Phdr)))
++		goto out;
++
++	phdr_size = elf32 ? sizeof(Elf32_Phdr) * hdrs.ehdr32.e_phnum
++			  : sizeof(Elf64_Phdr) * hdrs.ehdr64.e_phnum;
+ 	phdr = malloc(phdr_size);
+ 	if (phdr == NULL)
+ 		goto out;
+ 
+-	fseek(fp, elf32 ? hdrs.ehdr32.e_phoff : hdrs.ehdr64.e_phoff, SEEK_SET);
+-	if (fread(phdr, phdr_size, 1, fp) != 1)
++	lseek(fd, elf32 ? hdrs.ehdr32.e_phoff : hdrs.ehdr64.e_phoff, SEEK_SET);
++	if (read(fd, phdr, phdr_size) != phdr_size)
+ 		goto out_free;
+ 
+ 	if (elf32)
+@@ -153,8 +152,8 @@ int filename__read_build_id(const char *filename, struct build_id *bid)
+ 	else
+ 		hdrs.phdr64 = phdr;
+ 
+-	for (i = 0; i < elf32 ? hdrs.ehdr32.e_phnum : hdrs.ehdr64.e_phnum; i++) {
+-		size_t p_filesz;
++	for (int i = 0; i < (elf32 ? hdrs.ehdr32.e_phnum : hdrs.ehdr64.e_phnum); i++) {
++		ssize_t p_filesz;
+ 
+ 		if (need_swap) {
+ 			if (elf32) {
+@@ -180,8 +179,8 @@ int filename__read_build_id(const char *filename, struct build_id *bid)
+ 				goto out_free;
+ 			buf = tmp;
+ 		}
+-		fseek(fp, elf32 ? hdrs.phdr32[i].p_offset : hdrs.phdr64[i].p_offset, SEEK_SET);
+-		if (fread(buf, p_filesz, 1, fp) != 1)
++		lseek(fd, elf32 ? hdrs.phdr32[i].p_offset : hdrs.phdr64[i].p_offset, SEEK_SET);
++		if (read(fd, buf, p_filesz) != p_filesz)
+ 			goto out_free;
+ 
+ 		ret = read_build_id(buf, p_filesz, bid, need_swap);
+@@ -194,7 +193,7 @@ int filename__read_build_id(const char *filename, struct build_id *bid)
+ 	free(buf);
+ 	free(phdr);
+ out:
+-	fclose(fp);
++	close(fd);
+ 	return ret;
+ }
+ 
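
The perf rework above trades buffered stdio for raw open/read/lseek and, crucially, starts checking the byte counts read() reports instead of fread()'s item counts. That validation pattern on the ELF identification bytes, as a standalone sketch (just the magic/version check, not the build-id walk):

    #include <elf.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    static int elf_ident_ok(const char *path)
    {
            unsigned char e_ident[EI_NIDENT];
            int fd = open(path, O_RDONLY);
            int ok = 0;

            if (fd < 0)
                    return 0;
            /* read() may return short or -1: demand the exact size */
            if (read(fd, e_ident, EI_NIDENT) == EI_NIDENT &&
                memcmp(e_ident, ELFMAG, SELFMAG) == 0 &&
                e_ident[EI_VERSION] == EV_CURRENT)
                    ok = 1;
            close(fd);
            return ok;
    }

    int main(int argc, char **argv)
    {
            const char *path = argc > 1 ? argv[1] : "/proc/self/exe";

            printf("%s: %s\n", path,
                   elf_ident_ok(path) ? "valid ELF ident" : "not ELF");
            return 0;
    }
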
+diff --git a/tools/tracing/latency/Makefile.config b/tools/tracing/latency/Makefile.config
+index 0fe6b50f029bf7..6efa13e3ca93fd 100644
+--- a/tools/tracing/latency/Makefile.config
++++ b/tools/tracing/latency/Makefile.config
+@@ -1,7 +1,15 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+ 
++include $(srctree)/tools/scripts/utilities.mak
++
+ STOP_ERROR :=
+ 
++ifndef ($(NO_LIBTRACEEVENT),1)
++  ifeq ($(call get-executable,$(PKG_CONFIG)),)
++    $(error Error: $(PKG_CONFIG) needed by libtraceevent/libtracefs is missing on this system, please install it)
++  endif
++endif
++
+ define lib_setup
+   $(eval LIB_INCLUDES += $(shell sh -c "$(PKG_CONFIG) --cflags lib$(1)"))
+   $(eval LDFLAGS += $(shell sh -c "$(PKG_CONFIG) --libs-only-L lib$(1)"))
+diff --git a/tools/tracing/rtla/Makefile.config b/tools/tracing/rtla/Makefile.config
+index 5f8c286712d4c1..a35d6ee55ffcd7 100644
+--- a/tools/tracing/rtla/Makefile.config
++++ b/tools/tracing/rtla/Makefile.config
+@@ -1,10 +1,18 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+ 
++include $(srctree)/tools/scripts/utilities.mak
++
+ STOP_ERROR :=
+ 
+ LIBTRACEEVENT_MIN_VERSION = 1.5
+ LIBTRACEFS_MIN_VERSION = 1.6
+ 
++ifndef ($(NO_LIBTRACEEVENT),1)
++  ifeq ($(call get-executable,$(PKG_CONFIG)),)
++    $(error Error: $(PKG_CONFIG) needed by libtraceevent/libtracefs is missing on this system, please install it)
++  endif
++endif
++
+ define lib_setup
+   $(eval LIB_INCLUDES += $(shell sh -c "$(PKG_CONFIG) --cflags lib$(1)"))
+   $(eval LDFLAGS += $(shell sh -c "$(PKG_CONFIG) --libs-only-L lib$(1)"))
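
One quirk in the two Makefile.config hunks is worth flagging: GNU Make's ifndef takes a single variable name, so `ifndef ($(NO_LIBTRACEEVENT),1)` asks whether a variable literally named "($(NO_LIBTRACEEVENT),1)" is defined, which it never is. The pkg-config check therefore runs unconditionally, even with NO_LIBTRACEEVENT=1; the intent reads as `ifneq ($(NO_LIBTRACEEVENT),1)`. Mostly harmless, since these tools need libtraceevent anyway, but the guard does not do what it appears to.
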


* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2025-08-28 15:24 Arisu Tachibana
  0 siblings, 0 replies; 82+ messages in thread
From: Arisu Tachibana @ 2025-08-28 15:24 UTC (permalink / raw
  To: gentoo-commits

commit:     6292f81d5963df90bc016cb5929136d75a64e547
Author:     Arisu Tachibana <alicef <AT> gentoo <DOT> org>
AuthorDate: Thu Aug 28 15:24:27 2025 +0000
Commit:     Arisu Tachibana <alicef <AT> gentoo <DOT> org>
CommitDate: Thu Aug 28 15:24:27 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=6292f81d

Linux patch 6.12.44

Signed-off-by: Arisu Tachibana <alicef <AT> gentoo.org>

 0000_README              |     4 +
 1043_linux-6.12.44.patch | 14333 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 14337 insertions(+)

diff --git a/0000_README b/0000_README
index 860bf8f3..ab6b7b1d 100644
--- a/0000_README
+++ b/0000_README
@@ -215,6 +215,10 @@ Patch:  1042_linux-6.12.43.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.12.43
 
+Patch:  1043_linux-6.12.44.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.12.44
+
 Patch:  1500_fortify-copy-size-value-range-tracking-fix.patch
 From:   https://git.kernel.org/
 Desc:   fortify: Hide run-time copy size from value range tracking

diff --git a/1043_linux-6.12.44.patch b/1043_linux-6.12.44.patch
new file mode 100644
index 00000000..1df17513
--- /dev/null
+++ b/1043_linux-6.12.44.patch
@@ -0,0 +1,14333 @@
+diff --git a/Documentation/devicetree/bindings/display/sprd/sprd,sharkl3-dpu.yaml b/Documentation/devicetree/bindings/display/sprd/sprd,sharkl3-dpu.yaml
+index 4ebea60b8c5ba5..8c52fa0ea5f8ee 100644
+--- a/Documentation/devicetree/bindings/display/sprd/sprd,sharkl3-dpu.yaml
++++ b/Documentation/devicetree/bindings/display/sprd/sprd,sharkl3-dpu.yaml
+@@ -25,7 +25,7 @@ properties:
+     maxItems: 1
+ 
+   clocks:
+-    minItems: 2
++    maxItems: 2
+ 
+   clock-names:
+     items:
+diff --git a/Documentation/devicetree/bindings/display/sprd/sprd,sharkl3-dsi-host.yaml b/Documentation/devicetree/bindings/display/sprd/sprd,sharkl3-dsi-host.yaml
+index bc5594d1864301..300bf2252c3e8e 100644
+--- a/Documentation/devicetree/bindings/display/sprd/sprd,sharkl3-dsi-host.yaml
++++ b/Documentation/devicetree/bindings/display/sprd/sprd,sharkl3-dsi-host.yaml
+@@ -20,7 +20,7 @@ properties:
+     maxItems: 2
+ 
+   clocks:
+-    minItems: 1
++    maxItems: 1
+ 
+   clock-names:
+     items:
+diff --git a/Documentation/devicetree/bindings/ufs/mediatek,ufs.yaml b/Documentation/devicetree/bindings/ufs/mediatek,ufs.yaml
+index 32fd535a514ad1..20f341d25ebc3f 100644
+--- a/Documentation/devicetree/bindings/ufs/mediatek,ufs.yaml
++++ b/Documentation/devicetree/bindings/ufs/mediatek,ufs.yaml
+@@ -33,6 +33,10 @@ properties:
+ 
+   vcc-supply: true
+ 
++  mediatek,ufs-disable-mcq:
++    $ref: /schemas/types.yaml#/definitions/flag
++    description: The mask to disable MCQ (Multi-Circular Queue) for UFS host.
++
+ required:
+   - compatible
+   - clocks
+diff --git a/Documentation/networking/mptcp-sysctl.rst b/Documentation/networking/mptcp-sysctl.rst
+index 95598c21fc8e87..09be0e68b9afa2 100644
+--- a/Documentation/networking/mptcp-sysctl.rst
++++ b/Documentation/networking/mptcp-sysctl.rst
+@@ -12,6 +12,8 @@ add_addr_timeout - INTEGER (seconds)
+ 	resent to an MPTCP peer that has not acknowledged a previous
+ 	ADD_ADDR message.
+ 
++	Do not retransmit if set to 0.
++
+ 	The default value matches TCP_RTO_MAX. This is a per-namespace
+ 	sysctl.
+ 
+diff --git a/Makefile b/Makefile
+index 3dc8acf73bfaf5..208a50953301b2 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 12
+-SUBLEVEL = 43
++SUBLEVEL = 44
+ EXTRAVERSION =
+ NAME = Baby Opossum Posse
+ 
+@@ -1069,7 +1069,7 @@ KBUILD_USERCFLAGS  += $(filter -m32 -m64 --target=%, $(KBUILD_CPPFLAGS) $(KBUILD
+ KBUILD_USERLDFLAGS += $(filter -m32 -m64 --target=%, $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS))
+ 
+ # userspace programs are linked via the compiler, use the correct linker
+-ifeq ($(CONFIG_CC_IS_CLANG)$(CONFIG_LD_IS_LLD),yy)
++ifdef CONFIG_CC_IS_CLANG
+ KBUILD_USERLDFLAGS += --ld-path=$(LD)
+ endif
+ 
+diff --git a/arch/arm64/boot/dts/exynos/google/gs101.dtsi b/arch/arm64/boot/dts/exynos/google/gs101.dtsi
+index 7caa2f3ef134af..a509a59def428c 100644
+--- a/arch/arm64/boot/dts/exynos/google/gs101.dtsi
++++ b/arch/arm64/boot/dts/exynos/google/gs101.dtsi
+@@ -1360,6 +1360,7 @@ ufs_0: ufs@14700000 {
+ 				 <&cmu_hsi2 CLK_GOUT_HSI2_SYSREG_HSI2_PCLK>;
+ 			clock-names = "core_clk", "sclk_unipro_main", "fmp",
+ 				      "aclk", "pclk", "sysreg";
++			dma-coherent;
+ 			freq-table-hz = <0 0>, <0 0>, <0 0>, <0 0>, <0 0>, <0 0>;
+ 			pinctrl-0 = <&ufs_rst_n &ufs_refclk_out>;
+ 			pinctrl-names = "default";
+diff --git a/arch/arm64/boot/dts/ti/k3-am62-lp-sk.dts b/arch/arm64/boot/dts/ti/k3-am62-lp-sk.dts
+index 8e9fc00a6b3c74..4609f366006e4c 100644
+--- a/arch/arm64/boot/dts/ti/k3-am62-lp-sk.dts
++++ b/arch/arm64/boot/dts/ti/k3-am62-lp-sk.dts
+@@ -69,20 +69,39 @@ vddshv_sdio: regulator-4 {
+ 		gpios = <&main_gpio0 31 GPIO_ACTIVE_HIGH>;
+ 		states = <1800000 0x0>,
+ 			 <3300000 0x1>;
++		bootph-all;
+ 	};
+ };
+ 
+ &main_pmx0 {
++	main_mmc0_pins_default: main-mmc0-default-pins {
++		bootph-all;
++		pinctrl-single,pins = <
++			AM62X_IOPAD(0x220, PIN_INPUT, 0) /* (V3) MMC0_CMD */
++			AM62X_IOPAD(0x218, PIN_INPUT, 0) /* (Y1) MMC0_CLK */
++			AM62X_IOPAD(0x214, PIN_INPUT, 0) /* (V2) MMC0_DAT0 */
++			AM62X_IOPAD(0x210, PIN_INPUT, 0) /* (V1) MMC0_DAT1 */
++			AM62X_IOPAD(0x20c, PIN_INPUT, 0) /* (W2) MMC0_DAT2 */
++			AM62X_IOPAD(0x208, PIN_INPUT, 0) /* (W1) MMC0_DAT3 */
++			AM62X_IOPAD(0x204, PIN_INPUT, 0) /* (Y2) MMC0_DAT4 */
++			AM62X_IOPAD(0x200, PIN_INPUT, 0) /* (W3) MMC0_DAT5 */
++			AM62X_IOPAD(0x1fc, PIN_INPUT, 0) /* (W4) MMC0_DAT6 */
++			AM62X_IOPAD(0x1f8, PIN_INPUT, 0) /* (V4) MMC0_DAT7 */
++		>;
++	};
++
+ 	vddshv_sdio_pins_default: vddshv-sdio-default-pins {
+ 		pinctrl-single,pins = <
+ 			AM62X_IOPAD(0x07c, PIN_OUTPUT, 7) /* (M19) GPMC0_CLK.GPIO0_31 */
+ 		>;
++		bootph-all;
+ 	};
+ 
+ 	main_gpio1_ioexp_intr_pins_default: main-gpio1-ioexp-intr-default-pins {
+ 		pinctrl-single,pins = <
+ 			AM62X_IOPAD(0x01d4, PIN_INPUT, 7) /* (C13) UART0_RTSn.GPIO1_23 */
+ 		>;
++		bootph-all;
+ 	};
+ 
+ 	pmic_irq_pins_default: pmic-irq-default-pins {
+@@ -118,6 +137,7 @@ exp1: gpio@22 {
+ 
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&main_gpio1_ioexp_intr_pins_default>;
++		bootph-all;
+ 	};
+ 
+ 	exp2: gpio@23 {
+@@ -140,6 +160,14 @@ exp2: gpio@23 {
+ 	};
+ };
+ 
++&sdhci0 {
++	bootph-all;
++	non-removable;
++	pinctrl-names = "default";
++	pinctrl-0 = <&main_mmc0_pins_default>;
++	status = "okay";
++};
++
+ &sdhci1 {
+ 	vmmc-supply = <&vdd_mmc1>;
+ 	vqmmc-supply = <&vddshv_sdio>;
+@@ -229,6 +257,14 @@ &tlv320aic3106 {
+ 	DVDD-supply = <&buck2_reg>;
+ };
+ 
++&main_gpio0 {
++	bootph-all;
++};
++
++&main_gpio1 {
++	bootph-all;
++};
++
+ &gpmc0 {
+ 	ranges = <0 0 0x00 0x51000000 0x01000000>; /* CS0 space. Min partition = 16MB */
+ };
+diff --git a/arch/arm64/boot/dts/ti/k3-am62-main.dtsi b/arch/arm64/boot/dts/ti/k3-am62-main.dtsi
+index 3f3a31eced9707..a74c8b523542cb 100644
+--- a/arch/arm64/boot/dts/ti/k3-am62-main.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-am62-main.dtsi
+@@ -553,7 +553,6 @@ sdhci0: mmc@fa10000 {
+ 		clocks = <&k3_clks 57 5>, <&k3_clks 57 6>;
+ 		clock-names = "clk_ahb", "clk_xin";
+ 		bus-width = <8>;
+-		mmc-ddr-1_8v;
+ 		mmc-hs200-1_8v;
+ 		ti,clkbuf-sel = <0x7>;
+ 		ti,otap-del-sel-legacy = <0x0>;
+diff --git a/arch/arm64/boot/dts/ti/k3-am62-phycore-som.dtsi b/arch/arm64/boot/dts/ti/k3-am62-phycore-som.dtsi
+index 43488cc8bcb1e1..ec87d18568fa13 100644
+--- a/arch/arm64/boot/dts/ti/k3-am62-phycore-som.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-am62-phycore-som.dtsi
+@@ -317,7 +317,6 @@ serial_flash: flash@0 {
+ &sdhci0 {
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&main_mmc0_pins_default>;
+-	disable-wp;
+ 	non-removable;
+ 	status = "okay";
+ };
+diff --git a/arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi b/arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi
+index f0eac05f7483ea..86e7f98d430ecb 100644
+--- a/arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi
+@@ -507,16 +507,16 @@ AM62X_IOPAD(0x01ec, PIN_INPUT_PULLUP, 0) /* (A17) I2C1_SDA */ /* SODIMM 12 */
+ 	/* Verdin I2C_2_DSI */
+ 	pinctrl_i2c2: main-i2c2-default-pins {
+ 		pinctrl-single,pins = <
+-			AM62X_IOPAD(0x00b0, PIN_INPUT, 1) /* (K22) GPMC0_CSn2.I2C2_SCL */ /* SODIMM 55 */
+-			AM62X_IOPAD(0x00b4, PIN_INPUT, 1) /* (K24) GPMC0_CSn3.I2C2_SDA */ /* SODIMM 53 */
++			AM62X_IOPAD(0x00b0, PIN_INPUT_PULLUP, 1) /* (K22) GPMC0_CSn2.I2C2_SCL */ /* SODIMM 55 */
++			AM62X_IOPAD(0x00b4, PIN_INPUT_PULLUP, 1) /* (K24) GPMC0_CSn3.I2C2_SDA */ /* SODIMM 53 */
+ 		>;
+ 	};
+ 
+ 	/* Verdin I2C_4_CSI */
+ 	pinctrl_i2c3: main-i2c3-default-pins {
+ 		pinctrl-single,pins = <
+-			AM62X_IOPAD(0x01d0, PIN_INPUT, 2) /* (A15) UART0_CTSn.I2C3_SCL */ /* SODIMM 95 */
+-			AM62X_IOPAD(0x01d4, PIN_INPUT, 2) /* (B15) UART0_RTSn.I2C3_SDA */ /* SODIMM 93 */
++			AM62X_IOPAD(0x01d0, PIN_INPUT_PULLUP, 2) /* (A15) UART0_CTSn.I2C3_SCL */ /* SODIMM 95 */
++			AM62X_IOPAD(0x01d4, PIN_INPUT_PULLUP, 2) /* (B15) UART0_RTSn.I2C3_SDA */ /* SODIMM 93 */
+ 		>;
+ 	};
+ 
+@@ -786,8 +786,8 @@ AM62X_MCU_IOPAD(0x0010, PIN_INPUT, 7) /* (C9) MCU_SPI0_D1.MCU_GPIO0_4 */ /* SODI
+ 	/* Verdin I2C_3_HDMI */
+ 	pinctrl_mcu_i2c0: mcu-i2c0-default-pins {
+ 		pinctrl-single,pins = <
+-			AM62X_MCU_IOPAD(0x0044, PIN_INPUT, 0) /*  (A8) MCU_I2C0_SCL */ /* SODIMM 59 */
+-			AM62X_MCU_IOPAD(0x0048, PIN_INPUT, 0) /* (D10) MCU_I2C0_SDA */ /* SODIMM 57 */
++			AM62X_MCU_IOPAD(0x0044, PIN_INPUT_PULLUP, 0) /*  (A8) MCU_I2C0_SCL */ /* SODIMM 59 */
++			AM62X_MCU_IOPAD(0x0048, PIN_INPUT_PULLUP, 0) /* (D10) MCU_I2C0_SDA */ /* SODIMM 57 */
+ 		>;
+ 	};
+ 
+diff --git a/arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts b/arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts
+index a1cd47d7f5e304..f6ef1549801be3 100644
+--- a/arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts
++++ b/arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts
+@@ -818,9 +818,9 @@ &main_spi2 {
+ 
+ &sdhci0 {
+ 	bootph-all;
++	non-removable;
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&emmc_pins_default>;
+-	disable-wp;
+ 	status = "okay";
+ };
+ 
+diff --git a/arch/arm64/boot/dts/ti/k3-am625-sk.dts b/arch/arm64/boot/dts/ti/k3-am625-sk.dts
+index ae81ebb39d02d6..0fa11d3aa71e96 100644
+--- a/arch/arm64/boot/dts/ti/k3-am625-sk.dts
++++ b/arch/arm64/boot/dts/ti/k3-am625-sk.dts
+@@ -106,6 +106,22 @@ vcc_1v8: regulator-5 {
+ };
+ 
+ &main_pmx0 {
++	main_mmc0_pins_default: main-mmc0-default-pins {
++		bootph-all;
++		pinctrl-single,pins = <
++			AM62X_IOPAD(0x220, PIN_INPUT, 0) /* (Y3) MMC0_CMD */
++			AM62X_IOPAD(0x218, PIN_INPUT, 0) /* (AB1) MMC0_CLK */
++			AM62X_IOPAD(0x214, PIN_INPUT, 0) /* (AA2) MMC0_DAT0 */
++			AM62X_IOPAD(0x210, PIN_INPUT_PULLUP, 0) /* (AA1) MMC0_DAT1 */
++			AM62X_IOPAD(0x20c, PIN_INPUT_PULLUP, 0) /* (AA3) MMC0_DAT2 */
++			AM62X_IOPAD(0x208, PIN_INPUT_PULLUP, 0) /* (Y4) MMC0_DAT3 */
++			AM62X_IOPAD(0x204, PIN_INPUT_PULLUP, 0) /* (AB2) MMC0_DAT4 */
++			AM62X_IOPAD(0x200, PIN_INPUT_PULLUP, 0) /* (AC1) MMC0_DAT5 */
++			AM62X_IOPAD(0x1fc, PIN_INPUT_PULLUP, 0) /* (AD2) MMC0_DAT6 */
++			AM62X_IOPAD(0x1f8, PIN_INPUT_PULLUP, 0) /* (AC2) MMC0_DAT7 */
++		>;
++	};
++
+ 	main_rgmii2_pins_default: main-rgmii2-default-pins {
+ 		bootph-all;
+ 		pinctrl-single,pins = <
+@@ -195,6 +211,14 @@ exp1: gpio@22 {
+ 	};
+ };
+ 
++&sdhci0 {
++	bootph-all;
++	non-removable;
++	pinctrl-names = "default";
++	pinctrl-0 = <&main_mmc0_pins_default>;
++	status = "okay";
++};
++
+ &sdhci1 {
+ 	vmmc-supply = <&vdd_mmc1>;
+ 	vqmmc-supply = <&vdd_sd_dv>;
+diff --git a/arch/arm64/boot/dts/ti/k3-am62a-phycore-som.dtsi b/arch/arm64/boot/dts/ti/k3-am62a-phycore-som.dtsi
+index a5aceaa3967051..960a409d6fea73 100644
+--- a/arch/arm64/boot/dts/ti/k3-am62a-phycore-som.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-am62a-phycore-som.dtsi
+@@ -324,7 +324,6 @@ serial_flash: flash@0 {
+ &sdhci0 {
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&main_mmc0_pins_default>;
+-	disable-wp;
+ 	non-removable;
+ 	status = "okay";
+ };
+diff --git a/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts b/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts
+index 67faf46d7a35a5..274a92d747d698 100644
+--- a/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts
++++ b/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts
+@@ -259,8 +259,8 @@ AM62AX_IOPAD(0x1cc, PIN_OUTPUT, 0) /* (D15) UART0_TXD */
+ 
+ 	main_uart1_pins_default: main-uart1-default-pins {
+ 		pinctrl-single,pins = <
+-			AM62AX_IOPAD(0x01e8, PIN_INPUT, 1) /* (C17) I2C1_SCL.UART1_RXD */
+-			AM62AX_IOPAD(0x01ec, PIN_OUTPUT, 1) /* (E17) I2C1_SDA.UART1_TXD */
++			AM62AX_IOPAD(0x01ac, PIN_INPUT, 2) /* (B21) MCASP0_AFSR.UART1_RXD */
++			AM62AX_IOPAD(0x01b0, PIN_OUTPUT, 2) /* (A21) MCASP0_ACLKR.UART1_TXD */
+ 			AM62AX_IOPAD(0x0194, PIN_INPUT, 2) /* (C19) MCASP0_AXR3.UART1_CTSn */
+ 			AM62AX_IOPAD(0x0198, PIN_OUTPUT, 2) /* (B19) MCASP0_AXR2.UART1_RTSn */
+ 		>;
+@@ -301,6 +301,7 @@ AM62AX_IOPAD(0x200, PIN_INPUT_PULLUP, 0) /* (AC1) MMC0_DAT5 */
+ 			AM62AX_IOPAD(0x1fc, PIN_INPUT_PULLUP, 0) /* (AD2) MMC0_DAT6 */
+ 			AM62AX_IOPAD(0x1f8, PIN_INPUT_PULLUP, 0) /* (AC2) MMC0_DAT7 */
+ 		>;
++		bootph-all;
+ 	};
+ 
+ 	main_mmc1_pins_default: main-mmc1-default-pins {
+@@ -602,7 +603,7 @@ &sdhci0 {
+ 	non-removable;
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&main_mmc0_pins_default>;
+-	disable-wp;
++	bootph-all;
+ };
+ 
+ &sdhci1 {
+diff --git a/arch/arm64/boot/dts/ti/k3-am62p5-sk.dts b/arch/arm64/boot/dts/ti/k3-am62p5-sk.dts
+index 3efa12bb725462..b94093a7a392a4 100644
+--- a/arch/arm64/boot/dts/ti/k3-am62p5-sk.dts
++++ b/arch/arm64/boot/dts/ti/k3-am62p5-sk.dts
+@@ -444,8 +444,8 @@ &main_i2c2 {
+ 
+ &sdhci0 {
+ 	status = "okay";
++	non-removable;
+ 	ti,driver-strength-ohm = <50>;
+-	disable-wp;
+ 	bootph-all;
+ };
+ 
+diff --git a/arch/arm64/boot/dts/ti/k3-am62x-sk-common.dtsi b/arch/arm64/boot/dts/ti/k3-am62x-sk-common.dtsi
+index 44ff67b6bf1e48..4f2d45fd36766e 100644
+--- a/arch/arm64/boot/dts/ti/k3-am62x-sk-common.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-am62x-sk-common.dtsi
+@@ -182,22 +182,6 @@ AM62X_IOPAD(0x0b4, PIN_INPUT_PULLUP, 1) /* (K24/H19) GPMC0_CSn3.I2C2_SDA */
+ 		>;
+ 	};
+ 
+-	main_mmc0_pins_default: main-mmc0-default-pins {
+-		bootph-all;
+-		pinctrl-single,pins = <
+-			AM62X_IOPAD(0x220, PIN_INPUT, 0) /* (Y3/V3) MMC0_CMD */
+-			AM62X_IOPAD(0x218, PIN_INPUT, 0) /* (AB1/Y1) MMC0_CLK */
+-			AM62X_IOPAD(0x214, PIN_INPUT, 0) /* (AA2/V2) MMC0_DAT0 */
+-			AM62X_IOPAD(0x210, PIN_INPUT, 0) /* (AA1/V1) MMC0_DAT1 */
+-			AM62X_IOPAD(0x20c, PIN_INPUT, 0) /* (AA3/W2) MMC0_DAT2 */
+-			AM62X_IOPAD(0x208, PIN_INPUT, 0) /* (Y4/W1) MMC0_DAT3 */
+-			AM62X_IOPAD(0x204, PIN_INPUT, 0) /* (AB2/Y2) MMC0_DAT4 */
+-			AM62X_IOPAD(0x200, PIN_INPUT, 0) /* (AC1/W3) MMC0_DAT5 */
+-			AM62X_IOPAD(0x1fc, PIN_INPUT, 0) /* (AD2/W4) MMC0_DAT6 */
+-			AM62X_IOPAD(0x1f8, PIN_INPUT, 0) /* (AC2/V4) MMC0_DAT7 */
+-		>;
+-	};
+-
+ 	main_mmc1_pins_default: main-mmc1-default-pins {
+ 		bootph-all;
+ 		pinctrl-single,pins = <
+@@ -413,14 +397,6 @@ &main_i2c2 {
+ 	clock-frequency = <400000>;
+ };
+ 
+-&sdhci0 {
+-	bootph-all;
+-	status = "okay";
+-	pinctrl-names = "default";
+-	pinctrl-0 = <&main_mmc0_pins_default>;
+-	disable-wp;
+-};
+-
+ &sdhci1 {
+ 	/* SD/MMC */
+ 	bootph-all;
+diff --git a/arch/arm64/boot/dts/ti/k3-am642-evm.dts b/arch/arm64/boot/dts/ti/k3-am642-evm.dts
+index 97ca16f00cd260..95c20e39342cc8 100644
+--- a/arch/arm64/boot/dts/ti/k3-am642-evm.dts
++++ b/arch/arm64/boot/dts/ti/k3-am642-evm.dts
+@@ -584,7 +584,6 @@ &sdhci0 {
+ 	status = "okay";
+ 	non-removable;
+ 	ti,driver-strength-ohm = <50>;
+-	disable-wp;
+ 	bootph-all;
+ };
+ 
+diff --git a/arch/arm64/boot/dts/ti/k3-am654-base-board.dts b/arch/arm64/boot/dts/ti/k3-am654-base-board.dts
+index aa7139cc8a92b4..c30425960398eb 100644
+--- a/arch/arm64/boot/dts/ti/k3-am654-base-board.dts
++++ b/arch/arm64/boot/dts/ti/k3-am654-base-board.dts
+@@ -456,7 +456,6 @@ &sdhci0 {
+ 	bus-width = <8>;
+ 	non-removable;
+ 	ti,driver-strength-ohm = <50>;
+-	disable-wp;
+ };
+ 
+ /*
+diff --git a/arch/arm64/boot/dts/ti/k3-am6548-iot2050-advanced-common.dtsi b/arch/arm64/boot/dts/ti/k3-am6548-iot2050-advanced-common.dtsi
+index ae842b85b70de0..12af6cb7f65cfb 100644
+--- a/arch/arm64/boot/dts/ti/k3-am6548-iot2050-advanced-common.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-am6548-iot2050-advanced-common.dtsi
+@@ -50,5 +50,4 @@ &sdhci0 {
+ 	bus-width = <8>;
+ 	non-removable;
+ 	ti,driver-strength-ohm = <50>;
+-	disable-wp;
+ };
+diff --git a/arch/arm64/boot/dts/ti/k3-am69-sk.dts b/arch/arm64/boot/dts/ti/k3-am69-sk.dts
+index 1e36965a14032c..3238dd17016a87 100644
+--- a/arch/arm64/boot/dts/ti/k3-am69-sk.dts
++++ b/arch/arm64/boot/dts/ti/k3-am69-sk.dts
+@@ -926,7 +926,6 @@ &main_sdhci0 {
+ 	status = "okay";
+ 	non-removable;
+ 	ti,driver-strength-ohm = <50>;
+-	disable-wp;
+ };
+ 
+ &main_sdhci1 {
+diff --git a/arch/arm64/boot/dts/ti/k3-pinctrl.h b/arch/arm64/boot/dts/ti/k3-pinctrl.h
+index 22b8d73cfd3264..04bbedb56b5838 100644
+--- a/arch/arm64/boot/dts/ti/k3-pinctrl.h
++++ b/arch/arm64/boot/dts/ti/k3-pinctrl.h
+@@ -8,11 +8,16 @@
+ #ifndef DTS_ARM64_TI_K3_PINCTRL_H
+ #define DTS_ARM64_TI_K3_PINCTRL_H
+ 
++#define ST_EN_SHIFT		(14)
+ #define PULLUDEN_SHIFT		(16)
+ #define PULLTYPESEL_SHIFT	(17)
+ #define RXACTIVE_SHIFT		(18)
+ #define DEBOUNCE_SHIFT		(11)
+ 
++/* Schmitt trigger configuration */
++#define ST_DISABLE		(0 << ST_EN_SHIFT)
++#define ST_ENABLE		(1 << ST_EN_SHIFT)
++
+ #define PULL_DISABLE		(1 << PULLUDEN_SHIFT)
+ #define PULL_ENABLE		(0 << PULLUDEN_SHIFT)
+ 
+@@ -26,9 +31,13 @@
+ #define PIN_OUTPUT		(INPUT_DISABLE | PULL_DISABLE)
+ #define PIN_OUTPUT_PULLUP	(INPUT_DISABLE | PULL_UP)
+ #define PIN_OUTPUT_PULLDOWN	(INPUT_DISABLE | PULL_DOWN)
+-#define PIN_INPUT		(INPUT_EN | PULL_DISABLE)
+-#define PIN_INPUT_PULLUP	(INPUT_EN | PULL_UP)
+-#define PIN_INPUT_PULLDOWN	(INPUT_EN | PULL_DOWN)
++#define PIN_INPUT		(INPUT_EN | ST_ENABLE | PULL_DISABLE)
++#define PIN_INPUT_PULLUP	(INPUT_EN | ST_ENABLE | PULL_UP)
++#define PIN_INPUT_PULLDOWN	(INPUT_EN | ST_ENABLE | PULL_DOWN)
++/* Input configurations with Schmitt Trigger disabled */
++#define PIN_INPUT_NOST		(INPUT_EN | PULL_DISABLE)
++#define PIN_INPUT_PULLUP_NOST	(INPUT_EN | PULL_UP)
++#define PIN_INPUT_PULLDOWN_NOST	(INPUT_EN | PULL_DOWN)
+ 
+ #define PIN_DEBOUNCE_DISABLE	(0 << DEBOUNCE_SHIFT)
+ #define PIN_DEBOUNCE_CONF1	(1 << DEBOUNCE_SHIFT)
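The new ST_* macros fold the Schmitt-trigger enable bit (bit 14) into every
standard input configuration, while the *_NOST variants keep the old encoding.
As a rough check, assuming INPUT_EN is bit 18 (RXACTIVE_SHIFT) as elsewhere in
this header and PULL_DISABLE is bit 16 as shown above:

	/* PIN_INPUT      = INPUT_EN | ST_ENABLE | PULL_DISABLE
	 *                = (1 << 18) | (1 << 14) | (1 << 16) = 0x54000
	 * PIN_INPUT_NOST = (1 << 18) | (1 << 16)             = 0x50000
	 */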
+diff --git a/arch/loongarch/kernel/module-sections.c b/arch/loongarch/kernel/module-sections.c
+index e2f30ff9afde82..a43ba7f9f9872a 100644
+--- a/arch/loongarch/kernel/module-sections.c
++++ b/arch/loongarch/kernel/module-sections.c
+@@ -8,6 +8,7 @@
+ #include <linux/module.h>
+ #include <linux/moduleloader.h>
+ #include <linux/ftrace.h>
++#include <linux/sort.h>
+ 
+ Elf_Addr module_emit_got_entry(struct module *mod, Elf_Shdr *sechdrs, Elf_Addr val)
+ {
+@@ -61,39 +62,38 @@ Elf_Addr module_emit_plt_entry(struct module *mod, Elf_Shdr *sechdrs, Elf_Addr v
+ 	return (Elf_Addr)&plt[nr];
+ }
+ 
+-static int is_rela_equal(const Elf_Rela *x, const Elf_Rela *y)
+-{
+-	return x->r_info == y->r_info && x->r_addend == y->r_addend;
+-}
++#define cmp_3way(a, b)  ((a) < (b) ? -1 : (a) > (b))
+ 
+-static bool duplicate_rela(const Elf_Rela *rela, int idx)
++static int compare_rela(const void *x, const void *y)
+ {
+-	int i;
++	int ret;
++	const Elf_Rela *rela_x = x, *rela_y = y;
+ 
+-	for (i = 0; i < idx; i++) {
+-		if (is_rela_equal(&rela[i], &rela[idx]))
+-			return true;
+-	}
++	ret = cmp_3way(rela_x->r_info, rela_y->r_info);
++	if (ret == 0)
++		ret = cmp_3way(rela_x->r_addend, rela_y->r_addend);
+ 
+-	return false;
++	return ret;
+ }
+ 
+ static void count_max_entries(Elf_Rela *relas, int num,
+ 			      unsigned int *plts, unsigned int *gots)
+ {
+-	unsigned int i, type;
++	unsigned int i;
++
++	sort(relas, num, sizeof(Elf_Rela), compare_rela, NULL);
+ 
+ 	for (i = 0; i < num; i++) {
+-		type = ELF_R_TYPE(relas[i].r_info);
+-		switch (type) {
++		if (i && !compare_rela(&relas[i-1], &relas[i]))
++			continue;
++
++		switch (ELF_R_TYPE(relas[i].r_info)) {
+ 		case R_LARCH_SOP_PUSH_PLT_PCREL:
+ 		case R_LARCH_B26:
+-			if (!duplicate_rela(relas, i))
+-				(*plts)++;
++			(*plts)++;
+ 			break;
+ 		case R_LARCH_GOT_PC_HI20:
+-			if (!duplicate_rela(relas, i))
+-				(*gots)++;
++			(*gots)++;
+ 			break;
+ 		default:
+ 			break; /* Do nothing. */
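The rewritten count_max_entries() trades the quadratic duplicate_rela() scan
for a sort followed by a single pass that skips entries equal to their
predecessor. A minimal user-space sketch of the same pattern (hypothetical
relocation struct, with libc qsort standing in for the kernel's sort()):

	#include <stdlib.h>

	struct rel { unsigned long info; long addend; };

	static int cmp_rel(const void *a, const void *b)
	{
		const struct rel *x = a, *y = b;

		if (x->info != y->info)
			return x->info < y->info ? -1 : 1;
		if (x->addend != y->addend)
			return x->addend < y->addend ? -1 : 1;
		return 0;
	}

	static int count_unique(struct rel *r, int n)
	{
		int i, uniq = 0;

		qsort(r, n, sizeof(*r), cmp_rel);
		for (i = 0; i < n; i++)
			if (!i || cmp_rel(&r[i - 1], &r[i]))
				uniq++;	/* first entry of each run of equals */
		return uniq;
	}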
+diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c
+index 4b0ae29b8acab8..b5439a10b7652b 100644
+--- a/arch/loongarch/kvm/vcpu.c
++++ b/arch/loongarch/kvm/vcpu.c
+@@ -1249,9 +1249,11 @@ int kvm_own_lbt(struct kvm_vcpu *vcpu)
+ 		return -EINVAL;
+ 
+ 	preempt_disable();
+-	set_csr_euen(CSR_EUEN_LBTEN);
+-	_restore_lbt(&vcpu->arch.lbt);
+-	vcpu->arch.aux_inuse |= KVM_LARCH_LBT;
++	if (!(vcpu->arch.aux_inuse & KVM_LARCH_LBT)) {
++		set_csr_euen(CSR_EUEN_LBTEN);
++		_restore_lbt(&vcpu->arch.lbt);
++		vcpu->arch.aux_inuse |= KVM_LARCH_LBT;
++	}
+ 	preempt_enable();
+ 
+ 	return 0;
+diff --git a/arch/m68k/kernel/head.S b/arch/m68k/kernel/head.S
+index ba22bc2f3d6d86..d96685489aac98 100644
+--- a/arch/m68k/kernel/head.S
++++ b/arch/m68k/kernel/head.S
+@@ -3400,6 +3400,7 @@ L(console_clear_loop):
+ 
+ 	movel	%d4,%d1				/* screen height in pixels */
+ 	divul	%a0@(FONT_DESC_HEIGHT),%d1	/* d1 = max num rows */
++	subql	#1,%d1				/* row range is 0 to num - 1 */
+ 
+ 	movel	%d0,%a2@(Lconsole_struct_num_columns)
+ 	movel	%d1,%a2@(Lconsole_struct_num_rows)
+@@ -3546,15 +3547,14 @@ func_start	console_putc,%a0/%a1/%d0-%d7
+ 	cmpib	#10,%d7
+ 	jne	L(console_not_lf)
+ 	movel	%a0@(Lconsole_struct_cur_row),%d0
+-	addil	#1,%d0
+-	movel	%d0,%a0@(Lconsole_struct_cur_row)
+ 	movel	%a0@(Lconsole_struct_num_rows),%d1
+ 	cmpl	%d1,%d0
+ 	jcs	1f
+-	subil	#1,%d0
+-	movel	%d0,%a0@(Lconsole_struct_cur_row)
+ 	console_scroll
++	jra	L(console_exit)
+ 1:
++	addql	#1,%d0
++	movel	%d0,%a0@(Lconsole_struct_cur_row)
+ 	jra	L(console_exit)
+ 
+ L(console_not_lf):
+@@ -3581,12 +3581,6 @@ L(console_not_cr):
+  */
+ L(console_not_home):
+ 	movel	%a0@(Lconsole_struct_cur_column),%d0
+-	addql	#1,%a0@(Lconsole_struct_cur_column)
+-	movel	%a0@(Lconsole_struct_num_columns),%d1
+-	cmpl	%d1,%d0
+-	jcs	1f
+-	console_putc	#'\n'	/* recursion is OK! */
+-1:
+ 	movel	%a0@(Lconsole_struct_cur_row),%d1
+ 
+ 	/*
+@@ -3633,6 +3627,23 @@ L(console_do_font_scanline):
+ 	addq	#1,%d1
+ 	dbra	%d7,L(console_read_char_scanline)
+ 
++	/*
++	 *	Register usage in the code below:
++	 *	a0 = pointer to console globals
++	 *	d0 = cursor column
++	 *	d1 = cursor column limit
++	 */
++
++	lea	%pc@(L(console_globals)),%a0
++
++	movel	%a0@(Lconsole_struct_cur_column),%d0
++	addql	#1,%d0
++	movel	%d0,%a0@(Lconsole_struct_cur_column)	/* Update cursor pos */
++	movel	%a0@(Lconsole_struct_num_columns),%d1
++	cmpl	%d1,%d0
++	jcs	L(console_exit)
++	console_putc	#'\n'		/* Line wrap using tail recursion */
++
+ L(console_exit):
+ func_return	console_putc
+ 
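The net effect of the linefeed rework, as C-like pseudocode (hypothetical
names; num_rows now holds the last valid row index after the subql above):

	if (cur_row < num_rows)
		cur_row++;		/* room left: advance the cursor */
	else
		console_scroll();	/* last row: scroll, cursor stays put */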
+diff --git a/arch/mips/crypto/chacha-core.S b/arch/mips/crypto/chacha-core.S
+index 5755f69cfe0074..706aeb850fb0d6 100644
+--- a/arch/mips/crypto/chacha-core.S
++++ b/arch/mips/crypto/chacha-core.S
+@@ -55,17 +55,13 @@
+ #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ #define MSB 0
+ #define LSB 3
+-#define ROTx rotl
+-#define ROTR(n) rotr n, 24
+ #define	CPU_TO_LE32(n) \
+-	wsbh	n; \
++	wsbh	n, n; \
+ 	rotr	n, 16;
+ #else
+ #define MSB 3
+ #define LSB 0
+-#define ROTx rotr
+ #define CPU_TO_LE32(n)
+-#define ROTR(n)
+ #endif
+ 
+ #define FOR_EACH_WORD(x) \
+@@ -192,10 +188,10 @@ CONCAT3(.Lchacha_mips_xor_aligned_, PLUS_ONE(x), _b: ;) \
+ 	xor	X(W), X(B); \
+ 	xor	X(Y), X(C); \
+ 	xor	X(Z), X(D); \
+-	rotl	X(V), S;    \
+-	rotl	X(W), S;    \
+-	rotl	X(Y), S;    \
+-	rotl	X(Z), S;
++	rotr	X(V), 32 - S; \
++	rotr	X(W), 32 - S; \
++	rotr	X(Y), 32 - S; \
++	rotr	X(Z), 32 - S;
+ 
+ .text
+ .set	reorder
+@@ -372,21 +368,19 @@ chacha_crypt_arch:
+ 	/* First byte */
+ 	lbu	T1, 0(IN)
+ 	addiu	$at, BYTES, 1
+-	CPU_TO_LE32(SAVED_X)
+-	ROTR(SAVED_X)
+ 	xor	T1, SAVED_X
+ 	sb	T1, 0(OUT)
+ 	beqz	$at, .Lchacha_mips_xor_done
+ 	/* Second byte */
+ 	lbu	T1, 1(IN)
+ 	addiu	$at, BYTES, 2
+-	ROTx	SAVED_X, 8
++	rotr	SAVED_X, 8
+ 	xor	T1, SAVED_X
+ 	sb	T1, 1(OUT)
+ 	beqz	$at, .Lchacha_mips_xor_done
+ 	/* Third byte */
+ 	lbu	T1, 2(IN)
+-	ROTx	SAVED_X, 8
++	rotr	SAVED_X, 8
+ 	xor	T1, SAVED_X
+ 	sb	T1, 2(OUT)
+ 	b	.Lchacha_mips_xor_done
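Replacing "rotl r, S" with "rotr r, 32 - S" leans on the rotate identity
rotl(x, s) == rotr(x, 32 - s) for 32-bit words, so only the rotr instruction
is needed regardless of endianness. A quick C statement of the identity:

	#include <stdint.h>

	/* valid for 0 < s < 32 */
	static inline uint32_t rotr32(uint32_t x, unsigned int s)
	{
		return (x >> s) | (x << (32 - s));
	}

	static inline uint32_t rotl32(uint32_t x, unsigned int s)
	{
		return rotr32(x, 32 - s);	/* same result as a left rotate */
	}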
+diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile
+index 9cd9aa3d16f29a..48ae3c79557a51 100644
+--- a/arch/parisc/Makefile
++++ b/arch/parisc/Makefile
+@@ -39,7 +39,9 @@ endif
+ 
+ export LD_BFD
+ 
+-# Set default 32 bits cross compilers for vdso
++# Set default 32-bit cross compilers for vdso.
++# This means that for 64BIT, both the 64-bit tools and the 32-bit tools
++# need to be in the PATH.
+ CC_ARCHES_32 = hppa hppa2.0 hppa1.1
+ CC_SUFFIXES  = linux linux-gnu unknown-linux-gnu suse-linux
+ CROSS32_COMPILE := $(call cc-cross-prefix, \
+diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
+index babf65751e8180..3446a5e2520b22 100644
+--- a/arch/parisc/include/asm/pgtable.h
++++ b/arch/parisc/include/asm/pgtable.h
+@@ -276,7 +276,7 @@ extern unsigned long *empty_zero_page;
+ #define pte_none(x)     (pte_val(x) == 0)
+ #define pte_present(x)	(pte_val(x) & _PAGE_PRESENT)
+ #define pte_user(x)	(pte_val(x) & _PAGE_USER)
+-#define pte_clear(mm, addr, xp)  set_pte(xp, __pte(0))
++#define pte_clear(mm, addr, xp) set_pte_at((mm), (addr), (xp), __pte(0))
+ 
+ #define pmd_flag(x)	(pmd_val(x) & PxD_FLAG_MASK)
+ #define pmd_address(x)	((unsigned long)(pmd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
+@@ -398,6 +398,7 @@ static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
+ 	}
+ }
+ #define set_ptes set_ptes
++#define set_pte_at(mm, addr, ptep, pte) set_ptes(mm, addr, ptep, pte, 1)
+ 
+ /* Used for deferring calls to flush_dcache_page() */
+ 
+@@ -462,7 +463,7 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned
+ 	if (!pte_young(pte)) {
+ 		return 0;
+ 	}
+-	set_pte(ptep, pte_mkold(pte));
++	set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte));
+ 	return 1;
+ }
+ 
+@@ -472,7 +473,7 @@ pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr, pte_t *pt
+ struct mm_struct;
+ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+ {
+-	set_pte(ptep, pte_wrprotect(*ptep));
++	set_pte_at(mm, addr, ptep, pte_wrprotect(*ptep));
+ }
+ 
+ #define pte_same(A,B)	(pte_val(A) == pte_val(B))
+diff --git a/arch/parisc/include/asm/special_insns.h b/arch/parisc/include/asm/special_insns.h
+index 51f40eaf778065..1013eeba31e5bb 100644
+--- a/arch/parisc/include/asm/special_insns.h
++++ b/arch/parisc/include/asm/special_insns.h
+@@ -32,6 +32,34 @@
+ 	pa;						\
+ })
+ 
++/**
++ * prober_user() - Probe user read access
++ * @sr:		Space register.
++ * @va:		Virtual address.
++ *
++ * Return: Non-zero if address is accessible.
++ *
++ * Due to the way _PAGE_READ is handled in TLB entries, we need
++ * a special check to determine whether a user address is accessible.
++ * The ldb instruction does the initial access check. If it is
++ * successful, the probe instruction checks user access rights.
++ */
++#define prober_user(sr, va)	({			\
++	unsigned long read_allowed;			\
++	__asm__ __volatile__(				\
++		"copy %%r0,%0\n"			\
++		"8:\tldb 0(%%sr%1,%2),%%r0\n"		\
++		"\tproberi (%%sr%1,%2),%3,%0\n"		\
++		"9:\n"					\
++		ASM_EXCEPTIONTABLE_ENTRY(8b, 9b,	\
++				"or %%r0,%%r0,%%r0")	\
++		: "=&r" (read_allowed)			\
++		: "i" (sr), "r" (va), "i" (PRIV_USER)	\
++		: "memory"				\
++	);						\
++	read_allowed;					\
++})
++
+ #define CR_EIEM 15	/* External Interrupt Enable Mask */
+ #define CR_CR16 16	/* CR16 Interval Timer */
+ #define CR_EIRR 23	/* External Interrupt Request Register */
+diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
+index 88d0ae5769dde5..6c531d2c847eb1 100644
+--- a/arch/parisc/include/asm/uaccess.h
++++ b/arch/parisc/include/asm/uaccess.h
+@@ -42,9 +42,24 @@
+ 	__gu_err;					\
+ })
+ 
+-#define __get_user(val, ptr)				\
+-({							\
+-	__get_user_internal(SR_USER, val, ptr);	\
++#define __probe_user_internal(sr, error, ptr)			\
++({								\
++	__asm__("\tproberi (%%sr%1,%2),%3,%0\n"			\
++		"\tcmpiclr,= 1,%0,%0\n"				\
++		"\tldi %4,%0\n"					\
++		: "=r"(error)					\
++		: "i"(sr), "r"(ptr), "i"(PRIV_USER),		\
++		  "i"(-EFAULT));				\
++})
++
++#define __get_user(val, ptr)					\
++({								\
++	register long __gu_err;					\
++								\
++	__gu_err = __get_user_internal(SR_USER, val, ptr);	\
++	if (likely(!__gu_err))					\
++		__probe_user_internal(SR_USER, __gu_err, ptr);	\
++	__gu_err;						\
+ })
+ 
+ #define __get_user_asm(sr, val, ldx, ptr)		\
+diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
+index db531e58d70ef0..37ca484cc49511 100644
+--- a/arch/parisc/kernel/cache.c
++++ b/arch/parisc/kernel/cache.c
+@@ -429,7 +429,7 @@ static inline pte_t *get_ptep(struct mm_struct *mm, unsigned long addr)
+ 	return ptep;
+ }
+ 
+-static inline bool pte_needs_flush(pte_t pte)
++static inline bool pte_needs_cache_flush(pte_t pte)
+ {
+ 	return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_NO_CACHE))
+ 		== (_PAGE_PRESENT | _PAGE_ACCESSED);
+@@ -630,7 +630,7 @@ static void flush_cache_page_if_present(struct vm_area_struct *vma,
+ 	ptep = get_ptep(vma->vm_mm, vmaddr);
+ 	if (ptep) {
+ 		pte = ptep_get(ptep);
+-		needs_flush = pte_needs_flush(pte);
++		needs_flush = pte_needs_cache_flush(pte);
+ 		pte_unmap(ptep);
+ 	}
+ 	if (needs_flush)
+@@ -841,7 +841,7 @@ void flush_cache_vmap(unsigned long start, unsigned long end)
+ 	}
+ 
+ 	vm = find_vm_area((void *)start);
+-	if (WARN_ON_ONCE(!vm)) {
++	if (!vm) {
+ 		flush_cache_all();
+ 		return;
+ 	}
+diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
+index ea57bcc21dc5fe..f4bf61a34701e5 100644
+--- a/arch/parisc/kernel/entry.S
++++ b/arch/parisc/kernel/entry.S
+@@ -499,6 +499,12 @@
+ 	 * this happens is quite subtle, read below */
+ 	.macro		make_insert_tlb	spc,pte,prot,tmp
+ 	space_to_prot   \spc \prot        /* create prot id from space */
++
++#if _PAGE_SPECIAL_BIT == _PAGE_DMB_BIT
++	/* need to drop DMB bit, as it's used as SPECIAL flag */
++	depi		0,_PAGE_SPECIAL_BIT,1,\pte
++#endif
++
+ 	/* The following is the real subtlety.  This is depositing
+ 	 * T <-> _PAGE_REFTRAP
+ 	 * D <-> _PAGE_DIRTY
+@@ -511,17 +517,18 @@
+ 	 * Finally, _PAGE_READ goes in the top bit of PL1 (so we
+ 	 * trigger an access rights trap in user space if the user
+ 	 * tries to read an unreadable page */
+-#if _PAGE_SPECIAL_BIT == _PAGE_DMB_BIT
+-	/* need to drop DMB bit, as it's used as SPECIAL flag */
+-	depi		0,_PAGE_SPECIAL_BIT,1,\pte
+-#endif
+ 	depd            \pte,8,7,\prot
+ 
+ 	/* PAGE_USER indicates the page can be read with user privileges,
+ 	 * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
+-	 * contains _PAGE_READ) */
++	 * contains _PAGE_READ). While the kernel can't directly write
++	 * user pages which have _PAGE_WRITE zero, it can read pages
++	 * which have _PAGE_READ zero (PL <= PL1). Thus, the kernel
++	 * exception fault handler doesn't trigger when reading pages
++	 * that aren't user read accessible */
+ 	extrd,u,*=      \pte,_PAGE_USER_BIT+32,1,%r0
+ 	depdi		7,11,3,\prot
++
+ 	/* If we're a gateway page, drop PL2 back to zero for promotion
+ 	 * to kernel privilege (so we can execute the page as kernel).
+ 	 * Any privilege promotion page always denies read and write */
+diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
+index 0fa81bf1466b15..f58c4bccfbce0e 100644
+--- a/arch/parisc/kernel/syscall.S
++++ b/arch/parisc/kernel/syscall.S
+@@ -613,6 +613,9 @@ lws_compare_and_swap32:
+ lws_compare_and_swap:
+ 	/* Trigger memory reference interruptions without writing to memory */
+ 1:	ldw	0(%r26), %r28
++	proberi	(%r26), PRIV_USER, %r28
++	comb,=,n	%r28, %r0, lws_fault /* backwards, likely not taken */
++	nop
+ 2:	stbys,e	%r0, 0(%r26)
+ 
+ 	/* Calculate 8-bit hash index from virtual address */
+@@ -767,6 +770,9 @@ cas2_lock_start:
+ 	copy	%r26, %r28
+ 	depi_safe	0, 31, 2, %r28
+ 10:	ldw	0(%r28), %r1
++	proberi	(%r28), PRIV_USER, %r1
++	comb,=,n	%r1, %r0, lws_fault /* backwards, likely not taken */
++	nop
+ 11:	stbys,e	%r0, 0(%r28)
+ 
+ 	/* Calculate 8-bit hash index from virtual address */
+@@ -951,41 +957,47 @@ atomic_xchg_begin:
+ 
+ 	/* 8-bit exchange */
+ 1:	ldb	0(%r24), %r20
++	proberi	(%r24), PRIV_USER, %r20
++	comb,=,n	%r20, %r0, lws_fault /* backwards, likely not taken */
++	nop
+ 	copy	%r23, %r20
+ 	depi_safe	0, 31, 2, %r20
+ 	b	atomic_xchg_start
+ 2:	stbys,e	%r0, 0(%r20)
+-	nop
+-	nop
+-	nop
+ 
+ 	/* 16-bit exchange */
+ 3:	ldh	0(%r24), %r20
++	proberi	(%r24), PRIV_USER, %r20
++	comb,=,n	%r20, %r0, lws_fault /* backwards, likely not taken */
++	nop
+ 	copy	%r23, %r20
+ 	depi_safe	0, 31, 2, %r20
+ 	b	atomic_xchg_start
+ 4:	stbys,e	%r0, 0(%r20)
+-	nop
+-	nop
+-	nop
+ 
+ 	/* 32-bit exchange */
+ 5:	ldw	0(%r24), %r20
++	proberi	(%r24), PRIV_USER, %r20
++	comb,=,n	%r20, %r0, lws_fault /* backwards, likely not taken */
++	nop
+ 	b	atomic_xchg_start
+ 6:	stbys,e	%r0, 0(%r23)
+ 	nop
+ 	nop
+-	nop
+-	nop
+-	nop
+ 
+ 	/* 64-bit exchange */
+ #ifdef CONFIG_64BIT
+ 7:	ldd	0(%r24), %r20
++	proberi	(%r24), PRIV_USER, %r20
++	comb,=,n	%r20, %r0, lws_fault /* backwards, likely not taken */
++	nop
+ 8:	stdby,e	%r0, 0(%r23)
+ #else
+ 7:	ldw	0(%r24), %r20
+ 8:	ldw	4(%r24), %r20
++	proberi	(%r24), PRIV_USER, %r20
++	comb,=,n	%r20, %r0, lws_fault /* backwards, likely not taken */
++	nop
+ 	copy	%r23, %r20
+ 	depi_safe	0, 31, 2, %r20
+ 9:	stbys,e	%r0, 0(%r20)
+diff --git a/arch/parisc/lib/memcpy.c b/arch/parisc/lib/memcpy.c
+index 5fc0c852c84c8d..69d65ffab31263 100644
+--- a/arch/parisc/lib/memcpy.c
++++ b/arch/parisc/lib/memcpy.c
+@@ -12,6 +12,7 @@
+ #include <linux/module.h>
+ #include <linux/compiler.h>
+ #include <linux/uaccess.h>
++#include <linux/mm.h>
+ 
+ #define get_user_space()	mfsp(SR_USER)
+ #define get_kernel_space()	SR_KERNEL
+@@ -32,9 +33,25 @@ EXPORT_SYMBOL(raw_copy_to_user);
+ unsigned long raw_copy_from_user(void *dst, const void __user *src,
+ 			       unsigned long len)
+ {
++	unsigned long start = (unsigned long) src;
++	unsigned long end = start + len;
++	unsigned long newlen = len;
++
+ 	mtsp(get_user_space(), SR_TEMP1);
+ 	mtsp(get_kernel_space(), SR_TEMP2);
+-	return pa_memcpy(dst, (void __force *)src, len);
++
++	/* Check region is user accessible */
++	if (start)
++	while (start < end) {
++		if (!prober_user(SR_TEMP1, start)) {
++			newlen = (start - (unsigned long) src);
++			break;
++		}
++		start += PAGE_SIZE;
++		/* align to page boundary which may have different permissions */
++		start = PAGE_ALIGN_DOWN(start);
++	}
++	return len - newlen + pa_memcpy(dst, (void __force *)src, newlen);
+ }
+ EXPORT_SYMBOL(raw_copy_from_user);
+ 
+diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
+index c39de84e98b051..f1785640b049b5 100644
+--- a/arch/parisc/mm/fault.c
++++ b/arch/parisc/mm/fault.c
+@@ -363,6 +363,10 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
+ 	mmap_read_unlock(mm);
+ 
+ bad_area_nosemaphore:
++	if (!user_mode(regs) && fixup_exception(regs)) {
++		return;
++	}
++
+ 	if (user_mode(regs)) {
+ 		int signo, si_code;
+ 
+diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
+index fa8518067d38ee..60a495771c0506 100644
+--- a/arch/powerpc/boot/Makefile
++++ b/arch/powerpc/boot/Makefile
+@@ -70,6 +70,7 @@ BOOTCPPFLAGS	:= -nostdinc $(LINUXINCLUDE)
+ BOOTCPPFLAGS	+= -isystem $(shell $(BOOTCC) -print-file-name=include)
+ 
+ BOOTCFLAGS	:= $(BOOTTARGETFLAGS) \
++		   -std=gnu11 \
+ 		   -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
+ 		   -fno-strict-aliasing -O2 \
+ 		   -msoft-float -mno-altivec -mno-vsx \
+diff --git a/arch/s390/boot/vmem.c b/arch/s390/boot/vmem.c
+index 3fa28db2fe59f4..14aee852402176 100644
+--- a/arch/s390/boot/vmem.c
++++ b/arch/s390/boot/vmem.c
+@@ -471,6 +471,9 @@ void setup_vmem(unsigned long kernel_start, unsigned long kernel_end, unsigned l
+ 			 lowcore_address + sizeof(struct lowcore),
+ 			 POPULATE_LOWCORE);
+ 	for_each_physmem_usable_range(i, &start, &end) {
++		/* Do not map lowcore with identity mapping */
++		if (!start)
++			start = sizeof(struct lowcore);
+ 		pgtable_populate((unsigned long)__identity_va(start),
+ 				 (unsigned long)__identity_va(end),
+ 				 POPULATE_IDENTITY);
+diff --git a/arch/s390/hypfs/hypfs_dbfs.c b/arch/s390/hypfs/hypfs_dbfs.c
+index 5d9effb0867cde..41a0d2066fa002 100644
+--- a/arch/s390/hypfs/hypfs_dbfs.c
++++ b/arch/s390/hypfs/hypfs_dbfs.c
+@@ -6,6 +6,7 @@
+  * Author(s): Michael Holzheu <holzheu@linux.vnet.ibm.com>
+  */
+ 
++#include <linux/security.h>
+ #include <linux/slab.h>
+ #include "hypfs.h"
+ 
+@@ -66,23 +67,27 @@ static long dbfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ 	long rc;
+ 
+ 	mutex_lock(&df->lock);
+-	if (df->unlocked_ioctl)
+-		rc = df->unlocked_ioctl(file, cmd, arg);
+-	else
+-		rc = -ENOTTY;
++	rc = df->unlocked_ioctl(file, cmd, arg);
+ 	mutex_unlock(&df->lock);
+ 	return rc;
+ }
+ 
+-static const struct file_operations dbfs_ops = {
++static const struct file_operations dbfs_ops_ioctl = {
+ 	.read		= dbfs_read,
+ 	.unlocked_ioctl = dbfs_ioctl,
+ };
+ 
++static const struct file_operations dbfs_ops = {
++	.read		= dbfs_read,
++};
++
+ void hypfs_dbfs_create_file(struct hypfs_dbfs_file *df)
+ {
+-	df->dentry = debugfs_create_file(df->name, 0400, dbfs_dir, df,
+-					 &dbfs_ops);
++	const struct file_operations *fops = &dbfs_ops;
++
++	if (df->unlocked_ioctl && !security_locked_down(LOCKDOWN_DEBUGFS))
++		fops = &dbfs_ops_ioctl;
++	df->dentry = debugfs_create_file(df->name, 0400, dbfs_dir, df, fops);
+ 	mutex_init(&df->lock);
+ }
+ 
+diff --git a/arch/x86/coco/sev/shared.c b/arch/x86/coco/sev/shared.c
+index f5936da235c713..16b799f37d6cb4 100644
+--- a/arch/x86/coco/sev/shared.c
++++ b/arch/x86/coco/sev/shared.c
+@@ -1285,6 +1285,7 @@ static void svsm_pval_4k_page(unsigned long paddr, bool validate)
+ 	pc->entry[0].page_size = RMP_PG_SIZE_4K;
+ 	pc->entry[0].action    = validate;
+ 	pc->entry[0].ignore_cf = 0;
++	pc->entry[0].rsvd      = 0;
+ 	pc->entry[0].pfn       = paddr >> PAGE_SHIFT;
+ 
+ 	/* Protocol 0, Call ID 1 */
+@@ -1373,6 +1374,7 @@ static u64 svsm_build_ca_from_pfn_range(u64 pfn, u64 pfn_end, bool action,
+ 		pe->page_size = RMP_PG_SIZE_4K;
+ 		pe->action    = action;
+ 		pe->ignore_cf = 0;
++		pe->rsvd      = 0;
+ 		pe->pfn       = pfn;
+ 
+ 		pe++;
+@@ -1403,6 +1405,7 @@ static int svsm_build_ca_from_psc_desc(struct snp_psc_desc *desc, unsigned int d
+ 		pe->page_size = e->pagesize ? RMP_PG_SIZE_2M : RMP_PG_SIZE_4K;
+ 		pe->action    = e->operation == SNP_PAGE_STATE_PRIVATE;
+ 		pe->ignore_cf = 0;
++		pe->rsvd      = 0;
+ 		pe->pfn       = e->gfn;
+ 
+ 		pe++;
+diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
+index 97771b9d33af30..2759524b8ffc3a 100644
+--- a/arch/x86/include/asm/xen/hypercall.h
++++ b/arch/x86/include/asm/xen/hypercall.h
+@@ -94,12 +94,13 @@ DECLARE_STATIC_CALL(xen_hypercall, xen_hypercall_func);
+ #ifdef MODULE
+ #define __ADDRESSABLE_xen_hypercall
+ #else
+-#define __ADDRESSABLE_xen_hypercall __ADDRESSABLE_ASM_STR(__SCK__xen_hypercall)
++#define __ADDRESSABLE_xen_hypercall \
++	__stringify(.global STATIC_CALL_KEY(xen_hypercall);)
+ #endif
+ 
+ #define __HYPERCALL					\
+ 	__ADDRESSABLE_xen_hypercall			\
+-	"call __SCT__xen_hypercall"
++	__stringify(call STATIC_CALL_TRAMP(xen_hypercall))
+ 
+ #define __HYPERCALL_ENTRY(x)	"a" (x)
+ 
+diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c
+index c5191b06f9f21b..d2157f1d276992 100644
+--- a/arch/x86/kernel/cpu/hygon.c
++++ b/arch/x86/kernel/cpu/hygon.c
+@@ -15,6 +15,7 @@
+ #include <asm/cacheinfo.h>
+ #include <asm/spec-ctrl.h>
+ #include <asm/delay.h>
++#include <asm/resctrl.h>
+ 
+ #include "cpu.h"
+ 
+@@ -116,6 +117,8 @@ static void bsp_init_hygon(struct cpuinfo_x86 *c)
+ 			x86_amd_ls_cfg_ssbd_mask = 1ULL << 10;
+ 		}
+ 	}
++
++	resctrl_cpu_detect(c);
+ }
+ 
+ static void early_init_hygon(struct cpuinfo_x86 *c)
+diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
+index 8edfb4e4a73d0e..700926eb77dfa0 100644
+--- a/arch/x86/kvm/mmu/mmu.c
++++ b/arch/x86/kvm/mmu/mmu.c
+@@ -7578,7 +7578,7 @@ static bool kvm_nx_huge_page_recovery_worker(void *data)
+ 	return true;
+ }
+ 
+-static void kvm_mmu_start_lpage_recovery(struct once *once)
++static int kvm_mmu_start_lpage_recovery(struct once *once)
+ {
+ 	struct kvm_arch *ka = container_of(once, struct kvm_arch, nx_once);
+ 	struct kvm *kvm = container_of(ka, struct kvm, arch);
+@@ -7590,12 +7590,13 @@ static void kvm_mmu_start_lpage_recovery(struct once *once)
+ 				      kvm, "kvm-nx-lpage-recovery");
+ 
+ 	if (IS_ERR(nx_thread))
+-		return;
++		return PTR_ERR(nx_thread);
+ 
+ 	vhost_task_start(nx_thread);
+ 
+ 	/* Make the task visible only once it is fully started. */
+ 	WRITE_ONCE(kvm->arch.nx_huge_page_recovery_thread, nx_thread);
++	return 0;
+ }
+ 
+ int kvm_mmu_post_init_vm(struct kvm *kvm)
+@@ -7603,10 +7604,7 @@ int kvm_mmu_post_init_vm(struct kvm *kvm)
+ 	if (nx_hugepage_mitigation_hard_disabled)
+ 		return 0;
+ 
+-	call_once(&kvm->arch.nx_once, kvm_mmu_start_lpage_recovery);
+-	if (!kvm->arch.nx_huge_page_recovery_thread)
+-		return -ENOMEM;
+-	return 0;
++	return call_once(&kvm->arch.nx_once, kvm_mmu_start_lpage_recovery);
+ }
+ 
+ void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
+diff --git a/drivers/accel/habanalabs/gaudi2/gaudi2.c b/drivers/accel/habanalabs/gaudi2/gaudi2.c
+index a38b88baadf2ba..5722e4128d3cee 100644
+--- a/drivers/accel/habanalabs/gaudi2/gaudi2.c
++++ b/drivers/accel/habanalabs/gaudi2/gaudi2.c
+@@ -10437,7 +10437,7 @@ static int gaudi2_memset_device_memory(struct hl_device *hdev, u64 addr, u64 siz
+ 				(u64 *)(lin_dma_pkts_arr), DEBUGFS_WRITE64);
+ 	WREG32(sob_addr, 0);
+ 
+-	kfree(lin_dma_pkts_arr);
++	kvfree(lin_dma_pkts_arr);
+ 
+ 	return rc;
+ }
+diff --git a/drivers/acpi/pfr_update.c b/drivers/acpi/pfr_update.c
+index 8b2910995fc1a9..35c7b04bc9d302 100644
+--- a/drivers/acpi/pfr_update.c
++++ b/drivers/acpi/pfr_update.c
+@@ -310,7 +310,7 @@ static bool applicable_image(const void *data, struct pfru_update_cap_info *cap,
+ 	if (type == PFRU_CODE_INJECT_TYPE)
+ 		return payload_hdr->rt_ver >= cap->code_rt_version;
+ 
+-	return payload_hdr->rt_ver >= cap->drv_rt_version;
++	return payload_hdr->svn_ver >= cap->drv_svn;
+ }
+ 
+ static void print_update_debug_info(struct pfru_updated_result *result,
+diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
+index e00536b495529b..120a2b7067fc7b 100644
+--- a/drivers/ata/Kconfig
++++ b/drivers/ata/Kconfig
+@@ -117,23 +117,39 @@ config SATA_AHCI
+ 
+ config SATA_MOBILE_LPM_POLICY
+ 	int "Default SATA Link Power Management policy"
+-	range 0 4
++	range 0 5
+ 	default 3
+ 	depends on SATA_AHCI
+ 	help
+ 	  Select the Default SATA Link Power Management (LPM) policy to use
+ 	  for chipsets / "South Bridges" supporting low-power modes. Such
+ 	  chipsets are ubiquitous across laptops, desktops and servers.
+-
+-	  The value set has the following meanings:
++	  Each policy combines power saving states and features:
++	   - Partial: The Phy logic is powered but is in a reduced power
++                      state. The exit latency from this state is no longer than
++                      10us.
++	   - Slumber: The Phy logic is powered but is in an even lower power
++                      state. The exit latency from this state is potentially
++		      longer, but no longer than 10ms.
++	   - DevSleep: The Phy logic may be powered down. The exit latency from
++	               this state is no longer than 20 ms, unless otherwise
++		       specified by DETO in the device Identify Device Data log.
++	   - HIPM: Host Initiated Power Management (host automatically
++		   transitions to partial and slumber).
++	   - DIPM: Device Initiated Power Management (device automatically
++		   transitions to partial and slumber).
++
++	  The possible values for the default SATA link power management
++	  policies are:
+ 		0 => Keep firmware settings
+-		1 => Maximum performance
+-		2 => Medium power
+-		3 => Medium power with Device Initiated PM enabled
+-		4 => Minimum power
+-
+-	  Note "Minimum power" is known to cause issues, including disk
+-	  corruption, with some disks and should not be used.
++		1 => No power savings (maximum performance)
++		2 => HIPM (Partial)
++		3 => HIPM (Partial) and DIPM (Partial and Slumber)
++		4 => HIPM (Partial and DevSleep) and DIPM (Partial and Slumber)
++		5 => HIPM (Slumber and DevSleep) and DIPM (Partial and Slumber)
++
++	  Excluding the value 0, higher values represent policies with higher
++	  power savings.
+ 
+ config SATA_AHCI_PLATFORM
+ 	tristate "Platform AHCI SATA support"
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index 1660f46dc08b59..50f5d697297acd 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -855,18 +855,14 @@ static void ata_to_sense_error(u8 drv_stat, u8 drv_err, u8 *sk, u8 *asc,
+ 		{0xFF, 0xFF, 0xFF, 0xFF}, // END mark
+ 	};
+ 	static const unsigned char stat_table[][4] = {
+-		/* Must be first because BUSY means no other bits valid */
+-		{0x80,		ABORTED_COMMAND, 0x47, 0x00},
+-		// Busy, fake parity for now
+-		{0x40,		ILLEGAL_REQUEST, 0x21, 0x04},
+-		// Device ready, unaligned write command
+-		{0x20,		HARDWARE_ERROR,  0x44, 0x00},
+-		// Device fault, internal target failure
+-		{0x08,		ABORTED_COMMAND, 0x47, 0x00},
+-		// Timed out in xfer, fake parity for now
+-		{0x04,		RECOVERED_ERROR, 0x11, 0x00},
+-		// Recovered ECC error	  Medium error, recovered
+-		{0xFF, 0xFF, 0xFF, 0xFF}, // END mark
++		/* Busy: must be first because BUSY means no other bits valid */
++		{ ATA_BUSY,	ABORTED_COMMAND, 0x00, 0x00 },
++		/* Device fault: INTERNAL TARGET FAILURE */
++		{ ATA_DF,	HARDWARE_ERROR,  0x44, 0x00 },
++		/* Corrected data error */
++		{ ATA_CORR,	RECOVERED_ERROR, 0x00, 0x00 },
++
++		{ 0xFF, 0xFF, 0xFF, 0xFF }, /* END mark */
+ 	};
+ 
+ 	/*
+@@ -938,6 +934,8 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
+ 	if (!(qc->flags & ATA_QCFLAG_RTF_FILLED)) {
+ 		ata_dev_dbg(dev,
+ 			    "missing result TF: can't generate ATA PT sense data\n");
++		if (qc->err_mask)
++			ata_scsi_set_sense(dev, cmd, ABORTED_COMMAND, 0, 0);
+ 		return;
+ 	}
+ 
+@@ -995,8 +993,8 @@ static void ata_gen_ata_sense(struct ata_queued_cmd *qc)
+ 
+ 	if (!(qc->flags & ATA_QCFLAG_RTF_FILLED)) {
+ 		ata_dev_dbg(dev,
+-			    "missing result TF: can't generate sense data\n");
+-		return;
++			    "Missing result TF: reporting aborted command\n");
++		goto aborted;
+ 	}
+ 
+ 	/* Use ata_to_sense_error() to map status register bits
+@@ -1007,19 +1005,20 @@ static void ata_gen_ata_sense(struct ata_queued_cmd *qc)
+ 		ata_to_sense_error(tf->status, tf->error,
+ 				   &sense_key, &asc, &ascq);
+ 		ata_scsi_set_sense(dev, cmd, sense_key, asc, ascq);
+-	} else {
+-		/* Could not decode error */
+-		ata_dev_warn(dev, "could not decode error status 0x%x err_mask 0x%x\n",
+-			     tf->status, qc->err_mask);
+-		ata_scsi_set_sense(dev, cmd, ABORTED_COMMAND, 0, 0);
+-		return;
+-	}
+ 
+-	block = ata_tf_read_block(&qc->result_tf, dev);
+-	if (block == U64_MAX)
++		block = ata_tf_read_block(&qc->result_tf, dev);
++		if (block != U64_MAX)
++			scsi_set_sense_information(sb, SCSI_SENSE_BUFFERSIZE,
++						   block);
+ 		return;
++	}
+ 
+-	scsi_set_sense_information(sb, SCSI_SENSE_BUFFERSIZE, block);
++	/* Could not decode error */
++	ata_dev_warn(dev,
++		"Could not decode error 0x%x, status 0x%x (err_mask=0x%x)\n",
++		tf->error, tf->status, qc->err_mask);
++aborted:
++	ata_scsi_set_sense(dev, cmd, ABORTED_COMMAND, 0, 0);
+ }
+ 
+ void ata_scsi_sdev_config(struct scsi_device *sdev)
+@@ -3756,21 +3755,16 @@ static int ata_mselect_control_ata_feature(struct ata_queued_cmd *qc,
+ 	/* Check cdl_ctrl */
+ 	switch (buf[0] & 0x03) {
+ 	case 0:
+-		/* Disable CDL if it is enabled */
+-		if (!(dev->flags & ATA_DFLAG_CDL_ENABLED))
+-			return 0;
++		/* Disable CDL */
+ 		ata_dev_dbg(dev, "Disabling CDL\n");
+ 		cdl_action = 0;
+ 		dev->flags &= ~ATA_DFLAG_CDL_ENABLED;
+ 		break;
+ 	case 0x02:
+ 		/*
+-		 * Enable CDL if not already enabled. Since this is mutually
+-		 * exclusive with NCQ priority, allow this only if NCQ priority
+-		 * is disabled.
++		 * Enable CDL. Since CDL is mutually exclusive with NCQ
++		 * priority, allow this only if NCQ priority is disabled.
+ 		 */
+-		if (dev->flags & ATA_DFLAG_CDL_ENABLED)
+-			return 0;
+ 		if (dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLED) {
+ 			ata_dev_err(dev,
+ 				"NCQ priority must be disabled to enable CDL\n");
+diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
+index 317505eab1266a..c7ec69597a955f 100644
+--- a/drivers/base/power/runtime.c
++++ b/drivers/base/power/runtime.c
+@@ -1183,10 +1183,12 @@ EXPORT_SYMBOL_GPL(__pm_runtime_resume);
+  *
+  * Return -EINVAL if runtime PM is disabled for @dev.
+  *
+- * Otherwise, if the runtime PM status of @dev is %RPM_ACTIVE and either
+- * @ign_usage_count is %true or the runtime PM usage counter of @dev is not
+- * zero, increment the usage counter of @dev and return 1. Otherwise, return 0
+- * without changing the usage counter.
++ * Otherwise, if its runtime PM status is %RPM_ACTIVE and (1) @ign_usage_count
++ * is set, or (2) @dev is not ignoring children and its active child count is
++ * nonzero, or (3) the runtime PM usage counter of @dev is not zero, increment
++ * the usage counter of @dev and return 1.
++ *
++ * Otherwise, return 0 without changing the usage counter.
+  *
+  * If @ign_usage_count is %true, this function can be used to prevent suspending
+  * the device when its runtime PM status is %RPM_ACTIVE.
+@@ -1208,7 +1210,8 @@ static int pm_runtime_get_conditional(struct device *dev, bool ign_usage_count)
+ 		retval = -EINVAL;
+ 	} else if (dev->power.runtime_status != RPM_ACTIVE) {
+ 		retval = 0;
+-	} else if (ign_usage_count) {
++	} else if (ign_usage_count || (!dev->power.ignore_children &&
++		   atomic_read(&dev->power.child_count) > 0)) {
+ 		retval = 1;
+ 		atomic_inc(&dev->power.usage_count);
+ 	} else {
+@@ -1241,10 +1244,16 @@ EXPORT_SYMBOL_GPL(pm_runtime_get_if_active);
+  * @dev: Target device.
+  *
+  * Increment the runtime PM usage counter of @dev if its runtime PM status is
+- * %RPM_ACTIVE and its runtime PM usage counter is greater than 0, in which case
+- * it returns 1. If the device is in a different state or its usage_count is 0,
+- * 0 is returned. -EINVAL is returned if runtime PM is disabled for the device,
+- * in which case also the usage_count will remain unmodified.
++ * %RPM_ACTIVE and its runtime PM usage counter is greater than 0 or it is not
++ * ignoring children and its active child count is nonzero.  1 is returned in
++ * this case.
++ *
++ * If @dev is in a different state or it is not in use (that is, its usage
++ * counter is 0, or it is ignoring children, or its active child count is 0),
++ * 0 is returned.
++ *
++ * -EINVAL is returned if runtime PM is disabled for the device, in which case
++ * also the usage counter of @dev is not updated.
+  */
+ int pm_runtime_get_if_in_use(struct device *dev)
+ {
+diff --git a/drivers/bluetooth/btmtk.c b/drivers/bluetooth/btmtk.c
+index 05de2e6f563de4..07979d47eb76e0 100644
+--- a/drivers/bluetooth/btmtk.c
++++ b/drivers/bluetooth/btmtk.c
+@@ -642,12 +642,7 @@ static int btmtk_usb_hci_wmt_sync(struct hci_dev *hdev,
+ 	 * WMT command.
+ 	 */
+ 	err = wait_on_bit_timeout(&data->flags, BTMTK_TX_WAIT_VND_EVT,
+-				  TASK_INTERRUPTIBLE, HCI_INIT_TIMEOUT);
+-	if (err == -EINTR) {
+-		bt_dev_err(hdev, "Execution of wmt command interrupted");
+-		clear_bit(BTMTK_TX_WAIT_VND_EVT, &data->flags);
+-		goto err_free_wc;
+-	}
++				  TASK_UNINTERRUPTIBLE, HCI_INIT_TIMEOUT);
+ 
+ 	if (err) {
+ 		bt_dev_err(hdev, "Execution of wmt command timed out");
+diff --git a/drivers/bus/mhi/host/boot.c b/drivers/bus/mhi/host/boot.c
+index dedd29ca8db355..05c896b192fd70 100644
+--- a/drivers/bus/mhi/host/boot.c
++++ b/drivers/bus/mhi/host/boot.c
+@@ -31,8 +31,8 @@ int mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
+ 	int ret;
+ 
+ 	for (i = 0; i < img_info->entries - 1; i++, mhi_buf++, bhi_vec++) {
+-		bhi_vec->dma_addr = mhi_buf->dma_addr;
+-		bhi_vec->size = mhi_buf->len;
++		bhi_vec->dma_addr = cpu_to_le64(mhi_buf->dma_addr);
++		bhi_vec->size = cpu_to_le64(mhi_buf->len);
+ 	}
+ 
+ 	dev_dbg(dev, "BHIe programming for RDDM\n");
+@@ -375,8 +375,8 @@ static void mhi_firmware_copy(struct mhi_controller *mhi_cntrl,
+ 	while (remainder) {
+ 		to_cpy = min(remainder, mhi_buf->len);
+ 		memcpy(mhi_buf->buf, buf, to_cpy);
+-		bhi_vec->dma_addr = mhi_buf->dma_addr;
+-		bhi_vec->size = to_cpy;
++		bhi_vec->dma_addr = cpu_to_le64(mhi_buf->dma_addr);
++		bhi_vec->size = cpu_to_le64(to_cpy);
+ 
+ 		buf += to_cpy;
+ 		remainder -= to_cpy;
+diff --git a/drivers/bus/mhi/host/internal.h b/drivers/bus/mhi/host/internal.h
+index d057e877932e3a..762df4bb7f6462 100644
+--- a/drivers/bus/mhi/host/internal.h
++++ b/drivers/bus/mhi/host/internal.h
+@@ -25,8 +25,8 @@ struct mhi_ctxt {
+ };
+ 
+ struct bhi_vec_entry {
+-	u64 dma_addr;
+-	u64 size;
++	__le64 dma_addr;
++	__le64 size;
+ };
+ 
+ enum mhi_ch_state_type {
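Typing the vector-table fields as __le64 records that the device consumes them
in little-endian layout, so writers must go through cpu_to_le64() (a no-op on
little-endian hosts). A minimal sketch of the convention, with a hypothetical
descriptor struct:

	#include <linux/types.h>
	#include <asm/byteorder.h>

	struct dev_vec {		/* device-visible, little-endian */
		__le64 dma_addr;
		__le64 size;
	};

	static void dev_vec_fill(struct dev_vec *v, u64 addr, u64 len)
	{
		v->dma_addr = cpu_to_le64(addr);
		v->size = cpu_to_le64(len);
	}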
+diff --git a/drivers/bus/mhi/host/main.c b/drivers/bus/mhi/host/main.c
+index aa8a0ef697c779..45ec1b585577dd 100644
+--- a/drivers/bus/mhi/host/main.c
++++ b/drivers/bus/mhi/host/main.c
+@@ -602,7 +602,7 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
+ 	{
+ 		dma_addr_t ptr = MHI_TRE_GET_EV_PTR(event);
+ 		struct mhi_ring_element *local_rp, *ev_tre;
+-		void *dev_rp;
++		void *dev_rp, *next_rp;
+ 		struct mhi_buf_info *buf_info;
+ 		u16 xfer_len;
+ 
+@@ -621,6 +621,16 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
+ 		result.dir = mhi_chan->dir;
+ 
+ 		local_rp = tre_ring->rp;
++
++		next_rp = local_rp + 1;
++		if (next_rp >= tre_ring->base + tre_ring->len)
++			next_rp = tre_ring->base;
++		if (dev_rp != next_rp && !MHI_TRE_DATA_GET_CHAIN(local_rp)) {
++			dev_err(&mhi_cntrl->mhi_dev->dev,
++				"Event element points to an unexpected TRE\n");
++			break;
++		}
++
+ 		while (local_rp != dev_rp) {
+ 			buf_info = buf_ring->rp;
+ 			/* If it's the last TRE, get length from the event */
+diff --git a/drivers/cdx/controller/cdx_rpmsg.c b/drivers/cdx/controller/cdx_rpmsg.c
+index 04b578a0be17c2..61f1a290ff0890 100644
+--- a/drivers/cdx/controller/cdx_rpmsg.c
++++ b/drivers/cdx/controller/cdx_rpmsg.c
+@@ -129,8 +129,7 @@ static int cdx_rpmsg_probe(struct rpmsg_device *rpdev)
+ 
+ 	chinfo.src = RPMSG_ADDR_ANY;
+ 	chinfo.dst = rpdev->dst;
+-	strscpy(chinfo.name, cdx_rpmsg_id_table[0].name,
+-		strlen(cdx_rpmsg_id_table[0].name));
++	strscpy(chinfo.name, cdx_rpmsg_id_table[0].name, sizeof(chinfo.name));
+ 
+ 	cdx_mcdi->ept = rpmsg_create_ept(rpdev, cdx_rpmsg_cb, NULL, chinfo);
+ 	if (!cdx_mcdi->ept) {
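The strscpy() bound must describe the destination buffer, not the source
string: with strlen() of the source as the limit, the copy ignores the room
actually available in the destination and is also truncated by one byte,
since the NUL terminator consumes one byte of the limit. Illustration using
the rpmsg channel-info name field:

	char name[RPMSG_NAME_SIZE];

	strscpy(name, src, sizeof(name));	/* bounded, NUL-terminated */
	strscpy(name, src, strlen(src));	/* wrong: drops the last char,
						   ignores sizeof(name) */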
+diff --git a/drivers/comedi/comedi_fops.c b/drivers/comedi/comedi_fops.c
+index bd8a44ea62d2d0..2a65d7fd03750d 100644
+--- a/drivers/comedi/comedi_fops.c
++++ b/drivers/comedi/comedi_fops.c
+@@ -1587,6 +1587,9 @@ static int do_insnlist_ioctl(struct comedi_device *dev,
+ 				memset(&data[n], 0, (MIN_SAMPLES - n) *
+ 						    sizeof(unsigned int));
+ 			}
++		} else {
++			memset(data, 0, max_t(unsigned int, n, MIN_SAMPLES) *
++					sizeof(unsigned int));
+ 		}
+ 		ret = parse_insn(dev, insns + i, data, file);
+ 		if (ret < 0)
+@@ -1670,6 +1673,8 @@ static int do_insn_ioctl(struct comedi_device *dev,
+ 			memset(&data[insn->n], 0,
+ 			       (MIN_SAMPLES - insn->n) * sizeof(unsigned int));
+ 		}
++	} else {
++		memset(data, 0, n_data * sizeof(unsigned int));
+ 	}
+ 	ret = parse_insn(dev, insn, data, file);
+ 	if (ret < 0)
+diff --git a/drivers/comedi/drivers.c b/drivers/comedi/drivers.c
+index f1dc854928c176..c9ebaadc5e82af 100644
+--- a/drivers/comedi/drivers.c
++++ b/drivers/comedi/drivers.c
+@@ -620,11 +620,9 @@ static int insn_rw_emulate_bits(struct comedi_device *dev,
+ 	unsigned int chan = CR_CHAN(insn->chanspec);
+ 	unsigned int base_chan = (chan < 32) ? 0 : chan;
+ 	unsigned int _data[2];
++	unsigned int i;
+ 	int ret;
+ 
+-	if (insn->n == 0)
+-		return 0;
+-
+ 	memset(_data, 0, sizeof(_data));
+ 	memset(&_insn, 0, sizeof(_insn));
+ 	_insn.insn = INSN_BITS;
+@@ -635,18 +633,21 @@ static int insn_rw_emulate_bits(struct comedi_device *dev,
+ 	if (insn->insn == INSN_WRITE) {
+ 		if (!(s->subdev_flags & SDF_WRITABLE))
+ 			return -EINVAL;
+-		_data[0] = 1U << (chan - base_chan);		     /* mask */
+-		_data[1] = data[0] ? (1U << (chan - base_chan)) : 0; /* bits */
++		_data[0] = 1U << (chan - base_chan);		/* mask */
+ 	}
++	for (i = 0; i < insn->n; i++) {
++		if (insn->insn == INSN_WRITE)
++			_data[1] = data[i] ? _data[0] : 0;	/* bits */
+ 
+-	ret = s->insn_bits(dev, s, &_insn, _data);
+-	if (ret < 0)
+-		return ret;
++		ret = s->insn_bits(dev, s, &_insn, _data);
++		if (ret < 0)
++			return ret;
+ 
+-	if (insn->insn == INSN_READ)
+-		data[0] = (_data[1] >> (chan - base_chan)) & 1;
++		if (insn->insn == INSN_READ)
++			data[i] = (_data[1] >> (chan - base_chan)) & 1;
++	}
+ 
+-	return 1;
++	return insn->n;
+ }
+ 
+ static int __comedi_device_postconfig_async(struct comedi_device *dev,
+diff --git a/drivers/comedi/drivers/pcl726.c b/drivers/comedi/drivers/pcl726.c
+index 0430630e6ebb90..b542896fa0e427 100644
+--- a/drivers/comedi/drivers/pcl726.c
++++ b/drivers/comedi/drivers/pcl726.c
+@@ -328,7 +328,8 @@ static int pcl726_attach(struct comedi_device *dev,
+ 	 * Hook up the external trigger source interrupt only if the
+ 	 * user config option is valid and the board supports interrupts.
+ 	 */
+-	if (it->options[1] && (board->irq_mask & (1 << it->options[1]))) {
++	if (it->options[1] > 0 && it->options[1] < 16 &&
++	    (board->irq_mask & (1U << it->options[1]))) {
+ 		ret = request_irq(it->options[1], pcl726_interrupt, 0,
+ 				  dev->board_name, dev);
+ 		if (ret == 0) {
+diff --git a/drivers/cpufreq/armada-8k-cpufreq.c b/drivers/cpufreq/armada-8k-cpufreq.c
+index ccbc826cc4c01c..e7bb4e9c70e0a0 100644
+--- a/drivers/cpufreq/armada-8k-cpufreq.c
++++ b/drivers/cpufreq/armada-8k-cpufreq.c
+@@ -103,7 +103,7 @@ static void armada_8k_cpufreq_free_table(struct freq_table *freq_tables)
+ {
+ 	int opps_index, nb_cpus = num_possible_cpus();
+ 
+-	for (opps_index = 0 ; opps_index <= nb_cpus; opps_index++) {
++	for (opps_index = 0 ; opps_index < nb_cpus; opps_index++) {
+ 		int i;
+ 
+ 		/* If cpu_dev is NULL then we reached the end of the array */
+diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
+index 01322a9054143b..3eb543b1644dce 100644
+--- a/drivers/cpuidle/governors/menu.c
++++ b/drivers/cpuidle/governors/menu.c
+@@ -19,7 +19,7 @@
+ 
+ #include "gov.h"
+ 
+-#define BUCKETS 12
++#define BUCKETS 6
+ #define INTERVAL_SHIFT 3
+ #define INTERVALS (1UL << INTERVAL_SHIFT)
+ #define RESOLUTION 1024
+@@ -29,12 +29,11 @@
+ /*
+  * Concepts and ideas behind the menu governor
+  *
+- * For the menu governor, there are 3 decision factors for picking a C
++ * For the menu governor, there are 2 decision factors for picking a C
+  * state:
+  * 1) Energy break even point
+- * 2) Performance impact
+- * 3) Latency tolerance (from pmqos infrastructure)
+- * These three factors are treated independently.
++ * 2) Latency tolerance (from pmqos infrastructure)
++ * These two factors are treated independently.
+  *
+  * Energy break even point
+  * -----------------------
+@@ -75,30 +74,6 @@
+  * intervals and if the standard deviation of these 8 intervals is below a
+  * threshold value, we use the average of these intervals as prediction.
+  *
+- * Limiting Performance Impact
+- * ---------------------------
+- * C states, especially those with large exit latencies, can have a real
+- * noticeable impact on workloads, which is not acceptable for most sysadmins,
+- * and in addition, less performance has a power price of its own.
+- *
+- * As a general rule of thumb, menu assumes that the following heuristic
+- * holds:
+- *     The busier the system, the less impact of C states is acceptable
+- *
+- * This rule-of-thumb is implemented using a performance-multiplier:
+- * If the exit latency times the performance multiplier is longer than
+- * the predicted duration, the C state is not considered a candidate
+- * for selection due to a too high performance impact. So the higher
+- * this multiplier is, the longer we need to be idle to pick a deep C
+- * state, and thus the less likely a busy CPU will hit such a deep
+- * C state.
+- *
+- * Currently there is only one value determining the factor:
+- * 10 points are added for each process that is waiting for IO on this CPU.
+- * (This value was experimentally determined.)
+- * Utilization is no longer a factor as it was shown that it never contributed
+- * significantly to the performance multiplier in the first place.
+- *
+  */
+ 
+ struct menu_device {
+@@ -112,19 +87,10 @@ struct menu_device {
+ 	int		interval_ptr;
+ };
+ 
+-static inline int which_bucket(u64 duration_ns, unsigned int nr_iowaiters)
++static inline int which_bucket(u64 duration_ns)
+ {
+ 	int bucket = 0;
+ 
+-	/*
+-	 * We keep two groups of stats; one with no
+-	 * IO pending, one without.
+-	 * This allows us to calculate
+-	 * E(duration)|iowait
+-	 */
+-	if (nr_iowaiters)
+-		bucket = BUCKETS/2;
+-
+ 	if (duration_ns < 10ULL * NSEC_PER_USEC)
+ 		return bucket;
+ 	if (duration_ns < 100ULL * NSEC_PER_USEC)
+@@ -138,19 +104,6 @@ static inline int which_bucket(u64 duration_ns, unsigned int nr_iowaiters)
+ 	return bucket + 5;
+ }
+ 
+-/*
+- * Return a multiplier for the exit latency that is intended
+- * to take performance requirements into account.
+- * The more performance critical we estimate the system
+- * to be, the higher this multiplier, and thus the higher
+- * the barrier to go to an expensive C state.
+- */
+-static inline int performance_multiplier(unsigned int nr_iowaiters)
+-{
+-	/* for IO wait tasks (per cpu!) we add 10x each */
+-	return 1 + 10 * nr_iowaiters;
+-}
+-
+ static DEFINE_PER_CPU(struct menu_device, menu_devices);
+ 
+ static void menu_update_intervals(struct menu_device *data, unsigned int interval_us)
+@@ -277,8 +230,6 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
+ 	struct menu_device *data = this_cpu_ptr(&menu_devices);
+ 	s64 latency_req = cpuidle_governor_latency_req(dev->cpu);
+ 	u64 predicted_ns;
+-	u64 interactivity_req;
+-	unsigned int nr_iowaiters;
+ 	ktime_t delta, delta_tick;
+ 	int i, idx;
+ 
+@@ -295,8 +246,6 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
+ 		menu_update_intervals(data, UINT_MAX);
+ 	}
+ 
+-	nr_iowaiters = nr_iowait_cpu(dev->cpu);
+-
+ 	/* Find the shortest expected idle interval. */
+ 	predicted_ns = get_typical_interval(data) * NSEC_PER_USEC;
+ 	if (predicted_ns > RESIDENCY_THRESHOLD_NS) {
+@@ -310,7 +259,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
+ 		}
+ 
+ 		data->next_timer_ns = delta;
+-		data->bucket = which_bucket(data->next_timer_ns, nr_iowaiters);
++		data->bucket = which_bucket(data->next_timer_ns);
+ 
+ 		/* Round up the result for half microseconds. */
+ 		timer_us = div_u64((RESOLUTION * DECAY * NSEC_PER_USEC) / 2 +
+@@ -328,7 +277,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
+ 		 */
+ 		data->next_timer_ns = KTIME_MAX;
+ 		delta_tick = TICK_NSEC / 2;
+-		data->bucket = which_bucket(KTIME_MAX, nr_iowaiters);
++		data->bucket = which_bucket(KTIME_MAX);
+ 	}
+ 
+ 	if (unlikely(drv->state_count <= 1 || latency_req == 0) ||
+@@ -344,27 +293,15 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
+ 		return 0;
+ 	}
+ 
+-	if (tick_nohz_tick_stopped()) {
+-		/*
+-		 * If the tick is already stopped, the cost of possible short
+-		 * idle duration misprediction is much higher, because the CPU
+-		 * may be stuck in a shallow idle state for a long time as a
+-		 * result of it.  In that case say we might mispredict and use
+-		 * the known time till the closest timer event for the idle
+-		 * state selection.
+-		 */
+-		if (predicted_ns < TICK_NSEC)
+-			predicted_ns = data->next_timer_ns;
+-	} else {
+-		/*
+-		 * Use the performance multiplier and the user-configurable
+-		 * latency_req to determine the maximum exit latency.
+-		 */
+-		interactivity_req = div64_u64(predicted_ns,
+-					      performance_multiplier(nr_iowaiters));
+-		if (latency_req > interactivity_req)
+-			latency_req = interactivity_req;
+-	}
++	/*
++	 * If the tick is already stopped, the cost of possible short idle
++	 * duration misprediction is much higher, because the CPU may be stuck
++	 * in a shallow idle state for a long time as a result of it.  In that
++	 * case, say we might mispredict and use the known time till the closest
++	 * timer event for the idle state selection.
++	 */
++	if (tick_nohz_tick_stopped() && predicted_ns < TICK_NSEC)
++		predicted_ns = data->next_timer_ns;
+ 
+ 	/*
+ 	 * Find the idle state with the lowest power while satisfying
+@@ -380,13 +317,15 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
+ 		if (idx == -1)
+ 			idx = i; /* first enabled state */
+ 
++		if (s->exit_latency_ns > latency_req)
++			break;
++
+ 		if (s->target_residency_ns > predicted_ns) {
+ 			/*
+ 			 * Use a physical idle state, not busy polling, unless
+ 			 * a timer is going to trigger soon enough.
+ 			 */
+ 			if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) &&
+-			    s->exit_latency_ns <= latency_req &&
+ 			    s->target_residency_ns <= data->next_timer_ns) {
+ 				predicted_ns = s->target_residency_ns;
+ 				idx = i;
+@@ -418,8 +357,6 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
+ 
+ 			return idx;
+ 		}
+-		if (s->exit_latency_ns > latency_req)
+-			break;
+ 
+ 		idx = i;
+ 	}
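The reworked which_bucket() keys purely on the distance to the next timer
event. A minimal sketch of the resulting mapping, reconstructed from the
thresholds visible in the hunk (the middle thresholds fall outside the
context lines, so they are assumptions based on the decade spacing):

	/* Decade buckets over the expected idle length; illustrative only. */
	static int example_bucket(u64 duration_ns)
	{
		if (duration_ns < 10ULL * NSEC_PER_USEC)
			return 0;
		if (duration_ns < 100ULL * NSEC_PER_USEC)
			return 1;
		if (duration_ns < 1000ULL * NSEC_PER_USEC)	/* assumed */
			return 2;
		if (duration_ns < 10000ULL * NSEC_PER_USEC)	/* assumed */
			return 3;
		if (duration_ns < 100000ULL * NSEC_PER_USEC)	/* assumed */
			return 4;
		return 5;
	}

With the iowait split removed, every interval lands in this single run of
buckets 0-5 instead of being offset into the upper half of the stats array.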
+diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
+index d4b39184dbdb95..707760fa1978e7 100644
+--- a/drivers/crypto/caam/ctrl.c
++++ b/drivers/crypto/caam/ctrl.c
+@@ -830,7 +830,7 @@ static int caam_ctrl_suspend(struct device *dev)
+ {
+ 	const struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev);
+ 
+-	if (ctrlpriv->caam_off_during_pm && !ctrlpriv->optee_en)
++	if (ctrlpriv->caam_off_during_pm && !ctrlpriv->no_page0)
+ 		caam_state_save(dev);
+ 
+ 	return 0;
+@@ -841,7 +841,7 @@ static int caam_ctrl_resume(struct device *dev)
+ 	struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev);
+ 	int ret = 0;
+ 
+-	if (ctrlpriv->caam_off_during_pm && !ctrlpriv->optee_en) {
++	if (ctrlpriv->caam_off_during_pm && !ctrlpriv->no_page0) {
+ 		caam_state_restore(dev);
+ 
+ 		/* HW and rng will be reset so deinstantiation can be removed */
+@@ -907,6 +907,7 @@ static int caam_probe(struct platform_device *pdev)
+ 
+ 		imx_soc_data = imx_soc_match->data;
+ 		reg_access = reg_access && imx_soc_data->page0_access;
++		ctrlpriv->no_page0 = !reg_access;
+ 		/*
+ 		 * CAAM clocks cannot be controlled from kernel.
+ 		 */
+diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
+index e5132015087209..51c90d17a40d23 100644
+--- a/drivers/crypto/caam/intern.h
++++ b/drivers/crypto/caam/intern.h
+@@ -115,6 +115,7 @@ struct caam_drv_private {
+ 	u8 blob_present;	/* Nonzero if BLOB support present in device */
+ 	u8 mc_en;		/* Nonzero if MC f/w is active */
+ 	u8 optee_en;		/* Nonzero if OP-TEE f/w is active */
++	u8 no_page0;		/* Nonzero if register page 0 is not controlled by Linux */
+ 	bool pr_support;        /* RNG prediction resistance available */
+ 	int secvio_irq;		/* Security violation interrupt number */
+ 	int virt_en;		/* Virtualization enabled in CAAM */
+diff --git a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
+index f7ecabdf7805db..25c940b06c3631 100644
+--- a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
++++ b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
+@@ -190,6 +190,7 @@ void adf_exit_misc_wq(void);
+ bool adf_misc_wq_queue_work(struct work_struct *work);
+ bool adf_misc_wq_queue_delayed_work(struct delayed_work *work,
+ 				    unsigned long delay);
++void adf_misc_wq_flush(void);
+ #if defined(CONFIG_PCI_IOV)
+ int adf_sriov_configure(struct pci_dev *pdev, int numvfs);
+ void adf_disable_sriov(struct adf_accel_dev *accel_dev);
+diff --git a/drivers/crypto/intel/qat/qat_common/adf_init.c b/drivers/crypto/intel/qat/qat_common/adf_init.c
+index f189cce7d15358..46491048e0bb42 100644
+--- a/drivers/crypto/intel/qat/qat_common/adf_init.c
++++ b/drivers/crypto/intel/qat/qat_common/adf_init.c
+@@ -404,6 +404,7 @@ static void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
+ 		hw_data->exit_admin_comms(accel_dev);
+ 
+ 	adf_cleanup_etr_data(accel_dev);
++	adf_misc_wq_flush();
+ 	adf_dev_restore(accel_dev);
+ }
+ 
+diff --git a/drivers/crypto/intel/qat/qat_common/adf_isr.c b/drivers/crypto/intel/qat/qat_common/adf_isr.c
+index cae1aee5479aff..12e5656136610c 100644
+--- a/drivers/crypto/intel/qat/qat_common/adf_isr.c
++++ b/drivers/crypto/intel/qat/qat_common/adf_isr.c
+@@ -407,3 +407,8 @@ bool adf_misc_wq_queue_delayed_work(struct delayed_work *work,
+ {
+ 	return queue_delayed_work(adf_misc_wq, work, delay);
+ }
++
++void adf_misc_wq_flush(void)
++{
++	flush_workqueue(adf_misc_wq);
++}
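The new adf_misc_wq_flush() lets adf_dev_shutdown() drain work that is
still queued on the misc workqueue before the ETR data it may touch is
freed. The general teardown ordering, as a sketch with hypothetical names
(only flush_workqueue() is a real API here):

	static void my_dev_shutdown(struct my_dev *dev)
	{
		my_dev_mask_irqs(dev);       /* stop new work being queued */
		flush_workqueue(my_wq);      /* wait out already-queued work */
		my_dev_free_resources(dev);  /* nothing can touch them now */
	}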
+diff --git a/drivers/crypto/intel/qat/qat_common/qat_algs.c b/drivers/crypto/intel/qat/qat_common/qat_algs.c
+index 3c4bba4a87795e..d69cc1e5e0239e 100644
+--- a/drivers/crypto/intel/qat/qat_common/qat_algs.c
++++ b/drivers/crypto/intel/qat/qat_common/qat_algs.c
+@@ -1277,7 +1277,7 @@ static struct aead_alg qat_aeads[] = { {
+ 	.base = {
+ 		.cra_name = "authenc(hmac(sha1),cbc(aes))",
+ 		.cra_driver_name = "qat_aes_cbc_hmac_sha1",
+-		.cra_priority = 4001,
++		.cra_priority = 100,
+ 		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
+ 		.cra_blocksize = AES_BLOCK_SIZE,
+ 		.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
+@@ -1294,7 +1294,7 @@ static struct aead_alg qat_aeads[] = { {
+ 	.base = {
+ 		.cra_name = "authenc(hmac(sha256),cbc(aes))",
+ 		.cra_driver_name = "qat_aes_cbc_hmac_sha256",
+-		.cra_priority = 4001,
++		.cra_priority = 100,
+ 		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
+ 		.cra_blocksize = AES_BLOCK_SIZE,
+ 		.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
+@@ -1311,7 +1311,7 @@ static struct aead_alg qat_aeads[] = { {
+ 	.base = {
+ 		.cra_name = "authenc(hmac(sha512),cbc(aes))",
+ 		.cra_driver_name = "qat_aes_cbc_hmac_sha512",
+-		.cra_priority = 4001,
++		.cra_priority = 100,
+ 		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
+ 		.cra_blocksize = AES_BLOCK_SIZE,
+ 		.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
+@@ -1329,7 +1329,7 @@ static struct aead_alg qat_aeads[] = { {
+ static struct skcipher_alg qat_skciphers[] = { {
+ 	.base.cra_name = "cbc(aes)",
+ 	.base.cra_driver_name = "qat_aes_cbc",
+-	.base.cra_priority = 4001,
++	.base.cra_priority = 100,
+ 	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
+ 	.base.cra_blocksize = AES_BLOCK_SIZE,
+ 	.base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
+@@ -1347,7 +1347,7 @@ static struct skcipher_alg qat_skciphers[] = { {
+ }, {
+ 	.base.cra_name = "ctr(aes)",
+ 	.base.cra_driver_name = "qat_aes_ctr",
+-	.base.cra_priority = 4001,
++	.base.cra_priority = 100,
+ 	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
+ 	.base.cra_blocksize = 1,
+ 	.base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
+@@ -1365,7 +1365,7 @@ static struct skcipher_alg qat_skciphers[] = { {
+ }, {
+ 	.base.cra_name = "xts(aes)",
+ 	.base.cra_driver_name = "qat_aes_xts",
+-	.base.cra_priority = 4001,
++	.base.cra_priority = 100,
+ 	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK |
+ 			  CRYPTO_ALG_ALLOCATES_MEMORY,
+ 	.base.cra_blocksize = AES_BLOCK_SIZE,
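The crypto API resolves a generic name such as "xts(aes)" to the
registered implementation with the highest cra_priority. Dropping QAT
from 4001 to 100 puts it at or below the usual software and AES-NI
implementations (typically in the 100-400 range), so those win by
default and QAT is only used when requested explicitly. A usage sketch:

	/* Resolves to the highest-priority "xts(aes)"; after this patch
	 * that is normally no longer qat_aes_xts.
	 */
	struct crypto_skcipher *tfm = crypto_alloc_skcipher("xts(aes)", 0, 0);

	/* Requesting the QAT implementation by driver name still works: */
	struct crypto_skcipher *qat_tfm =
		crypto_alloc_skcipher("qat_aes_xts", 0, 0);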
+diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_reqmgr.h b/drivers/crypto/marvell/octeontx2/otx2_cpt_reqmgr.h
+index e27e849b01dfc0..90a031421aacbf 100644
+--- a/drivers/crypto/marvell/octeontx2/otx2_cpt_reqmgr.h
++++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_reqmgr.h
+@@ -34,6 +34,9 @@
+ #define SG_COMP_2    2
+ #define SG_COMP_1    1
+ 
++#define OTX2_CPT_DPTR_RPTR_ALIGN	8
++#define OTX2_CPT_RES_ADDR_ALIGN		32
++
+ union otx2_cpt_opcode {
+ 	u16 flags;
+ 	struct {
+@@ -347,22 +350,48 @@ static inline struct otx2_cpt_inst_info *
+ cn10k_sgv2_info_create(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
+ 		       gfp_t gfp)
+ {
+-	u32 dlen = 0, g_len, sg_len, info_len;
+-	int align = OTX2_CPT_DMA_MINALIGN;
++	u32 dlen = 0, g_len, s_len, sg_len, info_len;
+ 	struct otx2_cpt_inst_info *info;
+-	u16 g_sz_bytes, s_sz_bytes;
+ 	u32 total_mem_len;
+ 	int i;
+ 
+-	g_sz_bytes = ((req->in_cnt + 2) / 3) *
+-		      sizeof(struct cn10kb_cpt_sglist_component);
+-	s_sz_bytes = ((req->out_cnt + 2) / 3) *
+-		      sizeof(struct cn10kb_cpt_sglist_component);
++	/* Allocate memory to meet the alignment requirements shown below:
++	 *  ------------------------------------
++	 * |    struct otx2_cpt_inst_info       |
++	 * |    (No alignment required)         |
++	 * |    --------------------------------|
++	 * |   | padding for ARCH_DMA_MINALIGN  |
++	 * |   | alignment                      |
++	 * |------------------------------------|
++	 * |    SG List Gather/Input memory     |
++	 * |    Length = multiple of 32Bytes    |
++	 * |    Alignment = 8Byte               |
++	 * |----------------------------------  |
++	 * |    SG List Scatter/Output memory   |
++	 * |    Length = multiple of 32Bytes    |
++	 * |    Alignment = 8Byte               |
++	 * |     -------------------------------|
++	 * |    | padding for 32B alignment     |
++	 * |------------------------------------|
++	 * |    Result response memory          |
++	 * |    Alignment = 32Byte              |
++	 *  ------------------------------------
++	 */
++
++	info_len = sizeof(*info);
++
++	g_len = ((req->in_cnt + 2) / 3) *
++		 sizeof(struct cn10kb_cpt_sglist_component);
++	s_len = ((req->out_cnt + 2) / 3) *
++		 sizeof(struct cn10kb_cpt_sglist_component);
++	sg_len = g_len + s_len;
+ 
+-	g_len = ALIGN(g_sz_bytes, align);
+-	sg_len = ALIGN(g_len + s_sz_bytes, align);
+-	info_len = ALIGN(sizeof(*info), align);
+-	total_mem_len = sg_len + info_len + sizeof(union otx2_cpt_res_s);
++	/* Allocate extra memory for SG and response address alignment */
++	total_mem_len = ALIGN(info_len, OTX2_CPT_DPTR_RPTR_ALIGN);
++	total_mem_len += (ARCH_DMA_MINALIGN - 1) &
++			  ~(OTX2_CPT_DPTR_RPTR_ALIGN - 1);
++	total_mem_len += ALIGN(sg_len, OTX2_CPT_RES_ADDR_ALIGN);
++	total_mem_len += sizeof(union otx2_cpt_res_s);
+ 
+ 	info = kzalloc(total_mem_len, gfp);
+ 	if (unlikely(!info))
+@@ -372,7 +401,8 @@ cn10k_sgv2_info_create(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
+ 		dlen += req->in[i].size;
+ 
+ 	info->dlen = dlen;
+-	info->in_buffer = (u8 *)info + info_len;
++	info->in_buffer = PTR_ALIGN((u8 *)info + info_len, ARCH_DMA_MINALIGN);
++	info->out_buffer = info->in_buffer + g_len;
+ 	info->gthr_sz = req->in_cnt;
+ 	info->sctr_sz = req->out_cnt;
+ 
+@@ -384,7 +414,7 @@ cn10k_sgv2_info_create(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
+ 	}
+ 
+ 	if (sgv2io_components_setup(pdev, req->out, req->out_cnt,
+-				    &info->in_buffer[g_len])) {
++				    info->out_buffer)) {
+ 		dev_err(&pdev->dev, "Failed to setup scatter list\n");
+ 		goto destroy_info;
+ 	}
+@@ -401,8 +431,10 @@ cn10k_sgv2_info_create(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
+ 	 * Get buffer for union otx2_cpt_res_s response
+ 	 * structure and its physical address
+ 	 */
+-	info->completion_addr = info->in_buffer + sg_len;
+-	info->comp_baddr = info->dptr_baddr + sg_len;
++	info->completion_addr = PTR_ALIGN((info->in_buffer + sg_len),
++					  OTX2_CPT_RES_ADDR_ALIGN);
++	info->comp_baddr = ALIGN((info->dptr_baddr + sg_len),
++				 OTX2_CPT_RES_ADDR_ALIGN);
+ 
+ 	return info;
+ 
+@@ -417,10 +449,9 @@ static inline struct otx2_cpt_inst_info *
+ otx2_sg_info_create(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
+ 		    gfp_t gfp)
+ {
+-	int align = OTX2_CPT_DMA_MINALIGN;
+ 	struct otx2_cpt_inst_info *info;
+-	u32 dlen, align_dlen, info_len;
+-	u16 g_sz_bytes, s_sz_bytes;
++	u32 dlen, info_len;
++	u16 g_len, s_len;
+ 	u32 total_mem_len;
+ 
+ 	if (unlikely(req->in_cnt > OTX2_CPT_MAX_SG_IN_CNT ||
+@@ -429,22 +460,54 @@ otx2_sg_info_create(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
+ 		return NULL;
+ 	}
+ 
+-	g_sz_bytes = ((req->in_cnt + 3) / 4) *
+-		      sizeof(struct otx2_cpt_sglist_component);
+-	s_sz_bytes = ((req->out_cnt + 3) / 4) *
+-		      sizeof(struct otx2_cpt_sglist_component);
++	/* Allocate memory to meet the alignment requirements shown below:
++	 *  ------------------------------------
++	 * |    struct otx2_cpt_inst_info       |
++	 * |    (No alignment required)         |
++	 * |    --------------------------------|
++	 * |   | padding for ARCH_DMA_MINALIGN  |
++	 * |   | alignment                      |
++	 * |------------------------------------|
++	 * |    SG List Header of 8 Byte        |
++	 * |------------------------------------|
++	 * |    SG List Gather/Input memory     |
++	 * |    Length = multiple of 32Bytes    |
++	 * |    Alignment = 8Byte               |
++	 * |----------------------------------  |
++	 * |    SG List Scatter/Output memory   |
++	 * |    Length = multiple of 32Bytes    |
++	 * |    Alignment = 8Byte               |
++	 * |     -------------------------------|
++	 * |    | padding for 32B alignment     |
++	 * |------------------------------------|
++	 * |    Result response memory          |
++	 * |    Alignment = 32Byte              |
++	 *  ------------------------------------
++	 */
++
++	info_len = sizeof(*info);
++
++	g_len = ((req->in_cnt + 3) / 4) *
++		 sizeof(struct otx2_cpt_sglist_component);
++	s_len = ((req->out_cnt + 3) / 4) *
++		 sizeof(struct otx2_cpt_sglist_component);
++
++	dlen = g_len + s_len + SG_LIST_HDR_SIZE;
+ 
+-	dlen = g_sz_bytes + s_sz_bytes + SG_LIST_HDR_SIZE;
+-	align_dlen = ALIGN(dlen, align);
+-	info_len = ALIGN(sizeof(*info), align);
+-	total_mem_len = align_dlen + info_len + sizeof(union otx2_cpt_res_s);
++	/* Allocate extra memory for SG and response address alignment */
++	total_mem_len = ALIGN(info_len, OTX2_CPT_DPTR_RPTR_ALIGN);
++	total_mem_len += (ARCH_DMA_MINALIGN - 1) &
++			  ~(OTX2_CPT_DPTR_RPTR_ALIGN - 1);
++	total_mem_len += ALIGN(dlen, OTX2_CPT_RES_ADDR_ALIGN);
++	total_mem_len += sizeof(union otx2_cpt_res_s);
+ 
+ 	info = kzalloc(total_mem_len, gfp);
+ 	if (unlikely(!info))
+ 		return NULL;
+ 
+ 	info->dlen = dlen;
+-	info->in_buffer = (u8 *)info + info_len;
++	info->in_buffer = PTR_ALIGN((u8 *)info + info_len, ARCH_DMA_MINALIGN);
++	info->out_buffer = info->in_buffer + SG_LIST_HDR_SIZE + g_len;
+ 
+ 	((u16 *)info->in_buffer)[0] = req->out_cnt;
+ 	((u16 *)info->in_buffer)[1] = req->in_cnt;
+@@ -460,7 +523,7 @@ otx2_sg_info_create(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
+ 	}
+ 
+ 	if (setup_sgio_components(pdev, req->out, req->out_cnt,
+-				  &info->in_buffer[8 + g_sz_bytes])) {
++				  info->out_buffer)) {
+ 		dev_err(&pdev->dev, "Failed to setup scatter list\n");
+ 		goto destroy_info;
+ 	}
+@@ -476,8 +539,10 @@ otx2_sg_info_create(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
+ 	 * Get buffer for union otx2_cpt_res_s response
+ 	 * structure and its physical address
+ 	 */
+-	info->completion_addr = info->in_buffer + align_dlen;
+-	info->comp_baddr = info->dptr_baddr + align_dlen;
++	info->completion_addr = PTR_ALIGN((info->in_buffer + dlen),
++					  OTX2_CPT_RES_ADDR_ALIGN);
++	info->comp_baddr = ALIGN((info->dptr_baddr + dlen),
++				 OTX2_CPT_RES_ADDR_ALIGN);
+ 
+ 	return info;
+ 
+diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
+index 357a7c6ac83713..1493a373baf71e 100644
+--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
++++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
+@@ -1490,12 +1490,13 @@ int otx2_cpt_discover_eng_capabilities(struct otx2_cptpf_dev *cptpf)
+ 	union otx2_cpt_opcode opcode;
+ 	union otx2_cpt_res_s *result;
+ 	union otx2_cpt_inst_s inst;
++	dma_addr_t result_baddr;
+ 	dma_addr_t rptr_baddr;
+ 	struct pci_dev *pdev;
+-	u32 len, compl_rlen;
+ 	int timeout = 10000;
++	void *base, *rptr;
+ 	int ret, etype;
+-	void *rptr;
++	u32 len;
+ 
+ 	/*
+ 	 * We don't get capabilities if it was already done
+@@ -1520,22 +1521,28 @@ int otx2_cpt_discover_eng_capabilities(struct otx2_cptpf_dev *cptpf)
+ 	if (ret)
+ 		goto delete_grps;
+ 
+-	compl_rlen = ALIGN(sizeof(union otx2_cpt_res_s), OTX2_CPT_DMA_MINALIGN);
+-	len = compl_rlen + LOADFVC_RLEN;
++	/* Allocate extra memory for "rptr" and "result" pointer alignment */
++	len = LOADFVC_RLEN + ARCH_DMA_MINALIGN +
++	       sizeof(union otx2_cpt_res_s) + OTX2_CPT_RES_ADDR_ALIGN;
+ 
+-	result = kzalloc(len, GFP_KERNEL);
+-	if (!result) {
++	base = kzalloc(len, GFP_KERNEL);
++	if (!base) {
+ 		ret = -ENOMEM;
+ 		goto lf_cleanup;
+ 	}
+-	rptr_baddr = dma_map_single(&pdev->dev, (void *)result, len,
+-				    DMA_BIDIRECTIONAL);
++
++	rptr = PTR_ALIGN(base, ARCH_DMA_MINALIGN);
++	rptr_baddr = dma_map_single(&pdev->dev, rptr, len, DMA_BIDIRECTIONAL);
+ 	if (dma_mapping_error(&pdev->dev, rptr_baddr)) {
+ 		dev_err(&pdev->dev, "DMA mapping failed\n");
+ 		ret = -EFAULT;
+-		goto free_result;
++		goto free_rptr;
+ 	}
+-	rptr = (u8 *)result + compl_rlen;
++
++	result = (union otx2_cpt_res_s *)PTR_ALIGN(rptr + LOADFVC_RLEN,
++						   OTX2_CPT_RES_ADDR_ALIGN);
++	result_baddr = ALIGN(rptr_baddr + LOADFVC_RLEN,
++			     OTX2_CPT_RES_ADDR_ALIGN);
+ 
+ 	/* Fill in the command */
+ 	opcode.s.major = LOADFVC_MAJOR_OP;
+@@ -1547,14 +1554,14 @@ int otx2_cpt_discover_eng_capabilities(struct otx2_cptpf_dev *cptpf)
+ 	/* 64-bit swap for microcode data reads, not needed for addresses */
+ 	cpu_to_be64s(&iq_cmd.cmd.u);
+ 	iq_cmd.dptr = 0;
+-	iq_cmd.rptr = rptr_baddr + compl_rlen;
++	iq_cmd.rptr = rptr_baddr;
+ 	iq_cmd.cptr.u = 0;
+ 
+ 	for (etype = 1; etype < OTX2_CPT_MAX_ENG_TYPES; etype++) {
+ 		result->s.compcode = OTX2_CPT_COMPLETION_CODE_INIT;
+ 		iq_cmd.cptr.s.grp = otx2_cpt_get_eng_grp(&cptpf->eng_grps,
+ 							 etype);
+-		otx2_cpt_fill_inst(&inst, &iq_cmd, rptr_baddr);
++		otx2_cpt_fill_inst(&inst, &iq_cmd, result_baddr);
+ 		lfs->ops->send_cmd(&inst, 1, &cptpf->lfs.lf[0]);
+ 		timeout = 10000;
+ 
+@@ -1577,8 +1584,8 @@ int otx2_cpt_discover_eng_capabilities(struct otx2_cptpf_dev *cptpf)
+ 
+ error_no_response:
+ 	dma_unmap_single(&pdev->dev, rptr_baddr, len, DMA_BIDIRECTIONAL);
+-free_result:
+-	kfree(result);
++free_rptr:
++	kfree(base);
+ lf_cleanup:
+ 	otx2_cptlf_shutdown(lfs);
+ delete_grps:
+diff --git a/drivers/fpga/zynq-fpga.c b/drivers/fpga/zynq-fpga.c
+index 4db3d80e10b090..e5272644a4a076 100644
+--- a/drivers/fpga/zynq-fpga.c
++++ b/drivers/fpga/zynq-fpga.c
+@@ -405,12 +405,12 @@ static int zynq_fpga_ops_write(struct fpga_manager *mgr, struct sg_table *sgt)
+ 		}
+ 	}
+ 
+-	priv->dma_nelms =
+-	    dma_map_sg(mgr->dev.parent, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
+-	if (priv->dma_nelms == 0) {
++	err = dma_map_sgtable(mgr->dev.parent, sgt, DMA_TO_DEVICE, 0);
++	if (err) {
+ 		dev_err(&mgr->dev, "Unable to DMA map (TO_DEVICE)\n");
+-		return -ENOMEM;
++		return err;
+ 	}
++	priv->dma_nelms = sgt->nents;
+ 
+ 	/* enable clock */
+ 	err = clk_enable(priv->clk);
+@@ -478,7 +478,7 @@ static int zynq_fpga_ops_write(struct fpga_manager *mgr, struct sg_table *sgt)
+ 	clk_disable(priv->clk);
+ 
+ out_free:
+-	dma_unmap_sg(mgr->dev.parent, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
++	dma_unmap_sgtable(mgr->dev.parent, sgt, DMA_TO_DEVICE, 0);
+ 	return err;
+ }
+ 
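Unlike dma_map_sg(), which returns the mapped element count and 0 on
failure, dma_map_sgtable() returns 0 or a negative errno and stores the
mapped count in sgt->nents itself, which is why the error path above can
propagate err directly instead of substituting -ENOMEM. A minimal usage
sketch (the consumer is hypothetical):

	int err = dma_map_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
	if (err)
		return err;	/* already a negative errno */

	start_transfer(dev, sgt->sgl, sgt->nents);	/* hypothetical consumer */

	dma_unmap_sgtable(dev, sgt, DMA_TO_DEVICE, 0);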
+diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
+index 1160a439e92a85..0dd0d996e53e9a 100644
+--- a/drivers/gpu/drm/Kconfig
++++ b/drivers/gpu/drm/Kconfig
+@@ -105,10 +105,15 @@ config DRM_KMS_HELPER
+ 	help
+ 	  CRTC helpers for KMS drivers.
+ 
++config DRM_DRAW
++	bool
++	depends on DRM
++
+ config DRM_PANIC
+ 	bool "Display a user-friendly message when a kernel panic occurs"
+ 	depends on DRM
+ 	select FONT_SUPPORT
++	select DRM_DRAW
+ 	help
+ 	  Enable a drm panic handler, which will display a user-friendly message
+ 	  when a kernel panic occurs. It's useful when using a user-space
+diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
+index 1ec44529447a76..f4a5edf746d2d6 100644
+--- a/drivers/gpu/drm/Makefile
++++ b/drivers/gpu/drm/Makefile
+@@ -89,6 +89,7 @@ drm-$(CONFIG_DRM_PRIVACY_SCREEN) += \
+ 	drm_privacy_screen_x86.o
+ drm-$(CONFIG_DRM_ACCEL) += ../../accel/drm_accel.o
+ drm-$(CONFIG_DRM_PANIC) += drm_panic.o
++drm-$(CONFIG_DRM_DRAW) += drm_draw.o
+ drm-$(CONFIG_DRM_PANIC_SCREEN_QR_CODE) += drm_panic_qr.o
+ obj-$(CONFIG_DRM)	+= drm.o
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 8cf224fd4ff28a..373c626247a1a8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -2387,9 +2387,6 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
+ 
+ 	adev->firmware.gpu_info_fw = NULL;
+ 
+-	if (adev->mman.discovery_bin)
+-		return 0;
+-
+ 	switch (adev->asic_type) {
+ 	default:
+ 		return 0;
+@@ -2411,6 +2408,8 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
+ 		chip_name = "arcturus";
+ 		break;
+ 	case CHIP_NAVI12:
++		if (adev->mman.discovery_bin)
++			return 0;
+ 		chip_name = "navi12";
+ 		break;
+ 	}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+index eee434743deb49..6042956cd5c3c1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+@@ -273,7 +273,7 @@ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
+ 	int i, ret = 0;
+ 
+ 	if (!amdgpu_sriov_vf(adev)) {
+-		/* It can take up to a second for IFWI init to complete on some dGPUs,
++		/* It can take up to two seconds for IFWI init to complete on some dGPUs,
+ 		 * but generally it should be in the 60-100ms range.  Normally this starts
+ 		 * as soon as the device gets power so by the time the OS loads this has long
+ 		 * completed.  However, when a card is hotplugged via e.g., USB4, we need to
+@@ -281,7 +281,7 @@ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
+ 		 * continue.
+ 		 */
+ 
+-		for (i = 0; i < 1000; i++) {
++		for (i = 0; i < 2000; i++) {
+ 			msg = RREG32(mmMP0_SMN_C2PMSG_33);
+ 			if (msg & 0x80000000)
+ 				break;
+@@ -2455,40 +2455,11 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
+ 
+ 	switch (adev->asic_type) {
+ 	case CHIP_VEGA10:
+-	case CHIP_VEGA12:
+-	case CHIP_RAVEN:
+-	case CHIP_VEGA20:
+-	case CHIP_ARCTURUS:
+-	case CHIP_ALDEBARAN:
+-		/* this is not fatal.  We have a fallback below
+-		 * if the new firmwares are not present. some of
+-		 * this will be overridden below to keep things
+-		 * consistent with the current behavior.
++		/* This is not fatal.  We only need the discovery
++		 * binary for sysfs.  We don't need it for a
++		 * functional system.
+ 		 */
+-		r = amdgpu_discovery_reg_base_init(adev);
+-		if (!r) {
+-			amdgpu_discovery_harvest_ip(adev);
+-			amdgpu_discovery_get_gfx_info(adev);
+-			amdgpu_discovery_get_mall_info(adev);
+-			amdgpu_discovery_get_vcn_info(adev);
+-		}
+-		break;
+-	default:
+-		r = amdgpu_discovery_reg_base_init(adev);
+-		if (r) {
+-			drm_err(&adev->ddev, "discovery failed: %d\n", r);
+-			return r;
+-		}
+-
+-		amdgpu_discovery_harvest_ip(adev);
+-		amdgpu_discovery_get_gfx_info(adev);
+-		amdgpu_discovery_get_mall_info(adev);
+-		amdgpu_discovery_get_vcn_info(adev);
+-		break;
+-	}
+-
+-	switch (adev->asic_type) {
+-	case CHIP_VEGA10:
++		amdgpu_discovery_init(adev);
+ 		vega10_reg_base_init(adev);
+ 		adev->sdma.num_instances = 2;
+ 		adev->gmc.num_umc = 4;
+@@ -2511,6 +2482,11 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
+ 		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 0);
+ 		break;
+ 	case CHIP_VEGA12:
++		/* This is not fatal.  We only need the discovery
++		 * binary for sysfs.  We don't need it for a
++		 * functional system.
++		 */
++		amdgpu_discovery_init(adev);
+ 		vega10_reg_base_init(adev);
+ 		adev->sdma.num_instances = 2;
+ 		adev->gmc.num_umc = 4;
+@@ -2533,6 +2509,11 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
+ 		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 1);
+ 		break;
+ 	case CHIP_RAVEN:
++		/* This is not fatal.  We only need the discovery
++		 * binary for sysfs.  We don't need it for a
++		 * functional system.
++		 */
++		amdgpu_discovery_init(adev);
+ 		vega10_reg_base_init(adev);
+ 		adev->sdma.num_instances = 1;
+ 		adev->vcn.num_vcn_inst = 1;
+@@ -2572,6 +2553,11 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
+ 		}
+ 		break;
+ 	case CHIP_VEGA20:
++		/* This is not fatal.  We only need the discovery
++		 * binary for sysfs.  We don't need it for a
++		 * functional system.
++		 */
++		amdgpu_discovery_init(adev);
+ 		vega20_reg_base_init(adev);
+ 		adev->sdma.num_instances = 2;
+ 		adev->gmc.num_umc = 8;
+@@ -2595,6 +2581,11 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
+ 		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 1, 0);
+ 		break;
+ 	case CHIP_ARCTURUS:
++		/* This is not fatal.  We only need the discovery
++		 * binary for sysfs.  We don't need it for a
++		 * functional system.
++		 */
++		amdgpu_discovery_init(adev);
+ 		arct_reg_base_init(adev);
+ 		adev->sdma.num_instances = 8;
+ 		adev->vcn.num_vcn_inst = 2;
+@@ -2623,6 +2614,11 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
+ 		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 5, 0);
+ 		break;
+ 	case CHIP_ALDEBARAN:
++		/* This is not fatal.  We only need the discovery
++		 * binary for sysfs.  We don't need it for a
++		 * functional system.
++		 */
++		amdgpu_discovery_init(adev);
+ 		aldebaran_reg_base_init(adev);
+ 		adev->sdma.num_instances = 5;
+ 		adev->vcn.num_vcn_inst = 2;
+@@ -2649,6 +2645,16 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
+ 		adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0);
+ 		break;
+ 	default:
++		r = amdgpu_discovery_reg_base_init(adev);
++		if (r) {
++			drm_err(&adev->ddev, "discovery failed: %d\n", r);
++			return r;
++		}
++
++		amdgpu_discovery_harvest_ip(adev);
++		amdgpu_discovery_get_gfx_info(adev);
++		amdgpu_discovery_get_mall_info(adev);
++		amdgpu_discovery_get_vcn_info(adev);
+ 		break;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 37d53578825b33..0adb106e2c4256 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -2292,13 +2292,11 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
+  */
+ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
+ {
+-	timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv,
+-					DMA_RESV_USAGE_BOOKKEEP,
+-					true, timeout);
++	timeout = drm_sched_entity_flush(&vm->immediate, timeout);
+ 	if (timeout <= 0)
+ 		return timeout;
+ 
+-	return dma_fence_wait_timeout(vm->last_unlocked, true, timeout);
++	return drm_sched_entity_flush(&vm->delayed, timeout);
+ }
+ 
+ static void amdgpu_vm_destroy_task_info(struct kref *kref)
+diff --git a/drivers/gpu/drm/amd/amdgpu/imu_v12_0.c b/drivers/gpu/drm/amd/amdgpu/imu_v12_0.c
+index 1341f02920314e..10054d07f20bc9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/imu_v12_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/imu_v12_0.c
+@@ -361,7 +361,7 @@ static void program_imu_rlc_ram(struct amdgpu_device *adev,
+ static void imu_v12_0_program_rlc_ram(struct amdgpu_device *adev)
+ {
+ 	u32 reg_data, size = 0;
+-	const u32 *data;
++	const u32 *data = NULL;
+ 	int r = -EINVAL;
+ 
+ 	WREG32_SOC15(GC, 0, regGFX_IMU_RLC_RAM_INDEX, 0x2);
+diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c
+index 134c4ec1088785..910337dc28d105 100644
+--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c
++++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c
+@@ -36,40 +36,47 @@
+ 
+ static const char *mmhub_client_ids_v3_0_1[][2] = {
+ 	[0][0] = "VMC",
++	[1][0] = "ISPXT",
++	[2][0] = "ISPIXT",
+ 	[4][0] = "DCEDMC",
+ 	[5][0] = "DCEVGA",
+ 	[6][0] = "MP0",
+ 	[7][0] = "MP1",
+-	[8][0] = "MPIO",
+-	[16][0] = "HDP",
+-	[17][0] = "LSDMA",
+-	[18][0] = "JPEG",
+-	[19][0] = "VCNU0",
+-	[21][0] = "VSCH",
+-	[22][0] = "VCNU1",
+-	[23][0] = "VCN1",
+-	[32+20][0] = "VCN0",
+-	[2][1] = "DBGUNBIO",
++	[8][0] = "MPM",
++	[12][0] = "ISPTNR",
++	[14][0] = "ISPCRD0",
++	[15][0] = "ISPCRD1",
++	[16][0] = "ISPCRD2",
++	[22][0] = "HDP",
++	[23][0] = "LSDMA",
++	[24][0] = "JPEG",
++	[27][0] = "VSCH",
++	[28][0] = "VCNU",
++	[29][0] = "VCN",
++	[1][1] = "ISPXT",
++	[2][1] = "ISPIXT",
+ 	[3][1] = "DCEDWB",
+ 	[4][1] = "DCEDMC",
+ 	[5][1] = "DCEVGA",
+ 	[6][1] = "MP0",
+ 	[7][1] = "MP1",
+-	[8][1] = "MPIO",
+-	[10][1] = "DBGU0",
+-	[11][1] = "DBGU1",
+-	[12][1] = "DBGU2",
+-	[13][1] = "DBGU3",
+-	[14][1] = "XDP",
+-	[15][1] = "OSSSYS",
+-	[16][1] = "HDP",
+-	[17][1] = "LSDMA",
+-	[18][1] = "JPEG",
+-	[19][1] = "VCNU0",
+-	[20][1] = "VCN0",
+-	[21][1] = "VSCH",
+-	[22][1] = "VCNU1",
+-	[23][1] = "VCN1",
++	[8][1] = "MPM",
++	[10][1] = "ISPMWR0",
++	[11][1] = "ISPMWR1",
++	[12][1] = "ISPTNR",
++	[13][1] = "ISPSWR",
++	[14][1] = "ISPCWR0",
++	[15][1] = "ISPCWR1",
++	[16][1] = "ISPCWR2",
++	[17][1] = "ISPCWR3",
++	[18][1] = "XDP",
++	[21][1] = "OSSSYS",
++	[22][1] = "HDP",
++	[23][1] = "LSDMA",
++	[24][1] = "JPEG",
++	[27][1] = "VSCH",
++	[28][1] = "VCNU",
++	[29][1] = "VCN",
+ };
+ 
+ static uint32_t mmhub_v3_0_1_get_invalidate_req(unsigned int vmid,
+diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c
+index f2ab5001b49249..951998454b2572 100644
+--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c
+@@ -37,39 +37,31 @@
+ static const char *mmhub_client_ids_v4_1_0[][2] = {
+ 	[0][0] = "VMC",
+ 	[4][0] = "DCEDMC",
+-	[5][0] = "DCEVGA",
+ 	[6][0] = "MP0",
+ 	[7][0] = "MP1",
+ 	[8][0] = "MPIO",
+-	[16][0] = "HDP",
+-	[17][0] = "LSDMA",
+-	[18][0] = "JPEG",
+-	[19][0] = "VCNU0",
+-	[21][0] = "VSCH",
+-	[22][0] = "VCNU1",
+-	[23][0] = "VCN1",
+-	[32+20][0] = "VCN0",
+-	[2][1] = "DBGUNBIO",
++	[16][0] = "LSDMA",
++	[17][0] = "JPEG",
++	[19][0] = "VCNU",
++	[22][0] = "VSCH",
++	[23][0] = "HDP",
++	[32+23][0] = "VCNRD",
+ 	[3][1] = "DCEDWB",
+ 	[4][1] = "DCEDMC",
+-	[5][1] = "DCEVGA",
+ 	[6][1] = "MP0",
+ 	[7][1] = "MP1",
+ 	[8][1] = "MPIO",
+ 	[10][1] = "DBGU0",
+ 	[11][1] = "DBGU1",
+-	[12][1] = "DBGU2",
+-	[13][1] = "DBGU3",
++	[12][1] = "DBGUNBIO",
+ 	[14][1] = "XDP",
+ 	[15][1] = "OSSSYS",
+-	[16][1] = "HDP",
+-	[17][1] = "LSDMA",
+-	[18][1] = "JPEG",
+-	[19][1] = "VCNU0",
+-	[20][1] = "VCN0",
+-	[21][1] = "VSCH",
+-	[22][1] = "VCNU1",
+-	[23][1] = "VCN1",
++	[16][1] = "LSDMA",
++	[17][1] = "JPEG",
++	[18][1] = "VCNWR",
++	[19][1] = "VCNU",
++	[22][1] = "VSCH",
++	[23][1] = "HDP",
+ };
+ 
+ static uint32_t mmhub_v4_1_0_get_invalidate_req(unsigned int vmid,
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
+index 4cbe0da100d8f3..c162149b5494f9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
+@@ -1183,6 +1183,8 @@ static int soc15_common_early_init(void *handle)
+ 			AMD_PG_SUPPORT_JPEG;
+ 		/*TODO: need a new external_rev_id for GC 9.4.4? */
+ 		adev->external_rev_id = adev->rev_id + 0x46;
++		if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0))
++			adev->external_rev_id = adev->rev_id + 0x50;
+ 		break;
+ 	default:
+ 		/* FIXME: not supported yet */
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+index aee2212e52f69a..33aa23450b3f72 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+@@ -78,8 +78,8 @@ static int kfd_init(void)
+ static void kfd_exit(void)
+ {
+ 	kfd_cleanup_processes();
+-	kfd_debugfs_fini();
+ 	kfd_process_destroy_wq();
++	kfd_debugfs_fini();
+ 	kfd_procfs_shutdown();
+ 	kfd_topology_shutdown();
+ 	kfd_chardev_exit();
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 33a3e5e28fbc35..9763752cf5cded 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -7583,6 +7583,9 @@ amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
+ 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(conn);
+ 	int ret;
+ 
++	if (WARN_ON(unlikely(!old_con_state || !new_con_state)))
++		return -EINVAL;
++
+ 	trace_amdgpu_dm_connector_atomic_check(new_con_state);
+ 
+ 	if (conn->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+index 9a31e5da368792..2d3e6270327400 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+@@ -302,6 +302,25 @@ static inline int amdgpu_dm_crtc_set_vblank(struct drm_crtc *crtc, bool enable)
+ 	irq_type = amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
+ 
+ 	if (enable) {
++		struct dc *dc = adev->dm.dc;
++		struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
++		struct psr_settings *psr = &acrtc_state->stream->link->psr_settings;
++		struct replay_settings *pr = &acrtc_state->stream->link->replay_settings;
++		bool sr_supported = (psr->psr_version != DC_PSR_VERSION_UNSUPPORTED) ||
++								pr->config.replay_supported;
++
++		/*
++		 * The IPS and self-refresh features can reset the vblank counter
++		 * between vblank disable and enable, which may leave the system
++		 * stuck waiting for the vblank counter to advance.
++		 * Call drm_crtc_vblank_restore() to estimate the missed vblanks
++		 * from timestamps and update DRM's vblank counter accordingly.
++		 */
++		if (dc->caps.ips_support &&
++			dc->config.disable_ips != DMUB_IPS_DISABLE_ALL &&
++			sr_supported && vblank->config.disable_immediate)
++			drm_crtc_vblank_restore(crtc);
++
+ 		/* vblank irq on -> Only need vupdate irq in vrr mode */
+ 		if (amdgpu_dm_crtc_vrr_active(acrtc_state))
+ 			rc = amdgpu_dm_crtc_set_vupdate_irq(crtc, true);
+@@ -664,6 +683,15 @@ static int amdgpu_dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
+ 		return -EINVAL;
+ 	}
+ 
++	if (!state->legacy_cursor_update && amdgpu_dm_crtc_vrr_active(dm_crtc_state)) {
++		struct drm_plane_state *primary_state;
++
++		/* Pull in primary plane for correct VRR handling */
++		primary_state = drm_atomic_get_plane_state(state, crtc->primary);
++		if (IS_ERR(primary_state))
++			return PTR_ERR(primary_state);
++	}
++
+ 	/* In some use cases, like reset, no stream is attached */
+ 	if (!dm_crtc_state->stream)
+ 		return 0;
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+index 15d94d2a0e2fb3..97a9b37f78a245 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+@@ -3932,7 +3932,7 @@ static int capabilities_show(struct seq_file *m, void *unused)
+ 
+ 	struct hubbub *hubbub = dc->res_pool->hubbub;
+ 
+-	if (hubbub->funcs->get_mall_en)
++	if (hubbub && hubbub->funcs->get_mall_en)
+ 		hubbub->funcs->get_mall_en(hubbub, &mall_in_use);
+ 
+ 	if (dc->cap_funcs.get_subvp_en)
+diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+index 3bacf470f7c5b7..a523c5cfcd2489 100644
+--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
++++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+@@ -174,11 +174,8 @@ static struct graphics_object_id bios_parser_get_connector_id(
+ 		return object_id;
+ 	}
+ 
+-	if (tbl->ucNumberOfObjects <= i) {
+-		dm_error("Can't find connector id %d in connector table of size %d.\n",
+-			 i, tbl->ucNumberOfObjects);
++	if (tbl->ucNumberOfObjects <= i)
+ 		return object_id;
+-	}
+ 
+ 	id = le16_to_cpu(tbl->asObjects[i].usObjectID);
+ 	object_id = object_id_from_bios_object_id(id);
+diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
+index 2bcae0643e61db..58e88778da7ffd 100644
+--- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c
++++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
+@@ -993,7 +993,7 @@ static enum bp_result set_pixel_clock_v3(
+ 	allocation.sPCLKInput.usFbDiv =
+ 			cpu_to_le16((uint16_t)bp_params->feedback_divider);
+ 	allocation.sPCLKInput.ucFracFbDiv =
+-			(uint8_t)bp_params->fractional_feedback_divider;
++			(uint8_t)(bp_params->fractional_feedback_divider / 100000);
+ 	allocation.sPCLKInput.ucPostDiv =
+ 			(uint8_t)bp_params->pixel_clock_post_divider;
+ 
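The division by 100000 is a unit conversion: DC computes the fractional
feedback divider with six decimal digits of precision, while the one-byte
ucFracFbDiv field of the v3 SetPixelClock table expects tenths (the exact
source scaling is inferred from DC's PLL code, so treat the factor as an
assumption). For example, a fractional part of 0.25 arrives as 250000, and
250000 / 100000 = 2, i.e. 0.2 after truncation.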
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+index 4c3e58c730b11c..a0c1072c59a236 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+@@ -158,7 +158,6 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
+ 			return NULL;
+ 		}
+ 		dce60_clk_mgr_construct(ctx, clk_mgr);
+-		dce_clk_mgr_construct(ctx, clk_mgr);
+ 		return &clk_mgr->base;
+ 	}
+ #endif
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
+index 26feefbb8990ae..b268c367c27cc4 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
+@@ -386,8 +386,6 @@ static void dce_pplib_apply_display_requirements(
+ {
+ 	struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
+ 
+-	pp_display_cfg->avail_mclk_switch_time_us = dce110_get_min_vblank_time_us(context);
+-
+ 	dce110_fill_display_configs(context, pp_display_cfg);
+ 
+ 	if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) !=  0)
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c
+index f8409453434c1c..13cf415e38e501 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c
+@@ -120,9 +120,15 @@ void dce110_fill_display_configs(
+ 	const struct dc_state *context,
+ 	struct dm_pp_display_configuration *pp_display_cfg)
+ {
++	struct dc *dc = context->clk_mgr->ctx->dc;
+ 	int j;
+ 	int num_cfgs = 0;
+ 
++	pp_display_cfg->avail_mclk_switch_time_us = dce110_get_min_vblank_time_us(context);
++	pp_display_cfg->disp_clk_khz = dc->clk_mgr->clks.dispclk_khz;
++	pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0;
++	pp_display_cfg->crtc_index = dc->res_pool->res_cap->num_timing_generator;
++
+ 	for (j = 0; j < context->stream_count; j++) {
+ 		int k;
+ 
+@@ -164,6 +170,23 @@ void dce110_fill_display_configs(
+ 		cfg->v_refresh /= stream->timing.h_total;
+ 		cfg->v_refresh = (cfg->v_refresh + stream->timing.v_total / 2)
+ 							/ stream->timing.v_total;
++
++		/* Find first CRTC index and calculate its line time.
++		 * This is necessary for DPM on SI GPUs.
++		 */
++		if (cfg->pipe_idx < pp_display_cfg->crtc_index) {
++			const struct dc_crtc_timing *timing =
++				&context->streams[0]->timing;
++
++			pp_display_cfg->crtc_index = cfg->pipe_idx;
++			pp_display_cfg->line_time_in_us =
++				timing->h_total * 10000 / timing->pix_clk_100hz;
++		}
++	}
++
++	if (!num_cfgs) {
++		pp_display_cfg->crtc_index = 0;
++		pp_display_cfg->line_time_in_us = 0;
+ 	}
+ 
+ 	pp_display_cfg->display_count = num_cfgs;
+@@ -223,25 +246,8 @@ void dce11_pplib_apply_display_requirements(
+ 	pp_display_cfg->min_engine_clock_deep_sleep_khz
+ 			= context->bw_ctx.bw.dce.sclk_deep_sleep_khz;
+ 
+-	pp_display_cfg->avail_mclk_switch_time_us =
+-						dce110_get_min_vblank_time_us(context);
+-	/* TODO: dce11.2*/
+-	pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0;
+-
+-	pp_display_cfg->disp_clk_khz = dc->clk_mgr->clks.dispclk_khz;
+-
+ 	dce110_fill_display_configs(context, pp_display_cfg);
+ 
+-	/* TODO: is this still applicable?*/
+-	if (pp_display_cfg->display_count == 1) {
+-		const struct dc_crtc_timing *timing =
+-			&context->streams[0]->timing;
+-
+-		pp_display_cfg->crtc_index =
+-			pp_display_cfg->disp_configs[0].pipe_idx;
+-		pp_display_cfg->line_time_in_us = timing->h_total * 10000 / timing->pix_clk_100hz;
+-	}
+-
+ 	if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) !=  0)
+ 		dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
+ }
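The line-time formula follows from the units: pix_clk_100hz is the pixel
clock in multiples of 100 Hz, so h_total * 10000 / pix_clk_100hz yields
microseconds per scanline. As a worked example with a standard 1080p60
CEA timing, h_total = 2200 and a 148.5 MHz pixel clock (pix_clk_100hz =
1485000) give 2200 * 10000 / 1485000 = 14 us per line after integer
truncation, which is the window DPM has to fit a memory-clock switch on
SI parts.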
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c
+index 0267644717b27a..a39641a0ff09ef 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c
+@@ -83,22 +83,13 @@ static const struct state_dependent_clocks dce60_max_clks_by_state[] = {
+ static int dce60_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr_base)
+ {
+ 	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+-	int dprefclk_wdivider;
+-	int dp_ref_clk_khz;
+-	int target_div;
++	struct dc_context *ctx = clk_mgr_base->ctx;
++	int dp_ref_clk_khz = 0;
+ 
+-	/* DCE6 has no DPREFCLK_CNTL to read DP Reference Clock source */
+-
+-	/* Read the mmDENTIST_DISPCLK_CNTL to get the currently
+-	 * programmed DID DENTIST_DPREFCLK_WDIVIDER*/
+-	REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, &dprefclk_wdivider);
+-
+-	/* Convert DENTIST_DPREFCLK_WDIVIDERto actual divider*/
+-	target_div = dentist_get_divider_from_did(dprefclk_wdivider);
+-
+-	/* Calculate the current DFS clock, in kHz.*/
+-	dp_ref_clk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
+-		* clk_mgr->base.dentist_vco_freq_khz) / target_div;
++	if (ASIC_REV_IS_TAHITI_P(ctx->asic_id.hw_internal_rev))
++		dp_ref_clk_khz = ctx->dc_bios->fw_info.default_display_engine_pll_frequency;
++	else
++		dp_ref_clk_khz = clk_mgr_base->clks.dispclk_khz;
+ 
+ 	return dce_adjust_dp_ref_freq_for_ss(clk_mgr, dp_ref_clk_khz);
+ }
+@@ -109,8 +100,6 @@ static void dce60_pplib_apply_display_requirements(
+ {
+ 	struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
+ 
+-	pp_display_cfg->avail_mclk_switch_time_us = dce110_get_min_vblank_time_us(context);
+-
+ 	dce110_fill_display_configs(context, pp_display_cfg);
+ 
+ 	if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) !=  0)
+@@ -123,11 +112,9 @@ static void dce60_update_clocks(struct clk_mgr *clk_mgr_base,
+ {
+ 	struct clk_mgr_internal *clk_mgr_dce = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ 	struct dm_pp_power_level_change_request level_change_req;
+-	int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz;
+-
+-	/*TODO: W/A for dal3 linux, investigate why this works */
+-	if (!clk_mgr_dce->dfs_bypass_active)
+-		patched_disp_clk = patched_disp_clk * 115 / 100;
++	const int max_disp_clk =
++		clk_mgr_dce->max_clks_by_state[DM_PP_CLOCKS_STATE_PERFORMANCE].display_clk_khz;
++	int patched_disp_clk = MIN(max_disp_clk, context->bw_ctx.bw.dce.dispclk_khz);
+ 
+ 	level_change_req.power_level = dce_get_required_clocks_state(clk_mgr_base, context);
+ 	/* get max clock state from PPLIB */
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index f5d938b9504c07..84e377113e580a 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -215,11 +215,24 @@ static bool create_links(
+ 		connectors_num,
+ 		num_virtual_links);
+ 
+-	// condition loop on link_count to allow skipping invalid indices
++	/* When getting the number of connectors, the VBIOS reports the number of valid indices,
++	 * but it doesn't say which indices are valid, and not every index has an actual connector.
++	 * So, if we don't find a connector on an index, that is not an error.
++	 *
++	 * - There is no guarantee that the first N indices will be valid
++	 * - VBIOS may report more valid indices than there are actual connectors
++	 * - Some VBIOS have valid configurations for more connectors than there actually are
++	 *   on the card. This may be because the manufacturer used the same VBIOS for different
++	 *   variants of the same card.
++	 */
+ 	for (i = 0; dc->link_count < connectors_num && i < MAX_LINKS; i++) {
++		struct graphics_object_id connector_id = bios->funcs->get_connector_id(bios, i);
+ 		struct link_init_data link_init_params = {0};
+ 		struct dc_link *link;
+ 
++		if (connector_id.id == CONNECTOR_ID_UNKNOWN)
++			continue;
++
+ 		DC_LOG_DC("BIOS object table - printing link object info for connector number: %d, link_index: %d", i, dc->link_count);
+ 
+ 		link_init_params.ctx = dc->ctx;
+@@ -890,17 +903,18 @@ static void dc_destruct(struct dc *dc)
+ 	if (dc->link_srv)
+ 		link_destroy_link_service(&dc->link_srv);
+ 
+-	if (dc->ctx->gpio_service)
+-		dal_gpio_service_destroy(&dc->ctx->gpio_service);
+-
+-	if (dc->ctx->created_bios)
+-		dal_bios_parser_destroy(&dc->ctx->dc_bios);
++	if (dc->ctx) {
++		if (dc->ctx->gpio_service)
++			dal_gpio_service_destroy(&dc->ctx->gpio_service);
+ 
+-	kfree(dc->ctx->logger);
+-	dc_perf_trace_destroy(&dc->ctx->perf_trace);
++		if (dc->ctx->created_bios)
++			dal_bios_parser_destroy(&dc->ctx->dc_bios);
++		kfree(dc->ctx->logger);
++		dc_perf_trace_destroy(&dc->ctx->perf_trace);
+ 
+-	kfree(dc->ctx);
+-	dc->ctx = NULL;
++		kfree(dc->ctx);
++		dc->ctx = NULL;
++	}
+ 
+ 	kfree(dc->bw_vbios);
+ 	dc->bw_vbios = NULL;
+diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+index e58e7b93810be7..6b7db8ec9a53b2 100644
+--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
++++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+@@ -260,6 +260,9 @@ enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp)
+ 		return MOD_HDCP_STATUS_FAILURE;
+ 	}
+ 
++	if (!display)
++		return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
++
+ 	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
+ 
+ 	mutex_lock(&psp->hdcp_context.mutex);
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+index 3fd8da5dc761ef..b6657abe62fc4c 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+@@ -2153,6 +2153,12 @@ static int smu_resume(void *handle)
+ 
+ 	adev->pm.dpm_enabled = true;
+ 
++	if (smu->current_power_limit) {
++		ret = smu_set_power_limit(smu, smu->current_power_limit);
++		if (ret && ret != -EOPNOTSUPP)
++			return ret;
++	}
++
+ 	dev_info(adev->dev, "SMU is resumed successfully!\n");
+ 
+ 	return 0;
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
+index e98a6a2f3e6acc..d0aed85db18cc7 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
+@@ -1668,9 +1668,11 @@ static int smu_v14_0_2_get_power_limit(struct smu_context *smu,
+ 				       uint32_t *min_power_limit)
+ {
+ 	struct smu_table_context *table_context = &smu->smu_table;
++	struct smu_14_0_2_powerplay_table *powerplay_table =
++		table_context->power_play_table;
+ 	PPTable_t *pptable = table_context->driver_pptable;
+ 	CustomSkuTable_t *skutable = &pptable->CustomSkuTable;
+-	uint32_t power_limit;
++	uint32_t power_limit, od_percent_upper = 0, od_percent_lower = 0;
+ 	uint32_t msg_limit = pptable->SkuTable.MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];
+ 
+ 	if (smu_v14_0_get_current_power_limit(smu, &power_limit))
+@@ -1683,11 +1685,29 @@ static int smu_v14_0_2_get_power_limit(struct smu_context *smu,
+ 	if (default_power_limit)
+ 		*default_power_limit = power_limit;
+ 
+-	if (max_power_limit)
+-		*max_power_limit = msg_limit;
++	if (powerplay_table) {
++		if (smu->od_enabled &&
++		    smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_PPT_BIT)) {
++			od_percent_upper = pptable->SkuTable.OverDriveLimitsBasicMax.Ppt;
++			od_percent_lower = pptable->SkuTable.OverDriveLimitsBasicMin.Ppt;
++		} else if (smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_PPT_BIT)) {
++			od_percent_upper = 0;
++			od_percent_lower = pptable->SkuTable.OverDriveLimitsBasicMin.Ppt;
++		}
++	}
++
++	dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
++					od_percent_upper, od_percent_lower, power_limit);
++
++	if (max_power_limit) {
++		*max_power_limit = msg_limit * (100 + od_percent_upper);
++		*max_power_limit /= 100;
++	}
+ 
+-	if (min_power_limit)
+-		*min_power_limit = 0;
++	if (min_power_limit) {
++		*min_power_limit = power_limit * (100 + od_percent_lower);
++		*min_power_limit /= 100;
++	}
+ 
+ 	return 0;
+ }
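The overdrive percentages widen the limit range multiplicatively. With
illustrative numbers, and noting that the SKU table's basic-min
percentage is typically negative (an assumption here): msg_limit = 280 W
and od_percent_upper = 25 give *max_power_limit = 280 * (100 + 25) / 100
= 350 W, while power_limit = 220 W and od_percent_lower = -30 give
*min_power_limit = 220 * (100 - 30) / 100 = 154 W.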
+diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c
+index 9fa13da513d24e..bb61bbdcce5b3c 100644
+--- a/drivers/gpu/drm/display/drm_dp_helper.c
++++ b/drivers/gpu/drm/display/drm_dp_helper.c
+@@ -664,7 +664,7 @@ ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
+ 	 * monitor doesn't power down exactly after the throw away read.
+ 	 */
+ 	if (!aux->is_remote) {
+-		ret = drm_dp_dpcd_probe(aux, DP_DPCD_REV);
++		ret = drm_dp_dpcd_probe(aux, DP_LANE0_1_STATUS);
+ 		if (ret < 0)
+ 			return ret;
+ 	}
+diff --git a/drivers/gpu/drm/drm_draw.c b/drivers/gpu/drm/drm_draw.c
+new file mode 100644
+index 00000000000000..d41f8ae1c14833
+--- /dev/null
++++ b/drivers/gpu/drm/drm_draw.c
+@@ -0,0 +1,155 @@
++// SPDX-License-Identifier: GPL-2.0 or MIT
++/*
++ * Copyright (c) 2023 Red Hat.
++ * Author: Jocelyn Falempe <jfalempe@redhat.com>
++ */
++
++#include <linux/bits.h>
++#include <linux/iosys-map.h>
++#include <linux/types.h>
++
++#include <drm/drm_fourcc.h>
++
++#include "drm_draw_internal.h"
++#include "drm_format_internal.h"
++
++/**
++ * drm_draw_color_from_xrgb8888 - convert one pixel from xrgb8888 to the desired format
++ * @color: input color, in xrgb8888 format
++ * @format: output format
++ *
++ * Returns:
++ * Color in the format specified, casted to u32.
++ * Or 0 if the format is not supported.
++ */
++u32 drm_draw_color_from_xrgb8888(u32 color, u32 format)
++{
++	switch (format) {
++	case DRM_FORMAT_RGB565:
++		return drm_pixel_xrgb8888_to_rgb565(color);
++	case DRM_FORMAT_RGBA5551:
++		return drm_pixel_xrgb8888_to_rgba5551(color);
++	case DRM_FORMAT_XRGB1555:
++		return drm_pixel_xrgb8888_to_xrgb1555(color);
++	case DRM_FORMAT_ARGB1555:
++		return drm_pixel_xrgb8888_to_argb1555(color);
++	case DRM_FORMAT_RGB888:
++	case DRM_FORMAT_XRGB8888:
++		return color;
++	case DRM_FORMAT_ARGB8888:
++		return drm_pixel_xrgb8888_to_argb8888(color);
++	case DRM_FORMAT_XBGR8888:
++		return drm_pixel_xrgb8888_to_xbgr8888(color);
++	case DRM_FORMAT_ABGR8888:
++		return drm_pixel_xrgb8888_to_abgr8888(color);
++	case DRM_FORMAT_XRGB2101010:
++		return drm_pixel_xrgb8888_to_xrgb2101010(color);
++	case DRM_FORMAT_ARGB2101010:
++		return drm_pixel_xrgb8888_to_argb2101010(color);
++	case DRM_FORMAT_ABGR2101010:
++		return drm_pixel_xrgb8888_to_abgr2101010(color);
++	default:
++		WARN_ONCE(1, "Can't convert to %p4cc\n", &format);
++		return 0;
++	}
++}
++EXPORT_SYMBOL(drm_draw_color_from_xrgb8888);
++
++/*
++ * Blit functions
++ */
++void drm_draw_blit16(struct iosys_map *dmap, unsigned int dpitch,
++		     const u8 *sbuf8, unsigned int spitch,
++		     unsigned int height, unsigned int width,
++		     unsigned int scale, u16 fg16)
++{
++	unsigned int y, x;
++
++	for (y = 0; y < height; y++)
++		for (x = 0; x < width; x++)
++			if (drm_draw_is_pixel_fg(sbuf8, spitch, x / scale, y / scale))
++				iosys_map_wr(dmap, y * dpitch + x * sizeof(u16), u16, fg16);
++}
++EXPORT_SYMBOL(drm_draw_blit16);
++
++void drm_draw_blit24(struct iosys_map *dmap, unsigned int dpitch,
++		     const u8 *sbuf8, unsigned int spitch,
++		     unsigned int height, unsigned int width,
++		     unsigned int scale, u32 fg32)
++{
++	unsigned int y, x;
++
++	for (y = 0; y < height; y++) {
++		for (x = 0; x < width; x++) {
++			u32 off = y * dpitch + x * 3;
++
++			if (drm_draw_is_pixel_fg(sbuf8, spitch, x / scale, y / scale)) {
++				/* write blue-green-red to output in little endianness */
++				iosys_map_wr(dmap, off, u8, (fg32 & 0x000000FF) >> 0);
++				iosys_map_wr(dmap, off + 1, u8, (fg32 & 0x0000FF00) >> 8);
++				iosys_map_wr(dmap, off + 2, u8, (fg32 & 0x00FF0000) >> 16);
++			}
++		}
++	}
++}
++EXPORT_SYMBOL(drm_draw_blit24);
++
++void drm_draw_blit32(struct iosys_map *dmap, unsigned int dpitch,
++		     const u8 *sbuf8, unsigned int spitch,
++		     unsigned int height, unsigned int width,
++		     unsigned int scale, u32 fg32)
++{
++	unsigned int y, x;
++
++	for (y = 0; y < height; y++)
++		for (x = 0; x < width; x++)
++			if (drm_draw_is_pixel_fg(sbuf8, spitch, x / scale, y / scale))
++				iosys_map_wr(dmap, y * dpitch + x * sizeof(u32), u32, fg32);
++}
++EXPORT_SYMBOL(drm_draw_blit32);
++
++/*
++ * Fill functions
++ */
++void drm_draw_fill16(struct iosys_map *dmap, unsigned int dpitch,
++		     unsigned int height, unsigned int width,
++		     u16 color)
++{
++	unsigned int y, x;
++
++	for (y = 0; y < height; y++)
++		for (x = 0; x < width; x++)
++			iosys_map_wr(dmap, y * dpitch + x * sizeof(u16), u16, color);
++}
++EXPORT_SYMBOL(drm_draw_fill16);
++
++void drm_draw_fill24(struct iosys_map *dmap, unsigned int dpitch,
++		     unsigned int height, unsigned int width,
++		     u16 color)
++{
++	unsigned int y, x;
++
++	for (y = 0; y < height; y++) {
++		for (x = 0; x < width; x++) {
++			unsigned int off = y * dpitch + x * 3;
++
++			/* write blue-green-red to output in little endianness */
++			iosys_map_wr(dmap, off, u8, (color & 0x000000FF) >> 0);
++			iosys_map_wr(dmap, off + 1, u8, (color & 0x0000FF00) >> 8);
++			iosys_map_wr(dmap, off + 2, u8, (color & 0x00FF0000) >> 16);
++		}
++	}
++}
++EXPORT_SYMBOL(drm_draw_fill24);
++
++void drm_draw_fill32(struct iosys_map *dmap, unsigned int dpitch,
++		     unsigned int height, unsigned int width,
++		     u32 color)
++{
++	unsigned int y, x;
++
++	for (y = 0; y < height; y++)
++		for (x = 0; x < width; x++)
++			iosys_map_wr(dmap, y * dpitch + x * sizeof(u32), u32, color);
++}
++EXPORT_SYMBOL(drm_draw_fill32);
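A minimal usage sketch tying the helpers above together; the iosys_map,
pitch and geometry are placeholders standing in for a driver's scanout
buffer:

	/* Convert a panic-screen blue from XRGB8888 once, then fill. */
	u32 fg = drm_draw_color_from_xrgb8888(0xff000088, DRM_FORMAT_RGB565);

	drm_draw_fill16(&map, pitch, height, width, (u16)fg);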
+diff --git a/drivers/gpu/drm/drm_draw_internal.h b/drivers/gpu/drm/drm_draw_internal.h
+new file mode 100644
+index 00000000000000..f121ee7339dc11
+--- /dev/null
++++ b/drivers/gpu/drm/drm_draw_internal.h
+@@ -0,0 +1,56 @@
++/* SPDX-License-Identifier: GPL-2.0 or MIT */
++/*
++ * Copyright (c) 2023 Red Hat.
++ * Author: Jocelyn Falempe <jfalempe@redhat.com>
++ */
++
++#ifndef __DRM_DRAW_INTERNAL_H__
++#define __DRM_DRAW_INTERNAL_H__
++
++#include <linux/font.h>
++#include <linux/types.h>
++
++struct iosys_map;
++
++/* check if the pixel at coord x,y is 1 (foreground) or 0 (background) */
++static inline bool drm_draw_is_pixel_fg(const u8 *sbuf8, unsigned int spitch, int x, int y)
++{
++	return (sbuf8[(y * spitch) + x / 8] & (0x80 >> (x % 8))) != 0;
++}
++
++static inline const u8 *drm_draw_get_char_bitmap(const struct font_desc *font,
++						 char c, size_t font_pitch)
++{
++	return font->data + (c * font->height) * font_pitch;
++}
++
++u32 drm_draw_color_from_xrgb8888(u32 color, u32 format);
++
++void drm_draw_blit16(struct iosys_map *dmap, unsigned int dpitch,
++		     const u8 *sbuf8, unsigned int spitch,
++		     unsigned int height, unsigned int width,
++		     unsigned int scale, u16 fg16);
++
++void drm_draw_blit24(struct iosys_map *dmap, unsigned int dpitch,
++		     const u8 *sbuf8, unsigned int spitch,
++		     unsigned int height, unsigned int width,
++		     unsigned int scale, u32 fg32);
++
++void drm_draw_blit32(struct iosys_map *dmap, unsigned int dpitch,
++		     const u8 *sbuf8, unsigned int spitch,
++		     unsigned int height, unsigned int width,
++		     unsigned int scale, u32 fg32);
++
++void drm_draw_fill16(struct iosys_map *dmap, unsigned int dpitch,
++		     unsigned int height, unsigned int width,
++		     u16 color);
++
++void drm_draw_fill24(struct iosys_map *dmap, unsigned int dpitch,
++		     unsigned int height, unsigned int width,
++		     u32 color);
++
++void drm_draw_fill32(struct iosys_map *dmap, unsigned int dpitch,
++		     unsigned int height, unsigned int width,
++		     u32 color);
++
++#endif /* __DRM_DRAW_INTERNAL_H__ */
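drm_draw_is_pixel_fg() reads the source as a monochrome bitmap with the leftmost pixel in the most significant bit of each byte. A worked example, assuming spitch = 2 (a 16-pixel-wide bitmap):

	/* pixel (x = 10, y = 1): byte 1 * 2 + 10 / 8 = 3, mask 0x80 >> 2 = 0x20 */
	bool fg = (sbuf8[3] & 0x20) != 0;

drm_draw_get_char_bitmap() follows the same layout per glyph: character c starts c * font->height rows into font->data.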
+diff --git a/drivers/gpu/drm/drm_format_helper.c b/drivers/gpu/drm/drm_format_helper.c
+index b1be458ed4dda5..3769760b15cd19 100644
+--- a/drivers/gpu/drm/drm_format_helper.c
++++ b/drivers/gpu/drm/drm_format_helper.c
+@@ -20,6 +20,8 @@
+ #include <drm/drm_print.h>
+ #include <drm/drm_rect.h>
+ 
++#include "drm_format_internal.h"
++
+ /**
+  * drm_format_conv_state_init - Initialize format-conversion state
+  * @state: The state to initialize
+@@ -244,6 +246,18 @@ static int drm_fb_xfrm(struct iosys_map *dst,
+ 				     xfrm_line);
+ }
+ 
++static __always_inline void drm_fb_xfrm_line_32to32(void *dbuf, const void *sbuf,
++						    unsigned int pixels,
++						    u32 (*xfrm_pixel)(u32))
++{
++	__le32 *dbuf32 = dbuf;
++	const __le32 *sbuf32 = sbuf;
++	const __le32 *send32 = sbuf32 + pixels;
++
++	while (sbuf32 < send32)
++		*dbuf32++ = cpu_to_le32(xfrm_pixel(le32_to_cpup(sbuf32++)));
++}
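drm_fb_xfrm_line_32to32() factors the per-scanline loop out of the 32-bit-to-32-bit conversions: it brings each source pixel into CPU byte order, applies the per-pixel callback, and stores the result back in little-endian order. A new conversion therefore reduces to a one-line wrapper around a pixel helper from drm_format_internal.h, which is exactly the shape the rewritten line functions below take:

	static void drm_fb_xrgb8888_to_argb8888_line(void *dbuf, const void *sbuf,
						     unsigned int pixels)
	{
		drm_fb_xfrm_line_32to32(dbuf, sbuf, pixels,
					drm_pixel_xrgb8888_to_argb8888);
	}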
++
+ /**
+  * drm_fb_memcpy - Copy clip buffer
+  * @dst: Array of destination buffers
+@@ -702,20 +716,62 @@ void drm_fb_xrgb8888_to_rgb888(struct iosys_map *dst, const unsigned int *dst_pi
+ }
+ EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb888);
+ 
+-static void drm_fb_xrgb8888_to_argb8888_line(void *dbuf, const void *sbuf, unsigned int pixels)
++static void drm_fb_xrgb8888_to_bgr888_line(void *dbuf, const void *sbuf, unsigned int pixels)
+ {
+-	__le32 *dbuf32 = dbuf;
++	u8 *dbuf8 = dbuf;
+ 	const __le32 *sbuf32 = sbuf;
+ 	unsigned int x;
+ 	u32 pix;
+ 
+ 	for (x = 0; x < pixels; x++) {
+ 		pix = le32_to_cpu(sbuf32[x]);
+-		pix |= GENMASK(31, 24); /* fill alpha bits */
+-		dbuf32[x] = cpu_to_le32(pix);
++		/* write red-green-blue to output in little endianness */
++		*dbuf8++ = (pix & 0x00ff0000) >> 16;
++		*dbuf8++ = (pix & 0x0000ff00) >> 8;
++		*dbuf8++ = (pix & 0x000000ff) >> 0;
+ 	}
+ }
+ 
++/**
++ * drm_fb_xrgb8888_to_bgr888 - Convert XRGB8888 to BGR888 clip buffer
++ * @dst: Array of BGR888 destination buffers
++ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
++ *             within @dst; can be NULL if scanlines are stored next to each other.
++ * @src: Array of XRGB8888 source buffers
++ * @fb: DRM framebuffer
++ * @clip: Clip rectangle area to copy
++ * @state: Transform and conversion state
++ *
++ * This function copies parts of a framebuffer to display memory and converts the
++ * color format during the process. Destination and framebuffer formats must match. The
++ * parameters @dst, @dst_pitch and @src refer to arrays. Each array must have at
++ * least as many entries as there are planes in @fb's format. Each entry stores the
++ * value for the format's respective color plane at the same index.
++ *
++ * This function does not apply clipping on @dst (i.e. the destination is at the
++ * top-left corner).
++ *
++ * Drivers can use this function for BGR888 devices that don't natively
++ * support XRGB8888.
++ */
++void drm_fb_xrgb8888_to_bgr888(struct iosys_map *dst, const unsigned int *dst_pitch,
++			       const struct iosys_map *src, const struct drm_framebuffer *fb,
++			       const struct drm_rect *clip, struct drm_format_conv_state *state)
++{
++	static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
++		3,
++	};
++
++	drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state,
++		    drm_fb_xrgb8888_to_bgr888_line);
++}
++EXPORT_SYMBOL(drm_fb_xrgb8888_to_bgr888);
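A sketch of a direct caller, assuming dst, src, fb and fmtcnv_state are set up the same way as for the other conversion helpers in this file:

	struct drm_rect damage = DRM_RECT_INIT(0, 0, fb->width, fb->height);

	/* full-frame update; NULL dst_pitch means packed scanlines */
	drm_fb_xrgb8888_to_bgr888(&dst, NULL, &src, fb, &damage, &fmtcnv_state);

Most drivers will instead reach this through drm_fb_blit(), which gains a DRM_FORMAT_BGR888 case further below.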
++
++static void drm_fb_xrgb8888_to_argb8888_line(void *dbuf, const void *sbuf, unsigned int pixels)
++{
++	drm_fb_xfrm_line_32to32(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_argb8888);
++}
++
+ /**
+  * drm_fb_xrgb8888_to_argb8888 - Convert XRGB8888 to ARGB8888 clip buffer
+  * @dst: Array of ARGB8888 destination buffers
+@@ -753,26 +809,36 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_argb8888);
+ 
+ static void drm_fb_xrgb8888_to_abgr8888_line(void *dbuf, const void *sbuf, unsigned int pixels)
+ {
+-	__le32 *dbuf32 = dbuf;
+-	const __le32 *sbuf32 = sbuf;
+-	unsigned int x;
+-	u32 pix;
+-
+-	for (x = 0; x < pixels; x++) {
+-		pix = le32_to_cpu(sbuf32[x]);
+-		pix = ((pix & 0x00ff0000) >> 16) <<  0 |
+-		      ((pix & 0x0000ff00) >>  8) <<  8 |
+-		      ((pix & 0x000000ff) >>  0) << 16 |
+-		      GENMASK(31, 24); /* fill alpha bits */
+-		*dbuf32++ = cpu_to_le32(pix);
+-	}
++	drm_fb_xfrm_line_32to32(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_abgr8888);
+ }
+ 
+-static void drm_fb_xrgb8888_to_abgr8888(struct iosys_map *dst, const unsigned int *dst_pitch,
+-					const struct iosys_map *src,
+-					const struct drm_framebuffer *fb,
+-					const struct drm_rect *clip,
+-					struct drm_format_conv_state *state)
++/**
++ * drm_fb_xrgb8888_to_abgr8888 - Convert XRGB8888 to ABGR8888 clip buffer
++ * @dst: Array of ABGR8888 destination buffers
++ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
++ *             within @dst; can be NULL if scanlines are stored next to each other.
++ * @src: Array of XRGB8888 source buffers
++ * @fb: DRM framebuffer
++ * @clip: Clip rectangle area to copy
++ * @state: Transform and conversion state
++ *
++ * This function copies parts of a framebuffer to display memory and converts the
++ * color format during the process. The parameters @dst, @dst_pitch and @src refer
++ * to arrays. Each array must have at least as many entries as there are planes in
++ * @fb's format. Each entry stores the value for the format's respective color plane
++ * at the same index.
++ *
++ * This function does not apply clipping on @dst (i.e. the destination is at the
++ * top-left corner).
++ *
++ * Drivers can use this function for ABGR8888 devices that don't support XRGB8888
++ * natively. It sets an opaque alpha channel as part of the conversion.
++ */
++void drm_fb_xrgb8888_to_abgr8888(struct iosys_map *dst, const unsigned int *dst_pitch,
++				 const struct iosys_map *src,
++				 const struct drm_framebuffer *fb,
++				 const struct drm_rect *clip,
++				 struct drm_format_conv_state *state)
+ {
+ 	static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
+ 		4,
+@@ -781,29 +847,40 @@ static void drm_fb_xrgb8888_to_abgr8888(struct iosys_map *dst, const unsigned in
+ 	drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state,
+ 		    drm_fb_xrgb8888_to_abgr8888_line);
+ }
++EXPORT_SYMBOL(drm_fb_xrgb8888_to_abgr8888);
+ 
+ static void drm_fb_xrgb8888_to_xbgr8888_line(void *dbuf, const void *sbuf, unsigned int pixels)
+ {
+-	__le32 *dbuf32 = dbuf;
+-	const __le32 *sbuf32 = sbuf;
+-	unsigned int x;
+-	u32 pix;
+-
+-	for (x = 0; x < pixels; x++) {
+-		pix = le32_to_cpu(sbuf32[x]);
+-		pix = ((pix & 0x00ff0000) >> 16) <<  0 |
+-		      ((pix & 0x0000ff00) >>  8) <<  8 |
+-		      ((pix & 0x000000ff) >>  0) << 16 |
+-		      ((pix & 0xff000000) >> 24) << 24;
+-		*dbuf32++ = cpu_to_le32(pix);
+-	}
++	drm_fb_xfrm_line_32to32(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_xbgr8888);
+ }
+ 
+-static void drm_fb_xrgb8888_to_xbgr8888(struct iosys_map *dst, const unsigned int *dst_pitch,
+-					const struct iosys_map *src,
+-					const struct drm_framebuffer *fb,
+-					const struct drm_rect *clip,
+-					struct drm_format_conv_state *state)
++/**
++ * drm_fb_xrgb8888_to_xbgr8888 - Convert XRGB8888 to XBGR8888 clip buffer
++ * @dst: Array of XBGR8888 destination buffers
++ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
++ *             within @dst; can be NULL if scanlines are stored next to each other.
++ * @src: Array of XRGB8888 source buffers
++ * @fb: DRM framebuffer
++ * @clip: Clip rectangle area to copy
++ * @state: Transform and conversion state
++ *
++ * This function copies parts of a framebuffer to display memory and converts the
++ * color format during the process. The parameters @dst, @dst_pitch and @src refer
++ * to arrays. Each array must have at least as many entries as there are planes in
++ * @fb's format. Each entry stores the value for the format's respective color plane
++ * at the same index.
++ *
++ * This function does not apply clipping on @dst (i.e. the destination is at the
++ * top-left corner).
++ *
++ * Drivers can use this function for XBGR8888 devices that don't support XRGB8888
++ * natively.
++ */
++void drm_fb_xrgb8888_to_xbgr8888(struct iosys_map *dst, const unsigned int *dst_pitch,
++				 const struct iosys_map *src,
++				 const struct drm_framebuffer *fb,
++				 const struct drm_rect *clip,
++				 struct drm_format_conv_state *state)
+ {
+ 	static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
+ 		4,
+@@ -812,23 +889,53 @@ static void drm_fb_xrgb8888_to_xbgr8888(struct iosys_map *dst, const unsigned in
+ 	drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state,
+ 		    drm_fb_xrgb8888_to_xbgr8888_line);
+ }
++EXPORT_SYMBOL(drm_fb_xrgb8888_to_xbgr8888);
+ 
+-static void drm_fb_xrgb8888_to_xrgb2101010_line(void *dbuf, const void *sbuf, unsigned int pixels)
++static void drm_fb_xrgb8888_to_bgrx8888_line(void *dbuf, const void *sbuf, unsigned int pixels)
+ {
+-	__le32 *dbuf32 = dbuf;
+-	const __le32 *sbuf32 = sbuf;
+-	unsigned int x;
+-	u32 val32;
+-	u32 pix;
++	drm_fb_xfrm_line_32to32(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_bgrx8888);
++}
+ 
+-	for (x = 0; x < pixels; x++) {
+-		pix = le32_to_cpu(sbuf32[x]);
+-		val32 = ((pix & 0x000000FF) << 2) |
+-			((pix & 0x0000FF00) << 4) |
+-			((pix & 0x00FF0000) << 6);
+-		pix = val32 | ((val32 >> 8) & 0x00300C03);
+-		*dbuf32++ = cpu_to_le32(pix);
+-	}
++/**
++ * drm_fb_xrgb8888_to_bgrx8888 - Convert XRGB8888 to BGRX8888 clip buffer
++ * @dst: Array of BGRX8888 destination buffers
++ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
++ *             within @dst; can be NULL if scanlines are stored next to each other.
++ * @src: Array of XRGB8888 source buffer
++ * @fb: DRM framebuffer
++ * @clip: Clip rectangle area to copy
++ * @state: Transform and conversion state
++ *
++ * This function copies parts of a framebuffer to display memory and converts the
++ * color format during the process. The parameters @dst, @dst_pitch and @src refer
++ * to arrays. Each array must have at least as many entries as there are planes in
++ * @fb's format. Each entry stores the value for the format's respective color plane
++ * at the same index.
++ *
++ * This function does not apply clipping on @dst (i.e. the destination is at the
++ * top-left corner).
++ *
++ * Drivers can use this function for BGRX8888 devices that don't support XRGB8888
++ * natively.
++ */
++void drm_fb_xrgb8888_to_bgrx8888(struct iosys_map *dst, const unsigned int *dst_pitch,
++				 const struct iosys_map *src,
++				 const struct drm_framebuffer *fb,
++				 const struct drm_rect *clip,
++				 struct drm_format_conv_state *state)
++{
++	static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
++		4,
++	};
++
++	drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state,
++		    drm_fb_xrgb8888_to_bgrx8888_line);
++}
++EXPORT_SYMBOL(drm_fb_xrgb8888_to_bgrx8888);
++
++static void drm_fb_xrgb8888_to_xrgb2101010_line(void *dbuf, const void *sbuf, unsigned int pixels)
++{
++	drm_fb_xfrm_line_32to32(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_xrgb2101010);
+ }
+ 
+ /**
+@@ -869,21 +976,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_xrgb2101010);
+ 
+ static void drm_fb_xrgb8888_to_argb2101010_line(void *dbuf, const void *sbuf, unsigned int pixels)
+ {
+-	__le32 *dbuf32 = dbuf;
+-	const __le32 *sbuf32 = sbuf;
+-	unsigned int x;
+-	u32 val32;
+-	u32 pix;
+-
+-	for (x = 0; x < pixels; x++) {
+-		pix = le32_to_cpu(sbuf32[x]);
+-		val32 = ((pix & 0x000000ff) << 2) |
+-			((pix & 0x0000ff00) << 4) |
+-			((pix & 0x00ff0000) << 6);
+-		pix = GENMASK(31, 30) | /* set alpha bits */
+-		      val32 | ((val32 >> 8) & 0x00300c03);
+-		*dbuf32++ = cpu_to_le32(pix);
+-	}
++	drm_fb_xfrm_line_32to32(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_argb2101010);
+ }
+ 
+ /**
+@@ -1035,6 +1128,9 @@ int drm_fb_blit(struct iosys_map *dst, const unsigned int *dst_pitch, uint32_t d
+ 		} else if (dst_format == DRM_FORMAT_RGB888) {
+ 			drm_fb_xrgb8888_to_rgb888(dst, dst_pitch, src, fb, clip, state);
+ 			return 0;
++		} else if (dst_format == DRM_FORMAT_BGR888) {
++			drm_fb_xrgb8888_to_bgr888(dst, dst_pitch, src, fb, clip, state);
++			return 0;
+ 		} else if (dst_format == DRM_FORMAT_ARGB8888) {
+ 			drm_fb_xrgb8888_to_argb8888(dst, dst_pitch, src, fb, clip, state);
+ 			return 0;
+diff --git a/drivers/gpu/drm/drm_format_internal.h b/drivers/gpu/drm/drm_format_internal.h
+new file mode 100644
+index 00000000000000..f06f09989ddc0b
+--- /dev/null
++++ b/drivers/gpu/drm/drm_format_internal.h
+@@ -0,0 +1,127 @@
++/* SPDX-License-Identifier: GPL-2.0 or MIT */
++
++#ifndef DRM_FORMAT_INTERNAL_H
++#define DRM_FORMAT_INTERNAL_H
++
++#include <linux/bits.h>
++#include <linux/types.h>
++
++/*
++ * Each pixel-format conversion helper takes a raw pixel in a
++ * specific input format and returns a raw pixel in a specific
++ * output format. All pixels are in little-endian byte order.
++ *
++ * Function names are
++ *
++ *   drm_pixel_<input>_to_<output>_<algorithm>()
++ *
++ * where <input> and <output> refer to pixel formats. The
++ * <algorithm> is optional and hints to the method used for the
++ * conversion. Helpers with no algorithm given apply pixel-bit
++ * shifting.
++ *
++ * The argument type is u32. We expect this to be wide enough to
++ * hold all conversion input from 32-bit RGB to any output format.
++ * The Linux kernel should avoid format conversion for anything
++ * but XRGB8888 input data. Converting from other formats can still
++ * be acceptable in some cases.
++ *
++ * The return type is u32. It is wide enough to hold all conversion
++ * output from XRGB8888. For output formats wider than 32 bit, a
++ * return type of u64 would be acceptable.
++ */
++
++/*
++ * Conversions from XRGB8888
++ */
++
++static inline u32 drm_pixel_xrgb8888_to_rgb565(u32 pix)
++{
++	return ((pix & 0x00f80000) >> 8) |
++	       ((pix & 0x0000fc00) >> 5) |
++	       ((pix & 0x000000f8) >> 3);
++}
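A worked example of the RGB565 packing for pix = 0x00ff8040 (R = 0xff, G = 0x80, B = 0x40); the low bits of each 8-bit channel are simply truncated:

	(0x00f80000 >> 8) = 0xf800	/* top 5 red bits   -> bits 15:11 */
	(0x00008000 >> 5) = 0x0400	/* top 6 green bits -> bits 10:5  */
	(0x00000040 >> 3) = 0x0008	/* top 5 blue bits  -> bits 4:0   */
	0xf800 | 0x0400 | 0x0008 = 0xfc08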
++
++static inline u32 drm_pixel_xrgb8888_to_rgbx5551(u32 pix)
++{
++	return ((pix & 0x00f80000) >> 8) |
++	       ((pix & 0x0000f800) >> 5) |
++	       ((pix & 0x000000f8) >> 2);
++}
++
++static inline u32 drm_pixel_xrgb8888_to_rgba5551(u32 pix)
++{
++	return drm_pixel_xrgb8888_to_rgbx5551(pix) |
++	       BIT(0); /* set alpha bit */
++}
++
++static inline u32 drm_pixel_xrgb8888_to_xrgb1555(u32 pix)
++{
++	return ((pix & 0x00f80000) >> 9) |
++	       ((pix & 0x0000f800) >> 6) |
++	       ((pix & 0x000000f8) >> 3);
++}
++
++static inline u32 drm_pixel_xrgb8888_to_argb1555(u32 pix)
++{
++	return BIT(15) | /* set alpha bit */
++	       drm_pixel_xrgb8888_to_xrgb1555(pix);
++}
++
++static inline u32 drm_pixel_xrgb8888_to_argb8888(u32 pix)
++{
++	return GENMASK(31, 24) | /* fill alpha bits */
++	       pix;
++}
++
++static inline u32 drm_pixel_xrgb8888_to_xbgr8888(u32 pix)
++{
++	return ((pix & 0xff000000)) | /* also copy filler bits */
++	       ((pix & 0x00ff0000) >> 16) |
++	       ((pix & 0x0000ff00)) |
++	       ((pix & 0x000000ff) << 16);
++}
++
++static inline u32 drm_pixel_xrgb8888_to_bgrx8888(u32 pix)
++{
++	return ((pix & 0xff000000) >> 24) | /* also copy filler bits */
++	       ((pix & 0x00ff0000) >> 8) |
++	       ((pix & 0x0000ff00) << 8) |
++	       ((pix & 0x000000ff) << 24);
++}
++
++static inline u32 drm_pixel_xrgb8888_to_abgr8888(u32 pix)
++{
++	return GENMASK(31, 24) | /* fill alpha bits */
++	       drm_pixel_xrgb8888_to_xbgr8888(pix);
++}
++
++static inline u32 drm_pixel_xrgb8888_to_xrgb2101010(u32 pix)
++{
++	pix = ((pix & 0x000000ff) << 2) |
++	      ((pix & 0x0000ff00) << 4) |
++	      ((pix & 0x00ff0000) << 6);
++	return pix | ((pix >> 8) & 0x00300c03);
++}
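Widening 8-bit channels to 10 bits is done by shifting each channel into place and then replicating its top two bits into the two new low bits; the mask 0x00300c03 selects exactly those low bit pairs. A worked example for a pure-blue input, pix = 0x000000ff:

	pix = 0xff << 2 = 0x3fc;	/* blue in bits 9:2, bits 1:0 clear */
	pix | ((pix >> 8) & 0x00300c03)	/* 0x3fc | 0x3 = 0x3ff */

The replication maps 8-bit full scale to 10-bit full scale (0x3ff rather than 0x3fc), so white stays white.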
++
++static inline u32 drm_pixel_xrgb8888_to_argb2101010(u32 pix)
++{
++	return GENMASK(31, 30) | /* set alpha bits */
++	       drm_pixel_xrgb8888_to_xrgb2101010(pix);
++}
++
++static inline u32 drm_pixel_xrgb8888_to_xbgr2101010(u32 pix)
++{
++	pix = ((pix & 0x00ff0000) >> 14) |
++	      ((pix & 0x0000ff00) << 4) |
++	      ((pix & 0x000000ff) << 22);
++	return pix | ((pix >> 8) & 0x00300c03);
++}
++
++static inline u32 drm_pixel_xrgb8888_to_abgr2101010(u32 pix)
++{
++	return GENMASK(31, 30) | /* set alpha bits */
++	       drm_pixel_xrgb8888_to_xbgr2101010(pix);
++}
++
++#endif
+diff --git a/drivers/gpu/drm/drm_panic.c b/drivers/gpu/drm/drm_panic.c
+index 0a9ecc1380d2a4..f128d345b16dfb 100644
+--- a/drivers/gpu/drm/drm_panic.c
++++ b/drivers/gpu/drm/drm_panic.c
+@@ -31,6 +31,7 @@
+ #include <drm/drm_rect.h>
+ 
+ #include "drm_crtc_internal.h"
++#include "drm_draw_internal.h"
+ 
+ MODULE_AUTHOR("Jocelyn Falempe");
+ MODULE_DESCRIPTION("DRM panic handler");
+@@ -139,181 +140,8 @@ device_initcall(drm_panic_setup_logo);
+ #endif
+ 
+ /*
+- * Color conversion
++ *  Blit & Fill functions
+  */
+-
+-static u16 convert_xrgb8888_to_rgb565(u32 pix)
+-{
+-	return ((pix & 0x00F80000) >> 8) |
+-	       ((pix & 0x0000FC00) >> 5) |
+-	       ((pix & 0x000000F8) >> 3);
+-}
+-
+-static u16 convert_xrgb8888_to_rgba5551(u32 pix)
+-{
+-	return ((pix & 0x00f80000) >> 8) |
+-	       ((pix & 0x0000f800) >> 5) |
+-	       ((pix & 0x000000f8) >> 2) |
+-	       BIT(0); /* set alpha bit */
+-}
+-
+-static u16 convert_xrgb8888_to_xrgb1555(u32 pix)
+-{
+-	return ((pix & 0x00f80000) >> 9) |
+-	       ((pix & 0x0000f800) >> 6) |
+-	       ((pix & 0x000000f8) >> 3);
+-}
+-
+-static u16 convert_xrgb8888_to_argb1555(u32 pix)
+-{
+-	return BIT(15) | /* set alpha bit */
+-	       ((pix & 0x00f80000) >> 9) |
+-	       ((pix & 0x0000f800) >> 6) |
+-	       ((pix & 0x000000f8) >> 3);
+-}
+-
+-static u32 convert_xrgb8888_to_argb8888(u32 pix)
+-{
+-	return pix | GENMASK(31, 24); /* fill alpha bits */
+-}
+-
+-static u32 convert_xrgb8888_to_xbgr8888(u32 pix)
+-{
+-	return ((pix & 0x00ff0000) >> 16) <<  0 |
+-	       ((pix & 0x0000ff00) >>  8) <<  8 |
+-	       ((pix & 0x000000ff) >>  0) << 16 |
+-	       ((pix & 0xff000000) >> 24) << 24;
+-}
+-
+-static u32 convert_xrgb8888_to_abgr8888(u32 pix)
+-{
+-	return ((pix & 0x00ff0000) >> 16) <<  0 |
+-	       ((pix & 0x0000ff00) >>  8) <<  8 |
+-	       ((pix & 0x000000ff) >>  0) << 16 |
+-	       GENMASK(31, 24); /* fill alpha bits */
+-}
+-
+-static u32 convert_xrgb8888_to_xrgb2101010(u32 pix)
+-{
+-	pix = ((pix & 0x000000FF) << 2) |
+-	      ((pix & 0x0000FF00) << 4) |
+-	      ((pix & 0x00FF0000) << 6);
+-	return pix | ((pix >> 8) & 0x00300C03);
+-}
+-
+-static u32 convert_xrgb8888_to_argb2101010(u32 pix)
+-{
+-	pix = ((pix & 0x000000FF) << 2) |
+-	      ((pix & 0x0000FF00) << 4) |
+-	      ((pix & 0x00FF0000) << 6);
+-	return GENMASK(31, 30) /* set alpha bits */ | pix | ((pix >> 8) & 0x00300C03);
+-}
+-
+-static u32 convert_xrgb8888_to_abgr2101010(u32 pix)
+-{
+-	pix = ((pix & 0x00FF0000) >> 14) |
+-	      ((pix & 0x0000FF00) << 4) |
+-	      ((pix & 0x000000FF) << 22);
+-	return GENMASK(31, 30) /* set alpha bits */ | pix | ((pix >> 8) & 0x00300C03);
+-}
+-
+-/*
+- * convert_from_xrgb8888 - convert one pixel from xrgb8888 to the desired format
+- * @color: input color, in xrgb8888 format
+- * @format: output format
+- *
+- * Returns:
+- * Color in the format specified, casted to u32.
+- * Or 0 if the format is not supported.
+- */
+-static u32 convert_from_xrgb8888(u32 color, u32 format)
+-{
+-	switch (format) {
+-	case DRM_FORMAT_RGB565:
+-		return convert_xrgb8888_to_rgb565(color);
+-	case DRM_FORMAT_RGBA5551:
+-		return convert_xrgb8888_to_rgba5551(color);
+-	case DRM_FORMAT_XRGB1555:
+-		return convert_xrgb8888_to_xrgb1555(color);
+-	case DRM_FORMAT_ARGB1555:
+-		return convert_xrgb8888_to_argb1555(color);
+-	case DRM_FORMAT_RGB888:
+-	case DRM_FORMAT_XRGB8888:
+-		return color;
+-	case DRM_FORMAT_ARGB8888:
+-		return convert_xrgb8888_to_argb8888(color);
+-	case DRM_FORMAT_XBGR8888:
+-		return convert_xrgb8888_to_xbgr8888(color);
+-	case DRM_FORMAT_ABGR8888:
+-		return convert_xrgb8888_to_abgr8888(color);
+-	case DRM_FORMAT_XRGB2101010:
+-		return convert_xrgb8888_to_xrgb2101010(color);
+-	case DRM_FORMAT_ARGB2101010:
+-		return convert_xrgb8888_to_argb2101010(color);
+-	case DRM_FORMAT_ABGR2101010:
+-		return convert_xrgb8888_to_abgr2101010(color);
+-	default:
+-		WARN_ONCE(1, "Can't convert to %p4cc\n", &format);
+-		return 0;
+-	}
+-}
+-
+-/*
+- * Blit & Fill
+- */
+-/* check if the pixel at coord x,y is 1 (foreground) or 0 (background) */
+-static bool drm_panic_is_pixel_fg(const u8 *sbuf8, unsigned int spitch, int x, int y)
+-{
+-	return (sbuf8[(y * spitch) + x / 8] & (0x80 >> (x % 8))) != 0;
+-}
+-
+-static void drm_panic_blit16(struct iosys_map *dmap, unsigned int dpitch,
+-			     const u8 *sbuf8, unsigned int spitch,
+-			     unsigned int height, unsigned int width,
+-			     unsigned int scale, u16 fg16)
+-{
+-	unsigned int y, x;
+-
+-	for (y = 0; y < height; y++)
+-		for (x = 0; x < width; x++)
+-			if (drm_panic_is_pixel_fg(sbuf8, spitch, x / scale, y / scale))
+-				iosys_map_wr(dmap, y * dpitch + x * sizeof(u16), u16, fg16);
+-}
+-
+-static void drm_panic_blit24(struct iosys_map *dmap, unsigned int dpitch,
+-			     const u8 *sbuf8, unsigned int spitch,
+-			     unsigned int height, unsigned int width,
+-			     unsigned int scale, u32 fg32)
+-{
+-	unsigned int y, x;
+-
+-	for (y = 0; y < height; y++) {
+-		for (x = 0; x < width; x++) {
+-			u32 off = y * dpitch + x * 3;
+-
+-			if (drm_panic_is_pixel_fg(sbuf8, spitch, x / scale, y / scale)) {
+-				/* write blue-green-red to output in little endianness */
+-				iosys_map_wr(dmap, off, u8, (fg32 & 0x000000FF) >> 0);
+-				iosys_map_wr(dmap, off + 1, u8, (fg32 & 0x0000FF00) >> 8);
+-				iosys_map_wr(dmap, off + 2, u8, (fg32 & 0x00FF0000) >> 16);
+-			}
+-		}
+-	}
+-}
+-
+-static void drm_panic_blit32(struct iosys_map *dmap, unsigned int dpitch,
+-			     const u8 *sbuf8, unsigned int spitch,
+-			     unsigned int height, unsigned int width,
+-			     unsigned int scale, u32 fg32)
+-{
+-	unsigned int y, x;
+-
+-	for (y = 0; y < height; y++)
+-		for (x = 0; x < width; x++)
+-			if (drm_panic_is_pixel_fg(sbuf8, spitch, x / scale, y / scale))
+-				iosys_map_wr(dmap, y * dpitch + x * sizeof(u32), u32, fg32);
+-}
+-
+ static void drm_panic_blit_pixel(struct drm_scanout_buffer *sb, struct drm_rect *clip,
+ 				 const u8 *sbuf8, unsigned int spitch, unsigned int scale,
+ 				 u32 fg_color)
+@@ -322,7 +150,7 @@ static void drm_panic_blit_pixel(struct drm_scanout_buffer *sb, struct drm_rect
+ 
+ 	for (y = 0; y < drm_rect_height(clip); y++)
+ 		for (x = 0; x < drm_rect_width(clip); x++)
+-			if (drm_panic_is_pixel_fg(sbuf8, spitch, x / scale, y / scale))
++			if (drm_draw_is_pixel_fg(sbuf8, spitch, x / scale, y / scale))
+ 				sb->set_pixel(sb, clip->x1 + x, clip->y1 + y, fg_color);
+ }
+ 
+@@ -354,62 +182,22 @@ static void drm_panic_blit(struct drm_scanout_buffer *sb, struct drm_rect *clip,
+ 
+ 	switch (sb->format->cpp[0]) {
+ 	case 2:
+-		drm_panic_blit16(&map, sb->pitch[0], sbuf8, spitch,
+-				 drm_rect_height(clip), drm_rect_width(clip), scale, fg_color);
++		drm_draw_blit16(&map, sb->pitch[0], sbuf8, spitch,
++				drm_rect_height(clip), drm_rect_width(clip), scale, fg_color);
+ 	break;
+ 	case 3:
+-		drm_panic_blit24(&map, sb->pitch[0], sbuf8, spitch,
+-				 drm_rect_height(clip), drm_rect_width(clip), scale, fg_color);
++		drm_draw_blit24(&map, sb->pitch[0], sbuf8, spitch,
++				drm_rect_height(clip), drm_rect_width(clip), scale, fg_color);
+ 	break;
+ 	case 4:
+-		drm_panic_blit32(&map, sb->pitch[0], sbuf8, spitch,
+-				 drm_rect_height(clip), drm_rect_width(clip), scale, fg_color);
++		drm_draw_blit32(&map, sb->pitch[0], sbuf8, spitch,
++				drm_rect_height(clip), drm_rect_width(clip), scale, fg_color);
+ 	break;
+ 	default:
+ 		WARN_ONCE(1, "Can't blit with pixel width %d\n", sb->format->cpp[0]);
+ 	}
+ }
+ 
+-static void drm_panic_fill16(struct iosys_map *dmap, unsigned int dpitch,
+-			     unsigned int height, unsigned int width,
+-			     u16 color)
+-{
+-	unsigned int y, x;
+-
+-	for (y = 0; y < height; y++)
+-		for (x = 0; x < width; x++)
+-			iosys_map_wr(dmap, y * dpitch + x * sizeof(u16), u16, color);
+-}
+-
+-static void drm_panic_fill24(struct iosys_map *dmap, unsigned int dpitch,
+-			     unsigned int height, unsigned int width,
+-			     u32 color)
+-{
+-	unsigned int y, x;
+-
+-	for (y = 0; y < height; y++) {
+-		for (x = 0; x < width; x++) {
+-			unsigned int off = y * dpitch + x * 3;
+-
+-			/* write blue-green-red to output in little endianness */
+-			iosys_map_wr(dmap, off, u8, (color & 0x000000FF) >> 0);
+-			iosys_map_wr(dmap, off + 1, u8, (color & 0x0000FF00) >> 8);
+-			iosys_map_wr(dmap, off + 2, u8, (color & 0x00FF0000) >> 16);
+-		}
+-	}
+-}
+-
+-static void drm_panic_fill32(struct iosys_map *dmap, unsigned int dpitch,
+-			     unsigned int height, unsigned int width,
+-			     u32 color)
+-{
+-	unsigned int y, x;
+-
+-	for (y = 0; y < height; y++)
+-		for (x = 0; x < width; x++)
+-			iosys_map_wr(dmap, y * dpitch + x * sizeof(u32), u32, color);
+-}
+-
+ static void drm_panic_fill_pixel(struct drm_scanout_buffer *sb,
+ 				 struct drm_rect *clip,
+ 				 u32 color)
+@@ -442,27 +230,22 @@ static void drm_panic_fill(struct drm_scanout_buffer *sb, struct drm_rect *clip,
+ 
+ 	switch (sb->format->cpp[0]) {
+ 	case 2:
+-		drm_panic_fill16(&map, sb->pitch[0], drm_rect_height(clip),
+-				 drm_rect_width(clip), color);
++		drm_draw_fill16(&map, sb->pitch[0], drm_rect_height(clip),
++				drm_rect_width(clip), color);
+ 	break;
+ 	case 3:
+-		drm_panic_fill24(&map, sb->pitch[0], drm_rect_height(clip),
+-				 drm_rect_width(clip), color);
++		drm_draw_fill24(&map, sb->pitch[0], drm_rect_height(clip),
++				drm_rect_width(clip), color);
+ 	break;
+ 	case 4:
+-		drm_panic_fill32(&map, sb->pitch[0], drm_rect_height(clip),
+-				 drm_rect_width(clip), color);
++		drm_draw_fill32(&map, sb->pitch[0], drm_rect_height(clip),
++				drm_rect_width(clip), color);
+ 	break;
+ 	default:
+ 		WARN_ONCE(1, "Can't fill with pixel width %d\n", sb->format->cpp[0]);
+ 	}
+ }
+ 
+-static const u8 *get_char_bitmap(const struct font_desc *font, char c, size_t font_pitch)
+-{
+-	return font->data + (c * font->height) * font_pitch;
+-}
+-
+ static unsigned int get_max_line_len(const struct drm_panic_line *lines, int len)
+ {
+ 	int i;
+@@ -501,7 +284,7 @@ static void draw_txt_rectangle(struct drm_scanout_buffer *sb,
+ 			rec.x1 += (drm_rect_width(clip) - (line_len * font->width)) / 2;
+ 
+ 		for (j = 0; j < line_len; j++) {
+-			src = get_char_bitmap(font, msg[i].txt[j], font_pitch);
++			src = drm_draw_get_char_bitmap(font, msg[i].txt[j], font_pitch);
+ 			rec.x2 = rec.x1 + font->width;
+ 			drm_panic_blit(sb, &rec, src, font_pitch, 1, color);
+ 			rec.x1 += font->width;
+@@ -533,8 +316,10 @@ static void drm_panic_logo_draw(struct drm_scanout_buffer *sb, struct drm_rect *
+ 
+ static void draw_panic_static_user(struct drm_scanout_buffer *sb)
+ {
+-	u32 fg_color = convert_from_xrgb8888(CONFIG_DRM_PANIC_FOREGROUND_COLOR, sb->format->format);
+-	u32 bg_color = convert_from_xrgb8888(CONFIG_DRM_PANIC_BACKGROUND_COLOR, sb->format->format);
++	u32 fg_color = drm_draw_color_from_xrgb8888(CONFIG_DRM_PANIC_FOREGROUND_COLOR,
++						    sb->format->format);
++	u32 bg_color = drm_draw_color_from_xrgb8888(CONFIG_DRM_PANIC_BACKGROUND_COLOR,
++						    sb->format->format);
+ 	const struct font_desc *font = get_default_font(sb->width, sb->height, NULL, NULL);
+ 	struct drm_rect r_screen, r_logo, r_msg;
+ 	unsigned int msg_width, msg_height;
+@@ -600,8 +385,10 @@ static int draw_line_with_wrap(struct drm_scanout_buffer *sb, const struct font_
+  */
+ static void draw_panic_static_kmsg(struct drm_scanout_buffer *sb)
+ {
+-	u32 fg_color = convert_from_xrgb8888(CONFIG_DRM_PANIC_FOREGROUND_COLOR, sb->format->format);
+-	u32 bg_color = convert_from_xrgb8888(CONFIG_DRM_PANIC_BACKGROUND_COLOR, sb->format->format);
++	u32 fg_color = drm_draw_color_from_xrgb8888(CONFIG_DRM_PANIC_FOREGROUND_COLOR,
++						    sb->format->format);
++	u32 bg_color = drm_draw_color_from_xrgb8888(CONFIG_DRM_PANIC_BACKGROUND_COLOR,
++						    sb->format->format);
+ 	const struct font_desc *font = get_default_font(sb->width, sb->height, NULL, NULL);
+ 	struct drm_rect r_screen = DRM_RECT_INIT(0, 0, sb->width, sb->height);
+ 	struct kmsg_dump_iter iter;
+@@ -791,8 +578,10 @@ static int drm_panic_get_qr_code(u8 **qr_image)
+  */
+ static int _draw_panic_static_qr_code(struct drm_scanout_buffer *sb)
+ {
+-	u32 fg_color = convert_from_xrgb8888(CONFIG_DRM_PANIC_FOREGROUND_COLOR, sb->format->format);
+-	u32 bg_color = convert_from_xrgb8888(CONFIG_DRM_PANIC_BACKGROUND_COLOR, sb->format->format);
++	u32 fg_color = drm_draw_color_from_xrgb8888(CONFIG_DRM_PANIC_FOREGROUND_COLOR,
++						    sb->format->format);
++	u32 bg_color = drm_draw_color_from_xrgb8888(CONFIG_DRM_PANIC_BACKGROUND_COLOR,
++						    sb->format->format);
+ 	const struct font_desc *font = get_default_font(sb->width, sb->height, NULL, NULL);
+ 	struct drm_rect r_screen, r_logo, r_msg, r_qr, r_qr_canvas;
+ 	unsigned int max_qr_size, scale;
+@@ -878,7 +667,7 @@ static bool drm_panic_is_format_supported(const struct drm_format_info *format)
+ {
+ 	if (format->num_planes != 1)
+ 		return false;
+-	return convert_from_xrgb8888(0xffffff, format->format) != 0;
++	return drm_draw_color_from_xrgb8888(0xffffff, format->format) != 0;
+ }
+ 
+ static void draw_panic_dispatch(struct drm_scanout_buffer *sb)
+diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
+index 9f9b19ea058799..1640609cdbc0e3 100644
+--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
++++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
+@@ -258,13 +258,13 @@ static int hibmc_load(struct drm_device *dev)
+ 
+ 	ret = hibmc_hw_init(priv);
+ 	if (ret)
+-		goto err;
++		return ret;
+ 
+ 	ret = drmm_vram_helper_init(dev, pci_resource_start(pdev, 0),
+ 				    pci_resource_len(pdev, 0));
+ 	if (ret) {
+ 		drm_err(dev, "Error initializing VRAM MM; %d\n", ret);
+-		goto err;
++		return ret;
+ 	}
+ 
+ 	ret = hibmc_kms_init(priv);
+diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
+index 6b566f3aeecbca..6eb0d41a0f688a 100644
+--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
++++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
+@@ -20,9 +20,10 @@
+ 
+ #include <drm/drm_framebuffer.h>
+ 
+-struct hibmc_connector {
+-	struct drm_connector base;
+-
++struct hibmc_vdac {
++	struct drm_device *dev;
++	struct drm_encoder encoder;
++	struct drm_connector connector;
+ 	struct i2c_adapter adapter;
+ 	struct i2c_algo_bit_data bit_data;
+ };
+@@ -35,13 +36,12 @@ struct hibmc_drm_private {
+ 	struct drm_device dev;
+ 	struct drm_plane primary_plane;
+ 	struct drm_crtc crtc;
+-	struct drm_encoder encoder;
+-	struct hibmc_connector connector;
++	struct hibmc_vdac vdac;
+ };
+ 
+-static inline struct hibmc_connector *to_hibmc_connector(struct drm_connector *connector)
++static inline struct hibmc_vdac *to_hibmc_vdac(struct drm_connector *connector)
+ {
+-	return container_of(connector, struct hibmc_connector, base);
++	return container_of(connector, struct hibmc_vdac, connector);
+ }
+ 
+ static inline struct hibmc_drm_private *to_hibmc_drm_private(struct drm_device *dev)
+@@ -57,6 +57,7 @@ void hibmc_set_current_gate(struct hibmc_drm_private *priv,
+ int hibmc_de_init(struct hibmc_drm_private *priv);
+ int hibmc_vdac_init(struct hibmc_drm_private *priv);
+ 
+-int hibmc_ddc_create(struct drm_device *drm_dev, struct hibmc_connector *connector);
++int hibmc_ddc_create(struct drm_device *drm_dev, struct hibmc_vdac *vdac);
++void hibmc_ddc_del(struct hibmc_vdac *vdac);
+ 
+ #endif
+diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c
+index e6e48651c15c63..44860011855eb6 100644
+--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c
++++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c
+@@ -25,8 +25,8 @@
+ 
+ static void hibmc_set_i2c_signal(void *data, u32 mask, int value)
+ {
+-	struct hibmc_connector *hibmc_connector = data;
+-	struct hibmc_drm_private *priv = to_hibmc_drm_private(hibmc_connector->base.dev);
++	struct hibmc_vdac *vdac = data;
++	struct hibmc_drm_private *priv = to_hibmc_drm_private(vdac->connector.dev);
+ 	u32 tmp_dir = readl(priv->mmio + GPIO_DATA_DIRECTION);
+ 
+ 	if (value) {
+@@ -45,8 +45,8 @@ static void hibmc_set_i2c_signal(void *data, u32 mask, int value)
+ 
+ static int hibmc_get_i2c_signal(void *data, u32 mask)
+ {
+-	struct hibmc_connector *hibmc_connector = data;
+-	struct hibmc_drm_private *priv = to_hibmc_drm_private(hibmc_connector->base.dev);
++	struct hibmc_vdac *vdac = data;
++	struct hibmc_drm_private *priv = to_hibmc_drm_private(vdac->connector.dev);
+ 	u32 tmp_dir = readl(priv->mmio + GPIO_DATA_DIRECTION);
+ 
+ 	if ((tmp_dir & mask) != mask) {
+@@ -77,22 +77,26 @@ static int hibmc_ddc_getscl(void *data)
+ 	return hibmc_get_i2c_signal(data, I2C_SCL_MASK);
+ }
+ 
+-int hibmc_ddc_create(struct drm_device *drm_dev,
+-		     struct hibmc_connector *connector)
++int hibmc_ddc_create(struct drm_device *drm_dev, struct hibmc_vdac *vdac)
+ {
+-	connector->adapter.owner = THIS_MODULE;
+-	snprintf(connector->adapter.name, I2C_NAME_SIZE, "HIS i2c bit bus");
+-	connector->adapter.dev.parent = drm_dev->dev;
+-	i2c_set_adapdata(&connector->adapter, connector);
+-	connector->adapter.algo_data = &connector->bit_data;
+-
+-	connector->bit_data.udelay = 20;
+-	connector->bit_data.timeout = usecs_to_jiffies(2000);
+-	connector->bit_data.data = connector;
+-	connector->bit_data.setsda = hibmc_ddc_setsda;
+-	connector->bit_data.setscl = hibmc_ddc_setscl;
+-	connector->bit_data.getsda = hibmc_ddc_getsda;
+-	connector->bit_data.getscl = hibmc_ddc_getscl;
+-
+-	return i2c_bit_add_bus(&connector->adapter);
++	vdac->adapter.owner = THIS_MODULE;
++	snprintf(vdac->adapter.name, I2C_NAME_SIZE, "HIS i2c bit bus");
++	vdac->adapter.dev.parent = drm_dev->dev;
++	i2c_set_adapdata(&vdac->adapter, vdac);
++	vdac->adapter.algo_data = &vdac->bit_data;
++
++	vdac->bit_data.udelay = 20;
++	vdac->bit_data.timeout = usecs_to_jiffies(2000);
++	vdac->bit_data.data = vdac;
++	vdac->bit_data.setsda = hibmc_ddc_setsda;
++	vdac->bit_data.setscl = hibmc_ddc_setscl;
++	vdac->bit_data.getsda = hibmc_ddc_getsda;
++	vdac->bit_data.getscl = hibmc_ddc_getscl;
++
++	return i2c_bit_add_bus(&vdac->adapter);
++}
++
++void hibmc_ddc_del(struct hibmc_vdac *vdac)
++{
++	i2c_del_adapter(&vdac->adapter);
+ }
+diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
+index 409c551c92af8c..9e29386700c87e 100644
+--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
++++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
+@@ -24,11 +24,11 @@
+ 
+ static int hibmc_connector_get_modes(struct drm_connector *connector)
+ {
+-	struct hibmc_connector *hibmc_connector = to_hibmc_connector(connector);
++	struct hibmc_vdac *vdac = to_hibmc_vdac(connector);
+ 	const struct drm_edid *drm_edid;
+ 	int count;
+ 
+-	drm_edid = drm_edid_read_ddc(connector, &hibmc_connector->adapter);
++	drm_edid = drm_edid_read_ddc(connector, &vdac->adapter);
+ 
+ 	drm_edid_connector_update(connector, drm_edid);
+ 
+@@ -51,9 +51,9 @@ static int hibmc_connector_get_modes(struct drm_connector *connector)
+ 
+ static void hibmc_connector_destroy(struct drm_connector *connector)
+ {
+-	struct hibmc_connector *hibmc_connector = to_hibmc_connector(connector);
++	struct hibmc_vdac *vdac = to_hibmc_vdac(connector);
+ 
+-	i2c_del_adapter(&hibmc_connector->adapter);
++	hibmc_ddc_del(vdac);
+ 	drm_connector_cleanup(connector);
+ }
+ 
+@@ -93,23 +93,23 @@ static const struct drm_encoder_helper_funcs hibmc_encoder_helper_funcs = {
+ int hibmc_vdac_init(struct hibmc_drm_private *priv)
+ {
+ 	struct drm_device *dev = &priv->dev;
+-	struct hibmc_connector *hibmc_connector = &priv->connector;
+-	struct drm_encoder *encoder = &priv->encoder;
++	struct hibmc_vdac *vdac = &priv->vdac;
++	struct drm_encoder *encoder = &vdac->encoder;
+ 	struct drm_crtc *crtc = &priv->crtc;
+-	struct drm_connector *connector = &hibmc_connector->base;
++	struct drm_connector *connector = &vdac->connector;
+ 	int ret;
+ 
+-	ret = hibmc_ddc_create(dev, hibmc_connector);
++	ret = hibmc_ddc_create(dev, vdac);
+ 	if (ret) {
+ 		drm_err(dev, "failed to create ddc: %d\n", ret);
+ 		return ret;
+ 	}
+ 
+ 	encoder->possible_crtcs = drm_crtc_mask(crtc);
+-	ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_DAC);
++	ret = drmm_encoder_init(dev, encoder, NULL, DRM_MODE_ENCODER_DAC, NULL);
+ 	if (ret) {
+ 		drm_err(dev, "failed to init encoder: %d\n", ret);
+-		return ret;
++		goto err;
+ 	}
+ 
+ 	drm_encoder_helper_add(encoder, &hibmc_encoder_helper_funcs);
+@@ -117,10 +117,10 @@ int hibmc_vdac_init(struct hibmc_drm_private *priv)
+ 	ret = drm_connector_init_with_ddc(dev, connector,
+ 					  &hibmc_connector_funcs,
+ 					  DRM_MODE_CONNECTOR_VGA,
+-					  &hibmc_connector->adapter);
++					  &vdac->adapter);
+ 	if (ret) {
+ 		drm_err(dev, "failed to init connector: %d\n", ret);
+-		return ret;
++		goto err;
+ 	}
+ 
+ 	drm_connector_helper_add(connector, &hibmc_connector_helper_funcs);
+@@ -128,4 +128,9 @@ int hibmc_vdac_init(struct hibmc_drm_private *priv)
+ 	drm_connector_attach_encoder(connector, encoder);
+ 
+ 	return 0;
++
++err:
++	hibmc_ddc_del(vdac);
++
++	return ret;
+ }
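The new err label exists because the DDC adapter is the one resource here that is not automatically managed: the encoder is now initialized with drmm_encoder_init(), and once the connector is registered its ->destroy callback (hibmc_connector_destroy() above) deletes the adapter, but any failure in between would otherwise leak it. The shape of the error handling, sketched:

	ret = hibmc_ddc_create(dev, vdac);	/* unmanaged i2c adapter */
	if (ret)
		return ret;			/* nothing to unwind yet */
	ret = drmm_encoder_init(dev, encoder, NULL, DRM_MODE_ENCODER_DAC, NULL);
	if (ret)
		goto err;			/* must drop the adapter by hand */
	...
	err:
		hibmc_ddc_del(vdac);
		return ret;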
+diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
+index 6f2ee7dbc43b35..2fabddc8b6d942 100644
+--- a/drivers/gpu/drm/i915/display/intel_tc.c
++++ b/drivers/gpu/drm/i915/display/intel_tc.c
+@@ -63,6 +63,7 @@ struct intel_tc_port {
+ 	enum tc_port_mode init_mode;
+ 	enum phy_fia phy_fia;
+ 	u8 phy_fia_idx;
++	u8 max_lane_count;
+ };
+ 
+ static enum intel_display_power_domain
+@@ -366,12 +367,12 @@ static int intel_tc_port_get_max_lane_count(struct intel_digital_port *dig_port)
+ 	}
+ }
+ 
+-int intel_tc_port_max_lane_count(struct intel_digital_port *dig_port)
++static int get_max_lane_count(struct intel_tc_port *tc)
+ {
++	struct intel_digital_port *dig_port = tc->dig_port;
+ 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+-	struct intel_tc_port *tc = to_tc_port(dig_port);
+ 
+-	if (!intel_encoder_is_tc(&dig_port->base) || tc->mode != TC_PORT_DP_ALT)
++	if (tc->mode != TC_PORT_DP_ALT)
+ 		return 4;
+ 
+ 	assert_tc_cold_blocked(tc);
+@@ -385,6 +386,21 @@ int intel_tc_port_max_lane_count(struct intel_digital_port *dig_port)
+ 	return intel_tc_port_get_max_lane_count(dig_port);
+ }
+ 
++static void read_pin_configuration(struct intel_tc_port *tc)
++{
++	tc->max_lane_count = get_max_lane_count(tc);
++}
++
++int intel_tc_port_max_lane_count(struct intel_digital_port *dig_port)
++{
++	struct intel_tc_port *tc = to_tc_port(dig_port);
++
++	if (!intel_encoder_is_tc(&dig_port->base))
++		return 4;
++
++	return get_max_lane_count(tc);
++}
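read_pin_configuration() samples the DP-alt pin configuration and stores it in the new max_lane_count field. Every call site added in the hunks below sits in a connect or hardware-readout path that holds a TC-cold blocking wakeref, so the snapshot is taken while the FIA registers are known to be accessible:

	/* pattern at each point the PHY becomes owned/powered: */
	tc->lock_wakeref = tc_cold_block(tc);
	read_pin_configuration(tc);	/* snapshot while registers are live */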
++
+ void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
+ 				      int required_lanes)
+ {
+@@ -597,9 +613,12 @@ static void icl_tc_phy_get_hw_state(struct intel_tc_port *tc)
+ 	tc_cold_wref = __tc_cold_block(tc, &domain);
+ 
+ 	tc->mode = tc_phy_get_current_mode(tc);
+-	if (tc->mode != TC_PORT_DISCONNECTED)
++	if (tc->mode != TC_PORT_DISCONNECTED) {
+ 		tc->lock_wakeref = tc_cold_block(tc);
+ 
++		read_pin_configuration(tc);
++	}
++
+ 	__tc_cold_unblock(tc, domain, tc_cold_wref);
+ }
+ 
+@@ -657,8 +676,11 @@ static bool icl_tc_phy_connect(struct intel_tc_port *tc,
+ 
+ 	tc->lock_wakeref = tc_cold_block(tc);
+ 
+-	if (tc->mode == TC_PORT_TBT_ALT)
++	if (tc->mode == TC_PORT_TBT_ALT) {
++		read_pin_configuration(tc);
++
+ 		return true;
++	}
+ 
+ 	if ((!tc_phy_is_ready(tc) ||
+ 	     !icl_tc_phy_take_ownership(tc, true)) &&
+@@ -669,6 +691,7 @@ static bool icl_tc_phy_connect(struct intel_tc_port *tc,
+ 		goto out_unblock_tc_cold;
+ 	}
+ 
++	read_pin_configuration(tc);
+ 
+ 	if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes))
+ 		goto out_release_phy;
+@@ -859,9 +882,12 @@ static void adlp_tc_phy_get_hw_state(struct intel_tc_port *tc)
+ 	port_wakeref = intel_display_power_get(i915, port_power_domain);
+ 
+ 	tc->mode = tc_phy_get_current_mode(tc);
+-	if (tc->mode != TC_PORT_DISCONNECTED)
++	if (tc->mode != TC_PORT_DISCONNECTED) {
+ 		tc->lock_wakeref = tc_cold_block(tc);
+ 
++		read_pin_configuration(tc);
++	}
++
+ 	intel_display_power_put(i915, port_power_domain, port_wakeref);
+ }
+ 
+@@ -874,6 +900,9 @@ static bool adlp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
+ 
+ 	if (tc->mode == TC_PORT_TBT_ALT) {
+ 		tc->lock_wakeref = tc_cold_block(tc);
++
++		read_pin_configuration(tc);
++
+ 		return true;
+ 	}
+ 
+@@ -895,6 +924,8 @@ static bool adlp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
+ 
+ 	tc->lock_wakeref = tc_cold_block(tc);
+ 
++	read_pin_configuration(tc);
++
+ 	if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes))
+ 		goto out_unblock_tc_cold;
+ 
+@@ -1094,9 +1125,12 @@ static void xelpdp_tc_phy_get_hw_state(struct intel_tc_port *tc)
+ 	tc_cold_wref = __tc_cold_block(tc, &domain);
+ 
+ 	tc->mode = tc_phy_get_current_mode(tc);
+-	if (tc->mode != TC_PORT_DISCONNECTED)
++	if (tc->mode != TC_PORT_DISCONNECTED) {
+ 		tc->lock_wakeref = tc_cold_block(tc);
+ 
++		read_pin_configuration(tc);
++	}
++
+ 	drm_WARN_ON(&i915->drm,
+ 		    (tc->mode == TC_PORT_DP_ALT || tc->mode == TC_PORT_LEGACY) &&
+ 		    !xelpdp_tc_phy_tcss_power_is_enabled(tc));
+@@ -1108,14 +1142,19 @@ static bool xelpdp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
+ {
+ 	tc->lock_wakeref = tc_cold_block(tc);
+ 
+-	if (tc->mode == TC_PORT_TBT_ALT)
++	if (tc->mode == TC_PORT_TBT_ALT) {
++		read_pin_configuration(tc);
++
+ 		return true;
++	}
+ 
+ 	if (!xelpdp_tc_phy_enable_tcss_power(tc, true))
+ 		goto out_unblock_tccold;
+ 
+ 	xelpdp_tc_phy_take_ownership(tc, true);
+ 
++	read_pin_configuration(tc);
++
+ 	if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes))
+ 		goto out_release_phy;
+ 
+@@ -1416,7 +1455,8 @@ static void intel_tc_port_reset_mode(struct intel_tc_port *tc,
+ 
+ 		aux_domain = intel_aux_power_domain(dig_port);
+ 		aux_powered = intel_display_power_is_enabled(i915, aux_domain);
+-		drm_WARN_ON(&i915->drm, aux_powered);
++		drm_dbg_kms(&i915->drm, "Port %s: AUX powered %d\n",
++			    tc->port_name, aux_powered);
+ 	}
+ 
+ 	tc_phy_disconnect(tc);
+diff --git a/drivers/gpu/drm/nouveau/nvif/vmm.c b/drivers/gpu/drm/nouveau/nvif/vmm.c
+index 99296f03371ae0..07c1ebc2a94141 100644
+--- a/drivers/gpu/drm/nouveau/nvif/vmm.c
++++ b/drivers/gpu/drm/nouveau/nvif/vmm.c
+@@ -219,7 +219,8 @@ nvif_vmm_ctor(struct nvif_mmu *mmu, const char *name, s32 oclass,
+ 	case RAW: args->type = NVIF_VMM_V0_TYPE_RAW; break;
+ 	default:
+ 		WARN_ON(1);
+-		return -EINVAL;
++		ret = -EINVAL;
++		goto done;
+ 	}
+ 
+ 	memcpy(args->data, argv, argc);
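Returning straight out of the switch would bypass the function's common exit path, so the fix routes the error through ret and goto done, letting whatever cleanup done performs (presumably releasing the temporary argument buffer built above it) still run. The general shape, as a hypothetical sketch:

	args = kmalloc(size, GFP_KERNEL);	/* hypothetical setup */
	if (!args)
		return -ENOMEM;	/* nothing to unwind: direct return is fine */
	switch (type) {
	default:
		WARN_ON(1);
		ret = -EINVAL;
		goto done;	/* not "return": args must be freed */
	}
	...
	done:
		kfree(args);
		return ret;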
+diff --git a/drivers/gpu/drm/tests/drm_format_helper_test.c b/drivers/gpu/drm/tests/drm_format_helper_test.c
+index 08992636ec05ff..e17643c408bf4b 100644
+--- a/drivers/gpu/drm/tests/drm_format_helper_test.c
++++ b/drivers/gpu/drm/tests/drm_format_helper_test.c
+@@ -60,6 +60,11 @@ struct convert_to_rgb888_result {
+ 	const u8 expected[TEST_BUF_SIZE];
+ };
+ 
++struct convert_to_bgr888_result {
++	unsigned int dst_pitch;
++	const u8 expected[TEST_BUF_SIZE];
++};
++
+ struct convert_to_argb8888_result {
+ 	unsigned int dst_pitch;
+ 	const u32 expected[TEST_BUF_SIZE];
+@@ -107,6 +112,7 @@ struct convert_xrgb8888_case {
+ 	struct convert_to_argb1555_result argb1555_result;
+ 	struct convert_to_rgba5551_result rgba5551_result;
+ 	struct convert_to_rgb888_result rgb888_result;
++	struct convert_to_bgr888_result bgr888_result;
+ 	struct convert_to_argb8888_result argb8888_result;
+ 	struct convert_to_xrgb2101010_result xrgb2101010_result;
+ 	struct convert_to_argb2101010_result argb2101010_result;
+@@ -151,6 +157,10 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = {
+ 			.dst_pitch = TEST_USE_DEFAULT_PITCH,
+ 			.expected = { 0x00, 0x00, 0xFF },
+ 		},
++		.bgr888_result = {
++			.dst_pitch = TEST_USE_DEFAULT_PITCH,
++			.expected = { 0xFF, 0x00, 0x00 },
++		},
+ 		.argb8888_result = {
+ 			.dst_pitch = TEST_USE_DEFAULT_PITCH,
+ 			.expected = { 0xFFFF0000 },
+@@ -217,6 +227,10 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = {
+ 			.dst_pitch = TEST_USE_DEFAULT_PITCH,
+ 			.expected = { 0x00, 0x00, 0xFF },
+ 		},
++		.bgr888_result = {
++			.dst_pitch = TEST_USE_DEFAULT_PITCH,
++			.expected = { 0xFF, 0x00, 0x00 },
++		},
+ 		.argb8888_result = {
+ 			.dst_pitch = TEST_USE_DEFAULT_PITCH,
+ 			.expected = { 0xFFFF0000 },
+@@ -330,6 +344,15 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = {
+ 				0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x00,
+ 			},
+ 		},
++		.bgr888_result = {
++			.dst_pitch = TEST_USE_DEFAULT_PITCH,
++			.expected = {
++				0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00,
++				0xFF, 0x00, 0x00, 0x00, 0xFF, 0x00,
++				0x00, 0x00, 0xFF, 0xFF, 0x00, 0xFF,
++				0xFF, 0xFF, 0x00, 0x00, 0xFF, 0xFF,
++			},
++		},
+ 		.argb8888_result = {
+ 			.dst_pitch = TEST_USE_DEFAULT_PITCH,
+ 			.expected = {
+@@ -468,6 +491,17 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = {
+ 				0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 			},
+ 		},
++		.bgr888_result = {
++			.dst_pitch = 15,
++			.expected = {
++				0x0E, 0x44, 0x9C, 0x11, 0x4D, 0x05, 0xA8, 0xF3, 0x03,
++				0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++				0x6C, 0xF0, 0x73, 0x0E, 0x44, 0x9C, 0x11, 0x4D, 0x05,
++				0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++				0xA8, 0x03, 0x03, 0x6C, 0xF0, 0x73, 0x0E, 0x44, 0x9C,
++				0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++			},
++		},
+ 		.argb8888_result = {
+ 			.dst_pitch = 20,
+ 			.expected = {
+@@ -714,14 +748,9 @@ static void drm_test_fb_xrgb8888_to_rgb565(struct kunit *test)
+ 	buf = dst.vaddr;
+ 	memset(buf, 0, dst_size);
+ 
+-	int blit_result = 0;
+-
+-	blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_RGB565, &src, &fb, &params->clip,
+-				  &fmtcnv_state);
+-
++	drm_fb_xrgb8888_to_rgb565(&dst, dst_pitch, &src, &fb, &params->clip,
++				  &fmtcnv_state, false);
+ 	buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
+-
+-	KUNIT_EXPECT_FALSE(test, blit_result);
+ 	KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
+ }
+ 
+@@ -761,14 +790,8 @@ static void drm_test_fb_xrgb8888_to_xrgb1555(struct kunit *test)
+ 	buf = dst.vaddr; /* restore original value of buf */
+ 	memset(buf, 0, dst_size);
+ 
+-	int blit_result = 0;
+-
+-	blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_XRGB1555, &src, &fb, &params->clip,
+-				  &fmtcnv_state);
+-
++	drm_fb_xrgb8888_to_xrgb1555(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
+ 	buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
+-
+-	KUNIT_EXPECT_FALSE(test, blit_result);
+ 	KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
+ }
+ 
+@@ -808,14 +831,8 @@ static void drm_test_fb_xrgb8888_to_argb1555(struct kunit *test)
+ 	buf = dst.vaddr; /* restore original value of buf */
+ 	memset(buf, 0, dst_size);
+ 
+-	int blit_result = 0;
+-
+-	blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_ARGB1555, &src, &fb, &params->clip,
+-				  &fmtcnv_state);
+-
++	drm_fb_xrgb8888_to_argb1555(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
+ 	buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
+-
+-	KUNIT_EXPECT_FALSE(test, blit_result);
+ 	KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
+ }
+ 
+@@ -855,14 +872,8 @@ static void drm_test_fb_xrgb8888_to_rgba5551(struct kunit *test)
+ 	buf = dst.vaddr; /* restore original value of buf */
+ 	memset(buf, 0, dst_size);
+ 
+-	int blit_result = 0;
+-
+-	blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_RGBA5551, &src, &fb, &params->clip,
+-				  &fmtcnv_state);
+-
++	drm_fb_xrgb8888_to_rgba5551(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
+ 	buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
+-
+-	KUNIT_EXPECT_FALSE(test, blit_result);
+ 	KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
+ }
+ 
+@@ -905,12 +916,49 @@ static void drm_test_fb_xrgb8888_to_rgb888(struct kunit *test)
+ 	buf = dst.vaddr; /* restore original value of buf */
+ 	memset(buf, 0, dst_size);
+ 
+-	int blit_result = 0;
++	drm_fb_xrgb8888_to_rgb888(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
++	KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
++}
++
++static void drm_test_fb_xrgb8888_to_bgr888(struct kunit *test)
++{
++	const struct convert_xrgb8888_case *params = test->param_value;
++	const struct convert_to_bgr888_result *result = &params->bgr888_result;
++	size_t dst_size;
++	u8 *buf = NULL;
++	__le32 *xrgb8888 = NULL;
++	struct iosys_map dst, src;
++
++	struct drm_framebuffer fb = {
++		.format = drm_format_info(DRM_FORMAT_XRGB8888),
++		.pitches = { params->pitch, 0, 0 },
++	};
++
++	dst_size = conversion_buf_size(DRM_FORMAT_BGR888, result->dst_pitch,
++				       &params->clip, 0);
++	KUNIT_ASSERT_GT(test, dst_size, 0);
++
++	buf = kunit_kzalloc(test, dst_size, GFP_KERNEL);
++	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
++	iosys_map_set_vaddr(&dst, buf);
++
++	xrgb8888 = cpubuf_to_le32(test, params->xrgb8888, TEST_BUF_SIZE);
++	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xrgb8888);
++	iosys_map_set_vaddr(&src, xrgb8888);
+ 
+-	blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_RGB888, &src, &fb, &params->clip,
++	/*
++	 * BGR888 expected results are already in little-endian
++	 * order, so there's no need to convert the test output.
++	 */
++	drm_fb_xrgb8888_to_bgr888(&dst, &result->dst_pitch, &src, &fb, &params->clip,
+ 				  &fmtcnv_state);
++	KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
+ 
+-	KUNIT_EXPECT_FALSE(test, blit_result);
++	buf = dst.vaddr; /* restore original value of buf */
++	memset(buf, 0, dst_size);
++
++	drm_fb_xrgb8888_to_bgr888(&dst, &result->dst_pitch, &src, &fb, &params->clip,
++				  &fmtcnv_state);
+ 	KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
+ }
+ 
+@@ -950,14 +998,8 @@ static void drm_test_fb_xrgb8888_to_argb8888(struct kunit *test)
+ 	buf = dst.vaddr; /* restore original value of buf */
+ 	memset(buf, 0, dst_size);
+ 
+-	int blit_result = 0;
+-
+-	blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_ARGB8888, &src, &fb, &params->clip,
+-				  &fmtcnv_state);
+-
++	drm_fb_xrgb8888_to_argb8888(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
+ 	buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
+-
+-	KUNIT_EXPECT_FALSE(test, blit_result);
+ 	KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
+ }
+ 
+@@ -991,18 +1033,14 @@ static void drm_test_fb_xrgb8888_to_xrgb2101010(struct kunit *test)
+ 		NULL : &result->dst_pitch;
+ 
+ 	drm_fb_xrgb8888_to_xrgb2101010(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
+-	buf = le32buf_to_cpu(test, buf, dst_size / sizeof(u32));
++	buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
+ 	KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
+ 
+ 	buf = dst.vaddr; /* restore original value of buf */
+ 	memset(buf, 0, dst_size);
+ 
+-	int blit_result = 0;
+-
+-	blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_XRGB2101010, &src, &fb,
+-				  &params->clip, &fmtcnv_state);
+-
+-	KUNIT_EXPECT_FALSE(test, blit_result);
++	drm_fb_xrgb8888_to_xrgb2101010(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
++	buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
+ 	KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
+ }
+ 
+@@ -1042,14 +1080,8 @@ static void drm_test_fb_xrgb8888_to_argb2101010(struct kunit *test)
+ 	buf = dst.vaddr; /* restore original value of buf */
+ 	memset(buf, 0, dst_size);
+ 
+-	int blit_result = 0;
+-
+-	blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_ARGB2101010, &src, &fb,
+-				  &params->clip, &fmtcnv_state);
+-
++	drm_fb_xrgb8888_to_argb2101010(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
+ 	buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
+-
+-	KUNIT_EXPECT_FALSE(test, blit_result);
+ 	KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
+ }
+ 
+@@ -1122,23 +1154,15 @@ static void drm_test_fb_swab(struct kunit *test)
+ 	buf = dst.vaddr; /* restore original value of buf */
+ 	memset(buf, 0, dst_size);
+ 
+-	int blit_result;
+-
+-	blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_XRGB8888 | DRM_FORMAT_BIG_ENDIAN,
+-				  &src, &fb, &params->clip, &fmtcnv_state);
++	drm_fb_swab(&dst, dst_pitch, &src, &fb, &params->clip, false, &fmtcnv_state);
+ 	buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
+-
+-	KUNIT_EXPECT_FALSE(test, blit_result);
+ 	KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
+ 
+ 	buf = dst.vaddr;
+ 	memset(buf, 0, dst_size);
+ 
+-	blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_BGRX8888, &src, &fb, &params->clip,
+-				  &fmtcnv_state);
++	drm_fb_xrgb8888_to_bgrx8888(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
+ 	buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
+-
+-	KUNIT_EXPECT_FALSE(test, blit_result);
+ 	KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
+ 
+ 	buf = dst.vaddr;
+@@ -1149,11 +1173,8 @@ static void drm_test_fb_swab(struct kunit *test)
+ 	mock_format.format |= DRM_FORMAT_BIG_ENDIAN;
+ 	fb.format = &mock_format;
+ 
+-	blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_XRGB8888, &src, &fb, &params->clip,
+-				  &fmtcnv_state);
++	drm_fb_swab(&dst, dst_pitch, &src, &fb, &params->clip, false, &fmtcnv_state);
+ 	buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
+-
+-	KUNIT_EXPECT_FALSE(test, blit_result);
+ 	KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
+ }
+ 
+@@ -1186,14 +1207,8 @@ static void drm_test_fb_xrgb8888_to_abgr8888(struct kunit *test)
+ 	const unsigned int *dst_pitch = (result->dst_pitch == TEST_USE_DEFAULT_PITCH) ?
+ 		NULL : &result->dst_pitch;
+ 
+-	int blit_result = 0;
+-
+-	blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_ABGR8888, &src, &fb, &params->clip,
+-				  &fmtcnv_state);
+-
++	drm_fb_xrgb8888_to_abgr8888(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
+ 	buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
+-
+-	KUNIT_EXPECT_FALSE(test, blit_result);
+ 	KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
+ }
+ 
+@@ -1226,14 +1241,8 @@ static void drm_test_fb_xrgb8888_to_xbgr8888(struct kunit *test)
+ 	const unsigned int *dst_pitch = (result->dst_pitch == TEST_USE_DEFAULT_PITCH) ?
+ 		NULL : &result->dst_pitch;
+ 
+-	int blit_result = 0;
+-
+-	blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_XBGR8888, &src, &fb, &params->clip,
+-				  &fmtcnv_state);
+-
++	drm_fb_xrgb8888_to_xbgr8888(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
+ 	buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
+-
+-	KUNIT_EXPECT_FALSE(test, blit_result);
+ 	KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
+ }
+ 
+@@ -1830,12 +1839,8 @@ static void drm_test_fb_memcpy(struct kunit *test)
+ 		memset(buf[i], 0, dst_size[i]);
+ 	}
+ 
+-	int blit_result;
+-
+-	blit_result = drm_fb_blit(dst, dst_pitches, params->format, src, &fb, &params->clip,
+-				  &fmtcnv_state);
++	drm_fb_memcpy(dst, dst_pitches, src, &fb, &params->clip);
+ 
+-	KUNIT_EXPECT_FALSE(test, blit_result);
+ 	for (size_t i = 0; i < fb.format->num_planes; i++) {
+ 		expected[i] = cpubuf_to_le32(test, params->expected[i], TEST_BUF_SIZE);
+ 		KUNIT_EXPECT_MEMEQ_MSG(test, buf[i], expected[i], dst_size[i],
+@@ -1851,6 +1856,7 @@ static struct kunit_case drm_format_helper_test_cases[] = {
+ 	KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_argb1555, convert_xrgb8888_gen_params),
+ 	KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_rgba5551, convert_xrgb8888_gen_params),
+ 	KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_rgb888, convert_xrgb8888_gen_params),
++	KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_bgr888, convert_xrgb8888_gen_params),
+ 	KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_argb8888, convert_xrgb8888_gen_params),
+ 	KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_xrgb2101010, convert_xrgb8888_gen_params),
+ 	KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_argb2101010, convert_xrgb8888_gen_params),
+diff --git a/drivers/gpu/drm/xe/Kconfig b/drivers/gpu/drm/xe/Kconfig
+index 93e742c1f21e74..f15c27070ff4e4 100644
+--- a/drivers/gpu/drm/xe/Kconfig
++++ b/drivers/gpu/drm/xe/Kconfig
+@@ -3,6 +3,7 @@ config DRM_XE
+ 	tristate "Intel Xe Graphics"
+ 	depends on DRM && PCI && MMU
+ 	depends on KUNIT || !KUNIT
++	depends on PAGE_SIZE_4KB || COMPILE_TEST || BROKEN
+ 	select INTERVAL_TREE
+ 	# we need shmfs for the swappable backing store, and in particular
+ 	# the shmem_readpage() which depends upon tmpfs
+diff --git a/drivers/hwmon/gsc-hwmon.c b/drivers/hwmon/gsc-hwmon.c
+index 4514f3ed90ccd6..3e065e6ab4fcc7 100644
+--- a/drivers/hwmon/gsc-hwmon.c
++++ b/drivers/hwmon/gsc-hwmon.c
+@@ -65,7 +65,7 @@ static ssize_t pwm_auto_point_temp_show(struct device *dev,
+ 		return ret;
+ 
+ 	ret = regs[0] | regs[1] << 8;
+-	return sprintf(buf, "%d\n", ret * 10);
++	return sprintf(buf, "%d\n", ret * 100);
+ }
+ 
+ static ssize_t pwm_auto_point_temp_store(struct device *dev,
+@@ -100,7 +100,7 @@ static ssize_t pwm_auto_point_pwm_show(struct device *dev,
+ {
+ 	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ 
+-	return sprintf(buf, "%d\n", 255 * (50 + (attr->index * 10)));
++	return sprintf(buf, "%d\n", 255 * (50 + (attr->index * 10)) / 100);
+ }
+ 
+ static SENSOR_DEVICE_ATTR_RO(pwm1_auto_point1_pwm, pwm_auto_point_pwm, 0);
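
    The two gsc-hwmon hunks above rescale the sysfs values: assuming the
    register pair encodes the trip temperature in tenths of a degree Celsius
    and the auto points are 50%, 60%, ... duty-cycle steps, hwmon expects
    millidegrees and a 0-255 PWM value. A standalone userspace sketch of the
    corrected arithmetic (the raw readout and point count are made-up
    example values, not taken from the driver):

    #include <stdio.h>

    int main(void)
    {
        unsigned char regs[2] = { 0x2c, 0x01 };  /* example raw readout */
        int raw = regs[0] | regs[1] << 8;        /* 0x012c = 300 -> 30.0 C */

        /* millidegrees: raw * 100 (the old code multiplied by 10 only) */
        printf("temp: %d mC\n", raw * 100);

        /* duty cycles 50%, 60%, 70% mapped onto the 0-255 PWM range */
        for (int idx = 0; idx < 3; idx++)
            printf("pwm%d: %d\n", idx + 1, 255 * (50 + idx * 10) / 100);
        return 0;
    }
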
+diff --git a/drivers/iio/adc/ad7173.c b/drivers/iio/adc/ad7173.c
+index 2eebc6f761a632..19b583e00753e2 100644
+--- a/drivers/iio/adc/ad7173.c
++++ b/drivers/iio/adc/ad7173.c
+@@ -1243,6 +1243,7 @@ static int ad7173_fw_parse_channel_config(struct iio_dev *indio_dev)
+ 		chan_st_priv->cfg.bipolar = false;
+ 		chan_st_priv->cfg.input_buf = st->info->has_input_buf;
+ 		chan_st_priv->cfg.ref_sel = AD7173_SETUP_REF_SEL_INT_REF;
++		chan_st_priv->cfg.odr = st->info->odr_start_value;
+ 		st->adc_mode |= AD7173_ADC_MODE_REF_EN;
+ 
+ 		chan_index++;
+@@ -1307,7 +1308,7 @@ static int ad7173_fw_parse_channel_config(struct iio_dev *indio_dev)
+ 		chan->channel = ain[0];
+ 		chan_st_priv->chan_reg = chan_index;
+ 		chan_st_priv->cfg.input_buf = st->info->has_input_buf;
+-		chan_st_priv->cfg.odr = 0;
++		chan_st_priv->cfg.odr = st->info->odr_start_value;
+ 
+ 		chan_st_priv->cfg.bipolar = fwnode_property_read_bool(child, "bipolar");
+ 		if (chan_st_priv->cfg.bipolar)
+diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c
+index 39196a2862cf75..5d0bfabc69ea91 100644
+--- a/drivers/iio/adc/ad_sigma_delta.c
++++ b/drivers/iio/adc/ad_sigma_delta.c
+@@ -407,7 +407,7 @@ static int ad_sd_buffer_postenable(struct iio_dev *indio_dev)
+ 	return ret;
+ }
+ 
+-static int ad_sd_buffer_postdisable(struct iio_dev *indio_dev)
++static int ad_sd_buffer_predisable(struct iio_dev *indio_dev)
+ {
+ 	struct ad_sigma_delta *sigma_delta = iio_device_get_drvdata(indio_dev);
+ 
+@@ -535,7 +535,7 @@ static bool ad_sd_validate_scan_mask(struct iio_dev *indio_dev, const unsigned l
+ 
+ static const struct iio_buffer_setup_ops ad_sd_buffer_setup_ops = {
+ 	.postenable = &ad_sd_buffer_postenable,
+-	.postdisable = &ad_sd_buffer_postdisable,
++	.predisable = &ad_sd_buffer_predisable,
+ 	.validate_scan_mask = &ad_sd_validate_scan_mask,
+ };
+ 
+diff --git a/drivers/iio/imu/bno055/bno055.c b/drivers/iio/imu/bno055/bno055.c
+index ea6519b22b2f34..0b2d6ad699f30c 100644
+--- a/drivers/iio/imu/bno055/bno055.c
++++ b/drivers/iio/imu/bno055/bno055.c
+@@ -118,6 +118,7 @@ struct bno055_sysfs_attr {
+ 	int len;
+ 	int *fusion_vals;
+ 	int *hw_xlate;
++	int hw_xlate_len;
+ 	int type;
+ };
+ 
+@@ -170,20 +171,24 @@ static int bno055_gyr_scale_vals[] = {
+ 	1000, 1877467, 2000, 1877467,
+ };
+ 
++static int bno055_gyr_scale_hw_xlate[] = {0, 1, 2, 3, 4};
+ static struct bno055_sysfs_attr bno055_gyr_scale = {
+ 	.vals = bno055_gyr_scale_vals,
+ 	.len = ARRAY_SIZE(bno055_gyr_scale_vals),
+ 	.fusion_vals = (int[]){1, 900},
+-	.hw_xlate = (int[]){4, 3, 2, 1, 0},
++	.hw_xlate = bno055_gyr_scale_hw_xlate,
++	.hw_xlate_len = ARRAY_SIZE(bno055_gyr_scale_hw_xlate),
+ 	.type = IIO_VAL_FRACTIONAL,
+ };
+ 
+ static int bno055_gyr_lpf_vals[] = {12, 23, 32, 47, 64, 116, 230, 523};
++static int bno055_gyr_lpf_hw_xlate[] = {5, 4, 7, 3, 6, 2, 1, 0};
+ static struct bno055_sysfs_attr bno055_gyr_lpf = {
+ 	.vals = bno055_gyr_lpf_vals,
+ 	.len = ARRAY_SIZE(bno055_gyr_lpf_vals),
+ 	.fusion_vals = (int[]){32},
+-	.hw_xlate = (int[]){5, 4, 7, 3, 6, 2, 1, 0},
++	.hw_xlate = bno055_gyr_lpf_hw_xlate,
++	.hw_xlate_len = ARRAY_SIZE(bno055_gyr_lpf_hw_xlate),
+ 	.type = IIO_VAL_INT,
+ };
+ 
+@@ -561,7 +566,7 @@ static int bno055_get_regmask(struct bno055_priv *priv, int *val, int *val2,
+ 
+ 	idx = (hwval & mask) >> shift;
+ 	if (attr->hw_xlate)
+-		for (i = 0; i < attr->len; i++)
++		for (i = 0; i < attr->hw_xlate_len; i++)
+ 			if (attr->hw_xlate[i] == idx) {
+ 				idx = i;
+ 				break;
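
    The bno055 fix above pairs each translation table with its own length
    because attr->len counts vals[] entries, and for IIO_VAL_FRACTIONAL
    those are value/scale pairs (10 entries for 5 settings), so bounding
    the hw_xlate[] search by attr->len read past the 5-element array. A
    minimal userspace sketch of the bounded lookup, with the tables copied
    from the hunk and a hypothetical helper name:

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static const int vals[] = { 125, 1877467, 250, 1877467, 500, 1877467,
                                1000, 1877467, 2000, 1877467 };
    static const int hw_xlate[] = { 0, 1, 2, 3, 4 };

    static int xlate_to_index(int hwval, const int *xl, int xl_len)
    {
        for (int i = 0; i < xl_len; i++)
            if (xl[i] == hwval)
                return i;
        return -1;
    }

    int main(void)
    {
        /* 10 vals entries describe only 5 hardware settings */
        printf("vals entries: %zu, xlate entries: %zu\n",
               ARRAY_SIZE(vals), ARRAY_SIZE(hw_xlate));
        /* bounding the search by the xlate size avoids the OOB read */
        printf("hw value 3 -> index %d\n",
               xlate_to_index(3, hw_xlate, ARRAY_SIZE(hw_xlate)));
        return 0;
    }
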
+diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600.h b/drivers/iio/imu/inv_icm42600/inv_icm42600.h
+index 18787a43477b89..76c3802beda8e7 100644
+--- a/drivers/iio/imu/inv_icm42600/inv_icm42600.h
++++ b/drivers/iio/imu/inv_icm42600/inv_icm42600.h
+@@ -164,11 +164,11 @@ struct inv_icm42600_state {
+ 	struct inv_icm42600_suspended suspended;
+ 	struct iio_dev *indio_gyro;
+ 	struct iio_dev *indio_accel;
+-	uint8_t buffer[2] __aligned(IIO_DMA_MINALIGN);
++	u8 buffer[2] __aligned(IIO_DMA_MINALIGN);
+ 	struct inv_icm42600_fifo fifo;
+ 	struct {
+-		int64_t gyro;
+-		int64_t accel;
++		s64 gyro;
++		s64 accel;
+ 	} timestamp;
+ };
+ 
+@@ -410,7 +410,7 @@ const struct iio_mount_matrix *
+ inv_icm42600_get_mount_matrix(const struct iio_dev *indio_dev,
+ 			      const struct iio_chan_spec *chan);
+ 
+-uint32_t inv_icm42600_odr_to_period(enum inv_icm42600_odr odr);
++u32 inv_icm42600_odr_to_period(enum inv_icm42600_odr odr);
+ 
+ int inv_icm42600_set_accel_conf(struct inv_icm42600_state *st,
+ 				struct inv_icm42600_sensor_conf *conf,
+diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
+index 7968aa27f9fd79..8da15cde388a20 100644
+--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
++++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
+@@ -177,8 +177,8 @@ static const struct iio_chan_spec inv_icm42600_accel_channels[] = {
+  */
+ struct inv_icm42600_accel_buffer {
+ 	struct inv_icm42600_fifo_sensor_data accel;
+-	int16_t temp;
+-	int64_t timestamp __aligned(8);
++	s16 temp;
++	aligned_s64 timestamp;
+ };
+ 
+ #define INV_ICM42600_SCAN_MASK_ACCEL_3AXIS				\
+@@ -241,7 +241,7 @@ static int inv_icm42600_accel_update_scan_mode(struct iio_dev *indio_dev,
+ 
+ static int inv_icm42600_accel_read_sensor(struct iio_dev *indio_dev,
+ 					  struct iio_chan_spec const *chan,
+-					  int16_t *val)
++					  s16 *val)
+ {
+ 	struct inv_icm42600_state *st = iio_device_get_drvdata(indio_dev);
+ 	struct inv_icm42600_sensor_state *accel_st = iio_priv(indio_dev);
+@@ -284,7 +284,7 @@ static int inv_icm42600_accel_read_sensor(struct iio_dev *indio_dev,
+ 	if (ret)
+ 		goto exit;
+ 
+-	*val = (int16_t)be16_to_cpup(data);
++	*val = (s16)be16_to_cpup(data);
+ 	if (*val == INV_ICM42600_DATA_INVALID)
+ 		ret = -EINVAL;
+ exit:
+@@ -492,11 +492,11 @@ static int inv_icm42600_accel_read_offset(struct inv_icm42600_state *st,
+ 					  int *val, int *val2)
+ {
+ 	struct device *dev = regmap_get_device(st->map);
+-	int64_t val64;
+-	int32_t bias;
++	s64 val64;
++	s32 bias;
+ 	unsigned int reg;
+-	int16_t offset;
+-	uint8_t data[2];
++	s16 offset;
++	u8 data[2];
+ 	int ret;
+ 
+ 	if (chan->type != IIO_ACCEL)
+@@ -550,7 +550,7 @@ static int inv_icm42600_accel_read_offset(struct inv_icm42600_state *st,
+ 	 * result in micro (1000000)
+ 	 * (offset * 5 * 9.806650 * 1000000) / 10000
+ 	 */
+-	val64 = (int64_t)offset * 5LL * 9806650LL;
++	val64 = (s64)offset * 5LL * 9806650LL;
+ 	/* for rounding, add + or - divisor (10000) divided by 2 */
+ 	if (val64 >= 0)
+ 		val64 += 10000LL / 2LL;
+@@ -568,10 +568,10 @@ static int inv_icm42600_accel_write_offset(struct inv_icm42600_state *st,
+ 					   int val, int val2)
+ {
+ 	struct device *dev = regmap_get_device(st->map);
+-	int64_t val64;
+-	int32_t min, max;
++	s64 val64;
++	s32 min, max;
+ 	unsigned int reg, regval;
+-	int16_t offset;
++	s16 offset;
+ 	int ret;
+ 
+ 	if (chan->type != IIO_ACCEL)
+@@ -596,7 +596,7 @@ static int inv_icm42600_accel_write_offset(struct inv_icm42600_state *st,
+ 	      inv_icm42600_accel_calibbias[1];
+ 	max = inv_icm42600_accel_calibbias[4] * 1000000L +
+ 	      inv_icm42600_accel_calibbias[5];
+-	val64 = (int64_t)val * 1000000LL + (int64_t)val2;
++	val64 = (s64)val * 1000000LL + (s64)val2;
+ 	if (val64 < min || val64 > max)
+ 		return -EINVAL;
+ 
+@@ -671,7 +671,7 @@ static int inv_icm42600_accel_read_raw(struct iio_dev *indio_dev,
+ 				       int *val, int *val2, long mask)
+ {
+ 	struct inv_icm42600_state *st = iio_device_get_drvdata(indio_dev);
+-	int16_t data;
++	s16 data;
+ 	int ret;
+ 
+ 	switch (chan->type) {
+@@ -905,7 +905,8 @@ int inv_icm42600_accel_parse_fifo(struct iio_dev *indio_dev)
+ 	const int8_t *temp;
+ 	unsigned int odr;
+ 	int64_t ts_val;
+-	struct inv_icm42600_accel_buffer buffer;
++	/* buffer is copied to userspace, zeroing it to avoid any data leak */
++	struct inv_icm42600_accel_buffer buffer = { };
+ 
+ 	/* parse all fifo packets */
+ 	for (i = 0, no = 0; i < st->fifo.count; i += size, ++no) {
+@@ -924,8 +925,6 @@ int inv_icm42600_accel_parse_fifo(struct iio_dev *indio_dev)
+ 			inv_sensors_timestamp_apply_odr(ts, st->fifo.period,
+ 							st->fifo.nb.total, no);
+ 
+-		/* buffer is copied to userspace, zeroing it to avoid any data leak */
+-		memset(&buffer, 0, sizeof(buffer));
+ 		memcpy(&buffer.accel, accel, sizeof(buffer.accel));
+ 		/* convert 8 bits FIFO temperature in high resolution format */
+ 		buffer.temp = temp ? (*temp * 64) : 0;
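
    The accel FIFO parser above (and the matching gyro hunk further down)
    now zeroes the scan struct once at declaration instead of calling
    memset() per packet: any field a given packet does not provide would
    otherwise carry stale stack bytes into the buffer pushed to userspace.
    A small sketch of why the zeroing matters, with hypothetical field
    names standing in for the driver's layout:

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    struct scan {
        int16_t accel[3];
        int16_t temp;
        int64_t timestamp;
    };

    int main(void)
    {
        struct scan buffer = { 0 };  /* mirrors the kernel's "= { }" */
        const int16_t pkt_accel[3] = { 1, -2, 3 };

        /* only the fields present in this packet are copied in */
        memcpy(buffer.accel, pkt_accel, sizeof(buffer.accel));

        /* temp was absent from the packet: it stays 0 instead of leaking */
        printf("temp=%d ts=%lld\n", buffer.temp,
               (long long)buffer.timestamp);
        return 0;
    }
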
+diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c
+index aae7c56481a3fa..00b9db52ca7855 100644
+--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c
++++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c
+@@ -26,28 +26,28 @@
+ #define INV_ICM42600_FIFO_HEADER_ODR_GYRO	BIT(0)
+ 
+ struct inv_icm42600_fifo_1sensor_packet {
+-	uint8_t header;
++	u8 header;
+ 	struct inv_icm42600_fifo_sensor_data data;
+-	int8_t temp;
++	s8 temp;
+ } __packed;
+ #define INV_ICM42600_FIFO_1SENSOR_PACKET_SIZE		8
+ 
+ struct inv_icm42600_fifo_2sensors_packet {
+-	uint8_t header;
++	u8 header;
+ 	struct inv_icm42600_fifo_sensor_data accel;
+ 	struct inv_icm42600_fifo_sensor_data gyro;
+-	int8_t temp;
++	s8 temp;
+ 	__be16 timestamp;
+ } __packed;
+ #define INV_ICM42600_FIFO_2SENSORS_PACKET_SIZE		16
+ 
+ ssize_t inv_icm42600_fifo_decode_packet(const void *packet, const void **accel,
+-					const void **gyro, const int8_t **temp,
++					const void **gyro, const s8 **temp,
+ 					const void **timestamp, unsigned int *odr)
+ {
+ 	const struct inv_icm42600_fifo_1sensor_packet *pack1 = packet;
+ 	const struct inv_icm42600_fifo_2sensors_packet *pack2 = packet;
+-	uint8_t header = *((const uint8_t *)packet);
++	u8 header = *((const u8 *)packet);
+ 
+ 	/* FIFO empty */
+ 	if (header & INV_ICM42600_FIFO_HEADER_MSG) {
+@@ -100,7 +100,7 @@ ssize_t inv_icm42600_fifo_decode_packet(const void *packet, const void **accel,
+ 
+ void inv_icm42600_buffer_update_fifo_period(struct inv_icm42600_state *st)
+ {
+-	uint32_t period_gyro, period_accel, period;
++	u32 period_gyro, period_accel, period;
+ 
+ 	if (st->fifo.en & INV_ICM42600_SENSOR_GYRO)
+ 		period_gyro = inv_icm42600_odr_to_period(st->conf.gyro.odr);
+@@ -204,8 +204,8 @@ int inv_icm42600_buffer_update_watermark(struct inv_icm42600_state *st)
+ {
+ 	size_t packet_size, wm_size;
+ 	unsigned int wm_gyro, wm_accel, watermark;
+-	uint32_t period_gyro, period_accel, period;
+-	uint32_t latency_gyro, latency_accel, latency;
++	u32 period_gyro, period_accel, period;
++	u32 latency_gyro, latency_accel, latency;
+ 	bool restore;
+ 	__le16 raw_wm;
+ 	int ret;
+@@ -459,7 +459,7 @@ int inv_icm42600_buffer_fifo_read(struct inv_icm42600_state *st,
+ 	__be16 *raw_fifo_count;
+ 	ssize_t i, size;
+ 	const void *accel, *gyro, *timestamp;
+-	const int8_t *temp;
++	const s8 *temp;
+ 	unsigned int odr;
+ 	int ret;
+ 
+@@ -550,7 +550,7 @@ int inv_icm42600_buffer_hwfifo_flush(struct inv_icm42600_state *st,
+ 	struct inv_icm42600_sensor_state *gyro_st = iio_priv(st->indio_gyro);
+ 	struct inv_icm42600_sensor_state *accel_st = iio_priv(st->indio_accel);
+ 	struct inv_sensors_timestamp *ts;
+-	int64_t gyro_ts, accel_ts;
++	s64 gyro_ts, accel_ts;
+ 	int ret;
+ 
+ 	gyro_ts = iio_get_time_ns(st->indio_gyro);
+diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.h b/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.h
+index f6c85daf42b00b..ffca4da1e24936 100644
+--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.h
++++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.h
+@@ -28,7 +28,7 @@ struct inv_icm42600_state;
+ struct inv_icm42600_fifo {
+ 	unsigned int on;
+ 	unsigned int en;
+-	uint32_t period;
++	u32 period;
+ 	struct {
+ 		unsigned int gyro;
+ 		unsigned int accel;
+@@ -41,7 +41,7 @@ struct inv_icm42600_fifo {
+ 		size_t accel;
+ 		size_t total;
+ 	} nb;
+-	uint8_t data[2080] __aligned(IIO_DMA_MINALIGN);
++	u8 data[2080] __aligned(IIO_DMA_MINALIGN);
+ };
+ 
+ /* FIFO data packet */
+@@ -52,7 +52,7 @@ struct inv_icm42600_fifo_sensor_data {
+ } __packed;
+ #define INV_ICM42600_FIFO_DATA_INVALID		-32768
+ 
+-static inline int16_t inv_icm42600_fifo_get_sensor_data(__be16 d)
++static inline s16 inv_icm42600_fifo_get_sensor_data(__be16 d)
+ {
+ 	return be16_to_cpu(d);
+ }
+@@ -60,7 +60,7 @@ static inline int16_t inv_icm42600_fifo_get_sensor_data(__be16 d)
+ static inline bool
+ inv_icm42600_fifo_is_data_valid(const struct inv_icm42600_fifo_sensor_data *s)
+ {
+-	int16_t x, y, z;
++	s16 x, y, z;
+ 
+ 	x = inv_icm42600_fifo_get_sensor_data(s->x);
+ 	y = inv_icm42600_fifo_get_sensor_data(s->y);
+@@ -75,7 +75,7 @@ inv_icm42600_fifo_is_data_valid(const struct inv_icm42600_fifo_sensor_data *s)
+ }
+ 
+ ssize_t inv_icm42600_fifo_decode_packet(const void *packet, const void **accel,
+-					const void **gyro, const int8_t **temp,
++					const void **gyro, const s8 **temp,
+ 					const void **timestamp, unsigned int *odr);
+ 
+ extern const struct iio_buffer_setup_ops inv_icm42600_buffer_ops;
+diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
+index a0bed49c3ba674..73aeddf53b767d 100644
+--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
++++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
+@@ -103,7 +103,7 @@ const struct regmap_config inv_icm42600_spi_regmap_config = {
+ EXPORT_SYMBOL_NS_GPL(inv_icm42600_spi_regmap_config, IIO_ICM42600);
+ 
+ struct inv_icm42600_hw {
+-	uint8_t whoami;
++	u8 whoami;
+ 	const char *name;
+ 	const struct inv_icm42600_conf *conf;
+ };
+@@ -188,9 +188,9 @@ inv_icm42600_get_mount_matrix(const struct iio_dev *indio_dev,
+ 	return &st->orientation;
+ }
+ 
+-uint32_t inv_icm42600_odr_to_period(enum inv_icm42600_odr odr)
++u32 inv_icm42600_odr_to_period(enum inv_icm42600_odr odr)
+ {
+-	static uint32_t odr_periods[INV_ICM42600_ODR_NB] = {
++	static u32 odr_periods[INV_ICM42600_ODR_NB] = {
+ 		/* reserved values */
+ 		0, 0, 0,
+ 		/* 8kHz */
+diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
+index c6bb68bf5e1449..6c7430dac6db83 100644
+--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
++++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
+@@ -77,8 +77,8 @@ static const struct iio_chan_spec inv_icm42600_gyro_channels[] = {
+  */
+ struct inv_icm42600_gyro_buffer {
+ 	struct inv_icm42600_fifo_sensor_data gyro;
+-	int16_t temp;
+-	int64_t timestamp __aligned(8);
++	s16 temp;
++	aligned_s64 timestamp;
+ };
+ 
+ #define INV_ICM42600_SCAN_MASK_GYRO_3AXIS				\
+@@ -139,7 +139,7 @@ static int inv_icm42600_gyro_update_scan_mode(struct iio_dev *indio_dev,
+ 
+ static int inv_icm42600_gyro_read_sensor(struct inv_icm42600_state *st,
+ 					 struct iio_chan_spec const *chan,
+-					 int16_t *val)
++					 s16 *val)
+ {
+ 	struct device *dev = regmap_get_device(st->map);
+ 	struct inv_icm42600_sensor_conf conf = INV_ICM42600_SENSOR_CONF_INIT;
+@@ -179,7 +179,7 @@ static int inv_icm42600_gyro_read_sensor(struct inv_icm42600_state *st,
+ 	if (ret)
+ 		goto exit;
+ 
+-	*val = (int16_t)be16_to_cpup(data);
++	*val = (s16)be16_to_cpup(data);
+ 	if (*val == INV_ICM42600_DATA_INVALID)
+ 		ret = -EINVAL;
+ exit:
+@@ -399,11 +399,11 @@ static int inv_icm42600_gyro_read_offset(struct inv_icm42600_state *st,
+ 					 int *val, int *val2)
+ {
+ 	struct device *dev = regmap_get_device(st->map);
+-	int64_t val64;
+-	int32_t bias;
++	s64 val64;
++	s32 bias;
+ 	unsigned int reg;
+-	int16_t offset;
+-	uint8_t data[2];
++	s16 offset;
++	u8 data[2];
+ 	int ret;
+ 
+ 	if (chan->type != IIO_ANGL_VEL)
+@@ -457,7 +457,7 @@ static int inv_icm42600_gyro_read_offset(struct inv_icm42600_state *st,
+ 	 * result in nano (1000000000)
+ 	 * (offset * 64 * Pi * 1000000000) / (2048 * 180)
+ 	 */
+-	val64 = (int64_t)offset * 64LL * 3141592653LL;
++	val64 = (s64)offset * 64LL * 3141592653LL;
+ 	/* for rounding, add + or - divisor (2048 * 180) divided by 2 */
+ 	if (val64 >= 0)
+ 		val64 += 2048 * 180 / 2;
+@@ -475,9 +475,9 @@ static int inv_icm42600_gyro_write_offset(struct inv_icm42600_state *st,
+ 					  int val, int val2)
+ {
+ 	struct device *dev = regmap_get_device(st->map);
+-	int64_t val64, min, max;
++	s64 val64, min, max;
+ 	unsigned int reg, regval;
+-	int16_t offset;
++	s16 offset;
+ 	int ret;
+ 
+ 	if (chan->type != IIO_ANGL_VEL)
+@@ -498,11 +498,11 @@ static int inv_icm42600_gyro_write_offset(struct inv_icm42600_state *st,
+ 	}
+ 
+ 	/* inv_icm42600_gyro_calibbias: min - step - max in nano */
+-	min = (int64_t)inv_icm42600_gyro_calibbias[0] * 1000000000LL +
+-	      (int64_t)inv_icm42600_gyro_calibbias[1];
+-	max = (int64_t)inv_icm42600_gyro_calibbias[4] * 1000000000LL +
+-	      (int64_t)inv_icm42600_gyro_calibbias[5];
+-	val64 = (int64_t)val * 1000000000LL + (int64_t)val2;
++	min = (s64)inv_icm42600_gyro_calibbias[0] * 1000000000LL +
++	      (s64)inv_icm42600_gyro_calibbias[1];
++	max = (s64)inv_icm42600_gyro_calibbias[4] * 1000000000LL +
++	      (s64)inv_icm42600_gyro_calibbias[5];
++	val64 = (s64)val * 1000000000LL + (s64)val2;
+ 	if (val64 < min || val64 > max)
+ 		return -EINVAL;
+ 
+@@ -577,7 +577,7 @@ static int inv_icm42600_gyro_read_raw(struct iio_dev *indio_dev,
+ 				      int *val, int *val2, long mask)
+ {
+ 	struct inv_icm42600_state *st = iio_device_get_drvdata(indio_dev);
+-	int16_t data;
++	s16 data;
+ 	int ret;
+ 
+ 	switch (chan->type) {
+@@ -806,10 +806,11 @@ int inv_icm42600_gyro_parse_fifo(struct iio_dev *indio_dev)
+ 	ssize_t i, size;
+ 	unsigned int no;
+ 	const void *accel, *gyro, *timestamp;
+-	const int8_t *temp;
++	const s8 *temp;
+ 	unsigned int odr;
+-	int64_t ts_val;
+-	struct inv_icm42600_gyro_buffer buffer;
++	s64 ts_val;
++	/* buffer is copied to userspace, zeroing it to avoid any data leak */
++	struct inv_icm42600_gyro_buffer buffer = { };
+ 
+ 	/* parse all fifo packets */
+ 	for (i = 0, no = 0; i < st->fifo.count; i += size, ++no) {
+@@ -828,8 +829,6 @@ int inv_icm42600_gyro_parse_fifo(struct iio_dev *indio_dev)
+ 			inv_sensors_timestamp_apply_odr(ts, st->fifo.period,
+ 							st->fifo.nb.total, no);
+ 
+-		/* buffer is copied to userspace, zeroing it to avoid any data leak */
+-		memset(&buffer, 0, sizeof(buffer));
+ 		memcpy(&buffer.gyro, gyro, sizeof(buffer.gyro));
+ 		/* convert 8 bits FIFO temperature in high resolution format */
+ 		buffer.temp = temp ? (*temp * 64) : 0;
+diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_temp.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_temp.c
+index 91f0f381082bda..51430b4f5e51b6 100644
+--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_temp.c
++++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_temp.c
+@@ -13,7 +13,7 @@
+ #include "inv_icm42600.h"
+ #include "inv_icm42600_temp.h"
+ 
+-static int inv_icm42600_temp_read(struct inv_icm42600_state *st, int16_t *temp)
++static int inv_icm42600_temp_read(struct inv_icm42600_state *st, s16 *temp)
+ {
+ 	struct device *dev = regmap_get_device(st->map);
+ 	__be16 *raw;
+@@ -31,9 +31,13 @@ static int inv_icm42600_temp_read(struct inv_icm42600_state *st, int16_t *temp)
+ 	if (ret)
+ 		goto exit;
+ 
+-	*temp = (int16_t)be16_to_cpup(raw);
++	*temp = (s16)be16_to_cpup(raw);
++	/*
++	 * Temperature data is invalid if both accel and gyro are off.
++	 * Return -EBUSY in this case.
++	 */
+ 	if (*temp == INV_ICM42600_DATA_INVALID)
+-		ret = -EINVAL;
++		ret = -EBUSY;
+ 
+ exit:
+ 	mutex_unlock(&st->lock);
+@@ -48,7 +52,7 @@ int inv_icm42600_temp_read_raw(struct iio_dev *indio_dev,
+ 			       int *val, int *val2, long mask)
+ {
+ 	struct inv_icm42600_state *st = iio_device_get_drvdata(indio_dev);
+-	int16_t temp;
++	s16 temp;
+ 	int ret;
+ 
+ 	if (chan->type != IIO_TEMP)
+diff --git a/drivers/iio/light/adjd_s311.c b/drivers/iio/light/adjd_s311.c
+index c1b43053fbc73d..cf96e3dd8bc675 100644
+--- a/drivers/iio/light/adjd_s311.c
++++ b/drivers/iio/light/adjd_s311.c
+@@ -56,7 +56,7 @@ struct adjd_s311_data {
+ 	struct i2c_client *client;
+ 	struct {
+ 		s16 chans[4];
+-		s64 ts __aligned(8);
++		aligned_s64 ts;
+ 	} scan;
+ };
+ 
+diff --git a/drivers/iio/light/as73211.c b/drivers/iio/light/as73211.c
+index 11fbdcdd26d656..36f6f2eb53b2cd 100644
+--- a/drivers/iio/light/as73211.c
++++ b/drivers/iio/light/as73211.c
+@@ -642,8 +642,8 @@ static irqreturn_t as73211_trigger_handler(int irq __always_unused, void *p)
+ 	struct as73211_data *data = iio_priv(indio_dev);
+ 	struct {
+ 		__le16 chan[4];
+-		s64 ts __aligned(8);
+-	} scan;
++		aligned_s64 ts;
++	} scan = { };
+ 	int data_result, ret;
+ 
+ 	mutex_lock(&data->mutex);
+diff --git a/drivers/iio/light/bh1745.c b/drivers/iio/light/bh1745.c
+index a025e279df0747..617d098d202ad2 100644
+--- a/drivers/iio/light/bh1745.c
++++ b/drivers/iio/light/bh1745.c
+@@ -743,7 +743,7 @@ static irqreturn_t bh1745_trigger_handler(int interrupt, void *p)
+ 	struct bh1745_data *data = iio_priv(indio_dev);
+ 	struct {
+ 		u16 chans[4];
+-		s64 timestamp __aligned(8);
++		aligned_s64 timestamp;
+ 	} scan;
+ 	u16 value;
+ 	int ret;
+diff --git a/drivers/iio/light/isl29125.c b/drivers/iio/light/isl29125.c
+index b176bf4c884ba0..326dc39e79291d 100644
+--- a/drivers/iio/light/isl29125.c
++++ b/drivers/iio/light/isl29125.c
+@@ -54,7 +54,7 @@ struct isl29125_data {
+ 	/* Ensure timestamp is naturally aligned */
+ 	struct {
+ 		u16 chans[3];
+-		s64 timestamp __aligned(8);
++		aligned_s64 timestamp;
+ 	} scan;
+ };
+ 
+diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c
+index 640a5d3aa2c6e7..8c0b616815b22a 100644
+--- a/drivers/iio/light/ltr501.c
++++ b/drivers/iio/light/ltr501.c
+@@ -1285,7 +1285,7 @@ static irqreturn_t ltr501_trigger_handler(int irq, void *p)
+ 	struct ltr501_data *data = iio_priv(indio_dev);
+ 	struct {
+ 		u16 channels[3];
+-		s64 ts __aligned(8);
++		aligned_s64 ts;
+ 	} scan;
+ 	__le16 als_buf[2];
+ 	u8 mask = 0;
+diff --git a/drivers/iio/light/max44000.c b/drivers/iio/light/max44000.c
+index b935976871a6f0..e8b76768013320 100644
+--- a/drivers/iio/light/max44000.c
++++ b/drivers/iio/light/max44000.c
+@@ -78,7 +78,7 @@ struct max44000_data {
+ 	/* Ensure naturally aligned timestamp */
+ 	struct {
+ 		u16 channels[2];
+-		s64 ts __aligned(8);
++		aligned_s64 ts;
+ 	} scan;
+ };
+ 
+diff --git a/drivers/iio/light/rohm-bu27034.c b/drivers/iio/light/rohm-bu27034.c
+index 76711c3cdf7c02..29da3313addbd4 100644
+--- a/drivers/iio/light/rohm-bu27034.c
++++ b/drivers/iio/light/rohm-bu27034.c
+@@ -205,7 +205,7 @@ struct bu27034_data {
+ 	struct {
+ 		u32 mlux;
+ 		__le16 channels[BU27034_NUM_HW_DATA_CHANS];
+-		s64 ts __aligned(8);
++		aligned_s64 ts;
+ 	} scan;
+ };
+ 
+diff --git a/drivers/iio/light/rpr0521.c b/drivers/iio/light/rpr0521.c
+index 78c08e0bd0776a..0a5408c12cc0e1 100644
+--- a/drivers/iio/light/rpr0521.c
++++ b/drivers/iio/light/rpr0521.c
+@@ -203,7 +203,7 @@ struct rpr0521_data {
+ 	struct {
+ 		__le16 channels[3];
+ 		u8 garbage;
+-		s64 ts __aligned(8);
++		aligned_s64 ts;
+ 	} scan;
+ };
+ 
+diff --git a/drivers/iio/light/st_uvis25.h b/drivers/iio/light/st_uvis25.h
+index 283086887caf5d..1f93e3dc45c2b0 100644
+--- a/drivers/iio/light/st_uvis25.h
++++ b/drivers/iio/light/st_uvis25.h
+@@ -30,7 +30,7 @@ struct st_uvis25_hw {
+ 	/* Ensure timestamp is naturally aligned */
+ 	struct {
+ 		u8 chan;
+-		s64 ts __aligned(8);
++		aligned_s64 ts;
+ 	} scan;
+ };
+ 
+diff --git a/drivers/iio/light/tcs3414.c b/drivers/iio/light/tcs3414.c
+index 4fecdf10aeb197..884e43e4cda4af 100644
+--- a/drivers/iio/light/tcs3414.c
++++ b/drivers/iio/light/tcs3414.c
+@@ -56,7 +56,7 @@ struct tcs3414_data {
+ 	/* Ensure timestamp is naturally aligned */
+ 	struct {
+ 		u16 chans[4];
+-		s64 timestamp __aligned(8);
++		aligned_s64 timestamp;
+ 	} scan;
+ };
+ 
+diff --git a/drivers/iio/light/tcs3472.c b/drivers/iio/light/tcs3472.c
+index 04452b4664f306..afc90b5bb0eca0 100644
+--- a/drivers/iio/light/tcs3472.c
++++ b/drivers/iio/light/tcs3472.c
+@@ -67,7 +67,7 @@ struct tcs3472_data {
+ 	/* Ensure timestamp is naturally aligned */
+ 	struct {
+ 		u16 chans[4];
+-		s64 timestamp __aligned(8);
++		aligned_s64 timestamp;
+ 	} scan;
+ };
+ 
+diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c
+index a8b97b9b046182..9e46aa65acef77 100644
+--- a/drivers/iio/pressure/bmp280-core.c
++++ b/drivers/iio/pressure/bmp280-core.c
+@@ -2727,11 +2727,12 @@ int bmp280_common_probe(struct device *dev,
+ 
+ 	/* Bring chip out of reset if there is an assigned GPIO line */
+ 	gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
++	if (IS_ERR(gpiod))
++		return dev_err_probe(dev, PTR_ERR(gpiod), "failed to get reset GPIO\n");
++
+ 	/* Deassert the signal */
+-	if (gpiod) {
+-		dev_info(dev, "release reset\n");
+-		gpiod_set_value(gpiod, 0);
+-	}
++	dev_info(dev, "release reset\n");
++	gpiod_set_value(gpiod, 0);
+ 
+ 	data->regmap = regmap;
+ 
+diff --git a/drivers/iio/proximity/isl29501.c b/drivers/iio/proximity/isl29501.c
+index dc66ca9bba6b46..fde9bdd14506b0 100644
+--- a/drivers/iio/proximity/isl29501.c
++++ b/drivers/iio/proximity/isl29501.c
+@@ -938,12 +938,18 @@ static irqreturn_t isl29501_trigger_handler(int irq, void *p)
+ 	struct iio_dev *indio_dev = pf->indio_dev;
+ 	struct isl29501_private *isl29501 = iio_priv(indio_dev);
+ 	const unsigned long *active_mask = indio_dev->active_scan_mask;
+-	u32 buffer[4] __aligned(8) = {}; /* 1x16-bit + naturally aligned ts */
+-
+-	if (test_bit(ISL29501_DISTANCE_SCAN_INDEX, active_mask))
+-		isl29501_register_read(isl29501, REG_DISTANCE, buffer);
++	u32 value;
++	struct {
++		u16 data;
++		aligned_s64 ts;
++	} scan = { };
++
++	if (test_bit(ISL29501_DISTANCE_SCAN_INDEX, active_mask)) {
++		isl29501_register_read(isl29501, REG_DISTANCE, &value);
++		scan.data = value;
++	}
+ 
+-	iio_push_to_buffers_with_timestamp(indio_dev, buffer, pf->timestamp);
++	iio_push_to_buffers_with_timestamp(indio_dev, &scan, pf->timestamp);
+ 	iio_trigger_notify_done(indio_dev->trig);
+ 
+ 	return IRQ_HANDLED;
+diff --git a/drivers/iio/temperature/maxim_thermocouple.c b/drivers/iio/temperature/maxim_thermocouple.c
+index 555a61e2f3fdd1..44fba61ccfe27d 100644
+--- a/drivers/iio/temperature/maxim_thermocouple.c
++++ b/drivers/iio/temperature/maxim_thermocouple.c
+@@ -12,6 +12,7 @@
+ #include <linux/mutex.h>
+ #include <linux/err.h>
+ #include <linux/spi/spi.h>
++#include <linux/types.h>
+ #include <linux/iio/iio.h>
+ #include <linux/iio/sysfs.h>
+ #include <linux/iio/trigger.h>
+@@ -122,8 +123,15 @@ struct maxim_thermocouple_data {
+ 	struct spi_device *spi;
+ 	const struct maxim_thermocouple_chip *chip;
+ 	char tc_type;
+-
+-	u8 buffer[16] __aligned(IIO_DMA_MINALIGN);
++	/* Buffer for reading up to 2 hardware channels. */
++	struct {
++		union {
++			__be16 raw16;
++			__be32 raw32;
++			__be16 raw[2];
++		};
++		aligned_s64 timestamp;
++	} buffer __aligned(IIO_DMA_MINALIGN);
+ };
+ 
+ static int maxim_thermocouple_read(struct maxim_thermocouple_data *data,
+@@ -131,18 +139,16 @@ static int maxim_thermocouple_read(struct maxim_thermocouple_data *data,
+ {
+ 	unsigned int storage_bytes = data->chip->read_size;
+ 	unsigned int shift = chan->scan_type.shift + (chan->address * 8);
+-	__be16 buf16;
+-	__be32 buf32;
+ 	int ret;
+ 
+ 	switch (storage_bytes) {
+ 	case 2:
+-		ret = spi_read(data->spi, (void *)&buf16, storage_bytes);
+-		*val = be16_to_cpu(buf16);
++		ret = spi_read(data->spi, &data->buffer.raw16, storage_bytes);
++		*val = be16_to_cpu(data->buffer.raw16);
+ 		break;
+ 	case 4:
+-		ret = spi_read(data->spi, (void *)&buf32, storage_bytes);
+-		*val = be32_to_cpu(buf32);
++		ret = spi_read(data->spi, &data->buffer.raw32, storage_bytes);
++		*val = be32_to_cpu(data->buffer.raw32);
+ 		break;
+ 	default:
+ 		ret = -EINVAL;
+@@ -167,9 +173,9 @@ static irqreturn_t maxim_thermocouple_trigger_handler(int irq, void *private)
+ 	struct maxim_thermocouple_data *data = iio_priv(indio_dev);
+ 	int ret;
+ 
+-	ret = spi_read(data->spi, data->buffer, data->chip->read_size);
++	ret = spi_read(data->spi, data->buffer.raw, data->chip->read_size);
+ 	if (!ret) {
+-		iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
++		iio_push_to_buffers_with_timestamp(indio_dev, &data->buffer,
+ 						   iio_get_time_ns(indio_dev));
+ 	}
+ 
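
    The maxim_thermocouple change replaces a raw 16-byte array with a
    union-typed buffer so the 2-byte and 4-byte raw reads and the 2-channel
    triggered capture all land in one naturally aligned, DMA-safe object.
    A layout sketch with plain userspace types standing in for the kernel's
    __be16/__be32/aligned_s64 (a rough analogue, not the driver's exact
    definition):

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    struct maxim_buffer {
        union {
            uint16_t raw16;      /* 2-byte register read */
            uint32_t raw32;      /* 4-byte register read */
            uint16_t raw[2];     /* two captured channels */
        };
        int64_t timestamp __attribute__((aligned(8)));
    };

    int main(void)
    {
        /* all raw views alias offset 0; timestamp stays 8-byte aligned */
        printf("raw at %zu, timestamp at %zu, total %zu bytes\n",
               offsetof(struct maxim_buffer, raw),
               offsetof(struct maxim_buffer, timestamp),
               sizeof(struct maxim_buffer));
        return 0;
    }
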
+diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+index 4a3ce61a3bba6a..b222bf4f38e1c7 100644
+--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
++++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+@@ -1874,7 +1874,6 @@ int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
+ 	struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
+ 					       ib_srq);
+ 	struct bnxt_re_dev *rdev = srq->rdev;
+-	int rc;
+ 
+ 	switch (srq_attr_mask) {
+ 	case IB_SRQ_MAX_WR:
+@@ -1886,11 +1885,8 @@ int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
+ 			return -EINVAL;
+ 
+ 		srq->qplib_srq.threshold = srq_attr->srq_limit;
+-		rc = bnxt_qplib_modify_srq(&rdev->qplib_res, &srq->qplib_srq);
+-		if (rc) {
+-			ibdev_err(&rdev->ibdev, "Modify HW SRQ failed!");
+-			return rc;
+-		}
++		bnxt_qplib_srq_arm_db(&srq->qplib_srq.dbinfo, srq->qplib_srq.threshold);
++
+ 		/* On success, update the shadow */
+ 		srq->srq_limit = srq_attr->srq_limit;
+ 		/* No need to Build and send response back to udata */
+diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
+index 9bd837a5b8a1ad..b213ecca2854d2 100644
+--- a/drivers/infiniband/hw/bnxt_re/main.c
++++ b/drivers/infiniband/hw/bnxt_re/main.c
+@@ -1615,6 +1615,28 @@ static void bnxt_re_free_nqr_mem(struct bnxt_re_dev *rdev)
+ 	rdev->nqr = NULL;
+ }
+ 
++/* When DEL_GID fails, driver is not freeing GID ctx memory.
++ * To avoid the memory leak, free the memory during unload
++ */
++static void bnxt_re_free_gid_ctx(struct bnxt_re_dev *rdev)
++{
++	struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
++	struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
++	int i;
++
++	if (!sgid_tbl->active)
++		return;
++
++	ctx_tbl = sgid_tbl->ctx;
++	for (i = 0; i < sgid_tbl->max; i++) {
++		if (sgid_tbl->hw_id[i] == 0xFFFF)
++			continue;
++
++		ctx = ctx_tbl[i];
++		kfree(ctx);
++	}
++}
++
+ static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev, u8 op_type)
+ {
+ 	u8 type;
+@@ -1623,6 +1645,7 @@ static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev, u8 op_type)
+ 	if (test_and_clear_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags))
+ 		cancel_delayed_work_sync(&rdev->worker);
+ 
++	bnxt_re_free_gid_ctx(rdev);
+ 	if (test_and_clear_bit(BNXT_RE_FLAG_RESOURCES_INITIALIZED,
+ 			       &rdev->flags))
+ 		bnxt_re_cleanup_res(rdev);
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+index 7436ce55157972..0f50c1ffbe0163 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+@@ -704,9 +704,7 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
+ 	srq->dbinfo.db = srq->dpi->dbr;
+ 	srq->dbinfo.max_slot = 1;
+ 	srq->dbinfo.priv_db = res->dpi_tbl.priv_db;
+-	if (srq->threshold)
+-		bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA);
+-	srq->arm_req = false;
++	bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA);
+ 
+ 	return 0;
+ fail:
+@@ -716,24 +714,6 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
+ 	return rc;
+ }
+ 
+-int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
+-			  struct bnxt_qplib_srq *srq)
+-{
+-	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
+-	u32 count;
+-
+-	count = __bnxt_qplib_get_avail(srq_hwq);
+-	if (count > srq->threshold) {
+-		srq->arm_req = false;
+-		bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
+-	} else {
+-		/* Deferred arming */
+-		srq->arm_req = true;
+-	}
+-
+-	return 0;
+-}
+-
+ int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
+ 			 struct bnxt_qplib_srq *srq)
+ {
+@@ -775,7 +755,6 @@ int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
+ 	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
+ 	struct rq_wqe *srqe;
+ 	struct sq_sge *hw_sge;
+-	u32 count = 0;
+ 	int i, next;
+ 
+ 	spin_lock(&srq_hwq->lock);
+@@ -807,15 +786,8 @@ int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
+ 
+ 	bnxt_qplib_hwq_incr_prod(&srq->dbinfo, srq_hwq, srq->dbinfo.max_slot);
+ 
+-	spin_lock(&srq_hwq->lock);
+-	count = __bnxt_qplib_get_avail(srq_hwq);
+-	spin_unlock(&srq_hwq->lock);
+ 	/* Ring DB */
+ 	bnxt_qplib_ring_prod_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ);
+-	if (srq->arm_req == true && count > srq->threshold) {
+-		srq->arm_req = false;
+-		bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
+-	}
+ 
+ 	return 0;
+ }
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+index 6f02954eb1429f..fd4f9fada46a61 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
++++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+@@ -521,8 +521,6 @@ int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
+ 			 srqn_handler_t srq_handler);
+ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
+ 			  struct bnxt_qplib_srq *srq);
+-int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
+-			  struct bnxt_qplib_srq *srq);
+ int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
+ 			 struct bnxt_qplib_srq *srq);
+ void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
+index 02922a0987ad7a..b785d9e7774c76 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
+@@ -121,6 +121,7 @@ static int __alloc_pbl(struct bnxt_qplib_res *res,
+ 	pbl->pg_arr = vmalloc_array(pages, sizeof(void *));
+ 	if (!pbl->pg_arr)
+ 		return -ENOMEM;
++	memset(pbl->pg_arr, 0, pages * sizeof(void *));
+ 
+ 	pbl->pg_map_arr = vmalloc_array(pages, sizeof(dma_addr_t));
+ 	if (!pbl->pg_map_arr) {
+@@ -128,6 +129,7 @@ static int __alloc_pbl(struct bnxt_qplib_res *res,
+ 		pbl->pg_arr = NULL;
+ 		return -ENOMEM;
+ 	}
++	memset(pbl->pg_map_arr, 0, pages * sizeof(dma_addr_t));
+ 	pbl->pg_count = 0;
+ 	pbl->pg_size = sginfo->pgsize;
+ 
+diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.c b/drivers/infiniband/hw/erdma/erdma_verbs.c
+index e56ba86d460e0a..a50fb03c96431b 100644
+--- a/drivers/infiniband/hw/erdma/erdma_verbs.c
++++ b/drivers/infiniband/hw/erdma/erdma_verbs.c
+@@ -991,7 +991,9 @@ int erdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
+ 		if (ret)
+ 			goto err_out_cmd;
+ 	} else {
+-		init_kernel_qp(dev, qp, attrs);
++		ret = init_kernel_qp(dev, qp, attrs);
++		if (ret)
++			goto err_out_xa;
+ 	}
+ 
+ 	qp->attrs.max_send_sge = attrs->cap.max_send_sge;
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+index 53fe0ef3883d21..6a6daca9f606cb 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -3028,7 +3028,7 @@ static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev)
+ 	if (!hr_dev->is_vf)
+ 		hns_roce_free_link_table(hr_dev);
+ 
+-	if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP09)
++	if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
+ 		free_dip_entry(hr_dev);
+ }
+ 
+@@ -5498,7 +5498,7 @@ static int hns_roce_v2_query_srqc(struct hns_roce_dev *hr_dev, u32 srqn,
+ 	return ret;
+ }
+ 
+-static int hns_roce_v2_query_sccc(struct hns_roce_dev *hr_dev, u32 qpn,
++static int hns_roce_v2_query_sccc(struct hns_roce_dev *hr_dev, u32 sccn,
+ 				  void *buffer)
+ {
+ 	struct hns_roce_v2_scc_context *context;
+@@ -5510,7 +5510,7 @@ static int hns_roce_v2_query_sccc(struct hns_roce_dev *hr_dev, u32 qpn,
+ 		return PTR_ERR(mailbox);
+ 
+ 	ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, HNS_ROCE_CMD_QUERY_SCCC,
+-				qpn);
++				sccn);
+ 	if (ret)
+ 		goto out;
+ 
+diff --git a/drivers/infiniband/hw/hns/hns_roce_restrack.c b/drivers/infiniband/hw/hns/hns_roce_restrack.c
+index f637b73b946e44..230187dda6a07b 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_restrack.c
++++ b/drivers/infiniband/hw/hns/hns_roce_restrack.c
+@@ -100,6 +100,7 @@ int hns_roce_fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ib_qp)
+ 		struct hns_roce_v2_qp_context qpc;
+ 		struct hns_roce_v2_scc_context sccc;
+ 	} context = {};
++	u32 sccn = hr_qp->qpn;
+ 	int ret;
+ 
+ 	if (!hr_dev->hw->query_qpc)
+@@ -116,7 +117,13 @@ int hns_roce_fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ib_qp)
+ 	    !hr_dev->hw->query_sccc)
+ 		goto out;
+ 
+-	ret = hr_dev->hw->query_sccc(hr_dev, hr_qp->qpn, &context.sccc);
++	if (hr_qp->cong_type == CONG_TYPE_DIP) {
++		if (!hr_qp->dip)
++			goto out;
++		sccn = hr_qp->dip->dip_idx;
++	}
++
++	ret = hr_dev->hw->query_sccc(hr_dev, sccn, &context.sccc);
+ 	if (ret)
+ 		ibdev_warn_ratelimited(&hr_dev->ib_dev,
+ 				       "failed to query SCCC, ret = %d.\n",
+diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
+index 8cc64ceeb3569b..726b67e6330144 100644
+--- a/drivers/infiniband/sw/rxe/rxe_net.c
++++ b/drivers/infiniband/sw/rxe/rxe_net.c
+@@ -345,33 +345,15 @@ int rxe_prepare(struct rxe_av *av, struct rxe_pkt_info *pkt,
+ 
+ static void rxe_skb_tx_dtor(struct sk_buff *skb)
+ {
+-	struct net_device *ndev = skb->dev;
+-	struct rxe_dev *rxe;
+-	unsigned int qp_index;
+-	struct rxe_qp *qp;
++	struct rxe_qp *qp = skb->sk->sk_user_data;
+ 	int skb_out;
+ 
+-	rxe = rxe_get_dev_from_net(ndev);
+-	if (!rxe && is_vlan_dev(ndev))
+-		rxe = rxe_get_dev_from_net(vlan_dev_real_dev(ndev));
+-	if (WARN_ON(!rxe))
+-		return;
+-
+-	qp_index = (int)(uintptr_t)skb->sk->sk_user_data;
+-	if (!qp_index)
+-		return;
+-
+-	qp = rxe_pool_get_index(&rxe->qp_pool, qp_index);
+-	if (!qp)
+-		goto put_dev;
+-
+ 	skb_out = atomic_dec_return(&qp->skb_out);
+-	if (qp->need_req_skb && skb_out < RXE_INFLIGHT_SKBS_PER_QP_LOW)
++	if (unlikely(qp->need_req_skb &&
++		skb_out < RXE_INFLIGHT_SKBS_PER_QP_LOW))
+ 		rxe_sched_task(&qp->send_task);
+ 
+ 	rxe_put(qp);
+-put_dev:
+-	ib_device_put(&rxe->ib_dev);
+ 	sock_put(skb->sk);
+ }
+ 
+@@ -383,6 +365,7 @@ static int rxe_send(struct sk_buff *skb, struct rxe_pkt_info *pkt)
+ 	sock_hold(sk);
+ 	skb->sk = sk;
+ 	skb->destructor = rxe_skb_tx_dtor;
++	rxe_get(pkt->qp);
+ 	atomic_inc(&pkt->qp->skb_out);
+ 
+ 	if (skb->protocol == htons(ETH_P_IP))
+@@ -405,6 +388,7 @@ static int rxe_loopback(struct sk_buff *skb, struct rxe_pkt_info *pkt)
+ 	sock_hold(sk);
+ 	skb->sk = sk;
+ 	skb->destructor = rxe_skb_tx_dtor;
++	rxe_get(pkt->qp);
+ 	atomic_inc(&pkt->qp->skb_out);
+ 
+ 	if (skb->protocol == htons(ETH_P_IP))
+@@ -497,6 +481,9 @@ struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
+ 		goto out;
+ 	}
+ 
++	/* Add time stamp to skb. */
++	skb->tstamp = ktime_get();
++
+ 	skb_reserve(skb, hdr_len + LL_RESERVED_SPACE(ndev));
+ 
+ 	/* FIXME: hold reference to this netdev until life of this skb. */
+diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
+index 8b805b16136e5f..88fa62cd9ce51f 100644
+--- a/drivers/infiniband/sw/rxe/rxe_qp.c
++++ b/drivers/infiniband/sw/rxe/rxe_qp.c
+@@ -244,7 +244,7 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
+ 	err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk);
+ 	if (err < 0)
+ 		return err;
+-	qp->sk->sk->sk_user_data = (void *)(uintptr_t)qp->elem.index;
++	qp->sk->sk->sk_user_data = qp;
+ 
+ 	/* pick a source UDP port number for this QP based on
+ 	 * the source QPN. this spreads traffic for different QPs
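
    The rxe hunks above move QP ownership into the skb itself: the sender
    takes a reference before attaching the QP to sk_user_data, and the
    destructor, which can run long after the QP index could have been
    recycled, drops that same reference instead of re-resolving the QP by
    index and device. A toy sketch of the reference pairing, with
    hypothetical helper names:

    #include <stdio.h>

    struct qp {
        int refcnt;
        int skb_out;
    };

    static void send_skb(struct qp *qp)
    {
        qp->refcnt++;      /* rxe_get(pkt->qp) before the skb leaves */
        qp->skb_out++;
    }

    static void skb_destructor(struct qp *qp)
    {
        qp->skb_out--;
        qp->refcnt--;      /* rxe_put(qp): QP stays valid until here */
    }

    int main(void)
    {
        struct qp qp = { .refcnt = 1 };

        send_skb(&qp);
        skb_destructor(&qp);
        printf("refcnt=%d skb_out=%d\n", qp.refcnt, qp.skb_out);
        return 0;
    }
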
+diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
+index ff11cd7e5c068c..f5b544e0f230bc 100644
+--- a/drivers/iommu/amd/init.c
++++ b/drivers/iommu/amd/init.c
+@@ -3598,7 +3598,7 @@ static int __init parse_ivrs_acpihid(char *str)
+ {
+ 	u32 seg = 0, bus, dev, fn;
+ 	char *hid, *uid, *p, *addr;
+-	char acpiid[ACPIID_LEN] = {0};
++	char acpiid[ACPIID_LEN + 1] = { }; /* size with NULL terminator */
+ 	int i;
+ 
+ 	addr = strchr(str, '@');
+@@ -3624,7 +3624,7 @@ static int __init parse_ivrs_acpihid(char *str)
+ 	/* We have the '@', make it the terminator to get just the acpiid */
+ 	*addr++ = 0;
+ 
+-	if (strlen(str) > ACPIID_LEN + 1)
++	if (strlen(str) > ACPIID_LEN)
+ 		goto not_found;
+ 
+ 	if (sscanf(str, "=%s", acpiid) != 1)
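
    The ivrs_acpihid fix above is a textbook off-by-one: sscanf("%s")
    writes the payload plus a terminating NUL, so the destination needs
    ACPIID_LEN + 1 bytes, and the length check must reject anything longer
    than ACPIID_LEN rather than ACPIID_LEN + 1. A userspace sketch (the
    ACPIID_LEN value and input string here are illustrative only):

    #include <stdio.h>
    #include <string.h>

    #define ACPIID_LEN (16 + 1 + 8)  /* hid + ':' + uid, sketch value */

    int main(void)
    {
        const char *str = "MSFT1234:00";
        char acpiid[ACPIID_LEN + 1] = { 0 };  /* payload + NUL terminator */

        /* reject anything that cannot fit: > ACPIID_LEN, since %s will
         * store strlen(str) bytes plus the terminator */
        if (strlen(str) > ACPIID_LEN)
            return 1;
        sscanf(str, "%s", acpiid);
        printf("%s\n", acpiid);
        return 0;
    }
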
+diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+index 780e2d9e4ea819..172ce203019712 100644
+--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
++++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+@@ -2778,9 +2778,9 @@ static void arm_smmu_attach_commit(struct arm_smmu_attach_state *state)
+ 		/* ATS is being switched off, invalidate the entire ATC */
+ 		arm_smmu_atc_inv_master(master, IOMMU_NO_PASID);
+ 	}
+-	master->ats_enabled = state->ats_enabled;
+ 
+ 	arm_smmu_remove_master_domain(master, state->old_domain, state->ssid);
++	master->ats_enabled = state->ats_enabled;
+ }
+ 
+ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
+diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
+index 78c975d7cd5f42..b0ca9c9effe9aa 100644
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -253,17 +253,35 @@ MODULE_PARM_DESC(max_read_size, "Maximum size of a read request");
+ static unsigned int max_write_size = 0;
+ module_param(max_write_size, uint, 0644);
+ MODULE_PARM_DESC(max_write_size, "Maximum size of a write request");
+-static unsigned get_max_request_size(struct crypt_config *cc, bool wrt)
++
++static unsigned get_max_request_sectors(struct dm_target *ti, struct bio *bio)
+ {
++	struct crypt_config *cc = ti->private;
+ 	unsigned val, sector_align;
+-	val = !wrt ? READ_ONCE(max_read_size) : READ_ONCE(max_write_size);
+-	if (likely(!val))
+-		val = !wrt ? DM_CRYPT_DEFAULT_MAX_READ_SIZE : DM_CRYPT_DEFAULT_MAX_WRITE_SIZE;
+-	if (wrt || cc->used_tag_size) {
+-		if (unlikely(val > BIO_MAX_VECS << PAGE_SHIFT))
+-			val = BIO_MAX_VECS << PAGE_SHIFT;
+-	}
+-	sector_align = max(bdev_logical_block_size(cc->dev->bdev), (unsigned)cc->sector_size);
++	bool wrt = op_is_write(bio_op(bio));
++
++	if (wrt) {
++		/*
++		 * For zoned devices, splitting write operations creates the
++		 * risk of deadlocking queue freeze operations with zone write
++		 * plugging BIO work when the remainder of a split BIO is
++		 * issued. So always allow the entire BIO to proceed.
++		 */
++		if (ti->emulate_zone_append)
++			return bio_sectors(bio);
++
++		val = min_not_zero(READ_ONCE(max_write_size),
++				   DM_CRYPT_DEFAULT_MAX_WRITE_SIZE);
++	} else {
++		val = min_not_zero(READ_ONCE(max_read_size),
++				   DM_CRYPT_DEFAULT_MAX_READ_SIZE);
++	}
++
++	if (wrt || cc->used_tag_size)
++		val = min(val, BIO_MAX_VECS << PAGE_SHIFT);
++
++	sector_align = max(bdev_logical_block_size(cc->dev->bdev),
++			   (unsigned)cc->sector_size);
+ 	val = round_down(val, sector_align);
+ 	if (unlikely(!val))
+ 		val = sector_align;
+@@ -3517,7 +3535,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
+ 	/*
+ 	 * Check if bio is too large, split as needed.
+ 	 */
+-	max_sectors = get_max_request_size(cc, bio_data_dir(bio) == WRITE);
++	max_sectors = get_max_request_sectors(ti, bio);
+ 	if (unlikely(bio_sectors(bio) > max_sectors))
+ 		dm_accept_partial_bio(bio, max_sectors);
+ 
+@@ -3754,6 +3772,17 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
+ 		max_t(unsigned int, limits->physical_block_size, cc->sector_size);
+ 	limits->io_min = max_t(unsigned int, limits->io_min, cc->sector_size);
+ 	limits->dma_alignment = limits->logical_block_size - 1;
++
++	/*
++	 * For zoned dm-crypt targets, there will be no internal splitting of
++	 * write BIOs to avoid exceeding BIO_MAX_VECS vectors per BIO. But
++	 * without respecting this limit, crypt_alloc_buffer() will trigger a
++	 * BUG(). Avoid this by forcing DM core to split write BIOs to this
++	 * limit.
++	 */
++	if (ti->emulate_zone_append)
++		limits->max_hw_sectors = min(limits->max_hw_sectors,
++					     BIO_MAX_VECS << PAGE_SECTORS_SHIFT);
+ }
+ 
+ static struct target_type crypt_target = {
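
    The min_not_zero() calls above collapse the old "a module parameter of
    0 means use the built-in default" branching into one expression. A GNU
    C userspace sketch of the idiom, with stand-in default values (the
    macro mirrors the kernel's minmax.h semantics, not its exact
    definition):

    #include <stdio.h>

    #define DM_CRYPT_DEFAULT_MAX_READ_SIZE 131072u  /* sketch value */

    #define min_not_zero(x, y) ({                                   \
        typeof(x) __x = (x);                                        \
        typeof(y) __y = (y);                                        \
        __x == 0 ? __y : (__y == 0 ? __x :                          \
                          (__x < __y ? __x : __y)); })

    int main(void)
    {
        unsigned int max_read_size = 0;  /* unset module parameter */

        /* 0 falls through to the default, any other value wins if smaller */
        unsigned int val = min_not_zero(max_read_size,
                                        DM_CRYPT_DEFAULT_MAX_READ_SIZE);

        printf("%u\n", val);
        return 0;
    }
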
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index c5dcd632404cce..a7deeda59a55a7 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -1307,8 +1307,9 @@ static size_t dm_dax_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff,
+ /*
+  * A target may call dm_accept_partial_bio only from the map routine.  It is
+  * allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_* zone management
+- * operations, REQ_OP_ZONE_APPEND (zone append writes) and any bio serviced by
+- * __send_duplicate_bios().
++ * operations, zone append writes (native with REQ_OP_ZONE_APPEND or emulated
++ * with write BIOs flagged with BIO_EMULATES_ZONE_APPEND) and any bio serviced
++ * by __send_duplicate_bios().
+  *
+  * dm_accept_partial_bio informs the dm that the target only wants to process
+  * additional n_sectors sectors of the bio and the rest of the data should be
+@@ -1341,11 +1342,19 @@ void dm_accept_partial_bio(struct bio *bio, unsigned int n_sectors)
+ 	unsigned int bio_sectors = bio_sectors(bio);
+ 
+ 	BUG_ON(dm_tio_flagged(tio, DM_TIO_IS_DUPLICATE_BIO));
+-	BUG_ON(op_is_zone_mgmt(bio_op(bio)));
+-	BUG_ON(bio_op(bio) == REQ_OP_ZONE_APPEND);
+ 	BUG_ON(bio_sectors > *tio->len_ptr);
+ 	BUG_ON(n_sectors > bio_sectors);
+ 
++	if (static_branch_unlikely(&zoned_enabled) &&
++	    unlikely(bdev_is_zoned(bio->bi_bdev))) {
++		enum req_op op = bio_op(bio);
++
++		BUG_ON(op_is_zone_mgmt(op));
++		BUG_ON(op == REQ_OP_WRITE);
++		BUG_ON(op == REQ_OP_WRITE_ZEROES);
++		BUG_ON(op == REQ_OP_ZONE_APPEND);
++	}
++
+ 	*tio->len_ptr -= bio_sectors - n_sectors;
+ 	bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
+ 
+diff --git a/drivers/media/cec/usb/rainshadow/rainshadow-cec.c b/drivers/media/cec/usb/rainshadow/rainshadow-cec.c
+index ee870ea1a88601..6f8d6797c61459 100644
+--- a/drivers/media/cec/usb/rainshadow/rainshadow-cec.c
++++ b/drivers/media/cec/usb/rainshadow/rainshadow-cec.c
+@@ -171,11 +171,12 @@ static irqreturn_t rain_interrupt(struct serio *serio, unsigned char data,
+ {
+ 	struct rain *rain = serio_get_drvdata(serio);
+ 
++	spin_lock(&rain->buf_lock);
+ 	if (rain->buf_len == DATA_SIZE) {
++		spin_unlock(&rain->buf_lock);
+ 		dev_warn_once(rain->dev, "buffer overflow\n");
+ 		return IRQ_HANDLED;
+ 	}
+-	spin_lock(&rain->buf_lock);
+ 	rain->buf_len++;
+ 	rain->buf[rain->buf_wr_idx] = data;
+ 	rain->buf_wr_idx = (rain->buf_wr_idx + 1) & 0xff;
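
    Moving the overflow check under buf_lock makes check-and-increment a
    single critical section; tested outside the lock, two concurrent
    interrupts could both pass the DATA_SIZE check and push past the end
    of the buffer. A userspace analogue with a pthread mutex and a
    hypothetical push_byte() helper:

    #include <pthread.h>
    #include <stdio.h>

    #define DATA_SIZE 256

    static pthread_mutex_t buf_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned char buf[DATA_SIZE];
    static unsigned int buf_len, buf_wr_idx;

    static int push_byte(unsigned char data)
    {
        pthread_mutex_lock(&buf_lock);
        if (buf_len == DATA_SIZE) {          /* checked under the lock */
            pthread_mutex_unlock(&buf_lock);
            return -1;                       /* overflow: drop the byte */
        }
        buf_len++;
        buf[buf_wr_idx] = data;
        buf_wr_idx = (buf_wr_idx + 1) % DATA_SIZE;
        pthread_mutex_unlock(&buf_lock);
        return 0;
    }

    int main(void)
    {
        printf("%d\n", push_byte(0x42));
        return 0;
    }
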
+diff --git a/drivers/media/i2c/hi556.c b/drivers/media/i2c/hi556.c
+index 3c84cf07275f4c..b915ad6e9f4fd3 100644
+--- a/drivers/media/i2c/hi556.c
++++ b/drivers/media/i2c/hi556.c
+@@ -756,21 +756,23 @@ static int hi556_test_pattern(struct hi556 *hi556, u32 pattern)
+ 	int ret;
+ 	u32 val;
+ 
+-	if (pattern) {
+-		ret = hi556_read_reg(hi556, HI556_REG_ISP,
+-				     HI556_REG_VALUE_08BIT, &val);
+-		if (ret)
+-			return ret;
++	ret = hi556_read_reg(hi556, HI556_REG_ISP,
++			     HI556_REG_VALUE_08BIT, &val);
++	if (ret)
++		return ret;
+ 
+-		ret = hi556_write_reg(hi556, HI556_REG_ISP,
+-				      HI556_REG_VALUE_08BIT,
+-				      val | HI556_REG_ISP_TPG_EN);
+-		if (ret)
+-			return ret;
+-	}
++	val = pattern ? (val | HI556_REG_ISP_TPG_EN) :
++		(val & ~HI556_REG_ISP_TPG_EN);
++
++	ret = hi556_write_reg(hi556, HI556_REG_ISP,
++			      HI556_REG_VALUE_08BIT, val);
++	if (ret)
++		return ret;
++
++	val = pattern ? BIT(pattern - 1) : 0;
+ 
+ 	return hi556_write_reg(hi556, HI556_REG_TEST_PATTERN,
+-			       HI556_REG_VALUE_08BIT, pattern);
++			       HI556_REG_VALUE_08BIT, val);
+ }
+ 
+ static int hi556_set_ctrl(struct v4l2_ctrl *ctrl)
+diff --git a/drivers/media/i2c/mt9m114.c b/drivers/media/i2c/mt9m114.c
+index 5f0b0ad8f885f1..c00f9412d08eba 100644
+--- a/drivers/media/i2c/mt9m114.c
++++ b/drivers/media/i2c/mt9m114.c
+@@ -1599,13 +1599,9 @@ static int mt9m114_ifp_get_frame_interval(struct v4l2_subdev *sd,
+ 	if (interval->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ 		return -EINVAL;
+ 
+-	mutex_lock(sensor->ifp.hdl.lock);
+-
+ 	ival->numerator = 1;
+ 	ival->denominator = sensor->ifp.frame_rate;
+ 
+-	mutex_unlock(sensor->ifp.hdl.lock);
+-
+ 	return 0;
+ }
+ 
+@@ -1624,8 +1620,6 @@ static int mt9m114_ifp_set_frame_interval(struct v4l2_subdev *sd,
+ 	if (interval->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ 		return -EINVAL;
+ 
+-	mutex_lock(sensor->ifp.hdl.lock);
+-
+ 	if (ival->numerator != 0 && ival->denominator != 0)
+ 		sensor->ifp.frame_rate = min_t(unsigned int,
+ 					       ival->denominator / ival->numerator,
+@@ -1639,8 +1633,6 @@ static int mt9m114_ifp_set_frame_interval(struct v4l2_subdev *sd,
+ 	if (sensor->streaming)
+ 		ret = mt9m114_set_frame_rate(sensor);
+ 
+-	mutex_unlock(sensor->ifp.hdl.lock);
+-
+ 	return ret;
+ }
+ 
+diff --git a/drivers/media/i2c/ov2659.c b/drivers/media/i2c/ov2659.c
+index 06b7896c3eaf14..586b31ba076b60 100644
+--- a/drivers/media/i2c/ov2659.c
++++ b/drivers/media/i2c/ov2659.c
+@@ -1469,14 +1469,15 @@ static int ov2659_probe(struct i2c_client *client)
+ 				     V4L2_CID_TEST_PATTERN,
+ 				     ARRAY_SIZE(ov2659_test_pattern_menu) - 1,
+ 				     0, 0, ov2659_test_pattern_menu);
+-	ov2659->sd.ctrl_handler = &ov2659->ctrls;
+ 
+ 	if (ov2659->ctrls.error) {
+ 		dev_err(&client->dev, "%s: control initialization error %d\n",
+ 			__func__, ov2659->ctrls.error);
++		v4l2_ctrl_handler_free(&ov2659->ctrls);
+ 		return  ov2659->ctrls.error;
+ 	}
+ 
++	ov2659->sd.ctrl_handler = &ov2659->ctrls;
+ 	sd = &ov2659->sd;
+ 	client->flags |= I2C_CLIENT_SCCB;
+ 
+diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c b/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c
+index 051898ce53f439..08148bfc2b4bf5 100644
+--- a/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c
++++ b/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c
+@@ -360,9 +360,9 @@ static int ipu6_isys_csi2_enable_streams(struct v4l2_subdev *sd,
+ 	remote_pad = media_pad_remote_pad_first(&sd->entity.pads[CSI2_PAD_SINK]);
+ 	remote_sd = media_entity_to_v4l2_subdev(remote_pad->entity);
+ 
+-	sink_streams = v4l2_subdev_state_xlate_streams(state, CSI2_PAD_SRC,
+-						       CSI2_PAD_SINK,
+-						       &streams_mask);
++	sink_streams =
++		v4l2_subdev_state_xlate_streams(state, pad, CSI2_PAD_SINK,
++						&streams_mask);
+ 
+ 	ret = ipu6_isys_csi2_calc_timing(csi2, &timing, CSI2_ACCINV);
+ 	if (ret)
+@@ -390,9 +390,9 @@ static int ipu6_isys_csi2_disable_streams(struct v4l2_subdev *sd,
+ 	struct media_pad *remote_pad;
+ 	u64 sink_streams;
+ 
+-	sink_streams = v4l2_subdev_state_xlate_streams(state, CSI2_PAD_SRC,
+-						       CSI2_PAD_SINK,
+-						       &streams_mask);
++	sink_streams =
++		v4l2_subdev_state_xlate_streams(state, pad, CSI2_PAD_SINK,
++						&streams_mask);
+ 
+ 	remote_pad = media_pad_remote_pad_first(&sd->entity.pads[CSI2_PAD_SINK]);
+ 	remote_sd = media_entity_to_v4l2_subdev(remote_pad->entity);
+diff --git a/drivers/media/pci/intel/ivsc/mei_ace.c b/drivers/media/pci/intel/ivsc/mei_ace.c
+index 3622271c71c883..50d18b627e152e 100644
+--- a/drivers/media/pci/intel/ivsc/mei_ace.c
++++ b/drivers/media/pci/intel/ivsc/mei_ace.c
+@@ -529,6 +529,8 @@ static void mei_ace_remove(struct mei_cl_device *cldev)
+ 
+ 	ace_set_camera_owner(ace, ACE_CAMERA_IVSC);
+ 
++	mei_cldev_disable(cldev);
++
+ 	mutex_destroy(&ace->lock);
+ }
+ 
+diff --git a/drivers/media/pci/intel/ivsc/mei_csi.c b/drivers/media/pci/intel/ivsc/mei_csi.c
+index 2a9c12c975cac1..bd3683b5edf656 100644
+--- a/drivers/media/pci/intel/ivsc/mei_csi.c
++++ b/drivers/media/pci/intel/ivsc/mei_csi.c
+@@ -786,6 +786,8 @@ static void mei_csi_remove(struct mei_cl_device *cldev)
+ 
+ 	pm_runtime_disable(&cldev->dev);
+ 
++	mei_cldev_disable(cldev);
++
+ 	mutex_destroy(&csi->lock);
+ }
+ 
+diff --git a/drivers/media/platform/qcom/camss/camss.c b/drivers/media/platform/qcom/camss/camss.c
+index 8c3bce738f2a8f..d00475d1bc571a 100644
+--- a/drivers/media/platform/qcom/camss/camss.c
++++ b/drivers/media/platform/qcom/camss/camss.c
+@@ -2275,7 +2275,7 @@ static int camss_probe(struct platform_device *pdev)
+ 	ret = v4l2_device_register(camss->dev, &camss->v4l2_dev);
+ 	if (ret < 0) {
+ 		dev_err(dev, "Failed to register V4L2 device: %d\n", ret);
+-		goto err_genpd_cleanup;
++		goto err_media_device_cleanup;
+ 	}
+ 
+ 	v4l2_async_nf_init(&camss->notifier, &camss->v4l2_dev);
+@@ -2330,6 +2330,8 @@ static int camss_probe(struct platform_device *pdev)
+ 	v4l2_device_unregister(&camss->v4l2_dev);
+ 	v4l2_async_nf_cleanup(&camss->notifier);
+ 	pm_runtime_disable(dev);
++err_media_device_cleanup:
++	media_device_cleanup(&camss->media_dev);
+ err_genpd_cleanup:
+ 	camss_genpd_cleanup(camss);
+ 
+diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
+index 4d10e94eefe9e8..e26bb48f335d5e 100644
+--- a/drivers/media/platform/qcom/venus/core.c
++++ b/drivers/media/platform/qcom/venus/core.c
+@@ -340,13 +340,13 @@ static int venus_probe(struct platform_device *pdev)
+ 	INIT_DELAYED_WORK(&core->work, venus_sys_error_handler);
+ 	init_waitqueue_head(&core->sys_err_done);
+ 
+-	ret = devm_request_threaded_irq(dev, core->irq, hfi_isr, venus_isr_thread,
+-					IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+-					"venus", core);
++	ret = hfi_create(core, &venus_core_ops);
+ 	if (ret)
+ 		goto err_core_put;
+ 
+-	ret = hfi_create(core, &venus_core_ops);
++	ret = devm_request_threaded_irq(dev, core->irq, hfi_isr, venus_isr_thread,
++					IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
++					"venus", core);
+ 	if (ret)
+ 		goto err_core_put;
+ 
+@@ -593,11 +593,11 @@ static const struct venus_resources msm8996_res = {
+ };
+ 
+ static const struct freq_tbl msm8998_freq_table[] = {
+-	{ 1944000, 465000000 },	/* 4k UHD @ 60 (decode only) */
+-	{  972000, 465000000 },	/* 4k UHD @ 30 */
+-	{  489600, 360000000 },	/* 1080p @ 60 */
+-	{  244800, 186000000 },	/* 1080p @ 30 */
+-	{  108000, 100000000 },	/* 720p @ 30 */
++	{ 1728000, 533000000 },	/* 4k UHD @ 60 (decode only) */
++	{ 1036800, 444000000 },	/* 2k @ 120 */
++	{  829440, 355200000 },	/* 4k @ 44 */
++	{  489600, 269330000 },	/* 4k @ 30 */
++	{  108000, 200000000 },	/* 1080p @ 60 */
+ };
+ 
+ static const struct reg_val msm8998_reg_preset[] = {
+diff --git a/drivers/media/platform/qcom/venus/core.h b/drivers/media/platform/qcom/venus/core.h
+index 55202b89e1b9fc..4a6ff5704c8d39 100644
+--- a/drivers/media/platform/qcom/venus/core.h
++++ b/drivers/media/platform/qcom/venus/core.h
+@@ -27,6 +27,8 @@
+ #define VIDC_VCODEC_CLKS_NUM_MAX	2
+ #define VIDC_RESETS_NUM_MAX		2
+ 
++#define VENUS_MAX_FPS			240
++
+ extern int venus_fw_debug;
+ 
+ struct freq_tbl {
+diff --git a/drivers/media/platform/qcom/venus/hfi_venus.c b/drivers/media/platform/qcom/venus/hfi_venus.c
+index ab93757fff4b31..8e211527960118 100644
+--- a/drivers/media/platform/qcom/venus/hfi_venus.c
++++ b/drivers/media/platform/qcom/venus/hfi_venus.c
+@@ -239,6 +239,7 @@ static int venus_write_queue(struct venus_hfi_device *hdev,
+ static int venus_read_queue(struct venus_hfi_device *hdev,
+ 			    struct iface_queue *queue, void *pkt, u32 *tx_req)
+ {
++	struct hfi_pkt_hdr *pkt_hdr = NULL;
+ 	struct hfi_queue_header *qhdr;
+ 	u32 dwords, new_rd_idx;
+ 	u32 rd_idx, wr_idx, type, qsize;
+@@ -304,6 +305,9 @@ static int venus_read_queue(struct venus_hfi_device *hdev,
+ 			memcpy(pkt, rd_ptr, len);
+ 			memcpy(pkt + len, queue->qmem.kva, new_rd_idx << 2);
+ 		}
++		pkt_hdr = (struct hfi_pkt_hdr *)(pkt);
++		if ((pkt_hdr->size >> 2) != dwords)
++			return -EINVAL;
+ 	} else {
+ 		/* bad packet received, dropping */
+ 		new_rd_idx = qhdr->write_idx;
+@@ -1689,6 +1693,7 @@ void venus_hfi_destroy(struct venus_core *core)
+ 	venus_interface_queues_release(hdev);
+ 	mutex_destroy(&hdev->lock);
+ 	kfree(hdev);
++	disable_irq(core->irq);
+ 	core->ops = NULL;
+ }
+ 
+diff --git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c
+index d12089370d91e7..6846973a11594d 100644
+--- a/drivers/media/platform/qcom/venus/vdec.c
++++ b/drivers/media/platform/qcom/venus/vdec.c
+@@ -481,11 +481,10 @@ static int vdec_s_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
+ 	us_per_frame = timeperframe->numerator * (u64)USEC_PER_SEC;
+ 	do_div(us_per_frame, timeperframe->denominator);
+ 
+-	if (!us_per_frame)
+-		return -EINVAL;
+-
++	us_per_frame = clamp(us_per_frame, 1, USEC_PER_SEC);
+ 	fps = (u64)USEC_PER_SEC;
+ 	do_div(fps, us_per_frame);
++	fps = min(VENUS_MAX_FPS, fps);
+ 
+ 	inst->fps = fps;
+ 	inst->timeperframe = *timeperframe;
+diff --git a/drivers/media/platform/qcom/venus/venc.c b/drivers/media/platform/qcom/venus/venc.c
+index 3ec2fb8d9fab60..cf5af5ea11e53f 100644
+--- a/drivers/media/platform/qcom/venus/venc.c
++++ b/drivers/media/platform/qcom/venus/venc.c
+@@ -411,11 +411,10 @@ static int venc_s_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
+ 	us_per_frame = timeperframe->numerator * (u64)USEC_PER_SEC;
+ 	do_div(us_per_frame, timeperframe->denominator);
+ 
+-	if (!us_per_frame)
+-		return -EINVAL;
+-
++	us_per_frame = clamp(us_per_frame, 1, USEC_PER_SEC);
+ 	fps = (u64)USEC_PER_SEC;
+ 	do_div(fps, us_per_frame);
++	fps = min(VENUS_MAX_FPS, fps);
+ 
+ 	inst->timeperframe = *timeperframe;
+ 	inst->fps = fps;
+diff --git a/drivers/media/platform/raspberrypi/pisp_be/Kconfig b/drivers/media/platform/raspberrypi/pisp_be/Kconfig
+index 46765a2e4c4d15..a9e51fd94aadc6 100644
+--- a/drivers/media/platform/raspberrypi/pisp_be/Kconfig
++++ b/drivers/media/platform/raspberrypi/pisp_be/Kconfig
+@@ -3,6 +3,7 @@ config VIDEO_RASPBERRYPI_PISP_BE
+ 	depends on V4L_PLATFORM_DRIVERS
+ 	depends on VIDEO_DEV
+ 	depends on ARCH_BCM2835 || COMPILE_TEST
++	depends on PM
+ 	select VIDEO_V4L2_SUBDEV_API
+ 	select MEDIA_CONTROLLER
+ 	select VIDEOBUF2_DMA_CONTIG
+diff --git a/drivers/media/platform/raspberrypi/pisp_be/pisp_be.c b/drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
+index 65ff2382cffe9e..49594e539c4fe0 100644
+--- a/drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
++++ b/drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
+@@ -1726,7 +1726,7 @@ static int pispbe_probe(struct platform_device *pdev)
+ 	pm_runtime_use_autosuspend(pispbe->dev);
+ 	pm_runtime_enable(pispbe->dev);
+ 
+-	ret = pispbe_runtime_resume(pispbe->dev);
++	ret = pm_runtime_resume_and_get(pispbe->dev);
+ 	if (ret)
+ 		goto pm_runtime_disable_err;
+ 
+@@ -1748,7 +1748,7 @@ static int pispbe_probe(struct platform_device *pdev)
+ disable_devs_err:
+ 	pispbe_destroy_devices(pispbe);
+ pm_runtime_suspend_err:
+-	pispbe_runtime_suspend(pispbe->dev);
++	pm_runtime_put(pispbe->dev);
+ pm_runtime_disable_err:
+ 	pm_runtime_dont_use_autosuspend(pispbe->dev);
+ 	pm_runtime_disable(pispbe->dev);
+@@ -1762,7 +1762,6 @@ static void pispbe_remove(struct platform_device *pdev)
+ 
+ 	pispbe_destroy_devices(pispbe);
+ 
+-	pispbe_runtime_suspend(pispbe->dev);
+ 	pm_runtime_dont_use_autosuspend(pispbe->dev);
+ 	pm_runtime_disable(pispbe->dev);
+ }
+diff --git a/drivers/media/platform/verisilicon/rockchip_vpu_hw.c b/drivers/media/platform/verisilicon/rockchip_vpu_hw.c
+index 964122e7c35593..842040f713c15e 100644
+--- a/drivers/media/platform/verisilicon/rockchip_vpu_hw.c
++++ b/drivers/media/platform/verisilicon/rockchip_vpu_hw.c
+@@ -17,7 +17,6 @@
+ 
+ #define RK3066_ACLK_MAX_FREQ (300 * 1000 * 1000)
+ #define RK3288_ACLK_MAX_FREQ (400 * 1000 * 1000)
+-#define RK3588_ACLK_MAX_FREQ (300 * 1000 * 1000)
+ 
+ #define ROCKCHIP_VPU981_MIN_SIZE 64
+ 
+@@ -440,13 +439,6 @@ static int rk3066_vpu_hw_init(struct hantro_dev *vpu)
+ 	return 0;
+ }
+ 
+-static int rk3588_vpu981_hw_init(struct hantro_dev *vpu)
+-{
+-	/* Bump ACLKs to max. possible freq. to improve performance. */
+-	clk_set_rate(vpu->clocks[0].clk, RK3588_ACLK_MAX_FREQ);
+-	return 0;
+-}
+-
+ static int rockchip_vpu_hw_init(struct hantro_dev *vpu)
+ {
+ 	/* Bump ACLK to max. possible freq. to improve performance. */
+@@ -807,7 +799,6 @@ const struct hantro_variant rk3588_vpu981_variant = {
+ 	.codec_ops = rk3588_vpu981_codec_ops,
+ 	.irqs = rk3588_vpu981_irqs,
+ 	.num_irqs = ARRAY_SIZE(rk3588_vpu981_irqs),
+-	.init = rk3588_vpu981_hw_init,
+ 	.clk_names = rk3588_vpu981_vpu_clk_names,
+ 	.num_clocks = ARRAY_SIZE(rk3588_vpu981_vpu_clk_names)
+ };
+diff --git a/drivers/media/test-drivers/vivid/vivid-ctrls.c b/drivers/media/test-drivers/vivid/vivid-ctrls.c
+index 2b5c8fbcd0a278..3fb4e08ac725ba 100644
+--- a/drivers/media/test-drivers/vivid/vivid-ctrls.c
++++ b/drivers/media/test-drivers/vivid/vivid-ctrls.c
+@@ -243,7 +243,8 @@ static const struct v4l2_ctrl_config vivid_ctrl_u8_pixel_array = {
+ 	.min = 0x00,
+ 	.max = 0xff,
+ 	.step = 1,
+-	.dims = { 640 / PIXEL_ARRAY_DIV, 360 / PIXEL_ARRAY_DIV },
++	.dims = { DIV_ROUND_UP(360, PIXEL_ARRAY_DIV),
++		  DIV_ROUND_UP(640, PIXEL_ARRAY_DIV) },
+ };
+ 
+ static const struct v4l2_ctrl_config vivid_ctrl_s32_array = {
+diff --git a/drivers/media/test-drivers/vivid/vivid-vid-cap.c b/drivers/media/test-drivers/vivid/vivid-vid-cap.c
+index 0d5919e0007562..cc84d2671d8407 100644
+--- a/drivers/media/test-drivers/vivid/vivid-vid-cap.c
++++ b/drivers/media/test-drivers/vivid/vivid-vid-cap.c
+@@ -453,8 +453,8 @@ void vivid_update_format_cap(struct vivid_dev *dev, bool keep_controls)
+ 	if (keep_controls)
+ 		return;
+ 
+-	dims[0] = roundup(dev->src_rect.width, PIXEL_ARRAY_DIV);
+-	dims[1] = roundup(dev->src_rect.height, PIXEL_ARRAY_DIV);
++	dims[0] = DIV_ROUND_UP(dev->src_rect.height, PIXEL_ARRAY_DIV);
++	dims[1] = DIV_ROUND_UP(dev->src_rect.width, PIXEL_ARRAY_DIV);
+ 	v4l2_ctrl_modify_dimensions(dev->pixel_array, dims);
+ }
+ 
+diff --git a/drivers/media/usb/gspca/vicam.c b/drivers/media/usb/gspca/vicam.c
+index d98343fd33fe34..91e177aa8136fd 100644
+--- a/drivers/media/usb/gspca/vicam.c
++++ b/drivers/media/usb/gspca/vicam.c
+@@ -227,6 +227,7 @@ static int sd_init(struct gspca_dev *gspca_dev)
+ 	const struct ihex_binrec *rec;
+ 	const struct firmware *fw;
+ 	u8 *firmware_buf;
++	int len;
+ 
+ 	ret = request_ihex_firmware(&fw, VICAM_FIRMWARE,
+ 				    &gspca_dev->dev->dev);
+@@ -241,9 +242,14 @@ static int sd_init(struct gspca_dev *gspca_dev)
+ 		goto exit;
+ 	}
+ 	for (rec = (void *)fw->data; rec; rec = ihex_next_binrec(rec)) {
+-		memcpy(firmware_buf, rec->data, be16_to_cpu(rec->len));
++		len = be16_to_cpu(rec->len);
++		if (len > PAGE_SIZE) {
++			ret = -EINVAL;
++			break;
++		}
++		memcpy(firmware_buf, rec->data, len);
+ 		ret = vicam_control_msg(gspca_dev, 0xff, 0, 0, firmware_buf,
+-					be16_to_cpu(rec->len));
++					len);
+ 		if (ret < 0)
+ 			break;
+ 	}
+diff --git a/drivers/media/usb/usbtv/usbtv-video.c b/drivers/media/usb/usbtv/usbtv-video.c
+index 702f1c8bd2ab3d..9dc882c1a7809d 100644
+--- a/drivers/media/usb/usbtv/usbtv-video.c
++++ b/drivers/media/usb/usbtv/usbtv-video.c
+@@ -73,6 +73,10 @@ static int usbtv_configure_for_norm(struct usbtv *usbtv, v4l2_std_id norm)
+ 	}
+ 
+ 	if (params) {
++		if (vb2_is_busy(&usbtv->vb2q) &&
++		    (usbtv->width != params->cap_width ||
++		     usbtv->height != params->cap_height))
++			return -EBUSY;
+ 		usbtv->width = params->cap_width;
+ 		usbtv->height = params->cap_height;
+ 		usbtv->n_chunks = usbtv->width * usbtv->height
+diff --git a/drivers/media/v4l2-core/v4l2-ctrls-core.c b/drivers/media/v4l2-core/v4l2-ctrls-core.c
+index 675642af8601f8..4cc32685124c3a 100644
+--- a/drivers/media/v4l2-core/v4l2-ctrls-core.c
++++ b/drivers/media/v4l2-core/v4l2-ctrls-core.c
+@@ -1582,7 +1582,6 @@ void v4l2_ctrl_handler_free(struct v4l2_ctrl_handler *hdl)
+ 	kvfree(hdl->buckets);
+ 	hdl->buckets = NULL;
+ 	hdl->cached = NULL;
+-	hdl->error = 0;
+ 	mutex_unlock(hdl->lock);
+ 	mutex_destroy(&hdl->_lock);
+ }
+diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c
+index e6801ad14318b9..2fcc40aa96340b 100644
+--- a/drivers/memstick/core/memstick.c
++++ b/drivers/memstick/core/memstick.c
+@@ -547,7 +547,6 @@ EXPORT_SYMBOL(memstick_add_host);
+  */
+ void memstick_remove_host(struct memstick_host *host)
+ {
+-	host->removing = 1;
+ 	flush_workqueue(workqueue);
+ 	mutex_lock(&host->lock);
+ 	if (host->card)
+diff --git a/drivers/memstick/host/rtsx_usb_ms.c b/drivers/memstick/host/rtsx_usb_ms.c
+index d99f8922d4ad04..3f983d599d2379 100644
+--- a/drivers/memstick/host/rtsx_usb_ms.c
++++ b/drivers/memstick/host/rtsx_usb_ms.c
+@@ -812,6 +812,7 @@ static void rtsx_usb_ms_drv_remove(struct platform_device *pdev)
+ 	int err;
+ 
+ 	host->eject = true;
++	msh->removing = true;
+ 	cancel_work_sync(&host->handle_req);
+ 	cancel_delayed_work_sync(&host->poll_card);
+ 
+diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c
+index 68ce4920e01e35..8477b9dd80b746 100644
+--- a/drivers/mmc/host/sdhci-pci-gli.c
++++ b/drivers/mmc/host/sdhci-pci-gli.c
+@@ -215,6 +215,20 @@
+ #define GLI_MAX_TUNING_LOOP 40
+ 
+ /* Genesys Logic chipset */
++static void sdhci_gli_mask_replay_timer_timeout(struct pci_dev *pdev)
++{
++	int aer;
++	u32 value;
++
++	/* mask the replay timer timeout of AER */
++	aer = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
++	if (aer) {
++		pci_read_config_dword(pdev, aer + PCI_ERR_COR_MASK, &value);
++		value |= PCI_ERR_COR_REP_TIMER;
++		pci_write_config_dword(pdev, aer + PCI_ERR_COR_MASK, value);
++	}
++}
++
+ static inline void gl9750_wt_on(struct sdhci_host *host)
+ {
+ 	u32 wt_value;
+@@ -535,7 +549,6 @@ static void gl9750_hw_setting(struct sdhci_host *host)
+ {
+ 	struct sdhci_pci_slot *slot = sdhci_priv(host);
+ 	struct pci_dev *pdev;
+-	int aer;
+ 	u32 value;
+ 
+ 	pdev = slot->chip->pdev;
+@@ -554,12 +567,7 @@ static void gl9750_hw_setting(struct sdhci_host *host)
+ 	pci_set_power_state(pdev, PCI_D0);
+ 
+ 	/* mask the replay timer timeout of AER */
+-	aer = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
+-	if (aer) {
+-		pci_read_config_dword(pdev, aer + PCI_ERR_COR_MASK, &value);
+-		value |= PCI_ERR_COR_REP_TIMER;
+-		pci_write_config_dword(pdev, aer + PCI_ERR_COR_MASK, value);
+-	}
++	sdhci_gli_mask_replay_timer_timeout(pdev);
+ 
+ 	gl9750_wt_off(host);
+ }
+@@ -734,7 +742,6 @@ static void sdhci_gl9755_set_clock(struct sdhci_host *host, unsigned int clock)
+ static void gl9755_hw_setting(struct sdhci_pci_slot *slot)
+ {
+ 	struct pci_dev *pdev = slot->chip->pdev;
+-	int aer;
+ 	u32 value;
+ 
+ 	gl9755_wt_on(pdev);
+@@ -769,12 +776,7 @@ static void gl9755_hw_setting(struct sdhci_pci_slot *slot)
+ 	pci_set_power_state(pdev, PCI_D0);
+ 
+ 	/* mask the replay timer timeout of AER */
+-	aer = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
+-	if (aer) {
+-		pci_read_config_dword(pdev, aer + PCI_ERR_COR_MASK, &value);
+-		value |= PCI_ERR_COR_REP_TIMER;
+-		pci_write_config_dword(pdev, aer + PCI_ERR_COR_MASK, value);
+-	}
++	sdhci_gli_mask_replay_timer_timeout(pdev);
+ 
+ 	gl9755_wt_off(pdev);
+ }
+@@ -1333,7 +1335,7 @@ static int gl9763e_add_host(struct sdhci_pci_slot *slot)
+ 	return ret;
+ }
+ 
+-static void gli_set_gl9763e(struct sdhci_pci_slot *slot)
++static void gl9763e_hw_setting(struct sdhci_pci_slot *slot)
+ {
+ 	struct pci_dev *pdev = slot->chip->pdev;
+ 	u32 value;
+@@ -1362,6 +1364,9 @@ static void gli_set_gl9763e(struct sdhci_pci_slot *slot)
+ 	value |= FIELD_PREP(GLI_9763E_HS400_RXDLY, GLI_9763E_HS400_RXDLY_5);
+ 	pci_write_config_dword(pdev, PCIE_GLI_9763E_CLKRXDLY, value);
+ 
++	/* mask the replay timer timeout of AER */
++	sdhci_gli_mask_replay_timer_timeout(pdev);
++
+ 	pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value);
+ 	value &= ~GLI_9763E_VHS_REV;
+ 	value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_R);
+@@ -1505,7 +1510,7 @@ static int gli_probe_slot_gl9763e(struct sdhci_pci_slot *slot)
+ 	gli_pcie_enable_msi(slot);
+ 	host->mmc_host_ops.hs400_enhanced_strobe =
+ 					gl9763e_hs400_enhanced_strobe;
+-	gli_set_gl9763e(slot);
++	gl9763e_hw_setting(slot);
+ 	sdhci_enable_v4_mode(host);
+ 
+ 	return 0;
+diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c
+index 8e0eb0acf4428a..47344e29a4c9cf 100644
+--- a/drivers/mmc/host/sdhci_am654.c
++++ b/drivers/mmc/host/sdhci_am654.c
+@@ -155,6 +155,7 @@ struct sdhci_am654_data {
+ 	u32 tuning_loop;
+ 
+ #define SDHCI_AM654_QUIRK_FORCE_CDTEST BIT(0)
++#define SDHCI_AM654_QUIRK_DISABLE_HS400 BIT(1)
+ };
+ 
+ struct window {
+@@ -734,6 +735,7 @@ static int sdhci_am654_init(struct sdhci_host *host)
+ {
+ 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ 	struct sdhci_am654_data *sdhci_am654 = sdhci_pltfm_priv(pltfm_host);
++	struct device *dev = mmc_dev(host->mmc);
+ 	u32 ctl_cfg_2 = 0;
+ 	u32 mask;
+ 	u32 val;
+@@ -789,6 +791,12 @@ static int sdhci_am654_init(struct sdhci_host *host)
+ 	if (ret)
+ 		goto err_cleanup_host;
+ 
++	if (sdhci_am654->quirks & SDHCI_AM654_QUIRK_DISABLE_HS400 &&
++	    host->mmc->caps2 & (MMC_CAP2_HS400 | MMC_CAP2_HS400_ES)) {
++		dev_info(dev, "HS400 mode not supported on this silicon revision, disabling it\n");
++		host->mmc->caps2 &= ~(MMC_CAP2_HS400 | MMC_CAP2_HS400_ES);
++	}
++
+ 	ret = __sdhci_add_host(host);
+ 	if (ret)
+ 		goto err_cleanup_host;
+@@ -852,6 +860,12 @@ static int sdhci_am654_get_of_property(struct platform_device *pdev,
+ 	return 0;
+ }
+ 
++static const struct soc_device_attribute sdhci_am654_descope_hs400[] = {
++	{ .family = "AM62PX", .revision = "SR1.0" },
++	{ .family = "AM62PX", .revision = "SR1.1" },
++	{ /* sentinel */ }
++};
++
+ static const struct of_device_id sdhci_am654_of_match[] = {
+ 	{
+ 		.compatible = "ti,am654-sdhci-5.1",
+@@ -943,6 +957,10 @@ static int sdhci_am654_probe(struct platform_device *pdev)
+ 		goto err_pltfm_free;
+ 	}
+ 
++	soc = soc_device_match(sdhci_am654_descope_hs400);
++	if (soc)
++		sdhci_am654->quirks |= SDHCI_AM654_QUIRK_DISABLE_HS400;
++
+ 	host->mmc_host_ops.execute_tuning = sdhci_am654_execute_tuning;
+ 
+ 	pm_runtime_get_noresume(dev);
+diff --git a/drivers/most/core.c b/drivers/most/core.c
+index a635d5082ebb64..da319d108ea1df 100644
+--- a/drivers/most/core.c
++++ b/drivers/most/core.c
+@@ -538,8 +538,8 @@ static struct most_channel *get_channel(char *mdev, char *mdev_ch)
+ 	dev = bus_find_device_by_name(&mostbus, NULL, mdev);
+ 	if (!dev)
+ 		return NULL;
+-	put_device(dev);
+ 	iface = dev_get_drvdata(dev);
++	put_device(dev);
+ 	list_for_each_entry_safe(c, tmp, &iface->p->channel_list, list) {
+ 		if (!strcmp(dev_name(&c->dev), mdev_ch))
+ 			return c;
+diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c
+index 811982da355740..fe5912d31beea4 100644
+--- a/drivers/mtd/nand/raw/fsmc_nand.c
++++ b/drivers/mtd/nand/raw/fsmc_nand.c
+@@ -503,6 +503,8 @@ static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len,
+ 
+ 	dma_dev = chan->device;
+ 	dma_addr = dma_map_single(dma_dev->dev, buffer, len, direction);
++	if (dma_mapping_error(dma_dev->dev, dma_addr))
++		return -EINVAL;
+ 
+ 	if (direction == DMA_TO_DEVICE) {
+ 		dma_src = dma_addr;
+diff --git a/drivers/mtd/nand/raw/renesas-nand-controller.c b/drivers/mtd/nand/raw/renesas-nand-controller.c
+index 0e92d50c5249b0..ed45d0add3e964 100644
+--- a/drivers/mtd/nand/raw/renesas-nand-controller.c
++++ b/drivers/mtd/nand/raw/renesas-nand-controller.c
+@@ -426,6 +426,9 @@ static int rnandc_read_page_hw_ecc(struct nand_chip *chip, u8 *buf,
+ 	/* Configure DMA */
+ 	dma_addr = dma_map_single(rnandc->dev, rnandc->buf, mtd->writesize,
+ 				  DMA_FROM_DEVICE);
++	if (dma_mapping_error(rnandc->dev, dma_addr))
++		return -ENOMEM;
++
+ 	writel(dma_addr, rnandc->regs + DMA_ADDR_LOW_REG);
+ 	writel(mtd->writesize, rnandc->regs + DMA_CNT_REG);
+ 	writel(DMA_TLVL_MAX, rnandc->regs + DMA_TLVL_REG);
+@@ -606,6 +609,9 @@ static int rnandc_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf,
+ 	/* Configure DMA */
+ 	dma_addr = dma_map_single(rnandc->dev, (void *)rnandc->buf, mtd->writesize,
+ 				  DMA_TO_DEVICE);
++	if (dma_mapping_error(rnandc->dev, dma_addr))
++		return -ENOMEM;
++
+ 	writel(dma_addr, rnandc->regs + DMA_ADDR_LOW_REG);
+ 	writel(mtd->writesize, rnandc->regs + DMA_CNT_REG);
+ 	writel(DMA_TLVL_MAX, rnandc->regs + DMA_TLVL_REG);
+diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
+index 241f6a4df16c1f..c523a1a22c2b0f 100644
+--- a/drivers/mtd/nand/spi/core.c
++++ b/drivers/mtd/nand/spi/core.c
+@@ -659,7 +659,10 @@ static int spinand_write_page(struct spinand_device *spinand,
+ 			   SPINAND_WRITE_INITIAL_DELAY_US,
+ 			   SPINAND_WRITE_POLL_DELAY_US,
+ 			   &status);
+-	if (!ret && (status & STATUS_PROG_FAILED))
++	if (ret)
++		return ret;
++
++	if (status & STATUS_PROG_FAILED)
+ 		return -EIO;
+ 
+ 	return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req);
+diff --git a/drivers/mtd/spi-nor/swp.c b/drivers/mtd/spi-nor/swp.c
+index e48c3cff247a87..fdc411f2a23c57 100644
+--- a/drivers/mtd/spi-nor/swp.c
++++ b/drivers/mtd/spi-nor/swp.c
+@@ -55,7 +55,6 @@ static u64 spi_nor_get_min_prot_length_sr(struct spi_nor *nor)
+ static void spi_nor_get_locked_range_sr(struct spi_nor *nor, u8 sr, loff_t *ofs,
+ 					u64 *len)
+ {
+-	struct mtd_info *mtd = &nor->mtd;
+ 	u64 min_prot_len;
+ 	u8 mask = spi_nor_get_sr_bp_mask(nor);
+ 	u8 tb_mask = spi_nor_get_sr_tb_mask(nor);
+@@ -76,13 +75,13 @@ static void spi_nor_get_locked_range_sr(struct spi_nor *nor, u8 sr, loff_t *ofs,
+ 	min_prot_len = spi_nor_get_min_prot_length_sr(nor);
+ 	*len = min_prot_len << (bp - 1);
+ 
+-	if (*len > mtd->size)
+-		*len = mtd->size;
++	if (*len > nor->params->size)
++		*len = nor->params->size;
+ 
+ 	if (nor->flags & SNOR_F_HAS_SR_TB && sr & tb_mask)
+ 		*ofs = 0;
+ 	else
+-		*ofs = mtd->size - *len;
++		*ofs = nor->params->size - *len;
+ }
+ 
+ /*
+@@ -157,7 +156,6 @@ static bool spi_nor_is_unlocked_sr(struct spi_nor *nor, loff_t ofs, u64 len,
+  */
+ static int spi_nor_sr_lock(struct spi_nor *nor, loff_t ofs, u64 len)
+ {
+-	struct mtd_info *mtd = &nor->mtd;
+ 	u64 min_prot_len;
+ 	int ret, status_old, status_new;
+ 	u8 mask = spi_nor_get_sr_bp_mask(nor);
+@@ -182,7 +180,7 @@ static int spi_nor_sr_lock(struct spi_nor *nor, loff_t ofs, u64 len)
+ 		can_be_bottom = false;
+ 
+ 	/* If anything above us is unlocked, we can't use 'top' protection */
+-	if (!spi_nor_is_locked_sr(nor, ofs + len, mtd->size - (ofs + len),
++	if (!spi_nor_is_locked_sr(nor, ofs + len, nor->params->size - (ofs + len),
+ 				  status_old))
+ 		can_be_top = false;
+ 
+@@ -194,11 +192,11 @@ static int spi_nor_sr_lock(struct spi_nor *nor, loff_t ofs, u64 len)
+ 
+ 	/* lock_len: length of region that should end up locked */
+ 	if (use_top)
+-		lock_len = mtd->size - ofs;
++		lock_len = nor->params->size - ofs;
+ 	else
+ 		lock_len = ofs + len;
+ 
+-	if (lock_len == mtd->size) {
++	if (lock_len == nor->params->size) {
+ 		val = mask;
+ 	} else {
+ 		min_prot_len = spi_nor_get_min_prot_length_sr(nor);
+@@ -247,7 +245,6 @@ static int spi_nor_sr_lock(struct spi_nor *nor, loff_t ofs, u64 len)
+  */
+ static int spi_nor_sr_unlock(struct spi_nor *nor, loff_t ofs, u64 len)
+ {
+-	struct mtd_info *mtd = &nor->mtd;
+ 	u64 min_prot_len;
+ 	int ret, status_old, status_new;
+ 	u8 mask = spi_nor_get_sr_bp_mask(nor);
+@@ -272,7 +269,7 @@ static int spi_nor_sr_unlock(struct spi_nor *nor, loff_t ofs, u64 len)
+ 		can_be_top = false;
+ 
+ 	/* If anything above us is locked, we can't use 'bottom' protection */
+-	if (!spi_nor_is_unlocked_sr(nor, ofs + len, mtd->size - (ofs + len),
++	if (!spi_nor_is_unlocked_sr(nor, ofs + len, nor->params->size - (ofs + len),
+ 				    status_old))
+ 		can_be_bottom = false;
+ 
+@@ -284,7 +281,7 @@ static int spi_nor_sr_unlock(struct spi_nor *nor, loff_t ofs, u64 len)
+ 
+ 	/* lock_len: length of region that should remain locked */
+ 	if (use_top)
+-		lock_len = mtd->size - (ofs + len);
++		lock_len = nor->params->size - (ofs + len);
+ 	else
+ 		lock_len = ofs;
+ 
+diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
+index c6807e473ab706..4c2560ae8866a1 100644
+--- a/drivers/net/bonding/bond_3ad.c
++++ b/drivers/net/bonding/bond_3ad.c
+@@ -95,13 +95,13 @@ static int ad_marker_send(struct port *port, struct bond_marker *marker);
+ static void ad_mux_machine(struct port *port, bool *update_slave_arr);
+ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port);
+ static void ad_tx_machine(struct port *port);
+-static void ad_periodic_machine(struct port *port, struct bond_params *bond_params);
++static void ad_periodic_machine(struct port *port);
+ static void ad_port_selection_logic(struct port *port, bool *update_slave_arr);
+ static void ad_agg_selection_logic(struct aggregator *aggregator,
+ 				   bool *update_slave_arr);
+ static void ad_clear_agg(struct aggregator *aggregator);
+ static void ad_initialize_agg(struct aggregator *aggregator);
+-static void ad_initialize_port(struct port *port, int lacp_fast);
++static void ad_initialize_port(struct port *port, const struct bond_params *bond_params);
+ static void ad_enable_collecting(struct port *port);
+ static void ad_disable_distributing(struct port *port,
+ 				    bool *update_slave_arr);
+@@ -1296,10 +1296,16 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
+ 			 * case of EXPIRED even if LINK_DOWN didn't arrive for
+ 			 * the port.
+ 			 */
+-			port->partner_oper.port_state &= ~LACP_STATE_SYNCHRONIZATION;
+ 			port->sm_vars &= ~AD_PORT_MATCHED;
++			/* Based on IEEE 802.1AX-2014, Figure 6-18 - Receive
++			 * machine state diagram, the state should be
++			 * Partner_Oper_Port_State.Synchronization = FALSE;
++			 * Partner_Oper_Port_State.LACP_Timeout = Short Timeout;
++			 * start current_while_timer(Short Timeout);
++			 * Actor_Oper_Port_State.Expired = TRUE;
++			 */
++			port->partner_oper.port_state &= ~LACP_STATE_SYNCHRONIZATION;
+ 			port->partner_oper.port_state |= LACP_STATE_LACP_TIMEOUT;
+-			port->partner_oper.port_state |= LACP_STATE_LACP_ACTIVITY;
+ 			port->sm_rx_timer_counter = __ad_timer_to_ticks(AD_CURRENT_WHILE_TIMER, (u16)(AD_SHORT_TIMEOUT));
+ 			port->actor_oper_port_state |= LACP_STATE_EXPIRED;
+ 			port->sm_vars |= AD_PORT_CHURNED;
+@@ -1405,11 +1411,10 @@ static void ad_tx_machine(struct port *port)
+ /**
+  * ad_periodic_machine - handle a port's periodic state machine
+  * @port: the port we're looking at
+- * @bond_params: bond parameters we will use
+  *
+  * Turn ntt flag on periodically to perform periodic transmission of LACPDUs.
+  */
+-static void ad_periodic_machine(struct port *port, struct bond_params *bond_params)
++static void ad_periodic_machine(struct port *port)
+ {
+ 	periodic_states_t last_state;
+ 
+@@ -1418,8 +1423,7 @@ static void ad_periodic_machine(struct port *port, struct bond_params *bond_para
+ 
+ 	/* check if port was reinitialized */
+ 	if (((port->sm_vars & AD_PORT_BEGIN) || !(port->sm_vars & AD_PORT_LACP_ENABLED) || !port->is_enabled) ||
+-	    (!(port->actor_oper_port_state & LACP_STATE_LACP_ACTIVITY) && !(port->partner_oper.port_state & LACP_STATE_LACP_ACTIVITY)) ||
+-	    !bond_params->lacp_active) {
++	    (!(port->actor_oper_port_state & LACP_STATE_LACP_ACTIVITY) && !(port->partner_oper.port_state & LACP_STATE_LACP_ACTIVITY))) {
+ 		port->sm_periodic_state = AD_NO_PERIODIC;
+ 	}
+ 	/* check if state machine should change state */
+@@ -1943,16 +1947,16 @@ static void ad_initialize_agg(struct aggregator *aggregator)
+ /**
+  * ad_initialize_port - initialize a given port's parameters
+  * @port: the port we're looking at
+- * @lacp_fast: boolean. whether fast periodic should be used
++ * @bond_params: bond parameters we will use
+  */
+-static void ad_initialize_port(struct port *port, int lacp_fast)
++static void ad_initialize_port(struct port *port, const struct bond_params *bond_params)
+ {
+ 	static const struct port_params tmpl = {
+ 		.system_priority = 0xffff,
+ 		.key             = 1,
+ 		.port_number     = 1,
+ 		.port_priority   = 0xff,
+-		.port_state      = 1,
++		.port_state      = 0,
+ 	};
+ 	static const struct lacpdu lacpdu = {
+ 		.subtype		= 0x01,
+@@ -1970,12 +1974,14 @@ static void ad_initialize_port(struct port *port, int lacp_fast)
+ 		port->actor_port_priority = 0xff;
+ 		port->actor_port_aggregator_identifier = 0;
+ 		port->ntt = false;
+-		port->actor_admin_port_state = LACP_STATE_AGGREGATION |
+-					       LACP_STATE_LACP_ACTIVITY;
+-		port->actor_oper_port_state  = LACP_STATE_AGGREGATION |
+-					       LACP_STATE_LACP_ACTIVITY;
++		port->actor_admin_port_state = LACP_STATE_AGGREGATION;
++		port->actor_oper_port_state  = LACP_STATE_AGGREGATION;
++		if (bond_params->lacp_active) {
++			port->actor_admin_port_state |= LACP_STATE_LACP_ACTIVITY;
++			port->actor_oper_port_state  |= LACP_STATE_LACP_ACTIVITY;
++		}
+ 
+-		if (lacp_fast)
++		if (bond_params->lacp_fast)
+ 			port->actor_oper_port_state |= LACP_STATE_LACP_TIMEOUT;
+ 
+ 		memcpy(&port->partner_admin, &tmpl, sizeof(tmpl));
+@@ -2187,7 +2193,7 @@ void bond_3ad_bind_slave(struct slave *slave)
+ 		/* port initialization */
+ 		port = &(SLAVE_AD_INFO(slave)->port);
+ 
+-		ad_initialize_port(port, bond->params.lacp_fast);
++		ad_initialize_port(port, &bond->params);
+ 
+ 		port->slave = slave;
+ 		port->actor_port_number = SLAVE_AD_INFO(slave)->id;
+@@ -2499,7 +2505,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
+ 		}
+ 
+ 		ad_rx_machine(NULL, port);
+-		ad_periodic_machine(port, &bond->params);
++		ad_periodic_machine(port);
+ 		ad_port_selection_logic(port, &update_slave_arr);
+ 		ad_mux_machine(port, &update_slave_arr);
+ 		ad_tx_machine(port);
+@@ -2869,6 +2875,31 @@ void bond_3ad_update_lacp_rate(struct bonding *bond)
+ 	spin_unlock_bh(&bond->mode_lock);
+ }
+ 
++/**
++ * bond_3ad_update_lacp_active - change the LACP active state
++ * @bond: bonding struct
++ *
++ * Update actor_oper_port_state when lacp_active is modified.
++ */
++void bond_3ad_update_lacp_active(struct bonding *bond)
++{
++	struct port *port = NULL;
++	struct list_head *iter;
++	struct slave *slave;
++	int lacp_active;
++
++	lacp_active = bond->params.lacp_active;
++	spin_lock_bh(&bond->mode_lock);
++	bond_for_each_slave(bond, slave, iter) {
++		port = &(SLAVE_AD_INFO(slave)->port);
++		if (lacp_active)
++			port->actor_oper_port_state |= LACP_STATE_LACP_ACTIVITY;
++		else
++			port->actor_oper_port_state &= ~LACP_STATE_LACP_ACTIVITY;
++	}
++	spin_unlock_bh(&bond->mode_lock);
++}
++
+ size_t bond_3ad_stats_size(void)
+ {
+ 	return nla_total_size_64bit(sizeof(u64)) + /* BOND_3AD_STAT_LACPDU_RX */
+diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
+index d1b095af253bdc..e27d913b487b52 100644
+--- a/drivers/net/bonding/bond_options.c
++++ b/drivers/net/bonding/bond_options.c
+@@ -1634,6 +1634,7 @@ static int bond_option_lacp_active_set(struct bonding *bond,
+ 	netdev_dbg(bond->dev, "Setting LACP active to %s (%llu)\n",
+ 		   newval->string, newval->value);
+ 	bond->params.lacp_active = newval->value;
++	bond_3ad_update_lacp_active(bond);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
+index e6d6661a908ab1..644e8b8eb91e74 100644
+--- a/drivers/net/can/ti_hecc.c
++++ b/drivers/net/can/ti_hecc.c
+@@ -383,7 +383,7 @@ static void ti_hecc_start(struct net_device *ndev)
+ 	 * overflows instead of the hardware silently dropping the
+ 	 * messages.
+ 	 */
+-	mbx_mask = ~BIT_U32(HECC_RX_LAST_MBOX);
++	mbx_mask = ~BIT(HECC_RX_LAST_MBOX);
+ 	hecc_write(priv, HECC_CANOPC, mbx_mask);
+ 
+ 	/* Enable interrupts */
+diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
+index bf26cd0abf6dd9..0a34fd6887fc0a 100644
+--- a/drivers/net/dsa/microchip/ksz_common.c
++++ b/drivers/net/dsa/microchip/ksz_common.c
+@@ -2208,6 +2208,12 @@ static void ksz_update_port_member(struct ksz_device *dev, int port)
+ 		dev->dev_ops->cfg_port_member(dev, i, val | cpu_port);
+ 	}
+ 
++	/* HSR ports are set up once, so use the assigned membership
++	 * when the port is enabled.
++	 */
++	if (!port_member && p->stp_state == BR_STATE_FORWARDING &&
++	    (dev->hsr_ports & BIT(port)))
++		port_member = dev->hsr_ports;
+ 	dev->dev_ops->cfg_port_member(dev, port, port_member | cpu_port);
+ }
+ 
+diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
+index 8ea3c7493663fc..497a19ca198d16 100644
+--- a/drivers/net/ethernet/google/gve/gve_main.c
++++ b/drivers/net/ethernet/google/gve/gve_main.c
+@@ -2726,6 +2726,8 @@ static void gve_shutdown(struct pci_dev *pdev)
+ 	struct gve_priv *priv = netdev_priv(netdev);
+ 	bool was_up = netif_running(priv->dev);
+ 
++	netif_device_detach(netdev);
++
+ 	rtnl_lock();
+ 	if (was_up && gve_close(priv->dev)) {
+ 		/* If the dev was up, attempt to close, if close fails, reset */
+diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
+index 2a0c5a343e4727..aadc0667fa04a4 100644
+--- a/drivers/net/ethernet/intel/igc/igc_main.c
++++ b/drivers/net/ethernet/intel/igc/igc_main.c
+@@ -6987,6 +6987,13 @@ static int igc_probe(struct pci_dev *pdev,
+ 	adapter->port_num = hw->bus.func;
+ 	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
+ 
++	/* PCI config space info */
++	hw->vendor_id = pdev->vendor;
++	hw->device_id = pdev->device;
++	hw->revision_id = pdev->revision;
++	hw->subsystem_vendor_id = pdev->subsystem_vendor;
++	hw->subsystem_device_id = pdev->subsystem_device;
++
+ 	/* Disable ASPM L1.2 on I226 devices to avoid packet loss */
+ 	if (igc_is_device_id_i226(hw))
+ 		pci_disable_link_state(pdev, PCIE_LINK_STATE_L1_2);
+@@ -7013,13 +7020,6 @@ static int igc_probe(struct pci_dev *pdev,
+ 	netdev->mem_start = pci_resource_start(pdev, 0);
+ 	netdev->mem_end = pci_resource_end(pdev, 0);
+ 
+-	/* PCI config space info */
+-	hw->vendor_id = pdev->vendor;
+-	hw->device_id = pdev->device;
+-	hw->revision_id = pdev->revision;
+-	hw->subsystem_vendor_id = pdev->subsystem_vendor;
+-	hw->subsystem_device_id = pdev->subsystem_device;
+-
+ 	/* Copy the default MAC and PHY function pointers */
+ 	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
+ 	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+index 3e3b471e53f067..b12c487f36cf15 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+@@ -398,7 +398,7 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
+ 	dma_addr_t dma;
+ 	u32 cmd_type;
+ 
+-	while (budget-- > 0) {
++	while (likely(budget)) {
+ 		if (unlikely(!ixgbe_desc_unused(xdp_ring))) {
+ 			work_done = false;
+ 			break;
+@@ -433,6 +433,8 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
+ 		xdp_ring->next_to_use++;
+ 		if (xdp_ring->next_to_use == xdp_ring->count)
+ 			xdp_ring->next_to_use = 0;
++
++		budget--;
+ 	}
+ 
+ 	if (tx_desc) {
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+index 150635de2bd5a1..0c484120be7993 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+@@ -606,8 +606,8 @@ static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf)
+ 		if (!npc_check_field(rvu, blkaddr, NPC_LB, intf))
+ 			*features &= ~BIT_ULL(NPC_OUTER_VID);
+ 
+-	/* Set SPI flag only if AH/ESP and IPSEC_SPI are in the key */
+-	if (npc_check_field(rvu, blkaddr, NPC_IPSEC_SPI, intf) &&
++	/* Allow extracting SPI field from AH and ESP headers at same offset */
++	if (npc_is_field_present(rvu, NPC_IPSEC_SPI, intf) &&
+ 	    (*features & (BIT_ULL(NPC_IPPROTO_ESP) | BIT_ULL(NPC_IPPROTO_AH))))
+ 		*features |= BIT_ULL(NPC_IPSEC_SPI);
+ 
+diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+index c855fb799ce145..e9bd3274198379 100644
+--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
++++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+@@ -101,7 +101,9 @@ mtk_flow_get_wdma_info(struct net_device *dev, const u8 *addr, struct mtk_wdma_i
+ 	if (!IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED))
+ 		return -1;
+ 
++	rcu_read_lock();
+ 	err = dev_fill_forward_path(dev, addr, &stack);
++	rcu_read_unlock();
+ 	if (err)
+ 		return err;
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h b/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h
+index b59aee75de94e2..2c98a5299df337 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h
+@@ -26,7 +26,6 @@ struct mlx5e_dcbx {
+ 	u8                         cap;
+ 
+ 	/* Buffer configuration */
+-	bool                       manual_buffer;
+ 	u32                        cable_len;
+ 	u32                        xoff;
+ 	u16                        port_buff_cell_sz;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
+index 5ae787656a7ca0..3efa8bf1d14ef4 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
+@@ -272,8 +272,8 @@ static int port_update_shared_buffer(struct mlx5_core_dev *mdev,
+ 	/* Total shared buffer size is split in a ratio of 3:1 between
+ 	 * lossy and lossless pools respectively.
+ 	 */
+-	lossy_epool_size = (shared_buffer_size / 4) * 3;
+ 	lossless_ipool_size = shared_buffer_size / 4;
++	lossy_epool_size    = shared_buffer_size - lossless_ipool_size;
+ 
+ 	mlx5e_port_set_sbpr(mdev, 0, MLX5_EGRESS_DIR, MLX5_LOSSY_POOL, 0,
+ 			    lossy_epool_size);
+@@ -288,14 +288,12 @@ static int port_set_buffer(struct mlx5e_priv *priv,
+ 	u16 port_buff_cell_sz = priv->dcbx.port_buff_cell_sz;
+ 	struct mlx5_core_dev *mdev = priv->mdev;
+ 	int sz = MLX5_ST_SZ_BYTES(pbmc_reg);
+-	u32 new_headroom_size = 0;
+-	u32 current_headroom_size;
++	u32 current_headroom_cells = 0;
++	u32 new_headroom_cells = 0;
+ 	void *in;
+ 	int err;
+ 	int i;
+ 
+-	current_headroom_size = port_buffer->headroom_size;
+-
+ 	in = kzalloc(sz, GFP_KERNEL);
+ 	if (!in)
+ 		return -ENOMEM;
+@@ -306,12 +304,14 @@ static int port_set_buffer(struct mlx5e_priv *priv,
+ 
+ 	for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) {
+ 		void *buffer = MLX5_ADDR_OF(pbmc_reg, in, buffer[i]);
++		current_headroom_cells += MLX5_GET(bufferx_reg, buffer, size);
++
+ 		u64 size = port_buffer->buffer[i].size;
+ 		u64 xoff = port_buffer->buffer[i].xoff;
+ 		u64 xon = port_buffer->buffer[i].xon;
+ 
+-		new_headroom_size += size;
+ 		do_div(size, port_buff_cell_sz);
++		new_headroom_cells += size;
+ 		do_div(xoff, port_buff_cell_sz);
+ 		do_div(xon, port_buff_cell_sz);
+ 		MLX5_SET(bufferx_reg, buffer, size, size);
+@@ -320,10 +320,8 @@ static int port_set_buffer(struct mlx5e_priv *priv,
+ 		MLX5_SET(bufferx_reg, buffer, xon_threshold, xon);
+ 	}
+ 
+-	new_headroom_size /= port_buff_cell_sz;
+-	current_headroom_size /= port_buff_cell_sz;
+-	err = port_update_shared_buffer(priv->mdev, current_headroom_size,
+-					new_headroom_size);
++	err = port_update_shared_buffer(priv->mdev, current_headroom_cells,
++					new_headroom_cells);
+ 	if (err)
+ 		goto out;
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+index 8705cffc747ffb..b08328fe1aa300 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+@@ -362,6 +362,7 @@ static int mlx5e_dcbnl_ieee_getpfc(struct net_device *dev,
+ static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev,
+ 				   struct ieee_pfc *pfc)
+ {
++	u8 buffer_ownership = MLX5_BUF_OWNERSHIP_UNKNOWN;
+ 	struct mlx5e_priv *priv = netdev_priv(dev);
+ 	struct mlx5_core_dev *mdev = priv->mdev;
+ 	u32 old_cable_len = priv->dcbx.cable_len;
+@@ -389,7 +390,14 @@ static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev,
+ 
+ 	if (MLX5_BUFFER_SUPPORTED(mdev)) {
+ 		pfc_new.pfc_en = (changed & MLX5E_PORT_BUFFER_PFC) ? pfc->pfc_en : curr_pfc_en;
+-		if (priv->dcbx.manual_buffer)
++		ret = mlx5_query_port_buffer_ownership(mdev,
++						       &buffer_ownership);
++		if (ret)
++			netdev_err(dev,
++				   "%s, Failed to get buffer ownership: %d\n",
++				   __func__, ret);
++
++		if (buffer_ownership == MLX5_BUF_OWNERSHIP_SW_OWNED)
+ 			ret = mlx5e_port_manual_buffer_config(priv, changed,
+ 							      dev->mtu, &pfc_new,
+ 							      NULL, NULL);
+@@ -982,7 +990,6 @@ static int mlx5e_dcbnl_setbuffer(struct net_device *dev,
+ 	if (!changed)
+ 		return 0;
+ 
+-	priv->dcbx.manual_buffer = true;
+ 	err = mlx5e_port_manual_buffer_config(priv, changed, dev->mtu, NULL,
+ 					      buffer_size, prio2buffer);
+ 	return err;
+@@ -1250,7 +1257,6 @@ void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv)
+ 		priv->dcbx.cap |= DCB_CAP_DCBX_HOST;
+ 
+ 	priv->dcbx.port_buff_cell_sz = mlx5e_query_port_buffers_cell_size(priv);
+-	priv->dcbx.manual_buffer = false;
+ 	priv->dcbx.cable_len = MLX5E_DEFAULT_CABLE_LEN;
+ 
+ 	mlx5e_ets_init(priv);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
+index f8869c9b68029f..b0c97648ffc712 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
+@@ -47,10 +47,12 @@ static void mlx5_esw_offloads_pf_vf_devlink_port_attrs_set(struct mlx5_eswitch *
+ 		devlink_port_attrs_pci_vf_set(dl_port, controller_num, pfnum,
+ 					      vport_num - 1, external);
+ 	}  else if (mlx5_core_is_ec_vf_vport(esw->dev, vport_num)) {
++		u16 base_vport = mlx5_core_ec_vf_vport_base(dev);
++
+ 		memcpy(dl_port->attrs.switch_id.id, ppid.id, ppid.id_len);
+ 		dl_port->attrs.switch_id.id_len = ppid.id_len;
+ 		devlink_port_attrs_pci_vf_set(dl_port, 0, pfnum,
+-					      vport_num - 1, false);
++					      vport_num - base_vport, false);
+ 	}
+ }
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+index 62c770b0eaa83a..dc6965f6746ec2 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+@@ -114,6 +114,21 @@ struct mlx5_cmd_alias_obj_create_attr {
+ 	u8 access_key[ACCESS_KEY_LEN];
+ };
+ 
++struct mlx5_port_eth_proto {
++	u32 cap;
++	u32 admin;
++	u32 oper;
++};
++
++struct mlx5_module_eeprom_query_params {
++	u16 size;
++	u16 offset;
++	u16 i2c_address;
++	u32 page;
++	u32 bank;
++	u32 module_number;
++};
++
+ static inline void mlx5_printk(struct mlx5_core_dev *dev, int level, const char *format, ...)
+ {
+ 	struct device *device = dev->device;
+@@ -278,6 +293,78 @@ int mlx5_set_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 arm, u8 mode);
+ struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev);
+ void mlx5_dm_cleanup(struct mlx5_core_dev *dev);
+ 
++void mlx5_toggle_port_link(struct mlx5_core_dev *dev);
++int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
++			       enum mlx5_port_status status);
++int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
++				 enum mlx5_port_status *status);
++int mlx5_set_port_beacon(struct mlx5_core_dev *dev, u16 beacon_duration);
++
++int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port);
++int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause);
++int mlx5_query_port_pause(struct mlx5_core_dev *dev,
++			  u32 *rx_pause, u32 *tx_pause);
++
++int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx);
++int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx,
++			u8 *pfc_en_rx);
++
++int mlx5_set_port_stall_watermark(struct mlx5_core_dev *dev,
++				  u16 stall_critical_watermark,
++				  u16 stall_minor_watermark);
++int mlx5_query_port_stall_watermark(struct mlx5_core_dev *dev,
++				    u16 *stall_critical_watermark,
++				    u16 *stall_minor_watermark);
++
++int mlx5_max_tc(struct mlx5_core_dev *mdev);
++int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc);
++int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev,
++			    u8 prio, u8 *tc);
++int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group);
++int mlx5_query_port_tc_group(struct mlx5_core_dev *mdev,
++			     u8 tc, u8 *tc_group);
++int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw);
++int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev,
++				u8 tc, u8 *bw_pct);
++int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev,
++				    u8 *max_bw_value,
++				    u8 *max_bw_unit);
++int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev,
++				   u8 *max_bw_value,
++				   u8 *max_bw_unit);
++int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode);
++int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode);
++
++int mlx5_query_ports_check(struct mlx5_core_dev *mdev, u32 *out, int outlen);
++int mlx5_set_ports_check(struct mlx5_core_dev *mdev, u32 *in, int inlen);
++int mlx5_set_port_fcs(struct mlx5_core_dev *mdev, u8 enable);
++void mlx5_query_port_fcs(struct mlx5_core_dev *mdev, bool *supported,
++			 bool *enabled);
++int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
++			     u16 offset, u16 size, u8 *data);
++int
++mlx5_query_module_eeprom_by_page(struct mlx5_core_dev *dev,
++				 struct mlx5_module_eeprom_query_params *params,
++				 u8 *data);
++
++int mlx5_query_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *out);
++int mlx5_set_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *in);
++int mlx5_set_trust_state(struct mlx5_core_dev *mdev, u8 trust_state);
++int mlx5_query_trust_state(struct mlx5_core_dev *mdev, u8 *trust_state);
++int mlx5_query_port_buffer_ownership(struct mlx5_core_dev *mdev,
++				     u8 *buffer_ownership);
++int mlx5_set_dscp2prio(struct mlx5_core_dev *mdev, u8 dscp, u8 prio);
++int mlx5_query_dscp2prio(struct mlx5_core_dev *mdev, u8 *dscp2prio);
++
++int mlx5_port_query_eth_proto(struct mlx5_core_dev *dev, u8 port, bool ext,
++			      struct mlx5_port_eth_proto *eproto);
++bool mlx5_ptys_ext_supported(struct mlx5_core_dev *mdev);
++u32 mlx5_port_ptys2speed(struct mlx5_core_dev *mdev, u32 eth_proto_oper,
++			 bool force_legacy);
++u32 mlx5_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed,
++			      bool force_legacy);
++int mlx5_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
++
+ #define MLX5_PPS_CAP(mdev) (MLX5_CAP_GEN((mdev), pps) &&		\
+ 			    MLX5_CAP_GEN((mdev), pps_modify) &&		\
+ 			    MLX5_CAP_MCAM_FEATURE((mdev), mtpps_fs) &&	\
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
+index 50931584132b99..389b34d56b751a 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
+@@ -196,7 +196,6 @@ void mlx5_toggle_port_link(struct mlx5_core_dev *dev)
+ 	if (ps == MLX5_PORT_UP)
+ 		mlx5_set_port_admin_status(dev, MLX5_PORT_UP);
+ }
+-EXPORT_SYMBOL_GPL(mlx5_toggle_port_link);
+ 
+ int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
+ 			       enum mlx5_port_status status)
+@@ -210,7 +209,6 @@ int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
+ 	return mlx5_core_access_reg(dev, in, sizeof(in), out,
+ 				    sizeof(out), MLX5_REG_PAOS, 0, 1);
+ }
+-EXPORT_SYMBOL_GPL(mlx5_set_port_admin_status);
+ 
+ int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
+ 				 enum mlx5_port_status *status)
+@@ -227,7 +225,6 @@ int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
+ 	*status = MLX5_GET(paos_reg, out, admin_status);
+ 	return 0;
+ }
+-EXPORT_SYMBOL_GPL(mlx5_query_port_admin_status);
+ 
+ static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, u16 *admin_mtu,
+ 				u16 *max_mtu, u16 *oper_mtu, u8 port)
+@@ -257,7 +254,6 @@ int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port)
+ 	return mlx5_core_access_reg(dev, in, sizeof(in), out,
+ 				   sizeof(out), MLX5_REG_PMTU, 0, 1);
+ }
+-EXPORT_SYMBOL_GPL(mlx5_set_port_mtu);
+ 
+ void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu,
+ 			     u8 port)
+@@ -447,7 +443,6 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
+ 
+ 	return mlx5_query_mcia(dev, &query, data);
+ }
+-EXPORT_SYMBOL_GPL(mlx5_query_module_eeprom);
+ 
+ int mlx5_query_module_eeprom_by_page(struct mlx5_core_dev *dev,
+ 				     struct mlx5_module_eeprom_query_params *params,
+@@ -467,7 +462,6 @@ int mlx5_query_module_eeprom_by_page(struct mlx5_core_dev *dev,
+ 
+ 	return mlx5_query_mcia(dev, params, data);
+ }
+-EXPORT_SYMBOL_GPL(mlx5_query_module_eeprom_by_page);
+ 
+ static int mlx5_query_port_pvlc(struct mlx5_core_dev *dev, u32 *pvlc,
+ 				int pvlc_size,  u8 local_port)
+@@ -518,7 +512,6 @@ int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause)
+ 	return mlx5_core_access_reg(dev, in, sizeof(in), out,
+ 				    sizeof(out), MLX5_REG_PFCC, 0, 1);
+ }
+-EXPORT_SYMBOL_GPL(mlx5_set_port_pause);
+ 
+ int mlx5_query_port_pause(struct mlx5_core_dev *dev,
+ 			  u32 *rx_pause, u32 *tx_pause)
+@@ -538,7 +531,6 @@ int mlx5_query_port_pause(struct mlx5_core_dev *dev,
+ 
+ 	return 0;
+ }
+-EXPORT_SYMBOL_GPL(mlx5_query_port_pause);
+ 
+ int mlx5_set_port_stall_watermark(struct mlx5_core_dev *dev,
+ 				  u16 stall_critical_watermark,
+@@ -597,7 +589,6 @@ int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx)
+ 	return mlx5_core_access_reg(dev, in, sizeof(in), out,
+ 				    sizeof(out), MLX5_REG_PFCC, 0, 1);
+ }
+-EXPORT_SYMBOL_GPL(mlx5_set_port_pfc);
+ 
+ int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx, u8 *pfc_en_rx)
+ {
+@@ -616,7 +607,6 @@ int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx, u8 *pfc_en_rx)
+ 
+ 	return 0;
+ }
+-EXPORT_SYMBOL_GPL(mlx5_query_port_pfc);
+ 
+ int mlx5_max_tc(struct mlx5_core_dev *mdev)
+ {
+@@ -667,7 +657,6 @@ int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc)
+ 
+ 	return 0;
+ }
+-EXPORT_SYMBOL_GPL(mlx5_set_port_prio_tc);
+ 
+ int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev,
+ 			    u8 prio, u8 *tc)
+@@ -689,7 +678,6 @@ int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev,
+ 
+ 	return err;
+ }
+-EXPORT_SYMBOL_GPL(mlx5_query_port_prio_tc);
+ 
+ static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in,
+ 				   int inlen)
+@@ -728,7 +716,6 @@ int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group)
+ 
+ 	return mlx5_set_port_qetcr_reg(mdev, in, sizeof(in));
+ }
+-EXPORT_SYMBOL_GPL(mlx5_set_port_tc_group);
+ 
+ int mlx5_query_port_tc_group(struct mlx5_core_dev *mdev,
+ 			     u8 tc, u8 *tc_group)
+@@ -749,7 +736,6 @@ int mlx5_query_port_tc_group(struct mlx5_core_dev *mdev,
+ 
+ 	return 0;
+ }
+-EXPORT_SYMBOL_GPL(mlx5_query_port_tc_group);
+ 
+ int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw)
+ {
+@@ -763,7 +749,6 @@ int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw)
+ 
+ 	return mlx5_set_port_qetcr_reg(mdev, in, sizeof(in));
+ }
+-EXPORT_SYMBOL_GPL(mlx5_set_port_tc_bw_alloc);
+ 
+ int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev,
+ 				u8 tc, u8 *bw_pct)
+@@ -784,7 +769,6 @@ int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev,
+ 
+ 	return 0;
+ }
+-EXPORT_SYMBOL_GPL(mlx5_query_port_tc_bw_alloc);
+ 
+ int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev,
+ 				    u8 *max_bw_value,
+@@ -808,7 +792,6 @@ int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev,
+ 
+ 	return mlx5_set_port_qetcr_reg(mdev, in, sizeof(in));
+ }
+-EXPORT_SYMBOL_GPL(mlx5_modify_port_ets_rate_limit);
+ 
+ int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev,
+ 				   u8 *max_bw_value,
+@@ -834,7 +817,6 @@ int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev,
+ 
+ 	return 0;
+ }
+-EXPORT_SYMBOL_GPL(mlx5_query_port_ets_rate_limit);
+ 
+ int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode)
+ {
+@@ -845,7 +827,6 @@ int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode)
+ 	MLX5_SET(set_wol_rol_in, in, wol_mode, wol_mode);
+ 	return mlx5_cmd_exec_in(mdev, set_wol_rol, in);
+ }
+-EXPORT_SYMBOL_GPL(mlx5_set_port_wol);
+ 
+ int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode)
+ {
+@@ -860,7 +841,6 @@ int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode)
+ 
+ 	return err;
+ }
+-EXPORT_SYMBOL_GPL(mlx5_query_port_wol);
+ 
+ int mlx5_query_ports_check(struct mlx5_core_dev *mdev, u32 *out, int outlen)
+ {
+@@ -988,6 +968,26 @@ int mlx5_query_trust_state(struct mlx5_core_dev *mdev, u8 *trust_state)
+ 	return err;
+ }
+ 
++int mlx5_query_port_buffer_ownership(struct mlx5_core_dev *mdev,
++				     u8 *buffer_ownership)
++{
++	u32 out[MLX5_ST_SZ_DW(pfcc_reg)] = {};
++	int err;
++
++	if (!MLX5_CAP_PCAM_FEATURE(mdev, buffer_ownership)) {
++		*buffer_ownership = MLX5_BUF_OWNERSHIP_UNKNOWN;
++		return 0;
++	}
++
++	err = mlx5_query_pfcc_reg(mdev, out, sizeof(out));
++	if (err)
++		return err;
++
++	*buffer_ownership = MLX5_GET(pfcc_reg, out, buf_ownership);
++
++	return 0;
++}
++
+ int mlx5_set_dscp2prio(struct mlx5_core_dev *mdev, u8 dscp, u8 prio)
+ {
+ 	int sz = MLX5_ST_SZ_BYTES(qpdpm_reg);
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+index 3f5e5d99251b75..26401bb57572db 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+@@ -2507,6 +2507,8 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
+ 			     ROUTER_EXP, false),
+ 	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_DIP_LINK_LOCAL, FORWARD,
+ 			     ROUTER_EXP, false),
++	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_LINK_LOCAL, FORWARD,
++			     ROUTER_EXP, false),
+ 	/* Multicast Router Traps */
+ 	MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
+ 	MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
+index 83477c8e6971b8..5bfc1499347a93 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
++++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
+@@ -95,6 +95,7 @@ enum {
+ 	MLXSW_TRAP_ID_DISCARD_ING_ROUTER_IPV4_SIP_BC = 0x16A,
+ 	MLXSW_TRAP_ID_DISCARD_ING_ROUTER_IPV4_DIP_LOCAL_NET = 0x16B,
+ 	MLXSW_TRAP_ID_DISCARD_ING_ROUTER_DIP_LINK_LOCAL = 0x16C,
++	MLXSW_TRAP_ID_DISCARD_ING_ROUTER_SIP_LINK_LOCAL = 0x16D,
+ 	MLXSW_TRAP_ID_DISCARD_ROUTER_IRIF_EN = 0x178,
+ 	MLXSW_TRAP_ID_DISCARD_ROUTER_ERIF_EN = 0x179,
+ 	MLXSW_TRAP_ID_DISCARD_ROUTER_LPM4 = 0x17B,
+diff --git a/drivers/net/ethernet/microchip/lan865x/lan865x.c b/drivers/net/ethernet/microchip/lan865x/lan865x.c
+index dd436bdff0f86d..84c41f19356126 100644
+--- a/drivers/net/ethernet/microchip/lan865x/lan865x.c
++++ b/drivers/net/ethernet/microchip/lan865x/lan865x.c
+@@ -32,6 +32,10 @@
+ /* MAC Specific Addr 1 Top Reg */
+ #define LAN865X_REG_MAC_H_SADDR1	0x00010023
+ 
++/* MAC TSU Timer Increment Register */
++#define LAN865X_REG_MAC_TSU_TIMER_INCR		0x00010077
++#define MAC_TSU_TIMER_INCR_COUNT_NANOSECONDS	0x0028
++
+ struct lan865x_priv {
+ 	struct work_struct multicast_work;
+ 	struct net_device *netdev;
+@@ -311,6 +315,8 @@ static int lan865x_net_open(struct net_device *netdev)
+ 
+ 	phy_start(netdev->phydev);
+ 
++	netif_start_queue(netdev);
++
+ 	return 0;
+ }
+ 
+@@ -344,6 +350,21 @@ static int lan865x_probe(struct spi_device *spi)
+ 		goto free_netdev;
+ 	}
+ 
++	/* LAN865x Rev.B0/B1 configuration parameters from AN1760
++	 * As per the Configuration Application Note AN1760 published in the
++	 * link, https://www.microchip.com/en-us/application-notes/an1760
++	 * Revision F (DS60001760G - June 2024), configure the MAC to set time
++	 * stamping at the end of the Start of Frame Delimiter (SFD) and set the
++	 * Timer Increment reg to 40 ns to be used as a 25 MHz internal clock.
++	 */
++	ret = oa_tc6_write_register(priv->tc6, LAN865X_REG_MAC_TSU_TIMER_INCR,
++				    MAC_TSU_TIMER_INCR_COUNT_NANOSECONDS);
++	if (ret) {
++		dev_err(&spi->dev, "Failed to config TSU Timer Incr reg: %d\n",
++			ret);
++		goto oa_tc6_exit;
++	}
++
+ 	/* As per the point s3 in the below errata, SPI receive Ethernet frame
+ 	 * transfer may halt when starting the next frame in the same data block
+ 	 * (chunk) as the end of a previous frame. The RFA field should be
+diff --git a/drivers/net/ethernet/realtek/rtase/rtase.h b/drivers/net/ethernet/realtek/rtase/rtase.h
+index 4a4434869b10a8..b3310e342ccf40 100644
+--- a/drivers/net/ethernet/realtek/rtase/rtase.h
++++ b/drivers/net/ethernet/realtek/rtase/rtase.h
+@@ -239,7 +239,7 @@ union rtase_rx_desc {
+ #define RTASE_RX_RES        BIT(20)
+ #define RTASE_RX_RUNT       BIT(19)
+ #define RTASE_RX_RWT        BIT(18)
+-#define RTASE_RX_CRC        BIT(16)
++#define RTASE_RX_CRC        BIT(17)
+ #define RTASE_RX_V6F        BIT(31)
+ #define RTASE_RX_V4F        BIT(30)
+ #define RTASE_RX_UDPT       BIT(29)
+diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+index ddbc4624ae8876..055c5765bd8612 100644
+--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
++++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+@@ -240,6 +240,44 @@ static void prueth_emac_stop(struct prueth *prueth)
+ 	}
+ }
+ 
++static void icssg_enable_fw_offload(struct prueth *prueth)
++{
++	struct prueth_emac *emac;
++	int mac;
++
++	for (mac = PRUETH_MAC0; mac < PRUETH_NUM_MACS; mac++) {
++		emac = prueth->emac[mac];
++		if (prueth->is_hsr_offload_mode) {
++			if (emac->ndev->features & NETIF_F_HW_HSR_TAG_RM)
++				icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_ENABLE);
++			else
++				icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_DISABLE);
++		}
++
++		if (prueth->is_switch_mode || prueth->is_hsr_offload_mode) {
++			if (netif_running(emac->ndev)) {
++				icssg_fdb_add_del(emac, eth_stp_addr, prueth->default_vlan,
++						  ICSSG_FDB_ENTRY_P0_MEMBERSHIP |
++						  ICSSG_FDB_ENTRY_P1_MEMBERSHIP |
++						  ICSSG_FDB_ENTRY_P2_MEMBERSHIP |
++						  ICSSG_FDB_ENTRY_BLOCK,
++						  true);
++				icssg_vtbl_modify(emac, emac->port_vlan | DEFAULT_VID,
++						  BIT(emac->port_id) | DEFAULT_PORT_MASK,
++						  BIT(emac->port_id) | DEFAULT_UNTAG_MASK,
++						  true);
++				if (prueth->is_hsr_offload_mode)
++					icssg_vtbl_modify(emac, DEFAULT_VID,
++							  DEFAULT_PORT_MASK,
++							  DEFAULT_UNTAG_MASK, true);
++				icssg_set_pvid(prueth, emac->port_vlan, emac->port_id);
++				if (prueth->is_switch_mode)
++					icssg_set_port_state(emac, ICSSG_EMAC_PORT_VLAN_AWARE_ENABLE);
++			}
++		}
++	}
++}
++
+ static int prueth_emac_common_start(struct prueth *prueth)
+ {
+ 	struct prueth_emac *emac;
+@@ -690,6 +728,7 @@ static int emac_ndo_open(struct net_device *ndev)
+ 		ret = prueth_emac_common_start(prueth);
+ 		if (ret)
+ 			goto free_rx_irq;
++		icssg_enable_fw_offload(prueth);
+ 	}
+ 
+ 	flow_cfg = emac->dram.va + ICSSG_CONFIG_OFFSET + PSI_L_REGULAR_FLOW_ID_BASE_OFFSET;
+@@ -1146,8 +1185,7 @@ static int prueth_emac_restart(struct prueth *prueth)
+ 
+ static void icssg_change_mode(struct prueth *prueth)
+ {
+-	struct prueth_emac *emac;
+-	int mac, ret;
++	int ret;
+ 
+ 	ret = prueth_emac_restart(prueth);
+ 	if (ret) {
+@@ -1155,35 +1193,7 @@ static void icssg_change_mode(struct prueth *prueth)
+ 		return;
+ 	}
+ 
+-	for (mac = PRUETH_MAC0; mac < PRUETH_NUM_MACS; mac++) {
+-		emac = prueth->emac[mac];
+-		if (prueth->is_hsr_offload_mode) {
+-			if (emac->ndev->features & NETIF_F_HW_HSR_TAG_RM)
+-				icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_ENABLE);
+-			else
+-				icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_DISABLE);
+-		}
+-
+-		if (netif_running(emac->ndev)) {
+-			icssg_fdb_add_del(emac, eth_stp_addr, prueth->default_vlan,
+-					  ICSSG_FDB_ENTRY_P0_MEMBERSHIP |
+-					  ICSSG_FDB_ENTRY_P1_MEMBERSHIP |
+-					  ICSSG_FDB_ENTRY_P2_MEMBERSHIP |
+-					  ICSSG_FDB_ENTRY_BLOCK,
+-					  true);
+-			icssg_vtbl_modify(emac, emac->port_vlan | DEFAULT_VID,
+-					  BIT(emac->port_id) | DEFAULT_PORT_MASK,
+-					  BIT(emac->port_id) | DEFAULT_UNTAG_MASK,
+-					  true);
+-			if (prueth->is_hsr_offload_mode)
+-				icssg_vtbl_modify(emac, DEFAULT_VID,
+-						  DEFAULT_PORT_MASK,
+-						  DEFAULT_UNTAG_MASK, true);
+-			icssg_set_pvid(prueth, emac->port_vlan, emac->port_id);
+-			if (prueth->is_switch_mode)
+-				icssg_set_port_state(emac, ICSSG_EMAC_PORT_VLAN_AWARE_ENABLE);
+-		}
+-	}
++	icssg_enable_fw_offload(prueth);
+ }
+ 
+ static int prueth_netdevice_port_link(struct net_device *ndev,
+diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+index 2d47b35443af00..1775e060d39d38 100644
+--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
++++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+@@ -1119,6 +1119,7 @@ static void axienet_dma_rx_cb(void *data, const struct dmaengine_result *result)
+ 	struct axienet_local *lp = data;
+ 	struct sk_buff *skb;
+ 	u32 *app_metadata;
++	int i;
+ 
+ 	skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_tail++);
+ 	skb = skbuf_dma->skb;
+@@ -1137,7 +1138,10 @@ static void axienet_dma_rx_cb(void *data, const struct dmaengine_result *result)
+ 	u64_stats_add(&lp->rx_packets, 1);
+ 	u64_stats_add(&lp->rx_bytes, rx_len);
+ 	u64_stats_update_end(&lp->rx_stat_sync);
+-	axienet_rx_submit_desc(lp->ndev);
++
++	for (i = 0; i < CIRC_SPACE(lp->rx_ring_head, lp->rx_ring_tail,
++				   RX_BUF_NUM_DEFAULT); i++)
++		axienet_rx_submit_desc(lp->ndev);
+ 	dma_async_issue_pending(lp->rx_chan);
+ }
+ 
+@@ -1394,7 +1398,6 @@ static void axienet_rx_submit_desc(struct net_device *ndev)
+ 	if (!skbuf_dma)
+ 		return;
+ 
+-	lp->rx_ring_head++;
+ 	skb = netdev_alloc_skb(ndev, lp->max_frm_size);
+ 	if (!skb)
+ 		return;
+@@ -1419,6 +1422,7 @@ static void axienet_rx_submit_desc(struct net_device *ndev)
+ 	skbuf_dma->desc = dma_rx_desc;
+ 	dma_rx_desc->callback_param = lp;
+ 	dma_rx_desc->callback_result = axienet_dma_rx_cb;
++	lp->rx_ring_head++;
+ 	dmaengine_submit(dma_rx_desc);
+ 
+ 	return;
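The axienet refill hunk above submits one descriptor per free ring slot, computed with CIRC_SPACE(), and only advances rx_ring_head once a descriptor has been fully prepared, so a failed skb allocation no longer leaks a slot. A minimal standalone sketch of that space calculation (ring size must be a power of two; names here are illustrative, not the driver's):

#include <stdio.h>

/* From include/linux/circ_buf.h: free space in a power-of-two ring. */
#define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
#define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))

int main(void)
{
	unsigned int head = 5, tail = 2, size = 8;	/* 8-slot ring */
	unsigned int i, space = CIRC_SPACE(head, tail, size);

	/* one slot stays empty to tell full from empty: 8 - 3 - 1 = 4 */
	printf("free slots: %u\n", space);

	for (i = 0; i < space; i++)
		head++;	/* a submit_descriptor() call would go here */

	printf("after refill: %u free\n", CIRC_SPACE(head, tail, size));
	return 0;
}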
+diff --git a/drivers/net/phy/mscc/mscc.h b/drivers/net/phy/mscc/mscc.h
+index 6a3d8a754eb8de..58c6d47fbe046d 100644
+--- a/drivers/net/phy/mscc/mscc.h
++++ b/drivers/net/phy/mscc/mscc.h
+@@ -362,6 +362,13 @@ struct vsc85xx_hw_stat {
+ 	u16 mask;
+ };
+ 
++struct vsc8531_skb_cb {
++	u32 ns;
++};
++
++#define VSC8531_SKB_CB(skb) \
++	((struct vsc8531_skb_cb *)((skb)->cb))
++
+ struct vsc8531_private {
+ 	int rate_magic;
+ 	u16 supp_led_modes;
+@@ -410,6 +417,11 @@ struct vsc8531_private {
+ 	 */
+ 	struct mutex ts_lock;
+ 	struct mutex phc_lock;
++
++	/* list of skbs that were received and need timestamp information but
++	 * haven't received it yet
++	 */
++	struct sk_buff_head rx_skbs_list;
+ };
+ 
+ /* Shared structure between the PHYs of the same package.
+diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c
+index 6f74ce0ab1aad8..42cafa68c40098 100644
+--- a/drivers/net/phy/mscc/mscc_main.c
++++ b/drivers/net/phy/mscc/mscc_main.c
+@@ -2335,6 +2335,13 @@ static int vsc85xx_probe(struct phy_device *phydev)
+ 	return vsc85xx_dt_led_modes_get(phydev, default_mode);
+ }
+ 
++static void vsc85xx_remove(struct phy_device *phydev)
++{
++	struct vsc8531_private *priv = phydev->priv;
++
++	skb_queue_purge(&priv->rx_skbs_list);
++}
++
+ /* Microsemi VSC85xx PHYs */
+ static struct phy_driver vsc85xx_driver[] = {
+ {
+@@ -2589,6 +2596,7 @@ static struct phy_driver vsc85xx_driver[] = {
+ 	.config_intr    = &vsc85xx_config_intr,
+ 	.suspend	= &genphy_suspend,
+ 	.resume		= &genphy_resume,
++	.remove		= &vsc85xx_remove,
+ 	.probe		= &vsc8574_probe,
+ 	.set_wol	= &vsc85xx_wol_set,
+ 	.get_wol	= &vsc85xx_wol_get,
+@@ -2614,6 +2622,7 @@ static struct phy_driver vsc85xx_driver[] = {
+ 	.config_intr    = &vsc85xx_config_intr,
+ 	.suspend	= &genphy_suspend,
+ 	.resume		= &genphy_resume,
++	.remove		= &vsc85xx_remove,
+ 	.probe		= &vsc8574_probe,
+ 	.set_wol	= &vsc85xx_wol_set,
+ 	.get_wol	= &vsc85xx_wol_get,
+@@ -2639,6 +2648,7 @@ static struct phy_driver vsc85xx_driver[] = {
+ 	.config_intr    = &vsc85xx_config_intr,
+ 	.suspend	= &genphy_suspend,
+ 	.resume		= &genphy_resume,
++	.remove		= &vsc85xx_remove,
+ 	.probe		= &vsc8584_probe,
+ 	.get_tunable	= &vsc85xx_get_tunable,
+ 	.set_tunable	= &vsc85xx_set_tunable,
+@@ -2662,6 +2672,7 @@ static struct phy_driver vsc85xx_driver[] = {
+ 	.config_intr    = &vsc85xx_config_intr,
+ 	.suspend	= &genphy_suspend,
+ 	.resume		= &genphy_resume,
++	.remove		= &vsc85xx_remove,
+ 	.probe		= &vsc8584_probe,
+ 	.get_tunable	= &vsc85xx_get_tunable,
+ 	.set_tunable	= &vsc85xx_set_tunable,
+@@ -2685,6 +2696,7 @@ static struct phy_driver vsc85xx_driver[] = {
+ 	.config_intr    = &vsc85xx_config_intr,
+ 	.suspend	= &genphy_suspend,
+ 	.resume		= &genphy_resume,
++	.remove		= &vsc85xx_remove,
+ 	.probe		= &vsc8584_probe,
+ 	.get_tunable	= &vsc85xx_get_tunable,
+ 	.set_tunable	= &vsc85xx_set_tunable,
+diff --git a/drivers/net/phy/mscc/mscc_ptp.c b/drivers/net/phy/mscc/mscc_ptp.c
+index bce6cc5b04ee0f..80992827a3bdd1 100644
+--- a/drivers/net/phy/mscc/mscc_ptp.c
++++ b/drivers/net/phy/mscc/mscc_ptp.c
+@@ -1191,9 +1191,7 @@ static bool vsc85xx_rxtstamp(struct mii_timestamper *mii_ts,
+ {
+ 	struct vsc8531_private *vsc8531 =
+ 		container_of(mii_ts, struct vsc8531_private, mii_ts);
+-	struct skb_shared_hwtstamps *shhwtstamps = NULL;
+ 	struct vsc85xx_ptphdr *ptphdr;
+-	struct timespec64 ts;
+ 	unsigned long ns;
+ 
+ 	if (!vsc8531->ptp->configured)
+@@ -1203,27 +1201,52 @@ static bool vsc85xx_rxtstamp(struct mii_timestamper *mii_ts,
+ 	    type == PTP_CLASS_NONE)
+ 		return false;
+ 
+-	vsc85xx_gettime(&vsc8531->ptp->caps, &ts);
+-
+ 	ptphdr = get_ptp_header_rx(skb, vsc8531->ptp->rx_filter);
+ 	if (!ptphdr)
+ 		return false;
+ 
+-	shhwtstamps = skb_hwtstamps(skb);
+-	memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
+-
+ 	ns = ntohl(ptphdr->rsrvd2);
+ 
+-	/* nsec is in reserved field */
+-	if (ts.tv_nsec < ns)
+-		ts.tv_sec--;
++	VSC8531_SKB_CB(skb)->ns = ns;
++	skb_queue_tail(&vsc8531->rx_skbs_list, skb);
+ 
+-	shhwtstamps->hwtstamp = ktime_set(ts.tv_sec, ns);
+-	netif_rx(skb);
++	ptp_schedule_worker(vsc8531->ptp->ptp_clock, 0);
+ 
+ 	return true;
+ }
+ 
++static long vsc85xx_do_aux_work(struct ptp_clock_info *info)
++{
++	struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps);
++	struct skb_shared_hwtstamps *shhwtstamps = NULL;
++	struct phy_device *phydev = ptp->phydev;
++	struct vsc8531_private *priv = phydev->priv;
++	struct sk_buff_head received;
++	struct sk_buff *rx_skb;
++	struct timespec64 ts;
++	unsigned long flags;
++
++	__skb_queue_head_init(&received);
++	spin_lock_irqsave(&priv->rx_skbs_list.lock, flags);
++	skb_queue_splice_tail_init(&priv->rx_skbs_list, &received);
++	spin_unlock_irqrestore(&priv->rx_skbs_list.lock, flags);
++
++	vsc85xx_gettime(info, &ts);
++	while ((rx_skb = __skb_dequeue(&received)) != NULL) {
++		shhwtstamps = skb_hwtstamps(rx_skb);
++		memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
++
++		if (ts.tv_nsec < VSC8531_SKB_CB(rx_skb)->ns)
++			ts.tv_sec--;
++
++		shhwtstamps->hwtstamp = ktime_set(ts.tv_sec,
++						  VSC8531_SKB_CB(rx_skb)->ns);
++		netif_rx(rx_skb);
++	}
++
++	return -1;
++}
++
+ static const struct ptp_clock_info vsc85xx_clk_caps = {
+ 	.owner		= THIS_MODULE,
+ 	.name		= "VSC85xx timer",
+@@ -1237,6 +1260,7 @@ static const struct ptp_clock_info vsc85xx_clk_caps = {
+ 	.adjfine	= &vsc85xx_adjfine,
+ 	.gettime64	= &vsc85xx_gettime,
+ 	.settime64	= &vsc85xx_settime,
++	.do_aux_work	= &vsc85xx_do_aux_work,
+ };
+ 
+ static struct vsc8531_private *vsc8584_base_priv(struct phy_device *phydev)
+@@ -1564,6 +1588,7 @@ int vsc8584_ptp_probe(struct phy_device *phydev)
+ 
+ 	mutex_init(&vsc8531->phc_lock);
+ 	mutex_init(&vsc8531->ts_lock);
++	skb_queue_head_init(&vsc8531->rx_skbs_list);
+ 
+ 	/* Retrieve the shared load/save GPIO. Request it as non exclusive as
+ 	 * the same GPIO can be requested by all the PHYs of the same package.
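The rx path above stops completing the timestamp inside the rxtstamp hook: it stashes the 32-bit nanoseconds from the PTP header in the skb's control block, queues the skb on rx_skbs_list, and lets the PTP aux worker read the PHC and finish the job in sleepable context (splicing the queue under its lock first). The VSC8531_SKB_CB cast is the usual skb->cb overlay pattern; a minimal userspace sketch of that pattern, with an illustrative struct standing in for sk_buff:

#include <assert.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for sk_buff: 48 bytes of per-packet scratch space ("cb"). */
struct pkt { char cb[48]; };

struct ts_cb { unsigned int ns; };	/* what the driver stashes */
#define TS_CB(p) ((struct ts_cb *)((p)->cb))

int main(void)
{
	struct pkt p;

	/* the overlay must fit inside the scratch area */
	static_assert(sizeof(struct ts_cb) <= sizeof(p.cb), "cb overflow");

	memset(&p, 0, sizeof(p));
	TS_CB(&p)->ns = 123456789;		/* producer side */
	printf("deferred ns = %u\n", TS_CB(&p)->ns);	/* worker side */
	return 0;
}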
+diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
+index 1420c4efa48e68..0553b0b356b308 100644
+--- a/drivers/net/ppp/ppp_generic.c
++++ b/drivers/net/ppp/ppp_generic.c
+@@ -33,6 +33,7 @@
+ #include <linux/ppp_channel.h>
+ #include <linux/ppp-comp.h>
+ #include <linux/skbuff.h>
++#include <linux/rculist.h>
+ #include <linux/rtnetlink.h>
+ #include <linux/if_arp.h>
+ #include <linux/ip.h>
+@@ -1613,11 +1614,14 @@ static int ppp_fill_forward_path(struct net_device_path_ctx *ctx,
+ 	if (ppp->flags & SC_MULTILINK)
+ 		return -EOPNOTSUPP;
+ 
+-	if (list_empty(&ppp->channels))
++	pch = list_first_or_null_rcu(&ppp->channels, struct channel, clist);
++	if (!pch)
++		return -ENODEV;
++
++	chan = READ_ONCE(pch->chan);
++	if (!chan)
+ 		return -ENODEV;
+ 
+-	pch = list_first_entry(&ppp->channels, struct channel, clist);
+-	chan = pch->chan;
+ 	if (!chan->ops->fill_forward_path)
+ 		return -EOPNOTSUPP;
+ 
+@@ -3000,7 +3004,7 @@ ppp_unregister_channel(struct ppp_channel *chan)
+ 	 */
+ 	down_write(&pch->chan_sem);
+ 	spin_lock_bh(&pch->downl);
+-	pch->chan = NULL;
++	WRITE_ONCE(pch->chan, NULL);
+ 	spin_unlock_bh(&pch->downl);
+ 	up_write(&pch->chan_sem);
+ 	ppp_disconnect_channel(pch);
+@@ -3506,7 +3510,7 @@ ppp_connect_channel(struct channel *pch, int unit)
+ 	hdrlen = pch->file.hdrlen + 2;	/* for protocol bytes */
+ 	if (hdrlen > ppp->dev->hard_header_len)
+ 		ppp->dev->hard_header_len = hdrlen;
+-	list_add_tail(&pch->clist, &ppp->channels);
++	list_add_tail_rcu(&pch->clist, &ppp->channels);
+ 	++ppp->n_channels;
+ 	pch->ppp = ppp;
+ 	refcount_inc(&ppp->file.refcnt);
+@@ -3536,10 +3540,11 @@ ppp_disconnect_channel(struct channel *pch)
+ 	if (ppp) {
+ 		/* remove it from the ppp unit's list */
+ 		ppp_lock(ppp);
+-		list_del(&pch->clist);
++		list_del_rcu(&pch->clist);
+ 		if (--ppp->n_channels == 0)
+ 			wake_up_interruptible(&ppp->file.rwait);
+ 		ppp_unlock(ppp);
++		synchronize_net();
+ 		if (refcount_dec_and_test(&ppp->file.refcnt))
+ 			ppp_destroy_interface(ppp);
+ 		err = 0;
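The fill_forward_path fix converts the channel list to RCU traversal and, just as importantly, snapshots pch->chan exactly once with READ_ONCE() before the NULL check, so a concurrent ppp_unregister_channel() clearing the pointer cannot be observed half-way through. A standalone sketch of the read-once-then-check discipline, using C11 atomics as a stand-in for READ_ONCE/WRITE_ONCE (names illustrative):

#include <stdatomic.h>
#include <stdio.h>

struct chan { const char *name; };

/* A writer publishes/retires the channel; readers must tolerate NULL. */
static _Atomic(struct chan *) active_chan;

static int use_first_chan(void)
{
	/* single racy read, then operate on the local copy only */
	struct chan *c = atomic_load_explicit(&active_chan,
					      memory_order_acquire);
	if (!c)
		return -1;	/* -ENODEV in the kernel code */
	printf("forwarding via %s\n", c->name);
	return 0;
}

int main(void)
{
	static struct chan eth0 = { "chan0" };

	atomic_store_explicit(&active_chan, &eth0, memory_order_release);
	use_first_chan();			/* forwards */
	atomic_store_explicit(&active_chan, NULL, memory_order_release);
	return use_first_chan() == -1 ? 0 : 1;	/* now sees no device */
}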
+diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
+index d9f5942ccc447b..792ddda1ad493d 100644
+--- a/drivers/net/usb/asix_devices.c
++++ b/drivers/net/usb/asix_devices.c
+@@ -676,7 +676,7 @@ static int ax88772_init_mdio(struct usbnet *dev)
+ 	priv->mdio->read = &asix_mdio_bus_read;
+ 	priv->mdio->write = &asix_mdio_bus_write;
+ 	priv->mdio->name = "Asix MDIO Bus";
+-	priv->mdio->phy_mask = ~(BIT(priv->phy_addr) | BIT(AX_EMBD_PHY_ADDR));
++	priv->mdio->phy_mask = ~(BIT(priv->phy_addr & 0x1f) | BIT(AX_EMBD_PHY_ADDR));
+ 	/* mii bus name is usb-<usb bus number>-<usb device number> */
+ 	snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
+ 		 dev->udev->bus->busnum, dev->udev->devnum);
+diff --git a/drivers/net/wireless/ath/ath11k/ce.c b/drivers/net/wireless/ath/ath11k/ce.c
+index 9d8efec46508a1..39d9aad33bc690 100644
+--- a/drivers/net/wireless/ath/ath11k/ce.c
++++ b/drivers/net/wireless/ath/ath11k/ce.c
+@@ -393,9 +393,6 @@ static int ath11k_ce_completed_recv_next(struct ath11k_ce_pipe *pipe,
+ 		goto err;
+ 	}
+ 
+-	/* Make sure descriptor is read after the head pointer. */
+-	dma_rmb();
+-
+ 	*nbytes = ath11k_hal_ce_dst_status_get_length(desc);
+ 
+ 	*skb = pipe->dest_ring->skb[sw_index];
+diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
+index 007d8695904235..66a00f330734d1 100644
+--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
++++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
+@@ -2650,9 +2650,6 @@ int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id,
+ try_again:
+ 	ath11k_hal_srng_access_begin(ab, srng);
+ 
+-	/* Make sure descriptor is read after the head pointer. */
+-	dma_rmb();
+-
+ 	while (likely(desc =
+ 	      (struct hal_reo_dest_ring *)ath11k_hal_srng_dst_get_next_entry(ab,
+ 									     srng))) {
+diff --git a/drivers/net/wireless/ath/ath11k/hal.c b/drivers/net/wireless/ath/ath11k/hal.c
+index f38decae77a935..65e52ab742b417 100644
+--- a/drivers/net/wireless/ath/ath11k/hal.c
++++ b/drivers/net/wireless/ath/ath11k/hal.c
+@@ -823,13 +823,23 @@ u32 *ath11k_hal_srng_src_peek(struct ath11k_base *ab, struct hal_srng *srng)
+ 
+ void ath11k_hal_srng_access_begin(struct ath11k_base *ab, struct hal_srng *srng)
+ {
++	u32 hp;
++
+ 	lockdep_assert_held(&srng->lock);
+ 
+ 	if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
+ 		srng->u.src_ring.cached_tp =
+ 			*(volatile u32 *)srng->u.src_ring.tp_addr;
+ 	} else {
+-		srng->u.dst_ring.cached_hp = READ_ONCE(*srng->u.dst_ring.hp_addr);
++		hp = READ_ONCE(*srng->u.dst_ring.hp_addr);
++
++		if (hp != srng->u.dst_ring.cached_hp) {
++			srng->u.dst_ring.cached_hp = hp;
++			/* Make sure descriptor is read after the head
++			 * pointer.
++			 */
++			dma_rmb();
++		}
+ 
+ 		/* Try to prefetch the next descriptor in the ring */
+ 		if (srng->flags & HAL_SRNG_FLAGS_CACHED)
+@@ -844,7 +854,6 @@ void ath11k_hal_srng_access_end(struct ath11k_base *ab, struct hal_srng *srng)
+ {
+ 	lockdep_assert_held(&srng->lock);
+ 
+-	/* TODO: See if we need a write memory barrier here */
+ 	if (srng->flags & HAL_SRNG_FLAGS_LMAC_RING) {
+ 		/* For LMAC rings, ring pointer updates are done through FW and
+ 		 * hence written to a shared memory location that is read by FW
+@@ -852,21 +861,37 @@ void ath11k_hal_srng_access_end(struct ath11k_base *ab, struct hal_srng *srng)
+ 		if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
+ 			srng->u.src_ring.last_tp =
+ 				*(volatile u32 *)srng->u.src_ring.tp_addr;
+-			*srng->u.src_ring.hp_addr = srng->u.src_ring.hp;
++			/* Make sure descriptor is written before updating the
++			 * head pointer.
++			 */
++			dma_wmb();
++			WRITE_ONCE(*srng->u.src_ring.hp_addr, srng->u.src_ring.hp);
+ 		} else {
+ 			srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr;
+-			*srng->u.dst_ring.tp_addr = srng->u.dst_ring.tp;
++			/* Make sure descriptor is read before updating the
++			 * tail pointer.
++			 */
++			dma_mb();
++			WRITE_ONCE(*srng->u.dst_ring.tp_addr, srng->u.dst_ring.tp);
+ 		}
+ 	} else {
+ 		if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
+ 			srng->u.src_ring.last_tp =
+ 				*(volatile u32 *)srng->u.src_ring.tp_addr;
++			/* Assume the implementation uses an MMIO write
++			 * accessor which has the required wmb() so that the
++			 * descriptor is written before the head pointer is
++			 * updated.
++			 */
+ 			ath11k_hif_write32(ab,
+ 					   (unsigned long)srng->u.src_ring.hp_addr -
+ 					   (unsigned long)ab->mem,
+ 					   srng->u.src_ring.hp);
+ 		} else {
+ 			srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr;
++			/* Make sure descriptor is read before updating the
++			 * tail pointer.
++			 */
++			mb();
+ 			ath11k_hif_write32(ab,
+ 					   (unsigned long)srng->u.dst_ring.tp_addr -
+ 					   (unsigned long)ab->mem,
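The hal changes pair every head/tail pointer access with an explicit barrier: dma_rmb() after reading a new head pointer (descriptor contents must not be speculated earlier), dma_wmb()/dma_mb() before publishing an updated pointer, with the MMIO path relying on the write accessor's implied barrier. A standalone sketch of the same producer/consumer ordering, using C11 acquire/release as a stand-in for the DMA barriers:

#include <stdatomic.h>
#include <stdio.h>

static int ring[8];			/* descriptor ring */
static _Atomic unsigned int head;	/* producer index */

static void produce(int desc)
{
	unsigned int h = atomic_load_explicit(&head, memory_order_relaxed);

	ring[h & 7] = desc;
	/* release: descriptor write is visible before the head update --
	 * the role dma_wmb() plays before writing hp_addr above
	 */
	atomic_store_explicit(&head, h + 1, memory_order_release);
}

static int consume(unsigned int *tail)
{
	/* acquire: pairs with the release above -- the role dma_rmb()
	 * plays after reading a fresh hp_addr
	 */
	unsigned int h = atomic_load_explicit(&head, memory_order_acquire);

	if (*tail == h)
		return -1;		/* ring empty */
	return ring[(*tail)++ & 7];
}

int main(void)
{
	unsigned int tail = 0;

	produce(42);
	printf("got %d\n", consume(&tail));
	return 0;
}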
+diff --git a/drivers/net/wireless/ath/ath12k/ce.c b/drivers/net/wireless/ath/ath12k/ce.c
+index 740586fe49d1f9..b66d23d6b2bd9e 100644
+--- a/drivers/net/wireless/ath/ath12k/ce.c
++++ b/drivers/net/wireless/ath/ath12k/ce.c
+@@ -343,9 +343,6 @@ static int ath12k_ce_completed_recv_next(struct ath12k_ce_pipe *pipe,
+ 		goto err;
+ 	}
+ 
+-	/* Make sure descriptor is read after the head pointer. */
+-	dma_rmb();
+-
+ 	*nbytes = ath12k_hal_ce_dst_status_get_length(desc);
+ 
+ 	*skb = pipe->dest_ring->skb[sw_index];
+diff --git a/drivers/net/wireless/ath/ath12k/hal.c b/drivers/net/wireless/ath/ath12k/hal.c
+index 3afb11c7bf18eb..cc187f59ff1c48 100644
+--- a/drivers/net/wireless/ath/ath12k/hal.c
++++ b/drivers/net/wireless/ath/ath12k/hal.c
+@@ -2107,13 +2107,24 @@ void *ath12k_hal_srng_src_get_next_reaped(struct ath12k_base *ab,
+ 
+ void ath12k_hal_srng_access_begin(struct ath12k_base *ab, struct hal_srng *srng)
+ {
++	u32 hp;
++
+ 	lockdep_assert_held(&srng->lock);
+ 
+-	if (srng->ring_dir == HAL_SRNG_DIR_SRC)
++	if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
+ 		srng->u.src_ring.cached_tp =
+ 			*(volatile u32 *)srng->u.src_ring.tp_addr;
+-	else
+-		srng->u.dst_ring.cached_hp = READ_ONCE(*srng->u.dst_ring.hp_addr);
++	} else {
++		hp = READ_ONCE(*srng->u.dst_ring.hp_addr);
++
++		if (hp != srng->u.dst_ring.cached_hp) {
++			srng->u.dst_ring.cached_hp = hp;
++			/* Make sure descriptor is read after the head
++			 * pointer.
++			 */
++			dma_rmb();
++		}
++	}
+ }
+ 
+ /* Update cached ring head/tail pointers to HW. ath12k_hal_srng_access_begin()
+@@ -2123,7 +2134,6 @@ void ath12k_hal_srng_access_end(struct ath12k_base *ab, struct hal_srng *srng)
+ {
+ 	lockdep_assert_held(&srng->lock);
+ 
+-	/* TODO: See if we need a write memory barrier here */
+ 	if (srng->flags & HAL_SRNG_FLAGS_LMAC_RING) {
+ 		/* For LMAC rings, ring pointer updates are done through FW and
+ 		 * hence written to a shared memory location that is read by FW
+@@ -2131,21 +2141,37 @@ void ath12k_hal_srng_access_end(struct ath12k_base *ab, struct hal_srng *srng)
+ 		if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
+ 			srng->u.src_ring.last_tp =
+ 				*(volatile u32 *)srng->u.src_ring.tp_addr;
+-			*srng->u.src_ring.hp_addr = srng->u.src_ring.hp;
++			/* Make sure descriptor is written before updating the
++			 * head pointer.
++			 */
++			dma_wmb();
++			WRITE_ONCE(*srng->u.src_ring.hp_addr, srng->u.src_ring.hp);
+ 		} else {
+ 			srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr;
+-			*srng->u.dst_ring.tp_addr = srng->u.dst_ring.tp;
++			/* Make sure descriptor is read before updating the
++			 * tail pointer.
++			 */
++			dma_mb();
++			WRITE_ONCE(*srng->u.dst_ring.tp_addr, srng->u.dst_ring.tp);
+ 		}
+ 	} else {
+ 		if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
+ 			srng->u.src_ring.last_tp =
+ 				*(volatile u32 *)srng->u.src_ring.tp_addr;
++			/* Assume the implementation uses an MMIO write
++			 * accessor which has the required wmb() so that the
++			 * descriptor is written before the head pointer is
++			 * updated.
++			 */
+ 			ath12k_hif_write32(ab,
+ 					   (unsigned long)srng->u.src_ring.hp_addr -
+ 					   (unsigned long)ab->mem,
+ 					   srng->u.src_ring.hp);
+ 		} else {
+ 			srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr;
++			/* Make sure descriptor is read before updating the
++			 * tail pointer.
++			 */
++			mb();
+ 			ath12k_hif_write32(ab,
+ 					   (unsigned long)srng->u.dst_ring.tp_addr -
+ 					   (unsigned long)ab->mem,
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
+index d0faba24056105..b4bba67a45ec36 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
+@@ -919,7 +919,7 @@ void wlc_lcnphy_read_table(struct brcms_phy *pi, struct phytbl_info *pti)
+ 
+ static void
+ wlc_lcnphy_common_read_table(struct brcms_phy *pi, u32 tbl_id,
+-			     const u16 *tbl_ptr, u32 tbl_len,
++			     u16 *tbl_ptr, u32 tbl_len,
+ 			     u32 tbl_width, u32 tbl_offset)
+ {
+ 	struct phytbl_info tab;
+diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
+index 3b24fed3177de8..c5254241942d34 100644
+--- a/drivers/pci/controller/dwc/pci-imx6.c
++++ b/drivers/pci/controller/dwc/pci-imx6.c
+@@ -72,6 +72,7 @@ enum imx_pcie_variants {
+ 	IMX8MQ_EP,
+ 	IMX8MM_EP,
+ 	IMX8MP_EP,
++	IMX8Q_EP,
+ 	IMX95_EP,
+ };
+ 
+@@ -778,7 +779,6 @@ static int imx7d_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert)
+ static void imx_pcie_assert_core_reset(struct imx_pcie *imx_pcie)
+ {
+ 	reset_control_assert(imx_pcie->pciephy_reset);
+-	reset_control_assert(imx_pcie->apps_reset);
+ 
+ 	if (imx_pcie->drvdata->core_reset)
+ 		imx_pcie->drvdata->core_reset(imx_pcie, true);
+@@ -790,7 +790,6 @@ static void imx_pcie_assert_core_reset(struct imx_pcie *imx_pcie)
+ static int imx_pcie_deassert_core_reset(struct imx_pcie *imx_pcie)
+ {
+ 	reset_control_deassert(imx_pcie->pciephy_reset);
+-	reset_control_deassert(imx_pcie->apps_reset);
+ 
+ 	if (imx_pcie->drvdata->core_reset)
+ 		imx_pcie->drvdata->core_reset(imx_pcie, false);
+@@ -997,6 +996,9 @@ static int imx_pcie_host_init(struct dw_pcie_rp *pp)
+ 		}
+ 	}
+ 
++	/* Make sure that PCIe LTSSM is cleared */
++	imx_pcie_ltssm_disable(dev);
++
+ 	ret = imx_pcie_deassert_core_reset(imx_pcie);
+ 	if (ret < 0) {
+ 		dev_err(dev, "pcie deassert core reset failed: %d\n", ret);
+@@ -1097,6 +1099,18 @@ static const struct pci_epc_features imx8m_pcie_epc_features = {
+ 	.msix_capable = false,
+ 	.bar[BAR_1] = { .type = BAR_RESERVED, },
+ 	.bar[BAR_3] = { .type = BAR_RESERVED, },
++	.bar[BAR_4] = { .type = BAR_FIXED, .fixed_size = SZ_256, },
++	.bar[BAR_5] = { .type = BAR_RESERVED, },
++	.align = SZ_64K,
++};
++
++static const struct pci_epc_features imx8q_pcie_epc_features = {
++	.linkup_notifier = false,
++	.msi_capable = true,
++	.msix_capable = false,
++	.bar[BAR_1] = { .type = BAR_RESERVED, },
++	.bar[BAR_3] = { .type = BAR_RESERVED, },
++	.bar[BAR_5] = { .type = BAR_RESERVED, },
+ 	.align = SZ_64K,
+ };
+ 
+@@ -1188,9 +1202,6 @@ static int imx_add_pcie_ep(struct imx_pcie *imx_pcie,
+ 
+ 	pci_epc_init_notify(ep->epc);
+ 
+-	/* Start LTSSM. */
+-	imx_pcie_ltssm_enable(dev);
+-
+ 	return 0;
+ }
+ 
+@@ -1665,7 +1676,7 @@ static const struct imx_pcie_drvdata drvdata[] = {
+ 		.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
+ 		.mode_off[1] = IOMUXC_GPR12,
+ 		.mode_mask[1] = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE,
+-		.epc_features = &imx8m_pcie_epc_features,
++		.epc_features = &imx8q_pcie_epc_features,
+ 		.init_phy = imx8mq_pcie_init_phy,
+ 		.enable_ref_clk = imx8mm_pcie_enable_ref_clk,
+ 	},
+@@ -1695,6 +1706,14 @@ static const struct imx_pcie_drvdata drvdata[] = {
+ 		.epc_features = &imx8m_pcie_epc_features,
+ 		.enable_ref_clk = imx8mm_pcie_enable_ref_clk,
+ 	},
++	[IMX8Q_EP] = {
++		.variant = IMX8Q_EP,
++		.flags = IMX_PCIE_FLAG_HAS_PHYDRV,
++		.mode = DW_PCIE_EP_TYPE,
++		.epc_features = &imx8q_pcie_epc_features,
++		.clk_names = imx8q_clks,
++		.clks_cnt = ARRAY_SIZE(imx8q_clks),
++	},
+ 	[IMX95_EP] = {
+ 		.variant = IMX95_EP,
+ 		.flags = IMX_PCIE_FLAG_HAS_SERDES |
+@@ -1724,6 +1743,7 @@ static const struct of_device_id imx_pcie_of_match[] = {
+ 	{ .compatible = "fsl,imx8mq-pcie-ep", .data = &drvdata[IMX8MQ_EP], },
+ 	{ .compatible = "fsl,imx8mm-pcie-ep", .data = &drvdata[IMX8MM_EP], },
+ 	{ .compatible = "fsl,imx8mp-pcie-ep", .data = &drvdata[IMX8MP_EP], },
++	{ .compatible = "fsl,imx8q-pcie-ep", .data = &drvdata[IMX8Q_EP], },
+ 	{ .compatible = "fsl,imx95-pcie-ep", .data = &drvdata[IMX95_EP], },
+ 	{},
+ };
+diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c
+index 18e65571c1459b..ea1df03edc2e47 100644
+--- a/drivers/pci/controller/pcie-rockchip-host.c
++++ b/drivers/pci/controller/pcie-rockchip-host.c
+@@ -11,6 +11,7 @@
+  * ARM PCI Host generic driver.
+  */
+ 
++#include <linux/bitfield.h>
+ #include <linux/bitrev.h>
+ #include <linux/clk.h>
+ #include <linux/delay.h>
+@@ -40,18 +41,18 @@ static void rockchip_pcie_enable_bw_int(struct rockchip_pcie *rockchip)
+ {
+ 	u32 status;
+ 
+-	status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
++	status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL);
+ 	status |= (PCI_EXP_LNKCTL_LBMIE | PCI_EXP_LNKCTL_LABIE);
+-	rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
++	rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL);
+ }
+ 
+ static void rockchip_pcie_clr_bw_int(struct rockchip_pcie *rockchip)
+ {
+ 	u32 status;
+ 
+-	status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
++	status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL);
+ 	status |= (PCI_EXP_LNKSTA_LBMS | PCI_EXP_LNKSTA_LABS) << 16;
+-	rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
++	rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL);
+ }
+ 
+ static void rockchip_pcie_update_txcredit_mui(struct rockchip_pcie *rockchip)
+@@ -269,7 +270,7 @@ static void rockchip_pcie_set_power_limit(struct rockchip_pcie *rockchip)
+ 	scale = 3; /* 0.001x */
+ 	curr = curr / 1000; /* convert to mA */
+ 	power = (curr * 3300) / 1000; /* milliwatt */
+-	while (power > PCIE_RC_CONFIG_DCR_CSPL_LIMIT) {
++	while (power > FIELD_MAX(PCI_EXP_DEVCAP_PWR_VAL)) {
+ 		if (!scale) {
+ 			dev_warn(rockchip->dev, "invalid power supply\n");
+ 			return;
+@@ -278,10 +279,10 @@ static void rockchip_pcie_set_power_limit(struct rockchip_pcie *rockchip)
+ 		power = power / 10;
+ 	}
+ 
+-	status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCR);
+-	status |= (power << PCIE_RC_CONFIG_DCR_CSPL_SHIFT) |
+-		  (scale << PCIE_RC_CONFIG_DCR_CPLS_SHIFT);
+-	rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCR);
++	status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_DEVCAP);
++	status |= FIELD_PREP(PCI_EXP_DEVCAP_PWR_VAL, power);
++	status |= FIELD_PREP(PCI_EXP_DEVCAP_PWR_SCL, scale);
++	rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_DEVCAP);
+ }
+ 
+ /**
+@@ -309,14 +310,14 @@ static int rockchip_pcie_host_init_port(struct rockchip_pcie *rockchip)
+ 	rockchip_pcie_set_power_limit(rockchip);
+ 
+ 	/* Set RC's clock architecture as common clock */
+-	status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
++	status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL);
+ 	status |= PCI_EXP_LNKSTA_SLC << 16;
+-	rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
++	rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL);
+ 
+ 	/* Set RC's RCB to 128 */
+-	status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
++	status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL);
+ 	status |= PCI_EXP_LNKCTL_RCB;
+-	rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
++	rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL);
+ 
+ 	/* Enable Gen1 training */
+ 	rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_ENABLE,
+@@ -341,9 +342,13 @@ static int rockchip_pcie_host_init_port(struct rockchip_pcie *rockchip)
+ 		 * Enable retrain for gen2. This should be configured only after
+ 		 * gen1 finished.
+ 		 */
+-		status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
++		status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL2);
++		status &= ~PCI_EXP_LNKCTL2_TLS;
++		status |= PCI_EXP_LNKCTL2_TLS_5_0GT;
++		rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL2);
++		status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL);
+ 		status |= PCI_EXP_LNKCTL_RL;
+-		rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
++		rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL);
+ 
+ 		err = readl_poll_timeout(rockchip->apb_base + PCIE_CORE_CTRL,
+ 					 status, PCIE_LINK_IS_GEN2(status), 20,
+@@ -380,15 +385,15 @@ static int rockchip_pcie_host_init_port(struct rockchip_pcie *rockchip)
+ 
+ 	/* Clear L0s from RC's link cap */
+ 	if (of_property_read_bool(dev->of_node, "aspm-no-l0s")) {
+-		status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LINK_CAP);
+-		status &= ~PCIE_RC_CONFIG_LINK_CAP_L0S;
+-		rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LINK_CAP);
++		status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCAP);
++		status &= ~PCI_EXP_LNKCAP_ASPM_L0S;
++		rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCAP);
+ 	}
+ 
+-	status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCSR);
+-	status &= ~PCIE_RC_CONFIG_DCSR_MPS_MASK;
+-	status |= PCIE_RC_CONFIG_DCSR_MPS_256;
+-	rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCSR);
++	status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_DEVCTL);
++	status &= ~PCI_EXP_DEVCTL_PAYLOAD;
++	status |= PCI_EXP_DEVCTL_PAYLOAD_256B;
++	rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_DEVCTL);
+ 
+ 	return 0;
+ err_power_off_phy:
+diff --git a/drivers/pci/controller/pcie-rockchip.h b/drivers/pci/controller/pcie-rockchip.h
+index 688f51d9bde631..d916fcc8badb82 100644
+--- a/drivers/pci/controller/pcie-rockchip.h
++++ b/drivers/pci/controller/pcie-rockchip.h
+@@ -144,16 +144,7 @@
+ #define PCIE_EP_CONFIG_BASE		0xa00000
+ #define PCIE_EP_CONFIG_DID_VID		(PCIE_EP_CONFIG_BASE + 0x00)
+ #define PCIE_RC_CONFIG_RID_CCR		(PCIE_RC_CONFIG_BASE + 0x08)
+-#define PCIE_RC_CONFIG_DCR		(PCIE_RC_CONFIG_BASE + 0xc4)
+-#define   PCIE_RC_CONFIG_DCR_CSPL_SHIFT		18
+-#define   PCIE_RC_CONFIG_DCR_CSPL_LIMIT		0xff
+-#define   PCIE_RC_CONFIG_DCR_CPLS_SHIFT		26
+-#define PCIE_RC_CONFIG_DCSR		(PCIE_RC_CONFIG_BASE + 0xc8)
+-#define   PCIE_RC_CONFIG_DCSR_MPS_MASK		GENMASK(7, 5)
+-#define   PCIE_RC_CONFIG_DCSR_MPS_256		(0x1 << 5)
+-#define PCIE_RC_CONFIG_LINK_CAP		(PCIE_RC_CONFIG_BASE + 0xcc)
+-#define   PCIE_RC_CONFIG_LINK_CAP_L0S		BIT(10)
+-#define PCIE_RC_CONFIG_LCS		(PCIE_RC_CONFIG_BASE + 0xd0)
++#define PCIE_RC_CONFIG_CR		(PCIE_RC_CONFIG_BASE + 0xc0)
+ #define PCIE_RC_CONFIG_L1_SUBSTATE_CTRL2 (PCIE_RC_CONFIG_BASE + 0x90c)
+ #define PCIE_RC_CONFIG_THP_CAP		(PCIE_RC_CONFIG_BASE + 0x274)
+ #define   PCIE_RC_CONFIG_THP_CAP_NEXT_MASK	GENMASK(31, 20)
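Replacing the driver-private DCR/DCSR/LCS offsets with PCIE_RC_CONFIG_CR plus the standard PCI_EXP_* register offsets also lets the host driver use the generic bitfield helpers (FIELD_PREP, FIELD_MAX) against the standard capability masks, as the set_power_limit hunk does. A standalone sketch with simplified versions of those helpers (the real ones live in linux/bitfield.h and handle more edge cases):

#include <stdio.h>

/* Simplified forms of the kernel's bitfield helpers. */
#define GENMASK(h, l)	(((~0u) >> (31 - (h))) & ((~0u) << (l)))
#define FIELD_MAX(m)	((m) >> __builtin_ctz(m))
#define FIELD_PREP(m, v) (((v) << __builtin_ctz(m)) & (m))

#define PWR_VAL GENMASK(25, 18)	/* PCI_EXP_DEVCAP_PWR_VAL */
#define PWR_SCL GENMASK(27, 26)	/* PCI_EXP_DEVCAP_PWR_SCL */

int main(void)
{
	unsigned int power = 250, scale = 1, reg = 0;

	printf("max encodable power: %u\n", FIELD_MAX(PWR_VAL)); /* 255 */
	reg |= FIELD_PREP(PWR_VAL, power) | FIELD_PREP(PWR_SCL, scale);
	printf("devcap = 0x%08x\n", reg);
	return 0;
}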
+diff --git a/drivers/pci/endpoint/pci-ep-cfs.c b/drivers/pci/endpoint/pci-ep-cfs.c
+index d712c7a866d261..ef50c82e647f4d 100644
+--- a/drivers/pci/endpoint/pci-ep-cfs.c
++++ b/drivers/pci/endpoint/pci-ep-cfs.c
+@@ -691,6 +691,7 @@ void pci_ep_cfs_remove_epf_group(struct config_group *group)
+ 	if (IS_ERR_OR_NULL(group))
+ 		return;
+ 
++	list_del(&group->group_entry);
+ 	configfs_unregister_default_group(group);
+ }
+ EXPORT_SYMBOL(pci_ep_cfs_remove_epf_group);
+diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c
+index 963d2f3aa5d476..9e7166a7557923 100644
+--- a/drivers/pci/endpoint/pci-epf-core.c
++++ b/drivers/pci/endpoint/pci-epf-core.c
+@@ -334,7 +334,7 @@ static void pci_epf_remove_cfs(struct pci_epf_driver *driver)
+ 	mutex_lock(&pci_epf_mutex);
+ 	list_for_each_entry_safe(group, tmp, &driver->epf_group, group_entry)
+ 		pci_ep_cfs_remove_epf_group(group);
+-	list_del(&driver->epf_group);
++	WARN_ON(!list_empty(&driver->epf_group));
+ 	mutex_unlock(&pci_epf_mutex);
+ }
+ 
+diff --git a/drivers/pci/pcie/portdrv.c b/drivers/pci/pcie/portdrv.c
+index 604c055f607867..ec2c768c687f02 100644
+--- a/drivers/pci/pcie/portdrv.c
++++ b/drivers/pci/pcie/portdrv.c
+@@ -220,7 +220,7 @@ static int get_port_device_capability(struct pci_dev *dev)
+ 	struct pci_host_bridge *host = pci_find_host_bridge(dev->bus);
+ 	int services = 0;
+ 
+-	if (dev->is_hotplug_bridge &&
++	if (dev->is_pciehp &&
+ 	    (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT ||
+ 	     pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) &&
+ 	    (pcie_ports_native || host->native_pcie_hotplug)) {
+diff --git a/drivers/phy/qualcomm/phy-qcom-m31.c b/drivers/phy/qualcomm/phy-qcom-m31.c
+index 20d4c020a83c1f..8b0f8a3a059c21 100644
+--- a/drivers/phy/qualcomm/phy-qcom-m31.c
++++ b/drivers/phy/qualcomm/phy-qcom-m31.c
+@@ -58,14 +58,16 @@
+  #define USB2_0_TX_ENABLE		BIT(2)
+ 
+ #define USB2PHY_USB_PHY_M31_XCFGI_4	0xc8
+- #define HSTX_SLEW_RATE_565PS		GENMASK(1, 0)
++ #define HSTX_SLEW_RATE_400PS		GENMASK(2, 0)
+  #define PLL_CHARGING_PUMP_CURRENT_35UA	GENMASK(4, 3)
+  #define ODT_VALUE_38_02_OHM		GENMASK(7, 6)
+ 
+ #define USB2PHY_USB_PHY_M31_XCFGI_5	0xcc
+- #define ODT_VALUE_45_02_OHM		BIT(2)
+  #define HSTX_PRE_EMPHASIS_LEVEL_0_55MA	BIT(0)
+ 
++#define USB2PHY_USB_PHY_M31_XCFGI_9	0xdc
++ #define HSTX_CURRENT_17_1MA_385MV	BIT(1)
++
+ #define USB2PHY_USB_PHY_M31_XCFGI_11	0xe4
+  #define XCFG_COARSE_TUNE_NUM		BIT(1)
+  #define XCFG_FINE_TUNE_NUM		BIT(3)
+@@ -164,7 +166,7 @@ static struct m31_phy_regs m31_ipq5332_regs[] = {
+ 	},
+ 	{
+ 		USB2PHY_USB_PHY_M31_XCFGI_4,
+-		HSTX_SLEW_RATE_565PS | PLL_CHARGING_PUMP_CURRENT_35UA | ODT_VALUE_38_02_OHM,
++		HSTX_SLEW_RATE_400PS | PLL_CHARGING_PUMP_CURRENT_35UA | ODT_VALUE_38_02_OHM,
+ 		0
+ 	},
+ 	{
+@@ -174,9 +176,13 @@ static struct m31_phy_regs m31_ipq5332_regs[] = {
+ 	},
+ 	{
+ 		USB2PHY_USB_PHY_M31_XCFGI_5,
+-		ODT_VALUE_45_02_OHM | HSTX_PRE_EMPHASIS_LEVEL_0_55MA,
++		HSTX_PRE_EMPHASIS_LEVEL_0_55MA,
+ 		4
+ 	},
++	{
++		USB2PHY_USB_PHY_M31_XCFGI_9,
++		HSTX_CURRENT_17_1MA_385MV,
++	},
+ 	{
+ 		USB_PHY_UTMI_CTRL5,
+ 		0x0,
+diff --git a/drivers/platform/chrome/cros_ec.c b/drivers/platform/chrome/cros_ec.c
+index e821b3d3959094..05b84f3b7f6918 100644
+--- a/drivers/platform/chrome/cros_ec.c
++++ b/drivers/platform/chrome/cros_ec.c
+@@ -313,6 +313,9 @@ EXPORT_SYMBOL(cros_ec_register);
+  */
+ void cros_ec_unregister(struct cros_ec_device *ec_dev)
+ {
++	if (ec_dev->mkbp_event_supported)
++		blocking_notifier_chain_unregister(&ec_dev->event_notifier,
++						   &ec_dev->notifier_ready);
+ 	platform_device_unregister(ec_dev->pd);
+ 	platform_device_unregister(ec_dev->ec);
+ 	mutex_destroy(&ec_dev->lock);
+diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c
+index 5ab45b75166628..9a5ff9163988da 100644
+--- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c
++++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c
+@@ -189,9 +189,14 @@ static int uncore_read_control_freq(struct uncore_data *data, unsigned int *valu
+ static int write_eff_lat_ctrl(struct uncore_data *data, unsigned int val, enum uncore_index index)
+ {
+ 	struct tpmi_uncore_cluster_info *cluster_info;
++	struct tpmi_uncore_struct *uncore_root;
+ 	u64 control;
+ 
+ 	cluster_info = container_of(data, struct tpmi_uncore_cluster_info, uncore_data);
++	uncore_root = cluster_info->uncore_root;
++
++	if (uncore_root->write_blocked)
++		return -EPERM;
+ 
+ 	if (cluster_info->root_domain)
+ 		return -ENODATA;
+diff --git a/drivers/pwm/pwm-imx-tpm.c b/drivers/pwm/pwm-imx-tpm.c
+index 7ee7b65b9b90c5..5b399de16d6040 100644
+--- a/drivers/pwm/pwm-imx-tpm.c
++++ b/drivers/pwm/pwm-imx-tpm.c
+@@ -204,6 +204,15 @@ static int pwm_imx_tpm_apply_hw(struct pwm_chip *chip,
+ 		val |= FIELD_PREP(PWM_IMX_TPM_SC_PS, p->prescale);
+ 		writel(val, tpm->base + PWM_IMX_TPM_SC);
+ 
++		/*
++		 * if the counter is disabled (CMOD == 0), programming the new
++		 * period length (MOD) will not reset the counter (CNT). If
++		 * CNT.COUNT happens to be bigger than the new MOD value then
++		 * the counter will end up being reset way too late. Therefore,
++		 * manually reset it to 0.
++		 */
++		if (!cmod)
++			writel(0x0, tpm->base + PWM_IMX_TPM_CNT);
+ 		/*
+ 		 * set period count:
+ 		 * if the PWM is disabled (CMOD[1:0] = 2b00), then MOD register
+diff --git a/drivers/pwm/pwm-mediatek.c b/drivers/pwm/pwm-mediatek.c
+index 33d3554b9197ab..bfbfe7f2917b1d 100644
+--- a/drivers/pwm/pwm-mediatek.c
++++ b/drivers/pwm/pwm-mediatek.c
+@@ -115,6 +115,26 @@ static inline void pwm_mediatek_writel(struct pwm_mediatek_chip *chip,
+ 	writel(value, chip->regs + chip->soc->reg_offset[num] + offset);
+ }
+ 
++static void pwm_mediatek_enable(struct pwm_chip *chip, struct pwm_device *pwm)
++{
++	struct pwm_mediatek_chip *pc = to_pwm_mediatek_chip(chip);
++	u32 value;
++
++	value = readl(pc->regs);
++	value |= BIT(pwm->hwpwm);
++	writel(value, pc->regs);
++}
++
++static void pwm_mediatek_disable(struct pwm_chip *chip, struct pwm_device *pwm)
++{
++	struct pwm_mediatek_chip *pc = to_pwm_mediatek_chip(chip);
++	u32 value;
++
++	value = readl(pc->regs);
++	value &= ~BIT(pwm->hwpwm);
++	writel(value, pc->regs);
++}
++
+ static int pwm_mediatek_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ 			       int duty_ns, int period_ns)
+ {
+@@ -144,7 +164,10 @@ static int pwm_mediatek_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ 	do_div(resolution, clk_rate);
+ 
+ 	cnt_period = DIV_ROUND_CLOSEST_ULL((u64)period_ns * 1000, resolution);
+-	while (cnt_period > 8191) {
++	if (!cnt_period)
++		return -EINVAL;
++
++	while (cnt_period > 8192) {
+ 		resolution *= 2;
+ 		clkdiv++;
+ 		cnt_period = DIV_ROUND_CLOSEST_ULL((u64)period_ns * 1000,
+@@ -167,9 +190,16 @@ static int pwm_mediatek_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ 	}
+ 
+ 	cnt_duty = DIV_ROUND_CLOSEST_ULL((u64)duty_ns * 1000, resolution);
++
+ 	pwm_mediatek_writel(pc, pwm->hwpwm, PWMCON, BIT(15) | clkdiv);
+-	pwm_mediatek_writel(pc, pwm->hwpwm, reg_width, cnt_period);
+-	pwm_mediatek_writel(pc, pwm->hwpwm, reg_thres, cnt_duty);
++	pwm_mediatek_writel(pc, pwm->hwpwm, reg_width, cnt_period - 1);
++
++	if (cnt_duty) {
++		pwm_mediatek_writel(pc, pwm->hwpwm, reg_thres, cnt_duty - 1);
++		pwm_mediatek_enable(chip, pwm);
++	} else {
++		pwm_mediatek_disable(chip, pwm);
++	}
+ 
+ out:
+ 	pwm_mediatek_clk_disable(chip, pwm);
+@@ -177,35 +207,6 @@ static int pwm_mediatek_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ 	return ret;
+ }
+ 
+-static int pwm_mediatek_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+-{
+-	struct pwm_mediatek_chip *pc = to_pwm_mediatek_chip(chip);
+-	u32 value;
+-	int ret;
+-
+-	ret = pwm_mediatek_clk_enable(chip, pwm);
+-	if (ret < 0)
+-		return ret;
+-
+-	value = readl(pc->regs);
+-	value |= BIT(pwm->hwpwm);
+-	writel(value, pc->regs);
+-
+-	return 0;
+-}
+-
+-static void pwm_mediatek_disable(struct pwm_chip *chip, struct pwm_device *pwm)
+-{
+-	struct pwm_mediatek_chip *pc = to_pwm_mediatek_chip(chip);
+-	u32 value;
+-
+-	value = readl(pc->regs);
+-	value &= ~BIT(pwm->hwpwm);
+-	writel(value, pc->regs);
+-
+-	pwm_mediatek_clk_disable(chip, pwm);
+-}
+-
+ static int pwm_mediatek_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ 			      const struct pwm_state *state)
+ {
+@@ -215,8 +216,10 @@ static int pwm_mediatek_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ 		return -EINVAL;
+ 
+ 	if (!state->enabled) {
+-		if (pwm->state.enabled)
++		if (pwm->state.enabled) {
+ 			pwm_mediatek_disable(chip, pwm);
++			pwm_mediatek_clk_disable(chip, pwm);
++		}
+ 
+ 		return 0;
+ 	}
+@@ -226,7 +229,7 @@ static int pwm_mediatek_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ 		return err;
+ 
+ 	if (!pwm->state.enabled)
+-		err = pwm_mediatek_enable(chip, pwm);
++		err = pwm_mediatek_clk_enable(chip, pwm);
+ 
+ 	return err;
+ }
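The pwm-mediatek config rewrite fixes two corner cases: a period that computes to zero counts is now rejected, and since PWMDWIDTH/PWMTHRES hold the count minus one, the limit check moves from 8191 to 8192 and the register writes subtract one; a zero duty count now simply gates the channel off instead of programming a bogus threshold. A simplified worked example of the count arithmetic (fixed clkdiv, illustrative clock rate, ignoring the clkdiv retry loop):

#include <stdio.h>

#define DIV_ROUND_CLOSEST_ULL(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	unsigned long long clk_rate = 26000000;	/* 26 MHz source, example */
	unsigned long long period_ns = 100000;	/* 100 us */
	unsigned long long duty_ns = 25000;	/* 25 % */

	/* resolution in ns * 1000, as in pwm_mediatek_config() */
	unsigned long long res = 1000000000ULL * 1000 / clk_rate;
	unsigned long long cnt_period =
		DIV_ROUND_CLOSEST_ULL(period_ns * 1000, res);	/* 2600 */
	unsigned long long cnt_duty =
		DIV_ROUND_CLOSEST_ULL(duty_ns * 1000, res);	/* 650 */

	/* the registers hold count - 1, so a full 13-bit field (8191)
	 * now expresses 8192 clock cycles
	 */
	printf("period reg = %llu, duty reg = %llu\n",
	       cnt_period - 1, cnt_duty - 1);
	return 0;
}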
+diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
+index 45bd001206a2b8..d9899b4d476727 100644
+--- a/drivers/s390/char/sclp.c
++++ b/drivers/s390/char/sclp.c
+@@ -76,6 +76,13 @@ unsigned long sclp_console_full;
+ /* The currently active SCLP command word. */
+ static sclp_cmdw_t active_cmd;
+ 
++static inline struct sccb_header *sclpint_to_sccb(u32 sccb_int)
++{
++	if (sccb_int)
++		return __va(sccb_int);
++	return NULL;
++}
++
+ static inline void sclp_trace(int prio, char *id, u32 a, u64 b, bool err)
+ {
+ 	struct sclp_trace_entry e;
+@@ -619,7 +626,7 @@ __sclp_find_req(u32 sccb)
+ 
+ static bool ok_response(u32 sccb_int, sclp_cmdw_t cmd)
+ {
+-	struct sccb_header *sccb = (struct sccb_header *)__va(sccb_int);
++	struct sccb_header *sccb = sclpint_to_sccb(sccb_int);
+ 	struct evbuf_header *evbuf;
+ 	u16 response;
+ 
+@@ -658,7 +665,7 @@ static void sclp_interrupt_handler(struct ext_code ext_code,
+ 
+ 	/* INT: Interrupt received (a=intparm, b=cmd) */
+ 	sclp_trace_sccb(0, "INT", param32, active_cmd, active_cmd,
+-			(struct sccb_header *)__va(finished_sccb),
++			sclpint_to_sccb(finished_sccb),
+ 			!ok_response(finished_sccb, active_cmd));
+ 
+ 	if (finished_sccb) {
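sclpint_to_sccb() exists because __va(0) does not return NULL: a zero SCCB interrupt parameter previously came back as a non-NULL pointer into the linear map, defeating the later NULL checks before dereference. A trivial sketch of the translate-or-NULL idiom (the pool and offsets are illustrative, not the s390 address translation):

#include <stdio.h>

static const char *token_to_ptr(unsigned int token, const char *base)
{
	/* translate "no buffer" to NULL before anyone dereferences it */
	return token ? base + token : NULL;
}

int main(void)
{
	static const char pool[16] = "sccb";

	printf("%p\n", (void *)token_to_ptr(0, pool));		/* (nil) */
	printf("%s\n", token_to_ptr(0, pool) ? "ok" : "no sccb");
	return 0;
}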
+diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h
+index ab7c5f1fc04121..840195373084e3 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr.h
++++ b/drivers/scsi/mpi3mr/mpi3mr.h
+@@ -1131,6 +1131,8 @@ struct scmd_priv {
+  * @logdata_buf: Circular buffer to store log data entries
+  * @logdata_buf_idx: Index of entry in buffer to store
+  * @logdata_entry_sz: log data entry size
++ * @adm_req_q_bar_writeq_lock: Admin request queue lock
++ * @adm_reply_q_bar_writeq_lock: Admin reply queue lock
+  * @pend_large_data_sz: Counter to track pending large data
+  * @io_throttle_data_length: I/O size to track in 512b blocks
+  * @io_throttle_high: I/O size to start throttle in 512b blocks
+@@ -1175,7 +1177,7 @@ struct mpi3mr_ioc {
+ 	char name[MPI3MR_NAME_LENGTH];
+ 	char driver_name[MPI3MR_NAME_LENGTH];
+ 
+-	volatile struct mpi3_sysif_registers __iomem *sysif_regs;
++	struct mpi3_sysif_registers __iomem *sysif_regs;
+ 	resource_size_t sysif_regs_phys;
+ 	int bars;
+ 	u64 dma_mask;
+@@ -1328,6 +1330,8 @@ struct mpi3mr_ioc {
+ 	u8 *logdata_buf;
+ 	u16 logdata_buf_idx;
+ 	u16 logdata_entry_sz;
++	spinlock_t adm_req_q_bar_writeq_lock;
++	spinlock_t adm_reply_q_bar_writeq_lock;
+ 
+ 	atomic_t pend_large_data_sz;
+ 	u32 io_throttle_data_length;
+diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c
+index 604f37e5c0c355..08c751884b3272 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr_fw.c
++++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c
+@@ -23,17 +23,22 @@ module_param(poll_queues, int, 0444);
+ MODULE_PARM_DESC(poll_queues, "Number of queues for io_uring poll mode. (Range 1 - 126)");
+ 
+ #if defined(writeq) && defined(CONFIG_64BIT)
+-static inline void mpi3mr_writeq(__u64 b, volatile void __iomem *addr)
++static inline void mpi3mr_writeq(__u64 b, void __iomem *addr,
++	spinlock_t *write_queue_lock)
+ {
+ 	writeq(b, addr);
+ }
+ #else
+-static inline void mpi3mr_writeq(__u64 b, volatile void __iomem *addr)
++static inline void mpi3mr_writeq(__u64 b, void __iomem *addr,
++	spinlock_t *write_queue_lock)
+ {
+ 	__u64 data_out = b;
++	unsigned long flags;
+ 
++	spin_lock_irqsave(write_queue_lock, flags);
+ 	writel((u32)(data_out), addr);
+ 	writel((u32)(data_out >> 32), (addr + 4));
++	spin_unlock_irqrestore(write_queue_lock, flags);
+ }
+ #endif
+ 
+@@ -428,8 +433,8 @@ static void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc,
+ 				       MPI3MR_SENSE_BUF_SZ);
+ 			}
+ 			if (cmdptr->is_waiting) {
+-				complete(&cmdptr->done);
+ 				cmdptr->is_waiting = 0;
++				complete(&cmdptr->done);
+ 			} else if (cmdptr->callback)
+ 				cmdptr->callback(mrioc, cmdptr);
+ 		}
+@@ -2931,9 +2936,11 @@ static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc)
+ 	    (mrioc->num_admin_req);
+ 	writel(num_admin_entries, &mrioc->sysif_regs->admin_queue_num_entries);
+ 	mpi3mr_writeq(mrioc->admin_req_dma,
+-	    &mrioc->sysif_regs->admin_request_queue_address);
++		&mrioc->sysif_regs->admin_request_queue_address,
++		&mrioc->adm_req_q_bar_writeq_lock);
+ 	mpi3mr_writeq(mrioc->admin_reply_dma,
+-	    &mrioc->sysif_regs->admin_reply_queue_address);
++		&mrioc->sysif_regs->admin_reply_queue_address,
++		&mrioc->adm_reply_q_bar_writeq_lock);
+ 	writel(mrioc->admin_req_pi, &mrioc->sysif_regs->admin_request_queue_pi);
+ 	writel(mrioc->admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci);
+ 	return retval;
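On 32-bit builds without a native writeq(), the driver emulates the 64-bit register write with two writel() calls; the new per-queue spinlock makes that pair atomic against a concurrent writer, which could otherwise interleave halves and latch a torn address in the BAR register. A minimal userspace analogue of the locked split write (a pthread mutex standing in for the spinlock, an array for the MMIO register):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t writeq_lock = PTHREAD_MUTEX_INITIALIZER;
static volatile uint32_t reg[2];   /* stand-in for the 64-bit register */

/* Without a 64-bit store, the two 32-bit halves must be written
 * atomically with respect to other writers, hence the lock.
 */
static void writeq_fallback(uint64_t v)
{
	pthread_mutex_lock(&writeq_lock);
	reg[0] = (uint32_t)v;
	reg[1] = (uint32_t)(v >> 32);
	pthread_mutex_unlock(&writeq_lock);
}

int main(void)
{
	writeq_fallback(0x1122334455667788ULL);
	printf("lo=0x%08x hi=0x%08x\n", reg[0], reg[1]);
	return 0;
}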
+diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
+index 990646e1e18d8e..1930e47cbf7bd2 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr_os.c
++++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
+@@ -5251,6 +5251,8 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	spin_lock_init(&mrioc->tgtdev_lock);
+ 	spin_lock_init(&mrioc->watchdog_lock);
+ 	spin_lock_init(&mrioc->chain_buf_lock);
++	spin_lock_init(&mrioc->adm_req_q_bar_writeq_lock);
++	spin_lock_init(&mrioc->adm_reply_q_bar_writeq_lock);
+ 	spin_lock_init(&mrioc->sas_node_lock);
+ 	spin_lock_init(&mrioc->trigger_lock);
+ 
+diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
+index 97e9ca5a2a02c3..59ff6bb11d84c6 100644
+--- a/drivers/scsi/qla4xxx/ql4_os.c
++++ b/drivers/scsi/qla4xxx/ql4_os.c
+@@ -6606,6 +6606,8 @@ static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha,
+ 
+ 	ep = qla4xxx_ep_connect(ha->host, (struct sockaddr *)dst_addr, 0);
+ 	vfree(dst_addr);
++	if (IS_ERR(ep))
++		return NULL;
+ 	return ep;
+ }
+ 
+diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
+index ce4b428b63f832..a4cafc688c2a13 100644
+--- a/drivers/scsi/scsi_lib.c
++++ b/drivers/scsi/scsi_lib.c
+@@ -210,6 +210,9 @@ static int scsi_check_passthrough(struct scsi_cmnd *scmd,
+ 	struct scsi_sense_hdr sshdr;
+ 	enum sam_status status;
+ 
++	if (!scmd->result)
++		return 0;
++
+ 	if (!failures)
+ 		return 0;
+ 
+diff --git a/drivers/soc/qcom/mdt_loader.c b/drivers/soc/qcom/mdt_loader.c
+index 44589d10b15b50..64e0facc392e5d 100644
+--- a/drivers/soc/qcom/mdt_loader.c
++++ b/drivers/soc/qcom/mdt_loader.c
+@@ -18,6 +18,37 @@
+ #include <linux/slab.h>
+ #include <linux/soc/qcom/mdt_loader.h>
+ 
++static bool mdt_header_valid(const struct firmware *fw)
++{
++	const struct elf32_hdr *ehdr;
++	size_t phend;
++	size_t shend;
++
++	if (fw->size < sizeof(*ehdr))
++		return false;
++
++	ehdr = (struct elf32_hdr *)fw->data;
++
++	if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG))
++		return false;
++
++	if (ehdr->e_phentsize != sizeof(struct elf32_phdr))
++		return false;
++
++	phend = size_add(size_mul(sizeof(struct elf32_phdr), ehdr->e_phnum), ehdr->e_phoff);
++	if (phend > fw->size)
++		return false;
++
++	if (ehdr->e_shentsize != sizeof(struct elf32_shdr))
++		return false;
++
++	shend = size_add(size_mul(sizeof(struct elf32_shdr), ehdr->e_shnum), ehdr->e_shoff);
++	if (shend > fw->size)
++		return false;
++
++	return true;
++}
++
+ static bool mdt_phdr_valid(const struct elf32_phdr *phdr)
+ {
+ 	if (phdr->p_type != PT_LOAD)
+@@ -82,6 +113,9 @@ ssize_t qcom_mdt_get_size(const struct firmware *fw)
+ 	phys_addr_t max_addr = 0;
+ 	int i;
+ 
++	if (!mdt_header_valid(fw))
++		return -EINVAL;
++
+ 	ehdr = (struct elf32_hdr *)fw->data;
+ 	phdrs = (struct elf32_phdr *)(fw->data + ehdr->e_phoff);
+ 
+@@ -134,6 +168,9 @@ void *qcom_mdt_read_metadata(const struct firmware *fw, size_t *data_len,
+ 	ssize_t ret;
+ 	void *data;
+ 
++	if (!mdt_header_valid(fw))
++		return ERR_PTR(-EINVAL);
++
+ 	ehdr = (struct elf32_hdr *)fw->data;
+ 	phdrs = (struct elf32_phdr *)(fw->data + ehdr->e_phoff);
+ 
+@@ -214,6 +251,9 @@ int qcom_mdt_pas_init(struct device *dev, const struct firmware *fw,
+ 	int ret;
+ 	int i;
+ 
++	if (!mdt_header_valid(fw))
++		return -EINVAL;
++
+ 	ehdr = (struct elf32_hdr *)fw->data;
+ 	phdrs = (struct elf32_phdr *)(fw->data + ehdr->e_phoff);
+ 
+@@ -310,6 +350,9 @@ static int __qcom_mdt_load(struct device *dev, const struct firmware *fw,
+ 	if (!fw || !mem_region || !mem_phys || !mem_size)
+ 		return -EINVAL;
+ 
++	if (!mdt_header_valid(fw))
++		return -EINVAL;
++
+ 	is_split = qcom_mdt_bins_are_split(fw, fw_name);
+ 	ehdr = (struct elf32_hdr *)fw->data;
+ 	phdrs = (struct elf32_phdr *)(fw->data + ehdr->e_phoff);
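mdt_header_valid() guards every later phdr/shdr access: e_phentsize/e_shentsize must match the expected struct sizes, and entsize * num + off must stay inside fw->size, computed with size_mul()/size_add() so a hostile header cannot wrap the arithmetic (those helpers saturate at SIZE_MAX, which then fails the bounds test). A standalone sketch of the same check using the compiler's overflow builtins instead:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Overflow-checked analogues of the kernel's size_mul()/size_add(). */
static bool mul_sz(size_t a, size_t b, size_t *out)
{
	return !__builtin_mul_overflow(a, b, out);
}

static bool add_sz(size_t a, size_t b, size_t *out)
{
	return !__builtin_add_overflow(a, b, out);
}

/* Reject a table whose end, entsize * num + off, overflows or exceeds
 * the firmware image, mirroring mdt_header_valid().
 */
static bool table_in_bounds(size_t entsize, size_t num, size_t off,
			    size_t fw_size)
{
	size_t span, end;

	if (!mul_sz(entsize, num, &span) || !add_sz(span, off, &end))
		return false;
	return end <= fw_size;
}

int main(void)
{
	printf("%d\n", table_in_bounds(32, 4, 52, 4096));	   /* 1 */
	printf("%d\n", table_in_bounds(32, (size_t)-1, 52, 4096)); /* 0 */
	return 0;
}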
+diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
+index a08c377933c505..0fdccd73620949 100644
+--- a/drivers/soc/tegra/pmc.c
++++ b/drivers/soc/tegra/pmc.c
+@@ -1233,7 +1233,7 @@ static int tegra_powergate_of_get_clks(struct tegra_powergate *pg,
+ }
+ 
+ static int tegra_powergate_of_get_resets(struct tegra_powergate *pg,
+-					 struct device_node *np, bool off)
++					 struct device_node *np)
+ {
+ 	struct device *dev = pg->pmc->dev;
+ 	int err;
+@@ -1248,22 +1248,6 @@ static int tegra_powergate_of_get_resets(struct tegra_powergate *pg,
+ 	err = reset_control_acquire(pg->reset);
+ 	if (err < 0) {
+ 		pr_err("failed to acquire resets: %d\n", err);
+-		goto out;
+-	}
+-
+-	if (off) {
+-		err = reset_control_assert(pg->reset);
+-	} else {
+-		err = reset_control_deassert(pg->reset);
+-		if (err < 0)
+-			goto out;
+-
+-		reset_control_release(pg->reset);
+-	}
+-
+-out:
+-	if (err) {
+-		reset_control_release(pg->reset);
+ 		reset_control_put(pg->reset);
+ 	}
+ 
+@@ -1308,20 +1292,43 @@ static int tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np)
+ 		goto set_available;
+ 	}
+ 
+-	err = tegra_powergate_of_get_resets(pg, np, off);
++	err = tegra_powergate_of_get_resets(pg, np);
+ 	if (err < 0) {
+ 		dev_err(dev, "failed to get resets for %pOFn: %d\n", np, err);
+ 		goto remove_clks;
+ 	}
+ 
+-	if (!IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS)) {
+-		if (off)
+-			WARN_ON(tegra_powergate_power_up(pg, true));
++	/*
++	 * If the power-domain is off, then ensure the resets are asserted.
++	 * If the power-domain is on, then power it down to ensure that when
++	 * it is turned on again, the power-domain, clocks and resets are all
++	 * in the expected state.
++	 */
++	if (off) {
++		err = reset_control_assert(pg->reset);
++		if (err) {
++			pr_err("failed to assert resets: %d\n", err);
++			goto remove_resets;
++		}
++	} else {
++		err = tegra_powergate_power_down(pg);
++		if (err) {
++			dev_err(dev, "failed to turn off PM domain %s: %d\n",
++				pg->genpd.name, err);
++			goto remove_resets;
++		}
++	}
+ 
++	/*
++	 * If PM_GENERIC_DOMAINS is not enabled, power on the domain and skip
++	 * the genpd registration.
++	 */
++	if (!IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS)) {
++		WARN_ON(tegra_powergate_power_up(pg, true));
+ 		goto remove_resets;
+ 	}
+ 
+-	err = pm_genpd_init(&pg->genpd, NULL, off);
++	err = pm_genpd_init(&pg->genpd, NULL, true);
+ 	if (err < 0) {
+ 		dev_err(dev, "failed to initialise PM domain %pOFn: %d\n", np,
+ 		       err);
+diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
+index 29b9676fe43d89..f8cacb9c7408f3 100644
+--- a/drivers/spi/spi-fsl-lpspi.c
++++ b/drivers/spi/spi-fsl-lpspi.c
+@@ -330,13 +330,11 @@ static int fsl_lpspi_set_bitrate(struct fsl_lpspi_data *fsl_lpspi)
+ 	}
+ 
+ 	if (config.speed_hz > perclk_rate / 2) {
+-		dev_err(fsl_lpspi->dev,
+-		      "per-clk should be at least two times of transfer speed");
+-		return -EINVAL;
++		div = 2;
++	} else {
++		div = DIV_ROUND_UP(perclk_rate, config.speed_hz);
+ 	}
+ 
+-	div = DIV_ROUND_UP(perclk_rate, config.speed_hz);
+-
+ 	for (prescale = 0; prescale <= prescale_max; prescale++) {
+ 		scldiv = div / (1 << prescale) - 2;
+ 		if (scldiv >= 0 && scldiv < 256) {
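Instead of failing outright when the requested speed exceeds half the peripheral clock, the lpspi driver now clamps the divider to its hardware minimum of 2 (SCKDIV encodes div - 2), so the transfer proceeds at the fastest achievable rate. A small sketch of that clamp (illustrative clock values):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int perclk = 24000000;		/* 24 MHz, example */
	unsigned int speeds[] = { 1000000, 20000000 };
	unsigned int i;

	for (i = 0; i < 2; i++) {
		/* SCKDIV stores div - 2, so div must be >= 2: a request
		 * above perclk/2 is clamped, not rejected
		 */
		unsigned int div = speeds[i] > perclk / 2 ?
			2 : DIV_ROUND_UP(perclk, speeds[i]);
		printf("%u Hz -> div %u (actual %u Hz)\n",
		       speeds[i], div, perclk / div);
	}
	return 0;
}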
+diff --git a/drivers/staging/media/imx/imx-media-csc-scaler.c b/drivers/staging/media/imx/imx-media-csc-scaler.c
+index 95cca281e8a378..07104e7f5a5f9d 100644
+--- a/drivers/staging/media/imx/imx-media-csc-scaler.c
++++ b/drivers/staging/media/imx/imx-media-csc-scaler.c
+@@ -914,7 +914,7 @@ imx_media_csc_scaler_device_init(struct imx_media_dev *md)
+ 	return &priv->vdev;
+ 
+ err_m2m:
+-	video_set_drvdata(vfd, NULL);
++	video_device_release(vfd);
+ err_vfd:
+ 	kfree(priv);
+ 	return ERR_PTR(ret);
+diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
+index 05196799296522..03aca7eaca160d 100644
+--- a/drivers/tty/serial/8250/8250_port.c
++++ b/drivers/tty/serial/8250/8250_port.c
+@@ -2351,9 +2351,8 @@ int serial8250_do_startup(struct uart_port *port)
+ 	/*
+ 	 * Now, initialize the UART
+ 	 */
+-	serial_port_out(port, UART_LCR, UART_LCR_WLEN8);
+-
+ 	uart_port_lock_irqsave(port, &flags);
++	serial_port_out(port, UART_LCR, UART_LCR_WLEN8);
+ 	if (up->port.flags & UPF_FOURPORT) {
+ 		if (!up->port.irq)
+ 			up->port.mctrl |= TIOCM_OUT1;
+diff --git a/drivers/tty/vt/defkeymap.c_shipped b/drivers/tty/vt/defkeymap.c_shipped
+index 0c043e4f292e8a..6af7bf8d5460c5 100644
+--- a/drivers/tty/vt/defkeymap.c_shipped
++++ b/drivers/tty/vt/defkeymap.c_shipped
+@@ -23,6 +23,22 @@ unsigned short plain_map[NR_KEYS] = {
+ 	0xf118,	0xf601,	0xf602,	0xf117,	0xf600,	0xf119,	0xf115,	0xf116,
+ 	0xf11a,	0xf10c,	0xf10d,	0xf11b,	0xf11c,	0xf110,	0xf311,	0xf11d,
+ 	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+ };
+ 
+ static unsigned short shift_map[NR_KEYS] = {
+@@ -42,6 +58,22 @@ static unsigned short shift_map[NR_KEYS] = {
+ 	0xf20b,	0xf601,	0xf602,	0xf117,	0xf600,	0xf20a,	0xf115,	0xf116,
+ 	0xf11a,	0xf10c,	0xf10d,	0xf11b,	0xf11c,	0xf110,	0xf311,	0xf11d,
+ 	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+ };
+ 
+ static unsigned short altgr_map[NR_KEYS] = {
+@@ -61,6 +93,22 @@ static unsigned short altgr_map[NR_KEYS] = {
+ 	0xf118,	0xf601,	0xf602,	0xf117,	0xf600,	0xf119,	0xf115,	0xf116,
+ 	0xf11a,	0xf10c,	0xf10d,	0xf11b,	0xf11c,	0xf110,	0xf311,	0xf11d,
+ 	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+ };
+ 
+ static unsigned short ctrl_map[NR_KEYS] = {
+@@ -80,6 +128,22 @@ static unsigned short ctrl_map[NR_KEYS] = {
+ 	0xf118,	0xf601,	0xf602,	0xf117,	0xf600,	0xf119,	0xf115,	0xf116,
+ 	0xf11a,	0xf10c,	0xf10d,	0xf11b,	0xf11c,	0xf110,	0xf311,	0xf11d,
+ 	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+ };
+ 
+ static unsigned short shift_ctrl_map[NR_KEYS] = {
+@@ -99,6 +163,22 @@ static unsigned short shift_ctrl_map[NR_KEYS] = {
+ 	0xf118,	0xf601,	0xf602,	0xf117,	0xf600,	0xf119,	0xf115,	0xf116,
+ 	0xf11a,	0xf10c,	0xf10d,	0xf11b,	0xf11c,	0xf110,	0xf311,	0xf11d,
+ 	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+ };
+ 
+ static unsigned short alt_map[NR_KEYS] = {
+@@ -118,6 +198,22 @@ static unsigned short alt_map[NR_KEYS] = {
+ 	0xf118,	0xf210,	0xf211,	0xf117,	0xf600,	0xf119,	0xf115,	0xf116,
+ 	0xf11a,	0xf10c,	0xf10d,	0xf11b,	0xf11c,	0xf110,	0xf311,	0xf11d,
+ 	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+ };
+ 
+ static unsigned short ctrl_alt_map[NR_KEYS] = {
+@@ -137,6 +233,22 @@ static unsigned short ctrl_alt_map[NR_KEYS] = {
+ 	0xf118,	0xf601,	0xf602,	0xf117,	0xf600,	0xf119,	0xf115,	0xf20c,
+ 	0xf11a,	0xf10c,	0xf10d,	0xf11b,	0xf11c,	0xf110,	0xf311,	0xf11d,
+ 	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
++	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+ };
+ 
+ unsigned short *key_maps[MAX_NR_KEYMAPS] = {
+diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
+index 804355da46f5a0..00caf1c2bcee66 100644
+--- a/drivers/tty/vt/keyboard.c
++++ b/drivers/tty/vt/keyboard.c
+@@ -1494,7 +1494,7 @@ static void kbd_keycode(unsigned int keycode, int down, bool hw_raw)
+ 		rc = atomic_notifier_call_chain(&keyboard_notifier_list,
+ 						KBD_UNICODE, &param);
+ 		if (rc != NOTIFY_STOP)
+-			if (down && !raw_mode)
++			if (down && !(raw_mode || kbd->kbdmode == VC_OFF))
+ 				k_unicode(vc, keysym, !down);
+ 		return;
+ 	}
+diff --git a/drivers/ufs/host/ufs-exynos.c b/drivers/ufs/host/ufs-exynos.c
+index 5ba17ccf6417fe..6bd1532bfd1d6d 100644
+--- a/drivers/ufs/host/ufs-exynos.c
++++ b/drivers/ufs/host/ufs-exynos.c
+@@ -1078,8 +1078,8 @@ static int exynos_ufs_post_link(struct ufs_hba *hba)
+ 	hci_writel(ufs, val, HCI_TXPRDT_ENTRY_SIZE);
+ 
+ 	hci_writel(ufs, ilog2(DATA_UNIT_SIZE), HCI_RXPRDT_ENTRY_SIZE);
+-	hci_writel(ufs, (1 << hba->nutrs) - 1, HCI_UTRL_NEXUS_TYPE);
+-	hci_writel(ufs, (1 << hba->nutmrs) - 1, HCI_UTMRL_NEXUS_TYPE);
++	hci_writel(ufs, BIT(hba->nutrs) - 1, HCI_UTRL_NEXUS_TYPE);
++	hci_writel(ufs, BIT(hba->nutmrs) - 1, HCI_UTMRL_NEXUS_TYPE);
+ 	hci_writel(ufs, 0xf, HCI_AXIDMA_RWDATA_BURST_LEN);
+ 
+ 	if (ufs->opts & EXYNOS_UFS_OPT_SKIP_CONNECTION_ESTAB)
+diff --git a/drivers/ufs/host/ufshcd-pci.c b/drivers/ufs/host/ufshcd-pci.c
+index 850ff71130d5e4..570067483a049c 100644
+--- a/drivers/ufs/host/ufshcd-pci.c
++++ b/drivers/ufs/host/ufshcd-pci.c
+@@ -216,6 +216,32 @@ static int ufs_intel_lkf_apply_dev_quirks(struct ufs_hba *hba)
+ 	return ret;
+ }
+ 
++static void ufs_intel_ctrl_uic_compl(struct ufs_hba *hba, bool enable)
++{
++	u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
++
++	if (enable)
++		set |= UIC_COMMAND_COMPL;
++	else
++		set &= ~UIC_COMMAND_COMPL;
++	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
++}
++
++static void ufs_intel_mtl_h8_notify(struct ufs_hba *hba,
++				    enum uic_cmd_dme cmd,
++				    enum ufs_notify_change_status status)
++{
++	/*
++	 * Disable UIC COMPL INTR to prevent access to UFSHCI after
++	 * checking HCS.UPMCRS
++	 */
++	if (status == PRE_CHANGE && cmd == UIC_CMD_DME_HIBER_ENTER)
++		ufs_intel_ctrl_uic_compl(hba, false);
++
++	if (status == POST_CHANGE && cmd == UIC_CMD_DME_HIBER_EXIT)
++		ufs_intel_ctrl_uic_compl(hba, true);
++}
++
+ #define INTEL_ACTIVELTR		0x804
+ #define INTEL_IDLELTR		0x808
+ 
+@@ -442,10 +468,23 @@ static int ufs_intel_adl_init(struct ufs_hba *hba)
+ 	return ufs_intel_common_init(hba);
+ }
+ 
++static void ufs_intel_mtl_late_init(struct ufs_hba *hba)
++{
++	hba->rpm_lvl = UFS_PM_LVL_2;
++	hba->spm_lvl = UFS_PM_LVL_2;
++}
++
+ static int ufs_intel_mtl_init(struct ufs_hba *hba)
+ {
++	struct ufs_host *ufs_host;
++	int err;
++
+ 	hba->caps |= UFSHCD_CAP_CRYPTO | UFSHCD_CAP_WB_EN;
+-	return ufs_intel_common_init(hba);
++	err = ufs_intel_common_init(hba);
++	/* Get variant after it is set in ufs_intel_common_init() */
++	ufs_host = ufshcd_get_variant(hba);
++	ufs_host->late_init = ufs_intel_mtl_late_init;
++	return err;
+ }
+ 
+ static int ufs_qemu_get_hba_mac(struct ufs_hba *hba)
+@@ -533,6 +572,7 @@ static struct ufs_hba_variant_ops ufs_intel_mtl_hba_vops = {
+ 	.init			= ufs_intel_mtl_init,
+ 	.exit			= ufs_intel_common_exit,
+ 	.hce_enable_notify	= ufs_intel_hce_enable_notify,
++	.hibern8_notify		= ufs_intel_mtl_h8_notify,
+ 	.link_startup_notify	= ufs_intel_link_startup_notify,
+ 	.resume			= ufs_intel_resume,
+ 	.device_reset		= ufs_intel_device_reset,
+diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
+index 47d06af33747d0..08faf82ec31d82 100644
+--- a/drivers/usb/atm/cxacru.c
++++ b/drivers/usb/atm/cxacru.c
+@@ -980,25 +980,60 @@ static int cxacru_fw(struct usb_device *usb_dev, enum cxacru_fw_request fw,
+ 	return ret;
+ }
+ 
+-static void cxacru_upload_firmware(struct cxacru_data *instance,
+-				   const struct firmware *fw,
+-				   const struct firmware *bp)
++
++static int cxacru_find_firmware(struct cxacru_data *instance,
++				char *phase, const struct firmware **fw_p)
+ {
+-	int ret;
++	struct usbatm_data *usbatm = instance->usbatm;
++	struct device *dev = &usbatm->usb_intf->dev;
++	char buf[16];
++
++	sprintf(buf, "cxacru-%s.bin", phase);
++	usb_dbg(usbatm, "cxacru_find_firmware: looking for %s\n", buf);
++
++	if (request_firmware(fw_p, buf, dev)) {
++		usb_dbg(usbatm, "no stage %s firmware found\n", phase);
++		return -ENOENT;
++	}
++
++	usb_info(usbatm, "found firmware %s\n", buf);
++
++	return 0;
++}
++
++static int cxacru_heavy_init(struct usbatm_data *usbatm_instance,
++			     struct usb_interface *usb_intf)
++{
++	const struct firmware *fw, *bp;
++	struct cxacru_data *instance = usbatm_instance->driver_data;
+ 	struct usbatm_data *usbatm = instance->usbatm;
+ 	struct usb_device *usb_dev = usbatm->usb_dev;
+ 	__le16 signature[] = { usb_dev->descriptor.idVendor,
+ 			       usb_dev->descriptor.idProduct };
+ 	__le32 val;
++	int ret;
+ 
+-	usb_dbg(usbatm, "%s\n", __func__);
++	ret = cxacru_find_firmware(instance, "fw", &fw);
++	if (ret) {
++		usb_warn(usbatm_instance, "firmware (cxacru-fw.bin) unavailable (system misconfigured?)\n");
++		return ret;
++	}
++
++	if (instance->modem_type->boot_rom_patch) {
++		ret = cxacru_find_firmware(instance, "bp", &bp);
++		if (ret) {
++			usb_warn(usbatm_instance, "boot ROM patch (cxacru-bp.bin) unavailable (system misconfigured?)\n");
++			release_firmware(fw);
++			return ret;
++		}
++	}
+ 
+ 	/* FirmwarePllFClkValue */
+ 	val = cpu_to_le32(instance->modem_type->pll_f_clk);
+ 	ret = cxacru_fw(usb_dev, FW_WRITE_MEM, 0x2, 0x0, PLLFCLK_ADDR, (u8 *) &val, 4);
+ 	if (ret) {
+ 		usb_err(usbatm, "FirmwarePllFClkValue failed: %d\n", ret);
+-		return;
++		goto done;
+ 	}
+ 
+ 	/* FirmwarePllBClkValue */
+@@ -1006,7 +1041,7 @@ static void cxacru_upload_firmware(struct cxacru_data *instance,
+ 	ret = cxacru_fw(usb_dev, FW_WRITE_MEM, 0x2, 0x0, PLLBCLK_ADDR, (u8 *) &val, 4);
+ 	if (ret) {
+ 		usb_err(usbatm, "FirmwarePllBClkValue failed: %d\n", ret);
+-		return;
++		goto done;
+ 	}
+ 
+ 	/* Enable SDRAM */
+@@ -1014,7 +1049,7 @@ static void cxacru_upload_firmware(struct cxacru_data *instance,
+ 	ret = cxacru_fw(usb_dev, FW_WRITE_MEM, 0x2, 0x0, SDRAMEN_ADDR, (u8 *) &val, 4);
+ 	if (ret) {
+ 		usb_err(usbatm, "Enable SDRAM failed: %d\n", ret);
+-		return;
++		goto done;
+ 	}
+ 
+ 	/* Firmware */
+@@ -1022,7 +1057,7 @@ static void cxacru_upload_firmware(struct cxacru_data *instance,
+ 	ret = cxacru_fw(usb_dev, FW_WRITE_MEM, 0x2, 0x0, FW_ADDR, fw->data, fw->size);
+ 	if (ret) {
+ 		usb_err(usbatm, "Firmware upload failed: %d\n", ret);
+-		return;
++		goto done;
+ 	}
+ 
+ 	/* Boot ROM patch */
+@@ -1031,7 +1066,7 @@ static void cxacru_upload_firmware(struct cxacru_data *instance,
+ 		ret = cxacru_fw(usb_dev, FW_WRITE_MEM, 0x2, 0x0, BR_ADDR, bp->data, bp->size);
+ 		if (ret) {
+ 			usb_err(usbatm, "Boot ROM patching failed: %d\n", ret);
+-			return;
++			goto done;
+ 		}
+ 	}
+ 
+@@ -1039,7 +1074,7 @@ static void cxacru_upload_firmware(struct cxacru_data *instance,
+ 	ret = cxacru_fw(usb_dev, FW_WRITE_MEM, 0x2, 0x0, SIG_ADDR, (u8 *) signature, 4);
+ 	if (ret) {
+ 		usb_err(usbatm, "Signature storing failed: %d\n", ret);
+-		return;
++		goto done;
+ 	}
+ 
+ 	usb_info(usbatm, "starting device\n");
+@@ -1051,7 +1086,7 @@ static void cxacru_upload_firmware(struct cxacru_data *instance,
+ 	}
+ 	if (ret) {
+ 		usb_err(usbatm, "Passing control to firmware failed: %d\n", ret);
+-		return;
++		goto done;
+ 	}
+ 
+ 	/* Delay to allow firmware to start up. */
+@@ -1065,53 +1100,10 @@ static void cxacru_upload_firmware(struct cxacru_data *instance,
+ 	ret = cxacru_cm(instance, CM_REQUEST_CARD_GET_STATUS, NULL, 0, NULL, 0);
+ 	if (ret < 0) {
+ 		usb_err(usbatm, "modem failed to initialize: %d\n", ret);
+-		return;
+-	}
+-}
+-
+-static int cxacru_find_firmware(struct cxacru_data *instance,
+-				char *phase, const struct firmware **fw_p)
+-{
+-	struct usbatm_data *usbatm = instance->usbatm;
+-	struct device *dev = &usbatm->usb_intf->dev;
+-	char buf[16];
+-
+-	sprintf(buf, "cxacru-%s.bin", phase);
+-	usb_dbg(usbatm, "cxacru_find_firmware: looking for %s\n", buf);
+-
+-	if (request_firmware(fw_p, buf, dev)) {
+-		usb_dbg(usbatm, "no stage %s firmware found\n", phase);
+-		return -ENOENT;
+-	}
+-
+-	usb_info(usbatm, "found firmware %s\n", buf);
+-
+-	return 0;
+-}
+-
+-static int cxacru_heavy_init(struct usbatm_data *usbatm_instance,
+-			     struct usb_interface *usb_intf)
+-{
+-	const struct firmware *fw, *bp;
+-	struct cxacru_data *instance = usbatm_instance->driver_data;
+-	int ret = cxacru_find_firmware(instance, "fw", &fw);
+-
+-	if (ret) {
+-		usb_warn(usbatm_instance, "firmware (cxacru-fw.bin) unavailable (system misconfigured?)\n");
+-		return ret;
++		goto done;
+ 	}
+ 
+-	if (instance->modem_type->boot_rom_patch) {
+-		ret = cxacru_find_firmware(instance, "bp", &bp);
+-		if (ret) {
+-			usb_warn(usbatm_instance, "boot ROM patch (cxacru-bp.bin) unavailable (system misconfigured?)\n");
+-			release_firmware(fw);
+-			return ret;
+-		}
+-	}
+-
+-	cxacru_upload_firmware(instance, fw, bp);
+-
++done:
+ 	if (instance->modem_type->boot_rom_patch)
+ 		release_firmware(bp);
+ 	release_firmware(fw);
+diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
+index 0b2490347b9fe7..bc795257696ef6 100644
+--- a/drivers/usb/core/hcd.c
++++ b/drivers/usb/core/hcd.c
+@@ -1623,7 +1623,6 @@ static void __usb_hcd_giveback_urb(struct urb *urb)
+ 	struct usb_hcd *hcd = bus_to_hcd(urb->dev->bus);
+ 	struct usb_anchor *anchor = urb->anchor;
+ 	int status = urb->unlinked;
+-	unsigned long flags;
+ 
+ 	urb->hcpriv = NULL;
+ 	if (unlikely((urb->transfer_flags & URB_SHORT_NOT_OK) &&
+@@ -1641,14 +1640,13 @@ static void __usb_hcd_giveback_urb(struct urb *urb)
+ 	/* pass ownership to the completion handler */
+ 	urb->status = status;
+ 	/*
+-	 * Only collect coverage in the softirq context and disable interrupts
+-	 * to avoid scenarios with nested remote coverage collection sections
+-	 * that KCOV does not support.
+-	 * See the comment next to kcov_remote_start_usb_softirq() for details.
++	 * This function can be called in task context inside another remote
++	 * coverage collection section, but kcov doesn't support that kind of
++	 * recursion yet. Only collect coverage in softirq context for now.
+ 	 */
+-	flags = kcov_remote_start_usb_softirq((u64)urb->dev->bus->busnum);
++	kcov_remote_start_usb_softirq((u64)urb->dev->bus->busnum);
+ 	urb->complete(urb);
+-	kcov_remote_stop_softirq(flags);
++	kcov_remote_stop_softirq();
+ 
+ 	usb_anchor_resume_wakeups(anchor);
+ 	atomic_dec(&urb->use_count);
+@@ -2153,7 +2151,7 @@ static struct urb *request_single_step_set_feature_urb(
+ 	urb->complete = usb_ehset_completion;
+ 	urb->status = -EINPROGRESS;
+ 	urb->actual_length = 0;
+-	urb->transfer_flags = URB_DIR_IN;
++	urb->transfer_flags = URB_DIR_IN | URB_NO_TRANSFER_DMA_MAP;
+ 	usb_get_urb(urb);
+ 	atomic_inc(&urb->use_count);
+ 	atomic_inc(&urb->dev->urbnum);
+@@ -2217,9 +2215,15 @@ int ehset_single_step_set_feature(struct usb_hcd *hcd, int port)
+ 
+ 	/* Complete remaining DATA and STATUS stages using the same URB */
+ 	urb->status = -EINPROGRESS;
++	urb->transfer_flags &= ~URB_NO_TRANSFER_DMA_MAP;
+ 	usb_get_urb(urb);
+ 	atomic_inc(&urb->use_count);
+ 	atomic_inc(&urb->dev->urbnum);
++	if (map_urb_for_dma(hcd, urb, GFP_KERNEL)) {
++		usb_put_urb(urb);
++		goto out1;
++	}
++
+ 	retval = hcd->driver->submit_single_step_set_feature(hcd, urb, 0);
+ 	if (!retval && !wait_for_completion_timeout(&done,
+ 						msecs_to_jiffies(2000))) {
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 46db600fdd824e..bfd97cad8aa4d7 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -371,6 +371,7 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	{ USB_DEVICE(0x0781, 0x5591), .driver_info = USB_QUIRK_NO_LPM },
+ 
+ 	/* SanDisk Corp. SanDisk 3.2Gen1 */
++	{ USB_DEVICE(0x0781, 0x5596), .driver_info = USB_QUIRK_DELAY_INIT },
+ 	{ USB_DEVICE(0x0781, 0x55a3), .driver_info = USB_QUIRK_DELAY_INIT },
+ 
+ 	/* SanDisk Extreme 55AE */
+diff --git a/drivers/usb/dwc3/dwc3-imx8mp.c b/drivers/usb/dwc3/dwc3-imx8mp.c
+index e99faf014c78a6..449c12bb1d4b97 100644
+--- a/drivers/usb/dwc3/dwc3-imx8mp.c
++++ b/drivers/usb/dwc3/dwc3-imx8mp.c
+@@ -244,7 +244,7 @@ static int dwc3_imx8mp_probe(struct platform_device *pdev)
+ 					IRQF_ONESHOT, dev_name(dev), dwc3_imx);
+ 	if (err) {
+ 		dev_err(dev, "failed to request IRQ #%d --> %d\n", irq, err);
+-		goto depopulate;
++		goto put_dwc3;
+ 	}
+ 
+ 	device_set_wakeup_capable(dev, true);
+@@ -252,6 +252,8 @@ static int dwc3_imx8mp_probe(struct platform_device *pdev)
+ 
+ 	return 0;
+ 
++put_dwc3:
++	put_device(&dwc3_imx->dwc3->dev);
+ depopulate:
+ 	of_platform_depopulate(dev);
+ remove_swnode:
+@@ -265,8 +267,11 @@ static int dwc3_imx8mp_probe(struct platform_device *pdev)
+ 
+ static void dwc3_imx8mp_remove(struct platform_device *pdev)
+ {
++	struct dwc3_imx8mp *dwc3_imx = platform_get_drvdata(pdev);
+ 	struct device *dev = &pdev->dev;
+ 
++	put_device(&dwc3_imx->dwc3->dev);
++
+ 	pm_runtime_get_sync(dev);
+ 	of_platform_depopulate(dev);
+ 	device_remove_software_node(dev);
+diff --git a/drivers/usb/dwc3/dwc3-meson-g12a.c b/drivers/usb/dwc3/dwc3-meson-g12a.c
+index 2c07c038b584dc..6ea1a876203d9a 100644
+--- a/drivers/usb/dwc3/dwc3-meson-g12a.c
++++ b/drivers/usb/dwc3/dwc3-meson-g12a.c
+@@ -837,6 +837,9 @@ static void dwc3_meson_g12a_remove(struct platform_device *pdev)
+ 
+ 	usb_role_switch_unregister(priv->role_switch);
+ 
++	put_device(priv->switch_desc.udc);
++	put_device(priv->switch_desc.usb2_port);
++
+ 	of_platform_depopulate(dev);
+ 
+ 	for (i = 0 ; i < PHY_COUNT ; ++i) {
+diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
+index 54a4ee2b90b7f4..39c72cb52ce76a 100644
+--- a/drivers/usb/dwc3/dwc3-pci.c
++++ b/drivers/usb/dwc3/dwc3-pci.c
+@@ -41,6 +41,7 @@
+ #define PCI_DEVICE_ID_INTEL_TGPLP		0xa0ee
+ #define PCI_DEVICE_ID_INTEL_TGPH		0x43ee
+ #define PCI_DEVICE_ID_INTEL_JSP			0x4dee
++#define PCI_DEVICE_ID_INTEL_WCL			0x4d7e
+ #define PCI_DEVICE_ID_INTEL_ADL			0x460e
+ #define PCI_DEVICE_ID_INTEL_ADL_PCH		0x51ee
+ #define PCI_DEVICE_ID_INTEL_ADLN		0x465e
+@@ -431,6 +432,7 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
+ 	{ PCI_DEVICE_DATA(INTEL, TGPLP, &dwc3_pci_intel_swnode) },
+ 	{ PCI_DEVICE_DATA(INTEL, TGPH, &dwc3_pci_intel_swnode) },
+ 	{ PCI_DEVICE_DATA(INTEL, JSP, &dwc3_pci_intel_swnode) },
++	{ PCI_DEVICE_DATA(INTEL, WCL, &dwc3_pci_intel_swnode) },
+ 	{ PCI_DEVICE_DATA(INTEL, ADL, &dwc3_pci_intel_swnode) },
+ 	{ PCI_DEVICE_DATA(INTEL, ADL_PCH, &dwc3_pci_intel_swnode) },
+ 	{ PCI_DEVICE_DATA(INTEL, ADLN, &dwc3_pci_intel_swnode) },
+diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
+index 874497f86499b3..876a839f2d1d09 100644
+--- a/drivers/usb/dwc3/ep0.c
++++ b/drivers/usb/dwc3/ep0.c
+@@ -288,7 +288,9 @@ void dwc3_ep0_out_start(struct dwc3 *dwc)
+ 	dwc3_ep0_prepare_one_trb(dep, dwc->ep0_trb_addr, 8,
+ 			DWC3_TRBCTL_CONTROL_SETUP, false);
+ 	ret = dwc3_ep0_start_trans(dep);
+-	WARN_ON(ret < 0);
++	if (ret < 0)
++		dev_err(dwc->dev, "ep0 out start transfer failed: %d\n", ret);
++
+ 	for (i = 2; i < DWC3_ENDPOINTS_NUM; i++) {
+ 		struct dwc3_ep *dwc3_ep;
+ 
+@@ -1061,7 +1063,9 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
+ 		ret = dwc3_ep0_start_trans(dep);
+ 	}
+ 
+-	WARN_ON(ret < 0);
++	if (ret < 0)
++		dev_err(dwc->dev,
++			"ep0 data phase start transfer failed: %d\n", ret);
+ }
+ 
+ static int dwc3_ep0_start_control_status(struct dwc3_ep *dep)
+@@ -1078,7 +1082,12 @@ static int dwc3_ep0_start_control_status(struct dwc3_ep *dep)
+ 
+ static void __dwc3_ep0_do_control_status(struct dwc3 *dwc, struct dwc3_ep *dep)
+ {
+-	WARN_ON(dwc3_ep0_start_control_status(dep));
++	int	ret;
++
++	ret = dwc3_ep0_start_control_status(dep);
++	if (ret)
++		dev_err(dwc->dev,
++			"ep0 status phase start transfer failed: %d\n", ret);
+ }
+ 
+ static void dwc3_ep0_do_control_status(struct dwc3 *dwc,
+@@ -1121,7 +1130,10 @@ void dwc3_ep0_end_control_data(struct dwc3 *dwc, struct dwc3_ep *dep)
+ 	cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
+ 	memset(&params, 0, sizeof(params));
+ 	ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
+-	WARN_ON_ONCE(ret);
++	if (ret)
++		dev_err_ratelimited(dwc->dev,
++			"ep0 data phase end transfer failed: %d\n", ret);
++
+ 	dep->resource_index = 0;
+ }
+ 
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index 37ae1dd3345d0d..c137b2f395c325 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -1763,7 +1763,11 @@ static int __dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, bool int
+ 		dep->flags |= DWC3_EP_DELAY_STOP;
+ 		return 0;
+ 	}
+-	WARN_ON_ONCE(ret);
++
++	if (ret)
++		dev_err_ratelimited(dep->dwc->dev,
++				"end transfer failed: %d\n", ret);
++
+ 	dep->resource_index = 0;
+ 
+ 	if (!interrupt)
+@@ -3707,6 +3711,15 @@ static void dwc3_gadget_endpoint_transfer_complete(struct dwc3_ep *dep,
+ static void dwc3_gadget_endpoint_transfer_not_ready(struct dwc3_ep *dep,
+ 		const struct dwc3_event_depevt *event)
+ {
++	/*
++	 * During a device-initiated disconnect, a late xferNotReady event can
++	 * be generated after the End Transfer command resets the event filter,
++	 * but before the controller is halted. Ignore it to prevent a new
++	 * transfer from starting.
++	 */
++	if (!dep->dwc->connected)
++		return;
++
+ 	dwc3_gadget_endpoint_frame_from_event(dep, event);
+ 
+ 	/*
+@@ -4008,7 +4021,9 @@ static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
+ 		dep->flags &= ~DWC3_EP_STALL;
+ 
+ 		ret = dwc3_send_clear_stall_ep_cmd(dep);
+-		WARN_ON_ONCE(ret);
++		if (ret)
++			dev_err_ratelimited(dwc->dev,
++				"failed to clear STALL on %s\n", dep->name);
+ 	}
+ }
+ 
+diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
+index a93ad93390ba17..34685c714473dd 100644
+--- a/drivers/usb/gadget/udc/renesas_usb3.c
++++ b/drivers/usb/gadget/udc/renesas_usb3.c
+@@ -2658,6 +2658,7 @@ static void renesas_usb3_remove(struct platform_device *pdev)
+ 	struct renesas_usb3 *usb3 = platform_get_drvdata(pdev);
+ 
+ 	debugfs_remove_recursive(usb3->dentry);
++	put_device(usb3->host_dev);
+ 	device_remove_file(&pdev->dev, &dev_attr_role);
+ 
+ 	cancel_work_sync(&usb3->role_work);
+diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
+index 1952e05033407f..69aedce9d67bcd 100644
+--- a/drivers/usb/host/xhci-hub.c
++++ b/drivers/usb/host/xhci-hub.c
+@@ -704,8 +704,7 @@ static int xhci_enter_test_mode(struct xhci_hcd *xhci,
+ 		if (!xhci->devs[i])
+ 			continue;
+ 
+-		retval = xhci_disable_slot(xhci, i);
+-		xhci_free_virt_device(xhci, i);
++		retval = xhci_disable_and_free_slot(xhci, i);
+ 		if (retval)
+ 			xhci_err(xhci, "Failed to disable slot %d, %d. Enter test mode anyway\n",
+ 				 i, retval);
+diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
+index 1111650757eab3..69188afa526660 100644
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -842,21 +842,20 @@ int xhci_alloc_tt_info(struct xhci_hcd *xhci,
+  * will be manipulated by the configure endpoint, allocate device, or update
+  * hub functions while this function is removing the TT entries from the list.
+  */
+-void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
++void xhci_free_virt_device(struct xhci_hcd *xhci, struct xhci_virt_device *dev,
++		int slot_id)
+ {
+-	struct xhci_virt_device *dev;
+ 	int i;
+ 	int old_active_eps = 0;
+ 
+ 	/* Slot ID 0 is reserved */
+-	if (slot_id == 0 || !xhci->devs[slot_id])
++	if (slot_id == 0 || !dev)
+ 		return;
+ 
+-	dev = xhci->devs[slot_id];
+-
+-	xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
+-	if (!dev)
+-		return;
++	/* If device ctx array still points to _this_ device, clear it */
++	if (dev->out_ctx &&
++	    xhci->dcbaa->dev_context_ptrs[slot_id] == cpu_to_le64(dev->out_ctx->dma))
++		xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
+ 
+ 	trace_xhci_free_virt_device(dev);
+ 
+@@ -897,8 +896,9 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
+ 		dev->udev->slot_id = 0;
+ 	if (dev->rhub_port && dev->rhub_port->slot_id == slot_id)
+ 		dev->rhub_port->slot_id = 0;
+-	kfree(xhci->devs[slot_id]);
+-	xhci->devs[slot_id] = NULL;
++	if (xhci->devs[slot_id] == dev)
++		xhci->devs[slot_id] = NULL;
++	kfree(dev);
+ }
+ 
+ /*
+@@ -939,7 +939,7 @@ static void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_i
+ out:
+ 	/* we are now at a leaf device */
+ 	xhci_debugfs_remove_slot(xhci, slot_id);
+-	xhci_free_virt_device(xhci, slot_id);
++	xhci_free_virt_device(xhci, vdev, slot_id);
+ }
+ 
+ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
+diff --git a/drivers/usb/host/xhci-pci-renesas.c b/drivers/usb/host/xhci-pci-renesas.c
+index 65fc9319d5e70f..d8bd8813945608 100644
+--- a/drivers/usb/host/xhci-pci-renesas.c
++++ b/drivers/usb/host/xhci-pci-renesas.c
+@@ -47,8 +47,9 @@
+ #define RENESAS_ROM_ERASE_MAGIC				0x5A65726F
+ #define RENESAS_ROM_WRITE_MAGIC				0x53524F4D
+ 
+-#define RENESAS_RETRY	10000
+-#define RENESAS_DELAY	10
++#define RENESAS_RETRY			50000	/* 50000 * RENESAS_DELAY ~= 500ms */
++#define RENESAS_CHIP_ERASE_RETRY	500000	/* 500000 * RENESAS_DELAY ~= 5s */
++#define RENESAS_DELAY			10
+ 
+ #define RENESAS_FW_NAME	"renesas_usb_fw.mem"
+ 
+@@ -407,7 +408,7 @@ static void renesas_rom_erase(struct pci_dev *pdev)
+ 	/* sleep a bit while ROM is erased */
+ 	msleep(20);
+ 
+-	for (i = 0; i < RENESAS_RETRY; i++) {
++	for (i = 0; i < RENESAS_CHIP_ERASE_RETRY; i++) {
+ 		retval = pci_read_config_byte(pdev, RENESAS_ROM_STATUS,
+ 					      &status);
+ 		status &= RENESAS_ROM_STATUS_ERASE;
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index f6ecb3b9fb14e0..1002fa51a25aa2 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -1562,7 +1562,8 @@ static void xhci_handle_cmd_enable_slot(int slot_id, struct xhci_command *comman
+ 		command->slot_id = 0;
+ }
+ 
+-static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id)
++static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id,
++					u32 cmd_comp_code)
+ {
+ 	struct xhci_virt_device *virt_dev;
+ 	struct xhci_slot_ctx *slot_ctx;
+@@ -1577,6 +1578,10 @@ static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id)
+ 	if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
+ 		/* Delete default control endpoint resources */
+ 		xhci_free_device_endpoint_resources(xhci, virt_dev, true);
++	if (cmd_comp_code == COMP_SUCCESS) {
++		xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
++		xhci->devs[slot_id] = NULL;
++	}
+ }
+ 
+ static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id)
+@@ -1824,7 +1829,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
+ 		xhci_handle_cmd_enable_slot(slot_id, cmd, cmd_comp_code);
+ 		break;
+ 	case TRB_DISABLE_SLOT:
+-		xhci_handle_cmd_disable_slot(xhci, slot_id);
++		xhci_handle_cmd_disable_slot(xhci, slot_id, cmd_comp_code);
+ 		break;
+ 	case TRB_CONFIG_EP:
+ 		if (!cmd->completion)
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index e399638d60004e..d5bcd5475b72b1 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -3763,8 +3763,7 @@ static int xhci_discover_or_reset_device(struct usb_hcd *hcd,
+ 		 * Obtaining a new device slot to inform the xHCI host that
+ 		 * the USB device has been reset.
+ 		 */
+-		ret = xhci_disable_slot(xhci, udev->slot_id);
+-		xhci_free_virt_device(xhci, udev->slot_id);
++		ret = xhci_disable_and_free_slot(xhci, udev->slot_id);
+ 		if (!ret) {
+ 			ret = xhci_alloc_dev(hcd, udev);
+ 			if (ret == 1)
+@@ -3919,7 +3918,7 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
+ 	xhci_disable_slot(xhci, udev->slot_id);
+ 
+ 	spin_lock_irqsave(&xhci->lock, flags);
+-	xhci_free_virt_device(xhci, udev->slot_id);
++	xhci_free_virt_device(xhci, virt_dev, udev->slot_id);
+ 	spin_unlock_irqrestore(&xhci->lock, flags);
+ 
+ }
+@@ -3968,6 +3967,16 @@ int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
+ 	return 0;
+ }
+ 
++int xhci_disable_and_free_slot(struct xhci_hcd *xhci, u32 slot_id)
++{
++	struct xhci_virt_device *vdev = xhci->devs[slot_id];
++	int ret;
++
++	ret = xhci_disable_slot(xhci, slot_id);
++	xhci_free_virt_device(xhci, vdev, slot_id);
++	return ret;
++}
++
+ /*
+  * Checks if we have enough host controller resources for the default control
+  * endpoint.
+@@ -4074,8 +4083,7 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
+ 	return 1;
+ 
+ disable_slot:
+-	xhci_disable_slot(xhci, udev->slot_id);
+-	xhci_free_virt_device(xhci, udev->slot_id);
++	xhci_disable_and_free_slot(xhci, udev->slot_id);
+ 
+ 	return 0;
+ }
+@@ -4211,8 +4219,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
+ 		dev_warn(&udev->dev, "Device not responding to setup %s.\n", act);
+ 
+ 		mutex_unlock(&xhci->mutex);
+-		ret = xhci_disable_slot(xhci, udev->slot_id);
+-		xhci_free_virt_device(xhci, udev->slot_id);
++		ret = xhci_disable_and_free_slot(xhci, udev->slot_id);
+ 		if (!ret) {
+ 			if (xhci_alloc_dev(hcd, udev) == 1)
+ 				xhci_setup_addressable_virt_dev(xhci, udev);
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index 11580495e09c12..67ee2e04994330 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -1774,7 +1774,7 @@ void xhci_dbg_trace(struct xhci_hcd *xhci, void (*trace)(struct va_format *),
+ /* xHCI memory management */
+ void xhci_mem_cleanup(struct xhci_hcd *xhci);
+ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags);
+-void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id);
++void xhci_free_virt_device(struct xhci_hcd *xhci, struct xhci_virt_device *dev, int slot_id);
+ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, struct usb_device *udev, gfp_t flags);
+ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev);
+ void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
+@@ -1866,6 +1866,7 @@ void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
+ int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
+ 			   struct usb_tt *tt, gfp_t mem_flags);
+ int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id);
++int xhci_disable_and_free_slot(struct xhci_hcd *xhci, u32 slot_id);
+ int xhci_ext_cap_init(struct xhci_hcd *xhci);
+ 
+ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup);
+diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
+index b4a4c1df4e0d96..a4668c6d575dcf 100644
+--- a/drivers/usb/musb/omap2430.c
++++ b/drivers/usb/musb/omap2430.c
+@@ -400,7 +400,7 @@ static int omap2430_probe(struct platform_device *pdev)
+ 	ret = platform_device_add_resources(musb, pdev->resource, pdev->num_resources);
+ 	if (ret) {
+ 		dev_err(&pdev->dev, "failed to add resources\n");
+-		goto err2;
++		goto err_put_control_otghs;
+ 	}
+ 
+ 	if (populate_irqs) {
+@@ -413,7 +413,7 @@ static int omap2430_probe(struct platform_device *pdev)
+ 		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ 		if (!res) {
+ 			ret = -EINVAL;
+-			goto err2;
++			goto err_put_control_otghs;
+ 		}
+ 
+ 		musb_res[i].start = res->start;
+@@ -441,14 +441,14 @@ static int omap2430_probe(struct platform_device *pdev)
+ 		ret = platform_device_add_resources(musb, musb_res, i);
+ 		if (ret) {
+ 			dev_err(&pdev->dev, "failed to add IRQ resources\n");
+-			goto err2;
++			goto err_put_control_otghs;
+ 		}
+ 	}
+ 
+ 	ret = platform_device_add_data(musb, pdata, sizeof(*pdata));
+ 	if (ret) {
+ 		dev_err(&pdev->dev, "failed to add platform_data\n");
+-		goto err2;
++		goto err_put_control_otghs;
+ 	}
+ 
+ 	pm_runtime_enable(glue->dev);
+@@ -463,7 +463,9 @@ static int omap2430_probe(struct platform_device *pdev)
+ 
+ err3:
+ 	pm_runtime_disable(glue->dev);
+-
++err_put_control_otghs:
++	if (!IS_ERR(glue->control_otghs))
++		put_device(glue->control_otghs);
+ err2:
+ 	platform_device_put(musb);
+ 
+@@ -477,6 +479,8 @@ static void omap2430_remove(struct platform_device *pdev)
+ 
+ 	platform_device_unregister(glue->musb);
+ 	pm_runtime_disable(glue->dev);
++	if (!IS_ERR(glue->control_otghs))
++		put_device(glue->control_otghs);
+ }
+ 
+ #ifdef CONFIG_PM
+diff --git a/drivers/usb/storage/realtek_cr.c b/drivers/usb/storage/realtek_cr.c
+index 0c423916d7bfa4..a026c6cb6e684b 100644
+--- a/drivers/usb/storage/realtek_cr.c
++++ b/drivers/usb/storage/realtek_cr.c
+@@ -252,7 +252,7 @@ static int rts51x_bulk_transport(struct us_data *us, u8 lun,
+ 		return USB_STOR_TRANSPORT_ERROR;
+ 	}
+ 
+-	residue = bcs->Residue;
++	residue = le32_to_cpu(bcs->Residue);
+ 	if (bcs->Tag != us->tag)
+ 		return USB_STOR_TRANSPORT_ERROR;
+ 
+diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
+index 54f0b1c83317cd..dfa5276a5a43e2 100644
+--- a/drivers/usb/storage/unusual_devs.h
++++ b/drivers/usb/storage/unusual_devs.h
+@@ -934,6 +934,13 @@ UNUSUAL_DEV(  0x05e3, 0x0723, 0x9451, 0x9451,
+ 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ 		US_FL_SANE_SENSE ),
+ 
++/* Added by Maël GUERIN <mael.guerin@murena.io> */
++UNUSUAL_DEV(  0x0603, 0x8611, 0x0000, 0xffff,
++		"Novatek",
++		"NTK96550-based camera",
++		USB_SC_SCSI, USB_PR_BULK, NULL,
++		US_FL_BULK_IGNORE_TAG ),
++
+ /*
+  * Reported by Hanno Boeck <hanno@gmx.de>
+  * Taken from the Lycoris Kernel
+@@ -1494,6 +1501,28 @@ UNUSUAL_DEV( 0x0bc2, 0x3332, 0x0000, 0x9999,
+ 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ 		US_FL_NO_WP_DETECT ),
+ 
++/*
++ * Reported by Zenm Chen <zenmchen@gmail.com>
++ * Ignore driver CD mode, otherwise usb_modeswitch may fail to switch
++ * the device into Wi-Fi mode.
++ */
++UNUSUAL_DEV( 0x0bda, 0x1a2b, 0x0000, 0xffff,
++		"Realtek",
++		"DISK",
++		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++		US_FL_IGNORE_DEVICE ),
++
++/*
++ * Reported by Zenm Chen <zenmchen@gmail.com>
++ * Ignore driver CD mode, otherwise usb_modeswitch may fail to switch
++ * the device into Wi-Fi mode.
++ */
++UNUSUAL_DEV( 0x0bda, 0xa192, 0x0000, 0xffff,
++		"Realtek",
++		"DISK",
++		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++		US_FL_IGNORE_DEVICE ),
++
+ UNUSUAL_DEV(  0x0d49, 0x7310, 0x0000, 0x9999,
+ 		"Maxtor",
+ 		"USB to SATA",
+diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
+index 5c75634b8fa380..c9c3dea8ba0755 100644
+--- a/drivers/usb/typec/class.c
++++ b/drivers/usb/typec/class.c
+@@ -10,6 +10,7 @@
+ #include <linux/mutex.h>
+ #include <linux/property.h>
+ #include <linux/slab.h>
++#include <linux/string_choices.h>
+ #include <linux/usb/pd_vdo.h>
+ #include <linux/usb/typec_mux.h>
+ #include <linux/usb/typec_retimer.h>
+@@ -354,7 +355,7 @@ active_show(struct device *dev, struct device_attribute *attr, char *buf)
+ {
+ 	struct typec_altmode *alt = to_typec_altmode(dev);
+ 
+-	return sprintf(buf, "%s\n", alt->active ? "yes" : "no");
++	return sprintf(buf, "%s\n", str_yes_no(alt->active));
+ }
+ 
+ static ssize_t active_store(struct device *dev, struct device_attribute *attr,
+@@ -630,7 +631,7 @@ static ssize_t supports_usb_power_delivery_show(struct device *dev,
+ {
+ 	struct typec_partner *p = to_typec_partner(dev);
+ 
+-	return sprintf(buf, "%s\n", p->usb_pd ? "yes" : "no");
++	return sprintf(buf, "%s\n", str_yes_no(p->usb_pd));
+ }
+ static DEVICE_ATTR_RO(supports_usb_power_delivery);
+ 
+@@ -1688,7 +1689,7 @@ static ssize_t vconn_source_show(struct device *dev,
+ 	struct typec_port *port = to_typec_port(dev);
+ 
+ 	return sprintf(buf, "%s\n",
+-		       port->vconn_role == TYPEC_SOURCE ? "yes" : "no");
++		       str_yes_no(port->vconn_role == TYPEC_SOURCE));
+ }
+ static DEVICE_ATTR_RW(vconn_source);
+ 
+diff --git a/drivers/usb/typec/tcpm/fusb302.c b/drivers/usb/typec/tcpm/fusb302.c
+index e2fe479e16ada0..870a71f953f6cd 100644
+--- a/drivers/usb/typec/tcpm/fusb302.c
++++ b/drivers/usb/typec/tcpm/fusb302.c
+@@ -24,6 +24,7 @@
+ #include <linux/slab.h>
+ #include <linux/spinlock.h>
+ #include <linux/string.h>
++#include <linux/string_choices.h>
+ #include <linux/types.h>
+ #include <linux/usb.h>
+ #include <linux/usb/typec.h>
+@@ -103,6 +104,7 @@ struct fusb302_chip {
+ 	bool vconn_on;
+ 	bool vbus_on;
+ 	bool charge_on;
++	bool pd_rx_on;
+ 	bool vbus_present;
+ 	enum typec_cc_polarity cc_polarity;
+ 	enum typec_cc_status cc1;
+@@ -733,7 +735,7 @@ static int tcpm_set_vconn(struct tcpc_dev *dev, bool on)
+ 
+ 	mutex_lock(&chip->lock);
+ 	if (chip->vconn_on == on) {
+-		fusb302_log(chip, "vconn is already %s", on ? "On" : "Off");
++		fusb302_log(chip, "vconn is already %s", str_on_off(on));
+ 		goto done;
+ 	}
+ 	if (on) {
+@@ -746,7 +748,7 @@ static int tcpm_set_vconn(struct tcpc_dev *dev, bool on)
+ 	if (ret < 0)
+ 		goto done;
+ 	chip->vconn_on = on;
+-	fusb302_log(chip, "vconn := %s", on ? "On" : "Off");
++	fusb302_log(chip, "vconn := %s", str_on_off(on));
+ done:
+ 	mutex_unlock(&chip->lock);
+ 
+@@ -761,7 +763,7 @@ static int tcpm_set_vbus(struct tcpc_dev *dev, bool on, bool charge)
+ 
+ 	mutex_lock(&chip->lock);
+ 	if (chip->vbus_on == on) {
+-		fusb302_log(chip, "vbus is already %s", on ? "On" : "Off");
++		fusb302_log(chip, "vbus is already %s", str_on_off(on));
+ 	} else {
+ 		if (on)
+ 			ret = regulator_enable(chip->vbus);
+@@ -769,15 +771,14 @@ static int tcpm_set_vbus(struct tcpc_dev *dev, bool on, bool charge)
+ 			ret = regulator_disable(chip->vbus);
+ 		if (ret < 0) {
+ 			fusb302_log(chip, "cannot %s vbus regulator, ret=%d",
+-				    on ? "enable" : "disable", ret);
++				    str_enable_disable(on), ret);
+ 			goto done;
+ 		}
+ 		chip->vbus_on = on;
+-		fusb302_log(chip, "vbus := %s", on ? "On" : "Off");
++		fusb302_log(chip, "vbus := %s", str_on_off(on));
+ 	}
+ 	if (chip->charge_on == charge)
+-		fusb302_log(chip, "charge is already %s",
+-			    charge ? "On" : "Off");
++		fusb302_log(chip, "charge is already %s", str_on_off(charge));
+ 	else
+ 		chip->charge_on = charge;
+ 
+@@ -841,6 +842,11 @@ static int tcpm_set_pd_rx(struct tcpc_dev *dev, bool on)
+ 	int ret = 0;
+ 
+ 	mutex_lock(&chip->lock);
++	if (chip->pd_rx_on == on) {
++		fusb302_log(chip, "pd is already %s", str_on_off(on));
++		goto done;
++	}
++
+ 	ret = fusb302_pd_rx_flush(chip);
+ 	if (ret < 0) {
+ 		fusb302_log(chip, "cannot flush pd rx buffer, ret=%d", ret);
+@@ -854,16 +860,18 @@ static int tcpm_set_pd_rx(struct tcpc_dev *dev, bool on)
+ 	ret = fusb302_pd_set_auto_goodcrc(chip, on);
+ 	if (ret < 0) {
+ 		fusb302_log(chip, "cannot turn %s auto GCRC, ret=%d",
+-			    on ? "on" : "off", ret);
++			    str_on_off(on), ret);
+ 		goto done;
+ 	}
+ 	ret = fusb302_pd_set_interrupts(chip, on);
+ 	if (ret < 0) {
+ 		fusb302_log(chip, "cannot turn %s pd interrupts, ret=%d",
+-			    on ? "on" : "off", ret);
++			    str_on_off(on), ret);
+ 		goto done;
+ 	}
+-	fusb302_log(chip, "pd := %s", on ? "on" : "off");
++
++	chip->pd_rx_on = on;
++	fusb302_log(chip, "pd := %s", str_on_off(on));
+ done:
+ 	mutex_unlock(&chip->lock);
+ 
+@@ -1531,7 +1539,7 @@ static void fusb302_irq_work(struct work_struct *work)
+ 	if (interrupt & FUSB_REG_INTERRUPT_VBUSOK) {
+ 		vbus_present = !!(status0 & FUSB_REG_STATUS0_VBUSOK);
+ 		fusb302_log(chip, "IRQ: VBUS_OK, vbus=%s",
+-			    vbus_present ? "On" : "Off");
++			    str_on_off(vbus_present));
+ 		if (vbus_present != chip->vbus_present) {
+ 			chip->vbus_present = vbus_present;
+ 			tcpm_vbus_change(chip->tcpm_port);
+@@ -1562,7 +1570,7 @@ static void fusb302_irq_work(struct work_struct *work)
+ 	if ((interrupt & FUSB_REG_INTERRUPT_COMP_CHNG) && intr_comp_chng) {
+ 		comp_result = !!(status0 & FUSB_REG_STATUS0_COMP);
+ 		fusb302_log(chip, "IRQ: COMP_CHNG, comp=%s",
+-			    comp_result ? "true" : "false");
++			    str_true_false(comp_result));
+ 		if (comp_result) {
+ 			/* cc level > Rd_threshold, detach */
+ 			chip->cc1 = TYPEC_CC_OPEN;
+diff --git a/drivers/usb/typec/tcpm/maxim_contaminant.c b/drivers/usb/typec/tcpm/maxim_contaminant.c
+index 0cdda06592fd3c..af8da6dc60ae0b 100644
+--- a/drivers/usb/typec/tcpm/maxim_contaminant.c
++++ b/drivers/usb/typec/tcpm/maxim_contaminant.c
+@@ -188,6 +188,11 @@ static int max_contaminant_read_comparators(struct max_tcpci_chip *chip, u8 *ven
+ 	if (ret < 0)
+ 		return ret;
+ 
++	/* Disable low power mode */
++	ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, CCLPMODESEL,
++				 FIELD_PREP(CCLPMODESEL,
++					    LOW_POWER_MODE_DISABLE));
++
+ 	/* Sleep to allow comparators settle */
+ 	usleep_range(5000, 6000);
+ 	ret = regmap_update_bits(regmap, TCPC_TCPC_CTRL, TCPC_TCPC_CTRL_ORIENTATION, PLUG_ORNT_CC1);
+@@ -324,6 +329,39 @@ static int max_contaminant_enable_dry_detection(struct max_tcpci_chip *chip)
+ 	return 0;
+ }
+ 
++static int max_contaminant_enable_toggling(struct max_tcpci_chip *chip)
++{
++	struct regmap *regmap = chip->data.regmap;
++	int ret;
++
++	/* Disable dry detection if enabled. */
++	ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, CCLPMODESEL,
++				 FIELD_PREP(CCLPMODESEL,
++					    LOW_POWER_MODE_DISABLE));
++	if (ret)
++		return ret;
++
++	ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL1, CCCONNDRY, 0);
++	if (ret)
++		return ret;
++
++	ret = max_tcpci_write8(chip, TCPC_ROLE_CTRL, TCPC_ROLE_CTRL_DRP |
++			       FIELD_PREP(TCPC_ROLE_CTRL_CC1,
++					  TCPC_ROLE_CTRL_CC_RD) |
++			       FIELD_PREP(TCPC_ROLE_CTRL_CC2,
++					  TCPC_ROLE_CTRL_CC_RD));
++	if (ret)
++		return ret;
++
++	ret = regmap_update_bits(regmap, TCPC_TCPC_CTRL,
++				 TCPC_TCPC_CTRL_EN_LK4CONN_ALRT,
++				 TCPC_TCPC_CTRL_EN_LK4CONN_ALRT);
++	if (ret)
++		return ret;
++
++	return max_tcpci_write8(chip, TCPC_COMMAND, TCPC_CMD_LOOK4CONNECTION);
++}
++
+ bool max_contaminant_is_contaminant(struct max_tcpci_chip *chip, bool disconnect_while_debounce,
+ 				    bool *cc_handled)
+ {
+@@ -340,6 +378,12 @@ bool max_contaminant_is_contaminant(struct max_tcpci_chip *chip, bool disconnect
+ 	if (ret < 0)
+ 		return false;
+ 
++	if (cc_status & TCPC_CC_STATUS_TOGGLING) {
++		if (chip->contaminant_state == DETECTED)
++			return true;
++		return false;
++	}
++
+ 	if (chip->contaminant_state == NOT_DETECTED || chip->contaminant_state == SINK) {
+ 		if (!disconnect_while_debounce)
+ 			msleep(100);
+@@ -372,6 +416,12 @@ bool max_contaminant_is_contaminant(struct max_tcpci_chip *chip, bool disconnect
+ 				max_contaminant_enable_dry_detection(chip);
+ 				return true;
+ 			}
++
++			ret = max_contaminant_enable_toggling(chip);
++			if (ret)
++				dev_err(chip->dev,
++					"Failed to enable toggling, ret=%d",
++					ret);
+ 		}
+ 	} else if (chip->contaminant_state == DETECTED) {
+ 		if (!(cc_status & TCPC_CC_STATUS_TOGGLING)) {
+@@ -379,6 +429,14 @@ bool max_contaminant_is_contaminant(struct max_tcpci_chip *chip, bool disconnect
+ 			if (chip->contaminant_state == DETECTED) {
+ 				max_contaminant_enable_dry_detection(chip);
+ 				return true;
++			} else {
++				ret = max_contaminant_enable_toggling(chip);
++				if (ret) {
++					dev_err(chip->dev,
++						"Failed to enable toggling, ret=%d",
++						ret);
++					return true;
++				}
+ 			}
+ 		}
+ 	}
+diff --git a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c
+index 726423684bae0a..18303b34594bbf 100644
+--- a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c
++++ b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c
+@@ -12,6 +12,7 @@
+ #include <linux/regmap.h>
+ #include <linux/regulator/consumer.h>
+ #include <linux/slab.h>
++#include <linux/string_choices.h>
+ #include <linux/usb/pd.h>
+ #include <linux/usb/tcpm.h>
+ #include "qcom_pmic_typec.h"
+@@ -418,7 +419,7 @@ static int qcom_pmic_typec_pdphy_set_pd_rx(struct tcpc_dev *tcpc, bool on)
+ 
+ 	spin_unlock_irqrestore(&pmic_typec_pdphy->lock, flags);
+ 
+-	dev_dbg(pmic_typec_pdphy->dev, "set_pd_rx: %s\n", on ? "on" : "off");
++	dev_dbg(pmic_typec_pdphy->dev, "set_pd_rx: %s\n", str_on_off(on));
+ 
+ 	return ret;
+ }
+diff --git a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy_stub.c b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy_stub.c
+index df79059cda6755..8fac171778daf4 100644
+--- a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy_stub.c
++++ b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy_stub.c
+@@ -12,6 +12,7 @@
+ #include <linux/regmap.h>
+ #include <linux/regulator/consumer.h>
+ #include <linux/slab.h>
++#include <linux/string_choices.h>
+ #include <linux/usb/pd.h>
+ #include <linux/usb/tcpm.h>
+ #include "qcom_pmic_typec.h"
+@@ -38,7 +39,7 @@ static int qcom_pmic_typec_pdphy_stub_set_pd_rx(struct tcpc_dev *tcpc, bool on)
+ 	struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
+ 	struct device *dev = tcpm->dev;
+ 
+-	dev_dbg(dev, "set_pd_rx: %s\n", on ? "on" : "off");
++	dev_dbg(dev, "set_pd_rx: %s\n", str_on_off(on));
+ 
+ 	return 0;
+ }
+diff --git a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.c b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.c
+index c37dede62e12cd..4fc83dcfae643e 100644
+--- a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.c
++++ b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.c
+@@ -13,6 +13,7 @@
+ #include <linux/regmap.h>
+ #include <linux/regulator/consumer.h>
+ #include <linux/slab.h>
++#include <linux/string_choices.h>
+ #include <linux/usb/tcpm.h>
+ #include <linux/usb/typec_mux.h>
+ #include <linux/workqueue.h>
+@@ -562,7 +563,8 @@ static int qcom_pmic_typec_port_set_vconn(struct tcpc_dev *tcpc, bool on)
+ 	spin_unlock_irqrestore(&pmic_typec_port->lock, flags);
+ 
+ 	dev_dbg(dev, "set_vconn: orientation %d control 0x%08x state %s cc %s vconn %s\n",
+-		orientation, value, on ? "on" : "off", misc_to_vconn(misc), misc_to_cc(misc));
++		orientation, value, str_on_off(on), misc_to_vconn(misc),
++		misc_to_cc(misc));
+ 
+ 	return ret;
+ }
+diff --git a/drivers/usb/typec/tcpm/tcpci_maxim.h b/drivers/usb/typec/tcpm/tcpci_maxim.h
+index 76270d5c283880..b33540a42a953d 100644
+--- a/drivers/usb/typec/tcpm/tcpci_maxim.h
++++ b/drivers/usb/typec/tcpm/tcpci_maxim.h
+@@ -21,6 +21,7 @@
+ #define CCOVPDIS                                BIT(6)
+ #define SBURPCTRL                               BIT(5)
+ #define CCLPMODESEL                             GENMASK(4, 3)
++#define LOW_POWER_MODE_DISABLE                  0
+ #define ULTRA_LOW_POWER_MODE                    1
+ #define CCRPCTRL                                GENMASK(2, 0)
+ #define UA_1_SRC                                1
+diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
+index aa2fa720af1551..43e3dac5129fa5 100644
+--- a/drivers/usb/typec/tcpm/tcpm.c
++++ b/drivers/usb/typec/tcpm/tcpm.c
+@@ -21,6 +21,7 @@
+ #include <linux/seq_file.h>
+ #include <linux/slab.h>
+ #include <linux/spinlock.h>
++#include <linux/string_choices.h>
+ #include <linux/usb.h>
+ #include <linux/usb/pd.h>
+ #include <linux/usb/pd_ado.h>
+@@ -874,8 +875,8 @@ static int tcpm_enable_auto_vbus_discharge(struct tcpm_port *port, bool enable)
+ 
+ 	if (port->tcpc->enable_auto_vbus_discharge) {
+ 		ret = port->tcpc->enable_auto_vbus_discharge(port->tcpc, enable);
+-		tcpm_log_force(port, "%s vbus discharge ret:%d", enable ? "enable" : "disable",
+-			       ret);
++		tcpm_log_force(port, "%s vbus discharge ret:%d",
++			       str_enable_disable(enable), ret);
+ 		if (!ret)
+ 			port->auto_vbus_discharge_enabled = enable;
+ 	}
+@@ -4429,7 +4430,7 @@ static void tcpm_unregister_altmodes(struct tcpm_port *port)
+ 
+ static void tcpm_set_partner_usb_comm_capable(struct tcpm_port *port, bool capable)
+ {
+-	tcpm_log(port, "Setting usb_comm capable %s", capable ? "true" : "false");
++	tcpm_log(port, "Setting usb_comm capable %s", str_true_false(capable));
+ 
+ 	if (port->tcpc->set_partner_usb_comm_capable)
+ 		port->tcpc->set_partner_usb_comm_capable(port->tcpc, capable);
+diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
+index 802153e230730b..66a0f060770ef2 100644
+--- a/drivers/vhost/vsock.c
++++ b/drivers/vhost/vsock.c
+@@ -344,6 +344,9 @@ vhost_vsock_alloc_skb(struct vhost_virtqueue *vq,
+ 
+ 	len = iov_length(vq->iov, out);
+ 
++	if (len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE + VIRTIO_VSOCK_SKB_HEADROOM)
++		return NULL;
++
+ 	/* len contains both payload and hdr */
+ 	skb = virtio_vsock_alloc_skb(len, GFP_KERNEL);
+ 	if (!skb)
+@@ -367,8 +370,7 @@ vhost_vsock_alloc_skb(struct vhost_virtqueue *vq,
+ 		return skb;
+ 
+ 	/* The pkt is too big or the length in the header is invalid */
+-	if (payload_len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE ||
+-	    payload_len + sizeof(*hdr) > len) {
++	if (payload_len + sizeof(*hdr) > len) {
+ 		kfree_skb(skb);
+ 		return NULL;
+ 	}
+diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
+index f9cdbf8c53e34b..37bd18730fe0df 100644
+--- a/drivers/video/console/vgacon.c
++++ b/drivers/video/console/vgacon.c
+@@ -1168,7 +1168,7 @@ static bool vgacon_scroll(struct vc_data *c, unsigned int t, unsigned int b,
+ 				     c->vc_screenbuf_size - delta);
+ 			c->vc_origin = vga_vram_end - c->vc_screenbuf_size;
+ 			vga_rolled_over = 0;
+-		} else if (oldo - delta >= (unsigned long)c->vc_screenbuf)
++		} else
+ 			c->vc_origin -= delta;
+ 		c->vc_scr_end = c->vc_origin + c->vc_screenbuf_size;
+ 		scr_memsetw((u16 *) (c->vc_origin), c->vc_video_erase_char,
+diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
+index 7eef79ece5b3ca..83a196521670b9 100644
+--- a/fs/btrfs/block-group.c
++++ b/fs/btrfs/block-group.c
+@@ -1481,6 +1481,32 @@ static bool clean_pinned_extents(struct btrfs_trans_handle *trans,
+ 	return ret == 0;
+ }
+ 
++/*
++ * Link the block_group to a list via bg_list.
++ *
++ * @bg:       The block_group to link to the list.
++ * @list:     The list to link it to.
++ *
++ * Use this rather than list_add_tail() directly to ensure proper respect
++ * to locking and refcounting.
++ *
++ * Returns: true if the bg was linked with a refcount bump and false otherwise.
++ */
++static bool btrfs_link_bg_list(struct btrfs_block_group *bg, struct list_head *list)
++{
++	struct btrfs_fs_info *fs_info = bg->fs_info;
++	bool added = false;
++
++	spin_lock(&fs_info->unused_bgs_lock);
++	if (list_empty(&bg->bg_list)) {
++		btrfs_get_block_group(bg);
++		list_add_tail(&bg->bg_list, list);
++		added = true;
++	}
++	spin_unlock(&fs_info->unused_bgs_lock);
++	return added;
++}
++
+ /*
+  * Process the unused_bgs list and remove any that don't have any allocated
+  * space inside of them.
+@@ -1597,8 +1623,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
+ 			 * drop under the "next" label for the
+ 			 * fs_info->unused_bgs list.
+ 			 */
+-			btrfs_get_block_group(block_group);
+-			list_add_tail(&block_group->bg_list, &retry_list);
++			btrfs_link_bg_list(block_group, &retry_list);
+ 
+ 			trace_btrfs_skip_unused_block_group(block_group);
+ 			spin_unlock(&block_group->lock);
+@@ -1621,8 +1646,10 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
+ 		ret = btrfs_zone_finish(block_group);
+ 		if (ret < 0) {
+ 			btrfs_dec_block_group_ro(block_group);
+-			if (ret == -EAGAIN)
++			if (ret == -EAGAIN) {
++				btrfs_link_bg_list(block_group, &retry_list);
+ 				ret = 0;
++			}
+ 			goto next;
+ 		}
+ 
+@@ -1971,20 +1998,8 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
+ 		spin_unlock(&space_info->lock);
+ 
+ next:
+-		if (ret && !READ_ONCE(space_info->periodic_reclaim)) {
+-			/* Refcount held by the reclaim_bgs list after splice. */
+-			spin_lock(&fs_info->unused_bgs_lock);
+-			/*
+-			 * This block group might be added to the unused list
+-			 * during the above process. Move it back to the
+-			 * reclaim list otherwise.
+-			 */
+-			if (list_empty(&bg->bg_list)) {
+-				btrfs_get_block_group(bg);
+-				list_add_tail(&bg->bg_list, &retry_list);
+-			}
+-			spin_unlock(&fs_info->unused_bgs_lock);
+-		}
++		if (ret && !READ_ONCE(space_info->periodic_reclaim))
++			btrfs_link_bg_list(bg, &retry_list);
+ 		btrfs_put_block_group(bg);
+ 
+ 		mutex_unlock(&fs_info->reclaim_bgs_lock);
+@@ -2024,13 +2039,8 @@ void btrfs_mark_bg_to_reclaim(struct btrfs_block_group *bg)
+ {
+ 	struct btrfs_fs_info *fs_info = bg->fs_info;
+ 
+-	spin_lock(&fs_info->unused_bgs_lock);
+-	if (list_empty(&bg->bg_list)) {
+-		btrfs_get_block_group(bg);
++	if (btrfs_link_bg_list(bg, &fs_info->reclaim_bgs))
+ 		trace_btrfs_add_reclaim_block_group(bg);
+-		list_add_tail(&bg->bg_list, &fs_info->reclaim_bgs);
+-	}
+-	spin_unlock(&fs_info->unused_bgs_lock);
+ }
+ 
+ static int read_bg_from_eb(struct btrfs_fs_info *fs_info, const struct btrfs_key *key,
+@@ -2807,6 +2817,7 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans)
+ 		spin_lock(&fs_info->unused_bgs_lock);
+ 		list_del_init(&block_group->bg_list);
+ 		clear_bit(BLOCK_GROUP_FLAG_NEW, &block_group->runtime_flags);
++		btrfs_put_block_group(block_group);
+ 		spin_unlock(&fs_info->unused_bgs_lock);
+ 
+ 		/*
+@@ -2945,7 +2956,7 @@ struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *tran
+ 	}
+ #endif
+ 
+-	list_add_tail(&cache->bg_list, &trans->new_bgs);
++	btrfs_link_bg_list(cache, &trans->new_bgs);
+ 	btrfs_inc_delayed_refs_rsv_bg_inserts(fs_info);
+ 
+ 	set_avail_alloc_bits(fs_info, type);
+diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
+index 3ba15d9c3e8861..81735d19feff5f 100644
+--- a/fs/btrfs/ctree.c
++++ b/fs/btrfs/ctree.c
+@@ -350,7 +350,14 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
+ 
+ 	write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid);
+ 
+-	WARN_ON(btrfs_header_generation(buf) > trans->transid);
++	if (unlikely(btrfs_header_generation(buf) > trans->transid)) {
++		btrfs_tree_unlock(cow);
++		free_extent_buffer(cow);
++		ret = -EUCLEAN;
++		btrfs_abort_transaction(trans, ret);
++		return ret;
++	}
++
+ 	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
+ 		ret = btrfs_inc_ref(trans, root, cow, 1);
+ 	else
+diff --git a/fs/btrfs/free-space-tree.c b/fs/btrfs/free-space-tree.c
+index 308abbf8855b0a..51f286d5d00ab3 100644
+--- a/fs/btrfs/free-space-tree.c
++++ b/fs/btrfs/free-space-tree.c
+@@ -1379,12 +1379,17 @@ static int __add_block_group_free_space(struct btrfs_trans_handle *trans,
+ 	clear_bit(BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE, &block_group->runtime_flags);
+ 
+ 	ret = add_new_free_space_info(trans, block_group, path);
+-	if (ret)
++	if (ret) {
++		btrfs_abort_transaction(trans, ret);
+ 		return ret;
++	}
++
++	ret = __add_to_free_space_tree(trans, block_group, path,
++				       block_group->start, block_group->length);
++	if (ret)
++		btrfs_abort_transaction(trans, ret);
+ 
+-	return __add_to_free_space_tree(trans, block_group, path,
+-					block_group->start,
+-					block_group->length);
++	return 0;
+ }
+ 
+ int add_block_group_free_space(struct btrfs_trans_handle *trans,
+@@ -1404,16 +1409,14 @@ int add_block_group_free_space(struct btrfs_trans_handle *trans,
+ 	path = btrfs_alloc_path();
+ 	if (!path) {
+ 		ret = -ENOMEM;
++		btrfs_abort_transaction(trans, ret);
+ 		goto out;
+ 	}
+ 
+ 	ret = __add_block_group_free_space(trans, block_group, path);
+-
+ out:
+ 	btrfs_free_path(path);
+ 	mutex_unlock(&block_group->free_space_lock);
+-	if (ret)
+-		btrfs_abort_transaction(trans, ret);
+ 	return ret;
+ }
+ 
+diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
+index 6b181bf9f15617..530a2bab6ada00 100644
+--- a/fs/btrfs/qgroup.c
++++ b/fs/btrfs/qgroup.c
+@@ -226,8 +226,7 @@ static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info,
+ 	return qgroup;
+ }
+ 
+-static void __del_qgroup_rb(struct btrfs_fs_info *fs_info,
+-			    struct btrfs_qgroup *qgroup)
++static void __del_qgroup_rb(struct btrfs_qgroup *qgroup)
+ {
+ 	struct btrfs_qgroup_list *list;
+ 
+@@ -258,7 +257,7 @@ static int del_qgroup_rb(struct btrfs_fs_info *fs_info, u64 qgroupid)
+ 		return -ENOENT;
+ 
+ 	rb_erase(&qgroup->node, &fs_info->qgroup_tree);
+-	__del_qgroup_rb(fs_info, qgroup);
++	__del_qgroup_rb(qgroup);
+ 	return 0;
+ }
+ 
+@@ -631,22 +630,30 @@ bool btrfs_check_quota_leak(const struct btrfs_fs_info *fs_info)
+ 
+ /*
+  * This is called from close_ctree() or open_ctree() or btrfs_quota_disable(),
+- * first two are in single-threaded paths.And for the third one, we have set
+- * quota_root to be null with qgroup_lock held before, so it is safe to clean
+- * up the in-memory structures without qgroup_lock held.
++ * first two are in single-threaded paths.
+  */
+ void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
+ {
+ 	struct rb_node *n;
+ 	struct btrfs_qgroup *qgroup;
+ 
++	/*
++	 * btrfs_quota_disable() can be called concurrently with
++	 * btrfs_qgroup_rescan() -> qgroup_rescan_zero_tracking(), so take the
++	 * lock.
++	 */
++	spin_lock(&fs_info->qgroup_lock);
+ 	while ((n = rb_first(&fs_info->qgroup_tree))) {
+ 		qgroup = rb_entry(n, struct btrfs_qgroup, node);
+ 		rb_erase(n, &fs_info->qgroup_tree);
+-		__del_qgroup_rb(fs_info, qgroup);
++		__del_qgroup_rb(qgroup);
++		spin_unlock(&fs_info->qgroup_lock);
+ 		btrfs_sysfs_del_one_qgroup(fs_info, qgroup);
+ 		kfree(qgroup);
++		spin_lock(&fs_info->qgroup_lock);
+ 	}
++	spin_unlock(&fs_info->qgroup_lock);
++
+ 	/*
+ 	 * We call btrfs_free_qgroup_config() when unmounting
+ 	 * filesystem and disabling quota, so we set qgroup_ulist
+@@ -4057,12 +4064,21 @@ btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
+ 	qgroup_rescan_zero_tracking(fs_info);
+ 
+ 	mutex_lock(&fs_info->qgroup_rescan_lock);
+-	fs_info->qgroup_rescan_running = true;
+-	btrfs_queue_work(fs_info->qgroup_rescan_workers,
+-			 &fs_info->qgroup_rescan_work);
++	/*
++	 * The rescan worker is only for full accounting qgroups, check if it's
++	 * enabled as it is pointless to queue it otherwise. A concurrent quota
++	 * disable may also have just cleared BTRFS_FS_QUOTA_ENABLED.
++	 */
++	if (btrfs_qgroup_full_accounting(fs_info)) {
++		fs_info->qgroup_rescan_running = true;
++		btrfs_queue_work(fs_info->qgroup_rescan_workers,
++				 &fs_info->qgroup_rescan_work);
++	} else {
++		ret = -ENOTCONN;
++	}
+ 	mutex_unlock(&fs_info->qgroup_rescan_lock);
+ 
+-	return 0;
++	return ret;
+ }
+ 
+ int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info,
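
The free-qgroup-config hunk above drains fs_info->qgroup_tree under fs_info->qgroup_lock, but drops the spinlock around btrfs_sysfs_del_one_qgroup() and kfree(), since both may sleep and a spinlock must not be held across a sleep. A rough userspace analog of that drop-and-retake pattern, with a pthread mutex standing in for the spinlock and free() standing in for the sysfs teardown (all names here are illustrative, none of this is kernel code):

#include <pthread.h>
#include <stdlib.h>

struct node { struct node *next; };

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *list_head;

static void drain_all(void)
{
	struct node *n;

	pthread_mutex_lock(&list_lock);
	while ((n = list_head) != NULL) {
		list_head = n->next;               /* unlink under the lock */
		pthread_mutex_unlock(&list_lock);  /* drop it for work that may block */
		free(n);                           /* stands in for sysfs teardown + kfree */
		pthread_mutex_lock(&list_lock);    /* retake before the next lookup */
	}
	pthread_mutex_unlock(&list_lock);
}

Re-reading the list head after retaking the lock is what keeps the loop safe against concurrent removals; the hunk above gets the same property by calling rb_first() again on every iteration.
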
+diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
+index c843b4aefb8ac2..41b7cbd0702548 100644
+--- a/fs/btrfs/send.c
++++ b/fs/btrfs/send.c
+@@ -4,6 +4,7 @@
+  */
+ 
+ #include <linux/bsearch.h>
++#include <linux/falloc.h>
+ #include <linux/fs.h>
+ #include <linux/file.h>
+ #include <linux/sort.h>
+@@ -178,6 +179,7 @@ struct send_ctx {
+ 	u64 cur_inode_rdev;
+ 	u64 cur_inode_last_extent;
+ 	u64 cur_inode_next_write_offset;
++	struct fs_path cur_inode_path;
+ 	bool cur_inode_new;
+ 	bool cur_inode_new_gen;
+ 	bool cur_inode_deleted;
+@@ -436,6 +438,14 @@ static void fs_path_reset(struct fs_path *p)
+ 	}
+ }
+ 
++static void init_path(struct fs_path *p)
++{
++	p->reversed = 0;
++	p->buf = p->inline_buf;
++	p->buf_len = FS_PATH_INLINE_SIZE;
++	fs_path_reset(p);
++}
++
+ static struct fs_path *fs_path_alloc(void)
+ {
+ 	struct fs_path *p;
+@@ -443,10 +453,7 @@ static struct fs_path *fs_path_alloc(void)
+ 	p = kmalloc(sizeof(*p), GFP_KERNEL);
+ 	if (!p)
+ 		return NULL;
+-	p->reversed = 0;
+-	p->buf = p->inline_buf;
+-	p->buf_len = FS_PATH_INLINE_SIZE;
+-	fs_path_reset(p);
++	init_path(p);
+ 	return p;
+ }
+ 
+@@ -471,7 +478,7 @@ static void fs_path_free(struct fs_path *p)
+ 	kfree(p);
+ }
+ 
+-static int fs_path_len(struct fs_path *p)
++static inline int fs_path_len(const struct fs_path *p)
+ {
+ 	return p->end - p->start;
+ }
+@@ -624,6 +631,14 @@ static void fs_path_unreverse(struct fs_path *p)
+ 	p->reversed = 0;
+ }
+ 
++static inline bool is_current_inode_path(const struct send_ctx *sctx,
++					 const struct fs_path *path)
++{
++	const struct fs_path *cur = &sctx->cur_inode_path;
++
++	return (strncmp(path->start, cur->start, fs_path_len(cur)) == 0);
++}
++
+ static struct btrfs_path *alloc_path_for_send(void)
+ {
+ 	struct btrfs_path *path;
+@@ -2450,6 +2465,14 @@ static int get_cur_path(struct send_ctx *sctx, u64 ino, u64 gen,
+ 	u64 parent_inode = 0;
+ 	u64 parent_gen = 0;
+ 	int stop = 0;
++	const bool is_cur_inode = (ino == sctx->cur_ino && gen == sctx->cur_inode_gen);
++
++	if (is_cur_inode && fs_path_len(&sctx->cur_inode_path) > 0) {
++		if (dest != &sctx->cur_inode_path)
++			return fs_path_copy(dest, &sctx->cur_inode_path);
++
++		return 0;
++	}
+ 
+ 	name = fs_path_alloc();
+ 	if (!name) {
+@@ -2501,8 +2524,12 @@ static int get_cur_path(struct send_ctx *sctx, u64 ino, u64 gen,
+ 
+ out:
+ 	fs_path_free(name);
+-	if (!ret)
++	if (!ret) {
+ 		fs_path_unreverse(dest);
++		if (is_cur_inode && dest != &sctx->cur_inode_path)
++			ret = fs_path_copy(&sctx->cur_inode_path, dest);
++	}
++
+ 	return ret;
+ }
+ 
+@@ -2597,6 +2624,47 @@ static int send_subvol_begin(struct send_ctx *sctx)
+ 	return ret;
+ }
+ 
++static struct fs_path *get_cur_inode_path(struct send_ctx *sctx)
++{
++	if (fs_path_len(&sctx->cur_inode_path) == 0) {
++		int ret;
++
++		ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen,
++				   &sctx->cur_inode_path);
++		if (ret < 0)
++			return ERR_PTR(ret);
++	}
++
++	return &sctx->cur_inode_path;
++}
++
++static struct fs_path *get_path_for_command(struct send_ctx *sctx, u64 ino, u64 gen)
++{
++	struct fs_path *path;
++	int ret;
++
++	if (ino == sctx->cur_ino && gen == sctx->cur_inode_gen)
++		return get_cur_inode_path(sctx);
++
++	path = fs_path_alloc();
++	if (!path)
++		return ERR_PTR(-ENOMEM);
++
++	ret = get_cur_path(sctx, ino, gen, path);
++	if (ret < 0) {
++		fs_path_free(path);
++		return ERR_PTR(ret);
++	}
++
++	return path;
++}
++
++static void free_path_for_command(const struct send_ctx *sctx, struct fs_path *path)
++{
++	if (path != &sctx->cur_inode_path)
++		fs_path_free(path);
++}
++
+ static int send_truncate(struct send_ctx *sctx, u64 ino, u64 gen, u64 size)
+ {
+ 	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
+@@ -2605,17 +2673,14 @@ static int send_truncate(struct send_ctx *sctx, u64 ino, u64 gen, u64 size)
+ 
+ 	btrfs_debug(fs_info, "send_truncate %llu size=%llu", ino, size);
+ 
+-	p = fs_path_alloc();
+-	if (!p)
+-		return -ENOMEM;
++	p = get_path_for_command(sctx, ino, gen);
++	if (IS_ERR(p))
++		return PTR_ERR(p);
+ 
+ 	ret = begin_cmd(sctx, BTRFS_SEND_C_TRUNCATE);
+ 	if (ret < 0)
+ 		goto out;
+ 
+-	ret = get_cur_path(sctx, ino, gen, p);
+-	if (ret < 0)
+-		goto out;
+ 	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
+ 	TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, size);
+ 
+@@ -2623,7 +2688,7 @@ static int send_truncate(struct send_ctx *sctx, u64 ino, u64 gen, u64 size)
+ 
+ tlv_put_failure:
+ out:
+-	fs_path_free(p);
++	free_path_for_command(sctx, p);
+ 	return ret;
+ }
+ 
+@@ -2635,17 +2700,14 @@ static int send_chmod(struct send_ctx *sctx, u64 ino, u64 gen, u64 mode)
+ 
+ 	btrfs_debug(fs_info, "send_chmod %llu mode=%llu", ino, mode);
+ 
+-	p = fs_path_alloc();
+-	if (!p)
+-		return -ENOMEM;
++	p = get_path_for_command(sctx, ino, gen);
++	if (IS_ERR(p))
++		return PTR_ERR(p);
+ 
+ 	ret = begin_cmd(sctx, BTRFS_SEND_C_CHMOD);
+ 	if (ret < 0)
+ 		goto out;
+ 
+-	ret = get_cur_path(sctx, ino, gen, p);
+-	if (ret < 0)
+-		goto out;
+ 	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
+ 	TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode & 07777);
+ 
+@@ -2653,7 +2715,7 @@ static int send_chmod(struct send_ctx *sctx, u64 ino, u64 gen, u64 mode)
+ 
+ tlv_put_failure:
+ out:
+-	fs_path_free(p);
++	free_path_for_command(sctx, p);
+ 	return ret;
+ }
+ 
+@@ -2668,17 +2730,14 @@ static int send_fileattr(struct send_ctx *sctx, u64 ino, u64 gen, u64 fileattr)
+ 
+ 	btrfs_debug(fs_info, "send_fileattr %llu fileattr=%llu", ino, fileattr);
+ 
+-	p = fs_path_alloc();
+-	if (!p)
+-		return -ENOMEM;
++	p = get_path_for_command(sctx, ino, gen);
++	if (IS_ERR(p))
++		return PTR_ERR(p);
+ 
+ 	ret = begin_cmd(sctx, BTRFS_SEND_C_FILEATTR);
+ 	if (ret < 0)
+ 		goto out;
+ 
+-	ret = get_cur_path(sctx, ino, gen, p);
+-	if (ret < 0)
+-		goto out;
+ 	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
+ 	TLV_PUT_U64(sctx, BTRFS_SEND_A_FILEATTR, fileattr);
+ 
+@@ -2686,7 +2745,7 @@ static int send_fileattr(struct send_ctx *sctx, u64 ino, u64 gen, u64 fileattr)
+ 
+ tlv_put_failure:
+ out:
+-	fs_path_free(p);
++	free_path_for_command(sctx, p);
+ 	return ret;
+ }
+ 
+@@ -2699,17 +2758,14 @@ static int send_chown(struct send_ctx *sctx, u64 ino, u64 gen, u64 uid, u64 gid)
+ 	btrfs_debug(fs_info, "send_chown %llu uid=%llu, gid=%llu",
+ 		    ino, uid, gid);
+ 
+-	p = fs_path_alloc();
+-	if (!p)
+-		return -ENOMEM;
++	p = get_path_for_command(sctx, ino, gen);
++	if (IS_ERR(p))
++		return PTR_ERR(p);
+ 
+ 	ret = begin_cmd(sctx, BTRFS_SEND_C_CHOWN);
+ 	if (ret < 0)
+ 		goto out;
+ 
+-	ret = get_cur_path(sctx, ino, gen, p);
+-	if (ret < 0)
+-		goto out;
+ 	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
+ 	TLV_PUT_U64(sctx, BTRFS_SEND_A_UID, uid);
+ 	TLV_PUT_U64(sctx, BTRFS_SEND_A_GID, gid);
+@@ -2718,7 +2774,7 @@ static int send_chown(struct send_ctx *sctx, u64 ino, u64 gen, u64 uid, u64 gid)
+ 
+ tlv_put_failure:
+ out:
+-	fs_path_free(p);
++	free_path_for_command(sctx, p);
+ 	return ret;
+ }
+ 
+@@ -2735,9 +2791,9 @@ static int send_utimes(struct send_ctx *sctx, u64 ino, u64 gen)
+ 
+ 	btrfs_debug(fs_info, "send_utimes %llu", ino);
+ 
+-	p = fs_path_alloc();
+-	if (!p)
+-		return -ENOMEM;
++	p = get_path_for_command(sctx, ino, gen);
++	if (IS_ERR(p))
++		return PTR_ERR(p);
+ 
+ 	path = alloc_path_for_send();
+ 	if (!path) {
+@@ -2762,9 +2818,6 @@ static int send_utimes(struct send_ctx *sctx, u64 ino, u64 gen)
+ 	if (ret < 0)
+ 		goto out;
+ 
+-	ret = get_cur_path(sctx, ino, gen, p);
+-	if (ret < 0)
+-		goto out;
+ 	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
+ 	TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_ATIME, eb, &ii->atime);
+ 	TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_MTIME, eb, &ii->mtime);
+@@ -2776,7 +2829,7 @@ static int send_utimes(struct send_ctx *sctx, u64 ino, u64 gen)
+ 
+ tlv_put_failure:
+ out:
+-	fs_path_free(p);
++	free_path_for_command(sctx, p);
+ 	btrfs_free_path(path);
+ 	return ret;
+ }
+@@ -3112,6 +3165,11 @@ static int orphanize_inode(struct send_ctx *sctx, u64 ino, u64 gen,
+ 		goto out;
+ 
+ 	ret = send_rename(sctx, path, orphan);
++	if (ret < 0)
++		goto out;
++
++	if (ino == sctx->cur_ino && gen == sctx->cur_inode_gen)
++		ret = fs_path_copy(&sctx->cur_inode_path, orphan);
+ 
+ out:
+ 	fs_path_free(orphan);
+@@ -4165,6 +4223,23 @@ static int refresh_ref_path(struct send_ctx *sctx, struct recorded_ref *ref)
+ 	return ret;
+ }
+ 
++static int rename_current_inode(struct send_ctx *sctx,
++				struct fs_path *current_path,
++				struct fs_path *new_path)
++{
++	int ret;
++
++	ret = send_rename(sctx, current_path, new_path);
++	if (ret < 0)
++		return ret;
++
++	ret = fs_path_copy(&sctx->cur_inode_path, new_path);
++	if (ret < 0)
++		return ret;
++
++	return fs_path_copy(current_path, new_path);
++}
++
+ /*
+  * This does all the move/link/unlink/rmdir magic.
+  */
+@@ -4179,9 +4254,9 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
+ 	u64 ow_inode = 0;
+ 	u64 ow_gen;
+ 	u64 ow_mode;
+-	int did_overwrite = 0;
+-	int is_orphan = 0;
+ 	u64 last_dir_ino_rm = 0;
++	bool did_overwrite = false;
++	bool is_orphan = false;
+ 	bool can_rename = true;
+ 	bool orphanized_dir = false;
+ 	bool orphanized_ancestor = false;
+@@ -4223,14 +4298,14 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
+ 		if (ret < 0)
+ 			goto out;
+ 		if (ret)
+-			did_overwrite = 1;
++			did_overwrite = true;
+ 	}
+ 	if (sctx->cur_inode_new || did_overwrite) {
+ 		ret = gen_unique_name(sctx, sctx->cur_ino,
+ 				sctx->cur_inode_gen, valid_path);
+ 		if (ret < 0)
+ 			goto out;
+-		is_orphan = 1;
++		is_orphan = true;
+ 	} else {
+ 		ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen,
+ 				valid_path);
+@@ -4355,6 +4430,7 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
+ 				if (ret > 0) {
+ 					orphanized_ancestor = true;
+ 					fs_path_reset(valid_path);
++					fs_path_reset(&sctx->cur_inode_path);
+ 					ret = get_cur_path(sctx, sctx->cur_ino,
+ 							   sctx->cur_inode_gen,
+ 							   valid_path);
+@@ -4450,13 +4526,10 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
+ 		 * it depending on the inode mode.
+ 		 */
+ 		if (is_orphan && can_rename) {
+-			ret = send_rename(sctx, valid_path, cur->full_path);
+-			if (ret < 0)
+-				goto out;
+-			is_orphan = 0;
+-			ret = fs_path_copy(valid_path, cur->full_path);
++			ret = rename_current_inode(sctx, valid_path, cur->full_path);
+ 			if (ret < 0)
+ 				goto out;
++			is_orphan = false;
+ 		} else if (can_rename) {
+ 			if (S_ISDIR(sctx->cur_inode_mode)) {
+ 				/*
+@@ -4464,10 +4537,7 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
+ 				 * dirs, we always have one new and one deleted
+ 				 * ref. The deleted ref is ignored later.
+ 				 */
+-				ret = send_rename(sctx, valid_path,
+-						  cur->full_path);
+-				if (!ret)
+-					ret = fs_path_copy(valid_path,
++				ret = rename_current_inode(sctx, valid_path,
+ 							   cur->full_path);
+ 				if (ret < 0)
+ 					goto out;
+@@ -4514,7 +4584,7 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
+ 					sctx->cur_inode_gen, valid_path);
+ 			if (ret < 0)
+ 				goto out;
+-			is_orphan = 1;
++			is_orphan = true;
+ 		}
+ 
+ 		list_for_each_entry(cur, &sctx->deleted_refs, list) {
+@@ -4560,6 +4630,8 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
+ 				ret = send_unlink(sctx, cur->full_path);
+ 				if (ret < 0)
+ 					goto out;
++				if (is_current_inode_path(sctx, cur->full_path))
++					fs_path_reset(&sctx->cur_inode_path);
+ 			}
+ 			ret = dup_ref(cur, &check_dirs);
+ 			if (ret < 0)
+@@ -4878,11 +4950,15 @@ static int process_all_refs(struct send_ctx *sctx,
+ }
+ 
+ static int send_set_xattr(struct send_ctx *sctx,
+-			  struct fs_path *path,
+ 			  const char *name, int name_len,
+ 			  const char *data, int data_len)
+ {
+-	int ret = 0;
++	struct fs_path *path;
++	int ret;
++
++	path = get_cur_inode_path(sctx);
++	if (IS_ERR(path))
++		return PTR_ERR(path);
+ 
+ 	ret = begin_cmd(sctx, BTRFS_SEND_C_SET_XATTR);
+ 	if (ret < 0)
+@@ -4923,19 +4999,13 @@ static int __process_new_xattr(int num, struct btrfs_key *di_key,
+ 			       const char *name, int name_len, const char *data,
+ 			       int data_len, void *ctx)
+ {
+-	int ret;
+ 	struct send_ctx *sctx = ctx;
+-	struct fs_path *p;
+ 	struct posix_acl_xattr_header dummy_acl;
+ 
+ 	/* Capabilities are emitted by finish_inode_if_needed */
+ 	if (!strncmp(name, XATTR_NAME_CAPS, name_len))
+ 		return 0;
+ 
+-	p = fs_path_alloc();
+-	if (!p)
+-		return -ENOMEM;
+-
+ 	/*
+ 	 * This hack is needed because empty acls are stored as zero byte
+ 	 * data in xattrs. Problem with that is, that receiving these zero byte
+@@ -4952,38 +5022,21 @@ static int __process_new_xattr(int num, struct btrfs_key *di_key,
+ 		}
+ 	}
+ 
+-	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
+-	if (ret < 0)
+-		goto out;
+-
+-	ret = send_set_xattr(sctx, p, name, name_len, data, data_len);
+-
+-out:
+-	fs_path_free(p);
+-	return ret;
++	return send_set_xattr(sctx, name, name_len, data, data_len);
+ }
+ 
+ static int __process_deleted_xattr(int num, struct btrfs_key *di_key,
+ 				   const char *name, int name_len,
+ 				   const char *data, int data_len, void *ctx)
+ {
+-	int ret;
+ 	struct send_ctx *sctx = ctx;
+ 	struct fs_path *p;
+ 
+-	p = fs_path_alloc();
+-	if (!p)
+-		return -ENOMEM;
+-
+-	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
+-	if (ret < 0)
+-		goto out;
+-
+-	ret = send_remove_xattr(sctx, p, name, name_len);
++	p = get_cur_inode_path(sctx);
++	if (IS_ERR(p))
++		return PTR_ERR(p);
+ 
+-out:
+-	fs_path_free(p);
+-	return ret;
++	return send_remove_xattr(sctx, p, name, name_len);
+ }
+ 
+ static int process_new_xattr(struct send_ctx *sctx)
+@@ -5216,21 +5269,13 @@ static int process_verity(struct send_ctx *sctx)
+ 	if (ret < 0)
+ 		goto iput;
+ 
+-	p = fs_path_alloc();
+-	if (!p) {
+-		ret = -ENOMEM;
++	p = get_cur_inode_path(sctx);
++	if (IS_ERR(p)) {
++		ret = PTR_ERR(p);
+ 		goto iput;
+ 	}
+-	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
+-	if (ret < 0)
+-		goto free_path;
+ 
+ 	ret = send_verity(sctx, p, sctx->verity_descriptor);
+-	if (ret < 0)
+-		goto free_path;
+-
+-free_path:
+-	fs_path_free(p);
+ iput:
+ 	iput(inode);
+ 	return ret;
+@@ -5352,31 +5397,25 @@ static int send_write(struct send_ctx *sctx, u64 offset, u32 len)
+ 	int ret = 0;
+ 	struct fs_path *p;
+ 
+-	p = fs_path_alloc();
+-	if (!p)
+-		return -ENOMEM;
+-
+ 	btrfs_debug(fs_info, "send_write offset=%llu, len=%d", offset, len);
+ 
+-	ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
+-	if (ret < 0)
+-		goto out;
++	p = get_cur_inode_path(sctx);
++	if (IS_ERR(p))
++		return PTR_ERR(p);
+ 
+-	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
++	ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
+ 	if (ret < 0)
+-		goto out;
++		return ret;
+ 
+ 	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
+ 	TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
+ 	ret = put_file_data(sctx, offset, len);
+ 	if (ret < 0)
+-		goto out;
++		return ret;
+ 
+ 	ret = send_cmd(sctx);
+ 
+ tlv_put_failure:
+-out:
+-	fs_path_free(p);
+ 	return ret;
+ }
+ 
+@@ -5389,6 +5428,7 @@ static int send_clone(struct send_ctx *sctx,
+ {
+ 	int ret = 0;
+ 	struct fs_path *p;
++	struct fs_path *cur_inode_path;
+ 	u64 gen;
+ 
+ 	btrfs_debug(sctx->send_root->fs_info,
+@@ -5396,6 +5436,10 @@ static int send_clone(struct send_ctx *sctx,
+ 		    offset, len, btrfs_root_id(clone_root->root),
+ 		    clone_root->ino, clone_root->offset);
+ 
++	cur_inode_path = get_cur_inode_path(sctx);
++	if (IS_ERR(cur_inode_path))
++		return PTR_ERR(cur_inode_path);
++
+ 	p = fs_path_alloc();
+ 	if (!p)
+ 		return -ENOMEM;
+@@ -5404,13 +5448,9 @@ static int send_clone(struct send_ctx *sctx,
+ 	if (ret < 0)
+ 		goto out;
+ 
+-	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
+-	if (ret < 0)
+-		goto out;
+-
+ 	TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
+ 	TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_LEN, len);
+-	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
++	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, cur_inode_path);
+ 
+ 	if (clone_root->root == sctx->send_root) {
+ 		ret = get_inode_gen(sctx->send_root, clone_root->ino, &gen);
+@@ -5461,27 +5501,45 @@ static int send_update_extent(struct send_ctx *sctx,
+ 	int ret = 0;
+ 	struct fs_path *p;
+ 
+-	p = fs_path_alloc();
+-	if (!p)
+-		return -ENOMEM;
++	p = get_cur_inode_path(sctx);
++	if (IS_ERR(p))
++		return PTR_ERR(p);
+ 
+ 	ret = begin_cmd(sctx, BTRFS_SEND_C_UPDATE_EXTENT);
+ 	if (ret < 0)
+-		goto out;
++		return ret;
++
++	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
++	TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
++	TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, len);
++
++	ret = send_cmd(sctx);
+ 
+-	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
++tlv_put_failure:
++	return ret;
++}
++
++static int send_fallocate(struct send_ctx *sctx, u32 mode, u64 offset, u64 len)
++{
++	struct fs_path *path;
++	int ret;
++
++	path = get_cur_inode_path(sctx);
++	if (IS_ERR(path))
++		return PTR_ERR(path);
++
++	ret = begin_cmd(sctx, BTRFS_SEND_C_FALLOCATE);
+ 	if (ret < 0)
+-		goto out;
++		return ret;
+ 
+-	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
++	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
++	TLV_PUT_U32(sctx, BTRFS_SEND_A_FALLOCATE_MODE, mode);
+ 	TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
+ 	TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, len);
+ 
+ 	ret = send_cmd(sctx);
+ 
+ tlv_put_failure:
+-out:
+-	fs_path_free(p);
+ 	return ret;
+ }
+ 
+@@ -5492,6 +5550,14 @@ static int send_hole(struct send_ctx *sctx, u64 end)
+ 	u64 offset = sctx->cur_inode_last_extent;
+ 	int ret = 0;
+ 
++	/*
++	 * Starting with send stream v2 we have fallocate and can use it to
++	 * punch holes instead of sending writes full of zeroes.
++	 */
++	if (proto_cmd_ok(sctx, BTRFS_SEND_C_FALLOCATE))
++		return send_fallocate(sctx, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
++				      offset, end - offset);
++
+ 	/*
+ 	 * A hole that starts at EOF or beyond it. Since we do not yet support
+ 	 * fallocate (for extent preallocation and hole punching), sending a
+@@ -5510,12 +5576,10 @@ static int send_hole(struct send_ctx *sctx, u64 end)
+ 	if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA)
+ 		return send_update_extent(sctx, offset, end - offset);
+ 
+-	p = fs_path_alloc();
+-	if (!p)
+-		return -ENOMEM;
+-	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
+-	if (ret < 0)
+-		goto tlv_put_failure;
++	p = get_cur_inode_path(sctx);
++	if (IS_ERR(p))
++		return PTR_ERR(p);
++
+ 	while (offset < end) {
+ 		u64 len = min(end - offset, read_size);
+ 
+@@ -5536,7 +5600,6 @@ static int send_hole(struct send_ctx *sctx, u64 end)
+ 	}
+ 	sctx->cur_inode_next_write_offset = offset;
+ tlv_put_failure:
+-	fs_path_free(p);
+ 	return ret;
+ }
+ 
+@@ -5559,9 +5622,9 @@ static int send_encoded_inline_extent(struct send_ctx *sctx,
+ 	if (IS_ERR(inode))
+ 		return PTR_ERR(inode);
+ 
+-	fspath = fs_path_alloc();
+-	if (!fspath) {
+-		ret = -ENOMEM;
++	fspath = get_cur_inode_path(sctx);
++	if (IS_ERR(fspath)) {
++		ret = PTR_ERR(fspath);
+ 		goto out;
+ 	}
+ 
+@@ -5569,10 +5632,6 @@ static int send_encoded_inline_extent(struct send_ctx *sctx,
+ 	if (ret < 0)
+ 		goto out;
+ 
+-	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, fspath);
+-	if (ret < 0)
+-		goto out;
+-
+ 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+ 	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
+ 	ram_bytes = btrfs_file_extent_ram_bytes(leaf, ei);
+@@ -5601,7 +5660,6 @@ static int send_encoded_inline_extent(struct send_ctx *sctx,
+ 
+ tlv_put_failure:
+ out:
+-	fs_path_free(fspath);
+ 	iput(inode);
+ 	return ret;
+ }
+@@ -5626,9 +5684,9 @@ static int send_encoded_extent(struct send_ctx *sctx, struct btrfs_path *path,
+ 	if (IS_ERR(inode))
+ 		return PTR_ERR(inode);
+ 
+-	fspath = fs_path_alloc();
+-	if (!fspath) {
+-		ret = -ENOMEM;
++	fspath = get_cur_inode_path(sctx);
++	if (IS_ERR(fspath)) {
++		ret = PTR_ERR(fspath);
+ 		goto out;
+ 	}
+ 
+@@ -5636,10 +5694,6 @@ static int send_encoded_extent(struct send_ctx *sctx, struct btrfs_path *path,
+ 	if (ret < 0)
+ 		goto out;
+ 
+-	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, fspath);
+-	if (ret < 0)
+-		goto out;
+-
+ 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+ 	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
+ 	disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, ei);
+@@ -5706,7 +5760,6 @@ static int send_encoded_extent(struct send_ctx *sctx, struct btrfs_path *path,
+ 
+ tlv_put_failure:
+ out:
+-	fs_path_free(fspath);
+ 	iput(inode);
+ 	return ret;
+ }
+@@ -5836,7 +5889,6 @@ static int send_extent_data(struct send_ctx *sctx, struct btrfs_path *path,
+  */
+ static int send_capabilities(struct send_ctx *sctx)
+ {
+-	struct fs_path *fspath = NULL;
+ 	struct btrfs_path *path;
+ 	struct btrfs_dir_item *di;
+ 	struct extent_buffer *leaf;
+@@ -5862,25 +5914,19 @@ static int send_capabilities(struct send_ctx *sctx)
+ 	leaf = path->nodes[0];
+ 	buf_len = btrfs_dir_data_len(leaf, di);
+ 
+-	fspath = fs_path_alloc();
+ 	buf = kmalloc(buf_len, GFP_KERNEL);
+-	if (!fspath || !buf) {
++	if (!buf) {
+ 		ret = -ENOMEM;
+ 		goto out;
+ 	}
+ 
+-	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, fspath);
+-	if (ret < 0)
+-		goto out;
+-
+ 	data_ptr = (unsigned long)(di + 1) + btrfs_dir_name_len(leaf, di);
+ 	read_extent_buffer(leaf, buf, data_ptr, buf_len);
+ 
+-	ret = send_set_xattr(sctx, fspath, XATTR_NAME_CAPS,
++	ret = send_set_xattr(sctx, XATTR_NAME_CAPS,
+ 			strlen(XATTR_NAME_CAPS), buf, buf_len);
+ out:
+ 	kfree(buf);
+-	fs_path_free(fspath);
+ 	btrfs_free_path(path);
+ 	return ret;
+ }
+@@ -6906,6 +6952,7 @@ static int changed_inode(struct send_ctx *sctx,
+ 	sctx->cur_inode_last_extent = (u64)-1;
+ 	sctx->cur_inode_next_write_offset = 0;
+ 	sctx->ignore_cur_inode = false;
++	fs_path_reset(&sctx->cur_inode_path);
+ 
+ 	/*
+ 	 * Set send_progress to current inode. This will tell all get_cur_xxx
+@@ -8178,6 +8225,7 @@ long btrfs_ioctl_send(struct btrfs_inode *inode, const struct btrfs_ioctl_send_a
+ 		goto out;
+ 	}
+ 
++	init_path(&sctx->cur_inode_path);
+ 	INIT_LIST_HEAD(&sctx->new_refs);
+ 	INIT_LIST_HEAD(&sctx->deleted_refs);
+ 
+@@ -8463,6 +8511,9 @@ long btrfs_ioctl_send(struct btrfs_inode *inode, const struct btrfs_ioctl_send_a
+ 		btrfs_lru_cache_clear(&sctx->dir_created_cache);
+ 		btrfs_lru_cache_clear(&sctx->dir_utimes_cache);
+ 
++		if (sctx->cur_inode_path.buf != sctx->cur_inode_path.inline_buf)
++			kfree(sctx->cur_inode_path.buf);
++
+ 		kfree(sctx);
+ 	}
+ 
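
Most of the send.c diff above is a single optimization: the path of the inode currently being processed is built once, cached in sctx->cur_inode_path, handed out via get_cur_inode_path() and get_path_for_command(), and reset whenever the current inode changes, is orphanized, or has a hard link unlinked. A hedged sketch of the caching shape (the names and the fixed-size buffer are illustrative only):

#include <stdio.h>
#include <string.h>

static char cur_inode_path[4096];   /* empty string == cache not populated */

static const char *get_cur_inode_path_cached(unsigned long long ino)
{
	if (cur_inode_path[0] == '\0')   /* first use: build the path once */
		snprintf(cur_inode_path, sizeof(cur_inode_path),
			 "/subvol/dir/ino-%llu", ino);
	return cur_inode_path;           /* later uses: no tree walk needed */
}

static void invalidate_cur_inode_path(void)
{
	/* mirrors fs_path_reset(&sctx->cur_inode_path) on rename/unlink */
	cur_inode_path[0] = '\0';
}

The payoff is that consecutive commands against the same inode (write, chmod, chown, truncate, xattrs) stop redoing the backref walk that get_cur_path() needs.
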
+diff --git a/fs/btrfs/subpage.c b/fs/btrfs/subpage.c
+index 88a01d51ab11f1..71a56aaac7ad27 100644
+--- a/fs/btrfs/subpage.c
++++ b/fs/btrfs/subpage.c
+@@ -452,8 +452,25 @@ void btrfs_subpage_set_writeback(const struct btrfs_fs_info *fs_info,
+ 
+ 	spin_lock_irqsave(&subpage->lock, flags);
+ 	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
++
++	/*
++	 * Don't clear the TOWRITE tag when starting writeback on a still-dirty
++	 * folio. Doing so can cause WB_SYNC_ALL writepages() to overlook it,
++	 * assume writeback is complete, and exit too early, violating sync
++	 * ordering guarantees.
++	 */
+ 	if (!folio_test_writeback(folio))
+-		folio_start_writeback(folio);
++		__folio_start_writeback(folio, true);
++	if (!folio_test_dirty(folio)) {
++		struct address_space *mapping = folio_mapping(folio);
++		XA_STATE(xas, &mapping->i_pages, folio->index);
++		unsigned long flags;
++
++		xas_lock_irqsave(&xas, flags);
++		xas_load(&xas);
++		xas_clear_mark(&xas, PAGECACHE_TAG_TOWRITE);
++		xas_unlock_irqrestore(&xas, flags);
++	}
+ 	spin_unlock_irqrestore(&subpage->lock, flags);
+ }
+ 
+diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
+index 6119a06b056938..69f9d5f5cc3c6d 100644
+--- a/fs/btrfs/super.c
++++ b/fs/btrfs/super.c
+@@ -88,6 +88,9 @@ struct btrfs_fs_context {
+ 	refcount_t refs;
+ };
+ 
++static void btrfs_emit_options(struct btrfs_fs_info *info,
++			       struct btrfs_fs_context *old);
++
+ enum {
+ 	Opt_acl,
+ 	Opt_clear_cache,
+@@ -697,12 +700,9 @@ bool btrfs_check_options(const struct btrfs_fs_info *info,
+ 
+ 	if (!test_bit(BTRFS_FS_STATE_REMOUNTING, &info->fs_state)) {
+ 		if (btrfs_raw_test_opt(*mount_opt, SPACE_CACHE)) {
+-			btrfs_info(info, "disk space caching is enabled");
+ 			btrfs_warn(info,
+ "space cache v1 is being deprecated and will be removed in a future release, please use -o space_cache=v2");
+ 		}
+-		if (btrfs_raw_test_opt(*mount_opt, FREE_SPACE_TREE))
+-			btrfs_info(info, "using free-space-tree");
+ 	}
+ 
+ 	return ret;
+@@ -979,6 +979,8 @@ static int btrfs_fill_super(struct super_block *sb,
+ 		return err;
+ 	}
+ 
++	btrfs_emit_options(fs_info, NULL);
++
+ 	inode = btrfs_iget(BTRFS_FIRST_FREE_OBJECTID, fs_info->fs_root);
+ 	if (IS_ERR(inode)) {
+ 		err = PTR_ERR(inode);
+@@ -1436,7 +1438,7 @@ static void btrfs_emit_options(struct btrfs_fs_info *info,
+ {
+ 	btrfs_info_if_set(info, old, NODATASUM, "setting nodatasum");
+ 	btrfs_info_if_set(info, old, DEGRADED, "allowing degraded mounts");
+-	btrfs_info_if_set(info, old, NODATASUM, "setting nodatasum");
++	btrfs_info_if_set(info, old, NODATACOW, "setting nodatacow");
+ 	btrfs_info_if_set(info, old, SSD, "enabling ssd optimizations");
+ 	btrfs_info_if_set(info, old, SSD_SPREAD, "using spread ssd allocation scheme");
+ 	btrfs_info_if_set(info, old, NOBARRIER, "turning off barriers");
+@@ -1458,10 +1460,11 @@ static void btrfs_emit_options(struct btrfs_fs_info *info,
+ 	btrfs_info_if_set(info, old, IGNOREMETACSUMS, "ignoring meta csums");
+ 	btrfs_info_if_set(info, old, IGNORESUPERFLAGS, "ignoring unknown super block flags");
+ 
++	btrfs_info_if_unset(info, old, NODATASUM, "setting datasum");
+ 	btrfs_info_if_unset(info, old, NODATACOW, "setting datacow");
+ 	btrfs_info_if_unset(info, old, SSD, "not using ssd optimizations");
+ 	btrfs_info_if_unset(info, old, SSD_SPREAD, "not using spread ssd allocation scheme");
+-	btrfs_info_if_unset(info, old, NOBARRIER, "turning off barriers");
++	btrfs_info_if_unset(info, old, NOBARRIER, "turning on barriers");
+ 	btrfs_info_if_unset(info, old, NOTREELOG, "enabling tree log");
+ 	btrfs_info_if_unset(info, old, SPACE_CACHE, "disabling disk space caching");
+ 	btrfs_info_if_unset(info, old, FREE_SPACE_TREE, "disabling free space tree");
+diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
+index dbef80cd5a9f1c..1a029392eac524 100644
+--- a/fs/btrfs/transaction.c
++++ b/fs/btrfs/transaction.c
+@@ -2113,6 +2113,7 @@ static void btrfs_cleanup_pending_block_groups(struct btrfs_trans_handle *trans)
+ 		*/
+ 	       spin_lock(&fs_info->unused_bgs_lock);
+                list_del_init(&block_group->bg_list);
++	       btrfs_put_block_group(block_group);
+ 	       spin_unlock(&fs_info->unused_bgs_lock);
+        }
+ }
+diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
+index 53d8c49ec0588d..2fdb2987c83ac2 100644
+--- a/fs/btrfs/zoned.c
++++ b/fs/btrfs/zoned.c
+@@ -2092,10 +2092,15 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
+ 		goto out_unlock;
+ 	}
+ 
+-	/* No space left */
+-	if (btrfs_zoned_bg_is_full(block_group)) {
+-		ret = false;
+-		goto out_unlock;
++	if (block_group->flags & BTRFS_BLOCK_GROUP_DATA) {
++		/* The caller should check if the block group is full. */
++		if (WARN_ON_ONCE(btrfs_zoned_bg_is_full(block_group))) {
++			ret = false;
++			goto out_unlock;
++		}
++	} else {
++		/* Since it is already written, it should have been active. */
++		WARN_ON_ONCE(block_group->meta_write_pointer != block_group->start);
+ 	}
+ 
+ 	for (i = 0; i < map->num_stripes; i++) {
+diff --git a/fs/buffer.c b/fs/buffer.c
+index e9e84512a027af..79c19ffa440155 100644
+--- a/fs/buffer.c
++++ b/fs/buffer.c
+@@ -157,8 +157,8 @@ static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
+  */
+ void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
+ {
+-	__end_buffer_read_notouch(bh, uptodate);
+ 	put_bh(bh);
++	__end_buffer_read_notouch(bh, uptodate);
+ }
+ EXPORT_SYMBOL(end_buffer_read_sync);
+ 
+diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
+index 66d9b3b4c5881d..525f3aa780cd39 100644
+--- a/fs/debugfs/inode.c
++++ b/fs/debugfs/inode.c
+@@ -183,6 +183,9 @@ static int debugfs_reconfigure(struct fs_context *fc)
+ 	struct debugfs_fs_info *sb_opts = sb->s_fs_info;
+ 	struct debugfs_fs_info *new_opts = fc->s_fs_info;
+ 
++	if (!new_opts)
++		return 0;
++
+ 	sync_filesystem(sb);
+ 
+ 	/* structure copy of new mount options to sb */
+@@ -269,10 +272,16 @@ static int debugfs_fill_super(struct super_block *sb, struct fs_context *fc)
+ 
+ static int debugfs_get_tree(struct fs_context *fc)
+ {
++	int err;
++
+ 	if (!(debugfs_allow & DEBUGFS_ALLOW_API))
+ 		return -EPERM;
+ 
+-	return get_tree_single(fc, debugfs_fill_super);
++	err = get_tree_single(fc, debugfs_fill_super);
++	if (err)
++		return err;
++
++	return debugfs_reconfigure(fc);
+ }
+ 
+ static void debugfs_free_fc(struct fs_context *fc)
+diff --git a/fs/ext4/fsmap.c b/fs/ext4/fsmap.c
+index 383c6edea6dd31..91185c40f755a5 100644
+--- a/fs/ext4/fsmap.c
++++ b/fs/ext4/fsmap.c
+@@ -393,6 +393,14 @@ static unsigned int ext4_getfsmap_find_sb(struct super_block *sb,
+ 	/* Reserved GDT blocks */
+ 	if (!ext4_has_feature_meta_bg(sb) || metagroup < first_meta_bg) {
+ 		len = le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks);
++
++		/*
++		 * mkfs.ext4 can set s_reserved_gdt_blocks to 0 in some cases;
++		 * check for that.
++		 */
++		if (!len)
++			return 0;
++
+ 		error = ext4_getfsmap_fill(meta_list, fsb, len,
+ 					   EXT4_FMR_OWN_RESV_GDT);
+ 		if (error)
+@@ -526,6 +534,7 @@ static int ext4_getfsmap_datadev(struct super_block *sb,
+ 	ext4_group_t end_ag;
+ 	ext4_grpblk_t first_cluster;
+ 	ext4_grpblk_t last_cluster;
++	struct ext4_fsmap irec;
+ 	int error = 0;
+ 
+ 	bofs = le32_to_cpu(sbi->s_es->s_first_data_block);
+@@ -609,10 +618,18 @@ static int ext4_getfsmap_datadev(struct super_block *sb,
+ 			goto err;
+ 	}
+ 
+-	/* Report any gaps at the end of the bg */
++	/*
++	 * The dummy record below will cause ext4_getfsmap_helper() to report
++	 * any allocated blocks at the end of the range.
++	 */
++	irec.fmr_device = 0;
++	irec.fmr_physical = end_fsb + 1;
++	irec.fmr_length = 0;
++	irec.fmr_owner = EXT4_FMR_OWN_FREE;
++	irec.fmr_flags = 0;
++
+ 	info->gfi_last = true;
+-	error = ext4_getfsmap_datadev_helper(sb, end_ag, last_cluster + 1,
+-					     0, info);
++	error = ext4_getfsmap_helper(sb, info, &irec);
+ 	if (error)
+ 		goto err;
+ 
+diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
+index 7de327fa7b1c51..d45124318200d8 100644
+--- a/fs/ext4/indirect.c
++++ b/fs/ext4/indirect.c
+@@ -539,7 +539,7 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
+ 	int indirect_blks;
+ 	int blocks_to_boundary = 0;
+ 	int depth;
+-	int count = 0;
++	u64 count = 0;
+ 	ext4_fsblk_t first_block = 0;
+ 
+ 	trace_ext4_ind_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
+@@ -588,7 +588,7 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
+ 		count++;
+ 		/* Fill in size of a hole we found */
+ 		map->m_pblk = 0;
+-		map->m_len = min_t(unsigned int, map->m_len, count);
++		map->m_len = umin(map->m_len, count);
+ 		goto cleanup;
+ 	}
+ 
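
The indirect-mapping hunk above widens count to u64 and replaces min_t(unsigned int, ...) with umin(). A self-contained demo of the truncation being avoided; the value of count is invented for illustration:

#include <stdio.h>

int main(void)
{
	unsigned long long count = 0x100000001ULL; /* 2^32 + 1, truncates to 1 */
	unsigned int m_len = 8;

	/* min_t(unsigned int, m_len, count) casts first, then compares: */
	unsigned int cast_then_min =
		m_len < (unsigned int)count ? m_len : (unsigned int)count;

	/* umin() compares at full width; the smaller value then fits anyway: */
	unsigned long long min_then_fit = m_len < count ? m_len : count;

	printf("cast-first=%u full-width=%llu\n", cast_then_min, min_then_fit);
	return 0;   /* prints cast-first=1 full-width=8 */
}
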
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 232131804bb810..7923602271ad0a 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -151,7 +151,7 @@ static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
+  */
+ int ext4_inode_is_fast_symlink(struct inode *inode)
+ {
+-	if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) {
++	if (!ext4_has_feature_ea_inode(inode->i_sb)) {
+ 		int ea_blocks = EXT4_I(inode)->i_file_acl ?
+ 				EXT4_CLUSTER_SIZE(inode->i_sb) >> 9 : 0;
+ 
+diff --git a/fs/ext4/orphan.c b/fs/ext4/orphan.c
+index e5b47dda331759..a23b0c01f8096d 100644
+--- a/fs/ext4/orphan.c
++++ b/fs/ext4/orphan.c
+@@ -590,8 +590,9 @@ int ext4_init_orphan_info(struct super_block *sb)
+ 	}
+ 	oi->of_blocks = inode->i_size >> sb->s_blocksize_bits;
+ 	oi->of_csum_seed = EXT4_I(inode)->i_csum_seed;
+-	oi->of_binfo = kmalloc(oi->of_blocks*sizeof(struct ext4_orphan_block),
+-			       GFP_KERNEL);
++	oi->of_binfo = kmalloc_array(oi->of_blocks,
++				     sizeof(struct ext4_orphan_block),
++				     GFP_KERNEL);
+ 	if (!oi->of_binfo) {
+ 		ret = -ENOMEM;
+ 		goto out_put;
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 99117d1e1bdd5b..722ac723f49b6e 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -2019,6 +2019,9 @@ int ext4_init_fs_context(struct fs_context *fc)
+ 	fc->fs_private = ctx;
+ 	fc->ops = &ext4_context_ops;
+ 
++	/* i_version is always enabled now */
++	fc->sb_flags |= SB_I_VERSION;
++
+ 	return 0;
+ }
+ 
+@@ -5277,9 +5280,6 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb)
+ 	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
+ 		(test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0);
+ 
+-	/* i_version is always enabled now */
+-	sb->s_flags |= SB_I_VERSION;
+-
+ 	err = ext4_check_feature_compatibility(sb, es, silent);
+ 	if (err)
+ 		goto failed_mount;
+@@ -5373,6 +5373,8 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb)
+ 		err = ext4_load_and_init_journal(sb, es, ctx);
+ 		if (err)
+ 			goto failed_mount3a;
++		if (bdev_read_only(sb->s_bdev))
++			needs_recovery = 0;
+ 	} else if (test_opt(sb, NOLOAD) && !sb_rdonly(sb) &&
+ 		   ext4_has_feature_journal_needs_recovery(sb)) {
+ 		ext4_msg(sb, KERN_ERR, "required journal recovery "
+diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
+index 12c76e3d1cd49d..7c27878293697b 100644
+--- a/fs/f2fs/node.c
++++ b/fs/f2fs/node.c
+@@ -799,6 +799,16 @@ int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
+ 	for (i = 1; i <= level; i++) {
+ 		bool done = false;
+ 
++		if (nids[i] && nids[i] == dn->inode->i_ino) {
++			err = -EFSCORRUPTED;
++			f2fs_err_ratelimited(sbi,
++				"inode mapping table is corrupted, run fsck to fix it, "
++				"ino:%lu, nid:%u, level:%d, offset:%d",
++				dn->inode->i_ino, nids[i], level, offset[level]);
++			set_sbi_flag(sbi, SBI_NEED_FSCK);
++			goto release_pages;
++		}
++
+ 		if (!nids[i] && mode == ALLOC_NODE) {
+ 			/* alloc new node */
+ 			if (!f2fs_alloc_nid(sbi, &(nids[i]))) {
+diff --git a/fs/file.c b/fs/file.c
+index 4579c329649877..bfc9eb9e722984 100644
+--- a/fs/file.c
++++ b/fs/file.c
+@@ -90,18 +90,11 @@ static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
+  * 'unsigned long' in some places, but simply because that is how the Linux
+  * kernel bitmaps are defined to work: they are not "bits in an array of bytes",
+  * they are very much "bits in an array of unsigned long".
+- *
+- * The ALIGN(nr, BITS_PER_LONG) here is for clarity: since we just multiplied
+- * by that "1024/sizeof(ptr)" before, we already know there are sufficient
+- * clear low bits. Clang seems to realize that, gcc ends up being confused.
+- *
+- * On a 128-bit machine, the ALIGN() would actually matter. In the meantime,
+- * let's consider it documentation (and maybe a test-case for gcc to improve
+- * its code generation ;)
+  */
+-static struct fdtable * alloc_fdtable(unsigned int nr)
++static struct fdtable *alloc_fdtable(unsigned int slots_wanted)
+ {
+ 	struct fdtable *fdt;
++	unsigned int nr;
+ 	void *data;
+ 
+ 	/*
+@@ -109,22 +102,32 @@ static struct fdtable * alloc_fdtable(unsigned int nr)
+ 	 * Allocation steps are keyed to the size of the fdarray, since it
+ 	 * grows far faster than any of the other dynamic data. We try to fit
+ 	 * the fdarray into comfortable page-tuned chunks: starting at 1024B
+-	 * and growing in powers of two from there on.
++	 * and growing in powers of two from there on.  Since we are called only
++	 * with slots_wanted > BITS_PER_LONG (embedded instance in files->fdtab
++	 * already gives BITS_PER_LONG slots), the above boils down to
++	 * 1.  use the smallest power of two large enough to give us that many
++	 * slots.
++	 * 2.  on 32bit skip 64 and 128 - the minimal capacity we want there is
++	 * 256 slots (i.e. 1Kb fd array).
++	 * 3.  on 64bit don't skip anything, 1Kb fd array means 128 slots there
++	 * and we are never going to be asked for 64 or less.
+ 	 */
+-	nr /= (1024 / sizeof(struct file *));
+-	nr = roundup_pow_of_two(nr + 1);
+-	nr *= (1024 / sizeof(struct file *));
+-	nr = ALIGN(nr, BITS_PER_LONG);
++	if (IS_ENABLED(CONFIG_32BIT) && slots_wanted < 256)
++		nr = 256;
++	else
++		nr = roundup_pow_of_two(slots_wanted);
+ 	/*
+ 	 * Note that this can drive nr *below* what we had passed if sysctl_nr_open
+-	 * had been set lower between the check in expand_files() and here.  Deal
+-	 * with that in caller, it's cheaper that way.
++	 * had been set lower between the check in expand_files() and here.
+ 	 *
+ 	 * We make sure that nr remains a multiple of BITS_PER_LONG - otherwise
+ 	 * bitmaps handling below becomes unpleasant, to put it mildly...
+ 	 */
+-	if (unlikely(nr > sysctl_nr_open))
+-		nr = ((sysctl_nr_open - 1) | (BITS_PER_LONG - 1)) + 1;
++	if (unlikely(nr > sysctl_nr_open)) {
++		nr = round_down(sysctl_nr_open, BITS_PER_LONG);
++		if (nr < slots_wanted)
++			return ERR_PTR(-EMFILE);
++	}
+ 
+ 	/*
+ 	 * Check if the allocation size would exceed INT_MAX. kvmalloc_array()
+@@ -168,7 +171,7 @@ static struct fdtable * alloc_fdtable(unsigned int nr)
+ out_fdt:
+ 	kfree(fdt);
+ out:
+-	return NULL;
++	return ERR_PTR(-ENOMEM);
+ }
+ 
+ /*
+@@ -185,7 +188,7 @@ static int expand_fdtable(struct files_struct *files, unsigned int nr)
+ 	struct fdtable *new_fdt, *cur_fdt;
+ 
+ 	spin_unlock(&files->file_lock);
+-	new_fdt = alloc_fdtable(nr);
++	new_fdt = alloc_fdtable(nr + 1);
+ 
+ 	/* make sure all fd_install() have seen resize_in_progress
+ 	 * or have finished their rcu_read_lock_sched() section.
+@@ -194,16 +197,8 @@ static int expand_fdtable(struct files_struct *files, unsigned int nr)
+ 		synchronize_rcu();
+ 
+ 	spin_lock(&files->file_lock);
+-	if (!new_fdt)
+-		return -ENOMEM;
+-	/*
+-	 * extremely unlikely race - sysctl_nr_open decreased between the check in
+-	 * caller and alloc_fdtable().  Cheaper to catch it here...
+-	 */
+-	if (unlikely(new_fdt->max_fds <= nr)) {
+-		__free_fdtable(new_fdt);
+-		return -EMFILE;
+-	}
++	if (IS_ERR(new_fdt))
++		return PTR_ERR(new_fdt);
+ 	cur_fdt = files_fdtable(files);
+ 	BUG_ON(nr < cur_fdt->max_fds);
+ 	copy_fdtable(new_fdt, cur_fdt);
+@@ -322,7 +317,6 @@ struct files_struct *dup_fd(struct files_struct *oldf, struct fd_range *punch_ho
+ 	struct file **old_fds, **new_fds;
+ 	unsigned int open_files, i;
+ 	struct fdtable *old_fdt, *new_fdt;
+-	int error;
+ 
+ 	newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
+ 	if (!newf)
+@@ -354,17 +348,10 @@ struct files_struct *dup_fd(struct files_struct *oldf, struct fd_range *punch_ho
+ 		if (new_fdt != &newf->fdtab)
+ 			__free_fdtable(new_fdt);
+ 
+-		new_fdt = alloc_fdtable(open_files - 1);
+-		if (!new_fdt) {
+-			error = -ENOMEM;
+-			goto out_release;
+-		}
+-
+-		/* beyond sysctl_nr_open; nothing to do */
+-		if (unlikely(new_fdt->max_fds < open_files)) {
+-			__free_fdtable(new_fdt);
+-			error = -EMFILE;
+-			goto out_release;
++		new_fdt = alloc_fdtable(open_files);
++		if (IS_ERR(new_fdt)) {
++			kmem_cache_free(files_cachep, newf);
++			return ERR_CAST(new_fdt);
+ 		}
+ 
+ 		/*
+@@ -413,10 +400,6 @@ struct files_struct *dup_fd(struct files_struct *oldf, struct fd_range *punch_ho
+ 	rcu_assign_pointer(newf->fdt, new_fdt);
+ 
+ 	return newf;
+-
+-out_release:
+-	kmem_cache_free(files_cachep, newf);
+-	return ERR_PTR(error);
+ }
+ 
+ static struct fdtable *close_files(struct files_struct * files)
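
The alloc_fdtable() rewrite above boils the sizing policy down to the three numbered rules in its comment. As arithmetic, they reduce to the sketch below, where sizeof(void *) == 4 stands in for IS_ENABLED(CONFIG_32BIT) and the sample sizes are arbitrary:

#include <stdio.h>

static unsigned int roundup_pow_of_two(unsigned int n)
{
	unsigned int r = 1;

	while (r < n)
		r <<= 1;
	return r;
}

int main(void)
{
	int is_32bit = sizeof(void *) == 4;  /* stand-in for IS_ENABLED(CONFIG_32BIT) */
	unsigned int wanted[] = { 65, 200, 300, 5000 };

	for (unsigned int i = 0; i < sizeof(wanted) / sizeof(wanted[0]); i++) {
		unsigned int nr = (is_32bit && wanted[i] < 256)
				? 256 : roundup_pow_of_two(wanted[i]);
		printf("slots_wanted=%u -> nr=%u\n", wanted[i], nr);
	}
	return 0;
}

On 64-bit, 65 slots round up to 128, i.e. the 1KiB fd array the comment mentions (128 * 8 bytes); the 256-slot floor on 32-bit yields the same 1KiB (256 * 4 bytes).
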
+diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
+index b3971e91e8eb80..38861ca04899f0 100644
+--- a/fs/jbd2/checkpoint.c
++++ b/fs/jbd2/checkpoint.c
+@@ -285,6 +285,7 @@ int jbd2_log_do_checkpoint(journal_t *journal)
+ 		retry:
+ 			if (batch_count)
+ 				__flush_batch(journal, &batch_count);
++			cond_resched();
+ 			spin_lock(&journal->j_list_lock);
+ 			goto restart;
+ 	}
+diff --git a/fs/namespace.c b/fs/namespace.c
+index bb1560b0d25c33..962fda4fa2467e 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -2683,6 +2683,19 @@ static int graft_tree(struct mount *mnt, struct mount *p, struct mountpoint *mp)
+ 	return attach_recursive_mnt(mnt, p, mp, 0);
+ }
+ 
++static int may_change_propagation(const struct mount *m)
++{
++	struct mnt_namespace *ns = m->mnt_ns;
++
++	// it must be mounted in some namespace
++	if (IS_ERR_OR_NULL(ns))         // is_mounted()
++		return -EINVAL;
++	// and the caller must be admin in userns of that namespace
++	if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN))
++		return -EPERM;
++	return 0;
++}
++
+ /*
+  * Sanity check the flags to change_mnt_propagation.
+  */
+@@ -2719,10 +2732,10 @@ static int do_change_type(struct path *path, int ms_flags)
+ 		return -EINVAL;
+ 
+ 	namespace_lock();
+-	if (!check_mnt(mnt)) {
+-		err = -EINVAL;
++	err = may_change_propagation(mnt);
++	if (err)
+ 		goto out_unlock;
+-	}
++
+ 	if (type == MS_SHARED) {
+ 		err = invent_group_ids(mnt, recurse);
+ 		if (err)
+@@ -3116,18 +3129,11 @@ static int do_set_group(struct path *from_path, struct path *to_path)
+ 
+ 	namespace_lock();
+ 
+-	err = -EINVAL;
+-	/* To and From must be mounted */
+-	if (!is_mounted(&from->mnt))
+-		goto out;
+-	if (!is_mounted(&to->mnt))
+-		goto out;
+-
+-	err = -EPERM;
+-	/* We should be allowed to modify mount namespaces of both mounts */
+-	if (!ns_capable(from->mnt_ns->user_ns, CAP_SYS_ADMIN))
++	err = may_change_propagation(from);
++	if (err)
+ 		goto out;
+-	if (!ns_capable(to->mnt_ns->user_ns, CAP_SYS_ADMIN))
++	err = may_change_propagation(to);
++	if (err)
+ 		goto out;
+ 
+ 	err = -EINVAL;
+diff --git a/fs/netfs/write_collect.c b/fs/netfs/write_collect.c
+index a968688a732342..c349867d74c34d 100644
+--- a/fs/netfs/write_collect.c
++++ b/fs/netfs/write_collect.c
+@@ -433,6 +433,7 @@ static void netfs_collect_write_results(struct netfs_io_request *wreq)
+ 			if (front->start + front->transferred > stream->collected_to) {
+ 				stream->collected_to = front->start + front->transferred;
+ 				stream->transferred = stream->collected_to - wreq->start;
++				stream->transferred_valid = true;
+ 				notes |= MADE_PROGRESS;
+ 			}
+ 			if (test_bit(NETFS_SREQ_FAILED, &front->flags)) {
+@@ -538,6 +539,7 @@ void netfs_write_collection_worker(struct work_struct *work)
+ 	struct netfs_io_request *wreq = container_of(work, struct netfs_io_request, work);
+ 	struct netfs_inode *ictx = netfs_inode(wreq->inode);
+ 	size_t transferred;
++	bool transferred_valid = false;
+ 	int s;
+ 
+ 	_enter("R=%x", wreq->debug_id);
+@@ -568,12 +570,16 @@ void netfs_write_collection_worker(struct work_struct *work)
+ 			netfs_put_request(wreq, false, netfs_rreq_trace_put_work);
+ 			return;
+ 		}
+-		if (stream->transferred < transferred)
++		if (stream->transferred_valid &&
++		    stream->transferred < transferred) {
+ 			transferred = stream->transferred;
++			transferred_valid = true;
++		}
+ 	}
+ 
+ 	/* Okay, declare that all I/O is complete. */
+-	wreq->transferred = transferred;
++	if (transferred_valid)
++		wreq->transferred = transferred;
+ 	trace_netfs_rreq(wreq, netfs_rreq_trace_write_done);
+ 
+ 	if (wreq->io_streams[1].active &&
+diff --git a/fs/netfs/write_issue.c b/fs/netfs/write_issue.c
+index bf6d507578e531..b7830a15ae40f3 100644
+--- a/fs/netfs/write_issue.c
++++ b/fs/netfs/write_issue.c
+@@ -115,12 +115,12 @@ struct netfs_io_request *netfs_create_write_req(struct address_space *mapping,
+ 	wreq->io_streams[0].prepare_write	= ictx->ops->prepare_write;
+ 	wreq->io_streams[0].issue_write		= ictx->ops->issue_write;
+ 	wreq->io_streams[0].collected_to	= start;
+-	wreq->io_streams[0].transferred		= LONG_MAX;
++	wreq->io_streams[0].transferred		= 0;
+ 
+ 	wreq->io_streams[1].stream_nr		= 1;
+ 	wreq->io_streams[1].source		= NETFS_WRITE_TO_CACHE;
+ 	wreq->io_streams[1].collected_to	= start;
+-	wreq->io_streams[1].transferred		= LONG_MAX;
++	wreq->io_streams[1].transferred		= 0;
+ 	if (fscache_resources_valid(&wreq->cache_resources)) {
+ 		wreq->io_streams[1].avail	= true;
+ 		wreq->io_streams[1].active	= true;
+diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
+index e27c07bd892905..82c3e2ca59a23e 100644
+--- a/fs/nfs/pagelist.c
++++ b/fs/nfs/pagelist.c
+@@ -253,13 +253,14 @@ nfs_page_group_unlock(struct nfs_page *req)
+ 	nfs_page_clear_headlock(req);
+ }
+ 
+-/*
+- * nfs_page_group_sync_on_bit_locked
++/**
++ * nfs_page_group_sync_on_bit_locked - Test if all requests have @bit set
++ * @req: request in page group
++ * @bit: PG_* bit that is used to sync page group
+  *
+  * must be called with page group lock held
+  */
+-static bool
+-nfs_page_group_sync_on_bit_locked(struct nfs_page *req, unsigned int bit)
++bool nfs_page_group_sync_on_bit_locked(struct nfs_page *req, unsigned int bit)
+ {
+ 	struct nfs_page *head = req->wb_head;
+ 	struct nfs_page *tmp;
+diff --git a/fs/nfs/write.c b/fs/nfs/write.c
+index 8ff8db09a1e066..2b6b3542405c30 100644
+--- a/fs/nfs/write.c
++++ b/fs/nfs/write.c
+@@ -153,20 +153,10 @@ nfs_page_set_inode_ref(struct nfs_page *req, struct inode *inode)
+ 	}
+ }
+ 
+-static int
+-nfs_cancel_remove_inode(struct nfs_page *req, struct inode *inode)
++static void nfs_cancel_remove_inode(struct nfs_page *req, struct inode *inode)
+ {
+-	int ret;
+-
+-	if (!test_bit(PG_REMOVE, &req->wb_flags))
+-		return 0;
+-	ret = nfs_page_group_lock(req);
+-	if (ret)
+-		return ret;
+ 	if (test_and_clear_bit(PG_REMOVE, &req->wb_flags))
+ 		nfs_page_set_inode_ref(req, inode);
+-	nfs_page_group_unlock(req);
+-	return 0;
+ }
+ 
+ /**
+@@ -585,19 +575,18 @@ static struct nfs_page *nfs_lock_and_join_requests(struct folio *folio)
+ 		}
+ 	}
+ 
++	ret = nfs_page_group_lock(head);
++	if (ret < 0)
++		goto out_unlock;
++
+ 	/* Ensure that nobody removed the request before we locked it */
+ 	if (head != folio->private) {
++		nfs_page_group_unlock(head);
+ 		nfs_unlock_and_release_request(head);
+ 		goto retry;
+ 	}
+ 
+-	ret = nfs_cancel_remove_inode(head, inode);
+-	if (ret < 0)
+-		goto out_unlock;
+-
+-	ret = nfs_page_group_lock(head);
+-	if (ret < 0)
+-		goto out_unlock;
++	nfs_cancel_remove_inode(head, inode);
+ 
+ 	/* lock each request in the page group */
+ 	for (subreq = head->wb_this_page;
+@@ -801,7 +790,8 @@ static void nfs_inode_remove_request(struct nfs_page *req)
+ {
+ 	struct nfs_inode *nfsi = NFS_I(nfs_page_to_inode(req));
+ 
+-	if (nfs_page_group_sync_on_bit(req, PG_REMOVE)) {
++	nfs_page_group_lock(req);
++	if (nfs_page_group_sync_on_bit_locked(req, PG_REMOVE)) {
+ 		struct folio *folio = nfs_page_to_folio(req->wb_head);
+ 		struct address_space *mapping = folio->mapping;
+ 
+@@ -812,6 +802,7 @@ static void nfs_inode_remove_request(struct nfs_page *req)
+ 		}
+ 		spin_unlock(&mapping->i_private_lock);
+ 	}
++	nfs_page_group_unlock(req);
+ 
+ 	if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags)) {
+ 		atomic_long_dec(&nfsi->nrequests);
+diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
+index 4388004a319d0c..a00af67cee98f0 100644
+--- a/fs/overlayfs/copy_up.c
++++ b/fs/overlayfs/copy_up.c
+@@ -780,7 +780,7 @@ static int ovl_copy_up_workdir(struct ovl_copy_up_ctx *c)
+ 		return err;
+ 
+ 	ovl_start_write(c->dentry);
+-	inode_lock(wdir);
++	inode_lock_nested(wdir, I_MUTEX_PARENT);
+ 	temp = ovl_create_temp(ofs, c->workdir, &cattr);
+ 	inode_unlock(wdir);
+ 	ovl_end_write(c->dentry);
+diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
+index 4bababee965a08..ab911a9672465c 100644
+--- a/fs/smb/client/smb2ops.c
++++ b/fs/smb/client/smb2ops.c
+@@ -4522,7 +4522,7 @@ smb3_init_transform_rq(struct TCP_Server_Info *server, int num_rqst,
+ 	for (int i = 1; i < num_rqst; i++) {
+ 		struct smb_rqst *old = &old_rq[i - 1];
+ 		struct smb_rqst *new = &new_rq[i];
+-		struct folio_queue *buffer;
++		struct folio_queue *buffer = NULL;
+ 		size_t size = iov_iter_count(&old->rq_iter);
+ 
+ 		orig_len += smb_rqst_len(server, old);
+diff --git a/fs/smb/server/connection.c b/fs/smb/server/connection.c
+index 9eb3e6010aa68a..1c37d1e9aef386 100644
+--- a/fs/smb/server/connection.c
++++ b/fs/smb/server/connection.c
+@@ -503,7 +503,8 @@ void ksmbd_conn_transport_destroy(void)
+ {
+ 	mutex_lock(&init_lock);
+ 	ksmbd_tcp_destroy();
+-	ksmbd_rdma_destroy();
++	ksmbd_rdma_stop_listening();
+ 	stop_sessions();
++	ksmbd_rdma_destroy();
+ 	mutex_unlock(&init_lock);
+ }
+diff --git a/fs/smb/server/connection.h b/fs/smb/server/connection.h
+index 31dd1caac1e8a8..2aa8084bb59302 100644
+--- a/fs/smb/server/connection.h
++++ b/fs/smb/server/connection.h
+@@ -46,7 +46,12 @@ struct ksmbd_conn {
+ 	struct mutex			srv_mutex;
+ 	int				status;
+ 	unsigned int			cli_cap;
+-	__be32				inet_addr;
++	union {
++		__be32			inet_addr;
++#if IS_ENABLED(CONFIG_IPV6)
++		u8			inet6_addr[16];
++#endif
++	};
+ 	char				*request_buf;
+ 	struct ksmbd_transport		*transport;
+ 	struct nls_table		*local_nls;
+diff --git a/fs/smb/server/oplock.c b/fs/smb/server/oplock.c
+index d7a8a580d01362..a04d5702820d07 100644
+--- a/fs/smb/server/oplock.c
++++ b/fs/smb/server/oplock.c
+@@ -1102,8 +1102,10 @@ void smb_send_parent_lease_break_noti(struct ksmbd_file *fp,
+ 			if (!atomic_inc_not_zero(&opinfo->refcount))
+ 				continue;
+ 
+-			if (ksmbd_conn_releasing(opinfo->conn))
++			if (ksmbd_conn_releasing(opinfo->conn)) {
++				opinfo_put(opinfo);
+ 				continue;
++			}
+ 
+ 			oplock_break(opinfo, SMB2_OPLOCK_LEVEL_NONE, NULL);
+ 			opinfo_put(opinfo);
+@@ -1139,8 +1141,11 @@ void smb_lazy_parent_lease_break_close(struct ksmbd_file *fp)
+ 			if (!atomic_inc_not_zero(&opinfo->refcount))
+ 				continue;
+ 
+-			if (ksmbd_conn_releasing(opinfo->conn))
++			if (ksmbd_conn_releasing(opinfo->conn)) {
++				opinfo_put(opinfo);
+ 				continue;
++			}
++
+ 			oplock_break(opinfo, SMB2_OPLOCK_LEVEL_NONE, NULL);
+ 			opinfo_put(opinfo);
+ 		}
+@@ -1343,8 +1348,10 @@ void smb_break_all_levII_oplock(struct ksmbd_work *work, struct ksmbd_file *fp,
+ 		if (!atomic_inc_not_zero(&brk_op->refcount))
+ 			continue;
+ 
+-		if (ksmbd_conn_releasing(brk_op->conn))
++		if (ksmbd_conn_releasing(brk_op->conn)) {
++			opinfo_put(brk_op);
+ 			continue;
++		}
+ 
+ 		if (brk_op->is_lease && (brk_op->o_lease->state &
+ 		    (~(SMB2_LEASE_READ_CACHING_LE |
+diff --git a/fs/smb/server/transport_rdma.c b/fs/smb/server/transport_rdma.c
+index 805c20f619b0b8..67c989e5ddaa79 100644
+--- a/fs/smb/server/transport_rdma.c
++++ b/fs/smb/server/transport_rdma.c
+@@ -2193,7 +2193,7 @@ int ksmbd_rdma_init(void)
+ 	return 0;
+ }
+ 
+-void ksmbd_rdma_destroy(void)
++void ksmbd_rdma_stop_listening(void)
+ {
+ 	if (!smb_direct_listener.cm_id)
+ 		return;
+@@ -2202,7 +2202,10 @@ void ksmbd_rdma_destroy(void)
+ 	rdma_destroy_id(smb_direct_listener.cm_id);
+ 
+ 	smb_direct_listener.cm_id = NULL;
++}
+ 
++void ksmbd_rdma_destroy(void)
++{
+ 	if (smb_direct_wq) {
+ 		destroy_workqueue(smb_direct_wq);
+ 		smb_direct_wq = NULL;
+diff --git a/fs/smb/server/transport_rdma.h b/fs/smb/server/transport_rdma.h
+index 77aee4e5c9dcd8..a2291b77488a15 100644
+--- a/fs/smb/server/transport_rdma.h
++++ b/fs/smb/server/transport_rdma.h
+@@ -54,13 +54,15 @@ struct smb_direct_data_transfer {
+ 
+ #ifdef CONFIG_SMB_SERVER_SMBDIRECT
+ int ksmbd_rdma_init(void);
++void ksmbd_rdma_stop_listening(void);
+ void ksmbd_rdma_destroy(void);
+ bool ksmbd_rdma_capable_netdev(struct net_device *netdev);
+ void init_smbd_max_io_size(unsigned int sz);
+ unsigned int get_smbd_max_read_write_size(void);
+ #else
+ static inline int ksmbd_rdma_init(void) { return 0; }
+-static inline int ksmbd_rdma_destroy(void) { return 0; }
++static inline void ksmbd_rdma_stop_listening(void) { }
++static inline void ksmbd_rdma_destroy(void) { }
+ static inline bool ksmbd_rdma_capable_netdev(struct net_device *netdev) { return false; }
+ static inline void init_smbd_max_io_size(unsigned int sz) { }
+ static inline unsigned int get_smbd_max_read_write_size(void) { return 0; }
+diff --git a/fs/smb/server/transport_tcp.c b/fs/smb/server/transport_tcp.c
+index d72588f33b9cd1..756833c91b140b 100644
+--- a/fs/smb/server/transport_tcp.c
++++ b/fs/smb/server/transport_tcp.c
+@@ -87,7 +87,14 @@ static struct tcp_transport *alloc_transport(struct socket *client_sk)
+ 		return NULL;
+ 	}
+ 
++#if IS_ENABLED(CONFIG_IPV6)
++	if (client_sk->sk->sk_family == AF_INET6)
++		memcpy(&conn->inet6_addr, &client_sk->sk->sk_v6_daddr, 16);
++	else
++		conn->inet_addr = inet_sk(client_sk->sk)->inet_daddr;
++#else
+ 	conn->inet_addr = inet_sk(client_sk->sk)->inet_daddr;
++#endif
+ 	conn->transport = KSMBD_TRANS(t);
+ 	KSMBD_TRANS(t)->conn = conn;
+ 	KSMBD_TRANS(t)->ops = &ksmbd_tcp_transport_ops;
+@@ -231,7 +238,6 @@ static int ksmbd_kthread_fn(void *p)
+ {
+ 	struct socket *client_sk = NULL;
+ 	struct interface *iface = (struct interface *)p;
+-	struct inet_sock *csk_inet;
+ 	struct ksmbd_conn *conn;
+ 	int ret;
+ 
+@@ -254,13 +260,27 @@ static int ksmbd_kthread_fn(void *p)
+ 		/*
+ 		 * Limits repeated connections from clients with the same IP.
+ 		 */
+-		csk_inet = inet_sk(client_sk->sk);
+ 		down_read(&conn_list_lock);
+ 		list_for_each_entry(conn, &conn_list, conns_list)
+-			if (csk_inet->inet_daddr == conn->inet_addr) {
++#if IS_ENABLED(CONFIG_IPV6)
++			if (client_sk->sk->sk_family == AF_INET6) {
++				if (memcmp(&client_sk->sk->sk_v6_daddr,
++					   &conn->inet6_addr, 16) == 0) {
++					ret = -EAGAIN;
++					break;
++				}
++			} else if (inet_sk(client_sk->sk)->inet_daddr ==
++				 conn->inet_addr) {
++				ret = -EAGAIN;
++				break;
++			}
++#else
++			if (inet_sk(client_sk->sk)->inet_daddr ==
++			    conn->inet_addr) {
+ 				ret = -EAGAIN;
+ 				break;
+ 			}
++#endif
+ 		up_read(&conn_list_lock);
+ 		if (ret == -EAGAIN)
+ 			continue;
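
The ksmbd hunks above (connection.h and transport_tcp.c) keep the peer address in a union and branch on the socket family when limiting repeated connections from one client. A minimal userspace rendering of that comparison, with illustrative type names:

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>

struct peer_addr {
	int family;                  /* AF_INET or AF_INET6 */
	union {
		struct in_addr  v4;  /* 4 bytes  */
		struct in6_addr v6;  /* 16 bytes */
	};
};

static int same_peer(const struct peer_addr *a, const struct peer_addr *b)
{
	if (a->family != b->family)
		return 0;
	if (a->family == AF_INET6)
		return memcmp(&a->v6, &b->v6, sizeof(a->v6)) == 0;
	return memcmp(&a->v4, &b->v4, sizeof(a->v4)) == 0;
}

Before the change only the IPv4 field was stored and compared, so the repeated-connection limit could not match IPv6 peers correctly.
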
+diff --git a/fs/splice.c b/fs/splice.c
+index 38f8c94267315d..ed8177f6d620f9 100644
+--- a/fs/splice.c
++++ b/fs/splice.c
+@@ -744,6 +744,9 @@ iter_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
+ 		sd.pos = kiocb.ki_pos;
+ 		if (ret <= 0)
+ 			break;
++		WARN_ONCE(ret > sd.total_len - left,
++			  "Splice Exceeded! ret=%zd tot=%zu left=%zu\n",
++			  ret, sd.total_len, left);
+ 
+ 		sd.num_spliced += ret;
+ 		sd.total_len -= ret;
+diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c
+index 3a27d4268b3c4a..494d21777ed00b 100644
+--- a/fs/squashfs/super.c
++++ b/fs/squashfs/super.c
+@@ -187,10 +187,15 @@ static int squashfs_fill_super(struct super_block *sb, struct fs_context *fc)
+ 	unsigned short flags;
+ 	unsigned int fragments;
+ 	u64 lookup_table_start, xattr_id_table_start, next_table;
+-	int err;
++	int err, devblksize = sb_min_blocksize(sb, SQUASHFS_DEVBLK_SIZE);
+ 
+ 	TRACE("Entered squashfs_fill_superblock\n");
+ 
++	if (!devblksize) {
++		errorf(fc, "squashfs: unable to set blocksize\n");
++		return -EINVAL;
++	}
++
+ 	sb->s_fs_info = kzalloc(sizeof(*msblk), GFP_KERNEL);
+ 	if (sb->s_fs_info == NULL) {
+ 		ERROR("Failed to allocate squashfs_sb_info\n");
+@@ -201,12 +206,7 @@ static int squashfs_fill_super(struct super_block *sb, struct fs_context *fc)
+ 
+ 	msblk->panic_on_errors = (opts->errors == Opt_errors_panic);
+ 
+-	msblk->devblksize = sb_min_blocksize(sb, SQUASHFS_DEVBLK_SIZE);
+-	if (!msblk->devblksize) {
+-		errorf(fc, "squashfs: unable to set blocksize\n");
+-		return -EINVAL;
+-	}
+-
++	msblk->devblksize = devblksize;
+ 	msblk->devblksize_log2 = ffz(~msblk->devblksize);
+ 
+ 	mutex_init(&msblk->meta_index_mutex);
+diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
+index c0757ab994957b..dc395cd2f33b04 100644
+--- a/fs/xfs/xfs_itable.c
++++ b/fs/xfs/xfs_itable.c
+@@ -430,11 +430,15 @@ xfs_inumbers(
+ 		.breq		= breq,
+ 	};
+ 	struct xfs_trans	*tp;
++	unsigned int		iwalk_flags = 0;
+ 	int			error = 0;
+ 
+ 	if (xfs_bulkstat_already_done(breq->mp, breq->startino))
+ 		return 0;
+ 
++	if (breq->flags & XFS_IBULK_SAME_AG)
++		iwalk_flags |= XFS_IWALK_SAME_AG;
++
+ 	/*
+ 	 * Grab an empty transaction so that we can use its recursive buffer
+ 	 * locking abilities to detect cycles in the inobt without deadlocking.
+@@ -443,7 +447,7 @@ xfs_inumbers(
+ 	if (error)
+ 		goto out;
+ 
+-	error = xfs_inobt_walk(breq->mp, tp, breq->startino, breq->flags,
++	error = xfs_inobt_walk(breq->mp, tp, breq->startino, iwalk_flags,
+ 			xfs_inumbers_walk, breq->icount, &ic);
+ 	xfs_trans_cancel(tp);
+ out:
+diff --git a/include/drm/drm_format_helper.h b/include/drm/drm_format_helper.h
+index 428d81afe2151a..2de9974992c3bd 100644
+--- a/include/drm/drm_format_helper.h
++++ b/include/drm/drm_format_helper.h
+@@ -96,9 +96,21 @@ void drm_fb_xrgb8888_to_rgba5551(struct iosys_map *dst, const unsigned int *dst_
+ void drm_fb_xrgb8888_to_rgb888(struct iosys_map *dst, const unsigned int *dst_pitch,
+ 			       const struct iosys_map *src, const struct drm_framebuffer *fb,
+ 			       const struct drm_rect *clip, struct drm_format_conv_state *state);
++void drm_fb_xrgb8888_to_bgr888(struct iosys_map *dst, const unsigned int *dst_pitch,
++			       const struct iosys_map *src, const struct drm_framebuffer *fb,
++			       const struct drm_rect *clip, struct drm_format_conv_state *state);
+ void drm_fb_xrgb8888_to_argb8888(struct iosys_map *dst, const unsigned int *dst_pitch,
+ 				 const struct iosys_map *src, const struct drm_framebuffer *fb,
+ 				 const struct drm_rect *clip, struct drm_format_conv_state *state);
++void drm_fb_xrgb8888_to_abgr8888(struct iosys_map *dst, const unsigned int *dst_pitch,
++				 const struct iosys_map *src, const struct drm_framebuffer *fb,
++				 const struct drm_rect *clip, struct drm_format_conv_state *state);
++void drm_fb_xrgb8888_to_xbgr8888(struct iosys_map *dst, const unsigned int *dst_pitch,
++				 const struct iosys_map *src, const struct drm_framebuffer *fb,
++				 const struct drm_rect *clip, struct drm_format_conv_state *state);
++void drm_fb_xrgb8888_to_bgrx8888(struct iosys_map *dst, const unsigned int *dst_pitch,
++				 const struct iosys_map *src, const struct drm_framebuffer *fb,
++				 const struct drm_rect *clip, struct drm_format_conv_state *state);
+ void drm_fb_xrgb8888_to_xrgb2101010(struct iosys_map *dst, const unsigned int *dst_pitch,
+ 				    const struct iosys_map *src, const struct drm_framebuffer *fb,
+ 				    const struct drm_rect *clip,
+diff --git a/include/linux/call_once.h b/include/linux/call_once.h
+index 6261aa0b3fb00d..13cd6469e7e56e 100644
+--- a/include/linux/call_once.h
++++ b/include/linux/call_once.h
+@@ -26,20 +26,41 @@ do {									\
+ 	__once_init((once), #once, &__key);				\
+ } while (0)
+ 
+-static inline void call_once(struct once *once, void (*cb)(struct once *))
++/*
++ * call_once - Ensure a function has been called exactly once
++ *
++ * @once: Tracking struct
++ * @cb: Function to be called
++ *
++ * If @once has never completed successfully before, call @cb and, if
++ * it returns a zero or positive value, mark @once as completed.  Return
++ * the value returned by @cb.
++ *
++ * If @once has completed successfully before, return 0.
++ *
++ * The call to @cb is implicitly surrounded by a mutex, though for
++ * efficiency the function avoids taking it after the first call.
++ */
++static inline int call_once(struct once *once, int (*cb)(struct once *))
+ {
+-        /* Pairs with atomic_set_release() below.  */
+-        if (atomic_read_acquire(&once->state) == ONCE_COMPLETED)
+-                return;
+-
+-        guard(mutex)(&once->lock);
+-        WARN_ON(atomic_read(&once->state) == ONCE_RUNNING);
+-        if (atomic_read(&once->state) != ONCE_NOT_STARTED)
+-                return;
+-
+-        atomic_set(&once->state, ONCE_RUNNING);
+-        cb(once);
+-        atomic_set_release(&once->state, ONCE_COMPLETED);
++	int r, state;
++
++	/* Pairs with atomic_set_release() below.  */
++	if (atomic_read_acquire(&once->state) == ONCE_COMPLETED)
++		return 0;
++
++	guard(mutex)(&once->lock);
++	state = atomic_read(&once->state);
++	if (unlikely(state != ONCE_NOT_STARTED))
++		return WARN_ON_ONCE(state != ONCE_COMPLETED) ? -EINVAL : 0;
++
++	atomic_set(&once->state, ONCE_RUNNING);
++	r = cb(once);
++	if (r < 0)
++		atomic_set(&once->state, ONCE_NOT_STARTED);
++	else
++		atomic_set_release(&once->state, ONCE_COMPLETED);
++	return r;
+ }
+ 
+ #endif /* _LINUX_CALL_ONCE_H */
+diff --git a/include/linux/compiler.h b/include/linux/compiler.h
+index b15911e201bf95..d18542d7e17bf9 100644
+--- a/include/linux/compiler.h
++++ b/include/linux/compiler.h
+@@ -223,14 +223,6 @@ static inline void *offset_to_ptr(const int *off)
+ #define __ADDRESSABLE(sym) \
+ 	___ADDRESSABLE(sym, __section(".discard.addressable"))
+ 
+-#define __ADDRESSABLE_ASM(sym)						\
+-	.pushsection .discard.addressable,"aw";				\
+-	.align ARCH_SEL(8,4);						\
+-	ARCH_SEL(.quad, .long) __stringify(sym);			\
+-	.popsection;
+-
+-#define __ADDRESSABLE_ASM_STR(sym) __stringify(__ADDRESSABLE_ASM(sym))
+-
+ /* &a[0] degrades to a pointer: a different type from an array */
+ #define __must_be_array(a)	BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
+ 
+diff --git a/include/linux/iosys-map.h b/include/linux/iosys-map.h
+index 4696abfd311cc1..3e85afe794c0aa 100644
+--- a/include/linux/iosys-map.h
++++ b/include/linux/iosys-map.h
+@@ -264,12 +264,7 @@ static inline bool iosys_map_is_set(const struct iosys_map *map)
+  */
+ static inline void iosys_map_clear(struct iosys_map *map)
+ {
+-	if (map->is_iomem) {
+-		map->vaddr_iomem = NULL;
+-		map->is_iomem = false;
+-	} else {
+-		map->vaddr = NULL;
+-	}
++	memset(map, 0, sizeof(*map));
+ }
+ 
+ /**
+diff --git a/include/linux/iov_iter.h b/include/linux/iov_iter.h
+index c4aa58032faf87..f9a17fbbd3980b 100644
+--- a/include/linux/iov_iter.h
++++ b/include/linux/iov_iter.h
+@@ -160,7 +160,7 @@ size_t iterate_folioq(struct iov_iter *iter, size_t len, void *priv, void *priv2
+ 
+ 	do {
+ 		struct folio *folio = folioq_folio(folioq, slot);
+-		size_t part, remain, consumed;
++		size_t part, remain = 0, consumed;
+ 		size_t fsize;
+ 		void *base;
+ 
+@@ -168,14 +168,16 @@ size_t iterate_folioq(struct iov_iter *iter, size_t len, void *priv, void *priv2
+ 			break;
+ 
+ 		fsize = folioq_folio_size(folioq, slot);
+-		base = kmap_local_folio(folio, skip);
+-		part = umin(len, PAGE_SIZE - skip % PAGE_SIZE);
+-		remain = step(base, progress, part, priv, priv2);
+-		kunmap_local(base);
+-		consumed = part - remain;
+-		len -= consumed;
+-		progress += consumed;
+-		skip += consumed;
++		if (skip < fsize) {
++			base = kmap_local_folio(folio, skip);
++			part = umin(len, PAGE_SIZE - skip % PAGE_SIZE);
++			remain = step(base, progress, part, priv, priv2);
++			kunmap_local(base);
++			consumed = part - remain;
++			len -= consumed;
++			progress += consumed;
++			skip += consumed;
++		}
+ 		if (skip >= fsize) {
+ 			skip = 0;
+ 			slot++;
+diff --git a/include/linux/kcov.h b/include/linux/kcov.h
+index 75a2fb8b16c329..0143358874b07b 100644
+--- a/include/linux/kcov.h
++++ b/include/linux/kcov.h
+@@ -57,47 +57,21 @@ static inline void kcov_remote_start_usb(u64 id)
+ 
+ /*
+  * The softirq flavor of kcov_remote_*() functions is introduced as a temporary
+- * workaround for KCOV's lack of nested remote coverage sections support.
+- *
+- * Adding support is tracked in https://bugzilla.kernel.org/show_bug.cgi?id=210337.
+- *
+- * kcov_remote_start_usb_softirq():
+- *
+- * 1. Only collects coverage when called in the softirq context. This allows
+- *    avoiding nested remote coverage collection sections in the task context.
+- *    For example, USB/IP calls usb_hcd_giveback_urb() in the task context
+- *    within an existing remote coverage collection section. Thus, KCOV should
+- *    not attempt to start collecting coverage within the coverage collection
+- *    section in __usb_hcd_giveback_urb() in this case.
+- *
+- * 2. Disables interrupts for the duration of the coverage collection section.
+- *    This allows avoiding nested remote coverage collection sections in the
+- *    softirq context (a softirq might occur during the execution of a work in
+- *    the BH workqueue, which runs with in_serving_softirq() > 0).
+- *    For example, usb_giveback_urb_bh() runs in the BH workqueue with
+- *    interrupts enabled, so __usb_hcd_giveback_urb() might be interrupted in
+- *    the middle of its remote coverage collection section, and the interrupt
+- *    handler might invoke __usb_hcd_giveback_urb() again.
++ * workaround for kcov's lack of nested remote coverage sections support in
++ * task context. Adding support for nested sections is tracked in:
++ * https://bugzilla.kernel.org/show_bug.cgi?id=210337
+  */
+ 
+-static inline unsigned long kcov_remote_start_usb_softirq(u64 id)
++static inline void kcov_remote_start_usb_softirq(u64 id)
+ {
+-	unsigned long flags = 0;
+-
+-	if (in_serving_softirq()) {
+-		local_irq_save(flags);
++	if (in_serving_softirq() && !in_hardirq())
+ 		kcov_remote_start_usb(id);
+-	}
+-
+-	return flags;
+ }
+ 
+-static inline void kcov_remote_stop_softirq(unsigned long flags)
++static inline void kcov_remote_stop_softirq(void)
+ {
+-	if (in_serving_softirq()) {
++	if (in_serving_softirq() && !in_hardirq())
+ 		kcov_remote_stop();
+-		local_irq_restore(flags);
+-	}
+ }
+ 
+ #ifdef CONFIG_64BIT
+@@ -131,11 +105,8 @@ static inline u64 kcov_common_handle(void)
+ }
+ static inline void kcov_remote_start_common(u64 id) {}
+ static inline void kcov_remote_start_usb(u64 id) {}
+-static inline unsigned long kcov_remote_start_usb_softirq(u64 id)
+-{
+-	return 0;
+-}
+-static inline void kcov_remote_stop_softirq(unsigned long flags) {}
++static inline void kcov_remote_start_usb_softirq(u64 id) {}
++static inline void kcov_remote_stop_softirq(void) {}
+ 
+ #endif /* CONFIG_KCOV */
+ #endif /* _LINUX_KCOV_H */
+diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
+index 512e25c416ae29..2b1a816e4d59c3 100644
+--- a/include/linux/mlx5/mlx5_ifc.h
++++ b/include/linux/mlx5/mlx5_ifc.h
+@@ -10358,8 +10358,16 @@ struct mlx5_ifc_pifr_reg_bits {
+ 	u8         port_filter_update_en[8][0x20];
+ };
+ 
++enum {
++	MLX5_BUF_OWNERSHIP_UNKNOWN	= 0x0,
++	MLX5_BUF_OWNERSHIP_FW_OWNED	= 0x1,
++	MLX5_BUF_OWNERSHIP_SW_OWNED	= 0x2,
++};
++
+ struct mlx5_ifc_pfcc_reg_bits {
+-	u8         reserved_at_0[0x8];
++	u8         reserved_at_0[0x4];
++	u8	   buf_ownership[0x2];
++	u8	   reserved_at_6[0x2];
+ 	u8         local_port[0x8];
+ 	u8         reserved_at_10[0xb];
+ 	u8         ppan_mask_n[0x1];
+@@ -10491,7 +10499,9 @@ struct mlx5_ifc_mtutc_reg_bits {
+ struct mlx5_ifc_pcam_enhanced_features_bits {
+ 	u8         reserved_at_0[0x48];
+ 	u8         fec_100G_per_lane_in_pplm[0x1];
+-	u8         reserved_at_49[0x1f];
++	u8         reserved_at_49[0xa];
++	u8	   buffer_ownership[0x1];
++	u8	   resereved_at_54[0x14];
+ 	u8         fec_50G_per_lane_in_pplm[0x1];
+ 	u8         reserved_at_69[0x4];
+ 	u8         rx_icrc_encapsulated_counter[0x1];
+diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h
+index e68d42b8ce652c..e288569225bdce 100644
+--- a/include/linux/mlx5/port.h
++++ b/include/linux/mlx5/port.h
+@@ -61,15 +61,6 @@ enum mlx5_an_status {
+ #define MLX5_EEPROM_PAGE_LENGTH		256
+ #define MLX5_EEPROM_HIGH_PAGE_LENGTH	128
+ 
+-struct mlx5_module_eeprom_query_params {
+-	u16 size;
+-	u16 offset;
+-	u16 i2c_address;
+-	u32 page;
+-	u32 bank;
+-	u32 module_number;
+-};
+-
+ enum mlx5e_link_mode {
+ 	MLX5E_1000BASE_CX_SGMII	 = 0,
+ 	MLX5E_1000BASE_KX	 = 1,
+@@ -142,12 +133,6 @@ enum mlx5_ptys_width {
+ 	MLX5_PTYS_WIDTH_12X	= 1 << 4,
+ };
+ 
+-struct mlx5_port_eth_proto {
+-	u32 cap;
+-	u32 admin;
+-	u32 oper;
+-};
+-
+ #define MLX5E_PROT_MASK(link_mode) (1U << link_mode)
+ #define MLX5_GET_ETH_PROTO(reg, out, ext, field)	\
+ 	(ext ? MLX5_GET(reg, out, ext_##field) :	\
+@@ -160,14 +145,7 @@ int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
+ 
+ int mlx5_query_ib_port_oper(struct mlx5_core_dev *dev, u16 *link_width_oper,
+ 			    u16 *proto_oper, u8 local_port, u8 plane_index);
+-void mlx5_toggle_port_link(struct mlx5_core_dev *dev);
+-int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
+-			       enum mlx5_port_status status);
+-int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
+-				 enum mlx5_port_status *status);
+-int mlx5_set_port_beacon(struct mlx5_core_dev *dev, u16 beacon_duration);
+-
+-int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port);
++
+ void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu, u8 port);
+ void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu,
+ 			      u8 port);
+@@ -175,65 +153,4 @@ void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu,
+ int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
+ 			      u8 *vl_hw_cap, u8 local_port);
+ 
+-int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause);
+-int mlx5_query_port_pause(struct mlx5_core_dev *dev,
+-			  u32 *rx_pause, u32 *tx_pause);
+-
+-int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx);
+-int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx,
+-			u8 *pfc_en_rx);
+-
+-int mlx5_set_port_stall_watermark(struct mlx5_core_dev *dev,
+-				  u16 stall_critical_watermark,
+-				  u16 stall_minor_watermark);
+-int mlx5_query_port_stall_watermark(struct mlx5_core_dev *dev,
+-				    u16 *stall_critical_watermark, u16 *stall_minor_watermark);
+-
+-int mlx5_max_tc(struct mlx5_core_dev *mdev);
+-
+-int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc);
+-int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev,
+-			    u8 prio, u8 *tc);
+-int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group);
+-int mlx5_query_port_tc_group(struct mlx5_core_dev *mdev,
+-			     u8 tc, u8 *tc_group);
+-int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw);
+-int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev,
+-				u8 tc, u8 *bw_pct);
+-int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev,
+-				    u8 *max_bw_value,
+-				    u8 *max_bw_unit);
+-int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev,
+-				   u8 *max_bw_value,
+-				   u8 *max_bw_unit);
+-int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode);
+-int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode);
+-
+-int mlx5_query_ports_check(struct mlx5_core_dev *mdev, u32 *out, int outlen);
+-int mlx5_set_ports_check(struct mlx5_core_dev *mdev, u32 *in, int inlen);
+-int mlx5_set_port_fcs(struct mlx5_core_dev *mdev, u8 enable);
+-void mlx5_query_port_fcs(struct mlx5_core_dev *mdev, bool *supported,
+-			 bool *enabled);
+-int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
+-			     u16 offset, u16 size, u8 *data);
+-int mlx5_query_module_eeprom_by_page(struct mlx5_core_dev *dev,
+-				     struct mlx5_module_eeprom_query_params *params, u8 *data);
+-
+-int mlx5_query_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *out);
+-int mlx5_set_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *in);
+-
+-int mlx5_set_trust_state(struct mlx5_core_dev *mdev, u8 trust_state);
+-int mlx5_query_trust_state(struct mlx5_core_dev *mdev, u8 *trust_state);
+-int mlx5_set_dscp2prio(struct mlx5_core_dev *mdev, u8 dscp, u8 prio);
+-int mlx5_query_dscp2prio(struct mlx5_core_dev *mdev, u8 *dscp2prio);
+-
+-int mlx5_port_query_eth_proto(struct mlx5_core_dev *dev, u8 port, bool ext,
+-			      struct mlx5_port_eth_proto *eproto);
+-bool mlx5_ptys_ext_supported(struct mlx5_core_dev *mdev);
+-u32 mlx5_port_ptys2speed(struct mlx5_core_dev *mdev, u32 eth_proto_oper,
+-			 bool force_legacy);
+-u32 mlx5_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed,
+-			      bool force_legacy);
+-int mlx5_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
+-
+ #endif /* __MLX5_PORT_H__ */
+diff --git a/include/linux/netfs.h b/include/linux/netfs.h
+index 474481ee8b7c29..83d313718cd509 100644
+--- a/include/linux/netfs.h
++++ b/include/linux/netfs.h
+@@ -150,6 +150,7 @@ struct netfs_io_stream {
+ 	bool			active;		/* T if stream is active */
+ 	bool			need_retry;	/* T if this stream needs retrying */
+ 	bool			failed;		/* T if this stream failed */
++	bool			transferred_valid; /* T if ->transferred is valid */
+ };
+ 
+ /*
+diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
+index 169b4ae30ff479..9aed39abc94bc3 100644
+--- a/include/linux/nfs_page.h
++++ b/include/linux/nfs_page.h
+@@ -160,6 +160,7 @@ extern void nfs_join_page_group(struct nfs_page *head,
+ extern int nfs_page_group_lock(struct nfs_page *);
+ extern void nfs_page_group_unlock(struct nfs_page *);
+ extern bool nfs_page_group_sync_on_bit(struct nfs_page *, unsigned int);
++extern bool nfs_page_group_sync_on_bit_locked(struct nfs_page *, unsigned int);
+ extern	int nfs_page_set_headlock(struct nfs_page *req);
+ extern void nfs_page_clear_headlock(struct nfs_page *req);
+ extern bool nfs_async_iocounter_wait(struct rpc_task *, struct nfs_lock_context *);
+diff --git a/include/net/bond_3ad.h b/include/net/bond_3ad.h
+index 2053cd8e788a73..dba369a2cf27ef 100644
+--- a/include/net/bond_3ad.h
++++ b/include/net/bond_3ad.h
+@@ -307,6 +307,7 @@ int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
+ 			 struct slave *slave);
+ int bond_3ad_set_carrier(struct bonding *bond);
+ void bond_3ad_update_lacp_rate(struct bonding *bond);
++void bond_3ad_update_lacp_active(struct bonding *bond);
+ void bond_3ad_update_ad_actor_settings(struct bonding *bond);
+ int bond_3ad_stats_fill(struct sk_buff *skb, struct bond_3ad_stats *stats);
+ size_t bond_3ad_stats_size(void);
+diff --git a/include/uapi/linux/pfrut.h b/include/uapi/linux/pfrut.h
+index 42fa15f8310d6b..b77d5c210c2620 100644
+--- a/include/uapi/linux/pfrut.h
++++ b/include/uapi/linux/pfrut.h
+@@ -89,6 +89,7 @@ struct pfru_payload_hdr {
+ 	__u32 hw_ver;
+ 	__u32 rt_ver;
+ 	__u8 platform_id[16];
++	__u32 svn_ver;
+ };
+ 
+ enum pfru_dsm_status {
+diff --git a/io_uring/futex.c b/io_uring/futex.c
+index 01f044f89f8fa9..a3d2b700b480c9 100644
+--- a/io_uring/futex.c
++++ b/io_uring/futex.c
+@@ -337,6 +337,7 @@ int io_futex_wait(struct io_kiocb *req, unsigned int issue_flags)
+ 		goto done_unlock;
+ 	}
+ 
++	req->flags |= REQ_F_ASYNC_DATA;
+ 	req->async_data = ifd;
+ 	ifd->q = futex_q_init;
+ 	ifd->q.bitset = iof->futex_mask;
+@@ -359,6 +360,8 @@ int io_futex_wait(struct io_kiocb *req, unsigned int issue_flags)
+ 	if (ret < 0)
+ 		req_set_fail(req);
+ 	io_req_set_res(req, ret, 0);
++	req->async_data = NULL;
++	req->flags &= ~REQ_F_ASYNC_DATA;
+ 	kfree(ifd);
+ 	return IOU_OK;
+ }
+diff --git a/io_uring/net.c b/io_uring/net.c
+index 356f95c33aa281..b7c93765fcff81 100644
+--- a/io_uring/net.c
++++ b/io_uring/net.c
+@@ -498,6 +498,15 @@ static int io_bundle_nbufs(struct io_async_msghdr *kmsg, int ret)
+ 	return nbufs;
+ }
+ 
++static int io_net_kbuf_recyle(struct io_kiocb *req,
++			      struct io_async_msghdr *kmsg, int len)
++{
++	req->flags |= REQ_F_BL_NO_RECYCLE;
++	if (req->flags & REQ_F_BUFFERS_COMMIT)
++		io_kbuf_commit(req, req->buf_list, len, io_bundle_nbufs(kmsg, len));
++	return -EAGAIN;
++}
++
+ static inline bool io_send_finish(struct io_kiocb *req, int *ret,
+ 				  struct io_async_msghdr *kmsg,
+ 				  unsigned issue_flags)
+@@ -566,8 +575,7 @@ int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
+ 			kmsg->msg.msg_controllen = 0;
+ 			kmsg->msg.msg_control = NULL;
+ 			sr->done_io += ret;
+-			req->flags |= REQ_F_BL_NO_RECYCLE;
+-			return -EAGAIN;
++			return io_net_kbuf_recyle(req, kmsg, ret);
+ 		}
+ 		if (ret == -ERESTARTSYS)
+ 			ret = -EINTR;
+@@ -664,8 +672,7 @@ int io_send(struct io_kiocb *req, unsigned int issue_flags)
+ 			sr->len -= ret;
+ 			sr->buf += ret;
+ 			sr->done_io += ret;
+-			req->flags |= REQ_F_BL_NO_RECYCLE;
+-			return -EAGAIN;
++			return io_net_kbuf_recyle(req, kmsg, ret);
+ 		}
+ 		if (ret == -ERESTARTSYS)
+ 			ret = -EINTR;
+@@ -1068,8 +1075,7 @@ int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
+ 		}
+ 		if (ret > 0 && io_net_retry(sock, flags)) {
+ 			sr->done_io += ret;
+-			req->flags |= REQ_F_BL_NO_RECYCLE;
+-			return -EAGAIN;
++			return io_net_kbuf_recyle(req, kmsg, ret);
+ 		}
+ 		if (ret == -ERESTARTSYS)
+ 			ret = -EINTR;
+@@ -1211,8 +1217,7 @@ int io_recv(struct io_kiocb *req, unsigned int issue_flags)
+ 			sr->len -= ret;
+ 			sr->buf += ret;
+ 			sr->done_io += ret;
+-			req->flags |= REQ_F_BL_NO_RECYCLE;
+-			return -EAGAIN;
++			return io_net_kbuf_recyle(req, kmsg, ret);
+ 		}
+ 		if (ret == -ERESTARTSYS)
+ 			ret = -EINTR;
+@@ -1441,8 +1446,7 @@ int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
+ 			zc->len -= ret;
+ 			zc->buf += ret;
+ 			zc->done_io += ret;
+-			req->flags |= REQ_F_BL_NO_RECYCLE;
+-			return -EAGAIN;
++			return io_net_kbuf_recyle(req, kmsg, ret);
+ 		}
+ 		if (ret == -ERESTARTSYS)
+ 			ret = -EINTR;
+@@ -1502,8 +1506,7 @@ int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
+ 
+ 		if (ret > 0 && io_net_retry(sock, flags)) {
+ 			sr->done_io += ret;
+-			req->flags |= REQ_F_BL_NO_RECYCLE;
+-			return -EAGAIN;
++			return io_net_kbuf_recyle(req, kmsg, ret);
+ 		}
+ 		if (ret == -ERESTARTSYS)
+ 			ret = -EINTR;
+diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
+index d1fb4bfbbd4c3b..25f9565f798d45 100644
+--- a/kernel/cgroup/cpuset.c
++++ b/kernel/cgroup/cpuset.c
+@@ -267,7 +267,7 @@ static inline void check_insane_mems_config(nodemask_t *nodes)
+ {
+ 	if (!cpusets_insane_config() &&
+ 		movable_only_nodes(nodes)) {
+-		static_branch_enable(&cpusets_insane_config_key);
++		static_branch_enable_cpuslocked(&cpusets_insane_config_key);
+ 		pr_info("Unsupported (movable nodes only) cpuset configuration detected (nmask=%*pbl)!\n"
+ 			"Cpuset allocations might fail even with a lot of memory available.\n",
+ 			nodemask_pr_args(nodes));
+@@ -1771,7 +1771,7 @@ static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
+ 			if (is_partition_valid(cs))
+ 				adding = cpumask_and(tmp->addmask,
+ 						xcpus, parent->effective_xcpus);
+-		} else if (is_partition_invalid(cs) &&
++		} else if (is_partition_invalid(cs) && !cpumask_empty(xcpus) &&
+ 			   cpumask_subset(xcpus, parent->effective_xcpus)) {
+ 			struct cgroup_subsys_state *css;
+ 			struct cpuset *child;
+@@ -3792,9 +3792,10 @@ static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp)
+ 		partcmd = partcmd_invalidate;
+ 	/*
+ 	 * On the other hand, an invalid partition root may be transitioned
+-	 * back to a regular one.
++	 * back to a regular one with a non-empty effective xcpus.
+ 	 */
+-	else if (is_partition_valid(parent) && is_partition_invalid(cs))
++	else if (is_partition_valid(parent) && is_partition_invalid(cs) &&
++		 !cpumask_empty(cs->effective_xcpus))
+ 		partcmd = partcmd_update;
+ 
+ 	if (partcmd >= 0) {
+diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
+index c801dd20c63d93..563a7dc2ece6f5 100644
+--- a/kernel/sched/ext.c
++++ b/kernel/sched/ext.c
+@@ -5220,6 +5220,13 @@ static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link)
+ 	for_each_possible_cpu(cpu)
+ 		cpu_rq(cpu)->scx.cpuperf_target = SCX_CPUPERF_ONE;
+ 
++	if (!ops->update_idle || (ops->flags & SCX_OPS_KEEP_BUILTIN_IDLE)) {
++		reset_idle_masks();
++		static_branch_enable(&scx_builtin_idle_enabled);
++	} else {
++		static_branch_disable(&scx_builtin_idle_enabled);
++	}
++
+ 	/*
+ 	 * Keep CPUs stable during enable so that the BPF scheduler can track
+ 	 * online CPUs by watching ->on/offline_cpu() after ->init().
+@@ -5287,13 +5294,6 @@ static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link)
+ 	if (scx_ops.cpu_acquire || scx_ops.cpu_release)
+ 		static_branch_enable(&scx_ops_cpu_preempt);
+ 
+-	if (!ops->update_idle || (ops->flags & SCX_OPS_KEEP_BUILTIN_IDLE)) {
+-		reset_idle_masks();
+-		static_branch_enable(&scx_builtin_idle_enabled);
+-	} else {
+-		static_branch_disable(&scx_builtin_idle_enabled);
+-	}
+-
+ 	/*
+ 	 * Lock out forks, cgroup on/offlining and moves before opening the
+ 	 * floodgate so that they don't wander into the operations prematurely.
+@@ -5372,6 +5372,9 @@ static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link)
+ 			__setscheduler_class(p->policy, p->prio);
+ 		struct sched_enq_and_set_ctx ctx;
+ 
++		if (!tryget_task_struct(p))
++			continue;
++
+ 		if (old_class != new_class && p->se.sched_delayed)
+ 			dequeue_task(task_rq(p), p, DEQUEUE_SLEEP | DEQUEUE_DELAYED);
+ 
+@@ -5384,6 +5387,7 @@ static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link)
+ 		sched_enq_and_set_task(&ctx);
+ 
+ 		check_class_changed(task_rq(p), p, old_class, p->prio);
++		put_task_struct(p);
+ 	}
+ 	scx_task_iter_stop(&sti);
+ 	percpu_up_write(&scx_fork_rwsem);
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index ad7db84b04090a..370cde32c696e7 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -4569,13 +4569,17 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
+ 	        } else {
+ 			iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash);
+ 		}
++	} else {
++		if (hash)
++			iter->hash = alloc_and_copy_ftrace_hash(hash->size_bits, hash);
++		else
++			iter->hash = EMPTY_HASH;
++	}
+ 
+-		if (!iter->hash) {
+-			trace_parser_put(&iter->parser);
+-			goto out_unlock;
+-		}
+-	} else
+-		iter->hash = hash;
++	if (!iter->hash) {
++		trace_parser_put(&iter->parser);
++		goto out_unlock;
++	}
+ 
+ 	ret = 0;
+ 
+@@ -6445,9 +6449,6 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
+ 		ftrace_hash_move_and_update_ops(iter->ops, orig_hash,
+ 						      iter->hash, filter_hash);
+ 		mutex_unlock(&ftrace_lock);
+-	} else {
+-		/* For read only, the hash is the ops hash */
+-		iter->hash = NULL;
+ 	}
+ 
+ 	mutex_unlock(&iter->ops->func_hash->regex_lock);
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 801def692f9299..2f662ca4d3ffd7 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -1754,7 +1754,7 @@ int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
+ 
+ 	ret = get_user(ch, ubuf++);
+ 	if (ret)
+-		goto out;
++		goto fail;
+ 
+ 	read++;
+ 	cnt--;
+@@ -1768,7 +1768,7 @@ int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
+ 		while (cnt && isspace(ch)) {
+ 			ret = get_user(ch, ubuf++);
+ 			if (ret)
+-				goto out;
++				goto fail;
+ 			read++;
+ 			cnt--;
+ 		}
+@@ -1778,8 +1778,7 @@ int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
+ 		/* only spaces were written */
+ 		if (isspace(ch) || !ch) {
+ 			*ppos += read;
+-			ret = read;
+-			goto out;
++			return read;
+ 		}
+ 	}
+ 
+@@ -1789,11 +1788,12 @@ int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
+ 			parser->buffer[parser->idx++] = ch;
+ 		else {
+ 			ret = -EINVAL;
+-			goto out;
++			goto fail;
+ 		}
++
+ 		ret = get_user(ch, ubuf++);
+ 		if (ret)
+-			goto out;
++			goto fail;
+ 		read++;
+ 		cnt--;
+ 	}
+@@ -1809,13 +1809,13 @@ int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
+ 		parser->buffer[parser->idx] = 0;
+ 	} else {
+ 		ret = -EINVAL;
+-		goto out;
++		goto fail;
+ 	}
+ 
+ 	*ppos += read;
+-	ret = read;
+-
+-out:
++	return read;
++fail:
++	trace_parser_fail(parser);
+ 	return ret;
+ }
+ 
+@@ -2318,10 +2318,10 @@ int __init register_tracer(struct tracer *type)
+ 	mutex_unlock(&trace_types_lock);
+ 
+ 	if (ret || !default_bootup_tracer)
+-		goto out_unlock;
++		return ret;
+ 
+ 	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
+-		goto out_unlock;
++		return 0;
+ 
+ 	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
+ 	/* Do we want this tracer to start on bootup? */
+@@ -2333,8 +2333,7 @@ int __init register_tracer(struct tracer *type)
+ 	/* disable other selftests, since this will break it. */
+ 	disable_tracing_selftest("running a tracer");
+ 
+- out_unlock:
+-	return ret;
++	return 0;
+ }
+ 
+ static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
+@@ -8563,12 +8562,12 @@ ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
+  out_reg:
+ 	ret = tracing_arm_snapshot(tr);
+ 	if (ret < 0)
+-		goto out;
++		return ret;
+ 
+ 	ret = register_ftrace_function_probe(glob, tr, ops, count);
+ 	if (ret < 0)
+ 		tracing_disarm_snapshot(tr);
+- out:
++
+ 	return ret < 0 ? ret : 0;
+ }
+ 
+@@ -10469,7 +10468,7 @@ __init static int tracer_alloc_buffers(void)
+ 	BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
+ 
+ 	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
+-		goto out;
++		return -ENOMEM;
+ 
+ 	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
+ 		goto out_free_buffer_mask;
+@@ -10582,7 +10581,6 @@ __init static int tracer_alloc_buffers(void)
+ 	free_cpumask_var(global_trace.tracing_cpumask);
+ out_free_buffer_mask:
+ 	free_cpumask_var(tracing_buffer_mask);
+-out:
+ 	return ret;
+ }
+ 
+diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
+index 57e1af1d3e6d45..9b2ae7652cbc17 100644
+--- a/kernel/trace/trace.h
++++ b/kernel/trace/trace.h
+@@ -1230,6 +1230,7 @@ bool ftrace_event_is_function(struct trace_event_call *call);
+  */
+ struct trace_parser {
+ 	bool		cont;
++	bool		fail;
+ 	char		*buffer;
+ 	unsigned	idx;
+ 	unsigned	size;
+@@ -1237,7 +1238,7 @@ struct trace_parser {
+ 
+ static inline bool trace_parser_loaded(struct trace_parser *parser)
+ {
+-	return (parser->idx != 0);
++	return !parser->fail && parser->idx != 0;
+ }
+ 
+ static inline bool trace_parser_cont(struct trace_parser *parser)
+@@ -1251,6 +1252,11 @@ static inline void trace_parser_clear(struct trace_parser *parser)
+ 	parser->idx = 0;
+ }
+ 
++static inline void trace_parser_fail(struct trace_parser *parser)
++{
++	parser->fail = true;
++}
++
+ extern int trace_parser_get_init(struct trace_parser *parser, int size);
+ extern void trace_parser_put(struct trace_parser *parser);
+ extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
+@@ -2145,7 +2151,7 @@ static inline bool is_good_system_name(const char *name)
+ static inline void sanitize_event_name(char *name)
+ {
+ 	while (*name++ != '\0')
+-		if (*name == ':' || *name == '.')
++		if (*name == ':' || *name == '.' || *name == '*')
+ 			*name = '_';
+ }
+ 
+diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c
+index 8813038abc6fb3..4120a73f49339f 100644
+--- a/mm/damon/paddr.c
++++ b/mm/damon/paddr.c
+@@ -431,6 +431,10 @@ static unsigned long damon_pa_migrate_pages(struct list_head *folio_list,
+ 	if (list_empty(folio_list))
+ 		return nr_migrated;
+ 
++	if (target_nid < 0 || target_nid >= MAX_NUMNODES ||
++			!node_state(target_nid, N_MEMORY))
++		return nr_migrated;
++
+ 	noreclaim_flag = memalloc_noreclaim_save();
+ 
+ 	nid = folio_nid(lru_to_folio(folio_list));
+diff --git a/mm/debug_vm_pgtable.c b/mm/debug_vm_pgtable.c
+index bc748f700a9e11..80cc409ba78a5c 100644
+--- a/mm/debug_vm_pgtable.c
++++ b/mm/debug_vm_pgtable.c
+@@ -1049,29 +1049,34 @@ static void __init destroy_args(struct pgtable_debug_args *args)
+ 
+ 	/* Free page table entries */
+ 	if (args->start_ptep) {
++		pmd_clear(args->pmdp);
+ 		pte_free(args->mm, args->start_ptep);
+ 		mm_dec_nr_ptes(args->mm);
+ 	}
+ 
+ 	if (args->start_pmdp) {
++		pud_clear(args->pudp);
+ 		pmd_free(args->mm, args->start_pmdp);
+ 		mm_dec_nr_pmds(args->mm);
+ 	}
+ 
+ 	if (args->start_pudp) {
++		p4d_clear(args->p4dp);
+ 		pud_free(args->mm, args->start_pudp);
+ 		mm_dec_nr_puds(args->mm);
+ 	}
+ 
+-	if (args->start_p4dp)
++	if (args->start_p4dp) {
++		pgd_clear(args->pgdp);
+ 		p4d_free(args->mm, args->start_p4dp);
++	}
+ 
+ 	/* Free vma and mm struct */
+ 	if (args->vma)
+ 		vm_area_free(args->vma);
+ 
+ 	if (args->mm)
+-		mmdrop(args->mm);
++		mmput(args->mm);
+ }
+ 
+ static struct page * __init
+diff --git a/mm/filemap.c b/mm/filemap.c
+index fa18e71f9c8895..ec69fadf014cd7 100644
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -1750,8 +1750,9 @@ pgoff_t page_cache_next_miss(struct address_space *mapping,
+ 			     pgoff_t index, unsigned long max_scan)
+ {
+ 	XA_STATE(xas, &mapping->i_pages, index);
++	unsigned long nr = max_scan;
+ 
+-	while (max_scan--) {
++	while (nr--) {
+ 		void *entry = xas_next(&xas);
+ 		if (!entry || xa_is_value(entry))
+ 			return xas.xa_index;
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index 70b2ccf0d51eed..8c8d78d6d3062e 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -845,9 +845,17 @@ static int hwpoison_hugetlb_range(pte_t *ptep, unsigned long hmask,
+ #define hwpoison_hugetlb_range	NULL
+ #endif
+ 
++static int hwpoison_test_walk(unsigned long start, unsigned long end,
++			     struct mm_walk *walk)
++{
++	/* We also want to consider pages mapped into VM_PFNMAP. */
++	return 0;
++}
++
+ static const struct mm_walk_ops hwpoison_walk_ops = {
+ 	.pmd_entry = hwpoison_pte_range,
+ 	.hugetlb_entry = hwpoison_hugetlb_range,
++	.test_walk = hwpoison_test_walk,
+ 	.walk_lock = PGWALK_RDLOCK,
+ };
+ 
+diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
+index c6c1232db4e28f..dad90204741496 100644
+--- a/net/bluetooth/hci_conn.c
++++ b/net/bluetooth/hci_conn.c
+@@ -338,7 +338,8 @@ static int hci_enhanced_setup_sync(struct hci_dev *hdev, void *data)
+ 	case BT_CODEC_TRANSPARENT:
+ 		if (!find_next_esco_param(conn, esco_param_msbc,
+ 					  ARRAY_SIZE(esco_param_msbc)))
+-			return false;
++			return -EINVAL;
++
+ 		param = &esco_param_msbc[conn->attempt - 1];
+ 		cp.tx_coding_format.id = 0x03;
+ 		cp.rx_coding_format.id = 0x03;
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index 38643ffa65a930..768bd5fd808f2b 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -6725,8 +6725,8 @@ static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data,
+ 		qos->ucast.out.latency =
+ 			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency),
+ 					  1000);
+-		qos->ucast.in.sdu = le16_to_cpu(ev->c_mtu);
+-		qos->ucast.out.sdu = le16_to_cpu(ev->p_mtu);
++		qos->ucast.in.sdu = ev->c_bn ? le16_to_cpu(ev->c_mtu) : 0;
++		qos->ucast.out.sdu = ev->p_bn ? le16_to_cpu(ev->p_mtu) : 0;
+ 		qos->ucast.in.phy = ev->c_phy;
+ 		qos->ucast.out.phy = ev->p_phy;
+ 		break;
+@@ -6740,8 +6740,8 @@ static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data,
+ 		qos->ucast.in.latency =
+ 			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency),
+ 					  1000);
+-		qos->ucast.out.sdu = le16_to_cpu(ev->c_mtu);
+-		qos->ucast.in.sdu = le16_to_cpu(ev->p_mtu);
++		qos->ucast.out.sdu = ev->c_bn ? le16_to_cpu(ev->c_mtu) : 0;
++		qos->ucast.in.sdu = ev->p_bn ? le16_to_cpu(ev->p_mtu) : 0;
+ 		qos->ucast.out.phy = ev->c_phy;
+ 		qos->ucast.in.phy = ev->p_phy;
+ 		break;
+diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
+index bbd809414b2f2c..af86df9de941df 100644
+--- a/net/bluetooth/hci_sync.c
++++ b/net/bluetooth/hci_sync.c
+@@ -6960,8 +6960,6 @@ static void create_pa_complete(struct hci_dev *hdev, void *data, int err)
+ 
+ 	hci_dev_lock(hdev);
+ 
+-	hci_dev_clear_flag(hdev, HCI_PA_SYNC);
+-
+ 	if (!hci_conn_valid(hdev, conn))
+ 		clear_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags);
+ 
+@@ -7022,10 +7020,13 @@ static int hci_le_pa_create_sync(struct hci_dev *hdev, void *data)
+ 	/* SID has not been set listen for HCI_EV_LE_EXT_ADV_REPORT to update
+ 	 * it.
+ 	 */
+-	if (conn->sid == HCI_SID_INVALID)
+-		__hci_cmd_sync_status_sk(hdev, HCI_OP_NOP, 0, NULL,
+-					 HCI_EV_LE_EXT_ADV_REPORT,
+-					 conn->conn_timeout, NULL);
++	if (conn->sid == HCI_SID_INVALID) {
++		err = __hci_cmd_sync_status_sk(hdev, HCI_OP_NOP, 0, NULL,
++					       HCI_EV_LE_EXT_ADV_REPORT,
++					       conn->conn_timeout, NULL);
++		if (err == -ETIMEDOUT)
++			goto done;
++	}
+ 
+ 	memset(&cp, 0, sizeof(cp));
+ 	cp.options = qos->bcast.options;
+@@ -7055,6 +7056,12 @@ static int hci_le_pa_create_sync(struct hci_dev *hdev, void *data)
+ 		__hci_cmd_sync_status(hdev, HCI_OP_LE_PA_CREATE_SYNC_CANCEL,
+ 				      0, NULL, HCI_CMD_TIMEOUT);
+ 
++done:
++	hci_dev_clear_flag(hdev, HCI_PA_SYNC);
++
++	/* Update passive scan since HCI_PA_SYNC flag has been cleared */
++	hci_update_passive_scan_sync(hdev);
++
+ 	return err;
+ }
+ 
+diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
+index 733ff6b758f691..0a00c3f578156b 100644
+--- a/net/bridge/br_multicast.c
++++ b/net/bridge/br_multicast.c
+@@ -4808,6 +4808,14 @@ void br_multicast_set_query_intvl(struct net_bridge_mcast *brmctx,
+ 		intvl_jiffies = BR_MULTICAST_QUERY_INTVL_MIN;
+ 	}
+ 
++	if (intvl_jiffies > BR_MULTICAST_QUERY_INTVL_MAX) {
++		br_info(brmctx->br,
++			"trying to set multicast query interval above maximum, setting to %lu (%ums)\n",
++			jiffies_to_clock_t(BR_MULTICAST_QUERY_INTVL_MAX),
++			jiffies_to_msecs(BR_MULTICAST_QUERY_INTVL_MAX));
++		intvl_jiffies = BR_MULTICAST_QUERY_INTVL_MAX;
++	}
++
+ 	brmctx->multicast_query_interval = intvl_jiffies;
+ }
+ 
+@@ -4824,6 +4832,14 @@ void br_multicast_set_startup_query_intvl(struct net_bridge_mcast *brmctx,
+ 		intvl_jiffies = BR_MULTICAST_STARTUP_QUERY_INTVL_MIN;
+ 	}
+ 
++	if (intvl_jiffies > BR_MULTICAST_STARTUP_QUERY_INTVL_MAX) {
++		br_info(brmctx->br,
++			"trying to set multicast startup query interval above maximum, setting to %lu (%ums)\n",
++			jiffies_to_clock_t(BR_MULTICAST_STARTUP_QUERY_INTVL_MAX),
++			jiffies_to_msecs(BR_MULTICAST_STARTUP_QUERY_INTVL_MAX));
++		intvl_jiffies = BR_MULTICAST_STARTUP_QUERY_INTVL_MAX;
++	}
++
+ 	brmctx->multicast_startup_query_interval = intvl_jiffies;
+ }
+ 
+diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
+index 6a1bce8959afa2..5026a256bf92d1 100644
+--- a/net/bridge/br_private.h
++++ b/net/bridge/br_private.h
+@@ -31,6 +31,8 @@
+ #define BR_MULTICAST_DEFAULT_HASH_MAX 4096
+ #define BR_MULTICAST_QUERY_INTVL_MIN msecs_to_jiffies(1000)
+ #define BR_MULTICAST_STARTUP_QUERY_INTVL_MIN BR_MULTICAST_QUERY_INTVL_MIN
++#define BR_MULTICAST_QUERY_INTVL_MAX msecs_to_jiffies(86400000) /* 24 hours */
++#define BR_MULTICAST_STARTUP_QUERY_INTVL_MAX BR_MULTICAST_QUERY_INTVL_MAX
+ 
+ #define BR_HWDOM_MAX BITS_PER_LONG
+ 
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 2ba2160dd093af..cfd32bd02a6989 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3603,6 +3603,18 @@ static netdev_features_t gso_features_check(const struct sk_buff *skb,
+ 			features &= ~NETIF_F_TSO_MANGLEID;
+ 	}
+ 
++	/* NETIF_F_IPV6_CSUM does not support IPv6 extension headers,
++	 * so neither does TSO that depends on it.
++	 */
++	if (features & NETIF_F_IPV6_CSUM &&
++	    (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6 ||
++	     (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4 &&
++	      vlan_get_protocol(skb) == htons(ETH_P_IPV6))) &&
++	    skb_transport_header_was_set(skb) &&
++	    skb_network_header_len(skb) != sizeof(struct ipv6hdr) &&
++	    !ipv6_has_hopopt_jumbo(skb))
++		features &= ~(NETIF_F_IPV6_CSUM | NETIF_F_TSO6 | NETIF_F_GSO_UDP_L4);
++
+ 	return features;
+ }
+ 
+diff --git a/net/hsr/hsr_slave.c b/net/hsr/hsr_slave.c
+index 464f683e016dbb..b17909ef6632ff 100644
+--- a/net/hsr/hsr_slave.c
++++ b/net/hsr/hsr_slave.c
+@@ -63,8 +63,14 @@ static rx_handler_result_t hsr_handle_frame(struct sk_buff **pskb)
+ 	skb_push(skb, ETH_HLEN);
+ 	skb_reset_mac_header(skb);
+ 	if ((!hsr->prot_version && protocol == htons(ETH_P_PRP)) ||
+-	    protocol == htons(ETH_P_HSR))
++	    protocol == htons(ETH_P_HSR)) {
++		if (!pskb_may_pull(skb, ETH_HLEN + HSR_HLEN)) {
++			kfree_skb(skb);
++			goto finish_consume;
++		}
++
+ 		skb_set_network_header(skb, ETH_HLEN + HSR_HLEN);
++	}
+ 	skb_reset_mac_len(skb);
+ 
+ 	/* Only the frames received over the interlink port will assign a
+diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c
+index 87fd945a0d27a5..0d3cb2ba6fc841 100644
+--- a/net/ipv4/netfilter/nf_reject_ipv4.c
++++ b/net/ipv4/netfilter/nf_reject_ipv4.c
+@@ -247,8 +247,7 @@ void nf_send_reset(struct net *net, struct sock *sk, struct sk_buff *oldskb,
+ 	if (!oth)
+ 		return;
+ 
+-	if ((hook == NF_INET_PRE_ROUTING || hook == NF_INET_INGRESS) &&
+-	    nf_reject_fill_skb_dst(oldskb) < 0)
++	if (!skb_dst(oldskb) && nf_reject_fill_skb_dst(oldskb) < 0)
+ 		return;
+ 
+ 	if (skb_rtable(oldskb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
+@@ -321,8 +320,7 @@ void nf_send_unreach(struct sk_buff *skb_in, int code, int hook)
+ 	if (iph->frag_off & htons(IP_OFFSET))
+ 		return;
+ 
+-	if ((hook == NF_INET_PRE_ROUTING || hook == NF_INET_INGRESS) &&
+-	    nf_reject_fill_skb_dst(skb_in) < 0)
++	if (!skb_dst(skb_in) && nf_reject_fill_skb_dst(skb_in) < 0)
+ 		return;
+ 
+ 	if (skb_csum_unnecessary(skb_in) ||
+diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c
+index 9ae2b2725bf99a..c3d64c4b69d7de 100644
+--- a/net/ipv6/netfilter/nf_reject_ipv6.c
++++ b/net/ipv6/netfilter/nf_reject_ipv6.c
+@@ -293,7 +293,7 @@ void nf_send_reset6(struct net *net, struct sock *sk, struct sk_buff *oldskb,
+ 	fl6.fl6_sport = otcph->dest;
+ 	fl6.fl6_dport = otcph->source;
+ 
+-	if (hook == NF_INET_PRE_ROUTING || hook == NF_INET_INGRESS) {
++	if (!skb_dst(oldskb)) {
+ 		nf_ip6_route(net, &dst, flowi6_to_flowi(&fl6), false);
+ 		if (!dst)
+ 			return;
+@@ -397,8 +397,7 @@ void nf_send_unreach6(struct net *net, struct sk_buff *skb_in,
+ 	if (hooknum == NF_INET_LOCAL_OUT && skb_in->dev == NULL)
+ 		skb_in->dev = net->loopback_dev;
+ 
+-	if ((hooknum == NF_INET_PRE_ROUTING || hooknum == NF_INET_INGRESS) &&
+-	    nf_reject6_fill_skb_dst(skb_in) < 0)
++	if (!skb_dst(skb_in) && nf_reject6_fill_skb_dst(skb_in) < 0)
+ 		return;
+ 
+ 	icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0);
+diff --git a/net/ipv6/seg6_hmac.c b/net/ipv6/seg6_hmac.c
+index bbf5b84a70fcab..5d21a74c116549 100644
+--- a/net/ipv6/seg6_hmac.c
++++ b/net/ipv6/seg6_hmac.c
+@@ -35,6 +35,7 @@
+ #include <net/xfrm.h>
+ 
+ #include <crypto/hash.h>
++#include <crypto/utils.h>
+ #include <net/seg6.h>
+ #include <net/genetlink.h>
+ #include <net/seg6_hmac.h>
+@@ -271,7 +272,7 @@ bool seg6_hmac_validate_skb(struct sk_buff *skb)
+ 	if (seg6_hmac_compute(hinfo, srh, &ipv6_hdr(skb)->saddr, hmac_output))
+ 		return false;
+ 
+-	if (memcmp(hmac_output, tlv->hmac, SEG6_HMAC_FIELD_LEN) != 0)
++	if (crypto_memneq(hmac_output, tlv->hmac, SEG6_HMAC_FIELD_LEN))
+ 		return false;
+ 
+ 	return true;
+@@ -295,6 +296,9 @@ int seg6_hmac_info_add(struct net *net, u32 key, struct seg6_hmac_info *hinfo)
+ 	struct seg6_pernet_data *sdata = seg6_pernet(net);
+ 	int err;
+ 
++	if (!__hmac_get_algo(hinfo->alg_id))
++		return -EINVAL;
++
+ 	err = rhashtable_lookup_insert_fast(&sdata->hmac_infos, &hinfo->node,
+ 					    rht_params);
+ 
+diff --git a/net/mptcp/options.c b/net/mptcp/options.c
+index a97505b786712e..7d4718a57bdccd 100644
+--- a/net/mptcp/options.c
++++ b/net/mptcp/options.c
+@@ -1118,7 +1118,9 @@ static bool add_addr_hmac_valid(struct mptcp_sock *msk,
+ 	return hmac == mp_opt->ahmac;
+ }
+ 
+-/* Return false if a subflow has been reset, else return true */
++/* Return false in case of error (or subflow has been reset),
++ * else return true.
++ */
+ bool mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
+ {
+ 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
+@@ -1222,7 +1224,7 @@ bool mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
+ 
+ 	mpext = skb_ext_add(skb, SKB_EXT_MPTCP);
+ 	if (!mpext)
+-		return true;
++		return false;
+ 
+ 	memset(mpext, 0, sizeof(*mpext));
+ 
+diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
+index 2a085ec5bfd097..b763729b85e0a6 100644
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -293,6 +293,7 @@ static void mptcp_pm_add_timer(struct timer_list *timer)
+ 	struct mptcp_pm_add_entry *entry = from_timer(entry, timer, add_timer);
+ 	struct mptcp_sock *msk = entry->sock;
+ 	struct sock *sk = (struct sock *)msk;
++	unsigned int timeout;
+ 
+ 	pr_debug("msk=%p\n", msk);
+ 
+@@ -310,6 +311,10 @@ static void mptcp_pm_add_timer(struct timer_list *timer)
+ 		goto out;
+ 	}
+ 
++	timeout = mptcp_get_add_addr_timeout(sock_net(sk));
++	if (!timeout)
++		goto out;
++
+ 	spin_lock_bh(&msk->pm.lock);
+ 
+ 	if (!mptcp_pm_should_add_signal_addr(msk)) {
+@@ -321,7 +326,7 @@ static void mptcp_pm_add_timer(struct timer_list *timer)
+ 
+ 	if (entry->retrans_times < ADD_ADDR_RETRANS_MAX)
+ 		sk_reset_timer(sk, timer,
+-			       jiffies + mptcp_get_add_addr_timeout(sock_net(sk)));
++			       jiffies + timeout);
+ 
+ 	spin_unlock_bh(&msk->pm.lock);
+ 
+@@ -363,6 +368,7 @@ bool mptcp_pm_alloc_anno_list(struct mptcp_sock *msk,
+ 	struct mptcp_pm_add_entry *add_entry = NULL;
+ 	struct sock *sk = (struct sock *)msk;
+ 	struct net *net = sock_net(sk);
++	unsigned int timeout;
+ 
+ 	lockdep_assert_held(&msk->pm.lock);
+ 
+@@ -372,9 +378,7 @@ bool mptcp_pm_alloc_anno_list(struct mptcp_sock *msk,
+ 		if (WARN_ON_ONCE(mptcp_pm_is_kernel(msk)))
+ 			return false;
+ 
+-		sk_reset_timer(sk, &add_entry->add_timer,
+-			       jiffies + mptcp_get_add_addr_timeout(net));
+-		return true;
++		goto reset_timer;
+ 	}
+ 
+ 	add_entry = kmalloc(sizeof(*add_entry), GFP_ATOMIC);
+@@ -388,8 +392,10 @@ bool mptcp_pm_alloc_anno_list(struct mptcp_sock *msk,
+ 	add_entry->retrans_times = 0;
+ 
+ 	timer_setup(&add_entry->add_timer, mptcp_pm_add_timer, 0);
+-	sk_reset_timer(sk, &add_entry->add_timer,
+-		       jiffies + mptcp_get_add_addr_timeout(net));
++reset_timer:
++	timeout = mptcp_get_add_addr_timeout(net);
++	if (timeout)
++		sk_reset_timer(sk, &add_entry->add_timer, jiffies + timeout);
+ 
+ 	return true;
+ }
+@@ -1737,7 +1743,6 @@ static void __flush_addrs(struct list_head *list)
+ static void __reset_counters(struct pm_nl_pernet *pernet)
+ {
+ 	WRITE_ONCE(pernet->add_addr_signal_max, 0);
+-	WRITE_ONCE(pernet->add_addr_accept_max, 0);
+ 	WRITE_ONCE(pernet->local_addr_max, 0);
+ 	pernet->addrs = 0;
+ }
+diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
+index 2c2e2a67f3b244..6cbe8a7a0e5cc6 100644
+--- a/net/sched/sch_cake.c
++++ b/net/sched/sch_cake.c
+@@ -1745,7 +1745,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ 	ktime_t now = ktime_get();
+ 	struct cake_tin_data *b;
+ 	struct cake_flow *flow;
+-	u32 idx;
++	u32 idx, tin;
+ 
+ 	/* choose flow to insert into */
+ 	idx = cake_classify(sch, &b, skb, q->flow_mode, &ret);
+@@ -1755,6 +1755,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ 		__qdisc_drop(skb, to_free);
+ 		return ret;
+ 	}
++	tin = (u32)(b - q->tins);
+ 	idx--;
+ 	flow = &b->flows[idx];
+ 
+@@ -1922,13 +1923,22 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ 		q->buffer_max_used = q->buffer_used;
+ 
+ 	if (q->buffer_used > q->buffer_limit) {
++		bool same_flow = false;
+ 		u32 dropped = 0;
++		u32 drop_id;
+ 
+ 		while (q->buffer_used > q->buffer_limit) {
+ 			dropped++;
+-			cake_drop(sch, to_free);
++			drop_id = cake_drop(sch, to_free);
++
++			if ((drop_id >> 16) == tin &&
++			    (drop_id & 0xFFFF) == idx)
++				same_flow = true;
+ 		}
+ 		b->drop_overlimit += dropped;
++
++		if (same_flow)
++			return NET_XMIT_CN;
+ 	}
+ 	return NET_XMIT_SUCCESS;
+ }
+diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
+index 1021681a571822..2c13de8bf16f40 100644
+--- a/net/sched/sch_htb.c
++++ b/net/sched/sch_htb.c
+@@ -592,7 +592,7 @@ htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, s64 *diff)
+  */
+ static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
+ {
+-	WARN_ON(cl->level || !cl->leaf.q || !cl->leaf.q->q.qlen);
++	WARN_ON(cl->level || !cl->leaf.q);
+ 
+ 	if (!cl->prio_activity) {
+ 		cl->prio_activity = 1 << cl->prio;
+diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
+index cdd445d40b945d..02e08ac1da3aa4 100644
+--- a/net/smc/af_smc.c
++++ b/net/smc/af_smc.c
+@@ -2565,8 +2565,9 @@ static void smc_listen_work(struct work_struct *work)
+ 			goto out_decl;
+ 	}
+ 
+-	smc_listen_out_connected(new_smc);
+ 	SMC_STAT_SERV_SUCC_INC(sock_net(newclcsock->sk), ini);
++	/* smc_listen_out() will release smcsk */
++	smc_listen_out_connected(new_smc);
+ 	goto out_free;
+ 
+ out_unlock:
+diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
+index 6385329ef98ddc..ee92ce3255f936 100644
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -1774,6 +1774,9 @@ int decrypt_skb(struct sock *sk, struct scatterlist *sgout)
+ 	return tls_decrypt_sg(sk, NULL, sgout, &darg);
+ }
+ 
++/* All records returned from a recvmsg() call must have the same type.
++ * 0 is not a valid content type. Use it as "no type reported, yet".
++ */
+ static int tls_record_content_type(struct msghdr *msg, struct tls_msg *tlm,
+ 				   u8 *control)
+ {
+@@ -2017,8 +2020,10 @@ int tls_sw_recvmsg(struct sock *sk,
+ 	if (err < 0)
+ 		goto end;
+ 
++	/* process_rx_list() will set @control if it processed any records */
+ 	copied = err;
+-	if (len <= copied || (copied && control != TLS_RECORD_TYPE_DATA) || rx_more)
++	if (len <= copied || rx_more ||
++	    (control && control != TLS_RECORD_TYPE_DATA))
+ 		goto end;
+ 
+ 	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
+diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
+index f01f9e8781061e..1ef6f7829d2942 100644
+--- a/net/vmw_vsock/virtio_transport.c
++++ b/net/vmw_vsock/virtio_transport.c
+@@ -624,8 +624,9 @@ static void virtio_transport_rx_work(struct work_struct *work)
+ 	do {
+ 		virtqueue_disable_cb(vq);
+ 		for (;;) {
++			unsigned int len, payload_len;
++			struct virtio_vsock_hdr *hdr;
+ 			struct sk_buff *skb;
+-			unsigned int len;
+ 
+ 			if (!virtio_transport_more_replies(vsock)) {
+ 				/* Stop rx until the device processes already
+@@ -642,12 +643,19 @@ static void virtio_transport_rx_work(struct work_struct *work)
+ 			vsock->rx_buf_nr--;
+ 
+ 			/* Drop short/long packets */
+-			if (unlikely(len < sizeof(struct virtio_vsock_hdr) ||
++			if (unlikely(len < sizeof(*hdr) ||
+ 				     len > virtio_vsock_skb_len(skb))) {
+ 				kfree_skb(skb);
+ 				continue;
+ 			}
+ 
++			hdr = virtio_vsock_hdr(skb);
++			payload_len = le32_to_cpu(hdr->len);
++			if (unlikely(payload_len > len - sizeof(*hdr))) {
++				kfree_skb(skb);
++				continue;
++			}
++
+ 			virtio_vsock_skb_rx_put(skb);
+ 			virtio_transport_deliver_tap_pkt(skb);
+ 			virtio_transport_recv_pkt(&virtio_transport, skb);
+diff --git a/rust/kernel/alloc/allocator.rs b/rust/kernel/alloc/allocator.rs
+index 439985e29fbc0e..e4cd2910000724 100644
+--- a/rust/kernel/alloc/allocator.rs
++++ b/rust/kernel/alloc/allocator.rs
+@@ -43,17 +43,6 @@
+ /// For more details see [self].
+ pub struct KVmalloc;
+ 
+-/// Returns a proper size to alloc a new object aligned to `new_layout`'s alignment.
+-fn aligned_size(new_layout: Layout) -> usize {
+-    // Customized layouts from `Layout::from_size_align()` can have size < align, so pad first.
+-    let layout = new_layout.pad_to_align();
+-
+-    // Note that `layout.size()` (after padding) is guaranteed to be a multiple of `layout.align()`
+-    // which together with the slab guarantees means the `krealloc` will return a properly aligned
+-    // object (see comments in `kmalloc()` for more information).
+-    layout.size()
+-}
+-
+ /// # Invariants
+ ///
+ /// One of the following: `krealloc`, `vrealloc`, `kvrealloc`.
+@@ -87,7 +76,7 @@ unsafe fn call(
+         old_layout: Layout,
+         flags: Flags,
+     ) -> Result<NonNull<[u8]>, AllocError> {
+-        let size = aligned_size(layout);
++        let size = layout.size();
+         let ptr = match ptr {
+             Some(ptr) => {
+                 if old_layout.size() == 0 {
+@@ -122,6 +111,17 @@ unsafe fn call(
+     }
+ }
+ 
++impl Kmalloc {
++    /// Returns a [`Layout`] that makes [`Kmalloc`] fulfill the requested size and alignment of
++    /// `layout`.
++    pub fn aligned_layout(layout: Layout) -> Layout {
++        // Note that `layout.size()` (after padding) is guaranteed to be a multiple of
++        // `layout.align()` which together with the slab guarantees means that `Kmalloc` will return
++        // a properly aligned object (see comments in `kmalloc()` for more information).
++        layout.pad_to_align()
++    }
++}
++
+ // SAFETY: `realloc` delegates to `ReallocFunc::call`, which guarantees that
+ // - memory remains valid until it is explicitly freed,
+ // - passing a pointer to a valid memory allocation is OK,
+@@ -134,6 +134,8 @@ unsafe fn realloc(
+         old_layout: Layout,
+         flags: Flags,
+     ) -> Result<NonNull<[u8]>, AllocError> {
++        let layout = Kmalloc::aligned_layout(layout);
++
+         // SAFETY: `ReallocFunc::call` has the same safety requirements as `Allocator::realloc`.
+         unsafe { ReallocFunc::KREALLOC.call(ptr, layout, old_layout, flags) }
+     }
+@@ -175,6 +177,10 @@ unsafe fn realloc(
+         old_layout: Layout,
+         flags: Flags,
+     ) -> Result<NonNull<[u8]>, AllocError> {
++        // `KVmalloc` may use the `Kmalloc` backend, hence we have to enforce a `Kmalloc`
++        // compatible layout.
++        let layout = Kmalloc::aligned_layout(layout);
++
+         // TODO: Support alignments larger than PAGE_SIZE.
+         if layout.align() > bindings::PAGE_SIZE {
+             pr_warn!("KVmalloc does not support alignments larger than PAGE_SIZE yet.\n");
+diff --git a/rust/kernel/alloc/allocator_test.rs b/rust/kernel/alloc/allocator_test.rs
+index c37d4c0c64e9f9..ec13385489dfe3 100644
+--- a/rust/kernel/alloc/allocator_test.rs
++++ b/rust/kernel/alloc/allocator_test.rs
+@@ -22,6 +22,17 @@
+ pub type Vmalloc = Kmalloc;
+ pub type KVmalloc = Kmalloc;
+ 
++impl Cmalloc {
++    /// Returns a [`Layout`] that makes [`Kmalloc`] fulfill the requested size and alignment of
++    /// `layout`.
++    pub fn aligned_layout(layout: Layout) -> Layout {
++        // Note that `layout.size()` (after padding) is guaranteed to be a multiple of
++        // `layout.align()` which together with the slab guarantees means that `Kmalloc` will return
++        // a properly aligned object (see comments in `kmalloc()` for more information).
++        layout.pad_to_align()
++    }
++}
++
+ extern "C" {
+     #[link_name = "aligned_alloc"]
+     fn libc_aligned_alloc(align: usize, size: usize) -> *mut crate::ffi::c_void;
+diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
+index f5d05297d59ee4..9a78fd36542d62 100644
+--- a/security/apparmor/lsm.c
++++ b/security/apparmor/lsm.c
+@@ -2144,12 +2144,12 @@ static int __init apparmor_nf_ip_init(void)
+ __initcall(apparmor_nf_ip_init);
+ #endif
+ 
+-static char nulldfa_src[] = {
++static char nulldfa_src[] __aligned(8) = {
+ 	#include "nulldfa.in"
+ };
+ static struct aa_dfa *nulldfa;
+ 
+-static char stacksplitdfa_src[] = {
++static char stacksplitdfa_src[] __aligned(8) = {
+ 	#include "stacksplitdfa.in"
+ };
+ struct aa_dfa *stacksplitdfa;
+diff --git a/sound/core/timer.c b/sound/core/timer.c
+index d774b9b71ce238..a0dcb4ebb05982 100644
+--- a/sound/core/timer.c
++++ b/sound/core/timer.c
+@@ -2139,14 +2139,14 @@ static int snd_utimer_create(struct snd_timer_uinfo *utimer_info,
+ 		goto err_take_id;
+ 	}
+ 
++	utimer->id = utimer_id;
++
+ 	utimer->name = kasprintf(GFP_KERNEL, "snd-utimer%d", utimer_id);
+ 	if (!utimer->name) {
+ 		err = -ENOMEM;
+ 		goto err_get_name;
+ 	}
+ 
+-	utimer->id = utimer_id;
+-
+ 	tid.dev_sclass = SNDRV_TIMER_SCLASS_APPLICATION;
+ 	tid.dev_class = SNDRV_TIMER_CLASS_GLOBAL;
+ 	tid.card = -1;
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 840cde49935d0c..b31b15cf453a5b 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -10576,6 +10576,8 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+ 	SND_PCI_QUIRK(0x103c, 0x8519, "HP Spectre x360 15-df0xxx", ALC285_FIXUP_HP_SPECTRE_X360),
+ 	SND_PCI_QUIRK(0x103c, 0x8537, "HP ProBook 440 G6", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
++	SND_PCI_QUIRK(0x103c, 0x8548, "HP EliteBook x360 830 G6", ALC285_FIXUP_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x854a, "HP EliteBook 830 G6", ALC285_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x85c6, "HP Pavilion x360 Convertible 14-dy1xxx", ALC295_FIXUP_HP_MUTE_LED_COEFBIT11),
+ 	SND_PCI_QUIRK(0x103c, 0x85de, "HP Envy x360 13-ar0xxx", ALC285_FIXUP_HP_ENVY_X360),
+ 	SND_PCI_QUIRK(0x103c, 0x860f, "HP ZBook 15 G6", ALC285_FIXUP_HP_GPIO_AMP_INIT),
+diff --git a/sound/soc/sof/amd/acp-loader.c b/sound/soc/sof/amd/acp-loader.c
+index 077af9e2af8d09..5cc7d98801805e 100644
+--- a/sound/soc/sof/amd/acp-loader.c
++++ b/sound/soc/sof/amd/acp-loader.c
+@@ -65,7 +65,7 @@ int acp_dsp_block_write(struct snd_sof_dev *sdev, enum snd_sof_fw_blk_type blk_t
+ 			dma_size = page_count * ACP_PAGE_SIZE;
+ 			adata->bin_buf = dma_alloc_coherent(&pci->dev, dma_size,
+ 							    &adata->sha_dma_addr,
+-							    GFP_ATOMIC);
++							    GFP_KERNEL);
+ 			if (!adata->bin_buf)
+ 				return -ENOMEM;
+ 		}
+@@ -77,7 +77,7 @@ int acp_dsp_block_write(struct snd_sof_dev *sdev, enum snd_sof_fw_blk_type blk_t
+ 			adata->data_buf = dma_alloc_coherent(&pci->dev,
+ 							     ACP_DEFAULT_DRAM_LENGTH,
+ 							     &adata->dma_addr,
+-							     GFP_ATOMIC);
++							     GFP_KERNEL);
+ 			if (!adata->data_buf)
+ 				return -ENOMEM;
+ 		}
+@@ -90,7 +90,7 @@ int acp_dsp_block_write(struct snd_sof_dev *sdev, enum snd_sof_fw_blk_type blk_t
+ 			adata->sram_data_buf = dma_alloc_coherent(&pci->dev,
+ 								  ACP_DEFAULT_SRAM_LENGTH,
+ 								  &adata->sram_dma_addr,
+-								  GFP_ATOMIC);
++								  GFP_KERNEL);
+ 			if (!adata->sram_data_buf)
+ 				return -ENOMEM;
+ 		}
+diff --git a/sound/usb/stream.c b/sound/usb/stream.c
+index 1cb52373e70f64..db2c9bac00adca 100644
+--- a/sound/usb/stream.c
++++ b/sound/usb/stream.c
+@@ -349,7 +349,7 @@ snd_pcm_chmap_elem *convert_chmap_v3(struct uac3_cluster_header_descriptor
+ 		u16 cs_len;
+ 		u8 cs_type;
+ 
+-		if (len < sizeof(*p))
++		if (len < sizeof(*cs_desc))
+ 			break;
+ 		cs_len = le16_to_cpu(cs_desc->wLength);
+ 		if (len < cs_len)
+diff --git a/sound/usb/validate.c b/sound/usb/validate.c
+index 4f4e8e87a14cd0..a0d55b77c9941d 100644
+--- a/sound/usb/validate.c
++++ b/sound/usb/validate.c
+@@ -285,7 +285,7 @@ static const struct usb_desc_validator audio_validators[] = {
+ 	/* UAC_VERSION_3, UAC3_EXTENDED_TERMINAL: not implemented yet */
+ 	FUNC(UAC_VERSION_3, UAC3_MIXER_UNIT, validate_mixer_unit),
+ 	FUNC(UAC_VERSION_3, UAC3_SELECTOR_UNIT, validate_selector_unit),
+-	FUNC(UAC_VERSION_3, UAC_FEATURE_UNIT, validate_uac3_feature_unit),
++	FUNC(UAC_VERSION_3, UAC3_FEATURE_UNIT, validate_uac3_feature_unit),
+ 	/*  UAC_VERSION_3, UAC3_EFFECT_UNIT: not implemented yet */
+ 	FUNC(UAC_VERSION_3, UAC3_PROCESSING_UNIT, validate_processing_unit),
+ 	FUNC(UAC_VERSION_3, UAC3_EXTENSION_UNIT, validate_processing_unit),
+diff --git a/tools/testing/selftests/net/mptcp/pm_netlink.sh b/tools/testing/selftests/net/mptcp/pm_netlink.sh
+index 2e6648a2b2c0c6..ac7ec6f9402376 100755
+--- a/tools/testing/selftests/net/mptcp/pm_netlink.sh
++++ b/tools/testing/selftests/net/mptcp/pm_netlink.sh
+@@ -198,6 +198,7 @@ set_limits 1 9 2>/dev/null
+ check "get_limits" "${default_limits}" "subflows above hard limit"
+ 
+ set_limits 8 8
++flush_endpoint  ## to make sure it doesn't affect the limits
+ check "get_limits" "$(format_limits 8 8)" "set limits"
+ 
+ flush_endpoint


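One pattern in the hunks above deserves a gloss: the SOF ACP loader switches its dma_alloc_coherent() calls from GFP_ATOMIC to GFP_KERNEL. Firmware block writes run in process context, where sleeping is allowed, so the non-sleeping atomic pool is unnecessary and more likely to fail for multi-page buffers. A minimal sketch of the rule, with a hypothetical helper name (not from the driver):

#include <linux/dma-mapping.h>

/*
 * Sketch only: firmware staging buffers are allocated from process
 * context, so GFP_KERNEL is appropriate -- the allocator may sleep
 * and reclaim instead of failing outright.  GFP_ATOMIC is reserved
 * for contexts that cannot sleep (IRQ handlers, under spinlocks).
 */
static void *alloc_fw_staging(struct device *dev, size_t size,
			      dma_addr_t *dma_handle)
{
	return dma_alloc_coherent(dev, size, dma_handle, GFP_KERNEL);
}
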
^ permalink raw reply related	[flat|nested] 82+ messages in thread
* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2025-08-28 13:54 Arisu Tachibana
  0 siblings, 0 replies; 82+ messages in thread
From: Arisu Tachibana @ 2025-08-28 13:54 UTC (permalink / raw
  To: gentoo-commits

commit:     84702fafb1c2c0a1eaf741847959d41c105b7940
Author:     Arisu Tachibana <alicef <AT> gentoo <DOT> org>
AuthorDate: Thu Aug 28 13:53:40 2025 +0000
Commit:     Arisu Tachibana <alicef <AT> gentoo <DOT> org>
CommitDate: Thu Aug 28 13:53:40 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=84702faf

Add proc: fix missing pde_set_flags() for net proc files

Signed-off-by: Arisu Tachibana <alicef <AT> gentoo.org>

 0000_README                                        |   4 +
 ..._missing_pde_set_flags_for_net_proc_files.patch | 164 +++++++++++++++++++++
 2 files changed, 168 insertions(+)

diff --git a/0000_README b/0000_README
index bef879a3..860bf8f3 100644
--- a/0000_README
+++ b/0000_README
@@ -235,6 +235,10 @@ Patch:  1730_parisc-Disable-prctl.patch
 From:   https://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux.git
 Desc:   prctl: Temporarily disable prctl(PR_SET_MDWE) on parisc
 
+Patch:  1800_proc_fix_missing_pde_set_flags_for_net_proc_files.patch
+From:   https://lore.kernel.org/all/20250821105806.1453833-1-wangzijie1@honor.com/
+Desc:   proc: fix missing pde_set_flags() for net proc files
+
 Patch:  2000_BT-Check-key-sizes-only-if-Secure-Simple-Pairing-enabled.patch
 From:   https://lore.kernel.org/linux-bluetooth/20190522070540.48895-1-marcel@holtmann.org/raw
 Desc:   Bluetooth: Check key sizes only when Secure Simple Pairing is enabled. See bug #686758

diff --git a/1800_proc_fix_missing_pde_set_flags_for_net_proc_files.patch b/1800_proc_fix_missing_pde_set_flags_for_net_proc_files.patch
new file mode 100644
index 00000000..d42d9d07
--- /dev/null
+++ b/1800_proc_fix_missing_pde_set_flags_for_net_proc_files.patch
@@ -0,0 +1,164 @@
+From mboxrd@z Thu Jan  1 00:00:00 1970
+From: wangzijie <wangzijie1@honor.com>
+To: <akpm@linux-foundation.org>, <brauner@kernel.org>,
+	<viro@zeniv.linux.org.uk>, <adobriyan@gmail.com>,
+	<rick.p.edgecombe@intel.com>, <ast@kernel.org>, <k.shutemov@gmail.com>,
+	<jirislaby@kernel.org>, <linux-fsdevel@vger.kernel.org>
+CC: <polynomial-c@gmx.de>, <gregkh@linuxfoundation.org>,
+	<stable@vger.kernel.org>, <regressions@lists.linux.dev>, wangzijie
+	<wangzijie1@honor.com>
+Subject: [PATCH v3] proc: fix missing pde_set_flags() for net proc files
+Date: Thu, 21 Aug 2025 18:58:06 +0800
+Message-ID: <20250821105806.1453833-1-wangzijie1@honor.com>
+X-Mailer: git-send-email 2.25.1
+MIME-Version: 1.0
+Content-Transfer-Encoding: 8bit
+Content-Type: text/plain
+
+To avoid potential UAF issues during module removal races, we use pde_set_flags()
+to save proc_ops flags in the PDE itself before proc_register(), and then use
+pde_has_proc_*() helpers instead of directly dereferencing pde->proc_ops->*.
+
+However, the pde_set_flags() call was missing when creating net-related proc files.
+This omission caused incorrect behavior in which FMODE_LSEEK was being cleared
+inappropriately in proc_reg_open() for net proc files. Lars reported it in the link[1].
+
+Fix this by ensuring pde_set_flags() is called when registering the proc entry, and
+add a NULL check for proc_ops in pde_set_flags().
+
+[1]: https://lore.kernel.org/all/20250815195616.64497967@chagall.paradoxon.rec/
+
+Fixes: ff7ec8dc1b64 ("proc: use the same treatment to check proc_lseek as ones for proc_read_iter et.al")
+Cc: stable@vger.kernel.org
+Reported-by: Lars Wendler <polynomial-c@gmx.de>
+Signed-off-by: wangzijie <wangzijie1@honor.com>
+---
+v3:
+- followed Christian's suggestion to stash pde->proc_ops in a local const variable
+v2:
+- followed Jiri's suggestion to refactor the code and reformat the commit message
+---
+ fs/proc/generic.c | 38 +++++++++++++++++++++-----------------
+ 1 file changed, 21 insertions(+), 17 deletions(-)
+
+diff --git a/fs/proc/generic.c b/fs/proc/generic.c
+index 76e800e38..bd0c099cf 100644
+--- a/fs/proc/generic.c
++++ b/fs/proc/generic.c
+@@ -367,6 +367,25 @@ static const struct inode_operations proc_dir_inode_operations = {
+ 	.setattr	= proc_notify_change,
+ };
+ 
++static void pde_set_flags(struct proc_dir_entry *pde)
++{
++	const struct proc_ops *proc_ops = pde->proc_ops;
++
++	if (!proc_ops)
++		return;
++
++	if (proc_ops->proc_flags & PROC_ENTRY_PERMANENT)
++		pde->flags |= PROC_ENTRY_PERMANENT;
++	if (proc_ops->proc_read_iter)
++		pde->flags |= PROC_ENTRY_proc_read_iter;
++#ifdef CONFIG_COMPAT
++	if (proc_ops->proc_compat_ioctl)
++		pde->flags |= PROC_ENTRY_proc_compat_ioctl;
++#endif
++	if (proc_ops->proc_lseek)
++		pde->flags |= PROC_ENTRY_proc_lseek;
++}
++
+ /* returns the registered entry, or frees dp and returns NULL on failure */
+ struct proc_dir_entry *proc_register(struct proc_dir_entry *dir,
+ 		struct proc_dir_entry *dp)
+@@ -374,6 +393,8 @@ struct proc_dir_entry *proc_register(struct proc_dir_entry *dir,
+ 	if (proc_alloc_inum(&dp->low_ino))
+ 		goto out_free_entry;
+ 
++	pde_set_flags(dp);
++
+ 	write_lock(&proc_subdir_lock);
+ 	dp->parent = dir;
+ 	if (pde_subdir_insert(dir, dp) == false) {
+@@ -561,20 +582,6 @@ struct proc_dir_entry *proc_create_reg(const char *name, umode_t mode,
+ 	return p;
+ }
+ 
+-static void pde_set_flags(struct proc_dir_entry *pde)
+-{
+-	if (pde->proc_ops->proc_flags & PROC_ENTRY_PERMANENT)
+-		pde->flags |= PROC_ENTRY_PERMANENT;
+-	if (pde->proc_ops->proc_read_iter)
+-		pde->flags |= PROC_ENTRY_proc_read_iter;
+-#ifdef CONFIG_COMPAT
+-	if (pde->proc_ops->proc_compat_ioctl)
+-		pde->flags |= PROC_ENTRY_proc_compat_ioctl;
+-#endif
+-	if (pde->proc_ops->proc_lseek)
+-		pde->flags |= PROC_ENTRY_proc_lseek;
+-}
+-
+ struct proc_dir_entry *proc_create_data(const char *name, umode_t mode,
+ 		struct proc_dir_entry *parent,
+ 		const struct proc_ops *proc_ops, void *data)
+@@ -585,7 +592,6 @@ struct proc_dir_entry *proc_create_data(const char *name, umode_t mode,
+ 	if (!p)
+ 		return NULL;
+ 	p->proc_ops = proc_ops;
+-	pde_set_flags(p);
+ 	return proc_register(parent, p);
+ }
+ EXPORT_SYMBOL(proc_create_data);
+@@ -636,7 +642,6 @@ struct proc_dir_entry *proc_create_seq_private(const char *name, umode_t mode,
+ 	p->proc_ops = &proc_seq_ops;
+ 	p->seq_ops = ops;
+ 	p->state_size = state_size;
+-	pde_set_flags(p);
+ 	return proc_register(parent, p);
+ }
+ EXPORT_SYMBOL(proc_create_seq_private);
+@@ -667,7 +672,6 @@ struct proc_dir_entry *proc_create_single_data(const char *name, umode_t mode,
+ 		return NULL;
+ 	p->proc_ops = &proc_single_ops;
+ 	p->single_show = show;
+-	pde_set_flags(p);
+ 	return proc_register(parent, p);
+ }
+ EXPORT_SYMBOL(proc_create_single_data);
+-- 
+2.25.1
+
+

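The pattern in the patch above is worth spelling out: the presence of each proc_ops method is snapshotted into flag bits on the PDE at registration time, so the open/lseek paths test stable bits on the entry instead of dereferencing pde->proc_ops, which can belong to a module that is being unloaded. A condensed sketch of the idea, using simplified types rather than the kernel's real declarations:

struct entry_ops {
	long (*lseek)(void);
};

struct entry {
	unsigned int flags;		/* snapshot, safe after unload */
	const struct entry_ops *ops;	/* may go stale with its module */
};

#define ENTRY_HAS_LSEEK 0x1

static void entry_set_flags(struct entry *e)
{
	const struct entry_ops *ops = e->ops;

	if (!ops)		/* net proc entries may have no ops yet */
		return;
	if (ops->lseek)
		e->flags |= ENTRY_HAS_LSEEK;
}

/* one call site in the common registration path, mirroring the move
 * of pde_set_flags() into proc_register() in the patch above */
static int entry_register(struct entry *e)
{
	entry_set_flags(e);
	/* ... insert into parent, publish ... */
	return 0;
}
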

^ permalink raw reply related	[flat|nested] 82+ messages in thread
* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2025-08-28 13:27 Arisu Tachibana
  0 siblings, 0 replies; 82+ messages in thread
From: Arisu Tachibana @ 2025-08-28 13:27 UTC (permalink / raw
  To: gentoo-commits

commit:     bc0c89e3b9fc22dce88db41cccad192c71668eb9
Author:     Arisu Tachibana <alicef <AT> gentoo <DOT> org>
AuthorDate: Thu Aug 28 13:26:32 2025 +0000
Commit:     Arisu Tachibana <alicef <AT> gentoo <DOT> org>
CommitDate: Thu Aug 28 13:26:32 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=bc0c89e3

net/ipv4: Fix regression in local-broadcast routes

Signed-off-by: Arisu Tachibana <alicef <AT> gentoo.org>

 0000_README                                        |   4 +
 ..._fix_regression_in_local-broadcast_routes.patch | 134 +++++++++++++++++++++
 2 files changed, 138 insertions(+)

diff --git a/0000_README b/0000_README
index 806e7f6a..bef879a3 100644
--- a/0000_README
+++ b/0000_README
@@ -239,6 +239,10 @@ Patch:  2000_BT-Check-key-sizes-only-if-Secure-Simple-Pairing-enabled.patch
 From:   https://lore.kernel.org/linux-bluetooth/20190522070540.48895-1-marcel@holtmann.org/raw
 Desc:   Bluetooth: Check key sizes only when Secure Simple Pairing is enabled. See bug #686758
 
+Patch:  2010_ipv4_fix_regression_in_local-broadcast_routes.patch
+From:   https://lore.kernel.org/regressions/20250826121750.8451-1-oscmaes92@gmail.com/
+Desc:   net: ipv4: fix regression in local-broadcast routes
+
 Patch:  2910_bfp-mark-get-entry-ip-as--maybe-unused.patch
 From:   https://www.spinics.net/lists/stable/msg604665.html
 Desc:   bpf: mark get_entry_ip as __maybe_unused

diff --git a/2010_ipv4_fix_regression_in_local-broadcast_routes.patch b/2010_ipv4_fix_regression_in_local-broadcast_routes.patch
new file mode 100644
index 00000000..a306132d
--- /dev/null
+++ b/2010_ipv4_fix_regression_in_local-broadcast_routes.patch
@@ -0,0 +1,134 @@
+From mboxrd@z Thu Jan  1 00:00:00 1970
+From: Oscar Maes <oscmaes92@gmail.com>
+To: bacs@librecast.net,
+	brett@librecast.net,
+	kuba@kernel.org
+Cc: davem@davemloft.net,
+	dsahern@kernel.org,
+	netdev@vger.kernel.org,
+	regressions@lists.linux.dev,
+	stable@vger.kernel.org,
+	Oscar Maes <oscmaes92@gmail.com>
+Subject: [PATCH net v2 1/2] net: ipv4: fix regression in local-broadcast routes
+Date: Tue, 26 Aug 2025 14:17:49 +0200
+Message-Id: <20250826121750.8451-1-oscmaes92@gmail.com>
+X-Mailer: git-send-email 2.39.5
+In-Reply-To: <20250826121126-oscmaes92@gmail.com>
+References: <20250826121126-oscmaes92@gmail.com>
+MIME-Version: 1.0
+Content-Transfer-Encoding: 8bit
+
+Commit 9e30ecf23b1b ("net: ipv4: fix incorrect MTU in broadcast routes")
+introduced a regression where local-broadcast packets would have their
+gateway set in __mkroute_output, which was caused by fi = NULL being
+removed.
+
+Fix this by resetting the fib_info for local-broadcast packets. This
+preserves the intended changes for directed-broadcast packets.
+
+Cc: stable@vger.kernel.org
+Fixes: 9e30ecf23b1b ("net: ipv4: fix incorrect MTU in broadcast routes")
+Reported-by: Brett A C Sheffield <bacs@librecast.net>
+Closes: https://lore.kernel.org/regressions/20250822165231.4353-4-bacs@librecast.net
+Signed-off-by: Oscar Maes <oscmaes92@gmail.com>
+---
+
+Thanks to Brett Sheffield for finding the regression and writing
+the initial fix!
+---
+ net/ipv4/route.c | 10 +++++++---
+ 1 file changed, 7 insertions(+), 3 deletions(-)
+
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 1f212b2ce4c6..24c898b7654f 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -2575,12 +2575,16 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
+ 		    !netif_is_l3_master(dev_out))
+ 			return ERR_PTR(-EINVAL);
+ 
+-	if (ipv4_is_lbcast(fl4->daddr))
++	if (ipv4_is_lbcast(fl4->daddr)) {
+ 		type = RTN_BROADCAST;
+-	else if (ipv4_is_multicast(fl4->daddr))
++
++		/* reset fi to prevent gateway resolution */
++		fi = NULL;
++	} else if (ipv4_is_multicast(fl4->daddr)) {
+ 		type = RTN_MULTICAST;
+-	else if (ipv4_is_zeronet(fl4->daddr))
++	} else if (ipv4_is_zeronet(fl4->daddr)) {
+ 		return ERR_PTR(-EINVAL);
++	}
+ 
+ 	if (dev_out->flags & IFF_LOOPBACK)
+ 		flags |= RTCF_LOCAL;
+-- 
+2.39.5
+
+

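The one-line rationale for resetting fi: when __mkroute_output leaves a non-NULL fib_info attached, the later nexthop setup copies the FIB gateway into the new rtable, which is wrong for 255.255.255.255 -- a local broadcast must go out on-link, never via a gateway. A simplified sketch of that dependency (illustrative types, not the kernel's):

struct fib_info_sk { unsigned int nh_gw; };	/* gateway from the FIB */
struct rtable_sk   { unsigned int rt_gw; int rt_uses_gateway; };

static void set_nexthop_sk(struct rtable_sk *rt,
			   const struct fib_info_sk *fi)
{
	/* a non-NULL fi with a gateway turns the route into a gatewayed
	 * one; with fi reset to NULL (the fix), a local-broadcast route
	 * keeps rt_gw == 0 and is emitted directly on the link */
	if (fi && fi->nh_gw) {
		rt->rt_gw = fi->nh_gw;
		rt->rt_uses_gateway = 1;
	}
}
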

^ permalink raw reply related	[flat|nested] 82+ messages in thread
* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2025-08-28 11:49 Arisu Tachibana
  0 siblings, 0 replies; 82+ messages in thread
From: Arisu Tachibana @ 2025-08-28 11:49 UTC (permalink / raw
  To: gentoo-commits

commit:     7e90bb7885c15f9b071072648ab3d24e829bc95d
Author:     Arisu Tachibana <alicef <AT> gentoo <DOT> org>
AuthorDate: Thu Aug 28 11:48:52 2025 +0000
Commit:     Arisu Tachibana <alicef <AT> gentoo <DOT> org>
CommitDate: Thu Aug 28 11:48:52 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=7e90bb78

Add revert of fs: prevent file descriptor table allocation

Signed-off-by: Arisu Tachibana <alicef <AT> gentoo.org>

 0000_README                                        |  4 +++
 ..._prevent_file_descriptor_table_allocation.patch | 30 ++++++++++++++++++++++
 2 files changed, 34 insertions(+)

diff --git a/0000_README b/0000_README
index 55894e22..806e7f6a 100644
--- a/0000_README
+++ b/0000_README
@@ -223,6 +223,10 @@ Patch:  1510_fs-enable-link-security-restrictions-by-default.patch
 From:   http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch/
 Desc:   Enable link security restrictions by default.
 
+Patch:  1520_revert_fs_prevent_file_descriptor_table_allocation.patch
+From:   https://bugs.gentoo.org/961922
Desc:   Revert of "prevent file descriptor table allocation", possibly causing crashes
+
 Patch:  1700_sparc-address-warray-bound-warnings.patch
 From:   https://github.com/KSPP/linux/issues/109
 Desc:   Address -Warray-bounds warnings 

diff --git a/1520_revert_fs_prevent_file_descriptor_table_allocation.patch b/1520_revert_fs_prevent_file_descriptor_table_allocation.patch
new file mode 100644
index 00000000..aef6add0
--- /dev/null
+++ b/1520_revert_fs_prevent_file_descriptor_table_allocation.patch
@@ -0,0 +1,30 @@
+---
+ fs/file.c | 15 ---------------
+ 1 file changed, 15 deletions(-)
+
+diff --git a/fs/file.c b/fs/file.c
+index 4579c3296498..b6fb6d18ac3b 100644
+--- a/fs/file.c
++++ b/fs/file.c
+@@ -126,21 +126,6 @@ static struct fdtable * alloc_fdtable(unsigned int nr)
+ 	if (unlikely(nr > sysctl_nr_open))
+ 		nr = ((sysctl_nr_open - 1) | (BITS_PER_LONG - 1)) + 1;
+ 
+-	/*
+-	 * Check if the allocation size would exceed INT_MAX. kvmalloc_array()
+-	 * and kvmalloc() will warn if the allocation size is greater than
+-	 * INT_MAX, as filp_cachep objects are not __GFP_NOWARN.
+-	 *
+-	 * This can happen when sysctl_nr_open is set to a very high value and
+-	 * a process tries to use a file descriptor near that limit. For example,
+-	 * if sysctl_nr_open is set to 1073741816 (0x3ffffff8) - which is what
+-	 * systemd typically sets it to - then trying to use a file descriptor
+-	 * close to that value will require allocating a file descriptor table
+-	 * that exceeds 8GB in size.
+-	 */
+-	if (unlikely(nr > INT_MAX / sizeof(struct file *)))
+-		return ERR_PTR(-EMFILE);
+-
+ 	fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL_ACCOUNT);
+ 	if (!fdt)
+ 		goto out;

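For context on what is being reverted: the removed guard refused fdtable sizes whose backing array of struct file pointers would exceed INT_MAX bytes. With systemd's usual sysctl_nr_open of 1073741816 and 8-byte pointers, that is an ~8 GiB allocation, which is what the upstream check guarded against; the Gentoo revert restores the old behavior because the early -EMFILE was possibly behind the crashes in bug 961922. The arithmetic, as a small sketch (assuming 64-bit pointers):

#include <limits.h>
#include <stdio.h>

int main(void)
{
	unsigned long nr_open = 1073741816UL;	/* 0x3ffffff8 */
	unsigned long ptr_sz = 8;		/* sizeof(struct file *) */

	/* entries the (reverted) guard allowed: 268435455 */
	printf("guard limit: %lu entries\n",
	       (unsigned long)INT_MAX / ptr_sz);
	/* table size near nr_open: 8589934528 bytes, well over INT_MAX */
	printf("worst case : %lu bytes\n", nr_open * ptr_sz);
	return 0;
}
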

^ permalink raw reply related	[flat|nested] 82+ messages in thread
* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2025-08-21  1:11 Arisu Tachibana
  0 siblings, 0 replies; 82+ messages in thread
From: Arisu Tachibana @ 2025-08-21  1:11 UTC (permalink / raw
  To: gentoo-commits

commit:     ff66f729026bc5a12286d99a67ccf853638d46c0
Author:     Arisu Tachibana <alicef <AT> gentoo <DOT> org>
AuthorDate: Thu Aug 21 01:11:05 2025 +0000
Commit:     Arisu Tachibana <alicef <AT> gentoo <DOT> org>
CommitDate: Thu Aug 21 01:11:05 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=ff66f729

Linux patch 6.12.43

Signed-off-by: Arisu Tachibana <alicef <AT> gentoo.org>

 0000_README              |     4 +
 1042_linux-6.12.43.patch | 15322 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 15326 insertions(+)

diff --git a/0000_README b/0000_README
index 31e38399..55894e22 100644
--- a/0000_README
+++ b/0000_README
@@ -211,6 +211,10 @@ Patch:  1041_linux-6.12.42.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.12.42
 
+Patch:  1042_linux-6.12.43.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.12.43
+
 Patch:  1500_fortify-copy-size-value-range-tracking-fix.patch
 From:   https://git.kernel.org/
 Desc:   fortify: Hide run-time copy size from value range tracking

diff --git a/1042_linux-6.12.43.patch b/1042_linux-6.12.43.patch
new file mode 100644
index 00000000..da7db2f1
--- /dev/null
+++ b/1042_linux-6.12.43.patch
@@ -0,0 +1,15322 @@
+diff --git a/Documentation/filesystems/fscrypt.rst b/Documentation/filesystems/fscrypt.rst
+index 04eaab01314bc1..b07368e949aeab 100644
+--- a/Documentation/filesystems/fscrypt.rst
++++ b/Documentation/filesystems/fscrypt.rst
+@@ -141,9 +141,8 @@ However, these ioctls have some limitations:
+   CONFIG_PAGE_POISONING=y in your kernel config and add page_poison=1
+   to your kernel command line.  However, this has a performance cost.
+ 
+-- Secret keys might still exist in CPU registers, in crypto
+-  accelerator hardware (if used by the crypto API to implement any of
+-  the algorithms), or in other places not explicitly considered here.
++- Secret keys might still exist in CPU registers or in other places
++  not explicitly considered here.
+ 
+ Limitations of v1 policies
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~
+@@ -378,9 +377,12 @@ the work is done by XChaCha12, which is much faster than AES when AES
+ acceleration is unavailable.  For more information about Adiantum, see
+ `the Adiantum paper <https://eprint.iacr.org/2018/720.pdf>`_.
+ 
+-The (AES-128-CBC-ESSIV, AES-128-CBC-CTS) pair exists only to support
+-systems whose only form of AES acceleration is an off-CPU crypto
+-accelerator such as CAAM or CESA that does not support XTS.
++The (AES-128-CBC-ESSIV, AES-128-CBC-CTS) pair was added to try to
++provide a more efficient option for systems that lack AES instructions
++in the CPU but do have a non-inline crypto engine such as CAAM or CESA
++that supports AES-CBC (and not AES-XTS).  This is deprecated.  It has
++been shown that just doing AES on the CPU is actually faster.
++Moreover, Adiantum is faster still and is recommended on such systems.
+ 
+ The remaining mode pairs are the "national pride ciphers":
+ 
+@@ -1289,22 +1291,13 @@ this by validating all top-level encryption policies prior to access.
+ Inline encryption support
+ =========================
+ 
+-By default, fscrypt uses the kernel crypto API for all cryptographic
+-operations (other than HKDF, which fscrypt partially implements
+-itself).  The kernel crypto API supports hardware crypto accelerators,
+-but only ones that work in the traditional way where all inputs and
+-outputs (e.g. plaintexts and ciphertexts) are in memory.  fscrypt can
+-take advantage of such hardware, but the traditional acceleration
+-model isn't particularly efficient and fscrypt hasn't been optimized
+-for it.
+-
+-Instead, many newer systems (especially mobile SoCs) have *inline
+-encryption hardware* that can encrypt/decrypt data while it is on its
+-way to/from the storage device.  Linux supports inline encryption
+-through a set of extensions to the block layer called *blk-crypto*.
+-blk-crypto allows filesystems to attach encryption contexts to bios
+-(I/O requests) to specify how the data will be encrypted or decrypted
+-in-line.  For more information about blk-crypto, see
++Many newer systems (especially mobile SoCs) have *inline encryption
++hardware* that can encrypt/decrypt data while it is on its way to/from
++the storage device.  Linux supports inline encryption through a set of
++extensions to the block layer called *blk-crypto*.  blk-crypto allows
++filesystems to attach encryption contexts to bios (I/O requests) to
++specify how the data will be encrypted or decrypted in-line.  For more
++information about blk-crypto, see
+ :ref:`Documentation/block/inline-encryption.rst <inline_encryption>`.
+ 
+ On supported filesystems (currently ext4 and f2fs), fscrypt can use
+diff --git a/Documentation/firmware-guide/acpi/i2c-muxes.rst b/Documentation/firmware-guide/acpi/i2c-muxes.rst
+index 3a8997ccd7c4b6..f366539acd792a 100644
+--- a/Documentation/firmware-guide/acpi/i2c-muxes.rst
++++ b/Documentation/firmware-guide/acpi/i2c-muxes.rst
+@@ -14,7 +14,7 @@ Consider this topology::
+     |      |   | 0x70 |--CH01--> i2c client B (0x50)
+     +------+   +------+
+ 
+-which corresponds to the following ASL::
++which corresponds to the following ASL (in the scope of \_SB)::
+ 
+     Device (SMB1)
+     {
+@@ -24,7 +24,7 @@ which corresponds to the following ASL::
+             Name (_HID, ...)
+             Name (_CRS, ResourceTemplate () {
+                 I2cSerialBus (0x70, ControllerInitiated, I2C_SPEED,
+-                            AddressingMode7Bit, "^SMB1", 0x00,
++                            AddressingMode7Bit, "\\_SB.SMB1", 0x00,
+                             ResourceConsumer,,)
+             }
+ 
+@@ -37,7 +37,7 @@ which corresponds to the following ASL::
+                     Name (_HID, ...)
+                     Name (_CRS, ResourceTemplate () {
+                         I2cSerialBus (0x50, ControllerInitiated, I2C_SPEED,
+-                                    AddressingMode7Bit, "^CH00", 0x00,
++                                    AddressingMode7Bit, "\\_SB.SMB1.CH00", 0x00,
+                                     ResourceConsumer,,)
+                     }
+                 }
+@@ -52,7 +52,7 @@ which corresponds to the following ASL::
+                     Name (_HID, ...)
+                     Name (_CRS, ResourceTemplate () {
+                         I2cSerialBus (0x50, ControllerInitiated, I2C_SPEED,
+-                                    AddressingMode7Bit, "^CH01", 0x00,
++                                    AddressingMode7Bit, "\\_SB.SMB1.CH01", 0x00,
+                                     ResourceConsumer,,)
+                     }
+                 }
+diff --git a/Makefile b/Makefile
+index 265dba73ce3373..3dc8acf73bfaf5 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 12
+-SUBLEVEL = 42
++SUBLEVEL = 43
+ EXTRAVERSION =
+ NAME = Baby Opossum Posse
+ 
+diff --git a/arch/arm/mach-rockchip/platsmp.c b/arch/arm/mach-rockchip/platsmp.c
+index 36915a073c2340..f432d22bfed844 100644
+--- a/arch/arm/mach-rockchip/platsmp.c
++++ b/arch/arm/mach-rockchip/platsmp.c
+@@ -279,11 +279,6 @@ static void __init rockchip_smp_prepare_cpus(unsigned int max_cpus)
+ 	}
+ 
+ 	if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9) {
+-		if (rockchip_smp_prepare_sram(node)) {
+-			of_node_put(node);
+-			return;
+-		}
+-
+ 		/* enable the SCU power domain */
+ 		pmu_set_power_domain(PMU_PWRDN_SCU, true);
+ 
+@@ -316,11 +311,19 @@ static void __init rockchip_smp_prepare_cpus(unsigned int max_cpus)
+ 		asm ("mrc p15, 1, %0, c9, c0, 2\n" : "=r" (l2ctlr));
+ 		ncores = ((l2ctlr >> 24) & 0x3) + 1;
+ 	}
+-	of_node_put(node);
+ 
+ 	/* Make sure that all cores except the first are really off */
+ 	for (i = 1; i < ncores; i++)
+ 		pmu_set_power_domain(0 + i, false);
++
++	if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9) {
++		if (rockchip_smp_prepare_sram(node)) {
++			of_node_put(node);
++			return;
++		}
++	}
++
++	of_node_put(node);
+ }
+ 
+ static void __init rk3036_smp_prepare_cpus(unsigned int max_cpus)
+diff --git a/arch/arm/mach-tegra/reset.c b/arch/arm/mach-tegra/reset.c
+index d5c805adf7a82b..ea706fac63587a 100644
+--- a/arch/arm/mach-tegra/reset.c
++++ b/arch/arm/mach-tegra/reset.c
+@@ -63,7 +63,7 @@ static void __init tegra_cpu_reset_handler_enable(void)
+ 	BUG_ON(is_enabled);
+ 	BUG_ON(tegra_cpu_reset_handler_size > TEGRA_IRAM_RESET_HANDLER_SIZE);
+ 
+-	memcpy(iram_base, (void *)__tegra_cpu_reset_handler_start,
++	memcpy_toio(iram_base, (void *)__tegra_cpu_reset_handler_start,
+ 			tegra_cpu_reset_handler_size);
+ 
+ 	err = call_firmware_op(set_cpu_boot_addr, 0, reset_address);
+diff --git a/arch/arm64/boot/dts/ti/k3-j722s-evm.dts b/arch/arm64/boot/dts/ti/k3-j722s-evm.dts
+index 710f80a14b6472..98fc1c0f86a423 100644
+--- a/arch/arm64/boot/dts/ti/k3-j722s-evm.dts
++++ b/arch/arm64/boot/dts/ti/k3-j722s-evm.dts
+@@ -495,8 +495,8 @@ exp1: gpio@23 {
+ 		p05-hog {
+ 			/* P05 - USB2.0_MUX_SEL */
+ 			gpio-hog;
+-			gpios = <5 GPIO_ACTIVE_HIGH>;
+-			output-high;
++			gpios = <5 GPIO_ACTIVE_LOW>;
++			output-low;
+ 		};
+ 
+ 		p01_hog: p01-hog {
+diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
+index a407f9cd549edc..c07a58b96329d8 100644
+--- a/arch/arm64/include/asm/acpi.h
++++ b/arch/arm64/include/asm/acpi.h
+@@ -150,7 +150,7 @@ acpi_set_mailbox_entry(int cpu, struct acpi_madt_generic_interrupt *processor)
+ {}
+ #endif
+ 
+-static inline const char *acpi_get_enable_method(int cpu)
++static __always_inline const char *acpi_get_enable_method(int cpu)
+ {
+ 	if (acpi_psci_present())
+ 		return "psci";
+diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c
+index e6f66491fbe932..862bb1cba4f04a 100644
+--- a/arch/arm64/kernel/acpi.c
++++ b/arch/arm64/kernel/acpi.c
+@@ -197,6 +197,8 @@ static int __init acpi_fadt_sanity_check(void)
+  */
+ void __init acpi_boot_table_init(void)
+ {
++	int ret;
++
+ 	/*
+ 	 * Enable ACPI instead of device tree unless
+ 	 * - ACPI has been disabled explicitly (acpi=off), or
+@@ -250,10 +252,12 @@ void __init acpi_boot_table_init(void)
+ 		 * behaviour, use acpi=nospcr to disable console in ACPI SPCR
+ 		 * table as default serial console.
+ 		 */
+-		acpi_parse_spcr(earlycon_acpi_spcr_enable,
++		ret = acpi_parse_spcr(earlycon_acpi_spcr_enable,
+ 			!param_acpi_nospcr);
+-		pr_info("Use ACPI SPCR as default console: %s\n",
+-				param_acpi_nospcr ? "No" : "Yes");
++		if (!ret || param_acpi_nospcr || !IS_ENABLED(CONFIG_ACPI_SPCR_TABLE))
++			pr_info("Use ACPI SPCR as default console: No\n");
++		else
++			pr_info("Use ACPI SPCR as default console: Yes\n");
+ 
+ 		if (IS_ENABLED(CONFIG_ACPI_BGRT))
+ 			acpi_table_parse(ACPI_SIG_BGRT, acpi_parse_bgrt);
+diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
+index 2729faaee4b4c2..1d60a08a218439 100644
+--- a/arch/arm64/kernel/stacktrace.c
++++ b/arch/arm64/kernel/stacktrace.c
+@@ -123,6 +123,8 @@ kunwind_recover_return_address(struct kunwind_state *state)
+ 		orig_pc = kretprobe_find_ret_addr(state->task,
+ 						  (void *)state->common.fp,
+ 						  &state->kr_cur);
++		if (!orig_pc)
++			return -EINVAL;
+ 		state->common.pc = orig_pc;
+ 	}
+ #endif /* CONFIG_KRETPROBES */
+diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
+index 563cbce1112696..e2e8ffa65aa586 100644
+--- a/arch/arm64/kernel/traps.c
++++ b/arch/arm64/kernel/traps.c
+@@ -921,6 +921,7 @@ void __noreturn panic_bad_stack(struct pt_regs *regs, unsigned long esr, unsigne
+ 
+ void __noreturn arm64_serror_panic(struct pt_regs *regs, unsigned long esr)
+ {
++	add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK);
+ 	console_verbose();
+ 
+ 	pr_crit("SError Interrupt on CPU%d, code 0x%016lx -- %s\n",
+diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
+index 850307b49babde..2d1ebc0c3437f2 100644
+--- a/arch/arm64/mm/fault.c
++++ b/arch/arm64/mm/fault.c
+@@ -798,6 +798,7 @@ static int do_sea(unsigned long far, unsigned long esr, struct pt_regs *regs)
+ 		 */
+ 		siaddr  = untagged_addr(far);
+ 	}
++	add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK);
+ 	arm64_notify_die(inf->name, regs, inf->sig, inf->code, siaddr, esr);
+ 
+ 	return 0;
+diff --git a/arch/arm64/mm/ptdump_debugfs.c b/arch/arm64/mm/ptdump_debugfs.c
+index 68bf1a125502da..1e308328c07966 100644
+--- a/arch/arm64/mm/ptdump_debugfs.c
++++ b/arch/arm64/mm/ptdump_debugfs.c
+@@ -1,6 +1,5 @@
+ // SPDX-License-Identifier: GPL-2.0
+ #include <linux/debugfs.h>
+-#include <linux/memory_hotplug.h>
+ #include <linux/seq_file.h>
+ 
+ #include <asm/ptdump.h>
+@@ -9,9 +8,7 @@ static int ptdump_show(struct seq_file *m, void *v)
+ {
+ 	struct ptdump_info *info = m->private;
+ 
+-	get_online_mems();
+ 	ptdump_walk(m, info);
+-	put_online_mems();
+ 	return 0;
+ }
+ DEFINE_SHOW_ATTRIBUTE(ptdump);
+diff --git a/arch/loongarch/kernel/env.c b/arch/loongarch/kernel/env.c
+index 27144de5c5fe4f..c0a5dc9aeae287 100644
+--- a/arch/loongarch/kernel/env.c
++++ b/arch/loongarch/kernel/env.c
+@@ -39,16 +39,19 @@ void __init init_environ(void)
+ 
+ static int __init init_cpu_fullname(void)
+ {
+-	struct device_node *root;
+ 	int cpu, ret;
+-	char *model;
++	char *cpuname;
++	const char *model;
++	struct device_node *root;
+ 
+ 	/* Parsing cpuname from DTS model property */
+ 	root = of_find_node_by_path("/");
+-	ret = of_property_read_string(root, "model", (const char **)&model);
++	ret = of_property_read_string(root, "model", &model);
++	if (ret == 0) {
++		cpuname = kstrdup(model, GFP_KERNEL);
++		loongson_sysconf.cpuname = strsep(&cpuname, " ");
++	}
+ 	of_node_put(root);
+-	if (ret == 0)
+-		loongson_sysconf.cpuname = strsep(&model, " ");
+ 
+ 	if (loongson_sysconf.cpuname && !strncmp(loongson_sysconf.cpuname, "Loongson", 8)) {
+ 		for (cpu = 0; cpu < NR_CPUS; cpu++)
+diff --git a/arch/loongarch/kernel/relocate_kernel.S b/arch/loongarch/kernel/relocate_kernel.S
+index 84e6de2fd97354..8b5140ac9ea112 100644
+--- a/arch/loongarch/kernel/relocate_kernel.S
++++ b/arch/loongarch/kernel/relocate_kernel.S
+@@ -109,4 +109,4 @@ SYM_CODE_END(kexec_smp_wait)
+ relocate_new_kernel_end:
+ 
+ 	.section ".data"
+-SYM_DATA(relocate_new_kernel_size, .long relocate_new_kernel_end - relocate_new_kernel)
++SYM_DATA(relocate_new_kernel_size, .quad relocate_new_kernel_end - relocate_new_kernel)
+diff --git a/arch/loongarch/kernel/unwind_orc.c b/arch/loongarch/kernel/unwind_orc.c
+index b2572287633179..b4b4ac8dbf4178 100644
+--- a/arch/loongarch/kernel/unwind_orc.c
++++ b/arch/loongarch/kernel/unwind_orc.c
+@@ -507,7 +507,7 @@ bool unwind_next_frame(struct unwind_state *state)
+ 
+ 	state->pc = bt_address(pc);
+ 	if (!state->pc) {
+-		pr_err("cannot find unwind pc at %pK\n", (void *)pc);
++		pr_err("cannot find unwind pc at %p\n", (void *)pc);
+ 		goto err;
+ 	}
+ 
+diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c
+index fa1500d4aa3e3a..5ba3249cea98a2 100644
+--- a/arch/loongarch/net/bpf_jit.c
++++ b/arch/loongarch/net/bpf_jit.c
+@@ -208,11 +208,9 @@ bool bpf_jit_supports_far_kfunc_call(void)
+ 	return true;
+ }
+ 
+-/* initialized on the first pass of build_body() */
+-static int out_offset = -1;
+-static int emit_bpf_tail_call(struct jit_ctx *ctx)
++static int emit_bpf_tail_call(struct jit_ctx *ctx, int insn)
+ {
+-	int off;
++	int off, tc_ninsn = 0;
+ 	u8 tcc = tail_call_reg(ctx);
+ 	u8 a1 = LOONGARCH_GPR_A1;
+ 	u8 a2 = LOONGARCH_GPR_A2;
+@@ -222,7 +220,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
+ 	const int idx0 = ctx->idx;
+ 
+ #define cur_offset (ctx->idx - idx0)
+-#define jmp_offset (out_offset - (cur_offset))
++#define jmp_offset (tc_ninsn - (cur_offset))
+ 
+ 	/*
+ 	 * a0: &ctx
+@@ -232,6 +230,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
+ 	 * if (index >= array->map.max_entries)
+ 	 *	 goto out;
+ 	 */
++	tc_ninsn = insn ? ctx->offset[insn+1] - ctx->offset[insn] : ctx->offset[0];
+ 	off = offsetof(struct bpf_array, map.max_entries);
+ 	emit_insn(ctx, ldwu, t1, a1, off);
+ 	/* bgeu $a2, $t1, jmp_offset */
+@@ -263,15 +262,6 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
+ 	emit_insn(ctx, ldd, t3, t2, off);
+ 	__build_epilogue(ctx, true);
+ 
+-	/* out: */
+-	if (out_offset == -1)
+-		out_offset = cur_offset;
+-	if (cur_offset != out_offset) {
+-		pr_err_once("tail_call out_offset = %d, expected %d!\n",
+-			    cur_offset, out_offset);
+-		return -1;
+-	}
+-
+ 	return 0;
+ 
+ toofar:
+@@ -916,7 +906,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
+ 	/* tail call */
+ 	case BPF_JMP | BPF_TAIL_CALL:
+ 		mark_tail_call(ctx);
+-		if (emit_bpf_tail_call(ctx) < 0)
++		if (emit_bpf_tail_call(ctx, i) < 0)
+ 			return -EINVAL;
+ 		break;
+ 
+@@ -1342,7 +1332,6 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
+ 	if (tmp_blinded)
+ 		bpf_jit_prog_release_other(prog, prog == orig_prog ? tmp : orig_prog);
+ 
+-	out_offset = -1;
+ 
+ 	return prog;
+ 
+diff --git a/arch/mips/include/asm/vpe.h b/arch/mips/include/asm/vpe.h
+index 61fd4d0aeda41f..c0769dc4b85321 100644
+--- a/arch/mips/include/asm/vpe.h
++++ b/arch/mips/include/asm/vpe.h
+@@ -119,4 +119,12 @@ void cleanup_tc(struct tc *tc);
+ 
+ int __init vpe_module_init(void);
+ void __exit vpe_module_exit(void);
++
++#ifdef CONFIG_MIPS_VPE_LOADER_MT
++void *vpe_alloc(void);
++int vpe_start(void *vpe, unsigned long start);
++int vpe_stop(void *vpe);
++int vpe_free(void *vpe);
++#endif /* CONFIG_MIPS_VPE_LOADER_MT */
++
+ #endif /* _ASM_VPE_H */
+diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
+index b630604c577f9f..02aa6a04a21da4 100644
+--- a/arch/mips/kernel/process.c
++++ b/arch/mips/kernel/process.c
+@@ -690,18 +690,20 @@ unsigned long mips_stack_top(void)
+ 	}
+ 
+ 	/* Space for the VDSO, data page & GIC user page */
+-	top -= PAGE_ALIGN(current->thread.abi->vdso->size);
+-	top -= PAGE_SIZE;
+-	top -= mips_gic_present() ? PAGE_SIZE : 0;
++	if (current->thread.abi) {
++		top -= PAGE_ALIGN(current->thread.abi->vdso->size);
++		top -= PAGE_SIZE;
++		top -= mips_gic_present() ? PAGE_SIZE : 0;
++
++		/* Space to randomize the VDSO base */
++		if (current->flags & PF_RANDOMIZE)
++			top -= VDSO_RANDOMIZE_SIZE;
++	}
+ 
+ 	/* Space for cache colour alignment */
+ 	if (cpu_has_dc_aliases)
+ 		top -= shm_align_mask + 1;
+ 
+-	/* Space to randomize the VDSO base */
+-	if (current->flags & PF_RANDOMIZE)
+-		top -= VDSO_RANDOMIZE_SIZE;
+-
+ 	return top;
+ }
+ 
+diff --git a/arch/mips/lantiq/falcon/sysctrl.c b/arch/mips/lantiq/falcon/sysctrl.c
+index 1187729d8cbb1b..357543996ee661 100644
+--- a/arch/mips/lantiq/falcon/sysctrl.c
++++ b/arch/mips/lantiq/falcon/sysctrl.c
+@@ -214,19 +214,16 @@ void __init ltq_soc_init(void)
+ 	of_node_put(np_syseth);
+ 	of_node_put(np_sysgpe);
+ 
+-	if ((request_mem_region(res_status.start, resource_size(&res_status),
+-				res_status.name) < 0) ||
+-		(request_mem_region(res_ebu.start, resource_size(&res_ebu),
+-				res_ebu.name) < 0) ||
+-		(request_mem_region(res_sys[0].start,
+-				resource_size(&res_sys[0]),
+-				res_sys[0].name) < 0) ||
+-		(request_mem_region(res_sys[1].start,
+-				resource_size(&res_sys[1]),
+-				res_sys[1].name) < 0) ||
+-		(request_mem_region(res_sys[2].start,
+-				resource_size(&res_sys[2]),
+-				res_sys[2].name) < 0))
++	if ((!request_mem_region(res_status.start, resource_size(&res_status),
++				 res_status.name)) ||
++	    (!request_mem_region(res_ebu.start, resource_size(&res_ebu),
++				 res_ebu.name)) ||
++	    (!request_mem_region(res_sys[0].start, resource_size(&res_sys[0]),
++				 res_sys[0].name)) ||
++	    (!request_mem_region(res_sys[1].start, resource_size(&res_sys[1]),
++				 res_sys[1].name)) ||
++	    (!request_mem_region(res_sys[2].start, resource_size(&res_sys[2]),
++				 res_sys[2].name)))
+ 		pr_err("Failed to request core resources");
+ 
+ 	status_membase = ioremap(res_status.start,
+diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile
+index 21b8166a688394..9cd9aa3d16f29a 100644
+--- a/arch/parisc/Makefile
++++ b/arch/parisc/Makefile
+@@ -139,7 +139,7 @@ palo lifimage: vmlinuz
+ 	fi
+ 	@if test ! -f "$(PALOCONF)"; then \
+ 		cp $(srctree)/arch/parisc/defpalo.conf $(objtree)/palo.conf; \
+-		echo 'A generic palo config file ($(objree)/palo.conf) has been created for you.'; \
++		echo 'A generic palo config file ($(objtree)/palo.conf) has been created for you.'; \
+ 		echo 'You should check it and re-run "make palo".'; \
+ 		echo 'WARNING: the "lifimage" file is now placed in this directory by default!'; \
+ 		false; \
+diff --git a/arch/powerpc/include/asm/floppy.h b/arch/powerpc/include/asm/floppy.h
+index f8ce178b43b783..34abf8bea2ccd6 100644
+--- a/arch/powerpc/include/asm/floppy.h
++++ b/arch/powerpc/include/asm/floppy.h
+@@ -144,9 +144,12 @@ static int hard_dma_setup(char *addr, unsigned long size, int mode, int io)
+ 		bus_addr = 0;
+ 	}
+ 
+-	if (!bus_addr)	/* need to map it */
++	if (!bus_addr) {	/* need to map it */
+ 		bus_addr = dma_map_single(&isa_bridge_pcidev->dev, addr, size,
+ 					  dir);
++		if (dma_mapping_error(&isa_bridge_pcidev->dev, bus_addr))
++			return -ENOMEM;
++	}
+ 
+ 	/* remember this one as prev */
+ 	prev_addr = addr;
+diff --git a/arch/powerpc/platforms/512x/mpc512x_lpbfifo.c b/arch/powerpc/platforms/512x/mpc512x_lpbfifo.c
+index 9668b052cd4b3a..f251e0f6826204 100644
+--- a/arch/powerpc/platforms/512x/mpc512x_lpbfifo.c
++++ b/arch/powerpc/platforms/512x/mpc512x_lpbfifo.c
+@@ -240,10 +240,8 @@ static int mpc512x_lpbfifo_kick(void)
+ 	dma_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ 
+ 	/* Make DMA channel work with LPB FIFO data register */
+-	if (dma_dev->device_config(lpbfifo.chan, &dma_conf)) {
+-		ret = -EINVAL;
+-		goto err_dma_prep;
+-	}
++	if (dma_dev->device_config(lpbfifo.chan, &dma_conf))
++		return -EINVAL;
+ 
+ 	sg_init_table(&sg, 1);
+ 
+diff --git a/arch/riscv/mm/ptdump.c b/arch/riscv/mm/ptdump.c
+index 9d5f657a251b32..1289cc6d3700cd 100644
+--- a/arch/riscv/mm/ptdump.c
++++ b/arch/riscv/mm/ptdump.c
+@@ -6,7 +6,6 @@
+ #include <linux/efi.h>
+ #include <linux/init.h>
+ #include <linux/debugfs.h>
+-#include <linux/memory_hotplug.h>
+ #include <linux/seq_file.h>
+ #include <linux/ptdump.h>
+ 
+@@ -371,9 +370,7 @@ bool ptdump_check_wx(void)
+ 
+ static int ptdump_show(struct seq_file *m, void *v)
+ {
+-	get_online_mems();
+ 	ptdump_walk(m, m->private);
+-	put_online_mems();
+ 
+ 	return 0;
+ }
+diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
+index 640901f2fbc3cc..811ec531954b63 100644
+--- a/arch/s390/include/asm/timex.h
++++ b/arch/s390/include/asm/timex.h
+@@ -192,13 +192,6 @@ static inline unsigned long get_tod_clock_fast(void)
+ 	asm volatile("stckf %0" : "=Q" (clk) : : "cc");
+ 	return clk;
+ }
+-
+-static inline cycles_t get_cycles(void)
+-{
+-	return (cycles_t) get_tod_clock() >> 2;
+-}
+-#define get_cycles get_cycles
+-
+ int get_phys_clock(unsigned long *clock);
+ void init_cpu_timer(void);
+ 
+@@ -226,6 +219,12 @@ static inline unsigned long get_tod_clock_monotonic(void)
+ 	return tod;
+ }
+ 
++static inline cycles_t get_cycles(void)
++{
++	return (cycles_t)get_tod_clock_monotonic() >> 2;
++}
++#define get_cycles get_cycles
++
+ /**
+  * tod_to_ns - convert a TOD format value to nanoseconds
+  * @todval: to be converted TOD format value
+diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
+index 62f8f5a750a308..0c054e2d1e03ef 100644
+--- a/arch/s390/kernel/early.c
++++ b/arch/s390/kernel/early.c
+@@ -183,6 +183,7 @@ void __init __do_early_pgm_check(struct pt_regs *regs)
+ 
+ 	regs->int_code = lc->pgm_int_code;
+ 	regs->int_parm_long = lc->trans_exc_code;
++	regs->last_break = lc->pgm_last_break;
+ 	ip = __rewind_psw(regs->psw, regs->int_code >> 16);
+ 
+ 	/* Monitor Event? Might be a warning */
+diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
+index b713effe057967..96fc41f26d7e89 100644
+--- a/arch/s390/kernel/time.c
++++ b/arch/s390/kernel/time.c
+@@ -579,7 +579,7 @@ static int stp_sync_clock(void *data)
+ 		atomic_dec(&sync->cpus);
+ 		/* Wait for in_sync to be set. */
+ 		while (READ_ONCE(sync->in_sync) == 0)
+-			__udelay(1);
++			;
+ 	}
+ 	if (sync->in_sync != 1)
+ 		/* Didn't work. Clear per-cpu in sync bit again. */
+diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c
+index fa54f3bc0c8d37..70f184ca648f4c 100644
+--- a/arch/s390/mm/dump_pagetables.c
++++ b/arch/s390/mm/dump_pagetables.c
+@@ -203,11 +203,9 @@ static int ptdump_show(struct seq_file *m, void *v)
+ 		.marker = markers,
+ 	};
+ 
+-	get_online_mems();
+ 	mutex_lock(&cpa_mutex);
+ 	ptdump_walk_pgd(&st.ptdump, &init_mm, NULL);
+ 	mutex_unlock(&cpa_mutex);
+-	put_online_mems();
+ 	return 0;
+ }
+ DEFINE_SHOW_ATTRIBUTE(ptdump);
+diff --git a/arch/um/include/asm/thread_info.h b/arch/um/include/asm/thread_info.h
+index c7b4b49826a2aa..40d823f36c0941 100644
+--- a/arch/um/include/asm/thread_info.h
++++ b/arch/um/include/asm/thread_info.h
+@@ -68,7 +68,11 @@ static inline struct thread_info *current_thread_info(void)
+ #define _TIF_NOTIFY_SIGNAL	(1 << TIF_NOTIFY_SIGNAL)
+ #define _TIF_MEMDIE		(1 << TIF_MEMDIE)
+ #define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
++#define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
+ #define _TIF_SECCOMP		(1 << TIF_SECCOMP)
+ #define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
+ 
++#define _TIF_WORK_MASK		(_TIF_NEED_RESCHED | _TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL | \
++				 _TIF_NOTIFY_RESUME)
++
+ #endif
+diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
+index 9c6cf03ed02b03..eaeeedd629305f 100644
+--- a/arch/um/kernel/process.c
++++ b/arch/um/kernel/process.c
+@@ -81,14 +81,18 @@ struct task_struct *__switch_to(struct task_struct *from, struct task_struct *to
+ void interrupt_end(void)
+ {
+ 	struct pt_regs *regs = &current->thread.regs;
+-
+-	if (need_resched())
+-		schedule();
+-	if (test_thread_flag(TIF_SIGPENDING) ||
+-	    test_thread_flag(TIF_NOTIFY_SIGNAL))
+-		do_signal(regs);
+-	if (test_thread_flag(TIF_NOTIFY_RESUME))
+-		resume_user_mode_work(regs);
++	unsigned long thread_flags;
++
++	thread_flags = read_thread_flags();
++	while (thread_flags & _TIF_WORK_MASK) {
++		if (thread_flags & _TIF_NEED_RESCHED)
++			schedule();
++		if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
++			do_signal(regs);
++		if (thread_flags & _TIF_NOTIFY_RESUME)
++			resume_user_mode_work(regs);
++		thread_flags = read_thread_flags();
++	}
+ }
+ 
+ int get_current_pid(void)
+diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
+index cfb22f8c451a7f..861d080ed4c6ab 100644
+--- a/arch/x86/include/asm/kvm-x86-ops.h
++++ b/arch/x86/include/asm/kvm-x86-ops.h
+@@ -47,7 +47,6 @@ KVM_X86_OP(set_idt)
+ KVM_X86_OP(get_gdt)
+ KVM_X86_OP(set_gdt)
+ KVM_X86_OP(sync_dirty_debug_regs)
+-KVM_X86_OP(set_dr6)
+ KVM_X86_OP(set_dr7)
+ KVM_X86_OP(cache_reg)
+ KVM_X86_OP(get_rflags)
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 0caa3293f6db90..d27df86aa62c79 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -1627,6 +1627,12 @@ static inline u16 kvm_lapic_irq_dest_mode(bool dest_mode_logical)
+ 	return dest_mode_logical ? APIC_DEST_LOGICAL : APIC_DEST_PHYSICAL;
+ }
+ 
++enum kvm_x86_run_flags {
++	KVM_RUN_FORCE_IMMEDIATE_EXIT	= BIT(0),
++	KVM_RUN_LOAD_GUEST_DR6		= BIT(1),
++	KVM_RUN_LOAD_DEBUGCTL		= BIT(2),
++};
++
+ struct kvm_x86_ops {
+ 	const char *name;
+ 
+@@ -1654,6 +1660,12 @@ struct kvm_x86_ops {
+ 	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
+ 	void (*vcpu_put)(struct kvm_vcpu *vcpu);
+ 
++	/*
++	 * Mask of DEBUGCTL bits that are owned by the host, i.e. that need to
++	 * match the host's value even while the guest is active.
++	 */
++	const u64 HOST_OWNED_DEBUGCTL;
++
+ 	void (*update_exception_bitmap)(struct kvm_vcpu *vcpu);
+ 	int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
+ 	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
+@@ -1675,7 +1687,6 @@ struct kvm_x86_ops {
+ 	void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
+ 	void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
+ 	void (*sync_dirty_debug_regs)(struct kvm_vcpu *vcpu);
+-	void (*set_dr6)(struct kvm_vcpu *vcpu, unsigned long value);
+ 	void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value);
+ 	void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
+ 	unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
+@@ -1706,7 +1717,7 @@ struct kvm_x86_ops {
+ 
+ 	int (*vcpu_pre_run)(struct kvm_vcpu *vcpu);
+ 	enum exit_fastpath_completion (*vcpu_run)(struct kvm_vcpu *vcpu,
+-						  bool force_immediate_exit);
++						  u64 run_flags);
+ 	int (*handle_exit)(struct kvm_vcpu *vcpu,
+ 		enum exit_fastpath_completion exit_fastpath);
+ 	int (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
+diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
+index 7ebe76f69417ae..2b6e3127ef4e2d 100644
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -417,6 +417,7 @@
+ #define DEBUGCTLMSR_FREEZE_PERFMON_ON_PMI	(1UL << 12)
+ #define DEBUGCTLMSR_FREEZE_IN_SMM_BIT	14
+ #define DEBUGCTLMSR_FREEZE_IN_SMM	(1UL << DEBUGCTLMSR_FREEZE_IN_SMM_BIT)
++#define DEBUGCTLMSR_RTM_DEBUG		BIT(15)
+ 
+ #define MSR_PEBS_FRONTEND		0x000003f7
+ 
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index c2c7b76d953f77..31b4b73e54053b 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -71,10 +71,9 @@ void (*x86_return_thunk)(void) __ro_after_init = __x86_return_thunk;
+ 
+ static void __init set_return_thunk(void *thunk)
+ {
+-	if (x86_return_thunk != __x86_return_thunk)
+-		pr_warn("x86/bugs: return thunk changed\n");
+-
+ 	x86_return_thunk = thunk;
++
++	pr_info("active return thunk: %ps\n", thunk);
+ }
+ 
+ /* Update SPEC_CTRL MSR and its cached copy unconditionally */
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
+index 1f42a71b15c023..800f781475c028 100644
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -4226,9 +4226,9 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_in
+ 	guest_state_exit_irqoff();
+ }
+ 
+-static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu,
+-					  bool force_immediate_exit)
++static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu, u64 run_flags)
+ {
++	bool force_immediate_exit = run_flags & KVM_RUN_FORCE_IMMEDIATE_EXIT;
+ 	struct vcpu_svm *svm = to_svm(vcpu);
+ 	bool spec_ctrl_intercepted = msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL);
+ 
+@@ -4270,10 +4270,13 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu,
+ 	svm_hv_update_vp_id(svm->vmcb, vcpu);
+ 
+ 	/*
+-	 * Run with all-zero DR6 unless needed, so that we can get the exact cause
+-	 * of a #DB.
++	 * Run with all-zero DR6 unless the guest can write DR6 freely, so that
++	 * KVM can get the exact cause of a #DB.  Note, loading guest DR6 from
++	 * KVM's snapshot is only necessary when DR accesses won't exit.
+ 	 */
+-	if (likely(!(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)))
++	if (unlikely(run_flags & KVM_RUN_LOAD_GUEST_DR6))
++		svm_set_dr6(vcpu, vcpu->arch.dr6);
++	else if (likely(!(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)))
+ 		svm_set_dr6(vcpu, DR6_ACTIVE_LOW);
+ 
+ 	clgi();
+@@ -5084,7 +5087,6 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
+ 	.set_idt = svm_set_idt,
+ 	.get_gdt = svm_get_gdt,
+ 	.set_gdt = svm_set_gdt,
+-	.set_dr6 = svm_set_dr6,
+ 	.set_dr7 = svm_set_dr7,
+ 	.sync_dirty_debug_regs = svm_sync_dirty_debug_regs,
+ 	.cache_reg = svm_cache_reg,
+diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c
+index 47476fcc179a52..3f83e36a657b9e 100644
+--- a/arch/x86/kvm/vmx/main.c
++++ b/arch/x86/kvm/vmx/main.c
+@@ -42,6 +42,8 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
+ 	.vcpu_load = vmx_vcpu_load,
+ 	.vcpu_put = vmx_vcpu_put,
+ 
++	.HOST_OWNED_DEBUGCTL = DEBUGCTLMSR_FREEZE_IN_SMM,
++
+ 	.update_exception_bitmap = vmx_update_exception_bitmap,
+ 	.get_feature_msr = vmx_get_feature_msr,
+ 	.get_msr = vmx_get_msr,
+@@ -60,7 +62,6 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
+ 	.set_idt = vmx_set_idt,
+ 	.get_gdt = vmx_get_gdt,
+ 	.set_gdt = vmx_set_gdt,
+-	.set_dr6 = vmx_set_dr6,
+ 	.set_dr7 = vmx_set_dr7,
+ 	.sync_dirty_debug_regs = vmx_sync_dirty_debug_regs,
+ 	.cache_reg = vmx_cache_reg,
+diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
+index 903e874041ac8d..60bd2791d933d8 100644
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -2653,10 +2653,11 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
+ 	if (vmx->nested.nested_run_pending &&
+ 	    (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) {
+ 		kvm_set_dr(vcpu, 7, vmcs12->guest_dr7);
+-		vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl);
++		vmx_guest_debugctl_write(vcpu, vmcs12->guest_ia32_debugctl &
++					       vmx_get_supported_debugctl(vcpu, false));
+ 	} else {
+ 		kvm_set_dr(vcpu, 7, vcpu->arch.dr7);
+-		vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.pre_vmenter_debugctl);
++		vmx_guest_debugctl_write(vcpu, vmx->nested.pre_vmenter_debugctl);
+ 	}
+ 	if (kvm_mpx_supported() && (!vmx->nested.nested_run_pending ||
+ 	    !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)))
+@@ -3135,7 +3136,8 @@ static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
+ 		return -EINVAL;
+ 
+ 	if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) &&
+-	    CC(!kvm_dr7_valid(vmcs12->guest_dr7)))
++	    (CC(!kvm_dr7_valid(vmcs12->guest_dr7)) ||
++	     CC(!vmx_is_valid_debugctl(vcpu, vmcs12->guest_ia32_debugctl, false))))
+ 		return -EINVAL;
+ 
+ 	if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT) &&
+@@ -3525,7 +3527,7 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
+ 
+ 	if (!vmx->nested.nested_run_pending ||
+ 	    !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
+-		vmx->nested.pre_vmenter_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
++		vmx->nested.pre_vmenter_debugctl = vmx_guest_debugctl_read();
+ 	if (kvm_mpx_supported() &&
+ 	    (!vmx->nested.nested_run_pending ||
+ 	     !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)))
+@@ -4576,6 +4578,12 @@ static void sync_vmcs02_to_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
+ 		(vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) |
+ 		(vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE);
+ 
++	/*
++	 * Note!  Save DR7, but intentionally don't grab DEBUGCTL from vmcs02.
++	 * Writes to DEBUGCTL that aren't intercepted by L1 are immediately
++	 * propagated to vmcs12 (see vmx_set_msr()), as the value loaded into
++	 * vmcs02 doesn't strictly track vmcs12.
++	 */
+ 	if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_DEBUG_CONTROLS)
+ 		vmcs12->guest_dr7 = vcpu->arch.dr7;
+ 
+@@ -4766,7 +4774,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
+ 	__vmx_set_segment(vcpu, &seg, VCPU_SREG_LDTR);
+ 
+ 	kvm_set_dr(vcpu, 7, 0x400);
+-	vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
++	vmx_guest_debugctl_write(vcpu, 0);
+ 
+ 	if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr,
+ 				vmcs12->vm_exit_msr_load_count))
+@@ -4821,6 +4829,9 @@ static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
+ 			WARN_ON(kvm_set_dr(vcpu, 7, vmcs_readl(GUEST_DR7)));
+ 	}
+ 
++	/* Reload DEBUGCTL to ensure vmcs01 has a fresh FREEZE_IN_SMM value. */
++	vmx_reload_guest_debugctl(vcpu);
++
+ 	/*
+ 	 * Note that calling vmx_set_{efer,cr0,cr4} is important as they
+ 	 * handle a variety of side effects to KVM's software model.
+diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
+index 9c9d4a3361664e..a5edc623166aca 100644
+--- a/arch/x86/kvm/vmx/pmu_intel.c
++++ b/arch/x86/kvm/vmx/pmu_intel.c
+@@ -605,11 +605,11 @@ static void intel_pmu_reset(struct kvm_vcpu *vcpu)
+  */
+ static void intel_pmu_legacy_freezing_lbrs_on_pmi(struct kvm_vcpu *vcpu)
+ {
+-	u64 data = vmcs_read64(GUEST_IA32_DEBUGCTL);
++	u64 data = vmx_guest_debugctl_read();
+ 
+ 	if (data & DEBUGCTLMSR_FREEZE_LBRS_ON_PMI) {
+ 		data &= ~DEBUGCTLMSR_LBR;
+-		vmcs_write64(GUEST_IA32_DEBUGCTL, data);
++		vmx_guest_debugctl_write(vcpu, data);
+ 	}
+ }
+ 
+@@ -679,7 +679,7 @@ void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu)
+ 
+ 	if (!lbr_desc->event) {
+ 		vmx_disable_lbr_msrs_passthrough(vcpu);
+-		if (vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR)
++		if (vmx_guest_debugctl_read() & DEBUGCTLMSR_LBR)
+ 			goto warn;
+ 		if (test_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use))
+ 			goto warn;
+@@ -701,7 +701,7 @@ void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu)
+ 
+ static void intel_pmu_cleanup(struct kvm_vcpu *vcpu)
+ {
+-	if (!(vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR))
++	if (!(vmx_guest_debugctl_read() & DEBUGCTLMSR_LBR))
+ 		intel_pmu_release_guest_lbr_event(vcpu);
+ }
+ 
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index 9a4ebf3dfbfc88..6c185a260c5bc8 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -2148,7 +2148,7 @@ int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ 			msr_info->data = vmx->pt_desc.guest.addr_a[index / 2];
+ 		break;
+ 	case MSR_IA32_DEBUGCTLMSR:
+-		msr_info->data = vmcs_read64(GUEST_IA32_DEBUGCTL);
++		msr_info->data = vmx_guest_debugctl_read();
+ 		break;
+ 	default:
+ 	find_uret_msr:
+@@ -2173,7 +2173,7 @@ static u64 nested_vmx_truncate_sysenter_addr(struct kvm_vcpu *vcpu,
+ 	return (unsigned long)data;
+ }
+ 
+-static u64 vmx_get_supported_debugctl(struct kvm_vcpu *vcpu, bool host_initiated)
++u64 vmx_get_supported_debugctl(struct kvm_vcpu *vcpu, bool host_initiated)
+ {
+ 	u64 debugctl = 0;
+ 
+@@ -2185,9 +2185,25 @@ static u64 vmx_get_supported_debugctl(struct kvm_vcpu *vcpu, bool host_initiated
+ 	    (host_initiated || intel_pmu_lbr_is_enabled(vcpu)))
+ 		debugctl |= DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI;
+ 
++	if (boot_cpu_has(X86_FEATURE_RTM) &&
++	    (host_initiated || guest_cpuid_has(vcpu, X86_FEATURE_RTM)))
++		debugctl |= DEBUGCTLMSR_RTM_DEBUG;
++
+ 	return debugctl;
+ }
+ 
++bool vmx_is_valid_debugctl(struct kvm_vcpu *vcpu, u64 data, bool host_initiated)
++{
++	u64 invalid;
++
++	invalid = data & ~vmx_get_supported_debugctl(vcpu, host_initiated);
++	if (invalid & (DEBUGCTLMSR_BTF | DEBUGCTLMSR_LBR)) {
++		kvm_pr_unimpl_wrmsr(vcpu, MSR_IA32_DEBUGCTLMSR, data);
++		invalid &= ~(DEBUGCTLMSR_BTF | DEBUGCTLMSR_LBR);
++	}
++	return !invalid;
++}
++
+ /*
+  * Writes msr value into the appropriate "register".
+  * Returns 0 on success, non-0 otherwise.
+@@ -2256,29 +2272,22 @@ int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ 		}
+ 		vmcs_writel(GUEST_SYSENTER_ESP, data);
+ 		break;
+-	case MSR_IA32_DEBUGCTLMSR: {
+-		u64 invalid;
+-
+-		invalid = data & ~vmx_get_supported_debugctl(vcpu, msr_info->host_initiated);
+-		if (invalid & (DEBUGCTLMSR_BTF|DEBUGCTLMSR_LBR)) {
+-			kvm_pr_unimpl_wrmsr(vcpu, msr_index, data);
+-			data &= ~(DEBUGCTLMSR_BTF|DEBUGCTLMSR_LBR);
+-			invalid &= ~(DEBUGCTLMSR_BTF|DEBUGCTLMSR_LBR);
+-		}
+-
+-		if (invalid)
++	case MSR_IA32_DEBUGCTLMSR:
++		if (!vmx_is_valid_debugctl(vcpu, data, msr_info->host_initiated))
+ 			return 1;
+ 
++		data &= vmx_get_supported_debugctl(vcpu, msr_info->host_initiated);
++
+ 		if (is_guest_mode(vcpu) && get_vmcs12(vcpu)->vm_exit_controls &
+ 						VM_EXIT_SAVE_DEBUG_CONTROLS)
+ 			get_vmcs12(vcpu)->guest_ia32_debugctl = data;
+ 
+-		vmcs_write64(GUEST_IA32_DEBUGCTL, data);
++		vmx_guest_debugctl_write(vcpu, data);
++
+ 		if (intel_pmu_lbr_is_enabled(vcpu) && !to_vmx(vcpu)->lbr_desc.event &&
+ 		    (data & DEBUGCTLMSR_LBR))
+ 			intel_pmu_create_guest_lbr_event(vcpu);
+ 		return 0;
+-	}
+ 	case MSR_IA32_BNDCFGS:
+ 		if (!kvm_mpx_supported() ||
+ 		    (!msr_info->host_initiated &&
+@@ -4823,7 +4832,8 @@ static void init_vmcs(struct vcpu_vmx *vmx)
+ 	vmcs_write32(GUEST_SYSENTER_CS, 0);
+ 	vmcs_writel(GUEST_SYSENTER_ESP, 0);
+ 	vmcs_writel(GUEST_SYSENTER_EIP, 0);
+-	vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
++
++	vmx_guest_debugctl_write(&vmx->vcpu, 0);
+ 
+ 	if (cpu_has_vmx_tpr_shadow()) {
+ 		vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0);
+@@ -5630,12 +5640,6 @@ void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
+ 	set_debugreg(DR6_RESERVED, 6);
+ }
+ 
+-void vmx_set_dr6(struct kvm_vcpu *vcpu, unsigned long val)
+-{
+-	lockdep_assert_irqs_disabled();
+-	set_debugreg(vcpu->arch.dr6, 6);
+-}
+-
+ void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val)
+ {
+ 	vmcs_writel(GUEST_DR7, val);
+@@ -7353,8 +7357,9 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
+ 	guest_state_exit_irqoff();
+ }
+ 
+-fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit)
++fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, u64 run_flags)
+ {
++	bool force_immediate_exit = run_flags & KVM_RUN_FORCE_IMMEDIATE_EXIT;
+ 	struct vcpu_vmx *vmx = to_vmx(vcpu);
+ 	unsigned long cr3, cr4;
+ 
+@@ -7399,6 +7404,12 @@ fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit)
+ 		vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
+ 	vcpu->arch.regs_dirty = 0;
+ 
++	if (run_flags & KVM_RUN_LOAD_GUEST_DR6)
++		set_debugreg(vcpu->arch.dr6, 6);
++
++	if (run_flags & KVM_RUN_LOAD_DEBUGCTL)
++		vmx_reload_guest_debugctl(vcpu);
++
+ 	/*
+ 	 * Refresh vmcs.HOST_CR3 if necessary.  This must be done immediately
+ 	 * prior to VM-Enter, as the kernel may load a new ASID (PCID) any time
+diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
+index cf57fbf12104f5..a7e2de50d27f68 100644
+--- a/arch/x86/kvm/vmx/vmx.h
++++ b/arch/x86/kvm/vmx/vmx.h
+@@ -435,6 +435,32 @@ static inline void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr,
+ 
+ void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu);
+ 
++u64 vmx_get_supported_debugctl(struct kvm_vcpu *vcpu, bool host_initiated);
++bool vmx_is_valid_debugctl(struct kvm_vcpu *vcpu, u64 data, bool host_initiated);
++
++static inline void vmx_guest_debugctl_write(struct kvm_vcpu *vcpu, u64 val)
++{
++	WARN_ON_ONCE(val & DEBUGCTLMSR_FREEZE_IN_SMM);
++
++	val |= vcpu->arch.host_debugctl & DEBUGCTLMSR_FREEZE_IN_SMM;
++	vmcs_write64(GUEST_IA32_DEBUGCTL, val);
++}
++
++static inline u64 vmx_guest_debugctl_read(void)
++{
++	return vmcs_read64(GUEST_IA32_DEBUGCTL) & ~DEBUGCTLMSR_FREEZE_IN_SMM;
++}
++
++static inline void vmx_reload_guest_debugctl(struct kvm_vcpu *vcpu)
++{
++	u64 val = vmcs_read64(GUEST_IA32_DEBUGCTL);
++
++	if (!((val ^ vcpu->arch.host_debugctl) & DEBUGCTLMSR_FREEZE_IN_SMM))
++		return;
++
++	vmx_guest_debugctl_write(vcpu, val & ~DEBUGCTLMSR_FREEZE_IN_SMM);
++}
++
+ /*
+  * Note, early Intel manuals have the write-low and read-high bitmap offsets
+  * the wrong way round.  The bitmaps control MSRs 0x00000000-0x00001fff and
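The three accessors above make DEBUGCTLMSR_FREEZE_IN_SMM a host-owned bit: writes fold the host's current value back in, and reads strip it out, so the guest never observes or clobbers it. A standalone model of the same masking (FREEZE_IN_SMM is bit 14 per the x86 MSR definitions):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define DEBUGCTLMSR_FREEZE_IN_SMM (1ULL << 14)

    static uint64_t vmcs_debugctl;  /* stands in for GUEST_IA32_DEBUGCTL */

    static void guest_debugctl_write(uint64_t host_debugctl, uint64_t val)
    {
            assert(!(val & DEBUGCTLMSR_FREEZE_IN_SMM)); /* guest never sets it */
            vmcs_debugctl = val | (host_debugctl & DEBUGCTLMSR_FREEZE_IN_SMM);
    }

    static uint64_t guest_debugctl_read(void)
    {
            return vmcs_debugctl & ~DEBUGCTLMSR_FREEZE_IN_SMM;
    }

    int main(void)
    {
            guest_debugctl_write(DEBUGCTLMSR_FREEZE_IN_SMM, 0x1 /* LBR */);
            printf("hardware: %#llx, guest view: %#llx\n",
                   (unsigned long long)vmcs_debugctl,
                   (unsigned long long)guest_debugctl_read());
            return 0;
    }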
+diff --git a/arch/x86/kvm/vmx/x86_ops.h b/arch/x86/kvm/vmx/x86_ops.h
+index 4aba200f435d42..5e4ce13ab30515 100644
+--- a/arch/x86/kvm/vmx/x86_ops.h
++++ b/arch/x86/kvm/vmx/x86_ops.h
+@@ -21,7 +21,7 @@ void vmx_vm_destroy(struct kvm *kvm);
+ int vmx_vcpu_precreate(struct kvm *kvm);
+ int vmx_vcpu_create(struct kvm_vcpu *vcpu);
+ int vmx_vcpu_pre_run(struct kvm_vcpu *vcpu);
+-fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit);
++fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, u64 run_flags);
+ void vmx_vcpu_free(struct kvm_vcpu *vcpu);
+ void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
+ void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 213af0fda7682d..dbd295ef3eba2e 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -10711,6 +10711,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
+ 		dm_request_for_irq_injection(vcpu) &&
+ 		kvm_cpu_accept_dm_intr(vcpu);
+ 	fastpath_t exit_fastpath;
++	u64 run_flags, debug_ctl;
+ 
+ 	bool req_immediate_exit = false;
+ 
+@@ -10955,8 +10956,11 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
+ 		goto cancel_injection;
+ 	}
+ 
+-	if (req_immediate_exit)
++	run_flags = 0;
++	if (req_immediate_exit) {
++		run_flags |= KVM_RUN_FORCE_IMMEDIATE_EXIT;
+ 		kvm_make_request(KVM_REQ_EVENT, vcpu);
++	}
+ 
+ 	fpregs_assert_state_consistent();
+ 	if (test_thread_flag(TIF_NEED_FPU_LOAD))
+@@ -10973,12 +10977,22 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
+ 		set_debugreg(vcpu->arch.eff_db[3], 3);
+ 		/* When KVM_DEBUGREG_WONT_EXIT, dr6 is accessible in guest. */
+ 		if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT))
+-			kvm_x86_call(set_dr6)(vcpu, vcpu->arch.dr6);
++			run_flags |= KVM_RUN_LOAD_GUEST_DR6;
+ 	} else if (unlikely(hw_breakpoint_active())) {
+ 		set_debugreg(DR7_FIXED_1, 7);
+ 	}
+ 
+-	vcpu->arch.host_debugctl = get_debugctlmsr();
++	/*
++	 * Refresh the host DEBUGCTL snapshot after disabling IRQs, as DEBUGCTL
++	 * can be modified in IRQ context, e.g. via SMP function calls.  Inform
++	 * vendor code if any host-owned bits were changed, e.g. so that the
++	 * value loaded into hardware while running the guest can be updated.
++	 */
++	debug_ctl = get_debugctlmsr();
++	if ((debug_ctl ^ vcpu->arch.host_debugctl) & kvm_x86_ops.HOST_OWNED_DEBUGCTL &&
++	    !vcpu->arch.guest_state_protected)
++		run_flags |= KVM_RUN_LOAD_DEBUGCTL;
++	vcpu->arch.host_debugctl = debug_ctl;
+ 
+ 	guest_timing_enter_irqoff();
+ 
+@@ -10992,8 +11006,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
+ 		WARN_ON_ONCE((kvm_vcpu_apicv_activated(vcpu) != kvm_vcpu_apicv_active(vcpu)) &&
+ 			     (kvm_get_apic_mode(vcpu) != LAPIC_MODE_DISABLED));
+ 
+-		exit_fastpath = kvm_x86_call(vcpu_run)(vcpu,
+-						       req_immediate_exit);
++		exit_fastpath = kvm_x86_call(vcpu_run)(vcpu, run_flags);
+ 		if (likely(exit_fastpath != EXIT_FASTPATH_REENTER_GUEST))
+ 			break;
+ 
+@@ -11005,6 +11018,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
+ 			break;
+ 		}
+ 
++		run_flags = 0;
++
+ 		/* Note, VM-Exits that go down the "slow" path are accounted below. */
+ 		++vcpu->stat.exits;
+ 	}
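The vcpu_enter_guest() change above only asks vendor code to reload DEBUGCTL when a host-owned bit actually changed between entries; XOR isolates the changed bits and the HOST_OWNED_DEBUGCTL mask selects the interesting ones. A minimal model of the test:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            const uint64_t host_owned = 1ULL << 14; /* DEBUGCTLMSR_FREEZE_IN_SMM */
            uint64_t cached  = (1ULL << 14) | 0x1;  /* snapshot from the last entry */
            uint64_t current = 0x1;                 /* bit 14 flipped since then */

            if ((current ^ cached) & host_owned)
                    printf("host-owned bits changed -> set KVM_RUN_LOAD_DEBUGCTL\n");
            return 0;
    }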
+diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
+index cad16c163611b5..68359e1b92e20a 100644
+--- a/block/bfq-iosched.c
++++ b/block/bfq-iosched.c
+@@ -701,17 +701,13 @@ static void bfq_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
+ {
+ 	struct bfq_data *bfqd = data->q->elevator->elevator_data;
+ 	struct bfq_io_cq *bic = bfq_bic_lookup(data->q);
+-	int depth;
+-	unsigned limit = data->q->nr_requests;
+-	unsigned int act_idx;
++	unsigned int limit, act_idx;
+ 
+ 	/* Sync reads have full depth available */
+-	if (op_is_sync(opf) && !op_is_write(opf)) {
+-		depth = 0;
+-	} else {
+-		depth = bfqd->word_depths[!!bfqd->wr_busy_queues][op_is_sync(opf)];
+-		limit = (limit * depth) >> bfqd->full_depth_shift;
+-	}
++	if (op_is_sync(opf) && !op_is_write(opf))
++		limit = data->q->nr_requests;
++	else
++		limit = bfqd->async_depths[!!bfqd->wr_busy_queues][op_is_sync(opf)];
+ 
+ 	for (act_idx = 0; bic && act_idx < bfqd->num_actuators; act_idx++) {
+ 		/* Fast path to check if bfqq is already allocated. */
+@@ -725,14 +721,16 @@ static void bfq_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
+ 		 * available requests and thus starve other entities.
+ 		 */
+ 		if (bfqq_request_over_limit(bfqd, bic, opf, act_idx, limit)) {
+-			depth = 1;
++			limit = 1;
+ 			break;
+ 		}
+ 	}
++
+ 	bfq_log(bfqd, "[%s] wr_busy %d sync %d depth %u",
+-		__func__, bfqd->wr_busy_queues, op_is_sync(opf), depth);
+-	if (depth)
+-		data->shallow_depth = depth;
++		__func__, bfqd->wr_busy_queues, op_is_sync(opf), limit);
++
++	if (limit < data->q->nr_requests)
++		data->shallow_depth = limit;
+ }
+ 
+ static struct bfq_queue *
+@@ -7128,9 +7126,8 @@ void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg)
+  */
+ static void bfq_update_depths(struct bfq_data *bfqd, struct sbitmap_queue *bt)
+ {
+-	unsigned int depth = 1U << bt->sb.shift;
++	unsigned int nr_requests = bfqd->queue->nr_requests;
+ 
+-	bfqd->full_depth_shift = bt->sb.shift;
+ 	/*
+ 	 * In-word depths if no bfq_queue is being weight-raised:
+ 	 * leaving 25% of tags only for sync reads.
+@@ -7142,13 +7139,13 @@ static void bfq_update_depths(struct bfq_data *bfqd, struct sbitmap_queue *bt)
+ 	 * limit 'something'.
+ 	 */
+ 	/* no more than 50% of tags for async I/O */
+-	bfqd->word_depths[0][0] = max(depth >> 1, 1U);
++	bfqd->async_depths[0][0] = max(nr_requests >> 1, 1U);
+ 	/*
+ 	 * no more than 75% of tags for sync writes (25% extra tags
+ 	 * w.r.t. async I/O, to prevent async I/O from starving sync
+ 	 * writes)
+ 	 */
+-	bfqd->word_depths[0][1] = max((depth * 3) >> 2, 1U);
++	bfqd->async_depths[0][1] = max((nr_requests * 3) >> 2, 1U);
+ 
+ 	/*
+ 	 * In-word depths in case some bfq_queue is being weight-
+@@ -7158,9 +7155,9 @@ static void bfq_update_depths(struct bfq_data *bfqd, struct sbitmap_queue *bt)
+ 	 * shortage.
+ 	 */
+ 	/* no more than ~18% of tags for async I/O */
+-	bfqd->word_depths[1][0] = max((depth * 3) >> 4, 1U);
++	bfqd->async_depths[1][0] = max((nr_requests * 3) >> 4, 1U);
+ 	/* no more than ~37% of tags for sync writes (~20% extra tags) */
+-	bfqd->word_depths[1][1] = max((depth * 6) >> 4, 1U);
++	bfqd->async_depths[1][1] = max((nr_requests * 6) >> 4, 1U);
+ }
+ 
+ static void bfq_depth_updated(struct blk_mq_hw_ctx *hctx)
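bfq_update_depths() now expresses its limits directly in requests instead of per-sbitmap-word units. A standalone version of the arithmetic, showing the four async_depths values for an example 64-request queue:

    #include <stdio.h>

    static unsigned int at_least_one(unsigned int v)
    {
            return v ? v : 1;
    }

    int main(void)
    {
            unsigned int nr_requests = 64;  /* example queue depth */
            unsigned int d[2][2];

            d[0][0] = at_least_one(nr_requests >> 1);       /* <= 50% async */
            d[0][1] = at_least_one((nr_requests * 3) >> 2); /* <= 75% sync writes */
            d[1][0] = at_least_one((nr_requests * 3) >> 4); /* ~18% async, wr raised */
            d[1][1] = at_least_one((nr_requests * 6) >> 4); /* ~37% sync writes, wr */

            printf("%u %u %u %u\n", d[0][0], d[0][1], d[1][0], d[1][1]); /* 32 48 12 24 */
            return 0;
    }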
+diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h
+index 687a3a7ba78478..31217f196f4f1b 100644
+--- a/block/bfq-iosched.h
++++ b/block/bfq-iosched.h
+@@ -813,8 +813,7 @@ struct bfq_data {
+ 	 * Depth limits used in bfq_limit_depth (see comments on the
+ 	 * function)
+ 	 */
+-	unsigned int word_depths[2][2];
+-	unsigned int full_depth_shift;
++	unsigned int async_depths[2][2];
+ 
+ 	/*
+ 	 * Number of independent actuators. This is equal to 1 in
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index a7765e96cf40e5..e1bca29dc358be 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -3114,8 +3114,10 @@ void blk_mq_submit_bio(struct bio *bio)
+ 	if (blk_mq_attempt_bio_merge(q, bio, nr_segs))
+ 		goto queue_exit;
+ 
+-	if (blk_queue_is_zoned(q) && blk_zone_plug_bio(bio, nr_segs))
+-		goto queue_exit;
++	if (bio_needs_zone_write_plugging(bio)) {
++		if (blk_zone_plug_bio(bio, nr_segs))
++			goto queue_exit;
++	}
+ 
+ new_request:
+ 	if (!rq) {
+diff --git a/block/blk-settings.c b/block/blk-settings.c
+index 22ce7fa4fe20a8..9ae3eee4b5ae5f 100644
+--- a/block/blk-settings.c
++++ b/block/blk-settings.c
+@@ -623,7 +623,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
+ 	}
+ 
+ 	/* chunk_sectors a multiple of the physical block size? */
+-	if ((t->chunk_sectors << 9) & (t->physical_block_size - 1)) {
++	if (t->chunk_sectors % (t->physical_block_size >> SECTOR_SHIFT)) {
+ 		t->chunk_sectors = 0;
+ 		t->flags |= BLK_FLAG_MISALIGNED;
+ 		ret = -1;
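The modulo form above compares in sector units, so it does not truncate a byte count in 32-bit arithmetic and, unlike a power-of-two mask, it stays correct for any block size. A small demonstration with a contrived non-power-of-two size (illustrative only):

    #include <stdio.h>

    #define SECTOR_SHIFT 9

    int main(void)
    {
            unsigned int chunk_sectors = 4; /* 2048 bytes */
            unsigned int pbs = 1536;        /* contrived non-power-of-two size */

            unsigned int mask_test = (chunk_sectors << SECTOR_SHIFT) & (pbs - 1);
            unsigned int mod_test  = chunk_sectors % (pbs >> SECTOR_SHIFT);

            /* mask_test == 0 wrongly claims alignment; mod_test == 1 catches it */
            printf("mask: %u, modulo: %u\n", mask_test, mod_test);
            return 0;
    }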
+diff --git a/block/blk-zoned.c b/block/blk-zoned.c
+index d84946eb2f21e1..24c80078ca442e 100644
+--- a/block/blk-zoned.c
++++ b/block/blk-zoned.c
+@@ -1131,25 +1131,7 @@ bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs)
+ {
+ 	struct block_device *bdev = bio->bi_bdev;
+ 
+-	if (!bdev->bd_disk->zone_wplugs_hash)
+-		return false;
+-
+-	/*
+-	 * If the BIO already has the plugging flag set, then it was already
+-	 * handled through this path and this is a submission from the zone
+-	 * plug bio submit work.
+-	 */
+-	if (bio_flagged(bio, BIO_ZONE_WRITE_PLUGGING))
+-		return false;
+-
+-	/*
+-	 * We do not need to do anything special for empty flush BIOs, e.g
+-	 * BIOs such as issued by blkdev_issue_flush(). The is because it is
+-	 * the responsibility of the user to first wait for the completion of
+-	 * write operations for flush to have any effect on the persistence of
+-	 * the written data.
+-	 */
+-	if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
++	if (WARN_ON_ONCE(!bdev->bd_disk->zone_wplugs_hash))
+ 		return false;
+ 
+ 	/*
+diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
+index 4155594aefc657..ccfefa6a366964 100644
+--- a/block/kyber-iosched.c
++++ b/block/kyber-iosched.c
+@@ -157,10 +157,7 @@ struct kyber_queue_data {
+ 	 */
+ 	struct sbitmap_queue domain_tokens[KYBER_NUM_DOMAINS];
+ 
+-	/*
+-	 * Async request percentage, converted to per-word depth for
+-	 * sbitmap_get_shallow().
+-	 */
++	/* Number of allowed async requests. */
+ 	unsigned int async_depth;
+ 
+ 	struct kyber_cpu_latency __percpu *cpu_latency;
+@@ -454,10 +451,8 @@ static void kyber_depth_updated(struct blk_mq_hw_ctx *hctx)
+ {
+ 	struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data;
+ 	struct blk_mq_tags *tags = hctx->sched_tags;
+-	unsigned int shift = tags->bitmap_tags.sb.shift;
+-
+-	kqd->async_depth = (1U << shift) * KYBER_ASYNC_PERCENT / 100U;
+ 
++	kqd->async_depth = hctx->queue->nr_requests * KYBER_ASYNC_PERCENT / 100U;
+ 	sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, kqd->async_depth);
+ }
+ 
+diff --git a/block/mq-deadline.c b/block/mq-deadline.c
+index 91b3789f710e7a..19473a9b504404 100644
+--- a/block/mq-deadline.c
++++ b/block/mq-deadline.c
+@@ -487,20 +487,6 @@ static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
+ 	return rq;
+ }
+ 
+-/*
+- * 'depth' is a number in the range 1..INT_MAX representing a number of
+- * requests. Scale it with a factor (1 << bt->sb.shift) / q->nr_requests since
+- * 1..(1 << bt->sb.shift) is the range expected by sbitmap_get_shallow().
+- * Values larger than q->nr_requests have the same effect as q->nr_requests.
+- */
+-static int dd_to_word_depth(struct blk_mq_hw_ctx *hctx, unsigned int qdepth)
+-{
+-	struct sbitmap_queue *bt = &hctx->sched_tags->bitmap_tags;
+-	const unsigned int nrr = hctx->queue->nr_requests;
+-
+-	return ((qdepth << bt->sb.shift) + nrr - 1) / nrr;
+-}
+-
+ /*
+  * Called by __blk_mq_alloc_request(). The shallow_depth value set by this
+  * function is used by __blk_mq_get_tag().
+@@ -517,7 +503,7 @@ static void dd_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
+ 	 * Throttle asynchronous requests and writes such that these requests
+ 	 * do not block the allocation of synchronous requests.
+ 	 */
+-	data->shallow_depth = dd_to_word_depth(data->hctx, dd->async_depth);
++	data->shallow_depth = dd->async_depth;
+ }
+ 
+ /* Called by blk_mq_update_nr_requests(). */
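With shallow_depth now interpreted in requests, mq-deadline passes dd->async_depth straight through; the deleted helper existed only to rescale that count into sbitmap's former 1..(1 << shift) per-word range. For reference, this is what the removed conversion computed:

    #include <stdio.h>

    /* The removed dd_to_word_depth(): round-up scale of a request count
     * into the per-word range once expected by sbitmap_get_shallow(). */
    static unsigned int dd_to_word_depth(unsigned int shift,
                                         unsigned int nr_requests,
                                         unsigned int qdepth)
    {
            return ((qdepth << shift) + nr_requests - 1) / nr_requests;
    }

    int main(void)
    {
            /* 48 of 64 requests with 6-bit sbitmap words -> word depth 48 */
            printf("%u\n", dd_to_word_depth(6, 64, 48));
            return 0;
    }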
+diff --git a/crypto/jitterentropy-kcapi.c b/crypto/jitterentropy-kcapi.c
+index c24d4ff2b4a8b0..1266eb790708b8 100644
+--- a/crypto/jitterentropy-kcapi.c
++++ b/crypto/jitterentropy-kcapi.c
+@@ -144,7 +144,7 @@ int jent_hash_time(void *hash_state, __u64 time, u8 *addtl,
+ 	 * Inject the data from the previous loop into the pool. This data is
+ 	 * not considered to contain any entropy, but it stirs the pool a bit.
+ 	 */
+-	ret = crypto_shash_update(desc, intermediary, sizeof(intermediary));
++	ret = crypto_shash_update(hash_state_desc, intermediary, sizeof(intermediary));
+ 	if (ret)
+ 		goto err;
+ 
+@@ -157,11 +157,12 @@ int jent_hash_time(void *hash_state, __u64 time, u8 *addtl,
+ 	 * conditioning operation to have an identical amount of input data
+ 	 * according to section 3.1.5.
+ 	 */
+-	if (!stuck) {
+-		ret = crypto_shash_update(hash_state_desc, (u8 *)&time,
+-					  sizeof(__u64));
++	if (stuck) {
++		time = 0;
+ 	}
+ 
++	ret = crypto_shash_update(hash_state_desc, (u8 *)&time, sizeof(__u64));
++
+ err:
+ 	shash_desc_zero(desc);
+ 	memzero_explicit(intermediary, sizeof(intermediary));
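Besides fixing the wrong descriptor in the first update, the hunk above changes stuck-sample handling: a stuck time value is zeroed and still hashed, so every loop iteration feeds the conditioner the same 8 bytes (the constant-input requirement of SP800-90B section 3.1.5 cited in the comment). A userspace model of the idea, with a print standing in for the SHA-3 update:

    #include <stdint.h>
    #include <stdio.h>

    static void hash_update(const void *data, size_t len)
    {
            printf("absorbing %zu bytes\n", len); /* stand-in for crypto_shash_update() */
    }

    static void condition_time(uint64_t time, int stuck)
    {
            if (stuck)
                    time = 0; /* counts as zero entropy, but keeps input size fixed */
            hash_update(&time, sizeof(time));
    }

    int main(void)
    {
            condition_time(0x12345678, 0);
            condition_time(0x12345679, 1);  /* still absorbs 8 bytes */
            return 0;
    }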
+diff --git a/drivers/accel/habanalabs/common/memory.c b/drivers/accel/habanalabs/common/memory.c
+index 3348ad12c23751..11c55fd76db58d 100644
+--- a/drivers/accel/habanalabs/common/memory.c
++++ b/drivers/accel/habanalabs/common/memory.c
+@@ -1829,9 +1829,6 @@ static void hl_release_dmabuf(struct dma_buf *dmabuf)
+ 	struct hl_dmabuf_priv *hl_dmabuf = dmabuf->priv;
+ 	struct hl_ctx *ctx;
+ 
+-	if (!hl_dmabuf)
+-		return;
+-
+ 	ctx = hl_dmabuf->ctx;
+ 
+ 	if (hl_dmabuf->memhash_hnode)
+@@ -1859,7 +1856,12 @@ static int export_dmabuf(struct hl_ctx *ctx,
+ {
+ 	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+ 	struct hl_device *hdev = ctx->hdev;
+-	int rc, fd;
++	CLASS(get_unused_fd, fd)(flags);
++
++	if (fd < 0) {
++		dev_err(hdev->dev, "failed to get a file descriptor for a dma-buf, %d\n", fd);
++		return fd;
++	}
+ 
+ 	exp_info.ops = &habanalabs_dmabuf_ops;
+ 	exp_info.size = total_size;
+@@ -1872,13 +1874,6 @@ static int export_dmabuf(struct hl_ctx *ctx,
+ 		return PTR_ERR(hl_dmabuf->dmabuf);
+ 	}
+ 
+-	fd = dma_buf_fd(hl_dmabuf->dmabuf, flags);
+-	if (fd < 0) {
+-		dev_err(hdev->dev, "failed to get a file descriptor for a dma-buf, %d\n", fd);
+-		rc = fd;
+-		goto err_dma_buf_put;
+-	}
+-
+ 	hl_dmabuf->ctx = ctx;
+ 	hl_ctx_get(hl_dmabuf->ctx);
+ 	atomic_inc(&ctx->hdev->dmabuf_export_cnt);
+@@ -1890,13 +1885,9 @@ static int export_dmabuf(struct hl_ctx *ctx,
+ 	get_file(ctx->hpriv->file_priv->filp);
+ 
+ 	*dmabuf_fd = fd;
++	fd_install(take_fd(fd), hl_dmabuf->dmabuf->file);
+ 
+ 	return 0;
+-
+-err_dma_buf_put:
+-	hl_dmabuf->dmabuf->priv = NULL;
+-	dma_buf_put(hl_dmabuf->dmabuf);
+-	return rc;
+ }
+ 
+ static int validate_export_params_common(struct hl_device *hdev, u64 addr, u64 size, u64 offset)
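The restructured export path reserves the descriptor first (the CLASS(get_unused_fd, ...) guard auto-releases it on any early return), then creates the dma-buf, and only publishes the fd with fd_install() as the final step that cannot fail, so no error path has to unwind a live fd. A hedged kernel-style sketch of the pattern (types and helpers are illustrative, not the driver's exact code):

    static int export_object_fd(struct some_obj *obj, int flags, int *fd_out)
    {
            CLASS(get_unused_fd, fd)(flags); /* put_unused_fd() on early return */
            struct file *file;

            if (fd < 0)
                    return fd;

            file = create_backing_file(obj); /* hypothetical, may fail */
            if (IS_ERR(file))
                    return PTR_ERR(file);

            *fd_out = fd;
            fd_install(take_fd(fd), file);   /* take_fd() disarms the cleanup */
            return 0;
    }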
+diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
+index 7cf6101cb4c731..2a99f5eb69629a 100644
+--- a/drivers/acpi/acpi_processor.c
++++ b/drivers/acpi/acpi_processor.c
+@@ -275,7 +275,7 @@ static inline int acpi_processor_hotadd_init(struct acpi_processor *pr,
+ 
+ static int acpi_processor_get_info(struct acpi_device *device)
+ {
+-	union acpi_object object = { 0 };
++	union acpi_object object = { .processor = { 0 } };
+ 	struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
+ 	struct acpi_processor *pr = acpi_driver_data(device);
+ 	int device_declaration = 0;
+diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
+index 6cf40e8ac321e0..91f9267c07ea25 100644
+--- a/drivers/acpi/apei/ghes.c
++++ b/drivers/acpi/apei/ghes.c
+@@ -799,6 +799,17 @@ static bool ghes_do_proc(struct ghes *ghes,
+ 		}
+ 	}
+ 
++	/*
++	 * If no memory failure work is queued for abnormal synchronous
++	 * errors, do a force kill.
++	 */
++	if (sync && !queued) {
++		dev_err(ghes->dev,
++			HW_ERR GHES_PFX "%s:%d: synchronous unrecoverable error (SIGBUS)\n",
++			current->comm, task_pid_nr(current));
++		force_sig(SIGBUS);
++	}
++
+ 	return queued;
+ }
+ 
+@@ -985,6 +996,8 @@ static void __ghes_panic(struct ghes *ghes,
+ 
+ 	__ghes_print_estatus(KERN_EMERG, ghes->generic, estatus);
+ 
++	add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK);
++
+ 	ghes_clear_estatus(ghes, estatus, buf_paddr, fixmap_idx);
+ 
+ 	if (!panic_timeout)
+diff --git a/drivers/acpi/prmt.c b/drivers/acpi/prmt.c
+index e549914a636c66..be033bbb126a44 100644
+--- a/drivers/acpi/prmt.c
++++ b/drivers/acpi/prmt.c
+@@ -85,8 +85,6 @@ static u64 efi_pa_va_lookup(efi_guid_t *guid, u64 pa)
+ 		}
+ 	}
+ 
+-	pr_warn("Failed to find VA for GUID: %pUL, PA: 0x%llx", guid, pa);
+-
+ 	return 0;
+ }
+ 
+@@ -154,13 +152,37 @@ acpi_parse_prmt(union acpi_subtable_headers *header, const unsigned long end)
+ 		guid_copy(&th->guid, (guid_t *)handler_info->handler_guid);
+ 		th->handler_addr =
+ 			(void *)efi_pa_va_lookup(&th->guid, handler_info->handler_address);
++		/*
++		 * Print a warning message if handler_addr is zero which is not expected to
++		 * ever happen.
++		 */
++		if (unlikely(!th->handler_addr))
++			pr_warn("Failed to find VA of handler for GUID: %pUL, PA: 0x%llx",
++				&th->guid, handler_info->handler_address);
+ 
+ 		th->static_data_buffer_addr =
+ 			efi_pa_va_lookup(&th->guid, handler_info->static_data_buffer_address);
++		/*
++		 * According to the PRM specification, static_data_buffer_address can be zero,
++		 * so avoid printing a warning message in that case.  Otherwise, if the
++		 * return value of efi_pa_va_lookup() is zero, print the message.
++		 */
++		if (unlikely(!th->static_data_buffer_addr && handler_info->static_data_buffer_address))
++			pr_warn("Failed to find VA of static data buffer for GUID: %pUL, PA: 0x%llx",
++				&th->guid, handler_info->static_data_buffer_address);
+ 
+ 		th->acpi_param_buffer_addr =
+ 			efi_pa_va_lookup(&th->guid, handler_info->acpi_param_buffer_address);
+ 
++		/*
++		 * According to the PRM specification, acpi_param_buffer_address can be zero,
++		 * so avoid printing a warning message in that case.  Otherwise, if the
++		 * return value of efi_pa_va_lookup() is zero, print the message.
++		 */
++		if (unlikely(!th->acpi_param_buffer_addr && handler_info->acpi_param_buffer_address))
++			pr_warn("Failed to find VA of acpi param buffer for GUID: %pUL, PA: 0x%llx",
++				&th->guid, handler_info->acpi_param_buffer_address);
++
+ 	} while (++cur_handler < tm->handler_count && (handler_info = get_next_handler(handler_info)));
+ 
+ 	return 0;
+diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
+index 4265814c74f81a..d81f30ce2341a0 100644
+--- a/drivers/acpi/processor_perflib.c
++++ b/drivers/acpi/processor_perflib.c
+@@ -174,6 +174,9 @@ void acpi_processor_ppc_init(struct cpufreq_policy *policy)
+ {
+ 	unsigned int cpu;
+ 
++	if (ignore_ppc == 1)
++		return;
++
+ 	for_each_cpu(cpu, policy->related_cpus) {
+ 		struct acpi_processor *pr = per_cpu(processors, cpu);
+ 		int ret;
+@@ -194,6 +197,14 @@ void acpi_processor_ppc_init(struct cpufreq_policy *policy)
+ 		if (ret < 0)
+ 			pr_err("Failed to add freq constraint for CPU%d (%d)\n",
+ 			       cpu, ret);
++
++		if (!pr->performance)
++			continue;
++
++		ret = acpi_processor_get_platform_limit(pr);
++		if (ret)
++			pr_err("Failed to update freq constraint for CPU%d (%d)\n",
++			       cpu, ret);
+ 	}
+ }
+ 
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index a6a66d79476386..944e44caa26061 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -1781,11 +1781,21 @@ static void ahci_update_initial_lpm_policy(struct ata_port *ap)
+ 		return;
+ 	}
+ 
++	/* If no Partial or no Slumber, we cannot support DIPM. */
++	if ((ap->host->flags & ATA_HOST_NO_PART) ||
++	    (ap->host->flags & ATA_HOST_NO_SSC)) {
++		ata_port_dbg(ap, "Host does not support DIPM\n");
++		ap->flags |= ATA_FLAG_NO_DIPM;
++	}
++
+ 	/* If no LPM states are supported by the HBA, do not bother with LPM */
+ 	if ((ap->host->flags & ATA_HOST_NO_PART) &&
+ 	    (ap->host->flags & ATA_HOST_NO_SSC) &&
+ 	    (ap->host->flags & ATA_HOST_NO_DEVSLP)) {
+-		ata_port_dbg(ap, "no LPM states supported, not enabling LPM\n");
++		ata_port_dbg(ap,
++			"No LPM states supported, forcing LPM max_power\n");
++		ap->flags |= ATA_FLAG_NO_LPM;
++		ap->target_lpm_policy = ATA_LPM_MAX_POWER;
+ 		return;
+ 	}
+ 
+diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
+index 093b940bc953f0..d3cda803ae06df 100644
+--- a/drivers/ata/ata_piix.c
++++ b/drivers/ata/ata_piix.c
+@@ -1089,6 +1089,7 @@ static struct ata_port_operations ich_pata_ops = {
+ };
+ 
+ static struct attribute *piix_sidpr_shost_attrs[] = {
++	&dev_attr_link_power_management_supported.attr,
+ 	&dev_attr_link_power_management_policy.attr,
+ 	NULL
+ };
+diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
+index a28ffe1e596918..7824e8836a54ef 100644
+--- a/drivers/ata/libahci.c
++++ b/drivers/ata/libahci.c
+@@ -111,6 +111,7 @@ static DEVICE_ATTR(em_buffer, S_IWUSR | S_IRUGO,
+ static DEVICE_ATTR(em_message_supported, S_IRUGO, ahci_show_em_supported, NULL);
+ 
+ static struct attribute *ahci_shost_attrs[] = {
++	&dev_attr_link_power_management_supported.attr,
+ 	&dev_attr_link_power_management_policy.attr,
+ 	&dev_attr_em_message_type.attr,
+ 	&dev_attr_em_message.attr,
+diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c
+index a7442dc0bd8e10..cad3855373cb1a 100644
+--- a/drivers/ata/libata-sata.c
++++ b/drivers/ata/libata-sata.c
+@@ -900,14 +900,52 @@ static const char *ata_lpm_policy_names[] = {
+ 	[ATA_LPM_MIN_POWER]		= "min_power",
+ };
+ 
++/*
++ * Check if a port supports link power management.
++ * Must be called with the port locked.
++ */
++static bool ata_scsi_lpm_supported(struct ata_port *ap)
++{
++	struct ata_link *link;
++	struct ata_device *dev;
++
++	if (ap->flags & ATA_FLAG_NO_LPM)
++		return false;
++
++	ata_for_each_link(link, ap, EDGE) {
++		ata_for_each_dev(dev, &ap->link, ENABLED) {
++			if (dev->quirks & ATA_QUIRK_NOLPM)
++				return false;
++		}
++	}
++
++	return true;
++}
++
++static ssize_t ata_scsi_lpm_supported_show(struct device *dev,
++				 struct device_attribute *attr, char *buf)
++{
++	struct Scsi_Host *shost = class_to_shost(dev);
++	struct ata_port *ap = ata_shost_to_port(shost);
++	unsigned long flags;
++	bool supported;
++
++	spin_lock_irqsave(ap->lock, flags);
++	supported = ata_scsi_lpm_supported(ap);
++	spin_unlock_irqrestore(ap->lock, flags);
++
++	return sysfs_emit(buf, "%d\n", supported);
++}
++DEVICE_ATTR(link_power_management_supported, S_IRUGO,
++	    ata_scsi_lpm_supported_show, NULL);
++EXPORT_SYMBOL_GPL(dev_attr_link_power_management_supported);
++
+ static ssize_t ata_scsi_lpm_store(struct device *device,
+ 				  struct device_attribute *attr,
+ 				  const char *buf, size_t count)
+ {
+ 	struct Scsi_Host *shost = class_to_shost(device);
+ 	struct ata_port *ap = ata_shost_to_port(shost);
+-	struct ata_link *link;
+-	struct ata_device *dev;
+ 	enum ata_lpm_policy policy;
+ 	unsigned long flags;
+ 
+@@ -924,13 +962,9 @@ static ssize_t ata_scsi_lpm_store(struct device *device,
+ 
+ 	spin_lock_irqsave(ap->lock, flags);
+ 
+-	ata_for_each_link(link, ap, EDGE) {
+-		ata_for_each_dev(dev, &ap->link, ENABLED) {
+-			if (dev->quirks & ATA_QUIRK_NOLPM) {
+-				count = -EOPNOTSUPP;
+-				goto out_unlock;
+-			}
+-		}
++	if (!ata_scsi_lpm_supported(ap)) {
++		count = -EOPNOTSUPP;
++		goto out_unlock;
+ 	}
+ 
+ 	ap->target_lpm_policy = policy;
+diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
+index 99f25d6b2027ad..317505eab1266a 100644
+--- a/drivers/base/power/runtime.c
++++ b/drivers/base/power/runtime.c
+@@ -1787,6 +1787,11 @@ void pm_runtime_reinit(struct device *dev)
+ 				pm_runtime_put(dev->parent);
+ 		}
+ 	}
++	/*
++	 * Clear power.needs_force_resume in case it has been set by
++	 * pm_runtime_force_suspend() invoked from a driver remove callback.
++	 */
++	dev->power.needs_force_resume = false;
+ }
+ 
+ /**
+diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
+index 0c9f54197768d6..ac18d36b0ea84e 100644
+--- a/drivers/block/drbd/drbd_receiver.c
++++ b/drivers/block/drbd/drbd_receiver.c
+@@ -2500,7 +2500,11 @@ static int handle_write_conflicts(struct drbd_device *device,
+ 			peer_req->w.cb = superseded ? e_send_superseded :
+ 						   e_send_retry_write;
+ 			list_add_tail(&peer_req->w.list, &device->done_ee);
+-			queue_work(connection->ack_sender, &peer_req->peer_device->send_acks_work);
++			/* put is in drbd_send_acks_wf() */
++			kref_get(&device->kref);
++			if (!queue_work(connection->ack_sender,
++					&peer_req->peer_device->send_acks_work))
++				kref_put(&device->kref, drbd_destroy_device);
+ 
+ 			err = -ENOENT;
+ 			goto out;
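The drbd fix applies the standard reference-before-queue idiom: take a kref before queue_work(), drop it immediately if the work was already pending, and leave the matching put in the work function. A generic sketch (my_dev and its release function are hypothetical):

    static void submit_acks(struct my_dev *dev)
    {
            kref_get(&dev->kref);           /* ref owned by the work item */
            if (!queue_work(dev->wq, &dev->work))
                    kref_put(&dev->kref, my_dev_release); /* already queued */
    }

    static void my_work_fn(struct work_struct *w)
    {
            struct my_dev *dev = container_of(w, struct my_dev, work);

            send_acks(dev);                 /* hypothetical payload */
            kref_put(&dev->kref, my_dev_release); /* pairs with the get */
    }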
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index 2f42d164461846..db9b5164cccaf3 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -1404,16 +1404,33 @@ static int loop_set_dio(struct loop_device *lo, unsigned long arg)
+ 	return error;
+ }
+ 
+-static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
++static int loop_set_block_size(struct loop_device *lo, blk_mode_t mode,
++			       struct block_device *bdev, unsigned long arg)
+ {
+ 	struct queue_limits lim;
+ 	int err = 0;
+ 
+-	if (lo->lo_state != Lo_bound)
+-		return -ENXIO;
++	/*
++	 * If we don't hold exclusive handle for the device, upgrade to it
++	 * here to avoid changing device under exclusive owner.
++	 */
++	if (!(mode & BLK_OPEN_EXCL)) {
++		err = bd_prepare_to_claim(bdev, loop_set_block_size, NULL);
++		if (err)
++			return err;
++	}
++
++	err = mutex_lock_killable(&lo->lo_mutex);
++	if (err)
++		goto abort_claim;
++
++	if (lo->lo_state != Lo_bound) {
++		err = -ENXIO;
++		goto unlock;
++	}
+ 
+ 	if (lo->lo_queue->limits.logical_block_size == arg)
+-		return 0;
++		goto unlock;
+ 
+ 	sync_blockdev(lo->lo_device);
+ 	invalidate_bdev(lo->lo_device);
+@@ -1425,6 +1442,11 @@ static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
+ 	loop_update_dio(lo);
+ 	blk_mq_unfreeze_queue(lo->lo_queue);
+ 
++unlock:
++	mutex_unlock(&lo->lo_mutex);
++abort_claim:
++	if (!(mode & BLK_OPEN_EXCL))
++		bd_abort_claiming(bdev, loop_set_block_size);
+ 	return err;
+ }
+ 
+@@ -1443,9 +1465,6 @@ static int lo_simple_ioctl(struct loop_device *lo, unsigned int cmd,
+ 	case LOOP_SET_DIRECT_IO:
+ 		err = loop_set_dio(lo, arg);
+ 		break;
+-	case LOOP_SET_BLOCK_SIZE:
+-		err = loop_set_block_size(lo, arg);
+-		break;
+ 	default:
+ 		err = -EINVAL;
+ 	}
+@@ -1500,9 +1519,12 @@ static int lo_ioctl(struct block_device *bdev, blk_mode_t mode,
+ 		break;
+ 	case LOOP_GET_STATUS64:
+ 		return loop_get_status64(lo, argp);
++	case LOOP_SET_BLOCK_SIZE:
++		if (!(mode & BLK_OPEN_WRITE) && !capable(CAP_SYS_ADMIN))
++			return -EPERM;
++		return loop_set_block_size(lo, mode, bdev, arg);
+ 	case LOOP_SET_CAPACITY:
+ 	case LOOP_SET_DIRECT_IO:
+-	case LOOP_SET_BLOCK_SIZE:
+ 		if (!(mode & BLK_OPEN_WRITE) && !capable(CAP_SYS_ADMIN))
+ 			return -EPERM;
+ 		fallthrough;
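LOOP_SET_BLOCK_SIZE now upgrades to a temporary exclusive claim when the caller did not open the device with BLK_OPEN_EXCL, so the block size cannot change underneath an exclusive owner. The bracket looks roughly like this (sketch only; the holder tag is conventionally a unique function address):

    static int change_geometry(struct block_device *bdev, blk_mode_t mode)
    {
            bool claim = !(mode & BLK_OPEN_EXCL);
            int err;

            if (claim) {
                    err = bd_prepare_to_claim(bdev, change_geometry, NULL);
                    if (err)
                            return err;
            }

            err = do_the_change(bdev);      /* hypothetical, under the claim */

            if (claim)
                    bd_abort_claiming(bdev, change_geometry);
            return err;
    }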
+diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
+index 2d38331ee66793..2b249703dc6859 100644
+--- a/drivers/block/sunvdc.c
++++ b/drivers/block/sunvdc.c
+@@ -957,8 +957,10 @@ static bool vdc_port_mpgroup_check(struct vio_dev *vdev)
+ 	dev = device_find_child(vdev->dev.parent, &port_data,
+ 				vdc_device_probed);
+ 
+-	if (dev)
++	if (dev) {
++		put_device(dev);
+ 		return true;
++	}
+ 
+ 	return false;
+ }
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 1d2e85b418204e..d7aaaeb4fe326e 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -702,6 +702,8 @@ static const struct usb_device_id quirks_table[] = {
+ 						     BTUSB_WIDEBAND_SPEECH },
+ 	{ USB_DEVICE(0x0489, 0xe139), .driver_info = BTUSB_MEDIATEK |
+ 						     BTUSB_WIDEBAND_SPEECH },
++	{ USB_DEVICE(0x0489, 0xe14e), .driver_info = BTUSB_MEDIATEK |
++						     BTUSB_WIDEBAND_SPEECH },
+ 	{ USB_DEVICE(0x0489, 0xe14f), .driver_info = BTUSB_MEDIATEK |
+ 						     BTUSB_WIDEBAND_SPEECH },
+ 	{ USB_DEVICE(0x0489, 0xe150), .driver_info = BTUSB_MEDIATEK |
+diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
+index 6a4a8ecd0edd02..09405668ebb378 100644
+--- a/drivers/char/ipmi/ipmi_msghandler.c
++++ b/drivers/char/ipmi/ipmi_msghandler.c
+@@ -4617,10 +4617,10 @@ static int handle_one_recv_msg(struct ipmi_smi *intf,
+ 		 * The NetFN and Command in the response is not even
+ 		 * marginally correct.
+ 		 */
+-		dev_warn(intf->si_dev,
+-			 "BMC returned incorrect response, expected netfn %x cmd %x, got netfn %x cmd %x\n",
+-			 (msg->data[0] >> 2) | 1, msg->data[1],
+-			 msg->rsp[0] >> 2, msg->rsp[1]);
++		dev_warn_ratelimited(intf->si_dev,
++				     "BMC returned incorrect response, expected netfn %x cmd %x, got netfn %x cmd %x\n",
++				     (msg->data[0] >> 2) | 1, msg->data[1],
++				     msg->rsp[0] >> 2, msg->rsp[1]);
+ 
+ 		goto return_unspecified;
+ 	}
+diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
+index 335eea80054eef..37ab5806b1a4a9 100644
+--- a/drivers/char/ipmi/ipmi_watchdog.c
++++ b/drivers/char/ipmi/ipmi_watchdog.c
+@@ -1189,14 +1189,8 @@ static struct ipmi_smi_watcher smi_watcher = {
+ 	.smi_gone = ipmi_smi_gone
+ };
+ 
+-static int action_op(const char *inval, char *outval)
++static int action_op_set_val(const char *inval)
+ {
+-	if (outval)
+-		strcpy(outval, action);
+-
+-	if (!inval)
+-		return 0;
+-
+ 	if (strcmp(inval, "reset") == 0)
+ 		action_val = WDOG_TIMEOUT_RESET;
+ 	else if (strcmp(inval, "none") == 0)
+@@ -1207,18 +1201,26 @@ static int action_op(const char *inval, char *outval)
+ 		action_val = WDOG_TIMEOUT_POWER_DOWN;
+ 	else
+ 		return -EINVAL;
+-	strcpy(action, inval);
+ 	return 0;
+ }
+ 
+-static int preaction_op(const char *inval, char *outval)
++static int action_op(const char *inval, char *outval)
+ {
++	int rv;
++
+ 	if (outval)
+-		strcpy(outval, preaction);
++		strcpy(outval, action);
+ 
+ 	if (!inval)
+ 		return 0;
++	rv = action_op_set_val(inval);
++	if (!rv)
++		strcpy(action, inval);
++	return rv;
++}
+ 
++static int preaction_op_set_val(const char *inval)
++{
+ 	if (strcmp(inval, "pre_none") == 0)
+ 		preaction_val = WDOG_PRETIMEOUT_NONE;
+ 	else if (strcmp(inval, "pre_smi") == 0)
+@@ -1231,18 +1233,26 @@ static int preaction_op(const char *inval, char *outval)
+ 		preaction_val = WDOG_PRETIMEOUT_MSG_INT;
+ 	else
+ 		return -EINVAL;
+-	strcpy(preaction, inval);
+ 	return 0;
+ }
+ 
+-static int preop_op(const char *inval, char *outval)
++static int preaction_op(const char *inval, char *outval)
+ {
++	int rv;
++
+ 	if (outval)
+-		strcpy(outval, preop);
++		strcpy(outval, preaction);
+ 
+ 	if (!inval)
+ 		return 0;
++	rv = preaction_op_set_val(inval);
++	if (!rv)
++		strcpy(preaction, inval);
++	return rv;
++}
+ 
++static int preop_op_set_val(const char *inval)
++{
+ 	if (strcmp(inval, "preop_none") == 0)
+ 		preop_val = WDOG_PREOP_NONE;
+ 	else if (strcmp(inval, "preop_panic") == 0)
+@@ -1251,7 +1261,22 @@ static int preop_op(const char *inval, char *outval)
+ 		preop_val = WDOG_PREOP_GIVE_DATA;
+ 	else
+ 		return -EINVAL;
+-	strcpy(preop, inval);
++	return 0;
++}
++
++static int preop_op(const char *inval, char *outval)
++{
++	int rv;
++
++	if (outval)
++		strcpy(outval, preop);
++
++	if (!inval)
++		return 0;
++
++	rv = preop_op_set_val(inval);
++	if (!rv)
++		strcpy(preop, inval);
+ 	return 0;
+ }
+ 
+@@ -1288,18 +1313,18 @@ static int __init ipmi_wdog_init(void)
+ {
+ 	int rv;
+ 
+-	if (action_op(action, NULL)) {
++	if (action_op_set_val(action)) {
+ 		action_op("reset", NULL);
+ 		pr_info("Unknown action '%s', defaulting to reset\n", action);
+ 	}
+ 
+-	if (preaction_op(preaction, NULL)) {
++	if (preaction_op_set_val(preaction)) {
+ 		preaction_op("pre_none", NULL);
+ 		pr_info("Unknown preaction '%s', defaulting to none\n",
+ 			preaction);
+ 	}
+ 
+-	if (preop_op(preop, NULL)) {
++	if (preop_op_set_val(preop)) {
+ 		preop_op("preop_none", NULL);
+ 		pr_info("Unknown preop '%s', defaulting to none\n", preop);
+ 	}
+diff --git a/drivers/char/misc.c b/drivers/char/misc.c
+index dda466f9181acf..30178e20d962d4 100644
+--- a/drivers/char/misc.c
++++ b/drivers/char/misc.c
+@@ -314,8 +314,8 @@ static int __init misc_init(void)
+ 	if (err)
+ 		goto fail_remove;
+ 
+-	err = -EIO;
+-	if (__register_chrdev(MISC_MAJOR, 0, MINORMASK + 1, "misc", &misc_fops))
++	err = __register_chrdev(MISC_MAJOR, 0, MINORMASK + 1, "misc", &misc_fops);
++	if (err < 0)
+ 		goto fail_printk;
+ 	return 0;
+ 
+diff --git a/drivers/clk/qcom/gcc-ipq5018.c b/drivers/clk/qcom/gcc-ipq5018.c
+index 70f5dcb96700f5..24eb4c40da6346 100644
+--- a/drivers/clk/qcom/gcc-ipq5018.c
++++ b/drivers/clk/qcom/gcc-ipq5018.c
+@@ -1371,7 +1371,7 @@ static struct clk_branch gcc_xo_clk = {
+ 				&gcc_xo_clk_src.clkr.hw,
+ 			},
+ 			.num_parents = 1,
+-			.flags = CLK_SET_RATE_PARENT,
++			.flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+diff --git a/drivers/clk/qcom/gcc-ipq8074.c b/drivers/clk/qcom/gcc-ipq8074.c
+index 7258ba5c09001e..1329ea28d70313 100644
+--- a/drivers/clk/qcom/gcc-ipq8074.c
++++ b/drivers/clk/qcom/gcc-ipq8074.c
+@@ -1895,10 +1895,10 @@ static const struct freq_conf ftbl_nss_port6_tx_clk_src_125[] = {
+ static const struct freq_multi_tbl ftbl_nss_port6_tx_clk_src[] = {
+ 	FMS(19200000, P_XO, 1, 0, 0),
+ 	FM(25000000, ftbl_nss_port6_tx_clk_src_25),
+-	FMS(78125000, P_UNIPHY1_RX, 4, 0, 0),
++	FMS(78125000, P_UNIPHY2_TX, 4, 0, 0),
+ 	FM(125000000, ftbl_nss_port6_tx_clk_src_125),
+-	FMS(156250000, P_UNIPHY1_RX, 2, 0, 0),
+-	FMS(312500000, P_UNIPHY1_RX, 1, 0, 0),
++	FMS(156250000, P_UNIPHY2_TX, 2, 0, 0),
++	FMS(312500000, P_UNIPHY2_TX, 1, 0, 0),
+ 	{ }
+ };
+ 
+diff --git a/drivers/clk/renesas/rzg2l-cpg.c b/drivers/clk/renesas/rzg2l-cpg.c
+index 97d42328fa81ac..e2ecc9d36e0512 100644
+--- a/drivers/clk/renesas/rzg2l-cpg.c
++++ b/drivers/clk/renesas/rzg2l-cpg.c
+@@ -1398,10 +1398,6 @@ rzg2l_cpg_register_mod_clk(const struct rzg2l_mod_clk *mod,
+ 		goto fail;
+ 	}
+ 
+-	clk = clock->hw.clk;
+-	dev_dbg(dev, "Module clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
+-	priv->clks[id] = clk;
+-
+ 	if (mod->is_coupled) {
+ 		struct mstp_clock *sibling;
+ 
+@@ -1413,6 +1409,10 @@ rzg2l_cpg_register_mod_clk(const struct rzg2l_mod_clk *mod,
+ 		}
+ 	}
+ 
++	clk = clock->hw.clk;
++	dev_dbg(dev, "Module clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
++	priv->clks[id] = clk;
++
+ 	return;
+ 
+ fail:
+diff --git a/drivers/clk/samsung/clk-exynos850.c b/drivers/clk/samsung/clk-exynos850.c
+index e00e213b1201c8..e2a63ea7d4cbaf 100644
+--- a/drivers/clk/samsung/clk-exynos850.c
++++ b/drivers/clk/samsung/clk-exynos850.c
+@@ -1360,7 +1360,7 @@ static const unsigned long cpucl1_clk_regs[] __initconst = {
+ 	CLK_CON_GAT_GATE_CLK_CPUCL1_CPU,
+ };
+ 
+-/* List of parent clocks for Muxes in CMU_CPUCL0 */
++/* List of parent clocks for Muxes in CMU_CPUCL1 */
+ PNAME(mout_pll_cpucl1_p)		 = { "oscclk", "fout_cpucl1_pll" };
+ PNAME(mout_cpucl1_switch_user_p)	 = { "oscclk", "dout_cpucl1_switch" };
+ PNAME(mout_cpucl1_dbg_user_p)		 = { "oscclk", "dout_cpucl1_dbg" };
+diff --git a/drivers/clk/samsung/clk-gs101.c b/drivers/clk/samsung/clk-gs101.c
+index 4d4363bc8b28db..fa628fab28ac4e 100644
+--- a/drivers/clk/samsung/clk-gs101.c
++++ b/drivers/clk/samsung/clk-gs101.c
+@@ -1154,7 +1154,7 @@ static const struct samsung_div_clock cmu_top_div_clks[] __initconst = {
+ 	    CLK_CON_DIV_CLKCMU_G2D_MSCL, 0, 4),
+ 	DIV(CLK_DOUT_CMU_G3AA_G3AA, "dout_cmu_g3aa_g3aa", "gout_cmu_g3aa_g3aa",
+ 	    CLK_CON_DIV_CLKCMU_G3AA_G3AA, 0, 4),
+-	DIV(CLK_DOUT_CMU_G3D_SWITCH, "dout_cmu_g3d_busd", "gout_cmu_g3d_busd",
++	DIV(CLK_DOUT_CMU_G3D_BUSD, "dout_cmu_g3d_busd", "gout_cmu_g3d_busd",
+ 	    CLK_CON_DIV_CLKCMU_G3D_BUSD, 0, 4),
+ 	DIV(CLK_DOUT_CMU_G3D_GLB, "dout_cmu_g3d_glb", "gout_cmu_g3d_glb",
+ 	    CLK_CON_DIV_CLKCMU_G3D_GLB, 0, 4),
+@@ -2129,7 +2129,7 @@ PNAME(mout_hsi0_usbdpdbg_user_p)	= { "oscclk",
+ 					    "dout_cmu_hsi0_usbdpdbg" };
+ PNAME(mout_hsi0_bus_p)			= { "mout_hsi0_bus_user",
+ 					    "mout_hsi0_alt_user" };
+-PNAME(mout_hsi0_usb20_ref_p)		= { "fout_usb_pll",
++PNAME(mout_hsi0_usb20_ref_p)		= { "mout_pll_usb",
+ 					    "mout_hsi0_tcxo_user" };
+ PNAME(mout_hsi0_usb31drd_p)		= { "fout_usb_pll",
+ 					    "mout_hsi0_usb31drd_user",
+diff --git a/drivers/clk/tegra/clk-periph.c b/drivers/clk/tegra/clk-periph.c
+index 0626650a7011cc..c9fc52a36fce9c 100644
+--- a/drivers/clk/tegra/clk-periph.c
++++ b/drivers/clk/tegra/clk-periph.c
+@@ -51,7 +51,7 @@ static int clk_periph_determine_rate(struct clk_hw *hw,
+ 	struct tegra_clk_periph *periph = to_clk_periph(hw);
+ 	const struct clk_ops *div_ops = periph->div_ops;
+ 	struct clk_hw *div_hw = &periph->divider.hw;
+-	unsigned long rate;
++	long rate;
+ 
+ 	__clk_hw_set_clk(div_hw, hw);
+ 
+@@ -59,7 +59,7 @@ static int clk_periph_determine_rate(struct clk_hw *hw,
+ 	if (rate < 0)
+ 		return rate;
+ 
+-	req->rate = rate;
++	req->rate = (unsigned long)rate;
+ 	return 0;
+ }
+ 
+diff --git a/drivers/clk/thead/clk-th1520-ap.c b/drivers/clk/thead/clk-th1520-ap.c
+index 6ab89245af1217..c8ebacc6934ab6 100644
+--- a/drivers/clk/thead/clk-th1520-ap.c
++++ b/drivers/clk/thead/clk-th1520-ap.c
+@@ -799,11 +799,12 @@ static CCU_GATE(CLK_AON2CPU_A2X, aon2cpu_a2x_clk, "aon2cpu-a2x", axi4_cpusys2_ac
+ 		0x134, BIT(8), 0);
+ static CCU_GATE(CLK_X2X_CPUSYS, x2x_cpusys_clk, "x2x-cpusys", axi4_cpusys2_aclk_pd,
+ 		0x134, BIT(7), 0);
+-static CCU_GATE(CLK_CPU2AON_X2H, cpu2aon_x2h_clk, "cpu2aon-x2h", axi_aclk_pd, 0x138, BIT(8), 0);
++static CCU_GATE(CLK_CPU2AON_X2H, cpu2aon_x2h_clk, "cpu2aon-x2h", axi_aclk_pd,
++		0x138, BIT(8), CLK_IGNORE_UNUSED);
+ static CCU_GATE(CLK_CPU2PERI_X2H, cpu2peri_x2h_clk, "cpu2peri-x2h", axi4_cpusys2_aclk_pd,
+ 		0x140, BIT(9), CLK_IGNORE_UNUSED);
+ static CCU_GATE(CLK_PERISYS_APB1_HCLK, perisys_apb1_hclk, "perisys-apb1-hclk", perisys_ahb_hclk_pd,
+-		0x150, BIT(9), 0);
++		0x150, BIT(9), CLK_IGNORE_UNUSED);
+ static CCU_GATE(CLK_PERISYS_APB2_HCLK, perisys_apb2_hclk, "perisys-apb2-hclk", perisys_ahb_hclk_pd,
+ 		0x150, BIT(10), CLK_IGNORE_UNUSED);
+ static CCU_GATE(CLK_PERISYS_APB3_HCLK, perisys_apb3_hclk, "perisys-apb3-hclk", perisys_ahb_hclk_pd,
+diff --git a/drivers/comedi/comedi_fops.c b/drivers/comedi/comedi_fops.c
+index 07bc81a706b4d3..bd8a44ea62d2d0 100644
+--- a/drivers/comedi/comedi_fops.c
++++ b/drivers/comedi/comedi_fops.c
+@@ -787,6 +787,7 @@ static int is_device_busy(struct comedi_device *dev)
+ 	struct comedi_subdevice *s;
+ 	int i;
+ 
++	lockdep_assert_held_write(&dev->attach_lock);
+ 	lockdep_assert_held(&dev->mutex);
+ 	if (!dev->attached)
+ 		return 0;
+@@ -795,7 +796,16 @@ static int is_device_busy(struct comedi_device *dev)
+ 		s = &dev->subdevices[i];
+ 		if (s->busy)
+ 			return 1;
+-		if (s->async && comedi_buf_is_mmapped(s))
++		if (!s->async)
++			continue;
++		if (comedi_buf_is_mmapped(s))
++			return 1;
++		/*
++		 * There may be tasks still waiting on the subdevice's wait
++		 * queue, although they should already be about to be removed
++		 * from it since the subdevice has no active async command.
++		 */
++		if (wq_has_sleeper(&s->async->wait_head))
+ 			return 1;
+ 	}
+ 
+@@ -825,15 +835,22 @@ static int do_devconfig_ioctl(struct comedi_device *dev,
+ 		return -EPERM;
+ 
+ 	if (!arg) {
+-		if (is_device_busy(dev))
+-			return -EBUSY;
++		int rc = 0;
++
+ 		if (dev->attached) {
+-			struct module *driver_module = dev->driver->module;
++			down_write(&dev->attach_lock);
++			if (is_device_busy(dev)) {
++				rc = -EBUSY;
++			} else {
++				struct module *driver_module =
++					dev->driver->module;
+ 
+-			comedi_device_detach(dev);
+-			module_put(driver_module);
++				comedi_device_detach_locked(dev);
++				module_put(driver_module);
++			}
++			up_write(&dev->attach_lock);
+ 		}
+-		return 0;
++		return rc;
+ 	}
+ 
+ 	if (copy_from_user(&it, arg, sizeof(it)))
+diff --git a/drivers/comedi/comedi_internal.h b/drivers/comedi/comedi_internal.h
+index 9b3631a654c895..cf10ba016ebc81 100644
+--- a/drivers/comedi/comedi_internal.h
++++ b/drivers/comedi/comedi_internal.h
+@@ -50,6 +50,7 @@ extern struct mutex comedi_drivers_list_lock;
+ int insn_inval(struct comedi_device *dev, struct comedi_subdevice *s,
+ 	       struct comedi_insn *insn, unsigned int *data);
+ 
++void comedi_device_detach_locked(struct comedi_device *dev);
+ void comedi_device_detach(struct comedi_device *dev);
+ int comedi_device_attach(struct comedi_device *dev,
+ 			 struct comedi_devconfig *it);
+diff --git a/drivers/comedi/drivers.c b/drivers/comedi/drivers.c
+index 9e4b7c840a8f5a..f1dc854928c176 100644
+--- a/drivers/comedi/drivers.c
++++ b/drivers/comedi/drivers.c
+@@ -158,7 +158,7 @@ static void comedi_device_detach_cleanup(struct comedi_device *dev)
+ 	int i;
+ 	struct comedi_subdevice *s;
+ 
+-	lockdep_assert_held(&dev->attach_lock);
++	lockdep_assert_held_write(&dev->attach_lock);
+ 	lockdep_assert_held(&dev->mutex);
+ 	if (dev->subdevices) {
+ 		for (i = 0; i < dev->n_subdevices; i++) {
+@@ -196,16 +196,23 @@ static void comedi_device_detach_cleanup(struct comedi_device *dev)
+ 	comedi_clear_hw_dev(dev);
+ }
+ 
+-void comedi_device_detach(struct comedi_device *dev)
++void comedi_device_detach_locked(struct comedi_device *dev)
+ {
++	lockdep_assert_held_write(&dev->attach_lock);
+ 	lockdep_assert_held(&dev->mutex);
+ 	comedi_device_cancel_all(dev);
+-	down_write(&dev->attach_lock);
+ 	dev->attached = false;
+ 	dev->detach_count++;
+ 	if (dev->driver)
+ 		dev->driver->detach(dev);
+ 	comedi_device_detach_cleanup(dev);
++}
++
++void comedi_device_detach(struct comedi_device *dev)
++{
++	lockdep_assert_held(&dev->mutex);
++	down_write(&dev->attach_lock);
++	comedi_device_detach_locked(dev);
+ 	up_write(&dev->attach_lock);
+ }
+ 
+diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
+index 36ea181260c7ee..8d5279c21e6cfe 100644
+--- a/drivers/cpufreq/cppc_cpufreq.c
++++ b/drivers/cpufreq/cppc_cpufreq.c
+@@ -841,7 +841,7 @@ static struct freq_attr *cppc_cpufreq_attr[] = {
+ };
+ 
+ static struct cpufreq_driver cppc_cpufreq_driver = {
+-	.flags = CPUFREQ_CONST_LOOPS,
++	.flags = CPUFREQ_CONST_LOOPS | CPUFREQ_NEED_UPDATE_LIMITS,
+ 	.verify = cppc_verify_policy,
+ 	.target = cppc_cpufreq_set_target,
+ 	.get = cppc_cpufreq_get_rate,
+diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
+index fab94ffcb22ce8..bd55c235630350 100644
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -2724,10 +2724,12 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
+ 	pr_debug("starting governor %s failed\n", policy->governor->name);
+ 	if (old_gov) {
+ 		policy->governor = old_gov;
+-		if (cpufreq_init_governor(policy))
++		if (cpufreq_init_governor(policy)) {
+ 			policy->governor = NULL;
+-		else
+-			cpufreq_start_governor(policy);
++		} else if (cpufreq_start_governor(policy)) {
++			cpufreq_exit_governor(policy);
++			policy->governor = NULL;
++		}
+ 	}
+ 
+ 	return ret;
+diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
+index b86372aa341dae..e9087109203895 100644
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -2654,6 +2654,8 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
+ 	X86_MATCH(INTEL_TIGERLAKE,		core_funcs),
+ 	X86_MATCH(INTEL_SAPPHIRERAPIDS_X,	core_funcs),
+ 	X86_MATCH(INTEL_EMERALDRAPIDS_X,	core_funcs),
++	X86_MATCH(INTEL_GRANITERAPIDS_D,	core_funcs),
++	X86_MATCH(INTEL_GRANITERAPIDS_X,	core_funcs),
+ 	{}
+ };
+ MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
+diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
+index 97ffadc7e57a64..01322a9054143b 100644
+--- a/drivers/cpuidle/governors/menu.c
++++ b/drivers/cpuidle/governors/menu.c
+@@ -153,6 +153,14 @@ static inline int performance_multiplier(unsigned int nr_iowaiters)
+ 
+ static DEFINE_PER_CPU(struct menu_device, menu_devices);
+ 
++static void menu_update_intervals(struct menu_device *data, unsigned int interval_us)
++{
++	/* Update the repeating-pattern data. */
++	data->intervals[data->interval_ptr++] = interval_us;
++	if (data->interval_ptr >= INTERVALS)
++		data->interval_ptr = 0;
++}
++
+ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);
+ 
+ /*
+@@ -277,6 +285,14 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
+ 	if (data->needs_update) {
+ 		menu_update(drv, dev);
+ 		data->needs_update = 0;
++	} else if (!dev->last_residency_ns) {
++		/*
++		 * This happens when the driver rejects the previously selected
++		 * idle state and returns an error, so update the recent
++		 * intervals table to prevent invalid information from being
++		 * used going forward.
++		 */
++		menu_update_intervals(data, UINT_MAX);
+ 	}
+ 
+ 	nr_iowaiters = nr_iowait_cpu(dev->cpu);
+@@ -546,10 +562,7 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
+ 
+ 	data->correction_factor[data->bucket] = new_factor;
+ 
+-	/* update the repeating-pattern data */
+-	data->intervals[data->interval_ptr++] = ktime_to_us(measured_ns);
+-	if (data->interval_ptr >= INTERVALS)
+-		data->interval_ptr = 0;
++	menu_update_intervals(data, ktime_to_us(measured_ns));
+ }
+ 
+ /**
+diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
+index 2ebc878da16095..224edaaa737b6c 100644
+--- a/drivers/crypto/ccp/sp-pci.c
++++ b/drivers/crypto/ccp/sp-pci.c
+@@ -451,6 +451,7 @@ static const struct psp_vdata pspv6 = {
+ 	.cmdresp_reg		= 0x10944,	/* C2PMSG_17 */
+ 	.cmdbuff_addr_lo_reg	= 0x10948,	/* C2PMSG_18 */
+ 	.cmdbuff_addr_hi_reg	= 0x1094c,	/* C2PMSG_19 */
++	.bootloader_info_reg	= 0x109ec,	/* C2PMSG_59 */
+ 	.feature_reg            = 0x109fc,	/* C2PMSG_63 */
+ 	.inten_reg              = 0x10510,	/* P2CMSG_INTEN */
+ 	.intsts_reg             = 0x10514,	/* P2CMSG_INTSTS */
+diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
+index c167dbd6c7d623..e71f1e4597640e 100644
+--- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c
++++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
+@@ -1487,11 +1487,13 @@ static void hpre_ecdh_cb(struct hpre_ctx *ctx, void *resp)
+ 	if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
+ 		atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
+ 
++	/* Do unmap before data processing */
++	hpre_ecdh_hw_data_clr_all(ctx, req, areq->dst, areq->src);
++
+ 	p = sg_virt(areq->dst);
+ 	memmove(p, p + ctx->key_sz - curve_sz, curve_sz);
+ 	memmove(p + curve_sz, p + areq->dst_len - curve_sz, curve_sz);
+ 
+-	hpre_ecdh_hw_data_clr_all(ctx, req, areq->dst, areq->src);
+ 	kpp_request_complete(areq, ret);
+ 
+ 	atomic64_inc(&dfx[HPRE_RECV_CNT].value);
+@@ -1801,9 +1803,11 @@ static void hpre_curve25519_cb(struct hpre_ctx *ctx, void *resp)
+ 	if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
+ 		atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
+ 
++	/* Do unmap before data processing */
++	hpre_curve25519_hw_data_clr_all(ctx, req, areq->dst, areq->src);
++
+ 	hpre_key_to_big_end(sg_virt(areq->dst), CURVE25519_KEY_SIZE);
+ 
+-	hpre_curve25519_hw_data_clr_all(ctx, req, areq->dst, areq->src);
+ 	kpp_request_complete(areq, ret);
+ 
+ 	atomic64_inc(&dfx[HPRE_RECV_CNT].value);
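Both completion callbacks now finish the DMA unmap before the CPU rearranges the result buffer; touching memory that is still mapped for device DMA risks reading stale data on non-coherent systems. The ordering, reduced to a sketch (names illustrative):

    static void request_done(struct device *dev, dma_addr_t dma, u8 *buf,
                             size_t len, size_t key_sz, size_t curve_sz)
    {
            dma_unmap_single(dev, dma, len, DMA_BIDIRECTIONAL);

            /* Only now is buf coherent from the CPU's point of view. */
            memmove(buf, buf + key_sz - curve_sz, curve_sz);
    }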
+diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
+index 5c94846461725d..357a7c6ac83713 100644
+--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
++++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
+@@ -1493,6 +1493,7 @@ int otx2_cpt_discover_eng_capabilities(struct otx2_cptpf_dev *cptpf)
+ 	dma_addr_t rptr_baddr;
+ 	struct pci_dev *pdev;
+ 	u32 len, compl_rlen;
++	int timeout = 10000;
+ 	int ret, etype;
+ 	void *rptr;
+ 
+@@ -1555,16 +1556,27 @@ int otx2_cpt_discover_eng_capabilities(struct otx2_cptpf_dev *cptpf)
+ 							 etype);
+ 		otx2_cpt_fill_inst(&inst, &iq_cmd, rptr_baddr);
+ 		lfs->ops->send_cmd(&inst, 1, &cptpf->lfs.lf[0]);
++		timeout = 10000;
+ 
+ 		while (lfs->ops->cpt_get_compcode(result) ==
+-						OTX2_CPT_COMPLETION_CODE_INIT)
++						OTX2_CPT_COMPLETION_CODE_INIT) {
+ 			cpu_relax();
++			udelay(1);
++			timeout--;
++			if (!timeout) {
++				ret = -ENODEV;
++				cptpf->is_eng_caps_discovered = false;
++				dev_warn(&pdev->dev, "Timeout on CPT load_fvc completion poll\n");
++				goto error_no_response;
++			}
++		}
+ 
+ 		cptpf->eng_caps[etype].u = be64_to_cpup(rptr);
+ 	}
+-	dma_unmap_single(&pdev->dev, rptr_baddr, len, DMA_BIDIRECTIONAL);
+ 	cptpf->is_eng_caps_discovered = true;
+ 
++error_no_response:
++	dma_unmap_single(&pdev->dev, rptr_baddr, len, DMA_BIDIRECTIONAL);
+ free_result:
+ 	kfree(result);
+ lf_cleanup:
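
(Aside on the otx2_cpt hunk: the previously unbounded completion-code
busy-wait is now capped at 10000 iterations of udelay(1), i.e. roughly
10 ms. A portable C sketch of the same bounded-poll shape; the function
name and the plain -1 error value are illustrative, standing in for the
hunk's -ENODEV path.)

#include <stdbool.h>
#include <time.h>

/* Poll ready() every delay_us microseconds, at most max_iters times.
 * Returns 0 on completion, -1 on timeout. */
static int poll_bounded(bool (*ready)(void), unsigned int delay_us,
			unsigned int max_iters)
{
	struct timespec ts = { 0, (long)delay_us * 1000L };

	while (max_iters--) {
		if (ready())
			return 0;
		nanosleep(&ts, NULL);
	}
	return -1;
}
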
+diff --git a/drivers/devfreq/governor_userspace.c b/drivers/devfreq/governor_userspace.c
+index d1aa6806b683ac..175de0c0b50e08 100644
+--- a/drivers/devfreq/governor_userspace.c
++++ b/drivers/devfreq/governor_userspace.c
+@@ -9,6 +9,7 @@
+ #include <linux/slab.h>
+ #include <linux/device.h>
+ #include <linux/devfreq.h>
++#include <linux/kstrtox.h>
+ #include <linux/pm.h>
+ #include <linux/mutex.h>
+ #include <linux/module.h>
+@@ -39,10 +40,13 @@ static ssize_t set_freq_store(struct device *dev, struct device_attribute *attr,
+ 	unsigned long wanted;
+ 	int err = 0;
+ 
++	err = kstrtoul(buf, 0, &wanted);
++	if (err)
++		return err;
++
+ 	mutex_lock(&devfreq->lock);
+ 	data = devfreq->governor_data;
+ 
+-	sscanf(buf, "%lu", &wanted);
+ 	data->user_frequency = wanted;
+ 	data->valid = true;
+ 	err = update_devfreq(devfreq);
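
(Aside on the devfreq hunk: replacing the unchecked sscanf() with
kstrtoul() means malformed sysfs input is rejected before the governor
lock is taken and before any state is modified. A rough userspace
equivalent of kstrtoul()'s strict behavior, for illustration only;
parse_ulong_strict is a made-up name.)

#include <errno.h>
#include <stdlib.h>

/* Strict parse: the whole string must be a number (one trailing newline
 * is tolerated, as kstrtoul() allows); range errors are reported rather
 * than silently clamped. */
static int parse_ulong_strict(const char *s, unsigned long *out)
{
	char *end;

	errno = 0;
	*out = strtoul(s, &end, 0);
	if (errno == ERANGE)
		return -ERANGE;
	if (end == s || (*end != '\0' && !(*end == '\n' && end[1] == '\0')))
		return -EINVAL;
	return 0;
}
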
+diff --git a/drivers/dma/stm32/stm32-dma.c b/drivers/dma/stm32/stm32-dma.c
+index 917f8e9223739a..0e39f99bce8be8 100644
+--- a/drivers/dma/stm32/stm32-dma.c
++++ b/drivers/dma/stm32/stm32-dma.c
+@@ -744,7 +744,7 @@ static void stm32_dma_handle_chan_done(struct stm32_dma_chan *chan, u32 scr)
+ 		/* cyclic while CIRC/DBM disable => post resume reconfiguration needed */
+ 		if (!(scr & (STM32_DMA_SCR_CIRC | STM32_DMA_SCR_DBM)))
+ 			stm32_dma_post_resume_reconfigure(chan);
+-		else if (scr & STM32_DMA_SCR_DBM)
++		else if (scr & STM32_DMA_SCR_DBM && chan->desc->num_sgs > 2)
+ 			stm32_dma_configure_next_sg(chan);
+ 	} else {
+ 		chan->busy = false;
+diff --git a/drivers/edac/synopsys_edac.c b/drivers/edac/synopsys_edac.c
+index d7416166fd8a42..ec3ed5e2b2d7ef 100644
+--- a/drivers/edac/synopsys_edac.c
++++ b/drivers/edac/synopsys_edac.c
+@@ -332,20 +332,26 @@ struct synps_edac_priv {
+ #endif
+ };
+ 
++enum synps_platform_type {
++	ZYNQ,
++	ZYNQMP,
++	SYNPS,
++};
++
+ /**
+  * struct synps_platform_data -  synps platform data structure.
++ * @platform:		Identifies the target hardware platform
+  * @get_error_info:	Get EDAC error info.
+  * @get_mtype:		Get mtype.
+  * @get_dtype:		Get dtype.
+- * @get_ecc_state:	Get ECC state.
+  * @get_mem_info:	Get EDAC memory info
+  * @quirks:		To differentiate IPs.
+  */
+ struct synps_platform_data {
++	enum synps_platform_type platform;
+ 	int (*get_error_info)(struct synps_edac_priv *priv);
+ 	enum mem_type (*get_mtype)(const void __iomem *base);
+ 	enum dev_type (*get_dtype)(const void __iomem *base);
+-	bool (*get_ecc_state)(void __iomem *base);
+ #ifdef CONFIG_EDAC_DEBUG
+ 	u64 (*get_mem_info)(struct synps_edac_priv *priv);
+ #endif
+@@ -720,51 +726,38 @@ static enum dev_type zynqmp_get_dtype(const void __iomem *base)
+ 	return dt;
+ }
+ 
+-/**
+- * zynq_get_ecc_state - Return the controller ECC enable/disable status.
+- * @base:	DDR memory controller base address.
+- *
+- * Get the ECC enable/disable status of the controller.
+- *
+- * Return: true if enabled, otherwise false.
+- */
+-static bool zynq_get_ecc_state(void __iomem *base)
++static bool get_ecc_state(struct synps_edac_priv *priv)
+ {
++	u32 ecctype, clearval;
+ 	enum dev_type dt;
+-	u32 ecctype;
+-
+-	dt = zynq_get_dtype(base);
+-	if (dt == DEV_UNKNOWN)
+-		return false;
+ 
+-	ecctype = readl(base + SCRUB_OFST) & SCRUB_MODE_MASK;
+-	if ((ecctype == SCRUB_MODE_SECDED) && (dt == DEV_X2))
+-		return true;
+-
+-	return false;
+-}
+-
+-/**
+- * zynqmp_get_ecc_state - Return the controller ECC enable/disable status.
+- * @base:	DDR memory controller base address.
+- *
+- * Get the ECC enable/disable status for the controller.
+- *
+- * Return: a ECC status boolean i.e true/false - enabled/disabled.
+- */
+-static bool zynqmp_get_ecc_state(void __iomem *base)
+-{
+-	enum dev_type dt;
+-	u32 ecctype;
+-
+-	dt = zynqmp_get_dtype(base);
+-	if (dt == DEV_UNKNOWN)
+-		return false;
+-
+-	ecctype = readl(base + ECC_CFG0_OFST) & SCRUB_MODE_MASK;
+-	if ((ecctype == SCRUB_MODE_SECDED) &&
+-	    ((dt == DEV_X2) || (dt == DEV_X4) || (dt == DEV_X8)))
+-		return true;
++	if (priv->p_data->platform == ZYNQ) {
++		dt = zynq_get_dtype(priv->baseaddr);
++		if (dt == DEV_UNKNOWN)
++			return false;
++
++		ecctype = readl(priv->baseaddr + SCRUB_OFST) & SCRUB_MODE_MASK;
++		if (ecctype == SCRUB_MODE_SECDED && dt == DEV_X2) {
++			clearval = ECC_CTRL_CLR_CE_ERR | ECC_CTRL_CLR_UE_ERR;
++			writel(clearval, priv->baseaddr + ECC_CTRL_OFST);
++			writel(0x0, priv->baseaddr + ECC_CTRL_OFST);
++			return true;
++		}
++	} else {
++		dt = zynqmp_get_dtype(priv->baseaddr);
++		if (dt == DEV_UNKNOWN)
++			return false;
++
++		ecctype = readl(priv->baseaddr + ECC_CFG0_OFST) & SCRUB_MODE_MASK;
++		if (ecctype == SCRUB_MODE_SECDED &&
++		    (dt == DEV_X2 || dt == DEV_X4 || dt == DEV_X8)) {
++			clearval = readl(priv->baseaddr + ECC_CLR_OFST) |
++			ECC_CTRL_CLR_CE_ERR | ECC_CTRL_CLR_CE_ERRCNT |
++			ECC_CTRL_CLR_UE_ERR | ECC_CTRL_CLR_UE_ERRCNT;
++			writel(clearval, priv->baseaddr + ECC_CLR_OFST);
++			return true;
++		}
++	}
+ 
+ 	return false;
+ }
+@@ -934,18 +927,18 @@ static int setup_irq(struct mem_ctl_info *mci,
+ }
+ 
+ static const struct synps_platform_data zynq_edac_def = {
++	.platform = ZYNQ,
+ 	.get_error_info	= zynq_get_error_info,
+ 	.get_mtype	= zynq_get_mtype,
+ 	.get_dtype	= zynq_get_dtype,
+-	.get_ecc_state	= zynq_get_ecc_state,
+ 	.quirks		= 0,
+ };
+ 
+ static const struct synps_platform_data zynqmp_edac_def = {
++	.platform = ZYNQMP,
+ 	.get_error_info	= zynqmp_get_error_info,
+ 	.get_mtype	= zynqmp_get_mtype,
+ 	.get_dtype	= zynqmp_get_dtype,
+-	.get_ecc_state	= zynqmp_get_ecc_state,
+ #ifdef CONFIG_EDAC_DEBUG
+ 	.get_mem_info	= zynqmp_get_mem_info,
+ #endif
+@@ -957,10 +950,10 @@ static const struct synps_platform_data zynqmp_edac_def = {
+ };
+ 
+ static const struct synps_platform_data synopsys_edac_def = {
++	.platform = SYNPS,
+ 	.get_error_info	= zynqmp_get_error_info,
+ 	.get_mtype	= zynqmp_get_mtype,
+ 	.get_dtype	= zynqmp_get_dtype,
+-	.get_ecc_state	= zynqmp_get_ecc_state,
+ 	.quirks         = (DDR_ECC_INTR_SUPPORT | DDR_ECC_INTR_SELF_CLEAR
+ #ifdef CONFIG_EDAC_DEBUG
+ 			  | DDR_ECC_DATA_POISON_SUPPORT
+@@ -1390,10 +1383,6 @@ static int mc_probe(struct platform_device *pdev)
+ 	if (!p_data)
+ 		return -ENODEV;
+ 
+-	if (!p_data->get_ecc_state(baseaddr)) {
+-		edac_printk(KERN_INFO, EDAC_MC, "ECC not enabled\n");
+-		return -ENXIO;
+-	}
+ 
+ 	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ 	layers[0].size = SYNPS_EDAC_NR_CSROWS;
+@@ -1413,6 +1402,12 @@ static int mc_probe(struct platform_device *pdev)
+ 	priv = mci->pvt_info;
+ 	priv->baseaddr = baseaddr;
+ 	priv->p_data = p_data;
++	if (!get_ecc_state(priv)) {
++		edac_printk(KERN_INFO, EDAC_MC, "ECC not enabled\n");
++		rc = -ENODEV;
++		goto free_edac_mc;
++	}
++
+ 	spin_lock_init(&priv->reglock);
+ 
+ 	mc_init(mci, pdev);
+diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c
+index 83dad9c2da0641..9fdfccbc6479a8 100644
+--- a/drivers/firmware/arm_ffa/driver.c
++++ b/drivers/firmware/arm_ffa/driver.c
+@@ -1815,7 +1815,7 @@ static int __init ffa_init(void)
+ 	kfree(drv_info);
+ 	return ret;
+ }
+-module_init(ffa_init);
++rootfs_initcall(ffa_init);
+ 
+ static void __exit ffa_exit(void)
+ {
+diff --git a/drivers/firmware/arm_scmi/scmi_power_control.c b/drivers/firmware/arm_scmi/scmi_power_control.c
+index 21f467a9294288..955736336061d2 100644
+--- a/drivers/firmware/arm_scmi/scmi_power_control.c
++++ b/drivers/firmware/arm_scmi/scmi_power_control.c
+@@ -46,6 +46,7 @@
+ #include <linux/math.h>
+ #include <linux/module.h>
+ #include <linux/mutex.h>
++#include <linux/pm.h>
+ #include <linux/printk.h>
+ #include <linux/reboot.h>
+ #include <linux/scmi_protocol.h>
+@@ -324,12 +325,7 @@ static int scmi_userspace_notifier(struct notifier_block *nb,
+ 
+ static void scmi_suspend_work_func(struct work_struct *work)
+ {
+-	struct scmi_syspower_conf *sc =
+-		container_of(work, struct scmi_syspower_conf, suspend_work);
+-
+ 	pm_suspend(PM_SUSPEND_MEM);
+-
+-	sc->state = SCMI_SYSPOWER_IDLE;
+ }
+ 
+ static int scmi_syspower_probe(struct scmi_device *sdev)
+@@ -354,6 +350,7 @@ static int scmi_syspower_probe(struct scmi_device *sdev)
+ 	sc->required_transition = SCMI_SYSTEM_MAX;
+ 	sc->userspace_nb.notifier_call = &scmi_userspace_notifier;
+ 	sc->dev = &sdev->dev;
++	dev_set_drvdata(&sdev->dev, sc);
+ 
+ 	INIT_WORK(&sc->suspend_work, scmi_suspend_work_func);
+ 
+@@ -363,6 +360,18 @@ static int scmi_syspower_probe(struct scmi_device *sdev)
+ 						       NULL, &sc->userspace_nb);
+ }
+ 
++static int scmi_system_power_resume(struct device *dev)
++{
++	struct scmi_syspower_conf *sc = dev_get_drvdata(dev);
++
++	sc->state = SCMI_SYSPOWER_IDLE;
++	return 0;
++}
++
++static const struct dev_pm_ops scmi_system_power_pmops = {
++	SYSTEM_SLEEP_PM_OPS(NULL, scmi_system_power_resume)
++};
++
+ static const struct scmi_device_id scmi_id_table[] = {
+ 	{ SCMI_PROTOCOL_SYSTEM, "syspower" },
+ 	{ },
+@@ -370,6 +379,9 @@ static const struct scmi_device_id scmi_id_table[] = {
+ MODULE_DEVICE_TABLE(scmi, scmi_id_table);
+ 
+ static struct scmi_driver scmi_system_power_driver = {
++	.driver	= {
++		.pm = pm_sleep_ptr(&scmi_system_power_pmops),
++	},
+ 	.name = "scmi-system-power",
+ 	.probe = scmi_syspower_probe,
+ 	.id_table = scmi_id_table,
+diff --git a/drivers/firmware/tegra/Kconfig b/drivers/firmware/tegra/Kconfig
+index cde1ab8bd9d1cb..91f2320c0d0f89 100644
+--- a/drivers/firmware/tegra/Kconfig
++++ b/drivers/firmware/tegra/Kconfig
+@@ -2,7 +2,7 @@
+ menu "Tegra firmware driver"
+ 
+ config TEGRA_IVC
+-	bool "Tegra IVC protocol"
++	bool "Tegra IVC protocol" if COMPILE_TEST
+ 	depends on ARCH_TEGRA
+ 	help
+ 	  IVC (Inter-VM Communication) protocol is part of the IPC
+@@ -13,8 +13,9 @@ config TEGRA_IVC
+ 
+ config TEGRA_BPMP
+ 	bool "Tegra BPMP driver"
+-	depends on ARCH_TEGRA && TEGRA_HSP_MBOX && TEGRA_IVC
++	depends on ARCH_TEGRA && TEGRA_HSP_MBOX
+ 	depends on !CPU_BIG_ENDIAN
++	select TEGRA_IVC
+ 	help
+ 	  BPMP (Boot and Power Management Processor) is designed to off-load
+ 	  the PM functions, which include clock/DVFS/thermal/power, from the CPU.
+diff --git a/drivers/gpio/gpio-mlxbf2.c b/drivers/gpio/gpio-mlxbf2.c
+index 6abe01bc39c3e1..c03945af8538e3 100644
+--- a/drivers/gpio/gpio-mlxbf2.c
++++ b/drivers/gpio/gpio-mlxbf2.c
+@@ -397,7 +397,7 @@ mlxbf2_gpio_probe(struct platform_device *pdev)
+ 	gc->ngpio = npins;
+ 	gc->owner = THIS_MODULE;
+ 
+-	irq = platform_get_irq(pdev, 0);
++	irq = platform_get_irq_optional(pdev, 0);
+ 	if (irq >= 0) {
+ 		girq = &gs->gc.irq;
+ 		gpio_irq_chip_set_chip(girq, &mlxbf2_gpio_irq_chip);
+diff --git a/drivers/gpio/gpio-mlxbf3.c b/drivers/gpio/gpio-mlxbf3.c
+index 9875e34bde72a4..ed29b07d16c190 100644
+--- a/drivers/gpio/gpio-mlxbf3.c
++++ b/drivers/gpio/gpio-mlxbf3.c
+@@ -190,9 +190,7 @@ static int mlxbf3_gpio_probe(struct platform_device *pdev)
+ 	struct mlxbf3_gpio_context *gs;
+ 	struct gpio_irq_chip *girq;
+ 	struct gpio_chip *gc;
+-	char *colon_ptr;
+ 	int ret, irq;
+-	long num;
+ 
+ 	gs = devm_kzalloc(dev, sizeof(*gs), GFP_KERNEL);
+ 	if (!gs)
+@@ -229,39 +227,25 @@ static int mlxbf3_gpio_probe(struct platform_device *pdev)
+ 	gc->owner = THIS_MODULE;
+ 	gc->add_pin_ranges = mlxbf3_gpio_add_pin_ranges;
+ 
+-	colon_ptr = strchr(dev_name(dev), ':');
+-	if (!colon_ptr) {
+-		dev_err(dev, "invalid device name format\n");
+-		return -EINVAL;
+-	}
+-
+-	ret = kstrtol(++colon_ptr, 16, &num);
+-	if (ret) {
+-		dev_err(dev, "invalid device instance\n");
+-		return ret;
+-	}
+-
+-	if (!num) {
+-		irq = platform_get_irq(pdev, 0);
+-		if (irq >= 0) {
+-			girq = &gs->gc.irq;
+-			gpio_irq_chip_set_chip(girq, &gpio_mlxbf3_irqchip);
+-			girq->default_type = IRQ_TYPE_NONE;
+-			/* This will let us handle the parent IRQ in the driver */
+-			girq->num_parents = 0;
+-			girq->parents = NULL;
+-			girq->parent_handler = NULL;
+-			girq->handler = handle_bad_irq;
+-
+-			/*
+-			 * Directly request the irq here instead of passing
+-			 * a flow-handler because the irq is shared.
+-			 */
+-			ret = devm_request_irq(dev, irq, mlxbf3_gpio_irq_handler,
+-					       IRQF_SHARED, dev_name(dev), gs);
+-			if (ret)
+-				return dev_err_probe(dev, ret, "failed to request IRQ");
+-		}
++	irq = platform_get_irq_optional(pdev, 0);
++	if (irq >= 0) {
++		girq = &gs->gc.irq;
++		gpio_irq_chip_set_chip(girq, &gpio_mlxbf3_irqchip);
++		girq->default_type = IRQ_TYPE_NONE;
++		/* This will let us handle the parent IRQ in the driver */
++		girq->num_parents = 0;
++		girq->parents = NULL;
++		girq->parent_handler = NULL;
++		girq->handler = handle_bad_irq;
++
++		/*
++		 * Directly request the irq here instead of passing
++		 * a flow-handler because the irq is shared.
++		 */
++		ret = devm_request_irq(dev, irq, mlxbf3_gpio_irq_handler,
++				       IRQF_SHARED, dev_name(dev), gs);
++		if (ret)
++			return dev_err_probe(dev, ret, "failed to request IRQ");
+ 	}
+ 
+ 	platform_set_drvdata(pdev, gs);
+diff --git a/drivers/gpio/gpio-tps65912.c b/drivers/gpio/gpio-tps65912.c
+index fab771cb6a87bf..bac757c191c2ea 100644
+--- a/drivers/gpio/gpio-tps65912.c
++++ b/drivers/gpio/gpio-tps65912.c
+@@ -49,10 +49,13 @@ static int tps65912_gpio_direction_output(struct gpio_chip *gc,
+ 					  unsigned offset, int value)
+ {
+ 	struct tps65912_gpio *gpio = gpiochip_get_data(gc);
++	int ret;
+ 
+ 	/* Set the initial value */
+-	regmap_update_bits(gpio->tps->regmap, TPS65912_GPIO1 + offset,
+-			   GPIO_SET_MASK, value ? GPIO_SET_MASK : 0);
++	ret = regmap_update_bits(gpio->tps->regmap, TPS65912_GPIO1 + offset,
++				 GPIO_SET_MASK, value ? GPIO_SET_MASK : 0);
++	if (ret)
++		return ret;
+ 
+ 	return regmap_update_bits(gpio->tps->regmap, TPS65912_GPIO1 + offset,
+ 				  GPIO_CFG_MASK, GPIO_CFG_MASK);
+diff --git a/drivers/gpio/gpio-virtio.c b/drivers/gpio/gpio-virtio.c
+index 93544ff6251334..5b56f3f6156309 100644
+--- a/drivers/gpio/gpio-virtio.c
++++ b/drivers/gpio/gpio-virtio.c
+@@ -539,7 +539,6 @@ static const char **virtio_gpio_get_names(struct virtio_gpio *vgpio,
+ 
+ static int virtio_gpio_probe(struct virtio_device *vdev)
+ {
+-	struct virtio_gpio_config config;
+ 	struct device *dev = &vdev->dev;
+ 	struct virtio_gpio *vgpio;
+ 	u32 gpio_names_size;
+@@ -551,9 +550,11 @@ static int virtio_gpio_probe(struct virtio_device *vdev)
+ 		return -ENOMEM;
+ 
+ 	/* Read configuration */
+-	virtio_cread_bytes(vdev, 0, &config, sizeof(config));
+-	gpio_names_size = le32_to_cpu(config.gpio_names_size);
+-	ngpio = le16_to_cpu(config.ngpio);
++	gpio_names_size =
++		virtio_cread32(vdev, offsetof(struct virtio_gpio_config,
++					      gpio_names_size));
++	ngpio =  virtio_cread16(vdev, offsetof(struct virtio_gpio_config,
++					       ngpio));
+ 	if (!ngpio) {
+ 		dev_err(dev, "Number of GPIOs can't be zero\n");
+ 		return -EINVAL;
+diff --git a/drivers/gpio/gpio-wcd934x.c b/drivers/gpio/gpio-wcd934x.c
+index 2bba27b13947f1..cfa7b0a50c8e33 100644
+--- a/drivers/gpio/gpio-wcd934x.c
++++ b/drivers/gpio/gpio-wcd934x.c
+@@ -46,9 +46,12 @@ static int wcd_gpio_direction_output(struct gpio_chip *chip, unsigned int pin,
+ 				     int val)
+ {
+ 	struct wcd_gpio_data *data = gpiochip_get_data(chip);
++	int ret;
+ 
+-	regmap_update_bits(data->map, WCD_REG_DIR_CTL_OFFSET,
+-			   WCD_PIN_MASK(pin), WCD_PIN_MASK(pin));
++	ret = regmap_update_bits(data->map, WCD_REG_DIR_CTL_OFFSET,
++				 WCD_PIN_MASK(pin), WCD_PIN_MASK(pin));
++	if (ret)
++		return ret;
+ 
+ 	return regmap_update_bits(data->map, WCD_REG_VAL_CTL_OFFSET,
+ 				  WCD_PIN_MASK(pin),
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
+index 02138aa557935e..dfb6cfd8376069 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
+@@ -88,8 +88,8 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ 	}
+ 
+ 	r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, size,
+-			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
+-			     AMDGPU_PTE_EXECUTABLE);
++			     AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
++			     AMDGPU_VM_PAGE_EXECUTABLE);
+ 
+ 	if (r) {
+ 		DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+index 732c79e201c6e8..ea4df412decff9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+@@ -648,9 +648,8 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
+ 	list_for_each_entry(block, &vres->blocks, link)
+ 		vis_usage += amdgpu_vram_mgr_vis_size(adev, block);
+ 
+-	amdgpu_vram_mgr_do_reserve(man);
+-
+ 	drm_buddy_free_list(mm, &vres->blocks, vres->flags);
++	amdgpu_vram_mgr_do_reserve(man);
+ 	mutex_unlock(&mgr->lock);
+ 
+ 	atomic64_sub(vis_usage, &mgr->vis_usage);
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 084d9ed325af63..33a3e5e28fbc35 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -5223,7 +5223,8 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
+ 
+ static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
+ {
+-	drm_atomic_private_obj_fini(&dm->atomic_obj);
++	if (dm->atomic_obj.state)
++		drm_atomic_private_obj_fini(&dm->atomic_obj);
+ }
+ 
+ /******************************************************************************
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+index e140b7a04d7246..d63038ec4ec70c 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+@@ -127,8 +127,10 @@ bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
+ 		psr_config.allow_multi_disp_optimizations =
+ 			(amdgpu_dc_feature_mask & DC_PSR_ALLOW_MULTI_DISP_OPT);
+ 
+-		if (!psr_su_set_dsc_slice_height(dc, link, stream, &psr_config))
+-			return false;
++		if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) {
++			if (!psr_su_set_dsc_slice_height(dc, link, stream, &psr_config))
++				return false;
++		}
+ 
+ 		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index a99d3e2256f196..f5d938b9504c07 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -5121,8 +5121,8 @@ bool dc_update_planes_and_stream(struct dc *dc,
+ 	else
+ 		ret = update_planes_and_stream_v2(dc, srf_updates,
+ 			surface_count, stream, stream_update);
+-
+-	if (ret)
++	if (ret && (dc->ctx->dce_version >= DCN_VERSION_3_2 ||
++		dc->ctx->dce_version == DCN_VERSION_3_01))
+ 		clear_update_flags(srf_updates, surface_count, stream);
+ 
+ 	return ret;
+@@ -5153,7 +5153,7 @@ void dc_commit_updates_for_stream(struct dc *dc,
+ 		ret = update_planes_and_stream_v1(dc, srf_updates, surface_count, stream,
+ 				stream_update, state);
+ 
+-	if (ret)
++	if (ret && dc->ctx->dce_version >= DCN_VERSION_3_2)
+ 		clear_update_flags(srf_updates, surface_count, stream);
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
+index 9c5cdb3b80b5de..08fc2a2c399f60 100644
+--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
+@@ -273,14 +273,13 @@ void dcn20_setup_gsl_group_as_lock(
+ 	}
+ 
+ 	/* at this point we want to program whether it's to enable or disable */
+-	if (pipe_ctx->stream_res.tg->funcs->set_gsl != NULL &&
+-		pipe_ctx->stream_res.tg->funcs->set_gsl_source_select != NULL) {
++	if (pipe_ctx->stream_res.tg->funcs->set_gsl != NULL) {
+ 		pipe_ctx->stream_res.tg->funcs->set_gsl(
+ 			pipe_ctx->stream_res.tg,
+ 			&gsl);
+-
+-		pipe_ctx->stream_res.tg->funcs->set_gsl_source_select(
+-			pipe_ctx->stream_res.tg, group_idx,	enable ? 4 : 0);
++		if (pipe_ctx->stream_res.tg->funcs->set_gsl_source_select != NULL)
++			pipe_ctx->stream_res.tg->funcs->set_gsl_source_select(
++				pipe_ctx->stream_res.tg, group_idx, enable ? 4 : 0);
+ 	} else
+ 		BREAK_TO_DEBUGGER();
+ }
+@@ -946,7 +945,7 @@ enum dc_status dcn20_enable_stream_timing(
+ 		return DC_ERROR_UNEXPECTED;
+ 	}
+ 
+-	hws->funcs.wait_for_blank_complete(pipe_ctx->stream_res.opp);
++	fsleep(stream->timing.v_total * (stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz));
+ 
+ 	params.vertical_total_min = stream->adjust.v_total_min;
+ 	params.vertical_total_max = stream->adjust.v_total_max;
+diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
+index aa28001297675a..9d740659521a43 100644
+--- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
++++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
+@@ -140,7 +140,8 @@ void link_blank_dp_stream(struct dc_link *link, bool hw_init)
+ 				}
+ 		}
+ 
+-		if ((!link->wa_flags.dp_keep_receiver_powered) || hw_init)
++		if (((!link->wa_flags.dp_keep_receiver_powered) || hw_init) &&
++			(link->type != dc_connection_none))
+ 			dpcd_write_rx_power_ctrl(link, false);
+ 	}
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c
+index 37ab5a4eefc7cd..0f531cfd3c49b7 100644
+--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c
++++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c
+@@ -571,7 +571,7 @@ void mpc401_get_gamut_remap(struct mpc *mpc,
+ 	struct mpc_grph_gamut_adjustment *adjust)
+ {
+ 	uint16_t arr_reg_val[12] = {0};
+-	uint32_t mode_select;
++	uint32_t mode_select = MPCC_GAMUT_REMAP_MODE_SELECT_0;
+ 
+ 	read_gamut_remap(mpc, mpcc_id, arr_reg_val, adjust->mpcc_gamut_remap_block_id, &mode_select);
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
+index 01d95108ce662b..585c3e8a219488 100644
+--- a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
+@@ -927,6 +927,7 @@ static const struct dc_debug_options debug_defaults_drv = {
+ 	.seamless_boot_odm_combine = true,
+ 	.enable_legacy_fast_update = true,
+ 	.using_dml2 = false,
++	.disable_dsc_power_gate = true,
+ };
+ 
+ static const struct dc_panel_config panel_config_defaults = {
+diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
+index 4581eb47945180..01e83c6ce70107 100644
+--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
++++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
+@@ -92,19 +92,15 @@ void dmub_dcn35_reset(struct dmub_srv *dmub)
+ 	uint32_t in_reset, is_enabled, scratch, i, pwait_mode;
+ 
+ 	REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &in_reset);
++	REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_enabled);
+ 
+-	if (in_reset == 0) {
++	if (in_reset == 0 && is_enabled != 0) {
+ 		cmd.bits.status = 1;
+ 		cmd.bits.command_code = DMUB_GPINT__STOP_FW;
+ 		cmd.bits.param = 0;
+ 
+ 		dmub->hw_funcs.set_gpint(dmub, cmd);
+ 
+-		/**
+-		 * Timeout covers both the ACK and the wait
+-		 * for remaining work to finish.
+-		 */
+-
+ 		for (i = 0; i < timeout; ++i) {
+ 			if (dmub->hw_funcs.is_gpint_acked(dmub, cmd))
+ 				break;
+@@ -130,11 +126,9 @@ void dmub_dcn35_reset(struct dmub_srv *dmub)
+ 		/* Force reset in case we timed out, DMCUB is likely hung. */
+ 	}
+ 
+-	REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_enabled);
+-
+ 	if (is_enabled) {
+ 		REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 1);
+-		REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1);
++		udelay(1);
+ 		REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
+ 	}
+ 
+@@ -160,11 +154,7 @@ void dmub_dcn35_reset_release(struct dmub_srv *dmub)
+ 		     LONO_SOCCLK_GATE_DISABLE, 1,
+ 		     LONO_DMCUBCLK_GATE_DISABLE, 1);
+ 
+-	REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1);
+-	udelay(1);
+ 	REG_UPDATE_2(DMCUB_CNTL, DMCUB_ENABLE, 1, DMCUB_TRACEPORT_EN, 1);
+-	REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 1);
+-	udelay(1);
+ 	REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 0);
+ 	REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 0);
+ }
+diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+index bfdfba676025e7..c4fdd82a00429c 100644
+--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
++++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+@@ -1490,6 +1490,8 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
+ 			if (ret)
+ 				return -EINVAL;
+ 			parameter_size++;
++			if (!tmp_str)
++				break;
+ 			while (isspace(*tmp_str))
+ 				tmp_str++;
+ 		}
+@@ -3853,6 +3855,9 @@ static int parse_input_od_command_lines(const char *buf,
+ 			return -EINVAL;
+ 		parameter_size++;
+ 
++		if (!tmp_str)
++			break;
++
+ 		while (isspace(*tmp_str))
+ 			tmp_str++;
+ 	}
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+index 9bca748ac2e947..3d3765815e2408 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+@@ -664,7 +664,6 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
+ {
+ 	DpmClocks_t *clk_table = smu->smu_table.clocks_table;
+ 	SmuMetrics_t metrics;
+-	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+ 	int i, idx, size = 0, ret = 0;
+ 	uint32_t cur_value = 0, value = 0, count = 0;
+ 	bool cur_value_match_level = false;
+@@ -680,31 +679,25 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
+ 
+ 	switch (clk_type) {
+ 	case SMU_OD_SCLK:
+-		if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
+-			size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
+-			size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
+-			(smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
+-			size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
+-			(smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq);
+-		}
++		size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
++		size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
++		(smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
++		size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
++		(smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq);
+ 		break;
+ 	case SMU_OD_CCLK:
+-		if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
+-			size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n",  smu->cpu_core_id_select);
+-			size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
+-			(smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq);
+-			size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
+-			(smu->cpu_actual_soft_max_freq > 0) ? smu->cpu_actual_soft_max_freq : smu->cpu_default_soft_max_freq);
+-		}
++		size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n",  smu->cpu_core_id_select);
++		size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
++		(smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq);
++		size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
++		(smu->cpu_actual_soft_max_freq > 0) ? smu->cpu_actual_soft_max_freq : smu->cpu_default_soft_max_freq);
+ 		break;
+ 	case SMU_OD_RANGE:
+-		if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
+-			size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
+-			size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
+-				smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
+-			size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n",
+-				smu->cpu_default_soft_min_freq, smu->cpu_default_soft_max_freq);
+-		}
++		size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
++		size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
++			smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
++		size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n",
++			smu->cpu_default_soft_min_freq, smu->cpu_default_soft_max_freq);
+ 		break;
+ 	case SMU_SOCCLK:
+ 		/* the level 3 ~ 6 of socclk use the same frequency for vangogh */
+diff --git a/drivers/gpu/drm/imagination/pvr_power.c b/drivers/gpu/drm/imagination/pvr_power.c
+index 850b318605da4c..d97613c6a0a9ba 100644
+--- a/drivers/gpu/drm/imagination/pvr_power.c
++++ b/drivers/gpu/drm/imagination/pvr_power.c
+@@ -317,6 +317,63 @@ pvr_power_device_idle(struct device *dev)
+ 	return pvr_power_is_idle(pvr_dev) ? 0 : -EBUSY;
+ }
+ 
++static int
++pvr_power_clear_error(struct pvr_device *pvr_dev)
++{
++	struct device *dev = from_pvr_device(pvr_dev)->dev;
++	int err;
++
++	/* Ensure the device state is known and nothing is happening past this point */
++	pm_runtime_disable(dev);
++
++	/* Attempt to clear the runtime PM error by setting the current state again */
++	if (pm_runtime_status_suspended(dev))
++		err = pm_runtime_set_suspended(dev);
++	else
++		err = pm_runtime_set_active(dev);
++
++	if (err) {
++		drm_err(from_pvr_device(pvr_dev),
++			"%s: Failed to clear runtime PM error (new error %d)\n",
++			__func__, err);
++	}
++
++	pm_runtime_enable(dev);
++
++	return err;
++}
++
++/**
++ * pvr_power_get_clear() - Acquire a power reference, correcting any errors
++ * @pvr_dev: Device pointer
++ *
++ * Attempt to acquire a power reference on the device. If the runtime PM
++ * is in error state, attempt to clear the error and retry.
++ *
++ * Returns:
++ *  * 0 on success, or
++ *  * Any error code returned by pvr_power_get() or the runtime PM API.
++ */
++static int
++pvr_power_get_clear(struct pvr_device *pvr_dev)
++{
++	int err;
++
++	err = pvr_power_get(pvr_dev);
++	if (err == 0)
++		return err;
++
++	drm_warn(from_pvr_device(pvr_dev),
++		 "%s: pvr_power_get returned error %d, attempting recovery\n",
++		 __func__, err);
++
++	err = pvr_power_clear_error(pvr_dev);
++	if (err)
++		return err;
++
++	return pvr_power_get(pvr_dev);
++}
++
+ /**
+  * pvr_power_reset() - Reset the GPU
+  * @pvr_dev: Device pointer
+@@ -341,7 +398,7 @@ pvr_power_reset(struct pvr_device *pvr_dev, bool hard_reset)
+ 	 * Take a power reference during the reset. This should prevent any interference with the
+ 	 * power state during reset.
+ 	 */
+-	WARN_ON(pvr_power_get(pvr_dev));
++	WARN_ON(pvr_power_get_clear(pvr_dev));
+ 
+ 	down_write(&pvr_dev->reset_sem);
+ 
+diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
+index 8c13b08708d228..197d8d9a421d32 100644
+--- a/drivers/gpu/drm/msm/msm_drv.c
++++ b/drivers/gpu/drm/msm/msm_drv.c
+@@ -550,6 +550,7 @@ static int msm_ioctl_gem_info_set_metadata(struct drm_gem_object *obj,
+ 					   u32 metadata_size)
+ {
+ 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
++	void *new_metadata;
+ 	void *buf;
+ 	int ret;
+ 
+@@ -567,8 +568,14 @@ static int msm_ioctl_gem_info_set_metadata(struct drm_gem_object *obj,
+ 	if (ret)
+ 		goto out;
+ 
+-	msm_obj->metadata =
++	new_metadata =
+ 		krealloc(msm_obj->metadata, metadata_size, GFP_KERNEL);
++	if (!new_metadata) {
++		ret = -ENOMEM;
++		goto out;
++	}
++
++	msm_obj->metadata = new_metadata;
+ 	msm_obj->metadata_size = metadata_size;
+ 	memcpy(msm_obj->metadata, buf, metadata_size);
+ 
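
(Aside on the msm metadata fix: this is the classic realloc rule. Keep
the result in a temporary, because a failing krealloc() returns NULL
while leaving the old allocation alive; assigning straight to
msm_obj->metadata would leak it and leave a stale size. The same rule in
plain C, with an illustrative helper name.)

#include <stdlib.h>

/* Grow *bufp to new_size. On failure the caller's buffer is untouched. */
static int grow_buffer(void **bufp, size_t new_size)
{
	void *tmp = realloc(*bufp, new_size);

	if (!tmp)
		return -1;
	*bufp = tmp;
	return 0;
}
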
+diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
+index ebc9ba66efb89d..eeb3b65dd4d13e 100644
+--- a/drivers/gpu/drm/msm/msm_gem.c
++++ b/drivers/gpu/drm/msm/msm_gem.c
+@@ -963,7 +963,8 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
+ 	uint64_t off = drm_vma_node_start(&obj->vma_node);
+ 	const char *madv;
+ 
+-	msm_gem_lock(obj);
++	if (!msm_gem_trylock(obj))
++		return;
+ 
+ 	stats->all.count++;
+ 	stats->all.size += obj->size;
+diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
+index 85f0257e83dab6..748053f70ca7a7 100644
+--- a/drivers/gpu/drm/msm/msm_gem.h
++++ b/drivers/gpu/drm/msm/msm_gem.h
+@@ -188,6 +188,12 @@ msm_gem_lock(struct drm_gem_object *obj)
+ 	dma_resv_lock(obj->resv, NULL);
+ }
+ 
++static inline bool __must_check
++msm_gem_trylock(struct drm_gem_object *obj)
++{
++	return dma_resv_trylock(obj->resv);
++}
++
+ static inline int
+ msm_gem_lock_interruptible(struct drm_gem_object *obj)
+ {
+diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c
+index 10febea473cde9..6cec796dd463f6 100644
+--- a/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c
++++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c
+@@ -585,6 +585,9 @@ rzg2l_mipi_dsi_bridge_mode_valid(struct drm_bridge *bridge,
+ 	if (mode->clock > 148500)
+ 		return MODE_CLOCK_HIGH;
+ 
++	if (mode->clock < 5803)
++		return MODE_CLOCK_LOW;
++
+ 	return MODE_OK;
+ }
+ 
+diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
+index 8504dbe19c1a0f..4ae9d33cf485d0 100644
+--- a/drivers/gpu/drm/ttm/ttm_pool.c
++++ b/drivers/gpu/drm/ttm/ttm_pool.c
+@@ -630,7 +630,6 @@ void ttm_pool_fini(struct ttm_pool *pool)
+ }
+ EXPORT_SYMBOL(ttm_pool_fini);
+ 
+-/* As long as pages are available make sure to release at least one */
+ static unsigned long ttm_pool_shrinker_scan(struct shrinker *shrink,
+ 					    struct shrink_control *sc)
+ {
+@@ -638,9 +637,12 @@ static unsigned long ttm_pool_shrinker_scan(struct shrinker *shrink,
+ 
+ 	do
+ 		num_freed += ttm_pool_shrink();
+-	while (!num_freed && atomic_long_read(&allocated_pages));
++	while (num_freed < sc->nr_to_scan &&
++	       atomic_long_read(&allocated_pages));
+ 
+-	return num_freed;
++	sc->nr_scanned = num_freed;
++
++	return num_freed ?: SHRINK_STOP;
+ }
+ 
+ /* Return the number of pages available or SHRINK_EMPTY if we have none */
+diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
+index 6d764ba88aab63..53f258f39ceb52 100644
+--- a/drivers/gpu/drm/ttm/ttm_resource.c
++++ b/drivers/gpu/drm/ttm/ttm_resource.c
+@@ -501,6 +501,9 @@ int ttm_resource_manager_evict_all(struct ttm_device *bdev,
+ 		cond_resched();
+ 	} while (!ret);
+ 
++	if (ret && ret != -ENOENT)
++		return ret;
++
+ 	spin_lock(&man->move_lock);
+ 	fence = dma_fence_get(man->move);
+ 	spin_unlock(&man->move_lock);
+diff --git a/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h b/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h
+index 4c39f01e4f5286..a3f421e2adc03b 100644
+--- a/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h
++++ b/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h
+@@ -20,6 +20,8 @@ struct xe_exec_queue;
+ struct xe_guc_exec_queue {
+ 	/** @q: Backpointer to parent xe_exec_queue */
+ 	struct xe_exec_queue *q;
++	/** @rcu: For safe freeing of exported dma fences */
++	struct rcu_head rcu;
+ 	/** @sched: GPU scheduler for this xe_exec_queue */
+ 	struct xe_gpu_scheduler sched;
+ 	/** @entity: Scheduler entity for this xe_exec_queue */
+diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
+index 0e17820a35e2ce..cf6946424fc357 100644
+--- a/drivers/gpu/drm/xe/xe_guc_submit.c
++++ b/drivers/gpu/drm/xe/xe_guc_submit.c
+@@ -1241,7 +1241,11 @@ static void __guc_exec_queue_fini_async(struct work_struct *w)
+ 	xe_sched_entity_fini(&ge->entity);
+ 	xe_sched_fini(&ge->sched);
+ 
+-	kfree(ge);
++	/*
++	 * RCU free due to the sched being exported via DRM scheduler fences
++	 * (timeline name).
++	 */
++	kfree_rcu(ge, rcu);
+ 	xe_exec_queue_fini(q);
+ 	xe_pm_runtime_put(guc_to_xe(guc));
+ }
+@@ -1427,6 +1431,7 @@ static int guc_exec_queue_init(struct xe_exec_queue *q)
+ 
+ 	q->guc = ge;
+ 	ge->q = q;
++	init_rcu_head(&ge->rcu);
+ 	init_waitqueue_head(&ge->suspend_wait);
+ 
+ 	for (i = 0; i < MAX_STATIC_MSG_TYPE; ++i)
+diff --git a/drivers/gpu/drm/xe/xe_hw_fence.c b/drivers/gpu/drm/xe/xe_hw_fence.c
+index 0b4f12be3692ab..6e2221b606885f 100644
+--- a/drivers/gpu/drm/xe/xe_hw_fence.c
++++ b/drivers/gpu/drm/xe/xe_hw_fence.c
+@@ -100,6 +100,9 @@ void xe_hw_fence_irq_finish(struct xe_hw_fence_irq *irq)
+ 		spin_unlock_irqrestore(&irq->lock, flags);
+ 		dma_fence_end_signalling(tmp);
+ 	}
++
++	/* Safe release of the irq->lock used in dma_fence_init. */
++	synchronize_rcu();
+ }
+ 
+ void xe_hw_fence_irq_run(struct xe_hw_fence_irq *irq)
+diff --git a/drivers/gpu/drm/xe/xe_query.c b/drivers/gpu/drm/xe/xe_query.c
+index 6fec5d1a1eb44b..6e7c940d7e2275 100644
+--- a/drivers/gpu/drm/xe/xe_query.c
++++ b/drivers/gpu/drm/xe/xe_query.c
+@@ -366,6 +366,7 @@ static int query_gt_list(struct xe_device *xe, struct drm_xe_device_query *query
+ 	struct drm_xe_query_gt_list __user *query_ptr =
+ 		u64_to_user_ptr(query->data);
+ 	struct drm_xe_query_gt_list *gt_list;
++	int iter = 0;
+ 	u8 id;
+ 
+ 	if (query->size == 0) {
+@@ -383,12 +384,12 @@ static int query_gt_list(struct xe_device *xe, struct drm_xe_device_query *query
+ 
+ 	for_each_gt(gt, xe, id) {
+ 		if (xe_gt_is_media_type(gt))
+-			gt_list->gt_list[id].type = DRM_XE_QUERY_GT_TYPE_MEDIA;
++			gt_list->gt_list[iter].type = DRM_XE_QUERY_GT_TYPE_MEDIA;
+ 		else
+-			gt_list->gt_list[id].type = DRM_XE_QUERY_GT_TYPE_MAIN;
+-		gt_list->gt_list[id].tile_id = gt_to_tile(gt)->id;
+-		gt_list->gt_list[id].gt_id = gt->info.id;
+-		gt_list->gt_list[id].reference_clock = gt->info.reference_clock;
++			gt_list->gt_list[iter].type = DRM_XE_QUERY_GT_TYPE_MAIN;
++		gt_list->gt_list[iter].tile_id = gt_to_tile(gt)->id;
++		gt_list->gt_list[iter].gt_id = gt->info.id;
++		gt_list->gt_list[iter].reference_clock = gt->info.reference_clock;
+ 		/*
+ 		 * The mem_regions indexes in the mask below need to
+ 		 * directly identify the struct
+@@ -404,19 +405,21 @@ static int query_gt_list(struct xe_device *xe, struct drm_xe_device_query *query
+ 		 * assumption.
+ 		 */
+ 		if (!IS_DGFX(xe))
+-			gt_list->gt_list[id].near_mem_regions = 0x1;
++			gt_list->gt_list[iter].near_mem_regions = 0x1;
+ 		else
+-			gt_list->gt_list[id].near_mem_regions =
++			gt_list->gt_list[iter].near_mem_regions =
+ 				BIT(gt_to_tile(gt)->id) << 1;
+-		gt_list->gt_list[id].far_mem_regions = xe->info.mem_region_mask ^
+-			gt_list->gt_list[id].near_mem_regions;
++		gt_list->gt_list[iter].far_mem_regions = xe->info.mem_region_mask ^
++			gt_list->gt_list[iter].near_mem_regions;
+ 
+-		gt_list->gt_list[id].ip_ver_major =
++		gt_list->gt_list[iter].ip_ver_major =
+ 			REG_FIELD_GET(GMD_ID_ARCH_MASK, gt->info.gmdid);
+-		gt_list->gt_list[id].ip_ver_minor =
++		gt_list->gt_list[iter].ip_ver_minor =
+ 			REG_FIELD_GET(GMD_ID_RELEASE_MASK, gt->info.gmdid);
+-		gt_list->gt_list[id].ip_ver_rev =
++		gt_list->gt_list[iter].ip_ver_rev =
+ 			REG_FIELD_GET(GMD_ID_REVID, gt->info.gmdid);
++
++		iter++;
+ 	}
+ 
+ 	if (copy_to_user(query_ptr, gt_list, size)) {
+diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
+index c00ce5bfec4ab5..25d1edb6a21075 100644
+--- a/drivers/hid/hid-apple.c
++++ b/drivers/hid/hid-apple.c
+@@ -934,10 +934,12 @@ static int apple_probe(struct hid_device *hdev,
+ 		return ret;
+ 	}
+ 
+-	timer_setup(&asc->battery_timer, apple_battery_timer_tick, 0);
+-	mod_timer(&asc->battery_timer,
+-		  jiffies + msecs_to_jiffies(APPLE_BATTERY_TIMEOUT_MS));
+-	apple_fetch_battery(hdev);
++	if (quirks & APPLE_RDESC_BATTERY) {
++		timer_setup(&asc->battery_timer, apple_battery_timer_tick, 0);
++		mod_timer(&asc->battery_timer,
++			  jiffies + msecs_to_jiffies(APPLE_BATTERY_TIMEOUT_MS));
++		apple_fetch_battery(hdev);
++	}
+ 
+ 	if (quirks & APPLE_BACKLIGHT_CTL)
+ 		apple_backlight_init(hdev);
+@@ -951,7 +953,9 @@ static int apple_probe(struct hid_device *hdev,
+ 	return 0;
+ 
+ out_err:
+-	del_timer_sync(&asc->battery_timer);
++	if (quirks & APPLE_RDESC_BATTERY)
++		del_timer_sync(&asc->battery_timer);
++
+ 	hid_hw_stop(hdev);
+ 	return ret;
+ }
+@@ -960,7 +964,8 @@ static void apple_remove(struct hid_device *hdev)
+ {
+ 	struct apple_sc *asc = hid_get_drvdata(hdev);
+ 
+-	del_timer_sync(&asc->battery_timer);
++	if (asc->quirks & APPLE_RDESC_BATTERY)
++		del_timer_sync(&asc->battery_timer);
+ 
+ 	hid_hw_stop(hdev);
+ }
+diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
+index ec110dea87726d..542b3e86d56f4f 100644
+--- a/drivers/hid/hid-magicmouse.c
++++ b/drivers/hid/hid-magicmouse.c
+@@ -775,16 +775,30 @@ static void magicmouse_enable_mt_work(struct work_struct *work)
+ 		hid_err(msc->hdev, "unable to request touch data (%d)\n", ret);
+ }
+ 
++static bool is_usb_magicmouse2(__u32 vendor, __u32 product)
++{
++	if (vendor != USB_VENDOR_ID_APPLE)
++		return false;
++	return product == USB_DEVICE_ID_APPLE_MAGICMOUSE2;
++}
++
++static bool is_usb_magictrackpad2(__u32 vendor, __u32 product)
++{
++	if (vendor != USB_VENDOR_ID_APPLE)
++		return false;
++	return product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 ||
++	       product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC;
++}
++
+ static int magicmouse_fetch_battery(struct hid_device *hdev)
+ {
+ #ifdef CONFIG_HID_BATTERY_STRENGTH
+ 	struct hid_report_enum *report_enum;
+ 	struct hid_report *report;
+ 
+-	if (!hdev->battery || hdev->vendor != USB_VENDOR_ID_APPLE ||
+-	    (hdev->product != USB_DEVICE_ID_APPLE_MAGICMOUSE2 &&
+-	     hdev->product != USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 &&
+-	     hdev->product != USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC))
++	if (!hdev->battery ||
++	    (!is_usb_magicmouse2(hdev->vendor, hdev->product) &&
++	     !is_usb_magictrackpad2(hdev->vendor, hdev->product)))
+ 		return -1;
+ 
+ 	report_enum = &hdev->report_enum[hdev->battery_report_type];
+@@ -846,16 +860,17 @@ static int magicmouse_probe(struct hid_device *hdev,
+ 		return ret;
+ 	}
+ 
+-	timer_setup(&msc->battery_timer, magicmouse_battery_timer_tick, 0);
+-	mod_timer(&msc->battery_timer,
+-		  jiffies + msecs_to_jiffies(USB_BATTERY_TIMEOUT_MS));
+-	magicmouse_fetch_battery(hdev);
++	if (is_usb_magicmouse2(id->vendor, id->product) ||
++	    is_usb_magictrackpad2(id->vendor, id->product)) {
++		timer_setup(&msc->battery_timer, magicmouse_battery_timer_tick, 0);
++		mod_timer(&msc->battery_timer,
++			  jiffies + msecs_to_jiffies(USB_BATTERY_TIMEOUT_MS));
++		magicmouse_fetch_battery(hdev);
++	}
+ 
+-	if (id->vendor == USB_VENDOR_ID_APPLE &&
+-	    (id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2 ||
+-	     ((id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 ||
+-	       id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) &&
+-	      hdev->type != HID_TYPE_USBMOUSE)))
++	if (is_usb_magicmouse2(id->vendor, id->product) ||
++	    (is_usb_magictrackpad2(id->vendor, id->product) &&
++	     hdev->type != HID_TYPE_USBMOUSE))
+ 		return 0;
+ 
+ 	if (!msc->input) {
+@@ -911,7 +926,10 @@ static int magicmouse_probe(struct hid_device *hdev,
+ 
+ 	return 0;
+ err_stop_hw:
+-	del_timer_sync(&msc->battery_timer);
++	if (is_usb_magicmouse2(id->vendor, id->product) ||
++	    is_usb_magictrackpad2(id->vendor, id->product))
++		del_timer_sync(&msc->battery_timer);
++
+ 	hid_hw_stop(hdev);
+ 	return ret;
+ }
+@@ -922,7 +940,9 @@ static void magicmouse_remove(struct hid_device *hdev)
+ 
+ 	if (msc) {
+ 		cancel_delayed_work_sync(&msc->work);
+-		del_timer_sync(&msc->battery_timer);
++		if (is_usb_magicmouse2(hdev->vendor, hdev->product) ||
++		    is_usb_magictrackpad2(hdev->vendor, hdev->product))
++			del_timer_sync(&msc->battery_timer);
+ 	}
+ 
+ 	hid_hw_stop(hdev);
+@@ -939,10 +959,8 @@ static const __u8 *magicmouse_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ 	 *   0x05, 0x01,       // Usage Page (Generic Desktop)        0
+ 	 *   0x09, 0x02,       // Usage (Mouse)                       2
+ 	 */
+-	if (hdev->vendor == USB_VENDOR_ID_APPLE &&
+-	    (hdev->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2 ||
+-	     hdev->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 ||
+-	     hdev->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) &&
++	if ((is_usb_magicmouse2(hdev->vendor, hdev->product) ||
++	     is_usb_magictrackpad2(hdev->vendor, hdev->product)) &&
+ 	    *rsize == 83 && rdesc[46] == 0x84 && rdesc[58] == 0x85) {
+ 		hid_info(hdev,
+ 			 "fixing up magicmouse battery report descriptor\n");
+diff --git a/drivers/hwmon/emc2305.c b/drivers/hwmon/emc2305.c
+index 4d39fbd8376939..5b5fccac96353f 100644
+--- a/drivers/hwmon/emc2305.c
++++ b/drivers/hwmon/emc2305.c
+@@ -300,6 +300,12 @@ static int emc2305_set_single_tz(struct device *dev, int idx)
+ 		dev_err(dev, "Failed to register cooling device %s\n", emc2305_fan_name[idx]);
+ 		return PTR_ERR(data->cdev_data[cdev_idx].cdev);
+ 	}
++
++	if (data->cdev_data[cdev_idx].cur_state > 0)
++		/* Update pwm when temperature is above trips */
++		pwm = EMC2305_PWM_STATE2DUTY(data->cdev_data[cdev_idx].cur_state,
++					     data->max_state, EMC2305_FAN_MAX);
++
+ 	/* Set minimal PWM speed. */
+ 	if (data->pwm_separate) {
+ 		ret = emc2305_set_pwm(dev, pwm, cdev_idx);
+@@ -313,10 +319,10 @@ static int emc2305_set_single_tz(struct device *dev, int idx)
+ 		}
+ 	}
+ 	data->cdev_data[cdev_idx].cur_state =
+-		EMC2305_PWM_DUTY2STATE(data->pwm_min[cdev_idx], data->max_state,
++		EMC2305_PWM_DUTY2STATE(pwm, data->max_state,
+ 				       EMC2305_FAN_MAX);
+ 	data->cdev_data[cdev_idx].last_hwmon_state =
+-		EMC2305_PWM_DUTY2STATE(data->pwm_min[cdev_idx], data->max_state,
++		EMC2305_PWM_DUTY2STATE(pwm, data->max_state,
+ 				       EMC2305_FAN_MAX);
+ 	return 0;
+ }
+diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c
+index d2499f302b5083..f43067f6797e94 100644
+--- a/drivers/i2c/i2c-core-acpi.c
++++ b/drivers/i2c/i2c-core-acpi.c
+@@ -370,6 +370,7 @@ static const struct acpi_device_id i2c_acpi_force_100khz_device_ids[] = {
+ 	 * the device works without issues on Windows at what is expected to be
+ 	 * a 400KHz frequency. The root cause of the issue is not known.
+ 	 */
++	{ "DLL0945", 0 },
+ 	{ "ELAN06FA", 0 },
+ 	{}
+ };
+diff --git a/drivers/i3c/internals.h b/drivers/i3c/internals.h
+index 433f6088b7cec8..ce04aa4f269e09 100644
+--- a/drivers/i3c/internals.h
++++ b/drivers/i3c/internals.h
+@@ -9,6 +9,7 @@
+ #define I3C_INTERNALS_H
+ 
+ #include <linux/i3c/master.h>
++#include <linux/io.h>
+ 
+ void i3c_bus_normaluse_lock(struct i3c_bus *bus);
+ void i3c_bus_normaluse_unlock(struct i3c_bus *bus);
+diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
+index 7c1dc42b809bfc..c8e5c9291ea43f 100644
+--- a/drivers/i3c/master.c
++++ b/drivers/i3c/master.c
+@@ -1439,7 +1439,7 @@ static int i3c_master_retrieve_dev_info(struct i3c_dev_desc *dev)
+ 
+ 	if (dev->info.bcr & I3C_BCR_HDR_CAP) {
+ 		ret = i3c_master_gethdrcap_locked(master, &dev->info);
+-		if (ret)
++		if (ret && ret != -ENOTSUPP)
+ 			return ret;
+ 	}
+ 
+@@ -2471,6 +2471,8 @@ static int i3c_i2c_notifier_call(struct notifier_block *nb, unsigned long action
+ 	case BUS_NOTIFY_DEL_DEVICE:
+ 		ret = i3c_master_i2c_detach(adap, client);
+ 		break;
++	default:
++		ret = -EINVAL;
+ 	}
+ 	i3c_bus_maintenance_unlock(&master->bus);
+ 
+diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
+index 524ed143f875d3..4506e1cc4b65d6 100644
+--- a/drivers/idle/intel_idle.c
++++ b/drivers/idle/intel_idle.c
+@@ -1608,7 +1608,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
+ };
+ 
+ static const struct x86_cpu_id intel_mwait_ids[] __initconst = {
+-	X86_MATCH_VENDOR_FAM_FEATURE(INTEL, 6, X86_FEATURE_MWAIT, NULL),
++	X86_MATCH_VENDOR_FAM_FEATURE(INTEL, X86_FAMILY_ANY, X86_FEATURE_MWAIT, NULL),
+ 	{}
+ };
+ 
+diff --git a/drivers/iio/adc/ad7768-1.c b/drivers/iio/adc/ad7768-1.c
+index a9248a85466ea3..7956948166ab18 100644
+--- a/drivers/iio/adc/ad7768-1.c
++++ b/drivers/iio/adc/ad7768-1.c
+@@ -203,6 +203,24 @@ static int ad7768_spi_reg_write(struct ad7768_state *st,
+ 	return spi_write(st->spi, st->data.d8, 2);
+ }
+ 
++static int ad7768_send_sync_pulse(struct ad7768_state *st)
++{
++	/*
++	 * The datasheet specifies a minimum SYNC_IN pulse width of 1.5 × Tmclk,
++	 * where Tmclk is the MCLK period. The supported MCLK frequencies range
++	 * from 0.6 MHz to 17 MHz, which corresponds to a minimum SYNC_IN pulse
++	 * width of approximately 2.5 µs in the worst-case scenario (0.6 MHz).
++	 *
++	 * Add a delay to ensure the pulse width is always sufficient to
++	 * trigger synchronization.
++	 */
++	gpiod_set_value_cansleep(st->gpio_sync_in, 1);
++	fsleep(3);
++	gpiod_set_value_cansleep(st->gpio_sync_in, 0);
++
++	return 0;
++}
++
+ static int ad7768_set_mode(struct ad7768_state *st,
+ 			   enum ad7768_conv_mode mode)
+ {
+@@ -288,10 +306,7 @@ static int ad7768_set_dig_fil(struct ad7768_state *st,
+ 		return ret;
+ 
+ 	/* A sync-in pulse is required every time the filter dec rate changes */
+-	gpiod_set_value(st->gpio_sync_in, 1);
+-	gpiod_set_value(st->gpio_sync_in, 0);
+-
+-	return 0;
++	return ad7768_send_sync_pulse(st);
+ }
+ 
+ static int ad7768_set_freq(struct ad7768_state *st,
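
(For reference, the arithmetic behind the new ad7768 comment: the
minimum pulse width is t_min = 1.5 / f_MCLK, i.e. 1.5 / 0.6 MHz = 2.5 µs
at the slowest supported MCLK and about 1.5 / 17 MHz = 0.09 µs at the
fastest, so the fsleep(3) above covers the worst case with margin.)
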
+diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c
+index 3df1d4f6bc959e..39196a2862cf75 100644
+--- a/drivers/iio/adc/ad_sigma_delta.c
++++ b/drivers/iio/adc/ad_sigma_delta.c
+@@ -379,7 +379,7 @@ static int ad_sd_buffer_postenable(struct iio_dev *indio_dev)
+ 			return ret;
+ 	}
+ 
+-	samples_buf_size = ALIGN(slot * indio_dev->channels[0].scan_type.storagebits, 8);
++	samples_buf_size = ALIGN(slot * indio_dev->channels[0].scan_type.storagebits / 8, 8);
+ 	samples_buf_size += sizeof(int64_t);
+ 	samples_buf = devm_krealloc(&sigma_delta->spi->dev, sigma_delta->samples_buf,
+ 				    samples_buf_size, GFP_KERNEL);
+diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
+index f121899863034a..fef11a80647c49 100644
+--- a/drivers/infiniband/core/nldev.c
++++ b/drivers/infiniband/core/nldev.c
+@@ -1468,10 +1468,11 @@ static const struct nldev_fill_res_entry fill_entries[RDMA_RESTRACK_MAX] = {
+ 
+ };
+ 
+-static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
+-			       struct netlink_ext_ack *extack,
+-			       enum rdma_restrack_type res_type,
+-			       res_fill_func_t fill_func)
++static noinline_for_stack int
++res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
++		    struct netlink_ext_ack *extack,
++		    enum rdma_restrack_type res_type,
++		    res_fill_func_t fill_func)
+ {
+ 	const struct nldev_fill_res_entry *fe = &fill_entries[res_type];
+ 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
+@@ -2256,10 +2257,10 @@ static int nldev_stat_del_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
+ 	return ret;
+ }
+ 
+-static int stat_get_doit_default_counter(struct sk_buff *skb,
+-					 struct nlmsghdr *nlh,
+-					 struct netlink_ext_ack *extack,
+-					 struct nlattr *tb[])
++static noinline_for_stack int
++stat_get_doit_default_counter(struct sk_buff *skb, struct nlmsghdr *nlh,
++			      struct netlink_ext_ack *extack,
++			      struct nlattr *tb[])
+ {
+ 	struct rdma_hw_stats *stats;
+ 	struct nlattr *table_attr;
+@@ -2349,8 +2350,9 @@ static int stat_get_doit_default_counter(struct sk_buff *skb,
+ 	return ret;
+ }
+ 
+-static int stat_get_doit_qp(struct sk_buff *skb, struct nlmsghdr *nlh,
+-			    struct netlink_ext_ack *extack, struct nlattr *tb[])
++static noinline_for_stack int
++stat_get_doit_qp(struct sk_buff *skb, struct nlmsghdr *nlh,
++		 struct netlink_ext_ack *extack, struct nlattr *tb[])
+ 
+ {
+ 	static enum rdma_nl_counter_mode mode;
+diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+index 0b21d8b5d96296..4a3ce61a3bba6a 100644
+--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
++++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+@@ -4642,7 +4642,7 @@ static int UVERBS_HANDLER(BNXT_RE_METHOD_GET_TOGGLE_MEM)(struct uverbs_attr_bund
+ 		return err;
+ 
+ 	err = uverbs_copy_to(attrs, BNXT_RE_TOGGLE_MEM_MMAP_OFFSET,
+-			     &offset, sizeof(length));
++			     &offset, sizeof(offset));
+ 	if (err)
+ 		return err;
+ 
+diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
+index 7ead8746b79b38..f2c530ab85a563 100644
+--- a/drivers/infiniband/hw/hfi1/affinity.c
++++ b/drivers/infiniband/hw/hfi1/affinity.c
+@@ -964,31 +964,35 @@ static void find_hw_thread_mask(uint hw_thread_no, cpumask_var_t hw_thread_mask,
+ 				struct hfi1_affinity_node_list *affinity)
+ {
+ 	int possible, curr_cpu, i;
+-	uint num_cores_per_socket = node_affinity.num_online_cpus /
++	uint num_cores_per_socket;
++
++	cpumask_copy(hw_thread_mask, &affinity->proc.mask);
++
++	if (affinity->num_core_siblings == 0)
++		return;
++
++	num_cores_per_socket = node_affinity.num_online_cpus /
+ 					affinity->num_core_siblings /
+ 						node_affinity.num_online_nodes;
+ 
+-	cpumask_copy(hw_thread_mask, &affinity->proc.mask);
+-	if (affinity->num_core_siblings > 0) {
+-		/* Removing other siblings not needed for now */
+-		possible = cpumask_weight(hw_thread_mask);
+-		curr_cpu = cpumask_first(hw_thread_mask);
+-		for (i = 0;
+-		     i < num_cores_per_socket * node_affinity.num_online_nodes;
+-		     i++)
+-			curr_cpu = cpumask_next(curr_cpu, hw_thread_mask);
+-
+-		for (; i < possible; i++) {
+-			cpumask_clear_cpu(curr_cpu, hw_thread_mask);
+-			curr_cpu = cpumask_next(curr_cpu, hw_thread_mask);
+-		}
++	/* Removing other siblings not needed for now */
++	possible = cpumask_weight(hw_thread_mask);
++	curr_cpu = cpumask_first(hw_thread_mask);
++	for (i = 0;
++	     i < num_cores_per_socket * node_affinity.num_online_nodes;
++	     i++)
++		curr_cpu = cpumask_next(curr_cpu, hw_thread_mask);
+ 
+-		/* Identifying correct HW threads within physical cores */
+-		cpumask_shift_left(hw_thread_mask, hw_thread_mask,
+-				   num_cores_per_socket *
+-				   node_affinity.num_online_nodes *
+-				   hw_thread_no);
++	for (; i < possible; i++) {
++		cpumask_clear_cpu(curr_cpu, hw_thread_mask);
++		curr_cpu = cpumask_next(curr_cpu, hw_thread_mask);
+ 	}
++
++	/* Identifying correct HW threads within physical cores */
++	cpumask_shift_left(hw_thread_mask, hw_thread_mask,
++			   num_cores_per_socket *
++			   node_affinity.num_online_nodes *
++			   hw_thread_no);
+ }
+ 
+ int hfi1_get_proc_affinity(int node)
+diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c
+index a034264c566986..43ff1afd3d0185 100644
+--- a/drivers/infiniband/sw/siw/siw_qp_tx.c
++++ b/drivers/infiniband/sw/siw/siw_qp_tx.c
+@@ -334,18 +334,17 @@ static int siw_tcp_sendpages(struct socket *s, struct page **page, int offset,
+ 		if (!sendpage_ok(page[i]))
+ 			msg.msg_flags &= ~MSG_SPLICE_PAGES;
+ 		bvec_set_page(&bvec, page[i], bytes, offset);
+-		iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);
++		iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, bytes);
+ 
+ try_page_again:
+ 		lock_sock(sk);
+-		rv = tcp_sendmsg_locked(sk, &msg, size);
++		rv = tcp_sendmsg_locked(sk, &msg, bytes);
+ 		release_sock(sk);
+ 
+ 		if (rv > 0) {
+ 			size -= rv;
+ 			sent += rv;
+ 			if (rv != bytes) {
+-				offset += rv;
+ 				bytes -= rv;
+ 				goto try_page_again;
+ 			}
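
(Aside on the siw hunk: each tcp_sendmsg_locked() call is now capped at
the current bvec's byte count rather than the total remaining size, and
the offset adjustment is dropped because the message iterator has
already advanced past the sent bytes. The generic short-write retry
loop, sketched with plain send(); send_all is an illustrative name.)

#include <sys/socket.h>
#include <sys/types.h>

/* Send exactly len bytes, retrying after short sends; progress is
 * tracked once, in done, and never applied twice. */
static ssize_t send_all(int fd, const char *buf, size_t len)
{
	size_t done = 0;

	while (done < len) {
		ssize_t rv = send(fd, buf + done, len - done, 0);

		if (rv <= 0)
			return rv;
		done += rv;
	}
	return (ssize_t)done;
}
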
+diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+index 601fb878d0ef25..0c35a235ab6d05 100644
+--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
++++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+@@ -258,6 +258,7 @@ static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
+ 	{ .compatible = "qcom,sdm670-mdss" },
+ 	{ .compatible = "qcom,sdm845-mdss" },
+ 	{ .compatible = "qcom,sdm845-mss-pil" },
++	{ .compatible = "qcom,sm6115-mdss" },
+ 	{ .compatible = "qcom,sm6350-mdss" },
+ 	{ .compatible = "qcom,sm6375-mdss" },
+ 	{ .compatible = "qcom,sm8150-mdss" },
+diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
+index af4e6c1e55db6f..b300f72cf01e68 100644
+--- a/drivers/iommu/intel/iommu.c
++++ b/drivers/iommu/intel/iommu.c
+@@ -1957,6 +1957,18 @@ static bool dev_is_real_dma_subdevice(struct device *dev)
+ 	       pci_real_dma_dev(to_pci_dev(dev)) != to_pci_dev(dev);
+ }
+ 
++static bool domain_need_iotlb_sync_map(struct dmar_domain *domain,
++				       struct intel_iommu *iommu)
++{
++	if (cap_caching_mode(iommu->cap) && !domain->use_first_level)
++		return true;
++
++	if (rwbf_quirk || cap_rwbf(iommu->cap))
++		return true;
++
++	return false;
++}
++
+ static int dmar_domain_attach_device(struct dmar_domain *domain,
+ 				     struct device *dev)
+ {
+@@ -1994,6 +2006,8 @@ static int dmar_domain_attach_device(struct dmar_domain *domain,
+ 	if (ret)
+ 		goto out_block_translation;
+ 
++	domain->iotlb_sync_map |= domain_need_iotlb_sync_map(domain, iommu);
++
+ 	return 0;
+ 
+ out_block_translation:
+@@ -4278,7 +4292,10 @@ static bool risky_device(struct pci_dev *pdev)
+ static int intel_iommu_iotlb_sync_map(struct iommu_domain *domain,
+ 				      unsigned long iova, size_t size)
+ {
+-	cache_tag_flush_range_np(to_dmar_domain(domain), iova, iova + size - 1);
++	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
++
++	if (dmar_domain->iotlb_sync_map)
++		cache_tag_flush_range_np(dmar_domain, iova, iova + size - 1);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
+index 6f16eeb2ac6554..f521155fb793b8 100644
+--- a/drivers/iommu/intel/iommu.h
++++ b/drivers/iommu/intel/iommu.h
+@@ -614,6 +614,9 @@ struct dmar_domain {
+ 	u8 has_mappings:1;		/* Has mappings configured through
+ 					 * iommu_map() interface.
+ 					 */
++	u8 iotlb_sync_map:1;		/* Need to flush IOTLB cache or write
++					 * buffer when creating mappings.
++					 */
+ 
+ 	spinlock_t lock;		/* Protect device tracking lists */
+ 	struct list_head devices;	/* all devices' list */
+diff --git a/drivers/iommu/iommufd/io_pagetable.c b/drivers/iommu/iommufd/io_pagetable.c
+index 4bf7ccd39d465c..067222b238b7e1 100644
+--- a/drivers/iommu/iommufd/io_pagetable.c
++++ b/drivers/iommu/iommufd/io_pagetable.c
+@@ -70,36 +70,45 @@ struct iopt_area *iopt_area_contig_next(struct iopt_area_contig_iter *iter)
+ 	return iter->area;
+ }
+ 
+-static bool __alloc_iova_check_hole(struct interval_tree_double_span_iter *span,
+-				    unsigned long length,
+-				    unsigned long iova_alignment,
+-				    unsigned long page_offset)
++static bool __alloc_iova_check_range(unsigned long *start, unsigned long last,
++				     unsigned long length,
++				     unsigned long iova_alignment,
++				     unsigned long page_offset)
+ {
+-	if (span->is_used || span->last_hole - span->start_hole < length - 1)
++	unsigned long aligned_start;
++
++	/* ALIGN_UP() */
++	if (check_add_overflow(*start, iova_alignment - 1, &aligned_start))
+ 		return false;
++	aligned_start &= ~(iova_alignment - 1);
++	aligned_start |= page_offset;
+ 
+-	span->start_hole = ALIGN(span->start_hole, iova_alignment) |
+-			   page_offset;
+-	if (span->start_hole > span->last_hole ||
+-	    span->last_hole - span->start_hole < length - 1)
++	if (aligned_start >= last || last - aligned_start < length - 1)
+ 		return false;
++	*start = aligned_start;
+ 	return true;
+ }
+ 
+-static bool __alloc_iova_check_used(struct interval_tree_span_iter *span,
++static bool __alloc_iova_check_hole(struct interval_tree_double_span_iter *span,
+ 				    unsigned long length,
+ 				    unsigned long iova_alignment,
+ 				    unsigned long page_offset)
+ {
+-	if (span->is_hole || span->last_used - span->start_used < length - 1)
++	if (span->is_used)
+ 		return false;
++	return __alloc_iova_check_range(&span->start_hole, span->last_hole,
++					length, iova_alignment, page_offset);
++}
+ 
+-	span->start_used = ALIGN(span->start_used, iova_alignment) |
+-			   page_offset;
+-	if (span->start_used > span->last_used ||
+-	    span->last_used - span->start_used < length - 1)
++static bool __alloc_iova_check_used(struct interval_tree_span_iter *span,
++				    unsigned long length,
++				    unsigned long iova_alignment,
++				    unsigned long page_offset)
++{
++	if (span->is_hole)
+ 		return false;
+-	return true;
++	return __alloc_iova_check_range(&span->start_used, span->last_used,
++					length, iova_alignment, page_offset);
+ }
+ 
+ /*
+@@ -696,8 +705,10 @@ static int iopt_unmap_iova_range(struct io_pagetable *iopt, unsigned long start,
+ 			iommufd_access_notify_unmap(iopt, area_first, length);
+ 			/* Something is not responding to unmap requests. */
+ 			tries++;
+-			if (WARN_ON(tries > 100))
+-				return -EDEADLOCK;
++			if (WARN_ON(tries > 100)) {
++				rc = -EDEADLOCK;
++				goto out_unmapped;
++			}
+ 			goto again;
+ 		}
+ 
+@@ -719,6 +730,7 @@ static int iopt_unmap_iova_range(struct io_pagetable *iopt, unsigned long start,
+ out_unlock_iova:
+ 	up_write(&iopt->iova_rwsem);
+ 	up_read(&iopt->domains_rwsem);
++out_unmapped:
+ 	if (unmapped)
+ 		*unmapped = unmapped_bytes;
+ 	return rc;
+diff --git a/drivers/leds/flash/leds-qcom-flash.c b/drivers/leds/flash/leds-qcom-flash.c
+index 41ce034f700ee5..07a83bb2dfdf62 100644
+--- a/drivers/leds/flash/leds-qcom-flash.c
++++ b/drivers/leds/flash/leds-qcom-flash.c
+@@ -117,7 +117,7 @@ enum {
+ 	REG_MAX_COUNT,
+ };
+ 
+-static struct reg_field mvflash_3ch_regs[REG_MAX_COUNT] = {
++static const struct reg_field mvflash_3ch_regs[REG_MAX_COUNT] = {
+ 	REG_FIELD(0x08, 0, 7),			/* status1	*/
+ 	REG_FIELD(0x09, 0, 7),                  /* status2	*/
+ 	REG_FIELD(0x0a, 0, 7),                  /* status3	*/
+@@ -132,7 +132,7 @@ static struct reg_field mvflash_3ch_regs[REG_MAX_COUNT] = {
+ 	REG_FIELD(0x58, 0, 2),			/* therm_thrsh3 */
+ };
+ 
+-static struct reg_field mvflash_4ch_regs[REG_MAX_COUNT] = {
++static const struct reg_field mvflash_4ch_regs[REG_MAX_COUNT] = {
+ 	REG_FIELD(0x06, 0, 7),			/* status1	*/
+ 	REG_FIELD(0x07, 0, 6),			/* status2	*/
+ 	REG_FIELD(0x09, 0, 7),			/* status3	*/
+@@ -855,11 +855,17 @@ static int qcom_flash_led_probe(struct platform_device *pdev)
+ 	if (val == FLASH_SUBTYPE_3CH_PM8150_VAL || val == FLASH_SUBTYPE_3CH_PMI8998_VAL) {
+ 		flash_data->hw_type = QCOM_MVFLASH_3CH;
+ 		flash_data->max_channels = 3;
+-		regs = mvflash_3ch_regs;
++		regs = devm_kmemdup(dev, mvflash_3ch_regs, sizeof(mvflash_3ch_regs),
++				    GFP_KERNEL);
++		if (!regs)
++			return -ENOMEM;
+ 	} else if (val == FLASH_SUBTYPE_4CH_VAL) {
+ 		flash_data->hw_type = QCOM_MVFLASH_4CH;
+ 		flash_data->max_channels = 4;
+-		regs = mvflash_4ch_regs;
++		regs = devm_kmemdup(dev, mvflash_4ch_regs, sizeof(mvflash_4ch_regs),
++				    GFP_KERNEL);
++		if (!regs)
++			return -ENOMEM;
+ 
+ 		rc = regmap_read(regmap, reg_base + FLASH_REVISION_REG, &val);
+ 		if (rc < 0) {
+@@ -881,6 +887,7 @@ static int qcom_flash_led_probe(struct platform_device *pdev)
+ 		dev_err(dev, "Failed to allocate regmap field, rc=%d\n", rc);
+ 		return rc;
+ 	}
++	devm_kfree(dev, regs); /* devm_regmap_field_bulk_alloc() makes copies */
+ 
+ 	platform_set_drvdata(pdev, flash_data);
+ 	mutex_init(&flash_data->lock);
+diff --git a/drivers/leds/leds-lp50xx.c b/drivers/leds/leds-lp50xx.c
+index 175d4b06659bbb..e9eb0ad6751d5f 100644
+--- a/drivers/leds/leds-lp50xx.c
++++ b/drivers/leds/leds-lp50xx.c
+@@ -481,6 +481,7 @@ static int lp50xx_probe_dt(struct lp50xx *priv)
+ 		}
+ 
+ 		fwnode_for_each_child_node(child, led_node) {
++			int multi_index;
+ 			ret = fwnode_property_read_u32(led_node, "color",
+ 						       &color_id);
+ 			if (ret) {
+@@ -488,8 +489,16 @@ static int lp50xx_probe_dt(struct lp50xx *priv)
+ 				dev_err(priv->dev, "Cannot read color\n");
+ 				goto child_out;
+ 			}
++			ret = fwnode_property_read_u32(led_node, "reg", &multi_index);
++			if (ret != 0) {
++				dev_err(priv->dev, "reg must be set\n");
++				return -EINVAL;
++			} else if (multi_index >= LP50XX_LEDS_PER_MODULE) {
++				dev_err(priv->dev, "reg %i out of range\n", multi_index);
++				return -EINVAL;
++			}
+ 
+-			mc_led_info[num_colors].color_index = color_id;
++			mc_led_info[multi_index].color_index = color_id;
+ 			num_colors++;
+ 		}
+ 
+diff --git a/drivers/leds/trigger/ledtrig-netdev.c b/drivers/leds/trigger/ledtrig-netdev.c
+index 356a55ced2c289..4b0863db901a9e 100644
+--- a/drivers/leds/trigger/ledtrig-netdev.c
++++ b/drivers/leds/trigger/ledtrig-netdev.c
+@@ -68,7 +68,6 @@ struct led_netdev_data {
+ 	unsigned int last_activity;
+ 
+ 	unsigned long mode;
+-	unsigned long blink_delay;
+ 	int link_speed;
+ 	__ETHTOOL_DECLARE_LINK_MODE_MASK(supported_link_modes);
+ 	u8 duplex;
+@@ -87,10 +86,6 @@ static void set_baseline_state(struct led_netdev_data *trigger_data)
+ 	/* Already validated, hw control is possible with the requested mode */
+ 	if (trigger_data->hw_control) {
+ 		led_cdev->hw_control_set(led_cdev, trigger_data->mode);
+-		if (led_cdev->blink_set) {
+-			led_cdev->blink_set(led_cdev, &trigger_data->blink_delay,
+-					    &trigger_data->blink_delay);
+-		}
+ 
+ 		return;
+ 	}
+@@ -459,11 +454,10 @@ static ssize_t interval_store(struct device *dev,
+ 			      size_t size)
+ {
+ 	struct led_netdev_data *trigger_data = led_trigger_get_drvdata(dev);
+-	struct led_classdev *led_cdev = trigger_data->led_cdev;
+ 	unsigned long value;
+ 	int ret;
+ 
+-	if (trigger_data->hw_control && !led_cdev->blink_set)
++	if (trigger_data->hw_control)
+ 		return -EINVAL;
+ 
+ 	ret = kstrtoul(buf, 0, &value);
+@@ -472,13 +466,9 @@ static ssize_t interval_store(struct device *dev,
+ 
+ 	/* impose some basic bounds on the timer interval */
+ 	if (value >= 5 && value <= 10000) {
+-		if (trigger_data->hw_control) {
+-			trigger_data->blink_delay = value;
+-		} else {
+-			cancel_delayed_work_sync(&trigger_data->work);
++		cancel_delayed_work_sync(&trigger_data->work);
+ 
+-			atomic_set(&trigger_data->interval, msecs_to_jiffies(value));
+-		}
++		atomic_set(&trigger_data->interval, msecs_to_jiffies(value));
+ 		set_baseline_state(trigger_data);	/* resets timer */
+ 	}
+ 
+diff --git a/drivers/md/dm-ps-historical-service-time.c b/drivers/md/dm-ps-historical-service-time.c
+index b49e10d76d0302..2c8626a83de437 100644
+--- a/drivers/md/dm-ps-historical-service-time.c
++++ b/drivers/md/dm-ps-historical-service-time.c
+@@ -541,8 +541,10 @@ static int __init dm_hst_init(void)
+ {
+ 	int r = dm_register_path_selector(&hst_ps);
+ 
+-	if (r < 0)
++	if (r < 0) {
+ 		DMERR("register failed %d", r);
++		return r;
++	}
+ 
+ 	DMINFO("version " HST_VERSION " loaded");
+ 
+diff --git a/drivers/md/dm-ps-queue-length.c b/drivers/md/dm-ps-queue-length.c
+index e305f05ad1e5e8..eb543e6431e038 100644
+--- a/drivers/md/dm-ps-queue-length.c
++++ b/drivers/md/dm-ps-queue-length.c
+@@ -260,8 +260,10 @@ static int __init dm_ql_init(void)
+ {
+ 	int r = dm_register_path_selector(&ql_ps);
+ 
+-	if (r < 0)
++	if (r < 0) {
+ 		DMERR("register failed %d", r);
++		return r;
++	}
+ 
+ 	DMINFO("version " QL_VERSION " loaded");
+ 
+diff --git a/drivers/md/dm-ps-round-robin.c b/drivers/md/dm-ps-round-robin.c
+index d1745b123dc19c..66a15ac0c22c8b 100644
+--- a/drivers/md/dm-ps-round-robin.c
++++ b/drivers/md/dm-ps-round-robin.c
+@@ -220,8 +220,10 @@ static int __init dm_rr_init(void)
+ {
+ 	int r = dm_register_path_selector(&rr_ps);
+ 
+-	if (r < 0)
++	if (r < 0) {
+ 		DMERR("register failed %d", r);
++		return r;
++	}
+ 
+ 	DMINFO("version " RR_VERSION " loaded");
+ 
+diff --git a/drivers/md/dm-ps-service-time.c b/drivers/md/dm-ps-service-time.c
+index 969d31c40272e2..f8c43aecdb27ad 100644
+--- a/drivers/md/dm-ps-service-time.c
++++ b/drivers/md/dm-ps-service-time.c
+@@ -341,8 +341,10 @@ static int __init dm_st_init(void)
+ {
+ 	int r = dm_register_path_selector(&st_ps);
+ 
+-	if (r < 0)
++	if (r < 0) {
+ 		DMERR("register failed %d", r);
++		return r;
++	}
+ 
+ 	DMINFO("version " ST_VERSION " loaded");
+ 
+diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
+index 4112071de0be0c..c68dc1653cfd1b 100644
+--- a/drivers/md/dm-stripe.c
++++ b/drivers/md/dm-stripe.c
+@@ -459,6 +459,7 @@ static void stripe_io_hints(struct dm_target *ti,
+ 	struct stripe_c *sc = ti->private;
+ 	unsigned int chunk_size = sc->chunk_size << SECTOR_SHIFT;
+ 
++	limits->chunk_sectors = sc->chunk_size;
+ 	limits->io_min = chunk_size;
+ 	limits->io_opt = chunk_size * sc->stripes;
+ }
+diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
+index e45cffdd419a8d..20b8f560a2da50 100644
+--- a/drivers/md/dm-table.c
++++ b/drivers/md/dm-table.c
+@@ -894,17 +894,17 @@ static bool dm_table_supports_dax(struct dm_table *t,
+ 	return true;
+ }
+ 
+-static int device_is_rq_stackable(struct dm_target *ti, struct dm_dev *dev,
+-				  sector_t start, sector_t len, void *data)
++static int device_is_not_rq_stackable(struct dm_target *ti, struct dm_dev *dev,
++				      sector_t start, sector_t len, void *data)
+ {
+ 	struct block_device *bdev = dev->bdev;
+ 	struct request_queue *q = bdev_get_queue(bdev);
+ 
+ 	/* request-based cannot stack on partitions! */
+ 	if (bdev_is_partition(bdev))
+-		return false;
++		return true;
+ 
+-	return queue_is_mq(q);
++	return !queue_is_mq(q);
+ }
+ 
+ static int dm_table_determine_type(struct dm_table *t)
+@@ -1000,7 +1000,7 @@ static int dm_table_determine_type(struct dm_table *t)
+ 
+ 	/* Non-request-stackable devices can't be used for request-based dm */
+ 	if (!ti->type->iterate_devices ||
+-	    !ti->type->iterate_devices(ti, device_is_rq_stackable, NULL)) {
++	    ti->type->iterate_devices(ti, device_is_not_rq_stackable, NULL)) {
+ 		DMERR("table load rejected: including non-request-stackable devices");
+ 		return -EINVAL;
+ 	}
+diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
+index 6141fc25d8421a..c38bd6e4c27377 100644
+--- a/drivers/md/dm-zoned-target.c
++++ b/drivers/md/dm-zoned-target.c
+@@ -1061,7 +1061,7 @@ static int dmz_iterate_devices(struct dm_target *ti,
+ 	struct dmz_target *dmz = ti->private;
+ 	unsigned int zone_nr_sectors = dmz_zone_nr_sectors(dmz->metadata);
+ 	sector_t capacity;
+-	int i, r;
++	int i, r = 0;
+ 
+ 	for (i = 0; i < dmz->nr_ddevs; i++) {
+ 		capacity = dmz->dev[i].capacity & ~(zone_nr_sectors - 1);
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index 92e5a233f51607..c5dcd632404cce 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -1788,19 +1788,35 @@ static void init_clone_info(struct clone_info *ci, struct dm_io *io,
+ }
+ 
+ #ifdef CONFIG_BLK_DEV_ZONED
+-static inline bool dm_zone_bio_needs_split(struct mapped_device *md,
+-					   struct bio *bio)
++static inline bool dm_zone_bio_needs_split(struct bio *bio)
+ {
+ 	/*
+-	 * For mapped device that need zone append emulation, we must
+-	 * split any large BIO that straddles zone boundaries.
++	 * Special case the zone operations that cannot or should not be split.
+ 	 */
+-	return dm_emulate_zone_append(md) && bio_straddles_zones(bio) &&
+-		!bio_flagged(bio, BIO_ZONE_WRITE_PLUGGING);
++	switch (bio_op(bio)) {
++	case REQ_OP_ZONE_APPEND:
++	case REQ_OP_ZONE_FINISH:
++	case REQ_OP_ZONE_RESET:
++	case REQ_OP_ZONE_RESET_ALL:
++		return false;
++	default:
++		break;
++	}
++
++	/*
++	 * When mapped devices use the block layer zone write plugging, we must
++	 * split any large BIO to the mapped device limits to not submit BIOs
++	 * that span zone boundaries and to avoid potential deadlocks with
++	 * queue freeze operations.
++	 */
++	return bio_needs_zone_write_plugging(bio) || bio_straddles_zones(bio);
+ }
++
+ static inline bool dm_zone_plug_bio(struct mapped_device *md, struct bio *bio)
+ {
+-	return dm_emulate_zone_append(md) && blk_zone_plug_bio(bio, 0);
++	if (!bio_needs_zone_write_plugging(bio))
++		return false;
++	return blk_zone_plug_bio(bio, 0);
+ }
+ 
+ static blk_status_t __send_zone_reset_all_emulated(struct clone_info *ci,
+@@ -1916,8 +1932,7 @@ static blk_status_t __send_zone_reset_all(struct clone_info *ci)
+ }
+ 
+ #else
+-static inline bool dm_zone_bio_needs_split(struct mapped_device *md,
+-					   struct bio *bio)
++static inline bool dm_zone_bio_needs_split(struct bio *bio)
+ {
+ 	return false;
+ }
+@@ -1944,9 +1959,7 @@ static void dm_split_and_process_bio(struct mapped_device *md,
+ 
+ 	is_abnormal = is_abnormal_io(bio);
+ 	if (static_branch_unlikely(&zoned_enabled)) {
+-		/* Special case REQ_OP_ZONE_RESET_ALL as it cannot be split. */
+-		need_split = (bio_op(bio) != REQ_OP_ZONE_RESET_ALL) &&
+-			(is_abnormal || dm_zone_bio_needs_split(md, bio));
++		need_split = is_abnormal || dm_zone_bio_needs_split(bio);
+ 	} else {
+ 		need_split = is_abnormal;
+ 	}
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index 5cdc599fcad3ce..7515a98001ca7f 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -3968,6 +3968,7 @@ static int raid10_set_queue_limits(struct mddev *mddev)
+ 	md_init_stacking_limits(&lim);
+ 	lim.max_write_zeroes_sectors = 0;
+ 	lim.io_min = mddev->chunk_sectors << 9;
++	lim.chunk_sectors = mddev->chunk_sectors;
+ 	lim.io_opt = lim.io_min * raid10_nr_stripes(conf);
+ 	err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
+ 	if (err)
+diff --git a/drivers/media/dvb-frontends/dib7000p.c b/drivers/media/dvb-frontends/dib7000p.c
+index c5582d4fa5be85..40c5b1dc7d91a2 100644
+--- a/drivers/media/dvb-frontends/dib7000p.c
++++ b/drivers/media/dvb-frontends/dib7000p.c
+@@ -2193,6 +2193,8 @@ static int w7090p_tuner_write_serpar(struct i2c_adapter *i2c_adap, struct i2c_ms
+ 	struct dib7000p_state *state = i2c_get_adapdata(i2c_adap);
+ 	u8 n_overflow = 1;
+ 	u16 i = 1000;
++	if (msg[0].len < 3)
++		return -EOPNOTSUPP;
+ 	u16 serpar_num = msg[0].buf[0];
+ 
+ 	while (n_overflow == 1 && i) {
+@@ -2212,6 +2214,8 @@ static int w7090p_tuner_read_serpar(struct i2c_adapter *i2c_adap, struct i2c_msg
+ 	struct dib7000p_state *state = i2c_get_adapdata(i2c_adap);
+ 	u8 n_overflow = 1, n_empty = 1;
+ 	u16 i = 1000;
++	if (msg[0].len < 1 || msg[1].len < 2)
++		return -EOPNOTSUPP;
+ 	u16 serpar_num = msg[0].buf[0];
+ 	u16 read_word;
+ 
+@@ -2256,8 +2260,12 @@ static int dib7090p_rw_on_apb(struct i2c_adapter *i2c_adap,
+ 	u16 word;
+ 
+ 	if (num == 1) {		/* write */
++		if (msg[0].len < 3)
++			return -EOPNOTSUPP;
+ 		dib7000p_write_word(state, apb_address, ((msg[0].buf[1] << 8) | (msg[0].buf[2])));
+ 	} else {
++		if (msg[1].len < 2)
++			return -EOPNOTSUPP;
+ 		word = dib7000p_read_word(state, apb_address);
+ 		msg[1].buf[0] = (word >> 8) & 0xff;
+ 		msg[1].buf[1] = (word) & 0xff;
+diff --git a/drivers/media/i2c/hi556.c b/drivers/media/i2c/hi556.c
+index 0e89aff9c664da..3c84cf07275f4c 100644
+--- a/drivers/media/i2c/hi556.c
++++ b/drivers/media/i2c/hi556.c
+@@ -1321,7 +1321,12 @@ static int hi556_resume(struct device *dev)
+ 		return ret;
+ 	}
+ 
+-	gpiod_set_value_cansleep(hi556->reset_gpio, 0);
++	if (hi556->reset_gpio) {
++		/* Assert reset for at least 2ms on back to back off-on */
++		usleep_range(2000, 2200);
++		gpiod_set_value_cansleep(hi556->reset_gpio, 0);
++	}
++
+ 	usleep_range(5000, 5500);
+ 	return 0;
+ }
+diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
+index 344a670e732fa5..d1306f39fa135d 100644
+--- a/drivers/media/i2c/tc358743.c
++++ b/drivers/media/i2c/tc358743.c
+@@ -110,7 +110,7 @@ static inline struct tc358743_state *to_state(struct v4l2_subdev *sd)
+ 
+ /* --------------- I2C --------------- */
+ 
+-static void i2c_rd(struct v4l2_subdev *sd, u16 reg, u8 *values, u32 n)
++static int i2c_rd(struct v4l2_subdev *sd, u16 reg, u8 *values, u32 n)
+ {
+ 	struct tc358743_state *state = to_state(sd);
+ 	struct i2c_client *client = state->i2c_client;
+@@ -136,6 +136,7 @@ static void i2c_rd(struct v4l2_subdev *sd, u16 reg, u8 *values, u32 n)
+ 		v4l2_err(sd, "%s: reading register 0x%x from 0x%x failed: %d\n",
+ 				__func__, reg, client->addr, err);
+ 	}
++	return err != ARRAY_SIZE(msgs);
+ }
+ 
+ static void i2c_wr(struct v4l2_subdev *sd, u16 reg, u8 *values, u32 n)
+@@ -192,15 +193,24 @@ static void i2c_wr(struct v4l2_subdev *sd, u16 reg, u8 *values, u32 n)
+ 	}
+ }
+ 
+-static noinline u32 i2c_rdreg(struct v4l2_subdev *sd, u16 reg, u32 n)
++static noinline u32 i2c_rdreg_err(struct v4l2_subdev *sd, u16 reg, u32 n,
++				  int *err)
+ {
++	int error;
+ 	__le32 val = 0;
+ 
+-	i2c_rd(sd, reg, (u8 __force *)&val, n);
++	error = i2c_rd(sd, reg, (u8 __force *)&val, n);
++	if (err)
++		*err = error;
+ 
+ 	return le32_to_cpu(val);
+ }
+ 
++static inline u32 i2c_rdreg(struct v4l2_subdev *sd, u16 reg, u32 n)
++{
++	return i2c_rdreg_err(sd, reg, n, NULL);
++}
++
+ static noinline void i2c_wrreg(struct v4l2_subdev *sd, u16 reg, u32 val, u32 n)
+ {
+ 	__le32 raw = cpu_to_le32(val);
+@@ -229,6 +239,13 @@ static u16 i2c_rd16(struct v4l2_subdev *sd, u16 reg)
+ 	return i2c_rdreg(sd, reg, 2);
+ }
+ 
++static int i2c_rd16_err(struct v4l2_subdev *sd, u16 reg, u16 *value)
++{
++	int err;
++	*value = i2c_rdreg_err(sd, reg, 2, &err);
++	return err;
++}
++
+ static void i2c_wr16(struct v4l2_subdev *sd, u16 reg, u16 val)
+ {
+ 	i2c_wrreg(sd, reg, val, 2);
+@@ -1660,12 +1677,23 @@ static int tc358743_enum_mbus_code(struct v4l2_subdev *sd,
+ 	return 0;
+ }
+ 
++static u32 tc358743_g_colorspace(u32 code)
++{
++	switch (code) {
++	case MEDIA_BUS_FMT_RGB888_1X24:
++		return V4L2_COLORSPACE_SRGB;
++	case MEDIA_BUS_FMT_UYVY8_1X16:
++		return V4L2_COLORSPACE_SMPTE170M;
++	default:
++		return 0;
++	}
++}
++
+ static int tc358743_get_fmt(struct v4l2_subdev *sd,
+ 		struct v4l2_subdev_state *sd_state,
+ 		struct v4l2_subdev_format *format)
+ {
+ 	struct tc358743_state *state = to_state(sd);
+-	u8 vi_rep = i2c_rd8(sd, VI_REP);
+ 
+ 	if (format->pad != 0)
+ 		return -EINVAL;
+@@ -1675,23 +1703,7 @@ static int tc358743_get_fmt(struct v4l2_subdev *sd,
+ 	format->format.height = state->timings.bt.height;
+ 	format->format.field = V4L2_FIELD_NONE;
+ 
+-	switch (vi_rep & MASK_VOUT_COLOR_SEL) {
+-	case MASK_VOUT_COLOR_RGB_FULL:
+-	case MASK_VOUT_COLOR_RGB_LIMITED:
+-		format->format.colorspace = V4L2_COLORSPACE_SRGB;
+-		break;
+-	case MASK_VOUT_COLOR_601_YCBCR_LIMITED:
+-	case MASK_VOUT_COLOR_601_YCBCR_FULL:
+-		format->format.colorspace = V4L2_COLORSPACE_SMPTE170M;
+-		break;
+-	case MASK_VOUT_COLOR_709_YCBCR_FULL:
+-	case MASK_VOUT_COLOR_709_YCBCR_LIMITED:
+-		format->format.colorspace = V4L2_COLORSPACE_REC709;
+-		break;
+-	default:
+-		format->format.colorspace = 0;
+-		break;
+-	}
++	format->format.colorspace = tc358743_g_colorspace(format->format.code);
+ 
+ 	return 0;
+ }
+@@ -1705,19 +1717,14 @@ static int tc358743_set_fmt(struct v4l2_subdev *sd,
+ 	u32 code = format->format.code; /* is overwritten by get_fmt */
+ 	int ret = tc358743_get_fmt(sd, sd_state, format);
+ 
+-	format->format.code = code;
++	if (code == MEDIA_BUS_FMT_RGB888_1X24 ||
++	    code == MEDIA_BUS_FMT_UYVY8_1X16)
++		format->format.code = code;
++	format->format.colorspace = tc358743_g_colorspace(format->format.code);
+ 
+ 	if (ret)
+ 		return ret;
+ 
+-	switch (code) {
+-	case MEDIA_BUS_FMT_RGB888_1X24:
+-	case MEDIA_BUS_FMT_UYVY8_1X16:
+-		break;
+-	default:
+-		return -EINVAL;
+-	}
+-
+ 	if (format->which == V4L2_SUBDEV_FORMAT_TRY)
+ 		return 0;
+ 
+@@ -1941,8 +1948,19 @@ static int tc358743_probe_of(struct tc358743_state *state)
+ 	state->pdata.refclk_hz = clk_get_rate(refclk);
+ 	state->pdata.ddc5v_delay = DDC5V_DELAY_100_MS;
+ 	state->pdata.enable_hdcp = false;
+-	/* A FIFO level of 16 should be enough for 2-lane 720p60 at 594 MHz. */
+-	state->pdata.fifo_level = 16;
++	/*
++	 * Ideally the FIFO trigger level should be set based on the input and
++	 * output data rates, but the calculations required are buried in
++	 * Toshiba's register settings spreadsheet.
++	 * A value of 16 works with a 594Mbps data rate for 720p60 (using 2
++	 * lanes) and 1080p60 (using 4 lanes), but fails when the data rate
++	 * is increased, or a lower pixel clock is used that result in CSI
++	 * reading out faster than the data is arriving.
++	 *
++	 * A value of 374 works with both those modes at 594Mbps, and with most
++	 * modes on 972Mbps.
++	 */
++	state->pdata.fifo_level = 374;
+ 	/*
+ 	 * The PLL input clock is obtained by dividing refclk by pll_prd.
+ 	 * It must be between 6 MHz and 40 MHz, lower frequency is better.
+@@ -2030,6 +2048,7 @@ static int tc358743_probe(struct i2c_client *client)
+ 	struct tc358743_platform_data *pdata = client->dev.platform_data;
+ 	struct v4l2_subdev *sd;
+ 	u16 irq_mask = MASK_HDMI_MSK | MASK_CSI_MSK;
++	u16 chipid;
+ 	int err;
+ 
+ 	if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+@@ -2061,7 +2080,8 @@ static int tc358743_probe(struct i2c_client *client)
+ 	sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
+ 
+ 	/* i2c access */
+-	if ((i2c_rd16(sd, CHIPID) & MASK_CHIPID) != 0) {
++	if (i2c_rd16_err(sd, CHIPID, &chipid) ||
++	    (chipid & MASK_CHIPID) != 0) {
+ 		v4l2_info(sd, "not a TC358743 on address 0x%x\n",
+ 			  client->addr << 1);
+ 		return -ENODEV;
+diff --git a/drivers/media/pci/intel/ipu-bridge.c b/drivers/media/pci/intel/ipu-bridge.c
+index a0e9a71580b5dc..4e921c75162937 100644
+--- a/drivers/media/pci/intel/ipu-bridge.c
++++ b/drivers/media/pci/intel/ipu-bridge.c
+@@ -59,6 +59,8 @@ static const struct ipu_sensor_config ipu_supported_sensors[] = {
+ 	IPU_SENSOR_CONFIG("INT33BE", 1, 419200000),
+ 	/* Omnivision OV2740 */
+ 	IPU_SENSOR_CONFIG("INT3474", 1, 180000000),
++	/* Omnivision OV5670 */
++	IPU_SENSOR_CONFIG("INT3479", 1, 422400000),
+ 	/* Omnivision OV8865 */
+ 	IPU_SENSOR_CONFIG("INT347A", 1, 360000000),
+ 	/* Omnivision OV7251 */
+diff --git a/drivers/media/platform/qcom/venus/hfi_msgs.c b/drivers/media/platform/qcom/venus/hfi_msgs.c
+index 0a041b4db9efc5..cf0d97cbc4631f 100644
+--- a/drivers/media/platform/qcom/venus/hfi_msgs.c
++++ b/drivers/media/platform/qcom/venus/hfi_msgs.c
+@@ -33,8 +33,9 @@ static void event_seq_changed(struct venus_core *core, struct venus_inst *inst,
+ 	struct hfi_buffer_requirements *bufreq;
+ 	struct hfi_extradata_input_crop *crop;
+ 	struct hfi_dpb_counts *dpb_count;
++	u32 ptype, rem_bytes;
++	u32 size_read = 0;
+ 	u8 *data_ptr;
+-	u32 ptype;
+ 
+ 	inst->error = HFI_ERR_NONE;
+ 
+@@ -44,86 +45,118 @@ static void event_seq_changed(struct venus_core *core, struct venus_inst *inst,
+ 		break;
+ 	default:
+ 		inst->error = HFI_ERR_SESSION_INVALID_PARAMETER;
+-		goto done;
++		inst->ops->event_notify(inst, EVT_SYS_EVENT_CHANGE, &event);
++		return;
+ 	}
+ 
+ 	event.event_type = pkt->event_data1;
+ 
+ 	num_properties_changed = pkt->event_data2;
+-	if (!num_properties_changed) {
+-		inst->error = HFI_ERR_SESSION_INSUFFICIENT_RESOURCES;
+-		goto done;
+-	}
++	if (!num_properties_changed)
++		goto error;
+ 
+ 	data_ptr = (u8 *)&pkt->ext_event_data[0];
++	rem_bytes = pkt->shdr.hdr.size - sizeof(*pkt);
++
+ 	do {
++		if (rem_bytes < sizeof(u32))
++			goto error;
+ 		ptype = *((u32 *)data_ptr);
++
++		data_ptr += sizeof(u32);
++		rem_bytes -= sizeof(u32);
++
+ 		switch (ptype) {
+ 		case HFI_PROPERTY_PARAM_FRAME_SIZE:
+-			data_ptr += sizeof(u32);
++			if (rem_bytes < sizeof(struct hfi_framesize))
++				goto error;
++
+ 			frame_sz = (struct hfi_framesize *)data_ptr;
+ 			event.width = frame_sz->width;
+ 			event.height = frame_sz->height;
+-			data_ptr += sizeof(*frame_sz);
++			size_read = sizeof(struct hfi_framesize);
+ 			break;
+ 		case HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT:
+-			data_ptr += sizeof(u32);
++			if (rem_bytes < sizeof(struct hfi_profile_level))
++				goto error;
++
+ 			profile_level = (struct hfi_profile_level *)data_ptr;
+ 			event.profile = profile_level->profile;
+ 			event.level = profile_level->level;
+-			data_ptr += sizeof(*profile_level);
++			size_read = sizeof(struct hfi_profile_level);
+ 			break;
+ 		case HFI_PROPERTY_PARAM_VDEC_PIXEL_BITDEPTH:
+-			data_ptr += sizeof(u32);
++			if (rem_bytes < sizeof(struct hfi_bit_depth))
++				goto error;
++
+ 			pixel_depth = (struct hfi_bit_depth *)data_ptr;
+ 			event.bit_depth = pixel_depth->bit_depth;
+-			data_ptr += sizeof(*pixel_depth);
++			size_read = sizeof(struct hfi_bit_depth);
+ 			break;
+ 		case HFI_PROPERTY_PARAM_VDEC_PIC_STRUCT:
+-			data_ptr += sizeof(u32);
++			if (rem_bytes < sizeof(struct hfi_pic_struct))
++				goto error;
++
+ 			pic_struct = (struct hfi_pic_struct *)data_ptr;
+ 			event.pic_struct = pic_struct->progressive_only;
+-			data_ptr += sizeof(*pic_struct);
++			size_read = sizeof(struct hfi_pic_struct);
+ 			break;
+ 		case HFI_PROPERTY_PARAM_VDEC_COLOUR_SPACE:
+-			data_ptr += sizeof(u32);
++			if (rem_bytes < sizeof(struct hfi_colour_space))
++				goto error;
++
+ 			colour_info = (struct hfi_colour_space *)data_ptr;
+ 			event.colour_space = colour_info->colour_space;
+-			data_ptr += sizeof(*colour_info);
++			size_read = sizeof(struct hfi_colour_space);
+ 			break;
+ 		case HFI_PROPERTY_CONFIG_VDEC_ENTROPY:
+-			data_ptr += sizeof(u32);
++			if (rem_bytes < sizeof(u32))
++				goto error;
++
+ 			event.entropy_mode = *(u32 *)data_ptr;
+-			data_ptr += sizeof(u32);
++			size_read = sizeof(u32);
+ 			break;
+ 		case HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS:
+-			data_ptr += sizeof(u32);
++			if (rem_bytes < sizeof(struct hfi_buffer_requirements))
++				goto error;
++
+ 			bufreq = (struct hfi_buffer_requirements *)data_ptr;
+ 			event.buf_count = hfi_bufreq_get_count_min(bufreq, ver);
+-			data_ptr += sizeof(*bufreq);
++			size_read = sizeof(struct hfi_buffer_requirements);
+ 			break;
+ 		case HFI_INDEX_EXTRADATA_INPUT_CROP:
+-			data_ptr += sizeof(u32);
++			if (rem_bytes < sizeof(struct hfi_extradata_input_crop))
++				goto error;
++
+ 			crop = (struct hfi_extradata_input_crop *)data_ptr;
+ 			event.input_crop.left = crop->left;
+ 			event.input_crop.top = crop->top;
+ 			event.input_crop.width = crop->width;
+ 			event.input_crop.height = crop->height;
+-			data_ptr += sizeof(*crop);
++			size_read = sizeof(struct hfi_extradata_input_crop);
+ 			break;
+ 		case HFI_PROPERTY_PARAM_VDEC_DPB_COUNTS:
+-			data_ptr += sizeof(u32);
++			if (rem_bytes < sizeof(struct hfi_dpb_counts))
++				goto error;
++
+ 			dpb_count = (struct hfi_dpb_counts *)data_ptr;
+ 			event.buf_count = dpb_count->fw_min_cnt;
+-			data_ptr += sizeof(*dpb_count);
++			size_read = sizeof(struct hfi_dpb_counts);
+ 			break;
+ 		default:
++			size_read = 0;
+ 			break;
+ 		}
++		data_ptr += size_read;
++		rem_bytes -= size_read;
+ 		num_properties_changed--;
+ 	} while (num_properties_changed > 0);
+ 
+-done:
++	inst->ops->event_notify(inst, EVT_SYS_EVENT_CHANGE, &event);
++	return;
++
++error:
++	inst->error = HFI_ERR_SESSION_INSUFFICIENT_RESOURCES;
+ 	inst->ops->event_notify(inst, EVT_SYS_EVENT_CHANGE, &event);
+ }
+ 
+diff --git a/drivers/media/usb/hdpvr/hdpvr-i2c.c b/drivers/media/usb/hdpvr/hdpvr-i2c.c
+index 070559b01b01b8..54956a8ff15e86 100644
+--- a/drivers/media/usb/hdpvr/hdpvr-i2c.c
++++ b/drivers/media/usb/hdpvr/hdpvr-i2c.c
+@@ -165,10 +165,16 @@ static const struct i2c_algorithm hdpvr_algo = {
+ 	.functionality = hdpvr_functionality,
+ };
+ 
++/* prevent invalid 0-length usb_control_msg */
++static const struct i2c_adapter_quirks hdpvr_quirks = {
++	.flags = I2C_AQ_NO_ZERO_LEN_READ,
++};
++
+ static const struct i2c_adapter hdpvr_i2c_adapter_template = {
+ 	.name   = "Hauppauge HD PVR I2C",
+ 	.owner  = THIS_MODULE,
+ 	.algo   = &hdpvr_algo,
++	.quirks = &hdpvr_quirks,
+ };
+ 
+ static int hdpvr_activate_ir(struct hdpvr_device *dev)
+diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
+index 241b3f95f32706..fde5cc70bf79c5 100644
+--- a/drivers/media/usb/uvc/uvc_driver.c
++++ b/drivers/media/usb/uvc/uvc_driver.c
+@@ -235,6 +235,9 @@ static int uvc_parse_format(struct uvc_device *dev,
+ 	unsigned int i, n;
+ 	u8 ftype;
+ 
++	if (buflen < 4)
++		return -EINVAL;
++
+ 	format->type = buffer[2];
+ 	format->index = buffer[3];
+ 	format->frames = frames;
+@@ -2487,6 +2490,15 @@ static const struct uvc_device_info uvc_quirk_force_y8 = {
+  * Sort these by vendor/product ID.
+  */
+ static const struct usb_device_id uvc_ids[] = {
++	/* HP Webcam HD 2300 */
++	{ .match_flags		= USB_DEVICE_ID_MATCH_DEVICE
++				| USB_DEVICE_ID_MATCH_INT_INFO,
++	  .idVendor		= 0x03f0,
++	  .idProduct		= 0xe207,
++	  .bInterfaceClass	= USB_CLASS_VIDEO,
++	  .bInterfaceSubClass	= 1,
++	  .bInterfaceProtocol	= 0,
++	  .driver_info		= (kernel_ulong_t)&uvc_quirk_stream_no_fid },
+ 	/* Quanta ACER HD User Facing */
+ 	{ .match_flags		= USB_DEVICE_ID_MATCH_DEVICE
+ 				| USB_DEVICE_ID_MATCH_INT_INFO,
+diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
+index eab7b8f5573057..57e6f9af536ff8 100644
+--- a/drivers/media/usb/uvc/uvc_video.c
++++ b/drivers/media/usb/uvc/uvc_video.c
+@@ -258,6 +258,15 @@ static void uvc_fixup_video_ctrl(struct uvc_streaming *stream,
+ 
+ 		ctrl->dwMaxPayloadTransferSize = bandwidth;
+ 	}
++
++	if (stream->intf->num_altsetting > 1 &&
++	    ctrl->dwMaxPayloadTransferSize > stream->maxpsize) {
++		dev_warn_ratelimited(&stream->intf->dev,
++				     "UVC non compliance: the max payload transmission size (%u) exceeds the size of the ep max packet (%u). Using the max size.\n",
++				     ctrl->dwMaxPayloadTransferSize,
++				     stream->maxpsize);
++		ctrl->dwMaxPayloadTransferSize = stream->maxpsize;
++	}
+ }
+ 
+ static size_t uvc_video_ctrl_size(struct uvc_streaming *stream)
+@@ -1428,12 +1437,6 @@ static void uvc_video_decode_meta(struct uvc_streaming *stream,
+ 	if (!meta_buf || length == 2)
+ 		return;
+ 
+-	if (meta_buf->length - meta_buf->bytesused <
+-	    length + sizeof(meta->ns) + sizeof(meta->sof)) {
+-		meta_buf->error = 1;
+-		return;
+-	}
+-
+ 	has_pts = mem[1] & UVC_STREAM_PTS;
+ 	has_scr = mem[1] & UVC_STREAM_SCR;
+ 
+@@ -1454,6 +1457,12 @@ static void uvc_video_decode_meta(struct uvc_streaming *stream,
+ 				  !memcmp(scr, stream->clock.last_scr, 6)))
+ 		return;
+ 
++	if (meta_buf->length - meta_buf->bytesused <
++	    length + sizeof(meta->ns) + sizeof(meta->sof)) {
++		meta_buf->error = 1;
++		return;
++	}
++
+ 	meta = (struct uvc_meta_buf *)((u8 *)meta_buf->mem + meta_buf->bytesused);
+ 	local_irq_save(flags);
+ 	time = uvc_video_get_time();
+diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c
+index 0a2f4f0d0a0739..ad0b9d5fbee7b2 100644
+--- a/drivers/media/v4l2-core/v4l2-common.c
++++ b/drivers/media/v4l2-core/v4l2-common.c
+@@ -312,6 +312,12 @@ const struct v4l2_format_info *v4l2_format_info(u32 format)
+ 		{ .format = V4L2_PIX_FMT_NV61M,   .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
+ 		{ .format = V4L2_PIX_FMT_P012M,   .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 2, 4, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
+ 
++		/* Tiled YUV formats, non contiguous variant */
++		{ .format = V4L2_PIX_FMT_NV12MT,        .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2,
++		  .block_w = { 64, 32, 0, 0 },	.block_h = { 32, 16, 0, 0 }},
++		{ .format = V4L2_PIX_FMT_NV12MT_16X16,  .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2,
++		  .block_w = { 16,  8, 0, 0 },	.block_h = { 16,  8, 0, 0 }},
++
+ 		/* Bayer RGB formats */
+ 		{ .format = V4L2_PIX_FMT_SBGGR8,	.pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ 		{ .format = V4L2_PIX_FMT_SGBRG8,	.pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+@@ -494,10 +500,10 @@ s64 v4l2_get_link_freq(struct v4l2_ctrl_handler *handler, unsigned int mul,
+ 
+ 		freq = div_u64(v4l2_ctrl_g_ctrl_int64(ctrl) * mul, div);
+ 
+-		pr_warn("%s: Link frequency estimated using pixel rate: result might be inaccurate\n",
+-			__func__);
+-		pr_warn("%s: Consider implementing support for V4L2_CID_LINK_FREQ in the transmitter driver\n",
+-			__func__);
++		pr_warn_once("%s: Link frequency estimated using pixel rate: result might be inaccurate\n",
++			     __func__);
++		pr_warn_once("%s: Consider implementing support for V4L2_CID_LINK_FREQ in the transmitter driver\n",
++			     __func__);
+ 	}
+ 
+ 	return freq > 0 ? freq : -EINVAL;
+diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
+index 3780929039710c..cbf1029d033358 100644
+--- a/drivers/mfd/axp20x.c
++++ b/drivers/mfd/axp20x.c
+@@ -1034,7 +1034,8 @@ static const struct mfd_cell axp152_cells[] = {
+ };
+ 
+ static struct mfd_cell axp313a_cells[] = {
+-	MFD_CELL_NAME("axp20x-regulator"),
++	/* AXP323 is sometimes paired with AXP717 as sub-PMIC */
++	MFD_CELL_BASIC("axp20x-regulator", NULL, NULL, 0, 1),
+ 	MFD_CELL_RES("axp313a-pek", axp313a_pek_resources),
+ };
+ 
+diff --git a/drivers/mfd/cros_ec_dev.c b/drivers/mfd/cros_ec_dev.c
+index f3dc812b359f34..78c48dc624e889 100644
+--- a/drivers/mfd/cros_ec_dev.c
++++ b/drivers/mfd/cros_ec_dev.c
+@@ -87,7 +87,6 @@ static const struct mfd_cell cros_ec_sensorhub_cells[] = {
+ };
+ 
+ static const struct mfd_cell cros_usbpd_charger_cells[] = {
+-	{ .name = "cros-charge-control", },
+ 	{ .name = "cros-usbpd-charger", },
+ 	{ .name = "cros-usbpd-logger", },
+ };
+@@ -108,6 +107,10 @@ static const struct mfd_cell cros_ec_keyboard_leds_cells[] = {
+ 	{ .name = "cros-keyboard-leds", },
+ };
+ 
++static const struct mfd_cell cros_ec_charge_control_cells[] = {
++	{ .name = "cros-charge-control", },
++};
++
+ static const struct cros_feature_to_cells cros_subdevices[] = {
+ 	{
+ 		.id		= EC_FEATURE_CEC,
+@@ -144,6 +147,11 @@ static const struct cros_feature_to_cells cros_subdevices[] = {
+ 		.mfd_cells	= cros_ec_keyboard_leds_cells,
+ 		.num_cells	= ARRAY_SIZE(cros_ec_keyboard_leds_cells),
+ 	},
++	{
++		.id		= EC_FEATURE_CHARGER,
++		.mfd_cells	= cros_ec_charge_control_cells,
++		.num_cells	= ARRAY_SIZE(cros_ec_charge_control_cells),
++	},
+ };
+ 
+ static const struct mfd_cell cros_ec_platform_cells[] = {
+diff --git a/drivers/misc/cardreader/rtsx_usb.c b/drivers/misc/cardreader/rtsx_usb.c
+index f150d8769f1986..f546b050cb495f 100644
+--- a/drivers/misc/cardreader/rtsx_usb.c
++++ b/drivers/misc/cardreader/rtsx_usb.c
+@@ -698,6 +698,12 @@ static void rtsx_usb_disconnect(struct usb_interface *intf)
+ }
+ 
+ #ifdef CONFIG_PM
++static int rtsx_usb_resume_child(struct device *dev, void *data)
++{
++	pm_request_resume(dev);
++	return 0;
++}
++
+ static int rtsx_usb_suspend(struct usb_interface *intf, pm_message_t message)
+ {
+ 	struct rtsx_ucr *ucr =
+@@ -713,8 +719,10 @@ static int rtsx_usb_suspend(struct usb_interface *intf, pm_message_t message)
+ 			mutex_unlock(&ucr->dev_mutex);
+ 
+ 			/* Defer the autosuspend if card exists */
+-			if (val & (SD_CD | MS_CD))
++			if (val & (SD_CD | MS_CD)) {
++				device_for_each_child(&intf->dev, NULL, rtsx_usb_resume_child);
+ 				return -EAGAIN;
++			}
+ 		} else {
+ 			/* There is an ongoing operation*/
+ 			return -EAGAIN;
+@@ -724,12 +732,6 @@ static int rtsx_usb_suspend(struct usb_interface *intf, pm_message_t message)
+ 	return 0;
+ }
+ 
+-static int rtsx_usb_resume_child(struct device *dev, void *data)
+-{
+-	pm_request_resume(dev);
+-	return 0;
+-}
+-
+ static int rtsx_usb_resume(struct usb_interface *intf)
+ {
+ 	device_for_each_child(&intf->dev, NULL, rtsx_usb_resume_child);
+diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
+index 5576146ab13bcb..04f9a4b79d8508 100644
+--- a/drivers/misc/mei/bus.c
++++ b/drivers/misc/mei/bus.c
+@@ -1353,10 +1353,16 @@ static void mei_dev_bus_put(struct mei_device *bus)
+ static void mei_cl_bus_dev_release(struct device *dev)
+ {
+ 	struct mei_cl_device *cldev = to_mei_cl_device(dev);
++	struct mei_device *mdev = cldev->cl->dev;
++	struct mei_cl *cl;
+ 
+ 	mei_cl_flush_queues(cldev->cl, NULL);
+ 	mei_me_cl_put(cldev->me_cl);
+ 	mei_dev_bus_put(cldev->bus);
++
++	list_for_each_entry(cl, &mdev->file_list, link)
++		WARN_ON(cl == cldev->cl);
++
+ 	kfree(cldev->cl);
+ 	kfree(cldev);
+ }
+diff --git a/drivers/mmc/host/rtsx_usb_sdmmc.c b/drivers/mmc/host/rtsx_usb_sdmmc.c
+index 4e86f0a705b60a..2bf51fe11a0904 100644
+--- a/drivers/mmc/host/rtsx_usb_sdmmc.c
++++ b/drivers/mmc/host/rtsx_usb_sdmmc.c
+@@ -1032,9 +1032,7 @@ static int sd_set_power_mode(struct rtsx_usb_sdmmc *host,
+ 		err = sd_power_on(host);
+ 	}
+ 
+-	if (!err)
+-		host->power_mode = power_mode;
+-
++	host->power_mode = power_mode;
+ 	return err;
+ }
+ 
+diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
+index 82808cc373f68b..c2144a3efb308e 100644
+--- a/drivers/mmc/host/sdhci-msm.c
++++ b/drivers/mmc/host/sdhci-msm.c
+@@ -1564,6 +1564,7 @@ static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type)
+ {
+ 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ 	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
++	struct mmc_host *mmc = host->mmc;
+ 	bool done = false;
+ 	u32 val = SWITCHABLE_SIGNALING_VOLTAGE;
+ 	const struct sdhci_msm_offset *msm_offset =
+@@ -1621,6 +1622,12 @@ static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type)
+ 				 "%s: pwr_irq for req: (%d) timed out\n",
+ 				 mmc_hostname(host->mmc), req_type);
+ 	}
++
++	if ((req_type & REQ_BUS_ON) && mmc->card && !mmc->ops->get_cd(mmc)) {
++		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
++		host->pwr = 0;
++	}
++
+ 	pr_debug("%s: %s: request %d done\n", mmc_hostname(host->mmc),
+ 			__func__, req_type);
+ }
+@@ -1679,6 +1686,13 @@ static void sdhci_msm_handle_pwr_irq(struct sdhci_host *host, int irq)
+ 		udelay(10);
+ 	}
+ 
++	if ((irq_status & CORE_PWRCTL_BUS_ON) && mmc->card &&
++	    !mmc->ops->get_cd(mmc)) {
++		msm_host_writel(msm_host, CORE_PWRCTL_BUS_FAIL, host,
++				msm_offset->core_pwrctl_ctl);
++		return;
++	}
++
+ 	/* Handle BUS ON/OFF*/
+ 	if (irq_status & CORE_PWRCTL_BUS_ON) {
+ 		pwr_state = REQ_BUS_ON;
+diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
+index 644e8b8eb91e74..e6d6661a908ab1 100644
+--- a/drivers/net/can/ti_hecc.c
++++ b/drivers/net/can/ti_hecc.c
+@@ -383,7 +383,7 @@ static void ti_hecc_start(struct net_device *ndev)
+ 	 * overflows instead of the hardware silently dropping the
+ 	 * messages.
+ 	 */
+-	mbx_mask = ~BIT(HECC_RX_LAST_MBOX);
++	mbx_mask = ~BIT_U32(HECC_RX_LAST_MBOX);
+ 	hecc_write(priv, HECC_CANOPC, mbx_mask);
+ 
+ 	/* Enable interrupts */
+diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
+index 71c30a81c36dbd..844cf2b8f72786 100644
+--- a/drivers/net/dsa/b53/b53_common.c
++++ b/drivers/net/dsa/b53/b53_common.c
+@@ -359,18 +359,23 @@ static void b53_set_forwarding(struct b53_device *dev, int enable)
+ 
+ 	b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
+ 
+-	/* Include IMP port in dumb forwarding mode
+-	 */
+-	b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, &mgmt);
+-	mgmt |= B53_MII_DUMB_FWDG_EN;
+-	b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, mgmt);
+-
+-	/* Look at B53_UC_FWD_EN and B53_MC_FWD_EN to decide whether
+-	 * frames should be flooded or not.
+-	 */
+-	b53_read8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, &mgmt);
+-	mgmt |= B53_UC_FWD_EN | B53_MC_FWD_EN | B53_IPMC_FWD_EN;
+-	b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt);
++	if (!is5325(dev)) {
++		/* Include IMP port in dumb forwarding mode */
++		b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, &mgmt);
++		mgmt |= B53_MII_DUMB_FWDG_EN;
++		b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, mgmt);
++
++		/* Look at B53_UC_FWD_EN and B53_MC_FWD_EN to decide whether
++		 * frames should be flooded or not.
++		 */
++		b53_read8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, &mgmt);
++		mgmt |= B53_UC_FWD_EN | B53_MC_FWD_EN | B53_IPMC_FWD_EN;
++		b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt);
++	} else {
++		b53_read8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, &mgmt);
++		mgmt |= B53_IP_MCAST_25;
++		b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt);
++	}
+ }
+ 
+ static void b53_enable_vlan(struct b53_device *dev, int port, bool enable,
+@@ -527,6 +532,10 @@ void b53_imp_vlan_setup(struct dsa_switch *ds, int cpu_port)
+ 	unsigned int i;
+ 	u16 pvlan;
+ 
++	/* BCM5325 CPU port is at 8 */
++	if ((is5325(dev) || is5365(dev)) && cpu_port == B53_CPU_PORT_25)
++		cpu_port = B53_CPU_PORT;
++
+ 	/* Enable the IMP port to be in the same VLAN as the other ports
+ 	 * on a per-port basis such that we only have Port i and IMP in
+ 	 * the same VLAN.
+@@ -577,6 +586,9 @@ static void b53_port_set_learning(struct b53_device *dev, int port,
+ {
+ 	u16 reg;
+ 
++	if (is5325(dev))
++		return;
++
+ 	b53_read16(dev, B53_CTRL_PAGE, B53_DIS_LEARNING, &reg);
+ 	if (learning)
+ 		reg &= ~BIT(port);
+@@ -613,6 +625,19 @@ int b53_setup_port(struct dsa_switch *ds, int port)
+ 	if (dsa_is_user_port(ds, port))
+ 		b53_set_eap_mode(dev, port, EAP_MODE_SIMPLIFIED);
+ 
++	if (is5325(dev) &&
++	    in_range(port, 1, 4)) {
++		u8 reg;
++
++		b53_read8(dev, B53_CTRL_PAGE, B53_PD_MODE_CTRL_25, &reg);
++		reg &= ~PD_MODE_POWER_DOWN_PORT(0);
++		if (dsa_is_unused_port(ds, port))
++			reg |= PD_MODE_POWER_DOWN_PORT(port);
++		else
++			reg &= ~PD_MODE_POWER_DOWN_PORT(port);
++		b53_write8(dev, B53_CTRL_PAGE, B53_PD_MODE_CTRL_25, reg);
++	}
++
+ 	return 0;
+ }
+ EXPORT_SYMBOL(b53_setup_port);
+@@ -1252,6 +1277,8 @@ static void b53_force_link(struct b53_device *dev, int port, int link)
+ 	if (port == dev->imp_port) {
+ 		off = B53_PORT_OVERRIDE_CTRL;
+ 		val = PORT_OVERRIDE_EN;
++	} else if (is5325(dev)) {
++		return;
+ 	} else {
+ 		off = B53_GMII_PORT_OVERRIDE_CTRL(port);
+ 		val = GMII_PO_EN;
+@@ -1276,6 +1303,8 @@ static void b53_force_port_config(struct b53_device *dev, int port,
+ 	if (port == dev->imp_port) {
+ 		off = B53_PORT_OVERRIDE_CTRL;
+ 		val = PORT_OVERRIDE_EN;
++	} else if (is5325(dev)) {
++		return;
+ 	} else {
+ 		off = B53_GMII_PORT_OVERRIDE_CTRL(port);
+ 		val = GMII_PO_EN;
+@@ -1306,10 +1335,19 @@ static void b53_force_port_config(struct b53_device *dev, int port,
+ 		return;
+ 	}
+ 
+-	if (rx_pause)
+-		reg |= PORT_OVERRIDE_RX_FLOW;
+-	if (tx_pause)
+-		reg |= PORT_OVERRIDE_TX_FLOW;
++	if (rx_pause) {
++		if (is5325(dev))
++			reg |= PORT_OVERRIDE_LP_FLOW_25;
++		else
++			reg |= PORT_OVERRIDE_RX_FLOW;
++	}
++
++	if (tx_pause) {
++		if (is5325(dev))
++			reg |= PORT_OVERRIDE_LP_FLOW_25;
++		else
++			reg |= PORT_OVERRIDE_TX_FLOW;
++	}
+ 
+ 	b53_write8(dev, B53_CTRL_PAGE, off, reg);
+ }
+@@ -2167,7 +2205,13 @@ int b53_br_flags_pre(struct dsa_switch *ds, int port,
+ 		     struct switchdev_brport_flags flags,
+ 		     struct netlink_ext_ack *extack)
+ {
+-	if (flags.mask & ~(BR_FLOOD | BR_MCAST_FLOOD | BR_LEARNING))
++	struct b53_device *dev = ds->priv;
++	unsigned long mask = (BR_FLOOD | BR_MCAST_FLOOD);
++
++	if (!is5325(dev))
++		mask |= BR_LEARNING;
++
++	if (flags.mask & ~mask)
+ 		return -EINVAL;
+ 
+ 	return 0;
+diff --git a/drivers/net/dsa/b53/b53_regs.h b/drivers/net/dsa/b53/b53_regs.h
+index 5f7a0e5c5709d3..5741231e0841de 100644
+--- a/drivers/net/dsa/b53/b53_regs.h
++++ b/drivers/net/dsa/b53/b53_regs.h
+@@ -95,17 +95,22 @@
+ #define   PORT_OVERRIDE_SPEED_10M	(0 << PORT_OVERRIDE_SPEED_S)
+ #define   PORT_OVERRIDE_SPEED_100M	(1 << PORT_OVERRIDE_SPEED_S)
+ #define   PORT_OVERRIDE_SPEED_1000M	(2 << PORT_OVERRIDE_SPEED_S)
++#define   PORT_OVERRIDE_LP_FLOW_25	BIT(3) /* BCM5325 only */
+ #define   PORT_OVERRIDE_RV_MII_25	BIT(4) /* BCM5325 only */
+ #define   PORT_OVERRIDE_RX_FLOW		BIT(4)
+ #define   PORT_OVERRIDE_TX_FLOW		BIT(5)
+ #define   PORT_OVERRIDE_SPEED_2000M	BIT(6) /* BCM5301X only, requires setting 1000M */
+ #define   PORT_OVERRIDE_EN		BIT(7) /* Use the register contents */
+ 
+-/* Power-down mode control */
++/* Power-down mode control (8 bit) */
+ #define B53_PD_MODE_CTRL_25		0x0f
++#define  PD_MODE_PORT_MASK		0x1f
++/* Bit 0 also powers down the switch. */
++#define  PD_MODE_POWER_DOWN_PORT(i)	BIT(i)
+ 
+ /* IP Multicast control (8 bit) */
+ #define B53_IP_MULTICAST_CTRL		0x21
++#define  B53_IP_MCAST_25		BIT(0)
+ #define  B53_IPMC_FWD_EN		BIT(1)
+ #define  B53_UC_FWD_EN			BIT(6)
+ #define  B53_MC_FWD_EN			BIT(7)
+diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
+index b325e0cef120fd..c956151adee5d0 100644
+--- a/drivers/net/ethernet/agere/et131x.c
++++ b/drivers/net/ethernet/agere/et131x.c
+@@ -2459,6 +2459,10 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
+ 							  skb->data,
+ 							  skb_headlen(skb),
+ 							  DMA_TO_DEVICE);
++				if (dma_mapping_error(&adapter->pdev->dev,
++						      dma_addr))
++					return -ENOMEM;
++
+ 				desc[frag].addr_lo = lower_32_bits(dma_addr);
+ 				desc[frag].addr_hi = upper_32_bits(dma_addr);
+ 				frag++;
+@@ -2468,6 +2472,10 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
+ 							  skb->data,
+ 							  skb_headlen(skb) / 2,
+ 							  DMA_TO_DEVICE);
++				if (dma_mapping_error(&adapter->pdev->dev,
++						      dma_addr))
++					return -ENOMEM;
++
+ 				desc[frag].addr_lo = lower_32_bits(dma_addr);
+ 				desc[frag].addr_hi = upper_32_bits(dma_addr);
+ 				frag++;
+@@ -2478,6 +2486,10 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
+ 							  skb_headlen(skb) / 2,
+ 							  skb_headlen(skb) / 2,
+ 							  DMA_TO_DEVICE);
++				if (dma_mapping_error(&adapter->pdev->dev,
++						      dma_addr))
++					goto unmap_first_out;
++
+ 				desc[frag].addr_lo = lower_32_bits(dma_addr);
+ 				desc[frag].addr_hi = upper_32_bits(dma_addr);
+ 				frag++;
+@@ -2489,6 +2501,9 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
+ 						    0,
+ 						    desc[frag].len_vlan,
+ 						    DMA_TO_DEVICE);
++			if (dma_mapping_error(&adapter->pdev->dev, dma_addr))
++				goto unmap_out;
++
+ 			desc[frag].addr_lo = lower_32_bits(dma_addr);
+ 			desc[frag].addr_hi = upper_32_bits(dma_addr);
+ 			frag++;
+@@ -2578,6 +2593,27 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
+ 		       &adapter->regs->global.watchdog_timer);
+ 	}
+ 	return 0;
++
++unmap_out:
++	// Unmap the body of the packet with map_page
++	while (--i) {
++		frag--;
++		dma_addr = desc[frag].addr_lo;
++		dma_addr |= (u64)desc[frag].addr_hi << 32;
++		dma_unmap_page(&adapter->pdev->dev, dma_addr,
++			       desc[frag].len_vlan, DMA_TO_DEVICE);
++	}
++
++unmap_first_out:
++	// Unmap the header with map_single
++	while (frag--) {
++		dma_addr = desc[frag].addr_lo;
++		dma_addr |= (u64)desc[frag].addr_hi << 32;
++		dma_unmap_single(&adapter->pdev->dev, dma_addr,
++				 desc[frag].len_vlan, DMA_TO_DEVICE);
++	}
++
++	return -ENOMEM;
+ }
+ 
+ static int send_packet(struct sk_buff *skb, struct et131x_adapter *adapter)
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+index f010bda61c9611..2cd0352a11f4ae 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+@@ -113,6 +113,8 @@ struct aq_stats_s {
+ #define AQ_HW_POWER_STATE_D0   0U
+ #define AQ_HW_POWER_STATE_D3   3U
+ 
++#define	AQ_FW_WAKE_ON_LINK_RTPM BIT(10)
++
+ #define AQ_HW_FLAG_STARTED     0x00000004U
+ #define AQ_HW_FLAG_STOPPING    0x00000008U
+ #define AQ_HW_FLAG_RESETTING   0x00000010U
+diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c
+index 52e2070a4a2f0c..7370e3f76b6208 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c
++++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c
+@@ -462,6 +462,44 @@ static int aq_a2_fw_get_mac_temp(struct aq_hw_s *self, int *temp)
+ 	return aq_a2_fw_get_phy_temp(self, temp);
+ }
+ 
++static int aq_a2_fw_set_wol_params(struct aq_hw_s *self, const u8 *mac, u32 wol)
++{
++	struct mac_address_aligned_s mac_address;
++	struct link_control_s link_control;
++	struct wake_on_lan_s wake_on_lan;
++
++	memcpy(mac_address.aligned.mac_address, mac, ETH_ALEN);
++	hw_atl2_shared_buffer_write(self, mac_address, mac_address);
++
++	memset(&wake_on_lan, 0, sizeof(wake_on_lan));
++
++	if (wol & WAKE_MAGIC)
++		wake_on_lan.wake_on_magic_packet = 1U;
++
++	if (wol & (WAKE_PHY | AQ_FW_WAKE_ON_LINK_RTPM))
++		wake_on_lan.wake_on_link_up = 1U;
++
++	hw_atl2_shared_buffer_write(self, sleep_proxy, wake_on_lan);
++
++	hw_atl2_shared_buffer_get(self, link_control, link_control);
++	link_control.mode = AQ_HOST_MODE_SLEEP_PROXY;
++	hw_atl2_shared_buffer_write(self, link_control, link_control);
++
++	return hw_atl2_shared_buffer_finish_ack(self);
++}
++
++static int aq_a2_fw_set_power(struct aq_hw_s *self, unsigned int power_state,
++			      const u8 *mac)
++{
++	u32 wol = self->aq_nic_cfg->wol;
++	int err = 0;
++
++	if (wol)
++		err = aq_a2_fw_set_wol_params(self, mac, wol);
++
++	return err;
++}
++
+ static int aq_a2_fw_set_eee_rate(struct aq_hw_s *self, u32 speed)
+ {
+ 	struct link_options_s link_options;
+@@ -605,6 +643,7 @@ const struct aq_fw_ops aq_a2_fw_ops = {
+ 	.set_state          = aq_a2_fw_set_state,
+ 	.update_link_status = aq_a2_fw_update_link_status,
+ 	.update_stats       = aq_a2_fw_update_stats,
++	.set_power          = aq_a2_fw_set_power,
+ 	.get_mac_temp       = aq_a2_fw_get_mac_temp,
+ 	.get_phy_temp       = aq_a2_fw_get_phy_temp,
+ 	.set_eee_rate       = aq_a2_fw_set_eee_rate,
+diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
+index 9586b6894f7e7f..bccc7e7b2a8481 100644
+--- a/drivers/net/ethernet/atheros/ag71xx.c
++++ b/drivers/net/ethernet/atheros/ag71xx.c
+@@ -1213,6 +1213,11 @@ static bool ag71xx_fill_rx_buf(struct ag71xx *ag, struct ag71xx_buf *buf,
+ 	buf->rx.rx_buf = data;
+ 	buf->rx.dma_addr = dma_map_single(&ag->pdev->dev, data, ag->rx_buf_size,
+ 					  DMA_FROM_DEVICE);
++	if (dma_mapping_error(&ag->pdev->dev, buf->rx.dma_addr)) {
++		skb_free_frag(data);
++		buf->rx.rx_buf = NULL;
++		return false;
++	}
+ 	desc->data = (u32)buf->rx.dma_addr + offset;
+ 	return true;
+ }
+@@ -1511,6 +1516,10 @@ static netdev_tx_t ag71xx_hard_start_xmit(struct sk_buff *skb,
+ 
+ 	dma_addr = dma_map_single(&ag->pdev->dev, skb->data, skb->len,
+ 				  DMA_TO_DEVICE);
++	if (dma_mapping_error(&ag->pdev->dev, dma_addr)) {
++		netif_dbg(ag, tx_err, ndev, "DMA mapping error\n");
++		goto err_drop;
++	}
+ 
+ 	i = ring->curr & ring_mask;
+ 	desc = ag71xx_ring_desc(ring, i);
+diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+index 608cc6af5af1c7..aa80c370223237 100644
+--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
++++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+@@ -1429,9 +1429,9 @@ static acpi_status bgx_acpi_match_id(acpi_handle handle, u32 lvl,
+ {
+ 	struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
+ 	struct bgx *bgx = context;
+-	char bgx_sel[5];
++	char bgx_sel[7];
+ 
+-	snprintf(bgx_sel, 5, "BGX%d", bgx->bgx_id);
++	snprintf(bgx_sel, sizeof(bgx_sel), "BGX%d", bgx->bgx_id);
+ 	if (ACPI_FAILURE(acpi_get_name(handle, ACPI_SINGLE_NAME, &string))) {
+ 		pr_warn("Invalid link device\n");
+ 		return AE_OK;
+diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
+index 3d2e2159211917..490af665942947 100644
+--- a/drivers/net/ethernet/emulex/benet/be_main.c
++++ b/drivers/net/ethernet/emulex/benet/be_main.c
+@@ -1465,10 +1465,10 @@ static void be_tx_timeout(struct net_device *netdev, unsigned int txqueue)
+ 						 ntohs(tcphdr->source));
+ 					dev_info(dev, "TCP dest port %d\n",
+ 						 ntohs(tcphdr->dest));
+-					dev_info(dev, "TCP sequence num %d\n",
+-						 ntohs(tcphdr->seq));
+-					dev_info(dev, "TCP ack_seq %d\n",
+-						 ntohs(tcphdr->ack_seq));
++					dev_info(dev, "TCP sequence num %u\n",
++						 ntohl(tcphdr->seq));
++					dev_info(dev, "TCP ack_seq %u\n",
++						 ntohl(tcphdr->ack_seq));
+ 				} else if (ip_hdr(skb)->protocol ==
+ 					   IPPROTO_UDP) {
+ 					udphdr = udp_hdr(skb);
+diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
+index 0b61f548fd188f..d41832ff8bbfb1 100644
+--- a/drivers/net/ethernet/faraday/ftgmac100.c
++++ b/drivers/net/ethernet/faraday/ftgmac100.c
+@@ -1730,16 +1730,17 @@ static int ftgmac100_setup_mdio(struct net_device *netdev)
+ static void ftgmac100_phy_disconnect(struct net_device *netdev)
+ {
+ 	struct ftgmac100 *priv = netdev_priv(netdev);
++	struct phy_device *phydev = netdev->phydev;
+ 
+-	if (!netdev->phydev)
++	if (!phydev)
+ 		return;
+ 
+-	phy_disconnect(netdev->phydev);
++	phy_disconnect(phydev);
+ 	if (of_phy_is_fixed_link(priv->dev->of_node))
+ 		of_phy_deregister_fixed_link(priv->dev->of_node);
+ 
+ 	if (priv->use_ncsi)
+-		fixed_phy_unregister(netdev->phydev);
++		fixed_phy_unregister(phydev);
+ }
+ 
+ static void ftgmac100_destroy_mdio(struct net_device *netdev)
+diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+index e15dd3d858df21..c5c1d3cb027847 100644
+--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
++++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+@@ -28,7 +28,6 @@
+ #include <linux/percpu.h>
+ #include <linux/dma-mapping.h>
+ #include <linux/sort.h>
+-#include <linux/phy_fixed.h>
+ #include <linux/bpf.h>
+ #include <linux/bpf_trace.h>
+ #include <soc/fsl/bman.h>
+@@ -3129,7 +3128,6 @@ static const struct net_device_ops dpaa_ops = {
+ 	.ndo_stop = dpaa_eth_stop,
+ 	.ndo_tx_timeout = dpaa_tx_timeout,
+ 	.ndo_get_stats64 = dpaa_get_stats64,
+-	.ndo_change_carrier = fixed_phy_change_carrier,
+ 	.ndo_set_mac_address = dpaa_set_mac_address,
+ 	.ndo_validate_addr = eth_validate_addr,
+ 	.ndo_set_rx_mode = dpaa_set_rx_mode,
+diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+index b0060cf96090e8..51ee995c61480e 100644
+--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
++++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+@@ -415,8 +415,10 @@ static int dpaa_get_ts_info(struct net_device *net_dev,
+ 		of_node_put(ptp_node);
+ 	}
+ 
+-	if (ptp_dev)
++	if (ptp_dev) {
+ 		ptp = platform_get_drvdata(ptp_dev);
++		put_device(&ptp_dev->dev);
++	}
+ 
+ 	if (ptp)
+ 		info->phc_index = ptp->phc_index;
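
The dpaa_ethtool hunk above (and the gianfar one further down) balances
of_find_device_by_node(), which returns its platform device with an elevated
reference count that the caller owns. A sketch of the whole lookup, assuming
the ptp_qoriq drvdata layout these drivers use:

  struct platform_device *ptp_dev;
  struct ptp_qoriq *ptp;
  int phc_index = -1;

  ptp_dev = of_find_device_by_node(ptp_node);	/* takes a device ref   */
  of_node_put(ptp_node);			/* node ref now unneeded */
  if (ptp_dev) {
	ptp = platform_get_drvdata(ptp_dev);	/* does NOT take a ref  */
	if (ptp)
		phc_index = ptp->phc_index;
	put_device(&ptp_dev->dev);		/* balance the lookup   */
  }

The mtk_wed hunk later in this patch removes an explicit get_device() for the
same reason: the lookup itself already holds the reference, so taking another
one leaked it.
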
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+index c95a7c083b0f48..f376bf3110a5df 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
++++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+@@ -1182,19 +1182,29 @@ static int enetc_pf_register_with_ierb(struct pci_dev *pdev)
+ {
+ 	struct platform_device *ierb_pdev;
+ 	struct device_node *ierb_node;
++	int ret;
+ 
+ 	ierb_node = of_find_compatible_node(NULL, NULL,
+ 					    "fsl,ls1028a-enetc-ierb");
+-	if (!ierb_node || !of_device_is_available(ierb_node))
++	if (!ierb_node)
+ 		return -ENODEV;
+ 
++	if (!of_device_is_available(ierb_node)) {
++		of_node_put(ierb_node);
++		return -ENODEV;
++	}
++
+ 	ierb_pdev = of_find_device_by_node(ierb_node);
+ 	of_node_put(ierb_node);
+ 
+ 	if (!ierb_pdev)
+ 		return -EPROBE_DEFER;
+ 
+-	return enetc_ierb_register_pf(ierb_pdev, pdev);
++	ret = enetc_ierb_register_pf(ierb_pdev, pdev);
++
++	put_device(&ierb_pdev->dev);
++
++	return ret;
+ }
+ 
+ static struct enetc_si *enetc_psi_create(struct pci_dev *pdev)
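
The enetc hunk above splits the combined check so that the node reference
taken by of_find_compatible_node() is also dropped on the not-available path.
Minimal shape of the fix (sketch):

  np = of_find_compatible_node(NULL, NULL, "fsl,ls1028a-enetc-ierb");
  if (!np)
	return -ENODEV;			/* nothing to put: lookup failed */

  if (!of_device_is_available(np)) {
	of_node_put(np);		/* early return must drop the ref */
	return -ENODEV;
  }

  pdev = of_find_device_by_node(np);
  of_node_put(np);			/* node ref not needed past here  */
  /* ... caller must later put_device(&pdev->dev) ... */
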
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index 479ced24096b80..a1cc338cf20f38 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -3122,27 +3122,25 @@ static int fec_enet_us_to_itr_clock(struct net_device *ndev, int us)
+ static void fec_enet_itr_coal_set(struct net_device *ndev)
+ {
+ 	struct fec_enet_private *fep = netdev_priv(ndev);
+-	int rx_itr, tx_itr;
++	u32 rx_itr = 0, tx_itr = 0;
++	int rx_ictt, tx_ictt;
+ 
+-	/* Must be greater than zero to avoid unpredictable behavior */
+-	if (!fep->rx_time_itr || !fep->rx_pkts_itr ||
+-	    !fep->tx_time_itr || !fep->tx_pkts_itr)
+-		return;
+-
+-	/* Select enet system clock as Interrupt Coalescing
+-	 * timer Clock Source
+-	 */
+-	rx_itr = FEC_ITR_CLK_SEL;
+-	tx_itr = FEC_ITR_CLK_SEL;
++	rx_ictt = fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr);
++	tx_ictt = fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr);
+ 
+-	/* set ICFT and ICTT */
+-	rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr);
+-	rx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr));
+-	tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr);
+-	tx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr));
++	if (rx_ictt > 0 && fep->rx_pkts_itr > 1) {
++		/* Enable with enet system clock as Interrupt Coalescing timer Clock Source */
++		rx_itr = FEC_ITR_EN | FEC_ITR_CLK_SEL;
++		rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr);
++		rx_itr |= FEC_ITR_ICTT(rx_ictt);
++	}
+ 
+-	rx_itr |= FEC_ITR_EN;
+-	tx_itr |= FEC_ITR_EN;
++	if (tx_ictt > 0 && fep->tx_pkts_itr > 1) {
++		/* Enable with enet system clock as Interrupt Coalescing timer Clock Source */
++		tx_itr = FEC_ITR_EN | FEC_ITR_CLK_SEL;
++		tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr);
++		tx_itr |= FEC_ITR_ICTT(tx_ictt);
++	}
+ 
+ 	writel(tx_itr, fep->hwp + FEC_TXIC0);
+ 	writel(rx_itr, fep->hwp + FEC_RXIC0);
+diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
+index a99b95c4bcfbc5..ec13a587fe41b4 100644
+--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
++++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
+@@ -1464,8 +1464,10 @@ static int gfar_get_ts_info(struct net_device *dev,
+ 	if (ptp_node) {
+ 		ptp_dev = of_find_device_by_node(ptp_node);
+ 		of_node_put(ptp_node);
+-		if (ptp_dev)
++		if (ptp_dev) {
+ 			ptp = platform_get_drvdata(ptp_dev);
++			put_device(&ptp_dev->dev);
++		}
+ 	}
+ 
+ 	if (ptp)
+diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
+index 060e0e6749380f..36acbcd8f62a4f 100644
+--- a/drivers/net/ethernet/google/gve/gve_adminq.c
++++ b/drivers/net/ethernet/google/gve/gve_adminq.c
+@@ -564,6 +564,7 @@ static int gve_adminq_issue_cmd(struct gve_priv *priv,
+ 		break;
+ 	default:
+ 		dev_err(&priv->pdev->dev, "unknown AQ command opcode %d\n", opcode);
++		return -EINVAL;
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h
+index 70dbf80f3bb75b..a2b346d91879e5 100644
+--- a/drivers/net/ethernet/intel/idpf/idpf.h
++++ b/drivers/net/ethernet/intel/idpf/idpf.h
+@@ -369,10 +369,28 @@ struct idpf_rss_data {
+ 	u32 *cached_lut;
+ };
+ 
++/**
++ * struct idpf_q_coalesce - User defined coalescing configuration values for
++ *			   a single queue.
++ * @tx_intr_mode: Dynamic TX ITR or not
++ * @rx_intr_mode: Dynamic RX ITR or not
++ * @tx_coalesce_usecs: TX interrupt throttling rate
++ * @rx_coalesce_usecs: RX interrupt throttling rate
++ *
++ * Used to restore user coalescing configuration after a reset.
++ */
++struct idpf_q_coalesce {
++	u32 tx_intr_mode;
++	u32 rx_intr_mode;
++	u32 tx_coalesce_usecs;
++	u32 rx_coalesce_usecs;
++};
++
+ /**
+  * struct idpf_vport_user_config_data - User defined configuration values for
+  *					each vport.
+  * @rss_data: See struct idpf_rss_data
++ * @q_coalesce: Array of per queue coalescing data
+  * @num_req_tx_qs: Number of user requested TX queues through ethtool
+  * @num_req_rx_qs: Number of user requested RX queues through ethtool
+  * @num_req_txq_desc: Number of user requested TX queue descriptors through
+@@ -386,6 +404,7 @@ struct idpf_rss_data {
+  */
+ struct idpf_vport_user_config_data {
+ 	struct idpf_rss_data rss_data;
++	struct idpf_q_coalesce *q_coalesce;
+ 	u16 num_req_tx_qs;
+ 	u16 num_req_rx_qs;
+ 	u32 num_req_txq_desc;
+diff --git a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
+index f72420cf68216c..f0f0ced0d95fed 100644
+--- a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
++++ b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
+@@ -1089,12 +1089,14 @@ static int idpf_get_per_q_coalesce(struct net_device *netdev, u32 q_num,
+ /**
+  * __idpf_set_q_coalesce - set ITR values for specific queue
+  * @ec: ethtool structure from user to update ITR settings
++ * @q_coal: per queue coalesce settings
+  * @qv: queue vector for which itr values has to be set
+  * @is_rxq: is queue type rx
+  *
+  * Returns 0 on success, negative otherwise.
+  */
+ static int __idpf_set_q_coalesce(const struct ethtool_coalesce *ec,
++				 struct idpf_q_coalesce *q_coal,
+ 				 struct idpf_q_vector *qv, bool is_rxq)
+ {
+ 	u32 use_adaptive_coalesce, coalesce_usecs;
+@@ -1138,20 +1140,25 @@ static int __idpf_set_q_coalesce(const struct ethtool_coalesce *ec,
+ 
+ 	if (is_rxq) {
+ 		qv->rx_itr_value = coalesce_usecs;
++		q_coal->rx_coalesce_usecs = coalesce_usecs;
+ 		if (use_adaptive_coalesce) {
+ 			qv->rx_intr_mode = IDPF_ITR_DYNAMIC;
++			q_coal->rx_intr_mode = IDPF_ITR_DYNAMIC;
+ 		} else {
+ 			qv->rx_intr_mode = !IDPF_ITR_DYNAMIC;
+-			idpf_vport_intr_write_itr(qv, qv->rx_itr_value,
+-						  false);
++			q_coal->rx_intr_mode = !IDPF_ITR_DYNAMIC;
++			idpf_vport_intr_write_itr(qv, coalesce_usecs, false);
+ 		}
+ 	} else {
+ 		qv->tx_itr_value = coalesce_usecs;
++		q_coal->tx_coalesce_usecs = coalesce_usecs;
+ 		if (use_adaptive_coalesce) {
+ 			qv->tx_intr_mode = IDPF_ITR_DYNAMIC;
++			q_coal->tx_intr_mode = IDPF_ITR_DYNAMIC;
+ 		} else {
+ 			qv->tx_intr_mode = !IDPF_ITR_DYNAMIC;
+-			idpf_vport_intr_write_itr(qv, qv->tx_itr_value, true);
++			q_coal->tx_intr_mode = !IDPF_ITR_DYNAMIC;
++			idpf_vport_intr_write_itr(qv, coalesce_usecs, true);
+ 		}
+ 	}
+ 
+@@ -1164,6 +1171,7 @@ static int __idpf_set_q_coalesce(const struct ethtool_coalesce *ec,
+ /**
+  * idpf_set_q_coalesce - set ITR values for specific queue
+  * @vport: vport associated to the queue that need updating
++ * @q_coal: per queue coalesce settings
+  * @ec: coalesce settings to program the device with
+  * @q_num: update ITR/INTRL (coalesce) settings for this queue number/index
+  * @is_rxq: is queue type rx
+@@ -1171,6 +1179,7 @@ static int __idpf_set_q_coalesce(const struct ethtool_coalesce *ec,
+  * Return 0 on success, and negative on failure
+  */
+ static int idpf_set_q_coalesce(const struct idpf_vport *vport,
++			       struct idpf_q_coalesce *q_coal,
+ 			       const struct ethtool_coalesce *ec,
+ 			       int q_num, bool is_rxq)
+ {
+@@ -1179,7 +1188,7 @@ static int idpf_set_q_coalesce(const struct idpf_vport *vport,
+ 	qv = is_rxq ? idpf_find_rxq_vec(vport, q_num) :
+ 		      idpf_find_txq_vec(vport, q_num);
+ 
+-	if (qv && __idpf_set_q_coalesce(ec, qv, is_rxq))
++	if (qv && __idpf_set_q_coalesce(ec, q_coal, qv, is_rxq))
+ 		return -EINVAL;
+ 
+ 	return 0;
+@@ -1200,9 +1209,13 @@ static int idpf_set_coalesce(struct net_device *netdev,
+ 			     struct netlink_ext_ack *extack)
+ {
+ 	struct idpf_netdev_priv *np = netdev_priv(netdev);
++	struct idpf_vport_user_config_data *user_config;
++	struct idpf_q_coalesce *q_coal;
+ 	struct idpf_vport *vport;
+ 	int i, err = 0;
+ 
++	user_config = &np->adapter->vport_config[np->vport_idx]->user_config;
++
+ 	idpf_vport_ctrl_lock(netdev);
+ 	vport = idpf_netdev_to_vport(netdev);
+ 
+@@ -1210,13 +1223,15 @@ static int idpf_set_coalesce(struct net_device *netdev,
+ 		goto unlock_mutex;
+ 
+ 	for (i = 0; i < vport->num_txq; i++) {
+-		err = idpf_set_q_coalesce(vport, ec, i, false);
++		q_coal = &user_config->q_coalesce[i];
++		err = idpf_set_q_coalesce(vport, q_coal, ec, i, false);
+ 		if (err)
+ 			goto unlock_mutex;
+ 	}
+ 
+ 	for (i = 0; i < vport->num_rxq; i++) {
+-		err = idpf_set_q_coalesce(vport, ec, i, true);
++		q_coal = &user_config->q_coalesce[i];
++		err = idpf_set_q_coalesce(vport, q_coal, ec, i, true);
+ 		if (err)
+ 			goto unlock_mutex;
+ 	}
+@@ -1238,20 +1253,25 @@ static int idpf_set_coalesce(struct net_device *netdev,
+ static int idpf_set_per_q_coalesce(struct net_device *netdev, u32 q_num,
+ 				   struct ethtool_coalesce *ec)
+ {
++	struct idpf_netdev_priv *np = netdev_priv(netdev);
++	struct idpf_vport_user_config_data *user_config;
++	struct idpf_q_coalesce *q_coal;
+ 	struct idpf_vport *vport;
+ 	int err;
+ 
+ 	idpf_vport_ctrl_lock(netdev);
+ 	vport = idpf_netdev_to_vport(netdev);
++	user_config = &np->adapter->vport_config[np->vport_idx]->user_config;
++	q_coal = &user_config->q_coalesce[q_num];
+ 
+-	err = idpf_set_q_coalesce(vport, ec, q_num, false);
++	err = idpf_set_q_coalesce(vport, q_coal, ec, q_num, false);
+ 	if (err) {
+ 		idpf_vport_ctrl_unlock(netdev);
+ 
+ 		return err;
+ 	}
+ 
+-	err = idpf_set_q_coalesce(vport, ec, q_num, true);
++	err = idpf_set_q_coalesce(vport, q_coal, ec, q_num, true);
+ 
+ 	idpf_vport_ctrl_unlock(netdev);
+ 
+diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
+index 746b655337275f..1468a0f0df2bab 100644
+--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
++++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
+@@ -1089,8 +1089,10 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
+ 	if (!vport)
+ 		return vport;
+ 
++	num_max_q = max(max_q->max_txq, max_q->max_rxq);
+ 	if (!adapter->vport_config[idx]) {
+ 		struct idpf_vport_config *vport_config;
++		struct idpf_q_coalesce *q_coal;
+ 
+ 		vport_config = kzalloc(sizeof(*vport_config), GFP_KERNEL);
+ 		if (!vport_config) {
+@@ -1099,6 +1101,21 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
+ 			return NULL;
+ 		}
+ 
++		q_coal = kcalloc(num_max_q, sizeof(*q_coal), GFP_KERNEL);
++		if (!q_coal) {
++			kfree(vport_config);
++			kfree(vport);
++
++			return NULL;
++		}
++		for (int i = 0; i < num_max_q; i++) {
++			q_coal[i].tx_intr_mode = IDPF_ITR_DYNAMIC;
++			q_coal[i].tx_coalesce_usecs = IDPF_ITR_TX_DEF;
++			q_coal[i].rx_intr_mode = IDPF_ITR_DYNAMIC;
++			q_coal[i].rx_coalesce_usecs = IDPF_ITR_RX_DEF;
++		}
++		vport_config->user_config.q_coalesce = q_coal;
++
+ 		adapter->vport_config[idx] = vport_config;
+ 	}
+ 
+@@ -1108,7 +1125,6 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
+ 	vport->default_vport = adapter->num_alloc_vports <
+ 			       idpf_get_default_vports(adapter);
+ 
+-	num_max_q = max(max_q->max_txq, max_q->max_rxq);
+ 	vport->q_vector_idxs = kcalloc(num_max_q, sizeof(u16), GFP_KERNEL);
+ 	if (!vport->q_vector_idxs)
+ 		goto free_vport;
+diff --git a/drivers/net/ethernet/intel/idpf/idpf_main.c b/drivers/net/ethernet/intel/idpf/idpf_main.c
+index 734da1680c5a48..4c48a1a6aab0d1 100644
+--- a/drivers/net/ethernet/intel/idpf/idpf_main.c
++++ b/drivers/net/ethernet/intel/idpf/idpf_main.c
+@@ -62,6 +62,7 @@ static void idpf_remove(struct pci_dev *pdev)
+ 	destroy_workqueue(adapter->vc_event_wq);
+ 
+ 	for (i = 0; i < adapter->max_vports; i++) {
++		kfree(adapter->vport_config[i]->user_config.q_coalesce);
+ 		kfree(adapter->vport_config[i]);
+ 		adapter->vport_config[i] = NULL;
+ 	}
+diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+index c6c36de58b9d12..4086a6ef352e59 100644
+--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
++++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+@@ -4190,9 +4190,13 @@ static void idpf_vport_intr_napi_add_all(struct idpf_vport *vport)
+ int idpf_vport_intr_alloc(struct idpf_vport *vport)
+ {
+ 	u16 txqs_per_vector, rxqs_per_vector, bufqs_per_vector;
++	struct idpf_vport_user_config_data *user_config;
+ 	struct idpf_q_vector *q_vector;
++	struct idpf_q_coalesce *q_coal;
+ 	u32 complqs_per_vector, v_idx;
++	u16 idx = vport->idx;
+ 
++	user_config = &vport->adapter->vport_config[idx]->user_config;
+ 	vport->q_vectors = kcalloc(vport->num_q_vectors,
+ 				   sizeof(struct idpf_q_vector), GFP_KERNEL);
+ 	if (!vport->q_vectors)
+@@ -4210,14 +4214,15 @@ int idpf_vport_intr_alloc(struct idpf_vport *vport)
+ 
+ 	for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
+ 		q_vector = &vport->q_vectors[v_idx];
++		q_coal = &user_config->q_coalesce[v_idx];
+ 		q_vector->vport = vport;
+ 
+-		q_vector->tx_itr_value = IDPF_ITR_TX_DEF;
+-		q_vector->tx_intr_mode = IDPF_ITR_DYNAMIC;
++		q_vector->tx_itr_value = q_coal->tx_coalesce_usecs;
++		q_vector->tx_intr_mode = q_coal->tx_intr_mode;
+ 		q_vector->tx_itr_idx = VIRTCHNL2_ITR_IDX_1;
+ 
+-		q_vector->rx_itr_value = IDPF_ITR_RX_DEF;
+-		q_vector->rx_intr_mode = IDPF_ITR_DYNAMIC;
++		q_vector->rx_itr_value = q_coal->rx_coalesce_usecs;
++		q_vector->rx_intr_mode = q_coal->rx_intr_mode;
+ 		q_vector->rx_itr_idx = VIRTCHNL2_ITR_IDX_0;
+ 
+ 		if (!zalloc_cpumask_var(&q_vector->affinity_mask, GFP_KERNEL))
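
The idpf changes above all follow one pattern: every ethtool ITR update is
mirrored into persistent per-queue storage, and vector allocation reads that
storage instead of compile-time defaults, so user settings survive a reset
that reallocates the vectors. A compilable toy model (userspace C; all names
hypothetical):

  #include <stdbool.h>
  #include <stdint.h>

  struct q_coal { uint32_t usecs; bool dynamic; };	/* persistent  */
  struct q_vec  { uint32_t usecs; bool dynamic; };	/* reallocated */

  /* ethtool path: update live state and the saved copy together */
  static void set_coalesce(struct q_vec *qv, struct q_coal *saved,
			   uint32_t usecs, bool dynamic)
  {
	qv->usecs   = saved->usecs   = usecs;
	qv->dynamic = saved->dynamic = dynamic;
  }

  /* reset path: new vectors start from the saved copy, not defaults */
  static void restore_coalesce(struct q_vec *qv, const struct q_coal *saved)
  {
	qv->usecs   = saved->usecs;
	qv->dynamic = saved->dynamic;
  }

  int main(void)
  {
	struct q_coal saved = { 50, false };	/* defaults at probe   */
	struct q_vec qv;

	set_coalesce(&qv, &saved, 84, true);	/* user ethtool change */
	restore_coalesce(&qv, &saved);		/* after a reset       */
	return qv.usecs == 84 ? 0 : 1;		/* setting survived    */
  }
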
+diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
+index e212a4ba92751f..499ca700012599 100644
+--- a/drivers/net/ethernet/mediatek/mtk_wed.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed.c
+@@ -2794,7 +2794,6 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
+ 	if (!pdev)
+ 		goto err_of_node_put;
+ 
+-	get_device(&pdev->dev);
+ 	irq = platform_get_irq(pdev, 0);
+ 	if (irq < 0)
+ 		goto err_put_device;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
+index f0744a45db92c3..4e461cb03b83dd 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
+@@ -374,7 +374,7 @@ void mlx5e_reactivate_qos_sq(struct mlx5e_priv *priv, u16 qid, struct netdev_que
+ void mlx5e_reset_qdisc(struct net_device *dev, u16 qid)
+ {
+ 	struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, qid);
+-	struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
++	struct Qdisc *qdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
+ 
+ 	if (!qdisc)
+ 		return;
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+index 3d3f936779f7d9..d6bea71528057b 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
++++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+@@ -3526,10 +3526,6 @@ void ionic_lif_free(struct ionic_lif *lif)
+ 	lif->info = NULL;
+ 	lif->info_pa = 0;
+ 
+-	/* unmap doorbell page */
+-	ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
+-	lif->kern_dbpage = NULL;
+-
+ 	mutex_destroy(&lif->config_lock);
+ 	mutex_destroy(&lif->queue_lock);
+ 
+@@ -3555,6 +3551,9 @@ void ionic_lif_deinit(struct ionic_lif *lif)
+ 	ionic_lif_qcq_deinit(lif, lif->notifyqcq);
+ 	ionic_lif_qcq_deinit(lif, lif->adminqcq);
+ 
++	ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
++	lif->kern_dbpage = NULL;
++
+ 	ionic_lif_reset(lif);
+ }
+ 
+diff --git a/drivers/net/ethernet/ti/icssg/icss_iep.c b/drivers/net/ethernet/ti/icssg/icss_iep.c
+index 2a1c43316f462b..d8c9fe1d98c475 100644
+--- a/drivers/net/ethernet/ti/icssg/icss_iep.c
++++ b/drivers/net/ethernet/ti/icssg/icss_iep.c
+@@ -621,7 +621,8 @@ static int icss_iep_pps_enable(struct icss_iep *iep, int on)
+ 
+ static int icss_iep_extts_enable(struct icss_iep *iep, u32 index, int on)
+ {
+-	u32 val, cap, ret = 0;
++	u32 val, cap;
++	int ret = 0;
+ 
+ 	mutex_lock(&iep->ptp_clk_mutex);
+ 
+@@ -685,10 +686,16 @@ struct icss_iep *icss_iep_get_idx(struct device_node *np, int idx)
+ 	struct platform_device *pdev;
+ 	struct device_node *iep_np;
+ 	struct icss_iep *iep;
++	int ret;
+ 
+ 	iep_np = of_parse_phandle(np, "ti,iep", idx);
+-	if (!iep_np || !of_device_is_available(iep_np))
++	if (!iep_np)
++		return ERR_PTR(-ENODEV);
++
++	if (!of_device_is_available(iep_np)) {
++		of_node_put(iep_np);
+ 		return ERR_PTR(-ENODEV);
++	}
+ 
+ 	pdev = of_find_device_by_node(iep_np);
+ 	of_node_put(iep_np);
+@@ -698,21 +705,28 @@ struct icss_iep *icss_iep_get_idx(struct device_node *np, int idx)
+ 		return ERR_PTR(-EPROBE_DEFER);
+ 
+ 	iep = platform_get_drvdata(pdev);
+-	if (!iep)
+-		return ERR_PTR(-EPROBE_DEFER);
++	if (!iep) {
++		ret = -EPROBE_DEFER;
++		goto err_put_pdev;
++	}
+ 
+ 	device_lock(iep->dev);
+ 	if (iep->client_np) {
+ 		device_unlock(iep->dev);
+ 		dev_err(iep->dev, "IEP is already acquired by %s",
+ 			iep->client_np->name);
+-		return ERR_PTR(-EBUSY);
++		ret = -EBUSY;
++		goto err_put_pdev;
+ 	}
+ 	iep->client_np = np;
+ 	device_unlock(iep->dev);
+-	get_device(iep->dev);
+ 
+ 	return iep;
++
++err_put_pdev:
++	put_device(&pdev->dev);
++
++	return ERR_PTR(ret);
+ }
+ EXPORT_SYMBOL_GPL(icss_iep_get_idx);
+ 
+diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+index 0769e1ade30b4c..ddbc4624ae8876 100644
+--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
++++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+@@ -50,6 +50,8 @@
+ /* CTRLMMR_ICSSG_RGMII_CTRL register bits */
+ #define ICSSG_CTRL_RGMII_ID_MODE                BIT(24)
+ 
++static void emac_adjust_link(struct net_device *ndev);
++
+ static int emac_get_tx_ts(struct prueth_emac *emac,
+ 			  struct emac_tx_ts_response *rsp)
+ {
+@@ -266,6 +268,10 @@ static int prueth_emac_common_start(struct prueth *prueth)
+ 		ret = icssg_config(prueth, emac, slice);
+ 		if (ret)
+ 			goto disable_class;
++
++		mutex_lock(&emac->ndev->phydev->lock);
++		emac_adjust_link(emac->ndev);
++		mutex_unlock(&emac->ndev->phydev->lock);
+ 	}
+ 
+ 	ret = prueth_emac_start(prueth);
+diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
+index a4963766fd996b..0733493cfa6f88 100644
+--- a/drivers/net/hyperv/hyperv_net.h
++++ b/drivers/net/hyperv/hyperv_net.h
+@@ -1061,6 +1061,7 @@ struct net_device_context {
+ 	struct net_device __rcu *vf_netdev;
+ 	struct netvsc_vf_pcpu_stats __percpu *vf_stats;
+ 	struct delayed_work vf_takeover;
++	struct delayed_work vfns_work;
+ 
+ 	/* 1: allocated, serial number is valid. 0: not allocated */
+ 	u32 vf_alloc;
+@@ -1075,6 +1076,8 @@ struct net_device_context {
+ 	struct netvsc_device_info *saved_netvsc_dev_info;
+ };
+ 
++void netvsc_vfns_work(struct work_struct *w);
++
+ /* Azure hosts don't support non-TCP port numbers in hashing for fragmented
+  * packets. We can use ethtool to change UDP hash level when necessary.
+  */
+diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
+index 4376e116eb9f0f..d6fe8b5184a99f 100644
+--- a/drivers/net/hyperv/netvsc_drv.c
++++ b/drivers/net/hyperv/netvsc_drv.c
+@@ -2516,6 +2516,7 @@ static int netvsc_probe(struct hv_device *dev,
+ 	spin_lock_init(&net_device_ctx->lock);
+ 	INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
+ 	INIT_DELAYED_WORK(&net_device_ctx->vf_takeover, netvsc_vf_setup);
++	INIT_DELAYED_WORK(&net_device_ctx->vfns_work, netvsc_vfns_work);
+ 
+ 	net_device_ctx->vf_stats
+ 		= netdev_alloc_pcpu_stats(struct netvsc_vf_pcpu_stats);
+@@ -2658,6 +2659,8 @@ static void netvsc_remove(struct hv_device *dev)
+ 	cancel_delayed_work_sync(&ndev_ctx->dwork);
+ 
+ 	rtnl_lock();
++	cancel_delayed_work_sync(&ndev_ctx->vfns_work);
++
+ 	nvdev = rtnl_dereference(ndev_ctx->nvdev);
+ 	if (nvdev) {
+ 		cancel_work_sync(&nvdev->subchan_work);
+@@ -2699,6 +2702,7 @@ static int netvsc_suspend(struct hv_device *dev)
+ 	cancel_delayed_work_sync(&ndev_ctx->dwork);
+ 
+ 	rtnl_lock();
++	cancel_delayed_work_sync(&ndev_ctx->vfns_work);
+ 
+ 	nvdev = rtnl_dereference(ndev_ctx->nvdev);
+ 	if (nvdev == NULL) {
+@@ -2792,6 +2796,27 @@ static void netvsc_event_set_vf_ns(struct net_device *ndev)
+ 	}
+ }
+ 
++void netvsc_vfns_work(struct work_struct *w)
++{
++	struct net_device_context *ndev_ctx =
++		container_of(w, struct net_device_context, vfns_work.work);
++	struct net_device *ndev;
++
++	if (!rtnl_trylock()) {
++		schedule_delayed_work(&ndev_ctx->vfns_work, 1);
++		return;
++	}
++
++	ndev = hv_get_drvdata(ndev_ctx->device_ctx);
++	if (!ndev)
++		goto out;
++
++	netvsc_event_set_vf_ns(ndev);
++
++out:
++	rtnl_unlock();
++}
++
+ /*
+  * On Hyper-V, every VF interface is matched with a corresponding
+  * synthetic interface. The synthetic interface is presented first
+@@ -2802,10 +2827,12 @@ static int netvsc_netdev_event(struct notifier_block *this,
+ 			       unsigned long event, void *ptr)
+ {
+ 	struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
++	struct net_device_context *ndev_ctx;
+ 	int ret = 0;
+ 
+ 	if (event_dev->netdev_ops == &device_ops && event == NETDEV_REGISTER) {
+-		netvsc_event_set_vf_ns(event_dev);
++		ndev_ctx = netdev_priv(event_dev);
++		schedule_delayed_work(&ndev_ctx->vfns_work, 0);
+ 		return NOTIFY_DONE;
+ 	}
+ 
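
The netvsc change above moves the VF namespace setup out of the netdev
notifier into delayed work that retries politely. The trylock matters because
remove/suspend call cancel_delayed_work_sync() while holding RTNL; a work item
that blocked on rtnl_lock() could deadlock against that cancel. Skeleton of
the pattern (sketch; my_ctx and the work body are hypothetical):

  struct my_ctx {
	struct delayed_work work;
	/* ... */
  };

  static void example_vfns_work(struct work_struct *w)
  {
	struct my_ctx *ctx = container_of(w, struct my_ctx, work.work);

	if (!rtnl_trylock()) {
		/* lock is busy: back off and retry in one jiffy */
		schedule_delayed_work(&ctx->work, 1);
		return;
	}

	/* ... RTNL-protected update goes here ... */

	rtnl_unlock();
  }
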
+diff --git a/drivers/net/pcs/pcs-xpcs-plat.c b/drivers/net/pcs/pcs-xpcs-plat.c
+index 629315f1e57cb3..9dcaf7a66113ed 100644
+--- a/drivers/net/pcs/pcs-xpcs-plat.c
++++ b/drivers/net/pcs/pcs-xpcs-plat.c
+@@ -66,7 +66,7 @@ static int xpcs_mmio_read_reg_indirect(struct dw_xpcs_plat *pxpcs,
+ 	switch (pxpcs->reg_width) {
+ 	case 4:
+ 		writel(page, pxpcs->reg_base + (DW_VR_CSR_VIEWPORT << 2));
+-		ret = readl(pxpcs->reg_base + (ofs << 2));
++		ret = readl(pxpcs->reg_base + (ofs << 2)) & 0xffff;
+ 		break;
+ 	default:
+ 		writew(page, pxpcs->reg_base + (DW_VR_CSR_VIEWPORT << 1));
+@@ -124,7 +124,7 @@ static int xpcs_mmio_read_reg_direct(struct dw_xpcs_plat *pxpcs,
+ 
+ 	switch (pxpcs->reg_width) {
+ 	case 4:
+-		ret = readl(pxpcs->reg_base + (csr << 2));
++		ret = readl(pxpcs->reg_base + (csr << 2)) & 0xffff;
+ 		break;
+ 	default:
+ 		ret = readw(pxpcs->reg_base + (csr << 1));
+diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
+index d2a9cf3fde5ace..9260c822e46758 100644
+--- a/drivers/net/phy/broadcom.c
++++ b/drivers/net/phy/broadcom.c
+@@ -655,7 +655,7 @@ static int bcm5481x_read_abilities(struct phy_device *phydev)
+ {
+ 	struct device_node *np = phydev->mdio.dev.of_node;
+ 	struct bcm54xx_phy_priv *priv = phydev->priv;
+-	int i, val, err;
++	int i, val, err, aneg;
+ 
+ 	for (i = 0; i < ARRAY_SIZE(bcm54811_linkmodes); i++)
+ 		linkmode_clear_bit(bcm54811_linkmodes[i], phydev->supported);
+@@ -676,9 +676,19 @@ static int bcm5481x_read_abilities(struct phy_device *phydev)
+ 		if (val < 0)
+ 			return val;
+ 
++		/* BCM54811 is not capable of LDS but the corresponding bit
++		 * in LRESR is set to 1 and marked "Ignore" in the datasheet.
++		 * So report the BCM54811 as unable to auto-negotiate
++		 * in BroadR-Reach mode.
++		 */
++		if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54811)
++			aneg = 0;
++		else
++			aneg = val & LRESR_LDSABILITY;
++
+ 		linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ 				 phydev->supported,
+-				 val & LRESR_LDSABILITY);
++				 aneg);
+ 		linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT1_Full_BIT,
+ 				 phydev->supported,
+ 				 val & LRESR_100_1PAIR);
+@@ -735,8 +745,15 @@ static int bcm54811_config_aneg(struct phy_device *phydev)
+ 
+ 	/* Aneg firstly. */
+ 	if (priv->brr_mode) {
+-		/* BCM54811 is only capable of autonegotiation in IEEE mode */
+-		phydev->autoneg = 0;
++		/* BCM54811 is only capable of autonegotiation in IEEE mode.
++		 * In BroadR-Reach mode, disable Long Distance Signaling (the
++		 * BRR-mode autoneg supported by other Broadcom PHYs). The
++		 * LDSEN bit is marked "Reserved" and "Default 1, must be
++		 * written to 0 after every device reset" in the datasheet.
++		 */
++		ret = phy_modify(phydev, MII_BCM54XX_LRECR, LRECR_LDSEN, 0);
++		if (ret < 0)
++			return ret;
+ 		ret = bcm_config_lre_aneg(phydev, false);
+ 	} else {
+ 		ret = genphy_config_aneg(phydev);
+diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
+index 64926240b0071d..92e9eb4146d9b2 100644
+--- a/drivers/net/phy/micrel.c
++++ b/drivers/net/phy/micrel.c
+@@ -470,6 +470,8 @@ static const struct kszphy_type ksz8051_type = {
+ 
+ static const struct kszphy_type ksz8081_type = {
+ 	.led_mode_reg		= MII_KSZPHY_CTRL_2,
++	.cable_diag_reg		= KSZ8081_LMD,
++	.pair_mask		= KSZPHY_WIRE_PAIR_MASK,
+ 	.has_broadcast_disable	= true,
+ 	.has_nand_tree_disable	= true,
+ 	.has_rmii_ref_clk_sel	= true,
+@@ -5392,6 +5394,14 @@ static int lan8841_suspend(struct phy_device *phydev)
+ 	return kszphy_generic_suspend(phydev);
+ }
+ 
++static int ksz9131_resume(struct phy_device *phydev)
++{
++	if (phydev->suspended && phy_interface_is_rgmii(phydev))
++		ksz9131_config_rgmii_delay(phydev);
++
++	return kszphy_resume(phydev);
++}
++
+ static struct phy_driver ksphy_driver[] = {
+ {
+ 	.phy_id		= PHY_ID_KS8737,
+@@ -5637,7 +5647,7 @@ static struct phy_driver ksphy_driver[] = {
+ 	.get_strings	= kszphy_get_strings,
+ 	.get_stats	= kszphy_get_stats,
+ 	.suspend	= kszphy_suspend,
+-	.resume		= kszphy_resume,
++	.resume		= ksz9131_resume,
+ 	.cable_test_start	= ksz9x31_cable_test_start,
+ 	.cable_test_get_status	= ksz9x31_cable_test_get_status,
+ 	.get_features	= ksz9477_get_features,
+diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
+index 6a43f6d6e85cb3..de66b621eb9927 100644
+--- a/drivers/net/phy/smsc.c
++++ b/drivers/net/phy/smsc.c
+@@ -784,6 +784,7 @@ static struct phy_driver smsc_phy_driver[] = {
+ 
+ 	/* PHY_BASIC_FEATURES */
+ 
++	.flags		= PHY_RST_AFTER_CLK_EN,
+ 	.probe		= smsc_phy_probe,
+ 
+ 	/* basic functions */
+diff --git a/drivers/net/thunderbolt/main.c b/drivers/net/thunderbolt/main.c
+index 0a53ec293d0408..dcaa62377808c2 100644
+--- a/drivers/net/thunderbolt/main.c
++++ b/drivers/net/thunderbolt/main.c
+@@ -396,9 +396,9 @@ static void tbnet_tear_down(struct tbnet *net, bool send_logout)
+ 
+ 		ret = tb_xdomain_disable_paths(net->xd,
+ 					       net->local_transmit_path,
+-					       net->rx_ring.ring->hop,
++					       net->tx_ring.ring->hop,
+ 					       net->remote_transmit_path,
+-					       net->tx_ring.ring->hop);
++					       net->rx_ring.ring->hop);
+ 		if (ret)
+ 			netdev_warn(net->dev, "failed to disable DMA paths\n");
+ 
+@@ -662,9 +662,9 @@ static void tbnet_connected_work(struct work_struct *work)
+ 		goto err_free_rx_buffers;
+ 
+ 	ret = tb_xdomain_enable_paths(net->xd, net->local_transmit_path,
+-				      net->rx_ring.ring->hop,
++				      net->tx_ring.ring->hop,
+ 				      net->remote_transmit_path,
+-				      net->tx_ring.ring->hop);
++				      net->rx_ring.ring->hop);
+ 	if (ret) {
+ 		netdev_err(net->dev, "failed to enable DMA paths\n");
+ 		goto err_free_tx_buffers;
+@@ -924,8 +924,12 @@ static int tbnet_open(struct net_device *dev)
+ 
+ 	netif_carrier_off(dev);
+ 
+-	ring = tb_ring_alloc_tx(xd->tb->nhi, -1, TBNET_RING_SIZE,
+-				RING_FLAG_FRAME);
++	flags = RING_FLAG_FRAME;
++	/* Only enable full E2E if the other end supports it too */
++	if (tbnet_e2e && net->svc->prtcstns & TBNET_E2E)
++		flags |= RING_FLAG_E2E;
++
++	ring = tb_ring_alloc_tx(xd->tb->nhi, -1, TBNET_RING_SIZE, flags);
+ 	if (!ring) {
+ 		netdev_err(dev, "failed to allocate Tx ring\n");
+ 		return -ENOMEM;
+@@ -944,11 +948,6 @@ static int tbnet_open(struct net_device *dev)
+ 	sof_mask = BIT(TBIP_PDF_FRAME_START);
+ 	eof_mask = BIT(TBIP_PDF_FRAME_END);
+ 
+-	flags = RING_FLAG_FRAME;
+-	/* Only enable full E2E if the other end supports it too */
+-	if (tbnet_e2e && net->svc->prtcstns & TBNET_E2E)
+-		flags |= RING_FLAG_E2E;
+-
+ 	ring = tb_ring_alloc_rx(xd->tb->nhi, -1, TBNET_RING_SIZE, flags,
+ 				net->tx_ring.ring->hop, sof_mask,
+ 				eof_mask, tbnet_start_poll, net);
+diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
+index 9b0318fb50b55c..d9f5942ccc447b 100644
+--- a/drivers/net/usb/asix_devices.c
++++ b/drivers/net/usb/asix_devices.c
+@@ -676,6 +676,7 @@ static int ax88772_init_mdio(struct usbnet *dev)
+ 	priv->mdio->read = &asix_mdio_bus_read;
+ 	priv->mdio->write = &asix_mdio_bus_write;
+ 	priv->mdio->name = "Asix MDIO Bus";
++	priv->mdio->phy_mask = ~(BIT(priv->phy_addr) | BIT(AX_EMBD_PHY_ADDR));
+ 	/* mii bus name is usb-<usb bus number>-<usb device number> */
+ 	snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
+ 		 dev->udev->bus->busnum, dev->udev->devnum);
+diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
+index d5c47a2a62dcc3..4abfdfcf0e289c 100644
+--- a/drivers/net/usb/cdc_ncm.c
++++ b/drivers/net/usb/cdc_ncm.c
+@@ -893,6 +893,10 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
+ 		}
+ 	}
+ 
++	if (ctx->func_desc)
++		ctx->filtering_supported = !!(ctx->func_desc->bmNetworkCapabilities
++			& USB_CDC_NCM_NCAP_ETH_FILTER);
++
+ 	iface_no = ctx->data->cur_altsetting->desc.bInterfaceNumber;
+ 
+ 	/* Device-specific flags */
+@@ -1899,6 +1903,14 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
+ 	}
+ }
+ 
++static void cdc_ncm_update_filter(struct usbnet *dev)
++{
++	struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
++
++	if (ctx->filtering_supported)
++		usbnet_cdc_update_filter(dev);
++}
++
+ static const struct driver_info cdc_ncm_info = {
+ 	.description = "CDC NCM (NO ZLP)",
+ 	.flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET
+@@ -1909,7 +1921,7 @@ static const struct driver_info cdc_ncm_info = {
+ 	.status = cdc_ncm_status,
+ 	.rx_fixup = cdc_ncm_rx_fixup,
+ 	.tx_fixup = cdc_ncm_tx_fixup,
+-	.set_rx_mode = usbnet_cdc_update_filter,
++	.set_rx_mode = cdc_ncm_update_filter,
+ };
+ 
+ /* Same as cdc_ncm_info, but with FLAG_SEND_ZLP  */
+@@ -1923,7 +1935,7 @@ static const struct driver_info cdc_ncm_zlp_info = {
+ 	.status = cdc_ncm_status,
+ 	.rx_fixup = cdc_ncm_rx_fixup,
+ 	.tx_fixup = cdc_ncm_tx_fixup,
+-	.set_rx_mode = usbnet_cdc_update_filter,
++	.set_rx_mode = cdc_ncm_update_filter,
+ };
+ 
+ /* Same as cdc_ncm_info, but with FLAG_SEND_ZLP */
+@@ -1965,7 +1977,7 @@ static const struct driver_info wwan_info = {
+ 	.status = cdc_ncm_status,
+ 	.rx_fixup = cdc_ncm_rx_fixup,
+ 	.tx_fixup = cdc_ncm_tx_fixup,
+-	.set_rx_mode = usbnet_cdc_update_filter,
++	.set_rx_mode = cdc_ncm_update_filter,
+ };
+ 
+ /* Same as wwan_info, but with FLAG_NOARP  */
+@@ -1979,7 +1991,7 @@ static const struct driver_info wwan_noarp_info = {
+ 	.status = cdc_ncm_status,
+ 	.rx_fixup = cdc_ncm_rx_fixup,
+ 	.tx_fixup = cdc_ncm_tx_fixup,
+-	.set_rx_mode = usbnet_cdc_update_filter,
++	.set_rx_mode = cdc_ncm_update_filter,
+ };
+ 
+ static const struct usb_device_id cdc_devs[] = {
+diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
+index b3294287bce1f7..7b6812909ab316 100644
+--- a/drivers/net/wireless/ath/ath10k/core.c
++++ b/drivers/net/wireless/ath/ath10k/core.c
+@@ -2484,12 +2484,50 @@ static int ath10k_init_hw_params(struct ath10k *ar)
+ 	return 0;
+ }
+ 
++static bool ath10k_core_needs_recovery(struct ath10k *ar)
++{
++	long time_left;
++
++	/* Sometimes a recovery attempt fails and every subsequent attempt
++	 * then fails as well, so avoid an infinite recovery loop.
++	 */
++	if (atomic_read(&ar->fail_cont_count) >= ATH10K_RECOVERY_MAX_FAIL_COUNT) {
++		ath10k_err(ar, "consecutive fail %d times, will shutdown driver!",
++			   atomic_read(&ar->fail_cont_count));
++		ar->state = ATH10K_STATE_WEDGED;
++		return false;
++	}
++
++	ath10k_dbg(ar, ATH10K_DBG_BOOT, "total recovery count: %d", ++ar->recovery_count);
++
++	if (atomic_read(&ar->pending_recovery)) {
++		/* Sometimes another recovery work is scheduled before the
++		 * previous one has completed; the second would then destroy
++		 * the first, so wait here to avoid that.
++		 */
++		time_left = wait_for_completion_timeout(&ar->driver_recovery,
++							ATH10K_RECOVERY_TIMEOUT_HZ);
++		if (time_left) {
++			ath10k_warn(ar, "previous recovery succeeded, skip this!\n");
++			return false;
++		}
++
++		/* Record another consecutive recovery failure. */
++		atomic_inc(&ar->fail_cont_count);
++
++		/* Avoid having multiple recoveries at the same time. */
++		return false;
++	}
++
++	atomic_inc(&ar->pending_recovery);
++
++	return true;
++}
++
+ void ath10k_core_start_recovery(struct ath10k *ar)
+ {
+-	if (test_and_set_bit(ATH10K_FLAG_RESTARTING, &ar->dev_flags)) {
+-		ath10k_warn(ar, "already restarting\n");
++	if (!ath10k_core_needs_recovery(ar))
+ 		return;
+-	}
+ 
+ 	queue_work(ar->workqueue, &ar->restart_work);
+ }
+@@ -2525,6 +2563,8 @@ static void ath10k_core_restart(struct work_struct *work)
+ 	struct ath10k *ar = container_of(work, struct ath10k, restart_work);
+ 	int ret;
+ 
++	reinit_completion(&ar->driver_recovery);
++
+ 	set_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags);
+ 
+ 	/* Place a barrier to make sure the compiler doesn't reorder
+@@ -2589,8 +2629,6 @@ static void ath10k_core_restart(struct work_struct *work)
+ 	if (ret)
+ 		ath10k_warn(ar, "failed to send firmware crash dump via devcoredump: %d",
+ 			    ret);
+-
+-	complete(&ar->driver_recovery);
+ }
+ 
+ static void ath10k_core_set_coverage_class_work(struct work_struct *work)
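
The recovery gating added above reduces to two pieces of state: an
at-most-one-pending flag and a consecutive-failure counter that eventually
declares the device wedged. A simplified standalone model (C11 atomics; the
completion-based wait in the real code is elided):

  #include <stdatomic.h>
  #include <stdbool.h>

  #define MAX_CONSECUTIVE_FAILURES 4

  static atomic_int pending, fail_cont;

  static bool recovery_may_start(void)
  {
	if (atomic_load(&fail_cont) >= MAX_CONSECUTIVE_FAILURES)
		return false;		/* wedged: stop trying    */

	int expected = 0;
	if (!atomic_compare_exchange_strong(&pending, &expected, 1))
		return false;		/* one already in flight  */

	return true;
  }

  static void recovery_done(bool ok)
  {
	if (ok)
		atomic_store(&fail_cont, 0);
	else
		atomic_fetch_add(&fail_cont, 1);
	atomic_store(&pending, 0);	/* allow the next attempt */
  }

In the driver, the success path is ath10k_reconfig_complete(), which completes
driver_recovery and zeroes both counters.
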
+diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
+index 446dca74f06a63..85e16c945b5c20 100644
+--- a/drivers/net/wireless/ath/ath10k/core.h
++++ b/drivers/net/wireless/ath/ath10k/core.h
+@@ -4,6 +4,7 @@
+  * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+  * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+  * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+  */
+ 
+ #ifndef _CORE_H_
+@@ -87,6 +88,8 @@
+ 				  IEEE80211_IFACE_SKIP_SDATA_NOT_IN_DRIVER)
+ #define ATH10K_ITER_RESUME_FLAGS (IEEE80211_IFACE_ITER_RESUME_ALL |\
+ 				  IEEE80211_IFACE_SKIP_SDATA_NOT_IN_DRIVER)
++#define ATH10K_RECOVERY_TIMEOUT_HZ			(5 * HZ)
++#define ATH10K_RECOVERY_MAX_FAIL_COUNT			4
+ 
+ struct ath10k;
+ 
+@@ -865,9 +868,6 @@ enum ath10k_dev_flags {
+ 	/* Per Station statistics service */
+ 	ATH10K_FLAG_PEER_STATS,
+ 
+-	/* Indicates that ath10k device is during recovery process and not complete */
+-	ATH10K_FLAG_RESTARTING,
+-
+ 	/* protected by conf_mutex */
+ 	ATH10K_FLAG_NAPI_ENABLED,
+ };
+@@ -1211,6 +1211,11 @@ struct ath10k {
+ 	struct work_struct bundle_tx_work;
+ 	struct work_struct tx_complete_work;
+ 
++	atomic_t pending_recovery;
++	unsigned int recovery_count;
++	/* continuous recovery fail count */
++	atomic_t fail_cont_count;
++
+ 	/* cycle count is reported twice for each visited channel during scan.
+ 	 * access protected by data_lock
+ 	 */
+diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
+index 6b467696bc982c..6493731333abb6 100644
+--- a/drivers/net/wireless/ath/ath10k/mac.c
++++ b/drivers/net/wireless/ath/ath10k/mac.c
+@@ -8125,7 +8125,12 @@ static void ath10k_reconfig_complete(struct ieee80211_hw *hw,
+ 		ath10k_info(ar, "device successfully recovered\n");
+ 		ar->state = ATH10K_STATE_ON;
+ 		ieee80211_wake_queues(ar->hw);
+-		clear_bit(ATH10K_FLAG_RESTARTING, &ar->dev_flags);
++
++		/* Clear recovery state. */
++		complete(&ar->driver_recovery);
++		atomic_set(&ar->fail_cont_count, 0);
++		atomic_set(&ar->pending_recovery, 0);
++
+ 		if (ar->hw_params.hw_restart_disconnect) {
+ 			list_for_each_entry(arvif, &ar->arvifs, list) {
+ 				if (arvif->is_up && arvif->vdev_type == WMI_VDEV_TYPE_STA)
+diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
+index 5e061f7525a6bd..09066e6aca4025 100644
+--- a/drivers/net/wireless/ath/ath10k/wmi.c
++++ b/drivers/net/wireless/ath/ath10k/wmi.c
+@@ -4,6 +4,7 @@
+  * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+  * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+  * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+  */
+ 
+ #include <linux/skbuff.h>
+@@ -1941,6 +1942,11 @@ int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id)
+ 	}
+ 
+ 	wait_event_timeout(ar->wmi.tx_credits_wq, ({
++		if (ar->state == ATH10K_STATE_WEDGED) {
++			ret = -ESHUTDOWN;
++			ath10k_dbg(ar, ATH10K_DBG_WMI,
++				   "drop wmi command %d, hardware is wedged\n", cmd_id);
++		}
+ 		/* try to send pending beacons first. they take priority */
+ 		ath10k_wmi_tx_beacons_nowait(ar);
+ 
+diff --git a/drivers/net/wireless/ath/ath12k/dp.c b/drivers/net/wireless/ath/ath12k/dp.c
+index 217eb57663f058..3244f7c3ca4f1a 100644
+--- a/drivers/net/wireless/ath/ath12k/dp.c
++++ b/drivers/net/wireless/ath/ath12k/dp.c
+@@ -79,6 +79,7 @@ int ath12k_dp_peer_setup(struct ath12k *ar, int vdev_id, const u8 *addr)
+ 	ret = ath12k_dp_rx_peer_frag_setup(ar, addr, vdev_id);
+ 	if (ret) {
+ 		ath12k_warn(ab, "failed to setup rx defrag context\n");
++		tid--;
+ 		goto peer_clean;
+ 	}
+ 
+@@ -96,7 +97,7 @@ int ath12k_dp_peer_setup(struct ath12k *ar, int vdev_id, const u8 *addr)
+ 		return -ENOENT;
+ 	}
+ 
+-	for (; tid >= 0; tid--)
++	for (tid--; tid >= 0; tid--)
+ 		ath12k_dp_rx_peer_tid_delete(ar, peer, tid);
+ 
+ 	spin_unlock_bh(&ab->base_lock);
+diff --git a/drivers/net/wireless/ath/ath12k/hw.c b/drivers/net/wireless/ath/ath12k/hw.c
+index e3eb22bb9e1cb9..057ef2d282b256 100644
+--- a/drivers/net/wireless/ath/ath12k/hw.c
++++ b/drivers/net/wireless/ath/ath12k/hw.c
+@@ -1084,7 +1084,7 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
+ 		.download_calib = true,
+ 		.supports_suspend = false,
+ 		.tcl_ring_retry = true,
+-		.reoq_lut_support = false,
++		.reoq_lut_support = true,
+ 		.supports_shadow_regs = false,
+ 
+ 		.num_tcl_banks = 48,
+diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c
+index 4ca684278c3672..4b3fbec397ac0e 100644
+--- a/drivers/net/wireless/ath/ath12k/mac.c
++++ b/drivers/net/wireless/ath/ath12k/mac.c
+@@ -9218,6 +9218,7 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
+ 
+ 	wiphy->mbssid_max_interfaces = mbssid_max_interfaces;
+ 	wiphy->ema_max_profile_periodicity = TARGET_EMA_MAX_PROFILE_PERIOD;
++	ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID);
+ 
+ 	if (is_6ghz) {
+ 		wiphy_ext_feature_set(wiphy,
+diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c
+index d5892e17494f7d..5c5fc2b7642f61 100644
+--- a/drivers/net/wireless/ath/ath12k/wmi.c
++++ b/drivers/net/wireless/ath/ath12k/wmi.c
+@@ -5229,6 +5229,11 @@ static int wmi_process_mgmt_tx_comp(struct ath12k *ar, u32 desc_id,
+ 	dma_unmap_single(ar->ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+ 
+ 	info = IEEE80211_SKB_CB(msdu);
++	memset(&info->status, 0, sizeof(info->status));
++
++	/* skip tx rate update from ieee80211_status */
++	info->status.rates[0].idx = -1;
++
+ 	if ((!(info->flags & IEEE80211_TX_CTL_NO_ACK)) && !status)
+ 		info->flags |= IEEE80211_TX_STAT_ACK;
+ 
+diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
+index 05c4af41bdb960..a94cf27ffe4b0c 100644
+--- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c
++++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
+@@ -1575,8 +1575,11 @@ il4965_tx_cmd_build_rate(struct il_priv *il,
+ 	    || rate_idx > RATE_COUNT_LEGACY)
+ 		rate_idx = rate_lowest_index(&il->bands[info->band], sta);
+ 	/* For 5 GHZ band, remap mac80211 rate indices into driver indices */
+-	if (info->band == NL80211_BAND_5GHZ)
++	if (info->band == NL80211_BAND_5GHZ) {
+ 		rate_idx += IL_FIRST_OFDM_RATE;
++		if (rate_idx > IL_LAST_OFDM_RATE)
++			rate_idx = IL_LAST_OFDM_RATE;
++	}
+ 	/* Get PLCP rate for tx_cmd->rate_n_flags */
+ 	rate_plcp = il_rates[rate_idx].plcp;
+ 	/* Zero out flags for this packet */
+diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
+index 8879e668ef0da0..ed964103281ed5 100644
+--- a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
++++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
+@@ -2899,7 +2899,7 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
+ 		/* Repeat initial/next rate.
+ 		 * For legacy IWL_NUMBER_TRY == 1, this loop will not execute.
+ 		 * For HT IWL_HT_NUMBER_TRY == 3, this executes twice. */
+-		while (repeat_rate > 0 && (index < LINK_QUAL_MAX_RETRY_NUM)) {
++		while (repeat_rate > 0 && index < (LINK_QUAL_MAX_RETRY_NUM - 1)) {
+ 			if (is_legacy(tbl_type.lq_type)) {
+ 				if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
+ 					ant_toggle_cnt++;
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+index cd284767ff4bad..385755af82374a 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
++++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+@@ -2955,6 +2955,7 @@ int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
+ 	struct iwl_fw_dump_desc *desc;
+ 	unsigned int delay = 0;
+ 	bool monitor_only = false;
++	int ret;
+ 
+ 	if (trigger) {
+ 		u16 occurrences = le16_to_cpu(trigger->occurrences) - 1;
+@@ -2985,7 +2986,11 @@ int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
+ 	desc->trig_desc.type = cpu_to_le32(trig);
+ 	memcpy(desc->trig_desc.data, str, len);
+ 
+-	return iwl_fw_dbg_collect_desc(fwrt, desc, monitor_only, delay);
++	ret = iwl_fw_dbg_collect_desc(fwrt, desc, monitor_only, delay);
++	if (ret)
++		kfree(desc);
++
++	return ret;
+ }
+ IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect);
+ 
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+index 6a4300c01d41d1..7e258dcdf50104 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+@@ -2375,6 +2375,7 @@ static void iwl_mvm_convert_gtk_v2(struct iwl_wowlan_status_data *status,
+ 
+ 	status->gtk[0].len = data->key_len;
+ 	status->gtk[0].flags = data->key_flags;
++	status->gtk[0].id = status->gtk[0].flags & IWL_WOWLAN_GTK_IDX_MASK;
+ 
+ 	memcpy(status->gtk[0].key, data->key, sizeof(data->key));
+ 
+@@ -2686,6 +2687,7 @@ iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm, u8 sta_id)
+ 		 * currently used key.
+ 		 */
+ 		status->gtk[0].flags = v6->gtk.key_index | BIT(7);
++		status->gtk[0].id = v6->gtk.key_index;
+ 	} else if (notif_ver == 7) {
+ 		struct iwl_wowlan_status_v7 *v7 = (void *)cmd.resp_pkt->data;
+ 
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+index 0b52d77f578375..64bd5d10765dc6 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+@@ -854,10 +854,15 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
+ 	 * already ahead and it will be dropped.
+ 	 * If the last sub-frame is not on this queue - we will get frame
+ 	 * release notification with up to date NSSN.
++	 * If this is the first frame that is stored in the buffer, the head_sn
++	 * may be outdated. Update it based on the last NSSN to make sure it
++	 * will be released when the frame release notification arrives.
+ 	 */
+ 	if (!amsdu || last_subframe)
+ 		iwl_mvm_release_frames(mvm, sta, napi, baid_data,
+ 				       buffer, nssn);
++	else if (buffer->num_stored == 1)
++		buffer->head_sn = nssn;
+ 
+ 	spin_unlock_bh(&buffer->lock);
+ 	return true;
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+index ddcbd80a49fb2b..853b95709a7927 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+@@ -835,7 +835,7 @@ static inline bool iwl_mvm_scan_fits(struct iwl_mvm *mvm, int n_ssids,
+ 				     int n_channels)
+ {
+ 	return ((n_ssids <= PROBE_OPTION_MAX) &&
+-		(n_channels <= mvm->fw->ucode_capa.n_scan_channels) &
++		(n_channels <= mvm->fw->ucode_capa.n_scan_channels) &&
+ 		(ies->common_ie_len +
+ 		 ies->len[NL80211_BAND_2GHZ] + ies->len[NL80211_BAND_5GHZ] +
+ 		 ies->len[NL80211_BAND_6GHZ] <=
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+index 87d0dd040001c5..3398c25cb03c0d 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+@@ -2071,16 +2071,21 @@ static int mt7915_load_firmware(struct mt7915_dev *dev)
+ {
+ 	int ret;
+ 
+-	/* make sure fw is download state */
+-	if (mt7915_firmware_state(dev, false)) {
+-		/* restart firmware once */
+-		mt76_connac_mcu_restart(&dev->mt76);
+-		ret = mt7915_firmware_state(dev, false);
+-		if (ret) {
+-			dev_err(dev->mt76.dev,
+-				"Firmware is not ready for download\n");
+-			return ret;
+-		}
++	/* Release Semaphore if taken by previous failed attempt */
++	ret = mt76_connac_mcu_patch_sem_ctrl(&dev->mt76, false);
++	if (ret != PATCH_REL_SEM_SUCCESS) {
++		dev_err(dev->mt76.dev, "Could not release semaphore\n");
++		/* Continue anyways */
++	}
++
++	/* Always restart MCU firmware */
++	mt76_connac_mcu_restart(&dev->mt76);
++
++	/* Check if MCU is ready */
++	ret = mt7915_firmware_state(dev, false);
++	if (ret) {
++		dev_err(dev->mt76.dev, "Firmware did not enter download state\n");
++		return ret;
+ 	}
+ 
+ 	ret = mt76_connac2_load_patch(&dev->mt76, fw_name_var(dev, ROM_PATCH));
+diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
+index 898f597f70a96d..d080469264cf89 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
++++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
+@@ -572,8 +572,11 @@ static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw,
+ 		dma_map_single(&rtlpci->pdev->dev, skb_tail_pointer(skb),
+ 			       rtlpci->rxbuffersize, DMA_FROM_DEVICE);
+ 	bufferaddress = *((dma_addr_t *)skb->cb);
+-	if (dma_mapping_error(&rtlpci->pdev->dev, bufferaddress))
++	if (dma_mapping_error(&rtlpci->pdev->dev, bufferaddress)) {
++		if (!new_skb)
++			kfree_skb(skb);
+ 		return 0;
++	}
+ 	rtlpci->rx_ring[rxring_idx].rx_buf[desc_idx] = skb;
+ 	if (rtlpriv->use_new_trx_flow) {
+ 		/* skb->cb may be 64 bit address */
+@@ -802,13 +805,19 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
+ 		skb = new_skb;
+ no_new:
+ 		if (rtlpriv->use_new_trx_flow) {
+-			_rtl_pci_init_one_rxdesc(hw, skb, (u8 *)buffer_desc,
+-						 rxring_idx,
+-						 rtlpci->rx_ring[rxring_idx].idx);
++			if (!_rtl_pci_init_one_rxdesc(hw, skb, (u8 *)buffer_desc,
++						      rxring_idx,
++						      rtlpci->rx_ring[rxring_idx].idx)) {
++				if (new_skb)
++					dev_kfree_skb_any(skb);
++			}
+ 		} else {
+-			_rtl_pci_init_one_rxdesc(hw, skb, (u8 *)pdesc,
+-						 rxring_idx,
+-						 rtlpci->rx_ring[rxring_idx].idx);
++			if (!_rtl_pci_init_one_rxdesc(hw, skb, (u8 *)pdesc,
++						      rxring_idx,
++						      rtlpci->rx_ring[rxring_idx].idx)) {
++				if (new_skb)
++					dev_kfree_skb_any(skb);
++			}
+ 			if (rtlpci->rx_ring[rxring_idx].idx ==
+ 			    rtlpci->rxringcount - 1)
+ 				rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc,
+diff --git a/drivers/net/wireless/realtek/rtw89/chan.c b/drivers/net/wireless/realtek/rtw89/chan.c
+index 4df4e04c3e67d7..559aa60e3df042 100644
+--- a/drivers/net/wireless/realtek/rtw89/chan.c
++++ b/drivers/net/wireless/realtek/rtw89/chan.c
+@@ -2682,6 +2682,9 @@ int rtw89_chanctx_ops_assign_vif(struct rtw89_dev *rtwdev,
+ 	rtwvif_link->chanctx_assigned = true;
+ 	cfg->ref_count++;
+ 
++	if (rtwdev->scanning)
++		rtw89_hw_scan_abort(rtwdev, rtwdev->scan_info.scanning_vif);
++
+ 	if (list_empty(&rtwvif->mgnt_entry))
+ 		list_add_tail(&rtwvif->mgnt_entry, &mgnt->active_list);
+ 
+@@ -2715,6 +2718,9 @@ void rtw89_chanctx_ops_unassign_vif(struct rtw89_dev *rtwdev,
+ 	rtwvif_link->chanctx_assigned = false;
+ 	cfg->ref_count--;
+ 
++	if (rtwdev->scanning)
++		rtw89_hw_scan_abort(rtwdev, rtwdev->scan_info.scanning_vif);
++
+ 	if (!rtw89_vif_is_active_role(rtwvif))
+ 		list_del_init(&rtwvif->mgnt_entry);
+ 
+diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c
+index 7dbce3b10a7de4..10a3a66a9981d8 100644
+--- a/drivers/net/wireless/realtek/rtw89/fw.c
++++ b/drivers/net/wireless/realtek/rtw89/fw.c
+@@ -5934,13 +5934,18 @@ static int rtw89_fw_read_c2h_reg(struct rtw89_dev *rtwdev,
+ 	const struct rtw89_chip_info *chip = rtwdev->chip;
+ 	struct rtw89_fw_info *fw_info = &rtwdev->fw;
+ 	const u32 *c2h_reg = chip->c2h_regs;
+-	u32 ret;
++	u32 ret, timeout;
+ 	u8 i, val;
+ 
+ 	info->id = RTW89_FWCMD_C2HREG_FUNC_NULL;
+ 
++	if (rtwdev->hci.type == RTW89_HCI_TYPE_USB)
++		timeout = RTW89_C2H_TIMEOUT_USB;
++	else
++		timeout = RTW89_C2H_TIMEOUT;
++
+ 	ret = read_poll_timeout_atomic(rtw89_read8, val, val, 1,
+-				       RTW89_C2H_TIMEOUT, false, rtwdev,
++				       timeout, false, rtwdev,
+ 				       chip->c2h_ctrl_reg);
+ 	if (ret) {
+ 		rtw89_warn(rtwdev, "c2h reg timeout\n");
+diff --git a/drivers/net/wireless/realtek/rtw89/fw.h b/drivers/net/wireless/realtek/rtw89/fw.h
+index 502ece540b9dca..2981d6eeb678d0 100644
+--- a/drivers/net/wireless/realtek/rtw89/fw.h
++++ b/drivers/net/wireless/realtek/rtw89/fw.h
+@@ -97,6 +97,8 @@ struct rtw89_h2creg_sch_tx_en {
+ #define RTW89_C2HREG_HDR_LEN 2
+ #define RTW89_H2CREG_HDR_LEN 2
+ #define RTW89_C2H_TIMEOUT 1000000
++#define RTW89_C2H_TIMEOUT_USB 4000
++
+ struct rtw89_mac_c2h_info {
+ 	u8 id;
+ 	u8 content_len;
+diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c
+index 2188bca899e392..8b7ca63af7ed0a 100644
+--- a/drivers/net/wireless/realtek/rtw89/mac.c
++++ b/drivers/net/wireless/realtek/rtw89/mac.c
+@@ -1441,6 +1441,23 @@ void rtw89_mac_notify_wake(struct rtw89_dev *rtwdev)
+ 	rtw89_mac_send_rpwm(rtwdev, state, true);
+ }
+ 
++static void rtw89_mac_power_switch_boot_mode(struct rtw89_dev *rtwdev)
++{
++	u32 boot_mode;
++
++	if (rtwdev->hci.type != RTW89_HCI_TYPE_USB)
++		return;
++
++	boot_mode = rtw89_read32_mask(rtwdev, R_AX_GPIO_MUXCFG, B_AX_BOOT_MODE);
++	if (!boot_mode)
++		return;
++
++	rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_APFN_ONMAC);
++	rtw89_write32_clr(rtwdev, R_AX_SYS_STATUS1, B_AX_AUTO_WLPON);
++	rtw89_write32_clr(rtwdev, R_AX_GPIO_MUXCFG, B_AX_BOOT_MODE);
++	rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST);
++}
++
+ static int rtw89_mac_power_switch(struct rtw89_dev *rtwdev, bool on)
+ {
+ #define PWR_ACT 1
+@@ -1450,6 +1467,8 @@ static int rtw89_mac_power_switch(struct rtw89_dev *rtwdev, bool on)
+ 	int ret;
+ 	u8 val;
+ 
++	rtw89_mac_power_switch_boot_mode(rtwdev);
++
+ 	if (on) {
+ 		cfg_seq = chip->pwr_on_seq;
+ 		cfg_func = chip->ops->pwr_on_func;
+diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h
+index 9fbcc7fee290fa..7ec516979fe2b9 100644
+--- a/drivers/net/wireless/realtek/rtw89/reg.h
++++ b/drivers/net/wireless/realtek/rtw89/reg.h
+@@ -182,6 +182,7 @@
+ 
+ #define R_AX_SYS_STATUS1 0x00F4
+ #define B_AX_SEL_0XC0_MASK GENMASK(17, 16)
++#define B_AX_AUTO_WLPON BIT(10)
+ #define B_AX_PAD_HCI_SEL_V2_MASK GENMASK(5, 3)
+ #define MAC_AX_HCI_SEL_SDIO_UART 0
+ #define MAC_AX_HCI_SEL_MULTI_USB 1
+diff --git a/drivers/net/wireless/realtek/rtw89/wow.c b/drivers/net/wireless/realtek/rtw89/wow.c
+index 3e81fd974ec180..fdb715dc175c1e 100644
+--- a/drivers/net/wireless/realtek/rtw89/wow.c
++++ b/drivers/net/wireless/realtek/rtw89/wow.c
+@@ -1415,6 +1415,8 @@ static void rtw89_fw_release_pno_pkt_list(struct rtw89_dev *rtwdev,
+ static int rtw89_pno_scan_update_probe_req(struct rtw89_dev *rtwdev,
+ 					   struct rtw89_vif_link *rtwvif_link)
+ {
++	static const u8 basic_rate_ie[] = {WLAN_EID_SUPP_RATES, 0x08,
++		 0x0c, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6c};
+ 	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ 	struct cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config;
+ 	u8 num = nd_config->n_match_sets, i;
+@@ -1426,10 +1428,11 @@ static int rtw89_pno_scan_update_probe_req(struct rtw89_dev *rtwdev,
+ 		skb = ieee80211_probereq_get(rtwdev->hw, rtwvif_link->mac_addr,
+ 					     nd_config->match_sets[i].ssid.ssid,
+ 					     nd_config->match_sets[i].ssid.ssid_len,
+-					     nd_config->ie_len);
++					     nd_config->ie_len + sizeof(basic_rate_ie));
+ 		if (!skb)
+ 			return -ENOMEM;
+ 
++		skb_put_data(skb, basic_rate_ie, sizeof(basic_rate_ie));
+ 		skb_put_data(skb, nd_config->ie, nd_config->ie_len);
+ 
+ 		info = kzalloc(sizeof(*info), GFP_KERNEL);
+diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
+index 809b407cece15e..2042399e890aa3 100644
+--- a/drivers/net/xen-netfront.c
++++ b/drivers/net/xen-netfront.c
+@@ -637,8 +637,6 @@ static int xennet_xdp_xmit_one(struct net_device *dev,
+ 	tx_stats->packets++;
+ 	u64_stats_update_end(&tx_stats->syncp);
+ 
+-	xennet_tx_buf_gc(queue);
+-
+ 	return 0;
+ }
+ 
+@@ -848,9 +846,6 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
+ 	tx_stats->packets++;
+ 	u64_stats_update_end(&tx_stats->syncp);
+ 
+-	/* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
+-	xennet_tx_buf_gc(queue);
+-
+ 	if (!netfront_tx_slot_available(queue))
+ 		netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
+ 
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index 37fd1a8ace127e..2bddc9f60fecc9 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -1888,8 +1888,28 @@ static int nvme_pci_configure_admin_queue(struct nvme_dev *dev)
+ 	 * might be pointing at!
+ 	 */
+ 	result = nvme_disable_ctrl(&dev->ctrl, false);
+-	if (result < 0)
+-		return result;
++	if (result < 0) {
++		struct pci_dev *pdev = to_pci_dev(dev->dev);
++
++		/*
++		 * The NVMe Controller Reset method did not get an expected
++		 * CSTS.RDY transition, so something with the device appears to
++		 * be stuck. Use the lower level and bigger hammer PCIe
++		 * Function Level Reset to attempt restoring the device to its
++		 * initial state, and try again.
++		 */
++		result = pcie_reset_flr(pdev, false);
++		if (result < 0)
++			return result;
++
++		pci_restore_state(pdev);
++		result = nvme_disable_ctrl(&dev->ctrl, false);
++		if (result < 0)
++			return result;
++
++		dev_info(dev->ctrl.device,
++			"controller reset completed after pcie flr\n");
++	}
+ 
+ 	result = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH);
+ 	if (result)
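
The nvme hunk above layers its resets: if the controller-level disable never
sees the expected CSTS.RDY transition, it falls back to a PCIe Function Level
Reset, restores config space (which FLR clears), and retries once. In outline
(sketch; pcie_reset_flr()'s second argument selects probe-only mode when true):

  ret = nvme_disable_ctrl(&dev->ctrl, false);
  if (ret < 0) {
	ret = pcie_reset_flr(pdev, false);	/* false = really reset */
	if (ret < 0)
		return ret;			/* even FLR failed      */
	pci_restore_state(pdev);		/* FLR wipes config     */
	ret = nvme_disable_ctrl(&dev->ctrl, false);
	if (ret < 0)
		return ret;			/* one retry, then give up */
  }
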
+diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
+index 25e486e6e8054a..83a6b18b01ada0 100644
+--- a/drivers/nvme/host/tcp.c
++++ b/drivers/nvme/host/tcp.c
+@@ -1777,9 +1777,14 @@ static int nvme_tcp_start_tls(struct nvme_ctrl *nctrl,
+ 			qid, ret);
+ 		tls_handshake_cancel(queue->sock->sk);
+ 	} else {
+-		dev_dbg(nctrl->device,
+-			"queue %d: TLS handshake complete, error %d\n",
+-			qid, queue->tls_err);
++		if (queue->tls_err) {
++			dev_err(nctrl->device,
++				"queue %d: TLS handshake complete, error %d\n",
++				qid, queue->tls_err);
++		} else {
++			dev_dbg(nctrl->device,
++				"queue %d: TLS handshake complete\n", qid);
++		}
+ 		ret = queue->tls_err;
+ 	}
+ 	return ret;
+diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
+index af370628e58393..99c58ee09fbb0b 100644
+--- a/drivers/pci/pci-acpi.c
++++ b/drivers/pci/pci-acpi.c
+@@ -816,13 +816,11 @@ int pci_acpi_program_hp_params(struct pci_dev *dev)
+ bool pciehp_is_native(struct pci_dev *bridge)
+ {
+ 	const struct pci_host_bridge *host;
+-	u32 slot_cap;
+ 
+ 	if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
+ 		return false;
+ 
+-	pcie_capability_read_dword(bridge, PCI_EXP_SLTCAP, &slot_cap);
+-	if (!(slot_cap & PCI_EXP_SLTCAP_HPC))
++	if (!bridge->is_pciehp)
+ 		return false;
+ 
+ 	if (pcie_ports_native)
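This simplification is enabled by the probe.c hunk further down, which latches the Hot-Plug Capable bit from the Slot Capabilities register into pdev->is_pciehp once at enumeration, so this query path no longer touches config space. A trivial standalone sketch of the cache-the-capability-bit pattern (names and the bit position are illustrative):

#include <stdbool.h>
#include <stdio.h>

#define SLTCAP_HPC	(1u << 6)	/* Hot-Plug Capable */

struct dev_cache {
	unsigned int is_pciehp:1;
};

/* Pretend config-space read of the Slot Capabilities register. */
static unsigned int read_sltcap(void)
{
	return SLTCAP_HPC;
}

/* Done once at enumeration time. */
static void enumerate(struct dev_cache *d)
{
	d->is_pciehp = !!(read_sltcap() & SLTCAP_HPC);
}

/* Query path: a plain bit test, no config-space access needed. */
static bool hotplug_is_native(const struct dev_cache *d)
{
	return d->is_pciehp;
}

int main(void)
{
	struct dev_cache d = {};

	enumerate(&d);
	printf("native hotplug: %s\n", hotplug_is_native(&d) ? "yes" : "no");
	return 0;
}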
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 51a09e48967f23..3d1365f558d3a3 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -3023,8 +3023,12 @@ static const struct dmi_system_id bridge_d3_blacklist[] = {
+  * pci_bridge_d3_possible - Is it possible to put the bridge into D3
+  * @bridge: Bridge to check
+  *
+- * This function checks if it is possible to move the bridge to D3.
+- * Currently we only allow D3 for recent enough PCIe ports and Thunderbolt.
++ * Currently we only allow D3 for some PCIe ports and for Thunderbolt.
++ *
++ * Return: Whether it is possible to move the bridge to D3.
++ *
++ * The return value is guaranteed to be constant across the entire lifetime
++ * of the bridge, including its hot-removal.
+  */
+ bool pci_bridge_d3_possible(struct pci_dev *bridge)
+ {
+@@ -3068,10 +3072,10 @@ bool pci_bridge_d3_possible(struct pci_dev *bridge)
+ 			return false;
+ 
+ 		/*
+-		 * It should be safe to put PCIe ports from 2015 or newer
+-		 * to D3.
++		 * Out of caution, we only allow PCIe ports from 2015 or newer
++		 * into D3 on x86.
+ 		 */
+-		if (dmi_get_bios_year() >= 2015)
++		if (!IS_ENABLED(CONFIG_X86) || dmi_get_bios_year() >= 2015)
+ 			return true;
+ 		break;
+ 	}
+@@ -6199,38 +6203,66 @@ u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
+ EXPORT_SYMBOL(pcie_bandwidth_available);
+ 
+ /**
+- * pcie_get_speed_cap - query for the PCI device's link speed capability
++ * pcie_get_supported_speeds - query Supported Link Speed Vector
+  * @dev: PCI device to query
+  *
+- * Query the PCI device speed capability.  Return the maximum link speed
+- * supported by the device.
++ * Query @dev supported link speeds.
++ *
++ * Implementation Note in PCIe r6.0 sec 7.5.3.18 recommends determining
++ * supported link speeds using the Supported Link Speeds Vector in the Link
++ * Capabilities 2 Register (when available).
++ *
++ * Link Capabilities 2 was added in PCIe r3.0, sec 7.8.18.
++ *
++ * Without Link Capabilities 2, i.e., prior to PCIe r3.0, Supported Link
++ * Speeds field in Link Capabilities is used and only 2.5 GT/s and 5.0 GT/s
++ * speeds were defined.
++ *
++ * For @dev without Supported Link Speed Vector, the field is synthesized
++ * from the Max Link Speed field in the Link Capabilities Register.
++ *
++ * Return: Supported Link Speeds Vector (+ reserved 0 at LSB).
+  */
+-enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev)
++u8 pcie_get_supported_speeds(struct pci_dev *dev)
+ {
+ 	u32 lnkcap2, lnkcap;
++	u8 speeds;
+ 
+ 	/*
+-	 * Link Capabilities 2 was added in PCIe r3.0, sec 7.8.18.  The
+-	 * implementation note there recommends using the Supported Link
+-	 * Speeds Vector in Link Capabilities 2 when supported.
+-	 *
+-	 * Without Link Capabilities 2, i.e., prior to PCIe r3.0, software
+-	 * should use the Supported Link Speeds field in Link Capabilities,
+-	 * where only 2.5 GT/s and 5.0 GT/s speeds were defined.
++	 * Speeds retain the reserved 0 at LSB before PCIe Supported Link
++	 * Speeds Vector to allow using SLS Vector bit defines directly.
+ 	 */
+ 	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2);
++	speeds = lnkcap2 & PCI_EXP_LNKCAP2_SLS;
++
++	/* Ignore speeds higher than Max Link Speed */
++	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
++	speeds &= GENMASK(lnkcap & PCI_EXP_LNKCAP_SLS, 0);
+ 
+ 	/* PCIe r3.0-compliant */
+-	if (lnkcap2)
+-		return PCIE_LNKCAP2_SLS2SPEED(lnkcap2);
++	if (speeds)
++		return speeds;
+ 
+-	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
++	/* Synthesize from the Max Link Speed field */
+ 	if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_5_0GB)
+-		return PCIE_SPEED_5_0GT;
++		speeds = PCI_EXP_LNKCAP2_SLS_5_0GB | PCI_EXP_LNKCAP2_SLS_2_5GB;
+ 	else if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_2_5GB)
+-		return PCIE_SPEED_2_5GT;
++		speeds = PCI_EXP_LNKCAP2_SLS_2_5GB;
++
++	return speeds;
++}
+ 
+-	return PCI_SPEED_UNKNOWN;
++/**
++ * pcie_get_speed_cap - query for the PCI device's link speed capability
++ * @dev: PCI device to query
++ *
++ * Query the PCI device speed capability.
++ *
++ * Return: the maximum link speed supported by the device.
++ */
++enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev)
++{
++	return PCIE_LNKCAP2_SLS2SPEED(dev->supported_speeds);
+ }
+ EXPORT_SYMBOL(pcie_get_speed_cap);
+ 
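The rework above keeps the reserved bit 0 in the returned vector so the PCI_EXP_LNKCAP2_SLS_* bit defines apply directly, and clamps the vector with GENMASK(max_link_speed, 0) so speeds above the Max Link Speed field are ignored. A standalone sketch of decoding such a vector (the bit-to-generation mapping is assumed to match the PCI_EXP_LNKCAP2_SLS_* layout):

#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))

/* Bit 0 reserved, bit 1 = 2.5 GT/s, bit 2 = 5.0 GT/s, ... */
static const char *gen_name[] = {
	"reserved", "2.5 GT/s", "5.0 GT/s", "8.0 GT/s",
	"16.0 GT/s", "32.0 GT/s", "64.0 GT/s",
};

int main(void)
{
	uint8_t speeds = 0x3e;		/* example: 2.5 through 32.0 */
	unsigned int max_sls = 3;	/* Max Link Speed field: 8.0 GT/s */

	/* Same clamp as the patch: drop bits above Max Link Speed. */
	speeds &= GENMASK(max_sls, 0);

	for (unsigned int i = 1; i < 7; i++)
		if (speeds & (1u << i))
			printf("supported: %s\n", gen_name[i]);
	return 0;
}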
+diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
+index 65df6d2ac0032e..b65868e7095179 100644
+--- a/drivers/pci/pci.h
++++ b/drivers/pci/pci.h
+@@ -390,6 +390,7 @@ static inline int pcie_dev_speed_mbps(enum pci_bus_speed speed)
+ 	return -EINVAL;
+ }
+ 
++u8 pcie_get_supported_speeds(struct pci_dev *dev);
+ const char *pci_speed_string(enum pci_bus_speed speed);
+ enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev);
+ enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev);
+diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
+index cf7c7886b64203..b358b93a02753c 100644
+--- a/drivers/pci/probe.c
++++ b/drivers/pci/probe.c
+@@ -1627,7 +1627,7 @@ void set_pcie_hotplug_bridge(struct pci_dev *pdev)
+ 
+ 	pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &reg32);
+ 	if (reg32 & PCI_EXP_SLTCAP_HPC)
+-		pdev->is_hotplug_bridge = 1;
++		pdev->is_hotplug_bridge = pdev->is_pciehp = 1;
+ }
+ 
+ static void set_pcie_thunderbolt(struct pci_dev *dev)
+@@ -1972,6 +1972,9 @@ int pci_setup_device(struct pci_dev *dev)
+ 
+ 	set_pcie_untrusted(dev);
+ 
++	if (pci_is_pcie(dev))
++		dev->supported_speeds = pcie_get_supported_speeds(dev);
++
+ 	/* "Unknown power state" */
+ 	dev->current_state = PCI_UNKNOWN;
+ 
+diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
+index ff17e0f95fbb84..978b239ec10bde 100644
+--- a/drivers/perf/arm-cmn.c
++++ b/drivers/perf/arm-cmn.c
+@@ -2661,6 +2661,7 @@ static struct platform_driver arm_cmn_driver = {
+ 		.name = "arm-cmn",
+ 		.of_match_table = of_match_ptr(arm_cmn_of_match),
+ 		.acpi_match_table = ACPI_PTR(arm_cmn_acpi_match),
++		.suppress_bind_attrs = true,
+ 	},
+ 	.probe = arm_cmn_probe,
+ 	.remove_new = arm_cmn_remove,
+diff --git a/drivers/perf/arm-ni.c b/drivers/perf/arm-ni.c
+index 4b9d53dae8978b..fb09730a9aa0e0 100644
+--- a/drivers/perf/arm-ni.c
++++ b/drivers/perf/arm-ni.c
+@@ -710,6 +710,7 @@ static struct platform_driver arm_ni_driver = {
+ 		.name = "arm-ni",
+ 		.of_match_table = of_match_ptr(arm_ni_of_match),
+ 		.acpi_match_table = ACPI_PTR(arm_ni_acpi_match),
++		.suppress_bind_attrs = true,
+ 	},
+ 	.probe = arm_ni_probe,
+ 	.remove = arm_ni_remove,
+diff --git a/drivers/perf/cxl_pmu.c b/drivers/perf/cxl_pmu.c
+index 43d68b69e6300f..16328569fde93a 100644
+--- a/drivers/perf/cxl_pmu.c
++++ b/drivers/perf/cxl_pmu.c
+@@ -870,7 +870,7 @@ static int cxl_pmu_probe(struct device *dev)
+ 		return rc;
+ 	irq = rc;
+ 
+-	irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_overflow\n", dev_name);
++	irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_overflow", dev_name);
+ 	if (!irq_name)
+ 		return -ENOMEM;
+ 
+diff --git a/drivers/phy/rockchip/phy-rockchip-pcie.c b/drivers/phy/rockchip/phy-rockchip-pcie.c
+index 51cc5ece0e6372..a75affbb49b69a 100644
+--- a/drivers/phy/rockchip/phy-rockchip-pcie.c
++++ b/drivers/phy/rockchip/phy-rockchip-pcie.c
+@@ -30,9 +30,8 @@
+ #define PHY_CFG_ADDR_SHIFT    1
+ #define PHY_CFG_DATA_MASK     0xf
+ #define PHY_CFG_ADDR_MASK     0x3f
+-#define PHY_CFG_RD_MASK       0x3ff
+ #define PHY_CFG_WR_ENABLE     1
+-#define PHY_CFG_WR_DISABLE    1
++#define PHY_CFG_WR_DISABLE    0
+ #define PHY_CFG_WR_SHIFT      0
+ #define PHY_CFG_WR_MASK       1
+ #define PHY_CFG_PLL_LOCK      0x10
+diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
+index 03f3f707d27555..2659a854a514e0 100644
+--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
++++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
+@@ -407,6 +407,7 @@ static struct irq_chip stm32_gpio_irq_chip = {
+ 	.irq_set_wake	= irq_chip_set_wake_parent,
+ 	.irq_request_resources = stm32_gpio_irq_request_resources,
+ 	.irq_release_resources = stm32_gpio_irq_release_resources,
++	.irq_set_affinity = IS_ENABLED(CONFIG_SMP) ? irq_chip_set_affinity_parent : NULL,
+ };
+ 
+ static int stm32_gpio_domain_translate(struct irq_domain *d,
+diff --git a/drivers/platform/chrome/cros_ec_sensorhub.c b/drivers/platform/chrome/cros_ec_sensorhub.c
+index 50cdae67fa3204..9bad8f72680ea8 100644
+--- a/drivers/platform/chrome/cros_ec_sensorhub.c
++++ b/drivers/platform/chrome/cros_ec_sensorhub.c
+@@ -8,6 +8,7 @@
+ 
+ #include <linux/init.h>
+ #include <linux/device.h>
++#include <linux/delay.h>
+ #include <linux/mod_devicetable.h>
+ #include <linux/module.h>
+ #include <linux/platform_data/cros_ec_commands.h>
+@@ -18,6 +19,7 @@
+ #include <linux/types.h>
+ 
+ #define DRV_NAME		"cros-ec-sensorhub"
++#define CROS_EC_CMD_INFO_RETRIES 50
+ 
+ static void cros_ec_sensorhub_free_sensor(void *arg)
+ {
+@@ -53,7 +55,7 @@ static int cros_ec_sensorhub_register(struct device *dev,
+ 	int sensor_type[MOTIONSENSE_TYPE_MAX] = { 0 };
+ 	struct cros_ec_command *msg = sensorhub->msg;
+ 	struct cros_ec_dev *ec = sensorhub->ec;
+-	int ret, i;
++	int ret, i, retries;
+ 	char *name;
+ 
+ 
+@@ -65,12 +67,25 @@ static int cros_ec_sensorhub_register(struct device *dev,
+ 		sensorhub->params->cmd = MOTIONSENSE_CMD_INFO;
+ 		sensorhub->params->info.sensor_num = i;
+ 
+-		ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
++		retries = CROS_EC_CMD_INFO_RETRIES;
++		do {
++			ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
++			if (ret == -EBUSY) {
++				/* The EC is still busy initializing sensors. */
++				usleep_range(5000, 6000);
++				retries--;
++			}
++		} while (ret == -EBUSY && retries);
++
+ 		if (ret < 0) {
+-			dev_warn(dev, "no info for EC sensor %d : %d/%d\n",
+-				 i, ret, msg->result);
++			dev_err(dev, "no info for EC sensor %d : %d/%d\n",
++				i, ret, msg->result);
+ 			continue;
+ 		}
++		if (retries < CROS_EC_CMD_INFO_RETRIES) {
++			dev_warn(dev, "%d retries needed to bring up sensor %d\n",
++				 CROS_EC_CMD_INFO_RETRIES - retries, i);
++		}
+ 
+ 		switch (sensorhub->resp->info.type) {
+ 		case MOTIONSENSE_TYPE_ACCEL:
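The new loop bounds the number of -EBUSY retries rather than giving up on the first response from a still-initializing EC, sleeping roughly 5 ms between attempts. A standalone sketch of the same bounded-retry idiom (send_cmd() is a hypothetical stand-in for cros_ec_cmd_xfer_status()):

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

#define CMD_RETRIES	50

/* Stand-in command: busy for the first few calls, then succeeds. */
static int send_cmd(void)
{
	static int calls;

	return (++calls < 4) ? -EBUSY : 0;
}

int main(void)
{
	int retries = CMD_RETRIES;
	int ret;

	do {
		ret = send_cmd();
		if (ret == -EBUSY) {
			usleep(5000);	/* let the firmware settle */
			retries--;
		}
	} while (ret == -EBUSY && retries);

	if (ret < 0)
		fprintf(stderr, "command failed: %d\n", ret);
	else if (retries < CMD_RETRIES)
		printf("%d retries needed\n", CMD_RETRIES - retries);
	return ret ? 1 : 0;
}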
+diff --git a/drivers/platform/chrome/cros_ec_typec.c b/drivers/platform/chrome/cros_ec_typec.c
+index f1324466efac65..ca665b901010e3 100644
+--- a/drivers/platform/chrome/cros_ec_typec.c
++++ b/drivers/platform/chrome/cros_ec_typec.c
+@@ -1226,8 +1226,8 @@ static int cros_typec_probe(struct platform_device *pdev)
+ 
+ 	typec->ec = dev_get_drvdata(pdev->dev.parent);
+ 	if (!typec->ec) {
+-		dev_err(dev, "couldn't find parent EC device\n");
+-		return -ENODEV;
++		dev_warn(dev, "couldn't find parent EC device\n");
++		return -EPROBE_DEFER;
+ 	}
+ 
+ 	platform_set_drvdata(pdev, typec);
+diff --git a/drivers/platform/x86/amd/pmc/pmc-quirks.c b/drivers/platform/x86/amd/pmc/pmc-quirks.c
+index 7ed12c1d3b34c0..04686ae1e976bd 100644
+--- a/drivers/platform/x86/amd/pmc/pmc-quirks.c
++++ b/drivers/platform/x86/amd/pmc/pmc-quirks.c
+@@ -189,6 +189,15 @@ static const struct dmi_system_id fwbug_list[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "82XQ"),
+ 		}
+ 	},
++	/* https://gitlab.freedesktop.org/drm/amd/-/issues/4434 */
++	{
++		.ident = "Lenovo Yoga 6 13ALC6",
++		.driver_data = &quirk_s2idle_bug,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "82ND"),
++		}
++	},
+ 	/* https://gitlab.freedesktop.org/drm/amd/-/issues/2684 */
+ 	{
+ 		.ident = "HP Laptop 15s-eq2xxx",
+diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
+index 0528af4ed8d694..2c67d9758e6b4c 100644
+--- a/drivers/platform/x86/thinkpad_acpi.c
++++ b/drivers/platform/x86/thinkpad_acpi.c
+@@ -561,12 +561,12 @@ static unsigned long __init tpacpi_check_quirks(
+ 	return 0;
+ }
+ 
+-static inline bool __pure __init tpacpi_is_lenovo(void)
++static __always_inline bool __pure __init tpacpi_is_lenovo(void)
+ {
+ 	return thinkpad_id.vendor == PCI_VENDOR_ID_LENOVO;
+ }
+ 
+-static inline bool __pure __init tpacpi_is_ibm(void)
++static __always_inline bool __pure __init tpacpi_is_ibm(void)
+ {
+ 	return thinkpad_id.vendor == PCI_VENDOR_ID_IBM;
+ }
+diff --git a/drivers/pmdomain/imx/imx8m-blk-ctrl.c b/drivers/pmdomain/imx/imx8m-blk-ctrl.c
+index ca942d7929c2ba..8b7b175f58969d 100644
+--- a/drivers/pmdomain/imx/imx8m-blk-ctrl.c
++++ b/drivers/pmdomain/imx/imx8m-blk-ctrl.c
+@@ -665,6 +665,11 @@ static const struct imx8m_blk_ctrl_data imx8mn_disp_blk_ctl_dev_data = {
+ #define  LCDIF_1_RD_HURRY	GENMASK(15, 13)
+ #define  LCDIF_0_RD_HURRY	GENMASK(12, 10)
+ 
++#define ISI_CACHE_CTRL		0x50
++#define  ISI_V_WR_HURRY		GENMASK(28, 26)
++#define  ISI_U_WR_HURRY		GENMASK(25, 23)
++#define  ISI_Y_WR_HURRY		GENMASK(22, 20)
++
+ static int imx8mp_media_power_notifier(struct notifier_block *nb,
+ 				unsigned long action, void *data)
+ {
+@@ -694,6 +699,11 @@ static int imx8mp_media_power_notifier(struct notifier_block *nb,
+ 		regmap_set_bits(bc->regmap, LCDIF_ARCACHE_CTRL,
+ 				FIELD_PREP(LCDIF_1_RD_HURRY, 7) |
+ 				FIELD_PREP(LCDIF_0_RD_HURRY, 7));
++		/* Same here for ISI */
++		regmap_set_bits(bc->regmap, ISI_CACHE_CTRL,
++				FIELD_PREP(ISI_V_WR_HURRY, 7) |
++				FIELD_PREP(ISI_U_WR_HURRY, 7) |
++				FIELD_PREP(ISI_Y_WR_HURRY, 7));
+ 	}
+ 
+ 	return NOTIFY_OK;
+diff --git a/drivers/pmdomain/ti/Kconfig b/drivers/pmdomain/ti/Kconfig
+index 67c608bf7ed026..5386b362a7ab25 100644
+--- a/drivers/pmdomain/ti/Kconfig
++++ b/drivers/pmdomain/ti/Kconfig
+@@ -10,7 +10,7 @@ if SOC_TI
+ config TI_SCI_PM_DOMAINS
+ 	tristate "TI SCI PM Domains Driver"
+ 	depends on TI_SCI_PROTOCOL
+-	depends on PM_GENERIC_DOMAINS
++	select PM_GENERIC_DOMAINS if PM
+ 	help
+ 	  Generic power domain implementation for TI device implementing
+ 	  the TI SCI protocol.
+diff --git a/drivers/power/supply/qcom_battmgr.c b/drivers/power/supply/qcom_battmgr.c
+index f0a64c00ddaae0..c2037b58fbcdf2 100644
+--- a/drivers/power/supply/qcom_battmgr.c
++++ b/drivers/power/supply/qcom_battmgr.c
+@@ -980,6 +980,8 @@ static unsigned int qcom_battmgr_sc8280xp_parse_technology(const char *chemistry
+ {
+ 	if (!strncmp(chemistry, "LIO", BATTMGR_CHEMISTRY_LEN))
+ 		return POWER_SUPPLY_TECHNOLOGY_LION;
++	if (!strncmp(chemistry, "LIP", BATTMGR_CHEMISTRY_LEN))
++		return POWER_SUPPLY_TECHNOLOGY_LIPO;
+ 
+ 	pr_err("Unknown battery technology '%s'\n", chemistry);
+ 	return POWER_SUPPLY_TECHNOLOGY_UNKNOWN;
+diff --git a/drivers/pps/clients/pps-gpio.c b/drivers/pps/clients/pps-gpio.c
+index 93e662912b5313..1412f8af15f277 100644
+--- a/drivers/pps/clients/pps-gpio.c
++++ b/drivers/pps/clients/pps-gpio.c
+@@ -206,8 +206,8 @@ static int pps_gpio_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	/* register IRQ interrupt handler */
+-	ret = devm_request_irq(dev, data->irq, pps_gpio_irq_handler,
+-			get_irqf_trigger_flags(data), data->info.name, data);
++	ret = request_irq(data->irq, pps_gpio_irq_handler,
++			  get_irqf_trigger_flags(data), data->info.name, data);
+ 	if (ret) {
+ 		pps_unregister_source(data->pps);
+ 		dev_err(dev, "failed to acquire IRQ %d\n", data->irq);
+@@ -224,6 +224,7 @@ static void pps_gpio_remove(struct platform_device *pdev)
+ {
+ 	struct pps_gpio_device_data *data = platform_get_drvdata(pdev);
+ 
++	free_irq(data->irq, data);
+ 	pps_unregister_source(data->pps);
+ 	del_timer_sync(&data->echo_timer);
+ 	/* reset echo pin in any case */
+diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
+index b892a7323084dc..642a540861d439 100644
+--- a/drivers/ptp/ptp_clock.c
++++ b/drivers/ptp/ptp_clock.c
+@@ -96,7 +96,7 @@ static int ptp_clock_settime(struct posix_clock *pc, const struct timespec64 *tp
+ 	struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
+ 
+ 	if (ptp_clock_freerun(ptp)) {
+-		pr_err("ptp: physical clock is free running\n");
++		pr_err_ratelimited("ptp: physical clock is free running\n");
+ 		return -EBUSY;
+ 	}
+ 
+diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
+index a6aad743c282f4..b352df4cd3f972 100644
+--- a/drivers/ptp/ptp_private.h
++++ b/drivers/ptp/ptp_private.h
+@@ -24,6 +24,11 @@
+ #define PTP_DEFAULT_MAX_VCLOCKS 20
+ #define PTP_MAX_CHANNELS 2048
+ 
++enum {
++	PTP_LOCK_PHYSICAL = 0,
++	PTP_LOCK_VIRTUAL,
++};
++
+ struct timestamp_event_queue {
+ 	struct ptp_extts_event buf[PTP_MAX_TIMESTAMPS];
+ 	int head;
+diff --git a/drivers/ptp/ptp_vclock.c b/drivers/ptp/ptp_vclock.c
+index 7febfdcbde8bc6..8ed4b85989242f 100644
+--- a/drivers/ptp/ptp_vclock.c
++++ b/drivers/ptp/ptp_vclock.c
+@@ -154,6 +154,11 @@ static long ptp_vclock_refresh(struct ptp_clock_info *ptp)
+ 	return PTP_VCLOCK_REFRESH_INTERVAL;
+ }
+ 
++static void ptp_vclock_set_subclass(struct ptp_clock *ptp)
++{
++	lockdep_set_subclass(&ptp->clock.rwsem, PTP_LOCK_VIRTUAL);
++}
++
+ static const struct ptp_clock_info ptp_vclock_info = {
+ 	.owner		= THIS_MODULE,
+ 	.name		= "ptp virtual clock",
+@@ -213,6 +218,8 @@ struct ptp_vclock *ptp_vclock_register(struct ptp_clock *pclock)
+ 		return NULL;
+ 	}
+ 
++	ptp_vclock_set_subclass(vclock->clock);
++
+ 	timecounter_init(&vclock->tc, &vclock->cc, 0);
+ 	ptp_schedule_worker(vclock->clock, PTP_VCLOCK_REFRESH_INTERVAL);
+ 
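The PTP_LOCK_* constants and the lockdep_set_subclass() call address a lockdep false positive: the virtual clock's rwsem shares a lock class with the physical clock's, and the two can legitimately be held nested. Re-keying the virtual instance to a different subclass tells lockdep the nesting is intentional. A kernel-style sketch of the idiom (illustrative only; it builds only in-tree):

#include <linux/lockdep.h>
#include <linux/rwsem.h>

enum { LOCK_PHYSICAL = 0, LOCK_VIRTUAL };	/* mirrors PTP_LOCK_* */

static void mark_virtual(struct rw_semaphore *virt)
{
	/* Re-key this instance so it no longer shares a lockdep class
	 * with the physical clock's rwsem. */
	lockdep_set_subclass(virt, LOCK_VIRTUAL);
}

static void nested_use(struct rw_semaphore *phys, struct rw_semaphore *virt)
{
	down_write(phys);	/* subclass 0: the physical clock   */
	down_write(virt);	/* subclass 1: no recursion warning */
	up_write(virt);
	up_write(phys);
}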
+diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c
+index 800015ff7ff923..cc3f5b7fe9dd1c 100644
+--- a/drivers/remoteproc/imx_rproc.c
++++ b/drivers/remoteproc/imx_rproc.c
+@@ -1029,8 +1029,8 @@ static int imx_rproc_clk_enable(struct imx_rproc *priv)
+ 	struct device *dev = priv->dev;
+ 	int ret;
+ 
+-	/* Remote core is not under control of Linux */
+-	if (dcfg->method == IMX_RPROC_NONE)
++	/* Remote core is not under control of Linux or it is managed by SCU API */
++	if (dcfg->method == IMX_RPROC_NONE || dcfg->method == IMX_RPROC_SCU_API)
+ 		return 0;
+ 
+ 	priv->clk = devm_clk_get(dev, NULL);
+diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
+index 5484a65f66b953..1cf577e3223beb 100644
+--- a/drivers/reset/Kconfig
++++ b/drivers/reset/Kconfig
+@@ -51,8 +51,8 @@ config RESET_BERLIN
+ 
+ config RESET_BRCMSTB
+ 	tristate "Broadcom STB reset controller"
+-	depends on ARCH_BRCMSTB || COMPILE_TEST
+-	default ARCH_BRCMSTB
++	depends on ARCH_BRCMSTB || ARCH_BCM2835 || COMPILE_TEST
++	default ARCH_BRCMSTB || ARCH_BCM2835
+ 	help
+ 	  This enables the reset controller driver for Broadcom STB SoCs using
+ 	  a SUN_TOP_CTRL_SW_INIT style controller.
+@@ -60,11 +60,11 @@ config RESET_BRCMSTB
+ config RESET_BRCMSTB_RESCAL
+ 	tristate "Broadcom STB RESCAL reset controller"
+ 	depends on HAS_IOMEM
+-	depends on ARCH_BRCMSTB || COMPILE_TEST
+-	default ARCH_BRCMSTB
++	depends on ARCH_BRCMSTB || ARCH_BCM2835 || COMPILE_TEST
++	default ARCH_BRCMSTB || ARCH_BCM2835
+ 	help
+ 	  This enables the RESCAL reset controller for SATA, PCIe0, or PCIe1 on
+-	  BCM7216.
++	  BCM7216 or the BCM2712.
+ 
+ config RESET_EYEQ
+ 	bool "Mobileye EyeQ reset controller"
+diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
+index c8a666de9cbe91..1960d1bd851cb0 100644
+--- a/drivers/rtc/rtc-ds1307.c
++++ b/drivers/rtc/rtc-ds1307.c
+@@ -279,6 +279,13 @@ static int ds1307_get_time(struct device *dev, struct rtc_time *t)
+ 		if (tmp & DS1340_BIT_OSF)
+ 			return -EINVAL;
+ 		break;
++	case ds_1341:
++		ret = regmap_read(ds1307->regmap, DS1337_REG_STATUS, &tmp);
++		if (ret)
++			return ret;
++		if (tmp & DS1337_BIT_OSF)
++			return -EINVAL;
++		break;
+ 	case ds_1388:
+ 		ret = regmap_read(ds1307->regmap, DS1388_REG_FLAG, &tmp);
+ 		if (ret)
+@@ -377,6 +384,10 @@ static int ds1307_set_time(struct device *dev, struct rtc_time *t)
+ 		regmap_update_bits(ds1307->regmap, DS1340_REG_FLAG,
+ 				   DS1340_BIT_OSF, 0);
+ 		break;
++	case ds_1341:
++		regmap_update_bits(ds1307->regmap, DS1337_REG_STATUS,
++				   DS1337_BIT_OSF, 0);
++		break;
+ 	case ds_1388:
+ 		regmap_update_bits(ds1307->regmap, DS1388_REG_FLAG,
+ 				   DS1388_BIT_OSF, 0);
+@@ -1813,10 +1824,8 @@ static int ds1307_probe(struct i2c_client *client)
+ 		regmap_write(ds1307->regmap, DS1337_REG_CONTROL,
+ 			     regs[0]);
+ 
+-		/* oscillator fault?  clear flag, and warn */
++		/* oscillator fault? warn */
+ 		if (regs[1] & DS1337_BIT_OSF) {
+-			regmap_write(ds1307->regmap, DS1337_REG_STATUS,
+-				     regs[1] & ~DS1337_BIT_OSF);
+ 			dev_warn(ds1307->dev, "SET TIME!\n");
+ 		}
+ 		break;
+diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
+index 28cf18955a088e..726c8531b7d3fb 100644
+--- a/drivers/scsi/aacraid/comminit.c
++++ b/drivers/scsi/aacraid/comminit.c
+@@ -481,8 +481,7 @@ void aac_define_int_mode(struct aac_dev *dev)
+ 	    pci_find_capability(dev->pdev, PCI_CAP_ID_MSIX)) {
+ 		min_msix = 2;
+ 		i = pci_alloc_irq_vectors(dev->pdev,
+-					  min_msix, msi_count,
+-					  PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
++					  min_msix, msi_count, PCI_IRQ_MSIX);
+ 		if (i > 0) {
+ 			dev->msi_enabled = 1;
+ 			msi_count = i;
+diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
+index 66fb701401de76..0cc04c84b25952 100644
+--- a/drivers/scsi/bfa/bfad_im.c
++++ b/drivers/scsi/bfa/bfad_im.c
+@@ -706,6 +706,7 @@ bfad_im_probe(struct bfad_s *bfad)
+ 
+ 	if (bfad_thread_workq(bfad) != BFA_STATUS_OK) {
+ 		kfree(im);
++		bfad->im = NULL;
+ 		return BFA_STATUS_FAILED;
+ 	}
+ 
+diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
+index 2b1bf990a9dc07..29af3722ea220c 100644
+--- a/drivers/scsi/libiscsi.c
++++ b/drivers/scsi/libiscsi.c
+@@ -3184,7 +3184,8 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
+ 		return NULL;
+ 	conn = cls_conn->dd_data;
+ 
+-	conn->dd_data = cls_conn->dd_data + sizeof(*conn);
++	if (dd_size)
++		conn->dd_data = cls_conn->dd_data + sizeof(*conn);
+ 	conn->session = session;
+ 	conn->cls_conn = cls_conn;
+ 	conn->c_stage = ISCSI_CONN_INITIAL_STAGE;
+diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
+index 3fd1aa5cc78cc8..1b601e45bc45c1 100644
+--- a/drivers/scsi/lpfc/lpfc_debugfs.c
++++ b/drivers/scsi/lpfc/lpfc_debugfs.c
+@@ -6289,7 +6289,6 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
+ 			}
+ 			phba->nvmeio_trc_on = 1;
+ 			phba->nvmeio_trc_output_idx = 0;
+-			phba->nvmeio_trc = NULL;
+ 		} else {
+ nvmeio_off:
+ 			phba->nvmeio_trc_size = 0;
+diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
+index b5dd17eecf82da..3ba515c1fe3a12 100644
+--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
++++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
+@@ -177,7 +177,8 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
+ 
+ 	/* Don't schedule a worker thread event if the vport is going down. */
+ 	if (test_bit(FC_UNLOADING, &vport->load_flag) ||
+-	    !test_bit(HBA_SETUP, &phba->hba_flag)) {
++	    (phba->sli_rev == LPFC_SLI_REV4 &&
++	    !test_bit(HBA_SETUP, &phba->hba_flag))) {
+ 
+ 		spin_lock_irqsave(&ndlp->lock, iflags);
+ 		ndlp->rport = NULL;
+diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
+index 905026a4782cf9..67e08988118175 100644
+--- a/drivers/scsi/lpfc/lpfc_scsi.c
++++ b/drivers/scsi/lpfc/lpfc_scsi.c
+@@ -390,6 +390,10 @@ lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
+ 	if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
+ 		return;
+ 
++	/* may be called before queues established if hba_setup fails */
++	if (!phba->sli4_hba.hdwq)
++		return;
++
+ 	spin_lock_irqsave(&phba->hbalock, iflag);
+ 	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
+ 		qp = &phba->sli4_hba.hdwq[idx];
+diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
+index 1e8735538b238e..990646e1e18d8e 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr_os.c
++++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
+@@ -49,6 +49,13 @@ static void mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
+ 
+ #define MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH	(0xFFFE)
+ 
++/*
++ * SAS Log info code for a NCQ collateral abort after an NCQ error:
++ * IOC_LOGINFO_PREFIX_PL | PL_LOGINFO_CODE_SATA_NCQ_FAIL_ALL_CMDS_AFTR_ERR
++ * See: drivers/message/fusion/lsi/mpi_log_sas.h
++ */
++#define IOC_LOGINFO_SATA_NCQ_FAIL_AFTER_ERR	0x31080000
++
+ /**
+  * mpi3mr_host_tag_for_scmd - Get host tag for a scmd
+  * @mrioc: Adapter instance reference
+@@ -3397,7 +3404,18 @@ void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc,
+ 		scmd->result = DID_NO_CONNECT << 16;
+ 		break;
+ 	case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED:
+-		scmd->result = DID_SOFT_ERROR << 16;
++		if (ioc_loginfo == IOC_LOGINFO_SATA_NCQ_FAIL_AFTER_ERR) {
++			/*
++			 * This is an ATA NCQ command aborted due to another NCQ

++			 * command failure. We must retry this command
++			 * immediately but without incrementing its retry
++			 * counter.
++			 */
++			WARN_ON_ONCE(xfer_count != 0);
++			scmd->result = DID_IMM_RETRY << 16;
++		} else {
++			scmd->result = DID_SOFT_ERROR << 16;
++		}
+ 		break;
+ 	case MPI3_IOCSTATUS_SCSI_TASK_TERMINATED:
+ 	case MPI3_IOCSTATUS_SCSI_EXT_TERMINATED:
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+index 91aa9de3b84f45..9719da76461c06 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+@@ -195,6 +195,14 @@ struct sense_info {
+ #define MPT3SAS_PORT_ENABLE_COMPLETE (0xFFFD)
+ #define MPT3SAS_ABRT_TASK_SET (0xFFFE)
+ #define MPT3SAS_REMOVE_UNRESPONDING_DEVICES (0xFFFF)
++
++/*
++ * SAS Log info code for a NCQ collateral abort after an NCQ error:
++ * IOC_LOGINFO_PREFIX_PL | PL_LOGINFO_CODE_SATA_NCQ_FAIL_ALL_CMDS_AFTR_ERR
++ * See: drivers/message/fusion/lsi/mpi_log_sas.h
++ */
++#define IOC_LOGINFO_SATA_NCQ_FAIL_AFTER_ERR	0x31080000
++
+ /**
+  * struct fw_event_work - firmware event struct
+  * @list: link list framework
+@@ -5814,6 +5822,17 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
+ 			scmd->result = DID_TRANSPORT_DISRUPTED << 16;
+ 			goto out;
+ 		}
++		if (log_info == IOC_LOGINFO_SATA_NCQ_FAIL_AFTER_ERR) {
++			/*
++			 * This is an ATA NCQ command aborted due to another NCQ
++			 * command failure. We must retry this command
++			 * immediately but without incrementing its retry
++			 * counter.
++			 */
++			WARN_ON_ONCE(xfer_cnt != 0);
++			scmd->result = DID_IMM_RETRY << 16;
++			break;
++		}
+ 		if (log_info == 0x31110630) {
+ 			if (scmd->retries > 2) {
+ 				scmd->result = DID_NO_CONNECT << 16;
+diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
+index fe08af4dcb67cf..36e0b310546071 100644
+--- a/drivers/scsi/scsi_scan.c
++++ b/drivers/scsi/scsi_scan.c
+@@ -1881,7 +1881,7 @@ int scsi_scan_host_selected(struct Scsi_Host *shost, unsigned int channel,
+ 
+ 	return 0;
+ }
+-
++EXPORT_SYMBOL(scsi_scan_host_selected);
+ static void scsi_sysfs_add_devices(struct Scsi_Host *shost)
+ {
+ 	struct scsi_device *sdev;
+diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
+index 4e33f1661e4c1d..d322802c7790f6 100644
+--- a/drivers/scsi/scsi_transport_sas.c
++++ b/drivers/scsi/scsi_transport_sas.c
+@@ -40,6 +40,8 @@
+ #include <scsi/scsi_transport_sas.h>
+ 
+ #include "scsi_sas_internal.h"
++#include "scsi_priv.h"
++
+ struct sas_host_attrs {
+ 	struct list_head rphy_list;
+ 	struct mutex lock;
+@@ -1681,32 +1683,66 @@ int scsi_is_sas_rphy(const struct device *dev)
+ }
+ EXPORT_SYMBOL(scsi_is_sas_rphy);
+ 
+-
+-/*
+- * SCSI scan helper
+- */
+-
+-static int sas_user_scan(struct Scsi_Host *shost, uint channel,
+-		uint id, u64 lun)
++static void scan_channel_zero(struct Scsi_Host *shost, uint id, u64 lun)
+ {
+ 	struct sas_host_attrs *sas_host = to_sas_host_attrs(shost);
+ 	struct sas_rphy *rphy;
+ 
+-	mutex_lock(&sas_host->lock);
+ 	list_for_each_entry(rphy, &sas_host->rphy_list, list) {
+ 		if (rphy->identify.device_type != SAS_END_DEVICE ||
+ 		    rphy->scsi_target_id == -1)
+ 			continue;
+ 
+-		if ((channel == SCAN_WILD_CARD || channel == 0) &&
+-		    (id == SCAN_WILD_CARD || id == rphy->scsi_target_id)) {
++		if (id == SCAN_WILD_CARD || id == rphy->scsi_target_id) {
+ 			scsi_scan_target(&rphy->dev, 0, rphy->scsi_target_id,
+ 					 lun, SCSI_SCAN_MANUAL);
+ 		}
+ 	}
+-	mutex_unlock(&sas_host->lock);
++}
+ 
+-	return 0;
++/*
++ * SCSI scan helper
++ */
++
++static int sas_user_scan(struct Scsi_Host *shost, uint channel,
++		uint id, u64 lun)
++{
++	struct sas_host_attrs *sas_host = to_sas_host_attrs(shost);
++	int res = 0;
++	int i;
++
++	switch (channel) {
++	case 0:
++		mutex_lock(&sas_host->lock);
++		scan_channel_zero(shost, id, lun);
++		mutex_unlock(&sas_host->lock);
++		break;
++
++	case SCAN_WILD_CARD:
++		mutex_lock(&sas_host->lock);
++		scan_channel_zero(shost, id, lun);
++		mutex_unlock(&sas_host->lock);
++
++		for (i = 1; i <= shost->max_channel; i++) {
++			res = scsi_scan_host_selected(shost, i, id, lun,
++						      SCSI_SCAN_MANUAL);
++			if (res)
++				goto exit_scan;
++		}
++		break;
++
++	default:
++		if (channel < shost->max_channel) {
++			res = scsi_scan_host_selected(shost, channel, id, lun,
++						      SCSI_SCAN_MANUAL);
++		} else {
++			res = -EINVAL;
++		}
++		break;
++	}
++
++exit_scan:
++	return res;
+ }
+ 
+ 
+diff --git a/drivers/soc/qcom/mdt_loader.c b/drivers/soc/qcom/mdt_loader.c
+index b2c0fb55d4ae67..44589d10b15b50 100644
+--- a/drivers/soc/qcom/mdt_loader.c
++++ b/drivers/soc/qcom/mdt_loader.c
+@@ -83,7 +83,7 @@ ssize_t qcom_mdt_get_size(const struct firmware *fw)
+ 	int i;
+ 
+ 	ehdr = (struct elf32_hdr *)fw->data;
+-	phdrs = (struct elf32_phdr *)(ehdr + 1);
++	phdrs = (struct elf32_phdr *)(fw->data + ehdr->e_phoff);
+ 
+ 	for (i = 0; i < ehdr->e_phnum; i++) {
+ 		phdr = &phdrs[i];
+@@ -135,7 +135,7 @@ void *qcom_mdt_read_metadata(const struct firmware *fw, size_t *data_len,
+ 	void *data;
+ 
+ 	ehdr = (struct elf32_hdr *)fw->data;
+-	phdrs = (struct elf32_phdr *)(ehdr + 1);
++	phdrs = (struct elf32_phdr *)(fw->data + ehdr->e_phoff);
+ 
+ 	if (ehdr->e_phnum < 2)
+ 		return ERR_PTR(-EINVAL);
+@@ -215,7 +215,7 @@ int qcom_mdt_pas_init(struct device *dev, const struct firmware *fw,
+ 	int i;
+ 
+ 	ehdr = (struct elf32_hdr *)fw->data;
+-	phdrs = (struct elf32_phdr *)(ehdr + 1);
++	phdrs = (struct elf32_phdr *)(fw->data + ehdr->e_phoff);
+ 
+ 	for (i = 0; i < ehdr->e_phnum; i++) {
+ 		phdr = &phdrs[i];
+@@ -270,7 +270,7 @@ static bool qcom_mdt_bins_are_split(const struct firmware *fw, const char *fw_na
+ 	int i;
+ 
+ 	ehdr = (struct elf32_hdr *)fw->data;
+-	phdrs = (struct elf32_phdr *)(ehdr + 1);
++	phdrs = (struct elf32_phdr *)(fw->data + ehdr->e_phoff);
+ 
+ 	for (i = 0; i < ehdr->e_phnum; i++) {
+ 		/*
+@@ -312,7 +312,7 @@ static int __qcom_mdt_load(struct device *dev, const struct firmware *fw,
+ 
+ 	is_split = qcom_mdt_bins_are_split(fw, fw_name);
+ 	ehdr = (struct elf32_hdr *)fw->data;
+-	phdrs = (struct elf32_phdr *)(ehdr + 1);
++	phdrs = (struct elf32_phdr *)(fw->data + ehdr->e_phoff);
+ 
+ 	for (i = 0; i < ehdr->e_phnum; i++) {
+ 		phdr = &phdrs[i];
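Each hunk in this file replaces the assumption that the program-header table sits immediately behind the ELF header with the authoritative e_phoff field, which also copes with images whose tables were moved. A small userspace analogue using <elf.h> (alignment and endianness handling elided):

#include <elf.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Locate the program headers of an in-memory ELF32 image via e_phoff. */
static void list_segments(const uint8_t *data, size_t len)
{
	const Elf32_Ehdr *ehdr = (const Elf32_Ehdr *)data;
	const Elf32_Phdr *phdrs;

	if (len < sizeof(*ehdr) || memcmp(ehdr->e_ident, ELFMAG, SELFMAG))
		return;
	if (ehdr->e_phoff > len ||
	    (size_t)ehdr->e_phnum * sizeof(*phdrs) > len - ehdr->e_phoff)
		return;		/* phdr table would overrun the image */

	phdrs = (const Elf32_Phdr *)(data + ehdr->e_phoff);
	for (unsigned int i = 0; i < ehdr->e_phnum; i++)
		printf("segment %u: type %u, filesz %u\n", i,
		       (unsigned int)phdrs[i].p_type,
		       (unsigned int)phdrs[i].p_filesz);
}

int main(void)
{
	list_segments(NULL, 0);		/* no image: the checks reject it */
	return 0;
}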
+diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c
+index de86009ecd913d..641f29a98cbd28 100644
+--- a/drivers/soc/qcom/rpmh-rsc.c
++++ b/drivers/soc/qcom/rpmh-rsc.c
+@@ -1075,7 +1075,7 @@ static int rpmh_rsc_probe(struct platform_device *pdev)
+ 	drv->ver.minor = rsc_id & (MINOR_VER_MASK << MINOR_VER_SHIFT);
+ 	drv->ver.minor >>= MINOR_VER_SHIFT;
+ 
+-	if (drv->ver.major == 3)
++	if (drv->ver.major >= 3)
+ 		drv->regs = rpmh_rsc_reg_offset_ver_3_0;
+ 	else
+ 		drv->regs = rpmh_rsc_reg_offset_ver_2_7;
+diff --git a/drivers/soundwire/amd_manager.c b/drivers/soundwire/amd_manager.c
+index 1895fba5e70bbc..a325ce52c39639 100644
+--- a/drivers/soundwire/amd_manager.c
++++ b/drivers/soundwire/amd_manager.c
+@@ -972,6 +972,7 @@ static void amd_sdw_manager_remove(struct platform_device *pdev)
+ 	int ret;
+ 
+ 	pm_runtime_disable(&pdev->dev);
++	cancel_work_sync(&amd_manager->amd_sdw_work);
+ 	amd_disable_sdw_interrupts(amd_manager);
+ 	sdw_bus_master_delete(&amd_manager->bus);
+ 	ret = amd_disable_sdw_manager(amd_manager);
+@@ -1076,10 +1077,10 @@ static int __maybe_unused amd_pm_prepare(struct device *dev)
+ 	 * device is not in runtime suspend state, observed that device alerts are missing
+ 	 * without pm_prepare on AMD platforms in clockstop mode0.
+ 	 */
+-	if (amd_manager->power_mode_mask & AMD_SDW_CLK_STOP_MODE) {
+-		ret = pm_request_resume(dev);
++	if (amd_manager->power_mode_mask) {
++		ret = pm_runtime_resume(dev);
+ 		if (ret < 0) {
+-			dev_err(bus->dev, "pm_request_resume failed: %d\n", ret);
++			dev_err(bus->dev, "pm_runtime_resume failed: %d\n", ret);
+ 			return 0;
+ 		}
+ 	}
+diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c
+index 6ca06cce41d3c4..6c7989e2079e08 100644
+--- a/drivers/soundwire/bus.c
++++ b/drivers/soundwire/bus.c
+@@ -1732,15 +1732,15 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave)
+ 
+ 		/* Update the Slave driver */
+ 		if (slave_notify) {
++			if (slave->prop.use_domain_irq && slave->irq)
++				handle_nested_irq(slave->irq);
++
+ 			mutex_lock(&slave->sdw_dev_lock);
+ 
+ 			if (slave->probed) {
+ 				struct device *dev = &slave->dev;
+ 				struct sdw_driver *drv = drv_to_sdw_driver(dev->driver);
+ 
+-				if (slave->prop.use_domain_irq && slave->irq)
+-					handle_nested_irq(slave->irq);
+-
+ 				if (drv->ops && drv->ops->interrupt_callback) {
+ 					slave_intr.sdca_cascade = sdca_cascade;
+ 					slave_intr.control_port = clear;
+diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c
+index 43f47e3aa4482c..ec7bc6e3022891 100644
+--- a/drivers/target/target_core_fabric_lib.c
++++ b/drivers/target/target_core_fabric_lib.c
+@@ -257,11 +257,41 @@ static int iscsi_get_pr_transport_id_len(
+ 	return len;
+ }
+ 
+-static char *iscsi_parse_pr_out_transport_id(
++static void sas_parse_pr_out_transport_id(char *buf, char *i_str)
++{
++	char hex[17] = {};
++
++	bin2hex(hex, buf + 4, 8);
++	snprintf(i_str, TRANSPORT_IQN_LEN, "naa.%s", hex);
++}
++
++static void srp_parse_pr_out_transport_id(char *buf, char *i_str)
++{
++	char hex[33] = {};
++
++	bin2hex(hex, buf + 8, 16);
++	snprintf(i_str, TRANSPORT_IQN_LEN, "0x%s", hex);
++}
++
++static void fcp_parse_pr_out_transport_id(char *buf, char *i_str)
++{
++	snprintf(i_str, TRANSPORT_IQN_LEN, "%8phC", buf + 8);
++}
++
++static void sbp_parse_pr_out_transport_id(char *buf, char *i_str)
++{
++	char hex[17] = {};
++
++	bin2hex(hex, buf + 8, 8);
++	snprintf(i_str, TRANSPORT_IQN_LEN, "%s", hex);
++}
++
++static bool iscsi_parse_pr_out_transport_id(
+ 	struct se_portal_group *se_tpg,
+ 	char *buf,
+ 	u32 *out_tid_len,
+-	char **port_nexus_ptr)
++	char **port_nexus_ptr,
++	char *i_str)
+ {
+ 	char *p;
+ 	int i;
+@@ -282,7 +312,7 @@ static char *iscsi_parse_pr_out_transport_id(
+ 	if ((format_code != 0x00) && (format_code != 0x40)) {
+ 		pr_err("Illegal format code: 0x%02x for iSCSI"
+ 			" Initiator Transport ID\n", format_code);
+-		return NULL;
++		return false;
+ 	}
+ 	/*
+ 	 * If the caller wants the TransportID Length, we set that value for the
+@@ -306,7 +336,7 @@ static char *iscsi_parse_pr_out_transport_id(
+ 			pr_err("Unable to locate \",i,0x\" separator"
+ 				" for Initiator port identifier: %s\n",
+ 				&buf[4]);
+-			return NULL;
++			return false;
+ 		}
+ 		*p = '\0'; /* Terminate iSCSI Name */
+ 		p += 5; /* Skip over ",i,0x" separator */
+@@ -339,7 +369,8 @@ static char *iscsi_parse_pr_out_transport_id(
+ 	} else
+ 		*port_nexus_ptr = NULL;
+ 
+-	return &buf[4];
++	strscpy(i_str, &buf[4], TRANSPORT_IQN_LEN);
++	return true;
+ }
+ 
+ int target_get_pr_transport_id_len(struct se_node_acl *nacl,
+@@ -387,33 +418,35 @@ int target_get_pr_transport_id(struct se_node_acl *nacl,
+ 	}
+ }
+ 
+-const char *target_parse_pr_out_transport_id(struct se_portal_group *tpg,
+-		char *buf, u32 *out_tid_len, char **port_nexus_ptr)
++bool target_parse_pr_out_transport_id(struct se_portal_group *tpg,
++		char *buf, u32 *out_tid_len, char **port_nexus_ptr, char *i_str)
+ {
+-	u32 offset;
+-
+ 	switch (tpg->proto_id) {
+ 	case SCSI_PROTOCOL_SAS:
+ 		/*
+ 		 * Assume the FORMAT CODE 00b from spc4r17, 7.5.4.7 TransportID
+ 		 * for initiator ports using SCSI over SAS Serial SCSI Protocol.
+ 		 */
+-		offset = 4;
++		sas_parse_pr_out_transport_id(buf, i_str);
+ 		break;
+-	case SCSI_PROTOCOL_SBP:
+ 	case SCSI_PROTOCOL_SRP:
++		srp_parse_pr_out_transport_id(buf, i_str);
++		break;
+ 	case SCSI_PROTOCOL_FCP:
+-		offset = 8;
++		fcp_parse_pr_out_transport_id(buf, i_str);
++		break;
++	case SCSI_PROTOCOL_SBP:
++		sbp_parse_pr_out_transport_id(buf, i_str);
+ 		break;
+ 	case SCSI_PROTOCOL_ISCSI:
+ 		return iscsi_parse_pr_out_transport_id(tpg, buf, out_tid_len,
+-					port_nexus_ptr);
++					port_nexus_ptr, i_str);
+ 	default:
+ 		pr_err("Unknown proto_id: 0x%02x\n", tpg->proto_id);
+-		return NULL;
++		return false;
+ 	}
+ 
+ 	*port_nexus_ptr = NULL;
+ 	*out_tid_len = 24;
+-	return buf + offset;
++	return true;
+ }
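The refactored helpers format each protocol's identifier into a caller-provided, fixed-size buffer instead of returning pointers into the TransportID, which is what lets the PR code above keep i_str as a stack array. A userspace sketch of the bin2hex-plus-snprintf idiom (bin2hex here is a local stand-in for the kernel helper; 224 mirrors TRANSPORT_IQN_LEN):

#include <stdio.h>
#include <stddef.h>

#define IQN_LEN	224

/* Local stand-in for the kernel's bin2hex(). */
static void bin2hex(char *dst, const unsigned char *src, size_t n)
{
	static const char x[] = "0123456789abcdef";

	while (n--) {
		*dst++ = x[*src >> 4];
		*dst++ = x[*src++ & 0xf];
	}
}

/* Format a SAS address from a TransportID, as the patched
 * sas_parse_pr_out_transport_id() does. */
static void format_sas(const unsigned char *buf, char *i_str)
{
	char hex[17] = { 0 };

	bin2hex(hex, buf + 4, 8);
	snprintf(i_str, IQN_LEN, "naa.%s", hex);
}

int main(void)
{
	unsigned char tid[12] = { [4] = 0x50, 0x06, 0x04, 0x81,
				  0xd6, 0xf0, 0x0c, 0x19 };
	char i_str[IQN_LEN];

	format_sas(tid, i_str);
	puts(i_str);			/* naa.50060481d6f00c19 */
	return 0;
}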
+diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
+index 408be26d2e9b4d..20aab1f505655c 100644
+--- a/drivers/target/target_core_internal.h
++++ b/drivers/target/target_core_internal.h
+@@ -103,8 +103,8 @@ int	target_get_pr_transport_id_len(struct se_node_acl *nacl,
+ int	target_get_pr_transport_id(struct se_node_acl *nacl,
+ 		struct t10_pr_registration *pr_reg, int *format_code,
+ 		unsigned char *buf);
+-const char *target_parse_pr_out_transport_id(struct se_portal_group *tpg,
+-		char *buf, u32 *out_tid_len, char **port_nexus_ptr);
++bool target_parse_pr_out_transport_id(struct se_portal_group *tpg,
++		char *buf, u32 *out_tid_len, char **port_nexus_ptr, char *i_str);
+ 
+ /* target_core_hba.c */
+ struct se_hba *core_alloc_hba(const char *, u32, u32);
+diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
+index 47fe50b80c2294..82061cbe678131 100644
+--- a/drivers/target/target_core_pr.c
++++ b/drivers/target/target_core_pr.c
+@@ -1478,11 +1478,12 @@ core_scsi3_decode_spec_i_port(
+ 	LIST_HEAD(tid_dest_list);
+ 	struct pr_transport_id_holder *tidh_new, *tidh, *tidh_tmp;
+ 	unsigned char *buf, *ptr, proto_ident;
+-	const unsigned char *i_str = NULL;
++	unsigned char i_str[TRANSPORT_IQN_LEN];
+ 	char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN];
+ 	sense_reason_t ret;
+ 	u32 tpdl, tid_len = 0;
+ 	u32 dest_rtpi = 0;
++	bool tid_found;
+ 
+ 	/*
+ 	 * Allocate a struct pr_transport_id_holder and setup the
+@@ -1571,9 +1572,9 @@ core_scsi3_decode_spec_i_port(
+ 			dest_rtpi = tmp_lun->lun_tpg->tpg_rtpi;
+ 
+ 			iport_ptr = NULL;
+-			i_str = target_parse_pr_out_transport_id(tmp_tpg,
+-					ptr, &tid_len, &iport_ptr);
+-			if (!i_str)
++			tid_found = target_parse_pr_out_transport_id(tmp_tpg,
++					ptr, &tid_len, &iport_ptr, i_str);
++			if (!tid_found)
+ 				continue;
+ 			/*
+ 			 * Determine if this SCSI device server requires that
+@@ -3153,13 +3154,14 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
+ 	struct t10_pr_registration *pr_reg, *pr_res_holder, *dest_pr_reg;
+ 	struct t10_reservation *pr_tmpl = &dev->t10_pr;
+ 	unsigned char *buf;
+-	const unsigned char *initiator_str;
++	unsigned char initiator_str[TRANSPORT_IQN_LEN];
+ 	char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN] = { };
+ 	u32 tid_len, tmp_tid_len;
+ 	int new_reg = 0, type, scope, matching_iname;
+ 	sense_reason_t ret;
+ 	unsigned short rtpi;
+ 	unsigned char proto_ident;
++	bool tid_found;
+ 
+ 	if (!se_sess || !se_lun) {
+ 		pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
+@@ -3278,9 +3280,9 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
+ 		ret = TCM_INVALID_PARAMETER_LIST;
+ 		goto out;
+ 	}
+-	initiator_str = target_parse_pr_out_transport_id(dest_se_tpg,
+-			&buf[24], &tmp_tid_len, &iport_ptr);
+-	if (!initiator_str) {
++	tid_found = target_parse_pr_out_transport_id(dest_se_tpg,
++			&buf[24], &tmp_tid_len, &iport_ptr, initiator_str);
++	if (!tid_found) {
+ 		pr_err("SPC-3 PR REGISTER_AND_MOVE: Unable to locate"
+ 			" initiator_str from Transport ID\n");
+ 		ret = TCM_INVALID_PARAMETER_LIST;
+diff --git a/drivers/thermal/qcom/qcom-spmi-temp-alarm.c b/drivers/thermal/qcom/qcom-spmi-temp-alarm.c
+index c2d59cbfaea912..a575585c737be5 100644
+--- a/drivers/thermal/qcom/qcom-spmi-temp-alarm.c
++++ b/drivers/thermal/qcom/qcom-spmi-temp-alarm.c
+@@ -1,6 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0-only
+ /*
+  * Copyright (c) 2011-2015, 2017, 2020, The Linux Foundation. All rights reserved.
++ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+  */
+ 
+ #include <linux/bitops.h>
+@@ -16,6 +17,7 @@
+ 
+ #include "../thermal_hwmon.h"
+ 
++#define QPNP_TM_REG_DIG_MINOR		0x00
+ #define QPNP_TM_REG_DIG_MAJOR		0x01
+ #define QPNP_TM_REG_TYPE		0x04
+ #define QPNP_TM_REG_SUBTYPE		0x05
+@@ -31,7 +33,7 @@
+ #define STATUS_GEN2_STATE_MASK		GENMASK(6, 4)
+ #define STATUS_GEN2_STATE_SHIFT		4
+ 
+-#define SHUTDOWN_CTRL1_OVERRIDE_S2	BIT(6)
++#define SHUTDOWN_CTRL1_OVERRIDE_STAGE2	BIT(6)
+ #define SHUTDOWN_CTRL1_THRESHOLD_MASK	GENMASK(1, 0)
+ 
+ #define SHUTDOWN_CTRL1_RATE_25HZ	BIT(3)
+@@ -78,6 +80,7 @@ struct qpnp_tm_chip {
+ 	/* protects .thresh, .stage and chip registers */
+ 	struct mutex			lock;
+ 	bool				initialized;
++	bool				require_stage2_shutdown;
+ 
+ 	struct iio_channel		*adc;
+ 	const long			(*temp_map)[THRESH_COUNT][STAGE_COUNT];
+@@ -220,13 +223,13 @@ static int qpnp_tm_update_critical_trip_temp(struct qpnp_tm_chip *chip,
+ {
+ 	long stage2_threshold_min = (*chip->temp_map)[THRESH_MIN][1];
+ 	long stage2_threshold_max = (*chip->temp_map)[THRESH_MAX][1];
+-	bool disable_s2_shutdown = false;
++	bool disable_stage2_shutdown = false;
+ 	u8 reg;
+ 
+ 	WARN_ON(!mutex_is_locked(&chip->lock));
+ 
+ 	/*
+-	 * Default: S2 and S3 shutdown enabled, thresholds at
++	 * Default: Stage 2 and Stage 3 shutdown enabled, thresholds at
+ 	 * lowest threshold set, monitoring at 25Hz
+ 	 */
+ 	reg = SHUTDOWN_CTRL1_RATE_25HZ;
+@@ -241,12 +244,12 @@ static int qpnp_tm_update_critical_trip_temp(struct qpnp_tm_chip *chip,
+ 		chip->thresh = THRESH_MAX -
+ 			((stage2_threshold_max - temp) /
+ 			 TEMP_THRESH_STEP);
+-		disable_s2_shutdown = true;
++		disable_stage2_shutdown = true;
+ 	} else {
+ 		chip->thresh = THRESH_MAX;
+ 
+ 		if (chip->adc)
+-			disable_s2_shutdown = true;
++			disable_stage2_shutdown = true;
+ 		else
+ 			dev_warn(chip->dev,
+ 				 "No ADC is configured and critical temperature %d mC is above the maximum stage 2 threshold of %ld mC! Configuring stage 2 shutdown at %ld mC.\n",
+@@ -255,8 +258,8 @@ static int qpnp_tm_update_critical_trip_temp(struct qpnp_tm_chip *chip,
+ 
+ skip:
+ 	reg |= chip->thresh;
+-	if (disable_s2_shutdown)
+-		reg |= SHUTDOWN_CTRL1_OVERRIDE_S2;
++	if (disable_stage2_shutdown && !chip->require_stage2_shutdown)
++		reg |= SHUTDOWN_CTRL1_OVERRIDE_STAGE2;
+ 
+ 	return qpnp_tm_write(chip, QPNP_TM_REG_SHUTDOWN_CTRL1, reg);
+ }
+@@ -350,8 +353,8 @@ static int qpnp_tm_probe(struct platform_device *pdev)
+ {
+ 	struct qpnp_tm_chip *chip;
+ 	struct device_node *node;
+-	u8 type, subtype, dig_major;
+-	u32 res;
++	u8 type, subtype, dig_major, dig_minor;
++	u32 res, dig_revision;
+ 	int ret, irq;
+ 
+ 	node = pdev->dev.of_node;
+@@ -403,6 +406,11 @@ static int qpnp_tm_probe(struct platform_device *pdev)
+ 		return dev_err_probe(&pdev->dev, ret,
+ 				     "could not read dig_major\n");
+ 
++	ret = qpnp_tm_read(chip, QPNP_TM_REG_DIG_MINOR, &dig_minor);
++	if (ret < 0)
++		return dev_err_probe(&pdev->dev, ret,
++				     "could not read dig_minor\n");
++
+ 	if (type != QPNP_TM_TYPE || (subtype != QPNP_TM_SUBTYPE_GEN1
+ 				     && subtype != QPNP_TM_SUBTYPE_GEN2)) {
+ 		dev_err(&pdev->dev, "invalid type 0x%02x or subtype 0x%02x\n",
+@@ -416,6 +424,23 @@ static int qpnp_tm_probe(struct platform_device *pdev)
+ 	else
+ 		chip->temp_map = &temp_map_gen1;
+ 
++	if (chip->subtype == QPNP_TM_SUBTYPE_GEN2) {
++		dig_revision = (dig_major << 8) | dig_minor;
++		/*
++		 * Check if stage 2 automatic partial shutdown must remain
++		 * enabled to avoid potential repeated faults upon reaching
++		 * over-temperature stage 3.
++		 */
++		switch (dig_revision) {
++		case 0x0001:
++		case 0x0002:
++		case 0x0100:
++		case 0x0101:
++			chip->require_stage2_shutdown = true;
++			break;
++		}
++	}
++
+ 	/*
+ 	 * Register the sensor before initializing the hardware to be able to
+ 	 * read the trip points. get_temp() returns the default temperature
+diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c
+index 1838aa729bb50a..c58c53d4ecc668 100644
+--- a/drivers/thermal/thermal_sysfs.c
++++ b/drivers/thermal/thermal_sysfs.c
+@@ -40,10 +40,13 @@ temp_show(struct device *dev, struct device_attribute *attr, char *buf)
+ 
+ 	ret = thermal_zone_get_temp(tz, &temperature);
+ 
+-	if (ret)
+-		return ret;
++	if (!ret)
++		return sprintf(buf, "%d\n", temperature);
+ 
+-	return sprintf(buf, "%d\n", temperature);
++	if (ret == -EAGAIN)
++		return -ENODATA;
++
++	return ret;
+ }
+ 
+ static ssize_t
+diff --git a/drivers/thunderbolt/domain.c b/drivers/thunderbolt/domain.c
+index 144d0232a70c11..b692618ed9d4f4 100644
+--- a/drivers/thunderbolt/domain.c
++++ b/drivers/thunderbolt/domain.c
+@@ -36,7 +36,7 @@ static bool match_service_id(const struct tb_service_id *id,
+ 			return false;
+ 	}
+ 
+-	if (id->match_flags & TBSVC_MATCH_PROTOCOL_VERSION) {
++	if (id->match_flags & TBSVC_MATCH_PROTOCOL_REVISION) {
+ 		if (id->protocol_revision != svc->prtcrevs)
+ 			return false;
+ 	}
+diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
+index d94d73e45fb6de..440303566b14a0 100644
+--- a/drivers/tty/serial/serial_core.c
++++ b/drivers/tty/serial/serial_core.c
+@@ -1353,28 +1353,28 @@ static void uart_sanitize_serial_rs485_delays(struct uart_port *port,
+ 	if (!port->rs485_supported.delay_rts_before_send) {
+ 		if (rs485->delay_rts_before_send) {
+ 			dev_warn_ratelimited(port->dev,
+-				"%s (%d): RTS delay before sending not supported\n",
++				"%s (%u): RTS delay before sending not supported\n",
+ 				port->name, port->line);
+ 		}
+ 		rs485->delay_rts_before_send = 0;
+ 	} else if (rs485->delay_rts_before_send > RS485_MAX_RTS_DELAY) {
+ 		rs485->delay_rts_before_send = RS485_MAX_RTS_DELAY;
+ 		dev_warn_ratelimited(port->dev,
+-			"%s (%d): RTS delay before sending clamped to %u ms\n",
++			"%s (%u): RTS delay before sending clamped to %u ms\n",
+ 			port->name, port->line, rs485->delay_rts_before_send);
+ 	}
+ 
+ 	if (!port->rs485_supported.delay_rts_after_send) {
+ 		if (rs485->delay_rts_after_send) {
+ 			dev_warn_ratelimited(port->dev,
+-				"%s (%d): RTS delay after sending not supported\n",
++				"%s (%u): RTS delay after sending not supported\n",
+ 				port->name, port->line);
+ 		}
+ 		rs485->delay_rts_after_send = 0;
+ 	} else if (rs485->delay_rts_after_send > RS485_MAX_RTS_DELAY) {
+ 		rs485->delay_rts_after_send = RS485_MAX_RTS_DELAY;
+ 		dev_warn_ratelimited(port->dev,
+-			"%s (%d): RTS delay after sending clamped to %u ms\n",
++			"%s (%u): RTS delay after sending clamped to %u ms\n",
+ 			port->name, port->line, rs485->delay_rts_after_send);
+ 	}
+ }
+@@ -1404,14 +1404,14 @@ static void uart_sanitize_serial_rs485(struct uart_port *port, struct serial_rs4
+ 			rs485->flags &= ~SER_RS485_RTS_AFTER_SEND;
+ 
+ 			dev_warn_ratelimited(port->dev,
+-				"%s (%d): invalid RTS setting, using RTS_ON_SEND instead\n",
++				"%s (%u): invalid RTS setting, using RTS_ON_SEND instead\n",
+ 				port->name, port->line);
+ 		} else {
+ 			rs485->flags |= SER_RS485_RTS_AFTER_SEND;
+ 			rs485->flags &= ~SER_RS485_RTS_ON_SEND;
+ 
+ 			dev_warn_ratelimited(port->dev,
+-				"%s (%d): invalid RTS setting, using RTS_AFTER_SEND instead\n",
++				"%s (%u): invalid RTS setting, using RTS_AFTER_SEND instead\n",
+ 				port->name, port->line);
+ 		}
+ 	}
+@@ -1850,7 +1850,7 @@ static void uart_wait_until_sent(struct tty_struct *tty, int timeout)
+ 
+ 	expire = jiffies + timeout;
+ 
+-	pr_debug("uart_wait_until_sent(%d), jiffies=%lu, expire=%lu...\n",
++	pr_debug("uart_wait_until_sent(%u), jiffies=%lu, expire=%lu...\n",
+ 		port->line, jiffies, expire);
+ 
+ 	/*
+@@ -2046,7 +2046,7 @@ static void uart_line_info(struct seq_file *m, struct uart_driver *drv, int i)
+ 		return;
+ 
+ 	mmio = uport->iotype >= UPIO_MEM;
+-	seq_printf(m, "%d: uart:%s %s%08llX irq:%d",
++	seq_printf(m, "%u: uart:%s %s%08llX irq:%u",
+ 			uport->line, uart_type(uport),
+ 			mmio ? "mmio:0x" : "port:",
+ 			mmio ? (unsigned long long)uport->mapbase
+@@ -2068,18 +2068,18 @@ static void uart_line_info(struct seq_file *m, struct uart_driver *drv, int i)
+ 		if (pm_state != UART_PM_STATE_ON)
+ 			uart_change_pm(state, pm_state);
+ 
+-		seq_printf(m, " tx:%d rx:%d",
++		seq_printf(m, " tx:%u rx:%u",
+ 				uport->icount.tx, uport->icount.rx);
+ 		if (uport->icount.frame)
+-			seq_printf(m, " fe:%d",	uport->icount.frame);
++			seq_printf(m, " fe:%u",	uport->icount.frame);
+ 		if (uport->icount.parity)
+-			seq_printf(m, " pe:%d",	uport->icount.parity);
++			seq_printf(m, " pe:%u",	uport->icount.parity);
+ 		if (uport->icount.brk)
+-			seq_printf(m, " brk:%d", uport->icount.brk);
++			seq_printf(m, " brk:%u", uport->icount.brk);
+ 		if (uport->icount.overrun)
+-			seq_printf(m, " oe:%d", uport->icount.overrun);
++			seq_printf(m, " oe:%u", uport->icount.overrun);
+ 		if (uport->icount.buf_overrun)
+-			seq_printf(m, " bo:%d", uport->icount.buf_overrun);
++			seq_printf(m, " bo:%u", uport->icount.buf_overrun);
+ 
+ #define INFOBIT(bit, str) \
+ 	if (uport->mctrl & (bit)) \
+@@ -2571,7 +2571,7 @@ uart_report_port(struct uart_driver *drv, struct uart_port *port)
+ 		break;
+ 	}
+ 
+-	pr_info("%s%s%s at %s (irq = %d, base_baud = %d) is a %s\n",
++	pr_info("%s%s%s at %s (irq = %u, base_baud = %u) is a %s\n",
+ 	       port->dev ? dev_name(port->dev) : "",
+ 	       port->dev ? ": " : "",
+ 	       port->name,
+@@ -2579,7 +2579,7 @@ uart_report_port(struct uart_driver *drv, struct uart_port *port)
+ 
+ 	/* The magic multiplier feature is a bit obscure, so report it too.  */
+ 	if (port->flags & UPF_MAGIC_MULTIPLIER)
+-		pr_info("%s%s%s extra baud rates supported: %d, %d",
++		pr_info("%s%s%s extra baud rates supported: %u, %u",
+ 			port->dev ? dev_name(port->dev) : "",
+ 			port->dev ? ": " : "",
+ 			port->name,
+@@ -2978,7 +2978,7 @@ static ssize_t close_delay_show(struct device *dev,
+ 	struct tty_port *port = dev_get_drvdata(dev);
+ 
+ 	uart_get_info(port, &tmp);
+-	return sprintf(buf, "%d\n", tmp.close_delay);
++	return sprintf(buf, "%u\n", tmp.close_delay);
+ }
+ 
+ static ssize_t closing_wait_show(struct device *dev,
+@@ -2988,7 +2988,7 @@ static ssize_t closing_wait_show(struct device *dev,
+ 	struct tty_port *port = dev_get_drvdata(dev);
+ 
+ 	uart_get_info(port, &tmp);
+-	return sprintf(buf, "%d\n", tmp.closing_wait);
++	return sprintf(buf, "%u\n", tmp.closing_wait);
+ }
+ 
+ static ssize_t custom_divisor_show(struct device *dev,
+@@ -3008,7 +3008,7 @@ static ssize_t io_type_show(struct device *dev,
+ 	struct tty_port *port = dev_get_drvdata(dev);
+ 
+ 	uart_get_info(port, &tmp);
+-	return sprintf(buf, "%d\n", tmp.io_type);
++	return sprintf(buf, "%u\n", tmp.io_type);
+ }
+ 
+ static ssize_t iomem_base_show(struct device *dev,
+@@ -3028,7 +3028,7 @@ static ssize_t iomem_reg_shift_show(struct device *dev,
+ 	struct tty_port *port = dev_get_drvdata(dev);
+ 
+ 	uart_get_info(port, &tmp);
+-	return sprintf(buf, "%d\n", tmp.iomem_reg_shift);
++	return sprintf(buf, "%u\n", tmp.iomem_reg_shift);
+ }
+ 
+ static ssize_t console_show(struct device *dev,
+@@ -3168,7 +3168,7 @@ static int serial_core_add_one_port(struct uart_driver *drv, struct uart_port *u
+ 	state->pm_state = UART_PM_STATE_UNDEFINED;
+ 	uart_port_set_cons(uport, drv->cons);
+ 	uport->minor = drv->tty_driver->minor_start + uport->line;
+-	uport->name = kasprintf(GFP_KERNEL, "%s%d", drv->dev_name,
++	uport->name = kasprintf(GFP_KERNEL, "%s%u", drv->dev_name,
+ 				drv->tty_driver->name_base + uport->line);
+ 	if (!uport->name) {
+ 		ret = -ENOMEM;
+@@ -3211,7 +3211,7 @@ static int serial_core_add_one_port(struct uart_driver *drv, struct uart_port *u
+ 		device_set_wakeup_capable(tty_dev, 1);
+ 	} else {
+ 		uport->flags |= UPF_DEAD;
+-		dev_err(uport->dev, "Cannot register tty device on line %d\n",
++		dev_err(uport->dev, "Cannot register tty device on line %u\n",
+ 		       uport->line);
+ 	}
+ 
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index c2ecfa3c83496f..5a334e370f4d66 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -1520,6 +1520,12 @@ static int acm_probe(struct usb_interface *intf,
+ 			goto err_remove_files;
+ 	}
+ 
++	if (quirks & CLEAR_HALT_CONDITIONS) {
++		/* errors intentionally ignored */
++		usb_clear_halt(usb_dev, acm->in);
++		usb_clear_halt(usb_dev, acm->out);
++	}
++
+ 	tty_dev = tty_port_register_device(&acm->port, acm_tty_driver, minor,
+ 			&control_interface->dev);
+ 	if (IS_ERR(tty_dev)) {
+@@ -1527,11 +1533,6 @@ static int acm_probe(struct usb_interface *intf,
+ 		goto err_release_data_interface;
+ 	}
+ 
+-	if (quirks & CLEAR_HALT_CONDITIONS) {
+-		usb_clear_halt(usb_dev, acm->in);
+-		usb_clear_halt(usb_dev, acm->out);
+-	}
+-
+ 	dev_info(&intf->dev, "ttyACM%d: USB ACM device\n", minor);
+ 
+ 	return 0;
+diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
+index 880d52c0949d47..9565d14d7c071d 100644
+--- a/drivers/usb/core/config.c
++++ b/drivers/usb/core/config.c
+@@ -81,8 +81,14 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
+ 	 */
+ 	desc = (struct usb_ss_ep_comp_descriptor *) buffer;
+ 
+-	if (desc->bDescriptorType != USB_DT_SS_ENDPOINT_COMP ||
+-			size < USB_DT_SS_EP_COMP_SIZE) {
++	if (size < USB_DT_SS_EP_COMP_SIZE) {
++		dev_notice(ddev,
++			   "invalid SuperSpeed endpoint companion descriptor "
++			   "of length %d, skipping\n", size);
++		return;
++	}
++
++	if (desc->bDescriptorType != USB_DT_SS_ENDPOINT_COMP) {
+ 		dev_notice(ddev, "No SuperSpeed endpoint companion for config %d "
+ 				" interface %d altsetting %d ep %d: "
+ 				"using minimum values\n",
+diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
+index 7576920e2d5a3e..9f202f575cecce 100644
+--- a/drivers/usb/core/urb.c
++++ b/drivers/usb/core/urb.c
+@@ -500,7 +500,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
+ 
+ 	/* Check that the pipe's type matches the endpoint's type */
+ 	if (usb_pipe_type_check(urb->dev, urb->pipe))
+-		dev_WARN(&dev->dev, "BOGUS urb xfer, pipe %x != type %x\n",
++		dev_warn_once(&dev->dev, "BOGUS urb xfer, pipe %x != type %x\n",
+ 			usb_pipetype(urb->pipe), pipetypes[xfertype]);
+ 
+ 	/* Check against a simple/standard policy */
+diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
+index 91178b8dbbf086..1111650757eab3 100644
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -1172,6 +1172,8 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
+ 	ep0_ctx->deq = cpu_to_le64(dev->eps[0].ring->first_seg->dma |
+ 				   dev->eps[0].ring->cycle_state);
+ 
++	ep0_ctx->tx_info = cpu_to_le32(EP_AVG_TRB_LENGTH(8));
++
+ 	trace_xhci_setup_addressable_virt_device(dev);
+ 
+ 	/* Steps 7 and 8 were done in xhci_alloc_virt_device() */
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 2ff8787f753c90..f6ecb3b9fb14e0 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -1313,12 +1313,15 @@ static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci,
+  */
+ void xhci_hc_died(struct xhci_hcd *xhci)
+ {
++	bool notify;
+ 	int i, j;
+ 
+ 	if (xhci->xhc_state & XHCI_STATE_DYING)
+ 		return;
+ 
+-	xhci_err(xhci, "xHCI host controller not responding, assume dead\n");
++	notify = !(xhci->xhc_state & XHCI_STATE_REMOVING);
++	if (notify)
++		xhci_err(xhci, "xHCI host controller not responding, assume dead\n");
+ 	xhci->xhc_state |= XHCI_STATE_DYING;
+ 
+ 	xhci_cleanup_command_queue(xhci);
+@@ -1332,7 +1335,7 @@ void xhci_hc_died(struct xhci_hcd *xhci)
+ 	}
+ 
+ 	/* inform usb core hc died if PCI remove isn't already handling it */
+-	if (!(xhci->xhc_state & XHCI_STATE_REMOVING))
++	if (notify)
+ 		usb_hc_died(xhci_to_hcd(xhci));
+ }
+ 
+@@ -4378,7 +4381,8 @@ static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
+ 
+ 	if ((xhci->xhc_state & XHCI_STATE_DYING) ||
+ 		(xhci->xhc_state & XHCI_STATE_HALTED)) {
+-		xhci_dbg(xhci, "xHCI dying or halted, can't queue_command\n");
++		xhci_dbg(xhci, "xHCI dying or halted, can't queue_command. state: 0x%x\n",
++			 xhci->xhc_state);
+ 		return -ESHUTDOWN;
+ 	}
+ 
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 09a5a660496205..e399638d60004e 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -119,7 +119,8 @@ int xhci_halt(struct xhci_hcd *xhci)
+ 	ret = xhci_handshake(&xhci->op_regs->status,
+ 			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
+ 	if (ret) {
+-		xhci_warn(xhci, "Host halt failed, %d\n", ret);
++		if (!(xhci->xhc_state & XHCI_STATE_DYING))
++			xhci_warn(xhci, "Host halt failed, %d\n", ret);
+ 		return ret;
+ 	}
+ 
+@@ -178,7 +179,8 @@ int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us)
+ 	state = readl(&xhci->op_regs->status);
+ 
+ 	if (state == ~(u32)0) {
+-		xhci_warn(xhci, "Host not accessible, reset failed.\n");
++		if (!(xhci->xhc_state & XHCI_STATE_DYING))
++			xhci_warn(xhci, "Host not accessible, reset failed.\n");
+ 		return -ENODEV;
+ 	}
+ 
+diff --git a/drivers/usb/typec/mux/intel_pmc_mux.c b/drivers/usb/typec/mux/intel_pmc_mux.c
+index 46b4c5c3a6beb3..32343f567d44e0 100644
+--- a/drivers/usb/typec/mux/intel_pmc_mux.c
++++ b/drivers/usb/typec/mux/intel_pmc_mux.c
+@@ -754,7 +754,7 @@ static int pmc_usb_probe(struct platform_device *pdev)
+ 
+ 	pmc->ipc = devm_intel_scu_ipc_dev_get(&pdev->dev);
+ 	if (!pmc->ipc)
+-		return -ENODEV;
++		return -EPROBE_DEFER;
+ 
+ 	pmc->dev = &pdev->dev;
+ 
+diff --git a/drivers/usb/typec/tcpm/tcpci_maxim_core.c b/drivers/usb/typec/tcpm/tcpci_maxim_core.c
+index 648311f5e3cf13..eeaf79e97261af 100644
+--- a/drivers/usb/typec/tcpm/tcpci_maxim_core.c
++++ b/drivers/usb/typec/tcpm/tcpci_maxim_core.c
+@@ -421,21 +421,6 @@ static irqreturn_t max_tcpci_isr(int irq, void *dev_id)
+ 	return IRQ_WAKE_THREAD;
+ }
+ 
+-static int max_tcpci_init_alert(struct max_tcpci_chip *chip, struct i2c_client *client)
+-{
+-	int ret;
+-
+-	ret = devm_request_threaded_irq(chip->dev, client->irq, max_tcpci_isr, max_tcpci_irq,
+-					(IRQF_TRIGGER_LOW | IRQF_ONESHOT), dev_name(chip->dev),
+-					chip);
+-
+-	if (ret < 0)
+-		return ret;
+-
+-	enable_irq_wake(client->irq);
+-	return 0;
+-}
+-
+ static int max_tcpci_start_toggling(struct tcpci *tcpci, struct tcpci_data *tdata,
+ 				    enum typec_cc_status cc)
+ {
+@@ -532,7 +517,9 @@ static int max_tcpci_probe(struct i2c_client *client)
+ 
+ 	chip->port = tcpci_get_tcpm_port(chip->tcpci);
+ 
+-	ret = max_tcpci_init_alert(chip, client);
++	ret = devm_request_threaded_irq(&client->dev, client->irq, max_tcpci_isr, max_tcpci_irq,
++					(IRQF_TRIGGER_LOW | IRQF_ONESHOT), dev_name(chip->dev),
++					chip);
+ 	if (ret < 0)
+ 		return dev_err_probe(&client->dev, ret,
+ 				     "IRQ initialization failed\n");
+@@ -541,6 +528,32 @@ static int max_tcpci_probe(struct i2c_client *client)
+ 	return 0;
+ }
+ 
++#ifdef CONFIG_PM_SLEEP
++static int max_tcpci_resume(struct device *dev)
++{
++	struct i2c_client *client = to_i2c_client(dev);
++	int ret = 0;
++
++	if (client->irq && device_may_wakeup(dev))
++		ret = disable_irq_wake(client->irq);
++
++	return ret;
++}
++
++static int max_tcpci_suspend(struct device *dev)
++{
++	struct i2c_client *client = to_i2c_client(dev);
++	int ret = 0;
++
++	if (client->irq && device_may_wakeup(dev))
++		ret = enable_irq_wake(client->irq);
++
++	return ret;
++}
++#endif /* CONFIG_PM_SLEEP */
++
++static SIMPLE_DEV_PM_OPS(max_tcpci_pm_ops, max_tcpci_suspend, max_tcpci_resume);
++
+ static const struct i2c_device_id max_tcpci_id[] = {
+ 	{ "maxtcpc" },
+ 	{ }
+@@ -559,6 +572,7 @@ static struct i2c_driver max_tcpci_i2c_driver = {
+ 	.driver = {
+ 		.name = "maxtcpc",
+ 		.of_match_table = of_match_ptr(max_tcpci_of_match),
++		.pm = &max_tcpci_pm_ops,
+ 	},
+ 	.probe = max_tcpci_probe,
+ 	.id_table = max_tcpci_id,
+diff --git a/drivers/usb/typec/ucsi/psy.c b/drivers/usb/typec/ucsi/psy.c
+index 1c631c7855a960..9447a50716ec10 100644
+--- a/drivers/usb/typec/ucsi/psy.c
++++ b/drivers/usb/typec/ucsi/psy.c
+@@ -164,7 +164,7 @@ static int ucsi_psy_get_current_max(struct ucsi_connector *con,
+ 	case UCSI_CONSTAT_PWR_OPMODE_DEFAULT:
+ 	/* UCSI can't tell b/w DCP/CDP or USB2/3x1/3x2 SDP chargers */
+ 	default:
+-		val->intval = 0;
++		val->intval = UCSI_TYPEC_DEFAULT_CURRENT * 1000;
+ 		break;
+ 	}
+ 	return 0;
+diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
+index 8eee3d8e588a29..896e6bc1b5e29c 100644
+--- a/drivers/usb/typec/ucsi/ucsi.c
++++ b/drivers/usb/typec/ucsi/ucsi.c
+@@ -1225,6 +1225,7 @@ static void ucsi_handle_connector_change(struct work_struct *work)
+ 
+ 	if (con->status.change & UCSI_CONSTAT_POWER_DIR_CHANGE) {
+ 		typec_set_pwr_role(con->port, role);
++		ucsi_port_psy_changed(con);
+ 
+ 		/* Complete pending power role swap */
+ 		if (!completion_done(&con->complete))
+diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
+index 0568e643e8447a..25cff965896607 100644
+--- a/drivers/usb/typec/ucsi/ucsi.h
++++ b/drivers/usb/typec/ucsi/ucsi.h
+@@ -412,9 +412,10 @@ struct ucsi {
+ #define UCSI_MAX_SVID		5
+ #define UCSI_MAX_ALTMODES	(UCSI_MAX_SVID * 6)
+ 
+-#define UCSI_TYPEC_VSAFE5V	5000
+-#define UCSI_TYPEC_1_5_CURRENT	1500
+-#define UCSI_TYPEC_3_0_CURRENT	3000
++#define UCSI_TYPEC_VSAFE5V		5000
++#define UCSI_TYPEC_DEFAULT_CURRENT	 100
++#define UCSI_TYPEC_1_5_CURRENT		1500
++#define UCSI_TYPEC_3_0_CURRENT		3000
+ 
+ struct ucsi_connector {
+ 	int num;
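[Editorial note: the power-supply class reports current in microamps while the UCSI constants above are in milliamps, hence the multiply by 1000 in the psy.c hunk. Reporting 0 previously made a plain USB2 default source look like it supplied no current at all. A runnable sketch of the unit handling, constants copied from the hunk:]

    #include <stdio.h>

    #define UCSI_TYPEC_DEFAULT_CURRENT   100    /* mA, USB2 default power */
    #define UCSI_TYPEC_3_0_CURRENT      3000    /* mA */

    int main(void)
    {
        /* The power-supply core expects microamps in intval. */
        printf("%d uA\n", UCSI_TYPEC_DEFAULT_CURRENT * 1000);  /* 100000 */
        printf("%d uA\n", UCSI_TYPEC_3_0_CURRENT * 1000);      /* 3000000 */
        return 0;
    }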
+diff --git a/drivers/vfio/pci/mlx5/cmd.c b/drivers/vfio/pci/mlx5/cmd.c
+index eb7387ee6ebd10..e7d2251db62679 100644
+--- a/drivers/vfio/pci/mlx5/cmd.c
++++ b/drivers/vfio/pci/mlx5/cmd.c
+@@ -1538,8 +1538,8 @@ int mlx5vf_start_page_tracker(struct vfio_device *vdev,
+ 	log_max_msg_size = MLX5_CAP_ADV_VIRTUALIZATION(mdev, pg_track_log_max_msg_size);
+ 	max_msg_size = (1ULL << log_max_msg_size);
+ 	/* The RQ must hold at least 4 WQEs/messages for successful QP creation */
+-	if (rq_size < 4 * max_msg_size)
+-		rq_size = 4 * max_msg_size;
++	if (rq_size < 4ULL * max_msg_size)
++		rq_size = 4ULL * max_msg_size;
+ 
+ 	memset(tracker, 0, sizeof(*tracker));
+ 	tracker->uar = mlx5_get_uars_page(mdev);
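[Editorial note: the 4ULL above matters when max_msg_size is a 32-bit type, which the fix suggests; 4 * max_msg_size is then computed in 32-bit arithmetic and can wrap to a small value before the comparison. A runnable sketch of the wrap, assuming a 32-bit operand:]

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t max_msg_size = UINT32_C(1) << 30;  /* a large message cap */

        /* The 32-bit multiply wraps to 0; the 64-bit one does not. */
        printf("%u\n",   4 * max_msg_size);         /* 0 */
        printf("%llu\n", 4ULL * max_msg_size);      /* 4294967296 */
        return 0;
    }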
+diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
+index 8338cfd61fe14a..124997ce00d631 100644
+--- a/drivers/vfio/vfio_iommu_type1.c
++++ b/drivers/vfio/vfio_iommu_type1.c
+@@ -619,6 +619,13 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
+ 
+ 	while (npage) {
+ 		if (!batch->size) {
++			/*
++			 * Large mappings may take a while to repeatedly refill
++			 * the batch, so conditionally relinquish the CPU when
++			 * needed to avoid stalls.
++			 */
++			cond_resched();
++
+ 			/* Empty batch, so refill it. */
+ 			long req_pages = min_t(long, npage, batch->capacity);
+ 
+diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
+index 79b0b7cd28601a..71604668e53f60 100644
+--- a/drivers/vhost/vhost.c
++++ b/drivers/vhost/vhost.c
+@@ -2971,6 +2971,9 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
+ 	}
+ 	r = __vhost_add_used_n(vq, heads, count);
+ 
++	if (r < 0)
++		return r;
++
+ 	/* Make sure buffer is written before we update index. */
+ 	smp_wmb();
+ 	if (vhost_put_used_idx(vq)) {
+diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
+index 678d2802760c8c..893fd66b5269c7 100644
+--- a/drivers/video/fbdev/core/fbcon.c
++++ b/drivers/video/fbdev/core/fbcon.c
+@@ -826,7 +826,8 @@ static void con2fb_init_display(struct vc_data *vc, struct fb_info *info,
+ 				   fg_vc->vc_rows);
+ 	}
+ 
+-	update_screen(vc_cons[fg_console].d);
++	if (fg_console != unit)
++		update_screen(vc_cons[fg_console].d);
+ }
+ 
+ /**
+@@ -1363,6 +1364,7 @@ static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var,
+ 	struct vc_data *svc;
+ 	struct fbcon_ops *ops = info->fbcon_par;
+ 	int rows, cols;
++	unsigned long ret = 0;
+ 
+ 	p = &fb_display[unit];
+ 
+@@ -1413,11 +1415,10 @@ static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var,
+ 	rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
+ 	cols /= vc->vc_font.width;
+ 	rows /= vc->vc_font.height;
+-	vc_resize(vc, cols, rows);
++	ret = vc_resize(vc, cols, rows);
+ 
+-	if (con_is_visible(vc)) {
++	if (con_is_visible(vc) && !ret)
+ 		update_screen(vc);
+-	}
+ }
+ 
+ static __inline__ void ywrap_up(struct vc_data *vc, int count)
+diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
+index eca2498f243685..6a033bf17ab602 100644
+--- a/drivers/video/fbdev/core/fbmem.c
++++ b/drivers/video/fbdev/core/fbmem.c
+@@ -403,6 +403,9 @@ static int do_register_framebuffer(struct fb_info *fb_info)
+ 		if (!registered_fb[i])
+ 			break;
+ 
++	if (i >= FB_MAX)
++		return -ENXIO;
++
+ 	if (!fb_info->modelist.prev || !fb_info->modelist.next)
+ 		INIT_LIST_HEAD(&fb_info->modelist);
+ 
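[Editorial note: the added bounds check closes a classic search-loop hole. If no free slot exists, the loop exits with i == FB_MAX and the index must not be used. A minimal runnable sketch of the idiom:]

    #include <stdio.h>

    #define FB_MAX 4

    int main(void)
    {
        void *registered[FB_MAX] = { "a", "b", "c", "d" };  /* all taken */
        int i;

        for (i = 0; i < FB_MAX; i++)
            if (!registered[i])
                break;

        if (i >= FB_MAX) {          /* loop ran off the end: no slot */
            printf("no free slot\n");
            return 1;
        }
        printf("slot %d\n", i);
        return 0;
    }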
+diff --git a/drivers/virt/coco/efi_secret/efi_secret.c b/drivers/virt/coco/efi_secret/efi_secret.c
+index cd29e66b1543ff..8482be108e82e4 100644
+--- a/drivers/virt/coco/efi_secret/efi_secret.c
++++ b/drivers/virt/coco/efi_secret/efi_secret.c
+@@ -136,15 +136,7 @@ static int efi_secret_unlink(struct inode *dir, struct dentry *dentry)
+ 		if (s->fs_files[i] == dentry)
+ 			s->fs_files[i] = NULL;
+ 
+-	/*
+-	 * securityfs_remove tries to lock the directory's inode, but we reach
+-	 * the unlink callback when it's already locked
+-	 */
+-	inode_unlock(dir);
+-	securityfs_remove(dentry);
+-	inode_lock(dir);
+-
+-	return 0;
++	return simple_unlink(inode, dentry);
+ }
+ 
+ static const struct inode_operations efi_secret_dir_inode_operations = {
+diff --git a/drivers/watchdog/dw_wdt.c b/drivers/watchdog/dw_wdt.c
+index 84dca3695f862d..e5e6d7f159180f 100644
+--- a/drivers/watchdog/dw_wdt.c
++++ b/drivers/watchdog/dw_wdt.c
+@@ -644,6 +644,8 @@ static int dw_wdt_drv_probe(struct platform_device *pdev)
+ 	} else {
+ 		wdd->timeout = DW_WDT_DEFAULT_SECONDS;
+ 		watchdog_init_timeout(wdd, 0, dev);
++		/* Limit timeout value to hardware constraints. */
++		dw_wdt_set_timeout(wdd, wdd->timeout);
+ 	}
+ 
+ 	platform_set_drvdata(pdev, dw_wdt);
+diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
+index f01ed38aba6751..d5c91759dc00c6 100644
+--- a/drivers/watchdog/iTCO_wdt.c
++++ b/drivers/watchdog/iTCO_wdt.c
+@@ -601,7 +601,11 @@ static int iTCO_wdt_probe(struct platform_device *pdev)
+ 	/* Check that the heartbeat value is within its range;
+ 	   if not reset to the default */
+ 	if (iTCO_wdt_set_timeout(&p->wddev, heartbeat)) {
+-		iTCO_wdt_set_timeout(&p->wddev, WATCHDOG_TIMEOUT);
++		ret = iTCO_wdt_set_timeout(&p->wddev, WATCHDOG_TIMEOUT);
++		if (ret != 0) {
++			dev_err(dev, "Failed to set watchdog timeout (%d)\n", WATCHDOG_TIMEOUT);
++			return ret;
++		}
+ 		dev_info(dev, "timeout value out of range, using %d\n",
+ 			WATCHDOG_TIMEOUT);
+ 	}
+diff --git a/drivers/watchdog/sbsa_gwdt.c b/drivers/watchdog/sbsa_gwdt.c
+index 5f23913ce3b49c..6ce1bfb3906413 100644
+--- a/drivers/watchdog/sbsa_gwdt.c
++++ b/drivers/watchdog/sbsa_gwdt.c
+@@ -75,11 +75,17 @@
+ #define SBSA_GWDT_VERSION_MASK  0xF
+ #define SBSA_GWDT_VERSION_SHIFT 16
+ 
++#define SBSA_GWDT_IMPL_MASK	0x7FF
++#define SBSA_GWDT_IMPL_SHIFT	0
++#define SBSA_GWDT_IMPL_MEDIATEK	0x426
++
+ /**
+  * struct sbsa_gwdt - Internal representation of the SBSA GWDT
+  * @wdd:		kernel watchdog_device structure
+  * @clk:		store the System Counter clock frequency, in Hz.
+  * @version:            store the architecture version
++ * @need_ws0_race_workaround:
++ *			indicate whether to adjust the keepalive interval to avoid a race with WS0
+  * @refresh_base:	Virtual address of the watchdog refresh frame
+  * @control_base:	Virtual address of the watchdog control frame
+  */
+@@ -87,6 +93,7 @@ struct sbsa_gwdt {
+ 	struct watchdog_device	wdd;
+ 	u32			clk;
+ 	int			version;
++	bool			need_ws0_race_workaround;
+ 	void __iomem		*refresh_base;
+ 	void __iomem		*control_base;
+ };
+@@ -161,6 +168,31 @@ static int sbsa_gwdt_set_timeout(struct watchdog_device *wdd,
+ 		 */
+ 		sbsa_gwdt_reg_write(((u64)gwdt->clk / 2) * timeout, gwdt);
+ 
++	/*
++	 * Some watchdog hardware has a race condition where it will ignore
++	 * sbsa_gwdt_keepalive() if it is called at the exact moment that a
++	 * timeout occurs and WS0 is being asserted. Unfortunately, the default
++	 * behavior of the watchdog core is very likely to trigger this race
++	 * when action=0 because it programs WOR to be half of the desired
++	 * timeout, and watchdog_next_keepalive() chooses the exact same time to
++	 * send keepalive pings.
++	 *
++	 * This triggers a race where sbsa_gwdt_keepalive() can be called right
++	 * as WS0 is being asserted, and affected hardware will ignore that
++	 * write and continue to assert WS0. After another (timeout / 2)
++	 * seconds, the same race happens again. If the driver wins then the
++	 * explicit refresh will reset WS0 to false but if the hardware wins,
++	 * then WS1 is asserted and the system resets.
++	 *
++	 * Avoid the problem by scheduling keepalive heartbeats one second later
++	 * than the WOR timeout.
++	 *
++	 * This workaround might not be needed in a future revision of the
++	 * hardware.
++	 */
++	if (gwdt->need_ws0_race_workaround)
++		wdd->min_hw_heartbeat_ms = timeout * 500 + 1000;
++
+ 	return 0;
+ }
+ 
+@@ -202,12 +234,15 @@ static int sbsa_gwdt_keepalive(struct watchdog_device *wdd)
+ static void sbsa_gwdt_get_version(struct watchdog_device *wdd)
+ {
+ 	struct sbsa_gwdt *gwdt = watchdog_get_drvdata(wdd);
+-	int ver;
++	int iidr, ver, impl;
+ 
+-	ver = readl(gwdt->control_base + SBSA_GWDT_W_IIDR);
+-	ver = (ver >> SBSA_GWDT_VERSION_SHIFT) & SBSA_GWDT_VERSION_MASK;
++	iidr = readl(gwdt->control_base + SBSA_GWDT_W_IIDR);
++	ver = (iidr >> SBSA_GWDT_VERSION_SHIFT) & SBSA_GWDT_VERSION_MASK;
++	impl = (iidr >> SBSA_GWDT_IMPL_SHIFT) & SBSA_GWDT_IMPL_MASK;
+ 
+ 	gwdt->version = ver;
++	gwdt->need_ws0_race_workaround =
++		!action && (impl == SBSA_GWDT_IMPL_MEDIATEK);
+ }
+ 
+ static int sbsa_gwdt_start(struct watchdog_device *wdd)
+@@ -299,6 +334,15 @@ static int sbsa_gwdt_probe(struct platform_device *pdev)
+ 	else
+ 		wdd->max_hw_heartbeat_ms = GENMASK_ULL(47, 0) / gwdt->clk * 1000;
+ 
++	if (gwdt->need_ws0_race_workaround) {
++		/*
++		 * A timeout of 3 seconds means that WOR will be set to 1.5
++		 * seconds and the heartbeat will be scheduled every 2.5
++		 * seconds.
++		 */
++		wdd->min_timeout = 3;
++	}
++
+ 	status = readl(cf_base + SBSA_GWDT_WCS);
+ 	if (status & SBSA_GWDT_WCS_WS1) {
+ 		dev_warn(dev, "System reset by WDT.\n");
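[Editorial note on the arithmetic behind the workaround above: with action=0 the watchdog core programs WOR to half the timeout, so default keepalives land exactly when WS0 fires. Setting min_hw_heartbeat_ms = timeout * 500 + 1000 pushes each ping one second past WS0 assertion, safely before WS1, which fires a full WOR period later. A runnable sketch of the resulting schedule:]

    #include <stdio.h>

    int main(void)
    {
        for (unsigned int timeout = 3; timeout <= 10; timeout++) {
            unsigned int wor_ms = timeout * 1000 / 2;       /* WS0 asserts here */
            unsigned int min_hb_ms = timeout * 500 + 1000;  /* earliest ping */

            /* e.g. timeout 3 s -> WOR 1500 ms, heartbeat every 2500 ms,
             * which also explains the new min_timeout of 3 seconds. */
            printf("timeout %2us: WOR %4u ms, heartbeat %4u ms\n",
                   timeout, wor_ms, min_hb_ms);
        }
        return 0;
    }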
+diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
+index dd35e29d80824c..7eef79ece5b3ca 100644
+--- a/fs/btrfs/block-group.c
++++ b/fs/btrfs/block-group.c
+@@ -34,6 +34,19 @@ int btrfs_should_fragment_free_space(const struct btrfs_block_group *block_group
+ }
+ #endif
+ 
++static inline bool has_unwritten_metadata(struct btrfs_block_group *block_group)
++{
++	/* The meta_write_pointer is available only on the zoned setup. */
++	if (!btrfs_is_zoned(block_group->fs_info))
++		return false;
++
++	if (block_group->flags & BTRFS_BLOCK_GROUP_DATA)
++		return false;
++
++	return block_group->start + block_group->alloc_offset >
++		block_group->meta_write_pointer;
++}
++
+ /*
+  * Return target flags in extended format or 0 if restripe for this chunk_type
+  * is not in progress
+@@ -1249,6 +1262,15 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
+ 		goto out;
+ 
+ 	spin_lock(&block_group->lock);
++	/*
++	 * Hitting this WARN means we removed a block group with an unwritten
++	 * region. It will cause "unable to find chunk map for logical" errors.
++	 */
++	if (WARN_ON(has_unwritten_metadata(block_group)))
++		btrfs_warn(fs_info,
++			   "block group %llu is removed before metadata write out",
++			   block_group->start);
++
+ 	set_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags);
+ 
+ 	/*
+@@ -1567,8 +1589,9 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
+ 		 * needing to allocate extents from the block group.
+ 		 */
+ 		used = btrfs_space_info_used(space_info, true);
+-		if (space_info->total_bytes - block_group->length < used &&
+-		    block_group->zone_unusable < block_group->length) {
++		if ((space_info->total_bytes - block_group->length < used &&
++		     block_group->zone_unusable < block_group->length) ||
++		    has_unwritten_metadata(block_group)) {
+ 			/*
+ 			 * Add a reference for the list, compensate for the ref
+ 			 * drop under the "next" label for the
+diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
+index 29c16459740112..3ba15d9c3e8861 100644
+--- a/fs/btrfs/ctree.c
++++ b/fs/btrfs/ctree.c
+@@ -2901,6 +2901,7 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
+ 	if (ret < 0) {
+ 		int ret2;
+ 
++		btrfs_clear_buffer_dirty(trans, c);
+ 		ret2 = btrfs_free_tree_block(trans, btrfs_root_id(root), c, 0, 1);
+ 		if (ret2 < 0)
+ 			btrfs_abort_transaction(trans, ret2);
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 4ceffbef32987b..bb3602059906de 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -3654,6 +3654,21 @@ btrfs_release_block_group(struct btrfs_block_group *cache,
+ 	btrfs_put_block_group(cache);
+ }
+ 
++static bool find_free_extent_check_size_class(const struct find_free_extent_ctl *ffe_ctl,
++					      const struct btrfs_block_group *bg)
++{
++	if (ffe_ctl->policy == BTRFS_EXTENT_ALLOC_ZONED)
++		return true;
++	if (!btrfs_block_group_should_use_size_class(bg))
++		return true;
++	if (ffe_ctl->loop >= LOOP_WRONG_SIZE_CLASS)
++		return true;
++	if (ffe_ctl->loop >= LOOP_UNSET_SIZE_CLASS &&
++	    bg->size_class == BTRFS_BG_SZ_NONE)
++		return true;
++	return ffe_ctl->size_class == bg->size_class;
++}
++
+ /*
+  * Helper function for find_free_extent().
+  *
+@@ -3675,7 +3690,8 @@ static int find_free_extent_clustered(struct btrfs_block_group *bg,
+ 	if (!cluster_bg)
+ 		goto refill_cluster;
+ 	if (cluster_bg != bg && (cluster_bg->ro ||
+-	    !block_group_bits(cluster_bg, ffe_ctl->flags)))
++	    !block_group_bits(cluster_bg, ffe_ctl->flags) ||
++	    !find_free_extent_check_size_class(ffe_ctl, cluster_bg)))
+ 		goto release_cluster;
+ 
+ 	offset = btrfs_alloc_from_cluster(cluster_bg, last_ptr,
+@@ -4231,21 +4247,6 @@ static int find_free_extent_update_loop(struct btrfs_fs_info *fs_info,
+ 	return -ENOSPC;
+ }
+ 
+-static bool find_free_extent_check_size_class(struct find_free_extent_ctl *ffe_ctl,
+-					      struct btrfs_block_group *bg)
+-{
+-	if (ffe_ctl->policy == BTRFS_EXTENT_ALLOC_ZONED)
+-		return true;
+-	if (!btrfs_block_group_should_use_size_class(bg))
+-		return true;
+-	if (ffe_ctl->loop >= LOOP_WRONG_SIZE_CLASS)
+-		return true;
+-	if (ffe_ctl->loop >= LOOP_UNSET_SIZE_CLASS &&
+-	    bg->size_class == BTRFS_BG_SZ_NONE)
+-		return true;
+-	return ffe_ctl->size_class == bg->size_class;
+-}
+-
+ static int prepare_allocation_clustered(struct btrfs_fs_info *fs_info,
+ 					struct find_free_extent_ctl *ffe_ctl,
+ 					struct btrfs_space_info *space_info,
+diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
+index e9f58cdeeb5f3c..6b181bf9f15617 100644
+--- a/fs/btrfs/qgroup.c
++++ b/fs/btrfs/qgroup.c
+@@ -1373,11 +1373,14 @@ int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
+ 
+ 	/*
+ 	 * We have nothing held here and no trans handle, just return the error
+-	 * if there is one.
++	 * if there is one and set back the quota enabled bit since we didn't
++	 * actually disable quotas.
+ 	 */
+ 	ret = flush_reservations(fs_info);
+-	if (ret)
++	if (ret) {
++		set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
+ 		return ret;
++	}
+ 
+ 	/*
+ 	 * 1 For the root item
+@@ -1489,7 +1492,6 @@ static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info, u64 ref_root,
+ 				    struct btrfs_qgroup *src, int sign)
+ {
+ 	struct btrfs_qgroup *qgroup;
+-	struct btrfs_qgroup *cur;
+ 	LIST_HEAD(qgroup_list);
+ 	u64 num_bytes = src->excl;
+ 	int ret = 0;
+@@ -1499,7 +1501,7 @@ static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info, u64 ref_root,
+ 		goto out;
+ 
+ 	qgroup_iterator_add(&qgroup_list, qgroup);
+-	list_for_each_entry(cur, &qgroup_list, iterator) {
++	list_for_each_entry(qgroup, &qgroup_list, iterator) {
+ 		struct btrfs_qgroup_list *glist;
+ 
+ 		qgroup->rfer += sign * num_bytes;
+@@ -1698,9 +1700,6 @@ int btrfs_create_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
+ 	struct btrfs_qgroup *prealloc = NULL;
+ 	int ret = 0;
+ 
+-	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED)
+-		return 0;
+-
+ 	mutex_lock(&fs_info->qgroup_ioctl_lock);
+ 	if (!fs_info->quota_root) {
+ 		ret = -ENOTCONN;
+diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
+index f24a80857cd600..79eb984041dd69 100644
+--- a/fs/btrfs/relocation.c
++++ b/fs/btrfs/relocation.c
+@@ -687,6 +687,25 @@ static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
+ 	if (btrfs_root_id(root) == objectid) {
+ 		u64 commit_root_gen;
+ 
++		/*
++		 * Relocation will wait for cleaner thread, and any half-dropped
++		 * subvolume will be fully cleaned up at mount time.
++		 * So here we shouldn't hit a subvolume with non-zero drop_progress.
++		 *
++		 * If this isn't the case, error out since it can make us attempt to
++		 * drop references for extents that were already dropped before.
++		 */
++		if (unlikely(btrfs_disk_key_objectid(&root->root_item.drop_progress))) {
++			struct btrfs_key cpu_key;
++
++			btrfs_disk_key_to_cpu(&cpu_key, &root->root_item.drop_progress);
++			btrfs_err(fs_info,
++	"cannot relocate partially dropped subvolume %llu, drop progress key (%llu %u %llu)",
++				  objectid, cpu_key.objectid, cpu_key.type, cpu_key.offset);
++			ret = -EUCLEAN;
++			goto fail;
++		}
++
+ 		/* called by btrfs_init_reloc_root */
+ 		ret = btrfs_copy_root(trans, root, root->commit_root, &eb,
+ 				      BTRFS_TREE_RELOC_OBJECTID);
+diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
+index 24806e19c7c410..dbef80cd5a9f1c 100644
+--- a/fs/btrfs/transaction.c
++++ b/fs/btrfs/transaction.c
+@@ -1739,8 +1739,10 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
+ 
+ 	ret = btrfs_create_qgroup(trans, objectid);
+ 	if (ret && ret != -EEXIST) {
+-		btrfs_abort_transaction(trans, ret);
+-		goto fail;
++		if (ret != -ENOTCONN || btrfs_qgroup_enabled(fs_info)) {
++			btrfs_abort_transaction(trans, ret);
++			goto fail;
++		}
+ 	}
+ 
+ 	/*
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index 16b4474ded4bc3..31adea5b0b96a3 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -324,8 +324,7 @@ struct walk_control {
+ 
+ 	/*
+ 	 * Ignore any items from the inode currently being processed. Needs
+-	 * to be set every time we find a BTRFS_INODE_ITEM_KEY and we are in
+-	 * the LOG_WALK_REPLAY_INODES stage.
++	 * to be set every time we find a BTRFS_INODE_ITEM_KEY.
+ 	 */
+ 	bool ignore_cur_inode;
+ 
+@@ -1396,6 +1395,8 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
+ 	dir = btrfs_iget_logging(parent_objectid, root);
+ 	if (IS_ERR(dir)) {
+ 		ret = PTR_ERR(dir);
++		if (ret == -ENOENT)
++			ret = 0;
+ 		dir = NULL;
+ 		goto out;
+ 	}
+@@ -1411,6 +1412,8 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
+ 		if (log_ref_ver) {
+ 			ret = extref_get_fields(eb, ref_ptr, &name,
+ 						&ref_index, &parent_objectid);
++			if (ret)
++				goto out;
+ 			/*
+ 			 * parent object can change from one array
+ 			 * item to another.
+@@ -1420,14 +1423,30 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
+ 				if (IS_ERR(dir)) {
+ 					ret = PTR_ERR(dir);
+ 					dir = NULL;
++					/*
++					 * A new parent dir may have not been
++					 * logged and not exist in the subvolume
++					 * tree, see the comment above before
++					 * the loop when getting the first
++					 * parent dir.
++					 */
++					if (ret == -ENOENT) {
++						/*
++						 * The next extref may refer to
++						 * another parent dir that
++						 * exists, so continue.
++						 */
++						ret = 0;
++						goto next;
++					}
+ 					goto out;
+ 				}
+ 			}
+ 		} else {
+ 			ret = ref_get_fields(eb, ref_ptr, &name, &ref_index);
++			if (ret)
++				goto out;
+ 		}
+-		if (ret)
+-			goto out;
+ 
+ 		ret = inode_in_dir(root, path, btrfs_ino(dir), btrfs_ino(inode),
+ 				   ref_index, &name);
+@@ -1461,10 +1480,11 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
+ 		}
+ 		/* Else, ret == 1, we already have a perfect match, we're done. */
+ 
++next:
+ 		ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + name.len;
+ 		kfree(name.name);
+ 		name.name = NULL;
+-		if (log_ref_ver) {
++		if (log_ref_ver && dir) {
+ 			iput(&dir->vfs_inode);
+ 			dir = NULL;
+ 		}
+@@ -2426,23 +2446,30 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
+ 
+ 	nritems = btrfs_header_nritems(eb);
+ 	for (i = 0; i < nritems; i++) {
+-		btrfs_item_key_to_cpu(eb, &key, i);
++		struct btrfs_inode_item *inode_item;
+ 
+-		/* inode keys are done during the first stage */
+-		if (key.type == BTRFS_INODE_ITEM_KEY &&
+-		    wc->stage == LOG_WALK_REPLAY_INODES) {
+-			struct btrfs_inode_item *inode_item;
+-			u32 mode;
++		btrfs_item_key_to_cpu(eb, &key, i);
+ 
+-			inode_item = btrfs_item_ptr(eb, i,
+-					    struct btrfs_inode_item);
++		if (key.type == BTRFS_INODE_ITEM_KEY) {
++			inode_item = btrfs_item_ptr(eb, i, struct btrfs_inode_item);
+ 			/*
+-			 * If we have a tmpfile (O_TMPFILE) that got fsync'ed
+-			 * and never got linked before the fsync, skip it, as
+-			 * replaying it is pointless since it would be deleted
+-			 * later. We skip logging tmpfiles, but it's always
+-			 * possible we are replaying a log created with a kernel
+-			 * that used to log tmpfiles.
++			 * An inode with no links is either:
++			 *
++			 * 1) A tmpfile (O_TMPFILE) that got fsync'ed and never
++			 *    got linked before the fsync, skip it, as replaying
++			 *    it is pointless since it would be deleted later.
++			 *    We skip logging tmpfiles, but it's always possible
++			 *    we are replaying a log created with a kernel that
++			 *    used to log tmpfiles;
++			 *
++			 * 2) A non-tmpfile which got its last link deleted
++			 *    while holding an open fd on it and later got
++			 *    fsynced through that fd. We always log the
++			 *    parent inodes when inode->last_unlink_trans is
++			 *    set to the current transaction, so ignore all the
++			 *    inode items for this inode. We will delete the
++			 *    inode when processing the parent directory with
++			 *    replay_dir_deletes().
+ 			 */
+ 			if (btrfs_inode_nlink(eb, inode_item) == 0) {
+ 				wc->ignore_cur_inode = true;
+@@ -2450,8 +2477,14 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
+ 			} else {
+ 				wc->ignore_cur_inode = false;
+ 			}
+-			ret = replay_xattr_deletes(wc->trans, root, log,
+-						   path, key.objectid);
++		}
++
++		/* Inode keys are done during the first stage. */
++		if (key.type == BTRFS_INODE_ITEM_KEY &&
++		    wc->stage == LOG_WALK_REPLAY_INODES) {
++			u32 mode;
++
++			ret = replay_xattr_deletes(wc->trans, root, log, path, key.objectid);
+ 			if (ret)
+ 				break;
+ 			mode = btrfs_inode_mode(eb, inode_item);
+@@ -2532,9 +2565,8 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
+ 			   key.type == BTRFS_INODE_EXTREF_KEY) {
+ 			ret = add_inode_ref(wc->trans, root, log, path,
+ 					    eb, i, &key);
+-			if (ret && ret != -ENOENT)
++			if (ret)
+ 				break;
+-			ret = 0;
+ 		} else if (key.type == BTRFS_EXTENT_DATA_KEY) {
+ 			ret = replay_one_extent(wc->trans, root, path,
+ 						eb, i, &key);
+@@ -2555,14 +2587,14 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
+ /*
+  * Correctly adjust the reserved bytes occupied by a log tree extent buffer
+  */
+-static void unaccount_log_buffer(struct btrfs_fs_info *fs_info, u64 start)
++static int unaccount_log_buffer(struct btrfs_fs_info *fs_info, u64 start)
+ {
+ 	struct btrfs_block_group *cache;
+ 
+ 	cache = btrfs_lookup_block_group(fs_info, start);
+ 	if (!cache) {
+ 		btrfs_err(fs_info, "unable to find block group for %llu", start);
+-		return;
++		return -ENOENT;
+ 	}
+ 
+ 	spin_lock(&cache->space_info->lock);
+@@ -2573,27 +2605,22 @@ static void unaccount_log_buffer(struct btrfs_fs_info *fs_info, u64 start)
+ 	spin_unlock(&cache->space_info->lock);
+ 
+ 	btrfs_put_block_group(cache);
++
++	return 0;
+ }
+ 
+ static int clean_log_buffer(struct btrfs_trans_handle *trans,
+ 			    struct extent_buffer *eb)
+ {
+-	int ret;
+-
+ 	btrfs_tree_lock(eb);
+ 	btrfs_clear_buffer_dirty(trans, eb);
+ 	wait_on_extent_buffer_writeback(eb);
+ 	btrfs_tree_unlock(eb);
+ 
+-	if (trans) {
+-		ret = btrfs_pin_reserved_extent(trans, eb);
+-		if (ret)
+-			return ret;
+-	} else {
+-		unaccount_log_buffer(eb->fs_info, eb->start);
+-	}
++	if (trans)
++		return btrfs_pin_reserved_extent(trans, eb);
+ 
+-	return 0;
++	return unaccount_log_buffer(eb->fs_info, eb->start);
+ }
+ 
+ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
+@@ -4225,6 +4252,9 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
+ 	btrfs_set_token_timespec_nsec(&token, &item->ctime,
+ 				      inode_get_ctime_nsec(inode));
+ 
++	btrfs_set_timespec_sec(leaf, &item->otime, BTRFS_I(inode)->i_otime_sec);
++	btrfs_set_timespec_nsec(leaf, &item->otime, BTRFS_I(inode)->i_otime_nsec);
++
+ 	/*
+ 	 * We do not need to set the nbytes field, in fact during a fast fsync
+ 	 * its value may not even be correct, since a fast fsync does not wait
+@@ -7295,11 +7325,14 @@ int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
+ 
+ 		wc.replay_dest->log_root = log;
+ 		ret = btrfs_record_root_in_trans(trans, wc.replay_dest);
+-		if (ret)
++		if (ret) {
+ 			/* The loop needs to continue due to the root refs */
+ 			btrfs_abort_transaction(trans, ret);
+-		else
++		} else {
+ 			ret = walk_log_tree(trans, log, &wc);
++			if (ret)
++				btrfs_abort_transaction(trans, ret);
++		}
+ 
+ 		if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
+ 			ret = fixup_inode_link_counts(trans, wc.replay_dest,
+diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
+index 2603c9d60fd21b..53d8c49ec0588d 100644
+--- a/fs/btrfs/zoned.c
++++ b/fs/btrfs/zoned.c
+@@ -2456,8 +2456,8 @@ bool btrfs_zoned_should_reclaim(const struct btrfs_fs_info *fs_info)
+ {
+ 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
+ 	struct btrfs_device *device;
++	u64 total = btrfs_super_total_bytes(fs_info->super_copy);
+ 	u64 used = 0;
+-	u64 total = 0;
+ 	u64 factor;
+ 
+ 	ASSERT(btrfs_is_zoned(fs_info));
+@@ -2470,7 +2470,6 @@ bool btrfs_zoned_should_reclaim(const struct btrfs_fs_info *fs_info)
+ 		if (!device->bdev)
+ 			continue;
+ 
+-		total += device->disk_total_bytes;
+ 		used += device->bytes_used;
+ 	}
+ 	mutex_unlock(&fs_devices->device_list_mutex);
+@@ -2524,7 +2523,7 @@ int btrfs_zone_finish_one_bg(struct btrfs_fs_info *fs_info)
+ 
+ 		spin_lock(&block_group->lock);
+ 		if (block_group->reserved || block_group->alloc_offset == 0 ||
+-		    (block_group->flags & BTRFS_BLOCK_GROUP_SYSTEM) ||
++		    !(block_group->flags & BTRFS_BLOCK_GROUP_DATA) ||
+ 		    test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags)) {
+ 			spin_unlock(&block_group->lock);
+ 			continue;
+diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h
+index 8371e4e1f596a9..25bcfcc2d70637 100644
+--- a/fs/crypto/fscrypt_private.h
++++ b/fs/crypto/fscrypt_private.h
+@@ -27,6 +27,23 @@
+  */
+ #define FSCRYPT_MIN_KEY_SIZE	16
+ 
++/*
++ * This mask is passed as the third argument to the crypto_alloc_*() functions
++ * to prevent fscrypt from using the Crypto API drivers for non-inline crypto
++ * engines.  Those drivers have been problematic for fscrypt.  fscrypt users
++ * have reported hangs and even incorrect en/decryption with these drivers.
++ * Since going to the driver, off CPU, and back again is really slow, such
++ * drivers can be over 50 times slower than the CPU-based code for fscrypt's
++ * workload.  Even on platforms that lack AES instructions on the CPU, using the
++ * offloads has been shown to be slower, even staying with AES.  (Of course,
++ * Adiantum is faster still, and is the recommended option on such platforms...)
++ *
++ * Note that fscrypt also supports inline crypto engines.  Those don't use the
++ * Crypto API and work much better than the old-style (non-inline) engines.
++ */
++#define FSCRYPT_CRYPTOAPI_MASK \
++	(CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY)
++
+ #define FSCRYPT_CONTEXT_V1	1
+ #define FSCRYPT_CONTEXT_V2	2
+ 
+diff --git a/fs/crypto/hkdf.c b/fs/crypto/hkdf.c
+index 5a384dad2c72f3..b7f5e7884e03b9 100644
+--- a/fs/crypto/hkdf.c
++++ b/fs/crypto/hkdf.c
+@@ -72,7 +72,7 @@ int fscrypt_init_hkdf(struct fscrypt_hkdf *hkdf, const u8 *master_key,
+ 	u8 prk[HKDF_HASHLEN];
+ 	int err;
+ 
+-	hmac_tfm = crypto_alloc_shash(HKDF_HMAC_ALG, 0, 0);
++	hmac_tfm = crypto_alloc_shash(HKDF_HMAC_ALG, 0, FSCRYPT_CRYPTOAPI_MASK);
+ 	if (IS_ERR(hmac_tfm)) {
+ 		fscrypt_err(NULL, "Error allocating " HKDF_HMAC_ALG ": %ld",
+ 			    PTR_ERR(hmac_tfm));
+diff --git a/fs/crypto/keysetup.c b/fs/crypto/keysetup.c
+index b4fe01ea4bd4c9..2896046a49771c 100644
+--- a/fs/crypto/keysetup.c
++++ b/fs/crypto/keysetup.c
+@@ -103,7 +103,8 @@ fscrypt_allocate_skcipher(struct fscrypt_mode *mode, const u8 *raw_key,
+ 	struct crypto_skcipher *tfm;
+ 	int err;
+ 
+-	tfm = crypto_alloc_skcipher(mode->cipher_str, 0, 0);
++	tfm = crypto_alloc_skcipher(mode->cipher_str, 0,
++				    FSCRYPT_CRYPTOAPI_MASK);
+ 	if (IS_ERR(tfm)) {
+ 		if (PTR_ERR(tfm) == -ENOENT) {
+ 			fscrypt_warn(inode,
+diff --git a/fs/crypto/keysetup_v1.c b/fs/crypto/keysetup_v1.c
+index cf3b58ec32ccec..d19d1d4c2e7e53 100644
+--- a/fs/crypto/keysetup_v1.c
++++ b/fs/crypto/keysetup_v1.c
+@@ -52,7 +52,8 @@ static int derive_key_aes(const u8 *master_key,
+ 	struct skcipher_request *req = NULL;
+ 	DECLARE_CRYPTO_WAIT(wait);
+ 	struct scatterlist src_sg, dst_sg;
+-	struct crypto_skcipher *tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0);
++	struct crypto_skcipher *tfm =
++		crypto_alloc_skcipher("ecb(aes)", 0, FSCRYPT_CRYPTOAPI_MASK);
+ 
+ 	if (IS_ERR(tfm)) {
+ 		res = PTR_ERR(tfm);
+diff --git a/fs/eventpoll.c b/fs/eventpoll.c
+index 99eed91d03ebee..075fee4ba29bcd 100644
+--- a/fs/eventpoll.c
++++ b/fs/eventpoll.c
+@@ -218,6 +218,7 @@ struct eventpoll {
+ 	/* used to optimize loop detection check */
+ 	u64 gen;
+ 	struct hlist_head refs;
++	u8 loop_check_depth;
+ 
+ 	/*
+ 	 * usage count, used together with epitem->dying to
+@@ -2091,23 +2092,24 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
+ }
+ 
+ /**
+- * ep_loop_check_proc - verify that adding an epoll file inside another
+- *                      epoll structure does not violate the constraints, in
+- *                      terms of closed loops, or too deep chains (which can
+- *                      result in excessive stack usage).
++ * ep_loop_check_proc - verify that adding an epoll file @ep inside another
++ *                      epoll file does not create closed loops, and
++ *                      determine the depth of the subtree starting at @ep
+  *
+  * @ep: the &struct eventpoll to be currently checked.
+  * @depth: Current depth of the path being checked.
+  *
+- * Return: %zero if adding the epoll @file inside current epoll
+- *          structure @ep does not violate the constraints, or %-1 otherwise.
++ * Return: depth of the subtree, or INT_MAX if we found a loop or went too deep.
+  */
+ static int ep_loop_check_proc(struct eventpoll *ep, int depth)
+ {
+-	int error = 0;
++	int result = 0;
+ 	struct rb_node *rbp;
+ 	struct epitem *epi;
+ 
++	if (ep->gen == loop_check_gen)
++		return ep->loop_check_depth;
++
+ 	mutex_lock_nested(&ep->mtx, depth + 1);
+ 	ep->gen = loop_check_gen;
+ 	for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) {
+@@ -2115,13 +2117,11 @@ static int ep_loop_check_proc(struct eventpoll *ep, int depth)
+ 		if (unlikely(is_file_epoll(epi->ffd.file))) {
+ 			struct eventpoll *ep_tovisit;
+ 			ep_tovisit = epi->ffd.file->private_data;
+-			if (ep_tovisit->gen == loop_check_gen)
+-				continue;
+ 			if (ep_tovisit == inserting_into || depth > EP_MAX_NESTS)
+-				error = -1;
++				result = INT_MAX;
+ 			else
+-				error = ep_loop_check_proc(ep_tovisit, depth + 1);
+-			if (error != 0)
++				result = max(result, ep_loop_check_proc(ep_tovisit, depth + 1) + 1);
++			if (result > EP_MAX_NESTS)
+ 				break;
+ 		} else {
+ 			/*
+@@ -2135,9 +2135,27 @@ static int ep_loop_check_proc(struct eventpoll *ep, int depth)
+ 			list_file(epi->ffd.file);
+ 		}
+ 	}
++	ep->loop_check_depth = result;
+ 	mutex_unlock(&ep->mtx);
+ 
+-	return error;
++	return result;
++}
++
++/**
++ * ep_get_upwards_depth_proc - determine depth of @ep when traversed upwards
++ */
++static int ep_get_upwards_depth_proc(struct eventpoll *ep, int depth)
++{
++	int result = 0;
++	struct epitem *epi;
++
++	if (ep->gen == loop_check_gen)
++		return ep->loop_check_depth;
++	hlist_for_each_entry_rcu(epi, &ep->refs, fllink)
++		result = max(result, ep_get_upwards_depth_proc(epi->ep, depth + 1) + 1);
++	ep->gen = loop_check_gen;
++	ep->loop_check_depth = result;
++	return result;
+ }
+ 
+ /**
+@@ -2153,8 +2171,22 @@ static int ep_loop_check_proc(struct eventpoll *ep, int depth)
+  */
+ static int ep_loop_check(struct eventpoll *ep, struct eventpoll *to)
+ {
++	int depth, upwards_depth;
++
+ 	inserting_into = ep;
+-	return ep_loop_check_proc(to, 0);
++	/*
++	 * Check how deep down we can get from @to, and whether it is possible
++	 * to loop up to @ep.
++	 */
++	depth = ep_loop_check_proc(to, 0);
++	if (depth > EP_MAX_NESTS)
++		return -1;
++	/* Check how far up we can go from @ep. */
++	rcu_read_lock();
++	upwards_depth = ep_get_upwards_depth_proc(ep, 0);
++	rcu_read_unlock();
++
++	return (depth+1+upwards_depth > EP_MAX_NESTS) ? -1 : 0;
+ }
+ 
+ static void clear_tfile_check_list(void)
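[Editorial note: the rework above replaces a boolean loop check with two depth measurements, the subtree depth below the insertion target and the chain height above the file being inserted, rejecting when depth + 1 + upwards_depth exceeds EP_MAX_NESTS. A runnable toy model of the downward measurement and the combined test; the graph encoding is illustrative, and the kernel's generation-based memoization and loop handling are omitted:]

    #include <stdio.h>

    #define EP_MAX_NESTS 4

    /* Toy graph: child[i] lists the epoll files nested inside file i. */
    static int nchild[8];
    static int child[8][8];

    static int subtree_depth(int ep)
    {
        int d = 0;

        for (int i = 0; i < nchild[ep]; i++) {
            int c = subtree_depth(child[ep][i]) + 1;

            if (c > d)
                d = c;
        }
        return d;
    }

    int main(void)
    {
        /* to=0 contains 1, which contains 2: depth 2 below the target. */
        nchild[0] = 1; child[0][0] = 1;
        nchild[1] = 1; child[1][0] = 2;

        int depth = subtree_depth(0);   /* 2 */
        int upwards_depth = 2;          /* pretend ep sits two levels down */

        printf("%s\n", depth + 1 + upwards_depth > EP_MAX_NESTS ?
               "reject: chain too deep" : "ok");   /* 5 > 4 -> reject */
        return 0;
    }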
+diff --git a/fs/exfat/dir.c b/fs/exfat/dir.c
+index 9d8848872fe8ac..1c428f7f83f5d9 100644
+--- a/fs/exfat/dir.c
++++ b/fs/exfat/dir.c
+@@ -1015,6 +1015,7 @@ int exfat_find_dir_entry(struct super_block *sb, struct exfat_inode_info *ei,
+ 	struct exfat_hint_femp candi_empty;
+ 	struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ 	int num_entries = exfat_calc_num_entries(p_uniname);
++	unsigned int clu_count = 0;
+ 
+ 	if (num_entries < 0)
+ 		return num_entries;
+@@ -1152,6 +1153,10 @@ int exfat_find_dir_entry(struct super_block *sb, struct exfat_inode_info *ei,
+ 		} else {
+ 			if (exfat_get_next_cluster(sb, &clu.dir))
+ 				return -EIO;
++
++			/* break if the cluster chain includes a loop */
++			if (unlikely(++clu_count > EXFAT_DATA_CLUSTER_COUNT(sbi)))
++				goto not_found;
+ 		}
+ 	}
+ 
+@@ -1214,6 +1219,7 @@ int exfat_count_dir_entries(struct super_block *sb, struct exfat_chain *p_dir)
+ 	int i, count = 0;
+ 	int dentries_per_clu;
+ 	unsigned int entry_type;
++	unsigned int clu_count = 0;
+ 	struct exfat_chain clu;
+ 	struct exfat_dentry *ep;
+ 	struct exfat_sb_info *sbi = EXFAT_SB(sb);
+@@ -1246,6 +1252,12 @@ int exfat_count_dir_entries(struct super_block *sb, struct exfat_chain *p_dir)
+ 		} else {
+ 			if (exfat_get_next_cluster(sb, &(clu.dir)))
+ 				return -EIO;
++
++			if (unlikely(++clu_count > sbi->used_clusters)) {
++				exfat_fs_error(sb, "FAT or bitmap is corrupted");
++				return -EIO;
++			}
++
+ 		}
+ 	}
+ 
+diff --git a/fs/exfat/fatent.c b/fs/exfat/fatent.c
+index 8df5ad6ebb10cb..0c60ddc24c54a8 100644
+--- a/fs/exfat/fatent.c
++++ b/fs/exfat/fatent.c
+@@ -461,5 +461,15 @@ int exfat_count_num_clusters(struct super_block *sb,
+ 	}
+ 
+ 	*ret_count = count;
++
++	/*
++	 * since exfat_count_used_clusters() is not called, sbi->used_clusters
++	 * cannot be used here.
++	 */
++	if (unlikely(i == sbi->num_clusters && clu != EXFAT_EOF_CLUSTER)) {
++		exfat_fs_error(sb, "The cluster chain has a loop");
++		return -EIO;
++	}
++
+ 	return 0;
+ }
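[Editorial note: the counter added above is the simplest loop detector for an on-disk chain. A valid chain can never contain more links than the volume has clusters, so exceeding that bound proves a cycle. A runnable miniature of the technique:]

    #include <stdio.h>

    #define NCLUSTERS   8
    #define EOF_CLUSTER 0xFFFFFFFFu

    int main(void)
    {
        /* fat[i] = next cluster; 2 -> 3 -> 4 -> 2 forms a loop. */
        unsigned int fat[NCLUSTERS] = { 0, 0, 3, 4, 2, 0, 0, 0 };
        unsigned int clu = 2, count = 0;

        while (clu != EOF_CLUSTER) {
            if (++count > NCLUSTERS) {  /* longer than the volume: loop */
                printf("loop detected\n");
                return 1;
            }
            clu = fat[clu];
        }
        printf("chain length %u\n", count);
        return 0;
    }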
+diff --git a/fs/exfat/namei.c b/fs/exfat/namei.c
+index 7b3951951f8af1..e9624eb61cbc9a 100644
+--- a/fs/exfat/namei.c
++++ b/fs/exfat/namei.c
+@@ -888,6 +888,7 @@ static int exfat_check_dir_empty(struct super_block *sb,
+ {
+ 	int i, dentries_per_clu;
+ 	unsigned int type;
++	unsigned int clu_count = 0;
+ 	struct exfat_chain clu;
+ 	struct exfat_dentry *ep;
+ 	struct exfat_sb_info *sbi = EXFAT_SB(sb);
+@@ -924,6 +925,10 @@ static int exfat_check_dir_empty(struct super_block *sb,
+ 		} else {
+ 			if (exfat_get_next_cluster(sb, &(clu.dir)))
+ 				return -EIO;
++
++			/* break if the cluster chain includes a loop */
++			if (unlikely(++clu_count > EXFAT_DATA_CLUSTER_COUNT(sbi)))
++				break;
+ 		}
+ 	}
+ 
+diff --git a/fs/exfat/super.c b/fs/exfat/super.c
+index bd57844414aa6d..7aaf1ed6aee910 100644
+--- a/fs/exfat/super.c
++++ b/fs/exfat/super.c
+@@ -370,13 +370,12 @@ static void exfat_hash_init(struct super_block *sb)
+ 		INIT_HLIST_HEAD(&sbi->inode_hashtable[i]);
+ }
+ 
+-static int exfat_read_root(struct inode *inode)
++static int exfat_read_root(struct inode *inode, struct exfat_chain *root_clu)
+ {
+ 	struct super_block *sb = inode->i_sb;
+ 	struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ 	struct exfat_inode_info *ei = EXFAT_I(inode);
+-	struct exfat_chain cdir;
+-	int num_subdirs, num_clu = 0;
++	int num_subdirs;
+ 
+ 	exfat_chain_set(&ei->dir, sbi->root_dir, 0, ALLOC_FAT_CHAIN);
+ 	ei->entry = -1;
+@@ -389,12 +388,9 @@ static int exfat_read_root(struct inode *inode)
+ 	ei->hint_stat.clu = sbi->root_dir;
+ 	ei->hint_femp.eidx = EXFAT_HINT_NONE;
+ 
+-	exfat_chain_set(&cdir, sbi->root_dir, 0, ALLOC_FAT_CHAIN);
+-	if (exfat_count_num_clusters(sb, &cdir, &num_clu))
+-		return -EIO;
+-	i_size_write(inode, num_clu << sbi->cluster_size_bits);
++	i_size_write(inode, EXFAT_CLU_TO_B(root_clu->size, sbi));
+ 
+-	num_subdirs = exfat_count_dir_entries(sb, &cdir);
++	num_subdirs = exfat_count_dir_entries(sb, root_clu);
+ 	if (num_subdirs < 0)
+ 		return -EIO;
+ 	set_nlink(inode, num_subdirs + EXFAT_MIN_SUBDIR);
+@@ -608,7 +604,8 @@ static int exfat_verify_boot_region(struct super_block *sb)
+ }
+ 
+ /* mount the file system volume */
+-static int __exfat_fill_super(struct super_block *sb)
++static int __exfat_fill_super(struct super_block *sb,
++		struct exfat_chain *root_clu)
+ {
+ 	int ret;
+ 	struct exfat_sb_info *sbi = EXFAT_SB(sb);
+@@ -625,6 +622,18 @@ static int __exfat_fill_super(struct super_block *sb)
+ 		goto free_bh;
+ 	}
+ 
++	/*
++	 * Call exfat_count_num_clusters() before searching for up-case and
++	 * bitmap directory entries to avoid infinite loop if they are missing
++	 * and the cluster chain includes a loop.
++	 */
++	exfat_chain_set(root_clu, sbi->root_dir, 0, ALLOC_FAT_CHAIN);
++	ret = exfat_count_num_clusters(sb, root_clu, &root_clu->size);
++	if (ret) {
++		exfat_err(sb, "failed to count the number of clusters in root");
++		goto free_bh;
++	}
++
+ 	ret = exfat_create_upcase_table(sb);
+ 	if (ret) {
+ 		exfat_err(sb, "failed to load upcase table");
+@@ -657,6 +666,7 @@ static int exfat_fill_super(struct super_block *sb, struct fs_context *fc)
+ 	struct exfat_sb_info *sbi = sb->s_fs_info;
+ 	struct exfat_mount_options *opts = &sbi->options;
+ 	struct inode *root_inode;
++	struct exfat_chain root_clu;
+ 	int err;
+ 
+ 	if (opts->allow_utime == (unsigned short)-1)
+@@ -675,7 +685,7 @@ static int exfat_fill_super(struct super_block *sb, struct fs_context *fc)
+ 	sb->s_time_min = EXFAT_MIN_TIMESTAMP_SECS;
+ 	sb->s_time_max = EXFAT_MAX_TIMESTAMP_SECS;
+ 
+-	err = __exfat_fill_super(sb);
++	err = __exfat_fill_super(sb, &root_clu);
+ 	if (err) {
+ 		exfat_err(sb, "failed to recognize exfat type");
+ 		goto check_nls_io;
+@@ -710,7 +720,7 @@ static int exfat_fill_super(struct super_block *sb, struct fs_context *fc)
+ 
+ 	root_inode->i_ino = EXFAT_ROOT_INO;
+ 	inode_set_iversion(root_inode, 1);
+-	err = exfat_read_root(root_inode);
++	err = exfat_read_root(root_inode, &root_clu);
+ 	if (err) {
+ 		exfat_err(sb, "failed to initialize root inode");
+ 		goto put_inode;
+diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
+index 30f8201c155f40..177b1f852b63ac 100644
+--- a/fs/ext2/inode.c
++++ b/fs/ext2/inode.c
+@@ -895,9 +895,19 @@ int ext2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+ 		u64 start, u64 len)
+ {
+ 	int ret;
++	loff_t i_size;
+ 
+ 	inode_lock(inode);
+-	len = min_t(u64, len, i_size_read(inode));
++	i_size = i_size_read(inode);
++	/*
++	 * iomap_fiemap() returns EINVAL for 0 length. Make sure we don't trim
++	 * length to 0 but still trim the range as much as possible since
++	 * ext2_get_blocks() iterates unmapped space block by block which is
++	 * slow.
++	 */
++	if (i_size == 0)
++		i_size = 1;
++	len = min_t(u64, len, i_size);
+ 	ret = iomap_fiemap(inode, fieinfo, start, len, &ext2_iomap_ops);
+ 	inode_unlock(inode);
+ 
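[Editorial note: the clamp above satisfies two constraints at once. iomap_fiemap() rejects a zero length with -EINVAL, yet trimming the request to i_size keeps ext2_get_blocks() from crawling unmapped space block by block; flooring i_size at 1 handles the empty-file case. A runnable sketch of the clamping rule:]

    #include <stdio.h>

    static unsigned long long clamp_len(unsigned long long len,
                                        unsigned long long i_size)
    {
        if (i_size == 0)
            i_size = 1;     /* never pass len == 0 down */
        return len < i_size ? len : i_size;
    }

    int main(void)
    {
        printf("%llu\n", clamp_len(4096, 0));       /* 1    */
        printf("%llu\n", clamp_len(1 << 20, 4096)); /* 4096 */
        return 0;
    }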
+diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
+index e02a3141637a0b..9fb5e0f172a78e 100644
+--- a/fs/ext4/inline.c
++++ b/fs/ext4/inline.c
+@@ -298,7 +298,11 @@ static int ext4_create_inline_data(handle_t *handle,
+ 	if (error)
+ 		goto out;
+ 
+-	BUG_ON(!is.s.not_found);
++	if (!is.s.not_found) {
++		EXT4_ERROR_INODE(inode, "unexpected inline data xattr");
++		error = -EFSCORRUPTED;
++		goto out;
++	}
+ 
+ 	error = ext4_xattr_ibody_set(handle, inode, &i, &is);
+ 	if (error) {
+@@ -349,7 +353,11 @@ static int ext4_update_inline_data(handle_t *handle, struct inode *inode,
+ 	if (error)
+ 		goto out;
+ 
+-	BUG_ON(is.s.not_found);
++	if (is.s.not_found) {
++		EXT4_ERROR_INODE(inode, "missing inline data xattr");
++		error = -EFSCORRUPTED;
++		goto out;
++	}
+ 
+ 	len -= EXT4_MIN_INLINE_DATA_SIZE;
+ 	value = kzalloc(len, GFP_NOFS);
+@@ -1969,7 +1977,12 @@ int ext4_inline_data_truncate(struct inode *inode, int *has_inline)
+ 			if ((err = ext4_xattr_ibody_find(inode, &i, &is)) != 0)
+ 				goto out_error;
+ 
+-			BUG_ON(is.s.not_found);
++			if (is.s.not_found) {
++				EXT4_ERROR_INODE(inode,
++						 "missing inline data xattr");
++				err = -EFSCORRUPTED;
++				goto out_error;
++			}
+ 
+ 			value_len = le32_to_cpu(is.s.here->e_value_size);
+ 			value = kmalloc(value_len, GFP_NOFS);
+diff --git a/fs/ext4/mballoc-test.c b/fs/ext4/mballoc-test.c
+index bb2a223b207c19..f13db95284d9e6 100644
+--- a/fs/ext4/mballoc-test.c
++++ b/fs/ext4/mballoc-test.c
+@@ -155,6 +155,7 @@ static struct super_block *mbt_ext4_alloc_super_block(void)
+ 	bgl_lock_init(sbi->s_blockgroup_lock);
+ 
+ 	sbi->s_es = &fsb->es;
++	sbi->s_sb = sb;
+ 	sb->s_fs_info = sbi;
+ 
+ 	up_write(&sb->s_umount);
+@@ -801,6 +802,10 @@ static void test_mb_mark_used(struct kunit *test)
+ 	KUNIT_ASSERT_EQ(test, ret, 0);
+ 
+ 	grp->bb_free = EXT4_CLUSTERS_PER_GROUP(sb);
++	grp->bb_largest_free_order = -1;
++	grp->bb_avg_fragment_size_order = -1;
++	INIT_LIST_HEAD(&grp->bb_largest_free_order_node);
++	INIT_LIST_HEAD(&grp->bb_avg_fragment_size_node);
+ 	mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
+ 	for (i = 0; i < TEST_RANGE_COUNT; i++)
+ 		test_mb_mark_used_range(test, &e4b, ranges[i].start,
+@@ -873,6 +878,10 @@ static void test_mb_free_blocks(struct kunit *test)
+ 	ext4_unlock_group(sb, TEST_GOAL_GROUP);
+ 
+ 	grp->bb_free = 0;
++	grp->bb_largest_free_order = -1;
++	grp->bb_avg_fragment_size_order = -1;
++	INIT_LIST_HEAD(&grp->bb_largest_free_order_node);
++	INIT_LIST_HEAD(&grp->bb_avg_fragment_size_node);
+ 	memset(bitmap, 0xff, sb->s_blocksize);
+ 
+ 	mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 109cf88e7caacf..76331cdb4cb51e 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -841,30 +841,30 @@ static void
+ mb_update_avg_fragment_size(struct super_block *sb, struct ext4_group_info *grp)
+ {
+ 	struct ext4_sb_info *sbi = EXT4_SB(sb);
+-	int new_order;
++	int new, old;
+ 
+-	if (!test_opt2(sb, MB_OPTIMIZE_SCAN) || grp->bb_fragments == 0)
++	if (!test_opt2(sb, MB_OPTIMIZE_SCAN))
+ 		return;
+ 
+-	new_order = mb_avg_fragment_size_order(sb,
+-					grp->bb_free / grp->bb_fragments);
+-	if (new_order == grp->bb_avg_fragment_size_order)
++	old = grp->bb_avg_fragment_size_order;
++	new = grp->bb_fragments == 0 ? -1 :
++	      mb_avg_fragment_size_order(sb, grp->bb_free / grp->bb_fragments);
++	if (new == old)
+ 		return;
+ 
+-	if (grp->bb_avg_fragment_size_order != -1) {
+-		write_lock(&sbi->s_mb_avg_fragment_size_locks[
+-					grp->bb_avg_fragment_size_order]);
++	if (old >= 0) {
++		write_lock(&sbi->s_mb_avg_fragment_size_locks[old]);
+ 		list_del(&grp->bb_avg_fragment_size_node);
+-		write_unlock(&sbi->s_mb_avg_fragment_size_locks[
+-					grp->bb_avg_fragment_size_order]);
++		write_unlock(&sbi->s_mb_avg_fragment_size_locks[old]);
++	}
++
++	grp->bb_avg_fragment_size_order = new;
++	if (new >= 0) {
++		write_lock(&sbi->s_mb_avg_fragment_size_locks[new]);
++		list_add_tail(&grp->bb_avg_fragment_size_node,
++				&sbi->s_mb_avg_fragment_size[new]);
++		write_unlock(&sbi->s_mb_avg_fragment_size_locks[new]);
+ 	}
+-	grp->bb_avg_fragment_size_order = new_order;
+-	write_lock(&sbi->s_mb_avg_fragment_size_locks[
+-					grp->bb_avg_fragment_size_order]);
+-	list_add_tail(&grp->bb_avg_fragment_size_node,
+-		&sbi->s_mb_avg_fragment_size[grp->bb_avg_fragment_size_order]);
+-	write_unlock(&sbi->s_mb_avg_fragment_size_locks[
+-					grp->bb_avg_fragment_size_order]);
+ }
+ 
+ /*
+@@ -1150,33 +1150,28 @@ static void
+ mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp)
+ {
+ 	struct ext4_sb_info *sbi = EXT4_SB(sb);
+-	int i;
++	int new, old = grp->bb_largest_free_order;
+ 
+-	for (i = MB_NUM_ORDERS(sb) - 1; i >= 0; i--)
+-		if (grp->bb_counters[i] > 0)
++	for (new = MB_NUM_ORDERS(sb) - 1; new >= 0; new--)
++		if (grp->bb_counters[new] > 0)
+ 			break;
++
+ 	/* No need to move between order lists? */
+-	if (!test_opt2(sb, MB_OPTIMIZE_SCAN) ||
+-	    i == grp->bb_largest_free_order) {
+-		grp->bb_largest_free_order = i;
++	if (new == old)
+ 		return;
+-	}
+ 
+-	if (grp->bb_largest_free_order >= 0) {
+-		write_lock(&sbi->s_mb_largest_free_orders_locks[
+-					      grp->bb_largest_free_order]);
++	if (old >= 0 && !list_empty(&grp->bb_largest_free_order_node)) {
++		write_lock(&sbi->s_mb_largest_free_orders_locks[old]);
+ 		list_del_init(&grp->bb_largest_free_order_node);
+-		write_unlock(&sbi->s_mb_largest_free_orders_locks[
+-					      grp->bb_largest_free_order]);
++		write_unlock(&sbi->s_mb_largest_free_orders_locks[old]);
+ 	}
+-	grp->bb_largest_free_order = i;
+-	if (grp->bb_largest_free_order >= 0 && grp->bb_free) {
+-		write_lock(&sbi->s_mb_largest_free_orders_locks[
+-					      grp->bb_largest_free_order]);
++
++	grp->bb_largest_free_order = new;
++	if (test_opt2(sb, MB_OPTIMIZE_SCAN) && new >= 0 && grp->bb_free) {
++		write_lock(&sbi->s_mb_largest_free_orders_locks[new]);
+ 		list_add_tail(&grp->bb_largest_free_order_node,
+-		      &sbi->s_mb_largest_free_orders[grp->bb_largest_free_order]);
+-		write_unlock(&sbi->s_mb_largest_free_orders_locks[
+-					      grp->bb_largest_free_order]);
++			      &sbi->s_mb_largest_free_orders[new]);
++		write_unlock(&sbi->s_mb_largest_free_orders_locks[new]);
+ 	}
+ }
+ 
+diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
+index d9037e74631c0a..fa77841f3e2cca 100644
+--- a/fs/f2fs/file.c
++++ b/fs/f2fs/file.c
+@@ -1003,6 +1003,18 @@ int f2fs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
+ 	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
+ 		return -EIO;
+ 
++	err = setattr_prepare(idmap, dentry, attr);
++	if (err)
++		return err;
++
++	err = fscrypt_prepare_setattr(dentry, attr);
++	if (err)
++		return err;
++
++	err = fsverity_prepare_setattr(dentry, attr);
++	if (err)
++		return err;
++
+ 	if (unlikely(IS_IMMUTABLE(inode)))
+ 		return -EPERM;
+ 
+@@ -1020,18 +1032,6 @@ int f2fs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
+ 			return -EINVAL;
+ 	}
+ 
+-	err = setattr_prepare(idmap, dentry, attr);
+-	if (err)
+-		return err;
+-
+-	err = fscrypt_prepare_setattr(dentry, attr);
+-	if (err)
+-		return err;
+-
+-	err = fsverity_prepare_setattr(dentry, attr);
+-	if (err)
+-		return err;
+-
+ 	if (is_quota_modification(idmap, inode, attr)) {
+ 		err = f2fs_dquot_initialize(inode);
+ 		if (err)
+diff --git a/fs/file.c b/fs/file.c
+index b6fb6d18ac3b9b..4579c329649877 100644
+--- a/fs/file.c
++++ b/fs/file.c
+@@ -126,6 +126,21 @@ static struct fdtable * alloc_fdtable(unsigned int nr)
+ 	if (unlikely(nr > sysctl_nr_open))
+ 		nr = ((sysctl_nr_open - 1) | (BITS_PER_LONG - 1)) + 1;
+ 
++	/*
++	 * Check if the allocation size would exceed INT_MAX. kvmalloc_array()
++	 * and kvmalloc() will warn if the allocation size is greater than
++	 * INT_MAX, as filp_cachep objects are not __GFP_NOWARN.
++	 *
++	 * This can happen when sysctl_nr_open is set to a very high value and
++	 * a process tries to use a file descriptor near that limit. For example,
++	 * if sysctl_nr_open is set to 1073741816 (0x3ffffff8) - which is what
++	 * systemd typically sets it to - then trying to use a file descriptor
++	 * close to that value will require allocating a file descriptor table
++	 * that exceeds 8GB in size.
++	 */
++	if (unlikely(nr > INT_MAX / sizeof(struct file *)))
++		return ERR_PTR(-EMFILE);
++
+ 	fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL_ACCOUNT);
+ 	if (!fdt)
+ 		goto out;
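[Editorial note: the numbers in the comment above check out. A runnable sketch, using sizeof(void *) as a stand-in for sizeof(struct file *) on LP64:]

    #include <stdio.h>
    #include <limits.h>

    int main(void)
    {
        unsigned long nr = 1073741816UL;  /* 0x3ffffff8, systemd's nr_open */
        size_t ptrsz = sizeof(void *);    /* stand-in for sizeof(struct file *) */

        /* Largest table that stays under INT_MAX bytes: */
        printf("limit: %zu entries\n", (size_t)INT_MAX / ptrsz); /* 268435455 */
        /* What the rejected request would have needed: */
        printf("table: %.1f GiB\n",
               nr * ptrsz / (double)(1UL << 30));                /* ~8.0 */
        return 0;
    }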
+diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
+index dbf1aede744c12..509e2f0d97e787 100644
+--- a/fs/gfs2/dir.c
++++ b/fs/gfs2/dir.c
+@@ -60,6 +60,7 @@
+ #include <linux/crc32.h>
+ #include <linux/vmalloc.h>
+ #include <linux/bio.h>
++#include <linux/log2.h>
+ 
+ #include "gfs2.h"
+ #include "incore.h"
+@@ -912,7 +913,6 @@ static int dir_make_exhash(struct inode *inode)
+ 	struct qstr args;
+ 	struct buffer_head *bh, *dibh;
+ 	struct gfs2_leaf *leaf;
+-	int y;
+ 	u32 x;
+ 	__be64 *lp;
+ 	u64 bn;
+@@ -979,9 +979,7 @@ static int dir_make_exhash(struct inode *inode)
+ 	i_size_write(inode, sdp->sd_sb.sb_bsize / 2);
+ 	gfs2_add_inode_blocks(&dip->i_inode, 1);
+ 	dip->i_diskflags |= GFS2_DIF_EXHASH;
+-
+-	for (x = sdp->sd_hash_ptrs, y = -1; x; x >>= 1, y++) ;
+-	dip->i_depth = y;
++	dip->i_depth = ilog2(sdp->sd_hash_ptrs);
+ 
+ 	gfs2_dinode_out(dip, dibh->b_data);
+ 
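[Editorial note: the replacement above is behavior-preserving. The deleted shift-and-count loop computes exactly floor(log2(x)), which is what ilog2() returns. A runnable check of the equivalence; loop_log2() reproduces the removed idiom, and 31 - __builtin_clz() serves as an independent reference on GCC/Clang:]

    #include <stdio.h>

    static int loop_log2(unsigned int x)  /* the idiom dir_make_exhash dropped */
    {
        int y = -1;

        for (; x; x >>= 1)
            y++;
        return y;
    }

    int main(void)
    {
        for (unsigned int x = 1; x < 70; x += 7)
            printf("%2u: %d %d\n", x, loop_log2(x),
                   31 - __builtin_clz(x));  /* both are floor(log2(x)) */
        return 0;
    }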
+diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
+index 4b6b23c638e296..1ed42f0e6ec7b3 100644
+--- a/fs/gfs2/glops.c
++++ b/fs/gfs2/glops.c
+@@ -11,6 +11,7 @@
+ #include <linux/bio.h>
+ #include <linux/posix_acl.h>
+ #include <linux/security.h>
++#include <linux/log2.h>
+ 
+ #include "gfs2.h"
+ #include "incore.h"
+@@ -450,6 +451,11 @@ static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
+ 		gfs2_consist_inode(ip);
+ 		return -EIO;
+ 	}
++	if ((ip->i_diskflags & GFS2_DIF_EXHASH) &&
++	    depth < ilog2(sdp->sd_hash_ptrs)) {
++		gfs2_consist_inode(ip);
++		return -EIO;
++	}
+ 	ip->i_depth = (u8)depth;
+ 	ip->i_entries = be32_to_cpu(str->di_entries);
+ 
+diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
+index 960d6afcdfad81..b795ca7765cdc4 100644
+--- a/fs/gfs2/meta_io.c
++++ b/fs/gfs2/meta_io.c
+@@ -103,6 +103,7 @@ const struct address_space_operations gfs2_meta_aops = {
+ 	.invalidate_folio = block_invalidate_folio,
+ 	.writepages = gfs2_aspace_writepages,
+ 	.release_folio = gfs2_release_folio,
++	.migrate_folio = buffer_migrate_folio_norefs,
+ };
+ 
+ const struct address_space_operations gfs2_rgrp_aops = {
+@@ -110,6 +111,7 @@ const struct address_space_operations gfs2_rgrp_aops = {
+ 	.invalidate_folio = block_invalidate_folio,
+ 	.writepages = gfs2_aspace_writepages,
+ 	.release_folio = gfs2_release_folio,
++	.migrate_folio = buffer_migrate_folio_norefs,
+ };
+ 
+ /**
+diff --git a/fs/hfs/bfind.c b/fs/hfs/bfind.c
+index ef9498a6e88acd..34e9804e0f3601 100644
+--- a/fs/hfs/bfind.c
++++ b/fs/hfs/bfind.c
+@@ -16,6 +16,9 @@ int hfs_find_init(struct hfs_btree *tree, struct hfs_find_data *fd)
+ {
+ 	void *ptr;
+ 
++	if (!tree || !fd)
++		return -EINVAL;
++
+ 	fd->tree = tree;
+ 	fd->bnode = NULL;
+ 	ptr = kmalloc(tree->max_key_len * 2 + 4, GFP_KERNEL);
+diff --git a/fs/hfs/bnode.c b/fs/hfs/bnode.c
+index cb823a8a6ba960..e8cd1a31f2470c 100644
+--- a/fs/hfs/bnode.c
++++ b/fs/hfs/bnode.c
+@@ -15,6 +15,48 @@
+ 
+ #include "btree.h"
+ 
++static inline
++bool is_bnode_offset_valid(struct hfs_bnode *node, int off)
++{
++	bool is_valid = off < node->tree->node_size;
++
++	if (!is_valid) {
++		pr_err("requested invalid offset: "
++		       "NODE: id %u, type %#x, height %u, "
++		       "node_size %u, offset %d\n",
++		       node->this, node->type, node->height,
++		       node->tree->node_size, off);
++	}
++
++	return is_valid;
++}
++
++static inline
++int check_and_correct_requested_length(struct hfs_bnode *node, int off, int len)
++{
++	unsigned int node_size;
++
++	if (!is_bnode_offset_valid(node, off))
++		return 0;
++
++	node_size = node->tree->node_size;
++
++	if ((off + len) > node_size) {
++		int new_len = (int)node_size - off;
++
++		pr_err("requested length has been corrected: "
++		       "NODE: id %u, type %#x, height %u, "
++		       "node_size %u, offset %d, "
++		       "requested_len %d, corrected_len %d\n",
++		       node->this, node->type, node->height,
++		       node->tree->node_size, off, len, new_len);
++
++		return new_len;
++	}
++
++	return len;
++}
++
+ void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
+ {
+ 	struct page *page;
+@@ -22,6 +64,20 @@ void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
+ 	int bytes_read;
+ 	int bytes_to_read;
+ 
++	if (!is_bnode_offset_valid(node, off))
++		return;
++
++	if (len == 0) {
++		pr_err("requested zero length: "
++		       "NODE: id %u, type %#x, height %u, "
++		       "node_size %u, offset %d, len %d\n",
++		       node->this, node->type, node->height,
++		       node->tree->node_size, off, len);
++		return;
++	}
++
++	len = check_and_correct_requested_length(node, off, len);
++
+ 	off += node->page_offset;
+ 	pagenum = off >> PAGE_SHIFT;
+ 	off &= ~PAGE_MASK; /* compute page offset for the first page */
+@@ -80,6 +136,20 @@ void hfs_bnode_write(struct hfs_bnode *node, void *buf, int off, int len)
+ {
+ 	struct page *page;
+ 
++	if (!is_bnode_offset_valid(node, off))
++		return;
++
++	if (len == 0) {
++		pr_err("requested zero length: "
++		       "NODE: id %u, type %#x, height %u, "
++		       "node_size %u, offset %d, len %d\n",
++		       node->this, node->type, node->height,
++		       node->tree->node_size, off, len);
++		return;
++	}
++
++	len = check_and_correct_requested_length(node, off, len);
++
+ 	off += node->page_offset;
+ 	page = node->page[0];
+ 
+@@ -104,6 +174,20 @@ void hfs_bnode_clear(struct hfs_bnode *node, int off, int len)
+ {
+ 	struct page *page;
+ 
++	if (!is_bnode_offset_valid(node, off))
++		return;
++
++	if (len == 0) {
++		pr_err("requested zero length: "
++		       "NODE: id %u, type %#x, height %u, "
++		       "node_size %u, offset %d, len %d\n",
++		       node->this, node->type, node->height,
++		       node->tree->node_size, off, len);
++		return;
++	}
++
++	len = check_and_correct_requested_length(node, off, len);
++
+ 	off += node->page_offset;
+ 	page = node->page[0];
+ 
+@@ -119,6 +203,10 @@ void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
+ 	hfs_dbg(BNODE_MOD, "copybytes: %u,%u,%u\n", dst, src, len);
+ 	if (!len)
+ 		return;
++
++	len = check_and_correct_requested_length(src_node, src, len);
++	len = check_and_correct_requested_length(dst_node, dst, len);
++
+ 	src += src_node->page_offset;
+ 	dst += dst_node->page_offset;
+ 	src_page = src_node->page[0];
+@@ -136,6 +224,10 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
+ 	hfs_dbg(BNODE_MOD, "movebytes: %u,%u,%u\n", dst, src, len);
+ 	if (!len)
+ 		return;
++
++	len = check_and_correct_requested_length(node, src, len);
++	len = check_and_correct_requested_length(node, dst, len);
++
+ 	src += node->page_offset;
+ 	dst += node->page_offset;
+ 	page = node->page[0];
+@@ -482,6 +574,7 @@ void hfs_bnode_put(struct hfs_bnode *node)
+ 		if (test_bit(HFS_BNODE_DELETED, &node->flags)) {
+ 			hfs_bnode_unhash(node);
+ 			spin_unlock(&tree->hash_lock);
++			hfs_bnode_clear(node, 0, tree->node_size);
+ 			hfs_bmap_free(node);
+ 			hfs_bnode_free(node);
+ 			return;
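
Both the hfs helpers above and their hfsplus twins later in this patch implement the same clamp: reject an out-of-range offset outright and trim a length that would cross the node boundary. A minimal user-space model of that logic (error logging omitted):

#include <stdio.h>

struct node { unsigned int node_size; };

/* Model of the clamp added above: 0 for a bad offset, a trimmed length
 * for a request that would run past the node. */
static int clamp_len(const struct node *n, int off, int len)
{
	if (off < 0 || (unsigned int)off >= n->node_size)
		return 0;
	if ((unsigned int)(off + len) > n->node_size)
		return (int)(n->node_size - (unsigned int)off);
	return len;
}

int main(void)
{
	struct node n = { .node_size = 4096 };

	printf("%d\n", clamp_len(&n, 4000, 200)); /* 96: trimmed       */
	printf("%d\n", clamp_len(&n, 5000, 16));  /* 0: bad offset     */
	printf("%d\n", clamp_len(&n, 100, 16));   /* 16: unchanged     */
	return 0;
}
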
+diff --git a/fs/hfs/btree.c b/fs/hfs/btree.c
+index 2fa4b1f8cc7fb0..e86e1e235658fa 100644
+--- a/fs/hfs/btree.c
++++ b/fs/hfs/btree.c
+@@ -21,8 +21,12 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke
+ 	struct hfs_btree *tree;
+ 	struct hfs_btree_header_rec *head;
+ 	struct address_space *mapping;
+-	struct page *page;
++	struct folio *folio;
++	struct buffer_head *bh;
+ 	unsigned int size;
++	u16 dblock;
++	sector_t start_block;
++	loff_t offset;
+ 
+ 	tree = kzalloc(sizeof(*tree), GFP_KERNEL);
+ 	if (!tree)
+@@ -75,12 +79,40 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke
+ 	unlock_new_inode(tree->inode);
+ 
+ 	mapping = tree->inode->i_mapping;
+-	page = read_mapping_page(mapping, 0, NULL);
+-	if (IS_ERR(page))
++	folio = filemap_grab_folio(mapping, 0);
++	if (IS_ERR(folio))
+ 		goto free_inode;
+ 
++	folio_zero_range(folio, 0, folio_size(folio));
++
++	dblock = hfs_ext_find_block(HFS_I(tree->inode)->first_extents, 0);
++	start_block = HFS_SB(sb)->fs_start + (dblock * HFS_SB(sb)->fs_div);
++
++	size = folio_size(folio);
++	offset = 0;
++	while (size > 0) {
++		size_t len;
++
++		bh = sb_bread(sb, start_block);
++		if (!bh) {
++			pr_err("unable to read tree header\n");
++			goto put_folio;
++		}
++
++		len = min_t(size_t, folio_size(folio), sb->s_blocksize);
++		memcpy_to_folio(folio, offset, bh->b_data, sb->s_blocksize);
++
++		brelse(bh);
++
++		start_block++;
++		offset += len;
++		size -= len;
++	}
++
++	folio_mark_uptodate(folio);
++
+ 	/* Load the header */
+-	head = (struct hfs_btree_header_rec *)(kmap_local_page(page) +
++	head = (struct hfs_btree_header_rec *)(kmap_local_folio(folio, 0) +
+ 					       sizeof(struct hfs_bnode_desc));
+ 	tree->root = be32_to_cpu(head->root);
+ 	tree->leaf_count = be32_to_cpu(head->leaf_count);
+@@ -95,22 +127,22 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke
+ 
+ 	size = tree->node_size;
+ 	if (!is_power_of_2(size))
+-		goto fail_page;
++		goto fail_folio;
+ 	if (!tree->node_count)
+-		goto fail_page;
++		goto fail_folio;
+ 	switch (id) {
+ 	case HFS_EXT_CNID:
+ 		if (tree->max_key_len != HFS_MAX_EXT_KEYLEN) {
+ 			pr_err("invalid extent max_key_len %d\n",
+ 			       tree->max_key_len);
+-			goto fail_page;
++			goto fail_folio;
+ 		}
+ 		break;
+ 	case HFS_CAT_CNID:
+ 		if (tree->max_key_len != HFS_MAX_CAT_KEYLEN) {
+ 			pr_err("invalid catalog max_key_len %d\n",
+ 			       tree->max_key_len);
+-			goto fail_page;
++			goto fail_folio;
+ 		}
+ 		break;
+ 	default:
+@@ -121,12 +153,15 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke
+ 	tree->pages_per_bnode = (tree->node_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ 
+ 	kunmap_local(head);
+-	put_page(page);
++	folio_unlock(folio);
++	folio_put(folio);
+ 	return tree;
+ 
+-fail_page:
++fail_folio:
+ 	kunmap_local(head);
+-	put_page(page);
++put_folio:
++	folio_unlock(folio);
++	folio_put(folio);
+ free_inode:
+ 	tree->inode->i_mapping->a_ops = &hfs_aops;
+ 	iput(tree->inode);
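
The rewritten hfs_btree_open() stops trusting the page-cache read path for the header node and fills the folio directly from consecutive device blocks via sb_bread(). A user-space sketch of that fill loop, with fixed illustrative sizes and a faked block read:

#include <stdio.h>
#include <string.h>

#define FOLIO_SIZE 4096
#define BLOCK_SIZE 512

/* Fake device read: the kernel code uses sb_bread() here. */
static void read_block(unsigned long blk, unsigned char *out)
{
	memset(out, (int)(blk & 0xff), BLOCK_SIZE);
}

int main(void)
{
	unsigned char folio[FOLIO_SIZE], blk[BLOCK_SIZE];
	unsigned long start_block = 100;
	size_t off = 0;

	while (off < FOLIO_SIZE) {
		read_block(start_block++, blk);
		memcpy(folio + off, blk, BLOCK_SIZE);
		off += BLOCK_SIZE;
	}
	printf("filled %zu bytes from %lu blocks\n", off, start_block - 100);
	return 0;
}
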
+diff --git a/fs/hfs/extent.c b/fs/hfs/extent.c
+index 4a0ce131e233fe..580c62981dbd3d 100644
+--- a/fs/hfs/extent.c
++++ b/fs/hfs/extent.c
+@@ -71,7 +71,7 @@ int hfs_ext_keycmp(const btree_key *key1, const btree_key *key2)
+  *
+  * Find a block within an extent record
+  */
+-static u16 hfs_ext_find_block(struct hfs_extent *ext, u16 off)
++u16 hfs_ext_find_block(struct hfs_extent *ext, u16 off)
+ {
+ 	int i;
+ 	u16 count;
+diff --git a/fs/hfs/hfs_fs.h b/fs/hfs/hfs_fs.h
+index a0c7cb0f79fcc9..732c5c4c7545d6 100644
+--- a/fs/hfs/hfs_fs.h
++++ b/fs/hfs/hfs_fs.h
+@@ -190,6 +190,7 @@ extern const struct inode_operations hfs_dir_inode_operations;
+ 
+ /* extent.c */
+ extern int hfs_ext_keycmp(const btree_key *, const btree_key *);
++extern u16 hfs_ext_find_block(struct hfs_extent *ext, u16 off);
+ extern int hfs_free_fork(struct super_block *, struct hfs_cat_file *, int);
+ extern int hfs_ext_write_extent(struct inode *);
+ extern int hfs_extend_file(struct inode *);
+diff --git a/fs/hfsplus/bnode.c b/fs/hfsplus/bnode.c
+index 079ea80534f7de..14f4995588ff03 100644
+--- a/fs/hfsplus/bnode.c
++++ b/fs/hfsplus/bnode.c
+@@ -18,12 +18,68 @@
+ #include "hfsplus_fs.h"
+ #include "hfsplus_raw.h"
+ 
++static inline
++bool is_bnode_offset_valid(struct hfs_bnode *node, int off)
++{
++	bool is_valid = off < node->tree->node_size;
++
++	if (!is_valid) {
++		pr_err("requested invalid offset: "
++		       "NODE: id %u, type %#x, height %u, "
++		       "node_size %u, offset %d\n",
++		       node->this, node->type, node->height,
++		       node->tree->node_size, off);
++	}
++
++	return is_valid;
++}
++
++static inline
++int check_and_correct_requested_length(struct hfs_bnode *node, int off, int len)
++{
++	unsigned int node_size;
++
++	if (!is_bnode_offset_valid(node, off))
++		return 0;
++
++	node_size = node->tree->node_size;
++
++	if ((off + len) > node_size) {
++		int new_len = (int)node_size - off;
++
++		pr_err("requested length has been corrected: "
++		       "NODE: id %u, type %#x, height %u, "
++		       "node_size %u, offset %d, "
++		       "requested_len %d, corrected_len %d\n",
++		       node->this, node->type, node->height,
++		       node->tree->node_size, off, len, new_len);
++
++		return new_len;
++	}
++
++	return len;
++}
++
+ /* Copy a specified range of bytes from the raw data of a node */
+ void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
+ {
+ 	struct page **pagep;
+ 	int l;
+ 
++	if (!is_bnode_offset_valid(node, off))
++		return;
++
++	if (len == 0) {
++		pr_err("requested zero length: "
++		       "NODE: id %u, type %#x, height %u, "
++		       "node_size %u, offset %d, len %d\n",
++		       node->this, node->type, node->height,
++		       node->tree->node_size, off, len);
++		return;
++	}
++
++	len = check_and_correct_requested_length(node, off, len);
++
+ 	off += node->page_offset;
+ 	pagep = node->page + (off >> PAGE_SHIFT);
+ 	off &= ~PAGE_MASK;
+@@ -81,6 +137,20 @@ void hfs_bnode_write(struct hfs_bnode *node, void *buf, int off, int len)
+ 	struct page **pagep;
+ 	int l;
+ 
++	if (!is_bnode_offset_valid(node, off))
++		return;
++
++	if (len == 0) {
++		pr_err("requested zero length: "
++		       "NODE: id %u, type %#x, height %u, "
++		       "node_size %u, offset %d, len %d\n",
++		       node->this, node->type, node->height,
++		       node->tree->node_size, off, len);
++		return;
++	}
++
++	len = check_and_correct_requested_length(node, off, len);
++
+ 	off += node->page_offset;
+ 	pagep = node->page + (off >> PAGE_SHIFT);
+ 	off &= ~PAGE_MASK;
+@@ -109,6 +179,20 @@ void hfs_bnode_clear(struct hfs_bnode *node, int off, int len)
+ 	struct page **pagep;
+ 	int l;
+ 
++	if (!is_bnode_offset_valid(node, off))
++		return;
++
++	if (len == 0) {
++		pr_err("requested zero length: "
++		       "NODE: id %u, type %#x, height %u, "
++		       "node_size %u, offset %d, len %d\n",
++		       node->this, node->type, node->height,
++		       node->tree->node_size, off, len);
++		return;
++	}
++
++	len = check_and_correct_requested_length(node, off, len);
++
+ 	off += node->page_offset;
+ 	pagep = node->page + (off >> PAGE_SHIFT);
+ 	off &= ~PAGE_MASK;
+@@ -133,6 +217,10 @@ void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
+ 	hfs_dbg(BNODE_MOD, "copybytes: %u,%u,%u\n", dst, src, len);
+ 	if (!len)
+ 		return;
++
++	len = check_and_correct_requested_length(src_node, src, len);
++	len = check_and_correct_requested_length(dst_node, dst, len);
++
+ 	src += src_node->page_offset;
+ 	dst += dst_node->page_offset;
+ 	src_page = src_node->page + (src >> PAGE_SHIFT);
+@@ -187,6 +275,10 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
+ 	hfs_dbg(BNODE_MOD, "movebytes: %u,%u,%u\n", dst, src, len);
+ 	if (!len)
+ 		return;
++
++	len = check_and_correct_requested_length(node, src, len);
++	len = check_and_correct_requested_length(node, dst, len);
++
+ 	src += node->page_offset;
+ 	dst += node->page_offset;
+ 	if (dst > src) {
+diff --git a/fs/hfsplus/unicode.c b/fs/hfsplus/unicode.c
+index 73342c925a4b6e..36b6cf2a3abba4 100644
+--- a/fs/hfsplus/unicode.c
++++ b/fs/hfsplus/unicode.c
+@@ -132,7 +132,14 @@ int hfsplus_uni2asc(struct super_block *sb,
+ 
+ 	op = astr;
+ 	ip = ustr->unicode;
++
+ 	ustrlen = be16_to_cpu(ustr->length);
++	if (ustrlen > HFSPLUS_MAX_STRLEN) {
++		ustrlen = HFSPLUS_MAX_STRLEN;
++		pr_err("invalid length %u has been corrected to %d\n",
++			be16_to_cpu(ustr->length), ustrlen);
++	}
++
+ 	len = *len_p;
+ 	ce1 = NULL;
+ 	compose = !test_bit(HFSPLUS_SB_NODECOMPOSE, &HFSPLUS_SB(sb)->flags);
+diff --git a/fs/hfsplus/xattr.c b/fs/hfsplus/xattr.c
+index 9a1a93e3888b92..18dc3d254d218c 100644
+--- a/fs/hfsplus/xattr.c
++++ b/fs/hfsplus/xattr.c
+@@ -172,7 +172,11 @@ static int hfsplus_create_attributes_file(struct super_block *sb)
+ 		return PTR_ERR(attr_file);
+ 	}
+ 
+-	BUG_ON(i_size_read(attr_file) != 0);
++	if (i_size_read(attr_file) != 0) {
++		err = -EIO;
++		pr_err("detected inconsistent attributes file, running fsck.hfsplus is recommended.\n");
++		goto end_attr_file_creation;
++	}
+ 
+ 	hip = HFSPLUS_I(attr_file);
+ 
+diff --git a/fs/jfs/file.c b/fs/jfs/file.c
+index 01b6912e60f808..742cadd1f37e84 100644
+--- a/fs/jfs/file.c
++++ b/fs/jfs/file.c
+@@ -44,6 +44,9 @@ static int jfs_open(struct inode *inode, struct file *file)
+ {
+ 	int rc;
+ 
++	if (S_ISREG(inode->i_mode) && inode->i_size < 0)
++		return -EIO;
++
+ 	if ((rc = dquot_file_open(inode, file)))
+ 		return rc;
+ 
+diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
+index 07cfdc4405968b..5fe8cb4742c21d 100644
+--- a/fs/jfs/inode.c
++++ b/fs/jfs/inode.c
+@@ -145,9 +145,9 @@ void jfs_evict_inode(struct inode *inode)
+ 	if (!inode->i_nlink && !is_bad_inode(inode)) {
+ 		dquot_initialize(inode);
+ 
++		truncate_inode_pages_final(&inode->i_data);
+ 		if (JFS_IP(inode)->fileset == FILESYSTEM_I) {
+ 			struct inode *ipimap = JFS_SBI(inode->i_sb)->ipimap;
+-			truncate_inode_pages_final(&inode->i_data);
+ 
+ 			if (test_cflag(COMMIT_Freewmap, inode))
+ 				jfs_free_zero_link(inode);
+diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
+index 5a877261c3fe48..cdfa699cd7c8fa 100644
+--- a/fs/jfs/jfs_dmap.c
++++ b/fs/jfs/jfs_dmap.c
+@@ -1389,6 +1389,12 @@ dbAllocAG(struct bmap * bmp, int agno, s64 nblocks, int l2nb, s64 * results)
+ 	    (1 << (L2LPERCTL - (bmp->db_agheight << 1))) / bmp->db_agwidth;
+ 	ti = bmp->db_agstart + bmp->db_agwidth * (agno & (agperlev - 1));
+ 
++	if (ti < 0 || ti >= le32_to_cpu(dcp->nleafs)) {
++		jfs_error(bmp->db_ipbmap->i_sb, "Corrupt dmapctl page\n");
++		release_metapage(mp);
++		return -EIO;
++	}
++
+ 	/* dmap control page trees fan-out by 4 and a single allocation
+ 	 * group may be described by 1 or 2 subtrees within the ag level
+ 	 * dmap control page, depending upon the ag size. examine the ag's
+diff --git a/fs/libfs.c b/fs/libfs.c
+index 3cb49463a84969..87432416784966 100644
+--- a/fs/libfs.c
++++ b/fs/libfs.c
+@@ -608,7 +608,7 @@ void simple_recursive_removal(struct dentry *dentry,
+ 		struct dentry *victim = NULL, *child;
+ 		struct inode *inode = this->d_inode;
+ 
+-		inode_lock(inode);
++		inode_lock_nested(inode, I_MUTEX_CHILD);
+ 		if (d_is_dir(this))
+ 			inode->i_flags |= S_DEAD;
+ 		while ((child = find_next_child(this, victim)) == NULL) {
+@@ -620,7 +620,7 @@ void simple_recursive_removal(struct dentry *dentry,
+ 			victim = this;
+ 			this = this->d_parent;
+ 			inode = this->d_inode;
+-			inode_lock(inode);
++			inode_lock_nested(inode, I_MUTEX_CHILD);
+ 			if (simple_positive(victim)) {
+ 				d_invalidate(victim);	// avoid lost mounts
+ 				if (d_is_dir(victim))
+diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
+index 47189476b5538b..5d6edafbed202a 100644
+--- a/fs/nfs/blocklayout/blocklayout.c
++++ b/fs/nfs/blocklayout/blocklayout.c
+@@ -149,8 +149,8 @@ do_add_page_to_bio(struct bio *bio, int npg, enum req_op op, sector_t isect,
+ 
+ 	/* limit length to what the device mapping allows */
+ 	end = disk_addr + *len;
+-	if (end >= map->start + map->len)
+-		*len = map->start + map->len - disk_addr;
++	if (end >= map->disk_offset + map->len)
++		*len = map->disk_offset + map->len - disk_addr;
+ 
+ retry:
+ 	if (!bio) {
+diff --git a/fs/nfs/blocklayout/dev.c b/fs/nfs/blocklayout/dev.c
+index cab8809f0e0f48..44306ac22353be 100644
+--- a/fs/nfs/blocklayout/dev.c
++++ b/fs/nfs/blocklayout/dev.c
+@@ -257,10 +257,11 @@ static bool bl_map_stripe(struct pnfs_block_dev *dev, u64 offset,
+ 	struct pnfs_block_dev *child;
+ 	u64 chunk;
+ 	u32 chunk_idx;
++	u64 disk_chunk;
+ 	u64 disk_offset;
+ 
+ 	chunk = div_u64(offset, dev->chunk_size);
+-	div_u64_rem(chunk, dev->nr_children, &chunk_idx);
++	disk_chunk = div_u64_rem(chunk, dev->nr_children, &chunk_idx);
+ 
+ 	if (chunk_idx >= dev->nr_children) {
+ 		dprintk("%s: invalid chunk idx %d (%lld/%lld)\n",
+@@ -273,7 +274,7 @@ static bool bl_map_stripe(struct pnfs_block_dev *dev, u64 offset,
+ 	offset = chunk * dev->chunk_size;
+ 
+ 	/* disk offset of the stripe */
+-	disk_offset = div_u64(offset, dev->nr_children);
++	disk_offset = disk_chunk * dev->chunk_size;
+ 
+ 	child = &dev->children[chunk_idx];
+ 	child->map(child, disk_offset, map);
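
The dev.c hunk is an arithmetic fix: the disk offset of a stripe is (chunk / nr_children) * chunk_size, using the quotient div_u64_rem() already computed, whereas the old div_u64(offset, nr_children) only agrees when the division is exact. Small illustrative numbers show the divergence:

#include <stdio.h>

int main(void)
{
	const unsigned long long chunk_size = 3, nr_children = 2;
	unsigned long long chunk;

	for (chunk = 0; chunk < 6; chunk++) {
		unsigned long long new_off = (chunk / nr_children) * chunk_size;
		unsigned long long old_off = (chunk * chunk_size) / nr_children;

		printf("chunk %llu: new %llu, old %llu%s\n", chunk, new_off,
		       old_off, new_off != old_off ? "  <- divergence" : "");
	}
	return 0;
}
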
+diff --git a/fs/nfs/blocklayout/extent_tree.c b/fs/nfs/blocklayout/extent_tree.c
+index 8f7cff7a42938e..0add0f329816b0 100644
+--- a/fs/nfs/blocklayout/extent_tree.c
++++ b/fs/nfs/blocklayout/extent_tree.c
+@@ -552,6 +552,15 @@ static int ext_tree_encode_commit(struct pnfs_block_layout *bl, __be32 *p,
+ 	return ret;
+ }
+ 
++/**
++ * ext_tree_prepare_commit - encode extents that need to be committed
++ * @arg: layout commit data
++ *
++ * Return values:
++ *   %0: Success, all required extents are encoded
++ *   %-ENOSPC: Some extents are encoded, but not all, due to RPC size limit
++ *   %-ENOMEM: Out of memory, extents not encoded
++ */
+ int
+ ext_tree_prepare_commit(struct nfs4_layoutcommit_args *arg)
+ {
+@@ -568,12 +577,12 @@ ext_tree_prepare_commit(struct nfs4_layoutcommit_args *arg)
+ 	start_p = page_address(arg->layoutupdate_page);
+ 	arg->layoutupdate_pages = &arg->layoutupdate_page;
+ 
+-retry:
+-	ret = ext_tree_encode_commit(bl, start_p + 1, buffer_size, &count, &arg->lastbytewritten);
++	ret = ext_tree_encode_commit(bl, start_p + 1, buffer_size,
++			&count, &arg->lastbytewritten);
+ 	if (unlikely(ret)) {
+ 		ext_tree_free_commitdata(arg, buffer_size);
+ 
+-		buffer_size = ext_tree_layoutupdate_size(bl, count);
++		buffer_size = NFS_SERVER(arg->inode)->wsize;
+ 		count = 0;
+ 
+ 		arg->layoutupdate_pages =
+@@ -588,7 +597,8 @@ ext_tree_prepare_commit(struct nfs4_layoutcommit_args *arg)
+ 			return -ENOMEM;
+ 		}
+ 
+-		goto retry;
++		ret = ext_tree_encode_commit(bl, start_p + 1, buffer_size,
++				&count, &arg->lastbytewritten);
+ 	}
+ 
+ 	*start_p = cpu_to_be32(count);
+@@ -608,7 +618,7 @@ ext_tree_prepare_commit(struct nfs4_layoutcommit_args *arg)
+ 	}
+ 
+ 	dprintk("%s found %zu ranges\n", __func__, count);
+-	return 0;
++	return ret;
+ }
+ 
+ void
+diff --git a/fs/nfs/client.c b/fs/nfs/client.c
+index 4503758e9594bf..17edc124d03f22 100644
+--- a/fs/nfs/client.c
++++ b/fs/nfs/client.c
+@@ -676,6 +676,44 @@ struct nfs_client *nfs_init_client(struct nfs_client *clp,
+ }
+ EXPORT_SYMBOL_GPL(nfs_init_client);
+ 
++static void nfs4_server_set_init_caps(struct nfs_server *server)
++{
++#if IS_ENABLED(CONFIG_NFS_V4)
++	/* Set the basic capabilities */
++	server->caps = server->nfs_client->cl_mvops->init_caps;
++	if (server->flags & NFS_MOUNT_NORDIRPLUS)
++		server->caps &= ~NFS_CAP_READDIRPLUS;
++	if (server->nfs_client->cl_proto == XPRT_TRANSPORT_RDMA)
++		server->caps &= ~NFS_CAP_READ_PLUS;
++
++	/*
++	 * Don't use NFS uid/gid mapping if we're using AUTH_SYS or lower
++	 * authentication.
++	 */
++	if (nfs4_disable_idmapping &&
++	    server->client->cl_auth->au_flavor == RPC_AUTH_UNIX)
++		server->caps |= NFS_CAP_UIDGID_NOMAP;
++#endif
++}
++
++void nfs_server_set_init_caps(struct nfs_server *server)
++{
++	switch (server->nfs_client->rpc_ops->version) {
++	case 2:
++		server->caps = NFS_CAP_HARDLINKS | NFS_CAP_SYMLINKS;
++		break;
++	case 3:
++		server->caps = NFS_CAP_HARDLINKS | NFS_CAP_SYMLINKS;
++		if (!(server->flags & NFS_MOUNT_NORDIRPLUS))
++			server->caps |= NFS_CAP_READDIRPLUS;
++		break;
++	default:
++		nfs4_server_set_init_caps(server);
++		break;
++	}
++}
++EXPORT_SYMBOL_GPL(nfs_server_set_init_caps);
++
+ /*
+  * Create a version 2 or 3 client
+  */
+@@ -717,7 +755,6 @@ static int nfs_init_server(struct nfs_server *server,
+ 	/* Initialise the client representation from the mount data */
+ 	server->flags = ctx->flags;
+ 	server->options = ctx->options;
+-	server->caps |= NFS_CAP_HARDLINKS | NFS_CAP_SYMLINKS;
+ 
+ 	switch (clp->rpc_ops->version) {
+ 	case 2:
+@@ -753,6 +790,8 @@ static int nfs_init_server(struct nfs_server *server,
+ 	if (error < 0)
+ 		goto error;
+ 
++	nfs_server_set_init_caps(server);
++
+ 	/* Preserve the values of mount_server-related mount options */
+ 	if (ctx->mount_server.addrlen) {
+ 		memcpy(&server->mountd_address, &ctx->mount_server.address,
+@@ -927,7 +966,6 @@ void nfs_server_copy_userdata(struct nfs_server *target, struct nfs_server *sour
+ 	target->acregmax = source->acregmax;
+ 	target->acdirmin = source->acdirmin;
+ 	target->acdirmax = source->acdirmax;
+-	target->caps = source->caps;
+ 	target->options = source->options;
+ 	target->auth_info = source->auth_info;
+ 	target->port = source->port;
+@@ -1161,6 +1199,8 @@ struct nfs_server *nfs_clone_server(struct nfs_server *source,
+ 	if (error < 0)
+ 		goto out_free_server;
+ 
++	nfs_server_set_init_caps(server);
++
+ 	/* probe the filesystem info for this server filesystem */
+ 	error = nfs_probe_server(server, fh);
+ 	if (error < 0)
+diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
+index 9840b779f0dfd8..882d804089add1 100644
+--- a/fs/nfs/internal.h
++++ b/fs/nfs/internal.h
+@@ -231,7 +231,7 @@ extern struct nfs_client *
+ nfs4_find_client_sessionid(struct net *, const struct sockaddr *,
+ 				struct nfs4_sessionid *, u32);
+ extern struct nfs_server *nfs_create_server(struct fs_context *);
+-extern void nfs4_server_set_init_caps(struct nfs_server *);
++extern void nfs_server_set_init_caps(struct nfs_server *);
+ extern struct nfs_server *nfs4_create_server(struct fs_context *);
+ extern struct nfs_server *nfs4_create_referral_server(struct fs_context *);
+ extern int nfs4_update_server(struct nfs_server *server, const char *hostname,
+diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
+index 83378f69b35ea5..37c17f70cebe30 100644
+--- a/fs/nfs/nfs4client.c
++++ b/fs/nfs/nfs4client.c
+@@ -1081,24 +1081,6 @@ static void nfs4_session_limit_xasize(struct nfs_server *server)
+ #endif
+ }
+ 
+-void nfs4_server_set_init_caps(struct nfs_server *server)
+-{
+-	/* Set the basic capabilities */
+-	server->caps |= server->nfs_client->cl_mvops->init_caps;
+-	if (server->flags & NFS_MOUNT_NORDIRPLUS)
+-			server->caps &= ~NFS_CAP_READDIRPLUS;
+-	if (server->nfs_client->cl_proto == XPRT_TRANSPORT_RDMA)
+-		server->caps &= ~NFS_CAP_READ_PLUS;
+-
+-	/*
+-	 * Don't use NFS uid/gid mapping if we're using AUTH_SYS or lower
+-	 * authentication.
+-	 */
+-	if (nfs4_disable_idmapping &&
+-			server->client->cl_auth->au_flavor == RPC_AUTH_UNIX)
+-		server->caps |= NFS_CAP_UIDGID_NOMAP;
+-}
+-
+ static int nfs4_server_common_setup(struct nfs_server *server,
+ 		struct nfs_fh *mntfh, bool auth_probe)
+ {
+@@ -1113,7 +1095,7 @@ static int nfs4_server_common_setup(struct nfs_server *server,
+ 	if (error < 0)
+ 		goto out;
+ 
+-	nfs4_server_set_init_caps(server);
++	nfs_server_set_init_caps(server);
+ 
+ 	/* Probe the root fh to retrieve its FSID and filehandle */
+ 	error = nfs4_get_rootfh(server, mntfh, auth_probe);
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index e27cd2c7cfd191..e6b7cbc06c9c8e 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -4064,7 +4064,7 @@ int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
+ 	};
+ 	int err;
+ 
+-	nfs4_server_set_init_caps(server);
++	nfs_server_set_init_caps(server);
+ 	do {
+ 		err = nfs4_handle_exception(server,
+ 				_nfs4_server_capabilities(server, fhandle),
+diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
+index 6b888e9ff394a5..89d49dd3978f94 100644
+--- a/fs/nfs/pnfs.c
++++ b/fs/nfs/pnfs.c
+@@ -3332,6 +3332,7 @@ pnfs_layoutcommit_inode(struct inode *inode, bool sync)
+ 	struct nfs_inode *nfsi = NFS_I(inode);
+ 	loff_t end_pos;
+ 	int status;
++	bool mark_as_dirty = false;
+ 
+ 	if (!pnfs_layoutcommit_outstanding(inode))
+ 		return 0;
+@@ -3383,19 +3384,23 @@ pnfs_layoutcommit_inode(struct inode *inode, bool sync)
+ 	if (ld->prepare_layoutcommit) {
+ 		status = ld->prepare_layoutcommit(&data->args);
+ 		if (status) {
+-			put_cred(data->cred);
++			if (status != -ENOSPC)
++				put_cred(data->cred);
+ 			spin_lock(&inode->i_lock);
+ 			set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags);
+ 			if (end_pos > nfsi->layout->plh_lwb)
+ 				nfsi->layout->plh_lwb = end_pos;
+-			goto out_unlock;
++			if (status != -ENOSPC)
++				goto out_unlock;
++			spin_unlock(&inode->i_lock);
++			mark_as_dirty = true;
+ 		}
+ 	}
+ 
+ 
+ 	status = nfs4_proc_layoutcommit(data, sync);
+ out:
+-	if (status)
++	if (status || mark_as_dirty)
+ 		mark_inode_dirty_sync(inode);
+ 	dprintk("<-- %s status %d\n", __func__, status);
+ 	return status;
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index c50839a015e94f..bcb44400e24398 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -4550,10 +4550,16 @@ nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
+ 	}
+ 	status = nfs_ok;
+ 	if (conf) {
+-		old = unconf;
+-		unhash_client_locked(old);
+-		nfsd4_change_callback(conf, &unconf->cl_cb_conn);
+-	} else {
++		if (get_client_locked(conf) == nfs_ok) {
++			old = unconf;
++			unhash_client_locked(old);
++			nfsd4_change_callback(conf, &unconf->cl_cb_conn);
++		} else {
++			conf = NULL;
++		}
++	}
++
++	if (!conf) {
+ 		old = find_confirmed_client_by_name(&unconf->cl_name, nn);
+ 		if (old) {
+ 			status = nfserr_clid_inuse;
+@@ -4570,10 +4576,14 @@ nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
+ 			}
+ 			trace_nfsd_clid_replaced(&old->cl_clientid);
+ 		}
++		status = get_client_locked(unconf);
++		if (status != nfs_ok) {
++			old = NULL;
++			goto out;
++		}
+ 		move_to_confirmed(unconf);
+ 		conf = unconf;
+ 	}
+-	get_client_locked(conf);
+ 	spin_unlock(&nn->client_lock);
+ 	if (conf == unconf)
+ 		fsnotify_dentry(conf->cl_nfsd_info_dentry, FS_MODIFY);
+@@ -6140,6 +6150,20 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
+ 		status = nfs4_check_deleg(cl, open, &dp);
+ 		if (status)
+ 			goto out;
++		if (dp && nfsd4_is_deleg_cur(open) &&
++				(dp->dl_stid.sc_file != fp)) {
++			/*
++			 * RFC8881 section 8.2.4 mandates the server to return
++			 * NFS4ERR_BAD_STATEID if the selected table entry does
++			 * not match the current filehandle. However returning
++			 * NFS4ERR_BAD_STATEID in the OPEN can cause the client
++			 * to repeatedly retry the operation with the same
++			 * stateid, since the stateid itself is valid. To avoid
++			 * this situation NFSD returns NFS4ERR_INVAL instead.
++			 */
++			status = nfserr_inval;
++			goto out;
++		}
+ 		stp = nfsd4_find_and_lock_existing_open(fp, open);
+ 	} else {
+ 		open->op_file = NULL;
+diff --git a/fs/ntfs3/dir.c b/fs/ntfs3/dir.c
+index b6da80c69ca634..600e66035c1b70 100644
+--- a/fs/ntfs3/dir.c
++++ b/fs/ntfs3/dir.c
+@@ -304,6 +304,9 @@ static inline bool ntfs_dir_emit(struct ntfs_sb_info *sbi,
+ 	if (sbi->options->nohidden && (fname->dup.fa & FILE_ATTRIBUTE_HIDDEN))
+ 		return true;
+ 
++	if (fname->name_len + sizeof(struct NTFS_DE) > le16_to_cpu(e->size))
++		return true;
++
+ 	name_len = ntfs_utf16_to_nls(sbi, fname->name, fname->name_len, name,
+ 				     PATH_MAX);
+ 	if (name_len <= 0) {
+diff --git a/fs/ntfs3/inode.c b/fs/ntfs3/inode.c
+index 5c05cccd2d40b1..9077c7b6273364 100644
+--- a/fs/ntfs3/inode.c
++++ b/fs/ntfs3/inode.c
+@@ -1102,10 +1102,10 @@ int inode_read_data(struct inode *inode, void *data, size_t bytes)
+  * Number of bytes for REPARSE_DATA_BUFFER(IO_REPARSE_TAG_SYMLINK)
+  * for unicode string of @uni_len length.
+  */
+-static inline u32 ntfs_reparse_bytes(u32 uni_len)
++static inline u32 ntfs_reparse_bytes(u32 uni_len, bool is_absolute)
+ {
+ 	/* Header + unicode string + decorated unicode string. */
+-	return sizeof(short) * (2 * uni_len + 4) +
++	return sizeof(short) * (2 * uni_len + (is_absolute ? 4 : 0)) +
+ 	       offsetof(struct REPARSE_DATA_BUFFER,
+ 			SymbolicLinkReparseBuffer.PathBuffer);
+ }
+@@ -1118,8 +1118,11 @@ ntfs_create_reparse_buffer(struct ntfs_sb_info *sbi, const char *symname,
+ 	struct REPARSE_DATA_BUFFER *rp;
+ 	__le16 *rp_name;
+ 	typeof(rp->SymbolicLinkReparseBuffer) *rs;
++	bool is_absolute;
+ 
+-	rp = kzalloc(ntfs_reparse_bytes(2 * size + 2), GFP_NOFS);
++	is_absolute = (strlen(symname) > 1 && symname[1] == ':');
++
++	rp = kzalloc(ntfs_reparse_bytes(2 * size + 2, is_absolute), GFP_NOFS);
+ 	if (!rp)
+ 		return ERR_PTR(-ENOMEM);
+ 
+@@ -1134,7 +1137,7 @@ ntfs_create_reparse_buffer(struct ntfs_sb_info *sbi, const char *symname,
+ 		goto out;
+ 
+ 	/* err = the length of unicode name of symlink. */
+-	*nsize = ntfs_reparse_bytes(err);
++	*nsize = ntfs_reparse_bytes(err, is_absolute);
+ 
+ 	if (*nsize > sbi->reparse.max_size) {
+ 		err = -EFBIG;
+@@ -1154,7 +1157,7 @@ ntfs_create_reparse_buffer(struct ntfs_sb_info *sbi, const char *symname,
+ 
+ 	/* PrintName + SubstituteName. */
+ 	rs->SubstituteNameOffset = cpu_to_le16(sizeof(short) * err);
+-	rs->SubstituteNameLength = cpu_to_le16(sizeof(short) * err + 8);
++	rs->SubstituteNameLength = cpu_to_le16(sizeof(short) * err + (is_absolute ? 8 : 0));
+ 	rs->PrintNameLength = rs->SubstituteNameOffset;
+ 
+ 	/*
+@@ -1162,16 +1165,18 @@ ntfs_create_reparse_buffer(struct ntfs_sb_info *sbi, const char *symname,
+ 	 * parse this path.
+ 	 * 0-absolute path 1- relative path (SYMLINK_FLAG_RELATIVE).
+ 	 */
+-	rs->Flags = 0;
++	rs->Flags = cpu_to_le32(is_absolute ? 0 : SYMLINK_FLAG_RELATIVE);
+ 
+-	memmove(rp_name + err + 4, rp_name, sizeof(short) * err);
++	memmove(rp_name + err + (is_absolute ? 4 : 0), rp_name, sizeof(short) * err);
+ 
+-	/* Decorate SubstituteName. */
+-	rp_name += err;
+-	rp_name[0] = cpu_to_le16('\\');
+-	rp_name[1] = cpu_to_le16('?');
+-	rp_name[2] = cpu_to_le16('?');
+-	rp_name[3] = cpu_to_le16('\\');
++	if (is_absolute) {
++		/* Decorate SubstituteName. */
++		rp_name += err;
++		rp_name[0] = cpu_to_le16('\\');
++		rp_name[1] = cpu_to_le16('?');
++		rp_name[2] = cpu_to_le16('?');
++		rp_name[3] = cpu_to_le16('\\');
++	}
+ 
+ 	return rp;
+ out:
+diff --git a/fs/orangefs/orangefs-debugfs.c b/fs/orangefs/orangefs-debugfs.c
+index b57140ebfad0f7..cd4bfd92ebd6e8 100644
+--- a/fs/orangefs/orangefs-debugfs.c
++++ b/fs/orangefs/orangefs-debugfs.c
+@@ -354,7 +354,7 @@ static ssize_t orangefs_debug_read(struct file *file,
+ 		goto out;
+ 
+ 	mutex_lock(&orangefs_debug_lock);
+-	sprintf_ret = sprintf(buf, "%s", (char *)file->private_data);
++	sprintf_ret = scnprintf(buf, ORANGEFS_MAX_DEBUG_STRING_LEN, "%s", (char *)file->private_data);
+ 	mutex_unlock(&orangefs_debug_lock);
+ 
+ 	read_ret = simple_read_from_buffer(ubuf, count, ppos, buf, sprintf_ret);
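
The orangefs hunk swaps an unbounded sprintf() for scnprintf(), whose return value is the number of bytes actually stored rather than the would-have-been length, making it safe to pass straight to simple_read_from_buffer(). A user-space model of that return-value contract, built on snprintf():

#include <stdio.h>

/* Model of the kernel's scnprintf(): like snprintf(), but the return
 * value is the count of bytes written to buf (excluding the NUL), never
 * the length the full output would have had. */
static int scnprintf_model(char *buf, size_t size, const char *src)
{
	int n = snprintf(buf, size, "%s", src);

	if (n < 0)
		return 0;
	return (size_t)n >= size ? (int)size - 1 : n;
}

int main(void)
{
	char buf[8];
	int n = scnprintf_model(buf, sizeof(buf), "0123456789");

	printf("%d \"%s\"\n", n, buf);	/* 7 "0123456" */
	return 0;
}
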
+diff --git a/fs/pidfs.c b/fs/pidfs.c
+index 52b7e4f7673274..5a8d8eb8df23be 100644
+--- a/fs/pidfs.c
++++ b/fs/pidfs.c
+@@ -382,6 +382,8 @@ static int pidfs_init_fs_context(struct fs_context *fc)
+ 	if (!ctx)
+ 		return -ENOMEM;
+ 
++	fc->s_iflags |= SB_I_NOEXEC;
++	fc->s_iflags |= SB_I_NODEV;
+ 	ctx->ops = &pidfs_sops;
+ 	ctx->dops = &pidfs_dentry_operations;
+ 	fc->s_fs_info = (void *)&pidfs_stashed_ops;
+diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
+index 72a58681f0316b..2257bf52fb2a49 100644
+--- a/fs/proc/task_mmu.c
++++ b/fs/proc/task_mmu.c
+@@ -1007,10 +1007,13 @@ static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
+ {
+ 	struct mem_size_stats *mss = walk->private;
+ 	struct vm_area_struct *vma = walk->vma;
+-	pte_t ptent = huge_ptep_get(walk->mm, addr, pte);
+ 	struct folio *folio = NULL;
+ 	bool present = false;
++	spinlock_t *ptl;
++	pte_t ptent;
+ 
++	ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
++	ptent = huge_ptep_get(walk->mm, addr, pte);
+ 	if (pte_present(ptent)) {
+ 		folio = page_folio(pte_page(ptent));
+ 		present = true;
+@@ -1029,6 +1032,7 @@ static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
+ 		else
+ 			mss->private_hugetlb += huge_page_size(hstate_vma(vma));
+ 	}
++	spin_unlock(ptl);
+ 	return 0;
+ }
+ #else
+diff --git a/fs/smb/client/cifssmb.c b/fs/smb/client/cifssmb.c
+index e3d9367eaec373..8a257cb29c955e 100644
+--- a/fs/smb/client/cifssmb.c
++++ b/fs/smb/client/cifssmb.c
+@@ -4028,6 +4028,12 @@ CIFSFindFirst(const unsigned int xid, struct cifs_tcon *tcon,
+ 			pSMB->FileName[name_len] = 0;
+ 			pSMB->FileName[name_len+1] = 0;
+ 			name_len += 2;
++		} else if (!searchName[0]) {
++			pSMB->FileName[0] = CIFS_DIR_SEP(cifs_sb);
++			pSMB->FileName[1] = 0;
++			pSMB->FileName[2] = 0;
++			pSMB->FileName[3] = 0;
++			name_len = 4;
+ 		}
+ 	} else {
+ 		name_len = copy_path_name(pSMB->FileName, searchName);
+@@ -4039,6 +4045,10 @@ CIFSFindFirst(const unsigned int xid, struct cifs_tcon *tcon,
+ 			pSMB->FileName[name_len] = '*';
+ 			pSMB->FileName[name_len+1] = 0;
+ 			name_len += 2;
++		} else if (!searchName[0]) {
++			pSMB->FileName[0] = CIFS_DIR_SEP(cifs_sb);
++			pSMB->FileName[1] = 0;
++			name_len = 2;
+ 		}
+ 	}
+ 
+diff --git a/fs/smb/client/compress.c b/fs/smb/client/compress.c
+index 766b4de13da76a..db709f5cd2e1ff 100644
+--- a/fs/smb/client/compress.c
++++ b/fs/smb/client/compress.c
+@@ -155,58 +155,29 @@ static int cmp_bkt(const void *_a, const void *_b)
+ }
+ 
+ /*
+- * TODO:
+- * Support other iter types, if required.
+- * Only ITER_XARRAY is supported for now.
++ * Collect some 2K samples with 2K gaps between.
+  */
+-static int collect_sample(const struct iov_iter *iter, ssize_t max, u8 *sample)
++static int collect_sample(const struct iov_iter *source, ssize_t max, u8 *sample)
+ {
+-	struct folio *folios[16], *folio;
+-	unsigned int nr, i, j, npages;
+-	loff_t start = iter->xarray_start + iter->iov_offset;
+-	pgoff_t last, index = start / PAGE_SIZE;
+-	size_t len, off, foff;
+-	void *p;
+-	int s = 0;
+-
+-	last = (start + max - 1) / PAGE_SIZE;
+-	do {
+-		nr = xa_extract(iter->xarray, (void **)folios, index, last, ARRAY_SIZE(folios),
+-				XA_PRESENT);
+-		if (nr == 0)
+-			return -EIO;
+-
+-		for (i = 0; i < nr; i++) {
+-			folio = folios[i];
+-			npages = folio_nr_pages(folio);
+-			foff = start - folio_pos(folio);
+-			off = foff % PAGE_SIZE;
+-
+-			for (j = foff / PAGE_SIZE; j < npages; j++) {
+-				size_t len2;
+-
+-				len = min_t(size_t, max, PAGE_SIZE - off);
+-				len2 = min_t(size_t, len, SZ_2K);
+-
+-				p = kmap_local_page(folio_page(folio, j));
+-				memcpy(&sample[s], p, len2);
+-				kunmap_local(p);
+-
+-				s += len2;
+-
+-				if (len2 < SZ_2K || s >= max - SZ_2K)
+-					return s;
+-
+-				max -= len;
+-				if (max <= 0)
+-					return s;
+-
+-				start += len;
+-				off = 0;
+-				index++;
+-			}
+-		}
+-	} while (nr == ARRAY_SIZE(folios));
++	struct iov_iter iter = *source;
++	size_t s = 0;
++
++	while (iov_iter_count(&iter) >= SZ_2K) {
++		size_t part = umin(umin(iov_iter_count(&iter), SZ_2K), max);
++		size_t n;
++
++		n = copy_from_iter(sample + s, part, &iter);
++		if (n != part)
++			return -EFAULT;
++
++		s += n;
++		max -= n;
++
++		if (iov_iter_count(&iter) < PAGE_SIZE - SZ_2K)
++			break;
++
++		iov_iter_advance(&iter, SZ_2K);
++	}
+ 
+ 	return s;
+ }
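
collect_sample() now walks a copy of the iterator instead of extracting xarray folios by hand: take a 2K chunk, skip 2K, stop when either the data or the sample buffer runs out. The same pattern over a flat buffer:

#include <stdio.h>
#include <string.h>

#define SZ_2K 2048

/* Sample 2K chunks with 2K gaps, as the rewritten collect_sample() does. */
static size_t sample_2k(const unsigned char *buf, size_t len,
			unsigned char *out, size_t max)
{
	size_t s = 0, pos = 0;

	while (pos + SZ_2K <= len && s + SZ_2K <= max) {
		memcpy(out + s, buf + pos, SZ_2K);
		s += SZ_2K;
		pos += 2 * SZ_2K;	/* 2K taken, 2K skipped */
	}
	return s;
}

int main(void)
{
	unsigned char buf[16384], out[8192];

	memset(buf, 0xA5, sizeof(buf));
	printf("sampled %zu bytes\n",
	       sample_2k(buf, sizeof(buf), out, sizeof(out))); /* 8192 */
	return 0;
}
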
+diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
+index ebc380b18da737..0d4c811e0334c3 100644
+--- a/fs/smb/client/connect.c
++++ b/fs/smb/client/connect.c
+@@ -3165,18 +3165,15 @@ generic_ip_connect(struct TCP_Server_Info *server)
+ 		struct net *net = cifs_net_ns(server);
+ 		struct sock *sk;
+ 
+-		rc = __sock_create(net, sfamily, SOCK_STREAM,
+-				   IPPROTO_TCP, &server->ssocket, 1);
++		rc = sock_create_kern(net, sfamily, SOCK_STREAM,
++				      IPPROTO_TCP, &server->ssocket);
+ 		if (rc < 0) {
+ 			cifs_server_dbg(VFS, "Error %d creating socket\n", rc);
+ 			return rc;
+ 		}
+ 
+ 		sk = server->ssocket->sk;
+-		__netns_tracker_free(net, &sk->ns_tracker, false);
+-		sk->sk_net_refcnt = 1;
+-		get_net_track(net, &sk->ns_tracker, GFP_KERNEL);
+-		sock_inuse_add(net, 1);
++		sk_net_refcnt_upgrade(sk);
+ 
+ 		/* BB other socket options to set KEEPALIVE, NODELAY? */
+ 		cifs_dbg(FYI, "Socket created\n");
+@@ -4001,7 +3998,6 @@ cifs_negotiate_protocol(const unsigned int xid, struct cifs_ses *ses,
+ 		return 0;
+ 	}
+ 
+-	server->lstrp = jiffies;
+ 	server->tcpStatus = CifsInNegotiate;
+ 	server->neg_start = jiffies;
+ 	spin_unlock(&server->srv_lock);
+diff --git a/fs/smb/client/sess.c b/fs/smb/client/sess.c
+index 8be7c4d2d9d623..a11a6ebae3860f 100644
+--- a/fs/smb/client/sess.c
++++ b/fs/smb/client/sess.c
+@@ -360,6 +360,7 @@ cifs_chan_update_iface(struct cifs_ses *ses, struct TCP_Server_Info *server)
+ 	struct cifs_server_iface *old_iface = NULL;
+ 	struct cifs_server_iface *last_iface = NULL;
+ 	struct sockaddr_storage ss;
++	int retry = 0;
+ 
+ 	spin_lock(&ses->chan_lock);
+ 	chan_index = cifs_ses_get_chan_index(ses, server);
+@@ -388,6 +389,7 @@ cifs_chan_update_iface(struct cifs_ses *ses, struct TCP_Server_Info *server)
+ 		return;
+ 	}
+ 
++try_again:
+ 	last_iface = list_last_entry(&ses->iface_list, struct cifs_server_iface,
+ 				     iface_head);
+ 	iface_min_speed = last_iface->speed;
+@@ -425,6 +427,13 @@ cifs_chan_update_iface(struct cifs_ses *ses, struct TCP_Server_Info *server)
+ 	}
+ 
+ 	if (list_entry_is_head(iface, &ses->iface_list, iface_head)) {
++		list_for_each_entry(iface, &ses->iface_list, iface_head)
++			iface->weight_fulfilled = 0;
++
++		/* see if it can be satisfied in second attempt */
++		if (!retry++)
++			goto try_again;
++
+ 		iface = NULL;
+ 		cifs_dbg(FYI, "unable to find a suitable iface\n");
+ 	}
+diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
+index 78a546ef69e889..4bababee965a08 100644
+--- a/fs/smb/client/smb2ops.c
++++ b/fs/smb/client/smb2ops.c
+@@ -772,6 +772,13 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
+ 			bytes_left -= sizeof(*p);
+ 			break;
+ 		}
++		/* Validate that Next doesn't point beyond the buffer */
++		if (next > bytes_left) {
++			cifs_dbg(VFS, "%s: invalid Next pointer %zu > %zd\n",
++				 __func__, next, bytes_left);
++			rc = -EINVAL;
++			goto out;
++		}
+ 		p = (struct network_interface_info_ioctl_rsp *)((u8 *)p+next);
+ 		bytes_left -= next;
+ 	}
+@@ -783,7 +790,9 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
+ 	}
+ 
+ 	/* Azure rounds the buffer size up 8, to a 16 byte boundary */
+-	if ((bytes_left > 8) || p->Next)
++	if ((bytes_left > 8) ||
++	    (bytes_left >= offsetof(struct network_interface_info_ioctl_rsp, Next)
++	     + sizeof(p->Next) && p->Next))
+ 		cifs_dbg(VFS, "%s: incomplete interface info\n", __func__);
+ 
+ 	ses->iface_last_update = jiffies;
+diff --git a/fs/smb/client/smbdirect.c b/fs/smb/client/smbdirect.c
+index c661a8e6c18b85..b9bb531717a651 100644
+--- a/fs/smb/client/smbdirect.c
++++ b/fs/smb/client/smbdirect.c
+@@ -277,18 +277,20 @@ static void send_done(struct ib_cq *cq, struct ib_wc *wc)
+ 	log_rdma_send(INFO, "smbd_request 0x%p completed wc->status=%d\n",
+ 		request, wc->status);
+ 
+-	if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_SEND) {
+-		log_rdma_send(ERR, "wc->status=%d wc->opcode=%d\n",
+-			wc->status, wc->opcode);
+-		smbd_disconnect_rdma_connection(request->info);
+-	}
+-
+ 	for (i = 0; i < request->num_sge; i++)
+ 		ib_dma_unmap_single(sc->ib.dev,
+ 			request->sge[i].addr,
+ 			request->sge[i].length,
+ 			DMA_TO_DEVICE);
+ 
++	if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_SEND) {
++		log_rdma_send(ERR, "wc->status=%d wc->opcode=%d\n",
++			wc->status, wc->opcode);
++		mempool_free(request, info->request_mempool);
++		smbd_disconnect_rdma_connection(info);
++		return;
++	}
++
+ 	if (atomic_dec_and_test(&request->info->send_pending))
+ 		wake_up(&request->info->wait_send_pending);
+ 
+@@ -1314,10 +1316,6 @@ void smbd_destroy(struct TCP_Server_Info *server)
+ 	log_rdma_event(INFO, "cancelling idle timer\n");
+ 	cancel_delayed_work_sync(&info->idle_timer_work);
+ 
+-	log_rdma_event(INFO, "wait for all send posted to IB to finish\n");
+-	wait_event(info->wait_send_pending,
+-		atomic_read(&info->send_pending) == 0);
+-
+ 	/* It's not possible for upper layer to get to reassembly */
+ 	log_rdma_event(INFO, "drain the reassembly queue\n");
+ 	do {
+@@ -1691,7 +1689,6 @@ static struct smbd_connection *_smbd_get_connection(
+ 	cancel_delayed_work_sync(&info->idle_timer_work);
+ 	destroy_caches_and_workqueue(info);
+ 	sc->status = SMBDIRECT_SOCKET_NEGOTIATE_FAILED;
+-	init_waitqueue_head(&info->conn_wait);
+ 	rdma_disconnect(sc->rdma.cm_id);
+ 	wait_event(info->conn_wait,
+ 		sc->status == SMBDIRECT_SOCKET_DISCONNECTED);
+@@ -1963,7 +1960,11 @@ int smbd_send(struct TCP_Server_Info *server,
+ 	 */
+ 
+ 	wait_event(info->wait_send_pending,
+-		atomic_read(&info->send_pending) == 0);
++		atomic_read(&info->send_pending) == 0 ||
++		sc->status != SMBDIRECT_SOCKET_CONNECTED);
++
++	if (sc->status != SMBDIRECT_SOCKET_CONNECTED && rc == 0)
++		rc = -EAGAIN;
+ 
+ 	return rc;
+ }
+diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
+index 495a9faa298bdf..6dafc2fbac2585 100644
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -6023,7 +6023,6 @@ static int smb2_create_link(struct ksmbd_work *work,
+ {
+ 	char *link_name = NULL, *target_name = NULL, *pathname = NULL;
+ 	struct path path, parent_path;
+-	bool file_present = false;
+ 	int rc;
+ 
+ 	if (buf_len < (u64)sizeof(struct smb2_file_link_info) +
+@@ -6056,11 +6055,8 @@ static int smb2_create_link(struct ksmbd_work *work,
+ 	if (rc) {
+ 		if (rc != -ENOENT)
+ 			goto out;
+-	} else
+-		file_present = true;
+-
+-	if (file_info->ReplaceIfExists) {
+-		if (file_present) {
++	} else {
++		if (file_info->ReplaceIfExists) {
+ 			rc = ksmbd_vfs_remove_file(work, &path);
+ 			if (rc) {
+ 				rc = -EINVAL;
+@@ -6068,21 +6064,17 @@ static int smb2_create_link(struct ksmbd_work *work,
+ 					    link_name);
+ 				goto out;
+ 			}
+-		}
+-	} else {
+-		if (file_present) {
++		} else {
+ 			rc = -EEXIST;
+ 			ksmbd_debug(SMB, "link already exists\n");
+ 			goto out;
+ 		}
++		ksmbd_vfs_kern_path_unlock(&parent_path, &path);
+ 	}
+-
+ 	rc = ksmbd_vfs_link(work, target_name, link_name);
+ 	if (rc)
+ 		rc = -EINVAL;
+ out:
+-	if (file_present)
+-		ksmbd_vfs_kern_path_unlock(&parent_path, &path);
+ 
+ 	if (!IS_ERR(link_name))
+ 		kfree(link_name);
+diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c
+index cfc614c638daf6..9f15d606dfde79 100644
+--- a/fs/tracefs/inode.c
++++ b/fs/tracefs/inode.c
+@@ -464,9 +464,20 @@ static int tracefs_d_revalidate(struct dentry *dentry, unsigned int flags)
+ 	return !(ei && ei->is_freed);
+ }
+ 
++static int tracefs_d_delete(const struct dentry *dentry)
++{
++	/*
++	 * We want to keep eventfs dentries around but not tracefs
++	 * ones. eventfs dentries have content in d_fsdata.
++	 * Use d_fsdata to determine if it's a eventfs dentry or not.
++	 */
++	return dentry->d_fsdata == NULL;
++}
++
+ static const struct dentry_operations tracefs_dentry_operations = {
+ 	.d_revalidate = tracefs_d_revalidate,
+ 	.d_release = tracefs_d_release,
++	.d_delete = tracefs_d_delete,
+ };
+ 
+ static int tracefs_fill_super(struct super_block *sb, struct fs_context *fc)
+diff --git a/fs/udf/super.c b/fs/udf/super.c
+index 1c8a736b33097e..b2f168b0a0d18e 100644
+--- a/fs/udf/super.c
++++ b/fs/udf/super.c
+@@ -1440,7 +1440,7 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
+ 	struct genericPartitionMap *gpm;
+ 	uint16_t ident;
+ 	struct buffer_head *bh;
+-	unsigned int table_len;
++	unsigned int table_len, part_map_count;
+ 	int ret;
+ 
+ 	bh = udf_read_tagged(sb, block, block, &ident);
+@@ -1461,7 +1461,16 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
+ 					   "logical volume");
+ 	if (ret)
+ 		goto out_bh;
+-	ret = udf_sb_alloc_partition_maps(sb, le32_to_cpu(lvd->numPartitionMaps));
++
++	part_map_count = le32_to_cpu(lvd->numPartitionMaps);
++	if (part_map_count > table_len / sizeof(struct genericPartitionMap1)) {
++		udf_err(sb, "error loading logical volume descriptor: "
++			"Too many partition maps (%u > %u)\n", part_map_count,
++			table_len / (unsigned)sizeof(struct genericPartitionMap1));
++		ret = -EIO;
++		goto out_bh;
++	}
++	ret = udf_sb_alloc_partition_maps(sb, part_map_count);
+ 	if (ret)
+ 		goto out_bh;
+ 
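
The udf hunk bounds the untrusted on-disk map count by dividing the already-validated table length, rather than multiplying the count, so the comparison cannot wrap. Illustrative sizes below (the real divisor is sizeof(struct genericPartitionMap1); the wrap assumes the usual 32-bit unsigned int):

#include <stdio.h>

int main(void)
{
	unsigned int table_len = 64, entry_size = 8;
	unsigned int count = 0x20000001u;	/* untrusted on-disk value */

	printf("count * entry_size wraps to %u\n", count * entry_size);
	printf("naive check:  %s\n",
	       count * entry_size > table_len ? "rejected" : "accepted");
	printf("divide check: %s\n",
	       count > table_len / entry_size ? "rejected" : "accepted");
	return 0;
}
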
+diff --git a/fs/xfs/scrub/trace.h b/fs/xfs/scrub/trace.h
+index da773fee8638af..2fbc8508ccdf81 100644
+--- a/fs/xfs/scrub/trace.h
++++ b/fs/xfs/scrub/trace.h
+@@ -467,7 +467,7 @@ DECLARE_EVENT_CLASS(xchk_dqiter_class,
+ 		__field(xfs_exntst_t, state)
+ 	),
+ 	TP_fast_assign(
+-		__entry->dev = cursor->sc->ip->i_mount->m_super->s_dev;
++		__entry->dev = cursor->sc->mp->m_super->s_dev;
+ 		__entry->dqtype = cursor->dqtype;
+ 		__entry->ino = cursor->quota_ip->i_ino;
+ 		__entry->cur_id = cursor->id;
+diff --git a/include/linux/acpi.h b/include/linux/acpi.h
+index f826bb59556afe..0e8d684ad9f43a 100644
+--- a/include/linux/acpi.h
++++ b/include/linux/acpi.h
+@@ -1472,7 +1472,7 @@ int acpi_parse_spcr(bool enable_earlycon, bool enable_console);
+ #else
+ static inline int acpi_parse_spcr(bool enable_earlycon, bool enable_console)
+ {
+-	return 0;
++	return -ENODEV;
+ }
+ #endif
+ 
+diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
+index dce7615c35e7e3..f3f52ebc3e1edc 100644
+--- a/include/linux/blk_types.h
++++ b/include/linux/blk_types.h
+@@ -342,11 +342,11 @@ enum req_op {
+ 	/* Close a zone */
+ 	REQ_OP_ZONE_CLOSE	= (__force blk_opf_t)11,
+ 	/* Transition a zone to full */
+-	REQ_OP_ZONE_FINISH	= (__force blk_opf_t)12,
++	REQ_OP_ZONE_FINISH	= (__force blk_opf_t)13,
+ 	/* reset a zone write pointer */
+-	REQ_OP_ZONE_RESET	= (__force blk_opf_t)13,
++	REQ_OP_ZONE_RESET	= (__force blk_opf_t)15,
+ 	/* reset all the zone present on the device */
+-	REQ_OP_ZONE_RESET_ALL	= (__force blk_opf_t)15,
++	REQ_OP_ZONE_RESET_ALL	= (__force blk_opf_t)17,
+ 
+ 	/* Driver private requests */
+ 	REQ_OP_DRV_IN		= (__force blk_opf_t)34,
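
The REQ_OP renumbering above is not cosmetic: assuming op_is_write() keeps its historical low-bit test, REQ_OP_ZONE_FINISH at 12 was an even, non-write opcode and would have been dropped by the op_is_write() gate in the bio_needs_zone_write_plugging() helper added in the next hunk; 13, 15 and 17 are all odd, i.e. writes:

#include <stdio.h>

/* Assumed model of the block layer's op_is_write(): the data direction
 * lives in the opcode's low bit, so odd opcodes are writes. */
static int op_is_write(unsigned int op)
{
	return op & 1;
}

int main(void)
{
	printf("ZONE_FINISH old (12): write? %d\n", op_is_write(12)); /* 0 */
	printf("ZONE_FINISH new (13): write? %d\n", op_is_write(13)); /* 1 */
	printf("ZONE_RESET  new (15): write? %d\n", op_is_write(15)); /* 1 */
	return 0;
}
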
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index b94dc4b796f5a1..a901aed77141f0 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -682,12 +682,67 @@ static inline unsigned int disk_nr_zones(struct gendisk *disk)
+ {
+ 	return disk->nr_zones;
+ }
++
++/**
++ * bio_needs_zone_write_plugging - Check if a BIO needs to be handled with zone
++ *				   write plugging
++ * @bio: The BIO being submitted
++ *
++ * Return true whenever @bio execution needs to be handled through zone
++ * write plugging (using blk_zone_plug_bio()). Return false otherwise.
++ */
++static inline bool bio_needs_zone_write_plugging(struct bio *bio)
++{
++	enum req_op op = bio_op(bio);
++
++	/*
++	 * Only zoned block devices have a zone write plug hash table. But not
++	 * all of them have one (e.g. DM devices may not need one).
++	 */
++	if (!bio->bi_bdev->bd_disk->zone_wplugs_hash)
++		return false;
++
++	/* Only write operations need zone write plugging. */
++	if (!op_is_write(op))
++		return false;
++
++	/* Ignore empty flush */
++	if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
++		return false;
++
++	/* Ignore BIOs that already have been handled by zone write plugging. */
++	if (bio_flagged(bio, BIO_ZONE_WRITE_PLUGGING))
++		return false;
++
++	/*
++	 * All zone write operations must be handled through zone write plugging
++	 * using blk_zone_plug_bio().
++	 */
++	switch (op) {
++	case REQ_OP_ZONE_APPEND:
++	case REQ_OP_WRITE:
++	case REQ_OP_WRITE_ZEROES:
++	case REQ_OP_ZONE_FINISH:
++	case REQ_OP_ZONE_RESET:
++	case REQ_OP_ZONE_RESET_ALL:
++		return true;
++	default:
++		return false;
++	}
++}
++
+ bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs);
+ #else /* CONFIG_BLK_DEV_ZONED */
+ static inline unsigned int disk_nr_zones(struct gendisk *disk)
+ {
+ 	return 0;
+ }
++
++static inline bool bio_needs_zone_write_plugging(struct bio *bio)
++{
++	return false;
++}
++
+ static inline bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs)
+ {
+ 	return false;
+diff --git a/include/linux/hypervisor.h b/include/linux/hypervisor.h
+index 9efbc54e35e596..be5417303ecf69 100644
+--- a/include/linux/hypervisor.h
++++ b/include/linux/hypervisor.h
+@@ -37,6 +37,9 @@ static inline bool hypervisor_isolated_pci_functions(void)
+ 	if (IS_ENABLED(CONFIG_S390))
+ 		return true;
+ 
++	if (IS_ENABLED(CONFIG_LOONGARCH))
++		return true;
++
+ 	return jailhouse_paravirt();
+ }
+ 
+diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
+index d65b5d71b93bf8..9551dba15cc250 100644
+--- a/include/linux/if_vlan.h
++++ b/include/linux/if_vlan.h
+@@ -79,11 +79,6 @@ static inline struct vlan_ethhdr *skb_vlan_eth_hdr(const struct sk_buff *skb)
+ /* found in socket.c */
+ extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *));
+ 
+-static inline bool is_vlan_dev(const struct net_device *dev)
+-{
+-        return dev->priv_flags & IFF_802_1Q_VLAN;
+-}
+-
+ #define skb_vlan_tag_present(__skb)	(!!(__skb)->vlan_all)
+ #define skb_vlan_tag_get(__skb)		((__skb)->vlan_tci)
+ #define skb_vlan_tag_get_id(__skb)	((__skb)->vlan_tci & VLAN_VID_MASK)
+@@ -199,6 +194,11 @@ struct vlan_dev_priv {
+ #endif
+ };
+ 
++static inline bool is_vlan_dev(const struct net_device *dev)
++{
++	return dev->priv_flags & IFF_802_1Q_VLAN;
++}
++
+ static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev)
+ {
+ 	return netdev_priv(dev);
+@@ -236,6 +236,11 @@ extern void vlan_vids_del_by_dev(struct net_device *dev,
+ extern bool vlan_uses_dev(const struct net_device *dev);
+ 
+ #else
++static inline bool is_vlan_dev(const struct net_device *dev)
++{
++	return false;
++}
++
+ static inline struct net_device *
+ __vlan_find_dev_deep_rcu(struct net_device *real_dev,
+ 		     __be16 vlan_proto, u16 vlan_id)
+@@ -253,19 +258,19 @@ vlan_for_each(struct net_device *dev,
+ 
+ static inline struct net_device *vlan_dev_real_dev(const struct net_device *dev)
+ {
+-	BUG();
++	WARN_ON_ONCE(1);
+ 	return NULL;
+ }
+ 
+ static inline u16 vlan_dev_vlan_id(const struct net_device *dev)
+ {
+-	BUG();
++	WARN_ON_ONCE(1);
+ 	return 0;
+ }
+ 
+ static inline __be16 vlan_dev_vlan_proto(const struct net_device *dev)
+ {
+-	BUG();
++	WARN_ON_ONCE(1);
+ 	return 0;
+ }
+ 
+diff --git a/include/linux/libata.h b/include/linux/libata.h
+index 2d3bfec568ebe5..1983a98e3d6776 100644
+--- a/include/linux/libata.h
++++ b/include/linux/libata.h
+@@ -547,6 +547,7 @@ typedef void (*ata_postreset_fn_t)(struct ata_link *link, unsigned int *classes)
+ 
+ extern struct device_attribute dev_attr_unload_heads;
+ #ifdef CONFIG_SATA_HOST
++extern struct device_attribute dev_attr_link_power_management_supported;
+ extern struct device_attribute dev_attr_link_power_management_policy;
+ extern struct device_attribute dev_attr_ncq_prio_supported;
+ extern struct device_attribute dev_attr_ncq_prio_enable;
+diff --git a/include/linux/memory-tiers.h b/include/linux/memory-tiers.h
+index 0dc0cf2863e2ad..7a805796fcfd07 100644
+--- a/include/linux/memory-tiers.h
++++ b/include/linux/memory-tiers.h
+@@ -18,7 +18,7 @@
+  * adistance value (slightly faster) than default DRAM adistance to be part of
+  * the same memory tier.
+  */
+-#define MEMTIER_ADISTANCE_DRAM	((4 * MEMTIER_CHUNK_SIZE) + (MEMTIER_CHUNK_SIZE >> 1))
++#define MEMTIER_ADISTANCE_DRAM	((4L * MEMTIER_CHUNK_SIZE) + (MEMTIER_CHUNK_SIZE >> 1))
+ 
+ struct memory_tier;
+ struct memory_dev_type {
+diff --git a/include/linux/packing.h b/include/linux/packing.h
+index 8d6571feb95de8..60117401c7ee19 100644
+--- a/include/linux/packing.h
++++ b/include/linux/packing.h
+@@ -5,8 +5,12 @@
+ #ifndef _LINUX_PACKING_H
+ #define _LINUX_PACKING_H
+ 
+-#include <linux/types.h>
++#include <linux/array_size.h>
+ #include <linux/bitops.h>
++#include <linux/build_bug.h>
++#include <linux/minmax.h>
++#include <linux/stddef.h>
++#include <linux/types.h>
+ 
+ #define QUIRK_MSB_ON_THE_RIGHT	BIT(0)
+ #define QUIRK_LITTLE_ENDIAN	BIT(1)
+diff --git a/include/linux/pci.h b/include/linux/pci.h
+index ade889ded4e1e9..6b3fef24d60e72 100644
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -320,7 +320,19 @@ struct pci_sriov;
+ struct pci_p2pdma;
+ struct rcec_ea;
+ 
+-/* The pci_dev structure describes PCI devices */
++/* struct pci_dev - describes a PCI device
++ *
++ * @supported_speeds:	PCIe Supported Link Speeds Vector (+ reserved 0 at
++ *			LSB). 0 when the supported speeds cannot be
++ *			determined (e.g., for Root Complex Integrated
++ *			Endpoints without the relevant Capability
++ *			Registers).
++ * @is_hotplug_bridge:	Hotplug bridge of any kind (e.g. PCIe Hot-Plug Capable,
++ *			Conventional PCI Hot-Plug, ACPI slot).
++ *			Such bridges are allocated additional MMIO and bus
++ *			number resources to allow for hierarchy expansion.
++ * @is_pciehp:		PCIe Hot-Plug Capable bridge.
++ */
+ struct pci_dev {
+ 	struct list_head bus_list;	/* Node in per-bus list */
+ 	struct pci_bus	*bus;		/* Bus this device is on */
+@@ -443,6 +455,7 @@ struct pci_dev {
+ 	unsigned int	is_physfn:1;
+ 	unsigned int	is_virtfn:1;
+ 	unsigned int	is_hotplug_bridge:1;
++	unsigned int	is_pciehp:1;
+ 	unsigned int	shpc_managed:1;		/* SHPC owned by shpchp */
+ 	unsigned int	is_thunderbolt:1;	/* Thunderbolt controller */
+ 	/*
+@@ -524,6 +537,7 @@ struct pci_dev {
+ 	struct npem	*npem;		/* Native PCIe Enclosure Management */
+ #endif
+ 	u16		acs_cap;	/* ACS Capability offset */
++	u8		supported_speeds; /* Supported Link Speeds Vector */
+ 	phys_addr_t	rom;		/* Physical address if not from BAR */
+ 	size_t		romlen;		/* Length if not from BAR */
+ 	/*
+diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h
+index 189140bf11fc40..4adf4b364fcda9 100644
+--- a/include/linux/sbitmap.h
++++ b/include/linux/sbitmap.h
+@@ -213,12 +213,12 @@ int sbitmap_get(struct sbitmap *sb);
+  * sbitmap_get_shallow() - Try to allocate a free bit from a &struct sbitmap,
+  * limiting the depth used from each word.
+  * @sb: Bitmap to allocate from.
+- * @shallow_depth: The maximum number of bits to allocate from a single word.
++ * @shallow_depth: The maximum number of bits to allocate from the bitmap.
+  *
+  * This rather specific operation allows for having multiple users with
+  * different allocation limits. E.g., there can be a high-priority class that
+  * uses sbitmap_get() and a low-priority class that uses sbitmap_get_shallow()
+- * with a @shallow_depth of (1 << (@sb->shift - 1)). Then, the low-priority
++ * with a @shallow_depth of (sb->depth >> 1). Then, the low-priority
+  * class can only allocate half of the total bits in the bitmap, preventing it
+  * from starving out the high-priority class.
+  *
+@@ -478,7 +478,7 @@ unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
+  * sbitmap_queue, limiting the depth used from each word, with preemption
+  * already disabled.
+  * @sbq: Bitmap queue to allocate from.
+- * @shallow_depth: The maximum number of bits to allocate from a single word.
++ * @shallow_depth: The maximum number of bits to allocate from the queue.
+  * See sbitmap_get_shallow().
+  *
+  * If you call this, make sure to call sbitmap_queue_min_shallow_depth() after
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index a726a698aac405..b2827fce5a2de7 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -3638,7 +3638,13 @@ static inline void *skb_frag_address(const skb_frag_t *frag)
+  */
+ static inline void *skb_frag_address_safe(const skb_frag_t *frag)
+ {
+-	void *ptr = page_address(skb_frag_page(frag));
++	struct page *page = skb_frag_page(frag);
++	void *ptr;
++
++	if (!page)
++		return NULL;
++
++	ptr = page_address(page);
+ 	if (unlikely(!ptr))
+ 		return NULL;
+ 
+diff --git a/include/linux/usb/cdc_ncm.h b/include/linux/usb/cdc_ncm.h
+index 2d207cb4837dbf..4ac082a6317381 100644
+--- a/include/linux/usb/cdc_ncm.h
++++ b/include/linux/usb/cdc_ncm.h
+@@ -119,6 +119,7 @@ struct cdc_ncm_ctx {
+ 	u32 timer_interval;
+ 	u32 max_ndp_size;
+ 	u8 is_ndp16;
++	u8 filtering_supported;
+ 	union {
+ 		struct usb_cdc_ncm_ndp16 *delayed_ndp16;
+ 		struct usb_cdc_ncm_ndp32 *delayed_ndp32;
+diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h
+index 36fb3edfa403d9..6c00687539cf46 100644
+--- a/include/linux/virtio_vsock.h
++++ b/include/linux/virtio_vsock.h
+@@ -111,7 +111,12 @@ static inline size_t virtio_vsock_skb_len(struct sk_buff *skb)
+ 	return (size_t)(skb_end_pointer(skb) - skb->head);
+ }
+ 
+-#define VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE	(1024 * 4)
++/* Dimension the RX SKB so that the entire thing fits exactly into
++ * a single 4KiB page. This avoids wasting memory due to alloc_skb()
++ * rounding up to the next page order and also means that we
++ * don't leave higher-order pages sitting around in the RX queue.
++ */
++#define VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE	SKB_WITH_OVERHEAD(1024 * 4)
+ #define VIRTIO_VSOCK_MAX_BUF_SIZE		0xFFFFFFFFUL
+ #define VIRTIO_VSOCK_MAX_PKT_BUF_SIZE		(1024 * 64)
+ 
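
The vsock change sizes the receive buffer with SKB_WITH_OVERHEAD(), which subtracts the cache-line-aligned struct skb_shared_info footprint from the requested size so that data plus metadata fit in exactly one 4KiB allocation. A model with illustrative constants (the real shared-info and cache-line sizes vary by build):

#include <stdio.h>

#define CACHE_BYTES	64
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	size_t shinfo = 320;	/* assumed sizeof(struct skb_shared_info) */
	size_t rx_buf = 4096 - ALIGN_UP(shinfo, CACHE_BYTES);

	printf("rx buffer: %zu of 4096 bytes\n", rx_buf);	/* 3776 */
	return 0;
}
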
+diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
+index bb1862536f9ca5..c555d9964702ca 100644
+--- a/include/net/cfg80211.h
++++ b/include/net/cfg80211.h
+@@ -633,7 +633,7 @@ ieee80211_get_sband_iftype_data(const struct ieee80211_supported_band *sband,
+ 	const struct ieee80211_sband_iftype_data *data;
+ 	int i;
+ 
+-	if (WARN_ON(iftype >= NL80211_IFTYPE_MAX))
++	if (WARN_ON(iftype >= NUM_NL80211_IFTYPES))
+ 		return NULL;
+ 
+ 	if (iftype == NL80211_IFTYPE_AP_VLAN)
+diff --git a/include/net/kcm.h b/include/net/kcm.h
+index 441e993be634ce..d9c35e71ecea40 100644
+--- a/include/net/kcm.h
++++ b/include/net/kcm.h
+@@ -71,7 +71,6 @@ struct kcm_sock {
+ 	struct list_head wait_psock_list;
+ 	struct sk_buff *seq_skb;
+ 	struct mutex tx_mutex;
+-	u32 tx_stopped : 1;
+ 
+ 	/* Don't use bit fields here, these are set under different locks */
+ 	bool tx_wait;
+diff --git a/include/net/mac80211.h b/include/net/mac80211.h
+index 8e70941602064e..80259a37e72476 100644
+--- a/include/net/mac80211.h
++++ b/include/net/mac80211.h
+@@ -4269,6 +4269,8 @@ struct ieee80211_prep_tx_info {
+  * @mgd_complete_tx: Notify the driver that the response frame for a previously
+  *	transmitted frame announced with @mgd_prepare_tx was received, the data
+  *	is filled similarly to @mgd_prepare_tx though the duration is not used.
++ *	Note that this isn't always called for each mgd_prepare_tx() call; for
++ *	example, for SAE the 'confirm' messages can be on the air in any order.
+  *
+  * @mgd_protect_tdls_discover: Protect a TDLS discovery session. After sending
+  *	a TDLS discovery-request, we expect a reply to arrive on the AP's
+@@ -4433,6 +4435,8 @@ struct ieee80211_prep_tx_info {
+  *	new links bitmaps may be 0 if going from/to a non-MLO situation.
+  *	The @old array contains pointers to the old bss_conf structures
+  *	that were already removed, in case they're needed.
++ *	Note that removal of a link should always succeed, so the return value
++ *	will be ignored in a removal-only case.
+  *	This callback can sleep.
+  * @change_sta_links: Change the valid links of a station, similar to
+  *	@change_vif_links. This callback can sleep.
+diff --git a/include/net/neighbour.h b/include/net/neighbour.h
+index a44f262a738415..cb5f835a5d61b4 100644
+--- a/include/net/neighbour.h
++++ b/include/net/neighbour.h
+@@ -180,6 +180,7 @@ struct pneigh_entry {
+ 	netdevice_tracker	dev_tracker;
+ 	u32			flags;
+ 	u8			protocol;
++	bool			permanent;
+ 	u32			key[];
+ };
+ 
+diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
+index da93873df4dbd7..022ee2fc627cff 100644
+--- a/include/net/net_namespace.h
++++ b/include/net/net_namespace.h
+@@ -291,6 +291,7 @@ static inline int check_net(const struct net *net)
+ }
+ 
+ void net_drop_ns(void *);
++void net_passive_dec(struct net *net);
+ 
+ #else
+ 
+@@ -320,8 +321,23 @@ static inline int check_net(const struct net *net)
+ }
+ 
+ #define net_drop_ns NULL
++
++static inline void net_passive_dec(struct net *net)
++{
++	refcount_dec(&net->passive);
++}
+ #endif
+ 
++static inline void net_passive_inc(struct net *net)
++{
++	refcount_inc(&net->passive);
++}
++
++/* Returns true if the netns initialization has completed successfully */
++static inline bool net_initialized(const struct net *net)
++{
++	return READ_ONCE(net->list.next);
++}
+ 
+ static inline void __netns_tracker_alloc(struct net *net,
+ 					 netns_tracker *tracker,
+diff --git a/include/net/sock.h b/include/net/sock.h
+index b7270b6b9e9cc1..722f409cccd35c 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -1780,6 +1780,7 @@ static inline bool sock_allow_reclassification(const struct sock *csk)
+ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
+ 		      struct proto *prot, int kern);
+ void sk_free(struct sock *sk);
++void sk_net_refcnt_upgrade(struct sock *sk);
+ void sk_destruct(struct sock *sk);
+ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority);
+ void sk_free_unlock_clone(struct sock *sk);
+diff --git a/include/trace/events/thp.h b/include/trace/events/thp.h
+index f50048af5fcc28..c8fe879d5828bd 100644
+--- a/include/trace/events/thp.h
++++ b/include/trace/events/thp.h
+@@ -8,6 +8,7 @@
+ #include <linux/types.h>
+ #include <linux/tracepoint.h>
+ 
++#ifdef CONFIG_PPC_BOOK3S_64
+ DECLARE_EVENT_CLASS(hugepage_set,
+ 
+ 	    TP_PROTO(unsigned long addr, unsigned long pte),
+@@ -66,6 +67,7 @@ DEFINE_EVENT(hugepage_update, hugepage_update_pud,
+ 	    TP_PROTO(unsigned long addr, unsigned long pud, unsigned long clr, unsigned long set),
+ 	    TP_ARGS(addr, pud, clr, set)
+ );
++#endif /* CONFIG_PPC_BOOK3S_64 */
+ 
+ DECLARE_EVENT_CLASS(migration_pmd,
+ 
+diff --git a/include/uapi/linux/in6.h b/include/uapi/linux/in6.h
+index ff8d21f9e95b77..5a47339ef7d768 100644
+--- a/include/uapi/linux/in6.h
++++ b/include/uapi/linux/in6.h
+@@ -152,7 +152,6 @@ struct in6_flowlabel_req {
+ /*
+  *	IPV6 socket options
+  */
+-#if __UAPI_DEF_IPV6_OPTIONS
+ #define IPV6_ADDRFORM		1
+ #define IPV6_2292PKTINFO	2
+ #define IPV6_2292HOPOPTS	3
+@@ -169,8 +168,10 @@ struct in6_flowlabel_req {
+ #define IPV6_MULTICAST_IF	17
+ #define IPV6_MULTICAST_HOPS	18
+ #define IPV6_MULTICAST_LOOP	19
++#if __UAPI_DEF_IPV6_OPTIONS
+ #define IPV6_ADD_MEMBERSHIP	20
+ #define IPV6_DROP_MEMBERSHIP	21
++#endif
+ #define IPV6_ROUTER_ALERT	22
+ #define IPV6_MTU_DISCOVER	23
+ #define IPV6_MTU		24
+@@ -203,7 +204,6 @@ struct in6_flowlabel_req {
+ #define IPV6_IPSEC_POLICY	34
+ #define IPV6_XFRM_POLICY	35
+ #define IPV6_HDRINCL		36
+-#endif
+ 
+ /*
+  * Multicast:
+diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
+index 1fe79e750470bf..33cbe3a4ed3edb 100644
+--- a/include/uapi/linux/io_uring.h
++++ b/include/uapi/linux/io_uring.h
+@@ -50,7 +50,7 @@ struct io_uring_sqe {
+ 	};
+ 	__u32	len;		/* buffer size or number of iovecs */
+ 	union {
+-		__kernel_rwf_t	rw_flags;
++		__u32		rw_flags;
+ 		__u32		fsync_flags;
+ 		__u16		poll_events;	/* compatibility */
+ 		__u32		poll32_events;	/* word-reversed for BE */
+diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
+index 12323b3334a9c1..f3c9de0a497cf4 100644
+--- a/include/uapi/linux/pci_regs.h
++++ b/include/uapi/linux/pci_regs.h
+@@ -678,6 +678,7 @@
+ #define PCI_EXP_DEVSTA2		0x2a	/* Device Status 2 */
+ #define PCI_CAP_EXP_RC_ENDPOINT_SIZEOF_V2 0x2c	/* end of v2 EPs w/o link */
+ #define PCI_EXP_LNKCAP2		0x2c	/* Link Capabilities 2 */
++#define  PCI_EXP_LNKCAP2_SLS		0x000000fe /* Supported Link Speeds Vector */
+ #define  PCI_EXP_LNKCAP2_SLS_2_5GB	0x00000002 /* Supported Speed 2.5GT/s */
+ #define  PCI_EXP_LNKCAP2_SLS_5_0GB	0x00000004 /* Supported Speed 5GT/s */
+ #define  PCI_EXP_LNKCAP2_SLS_8_0GB	0x00000008 /* Supported Speed 8GT/s */
+diff --git a/io_uring/rw.c b/io_uring/rw.c
+index a1ed64760eba2d..3ad104cf1e7d83 100644
+--- a/io_uring/rw.c
++++ b/io_uring/rw.c
+@@ -282,7 +282,7 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
+ 
+ 	rw->addr = READ_ONCE(sqe->addr);
+ 	rw->len = READ_ONCE(sqe->len);
+-	rw->flags = READ_ONCE(sqe->rw_flags);
++	rw->flags = (__force rwf_t) READ_ONCE(sqe->rw_flags);
+ 	return io_prep_rw_setup(req, ddir, do_import);
+ }
+ 
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 531412c5103dcc..24ae8f33e5d76d 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -400,7 +400,8 @@ static bool reg_not_null(const struct bpf_reg_state *reg)
+ 		type == PTR_TO_MAP_KEY ||
+ 		type == PTR_TO_SOCK_COMMON ||
+ 		(type == PTR_TO_BTF_ID && is_trusted_reg(reg)) ||
+-		type == PTR_TO_MEM;
++		type == PTR_TO_MEM ||
++		type == CONST_PTR_TO_MAP;
+ }
+ 
+ static struct btf_record *reg_btf_record(const struct bpf_reg_state *reg)
+@@ -15052,6 +15053,10 @@ static void regs_refine_cond_op(struct bpf_reg_state *reg1, struct bpf_reg_state
+ 		if (!is_reg_const(reg2, is_jmp32))
+ 			break;
+ 		val = reg_const_value(reg2, is_jmp32);
++		/* Forget the ranges before narrowing tnums, to avoid invariant
++		 * violations if we're on a dead branch.
++		 */
++		__mark_reg_unbounded(reg1);
+ 		if (is_jmp32) {
+ 			t = tnum_and(tnum_subreg(reg1->var_off), tnum_const(~val));
+ 			reg1->var_off = tnum_with_subreg(reg1->var_off, t);
+diff --git a/kernel/module/main.c b/kernel/module/main.c
+index 6908062f456039..4511d0a4762a2f 100644
+--- a/kernel/module/main.c
++++ b/kernel/module/main.c
+@@ -703,14 +703,16 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
+ 	struct module *mod;
+ 	char name[MODULE_NAME_LEN];
+ 	char buf[MODULE_FLAGS_BUF_SIZE];
+-	int ret, forced = 0;
++	int ret, len, forced = 0;
+ 
+ 	if (!capable(CAP_SYS_MODULE) || modules_disabled)
+ 		return -EPERM;
+ 
+-	if (strncpy_from_user(name, name_user, MODULE_NAME_LEN-1) < 0)
+-		return -EFAULT;
+-	name[MODULE_NAME_LEN-1] = '\0';
++	len = strncpy_from_user(name, name_user, MODULE_NAME_LEN);
++	if (len == 0 || len == MODULE_NAME_LEN)
++		return -ENOENT;
++	if (len < 0)
++		return len;
+ 
+ 	audit_log_kern_module(name);
+ 
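
The new check leans on the strncpy_from_user() return convention: a
negative value is a fault, 0 means the user passed an empty string, and a
return equal to the buffer size means no terminating NUL was seen, i.e.
the name is too long to be a valid module name. A hedged user-space
stand-in (fake_strncpy_from_user() is an assumption for illustration, not
the kernel routine):

#include <stdio.h>

#define MODULE_NAME_LEN 56

/* Mimics strncpy_from_user(): returns the string length on success
 * (NUL not counted), or count if the source did not fit, in which
 * case the destination is left unterminated.
 */
static long fake_strncpy_from_user(char *dst, const char *src, long count)
{
	long i;

	for (i = 0; i < count; i++) {
		dst[i] = src[i];
		if (!src[i])
			return i;
	}
	return count;
}

int main(void)
{
	char name[MODULE_NAME_LEN];
	long len = fake_strncpy_from_user(name, "e1000e", MODULE_NAME_LEN);

	if (len == 0 || len == MODULE_NAME_LEN) {
		printf("would return -ENOENT\n");
		return 1;
	}
	printf("deleting \"%s\" (%ld chars)\n", name, len);
	return 0;
}
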
+diff --git a/kernel/power/console.c b/kernel/power/console.c
+index fcdf0e14a47d47..19c48aa5355d2b 100644
+--- a/kernel/power/console.c
++++ b/kernel/power/console.c
+@@ -16,6 +16,7 @@
+ #define SUSPEND_CONSOLE	(MAX_NR_CONSOLES-1)
+ 
+ static int orig_fgconsole, orig_kmsg;
++static bool vt_switch_done;
+ 
+ static DEFINE_MUTEX(vt_switch_mutex);
+ 
+@@ -136,17 +137,21 @@ void pm_prepare_console(void)
+ 	if (orig_fgconsole < 0)
+ 		return;
+ 
++	vt_switch_done = true;
++
+ 	orig_kmsg = vt_kmsg_redirect(SUSPEND_CONSOLE);
+ 	return;
+ }
+ 
+ void pm_restore_console(void)
+ {
+-	if (!pm_vt_switch())
++	if (!pm_vt_switch() && !vt_switch_done)
+ 		return;
+ 
+ 	if (orig_fgconsole >= 0) {
+ 		vt_move_to_console(orig_fgconsole, 0);
+ 		vt_kmsg_redirect(orig_kmsg);
+ 	}
++
++	vt_switch_done = false;
+ }
+diff --git a/kernel/printk/nbcon.c b/kernel/printk/nbcon.c
+index fd12efcc4aeda8..e7a3af81b17397 100644
+--- a/kernel/printk/nbcon.c
++++ b/kernel/printk/nbcon.c
+@@ -214,8 +214,9 @@ static void nbcon_seq_try_update(struct nbcon_context *ctxt, u64 new_seq)
+ 
+ /**
+  * nbcon_context_try_acquire_direct - Try to acquire directly
+- * @ctxt:	The context of the caller
+- * @cur:	The current console state
++ * @ctxt:		The context of the caller
++ * @cur:		The current console state
++ * @is_reacquire:	This acquire is a reacquire
+  *
+  * Acquire the console when it is released. Also acquire the console when
+  * the current owner has a lower priority and the console is in a safe state.
+@@ -225,17 +226,17 @@ static void nbcon_seq_try_update(struct nbcon_context *ctxt, u64 new_seq)
+  *
+  * Errors:
+  *
+- *	-EPERM:		A panic is in progress and this is not the panic CPU.
+- *			Or the current owner or waiter has the same or higher
+- *			priority. No acquire method can be successful in
+- *			this case.
++ *	-EPERM:		A panic is in progress and this is neither the panic
++ *			CPU nor is this a reacquire. Or the current owner or
++ *			waiter has the same or higher priority. No acquire
++ *			method can be successful in these cases.
+  *
+  *	-EBUSY:		The current owner has a lower priority but the console
+  *			in an unsafe state. The caller should try using
+  *			the handover acquire method.
+  */
+ static int nbcon_context_try_acquire_direct(struct nbcon_context *ctxt,
+-					    struct nbcon_state *cur)
++					    struct nbcon_state *cur, bool is_reacquire)
+ {
+ 	unsigned int cpu = smp_processor_id();
+ 	struct console *con = ctxt->console;
+@@ -243,14 +244,20 @@ static int nbcon_context_try_acquire_direct(struct nbcon_context *ctxt,
+ 
+ 	do {
+ 		/*
+-		 * Panic does not imply that the console is owned. However, it
+-		 * is critical that non-panic CPUs during panic are unable to
+-		 * acquire ownership in order to satisfy the assumptions of
+-		 * nbcon_waiter_matches(). In particular, the assumption that
+-		 * lower priorities are ignored during panic.
++		 * Panic does not imply that the console is owned. However,
++		 * since all non-panic CPUs are stopped during panic(), it
++		 * is safer to have them avoid gaining console ownership.
++		 *
++		 * If this acquire is a reacquire (and an unsafe takeover
++		 * has not previously occurred) then it is allowed to attempt
++		 * a direct acquire in panic. This gives console drivers an
++		 * opportunity to perform any necessary cleanup if they were
++		 * interrupted by the panic CPU while printing.
+ 		 */
+-		if (other_cpu_in_panic())
++		if (other_cpu_in_panic() &&
++		    (!is_reacquire || cur->unsafe_takeover)) {
+ 			return -EPERM;
++		}
+ 
+ 		if (ctxt->prio <= cur->prio || ctxt->prio <= cur->req_prio)
+ 			return -EPERM;
+@@ -301,8 +308,9 @@ static bool nbcon_waiter_matches(struct nbcon_state *cur, int expected_prio)
+ 	 * Event #1 implies this context is EMERGENCY.
+ 	 * Event #2 implies the new context is PANIC.
+ 	 * Event #3 occurs when panic() has flushed the console.
+-	 * Events #4 and #5 are not possible due to the other_cpu_in_panic()
+-	 * check in nbcon_context_try_acquire_direct().
++	 * Event #4 occurs when a non-panic CPU reacquires.
++	 * Event #5 is not possible due to the other_cpu_in_panic() check
++	 *          in nbcon_context_try_acquire_handover().
+ 	 */
+ 
+ 	return (cur->req_prio == expected_prio);
+@@ -431,6 +439,16 @@ static int nbcon_context_try_acquire_handover(struct nbcon_context *ctxt,
+ 	WARN_ON_ONCE(ctxt->prio <= cur->prio || ctxt->prio <= cur->req_prio);
+ 	WARN_ON_ONCE(!cur->unsafe);
+ 
++	/*
++	 * Panic does not imply that the console is owned. However, it
++	 * is critical that non-panic CPUs during panic are unable to
++	 * wait for a handover in order to satisfy the assumptions of
++	 * nbcon_waiter_matches(). In particular, the assumption that
++	 * lower priorities are ignored during panic.
++	 */
++	if (other_cpu_in_panic())
++		return -EPERM;
++
+ 	/* Handover is not possible on the same CPU. */
+ 	if (cur->cpu == cpu)
+ 		return -EBUSY;
+@@ -558,7 +576,8 @@ static struct printk_buffers panic_nbcon_pbufs;
+ 
+ /**
+  * nbcon_context_try_acquire - Try to acquire nbcon console
+- * @ctxt:	The context of the caller
++ * @ctxt:		The context of the caller
++ * @is_reacquire:	This acquire is a reacquire
+  *
+  * Context:	Under @ctxt->con->device_lock() or local_irq_save().
+  * Return:	True if the console was acquired. False otherwise.
+@@ -568,7 +587,7 @@ static struct printk_buffers panic_nbcon_pbufs;
+  * in an unsafe state. Otherwise, on success the caller may assume
+  * the console is not in an unsafe state.
+  */
+-static bool nbcon_context_try_acquire(struct nbcon_context *ctxt)
++static bool nbcon_context_try_acquire(struct nbcon_context *ctxt, bool is_reacquire)
+ {
+ 	unsigned int cpu = smp_processor_id();
+ 	struct console *con = ctxt->console;
+@@ -577,7 +596,7 @@ static bool nbcon_context_try_acquire(struct nbcon_context *ctxt)
+ 
+ 	nbcon_state_read(con, &cur);
+ try_again:
+-	err = nbcon_context_try_acquire_direct(ctxt, &cur);
++	err = nbcon_context_try_acquire_direct(ctxt, &cur, is_reacquire);
+ 	if (err != -EBUSY)
+ 		goto out;
+ 
+@@ -913,7 +932,7 @@ void nbcon_reacquire_nobuf(struct nbcon_write_context *wctxt)
+ {
+ 	struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
+ 
+-	while (!nbcon_context_try_acquire(ctxt))
++	while (!nbcon_context_try_acquire(ctxt, true))
+ 		cpu_relax();
+ 
+ 	nbcon_write_context_set_buf(wctxt, NULL, 0);
+@@ -1101,7 +1120,7 @@ static bool nbcon_emit_one(struct nbcon_write_context *wctxt, bool use_atomic)
+ 		cant_migrate();
+ 	}
+ 
+-	if (!nbcon_context_try_acquire(ctxt))
++	if (!nbcon_context_try_acquire(ctxt, false))
+ 		goto out;
+ 
+ 	/*
+@@ -1486,7 +1505,7 @@ static int __nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq,
+ 	ctxt->prio			= nbcon_get_default_prio();
+ 	ctxt->allow_unsafe_takeover	= allow_unsafe_takeover;
+ 
+-	if (!nbcon_context_try_acquire(ctxt))
++	if (!nbcon_context_try_acquire(ctxt, false))
+ 		return -EPERM;
+ 
+ 	while (nbcon_seq_read(con) < stop_seq) {
+@@ -1762,7 +1781,7 @@ bool nbcon_device_try_acquire(struct console *con)
+ 	ctxt->console	= con;
+ 	ctxt->prio	= NBCON_PRIO_NORMAL;
+ 
+-	if (!nbcon_context_try_acquire(ctxt))
++	if (!nbcon_context_try_acquire(ctxt, false))
+ 		return false;
+ 
+ 	if (!nbcon_context_enter_unsafe(ctxt))
+diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
+index 552464dcffe270..7b073b8b5e91af 100644
+--- a/kernel/rcu/tree.c
++++ b/kernel/rcu/tree.c
+@@ -5027,6 +5027,8 @@ int rcutree_prepare_cpu(unsigned int cpu)
+ 	rdp->rcu_iw_gp_seq = rdp->gp_seq - 1;
+ 	trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl"));
+ 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
++
++	rcu_preempt_deferred_qs_init(rdp);
+ 	rcu_spawn_rnp_kthreads(rnp);
+ 	rcu_spawn_cpu_nocb_kthread(cpu);
+ 	ASSERT_EXCLUSIVE_WRITER(rcu_state.n_online_cpus);
+diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
+index 1bba2225e7448b..8ba04b179416a0 100644
+--- a/kernel/rcu/tree.h
++++ b/kernel/rcu/tree.h
+@@ -174,6 +174,17 @@ struct rcu_snap_record {
+ 	unsigned long   jiffies;	/* Track jiffies value */
+ };
+ 
++/*
++ * An IRQ work (deferred_qs_iw) is used by RCU to get the scheduler's attention
++ * in order to report quiescent states at the soonest possible time.
++ * The request can be in one of the following states:
++ * - DEFER_QS_IDLE: An IRQ work is yet to be scheduled.
++ * - DEFER_QS_PENDING: An IRQ work was scheduled but either not yet run, or it
++ *                     ran and we still haven't reported a quiescent state.
++ */
++#define DEFER_QS_IDLE		0
++#define DEFER_QS_PENDING	1
++
+ /* Per-CPU data for read-copy update. */
+ struct rcu_data {
+ 	/* 1) quiescent-state and grace-period handling : */
+@@ -191,7 +202,7 @@ struct rcu_data {
+ 					/*  during and after the last grace */
+ 					/* period it is aware of. */
+ 	struct irq_work defer_qs_iw;	/* Obtain later scheduler attention. */
+-	bool defer_qs_iw_pending;	/* Scheduler attention pending? */
++	int defer_qs_iw_pending;	/* Scheduler attention pending? */
+ 	struct work_struct strict_work;	/* Schedule readers for strict GPs. */
+ 
+ 	/* 2) batch handling */
+@@ -476,6 +487,7 @@ static int rcu_print_task_exp_stall(struct rcu_node *rnp);
+ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
+ static void rcu_flavor_sched_clock_irq(int user);
+ static void dump_blkd_tasks(struct rcu_node *rnp, int ncheck);
++static void rcu_preempt_deferred_qs_init(struct rcu_data *rdp);
+ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
+ static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
+ static bool rcu_is_callbacks_kthread(struct rcu_data *rdp);
+diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h
+index 2ad3a88623a7c4..a1a5942d70170b 100644
+--- a/kernel/rcu/tree_nocb.h
++++ b/kernel/rcu/tree_nocb.h
+@@ -1152,7 +1152,6 @@ static bool rcu_nocb_rdp_offload_wait_cond(struct rcu_data *rdp)
+ static int rcu_nocb_rdp_offload(struct rcu_data *rdp)
+ {
+ 	int wake_gp;
+-	struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
+ 
+ 	WARN_ON_ONCE(cpu_online(rdp->cpu));
+ 	/*
+@@ -1162,7 +1161,7 @@ static int rcu_nocb_rdp_offload(struct rcu_data *rdp)
+ 	if (!rdp->nocb_gp_rdp)
+ 		return -EINVAL;
+ 
+-	if (WARN_ON_ONCE(!rdp_gp->nocb_gp_kthread))
++	if (WARN_ON_ONCE(!rdp->nocb_gp_kthread))
+ 		return -EINVAL;
+ 
+ 	pr_info("Offloading %d\n", rdp->cpu);
+@@ -1172,7 +1171,7 @@ static int rcu_nocb_rdp_offload(struct rcu_data *rdp)
+ 
+ 	wake_gp = rcu_nocb_queue_toggle_rdp(rdp);
+ 	if (wake_gp)
+-		wake_up_process(rdp_gp->nocb_gp_kthread);
++		wake_up_process(rdp->nocb_gp_kthread);
+ 
+ 	swait_event_exclusive(rdp->nocb_state_wq,
+ 			      rcu_nocb_rdp_offload_wait_cond(rdp));
+diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
+index 304e3405e6ec76..2d865b2096bebb 100644
+--- a/kernel/rcu/tree_plugin.h
++++ b/kernel/rcu/tree_plugin.h
+@@ -485,13 +485,16 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
+ 	struct rcu_node *rnp;
+ 	union rcu_special special;
+ 
++	rdp = this_cpu_ptr(&rcu_data);
++	if (rdp->defer_qs_iw_pending == DEFER_QS_PENDING)
++		rdp->defer_qs_iw_pending = DEFER_QS_IDLE;
++
+ 	/*
+ 	 * If RCU core is waiting for this CPU to exit its critical section,
+ 	 * report the fact that it has exited.  Because irqs are disabled,
+ 	 * t->rcu_read_unlock_special cannot change.
+ 	 */
+ 	special = t->rcu_read_unlock_special;
+-	rdp = this_cpu_ptr(&rcu_data);
+ 	if (!special.s && !rdp->cpu_no_qs.b.exp) {
+ 		local_irq_restore(flags);
+ 		return;
+@@ -623,10 +626,29 @@ notrace void rcu_preempt_deferred_qs(struct task_struct *t)
+  */
+ static void rcu_preempt_deferred_qs_handler(struct irq_work *iwp)
+ {
++	unsigned long flags;
+ 	struct rcu_data *rdp;
+ 
+ 	rdp = container_of(iwp, struct rcu_data, defer_qs_iw);
+-	rdp->defer_qs_iw_pending = false;
++	local_irq_save(flags);
++
++	/*
++	 * If the IRQ work handler happens to run in the middle of RCU read-side
++	 * critical section, it could be ineffective in getting the scheduler's
++	 * attention to report a deferred quiescent state (the whole point of the
++	 * IRQ work). For this reason, requeue the IRQ work.
++	 *
++	 * Basically, we want to avoid the following situation:
++	 * 1. rcu_read_unlock() queues IRQ work (state -> DEFER_QS_PENDING)
++	 * 2. CPU enters new rcu_read_lock()
++	 * 3. IRQ work runs but cannot report QS due to rcu_preempt_depth() > 0
++	 * 4. rcu_read_unlock() does not re-queue work (state still PENDING)
++	 * 5. Deferred QS reporting does not happen.
++	 */
++	if (rcu_preempt_depth() > 0)
++		WRITE_ONCE(rdp->defer_qs_iw_pending, DEFER_QS_IDLE);
++
++	local_irq_restore(flags);
+ }
+ 
+ /*
+@@ -672,17 +694,11 @@ static void rcu_read_unlock_special(struct task_struct *t)
+ 			set_tsk_need_resched(current);
+ 			set_preempt_need_resched();
+ 			if (IS_ENABLED(CONFIG_IRQ_WORK) && irqs_were_disabled &&
+-			    expboost && !rdp->defer_qs_iw_pending && cpu_online(rdp->cpu)) {
++			    expboost && rdp->defer_qs_iw_pending != DEFER_QS_PENDING &&
++			    cpu_online(rdp->cpu)) {
+ 				// Get scheduler to re-evaluate and call hooks.
+ 				// If !IRQ_WORK, FQS scan will eventually IPI.
+-				if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) &&
+-				    IS_ENABLED(CONFIG_PREEMPT_RT))
+-					rdp->defer_qs_iw = IRQ_WORK_INIT_HARD(
+-								rcu_preempt_deferred_qs_handler);
+-				else
+-					init_irq_work(&rdp->defer_qs_iw,
+-						      rcu_preempt_deferred_qs_handler);
+-				rdp->defer_qs_iw_pending = true;
++				rdp->defer_qs_iw_pending = DEFER_QS_PENDING;
+ 				irq_work_queue_on(&rdp->defer_qs_iw, rdp->cpu);
+ 			}
+ 		}
+@@ -821,6 +837,10 @@ dump_blkd_tasks(struct rcu_node *rnp, int ncheck)
+ 	}
+ }
+ 
++static void rcu_preempt_deferred_qs_init(struct rcu_data *rdp)
++{
++	rdp->defer_qs_iw = IRQ_WORK_INIT_HARD(rcu_preempt_deferred_qs_handler);
++}
+ #else /* #ifdef CONFIG_PREEMPT_RCU */
+ 
+ /*
+@@ -1020,6 +1040,8 @@ dump_blkd_tasks(struct rcu_node *rnp, int ncheck)
+ 	WARN_ON_ONCE(!list_empty(&rnp->blkd_tasks));
+ }
+ 
++static void rcu_preempt_deferred_qs_init(struct rcu_data *rdp) { }
++
+ #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
+ 
+ /*
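
The DEFER_QS_IDLE/DEFER_QS_PENDING pair above turns the old boolean into a
small state machine: the unlock path queues the IRQ work only while IDLE,
and the handler drops back to IDLE when it fires inside a new read-side
critical section, so a later rcu_read_unlock() can queue it again. A toy
single-threaded model of that handshake (no real IRQ work; names other
than the two state constants are made up for the sketch):

#include <stdio.h>

#define DEFER_QS_IDLE		0
#define DEFER_QS_PENDING	1

static int defer_qs_iw_pending = DEFER_QS_IDLE;
static int reader_depth;

/* rcu_read_unlock() slow path: queue work only if none is outstanding */
static void unlock_special(void)
{
	if (defer_qs_iw_pending != DEFER_QS_PENDING) {
		defer_qs_iw_pending = DEFER_QS_PENDING;
		printf("IRQ work queued\n");
	}
}

/* handler: useless inside a reader, so re-arm for the next unlock */
static void irq_work_handler(void)
{
	if (reader_depth > 0)
		defer_qs_iw_pending = DEFER_QS_IDLE;
}

int main(void)
{
	unlock_special();	/* 1. queue work, state -> PENDING     */
	reader_depth = 1;	/* 2. CPU enters a new rcu_read_lock() */
	irq_work_handler();	/* 3. handler cannot report a QS       */
	reader_depth = 0;
	unlock_special();	/* 4. unlock can queue the work again  */
	return 0;
}
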
+diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
+index 0a47e5155897cd..53e3670fbb1e05 100644
+--- a/kernel/sched/deadline.c
++++ b/kernel/sched/deadline.c
+@@ -3227,6 +3227,9 @@ void sched_dl_do_global(void)
+ 	if (global_rt_runtime() != RUNTIME_INF)
+ 		new_bw = to_ratio(global_rt_period(), global_rt_runtime());
+ 
++	for_each_possible_cpu(cpu)
++		init_dl_rq_bw_ratio(&cpu_rq(cpu)->dl);
++
+ 	for_each_possible_cpu(cpu) {
+ 		rcu_read_lock_sched();
+ 
+@@ -3242,7 +3245,6 @@ void sched_dl_do_global(void)
+ 		raw_spin_unlock_irqrestore(&dl_b->lock, flags);
+ 
+ 		rcu_read_unlock_sched();
+-		init_dl_rq_bw_ratio(&cpu_rq(cpu)->dl);
+ 	}
+ }
+ 
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 7280ed04c96cef..af61769b1d5020 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -12220,8 +12220,14 @@ static inline bool update_newidle_cost(struct sched_domain *sd, u64 cost)
+ 		/*
+ 		 * Track max cost of a domain to make sure to not delay the
+ 		 * next wakeup on the CPU.
++		 *
++		 * sched_balance_newidle() bumps the cost whenever newidle
++		 * balance fails, and we don't want things to grow out of
++		 * control.  Use the sysctl_sched_migration_cost as the upper
++		 * limit, plus a little extra to avoid off-by-one issues.
+ 		 */
+-		sd->max_newidle_lb_cost = cost;
++		sd->max_newidle_lb_cost =
++			min(cost, sysctl_sched_migration_cost + 200);
+ 		sd->last_decay_max_lb_cost = jiffies;
+ 	} else if (time_after(jiffies, sd->last_decay_max_lb_cost + HZ)) {
+ 		/*
+@@ -12926,10 +12932,17 @@ static int sched_balance_newidle(struct rq *this_rq, struct rq_flags *rf)
+ 
+ 			t1 = sched_clock_cpu(this_cpu);
+ 			domain_cost = t1 - t0;
+-			update_newidle_cost(sd, domain_cost);
+-
+ 			curr_cost += domain_cost;
+ 			t0 = t1;
++
++			/*
++			 * Failing newidle means it is not effective;
++			 * bump the cost so we end up doing less of it.
++			 */
++			if (!pulled_task)
++				domain_cost = (3 * sd->max_newidle_lb_cost) / 2;
++
++			update_newidle_cost(sd, domain_cost);
+ 		}
+ 
+ 		/*
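
Taken together, the two sched/fair.c hunks form a feedback loop: a failed
newidle balance inflates the remembered cost by half, while the cap keeps
repeated failures from pushing it past sysctl_sched_migration_cost plus a
little slack. A standalone sketch of just that update rule (the 0.5 ms
default for the migration cost is an assumption):

#include <stdio.h>

#define SYSCTL_SCHED_MIGRATION_COST 500000ULL	/* assumed default, ns */

static unsigned long long max_newidle_lb_cost;

static void update_newidle_cost(unsigned long long cost)
{
	unsigned long long cap = SYSCTL_SCHED_MIGRATION_COST + 200;

	if (cost > max_newidle_lb_cost)
		max_newidle_lb_cost = cost < cap ? cost : cap;
}

static void balance_attempt(unsigned long long domain_cost, int pulled_task)
{
	/* failing newidle means it is not effective; bump the cost */
	if (!pulled_task)
		domain_cost = (3 * max_newidle_lb_cost) / 2;
	update_newidle_cost(domain_cost);
}

int main(void)
{
	balance_attempt(10000, 1);	/* successful pull records 10 us */
	for (int i = 0; i < 12; i++) {
		balance_attempt(0, 0);	/* failures inflate the cost ... */
		printf("after failure %2d: %llu ns\n", i + 1,
		       max_newidle_lb_cost);
	}
	return 0;			/* ... until the cap is reached  */
}
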
+diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
+index 172c588de54270..6ad6717084ed8f 100644
+--- a/kernel/sched/rt.c
++++ b/kernel/sched/rt.c
+@@ -2951,6 +2951,12 @@ static int sched_rt_handler(const struct ctl_table *table, int write, void *buff
+ 	}
+ 	mutex_unlock(&mutex);
+ 
++	/*
++	 * After changing the maximum available bandwidth for DEADLINE, we need
++	 * to recompute the per-root-domain and per-CPU variables accordingly.
++	 */
++	rebuild_sched_domains();
++
+ 	return ret;
+ }
+ 
+diff --git a/lib/sbitmap.c b/lib/sbitmap.c
+index d3412984170c03..c07e3cd82e29d7 100644
+--- a/lib/sbitmap.c
++++ b/lib/sbitmap.c
+@@ -208,8 +208,28 @@ static int sbitmap_find_bit_in_word(struct sbitmap_word *map,
+ 	return nr;
+ }
+ 
++static unsigned int __map_depth_with_shallow(const struct sbitmap *sb,
++					     int index,
++					     unsigned int shallow_depth)
++{
++	u64 shallow_word_depth;
++	unsigned int word_depth, remainder;
++
++	word_depth = __map_depth(sb, index);
++	if (shallow_depth >= sb->depth)
++		return word_depth;
++
++	shallow_word_depth = word_depth * shallow_depth;
++	remainder = do_div(shallow_word_depth, sb->depth);
++
++	if (remainder >= (index + 1) * word_depth)
++		shallow_word_depth++;
++
++	return (unsigned int)shallow_word_depth;
++}
++
+ static int sbitmap_find_bit(struct sbitmap *sb,
+-			    unsigned int depth,
++			    unsigned int shallow_depth,
+ 			    unsigned int index,
+ 			    unsigned int alloc_hint,
+ 			    bool wrap)
+@@ -218,12 +238,12 @@ static int sbitmap_find_bit(struct sbitmap *sb,
+ 	int nr = -1;
+ 
+ 	for (i = 0; i < sb->map_nr; i++) {
+-		nr = sbitmap_find_bit_in_word(&sb->map[index],
+-					      min_t(unsigned int,
+-						    __map_depth(sb, index),
+-						    depth),
+-					      alloc_hint, wrap);
++		unsigned int depth = __map_depth_with_shallow(sb, index,
++							      shallow_depth);
+ 
++		if (depth)
++			nr = sbitmap_find_bit_in_word(&sb->map[index], depth,
++						      alloc_hint, wrap);
+ 		if (nr != -1) {
+ 			nr += index << sb->shift;
+ 			break;
+@@ -406,27 +426,9 @@ EXPORT_SYMBOL_GPL(sbitmap_bitmap_show);
+ static unsigned int sbq_calc_wake_batch(struct sbitmap_queue *sbq,
+ 					unsigned int depth)
+ {
+-	unsigned int wake_batch;
+-	unsigned int shallow_depth;
+-
+-	/*
+-	 * Each full word of the bitmap has bits_per_word bits, and there might
+-	 * be a partial word. There are depth / bits_per_word full words and
+-	 * depth % bits_per_word bits left over. In bitwise arithmetic:
+-	 *
+-	 * bits_per_word = 1 << shift
+-	 * depth / bits_per_word = depth >> shift
+-	 * depth % bits_per_word = depth & ((1 << shift) - 1)
+-	 *
+-	 * Each word can be limited to sbq->min_shallow_depth bits.
+-	 */
+-	shallow_depth = min(1U << sbq->sb.shift, sbq->min_shallow_depth);
+-	depth = ((depth >> sbq->sb.shift) * shallow_depth +
+-		 min(depth & ((1U << sbq->sb.shift) - 1), shallow_depth));
+-	wake_batch = clamp_t(unsigned int, depth / SBQ_WAIT_QUEUES, 1,
+-			     SBQ_WAKE_BATCH);
+-
+-	return wake_batch;
++	return clamp_t(unsigned int,
++		       min(depth, sbq->min_shallow_depth) / SBQ_WAIT_QUEUES,
++		       1, SBQ_WAKE_BATCH);
+ }
+ 
+ int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
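
The new __map_depth_with_shallow() spreads a bitmap-wide shallow limit
across the words in proportion to each word's size, and hands the
division remainder out to the lowest-indexed words so the per-word limits
sum exactly to shallow_depth. A user-space sketch of the same split
(do_div() is mimicked with plain C division; the 4 x 64-bit layout is an
assumption for the example):

#include <stdio.h>

static unsigned int map_depth_with_shallow(unsigned int word_depth,
					   unsigned int index,
					   unsigned int depth,
					   unsigned int shallow_depth)
{
	unsigned long long shallow_word_depth;
	unsigned int remainder;

	if (shallow_depth >= depth)
		return word_depth;

	shallow_word_depth = (unsigned long long)word_depth * shallow_depth;
	remainder = shallow_word_depth % depth;	/* do_div() returns this */
	shallow_word_depth /= depth;

	if (remainder >= (index + 1) * word_depth)
		shallow_word_depth++;

	return (unsigned int)shallow_word_depth;
}

int main(void)
{
	unsigned int total = 0;

	/* 4 words of 64 bits, bitmap-wide shallow limit of 90 bits */
	for (unsigned int i = 0; i < 4; i++) {
		unsigned int d = map_depth_with_shallow(64, i, 256, 90);

		printf("word %u: %2u bits\n", i, d);
		total += d;
	}
	printf("total:  %u bits (limit 90)\n", total);
	return 0;
}
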
+diff --git a/mm/damon/core.c b/mm/damon/core.c
+index 54f4dd8d549f06..9689f542523832 100644
+--- a/mm/damon/core.c
++++ b/mm/damon/core.c
+@@ -837,6 +837,7 @@ static int damos_commit(struct damos *dst, struct damos *src)
+ 		return err;
+ 
+ 	dst->wmarks = src->wmarks;
++	dst->target_nid = src->target_nid;
+ 
+ 	err = damos_commit_filters(dst, src);
+ 	return err;
+diff --git a/mm/kmemleak.c b/mm/kmemleak.c
+index 44bb798423dd39..91894fc54c645f 100644
+--- a/mm/kmemleak.c
++++ b/mm/kmemleak.c
+@@ -465,6 +465,7 @@ static struct kmemleak_object *mem_pool_alloc(gfp_t gfp)
+ {
+ 	unsigned long flags;
+ 	struct kmemleak_object *object;
++	bool warn = false;
+ 
+ 	/* try the slab allocator first */
+ 	if (object_cache) {
+@@ -483,8 +484,10 @@ static struct kmemleak_object *mem_pool_alloc(gfp_t gfp)
+ 	else if (mem_pool_free_count)
+ 		object = &mem_pool[--mem_pool_free_count];
+ 	else
+-		pr_warn_once("Memory pool empty, consider increasing CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE\n");
++		warn = true;
+ 	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
++	if (warn)
++		pr_warn_once("Memory pool empty, consider increasing CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE\n");
+ 
+ 	return object;
+ }
+@@ -2107,6 +2110,7 @@ static const struct file_operations kmemleak_fops = {
+ static void __kmemleak_do_cleanup(void)
+ {
+ 	struct kmemleak_object *object, *tmp;
++	unsigned int cnt = 0;
+ 
+ 	/*
+ 	 * Kmemleak has already been disabled, no need for RCU list traversal
+@@ -2115,6 +2119,10 @@ static void __kmemleak_do_cleanup(void)
+ 	list_for_each_entry_safe(object, tmp, &object_list, object_list) {
+ 		__remove_object(object);
+ 		__delete_object(object);
++
++		/* Call cond_resched() once per 64 iterations to avoid soft lockup */
++		if (!(++cnt & 0x3f))
++			cond_resched();
+ 	}
+ }
+ 
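
The "!(++cnt & 0x3f)" test above is the usual power-of-two modulus trick:
the low six bits of the counter wrap every 64 increments, so the branch
fires on iterations 64, 128, ... without a division. Quick standalone
demonstration:

#include <stdio.h>

int main(void)
{
	unsigned int cnt = 0;

	for (int i = 0; i < 200; i++) {
		if (!(++cnt & 0x3f))
			printf("would cond_resched() at iteration %u\n", cnt);
	}
	return 0;
}
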
+diff --git a/mm/ptdump.c b/mm/ptdump.c
+index 106e1d66e9f9ee..3e78bf33da420d 100644
+--- a/mm/ptdump.c
++++ b/mm/ptdump.c
+@@ -153,6 +153,7 @@ void ptdump_walk_pgd(struct ptdump_state *st, struct mm_struct *mm, pgd_t *pgd)
+ {
+ 	const struct ptdump_range *range = st->range;
+ 
++	get_online_mems();
+ 	mmap_write_lock(mm);
+ 	while (range->start != range->end) {
+ 		walk_page_range_novma(mm, range->start, range->end,
+@@ -160,6 +161,7 @@ void ptdump_walk_pgd(struct ptdump_state *st, struct mm_struct *mm, pgd_t *pgd)
+ 		range++;
+ 	}
+ 	mmap_write_unlock(mm);
++	put_online_mems();
+ 
+ 	/* Flush out the last page */
+ 	st->note_page(st, 0, -1, 0);
+diff --git a/mm/slub.c b/mm/slub.c
+index 66f86e5328182d..dc527b59f5a98a 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -4225,7 +4225,12 @@ static void *___kmalloc_large_node(size_t size, gfp_t flags, int node)
+ 		flags = kmalloc_fix_flags(flags);
+ 
+ 	flags |= __GFP_COMP;
+-	folio = (struct folio *)alloc_pages_node_noprof(node, flags, order);
++
++	if (node == NUMA_NO_NODE)
++		folio = (struct folio *)alloc_pages_noprof(flags, order);
++	else
++		folio = (struct folio *)__alloc_pages_noprof(flags, order, node, NULL);
++
+ 	if (folio) {
+ 		ptr = folio_address(folio);
+ 		lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B,
+diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
+index 2646b75163d5ff..8b0f2fbd6a759d 100644
+--- a/mm/userfaultfd.c
++++ b/mm/userfaultfd.c
+@@ -1800,13 +1800,16 @@ ssize_t move_pages(struct userfaultfd_ctx *ctx, unsigned long dst_start,
+ 			/* Check if we can move the pmd without splitting it. */
+ 			if (move_splits_huge_pmd(dst_addr, src_addr, src_start + len) ||
+ 			    !pmd_none(dst_pmdval)) {
+-				struct folio *folio = pmd_folio(*src_pmd);
+-
+-				if (!folio || (!is_huge_zero_folio(folio) &&
+-					       !PageAnonExclusive(&folio->page))) {
+-					spin_unlock(ptl);
+-					err = -EBUSY;
+-					break;
++				/* Can be a migration entry */
++				if (pmd_present(*src_pmd)) {
++					struct folio *folio = pmd_folio(*src_pmd);
++
++					if (!is_huge_zero_folio(folio) &&
++					    !PageAnonExclusive(&folio->page)) {
++						spin_unlock(ptl);
++						err = -EBUSY;
++						break;
++					}
+ 				}
+ 
+ 				spin_unlock(ptl);
+diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
+index 022b86797acdc5..4ad5296d79345d 100644
+--- a/net/bluetooth/hci_sock.c
++++ b/net/bluetooth/hci_sock.c
+@@ -118,7 +118,7 @@ static void hci_sock_free_cookie(struct sock *sk)
+ 	int id = hci_pi(sk)->cookie;
+ 
+ 	if (id) {
+-		hci_pi(sk)->cookie = 0xffffffff;
++		hci_pi(sk)->cookie = 0;
+ 		ida_free(&sock_cookie_ida, id);
+ 	}
+ }
+diff --git a/net/core/ieee8021q_helpers.c b/net/core/ieee8021q_helpers.c
+index 759a9b9f3f898b..669b357b73b2d7 100644
+--- a/net/core/ieee8021q_helpers.c
++++ b/net/core/ieee8021q_helpers.c
+@@ -7,6 +7,11 @@
+ #include <net/dscp.h>
+ #include <net/ieee8021q.h>
+ 
++/* verify that table covers all 8 traffic types */
++#define TT_MAP_SIZE_OK(tbl)                                 \
++	compiletime_assert(ARRAY_SIZE(tbl) == IEEE8021Q_TT_MAX, \
++			   #tbl " size mismatch")
++
+ /* The following arrays map Traffic Types (TT) to traffic classes (TC) for
+  * different number of queues as shown in the example provided by
+  * IEEE 802.1Q-2022 in Annex I "I.3 Traffic type to traffic class mapping" and
+@@ -101,51 +106,28 @@ int ieee8021q_tt_to_tc(enum ieee8021q_traffic_type tt, unsigned int num_queues)
+ 
+ 	switch (num_queues) {
+ 	case 8:
+-		compiletime_assert(ARRAY_SIZE(ieee8021q_8queue_tt_tc_map) !=
+-				   IEEE8021Q_TT_MAX - 1,
+-				   "ieee8021q_8queue_tt_tc_map != max - 1");
++		TT_MAP_SIZE_OK(ieee8021q_8queue_tt_tc_map);
+ 		return ieee8021q_8queue_tt_tc_map[tt];
+ 	case 7:
+-		compiletime_assert(ARRAY_SIZE(ieee8021q_7queue_tt_tc_map) !=
+-				   IEEE8021Q_TT_MAX - 1,
+-				   "ieee8021q_7queue_tt_tc_map != max - 1");
+-
++		TT_MAP_SIZE_OK(ieee8021q_7queue_tt_tc_map);
+ 		return ieee8021q_7queue_tt_tc_map[tt];
+ 	case 6:
+-		compiletime_assert(ARRAY_SIZE(ieee8021q_6queue_tt_tc_map) !=
+-				   IEEE8021Q_TT_MAX - 1,
+-				   "ieee8021q_6queue_tt_tc_map != max - 1");
+-
++		TT_MAP_SIZE_OK(ieee8021q_6queue_tt_tc_map);
+ 		return ieee8021q_6queue_tt_tc_map[tt];
+ 	case 5:
+-		compiletime_assert(ARRAY_SIZE(ieee8021q_5queue_tt_tc_map) !=
+-				   IEEE8021Q_TT_MAX - 1,
+-				   "ieee8021q_5queue_tt_tc_map != max - 1");
+-
++		TT_MAP_SIZE_OK(ieee8021q_5queue_tt_tc_map);
+ 		return ieee8021q_5queue_tt_tc_map[tt];
+ 	case 4:
+-		compiletime_assert(ARRAY_SIZE(ieee8021q_4queue_tt_tc_map) !=
+-				   IEEE8021Q_TT_MAX - 1,
+-				   "ieee8021q_4queue_tt_tc_map != max - 1");
+-
++		TT_MAP_SIZE_OK(ieee8021q_4queue_tt_tc_map);
+ 		return ieee8021q_4queue_tt_tc_map[tt];
+ 	case 3:
+-		compiletime_assert(ARRAY_SIZE(ieee8021q_3queue_tt_tc_map) !=
+-				   IEEE8021Q_TT_MAX - 1,
+-				   "ieee8021q_3queue_tt_tc_map != max - 1");
+-
++		TT_MAP_SIZE_OK(ieee8021q_3queue_tt_tc_map);
+ 		return ieee8021q_3queue_tt_tc_map[tt];
+ 	case 2:
+-		compiletime_assert(ARRAY_SIZE(ieee8021q_2queue_tt_tc_map) !=
+-				   IEEE8021Q_TT_MAX - 1,
+-				   "ieee8021q_2queue_tt_tc_map != max - 1");
+-
++		TT_MAP_SIZE_OK(ieee8021q_2queue_tt_tc_map);
+ 		return ieee8021q_2queue_tt_tc_map[tt];
+ 	case 1:
+-		compiletime_assert(ARRAY_SIZE(ieee8021q_1queue_tt_tc_map) !=
+-				   IEEE8021Q_TT_MAX - 1,
+-				   "ieee8021q_1queue_tt_tc_map != max - 1");
+-
++		TT_MAP_SIZE_OK(ieee8021q_1queue_tt_tc_map);
+ 		return ieee8021q_1queue_tt_tc_map[tt];
+ 	}
+ 
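
TT_MAP_SIZE_OK() builds on the kernel-internal compiletime_assert(); the
portable equivalent of the same guarantee is C11 _Static_assert, as in the
hedged sketch below (the 8-entry example table is made up):

#include <stddef.h>

#define IEEE8021Q_TT_MAX 8
#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
#define TT_MAP_SIZE_OK(tbl) \
	_Static_assert(ARRAY_SIZE(tbl) == IEEE8021Q_TT_MAX, \
		       #tbl " size mismatch")

static const int example_tt_tc_map[] = { 0, 0, 1, 1, 2, 2, 3, 3 };
TT_MAP_SIZE_OK(example_tt_tc_map);	/* drop an entry and this fails to build */

int main(void)
{
	return 0;
}
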
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c
+index 8082cc6be4fc1b..96786016dbb4ef 100644
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -55,7 +55,8 @@ static void __neigh_notify(struct neighbour *n, int type, int flags,
+ 			   u32 pid);
+ static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid);
+ static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
+-				    struct net_device *dev);
++				    struct net_device *dev,
++				    bool skip_perm);
+ 
+ #ifdef CONFIG_PROC_FS
+ static const struct seq_operations neigh_stat_seq_ops;
+@@ -444,7 +445,7 @@ static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev,
+ {
+ 	write_lock_bh(&tbl->lock);
+ 	neigh_flush_dev(tbl, dev, skip_perm);
+-	pneigh_ifdown_and_unlock(tbl, dev);
++	pneigh_ifdown_and_unlock(tbl, dev, skip_perm);
+ 	pneigh_queue_purge(&tbl->proxy_queue, dev ? dev_net(dev) : NULL,
+ 			   tbl->family);
+ 	if (skb_queue_empty_lockless(&tbl->proxy_queue))
+@@ -847,7 +848,8 @@ int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
+ }
+ 
+ static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
+-				    struct net_device *dev)
++				    struct net_device *dev,
++				    bool skip_perm)
+ {
+ 	struct pneigh_entry *n, **np, *freelist = NULL;
+ 	u32 h;
+@@ -855,12 +857,15 @@ static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
+ 	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
+ 		np = &tbl->phash_buckets[h];
+ 		while ((n = *np) != NULL) {
++			if (skip_perm && n->permanent)
++				goto skip;
+ 			if (!dev || n->dev == dev) {
+ 				*np = n->next;
+ 				n->next = freelist;
+ 				freelist = n;
+ 				continue;
+ 			}
++skip:
+ 			np = &n->next;
+ 		}
+ 	}
+@@ -2041,6 +2046,7 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
+ 		pn = pneigh_lookup(tbl, net, dst, dev, 1);
+ 		if (pn) {
+ 			pn->flags = ndm_flags;
++			pn->permanent = !!(ndm->ndm_state & NUD_PERMANENT);
+ 			if (protocol)
+ 				pn->protocol = protocol;
+ 			err = 0;
+diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
+index 70fea7c1a4b0a4..ee3c1b37d06c11 100644
+--- a/net/core/net_namespace.c
++++ b/net/core/net_namespace.c
+@@ -458,7 +458,7 @@ static void net_complete_free(void)
+ 
+ }
+ 
+-static void net_free(struct net *net)
++void net_passive_dec(struct net *net)
+ {
+ 	if (refcount_dec_and_test(&net->passive)) {
+ 		kfree(rcu_access_pointer(net->gen));
+@@ -476,7 +476,7 @@ void net_drop_ns(void *p)
+ 	struct net *net = (struct net *)p;
+ 
+ 	if (net)
+-		net_free(net);
++		net_passive_dec(net);
+ }
+ 
+ struct net *copy_net_ns(unsigned long flags,
+@@ -517,7 +517,7 @@ struct net *copy_net_ns(unsigned long flags,
+ 		key_remove_domain(net->key_domain);
+ #endif
+ 		put_user_ns(user_ns);
+-		net_free(net);
++		net_passive_dec(net);
+ dec_ucounts:
+ 		dec_net_namespaces(ucounts);
+ 		return ERR_PTR(rv);
+@@ -662,7 +662,7 @@ static void cleanup_net(struct work_struct *work)
+ 		key_remove_domain(net->key_domain);
+ #endif
+ 		put_user_ns(net->user_ns);
+-		net_free(net);
++		net_passive_dec(net);
+ 	}
+ }
+ 
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 9c63da2829f6ee..d392cb37a864f7 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -2243,6 +2243,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
+ 			get_net_track(net, &sk->ns_tracker, priority);
+ 			sock_inuse_add(net, 1);
+ 		} else {
++			net_passive_inc(net);
+ 			__netns_tracker_alloc(net, &sk->ns_tracker,
+ 					      false, priority);
+ 		}
+@@ -2267,6 +2268,7 @@ EXPORT_SYMBOL(sk_alloc);
+ static void __sk_destruct(struct rcu_head *head)
+ {
+ 	struct sock *sk = container_of(head, struct sock, sk_rcu);
++	struct net *net = sock_net(sk);
+ 	struct sk_filter *filter;
+ 
+ 	if (sk->sk_destruct)
+@@ -2298,14 +2300,28 @@ static void __sk_destruct(struct rcu_head *head)
+ 	put_cred(sk->sk_peer_cred);
+ 	put_pid(sk->sk_peer_pid);
+ 
+-	if (likely(sk->sk_net_refcnt))
+-		put_net_track(sock_net(sk), &sk->ns_tracker);
+-	else
+-		__netns_tracker_free(sock_net(sk), &sk->ns_tracker, false);
+-
++	if (likely(sk->sk_net_refcnt)) {
++		put_net_track(net, &sk->ns_tracker);
++	} else {
++		__netns_tracker_free(net, &sk->ns_tracker, false);
++		net_passive_dec(net);
++	}
+ 	sk_prot_free(sk->sk_prot_creator, sk);
+ }
+ 
++void sk_net_refcnt_upgrade(struct sock *sk)
++{
++	struct net *net = sock_net(sk);
++
++	WARN_ON_ONCE(sk->sk_net_refcnt);
++	__netns_tracker_free(net, &sk->ns_tracker, false);
++	net_passive_dec(net);
++	sk->sk_net_refcnt = 1;
++	get_net_track(net, &sk->ns_tracker, GFP_KERNEL);
++	sock_inuse_add(net, 1);
++}
++EXPORT_SYMBOL_GPL(sk_net_refcnt_upgrade);
++
+ void sk_destruct(struct sock *sk)
+ {
+ 	bool use_call_rcu = sock_flag(sk, SOCK_RCU_FREE);
+@@ -2402,6 +2418,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
+ 		 * is not properly dismantling its kernel sockets at netns
+ 		 * destroy time.
+ 		 */
++		net_passive_inc(sock_net(newsk));
+ 		__netns_tracker_alloc(sock_net(newsk), &newsk->ns_tracker,
+ 				      false, priority);
+ 	}
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 73d555593f5c56..9a5c9497b3931d 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -2545,7 +2545,6 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
+ 	do_cache = true;
+ 	if (type == RTN_BROADCAST) {
+ 		flags |= RTCF_BROADCAST | RTCF_LOCAL;
+-		fi = NULL;
+ 	} else if (type == RTN_MULTICAST) {
+ 		flags |= RTCF_MULTICAST | RTCF_LOCAL;
+ 		if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr,
+diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
+index 5de47dd5e9093d..12ba1a8db93af8 100644
+--- a/net/ipv4/udp_offload.c
++++ b/net/ipv4/udp_offload.c
+@@ -61,7 +61,7 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
+ 	remcsum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TUNNEL_REMCSUM);
+ 	skb->remcsum_offload = remcsum;
+ 
+-	need_ipsec = skb_dst(skb) && dst_xfrm(skb_dst(skb));
++	need_ipsec = (skb_dst(skb) && dst_xfrm(skb_dst(skb))) || skb_sec_path(skb);
+ 	/* Try to offload checksum if possible */
+ 	offload_csum = !!(need_csum &&
+ 			  !need_ipsec &&
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index be51b8792b96f4..49ec223f2eda49 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -2228,13 +2228,12 @@ void addrconf_dad_failure(struct sk_buff *skb, struct inet6_ifaddr *ifp)
+ 	in6_ifa_put(ifp);
+ }
+ 
+-/* Join to solicited addr multicast group.
+- * caller must hold RTNL */
++/* Join to solicited addr multicast group. */
+ void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr)
+ {
+ 	struct in6_addr maddr;
+ 
+-	if (dev->flags&(IFF_LOOPBACK|IFF_NOARP))
++	if (READ_ONCE(dev->flags) & (IFF_LOOPBACK | IFF_NOARP))
+ 		return;
+ 
+ 	addrconf_addr_solict_mult(addr, &maddr);
+@@ -3883,7 +3882,7 @@ static int addrconf_ifdown(struct net_device *dev, bool unregister)
+ 	 *	   Do not dev_put!
+ 	 */
+ 	if (unregister) {
+-		idev->dead = 1;
++		WRITE_ONCE(idev->dead, 1);
+ 
+ 		/* protected by rtnl_lock */
+ 		RCU_INIT_POINTER(dev->ip6_ptr, NULL);
+diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
+index 9949554e3211b5..e2a11a2f3b255d 100644
+--- a/net/ipv6/mcast.c
++++ b/net/ipv6/mcast.c
+@@ -907,23 +907,22 @@ static struct ifmcaddr6 *mca_alloc(struct inet6_dev *idev,
+ static int __ipv6_dev_mc_inc(struct net_device *dev,
+ 			     const struct in6_addr *addr, unsigned int mode)
+ {
+-	struct ifmcaddr6 *mc;
+ 	struct inet6_dev *idev;
+-
+-	ASSERT_RTNL();
++	struct ifmcaddr6 *mc;
+ 
+ 	/* we need to take a reference on idev */
+ 	idev = in6_dev_get(dev);
+-
+ 	if (!idev)
+ 		return -EINVAL;
+ 
+-	if (idev->dead) {
++	mutex_lock(&idev->mc_lock);
++
++	if (READ_ONCE(idev->dead)) {
++		mutex_unlock(&idev->mc_lock);
+ 		in6_dev_put(idev);
+ 		return -ENODEV;
+ 	}
+ 
+-	mutex_lock(&idev->mc_lock);
+ 	for_each_mc_mclock(idev, mc) {
+ 		if (ipv6_addr_equal(&mc->mca_addr, addr)) {
+ 			mc->mca_users++;
+diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
+index d4118c796290e5..1d37b26ea2ef7b 100644
+--- a/net/kcm/kcmsock.c
++++ b/net/kcm/kcmsock.c
+@@ -429,7 +429,7 @@ static void psock_write_space(struct sock *sk)
+ 
+ 	/* Check if the socket is reserved so someone is waiting for sending. */
+ 	kcm = psock->tx_kcm;
+-	if (kcm && !unlikely(kcm->tx_stopped))
++	if (kcm)
+ 		queue_work(kcm_wq, &kcm->tx_work);
+ 
+ 	spin_unlock_bh(&mux->lock);
+@@ -1696,12 +1696,6 @@ static int kcm_release(struct socket *sock)
+ 	 */
+ 	__skb_queue_purge(&sk->sk_write_queue);
+ 
+-	/* Set tx_stopped. This is checked when psock is bound to a kcm and we
+-	 * get a writespace callback. This prevents further work being queued
+-	 * from the callback (unbinding the psock occurs after canceling work.
+-	 */
+-	kcm->tx_stopped = 1;
+-
+ 	release_sock(sk);
+ 
+ 	spin_lock_bh(&mux->lock);
+@@ -1717,7 +1711,7 @@ static int kcm_release(struct socket *sock)
+ 	/* Cancel work. After this point there should be no outside references
+ 	 * to the kcm socket.
+ 	 */
+-	cancel_work_sync(&kcm->tx_work);
++	disable_work_sync(&kcm->tx_work);
+ 
+ 	lock_sock(sk);
+ 	psock = kcm->tx_psock;
+diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
+index a72c1d9edb4acc..2890dde9b3bf4a 100644
+--- a/net/mac80211/cfg.c
++++ b/net/mac80211/cfg.c
+@@ -1879,12 +1879,12 @@ static int sta_link_apply_parameters(struct ieee80211_local *local,
+ 	}
+ 
+ 	if (params->supported_rates &&
+-	    params->supported_rates_len) {
+-		ieee80211_parse_bitrates(link->conf->chanreq.oper.width,
+-					 sband, params->supported_rates,
+-					 params->supported_rates_len,
+-					 &link_sta->pub->supp_rates[sband->band]);
+-	}
++	    params->supported_rates_len &&
++	    !ieee80211_parse_bitrates(link->conf->chanreq.oper.width,
++				      sband, params->supported_rates,
++				      params->supported_rates_len,
++				      &link_sta->pub->supp_rates[sband->band]))
++		return -EINVAL;
+ 
+ 	if (params->ht_capa)
+ 		ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
+diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
+index 282e8c13e2bfc5..e3b46df95b71b2 100644
+--- a/net/mac80211/chan.c
++++ b/net/mac80211/chan.c
+@@ -1349,6 +1349,7 @@ ieee80211_link_use_reserved_reassign(struct ieee80211_link_data *link)
+ 		goto out;
+ 	}
+ 
++	link->radar_required = link->reserved_radar_required;
+ 	list_move(&link->assigned_chanctx_list, &new_ctx->assigned_links);
+ 	rcu_assign_pointer(link_conf->chanctx_conf, &new_ctx->conf);
+ 
+diff --git a/net/mac80211/link.c b/net/mac80211/link.c
+index 9484449d6a3476..cafedc5ecd4436 100644
+--- a/net/mac80211/link.c
++++ b/net/mac80211/link.c
+@@ -2,7 +2,7 @@
+ /*
+  * MLO link handling
+  *
+- * Copyright (C) 2022-2024 Intel Corporation
++ * Copyright (C) 2022-2025 Intel Corporation
+  */
+ #include <linux/slab.h>
+ #include <linux/kernel.h>
+@@ -365,6 +365,13 @@ static int ieee80211_vif_update_links(struct ieee80211_sub_if_data *sdata,
+ 			ieee80211_update_apvlan_links(sdata);
+ 	}
+ 
++	/*
++	 * Ignore errors if we are only removing links, as removal should
++	 * always succeed.
++	 */
++	if (!new_links)
++		ret = 0;
++
+ 	if (ret) {
+ 		/* restore config */
+ 		memcpy(sdata->link, old_data, sizeof(old_data));
+diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
+index fd7434995a475d..5a9a84a0cc35dd 100644
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -2243,7 +2243,8 @@ ieee80211_sta_abort_chanswitch(struct ieee80211_link_data *link)
+ 	if (!local->ops->abort_channel_switch)
+ 		return;
+ 
+-	ieee80211_link_unreserve_chanctx(link);
++	if (rcu_access_pointer(link->conf->chanctx_conf))
++		ieee80211_link_unreserve_chanctx(link);
+ 
+ 	ieee80211_vif_unblock_queues_csa(sdata);
+ 
+@@ -4291,6 +4292,7 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
+ 	struct ieee80211_prep_tx_info info = {
+ 		.subtype = IEEE80211_STYPE_AUTH,
+ 	};
++	bool sae_need_confirm = false;
+ 
+ 	lockdep_assert_wiphy(sdata->local->hw.wiphy);
+ 
+@@ -4336,6 +4338,8 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
+ 				jiffies + IEEE80211_AUTH_WAIT_SAE_RETRY;
+ 			ifmgd->auth_data->timeout_started = true;
+ 			run_again(sdata, ifmgd->auth_data->timeout);
++			if (auth_transaction == 1)
++				sae_need_confirm = true;
+ 			goto notify_driver;
+ 		}
+ 
+@@ -4378,6 +4382,9 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
+ 	     ifmgd->auth_data->expected_transaction == 2)) {
+ 		if (!ieee80211_mark_sta_auth(sdata))
+ 			return; /* ignore frame -- wait for timeout */
++	} else if (ifmgd->auth_data->algorithm == WLAN_AUTH_SAE &&
++		   auth_transaction == 1) {
++		sae_need_confirm = true;
+ 	} else if (ifmgd->auth_data->algorithm == WLAN_AUTH_SAE &&
+ 		   auth_transaction == 2) {
+ 		sdata_info(sdata, "SAE peer confirmed\n");
+@@ -4386,7 +4393,8 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
+ 
+ 	cfg80211_rx_mlme_mgmt(sdata->dev, (u8 *)mgmt, len);
+ notify_driver:
+-	drv_mgd_complete_tx(sdata->local, sdata, &info);
++	if (!sae_need_confirm)
++		drv_mgd_complete_tx(sdata->local, sdata, &info);
+ }
+ 
+ #define case_WLAN(type) \
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index 8e1d00efa62e5c..8c0d91dfd7e2b3 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -4283,10 +4283,16 @@ static bool ieee80211_rx_data_set_sta(struct ieee80211_rx_data *rx,
+ 		rx->link_sta = NULL;
+ 	}
+ 
+-	if (link_id < 0)
+-		rx->link = &rx->sdata->deflink;
+-	else if (!ieee80211_rx_data_set_link(rx, link_id))
++	if (link_id < 0) {
++		if (ieee80211_vif_is_mld(&rx->sdata->vif) &&
++		    sta && !sta->sta.valid_links)
++			rx->link =
++				rcu_dereference(rx->sdata->link[sta->deflink.link_id]);
++		else
++			rx->link = &rx->sdata->deflink;
++	} else if (!ieee80211_rx_data_set_link(rx, link_id)) {
+ 		return false;
++	}
+ 
+ 	return true;
+ }
+diff --git a/net/mctp/af_mctp.c b/net/mctp/af_mctp.c
+index 57850d4dac5db9..70aeebfc4182e1 100644
+--- a/net/mctp/af_mctp.c
++++ b/net/mctp/af_mctp.c
+@@ -73,7 +73,6 @@ static int mctp_bind(struct socket *sock, struct sockaddr *addr, int addrlen)
+ 
+ 	lock_sock(sk);
+ 
+-	/* TODO: allow rebind */
+ 	if (sk_hashed(sk)) {
+ 		rc = -EADDRINUSE;
+ 		goto out_release;
+@@ -629,15 +628,36 @@ static void mctp_sk_close(struct sock *sk, long timeout)
+ static int mctp_sk_hash(struct sock *sk)
+ {
+ 	struct net *net = sock_net(sk);
++	struct sock *existing;
++	struct mctp_sock *msk;
++	int rc;
++
++	msk = container_of(sk, struct mctp_sock, sk);
+ 
+ 	/* Bind lookup runs under RCU, remain live during that. */
+ 	sock_set_flag(sk, SOCK_RCU_FREE);
+ 
+ 	mutex_lock(&net->mctp.bind_lock);
++
++	/* Prevent duplicate binds. */
++	sk_for_each(existing, &net->mctp.binds) {
++		struct mctp_sock *mex =
++			container_of(existing, struct mctp_sock, sk);
++
++		if (mex->bind_type == msk->bind_type &&
++		    mex->bind_addr == msk->bind_addr &&
++		    mex->bind_net == msk->bind_net) {
++			rc = -EADDRINUSE;
++			goto out;
++		}
++	}
++
+ 	sk_add_node_rcu(sk, &net->mctp.binds);
+-	mutex_unlock(&net->mctp.bind_lock);
++	rc = 0;
+ 
+-	return 0;
++out:
++	mutex_unlock(&net->mctp.bind_lock);
++	return rc;
+ }
+ 
+ static void mctp_sk_unhash(struct sock *sk)
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index 0253a863a621c8..a05f201d194c52 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -1768,10 +1768,7 @@ int mptcp_subflow_create_socket(struct sock *sk, unsigned short family,
+ 	 * needs it.
+ 	 * Update ns_tracker to current stack trace and refcounted tracker.
+ 	 */
+-	__netns_tracker_free(net, &sf->sk->ns_tracker, false);
+-	sf->sk->sk_net_refcnt = 1;
+-	get_net_track(net, &sf->sk->ns_tracker, GFP_KERNEL);
+-	sock_inuse_add(net, 1);
++	sk_net_refcnt_upgrade(sf->sk);
+ 	err = tcp_set_ulp(sf->sk, "mptcp");
+ 	if (err)
+ 		goto err_free;
+diff --git a/net/ncsi/internal.h b/net/ncsi/internal.h
+index 2c260f33b55cc5..ad1f671ffc37fa 100644
+--- a/net/ncsi/internal.h
++++ b/net/ncsi/internal.h
+@@ -110,7 +110,7 @@ struct ncsi_channel_version {
+ 	u8   update;		/* NCSI version update */
+ 	char alpha1;		/* NCSI version alpha1 */
+ 	char alpha2;		/* NCSI version alpha2 */
+-	u8  fw_name[12];	/* Firmware name string                */
++	u8  fw_name[12 + 1];	/* Firmware name string                */
+ 	u32 fw_version;		/* Firmware version                   */
+ 	u16 pci_ids[4];		/* PCI identification                 */
+ 	u32 mf_id;		/* Manufacture ID                     */
+diff --git a/net/ncsi/ncsi-rsp.c b/net/ncsi/ncsi-rsp.c
+index 8668888c5a2f99..d5ed80731e8928 100644
+--- a/net/ncsi/ncsi-rsp.c
++++ b/net/ncsi/ncsi-rsp.c
+@@ -775,6 +775,7 @@ static int ncsi_rsp_handler_gvi(struct ncsi_request *nr)
+ 	ncv->alpha1 = rsp->alpha1;
+ 	ncv->alpha2 = rsp->alpha2;
+ 	memcpy(ncv->fw_name, rsp->fw_name, 12);
++	ncv->fw_name[12] = '\0';
+ 	ncv->fw_version = ntohl(rsp->fw_version);
+ 	for (i = 0; i < ARRAY_SIZE(ncv->pci_ids); i++)
+ 		ncv->pci_ids[i] = ntohs(rsp->pci_ids[i]);
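
The fw_name change is the standard pattern for fixed-width protocol
strings that are not guaranteed to be NUL-terminated on the wire: size the
destination one byte larger than the field and terminate it explicitly
after the copy. A self-contained sketch (struct and field names here are
hypothetical):

#include <stdio.h>
#include <string.h>

#define FW_NAME_WIRE_LEN 12

struct version_info {
	char fw_name[FW_NAME_WIRE_LEN + 1];	/* +1 for the forced NUL */
};

static void copy_fw_name(struct version_info *v,
			 const char wire[FW_NAME_WIRE_LEN])
{
	memcpy(v->fw_name, wire, FW_NAME_WIRE_LEN);
	v->fw_name[FW_NAME_WIRE_LEN] = '\0';	/* safe for printf/strcmp now */
}

int main(void)
{
	/* 12 bytes, deliberately without a terminating NUL */
	const char wire[FW_NAME_WIRE_LEN] =
		{ 'm', 'c', 'x', '-', 'f', 'w', '-', '1', '.', '2', '.', '3' };
	struct version_info v;

	copy_fw_name(&v, wire);
	printf("%s\n", v.fw_name);
	return 0;
}
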
+diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
+index 6a1239433830fb..18a91c031554cb 100644
+--- a/net/netfilter/nf_conntrack_netlink.c
++++ b/net/netfilter/nf_conntrack_netlink.c
+@@ -860,8 +860,6 @@ ctnetlink_conntrack_event(unsigned int events, const struct nf_ct_event *item)
+ 
+ static int ctnetlink_done(struct netlink_callback *cb)
+ {
+-	if (cb->args[1])
+-		nf_ct_put((struct nf_conn *)cb->args[1]);
+ 	kfree(cb->data);
+ 	return 0;
+ }
+@@ -1184,19 +1182,26 @@ static int ctnetlink_filter_match(struct nf_conn *ct, void *data)
+ 	return 0;
+ }
+ 
++static unsigned long ctnetlink_get_id(const struct nf_conn *ct)
++{
++	unsigned long id = nf_ct_get_id(ct);
++
++	return id ? id : 1;
++}
++
+ static int
+ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
+ {
+ 	unsigned int flags = cb->data ? NLM_F_DUMP_FILTERED : 0;
+ 	struct net *net = sock_net(skb->sk);
+-	struct nf_conn *ct, *last;
++	unsigned long last_id = cb->args[1];
+ 	struct nf_conntrack_tuple_hash *h;
+ 	struct hlist_nulls_node *n;
+ 	struct nf_conn *nf_ct_evict[8];
++	struct nf_conn *ct;
+ 	int res, i;
+ 	spinlock_t *lockp;
+ 
+-	last = (struct nf_conn *)cb->args[1];
+ 	i = 0;
+ 
+ 	local_bh_disable();
+@@ -1233,7 +1238,7 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
+ 				continue;
+ 
+ 			if (cb->args[1]) {
+-				if (ct != last)
++				if (ctnetlink_get_id(ct) != last_id)
+ 					continue;
+ 				cb->args[1] = 0;
+ 			}
+@@ -1246,8 +1251,7 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
+ 					    NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
+ 					    ct, true, flags);
+ 			if (res < 0) {
+-				nf_conntrack_get(&ct->ct_general);
+-				cb->args[1] = (unsigned long)ct;
++				cb->args[1] = ctnetlink_get_id(ct);
+ 				spin_unlock(lockp);
+ 				goto out;
+ 			}
+@@ -1260,12 +1264,10 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
+ 	}
+ out:
+ 	local_bh_enable();
+-	if (last) {
++	if (last_id) {
+ 		/* nf ct hash resize happened, now clear the leftover. */
+-		if ((struct nf_conn *)cb->args[1] == last)
++		if (cb->args[1] == last_id)
+ 			cb->args[1] = 0;
+-
+-		nf_ct_put(last);
+ 	}
+ 
+ 	while (i) {
+diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
+index c5855069bdaba0..9e4e25f2458f99 100644
+--- a/net/netfilter/nft_set_pipapo.c
++++ b/net/netfilter/nft_set_pipapo.c
+@@ -1219,7 +1219,7 @@ static void pipapo_free_scratch(const struct nft_pipapo_match *m, unsigned int c
+ 
+ 	mem = s;
+ 	mem -= s->align_off;
+-	kfree(mem);
++	kvfree(mem);
+ }
+ 
+ /**
+@@ -1240,10 +1240,9 @@ static int pipapo_realloc_scratch(struct nft_pipapo_match *clone,
+ 		void *scratch_aligned;
+ 		u32 align_off;
+ #endif
+-		scratch = kzalloc_node(struct_size(scratch, map,
+-						   bsize_max * 2) +
+-				       NFT_PIPAPO_ALIGN_HEADROOM,
+-				       GFP_KERNEL_ACCOUNT, cpu_to_node(i));
++		scratch = kvzalloc_node(struct_size(scratch, map, bsize_max * 2) +
++					NFT_PIPAPO_ALIGN_HEADROOM,
++					GFP_KERNEL_ACCOUNT, cpu_to_node(i));
+ 		if (!scratch) {
+ 			/* On failure, there's no need to undo previous
+ 			 * allocations: this means that some scratch maps have
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index b02fb75f8d4fd2..8b060465a2be1a 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -794,16 +794,6 @@ static int netlink_release(struct socket *sock)
+ 
+ 	sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1);
+ 
+-	/* Because struct net might disappear soon, do not keep a pointer. */
+-	if (!sk->sk_net_refcnt && sock_net(sk) != &init_net) {
+-		__netns_tracker_free(sock_net(sk), &sk->ns_tracker, false);
+-		/* Because of deferred_put_nlk_sk and use of work queue,
+-		 * it is possible  netns will be freed before this socket.
+-		 */
+-		sock_net_set(sk, &init_net);
+-		__netns_tracker_alloc(&init_net, &sk->ns_tracker,
+-				      false, GFP_KERNEL);
+-	}
+ 	call_rcu(&nlk->rcu, deferred_put_nlk_sk);
+ 	return 0;
+ }
+@@ -1222,7 +1212,7 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
+ 	nlk = nlk_sk(sk);
+ 	rmem = atomic_add_return(skb->truesize, &sk->sk_rmem_alloc);
+ 
+-	if ((rmem == skb->truesize || rmem < READ_ONCE(sk->sk_rcvbuf)) &&
++	if ((rmem == skb->truesize || rmem <= READ_ONCE(sk->sk_rcvbuf)) &&
+ 	    !test_bit(NETLINK_S_CONGESTED, &nlk->state)) {
+ 		netlink_skb_set_owner_r(skb, sk);
+ 		return 0;
+diff --git a/net/rds/tcp.c b/net/rds/tcp.c
+index 0581c53e651704..3cc2f303bf7865 100644
+--- a/net/rds/tcp.c
++++ b/net/rds/tcp.c
+@@ -504,12 +504,8 @@ bool rds_tcp_tune(struct socket *sock)
+ 			release_sock(sk);
+ 			return false;
+ 		}
+-		/* Update ns_tracker to current stack trace and refcounted tracker */
+-		__netns_tracker_free(net, &sk->ns_tracker, false);
+-
+-		sk->sk_net_refcnt = 1;
+-		netns_tracker_alloc(net, &sk->ns_tracker, GFP_KERNEL);
+-		sock_inuse_add(net, 1);
++		sk_net_refcnt_upgrade(sk);
++		put_net(net);
+ 	}
+ 	rtn = net_generic(net, rds_tcp_netid);
+ 	if (rtn->sndbuf_size > 0) {
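This and the smc/sunrpc hunks below replace the same open-coded sequence with the sk_net_refcnt_upgrade() helper; rds additionally drops its local reference with put_net() since the helper takes its own. A hedged sketch of what the helper consolidates, built from the calls visible in the removed lines (the authoritative version lives in net/core/sock.c):

    static void sk_net_refcnt_upgrade_sketch(struct sock *sk)
    {
        struct net *net = sock_net(sk);

        /* drop the untracked reference taken at kernel-socket creation */
        __netns_tracker_free(net, &sk->ns_tracker, false);
        sk->sk_net_refcnt = 1;
        /* take a counted, tracked reference so the netns cannot vanish */
        get_net_track(net, &sk->ns_tracker, GFP_KERNEL);
        sock_inuse_add(net, 1);
    }

Centralizing the dance means every user gets the same ordering and the same tracker semantics, instead of four slightly different copies.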
+diff --git a/net/sched/sch_ets.c b/net/sched/sch_ets.c
+index 037f764822b965..82635dd2cfa59f 100644
+--- a/net/sched/sch_ets.c
++++ b/net/sched/sch_ets.c
+@@ -651,6 +651,12 @@ static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt,
+ 
+ 	sch_tree_lock(sch);
+ 
++	for (i = nbands; i < oldbands; i++) {
++		if (i >= q->nstrict && q->classes[i].qdisc->q.qlen)
++			list_del_init(&q->classes[i].alist);
++		qdisc_purge_queue(q->classes[i].qdisc);
++	}
++
+ 	WRITE_ONCE(q->nbands, nbands);
+ 	for (i = nstrict; i < q->nstrict; i++) {
+ 		if (q->classes[i].qdisc->q.qlen) {
+@@ -658,11 +664,6 @@ static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt,
+ 			q->classes[i].deficit = quanta[i];
+ 		}
+ 	}
+-	for (i = q->nbands; i < oldbands; i++) {
+-		if (i >= q->nstrict && q->classes[i].qdisc->q.qlen)
+-			list_del_init(&q->classes[i].alist);
+-		qdisc_purge_queue(q->classes[i].qdisc);
+-	}
+ 	WRITE_ONCE(q->nstrict, nstrict);
+ 	memcpy(q->prio2band, priomap, sizeof(priomap));
+ 
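The ets change is purely about ordering: bands that fall outside the new range are unlinked and drained before the smaller band count is published (and the loop bound is the saved oldbands, not a re-read of q->nbands). A hedged fragment of the enforced order, with retire_band() as an illustrative stand-in for the list_del_init() + qdisc_purge_queue() pair:

    for (i = nbands; i < oldbands; i++)
        retire_band(&q->classes[i]);   /* 1. unlink + drain first */
    WRITE_ONCE(q->nbands, nbands);     /* 2. only then shrink the range */

With the old order, a concurrent dequeue could still reach a band whose queue the config path had already purged.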
+diff --git a/net/sctp/input.c b/net/sctp/input.c
+index a8a254a5008e52..032a10d82302c3 100644
+--- a/net/sctp/input.c
++++ b/net/sctp/input.c
+@@ -117,7 +117,7 @@ int sctp_rcv(struct sk_buff *skb)
+ 	 * it's better to just linearize it otherwise crc computing
+ 	 * takes longer.
+ 	 */
+-	if ((!is_gso && skb_linearize(skb)) ||
++	if (((!is_gso || skb_cloned(skb)) && skb_linearize(skb)) ||
+ 	    !pskb_may_pull(skb, sizeof(struct sctphdr)))
+ 		goto discard_it;
+ 
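The linearization predicate after the fix, as a truth table in code: non-GSO skbs are linearized as before, and GSO skbs are now also linearized when cloned, since a clone shares its fragments and computing the CRC over shared data needs a private linear copy first.

    #include <stdbool.h>

    static bool must_linearize(bool is_gso, bool is_cloned)
    {
        return !is_gso || is_cloned;
    }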
+diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
+index 3c43239f09d367..cdd445d40b945d 100644
+--- a/net/smc/af_smc.c
++++ b/net/smc/af_smc.c
+@@ -3353,10 +3353,7 @@ int smc_create_clcsk(struct net *net, struct sock *sk, int family)
+ 	 * which need net ref.
+ 	 */
+ 	sk = smc->clcsock->sk;
+-	__netns_tracker_free(net, &sk->ns_tracker, false);
+-	sk->sk_net_refcnt = 1;
+-	get_net_track(net, &sk->ns_tracker, GFP_KERNEL);
+-	sock_inuse_add(net, 1);
++	sk_net_refcnt_upgrade(sk);
+ 	return 0;
+ }
+ 
+diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
+index 3c115936b7198d..e61e945760582a 100644
+--- a/net/sunrpc/svcsock.c
++++ b/net/sunrpc/svcsock.c
+@@ -1568,10 +1568,7 @@ static struct svc_xprt *svc_create_socket(struct svc_serv *serv,
+ 	newlen = error;
+ 
+ 	if (protocol == IPPROTO_TCP) {
+-		__netns_tracker_free(net, &sock->sk->ns_tracker, false);
+-		sock->sk->sk_net_refcnt = 1;
+-		get_net_track(net, &sock->sk->ns_tracker, GFP_KERNEL);
+-		sock_inuse_add(net, 1);
++		sk_net_refcnt_upgrade(sock->sk);
+ 		if ((error = kernel_listen(sock, 64)) < 0)
+ 			goto bummer;
+ 	}
+diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
+index 1397bb48cdde39..92cec227215aea 100644
+--- a/net/sunrpc/xprtsock.c
++++ b/net/sunrpc/xprtsock.c
+@@ -1960,12 +1960,8 @@ static struct socket *xs_create_sock(struct rpc_xprt *xprt,
+ 		goto out;
+ 	}
+ 
+-	if (protocol == IPPROTO_TCP) {
+-		__netns_tracker_free(xprt->xprt_net, &sock->sk->ns_tracker, false);
+-		sock->sk->sk_net_refcnt = 1;
+-		get_net_track(xprt->xprt_net, &sock->sk->ns_tracker, GFP_KERNEL);
+-		sock_inuse_add(xprt->xprt_net, 1);
+-	}
++	if (protocol == IPPROTO_TCP)
++		sk_net_refcnt_upgrade(sock->sk);
+ 
+ 	filp = sock_alloc_file(sock, O_NONBLOCK, NULL);
+ 	if (IS_ERR(filp))
+diff --git a/net/tls/tls.h b/net/tls/tls.h
+index e5e47452308ab7..e1eaf12b374264 100644
+--- a/net/tls/tls.h
++++ b/net/tls/tls.h
+@@ -195,7 +195,7 @@ void tls_strp_msg_done(struct tls_strparser *strp);
+ int tls_rx_msg_size(struct tls_strparser *strp, struct sk_buff *skb);
+ void tls_rx_msg_ready(struct tls_strparser *strp);
+ 
+-void tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh);
++bool tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh);
+ int tls_strp_msg_cow(struct tls_sw_context_rx *ctx);
+ struct sk_buff *tls_strp_msg_detach(struct tls_sw_context_rx *ctx);
+ int tls_strp_msg_hold(struct tls_strparser *strp, struct sk_buff_head *dst);
+diff --git a/net/tls/tls_strp.c b/net/tls/tls_strp.c
+index 095cf31bae0ba9..d71643b494a1ae 100644
+--- a/net/tls/tls_strp.c
++++ b/net/tls/tls_strp.c
+@@ -475,7 +475,7 @@ static void tls_strp_load_anchor_with_queue(struct tls_strparser *strp, int len)
+ 	strp->stm.offset = offset;
+ }
+ 
+-void tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh)
++bool tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh)
+ {
+ 	struct strp_msg *rxm;
+ 	struct tls_msg *tlm;
+@@ -484,8 +484,11 @@ void tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh)
+ 	DEBUG_NET_WARN_ON_ONCE(!strp->stm.full_len);
+ 
+ 	if (!strp->copy_mode && force_refresh) {
+-		if (WARN_ON(tcp_inq(strp->sk) < strp->stm.full_len))
+-			return;
++		if (unlikely(tcp_inq(strp->sk) < strp->stm.full_len)) {
++			WRITE_ONCE(strp->msg_ready, 0);
++			memset(&strp->stm, 0, sizeof(strp->stm));
++			return false;
++		}
+ 
+ 		tls_strp_load_anchor_with_queue(strp, strp->stm.full_len);
+ 	}
+@@ -495,6 +498,8 @@ void tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh)
+ 	rxm->offset	= strp->stm.offset;
+ 	tlm = tls_msg(strp->anchor);
+ 	tlm->control	= strp->mark;
++
++	return true;
+ }
+ 
+ /* Called with lock held on lower socket */
+diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
+index 1d7caadd0cbc45..6385329ef98ddc 100644
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -1380,7 +1380,8 @@ tls_rx_rec_wait(struct sock *sk, struct sk_psock *psock, bool nonblock,
+ 			return sock_intr_errno(timeo);
+ 	}
+ 
+-	tls_strp_msg_load(&ctx->strp, released);
++	if (unlikely(!tls_strp_msg_load(&ctx->strp, released)))
++		return tls_rx_rec_wait(sk, psock, nonblock, false);
+ 
+ 	return 1;
+ }
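The tls_strp_msg_load() signature change turns a WARN-and-continue into a recoverable failure: the loader may now observe that the TCP queue shrank underneath a previously announced record (msg_ready and the anchor state are cleared, false is returned), and tls_rx_rec_wait() simply re-enters the wait path with "released" forced to false. A hedged fragment of the new contract, with illustrative names:

    for (;;) {
        /* ... block until strp->msg_ready is set ... */
        if (msg_load(strp, released))
            return 1;          /* record anchored, proceed to decrypt */
        released = false;      /* parser state was reset; re-validate */
    }

The recursive call in the hunk is equivalent to one such retry iteration.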
+diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
+index f0e48e6911fc46..f01f9e8781061e 100644
+--- a/net/vmw_vsock/virtio_transport.c
++++ b/net/vmw_vsock/virtio_transport.c
+@@ -307,7 +307,7 @@ virtio_transport_cancel_pkt(struct vsock_sock *vsk)
+ 
+ static void virtio_vsock_rx_fill(struct virtio_vsock *vsock)
+ {
+-	int total_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE + VIRTIO_VSOCK_SKB_HEADROOM;
++	int total_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE;
+ 	struct scatterlist pkt, *p;
+ 	struct virtqueue *vq;
+ 	struct sk_buff *skb;
+diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
+index a5eb92d93074e6..d1a66410b9c551 100644
+--- a/net/wireless/mlme.c
++++ b/net/wireless/mlme.c
+@@ -843,7 +843,8 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
+ 
+ 	mgmt = (const struct ieee80211_mgmt *)params->buf;
+ 
+-	if (!ieee80211_is_mgmt(mgmt->frame_control))
++	if (!ieee80211_is_mgmt(mgmt->frame_control) ||
++	    ieee80211_has_order(mgmt->frame_control))
+ 		return -EINVAL;
+ 
+ 	stype = le16_to_cpu(mgmt->frame_control) & IEEE80211_FCTL_STYPE;
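A runnable illustration of the new check (the constant values match the IEEE80211_FCTL_* definitions in linux/ieee80211.h): frames with the order bit set carry an HT Control field that this TX path does not build into the header, so they are now rejected along with non-management frames.

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define FCTL_FTYPE 0x000c
    #define FTYPE_MGMT 0x0000
    #define FCTL_ORDER 0x8000

    static bool mgmt_tx_acceptable(uint16_t fc)
    {
        return (fc & FCTL_FTYPE) == FTYPE_MGMT && !(fc & FCTL_ORDER);
    }

    int main(void)
    {
        assert(mgmt_tx_acceptable(0x00d0));   /* action frame: ok       */
        assert(!mgmt_tx_acceptable(0x80d0));  /* order bit set: reject  */
        assert(!mgmt_tx_acceptable(0x0008));  /* data frame: reject     */
        return 0;
    }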
+diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
+index ad0fe884947142..6f99fd2d966c65 100644
+--- a/net/xfrm/xfrm_state.c
++++ b/net/xfrm/xfrm_state.c
+@@ -1637,6 +1637,26 @@ struct xfrm_state *xfrm_state_lookup_byspi(struct net *net, __be32 spi,
+ }
+ EXPORT_SYMBOL(xfrm_state_lookup_byspi);
+ 
++static struct xfrm_state *xfrm_state_lookup_spi_proto(struct net *net, __be32 spi, u8 proto)
++{
++	struct xfrm_state *x;
++	unsigned int i;
++
++	rcu_read_lock();
++	for (i = 0; i <= net->xfrm.state_hmask; i++) {
++		hlist_for_each_entry_rcu(x, &net->xfrm.state_byspi[i], byspi) {
++			if (x->id.spi == spi && x->id.proto == proto) {
++				if (!xfrm_state_hold_rcu(x))
++					continue;
++				rcu_read_unlock();
++				return x;
++			}
++		}
++	}
++	rcu_read_unlock();
++	return NULL;
++}
++
+ static void __xfrm_state_insert(struct xfrm_state *x)
+ {
+ 	struct net *net = xs_net(x);
+@@ -2465,10 +2485,8 @@ int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high,
+ 	unsigned int h;
+ 	struct xfrm_state *x0;
+ 	int err = -ENOENT;
+-	__be32 minspi = htonl(low);
+-	__be32 maxspi = htonl(high);
++	u32 range = high - low + 1;
+ 	__be32 newspi = 0;
+-	u32 mark = x->mark.v & x->mark.m;
+ 
+ 	spin_lock_bh(&x->lock);
+ 	if (x->km.state == XFRM_STATE_DEAD) {
+@@ -2482,38 +2500,34 @@ int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high,
+ 
+ 	err = -ENOENT;
+ 
+-	if (minspi == maxspi) {
+-		x0 = xfrm_state_lookup(net, mark, &x->id.daddr, minspi, x->id.proto, x->props.family);
+-		if (x0) {
+-			NL_SET_ERR_MSG(extack, "Requested SPI is already in use");
+-			xfrm_state_put(x0);
++	for (h = 0; h < range; h++) {
++		u32 spi = (low == high) ? low : get_random_u32_inclusive(low, high);
++		newspi = htonl(spi);
++
++		spin_lock_bh(&net->xfrm.xfrm_state_lock);
++		x0 = xfrm_state_lookup_spi_proto(net, newspi, x->id.proto);
++		if (!x0) {
++			x->id.spi = newspi;
++			h = xfrm_spi_hash(net, &x->id.daddr, newspi, x->id.proto, x->props.family);
++			XFRM_STATE_INSERT(byspi, &x->byspi, net->xfrm.state_byspi + h, x->xso.type);
++			spin_unlock_bh(&net->xfrm.xfrm_state_lock);
++			err = 0;
+ 			goto unlock;
+ 		}
+-		newspi = minspi;
+-	} else {
+-		u32 spi = 0;
+-		for (h = 0; h < high-low+1; h++) {
+-			spi = get_random_u32_inclusive(low, high);
+-			x0 = xfrm_state_lookup(net, mark, &x->id.daddr, htonl(spi), x->id.proto, x->props.family);
+-			if (x0 == NULL) {
+-				newspi = htonl(spi);
+-				break;
+-			}
+-			xfrm_state_put(x0);
++		xfrm_state_put(x0);
++		spin_unlock_bh(&net->xfrm.xfrm_state_lock);
++
++		if (signal_pending(current)) {
++			err = -ERESTARTSYS;
++			goto unlock;
+ 		}
++
++		if (low == high)
++			break;
+ 	}
+-	if (newspi) {
+-		spin_lock_bh(&net->xfrm.xfrm_state_lock);
+-		x->id.spi = newspi;
+-		h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, x->props.family);
+-		XFRM_STATE_INSERT(byspi, &x->byspi, net->xfrm.state_byspi + h,
+-				  x->xso.type);
+-		spin_unlock_bh(&net->xfrm.xfrm_state_lock);
+ 
+-		err = 0;
+-	} else {
++	if (err)
+ 		NL_SET_ERR_MSG(extack, "No SPI available in the requested range");
+-	}
+ 
+ unlock:
+ 	spin_unlock_bh(&x->lock);
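The xfrm_alloc_spi() rewrite collapses the fixed-SPI and random-range paths into one loop: each candidate is checked (now ignoring the mark, via the new proto-wide lookup) and inserted into the byspi hash under the same critical section, closing the window between lookup and insertion, and the loop can be interrupted by a signal. A condensed sketch, with spi_in_use() and claim_spi() as illustrative stand-ins for the lookup and hash insertion:

    for (h = 0; h < range; h++) {
        u32 spi = (low == high) ? low : get_random_u32_inclusive(low, high);

        lock();
        if (!spi_in_use(htonl(spi), proto)) {
            claim_spi(x, htonl(spi));      /* insert into byspi hash */
            unlock();
            return 0;                      /* success */
        }
        unlock();

        if (signal_pending(current))
            return -ERESTARTSYS;           /* give up politely */
        if (low == high)
            break;                         /* fixed SPI: single attempt */
    }
    return -ENOENT;                        /* range exhausted */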
+diff --git a/rust/Makefile b/rust/Makefile
+index 17491d8229a430..07c13100000cd7 100644
+--- a/rust/Makefile
++++ b/rust/Makefile
+@@ -55,6 +55,10 @@ core-cfgs = \
+ 
+ core-edition := $(if $(call rustc-min-version,108700),2024,2021)
+ 
++# `rustdoc` did not save the target modifiers, thus workaround for
++# the time being (https://github.com/rust-lang/rust/issues/144521).
++rustdoc_modifiers_workaround := $(if $(call rustc-min-version,108800),-Cunsafe-allow-abi-mismatch=fixed-x18)
++
+ quiet_cmd_rustdoc = RUSTDOC $(if $(rustdoc_host),H, ) $<
+       cmd_rustdoc = \
+ 	OBJTREE=$(abspath $(objtree)) \
+@@ -63,6 +67,7 @@ quiet_cmd_rustdoc = RUSTDOC $(if $(rustdoc_host),H, ) $<
+ 		-Zunstable-options --generate-link-to-definition \
+ 		--output $(rustdoc_output) \
+ 		--crate-name $(subst rustdoc-,,$@) \
++		$(rustdoc_modifiers_workaround) \
+ 		$(if $(rustdoc_host),,--sysroot=/dev/null) \
+ 		@$(objtree)/include/generated/rustc_cfg $<
+ 
+@@ -92,14 +97,14 @@ rustdoc: rustdoc-core rustdoc-macros rustdoc-compiler_builtins \
+ rustdoc-macros: private rustdoc_host = yes
+ rustdoc-macros: private rustc_target_flags = --crate-type proc-macro \
+     --extern proc_macro
+-rustdoc-macros: $(src)/macros/lib.rs FORCE
++rustdoc-macros: $(src)/macros/lib.rs rustdoc-clean FORCE
+ 	+$(call if_changed,rustdoc)
+ 
+ # Starting with Rust 1.82.0, skipping `-Wrustdoc::unescaped_backticks` should
+ # not be needed -- see https://github.com/rust-lang/rust/pull/128307.
+ rustdoc-core: private skip_flags = --edition=2021 -Wrustdoc::unescaped_backticks
+ rustdoc-core: private rustc_target_flags = --edition=$(core-edition) $(core-cfgs)
+-rustdoc-core: $(RUST_LIB_SRC)/core/src/lib.rs FORCE
++rustdoc-core: $(RUST_LIB_SRC)/core/src/lib.rs rustdoc-clean FORCE
+ 	+$(call if_changed,rustdoc)
+ 
+ rustdoc-compiler_builtins: $(src)/compiler_builtins.rs rustdoc-core FORCE
+@@ -116,6 +121,9 @@ rustdoc-kernel: $(src)/kernel/lib.rs rustdoc-core rustdoc-ffi rustdoc-macros \
+     $(obj)/bindings.o FORCE
+ 	+$(call if_changed,rustdoc)
+ 
++rustdoc-clean: FORCE
++	$(Q)rm -rf $(rustdoc_output)
++
+ quiet_cmd_rustc_test_library = RUSTC TL $<
+       cmd_rustc_test_library = \
+ 	OBJTREE=$(abspath $(objtree)) \
+@@ -175,6 +183,7 @@ quiet_cmd_rustdoc_test_kernel = RUSTDOC TK $<
+ 		--extern bindings --extern uapi \
+ 		--no-run --crate-name kernel -Zunstable-options \
+ 		--sysroot=/dev/null \
++		$(rustdoc_modifiers_workaround) \
+ 		--test-builder $(objtree)/scripts/rustdoc_test_builder \
+ 		$< $(rustdoc_test_kernel_quiet); \
+ 	$(objtree)/scripts/rustdoc_test_gen
+diff --git a/scripts/kconfig/gconf.c b/scripts/kconfig/gconf.c
+index c0f46f18906073..0caf0ced13df4a 100644
+--- a/scripts/kconfig/gconf.c
++++ b/scripts/kconfig/gconf.c
+@@ -748,7 +748,7 @@ static void renderer_edited(GtkCellRendererText * cell,
+ 	struct symbol *sym;
+ 
+ 	if (!gtk_tree_model_get_iter(model2, &iter, path))
+-		return;
++		goto free;
+ 
+ 	gtk_tree_model_get(model2, &iter, COL_MENU, &menu, -1);
+ 	sym = menu->sym;
+@@ -760,6 +760,7 @@ static void renderer_edited(GtkCellRendererText * cell,
+ 
+ 	update_tree(&rootmenu, NULL);
+ 
++free:
+ 	gtk_tree_path_free(path);
+ }
+ 
+@@ -942,13 +943,14 @@ on_treeview2_key_press_event(GtkWidget * widget,
+ void
+ on_treeview2_cursor_changed(GtkTreeView * treeview, gpointer user_data)
+ {
++	GtkTreeModel *model = gtk_tree_view_get_model(treeview);
+ 	GtkTreeSelection *selection;
+ 	GtkTreeIter iter;
+ 	struct menu *menu;
+ 
+ 	selection = gtk_tree_view_get_selection(treeview);
+-	if (gtk_tree_selection_get_selected(selection, &model2, &iter)) {
+-		gtk_tree_model_get(model2, &iter, COL_MENU, &menu, -1);
++	if (gtk_tree_selection_get_selected(selection, &model, &iter)) {
++		gtk_tree_model_get(model, &iter, COL_MENU, &menu, -1);
+ 		text_insert_help(menu);
+ 	}
+ }
+diff --git a/scripts/kconfig/lxdialog/inputbox.c b/scripts/kconfig/lxdialog/inputbox.c
+index 3c6e24b20f5be6..5e4a131724f288 100644
+--- a/scripts/kconfig/lxdialog/inputbox.c
++++ b/scripts/kconfig/lxdialog/inputbox.c
+@@ -39,8 +39,10 @@ int dialog_inputbox(const char *title, const char *prompt, int height, int width
+ 
+ 	if (!init)
+ 		instr[0] = '\0';
+-	else
+-		strcpy(instr, init);
++	else {
++		strncpy(instr, init, sizeof(dialog_input_result) - 1);
++		instr[sizeof(dialog_input_result) - 1] = '\0';
++	}
+ 
+ do_resize:
+ 	if (getmaxy(stdscr) <= (height - INPUTBOX_HEIGHT_MIN))
+diff --git a/scripts/kconfig/lxdialog/menubox.c b/scripts/kconfig/lxdialog/menubox.c
+index 6e6244df0c56e3..d4c19b7beebbd4 100644
+--- a/scripts/kconfig/lxdialog/menubox.c
++++ b/scripts/kconfig/lxdialog/menubox.c
+@@ -264,7 +264,7 @@ int dialog_menu(const char *title, const char *prompt,
+ 		if (key < 256 && isalpha(key))
+ 			key = tolower(key);
+ 
+-		if (strchr("ynmh", key))
++		if (strchr("ynmh ", key))
+ 			i = max_choice;
+ 		else {
+ 			for (i = choice + 1; i < max_choice; i++) {
+diff --git a/scripts/kconfig/nconf.c b/scripts/kconfig/nconf.c
+index 063b4f7ccbdb36..5f484422278e13 100644
+--- a/scripts/kconfig/nconf.c
++++ b/scripts/kconfig/nconf.c
+@@ -593,6 +593,8 @@ static void item_add_str(const char *fmt, ...)
+ 		tmp_str,
+ 		sizeof(k_menu_items[index].str));
+ 
++	k_menu_items[index].str[sizeof(k_menu_items[index].str) - 1] = '\0';
++
+ 	free_item(curses_menu_items[index]);
+ 	curses_menu_items[index] = new_item(
+ 			k_menu_items[index].str,
+diff --git a/scripts/kconfig/nconf.gui.c b/scripts/kconfig/nconf.gui.c
+index 72b605efe549d9..1526c52bc34cd0 100644
+--- a/scripts/kconfig/nconf.gui.c
++++ b/scripts/kconfig/nconf.gui.c
+@@ -350,6 +350,7 @@ int dialog_inputbox(WINDOW *main_window,
+ 	x = (columns-win_cols)/2;
+ 
+ 	strncpy(result, init, *result_len);
++	result[*result_len - 1] = '\0';
+ 
+ 	/* create the windows */
+ 	win = newwin(win_lines, win_cols, y, x);
+diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c
+index 571158ec6188f9..cccd61cca509ce 100644
+--- a/security/apparmor/domain.c
++++ b/security/apparmor/domain.c
+@@ -509,6 +509,7 @@ static const char *next_name(int xtype, const char *name)
+  * @name: returns: name tested to find label (NOT NULL)
+  *
+  * Returns: refcounted label, or NULL on failure (MAYBE NULL)
++ *          @name will always be set with the last name tried
+  */
+ struct aa_label *x_table_lookup(struct aa_profile *profile, u32 xindex,
+ 				const char **name)
+@@ -518,6 +519,7 @@ struct aa_label *x_table_lookup(struct aa_profile *profile, u32 xindex,
+ 	struct aa_label *label = NULL;
+ 	u32 xtype = xindex & AA_X_TYPE_MASK;
+ 	int index = xindex & AA_X_INDEX_MASK;
++	const char *next;
+ 
+ 	AA_BUG(!name);
+ 
+@@ -525,25 +527,27 @@ struct aa_label *x_table_lookup(struct aa_profile *profile, u32 xindex,
+ 	/* TODO: move lookup parsing to unpack time so this is a straight
+ 	 *       index into the resultant label
+ 	 */
+-	for (*name = rules->file->trans.table[index]; !label && *name;
+-	     *name = next_name(xtype, *name)) {
++	for (next = rules->file->trans.table[index]; next;
++	     next = next_name(xtype, next)) {
++		const char *lookup = (*next == '&') ? next + 1 : next;
++		*name = next;
+ 		if (xindex & AA_X_CHILD) {
+-			struct aa_profile *new_profile;
+-			/* release by caller */
+-			new_profile = aa_find_child(profile, *name);
+-			if (new_profile)
+-				label = &new_profile->label;
++			/* TODO: switch to parse to get stack of child */
++			struct aa_profile *new = aa_find_child(profile, lookup);
++
++			if (new)
++				/* release by caller */
++				return &new->label;
+ 			continue;
+ 		}
+-		label = aa_label_parse(&profile->label, *name, GFP_KERNEL,
++		label = aa_label_parse(&profile->label, lookup, GFP_KERNEL,
+ 				       true, false);
+-		if (IS_ERR(label))
+-			label = NULL;
++		if (!IS_ERR_OR_NULL(label))
++			/* release by caller */
++			return label;
+ 	}
+ 
+-	/* released by caller */
+-
+-	return label;
++	return NULL;
+ }
+ 
+ /**
+@@ -568,9 +572,9 @@ static struct aa_label *x_to_label(struct aa_profile *profile,
+ 	struct aa_ruleset *rules = list_first_entry(&profile->rules,
+ 						    typeof(*rules), list);
+ 	struct aa_label *new = NULL;
++	struct aa_label *stack = NULL;
+ 	struct aa_ns *ns = profile->ns;
+ 	u32 xtype = xindex & AA_X_TYPE_MASK;
+-	const char *stack = NULL;
+ 
+ 	switch (xtype) {
+ 	case AA_X_NONE:
+@@ -579,13 +583,14 @@ static struct aa_label *x_to_label(struct aa_profile *profile,
+ 		break;
+ 	case AA_X_TABLE:
+ 		/* TODO: fix when perm mapping done at unload */
+-		stack = rules->file->trans.table[xindex & AA_X_INDEX_MASK];
+-		if (*stack != '&') {
+-			/* released by caller */
+-			new = x_table_lookup(profile, xindex, lookupname);
+-			stack = NULL;
++		/* released by caller
++		 * if NULL for both the stack and direct lookups, try the fallback
++		 */
++		new = x_table_lookup(profile, xindex, lookupname);
++		if (!new || **lookupname != '&')
+ 			break;
+-		}
++		stack = new;
++		new = NULL;
+ 		fallthrough;	/* to X_NAME */
+ 	case AA_X_NAME:
+ 		if (xindex & AA_X_CHILD)
+@@ -600,6 +605,7 @@ static struct aa_label *x_to_label(struct aa_profile *profile,
+ 		break;
+ 	}
+ 
++	/* fallback transition check */
+ 	if (!new) {
+ 		if (xindex & AA_X_INHERIT) {
+ 			/* (p|c|n)ix - don't change profile but do
+@@ -618,12 +624,12 @@ static struct aa_label *x_to_label(struct aa_profile *profile,
+ 		/* base the stack on post domain transition */
+ 		struct aa_label *base = new;
+ 
+-		new = aa_label_parse(base, stack, GFP_KERNEL, true, false);
+-		if (IS_ERR(new))
+-			new = NULL;
++		new = aa_label_merge(base, stack, GFP_KERNEL);
++		/* null on error */
+ 		aa_put_label(base);
+ 	}
+ 
++	aa_put_label(stack);
+ 	/* released by caller */
+ 	return new;
+ }
+diff --git a/security/apparmor/file.c b/security/apparmor/file.c
+index d52a5b14dad4c7..62bc46e037588a 100644
+--- a/security/apparmor/file.c
++++ b/security/apparmor/file.c
+@@ -423,9 +423,11 @@ int aa_path_link(const struct cred *subj_cred,
+ {
+ 	struct path link = { .mnt = new_dir->mnt, .dentry = new_dentry };
+ 	struct path target = { .mnt = new_dir->mnt, .dentry = old_dentry };
++	struct inode *inode = d_backing_inode(old_dentry);
++	vfsuid_t vfsuid = i_uid_into_vfsuid(mnt_idmap(target.mnt), inode);
+ 	struct path_cond cond = {
+-		d_backing_inode(old_dentry)->i_uid,
+-		d_backing_inode(old_dentry)->i_mode
++		.uid = vfsuid_into_kuid(vfsuid),
++		.mode = inode->i_mode,
+ 	};
+ 	char *buffer = NULL, *buffer2 = NULL;
+ 	struct aa_profile *profile;
+diff --git a/security/apparmor/include/lib.h b/security/apparmor/include/lib.h
+index d7a894b1031ffd..1ec00113a056fc 100644
+--- a/security/apparmor/include/lib.h
++++ b/security/apparmor/include/lib.h
+@@ -48,7 +48,11 @@ extern struct aa_dfa *stacksplitdfa;
+ #define AA_BUG_FMT(X, fmt, args...)					\
+ 	WARN((X), "AppArmor WARN %s: (" #X "): " fmt, __func__, ##args)
+ #else
+-#define AA_BUG_FMT(X, fmt, args...) no_printk(fmt, ##args)
++#define AA_BUG_FMT(X, fmt, args...)					\
++	do {								\
++		BUILD_BUG_ON_INVALID(X);				\
++		no_printk(fmt, ##args);					\
++	} while (0)
+ #endif
+ 
+ #define AA_ERROR(fmt, args...)						\
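The !SECURITY_APPARMOR_DEBUG branch of AA_BUG_FMT now routes the condition through BUILD_BUG_ON_INVALID(), so the expression keeps being type-checked (and cannot bit-rot or trigger unused-variable warnings) without ever being evaluated. A userspace demo of the underlying trick; the kernel definition is essentially ((void)(sizeof((long)(e)))):

    #include <stdio.h>

    #define CHECK_VALID(e) ((void)(sizeof((long)(e))))

    static int side_effect(void)
    {
        puts("evaluated!");
        return 1;
    }

    int main(void)
    {
        CHECK_VALID(side_effect());  /* compiles; prints nothing, since
                                      * sizeof is an unevaluated context */
        /* CHECK_VALID(no_such_symbol); would fail to compile */
        return 0;
    }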
+diff --git a/security/inode.c b/security/inode.c
+index da3ab44c8e571f..58cc60c50498d2 100644
+--- a/security/inode.c
++++ b/security/inode.c
+@@ -159,7 +159,6 @@ static struct dentry *securityfs_create_dentry(const char *name, umode_t mode,
+ 		inode->i_fop = fops;
+ 	}
+ 	d_instantiate(dentry, inode);
+-	dget(dentry);
+ 	inode_unlock(dir);
+ 	return dentry;
+ 
+@@ -306,7 +305,6 @@ void securityfs_remove(struct dentry *dentry)
+ 			simple_rmdir(dir, dentry);
+ 		else
+ 			simple_unlink(dir, dentry);
+-		dput(dentry);
+ 	}
+ 	inode_unlock(dir);
+ 	simple_release_fs(&mount, &mount_count);
+diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
+index 0a1ba26872f846..9b91f68b3fff07 100644
+--- a/sound/core/pcm_native.c
++++ b/sound/core/pcm_native.c
+@@ -24,6 +24,7 @@
+ #include <sound/minors.h>
+ #include <linux/uio.h>
+ #include <linux/delay.h>
++#include <linux/bitops.h>
+ 
+ #include "pcm_local.h"
+ 
+@@ -3130,13 +3131,23 @@ struct snd_pcm_sync_ptr32 {
+ static snd_pcm_uframes_t recalculate_boundary(struct snd_pcm_runtime *runtime)
+ {
+ 	snd_pcm_uframes_t boundary;
++	snd_pcm_uframes_t border;
++	int order;
+ 
+ 	if (! runtime->buffer_size)
+ 		return 0;
+-	boundary = runtime->buffer_size;
+-	while (boundary * 2 <= 0x7fffffffUL - runtime->buffer_size)
+-		boundary *= 2;
+-	return boundary;
++
++	border = 0x7fffffffUL - runtime->buffer_size;
++	if (runtime->buffer_size > border)
++		return runtime->buffer_size;
++
++	order = __fls(border) - __fls(runtime->buffer_size);
++	boundary = runtime->buffer_size << order;
++
++	if (boundary <= border)
++		return boundary;
++	else
++		return boundary / 2;
+ }
+ 
+ static int snd_pcm_ioctl_sync_ptr_compat(struct snd_pcm_substream *substream,
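The boundary computation becomes O(1): instead of doubling until the 31-bit limit, the new code measures the gap to 0x7fffffff - buffer_size, shifts buffer_size up by the difference in highest-bit positions, and steps back one doubling if it overshot. A userspace re-derivation, with fls_like() emulating __fls():

    #include <stdio.h>

    static int fls_like(unsigned long v)   /* index of highest set bit */
    {
        int i = -1;

        while (v) { v >>= 1; i++; }
        return i;
    }

    static unsigned long boundary(unsigned long buffer_size)
    {
        unsigned long border, b;
        int order;

        if (!buffer_size)
            return 0;
        border = 0x7fffffffUL - buffer_size;
        if (buffer_size > border)
            return buffer_size;
        order = fls_like(border) - fls_like(buffer_size);
        b = buffer_size << order;
        return b <= border ? b : b / 2;
    }

    int main(void)
    {
        /* e.g. a 48000-frame buffer: 48000 << 15 = 1572864000, the same
         * value the old doubling loop produced */
        printf("%lu\n", boundary(48000));
        return 0;
    }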
+diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
+index 46a2204049993d..db2487cfd5da9d 100644
+--- a/sound/pci/hda/hda_codec.c
++++ b/sound/pci/hda/hda_codec.c
+@@ -639,24 +639,16 @@ static void hda_jackpoll_work(struct work_struct *work)
+ 	struct hda_codec *codec =
+ 		container_of(work, struct hda_codec, jackpoll_work.work);
+ 
+-	/* for non-polling trigger: we need nothing if already powered on */
+-	if (!codec->jackpoll_interval && snd_hdac_is_power_on(&codec->core))
++	if (!codec->jackpoll_interval)
+ 		return;
+ 
+ 	/* the power-up/down sequence triggers the runtime resume */
+-	snd_hda_power_up_pm(codec);
++	snd_hda_power_up(codec);
+ 	/* update jacks manually if polling is required, too */
+-	if (codec->jackpoll_interval) {
+-		snd_hda_jack_set_dirty_all(codec);
+-		snd_hda_jack_poll_all(codec);
+-	}
+-	snd_hda_power_down_pm(codec);
+-
+-	if (!codec->jackpoll_interval)
+-		return;
+-
+-	schedule_delayed_work(&codec->jackpoll_work,
+-			      codec->jackpoll_interval);
++	snd_hda_jack_set_dirty_all(codec);
++	snd_hda_jack_poll_all(codec);
++	schedule_delayed_work(&codec->jackpoll_work, codec->jackpoll_interval);
++	snd_hda_power_down(codec);
+ }
+ 
+ /* release all pincfg lists */
+@@ -2926,12 +2918,12 @@ static void hda_call_codec_resume(struct hda_codec *codec)
+ 		snd_hda_regmap_sync(codec);
+ 	}
+ 
+-	if (codec->jackpoll_interval)
+-		hda_jackpoll_work(&codec->jackpoll_work.work);
+-	else
+-		snd_hda_jack_report_sync(codec);
++	snd_hda_jack_report_sync(codec);
+ 	codec->core.dev.power.power_state = PMSG_ON;
+ 	snd_hdac_leave_pm(&codec->core);
++	if (codec->jackpoll_interval)
++		schedule_delayed_work(&codec->jackpoll_work,
++				      codec->jackpoll_interval);
+ }
+ 
+ static int hda_codec_runtime_suspend(struct device *dev)
+@@ -2943,8 +2935,6 @@ static int hda_codec_runtime_suspend(struct device *dev)
+ 	if (!codec->card)
+ 		return 0;
+ 
+-	cancel_delayed_work_sync(&codec->jackpoll_work);
+-
+ 	state = hda_call_codec_suspend(codec);
+ 	if (codec->link_down_at_suspend ||
+ 	    (codec_has_clkstop(codec) && codec_has_epss(codec) &&
+@@ -2952,10 +2942,6 @@ static int hda_codec_runtime_suspend(struct device *dev)
+ 		snd_hdac_codec_link_down(&codec->core);
+ 	snd_hda_codec_display_power(codec, false);
+ 
+-	if (codec->bus->jackpoll_in_suspend &&
+-		(dev->power.power_state.event != PM_EVENT_SUSPEND))
+-		schedule_delayed_work(&codec->jackpoll_work,
+-					codec->jackpoll_interval);
+ 	return 0;
+ }
+ 
+@@ -3052,6 +3038,7 @@ void snd_hda_codec_shutdown(struct hda_codec *codec)
+ 	if (!codec->core.registered)
+ 		return;
+ 
++	codec->jackpoll_interval = 0; /* don't poll any longer */
+ 	cancel_delayed_work_sync(&codec->jackpoll_work);
+ 	list_for_each_entry(cpcm, &codec->pcm_list_head, list)
+ 		snd_pcm_suspend_all(cpcm->pcm);
+@@ -3118,10 +3105,11 @@ int snd_hda_codec_build_controls(struct hda_codec *codec)
+ 	if (err < 0)
+ 		return err;
+ 
++	snd_hda_jack_report_sync(codec); /* call at the last init point */
+ 	if (codec->jackpoll_interval)
+-		hda_jackpoll_work(&codec->jackpoll_work.work);
+-	else
+-		snd_hda_jack_report_sync(codec); /* call at the last init point */
++		schedule_delayed_work(&codec->jackpoll_work,
++				      codec->jackpoll_interval);
++
+ 	sync_power_up_states(codec);
+ 	return 0;
+ }
+diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
+index 77432e06f3e32c..a2f57d7424bb84 100644
+--- a/sound/pci/hda/patch_ca0132.c
++++ b/sound/pci/hda/patch_ca0132.c
+@@ -4410,7 +4410,7 @@ static int add_tuning_control(struct hda_codec *codec,
+ 	}
+ 	knew.private_value =
+ 		HDA_COMPOSE_AMP_VAL(nid, 1, 0, type);
+-	sprintf(namestr, "%s %s Volume", name, dirstr[dir]);
++	snprintf(namestr, sizeof(namestr), "%s %s Volume", name, dirstr[dir]);
+ 	return snd_hda_ctl_add(codec, nid, snd_ctl_new1(&knew, codec));
+ }
+ 
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 6ef635d37f456b..840cde49935d0c 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -11302,6 +11302,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1854, 0x0440, "LG CQ6", ALC256_FIXUP_HEADPHONE_AMP_VOL),
+ 	SND_PCI_QUIRK(0x1854, 0x0441, "LG CQ6 AIO", ALC256_FIXUP_HEADPHONE_AMP_VOL),
+ 	SND_PCI_QUIRK(0x1854, 0x0488, "LG gram 16 (16Z90R)", ALC298_FIXUP_SAMSUNG_AMP_V2_4_AMPS),
++	SND_PCI_QUIRK(0x1854, 0x0489, "LG gram 16 (16Z90R-A)", ALC298_FIXUP_SAMSUNG_AMP_V2_4_AMPS),
+ 	SND_PCI_QUIRK(0x1854, 0x048a, "LG gram 17 (17ZD90R)", ALC298_FIXUP_SAMSUNG_AMP_V2_4_AMPS),
+ 	SND_PCI_QUIRK(0x19e5, 0x3204, "Huawei MACH-WX9", ALC256_FIXUP_HUAWEI_MACH_WX9_PINS),
+ 	SND_PCI_QUIRK(0x19e5, 0x320f, "Huawei WRT-WX9 ", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE),
+@@ -11331,6 +11332,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1d72, 0x1901, "RedmiBook 14", ALC256_FIXUP_ASUS_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1d72, 0x1945, "Redmi G", ALC256_FIXUP_ASUS_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1d72, 0x1947, "RedmiBook Air", ALC255_FIXUP_XIAOMI_HEADSET_MIC),
++	SND_PCI_QUIRK(0x1ee7, 0x2078, "HONOR BRB-X M1010", ALC2XX_FIXUP_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1f66, 0x0105, "Ayaneo Portable Game Player", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x2014, 0x800a, "Positivo ARN50", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ 	SND_PCI_QUIRK(0x2782, 0x0214, "VAIO VJFE-CL", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+@@ -11347,6 +11349,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0xf111, 0x0001, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0xf111, 0x0006, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0xf111, 0x0009, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0xf111, 0x000b, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0xf111, 0x000c, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE),
+ 
+ #if 0
+diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c
+index e4bb99f71c2c9e..95f0bd2e15323c 100644
+--- a/sound/pci/intel8x0.c
++++ b/sound/pci/intel8x0.c
+@@ -2249,7 +2249,7 @@ static int snd_intel8x0_mixer(struct intel8x0 *chip, int ac97_clock,
+ 			tmp |= chip->ac97_sdin[0] << ICH_DI1L_SHIFT;
+ 			for (i = 1; i < 4; i++) {
+ 				if (pcm->r[0].codec[i]) {
+-					tmp |= chip->ac97_sdin[pcm->r[0].codec[1]->num] << ICH_DI2L_SHIFT;
++					tmp |= chip->ac97_sdin[pcm->r[0].codec[i]->num] << ICH_DI2L_SHIFT;
+ 					break;
+ 				}
+ 			}
+diff --git a/sound/soc/codecs/hdac_hdmi.c b/sound/soc/codecs/hdac_hdmi.c
+index e1a7f0b0c0f331..33c7ba842eee93 100644
+--- a/sound/soc/codecs/hdac_hdmi.c
++++ b/sound/soc/codecs/hdac_hdmi.c
+@@ -1233,7 +1233,8 @@ static int hdac_hdmi_parse_eld(struct hdac_device *hdev,
+ 						>> DRM_ELD_VER_SHIFT;
+ 
+ 	if (ver != ELD_VER_CEA_861D && ver != ELD_VER_PARTIAL) {
+-		dev_err(&hdev->dev, "HDMI: Unknown ELD version %d\n", ver);
++		dev_err_ratelimited(&hdev->dev,
++				    "HDMI: Unknown ELD version %d\n", ver);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -1241,7 +1242,8 @@ static int hdac_hdmi_parse_eld(struct hdac_device *hdev,
+ 		DRM_ELD_MNL_MASK) >> DRM_ELD_MNL_SHIFT;
+ 
+ 	if (mnl > ELD_MAX_MNL) {
+-		dev_err(&hdev->dev, "HDMI: MNL Invalid %d\n", mnl);
++		dev_err_ratelimited(&hdev->dev,
++				    "HDMI: MNL Invalid %d\n", mnl);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -1300,8 +1302,8 @@ static void hdac_hdmi_present_sense(struct hdac_hdmi_pin *pin,
+ 
+ 	if (!port->eld.monitor_present || !port->eld.eld_valid) {
+ 
+-		dev_err(&hdev->dev, "%s: disconnect for pin:port %d:%d\n",
+-						__func__, pin->nid, port->id);
++		dev_dbg(&hdev->dev, "%s: disconnect for pin:port %d:%d\n",
++			__func__, pin->nid, port->id);
+ 
+ 		/*
+ 		 * PCMs are not registered during device probe, so don't
+diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c
+index 855139348edb4c..c366ae2275e860 100644
+--- a/sound/soc/codecs/rt5640.c
++++ b/sound/soc/codecs/rt5640.c
+@@ -3013,6 +3013,11 @@ static int rt5640_i2c_probe(struct i2c_client *i2c)
+ 	}
+ 
+ 	regmap_read(rt5640->regmap, RT5640_VENDOR_ID2, &val);
++	if (val != RT5640_DEVICE_ID) {
++		usleep_range(60000, 100000);
++		regmap_read(rt5640->regmap, RT5640_VENDOR_ID2, &val);
++	}
++
+ 	if (val != RT5640_DEVICE_ID) {
+ 		dev_err(&i2c->dev,
+ 			"Device with ID register %#x is not rt5640/39\n", val);
+diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
+index 25d4b27f5b7662..57614c0b711ea9 100644
+--- a/sound/soc/fsl/fsl_sai.c
++++ b/sound/soc/fsl/fsl_sai.c
+@@ -777,9 +777,9 @@ static void fsl_sai_config_disable(struct fsl_sai *sai, int dir)
+ 	 * are running concurrently.
+ 	 */
+ 	/* Software Reset */
+-	regmap_write(sai->regmap, FSL_SAI_xCSR(tx, ofs), FSL_SAI_CSR_SR);
++	regmap_update_bits(sai->regmap, FSL_SAI_xCSR(tx, ofs), FSL_SAI_CSR_SR, FSL_SAI_CSR_SR);
+ 	/* Clear SR bit to finish the reset */
+-	regmap_write(sai->regmap, FSL_SAI_xCSR(tx, ofs), 0);
++	regmap_update_bits(sai->regmap, FSL_SAI_xCSR(tx, ofs), FSL_SAI_CSR_SR, 0);
+ }
+ 
+ static int fsl_sai_trigger(struct snd_pcm_substream *substream, int cmd,
+@@ -898,11 +898,11 @@ static int fsl_sai_dai_probe(struct snd_soc_dai *cpu_dai)
+ 	unsigned int ofs = sai->soc_data->reg_offset;
+ 
+ 	/* Software Reset for both Tx and Rx */
+-	regmap_write(sai->regmap, FSL_SAI_TCSR(ofs), FSL_SAI_CSR_SR);
+-	regmap_write(sai->regmap, FSL_SAI_RCSR(ofs), FSL_SAI_CSR_SR);
++	regmap_update_bits(sai->regmap, FSL_SAI_TCSR(ofs), FSL_SAI_CSR_SR, FSL_SAI_CSR_SR);
++	regmap_update_bits(sai->regmap, FSL_SAI_RCSR(ofs), FSL_SAI_CSR_SR, FSL_SAI_CSR_SR);
+ 	/* Clear SR bit to finish the reset */
+-	regmap_write(sai->regmap, FSL_SAI_TCSR(ofs), 0);
+-	regmap_write(sai->regmap, FSL_SAI_RCSR(ofs), 0);
++	regmap_update_bits(sai->regmap, FSL_SAI_TCSR(ofs), FSL_SAI_CSR_SR, 0);
++	regmap_update_bits(sai->regmap, FSL_SAI_RCSR(ofs), FSL_SAI_CSR_SR, 0);
+ 
+ 	regmap_update_bits(sai->regmap, FSL_SAI_TCR1(ofs),
+ 			   FSL_SAI_CR1_RFW_MASK(sai->soc_data->fifo_depth),
+@@ -1785,11 +1785,11 @@ static int fsl_sai_runtime_resume(struct device *dev)
+ 
+ 	regcache_cache_only(sai->regmap, false);
+ 	regcache_mark_dirty(sai->regmap);
+-	regmap_write(sai->regmap, FSL_SAI_TCSR(ofs), FSL_SAI_CSR_SR);
+-	regmap_write(sai->regmap, FSL_SAI_RCSR(ofs), FSL_SAI_CSR_SR);
++	regmap_update_bits(sai->regmap, FSL_SAI_TCSR(ofs), FSL_SAI_CSR_SR, FSL_SAI_CSR_SR);
++	regmap_update_bits(sai->regmap, FSL_SAI_RCSR(ofs), FSL_SAI_CSR_SR, FSL_SAI_CSR_SR);
+ 	usleep_range(1000, 2000);
+-	regmap_write(sai->regmap, FSL_SAI_TCSR(ofs), 0);
+-	regmap_write(sai->regmap, FSL_SAI_RCSR(ofs), 0);
++	regmap_update_bits(sai->regmap, FSL_SAI_TCSR(ofs), FSL_SAI_CSR_SR, 0);
++	regmap_update_bits(sai->regmap, FSL_SAI_RCSR(ofs), FSL_SAI_CSR_SR, 0);
+ 
+ 	ret = regcache_sync(sai->regmap);
+ 	if (ret)
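The rationale for every regmap_write-to-regmap_update_bits conversion above: writing only FSL_SAI_CSR_SR zeroes every other CSR bit (enables, interrupt masks) as a side effect, while update_bits performs a read-modify-write that touches nothing but the reset bit. A hedged helper-shaped sketch of the pattern:

    static void sai_soft_reset(struct regmap *map, unsigned int csr,
                               unsigned int sr_bit)
    {
        regmap_update_bits(map, csr, sr_bit, sr_bit); /* assert reset  */
        regmap_update_bits(map, csr, sr_bit, 0);      /* release reset */
    }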
+diff --git a/sound/soc/intel/avs/core.c b/sound/soc/intel/avs/core.c
+index 82839d0994ee3e..80b526554bbf48 100644
+--- a/sound/soc/intel/avs/core.c
++++ b/sound/soc/intel/avs/core.c
+@@ -439,6 +439,8 @@ static int avs_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
+ 	adev = devm_kzalloc(dev, sizeof(*adev), GFP_KERNEL);
+ 	if (!adev)
+ 		return -ENOMEM;
++	bus = &adev->base.core;
++
+ 	ret = avs_bus_init(adev, pci, id);
+ 	if (ret < 0) {
+ 		dev_err(dev, "failed to init avs bus: %d\n", ret);
+@@ -449,7 +451,6 @@ static int avs_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	bus = &adev->base.core;
+ 	bus->addr = pci_resource_start(pci, 0);
+ 	bus->remap_addr = pci_ioremap_bar(pci, 0);
+ 	if (!bus->remap_addr) {
+diff --git a/sound/soc/qcom/lpass-platform.c b/sound/soc/qcom/lpass-platform.c
+index addd2c4bdd3e8f..b6a33b1f4f7e36 100644
+--- a/sound/soc/qcom/lpass-platform.c
++++ b/sound/soc/qcom/lpass-platform.c
+@@ -202,7 +202,6 @@ static int lpass_platform_pcmops_open(struct snd_soc_component *component,
+ 	struct regmap *map;
+ 	unsigned int dai_id = cpu_dai->driver->id;
+ 
+-	component->id = dai_id;
+ 	data = kzalloc(sizeof(*data), GFP_KERNEL);
+ 	if (!data)
+ 		return -ENOMEM;
+@@ -1190,13 +1189,14 @@ static int lpass_platform_pcmops_suspend(struct snd_soc_component *component)
+ {
+ 	struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
+ 	struct regmap *map;
+-	unsigned int dai_id = component->id;
+ 
+-	if (dai_id == LPASS_DP_RX)
++	if (drvdata->hdmi_port_enable) {
+ 		map = drvdata->hdmiif_map;
+-	else
+-		map = drvdata->lpaif_map;
++		regcache_cache_only(map, true);
++		regcache_mark_dirty(map);
++	}
+ 
++	map = drvdata->lpaif_map;
+ 	regcache_cache_only(map, true);
+ 	regcache_mark_dirty(map);
+ 
+@@ -1207,14 +1207,19 @@ static int lpass_platform_pcmops_resume(struct snd_soc_component *component)
+ {
+ 	struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
+ 	struct regmap *map;
+-	unsigned int dai_id = component->id;
++	int ret;
+ 
+-	if (dai_id == LPASS_DP_RX)
++	if (drvdata->hdmi_port_enable) {
+ 		map = drvdata->hdmiif_map;
+-	else
+-		map = drvdata->lpaif_map;
++		regcache_cache_only(map, false);
++		ret = regcache_sync(map);
++		if (ret)
++			return ret;
++	}
+ 
++	map = drvdata->lpaif_map;
+ 	regcache_cache_only(map, false);
++
+ 	return regcache_sync(map);
+ }
+ 
+@@ -1224,7 +1229,9 @@ static int lpass_platform_copy(struct snd_soc_component *component,
+ 			       unsigned long bytes)
+ {
+ 	struct snd_pcm_runtime *rt = substream->runtime;
+-	unsigned int dai_id = component->id;
++	struct snd_soc_pcm_runtime *soc_runtime = snd_soc_substream_to_rtd(substream);
++	struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(soc_runtime, 0);
++	unsigned int dai_id = cpu_dai->driver->id;
+ 	int ret = 0;
+ 
+ 	void __iomem *dma_buf = (void __iomem *) (rt->dma_area + pos +
+diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
+index e3c8d4f20b9c13..4ac870c2dafa2c 100644
+--- a/sound/soc/soc-core.c
++++ b/sound/soc/soc-core.c
+@@ -1138,6 +1138,9 @@ static int snd_soc_compensate_channel_connection_map(struct snd_soc_card *card,
+ void snd_soc_remove_pcm_runtime(struct snd_soc_card *card,
+ 				struct snd_soc_pcm_runtime *rtd)
+ {
++	if (!rtd)
++		return;
++
+ 	lockdep_assert_held(&client_mutex);
+ 
+ 	/*
+diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
+index 99521c784a9b16..196799b2fe24db 100644
+--- a/sound/soc/soc-dapm.c
++++ b/sound/soc/soc-dapm.c
+@@ -740,6 +740,10 @@ static int snd_soc_dapm_set_bias_level(struct snd_soc_dapm_context *dapm,
+ out:
+ 	trace_snd_soc_bias_level_done(dapm, level);
+ 
++	/* success */
++	if (ret == 0)
++		snd_soc_dapm_init_bias_level(dapm, level);
++
+ 	return ret;
+ }
+ 
+diff --git a/sound/soc/sof/topology.c b/sound/soc/sof/topology.c
+index f9708b8fd73b6a..0104257df930ea 100644
+--- a/sound/soc/sof/topology.c
++++ b/sound/soc/sof/topology.c
+@@ -2364,14 +2364,25 @@ static int sof_dspless_widget_ready(struct snd_soc_component *scomp, int index,
+ 				    struct snd_soc_dapm_widget *w,
+ 				    struct snd_soc_tplg_dapm_widget *tw)
+ {
++	struct snd_soc_tplg_private *priv = &tw->priv;
++	int ret;
++
++	/* for snd_soc_dapm_widget.no_wname_in_kcontrol_name */
++	ret = sof_parse_tokens(scomp, w, dapm_widget_tokens,
++			       ARRAY_SIZE(dapm_widget_tokens),
++			       priv->array, le32_to_cpu(priv->size));
++	if (ret < 0) {
++		dev_err(scomp->dev, "failed to parse dapm widget tokens for %s\n",
++			w->name);
++		return ret;
++	}
++
+ 	if (WIDGET_IS_DAI(w->id)) {
+ 		static const struct sof_topology_token dai_tokens[] = {
+ 			{SOF_TKN_DAI_TYPE, SND_SOC_TPLG_TUPLE_TYPE_STRING, get_token_dai_type, 0}};
+ 		struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+-		struct snd_soc_tplg_private *priv = &tw->priv;
+ 		struct snd_sof_widget *swidget;
+ 		struct snd_sof_dai *sdai;
+-		int ret;
+ 
+ 		swidget = kzalloc(sizeof(*swidget), GFP_KERNEL);
+ 		if (!swidget)
+diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
+index 1e7192cb4693c0..ef30d4aaf81a4f 100644
+--- a/sound/usb/mixer_quirks.c
++++ b/sound/usb/mixer_quirks.c
+@@ -2152,15 +2152,15 @@ static int dell_dock_mixer_init(struct usb_mixer_interface *mixer)
+ #define SND_RME_CLK_FREQMUL_SHIFT		18
+ #define SND_RME_CLK_FREQMUL_MASK		0x7
+ #define SND_RME_CLK_SYSTEM(x) \
+-	((x >> SND_RME_CLK_SYSTEM_SHIFT) & SND_RME_CLK_SYSTEM_MASK)
++	(((x) >> SND_RME_CLK_SYSTEM_SHIFT) & SND_RME_CLK_SYSTEM_MASK)
+ #define SND_RME_CLK_AES(x) \
+-	((x >> SND_RME_CLK_AES_SHIFT) & SND_RME_CLK_AES_SPDIF_MASK)
++	(((x) >> SND_RME_CLK_AES_SHIFT) & SND_RME_CLK_AES_SPDIF_MASK)
+ #define SND_RME_CLK_SPDIF(x) \
+-	((x >> SND_RME_CLK_SPDIF_SHIFT) & SND_RME_CLK_AES_SPDIF_MASK)
++	(((x) >> SND_RME_CLK_SPDIF_SHIFT) & SND_RME_CLK_AES_SPDIF_MASK)
+ #define SND_RME_CLK_SYNC(x) \
+-	((x >> SND_RME_CLK_SYNC_SHIFT) & SND_RME_CLK_SYNC_MASK)
++	(((x) >> SND_RME_CLK_SYNC_SHIFT) & SND_RME_CLK_SYNC_MASK)
+ #define SND_RME_CLK_FREQMUL(x) \
+-	((x >> SND_RME_CLK_FREQMUL_SHIFT) & SND_RME_CLK_FREQMUL_MASK)
++	(((x) >> SND_RME_CLK_FREQMUL_SHIFT) & SND_RME_CLK_FREQMUL_MASK)
+ #define SND_RME_CLK_AES_LOCK			0x1
+ #define SND_RME_CLK_AES_SYNC			0x4
+ #define SND_RME_CLK_SPDIF_LOCK			0x2
+@@ -2169,9 +2169,9 @@ static int dell_dock_mixer_init(struct usb_mixer_interface *mixer)
+ #define SND_RME_SPDIF_FORMAT_SHIFT		5
+ #define SND_RME_BINARY_MASK			0x1
+ #define SND_RME_SPDIF_IF(x) \
+-	((x >> SND_RME_SPDIF_IF_SHIFT) & SND_RME_BINARY_MASK)
++	(((x) >> SND_RME_SPDIF_IF_SHIFT) & SND_RME_BINARY_MASK)
+ #define SND_RME_SPDIF_FORMAT(x) \
+-	((x >> SND_RME_SPDIF_FORMAT_SHIFT) & SND_RME_BINARY_MASK)
++	(((x) >> SND_RME_SPDIF_FORMAT_SHIFT) & SND_RME_BINARY_MASK)
+ 
+ static const u32 snd_rme_rate_table[] = {
+ 	32000, 44100, 48000, 50000,
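The SND_RME_* changes are pure macro hygiene: parenthesizing the argument keeps compound expressions from rebinding against the body's >> and & operators. A runnable demonstration of the difference (SHIFT/MASK mirror the FREQMUL pair above; the arguments are arbitrary):

    #include <assert.h>

    #define MASK  0x7
    #define SHIFT 18
    #define EXTRACT_BAD(x)  ((x >> SHIFT) & MASK)     /* old form   */
    #define EXTRACT_GOOD(x) (((x) >> SHIFT) & MASK)   /* fixed form */

    int main(void)
    {
        unsigned int lo = 0x40000, hi = 0x80000;

        /* With a compound argument the two forms diverge, because >>
         * binds tighter than |:
         *   bad:  lo | (hi >> 18)
         *   good: (lo | hi) >> 18 */
        assert(EXTRACT_BAD(lo | hi)  == ((lo | (hi >> SHIFT)) & MASK));
        assert(EXTRACT_GOOD(lo | hi) == (((lo | hi) >> SHIFT) & MASK));
        return 0;
    }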
+diff --git a/sound/usb/stream.c b/sound/usb/stream.c
+index aa91d63749f2ca..1cb52373e70f64 100644
+--- a/sound/usb/stream.c
++++ b/sound/usb/stream.c
+@@ -341,20 +341,28 @@ snd_pcm_chmap_elem *convert_chmap_v3(struct uac3_cluster_header_descriptor
+ 
+ 	len = le16_to_cpu(cluster->wLength);
+ 	c = 0;
+-	p += sizeof(struct uac3_cluster_header_descriptor);
++	p += sizeof(*cluster);
++	len -= sizeof(*cluster);
+ 
+-	while (((p - (void *)cluster) < len) && (c < channels)) {
++	while (len > 0 && (c < channels)) {
+ 		struct uac3_cluster_segment_descriptor *cs_desc = p;
+ 		u16 cs_len;
+ 		u8 cs_type;
+ 
++		if (len < sizeof(*p))
++			break;
+ 		cs_len = le16_to_cpu(cs_desc->wLength);
++		if (len < cs_len)
++			break;
+ 		cs_type = cs_desc->bSegmentType;
+ 
+ 		if (cs_type == UAC3_CHANNEL_INFORMATION) {
+ 			struct uac3_cluster_information_segment_descriptor *is = p;
+ 			unsigned char map;
+ 
++			if (cs_len < sizeof(*is))
++				break;
++
+ 			/*
+ 			 * TODO: this conversion is not complete, update it
+ 			 * after adding UAC3 values to asound.h
+@@ -456,6 +464,7 @@ snd_pcm_chmap_elem *convert_chmap_v3(struct uac3_cluster_header_descriptor
+ 			chmap->map[c++] = map;
+ 		}
+ 		p += cs_len;
++		len -= cs_len;
+ 	}
+ 
+ 	if (channels < c)
+@@ -880,7 +889,7 @@ snd_usb_get_audioformat_uac3(struct snd_usb_audio *chip,
+ 	u64 badd_formats = 0;
+ 	unsigned int num_channels;
+ 	struct audioformat *fp;
+-	u16 cluster_id, wLength;
++	u16 cluster_id, wLength, cluster_wLength;
+ 	int clock = 0;
+ 	int err;
+ 
+@@ -1010,6 +1019,16 @@ snd_usb_get_audioformat_uac3(struct snd_usb_audio *chip,
+ 		return ERR_PTR(-EIO);
+ 	}
+ 
++	cluster_wLength = le16_to_cpu(cluster->wLength);
++	if (cluster_wLength < sizeof(*cluster) ||
++	    cluster_wLength > wLength) {
++		dev_err(&dev->dev,
++			"%u:%d : invalid Cluster Descriptor size\n",
++			iface_no, altno);
++		kfree(cluster);
++		return ERR_PTR(-EIO);
++	}
++
+ 	num_channels = cluster->bNrChannels;
+ 	chmap = convert_chmap_v3(cluster);
+ 	kfree(cluster);
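The stream.c hunks harden the UAC3 cluster walk: every segment's advertised wLength is validated against the bytes actually remaining before it is trusted, so a truncated or malicious descriptor can no longer push the cursor out of bounds. A hedged sketch of the walk shape (struct seg_hdr and handle_segment() are illustrative stand-ins for the UAC3 segment types):

    #include <stddef.h>
    #include <stdint.h>

    struct seg_hdr {
        uint16_t wLength;
        uint8_t  bSegmentType;
    } __attribute__((packed));

    void handle_segment(const struct seg_hdr *h, uint16_t len); /* stand-in */

    void walk_segments(const unsigned char *buf, size_t len)
    {
        const unsigned char *p = buf, *end = buf + len;

        while ((size_t)(end - p) >= sizeof(struct seg_hdr)) {
            const struct seg_hdr *h = (const void *)p;
            uint16_t seg_len = h->wLength;  /* le16_to_cpu() in-kernel */

            if (seg_len < sizeof(*h) || seg_len > (size_t)(end - p))
                break;              /* inconsistent size: stop parsing */
            handle_segment(h, seg_len);
            p += seg_len;
        }
    }

The same idea drives the new cluster_wLength check: the outer descriptor must be at least header-sized and no larger than the bytes the device actually returned.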
+diff --git a/sound/usb/validate.c b/sound/usb/validate.c
+index 6fe206f6e91105..4f4e8e87a14cd0 100644
+--- a/sound/usb/validate.c
++++ b/sound/usb/validate.c
+@@ -221,6 +221,17 @@ static bool validate_uac3_feature_unit(const void *p,
+ 	return d->bLength >= sizeof(*d) + 4 + 2;
+ }
+ 
++static bool validate_uac3_power_domain_unit(const void *p,
++					    const struct usb_desc_validator *v)
++{
++	const struct uac3_power_domain_descriptor *d = p;
++
++	if (d->bLength < sizeof(*d))
++		return false;
++	/* baEntities[] + wPDomainDescrStr */
++	return d->bLength >= sizeof(*d) + d->bNrEntities + 2;
++}
++
+ static bool validate_midi_out_jack(const void *p,
+ 				   const struct usb_desc_validator *v)
+ {
+@@ -285,6 +296,7 @@ static const struct usb_desc_validator audio_validators[] = {
+ 	      struct uac3_clock_multiplier_descriptor),
+ 	/* UAC_VERSION_3, UAC3_SAMPLE_RATE_CONVERTER: not implemented yet */
+ 	/* UAC_VERSION_3, UAC3_CONNECTORS: not implemented yet */
++	FUNC(UAC_VERSION_3, UAC3_POWER_DOMAIN, validate_uac3_power_domain_unit),
+ 	{ } /* terminator */
+ };
+ 
+diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c
+index 08d0ac543c6746..a0536528dfde26 100644
+--- a/tools/bpf/bpftool/main.c
++++ b/tools/bpf/bpftool/main.c
+@@ -534,9 +534,9 @@ int main(int argc, char **argv)
+ 		usage();
+ 
+ 	if (version_requested)
+-		return do_version(argc, argv);
+-
+-	ret = cmd_select(commands, argc, argv, do_help);
++		ret = do_version(argc, argv);
++	else
++		ret = cmd_select(commands, argc, argv, do_help);
+ 
+ 	if (json_output)
+ 		jsonw_destroy(&json_wtr);
+diff --git a/tools/hv/hv_fcopy_uio_daemon.c b/tools/hv/hv_fcopy_uio_daemon.c
+index 9caa24caa0801a..e68a824d67b2b9 100644
+--- a/tools/hv/hv_fcopy_uio_daemon.c
++++ b/tools/hv/hv_fcopy_uio_daemon.c
+@@ -35,7 +35,10 @@
+ #define WIN8_SRV_MINOR		1
+ #define WIN8_SRV_VERSION	(WIN8_SRV_MAJOR << 16 | WIN8_SRV_MINOR)
+ 
+-#define FCOPY_UIO		"/sys/bus/vmbus/devices/eb765408-105f-49b6-b4aa-c123b64d17d4/uio"
++#define FCOPY_DEVICE_PATH(subdir) \
++	"/sys/bus/vmbus/devices/eb765408-105f-49b6-b4aa-c123b64d17d4/" #subdir
++#define FCOPY_UIO_PATH          FCOPY_DEVICE_PATH(uio)
++#define FCOPY_CHANNELS_PATH     FCOPY_DEVICE_PATH(channels)
+ 
+ #define FCOPY_VER_COUNT		1
+ static const int fcopy_versions[] = {
+@@ -47,9 +50,62 @@ static const int fw_versions[] = {
+ 	UTIL_FW_VERSION
+ };
+ 
+-#define HV_RING_SIZE		0x4000 /* 16KB ring buffer size */
++static uint32_t get_ring_buffer_size(void)
++{
++	char ring_path[PATH_MAX];
++	DIR *dir;
++	struct dirent *entry;
++	struct stat st;
++	uint32_t ring_size = 0;
++	int retry_count = 0;
++
++	/* Find the channel directory */
++	dir = opendir(FCOPY_CHANNELS_PATH);
++	if (!dir) {
++		usleep(100 * 1000); /* Avoid race with kernel, wait 100ms and retry once */
++		dir = opendir(FCOPY_CHANNELS_PATH);
++		if (!dir) {
++			syslog(LOG_ERR, "Failed to open channels directory: %s", strerror(errno));
++			return 0;
++		}
++	}
++
++retry_once:
++	while ((entry = readdir(dir)) != NULL) {
++		if (entry->d_type == DT_DIR && strcmp(entry->d_name, ".") != 0 &&
++		    strcmp(entry->d_name, "..") != 0) {
++			snprintf(ring_path, sizeof(ring_path), "%s/%s/ring",
++				 FCOPY_CHANNELS_PATH, entry->d_name);
++
++			if (stat(ring_path, &st) == 0) {
++				/*
++				 * stat returns size of Tx, Rx rings combined,
++				 * so take half of it for individual ring size.
++				 */
++				ring_size = (uint32_t)st.st_size / 2;
++				syslog(LOG_INFO, "Ring buffer size from %s: %u bytes",
++				       ring_path, ring_size);
++				break;
++			}
++		}
++	}
+ 
+-static unsigned char desc[HV_RING_SIZE];
++	if (!ring_size && retry_count == 0) {
++		retry_count = 1;
++		rewinddir(dir);
++		usleep(100 * 1000); /* Wait 100ms and retry once */
++		goto retry_once;
++	}
++
++	closedir(dir);
++
++	if (!ring_size)
++		syslog(LOG_ERR, "Could not determine ring size");
++
++	return ring_size;
++}
++
++static unsigned char *desc;
+ 
+ static int target_fd;
+ static char target_fname[PATH_MAX];
+@@ -397,7 +453,7 @@ int main(int argc, char *argv[])
+ 	int daemonize = 1, long_index = 0, opt, ret = -EINVAL;
+ 	struct vmbus_br txbr, rxbr;
+ 	void *ring;
+-	uint32_t len = HV_RING_SIZE;
++	uint32_t ring_size, len;
+ 	char uio_name[NAME_MAX] = {0};
+ 	char uio_dev_path[PATH_MAX] = {0};
+ 
+@@ -428,7 +484,20 @@ int main(int argc, char *argv[])
+ 	openlog("HV_UIO_FCOPY", 0, LOG_USER);
+ 	syslog(LOG_INFO, "starting; pid is:%d", getpid());
+ 
+-	fcopy_get_first_folder(FCOPY_UIO, uio_name);
++	ring_size = get_ring_buffer_size();
++	if (!ring_size) {
++		ret = -ENODEV;
++		goto exit;
++	}
++
++	desc = malloc(ring_size * sizeof(unsigned char));
++	if (!desc) {
++		syslog(LOG_ERR, "malloc failed for desc buffer");
++		ret = -ENOMEM;
++		goto exit;
++	}
++
++	fcopy_get_first_folder(FCOPY_UIO_PATH, uio_name);
+ 	snprintf(uio_dev_path, sizeof(uio_dev_path), "/dev/%s", uio_name);
+ 	fcopy_fd = open(uio_dev_path, O_RDWR);
+ 
+@@ -436,17 +505,17 @@ int main(int argc, char *argv[])
+ 		syslog(LOG_ERR, "open %s failed; error: %d %s",
+ 		       uio_dev_path, errno, strerror(errno));
+ 		ret = fcopy_fd;
+-		goto exit;
++		goto free_desc;
+ 	}
+ 
+-	ring = vmbus_uio_map(&fcopy_fd, HV_RING_SIZE);
++	ring = vmbus_uio_map(&fcopy_fd, ring_size);
+ 	if (!ring) {
+ 		ret = errno;
+ 		syslog(LOG_ERR, "mmap ringbuffer failed; error: %d %s", ret, strerror(ret));
+ 		goto close;
+ 	}
+-	vmbus_br_setup(&txbr, ring, HV_RING_SIZE);
+-	vmbus_br_setup(&rxbr, (char *)ring + HV_RING_SIZE, HV_RING_SIZE);
++	vmbus_br_setup(&txbr, ring, ring_size);
++	vmbus_br_setup(&rxbr, (char *)ring + ring_size, ring_size);
+ 
+ 	rxbr.vbr->imask = 0;
+ 
+@@ -461,7 +530,7 @@ int main(int argc, char *argv[])
+ 			continue;
+ 		}
+ 
+-		len = HV_RING_SIZE;
++		len = ring_size;
+ 		ret = rte_vmbus_chan_recv_raw(&rxbr, desc, &len);
+ 		if (unlikely(ret <= 0)) {
+ 			/* This indicates a failure to communicate (or worse) */
+@@ -481,6 +550,8 @@ int main(int argc, char *argv[])
+ 	}
+ close:
+ 	close(fcopy_fd);
++free_desc:
++	free(desc);
+ exit:
+ 	return ret;
+ }
+diff --git a/tools/include/nolibc/std.h b/tools/include/nolibc/std.h
+index 933bc0be7e1c6b..a9d8b5b51f37f8 100644
+--- a/tools/include/nolibc/std.h
++++ b/tools/include/nolibc/std.h
+@@ -20,6 +20,8 @@
+ 
+ #include "stdint.h"
+ 
++#include <linux/types.h>
++
+ /* those are commonly provided by sys/types.h */
+ typedef unsigned int          dev_t;
+ typedef unsigned long         ino_t;
+@@ -31,6 +33,6 @@ typedef unsigned long       nlink_t;
+ typedef   signed long         off_t;
+ typedef   signed long     blksize_t;
+ typedef   signed long      blkcnt_t;
+-typedef   signed long        time_t;
++typedef __kernel_old_time_t  time_t;
+ 
+ #endif /* _NOLIBC_STD_H */
+diff --git a/tools/include/nolibc/types.h b/tools/include/nolibc/types.h
+index b26a5d0c417c7c..9d606c7138a86f 100644
+--- a/tools/include/nolibc/types.h
++++ b/tools/include/nolibc/types.h
+@@ -127,7 +127,7 @@ typedef struct {
+ 		int __fd = (fd);					\
+ 		if (__fd >= 0)						\
+ 			__set->fds[__fd / FD_SETIDXMASK] &=		\
+-				~(1U << (__fd & FX_SETBITMASK));	\
++				~(1U << (__fd & FD_SETBITMASK));	\
+ 	} while (0)
+ 
+ #define FD_SET(fd, set) do {						\
+@@ -144,7 +144,7 @@ typedef struct {
+ 		int __r = 0;						\
+ 		if (__fd >= 0)						\
+ 			__r = !!(__set->fds[__fd / FD_SETIDXMASK] &	\
+-1U << (__fd & FD_SET_BITMASK));						\
++1U << (__fd & FD_SETBITMASK));						\
+ 		__r;							\
+ 	})
+ 
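Both fixed identifiers (FX_SETBITMASK, FD_SET_BITMASK) were undefined, so any program actually using nolibc's FD_CLR()/FD_ISSET() failed to compile. A userspace demo of the intended word/bit arithmetic, assuming 32-bit storage words (the masks are the power-of-two equivalents of / and %):

    #include <assert.h>
    #include <stdint.h>

    #define FD_SETIDXMASK 32                 /* bits per word (assumed) */
    #define FD_SETBITMASK (FD_SETIDXMASK - 1)

    static uint32_t fds[4];

    static void fd_set_(int fd)
    {
        fds[fd / FD_SETIDXMASK] |= 1U << (fd & FD_SETBITMASK);
    }

    static void fd_clr_(int fd)
    {
        fds[fd / FD_SETIDXMASK] &= ~(1U << (fd & FD_SETBITMASK));
    }

    static int fd_isset_(int fd)
    {
        return !!(fds[fd / FD_SETIDXMASK] & (1U << (fd & FD_SETBITMASK)));
    }

    int main(void)
    {
        fd_set_(37);                         /* word 1, bit 5 */
        assert(fd_isset_(37) && !fd_isset_(5));
        fd_clr_(37);
        assert(!fd_isset_(37));
        return 0;
    }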
+diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
+index 747cef47e685bc..e33cf3caf8b645 100644
+--- a/tools/lib/bpf/libbpf.c
++++ b/tools/lib/bpf/libbpf.c
+@@ -4546,6 +4546,11 @@ static int bpf_program__record_reloc(struct bpf_program *prog,
+ 
+ 	/* arena data relocation */
+ 	if (shdr_idx == obj->efile.arena_data_shndx) {
++		if (obj->arena_map_idx < 0) {
++			pr_warn("prog '%s': bad arena data relocation at insn %u, no arena maps defined\n",
++				prog->name, insn_idx);
++			return -LIBBPF_ERRNO__RELOC;
++		}
+ 		reloc_desc->type = RELO_DATA;
+ 		reloc_desc->insn_idx = insn_idx;
+ 		reloc_desc->map_idx = obj->arena_map_idx;
+diff --git a/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c b/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
+index 08a399b0be286c..6ab9139f16af90 100644
+--- a/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
++++ b/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
+@@ -240,9 +240,9 @@ static int mperf_stop(void)
+ 	int cpu;
+ 
+ 	for (cpu = 0; cpu < cpu_count; cpu++) {
+-		mperf_measure_stats(cpu);
+-		mperf_get_tsc(&tsc_at_measure_end[cpu]);
+ 		clock_gettime(CLOCK_REALTIME, &time_end[cpu]);
++		mperf_get_tsc(&tsc_at_measure_end[cpu]);
++		mperf_measure_stats(cpu);
+ 	}
+ 
+ 	return 0;
+diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
+index 4c322586730d44..b663a76d31f1fa 100644
+--- a/tools/power/x86/turbostat/turbostat.c
++++ b/tools/power/x86/turbostat/turbostat.c
+@@ -67,6 +67,7 @@
+ #include <stdbool.h>
+ #include <assert.h>
+ #include <linux/kernel.h>
++#include <limits.h>
+ 
+ #define UNUSED(x) (void)(x)
+ 
+@@ -6245,8 +6246,16 @@ int check_for_cap_sys_rawio(void)
+ 	int ret = 0;
+ 
+ 	caps = cap_get_proc();
+-	if (caps == NULL)
++	if (caps == NULL) {
++		/*
++		 * CONFIG_MULTIUSER=n kernels have no cap_get_proc()
++		 * Allow them to continue and attempt to access MSRs
++		 */
++		if (errno == ENOSYS)
++			return 0;
++
+ 		return 1;
++	}
+ 
+ 	if (cap_get_flag(caps, CAP_SYS_RAWIO, CAP_EFFECTIVE, &cap_flag_value)) {
+ 		ret = 1;
+@@ -6409,7 +6418,8 @@ static void probe_intel_uncore_frequency_legacy(void)
+ 			sprintf(path_base, "/sys/devices/system/cpu/intel_uncore_frequency/package_%02d_die_%02d", i,
+ 				j);
+ 
+-			if (access(path_base, R_OK))
++			sprintf(path, "%s/current_freq_khz", path_base);
++			if (access(path, R_OK))
+ 				continue;
+ 
+ 			BIC_PRESENT(BIC_UNCORE_MHZ);
+diff --git a/tools/scripts/Makefile.include b/tools/scripts/Makefile.include
+index 0aa4005017c72f..5f2afd95de4303 100644
+--- a/tools/scripts/Makefile.include
++++ b/tools/scripts/Makefile.include
+@@ -98,7 +98,9 @@ else ifneq ($(CROSS_COMPILE),)
+ # Allow userspace to override CLANG_CROSS_FLAGS to specify their own
+ # sysroots and flags or to avoid the GCC call in pure Clang builds.
+ ifeq ($(CLANG_CROSS_FLAGS),)
+-CLANG_CROSS_FLAGS := --target=$(notdir $(CROSS_COMPILE:%-=%))
++CLANG_TARGET := $(notdir $(CROSS_COMPILE:%-=%))
++CLANG_TARGET := $(subst s390-linux,s390x-linux,$(CLANG_TARGET))
++CLANG_CROSS_FLAGS := --target=$(CLANG_TARGET)
+ GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE)gcc 2>/dev/null))
+ ifneq ($(GCC_TOOLCHAIN_DIR),)
+ CLANG_CROSS_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR)$(notdir $(CROSS_COMPILE))
+diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl
+index 7e524601e01ada..bad227ee1b5b91 100755
+--- a/tools/testing/ktest/ktest.pl
++++ b/tools/testing/ktest/ktest.pl
+@@ -1371,7 +1371,10 @@ sub __eval_option {
+ 	# If a variable contains itself, use the default var
+ 	if (($var eq $name) && defined($opt{$var})) {
+ 	    $o = $opt{$var};
+-	    $retval = "$retval$o";
++	    # Only append if the default doesn't contain itself
++	    if ($o !~ m/\$\{$var\}/) {
++		$retval = "$retval$o";
++	    }
+ 	} elsif (defined($opt{$o})) {
+ 	    $o = $opt{$o};
+ 	    $retval = "$retval$o";
+diff --git a/tools/testing/selftests/arm64/fp/sve-ptrace.c b/tools/testing/selftests/arm64/fp/sve-ptrace.c
+index c6228176dd1a0c..408fb1c5c2f856 100644
+--- a/tools/testing/selftests/arm64/fp/sve-ptrace.c
++++ b/tools/testing/selftests/arm64/fp/sve-ptrace.c
+@@ -168,7 +168,7 @@ static void ptrace_set_get_inherit(pid_t child, const struct vec_type *type)
+ 	memset(&sve, 0, sizeof(sve));
+ 	sve.size = sizeof(sve);
+ 	sve.vl = sve_vl_from_vq(SVE_VQ_MIN);
+-	sve.flags = SVE_PT_VL_INHERIT;
++	sve.flags = SVE_PT_VL_INHERIT | SVE_PT_REGS_SVE;
+ 	ret = set_sve(child, type, &sve);
+ 	if (ret != 0) {
+ 		ksft_test_result_fail("Failed to set %s SVE_PT_VL_INHERIT\n",
+@@ -233,6 +233,7 @@ static void ptrace_set_get_vl(pid_t child, const struct vec_type *type,
+ 	/* Set the VL by doing a set with no register payload */
+ 	memset(&sve, 0, sizeof(sve));
+ 	sve.size = sizeof(sve);
++	sve.flags = SVE_PT_REGS_SVE;
+ 	sve.vl = vl;
+ 	ret = set_sve(child, type, &sve);
+ 	if (ret != 0) {
+diff --git a/tools/testing/selftests/bpf/prog_tests/ringbuf.c b/tools/testing/selftests/bpf/prog_tests/ringbuf.c
+index da430df45aa497..d1e4cb28a72c6b 100644
+--- a/tools/testing/selftests/bpf/prog_tests/ringbuf.c
++++ b/tools/testing/selftests/bpf/prog_tests/ringbuf.c
+@@ -97,7 +97,7 @@ static void ringbuf_write_subtest(void)
+ 	if (!ASSERT_OK_PTR(skel, "skel_open"))
+ 		return;
+ 
+-	skel->maps.ringbuf.max_entries = 0x4000;
++	skel->maps.ringbuf.max_entries = 0x40000;
+ 
+ 	err = test_ringbuf_write_lskel__load(skel);
+ 	if (!ASSERT_OK(err, "skel_load"))
+@@ -108,7 +108,7 @@ static void ringbuf_write_subtest(void)
+ 	mmap_ptr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, rb_fd, 0);
+ 	if (!ASSERT_OK_PTR(mmap_ptr, "rw_cons_pos"))
+ 		goto cleanup;
+-	*mmap_ptr = 0x3000;
++	*mmap_ptr = 0x30000;
+ 	ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_rw");
+ 
+ 	skel->bss->pid = getpid();
+diff --git a/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c b/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c
+index d424e7ecbd12d0..9fd3ae98732102 100644
+--- a/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c
++++ b/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c
+@@ -21,8 +21,7 @@
+ #include "../progs/test_user_ringbuf.h"
+ 
+ static const long c_sample_size = sizeof(struct sample) + BPF_RINGBUF_HDR_SZ;
+-static const long c_ringbuf_size = 1 << 12; /* 1 small page */
+-static const long c_max_entries = c_ringbuf_size / c_sample_size;
++static long c_ringbuf_size, c_max_entries;
+ 
+ static void drain_current_samples(void)
+ {
+@@ -424,7 +423,9 @@ static void test_user_ringbuf_loop(void)
+ 	uint32_t remaining_samples = total_samples;
+ 	int err;
+ 
+-	BUILD_BUG_ON(total_samples <= c_max_entries);
++	if (!ASSERT_LT(c_max_entries, total_samples, "compare_c_max_entries"))
++		return;
++
+ 	err = load_skel_create_user_ringbuf(&skel, &ringbuf);
+ 	if (err)
+ 		return;
+@@ -686,6 +687,9 @@ void test_user_ringbuf(void)
+ {
+ 	int i;
+ 
++	c_ringbuf_size = getpagesize(); /* 1 page */
++	c_max_entries = c_ringbuf_size / c_sample_size;
++
+ 	for (i = 0; i < ARRAY_SIZE(success_tests); i++) {
+ 		if (!test__start_subtest(success_tests[i].test_name))
+ 			continue;
+diff --git a/tools/testing/selftests/bpf/progs/test_ringbuf_write.c b/tools/testing/selftests/bpf/progs/test_ringbuf_write.c
+index 350513c0e4c985..f063a0013f8506 100644
+--- a/tools/testing/selftests/bpf/progs/test_ringbuf_write.c
++++ b/tools/testing/selftests/bpf/progs/test_ringbuf_write.c
+@@ -26,11 +26,11 @@ int test_ringbuf_write(void *ctx)
+ 	if (cur_pid != pid)
+ 		return 0;
+ 
+-	sample1 = bpf_ringbuf_reserve(&ringbuf, 0x3000, 0);
++	sample1 = bpf_ringbuf_reserve(&ringbuf, 0x30000, 0);
+ 	if (!sample1)
+ 		return 0;
+ 	/* first one can pass */
+-	sample2 = bpf_ringbuf_reserve(&ringbuf, 0x3000, 0);
++	sample2 = bpf_ringbuf_reserve(&ringbuf, 0x30000, 0);
+ 	if (!sample2) {
+ 		bpf_ringbuf_discard(sample1, 0);
+ 		__sync_fetch_and_add(&discarded, 1);
+diff --git a/tools/testing/selftests/bpf/progs/verifier_unpriv.c b/tools/testing/selftests/bpf/progs/verifier_unpriv.c
+index 7ea535bfbacd3e..e4ef82a6ee38c8 100644
+--- a/tools/testing/selftests/bpf/progs/verifier_unpriv.c
++++ b/tools/testing/selftests/bpf/progs/verifier_unpriv.c
+@@ -619,7 +619,7 @@ __naked void pass_pointer_to_tail_call(void)
+ 
+ SEC("socket")
+ __description("unpriv: cmp map pointer with zero")
+-__success __failure_unpriv __msg_unpriv("R1 pointer comparison")
++__success __success_unpriv
+ __retval(0)
+ __naked void cmp_map_pointer_with_zero(void)
+ {
+diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc
+index 4b994b6df5ac30..ed81eaf2afd6d9 100644
+--- a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc
++++ b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc
+@@ -29,7 +29,7 @@ ftrace_filter_check 'schedule*' '^schedule.*$'
+ ftrace_filter_check '*pin*lock' '.*pin.*lock$'
+ 
+ # filter by start*mid*
+-ftrace_filter_check 'mutex*try*' '^mutex.*try.*'
++ftrace_filter_check 'mutex*unl*' '^mutex.*unl.*'
+ 
+ # Advanced full-glob matching feature is recently supported.
+ # Skip the tests if we are sure the kernel does not support it.
+diff --git a/tools/testing/selftests/futex/include/futextest.h b/tools/testing/selftests/futex/include/futextest.h
+index ddbcfc9b7bac4a..7a5fd1d5355e7e 100644
+--- a/tools/testing/selftests/futex/include/futextest.h
++++ b/tools/testing/selftests/futex/include/futextest.h
+@@ -47,6 +47,17 @@ typedef volatile u_int32_t futex_t;
+ 					 FUTEX_PRIVATE_FLAG)
+ #endif
+ 
++/*
++ * SYS_futex is expected from system C library, in glibc some 32-bit
++ * architectures (e.g. RV32) are using 64-bit time_t, therefore it doesn't have
++ * SYS_futex defined but just SYS_futex_time64. Define SYS_futex as
++ * SYS_futex_time64 in this situation to ensure the compilation and the
++ * compatibility.
++ */
++#if !defined(SYS_futex) && defined(SYS_futex_time64)
++#define SYS_futex SYS_futex_time64
++#endif
++
+ /**
+  * futex() - SYS_futex syscall wrapper
+  * @uaddr:	address of first futex
+diff --git a/tools/testing/selftests/net/netfilter/config b/tools/testing/selftests/net/netfilter/config
+index 43d8b500d391a2..8cc6036f97dc48 100644
+--- a/tools/testing/selftests/net/netfilter/config
++++ b/tools/testing/selftests/net/netfilter/config
+@@ -91,4 +91,4 @@ CONFIG_XFRM_STATISTICS=y
+ CONFIG_NET_PKTGEN=m
+ CONFIG_TUN=m
+ CONFIG_INET_DIAG=m
+-CONFIG_SCTP_DIAG=m
++CONFIG_INET_SCTP_DIAG=m
+diff --git a/tools/testing/selftests/vDSO/vdso_test_getrandom.c b/tools/testing/selftests/vDSO/vdso_test_getrandom.c
+index 95057f7567db22..ff8d5675da2b0e 100644
+--- a/tools/testing/selftests/vDSO/vdso_test_getrandom.c
++++ b/tools/testing/selftests/vDSO/vdso_test_getrandom.c
+@@ -242,6 +242,7 @@ static void kselftest(void)
+ 	pid_t child;
+ 
+ 	ksft_print_header();
++	vgetrandom_init();
+ 	ksft_set_plan(2);
+ 
+ 	for (size_t i = 0; i < 1000; ++i) {
+@@ -295,8 +296,6 @@ static void usage(const char *argv0)
+ 
+ int main(int argc, char *argv[])
+ {
+-	vgetrandom_init();
+-
+ 	if (argc == 1) {
+ 		kselftest();
+ 		return 0;
+@@ -306,6 +305,9 @@ int main(int argc, char *argv[])
+ 		usage(argv[0]);
+ 		return 1;
+ 	}
++
++	vgetrandom_init();
++
+ 	if (!strcmp(argv[1], "bench-single"))
+ 		bench_single();
+ 	else if (!strcmp(argv[1], "bench-multi"))


* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2025-08-16  3:10 Arisu Tachibana
  0 siblings, 0 replies; 82+ messages in thread
From: Arisu Tachibana @ 2025-08-16  3:10 UTC (permalink / raw
  To: gentoo-commits

commit:     e7ccf52053e28db53394e92962bea65c1dec5428
Author:     Arisu Tachibana <alicef <AT> gentoo <DOT> org>
AuthorDate: Sat Aug 16 03:10:17 2025 +0000
Commit:     Arisu Tachibana <alicef <AT> gentoo <DOT> org>
CommitDate: Sat Aug 16 03:10:17 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=e7ccf520

Linux patch 6.12.42

Signed-off-by: Arisu Tachibana <alicef <AT> gentoo.org>

 0000_README              |     4 +
 1041_linux-6.12.42.patch | 13841 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 13845 insertions(+)

diff --git a/0000_README b/0000_README
index 96e25596..31e38399 100644
--- a/0000_README
+++ b/0000_README
@@ -207,6 +207,10 @@ Patch:  1040_linux-6.12.41.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.12.41
 
+Patch:  1041_linux-6.12.42.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.12.42
+
 Patch:  1500_fortify-copy-size-value-range-tracking-fix.patch
 From:   https://git.kernel.org/
 Desc:   fortify: Hide run-time copy size from value range tracking

diff --git a/1041_linux-6.12.42.patch b/1041_linux-6.12.42.patch
new file mode 100644
index 00000000..d06025dc
--- /dev/null
+++ b/1041_linux-6.12.42.patch
@@ -0,0 +1,13841 @@
+diff --git a/Documentation/filesystems/f2fs.rst b/Documentation/filesystems/f2fs.rst
+index 68a0885fb5e69e..fdf31514fb1c89 100644
+--- a/Documentation/filesystems/f2fs.rst
++++ b/Documentation/filesystems/f2fs.rst
+@@ -235,9 +235,9 @@ usrjquota=<file>	 Appoint specified file and type during mount, so that quota
+ grpjquota=<file>	 information can be properly updated during recovery flow,
+ prjjquota=<file>	 <quota file>: must be in root directory;
+ jqfmt=<quota type>	 <quota type>: [vfsold,vfsv0,vfsv1].
+-offusrjquota		 Turn off user journalled quota.
+-offgrpjquota		 Turn off group journalled quota.
+-offprjjquota		 Turn off project journalled quota.
++usrjquota=		 Turn off user journalled quota.
++grpjquota=		 Turn off group journalled quota.
++prjjquota=		 Turn off project journalled quota.
+ quota			 Enable plain user disk quota accounting.
+ noquota			 Disable all plain disk quota option.
+ alloc_mode=%s		 Adjust block allocation policy, which supports "reuse"
+diff --git a/Documentation/netlink/specs/ethtool.yaml b/Documentation/netlink/specs/ethtool.yaml
+index f6c5d8214c7e98..4936aa5855b133 100644
+--- a/Documentation/netlink/specs/ethtool.yaml
++++ b/Documentation/netlink/specs/ethtool.yaml
+@@ -1682,9 +1682,6 @@ operations:
+ 
+       do: &module-eeprom-get-op
+         request:
+-          attributes:
+-            - header
+-        reply:
+           attributes:
+             - header
+             - offset
+@@ -1692,6 +1689,9 @@ operations:
+             - page
+             - bank
+             - i2c-address
++        reply:
++          attributes:
++            - header
+             - data
+       dump: *module-eeprom-get-op
+     -
+diff --git a/Makefile b/Makefile
+index fbaebf00a33b70..265dba73ce3373 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 12
+-SUBLEVEL = 41
++SUBLEVEL = 42
+ EXTRAVERSION =
+ NAME = Baby Opossum Posse
+ 
+diff --git a/arch/arm/boot/dts/nxp/imx/imx6ul-kontron-bl-common.dtsi b/arch/arm/boot/dts/nxp/imx/imx6ul-kontron-bl-common.dtsi
+index 29d2f86d5e34a7..f4c45e964daf8f 100644
+--- a/arch/arm/boot/dts/nxp/imx/imx6ul-kontron-bl-common.dtsi
++++ b/arch/arm/boot/dts/nxp/imx/imx6ul-kontron-bl-common.dtsi
+@@ -168,7 +168,6 @@ &uart2 {
+ 	pinctrl-0 = <&pinctrl_uart2>;
+ 	linux,rs485-enabled-at-boot-time;
+ 	rs485-rx-during-tx;
+-	rs485-rts-active-low;
+ 	uart-has-rtscts;
+ 	status = "okay";
+ };
+diff --git a/arch/arm/boot/dts/nxp/vf/vfxxx.dtsi b/arch/arm/boot/dts/nxp/vf/vfxxx.dtsi
+index acccf9a3c898e0..27422a343f148c 100644
+--- a/arch/arm/boot/dts/nxp/vf/vfxxx.dtsi
++++ b/arch/arm/boot/dts/nxp/vf/vfxxx.dtsi
+@@ -604,7 +604,7 @@ usbmisc1: usb@400b4800 {
+ 
+ 			ftm: ftm@400b8000 {
+ 				compatible = "fsl,ftm-timer";
+-				reg = <0x400b8000 0x1000 0x400b9000 0x1000>;
++				reg = <0x400b8000 0x1000>, <0x400b9000 0x1000>;
+ 				interrupts = <44 IRQ_TYPE_LEVEL_HIGH>;
+ 				clock-names = "ftm-evt", "ftm-src",
+ 					"ftm-evt-counter-en", "ftm-src-counter-en";
+diff --git a/arch/arm/boot/dts/ti/omap/am335x-boneblack.dts b/arch/arm/boot/dts/ti/omap/am335x-boneblack.dts
+index 16b567e3cb4722..b4fdcf9c02b500 100644
+--- a/arch/arm/boot/dts/ti/omap/am335x-boneblack.dts
++++ b/arch/arm/boot/dts/ti/omap/am335x-boneblack.dts
+@@ -35,7 +35,7 @@ &gpio0 {
+ 		"P9_18 [spi0_d1]",
+ 		"P9_17 [spi0_cs0]",
+ 		"[mmc0_cd]",
+-		"P8_42A [ecappwm0]",
++		"P9_42A [ecappwm0]",
+ 		"P8_35 [lcd d12]",
+ 		"P8_33 [lcd d13]",
+ 		"P8_31 [lcd d14]",
+diff --git a/arch/arm/crypto/aes-neonbs-glue.c b/arch/arm/crypto/aes-neonbs-glue.c
+index f6be80b5938b14..2fad3a0c056379 100644
+--- a/arch/arm/crypto/aes-neonbs-glue.c
++++ b/arch/arm/crypto/aes-neonbs-glue.c
+@@ -232,7 +232,7 @@ static int ctr_encrypt(struct skcipher_request *req)
+ 	while (walk.nbytes > 0) {
+ 		const u8 *src = walk.src.virt.addr;
+ 		u8 *dst = walk.dst.virt.addr;
+-		int bytes = walk.nbytes;
++		unsigned int bytes = walk.nbytes;
+ 
+ 		if (unlikely(bytes < AES_BLOCK_SIZE))
+ 			src = dst = memcpy(buf + sizeof(buf) - bytes,
+diff --git a/arch/arm64/boot/dts/exynos/google/gs101.dtsi b/arch/arm64/boot/dts/exynos/google/gs101.dtsi
+index b8f8255f840b13..7caa2f3ef134af 100644
+--- a/arch/arm64/boot/dts/exynos/google/gs101.dtsi
++++ b/arch/arm64/boot/dts/exynos/google/gs101.dtsi
+@@ -155,6 +155,7 @@ ANANKE_CPU_SLEEP: cpu-ananke-sleep {
+ 				idle-state-name = "c2";
+ 				compatible = "arm,idle-state";
+ 				arm,psci-suspend-param = <0x0010000>;
++				local-timer-stop;
+ 				entry-latency-us = <70>;
+ 				exit-latency-us = <160>;
+ 				min-residency-us = <2000>;
+@@ -164,6 +165,7 @@ ENYO_CPU_SLEEP: cpu-enyo-sleep {
+ 				idle-state-name = "c2";
+ 				compatible = "arm,idle-state";
+ 				arm,psci-suspend-param = <0x0010000>;
++				local-timer-stop;
+ 				entry-latency-us = <150>;
+ 				exit-latency-us = <190>;
+ 				min-residency-us = <2500>;
+@@ -173,6 +175,7 @@ HERA_CPU_SLEEP: cpu-hera-sleep {
+ 				idle-state-name = "c2";
+ 				compatible = "arm,idle-state";
+ 				arm,psci-suspend-param = <0x0010000>;
++				local-timer-stop;
+ 				entry-latency-us = <235>;
+ 				exit-latency-us = <220>;
+ 				min-residency-us = <3500>;
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm-beacon-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-beacon-som.dtsi
+index 9ba0cb89fa24e0..c0f00835e47d7a 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm-beacon-som.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mm-beacon-som.dtsi
+@@ -286,6 +286,8 @@ &usdhc3 {
+ 	pinctrl-0 = <&pinctrl_usdhc3>;
+ 	pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
+ 	pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
++	assigned-clocks = <&clk IMX8MM_CLK_USDHC3>;
++	assigned-clock-rates = <400000000>;
+ 	bus-width = <8>;
+ 	non-removable;
+ 	status = "okay";
+diff --git a/arch/arm64/boot/dts/freescale/imx8mn-beacon-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mn-beacon-som.dtsi
+index bb11590473a4c7..353d0c9ff35c2e 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mn-beacon-som.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mn-beacon-som.dtsi
+@@ -297,6 +297,8 @@ &usdhc3 {
+ 	pinctrl-0 = <&pinctrl_usdhc3>;
+ 	pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
+ 	pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
++	assigned-clocks = <&clk IMX8MN_CLK_USDHC3>;
++	assigned-clock-rates = <400000000>;
+ 	bus-width = <8>;
+ 	non-removable;
+ 	status = "okay";
+diff --git a/arch/arm64/boot/dts/freescale/imx93-tqma9352.dtsi b/arch/arm64/boot/dts/freescale/imx93-tqma9352.dtsi
+index 2cabdae2422739..09385b058664c3 100644
+--- a/arch/arm64/boot/dts/freescale/imx93-tqma9352.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx93-tqma9352.dtsi
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: (GPL-2.0-or-later OR MIT)
+ /*
+- * Copyright (c) 2022 TQ-Systems GmbH <linux@ew.tq-group.com>,
++ * Copyright (c) 2022-2025 TQ-Systems GmbH <linux@ew.tq-group.com>,
+  * D-82229 Seefeld, Germany.
+  * Author: Markus Niebel
+  */
+@@ -110,11 +110,11 @@ buck1: BUCK1 {
+ 				regulator-ramp-delay = <3125>;
+ 			};
+ 
+-			/* V_DDRQ - 1.1 LPDDR4 or 0.6 LPDDR4X */
++			/* V_DDRQ - 0.6 V for LPDDR4X */
+ 			buck2: BUCK2 {
+ 				regulator-name = "BUCK2";
+ 				regulator-min-microvolt = <600000>;
+-				regulator-max-microvolt = <1100000>;
++				regulator-max-microvolt = <600000>;
+ 				regulator-boot-on;
+ 				regulator-always-on;
+ 				regulator-ramp-delay = <3125>;
+diff --git a/arch/arm64/boot/dts/qcom/msm8976.dtsi b/arch/arm64/boot/dts/qcom/msm8976.dtsi
+index 06af6e5ec578ed..884b5bb54ba824 100644
+--- a/arch/arm64/boot/dts/qcom/msm8976.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8976.dtsi
+@@ -1330,6 +1330,7 @@ blsp1_dma: dma-controller@7884000 {
+ 			clock-names = "bam_clk";
+ 			#dma-cells = <1>;
+ 			qcom,ee = <0>;
++			qcom,controlled-remotely;
+ 		};
+ 
+ 		blsp1_uart1: serial@78af000 {
+@@ -1450,6 +1451,7 @@ blsp2_dma: dma-controller@7ac4000 {
+ 			clock-names = "bam_clk";
+ 			#dma-cells = <1>;
+ 			qcom,ee = <0>;
++			qcom,controlled-remotely;
+ 		};
+ 
+ 		blsp2_uart2: serial@7af0000 {
+diff --git a/arch/arm64/boot/dts/qcom/sa8775p.dtsi b/arch/arm64/boot/dts/qcom/sa8775p.dtsi
+index b28fa598cebb3d..60f3b545304b6e 100644
+--- a/arch/arm64/boot/dts/qcom/sa8775p.dtsi
++++ b/arch/arm64/boot/dts/qcom/sa8775p.dtsi
+@@ -3797,8 +3797,8 @@ remoteproc_gpdsp0: remoteproc@20c00000 {
+ 
+ 			interrupts-extended = <&intc GIC_SPI 768 IRQ_TYPE_EDGE_RISING>,
+ 					      <&smp2p_gpdsp0_in 0 0>,
+-					      <&smp2p_gpdsp0_in 2 0>,
+ 					      <&smp2p_gpdsp0_in 1 0>,
++					      <&smp2p_gpdsp0_in 2 0>,
+ 					      <&smp2p_gpdsp0_in 3 0>;
+ 			interrupt-names = "wdog", "fatal", "ready",
+ 					  "handover", "stop-ack";
+@@ -3840,8 +3840,8 @@ remoteproc_gpdsp1: remoteproc@21c00000 {
+ 
+ 			interrupts-extended = <&intc GIC_SPI 624 IRQ_TYPE_EDGE_RISING>,
+ 					      <&smp2p_gpdsp1_in 0 0>,
+-					      <&smp2p_gpdsp1_in 2 0>,
+ 					      <&smp2p_gpdsp1_in 1 0>,
++					      <&smp2p_gpdsp1_in 2 0>,
+ 					      <&smp2p_gpdsp1_in 3 0>;
+ 			interrupt-names = "wdog", "fatal", "ready",
+ 					  "handover", "stop-ack";
+@@ -3965,8 +3965,8 @@ remoteproc_cdsp0: remoteproc@26300000 {
+ 
+ 			interrupts-extended = <&intc GIC_SPI 578 IRQ_TYPE_EDGE_RISING>,
+ 					      <&smp2p_cdsp0_in 0 IRQ_TYPE_EDGE_RISING>,
+-					      <&smp2p_cdsp0_in 2 IRQ_TYPE_EDGE_RISING>,
+ 					      <&smp2p_cdsp0_in 1 IRQ_TYPE_EDGE_RISING>,
++					      <&smp2p_cdsp0_in 2 IRQ_TYPE_EDGE_RISING>,
+ 					      <&smp2p_cdsp0_in 3 IRQ_TYPE_EDGE_RISING>;
+ 			interrupt-names = "wdog", "fatal", "ready",
+ 					  "handover", "stop-ack";
+@@ -4097,8 +4097,8 @@ remoteproc_cdsp1: remoteproc@2a300000 {
+ 
+ 			interrupts-extended = <&intc GIC_SPI 798 IRQ_TYPE_EDGE_RISING>,
+ 					      <&smp2p_cdsp1_in 0 IRQ_TYPE_EDGE_RISING>,
+-					      <&smp2p_cdsp1_in 2 IRQ_TYPE_EDGE_RISING>,
+ 					      <&smp2p_cdsp1_in 1 IRQ_TYPE_EDGE_RISING>,
++					      <&smp2p_cdsp1_in 2 IRQ_TYPE_EDGE_RISING>,
+ 					      <&smp2p_cdsp1_in 3 IRQ_TYPE_EDGE_RISING>;
+ 			interrupt-names = "wdog", "fatal", "ready",
+ 					  "handover", "stop-ack";
+@@ -4253,8 +4253,8 @@ remoteproc_adsp: remoteproc@30000000 {
+ 
+ 			interrupts-extended = <&pdc 6 IRQ_TYPE_EDGE_RISING>,
+ 					      <&smp2p_adsp_in 0 IRQ_TYPE_EDGE_RISING>,
+-					      <&smp2p_adsp_in 2 IRQ_TYPE_EDGE_RISING>,
+ 					      <&smp2p_adsp_in 1 IRQ_TYPE_EDGE_RISING>,
++					      <&smp2p_adsp_in 2 IRQ_TYPE_EDGE_RISING>,
+ 					      <&smp2p_adsp_in 3 IRQ_TYPE_EDGE_RISING>;
+ 			interrupt-names = "wdog", "fatal", "ready", "handover",
+ 					  "stop-ack";
+diff --git a/arch/arm64/boot/dts/qcom/sc7180.dtsi b/arch/arm64/boot/dts/qcom/sc7180.dtsi
+index 249b257fc6a74b..6ae5ca00c7187a 100644
+--- a/arch/arm64/boot/dts/qcom/sc7180.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7180.dtsi
+@@ -3524,18 +3524,18 @@ spmi_bus: spmi@c440000 {
+ 			#interrupt-cells = <4>;
+ 		};
+ 
+-		sram@146aa000 {
++		sram@14680000 {
+ 			compatible = "qcom,sc7180-imem", "syscon", "simple-mfd";
+-			reg = <0 0x146aa000 0 0x2000>;
++			reg = <0 0x14680000 0 0x2e000>;
+ 
+ 			#address-cells = <1>;
+ 			#size-cells = <1>;
+ 
+-			ranges = <0 0 0x146aa000 0x2000>;
++			ranges = <0 0 0x14680000 0x2e000>;
+ 
+-			pil-reloc@94c {
++			pil-reloc@2a94c {
+ 				compatible = "qcom,pil-reloc-info";
+-				reg = <0x94c 0xc8>;
++				reg = <0x2a94c 0xc8>;
+ 			};
+ 		};
+ 
+diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
+index 0a0cef9dfcc416..9bf7a405a964c2 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
++++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
+@@ -5074,18 +5074,18 @@ spmi_bus: spmi@c440000 {
+ 			#interrupt-cells = <4>;
+ 		};
+ 
+-		sram@146bf000 {
++		sram@14680000 {
+ 			compatible = "qcom,sdm845-imem", "syscon", "simple-mfd";
+-			reg = <0 0x146bf000 0 0x1000>;
++			reg = <0 0x14680000 0 0x40000>;
+ 
+ 			#address-cells = <1>;
+ 			#size-cells = <1>;
+ 
+-			ranges = <0 0 0x146bf000 0x1000>;
++			ranges = <0 0 0x14680000 0x40000>;
+ 
+-			pil-reloc@94c {
++			pil-reloc@3f94c {
+ 				compatible = "qcom,pil-reloc-info";
+-				reg = <0x94c 0xc8>;
++				reg = <0x3f94c 0xc8>;
+ 			};
+ 		};
+ 
+diff --git a/arch/arm64/boot/dts/st/stm32mp251.dtsi b/arch/arm64/boot/dts/st/stm32mp251.dtsi
+index cd9b92144a42cb..ed7804f061895d 100644
+--- a/arch/arm64/boot/dts/st/stm32mp251.dtsi
++++ b/arch/arm64/boot/dts/st/stm32mp251.dtsi
+@@ -149,7 +149,7 @@ timer {
+ 			     <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>,
+ 			     <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>,
+ 			     <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>;
+-		always-on;
++		arm,no-tick-in-suspend;
+ 	};
+ 
+ 	soc@0 {
+diff --git a/arch/arm64/boot/dts/ti/k3-am62p-j722s-common-main.dtsi b/arch/arm64/boot/dts/ti/k3-am62p-j722s-common-main.dtsi
+index 77fe2b27cb58d0..239acfcc3a5c05 100644
+--- a/arch/arm64/boot/dts/ti/k3-am62p-j722s-common-main.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-am62p-j722s-common-main.dtsi
+@@ -250,7 +250,7 @@ secure_proxy_sa3: mailbox@43600000 {
+ 
+ 	main_pmx0: pinctrl@f4000 {
+ 		compatible = "pinctrl-single";
+-		reg = <0x00 0xf4000 0x00 0x2ac>;
++		reg = <0x00 0xf4000 0x00 0x2b0>;
+ 		#pinctrl-cells = <1>;
+ 		pinctrl-single,register-width = <32>;
+ 		pinctrl-single,function-mask = <0xffffffff>;
+diff --git a/arch/arm64/boot/dts/ti/k3-am642-phyboard-electra-rdk.dts b/arch/arm64/boot/dts/ti/k3-am642-phyboard-electra-rdk.dts
+index 60285d736e07a3..78df1b43a633d6 100644
+--- a/arch/arm64/boot/dts/ti/k3-am642-phyboard-electra-rdk.dts
++++ b/arch/arm64/boot/dts/ti/k3-am642-phyboard-electra-rdk.dts
+@@ -319,6 +319,8 @@ AM64X_IOPAD(0x0040, PIN_OUTPUT, 7)	/* (U21) GPMC0_AD1.GPIO0_16 */
+ &icssg0_mdio {
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&icssg0_mdio_pins_default &clkout0_pins_default>;
++	assigned-clocks = <&k3_clks 157 123>;
++	assigned-clock-parents = <&k3_clks 157 125>;
+ 	status = "okay";
+ 
+ 	icssg0_phy1: ethernet-phy@1 {
+diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
+index 515c411c2c839d..5553508c364402 100644
+--- a/arch/arm64/net/bpf_jit_comp.c
++++ b/arch/arm64/net/bpf_jit_comp.c
+@@ -399,6 +399,7 @@ static void push_callee_regs(struct jit_ctx *ctx)
+ 		emit(A64_PUSH(A64_R(23), A64_R(24), A64_SP), ctx);
+ 		emit(A64_PUSH(A64_R(25), A64_R(26), A64_SP), ctx);
+ 		emit(A64_PUSH(A64_R(27), A64_R(28), A64_SP), ctx);
++		ctx->fp_used = true;
+ 	} else {
+ 		find_used_callee_regs(ctx);
+ 		for (i = 0; i + 1 < ctx->nr_used_callee_reg; i += 2) {
+diff --git a/arch/m68k/Kconfig.debug b/arch/m68k/Kconfig.debug
+index 30638a6e8edcb3..d036f903864c26 100644
+--- a/arch/m68k/Kconfig.debug
++++ b/arch/m68k/Kconfig.debug
+@@ -10,7 +10,7 @@ config BOOTPARAM_STRING
+ 
+ config EARLY_PRINTK
+ 	bool "Early printk"
+-	depends on !(SUN3 || M68000 || COLDFIRE)
++	depends on MMU_MOTOROLA
+ 	help
+ 	  Write kernel log output directly to a serial port.
+ 	  Where implemented, output goes to the framebuffer as well.
+diff --git a/arch/m68k/kernel/early_printk.c b/arch/m68k/kernel/early_printk.c
+index f11ef9f1f56fcf..521cbb8a150c99 100644
+--- a/arch/m68k/kernel/early_printk.c
++++ b/arch/m68k/kernel/early_printk.c
+@@ -16,25 +16,10 @@
+ #include "../mvme147/mvme147.h"
+ #include "../mvme16x/mvme16x.h"
+ 
+-asmlinkage void __init debug_cons_nputs(const char *s, unsigned n);
+-
+-static void __ref debug_cons_write(struct console *c,
+-				   const char *s, unsigned n)
+-{
+-#if !(defined(CONFIG_SUN3) || defined(CONFIG_M68000) || \
+-      defined(CONFIG_COLDFIRE))
+-	if (MACH_IS_MVME147)
+-		mvme147_scc_write(c, s, n);
+-	else if (MACH_IS_MVME16x)
+-		mvme16x_cons_write(c, s, n);
+-	else
+-		debug_cons_nputs(s, n);
+-#endif
+-}
++asmlinkage void __init debug_cons_nputs(struct console *c, const char *s, unsigned int n);
+ 
+ static struct console early_console_instance = {
+ 	.name  = "debug",
+-	.write = debug_cons_write,
+ 	.flags = CON_PRINTBUFFER | CON_BOOT,
+ 	.index = -1
+ };
+@@ -44,6 +29,12 @@ static int __init setup_early_printk(char *buf)
+ 	if (early_console || buf)
+ 		return 0;
+ 
++	if (MACH_IS_MVME147)
++		early_console_instance.write = mvme147_scc_write;
++	else if (MACH_IS_MVME16x)
++		early_console_instance.write = mvme16x_cons_write;
++	else
++		early_console_instance.write = debug_cons_nputs;
+ 	early_console = &early_console_instance;
+ 	register_console(early_console);
+ 
+@@ -51,20 +42,15 @@ static int __init setup_early_printk(char *buf)
+ }
+ early_param("earlyprintk", setup_early_printk);
+ 
+-/*
+- * debug_cons_nputs() defined in arch/m68k/kernel/head.S cannot be called
+- * after init sections are discarded (for platforms that use it).
+- */
+-#if !(defined(CONFIG_SUN3) || defined(CONFIG_M68000) || \
+-      defined(CONFIG_COLDFIRE))
+-
+ static int __init unregister_early_console(void)
+ {
+-	if (!early_console || MACH_IS_MVME16x)
+-		return 0;
++	/*
++	 * debug_cons_nputs() defined in arch/m68k/kernel/head.S cannot be
++	 * called after init sections are discarded (for platforms that use it).
++	 */
++	if (early_console && early_console->write == debug_cons_nputs)
++		return unregister_console(early_console);
+ 
+-	return unregister_console(early_console);
++	return 0;
+ }
+ late_initcall(unregister_early_console);
+-
+-#endif
+diff --git a/arch/m68k/kernel/head.S b/arch/m68k/kernel/head.S
+index 852255cf60dec1..ba22bc2f3d6d86 100644
+--- a/arch/m68k/kernel/head.S
++++ b/arch/m68k/kernel/head.S
+@@ -3263,8 +3263,8 @@ func_return	putn
+  *	turns around and calls the internal routines.  This routine
+  *	is used by the boot console.
+  *
+- *	The calling parameters are:
+- *		void debug_cons_nputs(const char *str, unsigned length)
++ *	The function signature is -
++ *		void debug_cons_nputs(struct console *c, const char *s, unsigned int n)
+  *
+  *	This routine does NOT understand variable arguments only
+  *	simple strings!
+@@ -3273,8 +3273,8 @@ ENTRY(debug_cons_nputs)
+ 	moveml	%d0/%d1/%a0,%sp@-
+ 	movew	%sr,%sp@-
+ 	ori	#0x0700,%sr
+-	movel	%sp@(18),%a0		/* fetch parameter */
+-	movel	%sp@(22),%d1		/* fetch parameter */
++	movel	%sp@(22),%a0		/* char *s */
++	movel	%sp@(26),%d1		/* unsigned int n */
+ 	jra	2f
+ 1:
+ #ifdef CONSOLE_DEBUG
+diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
+index 76f3b9c0a9f0ce..347126dc010dd5 100644
+--- a/arch/mips/mm/tlb-r4k.c
++++ b/arch/mips/mm/tlb-r4k.c
+@@ -508,6 +508,60 @@ static int __init set_ntlb(char *str)
+ 
+ __setup("ntlb=", set_ntlb);
+ 
++/* Initialise all TLB entries with unique values */
++static void r4k_tlb_uniquify(void)
++{
++	int entry = num_wired_entries();
++
++	htw_stop();
++	write_c0_entrylo0(0);
++	write_c0_entrylo1(0);
++
++	while (entry < current_cpu_data.tlbsize) {
++		unsigned long asid_mask = cpu_asid_mask(&current_cpu_data);
++		unsigned long asid = 0;
++		int idx;
++
++		/* Skip wired MMID to make ginvt_mmid work */
++		if (cpu_has_mmid)
++			asid = MMID_KERNEL_WIRED + 1;
++
++		/* Check for match before using UNIQUE_ENTRYHI */
++		do {
++			if (cpu_has_mmid) {
++				write_c0_memorymapid(asid);
++				write_c0_entryhi(UNIQUE_ENTRYHI(entry));
++			} else {
++				write_c0_entryhi(UNIQUE_ENTRYHI(entry) | asid);
++			}
++			mtc0_tlbw_hazard();
++			tlb_probe();
++			tlb_probe_hazard();
++			idx = read_c0_index();
++			/* No match or match is on current entry */
++			if (idx < 0 || idx == entry)
++				break;
++			/*
++			 * If we hit a match, we need to try again with
++			 * a different ASID.
++			 */
++			asid++;
++		} while (asid < asid_mask);
++
++		if (idx >= 0 && idx != entry)
++			panic("Unable to uniquify TLB entry %d", idx);
++
++		write_c0_index(entry);
++		mtc0_tlbw_hazard();
++		tlb_write_indexed();
++		entry++;
++	}
++
++	tlbw_use_hazard();
++	htw_start();
++	flush_micro_tlb();
++}
++
+ /*
+  * Configure TLB (for init or after a CPU has been powered off).
+  */
+@@ -547,7 +601,7 @@ static void r4k_tlb_configure(void)
+ 	temp_tlb_entry = current_cpu_data.tlbsize - 1;
+ 
+ 	/* From this point on the ARC firmware is dead.	 */
+-	local_flush_tlb_all();
++	r4k_tlb_uniquify();
+ 
+ 	/* Did I tell you that ARC SUCKS?  */
+ }
+diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig
+index c06344db0eb37b..1c67f64739a084 100644
+--- a/arch/powerpc/configs/ppc6xx_defconfig
++++ b/arch/powerpc/configs/ppc6xx_defconfig
+@@ -253,7 +253,6 @@ CONFIG_NET_SCH_DSMARK=m
+ CONFIG_NET_SCH_NETEM=m
+ CONFIG_NET_SCH_INGRESS=m
+ CONFIG_NET_CLS_BASIC=m
+-CONFIG_NET_CLS_TCINDEX=m
+ CONFIG_NET_CLS_ROUTE4=m
+ CONFIG_NET_CLS_FW=m
+ CONFIG_NET_CLS_U32=m
+diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
+index ca7f7bb2b47869..2b5f3323e1072d 100644
+--- a/arch/powerpc/kernel/eeh.c
++++ b/arch/powerpc/kernel/eeh.c
+@@ -1139,6 +1139,7 @@ int eeh_unfreeze_pe(struct eeh_pe *pe)
+ 
+ 	return ret;
+ }
++EXPORT_SYMBOL_GPL(eeh_unfreeze_pe);
+ 
+ 
+ static struct pci_device_id eeh_reset_ids[] = {
+diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
+index 7efe04c68f0fe3..dd50de91c43834 100644
+--- a/arch/powerpc/kernel/eeh_driver.c
++++ b/arch/powerpc/kernel/eeh_driver.c
+@@ -257,13 +257,12 @@ static void eeh_pe_report_edev(struct eeh_dev *edev, eeh_report_fn fn,
+ 	struct pci_driver *driver;
+ 	enum pci_ers_result new_result;
+ 
+-	pci_lock_rescan_remove();
+ 	pdev = edev->pdev;
+ 	if (pdev)
+ 		get_device(&pdev->dev);
+-	pci_unlock_rescan_remove();
+ 	if (!pdev) {
+ 		eeh_edev_info(edev, "no device");
++		*result = PCI_ERS_RESULT_DISCONNECT;
+ 		return;
+ 	}
+ 	device_lock(&pdev->dev);
+@@ -304,8 +303,9 @@ static void eeh_pe_report(const char *name, struct eeh_pe *root,
+ 	struct eeh_dev *edev, *tmp;
+ 
+ 	pr_info("EEH: Beginning: '%s'\n", name);
+-	eeh_for_each_pe(root, pe) eeh_pe_for_each_dev(pe, edev, tmp)
+-		eeh_pe_report_edev(edev, fn, result);
++	eeh_for_each_pe(root, pe)
++		eeh_pe_for_each_dev(pe, edev, tmp)
++			eeh_pe_report_edev(edev, fn, result);
+ 	if (result)
+ 		pr_info("EEH: Finished:'%s' with aggregate recovery state:'%s'\n",
+ 			name, pci_ers_result_name(*result));
+@@ -383,6 +383,8 @@ static void eeh_dev_restore_state(struct eeh_dev *edev, void *userdata)
+ 	if (!edev)
+ 		return;
+ 
++	pci_lock_rescan_remove();
++
+ 	/*
+ 	 * The content in the config space isn't saved because
+ 	 * the blocked config space on some adapters. We have
+@@ -393,14 +395,19 @@ static void eeh_dev_restore_state(struct eeh_dev *edev, void *userdata)
+ 		if (list_is_last(&edev->entry, &edev->pe->edevs))
+ 			eeh_pe_restore_bars(edev->pe);
+ 
++		pci_unlock_rescan_remove();
+ 		return;
+ 	}
+ 
+ 	pdev = eeh_dev_to_pci_dev(edev);
+-	if (!pdev)
++	if (!pdev) {
++		pci_unlock_rescan_remove();
+ 		return;
++	}
+ 
+ 	pci_restore_state(pdev);
++
++	pci_unlock_rescan_remove();
+ }
+ 
+ /**
+@@ -647,9 +654,7 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus,
+ 	if (any_passed || driver_eeh_aware || (pe->type & EEH_PE_VF)) {
+ 		eeh_pe_dev_traverse(pe, eeh_rmv_device, rmv_data);
+ 	} else {
+-		pci_lock_rescan_remove();
+ 		pci_hp_remove_devices(bus);
+-		pci_unlock_rescan_remove();
+ 	}
+ 
+ 	/*
+@@ -665,8 +670,6 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus,
+ 	if (rc)
+ 		return rc;
+ 
+-	pci_lock_rescan_remove();
+-
+ 	/* Restore PE */
+ 	eeh_ops->configure_bridge(pe);
+ 	eeh_pe_restore_bars(pe);
+@@ -674,7 +677,6 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus,
+ 	/* Clear frozen state */
+ 	rc = eeh_clear_pe_frozen_state(pe, false);
+ 	if (rc) {
+-		pci_unlock_rescan_remove();
+ 		return rc;
+ 	}
+ 
+@@ -709,7 +711,6 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus,
+ 	pe->tstamp = tstamp;
+ 	pe->freeze_count = cnt;
+ 
+-	pci_unlock_rescan_remove();
+ 	return 0;
+ }
+ 
+@@ -843,10 +844,13 @@ void eeh_handle_normal_event(struct eeh_pe *pe)
+ 		{LIST_HEAD_INIT(rmv_data.removed_vf_list), 0};
+ 	int devices = 0;
+ 
++	pci_lock_rescan_remove();
++
+ 	bus = eeh_pe_bus_get(pe);
+ 	if (!bus) {
+ 		pr_err("%s: Cannot find PCI bus for PHB#%x-PE#%x\n",
+ 			__func__, pe->phb->global_number, pe->addr);
++		pci_unlock_rescan_remove();
+ 		return;
+ 	}
+ 
+@@ -1094,10 +1098,15 @@ void eeh_handle_normal_event(struct eeh_pe *pe)
+ 		eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true);
+ 		eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);
+ 
+-		pci_lock_rescan_remove();
+-		pci_hp_remove_devices(bus);
+-		pci_unlock_rescan_remove();
++		bus = eeh_pe_bus_get(pe);
++		if (bus)
++			pci_hp_remove_devices(bus);
++		else
++			pr_err("%s: PCI bus for PHB#%x-PE#%x disappeared\n",
++				__func__, pe->phb->global_number, pe->addr);
++
+ 		/* The passed PE should no longer be used */
++		pci_unlock_rescan_remove();
+ 		return;
+ 	}
+ 
+@@ -1114,6 +1123,8 @@ void eeh_handle_normal_event(struct eeh_pe *pe)
+ 			eeh_clear_slot_attention(edev->pdev);
+ 
+ 	eeh_pe_state_clear(pe, EEH_PE_RECOVERING, true);
++
++	pci_unlock_rescan_remove();
+ }
+ 
+ /**
+@@ -1132,6 +1143,7 @@ void eeh_handle_special_event(void)
+ 	unsigned long flags;
+ 	int rc;
+ 
++	pci_lock_rescan_remove();
+ 
+ 	do {
+ 		rc = eeh_ops->next_error(&pe);
+@@ -1171,10 +1183,12 @@ void eeh_handle_special_event(void)
+ 
+ 			break;
+ 		case EEH_NEXT_ERR_NONE:
++			pci_unlock_rescan_remove();
+ 			return;
+ 		default:
+ 			pr_warn("%s: Invalid value %d from next_error()\n",
+ 				__func__, rc);
++			pci_unlock_rescan_remove();
+ 			return;
+ 		}
+ 
+@@ -1186,7 +1200,9 @@ void eeh_handle_special_event(void)
+ 		if (rc == EEH_NEXT_ERR_FROZEN_PE ||
+ 		    rc == EEH_NEXT_ERR_FENCED_PHB) {
+ 			eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
++			pci_unlock_rescan_remove();
+ 			eeh_handle_normal_event(pe);
++			pci_lock_rescan_remove();
+ 		} else {
+ 			eeh_for_each_pe(pe, tmp_pe)
+ 				eeh_pe_for_each_dev(tmp_pe, edev, tmp_edev)
+@@ -1199,7 +1215,6 @@ void eeh_handle_special_event(void)
+ 				eeh_report_failure, NULL);
+ 			eeh_set_channel_state(pe, pci_channel_io_perm_failure);
+ 
+-			pci_lock_rescan_remove();
+ 			list_for_each_entry(hose, &hose_list, list_node) {
+ 				phb_pe = eeh_phb_pe_get(hose);
+ 				if (!phb_pe ||
+@@ -1218,7 +1233,6 @@ void eeh_handle_special_event(void)
+ 				}
+ 				pci_hp_remove_devices(bus);
+ 			}
+-			pci_unlock_rescan_remove();
+ 		}
+ 
+ 		/*
+@@ -1228,4 +1242,6 @@ void eeh_handle_special_event(void)
+ 		if (rc == EEH_NEXT_ERR_DEAD_IOC)
+ 			break;
+ 	} while (rc != EEH_NEXT_ERR_NONE);
++
++	pci_unlock_rescan_remove();
+ }
+diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
+index d283d281d28e8b..e740101fadf3b1 100644
+--- a/arch/powerpc/kernel/eeh_pe.c
++++ b/arch/powerpc/kernel/eeh_pe.c
+@@ -671,10 +671,12 @@ static void eeh_bridge_check_link(struct eeh_dev *edev)
+ 	eeh_ops->write_config(edev, cap + PCI_EXP_LNKCTL, 2, val);
+ 
+ 	/* Check link */
+-	if (!edev->pdev->link_active_reporting) {
+-		eeh_edev_dbg(edev, "No link reporting capability\n");
+-		msleep(1000);
+-		return;
++	if (edev->pdev) {
++		if (!edev->pdev->link_active_reporting) {
++			eeh_edev_dbg(edev, "No link reporting capability\n");
++			msleep(1000);
++			return;
++		}
+ 	}
+ 
+ 	/* Wait the link is up until timeout (5s) */
+diff --git a/arch/powerpc/kernel/pci-hotplug.c b/arch/powerpc/kernel/pci-hotplug.c
+index 9ea74973d78d5a..6f444d0822d820 100644
+--- a/arch/powerpc/kernel/pci-hotplug.c
++++ b/arch/powerpc/kernel/pci-hotplug.c
+@@ -141,6 +141,9 @@ void pci_hp_add_devices(struct pci_bus *bus)
+ 	struct pci_controller *phb;
+ 	struct device_node *dn = pci_bus_to_OF_node(bus);
+ 
++	if (!dn)
++		return;
++
+ 	phb = pci_bus_to_host(bus);
+ 
+ 	mode = PCI_PROBE_NORMAL;
+diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
+index 213aa26dc8b337..979487da65223d 100644
+--- a/arch/powerpc/platforms/pseries/dlpar.c
++++ b/arch/powerpc/platforms/pseries/dlpar.c
+@@ -404,6 +404,45 @@ get_device_node_with_drc_info(u32 index)
+ 	return NULL;
+ }
+ 
++static struct device_node *
++get_device_node_with_drc_indexes(u32 drc_index)
++{
++	struct device_node *np = NULL;
++	u32 nr_indexes, index;
++	int i, rc;
++
++	for_each_node_with_property(np, "ibm,drc-indexes") {
++		/*
++		 * First element in the array is the total number of
++		 * DRC indexes returned.
++		 */
++		rc = of_property_read_u32_index(np, "ibm,drc-indexes",
++				0, &nr_indexes);
++		if (rc)
++			goto out_put_np;
++
++		/*
++		 * Retrieve DRC index from the list and return the
++		 * device node if matched with the specified index.
++		 */
++		for (i = 0; i < nr_indexes; i++) {
++			rc = of_property_read_u32_index(np, "ibm,drc-indexes",
++							i+1, &index);
++			if (rc)
++				goto out_put_np;
++
++			if (drc_index == index)
++				return np;
++		}
++	}
++
++	return NULL;
++
++out_put_np:
++	of_node_put(np);
++	return NULL;
++}
++
+ static int dlpar_hp_dt_add(u32 index)
+ {
+ 	struct device_node *np, *nodes;
+@@ -423,10 +462,19 @@ static int dlpar_hp_dt_add(u32 index)
+ 		goto out;
+ 	}
+ 
++	/*
++	 * Recent FW provides ibm,drc-info property. So search
++	 * for the user specified DRC index from ibm,drc-info
++	 * property. If this property is not available, search
++	 * in the indexes array from ibm,drc-indexes property.
++	 */
+ 	np = get_device_node_with_drc_info(index);
+ 
+-	if (!np)
+-		return -EIO;
++	if (!np) {
++		np = get_device_node_with_drc_indexes(index);
++		if (!np)
++			return -EIO;
++	}
+ 
+ 	/* Next, configure the connector. */
+ 	nodes = dlpar_configure_connector(cpu_to_be32(index), np);
+diff --git a/arch/s390/include/asm/ap.h b/arch/s390/include/asm/ap.h
+index 395b02d6a13374..352108727d7e62 100644
+--- a/arch/s390/include/asm/ap.h
++++ b/arch/s390/include/asm/ap.h
+@@ -103,7 +103,7 @@ struct ap_tapq_hwinfo {
+ 			unsigned int accel :  1; /* A */
+ 			unsigned int ep11  :  1; /* X */
+ 			unsigned int apxa  :  1; /* APXA */
+-			unsigned int	   :  1;
++			unsigned int slcf  :  1; /* Cmd filtering avail. */
+ 			unsigned int class :  8;
+ 			unsigned int bs	   :  2; /* SE bind/assoc */
+ 			unsigned int	   : 14;
+diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
+index f691e0fb66a250..f5dece93535357 100644
+--- a/arch/s390/mm/pgalloc.c
++++ b/arch/s390/mm/pgalloc.c
+@@ -219,11 +219,6 @@ void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable)
+ 	struct ptdesc *ptdesc = virt_to_ptdesc(pgtable);
+ 
+ 	call_rcu(&ptdesc->pt_rcu_head, pte_free_now);
+-	/*
+-	 * THPs are not allowed for KVM guests. Warn if pgste ever reaches here.
+-	 * Turn to the generic pte_free_defer() version once gmap is removed.
+-	 */
+-	WARN_ON_ONCE(mm_has_pgste(mm));
+ }
+ #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+ 
+diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
+index 665b8228afebcd..dd971826652ac1 100644
+--- a/arch/s390/mm/vmem.c
++++ b/arch/s390/mm/vmem.c
+@@ -63,13 +63,12 @@ void *vmem_crst_alloc(unsigned long val)
+ 
+ pte_t __ref *vmem_pte_alloc(void)
+ {
+-	unsigned long size = PTRS_PER_PTE * sizeof(pte_t);
+ 	pte_t *pte;
+ 
+ 	if (slab_is_available())
+-		pte = (pte_t *) page_table_alloc(&init_mm);
++		pte = (pte_t *)page_table_alloc(&init_mm);
+ 	else
+-		pte = (pte_t *) memblock_alloc(size, size);
++		pte = (pte_t *)memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ 	if (!pte)
+ 		return NULL;
+ 	memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
+diff --git a/arch/sh/Makefile b/arch/sh/Makefile
+index cab2f9c011a8db..7b420424b6d7c4 100644
+--- a/arch/sh/Makefile
++++ b/arch/sh/Makefile
+@@ -103,16 +103,16 @@ UTS_MACHINE		:= sh
+ LDFLAGS_vmlinux		+= -e _stext
+ 
+ ifdef CONFIG_CPU_LITTLE_ENDIAN
+-ld-bfd			:= elf32-sh-linux
+-LDFLAGS_vmlinux		+= --defsym jiffies=jiffies_64 --oformat $(ld-bfd)
++ld_bfd			:= elf32-sh-linux
++LDFLAGS_vmlinux		+= --defsym jiffies=jiffies_64 --oformat $(ld_bfd)
+ KBUILD_LDFLAGS		+= -EL
+ else
+-ld-bfd			:= elf32-shbig-linux
+-LDFLAGS_vmlinux		+= --defsym jiffies=jiffies_64+4 --oformat $(ld-bfd)
++ld_bfd			:= elf32-shbig-linux
++LDFLAGS_vmlinux		+= --defsym jiffies=jiffies_64+4 --oformat $(ld_bfd)
+ KBUILD_LDFLAGS		+= -EB
+ endif
+ 
+-export ld-bfd
++export ld_bfd
+ 
+ # Mach groups
+ machdir-$(CONFIG_SOLUTION_ENGINE)		+= mach-se
+diff --git a/arch/sh/boot/compressed/Makefile b/arch/sh/boot/compressed/Makefile
+index 8bc319ff54bf93..58df491778b29a 100644
+--- a/arch/sh/boot/compressed/Makefile
++++ b/arch/sh/boot/compressed/Makefile
+@@ -27,7 +27,7 @@ endif
+ 
+ ccflags-remove-$(CONFIG_MCOUNT) += -pg
+ 
+-LDFLAGS_vmlinux := --oformat $(ld-bfd) -Ttext $(IMAGE_OFFSET) -e startup \
++LDFLAGS_vmlinux := --oformat $(ld_bfd) -Ttext $(IMAGE_OFFSET) -e startup \
+ 		   -T $(obj)/../../kernel/vmlinux.lds
+ 
+ KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
+@@ -51,7 +51,7 @@ $(obj)/vmlinux.bin.lzo: $(obj)/vmlinux.bin FORCE
+ 
+ OBJCOPYFLAGS += -R .empty_zero_page
+ 
+-LDFLAGS_piggy.o := -r --format binary --oformat $(ld-bfd) -T
++LDFLAGS_piggy.o := -r --format binary --oformat $(ld_bfd) -T
+ 
+ $(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.$(suffix_y) FORCE
+ 	$(call if_changed,ld)
+diff --git a/arch/sh/boot/romimage/Makefile b/arch/sh/boot/romimage/Makefile
+index c7c8be58400cd9..17b03df0a8de4d 100644
+--- a/arch/sh/boot/romimage/Makefile
++++ b/arch/sh/boot/romimage/Makefile
+@@ -13,7 +13,7 @@ mmcif-obj-$(CONFIG_CPU_SUBTYPE_SH7724)	:= $(obj)/mmcif-sh7724.o
+ load-$(CONFIG_ROMIMAGE_MMCIF)		:= $(mmcif-load-y)
+ obj-$(CONFIG_ROMIMAGE_MMCIF)		:= $(mmcif-obj-y)
+ 
+-LDFLAGS_vmlinux := --oformat $(ld-bfd) -Ttext $(load-y) -e romstart \
++LDFLAGS_vmlinux := --oformat $(ld_bfd) -Ttext $(load-y) -e romstart \
+ 		   -T $(obj)/../../kernel/vmlinux.lds
+ 
+ $(obj)/vmlinux: $(obj)/head.o $(obj-y) $(obj)/piggy.o FORCE
+@@ -24,7 +24,7 @@ OBJCOPYFLAGS += -j .empty_zero_page
+ $(obj)/zeropage.bin: vmlinux FORCE
+ 	$(call if_changed,objcopy)
+ 
+-LDFLAGS_piggy.o := -r --format binary --oformat $(ld-bfd) -T
++LDFLAGS_piggy.o := -r --format binary --oformat $(ld_bfd) -T
+ 
+ $(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/zeropage.bin arch/sh/boot/zImage FORCE
+ 	$(call if_changed,ld)
+diff --git a/arch/um/drivers/rtc_user.c b/arch/um/drivers/rtc_user.c
+index 7c3cec4c68cffe..006a5a164ea91d 100644
+--- a/arch/um/drivers/rtc_user.c
++++ b/arch/um/drivers/rtc_user.c
+@@ -28,7 +28,7 @@ int uml_rtc_start(bool timetravel)
+ 	int err;
+ 
+ 	if (timetravel) {
+-		int err = os_pipe(uml_rtc_irq_fds, 1, 1);
++		err = os_pipe(uml_rtc_irq_fds, 1, 1);
+ 		if (err)
+ 			goto fail;
+ 	} else {
+diff --git a/arch/x86/boot/cpuflags.c b/arch/x86/boot/cpuflags.c
+index d75237ba7ce944..5660d3229d29c2 100644
+--- a/arch/x86/boot/cpuflags.c
++++ b/arch/x86/boot/cpuflags.c
+@@ -115,5 +115,18 @@ void get_cpuflags(void)
+ 			cpuid(0x80000001, &ignored, &ignored, &cpu.flags[6],
+ 			      &cpu.flags[1]);
+ 		}
++
++		if (max_amd_level >= 0x8000001f) {
++			u32 ebx;
++
++			/*
++			 * The X86_FEATURE_COHERENCY_SFW_NO feature bit is in
++			 * the virtualization flags entry (word 8) and set by
++			 * scattered.c, so the bit needs to be explicitly set.
++			 */
++			cpuid(0x8000001f, &ignored, &ebx, &ignored, &ignored);
++			if (ebx & BIT(31))
++				set_bit(X86_FEATURE_COHERENCY_SFW_NO, cpu.flags);
++		}
+ 	}
+ }
+diff --git a/arch/x86/coco/sev/shared.c b/arch/x86/coco/sev/shared.c
+index 71de5319408910..f5936da235c713 100644
+--- a/arch/x86/coco/sev/shared.c
++++ b/arch/x86/coco/sev/shared.c
+@@ -1243,6 +1243,24 @@ static void svsm_pval_terminate(struct svsm_pvalidate_call *pc, int ret, u64 svs
+ 	__pval_terminate(pfn, action, page_size, ret, svsm_ret);
+ }
+ 
++static inline void sev_evict_cache(void *va, int npages)
++{
++	volatile u8 val __always_unused;
++	u8 *bytes = va;
++	int page_idx;
++
++	/*
++	 * For SEV guests, a read from the first/last cache-lines of a 4K page
++	 * using the guest key is sufficient to cause a flush of all cache-lines
++	 * associated with that 4K page without incurring all the overhead of a
++	 * full CLFLUSH sequence.
++	 */
++	for (page_idx = 0; page_idx < npages; page_idx++) {
++		val = bytes[page_idx * PAGE_SIZE];
++		val = bytes[page_idx * PAGE_SIZE + PAGE_SIZE - 1];
++	}
++}
++
+ static void svsm_pval_4k_page(unsigned long paddr, bool validate)
+ {
+ 	struct svsm_pvalidate_call *pc;
+@@ -1295,6 +1313,13 @@ static void pvalidate_4k_page(unsigned long vaddr, unsigned long paddr, bool val
+ 		if (ret)
+ 			__pval_terminate(PHYS_PFN(paddr), validate, RMP_PG_SIZE_4K, ret, 0);
+ 	}
++
++	/*
++	 * If validating memory (making it private) and affected by the
++	 * cache-coherency vulnerability, perform the cache eviction mitigation.
++	 */
++	if (validate && !has_cpuflag(X86_FEATURE_COHERENCY_SFW_NO))
++		sev_evict_cache((void *)vaddr, 1);
+ }
+ 
+ static void pval_pages(struct snp_psc_desc *desc)
+@@ -1479,10 +1504,31 @@ static void svsm_pval_pages(struct snp_psc_desc *desc)
+ 
+ static void pvalidate_pages(struct snp_psc_desc *desc)
+ {
++	struct psc_entry *e;
++	unsigned int i;
++
+ 	if (snp_vmpl)
+ 		svsm_pval_pages(desc);
+ 	else
+ 		pval_pages(desc);
++
++	/*
++	 * If not affected by the cache-coherency vulnerability there is no need
++	 * to perform the cache eviction mitigation.
++	 */
++	if (cpu_feature_enabled(X86_FEATURE_COHERENCY_SFW_NO))
++		return;
++
++	for (i = 0; i <= desc->hdr.end_entry; i++) {
++		e = &desc->entries[i];
++
++		/*
++		 * If validating memory (making it private) perform the cache
++		 * eviction mitigation.
++		 */
++		if (e->operation == SNP_PAGE_STATE_PRIVATE)
++			sev_evict_cache(pfn_to_kaddr(e->gfn), e->pagesize ? 512 : 1);
++	}
+ }
+ 
+ static int vmgexit_psc(struct ghcb *ghcb, struct snp_psc_desc *desc)
+diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
+index ef5749a0d8c24d..98e72c1391f240 100644
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -227,6 +227,7 @@
+ #define X86_FEATURE_FLEXPRIORITY	( 8*32+ 1) /* "flexpriority" Intel FlexPriority */
+ #define X86_FEATURE_EPT			( 8*32+ 2) /* "ept" Intel Extended Page Table */
+ #define X86_FEATURE_VPID		( 8*32+ 3) /* "vpid" Intel Virtual Processor ID */
++#define X86_FEATURE_COHERENCY_SFW_NO	( 8*32+ 4) /* SNP cache coherency software work around not needed */
+ 
+ #define X86_FEATURE_VMMCALL		( 8*32+15) /* "vmmcall" Prefer VMMCALL to VMCALL */
+ #define X86_FEATURE_XENPV		( 8*32+16) /* Xen paravirtual guest */
+diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
+index edebf1020e0497..6bb3d9a86abe3c 100644
+--- a/arch/x86/include/asm/hw_irq.h
++++ b/arch/x86/include/asm/hw_irq.h
+@@ -92,8 +92,6 @@ struct irq_cfg {
+ 
+ extern struct irq_cfg *irq_cfg(unsigned int irq);
+ extern struct irq_cfg *irqd_cfg(struct irq_data *irq_data);
+-extern void lock_vector_lock(void);
+-extern void unlock_vector_lock(void);
+ #ifdef CONFIG_SMP
+ extern void vector_schedule_cleanup(struct irq_cfg *);
+ extern void irq_complete_move(struct irq_cfg *cfg);
+@@ -101,12 +99,16 @@ extern void irq_complete_move(struct irq_cfg *cfg);
+ static inline void vector_schedule_cleanup(struct irq_cfg *c) { }
+ static inline void irq_complete_move(struct irq_cfg *c) { }
+ #endif
+-
+ extern void apic_ack_edge(struct irq_data *data);
+-#else	/*  CONFIG_IRQ_DOMAIN_HIERARCHY */
++#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */
++
++#ifdef CONFIG_X86_LOCAL_APIC
++extern void lock_vector_lock(void);
++extern void unlock_vector_lock(void);
++#else
+ static inline void lock_vector_lock(void) {}
+ static inline void unlock_vector_lock(void) {}
+-#endif	/* CONFIG_IRQ_DOMAIN_HIERARCHY */
++#endif
+ 
+ /* Statistics */
+ extern atomic_t irq_err_count;
+diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
+index bc4993aa41edf2..c463363ae1d49e 100644
+--- a/arch/x86/kernel/cpu/scattered.c
++++ b/arch/x86/kernel/cpu/scattered.c
+@@ -47,6 +47,7 @@ static const struct cpuid_bit cpuid_bits[] = {
+ 	{ X86_FEATURE_PROC_FEEDBACK,    CPUID_EDX, 11, 0x80000007, 0 },
+ 	{ X86_FEATURE_FAST_CPPC, 	CPUID_EDX, 15, 0x80000007, 0 },
+ 	{ X86_FEATURE_MBA,		CPUID_EBX,  6, 0x80000008, 0 },
++	{ X86_FEATURE_COHERENCY_SFW_NO,	CPUID_EBX, 31, 0x8000001f, 0 },
+ 	{ X86_FEATURE_SMBA,		CPUID_EBX,  2, 0x80000020, 0 },
+ 	{ X86_FEATURE_BMEC,		CPUID_EBX,  3, 0x80000020, 0 },
+ 	{ X86_FEATURE_TSA_SQ_NO,	CPUID_ECX,  1, 0x80000021, 0 },
+diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
+index 85fa2db38dc42b..9400730e538ed6 100644
+--- a/arch/x86/kernel/irq.c
++++ b/arch/x86/kernel/irq.c
+@@ -251,26 +251,59 @@ static __always_inline void handle_irq(struct irq_desc *desc,
+ 		__handle_irq(desc, regs);
+ }
+ 
+-static __always_inline int call_irq_handler(int vector, struct pt_regs *regs)
++static struct irq_desc *reevaluate_vector(int vector)
+ {
+-	struct irq_desc *desc;
+-	int ret = 0;
++	struct irq_desc *desc = __this_cpu_read(vector_irq[vector]);
++
++	if (!IS_ERR_OR_NULL(desc))
++		return desc;
++
++	if (desc == VECTOR_UNUSED)
++		pr_emerg_ratelimited("No irq handler for %d.%u\n", smp_processor_id(), vector);
++	else
++		__this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
++	return NULL;
++}
++
++static __always_inline bool call_irq_handler(int vector, struct pt_regs *regs)
++{
++	struct irq_desc *desc = __this_cpu_read(vector_irq[vector]);
+ 
+-	desc = __this_cpu_read(vector_irq[vector]);
+ 	if (likely(!IS_ERR_OR_NULL(desc))) {
+ 		handle_irq(desc, regs);
+-	} else {
+-		ret = -EINVAL;
+-		if (desc == VECTOR_UNUSED) {
+-			pr_emerg_ratelimited("%s: %d.%u No irq handler for vector\n",
+-					     __func__, smp_processor_id(),
+-					     vector);
+-		} else {
+-			__this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
+-		}
++		return true;
+ 	}
+ 
+-	return ret;
++	/*
++	 * Reevaluate with vector_lock held to prevent a race against
++	 * request_irq() setting up the vector:
++	 *
++	 * CPU0				CPU1
++	 *				interrupt is raised in APIC IRR
++	 *				but not handled
++	 * free_irq()
++	 *   per_cpu(vector_irq, CPU1)[vector] = VECTOR_SHUTDOWN;
++	 *
++	 * request_irq()		common_interrupt()
++	 *				  d = this_cpu_read(vector_irq[vector]);
++	 *
++	 * per_cpu(vector_irq, CPU1)[vector] = desc;
++	 *
++	 *				  if (d == VECTOR_SHUTDOWN)
++	 *				    this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
++	 *
++	 * This requires that the same vector on the same target CPU is
++	 * handed out or that a spurious interrupt hits that CPU/vector.
++	 */
++	lock_vector_lock();
++	desc = reevaluate_vector(vector);
++	unlock_vector_lock();
++
++	if (!desc)
++		return false;
++
++	handle_irq(desc, regs);
++	return true;
+ }
+ 
+ /*
+@@ -284,7 +317,7 @@ DEFINE_IDTENTRY_IRQ(common_interrupt)
+ 	/* entry code tells RCU that we're not quiescent.  Check it. */
+ 	RCU_LOCKDEP_WARN(!rcu_is_watching(), "IRQ failed to wake up RCU");
+ 
+-	if (unlikely(call_irq_handler(vector, regs)))
++	if (unlikely(!call_irq_handler(vector, regs)))
+ 		apic_eoi();
+ 
+ 	set_irq_regs(old_regs);
+diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
+index 51986e8a9d3535..52e22d3e1a82f8 100644
+--- a/arch/x86/mm/extable.c
++++ b/arch/x86/mm/extable.c
+@@ -122,13 +122,12 @@ static bool ex_handler_sgx(const struct exception_table_entry *fixup,
+ static bool ex_handler_fprestore(const struct exception_table_entry *fixup,
+ 				 struct pt_regs *regs)
+ {
+-	regs->ip = ex_fixup_addr(fixup);
+-
+ 	WARN_ONCE(1, "Bad FPU state detected at %pB, reinitializing FPU registers.",
+ 		  (void *)instruction_pointer(regs));
+ 
+ 	fpu_reset_from_exception_fixup();
+-	return true;
++
++	return ex_handler_default(fixup, regs);
+ }
+ 
+ /*
+diff --git a/block/blk-settings.c b/block/blk-settings.c
+index 7858c92b44834f..22ce7fa4fe20a8 100644
+--- a/block/blk-settings.c
++++ b/block/blk-settings.c
+@@ -320,12 +320,19 @@ static int blk_validate_limits(struct queue_limits *lim)
+ 	lim->max_discard_sectors =
+ 		min(lim->max_hw_discard_sectors, lim->max_user_discard_sectors);
+ 
++	/*
++	 * When discard is not supported, discard_granularity should be reported
++	 * as 0 to userspace.
++	 */
++	if (lim->max_discard_sectors)
++		lim->discard_granularity =
++			max(lim->discard_granularity, lim->physical_block_size);
++	else
++		lim->discard_granularity = 0;
++
+ 	if (!lim->max_discard_segments)
+ 		lim->max_discard_segments = 1;
+ 
+-	if (lim->discard_granularity < lim->physical_block_size)
+-		lim->discard_granularity = lim->physical_block_size;
+-
+ 	/*
+ 	 * By default there is no limit on the segment boundary alignment,
+ 	 * but if there is one it can't be smaller than the page size as
+diff --git a/drivers/accel/ivpu/ivpu_debugfs.c b/drivers/accel/ivpu/ivpu_debugfs.c
+index 1edf6e56440261..df89c1c0da6dd7 100644
+--- a/drivers/accel/ivpu/ivpu_debugfs.c
++++ b/drivers/accel/ivpu/ivpu_debugfs.c
+@@ -346,49 +346,23 @@ static const struct file_operations ivpu_force_recovery_fops = {
+ 	.write = ivpu_force_recovery_fn,
+ };
+ 
+-static ssize_t
+-ivpu_reset_engine_fn(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
++static int ivpu_reset_engine_fn(void *data, u64 val)
+ {
+-	struct ivpu_device *vdev = file->private_data;
+-
+-	if (!size)
+-		return -EINVAL;
++	struct ivpu_device *vdev = (struct ivpu_device *)data;
+ 
+-	if (ivpu_jsm_reset_engine(vdev, DRM_IVPU_ENGINE_COMPUTE))
+-		return -ENODEV;
+-	if (ivpu_jsm_reset_engine(vdev, DRM_IVPU_ENGINE_COPY))
+-		return -ENODEV;
+-
+-	return size;
++	return ivpu_jsm_reset_engine(vdev, (u32)val);
+ }
+ 
+-static const struct file_operations ivpu_reset_engine_fops = {
+-	.owner = THIS_MODULE,
+-	.open = simple_open,
+-	.write = ivpu_reset_engine_fn,
+-};
++DEFINE_DEBUGFS_ATTRIBUTE(ivpu_reset_engine_fops, NULL, ivpu_reset_engine_fn, "0x%02llx\n");
+ 
+-static ssize_t
+-ivpu_resume_engine_fn(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
++static int ivpu_resume_engine_fn(void *data, u64 val)
+ {
+-	struct ivpu_device *vdev = file->private_data;
+-
+-	if (!size)
+-		return -EINVAL;
++	struct ivpu_device *vdev = (struct ivpu_device *)data;
+ 
+-	if (ivpu_jsm_hws_resume_engine(vdev, DRM_IVPU_ENGINE_COMPUTE))
+-		return -ENODEV;
+-	if (ivpu_jsm_hws_resume_engine(vdev, DRM_IVPU_ENGINE_COPY))
+-		return -ENODEV;
+-
+-	return size;
++	return ivpu_jsm_hws_resume_engine(vdev, (u32)val);
+ }
+ 
+-static const struct file_operations ivpu_resume_engine_fops = {
+-	.owner = THIS_MODULE,
+-	.open = simple_open,
+-	.write = ivpu_resume_engine_fn,
+-};
++DEFINE_DEBUGFS_ATTRIBUTE(ivpu_resume_engine_fops, NULL, ivpu_resume_engine_fn, "0x%02llx\n");
+ 
+ static int dct_active_get(void *data, u64 *active_percent)
+ {
+diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
+index 3b1a5cdd631161..defcc964ecab65 100644
+--- a/drivers/block/ublk_drv.c
++++ b/drivers/block/ublk_drv.c
+@@ -2116,7 +2116,7 @@ static void ublk_deinit_queues(struct ublk_device *ub)
+ 
+ 	for (i = 0; i < nr_queues; i++)
+ 		ublk_deinit_queue(ub, i);
+-	kfree(ub->__queues);
++	kvfree(ub->__queues);
+ }
+ 
+ static int ublk_init_queues(struct ublk_device *ub)
+@@ -2127,7 +2127,7 @@ static int ublk_init_queues(struct ublk_device *ub)
+ 	int i, ret = -ENOMEM;
+ 
+ 	ub->queue_size = ubq_size;
+-	ub->__queues = kcalloc(nr_queues, ubq_size, GFP_KERNEL);
++	ub->__queues = kvcalloc(nr_queues, ubq_size, GFP_KERNEL);
+ 	if (!ub->__queues)
+ 		return ret;
+ 
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 72b5297573735c..1d2e85b418204e 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -511,6 +511,10 @@ static const struct usb_device_id quirks_table[] = {
+ 	{ USB_DEVICE(0x13d3, 0x3549), .driver_info = BTUSB_REALTEK |
+ 						     BTUSB_WIDEBAND_SPEECH },
+ 
++	/* Realtek 8851BU Bluetooth devices */
++	{ USB_DEVICE(0x3625, 0x010b), .driver_info = BTUSB_REALTEK |
++						     BTUSB_WIDEBAND_SPEECH },
++
+ 	/* Realtek 8852AE Bluetooth devices */
+ 	{ USB_DEVICE(0x0bda, 0x2852), .driver_info = BTUSB_REALTEK |
+ 						     BTUSB_WIDEBAND_SPEECH },
+diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c
+index acfd673834ed73..6505ce6ab1a233 100644
+--- a/drivers/bus/mhi/host/pci_generic.c
++++ b/drivers/bus/mhi/host/pci_generic.c
+@@ -509,8 +509,8 @@ static const struct mhi_pci_dev_info mhi_foxconn_dw5932e_info = {
+ 	.sideband_wake = false,
+ };
+ 
+-static const struct mhi_pci_dev_info mhi_foxconn_t99w515_info = {
+-	.name = "foxconn-t99w515",
++static const struct mhi_pci_dev_info mhi_foxconn_t99w640_info = {
++	.name = "foxconn-t99w640",
+ 	.edl = "qcom/sdx72m/foxconn/edl.mbn",
+ 	.edl_trigger = true,
+ 	.config = &modem_foxconn_sdx72_config,
+@@ -792,9 +792,9 @@ static const struct pci_device_id mhi_pci_id_table[] = {
+ 	/* DW5932e (sdx62), Non-eSIM */
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0f9),
+ 		.driver_data = (kernel_ulong_t) &mhi_foxconn_dw5932e_info },
+-	/* T99W515 (sdx72) */
++	/* T99W640 (sdx72) */
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe118),
+-		.driver_data = (kernel_ulong_t) &mhi_foxconn_t99w515_info },
++		.driver_data = (kernel_ulong_t) &mhi_foxconn_t99w640_info },
+ 	/* DW5934e(sdx72), With eSIM */
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe11d),
+ 		.driver_data = (kernel_ulong_t) &mhi_foxconn_dw5934e_info },
+diff --git a/drivers/char/hw_random/mtk-rng.c b/drivers/char/hw_random/mtk-rng.c
+index 1e3048f2bb38f0..6c4e40d0365f00 100644
+--- a/drivers/char/hw_random/mtk-rng.c
++++ b/drivers/char/hw_random/mtk-rng.c
+@@ -142,7 +142,9 @@ static int mtk_rng_probe(struct platform_device *pdev)
+ 	dev_set_drvdata(&pdev->dev, priv);
+ 	pm_runtime_set_autosuspend_delay(&pdev->dev, RNG_AUTOSUSPEND_TIMEOUT);
+ 	pm_runtime_use_autosuspend(&pdev->dev);
+-	devm_pm_runtime_enable(&pdev->dev);
++	ret = devm_pm_runtime_enable(&pdev->dev);
++	if (ret)
++		return ret;
+ 
+ 	dev_info(&pdev->dev, "registered RNG driver\n");
+ 
+diff --git a/drivers/clk/at91/sam9x7.c b/drivers/clk/at91/sam9x7.c
+index cbb8b220f16bcd..ffab32b047a017 100644
+--- a/drivers/clk/at91/sam9x7.c
++++ b/drivers/clk/at91/sam9x7.c
+@@ -61,44 +61,44 @@ static const struct clk_master_layout sam9x7_master_layout = {
+ 
+ /* Fractional PLL core output range. */
+ static const struct clk_range plla_core_outputs[] = {
+-	{ .min = 375000000, .max = 1600000000 },
++	{ .min = 800000000, .max = 1600000000 },
+ };
+ 
+ static const struct clk_range upll_core_outputs[] = {
+-	{ .min = 600000000, .max = 1200000000 },
++	{ .min = 600000000, .max = 960000000 },
+ };
+ 
+ static const struct clk_range lvdspll_core_outputs[] = {
+-	{ .min = 400000000, .max = 800000000 },
++	{ .min = 600000000, .max = 1200000000 },
+ };
+ 
+ static const struct clk_range audiopll_core_outputs[] = {
+-	{ .min = 400000000, .max = 800000000 },
++	{ .min = 600000000, .max = 1200000000 },
+ };
+ 
+ static const struct clk_range plladiv2_core_outputs[] = {
+-	{ .min = 375000000, .max = 1600000000 },
++	{ .min = 800000000, .max = 1600000000 },
+ };
+ 
+ /* Fractional PLL output range. */
+ static const struct clk_range plla_outputs[] = {
+-	{ .min = 732421, .max = 800000000 },
++	{ .min = 400000000, .max = 800000000 },
+ };
+ 
+ static const struct clk_range upll_outputs[] = {
+-	{ .min = 300000000, .max = 600000000 },
++	{ .min = 300000000, .max = 480000000 },
+ };
+ 
+ static const struct clk_range lvdspll_outputs[] = {
+-	{ .min = 10000000, .max = 800000000 },
++	{ .min = 175000000, .max = 550000000 },
+ };
+ 
+ static const struct clk_range audiopll_outputs[] = {
+-	{ .min = 10000000, .max = 800000000 },
++	{ .min = 0, .max = 300000000 },
+ };
+ 
+ static const struct clk_range plladiv2_outputs[] = {
+-	{ .min = 366210, .max = 400000000 },
++	{ .min = 200000000, .max = 400000000 },
+ };
+ 
+ /* PLL characteristics. */
+diff --git a/drivers/clk/clk-axi-clkgen.c b/drivers/clk/clk-axi-clkgen.c
+index 934e53a96dddac..00bf799964c61a 100644
+--- a/drivers/clk/clk-axi-clkgen.c
++++ b/drivers/clk/clk-axi-clkgen.c
+@@ -118,7 +118,7 @@ static const struct axi_clkgen_limits axi_clkgen_zynqmp_default_limits = {
+ 
+ static const struct axi_clkgen_limits axi_clkgen_zynq_default_limits = {
+ 	.fpfd_min = 10000,
+-	.fpfd_max = 300000,
++	.fpfd_max = 450000,
+ 	.fvco_min = 600000,
+ 	.fvco_max = 1200000,
+ };
+diff --git a/drivers/clk/davinci/psc.c b/drivers/clk/davinci/psc.c
+index 355d1be0b5d8da..a37fea7542b237 100644
+--- a/drivers/clk/davinci/psc.c
++++ b/drivers/clk/davinci/psc.c
+@@ -277,6 +277,11 @@ davinci_lpsc_clk_register(struct device *dev, const char *name,
+ 
+ 	lpsc->pm_domain.name = devm_kasprintf(dev, GFP_KERNEL, "%s: %s",
+ 					      best_dev_name(dev), name);
++	if (!lpsc->pm_domain.name) {
++		clk_hw_unregister(&lpsc->hw);
++		kfree(lpsc);
++		return ERR_PTR(-ENOMEM);
++	}
+ 	lpsc->pm_domain.attach_dev = davinci_psc_genpd_attach_dev;
+ 	lpsc->pm_domain.detach_dev = davinci_psc_genpd_detach_dev;
+ 	lpsc->pm_domain.flags = GENPD_FLAG_PM_CLK;
+diff --git a/drivers/clk/imx/clk-imx95-blk-ctl.c b/drivers/clk/imx/clk-imx95-blk-ctl.c
+index 564e9f3f7508da..5030e6e60b669f 100644
+--- a/drivers/clk/imx/clk-imx95-blk-ctl.c
++++ b/drivers/clk/imx/clk-imx95-blk-ctl.c
+@@ -323,8 +323,10 @@ static int imx95_bc_probe(struct platform_device *pdev)
+ 	if (!clk_hw_data)
+ 		return -ENOMEM;
+ 
+-	if (bc_data->rpm_enabled)
+-		pm_runtime_enable(&pdev->dev);
++	if (bc_data->rpm_enabled) {
++		devm_pm_runtime_enable(&pdev->dev);
++		pm_runtime_resume_and_get(&pdev->dev);
++	}
+ 
+ 	clk_hw_data->num = bc_data->num_clks;
+ 	hws = clk_hw_data->hws;
+@@ -364,8 +366,10 @@ static int imx95_bc_probe(struct platform_device *pdev)
+ 		goto cleanup;
+ 	}
+ 
+-	if (pm_runtime_enabled(bc->dev))
++	if (pm_runtime_enabled(bc->dev)) {
++		pm_runtime_put_sync(&pdev->dev);
+ 		clk_disable_unprepare(bc->clk_apb);
++	}
+ 
+ 	return 0;
+ 
+@@ -376,9 +380,6 @@ static int imx95_bc_probe(struct platform_device *pdev)
+ 		clk_hw_unregister(hws[i]);
+ 	}
+ 
+-	if (bc_data->rpm_enabled)
+-		pm_runtime_disable(&pdev->dev);
+-
+ 	return ret;
+ }
+ 
+diff --git a/drivers/clk/renesas/rzv2h-cpg.c b/drivers/clk/renesas/rzv2h-cpg.c
+index b524a9d33610f6..5f8116e39e2225 100644
+--- a/drivers/clk/renesas/rzv2h-cpg.c
++++ b/drivers/clk/renesas/rzv2h-cpg.c
+@@ -312,6 +312,7 @@ rzv2h_cpg_ddiv_clk_register(const struct cpg_core_clk *core,
+ 	init.ops = &rzv2h_ddiv_clk_divider_ops;
+ 	init.parent_names = &parent_name;
+ 	init.num_parents = 1;
++	init.flags = CLK_SET_RATE_PARENT;
+ 
+ 	ddiv->priv = priv;
+ 	ddiv->mon = cfg_ddiv.monbit;
+diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
+index d24c0d8dfee4cc..3416e00207995a 100644
+--- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
++++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
+@@ -347,8 +347,7 @@ static SUNXI_CCU_GATE(dram_ohci_clk,	"dram-ohci",	"dram",
+ 
+ static const char * const de_parents[] = { "pll-video", "pll-periph0" };
+ static SUNXI_CCU_M_WITH_MUX_GATE(de_clk, "de", de_parents,
+-				 0x104, 0, 4, 24, 2, BIT(31),
+-				 CLK_SET_RATE_PARENT);
++				 0x104, 0, 4, 24, 3, BIT(31), 0);
+ 
+ static const char * const tcon_parents[] = { "pll-video" };
+ static SUNXI_CCU_M_WITH_MUX_GATE(tcon_clk, "tcon", tcon_parents,
+diff --git a/drivers/clk/thead/clk-th1520-ap.c b/drivers/clk/thead/clk-th1520-ap.c
+index 4c9555fc61844d..6ab89245af1217 100644
+--- a/drivers/clk/thead/clk-th1520-ap.c
++++ b/drivers/clk/thead/clk-th1520-ap.c
+@@ -582,7 +582,14 @@ static const struct clk_parent_data peri2sys_apb_pclk_pd[] = {
+ 	{ .hw = &peri2sys_apb_pclk.common.hw }
+ };
+ 
+-static CLK_FIXED_FACTOR_FW_NAME(osc12m_clk, "osc_12m", "osc_24m", 2, 1, 0);
++static struct clk_fixed_factor osc12m_clk = {
++	.div		= 2,
++	.mult		= 1,
++	.hw.init	= CLK_HW_INIT_PARENTS_DATA("osc_12m",
++						   osc_24m_clk,
++						   &clk_fixed_factor_ops,
++						   0),
++};
+ 
+ static const char * const out_parents[] = { "osc_24m", "osc_12m" };
+ 
+diff --git a/drivers/clk/xilinx/xlnx_vcu.c b/drivers/clk/xilinx/xlnx_vcu.c
+index 81501b48412ee6..88b3fd8250c202 100644
+--- a/drivers/clk/xilinx/xlnx_vcu.c
++++ b/drivers/clk/xilinx/xlnx_vcu.c
+@@ -587,8 +587,8 @@ static void xvcu_unregister_clock_provider(struct xvcu_device *xvcu)
+ 		xvcu_clk_hw_unregister_leaf(hws[CLK_XVCU_ENC_MCU]);
+ 	if (!IS_ERR_OR_NULL(hws[CLK_XVCU_ENC_CORE]))
+ 		xvcu_clk_hw_unregister_leaf(hws[CLK_XVCU_ENC_CORE]);
+-
+-	clk_hw_unregister_fixed_factor(xvcu->pll_post);
++	if (!IS_ERR_OR_NULL(xvcu->pll_post))
++		clk_hw_unregister_fixed_factor(xvcu->pll_post);
+ }
+ 
+ /**
+diff --git a/drivers/cpufreq/armada-8k-cpufreq.c b/drivers/cpufreq/armada-8k-cpufreq.c
+index 7a979db81f0982..ccbc826cc4c01c 100644
+--- a/drivers/cpufreq/armada-8k-cpufreq.c
++++ b/drivers/cpufreq/armada-8k-cpufreq.c
+@@ -132,7 +132,7 @@ static int __init armada_8k_cpufreq_init(void)
+ 	int ret = 0, opps_index = 0, cpu, nb_cpus;
+ 	struct freq_table *freq_tables;
+ 	struct device_node *node;
+-	static struct cpumask cpus;
++	static struct cpumask cpus, shared_cpus;
+ 
+ 	node = of_find_matching_node_and_match(NULL, armada_8k_cpufreq_of_match,
+ 					       NULL);
+@@ -154,7 +154,6 @@ static int __init armada_8k_cpufreq_init(void)
+ 	 * divisions of it).
+ 	 */
+ 	for_each_cpu(cpu, &cpus) {
+-		struct cpumask shared_cpus;
+ 		struct device *cpu_dev;
+ 		struct clk *clk;
+ 
+diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
+index 1f52bced4c2959..fab94ffcb22ce8 100644
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -1275,6 +1275,8 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
+ 		goto err_free_real_cpus;
+ 	}
+ 
++	init_rwsem(&policy->rwsem);
++
+ 	freq_constraints_init(&policy->constraints);
+ 
+ 	policy->nb_min.notifier_call = cpufreq_notifier_min;
+@@ -1297,7 +1299,6 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
+ 	}
+ 
+ 	INIT_LIST_HEAD(&policy->policy_list);
+-	init_rwsem(&policy->rwsem);
+ 	spin_lock_init(&policy->transition_lock);
+ 	init_waitqueue_head(&policy->transition_wait);
+ 	INIT_WORK(&policy->update, handle_update);
+@@ -2961,15 +2962,6 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
+ 	cpufreq_driver = driver_data;
+ 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ 
+-	/*
+-	 * Mark support for the scheduler's frequency invariance engine for
+-	 * drivers that implement target(), target_index() or fast_switch().
+-	 */
+-	if (!cpufreq_driver->setpolicy) {
+-		static_branch_enable_cpuslocked(&cpufreq_freq_invariance);
+-		pr_debug("supports frequency invariance");
+-	}
+-
+ 	if (driver_data->setpolicy)
+ 		driver_data->flags |= CPUFREQ_CONST_LOOPS;
+ 
+@@ -3000,6 +2992,15 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
+ 	hp_online = ret;
+ 	ret = 0;
+ 
++	/*
++	 * Mark support for the scheduler's frequency invariance engine for
++	 * drivers that implement target(), target_index() or fast_switch().
++	 */
++	if (!cpufreq_driver->setpolicy) {
++		static_branch_enable_cpuslocked(&cpufreq_freq_invariance);
++		pr_debug("supports frequency invariance");
++	}
++
+ 	pr_debug("driver %s up and running\n", driver_data->name);
+ 	goto out;
+ 
+diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
+index 54e7310454cc64..b86372aa341dae 100644
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -3128,8 +3128,8 @@ static int intel_cpufreq_update_pstate(struct cpufreq_policy *policy,
+ 		int max_pstate = policy->strict_target ?
+ 					target_pstate : cpu->max_perf_ratio;
+ 
+-		intel_cpufreq_hwp_update(cpu, target_pstate, max_pstate, 0,
+-					 fast_switch);
++		intel_cpufreq_hwp_update(cpu, target_pstate, max_pstate,
++					 target_pstate, fast_switch);
+ 	} else if (target_pstate != old_pstate) {
+ 		intel_cpufreq_perf_ctl_update(cpu, target_pstate, fast_switch);
+ 	}
+diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
+index 05f67661553c9a..63e66a85477e54 100644
+--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
++++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
+@@ -265,8 +265,8 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req
+ 	}
+ 
+ 	chan->timeout = areq->cryptlen;
+-	rctx->nr_sgs = nr_sgs;
+-	rctx->nr_sgd = nr_sgd;
++	rctx->nr_sgs = ns;
++	rctx->nr_sgd = nd;
+ 	return 0;
+ 
+ theend_sgs:
+diff --git a/drivers/crypto/ccp/ccp-debugfs.c b/drivers/crypto/ccp/ccp-debugfs.c
+index a1055554b47a24..dc26bc22c91d1d 100644
+--- a/drivers/crypto/ccp/ccp-debugfs.c
++++ b/drivers/crypto/ccp/ccp-debugfs.c
+@@ -319,5 +319,8 @@ void ccp5_debugfs_setup(struct ccp_device *ccp)
+ 
+ void ccp5_debugfs_destroy(void)
+ {
++	mutex_lock(&ccp_debugfs_lock);
+ 	debugfs_remove_recursive(ccp_debugfs_dir);
++	ccp_debugfs_dir = NULL;
++	mutex_unlock(&ccp_debugfs_lock);
+ }
+diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
+index af018afd9cd7fc..4d072c084d7b0d 100644
+--- a/drivers/crypto/ccp/sev-dev.c
++++ b/drivers/crypto/ccp/sev-dev.c
+@@ -424,7 +424,7 @@ static int rmp_mark_pages_firmware(unsigned long paddr, unsigned int npages, boo
+ 	return rc;
+ }
+ 
+-static struct page *__snp_alloc_firmware_pages(gfp_t gfp_mask, int order)
++static struct page *__snp_alloc_firmware_pages(gfp_t gfp_mask, int order, bool locked)
+ {
+ 	unsigned long npages = 1ul << order, paddr;
+ 	struct sev_device *sev;
+@@ -443,7 +443,7 @@ static struct page *__snp_alloc_firmware_pages(gfp_t gfp_mask, int order)
+ 		return page;
+ 
+ 	paddr = __pa((unsigned long)page_address(page));
+-	if (rmp_mark_pages_firmware(paddr, npages, false))
++	if (rmp_mark_pages_firmware(paddr, npages, locked))
+ 		return NULL;
+ 
+ 	return page;
+@@ -453,7 +453,7 @@ void *snp_alloc_firmware_page(gfp_t gfp_mask)
+ {
+ 	struct page *page;
+ 
+-	page = __snp_alloc_firmware_pages(gfp_mask, 0);
++	page = __snp_alloc_firmware_pages(gfp_mask, 0, false);
+ 
+ 	return page ? page_address(page) : NULL;
+ }
+@@ -488,7 +488,7 @@ static void *sev_fw_alloc(unsigned long len)
+ {
+ 	struct page *page;
+ 
+-	page = __snp_alloc_firmware_pages(GFP_KERNEL, get_order(len));
++	page = __snp_alloc_firmware_pages(GFP_KERNEL, get_order(len), true);
+ 	if (!page)
+ 		return NULL;
+ 
+diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c
+index 7e93159c3b6b96..d5df3d2da50c59 100644
+--- a/drivers/crypto/img-hash.c
++++ b/drivers/crypto/img-hash.c
+@@ -436,7 +436,7 @@ static int img_hash_write_via_dma_stop(struct img_hash_dev *hdev)
+ 	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
+ 
+ 	if (ctx->flags & DRIVER_FLAGS_SG)
+-		dma_unmap_sg(hdev->dev, ctx->sg, ctx->dma_ct, DMA_TO_DEVICE);
++		dma_unmap_sg(hdev->dev, ctx->sg, 1, DMA_TO_DEVICE);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
+index f44c08f5f5ec4a..af4b978189e519 100644
+--- a/drivers/crypto/inside-secure/safexcel_hash.c
++++ b/drivers/crypto/inside-secure/safexcel_hash.c
+@@ -249,7 +249,9 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv,
+ 	safexcel_complete(priv, ring);
+ 
+ 	if (sreq->nents) {
+-		dma_unmap_sg(priv->dev, areq->src, sreq->nents, DMA_TO_DEVICE);
++		dma_unmap_sg(priv->dev, areq->src,
++			     sg_nents_for_len(areq->src, areq->nbytes),
++			     DMA_TO_DEVICE);
+ 		sreq->nents = 0;
+ 	}
+ 
+@@ -497,7 +499,9 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
+ 			 DMA_FROM_DEVICE);
+ unmap_sg:
+ 	if (req->nents) {
+-		dma_unmap_sg(priv->dev, areq->src, req->nents, DMA_TO_DEVICE);
++		dma_unmap_sg(priv->dev, areq->src,
++			     sg_nents_for_len(areq->src, areq->nbytes),
++			     DMA_TO_DEVICE);
+ 		req->nents = 0;
+ 	}
+ cdesc_rollback:
+diff --git a/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c b/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c
+index e54c79890d44f5..fdeca861933cb5 100644
+--- a/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c
++++ b/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c
+@@ -68,6 +68,7 @@ struct ocs_hcu_ctx {
+  * @sg_data_total:  Total data in the SG list at any time.
+  * @sg_data_offset: Offset into the data of the current individual SG node.
+  * @sg_dma_nents:   Number of sg entries mapped in dma_list.
++ * @nents:          Number of entries in the scatterlist.
+  */
+ struct ocs_hcu_rctx {
+ 	struct ocs_hcu_dev	*hcu_dev;
+@@ -91,6 +92,7 @@ struct ocs_hcu_rctx {
+ 	unsigned int		sg_data_total;
+ 	unsigned int		sg_data_offset;
+ 	unsigned int		sg_dma_nents;
++	unsigned int		nents;
+ };
+ 
+ /**
+@@ -199,7 +201,7 @@ static void kmb_ocs_hcu_dma_cleanup(struct ahash_request *req,
+ 
+ 	/* Unmap req->src (if mapped). */
+ 	if (rctx->sg_dma_nents) {
+-		dma_unmap_sg(dev, req->src, rctx->sg_dma_nents, DMA_TO_DEVICE);
++		dma_unmap_sg(dev, req->src, rctx->nents, DMA_TO_DEVICE);
+ 		rctx->sg_dma_nents = 0;
+ 	}
+ 
+@@ -260,6 +262,10 @@ static int kmb_ocs_dma_prepare(struct ahash_request *req)
+ 			rc = -ENOMEM;
+ 			goto cleanup;
+ 		}
++
++		/* Save the value of nents to pass to dma_unmap_sg. */
++		rctx->nents = nents;
++
+ 		/*
+ 		 * The value returned by dma_map_sg() can be < nents; so update
+ 		 * nents accordingly.
+diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
+index a17adc4beda2e3..ef5f03be419064 100644
+--- a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
++++ b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
+@@ -199,7 +199,6 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
+ 			  ICP_ACCEL_CAPABILITIES_SM4 |
+ 			  ICP_ACCEL_CAPABILITIES_AES_V2 |
+ 			  ICP_ACCEL_CAPABILITIES_ZUC |
+-			  ICP_ACCEL_CAPABILITIES_ZUC_256 |
+ 			  ICP_ACCEL_CAPABILITIES_WIRELESS_CRYPTO_EXT |
+ 			  ICP_ACCEL_CAPABILITIES_EXT_ALGCHAIN;
+ 
+@@ -231,17 +230,11 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
+ 
+ 	if (fusectl1 & ICP_ACCEL_GEN4_MASK_WCP_WAT_SLICE) {
+ 		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC;
+-		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC_256;
+ 		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_WIRELESS_CRYPTO_EXT;
+ 	}
+ 
+-	if (fusectl1 & ICP_ACCEL_GEN4_MASK_EIA3_SLICE) {
++	if (fusectl1 & ICP_ACCEL_GEN4_MASK_EIA3_SLICE)
+ 		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC;
+-		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC_256;
+-	}
+-
+-	if (fusectl1 & ICP_ACCEL_GEN4_MASK_ZUC_256_SLICE)
+-		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC_256;
+ 
+ 	capabilities_asym = ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
+ 			  ICP_ACCEL_CAPABILITIES_SM2 |
+diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
+index 41a0979e68c177..e70adb90e5e44f 100644
+--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
++++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
+@@ -585,6 +585,28 @@ static int bank_state_restore(struct adf_hw_csr_ops *ops, void __iomem *base,
+ 	ops->write_csr_int_srcsel_w_val(base, bank, state->iaintflagsrcsel0);
+ 	ops->write_csr_exp_int_en(base, bank, state->ringexpintenable);
+ 	ops->write_csr_int_col_ctl(base, bank, state->iaintcolctl);
++
++	/*
++	 * Verify whether any exceptions were raised during the bank save process.
++	 * If exceptions occurred, the status and exception registers cannot
++	 * be directly restored. Consequently, further restoration is not
++	 * feasible, and the current state of the ring should be maintained.
++	 */
++	val = state->ringexpstat;
++	if (val) {
++		pr_info("QAT: Bank %u state not fully restored due to exception in saved state (%#x)\n",
++			bank, val);
++		return 0;
++	}
++
++	/* Ensure that the restoration process completed without exceptions */
++	tmp_val = ops->read_csr_exp_stat(base, bank);
++	if (tmp_val) {
++		pr_err("QAT: Bank %u restored with exception: %#x\n",
++		       bank, tmp_val);
++		return -EFAULT;
++	}
++
+ 	ops->write_csr_ring_srv_arb_en(base, bank, state->ringsrvarben);
+ 
+ 	/* Check that all ring statuses match the saved state. */
+@@ -618,13 +640,6 @@ static int bank_state_restore(struct adf_hw_csr_ops *ops, void __iomem *base,
+ 	if (ret)
+ 		return ret;
+ 
+-	tmp_val = ops->read_csr_exp_stat(base, bank);
+-	val = state->ringexpstat;
+-	if (tmp_val && !val) {
+-		pr_err("QAT: Bank was restored with exception: 0x%x\n", val);
+-		return -EINVAL;
+-	}
+-
+ 	return 0;
+ }
+ 
+diff --git a/drivers/crypto/intel/qat/qat_common/adf_sriov.c b/drivers/crypto/intel/qat/qat_common/adf_sriov.c
+index c75d0b6cb0ada3..31d1ef0cb1f52e 100644
+--- a/drivers/crypto/intel/qat/qat_common/adf_sriov.c
++++ b/drivers/crypto/intel/qat/qat_common/adf_sriov.c
+@@ -155,7 +155,6 @@ static int adf_do_enable_sriov(struct adf_accel_dev *accel_dev)
+ 	if (!device_iommu_mapped(&GET_DEV(accel_dev))) {
+ 		dev_warn(&GET_DEV(accel_dev),
+ 			 "IOMMU should be enabled for SR-IOV to work correctly\n");
+-		return -EINVAL;
+ 	}
+ 
+ 	if (adf_dev_started(accel_dev)) {
+diff --git a/drivers/crypto/intel/qat/qat_common/adf_transport_debug.c b/drivers/crypto/intel/qat/qat_common/adf_transport_debug.c
+index e2dd568b87b519..621b5d3dfcef91 100644
+--- a/drivers/crypto/intel/qat/qat_common/adf_transport_debug.c
++++ b/drivers/crypto/intel/qat/qat_common/adf_transport_debug.c
+@@ -31,8 +31,10 @@ static void *adf_ring_next(struct seq_file *sfile, void *v, loff_t *pos)
+ 	struct adf_etr_ring_data *ring = sfile->private;
+ 
+ 	if (*pos >= (ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size) /
+-		     ADF_MSG_SIZE_TO_BYTES(ring->msg_size)))
++		     ADF_MSG_SIZE_TO_BYTES(ring->msg_size))) {
++		(*pos)++;
+ 		return NULL;
++	}
+ 
+ 	return ring->base_addr +
+ 		(ADF_MSG_SIZE_TO_BYTES(ring->msg_size) * (*pos)++);
+diff --git a/drivers/crypto/intel/qat/qat_common/qat_bl.c b/drivers/crypto/intel/qat/qat_common/qat_bl.c
+index 338acf29c487b6..5a7b43f9150d3b 100644
+--- a/drivers/crypto/intel/qat/qat_common/qat_bl.c
++++ b/drivers/crypto/intel/qat/qat_common/qat_bl.c
+@@ -38,7 +38,7 @@ void qat_bl_free_bufl(struct adf_accel_dev *accel_dev,
+ 		for (i = 0; i < blout->num_mapped_bufs; i++) {
+ 			dma_unmap_single(dev, blout->buffers[i].addr,
+ 					 blout->buffers[i].len,
+-					 DMA_FROM_DEVICE);
++					 DMA_BIDIRECTIONAL);
+ 		}
+ 		dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
+ 
+@@ -162,7 +162,7 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
+ 			}
+ 			buffers[y].addr = dma_map_single(dev, sg_virt(sg) + left,
+ 							 sg->length - left,
+-							 DMA_FROM_DEVICE);
++							 DMA_BIDIRECTIONAL);
+ 			if (unlikely(dma_mapping_error(dev, buffers[y].addr)))
+ 				goto err_out;
+ 			buffers[y].len = sg->length;
+@@ -204,7 +204,7 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
+ 		if (!dma_mapping_error(dev, buflout->buffers[i].addr))
+ 			dma_unmap_single(dev, buflout->buffers[i].addr,
+ 					 buflout->buffers[i].len,
+-					 DMA_FROM_DEVICE);
++					 DMA_BIDIRECTIONAL);
+ 	}
+ 
+ 	if (!buf->sgl_dst_valid)
+diff --git a/drivers/crypto/intel/qat/qat_common/qat_compression.c b/drivers/crypto/intel/qat/qat_common/qat_compression.c
+index 7842a9f22178c2..cf94ba3011d51b 100644
+--- a/drivers/crypto/intel/qat/qat_common/qat_compression.c
++++ b/drivers/crypto/intel/qat/qat_common/qat_compression.c
+@@ -197,7 +197,7 @@ static int qat_compression_alloc_dc_data(struct adf_accel_dev *accel_dev)
+ 	struct adf_dc_data *dc_data = NULL;
+ 	u8 *obuff = NULL;
+ 
+-	dc_data = devm_kzalloc(dev, sizeof(*dc_data), GFP_KERNEL);
++	dc_data = kzalloc_node(sizeof(*dc_data), GFP_KERNEL, dev_to_node(dev));
+ 	if (!dc_data)
+ 		goto err;
+ 
+@@ -205,7 +205,7 @@ static int qat_compression_alloc_dc_data(struct adf_accel_dev *accel_dev)
+ 	if (!obuff)
+ 		goto err;
+ 
+-	obuff_p = dma_map_single(dev, obuff, ovf_buff_sz, DMA_FROM_DEVICE);
++	obuff_p = dma_map_single(dev, obuff, ovf_buff_sz, DMA_BIDIRECTIONAL);
+ 	if (unlikely(dma_mapping_error(dev, obuff_p)))
+ 		goto err;
+ 
+@@ -233,9 +233,9 @@ static void qat_free_dc_data(struct adf_accel_dev *accel_dev)
+ 		return;
+ 
+ 	dma_unmap_single(dev, dc_data->ovf_buff_p, dc_data->ovf_buff_sz,
+-			 DMA_FROM_DEVICE);
++			 DMA_BIDIRECTIONAL);
+ 	kfree_sensitive(dc_data->ovf_buff);
+-	devm_kfree(dev, dc_data);
++	kfree(dc_data);
+ 	accel_dev->dc_data = NULL;
+ }
+ 
+diff --git a/drivers/crypto/marvell/cesa/cipher.c b/drivers/crypto/marvell/cesa/cipher.c
+index 3876e3ce822f44..eabed9d977df6c 100644
+--- a/drivers/crypto/marvell/cesa/cipher.c
++++ b/drivers/crypto/marvell/cesa/cipher.c
+@@ -75,9 +75,12 @@ mv_cesa_skcipher_dma_cleanup(struct skcipher_request *req)
+ static inline void mv_cesa_skcipher_cleanup(struct skcipher_request *req)
+ {
+ 	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
++	struct mv_cesa_engine *engine = creq->base.engine;
+ 
+ 	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
+ 		mv_cesa_skcipher_dma_cleanup(req);
++
++	atomic_sub(req->cryptlen, &engine->load);
+ }
+ 
+ static void mv_cesa_skcipher_std_step(struct skcipher_request *req)
+@@ -212,7 +215,6 @@ mv_cesa_skcipher_complete(struct crypto_async_request *req)
+ 	struct mv_cesa_engine *engine = creq->base.engine;
+ 	unsigned int ivsize;
+ 
+-	atomic_sub(skreq->cryptlen, &engine->load);
+ 	ivsize = crypto_skcipher_ivsize(crypto_skcipher_reqtfm(skreq));
+ 
+ 	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ) {
+diff --git a/drivers/crypto/marvell/cesa/hash.c b/drivers/crypto/marvell/cesa/hash.c
+index 6815eddc906812..e339ce7ad53310 100644
+--- a/drivers/crypto/marvell/cesa/hash.c
++++ b/drivers/crypto/marvell/cesa/hash.c
+@@ -110,9 +110,12 @@ static inline void mv_cesa_ahash_dma_cleanup(struct ahash_request *req)
+ static inline void mv_cesa_ahash_cleanup(struct ahash_request *req)
+ {
+ 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
++	struct mv_cesa_engine *engine = creq->base.engine;
+ 
+ 	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
+ 		mv_cesa_ahash_dma_cleanup(req);
++
++	atomic_sub(req->nbytes, &engine->load);
+ }
+ 
+ static void mv_cesa_ahash_last_cleanup(struct ahash_request *req)
+@@ -395,8 +398,6 @@ static void mv_cesa_ahash_complete(struct crypto_async_request *req)
+ 			}
+ 		}
+ 	}
+-
+-	atomic_sub(ahashreq->nbytes, &engine->load);
+ }
+ 
+ static void mv_cesa_ahash_prepare(struct crypto_async_request *req,
+diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
+index 98657d3b9435c7..0d9f3d3282ec94 100644
+--- a/drivers/devfreq/devfreq.c
++++ b/drivers/devfreq/devfreq.c
+@@ -1382,15 +1382,11 @@ int devfreq_remove_governor(struct devfreq_governor *governor)
+ 		int ret;
+ 		struct device *dev = devfreq->dev.parent;
+ 
++		if (!devfreq->governor)
++			continue;
++
+ 		if (!strncmp(devfreq->governor->name, governor->name,
+ 			     DEVFREQ_NAME_LEN)) {
+-			/* we should have a devfreq governor! */
+-			if (!devfreq->governor) {
+-				dev_warn(dev, "%s: Governor %s NOT present\n",
+-					 __func__, governor->name);
+-				continue;
+-				/* Fall through */
+-			}
+ 			ret = devfreq->governor->event_handler(devfreq,
+ 						DEVFREQ_GOV_STOP, NULL);
+ 			if (ret) {
+@@ -1743,7 +1739,7 @@ static ssize_t trans_stat_show(struct device *dev,
+ 	for (i = 0; i < max_state; i++) {
+ 		if (len >= PAGE_SIZE - 1)
+ 			break;
+-		if (df->freq_table[2] == df->previous_freq)
++		if (df->freq_table[i] == df->previous_freq)
+ 			len += sysfs_emit_at(buf, len, "*");
+ 		else
+ 			len += sysfs_emit_at(buf, len, " ");
+diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
+index b76fe99e115163..f88792049be559 100644
+--- a/drivers/dma/mmp_tdma.c
++++ b/drivers/dma/mmp_tdma.c
+@@ -641,7 +641,7 @@ static int mmp_tdma_probe(struct platform_device *pdev)
+ 	int chan_num = TDMA_CHANNEL_NUM;
+ 	struct gen_pool *pool = NULL;
+ 
+-	type = (enum mmp_tdma_type)device_get_match_data(&pdev->dev);
++	type = (kernel_ulong_t)device_get_match_data(&pdev->dev);
+ 
+ 	/* always have couple channels */
+ 	tdev = devm_kzalloc(&pdev->dev, sizeof(*tdev), GFP_KERNEL);
+diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
+index 40b76b40bc30c2..184813766cd15f 100644
+--- a/drivers/dma/mv_xor.c
++++ b/drivers/dma/mv_xor.c
+@@ -1061,8 +1061,16 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
+ 	 */
+ 	mv_chan->dummy_src_addr = dma_map_single(dma_dev->dev,
+ 		mv_chan->dummy_src, MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
++	if (dma_mapping_error(dma_dev->dev, mv_chan->dummy_src_addr))
++		return ERR_PTR(-ENOMEM);
++
+ 	mv_chan->dummy_dst_addr = dma_map_single(dma_dev->dev,
+ 		mv_chan->dummy_dst, MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);
++	if (dma_mapping_error(dma_dev->dev, mv_chan->dummy_dst_addr)) {
++		ret = -ENOMEM;
++		goto err_unmap_src;
++	}
++
+ 
+ 	/* allocate coherent memory for hardware descriptors
+ 	 * note: writecombine gives slightly better performance, but
+@@ -1071,8 +1079,10 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
+ 	mv_chan->dma_desc_pool_virt =
+ 	  dma_alloc_wc(&pdev->dev, MV_XOR_POOL_SIZE, &mv_chan->dma_desc_pool,
+ 		       GFP_KERNEL);
+-	if (!mv_chan->dma_desc_pool_virt)
+-		return ERR_PTR(-ENOMEM);
++	if (!mv_chan->dma_desc_pool_virt) {
++		ret = -ENOMEM;
++		goto err_unmap_dst;
++	}
+ 
+ 	/* discover transaction capabilities from the platform data */
+ 	dma_dev->cap_mask = cap_mask;
+@@ -1155,6 +1165,13 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
+ err_free_dma:
+ 	dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
+ 			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
++err_unmap_dst:
++	dma_unmap_single(dma_dev->dev, mv_chan->dummy_dst_addr,
++			 MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);
++err_unmap_src:
++	dma_unmap_single(dma_dev->dev, mv_chan->dummy_src_addr,
++			 MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
++
+ 	return ERR_PTR(ret);
+ }
+ 
+diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c
+index 5f5d6242427e18..2fa6e90643d583 100644
+--- a/drivers/dma/nbpfaxi.c
++++ b/drivers/dma/nbpfaxi.c
+@@ -711,6 +711,9 @@ static int nbpf_desc_page_alloc(struct nbpf_channel *chan)
+ 		list_add_tail(&ldesc->node, &lhead);
+ 		ldesc->hwdesc_dma_addr = dma_map_single(dchan->device->dev,
+ 					hwdesc, sizeof(*hwdesc), DMA_TO_DEVICE);
++		if (dma_mapping_error(dchan->device->dev,
++				      ldesc->hwdesc_dma_addr))
++			goto unmap_error;
+ 
+ 		dev_dbg(dev, "%s(): mapped 0x%p to %pad\n", __func__,
+ 			hwdesc, &ldesc->hwdesc_dma_addr);
+@@ -737,6 +740,16 @@ static int nbpf_desc_page_alloc(struct nbpf_channel *chan)
+ 	spin_unlock_irq(&chan->lock);
+ 
+ 	return ARRAY_SIZE(dpage->desc);
++
++unmap_error:
++	while (i--) {
++		ldesc--; hwdesc--;
++
++		dma_unmap_single(dchan->device->dev, ldesc->hwdesc_dma_addr,
++				 sizeof(hwdesc), DMA_TO_DEVICE);
++	}
++
++	return -ENOMEM;
+ }
+ 
+ static void nbpf_desc_put(struct nbpf_desc *desc)
+diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c
+index c7e5a34b254bf4..683fd9b85c5ce2 100644
+--- a/drivers/firmware/arm_scmi/perf.c
++++ b/drivers/firmware/arm_scmi/perf.c
+@@ -892,7 +892,7 @@ static int scmi_dvfs_device_opps_add(const struct scmi_protocol_handle *ph,
+ 			freq = dom->opp[idx].indicative_freq * dom->mult_factor;
+ 
+ 		/* All OPPs above the sustained frequency are treated as turbo */
+-		data.turbo = freq > dom->sustained_freq_khz * 1000;
++		data.turbo = freq > dom->sustained_freq_khz * 1000UL;
+ 
+ 		data.level = dom->opp[idx].perf;
+ 		data.freq = freq;
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+index 24d711b0e6346c..9a1c9dbad12695 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+@@ -9510,9 +9510,8 @@ static int gfx_v10_0_reset_kcq(struct amdgpu_ring *ring,
+ 	kiq->pmf->kiq_unmap_queues(kiq_ring, ring, RESET_QUEUES,
+ 				   0, 0);
+ 	amdgpu_ring_commit(kiq_ring);
+-	spin_unlock_irqrestore(&kiq->ring_lock, flags);
+-
+ 	r = amdgpu_ring_test_ring(kiq_ring);
++	spin_unlock_irqrestore(&kiq->ring_lock, flags);
+ 	if (r)
+ 		return r;
+ 
+@@ -9559,9 +9558,8 @@ static int gfx_v10_0_reset_kcq(struct amdgpu_ring *ring,
+ 	}
+ 	kiq->pmf->kiq_map_queues(kiq_ring, ring);
+ 	amdgpu_ring_commit(kiq_ring);
+-	spin_unlock_irqrestore(&kiq->ring_lock, flags);
+-
+ 	r = amdgpu_ring_test_ring(kiq_ring);
++	spin_unlock_irqrestore(&kiq->ring_lock, flags);
+ 	if (r)
+ 		return r;
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index 114653a0b57013..91af1adbf5e86d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -7318,8 +7318,8 @@ static int gfx_v9_0_reset_kcq(struct amdgpu_ring *ring,
+ 	}
+ 	kiq->pmf->kiq_map_queues(kiq_ring, ring);
+ 	amdgpu_ring_commit(kiq_ring);
+-	spin_unlock_irqrestore(&kiq->ring_lock, flags);
+ 	r = amdgpu_ring_test_ring(kiq_ring);
++	spin_unlock_irqrestore(&kiq->ring_lock, flags);
+ 	if (r) {
+ 		DRM_ERROR("fail to remap queue\n");
+ 		return r;
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+index 5dc3454d7d3610..f27ccb8f3c8c57 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+@@ -3640,9 +3640,8 @@ static int gfx_v9_4_3_reset_kcq(struct amdgpu_ring *ring,
+ 	}
+ 	kiq->pmf->kiq_map_queues(kiq_ring, ring);
+ 	amdgpu_ring_commit(kiq_ring);
+-	spin_unlock_irqrestore(&kiq->ring_lock, flags);
+-
+ 	r = amdgpu_ring_test_ring(kiq_ring);
++	spin_unlock_irqrestore(&kiq->ring_lock, flags);
+ 	if (r) {
+ 		dev_err(adev->dev, "fail to remap queue\n");
+ 		return r;
+diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
+index d1bd79bbae532f..8e401f8b2a0540 100644
+--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
++++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
+@@ -32,9 +32,6 @@
+ 
+ #define NPS_MODE_MASK 0x000000FFL
+ 
+-/* Core 0 Port 0 counter */
+-#define smnPCIEP_NAK_COUNTER 0x1A340218
+-
+ static void nbio_v7_9_remap_hdp_registers(struct amdgpu_device *adev)
+ {
+ 	WREG32_SOC15(NBIO, 0, regBIF_BX0_REMAP_HDP_MEM_FLUSH_CNTL,
+@@ -453,22 +450,6 @@ static void nbio_v7_9_init_registers(struct amdgpu_device *adev)
+ 	}
+ }
+ 
+-static u64 nbio_v7_9_get_pcie_replay_count(struct amdgpu_device *adev)
+-{
+-	u32 val, nak_r, nak_g;
+-
+-	if (adev->flags & AMD_IS_APU)
+-		return 0;
+-
+-	/* Get the number of NAKs received and generated */
+-	val = RREG32_PCIE(smnPCIEP_NAK_COUNTER);
+-	nak_r = val & 0xFFFF;
+-	nak_g = val >> 16;
+-
+-	/* Add the total number of NAKs, i.e the number of replays */
+-	return (nak_r + nak_g);
+-}
+-
+ #define MMIO_REG_HOLE_OFFSET 0x1A000
+ 
+ static void nbio_v7_9_set_reg_remap(struct amdgpu_device *adev)
+@@ -509,7 +490,6 @@ const struct amdgpu_nbio_funcs nbio_v7_9_funcs = {
+ 	.get_compute_partition_mode = nbio_v7_9_get_compute_partition_mode,
+ 	.get_memory_partition_mode = nbio_v7_9_get_memory_partition_mode,
+ 	.init_registers = nbio_v7_9_init_registers,
+-	.get_pcie_replay_count = nbio_v7_9_get_pcie_replay_count,
+ 	.set_reg_remap = nbio_v7_9_set_reg_remap,
+ };
+ 
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
+index 79a566f3564a57..c305ea4ec17d21 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
+@@ -149,7 +149,7 @@ int phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr,
+ 	}
+ 
+ 	cgs_write_register(hwmgr->device, indirect_port, index);
+-	return phm_wait_on_register(hwmgr, indirect_port + 1, mask, value);
++	return phm_wait_on_register(hwmgr, indirect_port + 1, value, mask);
+ }
+ 
+ int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr,
+diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.c b/drivers/gpu/drm/i915/display/g4x_hdmi.c
+index 46f23bdb4c176e..b89a364a3924c5 100644
+--- a/drivers/gpu/drm/i915/display/g4x_hdmi.c
++++ b/drivers/gpu/drm/i915/display/g4x_hdmi.c
+@@ -683,7 +683,7 @@ static bool assert_hdmi_port_valid(struct drm_i915_private *i915, enum port port
+ 			 "Platform does not support HDMI %c\n", port_name(port));
+ }
+ 
+-void g4x_hdmi_init(struct drm_i915_private *dev_priv,
++bool g4x_hdmi_init(struct drm_i915_private *dev_priv,
+ 		   i915_reg_t hdmi_reg, enum port port)
+ {
+ 	struct intel_display *display = &dev_priv->display;
+@@ -693,10 +693,10 @@ void g4x_hdmi_init(struct drm_i915_private *dev_priv,
+ 	struct intel_connector *intel_connector;
+ 
+ 	if (!assert_port_valid(dev_priv, port))
+-		return;
++		return false;
+ 
+ 	if (!assert_hdmi_port_valid(dev_priv, port))
+-		return;
++		return false;
+ 
+ 	devdata = intel_bios_encoder_data_lookup(display, port);
+ 
+@@ -707,15 +707,13 @@ void g4x_hdmi_init(struct drm_i915_private *dev_priv,
+ 
+ 	dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL);
+ 	if (!dig_port)
+-		return;
++		return false;
+ 
+ 	dig_port->aux_ch = AUX_CH_NONE;
+ 
+ 	intel_connector = intel_connector_alloc();
+-	if (!intel_connector) {
+-		kfree(dig_port);
+-		return;
+-	}
++	if (!intel_connector)
++		goto err_connector_alloc;
+ 
+ 	intel_encoder = &dig_port->base;
+ 
+@@ -723,9 +721,10 @@ void g4x_hdmi_init(struct drm_i915_private *dev_priv,
+ 
+ 	mutex_init(&dig_port->hdcp_mutex);
+ 
+-	drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
+-			 &intel_hdmi_enc_funcs, DRM_MODE_ENCODER_TMDS,
+-			 "HDMI %c", port_name(port));
++	if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
++			     &intel_hdmi_enc_funcs, DRM_MODE_ENCODER_TMDS,
++			     "HDMI %c", port_name(port)))
++		goto err_encoder_init;
+ 
+ 	intel_encoder->hotplug = intel_hdmi_hotplug;
+ 	intel_encoder->compute_config = g4x_hdmi_compute_config;
+@@ -788,5 +787,17 @@ void g4x_hdmi_init(struct drm_i915_private *dev_priv,
+ 
+ 	intel_infoframe_init(dig_port);
+ 
+-	intel_hdmi_init_connector(dig_port, intel_connector);
++	if (!intel_hdmi_init_connector(dig_port, intel_connector))
++		goto err_init_connector;
++
++	return true;
++
++err_init_connector:
++	drm_encoder_cleanup(&intel_encoder->base);
++err_encoder_init:
++	kfree(intel_connector);
++err_connector_alloc:
++	kfree(dig_port);
++
++	return false;
+ }
+diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.h b/drivers/gpu/drm/i915/display/g4x_hdmi.h
+index 817f55c7a3a1ef..a52e8986ec7ab7 100644
+--- a/drivers/gpu/drm/i915/display/g4x_hdmi.h
++++ b/drivers/gpu/drm/i915/display/g4x_hdmi.h
+@@ -16,14 +16,15 @@ struct drm_connector;
+ struct drm_i915_private;
+ 
+ #ifdef I915
+-void g4x_hdmi_init(struct drm_i915_private *dev_priv,
++bool g4x_hdmi_init(struct drm_i915_private *dev_priv,
+ 		   i915_reg_t hdmi_reg, enum port port);
+ int g4x_hdmi_connector_atomic_check(struct drm_connector *connector,
+ 				    struct drm_atomic_state *state);
+ #else
+-static inline void g4x_hdmi_init(struct drm_i915_private *dev_priv,
++static inline bool g4x_hdmi_init(struct drm_i915_private *dev_priv,
+ 				 i915_reg_t hdmi_reg, int port)
+ {
++	return false;
+ }
+ static inline int g4x_hdmi_connector_atomic_check(struct drm_connector *connector,
+ 						  struct drm_atomic_state *state)
+diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
+index 34dee523f0b612..5b24460c013417 100644
+--- a/drivers/gpu/drm/i915/display/intel_ddi.c
++++ b/drivers/gpu/drm/i915/display/intel_ddi.c
+@@ -4413,8 +4413,7 @@ static const struct drm_encoder_funcs intel_ddi_funcs = {
+ 	.late_register = intel_ddi_encoder_late_register,
+ };
+ 
+-static struct intel_connector *
+-intel_ddi_init_dp_connector(struct intel_digital_port *dig_port)
++static int intel_ddi_init_dp_connector(struct intel_digital_port *dig_port)
+ {
+ 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ 	struct intel_connector *connector;
+@@ -4422,7 +4421,7 @@ intel_ddi_init_dp_connector(struct intel_digital_port *dig_port)
+ 
+ 	connector = intel_connector_alloc();
+ 	if (!connector)
+-		return NULL;
++		return -ENOMEM;
+ 
+ 	dig_port->dp.output_reg = DDI_BUF_CTL(port);
+ 	if (DISPLAY_VER(i915) >= 14)
+@@ -4437,7 +4436,7 @@ intel_ddi_init_dp_connector(struct intel_digital_port *dig_port)
+ 
+ 	if (!intel_dp_init_connector(dig_port, connector)) {
+ 		kfree(connector);
+-		return NULL;
++		return -EINVAL;
+ 	}
+ 
+ 	if (dig_port->base.type == INTEL_OUTPUT_EDP) {
+@@ -4453,7 +4452,7 @@ intel_ddi_init_dp_connector(struct intel_digital_port *dig_port)
+ 		}
+ 	}
+ 
+-	return connector;
++	return 0;
+ }
+ 
+ static int intel_hdmi_reset_link(struct intel_encoder *encoder,
+@@ -4623,20 +4622,28 @@ static bool bdw_digital_port_connected(struct intel_encoder *encoder)
+ 	return intel_de_read(dev_priv, GEN8_DE_PORT_ISR) & bit;
+ }
+ 
+-static struct intel_connector *
+-intel_ddi_init_hdmi_connector(struct intel_digital_port *dig_port)
++static int intel_ddi_init_hdmi_connector(struct intel_digital_port *dig_port)
+ {
+ 	struct intel_connector *connector;
+ 	enum port port = dig_port->base.port;
+ 
+ 	connector = intel_connector_alloc();
+ 	if (!connector)
+-		return NULL;
++		return -ENOMEM;
+ 
+ 	dig_port->hdmi.hdmi_reg = DDI_BUF_CTL(port);
+-	intel_hdmi_init_connector(dig_port, connector);
+ 
+-	return connector;
++	if (!intel_hdmi_init_connector(dig_port, connector)) {
++		/*
++		 * HDMI connector init failures may just mean conflicting DDC
++		 * pins or not having enough lanes. Handle them gracefully, but
++		 * don't fail the entire DDI init.
++		 */
++		dig_port->hdmi.hdmi_reg = INVALID_MMIO_REG;
++		kfree(connector);
++	}
++
++	return 0;
+ }
+ 
+ static bool intel_ddi_a_force_4_lanes(struct intel_digital_port *dig_port)
+@@ -4791,8 +4798,10 @@ static void intel_ddi_tc_encoder_suspend_complete(struct intel_encoder *encoder)
+ 
+ static void intel_ddi_encoder_shutdown(struct intel_encoder *encoder)
+ {
+-	intel_dp_encoder_shutdown(encoder);
+-	intel_hdmi_encoder_shutdown(encoder);
++	if (intel_encoder_is_dp(encoder))
++		intel_dp_encoder_shutdown(encoder);
++	if (intel_encoder_is_hdmi(encoder))
++		intel_hdmi_encoder_shutdown(encoder);
+ }
+ 
+ static void intel_ddi_tc_encoder_shutdown_complete(struct intel_encoder *encoder)
+@@ -5185,7 +5194,7 @@ void intel_ddi_init(struct intel_display *display,
+ 	intel_infoframe_init(dig_port);
+ 
+ 	if (init_dp) {
+-		if (!intel_ddi_init_dp_connector(dig_port))
++		if (intel_ddi_init_dp_connector(dig_port))
+ 			goto err;
+ 
+ 		dig_port->hpd_pulse = intel_dp_hpd_pulse;
+@@ -5199,7 +5208,7 @@ void intel_ddi_init(struct intel_display *display,
+ 	 * but leave it just in case we have some really bad VBTs...
+ 	 */
+ 	if (encoder->type != INTEL_OUTPUT_EDP && init_hdmi) {
+-		if (!intel_ddi_init_hdmi_connector(dig_port))
++		if (intel_ddi_init_hdmi_connector(dig_port))
+ 			goto err;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
+index 3e24d2e90d3cfb..9812191e7ef29c 100644
+--- a/drivers/gpu/drm/i915/display/intel_display_types.h
++++ b/drivers/gpu/drm/i915/display/intel_display_types.h
+@@ -2075,6 +2075,19 @@ static inline bool intel_encoder_is_dp(struct intel_encoder *encoder)
+ 	}
+ }
+ 
++static inline bool intel_encoder_is_hdmi(struct intel_encoder *encoder)
++{
++	switch (encoder->type) {
++	case INTEL_OUTPUT_HDMI:
++		return true;
++	case INTEL_OUTPUT_DDI:
++		/* See if the HDMI encoder is valid. */
++		return i915_mmio_reg_valid(enc_to_intel_hdmi(encoder)->hdmi_reg);
++	default:
++		return false;
++	}
++}
++
+ static inline struct intel_lspcon *
+ enc_to_intel_lspcon(struct intel_encoder *encoder)
+ {
+diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
+index cd9ee171e0df3d..c5b2fbaeff897f 100644
+--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
++++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
+@@ -3015,7 +3015,7 @@ void intel_infoframe_init(struct intel_digital_port *dig_port)
+ 	}
+ }
+ 
+-void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
++bool intel_hdmi_init_connector(struct intel_digital_port *dig_port,
+ 			       struct intel_connector *intel_connector)
+ {
+ 	struct intel_display *display = to_intel_display(dig_port);
+@@ -3033,17 +3033,17 @@ void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
+ 		    intel_encoder->base.base.id, intel_encoder->base.name);
+ 
+ 	if (DISPLAY_VER(display) < 12 && drm_WARN_ON(dev, port == PORT_A))
+-		return;
++		return false;
+ 
+ 	if (drm_WARN(dev, dig_port->max_lanes < 4,
+ 		     "Not enough lanes (%d) for HDMI on [ENCODER:%d:%s]\n",
+ 		     dig_port->max_lanes, intel_encoder->base.base.id,
+ 		     intel_encoder->base.name))
+-		return;
++		return false;
+ 
+ 	ddc_pin = intel_hdmi_ddc_pin(intel_encoder);
+ 	if (!ddc_pin)
+-		return;
++		return false;
+ 
+ 	drm_connector_init_with_ddc(dev, connector,
+ 				    &intel_hdmi_connector_funcs,
+@@ -3088,6 +3088,8 @@ void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
+ 					   &conn_info);
+ 	if (!intel_hdmi->cec_notifier)
+ 		drm_dbg_kms(display->drm, "CEC notifier get failed\n");
++
++	return true;
+ }
+ 
+ /*
+diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.h b/drivers/gpu/drm/i915/display/intel_hdmi.h
+index 9b97623665c51b..fc64a3affc7165 100644
+--- a/drivers/gpu/drm/i915/display/intel_hdmi.h
++++ b/drivers/gpu/drm/i915/display/intel_hdmi.h
+@@ -22,7 +22,7 @@ struct intel_encoder;
+ struct intel_hdmi;
+ union hdmi_infoframe;
+ 
+-void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
++bool intel_hdmi_init_connector(struct intel_digital_port *dig_port,
+ 			       struct intel_connector *intel_connector);
+ bool intel_hdmi_compute_has_hdmi_sink(struct intel_encoder *encoder,
+ 				      const struct intel_crtc_state *crtc_state,
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
+index 485c3041c80188..67f0694a2f10ff 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
+@@ -383,6 +383,7 @@ static const struct dpu_perf_cfg sc8180x_perf_data = {
+ 	.min_core_ib = 2400000,
+ 	.min_llcc_ib = 800000,
+ 	.min_dram_ib = 800000,
++	.min_prefill_lines = 24,
+ 	.danger_lut_tbl = {0xf, 0xffff, 0x0},
+ 	.safe_lut_tbl = {0xfff0, 0xf000, 0xffff},
+ 	.qos_lut_tbl = {
+diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
+index 3385fd3ef41a47..5d0dce10336ba3 100644
+--- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c
++++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
+@@ -29,7 +29,7 @@ static void panfrost_devfreq_update_utilization(struct panfrost_devfreq *pfdevfr
+ static int panfrost_devfreq_target(struct device *dev, unsigned long *freq,
+ 				   u32 flags)
+ {
+-	struct panfrost_device *ptdev = dev_get_drvdata(dev);
++	struct panfrost_device *pfdev = dev_get_drvdata(dev);
+ 	struct dev_pm_opp *opp;
+ 	int err;
+ 
+@@ -40,7 +40,7 @@ static int panfrost_devfreq_target(struct device *dev, unsigned long *freq,
+ 
+ 	err = dev_pm_opp_set_rate(dev, *freq);
+ 	if (!err)
+-		ptdev->pfdevfreq.current_frequency = *freq;
++		pfdev->pfdevfreq.current_frequency = *freq;
+ 
+ 	return err;
+ }
+diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
+index cfe8b793d34467..69ab8d4f289cd8 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
++++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
+@@ -52,16 +52,9 @@ rockchip_fb_create(struct drm_device *dev, struct drm_file *file,
+ 	}
+ 
+ 	if (drm_is_afbc(mode_cmd->modifier[0])) {
+-		int ret, i;
+-
+ 		ret = drm_gem_fb_afbc_init(dev, mode_cmd, afbc_fb);
+ 		if (ret) {
+-			struct drm_gem_object **obj = afbc_fb->base.obj;
+-
+-			for (i = 0; i < info->num_planes; ++i)
+-				drm_gem_object_put(obj[i]);
+-
+-			kfree(afbc_fb);
++			drm_framebuffer_put(&afbc_fb->base);
+ 			return ERR_PTR(ret);
+ 		}
+ 	}
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+index 7fb1c88bcc475f..69dfe69ce0f87d 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+@@ -896,7 +896,7 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
+ 		.busy_domain = VMW_BO_DOMAIN_SYS,
+ 		.bo_type = ttm_bo_type_device,
+ 		.size = size,
+-		.pin = true,
++		.pin = false,
+ 		.keep_resv = true,
+ 	};
+ 
+diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
+index 82da51a6616a18..2e1d6d248d2e0a 100644
+--- a/drivers/gpu/drm/xe/xe_device.c
++++ b/drivers/gpu/drm/xe/xe_device.c
+@@ -549,6 +549,7 @@ static void update_device_info(struct xe_device *xe)
+ 	/* disable features that are not available/applicable to VFs */
+ 	if (IS_SRIOV_VF(xe)) {
+ 		xe->info.probe_display = 0;
++		xe->info.has_heci_cscfi = 0;
+ 		xe->info.has_heci_gscfi = 0;
+ 		xe->info.skip_guc_pc = 1;
+ 		xe->info.skip_pcode = 1;
+diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
+index d900dd05c335c3..c00ce5bfec4ab5 100644
+--- a/drivers/hid/hid-apple.c
++++ b/drivers/hid/hid-apple.c
+@@ -890,7 +890,8 @@ static int apple_magic_backlight_init(struct hid_device *hdev)
+ 	backlight->brightness = report_enum->report_id_hash[APPLE_MAGIC_REPORT_ID_BRIGHTNESS];
+ 	backlight->power = report_enum->report_id_hash[APPLE_MAGIC_REPORT_ID_POWER];
+ 
+-	if (!backlight->brightness || !backlight->power)
++	if (!backlight->brightness || backlight->brightness->maxfield < 2 ||
++	    !backlight->power || backlight->power->maxfield < 2)
+ 		return -ENODEV;
+ 
+ 	backlight->cdev.name = ":white:" LED_FUNCTION_KBD_BACKLIGHT;
+diff --git a/drivers/i2c/muxes/i2c-mux-mule.c b/drivers/i2c/muxes/i2c-mux-mule.c
+index 284ff4afeeacab..d3b32b794172ad 100644
+--- a/drivers/i2c/muxes/i2c-mux-mule.c
++++ b/drivers/i2c/muxes/i2c-mux-mule.c
+@@ -47,7 +47,6 @@ static int mule_i2c_mux_probe(struct platform_device *pdev)
+ 	struct mule_i2c_reg_mux *priv;
+ 	struct i2c_client *client;
+ 	struct i2c_mux_core *muxc;
+-	struct device_node *dev;
+ 	unsigned int readback;
+ 	int ndev, ret;
+ 	bool old_fw;
+@@ -95,7 +94,7 @@ static int mule_i2c_mux_probe(struct platform_device *pdev)
+ 				     "Failed to register mux remove\n");
+ 
+ 	/* Create device adapters */
+-	for_each_child_of_node(mux_dev->of_node, dev) {
++	for_each_child_of_node_scoped(mux_dev->of_node, dev) {
+ 		u32 reg;
+ 
+ 		ret = of_property_read_u32(dev, "reg", &reg);
+diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.c b/drivers/infiniband/hw/erdma/erdma_verbs.c
+index 51d619edb6c5d2..e56ba86d460e0a 100644
+--- a/drivers/infiniband/hw/erdma/erdma_verbs.c
++++ b/drivers/infiniband/hw/erdma/erdma_verbs.c
+@@ -597,7 +597,8 @@ static struct erdma_mtt *erdma_create_cont_mtt(struct erdma_dev *dev,
+ static void erdma_destroy_mtt_buf_sg(struct erdma_dev *dev,
+ 				     struct erdma_mtt *mtt)
+ {
+-	dma_unmap_sg(&dev->pdev->dev, mtt->sglist, mtt->nsg, DMA_TO_DEVICE);
++	dma_unmap_sg(&dev->pdev->dev, mtt->sglist,
++		     DIV_ROUND_UP(mtt->size, PAGE_SIZE), DMA_TO_DEVICE);
+ 	vfree(mtt->sglist);
+ }
+ 
+diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
+index 560a1d9de408ff..cbe73d9ad52536 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_device.h
++++ b/drivers/infiniband/hw/hns/hns_roce_device.h
+@@ -856,6 +856,7 @@ struct hns_roce_caps {
+ 	u16		default_ceq_arm_st;
+ 	u8		cong_cap;
+ 	enum hns_roce_cong_type default_cong_type;
++	u32             max_ack_req_msg_len;
+ };
+ 
+ enum hns_roce_device_state {
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
+index ca0798224e565c..3d479c63b117a9 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
+@@ -249,15 +249,12 @@ int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
+ }
+ 
+ static struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev,
+-					       unsigned long hem_alloc_size,
+-					       gfp_t gfp_mask)
++					       unsigned long hem_alloc_size)
+ {
+ 	struct hns_roce_hem *hem;
+ 	int order;
+ 	void *buf;
+ 
+-	WARN_ON(gfp_mask & __GFP_HIGHMEM);
+-
+ 	order = get_order(hem_alloc_size);
+ 	if (PAGE_SIZE << order != hem_alloc_size) {
+ 		dev_err(hr_dev->dev, "invalid hem_alloc_size: %lu!\n",
+@@ -265,13 +262,12 @@ static struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev,
+ 		return NULL;
+ 	}
+ 
+-	hem = kmalloc(sizeof(*hem),
+-		      gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
++	hem = kmalloc(sizeof(*hem), GFP_KERNEL);
+ 	if (!hem)
+ 		return NULL;
+ 
+ 	buf = dma_alloc_coherent(hr_dev->dev, hem_alloc_size,
+-				 &hem->dma, gfp_mask);
++				 &hem->dma, GFP_KERNEL);
+ 	if (!buf)
+ 		goto fail;
+ 
+@@ -378,7 +374,6 @@ static int alloc_mhop_hem(struct hns_roce_dev *hr_dev,
+ {
+ 	u32 bt_size = mhop->bt_chunk_size;
+ 	struct device *dev = hr_dev->dev;
+-	gfp_t flag;
+ 	u64 bt_ba;
+ 	u32 size;
+ 	int ret;
+@@ -417,8 +412,7 @@ static int alloc_mhop_hem(struct hns_roce_dev *hr_dev,
+ 	 * alloc bt space chunk for MTT/CQE.
+ 	 */
+ 	size = table->type < HEM_TYPE_MTT ? mhop->buf_chunk_size : bt_size;
+-	flag = GFP_KERNEL | __GFP_NOWARN;
+-	table->hem[index->buf] = hns_roce_alloc_hem(hr_dev, size, flag);
++	table->hem[index->buf] = hns_roce_alloc_hem(hr_dev, size);
+ 	if (!table->hem[index->buf]) {
+ 		ret = -ENOMEM;
+ 		goto err_alloc_hem;
+@@ -546,9 +540,7 @@ int hns_roce_table_get(struct hns_roce_dev *hr_dev,
+ 		goto out;
+ 	}
+ 
+-	table->hem[i] = hns_roce_alloc_hem(hr_dev,
+-				       table->table_chunk_size,
+-				       GFP_KERNEL | __GFP_NOWARN);
++	table->hem[i] = hns_roce_alloc_hem(hr_dev, table->table_chunk_size);
+ 	if (!table->hem[i]) {
+ 		ret = -ENOMEM;
+ 		goto out;
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+index 81e44b73812295..53fe0ef3883d21 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -2181,31 +2181,36 @@ static void apply_func_caps(struct hns_roce_dev *hr_dev)
+ 
+ static int hns_roce_query_caps(struct hns_roce_dev *hr_dev)
+ {
+-	struct hns_roce_cmq_desc desc[HNS_ROCE_QUERY_PF_CAPS_CMD_NUM];
++	struct hns_roce_cmq_desc desc[HNS_ROCE_QUERY_PF_CAPS_CMD_NUM] = {};
+ 	struct hns_roce_caps *caps = &hr_dev->caps;
+ 	struct hns_roce_query_pf_caps_a *resp_a;
+ 	struct hns_roce_query_pf_caps_b *resp_b;
+ 	struct hns_roce_query_pf_caps_c *resp_c;
+ 	struct hns_roce_query_pf_caps_d *resp_d;
+ 	struct hns_roce_query_pf_caps_e *resp_e;
++	struct hns_roce_query_pf_caps_f *resp_f;
+ 	enum hns_roce_opcode_type cmd;
+ 	int ctx_hop_num;
+ 	int pbl_hop_num;
++	int cmd_num;
+ 	int ret;
+ 	int i;
+ 
+ 	cmd = hr_dev->is_vf ? HNS_ROCE_OPC_QUERY_VF_CAPS_NUM :
+ 	      HNS_ROCE_OPC_QUERY_PF_CAPS_NUM;
++	cmd_num = hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 ?
++		  HNS_ROCE_QUERY_PF_CAPS_CMD_NUM_HIP08 :
++		  HNS_ROCE_QUERY_PF_CAPS_CMD_NUM;
+ 
+-	for (i = 0; i < HNS_ROCE_QUERY_PF_CAPS_CMD_NUM; i++) {
++	for (i = 0; i < cmd_num - 1; i++) {
+ 		hns_roce_cmq_setup_basic_desc(&desc[i], cmd, true);
+-		if (i < (HNS_ROCE_QUERY_PF_CAPS_CMD_NUM - 1))
+-			desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
+-		else
+-			desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
++		desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
+ 	}
+ 
+-	ret = hns_roce_cmq_send(hr_dev, desc, HNS_ROCE_QUERY_PF_CAPS_CMD_NUM);
++	hns_roce_cmq_setup_basic_desc(&desc[cmd_num - 1], cmd, true);
++	desc[cmd_num - 1].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
++
++	ret = hns_roce_cmq_send(hr_dev, desc, cmd_num);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -2214,6 +2219,7 @@ static int hns_roce_query_caps(struct hns_roce_dev *hr_dev)
+ 	resp_c = (struct hns_roce_query_pf_caps_c *)desc[2].data;
+ 	resp_d = (struct hns_roce_query_pf_caps_d *)desc[3].data;
+ 	resp_e = (struct hns_roce_query_pf_caps_e *)desc[4].data;
++	resp_f = (struct hns_roce_query_pf_caps_f *)desc[5].data;
+ 
+ 	caps->local_ca_ack_delay = resp_a->local_ca_ack_delay;
+ 	caps->max_sq_sg = le16_to_cpu(resp_a->max_sq_sg);
+@@ -2278,6 +2284,8 @@ static int hns_roce_query_caps(struct hns_roce_dev *hr_dev)
+ 	caps->reserved_srqs = hr_reg_read(resp_e, PF_CAPS_E_RSV_SRQS);
+ 	caps->reserved_lkey = hr_reg_read(resp_e, PF_CAPS_E_RSV_LKEYS);
+ 
++	caps->max_ack_req_msg_len = le32_to_cpu(resp_f->max_ack_req_msg_len);
++
+ 	caps->qpc_hop_num = ctx_hop_num;
+ 	caps->sccc_hop_num = ctx_hop_num;
+ 	caps->srqc_hop_num = ctx_hop_num;
+@@ -2971,14 +2979,22 @@ static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
+ {
+ 	int ret;
+ 
++	if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
++		ret = free_mr_init(hr_dev);
++		if (ret) {
++			dev_err(hr_dev->dev, "failed to init free mr!\n");
++			return ret;
++		}
++	}
++
+ 	/* The hns ROCEE requires the extdb info to be cleared before using */
+ 	ret = hns_roce_clear_extdb_list_info(hr_dev);
+ 	if (ret)
+-		return ret;
++		goto err_clear_extdb_failed;
+ 
+ 	ret = get_hem_table(hr_dev);
+ 	if (ret)
+-		return ret;
++		goto err_get_hem_table_failed;
+ 
+ 	if (hr_dev->is_vf)
+ 		return 0;
+@@ -2993,6 +3009,11 @@ static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
+ 
+ err_llm_init_failed:
+ 	put_hem_table(hr_dev);
++err_get_hem_table_failed:
++	hns_roce_function_clear(hr_dev);
++err_clear_extdb_failed:
++	if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
++		free_mr_exit(hr_dev);
+ 
+ 	return ret;
+ }
+@@ -4546,7 +4567,9 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
+ 	dma_addr_t trrl_ba;
+ 	dma_addr_t irrl_ba;
+ 	enum ib_mtu ib_mtu;
++	u8 ack_req_freq;
+ 	const u8 *smac;
++	int lp_msg_len;
+ 	u8 lp_pktn_ini;
+ 	u64 *mtts;
+ 	u8 *dmac;
+@@ -4629,7 +4652,8 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
+ 		return -EINVAL;
+ #define MIN_LP_MSG_LEN 1024
+ 	/* mtu * (2 ^ lp_pktn_ini) should be in the range of 1024 to mtu */
+-	lp_pktn_ini = ilog2(max(mtu, MIN_LP_MSG_LEN) / mtu);
++	lp_msg_len = max(mtu, MIN_LP_MSG_LEN);
++	lp_pktn_ini = ilog2(lp_msg_len / mtu);
+ 
+ 	if (attr_mask & IB_QP_PATH_MTU) {
+ 		hr_reg_write(context, QPC_MTU, ib_mtu);
+@@ -4639,8 +4663,22 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
+ 	hr_reg_write(context, QPC_LP_PKTN_INI, lp_pktn_ini);
+ 	hr_reg_clear(qpc_mask, QPC_LP_PKTN_INI);
+ 
+-	/* ACK_REQ_FREQ should be larger than or equal to LP_PKTN_INI */
+-	hr_reg_write(context, QPC_ACK_REQ_FREQ, lp_pktn_ini);
++	/*
++	 * There are several constraints for ACK_REQ_FREQ:
++	 * 1. mtu * (2 ^ ACK_REQ_FREQ) should not be too large, otherwise
++	 *    it may cause some unexpected retries when sending large
++	 *    payload.
++	 * 2. ACK_REQ_FREQ should be larger than or equal to LP_PKTN_INI.
++	 * 3. ACK_REQ_FREQ must be equal to LP_PKTN_INI when using LDCP
++	 *    or HC3 congestion control algorithm.
++	 */
++	if (hr_qp->cong_type == CONG_TYPE_LDCP ||
++	    hr_qp->cong_type == CONG_TYPE_HC3 ||
++	    hr_dev->caps.max_ack_req_msg_len < lp_msg_len)
++		ack_req_freq = lp_pktn_ini;
++	else
++		ack_req_freq = ilog2(hr_dev->caps.max_ack_req_msg_len / mtu);
++	hr_reg_write(context, QPC_ACK_REQ_FREQ, ack_req_freq);
+ 	hr_reg_clear(qpc_mask, QPC_ACK_REQ_FREQ);
+ 
+ 	hr_reg_clear(qpc_mask, QPC_RX_REQ_PSN_ERR);
+@@ -5333,11 +5371,10 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
+ {
+ 	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ 	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+-	struct hns_roce_v2_qp_context ctx[2];
+-	struct hns_roce_v2_qp_context *context = ctx;
+-	struct hns_roce_v2_qp_context *qpc_mask = ctx + 1;
++	struct hns_roce_v2_qp_context *context;
++	struct hns_roce_v2_qp_context *qpc_mask;
+ 	struct ib_device *ibdev = &hr_dev->ib_dev;
+-	int ret;
++	int ret = -ENOMEM;
+ 
+ 	if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
+ 		return -EOPNOTSUPP;
+@@ -5348,7 +5385,11 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
+ 	 * we should set all bits of the relevant fields in context mask to
+ 	 * 0 at the same time, else set them to 0x1.
+ 	 */
+-	memset(context, 0, hr_dev->caps.qpc_sz);
++	context = kvzalloc(sizeof(*context), GFP_KERNEL);
++	qpc_mask = kvzalloc(sizeof(*qpc_mask), GFP_KERNEL);
++	if (!context || !qpc_mask)
++		goto out;
++
+ 	memset(qpc_mask, 0xff, hr_dev->caps.qpc_sz);
+ 
+ 	ret = hns_roce_v2_set_abs_fields(ibqp, attr, attr_mask, cur_state,
+@@ -5390,6 +5431,8 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
+ 		clear_qp(hr_qp);
+ 
+ out:
++	kvfree(qpc_mask);
++	kvfree(context);
+ 	return ret;
+ }
+ 
+@@ -7027,21 +7070,11 @@ static int __hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
+ 		goto error_failed_roce_init;
+ 	}
+ 
+-	if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
+-		ret = free_mr_init(hr_dev);
+-		if (ret) {
+-			dev_err(hr_dev->dev, "failed to init free mr!\n");
+-			goto error_failed_free_mr_init;
+-		}
+-	}
+ 
+ 	handle->priv = hr_dev;
+ 
+ 	return 0;
+ 
+-error_failed_free_mr_init:
+-	hns_roce_exit(hr_dev);
+-
+ error_failed_roce_init:
+ 	kfree(hr_dev->priv);
+ 
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+index bc7466830eaf9d..1c2660305d27c8 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+@@ -1168,7 +1168,8 @@ struct hns_roce_cfg_gmv_tb_b {
+ #define GMV_TB_B_SMAC_H GMV_TB_B_FIELD_LOC(47, 32)
+ #define GMV_TB_B_SGID_IDX GMV_TB_B_FIELD_LOC(71, 64)
+ 
+-#define HNS_ROCE_QUERY_PF_CAPS_CMD_NUM 5
++#define HNS_ROCE_QUERY_PF_CAPS_CMD_NUM_HIP08 5
++#define HNS_ROCE_QUERY_PF_CAPS_CMD_NUM 6
+ struct hns_roce_query_pf_caps_a {
+ 	u8 number_ports;
+ 	u8 local_ca_ack_delay;
+@@ -1280,6 +1281,11 @@ struct hns_roce_query_pf_caps_e {
+ 	__le16 aeq_period;
+ };
+ 
++struct hns_roce_query_pf_caps_f {
++	__le32 max_ack_req_msg_len;
++	__le32 rsv[5];
++};
++
+ #define PF_CAPS_E_FIELD_LOC(h, l) \
+ 	FIELD_LOC(struct hns_roce_query_pf_caps_e, h, l)
+ 
+diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
+index e7a497cc125cc3..11fa64044a8d85 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_main.c
++++ b/drivers/infiniband/hw/hns/hns_roce_main.c
+@@ -947,10 +947,7 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
+ static void hns_roce_teardown_hca(struct hns_roce_dev *hr_dev)
+ {
+ 	hns_roce_cleanup_bitmap(hr_dev);
+-
+-	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
+-	    hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB)
+-		mutex_destroy(&hr_dev->pgdir_mutex);
++	mutex_destroy(&hr_dev->pgdir_mutex);
+ }
+ 
+ /**
+@@ -965,11 +962,11 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
+ 
+ 	spin_lock_init(&hr_dev->sm_lock);
+ 
+-	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
+-	    hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) {
+-		INIT_LIST_HEAD(&hr_dev->pgdir_list);
+-		mutex_init(&hr_dev->pgdir_mutex);
+-	}
++	INIT_LIST_HEAD(&hr_dev->qp_list);
++	spin_lock_init(&hr_dev->qp_list_lock);
++
++	INIT_LIST_HEAD(&hr_dev->pgdir_list);
++	mutex_init(&hr_dev->pgdir_mutex);
+ 
+ 	hns_roce_init_uar_table(hr_dev);
+ 
+@@ -1001,9 +998,7 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
+ 
+ err_uar_table_free:
+ 	ida_destroy(&hr_dev->uar_ida.ida);
+-	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
+-	    hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB)
+-		mutex_destroy(&hr_dev->pgdir_mutex);
++	mutex_destroy(&hr_dev->pgdir_mutex);
+ 
+ 	return ret;
+ }
+@@ -1132,9 +1127,6 @@ int hns_roce_init(struct hns_roce_dev *hr_dev)
+ 		}
+ 	}
+ 
+-	INIT_LIST_HEAD(&hr_dev->qp_list);
+-	spin_lock_init(&hr_dev->qp_list_lock);
+-
+ 	ret = hns_roce_register_device(hr_dev);
+ 	if (ret)
+ 		goto error_failed_register_device;
+diff --git a/drivers/infiniband/hw/mana/qp.c b/drivers/infiniband/hw/mana/qp.c
+index 73d67c853b6f3f..48fef989318b49 100644
+--- a/drivers/infiniband/hw/mana/qp.c
++++ b/drivers/infiniband/hw/mana/qp.c
+@@ -561,7 +561,7 @@ static int mana_ib_gd_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ 		req.ah_attr.dest_port = ROCE_V2_UDP_DPORT;
+ 		req.ah_attr.src_port = rdma_get_udp_sport(attr->ah_attr.grh.flow_label,
+ 							  ibqp->qp_num, attr->dest_qp_num);
+-		req.ah_attr.traffic_class = attr->ah_attr.grh.traffic_class;
++		req.ah_attr.traffic_class = attr->ah_attr.grh.traffic_class >> 2;
+ 		req.ah_attr.hop_limit = attr->ah_attr.grh.hop_limit;
+ 	}
+ 
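The >> 2 above exists because the 8-bit IPv6 traffic class packs DSCP in its upper six bits and ECN in the lower two, and the hardware field wants the DSCP value alone. The split is plain bit arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned char traffic_class = 0xb8;		/* e.g. DSCP EF (46), ECN 0 */
	unsigned char dscp = traffic_class >> 2;	/* upper 6 bits */
	unsigned char ecn  = traffic_class & 0x3;	/* lower 2 bits */

	printf("dscp=%u ecn=%u\n", dscp, ecn);		/* dscp=46 ecn=0 */
	return 0;
}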
+diff --git a/drivers/infiniband/hw/mlx5/dm.c b/drivers/infiniband/hw/mlx5/dm.c
+index b4c97fb62abfcc..9ded2b7c1e3199 100644
+--- a/drivers/infiniband/hw/mlx5/dm.c
++++ b/drivers/infiniband/hw/mlx5/dm.c
+@@ -282,7 +282,7 @@ static struct ib_dm *handle_alloc_dm_memic(struct ib_ucontext *ctx,
+ 	int err;
+ 	u64 address;
+ 
+-	if (!MLX5_CAP_DEV_MEM(dm_db->dev, memic))
++	if (!dm_db || !MLX5_CAP_DEV_MEM(dm_db->dev, memic))
+ 		return ERR_PTR(-EOPNOTSUPP);
+ 
+ 	dm = kzalloc(sizeof(*dm), GFP_KERNEL);
+diff --git a/drivers/infiniband/hw/mlx5/umr.c b/drivers/infiniband/hw/mlx5/umr.c
+index 793f3c5c4d0126..80c665d152189d 100644
+--- a/drivers/infiniband/hw/mlx5/umr.c
++++ b/drivers/infiniband/hw/mlx5/umr.c
+@@ -32,13 +32,15 @@ static __be64 get_umr_disable_mr_mask(void)
+ 	return cpu_to_be64(result);
+ }
+ 
+-static __be64 get_umr_update_translation_mask(void)
++static __be64 get_umr_update_translation_mask(struct mlx5_ib_dev *dev)
+ {
+ 	u64 result;
+ 
+ 	result = MLX5_MKEY_MASK_LEN |
+ 		 MLX5_MKEY_MASK_PAGE_SIZE |
+ 		 MLX5_MKEY_MASK_START_ADDR;
++	if (MLX5_CAP_GEN_2(dev->mdev, umr_log_entity_size_5))
++		result |= MLX5_MKEY_MASK_PAGE_SIZE_5;
+ 
+ 	return cpu_to_be64(result);
+ }
+@@ -654,7 +656,7 @@ static void mlx5r_umr_final_update_xlt(struct mlx5_ib_dev *dev,
+ 		flags & MLX5_IB_UPD_XLT_ENABLE || flags & MLX5_IB_UPD_XLT_ADDR;
+ 
+ 	if (update_translation) {
+-		wqe->ctrl_seg.mkey_mask |= get_umr_update_translation_mask();
++		wqe->ctrl_seg.mkey_mask |= get_umr_update_translation_mask(dev);
+ 		if (!mr->ibmr.length)
+ 			MLX5_SET(mkc, &wqe->mkey_seg, length64, 1);
+ 	}
+diff --git a/drivers/interconnect/qcom/sc8180x.c b/drivers/interconnect/qcom/sc8180x.c
+index 03d626776ba17a..576f90a7d55fad 100644
+--- a/drivers/interconnect/qcom/sc8180x.c
++++ b/drivers/interconnect/qcom/sc8180x.c
+@@ -1492,34 +1492,40 @@ static struct qcom_icc_bcm bcm_sh3 = {
+ 
+ static struct qcom_icc_bcm bcm_sn0 = {
+ 	.name = "SN0",
++	.num_nodes = 1,
+ 	.nodes = { &slv_qns_gemnoc_sf }
+ };
+ 
+ static struct qcom_icc_bcm bcm_sn1 = {
+ 	.name = "SN1",
++	.num_nodes = 1,
+ 	.nodes = { &slv_qxs_imem }
+ };
+ 
+ static struct qcom_icc_bcm bcm_sn2 = {
+ 	.name = "SN2",
+ 	.keepalive = true,
++	.num_nodes = 1,
+ 	.nodes = { &slv_qns_gemnoc_gc }
+ };
+ 
+ static struct qcom_icc_bcm bcm_co2 = {
+ 	.name = "CO2",
++	.num_nodes = 1,
+ 	.nodes = { &mas_qnm_npu }
+ };
+ 
+ static struct qcom_icc_bcm bcm_sn3 = {
+ 	.name = "SN3",
+ 	.keepalive = true,
++	.num_nodes = 2,
+ 	.nodes = { &slv_srvc_aggre1_noc,
+ 		  &slv_qns_cnoc }
+ };
+ 
+ static struct qcom_icc_bcm bcm_sn4 = {
+ 	.name = "SN4",
++	.num_nodes = 1,
+ 	.nodes = { &slv_qxs_pimem }
+ };
+ 
+diff --git a/drivers/interconnect/qcom/sc8280xp.c b/drivers/interconnect/qcom/sc8280xp.c
+index 7acd152bf0dd8d..fab5978ed9d32c 100644
+--- a/drivers/interconnect/qcom/sc8280xp.c
++++ b/drivers/interconnect/qcom/sc8280xp.c
+@@ -48,6 +48,7 @@ static struct qcom_icc_node qnm_a1noc_cfg = {
+ 	.id = SC8280XP_MASTER_A1NOC_CFG,
+ 	.channels = 1,
+ 	.buswidth = 4,
++	.num_links = 1,
+ 	.links = { SC8280XP_SLAVE_SERVICE_A1NOC },
+ };
+ 
+diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
+index 23e78a034da8f3..6a019670efc7ca 100644
+--- a/drivers/iommu/amd/iommu.c
++++ b/drivers/iommu/amd/iommu.c
+@@ -483,8 +483,8 @@ static inline void pdev_disable_cap_pasid(struct pci_dev *pdev)
+ 
+ static void pdev_enable_caps(struct pci_dev *pdev)
+ {
+-	pdev_enable_cap_ats(pdev);
+ 	pdev_enable_cap_pasid(pdev);
++	pdev_enable_cap_ats(pdev);
+ 	pdev_enable_cap_pri(pdev);
+ }
+ 
+@@ -2352,8 +2352,21 @@ static inline u64 dma_max_address(void)
+ 	if (amd_iommu_pgtable == AMD_IOMMU_V1)
+ 		return ~0ULL;
+ 
+-	/* V2 with 4/5 level page table */
+-	return ((1ULL << PM_LEVEL_SHIFT(amd_iommu_gpt_level)) - 1);
++	/*
++	 * V2 with 4/5 level page table. Note that "2.2.6.5 AMD64 4-Kbyte Page
++	 * Translation" shows that the V2 table sign extends the top of the
++	 * address space creating a reserved region in the middle of the
++	 * translation, just like the CPU does. Further Vasant says the docs are
++	 * incomplete and this only applies to non-zero PASIDs. If the AMDv2
++	 * page table is assigned to the 0 PASID then there is no sign extension
++	 * check.
++	 *
++	 * Since the IOMMU must have a fixed geometry, and the core code does
++	 * not understand sign extended addressing, we have to chop off the high
++	 * bit to get consistent behavior with attachments of the domain to any
++	 * PASID.
++	 */
++	return ((1ULL << (PM_LEVEL_SHIFT(amd_iommu_gpt_level) - 1)) - 1);
+ }
+ 
+ static bool amd_iommu_hd_support(struct amd_iommu *iommu)
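The new geometry drops one address bit so the same aperture stays valid whether or not the table sign-extends. A standalone sketch of the before/after limits, assuming PM_LEVEL_SHIFT(level) expands to 12 + 9 * level (9 translation bits per level on top of the 4 KiB page offset; check the real header before relying on this):

#include <stdio.h>
#include <stdint.h>

/* assumed expansion of the kernel's PM_LEVEL_SHIFT() */
#define PM_LEVEL_SHIFT(lvl)	(12 + 9 * (lvl))

int main(void)
{
	for (int lvl = 4; lvl <= 5; lvl++) {
		uint64_t full    = (1ULL << PM_LEVEL_SHIFT(lvl)) - 1;
		uint64_t chopped = (1ULL << (PM_LEVEL_SHIFT(lvl) - 1)) - 1;

		/* dropping the top bit avoids the sign-extended hole */
		printf("level %d: 0x%llx -> 0x%llx\n", lvl,
		       (unsigned long long)full, (unsigned long long)chopped);
	}
	return 0;
}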
+diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
+index a799a89195c515..5d5b3cf381b92d 100644
+--- a/drivers/irqchip/Kconfig
++++ b/drivers/irqchip/Kconfig
+@@ -506,6 +506,7 @@ config IMX_MU_MSI
+ 	tristate "i.MX MU used as MSI controller"
+ 	depends on OF && HAS_IOMEM
+ 	depends on ARCH_MXC || COMPILE_TEST
++	depends on ARM || ARM64
+ 	default m if ARCH_MXC
+ 	select IRQ_DOMAIN
+ 	select IRQ_DOMAIN_HIERARCHY
+diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
+index da50f6661bae46..48ce750bf70af9 100644
+--- a/drivers/md/bcache/alloc.c
++++ b/drivers/md/bcache/alloc.c
+@@ -164,68 +164,40 @@ static void bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
+  * prio is worth 1/8th of what INITIAL_PRIO is worth.
+  */
+ 
+-static inline unsigned int new_bucket_prio(struct cache *ca, struct bucket *b)
+-{
+-	unsigned int min_prio = (INITIAL_PRIO - ca->set->min_prio) / 8;
+-
+-	return (b->prio - ca->set->min_prio + min_prio) * GC_SECTORS_USED(b);
+-}
+-
+-static inline bool new_bucket_max_cmp(const void *l, const void *r, void *args)
+-{
+-	struct bucket **lhs = (struct bucket **)l;
+-	struct bucket **rhs = (struct bucket **)r;
+-	struct cache *ca = args;
+-
+-	return new_bucket_prio(ca, *lhs) > new_bucket_prio(ca, *rhs);
+-}
+-
+-static inline bool new_bucket_min_cmp(const void *l, const void *r, void *args)
+-{
+-	struct bucket **lhs = (struct bucket **)l;
+-	struct bucket **rhs = (struct bucket **)r;
+-	struct cache *ca = args;
+-
+-	return new_bucket_prio(ca, *lhs) < new_bucket_prio(ca, *rhs);
+-}
+-
+-static inline void new_bucket_swap(void *l, void *r, void __always_unused *args)
+-{
+-	struct bucket **lhs = l, **rhs = r;
++#define bucket_prio(b)							\
++({									\
++	unsigned int min_prio = (INITIAL_PRIO - ca->set->min_prio) / 8;	\
++									\
++	(b->prio - ca->set->min_prio + min_prio) * GC_SECTORS_USED(b);	\
++})
+ 
+-	swap(*lhs, *rhs);
+-}
++#define bucket_max_cmp(l, r)	(bucket_prio(l) < bucket_prio(r))
++#define bucket_min_cmp(l, r)	(bucket_prio(l) > bucket_prio(r))
+ 
+ static void invalidate_buckets_lru(struct cache *ca)
+ {
+ 	struct bucket *b;
+-	const struct min_heap_callbacks bucket_max_cmp_callback = {
+-		.less = new_bucket_max_cmp,
+-		.swp = new_bucket_swap,
+-	};
+-	const struct min_heap_callbacks bucket_min_cmp_callback = {
+-		.less = new_bucket_min_cmp,
+-		.swp = new_bucket_swap,
+-	};
++	ssize_t i;
+ 
+-	ca->heap.nr = 0;
++	ca->heap.used = 0;
+ 
+ 	for_each_bucket(b, ca) {
+ 		if (!bch_can_invalidate_bucket(ca, b))
+ 			continue;
+ 
+-		if (!min_heap_full(&ca->heap))
+-			min_heap_push(&ca->heap, &b, &bucket_max_cmp_callback, ca);
+-		else if (!new_bucket_max_cmp(&b, min_heap_peek(&ca->heap), ca)) {
++		if (!heap_full(&ca->heap))
++			heap_add(&ca->heap, b, bucket_max_cmp);
++		else if (bucket_max_cmp(b, heap_peek(&ca->heap))) {
+ 			ca->heap.data[0] = b;
+-			min_heap_sift_down(&ca->heap, 0, &bucket_max_cmp_callback, ca);
++			heap_sift(&ca->heap, 0, bucket_max_cmp);
+ 		}
+ 	}
+ 
+-	min_heapify_all(&ca->heap, &bucket_min_cmp_callback, ca);
++	for (i = ca->heap.used / 2 - 1; i >= 0; --i)
++		heap_sift(&ca->heap, i, bucket_min_cmp);
+ 
+ 	while (!fifo_full(&ca->free_inc)) {
+-		if (!ca->heap.nr) {
++		if (!heap_pop(&ca->heap, b, bucket_min_cmp)) {
+ 			/*
+ 			 * We don't want to be calling invalidate_buckets()
+ 			 * multiple times when it can't do anything
+@@ -234,8 +206,6 @@ static void invalidate_buckets_lru(struct cache *ca)
+ 			wake_up_gc(ca->set);
+ 			return;
+ 		}
+-		b = min_heap_peek(&ca->heap)[0];
+-		min_heap_pop(&ca->heap, &bucket_min_cmp_callback, ca);
+ 
+ 		bch_invalidate_one_bucket(ca, b);
+ 	}
+diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
+index 785b0d9008face..1d33e40d26ea51 100644
+--- a/drivers/md/bcache/bcache.h
++++ b/drivers/md/bcache/bcache.h
+@@ -458,7 +458,7 @@ struct cache {
+ 	/* Allocation stuff: */
+ 	struct bucket		*buckets;
+ 
+-	DEFINE_MIN_HEAP(struct bucket *, cache_heap) heap;
++	DECLARE_HEAP(struct bucket *, heap);
+ 
+ 	/*
+ 	 * If nonzero, we know we aren't going to find any buckets to invalidate
+diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
+index bd97d862688744..463eb13bd0b2a7 100644
+--- a/drivers/md/bcache/bset.c
++++ b/drivers/md/bcache/bset.c
+@@ -54,11 +54,9 @@ void bch_dump_bucket(struct btree_keys *b)
+ int __bch_count_data(struct btree_keys *b)
+ {
+ 	unsigned int ret = 0;
+-	struct btree_iter iter;
++	struct btree_iter_stack iter;
+ 	struct bkey *k;
+ 
+-	min_heap_init(&iter.heap, NULL, MAX_BSETS);
+-
+ 	if (b->ops->is_extents)
+ 		for_each_key(b, k, &iter)
+ 			ret += KEY_SIZE(k);
+@@ -69,11 +67,9 @@ void __bch_check_keys(struct btree_keys *b, const char *fmt, ...)
+ {
+ 	va_list args;
+ 	struct bkey *k, *p = NULL;
+-	struct btree_iter iter;
++	struct btree_iter_stack iter;
+ 	const char *err;
+ 
+-	min_heap_init(&iter.heap, NULL, MAX_BSETS);
+-
+ 	for_each_key(b, k, &iter) {
+ 		if (b->ops->is_extents) {
+ 			err = "Keys out of order";
+@@ -114,9 +110,9 @@ void __bch_check_keys(struct btree_keys *b, const char *fmt, ...)
+ 
+ static void bch_btree_iter_next_check(struct btree_iter *iter)
+ {
+-	struct bkey *k = iter->heap.data->k, *next = bkey_next(k);
++	struct bkey *k = iter->data->k, *next = bkey_next(k);
+ 
+-	if (next < iter->heap.data->end &&
++	if (next < iter->data->end &&
+ 	    bkey_cmp(k, iter->b->ops->is_extents ?
+ 		     &START_KEY(next) : next) > 0) {
+ 		bch_dump_bucket(iter->b);
+@@ -883,14 +879,12 @@ unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
+ 	unsigned int status = BTREE_INSERT_STATUS_NO_INSERT;
+ 	struct bset *i = bset_tree_last(b)->data;
+ 	struct bkey *m, *prev = NULL;
+-	struct btree_iter iter;
++	struct btree_iter_stack iter;
+ 	struct bkey preceding_key_on_stack = ZERO_KEY;
+ 	struct bkey *preceding_key_p = &preceding_key_on_stack;
+ 
+ 	BUG_ON(b->ops->is_extents && !KEY_SIZE(k));
+ 
+-	min_heap_init(&iter.heap, NULL, MAX_BSETS);
+-
+ 	/*
+ 	 * If k has preceding key, preceding_key_p will be set to address
+ 	 *  of k's preceding key; otherwise preceding_key_p will be set
+@@ -901,9 +895,9 @@ unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
+ 	else
+ 		preceding_key(k, &preceding_key_p);
+ 
+-	m = bch_btree_iter_init(b, &iter, preceding_key_p);
++	m = bch_btree_iter_stack_init(b, &iter, preceding_key_p);
+ 
+-	if (b->ops->insert_fixup(b, k, &iter, replace_key))
++	if (b->ops->insert_fixup(b, k, &iter.iter, replace_key))
+ 		return status;
+ 
+ 	status = BTREE_INSERT_STATUS_INSERT;
+@@ -1083,102 +1077,79 @@ struct bkey *__bch_bset_search(struct btree_keys *b, struct bset_tree *t,
+ 
+ /* Btree iterator */
+ 
+-typedef bool (new_btree_iter_cmp_fn)(const void *, const void *, void *);
+-
+-static inline bool new_btree_iter_cmp(const void *l, const void *r, void __always_unused *args)
+-{
+-	const struct btree_iter_set *_l = l;
+-	const struct btree_iter_set *_r = r;
+-
+-	return bkey_cmp(_l->k, _r->k) <= 0;
+-}
++typedef bool (btree_iter_cmp_fn)(struct btree_iter_set,
++				 struct btree_iter_set);
+ 
+-static inline void new_btree_iter_swap(void *iter1, void *iter2, void __always_unused *args)
++static inline bool btree_iter_cmp(struct btree_iter_set l,
++				  struct btree_iter_set r)
+ {
+-	struct btree_iter_set *_iter1 = iter1;
+-	struct btree_iter_set *_iter2 = iter2;
+-
+-	swap(*_iter1, *_iter2);
++	return bkey_cmp(l.k, r.k) > 0;
+ }
+ 
+ static inline bool btree_iter_end(struct btree_iter *iter)
+ {
+-	return !iter->heap.nr;
++	return !iter->used;
+ }
+ 
+ void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
+ 			 struct bkey *end)
+ {
+-	const struct min_heap_callbacks callbacks = {
+-		.less = new_btree_iter_cmp,
+-		.swp = new_btree_iter_swap,
+-	};
+-
+ 	if (k != end)
+-		BUG_ON(!min_heap_push(&iter->heap,
+-				 &((struct btree_iter_set) { k, end }),
+-				 &callbacks,
+-				 NULL));
++		BUG_ON(!heap_add(iter,
++				 ((struct btree_iter_set) { k, end }),
++				 btree_iter_cmp));
+ }
+ 
+-static struct bkey *__bch_btree_iter_init(struct btree_keys *b,
+-					  struct btree_iter *iter,
+-					  struct bkey *search,
+-					  struct bset_tree *start)
++static struct bkey *__bch_btree_iter_stack_init(struct btree_keys *b,
++						struct btree_iter_stack *iter,
++						struct bkey *search,
++						struct bset_tree *start)
+ {
+ 	struct bkey *ret = NULL;
+ 
+-	iter->heap.size = ARRAY_SIZE(iter->heap.preallocated);
+-	iter->heap.nr = 0;
++	iter->iter.size = ARRAY_SIZE(iter->stack_data);
++	iter->iter.used = 0;
+ 
+ #ifdef CONFIG_BCACHE_DEBUG
+-	iter->b = b;
++	iter->iter.b = b;
+ #endif
+ 
+ 	for (; start <= bset_tree_last(b); start++) {
+ 		ret = bch_bset_search(b, start, search);
+-		bch_btree_iter_push(iter, ret, bset_bkey_last(start->data));
++		bch_btree_iter_push(&iter->iter, ret, bset_bkey_last(start->data));
+ 	}
+ 
+ 	return ret;
+ }
+ 
+-struct bkey *bch_btree_iter_init(struct btree_keys *b,
+-				 struct btree_iter *iter,
++struct bkey *bch_btree_iter_stack_init(struct btree_keys *b,
++				 struct btree_iter_stack *iter,
+ 				 struct bkey *search)
+ {
+-	return __bch_btree_iter_init(b, iter, search, b->set);
++	return __bch_btree_iter_stack_init(b, iter, search, b->set);
+ }
+ 
+ static inline struct bkey *__bch_btree_iter_next(struct btree_iter *iter,
+-						 new_btree_iter_cmp_fn *cmp)
++						 btree_iter_cmp_fn *cmp)
+ {
+ 	struct btree_iter_set b __maybe_unused;
+ 	struct bkey *ret = NULL;
+-	const struct min_heap_callbacks callbacks = {
+-		.less = cmp,
+-		.swp = new_btree_iter_swap,
+-	};
+ 
+ 	if (!btree_iter_end(iter)) {
+ 		bch_btree_iter_next_check(iter);
+ 
+-		ret = iter->heap.data->k;
+-		iter->heap.data->k = bkey_next(iter->heap.data->k);
++		ret = iter->data->k;
++		iter->data->k = bkey_next(iter->data->k);
+ 
+-		if (iter->heap.data->k > iter->heap.data->end) {
++		if (iter->data->k > iter->data->end) {
+ 			WARN_ONCE(1, "bset was corrupt!\n");
+-			iter->heap.data->k = iter->heap.data->end;
++			iter->data->k = iter->data->end;
+ 		}
+ 
+-		if (iter->heap.data->k == iter->heap.data->end) {
+-			if (iter->heap.nr) {
+-				b = min_heap_peek(&iter->heap)[0];
+-				min_heap_pop(&iter->heap, &callbacks, NULL);
+-			}
+-		}
++		if (iter->data->k == iter->data->end)
++			heap_pop(iter, b, cmp);
+ 		else
+-			min_heap_sift_down(&iter->heap, 0, &callbacks, NULL);
++			heap_sift(iter, 0, cmp);
+ 	}
+ 
+ 	return ret;
+@@ -1186,7 +1157,7 @@ static inline struct bkey *__bch_btree_iter_next(struct btree_iter *iter,
+ 
+ struct bkey *bch_btree_iter_next(struct btree_iter *iter)
+ {
+-	return __bch_btree_iter_next(iter, new_btree_iter_cmp);
++	return __bch_btree_iter_next(iter, btree_iter_cmp);
+ 
+ }
+ 
+@@ -1224,18 +1195,16 @@ static void btree_mergesort(struct btree_keys *b, struct bset *out,
+ 			    struct btree_iter *iter,
+ 			    bool fixup, bool remove_stale)
+ {
++	int i;
+ 	struct bkey *k, *last = NULL;
+ 	BKEY_PADDED(k) tmp;
+ 	bool (*bad)(struct btree_keys *, const struct bkey *) = remove_stale
+ 		? bch_ptr_bad
+ 		: bch_ptr_invalid;
+-	const struct min_heap_callbacks callbacks = {
+-		.less = b->ops->sort_cmp,
+-		.swp = new_btree_iter_swap,
+-	};
+ 
+ 	/* Heapify the iterator, using our comparison function */
+-	min_heapify_all(&iter->heap, &callbacks, NULL);
++	for (i = iter->used / 2 - 1; i >= 0; --i)
++		heap_sift(iter, i, b->ops->sort_cmp);
+ 
+ 	while (!btree_iter_end(iter)) {
+ 		if (b->ops->sort_fixup && fixup)
+@@ -1324,11 +1293,10 @@ void bch_btree_sort_partial(struct btree_keys *b, unsigned int start,
+ 			    struct bset_sort_state *state)
+ {
+ 	size_t order = b->page_order, keys = 0;
+-	struct btree_iter iter;
++	struct btree_iter_stack iter;
+ 	int oldsize = bch_count_data(b);
+ 
+-	min_heap_init(&iter.heap, NULL, MAX_BSETS);
+-	__bch_btree_iter_init(b, &iter, NULL, &b->set[start]);
++	__bch_btree_iter_stack_init(b, &iter, NULL, &b->set[start]);
+ 
+ 	if (start) {
+ 		unsigned int i;
+@@ -1339,7 +1307,7 @@ void bch_btree_sort_partial(struct btree_keys *b, unsigned int start,
+ 		order = get_order(__set_bytes(b->set->data, keys));
+ 	}
+ 
+-	__btree_sort(b, &iter, start, order, false, state);
++	__btree_sort(b, &iter.iter, start, order, false, state);
+ 
+ 	EBUG_ON(oldsize >= 0 && bch_count_data(b) != oldsize);
+ }
+@@ -1355,13 +1323,11 @@ void bch_btree_sort_into(struct btree_keys *b, struct btree_keys *new,
+ 			 struct bset_sort_state *state)
+ {
+ 	uint64_t start_time = local_clock();
+-	struct btree_iter iter;
+-
+-	min_heap_init(&iter.heap, NULL, MAX_BSETS);
++	struct btree_iter_stack iter;
+ 
+-	bch_btree_iter_init(b, &iter, NULL);
++	bch_btree_iter_stack_init(b, &iter, NULL);
+ 
+-	btree_mergesort(b, new->set->data, &iter, false, true);
++	btree_mergesort(b, new->set->data, &iter.iter, false, true);
+ 
+ 	bch_time_stats_update(&state->time, start_time);
+ 
+diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h
+index f79441acd4c18e..011f6062c4c04f 100644
+--- a/drivers/md/bcache/bset.h
++++ b/drivers/md/bcache/bset.h
+@@ -187,9 +187,8 @@ struct bset_tree {
+ };
+ 
+ struct btree_keys_ops {
+-	bool		(*sort_cmp)(const void *l,
+-				    const void *r,
+-					void *args);
++	bool		(*sort_cmp)(struct btree_iter_set l,
++				    struct btree_iter_set r);
+ 	struct bkey	*(*sort_fixup)(struct btree_iter *iter,
+ 				       struct bkey *tmp);
+ 	bool		(*insert_fixup)(struct btree_keys *b,
+@@ -313,17 +312,23 @@ enum {
+ 	BTREE_INSERT_STATUS_FRONT_MERGE,
+ };
+ 
+-struct btree_iter_set {
+-	struct bkey *k, *end;
+-};
+-
+ /* Btree key iteration */
+ 
+ struct btree_iter {
++	size_t size, used;
+ #ifdef CONFIG_BCACHE_DEBUG
+ 	struct btree_keys *b;
+ #endif
+-	MIN_HEAP_PREALLOCATED(struct btree_iter_set, btree_iter_heap, MAX_BSETS) heap;
++	struct btree_iter_set {
++		struct bkey *k, *end;
++	} data[];
++};
++
++/* Fixed-size btree_iter that can be allocated on the stack */
++
++struct btree_iter_stack {
++	struct btree_iter iter;
++	struct btree_iter_set stack_data[MAX_BSETS];
+ };
+ 
+ typedef bool (*ptr_filter_fn)(struct btree_keys *b, const struct bkey *k);
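btree_iter_stack works by embedding a structure that ends in a flexible array member in front of fixed backing storage, so &iter_stack.iter can be passed anywhere a heap-allocated struct btree_iter is expected. Standard C forbids this layout; the kernel relies on it as a GNU extension. A minimal sketch of the trick:

#include <stdio.h>
#include <stddef.h>

#define MAX_SLOTS 4

struct set { int k; };

struct iter {
	size_t size, used;
	struct set data[];		/* flexible array member */
};

/* GNU extension: FAM-terminated struct placed in front of backing storage */
struct iter_stack {
	struct iter iter;
	struct set stack_data[MAX_SLOTS];
};

int main(void)
{
	struct iter_stack s;
	struct iter *it = &s.iter;	/* callees only ever see struct iter * */

	it->size = MAX_SLOTS;		/* ARRAY_SIZE(s.stack_data) in the patch */
	it->used = 0;
	it->data[it->used++].k = 42;	/* in practice lands in s.stack_data[0] */

	printf("%d\n", s.stack_data[0].k);
	return 0;
}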
+@@ -335,9 +340,9 @@ struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
+ 
+ void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
+ 			 struct bkey *end);
+-struct bkey *bch_btree_iter_init(struct btree_keys *b,
+-				 struct btree_iter *iter,
+-				 struct bkey *search);
++struct bkey *bch_btree_iter_stack_init(struct btree_keys *b,
++				       struct btree_iter_stack *iter,
++				       struct bkey *search);
+ 
+ struct bkey *__bch_bset_search(struct btree_keys *b, struct bset_tree *t,
+ 			       const struct bkey *search);
+@@ -352,13 +357,14 @@ static inline struct bkey *bch_bset_search(struct btree_keys *b,
+ 	return search ? __bch_bset_search(b, t, search) : t->data->start;
+ }
+ 
+-#define for_each_key_filter(b, k, iter, filter)				\
+-	for (bch_btree_iter_init((b), (iter), NULL);			\
+-	     ((k) = bch_btree_iter_next_filter((iter), (b), filter));)
++#define for_each_key_filter(b, k, stack_iter, filter)                      \
++	for (bch_btree_iter_stack_init((b), (stack_iter), NULL);           \
++	     ((k) = bch_btree_iter_next_filter(&((stack_iter)->iter), (b), \
++					       filter));)
+ 
+-#define for_each_key(b, k, iter)					\
+-	for (bch_btree_iter_init((b), (iter), NULL);			\
+-	     ((k) = bch_btree_iter_next(iter));)
++#define for_each_key(b, k, stack_iter)                           \
++	for (bch_btree_iter_stack_init((b), (stack_iter), NULL); \
++	     ((k) = bch_btree_iter_next(&((stack_iter)->iter)));)
+ 
+ /* Sorting */
+ 
+diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
+index ed40d86006564d..4e6ccf2c8a0bf3 100644
+--- a/drivers/md/bcache/btree.c
++++ b/drivers/md/bcache/btree.c
+@@ -149,19 +149,19 @@ void bch_btree_node_read_done(struct btree *b)
+ {
+ 	const char *err = "bad btree header";
+ 	struct bset *i = btree_bset_first(b);
+-	struct btree_iter iter;
++	struct btree_iter *iter;
+ 
+ 	/*
+ 	 * c->fill_iter can allocate an iterator with more memory space
+ 	 * than static MAX_BSETS.
+	 * See the comment around cache_set->fill_iter.
+ 	 */
+-	iter.heap.data = mempool_alloc(&b->c->fill_iter, GFP_NOIO);
+-	iter.heap.size = b->c->cache->sb.bucket_size / b->c->cache->sb.block_size;
+-	iter.heap.nr = 0;
++	iter = mempool_alloc(&b->c->fill_iter, GFP_NOIO);
++	iter->size = b->c->cache->sb.bucket_size / b->c->cache->sb.block_size;
++	iter->used = 0;
+ 
+ #ifdef CONFIG_BCACHE_DEBUG
+-	iter.b = &b->keys;
++	iter->b = &b->keys;
+ #endif
+ 
+ 	if (!i->seq)
+@@ -199,7 +199,7 @@ void bch_btree_node_read_done(struct btree *b)
+ 		if (i != b->keys.set[0].data && !i->keys)
+ 			goto err;
+ 
+-		bch_btree_iter_push(&iter, i->start, bset_bkey_last(i));
++		bch_btree_iter_push(iter, i->start, bset_bkey_last(i));
+ 
+ 		b->written += set_blocks(i, block_bytes(b->c->cache));
+ 	}
+@@ -211,7 +211,7 @@ void bch_btree_node_read_done(struct btree *b)
+ 		if (i->seq == b->keys.set[0].data->seq)
+ 			goto err;
+ 
+-	bch_btree_sort_and_fix_extents(&b->keys, &iter, &b->c->sort);
++	bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort);
+ 
+ 	i = b->keys.set[0].data;
+ 	err = "short btree key";
+@@ -223,7 +223,7 @@ void bch_btree_node_read_done(struct btree *b)
+ 		bch_bset_init_next(&b->keys, write_block(b),
+ 				   bset_magic(&b->c->cache->sb));
+ out:
+-	mempool_free(iter.heap.data, &b->c->fill_iter);
++	mempool_free(iter, &b->c->fill_iter);
+ 	return;
+ err:
+ 	set_btree_node_io_error(b);
+@@ -1309,11 +1309,9 @@ static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
+ 	uint8_t stale = 0;
+ 	unsigned int keys = 0, good_keys = 0;
+ 	struct bkey *k;
+-	struct btree_iter iter;
++	struct btree_iter_stack iter;
+ 	struct bset_tree *t;
+ 
+-	min_heap_init(&iter.heap, NULL, MAX_BSETS);
+-
+ 	gc->nodes++;
+ 
+ 	for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) {
+@@ -1572,11 +1570,9 @@ static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op,
+ static unsigned int btree_gc_count_keys(struct btree *b)
+ {
+ 	struct bkey *k;
+-	struct btree_iter iter;
++	struct btree_iter_stack iter;
+ 	unsigned int ret = 0;
+ 
+-	min_heap_init(&iter.heap, NULL, MAX_BSETS);
+-
+ 	for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
+ 		ret += bkey_u64s(k);
+ 
+@@ -1615,18 +1611,18 @@ static int btree_gc_recurse(struct btree *b, struct btree_op *op,
+ 	int ret = 0;
+ 	bool should_rewrite;
+ 	struct bkey *k;
+-	struct btree_iter iter;
++	struct btree_iter_stack iter;
+ 	struct gc_merge_info r[GC_MERGE_NODES];
+ 	struct gc_merge_info *i, *last = r + ARRAY_SIZE(r) - 1;
+ 
+-	min_heap_init(&iter.heap, NULL, MAX_BSETS);
+-	bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done);
++	bch_btree_iter_stack_init(&b->keys, &iter, &b->c->gc_done);
+ 
+ 	for (i = r; i < r + ARRAY_SIZE(r); i++)
+ 		i->b = ERR_PTR(-EINTR);
+ 
+ 	while (1) {
+-		k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad);
++		k = bch_btree_iter_next_filter(&iter.iter, &b->keys,
++					       bch_ptr_bad);
+ 		if (k) {
+ 			r->b = bch_btree_node_get(b->c, op, k, b->level - 1,
+ 						  true, b);
+@@ -1921,9 +1917,7 @@ static int bch_btree_check_recurse(struct btree *b, struct btree_op *op)
+ {
+ 	int ret = 0;
+ 	struct bkey *k, *p = NULL;
+-	struct btree_iter iter;
+-
+-	min_heap_init(&iter.heap, NULL, MAX_BSETS);
++	struct btree_iter_stack iter;
+ 
+ 	for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid)
+ 		bch_initial_mark_key(b->c, b->level, k);
+@@ -1931,10 +1925,10 @@ static int bch_btree_check_recurse(struct btree *b, struct btree_op *op)
+ 	bch_initial_mark_key(b->c, b->level + 1, &b->key);
+ 
+ 	if (b->level) {
+-		bch_btree_iter_init(&b->keys, &iter, NULL);
++		bch_btree_iter_stack_init(&b->keys, &iter, NULL);
+ 
+ 		do {
+-			k = bch_btree_iter_next_filter(&iter, &b->keys,
++			k = bch_btree_iter_next_filter(&iter.iter, &b->keys,
+ 						       bch_ptr_bad);
+ 			if (k) {
+ 				btree_node_prefetch(b, k);
+@@ -1962,7 +1956,7 @@ static int bch_btree_check_thread(void *arg)
+ 	struct btree_check_info *info = arg;
+ 	struct btree_check_state *check_state = info->state;
+ 	struct cache_set *c = check_state->c;
+-	struct btree_iter iter;
++	struct btree_iter_stack iter;
+ 	struct bkey *k, *p;
+ 	int cur_idx, prev_idx, skip_nr;
+ 
+@@ -1970,11 +1964,9 @@ static int bch_btree_check_thread(void *arg)
+ 	cur_idx = prev_idx = 0;
+ 	ret = 0;
+ 
+-	min_heap_init(&iter.heap, NULL, MAX_BSETS);
+-
+ 	/* root node keys are checked before thread created */
+-	bch_btree_iter_init(&c->root->keys, &iter, NULL);
+-	k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad);
++	bch_btree_iter_stack_init(&c->root->keys, &iter, NULL);
++	k = bch_btree_iter_next_filter(&iter.iter, &c->root->keys, bch_ptr_bad);
+ 	BUG_ON(!k);
+ 
+ 	p = k;
+@@ -1992,7 +1984,7 @@ static int bch_btree_check_thread(void *arg)
+ 		skip_nr = cur_idx - prev_idx;
+ 
+ 		while (skip_nr) {
+-			k = bch_btree_iter_next_filter(&iter,
++			k = bch_btree_iter_next_filter(&iter.iter,
+ 						       &c->root->keys,
+ 						       bch_ptr_bad);
+ 			if (k)
+@@ -2065,11 +2057,9 @@ int bch_btree_check(struct cache_set *c)
+ 	int ret = 0;
+ 	int i;
+ 	struct bkey *k = NULL;
+-	struct btree_iter iter;
++	struct btree_iter_stack iter;
+ 	struct btree_check_state check_state;
+ 
+-	min_heap_init(&iter.heap, NULL, MAX_BSETS);
+-
+ 	/* check and mark root node keys */
+ 	for_each_key_filter(&c->root->keys, k, &iter, bch_ptr_invalid)
+ 		bch_initial_mark_key(c, c->root->level, k);
+@@ -2563,12 +2553,11 @@ static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
+ 
+ 	if (b->level) {
+ 		struct bkey *k;
+-		struct btree_iter iter;
++		struct btree_iter_stack iter;
+ 
+-		min_heap_init(&iter.heap, NULL, MAX_BSETS);
+-		bch_btree_iter_init(&b->keys, &iter, from);
++		bch_btree_iter_stack_init(&b->keys, &iter, from);
+ 
+-		while ((k = bch_btree_iter_next_filter(&iter, &b->keys,
++		while ((k = bch_btree_iter_next_filter(&iter.iter, &b->keys,
+ 						       bch_ptr_bad))) {
+ 			ret = bcache_btree(map_nodes_recurse, k, b,
+ 				    op, from, fn, flags);
+@@ -2597,12 +2586,12 @@ int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
+ {
+ 	int ret = MAP_CONTINUE;
+ 	struct bkey *k;
+-	struct btree_iter iter;
++	struct btree_iter_stack iter;
+ 
+-	min_heap_init(&iter.heap, NULL, MAX_BSETS);
+-	bch_btree_iter_init(&b->keys, &iter, from);
++	bch_btree_iter_stack_init(&b->keys, &iter, from);
+ 
+-	while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) {
++	while ((k = bch_btree_iter_next_filter(&iter.iter, &b->keys,
++					       bch_ptr_bad))) {
+ 		ret = !b->level
+ 			? fn(op, b, k)
+ 			: bcache_btree(map_keys_recurse, k,
+diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
+index a7221e5dbe8175..d626ffcbecb99c 100644
+--- a/drivers/md/bcache/extents.c
++++ b/drivers/md/bcache/extents.c
+@@ -33,16 +33,15 @@ static void sort_key_next(struct btree_iter *iter,
+ 	i->k = bkey_next(i->k);
+ 
+ 	if (i->k == i->end)
+-		*i = iter->heap.data[--iter->heap.nr];
++		*i = iter->data[--iter->used];
+ }
+ 
+-static bool new_bch_key_sort_cmp(const void *l, const void *r, void *args)
++static bool bch_key_sort_cmp(struct btree_iter_set l,
++			     struct btree_iter_set r)
+ {
+-	struct btree_iter_set *_l = (struct btree_iter_set *)l;
+-	struct btree_iter_set *_r = (struct btree_iter_set *)r;
+-	int64_t c = bkey_cmp(_l->k, _r->k);
++	int64_t c = bkey_cmp(l.k, r.k);
+ 
+-	return !(c ? c > 0 : _l->k < _r->k);
++	return c ? c > 0 : l.k < r.k;
+ }
+ 
+ static bool __ptr_invalid(struct cache_set *c, const struct bkey *k)
+@@ -239,7 +238,7 @@ static bool bch_btree_ptr_insert_fixup(struct btree_keys *bk,
+ }
+ 
+ const struct btree_keys_ops bch_btree_keys_ops = {
+-	.sort_cmp	= new_bch_key_sort_cmp,
++	.sort_cmp	= bch_key_sort_cmp,
+ 	.insert_fixup	= bch_btree_ptr_insert_fixup,
+ 	.key_invalid	= bch_btree_ptr_invalid,
+ 	.key_bad	= bch_btree_ptr_bad,
+@@ -256,36 +255,22 @@ const struct btree_keys_ops bch_btree_keys_ops = {
+  * Necessary for btree_sort_fixup() - if there are multiple keys that compare
+  * equal in different sets, we have to process them newest to oldest.
+  */
+-
+-static bool new_bch_extent_sort_cmp(const void *l, const void *r, void __always_unused *args)
+-{
+-	struct btree_iter_set *_l = (struct btree_iter_set *)l;
+-	struct btree_iter_set *_r = (struct btree_iter_set *)r;
+-	int64_t c = bkey_cmp(&START_KEY(_l->k), &START_KEY(_r->k));
+-
+-	return !(c ? c > 0 : _l->k < _r->k);
+-}
+-
+-static inline void new_btree_iter_swap(void *iter1, void *iter2, void __always_unused *args)
++static bool bch_extent_sort_cmp(struct btree_iter_set l,
++				struct btree_iter_set r)
+ {
+-	struct btree_iter_set *_iter1 = iter1;
+-	struct btree_iter_set *_iter2 = iter2;
++	int64_t c = bkey_cmp(&START_KEY(l.k), &START_KEY(r.k));
+ 
+-	swap(*_iter1, *_iter2);
++	return c ? c > 0 : l.k < r.k;
+ }
+ 
+ static struct bkey *bch_extent_sort_fixup(struct btree_iter *iter,
+ 					  struct bkey *tmp)
+ {
+-	const struct min_heap_callbacks callbacks = {
+-		.less = new_bch_extent_sort_cmp,
+-		.swp = new_btree_iter_swap,
+-	};
+-	while (iter->heap.nr > 1) {
+-		struct btree_iter_set *top = iter->heap.data, *i = top + 1;
+-
+-		if (iter->heap.nr > 2 &&
+-		    !new_bch_extent_sort_cmp(&i[0], &i[1], NULL))
++	while (iter->used > 1) {
++		struct btree_iter_set *top = iter->data, *i = top + 1;
++
++		if (iter->used > 2 &&
++		    bch_extent_sort_cmp(i[0], i[1]))
+ 			i++;
+ 
+ 		if (bkey_cmp(top->k, &START_KEY(i->k)) <= 0)
+@@ -293,7 +278,7 @@ static struct bkey *bch_extent_sort_fixup(struct btree_iter *iter,
+ 
+ 		if (!KEY_SIZE(i->k)) {
+ 			sort_key_next(iter, i);
+-			min_heap_sift_down(&iter->heap, i - top, &callbacks, NULL);
++			heap_sift(iter, i - top, bch_extent_sort_cmp);
+ 			continue;
+ 		}
+ 
+@@ -303,7 +288,7 @@ static struct bkey *bch_extent_sort_fixup(struct btree_iter *iter,
+ 			else
+ 				bch_cut_front(top->k, i->k);
+ 
+-			min_heap_sift_down(&iter->heap, i - top, &callbacks, NULL);
++			heap_sift(iter, i - top, bch_extent_sort_cmp);
+ 		} else {
+ 			/* can't happen because of comparison func */
+ 			BUG_ON(!bkey_cmp(&START_KEY(top->k), &START_KEY(i->k)));
+@@ -313,7 +298,7 @@ static struct bkey *bch_extent_sort_fixup(struct btree_iter *iter,
+ 
+ 				bch_cut_back(&START_KEY(i->k), tmp);
+ 				bch_cut_front(i->k, top->k);
+-				min_heap_sift_down(&iter->heap, 0, &callbacks, NULL);
++				heap_sift(iter, 0, bch_extent_sort_cmp);
+ 
+ 				return tmp;
+ 			} else {
+@@ -633,7 +618,7 @@ static bool bch_extent_merge(struct btree_keys *bk,
+ }
+ 
+ const struct btree_keys_ops bch_extent_keys_ops = {
+-	.sort_cmp	= new_bch_extent_sort_cmp,
++	.sort_cmp	= bch_extent_sort_cmp,
+ 	.sort_fixup	= bch_extent_sort_fixup,
+ 	.insert_fixup	= bch_extent_insert_fixup,
+ 	.key_invalid	= bch_extent_invalid,
+diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
+index 7f482729c56de6..ebd500bdf0b2fb 100644
+--- a/drivers/md/bcache/movinggc.c
++++ b/drivers/md/bcache/movinggc.c
+@@ -182,27 +182,16 @@ err:		if (!IS_ERR_OR_NULL(w->private))
+ 	closure_sync(&cl);
+ }
+ 
+-static bool new_bucket_cmp(const void *l, const void *r, void __always_unused *args)
++static bool bucket_cmp(struct bucket *l, struct bucket *r)
+ {
+-	struct bucket **_l = (struct bucket **)l;
+-	struct bucket **_r = (struct bucket **)r;
+-
+-	return GC_SECTORS_USED(*_l) >= GC_SECTORS_USED(*_r);
+-}
+-
+-static void new_bucket_swap(void *l, void *r, void __always_unused *args)
+-{
+-	struct bucket **_l = l;
+-	struct bucket **_r = r;
+-
+-	swap(*_l, *_r);
++	return GC_SECTORS_USED(l) < GC_SECTORS_USED(r);
+ }
+ 
+ static unsigned int bucket_heap_top(struct cache *ca)
+ {
+ 	struct bucket *b;
+ 
+-	return (b = min_heap_peek(&ca->heap)[0]) ? GC_SECTORS_USED(b) : 0;
++	return (b = heap_peek(&ca->heap)) ? GC_SECTORS_USED(b) : 0;
+ }
+ 
+ void bch_moving_gc(struct cache_set *c)
+@@ -210,10 +199,6 @@ void bch_moving_gc(struct cache_set *c)
+ 	struct cache *ca = c->cache;
+ 	struct bucket *b;
+ 	unsigned long sectors_to_move, reserve_sectors;
+-	const struct min_heap_callbacks callbacks = {
+-		.less = new_bucket_cmp,
+-		.swp = new_bucket_swap,
+-	};
+ 
+ 	if (!c->copy_gc_enabled)
+ 		return;
+@@ -224,7 +209,7 @@ void bch_moving_gc(struct cache_set *c)
+ 	reserve_sectors = ca->sb.bucket_size *
+ 			     fifo_used(&ca->free[RESERVE_MOVINGGC]);
+ 
+-	ca->heap.nr = 0;
++	ca->heap.used = 0;
+ 
+ 	for_each_bucket(b, ca) {
+ 		if (GC_MARK(b) == GC_MARK_METADATA ||
+@@ -233,31 +218,25 @@ void bch_moving_gc(struct cache_set *c)
+ 		    atomic_read(&b->pin))
+ 			continue;
+ 
+-		if (!min_heap_full(&ca->heap)) {
++		if (!heap_full(&ca->heap)) {
+ 			sectors_to_move += GC_SECTORS_USED(b);
+-			min_heap_push(&ca->heap, &b, &callbacks, NULL);
+-		} else if (!new_bucket_cmp(&b, min_heap_peek(&ca->heap), ca)) {
++			heap_add(&ca->heap, b, bucket_cmp);
++		} else if (bucket_cmp(b, heap_peek(&ca->heap))) {
+ 			sectors_to_move -= bucket_heap_top(ca);
+ 			sectors_to_move += GC_SECTORS_USED(b);
+ 
+ 			ca->heap.data[0] = b;
+-			min_heap_sift_down(&ca->heap, 0, &callbacks, NULL);
++			heap_sift(&ca->heap, 0, bucket_cmp);
+ 		}
+ 	}
+ 
+ 	while (sectors_to_move > reserve_sectors) {
+-		if (ca->heap.nr) {
+-			b = min_heap_peek(&ca->heap)[0];
+-			min_heap_pop(&ca->heap, &callbacks, NULL);
+-		}
++		heap_pop(&ca->heap, b, bucket_cmp);
+ 		sectors_to_move -= GC_SECTORS_USED(b);
+ 	}
+ 
+-	while (ca->heap.nr) {
+-		b = min_heap_peek(&ca->heap)[0];
+-		min_heap_pop(&ca->heap, &callbacks, NULL);
++	while (heap_pop(&ca->heap, b, bucket_cmp))
+ 		SET_GC_MOVE(b, 1);
+-	}
+ 
+ 	mutex_unlock(&c->bucket_lock);
+ 
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index f5171167819b51..1084b3f0dfe719 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -1912,7 +1912,8 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
+ 	INIT_LIST_HEAD(&c->btree_cache_freed);
+ 	INIT_LIST_HEAD(&c->data_buckets);
+ 
+-	iter_size = ((meta_bucket_pages(sb) * PAGE_SECTORS) / sb->block_size) *
++	iter_size = sizeof(struct btree_iter) +
++		    ((meta_bucket_pages(sb) * PAGE_SECTORS) / sb->block_size) *
+ 			    sizeof(struct btree_iter_set);
+ 
+ 	c->devices = kcalloc(c->nr_uuids, sizeof(void *), GFP_KERNEL);
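The restored sizeof(struct btree_iter) term accounts for the header that now sits in front of the flexible data[] array: each mempool element must hold the header plus one entry per bset. Sizing such an allocation looks like this sketch (plain malloc standing in for the mempool):

#include <stdio.h>
#include <stdlib.h>

struct iter_set { void *k, *end; };

struct iter {
	size_t size, used;
	struct iter_set data[];		/* flexible array member */
};

static struct iter *iter_alloc(size_t nr)
{
	/* header + nr entries, the same shape the patch computes for iter_size */
	struct iter *it = malloc(sizeof(*it) + nr * sizeof(struct iter_set));

	if (it) {
		it->size = nr;
		it->used = 0;
	}
	return it;
}

int main(void)
{
	struct iter *it = iter_alloc(64);

	printf("%zu bytes\n", sizeof(*it) + 64 * sizeof(struct iter_set));
	free(it);
	return 0;
}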
+diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
+index e8f696cb58c056..826b14cae4e58e 100644
+--- a/drivers/md/bcache/sysfs.c
++++ b/drivers/md/bcache/sysfs.c
+@@ -660,9 +660,7 @@ static unsigned int bch_root_usage(struct cache_set *c)
+ 	unsigned int bytes = 0;
+ 	struct bkey *k;
+ 	struct btree *b;
+-	struct btree_iter iter;
+-
+-	min_heap_init(&iter.heap, NULL, MAX_BSETS);
++	struct btree_iter_stack iter;
+ 
+ 	goto lock_root;
+ 
+diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
+index 539454d8e2d089..f61ab1bada6cf5 100644
+--- a/drivers/md/bcache/util.h
++++ b/drivers/md/bcache/util.h
+@@ -9,7 +9,6 @@
+ #include <linux/kernel.h>
+ #include <linux/sched/clock.h>
+ #include <linux/llist.h>
+-#include <linux/min_heap.h>
+ #include <linux/ratelimit.h>
+ #include <linux/vmalloc.h>
+ #include <linux/workqueue.h>
+@@ -31,10 +30,16 @@ struct closure;
+ 
+ #endif
+ 
++#define DECLARE_HEAP(type, name)					\
++	struct {							\
++		size_t size, used;					\
++		type *data;						\
++	} name
++
+ #define init_heap(heap, _size, gfp)					\
+ ({									\
+ 	size_t _bytes;							\
+-	(heap)->nr = 0;						\
++	(heap)->used = 0;						\
+ 	(heap)->size = (_size);						\
+ 	_bytes = (heap)->size * sizeof(*(heap)->data);			\
+ 	(heap)->data = kvmalloc(_bytes, (gfp) & GFP_KERNEL);		\
+@@ -47,6 +52,64 @@ do {									\
+ 	(heap)->data = NULL;						\
+ } while (0)
+ 
++#define heap_swap(h, i, j)	swap((h)->data[i], (h)->data[j])
++
++#define heap_sift(h, i, cmp)						\
++do {									\
++	size_t _r, _j = i;						\
++									\
++	for (; _j * 2 + 1 < (h)->used; _j = _r) {			\
++		_r = _j * 2 + 1;					\
++		if (_r + 1 < (h)->used &&				\
++		    cmp((h)->data[_r], (h)->data[_r + 1]))		\
++			_r++;						\
++									\
++		if (cmp((h)->data[_r], (h)->data[_j]))			\
++			break;						\
++		heap_swap(h, _r, _j);					\
++	}								\
++} while (0)
++
++#define heap_sift_down(h, i, cmp)					\
++do {									\
++	while (i) {							\
++		size_t p = (i - 1) / 2;					\
++		if (cmp((h)->data[i], (h)->data[p]))			\
++			break;						\
++		heap_swap(h, i, p);					\
++		i = p;							\
++	}								\
++} while (0)
++
++#define heap_add(h, d, cmp)						\
++({									\
++	bool _r = !heap_full(h);					\
++	if (_r) {							\
++		size_t _i = (h)->used++;				\
++		(h)->data[_i] = d;					\
++									\
++		heap_sift_down(h, _i, cmp);				\
++		heap_sift(h, _i, cmp);					\
++	}								\
++	_r;								\
++})
++
++#define heap_pop(h, d, cmp)						\
++({									\
++	bool _r = (h)->used;						\
++	if (_r) {							\
++		(d) = (h)->data[0];					\
++		(h)->used--;						\
++		heap_swap(h, 0, (h)->used);				\
++		heap_sift(h, 0, cmp);					\
++	}								\
++	_r;								\
++})
++
++#define heap_peek(h)	((h)->used ? (h)->data[0] : NULL)
++
++#define heap_full(h)	((h)->used == (h)->size)
++
+ #define DECLARE_FIFO(type, name)					\
+ 	struct {							\
+ 		size_t front, back, size, mask;				\
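These reverted macros implement a binary heap over a flat array. Note the naming quirk: heap_sift() pushes an element down toward the leaves, while heap_sift_down() actually bubbles it up toward the root; heap_add() runs both, so the element settles wherever the comparator puts it. A self-contained demo using trimmed copies of the same macros (int payload, min-heap comparator, so pops come out ascending):

#include <stdio.h>
#include <stdbool.h>

struct int_heap { size_t size, used; int *data; };

#define heap_full(h)	((h)->used == (h)->size)
#define heap_swap(h, i, j)						\
do {									\
	int _tmp = (h)->data[i];					\
	(h)->data[i] = (h)->data[j];					\
	(h)->data[j] = _tmp;						\
} while (0)

#define heap_sift(h, i, cmp)						\
do {									\
	size_t _r, _j = i;						\
	for (; _j * 2 + 1 < (h)->used; _j = _r) {			\
		_r = _j * 2 + 1;					\
		if (_r + 1 < (h)->used &&				\
		    cmp((h)->data[_r], (h)->data[_r + 1]))		\
			_r++;						\
		if (cmp((h)->data[_r], (h)->data[_j]))			\
			break;						\
		heap_swap(h, _r, _j);					\
	}								\
} while (0)

#define heap_sift_down(h, i, cmp)					\
do {									\
	while (i) {							\
		size_t _p = (i - 1) / 2;				\
		if (cmp((h)->data[i], (h)->data[_p]))			\
			break;						\
		heap_swap(h, i, _p);					\
		i = _p;							\
	}								\
} while (0)

#define heap_add(h, d, cmp)						\
({									\
	bool _r = !heap_full(h);					\
	if (_r) {							\
		size_t _i = (h)->used++;				\
		(h)->data[_i] = d;					\
		heap_sift_down(h, _i, cmp);				\
		heap_sift(h, _i, cmp);					\
	}								\
	_r;								\
})

#define heap_pop(h, d, cmp)						\
({									\
	bool _r = (h)->used;						\
	if (_r) {							\
		(d) = (h)->data[0];					\
		(h)->used--;						\
		heap_swap(h, 0, (h)->used);				\
		heap_sift(h, 0, cmp);					\
	}								\
	_r;								\
})

/* min-heap: "l > r" means "l belongs below r", so the root is the minimum */
#define min_cmp(l, r)	((l) > (r))

int main(void)
{
	int buf[8], v;
	struct int_heap h = { .size = 8, .used = 0, .data = buf };
	int in[] = { 5, 1, 4, 2, 3 };

	for (size_t i = 0; i < 5; i++)
		heap_add(&h, in[i], min_cmp);
	while (heap_pop(&h, v, min_cmp))
		printf("%d ", v);	/* 1 2 3 4 5 */
	printf("\n");
	return 0;
}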
+diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
+index c1d28e365910b9..792e070ccf38ba 100644
+--- a/drivers/md/bcache/writeback.c
++++ b/drivers/md/bcache/writeback.c
+@@ -908,16 +908,15 @@ static int bch_dirty_init_thread(void *arg)
+ 	struct dirty_init_thrd_info *info = arg;
+ 	struct bch_dirty_init_state *state = info->state;
+ 	struct cache_set *c = state->c;
+-	struct btree_iter iter;
++	struct btree_iter_stack iter;
+ 	struct bkey *k, *p;
+ 	int cur_idx, prev_idx, skip_nr;
+ 
+ 	k = p = NULL;
+ 	prev_idx = 0;
+ 
+-	min_heap_init(&iter.heap, NULL, MAX_BSETS);
+-	bch_btree_iter_init(&c->root->keys, &iter, NULL);
+-	k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad);
++	bch_btree_iter_stack_init(&c->root->keys, &iter, NULL);
++	k = bch_btree_iter_next_filter(&iter.iter, &c->root->keys, bch_ptr_bad);
+ 	BUG_ON(!k);
+ 
+ 	p = k;
+@@ -931,7 +930,7 @@ static int bch_dirty_init_thread(void *arg)
+ 		skip_nr = cur_idx - prev_idx;
+ 
+ 		while (skip_nr) {
+-			k = bch_btree_iter_next_filter(&iter,
++			k = bch_btree_iter_next_filter(&iter.iter,
+ 						       &c->root->keys,
+ 						       bch_ptr_bad);
+ 			if (k)
+@@ -980,13 +979,11 @@ void bch_sectors_dirty_init(struct bcache_device *d)
+ 	int i;
+ 	struct btree *b = NULL;
+ 	struct bkey *k = NULL;
+-	struct btree_iter iter;
++	struct btree_iter_stack iter;
+ 	struct sectors_dirty_init op;
+ 	struct cache_set *c = d->c;
+ 	struct bch_dirty_init_state state;
+ 
+-	min_heap_init(&iter.heap, NULL, MAX_BSETS);
+-
+ retry_lock:
+ 	b = c->root;
+ 	rw_lock(0, b, b->level);
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 7809b951e09aa0..4b32917236703e 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -9702,8 +9702,8 @@ void md_check_recovery(struct mddev *mddev)
+ 			 * remove disk.
+ 			 */
+ 			rdev_for_each_safe(rdev, tmp, mddev) {
+-				if (test_and_clear_bit(ClusterRemove, &rdev->flags) &&
+-						rdev->raid_disk < 0)
++				if (rdev->raid_disk < 0 &&
++				    test_and_clear_bit(ClusterRemove, &rdev->flags))
+ 					md_kick_rdev_from_array(rdev);
+ 			}
+ 		}
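The reorder above matters because test_and_clear_bit() has a side effect: with the old operand order the ClusterRemove flag was consumed even when rdev->raid_disk >= 0, so the removal was silently forgotten. Putting the cheap, side-effect-free test first lets && short-circuit. A toy demonstration (a plain helper stands in for the atomic bitop):

#include <stdio.h>
#include <stdbool.h>

static bool test_and_clear(unsigned long *flags, int bit)
{
	bool was_set = *flags & (1UL << bit);

	*flags &= ~(1UL << bit);	/* side effect happens whenever called */
	return was_set;
}

int main(void)
{
	unsigned long flags = 1;	/* bit 0 = "ClusterRemove" stand-in */
	int raid_disk = 2;		/* device still in the array */

	/* buggy order: the bit is consumed even though we take no action */
	if (test_and_clear(&flags, 0) && raid_disk < 0)
		puts("kick");
	printf("buggy order: flags=%lu\n", flags);	/* 0: flag lost */

	flags = 1;
	/* fixed order: && short-circuits, the side effect never runs */
	if (raid_disk < 0 && test_and_clear(&flags, 0))
		puts("kick");
	printf("fixed order: flags=%lu\n", flags);	/* 1: flag kept */
	return 0;
}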
+@@ -10000,8 +10000,11 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
+ 
+ 	/* Check for change of roles in the active devices */
+ 	rdev_for_each_safe(rdev2, tmp, mddev) {
+-		if (test_bit(Faulty, &rdev2->flags))
++		if (test_bit(Faulty, &rdev2->flags)) {
++			if (test_bit(ClusterRemove, &rdev2->flags))
++				set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ 			continue;
++		}
+ 
+ 		/* Check if the roles changed */
+ 		role = le16_to_cpu(sb->dev_roles[rdev2->desc_nr]);
+diff --git a/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c b/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c
+index 22442fce760785..3853245fcf6e58 100644
+--- a/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c
++++ b/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c
+@@ -619,6 +619,7 @@ static void ti_csi2rx_dma_callback(void *param)
+ 
+ 		if (ti_csi2rx_start_dma(csi, buf)) {
+ 			dev_err(csi->dev, "Failed to queue the next buffer for DMA\n");
++			list_del(&buf->list);
+ 			vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ 		} else {
+ 			list_move_tail(&buf->list, &dma->submitted);
+diff --git a/drivers/media/v4l2-core/v4l2-ctrls-core.c b/drivers/media/v4l2-core/v4l2-ctrls-core.c
+index eeab6a5eb7baca..675642af8601f8 100644
+--- a/drivers/media/v4l2-core/v4l2-ctrls-core.c
++++ b/drivers/media/v4l2-core/v4l2-ctrls-core.c
+@@ -897,12 +897,12 @@ static int std_validate_compound(const struct v4l2_ctrl *ctrl, u32 idx,
+ 
+ 			p_h264_sps->flags &=
+ 				~V4L2_H264_SPS_FLAG_QPPRIME_Y_ZERO_TRANSFORM_BYPASS;
+-
+-			if (p_h264_sps->chroma_format_idc < 3)
+-				p_h264_sps->flags &=
+-					~V4L2_H264_SPS_FLAG_SEPARATE_COLOUR_PLANE;
+ 		}
+ 
++		if (p_h264_sps->chroma_format_idc < 3)
++			p_h264_sps->flags &=
++				~V4L2_H264_SPS_FLAG_SEPARATE_COLOUR_PLANE;
++
+ 		if (p_h264_sps->flags & V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY)
+ 			p_h264_sps->flags &=
+ 				~V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD;
+diff --git a/drivers/misc/mei/platform-vsc.c b/drivers/misc/mei/platform-vsc.c
+index 20a11b299bcd00..ab80bd3271b21a 100644
+--- a/drivers/misc/mei/platform-vsc.c
++++ b/drivers/misc/mei/platform-vsc.c
+@@ -379,6 +379,8 @@ static int mei_vsc_probe(struct platform_device *pdev)
+ err_cancel:
+ 	mei_cancel_work(mei_dev);
+ 
++	vsc_tp_register_event_cb(tp, NULL, NULL);
++
+ 	mei_disable_interrupts(mei_dev);
+ 
+ 	return ret;
+@@ -387,11 +389,14 @@ static int mei_vsc_probe(struct platform_device *pdev)
+ static void mei_vsc_remove(struct platform_device *pdev)
+ {
+ 	struct mei_device *mei_dev = platform_get_drvdata(pdev);
++	struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
+ 
+ 	pm_runtime_disable(mei_dev->dev);
+ 
+ 	mei_stop(mei_dev);
+ 
++	vsc_tp_register_event_cb(hw->tp, NULL, NULL);
++
+ 	mei_disable_interrupts(mei_dev);
+ 
+ 	mei_deregister(mei_dev);
+diff --git a/drivers/misc/mei/vsc-tp.c b/drivers/misc/mei/vsc-tp.c
+index 5e44b518f36c74..27c921c752e9a4 100644
+--- a/drivers/misc/mei/vsc-tp.c
++++ b/drivers/misc/mei/vsc-tp.c
+@@ -79,9 +79,8 @@ struct vsc_tp {
+ 
+ 	vsc_tp_event_cb_t event_notify;
+ 	void *event_notify_context;
+-
+-	/* used to protect command download */
+-	struct mutex mutex;
++	struct mutex event_notify_mutex;	/* protects event_notify + context */
++	struct mutex mutex;			/* protects command download */
+ };
+ 
+ /* GPIO resources */
+@@ -113,6 +112,8 @@ static irqreturn_t vsc_tp_thread_isr(int irq, void *data)
+ {
+ 	struct vsc_tp *tp = data;
+ 
++	guard(mutex)(&tp->event_notify_mutex);
++
+ 	if (tp->event_notify)
+ 		tp->event_notify(tp->event_notify_context);
+ 
+@@ -401,6 +402,8 @@ EXPORT_SYMBOL_NS_GPL(vsc_tp_need_read, VSC_TP);
+ int vsc_tp_register_event_cb(struct vsc_tp *tp, vsc_tp_event_cb_t event_cb,
+ 			    void *context)
+ {
++	guard(mutex)(&tp->event_notify_mutex);
++
+ 	tp->event_notify = event_cb;
+ 	tp->event_notify_context = context;
+ 
+@@ -532,6 +535,7 @@ static int vsc_tp_probe(struct spi_device *spi)
+ 		return ret;
+ 
+ 	mutex_init(&tp->mutex);
++	mutex_init(&tp->event_notify_mutex);
+ 
+ 	/* only one child acpi device */
+ 	ret = acpi_dev_for_each_child(ACPI_COMPANION(dev),
+@@ -554,10 +558,11 @@ static int vsc_tp_probe(struct spi_device *spi)
+ 	return 0;
+ 
+ err_destroy_lock:
+-	mutex_destroy(&tp->mutex);
+-
+ 	free_irq(spi->irq, tp);
+ 
++	mutex_destroy(&tp->event_notify_mutex);
++	mutex_destroy(&tp->mutex);
++
+ 	return ret;
+ }
+ 
+@@ -567,9 +572,10 @@ static void vsc_tp_remove(struct spi_device *spi)
+ 
+ 	platform_device_unregister(tp->pdev);
+ 
+-	mutex_destroy(&tp->mutex);
+-
+ 	free_irq(spi->irq, tp);
++
++	mutex_destroy(&tp->event_notify_mutex);
++	mutex_destroy(&tp->mutex);
+ }
+ 
+ static void vsc_tp_shutdown(struct spi_device *spi)
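guard(mutex)(&m) comes from linux/cleanup.h: it acquires the mutex and releases it automatically when the guard goes out of scope, which is why neither the ISR nor the register path above needs an explicit unlock on any return. A rough userspace analogue built on the compiler's cleanup attribute (the kernel's macro machinery differs in detail; this simplified GUARD() supports one guard per scope):

#include <stdio.h>
#include <pthread.h>

static void unlock_cleanup(pthread_mutex_t **m) { pthread_mutex_unlock(*m); }

/* rough analogue of the kernel's guard(mutex)(...) */
#define GUARD(m)							\
	pthread_mutex_t *_guard						\
		__attribute__((cleanup(unlock_cleanup))) =		\
		(pthread_mutex_lock(m), (m))

static pthread_mutex_t ev_mutex = PTHREAD_MUTEX_INITIALIZER;
static void (*event_cb)(void *);
static void *event_ctx;

static void register_cb(void (*cb)(void *), void *ctx)
{
	GUARD(&ev_mutex);		/* unlocked automatically on return */
	event_cb = cb;
	event_ctx = ctx;
}

static void isr_body(void)
{
	GUARD(&ev_mutex);
	if (event_cb)
		event_cb(event_ctx);
}

static void hello(void *ctx) { puts((const char *)ctx); }

int main(void)
{
	register_cb(hello, "event");
	isr_body();
	register_cb(NULL, NULL);	/* mirrors the remove() path in the patch */
	isr_body();			/* no callback left: nothing to call */
	return 0;
}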
+diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
+index 8c22064ead3870..f2bd1984609ccc 100644
+--- a/drivers/mtd/ftl.c
++++ b/drivers/mtd/ftl.c
+@@ -344,7 +344,7 @@ static int erase_xfer(partition_t *part,
+             return -ENOMEM;
+ 
+     erase->addr = xfer->Offset;
+-    erase->len = 1 << part->header.EraseUnitSize;
++    erase->len = 1ULL << part->header.EraseUnitSize;
+ 
+     ret = mtd_erase(part->mbd.mtd, erase);
+     if (!ret) {
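The 1ULL matters because erase->len is a 64-bit field and shifting a plain int 1 by 32 or more is undefined behaviour, so large EraseUnitSize values silently truncated the length. A short illustration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int unit = 17;		/* 128 KiB unit: int and ULL agree */
	printf("%llu\n", (unsigned long long)(1ULL << unit));

	unit = 32;			/* 4 GiB unit: "1 << unit" would be UB */
	printf("%llu\n", (unsigned long long)(1ULL << unit));	/* 4294967296 */
	return 0;
}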
+diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c
+index f9ccfd02e80456..6f8eb7fa4c59fc 100644
+--- a/drivers/mtd/nand/raw/atmel/nand-controller.c
++++ b/drivers/mtd/nand/raw/atmel/nand-controller.c
+@@ -373,7 +373,7 @@ static int atmel_nand_dma_transfer(struct atmel_nand_controller *nc,
+ 	dma_cookie_t cookie;
+ 
+ 	buf_dma = dma_map_single(nc->dev, buf, len, dir);
+-	if (dma_mapping_error(nc->dev, dev_dma)) {
++	if (dma_mapping_error(nc->dev, buf_dma)) {
+ 		dev_err(nc->dev,
+ 			"Failed to prepare a buffer for DMA access\n");
+ 		goto err;
+diff --git a/drivers/mtd/nand/raw/atmel/pmecc.c b/drivers/mtd/nand/raw/atmel/pmecc.c
+index 3c7dee1be21df1..0b402823b619cf 100644
+--- a/drivers/mtd/nand/raw/atmel/pmecc.c
++++ b/drivers/mtd/nand/raw/atmel/pmecc.c
+@@ -143,6 +143,7 @@ struct atmel_pmecc_caps {
+ 	int nstrengths;
+ 	int el_offset;
+ 	bool correct_erased_chunks;
++	bool clk_ctrl;
+ };
+ 
+ struct atmel_pmecc {
+@@ -843,6 +844,10 @@ static struct atmel_pmecc *atmel_pmecc_create(struct platform_device *pdev,
+ 	if (IS_ERR(pmecc->regs.errloc))
+ 		return ERR_CAST(pmecc->regs.errloc);
+ 
++	/* pmecc data setup time */
++	if (caps->clk_ctrl)
++		writel(PMECC_CLK_133MHZ, pmecc->regs.base + ATMEL_PMECC_CLK);
++
+ 	/* Disable all interrupts before registering the PMECC handler. */
+ 	writel(0xffffffff, pmecc->regs.base + ATMEL_PMECC_IDR);
+ 	atmel_pmecc_reset(pmecc);
+@@ -896,6 +901,7 @@ static struct atmel_pmecc_caps at91sam9g45_caps = {
+ 	.strengths = atmel_pmecc_strengths,
+ 	.nstrengths = 5,
+ 	.el_offset = 0x8c,
++	.clk_ctrl = true,
+ };
+ 
+ static struct atmel_pmecc_caps sama5d4_caps = {
+diff --git a/drivers/mtd/nand/raw/rockchip-nand-controller.c b/drivers/mtd/nand/raw/rockchip-nand-controller.c
+index 51c9cf9013dc28..1b65b3aa6aa2fd 100644
+--- a/drivers/mtd/nand/raw/rockchip-nand-controller.c
++++ b/drivers/mtd/nand/raw/rockchip-nand-controller.c
+@@ -656,9 +656,16 @@ static int rk_nfc_write_page_hwecc(struct nand_chip *chip, const u8 *buf,
+ 
+ 	dma_data = dma_map_single(nfc->dev, (void *)nfc->page_buf,
+ 				  mtd->writesize, DMA_TO_DEVICE);
++	if (dma_mapping_error(nfc->dev, dma_data))
++		return -ENOMEM;
++
+ 	dma_oob = dma_map_single(nfc->dev, nfc->oob_buf,
+ 				 ecc->steps * oob_step,
+ 				 DMA_TO_DEVICE);
++	if (dma_mapping_error(nfc->dev, dma_oob)) {
++		dma_unmap_single(nfc->dev, dma_data, mtd->writesize, DMA_TO_DEVICE);
++		return -ENOMEM;
++	}
+ 
+ 	reinit_completion(&nfc->done);
+ 	writel(INT_DMA, nfc->regs + nfc->cfg->int_en_off);
+@@ -772,9 +779,17 @@ static int rk_nfc_read_page_hwecc(struct nand_chip *chip, u8 *buf, int oob_on,
+ 	dma_data = dma_map_single(nfc->dev, nfc->page_buf,
+ 				  mtd->writesize,
+ 				  DMA_FROM_DEVICE);
++	if (dma_mapping_error(nfc->dev, dma_data))
++		return -ENOMEM;
++
+ 	dma_oob = dma_map_single(nfc->dev, nfc->oob_buf,
+ 				 ecc->steps * oob_step,
+ 				 DMA_FROM_DEVICE);
++	if (dma_mapping_error(nfc->dev, dma_oob)) {
++		dma_unmap_single(nfc->dev, dma_data, mtd->writesize,
++				 DMA_FROM_DEVICE);
++		return -ENOMEM;
++	}
+ 
+ 	/*
+ 	 * The first blocks (4, 8 or 16 depending on the device)
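Both the atmel and rockchip hunks add the mandatory dma_mapping_error() check after dma_map_single(): a failed mapping must never reach the hardware, and any buffer already mapped has to be unwound before bailing out. A userspace-flavoured sketch of the pairwise map/unwind pattern (map_buf()/unmap_buf() are hypothetical stand-ins for the DMA API):

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

typedef unsigned long dma_addr_t;
#define DMA_MAPPING_ERROR	(~0UL)

/* hypothetical stand-ins for dma_map_single()/dma_unmap_single() */
static dma_addr_t map_buf(void *buf, int fail)
{
	return fail ? DMA_MAPPING_ERROR : (dma_addr_t)(uintptr_t)buf;
}
static void unmap_buf(dma_addr_t h) { (void)h; }
static int mapping_error(dma_addr_t h) { return h == DMA_MAPPING_ERROR; }

static int start_io(void *data, void *oob, int fail_oob)
{
	dma_addr_t dma_data, dma_oob;

	dma_data = map_buf(data, 0);
	if (mapping_error(dma_data))
		return -ENOMEM;		/* nothing mapped yet */

	dma_oob = map_buf(oob, fail_oob);
	if (mapping_error(dma_oob)) {
		unmap_buf(dma_data);	/* unwind the first mapping */
		return -ENOMEM;
	}
	/* ... kick DMA, wait for completion, then unmap both ... */
	unmap_buf(dma_oob);
	unmap_buf(dma_data);
	return 0;
}

int main(void)
{
	char d[16], o[16];

	printf("%d %d\n", start_io(d, o, 0), start_io(d, o, 1));
	return 0;
}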
+diff --git a/drivers/mtd/spi-nor/spansion.c b/drivers/mtd/spi-nor/spansion.c
+index 5a88a6096ca8c9..fcd081b75784b0 100644
+--- a/drivers/mtd/spi-nor/spansion.c
++++ b/drivers/mtd/spi-nor/spansion.c
+@@ -17,6 +17,7 @@
+ 
+ #define SPINOR_OP_CLSR		0x30	/* Clear status register 1 */
+ #define SPINOR_OP_CLPEF		0x82	/* Clear program/erase failure flags */
++#define SPINOR_OP_CYPRESS_EX4B	0xB8	/* Exit 4-byte address mode */
+ #define SPINOR_OP_CYPRESS_DIE_ERASE		0x61	/* Chip (die) erase */
+ #define SPINOR_OP_RD_ANY_REG			0x65	/* Read any register */
+ #define SPINOR_OP_WR_ANY_REG			0x71	/* Write any register */
+@@ -58,6 +59,13 @@
+ 		   SPI_MEM_OP_DUMMY(ndummy, 0),				\
+ 		   SPI_MEM_OP_DATA_IN(1, buf, 0))
+ 
++#define CYPRESS_NOR_EN4B_EX4B_OP(enable)				\
++	SPI_MEM_OP(SPI_MEM_OP_CMD(enable ? SPINOR_OP_EN4B :		\
++					   SPINOR_OP_CYPRESS_EX4B, 0),	\
++		   SPI_MEM_OP_NO_ADDR,					\
++		   SPI_MEM_OP_NO_DUMMY,					\
++		   SPI_MEM_OP_NO_DATA)
++
+ #define SPANSION_OP(opcode)						\
+ 	SPI_MEM_OP(SPI_MEM_OP_CMD(opcode, 0),				\
+ 		   SPI_MEM_OP_NO_ADDR,					\
+@@ -356,6 +364,20 @@ static int cypress_nor_quad_enable_volatile(struct spi_nor *nor)
+ 	return 0;
+ }
+ 
++static int cypress_nor_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
++{
++	int ret;
++	struct spi_mem_op op = CYPRESS_NOR_EN4B_EX4B_OP(enable);
++
++	spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
++
++	ret = spi_mem_exec_op(nor->spimem, &op);
++	if (ret)
++		dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret);
++
++	return ret;
++}
++
+ /**
+  * cypress_nor_determine_addr_mode_by_sr1() - Determine current address mode
+  *                                            (3 or 4-byte) by querying status
+@@ -526,6 +548,9 @@ s25fs256t_post_bfpt_fixup(struct spi_nor *nor,
+ 	struct spi_mem_op op;
+ 	int ret;
+ 
++	/* Assign 4-byte address mode method that is not determined in BFPT */
++	nor->params->set_4byte_addr_mode = cypress_nor_set_4byte_addr_mode;
++
+ 	ret = cypress_nor_set_addr_mode_nbytes(nor);
+ 	if (ret)
+ 		return ret;
+@@ -591,6 +616,9 @@ s25hx_t_post_bfpt_fixup(struct spi_nor *nor,
+ {
+ 	int ret;
+ 
++	/* Assign 4-byte address mode method that is not determined in BFPT */
++	nor->params->set_4byte_addr_mode = cypress_nor_set_4byte_addr_mode;
++
+ 	ret = cypress_nor_set_addr_mode_nbytes(nor);
+ 	if (ret)
+ 		return ret;
+@@ -718,6 +746,9 @@ static int s28hx_t_post_bfpt_fixup(struct spi_nor *nor,
+ 				   const struct sfdp_parameter_header *bfpt_header,
+ 				   const struct sfdp_bfpt *bfpt)
+ {
++	/* Assign 4-byte address mode method that is not determined in BFPT */
++	nor->params->set_4byte_addr_mode = cypress_nor_set_4byte_addr_mode;
++
+ 	return cypress_nor_set_addr_mode_nbytes(nor);
+ }
+ 
+diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c
+index 3fa83f05bfcc8e..2bef6da4befa29 100644
+--- a/drivers/net/can/kvaser_pciefd.c
++++ b/drivers/net/can/kvaser_pciefd.c
+@@ -981,6 +981,7 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
+ 		can->completed_tx_bytes = 0;
+ 		can->bec.txerr = 0;
+ 		can->bec.rxerr = 0;
++		can->can.dev->dev_port = i;
+ 
+ 		init_completion(&can->start_comp);
+ 		init_completion(&can->flush_comp);
+diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+index 7d12776ab63e6d..8bd7e800af8fad 100644
+--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
++++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+@@ -851,6 +851,7 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel)
+ 	netdev->ethtool_ops = &kvaser_usb_ethtool_ops;
+ 	SET_NETDEV_DEV(netdev, &dev->intf->dev);
+ 	netdev->dev_id = channel;
++	netdev->dev_port = channel;
+ 
+ 	dev->nets[channel] = priv;
+ 
+diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+index 4d85b29a17b787..ebefc274b50a5f 100644
+--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
++++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+@@ -49,7 +49,7 @@ struct __packed pcan_ufd_fw_info {
+ 	__le32	ser_no;		/* S/N */
+ 	__le32	flags;		/* special functions */
+ 
+-	/* extended data when type == PCAN_USBFD_TYPE_EXT */
++	/* extended data when type >= PCAN_USBFD_TYPE_EXT */
+ 	u8	cmd_out_ep;	/* ep for cmd */
+ 	u8	cmd_in_ep;	/* ep for replies */
+ 	u8	data_out_ep[2];	/* ep for CANx TX */
+@@ -982,10 +982,11 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev)
+ 			dev->can.ctrlmode |= CAN_CTRLMODE_FD_NON_ISO;
+ 		}
+ 
+-		/* if vendor rsp is of type 2, then it contains EP numbers to
+-		 * use for cmds pipes. If not, then default EP should be used.
++		/* if vendor rsp type is greater than or equal to 2, then it
++		 * contains EP numbers to use for cmds pipes. If not, then
++		 * default EP should be used.
+ 		 */
+-		if (fw_info->type != cpu_to_le16(PCAN_USBFD_TYPE_EXT)) {
++		if (le16_to_cpu(fw_info->type) < PCAN_USBFD_TYPE_EXT) {
+ 			fw_info->cmd_out_ep = PCAN_USBPRO_EP_CMDOUT;
+ 			fw_info->cmd_in_ep = PCAN_USBPRO_EP_CMDIN;
+ 		}
+@@ -1018,11 +1019,11 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev)
+ 	dev->can_channel_id =
+ 		le32_to_cpu(pdev->usb_if->fw_info.dev_id[dev->ctrl_idx]);
+ 
+-	/* if vendor rsp is of type 2, then it contains EP numbers to
+-	 * use for data pipes. If not, then statically defined EP are used
+-	 * (see peak_usb_create_dev()).
++	/* if vendor rsp type is greater than or equal to 2, then it contains EP
++	 * numbers to use for data pipes. If not, then statically defined EP are
++	 * used (see peak_usb_create_dev()).
+ 	 */
+-	if (fw_info->type == cpu_to_le16(PCAN_USBFD_TYPE_EXT)) {
++	if (le16_to_cpu(fw_info->type) >= PCAN_USBFD_TYPE_EXT) {
+ 		dev->ep_msg_in = fw_info->data_in_ep;
+ 		dev->ep_msg_out = fw_info->data_out_ep[dev->ctrl_idx];
+ 	}
+diff --git a/drivers/net/dsa/microchip/ksz8.c b/drivers/net/dsa/microchip/ksz8.c
+index da7110d675583d..6c7454a43bced6 100644
+--- a/drivers/net/dsa/microchip/ksz8.c
++++ b/drivers/net/dsa/microchip/ksz8.c
+@@ -371,6 +371,9 @@ static void ksz8863_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
+ 	addr -= dev->info->reg_mib_cnt;
+ 	ctrl_addr = addr ? KSZ8863_MIB_PACKET_DROPPED_TX_0 :
+ 			   KSZ8863_MIB_PACKET_DROPPED_RX_0;
++	if (ksz_is_8895_family(dev) &&
++	    ctrl_addr == KSZ8863_MIB_PACKET_DROPPED_RX_0)
++		ctrl_addr = KSZ8895_MIB_PACKET_DROPPED_RX_0;
+ 	ctrl_addr += port;
+ 	ctrl_addr |= IND_ACC_TABLE(TABLE_MIB | TABLE_READ);
+ 
+diff --git a/drivers/net/dsa/microchip/ksz8_reg.h b/drivers/net/dsa/microchip/ksz8_reg.h
+index 329688603a582b..da80e659c64809 100644
+--- a/drivers/net/dsa/microchip/ksz8_reg.h
++++ b/drivers/net/dsa/microchip/ksz8_reg.h
+@@ -784,7 +784,9 @@
+ #define KSZ8795_MIB_TOTAL_TX_1		0x105
+ 
+ #define KSZ8863_MIB_PACKET_DROPPED_TX_0 0x100
+-#define KSZ8863_MIB_PACKET_DROPPED_RX_0 0x105
++#define KSZ8863_MIB_PACKET_DROPPED_RX_0 0x103
++
++#define KSZ8895_MIB_PACKET_DROPPED_RX_0 0x105
+ 
+ #define MIB_PACKET_DROPPED		0x0000FFFF
+ 
+diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
+index a89aa4ac0a064a..779f1324bb5f82 100644
+--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
++++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
+@@ -3852,8 +3852,8 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
+ 	status = be_mcc_notify_wait(adapter);
+ 
+ err:
+-	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
+ 	spin_unlock_bh(&adapter->mcc_lock);
++	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
+ 	return status;
+ }
+ 
+diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h
+index 6119a410883815..65a2816142d962 100644
+--- a/drivers/net/ethernet/intel/fm10k/fm10k.h
++++ b/drivers/net/ethernet/intel/fm10k/fm10k.h
+@@ -189,13 +189,14 @@ struct fm10k_q_vector {
+ 	struct fm10k_ring_container rx, tx;
+ 
+ 	struct napi_struct napi;
++	struct rcu_head rcu;	/* to avoid race with update stats on free */
++
+ 	cpumask_t affinity_mask;
+ 	char name[IFNAMSIZ + 9];
+ 
+ #ifdef CONFIG_DEBUG_FS
+ 	struct dentry *dbg_q_vector;
+ #endif /* CONFIG_DEBUG_FS */
+-	struct rcu_head rcu;	/* to avoid race with update stats on free */
+ 
+ 	/* for dynamic allocation of rings associated with this q_vector */
+ 	struct fm10k_ring ring[] ____cacheline_internodealigned_in_smp;
+diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
+index d4255c2706fa33..b22bb0ae9b9d18 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e.h
++++ b/drivers/net/ethernet/intel/i40e/i40e.h
+@@ -943,6 +943,7 @@ struct i40e_q_vector {
+ 	u16 reg_idx;		/* register index of the interrupt */
+ 
+ 	struct napi_struct napi;
++	struct rcu_head rcu;	/* to avoid race with update stats on free */
+ 
+ 	struct i40e_ring_container rx;
+ 	struct i40e_ring_container tx;
+@@ -953,7 +954,6 @@ struct i40e_q_vector {
+ 	cpumask_t affinity_mask;
+ 	struct irq_affinity_notify affinity_notify;
+ 
+-	struct rcu_head rcu;	/* to avoid race with update stats on free */
+ 	char name[I40E_INT_NAME_STR_LEN];
+ 	bool arm_wb_state;
+ 	bool in_busy_poll;
+diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
+index 7c6f81beaee460..369c968a0117d0 100644
+--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
++++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
+@@ -2226,6 +2226,7 @@ ice_ptp_get_syncdevicetime(ktime_t *device,
+ 			hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo;
+ 			system->cycles = hh_ts;
+ 			system->cs_id = CSID_X86_ART;
++			system->use_nsecs = true;
+ 			/* Read Device source clock time */
+ 			hh_ts_lo = rd32(hw, GLTSYN_HHTIME_L(tmr_idx));
+ 			hh_ts_hi = rd32(hw, GLTSYN_HHTIME_H(tmr_idx));
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+index 559b443c409f7c..c1f29296c1d595 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+@@ -503,9 +503,10 @@ struct ixgbe_q_vector {
+ 	struct ixgbe_ring_container rx, tx;
+ 
+ 	struct napi_struct napi;
++	struct rcu_head rcu;	/* to avoid race with update stats on free */
++
+ 	cpumask_t affinity_mask;
+ 	int numa_node;
+-	struct rcu_head rcu;	/* to avoid race with update stats on free */
+ 	char name[IFNAMSIZ + 9];
+ 
+ 	/* for dynamic allocation of rings associated with this q_vector */
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
+index 8e25f4ef5cccee..5ae787656a7ca0 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
+@@ -331,6 +331,9 @@ static int port_set_buffer(struct mlx5e_priv *priv,
+ 	if (err)
+ 		goto out;
+ 
++	/* RO bits should be set to 0 on write */
++	MLX5_SET(pbmc_reg, in, port_buffer_size, 0);
++
+ 	err = mlx5e_port_set_pbmc(mdev, in);
+ out:
+ 	kfree(in);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
+index 727fa7c185238c..6056106edcc647 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
+@@ -327,6 +327,10 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
+ 	if (unlikely(!sa_entry)) {
+ 		rcu_read_unlock();
+ 		atomic64_inc(&ipsec->sw_stats.ipsec_rx_drop_sadb_miss);
++		/* Clear secpath to prevent invalid dereference
++		 * in downstream XFRM policy checks.
++		 */
++		secpath_reset(skb);
+ 		return;
+ 	}
+ 	xfrm_state_hold(sa_entry->x);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+index 8ed47e7a7515b8..673043d9ed11a5 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+@@ -1569,6 +1569,7 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
+ 		unsigned int hdrlen = mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
+ 
+ 		skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt - hdrlen, lro_num_seg);
++		skb_shinfo(skb)->gso_segs = lro_num_seg;
+ 		/* Subtract one since we already counted this as one
+ 		 * "regular" packet in mlx5e_complete_rx_cqe()
+ 		 */
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c
+index 7c5516b0a84494..8115071c34a4ae 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c
+@@ -30,7 +30,7 @@ struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev)
+ 
+ 	dm = kzalloc(sizeof(*dm), GFP_KERNEL);
+ 	if (!dm)
+-		return ERR_PTR(-ENOMEM);
++		return NULL;
+ 
+ 	spin_lock_init(&dm->lock);
+ 
+@@ -96,7 +96,7 @@ struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev)
+ err_steering:
+ 	kfree(dm);
+ 
+-	return ERR_PTR(-ENOMEM);
++	return NULL;
+ }
+ 
+ void mlx5_dm_cleanup(struct mlx5_core_dev *dev)
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+index 5bc947f703b5ea..11d8739b9497a7 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+@@ -1092,9 +1092,6 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
+ 	}
+ 
+ 	dev->dm = mlx5_dm_create(dev);
+-	if (IS_ERR(dev->dm))
+-		mlx5_core_warn(dev, "Failed to init device memory %ld\n", PTR_ERR(dev->dm));
+-
+ 	dev->tracer = mlx5_fw_tracer_create(dev);
+ 	dev->hv_vhca = mlx5_hv_vhca_create(dev);
+ 	dev->rsc_dump = mlx5_rsc_dump_create(dev);
+diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
+index 6a6d7e22f1a722..fc52db8e36f2e1 100644
+--- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
++++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
+@@ -389,8 +389,8 @@ static void fbnic_page_pool_init(struct fbnic_ring *ring, unsigned int idx,
+ {
+ 	struct fbnic_rx_buf *rx_buf = &ring->rx_buf[idx];
+ 
+-	page_pool_fragment_page(page, PAGECNT_BIAS_MAX);
+-	rx_buf->pagecnt_bias = PAGECNT_BIAS_MAX;
++	page_pool_fragment_page(page, FBNIC_PAGECNT_BIAS_MAX);
++	rx_buf->pagecnt_bias = FBNIC_PAGECNT_BIAS_MAX;
+ 	rx_buf->page = page;
+ }
+ 
+diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
+index 2f91f68d11d57c..05cde71db9dfda 100644
+--- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
++++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
+@@ -59,10 +59,8 @@ struct fbnic_queue_stats {
+ 	struct u64_stats_sync syncp;
+ };
+ 
+-/* Pagecnt bias is long max to reserve the last bit to catch overflow
+- * cases where if we overcharge the bias it will flip over to be negative.
+- */
+-#define PAGECNT_BIAS_MAX	LONG_MAX
++#define FBNIC_PAGECNT_BIAS_MAX	PAGE_SIZE
++
+ struct fbnic_rx_buf {
+ 	struct page *page;
+ 	long pagecnt_bias;
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 36328298dc9b81..058cd9e9fd71dc 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -2500,7 +2500,7 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
+ 
+ 	budget = min(budget, stmmac_tx_avail(priv, queue));
+ 
+-	while (budget-- > 0) {
++	for (; budget > 0; budget--) {
+ 		struct stmmac_metadata_request meta_req;
+ 		struct xsk_tx_metadata *meta = NULL;
+ 		dma_addr_t dma_addr;
+diff --git a/drivers/net/ipa/ipa_sysfs.c b/drivers/net/ipa/ipa_sysfs.c
+index a59bd215494c9b..a53e9e6f6cdf50 100644
+--- a/drivers/net/ipa/ipa_sysfs.c
++++ b/drivers/net/ipa/ipa_sysfs.c
+@@ -37,8 +37,12 @@ static const char *ipa_version_string(struct ipa *ipa)
+ 		return "4.11";
+ 	case IPA_VERSION_5_0:
+ 		return "5.0";
++	case IPA_VERSION_5_1:
++		return "5.1";
++	case IPA_VERSION_5_5:
++		return "5.5";
+ 	default:
+-		return "0.0";	/* Won't happen (checked at probe time) */
++		return "0.0";	/* Should not happen */
+ 	}
+ }
+ 
+diff --git a/drivers/net/mdio/mdio-bcm-unimac.c b/drivers/net/mdio/mdio-bcm-unimac.c
+index b7bc70586ee0a4..369540b43ada45 100644
+--- a/drivers/net/mdio/mdio-bcm-unimac.c
++++ b/drivers/net/mdio/mdio-bcm-unimac.c
+@@ -209,10 +209,9 @@ static int unimac_mdio_clk_set(struct unimac_mdio_priv *priv)
+ 	if (ret)
+ 		return ret;
+ 
+-	if (!priv->clk)
++	rate = clk_get_rate(priv->clk);
++	if (!rate)
+ 		rate = 250000000;
+-	else
+-		rate = clk_get_rate(priv->clk);
+ 
+ 	div = (rate / (2 * priv->clk_freq)) - 1;
+ 	if (div & ~MDIO_CLK_DIV_MASK) {
+diff --git a/drivers/net/phy/mscc/mscc_ptp.c b/drivers/net/phy/mscc/mscc_ptp.c
+index ce49f3ac6939b6..bce6cc5b04ee0f 100644
+--- a/drivers/net/phy/mscc/mscc_ptp.c
++++ b/drivers/net/phy/mscc/mscc_ptp.c
+@@ -897,6 +897,7 @@ static int vsc85xx_eth1_conf(struct phy_device *phydev, enum ts_blk blk,
+ 				     get_unaligned_be32(ptp_multicast));
+ 	} else {
+ 		val |= ANA_ETH1_FLOW_ADDR_MATCH2_ANY_MULTICAST;
++		val |= ANA_ETH1_FLOW_ADDR_MATCH2_ANY_UNICAST;
+ 		vsc85xx_ts_write_csr(phydev, blk,
+ 				     MSCC_ANA_ETH1_FLOW_ADDR_MATCH2(0), val);
+ 		vsc85xx_ts_write_csr(phydev, blk,
+diff --git a/drivers/net/phy/mscc/mscc_ptp.h b/drivers/net/phy/mscc/mscc_ptp.h
+index da3465360e9018..ae9ad925bfa8c0 100644
+--- a/drivers/net/phy/mscc/mscc_ptp.h
++++ b/drivers/net/phy/mscc/mscc_ptp.h
+@@ -98,6 +98,7 @@
+ #define MSCC_ANA_ETH1_FLOW_ADDR_MATCH2(x) (MSCC_ANA_ETH1_FLOW_ENA(x) + 3)
+ #define ANA_ETH1_FLOW_ADDR_MATCH2_MASK_MASK	GENMASK(22, 20)
+ #define ANA_ETH1_FLOW_ADDR_MATCH2_ANY_MULTICAST	0x400000
++#define ANA_ETH1_FLOW_ADDR_MATCH2_ANY_UNICAST	0x200000
+ #define ANA_ETH1_FLOW_ADDR_MATCH2_FULL_ADDR	0x100000
+ #define ANA_ETH1_FLOW_ADDR_MATCH2_SRC_DEST_MASK	GENMASK(17, 16)
+ #define ANA_ETH1_FLOW_ADDR_MATCH2_SRC_DEST	0x020000
+diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
+index 689687bd2574bc..cec3bb22471b54 100644
+--- a/drivers/net/ppp/pptp.c
++++ b/drivers/net/ppp/pptp.c
+@@ -159,19 +159,17 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
+ 	int len;
+ 	unsigned char *data;
+ 	__u32 seq_recv;
+-
+-
+ 	struct rtable *rt;
+ 	struct net_device *tdev;
+ 	struct iphdr  *iph;
+ 	int    max_headroom;
+ 
+ 	if (sk_pppox(po)->sk_state & PPPOX_DEAD)
+-		goto tx_error;
++		goto tx_drop;
+ 
+ 	rt = pptp_route_output(po, &fl4);
+ 	if (IS_ERR(rt))
+-		goto tx_error;
++		goto tx_drop;
+ 
+ 	tdev = rt->dst.dev;
+ 
+@@ -179,16 +177,20 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
+ 
+ 	if (skb_headroom(skb) < max_headroom || skb_cloned(skb) || skb_shared(skb)) {
+ 		struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
+-		if (!new_skb) {
+-			ip_rt_put(rt);
++
++		if (!new_skb)
+ 			goto tx_error;
+-		}
++
+ 		if (skb->sk)
+ 			skb_set_owner_w(new_skb, skb->sk);
+ 		consume_skb(skb);
+ 		skb = new_skb;
+ 	}
+ 
++	/* Ensure we can safely access protocol field and LCP code */
++	if (!pskb_may_pull(skb, 3))
++		goto tx_error;
++
+ 	data = skb->data;
+ 	islcp = ((data[0] << 8) + data[1]) == PPP_LCP && 1 <= data[2] && data[2] <= 7;
+ 
+@@ -262,6 +264,8 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
+ 	return 1;
+ 
+ tx_error:
++	ip_rt_put(rt);
++tx_drop:
+ 	kfree_skb(skb);
+ 	return 1;
+ }
+diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
+index 724b93aa4f7eb3..ccf45ca2feb56d 100644
+--- a/drivers/net/usb/usbnet.c
++++ b/drivers/net/usb/usbnet.c
+@@ -1113,6 +1113,9 @@ static void __handle_link_change(struct usbnet *dev)
+ 	if (!test_bit(EVENT_DEV_OPEN, &dev->flags))
+ 		return;
+ 
++	if (test_and_clear_bit(EVENT_LINK_CARRIER_ON, &dev->flags))
++		netif_carrier_on(dev->net);
++
+ 	if (!netif_carrier_ok(dev->net)) {
+ 		/* kill URBs for reading packets to save bus bandwidth */
+ 		unlink_urbs(dev, &dev->rxq);
+@@ -2009,10 +2012,12 @@ EXPORT_SYMBOL(usbnet_manage_power);
+ void usbnet_link_change(struct usbnet *dev, bool link, bool need_reset)
+ {
+ 	/* update link after link is reseted */
+-	if (link && !need_reset)
+-		netif_carrier_on(dev->net);
+-	else
++	if (link && !need_reset) {
++		set_bit(EVENT_LINK_CARRIER_ON, &dev->flags);
++	} else {
++		clear_bit(EVENT_LINK_CARRIER_ON, &dev->flags);
+ 		netif_carrier_off(dev->net);
++	}
+ 
+ 	if (need_reset && link)
+ 		usbnet_defer_kevent(dev, EVENT_LINK_RESET);
+diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
+index 4087f72f0d2be8..89dde220058a2c 100644
+--- a/drivers/net/vrf.c
++++ b/drivers/net/vrf.c
+@@ -1324,6 +1324,8 @@ static void vrf_ip6_input_dst(struct sk_buff *skb, struct net_device *vrf_dev,
+ 	struct net *net = dev_net(vrf_dev);
+ 	struct rt6_info *rt6;
+ 
++	skb_dst_drop(skb);
++
+ 	rt6 = vrf_ip6_route_lookup(net, vrf_dev, &fl6, ifindex, skb,
+ 				   RT6_LOOKUP_F_HAS_SADDR | RT6_LOOKUP_F_IFACE);
+ 	if (unlikely(!rt6))
+diff --git a/drivers/net/wireless/ath/ath11k/hal.c b/drivers/net/wireless/ath/ath11k/hal.c
+index c445bf5cd83211..f38decae77a935 100644
+--- a/drivers/net/wireless/ath/ath11k/hal.c
++++ b/drivers/net/wireless/ath/ath11k/hal.c
+@@ -1346,6 +1346,10 @@ EXPORT_SYMBOL(ath11k_hal_srng_init);
+ void ath11k_hal_srng_deinit(struct ath11k_base *ab)
+ {
+ 	struct ath11k_hal *hal = &ab->hal;
++	int i;
++
++	for (i = 0; i < HAL_SRNG_RING_ID_MAX; i++)
++		ab->hal.srng_list[i].initialized = 0;
+ 
+ 	ath11k_hal_unregister_srng_key(ab);
+ 	ath11k_hal_free_cont_rdp(ab);
+diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
+index 7ead581f5bfd1d..ddf4ec6b244b46 100644
+--- a/drivers/net/wireless/ath/ath11k/mac.c
++++ b/drivers/net/wireless/ath/ath11k/mac.c
+@@ -8681,9 +8681,9 @@ ath11k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
+ 				    arvif->vdev_id, ret);
+ 			return ret;
+ 		}
+-		ieee80211_iterate_stations_atomic(ar->hw,
+-						  ath11k_mac_disable_peer_fixed_rate,
+-						  arvif);
++		ieee80211_iterate_stations_mtx(ar->hw,
++					       ath11k_mac_disable_peer_fixed_rate,
++					       arvif);
+ 	} else if (ath11k_mac_bitrate_mask_get_single_nss(ar, arvif, band, mask,
+ 							  &single_nss)) {
+ 		rate = WMI_FIXED_RATE_NONE;
+@@ -8750,9 +8750,9 @@ ath11k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
+ 		}
+ 
+ 		mutex_lock(&ar->conf_mutex);
+-		ieee80211_iterate_stations_atomic(ar->hw,
+-						  ath11k_mac_disable_peer_fixed_rate,
+-						  arvif);
++		ieee80211_iterate_stations_mtx(ar->hw,
++					       ath11k_mac_disable_peer_fixed_rate,
++					       arvif);
+ 
+ 		arvif->bitrate_mask = *mask;
+ 		ieee80211_iterate_stations_atomic(ar->hw,
+diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c
+index 5c2130f77dac66..d5892e17494f7d 100644
+--- a/drivers/net/wireless/ath/ath12k/wmi.c
++++ b/drivers/net/wireless/ath/ath12k/wmi.c
+@@ -6589,7 +6589,7 @@ static int ath12k_wmi_tlv_services_parser(struct ath12k_base *ab,
+ 					  void *data)
+ {
+ 	const struct wmi_service_available_event *ev;
+-	u32 *wmi_ext2_service_bitmap;
++	__le32 *wmi_ext2_service_bitmap;
+ 	int i, j;
+ 	u16 expected_len;
+ 
+@@ -6621,12 +6621,12 @@ static int ath12k_wmi_tlv_services_parser(struct ath12k_base *ab,
+ 			   ev->wmi_service_segment_bitmap[3]);
+ 		break;
+ 	case WMI_TAG_ARRAY_UINT32:
+-		wmi_ext2_service_bitmap = (u32 *)ptr;
++		wmi_ext2_service_bitmap = (__le32 *)ptr;
+ 		for (i = 0, j = WMI_MAX_EXT_SERVICE;
+ 		     i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT2_SERVICE;
+ 		     i++) {
+ 			do {
+-				if (wmi_ext2_service_bitmap[i] &
++				if (__le32_to_cpu(wmi_ext2_service_bitmap[i]) &
+ 				    BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32))
+ 					set_bit(j, ab->wmi_ab.svc_map);
+ 			} while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32);
+@@ -6634,8 +6634,10 @@ static int ath12k_wmi_tlv_services_parser(struct ath12k_base *ab,
+ 
+ 		ath12k_dbg(ab, ATH12K_DBG_WMI,
+ 			   "wmi_ext2_service_bitmap 0x%04x 0x%04x 0x%04x 0x%04x",
+-			   wmi_ext2_service_bitmap[0], wmi_ext2_service_bitmap[1],
+-			   wmi_ext2_service_bitmap[2], wmi_ext2_service_bitmap[3]);
++			   __le32_to_cpu(wmi_ext2_service_bitmap[0]),
++			   __le32_to_cpu(wmi_ext2_service_bitmap[1]),
++			   __le32_to_cpu(wmi_ext2_service_bitmap[2]),
++			   __le32_to_cpu(wmi_ext2_service_bitmap[3]));
+ 		break;
+ 	}
+ 	return 0;
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+index 349aa3439502cb..708a4e2ad83996 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+@@ -1541,10 +1541,6 @@ brcmf_cfg80211_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
+ 		return -EAGAIN;
+ 	}
+ 
+-	/* If scan req comes for p2p0, send it over primary I/F */
+-	if (vif == cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif)
+-		vif = cfg->p2p.bss_idx[P2PAPI_BSSCFG_PRIMARY].vif;
+-
+ 	brcmf_dbg(SCAN, "START ESCAN\n");
+ 
+ 	cfg->scan_request = request;
+@@ -1560,6 +1556,10 @@ brcmf_cfg80211_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
+ 	if (err)
+ 		goto scan_out;
+ 
++	/* If scan req comes for p2p0, send it over primary I/F */
++	if (vif == cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif)
++		vif = cfg->p2p.bss_idx[P2PAPI_BSSCFG_PRIMARY].vif;
++
+ 	err = brcmf_do_escan(vif->ifp, request);
+ 	if (err)
+ 		goto scan_out;
+diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
+index e0b14be25b0238..b8713ebd719086 100644
+--- a/drivers/net/wireless/intel/iwlwifi/dvm/main.c
++++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
+@@ -1048,9 +1048,11 @@ static void iwl_bg_restart(struct work_struct *data)
+  *
+  *****************************************************************************/
+ 
+-static void iwl_setup_deferred_work(struct iwl_priv *priv)
++static int iwl_setup_deferred_work(struct iwl_priv *priv)
+ {
+ 	priv->workqueue = alloc_ordered_workqueue(DRV_NAME, 0);
++	if (!priv->workqueue)
++		return -ENOMEM;
+ 
+ 	INIT_WORK(&priv->restart, iwl_bg_restart);
+ 	INIT_WORK(&priv->beacon_update, iwl_bg_beacon_update);
+@@ -1067,6 +1069,8 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
+ 	timer_setup(&priv->statistics_periodic, iwl_bg_statistics_periodic, 0);
+ 
+ 	timer_setup(&priv->ucode_trace, iwl_bg_ucode_trace, 0);
++
++	return 0;
+ }
+ 
+ void iwl_cancel_deferred_work(struct iwl_priv *priv)
+@@ -1454,7 +1458,9 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
+ 	/********************
+ 	 * 6. Setup services
+ 	 ********************/
+-	iwl_setup_deferred_work(priv);
++	if (iwl_setup_deferred_work(priv))
++		goto out_uninit_drv;
++
+ 	iwl_setup_rx_handlers(priv);
+ 
+ 	iwl_power_initialize(priv);
+@@ -1492,6 +1498,7 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
+ 	iwl_cancel_deferred_work(priv);
+ 	destroy_workqueue(priv->workqueue);
+ 	priv->workqueue = NULL;
++out_uninit_drv:
+ 	iwl_uninit_drv(priv);
+ out_free_eeprom_blob:
+ 	kfree(priv->eeprom_blob);
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+index 4dd4a9d5c71fc7..a7dbc0a5ea84e9 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+@@ -61,8 +61,10 @@ static int __init iwl_mvm_init(void)
+ 	}
+ 
+ 	ret = iwl_opmode_register("iwlmvm", &iwl_mvm_ops);
+-	if (ret)
++	if (ret) {
+ 		pr_err("Unable to register MVM op_mode: %d\n", ret);
++		iwl_mvm_rate_control_unregister();
++	}
+ 
+ 	return ret;
+ }
+diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c
+index bab9ef37a1ab80..8bcb1d0dd61887 100644
+--- a/drivers/net/wireless/marvell/mwl8k.c
++++ b/drivers/net/wireless/marvell/mwl8k.c
+@@ -1227,6 +1227,10 @@ static int rxq_refill(struct ieee80211_hw *hw, int index, int limit)
+ 
+ 		addr = dma_map_single(&priv->pdev->dev, skb->data,
+ 				      MWL8K_RX_MAXSZ, DMA_FROM_DEVICE);
++		if (dma_mapping_error(&priv->pdev->dev, addr)) {
++			kfree_skb(skb);
++			break;
++		}
+ 
+ 		rxq->rxd_count++;
+ 		rx = rxq->tail++;
+diff --git a/drivers/net/wireless/purelifi/plfxlc/mac.c b/drivers/net/wireless/purelifi/plfxlc/mac.c
+index 82d1bf7edba20d..a7f5d287e369bd 100644
+--- a/drivers/net/wireless/purelifi/plfxlc/mac.c
++++ b/drivers/net/wireless/purelifi/plfxlc/mac.c
+@@ -99,11 +99,6 @@ int plfxlc_mac_init_hw(struct ieee80211_hw *hw)
+ 	return r;
+ }
+ 
+-void plfxlc_mac_release(struct plfxlc_mac *mac)
+-{
+-	plfxlc_chip_release(&mac->chip);
+-}
+-
+ int plfxlc_op_start(struct ieee80211_hw *hw)
+ {
+ 	plfxlc_hw_mac(hw)->chip.usb.initialized = 1;
+@@ -755,3 +750,9 @@ struct ieee80211_hw *plfxlc_mac_alloc_hw(struct usb_interface *intf)
+ 	SET_IEEE80211_DEV(hw, &intf->dev);
+ 	return hw;
+ }
++
++void plfxlc_mac_release_hw(struct ieee80211_hw *hw)
++{
++	plfxlc_chip_release(&plfxlc_hw_mac(hw)->chip);
++	ieee80211_free_hw(hw);
++}
+diff --git a/drivers/net/wireless/purelifi/plfxlc/mac.h b/drivers/net/wireless/purelifi/plfxlc/mac.h
+index 9384acddcf26a3..56da502999c1aa 100644
+--- a/drivers/net/wireless/purelifi/plfxlc/mac.h
++++ b/drivers/net/wireless/purelifi/plfxlc/mac.h
+@@ -168,7 +168,7 @@ static inline u8 *plfxlc_mac_get_perm_addr(struct plfxlc_mac *mac)
+ }
+ 
+ struct ieee80211_hw *plfxlc_mac_alloc_hw(struct usb_interface *intf);
+-void plfxlc_mac_release(struct plfxlc_mac *mac);
++void plfxlc_mac_release_hw(struct ieee80211_hw *hw);
+ 
+ int plfxlc_mac_preinit_hw(struct ieee80211_hw *hw, const u8 *hw_address);
+ int plfxlc_mac_init_hw(struct ieee80211_hw *hw);
+diff --git a/drivers/net/wireless/purelifi/plfxlc/usb.c b/drivers/net/wireless/purelifi/plfxlc/usb.c
+index 7e7bfa532ed255..966a9e211963dd 100644
+--- a/drivers/net/wireless/purelifi/plfxlc/usb.c
++++ b/drivers/net/wireless/purelifi/plfxlc/usb.c
+@@ -604,7 +604,7 @@ static int probe(struct usb_interface *intf,
+ 	r = plfxlc_upload_mac_and_serial(intf, hw_address, serial_number);
+ 	if (r) {
+ 		dev_err(&intf->dev, "MAC and Serial upload failed (%d)\n", r);
+-		goto error;
++		goto error_free_hw;
+ 	}
+ 
+ 	chip->unit_type = STA;
+@@ -613,13 +613,13 @@ static int probe(struct usb_interface *intf,
+ 	r = plfxlc_mac_preinit_hw(hw, hw_address);
+ 	if (r) {
+ 		dev_err(&intf->dev, "Init mac failed (%d)\n", r);
+-		goto error;
++		goto error_free_hw;
+ 	}
+ 
+ 	r = ieee80211_register_hw(hw);
+ 	if (r) {
+ 		dev_err(&intf->dev, "Register device failed (%d)\n", r);
+-		goto error;
++		goto error_free_hw;
+ 	}
+ 
+ 	if ((le16_to_cpu(interface_to_usbdev(intf)->descriptor.idVendor) ==
+@@ -632,7 +632,7 @@ static int probe(struct usb_interface *intf,
+ 	}
+ 	if (r != 0) {
+ 		dev_err(&intf->dev, "FPGA download failed (%d)\n", r);
+-		goto error;
++		goto error_unreg_hw;
+ 	}
+ 
+ 	tx->mac_fifo_full = 0;
+@@ -642,21 +642,21 @@ static int probe(struct usb_interface *intf,
+ 	r = plfxlc_usb_init_hw(usb);
+ 	if (r < 0) {
+ 		dev_err(&intf->dev, "usb_init_hw failed (%d)\n", r);
+-		goto error;
++		goto error_unreg_hw;
+ 	}
+ 
+ 	msleep(PLF_MSLEEP_TIME);
+ 	r = plfxlc_chip_switch_radio(chip, PLFXLC_RADIO_ON);
+ 	if (r < 0) {
+ 		dev_dbg(&intf->dev, "chip_switch_radio_on failed (%d)\n", r);
+-		goto error;
++		goto error_unreg_hw;
+ 	}
+ 
+ 	msleep(PLF_MSLEEP_TIME);
+ 	r = plfxlc_chip_set_rate(chip, 8);
+ 	if (r < 0) {
+ 		dev_dbg(&intf->dev, "chip_set_rate failed (%d)\n", r);
+-		goto error;
++		goto error_unreg_hw;
+ 	}
+ 
+ 	msleep(PLF_MSLEEP_TIME);
+@@ -664,7 +664,7 @@ static int probe(struct usb_interface *intf,
+ 			    hw_address, ETH_ALEN, USB_REQ_MAC_WR);
+ 	if (r < 0) {
+ 		dev_dbg(&intf->dev, "MAC_WR failure (%d)\n", r);
+-		goto error;
++		goto error_unreg_hw;
+ 	}
+ 
+ 	plfxlc_chip_enable_rxtx(chip);
+@@ -691,12 +691,12 @@ static int probe(struct usb_interface *intf,
+ 	plfxlc_mac_init_hw(hw);
+ 	usb->initialized = true;
+ 	return 0;
++
++error_unreg_hw:
++	ieee80211_unregister_hw(hw);
++error_free_hw:
++	plfxlc_mac_release_hw(hw);
+ error:
+-	if (hw) {
+-		plfxlc_mac_release(plfxlc_hw_mac(hw));
+-		ieee80211_unregister_hw(hw);
+-		ieee80211_free_hw(hw);
+-	}
+ 	dev_err(&intf->dev, "pureLifi:Device error");
+ 	return r;
+ }
+@@ -730,8 +730,7 @@ static void disconnect(struct usb_interface *intf)
+ 	 */
+ 	usb_reset_device(interface_to_usbdev(intf));
+ 
+-	plfxlc_mac_release(mac);
+-	ieee80211_free_hw(hw);
++	plfxlc_mac_release_hw(hw);
+ }
+ 
+ static void plfxlc_usb_resume(struct plfxlc_usb *usb)
+diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
+index 220ac5bdf279a1..8a57d6c72335ef 100644
+--- a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
++++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
+@@ -1041,10 +1041,11 @@ static void rtl8187_stop(struct ieee80211_hw *dev, bool suspend)
+ 	rtl818x_iowrite8(priv, &priv->map->CONFIG4, reg | RTL818X_CONFIG4_VCOOFF);
+ 	rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
+ 
++	usb_kill_anchored_urbs(&priv->anchored);
++
+ 	while ((skb = skb_dequeue(&priv->b_tx_status.queue)))
+ 		dev_kfree_skb_any(skb);
+ 
+-	usb_kill_anchored_urbs(&priv->anchored);
+ 	mutex_unlock(&priv->conf_mutex);
+ 
+ 	if (!priv->is_rtl8187b)
+diff --git a/drivers/net/wireless/realtek/rtl8xxxu/core.c b/drivers/net/wireless/realtek/rtl8xxxu/core.c
+index 569856ca677f62..c6f69d87c38d41 100644
+--- a/drivers/net/wireless/realtek/rtl8xxxu/core.c
++++ b/drivers/net/wireless/realtek/rtl8xxxu/core.c
+@@ -6617,7 +6617,7 @@ static int rtl8xxxu_submit_rx_urb(struct rtl8xxxu_priv *priv,
+ 		skb_size = fops->rx_agg_buf_size;
+ 		skb_size += (rx_desc_sz + sizeof(struct rtl8723au_phy_stats));
+ 	} else {
+-		skb_size = IEEE80211_MAX_FRAME_LEN;
++		skb_size = IEEE80211_MAX_FRAME_LEN + rx_desc_sz;
+ 	}
+ 
+ 	skb = __netdev_alloc_skb(NULL, skb_size, GFP_KERNEL);
+diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
+index a808af2f085ec8..01c8b748b20b34 100644
+--- a/drivers/net/wireless/realtek/rtw88/main.c
++++ b/drivers/net/wireless/realtek/rtw88/main.c
+@@ -329,7 +329,7 @@ int rtw_sta_add(struct rtw_dev *rtwdev, struct ieee80211_sta *sta,
+ 	struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
+ 	int i;
+ 
+-	if (vif->type == NL80211_IFTYPE_STATION) {
++	if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) {
+ 		si->mac_id = rtwvif->mac_id;
+ 	} else {
+ 		si->mac_id = rtw_acquire_macid(rtwdev);
+@@ -366,7 +366,7 @@ void rtw_sta_remove(struct rtw_dev *rtwdev, struct ieee80211_sta *sta,
+ 
+ 	cancel_work_sync(&si->rc_work);
+ 
+-	if (vif->type != NL80211_IFTYPE_STATION)
++	if (vif->type != NL80211_IFTYPE_STATION || sta->tdls)
+ 		rtw_release_macid(rtwdev, si->mac_id);
+ 	if (fw_exist)
+ 		rtw_fw_media_status_report(rtwdev, si->mac_id, false);
+diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c
+index 83b22bd0ce81a3..c336c66ac8e354 100644
+--- a/drivers/net/wireless/realtek/rtw89/core.c
++++ b/drivers/net/wireless/realtek/rtw89/core.c
+@@ -2014,6 +2014,11 @@ static void rtw89_core_cancel_6ghz_probe_tx(struct rtw89_dev *rtwdev,
+ 	if (rx_status->band != NL80211_BAND_6GHZ)
+ 		return;
+ 
++	if (unlikely(!(rtwdev->chip->support_bands & BIT(NL80211_BAND_6GHZ)))) {
++		rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "invalid rx on unsupported 6 GHz\n");
++		return;
++	}
++
+ 	ssid_ie = cfg80211_find_ie(WLAN_EID_SSID, ies, skb->len);
+ 
+ 	list_for_each_entry(info, &pkt_list[NL80211_BAND_6GHZ], list) {
+diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
+index 4606c881366691..710e74d3ec3e98 100644
+--- a/drivers/nvme/target/core.c
++++ b/drivers/nvme/target/core.c
+@@ -1714,24 +1714,24 @@ static int __init nvmet_init(void)
+ 	if (!nvmet_wq)
+ 		goto out_free_buffered_work_queue;
+ 
+-	error = nvmet_init_discovery();
++	error = nvmet_init_debugfs();
+ 	if (error)
+ 		goto out_free_nvmet_work_queue;
+ 
+-	error = nvmet_init_debugfs();
++	error = nvmet_init_discovery();
+ 	if (error)
+-		goto out_exit_discovery;
++		goto out_exit_debugfs;
+ 
+ 	error = nvmet_init_configfs();
+ 	if (error)
+-		goto out_exit_debugfs;
++		goto out_exit_discovery;
+ 
+ 	return 0;
+ 
+-out_exit_debugfs:
+-	nvmet_exit_debugfs();
+ out_exit_discovery:
+ 	nvmet_exit_discovery();
++out_exit_debugfs:
++	nvmet_exit_debugfs();
+ out_free_nvmet_work_queue:
+ 	destroy_workqueue(nvmet_wq);
+ out_free_buffered_work_queue:
+@@ -1746,8 +1746,8 @@ static int __init nvmet_init(void)
+ static void __exit nvmet_exit(void)
+ {
+ 	nvmet_exit_configfs();
+-	nvmet_exit_debugfs();
+ 	nvmet_exit_discovery();
++	nvmet_exit_debugfs();
+ 	ida_destroy(&cntlid_ida);
+ 	destroy_workqueue(nvmet_wq);
+ 	destroy_workqueue(buffered_io_wq);
+diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c
+index 481dcc476c556b..18e65571c1459b 100644
+--- a/drivers/pci/controller/pcie-rockchip-host.c
++++ b/drivers/pci/controller/pcie-rockchip-host.c
+@@ -439,7 +439,7 @@ static irqreturn_t rockchip_pcie_subsys_irq_handler(int irq, void *arg)
+ 			dev_dbg(dev, "malformed TLP received from the link\n");
+ 
+ 		if (sub_reg & PCIE_CORE_INT_UCR)
+-			dev_dbg(dev, "malformed TLP received from the link\n");
++			dev_dbg(dev, "Unexpected Completion received from the link\n");
+ 
+ 		if (sub_reg & PCIE_CORE_INT_FCE)
+ 			dev_dbg(dev, "an error was observed in the flow control advertisements from the other side\n");
+diff --git a/drivers/pci/endpoint/functions/pci-epf-vntb.c b/drivers/pci/endpoint/functions/pci-epf-vntb.c
+index 874cb097b093ae..62d09a528e6885 100644
+--- a/drivers/pci/endpoint/functions/pci-epf-vntb.c
++++ b/drivers/pci/endpoint/functions/pci-epf-vntb.c
+@@ -530,7 +530,7 @@ static int epf_ntb_db_bar_init(struct epf_ntb *ntb)
+ 	struct device *dev = &ntb->epf->dev;
+ 	int ret;
+ 	struct pci_epf_bar *epf_bar;
+-	void __iomem *mw_addr;
++	void *mw_addr;
+ 	enum pci_barno barno;
+ 	size_t size = sizeof(u32) * ntb->db_count;
+ 
+@@ -700,7 +700,7 @@ static int epf_ntb_init_epc_bar(struct epf_ntb *ntb)
+ 		barno = pci_epc_get_next_free_bar(epc_features, barno);
+ 		if (barno < 0) {
+ 			dev_err(dev, "Fail to get NTB function BAR\n");
+-			return barno;
++			return -ENOENT;
+ 		}
+ 		ntb->epf_ntb_bar[bar] = barno;
+ 	}
+diff --git a/drivers/pci/hotplug/pnv_php.c b/drivers/pci/hotplug/pnv_php.c
+index 573a41869c153f..4f85e7fe29ec23 100644
+--- a/drivers/pci/hotplug/pnv_php.c
++++ b/drivers/pci/hotplug/pnv_php.c
+@@ -3,12 +3,15 @@
+  * PCI Hotplug Driver for PowerPC PowerNV platform.
+  *
+  * Copyright Gavin Shan, IBM Corporation 2016.
++ * Copyright (C) 2025 Raptor Engineering, LLC
++ * Copyright (C) 2025 Raptor Computing Systems, LLC
+  */
+ 
+ #include <linux/bitfield.h>
+ #include <linux/libfdt.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
++#include <linux/delay.h>
+ #include <linux/pci_hotplug.h>
+ #include <linux/of_fdt.h>
+ 
+@@ -36,8 +39,10 @@ static void pnv_php_register(struct device_node *dn);
+ static void pnv_php_unregister_one(struct device_node *dn);
+ static void pnv_php_unregister(struct device_node *dn);
+ 
++static void pnv_php_enable_irq(struct pnv_php_slot *php_slot);
++
+ static void pnv_php_disable_irq(struct pnv_php_slot *php_slot,
+-				bool disable_device)
++				bool disable_device, bool disable_msi)
+ {
+ 	struct pci_dev *pdev = php_slot->pdev;
+ 	u16 ctrl;
+@@ -53,19 +58,15 @@ static void pnv_php_disable_irq(struct pnv_php_slot *php_slot,
+ 		php_slot->irq = 0;
+ 	}
+ 
+-	if (php_slot->wq) {
+-		destroy_workqueue(php_slot->wq);
+-		php_slot->wq = NULL;
+-	}
+-
+-	if (disable_device) {
++	if (disable_device || disable_msi) {
+ 		if (pdev->msix_enabled)
+ 			pci_disable_msix(pdev);
+ 		else if (pdev->msi_enabled)
+ 			pci_disable_msi(pdev);
++	}
+ 
++	if (disable_device)
+ 		pci_disable_device(pdev);
+-	}
+ }
+ 
+ static void pnv_php_free_slot(struct kref *kref)
+@@ -74,7 +75,8 @@ static void pnv_php_free_slot(struct kref *kref)
+ 					struct pnv_php_slot, kref);
+ 
+ 	WARN_ON(!list_empty(&php_slot->children));
+-	pnv_php_disable_irq(php_slot, false);
++	pnv_php_disable_irq(php_slot, false, false);
++	destroy_workqueue(php_slot->wq);
+ 	kfree(php_slot->name);
+ 	kfree(php_slot);
+ }
+@@ -391,6 +393,20 @@ static int pnv_php_get_power_state(struct hotplug_slot *slot, u8 *state)
+ 	return 0;
+ }
+ 
++static int pcie_check_link_active(struct pci_dev *pdev)
++{
++	u16 lnk_status;
++	int ret;
++
++	ret = pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
++	if (ret == PCIBIOS_DEVICE_NOT_FOUND || PCI_POSSIBLE_ERROR(lnk_status))
++		return -ENODEV;
++
++	ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
++
++	return ret;
++}
++
+ static int pnv_php_get_adapter_state(struct hotplug_slot *slot, u8 *state)
+ {
+ 	struct pnv_php_slot *php_slot = to_pnv_php_slot(slot);
+@@ -403,6 +419,19 @@ static int pnv_php_get_adapter_state(struct hotplug_slot *slot, u8 *state)
+ 	 */
+ 	ret = pnv_pci_get_presence_state(php_slot->id, &presence);
+ 	if (ret >= 0) {
++		if (pci_pcie_type(php_slot->pdev) == PCI_EXP_TYPE_DOWNSTREAM &&
++			presence == OPAL_PCI_SLOT_EMPTY) {
++			/*
++			 * Similar to pciehp_hpc, check whether the Link Active
++			 * bit is set to account for broken downstream bridges
++			 * that don't properly assert Presence Detect State, as
++			 * was observed on the Microsemi Switchtec PM8533 PFX
++			 * [11f8:8533].
++			 */
++			if (pcie_check_link_active(php_slot->pdev) > 0)
++				presence = OPAL_PCI_SLOT_PRESENT;
++		}
++
+ 		*state = presence;
+ 		ret = 0;
+ 	} else {
+@@ -442,6 +471,61 @@ static int pnv_php_set_attention_state(struct hotplug_slot *slot, u8 state)
+ 	return 0;
+ }
+ 
++static int pnv_php_activate_slot(struct pnv_php_slot *php_slot,
++				 struct hotplug_slot *slot)
++{
++	int ret, i;
++
++	/*
++	 * Issue initial slot activation command to firmware
++	 *
++	 * Firmware will power slot on, attempt to train the link, and
++	 * discover any downstream devices. If this process fails, firmware
++	 * will return an error code and an invalid device tree. Failure
++	 * can be caused for multiple reasons, including a faulty
++	 * downstream device, poor connection to the downstream device, or
++	 * a previously latched PHB fence.  On failure, issue fundamental
++	 * reset up to three times before aborting.
++	 */
++	ret = pnv_php_set_slot_power_state(slot, OPAL_PCI_SLOT_POWER_ON);
++	if (ret) {
++		SLOT_WARN(
++			php_slot,
++			"PCI slot activation failed with error code %d, possible frozen PHB",
++			ret);
++		SLOT_WARN(
++			php_slot,
++			"Attempting complete PHB reset before retrying slot activation\n");
++		for (i = 0; i < 3; i++) {
++			/*
++			 * Slot activation failed, PHB may be fenced from a
++			 * prior device failure.
++			 *
++			 * Use the OPAL fundamental reset call to both try a
++			 * device reset and clear any potentially active PHB
++			 * fence / freeze.
++			 */
++			SLOT_WARN(php_slot, "Try %d...\n", i + 1);
++			pci_set_pcie_reset_state(php_slot->pdev,
++						 pcie_warm_reset);
++			msleep(250);
++			pci_set_pcie_reset_state(php_slot->pdev,
++						 pcie_deassert_reset);
++
++			ret = pnv_php_set_slot_power_state(
++				slot, OPAL_PCI_SLOT_POWER_ON);
++			if (!ret)
++				break;
++		}
++
++		if (i >= 3)
++			SLOT_WARN(php_slot,
++				  "Failed to bring slot online, aborting!\n");
++	}
++
++	return ret;
++}
++
+ static int pnv_php_enable(struct pnv_php_slot *php_slot, bool rescan)
+ {
+ 	struct hotplug_slot *slot = &php_slot->slot;
+@@ -504,7 +588,7 @@ static int pnv_php_enable(struct pnv_php_slot *php_slot, bool rescan)
+ 		goto scan;
+ 
+ 	/* Power is off, turn it on and then scan the slot */
+-	ret = pnv_php_set_slot_power_state(slot, OPAL_PCI_SLOT_POWER_ON);
++	ret = pnv_php_activate_slot(php_slot, slot);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -561,8 +645,58 @@ static int pnv_php_reset_slot(struct hotplug_slot *slot, bool probe)
+ static int pnv_php_enable_slot(struct hotplug_slot *slot)
+ {
+ 	struct pnv_php_slot *php_slot = to_pnv_php_slot(slot);
++	u32 prop32;
++	int ret;
++
++	ret = pnv_php_enable(php_slot, true);
++	if (ret)
++		return ret;
++
++	/* (Re-)enable interrupt if the slot supports surprise hotplug */
++	ret = of_property_read_u32(php_slot->dn, "ibm,slot-surprise-pluggable",
++				   &prop32);
++	if (!ret && prop32)
++		pnv_php_enable_irq(php_slot);
++
++	return 0;
++}
++
++/*
++ * Disable any hotplug interrupts for all slots on the provided bus, as well as
++ * all downstream slots in preparation for a hot unplug.
++ */
++static int pnv_php_disable_all_irqs(struct pci_bus *bus)
++{
++	struct pci_bus *child_bus;
++	struct pci_slot *slot;
++
++	/* First go down child buses */
++	list_for_each_entry(child_bus, &bus->children, node)
++		pnv_php_disable_all_irqs(child_bus);
++
++	/* Disable IRQs for all pnv_php slots on this bus */
++	list_for_each_entry(slot, &bus->slots, list) {
++		struct pnv_php_slot *php_slot = to_pnv_php_slot(slot->hotplug);
+ 
+-	return pnv_php_enable(php_slot, true);
++		pnv_php_disable_irq(php_slot, false, true);
++	}
++
++	return 0;
++}
++
++/*
++ * Disable any hotplug interrupts for all downstream slots on the provided
++ * bus in preparation for a hot unplug.
++ */
++static int pnv_php_disable_all_downstream_irqs(struct pci_bus *bus)
++{
++	struct pci_bus *child_bus;
++
++	/* Go down child buses, recursively deactivating their IRQs */
++	list_for_each_entry(child_bus, &bus->children, node)
++		pnv_php_disable_all_irqs(child_bus);
++
++	return 0;
+ }
+ 
+ static int pnv_php_disable_slot(struct hotplug_slot *slot)
+@@ -579,6 +713,13 @@ static int pnv_php_disable_slot(struct hotplug_slot *slot)
+ 	    php_slot->state != PNV_PHP_STATE_REGISTERED)
+ 		return 0;
+ 
++	/*
++	 * Free all IRQ resources from all child slots before removal.
++	 * Note that we do not disable the root slot IRQ here as that
++	 * would also deactivate the slot hot (re)plug interrupt!
++	 */
++	pnv_php_disable_all_downstream_irqs(php_slot->bus);
++
+ 	/* Remove all devices behind the slot */
+ 	pci_lock_rescan_remove();
+ 	pci_hp_remove_devices(php_slot->bus);
+@@ -647,6 +788,15 @@ static struct pnv_php_slot *pnv_php_alloc_slot(struct device_node *dn)
+ 		return NULL;
+ 	}
+ 
++	/* Allocate workqueue for this slot's interrupt handling */
++	php_slot->wq = alloc_workqueue("pciehp-%s", 0, 0, php_slot->name);
++	if (!php_slot->wq) {
++		SLOT_WARN(php_slot, "Cannot alloc workqueue\n");
++		kfree(php_slot->name);
++		kfree(php_slot);
++		return NULL;
++	}
++
+ 	if (dn->child && PCI_DN(dn->child))
+ 		php_slot->slot_no = PCI_SLOT(PCI_DN(dn->child)->devfn);
+ 	else
+@@ -745,16 +895,63 @@ static int pnv_php_enable_msix(struct pnv_php_slot *php_slot)
+ 	return entry.vector;
+ }
+ 
++static void
++pnv_php_detect_clear_surprise_removal_freeze(struct pnv_php_slot *php_slot)
++{
++	struct pci_dev *pdev = php_slot->pdev;
++	struct eeh_dev *edev;
++	struct eeh_pe *pe;
++	int i, rc;
++
++	/*
++	 * When a device is surprise removed from a downstream bridge slot,
++	 * the upstream bridge port can still end up frozen due to related EEH
++	 * events, which will in turn block the MSI interrupts for slot hotplug
++	 * detection.
++	 *
++	 * Detect and thaw any frozen upstream PE after slot deactivation.
++	 */
++	edev = pci_dev_to_eeh_dev(pdev);
++	pe = edev ? edev->pe : NULL;
++	rc = eeh_pe_get_state(pe);
++	if ((rc == -ENODEV) || (rc == -ENOENT)) {
++		SLOT_WARN(
++			php_slot,
++			"Upstream bridge PE state unknown, hotplug detect may fail\n");
++	} else {
++		if (pe->state & EEH_PE_ISOLATED) {
++			SLOT_WARN(
++				php_slot,
++				"Upstream bridge PE %02x frozen, thawing...\n",
++				pe->addr);
++			for (i = 0; i < 3; i++)
++				if (!eeh_unfreeze_pe(pe))
++					break;
++			if (i >= 3)
++				SLOT_WARN(
++					php_slot,
++					"Unable to thaw PE %02x, hotplug detect will fail!\n",
++					pe->addr);
++			else
++				SLOT_WARN(php_slot,
++					  "PE %02x thawed successfully\n",
++					  pe->addr);
++		}
++	}
++}
++
+ static void pnv_php_event_handler(struct work_struct *work)
+ {
+ 	struct pnv_php_event *event =
+ 		container_of(work, struct pnv_php_event, work);
+ 	struct pnv_php_slot *php_slot = event->php_slot;
+ 
+-	if (event->added)
++	if (event->added) {
+ 		pnv_php_enable_slot(&php_slot->slot);
+-	else
++	} else {
+ 		pnv_php_disable_slot(&php_slot->slot);
++		pnv_php_detect_clear_surprise_removal_freeze(php_slot);
++	}
+ 
+ 	kfree(event);
+ }
+@@ -843,14 +1040,6 @@ static void pnv_php_init_irq(struct pnv_php_slot *php_slot, int irq)
+ 	u16 sts, ctrl;
+ 	int ret;
+ 
+-	/* Allocate workqueue */
+-	php_slot->wq = alloc_workqueue("pciehp-%s", 0, 0, php_slot->name);
+-	if (!php_slot->wq) {
+-		SLOT_WARN(php_slot, "Cannot alloc workqueue\n");
+-		pnv_php_disable_irq(php_slot, true);
+-		return;
+-	}
+-
+ 	/* Check PDC (Presence Detection Change) is broken or not */
+ 	ret = of_property_read_u32(php_slot->dn, "ibm,slot-broken-pdc",
+ 				   &broken_pdc);
+@@ -869,7 +1058,7 @@ static void pnv_php_init_irq(struct pnv_php_slot *php_slot, int irq)
+ 	ret = request_irq(irq, pnv_php_interrupt, IRQF_SHARED,
+ 			  php_slot->name, php_slot);
+ 	if (ret) {
+-		pnv_php_disable_irq(php_slot, true);
++		pnv_php_disable_irq(php_slot, true, true);
+ 		SLOT_WARN(php_slot, "Error %d enabling IRQ %d\n", ret, irq);
+ 		return;
+ 	}
+diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
+index a392e060ca2f42..b8289b6c395bc4 100644
+--- a/drivers/pci/pcie/aspm.c
++++ b/drivers/pci/pcie/aspm.c
+@@ -81,24 +81,44 @@ void pci_configure_aspm_l1ss(struct pci_dev *pdev)
+ 
+ void pci_save_aspm_l1ss_state(struct pci_dev *pdev)
+ {
++	struct pci_dev *parent = pdev->bus->self;
+ 	struct pci_cap_saved_state *save_state;
+-	u16 l1ss = pdev->l1ss;
+ 	u32 *cap;
+ 
++	/*
++	 * If this is a Downstream Port, we never restore the L1SS state
++	 * directly; we only restore it when we restore the state of the
++	 * Upstream Port below it.
++	 */
++	if (pcie_downstream_port(pdev) || !parent)
++		return;
++
++	if (!pdev->l1ss || !parent->l1ss)
++		return;
++
+ 	/*
+ 	 * Save L1 substate configuration. The ASPM L0s/L1 configuration
+ 	 * in PCI_EXP_LNKCTL_ASPMC is saved by pci_save_pcie_state().
+ 	 */
+-	if (!l1ss)
++	save_state = pci_find_saved_ext_cap(pdev, PCI_EXT_CAP_ID_L1SS);
++	if (!save_state)
+ 		return;
+ 
+-	save_state = pci_find_saved_ext_cap(pdev, PCI_EXT_CAP_ID_L1SS);
++	cap = &save_state->cap.data[0];
++	pci_read_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL2, cap++);
++	pci_read_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL1, cap++);
++
++	/*
++	 * Save parent's L1 substate configuration so we have it for
++	 * pci_restore_aspm_l1ss_state(pdev) to restore.
++	 */
++	save_state = pci_find_saved_ext_cap(parent, PCI_EXT_CAP_ID_L1SS);
+ 	if (!save_state)
+ 		return;
+ 
+ 	cap = &save_state->cap.data[0];
+-	pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL2, cap++);
+-	pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, cap++);
++	pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL2, cap++);
++	pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, cap++);
+ }
+ 
+ void pci_restore_aspm_l1ss_state(struct pci_dev *pdev)
+diff --git a/drivers/perf/arm-ni.c b/drivers/perf/arm-ni.c
+index b87d3a9ba7d545..4b9d53dae8978b 100644
+--- a/drivers/perf/arm-ni.c
++++ b/drivers/perf/arm-ni.c
+@@ -545,6 +545,8 @@ static int arm_ni_init_cd(struct arm_ni *ni, struct arm_ni_node *node, u64 res_s
+ 		return err;
+ 
+ 	cd->cpu = cpumask_local_spread(0, dev_to_node(ni->dev));
++	irq_set_affinity(cd->irq, cpumask_of(cd->cpu));
++
+ 	cd->pmu = (struct pmu) {
+ 		.module = THIS_MODULE,
+ 		.parent = ni->dev,
+diff --git a/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c b/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c
+index 68cc8e24f38367..163950e16dbe13 100644
+--- a/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c
++++ b/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c
+@@ -37,32 +37,13 @@
+ #define EUSB2_TUNE_EUSB_EQU		0x5A
+ #define EUSB2_TUNE_EUSB_HS_COMP_CUR	0x5B
+ 
+-enum eusb2_reg_layout {
+-	TUNE_EUSB_HS_COMP_CUR,
+-	TUNE_EUSB_EQU,
+-	TUNE_EUSB_SLEW,
+-	TUNE_USB2_HS_COMP_CUR,
+-	TUNE_USB2_PREEM,
+-	TUNE_USB2_EQU,
+-	TUNE_USB2_SLEW,
+-	TUNE_SQUELCH_U,
+-	TUNE_HSDISC,
+-	TUNE_RES_FSDIF,
+-	TUNE_IUSB2,
+-	TUNE_USB2_CROSSOVER,
+-	NUM_TUNE_FIELDS,
+-
+-	FORCE_VAL_5 = NUM_TUNE_FIELDS,
+-	FORCE_EN_5,
+-
+-	EN_CTL1,
+-
+-	RPTR_STATUS,
+-	LAYOUT_SIZE,
++struct eusb2_repeater_init_tbl_reg {
++	unsigned int reg;
++	unsigned int value;
+ };
+ 
+ struct eusb2_repeater_cfg {
+-	const u32 *init_tbl;
++	const struct eusb2_repeater_init_tbl_reg *init_tbl;
+ 	int init_tbl_num;
+ 	const char * const *vreg_list;
+ 	int num_vregs;
+@@ -82,16 +63,16 @@ static const char * const pm8550b_vreg_l[] = {
+ 	"vdd18", "vdd3",
+ };
+ 
+-static const u32 pm8550b_init_tbl[NUM_TUNE_FIELDS] = {
+-	[TUNE_IUSB2] = 0x8,
+-	[TUNE_SQUELCH_U] = 0x3,
+-	[TUNE_USB2_PREEM] = 0x5,
++static const struct eusb2_repeater_init_tbl_reg pm8550b_init_tbl[] = {
++	{ EUSB2_TUNE_IUSB2, 0x8 },
++	{ EUSB2_TUNE_SQUELCH_U, 0x3 },
++	{ EUSB2_TUNE_USB2_PREEM, 0x5 },
+ };
+ 
+-static const u32 smb2360_init_tbl[NUM_TUNE_FIELDS] = {
+-	[TUNE_IUSB2] = 0x5,
+-	[TUNE_SQUELCH_U] = 0x3,
+-	[TUNE_USB2_PREEM] = 0x2,
++static const struct eusb2_repeater_init_tbl_reg smb2360_init_tbl[] = {
++	{ EUSB2_TUNE_IUSB2, 0x5 },
++	{ EUSB2_TUNE_SQUELCH_U, 0x3 },
++	{ EUSB2_TUNE_USB2_PREEM, 0x2 },
+ };
+ 
+ static const struct eusb2_repeater_cfg pm8550b_eusb2_cfg = {
+@@ -129,17 +110,10 @@ static int eusb2_repeater_init(struct phy *phy)
+ 	struct eusb2_repeater *rptr = phy_get_drvdata(phy);
+ 	struct device_node *np = rptr->dev->of_node;
+ 	struct regmap *regmap = rptr->regmap;
+-	const u32 *init_tbl = rptr->cfg->init_tbl;
+-	u8 tune_usb2_preem = init_tbl[TUNE_USB2_PREEM];
+-	u8 tune_hsdisc = init_tbl[TUNE_HSDISC];
+-	u8 tune_iusb2 = init_tbl[TUNE_IUSB2];
+ 	u32 base = rptr->base;
+-	u32 val;
++	u32 poll_val;
+ 	int ret;
+-
+-	of_property_read_u8(np, "qcom,tune-usb2-amplitude", &tune_iusb2);
+-	of_property_read_u8(np, "qcom,tune-usb2-disc-thres", &tune_hsdisc);
+-	of_property_read_u8(np, "qcom,tune-usb2-preem", &tune_usb2_preem);
++	u8 val;
+ 
+ 	ret = regulator_bulk_enable(rptr->cfg->num_vregs, rptr->vregs);
+ 	if (ret)
+@@ -147,21 +121,24 @@ static int eusb2_repeater_init(struct phy *phy)
+ 
+ 	regmap_write(regmap, base + EUSB2_EN_CTL1, EUSB2_RPTR_EN);
+ 
+-	regmap_write(regmap, base + EUSB2_TUNE_EUSB_HS_COMP_CUR, init_tbl[TUNE_EUSB_HS_COMP_CUR]);
+-	regmap_write(regmap, base + EUSB2_TUNE_EUSB_EQU, init_tbl[TUNE_EUSB_EQU]);
+-	regmap_write(regmap, base + EUSB2_TUNE_EUSB_SLEW, init_tbl[TUNE_EUSB_SLEW]);
+-	regmap_write(regmap, base + EUSB2_TUNE_USB2_HS_COMP_CUR, init_tbl[TUNE_USB2_HS_COMP_CUR]);
+-	regmap_write(regmap, base + EUSB2_TUNE_USB2_EQU, init_tbl[TUNE_USB2_EQU]);
+-	regmap_write(regmap, base + EUSB2_TUNE_USB2_SLEW, init_tbl[TUNE_USB2_SLEW]);
+-	regmap_write(regmap, base + EUSB2_TUNE_SQUELCH_U, init_tbl[TUNE_SQUELCH_U]);
+-	regmap_write(regmap, base + EUSB2_TUNE_RES_FSDIF, init_tbl[TUNE_RES_FSDIF]);
+-	regmap_write(regmap, base + EUSB2_TUNE_USB2_CROSSOVER, init_tbl[TUNE_USB2_CROSSOVER]);
+-
+-	regmap_write(regmap, base + EUSB2_TUNE_USB2_PREEM, tune_usb2_preem);
+-	regmap_write(regmap, base + EUSB2_TUNE_HSDISC, tune_hsdisc);
+-	regmap_write(regmap, base + EUSB2_TUNE_IUSB2, tune_iusb2);
+-
+-	ret = regmap_read_poll_timeout(regmap, base + EUSB2_RPTR_STATUS, val, val & RPTR_OK, 10, 5);
++	/* Write registers from init table */
++	for (int i = 0; i < rptr->cfg->init_tbl_num; i++)
++		regmap_write(regmap, base + rptr->cfg->init_tbl[i].reg,
++			     rptr->cfg->init_tbl[i].value);
++
++	/* Override registers from devicetree values */
++	if (!of_property_read_u8(np, "qcom,tune-usb2-amplitude", &val))
++		regmap_write(regmap, base + EUSB2_TUNE_USB2_PREEM, val);
++
++	if (!of_property_read_u8(np, "qcom,tune-usb2-disc-thres", &val))
++		regmap_write(regmap, base + EUSB2_TUNE_HSDISC, val);
++
++	if (!of_property_read_u8(np, "qcom,tune-usb2-preem", &val))
++		regmap_write(regmap, base + EUSB2_TUNE_IUSB2, val);
++
++	/* Wait for status OK */
++	ret = regmap_read_poll_timeout(regmap, base + EUSB2_RPTR_STATUS, poll_val,
++				       poll_val & RPTR_OK, 10, 5);
+ 	if (ret)
+ 		dev_err(rptr->dev, "initialization timed-out\n");
+ 
+diff --git a/drivers/pinctrl/berlin/berlin.c b/drivers/pinctrl/berlin/berlin.c
+index c372a2a24be4bb..9dc2da8056b722 100644
+--- a/drivers/pinctrl/berlin/berlin.c
++++ b/drivers/pinctrl/berlin/berlin.c
+@@ -204,6 +204,7 @@ static int berlin_pinctrl_build_state(struct platform_device *pdev)
+ 	const struct berlin_desc_group *desc_group;
+ 	const struct berlin_desc_function *desc_function;
+ 	int i, max_functions = 0;
++	struct pinfunction *new_functions;
+ 
+ 	pctrl->nfunctions = 0;
+ 
+@@ -229,12 +230,15 @@ static int berlin_pinctrl_build_state(struct platform_device *pdev)
+ 		}
+ 	}
+ 
+-	pctrl->functions = krealloc(pctrl->functions,
++	new_functions = krealloc(pctrl->functions,
+ 				    pctrl->nfunctions * sizeof(*pctrl->functions),
+ 				    GFP_KERNEL);
+-	if (!pctrl->functions)
++	if (!new_functions) {
++		kfree(pctrl->functions);
+ 		return -ENOMEM;
++	}
+ 
++	pctrl->functions = new_functions;
+ 	/* map functions to theirs groups */
+ 	for (i = 0; i < pctrl->desc->ngroups; i++) {
+ 		desc_group = pctrl->desc->groups + i;
+diff --git a/drivers/pinctrl/pinmux.c b/drivers/pinctrl/pinmux.c
+index 0743190da59e81..2c31e7f2a27a86 100644
+--- a/drivers/pinctrl/pinmux.c
++++ b/drivers/pinctrl/pinmux.c
+@@ -236,18 +236,7 @@ static const char *pin_free(struct pinctrl_dev *pctldev, int pin,
+ 			if (desc->mux_usecount)
+ 				return NULL;
+ 		}
+-	}
+-
+-	/*
+-	 * If there is no kind of request function for the pin we just assume
+-	 * we got it by default and proceed.
+-	 */
+-	if (gpio_range && ops->gpio_disable_free)
+-		ops->gpio_disable_free(pctldev, gpio_range, pin);
+-	else if (ops->free)
+-		ops->free(pctldev, pin);
+ 
+-	scoped_guard(mutex, &desc->mux_lock) {
+ 		if (gpio_range) {
+ 			owner = desc->gpio_owner;
+ 			desc->gpio_owner = NULL;
+@@ -258,6 +247,15 @@ static const char *pin_free(struct pinctrl_dev *pctldev, int pin,
+ 		}
+ 	}
+ 
++	/*
++	 * If there is no kind of request function for the pin we just assume
++	 * we got it by default and proceed.
++	 */
++	if (gpio_range && ops->gpio_disable_free)
++		ops->gpio_disable_free(pctldev, gpio_range, pin);
++	else if (ops->free)
++		ops->free(pctldev, pin);
++
+ 	module_put(pctldev->owner);
+ 
+ 	return owner;
+diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+index bde67ee31417f0..8fbbdcc52debb1 100644
+--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
++++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+@@ -395,6 +395,7 @@ static int sunxi_pctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
+ 	const char *function, *pin_prop;
+ 	const char *group;
+ 	int ret, npins, nmaps, configlen = 0, i = 0;
++	struct pinctrl_map *new_map;
+ 
+ 	*map = NULL;
+ 	*num_maps = 0;
+@@ -469,9 +470,13 @@ static int sunxi_pctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
+ 	 * We know have the number of maps we need, we can resize our
+ 	 * map array
+ 	 */
+-	*map = krealloc(*map, i * sizeof(struct pinctrl_map), GFP_KERNEL);
+-	if (!*map)
+-		return -ENOMEM;
++	new_map = krealloc(*map, i * sizeof(struct pinctrl_map), GFP_KERNEL);
++	if (!new_map) {
++		ret = -ENOMEM;
++		goto err_free_map;
++	}
++
++	*map = new_map;
+ 
+ 	return 0;
+ 
+diff --git a/drivers/platform/x86/intel/pmt/class.c b/drivers/platform/x86/intel/pmt/class.c
+index c3ca2ac91b0569..d2f0aea6ea5009 100644
+--- a/drivers/platform/x86/intel/pmt/class.c
++++ b/drivers/platform/x86/intel/pmt/class.c
+@@ -97,7 +97,7 @@ intel_pmt_read(struct file *filp, struct kobject *kobj,
+ 	if (count > entry->size - off)
+ 		count = entry->size - off;
+ 
+-	count = pmt_telem_read_mmio(entry->ep->pcidev, entry->cb, entry->header.guid, buf,
++	count = pmt_telem_read_mmio(entry->pcidev, entry->cb, entry->header.guid, buf,
+ 				    entry->base, off, count);
+ 
+ 	return count;
+@@ -252,6 +252,7 @@ static int intel_pmt_populate_entry(struct intel_pmt_entry *entry,
+ 		return -EINVAL;
+ 	}
+ 
++	entry->pcidev = pci_dev;
+ 	entry->guid = header->guid;
+ 	entry->size = header->size;
+ 	entry->cb = ivdev->priv_data;
+diff --git a/drivers/platform/x86/intel/pmt/class.h b/drivers/platform/x86/intel/pmt/class.h
+index b2006d57779d66..f6ce80c4e05111 100644
+--- a/drivers/platform/x86/intel/pmt/class.h
++++ b/drivers/platform/x86/intel/pmt/class.h
+@@ -39,6 +39,7 @@ struct intel_pmt_header {
+ 
+ struct intel_pmt_entry {
+ 	struct telem_endpoint	*ep;
++	struct pci_dev		*pcidev;
+ 	struct intel_pmt_header	header;
+ 	struct bin_attribute	pmt_bin_attr;
+ 	struct kobject		*kobj;
+diff --git a/drivers/power/supply/cpcap-charger.c b/drivers/power/supply/cpcap-charger.c
+index 91e7292d86bb77..66dc622159a6bb 100644
+--- a/drivers/power/supply/cpcap-charger.c
++++ b/drivers/power/supply/cpcap-charger.c
+@@ -688,9 +688,8 @@ static void cpcap_usb_detect(struct work_struct *work)
+ 		struct power_supply *battery;
+ 
+ 		battery = power_supply_get_by_name("battery");
+-		if (IS_ERR_OR_NULL(battery)) {
+-			dev_err(ddata->dev, "battery power_supply not available %li\n",
+-					PTR_ERR(battery));
++		if (!battery) {
++			dev_err(ddata->dev, "battery power_supply not available\n");
+ 			return;
+ 		}
+ 
+diff --git a/drivers/power/supply/max14577_charger.c b/drivers/power/supply/max14577_charger.c
+index b28c04157709a7..90d40b35256fa3 100644
+--- a/drivers/power/supply/max14577_charger.c
++++ b/drivers/power/supply/max14577_charger.c
+@@ -501,7 +501,7 @@ static struct max14577_charger_platform_data *max14577_charger_dt_init(
+ static struct max14577_charger_platform_data *max14577_charger_dt_init(
+ 		struct platform_device *pdev)
+ {
+-	return NULL;
++	return ERR_PTR(-ENODATA);
+ }
+ #endif /* CONFIG_OF */
+ 
+@@ -572,7 +572,7 @@ static int max14577_charger_probe(struct platform_device *pdev)
+ 	chg->max14577 = max14577;
+ 
+ 	chg->pdata = max14577_charger_dt_init(pdev);
+-	if (IS_ERR_OR_NULL(chg->pdata))
++	if (IS_ERR(chg->pdata))
+ 		return PTR_ERR(chg->pdata);
+ 
+ 	ret = max14577_charger_reg_init(chg);
+diff --git a/drivers/powercap/dtpm_cpu.c b/drivers/powercap/dtpm_cpu.c
+index 6b6f51b215501b..99390ec1481f83 100644
+--- a/drivers/powercap/dtpm_cpu.c
++++ b/drivers/powercap/dtpm_cpu.c
+@@ -96,6 +96,8 @@ static u64 get_pd_power_uw(struct dtpm *dtpm)
+ 	int i;
+ 
+ 	pd = em_cpu_get(dtpm_cpu->cpu);
++	if (!pd)
++		return 0;
+ 
+ 	pd_mask = em_span_cpus(pd);
+ 
+diff --git a/drivers/pps/pps.c b/drivers/pps/pps.c
+index 6a02245ea35fec..9463232af8d2e6 100644
+--- a/drivers/pps/pps.c
++++ b/drivers/pps/pps.c
+@@ -41,6 +41,9 @@ static __poll_t pps_cdev_poll(struct file *file, poll_table *wait)
+ 
+ 	poll_wait(file, &pps->queue, wait);
+ 
++	if (pps->last_fetched_ev == pps->last_ev)
++		return 0;
++
+ 	return EPOLLIN | EPOLLRDNORM;
+ }
+ 
+@@ -186,9 +189,11 @@ static long pps_cdev_ioctl(struct file *file,
+ 		if (err)
+ 			return err;
+ 
+-		/* Return the fetched timestamp */
++		/* Return the fetched timestamp and save the last fetched event */
+ 		spin_lock_irq(&pps->lock);
+ 
++		pps->last_fetched_ev = pps->last_ev;
++
+ 		fdata.info.assert_sequence = pps->assert_sequence;
+ 		fdata.info.clear_sequence = pps->clear_sequence;
+ 		fdata.info.assert_tu = pps->assert_tu;
+@@ -272,9 +277,11 @@ static long pps_cdev_compat_ioctl(struct file *file,
+ 		if (err)
+ 			return err;
+ 
+-		/* Return the fetched timestamp */
++		/* Return the fetched timestamp and save the last fetched event */
+ 		spin_lock_irq(&pps->lock);
+ 
++		pps->last_fetched_ev = pps->last_ev;
++
+ 		compat.info.assert_sequence = pps->assert_sequence;
+ 		compat.info.clear_sequence = pps->clear_sequence;
+ 		compat.info.current_mode = pps->current_mode;
+diff --git a/drivers/remoteproc/xlnx_r5_remoteproc.c b/drivers/remoteproc/xlnx_r5_remoteproc.c
+index 5aeedeaf3c415e..c165422d06516b 100644
+--- a/drivers/remoteproc/xlnx_r5_remoteproc.c
++++ b/drivers/remoteproc/xlnx_r5_remoteproc.c
+@@ -906,6 +906,8 @@ static struct zynqmp_r5_core *zynqmp_r5_add_rproc_core(struct device *cdev)
+ 
+ 	rproc_coredump_set_elf_info(r5_rproc, ELFCLASS32, EM_ARM);
+ 
++	r5_rproc->recovery_disabled = true;
++	r5_rproc->has_iommu = false;
+ 	r5_rproc->auto_boot = false;
+ 	r5_core = r5_rproc->priv;
+ 	r5_core->dev = cdev;
+diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
+index 5efbe69bf5ca8c..c8a666de9cbe91 100644
+--- a/drivers/rtc/rtc-ds1307.c
++++ b/drivers/rtc/rtc-ds1307.c
+@@ -1466,7 +1466,7 @@ static long ds3231_clk_sqw_round_rate(struct clk_hw *hw, unsigned long rate,
+ 			return ds3231_clk_sqw_rates[i];
+ 	}
+ 
+-	return 0;
++	return ds3231_clk_sqw_rates[ARRAY_SIZE(ds3231_clk_sqw_rates) - 1];
+ }
+ 
+ static int ds3231_clk_sqw_set_rate(struct clk_hw *hw, unsigned long rate,
+diff --git a/drivers/rtc/rtc-hym8563.c b/drivers/rtc/rtc-hym8563.c
+index 63f11ea3589d64..759dc2ad6e3b2a 100644
+--- a/drivers/rtc/rtc-hym8563.c
++++ b/drivers/rtc/rtc-hym8563.c
+@@ -294,7 +294,7 @@ static long hym8563_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
+ 		if (clkout_rates[i] <= rate)
+ 			return clkout_rates[i];
+ 
+-	return 0;
++	return clkout_rates[0];
+ }
+ 
+ static int hym8563_clkout_set_rate(struct clk_hw *hw, unsigned long rate,
+diff --git a/drivers/rtc/rtc-nct3018y.c b/drivers/rtc/rtc-nct3018y.c
+index 76c5f464b2daeb..cea05fca0bccdd 100644
+--- a/drivers/rtc/rtc-nct3018y.c
++++ b/drivers/rtc/rtc-nct3018y.c
+@@ -376,7 +376,7 @@ static long nct3018y_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
+ 		if (clkout_rates[i] <= rate)
+ 			return clkout_rates[i];
+ 
+-	return 0;
++	return clkout_rates[0];
+ }
+ 
+ static int nct3018y_clkout_set_rate(struct clk_hw *hw, unsigned long rate,
+diff --git a/drivers/rtc/rtc-pcf85063.c b/drivers/rtc/rtc-pcf85063.c
+index 73848f764559b4..2b4921c23467d7 100644
+--- a/drivers/rtc/rtc-pcf85063.c
++++ b/drivers/rtc/rtc-pcf85063.c
+@@ -410,7 +410,7 @@ static long pcf85063_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
+ 		if (clkout_rates[i] <= rate)
+ 			return clkout_rates[i];
+ 
+-	return 0;
++	return clkout_rates[0];
+ }
+ 
+ static int pcf85063_clkout_set_rate(struct clk_hw *hw, unsigned long rate,
+diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c
+index 647d52f1f5c5e8..23b21b9089159a 100644
+--- a/drivers/rtc/rtc-pcf8563.c
++++ b/drivers/rtc/rtc-pcf8563.c
+@@ -386,7 +386,7 @@ static long pcf8563_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
+ 		if (clkout_rates[i] <= rate)
+ 			return clkout_rates[i];
+ 
+-	return 0;
++	return clkout_rates[0];
+ }
+ 
+ static int pcf8563_clkout_set_rate(struct clk_hw *hw, unsigned long rate,
+diff --git a/drivers/rtc/rtc-rv3028.c b/drivers/rtc/rtc-rv3028.c
+index 2f001c59c61d54..86b7f821e937b2 100644
+--- a/drivers/rtc/rtc-rv3028.c
++++ b/drivers/rtc/rtc-rv3028.c
+@@ -738,7 +738,7 @@ static long rv3028_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
+ 		if (clkout_rates[i] <= rate)
+ 			return clkout_rates[i];
+ 
+-	return 0;
++	return clkout_rates[0];
+ }
+ 
+ static int rv3028_clkout_set_rate(struct clk_hw *hw, unsigned long rate,
+diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
+index f4622ee4d89473..6111913c858c04 100644
+--- a/drivers/s390/crypto/ap_bus.h
++++ b/drivers/s390/crypto/ap_bus.h
+@@ -180,7 +180,7 @@ struct ap_card {
+ 	atomic64_t total_request_count;	/* # requests ever for this AP device.*/
+ };
+ 
+-#define TAPQ_CARD_HWINFO_MASK 0xFEFF0000FFFF0F0FUL
++#define TAPQ_CARD_HWINFO_MASK 0xFFFF0000FFFF0F0FUL
+ #define ASSOC_IDX_INVALID 0x10000
+ 
+ #define to_ap_card(x) container_of((x), struct ap_card, ap_dev.device)
+diff --git a/drivers/scsi/elx/efct/efct_lio.c b/drivers/scsi/elx/efct/efct_lio.c
+index 9ac69356b13e08..bd3d489e56ae93 100644
+--- a/drivers/scsi/elx/efct/efct_lio.c
++++ b/drivers/scsi/elx/efct/efct_lio.c
+@@ -382,7 +382,7 @@ efct_lio_sg_unmap(struct efct_io *io)
+ 		return;
+ 
+ 	dma_unmap_sg(&io->efct->pci->dev, cmd->t_data_sg,
+-		     ocp->seg_map_cnt, cmd->data_direction);
++		     cmd->t_data_nents, cmd->data_direction);
+ 	ocp->seg_map_cnt = 0;
+ }
+ 
+diff --git a/drivers/scsi/ibmvscsi_tgt/libsrp.c b/drivers/scsi/ibmvscsi_tgt/libsrp.c
+index 8a0e28aec928e4..0ecad398ed3db0 100644
+--- a/drivers/scsi/ibmvscsi_tgt/libsrp.c
++++ b/drivers/scsi/ibmvscsi_tgt/libsrp.c
+@@ -184,7 +184,8 @@ static int srp_direct_data(struct ibmvscsis_cmd *cmd, struct srp_direct_buf *md,
+ 	err = rdma_io(cmd, sg, nsg, md, 1, dir, len);
+ 
+ 	if (dma_map)
+-		dma_unmap_sg(iue->target->dev, sg, nsg, DMA_BIDIRECTIONAL);
++		dma_unmap_sg(iue->target->dev, sg, cmd->se_cmd.t_data_nents,
++			     DMA_BIDIRECTIONAL);
+ 
+ 	return err;
+ }
+@@ -256,7 +257,8 @@ static int srp_indirect_data(struct ibmvscsis_cmd *cmd, struct srp_cmd *srp_cmd,
+ 	err = rdma_io(cmd, sg, nsg, md, nmd, dir, len);
+ 
+ 	if (dma_map)
+-		dma_unmap_sg(iue->target->dev, sg, nsg, DMA_BIDIRECTIONAL);
++		dma_unmap_sg(iue->target->dev, sg, cmd->se_cmd.t_data_nents,
++			     DMA_BIDIRECTIONAL);
+ 
+ free_mem:
+ 	if (token && dma_map) {
+diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
+index 355a0bc0828e74..bb89a2e33eb407 100644
+--- a/drivers/scsi/isci/request.c
++++ b/drivers/scsi/isci/request.c
+@@ -2904,7 +2904,7 @@ static void isci_request_io_request_complete(struct isci_host *ihost,
+ 					 task->total_xfer_len, task->data_dir);
+ 		else  /* unmap the sgl dma addresses */
+ 			dma_unmap_sg(&ihost->pdev->dev, task->scatter,
+-				     request->num_sg_entries, task->data_dir);
++				     task->num_scatter, task->data_dir);
+ 		break;
+ 	case SAS_PROTOCOL_SMP: {
+ 		struct scatterlist *sg = &task->smp_task.smp_req;
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+index 9599d7a5002868..91aa9de3b84f45 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+@@ -10790,8 +10790,7 @@ _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
+ 		break;
+ 	case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
+ 		_scsih_pcie_topology_change_event(ioc, fw_event);
+-		ioc->current_event = NULL;
+-		return;
++		break;
+ 	}
+ out:
+ 	fw_event_work_put(fw_event);
+diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
+index 1444b1f1c4c886..d6897432cf0f56 100644
+--- a/drivers/scsi/mvsas/mv_sas.c
++++ b/drivers/scsi/mvsas/mv_sas.c
+@@ -828,7 +828,7 @@ static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf
+ 	dev_printk(KERN_ERR, mvi->dev, "mvsas prep failed[%d]!\n", rc);
+ 	if (!sas_protocol_ata(task->task_proto))
+ 		if (n_elem)
+-			dma_unmap_sg(mvi->dev, task->scatter, n_elem,
++			dma_unmap_sg(mvi->dev, task->scatter, task->num_scatter,
+ 				     task->data_dir);
+ prep_out:
+ 	return rc;
+@@ -874,7 +874,7 @@ static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task,
+ 	if (!sas_protocol_ata(task->task_proto))
+ 		if (slot->n_elem)
+ 			dma_unmap_sg(mvi->dev, task->scatter,
+-				     slot->n_elem, task->data_dir);
++				     task->num_scatter, task->data_dir);
+ 
+ 	switch (task->task_proto) {
+ 	case SAS_PROTOCOL_SMP:
+diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
+index 7a5bebf5b096cd..7528bb7c06bb45 100644
+--- a/drivers/scsi/scsi_transport_iscsi.c
++++ b/drivers/scsi/scsi_transport_iscsi.c
+@@ -2170,6 +2170,8 @@ static int iscsi_iter_destroy_conn_fn(struct device *dev, void *data)
+ 		return 0;
+ 
+ 	iscsi_remove_conn(iscsi_dev_to_conn(dev));
++	iscsi_put_conn(iscsi_dev_to_conn(dev));
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index 86dde3e7debba4..e1b06f803a94b1 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -4179,7 +4179,9 @@ static void sd_shutdown(struct device *dev)
+ 	if ((system_state != SYSTEM_RESTART &&
+ 	     sdkp->device->manage_system_start_stop) ||
+ 	    (system_state == SYSTEM_POWER_OFF &&
+-	     sdkp->device->manage_shutdown)) {
++	     sdkp->device->manage_shutdown) ||
++	    (system_state == SYSTEM_RUNNING &&
++	     sdkp->device->manage_runtime_start_stop)) {
+ 		sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
+ 		sd_start_stop_device(sdkp, 0);
+ 	}
+diff --git a/drivers/soc/qcom/pmic_glink.c b/drivers/soc/qcom/pmic_glink.c
+index baa4ac6704a901..5963f49f6e6e6f 100644
+--- a/drivers/soc/qcom/pmic_glink.c
++++ b/drivers/soc/qcom/pmic_glink.c
+@@ -169,7 +169,10 @@ static int pmic_glink_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
+ 	return 0;
+ }
+ 
+-static void pmic_glink_aux_release(struct device *dev) {}
++static void pmic_glink_aux_release(struct device *dev)
++{
++	of_node_put(dev->of_node);
++}
+ 
+ static int pmic_glink_add_aux_device(struct pmic_glink *pg,
+ 				     struct auxiliary_device *aux,
+@@ -183,8 +186,10 @@ static int pmic_glink_add_aux_device(struct pmic_glink *pg,
+ 	aux->dev.release = pmic_glink_aux_release;
+ 	device_set_of_node_from_dev(&aux->dev, parent);
+ 	ret = auxiliary_device_init(aux);
+-	if (ret)
++	if (ret) {
++		of_node_put(aux->dev.of_node);
+ 		return ret;
++	}
+ 
+ 	ret = auxiliary_device_add(aux);
+ 	if (ret)
+diff --git a/drivers/soc/qcom/qmi_encdec.c b/drivers/soc/qcom/qmi_encdec.c
+index bb09eff85cff3b..dafe0a4c202e14 100644
+--- a/drivers/soc/qcom/qmi_encdec.c
++++ b/drivers/soc/qcom/qmi_encdec.c
+@@ -304,6 +304,8 @@ static int qmi_encode(const struct qmi_elem_info *ei_array, void *out_buf,
+ 	const void *buf_src;
+ 	int encode_tlv = 0;
+ 	int rc;
++	u8 val8;
++	u16 val16;
+ 
+ 	if (!ei_array)
+ 		return 0;
+@@ -338,7 +340,6 @@ static int qmi_encode(const struct qmi_elem_info *ei_array, void *out_buf,
+ 			break;
+ 
+ 		case QMI_DATA_LEN:
+-			memcpy(&data_len_value, buf_src, temp_ei->elem_size);
+ 			data_len_sz = temp_ei->elem_size == sizeof(u8) ?
+ 					sizeof(u8) : sizeof(u16);
+ 			/* Check to avoid out of range buffer access */
+@@ -348,8 +349,17 @@ static int qmi_encode(const struct qmi_elem_info *ei_array, void *out_buf,
+ 				       __func__);
+ 				return -ETOOSMALL;
+ 			}
+-			rc = qmi_encode_basic_elem(buf_dst, &data_len_value,
+-						   1, data_len_sz);
++			if (data_len_sz == sizeof(u8)) {
++				val8 = *(u8 *)buf_src;
++				data_len_value = (u32)val8;
++				rc = qmi_encode_basic_elem(buf_dst, &val8,
++							   1, data_len_sz);
++			} else {
++				val16 = *(u16 *)buf_src;
++				data_len_value = (u32)le16_to_cpu(val16);
++				rc = qmi_encode_basic_elem(buf_dst, &val16,
++							   1, data_len_sz);
++			}
+ 			UPDATE_ENCODE_VARIABLES(temp_ei, buf_dst,
+ 						encoded_bytes, tlv_len,
+ 						encode_tlv, rc);
+@@ -523,14 +533,23 @@ static int qmi_decode_string_elem(const struct qmi_elem_info *ei_array,
+ 	u32 string_len = 0;
+ 	u32 string_len_sz = 0;
+ 	const struct qmi_elem_info *temp_ei = ei_array;
++	u8 val8;
++	u16 val16;
+ 
+ 	if (dec_level == 1) {
+ 		string_len = tlv_len;
+ 	} else {
+ 		string_len_sz = temp_ei->elem_len <= U8_MAX ?
+ 				sizeof(u8) : sizeof(u16);
+-		rc = qmi_decode_basic_elem(&string_len, buf_src,
+-					   1, string_len_sz);
++		if (string_len_sz == sizeof(u8)) {
++			rc = qmi_decode_basic_elem(&val8, buf_src,
++						   1, string_len_sz);
++			string_len = (u32)val8;
++		} else {
++			rc = qmi_decode_basic_elem(&val16, buf_src,
++						   1, string_len_sz);
++			string_len = (u32)val16;
++		}
+ 		decoded_bytes += rc;
+ 	}
+ 
+@@ -604,6 +623,9 @@ static int qmi_decode(const struct qmi_elem_info *ei_array, void *out_c_struct,
+ 	u32 decoded_bytes = 0;
+ 	const void *buf_src = in_buf;
+ 	int rc;
++	u8 val8;
++	u16 val16;
++	u32 val32;
+ 
+ 	while (decoded_bytes < in_buf_len) {
+ 		if (dec_level >= 2 && temp_ei->data_type == QMI_EOTI)
+@@ -642,9 +664,17 @@ static int qmi_decode(const struct qmi_elem_info *ei_array, void *out_c_struct,
+ 		if (temp_ei->data_type == QMI_DATA_LEN) {
+ 			data_len_sz = temp_ei->elem_size == sizeof(u8) ?
+ 					sizeof(u8) : sizeof(u16);
+-			rc = qmi_decode_basic_elem(&data_len_value, buf_src,
+-						   1, data_len_sz);
+-			memcpy(buf_dst, &data_len_value, sizeof(u32));
++			if (data_len_sz == sizeof(u8)) {
++				rc = qmi_decode_basic_elem(&val8, buf_src,
++							   1, data_len_sz);
++				data_len_value = (u32)val8;
++			} else {
++				rc = qmi_decode_basic_elem(&val16, buf_src,
++							   1, data_len_sz);
++				data_len_value = (u32)val16;
++			}
++			val32 = cpu_to_le32(data_len_value);
++			memcpy(buf_dst, &val32, sizeof(u32));
+ 			temp_ei = temp_ei + 1;
+ 			buf_dst = out_c_struct + temp_ei->offset;
+ 			tlv_len -= data_len_sz;
+diff --git a/drivers/soc/tegra/cbb/tegra234-cbb.c b/drivers/soc/tegra/cbb/tegra234-cbb.c
+index 5cf0e8c341644c..e8cc46874c7297 100644
+--- a/drivers/soc/tegra/cbb/tegra234-cbb.c
++++ b/drivers/soc/tegra/cbb/tegra234-cbb.c
+@@ -185,6 +185,8 @@ static void tegra234_cbb_error_clear(struct tegra_cbb *cbb)
+ {
+ 	struct tegra234_cbb *priv = to_tegra234_cbb(cbb);
+ 
++	writel(0, priv->mon + FABRIC_MN_MASTER_ERR_FORCE_0);
++
+ 	writel(0x3f, priv->mon + FABRIC_MN_MASTER_ERR_STATUS_0);
+ 	dsb(sy);
+ }
+diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c
+index 7aa4900dcf3172..6c1e3aed816248 100644
+--- a/drivers/soundwire/stream.c
++++ b/drivers/soundwire/stream.c
+@@ -1414,7 +1414,7 @@ static int _sdw_prepare_stream(struct sdw_stream_runtime *stream,
+ 		if (ret < 0) {
+ 			dev_err(bus->dev, "Prepare port(s) failed ret = %d\n",
+ 				ret);
+-			return ret;
++			goto restore_params;
+ 		}
+ 	}
+ 
+diff --git a/drivers/spi/spi-cs42l43.c b/drivers/spi/spi-cs42l43.c
+index 5b8ed65f8094d2..7a02fb42a88ba6 100644
+--- a/drivers/spi/spi-cs42l43.c
++++ b/drivers/spi/spi-cs42l43.c
+@@ -265,7 +265,7 @@ static struct spi_board_info *cs42l43_create_bridge_amp(struct cs42l43_spi *priv
+ 	struct spi_board_info *info;
+ 
+ 	if (spkid >= 0) {
+-		props = devm_kmalloc(priv->dev, sizeof(*props), GFP_KERNEL);
++		props = devm_kcalloc(priv->dev, 2, sizeof(*props), GFP_KERNEL);
+ 		if (!props)
+ 			return NULL;
+ 
+diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
+index fc72a89fb3a7b7..3b1b810f33bf79 100644
+--- a/drivers/spi/spi-stm32.c
++++ b/drivers/spi/spi-stm32.c
+@@ -2069,9 +2069,15 @@ static int stm32_spi_probe(struct platform_device *pdev)
+ 	struct resource *res;
+ 	struct reset_control *rst;
+ 	struct device_node *np = pdev->dev.of_node;
++	const struct stm32_spi_cfg *cfg;
+ 	bool device_mode;
+ 	int ret;
+-	const struct stm32_spi_cfg *cfg = of_device_get_match_data(&pdev->dev);
++
++	cfg = of_device_get_match_data(&pdev->dev);
++	if (!cfg) {
++		dev_err(&pdev->dev, "Failed to get match data for platform\n");
++		return -ENODEV;
++	}
+ 
+ 	device_mode = of_property_read_bool(np, "spi-slave");
+ 	if (!cfg->has_device_mode && device_mode) {
+diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
+index 4cfa494243b983..8fab5126765d49 100644
+--- a/drivers/staging/fbtft/fbtft-core.c
++++ b/drivers/staging/fbtft/fbtft-core.c
+@@ -694,6 +694,7 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
+ 	return info;
+ 
+ release_framebuf:
++	fb_deferred_io_cleanup(info);
+ 	framebuffer_release(info);
+ 
+ alloc_fail:
+diff --git a/drivers/staging/greybus/gbphy.c b/drivers/staging/greybus/gbphy.c
+index 6adcad28663305..60cf09a302a7e3 100644
+--- a/drivers/staging/greybus/gbphy.c
++++ b/drivers/staging/greybus/gbphy.c
+@@ -102,8 +102,8 @@ static int gbphy_dev_uevent(const struct device *dev, struct kobj_uevent_env *en
+ }
+ 
+ static const struct gbphy_device_id *
+-gbphy_dev_match_id(struct gbphy_device *gbphy_dev,
+-		   struct gbphy_driver *gbphy_drv)
++gbphy_dev_match_id(const struct gbphy_device *gbphy_dev,
++		   const struct gbphy_driver *gbphy_drv)
+ {
+ 	const struct gbphy_device_id *id = gbphy_drv->id_table;
+ 
+@@ -119,7 +119,7 @@ gbphy_dev_match_id(struct gbphy_device *gbphy_dev,
+ 
+ static int gbphy_dev_match(struct device *dev, const struct device_driver *drv)
+ {
+-	struct gbphy_driver *gbphy_drv = to_gbphy_driver(drv);
++	const struct gbphy_driver *gbphy_drv = to_gbphy_driver(drv);
+ 	struct gbphy_device *gbphy_dev = to_gbphy_dev(dev);
+ 	const struct gbphy_device_id *id;
+ 
+diff --git a/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c b/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c
+index e176483df301f5..b86494faa63adb 100644
+--- a/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c
++++ b/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c
+@@ -1358,14 +1358,15 @@ static int gmin_get_config_var(struct device *maindev,
+ 	if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE))
+ 		status = efi.get_variable(var16, &GMIN_CFG_VAR_EFI_GUID, NULL,
+ 					  (unsigned long *)out_len, out);
+-	if (status == EFI_SUCCESS)
++	if (status == EFI_SUCCESS) {
+ 		dev_info(maindev, "found EFI entry for '%s'\n", var8);
+-	else if (is_gmin)
++		return 0;
++	}
++	if (is_gmin)
+ 		dev_info(maindev, "Failed to find EFI gmin variable %s\n", var8);
+ 	else
+ 		dev_info(maindev, "Failed to find EFI variable %s\n", var8);
+-
+-	return ret;
++	return -ENOENT;
+ }
+ 
+ int gmin_get_var_int(struct device *dev, bool is_gmin, const char *var, int def)
+diff --git a/drivers/staging/nvec/nvec_power.c b/drivers/staging/nvec/nvec_power.c
+index 9943b1fff1905d..573521e1703bb5 100644
+--- a/drivers/staging/nvec/nvec_power.c
++++ b/drivers/staging/nvec/nvec_power.c
+@@ -194,7 +194,7 @@ static int nvec_power_bat_notifier(struct notifier_block *nb,
+ 		break;
+ 	case MANUFACTURER:
+ 		memcpy(power->bat_manu, &res->plc, res->length - 2);
+-		power->bat_model[res->length - 2] = '\0';
++		power->bat_manu[res->length - 2] = '\0';
+ 		break;
+ 	case MODEL:
+ 		memcpy(power->bat_model, &res->plc, res->length - 2);
+diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
+index a6299cb19237c0..e079cb5d9ec690 100644
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -4337,7 +4337,7 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
+ 	hba->uic_async_done = NULL;
+ 	if (reenable_intr)
+ 		ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
+-	if (ret) {
++	if (ret && !hba->pm_op_in_progress) {
+ 		ufshcd_set_link_broken(hba);
+ 		ufshcd_schedule_eh_work(hba);
+ 	}
+@@ -4345,6 +4345,14 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
+ 	spin_unlock_irqrestore(hba->host->host_lock, flags);
+ 	mutex_unlock(&hba->uic_cmd_mutex);
+ 
++	/*
++	 * If the h8 exit fails during the runtime resume process, it becomes
++	 * stuck and cannot be recovered through the error handler.  To fix
++	 * this, use link recovery instead of the error handler.
++	 */
++	if (ret && hba->pm_op_in_progress)
++		ret = ufshcd_link_recovery(hba);
++
+ 	return ret;
+ }
+ 
+diff --git a/drivers/usb/early/xhci-dbc.c b/drivers/usb/early/xhci-dbc.c
+index 341408410ed934..41118bba91978d 100644
+--- a/drivers/usb/early/xhci-dbc.c
++++ b/drivers/usb/early/xhci-dbc.c
+@@ -681,6 +681,10 @@ int __init early_xdbc_setup_hardware(void)
+ 
+ 		xdbc.table_base = NULL;
+ 		xdbc.out_buf = NULL;
++
++		early_iounmap(xdbc.xhci_base, xdbc.xhci_length);
++		xdbc.xhci_base = NULL;
++		xdbc.xhci_length = 0;
+ 	}
+ 
+ 	return ret;
+diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
+index 301a435b9ee373..460a102c1419cb 100644
+--- a/drivers/usb/gadget/composite.c
++++ b/drivers/usb/gadget/composite.c
+@@ -2489,6 +2489,11 @@ int composite_os_desc_req_prepare(struct usb_composite_dev *cdev,
+ 	if (!cdev->os_desc_req->buf) {
+ 		ret = -ENOMEM;
+ 		usb_ep_free_request(ep0, cdev->os_desc_req);
++		/*
++		 * Set os_desc_req to NULL so that composite_dev_cleanup()
++		 * will not try to free it again.
++		 */
++		cdev->os_desc_req = NULL;
+ 		goto end;
+ 	}
+ 	cdev->os_desc_req->context = cdev;
+diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
+index d8bd2d82e9ec63..ab4d170469f578 100644
+--- a/drivers/usb/gadget/function/f_hid.c
++++ b/drivers/usb/gadget/function/f_hid.c
+@@ -1275,18 +1275,19 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f)
+ 
+ 	if (!hidg->workqueue) {
+ 		status = -ENOMEM;
+-		goto fail;
++		goto fail_free_descs;
+ 	}
+ 
+ 	/* create char device */
+ 	cdev_init(&hidg->cdev, &f_hidg_fops);
+ 	status = cdev_device_add(&hidg->cdev, &hidg->dev);
+ 	if (status)
+-		goto fail_free_descs;
++		goto fail_free_all;
+ 
+ 	return 0;
+-fail_free_descs:
++fail_free_all:
+ 	destroy_workqueue(hidg->workqueue);
++fail_free_descs:
+ 	usb_free_all_descriptors(f);
+ fail:
+ 	ERROR(f->config->cdev, "hidg_bind FAILED\n");
+diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
+index 3a9bdf91675568..8cf278a40bd916 100644
+--- a/drivers/usb/host/xhci-plat.c
++++ b/drivers/usb/host/xhci-plat.c
+@@ -152,7 +152,7 @@ int xhci_plat_probe(struct platform_device *pdev, struct device *sysdev, const s
+ 	int			ret;
+ 	int			irq;
+ 	struct xhci_plat_priv	*priv = NULL;
+-	bool			of_match;
++	const struct of_device_id *of_match;
+ 
+ 	if (usb_disabled())
+ 		return -ENODEV;
+diff --git a/drivers/usb/misc/apple-mfi-fastcharge.c b/drivers/usb/misc/apple-mfi-fastcharge.c
+index ac8695195c13c8..8e852f4b8262e6 100644
+--- a/drivers/usb/misc/apple-mfi-fastcharge.c
++++ b/drivers/usb/misc/apple-mfi-fastcharge.c
+@@ -44,6 +44,7 @@ MODULE_DEVICE_TABLE(usb, mfi_fc_id_table);
+ struct mfi_device {
+ 	struct usb_device *udev;
+ 	struct power_supply *battery;
++	struct power_supply_desc battery_desc;
+ 	int charge_type;
+ };
+ 
+@@ -178,6 +179,7 @@ static int mfi_fc_probe(struct usb_device *udev)
+ {
+ 	struct power_supply_config battery_cfg = {};
+ 	struct mfi_device *mfi = NULL;
++	char *battery_name;
+ 	int err;
+ 
+ 	if (!mfi_fc_match(udev))
+@@ -187,23 +189,38 @@ static int mfi_fc_probe(struct usb_device *udev)
+ 	if (!mfi)
+ 		return -ENOMEM;
+ 
++	battery_name = kasprintf(GFP_KERNEL, "apple_mfi_fastcharge_%d-%d",
++				 udev->bus->busnum, udev->devnum);
++	if (!battery_name) {
++		err = -ENOMEM;
++		goto err_free_mfi;
++	}
++
++	mfi->battery_desc = apple_mfi_fc_desc;
++	mfi->battery_desc.name = battery_name;
++
+ 	battery_cfg.drv_data = mfi;
+ 
+ 	mfi->charge_type = POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
+ 	mfi->battery = power_supply_register(&udev->dev,
+-						&apple_mfi_fc_desc,
++						&mfi->battery_desc,
+ 						&battery_cfg);
+ 	if (IS_ERR(mfi->battery)) {
+ 		dev_err(&udev->dev, "Can't register battery\n");
+ 		err = PTR_ERR(mfi->battery);
+-		kfree(mfi);
+-		return err;
++		goto err_free_name;
+ 	}
+ 
+ 	mfi->udev = usb_get_dev(udev);
+ 	dev_set_drvdata(&udev->dev, mfi);
+ 
+ 	return 0;
++
++err_free_name:
++	kfree(battery_name);
++err_free_mfi:
++	kfree(mfi);
++	return err;
+ }
+ 
+ static void mfi_fc_disconnect(struct usb_device *udev)
+@@ -213,6 +230,7 @@ static void mfi_fc_disconnect(struct usb_device *udev)
+ 	mfi = dev_get_drvdata(&udev->dev);
+ 	if (mfi->battery)
+ 		power_supply_unregister(mfi->battery);
++	kfree(mfi->battery_desc.name);
+ 	dev_set_drvdata(&udev->dev, NULL);
+ 	usb_put_dev(mfi->udev);
+ 	kfree(mfi);
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 147ca50c94beec..e5cd3309342364 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -2346,6 +2346,8 @@ static const struct usb_device_id option_ids[] = {
+ 	  .driver_info = RSVD(3) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe145, 0xff),			/* Foxconn T99W651 RNDIS */
+ 	  .driver_info = RSVD(5) | RSVD(6) },
++	{ USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe15f, 0xff),                     /* Foxconn T99W709 */
++	  .driver_info = RSVD(5) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe167, 0xff),                     /* Foxconn T99W640 MBIM */
+ 	  .driver_info = RSVD(3) },
+ 	{ USB_DEVICE(0x1508, 0x1001),						/* Fibocom NL668 (IOT version) */
+diff --git a/drivers/usb/typec/ucsi/ucsi_yoga_c630.c b/drivers/usb/typec/ucsi/ucsi_yoga_c630.c
+index 40e5da4fd2a454..080b6369a88cfd 100644
+--- a/drivers/usb/typec/ucsi/ucsi_yoga_c630.c
++++ b/drivers/usb/typec/ucsi/ucsi_yoga_c630.c
+@@ -133,17 +133,30 @@ static int yoga_c630_ucsi_probe(struct auxiliary_device *adev,
+ 
+ 	ret = yoga_c630_ec_register_notify(ec, &uec->nb);
+ 	if (ret)
+-		return ret;
++		goto err_destroy;
++
++	ret = ucsi_register(uec->ucsi);
++	if (ret)
++		goto err_unregister;
++
++	return 0;
+ 
+-	return ucsi_register(uec->ucsi);
++err_unregister:
++	yoga_c630_ec_unregister_notify(uec->ec, &uec->nb);
++
++err_destroy:
++	ucsi_destroy(uec->ucsi);
++
++	return ret;
+ }
+ 
+ static void yoga_c630_ucsi_remove(struct auxiliary_device *adev)
+ {
+ 	struct yoga_c630_ucsi *uec = auxiliary_get_drvdata(adev);
+ 
+-	yoga_c630_ec_unregister_notify(uec->ec, &uec->nb);
+ 	ucsi_unregister(uec->ucsi);
++	yoga_c630_ec_unregister_notify(uec->ec, &uec->nb);
++	ucsi_destroy(uec->ucsi);
+ }
+ 
+ static const struct auxiliary_device_id yoga_c630_ucsi_id_table[] = {
+diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c
+index 61424342c09641..c7a20278bc3ca5 100644
+--- a/drivers/vdpa/mlx5/core/mr.c
++++ b/drivers/vdpa/mlx5/core/mr.c
+@@ -908,6 +908,9 @@ void mlx5_vdpa_destroy_mr_resources(struct mlx5_vdpa_dev *mvdev)
+ {
+ 	struct mlx5_vdpa_mr_resources *mres = &mvdev->mres;
+ 
++	if (!mres->wq_gc)
++		return;
++
+ 	atomic_set(&mres->shutdown, 1);
+ 
+ 	flush_delayed_work(&mres->gc_dwork_ent);
+diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
+index 76aedac37a788a..2e0b8c5bec8d25 100644
+--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
++++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
+@@ -2491,7 +2491,7 @@ static void mlx5_vdpa_set_vq_num(struct vdpa_device *vdev, u16 idx, u32 num)
+         }
+ 
+ 	mvq = &ndev->vqs[idx];
+-	ndev->needs_teardown = num != mvq->num_ent;
++	ndev->needs_teardown |= num != mvq->num_ent;
+ 	mvq->num_ent = num;
+ }
+ 
+@@ -3432,15 +3432,17 @@ static void mlx5_vdpa_free(struct vdpa_device *vdev)
+ 
+ 	ndev = to_mlx5_vdpa_ndev(mvdev);
+ 
++	/* Functions called here should be able to work with
++	 * uninitialized resources.
++	 */
+ 	free_fixed_resources(ndev);
+ 	mlx5_vdpa_clean_mrs(mvdev);
+ 	mlx5_vdpa_destroy_mr_resources(&ndev->mvdev);
+-	mlx5_cmd_cleanup_async_ctx(&mvdev->async_ctx);
+-
+ 	if (!is_zero_ether_addr(ndev->config.mac)) {
+ 		pfmdev = pci_get_drvdata(pci_physfn(mvdev->mdev->pdev));
+ 		mlx5_mpfs_del_mac(pfmdev, ndev->config.mac);
+ 	}
++	mlx5_cmd_cleanup_async_ctx(&mvdev->async_ctx);
+ 	mlx5_vdpa_free_resources(&ndev->mvdev);
+ 	free_irqs(ndev);
+ 	kfree(ndev->event_cbs);
+@@ -3888,6 +3890,8 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
+ 	mvdev->actual_features =
+ 			(device_features & BIT_ULL(VIRTIO_F_VERSION_1));
+ 
++	mlx5_cmd_init_async_ctx(mdev, &mvdev->async_ctx);
++
+ 	ndev->vqs = kcalloc(max_vqs, sizeof(*ndev->vqs), GFP_KERNEL);
+ 	ndev->event_cbs = kcalloc(max_vqs + 1, sizeof(*ndev->event_cbs), GFP_KERNEL);
+ 	if (!ndev->vqs || !ndev->event_cbs) {
+@@ -3960,8 +3964,6 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
+ 		ndev->rqt_size = 1;
+ 	}
+ 
+-	mlx5_cmd_init_async_ctx(mdev, &mvdev->async_ctx);
+-
+ 	ndev->mvdev.mlx_features = device_features;
+ 	mvdev->vdev.dma_dev = &mdev->pdev->dev;
+ 	err = mlx5_vdpa_alloc_resources(&ndev->mvdev);
+diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
+index 7ae99691efdf92..7f569ce8fc7bea 100644
+--- a/drivers/vdpa/vdpa_user/vduse_dev.c
++++ b/drivers/vdpa/vdpa_user/vduse_dev.c
+@@ -2215,6 +2215,7 @@ static void vduse_exit(void)
+ 	cdev_del(&vduse_ctrl_cdev);
+ 	unregister_chrdev_region(vduse_major, VDUSE_DEV_MAX);
+ 	class_unregister(&vduse_class);
++	idr_destroy(&vduse_idr);
+ }
+ module_exit(vduse_exit);
+ 
+diff --git a/drivers/vfio/group.c b/drivers/vfio/group.c
+index 95b336de8a1732..5f2b2c950bbc1d 100644
+--- a/drivers/vfio/group.c
++++ b/drivers/vfio/group.c
+@@ -194,11 +194,10 @@ static int vfio_df_group_open(struct vfio_device_file *df)
+ 		 * implies they expected translation to exist
+ 		 */
+ 		if (!capable(CAP_SYS_RAWIO) ||
+-		    vfio_iommufd_device_has_compat_ioas(device, df->iommufd))
++		    vfio_iommufd_device_has_compat_ioas(device, df->iommufd)) {
+ 			ret = -EPERM;
+-		else
+-			ret = 0;
+-		goto out_put_kvm;
++			goto out_put_kvm;
++		}
+ 	}
+ 
+ 	ret = vfio_df_open(df);
+diff --git a/drivers/vfio/iommufd.c b/drivers/vfio/iommufd.c
+index 82eba6966fa508..02852899c2aee4 100644
+--- a/drivers/vfio/iommufd.c
++++ b/drivers/vfio/iommufd.c
+@@ -25,6 +25,10 @@ int vfio_df_iommufd_bind(struct vfio_device_file *df)
+ 
+ 	lockdep_assert_held(&vdev->dev_set->lock);
+ 
++	/* Returns 0 to permit device opening under noiommu mode */
++	if (vfio_device_is_noiommu(vdev))
++		return 0;
++
+ 	return vdev->ops->bind_iommufd(vdev, ictx, &df->devid);
+ }
+ 
+diff --git a/drivers/vfio/pci/pds/vfio_dev.c b/drivers/vfio/pci/pds/vfio_dev.c
+index 76a80ae7087b51..f6e0253a8a14e9 100644
+--- a/drivers/vfio/pci/pds/vfio_dev.c
++++ b/drivers/vfio/pci/pds/vfio_dev.c
+@@ -204,6 +204,7 @@ static const struct vfio_device_ops pds_vfio_ops = {
+ 	.bind_iommufd = vfio_iommufd_physical_bind,
+ 	.unbind_iommufd = vfio_iommufd_physical_unbind,
+ 	.attach_ioas = vfio_iommufd_physical_attach_ioas,
++	.detach_ioas = vfio_iommufd_physical_detach_ioas,
+ };
+ 
+ const struct vfio_device_ops *pds_vfio_ops_info(void)
+diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c
+index 087c273a547fa9..595503fa9ca89f 100644
+--- a/drivers/vfio/pci/vfio_pci_core.c
++++ b/drivers/vfio/pci/vfio_pci_core.c
+@@ -2153,7 +2153,7 @@ int vfio_pci_core_register_device(struct vfio_pci_core_device *vdev)
+ 		return -EBUSY;
+ 	}
+ 
+-	if (pci_is_root_bus(pdev->bus)) {
++	if (pci_is_root_bus(pdev->bus) || pdev->is_virtfn) {
+ 		ret = vfio_assign_device_set(&vdev->vdev, vdev);
+ 	} else if (!pci_probe_reset_slot(pdev->slot)) {
+ 		ret = vfio_assign_device_set(&vdev->vdev, pdev->slot);
+diff --git a/drivers/vfio/vfio_main.c b/drivers/vfio/vfio_main.c
+index a5a62d9d963f72..ae78822f2d7155 100644
+--- a/drivers/vfio/vfio_main.c
++++ b/drivers/vfio/vfio_main.c
+@@ -583,7 +583,8 @@ void vfio_df_close(struct vfio_device_file *df)
+ 
+ 	lockdep_assert_held(&device->dev_set->lock);
+ 
+-	vfio_assert_device_open(device);
++	if (!vfio_assert_device_open(device))
++		return;
+ 	if (device->open_count == 1)
+ 		vfio_df_device_last_close(df);
+ 	device->open_count--;
+diff --git a/drivers/vhost/Kconfig b/drivers/vhost/Kconfig
+index b455d9ab6f3d9c..a4730217bfb65a 100644
+--- a/drivers/vhost/Kconfig
++++ b/drivers/vhost/Kconfig
+@@ -94,4 +94,22 @@ config VHOST_CROSS_ENDIAN_LEGACY
+ 
+ 	  If unsure, say "N".
+ 
++config VHOST_ENABLE_FORK_OWNER_CONTROL
++	bool "Enable VHOST_ENABLE_FORK_OWNER_CONTROL"
++	default y
++	help
++	  This option enables two IOCTLs: VHOST_SET_FORK_FROM_OWNER and
++	  VHOST_GET_FORK_FROM_OWNER. These allow userspace applications
++	  to modify the vhost worker mode for vhost devices.
++
++	  Also expose module parameter 'fork_from_owner_default' to allow users
++	  to configure the default mode for vhost workers.
++
++	  By default, `VHOST_ENABLE_FORK_OWNER_CONTROL` is set to `y`, so
++	  users can change the worker thread mode as needed.
++	  If this config is disabled (n), the related IOCTLs and parameters
++	  will be unavailable.
++
++	  If unsure, say "Y".
++
+ endif
+diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
+index 38d243d914d00b..88f213d1106f9f 100644
+--- a/drivers/vhost/scsi.c
++++ b/drivers/vhost/scsi.c
+@@ -1088,10 +1088,8 @@ vhost_scsi_get_req(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc,
+ 			/* validated at handler entry */
+ 			vs_tpg = vhost_vq_get_backend(vq);
+ 			tpg = READ_ONCE(vs_tpg[*vc->target]);
+-			if (unlikely(!tpg)) {
+-				vq_err(vq, "Target 0x%x does not exist\n", *vc->target);
++			if (unlikely(!tpg))
+ 				goto out;
+-			}
+ 		}
+ 
+ 		if (tpgp)
+diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
+index 63612faeab7271..79b0b7cd28601a 100644
+--- a/drivers/vhost/vhost.c
++++ b/drivers/vhost/vhost.c
+@@ -22,6 +22,7 @@
+ #include <linux/slab.h>
+ #include <linux/vmalloc.h>
+ #include <linux/kthread.h>
++#include <linux/cgroup.h>
+ #include <linux/module.h>
+ #include <linux/sort.h>
+ #include <linux/sched/mm.h>
+@@ -41,6 +42,13 @@ static int max_iotlb_entries = 2048;
+ module_param(max_iotlb_entries, int, 0444);
+ MODULE_PARM_DESC(max_iotlb_entries,
+ 	"Maximum number of iotlb entries. (default: 2048)");
++static bool fork_from_owner_default = VHOST_FORK_OWNER_TASK;
++
++#ifdef CONFIG_VHOST_ENABLE_FORK_OWNER_CONTROL
++module_param(fork_from_owner_default, bool, 0444);
++MODULE_PARM_DESC(fork_from_owner_default,
++		 "Set task mode as the default(default: Y)");
++#endif
+ 
+ enum {
+ 	VHOST_MEMORY_F_LOG = 0x1,
+@@ -242,7 +250,7 @@ static void vhost_worker_queue(struct vhost_worker *worker,
+ 		 * test_and_set_bit() implies a memory barrier.
+ 		 */
+ 		llist_add(&work->node, &worker->work_list);
+-		vhost_task_wake(worker->vtsk);
++		worker->ops->wakeup(worker);
+ 	}
+ }
+ 
+@@ -388,6 +396,44 @@ static void vhost_vq_reset(struct vhost_dev *dev,
+ 	__vhost_vq_meta_reset(vq);
+ }
+ 
++static int vhost_run_work_kthread_list(void *data)
++{
++	struct vhost_worker *worker = data;
++	struct vhost_work *work, *work_next;
++	struct vhost_dev *dev = worker->dev;
++	struct llist_node *node;
++
++	kthread_use_mm(dev->mm);
++
++	for (;;) {
++		/* mb paired w/ kthread_stop */
++		set_current_state(TASK_INTERRUPTIBLE);
++
++		if (kthread_should_stop()) {
++			__set_current_state(TASK_RUNNING);
++			break;
++		}
++		node = llist_del_all(&worker->work_list);
++		if (!node)
++			schedule();
++
++		node = llist_reverse_order(node);
++		/* make sure flag is seen after deletion */
++		smp_wmb();
++		llist_for_each_entry_safe(work, work_next, node, node) {
++			clear_bit(VHOST_WORK_QUEUED, &work->flags);
++			__set_current_state(TASK_RUNNING);
++			kcov_remote_start_common(worker->kcov_handle);
++			work->fn(work);
++			kcov_remote_stop();
++			cond_resched();
++		}
++	}
++	kthread_unuse_mm(dev->mm);
++
++	return 0;
++}
++
+ static bool vhost_run_work_list(void *data)
+ {
+ 	struct vhost_worker *worker = data;
+@@ -552,6 +598,7 @@ void vhost_dev_init(struct vhost_dev *dev,
+ 	dev->byte_weight = byte_weight;
+ 	dev->use_worker = use_worker;
+ 	dev->msg_handler = msg_handler;
++	dev->fork_owner = fork_from_owner_default;
+ 	init_waitqueue_head(&dev->wait);
+ 	INIT_LIST_HEAD(&dev->read_list);
+ 	INIT_LIST_HEAD(&dev->pending_list);
+@@ -581,6 +628,46 @@ long vhost_dev_check_owner(struct vhost_dev *dev)
+ }
+ EXPORT_SYMBOL_GPL(vhost_dev_check_owner);
+ 
++struct vhost_attach_cgroups_struct {
++	struct vhost_work work;
++	struct task_struct *owner;
++	int ret;
++};
++
++static void vhost_attach_cgroups_work(struct vhost_work *work)
++{
++	struct vhost_attach_cgroups_struct *s;
++
++	s = container_of(work, struct vhost_attach_cgroups_struct, work);
++	s->ret = cgroup_attach_task_all(s->owner, current);
++}
++
++static int vhost_attach_task_to_cgroups(struct vhost_worker *worker)
++{
++	struct vhost_attach_cgroups_struct attach;
++	int saved_cnt;
++
++	attach.owner = current;
++
++	vhost_work_init(&attach.work, vhost_attach_cgroups_work);
++	vhost_worker_queue(worker, &attach.work);
++
++	mutex_lock(&worker->mutex);
++
++	/*
++	 * Bypass the attachment_cnt check in __vhost_worker_flush:
++	 * temporarily raise it to INT_MAX so the flush is not skipped.
++	 */
++	saved_cnt = worker->attachment_cnt;
++	worker->attachment_cnt = INT_MAX;
++	__vhost_worker_flush(worker);
++	worker->attachment_cnt = saved_cnt;
++
++	mutex_unlock(&worker->mutex);
++
++	return attach.ret;
++}
++
+ /* Caller should have device mutex */
+ bool vhost_dev_has_owner(struct vhost_dev *dev)
+ {
+@@ -626,7 +713,7 @@ static void vhost_worker_destroy(struct vhost_dev *dev,
+ 
+ 	WARN_ON(!llist_empty(&worker->work_list));
+ 	xa_erase(&dev->worker_xa, worker->id);
+-	vhost_task_stop(worker->vtsk);
++	worker->ops->stop(worker);
+ 	kfree(worker);
+ }
+ 
+@@ -649,42 +736,115 @@ static void vhost_workers_free(struct vhost_dev *dev)
+ 	xa_destroy(&dev->worker_xa);
+ }
+ 
++static void vhost_task_wakeup(struct vhost_worker *worker)
++{
++	return vhost_task_wake(worker->vtsk);
++}
++
++static void vhost_kthread_wakeup(struct vhost_worker *worker)
++{
++	wake_up_process(worker->kthread_task);
++}
++
++static void vhost_task_do_stop(struct vhost_worker *worker)
++{
++	return vhost_task_stop(worker->vtsk);
++}
++
++static void vhost_kthread_do_stop(struct vhost_worker *worker)
++{
++	kthread_stop(worker->kthread_task);
++}
++
++static int vhost_task_worker_create(struct vhost_worker *worker,
++				    struct vhost_dev *dev, const char *name)
++{
++	struct vhost_task *vtsk;
++	u32 id;
++	int ret;
++
++	vtsk = vhost_task_create(vhost_run_work_list, vhost_worker_killed,
++				 worker, name);
++	if (IS_ERR(vtsk))
++		return PTR_ERR(vtsk);
++
++	worker->vtsk = vtsk;
++	vhost_task_start(vtsk);
++	ret = xa_alloc(&dev->worker_xa, &id, worker, xa_limit_32b, GFP_KERNEL);
++	if (ret < 0) {
++		vhost_task_do_stop(worker);
++		return ret;
++	}
++	worker->id = id;
++	return 0;
++}
++
++static int vhost_kthread_worker_create(struct vhost_worker *worker,
++				       struct vhost_dev *dev, const char *name)
++{
++	struct task_struct *task;
++	u32 id;
++	int ret;
++
++	task = kthread_create(vhost_run_work_kthread_list, worker, "%s", name);
++	if (IS_ERR(task))
++		return PTR_ERR(task);
++
++	worker->kthread_task = task;
++	wake_up_process(task);
++	ret = xa_alloc(&dev->worker_xa, &id, worker, xa_limit_32b, GFP_KERNEL);
++	if (ret < 0)
++		goto stop_worker;
++
++	ret = vhost_attach_task_to_cgroups(worker);
++	if (ret)
++		goto stop_worker;
++
++	worker->id = id;
++	return 0;
++
++stop_worker:
++	vhost_kthread_do_stop(worker);
++	return ret;
++}
++
++static const struct vhost_worker_ops kthread_ops = {
++	.create = vhost_kthread_worker_create,
++	.stop = vhost_kthread_do_stop,
++	.wakeup = vhost_kthread_wakeup,
++};
++
++static const struct vhost_worker_ops vhost_task_ops = {
++	.create = vhost_task_worker_create,
++	.stop = vhost_task_do_stop,
++	.wakeup = vhost_task_wakeup,
++};
++
+ static struct vhost_worker *vhost_worker_create(struct vhost_dev *dev)
+ {
+ 	struct vhost_worker *worker;
+-	struct vhost_task *vtsk;
+ 	char name[TASK_COMM_LEN];
+ 	int ret;
+-	u32 id;
++	const struct vhost_worker_ops *ops = dev->fork_owner ? &vhost_task_ops :
++							       &kthread_ops;
+ 
+ 	worker = kzalloc(sizeof(*worker), GFP_KERNEL_ACCOUNT);
+ 	if (!worker)
+ 		return NULL;
+ 
+ 	worker->dev = dev;
++	worker->ops = ops;
+ 	snprintf(name, sizeof(name), "vhost-%d", current->pid);
+ 
+-	vtsk = vhost_task_create(vhost_run_work_list, vhost_worker_killed,
+-				 worker, name);
+-	if (IS_ERR(vtsk))
+-		goto free_worker;
+-
+ 	mutex_init(&worker->mutex);
+ 	init_llist_head(&worker->work_list);
+ 	worker->kcov_handle = kcov_common_handle();
+-	worker->vtsk = vtsk;
+-
+-	vhost_task_start(vtsk);
+-
+-	ret = xa_alloc(&dev->worker_xa, &id, worker, xa_limit_32b, GFP_KERNEL);
++	ret = ops->create(worker, dev, name);
+ 	if (ret < 0)
+-		goto stop_worker;
+-	worker->id = id;
++		goto free_worker;
+ 
+ 	return worker;
+ 
+-stop_worker:
+-	vhost_task_stop(vtsk);
+ free_worker:
+ 	kfree(worker);
+ 	return NULL;
+@@ -865,6 +1025,14 @@ long vhost_worker_ioctl(struct vhost_dev *dev, unsigned int ioctl,
+ 	switch (ioctl) {
+ 	/* dev worker ioctls */
+ 	case VHOST_NEW_WORKER:
++		/*
++		 * vhost_tasks will account for worker threads under the parent's
++		 * NPROC value but kthreads do not. To avoid userspace overflowing
++		 * the system with worker threads, fork_owner must be true.
++		 */
++		if (!dev->fork_owner)
++			return -EFAULT;
++
+ 		ret = vhost_new_worker(dev, &state);
+ 		if (!ret && copy_to_user(argp, &state, sizeof(state)))
+ 			ret = -EFAULT;
+@@ -982,6 +1150,7 @@ void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_iotlb *umem)
+ 
+ 	vhost_dev_cleanup(dev);
+ 
++	dev->fork_owner = fork_from_owner_default;
+ 	dev->umem = umem;
+ 	/* We don't need VQ locks below since vhost_dev_cleanup makes sure
+ 	 * VQs aren't running.
+@@ -2135,6 +2304,45 @@ long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
+ 		goto done;
+ 	}
+ 
++#ifdef CONFIG_VHOST_ENABLE_FORK_OWNER_CONTROL
++	if (ioctl == VHOST_SET_FORK_FROM_OWNER) {
++		/* Only allow modification before owner is set */
++		if (vhost_dev_has_owner(d)) {
++			r = -EBUSY;
++			goto done;
++		}
++		u8 fork_owner_val;
++
++		if (get_user(fork_owner_val, (u8 __user *)argp)) {
++			r = -EFAULT;
++			goto done;
++		}
++		if (fork_owner_val != VHOST_FORK_OWNER_TASK &&
++		    fork_owner_val != VHOST_FORK_OWNER_KTHREAD) {
++			r = -EINVAL;
++			goto done;
++		}
++		d->fork_owner = !!fork_owner_val;
++		r = 0;
++		goto done;
++	}
++	if (ioctl == VHOST_GET_FORK_FROM_OWNER) {
++		u8 fork_owner_val = d->fork_owner;
++
++		if (fork_owner_val != VHOST_FORK_OWNER_TASK &&
++		    fork_owner_val != VHOST_FORK_OWNER_KTHREAD) {
++			r = -EINVAL;
++			goto done;
++		}
++		if (put_user(fork_owner_val, (u8 __user *)argp)) {
++			r = -EFAULT;
++			goto done;
++		}
++		r = 0;
++		goto done;
++	}
++#endif
++
+ 	/* You must be the owner to do anything else */
+ 	r = vhost_dev_check_owner(d);
+ 	if (r)
+diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
+index bb75a292d50cd3..ab704d84fb3446 100644
+--- a/drivers/vhost/vhost.h
++++ b/drivers/vhost/vhost.h
+@@ -26,7 +26,18 @@ struct vhost_work {
+ 	unsigned long		flags;
+ };
+ 
++struct vhost_worker;
++struct vhost_dev;
++
++struct vhost_worker_ops {
++	int (*create)(struct vhost_worker *worker, struct vhost_dev *dev,
++		      const char *name);
++	void (*stop)(struct vhost_worker *worker);
++	void (*wakeup)(struct vhost_worker *worker);
++};
++
+ struct vhost_worker {
++	struct task_struct *kthread_task;
+ 	struct vhost_task	*vtsk;
+ 	struct vhost_dev	*dev;
+ 	/* Used to serialize device wide flushing with worker swapping. */
+@@ -36,6 +47,7 @@ struct vhost_worker {
+ 	u32			id;
+ 	int			attachment_cnt;
+ 	bool			killed;
++	const struct vhost_worker_ops *ops;
+ };
+ 
+ /* Poll a file (eventfd or socket) */
+@@ -176,6 +188,16 @@ struct vhost_dev {
+ 	int byte_weight;
+ 	struct xarray worker_xa;
+ 	bool use_worker;
++	/*
++	 * If fork_owner is true, we use vhost_tasks to create
++	 * the worker so all settings/limits like cgroups, NPROC,
++	 * scheduler, etc. are inherited from the owner. If false,
++	 * we use kthreads and only attach to the same cgroups
++	 * as the owner for compat with older kernels.
++	 * The default value is set by fork_from_owner_default.
++	 */
++	bool fork_owner;
+ 	int (*msg_handler)(struct vhost_dev *dev, u32 asid,
+ 			   struct vhost_iotlb_msg *msg);
+ };
+diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
+index c98786996c6471..678d2802760c8c 100644
+--- a/drivers/video/fbdev/core/fbcon.c
++++ b/drivers/video/fbdev/core/fbcon.c
+@@ -953,13 +953,13 @@ static const char *fbcon_startup(void)
+ 	int rows, cols;
+ 
+ 	/*
+-	 *  If num_registered_fb is zero, this is a call for the dummy part.
++	 *  If fbcon_num_registered_fb is zero, this is a call for the dummy part.
+ 	 *  The frame buffer devices weren't initialized yet.
+ 	 */
+ 	if (!fbcon_num_registered_fb || info_idx == -1)
+ 		return display_desc;
+ 	/*
+-	 * Instead of blindly using registered_fb[0], we use info_idx, set by
++	 * Instead of blindly using fbcon_registered_fb[0], we use info_idx, set by
+ 	 * fbcon_fb_registered();
+ 	 */
+ 	info = fbcon_registered_fb[info_idx];
+diff --git a/drivers/video/fbdev/imxfb.c b/drivers/video/fbdev/imxfb.c
+index ff343e4ed35ba5..c0bdfb10f681b0 100644
+--- a/drivers/video/fbdev/imxfb.c
++++ b/drivers/video/fbdev/imxfb.c
+@@ -1007,8 +1007,13 @@ static int imxfb_probe(struct platform_device *pdev)
+ 	info->fix.smem_start = fbi->map_dma;
+ 
+ 	INIT_LIST_HEAD(&info->modelist);
+-	for (i = 0; i < fbi->num_modes; i++)
+-		fb_add_videomode(&fbi->mode[i].mode, &info->modelist);
++	for (i = 0; i < fbi->num_modes; i++) {
++		ret = fb_add_videomode(&fbi->mode[i].mode, &info->modelist);
++		if (ret) {
++			dev_err(&pdev->dev, "Failed to add videomode\n");
++			goto failed_cmap;
++		}
++	}
+ 
+ 	/*
+ 	 * This makes sure that our colour bitfield
+diff --git a/drivers/watchdog/ziirave_wdt.c b/drivers/watchdog/ziirave_wdt.c
+index 775838346bb50b..09d6721c7bfa59 100644
+--- a/drivers/watchdog/ziirave_wdt.c
++++ b/drivers/watchdog/ziirave_wdt.c
+@@ -302,6 +302,9 @@ static int ziirave_firm_verify(struct watchdog_device *wdd,
+ 		const u16 len = be16_to_cpu(rec->len);
+ 		const u32 addr = be32_to_cpu(rec->addr);
+ 
++		if (len > sizeof(data))
++			return -EINVAL;
++
+ 		if (ziirave_firm_addr_readonly(addr))
+ 			continue;
+ 
+diff --git a/drivers/xen/gntdev-common.h b/drivers/xen/gntdev-common.h
+index 9c286b2a190016..ac8ce3179ba2e9 100644
+--- a/drivers/xen/gntdev-common.h
++++ b/drivers/xen/gntdev-common.h
+@@ -26,6 +26,10 @@ struct gntdev_priv {
+ 	/* lock protects maps and freeable_maps. */
+ 	struct mutex lock;
+ 
++	/* Free instances of struct gntdev_copy_batch. */
++	struct gntdev_copy_batch *batch;
++	struct mutex batch_lock;
++
+ #ifdef CONFIG_XEN_GRANT_DMA_ALLOC
+ 	/* Device for which DMA memory is allocated. */
+ 	struct device *dma_dev;
+diff --git a/drivers/xen/gntdev-dmabuf.c b/drivers/xen/gntdev-dmabuf.c
+index 42adc2c1e06b37..5ab973627d183d 100644
+--- a/drivers/xen/gntdev-dmabuf.c
++++ b/drivers/xen/gntdev-dmabuf.c
+@@ -357,8 +357,11 @@ struct gntdev_dmabuf_export_args {
+ static int dmabuf_exp_from_pages(struct gntdev_dmabuf_export_args *args)
+ {
+ 	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+-	struct gntdev_dmabuf *gntdev_dmabuf;
+-	int ret;
++	struct gntdev_dmabuf *gntdev_dmabuf __free(kfree) = NULL;
++	CLASS(get_unused_fd, ret)(O_CLOEXEC);
++
++	if (ret < 0)
++		return ret;
+ 
+ 	gntdev_dmabuf = kzalloc(sizeof(*gntdev_dmabuf), GFP_KERNEL);
+ 	if (!gntdev_dmabuf)
+@@ -383,32 +386,21 @@ static int dmabuf_exp_from_pages(struct gntdev_dmabuf_export_args *args)
+ 	exp_info.priv = gntdev_dmabuf;
+ 
+ 	gntdev_dmabuf->dmabuf = dma_buf_export(&exp_info);
+-	if (IS_ERR(gntdev_dmabuf->dmabuf)) {
+-		ret = PTR_ERR(gntdev_dmabuf->dmabuf);
+-		gntdev_dmabuf->dmabuf = NULL;
+-		goto fail;
+-	}
+-
+-	ret = dma_buf_fd(gntdev_dmabuf->dmabuf, O_CLOEXEC);
+-	if (ret < 0)
+-		goto fail;
++	if (IS_ERR(gntdev_dmabuf->dmabuf))
++		return PTR_ERR(gntdev_dmabuf->dmabuf);
+ 
+ 	gntdev_dmabuf->fd = ret;
+ 	args->fd = ret;
+ 
+ 	pr_debug("Exporting DMA buffer with fd %d\n", ret);
+ 
++	get_file(gntdev_dmabuf->priv->filp);
+ 	mutex_lock(&args->dmabuf_priv->lock);
+ 	list_add(&gntdev_dmabuf->next, &args->dmabuf_priv->exp_list);
+ 	mutex_unlock(&args->dmabuf_priv->lock);
+-	get_file(gntdev_dmabuf->priv->filp);
+-	return 0;
+ 
+-fail:
+-	if (gntdev_dmabuf->dmabuf)
+-		dma_buf_put(gntdev_dmabuf->dmabuf);
+-	kfree(gntdev_dmabuf);
+-	return ret;
++	fd_install(take_fd(ret), no_free_ptr(gntdev_dmabuf)->dmabuf->file);
++	return 0;
+ }
+ 
+ static struct gntdev_grant_map *
+diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
+index 61faea1f066305..1f21607656182a 100644
+--- a/drivers/xen/gntdev.c
++++ b/drivers/xen/gntdev.c
+@@ -56,6 +56,18 @@ MODULE_AUTHOR("Derek G. Murray <Derek.Murray@cl.cam.ac.uk>, "
+ 	      "Gerd Hoffmann <kraxel@redhat.com>");
+ MODULE_DESCRIPTION("User-space granted page access driver");
+ 
++#define GNTDEV_COPY_BATCH 16
++
++struct gntdev_copy_batch {
++	struct gnttab_copy ops[GNTDEV_COPY_BATCH];
++	struct page *pages[GNTDEV_COPY_BATCH];
++	s16 __user *status[GNTDEV_COPY_BATCH];
++	unsigned int nr_ops;
++	unsigned int nr_pages;
++	bool writeable;
++	struct gntdev_copy_batch *next;
++};
++
+ static unsigned int limit = 64*1024;
+ module_param(limit, uint, 0644);
+ MODULE_PARM_DESC(limit,
+@@ -584,6 +596,8 @@ static int gntdev_open(struct inode *inode, struct file *flip)
+ 	INIT_LIST_HEAD(&priv->maps);
+ 	mutex_init(&priv->lock);
+ 
++	mutex_init(&priv->batch_lock);
++
+ #ifdef CONFIG_XEN_GNTDEV_DMABUF
+ 	priv->dmabuf_priv = gntdev_dmabuf_init(flip);
+ 	if (IS_ERR(priv->dmabuf_priv)) {
+@@ -608,6 +622,7 @@ static int gntdev_release(struct inode *inode, struct file *flip)
+ {
+ 	struct gntdev_priv *priv = flip->private_data;
+ 	struct gntdev_grant_map *map;
++	struct gntdev_copy_batch *batch;
+ 
+ 	pr_debug("priv %p\n", priv);
+ 
+@@ -620,6 +635,14 @@ static int gntdev_release(struct inode *inode, struct file *flip)
+ 	}
+ 	mutex_unlock(&priv->lock);
+ 
++	mutex_lock(&priv->batch_lock);
++	while (priv->batch) {
++		batch = priv->batch;
++		priv->batch = batch->next;
++		kfree(batch);
++	}
++	mutex_unlock(&priv->batch_lock);
++
+ #ifdef CONFIG_XEN_GNTDEV_DMABUF
+ 	gntdev_dmabuf_fini(priv->dmabuf_priv);
+ #endif
+@@ -785,17 +808,6 @@ static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
+ 	return rc;
+ }
+ 
+-#define GNTDEV_COPY_BATCH 16
+-
+-struct gntdev_copy_batch {
+-	struct gnttab_copy ops[GNTDEV_COPY_BATCH];
+-	struct page *pages[GNTDEV_COPY_BATCH];
+-	s16 __user *status[GNTDEV_COPY_BATCH];
+-	unsigned int nr_ops;
+-	unsigned int nr_pages;
+-	bool writeable;
+-};
+-
+ static int gntdev_get_page(struct gntdev_copy_batch *batch, void __user *virt,
+ 				unsigned long *gfn)
+ {
+@@ -953,36 +965,53 @@ static int gntdev_grant_copy_seg(struct gntdev_copy_batch *batch,
+ static long gntdev_ioctl_grant_copy(struct gntdev_priv *priv, void __user *u)
+ {
+ 	struct ioctl_gntdev_grant_copy copy;
+-	struct gntdev_copy_batch batch;
++	struct gntdev_copy_batch *batch;
+ 	unsigned int i;
+ 	int ret = 0;
+ 
+ 	if (copy_from_user(&copy, u, sizeof(copy)))
+ 		return -EFAULT;
+ 
+-	batch.nr_ops = 0;
+-	batch.nr_pages = 0;
++	mutex_lock(&priv->batch_lock);
++	if (!priv->batch) {
++		batch = kmalloc(sizeof(*batch), GFP_KERNEL);
++	} else {
++		batch = priv->batch;
++		priv->batch = batch->next;
++	}
++	mutex_unlock(&priv->batch_lock);
++	if (!batch)
++		return -ENOMEM;
++
++	batch->nr_ops = 0;
++	batch->nr_pages = 0;
+ 
+ 	for (i = 0; i < copy.count; i++) {
+ 		struct gntdev_grant_copy_segment seg;
+ 
+ 		if (copy_from_user(&seg, &copy.segments[i], sizeof(seg))) {
+ 			ret = -EFAULT;
++			gntdev_put_pages(batch);
+ 			goto out;
+ 		}
+ 
+-		ret = gntdev_grant_copy_seg(&batch, &seg, &copy.segments[i].status);
+-		if (ret < 0)
++		ret = gntdev_grant_copy_seg(batch, &seg, &copy.segments[i].status);
++		if (ret < 0) {
++			gntdev_put_pages(batch);
+ 			goto out;
++		}
+ 
+ 		cond_resched();
+ 	}
+-	if (batch.nr_ops)
+-		ret = gntdev_copy(&batch);
+-	return ret;
++	if (batch->nr_ops)
++		ret = gntdev_copy(batch);
++
++ out:
++	mutex_lock(&priv->batch_lock);
++	batch->next = priv->batch;
++	priv->batch = batch;
++	mutex_unlock(&priv->batch_lock);
+ 
+-  out:
+-	gntdev_put_pages(&batch);
+ 	return ret;
+ }
+ 
+diff --git a/fs/ceph/crypto.c b/fs/ceph/crypto.c
+index 3b3c4d8d401ece..9c70622458800a 100644
+--- a/fs/ceph/crypto.c
++++ b/fs/ceph/crypto.c
+@@ -215,35 +215,31 @@ static struct inode *parse_longname(const struct inode *parent,
+ 	struct ceph_client *cl = ceph_inode_to_client(parent);
+ 	struct inode *dir = NULL;
+ 	struct ceph_vino vino = { .snap = CEPH_NOSNAP };
+-	char *inode_number;
+-	char *name_end;
+-	int orig_len = *name_len;
++	char *name_end, *inode_number;
+ 	int ret = -EIO;
+-
++	/* NUL-terminate */
++	char *str __free(kfree) = kmemdup_nul(name, *name_len, GFP_KERNEL);
++	if (!str)
++		return ERR_PTR(-ENOMEM);
+ 	/* Skip initial '_' */
+-	name++;
+-	name_end = strrchr(name, '_');
++	str++;
++	name_end = strrchr(str, '_');
+ 	if (!name_end) {
+-		doutc(cl, "failed to parse long snapshot name: %s\n", name);
++		doutc(cl, "failed to parse long snapshot name: %s\n", str);
+ 		return ERR_PTR(-EIO);
+ 	}
+-	*name_len = (name_end - name);
++	*name_len = (name_end - str);
+ 	if (*name_len <= 0) {
+ 		pr_err_client(cl, "failed to parse long snapshot name\n");
+ 		return ERR_PTR(-EIO);
+ 	}
+ 
+ 	/* Get the inode number */
+-	inode_number = kmemdup_nul(name_end + 1,
+-				   orig_len - *name_len - 2,
+-				   GFP_KERNEL);
+-	if (!inode_number)
+-		return ERR_PTR(-ENOMEM);
++	inode_number = name_end + 1;
+ 	ret = kstrtou64(inode_number, 10, &vino.ino);
+ 	if (ret) {
+-		doutc(cl, "failed to parse inode number: %s\n", name);
+-		dir = ERR_PTR(ret);
+-		goto out;
++		doutc(cl, "failed to parse inode number: %s\n", str);
++		return ERR_PTR(ret);
+ 	}
+ 
+ 	/* And finally the inode */
+@@ -254,9 +250,6 @@ static struct inode *parse_longname(const struct inode *parent,
+ 		if (IS_ERR(dir))
+ 			doutc(cl, "can't find inode %s (%s)\n", inode_number, name);
+ 	}
+-
+-out:
+-	kfree(inode_number);
+ 	return dir;
+ }
+ 
+diff --git a/fs/exfat/file.c b/fs/exfat/file.c
+index 841a5b18e3dfdb..7ac5126aa4f1ea 100644
+--- a/fs/exfat/file.c
++++ b/fs/exfat/file.c
+@@ -623,9 +623,8 @@ static ssize_t exfat_file_write_iter(struct kiocb *iocb, struct iov_iter *iter)
+ 	if (pos > valid_size)
+ 		pos = valid_size;
+ 
+-	if (iocb_is_dsync(iocb) && iocb->ki_pos > pos) {
+-		ssize_t err = vfs_fsync_range(file, pos, iocb->ki_pos - 1,
+-				iocb->ki_flags & IOCB_SYNC);
++	if (iocb->ki_pos > pos) {
++		ssize_t err = generic_write_sync(iocb, iocb->ki_pos - pos);
+ 		if (err < 0)
+ 			return err;
+ 	}
+diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
+index 05b148d6fc7114..e02a3141637a0b 100644
+--- a/fs/ext4/inline.c
++++ b/fs/ext4/inline.c
+@@ -606,6 +606,7 @@ static int ext4_convert_inline_data_to_extent(struct address_space *mapping,
+ 	} else
+ 		ret = ext4_block_write_begin(handle, folio, from, to,
+ 					     ext4_get_block);
++	clear_buffer_new(folio_buffers(folio));
+ 
+ 	if (!ret && ext4_should_journal_data(inode)) {
+ 		ret = ext4_walk_page_buffers(handle, inode,
+@@ -867,6 +868,7 @@ static int ext4_da_convert_inline_data_to_extent(struct address_space *mapping,
+ 		return ret;
+ 	}
+ 
++	clear_buffer_new(folio_buffers(folio));
+ 	folio_mark_dirty(folio);
+ 	folio_mark_uptodate(folio);
+ 	ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index eb092133c6b882..232131804bb810 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -1056,7 +1056,7 @@ int ext4_block_write_begin(handle_t *handle, struct folio *folio,
+ 			}
+ 			continue;
+ 		}
+-		if (buffer_new(bh))
++		if (WARN_ON_ONCE(buffer_new(bh)))
+ 			clear_buffer_new(bh);
+ 		if (!buffer_mapped(bh)) {
+ 			WARN_ON(bh->b_size != blocksize);
+@@ -1272,6 +1272,7 @@ static int write_end_fn(handle_t *handle, struct inode *inode,
+ 	ret = ext4_dirty_journalled_data(handle, bh);
+ 	clear_buffer_meta(bh);
+ 	clear_buffer_prio(bh);
++	clear_buffer_new(bh);
+ 	return ret;
+ }
+ 
+diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
+index 654f672639b3c7..efc30626760a6e 100644
+--- a/fs/f2fs/data.c
++++ b/fs/f2fs/data.c
+@@ -287,7 +287,7 @@ static void f2fs_read_end_io(struct bio *bio)
+ {
+ 	struct f2fs_sb_info *sbi = F2FS_P_SB(bio_first_page_all(bio));
+ 	struct bio_post_read_ctx *ctx;
+-	bool intask = in_task();
++	bool intask = in_task() && !irqs_disabled();
+ 
+ 	iostat_update_and_unbind_ctx(bio);
+ 	ctx = bio->bi_private;
+@@ -1573,8 +1573,11 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, int flag)
+ 	end = pgofs + maxblocks;
+ 
+ next_dnode:
+-	if (map->m_may_create)
++	if (map->m_may_create) {
++		if (f2fs_lfs_mode(sbi))
++			f2fs_balance_fs(sbi, true);
+ 		f2fs_map_lock(sbi, flag);
++	}
+ 
+ 	/* When reading holes, we need its node page */
+ 	set_new_dnode(&dn, inode, NULL, NULL, 0);
+diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
+index fb09c8e9bc5732..2ccc8687509949 100644
+--- a/fs/f2fs/extent_cache.c
++++ b/fs/f2fs/extent_cache.c
+@@ -381,7 +381,7 @@ void f2fs_init_read_extent_tree(struct inode *inode, struct page *ipage)
+ 	struct f2fs_extent *i_ext = &F2FS_INODE(ipage)->i_ext;
+ 	struct extent_tree *et;
+ 	struct extent_node *en;
+-	struct extent_info ei;
++	struct extent_info ei = {0};
+ 
+ 	if (!__may_extent_tree(inode, EX_READ)) {
+ 		/* drop largest read extent */
+diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
+index a435550b2839b1..2dec22f2ea639b 100644
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -1260,7 +1260,7 @@ struct f2fs_bio_info {
+ struct f2fs_dev_info {
+ 	struct file *bdev_file;
+ 	struct block_device *bdev;
+-	char path[MAX_PATH_LEN];
++	char path[MAX_PATH_LEN + 1];
+ 	unsigned int total_segments;
+ 	block_t start_blk;
+ 	block_t end_blk;
+diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
+index cd56c0e66657be..c0e43d6056a0a6 100644
+--- a/fs/f2fs/gc.c
++++ b/fs/f2fs/gc.c
+@@ -1899,6 +1899,7 @@ int f2fs_gc(struct f2fs_sb_info *sbi, struct f2fs_gc_control *gc_control)
+ 	/* Let's run FG_GC, if we don't have enough space. */
+ 	if (has_not_enough_free_secs(sbi, 0, 0)) {
+ 		gc_type = FG_GC;
++		gc_control->one_time = false;
+ 
+ 		/*
+ 		 * For example, if there are many prefree_segments below given
+diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
+index 06688b9957c81f..41ead6c772e48c 100644
+--- a/fs/f2fs/inode.c
++++ b/fs/f2fs/inode.c
+@@ -902,6 +902,19 @@ void f2fs_evict_inode(struct inode *inode)
+ 		f2fs_update_inode_page(inode);
+ 		if (dquot_initialize_needed(inode))
+ 			set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
++
++		/*
++		 * If both f2fs_truncate() and f2fs_update_inode_page() failed
++		 * due to a fuzzed, corrupted inode, call f2fs_inode_synced() to
++		 * avoid triggering a later f2fs_bug_on().
++		 */
++		if (is_inode_flag_set(inode, FI_DIRTY_INODE)) {
++			f2fs_warn(sbi,
++				"f2fs_evict_inode: inode is dirty, ino:%lu",
++				inode->i_ino);
++			f2fs_inode_synced(inode);
++			set_sbi_flag(sbi, SBI_NEED_FSCK);
++		}
+ 	}
+ 	if (freeze_protected)
+ 		sb_end_intwrite(inode->i_sb);
+@@ -918,8 +931,12 @@ void f2fs_evict_inode(struct inode *inode)
+ 	if (likely(!f2fs_cp_error(sbi) &&
+ 				!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
+ 		f2fs_bug_on(sbi, is_inode_flag_set(inode, FI_DIRTY_INODE));
+-	else
+-		f2fs_inode_synced(inode);
++
++	/*
++	 * Either way, the inode must be removed from the sbi->inode_list[DIRTY_META]
++	 * list to avoid a UAF in f2fs_sync_inode_meta() during checkpoint.
++	 */
++	f2fs_inode_synced(inode);
+ 
+ 	/* for the case f2fs_new_inode() was failed, .i_ino is zero, skip it */
+ 	if (inode->i_ino)
+diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
+index 52bb1a28193573..f8f94301350cfe 100644
+--- a/fs/f2fs/segment.h
++++ b/fs/f2fs/segment.h
+@@ -626,8 +626,7 @@ static inline void __get_secs_required(struct f2fs_sb_info *sbi,
+ 	unsigned int dent_blocks = total_dent_blocks % CAP_BLKS_PER_SEC(sbi);
+ 	unsigned int data_blocks = 0;
+ 
+-	if (f2fs_lfs_mode(sbi) &&
+-		unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
++	if (f2fs_lfs_mode(sbi)) {
+ 		total_data_blocks = get_pages(sbi, F2FS_DIRTY_DATA);
+ 		data_secs = total_data_blocks / CAP_BLKS_PER_SEC(sbi);
+ 		data_blocks = total_data_blocks % CAP_BLKS_PER_SEC(sbi);
+@@ -636,7 +635,7 @@ static inline void __get_secs_required(struct f2fs_sb_info *sbi,
+ 	if (lower_p)
+ 		*lower_p = node_secs + dent_secs + data_secs;
+ 	if (upper_p)
+-		*upper_p = node_secs + dent_secs +
++		*upper_p = node_secs + dent_secs + data_secs +
+ 			(node_blocks ? 1 : 0) + (dent_blocks ? 1 : 0) +
+ 			(data_blocks ? 1 : 0);
+ 	if (curseg_p)
+diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
+index 3f2c6fa3623ba6..875aef2fc52052 100644
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -3388,6 +3388,7 @@ static int __f2fs_commit_super(struct f2fs_sb_info *sbi, struct folio *folio,
+ 		f2fs_bug_on(sbi, 1);
+ 
+ 	ret = submit_bio_wait(bio);
++	bio_put(bio);
+ 	folio_end_writeback(folio);
+ 
+ 	return ret;
+diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
+index 7df638f901a1f5..eb84b9418ac114 100644
+--- a/fs/f2fs/sysfs.c
++++ b/fs/f2fs/sysfs.c
+@@ -623,6 +623,27 @@ static ssize_t __sbi_store(struct f2fs_attr *a,
+ 		return count;
+ 	}
+ 
++	if (!strcmp(a->attr.name, "gc_no_zoned_gc_percent")) {
++		if (t > 100)
++			return -EINVAL;
++		*ui = (unsigned int)t;
++		return count;
++	}
++
++	if (!strcmp(a->attr.name, "gc_boost_zoned_gc_percent")) {
++		if (t > 100)
++			return -EINVAL;
++		*ui = (unsigned int)t;
++		return count;
++	}
++
++	if (!strcmp(a->attr.name, "gc_valid_thresh_ratio")) {
++		if (t > 100)
++			return -EINVAL;
++		*ui = (unsigned int)t;
++		return count;
++	}
++
+ #ifdef CONFIG_F2FS_IOSTAT
+ 	if (!strcmp(a->attr.name, "iostat_enable")) {
+ 		sbi->iostat_enable = !!t;
+diff --git a/fs/gfs2/util.c b/fs/gfs2/util.c
+index 13be8d1d228b8f..ee198a261d4fad 100644
+--- a/fs/gfs2/util.c
++++ b/fs/gfs2/util.c
+@@ -232,32 +232,23 @@ static void signal_our_withdraw(struct gfs2_sbd *sdp)
+ 	 */
+ 	ret = gfs2_glock_nq(&sdp->sd_live_gh);
+ 
++	gfs2_glock_put(live_gl); /* drop extra reference we acquired */
++	clear_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags);
++
+ 	/*
+ 	 * If we actually got the "live" lock in EX mode, there are no other
+-	 * nodes available to replay our journal. So we try to replay it
+-	 * ourselves. We hold the "live" glock to prevent other mounters
+-	 * during recovery, then just dequeue it and reacquire it in our
+-	 * normal SH mode. Just in case the problem that caused us to
+-	 * withdraw prevents us from recovering our journal (e.g. io errors
+-	 * and such) we still check if the journal is clean before proceeding
+-	 * but we may wait forever until another mounter does the recovery.
++	 * nodes available to replay our journal.
+ 	 */
+ 	if (ret == 0) {
+-		fs_warn(sdp, "No other mounters found. Trying to recover our "
+-			"own journal jid %d.\n", sdp->sd_lockstruct.ls_jid);
+-		if (gfs2_recover_journal(sdp->sd_jdesc, 1))
+-			fs_warn(sdp, "Unable to recover our journal jid %d.\n",
+-				sdp->sd_lockstruct.ls_jid);
+-		gfs2_glock_dq_wait(&sdp->sd_live_gh);
+-		gfs2_holder_reinit(LM_ST_SHARED,
+-				   LM_FLAG_NOEXP | GL_EXACT | GL_NOPID,
+-				   &sdp->sd_live_gh);
+-		gfs2_glock_nq(&sdp->sd_live_gh);
++		fs_warn(sdp, "No other mounters found.\n");
++		/*
++		 * We are about to release the lockspace.  By keeping live_gl
++		 * locked here, we ensure that the next mounter coming along
++		 * will be a "first" mounter which will perform recovery.
++		 */
++		goto skip_recovery;
+ 	}
+ 
+-	gfs2_glock_put(live_gl); /* drop extra reference we acquired */
+-	clear_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags);
+-
+ 	/*
+ 	 * At this point our journal is evicted, so we need to get a new inode
+ 	 * for it. Once done, we need to call gfs2_find_jhead which
+diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
+index a81ce7a740b918..451115360f73a0 100644
+--- a/fs/hfs/inode.c
++++ b/fs/hfs/inode.c
+@@ -692,6 +692,7 @@ static const struct file_operations hfs_file_operations = {
+ 	.write_iter	= generic_file_write_iter,
+ 	.mmap		= generic_file_mmap,
+ 	.splice_read	= filemap_splice_read,
++	.splice_write	= iter_file_splice_write,
+ 	.fsync		= hfs_file_fsync,
+ 	.open		= hfs_file_open,
+ 	.release	= hfs_file_release,
+diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c
+index a6d61685ae79bb..b1699b3c246ae4 100644
+--- a/fs/hfsplus/extents.c
++++ b/fs/hfsplus/extents.c
+@@ -342,9 +342,6 @@ static int hfsplus_free_extents(struct super_block *sb,
+ 	int i;
+ 	int err = 0;
+ 
+-	/* Mapping the allocation file may lock the extent tree */
+-	WARN_ON(mutex_is_locked(&HFSPLUS_SB(sb)->ext_tree->tree_lock));
+-
+ 	hfsplus_dump_extent(extent);
+ 	for (i = 0; i < 8; extent++, i++) {
+ 		count = be32_to_cpu(extent->block_count);
+diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
+index f331e957421783..c85b5802ec0f95 100644
+--- a/fs/hfsplus/inode.c
++++ b/fs/hfsplus/inode.c
+@@ -368,6 +368,7 @@ static const struct file_operations hfsplus_file_operations = {
+ 	.write_iter	= generic_file_write_iter,
+ 	.mmap		= generic_file_mmap,
+ 	.splice_read	= filemap_splice_read,
++	.splice_write	= iter_file_splice_write,
+ 	.fsync		= hfsplus_file_fsync,
+ 	.open		= hfsplus_file_open,
+ 	.release	= hfsplus_file_release,
+diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
+index 35e063c9f3a42e..5a877261c3fe48 100644
+--- a/fs/jfs/jfs_dmap.c
++++ b/fs/jfs/jfs_dmap.c
+@@ -1809,8 +1809,10 @@ dbAllocCtl(struct bmap * bmp, s64 nblocks, int l2nb, s64 blkno, s64 * results)
+ 			return -EIO;
+ 		dp = (struct dmap *) mp->data;
+ 
+-		if (dp->tree.budmin < 0)
++		if (dp->tree.budmin < 0) {
++			release_metapage(mp);
+ 			return -EIO;
++		}
+ 
+ 		/* try to allocate the blocks.
+ 		 */
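
The fix above matters because the dmap page is obtained with a reference held: the early -EIO return skipped release_metapage(), leaking it. A minimal sketch of the acquire/validate/release discipline it restores (the res_*() names are hypothetical, not jfs APIs):

	#include <errno.h>

	struct res;				/* stands in for struct metapage */
	struct res *res_acquire(void);		/* returns a referenced object */
	int res_is_corrupt(struct res *r);
	void res_work(struct res *r);
	void res_release(struct res *r);

	static int use_resource(void)
	{
		struct res *r = res_acquire();

		if (!r)
			return -ENOMEM;
		if (res_is_corrupt(r)) {	/* validation can fail...   */
			res_release(r);		/* ...so drop the ref first */
			return -EIO;
		}
		res_work(r);
		res_release(r);
		return 0;
	}
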
+diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
+index f9f4a92f63e929..bbc625e742aa30 100644
+--- a/fs/nfs/dir.c
++++ b/fs/nfs/dir.c
+@@ -1837,9 +1837,7 @@ static void block_revalidate(struct dentry *dentry)
+ 
+ static void unblock_revalidate(struct dentry *dentry)
+ {
+-	/* store_release ensures wait_var_event() sees the update */
+-	smp_store_release(&dentry->d_fsdata, NULL);
+-	wake_up_var(&dentry->d_fsdata);
++	store_release_wake_up(&dentry->d_fsdata, NULL);
+ }
+ 
+ /*
+diff --git a/fs/nfs/export.c b/fs/nfs/export.c
+index be686b8e0c5465..aeb17adcb2b646 100644
+--- a/fs/nfs/export.c
++++ b/fs/nfs/export.c
+@@ -66,14 +66,21 @@ nfs_fh_to_dentry(struct super_block *sb, struct fid *fid,
+ {
+ 	struct nfs_fattr *fattr = NULL;
+ 	struct nfs_fh *server_fh = nfs_exp_embedfh(fid->raw);
+-	size_t fh_size = offsetof(struct nfs_fh, data) + server_fh->size;
++	size_t fh_size = offsetof(struct nfs_fh, data);
+ 	const struct nfs_rpc_ops *rpc_ops;
+ 	struct dentry *dentry;
+ 	struct inode *inode;
+-	int len = EMBED_FH_OFF + XDR_QUADLEN(fh_size);
++	int len = EMBED_FH_OFF;
+ 	u32 *p = fid->raw;
+ 	int ret;
+ 
++	/* Initial check of bounds */
++	if (fh_len < len + XDR_QUADLEN(fh_size) ||
++	    fh_len > XDR_QUADLEN(NFS_MAXFHSIZE))
++		return NULL;
++	/* Calculate embedded filehandle size */
++	fh_size += server_fh->size;
++	len += XDR_QUADLEN(fh_size);
+ 	/* NULL translates to ESTALE */
+ 	if (fh_len < len || fh_type != len)
+ 		return NULL;
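
The reordering above is the classic validate-before-read rule: server_fh->size lives inside the buffer being validated, so it must not be read until the fixed-size header is known to be in bounds. A standalone sketch of the same pattern (types hypothetical):

	#include <errno.h>
	#include <stddef.h>

	struct wire_hdr {
		unsigned int size;		/* length of payload that follows */
		unsigned char data[];
	};

	static int parse(const unsigned char *buf, size_t buf_len)
	{
		const struct wire_hdr *hdr = (const void *)buf;

		if (buf_len < sizeof(*hdr))		/* bound the fixed part first */
			return -EINVAL;
		if (buf_len - sizeof(*hdr) < hdr->size)	/* only now safe to read size */
			return -EINVAL;
		return 0;
	}
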
+diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
+index bf96f7a8900c10..b685e763ef11be 100644
+--- a/fs/nfs/flexfilelayout/flexfilelayout.c
++++ b/fs/nfs/flexfilelayout/flexfilelayout.c
+@@ -761,14 +761,14 @@ ff_layout_choose_ds_for_read(struct pnfs_layout_segment *lseg,
+ {
+ 	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
+ 	struct nfs4_ff_layout_mirror *mirror;
+-	struct nfs4_pnfs_ds *ds;
++	struct nfs4_pnfs_ds *ds = ERR_PTR(-EAGAIN);
+ 	u32 idx;
+ 
+ 	/* mirrors are initially sorted by efficiency */
+ 	for (idx = start_idx; idx < fls->mirror_array_cnt; idx++) {
+ 		mirror = FF_LAYOUT_COMP(lseg, idx);
+ 		ds = nfs4_ff_layout_prepare_ds(lseg, mirror, false);
+-		if (!ds)
++		if (IS_ERR(ds))
+ 			continue;
+ 
+ 		if (check_device &&
+@@ -776,10 +776,10 @@ ff_layout_choose_ds_for_read(struct pnfs_layout_segment *lseg,
+ 			continue;
+ 
+ 		*best_idx = idx;
+-		return ds;
++		break;
+ 	}
+ 
+-	return NULL;
++	return ds;
+ }
+ 
+ static struct nfs4_pnfs_ds *
+@@ -941,7 +941,7 @@ ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio,
+ 	for (i = 0; i < pgio->pg_mirror_count; i++) {
+ 		mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
+ 		ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, mirror, true);
+-		if (!ds) {
++		if (IS_ERR(ds)) {
+ 			if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
+ 				goto out_mds;
+ 			pnfs_generic_pg_cleanup(pgio);
+@@ -1848,6 +1848,7 @@ ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
+ 	u32 idx = hdr->pgio_mirror_idx;
+ 	int vers;
+ 	struct nfs_fh *fh;
++	bool ds_fatal_error = false;
+ 
+ 	dprintk("--> %s ino %lu pgbase %u req %zu@%llu\n",
+ 		__func__, hdr->inode->i_ino,
+@@ -1855,8 +1856,10 @@ ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
+ 
+ 	mirror = FF_LAYOUT_COMP(lseg, idx);
+ 	ds = nfs4_ff_layout_prepare_ds(lseg, mirror, false);
+-	if (!ds)
++	if (IS_ERR(ds)) {
++		ds_fatal_error = nfs_error_is_fatal(PTR_ERR(ds));
+ 		goto out_failed;
++	}
+ 
+ 	ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
+ 						   hdr->inode);
+@@ -1904,7 +1907,7 @@ ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
+ 	return PNFS_ATTEMPTED;
+ 
+ out_failed:
+-	if (ff_layout_avoid_mds_available_ds(lseg))
++	if (ff_layout_avoid_mds_available_ds(lseg) && !ds_fatal_error)
+ 		return PNFS_TRY_AGAIN;
+ 	trace_pnfs_mds_fallback_read_pagelist(hdr->inode,
+ 			hdr->args.offset, hdr->args.count,
+@@ -1926,11 +1929,14 @@ ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
+ 	int vers;
+ 	struct nfs_fh *fh;
+ 	u32 idx = hdr->pgio_mirror_idx;
++	bool ds_fatal_error = false;
+ 
+ 	mirror = FF_LAYOUT_COMP(lseg, idx);
+ 	ds = nfs4_ff_layout_prepare_ds(lseg, mirror, true);
+-	if (!ds)
++	if (IS_ERR(ds)) {
++		ds_fatal_error = nfs_error_is_fatal(PTR_ERR(ds));
+ 		goto out_failed;
++	}
+ 
+ 	ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
+ 						   hdr->inode);
+@@ -1981,7 +1987,7 @@ ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
+ 	return PNFS_ATTEMPTED;
+ 
+ out_failed:
+-	if (ff_layout_avoid_mds_available_ds(lseg))
++	if (ff_layout_avoid_mds_available_ds(lseg) && !ds_fatal_error)
+ 		return PNFS_TRY_AGAIN;
+ 	trace_pnfs_mds_fallback_write_pagelist(hdr->inode,
+ 			hdr->args.offset, hdr->args.count,
+@@ -2024,7 +2030,7 @@ static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
+ 	idx = calc_ds_index_from_commit(lseg, data->ds_commit_index);
+ 	mirror = FF_LAYOUT_COMP(lseg, idx);
+ 	ds = nfs4_ff_layout_prepare_ds(lseg, mirror, true);
+-	if (!ds)
++	if (IS_ERR(ds))
+ 		goto out_err;
+ 
+ 	ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
+diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
+index 4a304cf17c4b07..ef535baeefb60f 100644
+--- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c
++++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
+@@ -370,11 +370,11 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg,
+ 			  struct nfs4_ff_layout_mirror *mirror,
+ 			  bool fail_return)
+ {
+-	struct nfs4_pnfs_ds *ds = NULL;
++	struct nfs4_pnfs_ds *ds;
+ 	struct inode *ino = lseg->pls_layout->plh_inode;
+ 	struct nfs_server *s = NFS_SERVER(ino);
+ 	unsigned int max_payload;
+-	int status;
++	int status = -EAGAIN;
+ 
+ 	if (!ff_layout_init_mirror_ds(lseg->pls_layout, mirror))
+ 		goto noconnect;
+@@ -418,7 +418,7 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg,
+ 	ff_layout_send_layouterror(lseg);
+ 	if (fail_return || !ff_layout_has_available_ds(lseg))
+ 		pnfs_error_mark_layout_for_return(ino, lseg);
+-	ds = NULL;
++	ds = ERR_PTR(status);
+ out:
+ 	return ds;
+ }
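
Returning ERR_PTR(status) instead of NULL here is what lets the callers patched above tell a transient failure from a fatal one. A condensed sketch of the caller-side distinction (control flow simplified from the hunks earlier in this file):

	ds = nfs4_ff_layout_prepare_ds(lseg, mirror, false);
	if (IS_ERR(ds)) {
		if (nfs_error_is_fatal(PTR_ERR(ds)))
			goto out_failed;	/* e.g. -EIO: fall back to the MDS */
		continue;			/* e.g. -EAGAIN: try the next mirror */
	}
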
+diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
+index 1be4be3d4a2b6b..9840b779f0dfd8 100644
+--- a/fs/nfs/internal.h
++++ b/fs/nfs/internal.h
+@@ -668,9 +668,12 @@ nfs_write_match_verf(const struct nfs_writeverf *verf,
+ 
+ static inline gfp_t nfs_io_gfp_mask(void)
+ {
+-	if (current->flags & PF_WQ_WORKER)
+-		return GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
+-	return GFP_KERNEL;
++	gfp_t ret = current_gfp_context(GFP_KERNEL);
++
++	/* For workers __GFP_NORETRY only with __GFP_IO or __GFP_FS */
++	if ((current->flags & PF_WQ_WORKER) && ret == GFP_KERNEL)
++		ret |= __GFP_NORETRY | __GFP_NOWARN;
++	return ret;
+ }
+ 
+ /*
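
current_gfp_context() folds the task's scoped allocation constraints into the mask, so the __GFP_NORETRY relaxation is now skipped whenever a memalloc_nofs_save()/memalloc_noio_save() scope is active. A sketch of the interaction (assumed caller, for illustration only):

	unsigned int nofs = memalloc_nofs_save();

	/* Inside the scope, current_gfp_context(GFP_KERNEL) clears __GFP_FS,
	 * so ret != GFP_KERNEL and the mask is returned without
	 * __GFP_NORETRY | __GFP_NOWARN even for a PF_WQ_WORKER task. */
	gfp_t gfp = nfs_io_gfp_mask();

	memalloc_nofs_restore(nofs);
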
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 77b239b10d4187..e27cd2c7cfd191 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -10819,7 +10819,7 @@ const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = {
+ 
+ static ssize_t nfs4_listxattr(struct dentry *dentry, char *list, size_t size)
+ {
+-	ssize_t error, error2, error3, error4;
++	ssize_t error, error2, error3, error4 = 0;
+ 	size_t left = size;
+ 
+ 	error = generic_listxattr(dentry, list, left);
+@@ -10847,9 +10847,11 @@ static ssize_t nfs4_listxattr(struct dentry *dentry, char *list, size_t size)
+ 		left -= error3;
+ 	}
+ 
+-	error4 = security_inode_listsecurity(d_inode(dentry), list, left);
+-	if (error4 < 0)
+-		return error4;
++	if (!nfs_server_capable(d_inode(dentry), NFS_CAP_SECURITY_LABEL)) {
++		error4 = security_inode_listsecurity(d_inode(dentry), list, left);
++		if (error4 < 0)
++			return error4;
++	}
+ 
+ 	error += error2 + error3 + error4;
+ 	if (size && error > size)
+diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
+index 224bccaab4cc3f..bb00e1e1683838 100644
+--- a/fs/notify/fanotify/fanotify.c
++++ b/fs/notify/fanotify/fanotify.c
+@@ -441,7 +441,13 @@ static int fanotify_encode_fh(struct fanotify_fh *fh, struct inode *inode,
+ 	dwords = fh_len >> 2;
+ 	type = exportfs_encode_fid(inode, buf, &dwords);
+ 	err = -EINVAL;
+-	if (type <= 0 || type == FILEID_INVALID || fh_len != dwords << 2)
++	/*
++	 * Unlike file_handle, the type and len fields of struct fanotify_fh
++	 * are u8. Traditionally, filesystems return handle_type < 0xff, but
++	 * there is no enforcement for that in the VFS.
++	 */
++	BUILD_BUG_ON(MAX_HANDLE_SZ > 0xff || FILEID_INVALID > 0xff);
++	if (type <= 0 || type >= FILEID_INVALID || fh_len != dwords << 2)
+ 		goto out_err;
+ 
+ 	fh->type = type;
+diff --git a/fs/ntfs3/file.c b/fs/ntfs3/file.c
+index 748c4be912db56..902dc8ba878ef8 100644
+--- a/fs/ntfs3/file.c
++++ b/fs/ntfs3/file.c
+@@ -392,7 +392,10 @@ static int ntfs_file_mmap(struct file *file, struct vm_area_struct *vma)
+ 		}
+ 
+ 		if (ni->i_valid < to) {
+-			inode_lock(inode);
++			if (!inode_trylock(inode)) {
++				err = -EAGAIN;
++				goto out;
++			}
+ 			err = ntfs_extend_initialized_size(file, ni,
+ 							   ni->i_valid, to);
+ 			inode_unlock(inode);
+diff --git a/fs/ntfs3/frecord.c b/fs/ntfs3/frecord.c
+index 608634361a302f..ed38014d175059 100644
+--- a/fs/ntfs3/frecord.c
++++ b/fs/ntfs3/frecord.c
+@@ -3057,8 +3057,7 @@ int ni_add_name(struct ntfs_inode *dir_ni, struct ntfs_inode *ni,
+  * ni_rename - Remove one name and insert new name.
+  */
+ int ni_rename(struct ntfs_inode *dir_ni, struct ntfs_inode *new_dir_ni,
+-	      struct ntfs_inode *ni, struct NTFS_DE *de, struct NTFS_DE *new_de,
+-	      bool *is_bad)
++	      struct ntfs_inode *ni, struct NTFS_DE *de, struct NTFS_DE *new_de)
+ {
+ 	int err;
+ 	struct NTFS_DE *de2 = NULL;
+@@ -3081,8 +3080,8 @@ int ni_rename(struct ntfs_inode *dir_ni, struct ntfs_inode *new_dir_ni,
+ 	err = ni_add_name(new_dir_ni, ni, new_de);
+ 	if (!err) {
+ 		err = ni_remove_name(dir_ni, ni, de, &de2, &undo);
+-		if (err && ni_remove_name(new_dir_ni, ni, new_de, &de2, &undo))
+-			*is_bad = true;
++		WARN_ON(err && ni_remove_name(new_dir_ni, ni, new_de, &de2,
++			&undo));
+ 	}
+ 
+ 	/*
+diff --git a/fs/ntfs3/namei.c b/fs/ntfs3/namei.c
+index abf7e81584a9b2..71a5a959a48cb7 100644
+--- a/fs/ntfs3/namei.c
++++ b/fs/ntfs3/namei.c
+@@ -244,7 +244,7 @@ static int ntfs_rename(struct mnt_idmap *idmap, struct inode *dir,
+ 	struct ntfs_inode *ni = ntfs_i(inode);
+ 	struct inode *new_inode = d_inode(new_dentry);
+ 	struct NTFS_DE *de, *new_de;
+-	bool is_same, is_bad;
++	bool is_same;
+ 	/*
+ 	 * de		- memory of PATH_MAX bytes:
+ 	 * [0-1024)	- original name (dentry->d_name)
+@@ -313,12 +313,8 @@ static int ntfs_rename(struct mnt_idmap *idmap, struct inode *dir,
+ 	if (dir_ni != new_dir_ni)
+ 		ni_lock_dir2(new_dir_ni);
+ 
+-	is_bad = false;
+-	err = ni_rename(dir_ni, new_dir_ni, ni, de, new_de, &is_bad);
+-	if (is_bad) {
+-		/* Restore after failed rename failed too. */
+-		_ntfs_bad_inode(inode);
+-	} else if (!err) {
++	err = ni_rename(dir_ni, new_dir_ni, ni, de, new_de);
++	if (!err) {
+ 		simple_rename_timestamp(dir, dentry, new_dir, new_dentry);
+ 		mark_inode_dirty(inode);
+ 		mark_inode_dirty(dir);
+diff --git a/fs/ntfs3/ntfs_fs.h b/fs/ntfs3/ntfs_fs.h
+index cd8e8374bb5a0a..ff7f241a25b245 100644
+--- a/fs/ntfs3/ntfs_fs.h
++++ b/fs/ntfs3/ntfs_fs.h
+@@ -584,8 +584,7 @@ int ni_add_name(struct ntfs_inode *dir_ni, struct ntfs_inode *ni,
+ 		struct NTFS_DE *de);
+ 
+ int ni_rename(struct ntfs_inode *dir_ni, struct ntfs_inode *new_dir_ni,
+-	      struct ntfs_inode *ni, struct NTFS_DE *de, struct NTFS_DE *new_de,
+-	      bool *is_bad);
++	      struct ntfs_inode *ni, struct NTFS_DE *de, struct NTFS_DE *new_de);
+ 
+ bool ni_is_dirty(struct inode *inode);
+ int ni_set_compress(struct inode *inode, bool compr);
+diff --git a/fs/orangefs/orangefs-debugfs.c b/fs/orangefs/orangefs-debugfs.c
+index fa41db08848802..b57140ebfad0f7 100644
+--- a/fs/orangefs/orangefs-debugfs.c
++++ b/fs/orangefs/orangefs-debugfs.c
+@@ -728,8 +728,8 @@ static void do_k_string(void *k_mask, int index)
+ 
+ 	if (*mask & s_kmod_keyword_mask_map[index].mask_val) {
+ 		if ((strlen(kernel_debug_string) +
+-		     strlen(s_kmod_keyword_mask_map[index].keyword))
+-			< ORANGEFS_MAX_DEBUG_STRING_LEN - 1) {
++		     strlen(s_kmod_keyword_mask_map[index].keyword) + 1)
++			< ORANGEFS_MAX_DEBUG_STRING_LEN) {
+ 				strcat(kernel_debug_string,
+ 				       s_kmod_keyword_mask_map[index].keyword);
+ 				strcat(kernel_debug_string, ",");
+@@ -756,7 +756,7 @@ static void do_c_string(void *c_mask, int index)
+ 	    (mask->mask2 & cdm_array[index].mask2)) {
+ 		if ((strlen(client_debug_string) +
+ 		     strlen(cdm_array[index].keyword) + 1)
+-			< ORANGEFS_MAX_DEBUG_STRING_LEN - 2) {
++			< ORANGEFS_MAX_DEBUG_STRING_LEN) {
+ 				strcat(client_debug_string,
+ 				       cdm_array[index].keyword);
+ 				strcat(client_debug_string, ",");
+diff --git a/fs/proc/generic.c b/fs/proc/generic.c
+index 3431b083f7d05c..e21d99fa926322 100644
+--- a/fs/proc/generic.c
++++ b/fs/proc/generic.c
+@@ -567,6 +567,8 @@ static void pde_set_flags(struct proc_dir_entry *pde)
+ 	if (pde->proc_ops->proc_compat_ioctl)
+ 		pde->flags |= PROC_ENTRY_proc_compat_ioctl;
+ #endif
++	if (pde->proc_ops->proc_lseek)
++		pde->flags |= PROC_ENTRY_proc_lseek;
+ }
+ 
+ struct proc_dir_entry *proc_create_data(const char *name, umode_t mode,
+diff --git a/fs/proc/inode.c b/fs/proc/inode.c
+index 3604b616311c27..129490151be147 100644
+--- a/fs/proc/inode.c
++++ b/fs/proc/inode.c
+@@ -473,7 +473,7 @@ static int proc_reg_open(struct inode *inode, struct file *file)
+ 	typeof_member(struct proc_ops, proc_open) open;
+ 	struct pde_opener *pdeo;
+ 
+-	if (!pde->proc_ops->proc_lseek)
++	if (!pde_has_proc_lseek(pde))
+ 		file->f_mode &= ~FMODE_LSEEK;
+ 
+ 	if (pde_is_permanent(pde)) {
+diff --git a/fs/proc/internal.h b/fs/proc/internal.h
+index 4e0c5b57ffdbb8..edd4eb6fa12ac0 100644
+--- a/fs/proc/internal.h
++++ b/fs/proc/internal.h
+@@ -99,6 +99,11 @@ static inline bool pde_has_proc_compat_ioctl(const struct proc_dir_entry *pde)
+ #endif
+ }
+ 
++static inline bool pde_has_proc_lseek(const struct proc_dir_entry *pde)
++{
++	return pde->flags & PROC_ENTRY_proc_lseek;
++}
++
+ extern struct kmem_cache *proc_dir_entry_cache;
+ void pde_free(struct proc_dir_entry *pde);
+ 
+diff --git a/fs/smb/client/cifs_debug.c b/fs/smb/client/cifs_debug.c
+index c0196be0e65fc0..9092051776fc1b 100644
+--- a/fs/smb/client/cifs_debug.c
++++ b/fs/smb/client/cifs_debug.c
+@@ -432,10 +432,8 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
+ 			server->smbd_conn->receive_credit_target);
+ 		seq_printf(m, "\nPending send_pending: %x ",
+ 			atomic_read(&server->smbd_conn->send_pending));
+-		seq_printf(m, "\nReceive buffers count_receive_queue: %x "
+-			"count_empty_packet_queue: %x",
+-			server->smbd_conn->count_receive_queue,
+-			server->smbd_conn->count_empty_packet_queue);
++		seq_printf(m, "\nReceive buffers count_receive_queue: %x ",
++			server->smbd_conn->count_receive_queue);
+ 		seq_printf(m, "\nMR responder_resources: %x "
+ 			"max_frmr_depth: %x mr_type: %x",
+ 			server->smbd_conn->responder_resources,
+diff --git a/fs/smb/client/smbdirect.c b/fs/smb/client/smbdirect.c
+index 754e94a0e07f50..c661a8e6c18b85 100644
+--- a/fs/smb/client/smbdirect.c
++++ b/fs/smb/client/smbdirect.c
+@@ -13,8 +13,6 @@
+ #include "cifsproto.h"
+ #include "smb2proto.h"
+ 
+-static struct smbd_response *get_empty_queue_buffer(
+-		struct smbd_connection *info);
+ static struct smbd_response *get_receive_buffer(
+ 		struct smbd_connection *info);
+ static void put_receive_buffer(
+@@ -23,8 +21,6 @@ static void put_receive_buffer(
+ static int allocate_receive_buffers(struct smbd_connection *info, int num_buf);
+ static void destroy_receive_buffers(struct smbd_connection *info);
+ 
+-static void put_empty_packet(
+-		struct smbd_connection *info, struct smbd_response *response);
+ static void enqueue_reassembly(
+ 		struct smbd_connection *info,
+ 		struct smbd_response *response, int data_length);
+@@ -391,7 +387,6 @@ static bool process_negotiation_response(
+ static void smbd_post_send_credits(struct work_struct *work)
+ {
+ 	int ret = 0;
+-	int use_receive_queue = 1;
+ 	int rc;
+ 	struct smbd_response *response;
+ 	struct smbd_connection *info =
+@@ -407,18 +402,9 @@ static void smbd_post_send_credits(struct work_struct *work)
+ 	if (info->receive_credit_target >
+ 		atomic_read(&info->receive_credits)) {
+ 		while (true) {
+-			if (use_receive_queue)
+-				response = get_receive_buffer(info);
+-			else
+-				response = get_empty_queue_buffer(info);
+-			if (!response) {
+-				/* now switch to empty packet queue */
+-				if (use_receive_queue) {
+-					use_receive_queue = 0;
+-					continue;
+-				} else
+-					break;
+-			}
++			response = get_receive_buffer(info);
++			if (!response)
++				break;
+ 
+ 			response->type = SMBD_TRANSFER_DATA;
+ 			response->first_segment = false;
+@@ -466,7 +452,6 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
+ 	if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_RECV) {
+ 		log_rdma_recv(INFO, "wc->status=%d opcode=%d\n",
+ 			wc->status, wc->opcode);
+-		smbd_disconnect_rdma_connection(info);
+ 		goto error;
+ 	}
+ 
+@@ -483,18 +468,15 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
+ 		info->full_packet_received = true;
+ 		info->negotiate_done =
+ 			process_negotiation_response(response, wc->byte_len);
++		put_receive_buffer(info, response);
+ 		complete(&info->negotiate_completion);
+-		break;
++		return;
+ 
+ 	/* SMBD data transfer packet */
+ 	case SMBD_TRANSFER_DATA:
+ 		data_transfer = smbd_response_payload(response);
+ 		data_length = le32_to_cpu(data_transfer->data_length);
+ 
+-		/*
+-		 * If this is a packet with data playload place the data in
+-		 * reassembly queue and wake up the reading thread
+-		 */
+ 		if (data_length) {
+ 			if (info->full_packet_received)
+ 				response->first_segment = true;
+@@ -503,16 +485,7 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
+ 				info->full_packet_received = false;
+ 			else
+ 				info->full_packet_received = true;
+-
+-			enqueue_reassembly(
+-				info,
+-				response,
+-				data_length);
+-		} else
+-			put_empty_packet(info, response);
+-
+-		if (data_length)
+-			wake_up_interruptible(&info->wait_reassembly_queue);
++		}
+ 
+ 		atomic_dec(&info->receive_credits);
+ 		info->receive_credit_target =
+@@ -540,15 +513,27 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
+ 			info->keep_alive_requested = KEEP_ALIVE_PENDING;
+ 		}
+ 
+-		return;
++		/*
++		 * If this is a packet with a data payload, place the data in
++		 * the reassembly queue and wake up the reading thread
++		 */
++		if (data_length) {
++			enqueue_reassembly(info, response, data_length);
++			wake_up_interruptible(&info->wait_reassembly_queue);
++		} else
++			put_receive_buffer(info, response);
+ 
+-	default:
+-		log_rdma_recv(ERR,
+-			"unexpected response type=%d\n", response->type);
++		return;
+ 	}
+ 
++	/*
++	 * This is an internal error!
++	 */
++	log_rdma_recv(ERR, "unexpected response type=%d\n", response->type);
++	WARN_ON_ONCE(response->type != SMBD_TRANSFER_DATA);
+ error:
+ 	put_receive_buffer(info, response);
++	smbd_disconnect_rdma_connection(info);
+ }
+ 
+ static struct rdma_cm_id *smbd_create_id(
+@@ -1069,6 +1054,7 @@ static int smbd_post_recv(
+ 	if (rc) {
+ 		ib_dma_unmap_single(sc->ib.dev, response->sge.addr,
+ 				    response->sge.length, DMA_FROM_DEVICE);
++		response->sge.length = 0;
+ 		smbd_disconnect_rdma_connection(info);
+ 		log_rdma_recv(ERR, "ib_post_recv failed rc=%d\n", rc);
+ 	}
+@@ -1113,17 +1099,6 @@ static int smbd_negotiate(struct smbd_connection *info)
+ 	return rc;
+ }
+ 
+-static void put_empty_packet(
+-		struct smbd_connection *info, struct smbd_response *response)
+-{
+-	spin_lock(&info->empty_packet_queue_lock);
+-	list_add_tail(&response->list, &info->empty_packet_queue);
+-	info->count_empty_packet_queue++;
+-	spin_unlock(&info->empty_packet_queue_lock);
+-
+-	queue_work(info->workqueue, &info->post_send_credits_work);
+-}
+-
+ /*
+  * Implement Connection.FragmentReassemblyBuffer defined in [MS-SMBD] 3.1.1.1
+  * This is a queue for reassembling upper layer payload and present to upper
+@@ -1172,25 +1147,6 @@ static struct smbd_response *_get_first_reassembly(struct smbd_connection *info)
+ 	return ret;
+ }
+ 
+-static struct smbd_response *get_empty_queue_buffer(
+-		struct smbd_connection *info)
+-{
+-	struct smbd_response *ret = NULL;
+-	unsigned long flags;
+-
+-	spin_lock_irqsave(&info->empty_packet_queue_lock, flags);
+-	if (!list_empty(&info->empty_packet_queue)) {
+-		ret = list_first_entry(
+-			&info->empty_packet_queue,
+-			struct smbd_response, list);
+-		list_del(&ret->list);
+-		info->count_empty_packet_queue--;
+-	}
+-	spin_unlock_irqrestore(&info->empty_packet_queue_lock, flags);
+-
+-	return ret;
+-}
+-
+ /*
+  * Get a receive buffer
+  * For each remote send, we need to post a receive. The receive buffers are
+@@ -1228,8 +1184,13 @@ static void put_receive_buffer(
+ 	struct smbdirect_socket *sc = &info->socket;
+ 	unsigned long flags;
+ 
+-	ib_dma_unmap_single(sc->ib.dev, response->sge.addr,
+-		response->sge.length, DMA_FROM_DEVICE);
++	if (likely(response->sge.length != 0)) {
++		ib_dma_unmap_single(sc->ib.dev,
++				    response->sge.addr,
++				    response->sge.length,
++				    DMA_FROM_DEVICE);
++		response->sge.length = 0;
++	}
+ 
+ 	spin_lock_irqsave(&info->receive_queue_lock, flags);
+ 	list_add_tail(&response->list, &info->receive_queue);
+@@ -1255,10 +1216,6 @@ static int allocate_receive_buffers(struct smbd_connection *info, int num_buf)
+ 	spin_lock_init(&info->receive_queue_lock);
+ 	info->count_receive_queue = 0;
+ 
+-	INIT_LIST_HEAD(&info->empty_packet_queue);
+-	spin_lock_init(&info->empty_packet_queue_lock);
+-	info->count_empty_packet_queue = 0;
+-
+ 	init_waitqueue_head(&info->wait_receive_queues);
+ 
+ 	for (i = 0; i < num_buf; i++) {
+@@ -1267,6 +1224,7 @@ static int allocate_receive_buffers(struct smbd_connection *info, int num_buf)
+ 			goto allocate_failed;
+ 
+ 		response->info = info;
++		response->sge.length = 0;
+ 		list_add_tail(&response->list, &info->receive_queue);
+ 		info->count_receive_queue++;
+ 	}
+@@ -1292,9 +1250,6 @@ static void destroy_receive_buffers(struct smbd_connection *info)
+ 
+ 	while ((response = get_receive_buffer(info)))
+ 		mempool_free(response, info->response_mempool);
+-
+-	while ((response = get_empty_queue_buffer(info)))
+-		mempool_free(response, info->response_mempool);
+ }
+ 
+ /* Implement idle connection timer [MS-SMBD] 3.1.6.2 */
+@@ -1381,8 +1336,7 @@ void smbd_destroy(struct TCP_Server_Info *server)
+ 
+ 	log_rdma_event(INFO, "free receive buffers\n");
+ 	wait_event(info->wait_receive_queues,
+-		info->count_receive_queue + info->count_empty_packet_queue
+-			== sp->recv_credit_max);
++		info->count_receive_queue == sp->recv_credit_max);
+ 	destroy_receive_buffers(info);
+ 
+ 	/*
+@@ -1680,8 +1634,10 @@ static struct smbd_connection *_smbd_get_connection(
+ 		goto rdma_connect_failed;
+ 	}
+ 
+-	wait_event_interruptible(
+-		info->conn_wait, sc->status != SMBDIRECT_SOCKET_CONNECTING);
++	wait_event_interruptible_timeout(
++		info->conn_wait,
++		sc->status != SMBDIRECT_SOCKET_CONNECTING,
++		msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT));
+ 
+ 	if (sc->status != SMBDIRECT_SOCKET_CONNECTED) {
+ 		log_rdma_event(ERR, "rdma_connect failed port=%d\n", port);
+diff --git a/fs/smb/client/smbdirect.h b/fs/smb/client/smbdirect.h
+index 3d552ab27e0f3d..fb8db71735f322 100644
+--- a/fs/smb/client/smbdirect.h
++++ b/fs/smb/client/smbdirect.h
+@@ -110,10 +110,6 @@ struct smbd_connection {
+ 	int count_receive_queue;
+ 	spinlock_t receive_queue_lock;
+ 
+-	struct list_head empty_packet_queue;
+-	int count_empty_packet_queue;
+-	spinlock_t empty_packet_queue_lock;
+-
+ 	wait_queue_head_t wait_receive_queues;
+ 
+ 	/* Reassembly queue */
+diff --git a/fs/smb/server/connection.h b/fs/smb/server/connection.h
+index dd3e0e3f7bf046..31dd1caac1e8a8 100644
+--- a/fs/smb/server/connection.h
++++ b/fs/smb/server/connection.h
+@@ -46,6 +46,7 @@ struct ksmbd_conn {
+ 	struct mutex			srv_mutex;
+ 	int				status;
+ 	unsigned int			cli_cap;
++	__be32				inet_addr;
+ 	char				*request_buf;
+ 	struct ksmbd_transport		*transport;
+ 	struct nls_table		*local_nls;
+diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
+index a97a2885730da2..495a9faa298bdf 100644
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -1619,11 +1619,24 @@ static int krb5_authenticate(struct ksmbd_work *work,
+ 
+ 	rsp->SecurityBufferLength = cpu_to_le16(out_len);
+ 
+-	if ((conn->sign || server_conf.enforced_signing) ||
++	/*
++	 * If session state is SMB2_SESSION_VALID, We can assume
++	 * that it is reauthentication. And the user/password
++	 * has been verified, so return it here.
++	 */
++	if (sess->state == SMB2_SESSION_VALID) {
++		if (conn->binding)
++			goto binding_session;
++		return 0;
++	}
++
++	if ((rsp->SessionFlags != SMB2_SESSION_FLAG_IS_GUEST_LE &&
++	    (conn->sign || server_conf.enforced_signing)) ||
+ 	    (req->SecurityMode & SMB2_NEGOTIATE_SIGNING_REQUIRED))
+ 		sess->sign = true;
+ 
+-	if (smb3_encryption_negotiated(conn)) {
++	if (smb3_encryption_negotiated(conn) &&
++	    !(req->Flags & SMB2_SESSION_REQ_FLAG_BINDING)) {
+ 		retval = conn->ops->generate_encryptionkey(conn, sess);
+ 		if (retval) {
+ 			ksmbd_debug(SMB,
+@@ -1636,6 +1649,7 @@ static int krb5_authenticate(struct ksmbd_work *work,
+ 		sess->sign = false;
+ 	}
+ 
++binding_session:
+ 	if (conn->dialect >= SMB30_PROT_ID) {
+ 		chann = lookup_chann_list(sess, conn);
+ 		if (!chann) {
+@@ -1831,8 +1845,6 @@ int smb2_sess_setup(struct ksmbd_work *work)
+ 				ksmbd_conn_set_good(conn);
+ 				sess->state = SMB2_SESSION_VALID;
+ 			}
+-			kfree(sess->Preauth_HashValue);
+-			sess->Preauth_HashValue = NULL;
+ 		} else if (conn->preferred_auth_mech == KSMBD_AUTH_NTLMSSP) {
+ 			if (negblob->MessageType == NtLmNegotiate) {
+ 				rc = ntlm_negotiate(work, negblob, negblob_len, rsp);
+@@ -1859,8 +1871,6 @@ int smb2_sess_setup(struct ksmbd_work *work)
+ 						kfree(preauth_sess);
+ 					}
+ 				}
+-				kfree(sess->Preauth_HashValue);
+-				sess->Preauth_HashValue = NULL;
+ 			} else {
+ 				pr_info_ratelimited("Unknown NTLMSSP message type : 0x%x\n",
+ 						le32_to_cpu(negblob->MessageType));
+diff --git a/fs/smb/server/smb_common.c b/fs/smb/server/smb_common.c
+index 191df59748e003..a29c0494dccb47 100644
+--- a/fs/smb/server/smb_common.c
++++ b/fs/smb/server/smb_common.c
+@@ -515,7 +515,7 @@ int ksmbd_extract_shortname(struct ksmbd_conn *conn, const char *longname,
+ 
+ 	p = strrchr(longname, '.');
+ 	if (p == longname) { /*name starts with a dot*/
+-		strscpy(extension, "___", strlen("___"));
++		strscpy(extension, "___", sizeof(extension));
+ 	} else {
+ 		if (p) {
+ 			p++;
+diff --git a/fs/smb/server/transport_rdma.c b/fs/smb/server/transport_rdma.c
+index 3ab8c04f72e48f..805c20f619b0b8 100644
+--- a/fs/smb/server/transport_rdma.c
++++ b/fs/smb/server/transport_rdma.c
+@@ -128,9 +128,6 @@ struct smb_direct_transport {
+ 	spinlock_t		recvmsg_queue_lock;
+ 	struct list_head	recvmsg_queue;
+ 
+-	spinlock_t		empty_recvmsg_queue_lock;
+-	struct list_head	empty_recvmsg_queue;
+-
+ 	int			send_credit_target;
+ 	atomic_t		send_credits;
+ 	spinlock_t		lock_new_recv_credits;
+@@ -267,40 +264,19 @@ smb_direct_recvmsg *get_free_recvmsg(struct smb_direct_transport *t)
+ static void put_recvmsg(struct smb_direct_transport *t,
+ 			struct smb_direct_recvmsg *recvmsg)
+ {
+-	ib_dma_unmap_single(t->cm_id->device, recvmsg->sge.addr,
+-			    recvmsg->sge.length, DMA_FROM_DEVICE);
++	if (likely(recvmsg->sge.length != 0)) {
++		ib_dma_unmap_single(t->cm_id->device,
++				    recvmsg->sge.addr,
++				    recvmsg->sge.length,
++				    DMA_FROM_DEVICE);
++		recvmsg->sge.length = 0;
++	}
+ 
+ 	spin_lock(&t->recvmsg_queue_lock);
+ 	list_add(&recvmsg->list, &t->recvmsg_queue);
+ 	spin_unlock(&t->recvmsg_queue_lock);
+ }
+ 
+-static struct
+-smb_direct_recvmsg *get_empty_recvmsg(struct smb_direct_transport *t)
+-{
+-	struct smb_direct_recvmsg *recvmsg = NULL;
+-
+-	spin_lock(&t->empty_recvmsg_queue_lock);
+-	if (!list_empty(&t->empty_recvmsg_queue)) {
+-		recvmsg = list_first_entry(&t->empty_recvmsg_queue,
+-					   struct smb_direct_recvmsg, list);
+-		list_del(&recvmsg->list);
+-	}
+-	spin_unlock(&t->empty_recvmsg_queue_lock);
+-	return recvmsg;
+-}
+-
+-static void put_empty_recvmsg(struct smb_direct_transport *t,
+-			      struct smb_direct_recvmsg *recvmsg)
+-{
+-	ib_dma_unmap_single(t->cm_id->device, recvmsg->sge.addr,
+-			    recvmsg->sge.length, DMA_FROM_DEVICE);
+-
+-	spin_lock(&t->empty_recvmsg_queue_lock);
+-	list_add_tail(&recvmsg->list, &t->empty_recvmsg_queue);
+-	spin_unlock(&t->empty_recvmsg_queue_lock);
+-}
+-
+ static void enqueue_reassembly(struct smb_direct_transport *t,
+ 			       struct smb_direct_recvmsg *recvmsg,
+ 			       int data_length)
+@@ -385,9 +361,6 @@ static struct smb_direct_transport *alloc_transport(struct rdma_cm_id *cm_id)
+ 	spin_lock_init(&t->recvmsg_queue_lock);
+ 	INIT_LIST_HEAD(&t->recvmsg_queue);
+ 
+-	spin_lock_init(&t->empty_recvmsg_queue_lock);
+-	INIT_LIST_HEAD(&t->empty_recvmsg_queue);
+-
+ 	init_waitqueue_head(&t->wait_send_pending);
+ 	atomic_set(&t->send_pending, 0);
+ 
+@@ -547,13 +520,13 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
+ 	t = recvmsg->transport;
+ 
+ 	if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_RECV) {
++		put_recvmsg(t, recvmsg);
+ 		if (wc->status != IB_WC_WR_FLUSH_ERR) {
+ 			pr_err("Recv error. status='%s (%d)' opcode=%d\n",
+ 			       ib_wc_status_msg(wc->status), wc->status,
+ 			       wc->opcode);
+ 			smb_direct_disconnect_rdma_connection(t);
+ 		}
+-		put_empty_recvmsg(t, recvmsg);
+ 		return;
+ 	}
+ 
+@@ -567,7 +540,8 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
+ 	switch (recvmsg->type) {
+ 	case SMB_DIRECT_MSG_NEGOTIATE_REQ:
+ 		if (wc->byte_len < sizeof(struct smb_direct_negotiate_req)) {
+-			put_empty_recvmsg(t, recvmsg);
++			put_recvmsg(t, recvmsg);
++			smb_direct_disconnect_rdma_connection(t);
+ 			return;
+ 		}
+ 		t->negotiation_requested = true;
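
The bug here was purely arithmetic: full data sections were added to the lower bound but missing from the upper one. A worked example with hypothetical numbers:

	/* Say CAP_BLKS_PER_SEC(sbi) = 512 and F2FS_DIRTY_DATA = 1030:
	 *   data_secs   = 1030 / 512 = 2    (full sections)
	 *   data_blocks = 1030 % 512 = 6    (partial section -> contributes 1)
	 * The upper bound must include data_secs as well as the rounded-up
	 * partial section; before the fix the two full data sections were
	 * silently dropped from *upper_p. */
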
+@@ -575,7 +549,7 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
+ 		t->status = SMB_DIRECT_CS_CONNECTED;
+ 		enqueue_reassembly(t, recvmsg, 0);
+ 		wake_up_interruptible(&t->wait_status);
+-		break;
++		return;
+ 	case SMB_DIRECT_MSG_DATA_TRANSFER: {
+ 		struct smb_direct_data_transfer *data_transfer =
+ 			(struct smb_direct_data_transfer *)recvmsg->packet;
+@@ -584,7 +558,8 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
+ 
+ 		if (wc->byte_len <
+ 		    offsetof(struct smb_direct_data_transfer, padding)) {
+-			put_empty_recvmsg(t, recvmsg);
++			put_recvmsg(t, recvmsg);
++			smb_direct_disconnect_rdma_connection(t);
+ 			return;
+ 		}
+ 
+@@ -592,7 +567,8 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
+ 		if (data_length) {
+ 			if (wc->byte_len < sizeof(struct smb_direct_data_transfer) +
+ 			    (u64)data_length) {
+-				put_empty_recvmsg(t, recvmsg);
++				put_recvmsg(t, recvmsg);
++				smb_direct_disconnect_rdma_connection(t);
+ 				return;
+ 			}
+ 
+@@ -604,16 +580,11 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
+ 			else
+ 				t->full_packet_received = true;
+ 
+-			enqueue_reassembly(t, recvmsg, (int)data_length);
+-			wake_up_interruptible(&t->wait_reassembly_queue);
+-
+ 			spin_lock(&t->receive_credit_lock);
+ 			receive_credits = --(t->recv_credits);
+ 			avail_recvmsg_count = t->count_avail_recvmsg;
+ 			spin_unlock(&t->receive_credit_lock);
+ 		} else {
+-			put_empty_recvmsg(t, recvmsg);
+-
+ 			spin_lock(&t->receive_credit_lock);
+ 			receive_credits = --(t->recv_credits);
+ 			avail_recvmsg_count = ++(t->count_avail_recvmsg);
+@@ -635,11 +606,23 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
+ 		if (is_receive_credit_post_required(receive_credits, avail_recvmsg_count))
+ 			mod_delayed_work(smb_direct_wq,
+ 					 &t->post_recv_credits_work, 0);
+-		break;
++
++		if (data_length) {
++			enqueue_reassembly(t, recvmsg, (int)data_length);
++			wake_up_interruptible(&t->wait_reassembly_queue);
++		} else
++			put_recvmsg(t, recvmsg);
++
++		return;
+ 	}
+-	default:
+-		break;
+ 	}
++
++	/*
++	 * This is an internal error!
++	 */
++	WARN_ON_ONCE(recvmsg->type != SMB_DIRECT_MSG_DATA_TRANSFER);
++	put_recvmsg(t, recvmsg);
++	smb_direct_disconnect_rdma_connection(t);
+ }
+ 
+ static int smb_direct_post_recv(struct smb_direct_transport *t,
+@@ -669,6 +652,7 @@ static int smb_direct_post_recv(struct smb_direct_transport *t,
+ 		ib_dma_unmap_single(t->cm_id->device,
+ 				    recvmsg->sge.addr, recvmsg->sge.length,
+ 				    DMA_FROM_DEVICE);
++		recvmsg->sge.length = 0;
+ 		smb_direct_disconnect_rdma_connection(t);
+ 		return ret;
+ 	}
+@@ -810,7 +794,6 @@ static void smb_direct_post_recv_credits(struct work_struct *work)
+ 	struct smb_direct_recvmsg *recvmsg;
+ 	int receive_credits, credits = 0;
+ 	int ret;
+-	int use_free = 1;
+ 
+ 	spin_lock(&t->receive_credit_lock);
+ 	receive_credits = t->recv_credits;
+@@ -818,18 +801,9 @@ static void smb_direct_post_recv_credits(struct work_struct *work)
+ 
+ 	if (receive_credits < t->recv_credit_target) {
+ 		while (true) {
+-			if (use_free)
+-				recvmsg = get_free_recvmsg(t);
+-			else
+-				recvmsg = get_empty_recvmsg(t);
+-			if (!recvmsg) {
+-				if (use_free) {
+-					use_free = 0;
+-					continue;
+-				} else {
+-					break;
+-				}
+-			}
++			recvmsg = get_free_recvmsg(t);
++			if (!recvmsg)
++				break;
+ 
+ 			recvmsg->type = SMB_DIRECT_MSG_DATA_TRANSFER;
+ 			recvmsg->first_segment = false;
+@@ -1805,8 +1779,6 @@ static void smb_direct_destroy_pools(struct smb_direct_transport *t)
+ 
+ 	while ((recvmsg = get_free_recvmsg(t)))
+ 		mempool_free(recvmsg, t->recvmsg_mempool);
+-	while ((recvmsg = get_empty_recvmsg(t)))
+-		mempool_free(recvmsg, t->recvmsg_mempool);
+ 
+ 	mempool_destroy(t->recvmsg_mempool);
+ 	t->recvmsg_mempool = NULL;
+@@ -1862,6 +1834,7 @@ static int smb_direct_create_pools(struct smb_direct_transport *t)
+ 		if (!recvmsg)
+ 			goto err;
+ 		recvmsg->transport = t;
++		recvmsg->sge.length = 0;
+ 		list_add(&recvmsg->list, &t->recvmsg_queue);
+ 	}
+ 	t->count_avail_recvmsg = t->recv_credit_max;
+diff --git a/fs/smb/server/transport_tcp.c b/fs/smb/server/transport_tcp.c
+index 4e9f98db9ff409..d72588f33b9cd1 100644
+--- a/fs/smb/server/transport_tcp.c
++++ b/fs/smb/server/transport_tcp.c
+@@ -87,6 +87,7 @@ static struct tcp_transport *alloc_transport(struct socket *client_sk)
+ 		return NULL;
+ 	}
+ 
++	conn->inet_addr = inet_sk(client_sk->sk)->inet_daddr;
+ 	conn->transport = KSMBD_TRANS(t);
+ 	KSMBD_TRANS(t)->conn = conn;
+ 	KSMBD_TRANS(t)->ops = &ksmbd_tcp_transport_ops;
+@@ -230,6 +231,8 @@ static int ksmbd_kthread_fn(void *p)
+ {
+ 	struct socket *client_sk = NULL;
+ 	struct interface *iface = (struct interface *)p;
++	struct inet_sock *csk_inet;
++	struct ksmbd_conn *conn;
+ 	int ret;
+ 
+ 	while (!kthread_should_stop()) {
+@@ -248,6 +251,20 @@ static int ksmbd_kthread_fn(void *p)
+ 			continue;
+ 		}
+ 
++		/*
++		 * Limit repeated connections from clients with the same IP address.
++		 */
++		csk_inet = inet_sk(client_sk->sk);
++		down_read(&conn_list_lock);
++		list_for_each_entry(conn, &conn_list, conns_list)
++			if (csk_inet->inet_daddr == conn->inet_addr) {
++				ret = -EAGAIN;
++				break;
++			}
++		up_read(&conn_list_lock);
++		if (ret == -EAGAIN)
++			continue;
++
+ 		if (server_conf.max_connections &&
+ 		    atomic_inc_return(&active_num_conn) >= server_conf.max_connections) {
+ 			pr_info_ratelimited("Limit the maximum number of connections(%u)\n",
+diff --git a/fs/smb/server/vfs.c b/fs/smb/server/vfs.c
+index a662aae5126c0a..9d38a651431c00 100644
+--- a/fs/smb/server/vfs.c
++++ b/fs/smb/server/vfs.c
+@@ -563,7 +563,8 @@ int ksmbd_vfs_getattr(const struct path *path, struct kstat *stat)
+ {
+ 	int err;
+ 
+-	err = vfs_getattr(path, stat, STATX_BTIME, AT_STATX_SYNC_AS_STAT);
++	err = vfs_getattr(path, stat, STATX_BASIC_STATS | STATX_BTIME,
++			AT_STATX_SYNC_AS_STAT);
+ 	if (err)
+ 		pr_err("getattr failed, err %d\n", err);
+ 	return err;
+diff --git a/include/linux/audit.h b/include/linux/audit.h
+index 0050ef288ab3ce..a394614ccd0b81 100644
+--- a/include/linux/audit.h
++++ b/include/linux/audit.h
+@@ -417,7 +417,7 @@ extern int __audit_log_bprm_fcaps(struct linux_binprm *bprm,
+ extern void __audit_log_capset(const struct cred *new, const struct cred *old);
+ extern void __audit_mmap_fd(int fd, int flags);
+ extern void __audit_openat2_how(struct open_how *how);
+-extern void __audit_log_kern_module(char *name);
++extern void __audit_log_kern_module(const char *name);
+ extern void __audit_fanotify(u32 response, struct fanotify_response_info_audit_rule *friar);
+ extern void __audit_tk_injoffset(struct timespec64 offset);
+ extern void __audit_ntp_log(const struct audit_ntp_data *ad);
+@@ -519,7 +519,7 @@ static inline void audit_openat2_how(struct open_how *how)
+ 		__audit_openat2_how(how);
+ }
+ 
+-static inline void audit_log_kern_module(char *name)
++static inline void audit_log_kern_module(const char *name)
+ {
+ 	if (!audit_dummy_context())
+ 		__audit_log_kern_module(name);
+@@ -677,9 +677,8 @@ static inline void audit_mmap_fd(int fd, int flags)
+ static inline void audit_openat2_how(struct open_how *how)
+ { }
+ 
+-static inline void audit_log_kern_module(char *name)
+-{
+-}
++static inline void audit_log_kern_module(const char *name)
++{ }
+ 
+ static inline void audit_fanotify(u32 response, struct fanotify_response_info_audit_rule *friar)
+ { }
+diff --git a/include/linux/fortify-string.h b/include/linux/fortify-string.h
+index 0d99bf11d260a3..71f9dcf5612829 100644
+--- a/include/linux/fortify-string.h
++++ b/include/linux/fortify-string.h
+@@ -596,7 +596,7 @@ __FORTIFY_INLINE bool fortify_memcpy_chk(__kernel_size_t size,
+ 	if (p_size != SIZE_MAX && p_size < size)
+ 		fortify_panic(func, FORTIFY_WRITE, p_size, size, true);
+ 	else if (q_size != SIZE_MAX && q_size < size)
+-		fortify_panic(func, FORTIFY_READ, p_size, size, true);
++		fortify_panic(func, FORTIFY_READ, q_size, size, true);
+ 
+ 	/*
+ 	 * Warn when writing beyond destination field size.
+diff --git a/include/linux/fs_context.h b/include/linux/fs_context.h
+index 4b4bfef6f053a4..d86922b5435bd4 100644
+--- a/include/linux/fs_context.h
++++ b/include/linux/fs_context.h
+@@ -202,7 +202,7 @@ void logfc(struct fc_log *log, const char *prefix, char level, const char *fmt,
+  */
+ #define infof(fc, fmt, ...) __logfc(fc, 'i', fmt, ## __VA_ARGS__)
+ #define info_plog(p, fmt, ...) __plog(p, 'i', fmt, ## __VA_ARGS__)
+-#define infofc(p, fmt, ...) __plog((&(fc)->log), 'i', fmt, ## __VA_ARGS__)
++#define infofc(fc, fmt, ...) __plog((&(fc)->log), 'i', fmt, ## __VA_ARGS__)
+ 
+ /**
+  * warnf - Store supplementary warning message
+diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h
+index b25377b6ea98dd..5210e8371238f1 100644
+--- a/include/linux/ioprio.h
++++ b/include/linux/ioprio.h
+@@ -60,7 +60,8 @@ static inline int __get_task_ioprio(struct task_struct *p)
+ 	int prio;
+ 
+ 	if (!ioc)
+-		return IOPRIO_DEFAULT;
++		return IOPRIO_PRIO_VALUE(task_nice_ioclass(p),
++					 task_nice_ioprio(p));
+ 
+ 	if (p != current)
+ 		lockdep_assert_held(&p->alloc_lock);
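
task_nice_ioclass()/task_nice_ioprio() derive the class from the scheduling policy and the level from the nice value ((nice + 20) / 5, per the helpers in this header), so a task without an io_context now gets a policy-appropriate default instead of a flat IOPRIO_DEFAULT. Illustrative values, assuming that mapping:

	/* nice -20 -> level 0 (highest best-effort)
	 * nice   0 -> level 4 (the usual default)
	 * nice  19 -> level 7 (lowest best-effort)
	 * SCHED_IDLE tasks land in IOPRIO_CLASS_IDLE instead. */
	int prio = IOPRIO_PRIO_VALUE(task_nice_ioclass(p), task_nice_ioprio(p));
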
+diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
+index cc647992f3d1e1..35bf9cdc1b2216 100644
+--- a/include/linux/mlx5/device.h
++++ b/include/linux/mlx5/device.h
+@@ -280,6 +280,7 @@ enum {
+ 	MLX5_MKEY_MASK_SMALL_FENCE	= 1ull << 23,
+ 	MLX5_MKEY_MASK_RELAXED_ORDERING_WRITE	= 1ull << 25,
+ 	MLX5_MKEY_MASK_FREE			= 1ull << 29,
++	MLX5_MKEY_MASK_PAGE_SIZE_5		= 1ull << 42,
+ 	MLX5_MKEY_MASK_RELAXED_ORDERING_READ	= 1ull << 47,
+ };
+ 
+diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
+index bfb85fd13e1fae..110e9d09de2436 100644
+--- a/include/linux/moduleparam.h
++++ b/include/linux/moduleparam.h
+@@ -282,10 +282,9 @@ struct kparam_array
+ #define __moduleparam_const const
+ #endif
+ 
+-/* This is the fundamental function for registering boot/module
+-   parameters. */
++/* This is the fundamental function for registering boot/module parameters. */
+ #define __module_param_call(prefix, name, ops, arg, perm, level, flags)	\
+-	/* Default value instead of permissions? */			\
++	static_assert(sizeof(""prefix) - 1 <= MAX_PARAM_PREFIX_LEN);	\
+ 	static const char __param_str_##name[] = prefix #name;		\
+ 	static struct kernel_param __moduleparam_const __param_##name	\
+ 	__used __section("__param")					\
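
The ""prefix concatenation in the new static_assert() does double duty: it only compiles when prefix is a string literal, and sizeof(literal) - 1 then yields the prefix length at compile time. A standalone sketch of the same guard (macro name hypothetical):

	#include <assert.h>

	#define PREFIX_FITS(prefix, max) \
		static_assert(sizeof("" prefix) - 1 <= (max), "prefix too long")

	PREFIX_FITS("mymodule.", 64);	/* compiles fine */
	/* PREFIX_FITS(some_char_ptr, 64) would not: "" some_char_ptr is not
	 * a valid string-literal concatenation. */
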
+diff --git a/include/linux/pps_kernel.h b/include/linux/pps_kernel.h
+index c7abce28ed2995..aab0aebb529e02 100644
+--- a/include/linux/pps_kernel.h
++++ b/include/linux/pps_kernel.h
+@@ -52,6 +52,7 @@ struct pps_device {
+ 	int current_mode;			/* PPS mode at event time */
+ 
+ 	unsigned int last_ev;			/* last PPS event id */
++	unsigned int last_fetched_ev;		/* last fetched PPS event id */
+ 	wait_queue_head_t queue;		/* PPS event queue */
+ 
+ 	unsigned int id;			/* PPS source unique ID */
+diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
+index ea62201c74c402..703d0c76cc9a0a 100644
+--- a/include/linux/proc_fs.h
++++ b/include/linux/proc_fs.h
+@@ -27,6 +27,7 @@ enum {
+ 
+ 	PROC_ENTRY_proc_read_iter	= 1U << 1,
+ 	PROC_ENTRY_proc_compat_ioctl	= 1U << 2,
++	PROC_ENTRY_proc_lseek		= 1U << 3,
+ };
+ 
+ struct proc_ops {
+diff --git a/include/linux/psi_types.h b/include/linux/psi_types.h
+index f1fd3a8044e0ec..dd10c22299ab82 100644
+--- a/include/linux/psi_types.h
++++ b/include/linux/psi_types.h
+@@ -84,11 +84,9 @@ enum psi_aggregators {
+ struct psi_group_cpu {
+ 	/* 1st cacheline updated by the scheduler */
+ 
+-	/* Aggregator needs to know of concurrent changes */
+-	seqcount_t seq ____cacheline_aligned_in_smp;
+-
+ 	/* States of the tasks belonging to this group */
+-	unsigned int tasks[NR_PSI_TASK_COUNTS];
++	unsigned int tasks[NR_PSI_TASK_COUNTS]
++			____cacheline_aligned_in_smp;
+ 
+ 	/* Aggregate pressure state derived from the tasks */
+ 	u32 state_mask;
+diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
+index 17fbb78552952d..d8424abcf726c0 100644
+--- a/include/linux/ring_buffer.h
++++ b/include/linux/ring_buffer.h
+@@ -152,9 +152,7 @@ ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts,
+ 		    unsigned long *lost_events);
+ 
+ struct ring_buffer_iter *
+-ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags);
+-void ring_buffer_read_prepare_sync(void);
+-void ring_buffer_read_start(struct ring_buffer_iter *iter);
++ring_buffer_read_start(struct trace_buffer *buffer, int cpu, gfp_t flags);
+ void ring_buffer_read_finish(struct ring_buffer_iter *iter);
+ 
+ struct ring_buffer_event *
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index 39f1d16f362887..a726a698aac405 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -2991,6 +2991,29 @@ static inline void skb_reset_transport_header(struct sk_buff *skb)
+ 	skb->transport_header = skb->data - skb->head;
+ }
+ 
++/**
++ * skb_reset_transport_header_careful - conditionally reset transport header
++ * @skb: buffer to alter
++ *
++ * Hardened version of skb_reset_transport_header().
++ *
++ * Returns: true if the operation was a success.
++ */
++static inline bool __must_check
++skb_reset_transport_header_careful(struct sk_buff *skb)
++{
++	long offset = skb->data - skb->head;
++
++	if (unlikely(offset != (typeof(skb->transport_header))offset))
++		return false;
++
++	if (unlikely(offset == (typeof(skb->transport_header))~0U))
++		return false;
++
++	skb->transport_header = offset;
++	return true;
++}
++
+ static inline void skb_set_transport_header(struct sk_buff *skb,
+ 					    const int offset)
+ {
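
The careful variant exists because transport_header is a 16-bit offset: with enough headroom, the plain skb_reset_transport_header() would silently truncate it. A hypothetical caller (a sketch, not taken from this patch):

	/* Decap step: refuse the packet rather than store a wrapped offset. */
	if (!skb_reset_transport_header_careful(skb)) {
		kfree_skb(skb);
		return -EINVAL;
	}
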
+diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
+index 0b9f1e598e3a6b..4bc6bb01a0eb8b 100644
+--- a/include/linux/usb/usbnet.h
++++ b/include/linux/usb/usbnet.h
+@@ -76,6 +76,7 @@ struct usbnet {
+ #		define EVENT_LINK_CHANGE	11
+ #		define EVENT_SET_RX_MODE	12
+ #		define EVENT_NO_IP_ALIGN	13
++#		define EVENT_LINK_CARRIER_ON	14
+ /* This one is special, as it indicates that the device is going away
+  * there are cyclic dependencies between tasklet, timer and bh
+  * that must be broken
+diff --git a/include/linux/wait_bit.h b/include/linux/wait_bit.h
+index 7725b7579b7819..2209c227e85920 100644
+--- a/include/linux/wait_bit.h
++++ b/include/linux/wait_bit.h
+@@ -335,4 +335,64 @@ static inline void clear_and_wake_up_bit(int bit, void *word)
+ 	wake_up_bit(word, bit);
+ }
+ 
++/**
++ * test_and_clear_wake_up_bit - clear a bit if it was set: wake up anyone waiting on that bit
++ * @bit: the bit of the word being waited on
++ * @word: the address of memory containing that bit
++ *
++ * If the bit is set and can be atomically cleared, any tasks waiting in
++ * wait_on_bit() or similar will be woken.  This call has the same
++ * complete ordering semantics as test_and_clear_bit().  Any changes to
++ * memory made before this call are guaranteed to be visible after the
++ * corresponding wait_on_bit() completes.
++ *
++ * Returns %true if the bit was successfully cleared and the wake up was sent.
++ */
++static inline bool test_and_clear_wake_up_bit(int bit, unsigned long *word)
++{
++	if (!test_and_clear_bit(bit, word))
++		return false;
++	/* no extra barrier required */
++	wake_up_bit(word, bit);
++	return true;
++}
++
++/**
++ * atomic_dec_and_wake_up - decrement an atomic_t and if zero, wake up waiters
++ * @var: the variable to dec and test
++ *
++ * Decrements the atomic variable and, if it reaches zero, sends a wake_up to any
++ * processes waiting on the variable.
++ *
++ * This function has the same complete ordering semantics as atomic_dec_and_test.
++ *
++ * Returns %true if the variable reaches zero and the wake up was sent.
++ */
++
++static inline bool atomic_dec_and_wake_up(atomic_t *var)
++{
++	if (!atomic_dec_and_test(var))
++		return false;
++	/* No extra barrier required */
++	wake_up_var(var);
++	return true;
++}
++
++/**
++ * store_release_wake_up - update a variable and send a wake_up
++ * @var: the address of the variable to be updated and woken
++ * @val: the value to store in the variable.
++ *
++ * Store the given value in the variable and send a wake up to any tasks
++ * waiting on the variable.  All necessary barriers are included to ensure
++ * the task calling wait_var_event() sees the new value and all values
++ * written to memory before this call.
++ */
++#define store_release_wake_up(var, val)					\
++do {									\
++	smp_store_release(var, val);					\
++	smp_mb();							\
++	wake_up_var(var);						\
++} while (0)
++
+ #endif /* _LINUX_WAIT_BIT_H */
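
store_release_wake_up() is the helper the fs/nfs/dir.c hunk earlier in this patch switches to; the matching waiter re-reads the variable with acquire semantics inside wait_var_event(). A sketch of the pairing:

	/* Waker, as in unblock_revalidate() above: */
	store_release_wake_up(&dentry->d_fsdata, NULL);

	/* Waiter (sketch): the condition is re-checked after every wake. */
	wait_var_event(&dentry->d_fsdata,
		       smp_load_acquire(&dentry->d_fsdata) == NULL);
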
+diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
+index 40fce4193cc1dd..4b3200542fe66a 100644
+--- a/include/net/bluetooth/hci.h
++++ b/include/net/bluetooth/hci.h
+@@ -2612,6 +2612,7 @@ struct hci_ev_le_conn_complete {
+ #define LE_EXT_ADV_DIRECT_IND		0x0004
+ #define LE_EXT_ADV_SCAN_RSP		0x0008
+ #define LE_EXT_ADV_LEGACY_PDU		0x0010
++#define LE_EXT_ADV_DATA_STATUS_MASK	0x0060
+ #define LE_EXT_ADV_EVT_TYPE_MASK	0x007f
+ 
+ #define ADDR_LE_DEV_PUBLIC		0x00
+diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
+index 3d1d7296aed911..df4af45f8603cd 100644
+--- a/include/net/bluetooth/hci_core.h
++++ b/include/net/bluetooth/hci_core.h
+@@ -29,6 +29,7 @@
+ #include <linux/idr.h>
+ #include <linux/leds.h>
+ #include <linux/rculist.h>
++#include <linux/spinlock.h>
+ #include <linux/srcu.h>
+ 
+ #include <net/bluetooth/hci.h>
+@@ -93,6 +94,7 @@ struct discovery_state {
+ 	u16			uuid_count;
+ 	u8			(*uuids)[16];
+ 	unsigned long		name_resolve_timeout;
++	spinlock_t		lock;
+ };
+ 
+ #define SUSPEND_NOTIFIER_TIMEOUT	msecs_to_jiffies(2000) /* 2 seconds */
+@@ -873,6 +875,7 @@ static inline void iso_recv(struct hci_conn *hcon, struct sk_buff *skb,
+ 
+ static inline void discovery_init(struct hci_dev *hdev)
+ {
++	spin_lock_init(&hdev->discovery.lock);
+ 	hdev->discovery.state = DISCOVERY_STOPPED;
+ 	INIT_LIST_HEAD(&hdev->discovery.all);
+ 	INIT_LIST_HEAD(&hdev->discovery.unknown);
+@@ -887,8 +890,11 @@ static inline void hci_discovery_filter_clear(struct hci_dev *hdev)
+ 	hdev->discovery.report_invalid_rssi = true;
+ 	hdev->discovery.rssi = HCI_RSSI_INVALID;
+ 	hdev->discovery.uuid_count = 0;
++
++	spin_lock(&hdev->discovery.lock);
+ 	kfree(hdev->discovery.uuids);
+ 	hdev->discovery.uuids = NULL;
++	spin_unlock(&hdev->discovery.lock);
+ }
+ 
+ bool hci_discovery_active(struct hci_dev *hdev);
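
The new lock pairs the kfree() above with lookups of the UUID filter. A
hedged sketch of the reader side it serializes against (the real readers
live in the event-processing path; this helper is purely illustrative):

    static bool my_uuid_listed(struct hci_dev *hdev, const u8 uuid[16])
    {
    	bool found = false;
    	int i;

    	spin_lock(&hdev->discovery.lock);
    	for (i = 0; i < hdev->discovery.uuid_count; i++) {
    		if (!memcmp(hdev->discovery.uuids[i], uuid, 16)) {
    			found = true;
    			break;
    		}
    	}
    	spin_unlock(&hdev->discovery.lock);

    	return found;
    }

Without the lock, a concurrent hci_discovery_filter_clear() could free the
uuids array while a reader is still walking it.
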
+diff --git a/include/net/dst.h b/include/net/dst.h
+index 08647c99d79c9a..e18826cd055952 100644
+--- a/include/net/dst.h
++++ b/include/net/dst.h
+@@ -456,7 +456,7 @@ INDIRECT_CALLABLE_DECLARE(int ip_output(struct net *, struct sock *,
+ /* Output packet to network from transport.  */
+ static inline int dst_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+ {
+-	return INDIRECT_CALL_INET(skb_dst(skb)->output,
++	return INDIRECT_CALL_INET(READ_ONCE(skb_dst(skb)->output),
+ 				  ip6_output, ip_output,
+ 				  net, sk, skb);
+ }
+@@ -466,7 +466,7 @@ INDIRECT_CALLABLE_DECLARE(int ip_local_deliver(struct sk_buff *));
+ /* Input packet from network to transport.  */
+ static inline int dst_input(struct sk_buff *skb)
+ {
+-	return INDIRECT_CALL_INET(skb_dst(skb)->input,
++	return INDIRECT_CALL_INET(READ_ONCE(skb_dst(skb)->input),
+ 				  ip6_input, ip_local_deliver, skb);
+ }
+ 
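
The READ_ONCE() annotations above apply the standard pattern for function
pointers that are updated locklessly (see the lwtunnel change that follows).
A generic hedged sketch of the idiom, not taken from the patch:

    #include <linux/compiler.h>
    #include <linux/errno.h>

    static int (*handler)(int);

    static void set_handler(int (*fn)(int))
    {
    	WRITE_ONCE(handler, fn);		/* single, untorn store */
    }

    static int run_handler(int arg)
    {
    	int (*fn)(int) = READ_ONCE(handler);	/* load exactly once */

    	return fn ? fn(arg) : -ENODEV;
    }

Without the annotations the compiler is free to reload or tear the access,
which becomes a data race once writers run concurrently.
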
+diff --git a/include/net/lwtunnel.h b/include/net/lwtunnel.h
+index 53bd2d02a4f0db..09791f5d9b6ec8 100644
+--- a/include/net/lwtunnel.h
++++ b/include/net/lwtunnel.h
+@@ -138,12 +138,12 @@ int bpf_lwt_push_ip_encap(struct sk_buff *skb, void *hdr, u32 len,
+ static inline void lwtunnel_set_redirect(struct dst_entry *dst)
+ {
+ 	if (lwtunnel_output_redirect(dst->lwtstate)) {
+-		dst->lwtstate->orig_output = dst->output;
+-		dst->output = lwtunnel_output;
++		dst->lwtstate->orig_output = READ_ONCE(dst->output);
++		WRITE_ONCE(dst->output, lwtunnel_output);
+ 	}
+ 	if (lwtunnel_input_redirect(dst->lwtstate)) {
+-		dst->lwtstate->orig_input = dst->input;
+-		dst->input = lwtunnel_input;
++		dst->lwtstate->orig_input = READ_ONCE(dst->input);
++		WRITE_ONCE(dst->input, lwtunnel_input);
+ 	}
+ }
+ #else
+diff --git a/include/net/tc_act/tc_ctinfo.h b/include/net/tc_act/tc_ctinfo.h
+index f071c1d70a25e1..a04bcac7adf4b6 100644
+--- a/include/net/tc_act/tc_ctinfo.h
++++ b/include/net/tc_act/tc_ctinfo.h
+@@ -18,9 +18,9 @@ struct tcf_ctinfo_params {
+ struct tcf_ctinfo {
+ 	struct tc_action common;
+ 	struct tcf_ctinfo_params __rcu *params;
+-	u64 stats_dscp_set;
+-	u64 stats_dscp_error;
+-	u64 stats_cpmark_set;
++	atomic64_t stats_dscp_set;
++	atomic64_t stats_dscp_error;
++	atomic64_t stats_cpmark_set;
+ };
+ 
+ enum {
+diff --git a/include/net/udp.h b/include/net/udp.h
+index 61222545ab1cfd..0b2e3a5e01d874 100644
+--- a/include/net/udp.h
++++ b/include/net/udp.h
+@@ -461,6 +461,16 @@ static inline struct sk_buff *udp_rcv_segment(struct sock *sk,
+ {
+ 	netdev_features_t features = NETIF_F_SG;
+ 	struct sk_buff *segs;
++	int drop_count;
++
++	/*
++	 * Segmentation in the UDP receive path is only for UDP GRO; drop UDP
++	 * fragmentation offload (UFO) packets.
++	 */
++	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP) {
++		drop_count = 1;
++		goto drop;
++	}
+ 
+ 	/* Avoid csum recalculation by skb_segment unless userspace explicitly
+ 	 * asks for the final checksum values
+@@ -484,16 +494,18 @@ static inline struct sk_buff *udp_rcv_segment(struct sock *sk,
+ 	 */
+ 	segs = __skb_gso_segment(skb, features, false);
+ 	if (IS_ERR_OR_NULL(segs)) {
+-		int segs_nr = skb_shinfo(skb)->gso_segs;
+-
+-		atomic_add(segs_nr, &sk->sk_drops);
+-		SNMP_ADD_STATS(__UDPX_MIB(sk, ipv4), UDP_MIB_INERRORS, segs_nr);
+-		kfree_skb(skb);
+-		return NULL;
++		drop_count = skb_shinfo(skb)->gso_segs;
++		goto drop;
+ 	}
+ 
+ 	consume_skb(skb);
+ 	return segs;
++
++drop:
++	atomic_add(drop_count, &sk->sk_drops);
++	SNMP_ADD_STATS(__UDPX_MIB(sk, ipv4), UDP_MIB_INERRORS, drop_count);
++	kfree_skb(skb);
++	return NULL;
+ }
+ 
+ static inline void udp_post_segment_fix_csum(struct sk_buff *skb)
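
A note on the gso_type test above: SKB_GSO_UDP marks legacy UDP
fragmentation offload (UFO), while packets aggregated by UDP GRO carry
SKB_GSO_UDP_L4, so the bit test cleanly separates the two cases.
Illustrative only:

    bool is_ufo = skb_shinfo(skb)->gso_type & SKB_GSO_UDP;     /* dropped */
    bool is_gro = skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4;  /* segmented */
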
+diff --git a/include/sound/tas2781-tlv.h b/include/sound/tas2781-tlv.h
+index d87263e43fdb61..ef9b9f19d21205 100644
+--- a/include/sound/tas2781-tlv.h
++++ b/include/sound/tas2781-tlv.h
+@@ -15,7 +15,7 @@
+ #ifndef __TAS2781_TLV_H__
+ #define __TAS2781_TLV_H__
+ 
+-static const __maybe_unused DECLARE_TLV_DB_SCALE(dvc_tlv, -10000, 100, 0);
++static const __maybe_unused DECLARE_TLV_DB_SCALE(dvc_tlv, -10000, 50, 0);
+ static const __maybe_unused DECLARE_TLV_DB_SCALE(amp_vol_tlv, 1100, 50, 0);
+ 
+ #endif
+diff --git a/include/uapi/drm/panthor_drm.h b/include/uapi/drm/panthor_drm.h
+index e23a7f9b0eacd1..21af62e3cc6f6c 100644
+--- a/include/uapi/drm/panthor_drm.h
++++ b/include/uapi/drm/panthor_drm.h
+@@ -327,6 +327,9 @@ struct drm_panthor_gpu_info {
+ 	/** @as_present: Bitmask encoding the number of address-space exposed by the MMU. */
+ 	__u32 as_present;
+ 
++	/** @pad0: MBZ. */
++	__u32 pad0;
++
+ 	/** @shader_present: Bitmask encoding the shader cores exposed by the GPU. */
+ 	__u64 shader_present;
+ 
+diff --git a/include/uapi/linux/vhost.h b/include/uapi/linux/vhost.h
+index b95dd84eef2db2..1c7e7035fc49d1 100644
+--- a/include/uapi/linux/vhost.h
++++ b/include/uapi/linux/vhost.h
+@@ -235,4 +235,33 @@
+  */
+ #define VHOST_VDPA_GET_VRING_SIZE	_IOWR(VHOST_VIRTIO, 0x82,	\
+ 					      struct vhost_vring_state)
++
++/* fork_owner values for vhost */
++#define VHOST_FORK_OWNER_KTHREAD 0
++#define VHOST_FORK_OWNER_TASK 1
++
++/**
++ * VHOST_SET_FORK_FROM_OWNER - Set the fork_owner flag for the vhost device.
++ * This ioctl must be called before VHOST_SET_OWNER.
++ * Only available when CONFIG_VHOST_ENABLE_FORK_OWNER_CONTROL=y
++ *
++ * @param fork_owner: An 8-bit value that determines the vhost thread mode
++ *
++ * When fork_owner is set to VHOST_FORK_OWNER_TASK (the default):
++ *   - Vhost will create vhost workers as tasks forked from the owner,
++ *     inheriting all of the owner's attributes.
++ *
++ * When fork_owner is set to VHOST_FORK_OWNER_KTHREAD:
++ *   - Vhost will create vhost workers as kernel threads.
++ */
++#define VHOST_SET_FORK_FROM_OWNER _IOW(VHOST_VIRTIO, 0x83, __u8)
++
++/**
++ * VHOST_GET_FORK_OWNER - Get the current fork_owner flag for the vhost device.
++ * Only available when CONFIG_VHOST_ENABLE_FORK_OWNER_CONTROL=y
++ *
++ * @return: An 8-bit value indicating the current thread mode.
++ */
++#define VHOST_GET_FORK_FROM_OWNER _IOR(VHOST_VIRTIO, 0x84, __u8)
++
+ #endif
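
A hedged userspace sketch of driving the new ioctls (the device path and
error handling are illustrative, and the ioctls only exist when the kernel
is built with CONFIG_VHOST_ENABLE_FORK_OWNER_CONTROL=y):

    #include <stdint.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/vhost.h>

    static int open_vhost_kthread_mode(const char *path)
    {
    	uint8_t mode = VHOST_FORK_OWNER_KTHREAD;
    	int fd = open(path, O_RDWR);

    	if (fd < 0)
    		return -1;

    	/* Must precede VHOST_SET_OWNER. */
    	if (ioctl(fd, VHOST_SET_FORK_FROM_OWNER, &mode) < 0 ||
    	    ioctl(fd, VHOST_GET_FORK_FROM_OWNER, &mode) < 0) {
    		close(fd);
    		return -1;
    	}

    	return fd;	/* caller proceeds with VHOST_SET_OWNER etc. */
    }
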
+diff --git a/init/Kconfig b/init/Kconfig
+index d3755b2264bdfb..45990792cb4a68 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -1741,7 +1741,7 @@ config IO_URING
+ 
+ config GCOV_PROFILE_URING
+ 	bool "Enable GCOV profiling on the io_uring subsystem"
+-	depends on GCOV_KERNEL
++	depends on IO_URING && GCOV_KERNEL
+ 	help
+ 	  Enable GCOV profiling on the io_uring subsystem, to facilitate
+ 	  code coverage testing.
+diff --git a/kernel/audit.h b/kernel/audit.h
+index a60d2840559e2b..5156ecd3545733 100644
+--- a/kernel/audit.h
++++ b/kernel/audit.h
+@@ -199,7 +199,7 @@ struct audit_context {
+ 			int			argc;
+ 		} execve;
+ 		struct {
+-			char			*name;
++			const char		*name;
+ 		} module;
+ 		struct {
+ 			struct audit_ntp_data	ntp_data;
+diff --git a/kernel/auditsc.c b/kernel/auditsc.c
+index cd57053b4a6993..dae80e4dfccee3 100644
+--- a/kernel/auditsc.c
++++ b/kernel/auditsc.c
+@@ -2870,7 +2870,7 @@ void __audit_openat2_how(struct open_how *how)
+ 	context->type = AUDIT_OPENAT2;
+ }
+ 
+-void __audit_log_kern_module(char *name)
++void __audit_log_kern_module(const char *name)
+ {
+ 	struct audit_context *context = audit_context();
+ 
+diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
+index 68a327158989b9..767dcb8471f63b 100644
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -778,7 +778,10 @@ bool is_bpf_text_address(unsigned long addr)
+ 
+ struct bpf_prog *bpf_prog_ksym_find(unsigned long addr)
+ {
+-	struct bpf_ksym *ksym = bpf_ksym_find(addr);
++	struct bpf_ksym *ksym;
++
++	WARN_ON_ONCE(!rcu_read_lock_held());
++	ksym = bpf_ksym_find(addr);
+ 
+ 	return ksym && ksym->prog ?
+ 	       container_of(ksym, struct bpf_prog_aux, ksym)->prog :
+diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
+index 6cf165c55bdacc..be4429463599f6 100644
+--- a/kernel/bpf/helpers.c
++++ b/kernel/bpf/helpers.c
+@@ -2781,9 +2781,16 @@ static bool bpf_stack_walker(void *cookie, u64 ip, u64 sp, u64 bp)
+ 	struct bpf_throw_ctx *ctx = cookie;
+ 	struct bpf_prog *prog;
+ 
+-	if (!is_bpf_text_address(ip))
+-		return !ctx->cnt;
++	/*
++	 * The RCU read lock is held to safely traverse the latch tree, but we
++	 * don't need its protection when accessing the prog, since it has an
++	 * active stack frame on the current stack trace, and won't disappear.
++	 */
++	rcu_read_lock();
+ 	prog = bpf_prog_ksym_find(ip);
++	rcu_read_unlock();
++	if (!prog)
++		return !ctx->cnt;
+ 	ctx->cnt++;
+ 	if (bpf_is_subprog(prog))
+ 		return true;
+diff --git a/kernel/bpf/preload/Kconfig b/kernel/bpf/preload/Kconfig
+index c9d45c9d6918d1..f9b11d01c3b50d 100644
+--- a/kernel/bpf/preload/Kconfig
++++ b/kernel/bpf/preload/Kconfig
+@@ -10,7 +10,6 @@ menuconfig BPF_PRELOAD
+ 	# The dependency on !COMPILE_TEST prevents it from being enabled
+ 	# in allmodconfig or allyesconfig configurations
+ 	depends on !COMPILE_TEST
+-	select USERMODE_DRIVER
+ 	help
+ 	  This builds kernel module with several embedded BPF programs that are
+ 	  pinned into BPF FS mount point as human readable files that are
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index dd745485b0f46d..3cc06ffb60c1bc 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -6617,11 +6617,21 @@ static void perf_mmap_close(struct vm_area_struct *vma)
+ 	ring_buffer_put(rb); /* could be last */
+ }
+ 
++static int perf_mmap_may_split(struct vm_area_struct *vma, unsigned long addr)
++{
++	/*
++	 * Forbid splitting perf mappings to prevent refcount leaks due to
++	 * the resulting non-matching offsets and sizes. See open()/close().
++	 */
++	return -EINVAL;
++}
++
+ static const struct vm_operations_struct perf_mmap_vmops = {
+ 	.open		= perf_mmap_open,
+ 	.close		= perf_mmap_close, /* non mergeable */
+ 	.fault		= perf_mmap_fault,
+ 	.page_mkwrite	= perf_mmap_fault,
++	.may_split	= perf_mmap_may_split,
+ };
+ 
+ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
+@@ -6713,9 +6723,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
+ 			goto unlock;
+ 		}
+ 
+-		atomic_set(&rb->aux_mmap_count, 1);
+ 		user_extra = nr_pages;
+-
+ 		goto accounting;
+ 	}
+ 
+@@ -6817,8 +6825,10 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
+ 	} else {
+ 		ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages,
+ 				   event->attr.aux_watermark, flags);
+-		if (!ret)
++		if (!ret) {
++			atomic_set(&rb->aux_mmap_count, 1);
+ 			rb->aux_mmap_locked = extra;
++		}
+ 	}
+ 
+ unlock:
+@@ -6828,6 +6838,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
+ 
+ 		atomic_inc(&event->mmap_count);
+ 	} else if (rb) {
++		/* AUX allocation failed */
+ 		atomic_dec(&rb->mmap_count);
+ 	}
+ aux_unlock:
+@@ -6835,6 +6846,9 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
+ 		mutex_unlock(aux_mutex);
+ 	mutex_unlock(&event->mmap_mutex);
+ 
++	if (ret)
++		return ret;
++
+ 	/*
+ 	 * Since pinned accounting is per vm we cannot allow fork() to copy our
+ 	 * vma.
+diff --git a/kernel/kcsan/kcsan_test.c b/kernel/kcsan/kcsan_test.c
+index 117d9d4d3c3bd6..5121d9a6dd3b50 100644
+--- a/kernel/kcsan/kcsan_test.c
++++ b/kernel/kcsan/kcsan_test.c
+@@ -533,7 +533,7 @@ static void test_barrier_nothreads(struct kunit *test)
+ 	struct kcsan_scoped_access *reorder_access = NULL;
+ #endif
+ 	arch_spinlock_t arch_spinlock = __ARCH_SPIN_LOCK_UNLOCKED;
+-	atomic_t dummy;
++	atomic_t dummy = ATOMIC_INIT(0);
+ 
+ 	KCSAN_TEST_REQUIRES(test, reorder_access != NULL);
+ 	KCSAN_TEST_REQUIRES(test, IS_ENABLED(CONFIG_SMP));
+diff --git a/kernel/module/main.c b/kernel/module/main.c
+index 93a07387af3b75..6908062f456039 100644
+--- a/kernel/module/main.c
++++ b/kernel/module/main.c
+@@ -2898,7 +2898,7 @@ static int load_module(struct load_info *info, const char __user *uargs,
+ 
+ 	module_allocated = true;
+ 
+-	audit_log_kern_module(mod->name);
++	audit_log_kern_module(info->name);
+ 
+ 	/* Reserve our place in the list. */
+ 	err = add_unformed_module(mod);
+@@ -3058,8 +3058,10 @@ static int load_module(struct load_info *info, const char __user *uargs,
+ 	 * failures once the proper module was allocated and
+ 	 * before that.
+ 	 */
+-	if (!module_allocated)
++	if (!module_allocated) {
++		audit_log_kern_module(info->name ? info->name : "?");
+ 		mod_stat_bump_becoming(info, flags);
++	}
+ 	free_copy(info, flags);
+ 	return err;
+ }
+diff --git a/kernel/rcu/refscale.c b/kernel/rcu/refscale.c
+index 0db9db73f57f25..36b78d5a067533 100644
+--- a/kernel/rcu/refscale.c
++++ b/kernel/rcu/refscale.c
+@@ -81,7 +81,7 @@ torture_param(int, holdoff, IS_BUILTIN(CONFIG_RCU_REF_SCALE_TEST) ? 10 : 0,
+ // Number of typesafe_lookup structures, that is, the degree of concurrency.
+ torture_param(long, lookup_instances, 0, "Number of typesafe_lookup structures.");
+ // Number of loops per experiment, all readers execute operations concurrently.
+-torture_param(long, loops, 10000, "Number of loops per experiment.");
++torture_param(int, loops, 10000, "Number of loops per experiment.");
+ // Number of readers, with -1 defaulting to about 75% of the CPUs.
+ torture_param(int, nreaders, -1, "Number of readers, -1 for 75% of CPUs.");
+ // Number of runs.
+@@ -1029,7 +1029,7 @@ static void
+ ref_scale_print_module_parms(const struct ref_scale_ops *cur_ops, const char *tag)
+ {
+ 	pr_alert("%s" SCALE_FLAG
+-		 "--- %s:  verbose=%d verbose_batched=%d shutdown=%d holdoff=%d lookup_instances=%ld loops=%ld nreaders=%d nruns=%d readdelay=%d\n", scale_type, tag,
++		 "--- %s:  verbose=%d verbose_batched=%d shutdown=%d holdoff=%d lookup_instances=%ld loops=%d nreaders=%d nruns=%d readdelay=%d\n", scale_type, tag,
+ 		 verbose, verbose_batched, shutdown, holdoff, lookup_instances, loops, nreaders, nruns, readdelay);
+ }
+ 
+@@ -1126,12 +1126,16 @@ ref_scale_init(void)
+ 	// Reader tasks (default to ~75% of online CPUs).
+ 	if (nreaders < 0)
+ 		nreaders = (num_online_cpus() >> 1) + (num_online_cpus() >> 2);
+-	if (WARN_ONCE(loops <= 0, "%s: loops = %ld, adjusted to 1\n", __func__, loops))
++	if (WARN_ONCE(loops <= 0, "%s: loops = %d, adjusted to 1\n", __func__, loops))
+ 		loops = 1;
+ 	if (WARN_ONCE(nreaders <= 0, "%s: nreaders = %d, adjusted to 1\n", __func__, nreaders))
+ 		nreaders = 1;
+ 	if (WARN_ONCE(nruns <= 0, "%s: nruns = %d, adjusted to 1\n", __func__, nruns))
+ 		nruns = 1;
++	if (WARN_ONCE(loops > INT_MAX / nreaders,
++		      "%s: nreaders * loops will overflow, adjusted loops to %d\n",
++		      __func__, INT_MAX / nreaders))
++		loops = INT_MAX / nreaders;
+ 	reader_tasks = kcalloc(nreaders, sizeof(reader_tasks[0]),
+ 			       GFP_KERNEL);
+ 	if (!reader_tasks) {
+diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h
+index 2605dd234a13c8..2ad3a88623a7c4 100644
+--- a/kernel/rcu/tree_nocb.h
++++ b/kernel/rcu/tree_nocb.h
+@@ -276,7 +276,7 @@ static void wake_nocb_gp_defer(struct rcu_data *rdp, int waketype,
+ 	 * callback storms, no need to wake up too early.
+ 	 */
+ 	if (waketype == RCU_NOCB_WAKE_LAZY &&
+-	    rdp->nocb_defer_wakeup == RCU_NOCB_WAKE_NOT) {
++	    rdp_gp->nocb_defer_wakeup == RCU_NOCB_WAKE_NOT) {
+ 		mod_timer(&rdp_gp->nocb_timer, jiffies + rcu_get_jiffies_lazy_flush());
+ 		WRITE_ONCE(rdp_gp->nocb_defer_wakeup, waketype);
+ 	} else if (waketype == RCU_NOCB_WAKE_BYPASS) {
+diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
+index 84dad1511d1e48..7d0f8fdd48a34c 100644
+--- a/kernel/sched/psi.c
++++ b/kernel/sched/psi.c
+@@ -172,17 +172,35 @@ struct psi_group psi_system = {
+ 	.pcpu = &system_group_pcpu,
+ };
+ 
++static DEFINE_PER_CPU(seqcount_t, psi_seq) = SEQCNT_ZERO(psi_seq);
++
++static inline void psi_write_begin(int cpu)
++{
++	write_seqcount_begin(per_cpu_ptr(&psi_seq, cpu));
++}
++
++static inline void psi_write_end(int cpu)
++{
++	write_seqcount_end(per_cpu_ptr(&psi_seq, cpu));
++}
++
++static inline u32 psi_read_begin(int cpu)
++{
++	return read_seqcount_begin(per_cpu_ptr(&psi_seq, cpu));
++}
++
++static inline bool psi_read_retry(int cpu, u32 seq)
++{
++	return read_seqcount_retry(per_cpu_ptr(&psi_seq, cpu), seq);
++}
++
+ static void psi_avgs_work(struct work_struct *work);
+ 
+ static void poll_timer_fn(struct timer_list *t);
+ 
+ static void group_init(struct psi_group *group)
+ {
+-	int cpu;
+-
+ 	group->enabled = true;
+-	for_each_possible_cpu(cpu)
+-		seqcount_init(&per_cpu_ptr(group->pcpu, cpu)->seq);
+ 	group->avg_last_update = sched_clock();
+ 	group->avg_next_update = group->avg_last_update + psi_period;
+ 	mutex_init(&group->avgs_lock);
+@@ -262,14 +280,14 @@ static void get_recent_times(struct psi_group *group, int cpu,
+ 
+ 	/* Snapshot a coherent view of the CPU state */
+ 	do {
+-		seq = read_seqcount_begin(&groupc->seq);
++		seq = psi_read_begin(cpu);
+ 		now = cpu_clock(cpu);
+ 		memcpy(times, groupc->times, sizeof(groupc->times));
+ 		state_mask = groupc->state_mask;
+ 		state_start = groupc->state_start;
+ 		if (cpu == current_cpu)
+ 			memcpy(tasks, groupc->tasks, sizeof(groupc->tasks));
+-	} while (read_seqcount_retry(&groupc->seq, seq));
++	} while (psi_read_retry(cpu, seq));
+ 
+ 	/* Calculate state time deltas against the previous snapshot */
+ 	for (s = 0; s < NR_PSI_STATES; s++) {
+@@ -768,30 +786,20 @@ static void record_times(struct psi_group_cpu *groupc, u64 now)
+ 		groupc->times[PSI_NONIDLE] += delta;
+ }
+ 
++#define for_each_group(iter, group) \
++	for (typeof(group) iter = group; iter; iter = iter->parent)
++
+ static void psi_group_change(struct psi_group *group, int cpu,
+ 			     unsigned int clear, unsigned int set,
+-			     bool wake_clock)
++			     u64 now, bool wake_clock)
+ {
+ 	struct psi_group_cpu *groupc;
+ 	unsigned int t, m;
+ 	u32 state_mask;
+-	u64 now;
+ 
+ 	lockdep_assert_rq_held(cpu_rq(cpu));
+ 	groupc = per_cpu_ptr(group->pcpu, cpu);
+ 
+-	/*
+-	 * First we update the task counts according to the state
+-	 * change requested through the @clear and @set bits.
+-	 *
+-	 * Then if the cgroup PSI stats accounting enabled, we
+-	 * assess the aggregate resource states this CPU's tasks
+-	 * have been in since the last change, and account any
+-	 * SOME and FULL time these may have resulted in.
+-	 */
+-	write_seqcount_begin(&groupc->seq);
+-	now = cpu_clock(cpu);
+-
+ 	/*
+ 	 * Start with TSK_ONCPU, which doesn't have a corresponding
+ 	 * task count - it's just a boolean flag directly encoded in
+@@ -843,7 +851,6 @@ static void psi_group_change(struct psi_group *group, int cpu,
+ 
+ 		groupc->state_mask = state_mask;
+ 
+-		write_seqcount_end(&groupc->seq);
+ 		return;
+ 	}
+ 
+@@ -864,8 +871,6 @@ static void psi_group_change(struct psi_group *group, int cpu,
+ 
+ 	groupc->state_mask = state_mask;
+ 
+-	write_seqcount_end(&groupc->seq);
+-
+ 	if (state_mask & group->rtpoll_states)
+ 		psi_schedule_rtpoll_work(group, 1, false);
+ 
+@@ -900,24 +905,29 @@ static void psi_flags_change(struct task_struct *task, int clear, int set)
+ void psi_task_change(struct task_struct *task, int clear, int set)
+ {
+ 	int cpu = task_cpu(task);
+-	struct psi_group *group;
++	u64 now;
+ 
+ 	if (!task->pid)
+ 		return;
+ 
+ 	psi_flags_change(task, clear, set);
+ 
+-	group = task_psi_group(task);
+-	do {
+-		psi_group_change(group, cpu, clear, set, true);
+-	} while ((group = group->parent));
++	psi_write_begin(cpu);
++	now = cpu_clock(cpu);
++	for_each_group(group, task_psi_group(task))
++		psi_group_change(group, cpu, clear, set, now, true);
++	psi_write_end(cpu);
+ }
+ 
+ void psi_task_switch(struct task_struct *prev, struct task_struct *next,
+ 		     bool sleep)
+ {
+-	struct psi_group *group, *common = NULL;
++	struct psi_group *common = NULL;
+ 	int cpu = task_cpu(prev);
++	u64 now;
++
++	psi_write_begin(cpu);
++	now = cpu_clock(cpu);
+ 
+ 	if (next->pid) {
+ 		psi_flags_change(next, 0, TSK_ONCPU);
+@@ -926,16 +936,15 @@ void psi_task_switch(struct task_struct *prev, struct task_struct *next,
+ 		 * ancestors with @prev, those will already have @prev's
+ 		 * TSK_ONCPU bit set, and we can stop the iteration there.
+ 		 */
+-		group = task_psi_group(next);
+-		do {
+-			if (per_cpu_ptr(group->pcpu, cpu)->state_mask &
+-			    PSI_ONCPU) {
++		for_each_group(group, task_psi_group(next)) {
++			struct psi_group_cpu *groupc = per_cpu_ptr(group->pcpu, cpu);
++
++			if (groupc->state_mask & PSI_ONCPU) {
+ 				common = group;
+ 				break;
+ 			}
+-
+-			psi_group_change(group, cpu, 0, TSK_ONCPU, true);
+-		} while ((group = group->parent));
++			psi_group_change(group, cpu, 0, TSK_ONCPU, now, true);
++		}
+ 	}
+ 
+ 	if (prev->pid) {
+@@ -968,12 +977,11 @@ void psi_task_switch(struct task_struct *prev, struct task_struct *next,
+ 
+ 		psi_flags_change(prev, clear, set);
+ 
+-		group = task_psi_group(prev);
+-		do {
++		for_each_group(group, task_psi_group(prev)) {
+ 			if (group == common)
+ 				break;
+-			psi_group_change(group, cpu, clear, set, wake_clock);
+-		} while ((group = group->parent));
++			psi_group_change(group, cpu, clear, set, now, wake_clock);
++		}
+ 
+ 		/*
+ 		 * TSK_ONCPU is handled up to the common ancestor. If there are
+@@ -983,20 +991,21 @@ void psi_task_switch(struct task_struct *prev, struct task_struct *next,
+ 		 */
+ 		if ((prev->psi_flags ^ next->psi_flags) & ~TSK_ONCPU) {
+ 			clear &= ~TSK_ONCPU;
+-			for (; group; group = group->parent)
+-				psi_group_change(group, cpu, clear, set, wake_clock);
++			for_each_group(group, common)
++				psi_group_change(group, cpu, clear, set, now, wake_clock);
+ 		}
+ 	}
++	psi_write_end(cpu);
+ }
+ 
+ #ifdef CONFIG_IRQ_TIME_ACCOUNTING
+ void psi_account_irqtime(struct rq *rq, struct task_struct *curr, struct task_struct *prev)
+ {
+ 	int cpu = task_cpu(curr);
+-	struct psi_group *group;
+ 	struct psi_group_cpu *groupc;
+ 	s64 delta;
+ 	u64 irq;
++	u64 now;
+ 
+ 	if (static_branch_likely(&psi_disabled))
+ 		return;
+@@ -1005,8 +1014,7 @@ void psi_account_irqtime(struct rq *rq, struct task_struct *curr, struct task_st
+ 		return;
+ 
+ 	lockdep_assert_rq_held(rq);
+-	group = task_psi_group(curr);
+-	if (prev && task_psi_group(prev) == group)
++	if (prev && task_psi_group(prev) == task_psi_group(curr))
+ 		return;
+ 
+ 	irq = irq_time_read(cpu);
+@@ -1015,25 +1023,22 @@ void psi_account_irqtime(struct rq *rq, struct task_struct *curr, struct task_st
+ 		return;
+ 	rq->psi_irq_time = irq;
+ 
+-	do {
+-		u64 now;
++	psi_write_begin(cpu);
++	now = cpu_clock(cpu);
+ 
++	for_each_group(group, task_psi_group(curr)) {
+ 		if (!group->enabled)
+ 			continue;
+ 
+ 		groupc = per_cpu_ptr(group->pcpu, cpu);
+ 
+-		write_seqcount_begin(&groupc->seq);
+-		now = cpu_clock(cpu);
+-
+ 		record_times(groupc, now);
+ 		groupc->times[PSI_IRQ_FULL] += delta;
+ 
+-		write_seqcount_end(&groupc->seq);
+-
+ 		if (group->rtpoll_states & (1 << PSI_IRQ_FULL))
+ 			psi_schedule_rtpoll_work(group, 1, false);
+-	} while ((group = group->parent));
++	}
++	psi_write_end(cpu);
+ }
+ #endif
+ 
+@@ -1221,12 +1226,14 @@ void psi_cgroup_restart(struct psi_group *group)
+ 		return;
+ 
+ 	for_each_possible_cpu(cpu) {
+-		struct rq *rq = cpu_rq(cpu);
+-		struct rq_flags rf;
++		u64 now;
+ 
+-		rq_lock_irq(rq, &rf);
+-		psi_group_change(group, cpu, 0, 0, true);
+-		rq_unlock_irq(rq, &rf);
++		guard(rq_lock_irq)(cpu_rq(cpu));
++
++		psi_write_begin(cpu);
++		now = cpu_clock(cpu);
++		psi_group_change(group, cpu, 0, 0, now, true);
++		psi_write_end(cpu);
+ 	}
+ }
+ #endif /* CONFIG_CGROUPS */
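
The per-CPU seqcount introduced above follows the standard seqcount pattern;
a generic hedged sketch with illustrative names:

    #include <linux/seqlock.h>

    static seqcount_t my_seq = SEQCNT_ZERO(my_seq);
    static u64 my_times[4];

    /* Writer, serialized externally (here: by the rq lock). */
    static void my_update(int i, u64 delta)
    {
    	write_seqcount_begin(&my_seq);
    	my_times[i] += delta;
    	write_seqcount_end(&my_seq);
    }

    /* Lockless reader: retry if a writer was active meanwhile. */
    static u64 my_read(int i)
    {
    	unsigned int seq;
    	u64 val;

    	do {
    		seq = read_seqcount_begin(&my_seq);
    		val = my_times[i];
    	} while (read_seqcount_retry(&my_seq, seq));

    	return val;
    }

Hoisting the seqcount out of psi_group_cpu means one write section covers
the whole cgroup-hierarchy walk instead of one section per group.
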
+diff --git a/kernel/trace/preemptirq_delay_test.c b/kernel/trace/preemptirq_delay_test.c
+index 314ffc143039c5..acb0c971a4082a 100644
+--- a/kernel/trace/preemptirq_delay_test.c
++++ b/kernel/trace/preemptirq_delay_test.c
+@@ -117,12 +117,15 @@ static int preemptirq_delay_run(void *data)
+ {
+ 	int i;
+ 	int s = MIN(burst_size, NR_TEST_FUNCS);
+-	struct cpumask cpu_mask;
++	cpumask_var_t cpu_mask;
++
++	if (!alloc_cpumask_var(&cpu_mask, GFP_KERNEL))
++		return -ENOMEM;
+ 
+ 	if (cpu_affinity > -1) {
+-		cpumask_clear(&cpu_mask);
+-		cpumask_set_cpu(cpu_affinity, &cpu_mask);
+-		if (set_cpus_allowed_ptr(current, &cpu_mask))
++		cpumask_clear(cpu_mask);
++		cpumask_set_cpu(cpu_affinity, cpu_mask);
++		if (set_cpus_allowed_ptr(current, cpu_mask))
+ 			pr_err("cpu_affinity:%d, failed\n", cpu_affinity);
+ 	}
+ 
+@@ -139,6 +142,8 @@ static int preemptirq_delay_run(void *data)
+ 
+ 	__set_current_state(TASK_RUNNING);
+ 
++	free_cpumask_var(cpu_mask);
++
+ 	return 0;
+ }
+ 
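
The conversion above is the usual cure for large on-stack cpumasks: with
CONFIG_CPUMASK_OFFSTACK=y (think NR_CPUS=8192), struct cpumask is too big
for the stack, so cpumask_var_t allocates it from the heap instead. The
general pattern, as a hedged sketch:

    #include <linux/cpumask.h>
    #include <linux/sched.h>
    #include <linux/slab.h>

    static int pin_current_to_cpu(int cpu)
    {
    	cpumask_var_t mask;
    	int ret;

    	/* Heap allocation only when CONFIG_CPUMASK_OFFSTACK=y;
    	 * otherwise this is a cheap on-stack mask. */
    	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
    		return -ENOMEM;

    	cpumask_clear(mask);
    	cpumask_set_cpu(cpu, mask);
    	ret = set_cpus_allowed_ptr(current, mask);

    	free_cpumask_var(mask);
    	return ret;
    }
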
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 6ab740d3185bc3..95641a46db4cd3 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -5813,24 +5813,20 @@ ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts,
+ EXPORT_SYMBOL_GPL(ring_buffer_consume);
+ 
+ /**
+- * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
++ * ring_buffer_read_start - start a non consuming read of the buffer
+  * @buffer: The ring buffer to read from
+  * @cpu: The cpu buffer to iterate over
+  * @flags: gfp flags to use for memory allocation
+  *
+- * This performs the initial preparations necessary to iterate
+- * through the buffer.  Memory is allocated, buffer resizing
+- * is disabled, and the iterator pointer is returned to the caller.
+- *
+- * After a sequence of ring_buffer_read_prepare calls, the user is
+- * expected to make at least one call to ring_buffer_read_prepare_sync.
+- * Afterwards, ring_buffer_read_start is invoked to get things going
+- * for real.
++ * This creates an iterator to allow non-consuming iteration through
++ * the buffer. If the buffer is disabled for writing, it will produce
++ * the same information each time, but if the buffer is still writing
++ * then the first hit of a write will cause the iteration to stop.
+  *
+- * This overall must be paired with ring_buffer_read_finish.
++ * Must be paired with ring_buffer_read_finish.
+  */
+ struct ring_buffer_iter *
+-ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags)
++ring_buffer_read_start(struct trace_buffer *buffer, int cpu, gfp_t flags)
+ {
+ 	struct ring_buffer_per_cpu *cpu_buffer;
+ 	struct ring_buffer_iter *iter;
+@@ -5856,51 +5852,12 @@ ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags)
+ 
+ 	atomic_inc(&cpu_buffer->resize_disabled);
+ 
+-	return iter;
+-}
+-EXPORT_SYMBOL_GPL(ring_buffer_read_prepare);
+-
+-/**
+- * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls
+- *
+- * All previously invoked ring_buffer_read_prepare calls to prepare
+- * iterators will be synchronized.  Afterwards, read_buffer_read_start
+- * calls on those iterators are allowed.
+- */
+-void
+-ring_buffer_read_prepare_sync(void)
+-{
+-	synchronize_rcu();
+-}
+-EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);
+-
+-/**
+- * ring_buffer_read_start - start a non consuming read of the buffer
+- * @iter: The iterator returned by ring_buffer_read_prepare
+- *
+- * This finalizes the startup of an iteration through the buffer.
+- * The iterator comes from a call to ring_buffer_read_prepare and
+- * an intervening ring_buffer_read_prepare_sync must have been
+- * performed.
+- *
+- * Must be paired with ring_buffer_read_finish.
+- */
+-void
+-ring_buffer_read_start(struct ring_buffer_iter *iter)
+-{
+-	struct ring_buffer_per_cpu *cpu_buffer;
+-	unsigned long flags;
+-
+-	if (!iter)
+-		return;
+-
+-	cpu_buffer = iter->cpu_buffer;
+-
+-	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
++	guard(raw_spinlock_irqsave)(&cpu_buffer->reader_lock);
+ 	arch_spin_lock(&cpu_buffer->lock);
+ 	rb_iter_reset(iter);
+ 	arch_spin_unlock(&cpu_buffer->lock);
+-	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
++
++	return iter;
+ }
+ EXPORT_SYMBOL_GPL(ring_buffer_read_start);
+ 
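
With the prepare/sync/start triple collapsed into a single call, a
non-consuming reader now looks roughly like this (hedged sketch; error
handling elided):

    struct ring_buffer_iter *iter;
    struct ring_buffer_event *event;

    iter = ring_buffer_read_start(buffer, cpu, GFP_KERNEL);
    if (iter) {
    	while ((event = ring_buffer_iter_peek(iter, NULL))) {
    		/* ... consume the event ... */
    		ring_buffer_iter_advance(iter);
    	}
    	ring_buffer_read_finish(iter);
    }

The trace.c and trace_kdb.c hunks below are the in-tree callers updated to
this shape.
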
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 2dc5cfecb016b1..801def692f9299 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -4648,21 +4648,15 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
+ 	if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
+ 		for_each_tracing_cpu(cpu) {
+ 			iter->buffer_iter[cpu] =
+-				ring_buffer_read_prepare(iter->array_buffer->buffer,
+-							 cpu, GFP_KERNEL);
+-		}
+-		ring_buffer_read_prepare_sync();
+-		for_each_tracing_cpu(cpu) {
+-			ring_buffer_read_start(iter->buffer_iter[cpu]);
++				ring_buffer_read_start(iter->array_buffer->buffer,
++						       cpu, GFP_KERNEL);
+ 			tracing_iter_reset(iter, cpu);
+ 		}
+ 	} else {
+ 		cpu = iter->cpu_file;
+ 		iter->buffer_iter[cpu] =
+-			ring_buffer_read_prepare(iter->array_buffer->buffer,
+-						 cpu, GFP_KERNEL);
+-		ring_buffer_read_prepare_sync();
+-		ring_buffer_read_start(iter->buffer_iter[cpu]);
++			ring_buffer_read_start(iter->array_buffer->buffer,
++					       cpu, GFP_KERNEL);
+ 		tracing_iter_reset(iter, cpu);
+ 	}
+ 
+diff --git a/kernel/trace/trace_kdb.c b/kernel/trace/trace_kdb.c
+index 59857a1ee44cdf..628c25693cef2f 100644
+--- a/kernel/trace/trace_kdb.c
++++ b/kernel/trace/trace_kdb.c
+@@ -43,17 +43,15 @@ static void ftrace_dump_buf(int skip_entries, long cpu_file)
+ 	if (cpu_file == RING_BUFFER_ALL_CPUS) {
+ 		for_each_tracing_cpu(cpu) {
+ 			iter.buffer_iter[cpu] =
+-			ring_buffer_read_prepare(iter.array_buffer->buffer,
+-						 cpu, GFP_ATOMIC);
+-			ring_buffer_read_start(iter.buffer_iter[cpu]);
++			ring_buffer_read_start(iter.array_buffer->buffer,
++					       cpu, GFP_ATOMIC);
+ 			tracing_iter_reset(&iter, cpu);
+ 		}
+ 	} else {
+ 		iter.cpu_file = cpu_file;
+ 		iter.buffer_iter[cpu_file] =
+-			ring_buffer_read_prepare(iter.array_buffer->buffer,
++			ring_buffer_read_start(iter.array_buffer->buffer,
+ 						 cpu_file, GFP_ATOMIC);
+-		ring_buffer_read_start(iter.buffer_iter[cpu_file]);
+ 		tracing_iter_reset(&iter, cpu_file);
+ 	}
+ 
+diff --git a/kernel/ucount.c b/kernel/ucount.c
+index 696406939be554..78f4c4255358f9 100644
+--- a/kernel/ucount.c
++++ b/kernel/ucount.c
+@@ -212,7 +212,7 @@ void put_ucounts(struct ucounts *ucounts)
+ 	}
+ }
+ 
+-static inline bool atomic_long_inc_below(atomic_long_t *v, int u)
++static inline bool atomic_long_inc_below(atomic_long_t *v, long u)
+ {
+ 	long c, old;
+ 	c = atomic_long_read(v);
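
The int-to-long change matters because callers can pass limits above
INT_MAX; with the old prototype the bound was silently truncated. A hedged
userspace illustration of the bug class (assumes a 64-bit long):

    #include <limits.h>
    #include <stdio.h>

    int main(void)
    {
    	long limit = (1L << 32) + 10;	/* legitimate large limit */
    	int  as_int = (int)limit;	/* what the old prototype saw */

    	printf("limit=%ld, truncated=%d\n", limit, as_int);
    	return 0;
    }

Here a limit of 4294967306 arrives as 10, so increments are refused far too
early.
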
+diff --git a/mm/hmm.c b/mm/hmm.c
+index 7e0229ae4a5a6b..a67776aeb01999 100644
+--- a/mm/hmm.c
++++ b/mm/hmm.c
+@@ -173,6 +173,7 @@ static inline unsigned long hmm_pfn_flags_order(unsigned long order)
+ 	return order << HMM_PFN_ORDER_SHIFT;
+ }
+ 
++#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ static inline unsigned long pmd_to_hmm_pfn_flags(struct hmm_range *range,
+ 						 pmd_t pmd)
+ {
+@@ -183,7 +184,6 @@ static inline unsigned long pmd_to_hmm_pfn_flags(struct hmm_range *range,
+ 	       hmm_pfn_flags_order(PMD_SHIFT - PAGE_SHIFT);
+ }
+ 
+-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ static int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
+ 			      unsigned long end, unsigned long hmm_pfns[],
+ 			      pmd_t pmd)
+diff --git a/mm/swapfile.c b/mm/swapfile.c
+index b0a9071cfe1dae..c02493d9c7bee5 100644
+--- a/mm/swapfile.c
++++ b/mm/swapfile.c
+@@ -3237,43 +3237,30 @@ static unsigned long read_swap_header(struct swap_info_struct *si,
+ #define SWAP_CLUSTER_COLS						\
+ 	max_t(unsigned int, SWAP_CLUSTER_INFO_COLS, SWAP_CLUSTER_SPACE_COLS)
+ 
+-static int setup_swap_map_and_extents(struct swap_info_struct *si,
+-					union swap_header *swap_header,
+-					unsigned char *swap_map,
+-					unsigned long maxpages,
+-					sector_t *span)
++static int setup_swap_map(struct swap_info_struct *si,
++			  union swap_header *swap_header,
++			  unsigned char *swap_map,
++			  unsigned long maxpages)
+ {
+-	unsigned int nr_good_pages;
+ 	unsigned long i;
+-	int nr_extents;
+-
+-	nr_good_pages = maxpages - 1;	/* omit header page */
+ 
++	swap_map[0] = SWAP_MAP_BAD; /* omit header page */
+ 	for (i = 0; i < swap_header->info.nr_badpages; i++) {
+ 		unsigned int page_nr = swap_header->info.badpages[i];
+ 		if (page_nr == 0 || page_nr > swap_header->info.last_page)
+ 			return -EINVAL;
+ 		if (page_nr < maxpages) {
+ 			swap_map[page_nr] = SWAP_MAP_BAD;
+-			nr_good_pages--;
++			si->pages--;
+ 		}
+ 	}
+ 
+-	if (nr_good_pages) {
+-		swap_map[0] = SWAP_MAP_BAD;
+-		si->max = maxpages;
+-		si->pages = nr_good_pages;
+-		nr_extents = setup_swap_extents(si, span);
+-		if (nr_extents < 0)
+-			return nr_extents;
+-		nr_good_pages = si->pages;
+-	}
+-	if (!nr_good_pages) {
++	if (!si->pages) {
+ 		pr_warn("Empty swap-file\n");
+ 		return -EINVAL;
+ 	}
+ 
+-	return nr_extents;
++	return 0;
+ }
+ 
+ static struct swap_cluster_info *setup_clusters(struct swap_info_struct *si,
+@@ -3318,13 +3305,17 @@ static struct swap_cluster_info *setup_clusters(struct swap_info_struct *si,
+ 	 * Mark unusable pages as unavailable. The clusters aren't
+ 	 * marked free yet, so no list operations are involved yet.
+ 	 *
+-	 * See setup_swap_map_and_extents(): header page, bad pages,
++	 * See setup_swap_map(): header page, bad pages,
+ 	 * and the EOF part of the last cluster.
+ 	 */
+ 	inc_cluster_info_page(si, cluster_info, 0);
+-	for (i = 0; i < swap_header->info.nr_badpages; i++)
+-		inc_cluster_info_page(si, cluster_info,
+-				      swap_header->info.badpages[i]);
++	for (i = 0; i < swap_header->info.nr_badpages; i++) {
++		unsigned int page_nr = swap_header->info.badpages[i];
++
++		if (page_nr >= maxpages)
++			continue;
++		inc_cluster_info_page(si, cluster_info, page_nr);
++	}
+ 	for (i = maxpages; i < round_up(maxpages, SWAPFILE_CLUSTER); i++)
+ 		inc_cluster_info_page(si, cluster_info, i);
+ 
+@@ -3456,6 +3447,21 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
+ 		goto bad_swap_unlock_inode;
+ 	}
+ 
++	si->max = maxpages;
++	si->pages = maxpages - 1;
++	nr_extents = setup_swap_extents(si, &span);
++	if (nr_extents < 0) {
++		error = nr_extents;
++		goto bad_swap_unlock_inode;
++	}
++	if (si->pages != si->max - 1) {
++		pr_err("swap:%u != (max:%u - 1)\n", si->pages, si->max);
++		error = -EINVAL;
++		goto bad_swap_unlock_inode;
++	}
++
++	maxpages = si->max;
++
+ 	/* OK, set up the swap map and apply the bad block list */
+ 	swap_map = vzalloc(maxpages);
+ 	if (!swap_map) {
+@@ -3467,12 +3473,9 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
+ 	if (error)
+ 		goto bad_swap_unlock_inode;
+ 
+-	nr_extents = setup_swap_map_and_extents(si, swap_header, swap_map,
+-						maxpages, &span);
+-	if (unlikely(nr_extents < 0)) {
+-		error = nr_extents;
++	error = setup_swap_map(si, swap_header, swap_map, maxpages);
++	if (error)
+ 		goto bad_swap_unlock_inode;
+-	}
+ 
+ 	/*
+ 	 * Use kvmalloc_array instead of bitmap_zalloc as the allocation order might
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index b7dcebc701898d..38643ffa65a930 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -6221,6 +6221,11 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, void *data,
+ 
+ static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
+ {
++	u16 pdu_type = evt_type & ~LE_EXT_ADV_DATA_STATUS_MASK;
++
++	if (!pdu_type)
++		return LE_ADV_NONCONN_IND;
++
+ 	if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
+ 		switch (evt_type) {
+ 		case LE_LEGACY_ADV_IND:
+@@ -6252,8 +6257,7 @@ static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
+ 	if (evt_type & LE_EXT_ADV_SCAN_IND)
+ 		return LE_ADV_SCAN_IND;
+ 
+-	if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
+-	    evt_type & LE_EXT_ADV_DIRECT_IND)
++	if (evt_type & LE_EXT_ADV_DIRECT_IND)
+ 		return LE_ADV_NONCONN_IND;
+ 
+ invalid:
+diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
+index 20139fa1be1fff..06b604cf9d58c0 100644
+--- a/net/caif/cfctrl.c
++++ b/net/caif/cfctrl.c
+@@ -351,17 +351,154 @@ int cfctrl_cancel_req(struct cflayer *layr, struct cflayer *adap_layer)
+ 	return found;
+ }
+ 
++static int cfctrl_link_setup(struct cfctrl *cfctrl, struct cfpkt *pkt, u8 cmdrsp)
++{
++	u8 len;
++	u8 linkid = 0;
++	enum cfctrl_srv serv;
++	enum cfctrl_srv servtype;
++	u8 endpoint;
++	u8 physlinkid;
++	u8 prio;
++	u8 tmp;
++	u8 *cp;
++	int i;
++	struct cfctrl_link_param linkparam;
++	struct cfctrl_request_info rsp, *req;
++
++	memset(&linkparam, 0, sizeof(linkparam));
++
++	tmp = cfpkt_extr_head_u8(pkt);
++
++	serv = tmp & CFCTRL_SRV_MASK;
++	linkparam.linktype = serv;
++
++	servtype = tmp >> 4;
++	linkparam.chtype = servtype;
++
++	tmp = cfpkt_extr_head_u8(pkt);
++	physlinkid = tmp & 0x07;
++	prio = tmp >> 3;
++
++	linkparam.priority = prio;
++	linkparam.phyid = physlinkid;
++	endpoint = cfpkt_extr_head_u8(pkt);
++	linkparam.endpoint = endpoint & 0x03;
++
++	switch (serv) {
++	case CFCTRL_SRV_VEI:
++	case CFCTRL_SRV_DBG:
++		if (CFCTRL_ERR_BIT & cmdrsp)
++			break;
++		/* Link ID */
++		linkid = cfpkt_extr_head_u8(pkt);
++		break;
++	case CFCTRL_SRV_VIDEO:
++		tmp = cfpkt_extr_head_u8(pkt);
++		linkparam.u.video.connid = tmp;
++		if (CFCTRL_ERR_BIT & cmdrsp)
++			break;
++		/* Link ID */
++		linkid = cfpkt_extr_head_u8(pkt);
++		break;
++
++	case CFCTRL_SRV_DATAGRAM:
++		linkparam.u.datagram.connid = cfpkt_extr_head_u32(pkt);
++		if (CFCTRL_ERR_BIT & cmdrsp)
++			break;
++		/* Link ID */
++		linkid = cfpkt_extr_head_u8(pkt);
++		break;
++	case CFCTRL_SRV_RFM:
++		/* Construct a frame, convert
++		 * DatagramConnectionID
++		 * to network format long and copy it out...
++		 */
++		linkparam.u.rfm.connid = cfpkt_extr_head_u32(pkt);
++		cp = (u8 *) linkparam.u.rfm.volume;
++		for (tmp = cfpkt_extr_head_u8(pkt);
++		     cfpkt_more(pkt) && tmp != '\0';
++		     tmp = cfpkt_extr_head_u8(pkt))
++			*cp++ = tmp;
++		*cp = '\0';
++
++		if (CFCTRL_ERR_BIT & cmdrsp)
++			break;
++		/* Link ID */
++		linkid = cfpkt_extr_head_u8(pkt);
++
++		break;
++	case CFCTRL_SRV_UTIL:
++		/* Construct a frame, convert
++		 * DatagramConnectionID
++		 * to network format long and copy it out...
++		 */
++		/* Fifosize KB */
++		linkparam.u.utility.fifosize_kb = cfpkt_extr_head_u16(pkt);
++		/* Fifosize bufs */
++		linkparam.u.utility.fifosize_bufs = cfpkt_extr_head_u16(pkt);
++		/* name */
++		cp = (u8 *) linkparam.u.utility.name;
++		caif_assert(sizeof(linkparam.u.utility.name)
++			     >= UTILITY_NAME_LENGTH);
++		for (i = 0; i < UTILITY_NAME_LENGTH && cfpkt_more(pkt); i++) {
++			tmp = cfpkt_extr_head_u8(pkt);
++			*cp++ = tmp;
++		}
++		/* Length */
++		len = cfpkt_extr_head_u8(pkt);
++		linkparam.u.utility.paramlen = len;
++		/* Param Data */
++		cp = linkparam.u.utility.params;
++		while (cfpkt_more(pkt) && len--) {
++			tmp = cfpkt_extr_head_u8(pkt);
++			*cp++ = tmp;
++		}
++		if (CFCTRL_ERR_BIT & cmdrsp)
++			break;
++		/* Link ID */
++		linkid = cfpkt_extr_head_u8(pkt);
++		/* Length */
++		len = cfpkt_extr_head_u8(pkt);
++		/* Param Data */
++		cfpkt_extr_head(pkt, NULL, len);
++		break;
++	default:
++		pr_warn("Request setup, invalid type (%d)\n", serv);
++		return -1;
++	}
++
++	rsp.cmd = CFCTRL_CMD_LINK_SETUP;
++	rsp.param = linkparam;
++	spin_lock_bh(&cfctrl->info_list_lock);
++	req = cfctrl_remove_req(cfctrl, &rsp);
++
++	if (CFCTRL_ERR_BIT == (CFCTRL_ERR_BIT & cmdrsp) ||
++		cfpkt_erroneous(pkt)) {
++		pr_err("Invalid O/E bit or parse error "
++				"on CAIF control channel\n");
++		cfctrl->res.reject_rsp(cfctrl->serv.layer.up, 0,
++				       req ? req->client_layer : NULL);
++	} else {
++		cfctrl->res.linksetup_rsp(cfctrl->serv.layer.up, linkid,
++					  serv, physlinkid,
++					  req ?  req->client_layer : NULL);
++	}
++
++	kfree(req);
++
++	spin_unlock_bh(&cfctrl->info_list_lock);
++
++	return 0;
++}
++
+ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
+ {
+ 	u8 cmdrsp;
+ 	u8 cmd;
+-	int ret = -1;
+-	u8 len;
+-	u8 param[255];
++	int ret = 0;
+ 	u8 linkid = 0;
+ 	struct cfctrl *cfctrl = container_obj(layer);
+-	struct cfctrl_request_info rsp, *req;
+-
+ 
+ 	cmdrsp = cfpkt_extr_head_u8(pkt);
+ 	cmd = cmdrsp & CFCTRL_CMD_MASK;
+@@ -374,150 +511,7 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
+ 
+ 	switch (cmd) {
+ 	case CFCTRL_CMD_LINK_SETUP:
+-		{
+-			enum cfctrl_srv serv;
+-			enum cfctrl_srv servtype;
+-			u8 endpoint;
+-			u8 physlinkid;
+-			u8 prio;
+-			u8 tmp;
+-			u8 *cp;
+-			int i;
+-			struct cfctrl_link_param linkparam;
+-			memset(&linkparam, 0, sizeof(linkparam));
+-
+-			tmp = cfpkt_extr_head_u8(pkt);
+-
+-			serv = tmp & CFCTRL_SRV_MASK;
+-			linkparam.linktype = serv;
+-
+-			servtype = tmp >> 4;
+-			linkparam.chtype = servtype;
+-
+-			tmp = cfpkt_extr_head_u8(pkt);
+-			physlinkid = tmp & 0x07;
+-			prio = tmp >> 3;
+-
+-			linkparam.priority = prio;
+-			linkparam.phyid = physlinkid;
+-			endpoint = cfpkt_extr_head_u8(pkt);
+-			linkparam.endpoint = endpoint & 0x03;
+-
+-			switch (serv) {
+-			case CFCTRL_SRV_VEI:
+-			case CFCTRL_SRV_DBG:
+-				if (CFCTRL_ERR_BIT & cmdrsp)
+-					break;
+-				/* Link ID */
+-				linkid = cfpkt_extr_head_u8(pkt);
+-				break;
+-			case CFCTRL_SRV_VIDEO:
+-				tmp = cfpkt_extr_head_u8(pkt);
+-				linkparam.u.video.connid = tmp;
+-				if (CFCTRL_ERR_BIT & cmdrsp)
+-					break;
+-				/* Link ID */
+-				linkid = cfpkt_extr_head_u8(pkt);
+-				break;
+-
+-			case CFCTRL_SRV_DATAGRAM:
+-				linkparam.u.datagram.connid =
+-				    cfpkt_extr_head_u32(pkt);
+-				if (CFCTRL_ERR_BIT & cmdrsp)
+-					break;
+-				/* Link ID */
+-				linkid = cfpkt_extr_head_u8(pkt);
+-				break;
+-			case CFCTRL_SRV_RFM:
+-				/* Construct a frame, convert
+-				 * DatagramConnectionID
+-				 * to network format long and copy it out...
+-				 */
+-				linkparam.u.rfm.connid =
+-				    cfpkt_extr_head_u32(pkt);
+-				cp = (u8 *) linkparam.u.rfm.volume;
+-				for (tmp = cfpkt_extr_head_u8(pkt);
+-				     cfpkt_more(pkt) && tmp != '\0';
+-				     tmp = cfpkt_extr_head_u8(pkt))
+-					*cp++ = tmp;
+-				*cp = '\0';
+-
+-				if (CFCTRL_ERR_BIT & cmdrsp)
+-					break;
+-				/* Link ID */
+-				linkid = cfpkt_extr_head_u8(pkt);
+-
+-				break;
+-			case CFCTRL_SRV_UTIL:
+-				/* Construct a frame, convert
+-				 * DatagramConnectionID
+-				 * to network format long and copy it out...
+-				 */
+-				/* Fifosize KB */
+-				linkparam.u.utility.fifosize_kb =
+-				    cfpkt_extr_head_u16(pkt);
+-				/* Fifosize bufs */
+-				linkparam.u.utility.fifosize_bufs =
+-				    cfpkt_extr_head_u16(pkt);
+-				/* name */
+-				cp = (u8 *) linkparam.u.utility.name;
+-				caif_assert(sizeof(linkparam.u.utility.name)
+-					     >= UTILITY_NAME_LENGTH);
+-				for (i = 0;
+-				     i < UTILITY_NAME_LENGTH
+-				     && cfpkt_more(pkt); i++) {
+-					tmp = cfpkt_extr_head_u8(pkt);
+-					*cp++ = tmp;
+-				}
+-				/* Length */
+-				len = cfpkt_extr_head_u8(pkt);
+-				linkparam.u.utility.paramlen = len;
+-				/* Param Data */
+-				cp = linkparam.u.utility.params;
+-				while (cfpkt_more(pkt) && len--) {
+-					tmp = cfpkt_extr_head_u8(pkt);
+-					*cp++ = tmp;
+-				}
+-				if (CFCTRL_ERR_BIT & cmdrsp)
+-					break;
+-				/* Link ID */
+-				linkid = cfpkt_extr_head_u8(pkt);
+-				/* Length */
+-				len = cfpkt_extr_head_u8(pkt);
+-				/* Param Data */
+-				cfpkt_extr_head(pkt, &param, len);
+-				break;
+-			default:
+-				pr_warn("Request setup, invalid type (%d)\n",
+-					serv);
+-				goto error;
+-			}
+-
+-			rsp.cmd = cmd;
+-			rsp.param = linkparam;
+-			spin_lock_bh(&cfctrl->info_list_lock);
+-			req = cfctrl_remove_req(cfctrl, &rsp);
+-
+-			if (CFCTRL_ERR_BIT == (CFCTRL_ERR_BIT & cmdrsp) ||
+-				cfpkt_erroneous(pkt)) {
+-				pr_err("Invalid O/E bit or parse error "
+-						"on CAIF control channel\n");
+-				cfctrl->res.reject_rsp(cfctrl->serv.layer.up,
+-						       0,
+-						       req ? req->client_layer
+-						       : NULL);
+-			} else {
+-				cfctrl->res.linksetup_rsp(cfctrl->serv.
+-							  layer.up, linkid,
+-							  serv, physlinkid,
+-							  req ? req->
+-							  client_layer : NULL);
+-			}
+-
+-			kfree(req);
+-
+-			spin_unlock_bh(&cfctrl->info_list_lock);
+-		}
++		ret = cfctrl_link_setup(cfctrl, pkt, cmdrsp);
+ 		break;
+ 	case CFCTRL_CMD_LINK_DESTROY:
+ 		linkid = cfpkt_extr_head_u8(pkt);
+@@ -544,9 +538,9 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
+ 		break;
+ 	default:
+ 		pr_err("Unrecognized Control Frame\n");
++		ret = -1;
+ 		goto error;
+ 	}
+-	ret = 0;
+ error:
+ 	cfpkt_destroy(pkt);
+ 	return ret;
+diff --git a/net/core/dst.c b/net/core/dst.c
+index 6d76b799ce645d..cc990706b64515 100644
+--- a/net/core/dst.c
++++ b/net/core/dst.c
+@@ -148,8 +148,8 @@ void dst_dev_put(struct dst_entry *dst)
+ 	dst->obsolete = DST_OBSOLETE_DEAD;
+ 	if (dst->ops->ifdown)
+ 		dst->ops->ifdown(dst, dev);
+-	dst->input = dst_discard;
+-	dst->output = dst_discard_out;
++	WRITE_ONCE(dst->input, dst_discard);
++	WRITE_ONCE(dst->output, dst_discard_out);
+ 	dst->dev = blackhole_netdev;
+ 	netdev_ref_replace(dev, blackhole_netdev, &dst->dev_tracker,
+ 			   GFP_ATOMIC);
+diff --git a/net/core/filter.c b/net/core/filter.c
+index 1c0cf6f2fff52b..02fedc404d7f7d 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -9407,6 +9407,9 @@ static bool flow_dissector_is_valid_access(int off, int size,
+ 	if (off < 0 || off >= sizeof(struct __sk_buff))
+ 		return false;
+ 
++	if (off % size != 0)
++		return false;
++
+ 	if (type == BPF_WRITE)
+ 		return false;
+ 
+diff --git a/net/core/netpoll.c b/net/core/netpoll.c
+index e95c2933756df9..87182a4272bfdd 100644
+--- a/net/core/netpoll.c
++++ b/net/core/netpoll.c
+@@ -784,6 +784,13 @@ int netpoll_setup(struct netpoll *np)
+ 	if (err)
+ 		goto put;
+ 	rtnl_unlock();
++
++	/* Make sure all NAPI polls which started before dev->npinfo
++	 * was visible have exited before we start calling NAPI poll.
++	 * NAPI skips locking if dev->npinfo is NULL.
++	 */
++	synchronize_rcu();
++
+ 	return 0;
+ 
+ put:
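
The synchronize_rcu() above is the classic publish-then-wait idiom: NAPI
pollers that sampled dev->npinfo == NULL and skipped locking must all have
exited before netpoll starts polling. A generic hedged sketch (types and
names illustrative):

    #include <linux/rcupdate.h>

    struct my_info { int val; };

    struct my_dev {
    	struct my_info __rcu *info;
    };

    /* Reader fast path: skip the locked slow path while unpublished. */
    static bool my_poll(struct my_dev *dev)
    {
    	bool published;

    	rcu_read_lock();
    	published = rcu_dereference(dev->info) != NULL;
    	rcu_read_unlock();

    	return published;	/* if true, take the locked path */
    }

    /* Publisher: make info visible, then wait out old readers. */
    static void my_enable(struct my_dev *dev, struct my_info *info)
    {
    	rcu_assign_pointer(dev->info, info);
    	synchronize_rcu();
    }
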
+diff --git a/net/core/skmsg.c b/net/core/skmsg.c
+index 97f52394d1eb16..adb3166ede9727 100644
+--- a/net/core/skmsg.c
++++ b/net/core/skmsg.c
+@@ -655,6 +655,13 @@ static void sk_psock_backlog(struct work_struct *work)
+ 	bool ingress;
+ 	int ret;
+ 
++	/* If sk is quickly removed from the map and then added back, the old
++	 * psock should not be scheduled, because there are now two psocks
++	 * pointing to the same sk.
++	 */
++	if (!sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
++		return;
++
+ 	/* Increment the psock refcnt to synchronize with close(fd) path in
+ 	 * sock_map_close(), ensuring we wait for backlog thread completion
+ 	 * before sk_socket freed. If refcnt increment fails, it indicates
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 88d7c96bfac06f..73d555593f5c56 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -1684,8 +1684,8 @@ struct rtable *rt_dst_clone(struct net_device *dev, struct rtable *rt)
+ 		else if (rt->rt_gw_family == AF_INET6)
+ 			new_rt->rt_gw6 = rt->rt_gw6;
+ 
+-		new_rt->dst.input = rt->dst.input;
+-		new_rt->dst.output = rt->dst.output;
++		new_rt->dst.input = READ_ONCE(rt->dst.input);
++		new_rt->dst.output = READ_ONCE(rt->dst.output);
+ 		new_rt->dst.error = rt->dst.error;
+ 		new_rt->dst.lastuse = jiffies;
+ 		new_rt->dst.lwtstate = lwtstate_get(rt->dst.lwtstate);
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index d176e7888a203c..30f4375f8431b8 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -4970,8 +4970,9 @@ static void tcp_ofo_queue(struct sock *sk)
+ 
+ 		if (before(TCP_SKB_CB(skb)->seq, dsack_high)) {
+ 			__u32 dsack = dsack_high;
++
+ 			if (before(TCP_SKB_CB(skb)->end_seq, dsack_high))
+-				dsack_high = TCP_SKB_CB(skb)->end_seq;
++				dsack = TCP_SKB_CB(skb)->end_seq;
+ 			tcp_dsack_extend(sk, TCP_SKB_CB(skb)->seq, dsack);
+ 		}
+ 		p = rb_next(p);
+@@ -5039,6 +5040,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
+ 		return;
+ 	}
+ 
++	tcp_measure_rcv_mss(sk, skb);
+ 	/* Disable header prediction. */
+ 	tp->pred_flags = 0;
+ 	inet_csk_schedule_ack(sk);
+diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
+index 9a1c59275a1099..aa1046fbf28e51 100644
+--- a/net/ipv6/ip6_fib.c
++++ b/net/ipv6/ip6_fib.c
+@@ -440,15 +440,17 @@ struct fib6_dump_arg {
+ static int fib6_rt_dump(struct fib6_info *rt, struct fib6_dump_arg *arg)
+ {
+ 	enum fib_event_type fib_event = FIB_EVENT_ENTRY_REPLACE;
++	unsigned int nsiblings;
+ 	int err;
+ 
+ 	if (!rt || rt == arg->net->ipv6.fib6_null_entry)
+ 		return 0;
+ 
+-	if (rt->fib6_nsiblings)
++	nsiblings = READ_ONCE(rt->fib6_nsiblings);
++	if (nsiblings)
+ 		err = call_fib6_multipath_entry_notifier(arg->nb, fib_event,
+ 							 rt,
+-							 rt->fib6_nsiblings,
++							 nsiblings,
+ 							 arg->extack);
+ 	else
+ 		err = call_fib6_entry_notifier(arg->nb, fib_event, rt,
+@@ -1126,7 +1128,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
+ 
+ 			if (rt6_duplicate_nexthop(iter, rt)) {
+ 				if (rt->fib6_nsiblings)
+-					rt->fib6_nsiblings = 0;
++					WRITE_ONCE(rt->fib6_nsiblings, 0);
+ 				if (!(iter->fib6_flags & RTF_EXPIRES))
+ 					return -EEXIST;
+ 				if (!(rt->fib6_flags & RTF_EXPIRES)) {
+@@ -1155,7 +1157,8 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
+ 			 */
+ 			if (rt_can_ecmp &&
+ 			    rt6_qualify_for_ecmp(iter))
+-				rt->fib6_nsiblings++;
++				WRITE_ONCE(rt->fib6_nsiblings,
++					   rt->fib6_nsiblings + 1);
+ 		}
+ 
+ 		if (iter->fib6_metric > rt->fib6_metric)
+@@ -1205,7 +1208,8 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
+ 		fib6_nsiblings = 0;
+ 		list_for_each_entry_safe(sibling, temp_sibling,
+ 					 &rt->fib6_siblings, fib6_siblings) {
+-			sibling->fib6_nsiblings++;
++			WRITE_ONCE(sibling->fib6_nsiblings,
++				   sibling->fib6_nsiblings + 1);
+ 			BUG_ON(sibling->fib6_nsiblings != rt->fib6_nsiblings);
+ 			fib6_nsiblings++;
+ 		}
+@@ -1250,8 +1254,9 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
+ 				list_for_each_entry_safe(sibling, next_sibling,
+ 							 &rt->fib6_siblings,
+ 							 fib6_siblings)
+-					sibling->fib6_nsiblings--;
+-				rt->fib6_nsiblings = 0;
++					WRITE_ONCE(sibling->fib6_nsiblings,
++						   sibling->fib6_nsiblings - 1);
++				WRITE_ONCE(rt->fib6_nsiblings, 0);
+ 				list_del_rcu(&rt->fib6_siblings);
+ 				rt6_multipath_rebalance(next_sibling);
+ 				return err;
+@@ -1968,8 +1973,9 @@ static void fib6_del_route(struct fib6_table *table, struct fib6_node *fn,
+ 			notify_del = true;
+ 		list_for_each_entry_safe(sibling, next_sibling,
+ 					 &rt->fib6_siblings, fib6_siblings)
+-			sibling->fib6_nsiblings--;
+-		rt->fib6_nsiblings = 0;
++			WRITE_ONCE(sibling->fib6_nsiblings,
++				   sibling->fib6_nsiblings - 1);
++		WRITE_ONCE(rt->fib6_nsiblings, 0);
+ 		list_del_rcu(&rt->fib6_siblings);
+ 		rt6_multipath_rebalance(next_sibling);
+ 	}
+diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
+index 9822163428b028..fce91183797a60 100644
+--- a/net/ipv6/ip6_offload.c
++++ b/net/ipv6/ip6_offload.c
+@@ -148,7 +148,9 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
+ 
+ 	ops = rcu_dereference(inet6_offloads[proto]);
+ 	if (likely(ops && ops->callbacks.gso_segment)) {
+-		skb_reset_transport_header(skb);
++		if (!skb_reset_transport_header_careful(skb))
++			goto out;
++
+ 		segs = ops->callbacks.gso_segment(skb, features);
+ 		if (!segs)
+ 			skb->network_header = skb_mac_header(skb) + nhoff - skb->head;
+diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
+index 440048d609c37a..68bc518500f94a 100644
+--- a/net/ipv6/ip6mr.c
++++ b/net/ipv6/ip6mr.c
+@@ -2032,6 +2032,7 @@ static int ip6mr_forward2(struct net *net, struct mr_table *mrt,
+ 			  struct sk_buff *skb, int vifi)
+ {
+ 	struct vif_device *vif = &mrt->vif_table[vifi];
++	struct net_device *indev = skb->dev;
+ 	struct net_device *vif_dev;
+ 	struct ipv6hdr *ipv6h;
+ 	struct dst_entry *dst;
+@@ -2094,7 +2095,7 @@ static int ip6mr_forward2(struct net *net, struct mr_table *mrt,
+ 	IP6CB(skb)->flags |= IP6SKB_FORWARDED;
+ 
+ 	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD,
+-		       net, NULL, skb, skb->dev, vif_dev,
++		       net, NULL, skb, indev, skb->dev,
+ 		       ip6mr_forward2_finish);
+ 
+ out_free:
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index d9ab070e78e052..22866444efc05e 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -5240,7 +5240,8 @@ static void ip6_route_mpath_notify(struct fib6_info *rt,
+ 	 */
+ 	rcu_read_lock();
+ 
+-	if ((nlflags & NLM_F_APPEND) && rt_last && rt_last->fib6_nsiblings) {
++	if ((nlflags & NLM_F_APPEND) && rt_last &&
++	    READ_ONCE(rt_last->fib6_nsiblings)) {
+ 		rt = list_first_or_null_rcu(&rt_last->fib6_siblings,
+ 					    struct fib6_info,
+ 					    fib6_siblings);
+@@ -5587,32 +5588,34 @@ static int rt6_nh_nlmsg_size(struct fib6_nh *nh, void *arg)
+ 
+ static size_t rt6_nlmsg_size(struct fib6_info *f6i)
+ {
++	struct fib6_info *sibling;
++	struct fib6_nh *nh;
+ 	int nexthop_len;
+ 
+ 	if (f6i->nh) {
+ 		nexthop_len = nla_total_size(4); /* RTA_NH_ID */
+ 		nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_nlmsg_size,
+ 					 &nexthop_len);
+-	} else {
+-		struct fib6_nh *nh = f6i->fib6_nh;
+-		struct fib6_info *sibling;
+-
+-		nexthop_len = 0;
+-		if (f6i->fib6_nsiblings) {
+-			rt6_nh_nlmsg_size(nh, &nexthop_len);
+-
+-			rcu_read_lock();
++		goto common;
++	}
+ 
+-			list_for_each_entry_rcu(sibling, &f6i->fib6_siblings,
+-						fib6_siblings) {
+-				rt6_nh_nlmsg_size(sibling->fib6_nh, &nexthop_len);
+-			}
++	rcu_read_lock();
++retry:
++	nh = f6i->fib6_nh;
++	nexthop_len = 0;
++	if (READ_ONCE(f6i->fib6_nsiblings)) {
++		rt6_nh_nlmsg_size(nh, &nexthop_len);
+ 
+-			rcu_read_unlock();
++		list_for_each_entry_rcu(sibling, &f6i->fib6_siblings,
++					fib6_siblings) {
++			rt6_nh_nlmsg_size(sibling->fib6_nh, &nexthop_len);
++			if (!READ_ONCE(f6i->fib6_nsiblings))
++				goto retry;
+ 		}
+-		nexthop_len += lwtunnel_get_encap_size(nh->fib_nh_lws);
+ 	}
+-
++	rcu_read_unlock();
++	nexthop_len += lwtunnel_get_encap_size(nh->fib_nh_lws);
++common:
+ 	return NLMSG_ALIGN(sizeof(struct rtmsg))
+ 	       + nla_total_size(16) /* RTA_SRC */
+ 	       + nla_total_size(16) /* RTA_DST */
+@@ -5771,7 +5774,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
+ 		if (dst->lwtstate &&
+ 		    lwtunnel_fill_encap(skb, dst->lwtstate, RTA_ENCAP, RTA_ENCAP_TYPE) < 0)
+ 			goto nla_put_failure;
+-	} else if (rt->fib6_nsiblings) {
++	} else if (READ_ONCE(rt->fib6_nsiblings)) {
+ 		struct fib6_info *sibling;
+ 		struct nlattr *mp;
+ 
+@@ -5873,16 +5876,21 @@ static bool fib6_info_uses_dev(const struct fib6_info *f6i,
+ 	if (f6i->fib6_nh->fib_nh_dev == dev)
+ 		return true;
+ 
+-	if (f6i->fib6_nsiblings) {
+-		struct fib6_info *sibling, *next_sibling;
++	if (READ_ONCE(f6i->fib6_nsiblings)) {
++		const struct fib6_info *sibling;
+ 
+-		list_for_each_entry_safe(sibling, next_sibling,
+-					 &f6i->fib6_siblings, fib6_siblings) {
+-			if (sibling->fib6_nh->fib_nh_dev == dev)
++		rcu_read_lock();
++		list_for_each_entry_rcu(sibling, &f6i->fib6_siblings,
++					fib6_siblings) {
++			if (sibling->fib6_nh->fib_nh_dev == dev) {
++				rcu_read_unlock();
+ 				return true;
++			}
++			if (!READ_ONCE(f6i->fib6_nsiblings))
++				break;
+ 		}
++		rcu_read_unlock();
+ 	}
+-
+ 	return false;
+ }
+ 
+diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
+index cf2b8a05c3389d..a72c1d9edb4acc 100644
+--- a/net/mac80211/cfg.c
++++ b/net/mac80211/cfg.c
+@@ -1078,13 +1078,13 @@ ieee80211_copy_rnr_beacon(u8 *pos, struct cfg80211_rnr_elems *dst,
+ {
+ 	int i, offset = 0;
+ 
++	dst->cnt = src->cnt;
+ 	for (i = 0; i < src->cnt; i++) {
+ 		memcpy(pos + offset, src->elem[i].data, src->elem[i].len);
+ 		dst->elem[i].len = src->elem[i].len;
+ 		dst->elem[i].data = pos + offset;
+ 		offset += dst->elem[i].len;
+ 	}
+-	dst->cnt = src->cnt;
+ 
+ 	return offset;
+ }
+diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c
+index f07b409164854c..1cb42c5b9b04b0 100644
+--- a/net/mac80211/tdls.c
++++ b/net/mac80211/tdls.c
+@@ -1421,7 +1421,7 @@ int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
+ 	if (!(wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS))
+ 		return -EOPNOTSUPP;
+ 
+-	if (sdata->vif.type != NL80211_IFTYPE_STATION)
++	if (sdata->vif.type != NL80211_IFTYPE_STATION || !sdata->vif.cfg.assoc)
+ 		return -EINVAL;
+ 
+ 	switch (oper) {
+diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
+index 00c309e7768e17..9c515fb8ebe112 100644
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -622,6 +622,12 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
+ 	else
+ 		tx->key = NULL;
+ 
++	if (info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) {
++		if (tx->key && tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)
++			info->control.hw_key = &tx->key->conf;
++		return TX_CONTINUE;
++	}
++
+ 	if (tx->key) {
+ 		bool skip_hw = false;
+ 
+@@ -1437,7 +1443,7 @@ static void ieee80211_txq_enqueue(struct ieee80211_local *local,
+ {
+ 	struct fq *fq = &local->fq;
+ 	struct fq_tin *tin = &txqi->tin;
+-	u32 flow_idx = fq_flow_idx(fq, skb);
++	u32 flow_idx;
+ 
+ 	ieee80211_set_skb_enqueue_time(skb);
+ 
+@@ -1453,6 +1459,7 @@ static void ieee80211_txq_enqueue(struct ieee80211_local *local,
+ 			IEEE80211_TX_INTCFL_NEED_TXPROCESSING;
+ 		__skb_queue_tail(&txqi->frags, skb);
+ 	} else {
++		flow_idx = fq_flow_idx(fq, skb);
+ 		fq_tin_enqueue(fq, tin, flow_idx, skb,
+ 			       fq_skb_free_func);
+ 	}
+@@ -3885,6 +3892,7 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
+ 	 * The key can be removed while the packet was queued, so need to call
+ 	 * this here to get the current key.
+ 	 */
++	info->control.hw_key = NULL;
+ 	r = ieee80211_tx_h_select_key(&tx);
+ 	if (r != TX_CONTINUE) {
+ 		ieee80211_free_txskb(&local->hw, skb);
+@@ -4106,7 +4114,9 @@ void __ieee80211_schedule_txq(struct ieee80211_hw *hw,
+ 
+ 	spin_lock_bh(&local->active_txq_lock[txq->ac]);
+ 
+-	has_queue = force || txq_has_queue(txq);
++	has_queue = force ||
++		    (!test_bit(IEEE80211_TXQ_STOP, &txqi->flags) &&
++		     txq_has_queue(txq));
+ 	if (list_empty(&txqi->schedule_order) &&
+ 	    (has_queue || ieee80211_txq_keep_active(txqi))) {
+ 		/* If airtime accounting is active, always enqueue STAs at the
+diff --git a/net/netfilter/nf_bpf_link.c b/net/netfilter/nf_bpf_link.c
+index 3d64a4511fcfdd..b5e4ca9026a8e4 100644
+--- a/net/netfilter/nf_bpf_link.c
++++ b/net/netfilter/nf_bpf_link.c
+@@ -17,7 +17,7 @@ static unsigned int nf_hook_run_bpf(void *bpf_prog, struct sk_buff *skb,
+ 		.skb = skb,
+ 	};
+ 
+-	return bpf_prog_run(prog, &ctx);
++	return bpf_prog_run_pin_on_cpu(prog, &ctx);
+ }
+ 
+ struct bpf_nf_link {
+@@ -295,6 +295,9 @@ static bool nf_is_valid_access(int off, int size, enum bpf_access_type type,
+ 	if (off < 0 || off >= sizeof(struct bpf_nf_ctx))
+ 		return false;
+ 
++	if (off % size != 0)
++		return false;
++
+ 	if (type == BPF_WRITE)
+ 		return false;
+ 
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index bdee187bc5dd45..3743e4249dc8c7 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -1029,11 +1029,6 @@ static int nf_tables_fill_table_info(struct sk_buff *skb, struct net *net,
+ 			 NFTA_TABLE_PAD))
+ 		goto nla_put_failure;
+ 
+-	if (event == NFT_MSG_DELTABLE) {
+-		nlmsg_end(skb, nlh);
+-		return 0;
+-	}
+-
+ 	if (nla_put_be32(skb, NFTA_TABLE_FLAGS,
+ 			 htonl(table->flags & NFT_TABLE_F_MASK)))
+ 		goto nla_put_failure;
+@@ -1889,11 +1884,6 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net,
+ 			 NFTA_CHAIN_PAD))
+ 		goto nla_put_failure;
+ 
+-	if (event == NFT_MSG_DELCHAIN && !hook_list) {
+-		nlmsg_end(skb, nlh);
+-		return 0;
+-	}
+-
+ 	if (nft_is_base_chain(chain)) {
+ 		const struct nft_base_chain *basechain = nft_base_chain(chain);
+ 		struct nft_stats __percpu *stats;
+@@ -3884,7 +3874,7 @@ void nf_tables_rule_destroy(const struct nft_ctx *ctx, struct nft_rule *rule)
+ /* can only be used if rule is no longer visible to dumps */
+ static void nf_tables_rule_release(const struct nft_ctx *ctx, struct nft_rule *rule)
+ {
+-	lockdep_commit_lock_is_held(ctx->net);
++	WARN_ON_ONCE(!lockdep_commit_lock_is_held(ctx->net));
+ 
+ 	nft_rule_expr_deactivate(ctx, rule, NFT_TRANS_RELEASE);
+ 	nf_tables_rule_destroy(ctx, rule);
+@@ -4678,11 +4668,6 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
+ 			 NFTA_SET_PAD))
+ 		goto nla_put_failure;
+ 
+-	if (event == NFT_MSG_DELSET) {
+-		nlmsg_end(skb, nlh);
+-		return 0;
+-	}
+-
+ 	if (set->flags != 0)
+ 		if (nla_put_be32(skb, NFTA_SET_FLAGS, htonl(set->flags)))
+ 			goto nla_put_failure;
+@@ -5674,7 +5659,7 @@ void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set,
+ 			      struct nft_set_binding *binding,
+ 			      enum nft_trans_phase phase)
+ {
+-	lockdep_commit_lock_is_held(ctx->net);
++	WARN_ON_ONCE(!lockdep_commit_lock_is_held(ctx->net));
+ 
+ 	switch (phase) {
+ 	case NFT_TRANS_PREPARE_ERROR:
+@@ -8017,11 +8002,6 @@ static int nf_tables_fill_obj_info(struct sk_buff *skb, struct net *net,
+ 			 NFTA_OBJ_PAD))
+ 		goto nla_put_failure;
+ 
+-	if (event == NFT_MSG_DELOBJ) {
+-		nlmsg_end(skb, nlh);
+-		return 0;
+-	}
+-
+ 	if (nla_put_be32(skb, NFTA_OBJ_TYPE, htonl(obj->ops->type->type)) ||
+ 	    nla_put_be32(skb, NFTA_OBJ_USE, htonl(obj->use)) ||
+ 	    nft_object_dump(skb, NFTA_OBJ_DATA, obj, reset))
+@@ -9040,11 +9020,6 @@ static int nf_tables_fill_flowtable_info(struct sk_buff *skb, struct net *net,
+ 			 NFTA_FLOWTABLE_PAD))
+ 		goto nla_put_failure;
+ 
+-	if (event == NFT_MSG_DELFLOWTABLE && !hook_list) {
+-		nlmsg_end(skb, nlh);
+-		return 0;
+-	}
+-
+ 	if (nla_put_be32(skb, NFTA_FLOWTABLE_USE, htonl(flowtable->use)) ||
+ 	    nla_put_be32(skb, NFTA_FLOWTABLE_FLAGS, htonl(flowtable->data.flags)))
+ 		goto nla_put_failure;
+diff --git a/net/netfilter/xt_nfacct.c b/net/netfilter/xt_nfacct.c
+index 7c6bf1c168131a..0ca1cdfc4095b6 100644
+--- a/net/netfilter/xt_nfacct.c
++++ b/net/netfilter/xt_nfacct.c
+@@ -38,8 +38,8 @@ nfacct_mt_checkentry(const struct xt_mtchk_param *par)
+ 
+ 	nfacct = nfnl_acct_find_get(par->net, info->name);
+ 	if (nfacct == NULL) {
+-		pr_info_ratelimited("accounting object `%s' does not exists\n",
+-				    info->name);
++		pr_info_ratelimited("accounting object `%.*s' does not exist\n",
++				    NFACCT_NAME_MAX, info->name);
+ 		return -ENOENT;
+ 	}
+ 	info->nfacct = nfacct;
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 19c4c1f27e586c..e8589fede4d4c9 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -4562,10 +4562,10 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
+ 	spin_lock(&po->bind_lock);
+ 	was_running = packet_sock_flag(po, PACKET_SOCK_RUNNING);
+ 	num = po->num;
+-	if (was_running) {
+-		WRITE_ONCE(po->num, 0);
++	WRITE_ONCE(po->num, 0);
++	if (was_running)
+ 		__unregister_prot_hook(sk, false);
+-	}
++
+ 	spin_unlock(&po->bind_lock);
+ 
+ 	synchronize_net();
+@@ -4597,10 +4597,10 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
+ 	mutex_unlock(&po->pg_vec_lock);
+ 
+ 	spin_lock(&po->bind_lock);
+-	if (was_running) {
+-		WRITE_ONCE(po->num, num);
++	WRITE_ONCE(po->num, num);
++	if (was_running)
+ 		register_prot_hook(sk);
+-	}
++
+ 	spin_unlock(&po->bind_lock);
+ 	if (pg_vec && (po->tp_version > TPACKET_V2)) {
+ 		/* Because we don't support block-based V3 on tx-ring */
+diff --git a/net/sched/act_ctinfo.c b/net/sched/act_ctinfo.c
+index 5dd41a012110e0..ae571bfe84c7d9 100644
+--- a/net/sched/act_ctinfo.c
++++ b/net/sched/act_ctinfo.c
+@@ -44,9 +44,9 @@ static void tcf_ctinfo_dscp_set(struct nf_conn *ct, struct tcf_ctinfo *ca,
+ 				ipv4_change_dsfield(ip_hdr(skb),
+ 						    INET_ECN_MASK,
+ 						    newdscp);
+-				ca->stats_dscp_set++;
++				atomic64_inc(&ca->stats_dscp_set);
+ 			} else {
+-				ca->stats_dscp_error++;
++				atomic64_inc(&ca->stats_dscp_error);
+ 			}
+ 		}
+ 		break;
+@@ -57,9 +57,9 @@ static void tcf_ctinfo_dscp_set(struct nf_conn *ct, struct tcf_ctinfo *ca,
+ 				ipv6_change_dsfield(ipv6_hdr(skb),
+ 						    INET_ECN_MASK,
+ 						    newdscp);
+-				ca->stats_dscp_set++;
++				atomic64_inc(&ca->stats_dscp_set);
+ 			} else {
+-				ca->stats_dscp_error++;
++				atomic64_inc(&ca->stats_dscp_error);
+ 			}
+ 		}
+ 		break;
+@@ -72,7 +72,7 @@ static void tcf_ctinfo_cpmark_set(struct nf_conn *ct, struct tcf_ctinfo *ca,
+ 				  struct tcf_ctinfo_params *cp,
+ 				  struct sk_buff *skb)
+ {
+-	ca->stats_cpmark_set++;
++	atomic64_inc(&ca->stats_cpmark_set);
+ 	skb->mark = READ_ONCE(ct->mark) & cp->cpmarkmask;
+ }
+ 
+@@ -323,15 +323,18 @@ static int tcf_ctinfo_dump(struct sk_buff *skb, struct tc_action *a,
+ 	}
+ 
+ 	if (nla_put_u64_64bit(skb, TCA_CTINFO_STATS_DSCP_SET,
+-			      ci->stats_dscp_set, TCA_CTINFO_PAD))
++			      atomic64_read(&ci->stats_dscp_set),
++			      TCA_CTINFO_PAD))
+ 		goto nla_put_failure;
+ 
+ 	if (nla_put_u64_64bit(skb, TCA_CTINFO_STATS_DSCP_ERROR,
+-			      ci->stats_dscp_error, TCA_CTINFO_PAD))
++			      atomic64_read(&ci->stats_dscp_error),
++			      TCA_CTINFO_PAD))
+ 		goto nla_put_failure;
+ 
+ 	if (nla_put_u64_64bit(skb, TCA_CTINFO_STATS_CPMARK_SET,
+-			      ci->stats_cpmark_set, TCA_CTINFO_PAD))
++			      atomic64_read(&ci->stats_cpmark_set),
++			      TCA_CTINFO_PAD))
+ 		goto nla_put_failure;
+ 
+ 	spin_unlock_bh(&ci->tcf_lock);
+diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
+index 51d4013b612198..f3e5ef9a959256 100644
+--- a/net/sched/sch_mqprio.c
++++ b/net/sched/sch_mqprio.c
+@@ -152,7 +152,7 @@ static int mqprio_parse_opt(struct net_device *dev, struct tc_mqprio_qopt *qopt,
+ static const struct
+ nla_policy mqprio_tc_entry_policy[TCA_MQPRIO_TC_ENTRY_MAX + 1] = {
+ 	[TCA_MQPRIO_TC_ENTRY_INDEX]	= NLA_POLICY_MAX(NLA_U32,
+-							 TC_QOPT_MAX_QUEUE),
++							 TC_QOPT_MAX_QUEUE - 1),
+ 	[TCA_MQPRIO_TC_ENTRY_FP]	= NLA_POLICY_RANGE(NLA_U32,
+ 							   TC_FP_EXPRESS,
+ 							   TC_FP_PREEMPTIBLE),
+diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
+index 68a08f6d1fbce2..2270547b51df8e 100644
+--- a/net/sched/sch_netem.c
++++ b/net/sched/sch_netem.c
+@@ -972,6 +972,41 @@ static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
+ 	return 0;
+ }
+ 
++static const struct Qdisc_class_ops netem_class_ops;
++
++static int check_netem_in_tree(struct Qdisc *sch, bool duplicates,
++			       struct netlink_ext_ack *extack)
++{
++	struct Qdisc *root, *q;
++	unsigned int i;
++
++	root = qdisc_root_sleeping(sch);
++
++	if (sch != root && root->ops->cl_ops == &netem_class_ops) {
++		if (duplicates ||
++		    ((struct netem_sched_data *)qdisc_priv(root))->duplicate)
++			goto err;
++	}
++
++	if (!qdisc_dev(root))
++		return 0;
++
++	hash_for_each(qdisc_dev(root)->qdisc_hash, i, q, hash) {
++		if (sch != q && q->ops->cl_ops == &netem_class_ops) {
++			if (duplicates ||
++			    ((struct netem_sched_data *)qdisc_priv(q))->duplicate)
++				goto err;
++		}
++	}
++
++	return 0;
++
++err:
++	NL_SET_ERR_MSG(extack,
++		       "netem: cannot mix duplicating netems with other netems in tree");
++	return -EINVAL;
++}
++
+ /* Parse netlink message to set options */
+ static int netem_change(struct Qdisc *sch, struct nlattr *opt,
+ 			struct netlink_ext_ack *extack)
+@@ -1030,6 +1065,11 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt,
+ 	q->gap = qopt->gap;
+ 	q->counter = 0;
+ 	q->loss = qopt->loss;
++
++	ret = check_netem_in_tree(sch, qopt->duplicate, extack);
++	if (ret)
++		goto unlock;
++
+ 	q->duplicate = qopt->duplicate;
+ 
+ 	/* for compatibility with earlier versions.
+diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
+index 3142715d7e41eb..1620f0fd78ce7c 100644
+--- a/net/sched/sch_taprio.c
++++ b/net/sched/sch_taprio.c
+@@ -43,6 +43,11 @@ static struct static_key_false taprio_have_working_mqprio;
+ #define TAPRIO_SUPPORTED_FLAGS \
+ 	(TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST | TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD)
+ #define TAPRIO_FLAGS_INVALID U32_MAX
++/* Minimum value for picos_per_byte to ensure non-zero duration
++ * for minimum-sized Ethernet frames (ETH_ZLEN = 60).
++ * 60 * 17 > PSEC_PER_NSEC (1000)
++ */
++#define TAPRIO_PICOS_PER_BYTE_MIN 17
+ 
+ struct sched_entry {
+ 	/* Durations between this GCL entry and the GCL entry where the
+@@ -1284,7 +1289,8 @@ static void taprio_start_sched(struct Qdisc *sch,
+ }
+ 
+ static void taprio_set_picos_per_byte(struct net_device *dev,
+-				      struct taprio_sched *q)
++				      struct taprio_sched *q,
++				      struct netlink_ext_ack *extack)
+ {
+ 	struct ethtool_link_ksettings ecmd;
+ 	int speed = SPEED_10;
+@@ -1300,6 +1306,15 @@ static void taprio_set_picos_per_byte(struct net_device *dev,
+ 
+ skip:
+ 	picos_per_byte = (USEC_PER_SEC * 8) / speed;
++	if (picos_per_byte < TAPRIO_PICOS_PER_BYTE_MIN) {
++		if (!extack)
++			pr_warn("Link speed %d is too high. Schedule may be inaccurate.\n",
++				speed);
++		NL_SET_ERR_MSG_FMT_MOD(extack,
++				       "Link speed %d is too high. Schedule may be inaccurate.",
++				       speed);
++		picos_per_byte = TAPRIO_PICOS_PER_BYTE_MIN;
++	}
+ 
+ 	atomic64_set(&q->picos_per_byte, picos_per_byte);
+ 	netdev_dbg(dev, "taprio: set %s's picos_per_byte to: %lld, linkspeed: %d\n",
+@@ -1324,7 +1339,7 @@ static int taprio_dev_notifier(struct notifier_block *nb, unsigned long event,
+ 		if (dev != qdisc_dev(q->root))
+ 			continue;
+ 
+-		taprio_set_picos_per_byte(dev, q);
++		taprio_set_picos_per_byte(dev, q, NULL);
+ 
+ 		stab = rtnl_dereference(q->root->stab);
+ 
+@@ -1848,7 +1863,7 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
+ 	q->flags = taprio_flags;
+ 
+ 	/* Needed for length_to_duration() during netlink attribute parsing */
+-	taprio_set_picos_per_byte(dev, q);
++	taprio_set_picos_per_byte(dev, q, extack);
+ 
+ 	err = taprio_parse_mqprio_opt(dev, mqprio, extack, q->flags);
+ 	if (err < 0)
+diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
+index 3bfbb789c4beed..3c115936b7198d 100644
+--- a/net/sunrpc/svcsock.c
++++ b/net/sunrpc/svcsock.c
+@@ -257,20 +257,47 @@ svc_tcp_sock_process_cmsg(struct socket *sock, struct msghdr *msg,
+ }
+ 
+ static int
+-svc_tcp_sock_recv_cmsg(struct svc_sock *svsk, struct msghdr *msg)
++svc_tcp_sock_recv_cmsg(struct socket *sock, unsigned int *msg_flags)
+ {
+ 	union {
+ 		struct cmsghdr	cmsg;
+ 		u8		buf[CMSG_SPACE(sizeof(u8))];
+ 	} u;
+-	struct socket *sock = svsk->sk_sock;
++	u8 alert[2];
++	struct kvec alert_kvec = {
++		.iov_base = alert,
++		.iov_len = sizeof(alert),
++	};
++	struct msghdr msg = {
++		.msg_flags = *msg_flags,
++		.msg_control = &u,
++		.msg_controllen = sizeof(u),
++	};
++	int ret;
++
++	iov_iter_kvec(&msg.msg_iter, ITER_DEST, &alert_kvec, 1,
++		      alert_kvec.iov_len);
++	ret = sock_recvmsg(sock, &msg, MSG_DONTWAIT);
++	if (ret > 0 &&
++	    tls_get_record_type(sock->sk, &u.cmsg) == TLS_RECORD_TYPE_ALERT) {
++		iov_iter_revert(&msg.msg_iter, ret);
++		ret = svc_tcp_sock_process_cmsg(sock, &msg, &u.cmsg, -EAGAIN);
++	}
++	return ret;
++}
++
++static int
++svc_tcp_sock_recvmsg(struct svc_sock *svsk, struct msghdr *msg)
++{
+ 	int ret;
++	struct socket *sock = svsk->sk_sock;
+ 
+-	msg->msg_control = &u;
+-	msg->msg_controllen = sizeof(u);
+ 	ret = sock_recvmsg(sock, msg, MSG_DONTWAIT);
+-	if (unlikely(msg->msg_controllen != sizeof(u)))
+-		ret = svc_tcp_sock_process_cmsg(sock, msg, &u.cmsg, ret);
++	if (msg->msg_flags & MSG_CTRUNC) {
++		msg->msg_flags &= ~(MSG_CTRUNC | MSG_EOR);
++		if (ret == 0 || ret == -EIO)
++			ret = svc_tcp_sock_recv_cmsg(sock, &msg->msg_flags);
++	}
+ 	return ret;
+ }
+ 
+@@ -321,7 +348,7 @@ static ssize_t svc_tcp_read_msg(struct svc_rqst *rqstp, size_t buflen,
+ 		iov_iter_advance(&msg.msg_iter, seek);
+ 		buflen -= seek;
+ 	}
+-	len = svc_tcp_sock_recv_cmsg(svsk, &msg);
++	len = svc_tcp_sock_recvmsg(svsk, &msg);
+ 	if (len > 0)
+ 		svc_flush_bvec(bvec, len, seek);
+ 
+@@ -1019,7 +1046,7 @@ static ssize_t svc_tcp_read_marker(struct svc_sock *svsk,
+ 		iov.iov_base = ((char *)&svsk->sk_marker) + svsk->sk_tcplen;
+ 		iov.iov_len  = want;
+ 		iov_iter_kvec(&msg.msg_iter, ITER_DEST, &iov, 1, want);
+-		len = svc_tcp_sock_recv_cmsg(svsk, &msg);
++		len = svc_tcp_sock_recvmsg(svsk, &msg);
+ 		if (len < 0)
+ 			return len;
+ 		svsk->sk_tcplen += len;
+diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
+index 67d099c7c66259..1397bb48cdde39 100644
+--- a/net/sunrpc/xprtsock.c
++++ b/net/sunrpc/xprtsock.c
+@@ -358,7 +358,7 @@ xs_alloc_sparse_pages(struct xdr_buf *buf, size_t want, gfp_t gfp)
+ 
+ static int
+ xs_sock_process_cmsg(struct socket *sock, struct msghdr *msg,
+-		     struct cmsghdr *cmsg, int ret)
++		     unsigned int *msg_flags, struct cmsghdr *cmsg, int ret)
+ {
+ 	u8 content_type = tls_get_record_type(sock->sk, cmsg);
+ 	u8 level, description;
+@@ -371,7 +371,7 @@ xs_sock_process_cmsg(struct socket *sock, struct msghdr *msg,
+ 		 * record, even though there might be more frames
+ 		 * waiting to be decrypted.
+ 		 */
+-		msg->msg_flags &= ~MSG_EOR;
++		*msg_flags &= ~MSG_EOR;
+ 		break;
+ 	case TLS_RECORD_TYPE_ALERT:
+ 		tls_alert_recv(sock->sk, msg, &level, &description);
+@@ -386,19 +386,33 @@ xs_sock_process_cmsg(struct socket *sock, struct msghdr *msg,
+ }
+ 
+ static int
+-xs_sock_recv_cmsg(struct socket *sock, struct msghdr *msg, int flags)
++xs_sock_recv_cmsg(struct socket *sock, unsigned int *msg_flags, int flags)
+ {
+ 	union {
+ 		struct cmsghdr	cmsg;
+ 		u8		buf[CMSG_SPACE(sizeof(u8))];
+ 	} u;
++	u8 alert[2];
++	struct kvec alert_kvec = {
++		.iov_base = alert,
++		.iov_len = sizeof(alert),
++	};
++	struct msghdr msg = {
++		.msg_flags = *msg_flags,
++		.msg_control = &u,
++		.msg_controllen = sizeof(u),
++	};
+ 	int ret;
+ 
+-	msg->msg_control = &u;
+-	msg->msg_controllen = sizeof(u);
+-	ret = sock_recvmsg(sock, msg, flags);
+-	if (msg->msg_controllen != sizeof(u))
+-		ret = xs_sock_process_cmsg(sock, msg, &u.cmsg, ret);
++	iov_iter_kvec(&msg.msg_iter, ITER_DEST, &alert_kvec, 1,
++		      alert_kvec.iov_len);
++	ret = sock_recvmsg(sock, &msg, flags);
++	if (ret > 0 &&
++	    tls_get_record_type(sock->sk, &u.cmsg) == TLS_RECORD_TYPE_ALERT) {
++		iov_iter_revert(&msg.msg_iter, ret);
++		ret = xs_sock_process_cmsg(sock, &msg, msg_flags, &u.cmsg,
++					   -EAGAIN);
++	}
+ 	return ret;
+ }
+ 
+@@ -408,7 +422,13 @@ xs_sock_recvmsg(struct socket *sock, struct msghdr *msg, int flags, size_t seek)
+ 	ssize_t ret;
+ 	if (seek != 0)
+ 		iov_iter_advance(&msg->msg_iter, seek);
+-	ret = xs_sock_recv_cmsg(sock, msg, flags);
++	ret = sock_recvmsg(sock, msg, flags);
++	/* Handle TLS inband control message lazily */
++	if (msg->msg_flags & MSG_CTRUNC) {
++		msg->msg_flags &= ~(MSG_CTRUNC | MSG_EOR);
++		if (ret == 0 || ret == -EIO)
++			ret = xs_sock_recv_cmsg(sock, &msg->msg_flags, flags);
++	}
+ 	return ret > 0 ? ret + seek : ret;
+ }
+ 
+@@ -434,7 +454,7 @@ xs_read_discard(struct socket *sock, struct msghdr *msg, int flags,
+ 		size_t count)
+ {
+ 	iov_iter_discard(&msg->msg_iter, ITER_DEST, count);
+-	return xs_sock_recv_cmsg(sock, msg, flags);
++	return xs_sock_recvmsg(sock, msg, flags, 0);
+ }
+ 
+ #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
+diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
+index 8fb5925f2389e9..1d7caadd0cbc45 100644
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -872,6 +872,19 @@ static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
+ 		delta = msg->sg.size;
+ 		psock->eval = sk_psock_msg_verdict(sk, psock, msg);
+ 		delta -= msg->sg.size;
++
++		if ((s32)delta > 0) {
++			/* It indicates that we executed bpf_msg_pop_data(),
++			 * causing the plaintext data size to decrease.
++			 * Therefore the encrypted data size also needs to
++			 * correspondingly decrease. We only need to subtract
++			 * delta to calculate the new ciphertext length since
++			 * ktls does not support block encryption.
++			 */
++			struct sk_msg *enc = &ctx->open_rec->msg_encrypted;
++
++			sk_msg_trim(sk, enc, enc->sg.size - delta);
++		}
+ 	}
+ 	if (msg->cork_bytes && msg->cork_bytes > msg->sg.size &&
+ 	    !enospc && !full_record) {
+diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
+index 08565e41b8e924..ef519b55a3d9ae 100644
+--- a/net/vmw_vsock/af_vsock.c
++++ b/net/vmw_vsock/af_vsock.c
+@@ -689,7 +689,8 @@ static int __vsock_bind_connectible(struct vsock_sock *vsk,
+ 		unsigned int i;
+ 
+ 		for (i = 0; i < MAX_PORT_RETRIES; i++) {
+-			if (port <= LAST_RESERVED_PORT)
++			if (port == VMADDR_PORT_ANY ||
++			    port <= LAST_RESERVED_PORT)
+ 				port = LAST_RESERVED_PORT + 1;
+ 
+ 			new_addr.svm_port = port++;
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index 4eb44821c70d3d..ec8265f2d5680f 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -16789,6 +16789,7 @@ static int nl80211_set_sar_specs(struct sk_buff *skb, struct genl_info *info)
+ 	if (!sar_spec)
+ 		return -ENOMEM;
+ 
++	sar_spec->num_sub_specs = specs;
+ 	sar_spec->type = type;
+ 	specs = 0;
+ 	nla_for_each_nested(spec_list, tb[NL80211_SAR_ATTR_SPECS], rem) {
+diff --git a/samples/mei/mei-amt-version.c b/samples/mei/mei-amt-version.c
+index 867debd3b9124c..1d7254bcb44cb7 100644
+--- a/samples/mei/mei-amt-version.c
++++ b/samples/mei/mei-amt-version.c
+@@ -69,11 +69,11 @@
+ #include <string.h>
+ #include <fcntl.h>
+ #include <sys/ioctl.h>
++#include <sys/time.h>
+ #include <unistd.h>
+ #include <errno.h>
+ #include <stdint.h>
+ #include <stdbool.h>
+-#include <bits/wordsize.h>
+ #include <linux/mei.h>
+ 
+ /*****************************************************************************
+diff --git a/scripts/kconfig/qconf.cc b/scripts/kconfig/qconf.cc
+index e260cab1c2afff..4b375abda77288 100644
+--- a/scripts/kconfig/qconf.cc
++++ b/scripts/kconfig/qconf.cc
+@@ -481,7 +481,7 @@ void ConfigList::updateListAllForAll()
+ 	while (it.hasNext()) {
+ 		ConfigList *list = it.next();
+ 
+-		list->updateList();
++		list->updateListAll();
+ 	}
+ }
+ 
+diff --git a/security/apparmor/include/match.h b/security/apparmor/include/match.h
+index 4bb0405c91908a..ae31a8a631fc6a 100644
+--- a/security/apparmor/include/match.h
++++ b/security/apparmor/include/match.h
+@@ -135,17 +135,15 @@ aa_state_t aa_dfa_matchn_until(struct aa_dfa *dfa, aa_state_t start,
+ 
+ void aa_dfa_free_kref(struct kref *kref);
+ 
+-#define WB_HISTORY_SIZE 24
++/* This needs to be a power of 2 */
++#define WB_HISTORY_SIZE 32
+ struct match_workbuf {
+-	unsigned int count;
+ 	unsigned int pos;
+ 	unsigned int len;
+-	unsigned int size;	/* power of 2, same as history size */
+-	unsigned int history[WB_HISTORY_SIZE];
++	aa_state_t history[WB_HISTORY_SIZE];
+ };
+ #define DEFINE_MATCH_WB(N)		\
+ struct match_workbuf N = {		\
+-	.count = 0,			\
+ 	.pos = 0,			\
+ 	.len = 0,			\
+ }
+diff --git a/security/apparmor/match.c b/security/apparmor/match.c
+index 517d77d3c34cc9..12e036f8ce0f72 100644
+--- a/security/apparmor/match.c
++++ b/security/apparmor/match.c
+@@ -624,34 +624,35 @@ aa_state_t aa_dfa_matchn_until(struct aa_dfa *dfa, aa_state_t start,
+ 	return state;
+ }
+ 
+-#define inc_wb_pos(wb)						\
+-do {								\
++#define inc_wb_pos(wb)							\
++do {									\
++	BUILD_BUG_ON_NOT_POWER_OF_2(WB_HISTORY_SIZE);			\
+ 	wb->pos = (wb->pos + 1) & (WB_HISTORY_SIZE - 1);		\
+-	wb->len = (wb->len + 1) & (WB_HISTORY_SIZE - 1);		\
++	wb->len = (wb->len + 1) > WB_HISTORY_SIZE ? WB_HISTORY_SIZE :	\
++				wb->len + 1;				\
+ } while (0)
+ 
+ /* For DFAs that don't support extended tagging of states */
++/* adjust is only set if is_loop returns true */
+ static bool is_loop(struct match_workbuf *wb, aa_state_t state,
+ 		    unsigned int *adjust)
+ {
+-	aa_state_t pos = wb->pos;
+-	aa_state_t i;
++	int pos = wb->pos;
++	int i;
+ 
+ 	if (wb->history[pos] < state)
+ 		return false;
+ 
+-	for (i = 0; i <= wb->len; i++) {
++	for (i = 0; i < wb->len; i++) {
+ 		if (wb->history[pos] == state) {
+ 			*adjust = i;
+ 			return true;
+ 		}
+-		if (pos == 0)
+-			pos = WB_HISTORY_SIZE;
+-		pos--;
++		/* -1 wraps to WB_HISTORY_SIZE - 1 */
++		pos = (pos - 1) & (WB_HISTORY_SIZE - 1);
+ 	}
+ 
+-	*adjust = i;
+-	return true;
++	return false;
+ }
+ 
+ static aa_state_t leftmatch_fb(struct aa_dfa *dfa, aa_state_t start,
+diff --git a/security/apparmor/policy_unpack_test.c b/security/apparmor/policy_unpack_test.c
+index f070902da8fcce..a7ac0ccc6cfedd 100644
+--- a/security/apparmor/policy_unpack_test.c
++++ b/security/apparmor/policy_unpack_test.c
+@@ -9,6 +9,8 @@
+ #include "include/policy.h"
+ #include "include/policy_unpack.h"
+ 
++#include <linux/unaligned.h>
++
+ #define TEST_STRING_NAME "TEST_STRING"
+ #define TEST_STRING_DATA "testing"
+ #define TEST_STRING_BUF_OFFSET \
+@@ -80,7 +82,7 @@ static struct aa_ext *build_aa_ext_struct(struct policy_unpack_fixture *puf,
+ 	*(buf + 1) = strlen(TEST_U32_NAME) + 1;
+ 	strscpy(buf + 3, TEST_U32_NAME, e->end - (void *)(buf + 3));
+ 	*(buf + 3 + strlen(TEST_U32_NAME) + 1) = AA_U32;
+-	*((__le32 *)(buf + 3 + strlen(TEST_U32_NAME) + 2)) = cpu_to_le32(TEST_U32_DATA);
++	put_unaligned_le32(TEST_U32_DATA, buf + 3 + strlen(TEST_U32_NAME) + 2);
+ 
+ 	buf = e->start + TEST_NAMED_U64_BUF_OFFSET;
+ 	*buf = AA_NAME;
+@@ -103,7 +105,7 @@ static struct aa_ext *build_aa_ext_struct(struct policy_unpack_fixture *puf,
+ 	*(buf + 1) = strlen(TEST_ARRAY_NAME) + 1;
+ 	strscpy(buf + 3, TEST_ARRAY_NAME, e->end - (void *)(buf + 3));
+ 	*(buf + 3 + strlen(TEST_ARRAY_NAME) + 1) = AA_ARRAY;
+-	*((__le16 *)(buf + 3 + strlen(TEST_ARRAY_NAME) + 2)) = cpu_to_le16(TEST_ARRAY_SIZE);
++	put_unaligned_le16(TEST_ARRAY_SIZE, buf + 3 + strlen(TEST_ARRAY_NAME) + 2);
+ 
+ 	return e;
+ }
+diff --git a/sound/pci/hda/cs35l56_hda.c b/sound/pci/hda/cs35l56_hda.c
+index 7baf3b506eefec..7823f71012a8a9 100644
+--- a/sound/pci/hda/cs35l56_hda.c
++++ b/sound/pci/hda/cs35l56_hda.c
+@@ -876,6 +876,52 @@ static int cs35l56_hda_system_resume(struct device *dev)
+ 	return 0;
+ }
+ 
++static int cs35l56_hda_fixup_yoga9(struct cs35l56_hda *cs35l56, int *bus_addr)
++{
++	/* The cirrus,dev-index property has the wrong values */
++	switch (*bus_addr) {
++	case 0x30:
++		cs35l56->index = 1;
++		return 0;
++	case 0x31:
++		cs35l56->index = 0;
++		return 0;
++	default:
++		/* There is a pseudo-address for broadcast to both amps - ignore it */
++		dev_dbg(cs35l56->base.dev, "Ignoring I2C address %#x\n", *bus_addr);
++		return 0;
++	}
++}
++
++static const struct {
++	const char *sub;
++	int (*fixup_fn)(struct cs35l56_hda *cs35l56, int *bus_addr);
++} cs35l56_hda_fixups[] = {
++	{
++		.sub = "17AA390B", /* Lenovo Yoga Book 9i GenX */
++		.fixup_fn = cs35l56_hda_fixup_yoga9,
++	},
++};
++
++static int cs35l56_hda_apply_platform_fixups(struct cs35l56_hda *cs35l56, const char *sub,
++					     int *bus_addr)
++{
++	int i;
++
++	if (IS_ERR(sub))
++		return 0;
++
++	for (i = 0; i < ARRAY_SIZE(cs35l56_hda_fixups); i++) {
++		if (strcasecmp(cs35l56_hda_fixups[i].sub, sub) == 0) {
++			dev_dbg(cs35l56->base.dev, "Applying fixup for %s\n",
++				cs35l56_hda_fixups[i].sub);
++			return (cs35l56_hda_fixups[i].fixup_fn)(cs35l56, bus_addr);
++		}
++	}
++
++	return 0;
++}
++
+ static int cs35l56_hda_read_acpi(struct cs35l56_hda *cs35l56, int hid, int id)
+ {
+ 	u32 values[HDA_MAX_COMPONENTS];
+@@ -900,39 +946,47 @@ static int cs35l56_hda_read_acpi(struct cs35l56_hda *cs35l56, int hid, int id)
+ 		ACPI_COMPANION_SET(cs35l56->base.dev, adev);
+ 	}
+ 
+-	property = "cirrus,dev-index";
+-	ret = device_property_count_u32(cs35l56->base.dev, property);
+-	if (ret <= 0)
+-		goto err;
+-
+-	if (ret > ARRAY_SIZE(values)) {
+-		ret = -EINVAL;
+-		goto err;
+-	}
+-	nval = ret;
++	/* Initialize things that could be overwritten by a fixup */
++	cs35l56->index = -1;
+ 
+-	ret = device_property_read_u32_array(cs35l56->base.dev, property, values, nval);
++	sub = acpi_get_subsystem_id(ACPI_HANDLE(cs35l56->base.dev));
++	ret = cs35l56_hda_apply_platform_fixups(cs35l56, sub, &id);
+ 	if (ret)
+-		goto err;
++		return ret;
+ 
+-	cs35l56->index = -1;
+-	for (i = 0; i < nval; i++) {
+-		if (values[i] == id) {
+-			cs35l56->index = i;
+-			break;
+-		}
+-	}
+-	/*
+-	 * It's not an error for the ID to be missing: for I2C there can be
+-	 * an alias address that is not a real device. So reject silently.
+-	 */
+ 	if (cs35l56->index == -1) {
+-		dev_dbg(cs35l56->base.dev, "No index found in %s\n", property);
+-		ret = -ENODEV;
+-		goto err;
+-	}
++		property = "cirrus,dev-index";
++		ret = device_property_count_u32(cs35l56->base.dev, property);
++		if (ret <= 0)
++			goto err;
+ 
+-	sub = acpi_get_subsystem_id(ACPI_HANDLE(cs35l56->base.dev));
++		if (ret > ARRAY_SIZE(values)) {
++			ret = -EINVAL;
++			goto err;
++		}
++		nval = ret;
++
++		ret = device_property_read_u32_array(cs35l56->base.dev, property, values, nval);
++		if (ret)
++			goto err;
++
++		for (i = 0; i < nval; i++) {
++			if (values[i] == id) {
++				cs35l56->index = i;
++				break;
++			}
++		}
++
++		/*
++		 * It's not an error for the ID to be missing: for I2C there can be
++		 * an alias address that is not a real device. So reject silently.
++		 */
++		if (cs35l56->index == -1) {
++			dev_dbg(cs35l56->base.dev, "No index found in %s\n", property);
++			ret = -ENODEV;
++			goto err;
++		}
++	}
+ 
+ 	if (IS_ERR(sub)) {
+ 		dev_info(cs35l56->base.dev,
+diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
+index d40197fb5fbd58..77432e06f3e32c 100644
+--- a/sound/pci/hda/patch_ca0132.c
++++ b/sound/pci/hda/patch_ca0132.c
+@@ -4802,7 +4802,8 @@ static int ca0132_alt_select_out(struct hda_codec *codec)
+ 	if (err < 0)
+ 		goto exit;
+ 
+-	if (ca0132_alt_select_out_quirk_set(codec) < 0)
++	err = ca0132_alt_select_out_quirk_set(codec);
++	if (err < 0)
+ 		goto exit;
+ 
+ 	switch (spec->cur_out_type) {
+@@ -4892,6 +4893,8 @@ static int ca0132_alt_select_out(struct hda_codec *codec)
+ 				spec->bass_redirection_val);
+ 	else
+ 		err = ca0132_alt_surround_set_bass_redirection(codec, 0);
++	if (err < 0)
++		goto exit;
+ 
+ 	/* Unmute DSP now that we're done with output selection. */
+ 	err = dspio_set_uint_param(codec, 0x96,
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 085f0697bff14f..6ef635d37f456b 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -10678,6 +10678,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8a0f, "HP Pavilion 14-ec1xxx", ALC287_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8a20, "HP Laptop 15s-fq5xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
+ 	SND_PCI_QUIRK(0x103c, 0x8a25, "HP Victus 16-d1xxx (MB 8A25)", ALC245_FIXUP_HP_MUTE_LED_COEFBIT),
++	SND_PCI_QUIRK(0x103c, 0x8a26, "HP Victus 16-d1xxx (MB 8A26)", ALC245_FIXUP_HP_MUTE_LED_COEFBIT),
+ 	SND_PCI_QUIRK(0x103c, 0x8a28, "HP Envy 13", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x103c, 0x8a29, "HP Envy 15", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x103c, 0x8a2a, "HP Envy 15", ALC287_FIXUP_CS35L41_I2C_2),
+@@ -10736,6 +10737,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8bbe, "HP Victus 16-r0xxx (MB 8BBE)", ALC245_FIXUP_HP_MUTE_LED_COEFBIT),
+ 	SND_PCI_QUIRK(0x103c, 0x8bc8, "HP Victus 15-fa1xxx", ALC245_FIXUP_HP_MUTE_LED_COEFBIT),
+ 	SND_PCI_QUIRK(0x103c, 0x8bcd, "HP Omen 16-xd0xxx", ALC245_FIXUP_HP_MUTE_LED_V1_COEFBIT),
++	SND_PCI_QUIRK(0x103c, 0x8bd4, "HP Victus 16-s0xxx (MB 8BD4)", ALC245_FIXUP_HP_MUTE_LED_COEFBIT),
+ 	SND_PCI_QUIRK(0x103c, 0x8bdd, "HP Envy 17", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x103c, 0x8bde, "HP Envy 17", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x103c, 0x8bdf, "HP Envy 15", ALC287_FIXUP_CS35L41_I2C_2),
+@@ -10788,6 +10790,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8c91, "HP EliteBook 660", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8c96, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ 	SND_PCI_QUIRK(0x103c, 0x8c97, "HP ZBook", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
++	SND_PCI_QUIRK(0x103c, 0x8c99, "HP Victus 16-r1xxx (MB 8C99)", ALC245_FIXUP_HP_MUTE_LED_COEFBIT),
+ 	SND_PCI_QUIRK(0x103c, 0x8c9c, "HP Victus 16-s1xxx (MB 8C9C)", ALC245_FIXUP_HP_MUTE_LED_COEFBIT),
+ 	SND_PCI_QUIRK(0x103c, 0x8ca1, "HP ZBook Power", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8ca2, "HP ZBook Power", ALC236_FIXUP_HP_GPIO_LED),
+diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
+index 1689b6b22598e2..e362c2865ec131 100644
+--- a/sound/soc/amd/yc/acp6x-mach.c
++++ b/sound/soc/amd/yc/acp6x-mach.c
+@@ -409,6 +409,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "M6500RC"),
+ 		}
+ 	},
++	{
++		.driver_data = &acp6x_card,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "M6501RM"),
++		}
++	},
+ 	{
+ 		.driver_data = &acp6x_card,
+ 		.matches = {
+@@ -528,6 +535,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "OMEN by HP Gaming Laptop 16z-n000"),
+ 		}
+ 	},
++	{
++		.driver_data = &acp6x_card,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Victus by HP Gaming Laptop 15-fb1xxx"),
++		}
++	},
+ 	{
+ 		.driver_data = &acp6x_card,
+ 		.matches = {
+@@ -577,6 +591,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+ 			DMI_MATCH(DMI_BOARD_NAME, "8A7F"),
+ 		}
+ 	},
++	{
++		.driver_data = &acp6x_card,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
++			DMI_MATCH(DMI_BOARD_NAME, "8A81"),
++		}
++	},
+ 	{
+ 		.driver_data = &acp6x_card,
+ 		.matches = {
+diff --git a/sound/soc/fsl/fsl_xcvr.c b/sound/soc/fsl/fsl_xcvr.c
+index 4341269eb97780..0a67987c316e87 100644
+--- a/sound/soc/fsl/fsl_xcvr.c
++++ b/sound/soc/fsl/fsl_xcvr.c
+@@ -1239,6 +1239,26 @@ static irqreturn_t irq0_isr(int irq, void *devid)
+ 				/* clear CS control register */
+ 				memset_io(reg_ctrl, 0, sizeof(val));
+ 			}
++		} else {
++			regmap_read(xcvr->regmap, FSL_XCVR_RX_CS_DATA_0,
++				    (u32 *)&xcvr->rx_iec958.status[0]);
++			regmap_read(xcvr->regmap, FSL_XCVR_RX_CS_DATA_1,
++				    (u32 *)&xcvr->rx_iec958.status[4]);
++			regmap_read(xcvr->regmap, FSL_XCVR_RX_CS_DATA_2,
++				    (u32 *)&xcvr->rx_iec958.status[8]);
++			regmap_read(xcvr->regmap, FSL_XCVR_RX_CS_DATA_3,
++				    (u32 *)&xcvr->rx_iec958.status[12]);
++			regmap_read(xcvr->regmap, FSL_XCVR_RX_CS_DATA_4,
++				    (u32 *)&xcvr->rx_iec958.status[16]);
++			regmap_read(xcvr->regmap, FSL_XCVR_RX_CS_DATA_5,
++				    (u32 *)&xcvr->rx_iec958.status[20]);
++			for (i = 0; i < 6; i++) {
++				val = *(u32 *)(xcvr->rx_iec958.status + i * 4);
++				*(u32 *)(xcvr->rx_iec958.status + i * 4) =
++					bitrev32(val);
++			}
++			regmap_set_bits(xcvr->regmap, FSL_XCVR_RX_DPTH_CTRL,
++					FSL_XCVR_RX_DPTH_CTRL_CSA);
+ 		}
+ 	}
+ 	if (isr & FSL_XCVR_IRQ_NEW_UD) {
+diff --git a/sound/soc/intel/boards/Kconfig b/sound/soc/intel/boards/Kconfig
+index 8dee46abf346d6..aed95d1583e099 100644
+--- a/sound/soc/intel/boards/Kconfig
++++ b/sound/soc/intel/boards/Kconfig
+@@ -11,7 +11,7 @@ menuconfig SND_SOC_INTEL_MACH
+ 	 kernel: saying N will just cause the configurator to skip all
+ 	 the questions about Intel ASoC machine drivers.
+ 
+-if SND_SOC_INTEL_MACH
++if SND_SOC_INTEL_MACH && (SND_SOC_SOF_INTEL_COMMON || !SND_SOC_SOF_INTEL_COMMON)
+ 
+ config SND_SOC_INTEL_USER_FRIENDLY_LONG_NAMES
+ 	bool "Use more user friendly long card names"
+diff --git a/sound/soc/mediatek/common/mtk-afe-platform-driver.c b/sound/soc/mediatek/common/mtk-afe-platform-driver.c
+index 6b633058394140..70fd05d5ff486c 100644
+--- a/sound/soc/mediatek/common/mtk-afe-platform-driver.c
++++ b/sound/soc/mediatek/common/mtk-afe-platform-driver.c
+@@ -120,7 +120,9 @@ int mtk_afe_pcm_new(struct snd_soc_component *component,
+ 	struct mtk_base_afe *afe = snd_soc_component_get_drvdata(component);
+ 
+ 	size = afe->mtk_afe_hardware->buffer_bytes_max;
+-	snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_DEV, afe->dev, 0, size);
++	snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_DEV, afe->dev,
++				       afe->preallocate_buffers ? size : 0,
++				       size);
+ 
+ 	return 0;
+ }
+diff --git a/sound/soc/mediatek/common/mtk-base-afe.h b/sound/soc/mediatek/common/mtk-base-afe.h
+index f51578b6c50a35..a406f2e3e7a878 100644
+--- a/sound/soc/mediatek/common/mtk-base-afe.h
++++ b/sound/soc/mediatek/common/mtk-base-afe.h
+@@ -117,6 +117,7 @@ struct mtk_base_afe {
+ 	struct mtk_base_afe_irq *irqs;
+ 	int irqs_size;
+ 	int memif_32bit_supported;
++	bool preallocate_buffers;
+ 
+ 	struct list_head sub_dais;
+ 	struct snd_soc_dai_driver *dai_drivers;
+diff --git a/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c b/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c
+index 03250273ea9c19..47a88edf78cad2 100644
+--- a/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c
++++ b/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c
+@@ -13,6 +13,7 @@
+ #include <linux/module.h>
+ #include <linux/of.h>
+ #include <linux/of_address.h>
++#include <linux/of_reserved_mem.h>
+ #include <linux/dma-mapping.h>
+ #include <linux/pm_runtime.h>
+ #include <sound/soc.h>
+@@ -1070,6 +1071,12 @@ static int mt8173_afe_pcm_dev_probe(struct platform_device *pdev)
+ 
+ 	afe->dev = &pdev->dev;
+ 
++	ret = of_reserved_mem_device_init(&pdev->dev);
++	if (ret) {
++		dev_info(&pdev->dev, "no reserved memory found, pre-allocating buffers instead\n");
++		afe->preallocate_buffers = true;
++	}
++
+ 	irq_id = platform_get_irq(pdev, 0);
+ 	if (irq_id <= 0)
+ 		return irq_id < 0 ? irq_id : -ENXIO;
+diff --git a/sound/soc/mediatek/mt8183/mt8183-afe-pcm.c b/sound/soc/mediatek/mt8183/mt8183-afe-pcm.c
+index 3f377ba4ad53a1..501ea9d92aea13 100644
+--- a/sound/soc/mediatek/mt8183/mt8183-afe-pcm.c
++++ b/sound/soc/mediatek/mt8183/mt8183-afe-pcm.c
+@@ -10,6 +10,7 @@
+ #include <linux/mfd/syscon.h>
+ #include <linux/of.h>
+ #include <linux/of_address.h>
++#include <linux/of_reserved_mem.h>
+ #include <linux/pm_runtime.h>
+ #include <linux/reset.h>
+ 
+@@ -1094,6 +1095,12 @@ static int mt8183_afe_pcm_dev_probe(struct platform_device *pdev)
+ 	afe->dev = &pdev->dev;
+ 	dev = afe->dev;
+ 
++	ret = of_reserved_mem_device_init(dev);
++	if (ret) {
++		dev_info(dev, "no reserved memory found, pre-allocating buffers instead\n");
++		afe->preallocate_buffers = true;
++	}
++
+ 	/* initial audio related clock */
+ 	ret = mt8183_init_clock(afe);
+ 	if (ret) {
+diff --git a/sound/soc/mediatek/mt8186/mt8186-afe-pcm.c b/sound/soc/mediatek/mt8186/mt8186-afe-pcm.c
+index bafbef96a42da2..e6d3caf8b6fcbc 100644
+--- a/sound/soc/mediatek/mt8186/mt8186-afe-pcm.c
++++ b/sound/soc/mediatek/mt8186/mt8186-afe-pcm.c
+@@ -10,6 +10,7 @@
+ #include <linux/module.h>
+ #include <linux/of.h>
+ #include <linux/of_address.h>
++#include <linux/of_reserved_mem.h>
+ #include <linux/pm_runtime.h>
+ #include <linux/reset.h>
+ #include <sound/soc.h>
+@@ -2835,6 +2836,12 @@ static int mt8186_afe_pcm_dev_probe(struct platform_device *pdev)
+ 	afe_priv = afe->platform_priv;
+ 	afe->dev = &pdev->dev;
+ 
++	ret = of_reserved_mem_device_init(dev);
++	if (ret) {
++		dev_info(dev, "no reserved memory found, pre-allocating buffers instead\n");
++		afe->preallocate_buffers = true;
++	}
++
+ 	afe->base_addr = devm_platform_ioremap_resource(pdev, 0);
+ 	if (IS_ERR(afe->base_addr))
+ 		return PTR_ERR(afe->base_addr);
+diff --git a/sound/soc/mediatek/mt8192/mt8192-afe-pcm.c b/sound/soc/mediatek/mt8192/mt8192-afe-pcm.c
+index 9b502f4cd6ea0b..69ed34495d0f73 100644
+--- a/sound/soc/mediatek/mt8192/mt8192-afe-pcm.c
++++ b/sound/soc/mediatek/mt8192/mt8192-afe-pcm.c
+@@ -12,6 +12,7 @@
+ #include <linux/mfd/syscon.h>
+ #include <linux/of.h>
+ #include <linux/of_address.h>
++#include <linux/of_reserved_mem.h>
+ #include <linux/pm_runtime.h>
+ #include <linux/reset.h>
+ #include <sound/soc.h>
+@@ -2180,6 +2181,12 @@ static int mt8192_afe_pcm_dev_probe(struct platform_device *pdev)
+ 	afe->dev = &pdev->dev;
+ 	dev = afe->dev;
+ 
++	ret = of_reserved_mem_device_init(dev);
++	if (ret) {
++		dev_info(dev, "no reserved memory found, pre-allocating buffers instead\n");
++		afe->preallocate_buffers = true;
++	}
++
+ 	/* init audio related clock */
+ 	ret = mt8192_init_clock(afe);
+ 	if (ret) {
+diff --git a/sound/soc/soc-dai.c b/sound/soc/soc-dai.c
+index de09d21add4539..ad106087dd4bf9 100644
+--- a/sound/soc/soc-dai.c
++++ b/sound/soc/soc-dai.c
+@@ -273,13 +273,15 @@ int snd_soc_dai_set_tdm_slot(struct snd_soc_dai *dai,
+ 		&rx_mask,
+ 	};
+ 
+-	if (dai->driver->ops &&
+-	    dai->driver->ops->xlate_tdm_slot_mask)
+-		ret = dai->driver->ops->xlate_tdm_slot_mask(slots, &tx_mask, &rx_mask);
+-	else
+-		ret = snd_soc_xlate_tdm_slot_mask(slots, &tx_mask, &rx_mask);
+-	if (ret)
+-		goto err;
++	if (slots) {
++		if (dai->driver->ops &&
++		    dai->driver->ops->xlate_tdm_slot_mask)
++			ret = dai->driver->ops->xlate_tdm_slot_mask(slots, &tx_mask, &rx_mask);
++		else
++			ret = snd_soc_xlate_tdm_slot_mask(slots, &tx_mask, &rx_mask);
++		if (ret)
++			goto err;
++	}
+ 
+ 	for_each_pcm_streams(stream)
+ 		snd_soc_dai_tdm_mask_set(dai, stream, *tdm_mask[stream]);
+diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c
+index fb11003d56cf65..669b95cb4850f1 100644
+--- a/sound/soc/soc-ops.c
++++ b/sound/soc/soc-ops.c
+@@ -642,28 +642,32 @@ EXPORT_SYMBOL_GPL(snd_soc_get_volsw_range);
+ static int snd_soc_clip_to_platform_max(struct snd_kcontrol *kctl)
+ {
+ 	struct soc_mixer_control *mc = (struct soc_mixer_control *)kctl->private_value;
+-	struct snd_ctl_elem_value uctl;
++	struct snd_ctl_elem_value *uctl;
+ 	int ret;
+ 
+ 	if (!mc->platform_max)
+ 		return 0;
+ 
+-	ret = kctl->get(kctl, &uctl);
++	uctl = kzalloc(sizeof(*uctl), GFP_KERNEL);
++	if (!uctl)
++		return -ENOMEM;
++
++	ret = kctl->get(kctl, uctl);
+ 	if (ret < 0)
+-		return ret;
++		goto out;
+ 
+-	if (uctl.value.integer.value[0] > mc->platform_max)
+-		uctl.value.integer.value[0] = mc->platform_max;
++	if (uctl->value.integer.value[0] > mc->platform_max)
++		uctl->value.integer.value[0] = mc->platform_max;
+ 
+ 	if (snd_soc_volsw_is_stereo(mc) &&
+-	    uctl.value.integer.value[1] > mc->platform_max)
+-		uctl.value.integer.value[1] = mc->platform_max;
++	    uctl->value.integer.value[1] > mc->platform_max)
++		uctl->value.integer.value[1] = mc->platform_max;
+ 
+-	ret = kctl->put(kctl, &uctl);
+-	if (ret < 0)
+-		return ret;
++	ret = kctl->put(kctl, uctl);
+ 
+-	return 0;
++out:
++	kfree(uctl);
++	return ret;
+ }
+ 
+ /**
+diff --git a/sound/usb/mixer_scarlett2.c b/sound/usb/mixer_scarlett2.c
+index 4cddf84db631c6..8e91fce6274f46 100644
+--- a/sound/usb/mixer_scarlett2.c
++++ b/sound/usb/mixer_scarlett2.c
+@@ -2329,6 +2329,8 @@ static int scarlett2_usb(
+ 	struct scarlett2_usb_packet *req, *resp = NULL;
+ 	size_t req_buf_size = struct_size(req, data, req_size);
+ 	size_t resp_buf_size = struct_size(resp, data, resp_size);
++	int retries = 0;
++	const int max_retries = 5;
+ 	int err;
+ 
+ 	req = kmalloc(req_buf_size, GFP_KERNEL);
+@@ -2352,10 +2354,15 @@ static int scarlett2_usb(
+ 	if (req_size)
+ 		memcpy(req->data, req_data, req_size);
+ 
++retry:
+ 	err = scarlett2_usb_tx(dev, private->bInterfaceNumber,
+ 			       req, req_buf_size);
+ 
+ 	if (err != req_buf_size) {
++		if (err == -EPROTO && ++retries <= max_retries) {
++			msleep(5 * (1 << (retries - 1)));
++			goto retry;
++		}
+ 		usb_audio_err(
+ 			mixer->chip,
+ 			"%s USB request result cmd %x was %d\n",
+diff --git a/sound/x86/intel_hdmi_audio.c b/sound/x86/intel_hdmi_audio.c
+index d41ea09ffbe56b..571a092b128a21 100644
+--- a/sound/x86/intel_hdmi_audio.c
++++ b/sound/x86/intel_hdmi_audio.c
+@@ -1767,7 +1767,7 @@ static int __hdmi_lpe_audio_probe(struct platform_device *pdev)
+ 		/* setup private data which can be retrieved when required */
+ 		pcm->private_data = ctx;
+ 		pcm->info_flags = 0;
+-		strscpy(pcm->name, card->shortname, strlen(card->shortname));
++		strscpy(pcm->name, card->shortname, sizeof(pcm->name));
+ 		/* setup the ops for playback */
+ 		snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &had_pcm_ops);
+ 
+diff --git a/tools/bpf/bpftool/net.c b/tools/bpf/bpftool/net.c
+index d2242d9f84411f..39f208928cdb56 100644
+--- a/tools/bpf/bpftool/net.c
++++ b/tools/bpf/bpftool/net.c
+@@ -366,17 +366,18 @@ static int dump_link_nlmsg(void *cookie, void *msg, struct nlattr **tb)
+ {
+ 	struct bpf_netdev_t *netinfo = cookie;
+ 	struct ifinfomsg *ifinfo = msg;
++	struct ip_devname_ifindex *tmp;
+ 
+ 	if (netinfo->filter_idx > 0 && netinfo->filter_idx != ifinfo->ifi_index)
+ 		return 0;
+ 
+ 	if (netinfo->used_len == netinfo->array_len) {
+-		netinfo->devices = realloc(netinfo->devices,
+-			(netinfo->array_len + 16) *
+-			sizeof(struct ip_devname_ifindex));
+-		if (!netinfo->devices)
++		tmp = realloc(netinfo->devices,
++			(netinfo->array_len + 16) * sizeof(struct ip_devname_ifindex));
++		if (!tmp)
+ 			return -ENOMEM;
+ 
++		netinfo->devices = tmp;
+ 		netinfo->array_len += 16;
+ 	}
+ 	netinfo->devices[netinfo->used_len].ifindex = ifinfo->ifi_index;
+@@ -395,6 +396,7 @@ static int dump_class_qdisc_nlmsg(void *cookie, void *msg, struct nlattr **tb)
+ {
+ 	struct bpf_tcinfo_t *tcinfo = cookie;
+ 	struct tcmsg *info = msg;
++	struct tc_kind_handle *tmp;
+ 
+ 	if (tcinfo->is_qdisc) {
+ 		/* skip clsact qdisc */
+@@ -406,11 +408,12 @@ static int dump_class_qdisc_nlmsg(void *cookie, void *msg, struct nlattr **tb)
+ 	}
+ 
+ 	if (tcinfo->used_len == tcinfo->array_len) {
+-		tcinfo->handle_array = realloc(tcinfo->handle_array,
++		tmp = realloc(tcinfo->handle_array,
+ 			(tcinfo->array_len + 16) * sizeof(struct tc_kind_handle));
+-		if (!tcinfo->handle_array)
++		if (!tmp)
+ 			return -ENOMEM;
+ 
++		tcinfo->handle_array = tmp;
+ 		tcinfo->array_len += 16;
+ 	}
+ 	tcinfo->handle_array[tcinfo->used_len].handle = info->tcm_handle;
+diff --git a/tools/cgroup/memcg_slabinfo.py b/tools/cgroup/memcg_slabinfo.py
+index 270c28a0d09801..6bf4bde77903c3 100644
+--- a/tools/cgroup/memcg_slabinfo.py
++++ b/tools/cgroup/memcg_slabinfo.py
+@@ -146,11 +146,11 @@ def detect_kernel_config():
+ 
+ 
+ def for_each_slab(prog):
+-    PGSlab = ~prog.constant('PG_slab')
++    slabtype = prog.constant('PGTY_slab')
+ 
+     for page in for_each_page(prog):
+         try:
+-            if page.page_type.value_() == PGSlab:
++            if (page.page_type.value_() >> 24) == slabtype:
+                 yield cast('struct slab *', page)
+         except FaultError:
+             pass
+diff --git a/tools/lib/subcmd/help.c b/tools/lib/subcmd/help.c
+index 8561b0f01a2476..9ef569492560ef 100644
+--- a/tools/lib/subcmd/help.c
++++ b/tools/lib/subcmd/help.c
+@@ -9,6 +9,7 @@
+ #include <sys/stat.h>
+ #include <unistd.h>
+ #include <dirent.h>
++#include <assert.h>
+ #include "subcmd-util.h"
+ #include "help.h"
+ #include "exec-cmd.h"
+@@ -82,10 +83,11 @@ void exclude_cmds(struct cmdnames *cmds, struct cmdnames *excludes)
+ 				ci++;
+ 				cj++;
+ 			} else {
+-				zfree(&cmds->names[cj]);
+-				cmds->names[cj++] = cmds->names[ci++];
++				cmds->names[cj++] = cmds->names[ci];
++				cmds->names[ci++] = NULL;
+ 			}
+ 		} else if (cmp == 0) {
++			zfree(&cmds->names[ci]);
+ 			ci++;
+ 			ei++;
+ 		} else if (cmp > 0) {
+@@ -94,12 +96,12 @@ void exclude_cmds(struct cmdnames *cmds, struct cmdnames *excludes)
+ 	}
+ 	if (ci != cj) {
+ 		while (ci < cmds->cnt) {
+-			zfree(&cmds->names[cj]);
+-			cmds->names[cj++] = cmds->names[ci++];
++			cmds->names[cj++] = cmds->names[ci];
++			cmds->names[ci++] = NULL;
+ 		}
+ 	}
+ 	for (ci = cj; ci < cmds->cnt; ci++)
+-		zfree(&cmds->names[ci]);
++		assert(cmds->names[ci] == NULL);
+ 	cmds->cnt = cj;
+ }
+ 
+diff --git a/tools/perf/.gitignore b/tools/perf/.gitignore
+index f5b81d439387a1..1ef45d8030242c 100644
+--- a/tools/perf/.gitignore
++++ b/tools/perf/.gitignore
+@@ -48,8 +48,6 @@ libbpf/
+ libperf/
+ libsubcmd/
+ libsymbol/
+-libtraceevent/
+-libtraceevent_plugins/
+ fixdep
+ Documentation/doc.dep
+ python_ext_build/
+diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
+index 5981cc51abc88b..64bf3ac237f2ef 100644
+--- a/tools/perf/builtin-sched.c
++++ b/tools/perf/builtin-sched.c
+@@ -999,7 +999,7 @@ thread_atoms_search(struct rb_root_cached *root, struct thread *thread,
+ 		else if (cmp < 0)
+ 			node = node->rb_right;
+ 		else {
+-			BUG_ON(thread != atoms->thread);
++			BUG_ON(!RC_CHK_EQUAL(thread, atoms->thread));
+ 			return atoms;
+ 		}
+ 	}
+@@ -1116,6 +1116,21 @@ add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
+ 	atoms->nb_atoms++;
+ }
+ 
++static void free_work_atoms(struct work_atoms *atoms)
++{
++	struct work_atom *atom, *tmp;
++
++	if (atoms == NULL)
++		return;
++
++	list_for_each_entry_safe(atom, tmp, &atoms->work_list, list) {
++		list_del(&atom->list);
++		free(atom);
++	}
++	thread__zput(atoms->thread);
++	free(atoms);
++}
++
+ static int latency_switch_event(struct perf_sched *sched,
+ 				struct evsel *evsel,
+ 				struct perf_sample *sample,
+@@ -1639,6 +1654,7 @@ static int map_switch_event(struct perf_sched *sched, struct evsel *evsel,
+ 	const char *color = PERF_COLOR_NORMAL;
+ 	char stimestamp[32];
+ 	const char *str;
++	int ret = -1;
+ 
+ 	BUG_ON(this_cpu.cpu >= MAX_CPUS || this_cpu.cpu < 0);
+ 
+@@ -1669,17 +1685,20 @@ static int map_switch_event(struct perf_sched *sched, struct evsel *evsel,
+ 	sched_in = map__findnew_thread(sched, machine, -1, next_pid);
+ 	sched_out = map__findnew_thread(sched, machine, -1, prev_pid);
+ 	if (sched_in == NULL || sched_out == NULL)
+-		return -1;
++		goto out;
+ 
+ 	tr = thread__get_runtime(sched_in);
+-	if (tr == NULL) {
+-		thread__put(sched_in);
+-		return -1;
+-	}
++	if (tr == NULL)
++		goto out;
++
++	thread__put(sched->curr_thread[this_cpu.cpu]);
++	thread__put(sched->curr_out_thread[this_cpu.cpu]);
+ 
+ 	sched->curr_thread[this_cpu.cpu] = thread__get(sched_in);
+ 	sched->curr_out_thread[this_cpu.cpu] = thread__get(sched_out);
+ 
++	ret = 0;
++
+ 	str = thread__comm_str(sched_in);
+ 	new_shortname = 0;
+ 	if (!tr->shortname[0]) {
+@@ -1774,12 +1793,10 @@ static int map_switch_event(struct perf_sched *sched, struct evsel *evsel,
+ 	color_fprintf(stdout, color, "\n");
+ 
+ out:
+-	if (sched->map.task_name)
+-		thread__put(sched_out);
+-
++	thread__put(sched_out);
+ 	thread__put(sched_in);
+ 
+-	return 0;
++	return ret;
+ }
+ 
+ static int process_sched_switch_event(const struct perf_tool *tool,
+@@ -2023,6 +2040,16 @@ static u64 evsel__get_time(struct evsel *evsel, u32 cpu)
+ 	return r->last_time[cpu];
+ }
+ 
++static void timehist__evsel_priv_destructor(void *priv)
++{
++	struct evsel_runtime *r = priv;
++
++	if (r) {
++		free(r->last_time);
++		free(r);
++	}
++}
++
+ static int comm_width = 30;
+ 
+ static char *timehist_get_commstr(struct thread *thread)
+@@ -3276,6 +3303,8 @@ static int perf_sched__timehist(struct perf_sched *sched)
+ 
+ 	setup_pager();
+ 
++	evsel__set_priv_destructor(timehist__evsel_priv_destructor);
++
+ 	/* prefer sched_waking if it is captured */
+ 	if (evlist__find_tracepoint_by_name(session->evlist, "sched:sched_waking"))
+ 		handlers[1].handler = timehist_sched_wakeup_ignore;
+@@ -3376,13 +3405,13 @@ static void __merge_work_atoms(struct rb_root_cached *root, struct work_atoms *d
+ 			this->total_runtime += data->total_runtime;
+ 			this->nb_atoms += data->nb_atoms;
+ 			this->total_lat += data->total_lat;
+-			list_splice(&data->work_list, &this->work_list);
++			list_splice_init(&data->work_list, &this->work_list);
+ 			if (this->max_lat < data->max_lat) {
+ 				this->max_lat = data->max_lat;
+ 				this->max_lat_start = data->max_lat_start;
+ 				this->max_lat_end = data->max_lat_end;
+ 			}
+-			zfree(&data);
++			free_work_atoms(data);
+ 			return;
+ 		}
+ 	}
+@@ -3461,7 +3490,6 @@ static int perf_sched__lat(struct perf_sched *sched)
+ 		work_list = rb_entry(next, struct work_atoms, node);
+ 		output_lat_thread(sched, work_list);
+ 		next = rb_next(next);
+-		thread__zput(work_list->thread);
+ 	}
+ 
+ 	printf(" -----------------------------------------------------------------------------------------------------------------\n");
+@@ -3475,6 +3503,13 @@ static int perf_sched__lat(struct perf_sched *sched)
+ 
+ 	rc = 0;
+ 
++	while ((next = rb_first_cached(&sched->sorted_atom_root))) {
++		struct work_atoms *data;
++
++		data = rb_entry(next, struct work_atoms, node);
++		rb_erase_cached(next, &sched->sorted_atom_root);
++		free_work_atoms(data);
++	}
+ out_free_cpus_switch_event:
+ 	free_cpus_switch_event(sched);
+ 	return rc;
+@@ -3546,10 +3581,10 @@ static int perf_sched__map(struct perf_sched *sched)
+ 
+ 	sched->curr_out_thread = calloc(MAX_CPUS, sizeof(*(sched->curr_out_thread)));
+ 	if (!sched->curr_out_thread)
+-		return rc;
++		goto out_free_curr_thread;
+ 
+ 	if (setup_cpus_switch_event(sched))
+-		goto out_free_curr_thread;
++		goto out_free_curr_out_thread;
+ 
+ 	if (setup_map_cpus(sched))
+ 		goto out_free_cpus_switch_event;
+@@ -3580,7 +3615,14 @@ static int perf_sched__map(struct perf_sched *sched)
+ out_free_cpus_switch_event:
+ 	free_cpus_switch_event(sched);
+ 
++out_free_curr_out_thread:
++	for (int i = 0; i < MAX_CPUS; i++)
++		thread__put(sched->curr_out_thread[i]);
++	zfree(&sched->curr_out_thread);
++
+ out_free_curr_thread:
++	for (int i = 0; i < MAX_CPUS; i++)
++		thread__put(sched->curr_thread[i]);
+ 	zfree(&sched->curr_thread);
+ 	return rc;
+ }
+@@ -3887,13 +3929,15 @@ int cmd_sched(int argc, const char **argv)
+ 	if (!argc)
+ 		usage_with_options(sched_usage, sched_options);
+ 
++	thread__set_priv_destructor(free);
++
+ 	/*
+ 	 * Aliased to 'perf script' for now:
+ 	 */
+ 	if (!strcmp(argv[0], "script")) {
+-		return cmd_script(argc, argv);
++		ret = cmd_script(argc, argv);
+ 	} else if (strlen(argv[0]) > 2 && strstarts("record", argv[0])) {
+-		return __cmd_record(argc, argv);
++		ret = __cmd_record(argc, argv);
+ 	} else if (strlen(argv[0]) > 2 && strstarts("latency", argv[0])) {
+ 		sched.tp_handler = &lat_ops;
+ 		if (argc > 1) {
+@@ -3902,7 +3946,7 @@ int cmd_sched(int argc, const char **argv)
+ 				usage_with_options(latency_usage, latency_options);
+ 		}
+ 		setup_sorting(&sched, latency_options, latency_usage);
+-		return perf_sched__lat(&sched);
++		ret = perf_sched__lat(&sched);
+ 	} else if (!strcmp(argv[0], "map")) {
+ 		if (argc) {
+ 			argc = parse_options(argc, argv, map_options, map_usage, 0);
+@@ -3913,13 +3957,14 @@ int cmd_sched(int argc, const char **argv)
+ 				sched.map.task_names = strlist__new(sched.map.task_name, NULL);
+ 				if (sched.map.task_names == NULL) {
+ 					fprintf(stderr, "Failed to parse task names\n");
+-					return -1;
++					ret = -1;
++					goto out;
+ 				}
+ 			}
+ 		}
+ 		sched.tp_handler = &map_ops;
+ 		setup_sorting(&sched, latency_options, latency_usage);
+-		return perf_sched__map(&sched);
++		ret = perf_sched__map(&sched);
+ 	} else if (strlen(argv[0]) > 2 && strstarts("replay", argv[0])) {
+ 		sched.tp_handler = &replay_ops;
+ 		if (argc) {
+@@ -3927,7 +3972,7 @@ int cmd_sched(int argc, const char **argv)
+ 			if (argc)
+ 				usage_with_options(replay_usage, replay_options);
+ 		}
+-		return perf_sched__replay(&sched);
++		ret = perf_sched__replay(&sched);
+ 	} else if (!strcmp(argv[0], "timehist")) {
+ 		if (argc) {
+ 			argc = parse_options(argc, argv, timehist_options,
+@@ -3943,19 +3988,19 @@ int cmd_sched(int argc, const char **argv)
+ 				parse_options_usage(NULL, timehist_options, "w", true);
+ 			if (sched.show_next)
+ 				parse_options_usage(NULL, timehist_options, "n", true);
+-			return -EINVAL;
++			ret = -EINVAL;
++			goto out;
+ 		}
+ 		ret = symbol__validate_sym_arguments();
+-		if (ret)
+-			return ret;
+-
+-		return perf_sched__timehist(&sched);
++		if (!ret)
++			ret = perf_sched__timehist(&sched);
+ 	} else {
+ 		usage_with_options(sched_usage, sched_options);
+ 	}
+ 
++out:
+ 	/* free usage string allocated by parse_options_subcommand */
+ 	free((void *)sched_usage[0]);
+ 
+-	return 0;
++	return ret;
+ }
+diff --git a/tools/perf/tests/bp_account.c b/tools/perf/tests/bp_account.c
+index 4cb7d486b5c178..047433c977bc9d 100644
+--- a/tools/perf/tests/bp_account.c
++++ b/tools/perf/tests/bp_account.c
+@@ -104,6 +104,7 @@ static int bp_accounting(int wp_cnt, int share)
+ 		fd_wp = wp_event((void *)&the_var, &attr_new);
+ 		TEST_ASSERT_VAL("failed to create max wp\n", fd_wp != -1);
+ 		pr_debug("wp max created\n");
++		close(fd_wp);
+ 	}
+ 
+ 	for (i = 0; i < wp_cnt; i++)
+diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
+index e763e8d99a4367..ee00313d5d7e2a 100644
+--- a/tools/perf/util/build-id.c
++++ b/tools/perf/util/build-id.c
+@@ -864,7 +864,7 @@ static int dso__cache_build_id(struct dso *dso, struct machine *machine,
+ 	char *allocated_name = NULL;
+ 	int ret = 0;
+ 
+-	if (!dso__has_build_id(dso))
++	if (!dso__has_build_id(dso) || !dso__hit(dso))
+ 		return 0;
+ 
+ 	if (dso__is_kcore(dso)) {
+diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
+index dbf9c8cee3c565..6d7249cc1a993a 100644
+--- a/tools/perf/util/evsel.c
++++ b/tools/perf/util/evsel.c
+@@ -1477,6 +1477,15 @@ static void evsel__free_config_terms(struct evsel *evsel)
+ 	free_config_terms(&evsel->config_terms);
+ }
+ 
++static void (*evsel__priv_destructor)(void *priv);
++
++void evsel__set_priv_destructor(void (*destructor)(void *priv))
++{
++	assert(evsel__priv_destructor == NULL);
++
++	evsel__priv_destructor = destructor;
++}
++
+ void evsel__exit(struct evsel *evsel)
+ {
+ 	assert(list_empty(&evsel->core.node));
+@@ -1502,6 +1511,8 @@ void evsel__exit(struct evsel *evsel)
+ 	hashmap__free(evsel->per_pkg_mask);
+ 	evsel->per_pkg_mask = NULL;
+ 	zfree(&evsel->metric_events);
++	if (evsel__priv_destructor)
++		evsel__priv_destructor(evsel->priv);
+ 	perf_evsel__object.fini(evsel);
+ 	if (evsel__tool_event(evsel) == PERF_TOOL_SYSTEM_TIME ||
+ 	    evsel__tool_event(evsel) == PERF_TOOL_USER_TIME)
+diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
+index 15e745a9a798fa..26574a33a72502 100644
+--- a/tools/perf/util/evsel.h
++++ b/tools/perf/util/evsel.h
+@@ -282,6 +282,8 @@ void evsel__init(struct evsel *evsel, struct perf_event_attr *attr, int idx);
+ void evsel__exit(struct evsel *evsel);
+ void evsel__delete(struct evsel *evsel);
+ 
++void evsel__set_priv_destructor(void (*destructor)(void *priv));
++
+ struct callchain_param;
+ 
+ void evsel__config(struct evsel *evsel, struct record_opts *opts,
+diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
+index 3bbf173ad822bc..c0ec5ed4f1aa41 100644
+--- a/tools/perf/util/symbol.c
++++ b/tools/perf/util/symbol.c
+@@ -1405,6 +1405,7 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
+ 				goto out_err;
+ 			}
+ 		}
++		map__zput(new_node->map);
+ 		free(new_node);
+ 	}
+ 
+diff --git a/tools/testing/selftests/alsa/utimer-test.c b/tools/testing/selftests/alsa/utimer-test.c
+index 32ee3ce577216b..37964f311a3397 100644
+--- a/tools/testing/selftests/alsa/utimer-test.c
++++ b/tools/testing/selftests/alsa/utimer-test.c
+@@ -135,6 +135,7 @@ TEST_F(timer_f, utimer) {
+ 	pthread_join(ticking_thread, NULL);
+ 	ASSERT_EQ(total_ticks, TICKS_COUNT);
+ 	pclose(rfp);
++	free(buf);
+ }
+ 
+ TEST(wrong_timers_test) {
+diff --git a/tools/testing/selftests/arm64/fp/sve-ptrace.c b/tools/testing/selftests/arm64/fp/sve-ptrace.c
+index 6d61992fe8a01e..c6228176dd1a0c 100644
+--- a/tools/testing/selftests/arm64/fp/sve-ptrace.c
++++ b/tools/testing/selftests/arm64/fp/sve-ptrace.c
+@@ -251,7 +251,7 @@ static void ptrace_set_get_vl(pid_t child, const struct vec_type *type,
+ 		return;
+ 	}
+ 
+-	ksft_test_result(new_sve->vl = prctl_vl, "Set %s VL %u\n",
++	ksft_test_result(new_sve->vl == prctl_vl, "Set %s VL %u\n",
+ 			 type->name, vl);
+ 
+ 	free(new_sve);
+diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
+index 4ee1148d22be3d..1cfed83156b035 100644
+--- a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
++++ b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
+@@ -924,6 +924,8 @@ static void redir_partial(int family, int sotype, int sock_map, int parser_map)
+ 		goto close;
+ 
+ 	n = xsend(c1, buf, sizeof(buf), 0);
++	if (n == -1)
++		goto close;
+ 	if (n < sizeof(buf))
+ 		FAIL("incomplete write");
+ 
+diff --git a/tools/testing/selftests/bpf/progs/verifier_arena_large.c b/tools/testing/selftests/bpf/progs/verifier_arena_large.c
+index 6065f862d96438..758b09a5eb88b9 100644
+--- a/tools/testing/selftests/bpf/progs/verifier_arena_large.c
++++ b/tools/testing/selftests/bpf/progs/verifier_arena_large.c
+@@ -29,12 +29,12 @@ int big_alloc1(void *ctx)
+ 	if (!page1)
+ 		return 1;
+ 	*page1 = 1;
+-	page2 = bpf_arena_alloc_pages(&arena, base + ARENA_SIZE - PAGE_SIZE,
++	page2 = bpf_arena_alloc_pages(&arena, base + ARENA_SIZE - PAGE_SIZE * 2,
+ 				      1, NUMA_NO_NODE, 0);
+ 	if (!page2)
+ 		return 2;
+ 	*page2 = 2;
+-	no_page = bpf_arena_alloc_pages(&arena, base + ARENA_SIZE,
++	no_page = bpf_arena_alloc_pages(&arena, base + ARENA_SIZE - PAGE_SIZE,
+ 					1, NUMA_NO_NODE, 0);
+ 	if (no_page)
+ 		return 3;
+@@ -66,4 +66,110 @@ int big_alloc1(void *ctx)
+ #endif
+ 	return 0;
+ }
++
++#if defined(__BPF_FEATURE_ADDR_SPACE_CAST)
++#define PAGE_CNT 100
++__u8 __arena * __arena page[PAGE_CNT]; /* occupies the first page */
++__u8 __arena *base;
++
++/*
++ * Check that arena's range_tree algorithm allocates pages sequentially
++ * on the first pass and then fills in all gaps on the second pass.
++ */
++__noinline int alloc_pages(int page_cnt, int pages_atonce, bool first_pass,
++		int max_idx, int step)
++{
++	__u8 __arena *pg;
++	int i, pg_idx;
++
++	for (i = 0; i < page_cnt; i++) {
++		pg = bpf_arena_alloc_pages(&arena, NULL, pages_atonce,
++					   NUMA_NO_NODE, 0);
++		if (!pg)
++			return step;
++		pg_idx = (unsigned long) (pg - base) / PAGE_SIZE;
++		if (first_pass) {
++			/* Pages must be allocated sequentially */
++			if (pg_idx != i)
++				return step + 100;
++		} else {
++			/* Allocator must fill into gaps */
++			if (pg_idx >= max_idx || (pg_idx & 1))
++				return step + 200;
++		}
++		*pg = pg_idx;
++		page[pg_idx] = pg;
++		cond_break;
++	}
++	return 0;
++}
++
++//SEC("syscall")
++//__success __retval(0)
++int big_alloc2(void *ctx)
++{
++	__u8 __arena *pg;
++	int i, err;
++
++	base = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
++	if (!base)
++		return 1;
++	bpf_arena_free_pages(&arena, (void __arena *)base, 1);
++
++	err = alloc_pages(PAGE_CNT, 1, true, PAGE_CNT, 2);
++	if (err)
++		return err;
++
++	/* Clear all even pages */
++	for (i = 0; i < PAGE_CNT; i += 2) {
++		pg = page[i];
++		if (*pg != i)
++			return 3;
++		bpf_arena_free_pages(&arena, (void __arena *)pg, 1);
++		page[i] = NULL;
++		cond_break;
++	}
++
++	/* Allocate into freed gaps */
++	err = alloc_pages(PAGE_CNT / 2, 1, false, PAGE_CNT, 4);
++	if (err)
++		return err;
++
++	/* Free pairs of pages */
++	for (i = 0; i < PAGE_CNT; i += 4) {
++		pg = page[i];
++		if (*pg != i)
++			return 5;
++		bpf_arena_free_pages(&arena, (void __arena *)pg, 2);
++		page[i] = NULL;
++		page[i + 1] = NULL;
++		cond_break;
++	}
++
++	/* Allocate 2 pages at a time into freed gaps */
++	err = alloc_pages(PAGE_CNT / 4, 2, false, PAGE_CNT, 6);
++	if (err)
++		return err;
++
++	/* Check pages without freeing */
++	for (i = 0; i < PAGE_CNT; i += 2) {
++		pg = page[i];
++		if (*pg != i)
++			return 7;
++		cond_break;
++	}
++
++	pg = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
++
++	if (!pg)
++		return 8;
++	/*
++	 * The first PAGE_CNT pages are occupied. The new page
++	 * must be above.
++	 */
++	if ((pg - base) / PAGE_SIZE < PAGE_CNT)
++		return 9;
++	return 0;
++}
++#endif
+ char _license[] SEC("license") = "GPL";
+diff --git a/tools/testing/selftests/bpf/veristat.c b/tools/testing/selftests/bpf/veristat.c
+index 1ec5c4c47235e7..7b6b9c4cadb570 100644
+--- a/tools/testing/selftests/bpf/veristat.c
++++ b/tools/testing/selftests/bpf/veristat.c
+@@ -309,6 +309,7 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state)
+ 			fprintf(stderr, "invalid top N specifier: %s\n", arg);
+ 			argp_usage(state);
+ 		}
++		break;
+ 	case 'C':
+ 		env.comparison_mode = true;
+ 		break;
+diff --git a/tools/testing/selftests/breakpoints/step_after_suspend_test.c b/tools/testing/selftests/breakpoints/step_after_suspend_test.c
+index 8d275f03e977f5..8d233ac95696be 100644
+--- a/tools/testing/selftests/breakpoints/step_after_suspend_test.c
++++ b/tools/testing/selftests/breakpoints/step_after_suspend_test.c
+@@ -127,22 +127,42 @@ int run_test(int cpu)
+ 	return KSFT_PASS;
+ }
+ 
++/*
++ * Reads the suspend success count from sysfs.
++ * Returns the count on success or exits on failure.
++ */
++static int get_suspend_success_count_or_fail(void)
++{
++	FILE *fp;
++	int val;
++
++	fp = fopen("/sys/power/suspend_stats/success", "r");
++	if (!fp)
++		ksft_exit_fail_msg(
++			"Failed to open suspend_stats/success: %s\n",
++			strerror(errno));
++
++	if (fscanf(fp, "%d", &val) != 1) {
++		fclose(fp);
++		ksft_exit_fail_msg(
++			"Failed to read suspend success count\n");
++	}
++
++	fclose(fp);
++	return val;
++}
++
+ void suspend(void)
+ {
+-	int power_state_fd;
+ 	int timerfd;
+ 	int err;
++	int count_before;
++	int count_after;
+ 	struct itimerspec spec = {};
+ 
+ 	if (getuid() != 0)
+ 		ksft_exit_skip("Please run the test as root - Exiting.\n");
+ 
+-	power_state_fd = open("/sys/power/state", O_RDWR);
+-	if (power_state_fd < 0)
+-		ksft_exit_fail_msg(
+-			"open(\"/sys/power/state\") failed %s)\n",
+-			strerror(errno));
+-
+ 	timerfd = timerfd_create(CLOCK_BOOTTIME_ALARM, 0);
+ 	if (timerfd < 0)
+ 		ksft_exit_fail_msg("timerfd_create() failed\n");
+@@ -152,14 +172,15 @@ void suspend(void)
+ 	if (err < 0)
+ 		ksft_exit_fail_msg("timerfd_settime() failed\n");
+ 
++	count_before = get_suspend_success_count_or_fail();
++
+ 	system("(echo mem > /sys/power/state) 2> /dev/null");
+ 
+-	timerfd_gettime(timerfd, &spec);
+-	if (spec.it_value.tv_sec != 0 || spec.it_value.tv_nsec != 0)
++	count_after = get_suspend_success_count_or_fail();
++	if (count_after <= count_before)
+ 		ksft_exit_fail_msg("Failed to enter Suspend state\n");
+ 
+ 	close(timerfd);
+-	close(power_state_fd);
+ }
+ 
+ int main(int argc, char **argv)
+diff --git a/tools/testing/selftests/drivers/net/lib/py/env.py b/tools/testing/selftests/drivers/net/lib/py/env.py
+index 1ea9bb695e9401..3f35faca655991 100644
+--- a/tools/testing/selftests/drivers/net/lib/py/env.py
++++ b/tools/testing/selftests/drivers/net/lib/py/env.py
+@@ -224,7 +224,7 @@ class NetDrvEpEnv:
+             if not self._require_cmd(comm, "local"):
+                 raise KsftSkipEx("Test requires command: " + comm)
+         if remote:
+-            if not self._require_cmd(comm, "remote"):
++            if not self._require_cmd(comm, "remote", host=self.remote):
+                 raise KsftSkipEx("Test requires (remote) command: " + comm)
+ 
+     def wait_hw_stats_settle(self):
+diff --git a/tools/testing/selftests/ftrace/test.d/event/subsystem-enable.tc b/tools/testing/selftests/ftrace/test.d/event/subsystem-enable.tc
+index b7c8f29c09a978..65916bb55dfbbf 100644
+--- a/tools/testing/selftests/ftrace/test.d/event/subsystem-enable.tc
++++ b/tools/testing/selftests/ftrace/test.d/event/subsystem-enable.tc
+@@ -14,11 +14,35 @@ fail() { #msg
+     exit_fail
+ }
+ 
++# As reading trace can last forever, simply look for 3 different
++# events then exit out of reading the file. If there's not 3 different
++# events, then the test has failed.
++check_unique() {
++    cat trace | grep -v '^#' | awk '
++	BEGIN { cnt = 0; }
++	{
++	    for (i = 0; i < cnt; i++) {
++		if (event[i] == $5) {
++		    break;
++		}
++	    }
++	    if (i == cnt) {
++		event[cnt++] = $5;
++		if (cnt > 2) {
++		    exit;
++		}
++	    }
++	}
++	END {
++	    printf "%d", cnt;
++	}'
++}
++
+ echo 'sched:*' > set_event
+ 
+ yield
+ 
+-count=`head -n 100 trace | grep -v ^# | awk '{ print $5 }' | sort -u | wc -l`
++count=`check_unique`
+ if [ $count -lt 3 ]; then
+     fail "at least fork, exec and exit events should be recorded"
+ fi
+@@ -29,7 +53,7 @@ echo 1 > events/sched/enable
+ 
+ yield
+ 
+-count=`head -n 100 trace | grep -v ^# | awk '{ print $5 }' | sort -u | wc -l`
++count=`check_unique`
+ if [ $count -lt 3 ]; then
+     fail "at least fork, exec and exit events should be recorded"
+ fi
+diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh
+index 87dce3efe31e4a..8a92432177d3b2 100755
+--- a/tools/testing/selftests/net/rtnetlink.sh
++++ b/tools/testing/selftests/net/rtnetlink.sh
+@@ -738,6 +738,11 @@ kci_test_ipsec_offload()
+ 	sysfsf=$sysfsd/ipsec
+ 	sysfsnet=/sys/bus/netdevsim/devices/netdevsim0/net/
+ 	probed=false
++	esp4_offload_probed_default=false
++
++	if lsmod | grep -q esp4_offload; then
++		esp4_offload_probed_default=true
++	fi
+ 
+ 	if ! mount | grep -q debugfs; then
+ 		mount -t debugfs none /sys/kernel/debug/ &> /dev/null
+@@ -831,6 +836,7 @@ EOF
+ 	fi
+ 
+ 	# clean up any leftovers
++	! "$esp4_offload_probed_default" && lsmod | grep -q esp4_offload && rmmod esp4_offload
+ 	echo 0 > /sys/bus/netdevsim/del_device
+ 	$probed && rmmod netdevsim
+ 
+diff --git a/tools/testing/selftests/perf_events/.gitignore b/tools/testing/selftests/perf_events/.gitignore
+index ee93dc4969b8b5..4931b3b6bbd397 100644
+--- a/tools/testing/selftests/perf_events/.gitignore
++++ b/tools/testing/selftests/perf_events/.gitignore
+@@ -2,3 +2,4 @@
+ sigtrap_threads
+ remove_on_exec
+ watermark_signal
++mmap
+diff --git a/tools/testing/selftests/perf_events/Makefile b/tools/testing/selftests/perf_events/Makefile
+index 70e3ff21127890..2e5d85770dfead 100644
+--- a/tools/testing/selftests/perf_events/Makefile
++++ b/tools/testing/selftests/perf_events/Makefile
+@@ -2,5 +2,5 @@
+ CFLAGS += -Wl,-no-as-needed -Wall $(KHDR_INCLUDES)
+ LDFLAGS += -lpthread
+ 
+-TEST_GEN_PROGS := sigtrap_threads remove_on_exec watermark_signal
++TEST_GEN_PROGS := sigtrap_threads remove_on_exec watermark_signal mmap
+ include ../lib.mk
+diff --git a/tools/testing/selftests/perf_events/mmap.c b/tools/testing/selftests/perf_events/mmap.c
+new file mode 100644
+index 00000000000000..ea0427aac1f98f
+--- /dev/null
++++ b/tools/testing/selftests/perf_events/mmap.c
+@@ -0,0 +1,236 @@
++// SPDX-License-Identifier: GPL-2.0-only
++#define _GNU_SOURCE
++
++#include <dirent.h>
++#include <sched.h>
++#include <stdbool.h>
++#include <stdio.h>
++#include <unistd.h>
++
++#include <sys/ioctl.h>
++#include <sys/mman.h>
++#include <sys/syscall.h>
++#include <sys/types.h>
++
++#include <linux/perf_event.h>
++
++#include "../kselftest_harness.h"
++
++#define RB_SIZE		0x3000
++#define AUX_SIZE	0x10000
++#define AUX_OFFS	0x4000
++
++#define HOLE_SIZE	0x1000
++
++/* Reserve space for rb, aux with space for shrink-beyond-vma testing. */
++#define REGION_SIZE	(2 * RB_SIZE + 2 * AUX_SIZE)
++#define REGION_AUX_OFFS (2 * RB_SIZE)
++
++#define MAP_BASE	1
++#define MAP_AUX		2
++
++#define EVENT_SRC_DIR	"/sys/bus/event_source/devices"
++
++FIXTURE(perf_mmap)
++{
++	int		fd;
++	void		*ptr;
++	void		*region;
++};
++
++FIXTURE_VARIANT(perf_mmap)
++{
++	bool		aux;
++	unsigned long	ptr_size;
++};
++
++FIXTURE_VARIANT_ADD(perf_mmap, rb)
++{
++	.aux = false,
++	.ptr_size = RB_SIZE,
++};
++
++FIXTURE_VARIANT_ADD(perf_mmap, aux)
++{
++	.aux = true,
++	.ptr_size = AUX_SIZE,
++};
++
++static bool read_event_type(struct dirent *dent, __u32 *type)
++{
++	char typefn[512];
++	FILE *fp;
++	int res;
++
++	snprintf(typefn, sizeof(typefn), "%s/%s/type", EVENT_SRC_DIR, dent->d_name);
++	fp = fopen(typefn, "r");
++	if (!fp)
++		return false;
++
++	res = fscanf(fp, "%u", type);
++	fclose(fp);
++	return res > 0;
++}
++
++FIXTURE_SETUP(perf_mmap)
++{
++	struct perf_event_attr attr = {
++		.size		= sizeof(attr),
++		.disabled	= 1,
++		.exclude_kernel	= 1,
++		.exclude_hv	= 1,
++	};
++	struct perf_event_attr attr_ok = {};
++	unsigned int eacces = 0, map = 0;
++	struct perf_event_mmap_page *rb;
++	struct dirent *dent;
++	void *aux, *region;
++	DIR *dir;
++
++	self->ptr = NULL;
++
++	dir = opendir(EVENT_SRC_DIR);
++	if (!dir)
++		SKIP(return, "perf not available.");
++
++	region = mmap(NULL, REGION_SIZE, PROT_NONE, MAP_ANON | MAP_PRIVATE, -1, 0);
++	ASSERT_NE(region, MAP_FAILED);
++	self->region = region;
++
++	// Try to find a suitable event on this system
++	while ((dent = readdir(dir))) {
++		int fd;
++
++		if (!read_event_type(dent, &attr.type))
++			continue;
++
++		fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
++		if (fd < 0) {
++			if (errno == EACCES)
++				eacces++;
++			continue;
++		}
++
++		// Check whether the event supports mmap()
++		rb = mmap(region, RB_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, fd, 0);
++		if (rb == MAP_FAILED) {
++			close(fd);
++			continue;
++		}
++
++		if (!map) {
++			// Save the event in case that no AUX capable event is found
++			attr_ok = attr;
++			map = MAP_BASE;
++		}
++
++		if (!variant->aux)
++			continue;
++
++		rb->aux_offset = AUX_OFFS;
++		rb->aux_size = AUX_SIZE;
++
++		// Check whether it supports an AUX buffer
++		aux = mmap(region + REGION_AUX_OFFS, AUX_SIZE, PROT_READ | PROT_WRITE,
++			   MAP_SHARED | MAP_FIXED, fd, AUX_OFFS);
++		if (aux == MAP_FAILED) {
++			munmap(rb, RB_SIZE);
++			close(fd);
++			continue;
++		}
++
++		attr_ok = attr;
++		map = MAP_AUX;
++		munmap(aux, AUX_SIZE);
++		munmap(rb, RB_SIZE);
++		close(fd);
++		break;
++	}
++	closedir(dir);
++
++	if (!map) {
++		if (!eacces)
++			SKIP(return, "No mappable perf event found.");
++		else
++			SKIP(return, "No permissions for perf_event_open()");
++	}
++
++	self->fd = syscall(SYS_perf_event_open, &attr_ok, 0, -1, -1, 0);
++	ASSERT_NE(self->fd, -1);
++
++	rb = mmap(region, RB_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, self->fd, 0);
++	ASSERT_NE(rb, MAP_FAILED);
++
++	if (!variant->aux) {
++		self->ptr = rb;
++		return;
++	}
++
++	if (map != MAP_AUX)
++		SKIP(return, "No AUX event found.");
++
++	rb->aux_offset = AUX_OFFS;
++	rb->aux_size = AUX_SIZE;
++	aux = mmap(region + REGION_AUX_OFFS, AUX_SIZE, PROT_READ | PROT_WRITE,
++		   MAP_SHARED | MAP_FIXED, self->fd, AUX_OFFS);
++	ASSERT_NE(aux, MAP_FAILED);
++	self->ptr = aux;
++}
++
++FIXTURE_TEARDOWN(perf_mmap)
++{
++	ASSERT_EQ(munmap(self->region, REGION_SIZE), 0);
++	if (self->fd != -1)
++		ASSERT_EQ(close(self->fd), 0);
++}
++
++TEST_F(perf_mmap, remap)
++{
++	void *tmp, *ptr = self->ptr;
++	unsigned long size = variant->ptr_size;
++
++	// Test the invalid remaps
++	ASSERT_EQ(mremap(ptr, size, HOLE_SIZE, MREMAP_MAYMOVE), MAP_FAILED);
++	ASSERT_EQ(mremap(ptr + HOLE_SIZE, size, HOLE_SIZE, MREMAP_MAYMOVE), MAP_FAILED);
++	ASSERT_EQ(mremap(ptr + size - HOLE_SIZE, HOLE_SIZE, size, MREMAP_MAYMOVE), MAP_FAILED);
++	// Shrink the end of the mapping such that we only unmap past end of the VMA,
++	// which should succeed and poke a hole into the PROT_NONE region
++	ASSERT_NE(mremap(ptr + size - HOLE_SIZE, size, HOLE_SIZE, MREMAP_MAYMOVE), MAP_FAILED);
++
++	// Remap the whole buffer to a new address
++	tmp = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
++	ASSERT_NE(tmp, MAP_FAILED);
++
++	// Try splitting offset 1 hole size into VMA, this should fail
++	ASSERT_EQ(mremap(ptr + HOLE_SIZE, size - HOLE_SIZE, size - HOLE_SIZE,
++			 MREMAP_MAYMOVE | MREMAP_FIXED, tmp), MAP_FAILED);
++	// Remapping the whole thing should succeed fine
++	ptr = mremap(ptr, size, size, MREMAP_MAYMOVE | MREMAP_FIXED, tmp);
++	ASSERT_EQ(ptr, tmp);
++	ASSERT_EQ(munmap(tmp, size), 0);
++}
++
++TEST_F(perf_mmap, unmap)
++{
++	unsigned long size = variant->ptr_size;
++
++	// Try to poke holes into the mappings
++	ASSERT_NE(munmap(self->ptr, HOLE_SIZE), 0);
++	ASSERT_NE(munmap(self->ptr + HOLE_SIZE, HOLE_SIZE), 0);
++	ASSERT_NE(munmap(self->ptr + size - HOLE_SIZE, HOLE_SIZE), 0);
++}
++
++TEST_F(perf_mmap, map)
++{
++	unsigned long size = variant->ptr_size;
++
++	// Try to poke holes into the mappings by mapping anonymous memory over it
++	ASSERT_EQ(mmap(self->ptr, HOLE_SIZE, PROT_READ | PROT_WRITE,
++		       MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0), MAP_FAILED);
++	ASSERT_EQ(mmap(self->ptr + HOLE_SIZE, HOLE_SIZE, PROT_READ | PROT_WRITE,
++		       MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0), MAP_FAILED);
++	ASSERT_EQ(mmap(self->ptr + size - HOLE_SIZE, HOLE_SIZE, PROT_READ | PROT_WRITE,
++		       MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0), MAP_FAILED);
++}
++
++TEST_HARNESS_MAIN
+diff --git a/tools/testing/selftests/syscall_user_dispatch/sud_test.c b/tools/testing/selftests/syscall_user_dispatch/sud_test.c
+index d975a67673299f..48cf01aeec3e77 100644
+--- a/tools/testing/selftests/syscall_user_dispatch/sud_test.c
++++ b/tools/testing/selftests/syscall_user_dispatch/sud_test.c
+@@ -79,6 +79,21 @@ TEST_SIGNAL(dispatch_trigger_sigsys, SIGSYS)
+ 	}
+ }
+ 
++static void prctl_valid(struct __test_metadata *_metadata,
++			unsigned long op, unsigned long off,
++			unsigned long size, void *sel)
++{
++	EXPECT_EQ(0, prctl(PR_SET_SYSCALL_USER_DISPATCH, op, off, size, sel));
++}
++
++static void prctl_invalid(struct __test_metadata *_metadata,
++			  unsigned long op, unsigned long off,
++			  unsigned long size, void *sel, int err)
++{
++	EXPECT_EQ(-1, prctl(PR_SET_SYSCALL_USER_DISPATCH, op, off, size, sel));
++	EXPECT_EQ(err, errno);
++}
++
+ TEST(bad_prctl_param)
+ {
+ 	char sel = SYSCALL_DISPATCH_FILTER_ALLOW;
+@@ -86,57 +101,42 @@ TEST(bad_prctl_param)
+ 
+ 	/* Invalid op */
+ 	op = -1;
+-	prctl(PR_SET_SYSCALL_USER_DISPATCH, op, 0, 0, &sel);
+-	ASSERT_EQ(EINVAL, errno);
++	prctl_invalid(_metadata, op, 0, 0, &sel, EINVAL);
+ 
+ 	/* PR_SYS_DISPATCH_OFF */
+ 	op = PR_SYS_DISPATCH_OFF;
+ 
+ 	/* offset != 0 */
+-	prctl(PR_SET_SYSCALL_USER_DISPATCH, op, 0x1, 0x0, 0);
+-	EXPECT_EQ(EINVAL, errno);
++	prctl_invalid(_metadata, op, 0x1, 0x0, 0, EINVAL);
+ 
+ 	/* len != 0 */
+-	prctl(PR_SET_SYSCALL_USER_DISPATCH, op, 0x0, 0xff, 0);
+-	EXPECT_EQ(EINVAL, errno);
++	prctl_invalid(_metadata, op, 0x0, 0xff, 0, EINVAL);
+ 
+ 	/* sel != NULL */
+-	prctl(PR_SET_SYSCALL_USER_DISPATCH, op, 0x0, 0x0, &sel);
+-	EXPECT_EQ(EINVAL, errno);
++	prctl_invalid(_metadata, op, 0x0, 0x0, &sel, EINVAL);
+ 
+ 	/* Valid parameter */
+-	errno = 0;
+-	prctl(PR_SET_SYSCALL_USER_DISPATCH, op, 0x0, 0x0, 0x0);
+-	EXPECT_EQ(0, errno);
++	prctl_valid(_metadata, op, 0x0, 0x0, 0x0);
+ 
+ 	/* PR_SYS_DISPATCH_ON */
+ 	op = PR_SYS_DISPATCH_ON;
+ 
+ 	/* Dispatcher region is bad (offset > 0 && len == 0) */
+-	prctl(PR_SET_SYSCALL_USER_DISPATCH, op, 0x1, 0x0, &sel);
+-	EXPECT_EQ(EINVAL, errno);
+-	prctl(PR_SET_SYSCALL_USER_DISPATCH, op, -1L, 0x0, &sel);
+-	EXPECT_EQ(EINVAL, errno);
++	prctl_invalid(_metadata, op, 0x1, 0x0, &sel, EINVAL);
++	prctl_invalid(_metadata, op, -1L, 0x0, &sel, EINVAL);
+ 
+ 	/* Invalid selector */
+-	prctl(PR_SET_SYSCALL_USER_DISPATCH, op, 0x0, 0x1, (void *) -1);
+-	ASSERT_EQ(EFAULT, errno);
++	prctl_invalid(_metadata, op, 0x0, 0x1, (void *) -1, EFAULT);
+ 
+ 	/*
+ 	 * Dispatcher range overflows unsigned long
+ 	 */
+-	prctl(PR_SET_SYSCALL_USER_DISPATCH, PR_SYS_DISPATCH_ON, 1, -1L, &sel);
+-	ASSERT_EQ(EINVAL, errno) {
+-		TH_LOG("Should reject bad syscall range");
+-	}
++	prctl_invalid(_metadata, PR_SYS_DISPATCH_ON, 1, -1L, &sel, EINVAL);
+ 
+ 	/*
+ 	 * Allowed range overflows unsigned long
+ 	 */
+-	prctl(PR_SET_SYSCALL_USER_DISPATCH, PR_SYS_DISPATCH_ON, -1L, 0x1, &sel);
+-	ASSERT_EQ(EINVAL, errno) {
+-		TH_LOG("Should reject bad syscall range");
+-	}
++	prctl_invalid(_metadata, PR_SYS_DISPATCH_ON, -1L, 0x1, &sel, EINVAL);
+ }
+ 
+ /*
+diff --git a/tools/testing/selftests/vDSO/vdso_test_chacha.c b/tools/testing/selftests/vDSO/vdso_test_chacha.c
+index 8757f738b0b1a7..0aad682b12c883 100644
+--- a/tools/testing/selftests/vDSO/vdso_test_chacha.c
++++ b/tools/testing/selftests/vDSO/vdso_test_chacha.c
+@@ -76,7 +76,8 @@ static void reference_chacha20_blocks(uint8_t *dst_bytes, const uint32_t *key, u
+ 
+ void __weak __arch_chacha20_blocks_nostack(uint8_t *dst_bytes, const uint32_t *key, uint32_t *counter, size_t nblocks)
+ {
+-	ksft_exit_skip("Not implemented on architecture\n");
++	ksft_test_result_skip("Not implemented on architecture\n");
++	ksft_finished();
+ }
+ 
+ int main(int argc, char *argv[])
+diff --git a/tools/verification/rv/src/in_kernel.c b/tools/verification/rv/src/in_kernel.c
+index f04479ecc96c0b..ced72950cb1eed 100644
+--- a/tools/verification/rv/src/in_kernel.c
++++ b/tools/verification/rv/src/in_kernel.c
+@@ -353,7 +353,7 @@ ikm_event_handler(struct trace_seq *s, struct tep_record *record,
+ 
+ 	if (config_has_id && (config_my_pid == id))
+ 		return 0;
+-	else if (config_my_pid && (config_my_pid == pid))
++	else if (config_my_pid == pid)
+ 		return 0;
+ 
+ 	tep_print_event(trace_event->tep, s, record, "%16s-%-8d ", TEP_PRINT_COMM, TEP_PRINT_PID);
+@@ -595,7 +595,7 @@ static int parse_arguments(char *monitor_name, int argc, char **argv)
+ 			config_reactor = optarg;
+ 			break;
+ 		case 's':
+-			config_my_pid = 0;
++			config_my_pid = -1;
+ 			break;
+ 		case 't':
+ 			config_trace = 1;


* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2025-08-01 10:31 Arisu Tachibana
  0 siblings, 0 replies; 82+ messages in thread
From: Arisu Tachibana @ 2025-08-01 10:31 UTC (permalink / raw
  To: gentoo-commits

commit:     90a1d7b7ab9e348bfac768c675204eb1be8e638c
Author:     Arisu Tachibana <alicef <AT> gentoo <DOT> org>
AuthorDate: Fri Aug  1 10:30:51 2025 +0000
Commit:     Arisu Tachibana <alicef <AT> gentoo <DOT> org>
CommitDate: Fri Aug  1 10:30:51 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=90a1d7b7

Linux patch 6.12.41

Signed-off-by: Arisu Tachibana <alicef <AT> gentoo.org>

 0000_README              |    4 +
 1040_linux-6.12.41.patch | 6110 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 6114 insertions(+)

diff --git a/0000_README b/0000_README
index 229d421f..96e25596 100644
--- a/0000_README
+++ b/0000_README
@@ -203,6 +203,10 @@ Patch:  1039_linux-6.12.40.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.12.40
 
+Patch:  1040_linux-6.12.41.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.12.41
+
 Patch:  1500_fortify-copy-size-value-range-tracking-fix.patch
 From:   https://git.kernel.org/
 Desc:   fortify: Hide run-time copy size from value range tracking

diff --git a/1040_linux-6.12.41.patch b/1040_linux-6.12.41.patch
new file mode 100644
index 00000000..d116603b
--- /dev/null
+++ b/1040_linux-6.12.41.patch
@@ -0,0 +1,6110 @@
+diff --git a/.clippy.toml b/.clippy.toml
+index 5d99a317f7d6fc..137f41d203de37 100644
+--- a/.clippy.toml
++++ b/.clippy.toml
+@@ -1,5 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ 
++msrv = "1.78.0"
++
+ check-private-items = true
+ 
+ disallowed-macros = [
+diff --git a/Makefile b/Makefile
+index c891f51637d5bd..fbaebf00a33b70 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 12
+-SUBLEVEL = 40
++SUBLEVEL = 41
+ EXTRAVERSION =
+ NAME = Baby Opossum Posse
+ 
+diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
+index d0040fb67c36f3..d5bf16462bdba9 100644
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -118,7 +118,7 @@ config ARM
+ 	select HAVE_KERNEL_XZ
+ 	select HAVE_KPROBES if !XIP_KERNEL && !CPU_ENDIAN_BE32 && !CPU_V7M
+ 	select HAVE_KRETPROBES if HAVE_KPROBES
+-	select HAVE_LD_DEAD_CODE_DATA_ELIMINATION if (LD_VERSION >= 23600 || LD_CAN_USE_KEEP_IN_OVERLAY)
++	select HAVE_LD_DEAD_CODE_DATA_ELIMINATION if (LD_VERSION >= 23600 || LD_IS_LLD) && LD_CAN_USE_KEEP_IN_OVERLAY
+ 	select HAVE_MOD_ARCH_SPECIFIC
+ 	select HAVE_NMI
+ 	select HAVE_OPTPROBES if !THUMB2_KERNEL
+diff --git a/arch/arm/Makefile b/arch/arm/Makefile
+index aafebf145738ab..dee8c9fe25a2e2 100644
+--- a/arch/arm/Makefile
++++ b/arch/arm/Makefile
+@@ -149,7 +149,7 @@ endif
+ # Need -Uarm for gcc < 3.x
+ KBUILD_CPPFLAGS	+=$(cpp-y)
+ KBUILD_CFLAGS	+=$(CFLAGS_ABI) $(CFLAGS_ISA) $(arch-y) $(tune-y) $(call cc-option,-mshort-load-bytes,$(call cc-option,-malignment-traps,)) -msoft-float -Uarm
+-KBUILD_AFLAGS	+=$(CFLAGS_ABI) $(AFLAGS_ISA) -Wa,$(arch-y) $(tune-y) -include asm/unified.h -msoft-float
++KBUILD_AFLAGS	+=$(CFLAGS_ABI) $(AFLAGS_ISA) -Wa,$(arch-y) $(tune-y) -include $(srctree)/arch/arm/include/asm/unified.h -msoft-float
+ 
+ CHECKFLAGS	+= -D__arm__
+ 
+diff --git a/arch/arm64/boot/dts/qcom/x1e78100-lenovo-thinkpad-t14s.dts b/arch/arm64/boot/dts/qcom/x1e78100-lenovo-thinkpad-t14s.dts
+index b1fa8f3558b3fc..02ae736a2205df 100644
+--- a/arch/arm64/boot/dts/qcom/x1e78100-lenovo-thinkpad-t14s.dts
++++ b/arch/arm64/boot/dts/qcom/x1e78100-lenovo-thinkpad-t14s.dts
+@@ -232,6 +232,7 @@ vreg_l12b_1p2: ldo12 {
+ 			regulator-min-microvolt = <1200000>;
+ 			regulator-max-microvolt = <1200000>;
+ 			regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
++			regulator-always-on;
+ 		};
+ 
+ 		vreg_l13b_3p0: ldo13 {
+@@ -253,6 +254,7 @@ vreg_l15b_1p8: ldo15 {
+ 			regulator-min-microvolt = <1800000>;
+ 			regulator-max-microvolt = <1800000>;
+ 			regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
++			regulator-always-on;
+ 		};
+ 
+ 		vreg_l17b_2p5: ldo17 {
+diff --git a/arch/arm64/boot/dts/qcom/x1e80100-crd.dts b/arch/arm64/boot/dts/qcom/x1e80100-crd.dts
+index 2a504a449b0bb8..e5d0d7d898c38a 100644
+--- a/arch/arm64/boot/dts/qcom/x1e80100-crd.dts
++++ b/arch/arm64/boot/dts/qcom/x1e80100-crd.dts
+@@ -659,8 +659,8 @@ vreg_l1j_0p8: ldo1 {
+ 
+ 		vreg_l2j_1p2: ldo2 {
+ 			regulator-name = "vreg_l2j_1p2";
+-			regulator-min-microvolt = <1200000>;
+-			regulator-max-microvolt = <1200000>;
++			regulator-min-microvolt = <1256000>;
++			regulator-max-microvolt = <1256000>;
+ 			regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ 		};
+ 
+diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
+index c1f45fd6b3e9a9..d8ffccee8194ce 100644
+--- a/arch/arm64/include/asm/assembler.h
++++ b/arch/arm64/include/asm/assembler.h
+@@ -41,6 +41,11 @@
+ /*
+  * Save/restore interrupts.
+  */
++	.macro save_and_disable_daif, flags
++	mrs	\flags, daif
++	msr	daifset, #0xf
++	.endm
++
+ 	.macro	save_and_disable_irq, flags
+ 	mrs	\flags, daif
+ 	msr	daifset, #3
+diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
+index 7ef0e127b149fc..189ce50055d1fe 100644
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -823,6 +823,7 @@ SYM_CODE_END(__bp_harden_el1_vectors)
+  *
+  */
+ SYM_FUNC_START(cpu_switch_to)
++	save_and_disable_daif x11
+ 	mov	x10, #THREAD_CPU_CONTEXT
+ 	add	x8, x0, x10
+ 	mov	x9, sp
+@@ -846,6 +847,7 @@ SYM_FUNC_START(cpu_switch_to)
+ 	ptrauth_keys_install_kernel x1, x8, x9, x10
+ 	scs_save x0
+ 	scs_load_current
++	restore_irq x11
+ 	ret
+ SYM_FUNC_END(cpu_switch_to)
+ NOKPROBE(cpu_switch_to)
+@@ -872,6 +874,7 @@ NOKPROBE(ret_from_fork)
+  * Calls func(regs) using this CPU's irq stack and shadow irq stack.
+  */
+ SYM_FUNC_START(call_on_irq_stack)
++	save_and_disable_daif x9
+ #ifdef CONFIG_SHADOW_CALL_STACK
+ 	get_current_task x16
+ 	scs_save x16
+@@ -886,8 +889,10 @@ SYM_FUNC_START(call_on_irq_stack)
+ 
+ 	/* Move to the new stack and call the function there */
+ 	add	sp, x16, #IRQ_STACK_SIZE
++	restore_irq x9
+ 	blr	x1
+ 
++	save_and_disable_daif x9
+ 	/*
+ 	 * Restore the SP from the FP, and restore the FP and LR from the frame
+ 	 * record.
+@@ -895,6 +900,7 @@ SYM_FUNC_START(call_on_irq_stack)
+ 	mov	sp, x29
+ 	ldp	x29, x30, [sp], #16
+ 	scs_load_current
++	restore_irq x9
+ 	ret
+ SYM_FUNC_END(call_on_irq_stack)
+ NOKPROBE(call_on_irq_stack)
+diff --git a/arch/powerpc/crypto/Kconfig b/arch/powerpc/crypto/Kconfig
+index 7012fa55aceb91..15783f2307dfe3 100644
+--- a/arch/powerpc/crypto/Kconfig
++++ b/arch/powerpc/crypto/Kconfig
+@@ -143,6 +143,7 @@ config CRYPTO_CHACHA20_P10
+ config CRYPTO_POLY1305_P10
+ 	tristate "Hash functions: Poly1305 (P10 or later)"
+ 	depends on PPC64 && CPU_LITTLE_ENDIAN && VSX
++	depends on BROKEN # Needs to be fixed to work in softirq context
+ 	select CRYPTO_HASH
+ 	select CRYPTO_LIB_POLY1305_GENERIC
+ 	help
+diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c
+index 95eada2994e150..239f612816e6d9 100644
+--- a/arch/x86/hyperv/hv_init.c
++++ b/arch/x86/hyperv/hv_init.c
+@@ -730,3 +730,36 @@ bool hv_is_hyperv_initialized(void)
+ 	return hypercall_msr.enable;
+ }
+ EXPORT_SYMBOL_GPL(hv_is_hyperv_initialized);
++
++int hv_apicid_to_vp_index(u32 apic_id)
++{
++	u64 control;
++	u64 status;
++	unsigned long irq_flags;
++	struct hv_get_vp_from_apic_id_in *input;
++	u32 *output, ret;
++
++	local_irq_save(irq_flags);
++
++	input = *this_cpu_ptr(hyperv_pcpu_input_arg);
++	memset(input, 0, sizeof(*input));
++	input->partition_id = HV_PARTITION_ID_SELF;
++	input->apic_ids[0] = apic_id;
++
++	output = *this_cpu_ptr(hyperv_pcpu_output_arg);
++
++	control = HV_HYPERCALL_REP_COMP_1 | HVCALL_GET_VP_ID_FROM_APIC_ID;
++	status = hv_do_hypercall(control, input, output);
++	ret = output[0];
++
++	local_irq_restore(irq_flags);
++
++	if (!hv_result_success(status)) {
++		pr_err("failed to get vp index from apic id %d, status %#llx\n",
++		       apic_id, status);
++		return -EINVAL;
++	}
++
++	return ret;
++}
++EXPORT_SYMBOL_GPL(hv_apicid_to_vp_index);
+diff --git a/arch/x86/hyperv/hv_vtl.c b/arch/x86/hyperv/hv_vtl.c
+index d04ccd4b3b4af0..2510e91b29b08c 100644
+--- a/arch/x86/hyperv/hv_vtl.c
++++ b/arch/x86/hyperv/hv_vtl.c
+@@ -175,41 +175,9 @@ static int hv_vtl_bringup_vcpu(u32 target_vp_index, int cpu, u64 eip_ignored)
+ 	return ret;
+ }
+ 
+-static int hv_vtl_apicid_to_vp_id(u32 apic_id)
+-{
+-	u64 control;
+-	u64 status;
+-	unsigned long irq_flags;
+-	struct hv_get_vp_from_apic_id_in *input;
+-	u32 *output, ret;
+-
+-	local_irq_save(irq_flags);
+-
+-	input = *this_cpu_ptr(hyperv_pcpu_input_arg);
+-	memset(input, 0, sizeof(*input));
+-	input->partition_id = HV_PARTITION_ID_SELF;
+-	input->apic_ids[0] = apic_id;
+-
+-	output = (u32 *)input;
+-
+-	control = HV_HYPERCALL_REP_COMP_1 | HVCALL_GET_VP_ID_FROM_APIC_ID;
+-	status = hv_do_hypercall(control, input, output);
+-	ret = output[0];
+-
+-	local_irq_restore(irq_flags);
+-
+-	if (!hv_result_success(status)) {
+-		pr_err("failed to get vp id from apic id %d, status %#llx\n",
+-		       apic_id, status);
+-		return -EINVAL;
+-	}
+-
+-	return ret;
+-}
+-
+ static int hv_vtl_wakeup_secondary_cpu(u32 apicid, unsigned long start_eip)
+ {
+-	int vp_id, cpu;
++	int vp_index, cpu;
+ 
+ 	/* Find the logical CPU for the APIC ID */
+ 	for_each_present_cpu(cpu) {
+@@ -220,18 +188,18 @@ static int hv_vtl_wakeup_secondary_cpu(u32 apicid, unsigned long start_eip)
+ 		return -EINVAL;
+ 
+ 	pr_debug("Bringing up CPU with APIC ID %d in VTL2...\n", apicid);
+-	vp_id = hv_vtl_apicid_to_vp_id(apicid);
++	vp_index = hv_apicid_to_vp_index(apicid);
+ 
+-	if (vp_id < 0) {
++	if (vp_index < 0) {
+ 		pr_err("Couldn't find CPU with APIC ID %d\n", apicid);
+ 		return -EINVAL;
+ 	}
+-	if (vp_id > ms_hyperv.max_vp_index) {
+-		pr_err("Invalid CPU id %d for APIC ID %d\n", vp_id, apicid);
++	if (vp_index > ms_hyperv.max_vp_index) {
++		pr_err("Invalid CPU id %d for APIC ID %d\n", vp_index, apicid);
+ 		return -EINVAL;
+ 	}
+ 
+-	return hv_vtl_bringup_vcpu(vp_id, cpu, start_eip);
++	return hv_vtl_bringup_vcpu(vp_index, cpu, start_eip);
+ }
+ 
+ int __init hv_vtl_early_init(void)
+diff --git a/arch/x86/hyperv/irqdomain.c b/arch/x86/hyperv/irqdomain.c
+index 3215a4a07408a8..939b7081c5ab9b 100644
+--- a/arch/x86/hyperv/irqdomain.c
++++ b/arch/x86/hyperv/irqdomain.c
+@@ -192,7 +192,6 @@ static void hv_irq_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+ 	struct pci_dev *dev;
+ 	struct hv_interrupt_entry out_entry, *stored_entry;
+ 	struct irq_cfg *cfg = irqd_cfg(data);
+-	const cpumask_t *affinity;
+ 	int cpu;
+ 	u64 status;
+ 
+@@ -204,8 +203,7 @@ static void hv_irq_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+ 		return;
+ 	}
+ 
+-	affinity = irq_data_get_effective_affinity_mask(data);
+-	cpu = cpumask_first_and(affinity, cpu_online_mask);
++	cpu = cpumask_first(irq_data_get_effective_affinity_mask(data));
+ 
+ 	if (data->chip_data) {
+ 		/*
+diff --git a/arch/x86/hyperv/ivm.c b/arch/x86/hyperv/ivm.c
+index 4065f5ef3ae08e..af87f440bc2aca 100644
+--- a/arch/x86/hyperv/ivm.c
++++ b/arch/x86/hyperv/ivm.c
+@@ -10,6 +10,7 @@
+ #include <linux/hyperv.h>
+ #include <linux/types.h>
+ #include <linux/slab.h>
++#include <linux/cpu.h>
+ #include <asm/svm.h>
+ #include <asm/sev.h>
+ #include <asm/io.h>
+@@ -289,7 +290,7 @@ static void snp_cleanup_vmsa(struct sev_es_save_area *vmsa)
+ 		free_page((unsigned long)vmsa);
+ }
+ 
+-int hv_snp_boot_ap(u32 cpu, unsigned long start_ip)
++int hv_snp_boot_ap(u32 apic_id, unsigned long start_ip)
+ {
+ 	struct sev_es_save_area *vmsa = (struct sev_es_save_area *)
+ 		__get_free_page(GFP_KERNEL | __GFP_ZERO);
+@@ -298,10 +299,27 @@ int hv_snp_boot_ap(u32 cpu, unsigned long start_ip)
+ 	u64 ret, retry = 5;
+ 	struct hv_enable_vp_vtl *start_vp_input;
+ 	unsigned long flags;
++	int cpu, vp_index;
+ 
+ 	if (!vmsa)
+ 		return -ENOMEM;
+ 
++	/* Find the Hyper-V VP index which might be not the same as APIC ID */
++	vp_index = hv_apicid_to_vp_index(apic_id);
++	if (vp_index < 0 || vp_index > ms_hyperv.max_vp_index)
++		return -EINVAL;
++
++	/*
++	 * Find the Linux CPU number for addressing the per-CPU data, and it
++	 * might not be the same as APIC ID.
++	 */
++	for_each_present_cpu(cpu) {
++		if (arch_match_cpu_phys_id(cpu, apic_id))
++			break;
++	}
++	if (cpu >= nr_cpu_ids)
++		return -EINVAL;
++
+ 	native_store_gdt(&gdtr);
+ 
+ 	vmsa->gdtr.base = gdtr.address;
+@@ -349,7 +367,7 @@ int hv_snp_boot_ap(u32 cpu, unsigned long start_ip)
+ 	start_vp_input = (struct hv_enable_vp_vtl *)ap_start_input_arg;
+ 	memset(start_vp_input, 0, sizeof(*start_vp_input));
+ 	start_vp_input->partition_id = -1;
+-	start_vp_input->vp_index = cpu;
++	start_vp_input->vp_index = vp_index;
+ 	start_vp_input->target_vtl.target_vtl = ms_hyperv.vtl;
+ 	*(u64 *)&start_vp_input->vp_context = __pa(vmsa) | 1;
+ 
+diff --git a/arch/x86/include/asm/debugreg.h b/arch/x86/include/asm/debugreg.h
+index fdbbbfec745aa5..820b4aeabd0c24 100644
+--- a/arch/x86/include/asm/debugreg.h
++++ b/arch/x86/include/asm/debugreg.h
+@@ -9,6 +9,14 @@
+ #include <asm/cpufeature.h>
+ #include <asm/msr.h>
+ 
++/*
++ * Define bits that are always set to 1 in DR7, only bit 10 is
++ * architecturally reserved to '1'.
++ *
++ * This is also the init/reset value for DR7.
++ */
++#define DR7_FIXED_1	0x00000400
++
+ DECLARE_PER_CPU(unsigned long, cpu_dr7);
+ 
+ #ifndef CONFIG_PARAVIRT_XXL
+@@ -100,8 +108,8 @@ static __always_inline void native_set_debugreg(int regno, unsigned long value)
+ 
+ static inline void hw_breakpoint_disable(void)
+ {
+-	/* Zero the control register for HW Breakpoint */
+-	set_debugreg(0UL, 7);
++	/* Reset the control register for HW Breakpoint */
++	set_debugreg(DR7_FIXED_1, 7);
+ 
+ 	/* Zero-out the individual HW breakpoint address registers */
+ 	set_debugreg(0UL, 0);
+@@ -125,9 +133,12 @@ static __always_inline unsigned long local_db_save(void)
+ 		return 0;
+ 
+ 	get_debugreg(dr7, 7);
+-	dr7 &= ~0x400; /* architecturally set bit */
++
++	/* Architecturally set bit */
++	dr7 &= ~DR7_FIXED_1;
+ 	if (dr7)
+-		set_debugreg(0, 7);
++		set_debugreg(DR7_FIXED_1, 7);
++
+ 	/*
+ 	 * Ensure the compiler doesn't lower the above statements into
+ 	 * the critical section; disabling breakpoints late would not
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index e4dd840e0becd4..0caa3293f6db90 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -31,6 +31,7 @@
+ 
+ #include <asm/apic.h>
+ #include <asm/pvclock-abi.h>
++#include <asm/debugreg.h>
+ #include <asm/desc.h>
+ #include <asm/mtrr.h>
+ #include <asm/msr-index.h>
+@@ -246,7 +247,6 @@ enum x86_intercept_stage;
+ #define DR7_BP_EN_MASK	0x000000ff
+ #define DR7_GE		(1 << 9)
+ #define DR7_GD		(1 << 13)
+-#define DR7_FIXED_1	0x00000400
+ #define DR7_VOLATILE	0xffff2bff
+ 
+ #define KVM_GUESTDBG_VALID_MASK \
+diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
+index 5f0bc6a6d02556..a42439c2ed248c 100644
+--- a/arch/x86/include/asm/mshyperv.h
++++ b/arch/x86/include/asm/mshyperv.h
+@@ -275,11 +275,11 @@ int hv_unmap_ioapic_interrupt(int ioapic_id, struct hv_interrupt_entry *entry);
+ #ifdef CONFIG_AMD_MEM_ENCRYPT
+ bool hv_ghcb_negotiate_protocol(void);
+ void __noreturn hv_ghcb_terminate(unsigned int set, unsigned int reason);
+-int hv_snp_boot_ap(u32 cpu, unsigned long start_ip);
++int hv_snp_boot_ap(u32 apic_id, unsigned long start_ip);
+ #else
+ static inline bool hv_ghcb_negotiate_protocol(void) { return false; }
+ static inline void hv_ghcb_terminate(unsigned int set, unsigned int reason) {}
+-static inline int hv_snp_boot_ap(u32 cpu, unsigned long start_ip) { return 0; }
++static inline int hv_snp_boot_ap(u32 apic_id, unsigned long start_ip) { return 0; }
+ #endif
+ 
+ #if defined(CONFIG_AMD_MEM_ENCRYPT) || defined(CONFIG_INTEL_TDX_GUEST)
+@@ -313,6 +313,7 @@ static __always_inline u64 hv_raw_get_msr(unsigned int reg)
+ {
+ 	return __rdmsr(reg);
+ }
++int hv_apicid_to_vp_index(u32 apic_id);
+ 
+ #else /* CONFIG_HYPERV */
+ static inline void hyperv_init(void) {}
+@@ -334,6 +335,7 @@ static inline void hv_set_msr(unsigned int reg, u64 value) { }
+ static inline u64 hv_get_msr(unsigned int reg) { return 0; }
+ static inline void hv_set_non_nested_msr(unsigned int reg, u64 value) { }
+ static inline u64 hv_get_non_nested_msr(unsigned int reg) { return 0; }
++static inline int hv_apicid_to_vp_index(u32 apic_id) { return -EINVAL; }
+ #endif /* CONFIG_HYPERV */
+ 
+ 
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index efd42ee9d1cc61..4810271302d0c9 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -378,6 +378,8 @@ static bool amd_check_tsa_microcode(void)
+ 	p.model		= c->x86_model;
+ 	p.ext_model	= c->x86_model >> 4;
+ 	p.stepping	= c->x86_stepping;
++	/* reserved bits are expected to be 0 in test below */
++	p.__reserved	= 0;
+ 
+ 	if (cpu_has(c, X86_FEATURE_ZEN3) ||
+ 	    cpu_has(c, X86_FEATURE_ZEN4)) {
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index ed072b126111c3..976545ec8fdcb3 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -2160,7 +2160,7 @@ EXPORT_PER_CPU_SYMBOL(__stack_chk_guard);
+ static void initialize_debug_regs(void)
+ {
+ 	/* Control register first -- to make sure everything is disabled. */
+-	set_debugreg(0, 7);
++	set_debugreg(DR7_FIXED_1, 7);
+ 	set_debugreg(DR6_RESERVED, 6);
+ 	/* dr5 and dr4 don't exist */
+ 	set_debugreg(0, 3);
+diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
+index 9c9faa1634fb9a..e5faeec20b1f4b 100644
+--- a/arch/x86/kernel/kgdb.c
++++ b/arch/x86/kernel/kgdb.c
+@@ -385,7 +385,7 @@ static void kgdb_disable_hw_debug(struct pt_regs *regs)
+ 	struct perf_event *bp;
+ 
+ 	/* Disable hardware debugging while we are in kgdb: */
+-	set_debugreg(0UL, 7);
++	set_debugreg(DR7_FIXED_1, 7);
+ 	for (i = 0; i < HBP_NUM; i++) {
+ 		if (!breakinfo[i].enabled)
+ 			continue;
+diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
+index 0917c7f25720be..f10c14cb6ef8bf 100644
+--- a/arch/x86/kernel/process_32.c
++++ b/arch/x86/kernel/process_32.c
+@@ -93,7 +93,7 @@ void __show_regs(struct pt_regs *regs, enum show_regs_mode mode,
+ 
+ 	/* Only print out debug registers if they are in their non-default state. */
+ 	if ((d0 == 0) && (d1 == 0) && (d2 == 0) && (d3 == 0) &&
+-	    (d6 == DR6_RESERVED) && (d7 == 0x400))
++	    (d6 == DR6_RESERVED) && (d7 == DR7_FIXED_1))
+ 		return;
+ 
+ 	printk("%sDR0: %08lx DR1: %08lx DR2: %08lx DR3: %08lx\n",
+diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
+index 226472332a70dd..266366a945ed2b 100644
+--- a/arch/x86/kernel/process_64.c
++++ b/arch/x86/kernel/process_64.c
+@@ -132,7 +132,7 @@ void __show_regs(struct pt_regs *regs, enum show_regs_mode mode,
+ 
+ 	/* Only print out debug registers if they are in their non-default state. */
+ 	if (!((d0 == 0) && (d1 == 0) && (d2 == 0) && (d3 == 0) &&
+-	    (d6 == DR6_RESERVED) && (d7 == 0x400))) {
++	    (d6 == DR6_RESERVED) && (d7 == DR7_FIXED_1))) {
+ 		printk("%sDR0: %016lx DR1: %016lx DR2: %016lx\n",
+ 		       log_lvl, d0, d1, d2);
+ 		printk("%sDR3: %016lx DR6: %016lx DR7: %016lx\n",
+diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
+index ad479cfb91bc7b..f16a7b2c2adcf4 100644
+--- a/arch/x86/kvm/cpuid.h
++++ b/arch/x86/kvm/cpuid.h
+@@ -2,7 +2,6 @@
+ #ifndef ARCH_X86_KVM_CPUID_H
+ #define ARCH_X86_KVM_CPUID_H
+ 
+-#include "x86.h"
+ #include "reverse_cpuid.h"
+ #include <asm/cpu.h>
+ #include <asm/processor.h>
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index e72aed25d72126..60986f67c35a88 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -651,9 +651,10 @@ static inline u8 ctxt_virt_addr_bits(struct x86_emulate_ctxt *ctxt)
+ }
+ 
+ static inline bool emul_is_noncanonical_address(u64 la,
+-						struct x86_emulate_ctxt *ctxt)
++						struct x86_emulate_ctxt *ctxt,
++						unsigned int flags)
+ {
+-	return !__is_canonical_address(la, ctxt_virt_addr_bits(ctxt));
++	return !ctxt->ops->is_canonical_addr(ctxt, la, flags);
+ }
+ 
+ /*
+@@ -1733,7 +1734,8 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
+ 		if (ret != X86EMUL_CONTINUE)
+ 			return ret;
+ 		if (emul_is_noncanonical_address(get_desc_base(&seg_desc) |
+-						 ((u64)base3 << 32), ctxt))
++						 ((u64)base3 << 32), ctxt,
++						 X86EMUL_F_DT_LOAD))
+ 			return emulate_gp(ctxt, err_code);
+ 	}
+ 
+@@ -2516,8 +2518,8 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
+ 		ss_sel = cs_sel + 8;
+ 		cs.d = 0;
+ 		cs.l = 1;
+-		if (emul_is_noncanonical_address(rcx, ctxt) ||
+-		    emul_is_noncanonical_address(rdx, ctxt))
++		if (emul_is_noncanonical_address(rcx, ctxt, 0) ||
++		    emul_is_noncanonical_address(rdx, ctxt, 0))
+ 			return emulate_gp(ctxt, 0);
+ 		break;
+ 	}
+@@ -3494,7 +3496,8 @@ static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
+ 	if (rc != X86EMUL_CONTINUE)
+ 		return rc;
+ 	if (ctxt->mode == X86EMUL_MODE_PROT64 &&
+-	    emul_is_noncanonical_address(desc_ptr.address, ctxt))
++	    emul_is_noncanonical_address(desc_ptr.address, ctxt,
++					 X86EMUL_F_DT_LOAD))
+ 		return emulate_gp(ctxt, 0);
+ 	if (lgdt)
+ 		ctxt->ops->set_gdt(ctxt, &desc_ptr);
+diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
+index 44c88537448c74..79d06a8a5b7d82 100644
+--- a/arch/x86/kvm/hyperv.c
++++ b/arch/x86/kvm/hyperv.c
+@@ -1980,6 +1980,9 @@ int kvm_hv_vcpu_flush_tlb(struct kvm_vcpu *vcpu)
+ 		if (entries[i] == KVM_HV_TLB_FLUSHALL_ENTRY)
+ 			goto out_flush_all;
+ 
++		if (is_noncanonical_invlpg_address(entries[i], vcpu))
++			continue;
++
+ 		/*
+ 		 * Lower 12 bits of 'address' encode the number of additional
+ 		 * pages to flush.
+diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h
+index 55a18e2f2dcd99..10495fffb8905c 100644
+--- a/arch/x86/kvm/kvm_emulate.h
++++ b/arch/x86/kvm/kvm_emulate.h
+@@ -94,6 +94,8 @@ struct x86_instruction_info {
+ #define X86EMUL_F_FETCH			BIT(1)
+ #define X86EMUL_F_IMPLICIT		BIT(2)
+ #define X86EMUL_F_INVLPG		BIT(3)
++#define X86EMUL_F_MSR			BIT(4)
++#define X86EMUL_F_DT_LOAD		BIT(5)
+ 
+ struct x86_emulate_ops {
+ 	void (*vm_bugged)(struct x86_emulate_ctxt *ctxt);
+@@ -235,6 +237,9 @@ struct x86_emulate_ops {
+ 
+ 	gva_t (*get_untagged_addr)(struct x86_emulate_ctxt *ctxt, gva_t addr,
+ 				   unsigned int flags);
++
++	bool (*is_canonical_addr)(struct x86_emulate_ctxt *ctxt, gva_t addr,
++				  unsigned int flags);
+ };
+ 
+ /* Type, address-of, and value of an instruction's operand. */
+diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
+index 9dc5dd43ae7f21..e9322358678b67 100644
+--- a/arch/x86/kvm/mmu.h
++++ b/arch/x86/kvm/mmu.h
+@@ -4,6 +4,7 @@
+ 
+ #include <linux/kvm_host.h>
+ #include "kvm_cache_regs.h"
++#include "x86.h"
+ #include "cpuid.h"
+ 
+ extern bool __read_mostly enable_mmio_caching;
+diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
+index 4607610ef06283..8edfb4e4a73d0e 100644
+--- a/arch/x86/kvm/mmu/mmu.c
++++ b/arch/x86/kvm/mmu/mmu.c
+@@ -6234,7 +6234,7 @@ void kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
+ 	/* It's actually a GPA for vcpu->arch.guest_mmu.  */
+ 	if (mmu != &vcpu->arch.guest_mmu) {
+ 		/* INVLPG on a non-canonical address is a NOP according to the SDM.  */
+-		if (is_noncanonical_address(addr, vcpu))
++		if (is_noncanonical_invlpg_address(addr, vcpu))
+ 			return;
+ 
+ 		kvm_x86_call(flush_tlb_gva)(vcpu, addr);
+diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c
+index 05490b9d8a434f..6f74e2b27c1ed5 100644
+--- a/arch/x86/kvm/mtrr.c
++++ b/arch/x86/kvm/mtrr.c
+@@ -19,6 +19,7 @@
+ #include <asm/mtrr.h>
+ 
+ #include "cpuid.h"
++#include "x86.h"
+ 
+ static u64 *find_mtrr(struct kvm_vcpu *vcpu, unsigned int msr)
+ {
+diff --git a/arch/x86/kvm/vmx/hyperv.c b/arch/x86/kvm/vmx/hyperv.c
+index fab6a1ad98dc18..fa41d036acd49e 100644
+--- a/arch/x86/kvm/vmx/hyperv.c
++++ b/arch/x86/kvm/vmx/hyperv.c
+@@ -4,6 +4,7 @@
+ #include <linux/errno.h>
+ #include <linux/smp.h>
+ 
++#include "x86.h"
+ #include "../cpuid.h"
+ #include "hyperv.h"
+ #include "nested.h"
+diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
+index 22bee8a711442d..903e874041ac8d 100644
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -7,6 +7,7 @@
+ #include <asm/debugreg.h>
+ #include <asm/mmu_context.h>
+ 
++#include "x86.h"
+ #include "cpuid.h"
+ #include "hyperv.h"
+ #include "mmu.h"
+@@ -16,7 +17,6 @@
+ #include "sgx.h"
+ #include "trace.h"
+ #include "vmx.h"
+-#include "x86.h"
+ #include "smm.h"
+ 
+ static bool __read_mostly enable_shadow_vmcs = 1;
+@@ -3020,8 +3020,8 @@ static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu,
+ 	    CC(!kvm_vcpu_is_legal_cr3(vcpu, vmcs12->host_cr3)))
+ 		return -EINVAL;
+ 
+-	if (CC(is_noncanonical_address(vmcs12->host_ia32_sysenter_esp, vcpu)) ||
+-	    CC(is_noncanonical_address(vmcs12->host_ia32_sysenter_eip, vcpu)))
++	if (CC(is_noncanonical_msr_address(vmcs12->host_ia32_sysenter_esp, vcpu)) ||
++	    CC(is_noncanonical_msr_address(vmcs12->host_ia32_sysenter_eip, vcpu)))
+ 		return -EINVAL;
+ 
+ 	if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) &&
+@@ -3055,12 +3055,12 @@ static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu,
+ 	    CC(vmcs12->host_ss_selector == 0 && !ia32e))
+ 		return -EINVAL;
+ 
+-	if (CC(is_noncanonical_address(vmcs12->host_fs_base, vcpu)) ||
+-	    CC(is_noncanonical_address(vmcs12->host_gs_base, vcpu)) ||
+-	    CC(is_noncanonical_address(vmcs12->host_gdtr_base, vcpu)) ||
+-	    CC(is_noncanonical_address(vmcs12->host_idtr_base, vcpu)) ||
+-	    CC(is_noncanonical_address(vmcs12->host_tr_base, vcpu)) ||
+-	    CC(is_noncanonical_address(vmcs12->host_rip, vcpu)))
++	if (CC(is_noncanonical_base_address(vmcs12->host_fs_base, vcpu)) ||
++	    CC(is_noncanonical_base_address(vmcs12->host_gs_base, vcpu)) ||
++	    CC(is_noncanonical_base_address(vmcs12->host_gdtr_base, vcpu)) ||
++	    CC(is_noncanonical_base_address(vmcs12->host_idtr_base, vcpu)) ||
++	    CC(is_noncanonical_base_address(vmcs12->host_tr_base, vcpu)) ||
++	    CC(is_noncanonical_address(vmcs12->host_rip, vcpu, 0)))
+ 		return -EINVAL;
+ 
+ 	/*
+@@ -3178,7 +3178,7 @@ static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
+ 	}
+ 
+ 	if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) &&
+-	    (CC(is_noncanonical_address(vmcs12->guest_bndcfgs & PAGE_MASK, vcpu)) ||
++	    (CC(is_noncanonical_msr_address(vmcs12->guest_bndcfgs & PAGE_MASK, vcpu)) ||
+ 	     CC((vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD))))
+ 		return -EINVAL;
+ 
+@@ -5172,7 +5172,7 @@ int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
+ 		 * non-canonical form. This is the only check on the memory
+ 		 * destination for long mode!
+ 		 */
+-		exn = is_noncanonical_address(*ret, vcpu);
++		exn = is_noncanonical_address(*ret, vcpu, 0);
+ 	} else {
+ 		/*
+ 		 * When not in long mode, the virtual/linear address is
+@@ -5983,7 +5983,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
+ 		 * invalidation.
+ 		 */
+ 		if (!operand.vpid ||
+-		    is_noncanonical_address(operand.gla, vcpu))
++		    is_noncanonical_invlpg_address(operand.gla, vcpu))
+ 			return nested_vmx_fail(vcpu,
+ 				VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
+ 		vpid_sync_vcpu_addr(vpid02, operand.gla);
+diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
+index 83382a4d1d66fd..9c9d4a3361664e 100644
+--- a/arch/x86/kvm/vmx/pmu_intel.c
++++ b/arch/x86/kvm/vmx/pmu_intel.c
+@@ -365,7 +365,7 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ 		}
+ 		break;
+ 	case MSR_IA32_DS_AREA:
+-		if (is_noncanonical_address(data, vcpu))
++		if (is_noncanonical_msr_address(data, vcpu))
+ 			return 1;
+ 
+ 		pmu->ds_area = data;
+diff --git a/arch/x86/kvm/vmx/sgx.c b/arch/x86/kvm/vmx/sgx.c
+index a3c3d2a51f47d1..b352a3ba7354a2 100644
+--- a/arch/x86/kvm/vmx/sgx.c
++++ b/arch/x86/kvm/vmx/sgx.c
+@@ -4,12 +4,11 @@
+ 
+ #include <asm/sgx.h>
+ 
+-#include "cpuid.h"
++#include "x86.h"
+ #include "kvm_cache_regs.h"
+ #include "nested.h"
+ #include "sgx.h"
+ #include "vmx.h"
+-#include "x86.h"
+ 
+ bool __read_mostly enable_sgx = 1;
+ module_param_named(sgx, enable_sgx, bool, 0444);
+@@ -38,7 +37,7 @@ static int sgx_get_encls_gva(struct kvm_vcpu *vcpu, unsigned long offset,
+ 		fault = true;
+ 	} else if (likely(is_64_bit_mode(vcpu))) {
+ 		*gva = vmx_get_untagged_addr(vcpu, *gva, 0);
+-		fault = is_noncanonical_address(*gva, vcpu);
++		fault = is_noncanonical_address(*gva, vcpu, 0);
+ 	} else {
+ 		*gva &= 0xffffffff;
+ 		fault = (s.unusable) ||
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index 029fbf3791f17f..9a4ebf3dfbfc88 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -2284,7 +2284,7 @@ int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ 		    (!msr_info->host_initiated &&
+ 		     !guest_cpuid_has(vcpu, X86_FEATURE_MPX)))
+ 			return 1;
+-		if (is_noncanonical_address(data & PAGE_MASK, vcpu) ||
++		if (is_noncanonical_msr_address(data & PAGE_MASK, vcpu) ||
+ 		    (data & MSR_IA32_BNDCFGS_RSVD))
+ 			return 1;
+ 
+@@ -2449,7 +2449,7 @@ int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ 		index = msr_info->index - MSR_IA32_RTIT_ADDR0_A;
+ 		if (index >= 2 * vmx->pt_desc.num_address_ranges)
+ 			return 1;
+-		if (is_noncanonical_address(data, vcpu))
++		if (is_noncanonical_msr_address(data, vcpu))
+ 			return 1;
+ 		if (index % 2)
+ 			vmx->pt_desc.guest.addr_b[index / 2] = data;
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index f378d479fea3f9..213af0fda7682d 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1845,7 +1845,7 @@ static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data,
+ 	case MSR_KERNEL_GS_BASE:
+ 	case MSR_CSTAR:
+ 	case MSR_LSTAR:
+-		if (is_noncanonical_address(data, vcpu))
++		if (is_noncanonical_msr_address(data, vcpu))
+ 			return 1;
+ 		break;
+ 	case MSR_IA32_SYSENTER_EIP:
+@@ -1862,7 +1862,7 @@ static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data,
+ 		 * value, and that something deterministic happens if the guest
+ 		 * invokes 64-bit SYSENTER.
+ 		 */
+-		data = __canonical_address(data, vcpu_virt_addr_bits(vcpu));
++		data = __canonical_address(data, max_host_virt_addr_bits());
+ 		break;
+ 	case MSR_TSC_AUX:
+ 		if (!kvm_is_supported_user_return_msr(MSR_TSC_AUX))
+@@ -8608,6 +8608,12 @@ static gva_t emulator_get_untagged_addr(struct x86_emulate_ctxt *ctxt,
+ 					       addr, flags);
+ }
+ 
++static bool emulator_is_canonical_addr(struct x86_emulate_ctxt *ctxt,
++				       gva_t addr, unsigned int flags)
++{
++	return !is_noncanonical_address(addr, emul_to_vcpu(ctxt), flags);
++}
++
+ static const struct x86_emulate_ops emulate_ops = {
+ 	.vm_bugged           = emulator_vm_bugged,
+ 	.read_gpr            = emulator_read_gpr,
+@@ -8654,6 +8660,7 @@ static const struct x86_emulate_ops emulate_ops = {
+ 	.triple_fault        = emulator_triple_fault,
+ 	.set_xcr             = emulator_set_xcr,
+ 	.get_untagged_addr   = emulator_get_untagged_addr,
++	.is_canonical_addr   = emulator_is_canonical_addr,
+ };
+ 
+ static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
+@@ -10959,7 +10966,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
+ 		wrmsrl(MSR_IA32_XFD_ERR, vcpu->arch.guest_fpu.xfd_err);
+ 
+ 	if (unlikely(vcpu->arch.switch_db_regs)) {
+-		set_debugreg(0, 7);
++		set_debugreg(DR7_FIXED_1, 7);
+ 		set_debugreg(vcpu->arch.eff_db[0], 0);
+ 		set_debugreg(vcpu->arch.eff_db[1], 1);
+ 		set_debugreg(vcpu->arch.eff_db[2], 2);
+@@ -10968,7 +10975,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
+ 		if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT))
+ 			kvm_x86_call(set_dr6)(vcpu, vcpu->arch.dr6);
+ 	} else if (unlikely(hw_breakpoint_active())) {
+-		set_debugreg(0, 7);
++		set_debugreg(DR7_FIXED_1, 7);
+ 	}
+ 
+ 	vcpu->arch.host_debugctl = get_debugctlmsr();
+@@ -12888,11 +12895,11 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
+ 		mutex_unlock(&kvm->slots_lock);
+ 	}
+ 	kvm_unload_vcpu_mmus(kvm);
++	kvm_destroy_vcpus(kvm);
+ 	kvm_x86_call(vm_destroy)(kvm);
+ 	kvm_free_msr_filter(srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1));
+ 	kvm_pic_destroy(kvm);
+ 	kvm_ioapic_destroy(kvm);
+-	kvm_destroy_vcpus(kvm);
+ 	kvfree(rcu_dereference_check(kvm->arch.apic_map, 1));
+ 	kfree(srcu_dereference_check(kvm->arch.pmu_event_filter, &kvm->srcu, 1));
+ 	kvm_mmu_uninit_vm(kvm);
+@@ -13756,7 +13763,7 @@ int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva)
+ 		 * invalidation.
+ 		 */
+ 		if ((!pcid_enabled && (operand.pcid != 0)) ||
+-		    is_noncanonical_address(operand.gla, vcpu)) {
++		    is_noncanonical_invlpg_address(operand.gla, vcpu)) {
+ 			kvm_inject_gp(vcpu, 0);
+ 			return 1;
+ 		}
+diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
+index a84c48ef527853..ec623d23d13d2e 100644
+--- a/arch/x86/kvm/x86.h
++++ b/arch/x86/kvm/x86.h
+@@ -8,6 +8,7 @@
+ #include <asm/pvclock.h>
+ #include "kvm_cache_regs.h"
+ #include "kvm_emulate.h"
++#include "cpuid.h"
+ 
+ struct kvm_caps {
+ 	/* control of guest tsc rate supported? */
+@@ -233,9 +234,52 @@ static inline u8 vcpu_virt_addr_bits(struct kvm_vcpu *vcpu)
+ 	return kvm_is_cr4_bit_set(vcpu, X86_CR4_LA57) ? 57 : 48;
+ }
+ 
+-static inline bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu)
++static inline u8 max_host_virt_addr_bits(void)
+ {
+-	return !__is_canonical_address(la, vcpu_virt_addr_bits(vcpu));
++	return kvm_cpu_cap_has(X86_FEATURE_LA57) ? 57 : 48;
++}
++
++/*
++ * x86 MSRs which contain linear addresses, x86 hidden segment bases, and
++ * IDT/GDT bases have static canonicality checks, the size of which depends
++ * only on the CPU's support for 5-level paging, rather than on the state of
++ * CR4.LA57.  This applies to both WRMSR and to other instructions that set
++ * their values, e.g. SGDT.
++ *
++ * KVM passes through most of these MSRS and also doesn't intercept the
++ * instructions that set the hidden segment bases.
++ *
++ * Because of this, to be consistent with hardware, even if the guest doesn't
++ * have LA57 enabled in its CPUID, perform canonicality checks based on *host*
++ * support for 5 level paging.
++ *
++ * Finally, instructions which are related to MMU invalidation of a given
++ * linear address, also have a similar static canonical check on address.
++ * This allows for example to invalidate 5-level addresses of a guest from a
++ * host which uses 4-level paging.
++ */
++static inline bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu,
++					   unsigned int flags)
++{
++	if (flags & (X86EMUL_F_INVLPG | X86EMUL_F_MSR | X86EMUL_F_DT_LOAD))
++		return !__is_canonical_address(la, max_host_virt_addr_bits());
++	else
++		return !__is_canonical_address(la, vcpu_virt_addr_bits(vcpu));
++}
++
++static inline bool is_noncanonical_msr_address(u64 la, struct kvm_vcpu *vcpu)
++{
++	return is_noncanonical_address(la, vcpu, X86EMUL_F_MSR);
++}
++
++static inline bool is_noncanonical_base_address(u64 la, struct kvm_vcpu *vcpu)
++{
++	return is_noncanonical_address(la, vcpu, X86EMUL_F_DT_LOAD);
++}
++
++static inline bool is_noncanonical_invlpg_address(u64 la, struct kvm_vcpu *vcpu)
++{
++	return is_noncanonical_address(la, vcpu, X86EMUL_F_INVLPG);
+ }
+ 
+ static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
+diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
+index 5962ea1230a17e..de4e2f3db942a4 100644
+--- a/drivers/base/regmap/regmap.c
++++ b/drivers/base/regmap/regmap.c
+@@ -1174,6 +1174,8 @@ struct regmap *__regmap_init(struct device *dev,
+ err_map:
+ 	kfree(map);
+ err:
++	if (bus && bus->free_on_exit)
++		kfree(bus);
+ 	return ERR_PTR(ret);
+ }
+ EXPORT_SYMBOL_GPL(__regmap_init);
+diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
+index 58d16ff166c2db..4575d9a4e5ed6f 100644
+--- a/drivers/bus/fsl-mc/fsl-mc-bus.c
++++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
+@@ -942,6 +942,7 @@ struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev,
+ 	struct fsl_mc_obj_desc endpoint_desc = {{ 0 }};
+ 	struct dprc_endpoint endpoint1 = {{ 0 }};
+ 	struct dprc_endpoint endpoint2 = {{ 0 }};
++	struct fsl_mc_bus *mc_bus;
+ 	int state, err;
+ 
+ 	mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
+@@ -965,6 +966,8 @@ struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev,
+ 	strcpy(endpoint_desc.type, endpoint2.type);
+ 	endpoint_desc.id = endpoint2.id;
+ 	endpoint = fsl_mc_device_lookup(&endpoint_desc, mc_bus_dev);
++	if (endpoint)
++		return endpoint;
+ 
+ 	/*
+ 	 * We know that the device has an endpoint because we verified by
+@@ -972,17 +975,13 @@ struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev,
+ 	 * yet discovered by the fsl-mc bus, thus the lookup returned NULL.
+ 	 * Force a rescan of the devices in this container and retry the lookup.
+ 	 */
+-	if (!endpoint) {
+-		struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
+-
+-		if (mutex_trylock(&mc_bus->scan_mutex)) {
+-			err = dprc_scan_objects(mc_bus_dev, true);
+-			mutex_unlock(&mc_bus->scan_mutex);
+-		}
+-
+-		if (err < 0)
+-			return ERR_PTR(err);
++	mc_bus = to_fsl_mc_bus(mc_bus_dev);
++	if (mutex_trylock(&mc_bus->scan_mutex)) {
++		err = dprc_scan_objects(mc_bus_dev, true);
++		mutex_unlock(&mc_bus->scan_mutex);
+ 	}
++	if (err < 0)
++		return ERR_PTR(err);
+ 
+ 	endpoint = fsl_mc_device_lookup(&endpoint_desc, mc_bus_dev);
+ 	/*
+diff --git a/drivers/comedi/drivers/comedi_test.c b/drivers/comedi/drivers/comedi_test.c
+index 05ae9122823f80..e713ef611434da 100644
+--- a/drivers/comedi/drivers/comedi_test.c
++++ b/drivers/comedi/drivers/comedi_test.c
+@@ -790,7 +790,7 @@ static void waveform_detach(struct comedi_device *dev)
+ {
+ 	struct waveform_private *devpriv = dev->private;
+ 
+-	if (devpriv) {
++	if (devpriv && dev->n_subdevices) {
+ 		del_timer_sync(&devpriv->ai_timer);
+ 		del_timer_sync(&devpriv->ao_timer);
+ 	}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 24e41b42c638b0..8cf224fd4ff28a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -4954,6 +4954,8 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
+ 		dev->dev->power.disable_depth--;
+ #endif
+ 	}
++
++	amdgpu_vram_mgr_clear_reset_blocks(adev);
+ 	adev->in_suspend = false;
+ 
+ 	if (adev->enable_mes)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+index b7742fa74e1de2..3c883f1cf06878 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+@@ -153,6 +153,7 @@ int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr,
+ 				  uint64_t start, uint64_t size);
+ int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr,
+ 				      uint64_t start);
++void amdgpu_vram_mgr_clear_reset_blocks(struct amdgpu_device *adev);
+ 
+ bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
+ 			    struct ttm_resource *res);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+index 8f58ec6f140093..732c79e201c6e8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+@@ -782,6 +782,23 @@ uint64_t amdgpu_vram_mgr_vis_usage(struct amdgpu_vram_mgr *mgr)
+ 	return atomic64_read(&mgr->vis_usage);
+ }
+ 
++/**
++ * amdgpu_vram_mgr_clear_reset_blocks - reset clear blocks
++ *
++ * @adev: amdgpu device pointer
++ *
++ * Reset the cleared drm buddy blocks.
++ */
++void amdgpu_vram_mgr_clear_reset_blocks(struct amdgpu_device *adev)
++{
++	struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
++	struct drm_buddy *mm = &mgr->mm;
++
++	mutex_lock(&mgr->lock);
++	drm_buddy_reset_clear(mm, false);
++	mutex_unlock(&mgr->lock);
++}
++
+ /**
+  * amdgpu_vram_mgr_intersects - test each drm buddy block for intersection
+  *
+diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+index 5500767cda7e4f..4d17d1e1c38b4b 100644
+--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
++++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+@@ -1352,7 +1352,7 @@ static int ti_sn_bridge_probe(struct auxiliary_device *adev,
+ 			regmap_update_bits(pdata->regmap, SN_HPD_DISABLE_REG,
+ 					   HPD_DISABLE, 0);
+ 		mutex_unlock(&pdata->comms_mutex);
+-	};
++	}
+ 
+ 	drm_bridge_add(&pdata->bridge);
+ 
+diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c
+index ca42e6081d27c4..16dea3f2fb1188 100644
+--- a/drivers/gpu/drm/drm_buddy.c
++++ b/drivers/gpu/drm/drm_buddy.c
+@@ -400,6 +400,49 @@ drm_get_buddy(struct drm_buddy_block *block)
+ }
+ EXPORT_SYMBOL(drm_get_buddy);
+ 
++/**
++ * drm_buddy_reset_clear - reset blocks clear state
++ *
++ * @mm: DRM buddy manager
++ * @is_clear: blocks clear state
++ *
++ * Reset the clear state based on @is_clear value for each block
++ * in the freelist.
++ */
++void drm_buddy_reset_clear(struct drm_buddy *mm, bool is_clear)
++{
++	u64 root_size, size, start;
++	unsigned int order;
++	int i;
++
++	size = mm->size;
++	for (i = 0; i < mm->n_roots; ++i) {
++		order = ilog2(size) - ilog2(mm->chunk_size);
++		start = drm_buddy_block_offset(mm->roots[i]);
++		__force_merge(mm, start, start + size, order);
++
++		root_size = mm->chunk_size << order;
++		size -= root_size;
++	}
++
++	for (i = 0; i <= mm->max_order; ++i) {
++		struct drm_buddy_block *block;
++
++		list_for_each_entry_reverse(block, &mm->free_list[i], link) {
++			if (is_clear != drm_buddy_block_is_clear(block)) {
++				if (is_clear) {
++					mark_cleared(block);
++					mm->clear_avail += drm_buddy_block_size(mm, block);
++				} else {
++					clear_reset(block);
++					mm->clear_avail -= drm_buddy_block_size(mm, block);
++				}
++			}
++		}
++	}
++}
++EXPORT_SYMBOL(drm_buddy_reset_clear);
++
+ /**
+  * drm_buddy_free_block - free a block
+  *
+diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
+index ca9e0c730013d7..af80f1ac888064 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp.c
++++ b/drivers/gpu/drm/i915/display/intel_dp.c
+@@ -1506,6 +1506,12 @@ int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
+ void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
+ 			   u8 *link_bw, u8 *rate_select)
+ {
++	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
++
++	/* FIXME g4x can't generate an exact 2.7GHz with the 96MHz non-SSC refclk */
++	if (IS_G4X(i915) && port_clock == 268800)
++		port_clock = 270000;
++
+ 	/* eDP 1.4 rate select method. */
+ 	if (intel_dp->use_rate_select) {
+ 		*link_bw = 0;
+diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
+index c9c50e3b18a23e..3e75fc1f660722 100644
+--- a/drivers/gpu/drm/scheduler/sched_entity.c
++++ b/drivers/gpu/drm/scheduler/sched_entity.c
+@@ -368,17 +368,6 @@ void drm_sched_entity_destroy(struct drm_sched_entity *entity)
+ }
+ EXPORT_SYMBOL(drm_sched_entity_destroy);
+ 
+-/* drm_sched_entity_clear_dep - callback to clear the entities dependency */
+-static void drm_sched_entity_clear_dep(struct dma_fence *f,
+-				       struct dma_fence_cb *cb)
+-{
+-	struct drm_sched_entity *entity =
+-		container_of(cb, struct drm_sched_entity, cb);
+-
+-	entity->dependency = NULL;
+-	dma_fence_put(f);
+-}
+-
+ /*
+  * drm_sched_entity_clear_dep - callback to clear the entities dependency and
+  * wake up scheduler
+@@ -389,7 +378,8 @@ static void drm_sched_entity_wakeup(struct dma_fence *f,
+ 	struct drm_sched_entity *entity =
+ 		container_of(cb, struct drm_sched_entity, cb);
+ 
+-	drm_sched_entity_clear_dep(f, cb);
++	entity->dependency = NULL;
++	dma_fence_put(f);
+ 	drm_sched_wakeup(entity->rq->sched);
+ }
+ 
+@@ -442,13 +432,6 @@ static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
+ 		fence = dma_fence_get(&s_fence->scheduled);
+ 		dma_fence_put(entity->dependency);
+ 		entity->dependency = fence;
+-		if (!dma_fence_add_callback(fence, &entity->cb,
+-					    drm_sched_entity_clear_dep))
+-			return true;
+-
+-		/* Ignore it when it is already scheduled */
+-		dma_fence_put(fence);
+-		return false;
+ 	}
+ 
+ 	if (!dma_fence_add_callback(entity->dependency, &entity->cb,
+diff --git a/drivers/gpu/drm/xe/tests/xe_mocs.c b/drivers/gpu/drm/xe/tests/xe_mocs.c
+index 61a7d20ce42bfe..bf3f97d0c9c709 100644
+--- a/drivers/gpu/drm/xe/tests/xe_mocs.c
++++ b/drivers/gpu/drm/xe/tests/xe_mocs.c
+@@ -43,14 +43,12 @@ static void read_l3cc_table(struct xe_gt *gt,
+ {
+ 	struct kunit *test = kunit_get_current_test();
+ 	u32 l3cc, l3cc_expected;
+-	unsigned int fw_ref, i;
++	unsigned int i;
+ 	u32 reg_val;
++	u32 ret;
+ 
+-	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+-	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
+-		xe_force_wake_put(gt_to_fw(gt), fw_ref);
+-		KUNIT_ASSERT_TRUE_MSG(test, true, "Forcewake Failed.\n");
+-	}
++	ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
++	KUNIT_ASSERT_EQ_MSG(test, ret, 0, "Forcewake Failed.\n");
+ 
+ 	for (i = 0; i < info->num_mocs_regs; i++) {
+ 		if (!(i & 1)) {
+@@ -74,7 +72,7 @@ static void read_l3cc_table(struct xe_gt *gt,
+ 		KUNIT_EXPECT_EQ_MSG(test, l3cc_expected, l3cc,
+ 				    "l3cc idx=%u has incorrect val.\n", i);
+ 	}
+-	xe_force_wake_put(gt_to_fw(gt), fw_ref);
++	xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ }
+ 
+ static void read_mocs_table(struct xe_gt *gt,
+@@ -82,14 +80,15 @@ static void read_mocs_table(struct xe_gt *gt,
+ {
+ 	struct kunit *test = kunit_get_current_test();
+ 	u32 mocs, mocs_expected;
+-	unsigned int fw_ref, i;
++	unsigned int i;
+ 	u32 reg_val;
++	u32 ret;
+ 
+ 	KUNIT_EXPECT_TRUE_MSG(test, info->unused_entries_index,
+ 			      "Unused entries index should have been defined\n");
+ 
+-	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+-	KUNIT_ASSERT_NE_MSG(test, fw_ref, 0, "Forcewake Failed.\n");
++	ret = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
++	KUNIT_ASSERT_EQ_MSG(test, ret, 0, "Forcewake Failed.\n");
+ 
+ 	for (i = 0; i < info->num_mocs_regs; i++) {
+ 		if (regs_are_mcr(gt))
+@@ -107,7 +106,7 @@ static void read_mocs_table(struct xe_gt *gt,
+ 				    "mocs reg 0x%x has incorrect val.\n", i);
+ 	}
+ 
+-	xe_force_wake_put(gt_to_fw(gt), fw_ref);
++	xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+ }
+ 
+ static int mocs_kernel_test_run_device(struct xe_device *xe)
+diff --git a/drivers/gpu/drm/xe/xe_devcoredump.c b/drivers/gpu/drm/xe/xe_devcoredump.c
+index 8050938389b68f..e412a70323ccbd 100644
+--- a/drivers/gpu/drm/xe/xe_devcoredump.c
++++ b/drivers/gpu/drm/xe/xe_devcoredump.c
+@@ -197,7 +197,6 @@ static void xe_devcoredump_deferred_snap_work(struct work_struct *work)
+ 	struct xe_devcoredump_snapshot *ss = container_of(work, typeof(*ss), work);
+ 	struct xe_devcoredump *coredump = container_of(ss, typeof(*coredump), snapshot);
+ 	struct xe_device *xe = coredump_to_xe(coredump);
+-	unsigned int fw_ref;
+ 
+ 	/*
+ 	 * NB: Despite passing a GFP_ flags parameter here, more allocations are done
+@@ -211,12 +210,11 @@ static void xe_devcoredump_deferred_snap_work(struct work_struct *work)
+ 	xe_pm_runtime_get(xe);
+ 
+ 	/* keep going if fw fails as we still want to save the memory and SW data */
+-	fw_ref = xe_force_wake_get(gt_to_fw(ss->gt), XE_FORCEWAKE_ALL);
+-	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL))
++	if (xe_force_wake_get(gt_to_fw(ss->gt), XE_FORCEWAKE_ALL))
+ 		xe_gt_info(ss->gt, "failed to get forcewake for coredump capture\n");
+ 	xe_vm_snapshot_capture_delayed(ss->vm);
+ 	xe_guc_exec_queue_snapshot_capture_delayed(ss->ge);
+-	xe_force_wake_put(gt_to_fw(ss->gt), fw_ref);
++	xe_force_wake_put(gt_to_fw(ss->gt), XE_FORCEWAKE_ALL);
+ 
+ 	xe_pm_runtime_put(xe);
+ 
+@@ -243,9 +241,8 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump,
+ 	u32 width_mask = (0x1 << q->width) - 1;
+ 	const char *process_name = "no process";
+ 
+-	unsigned int fw_ref;
+-	bool cookie;
+ 	int i;
++	bool cookie;
+ 
+ 	ss->snapshot_time = ktime_get_real();
+ 	ss->boot_time = ktime_get_boottime();
+@@ -268,7 +265,8 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump,
+ 	}
+ 
+ 	/* keep going if fw fails as we still want to save the memory and SW data */
+-	fw_ref = xe_force_wake_get(gt_to_fw(q->gt), XE_FORCEWAKE_ALL);
++	if (xe_force_wake_get(gt_to_fw(q->gt), XE_FORCEWAKE_ALL))
++		xe_gt_info(ss->gt, "failed to get forcewake for coredump capture\n");
+ 
+ 	ss->ct = xe_guc_ct_snapshot_capture(&guc->ct, true);
+ 	ss->ge = xe_guc_exec_queue_snapshot_capture(q);
+@@ -286,7 +284,7 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump,
+ 
+ 	queue_work(system_unbound_wq, &ss->work);
+ 
+-	xe_force_wake_put(gt_to_fw(q->gt), fw_ref);
++	xe_force_wake_put(gt_to_fw(q->gt), XE_FORCEWAKE_ALL);
+ 	dma_fence_end_signalling(cookie);
+ }
+ 
+diff --git a/drivers/gpu/drm/xe/xe_force_wake.h b/drivers/gpu/drm/xe/xe_force_wake.h
+index 1608a55edc846e..a2577672f4e3e6 100644
+--- a/drivers/gpu/drm/xe/xe_force_wake.h
++++ b/drivers/gpu/drm/xe/xe_force_wake.h
+@@ -46,20 +46,4 @@ xe_force_wake_assert_held(struct xe_force_wake *fw,
+ 	xe_gt_assert(fw->gt, fw->awake_domains & domain);
+ }
+ 
+-/**
+- * xe_force_wake_ref_has_domain - verifies if the domains are in fw_ref
+- * @fw_ref : the force_wake reference
+- * @domain : forcewake domain to verify
+- *
+- * This function confirms whether the @fw_ref includes a reference to the
+- * specified @domain.
+- *
+- * Return: true if domain is refcounted.
+- */
+-static inline bool
+-xe_force_wake_ref_has_domain(unsigned int fw_ref, enum xe_force_wake_domains domain)
+-{
+-	return fw_ref & domain;
+-}
+-
+ #endif
+diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
+index 30ec13cb5b6d8e..3b53d46aad54a7 100644
+--- a/drivers/gpu/drm/xe/xe_gt.c
++++ b/drivers/gpu/drm/xe/xe_gt.c
+@@ -98,14 +98,14 @@ void xe_gt_sanitize(struct xe_gt *gt)
+ 
+ static void xe_gt_enable_host_l2_vram(struct xe_gt *gt)
+ {
+-	unsigned int fw_ref;
+ 	u32 reg;
++	int err;
+ 
+ 	if (!XE_WA(gt, 16023588340))
+ 		return;
+ 
+-	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+-	if (!fw_ref)
++	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
++	if (WARN_ON(err))
+ 		return;
+ 
+ 	if (!xe_gt_is_media_type(gt)) {
+@@ -115,13 +115,13 @@ static void xe_gt_enable_host_l2_vram(struct xe_gt *gt)
+ 	}
+ 
+ 	xe_gt_mcr_multicast_write(gt, XEHPC_L3CLOS_MASK(3), 0xF);
+-	xe_force_wake_put(gt_to_fw(gt), fw_ref);
++	xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+ }
+ 
+ static void xe_gt_disable_host_l2_vram(struct xe_gt *gt)
+ {
+-	unsigned int fw_ref;
+ 	u32 reg;
++	int err;
+ 
+ 	if (!XE_WA(gt, 16023588340))
+ 		return;
+@@ -129,15 +129,15 @@ static void xe_gt_disable_host_l2_vram(struct xe_gt *gt)
+ 	if (xe_gt_is_media_type(gt))
+ 		return;
+ 
+-	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+-	if (!fw_ref)
++	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
++	if (WARN_ON(err))
+ 		return;
+ 
+ 	reg = xe_gt_mcr_unicast_read_any(gt, XE2_GAMREQSTRM_CTRL);
+ 	reg &= ~CG_DIS_CNTLBUS;
+ 	xe_gt_mcr_multicast_write(gt, XE2_GAMREQSTRM_CTRL, reg);
+ 
+-	xe_force_wake_put(gt_to_fw(gt), fw_ref);
++	xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+ }
+ 
+ /**
+@@ -407,14 +407,11 @@ static void dump_pat_on_error(struct xe_gt *gt)
+ 
+ static int gt_fw_domain_init(struct xe_gt *gt)
+ {
+-	unsigned int fw_ref;
+ 	int err, i;
+ 
+-	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+-	if (!fw_ref) {
+-		err = -ETIMEDOUT;
++	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
++	if (err)
+ 		goto err_hw_fence_irq;
+-	}
+ 
+ 	if (!xe_gt_is_media_type(gt)) {
+ 		err = xe_ggtt_init(gt_to_tile(gt)->mem.ggtt);
+@@ -449,12 +446,14 @@ static int gt_fw_domain_init(struct xe_gt *gt)
+ 	 */
+ 	gt->info.gmdid = xe_mmio_read32(gt, GMD_ID);
+ 
+-	xe_force_wake_put(gt_to_fw(gt), fw_ref);
++	err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
++	XE_WARN_ON(err);
++
+ 	return 0;
+ 
+ err_force_wake:
+ 	dump_pat_on_error(gt);
+-	xe_force_wake_put(gt_to_fw(gt), fw_ref);
++	xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+ err_hw_fence_irq:
+ 	for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i)
+ 		xe_hw_fence_irq_finish(&gt->fence_irq[i]);
+@@ -464,14 +463,11 @@ static int gt_fw_domain_init(struct xe_gt *gt)
+ 
+ static int all_fw_domain_init(struct xe_gt *gt)
+ {
+-	unsigned int fw_ref;
+ 	int err, i;
+ 
+-	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+-	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
+-		err = -ETIMEDOUT;
+-		goto err_force_wake;
+-	}
++	err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
++	if (err)
++		goto err_hw_fence_irq;
+ 
+ 	xe_gt_mcr_set_implicit_defaults(gt);
+ 	xe_wa_process_gt(gt);
+@@ -537,12 +533,14 @@ static int all_fw_domain_init(struct xe_gt *gt)
+ 	if (IS_SRIOV_PF(gt_to_xe(gt)))
+ 		xe_gt_sriov_pf_init_hw(gt);
+ 
+-	xe_force_wake_put(gt_to_fw(gt), fw_ref);
++	err = xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
++	XE_WARN_ON(err);
+ 
+ 	return 0;
+ 
+ err_force_wake:
+-	xe_force_wake_put(gt_to_fw(gt), fw_ref);
++	xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
++err_hw_fence_irq:
+ 	for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i)
+ 		xe_hw_fence_irq_finish(&gt->fence_irq[i]);
+ 
+@@ -555,12 +553,11 @@ static int all_fw_domain_init(struct xe_gt *gt)
+  */
+ int xe_gt_init_hwconfig(struct xe_gt *gt)
+ {
+-	unsigned int fw_ref;
+ 	int err;
+ 
+-	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+-	if (!fw_ref)
+-		return -ETIMEDOUT;
++	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
++	if (err)
++		goto out;
+ 
+ 	xe_gt_mcr_init_early(gt);
+ 	xe_pat_init(gt);
+@@ -578,7 +575,8 @@ int xe_gt_init_hwconfig(struct xe_gt *gt)
+ 	xe_gt_enable_host_l2_vram(gt);
+ 
+ out_fw:
+-	xe_force_wake_put(gt_to_fw(gt), fw_ref);
++	xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
++out:
+ 	return err;
+ }
+ 
+@@ -746,7 +744,6 @@ static int do_gt_restart(struct xe_gt *gt)
+ 
+ static int gt_reset(struct xe_gt *gt)
+ {
+-	unsigned int fw_ref;
+ 	int err;
+ 
+ 	if (xe_device_wedged(gt_to_xe(gt)))
+@@ -767,11 +764,9 @@ static int gt_reset(struct xe_gt *gt)
+ 
+ 	xe_gt_sanitize(gt);
+ 
+-	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+-	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
+-		err = -ETIMEDOUT;
+-		goto err_out;
+-	}
++	err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
++	if (err)
++		goto err_msg;
+ 
+ 	if (IS_SRIOV_PF(gt_to_xe(gt)))
+ 		xe_gt_sriov_pf_stop_prepare(gt);
+@@ -792,7 +787,8 @@ static int gt_reset(struct xe_gt *gt)
+ 	if (err)
+ 		goto err_out;
+ 
+-	xe_force_wake_put(gt_to_fw(gt), fw_ref);
++	err = xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
++	XE_WARN_ON(err);
+ 	xe_pm_runtime_put(gt_to_xe(gt));
+ 
+ 	xe_gt_info(gt, "reset done\n");
+@@ -800,7 +796,8 @@ static int gt_reset(struct xe_gt *gt)
+ 	return 0;
+ 
+ err_out:
+-	xe_force_wake_put(gt_to_fw(gt), fw_ref);
++	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
++err_msg:
+ 	XE_WARN_ON(xe_uc_start(&gt->uc));
+ err_fail:
+ 	xe_gt_err(gt, "reset failed (%pe)\n", ERR_PTR(err));
+@@ -832,25 +829,22 @@ void xe_gt_reset_async(struct xe_gt *gt)
+ 
+ void xe_gt_suspend_prepare(struct xe_gt *gt)
+ {
+-	unsigned int fw_ref;
+-
+-	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
++	XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL));
+ 
+ 	xe_uc_suspend_prepare(&gt->uc);
+ 
+-	xe_force_wake_put(gt_to_fw(gt), fw_ref);
++	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
+ }
+ 
+ int xe_gt_suspend(struct xe_gt *gt)
+ {
+-	unsigned int fw_ref;
+ 	int err;
+ 
+ 	xe_gt_dbg(gt, "suspending\n");
+ 	xe_gt_sanitize(gt);
+ 
+-	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+-	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL))
++	err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
++	if (err)
+ 		goto err_msg;
+ 
+ 	err = xe_uc_suspend(&gt->uc);
+@@ -861,15 +855,14 @@ int xe_gt_suspend(struct xe_gt *gt)
+ 
+ 	xe_gt_disable_host_l2_vram(gt);
+ 
+-	xe_force_wake_put(gt_to_fw(gt), fw_ref);
++	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
+ 	xe_gt_dbg(gt, "suspended\n");
+ 
+ 	return 0;
+ 
+-err_msg:
+-	err = -ETIMEDOUT;
+ err_force_wake:
+-	xe_force_wake_put(gt_to_fw(gt), fw_ref);
++	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
++err_msg:
+ 	xe_gt_err(gt, "suspend failed (%pe)\n", ERR_PTR(err));
+ 
+ 	return err;
+@@ -877,11 +870,9 @@ int xe_gt_suspend(struct xe_gt *gt)
+ 
+ void xe_gt_shutdown(struct xe_gt *gt)
+ {
+-	unsigned int fw_ref;
+-
+-	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
++	xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ 	do_gt_reset(gt);
+-	xe_force_wake_put(gt_to_fw(gt), fw_ref);
++	xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ }
+ 
+ /**
+@@ -906,12 +897,11 @@ int xe_gt_sanitize_freq(struct xe_gt *gt)
+ 
+ int xe_gt_resume(struct xe_gt *gt)
+ {
+-	unsigned int fw_ref;
+ 	int err;
+ 
+ 	xe_gt_dbg(gt, "resuming\n");
+-	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+-	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL))
++	err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
++	if (err)
+ 		goto err_msg;
+ 
+ 	err = do_gt_restart(gt);
+@@ -920,15 +910,14 @@ int xe_gt_resume(struct xe_gt *gt)
+ 
+ 	xe_gt_idle_enable_pg(gt);
+ 
+-	xe_force_wake_put(gt_to_fw(gt), fw_ref);
++	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
+ 	xe_gt_dbg(gt, "resumed\n");
+ 
+ 	return 0;
+ 
+-err_msg:
+-	err = -ETIMEDOUT;
+ err_force_wake:
+-	xe_force_wake_put(gt_to_fw(gt), fw_ref);
++	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
++err_msg:
+ 	xe_gt_err(gt, "resume failed (%pe)\n", ERR_PTR(err));
+ 
+ 	return err;
+diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
+index 1f519e925f0601..616e63fb2f1510 100644
+--- a/drivers/hv/vmbus_drv.c
++++ b/drivers/hv/vmbus_drv.c
+@@ -1810,7 +1810,6 @@ static struct bin_attribute chan_attr_ring_buffer = {
+ 		.name = "ring",
+ 		.mode = 0600,
+ 	},
+-	.size = 2 * SZ_2M,
+ 	.mmap = hv_mmap_ring_buffer_wrapper,
+ };
+ static struct attribute *vmbus_chan_attrs[] = {
+@@ -1866,6 +1865,7 @@ static umode_t vmbus_chan_bin_attr_is_visible(struct kobject *kobj,
+ 	/* Hide ring attribute if channel's ring_sysfs_visible is set to false */
+ 	if (attr ==  &chan_attr_ring_buffer && !channel->ring_sysfs_visible)
+ 		return 0;
++	attr->size = channel->ringbuffer_pagecount << PAGE_SHIFT;
+ 
+ 	return attr->attr.mode;
+ }
+diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c
+index eb97abcb4cd330..592b754f48e923 100644
+--- a/drivers/i2c/busses/i2c-qup.c
++++ b/drivers/i2c/busses/i2c-qup.c
+@@ -452,8 +452,10 @@ static int qup_i2c_bus_active(struct qup_i2c_dev *qup, int len)
+ 		if (!(status & I2C_STATUS_BUS_ACTIVE))
+ 			break;
+ 
+-		if (time_after(jiffies, timeout))
++		if (time_after(jiffies, timeout)) {
+ 			ret = -ETIMEDOUT;
++			break;
++		}
+ 
+ 		usleep_range(len, len * 2);
+ 	}
+diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
+index 89ce8a62b37c62..fbab82d457fb0e 100644
+--- a/drivers/i2c/busses/i2c-tegra.c
++++ b/drivers/i2c/busses/i2c-tegra.c
+@@ -607,7 +607,6 @@ static int tegra_i2c_wait_for_config_load(struct tegra_i2c_dev *i2c_dev)
+ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
+ {
+ 	u32 val, clk_divisor, clk_multiplier, tsu_thd, tlow, thigh, non_hs_mode;
+-	acpi_handle handle = ACPI_HANDLE(i2c_dev->dev);
+ 	struct i2c_timings *t = &i2c_dev->timings;
+ 	int err;
+ 
+@@ -619,11 +618,7 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
+ 	 * emit a noisy warning on error, which won't stay unnoticed and
+ 	 * won't hose machine entirely.
+ 	 */
+-	if (handle)
+-		err = acpi_evaluate_object(handle, "_RST", NULL, NULL);
+-	else
+-		err = reset_control_reset(i2c_dev->rst);
+-
++	err = device_reset(i2c_dev->dev);
+ 	WARN_ON_ONCE(err);
+ 
+ 	if (IS_DVC(i2c_dev))
+@@ -1666,19 +1661,6 @@ static void tegra_i2c_parse_dt(struct tegra_i2c_dev *i2c_dev)
+ 		i2c_dev->is_vi = true;
+ }
+ 
+-static int tegra_i2c_init_reset(struct tegra_i2c_dev *i2c_dev)
+-{
+-	if (ACPI_HANDLE(i2c_dev->dev))
+-		return 0;
+-
+-	i2c_dev->rst = devm_reset_control_get_exclusive(i2c_dev->dev, "i2c");
+-	if (IS_ERR(i2c_dev->rst))
+-		return dev_err_probe(i2c_dev->dev, PTR_ERR(i2c_dev->rst),
+-				      "failed to get reset control\n");
+-
+-	return 0;
+-}
+-
+ static int tegra_i2c_init_clocks(struct tegra_i2c_dev *i2c_dev)
+ {
+ 	int err;
+@@ -1788,10 +1770,6 @@ static int tegra_i2c_probe(struct platform_device *pdev)
+ 
+ 	tegra_i2c_parse_dt(i2c_dev);
+ 
+-	err = tegra_i2c_init_reset(i2c_dev);
+-	if (err)
+-		return err;
+-
+ 	err = tegra_i2c_init_clocks(i2c_dev);
+ 	if (err)
+ 		return err;
+diff --git a/drivers/i2c/busses/i2c-virtio.c b/drivers/i2c/busses/i2c-virtio.c
+index 2a351f961b8993..c8c40ff9765da3 100644
+--- a/drivers/i2c/busses/i2c-virtio.c
++++ b/drivers/i2c/busses/i2c-virtio.c
+@@ -116,15 +116,16 @@ static int virtio_i2c_complete_reqs(struct virtqueue *vq,
+ 	for (i = 0; i < num; i++) {
+ 		struct virtio_i2c_req *req = &reqs[i];
+ 
+-		wait_for_completion(&req->completion);
+-
+-		if (!failed && req->in_hdr.status != VIRTIO_I2C_MSG_OK)
+-			failed = true;
++		if (!failed) {
++			if (wait_for_completion_interruptible(&req->completion))
++				failed = true;
++			else if (req->in_hdr.status != VIRTIO_I2C_MSG_OK)
++				failed = true;
++			else
++				j++;
++		}
+ 
+ 		i2c_put_dma_safe_msg_buf(reqs[i].buf, &msgs[i], !failed);
+-
+-		if (!failed)
+-			j++;
+ 	}
+ 
+ 	return j;
+diff --git a/drivers/iio/adc/ad7949.c b/drivers/iio/adc/ad7949.c
+index edd0c3a35ab73c..202561cad4012b 100644
+--- a/drivers/iio/adc/ad7949.c
++++ b/drivers/iio/adc/ad7949.c
+@@ -308,7 +308,6 @@ static void ad7949_disable_reg(void *reg)
+ 
+ static int ad7949_spi_probe(struct spi_device *spi)
+ {
+-	u32 spi_ctrl_mask = spi->controller->bits_per_word_mask;
+ 	struct device *dev = &spi->dev;
+ 	const struct ad7949_adc_spec *spec;
+ 	struct ad7949_adc_chip *ad7949_adc;
+@@ -337,11 +336,11 @@ static int ad7949_spi_probe(struct spi_device *spi)
+ 	ad7949_adc->resolution = spec->resolution;
+ 
+ 	/* Set SPI bits per word */
+-	if (spi_ctrl_mask & SPI_BPW_MASK(ad7949_adc->resolution)) {
++	if (spi_is_bpw_supported(spi, ad7949_adc->resolution)) {
+ 		spi->bits_per_word = ad7949_adc->resolution;
+-	} else if (spi_ctrl_mask == SPI_BPW_MASK(16)) {
++	} else if (spi_is_bpw_supported(spi, 16)) {
+ 		spi->bits_per_word = 16;
+-	} else if (spi_ctrl_mask == SPI_BPW_MASK(8)) {
++	} else if (spi_is_bpw_supported(spi, 8)) {
+ 		spi->bits_per_word = 8;
+ 	} else {
+ 		dev_err(dev, "unable to find common BPW with spi controller\n");
+diff --git a/drivers/iio/light/hid-sensor-prox.c b/drivers/iio/light/hid-sensor-prox.c
+index 26c481d2998c1f..25901d91a613f4 100644
+--- a/drivers/iio/light/hid-sensor-prox.c
++++ b/drivers/iio/light/hid-sensor-prox.c
+@@ -102,8 +102,7 @@ static int prox_read_raw(struct iio_dev *indio_dev,
+ 		ret_type = prox_state->scale_precision;
+ 		break;
+ 	case IIO_CHAN_INFO_OFFSET:
+-		*val = hid_sensor_convert_exponent(
+-				prox_state->prox_attr.unit_expo);
++		*val = 0;
+ 		ret_type = IIO_VAL_INT;
+ 		break;
+ 	case IIO_CHAN_INFO_SAMP_FREQ:
+@@ -227,6 +226,11 @@ static int prox_parse_report(struct platform_device *pdev,
+ 	dev_dbg(&pdev->dev, "prox %x:%x\n", st->prox_attr.index,
+ 			st->prox_attr.report_id);
+ 
++	st->scale_precision = hid_sensor_format_scale(hsdev->usage,
++						      &st->prox_attr,
++						      &st->scale_pre_decml,
++						      &st->scale_post_decml);
++
+ 	return ret;
+ }
+ 
+diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
+index b7c078b7f7cfd4..a1291f475466df 100644
+--- a/drivers/infiniband/core/cache.c
++++ b/drivers/infiniband/core/cache.c
+@@ -582,8 +582,8 @@ static int __ib_cache_gid_add(struct ib_device *ib_dev, u32 port,
+ out_unlock:
+ 	mutex_unlock(&table->lock);
+ 	if (ret)
+-		pr_warn("%s: unable to add gid %pI6 error=%d\n",
+-			__func__, gid->raw, ret);
++		pr_warn_ratelimited("%s: unable to add gid %pI6 error=%d\n",
++				    __func__, gid->raw, ret);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
+index 9514f577995faf..cd14017e7df62b 100644
+--- a/drivers/input/keyboard/gpio_keys.c
++++ b/drivers/input/keyboard/gpio_keys.c
+@@ -488,7 +488,7 @@ static irqreturn_t gpio_keys_irq_isr(int irq, void *dev_id)
+ 	if (bdata->release_delay)
+ 		hrtimer_start(&bdata->release_timer,
+ 			      ms_to_ktime(bdata->release_delay),
+-			      HRTIMER_MODE_REL_HARD);
++			      HRTIMER_MODE_REL);
+ out:
+ 	return IRQ_HANDLED;
+ }
+@@ -633,7 +633,7 @@ static int gpio_keys_setup_key(struct platform_device *pdev,
+ 
+ 		bdata->release_delay = button->debounce_interval;
+ 		hrtimer_init(&bdata->release_timer,
+-			     CLOCK_REALTIME, HRTIMER_MODE_REL_HARD);
++			     CLOCK_REALTIME, HRTIMER_MODE_REL);
+ 		bdata->release_timer.function = gpio_keys_irq_timer;
+ 
+ 		isr = gpio_keys_irq_isr;
+diff --git a/drivers/interconnect/qcom/sc7280.c b/drivers/interconnect/qcom/sc7280.c
+index 167971f8e8bec3..fdb02a87e3124e 100644
+--- a/drivers/interconnect/qcom/sc7280.c
++++ b/drivers/interconnect/qcom/sc7280.c
+@@ -238,6 +238,7 @@ static struct qcom_icc_node xm_pcie3_1 = {
+ 	.id = SC7280_MASTER_PCIE_1,
+ 	.channels = 1,
+ 	.buswidth = 8,
++	.num_links = 1,
+ 	.links = { SC7280_SLAVE_ANOC_PCIE_GEM_NOC },
+ };
+ 
+diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
+index beafca6ba0df4d..275d34119acdcb 100644
+--- a/drivers/mtd/nand/raw/qcom_nandc.c
++++ b/drivers/mtd/nand/raw/qcom_nandc.c
+@@ -2858,7 +2858,12 @@ static int qcom_param_page_type_exec(struct nand_chip *chip,  const struct nand_
+ 	const struct nand_op_instr *instr = NULL;
+ 	unsigned int op_id = 0;
+ 	unsigned int len = 0;
+-	int ret;
++	int ret, reg_base;
++
++	reg_base = NAND_READ_LOCATION_0;
++
++	if (nandc->props->qpic_v2)
++		reg_base = NAND_READ_LOCATION_LAST_CW_0;
+ 
+ 	ret = qcom_parse_instructions(chip, subop, &q_op);
+ 	if (ret)
+@@ -2910,7 +2915,10 @@ static int qcom_param_page_type_exec(struct nand_chip *chip,  const struct nand_
+ 	op_id = q_op.data_instr_idx;
+ 	len = nand_subop_get_data_len(subop, op_id);
+ 
+-	nandc_set_read_loc(chip, 0, 0, 0, len, 1);
++	if (nandc->props->qpic_v2)
++		nandc_set_read_loc_last(chip, reg_base, 0, len, 1);
++	else
++		nandc_set_read_loc_first(chip, reg_base, 0, len, 1);
+ 
+ 	if (!nandc->props->qpic_v2) {
+ 		write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1, 0);
+diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c
+index 681643ab37804e..63e4a495b1371d 100644
+--- a/drivers/net/can/dev/dev.c
++++ b/drivers/net/can/dev/dev.c
+@@ -147,13 +147,16 @@ void can_change_state(struct net_device *dev, struct can_frame *cf,
+ EXPORT_SYMBOL_GPL(can_change_state);
+ 
+ /* CAN device restart for bus-off recovery */
+-static void can_restart(struct net_device *dev)
++static int can_restart(struct net_device *dev)
+ {
+ 	struct can_priv *priv = netdev_priv(dev);
+ 	struct sk_buff *skb;
+ 	struct can_frame *cf;
+ 	int err;
+ 
++	if (!priv->do_set_mode)
++		return -EOPNOTSUPP;
++
+ 	if (netif_carrier_ok(dev))
+ 		netdev_err(dev, "Attempt to restart for bus-off recovery, but carrier is OK?\n");
+ 
+@@ -175,10 +178,14 @@ static void can_restart(struct net_device *dev)
+ 	if (err) {
+ 		netdev_err(dev, "Restart failed, error %pe\n", ERR_PTR(err));
+ 		netif_carrier_off(dev);
++
++		return err;
+ 	} else {
+ 		netdev_dbg(dev, "Restarted\n");
+ 		priv->can_stats.restarts++;
+ 	}
++
++	return 0;
+ }
+ 
+ static void can_restart_work(struct work_struct *work)
+@@ -203,9 +210,8 @@ int can_restart_now(struct net_device *dev)
+ 		return -EBUSY;
+ 
+ 	cancel_delayed_work_sync(&priv->restart_work);
+-	can_restart(dev);
+ 
+-	return 0;
++	return can_restart(dev);
+ }
+ 
+ /* CAN bus-off
+diff --git a/drivers/net/can/dev/netlink.c b/drivers/net/can/dev/netlink.c
+index 01aacdcda26066..abe8dc051d94f1 100644
+--- a/drivers/net/can/dev/netlink.c
++++ b/drivers/net/can/dev/netlink.c
+@@ -285,6 +285,12 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
+ 	}
+ 
+ 	if (data[IFLA_CAN_RESTART_MS]) {
++		if (!priv->do_set_mode) {
++			NL_SET_ERR_MSG(extack,
++				       "Device doesn't support restart from Bus Off");
++			return -EOPNOTSUPP;
++		}
++
+ 		/* Do not allow changing restart delay while running */
+ 		if (dev->flags & IFF_UP)
+ 			return -EBUSY;
+@@ -292,6 +298,12 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
+ 	}
+ 
+ 	if (data[IFLA_CAN_RESTART]) {
++		if (!priv->do_set_mode) {
++			NL_SET_ERR_MSG(extack,
++				       "Device doesn't support restart from Bus Off");
++			return -EOPNOTSUPP;
++		}
++
+ 		/* Do not allow a restart while not running */
+ 		if (!(dev->flags & IFF_UP))
+ 			return -EINVAL;
+diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+index efd0048acd3b2d..c744e10e640339 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+@@ -4655,12 +4655,19 @@ static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv)
+ 		return PTR_ERR(dpmac_dev);
+ 	}
+ 
+-	if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
++	if (IS_ERR(dpmac_dev))
+ 		return 0;
+ 
++	if (dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type) {
++		err = 0;
++		goto out_put_device;
++	}
++
+ 	mac = kzalloc(sizeof(struct dpaa2_mac), GFP_KERNEL);
+-	if (!mac)
+-		return -ENOMEM;
++	if (!mac) {
++		err = -ENOMEM;
++		goto out_put_device;
++	}
+ 
+ 	mac->mc_dev = dpmac_dev;
+ 	mac->mc_io = priv->mc_io;
+@@ -4694,6 +4701,8 @@ static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv)
+ 	dpaa2_mac_close(mac);
+ err_free_mac:
+ 	kfree(mac);
++out_put_device:
++	put_device(&dpmac_dev->dev);
+ 	return err;
+ }
+ 
+diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+index a293b08f36d46d..cbd3859ea475bf 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+@@ -1447,12 +1447,19 @@ static int dpaa2_switch_port_connect_mac(struct ethsw_port_priv *port_priv)
+ 	if (PTR_ERR(dpmac_dev) == -EPROBE_DEFER)
+ 		return PTR_ERR(dpmac_dev);
+ 
+-	if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
++	if (IS_ERR(dpmac_dev))
+ 		return 0;
+ 
++	if (dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type) {
++		err = 0;
++		goto out_put_device;
++	}
++
+ 	mac = kzalloc(sizeof(*mac), GFP_KERNEL);
+-	if (!mac)
+-		return -ENOMEM;
++	if (!mac) {
++		err = -ENOMEM;
++		goto out_put_device;
++	}
+ 
+ 	mac->mc_dev = dpmac_dev;
+ 	mac->mc_io = port_priv->ethsw_data->mc_io;
+@@ -1482,6 +1489,8 @@ static int dpaa2_switch_port_connect_mac(struct ethsw_port_priv *port_priv)
+ 	dpaa2_mac_close(mac);
+ err_free_mac:
+ 	kfree(mac);
++out_put_device:
++	put_device(&dpmac_dev->dev);
+ 	return err;
+ }
+ 
+diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
+index 14f39d1f59d361..8ea3c7493663fc 100644
+--- a/drivers/net/ethernet/google/gve/gve_main.c
++++ b/drivers/net/ethernet/google/gve/gve_main.c
+@@ -1972,49 +1972,56 @@ static void gve_turnup_and_check_status(struct gve_priv *priv)
+ 	gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status);
+ }
+ 
+-static void gve_tx_timeout(struct net_device *dev, unsigned int txqueue)
++static struct gve_notify_block *gve_get_tx_notify_block(struct gve_priv *priv,
++							unsigned int txqueue)
+ {
+-	struct gve_notify_block *block;
+-	struct gve_tx_ring *tx = NULL;
+-	struct gve_priv *priv;
+-	u32 last_nic_done;
+-	u32 current_time;
+ 	u32 ntfy_idx;
+ 
+-	netdev_info(dev, "Timeout on tx queue, %d", txqueue);
+-	priv = netdev_priv(dev);
+ 	if (txqueue > priv->tx_cfg.num_queues)
+-		goto reset;
++		return NULL;
+ 
+ 	ntfy_idx = gve_tx_idx_to_ntfy(priv, txqueue);
+ 	if (ntfy_idx >= priv->num_ntfy_blks)
+-		goto reset;
++		return NULL;
++
++	return &priv->ntfy_blocks[ntfy_idx];
++}
++
++static bool gve_tx_timeout_try_q_kick(struct gve_priv *priv,
++				      unsigned int txqueue)
++{
++	struct gve_notify_block *block;
++	u32 current_time;
+ 
+-	block = &priv->ntfy_blocks[ntfy_idx];
+-	tx = block->tx;
++	block = gve_get_tx_notify_block(priv, txqueue);
++
++	if (!block)
++		return false;
+ 
+ 	current_time = jiffies_to_msecs(jiffies);
+-	if (tx->last_kick_msec + MIN_TX_TIMEOUT_GAP > current_time)
+-		goto reset;
++	if (block->tx->last_kick_msec + MIN_TX_TIMEOUT_GAP > current_time)
++		return false;
+ 
+-	/* Check to see if there are missed completions, which will allow us to
+-	 * kick the queue.
+-	 */
+-	last_nic_done = gve_tx_load_event_counter(priv, tx);
+-	if (last_nic_done - tx->done) {
+-		netdev_info(dev, "Kicking queue %d", txqueue);
+-		iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block));
+-		napi_schedule(&block->napi);
+-		tx->last_kick_msec = current_time;
+-		goto out;
+-	} // Else reset.
++	netdev_info(priv->dev, "Kicking queue %d", txqueue);
++	napi_schedule(&block->napi);
++	block->tx->last_kick_msec = current_time;
++	return true;
++}
+ 
+-reset:
+-	gve_schedule_reset(priv);
++static void gve_tx_timeout(struct net_device *dev, unsigned int txqueue)
++{
++	struct gve_notify_block *block;
++	struct gve_priv *priv;
+ 
+-out:
+-	if (tx)
+-		tx->queue_timeout++;
++	netdev_info(dev, "Timeout on tx queue, %d", txqueue);
++	priv = netdev_priv(dev);
++
++	if (!gve_tx_timeout_try_q_kick(priv, txqueue))
++		gve_schedule_reset(priv);
++
++	block = gve_get_tx_notify_block(priv, txqueue);
++	if (block)
++		block->tx->queue_timeout++;
+ 	priv->tx_timeo_cnt++;
+ }
+ 
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+index 24062a40a7793f..94432e237640d9 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -11,6 +11,7 @@
+ #include <linux/irq.h>
+ #include <linux/ip.h>
+ #include <linux/ipv6.h>
++#include <linux/iommu.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+ #include <linux/skbuff.h>
+@@ -1039,6 +1040,8 @@ static bool hns3_can_use_tx_sgl(struct hns3_enet_ring *ring,
+ static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring)
+ {
+ 	u32 alloc_size = ring->tqp->handle->kinfo.tx_spare_buf_size;
++	struct net_device *netdev = ring_to_netdev(ring);
++	struct hns3_nic_priv *priv = netdev_priv(netdev);
+ 	struct hns3_tx_spare *tx_spare;
+ 	struct page *page;
+ 	dma_addr_t dma;
+@@ -1080,6 +1083,7 @@ static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring)
+ 	tx_spare->buf = page_address(page);
+ 	tx_spare->len = PAGE_SIZE << order;
+ 	ring->tx_spare = tx_spare;
++	ring->tx_copybreak = priv->tx_copybreak;
+ 	return;
+ 
+ dma_mapping_error:
+@@ -4874,6 +4878,30 @@ static void hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv)
+ 	devm_kfree(&pdev->dev, priv->tqp_vector);
+ }
+ 
++static void hns3_update_tx_spare_buf_config(struct hns3_nic_priv *priv)
++{
++#define HNS3_MIN_SPARE_BUF_SIZE (2 * 1024 * 1024)
++#define HNS3_MAX_PACKET_SIZE (64 * 1024)
++
++	struct iommu_domain *domain = iommu_get_domain_for_dev(priv->dev);
++	struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(priv->ae_handle);
++	struct hnae3_handle *handle = priv->ae_handle;
++
++	if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3)
++		return;
++
++	if (!(domain && iommu_is_dma_domain(domain)))
++		return;
++
++	priv->min_tx_copybreak = HNS3_MAX_PACKET_SIZE;
++	priv->min_tx_spare_buf_size = HNS3_MIN_SPARE_BUF_SIZE;
++
++	if (priv->tx_copybreak < priv->min_tx_copybreak)
++		priv->tx_copybreak = priv->min_tx_copybreak;
++	if (handle->kinfo.tx_spare_buf_size < priv->min_tx_spare_buf_size)
++		handle->kinfo.tx_spare_buf_size = priv->min_tx_spare_buf_size;
++}
++
+ static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
+ 			      unsigned int ring_type)
+ {
+@@ -5107,6 +5135,7 @@ int hns3_init_all_ring(struct hns3_nic_priv *priv)
+ 	int i, j;
+ 	int ret;
+ 
++	hns3_update_tx_spare_buf_config(priv);
+ 	for (i = 0; i < ring_num; i++) {
+ 		ret = hns3_alloc_ring_memory(&priv->ring[i]);
+ 		if (ret) {
+@@ -5311,6 +5340,8 @@ static int hns3_client_init(struct hnae3_handle *handle)
+ 	priv->ae_handle = handle;
+ 	priv->tx_timeout_count = 0;
+ 	priv->max_non_tso_bd_num = ae_dev->dev_specs.max_non_tso_bd_num;
++	priv->min_tx_copybreak = 0;
++	priv->min_tx_spare_buf_size = 0;
+ 	set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
+ 
+ 	handle->msg_enable = netif_msg_init(debug, DEFAULT_MSG_LEVEL);
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+index d36c4ed16d8dd2..caf7a4df858527 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+@@ -596,6 +596,8 @@ struct hns3_nic_priv {
+ 	struct hns3_enet_coalesce rx_coal;
+ 	u32 tx_copybreak;
+ 	u32 rx_copybreak;
++	u32 min_tx_copybreak;
++	u32 min_tx_spare_buf_size;
+ };
+ 
+ union l3_hdr_info {
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+index 06eedf80cfac4f..407ad0b985b4f2 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -9586,33 +9586,36 @@ static bool hclge_need_enable_vport_vlan_filter(struct hclge_vport *vport)
+ 	return false;
+ }
+ 
+-int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en)
++static int __hclge_enable_vport_vlan_filter(struct hclge_vport *vport,
++					    bool request_en)
+ {
+-	struct hclge_dev *hdev = vport->back;
+ 	bool need_en;
+ 	int ret;
+ 
+-	mutex_lock(&hdev->vport_lock);
+-
+-	vport->req_vlan_fltr_en = request_en;
+-
+ 	need_en = hclge_need_enable_vport_vlan_filter(vport);
+-	if (need_en == vport->cur_vlan_fltr_en) {
+-		mutex_unlock(&hdev->vport_lock);
++	if (need_en == vport->cur_vlan_fltr_en)
+ 		return 0;
+-	}
+ 
+ 	ret = hclge_set_vport_vlan_filter(vport, need_en);
+-	if (ret) {
+-		mutex_unlock(&hdev->vport_lock);
++	if (ret)
+ 		return ret;
+-	}
+ 
+ 	vport->cur_vlan_fltr_en = need_en;
+ 
++	return 0;
++}
++
++int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en)
++{
++	struct hclge_dev *hdev = vport->back;
++	int ret;
++
++	mutex_lock(&hdev->vport_lock);
++	vport->req_vlan_fltr_en = request_en;
++	ret = __hclge_enable_vport_vlan_filter(vport, request_en);
+ 	mutex_unlock(&hdev->vport_lock);
+ 
+-	return 0;
++	return ret;
+ }
+ 
+ static int hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
+@@ -10633,16 +10636,19 @@ static void hclge_sync_vlan_fltr_state(struct hclge_dev *hdev)
+ 					&vport->state))
+ 			continue;
+ 
+-		ret = hclge_enable_vport_vlan_filter(vport,
+-						     vport->req_vlan_fltr_en);
++		mutex_lock(&hdev->vport_lock);
++		ret = __hclge_enable_vport_vlan_filter(vport,
++						       vport->req_vlan_fltr_en);
+ 		if (ret) {
+ 			dev_err(&hdev->pdev->dev,
+ 				"failed to sync vlan filter state for vport%u, ret = %d\n",
+ 				vport->vport_id, ret);
+ 			set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
+ 				&vport->state);
++			mutex_unlock(&hdev->vport_lock);
+ 			return;
+ 		}
++		mutex_unlock(&hdev->vport_lock);
+ 	}
+ }
+ 
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
+index 0ffda5146bae58..16ef5a584af36b 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
+@@ -496,14 +496,14 @@ int hclge_ptp_init(struct hclge_dev *hdev)
+ 	if (ret) {
+ 		dev_err(&hdev->pdev->dev,
+ 			"failed to init freq, ret = %d\n", ret);
+-		goto out;
++		goto out_clear_int;
+ 	}
+ 
+ 	ret = hclge_ptp_set_ts_mode(hdev, &hdev->ptp->ts_cfg);
+ 	if (ret) {
+ 		dev_err(&hdev->pdev->dev,
+ 			"failed to init ts mode, ret = %d\n", ret);
+-		goto out;
++		goto out_clear_int;
+ 	}
+ 
+ 	ktime_get_real_ts64(&ts);
+@@ -511,7 +511,7 @@ int hclge_ptp_init(struct hclge_dev *hdev)
+ 	if (ret) {
+ 		dev_err(&hdev->pdev->dev,
+ 			"failed to init ts time, ret = %d\n", ret);
+-		goto out;
++		goto out_clear_int;
+ 	}
+ 
+ 	set_bit(HCLGE_STATE_PTP_EN, &hdev->state);
+@@ -519,6 +519,9 @@ int hclge_ptp_init(struct hclge_dev *hdev)
+ 
+ 	return 0;
+ 
++out_clear_int:
++	clear_bit(HCLGE_PTP_FLAG_EN, &hdev->ptp->flags);
++	hclge_ptp_int_en(hdev, false);
+ out:
+ 	hclge_ptp_destroy_clock(hdev);
+ 
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+index 8f5a85b97ac0c5..e8573358309ca3 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+@@ -3096,11 +3096,7 @@ static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
+ 
+ static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
+ {
+-	struct hnae3_handle *nic = &hdev->nic;
+-	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
+-
+-	return min_t(u32, hdev->rss_size_max,
+-		     hdev->num_tqps / kinfo->tc_info.num_tc);
++	return min(hdev->rss_size_max, hdev->num_tqps);
+ }
+ 
+ /**
+diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
+index 8294a7c4f122c3..ba331899d1861b 100644
+--- a/drivers/net/ethernet/intel/e1000e/defines.h
++++ b/drivers/net/ethernet/intel/e1000e/defines.h
+@@ -638,6 +638,9 @@
+ /* For checksumming, the sum of all words in the NVM should equal 0xBABA. */
+ #define NVM_SUM                    0xBABA
+ 
++/* Uninitialized ("empty") checksum word value */
++#define NVM_CHECKSUM_UNINITIALIZED 0xFFFF
++
+ /* PBA (printed board assembly) number words */
+ #define NVM_PBA_OFFSET_0           8
+ #define NVM_PBA_OFFSET_1           9
+diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
+index 364378133526a1..df4e7d781cb1cb 100644
+--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
++++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
+@@ -4274,6 +4274,8 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
+ 			ret_val = e1000e_update_nvm_checksum(hw);
+ 			if (ret_val)
+ 				return ret_val;
++		} else if (hw->mac.type == e1000_pch_tgp) {
++			return 0;
+ 		}
+ 	}
+ 
+diff --git a/drivers/net/ethernet/intel/e1000e/nvm.c b/drivers/net/ethernet/intel/e1000e/nvm.c
+index e609f4df86f455..16369e6d245a4a 100644
+--- a/drivers/net/ethernet/intel/e1000e/nvm.c
++++ b/drivers/net/ethernet/intel/e1000e/nvm.c
+@@ -558,6 +558,12 @@ s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw)
+ 		checksum += nvm_data;
+ 	}
+ 
++	if (hw->mac.type == e1000_pch_tgp &&
++	    nvm_data == NVM_CHECKSUM_UNINITIALIZED) {
++		e_dbg("Uninitialized NVM Checksum on TGP platform - ignoring\n");
++		return 0;
++	}
++
+ 	if (checksum != (u16)NVM_SUM) {
+ 		e_dbg("NVM Checksum Invalid\n");
+ 		return -E1000_ERR_NVM;
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+index 625fa93fc18bb1..97f32a0c68d09e 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+@@ -3137,10 +3137,10 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
+ 		const u8 *addr = al->list[i].addr;
+ 
+ 		/* Allow to delete VF primary MAC only if it was not set
+-		 * administratively by PF or if VF is trusted.
++		 * administratively by PF.
+ 		 */
+ 		if (ether_addr_equal(addr, vf->default_lan_addr.addr)) {
+-			if (i40e_can_vf_change_mac(vf))
++			if (!vf->pf_set_mac)
+ 				was_unimac_deleted = true;
+ 			else
+ 				continue;
+@@ -5006,7 +5006,7 @@ int i40e_get_vf_stats(struct net_device *netdev, int vf_id,
+ 	vf_stats->broadcast  = stats->rx_broadcast;
+ 	vf_stats->multicast  = stats->rx_multicast;
+ 	vf_stats->rx_dropped = stats->rx_discards + stats->rx_discards_other;
+-	vf_stats->tx_dropped = stats->tx_discards;
++	vf_stats->tx_dropped = stats->tx_errors;
+ 
+ 	return 0;
+ }
+diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.c b/drivers/net/ethernet/intel/ice/ice_ddp.c
+index 272fd823a825d0..e4c8cd12a41d13 100644
+--- a/drivers/net/ethernet/intel/ice/ice_ddp.c
++++ b/drivers/net/ethernet/intel/ice/ice_ddp.c
+@@ -2277,6 +2277,8 @@ enum ice_ddp_state ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf,
+ 		return ICE_DDP_PKG_ERR;
+ 
+ 	buf_copy = devm_kmemdup(ice_hw_to_dev(hw), buf, len, GFP_KERNEL);
++	if (!buf_copy)
++		return ICE_DDP_PKG_ERR;
+ 
+ 	state = ice_init_pkg(hw, buf_copy, len);
+ 	if (!ice_is_init_pkg_successful(state)) {
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+index e733b81e18a21a..5bb4940da59d48 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+@@ -1925,8 +1925,8 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
+ 
+ 	err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context,
+ 			      pages_queue, token, force_polling);
+-	if (callback)
+-		return err;
++	if (callback && !err)
++		return 0;
+ 
+ 	if (err > 0) /* Failed in FW, command didn't execute */
+ 		err = deliv_status_to_err(err);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+index 988df7047b01d7..558962423521c5 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+@@ -1181,19 +1181,19 @@ static void esw_set_peer_miss_rule_source_port(struct mlx5_eswitch *esw,
+ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
+ 				       struct mlx5_core_dev *peer_dev)
+ {
++	struct mlx5_eswitch *peer_esw = peer_dev->priv.eswitch;
+ 	struct mlx5_flow_destination dest = {};
+ 	struct mlx5_flow_act flow_act = {0};
+ 	struct mlx5_flow_handle **flows;
+-	/* total vports is the same for both e-switches */
+-	int nvports = esw->total_vports;
+ 	struct mlx5_flow_handle *flow;
++	struct mlx5_vport *peer_vport;
+ 	struct mlx5_flow_spec *spec;
+-	struct mlx5_vport *vport;
+ 	int err, pfindex;
+ 	unsigned long i;
+ 	void *misc;
+ 
+-	if (!MLX5_VPORT_MANAGER(esw->dev) && !mlx5_core_is_ecpf_esw_manager(esw->dev))
++	if (!MLX5_VPORT_MANAGER(peer_dev) &&
++	    !mlx5_core_is_ecpf_esw_manager(peer_dev))
+ 		return 0;
+ 
+ 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+@@ -1202,7 +1202,7 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
+ 
+ 	peer_miss_rules_setup(esw, peer_dev, spec, &dest);
+ 
+-	flows = kvcalloc(nvports, sizeof(*flows), GFP_KERNEL);
++	flows = kvcalloc(peer_esw->total_vports, sizeof(*flows), GFP_KERNEL);
+ 	if (!flows) {
+ 		err = -ENOMEM;
+ 		goto alloc_flows_err;
+@@ -1212,10 +1212,10 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
+ 	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ 			    misc_parameters);
+ 
+-	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
+-		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
+-		esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
+-						   spec, MLX5_VPORT_PF);
++	if (mlx5_core_is_ecpf_esw_manager(peer_dev)) {
++		peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_PF);
++		esw_set_peer_miss_rule_source_port(esw, peer_esw, spec,
++						   MLX5_VPORT_PF);
+ 
+ 		flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
+ 					   spec, &flow_act, &dest, 1);
+@@ -1223,11 +1223,11 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
+ 			err = PTR_ERR(flow);
+ 			goto add_pf_flow_err;
+ 		}
+-		flows[vport->index] = flow;
++		flows[peer_vport->index] = flow;
+ 	}
+ 
+-	if (mlx5_ecpf_vport_exists(esw->dev)) {
+-		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
++	if (mlx5_ecpf_vport_exists(peer_dev)) {
++		peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_ECPF);
+ 		MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF);
+ 		flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
+ 					   spec, &flow_act, &dest, 1);
+@@ -1235,13 +1235,14 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
+ 			err = PTR_ERR(flow);
+ 			goto add_ecpf_flow_err;
+ 		}
+-		flows[vport->index] = flow;
++		flows[peer_vport->index] = flow;
+ 	}
+ 
+-	mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) {
++	mlx5_esw_for_each_vf_vport(peer_esw, i, peer_vport,
++				   mlx5_core_max_vfs(peer_dev)) {
+ 		esw_set_peer_miss_rule_source_port(esw,
+-						   peer_dev->priv.eswitch,
+-						   spec, vport->vport);
++						   peer_esw,
++						   spec, peer_vport->vport);
+ 
+ 		flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
+ 					   spec, &flow_act, &dest, 1);
+@@ -1249,22 +1250,22 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
+ 			err = PTR_ERR(flow);
+ 			goto add_vf_flow_err;
+ 		}
+-		flows[vport->index] = flow;
++		flows[peer_vport->index] = flow;
+ 	}
+ 
+-	if (mlx5_core_ec_sriov_enabled(esw->dev)) {
+-		mlx5_esw_for_each_ec_vf_vport(esw, i, vport, mlx5_core_max_ec_vfs(esw->dev)) {
+-			if (i >= mlx5_core_max_ec_vfs(peer_dev))
+-				break;
+-			esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
+-							   spec, vport->vport);
++	if (mlx5_core_ec_sriov_enabled(peer_dev)) {
++		mlx5_esw_for_each_ec_vf_vport(peer_esw, i, peer_vport,
++					      mlx5_core_max_ec_vfs(peer_dev)) {
++			esw_set_peer_miss_rule_source_port(esw, peer_esw,
++							   spec,
++							   peer_vport->vport);
+ 			flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
+ 						   spec, &flow_act, &dest, 1);
+ 			if (IS_ERR(flow)) {
+ 				err = PTR_ERR(flow);
+ 				goto add_ec_vf_flow_err;
+ 			}
+-			flows[vport->index] = flow;
++			flows[peer_vport->index] = flow;
+ 		}
+ 	}
+ 
+@@ -1281,25 +1282,27 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
+ 	return 0;
+ 
+ add_ec_vf_flow_err:
+-	mlx5_esw_for_each_ec_vf_vport(esw, i, vport, mlx5_core_max_ec_vfs(esw->dev)) {
+-		if (!flows[vport->index])
++	mlx5_esw_for_each_ec_vf_vport(peer_esw, i, peer_vport,
++				      mlx5_core_max_ec_vfs(peer_dev)) {
++		if (!flows[peer_vport->index])
+ 			continue;
+-		mlx5_del_flow_rules(flows[vport->index]);
++		mlx5_del_flow_rules(flows[peer_vport->index]);
+ 	}
+ add_vf_flow_err:
+-	mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) {
+-		if (!flows[vport->index])
++	mlx5_esw_for_each_vf_vport(peer_esw, i, peer_vport,
++				   mlx5_core_max_vfs(peer_dev)) {
++		if (!flows[peer_vport->index])
+ 			continue;
+-		mlx5_del_flow_rules(flows[vport->index]);
++		mlx5_del_flow_rules(flows[peer_vport->index]);
+ 	}
+-	if (mlx5_ecpf_vport_exists(esw->dev)) {
+-		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
+-		mlx5_del_flow_rules(flows[vport->index]);
++	if (mlx5_ecpf_vport_exists(peer_dev)) {
++		peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_ECPF);
++		mlx5_del_flow_rules(flows[peer_vport->index]);
+ 	}
+ add_ecpf_flow_err:
+-	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
+-		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
+-		mlx5_del_flow_rules(flows[vport->index]);
++	if (mlx5_core_is_ecpf_esw_manager(peer_dev)) {
++		peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_PF);
++		mlx5_del_flow_rules(flows[peer_vport->index]);
+ 	}
+ add_pf_flow_err:
+ 	esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
+@@ -1312,37 +1315,34 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
+ static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
+ 					struct mlx5_core_dev *peer_dev)
+ {
++	struct mlx5_eswitch *peer_esw = peer_dev->priv.eswitch;
+ 	u16 peer_index = mlx5_get_dev_index(peer_dev);
+ 	struct mlx5_flow_handle **flows;
+-	struct mlx5_vport *vport;
++	struct mlx5_vport *peer_vport;
+ 	unsigned long i;
+ 
+ 	flows = esw->fdb_table.offloads.peer_miss_rules[peer_index];
+ 	if (!flows)
+ 		return;
+ 
+-	if (mlx5_core_ec_sriov_enabled(esw->dev)) {
+-		mlx5_esw_for_each_ec_vf_vport(esw, i, vport, mlx5_core_max_ec_vfs(esw->dev)) {
+-			/* The flow for a particular vport could be NULL if the other ECPF
+-			 * has fewer or no VFs enabled
+-			 */
+-			if (!flows[vport->index])
+-				continue;
+-			mlx5_del_flow_rules(flows[vport->index]);
+-		}
++	if (mlx5_core_ec_sriov_enabled(peer_dev)) {
++		mlx5_esw_for_each_ec_vf_vport(peer_esw, i, peer_vport,
++					      mlx5_core_max_ec_vfs(peer_dev))
++			mlx5_del_flow_rules(flows[peer_vport->index]);
+ 	}
+ 
+-	mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev))
+-		mlx5_del_flow_rules(flows[vport->index]);
++	mlx5_esw_for_each_vf_vport(peer_esw, i, peer_vport,
++				   mlx5_core_max_vfs(peer_dev))
++		mlx5_del_flow_rules(flows[peer_vport->index]);
+ 
+-	if (mlx5_ecpf_vport_exists(esw->dev)) {
+-		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
+-		mlx5_del_flow_rules(flows[vport->index]);
++	if (mlx5_ecpf_vport_exists(peer_dev)) {
++		peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_ECPF);
++		mlx5_del_flow_rules(flows[peer_vport->index]);
+ 	}
+ 
+-	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
+-		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
+-		mlx5_del_flow_rules(flows[vport->index]);
++	if (mlx5_core_is_ecpf_esw_manager(peer_dev)) {
++		peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_PF);
++		mlx5_del_flow_rules(flows[peer_vport->index]);
+ 	}
+ 
+ 	kvfree(flows);
+diff --git a/drivers/net/ethernet/ti/icssg/icssg_config.c b/drivers/net/ethernet/ti/icssg/icssg_config.c
+index ddfd1c02a88544..da53eb04b0a43d 100644
+--- a/drivers/net/ethernet/ti/icssg/icssg_config.c
++++ b/drivers/net/ethernet/ti/icssg/icssg_config.c
+@@ -288,8 +288,12 @@ static int prueth_fw_offload_buffer_setup(struct prueth_emac *emac)
+ 	int i;
+ 
+ 	addr = lower_32_bits(prueth->msmcram.pa);
+-	if (slice)
+-		addr += PRUETH_NUM_BUF_POOLS * PRUETH_EMAC_BUF_POOL_SIZE;
++	if (slice) {
++		if (prueth->pdata.banked_ms_ram)
++			addr += MSMC_RAM_BANK_SIZE;
++		else
++			addr += PRUETH_SW_TOTAL_BUF_SIZE_PER_SLICE;
++	}
+ 
+ 	if (addr % SZ_64K) {
+ 		dev_warn(prueth->dev, "buffer pool needs to be 64KB aligned\n");
+@@ -297,43 +301,66 @@ static int prueth_fw_offload_buffer_setup(struct prueth_emac *emac)
+ 	}
+ 
+ 	bpool_cfg = emac->dram.va + BUFFER_POOL_0_ADDR_OFFSET;
+-	/* workaround for f/w bug. bpool 0 needs to be initialized */
+-	for (i = 0; i <  PRUETH_NUM_BUF_POOLS; i++) {
++
++	/* Configure buffer pools for forwarding buffers
++	 * - used by firmware to store packets to be forwarded to the other port
++	 * - 8 total pools per slice
++	 */
++	for (i = 0; i <  PRUETH_NUM_FWD_BUF_POOLS_PER_SLICE; i++) {
+ 		writel(addr, &bpool_cfg[i].addr);
+-		writel(PRUETH_EMAC_BUF_POOL_SIZE, &bpool_cfg[i].len);
+-		addr += PRUETH_EMAC_BUF_POOL_SIZE;
++		writel(PRUETH_SW_FWD_BUF_POOL_SIZE, &bpool_cfg[i].len);
++		addr += PRUETH_SW_FWD_BUF_POOL_SIZE;
+ 	}
+ 
+-	if (!slice)
+-		addr += PRUETH_NUM_BUF_POOLS * PRUETH_EMAC_BUF_POOL_SIZE;
+-	else
+-		addr += PRUETH_SW_NUM_BUF_POOLS_HOST * PRUETH_SW_BUF_POOL_SIZE_HOST;
+-
+-	for (i = PRUETH_NUM_BUF_POOLS;
+-	     i < 2 * PRUETH_SW_NUM_BUF_POOLS_HOST + PRUETH_NUM_BUF_POOLS;
+-	     i++) {
+-		/* The driver only uses first 4 queues per PRU so only initialize them */
+-		if (i % PRUETH_SW_NUM_BUF_POOLS_HOST < PRUETH_SW_NUM_BUF_POOLS_PER_PRU) {
+-			writel(addr, &bpool_cfg[i].addr);
+-			writel(PRUETH_SW_BUF_POOL_SIZE_HOST, &bpool_cfg[i].len);
+-			addr += PRUETH_SW_BUF_POOL_SIZE_HOST;
++	/* Configure buffer pools for Local Injection buffers
++	 *  - used by firmware to store packets received from the host core
++	 *  - 16 total pools per slice
++	 */
++	for (i = 0; i < PRUETH_NUM_LI_BUF_POOLS_PER_SLICE; i++) {
++		int cfg_idx = i + PRUETH_NUM_FWD_BUF_POOLS_PER_SLICE;
++
++		/* The driver only uses the first 4 queues per PRU,
++		 * so only initialize buffers for them
++		 */
++		if ((i % PRUETH_NUM_LI_BUF_POOLS_PER_PORT_PER_SLICE)
++			 < PRUETH_SW_USED_LI_BUF_POOLS_PER_PORT_PER_SLICE) {
++			writel(addr, &bpool_cfg[cfg_idx].addr);
++			writel(PRUETH_SW_LI_BUF_POOL_SIZE,
++			       &bpool_cfg[cfg_idx].len);
++			addr += PRUETH_SW_LI_BUF_POOL_SIZE;
+ 		} else {
+-			writel(0, &bpool_cfg[i].addr);
+-			writel(0, &bpool_cfg[i].len);
++			writel(0, &bpool_cfg[cfg_idx].addr);
++			writel(0, &bpool_cfg[cfg_idx].len);
+ 		}
+ 	}
+ 
+-	if (!slice)
+-		addr += PRUETH_SW_NUM_BUF_POOLS_HOST * PRUETH_SW_BUF_POOL_SIZE_HOST;
+-	else
+-		addr += PRUETH_EMAC_RX_CTX_BUF_SIZE;
++	/* Express RX buffer queue
++	 *  - used by firmware to store express packets to be transmitted
++	 *    to the host core
++	 */
++	rxq_ctx = emac->dram.va + HOST_RX_Q_EXP_CONTEXT_OFFSET;
++	for (i = 0; i < 3; i++)
++		writel(addr, &rxq_ctx->start[i]);
++
++	addr += PRUETH_SW_HOST_EXP_BUF_POOL_SIZE;
++	writel(addr, &rxq_ctx->end);
+ 
++	/* Pre-emptible RX buffer queue
++	 *  - used by firmware to store preemptible packets to be transmitted
++	 *    to the host core
++	 */
+ 	rxq_ctx = emac->dram.va + HOST_RX_Q_PRE_CONTEXT_OFFSET;
+ 	for (i = 0; i < 3; i++)
+ 		writel(addr, &rxq_ctx->start[i]);
+ 
+-	addr += PRUETH_EMAC_RX_CTX_BUF_SIZE;
+-	writel(addr - SZ_2K, &rxq_ctx->end);
++	addr += PRUETH_SW_HOST_PRE_BUF_POOL_SIZE;
++	writel(addr, &rxq_ctx->end);
++
++	/* Set pointer for default dropped packet write
++	 *  - used by firmware to temporarily store packet to be dropped
++	 */
++	rxq_ctx = emac->dram.va + DEFAULT_MSMC_Q_OFFSET;
++	writel(addr, &rxq_ctx->start[0]);
+ 
+ 	return 0;
+ }
+@@ -347,13 +374,13 @@ static int prueth_emac_buffer_setup(struct prueth_emac *emac)
+ 	u32 addr;
+ 	int i;
+ 
+-	/* Layout to have 64KB aligned buffer pool
+-	 * |BPOOL0|BPOOL1|RX_CTX0|RX_CTX1|
+-	 */
+-
+ 	addr = lower_32_bits(prueth->msmcram.pa);
+-	if (slice)
+-		addr += PRUETH_NUM_BUF_POOLS * PRUETH_EMAC_BUF_POOL_SIZE;
++	if (slice) {
++		if (prueth->pdata.banked_ms_ram)
++			addr += MSMC_RAM_BANK_SIZE;
++		else
++			addr += PRUETH_EMAC_TOTAL_BUF_SIZE_PER_SLICE;
++	}
+ 
+ 	if (addr % SZ_64K) {
+ 		dev_warn(prueth->dev, "buffer pool needs to be 64KB aligned\n");
+@@ -361,39 +388,66 @@ static int prueth_emac_buffer_setup(struct prueth_emac *emac)
+ 	}
+ 
+ 	bpool_cfg = emac->dram.va + BUFFER_POOL_0_ADDR_OFFSET;
+-	/* workaround for f/w bug. bpool 0 needs to be initilalized */
+-	writel(addr, &bpool_cfg[0].addr);
+-	writel(0, &bpool_cfg[0].len);
+ 
+-	for (i = PRUETH_EMAC_BUF_POOL_START;
+-	     i < PRUETH_EMAC_BUF_POOL_START + PRUETH_NUM_BUF_POOLS;
+-	     i++) {
+-		writel(addr, &bpool_cfg[i].addr);
+-		writel(PRUETH_EMAC_BUF_POOL_SIZE, &bpool_cfg[i].len);
+-		addr += PRUETH_EMAC_BUF_POOL_SIZE;
++	/* Configure buffer pools for forwarding buffers
++	 *  - in mac mode - no forwarding so initialize all pools to 0
++	 *  - 8 total pools per slice
++	 */
++	for (i = 0; i <  PRUETH_NUM_FWD_BUF_POOLS_PER_SLICE; i++) {
++		writel(0, &bpool_cfg[i].addr);
++		writel(0, &bpool_cfg[i].len);
+ 	}
+ 
+-	if (!slice)
+-		addr += PRUETH_NUM_BUF_POOLS * PRUETH_EMAC_BUF_POOL_SIZE;
+-	else
+-		addr += PRUETH_EMAC_RX_CTX_BUF_SIZE * 2;
++	/* Configure buffer pools for Local Injection buffers
++	 *  - used by firmware to store packets received from the host core
++	 *  - 16 total pools per slice
++	 */
++	bpool_cfg = emac->dram.va + BUFFER_POOL_0_ADDR_OFFSET;
++	for (i = 0; i < PRUETH_NUM_LI_BUF_POOLS_PER_SLICE; i++) {
++		int cfg_idx = i + PRUETH_NUM_FWD_BUF_POOLS_PER_SLICE;
++
++		/* In EMAC mode, only the first 4 buffers are used,
++		 * as 1 slice needs to handle only 1 port
++		 */
++		if (i < PRUETH_EMAC_USED_LI_BUF_POOLS_PER_PORT_PER_SLICE) {
++			writel(addr, &bpool_cfg[cfg_idx].addr);
++			writel(PRUETH_EMAC_LI_BUF_POOL_SIZE,
++			       &bpool_cfg[cfg_idx].len);
++			addr += PRUETH_EMAC_LI_BUF_POOL_SIZE;
++		} else {
++			writel(0, &bpool_cfg[cfg_idx].addr);
++			writel(0, &bpool_cfg[cfg_idx].len);
++		}
++	}
+ 
+-	/* Pre-emptible RX buffer queue */
+-	rxq_ctx = emac->dram.va + HOST_RX_Q_PRE_CONTEXT_OFFSET;
++	/* Express RX buffer queue
++	 *  - used by firmware to store express packets to be transmitted
++	 *    to the host core
++	 */
++	rxq_ctx = emac->dram.va + HOST_RX_Q_EXP_CONTEXT_OFFSET;
+ 	for (i = 0; i < 3; i++)
+ 		writel(addr, &rxq_ctx->start[i]);
+ 
+-	addr += PRUETH_EMAC_RX_CTX_BUF_SIZE;
++	addr += PRUETH_EMAC_HOST_EXP_BUF_POOL_SIZE;
+ 	writel(addr, &rxq_ctx->end);
+ 
+-	/* Express RX buffer queue */
+-	rxq_ctx = emac->dram.va + HOST_RX_Q_EXP_CONTEXT_OFFSET;
++	/* Pre-emptible RX buffer queue
++	 *  - used by firmware to store preemptible packets to be transmitted
++	 *    to the host core
++	 */
++	rxq_ctx = emac->dram.va + HOST_RX_Q_PRE_CONTEXT_OFFSET;
+ 	for (i = 0; i < 3; i++)
+ 		writel(addr, &rxq_ctx->start[i]);
+ 
+-	addr += PRUETH_EMAC_RX_CTX_BUF_SIZE;
++	addr += PRUETH_EMAC_HOST_PRE_BUF_POOL_SIZE;
+ 	writel(addr, &rxq_ctx->end);
+ 
++	/* Set pointer for default dropped packet write
++	 *  - used by firmware to temporarily store packet to be dropped
++	 */
++	rxq_ctx = emac->dram.va + DEFAULT_MSMC_Q_OFFSET;
++	writel(addr, &rxq_ctx->start[0]);
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/net/ethernet/ti/icssg/icssg_config.h b/drivers/net/ethernet/ti/icssg/icssg_config.h
+index c884e9fa099e6f..60d69744ffae28 100644
+--- a/drivers/net/ethernet/ti/icssg/icssg_config.h
++++ b/drivers/net/ethernet/ti/icssg/icssg_config.h
+@@ -26,21 +26,71 @@ struct icssg_flow_cfg {
+ #define PRUETH_MAX_RX_FLOWS	1	/* excluding default flow */
+ #define PRUETH_RX_FLOW_DATA	0
+ 
+-#define PRUETH_EMAC_BUF_POOL_SIZE	SZ_8K
+-#define PRUETH_EMAC_POOLS_PER_SLICE	24
+-#define PRUETH_EMAC_BUF_POOL_START	8
+-#define PRUETH_NUM_BUF_POOLS	8
+-#define PRUETH_EMAC_RX_CTX_BUF_SIZE	SZ_16K	/* per slice */
+-#define MSMC_RAM_SIZE	\
+-	(2 * (PRUETH_EMAC_BUF_POOL_SIZE * PRUETH_NUM_BUF_POOLS + \
+-	 PRUETH_EMAC_RX_CTX_BUF_SIZE * 2))
+-
+-#define PRUETH_SW_BUF_POOL_SIZE_HOST	SZ_4K
+-#define PRUETH_SW_NUM_BUF_POOLS_HOST	8
+-#define PRUETH_SW_NUM_BUF_POOLS_PER_PRU 4
+-#define MSMC_RAM_SIZE_SWITCH_MODE \
+-	(MSMC_RAM_SIZE + \
+-	(2 * PRUETH_SW_BUF_POOL_SIZE_HOST * PRUETH_SW_NUM_BUF_POOLS_HOST))
++/* Defines for forwarding path buffer pools:
++ *   - used by firmware to store packets to be forwarded to the other port
++ *   - 8 total pools per slice
++ *   - only used in switch mode (as no forwarding in mac mode)
++ */
++#define PRUETH_NUM_FWD_BUF_POOLS_PER_SLICE			8
++#define PRUETH_SW_FWD_BUF_POOL_SIZE				(SZ_8K)
++
++/* Defines for local injection path buffer pools:
++ *   - used by firmware to store packets received from the host core
++ *   - 16 total pools per slice
++ *   - 8 pools per port per slice and each slice handles both ports
++ *   - only 4 out of 8 pools used per port (as only 4 real QoS levels in ICSSG)
++ *   - switch mode: 8 total pools used
++ *   - mac mode:    4 total pools used
++ */
++#define PRUETH_NUM_LI_BUF_POOLS_PER_SLICE			16
++#define PRUETH_NUM_LI_BUF_POOLS_PER_PORT_PER_SLICE		8
++#define PRUETH_SW_LI_BUF_POOL_SIZE				SZ_4K
++#define PRUETH_SW_USED_LI_BUF_POOLS_PER_SLICE			8
++#define PRUETH_SW_USED_LI_BUF_POOLS_PER_PORT_PER_SLICE		4
++#define PRUETH_EMAC_LI_BUF_POOL_SIZE				SZ_8K
++#define PRUETH_EMAC_USED_LI_BUF_POOLS_PER_SLICE			4
++#define PRUETH_EMAC_USED_LI_BUF_POOLS_PER_PORT_PER_SLICE	4
++
++/* Defines for host egress path - express and preemptible buffers
++ *   - used by firmware to store express and preemptible packets
++ *     to be transmitted to the host core
++ *   - used by both mac/switch modes
++ */
++#define PRUETH_SW_HOST_EXP_BUF_POOL_SIZE	SZ_16K
++#define PRUETH_SW_HOST_PRE_BUF_POOL_SIZE	(SZ_16K - SZ_2K)
++#define PRUETH_EMAC_HOST_EXP_BUF_POOL_SIZE	PRUETH_SW_HOST_EXP_BUF_POOL_SIZE
++#define PRUETH_EMAC_HOST_PRE_BUF_POOL_SIZE	PRUETH_SW_HOST_PRE_BUF_POOL_SIZE
++
++/* Buffer used by firmware to temporarily store packet to be dropped */
++#define PRUETH_SW_DROP_PKT_BUF_SIZE		SZ_2K
++#define PRUETH_EMAC_DROP_PKT_BUF_SIZE		PRUETH_SW_DROP_PKT_BUF_SIZE
++
++/* Total switch mode memory usage for buffers per slice */
++#define PRUETH_SW_TOTAL_BUF_SIZE_PER_SLICE \
++	(PRUETH_SW_FWD_BUF_POOL_SIZE * PRUETH_NUM_FWD_BUF_POOLS_PER_SLICE + \
++	 PRUETH_SW_LI_BUF_POOL_SIZE * PRUETH_SW_USED_LI_BUF_POOLS_PER_SLICE + \
++	 PRUETH_SW_HOST_EXP_BUF_POOL_SIZE + \
++	 PRUETH_SW_HOST_PRE_BUF_POOL_SIZE + \
++	 PRUETH_SW_DROP_PKT_BUF_SIZE)
++
++/* Total switch mode memory usage for all buffers */
++#define PRUETH_SW_TOTAL_BUF_SIZE \
++	(2 * PRUETH_SW_TOTAL_BUF_SIZE_PER_SLICE)
++
++/* Total mac mode memory usage for buffers per slice */
++#define PRUETH_EMAC_TOTAL_BUF_SIZE_PER_SLICE \
++	(PRUETH_EMAC_LI_BUF_POOL_SIZE * \
++	 PRUETH_EMAC_USED_LI_BUF_POOLS_PER_SLICE + \
++	 PRUETH_EMAC_HOST_EXP_BUF_POOL_SIZE + \
++	 PRUETH_EMAC_HOST_PRE_BUF_POOL_SIZE + \
++	 PRUETH_EMAC_DROP_PKT_BUF_SIZE)
++
++/* Total mac mode memory usage for all buffers */
++#define PRUETH_EMAC_TOTAL_BUF_SIZE \
++	(2 * PRUETH_EMAC_TOTAL_BUF_SIZE_PER_SLICE)
++
++/* Size of 1 bank of MSMC/OC_SRAM memory */
++#define MSMC_RAM_BANK_SIZE			SZ_256K
+ 
+ #define PRUETH_SWITCH_FDB_MASK ((SIZE_OF_FDB / NUMBER_OF_FDB_BUCKET_ENTRIES) - 1)
+ 
+diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+index 6f0700d156e710..0769e1ade30b4c 100644
+--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
++++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+@@ -1510,10 +1510,15 @@ static int prueth_probe(struct platform_device *pdev)
+ 		goto put_mem;
+ 	}
+ 
+-	msmc_ram_size = MSMC_RAM_SIZE;
+ 	prueth->is_switchmode_supported = prueth->pdata.switch_mode;
+-	if (prueth->is_switchmode_supported)
+-		msmc_ram_size = MSMC_RAM_SIZE_SWITCH_MODE;
++	if (prueth->pdata.banked_ms_ram) {
++		/* Reserve 2 MSMC RAM banks for buffers to avoid arbitration */
++		msmc_ram_size = (2 * MSMC_RAM_BANK_SIZE);
++	} else {
++		msmc_ram_size = PRUETH_EMAC_TOTAL_BUF_SIZE;
++		if (prueth->is_switchmode_supported)
++			msmc_ram_size = PRUETH_SW_TOTAL_BUF_SIZE;
++	}
+ 
+ 	/* NOTE: FW bug needs buffer base to be 64KB aligned */
+ 	prueth->msmcram.va =
+@@ -1670,7 +1675,8 @@ static int prueth_probe(struct platform_device *pdev)
+ 
+ free_pool:
+ 	gen_pool_free(prueth->sram_pool,
+-		      (unsigned long)prueth->msmcram.va, msmc_ram_size);
++		      (unsigned long)prueth->msmcram.va,
++		      prueth->msmcram.size);
+ 
+ put_mem:
+ 	pruss_release_mem_region(prueth->pruss, &prueth->shram);
+@@ -1722,8 +1728,8 @@ static void prueth_remove(struct platform_device *pdev)
+ 	icss_iep_put(prueth->iep0);
+ 
+ 	gen_pool_free(prueth->sram_pool,
+-		      (unsigned long)prueth->msmcram.va,
+-		      MSMC_RAM_SIZE);
++		(unsigned long)prueth->msmcram.va,
++		prueth->msmcram.size);
+ 
+ 	pruss_release_mem_region(prueth->pruss, &prueth->shram);
+ 
+@@ -1740,12 +1746,14 @@ static const struct prueth_pdata am654_icssg_pdata = {
+ 	.fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
+ 	.quirk_10m_link_issue = 1,
+ 	.switch_mode = 1,
++	.banked_ms_ram = 0,
+ };
+ 
+ static const struct prueth_pdata am64x_icssg_pdata = {
+ 	.fdqring_mode = K3_RINGACC_RING_MODE_RING,
+ 	.quirk_10m_link_issue = 1,
+ 	.switch_mode = 1,
++	.banked_ms_ram = 1,
+ };
+ 
+ static const struct of_device_id prueth_dt_match[] = {
+diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.h b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
+index e456a11c5d4e38..693c8731c094d6 100644
+--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.h
++++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
+@@ -207,11 +207,13 @@ struct prueth_emac {
+  * @fdqring_mode: Free desc queue mode
+  * @quirk_10m_link_issue: 10M link detect errata
+  * @switch_mode: switch firmware support
++ * @banked_ms_ram: banked memory support
+  */
+ struct prueth_pdata {
+ 	enum k3_ring_mode fdqring_mode;
+ 	u32	quirk_10m_link_issue:1;
+ 	u32	switch_mode:1;
++	u32	banked_ms_ram:1;
+ };
+ 
+ struct icssg_firmwares {
+diff --git a/drivers/net/ethernet/ti/icssg/icssg_switch_map.h b/drivers/net/ethernet/ti/icssg/icssg_switch_map.h
+index 424a7e945ea84a..12541a12ebd672 100644
+--- a/drivers/net/ethernet/ti/icssg/icssg_switch_map.h
++++ b/drivers/net/ethernet/ti/icssg/icssg_switch_map.h
+@@ -180,6 +180,9 @@
+ /* Used to notify the FW of the current link speed */
+ #define PORT_LINK_SPEED_OFFSET                             0x00A8
+ 
++/* 2k memory pointer reserved for default writes by PRU0 */
++#define DEFAULT_MSMC_Q_OFFSET                              0x00AC
++
+ /* TAS gate mask for windows list0 */
+ #define TAS_GATE_MASK_LIST0                                0x0100
+ 
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index 0408c21bb1220a..fb3908798458be 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -3275,6 +3275,12 @@ static int virtnet_tx_resize(struct virtnet_info *vi, struct send_queue *sq,
+ {
+ 	int qindex, err;
+ 
++	if (ring_num <= MAX_SKB_FRAGS + 2) {
++		netdev_err(vi->dev, "tx size (%d) cannot be smaller than %d\n",
++			   ring_num, MAX_SKB_FRAGS + 2);
++		return -EINVAL;
++	}
++
+ 	qindex = sq - vi->sq;
+ 
+ 	virtnet_tx_pause(vi, sq);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/main.c b/drivers/net/wireless/mediatek/mt76/mt7925/main.c
+index ca5f1dc05815f4..a635b223dab18e 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7925/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7925/main.c
+@@ -1155,7 +1155,12 @@ static void mt7925_mac_link_sta_remove(struct mt76_dev *mdev,
+ 		struct mt792x_bss_conf *mconf;
+ 
+ 		mconf = mt792x_link_conf_to_mconf(link_conf);
+-		mt792x_mac_link_bss_remove(dev, mconf, mlink);
++
++		if (ieee80211_vif_is_mld(vif))
++			mt792x_mac_link_bss_remove(dev, mconf, mlink);
++		else
++			mt7925_mcu_add_bss_info(&dev->phy, mconf->mt76.ctx, link_conf,
++						link_sta, false);
+ 	}
+ 
+ 	spin_lock_bh(&mdev->sta_poll_lock);
+@@ -1175,6 +1180,31 @@ mt7925_mac_sta_remove_links(struct mt792x_dev *dev, struct ieee80211_vif *vif,
+ 	struct mt76_wcid *wcid;
+ 	unsigned int link_id;
+ 
++	/* clean up bss before starec */
++	for_each_set_bit(link_id, &old_links, IEEE80211_MLD_MAX_NUM_LINKS) {
++		struct ieee80211_link_sta *link_sta;
++		struct ieee80211_bss_conf *link_conf;
++		struct mt792x_bss_conf *mconf;
++		struct mt792x_link_sta *mlink;
++
++		link_sta = mt792x_sta_to_link_sta(vif, sta, link_id);
++		if (!link_sta)
++			continue;
++
++		mlink = mt792x_sta_to_link(msta, link_id);
++		if (!mlink)
++			continue;
++
++		link_conf = mt792x_vif_to_bss_conf(vif, link_id);
++		if (!link_conf)
++			continue;
++
++		mconf = mt792x_link_conf_to_mconf(link_conf);
++
++		mt7925_mcu_add_bss_info(&dev->phy, mconf->mt76.ctx, link_conf,
++					link_sta, false);
++	}
++
+ 	for_each_set_bit(link_id, &old_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ 		struct ieee80211_link_sta *link_sta;
+ 		struct mt792x_link_sta *mlink;
+@@ -1213,44 +1243,14 @@ void mt7925_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ {
+ 	struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);
+ 	struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv;
+-	struct {
+-		struct {
+-			u8 omac_idx;
+-			u8 band_idx;
+-			__le16 pad;
+-		} __packed hdr;
+-		struct req_tlv {
+-			__le16 tag;
+-			__le16 len;
+-			u8 active;
+-			u8 link_idx; /* hw link idx */
+-			u8 omac_addr[ETH_ALEN];
+-		} __packed tlv;
+-	} dev_req = {
+-		.hdr = {
+-			.omac_idx = 0,
+-			.band_idx = 0,
+-		},
+-		.tlv = {
+-			.tag = cpu_to_le16(DEV_INFO_ACTIVE),
+-			.len = cpu_to_le16(sizeof(struct req_tlv)),
+-			.active = true,
+-		},
+-	};
+ 	unsigned long rem;
+ 
+ 	rem = ieee80211_vif_is_mld(vif) ? msta->valid_links : BIT(0);
+ 
+ 	mt7925_mac_sta_remove_links(dev, vif, sta, rem);
+ 
+-	if (ieee80211_vif_is_mld(vif)) {
+-		mt7925_mcu_set_dbdc(&dev->mphy, false);
+-
+-		/* recovery omac address for the legacy interface */
+-		memcpy(dev_req.tlv.omac_addr, vif->addr, ETH_ALEN);
+-		mt76_mcu_send_msg(mdev, MCU_UNI_CMD(DEV_INFO_UPDATE),
+-				  &dev_req, sizeof(dev_req), true);
+-	}
++	if (ieee80211_vif_is_mld(vif))
++		mt7925_mcu_del_dev(mdev, vif);
+ 
+ 	if (vif->type == NL80211_IFTYPE_STATION) {
+ 		struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+@@ -1296,22 +1296,22 @@ mt7925_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ 	case IEEE80211_AMPDU_RX_START:
+ 		mt76_rx_aggr_start(&dev->mt76, &msta->deflink.wcid, tid, ssn,
+ 				   params->buf_size);
+-		mt7925_mcu_uni_rx_ba(dev, vif, params, true);
++		mt7925_mcu_uni_rx_ba(dev, params, true);
+ 		break;
+ 	case IEEE80211_AMPDU_RX_STOP:
+ 		mt76_rx_aggr_stop(&dev->mt76, &msta->deflink.wcid, tid);
+-		mt7925_mcu_uni_rx_ba(dev, vif, params, false);
++		mt7925_mcu_uni_rx_ba(dev, params, false);
+ 		break;
+ 	case IEEE80211_AMPDU_TX_OPERATIONAL:
+ 		mtxq->aggr = true;
+ 		mtxq->send_bar = false;
+-		mt7925_mcu_uni_tx_ba(dev, vif, params, true);
++		mt7925_mcu_uni_tx_ba(dev, params, true);
+ 		break;
+ 	case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ 	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ 		mtxq->aggr = false;
+ 		clear_bit(tid, &msta->deflink.wcid.ampdu_state);
+-		mt7925_mcu_uni_tx_ba(dev, vif, params, false);
++		mt7925_mcu_uni_tx_ba(dev, params, false);
+ 		break;
+ 	case IEEE80211_AMPDU_TX_START:
+ 		set_bit(tid, &msta->deflink.wcid.ampdu_state);
+@@ -1320,7 +1320,7 @@ mt7925_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ 	case IEEE80211_AMPDU_TX_STOP_CONT:
+ 		mtxq->aggr = false;
+ 		clear_bit(tid, &msta->deflink.wcid.ampdu_state);
+-		mt7925_mcu_uni_tx_ba(dev, vif, params, false);
++		mt7925_mcu_uni_tx_ba(dev, params, false);
+ 		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ 		break;
+ 	}
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
+index 2aeb9ba4256aba..e42b4f0abbe7a2 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
+@@ -529,10 +529,10 @@ void mt7925_mcu_rx_event(struct mt792x_dev *dev, struct sk_buff *skb)
+ 
+ static int
+ mt7925_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
+-		  struct mt76_wcid *wcid,
+ 		  struct ieee80211_ampdu_params *params,
+ 		  bool enable, bool tx)
+ {
++	struct mt76_wcid *wcid = (struct mt76_wcid *)params->sta->drv_priv;
+ 	struct sta_rec_ba_uni *ba;
+ 	struct sk_buff *skb;
+ 	struct tlv *tlv;
+@@ -560,60 +560,28 @@ mt7925_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
+ 
+ /** starec & wtbl **/
+ int mt7925_mcu_uni_tx_ba(struct mt792x_dev *dev,
+-			 struct ieee80211_vif *vif,
+ 			 struct ieee80211_ampdu_params *params,
+ 			 bool enable)
+ {
+ 	struct mt792x_sta *msta = (struct mt792x_sta *)params->sta->drv_priv;
+-	struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+-	struct mt792x_link_sta *mlink;
+-	struct mt792x_bss_conf *mconf;
+-	unsigned long usable_links = ieee80211_vif_usable_links(vif);
+-	struct mt76_wcid *wcid;
+-	u8 link_id, ret;
+-
+-	for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+-		mconf = mt792x_vif_to_link(mvif, link_id);
+-		mlink = mt792x_sta_to_link(msta, link_id);
+-		wcid = &mlink->wcid;
+-
+-		if (enable && !params->amsdu)
+-			mlink->wcid.amsdu = false;
++	struct mt792x_vif *mvif = msta->vif;
+ 
+-		ret = mt7925_mcu_sta_ba(&dev->mt76, &mconf->mt76, wcid, params,
+-					enable, true);
+-		if (ret < 0)
+-			break;
+-	}
++	if (enable && !params->amsdu)
++		msta->deflink.wcid.amsdu = false;
+ 
+-	return ret;
++	return mt7925_mcu_sta_ba(&dev->mt76, &mvif->bss_conf.mt76, params,
++				 enable, true);
+ }
+ 
+ int mt7925_mcu_uni_rx_ba(struct mt792x_dev *dev,
+-			 struct ieee80211_vif *vif,
+ 			 struct ieee80211_ampdu_params *params,
+ 			 bool enable)
+ {
+ 	struct mt792x_sta *msta = (struct mt792x_sta *)params->sta->drv_priv;
+-	struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+-	struct mt792x_link_sta *mlink;
+-	struct mt792x_bss_conf *mconf;
+-	unsigned long usable_links = ieee80211_vif_usable_links(vif);
+-	struct mt76_wcid *wcid;
+-	u8 link_id, ret;
+-
+-	for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+-		mconf = mt792x_vif_to_link(mvif, link_id);
+-		mlink = mt792x_sta_to_link(msta, link_id);
+-		wcid = &mlink->wcid;
+-
+-		ret = mt7925_mcu_sta_ba(&dev->mt76, &mconf->mt76, wcid, params,
+-					enable, false);
+-		if (ret < 0)
+-			break;
+-	}
++	struct mt792x_vif *mvif = msta->vif;
+ 
+-	return ret;
++	return mt7925_mcu_sta_ba(&dev->mt76, &mvif->bss_conf.mt76, params,
++				 enable, false);
+ }
+ 
+ static int mt7925_mcu_read_eeprom(struct mt792x_dev *dev, u32 offset, u8 *val)
+@@ -2694,6 +2662,62 @@ int mt7925_mcu_set_timing(struct mt792x_phy *phy,
+ 				     MCU_UNI_CMD(BSS_INFO_UPDATE), true);
+ }
+ 
++void mt7925_mcu_del_dev(struct mt76_dev *mdev,
++			struct ieee80211_vif *vif)
++{
++	struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
++	struct {
++		struct {
++			u8 omac_idx;
++			u8 band_idx;
++			__le16 pad;
++		} __packed hdr;
++		struct req_tlv {
++			__le16 tag;
++			__le16 len;
++			u8 active;
++			u8 link_idx; /* hw link idx */
++			u8 omac_addr[ETH_ALEN];
++		} __packed tlv;
++	} dev_req = {
++		.tlv = {
++			.tag = cpu_to_le16(DEV_INFO_ACTIVE),
++			.len = cpu_to_le16(sizeof(struct req_tlv)),
++			.active = true,
++		},
++	};
++	struct {
++		struct {
++			u8 bss_idx;
++			u8 pad[3];
++		} __packed hdr;
++		struct mt76_connac_bss_basic_tlv basic;
++	} basic_req = {
++		.basic = {
++			.tag = cpu_to_le16(UNI_BSS_INFO_BASIC),
++			.len = cpu_to_le16(sizeof(struct mt76_connac_bss_basic_tlv)),
++			.active = true,
++			.conn_state = 1,
++		},
++	};
++
++	dev_req.hdr.omac_idx = mvif->bss_conf.mt76.omac_idx;
++	dev_req.hdr.band_idx = mvif->bss_conf.mt76.band_idx;
++
++	basic_req.hdr.bss_idx = mvif->bss_conf.mt76.idx;
++	basic_req.basic.omac_idx = mvif->bss_conf.mt76.omac_idx;
++	basic_req.basic.band_idx = mvif->bss_conf.mt76.band_idx;
++	basic_req.basic.link_idx = mvif->bss_conf.mt76.link_idx;
++
++	mt76_mcu_send_msg(mdev, MCU_UNI_CMD(BSS_INFO_UPDATE),
++			  &basic_req, sizeof(basic_req), true);
++
++	/* recover the omac address for the legacy interface */
++	memcpy(dev_req.tlv.omac_addr, vif->addr, ETH_ALEN);
++	mt76_mcu_send_msg(mdev, MCU_UNI_CMD(DEV_INFO_UPDATE),
++			  &dev_req, sizeof(dev_req), true);
++}
++
+ int mt7925_mcu_add_bss_info(struct mt792x_phy *phy,
+ 			    struct ieee80211_chanctx_conf *ctx,
+ 			    struct ieee80211_bss_conf *link_conf,
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h
+index 780c5921679aa3..ee89d5778adfae 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h
+@@ -627,6 +627,8 @@ int mt7925_mcu_sched_scan_req(struct mt76_phy *phy,
+ int mt7925_mcu_sched_scan_enable(struct mt76_phy *phy,
+ 				 struct ieee80211_vif *vif,
+ 				 bool enable);
++void mt7925_mcu_del_dev(struct mt76_dev *mdev,
++			struct ieee80211_vif *vif);
+ int mt7925_mcu_add_bss_info(struct mt792x_phy *phy,
+ 			    struct ieee80211_chanctx_conf *ctx,
+ 			    struct ieee80211_bss_conf *link_conf,
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h b/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
+index 4ad779329b8f08..c83b8a2104985c 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
+@@ -245,11 +245,9 @@ int mt7925_mcu_set_beacon_filter(struct mt792x_dev *dev,
+ 				 struct ieee80211_vif *vif,
+ 				 bool enable);
+ int mt7925_mcu_uni_tx_ba(struct mt792x_dev *dev,
+-			 struct ieee80211_vif *vif,
+ 			 struct ieee80211_ampdu_params *params,
+ 			 bool enable);
+ int mt7925_mcu_uni_rx_ba(struct mt792x_dev *dev,
+-			 struct ieee80211_vif *vif,
+ 			 struct ieee80211_ampdu_params *params,
+ 			 bool enable);
+ void mt7925_scan_work(struct work_struct *work);
+diff --git a/drivers/platform/mellanox/mlxbf-pmc.c b/drivers/platform/mellanox/mlxbf-pmc.c
+index fbb8128d19de48..9a0220b4de3c8b 100644
+--- a/drivers/platform/mellanox/mlxbf-pmc.c
++++ b/drivers/platform/mellanox/mlxbf-pmc.c
+@@ -15,6 +15,7 @@
+ #include <linux/hwmon.h>
+ #include <linux/platform_device.h>
+ #include <linux/string.h>
++#include <linux/string_helpers.h>
+ #include <uapi/linux/psci.h>
+ 
+ #define MLXBF_PMC_WRITE_REG_32 0x82000009
+@@ -1067,7 +1068,7 @@ static int mlxbf_pmc_get_event_num(const char *blk, const char *evt)
+ 	return -ENODEV;
+ }
+ 
+-/* Get the event number given the name */
++/* Get the event name given the number */
+ static char *mlxbf_pmc_get_event_name(const char *blk, u32 evt)
+ {
+ 	const struct mlxbf_pmc_events *events;
+@@ -1625,6 +1626,7 @@ static ssize_t mlxbf_pmc_event_store(struct device *dev,
+ 		attr, struct mlxbf_pmc_attribute, dev_attr);
+ 	unsigned int blk_num, cnt_num;
+ 	bool is_l3 = false;
++	char *evt_name;
+ 	int evt_num;
+ 	int err;
+ 
+@@ -1632,14 +1634,23 @@ static ssize_t mlxbf_pmc_event_store(struct device *dev,
+ 	cnt_num = attr_event->index;
+ 
+ 	if (isalpha(buf[0])) {
++		/* Remove the trailing newline character if present */
++		evt_name = kstrdup_and_replace(buf, '\n', '\0', GFP_KERNEL);
++		if (!evt_name)
++			return -ENOMEM;
++
+ 		evt_num = mlxbf_pmc_get_event_num(pmc->block_name[blk_num],
+-						  buf);
++						  evt_name);
++		kfree(evt_name);
+ 		if (evt_num < 0)
+ 			return -EINVAL;
+ 	} else {
+ 		err = kstrtouint(buf, 0, &evt_num);
+ 		if (err < 0)
+ 			return err;
++
++		if (!mlxbf_pmc_get_event_name(pmc->block_name[blk_num], evt_num))
++			return -EINVAL;
+ 	}
+ 
+ 	if (strstr(pmc->block_name[blk_num], "l3cache"))
+@@ -1720,13 +1731,14 @@ static ssize_t mlxbf_pmc_enable_store(struct device *dev,
+ {
+ 	struct mlxbf_pmc_attribute *attr_enable = container_of(
+ 		attr, struct mlxbf_pmc_attribute, dev_attr);
+-	unsigned int en, blk_num;
++	unsigned int blk_num;
+ 	u32 word;
+ 	int err;
++	bool en;
+ 
+ 	blk_num = attr_enable->nr;
+ 
+-	err = kstrtouint(buf, 0, &en);
++	err = kstrtobool(buf, &en);
+ 	if (err < 0)
+ 		return err;
+ 
+@@ -1746,14 +1758,11 @@ static ssize_t mlxbf_pmc_enable_store(struct device *dev,
+ 			MLXBF_PMC_CRSPACE_PERFMON_CTL(pmc->block[blk_num].counters),
+ 			MLXBF_PMC_WRITE_REG_32, word);
+ 	} else {
+-		if (en && en != 1)
+-			return -EINVAL;
+-
+ 		err = mlxbf_pmc_config_l3_counters(blk_num, false, !!en);
+ 		if (err)
+ 			return err;
+ 
+-		if (en == 1) {
++		if (en) {
+ 			err = mlxbf_pmc_config_l3_counters(blk_num, true, false);
+ 			if (err)
+ 				return err;
+diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
+index e1b14294706747..4631c7bb22cd08 100644
+--- a/drivers/platform/x86/Makefile
++++ b/drivers/platform/x86/Makefile
+@@ -58,6 +58,8 @@ obj-$(CONFIG_X86_PLATFORM_DRIVERS_HP)	+= hp/
+ # Hewlett Packard Enterprise
+ obj-$(CONFIG_UV_SYSFS)       += uv_sysfs.o
+ 
++obj-$(CONFIG_FW_ATTR_CLASS)	+= firmware_attributes_class.o
++
+ # IBM Thinkpad and Lenovo
+ obj-$(CONFIG_IBM_RTL)		+= ibm_rtl.o
+ obj-$(CONFIG_IDEAPAD_LAPTOP)	+= ideapad-laptop.o
+@@ -120,7 +122,6 @@ obj-$(CONFIG_SYSTEM76_ACPI)	+= system76_acpi.o
+ obj-$(CONFIG_TOPSTAR_LAPTOP)	+= topstar-laptop.o
+ 
+ # Platform drivers
+-obj-$(CONFIG_FW_ATTR_CLASS)		+= firmware_attributes_class.o
+ obj-$(CONFIG_SERIAL_MULTI_INSTANTIATE)	+= serial-multi-instantiate.o
+ obj-$(CONFIG_MLX_PLATFORM)		+= mlx-platform.o
+ obj-$(CONFIG_TOUCHSCREEN_DMI)		+= touchscreen_dmi.o
+diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
+index a5933980ade3d6..90ad0045fec5ff 100644
+--- a/drivers/platform/x86/asus-nb-wmi.c
++++ b/drivers/platform/x86/asus-nb-wmi.c
+@@ -529,6 +529,15 @@ static const struct dmi_system_id asus_quirks[] = {
+ 		},
+ 		.driver_data = &quirk_asus_zenbook_duo_kbd,
+ 	},
++	{
++		.callback = dmi_matched,
++		.ident = "ASUS Zenbook Duo UX8406CA",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "UX8406CA"),
++		},
++		.driver_data = &quirk_asus_zenbook_duo_kbd,
++	},
+ 	{},
+ };
+ 
+diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
+index 93aa72bff3f00d..c7e8bcf3d62399 100644
+--- a/drivers/platform/x86/ideapad-laptop.c
++++ b/drivers/platform/x86/ideapad-laptop.c
+@@ -1672,7 +1672,7 @@ static int ideapad_kbd_bl_init(struct ideapad_private *priv)
+ 	priv->kbd_bl.led.name                    = "platform::" LED_FUNCTION_KBD_BACKLIGHT;
+ 	priv->kbd_bl.led.brightness_get          = ideapad_kbd_bl_led_cdev_brightness_get;
+ 	priv->kbd_bl.led.brightness_set_blocking = ideapad_kbd_bl_led_cdev_brightness_set;
+-	priv->kbd_bl.led.flags                   = LED_BRIGHT_HW_CHANGED;
++	priv->kbd_bl.led.flags                   = LED_BRIGHT_HW_CHANGED | LED_RETAIN_AT_SHUTDOWN;
+ 
+ 	err = led_classdev_register(&priv->platform_device->dev, &priv->kbd_bl.led);
+ 	if (err)
+@@ -1731,7 +1731,7 @@ static int ideapad_fn_lock_led_init(struct ideapad_private *priv)
+ 	priv->fn_lock.led.name                    = "platform::" LED_FUNCTION_FNLOCK;
+ 	priv->fn_lock.led.brightness_get          = ideapad_fn_lock_led_cdev_get;
+ 	priv->fn_lock.led.brightness_set_blocking = ideapad_fn_lock_led_cdev_set;
+-	priv->fn_lock.led.flags                   = LED_BRIGHT_HW_CHANGED;
++	priv->fn_lock.led.flags                   = LED_BRIGHT_HW_CHANGED | LED_RETAIN_AT_SHUTDOWN;
+ 
+ 	err = led_classdev_register(&priv->platform_device->dev, &priv->fn_lock.led);
+ 	if (err)
+diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
+index 1f4698d724bb78..e7f2a8b6594775 100644
+--- a/drivers/regulator/core.c
++++ b/drivers/regulator/core.c
+@@ -5536,6 +5536,7 @@ static void regulator_remove_coupling(struct regulator_dev *rdev)
+ 				 ERR_PTR(err));
+ 	}
+ 
++	rdev->coupling_desc.n_coupled = 0;
+ 	kfree(rdev->coupling_desc.coupled_rdevs);
+ 	rdev->coupling_desc.coupled_rdevs = NULL;
+ }
+diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c
+index 2f34761e64135c..7cfc4f9862977c 100644
+--- a/drivers/s390/net/ism_drv.c
++++ b/drivers/s390/net/ism_drv.c
+@@ -131,6 +131,7 @@ static int ism_cmd(struct ism_dev *ism, void *cmd)
+ 	struct ism_req_hdr *req = cmd;
+ 	struct ism_resp_hdr *resp = cmd;
+ 
++	spin_lock(&ism->cmd_lock);
+ 	__ism_write_cmd(ism, req + 1, sizeof(*req), req->len - sizeof(*req));
+ 	__ism_write_cmd(ism, req, 0, sizeof(*req));
+ 
+@@ -144,6 +145,7 @@ static int ism_cmd(struct ism_dev *ism, void *cmd)
+ 	}
+ 	__ism_read_cmd(ism, resp + 1, sizeof(*resp), resp->len - sizeof(*resp));
+ out:
++	spin_unlock(&ism->cmd_lock);
+ 	return resp->ret;
+ }
+ 
+@@ -607,6 +609,7 @@ static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 		return -ENOMEM;
+ 
+ 	spin_lock_init(&ism->lock);
++	spin_lock_init(&ism->cmd_lock);
+ 	dev_set_drvdata(&pdev->dev, ism);
+ 	ism->pdev = pdev;
+ 	ism->dev.parent = &pdev->dev;
+diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
+index 12f8073cb5968e..56be0b6901a879 100644
+--- a/drivers/spi/spi-cadence-quadspi.c
++++ b/drivers/spi/spi-cadence-quadspi.c
+@@ -1931,11 +1931,6 @@ static int cqspi_probe(struct platform_device *pdev)
+ 
+ 	pm_runtime_enable(dev);
+ 
+-	if (cqspi->rx_chan) {
+-		dma_release_channel(cqspi->rx_chan);
+-		goto probe_setup_failed;
+-	}
+-
+ 	pm_runtime_set_autosuspend_delay(dev, CQSPI_AUTOSUSPEND_TIMEOUT);
+ 	pm_runtime_use_autosuspend(dev);
+ 	pm_runtime_get_noresume(dev);
+diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+index 97787002080a18..20ad6b1e44bc42 100644
+--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
++++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+@@ -739,8 +739,7 @@ int vchiq_shutdown(struct vchiq_instance *instance)
+ 	struct vchiq_state *state = instance->state;
+ 	int ret = 0;
+ 
+-	if (mutex_lock_killable(&state->mutex))
+-		return -EAGAIN;
++	mutex_lock(&state->mutex);
+ 
+ 	/* Remove all services */
+ 	vchiq_shutdown_internal(state, instance);
+diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
+index 9838a2c8c1b857..aa2fa720af1551 100644
+--- a/drivers/usb/typec/tcpm/tcpm.c
++++ b/drivers/usb/typec/tcpm/tcpm.c
+@@ -1132,7 +1132,7 @@ static int tcpm_set_attached_state(struct tcpm_port *port, bool attached)
+ 				     port->data_role);
+ }
+ 
+-static int tcpm_set_roles(struct tcpm_port *port, bool attached,
++static int tcpm_set_roles(struct tcpm_port *port, bool attached, int state,
+ 			  enum typec_role role, enum typec_data_role data)
+ {
+ 	enum typec_orientation orientation;
+@@ -1169,7 +1169,7 @@ static int tcpm_set_roles(struct tcpm_port *port, bool attached,
+ 		}
+ 	}
+ 
+-	ret = tcpm_mux_set(port, TYPEC_STATE_USB, usb_role, orientation);
++	ret = tcpm_mux_set(port, state, usb_role, orientation);
+ 	if (ret < 0)
+ 		return ret;
+ 
+@@ -4339,16 +4339,6 @@ static int tcpm_src_attach(struct tcpm_port *port)
+ 
+ 	tcpm_enable_auto_vbus_discharge(port, true);
+ 
+-	ret = tcpm_set_roles(port, true, TYPEC_SOURCE, tcpm_data_role_for_source(port));
+-	if (ret < 0)
+-		return ret;
+-
+-	if (port->pd_supported) {
+-		ret = port->tcpc->set_pd_rx(port->tcpc, true);
+-		if (ret < 0)
+-			goto out_disable_mux;
+-	}
+-
+ 	/*
+ 	 * USB Type-C specification, version 1.2,
+ 	 * chapter 4.5.2.2.8.1 (Attached.SRC Requirements)
+@@ -4358,13 +4348,24 @@ static int tcpm_src_attach(struct tcpm_port *port)
+ 	    (polarity == TYPEC_POLARITY_CC2 && port->cc1 == TYPEC_CC_RA)) {
+ 		ret = tcpm_set_vconn(port, true);
+ 		if (ret < 0)
+-			goto out_disable_pd;
++			return ret;
+ 	}
+ 
+ 	ret = tcpm_set_vbus(port, true);
+ 	if (ret < 0)
+ 		goto out_disable_vconn;
+ 
++	ret = tcpm_set_roles(port, true, TYPEC_STATE_USB, TYPEC_SOURCE,
++			     tcpm_data_role_for_source(port));
++	if (ret < 0)
++		goto out_disable_vbus;
++
++	if (port->pd_supported) {
++		ret = port->tcpc->set_pd_rx(port->tcpc, true);
++		if (ret < 0)
++			goto out_disable_mux;
++	}
++
+ 	port->pd_capable = false;
+ 
+ 	port->partner = NULL;
+@@ -4375,14 +4376,14 @@ static int tcpm_src_attach(struct tcpm_port *port)
+ 
+ 	return 0;
+ 
+-out_disable_vconn:
+-	tcpm_set_vconn(port, false);
+-out_disable_pd:
+-	if (port->pd_supported)
+-		port->tcpc->set_pd_rx(port->tcpc, false);
+ out_disable_mux:
+ 	tcpm_mux_set(port, TYPEC_STATE_SAFE, USB_ROLE_NONE,
+ 		     TYPEC_ORIENTATION_NONE);
++out_disable_vbus:
++	tcpm_set_vbus(port, false);
++out_disable_vconn:
++	tcpm_set_vconn(port, false);
++
+ 	return ret;
+ }
+ 
+@@ -4514,7 +4515,8 @@ static int tcpm_snk_attach(struct tcpm_port *port)
+ 
+ 	tcpm_enable_auto_vbus_discharge(port, true);
+ 
+-	ret = tcpm_set_roles(port, true, TYPEC_SINK, tcpm_data_role_for_sink(port));
++	ret = tcpm_set_roles(port, true, TYPEC_STATE_USB,
++			     TYPEC_SINK, tcpm_data_role_for_sink(port));
+ 	if (ret < 0)
+ 		return ret;
+ 
+@@ -4537,12 +4539,24 @@ static void tcpm_snk_detach(struct tcpm_port *port)
+ static int tcpm_acc_attach(struct tcpm_port *port)
+ {
+ 	int ret;
++	enum typec_role role;
++	enum typec_data_role data;
++	int state = TYPEC_STATE_USB;
+ 
+ 	if (port->attached)
+ 		return 0;
+ 
+-	ret = tcpm_set_roles(port, true, TYPEC_SOURCE,
+-			     tcpm_data_role_for_source(port));
++	role = tcpm_port_is_sink(port) ? TYPEC_SINK : TYPEC_SOURCE;
++	data = tcpm_port_is_sink(port) ? tcpm_data_role_for_sink(port)
++				       : tcpm_data_role_for_source(port);
++
++	if (tcpm_port_is_audio(port))
++		state = TYPEC_MODE_AUDIO;
++
++	if (tcpm_port_is_debug(port))
++		state = TYPEC_MODE_DEBUG;
++
++	ret = tcpm_set_roles(port, true, state, role, data);
+ 	if (ret < 0)
+ 		return ret;
+ 
+@@ -5301,7 +5315,7 @@ static void run_state_machine(struct tcpm_port *port)
+ 		 */
+ 		tcpm_set_vconn(port, false);
+ 		tcpm_set_vbus(port, false);
+-		tcpm_set_roles(port, port->self_powered, TYPEC_SOURCE,
++		tcpm_set_roles(port, port->self_powered, TYPEC_STATE_USB, TYPEC_SOURCE,
+ 			       tcpm_data_role_for_source(port));
+ 		/*
+ 		 * If tcpc fails to notify vbus off, TCPM will wait for PD_T_SAFE_0V +
+@@ -5333,7 +5347,7 @@ static void run_state_machine(struct tcpm_port *port)
+ 		tcpm_set_vconn(port, false);
+ 		if (port->pd_capable)
+ 			tcpm_set_charge(port, false);
+-		tcpm_set_roles(port, port->self_powered, TYPEC_SINK,
++		tcpm_set_roles(port, port->self_powered, TYPEC_STATE_USB, TYPEC_SINK,
+ 			       tcpm_data_role_for_sink(port));
+ 		/*
+ 		 * VBUS may or may not toggle, depending on the adapter.
+@@ -5457,10 +5471,10 @@ static void run_state_machine(struct tcpm_port *port)
+ 	case DR_SWAP_CHANGE_DR:
+ 		tcpm_unregister_altmodes(port);
+ 		if (port->data_role == TYPEC_HOST)
+-			tcpm_set_roles(port, true, port->pwr_role,
++			tcpm_set_roles(port, true, TYPEC_STATE_USB, port->pwr_role,
+ 				       TYPEC_DEVICE);
+ 		else
+-			tcpm_set_roles(port, true, port->pwr_role,
++			tcpm_set_roles(port, true, TYPEC_STATE_USB, port->pwr_role,
+ 				       TYPEC_HOST);
+ 		tcpm_ams_finish(port);
+ 		tcpm_set_state(port, ready_state(port), 0);
+diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
+index 147926c8bae09d..c0276979675df1 100644
+--- a/drivers/virtio/virtio_ring.c
++++ b/drivers/virtio/virtio_ring.c
+@@ -2741,7 +2741,7 @@ int virtqueue_resize(struct virtqueue *_vq, u32 num,
+ 		     void (*recycle_done)(struct virtqueue *vq))
+ {
+ 	struct vring_virtqueue *vq = to_vvq(_vq);
+-	int err;
++	int err, err_reset;
+ 
+ 	if (num > vq->vq.num_max)
+ 		return -E2BIG;
+@@ -2763,7 +2763,11 @@ int virtqueue_resize(struct virtqueue *_vq, u32 num,
+ 	else
+ 		err = virtqueue_resize_split(_vq, num);
+ 
+-	return virtqueue_enable_after_reset(_vq);
++	err_reset = virtqueue_enable_after_reset(_vq);
++	if (err_reset)
++		return err_reset;
++
++	return err;
+ }
+ EXPORT_SYMBOL_GPL(virtqueue_resize);
+ 
+diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h
+index 3d06fda70f318b..856463a702b2cb 100644
+--- a/fs/erofs/internal.h
++++ b/fs/erofs/internal.h
+@@ -277,13 +277,8 @@ struct erofs_inode {
+ 			unsigned char  z_algorithmtype[2];
+ 			unsigned char  z_logical_clusterbits;
+ 			unsigned long  z_tailextent_headlcn;
+-			union {
+-				struct {
+-					erofs_off_t    z_idataoff;
+-					unsigned short z_idata_size;
+-				};
+-				erofs_off_t z_fragmentoff;
+-			};
++			erofs_off_t    z_fragmentoff;
++			unsigned short z_idata_size;
+ 		};
+ #endif	/* CONFIG_EROFS_FS_ZIP */
+ 	};
+@@ -329,10 +324,12 @@ static inline struct folio *erofs_grab_folio_nowait(struct address_space *as,
+ /* The length of extent is full */
+ #define EROFS_MAP_FULL_MAPPED	0x0008
+ /* Located in the special packed inode */
+-#define EROFS_MAP_FRAGMENT	0x0010
++#define __EROFS_MAP_FRAGMENT	0x0010
+ /* The extent refers to partial decompressed data */
+ #define EROFS_MAP_PARTIAL_REF	0x0020
+ 
++#define EROFS_MAP_FRAGMENT	(EROFS_MAP_MAPPED | __EROFS_MAP_FRAGMENT)
++
+ struct erofs_map_blocks {
+ 	struct erofs_buf buf;
+ 
+diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
+index 94c1e2d64df961..f35d2eb0ed11c5 100644
+--- a/fs/erofs/zdata.c
++++ b/fs/erofs/zdata.c
+@@ -1016,7 +1016,7 @@ static int z_erofs_scan_folio(struct z_erofs_frontend *f,
+ 		if (!(map->m_flags & EROFS_MAP_MAPPED)) {
+ 			folio_zero_segment(folio, cur, end);
+ 			tight = false;
+-		} else if (map->m_flags & EROFS_MAP_FRAGMENT) {
++		} else if (map->m_flags & __EROFS_MAP_FRAGMENT) {
+ 			erofs_off_t fpos = offset + cur - map->m_la;
+ 
+ 			err = z_erofs_read_fragment(inode->i_sb, folio, cur,
+diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c
+index 4535f2f0a0147e..25a4b82c183c0d 100644
+--- a/fs/erofs/zmap.c
++++ b/fs/erofs/zmap.c
+@@ -97,17 +97,48 @@ static int get_compacted_la_distance(unsigned int lobits,
+ 	return d1;
+ }
+ 
+-static int unpack_compacted_index(struct z_erofs_maprecorder *m,
+-				  unsigned int amortizedshift,
+-				  erofs_off_t pos, bool lookahead)
++static int z_erofs_load_compact_lcluster(struct z_erofs_maprecorder *m,
++					 unsigned long lcn, bool lookahead)
+ {
+-	struct erofs_inode *const vi = EROFS_I(m->inode);
++	struct inode *const inode = m->inode;
++	struct erofs_inode *const vi = EROFS_I(inode);
++	const erofs_off_t ebase = sizeof(struct z_erofs_map_header) +
++		ALIGN(erofs_iloc(inode) + vi->inode_isize + vi->xattr_isize, 8);
+ 	const unsigned int lclusterbits = vi->z_logical_clusterbits;
++	const unsigned int totalidx = erofs_iblks(inode);
++	unsigned int compacted_4b_initial, compacted_2b, amortizedshift;
+ 	unsigned int vcnt, lo, lobits, encodebits, nblk, bytes;
+-	bool big_pcluster;
++	bool big_pcluster = vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1;
++	erofs_off_t pos;
+ 	u8 *in, type;
+ 	int i;
+ 
++	if (lcn >= totalidx || lclusterbits > 14)
++		return -EINVAL;
++
++	m->lcn = lcn;
++	/* used to align to 32-byte (compacted_2b) alignment */
++	compacted_4b_initial = ((32 - ebase % 32) / 4) & 7;
++	compacted_2b = 0;
++	if ((vi->z_advise & Z_EROFS_ADVISE_COMPACTED_2B) &&
++	    compacted_4b_initial < totalidx)
++		compacted_2b = rounddown(totalidx - compacted_4b_initial, 16);
++
++	pos = ebase;
++	amortizedshift = 2;	/* compact_4b */
++	if (lcn >= compacted_4b_initial) {
++		pos += compacted_4b_initial * 4;
++		lcn -= compacted_4b_initial;
++		if (lcn < compacted_2b) {
++			amortizedshift = 1;
++		} else {
++			pos += compacted_2b * 2;
++			lcn -= compacted_2b;
++		}
++	}
++	pos += lcn * (1 << amortizedshift);
++
++	/* figure out the lcluster count in this pack */
+ 	if (1 << amortizedshift == 4 && lclusterbits <= 14)
+ 		vcnt = 2;
+ 	else if (1 << amortizedshift == 2 && lclusterbits <= 12)
+@@ -122,7 +153,6 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m,
+ 	/* it doesn't equal to round_up(..) */
+ 	m->nextpackoff = round_down(pos, vcnt << amortizedshift) +
+ 			 (vcnt << amortizedshift);
+-	big_pcluster = vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1;
+ 	lobits = max(lclusterbits, ilog2(Z_EROFS_LI_D0_CBLKCNT) + 1U);
+ 	encodebits = ((vcnt << amortizedshift) - sizeof(__le32)) * 8 / vcnt;
+ 	bytes = pos & ((vcnt << amortizedshift) - 1);
+@@ -207,53 +237,6 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m,
+ 	return 0;
+ }
+ 
+-static int z_erofs_load_compact_lcluster(struct z_erofs_maprecorder *m,
+-					 unsigned long lcn, bool lookahead)
+-{
+-	struct inode *const inode = m->inode;
+-	struct erofs_inode *const vi = EROFS_I(inode);
+-	const erofs_off_t ebase = sizeof(struct z_erofs_map_header) +
+-		ALIGN(erofs_iloc(inode) + vi->inode_isize + vi->xattr_isize, 8);
+-	unsigned int totalidx = erofs_iblks(inode);
+-	unsigned int compacted_4b_initial, compacted_2b;
+-	unsigned int amortizedshift;
+-	erofs_off_t pos;
+-
+-	if (lcn >= totalidx || vi->z_logical_clusterbits > 14)
+-		return -EINVAL;
+-
+-	m->lcn = lcn;
+-	/* used to align to 32-byte (compacted_2b) alignment */
+-	compacted_4b_initial = (32 - ebase % 32) / 4;
+-	if (compacted_4b_initial == 32 / 4)
+-		compacted_4b_initial = 0;
+-
+-	if ((vi->z_advise & Z_EROFS_ADVISE_COMPACTED_2B) &&
+-	    compacted_4b_initial < totalidx)
+-		compacted_2b = rounddown(totalidx - compacted_4b_initial, 16);
+-	else
+-		compacted_2b = 0;
+-
+-	pos = ebase;
+-	if (lcn < compacted_4b_initial) {
+-		amortizedshift = 2;
+-		goto out;
+-	}
+-	pos += compacted_4b_initial * 4;
+-	lcn -= compacted_4b_initial;
+-
+-	if (lcn < compacted_2b) {
+-		amortizedshift = 1;
+-		goto out;
+-	}
+-	pos += compacted_2b * 2;
+-	lcn -= compacted_2b;
+-	amortizedshift = 2;
+-out:
+-	pos += lcn * (1 << amortizedshift);
+-	return unpack_compacted_index(m, amortizedshift, pos, lookahead);
+-}
+-
+ static int z_erofs_load_lcluster_from_disk(struct z_erofs_maprecorder *m,
+ 					   unsigned int lcn, bool lookahead)
+ {
+@@ -282,26 +265,22 @@ static int z_erofs_extent_lookback(struct z_erofs_maprecorder *m,
+ 		if (err)
+ 			return err;
+ 
+-		switch (m->type) {
+-		case Z_EROFS_LCLUSTER_TYPE_NONHEAD:
++		if (m->type >= Z_EROFS_LCLUSTER_TYPE_MAX) {
++			erofs_err(sb, "unknown type %u @ lcn %lu of nid %llu",
++				  m->type, lcn, vi->nid);
++			DBG_BUGON(1);
++			return -EOPNOTSUPP;
++		} else if (m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
+ 			lookback_distance = m->delta[0];
+ 			if (!lookback_distance)
+-				goto err_bogus;
++				break;
+ 			continue;
+-		case Z_EROFS_LCLUSTER_TYPE_PLAIN:
+-		case Z_EROFS_LCLUSTER_TYPE_HEAD1:
+-		case Z_EROFS_LCLUSTER_TYPE_HEAD2:
++		} else {
+ 			m->headtype = m->type;
+ 			m->map->m_la = (lcn << lclusterbits) | m->clusterofs;
+ 			return 0;
+-		default:
+-			erofs_err(sb, "unknown type %u @ lcn %lu of nid %llu",
+-				  m->type, lcn, vi->nid);
+-			DBG_BUGON(1);
+-			return -EOPNOTSUPP;
+ 		}
+ 	}
+-err_bogus:
+ 	erofs_err(sb, "bogus lookback distance %u @ lcn %lu of nid %llu",
+ 		  lookback_distance, m->lcn, vi->nid);
+ 	DBG_BUGON(1);
+@@ -311,27 +290,23 @@ static int z_erofs_extent_lookback(struct z_erofs_maprecorder *m,
+ static int z_erofs_get_extent_compressedlen(struct z_erofs_maprecorder *m,
+ 					    unsigned int initial_lcn)
+ {
+-	struct super_block *sb = m->inode->i_sb;
+-	struct erofs_inode *const vi = EROFS_I(m->inode);
+-	struct erofs_map_blocks *const map = m->map;
+-	const unsigned int lclusterbits = vi->z_logical_clusterbits;
+-	unsigned long lcn;
++	struct inode *inode = m->inode;
++	struct super_block *sb = inode->i_sb;
++	struct erofs_inode *vi = EROFS_I(inode);
++	bool bigpcl1 = vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1;
++	bool bigpcl2 = vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_2;
++	unsigned long lcn = m->lcn + 1;
+ 	int err;
+ 
+-	DBG_BUGON(m->type != Z_EROFS_LCLUSTER_TYPE_PLAIN &&
+-		  m->type != Z_EROFS_LCLUSTER_TYPE_HEAD1 &&
+-		  m->type != Z_EROFS_LCLUSTER_TYPE_HEAD2);
++	DBG_BUGON(m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD);
+ 	DBG_BUGON(m->type != m->headtype);
+ 
+-	if (m->headtype == Z_EROFS_LCLUSTER_TYPE_PLAIN ||
+-	    ((m->headtype == Z_EROFS_LCLUSTER_TYPE_HEAD1) &&
+-	     !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1)) ||
+-	    ((m->headtype == Z_EROFS_LCLUSTER_TYPE_HEAD2) &&
+-	     !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_2))) {
+-		map->m_plen = 1ULL << lclusterbits;
+-		return 0;
+-	}
+-	lcn = m->lcn + 1;
++	if ((m->headtype == Z_EROFS_LCLUSTER_TYPE_HEAD1 && !bigpcl1) ||
++	    ((m->headtype == Z_EROFS_LCLUSTER_TYPE_PLAIN ||
++	      m->headtype == Z_EROFS_LCLUSTER_TYPE_HEAD2) && !bigpcl2) ||
++	    (lcn << vi->z_logical_clusterbits) >= inode->i_size)
++		m->compressedblks = 1;
++
+ 	if (m->compressedblks)
+ 		goto out;
+ 
+@@ -350,35 +325,28 @@ static int z_erofs_get_extent_compressedlen(struct z_erofs_maprecorder *m,
+ 	DBG_BUGON(lcn == initial_lcn &&
+ 		  m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD);
+ 
+-	switch (m->type) {
+-	case Z_EROFS_LCLUSTER_TYPE_PLAIN:
+-	case Z_EROFS_LCLUSTER_TYPE_HEAD1:
+-	case Z_EROFS_LCLUSTER_TYPE_HEAD2:
++	if (m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
++		if (m->delta[0] != 1) {
++			erofs_err(sb, "bogus CBLKCNT @ lcn %lu of nid %llu", lcn, vi->nid);
++			DBG_BUGON(1);
++			return -EFSCORRUPTED;
++		}
++		if (m->compressedblks)
++			goto out;
++	} else if (m->type < Z_EROFS_LCLUSTER_TYPE_MAX) {
+ 		/*
+ 		 * if the 1st NONHEAD lcluster is actually PLAIN or HEAD type
+-		 * rather than CBLKCNT, it's a 1 lcluster-sized pcluster.
++		 * rather than CBLKCNT, it's a 1 block-sized pcluster.
+ 		 */
+-		m->compressedblks = 1 << (lclusterbits - sb->s_blocksize_bits);
+-		break;
+-	case Z_EROFS_LCLUSTER_TYPE_NONHEAD:
+-		if (m->delta[0] != 1)
+-			goto err_bonus_cblkcnt;
+-		if (m->compressedblks)
+-			break;
+-		fallthrough;
+-	default:
+-		erofs_err(sb, "cannot found CBLKCNT @ lcn %lu of nid %llu", lcn,
+-			  vi->nid);
+-		DBG_BUGON(1);
+-		return -EFSCORRUPTED;
++		m->compressedblks = 1;
++		goto out;
+ 	}
+-out:
+-	map->m_plen = erofs_pos(sb, m->compressedblks);
+-	return 0;
+-err_bonus_cblkcnt:
+-	erofs_err(sb, "bogus CBLKCNT @ lcn %lu of nid %llu", lcn, vi->nid);
++	erofs_err(sb, "cannot found CBLKCNT @ lcn %lu of nid %llu", lcn, vi->nid);
+ 	DBG_BUGON(1);
+ 	return -EFSCORRUPTED;
++out:
++	m->map->m_plen = erofs_pos(sb, m->compressedblks);
++	return 0;
+ }
+ 
+ static int z_erofs_get_extent_decompressedlen(struct z_erofs_maprecorder *m)
+@@ -407,9 +375,7 @@ static int z_erofs_get_extent_decompressedlen(struct z_erofs_maprecorder *m)
+ 				m->delta[1] = 1;
+ 				DBG_BUGON(1);
+ 			}
+-		} else if (m->type == Z_EROFS_LCLUSTER_TYPE_PLAIN ||
+-			   m->type == Z_EROFS_LCLUSTER_TYPE_HEAD1 ||
+-			   m->type == Z_EROFS_LCLUSTER_TYPE_HEAD2) {
++		} else if (m->type < Z_EROFS_LCLUSTER_TYPE_MAX) {
+ 			if (lcn != headlcn)
+ 				break;	/* ends at the next HEAD lcluster */
+ 			m->delta[1] = 1;
+@@ -428,9 +394,10 @@ static int z_erofs_get_extent_decompressedlen(struct z_erofs_maprecorder *m)
+ static int z_erofs_do_map_blocks(struct inode *inode,
+ 				 struct erofs_map_blocks *map, int flags)
+ {
+-	struct erofs_inode *const vi = EROFS_I(inode);
+-	bool ztailpacking = vi->z_advise & Z_EROFS_ADVISE_INLINE_PCLUSTER;
++	struct erofs_inode *vi = EROFS_I(inode);
++	struct super_block *sb = inode->i_sb;
+ 	bool fragment = vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER;
++	bool ztailpacking = vi->z_idata_size;
+ 	struct z_erofs_maprecorder m = {
+ 		.inode = inode,
+ 		.map = map,
+@@ -449,9 +416,8 @@ static int z_erofs_do_map_blocks(struct inode *inode,
+ 	if (err)
+ 		goto unmap_out;
+ 
+-	if (ztailpacking && (flags & EROFS_GET_BLOCKS_FINDTAIL))
+-		vi->z_idataoff = m.nextpackoff;
+-
++	if ((flags & EROFS_GET_BLOCKS_FINDTAIL) && ztailpacking)
++		vi->z_fragmentoff = m.nextpackoff;
+ 	map->m_flags = EROFS_MAP_MAPPED | EROFS_MAP_ENCODED;
+ 	end = (m.lcn + 1ULL) << lclusterbits;
+ 
+@@ -473,8 +439,7 @@ static int z_erofs_do_map_blocks(struct inode *inode,
+ 		}
+ 		/* m.lcn should be >= 1 if endoff < m.clusterofs */
+ 		if (!m.lcn) {
+-			erofs_err(inode->i_sb,
+-				  "invalid logical cluster 0 at nid %llu",
++			erofs_err(sb, "invalid logical cluster 0 at nid %llu",
+ 				  vi->nid);
+ 			err = -EFSCORRUPTED;
+ 			goto unmap_out;
+@@ -490,8 +455,7 @@ static int z_erofs_do_map_blocks(struct inode *inode,
+ 			goto unmap_out;
+ 		break;
+ 	default:
+-		erofs_err(inode->i_sb,
+-			  "unknown type %u @ offset %llu of nid %llu",
++		erofs_err(sb, "unknown type %u @ offset %llu of nid %llu",
+ 			  m.type, ofs, vi->nid);
+ 		err = -EOPNOTSUPP;
+ 		goto unmap_out;
+@@ -508,12 +472,18 @@ static int z_erofs_do_map_blocks(struct inode *inode,
+ 	}
+ 	if (ztailpacking && m.lcn == vi->z_tailextent_headlcn) {
+ 		map->m_flags |= EROFS_MAP_META;
+-		map->m_pa = vi->z_idataoff;
++		map->m_pa = vi->z_fragmentoff;
+ 		map->m_plen = vi->z_idata_size;
++		if (erofs_blkoff(sb, map->m_pa) + map->m_plen > sb->s_blocksize) {
++			erofs_err(sb, "invalid tail-packing pclustersize %llu",
++				  map->m_plen);
++			err = -EFSCORRUPTED;
++			goto unmap_out;
++		}
+ 	} else if (fragment && m.lcn == vi->z_tailextent_headlcn) {
+-		map->m_flags |= EROFS_MAP_FRAGMENT;
++		map->m_flags = EROFS_MAP_FRAGMENT;
+ 	} else {
+-		map->m_pa = erofs_pos(inode->i_sb, m.pblk);
++		map->m_pa = erofs_pos(sb, m.pblk);
+ 		err = z_erofs_get_extent_compressedlen(&m, initial_lcn);
+ 		if (err)
+ 			goto unmap_out;
+@@ -532,7 +502,7 @@ static int z_erofs_do_map_blocks(struct inode *inode,
+ 		afmt = m.headtype == Z_EROFS_LCLUSTER_TYPE_HEAD2 ?
+ 			vi->z_algorithmtype[1] : vi->z_algorithmtype[0];
+ 		if (!(EROFS_I_SB(inode)->available_compr_algs & (1 << afmt))) {
+-			erofs_err(inode->i_sb, "inconsistent algorithmtype %u for nid %llu",
++			erofs_err(sb, "inconsistent algorithmtype %u for nid %llu",
+ 				  afmt, vi->nid);
+ 			err = -EFSCORRUPTED;
+ 			goto unmap_out;
+@@ -601,6 +571,10 @@ static int z_erofs_fill_inode_lazy(struct inode *inode)
+ 	vi->z_advise = le16_to_cpu(h->h_advise);
+ 	vi->z_algorithmtype[0] = h->h_algorithmtype & 15;
+ 	vi->z_algorithmtype[1] = h->h_algorithmtype >> 4;
++	if (vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER)
++		vi->z_fragmentoff = le32_to_cpu(h->h_fragmentoff);
++	else if (vi->z_advise & Z_EROFS_ADVISE_INLINE_PCLUSTER)
++		vi->z_idata_size = le16_to_cpu(h->h_idata_size);
+ 
+ 	headnr = 0;
+ 	if (vi->z_algorithmtype[0] >= Z_EROFS_COMPRESSION_MAX ||
+@@ -629,33 +603,12 @@ static int z_erofs_fill_inode_lazy(struct inode *inode)
+ 		goto out_put_metabuf;
+ 	}
+ 
+-	if (vi->z_advise & Z_EROFS_ADVISE_INLINE_PCLUSTER) {
+-		struct erofs_map_blocks map = {
+-			.buf = __EROFS_BUF_INITIALIZER
+-		};
+-
+-		vi->z_idata_size = le16_to_cpu(h->h_idata_size);
+-		err = z_erofs_do_map_blocks(inode, &map,
+-					    EROFS_GET_BLOCKS_FINDTAIL);
+-		erofs_put_metabuf(&map.buf);
+-
+-		if (!map.m_plen ||
+-		    erofs_blkoff(sb, map.m_pa) + map.m_plen > sb->s_blocksize) {
+-			erofs_err(sb, "invalid tail-packing pclustersize %llu",
+-				  map.m_plen);
+-			err = -EFSCORRUPTED;
+-		}
+-		if (err < 0)
+-			goto out_put_metabuf;
+-	}
+-
+-	if (vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER &&
+-	    !(h->h_clusterbits >> Z_EROFS_FRAGMENT_INODE_BIT)) {
++	if (vi->z_idata_size ||
++	    (vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER)) {
+ 		struct erofs_map_blocks map = {
+ 			.buf = __EROFS_BUF_INITIALIZER
+ 		};
+ 
+-		vi->z_fragmentoff = le32_to_cpu(h->h_fragmentoff);
+ 		err = z_erofs_do_map_blocks(inode, &map,
+ 					    EROFS_GET_BLOCKS_FINDTAIL);
+ 		erofs_put_metabuf(&map.buf);
+@@ -691,8 +644,7 @@ int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map,
+ 			    !vi->z_tailextent_headlcn) {
+ 				map->m_la = 0;
+ 				map->m_llen = inode->i_size;
+-				map->m_flags = EROFS_MAP_MAPPED |
+-					EROFS_MAP_FULL_MAPPED | EROFS_MAP_FRAGMENT;
++				map->m_flags = EROFS_MAP_FRAGMENT;
+ 			} else {
+ 				err = z_erofs_do_map_blocks(inode, map, flags);
+ 			}
+@@ -725,7 +677,7 @@ static int z_erofs_iomap_begin_report(struct inode *inode, loff_t offset,
+ 	iomap->length = map.m_llen;
+ 	if (map.m_flags & EROFS_MAP_MAPPED) {
+ 		iomap->type = IOMAP_MAPPED;
+-		iomap->addr = map.m_flags & EROFS_MAP_FRAGMENT ?
++		iomap->addr = map.m_flags & __EROFS_MAP_FRAGMENT ?
+ 			      IOMAP_NULL_ADDR : map.m_pa;
+ 	} else {
+ 		iomap->type = IOMAP_HOLE;
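
The erofs side of this patch folds the tail-packing sanity check into z_erofs_do_map_blocks(): an inline pcluster is rejected when its offset within the block plus its length would cross the end of the containing filesystem block. A minimal userspace sketch of that check, assuming a power-of-two block size (blkoff() here is a stand-in for the kernel's erofs_blkoff(), not the real helper):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Offset of 'pos' within its block (stand-in for erofs_blkoff()). */
static uint64_t blkoff(uint64_t pos, uint32_t blocksize)
{
	return pos & (blocksize - 1);	/* blocksize is a power of two */
}

/*
 * An inline (tail-packed) pcluster at 'pa' with length 'plen' is only
 * valid if it does not cross the end of its filesystem block.
 */
static bool inline_extent_valid(uint64_t pa, uint64_t plen, uint32_t blocksize)
{
	return blkoff(pa, blocksize) + plen <= blocksize;
}

int main(void)
{
	printf("%d\n", inline_extent_valid(8096, 96, 4096));	/* 1: fits */
	printf("%d\n", inline_extent_valid(8096, 200, 4096));	/* 0: crosses */
	return 0;
}
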
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index e94df69ee2e0d2..a95525bfb99cf2 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -368,6 +368,8 @@ struct ext4_io_submit {
+ #define EXT4_MAX_BLOCKS(size, offset, blkbits) \
+ 	((EXT4_BLOCK_ALIGN(size + offset, blkbits) >> blkbits) - (offset >> \
+ 								  blkbits))
++#define EXT4_B_TO_LBLK(inode, offset) \
++	(round_up((offset), i_blocksize(inode)) >> (inode)->i_blkbits)
+ 
+ /* Translate a block number to a cluster number */
+ #define EXT4_B2C(sbi, blk)	((blk) >> (sbi)->s_cluster_bits)
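
The new EXT4_B_TO_LBLK() helper rounds a byte offset up to the next block boundary before shifting it down to a logical block number, so a partial block counts as a whole one. A small standalone sketch of the same arithmetic (the ROUND_UP macro and the 4 KiB block size are illustrative, not kernel definitions):

#include <stdint.h>
#include <stdio.h>

/* Round 'x' up to a multiple of power-of-two 'a', as round_up() does. */
#define ROUND_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

/* Byte offset -> logical block number, counting a partial block as whole. */
static uint64_t b_to_lblk(uint64_t offset, unsigned int blkbits)
{
	return ROUND_UP(offset, 1ULL << blkbits) >> blkbits;
}

int main(void)
{
	/* With 4 KiB blocks (blkbits = 12): 4096 -> 1, 4097 -> 2. */
	printf("%llu\n", (unsigned long long)b_to_lblk(4096, 12));
	printf("%llu\n", (unsigned long long)b_to_lblk(4097, 12));
	return 0;
}
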
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index b16d72275e1054..2f9c3cd4f26ccb 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -4569,122 +4569,65 @@ static long ext4_zero_range(struct file *file, loff_t offset,
+ 			    loff_t len, int mode)
+ {
+ 	struct inode *inode = file_inode(file);
+-	struct address_space *mapping = file->f_mapping;
+ 	handle_t *handle = NULL;
+-	unsigned int max_blocks;
+ 	loff_t new_size = 0;
+-	int ret = 0;
+-	int flags;
+-	int credits;
+-	int partial_begin, partial_end;
+-	loff_t start, end;
+-	ext4_lblk_t lblk;
++	loff_t end = offset + len;
++	ext4_lblk_t start_lblk, end_lblk;
++	unsigned int blocksize = i_blocksize(inode);
+ 	unsigned int blkbits = inode->i_blkbits;
++	int ret, flags, credits;
+ 
+ 	trace_ext4_zero_range(inode, offset, len, mode);
++	WARN_ON_ONCE(!inode_is_locked(inode));
+ 
+-	/*
+-	 * Round up offset. This is not fallocate, we need to zero out
+-	 * blocks, so convert interior block aligned part of the range to
+-	 * unwritten and possibly manually zero out unaligned parts of the
+-	 * range. Here, start and partial_begin are inclusive, end and
+-	 * partial_end are exclusive.
+-	 */
+-	start = round_up(offset, 1 << blkbits);
+-	end = round_down((offset + len), 1 << blkbits);
+-
+-	if (start < offset || end > offset + len)
+-		return -EINVAL;
+-	partial_begin = offset & ((1 << blkbits) - 1);
+-	partial_end = (offset + len) & ((1 << blkbits) - 1);
+-
+-	lblk = start >> blkbits;
+-	max_blocks = (end >> blkbits);
+-	if (max_blocks < lblk)
+-		max_blocks = 0;
+-	else
+-		max_blocks -= lblk;
+-
+-	inode_lock(inode);
+-
+-	/*
+-	 * Indirect files do not support unwritten extents
+-	 */
+-	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
+-		ret = -EOPNOTSUPP;
+-		goto out_mutex;
+-	}
++	/* Indirect files do not support unwritten extents */
++	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
++		return -EOPNOTSUPP;
+ 
+ 	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
+-	    (offset + len > inode->i_size ||
+-	     offset + len > EXT4_I(inode)->i_disksize)) {
+-		new_size = offset + len;
++	    (end > inode->i_size || end > EXT4_I(inode)->i_disksize)) {
++		new_size = end;
+ 		ret = inode_newsize_ok(inode, new_size);
+ 		if (ret)
+-			goto out_mutex;
++			return ret;
+ 	}
+ 
+ 	flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;
+-
+-	/* Wait all existing dio workers, newcomers will block on i_rwsem */
+-	inode_dio_wait(inode);
+-
+-	ret = file_modified(file);
+-	if (ret)
+-		goto out_mutex;
+-
+ 	/* Preallocate the range including the unaligned edges */
+-	if (partial_begin || partial_end) {
+-		ret = ext4_alloc_file_blocks(file,
+-				round_down(offset, 1 << blkbits) >> blkbits,
+-				(round_up((offset + len), 1 << blkbits) -
+-				 round_down(offset, 1 << blkbits)) >> blkbits,
+-				new_size, flags);
+-		if (ret)
+-			goto out_mutex;
++	if (!IS_ALIGNED(offset | end, blocksize)) {
++		ext4_lblk_t alloc_lblk = offset >> blkbits;
++		ext4_lblk_t len_lblk = EXT4_MAX_BLOCKS(len, offset, blkbits);
+ 
++		ret = ext4_alloc_file_blocks(file, alloc_lblk, len_lblk,
++					     new_size, flags);
++		if (ret)
++			return ret;
+ 	}
+ 
+-	/* Zero range excluding the unaligned edges */
+-	if (max_blocks > 0) {
+-		flags |= (EXT4_GET_BLOCKS_CONVERT_UNWRITTEN |
+-			  EXT4_EX_NOCACHE);
+-
+-		/*
+-		 * Prevent page faults from reinstantiating pages we have
+-		 * released from page cache.
+-		 */
+-		filemap_invalidate_lock(mapping);
+-
+-		ret = ext4_break_layouts(inode);
+-		if (ret) {
+-			filemap_invalidate_unlock(mapping);
+-			goto out_mutex;
+-		}
+-
+-		ret = ext4_update_disksize_before_punch(inode, offset, len);
+-		if (ret) {
+-			filemap_invalidate_unlock(mapping);
+-			goto out_mutex;
+-		}
+-
+-		/* Now release the pages and zero block aligned part of pages */
+-		ret = ext4_truncate_page_cache_block_range(inode, start, end);
+-		if (ret) {
+-			filemap_invalidate_unlock(mapping);
+-			goto out_mutex;
+-		}
++	ret = ext4_update_disksize_before_punch(inode, offset, len);
++	if (ret)
++		return ret;
+ 
+-		inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
++	/* Now release the pages and zero block aligned part of pages */
++	ret = ext4_truncate_page_cache_block_range(inode, offset, end);
++	if (ret)
++		return ret;
+ 
+-		ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size,
+-					     flags);
+-		filemap_invalidate_unlock(mapping);
++	/* Zero range excluding the unaligned edges */
++	start_lblk = EXT4_B_TO_LBLK(inode, offset);
++	end_lblk = end >> blkbits;
++	if (end_lblk > start_lblk) {
++		ext4_lblk_t zero_blks = end_lblk - start_lblk;
++
++		flags |= (EXT4_GET_BLOCKS_CONVERT_UNWRITTEN | EXT4_EX_NOCACHE);
++		ret = ext4_alloc_file_blocks(file, start_lblk, zero_blks,
++					     new_size, flags);
+ 		if (ret)
+-			goto out_mutex;
++			return ret;
+ 	}
+-	if (!partial_begin && !partial_end)
+-		goto out_mutex;
++	/* Finished zeroing if the range contains no partial blocks */
++	if (IS_ALIGNED(offset | end, blocksize))
++		return ret;
+ 
+ 	/*
+ 	 * In worst case we have to writeout two nonadjacent unwritten
+@@ -4697,27 +4640,69 @@ static long ext4_zero_range(struct file *file, loff_t offset,
+ 	if (IS_ERR(handle)) {
+ 		ret = PTR_ERR(handle);
+ 		ext4_std_error(inode->i_sb, ret);
+-		goto out_mutex;
++		return ret;
+ 	}
+ 
+-	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
++	/* Zero out partial block at the edges of the range */
++	ret = ext4_zero_partial_blocks(handle, inode, offset, len);
++	if (ret)
++		goto out_handle;
++
+ 	if (new_size)
+ 		ext4_update_inode_size(inode, new_size);
+ 	ret = ext4_mark_inode_dirty(handle, inode);
+ 	if (unlikely(ret))
+ 		goto out_handle;
+-	/* Zero out partial block at the edges of the range */
+-	ret = ext4_zero_partial_blocks(handle, inode, offset, len);
+-	if (ret >= 0)
+-		ext4_update_inode_fsync_trans(handle, inode, 1);
+ 
++	ext4_update_inode_fsync_trans(handle, inode, 1);
+ 	if (file->f_flags & O_SYNC)
+ 		ext4_handle_sync(handle);
+ 
+ out_handle:
+ 	ext4_journal_stop(handle);
+-out_mutex:
+-	inode_unlock(inode);
++	return ret;
++}
++
++static long ext4_do_fallocate(struct file *file, loff_t offset,
++			      loff_t len, int mode)
++{
++	struct inode *inode = file_inode(file);
++	loff_t end = offset + len;
++	loff_t new_size = 0;
++	ext4_lblk_t start_lblk, len_lblk;
++	int ret;
++
++	trace_ext4_fallocate_enter(inode, offset, len, mode);
++	WARN_ON_ONCE(!inode_is_locked(inode));
++
++	start_lblk = offset >> inode->i_blkbits;
++	len_lblk = EXT4_MAX_BLOCKS(len, offset, inode->i_blkbits);
++
++	/* We only support preallocation for extent-based files. */
++	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
++		ret = -EOPNOTSUPP;
++		goto out;
++	}
++
++	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
++	    (end > inode->i_size || end > EXT4_I(inode)->i_disksize)) {
++		new_size = end;
++		ret = inode_newsize_ok(inode, new_size);
++		if (ret)
++			goto out;
++	}
++
++	ret = ext4_alloc_file_blocks(file, start_lblk, len_lblk, new_size,
++				     EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT);
++	if (ret)
++		goto out;
++
++	if (file->f_flags & O_SYNC && EXT4_SB(inode->i_sb)->s_journal) {
++		ret = ext4_fc_commit(EXT4_SB(inode->i_sb)->s_journal,
++					EXT4_I(inode)->i_sync_tid);
++	}
++out:
++	trace_ext4_fallocate_exit(inode, offset, len_lblk, ret);
+ 	return ret;
+ }
+ 
+@@ -4731,12 +4716,8 @@ static long ext4_zero_range(struct file *file, loff_t offset,
+ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
+ {
+ 	struct inode *inode = file_inode(file);
+-	loff_t new_size = 0;
+-	unsigned int max_blocks;
+-	int ret = 0;
+-	int flags;
+-	ext4_lblk_t lblk;
+-	unsigned int blkbits = inode->i_blkbits;
++	struct address_space *mapping = file->f_mapping;
++	int ret;
+ 
+ 	/*
+ 	 * Encrypted inodes can't handle collapse range or insert
+@@ -4756,73 +4737,47 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
+ 
+ 	inode_lock(inode);
+ 	ret = ext4_convert_inline_data(inode);
+-	inode_unlock(inode);
+ 	if (ret)
+-		goto exit;
++		goto out_inode_lock;
+ 
+-	if (mode & FALLOC_FL_PUNCH_HOLE) {
+-		ret = ext4_punch_hole(file, offset, len);
+-		goto exit;
+-	}
+-
+-	if (mode & FALLOC_FL_COLLAPSE_RANGE) {
+-		ret = ext4_collapse_range(file, offset, len);
+-		goto exit;
+-	}
++	/* Wait all existing dio workers, newcomers will block on i_rwsem */
++	inode_dio_wait(inode);
+ 
+-	if (mode & FALLOC_FL_INSERT_RANGE) {
+-		ret = ext4_insert_range(file, offset, len);
+-		goto exit;
+-	}
++	ret = file_modified(file);
++	if (ret)
++		goto out_inode_lock;
+ 
+-	if (mode & FALLOC_FL_ZERO_RANGE) {
+-		ret = ext4_zero_range(file, offset, len, mode);
+-		goto exit;
++	if ((mode & FALLOC_FL_MODE_MASK) == FALLOC_FL_ALLOCATE_RANGE) {
++		ret = ext4_do_fallocate(file, offset, len, mode);
++		goto out_inode_lock;
+ 	}
+-	trace_ext4_fallocate_enter(inode, offset, len, mode);
+-	lblk = offset >> blkbits;
+-
+-	max_blocks = EXT4_MAX_BLOCKS(len, offset, blkbits);
+-	flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;
+-
+-	inode_lock(inode);
+ 
+ 	/*
+-	 * We only support preallocation for extent-based files only
++	 * Follow-up operations will drop the page cache; hold the invalidate
++	 * lock to prevent page faults from reinstantiating pages we have
++	 * released from the page cache.
+ 	 */
+-	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
+-		ret = -EOPNOTSUPP;
+-		goto out;
+-	}
+-
+-	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
+-	    (offset + len > inode->i_size ||
+-	     offset + len > EXT4_I(inode)->i_disksize)) {
+-		new_size = offset + len;
+-		ret = inode_newsize_ok(inode, new_size);
+-		if (ret)
+-			goto out;
+-	}
+-
+-	/* Wait all existing dio workers, newcomers will block on i_rwsem */
+-	inode_dio_wait(inode);
++	filemap_invalidate_lock(mapping);
+ 
+-	ret = file_modified(file);
++	ret = ext4_break_layouts(inode);
+ 	if (ret)
+-		goto out;
++		goto out_invalidate_lock;
+ 
+-	ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size, flags);
+-	if (ret)
+-		goto out;
++	if (mode & FALLOC_FL_PUNCH_HOLE)
++		ret = ext4_punch_hole(file, offset, len);
++	else if (mode & FALLOC_FL_COLLAPSE_RANGE)
++		ret = ext4_collapse_range(file, offset, len);
++	else if (mode & FALLOC_FL_INSERT_RANGE)
++		ret = ext4_insert_range(file, offset, len);
++	else if (mode & FALLOC_FL_ZERO_RANGE)
++		ret = ext4_zero_range(file, offset, len, mode);
++	else
++		ret = -EOPNOTSUPP;
+ 
+-	if (file->f_flags & O_SYNC && EXT4_SB(inode->i_sb)->s_journal) {
+-		ret = ext4_fc_commit(EXT4_SB(inode->i_sb)->s_journal,
+-					EXT4_I(inode)->i_sync_tid);
+-	}
+-out:
++out_invalidate_lock:
++	filemap_invalidate_unlock(mapping);
++out_inode_lock:
+ 	inode_unlock(inode);
+-	trace_ext4_fallocate_exit(inode, offset, max_blocks, ret);
+-exit:
+ 	return ret;
+ }
+ 
+@@ -5319,109 +5274,72 @@ static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len)
+ 	struct inode *inode = file_inode(file);
+ 	struct super_block *sb = inode->i_sb;
+ 	struct address_space *mapping = inode->i_mapping;
+-	ext4_lblk_t punch_start, punch_stop;
++	loff_t end = offset + len;
++	ext4_lblk_t start_lblk, end_lblk;
+ 	handle_t *handle;
+ 	unsigned int credits;
+-	loff_t new_size, ioffset;
++	loff_t start, new_size;
+ 	int ret;
+ 
+-	/*
+-	 * We need to test this early because xfstests assumes that a
+-	 * collapse range of (0, 1) will return EOPNOTSUPP if the file
+-	 * system does not support collapse range.
+-	 */
++	trace_ext4_collapse_range(inode, offset, len);
++	WARN_ON_ONCE(!inode_is_locked(inode));
++
++	/* Currently just for extent based files */
+ 	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
+ 		return -EOPNOTSUPP;
+-
+ 	/* Collapse range works only on fs cluster size aligned regions. */
+ 	if (!IS_ALIGNED(offset | len, EXT4_CLUSTER_SIZE(sb)))
+ 		return -EINVAL;
+-
+-	trace_ext4_collapse_range(inode, offset, len);
+-
+-	punch_start = offset >> EXT4_BLOCK_SIZE_BITS(sb);
+-	punch_stop = (offset + len) >> EXT4_BLOCK_SIZE_BITS(sb);
+-
+-	inode_lock(inode);
+ 	/*
+ 	 * There is no need to overlap collapse range with EOF, in which case
+ 	 * it is effectively a truncate operation
+ 	 */
+-	if (offset + len >= inode->i_size) {
+-		ret = -EINVAL;
+-		goto out_mutex;
+-	}
+-
+-	/* Currently just for extent based files */
+-	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
+-		ret = -EOPNOTSUPP;
+-		goto out_mutex;
+-	}
+-
+-	/* Wait for existing dio to complete */
+-	inode_dio_wait(inode);
+-
+-	ret = file_modified(file);
+-	if (ret)
+-		goto out_mutex;
+-
+-	/*
+-	 * Prevent page faults from reinstantiating pages we have released from
+-	 * page cache.
+-	 */
+-	filemap_invalidate_lock(mapping);
+-
+-	ret = ext4_break_layouts(inode);
+-	if (ret)
+-		goto out_mmap;
++	if (end >= inode->i_size)
++		return -EINVAL;
+ 
+ 	/*
++	 * Write out the tail of the last page before the removed range and
++	 * the data that will be shifted, since both will be removed from
++	 * the page cache below. We are also protected from pages becoming
++	 * dirty by i_rwsem and invalidate_lock.
+ 	 * Need to round down offset to be aligned with page size boundary
+ 	 * for page size > block size.
+ 	 */
+-	ioffset = round_down(offset, PAGE_SIZE);
+-	/*
+-	 * Write tail of the last page before removed range since it will get
+-	 * removed from the page cache below.
+-	 */
+-	ret = filemap_write_and_wait_range(mapping, ioffset, offset);
++	start = round_down(offset, PAGE_SIZE);
++	ret = filemap_write_and_wait_range(mapping, start, offset);
++	if (!ret)
++		ret = filemap_write_and_wait_range(mapping, end, LLONG_MAX);
+ 	if (ret)
+-		goto out_mmap;
+-	/*
+-	 * Write data that will be shifted to preserve them when discarding
+-	 * page cache below. We are also protected from pages becoming dirty
+-	 * by i_rwsem and invalidate_lock.
+-	 */
+-	ret = filemap_write_and_wait_range(mapping, offset + len,
+-					   LLONG_MAX);
+-	if (ret)
+-		goto out_mmap;
+-	truncate_pagecache(inode, ioffset);
++		return ret;
++
++	truncate_pagecache(inode, start);
+ 
+ 	credits = ext4_writepage_trans_blocks(inode);
+ 	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
+-	if (IS_ERR(handle)) {
+-		ret = PTR_ERR(handle);
+-		goto out_mmap;
+-	}
++	if (IS_ERR(handle))
++		return PTR_ERR(handle);
++
+ 	ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE, handle);
+ 
++	start_lblk = offset >> inode->i_blkbits;
++	end_lblk = (offset + len) >> inode->i_blkbits;
++
+ 	down_write(&EXT4_I(inode)->i_data_sem);
+ 	ext4_discard_preallocations(inode);
+-	ext4_es_remove_extent(inode, punch_start, EXT_MAX_BLOCKS - punch_start);
++	ext4_es_remove_extent(inode, start_lblk, EXT_MAX_BLOCKS - start_lblk);
+ 
+-	ret = ext4_ext_remove_space(inode, punch_start, punch_stop - 1);
++	ret = ext4_ext_remove_space(inode, start_lblk, end_lblk - 1);
+ 	if (ret) {
+ 		up_write(&EXT4_I(inode)->i_data_sem);
+-		goto out_stop;
++		goto out_handle;
+ 	}
+ 	ext4_discard_preallocations(inode);
+ 
+-	ret = ext4_ext_shift_extents(inode, handle, punch_stop,
+-				     punch_stop - punch_start, SHIFT_LEFT);
++	ret = ext4_ext_shift_extents(inode, handle, end_lblk,
++				     end_lblk - start_lblk, SHIFT_LEFT);
+ 	if (ret) {
+ 		up_write(&EXT4_I(inode)->i_data_sem);
+-		goto out_stop;
++		goto out_handle;
+ 	}
+ 
+ 	new_size = inode->i_size - len;
+@@ -5429,18 +5347,16 @@ static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len)
+ 	EXT4_I(inode)->i_disksize = new_size;
+ 
+ 	up_write(&EXT4_I(inode)->i_data_sem);
+-	if (IS_SYNC(inode))
+-		ext4_handle_sync(handle);
+-	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
+ 	ret = ext4_mark_inode_dirty(handle, inode);
++	if (ret)
++		goto out_handle;
++
+ 	ext4_update_inode_fsync_trans(handle, inode, 1);
++	if (IS_SYNC(inode))
++		ext4_handle_sync(handle);
+ 
+-out_stop:
++out_handle:
+ 	ext4_journal_stop(handle);
+-out_mmap:
+-	filemap_invalidate_unlock(mapping);
+-out_mutex:
+-	inode_unlock(inode);
+ 	return ret;
+ }
+ 
+@@ -5460,100 +5376,63 @@ static int ext4_insert_range(struct file *file, loff_t offset, loff_t len)
+ 	handle_t *handle;
+ 	struct ext4_ext_path *path;
+ 	struct ext4_extent *extent;
+-	ext4_lblk_t offset_lblk, len_lblk, ee_start_lblk = 0;
++	ext4_lblk_t start_lblk, len_lblk, ee_start_lblk = 0;
+ 	unsigned int credits, ee_len;
+-	int ret = 0, depth, split_flag = 0;
+-	loff_t ioffset;
++	int ret, depth, split_flag = 0;
++	loff_t start;
+ 
+-	/*
+-	 * We need to test this early because xfstests assumes that an
+-	 * insert range of (0, 1) will return EOPNOTSUPP if the file
+-	 * system does not support insert range.
+-	 */
++	trace_ext4_insert_range(inode, offset, len);
++	WARN_ON_ONCE(!inode_is_locked(inode));
++
++	/* Currently just for extent based files */
+ 	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
+ 		return -EOPNOTSUPP;
+-
+ 	/* Insert range works only on fs cluster size aligned regions. */
+ 	if (!IS_ALIGNED(offset | len, EXT4_CLUSTER_SIZE(sb)))
+ 		return -EINVAL;
+-
+-	trace_ext4_insert_range(inode, offset, len);
+-
+-	offset_lblk = offset >> EXT4_BLOCK_SIZE_BITS(sb);
+-	len_lblk = len >> EXT4_BLOCK_SIZE_BITS(sb);
+-
+-	inode_lock(inode);
+-	/* Currently just for extent based files */
+-	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
+-		ret = -EOPNOTSUPP;
+-		goto out_mutex;
+-	}
+-
+-	/* Check whether the maximum file size would be exceeded */
+-	if (len > inode->i_sb->s_maxbytes - inode->i_size) {
+-		ret = -EFBIG;
+-		goto out_mutex;
+-	}
+-
+ 	/* Offset must be less than i_size */
+-	if (offset >= inode->i_size) {
+-		ret = -EINVAL;
+-		goto out_mutex;
+-	}
+-
+-	/* Wait for existing dio to complete */
+-	inode_dio_wait(inode);
+-
+-	ret = file_modified(file);
+-	if (ret)
+-		goto out_mutex;
++	if (offset >= inode->i_size)
++		return -EINVAL;
++	/* Check whether the maximum file size would be exceeded */
++	if (len > inode->i_sb->s_maxbytes - inode->i_size)
++		return -EFBIG;
+ 
+ 	/*
+-	 * Prevent page faults from reinstantiating pages we have released from
+-	 * page cache.
++	 * Write out all dirty pages. The start offset needs rounding down
++	 * to a page boundary when page size > block size.
+ 	 */
+-	filemap_invalidate_lock(mapping);
+-
+-	ret = ext4_break_layouts(inode);
++	start = round_down(offset, PAGE_SIZE);
++	ret = filemap_write_and_wait_range(mapping, start, LLONG_MAX);
+ 	if (ret)
+-		goto out_mmap;
++		return ret;
+ 
+-	/*
+-	 * Need to round down to align start offset to page size boundary
+-	 * for page size > block size.
+-	 */
+-	ioffset = round_down(offset, PAGE_SIZE);
+-	/* Write out all dirty pages */
+-	ret = filemap_write_and_wait_range(inode->i_mapping, ioffset,
+-			LLONG_MAX);
+-	if (ret)
+-		goto out_mmap;
+-	truncate_pagecache(inode, ioffset);
++	truncate_pagecache(inode, start);
+ 
+ 	credits = ext4_writepage_trans_blocks(inode);
+ 	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
+-	if (IS_ERR(handle)) {
+-		ret = PTR_ERR(handle);
+-		goto out_mmap;
+-	}
++	if (IS_ERR(handle))
++		return PTR_ERR(handle);
++
+ 	ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE, handle);
+ 
+ 	/* Expand file to avoid data loss if there is error while shifting */
+ 	inode->i_size += len;
+ 	EXT4_I(inode)->i_disksize += len;
+-	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
+ 	ret = ext4_mark_inode_dirty(handle, inode);
+ 	if (ret)
+-		goto out_stop;
++		goto out_handle;
++
++	start_lblk = offset >> inode->i_blkbits;
++	len_lblk = len >> inode->i_blkbits;
+ 
+ 	down_write(&EXT4_I(inode)->i_data_sem);
+ 	ext4_discard_preallocations(inode);
+ 
+-	path = ext4_find_extent(inode, offset_lblk, NULL, 0);
++	path = ext4_find_extent(inode, start_lblk, NULL, 0);
+ 	if (IS_ERR(path)) {
+ 		up_write(&EXT4_I(inode)->i_data_sem);
+ 		ret = PTR_ERR(path);
+-		goto out_stop;
++		goto out_handle;
+ 	}
+ 
+ 	depth = ext_depth(inode);
+@@ -5563,16 +5442,16 @@ static int ext4_insert_range(struct file *file, loff_t offset, loff_t len)
+ 		ee_len = ext4_ext_get_actual_len(extent);
+ 
+ 		/*
+-		 * If offset_lblk is not the starting block of extent, split
+-		 * the extent @offset_lblk
++		 * If start_lblk is not the starting block of extent, split
++		 * the extent @start_lblk
+ 		 */
+-		if ((offset_lblk > ee_start_lblk) &&
+-				(offset_lblk < (ee_start_lblk + ee_len))) {
++		if ((start_lblk > ee_start_lblk) &&
++				(start_lblk < (ee_start_lblk + ee_len))) {
+ 			if (ext4_ext_is_unwritten(extent))
+ 				split_flag = EXT4_EXT_MARK_UNWRIT1 |
+ 					EXT4_EXT_MARK_UNWRIT2;
+ 			path = ext4_split_extent_at(handle, inode, path,
+-					offset_lblk, split_flag,
++					start_lblk, split_flag,
+ 					EXT4_EX_NOCACHE |
+ 					EXT4_GET_BLOCKS_PRE_IO |
+ 					EXT4_GET_BLOCKS_METADATA_NOFAIL);
+@@ -5581,32 +5460,29 @@ static int ext4_insert_range(struct file *file, loff_t offset, loff_t len)
+ 		if (IS_ERR(path)) {
+ 			up_write(&EXT4_I(inode)->i_data_sem);
+ 			ret = PTR_ERR(path);
+-			goto out_stop;
++			goto out_handle;
+ 		}
+ 	}
+ 
+ 	ext4_free_ext_path(path);
+-	ext4_es_remove_extent(inode, offset_lblk, EXT_MAX_BLOCKS - offset_lblk);
++	ext4_es_remove_extent(inode, start_lblk, EXT_MAX_BLOCKS - start_lblk);
+ 
+ 	/*
+-	 * if offset_lblk lies in a hole which is at start of file, use
++	 * if start_lblk lies in a hole which is at start of file, use
+ 	 * ee_start_lblk to shift extents
+ 	 */
+ 	ret = ext4_ext_shift_extents(inode, handle,
+-		max(ee_start_lblk, offset_lblk), len_lblk, SHIFT_RIGHT);
+-
++		max(ee_start_lblk, start_lblk), len_lblk, SHIFT_RIGHT);
+ 	up_write(&EXT4_I(inode)->i_data_sem);
++	if (ret)
++		goto out_handle;
++
++	ext4_update_inode_fsync_trans(handle, inode, 1);
+ 	if (IS_SYNC(inode))
+ 		ext4_handle_sync(handle);
+-	if (ret >= 0)
+-		ext4_update_inode_fsync_trans(handle, inode, 1);
+ 
+-out_stop:
++out_handle:
+ 	ext4_journal_stop(handle);
+-out_mmap:
+-	filemap_invalidate_unlock(mapping);
+-out_mutex:
+-	inode_unlock(inode);
+ 	return ret;
+ }
+ 
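Taken together, the extents.c changes hoist the locking out of the individual helpers: ext4_fallocate() now takes i_rwsem, waits for DIO, and, for the hole/collapse/insert/zero modes, takes the invalidate lock once, then dispatches. A compressed sketch of that control flow follows; the flag values match the fallocate(2) UAPI, but the helpers are stubs and the locking steps are only noted in comments:

#include <stdio.h>

#define FALLOC_FL_KEEP_SIZE		0x01
#define FALLOC_FL_PUNCH_HOLE		0x02
#define FALLOC_FL_COLLAPSE_RANGE	0x08
#define FALLOC_FL_ZERO_RANGE		0x10
#define FALLOC_FL_INSERT_RANGE		0x20
#define FALLOC_FL_ALLOCATE_RANGE	0x00
#define FALLOC_FL_MODE_MASK	(FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE | \
				 FALLOC_FL_ZERO_RANGE | FALLOC_FL_INSERT_RANGE)

/* Stubs standing in for the mode-specific helpers in the patch. */
static int do_fallocate(void)	{ puts("allocate"); return 0; }
static int punch_hole(void)	{ puts("punch");    return 0; }
static int collapse_range(void)	{ puts("collapse"); return 0; }
static int insert_range(void)	{ puts("insert");   return 0; }
static int zero_range(void)	{ puts("zero");     return 0; }

/* Shape after the refactor: lock once up front, then branch once. */
static int fallocate_dispatch(int mode)
{
	int ret;

	/* (inode_lock, inode_dio_wait and file_modified happen here) */
	if ((mode & FALLOC_FL_MODE_MASK) == FALLOC_FL_ALLOCATE_RANGE)
		return do_fallocate();

	/* (filemap_invalidate_lock and ext4_break_layouts happen here) */
	if (mode & FALLOC_FL_PUNCH_HOLE)
		ret = punch_hole();
	else if (mode & FALLOC_FL_COLLAPSE_RANGE)
		ret = collapse_range();
	else if (mode & FALLOC_FL_INSERT_RANGE)
		ret = insert_range();
	else if (mode & FALLOC_FL_ZERO_RANGE)
		ret = zero_range();
	else
		ret = -95;	/* -EOPNOTSUPP */

	/* (filemap_invalidate_unlock and inode_unlock happen here) */
	return ret;
}

int main(void)
{
	return fallocate_dispatch(FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE);
}
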
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index f769f5cb6deb78..eb092133c6b882 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -3991,83 +3991,56 @@ int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
+ {
+ 	struct inode *inode = file_inode(file);
+ 	struct super_block *sb = inode->i_sb;
+-	ext4_lblk_t first_block, stop_block;
+-	struct address_space *mapping = inode->i_mapping;
+-	loff_t first_block_offset, last_block_offset, max_length;
+-	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
++	ext4_lblk_t start_lblk, end_lblk;
++	loff_t max_end = sb->s_maxbytes;
++	loff_t end = offset + length;
+ 	handle_t *handle;
+ 	unsigned int credits;
+-	int ret = 0, ret2 = 0;
++	int ret;
+ 
+ 	trace_ext4_punch_hole(inode, offset, length, 0);
++	WARN_ON_ONCE(!inode_is_locked(inode));
+ 
+-	inode_lock(inode);
++	/*
++	 * For indirect-block based inodes, make sure the hole ends at
++	 * least one block before the last addressable range.
++	 */
++	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
++		max_end = EXT4_SB(sb)->s_bitmap_maxbytes - sb->s_blocksize;
+ 
+ 	/* No need to punch hole beyond i_size */
+-	if (offset >= inode->i_size)
+-		goto out_mutex;
++	if (offset >= inode->i_size || offset >= max_end)
++		return 0;
+ 
+ 	/*
+-	 * If the hole extends beyond i_size, set the hole
+-	 * to end after the page that contains i_size
++	 * If the hole extends beyond i_size, set the hole to end after
++	 * the page that contains i_size.
+ 	 */
+-	if (offset + length > inode->i_size) {
+-		length = inode->i_size +
+-		   PAGE_SIZE - (inode->i_size & (PAGE_SIZE - 1)) -
+-		   offset;
+-	}
++	if (end > inode->i_size)
++		end = round_up(inode->i_size, PAGE_SIZE);
++	if (end > max_end)
++		end = max_end;
++	length = end - offset;
+ 
+ 	/*
+-	 * For punch hole the length + offset needs to be within one block
+-	 * before last range. Adjust the length if it goes beyond that limit.
++	 * Attach jinode to inode for jbd2 if we do any zeroing of partial
++	 * block.
+ 	 */
+-	max_length = sbi->s_bitmap_maxbytes - inode->i_sb->s_blocksize;
+-	if (offset + length > max_length)
+-		length = max_length - offset;
+-
+-	if (offset & (sb->s_blocksize - 1) ||
+-	    (offset + length) & (sb->s_blocksize - 1)) {
+-		/*
+-		 * Attach jinode to inode for jbd2 if we do any zeroing of
+-		 * partial block
+-		 */
++	if (!IS_ALIGNED(offset | end, sb->s_blocksize)) {
+ 		ret = ext4_inode_attach_jinode(inode);
+ 		if (ret < 0)
+-			goto out_mutex;
+-
++			return ret;
+ 	}
+ 
+-	/* Wait all existing dio workers, newcomers will block on i_rwsem */
+-	inode_dio_wait(inode);
+-
+-	ret = file_modified(file);
+-	if (ret)
+-		goto out_mutex;
+-
+-	/*
+-	 * Prevent page faults from reinstantiating pages we have released from
+-	 * page cache.
+-	 */
+-	filemap_invalidate_lock(mapping);
+ 
+-	ret = ext4_break_layouts(inode);
++	ret = ext4_update_disksize_before_punch(inode, offset, length);
+ 	if (ret)
+-		goto out_dio;
+-
+-	first_block_offset = round_up(offset, sb->s_blocksize);
+-	last_block_offset = round_down((offset + length), sb->s_blocksize) - 1;
++		return ret;
+ 
+ 	/* Now release the pages and zero block aligned part of pages*/
+-	if (last_block_offset > first_block_offset) {
+-		ret = ext4_update_disksize_before_punch(inode, offset, length);
+-		if (ret)
+-			goto out_dio;
+-
+-		ret = ext4_truncate_page_cache_block_range(inode,
+-				first_block_offset, last_block_offset + 1);
+-		if (ret)
+-			goto out_dio;
+-	}
++	ret = ext4_truncate_page_cache_block_range(inode, offset, end);
++	if (ret)
++		return ret;
+ 
+ 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
+ 		credits = ext4_writepage_trans_blocks(inode);
+@@ -4077,54 +4050,51 @@ int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
+ 	if (IS_ERR(handle)) {
+ 		ret = PTR_ERR(handle);
+ 		ext4_std_error(sb, ret);
+-		goto out_dio;
++		return ret;
+ 	}
+ 
+-	ret = ext4_zero_partial_blocks(handle, inode, offset,
+-				       length);
++	ret = ext4_zero_partial_blocks(handle, inode, offset, length);
+ 	if (ret)
+-		goto out_stop;
+-
+-	first_block = (offset + sb->s_blocksize - 1) >>
+-		EXT4_BLOCK_SIZE_BITS(sb);
+-	stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb);
++		goto out_handle;
+ 
+ 	/* If there are blocks to remove, do it */
+-	if (stop_block > first_block) {
+-		ext4_lblk_t hole_len = stop_block - first_block;
++	start_lblk = EXT4_B_TO_LBLK(inode, offset);
++	end_lblk = end >> inode->i_blkbits;
++
++	if (end_lblk > start_lblk) {
++		ext4_lblk_t hole_len = end_lblk - start_lblk;
+ 
+ 		down_write(&EXT4_I(inode)->i_data_sem);
+ 		ext4_discard_preallocations(inode);
+ 
+-		ext4_es_remove_extent(inode, first_block, hole_len);
++		ext4_es_remove_extent(inode, start_lblk, hole_len);
+ 
+ 		if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
+-			ret = ext4_ext_remove_space(inode, first_block,
+-						    stop_block - 1);
++			ret = ext4_ext_remove_space(inode, start_lblk,
++						    end_lblk - 1);
+ 		else
+-			ret = ext4_ind_remove_space(handle, inode, first_block,
+-						    stop_block);
++			ret = ext4_ind_remove_space(handle, inode, start_lblk,
++						    end_lblk);
++		if (ret) {
++			up_write(&EXT4_I(inode)->i_data_sem);
++			goto out_handle;
++		}
+ 
+-		ext4_es_insert_extent(inode, first_block, hole_len, ~0,
++		ext4_es_insert_extent(inode, start_lblk, hole_len, ~0,
+ 				      EXTENT_STATUS_HOLE, 0);
+ 		up_write(&EXT4_I(inode)->i_data_sem);
+ 	}
+-	ext4_fc_track_range(handle, inode, first_block, stop_block);
++	ext4_fc_track_range(handle, inode, start_lblk, end_lblk);
++
++	ret = ext4_mark_inode_dirty(handle, inode);
++	if (unlikely(ret))
++		goto out_handle;
++
++	ext4_update_inode_fsync_trans(handle, inode, 1);
+ 	if (IS_SYNC(inode))
+ 		ext4_handle_sync(handle);
+-
+-	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
+-	ret2 = ext4_mark_inode_dirty(handle, inode);
+-	if (unlikely(ret2))
+-		ret = ret2;
+-	if (ret >= 0)
+-		ext4_update_inode_fsync_trans(handle, inode, 1);
+-out_stop:
++out_handle:
+ 	ext4_journal_stop(handle);
+-out_dio:
+-	filemap_invalidate_unlock(mapping);
+-out_mutex:
+-	inode_unlock(inode);
+ 	return ret;
+ }
+ 
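The refactored ext4_punch_hole() clamps the hole once up front: never past the page containing i_size, and for indirect-block inodes never past one block below s_bitmap_maxbytes. A tiny sketch of that clamping (PAGE_SIZE and the sample values are illustrative):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL

static uint64_t round_up_pow2(uint64_t x, uint64_t a)
{
	return (x + a - 1) & ~(a - 1);
}

/* Clamp [offset, offset+length) as the refactored ext4_punch_hole() does. */
static uint64_t clamp_hole_end(uint64_t offset, uint64_t length,
			       uint64_t i_size, uint64_t max_end)
{
	uint64_t end = offset + length;

	if (end > i_size)
		end = round_up_pow2(i_size, PAGE_SIZE);
	if (end > max_end)
		end = max_end;
	return end;
}

int main(void)
{
	/* A hole reaching past a 5000-byte file ends at the 8192 page edge. */
	printf("%llu\n", (unsigned long long)clamp_hole_end(0, 1 << 20,
							    5000, 1ULL << 40));
	return 0;
}
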
+diff --git a/fs/jfs/jfs_imap.c b/fs/jfs/jfs_imap.c
+index 8ddc14c56501ac..ecb8e05b8b8481 100644
+--- a/fs/jfs/jfs_imap.c
++++ b/fs/jfs/jfs_imap.c
+@@ -3029,14 +3029,23 @@ static void duplicateIXtree(struct super_block *sb, s64 blkno,
+  *
+  * RETURN VALUES:
+  *	0	- success
+- *	-ENOMEM	- insufficient memory
++ *	-EINVAL	- unexpected inode type
+  */
+ static int copy_from_dinode(struct dinode * dip, struct inode *ip)
+ {
+ 	struct jfs_inode_info *jfs_ip = JFS_IP(ip);
+ 	struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
++	int fileset = le32_to_cpu(dip->di_fileset);
++
++	switch (fileset) {
++	case AGGR_RESERVED_I: case AGGREGATE_I: case BMAP_I:
++	case LOG_I: case BADBLOCK_I: case FILESYSTEM_I:
++		break;
++	default:
++		return -EINVAL;
++	}
+ 
+-	jfs_ip->fileset = le32_to_cpu(dip->di_fileset);
++	jfs_ip->fileset = fileset;
+ 	jfs_ip->mode2 = le32_to_cpu(dip->di_mode);
+ 	jfs_set_inode_flags(ip);
+ 
+diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
+index fd2de4b2bef1a8..afb3b963774093 100644
+--- a/fs/nilfs2/inode.c
++++ b/fs/nilfs2/inode.c
+@@ -499,11 +499,18 @@ static int __nilfs_read_inode(struct super_block *sb,
+ 		inode->i_op = &nilfs_symlink_inode_operations;
+ 		inode_nohighmem(inode);
+ 		inode->i_mapping->a_ops = &nilfs_aops;
+-	} else {
++	} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
++		   S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
+ 		inode->i_op = &nilfs_special_inode_operations;
+ 		init_special_inode(
+ 			inode, inode->i_mode,
+ 			huge_decode_dev(le64_to_cpu(raw_inode->i_device_code)));
++	} else {
++		nilfs_error(sb,
++			    "invalid file type bits in mode 0%o for inode %lu",
++			    inode->i_mode, ino);
++		err = -EIO;
++		goto failed_unmap;
+ 	}
+ 	nilfs_ifile_unmap_inode(raw_inode);
+ 	brelse(bh);
+diff --git a/include/drm/drm_buddy.h b/include/drm/drm_buddy.h
+index 9689a7c5dd36b2..513837632b7d37 100644
+--- a/include/drm/drm_buddy.h
++++ b/include/drm/drm_buddy.h
+@@ -160,6 +160,8 @@ int drm_buddy_block_trim(struct drm_buddy *mm,
+ 			 u64 new_size,
+ 			 struct list_head *blocks);
+ 
++void drm_buddy_reset_clear(struct drm_buddy *mm, bool is_clear);
++
+ void drm_buddy_free_block(struct drm_buddy *mm, struct drm_buddy_block *block);
+ 
+ void drm_buddy_free_list(struct drm_buddy *mm,
+diff --git a/include/linux/ism.h b/include/linux/ism.h
+index 5428edd9098231..8358b4cd7ba6ae 100644
+--- a/include/linux/ism.h
++++ b/include/linux/ism.h
+@@ -28,6 +28,7 @@ struct ism_dmb {
+ 
+ struct ism_dev {
+ 	spinlock_t lock; /* protects the ism device */
++	spinlock_t cmd_lock; /* serializes cmds */
+ 	struct list_head list;
+ 	struct pci_dev *pdev;
+ 
+diff --git a/include/linux/sprintf.h b/include/linux/sprintf.h
+index 33dcbec719254a..9e13b8040b12ca 100644
+--- a/include/linux/sprintf.h
++++ b/include/linux/sprintf.h
+@@ -4,6 +4,7 @@
+ 
+ #include <linux/compiler_attributes.h>
+ #include <linux/types.h>
++#include <linux/stdarg.h>
+ 
+ int num_to_str(char *buf, int size, unsigned long long num, unsigned int width);
+ 
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index f01477cecf3934..531412c5103dcc 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -15480,6 +15480,8 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
+ 
+ 		if (src_reg->type == PTR_TO_STACK)
+ 			insn_flags |= INSN_F_SRC_REG_STACK;
++		if (dst_reg->type == PTR_TO_STACK)
++			insn_flags |= INSN_F_DST_REG_STACK;
+ 	} else {
+ 		if (insn->src_reg != BPF_REG_0) {
+ 			verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
+@@ -15489,10 +15491,11 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
+ 		memset(src_reg, 0, sizeof(*src_reg));
+ 		src_reg->type = SCALAR_VALUE;
+ 		__mark_reg_known(src_reg, insn->imm);
++
++		if (dst_reg->type == PTR_TO_STACK)
++			insn_flags |= INSN_F_DST_REG_STACK;
+ 	}
+ 
+-	if (dst_reg->type == PTR_TO_STACK)
+-		insn_flags |= INSN_F_DST_REG_STACK;
+ 	if (insn_flags) {
+ 		err = push_insn_history(env, this_branch, insn_flags, 0);
+ 		if (err)
+diff --git a/kernel/resource.c b/kernel/resource.c
+index 4101016e8b205c..1d48ae86463524 100644
+--- a/kernel/resource.c
++++ b/kernel/resource.c
+@@ -1268,8 +1268,9 @@ static int __request_region_locked(struct resource *res, struct resource *parent
+ 		 * become unavailable to other users.  Conflicts are
+ 		 * not expected.  Warn to aid debugging if encountered.
+ 		 */
+-		if (conflict->desc == IORES_DESC_DEVICE_PRIVATE_MEMORY) {
+-			pr_warn("Unaddressable device %s %pR conflicts with %pR",
++		if (parent == &iomem_resource &&
++		    conflict->desc == IORES_DESC_DEVICE_PRIVATE_MEMORY) {
++			pr_warn("Unaddressable device %s %pR conflicts with %pR\n",
+ 				conflict->name, conflict, res);
+ 		}
+ 		if (conflict != parent) {
+diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
+index 96933082431fe0..e3896b2be45321 100644
+--- a/kernel/time/timekeeping.c
++++ b/kernel/time/timekeeping.c
+@@ -1218,7 +1218,7 @@ int get_device_system_crosststamp(int (*get_time_fn)
+ 				  struct system_time_snapshot *history_begin,
+ 				  struct system_device_crosststamp *xtstamp)
+ {
+-	struct system_counterval_t system_counterval;
++	struct system_counterval_t system_counterval = {};
+ 	struct timekeeper *tk = &tk_core.timekeeper;
+ 	u64 cycles, now, interval_start;
+ 	unsigned int clock_was_set_seq = 0;
+diff --git a/mm/kasan/report.c b/mm/kasan/report.c
+index 5675d6a412ef17..f1726541015604 100644
+--- a/mm/kasan/report.c
++++ b/mm/kasan/report.c
+@@ -398,7 +398,9 @@ static void print_address_description(void *addr, u8 tag,
+ 	}
+ 
+ 	if (is_vmalloc_addr(addr)) {
+-		pr_err("The buggy address %px belongs to a vmalloc virtual mapping\n", addr);
++		pr_err("The buggy address belongs to a");
++		if (!vmalloc_dump_obj(addr))
++			pr_cont(" vmalloc virtual mapping\n");
+ 		page = vmalloc_to_page(addr);
+ 	}
+ 
+diff --git a/mm/khugepaged.c b/mm/khugepaged.c
+index b538c3d48386a5..abd5764e48642d 100644
+--- a/mm/khugepaged.c
++++ b/mm/khugepaged.c
+@@ -2404,7 +2404,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
+ 			VM_BUG_ON(khugepaged_scan.address < hstart ||
+ 				  khugepaged_scan.address + HPAGE_PMD_SIZE >
+ 				  hend);
+-			if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
++			if (IS_ENABLED(CONFIG_SHMEM) && !vma_is_anonymous(vma)) {
+ 				struct file *file = get_file(vma->vm_file);
+ 				pgoff_t pgoff = linear_page_index(vma,
+ 						khugepaged_scan.address);
+@@ -2750,7 +2750,7 @@ int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
+ 		mmap_assert_locked(mm);
+ 		memset(cc->node_load, 0, sizeof(cc->node_load));
+ 		nodes_clear(cc->alloc_nmask);
+-		if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
++		if (IS_ENABLED(CONFIG_SHMEM) && !vma_is_anonymous(vma)) {
+ 			struct file *file = get_file(vma->vm_file);
+ 			pgoff_t pgoff = linear_page_index(vma, addr);
+ 
+diff --git a/mm/ksm.c b/mm/ksm.c
+index a2e2a521df0aec..17e6c16ab81d50 100644
+--- a/mm/ksm.c
++++ b/mm/ksm.c
+@@ -3643,10 +3643,10 @@ static ssize_t advisor_mode_show(struct kobject *kobj,
+ {
+ 	const char *output;
+ 
+-	if (ksm_advisor == KSM_ADVISOR_NONE)
+-		output = "[none] scan-time";
+-	else if (ksm_advisor == KSM_ADVISOR_SCAN_TIME)
++	if (ksm_advisor == KSM_ADVISOR_SCAN_TIME)
+ 		output = "none [scan-time]";
++	else
++		output = "[none] scan-time";
+ 
+ 	return sysfs_emit(buf, "%s\n", output);
+ }
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index ec1c71abe88dfd..70b2ccf0d51eed 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -1559,6 +1559,10 @@ static int get_hwpoison_page(struct page *p, unsigned long flags)
+ 	return ret;
+ }
+ 
++/*
++ * The caller must guarantee the folio isn't a large folio (hugetlb is
++ * the exception); try_to_unmap() can't handle large folios.
++ */
+ int unmap_poisoned_folio(struct folio *folio, unsigned long pfn, bool must_kill)
+ {
+ 	enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_SYNC | TTU_HWPOISON;
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index 0eb5d510d4f6b6..e3c1e2e1560d75 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -1080,6 +1080,14 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
+ 			goto keep;
+ 
+ 		if (folio_contain_hwpoisoned_page(folio)) {
++			/*
++			 * unmap_poisoned_folio() can't handle a large
++			 * folio; just skip it. memory_failure() will
++			 * handle it if the UCE is triggered again.
++			 */
++			if (folio_test_large(folio))
++				goto keep_locked;
++
+ 			unmap_poisoned_folio(folio, folio_pfn(folio), false);
+ 			folio_unlock(folio);
+ 			folio_put(folio);
+diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
+index 16a07def09c96a..e4326af00e5eb8 100644
+--- a/mm/zsmalloc.c
++++ b/mm/zsmalloc.c
+@@ -976,6 +976,9 @@ static struct zspage *alloc_zspage(struct zs_pool *pool,
+ 	if (!zspage)
+ 		return NULL;
+ 
++	if (!IS_ENABLED(CONFIG_COMPACTION))
++		gfp &= ~__GFP_MOVABLE;
++
+ 	zspage->magic = ZSPAGE_MAGIC;
+ 	migrate_lock_init(zspage);
+ 
+diff --git a/net/appletalk/aarp.c b/net/appletalk/aarp.c
+index 9fa0b246902bef..70c5a508ffac17 100644
+--- a/net/appletalk/aarp.c
++++ b/net/appletalk/aarp.c
+@@ -35,6 +35,7 @@
+ #include <linux/seq_file.h>
+ #include <linux/export.h>
+ #include <linux/etherdevice.h>
++#include <linux/refcount.h>
+ 
+ int sysctl_aarp_expiry_time = AARP_EXPIRY_TIME;
+ int sysctl_aarp_tick_time = AARP_TICK_TIME;
+@@ -44,6 +45,7 @@ int sysctl_aarp_resolve_time = AARP_RESOLVE_TIME;
+ /* Lists of aarp entries */
+ /**
+  *	struct aarp_entry - AARP entry
++ *	@refcnt: Reference count
+  *	@last_sent: Last time we xmitted the aarp request
+  *	@packet_queue: Queue of frames wait for resolution
+  *	@status: Used for proxy AARP
+@@ -55,6 +57,7 @@ int sysctl_aarp_resolve_time = AARP_RESOLVE_TIME;
+  *	@next: Next entry in chain
+  */
+ struct aarp_entry {
++	refcount_t			refcnt;
+ 	/* These first two are only used for unresolved entries */
+ 	unsigned long		last_sent;
+ 	struct sk_buff_head	packet_queue;
+@@ -79,6 +82,17 @@ static DEFINE_RWLOCK(aarp_lock);
+ /* Used to walk the list and purge/kick entries.  */
+ static struct timer_list aarp_timer;
+ 
++static inline void aarp_entry_get(struct aarp_entry *a)
++{
++	refcount_inc(&a->refcnt);
++}
++
++static inline void aarp_entry_put(struct aarp_entry *a)
++{
++	if (refcount_dec_and_test(&a->refcnt))
++		kfree(a);
++}
++
+ /*
+  *	Delete an aarp queue
+  *
+@@ -87,7 +101,7 @@ static struct timer_list aarp_timer;
+ static void __aarp_expire(struct aarp_entry *a)
+ {
+ 	skb_queue_purge(&a->packet_queue);
+-	kfree(a);
++	aarp_entry_put(a);
+ }
+ 
+ /*
+@@ -380,9 +394,11 @@ static void aarp_purge(void)
+ static struct aarp_entry *aarp_alloc(void)
+ {
+ 	struct aarp_entry *a = kmalloc(sizeof(*a), GFP_ATOMIC);
++	if (!a)
++		return NULL;
+ 
+-	if (a)
+-		skb_queue_head_init(&a->packet_queue);
++	refcount_set(&a->refcnt, 1);
++	skb_queue_head_init(&a->packet_queue);
+ 	return a;
+ }
+ 
+@@ -508,6 +524,7 @@ int aarp_proxy_probe_network(struct atalk_iface *atif, struct atalk_addr *sa)
+ 	entry->dev = atif->dev;
+ 
+ 	write_lock_bh(&aarp_lock);
++	aarp_entry_get(entry);
+ 
+ 	hash = sa->s_node % (AARP_HASH_SIZE - 1);
+ 	entry->next = proxies[hash];
+@@ -533,6 +550,7 @@ int aarp_proxy_probe_network(struct atalk_iface *atif, struct atalk_addr *sa)
+ 		retval = 1;
+ 	}
+ 
++	aarp_entry_put(entry);
+ 	write_unlock_bh(&aarp_lock);
+ out:
+ 	return retval;
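
The appletalk fix converts aarp_entry to reference counting so the proxy-probe path can keep an entry alive across the window where aarp_lock is dropped, instead of racing with __aarp_expire() freeing it. A userspace sketch of the same get/put lifecycle using C11 atomics (the kernel's refcount_t additionally saturates rather than wrapping):

#include <stdatomic.h>
#include <stdlib.h>

struct entry {
	atomic_int refcnt;
	/* ... payload ... */
};

static struct entry *entry_alloc(void)
{
	struct entry *e = malloc(sizeof(*e));

	if (!e)
		return NULL;
	atomic_init(&e->refcnt, 1);	/* creator holds the first reference */
	return e;
}

static void entry_get(struct entry *e)
{
	atomic_fetch_add(&e->refcnt, 1);
}

static void entry_put(struct entry *e)
{
	/* Last reference frees the object, as aarp_entry_put() does. */
	if (atomic_fetch_sub(&e->refcnt, 1) == 1)
		free(e);
}

int main(void)
{
	struct entry *e = entry_alloc();

	if (!e)
		return 1;
	entry_get(e);	/* second user, e.g. across a dropped lock */
	entry_put(e);	/* first user done */
	entry_put(e);	/* last reference: freed here */
	return 0;
}
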
+diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c
+index 17d3fc2fab4ccb..12a1a0f421956c 100644
+--- a/net/ipv4/xfrm4_input.c
++++ b/net/ipv4/xfrm4_input.c
+@@ -202,6 +202,9 @@ struct sk_buff *xfrm4_gro_udp_encap_rcv(struct sock *sk, struct list_head *head,
+ 	if (len <= sizeof(struct ip_esp_hdr) || udpdata32[0] == 0)
+ 		goto out;
+ 
++	/* set the transport header to ESP */
++	skb_set_transport_header(skb, offset);
++
+ 	NAPI_GRO_CB(skb)->proto = IPPROTO_UDP;
+ 
+ 	pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
+diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c
+index 841c81abaaf4ff..9005fc156a20e6 100644
+--- a/net/ipv6/xfrm6_input.c
++++ b/net/ipv6/xfrm6_input.c
+@@ -202,6 +202,9 @@ struct sk_buff *xfrm6_gro_udp_encap_rcv(struct sock *sk, struct list_head *head,
+ 	if (len <= sizeof(struct ip_esp_hdr) || udpdata32[0] == 0)
+ 		goto out;
+ 
++	/* set the transport header to ESP */
++	skb_set_transport_header(skb, offset);
++
+ 	NAPI_GRO_CB(skb)->proto = IPPROTO_UDP;
+ 
+ 	pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
+diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
+index 8e60fb5a7083bc..5a345ef35c9f65 100644
+--- a/net/sched/sch_qfq.c
++++ b/net/sched/sch_qfq.c
+@@ -539,9 +539,6 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
+ 
+ static void qfq_destroy_class(struct Qdisc *sch, struct qfq_class *cl)
+ {
+-	struct qfq_sched *q = qdisc_priv(sch);
+-
+-	qfq_rm_from_agg(q, cl);
+ 	gen_kill_estimator(&cl->rate_est);
+ 	qdisc_put(cl->qdisc);
+ 	kfree(cl);
+@@ -562,10 +559,11 @@ static int qfq_delete_class(struct Qdisc *sch, unsigned long arg,
+ 
+ 	qdisc_purge_queue(cl->qdisc);
+ 	qdisc_class_hash_remove(&q->clhash, &cl->common);
+-	qfq_destroy_class(sch, cl);
++	qfq_rm_from_agg(q, cl);
+ 
+ 	sch_tree_unlock(sch);
+ 
++	qfq_destroy_class(sch, cl);
+ 	return 0;
+ }
+ 
+@@ -1506,6 +1504,7 @@ static void qfq_destroy_qdisc(struct Qdisc *sch)
+ 	for (i = 0; i < q->clhash.hashsize; i++) {
+ 		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
+ 					  common.hnode) {
++			qfq_rm_from_agg(q, cl);
+ 			qfq_destroy_class(sch, cl);
+ 		}
+ 	}
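
The sch_qfq change splits class teardown in two: unlinking from the class hash and the aggregate happens under the tree lock, while the final destruction (estimator, child qdisc, kfree) runs only after sch_tree_unlock(). A generic sketch of that unlink-under-lock, free-outside pattern, with a pthread mutex standing in for the qdisc tree lock:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct klass {
	struct klass *next;
	/* ... estimator, child qdisc, ... */
};

static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;
static struct klass *classes;

/* Unlink 'cl' from the shared list; caller holds tree_lock. */
static void unlink_class(struct klass *cl)
{
	struct klass **pp;

	for (pp = &classes; *pp; pp = &(*pp)->next) {
		if (*pp == cl) {
			*pp = cl->next;
			break;
		}
	}
}

static void delete_class(struct klass *cl)
{
	pthread_mutex_lock(&tree_lock);
	unlink_class(cl);		/* remove from shared structures... */
	pthread_mutex_unlock(&tree_lock);

	free(cl);			/* ...but tear down outside the lock */
}

int main(void)
{
	struct klass *cl = calloc(1, sizeof(*cl));

	if (!cl)
		return 1;
	cl->next = classes;
	classes = cl;
	delete_class(cl);
	puts("deleted");
	return 0;
}
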
+diff --git a/net/xfrm/xfrm_interface_core.c b/net/xfrm/xfrm_interface_core.c
+index 98f1e2b67c76b3..b95d882f9dbcbf 100644
+--- a/net/xfrm/xfrm_interface_core.c
++++ b/net/xfrm/xfrm_interface_core.c
+@@ -874,7 +874,7 @@ static int xfrmi_changelink(struct net_device *dev, struct nlattr *tb[],
+ 		return -EINVAL;
+ 	}
+ 
+-	if (p.collect_md) {
++	if (p.collect_md || xi->p.collect_md) {
+ 		NL_SET_ERR_MSG(extack, "collect_md can't be changed");
+ 		return -EINVAL;
+ 	}
+@@ -885,11 +885,6 @@ static int xfrmi_changelink(struct net_device *dev, struct nlattr *tb[],
+ 	} else {
+ 		if (xi->dev != dev)
+ 			return -EEXIST;
+-		if (xi->p.collect_md) {
+-			NL_SET_ERR_MSG(extack,
+-				       "device can't be changed to collect_md");
+-			return -EINVAL;
+-		}
+ 	}
+ 
+ 	return xfrmi_update(xi, &p);
+diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
+index 7a298058fc16cf..ad0fe884947142 100644
+--- a/net/xfrm/xfrm_state.c
++++ b/net/xfrm/xfrm_state.c
+@@ -1242,14 +1242,8 @@ static void xfrm_hash_grow_check(struct net *net, int have_hash_collision)
+ static void xfrm_state_look_at(struct xfrm_policy *pol, struct xfrm_state *x,
+ 			       const struct flowi *fl, unsigned short family,
+ 			       struct xfrm_state **best, int *acq_in_progress,
+-			       int *error)
++			       int *error, unsigned int pcpu_id)
+ {
+-	/* We need the cpu id just as a lookup key,
+-	 * we don't require it to be stable.
+-	 */
+-	unsigned int pcpu_id = get_cpu();
+-	put_cpu();
+-
+ 	/* Resolution logic:
+ 	 * 1. There is a valid state with matching selector. Done.
+ 	 * 2. Valid state with inappropriate selector. Skip.
+@@ -1316,14 +1310,15 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
+ 	/* We need the cpu id just as a lookup key,
+ 	 * we don't require it to be stable.
+ 	 */
+-	pcpu_id = get_cpu();
+-	put_cpu();
++	pcpu_id = raw_smp_processor_id();
+ 
+ 	to_put = NULL;
+ 
+ 	sequence = read_seqcount_begin(&net->xfrm.xfrm_state_hash_generation);
+ 
+ 	rcu_read_lock();
++	xfrm_hash_ptrs_get(net, &state_ptrs);
++
+ 	hlist_for_each_entry_rcu(x, &pol->state_cache_list, state_cache) {
+ 		if (x->props.family == encap_family &&
+ 		    x->props.reqid == tmpl->reqid &&
+@@ -1335,7 +1330,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
+ 		    tmpl->id.proto == x->id.proto &&
+ 		    (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
+ 			xfrm_state_look_at(pol, x, fl, encap_family,
+-					   &best, &acquire_in_progress, &error);
++					   &best, &acquire_in_progress, &error, pcpu_id);
+ 	}
+ 
+ 	if (best)
+@@ -1352,7 +1347,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
+ 		    tmpl->id.proto == x->id.proto &&
+ 		    (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
+ 			xfrm_state_look_at(pol, x, fl, family,
+-					   &best, &acquire_in_progress, &error);
++					   &best, &acquire_in_progress, &error, pcpu_id);
+ 	}
+ 
+ cached:
+@@ -1364,8 +1359,6 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
+ 	else if (acquire_in_progress) /* XXX: acquire_in_progress should not happen */
+ 		WARN_ON(1);
+ 
+-	xfrm_hash_ptrs_get(net, &state_ptrs);
+-
+ 	h = __xfrm_dst_hash(daddr, saddr, tmpl->reqid, encap_family, state_ptrs.hmask);
+ 	hlist_for_each_entry_rcu(x, state_ptrs.bydst + h, bydst) {
+ #ifdef CONFIG_XFRM_OFFLOAD
+@@ -1395,7 +1388,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
+ 		    tmpl->id.proto == x->id.proto &&
+ 		    (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
+ 			xfrm_state_look_at(pol, x, fl, family,
+-					   &best, &acquire_in_progress, &error);
++					   &best, &acquire_in_progress, &error, pcpu_id);
+ 	}
+ 	if (best || acquire_in_progress)
+ 		goto found;
+@@ -1430,7 +1423,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
+ 		    tmpl->id.proto == x->id.proto &&
+ 		    (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
+ 			xfrm_state_look_at(pol, x, fl, family,
+-					   &best, &acquire_in_progress, &error);
++					   &best, &acquire_in_progress, &error, pcpu_id);
+ 	}
+ 
+ found:
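The xfrm_state change replaces a get_cpu()/put_cpu() pair inside the per-candidate helper with one raw_smp_processor_id() read in xfrm_state_find(): the CPU number is only a lookup key, so it need not be stable and preemption need not be disabled for it. A sketch of the resulting shape (raw_cpu_id() and the state layout are stand-ins, not kernel APIs):

#include <stdio.h>

/* Stand-in for raw_smp_processor_id(): may be stale when used, which is
 * fine because the value is only a lookup key. */
static unsigned int raw_cpu_id(void)
{
	return 0;	/* single "CPU" in this sketch */
}

struct state { unsigned int pcpu_num; };

/* Matches only states cached for the given CPU, as the patch does. */
static int state_matches(const struct state *x, unsigned int pcpu_id)
{
	return x->pcpu_num == pcpu_id;
}

int main(void)
{
	const struct state states[] = { { 0 }, { 1 }, { 0 } };
	unsigned int pcpu_id = raw_cpu_id();	/* computed once, up front */
	int hits = 0;

	for (unsigned int i = 0; i < 3; i++)	/* the hash-chain walk */
		hits += state_matches(&states[i], pcpu_id);
	printf("%d candidates for cpu %u\n", hits, pcpu_id);
	return 0;
}
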
+diff --git a/sound/pci/hda/hda_tegra.c b/sound/pci/hda/hda_tegra.c
+index d967e70a705859..12a144a269ee89 100644
+--- a/sound/pci/hda/hda_tegra.c
++++ b/sound/pci/hda/hda_tegra.c
+@@ -72,6 +72,10 @@
+ struct hda_tegra_soc {
+ 	bool has_hda2codec_2x_reset;
+ 	bool has_hda2hdmi;
++	bool has_hda2codec_2x;
++	bool input_stream;
++	bool always_on;
++	bool requires_init;
+ };
+ 
+ struct hda_tegra {
+@@ -187,7 +191,9 @@ static int __maybe_unused hda_tegra_runtime_resume(struct device *dev)
+ 	if (rc != 0)
+ 		return rc;
+ 	if (chip->running) {
+-		hda_tegra_init(hda);
++		if (hda->soc->requires_init)
++			hda_tegra_init(hda);
++
+ 		azx_init_chip(chip, 1);
+ 		/* disable controller wake up event*/
+ 		azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) &
+@@ -252,7 +258,8 @@ static int hda_tegra_init_chip(struct azx *chip, struct platform_device *pdev)
+ 	bus->remap_addr = hda->regs + HDA_BAR0;
+ 	bus->addr = res->start + HDA_BAR0;
+ 
+-	hda_tegra_init(hda);
++	if (hda->soc->requires_init)
++		hda_tegra_init(hda);
+ 
+ 	return 0;
+ }
+@@ -325,7 +332,7 @@ static int hda_tegra_first_init(struct azx *chip, struct platform_device *pdev)
+ 	 * starts with offset 0 which is wrong as HW register for output stream
+ 	 * offset starts with 4.
+ 	 */
+-	if (of_device_is_compatible(np, "nvidia,tegra234-hda"))
++	if (!hda->soc->input_stream)
+ 		chip->capture_streams = 4;
+ 
+ 	chip->playback_streams = (gcap >> 12) & 0x0f;
+@@ -421,7 +428,6 @@ static int hda_tegra_create(struct snd_card *card,
+ 	chip->driver_caps = driver_caps;
+ 	chip->driver_type = driver_caps & 0xff;
+ 	chip->dev_index = 0;
+-	chip->jackpoll_interval = msecs_to_jiffies(5000);
+ 	INIT_LIST_HEAD(&chip->pcm_list);
+ 
+ 	chip->codec_probe_mask = -1;
+@@ -438,7 +444,16 @@ static int hda_tegra_create(struct snd_card *card,
+ 	chip->bus.core.sync_write = 0;
+ 	chip->bus.core.needs_damn_long_delay = 1;
+ 	chip->bus.core.aligned_mmio = 1;
+-	chip->bus.jackpoll_in_suspend = 1;
++
++	/*
++	 * The HDA power domain and clocks are always on for Tegra264, so
++	 * jack detection always works and there is no need to run the
++	 * jack polling mechanism.
++	 */
++	if (!hda->soc->always_on) {
++		chip->jackpoll_interval = msecs_to_jiffies(5000);
++		chip->bus.jackpoll_in_suspend = 1;
++	}
+ 
+ 	err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
+ 	if (err < 0) {
+@@ -452,22 +467,44 @@ static int hda_tegra_create(struct snd_card *card,
+ static const struct hda_tegra_soc tegra30_data = {
+ 	.has_hda2codec_2x_reset = true,
+ 	.has_hda2hdmi = true,
++	.has_hda2codec_2x = true,
++	.input_stream = true,
++	.always_on = false,
++	.requires_init = true,
+ };
+ 
+ static const struct hda_tegra_soc tegra194_data = {
+ 	.has_hda2codec_2x_reset = false,
+ 	.has_hda2hdmi = true,
++	.has_hda2codec_2x = true,
++	.input_stream = true,
++	.always_on = false,
++	.requires_init = true,
+ };
+ 
+ static const struct hda_tegra_soc tegra234_data = {
+ 	.has_hda2codec_2x_reset = true,
+ 	.has_hda2hdmi = false,
++	.has_hda2codec_2x = true,
++	.input_stream = false,
++	.always_on = false,
++	.requires_init = true,
++};
++
++static const struct hda_tegra_soc tegra264_data = {
++	.has_hda2codec_2x_reset = true,
++	.has_hda2hdmi = false,
++	.has_hda2codec_2x = false,
++	.input_stream = false,
++	.always_on = true,
++	.requires_init = false,
+ };
+ 
+ static const struct of_device_id hda_tegra_match[] = {
+ 	{ .compatible = "nvidia,tegra30-hda", .data = &tegra30_data },
+ 	{ .compatible = "nvidia,tegra194-hda", .data = &tegra194_data },
+ 	{ .compatible = "nvidia,tegra234-hda", .data = &tegra234_data },
++	{ .compatible = "nvidia,tegra264-hda", .data = &tegra264_data },
+ 	{},
+ };
+ MODULE_DEVICE_TABLE(of, hda_tegra_match);
+@@ -522,7 +559,9 @@ static int hda_tegra_probe(struct platform_device *pdev)
+ 	hda->clocks[hda->nclocks++].id = "hda";
+ 	if (hda->soc->has_hda2hdmi)
+ 		hda->clocks[hda->nclocks++].id = "hda2hdmi";
+-	hda->clocks[hda->nclocks++].id = "hda2codec_2x";
++
++	if (hda->soc->has_hda2codec_2x)
++		hda->clocks[hda->nclocks++].id = "hda2codec_2x";
+ 
+ 	err = devm_clk_bulk_get(&pdev->dev, hda->nclocks, hda->clocks);
+ 	if (err < 0)
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index 643e0496b09362..b05ef4bec6609f 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -4551,6 +4551,9 @@ HDA_CODEC_ENTRY(0x10de002e, "Tegra186 HDMI/DP1", patch_tegra_hdmi),
+ HDA_CODEC_ENTRY(0x10de002f, "Tegra194 HDMI/DP2", patch_tegra_hdmi),
+ HDA_CODEC_ENTRY(0x10de0030, "Tegra194 HDMI/DP3", patch_tegra_hdmi),
+ HDA_CODEC_ENTRY(0x10de0031, "Tegra234 HDMI/DP", patch_tegra234_hdmi),
++HDA_CODEC_ENTRY(0x10de0033, "SoC 33 HDMI/DP",	patch_tegra234_hdmi),
++HDA_CODEC_ENTRY(0x10de0034, "Tegra264 HDMI/DP",	patch_tegra234_hdmi),
++HDA_CODEC_ENTRY(0x10de0035, "SoC 35 HDMI/DP",	patch_tegra234_hdmi),
+ HDA_CODEC_ENTRY(0x10de0040, "GPU 40 HDMI/DP",	patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de0041, "GPU 41 HDMI/DP",	patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de0042, "GPU 42 HDMI/DP",	patch_nvhdmi),
+@@ -4589,15 +4592,32 @@ HDA_CODEC_ENTRY(0x10de0097, "GPU 97 HDMI/DP",	patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de0098, "GPU 98 HDMI/DP",	patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de0099, "GPU 99 HDMI/DP",	patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de009a, "GPU 9a HDMI/DP",	patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de009b, "GPU 9b HDMI/DP",	patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de009c, "GPU 9c HDMI/DP",	patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de009d, "GPU 9d HDMI/DP",	patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de009e, "GPU 9e HDMI/DP",	patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de009f, "GPU 9f HDMI/DP",	patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de00a0, "GPU a0 HDMI/DP",	patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de00a1, "GPU a1 HDMI/DP",	patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de00a3, "GPU a3 HDMI/DP",	patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de00a4, "GPU a4 HDMI/DP",	patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de00a5, "GPU a5 HDMI/DP",	patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de00a6, "GPU a6 HDMI/DP",	patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de00a7, "GPU a7 HDMI/DP",	patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de00a8, "GPU a8 HDMI/DP",	patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de00a9, "GPU a9 HDMI/DP",	patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de00aa, "GPU aa HDMI/DP",	patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de00ab, "GPU ab HDMI/DP",	patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de00ad, "GPU ad HDMI/DP",	patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de00ae, "GPU ae HDMI/DP",	patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de00af, "GPU af HDMI/DP",	patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de00b0, "GPU b0 HDMI/DP",	patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de00b1, "GPU b1 HDMI/DP",	patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de00c0, "GPU c0 HDMI/DP",	patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de00c1, "GPU c1 HDMI/DP",	patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de00c3, "GPU c3 HDMI/DP",	patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de00c4, "GPU c4 HDMI/DP",	patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de00c5, "GPU c5 HDMI/DP",	patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de8001, "MCP73 HDMI",	patch_nvhdmi_2ch),
+ HDA_CODEC_ENTRY(0x10de8067, "MCP67/68 HDMI",	patch_nvhdmi_2ch),
+ HDA_CODEC_ENTRY(0x67663d82, "Arise 82 HDMI/DP",	patch_gf_hdmi),
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index f033214bf77fd6..085f0697bff14f 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -4755,7 +4755,7 @@ static void alc245_fixup_hp_mute_led_v1_coefbit(struct hda_codec *codec,
+ 	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+ 		spec->mute_led_polarity = 0;
+ 		spec->mute_led_coef.idx = 0x0b;
+-		spec->mute_led_coef.mask = 1 << 3;
++		spec->mute_led_coef.mask = 3 << 2;
+ 		spec->mute_led_coef.on = 1 << 3;
+ 		spec->mute_led_coef.off = 0;
+ 		snd_hda_gen_add_mute_led_cdev(codec, coef_mute_led_set);
+@@ -10608,6 +10608,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8788, "HP OMEN 15", ALC285_FIXUP_HP_MUTE_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x87b7, "HP Laptop 14-fq0xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
+ 	SND_PCI_QUIRK(0x103c, 0x87c8, "HP", ALC287_FIXUP_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x87cc, "HP Pavilion 15-eg0xxx", ALC287_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x87d3, "HP Laptop 15-gw0xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
+ 	SND_PCI_QUIRK(0x103c, 0x87df, "HP ProBook 430 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x87e5, "HP ProBook 440 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
+@@ -10686,6 +10687,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8a2e, "HP Envy 16", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x103c, 0x8a30, "HP Envy 17", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x103c, 0x8a31, "HP Envy 15", ALC287_FIXUP_CS35L41_I2C_2),
++	SND_PCI_QUIRK(0x103c, 0x8a4f, "HP Victus 15-fa0xxx (MB 8A4F)", ALC245_FIXUP_HP_MUTE_LED_COEFBIT),
+ 	SND_PCI_QUIRK(0x103c, 0x8a6e, "HP EDNA 360", ALC287_FIXUP_CS35L41_I2C_4),
+ 	SND_PCI_QUIRK(0x103c, 0x8a74, "HP ProBook 440 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8a78, "HP Dev One", ALC285_FIXUP_HP_LIMIT_INT_MIC_BOOST),
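
The alc245 mute-LED change widens the COEF update mask from one bit (1 << 3) to the full two-bit field (3 << 2), so switching the LED off also clears the low bit of the field. A read-modify-write toy model makes the difference visible; the field layout here is assumed for illustration, not taken from the codec's documentation:

#include <stdio.h>

/*
 * Illustrative COEF-style read-modify-write: only bits set in `mask`
 * are written back. If the LED state occupies bits 2-3 but the mask
 * covers only bit 3, bit 2 is never cleared and the LED can stick.
 */
static unsigned int coef_update(unsigned int reg, unsigned int mask,
				unsigned int val)
{
	return (reg & ~mask) | (val & mask);
}

int main(void)
{
	unsigned int reg = 0x4;	/* bit 2 left set by firmware */

	/* mask = 1 << 3: turning the LED off (val = 0) leaves bit 2 set */
	printf("narrow mask: 0x%x\n", coef_update(reg, 1 << 3, 0));
	/* mask = 3 << 2: the whole two-bit field is cleared */
	printf("wide mask:   0x%x\n", coef_update(reg, 3 << 2, 0));
	return 0;
}
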
+diff --git a/sound/soc/mediatek/mt8365/mt8365-dai-i2s.c b/sound/soc/mediatek/mt8365/mt8365-dai-i2s.c
+index 11b9a5bc716382..89575bb8afedd8 100644
+--- a/sound/soc/mediatek/mt8365/mt8365-dai-i2s.c
++++ b/sound/soc/mediatek/mt8365/mt8365-dai-i2s.c
+@@ -812,11 +812,10 @@ static const struct snd_soc_dapm_route mtk_dai_i2s_routes[] = {
+ static int mt8365_dai_i2s_set_priv(struct mtk_base_afe *afe)
+ {
+ 	int i, ret;
+-	struct mt8365_afe_private *afe_priv = afe->platform_priv;
+ 
+ 	for (i = 0; i < DAI_I2S_NUM; i++) {
+ 		ret = mt8365_dai_set_priv(afe, mt8365_i2s_priv[i].id,
+-					  sizeof(*afe_priv),
++					  sizeof(mt8365_i2s_priv[i]),
+ 					  &mt8365_i2s_priv[i]);
+ 		if (ret)
+ 			return ret;
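
The mt8365 fix is a sizeof mismatch: the set_priv helper copies `size` bytes from the supplied template, so passing the size of the (much larger) AFE private struct over-reads each small per-DAI entry. A minimal sketch with stand-in types, assuming the helper copies the template as the fix implies:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct afe_priv { char blob[256]; };	/* stand-in for the AFE private data */
struct i2s_priv { int id; int mclk; };	/* stand-in for one mt8365_i2s_priv entry */

/* Mimics the helper: allocate `size` bytes and copy them from `data`. */
static void *set_priv(const void *data, size_t size)
{
	void *p = malloc(size);

	if (p)
		memcpy(p, data, size);	/* reads `size` bytes from `data` */
	return p;
}

int main(void)
{
	struct i2s_priv src = { .id = 1, .mclk = 26000000 };
	void *ok;

	/* Bug: sizeof(struct afe_priv) (256 bytes) over-reads the 8-byte source:
	 * void *bad = set_priv(&src, sizeof(struct afe_priv)); */

	/* Fix: the size must describe the object actually being copied. */
	ok = set_priv(&src, sizeof(src));
	printf("copied %zu bytes\n", sizeof(src));
	free(ok);
	return 0;
}
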
+diff --git a/tools/hv/hv_fcopy_uio_daemon.c b/tools/hv/hv_fcopy_uio_daemon.c
+index 12743d7f164f0d..9caa24caa0801a 100644
+--- a/tools/hv/hv_fcopy_uio_daemon.c
++++ b/tools/hv/hv_fcopy_uio_daemon.c
+@@ -62,8 +62,11 @@ static int hv_fcopy_create_file(char *file_name, char *path_name, __u32 flags)
+ 
+ 	filesize = 0;
+ 	p = path_name;
+-	snprintf(target_fname, sizeof(target_fname), "%s/%s",
+-		 path_name, file_name);
++	if (snprintf(target_fname, sizeof(target_fname), "%s/%s",
++		     path_name, file_name) >= sizeof(target_fname)) {
++		syslog(LOG_ERR, "target file name is too long: %s/%s", path_name, file_name);
++		goto done;
++	}
+ 
+ 	/*
+ 	 * Check to see if the path is already in place; if not,
+@@ -270,7 +273,7 @@ static void wcstoutf8(char *dest, const __u16 *src, size_t dest_size)
+ {
+ 	size_t len = 0;
+ 
+-	while (len < dest_size) {
++	while (len < dest_size && *src) {
+ 		if (src[len] < 0x80)
+ 			dest[len++] = (char)(*src++);
+ 		else
+@@ -282,27 +285,15 @@ static void wcstoutf8(char *dest, const __u16 *src, size_t dest_size)
+ 
+ static int hv_fcopy_start(struct hv_start_fcopy *smsg_in)
+ {
+-	setlocale(LC_ALL, "en_US.utf8");
+-	size_t file_size, path_size;
+-	char *file_name, *path_name;
+-	char *in_file_name = (char *)smsg_in->file_name;
+-	char *in_path_name = (char *)smsg_in->path_name;
+-
+-	file_size = wcstombs(NULL, (const wchar_t *restrict)in_file_name, 0) + 1;
+-	path_size = wcstombs(NULL, (const wchar_t *restrict)in_path_name, 0) + 1;
+-
+-	file_name = (char *)malloc(file_size * sizeof(char));
+-	path_name = (char *)malloc(path_size * sizeof(char));
+-
+-	if (!file_name || !path_name) {
+-		free(file_name);
+-		free(path_name);
+-		syslog(LOG_ERR, "Can't allocate memory for file name and/or path name");
+-		return HV_E_FAIL;
+-	}
++	/*
++	 * file_name and path_name should have same length with appropriate
++	 * member of hv_start_fcopy.
++	 */
++	char file_name[W_MAX_PATH], path_name[W_MAX_PATH];
+ 
+-	wcstoutf8(file_name, (__u16 *)in_file_name, file_size);
+-	wcstoutf8(path_name, (__u16 *)in_path_name, path_size);
++	setlocale(LC_ALL, "en_US.utf8");
++	wcstoutf8(file_name, smsg_in->file_name, W_MAX_PATH - 1);
++	wcstoutf8(path_name, smsg_in->path_name, W_MAX_PATH - 1);
+ 
+ 	return hv_fcopy_create_file(file_name, path_name, smsg_in->copy_flags);
+ }
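
Two themes in the hunks above: checking the snprintf() return value for truncation instead of silently using a clipped path, and bounding the wide-char conversion by both the destination size and the source terminator. The snprintf idiom in isolation (the W_MAX_PATH value below is assumed for the demo, not the daemon's):

#include <stdio.h>

#define W_MAX_PATH 260	/* assumed here; the daemon defines its own limit */

/*
 * snprintf() returns the length it *would* have produced, so a return
 * value >= the buffer size means the output was truncated and should
 * be treated as an error rather than used as a valid path.
 */
static int join_path(char *dst, size_t dst_size, const char *dir,
		     const char *name)
{
	int n = snprintf(dst, dst_size, "%s/%s", dir, name);

	if (n < 0 || (size_t)n >= dst_size)
		return -1;	/* encoding error or truncation */
	return 0;
}

int main(void)
{
	char path[W_MAX_PATH];

	if (join_path(path, sizeof(path), "/var/lib/hyperv", "fcopy_file") == 0)
		printf("target: %s\n", path);
	else
		fprintf(stderr, "target file name is too long\n");
	return 0;
}
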
+diff --git a/tools/testing/selftests/bpf/progs/verifier_precision.c b/tools/testing/selftests/bpf/progs/verifier_precision.c
+index 6b564d4c09866a..051d1962a4c775 100644
+--- a/tools/testing/selftests/bpf/progs/verifier_precision.c
++++ b/tools/testing/selftests/bpf/progs/verifier_precision.c
+@@ -130,4 +130,57 @@ __naked int state_loop_first_last_equal(void)
+ 	);
+ }
+ 
++__used __naked static void __bpf_cond_op_r10(void)
++{
++	asm volatile (
++	"r2 = 2314885393468386424 ll;"
++	"goto +0;"
++	"if r2 <= r10 goto +3;"
++	"if r1 >= -1835016 goto +0;"
++	"if r2 <= 8 goto +0;"
++	"if r3 <= 0 goto +0;"
++	"exit;"
++	::: __clobber_all);
++}
++
++SEC("?raw_tp")
++__success __log_level(2)
++__msg("8: (bd) if r2 <= r10 goto pc+3")
++__msg("9: (35) if r1 >= 0xffe3fff8 goto pc+0")
++__msg("10: (b5) if r2 <= 0x8 goto pc+0")
++__msg("mark_precise: frame1: last_idx 10 first_idx 0 subseq_idx -1")
++__msg("mark_precise: frame1: regs=r2 stack= before 9: (35) if r1 >= 0xffe3fff8 goto pc+0")
++__msg("mark_precise: frame1: regs=r2 stack= before 8: (bd) if r2 <= r10 goto pc+3")
++__msg("mark_precise: frame1: regs=r2 stack= before 7: (05) goto pc+0")
++__naked void bpf_cond_op_r10(void)
++{
++	asm volatile (
++	"r3 = 0 ll;"
++	"call __bpf_cond_op_r10;"
++	"r0 = 0;"
++	"exit;"
++	::: __clobber_all);
++}
++
++SEC("?raw_tp")
++__success __log_level(2)
++__msg("3: (bf) r3 = r10")
++__msg("4: (bd) if r3 <= r2 goto pc+1")
++__msg("5: (b5) if r2 <= 0x8 goto pc+2")
++__msg("mark_precise: frame0: last_idx 5 first_idx 0 subseq_idx -1")
++__msg("mark_precise: frame0: regs=r2 stack= before 4: (bd) if r3 <= r2 goto pc+1")
++__msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r3 = r10")
++__naked void bpf_cond_op_not_r10(void)
++{
++	asm volatile (
++	"r0 = 0;"
++	"r2 = 2314885393468386424 ll;"
++	"r3 = r10;"
++	"if r3 <= r2 goto +1;"
++	"if r2 <= 8 goto +2;"
++	"r0 = 2 ll;"
++	"exit;"
++	::: __clobber_all);
++}
++
+ char _license[] SEC("license") = "GPL";
+diff --git a/tools/testing/selftests/drivers/net/lib/py/load.py b/tools/testing/selftests/drivers/net/lib/py/load.py
+index d9c10613ae67b3..44151b7b1a24b6 100644
+--- a/tools/testing/selftests/drivers/net/lib/py/load.py
++++ b/tools/testing/selftests/drivers/net/lib/py/load.py
+@@ -1,5 +1,6 @@
+ # SPDX-License-Identifier: GPL-2.0
+ 
++import re
+ import time
+ 
+ from lib.py import ksft_pr, cmd, ip, rand_port, wait_port_listen
+@@ -10,12 +11,11 @@ class GenerateTraffic:
+ 
+         self.env = env
+ 
+-        if port is None:
+-            port = rand_port()
+-        self._iperf_server = cmd(f"iperf3 -s -1 -p {port}", background=True)
+-        wait_port_listen(port)
++        self.port = rand_port() if port is None else port
++        self._iperf_server = cmd(f"iperf3 -s -1 -p {self.port}", background=True)
++        wait_port_listen(self.port)
+         time.sleep(0.1)
+-        self._iperf_client = cmd(f"iperf3 -c {env.addr} -P 16 -p {port} -t 86400",
++        self._iperf_client = cmd(f"iperf3 -c {env.addr} -P 16 -p {self.port} -t 86400",
+                                  background=True, host=env.remote)
+ 
+         # Wait for traffic to ramp up
+@@ -56,3 +56,16 @@ class GenerateTraffic:
+             ksft_pr(">> Server:")
+             ksft_pr(self._iperf_server.stdout)
+             ksft_pr(self._iperf_server.stderr)
++        self._wait_client_stopped()
++
++    def _wait_client_stopped(self, sleep=0.005, timeout=5):
++        end = time.monotonic() + timeout
++
++        live_port_pattern = re.compile(fr":{self.port:04X} 0[^6] ")
++
++        while time.monotonic() < end:
++            data = cmd("cat /proc/net/tcp*", host=self.env.remote).stdout
++            if not live_port_pattern.search(data):
++                return
++            time.sleep(sleep)
++        raise Exception(f"Waiting for client to stop timed out after {timeout}s")
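
The new _wait_client_stopped() polls the remote /proc/net/tcp until no entry for the iperf port remains in a state other than TIME_WAIT (state code 06); ports there are printed as 4-digit uppercase hex, which is what the :04X in the regex matches. A loose C equivalent of the same textual match (substring scan, not column-accurate /proc parsing):

#include <stdio.h>
#include <string.h>

static int port_still_live(unsigned int port)
{
	char pattern[16], line[512];
	FILE *f = fopen("/proc/net/tcp", "r");
	int live = 0;

	if (!f)
		return -1;
	snprintf(pattern, sizeof(pattern), ":%04X ", port);
	while (fgets(line, sizeof(line), f)) {
		const char *st = strstr(line, pattern);

		if (!st)
			continue;
		st += strlen(pattern);
		/* two hex digits of socket state follow; 06 is TIME_WAIT */
		if (st[0] == '0' && st[1] && st[1] != '6' && st[2] == ' ') {
			live = 1;
			break;
		}
	}
	fclose(f);
	return live;
}

int main(void)
{
	printf("port 5201 live: %d\n", port_still_live(5201));
	return 0;
}
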
+diff --git a/tools/testing/selftests/net/mptcp/Makefile b/tools/testing/selftests/net/mptcp/Makefile
+index 580610c46e5aef..4524085784df2e 100644
+--- a/tools/testing/selftests/net/mptcp/Makefile
++++ b/tools/testing/selftests/net/mptcp/Makefile
+@@ -4,7 +4,8 @@ top_srcdir = ../../../../..
+ 
+ CFLAGS += -Wall -Wl,--no-as-needed -O2 -g -I$(top_srcdir)/usr/include $(KHDR_INCLUDES)
+ 
+-TEST_PROGS := mptcp_connect.sh pm_netlink.sh mptcp_join.sh diag.sh \
++TEST_PROGS := mptcp_connect.sh mptcp_connect_mmap.sh mptcp_connect_sendfile.sh \
++	      mptcp_connect_checksum.sh pm_netlink.sh mptcp_join.sh diag.sh \
+ 	      simult_flows.sh mptcp_sockopt.sh userspace_pm.sh
+ 
+ TEST_GEN_FILES = mptcp_connect pm_nl_ctl mptcp_sockopt mptcp_inq
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect_checksum.sh b/tools/testing/selftests/net/mptcp/mptcp_connect_checksum.sh
+new file mode 100644
+index 00000000000000..ce93ec2f107fba
+--- /dev/null
++++ b/tools/testing/selftests/net/mptcp/mptcp_connect_checksum.sh
+@@ -0,0 +1,5 @@
++#!/bin/bash
++# SPDX-License-Identifier: GPL-2.0
++
++MPTCP_LIB_KSFT_TEST="$(basename "${0}" .sh)" \
++	"$(dirname "${0}")/mptcp_connect.sh" -C "${@}"
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect_mmap.sh b/tools/testing/selftests/net/mptcp/mptcp_connect_mmap.sh
+new file mode 100644
+index 00000000000000..5dd30f9394af6a
+--- /dev/null
++++ b/tools/testing/selftests/net/mptcp/mptcp_connect_mmap.sh
+@@ -0,0 +1,5 @@
++#!/bin/bash
++# SPDX-License-Identifier: GPL-2.0
++
++MPTCP_LIB_KSFT_TEST="$(basename "${0}" .sh)" \
++	"$(dirname "${0}")/mptcp_connect.sh" -m mmap "${@}"
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect_sendfile.sh b/tools/testing/selftests/net/mptcp/mptcp_connect_sendfile.sh
+new file mode 100644
+index 00000000000000..1d16fb1cc9bb6d
+--- /dev/null
++++ b/tools/testing/selftests/net/mptcp/mptcp_connect_sendfile.sh
+@@ -0,0 +1,5 @@
++#!/bin/bash
++# SPDX-License-Identifier: GPL-2.0
++
++MPTCP_LIB_KSFT_TEST="$(basename "${0}" .sh)" \
++	"$(dirname "${0}")/mptcp_connect.sh" -m sendfile "${@}"


* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2025-07-18 12:05 Arisu Tachibana
  0 siblings, 0 replies; 82+ messages in thread
From: Arisu Tachibana @ 2025-07-18 12:05 UTC (permalink / raw
  To: gentoo-commits

commit:     1a8045c3de598aa1ab0648635cf8cdc6a90120b8
Author:     Arisu Tachibana <alicef <AT> gentoo <DOT> org>
AuthorDate: Fri Jul 18 12:05:40 2025 +0000
Commit:     Arisu Tachibana <alicef <AT> gentoo <DOT> org>
CommitDate: Fri Jul 18 12:05:40 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=1a8045c3

Linux patch 6.12.39

Signed-off-by: Arisu Tachibana <alicef <AT> gentoo.org>

 0000_README              |    4 +
 1038_linux-6.12.39.patch | 6830 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 6834 insertions(+)

diff --git a/0000_README b/0000_README
index e2568c6a..b596d7c2 100644
--- a/0000_README
+++ b/0000_README
@@ -195,6 +195,10 @@ Patch:  1037_linux-6.12.38.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.12.38
 
+Patch:  1038_linux-6.12.39.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.12.39
+
 Patch:  1500_fortify-copy-size-value-range-tracking-fix.patch
 From:   https://git.kernel.org/
 Desc:   fortify: Hide run-time copy size from value range tracking

diff --git a/1038_linux-6.12.39.patch b/1038_linux-6.12.39.patch
new file mode 100644
index 00000000..ae19d036
--- /dev/null
+++ b/1038_linux-6.12.39.patch
@@ -0,0 +1,6830 @@
+diff --git a/Documentation/bpf/map_hash.rst b/Documentation/bpf/map_hash.rst
+index d2343952f2cbd3..8606bf958a8cf0 100644
+--- a/Documentation/bpf/map_hash.rst
++++ b/Documentation/bpf/map_hash.rst
+@@ -233,10 +233,16 @@ attempts in order to enforce the LRU property which have increasing impacts on
+ other CPUs involved in the following operation attempts:
+ 
+ - Attempt to use CPU-local state to batch operations
+-- Attempt to fetch free nodes from global lists
++- Attempt to fetch ``target_free`` free nodes from global lists
+ - Attempt to pull any node from a global list and remove it from the hashmap
+ - Attempt to pull any node from any CPU's list and remove it from the hashmap
+ 
++The number of nodes to borrow from the global list in a batch, ``target_free``,
++depends on the size of the map. Larger batch size reduces lock contention, but
++may also exhaust the global structure. The value is computed at map init to
++avoid exhaustion, by limiting aggregate reservation by all CPUs to half the map
++size. With a minimum of a single element and maximum budget of 128 at a time.
++
+ This algorithm is described visually in the following diagram. See the
+ description in commit 3a08c2fd7634 ("bpf: LRU List") for a full explanation of
+ the corresponding operations:
+diff --git a/Documentation/bpf/map_lru_hash_update.dot b/Documentation/bpf/map_lru_hash_update.dot
+index a0fee349d29c27..ab10058f5b79f5 100644
+--- a/Documentation/bpf/map_lru_hash_update.dot
++++ b/Documentation/bpf/map_lru_hash_update.dot
+@@ -35,18 +35,18 @@ digraph {
+   fn_bpf_lru_list_pop_free_to_local [shape=rectangle,fillcolor=2,
+     label="Flush local pending,
+     Rotate Global list, move
+-    LOCAL_FREE_TARGET
++    target_free
+     from global -> local"]
+   // Also corresponds to:
+   // fn__local_list_flush()
+   // fn_bpf_lru_list_rotate()
+   fn___bpf_lru_node_move_to_free[shape=diamond,fillcolor=2,
+-    label="Able to free\nLOCAL_FREE_TARGET\nnodes?"]
++    label="Able to free\ntarget_free\nnodes?"]
+ 
+   fn___bpf_lru_list_shrink_inactive [shape=rectangle,fillcolor=3,
+     label="Shrink inactive list
+       up to remaining
+-      LOCAL_FREE_TARGET
++      target_free
+       (global LRU -> local)"]
+   fn___bpf_lru_list_shrink [shape=diamond,fillcolor=2,
+     label="> 0 entries in\nlocal free list?"]
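
The map_hash.rst text above describes the batch-sizing rule in words; a worked sketch shows the arithmetic. This is a hypothetical reconstruction of the described rule (aggregate reservation capped at half the map, per-CPU batch clamped to [1, 128]); names and exact rounding are illustrative, not the kernel's:

#include <stdio.h>

static unsigned int compute_target_free(unsigned int nr_entries,
					unsigned int nr_cpus)
{
	unsigned int target = (nr_entries / 2) / nr_cpus;

	if (target < 1)
		target = 1;
	if (target > 128)
		target = 128;
	return target;
}

int main(void)
{
	/* tiny map on a big box: clamps up to 1 so progress stays possible */
	printf("64 entries, 64 CPUs -> %u\n", compute_target_free(64, 64));
	/* big map: clamps down to the 128-element maximum budget */
	printf("1M entries, 8 CPUs  -> %u\n", compute_target_free(1u << 20, 8));
	return 0;
}
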
+diff --git a/Makefile b/Makefile
+index 28c9acdd9b3583..ba6054d96398dd 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 12
+-SUBLEVEL = 38
++SUBLEVEL = 39
+ EXTRAVERSION =
+ NAME = Baby Opossum Posse
+ 
+diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
+index 05ccf4ec278f78..9ca5ffd8d817f7 100644
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -2959,6 +2959,13 @@ static bool has_sve_feature(const struct arm64_cpu_capabilities *cap, int scope)
+ }
+ #endif
+ 
++#ifdef CONFIG_ARM64_SME
++static bool has_sme_feature(const struct arm64_cpu_capabilities *cap, int scope)
++{
++	return system_supports_sme() && has_user_cpuid_feature(cap, scope);
++}
++#endif
++
+ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
+ 	HWCAP_CAP(ID_AA64ISAR0_EL1, AES, PMULL, CAP_HWCAP, KERNEL_HWCAP_PMULL),
+ 	HWCAP_CAP(ID_AA64ISAR0_EL1, AES, AES, CAP_HWCAP, KERNEL_HWCAP_AES),
+@@ -3037,25 +3044,25 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
+ 	HWCAP_CAP(ID_AA64ISAR2_EL1, BC, IMP, CAP_HWCAP, KERNEL_HWCAP_HBC),
+ #ifdef CONFIG_ARM64_SME
+ 	HWCAP_CAP(ID_AA64PFR1_EL1, SME, IMP, CAP_HWCAP, KERNEL_HWCAP_SME),
+-	HWCAP_CAP(ID_AA64SMFR0_EL1, FA64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_FA64),
+-	HWCAP_CAP(ID_AA64SMFR0_EL1, LUTv2, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_LUTV2),
+-	HWCAP_CAP(ID_AA64SMFR0_EL1, SMEver, SME2p1, CAP_HWCAP, KERNEL_HWCAP_SME2P1),
+-	HWCAP_CAP(ID_AA64SMFR0_EL1, SMEver, SME2, CAP_HWCAP, KERNEL_HWCAP_SME2),
+-	HWCAP_CAP(ID_AA64SMFR0_EL1, I16I64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I16I64),
+-	HWCAP_CAP(ID_AA64SMFR0_EL1, F64F64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F64F64),
+-	HWCAP_CAP(ID_AA64SMFR0_EL1, I16I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I16I32),
+-	HWCAP_CAP(ID_AA64SMFR0_EL1, B16B16, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_B16B16),
+-	HWCAP_CAP(ID_AA64SMFR0_EL1, F16F16, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F16F16),
+-	HWCAP_CAP(ID_AA64SMFR0_EL1, F8F16, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F8F16),
+-	HWCAP_CAP(ID_AA64SMFR0_EL1, F8F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F8F32),
+-	HWCAP_CAP(ID_AA64SMFR0_EL1, I8I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I8I32),
+-	HWCAP_CAP(ID_AA64SMFR0_EL1, F16F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F16F32),
+-	HWCAP_CAP(ID_AA64SMFR0_EL1, B16F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_B16F32),
+-	HWCAP_CAP(ID_AA64SMFR0_EL1, BI32I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_BI32I32),
+-	HWCAP_CAP(ID_AA64SMFR0_EL1, F32F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F32F32),
+-	HWCAP_CAP(ID_AA64SMFR0_EL1, SF8FMA, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SF8FMA),
+-	HWCAP_CAP(ID_AA64SMFR0_EL1, SF8DP4, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SF8DP4),
+-	HWCAP_CAP(ID_AA64SMFR0_EL1, SF8DP2, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SF8DP2),
++	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, FA64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_FA64),
++	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, LUTv2, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_LUTV2),
++	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, SMEver, SME2p1, CAP_HWCAP, KERNEL_HWCAP_SME2P1),
++	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, SMEver, SME2, CAP_HWCAP, KERNEL_HWCAP_SME2),
++	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, I16I64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I16I64),
++	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, F64F64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F64F64),
++	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, I16I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I16I32),
++	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, B16B16, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_B16B16),
++	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, F16F16, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F16F16),
++	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, F8F16, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F8F16),
++	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, F8F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F8F32),
++	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, I8I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I8I32),
++	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, F16F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F16F32),
++	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, B16F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_B16F32),
++	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, BI32I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_BI32I32),
++	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, F32F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F32F32),
++	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, SF8FMA, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SF8FMA),
++	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, SF8DP4, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SF8DP4),
++	HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, SF8DP2, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SF8DP2),
+ #endif /* CONFIG_ARM64_SME */
+ 	HWCAP_CAP(ID_AA64FPFR0_EL1, F8CVT, IMP, CAP_HWCAP, KERNEL_HWCAP_F8CVT),
+ 	HWCAP_CAP(ID_AA64FPFR0_EL1, F8FMA, IMP, CAP_HWCAP, KERNEL_HWCAP_F8FMA),
+diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
+index 2bbcbb11d844c9..2edf88c1c69576 100644
+--- a/arch/arm64/kernel/process.c
++++ b/arch/arm64/kernel/process.c
+@@ -544,6 +544,11 @@ static void permission_overlay_switch(struct task_struct *next)
+ 	current->thread.por_el0 = read_sysreg_s(SYS_POR_EL0);
+ 	if (current->thread.por_el0 != next->thread.por_el0) {
+ 		write_sysreg_s(next->thread.por_el0, SYS_POR_EL0);
++		/*
++		 * No ISB required as we can tolerate spurious Overlay faults -
++		 * the fault handler will check again based on the new value
++		 * of POR_EL0.
++		 */
+ 	}
+ }
+ 
+diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
+index 8b281cf308b30f..850307b49babde 100644
+--- a/arch/arm64/mm/fault.c
++++ b/arch/arm64/mm/fault.c
+@@ -487,17 +487,29 @@ static void do_bad_area(unsigned long far, unsigned long esr,
+ 	}
+ }
+ 
+-static bool fault_from_pkey(unsigned long esr, struct vm_area_struct *vma,
+-			unsigned int mm_flags)
++static bool fault_from_pkey(struct vm_area_struct *vma, unsigned int mm_flags)
+ {
+-	unsigned long iss2 = ESR_ELx_ISS2(esr);
+-
+ 	if (!system_supports_poe())
+ 		return false;
+ 
+-	if (esr_fsc_is_permission_fault(esr) && (iss2 & ESR_ELx_Overlay))
+-		return true;
+-
++	/*
++	 * We do not check whether an Overlay fault has occurred because we
++	 * cannot make a decision based solely on its value:
++	 *
++	 * - If Overlay is set, a fault did occur due to POE, but it may be
++	 *   spurious in those cases where we update POR_EL0 without ISB (e.g.
++	 *   on context-switch). We would then need to manually check POR_EL0
++	 *   against vma_pkey(vma), which is exactly what
++	 *   arch_vma_access_permitted() does.
++	 *
++	 * - If Overlay is not set, we may still need to report a pkey fault.
++	 *   This is the case if an access was made within a mapping but with no
++	 *   page mapped, and POR_EL0 forbids the access (according to
++	 *   vma_pkey()). Such access will result in a SIGSEGV regardless
++	 *   because core code checks arch_vma_access_permitted(), but in order
++	 *   to report the correct error code - SEGV_PKUERR - we must handle
++	 *   that case here.
++	 */
+ 	return !arch_vma_access_permitted(vma,
+ 			mm_flags & FAULT_FLAG_WRITE,
+ 			mm_flags & FAULT_FLAG_INSTRUCTION,
+@@ -595,7 +607,7 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
+ 		goto bad_area;
+ 	}
+ 
+-	if (fault_from_pkey(esr, vma, mm_flags)) {
++	if (fault_from_pkey(vma, mm_flags)) {
+ 		pkey = vma_pkey(vma);
+ 		vma_end_read(vma);
+ 		fault = 0;
+@@ -639,7 +651,7 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
+ 		goto bad_area;
+ 	}
+ 
+-	if (fault_from_pkey(esr, vma, mm_flags)) {
++	if (fault_from_pkey(vma, mm_flags)) {
+ 		pkey = vma_pkey(vma);
+ 		mmap_read_unlock(mm);
+ 		fault = 0;
+diff --git a/arch/riscv/kernel/vdso/vdso.lds.S b/arch/riscv/kernel/vdso/vdso.lds.S
+index cbe2a179331d25..99e51f7755393e 100644
+--- a/arch/riscv/kernel/vdso/vdso.lds.S
++++ b/arch/riscv/kernel/vdso/vdso.lds.S
+@@ -31,7 +31,7 @@ SECTIONS
+ 		*(.data .data.* .gnu.linkonce.d.*)
+ 		*(.dynbss)
+ 		*(.bss .bss.* .gnu.linkonce.b.*)
+-	}
++	}						:text
+ 
+ 	.note		: { *(.note.*) }		:text	:note
+ 
+diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c
+index bc3a22704e0930..10950953429e66 100644
+--- a/arch/s390/crypto/sha1_s390.c
++++ b/arch/s390/crypto/sha1_s390.c
+@@ -38,6 +38,7 @@ static int s390_sha1_init(struct shash_desc *desc)
+ 	sctx->state[4] = SHA1_H4;
+ 	sctx->count = 0;
+ 	sctx->func = CPACF_KIMD_SHA_1;
++	sctx->first_message_part = 0;
+ 
+ 	return 0;
+ }
+@@ -62,6 +63,7 @@ static int s390_sha1_import(struct shash_desc *desc, const void *in)
+ 	memcpy(sctx->state, ictx->state, sizeof(ictx->state));
+ 	memcpy(sctx->buf, ictx->buffer, sizeof(ictx->buffer));
+ 	sctx->func = CPACF_KIMD_SHA_1;
++	sctx->first_message_part = 0;
+ 	return 0;
+ }
+ 
+diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c
+index 6f1ccdf93d3e5e..0204d4bca34032 100644
+--- a/arch/s390/crypto/sha256_s390.c
++++ b/arch/s390/crypto/sha256_s390.c
+@@ -31,6 +31,7 @@ static int s390_sha256_init(struct shash_desc *desc)
+ 	sctx->state[7] = SHA256_H7;
+ 	sctx->count = 0;
+ 	sctx->func = CPACF_KIMD_SHA_256;
++	sctx->first_message_part = 0;
+ 
+ 	return 0;
+ }
+@@ -55,6 +56,7 @@ static int sha256_import(struct shash_desc *desc, const void *in)
+ 	memcpy(sctx->state, ictx->state, sizeof(ictx->state));
+ 	memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
+ 	sctx->func = CPACF_KIMD_SHA_256;
++	sctx->first_message_part = 0;
+ 	return 0;
+ }
+ 
+@@ -90,6 +92,7 @@ static int s390_sha224_init(struct shash_desc *desc)
+ 	sctx->state[7] = SHA224_H7;
+ 	sctx->count = 0;
+ 	sctx->func = CPACF_KIMD_SHA_256;
++	sctx->first_message_part = 0;
+ 
+ 	return 0;
+ }
+diff --git a/arch/s390/crypto/sha512_s390.c b/arch/s390/crypto/sha512_s390.c
+index 04f11c40776345..b53a7793bd244f 100644
+--- a/arch/s390/crypto/sha512_s390.c
++++ b/arch/s390/crypto/sha512_s390.c
+@@ -32,6 +32,7 @@ static int sha512_init(struct shash_desc *desc)
+ 	*(__u64 *)&ctx->state[14] = SHA512_H7;
+ 	ctx->count = 0;
+ 	ctx->func = CPACF_KIMD_SHA_512;
++	ctx->first_message_part = 0;
+ 
+ 	return 0;
+ }
+@@ -60,6 +61,7 @@ static int sha512_import(struct shash_desc *desc, const void *in)
+ 	memcpy(sctx->state, ictx->state, sizeof(ictx->state));
+ 	memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
+ 	sctx->func = CPACF_KIMD_SHA_512;
++	sctx->first_message_part = 0;
+ 	return 0;
+ }
+ 
+@@ -97,6 +99,7 @@ static int sha384_init(struct shash_desc *desc)
+ 	*(__u64 *)&ctx->state[14] = SHA384_H7;
+ 	ctx->count = 0;
+ 	ctx->func = CPACF_KIMD_SHA_512;
++	ctx->first_message_part = 0;
+ 
+ 	return 0;
+ }
+diff --git a/arch/um/drivers/vector_kern.c b/arch/um/drivers/vector_kern.c
+index 64c09db392c16a..7a88b13d289f15 100644
+--- a/arch/um/drivers/vector_kern.c
++++ b/arch/um/drivers/vector_kern.c
+@@ -1592,35 +1592,19 @@ static void vector_eth_configure(
+ 
+ 	device->dev = dev;
+ 
+-	*vp = ((struct vector_private)
+-		{
+-		.list			= LIST_HEAD_INIT(vp->list),
+-		.dev			= dev,
+-		.unit			= n,
+-		.options		= get_transport_options(def),
+-		.rx_irq			= 0,
+-		.tx_irq			= 0,
+-		.parsed			= def,
+-		.max_packet		= get_mtu(def) + ETH_HEADER_OTHER,
+-		/* TODO - we need to calculate headroom so that ip header
+-		 * is 16 byte aligned all the time
+-		 */
+-		.headroom		= get_headroom(def),
+-		.form_header		= NULL,
+-		.verify_header		= NULL,
+-		.header_rxbuffer	= NULL,
+-		.header_txbuffer	= NULL,
+-		.header_size		= 0,
+-		.rx_header_size		= 0,
+-		.rexmit_scheduled	= false,
+-		.opened			= false,
+-		.transport_data		= NULL,
+-		.in_write_poll		= false,
+-		.coalesce		= 2,
+-		.req_size		= get_req_size(def),
+-		.in_error		= false,
+-		.bpf			= NULL
+-	});
++	INIT_LIST_HEAD(&vp->list);
++	vp->dev		= dev;
++	vp->unit	= n;
++	vp->options	= get_transport_options(def);
++	vp->parsed	= def;
++	vp->max_packet	= get_mtu(def) + ETH_HEADER_OTHER;
++	/*
++	 * TODO - we need to calculate headroom so that ip header
++	 * is 16 byte aligned all the time
++	 */
++	vp->headroom	= get_headroom(def);
++	vp->coalesce	= 2;
++	vp->req_size	= get_req_size(def);
+ 
+ 	dev->features = dev->hw_features = (NETIF_F_SG | NETIF_F_FRAGLIST);
+ 	INIT_WORK(&vp->reset_tx, vector_reset_tx);
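
The vector_eth_configure() rewrite keeps only the nonzero fields and drops the whole-struct compound literal. Assigning a compound literal copies an entire temporary object over the destination: members not named in the literal are zero-filled, and with a large struct the temporary itself can land on the stack. Whichever effect motivated the change, the C semantics are worth seeing in isolation; a toy sketch:

#include <stdio.h>

struct priv {
	int configured_early;	/* set before configure() runs */
	int unit;
	int coalesce;
};

/* Compound-literal assignment zero-fills every member it does not name. */
static void configure_literal(struct priv *p, int unit)
{
	*p = (struct priv){ .unit = unit, .coalesce = 2 };
}

/* Per-field writes touch only what they name and need no temporary. */
static void configure_fields(struct priv *p, int unit)
{
	p->unit = unit;
	p->coalesce = 2;
}

int main(void)
{
	struct priv a = { .configured_early = 1 };
	struct priv b = { .configured_early = 1 };

	configure_literal(&a, 3);
	configure_fields(&b, 3);
	printf("literal: configured_early=%d\n", a.configured_early);	/* 0: lost */
	printf("fields:  configured_early=%d\n", b.configured_early);	/* 1 */
	return 0;
}
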
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index dfa334e3d1a033..2df0ae2a5e5d0e 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -137,7 +137,7 @@ config X86
+ 	select ARCH_WANTS_DYNAMIC_TASK_STRUCT
+ 	select ARCH_WANTS_NO_INSTR
+ 	select ARCH_WANT_GENERAL_HUGETLB
+-	select ARCH_WANT_HUGE_PMD_SHARE
++	select ARCH_WANT_HUGE_PMD_SHARE		if X86_64
+ 	select ARCH_WANT_LD_ORPHAN_WARN
+ 	select ARCH_WANT_OPTIMIZE_DAX_VMEMMAP	if X86_64
+ 	select ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP	if X86_64
+diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
+index ac25f9eb591209..7ebe76f69417ae 100644
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -621,6 +621,7 @@
+ #define MSR_AMD64_OSVW_STATUS		0xc0010141
+ #define MSR_AMD_PPIN_CTL		0xc00102f0
+ #define MSR_AMD_PPIN			0xc00102f1
++#define MSR_AMD64_CPUID_FN_7		0xc0011002
+ #define MSR_AMD64_CPUID_FN_1		0xc0011004
+ #define MSR_AMD64_LS_CFG		0xc0011020
+ #define MSR_AMD64_DC_CFG		0xc0011022
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index b42307200e98f3..efd42ee9d1cc61 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -977,6 +977,13 @@ static void init_amd_zen2(struct cpuinfo_x86 *c)
+ 	init_spectral_chicken(c);
+ 	fix_erratum_1386(c);
+ 	zen2_zenbleed_check(c);
++
++	/* Disable RDSEED on AMD Cyan Skillfish because of an error. */
++	if (c->x86_model == 0x47 && c->x86_stepping == 0x0) {
++		clear_cpu_cap(c, X86_FEATURE_RDSEED);
++		msr_clear_bit(MSR_AMD64_CPUID_FN_7, 18);
++		pr_emerg("RDSEED is not reliable on this platform; disabling.\n");
++	}
+ }
+ 
+ static void init_amd_zen3(struct cpuinfo_x86 *c)
+diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c
+index 14bf8c232e457f..dac4564e1d7ca4 100644
+--- a/arch/x86/kernel/cpu/mce/amd.c
++++ b/arch/x86/kernel/cpu/mce/amd.c
+@@ -327,7 +327,6 @@ static void smca_configure(unsigned int bank, unsigned int cpu)
+ 
+ struct thresh_restart {
+ 	struct threshold_block	*b;
+-	int			reset;
+ 	int			set_lvt_off;
+ 	int			lvt_off;
+ 	u16			old_limit;
+@@ -422,13 +421,13 @@ static void threshold_restart_bank(void *_tr)
+ 
+ 	rdmsr(tr->b->address, lo, hi);
+ 
+-	if (tr->b->threshold_limit < (hi & THRESHOLD_MAX))
+-		tr->reset = 1;	/* limit cannot be lower than err count */
+-
+-	if (tr->reset) {		/* reset err count and overflow bit */
+-		hi =
+-		    (hi & ~(MASK_ERR_COUNT_HI | MASK_OVERFLOW_HI)) |
+-		    (THRESHOLD_MAX - tr->b->threshold_limit);
++	/*
++	 * Reset error count and overflow bit.
++	 * This is done during init or after handling an interrupt.
++	 */
++	if (hi & MASK_OVERFLOW_HI || tr->set_lvt_off) {
++		hi &= ~(MASK_ERR_COUNT_HI | MASK_OVERFLOW_HI);
++		hi |= THRESHOLD_MAX - tr->b->threshold_limit;
+ 	} else if (tr->old_limit) {	/* change limit w/o reset */
+ 		int new_count = (hi & THRESHOLD_MAX) +
+ 		    (tr->old_limit - tr->b->threshold_limit);
+@@ -1099,13 +1098,20 @@ static const char *get_name(unsigned int cpu, unsigned int bank, struct threshol
+ 	}
+ 
+ 	bank_type = smca_get_bank_type(cpu, bank);
+-	if (bank_type >= N_SMCA_BANK_TYPES)
+-		return NULL;
+ 
+ 	if (b && (bank_type == SMCA_UMC || bank_type == SMCA_UMC_V2)) {
+ 		if (b->block < ARRAY_SIZE(smca_umc_block_names))
+ 			return smca_umc_block_names[b->block];
+-		return NULL;
++	}
++
++	if (b && b->block) {
++		snprintf(buf_mcatype, MAX_MCATYPE_NAME_LEN, "th_block_%u", b->block);
++		return buf_mcatype;
++	}
++
++	if (bank_type >= N_SMCA_BANK_TYPES) {
++		snprintf(buf_mcatype, MAX_MCATYPE_NAME_LEN, "th_bank_%u", bank);
++		return buf_mcatype;
+ 	}
+ 
+ 	if (per_cpu(smca_bank_counts, cpu)[bank_type] == 1)
+diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
+index 2a938f429c4d50..d8f3d9af8acf0d 100644
+--- a/arch/x86/kernel/cpu/mce/core.c
++++ b/arch/x86/kernel/cpu/mce/core.c
+@@ -1688,6 +1688,11 @@ static void mc_poll_banks_default(void)
+ 
+ void (*mc_poll_banks)(void) = mc_poll_banks_default;
+ 
++static bool should_enable_timer(unsigned long iv)
++{
++	return !mca_cfg.ignore_ce && iv;
++}
++
+ static void mce_timer_fn(struct timer_list *t)
+ {
+ 	struct timer_list *cpu_t = this_cpu_ptr(&mce_timer);
+@@ -1711,7 +1716,7 @@ static void mce_timer_fn(struct timer_list *t)
+ 
+ 	if (mce_get_storm_mode()) {
+ 		__start_timer(t, HZ);
+-	} else {
++	} else if (should_enable_timer(iv)) {
+ 		__this_cpu_write(mce_next_interval, iv);
+ 		__start_timer(t, iv);
+ 	}
+@@ -2111,11 +2116,10 @@ static void mce_start_timer(struct timer_list *t)
+ {
+ 	unsigned long iv = check_interval * HZ;
+ 
+-	if (mca_cfg.ignore_ce || !iv)
+-		return;
+-
+-	this_cpu_write(mce_next_interval, iv);
+-	__start_timer(t, iv);
++	if (should_enable_timer(iv)) {
++		this_cpu_write(mce_next_interval, iv);
++		__start_timer(t, iv);
++	}
+ }
+ 
+ static void __mcheck_cpu_setup_timer(void)
+@@ -2756,15 +2760,9 @@ static int mce_cpu_dead(unsigned int cpu)
+ static int mce_cpu_online(unsigned int cpu)
+ {
+ 	struct timer_list *t = this_cpu_ptr(&mce_timer);
+-	int ret;
+ 
+ 	mce_device_create(cpu);
+-
+-	ret = mce_threshold_create_device(cpu);
+-	if (ret) {
+-		mce_device_remove(cpu);
+-		return ret;
+-	}
++	mce_threshold_create_device(cpu);
+ 	mce_reenable_cpu();
+ 	mce_start_timer(t);
+ 	return 0;
+diff --git a/arch/x86/kernel/cpu/mce/intel.c b/arch/x86/kernel/cpu/mce/intel.c
+index f6103e6bf69a8b..bb0a60b1ed637f 100644
+--- a/arch/x86/kernel/cpu/mce/intel.c
++++ b/arch/x86/kernel/cpu/mce/intel.c
+@@ -477,6 +477,7 @@ void mce_intel_feature_init(struct cpuinfo_x86 *c)
+ void mce_intel_feature_clear(struct cpuinfo_x86 *c)
+ {
+ 	intel_clear_lmce();
++	cmci_clear();
+ }
+ 
+ bool intel_filter_mce(struct mce *m)
+diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
+index 02196db26a0842..8f587c5bb6bc4e 100644
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -822,6 +822,7 @@ void kvm_set_cpu_caps(void)
+ 	kvm_cpu_cap_check_and_set(X86_FEATURE_SBPB);
+ 	kvm_cpu_cap_check_and_set(X86_FEATURE_IBPB_BRTYPE);
+ 	kvm_cpu_cap_check_and_set(X86_FEATURE_SRSO_NO);
++	kvm_cpu_cap_check_and_set(X86_FEATURE_VERW_CLEAR);
+ 
+ 	kvm_cpu_cap_init_kvm_defined(CPUID_8000_0022_EAX,
+ 		F(PERFMON_V2)
+@@ -831,6 +832,9 @@ void kvm_set_cpu_caps(void)
+ 		F(TSA_SQ_NO) | F(TSA_L1_NO)
+ 	);
+ 
++	kvm_cpu_cap_check_and_set(X86_FEATURE_TSA_SQ_NO);
++	kvm_cpu_cap_check_and_set(X86_FEATURE_TSA_L1_NO);
++
+ 	/*
+ 	 * Synthesize "LFENCE is serializing" into the AMD-defined entry in
+ 	 * KVM's supported CPUID if the feature is reported as supported by the
+diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
+index 6154cb450b448b..c4ae73541fc56c 100644
+--- a/arch/x86/kvm/svm/sev.c
++++ b/arch/x86/kvm/svm/sev.c
+@@ -2058,6 +2058,10 @@ static int sev_check_source_vcpus(struct kvm *dst, struct kvm *src)
+ 	struct kvm_vcpu *src_vcpu;
+ 	unsigned long i;
+ 
++	if (src->created_vcpus != atomic_read(&src->online_vcpus) ||
++	    dst->created_vcpus != atomic_read(&dst->online_vcpus))
++		return -EBUSY;
++
+ 	if (!sev_es_guest(src))
+ 		return 0;
+ 
+diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c
+index 622fe24da91064..759cc3e9c0fac7 100644
+--- a/arch/x86/kvm/xen.c
++++ b/arch/x86/kvm/xen.c
+@@ -1916,8 +1916,19 @@ int kvm_xen_setup_evtchn(struct kvm *kvm,
+ {
+ 	struct kvm_vcpu *vcpu;
+ 
+-	if (ue->u.xen_evtchn.port >= max_evtchn_port(kvm))
+-		return -EINVAL;
++	/*
++	 * Don't check for the port being within range of max_evtchn_port().
++	 * Userspace can configure what ever targets it likes; events just won't
++	 * be delivered if/while the target is invalid, just like userspace can
++	 * configure MSIs which target non-existent APICs.
++	 *
++	 * This allow on Live Migration and Live Update, the IRQ routing table
++	 * can be restored *independently* of other things like creating vCPUs,
++	 * without imposing an ordering dependency on userspace.  In this
++	 * particular case, the problematic ordering would be with setting the
++	 * Xen 'long mode' flag, which changes max_evtchn_port() to allow 4096
++	 * instead of 1024 event channels.
++	 */
+ 
+ 	/* We only support 2 level event channels for now */
+ 	if (ue->u.xen_evtchn.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL)
+diff --git a/crypto/ecc.c b/crypto/ecc.c
+index 50ad2d4ed672c5..6cf9a945fc6c28 100644
+--- a/crypto/ecc.c
++++ b/crypto/ecc.c
+@@ -71,7 +71,7 @@ EXPORT_SYMBOL(ecc_get_curve);
+ void ecc_digits_from_bytes(const u8 *in, unsigned int nbytes,
+ 			   u64 *out, unsigned int ndigits)
+ {
+-	int diff = ndigits - DIV_ROUND_UP(nbytes, sizeof(u64));
++	int diff = ndigits - DIV_ROUND_UP_POW2(nbytes, sizeof(u64));
+ 	unsigned int o = nbytes & 7;
+ 	__be64 msd = 0;
+ 
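
DIV_ROUND_UP expands to (n + d - 1) / d, which can wrap when n is close to the type's maximum; a divide-first form sidesteps that. The sketch below shows the generic idiom only and makes no claim about DIV_ROUND_UP_POW2's exact definition:

#include <limits.h>
#include <stdio.h>

/* Ceil-division without the (n + d - 1) intermediate that can overflow. */
static unsigned int div_round_up_safe(unsigned int n, unsigned int d)
{
	return n / d + (n % d != 0);
}

int main(void)
{
	unsigned int n = UINT_MAX - 2, d = 8;

	/* (n + d - 1) wraps around here, giving a wildly wrong result: */
	printf("naive: %u\n", (n + d - 1) / d);
	printf("safe:  %u\n", div_round_up_safe(n, d));
	return 0;
}
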
+diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
+index 6a7ac34d73bda6..65fa3444367a13 100644
+--- a/drivers/acpi/battery.c
++++ b/drivers/acpi/battery.c
+@@ -243,23 +243,10 @@ static int acpi_battery_get_property(struct power_supply *psy,
+ 		break;
+ 	case POWER_SUPPLY_PROP_CURRENT_NOW:
+ 	case POWER_SUPPLY_PROP_POWER_NOW:
+-		if (battery->rate_now == ACPI_BATTERY_VALUE_UNKNOWN) {
++		if (battery->rate_now == ACPI_BATTERY_VALUE_UNKNOWN)
+ 			ret = -ENODEV;
+-			break;
+-		}
+-
+-		val->intval = battery->rate_now * 1000;
+-		/*
+-		 * When discharging, the current should be reported as a
+-		 * negative number as per the power supply class interface
+-		 * definition.
+-		 */
+-		if (psp == POWER_SUPPLY_PROP_CURRENT_NOW &&
+-		    (battery->state & ACPI_BATTERY_STATE_DISCHARGING) &&
+-		    acpi_battery_handle_discharging(battery)
+-				== POWER_SUPPLY_STATUS_DISCHARGING)
+-			val->intval = -val->intval;
+-
++		else
++			val->intval = battery->rate_now * 1000;
+ 		break;
+ 	case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
+ 	case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
+diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
+index a876024d8a05f9..63d41320cd5cf0 100644
+--- a/drivers/atm/idt77252.c
++++ b/drivers/atm/idt77252.c
+@@ -852,6 +852,8 @@ queue_skb(struct idt77252_dev *card, struct vc_map *vc,
+ 
+ 	IDT77252_PRV_PADDR(skb) = dma_map_single(&card->pcidev->dev, skb->data,
+ 						 skb->len, DMA_TO_DEVICE);
++	if (dma_mapping_error(&card->pcidev->dev, IDT77252_PRV_PADDR(skb)))
++		return -ENOMEM;
+ 
+ 	error = -EINVAL;
+ 
+@@ -1857,6 +1859,8 @@ add_rx_skb(struct idt77252_dev *card, int queue,
+ 		paddr = dma_map_single(&card->pcidev->dev, skb->data,
+ 				       skb_end_pointer(skb) - skb->data,
+ 				       DMA_FROM_DEVICE);
++		if (dma_mapping_error(&card->pcidev->dev, paddr))
++			goto outpoolrm;
+ 		IDT77252_PRV_PADDR(skb) = paddr;
+ 
+ 		if (push_rx_skb(card, skb, queue)) {
+@@ -1871,6 +1875,7 @@ add_rx_skb(struct idt77252_dev *card, int queue,
+ 	dma_unmap_single(&card->pcidev->dev, IDT77252_PRV_PADDR(skb),
+ 			 skb_end_pointer(skb) - skb->data, DMA_FROM_DEVICE);
+ 
++outpoolrm:
+ 	handle = IDT77252_PRV_POOL(skb);
+ 	card->sbpool[POOL_QUEUE(handle)].skb[POOL_INDEX(handle)] = NULL;
+ 
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index 450458267e6e64..c705acc4d6f4b9 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -2136,9 +2136,7 @@ static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info)
+ 				goto out;
+ 		}
+ 	}
+-	ret = nbd_start_device(nbd);
+-	if (ret)
+-		goto out;
++
+ 	if (info->attrs[NBD_ATTR_BACKEND_IDENTIFIER]) {
+ 		nbd->backend = nla_strdup(info->attrs[NBD_ATTR_BACKEND_IDENTIFIER],
+ 					  GFP_KERNEL);
+@@ -2154,6 +2152,8 @@ static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info)
+ 		goto out;
+ 	}
+ 	set_bit(NBD_RT_HAS_BACKEND_FILE, &config->runtime_flags);
++
++	ret = nbd_start_device(nbd);
+ out:
+ 	mutex_unlock(&nbd->config_lock);
+ 	if (!ret) {
+diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
+index 746ef36e58df20..3b1a5cdd631161 100644
+--- a/drivers/block/ublk_drv.c
++++ b/drivers/block/ublk_drv.c
+@@ -2457,7 +2457,8 @@ static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
+ 	if (copy_from_user(&info, argp, sizeof(info)))
+ 		return -EFAULT;
+ 
+-	if (info.queue_depth > UBLK_MAX_QUEUE_DEPTH || info.nr_hw_queues > UBLK_MAX_NR_QUEUES)
++	if (info.queue_depth > UBLK_MAX_QUEUE_DEPTH || !info.queue_depth ||
++	    info.nr_hw_queues > UBLK_MAX_NR_QUEUES || !info.nr_hw_queues)
+ 		return -EINVAL;
+ 
+ 	if (capable(CAP_SYS_ADMIN))
+diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
+index 025b9a07c08751..e6ad01d5e1d5db 100644
+--- a/drivers/bluetooth/hci_qca.c
++++ b/drivers/bluetooth/hci_qca.c
+@@ -2363,10 +2363,17 @@ static int qca_serdev_probe(struct serdev_device *serdev)
+ 			 */
+ 			qcadev->bt_power->pwrseq = devm_pwrseq_get(&serdev->dev,
+ 								   "bluetooth");
+-			if (IS_ERR(qcadev->bt_power->pwrseq))
+-				return PTR_ERR(qcadev->bt_power->pwrseq);
+ 
+-			break;
++			/*
++			 * Some modules have BT_EN enabled via a hardware pull-up,
++			 * meaning it is not defined in the DTS and is not controlled
++			 * through the power sequence. In such cases, fall through
++			 * to follow the legacy flow.
++			 */
++			if (IS_ERR(qcadev->bt_power->pwrseq))
++				qcadev->bt_power->pwrseq = NULL;
++			else
++				break;
+ 		}
+ 		fallthrough;
+ 	case QCA_WCN3988:
+diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
+index e12b531f5c2f33..6a4a8ecd0edd02 100644
+--- a/drivers/char/ipmi/ipmi_msghandler.c
++++ b/drivers/char/ipmi/ipmi_msghandler.c
+@@ -1241,7 +1241,7 @@ int ipmi_create_user(unsigned int          if_num,
+ 	}
+ 	/* Not found, return an error */
+ 	rv = -EINVAL;
+-	goto out_kfree;
++	goto out_unlock;
+ 
+  found:
+ 	if (atomic_add_return(1, &intf->nr_users) > max_users) {
+@@ -1283,6 +1283,7 @@ int ipmi_create_user(unsigned int          if_num,
+ 
+ out_kfree:
+ 	atomic_dec(&intf->nr_users);
++out_unlock:
+ 	srcu_read_unlock(&ipmi_interfaces_srcu, index);
+ 	vfree(new_user);
+ 	return rv;
+diff --git a/drivers/clk/clk-scmi.c b/drivers/clk/clk-scmi.c
+index 15510c2ff21c03..1b1561c84127b9 100644
+--- a/drivers/clk/clk-scmi.c
++++ b/drivers/clk/clk-scmi.c
+@@ -404,6 +404,7 @@ static int scmi_clocks_probe(struct scmi_device *sdev)
+ 	const struct scmi_handle *handle = sdev->handle;
+ 	struct scmi_protocol_handle *ph;
+ 	const struct clk_ops *scmi_clk_ops_db[SCMI_MAX_CLK_OPS] = {};
++	struct scmi_clk *sclks;
+ 
+ 	if (!handle)
+ 		return -ENODEV;
+@@ -430,18 +431,21 @@ static int scmi_clocks_probe(struct scmi_device *sdev)
+ 	transport_is_atomic = handle->is_transport_atomic(handle,
+ 							  &atomic_threshold_us);
+ 
++	sclks = devm_kcalloc(dev, count, sizeof(*sclks), GFP_KERNEL);
++	if (!sclks)
++		return -ENOMEM;
++
++	for (idx = 0; idx < count; idx++)
++		hws[idx] = &sclks[idx].hw;
++
+ 	for (idx = 0; idx < count; idx++) {
+-		struct scmi_clk *sclk;
++		struct scmi_clk *sclk = &sclks[idx];
+ 		const struct clk_ops *scmi_ops;
+ 
+-		sclk = devm_kzalloc(dev, sizeof(*sclk), GFP_KERNEL);
+-		if (!sclk)
+-			return -ENOMEM;
+-
+ 		sclk->info = scmi_proto_clk_ops->info_get(ph, idx);
+ 		if (!sclk->info) {
+ 			dev_dbg(dev, "invalid clock info for idx %d\n", idx);
+-			devm_kfree(dev, sclk);
++			hws[idx] = NULL;
+ 			continue;
+ 		}
+ 
+@@ -479,13 +483,11 @@ static int scmi_clocks_probe(struct scmi_device *sdev)
+ 		if (err) {
+ 			dev_err(dev, "failed to register clock %d\n", idx);
+ 			devm_kfree(dev, sclk->parent_data);
+-			devm_kfree(dev, sclk);
+ 			hws[idx] = NULL;
+ 		} else {
+ 			dev_dbg(dev, "Registered clock:%s%s\n",
+ 				sclk->info->name,
+ 				scmi_ops->enable ? " (atomic ops)" : "");
+-			hws[idx] = &sclk->hw;
+ 		}
+ 	}
+ 
+diff --git a/drivers/clk/imx/clk-imx95-blk-ctl.c b/drivers/clk/imx/clk-imx95-blk-ctl.c
+index 19a62da74be450..564e9f3f7508da 100644
+--- a/drivers/clk/imx/clk-imx95-blk-ctl.c
++++ b/drivers/clk/imx/clk-imx95-blk-ctl.c
+@@ -219,11 +219,15 @@ static const struct imx95_blk_ctl_dev_data lvds_csr_dev_data = {
+ 	.clk_reg_offset = 0,
+ };
+ 
++static const char * const disp_engine_parents[] = {
++	"videopll1", "dsi_pll", "ldb_pll_div7"
++};
++
+ static const struct imx95_blk_ctl_clk_dev_data dispmix_csr_clk_dev_data[] = {
+ 	[IMX95_CLK_DISPMIX_ENG0_SEL] = {
+ 		.name = "disp_engine0_sel",
+-		.parent_names = (const char *[]){"videopll1", "dsi_pll", "ldb_pll_div7", },
+-		.num_parents = 4,
++		.parent_names = disp_engine_parents,
++		.num_parents = ARRAY_SIZE(disp_engine_parents),
+ 		.reg = 0,
+ 		.bit_idx = 0,
+ 		.bit_width = 2,
+@@ -232,8 +236,8 @@ static const struct imx95_blk_ctl_clk_dev_data dispmix_csr_clk_dev_data[] = {
+ 	},
+ 	[IMX95_CLK_DISPMIX_ENG1_SEL] = {
+ 		.name = "disp_engine1_sel",
+-		.parent_names = (const char *[]){"videopll1", "dsi_pll", "ldb_pll_div7", },
+-		.num_parents = 4,
++		.parent_names = disp_engine_parents,
++		.num_parents = ARRAY_SIZE(disp_engine_parents),
+ 		.reg = 0,
+ 		.bit_idx = 2,
+ 		.bit_width = 2,
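
The blk-ctl fix replaces a hand-written parent count (4, for a 3-entry list) with ARRAY_SIZE over a shared table, so the count can no longer drift from the data. The pattern in miniature:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char *const parents[] = { "videopll1", "dsi_pll", "ldb_pll_div7" };

int main(void)
{
	/* Hard-coding 4 here would read one element past the array; deriving
	 * the count keeps the table and its length in sync automatically. */
	for (unsigned int i = 0; i < ARRAY_SIZE(parents); i++)
		printf("parent %u: %s\n", i, parents[i]);
	return 0;
}
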
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index a55e611605fcab..24e41b42c638b0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -4144,7 +4144,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
+ 	mutex_init(&adev->grbm_idx_mutex);
+ 	mutex_init(&adev->mn_lock);
+ 	mutex_init(&adev->virt.vf_errors.lock);
+-	mutex_init(&adev->virt.rlcg_reg_lock);
+ 	hash_init(adev->mn_hash);
+ 	mutex_init(&adev->psp.mutex);
+ 	mutex_init(&adev->notifier_lock);
+@@ -4170,6 +4169,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
+ 	spin_lock_init(&adev->se_cac_idx_lock);
+ 	spin_lock_init(&adev->audio_endpt_idx_lock);
+ 	spin_lock_init(&adev->mm_stats.lock);
++	spin_lock_init(&adev->virt.rlcg_reg_lock);
+ 	spin_lock_init(&adev->wb.lock);
+ 
+ 	INIT_LIST_HEAD(&adev->reset_list);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+index 34d41e3ce34746..eee434743deb49 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+@@ -112,6 +112,14 @@
+ #endif
+ 
+ MODULE_FIRMWARE("amdgpu/ip_discovery.bin");
++MODULE_FIRMWARE("amdgpu/vega10_ip_discovery.bin");
++MODULE_FIRMWARE("amdgpu/vega12_ip_discovery.bin");
++MODULE_FIRMWARE("amdgpu/vega20_ip_discovery.bin");
++MODULE_FIRMWARE("amdgpu/raven_ip_discovery.bin");
++MODULE_FIRMWARE("amdgpu/raven2_ip_discovery.bin");
++MODULE_FIRMWARE("amdgpu/picasso_ip_discovery.bin");
++MODULE_FIRMWARE("amdgpu/arcturus_ip_discovery.bin");
++MODULE_FIRMWARE("amdgpu/aldebaran_ip_discovery.bin");
+ 
+ #define mmIP_DISCOVERY_VERSION  0x16A00
+ #define mmRCC_CONFIG_MEMSIZE	0xde3
+@@ -400,7 +408,27 @@ static const char *amdgpu_discovery_get_fw_name(struct amdgpu_device *adev)
+ 	if (amdgpu_discovery == 2)
+ 		return "amdgpu/ip_discovery.bin";
+ 
+-	return NULL;
++	switch (adev->asic_type) {
++	case CHIP_VEGA10:
++		return "amdgpu/vega10_ip_discovery.bin";
++	case CHIP_VEGA12:
++		return "amdgpu/vega12_ip_discovery.bin";
++	case CHIP_RAVEN:
++		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
++			return "amdgpu/raven2_ip_discovery.bin";
++		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
++			return "amdgpu/picasso_ip_discovery.bin";
++		else
++			return "amdgpu/raven_ip_discovery.bin";
++	case CHIP_VEGA20:
++		return "amdgpu/vega20_ip_discovery.bin";
++	case CHIP_ARCTURUS:
++		return "amdgpu/arcturus_ip_discovery.bin";
++	case CHIP_ALDEBARAN:
++		return "amdgpu/aldebaran_ip_discovery.bin";
++	default:
++		return NULL;
++	}
+ }
+ 
+ static int amdgpu_discovery_init(struct amdgpu_device *adev)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+index b6397d3229e1ba..01dccd489a8050 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+@@ -1010,6 +1010,7 @@ u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 f
+ 	void *scratch_reg2;
+ 	void *scratch_reg3;
+ 	void *spare_int;
++	unsigned long flags;
+ 
+ 	if (!adev->gfx.rlc.rlcg_reg_access_supported) {
+ 		dev_err(adev->dev,
+@@ -1031,7 +1032,7 @@ u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 f
+ 	scratch_reg2 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg2;
+ 	scratch_reg3 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg3;
+ 
+-	mutex_lock(&adev->virt.rlcg_reg_lock);
++	spin_lock_irqsave(&adev->virt.rlcg_reg_lock, flags);
+ 
+ 	if (reg_access_ctrl->spare_int)
+ 		spare_int = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->spare_int;
+@@ -1090,7 +1091,7 @@ u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 f
+ 
+ 	ret = readl(scratch_reg0);
+ 
+-	mutex_unlock(&adev->virt.rlcg_reg_lock);
++	spin_unlock_irqrestore(&adev->virt.rlcg_reg_lock, flags);
+ 
+ 	return ret;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+index b650a2032c42bd..6a2087abfb7e4a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+@@ -275,7 +275,8 @@ struct amdgpu_virt {
+ 	/* the ucode id to signal the autoload */
+ 	uint32_t autoload_ucode_id;
+ 
+-	struct mutex rlcg_reg_lock;
++	/* Spinlock to protect access to the RLCG register interface */
++	spinlock_t rlcg_reg_lock;
+ };
+ 
+ struct amdgpu_video_codec_info;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+index f00d41be7fca24..3e9e0f36cd3f47 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+@@ -1170,13 +1170,12 @@ svm_range_split_head(struct svm_range *prange, uint64_t new_start,
+ }
+ 
+ static void
+-svm_range_add_child(struct svm_range *prange, struct mm_struct *mm,
+-		    struct svm_range *pchild, enum svm_work_list_ops op)
++svm_range_add_child(struct svm_range *prange, struct svm_range *pchild, enum svm_work_list_ops op)
+ {
+ 	pr_debug("add child 0x%p [0x%lx 0x%lx] to prange 0x%p child list %d\n",
+ 		 pchild, pchild->start, pchild->last, prange, op);
+ 
+-	pchild->work_item.mm = mm;
++	pchild->work_item.mm = NULL;
+ 	pchild->work_item.op = op;
+ 	list_add_tail(&pchild->child_list, &prange->child_list);
+ }
+@@ -2384,15 +2383,17 @@ svm_range_add_list_work(struct svm_range_list *svms, struct svm_range *prange,
+ 		    prange->work_item.op != SVM_OP_UNMAP_RANGE)
+ 			prange->work_item.op = op;
+ 	} else {
+-		prange->work_item.op = op;
+-
+-		/* Pairs with mmput in deferred_list_work */
+-		mmget(mm);
+-		prange->work_item.mm = mm;
+-		list_add_tail(&prange->deferred_list,
+-			      &prange->svms->deferred_range_list);
+-		pr_debug("add prange 0x%p [0x%lx 0x%lx] to work list op %d\n",
+-			 prange, prange->start, prange->last, op);
++		/* Pairs with mmput in deferred_list_work.
++		 * If process is exiting and mm is gone, don't update mmu notifier.
++		 */
++		if (mmget_not_zero(mm)) {
++			prange->work_item.mm = mm;
++			prange->work_item.op = op;
++			list_add_tail(&prange->deferred_list,
++				      &prange->svms->deferred_range_list);
++			pr_debug("add prange 0x%p [0x%lx 0x%lx] to work list op %d\n",
++				 prange, prange->start, prange->last, op);
++		}
+ 	}
+ 	spin_unlock(&svms->deferred_list_lock);
+ }
+@@ -2406,8 +2407,7 @@ void schedule_deferred_list_work(struct svm_range_list *svms)
+ }
+ 
+ static void
+-svm_range_unmap_split(struct mm_struct *mm, struct svm_range *parent,
+-		      struct svm_range *prange, unsigned long start,
++svm_range_unmap_split(struct svm_range *parent, struct svm_range *prange, unsigned long start,
+ 		      unsigned long last)
+ {
+ 	struct svm_range *head;
+@@ -2428,12 +2428,12 @@ svm_range_unmap_split(struct mm_struct *mm, struct svm_range *parent,
+ 		svm_range_split(tail, last + 1, tail->last, &head);
+ 
+ 	if (head != prange && tail != prange) {
+-		svm_range_add_child(parent, mm, head, SVM_OP_UNMAP_RANGE);
+-		svm_range_add_child(parent, mm, tail, SVM_OP_ADD_RANGE);
++		svm_range_add_child(parent, head, SVM_OP_UNMAP_RANGE);
++		svm_range_add_child(parent, tail, SVM_OP_ADD_RANGE);
+ 	} else if (tail != prange) {
+-		svm_range_add_child(parent, mm, tail, SVM_OP_UNMAP_RANGE);
++		svm_range_add_child(parent, tail, SVM_OP_UNMAP_RANGE);
+ 	} else if (head != prange) {
+-		svm_range_add_child(parent, mm, head, SVM_OP_UNMAP_RANGE);
++		svm_range_add_child(parent, head, SVM_OP_UNMAP_RANGE);
+ 	} else if (parent != prange) {
+ 		prange->work_item.op = SVM_OP_UNMAP_RANGE;
+ 	}
+@@ -2510,14 +2510,14 @@ svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange,
+ 		l = min(last, pchild->last);
+ 		if (l >= s)
+ 			svm_range_unmap_from_gpus(pchild, s, l, trigger);
+-		svm_range_unmap_split(mm, prange, pchild, start, last);
++		svm_range_unmap_split(prange, pchild, start, last);
+ 		mutex_unlock(&pchild->lock);
+ 	}
+ 	s = max(start, prange->start);
+ 	l = min(last, prange->last);
+ 	if (l >= s)
+ 		svm_range_unmap_from_gpus(prange, s, l, trigger);
+-	svm_range_unmap_split(mm, prange, prange, start, last);
++	svm_range_unmap_split(prange, prange, start, last);
+ 
+ 	if (unmap_parent)
+ 		svm_range_add_list_work(svms, prange, mm, SVM_OP_UNMAP_RANGE);
+@@ -2560,8 +2560,6 @@ svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
+ 
+ 	if (range->event == MMU_NOTIFY_RELEASE)
+ 		return true;
+-	if (!mmget_not_zero(mni->mm))
+-		return true;
+ 
+ 	start = mni->interval_tree.start;
+ 	last = mni->interval_tree.last;
+@@ -2588,7 +2586,6 @@ svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
+ 	}
+ 
+ 	svm_range_unlock(prange);
+-	mmput(mni->mm);
+ 
+ 	return true;
+ }
+diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
+index 888aadb6a4acbb..d6550b54fac168 100644
+--- a/drivers/gpu/drm/drm_framebuffer.c
++++ b/drivers/gpu/drm/drm_framebuffer.c
+@@ -860,11 +860,23 @@ void drm_framebuffer_free(struct kref *kref)
+ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
+ 			 const struct drm_framebuffer_funcs *funcs)
+ {
++	unsigned int i;
+ 	int ret;
++	bool exists;
+ 
+ 	if (WARN_ON_ONCE(fb->dev != dev || !fb->format))
+ 		return -EINVAL;
+ 
++	for (i = 0; i < fb->format->num_planes; i++) {
++		if (drm_WARN_ON_ONCE(dev, fb->internal_flags & DRM_FRAMEBUFFER_HAS_HANDLE_REF(i)))
++			fb->internal_flags &= ~DRM_FRAMEBUFFER_HAS_HANDLE_REF(i);
++		if (fb->obj[i]) {
++			exists = drm_gem_object_handle_get_if_exists_unlocked(fb->obj[i]);
++			if (exists)
++				fb->internal_flags |= DRM_FRAMEBUFFER_HAS_HANDLE_REF(i);
++		}
++	}
++
+ 	INIT_LIST_HEAD(&fb->filp_head);
+ 
+ 	fb->funcs = funcs;
+@@ -873,7 +885,7 @@ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
+ 	ret = __drm_mode_object_add(dev, &fb->base, DRM_MODE_OBJECT_FB,
+ 				    false, drm_framebuffer_free);
+ 	if (ret)
+-		goto out;
++		goto err;
+ 
+ 	mutex_lock(&dev->mode_config.fb_lock);
+ 	dev->mode_config.num_fb++;
+@@ -881,7 +893,16 @@ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
+ 	mutex_unlock(&dev->mode_config.fb_lock);
+ 
+ 	drm_mode_object_register(dev, &fb->base);
+-out:
++
++	return 0;
++
++err:
++	for (i = 0; i < fb->format->num_planes; i++) {
++		if (fb->internal_flags & DRM_FRAMEBUFFER_HAS_HANDLE_REF(i)) {
++			drm_gem_object_handle_put_unlocked(fb->obj[i]);
++			fb->internal_flags &= ~DRM_FRAMEBUFFER_HAS_HANDLE_REF(i);
++		}
++	}
+ 	return ret;
+ }
+ EXPORT_SYMBOL(drm_framebuffer_init);
+@@ -958,6 +979,12 @@ EXPORT_SYMBOL(drm_framebuffer_unregister_private);
+ void drm_framebuffer_cleanup(struct drm_framebuffer *fb)
+ {
+ 	struct drm_device *dev = fb->dev;
++	unsigned int i;
++
++	for (i = 0; i < fb->format->num_planes; i++) {
++		if (fb->internal_flags & DRM_FRAMEBUFFER_HAS_HANDLE_REF(i))
++			drm_gem_object_handle_put_unlocked(fb->obj[i]);
++	}
+ 
+ 	mutex_lock(&dev->mode_config.fb_lock);
+ 	list_del(&fb->head);
+diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
+index 426d0867882dfb..9e8a4da313a0ea 100644
+--- a/drivers/gpu/drm/drm_gem.c
++++ b/drivers/gpu/drm/drm_gem.c
+@@ -186,6 +186,46 @@ void drm_gem_private_object_fini(struct drm_gem_object *obj)
+ }
+ EXPORT_SYMBOL(drm_gem_private_object_fini);
+ 
++static void drm_gem_object_handle_get(struct drm_gem_object *obj)
++{
++	struct drm_device *dev = obj->dev;
++
++	drm_WARN_ON(dev, !mutex_is_locked(&dev->object_name_lock));
++
++	if (obj->handle_count++ == 0)
++		drm_gem_object_get(obj);
++}
++
++/**
++ * drm_gem_object_handle_get_if_exists_unlocked - acquire reference on user-space handle, if any
++ * @obj: GEM object
++ *
++ * Acquires a reference on the GEM buffer object's handle. Required to keep
++ * the GEM object alive. Call drm_gem_object_handle_put_if_exists_unlocked()
++ * to release the reference. Does nothing if the buffer object has no handle.
++ *
++ * Returns:
++ * True if a handle exists, or false otherwise
++ */
++bool drm_gem_object_handle_get_if_exists_unlocked(struct drm_gem_object *obj)
++{
++	struct drm_device *dev = obj->dev;
++
++	guard(mutex)(&dev->object_name_lock);
++
++	/*
++	 * First ref taken during GEM object creation, if any. Some
++	 * drivers set up internal framebuffers with GEM objects that
++	 * do not have a GEM handle. Hence, this counter can be zero.
++	 */
++	if (!obj->handle_count)
++		return false;
++
++	drm_gem_object_handle_get(obj);
++
++	return true;
++}
++
+ /**
+  * drm_gem_object_handle_free - release resources bound to userspace handles
+  * @obj: GEM object to clean up.
+@@ -216,20 +256,26 @@ static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
+ 	}
+ }
+ 
+-static void
+-drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
++/**
++ * drm_gem_object_handle_put_unlocked - releases reference on user-space handle
++ * @obj: GEM object
++ *
++ * Releases a reference on the GEM buffer object's handle. Possibly releases
++ * the GEM buffer object and associated dma-buf objects.
++ */
++void drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
+ {
+ 	struct drm_device *dev = obj->dev;
+ 	bool final = false;
+ 
+-	if (WARN_ON(READ_ONCE(obj->handle_count) == 0))
++	if (drm_WARN_ON(dev, READ_ONCE(obj->handle_count) == 0))
+ 		return;
+ 
+ 	/*
+-	* Must bump handle count first as this may be the last
+-	* ref, in which case the object would disappear before we
+-	* checked for a name
+-	*/
++	 * Must bump handle count first as this may be the last
++	 * ref, in which case the object would disappear before
++	 * we checked for a name.
++	 */
+ 
+ 	mutex_lock(&dev->object_name_lock);
+ 	if (--obj->handle_count == 0) {
+@@ -253,6 +299,9 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
+ 	struct drm_file *file_priv = data;
+ 	struct drm_gem_object *obj = ptr;
+ 
++	if (drm_WARN_ON(obj->dev, !data))
++		return 0;
++
+ 	if (obj->funcs->close)
+ 		obj->funcs->close(obj, file_priv);
+ 
+@@ -363,8 +412,8 @@ drm_gem_handle_create_tail(struct drm_file *file_priv,
+ 	int ret;
+ 
+ 	WARN_ON(!mutex_is_locked(&dev->object_name_lock));
+-	if (obj->handle_count++ == 0)
+-		drm_gem_object_get(obj);
++
++	drm_gem_object_handle_get(obj);
+ 
+ 	/*
+ 	 * Get the user-visible handle using idr.  Preload and perform
+@@ -373,7 +422,7 @@ drm_gem_handle_create_tail(struct drm_file *file_priv,
+ 	idr_preload(GFP_KERNEL);
+ 	spin_lock(&file_priv->table_lock);
+ 
+-	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
++	ret = idr_alloc(&file_priv->object_idr, NULL, 1, 0, GFP_NOWAIT);
+ 
+ 	spin_unlock(&file_priv->table_lock);
+ 	idr_preload_end();
+@@ -394,6 +443,11 @@ drm_gem_handle_create_tail(struct drm_file *file_priv,
+ 			goto err_revoke;
+ 	}
+ 
++	/* mirrors drm_gem_handle_delete to avoid races */
++	spin_lock(&file_priv->table_lock);
++	obj = idr_replace(&file_priv->object_idr, obj, handle);
++	WARN_ON(obj != NULL);
++	spin_unlock(&file_priv->table_lock);
+ 	*handlep = handle;
+ 	return 0;
+ 
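The hunk above converts handle creation to a reserve-then-publish pattern: the idr slot is allocated with a NULL pointer, and the object only becomes visible via idr_replace() once setup has fully succeeded, so a concurrent lookup through the table can never observe a half-initialized handle. A minimal sketch of the idiom, using illustrative names (my_publish, my_table, my_lock, my_obj) rather than the driver's own:

	static int my_publish(struct idr *my_table, spinlock_t *my_lock,
			      void *my_obj, u32 *out_id)
	{
		void *old;
		int id;

		idr_preload(GFP_KERNEL);
		spin_lock(my_lock);
		/* Reserve the slot with NULL so lookups see no partial object. */
		id = idr_alloc(my_table, NULL, 1, 0, GFP_NOWAIT);
		spin_unlock(my_lock);
		idr_preload_end();
		if (id < 0)
			return id;

		/* ... fallible setup here; on error, idr_remove(my_table, id) ... */

		spin_lock(my_lock);
		old = idr_replace(my_table, my_obj, id);	/* publish */
		WARN_ON(old != NULL);
		spin_unlock(my_lock);

		*out_id = id;
		return 0;
	}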
+diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
+index 1705bfc90b1e7f..98b73c581c4260 100644
+--- a/drivers/gpu/drm/drm_internal.h
++++ b/drivers/gpu/drm/drm_internal.h
+@@ -153,6 +153,8 @@ void drm_sysfs_lease_event(struct drm_device *dev);
+ 
+ /* drm_gem.c */
+ int drm_gem_init(struct drm_device *dev);
++bool drm_gem_object_handle_get_if_exists_unlocked(struct drm_gem_object *obj);
++void drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj);
+ int drm_gem_handle_create_tail(struct drm_file *file_priv,
+ 			       struct drm_gem_object *obj,
+ 			       u32 *handlep);
+diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+index 0d185c0564b911..9eeba254cf45df 100644
+--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
++++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+@@ -601,6 +601,10 @@ static irqreturn_t decon_irq_handler(int irq, void *dev_id)
+ 	if (!ctx->drm_dev)
+ 		goto out;
+ 
++	/* check if crtc and vblank have been initialized properly */
++	if (!drm_dev_has_vblank(ctx->drm_dev))
++		goto out;
++
+ 	if (!ctx->i80_if) {
+ 		drm_crtc_handle_vblank(&ctx->crtc->base);
+ 
+diff --git a/drivers/gpu/drm/imagination/pvr_power.c b/drivers/gpu/drm/imagination/pvr_power.c
+index ba7816fd28ec77..850b318605da4c 100644
+--- a/drivers/gpu/drm/imagination/pvr_power.c
++++ b/drivers/gpu/drm/imagination/pvr_power.c
+@@ -363,13 +363,13 @@ pvr_power_reset(struct pvr_device *pvr_dev, bool hard_reset)
+ 		if (!err) {
+ 			if (hard_reset) {
+ 				pvr_dev->fw_dev.booted = false;
+-				WARN_ON(pm_runtime_force_suspend(from_pvr_device(pvr_dev)->dev));
++				WARN_ON(pvr_power_device_suspend(from_pvr_device(pvr_dev)->dev));
+ 
+ 				err = pvr_fw_hard_reset(pvr_dev);
+ 				if (err)
+ 					goto err_device_lost;
+ 
+-				err = pm_runtime_force_resume(from_pvr_device(pvr_dev)->dev);
++				err = pvr_power_device_resume(from_pvr_device(pvr_dev)->dev);
+ 				pvr_dev->fw_dev.booted = true;
+ 				if (err)
+ 					goto err_device_lost;
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
+index fc84ca214f247d..3ad4f6e9a8ac21 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
+@@ -1454,7 +1454,6 @@ r535_gsp_acpi_caps(acpi_handle handle, CAPS_METHOD_DATA *caps)
+ 	union acpi_object argv4 = {
+ 		.buffer.type    = ACPI_TYPE_BUFFER,
+ 		.buffer.length  = 4,
+-		.buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL),
+ 	}, *obj;
+ 
+ 	caps->status = 0xffff;
+@@ -1462,17 +1461,22 @@ r535_gsp_acpi_caps(acpi_handle handle, CAPS_METHOD_DATA *caps)
+ 	if (!acpi_check_dsm(handle, &NVOP_DSM_GUID, NVOP_DSM_REV, BIT_ULL(0x1a)))
+ 		return;
+ 
++	argv4.buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL);
++	if (!argv4.buffer.pointer)
++		return;
++
+ 	obj = acpi_evaluate_dsm(handle, &NVOP_DSM_GUID, NVOP_DSM_REV, 0x1a, &argv4);
+ 	if (!obj)
+-		return;
++		goto done;
+ 
+ 	if (WARN_ON(obj->type != ACPI_TYPE_BUFFER) ||
+ 	    WARN_ON(obj->buffer.length != 4))
+-		return;
++		goto done;
+ 
+ 	caps->status = 0;
+ 	caps->optimusCaps = *(u32 *)obj->buffer.pointer;
+ 
++done:
+ 	ACPI_FREE(obj);
+ 
+ 	kfree(argv4.buffer.pointer);
+@@ -1489,24 +1493,28 @@ r535_gsp_acpi_jt(acpi_handle handle, JT_METHOD_DATA *jt)
+ 	union acpi_object argv4 = {
+ 		.buffer.type    = ACPI_TYPE_BUFFER,
+ 		.buffer.length  = sizeof(caps),
+-		.buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL),
+ 	}, *obj;
+ 
+ 	jt->status = 0xffff;
+ 
++	argv4.buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL);
++	if (!argv4.buffer.pointer)
++		return;
++
+ 	obj = acpi_evaluate_dsm(handle, &JT_DSM_GUID, JT_DSM_REV, 0x1, &argv4);
+ 	if (!obj)
+-		return;
++		goto done;
+ 
+ 	if (WARN_ON(obj->type != ACPI_TYPE_BUFFER) ||
+ 	    WARN_ON(obj->buffer.length != 4))
+-		return;
++		goto done;
+ 
+ 	jt->status = 0;
+ 	jt->jtCaps = *(u32 *)obj->buffer.pointer;
+ 	jt->jtRevId = (jt->jtCaps & 0xfff00000) >> 20;
+ 	jt->bSBIOSCaps = 0;
+ 
++done:
+ 	ACPI_FREE(obj);
+ 
+ 	kfree(argv4.buffer.pointer);
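Both hunks in this file fix the same two problems: initializing .buffer.pointer with kmalloc(argv4.buffer.length, GFP_KERNEL) reads a member of the very object still being initialized, and the allocation result was never checked for NULL. The corrected shape, sketched generically for an ACPI buffer argument:

	union acpi_object argv4 = {
		.buffer.type   = ACPI_TYPE_BUFFER,
		.buffer.length = 4,
	};

	/* Allocate after the initializer completes, and check the result. */
	argv4.buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL);
	if (!argv4.buffer.pointer)
		return;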
+diff --git a/drivers/gpu/drm/tegra/nvdec.c b/drivers/gpu/drm/tegra/nvdec.c
+index 4860790666af51..14ef61b44f47cd 100644
+--- a/drivers/gpu/drm/tegra/nvdec.c
++++ b/drivers/gpu/drm/tegra/nvdec.c
+@@ -261,10 +261,8 @@ static int nvdec_load_falcon_firmware(struct nvdec *nvdec)
+ 
+ 	if (!client->group) {
+ 		virt = dma_alloc_coherent(nvdec->dev, size, &iova, GFP_KERNEL);
+-
+-		err = dma_mapping_error(nvdec->dev, iova);
+-		if (err < 0)
+-			return err;
++		if (!virt)
++			return -ENOMEM;
+ 	} else {
+ 		virt = tegra_drm_alloc(tegra, size, &iova);
+ 		if (IS_ERR(virt))
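The rule behind this fix: dma_alloc_coherent() reports failure by returning NULL, while dma_mapping_error() is only meaningful for handles produced by the streaming-mapping APIs such as dma_map_single(), so the old check could never catch a failed allocation. The two families in sketch form (kernel-style, not the driver's code):

	/* coherent allocation: test the returned CPU pointer */
	virt = dma_alloc_coherent(dev, size, &iova, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;

	/* streaming mapping: test the dma_addr_t with dma_mapping_error() */
	addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;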
+diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
+index 3c07f4712d5cce..b600be2a5c8491 100644
+--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
++++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
+@@ -254,6 +254,13 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
+ 	ret = dma_resv_trylock(&fbo->base.base._resv);
+ 	WARN_ON(!ret);
+ 
++	ret = dma_resv_reserve_fences(&fbo->base.base._resv, 1);
++	if (ret) {
++		dma_resv_unlock(&fbo->base.base._resv);
++		kfree(fbo);
++		return ret;
++	}
++
+ 	if (fbo->base.resource) {
+ 		ttm_resource_set_bo(fbo->base.resource, &fbo->base);
+ 		bo->resource = NULL;
+@@ -262,12 +269,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
+ 		fbo->base.bulk_move = NULL;
+ 	}
+ 
+-	ret = dma_resv_reserve_fences(&fbo->base.base._resv, 1);
+-	if (ret) {
+-		kfree(fbo);
+-		return ret;
+-	}
+-
+ 	ttm_bo_get(bo);
+ 	fbo->bo = bo;
+ 
+diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c
+index db540c8be6c7c5..656c2ab6ca9f3c 100644
+--- a/drivers/gpu/drm/xe/xe_gt_pagefault.c
++++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c
+@@ -432,6 +432,7 @@ static int xe_alloc_pf_queue(struct xe_gt *gt, struct pf_queue *pf_queue)
+ #define PF_MULTIPLIER	8
+ 	pf_queue->num_dw =
+ 		(num_eus + XE_NUM_HW_ENGINES) * PF_MSG_LEN_DW * PF_MULTIPLIER;
++	pf_queue->num_dw = roundup_pow_of_two(pf_queue->num_dw);
+ #undef PF_MULTIPLIER
+ 
+ 	pf_queue->gt = gt;
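Rounding the queue size up to a power of two is the usual precondition for ring indices that wrap with a mask rather than a modulo: 'idx & (size - 1)' only wraps correctly when size is a power of two. Sketch of the idiom (assumed names, not the xe code):

	u32 size = roundup_pow_of_two(wanted);		/* <linux/log2.h> */
	/* ... */
	head = (head + entry_len) & (size - 1);		/* cheap wrap-around */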
+diff --git a/drivers/gpu/drm/xe/xe_lmtt.c b/drivers/gpu/drm/xe/xe_lmtt.c
+index 8999ac511555f2..485658f69fba84 100644
+--- a/drivers/gpu/drm/xe/xe_lmtt.c
++++ b/drivers/gpu/drm/xe/xe_lmtt.c
+@@ -78,6 +78,9 @@ static struct xe_lmtt_pt *lmtt_pt_alloc(struct xe_lmtt *lmtt, unsigned int level
+ 	}
+ 
+ 	lmtt_assert(lmtt, xe_bo_is_vram(bo));
++	lmtt_debug(lmtt, "level=%u addr=%#llx\n", level, (u64)xe_bo_main_addr(bo, XE_PAGE_SIZE));
++
++	xe_map_memset(lmtt_to_xe(lmtt), &bo->vmap, 0, 0, bo->size);
+ 
+ 	pt->level = level;
+ 	pt->bo = bo;
+@@ -91,6 +94,9 @@ static struct xe_lmtt_pt *lmtt_pt_alloc(struct xe_lmtt *lmtt, unsigned int level
+ 
+ static void lmtt_pt_free(struct xe_lmtt_pt *pt)
+ {
++	lmtt_debug(&pt->bo->tile->sriov.pf.lmtt, "level=%u addr=%llx\n",
++		   pt->level, (u64)xe_bo_main_addr(pt->bo, XE_PAGE_SIZE));
++
+ 	xe_bo_unpin_map_no_vm(pt->bo);
+ 	kfree(pt);
+ }
+@@ -226,9 +232,14 @@ static void lmtt_write_pte(struct xe_lmtt *lmtt, struct xe_lmtt_pt *pt,
+ 
+ 	switch (lmtt->ops->lmtt_pte_size(level)) {
+ 	case sizeof(u32):
++		lmtt_assert(lmtt, !overflows_type(pte, u32));
++		lmtt_assert(lmtt, !pte || !iosys_map_rd(&pt->bo->vmap, idx * sizeof(u32), u32));
++
+ 		xe_map_wr(lmtt_to_xe(lmtt), &pt->bo->vmap, idx * sizeof(u32), u32, pte);
+ 		break;
+ 	case sizeof(u64):
++		lmtt_assert(lmtt, !pte || !iosys_map_rd(&pt->bo->vmap, idx * sizeof(u64), u64));
++
+ 		xe_map_wr(lmtt_to_xe(lmtt), &pt->bo->vmap, idx * sizeof(u64), u64, pte);
+ 		break;
+ 	default:
+diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
+index 6431697c616939..c2da2691fd2b9b 100644
+--- a/drivers/gpu/drm/xe/xe_migrate.c
++++ b/drivers/gpu/drm/xe/xe_migrate.c
+@@ -860,7 +860,7 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
+ 		if (src_is_vram && xe_migrate_allow_identity(src_L0, &src_it))
+ 			xe_res_next(&src_it, src_L0);
+ 		else
+-			emit_pte(m, bb, src_L0_pt, src_is_vram, copy_system_ccs,
++			emit_pte(m, bb, src_L0_pt, src_is_vram, copy_system_ccs || use_comp_pat,
+ 				 &src_it, src_L0, src);
+ 
+ 		if (dst_is_vram && xe_migrate_allow_identity(src_L0, &dst_it))
+diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c
+index 23028afbbe1d1f..da09c26249f5fd 100644
+--- a/drivers/gpu/drm/xe/xe_pci.c
++++ b/drivers/gpu/drm/xe/xe_pci.c
+@@ -164,7 +164,6 @@ static const struct xe_graphics_desc graphics_xelpg = {
+ 	.has_asid = 1, \
+ 	.has_atomic_enable_pte_bit = 1, \
+ 	.has_flat_ccs = 1, \
+-	.has_indirect_ring_state = 1, \
+ 	.has_range_tlb_invalidation = 1, \
+ 	.has_usm = 1, \
+ 	.va_bits = 48, \
+diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c
+index 06f50aa313267a..46c73ff10c7471 100644
+--- a/drivers/gpu/drm/xe/xe_pm.c
++++ b/drivers/gpu/drm/xe/xe_pm.c
+@@ -682,11 +682,13 @@ void xe_pm_assert_unbounded_bridge(struct xe_device *xe)
+ }
+ 
+ /**
+- * xe_pm_set_vram_threshold - Set a vram threshold for allowing/blocking D3Cold
++ * xe_pm_set_vram_threshold - Set a VRAM threshold for allowing/blocking D3Cold
+  * @xe: xe device instance
+- * @threshold: VRAM size in bites for the D3cold threshold
++ * @threshold: VRAM size in MiB for the D3cold threshold
+  *
+- * Returns 0 for success, negative error code otherwise.
++ * Return:
++ * * 0		- success
++ * * -EINVAL	- invalid argument
+  */
+ int xe_pm_set_vram_threshold(struct xe_device *xe, u32 threshold)
+ {
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index c6424f6259487e..b472140421f5af 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -311,6 +311,8 @@
+ #define USB_DEVICE_ID_ASUS_AK1D		0x1125
+ #define USB_DEVICE_ID_CHICONY_TOSHIBA_WT10A	0x1408
+ #define USB_DEVICE_ID_CHICONY_ACER_SWITCH12	0x1421
++#define USB_DEVICE_ID_CHICONY_HP_5MP_CAMERA	0xb824
++#define USB_DEVICE_ID_CHICONY_HP_5MP_CAMERA2	0xb82c
+ 
+ #define USB_VENDOR_ID_CHUNGHWAT		0x2247
+ #define USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH	0x0001
+@@ -814,6 +816,7 @@
+ #define USB_DEVICE_ID_LENOVO_TPPRODOCK	0x6067
+ #define USB_DEVICE_ID_LENOVO_X1_COVER	0x6085
+ #define USB_DEVICE_ID_LENOVO_X1_TAB	0x60a3
++#define USB_DEVICE_ID_LENOVO_X1_TAB2	0x60a4
+ #define USB_DEVICE_ID_LENOVO_X1_TAB3	0x60b5
+ #define USB_DEVICE_ID_LENOVO_X12_TAB	0x60fe
+ #define USB_DEVICE_ID_LENOVO_X12_TAB2	0x61ae
+@@ -1518,4 +1521,7 @@
+ #define USB_VENDOR_ID_SIGNOTEC			0x2133
+ #define USB_DEVICE_ID_SIGNOTEC_VIEWSONIC_PD1011	0x0018
+ 
++#define USB_VENDOR_ID_SMARTLINKTECHNOLOGY              0x4c4a
++#define USB_DEVICE_ID_SMARTLINKTECHNOLOGY_4155         0x4155
++
+ #endif
+diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c
+index 56e530860caefb..8482852c662dd4 100644
+--- a/drivers/hid/hid-lenovo.c
++++ b/drivers/hid/hid-lenovo.c
+@@ -473,6 +473,7 @@ static int lenovo_input_mapping(struct hid_device *hdev,
+ 		return lenovo_input_mapping_tp10_ultrabook_kbd(hdev, hi, field,
+ 							       usage, bit, max);
+ 	case USB_DEVICE_ID_LENOVO_X1_TAB:
++	case USB_DEVICE_ID_LENOVO_X1_TAB2:
+ 	case USB_DEVICE_ID_LENOVO_X1_TAB3:
+ 		return lenovo_input_mapping_x1_tab_kbd(hdev, hi, field, usage, bit, max);
+ 	default:
+@@ -587,6 +588,7 @@ static ssize_t attr_fn_lock_store(struct device *dev,
+ 		break;
+ 	case USB_DEVICE_ID_LENOVO_TP10UBKBD:
+ 	case USB_DEVICE_ID_LENOVO_X1_TAB:
++	case USB_DEVICE_ID_LENOVO_X1_TAB2:
+ 	case USB_DEVICE_ID_LENOVO_X1_TAB3:
+ 		ret = lenovo_led_set_tp10ubkbd(hdev, TP10UBKBD_FN_LOCK_LED, value);
+ 		if (ret)
+@@ -781,6 +783,7 @@ static int lenovo_event(struct hid_device *hdev, struct hid_field *field,
+ 		return lenovo_event_cptkbd(hdev, field, usage, value);
+ 	case USB_DEVICE_ID_LENOVO_TP10UBKBD:
+ 	case USB_DEVICE_ID_LENOVO_X1_TAB:
++	case USB_DEVICE_ID_LENOVO_X1_TAB2:
+ 	case USB_DEVICE_ID_LENOVO_X1_TAB3:
+ 		return lenovo_event_tp10ubkbd(hdev, field, usage, value);
+ 	default:
+@@ -1062,6 +1065,7 @@ static int lenovo_led_brightness_set(struct led_classdev *led_cdev,
+ 		break;
+ 	case USB_DEVICE_ID_LENOVO_TP10UBKBD:
+ 	case USB_DEVICE_ID_LENOVO_X1_TAB:
++	case USB_DEVICE_ID_LENOVO_X1_TAB2:
+ 	case USB_DEVICE_ID_LENOVO_X1_TAB3:
+ 		ret = lenovo_led_set_tp10ubkbd(hdev, tp10ubkbd_led[led_nr], value);
+ 		break;
+@@ -1293,6 +1297,7 @@ static int lenovo_probe(struct hid_device *hdev,
+ 		break;
+ 	case USB_DEVICE_ID_LENOVO_TP10UBKBD:
+ 	case USB_DEVICE_ID_LENOVO_X1_TAB:
++	case USB_DEVICE_ID_LENOVO_X1_TAB2:
+ 	case USB_DEVICE_ID_LENOVO_X1_TAB3:
+ 		ret = lenovo_probe_tp10ubkbd(hdev);
+ 		break;
+@@ -1380,6 +1385,7 @@ static void lenovo_remove(struct hid_device *hdev)
+ 		break;
+ 	case USB_DEVICE_ID_LENOVO_TP10UBKBD:
+ 	case USB_DEVICE_ID_LENOVO_X1_TAB:
++	case USB_DEVICE_ID_LENOVO_X1_TAB2:
+ 	case USB_DEVICE_ID_LENOVO_X1_TAB3:
+ 		lenovo_remove_tp10ubkbd(hdev);
+ 		break;
+@@ -1430,6 +1436,8 @@ static const struct hid_device_id lenovo_devices[] = {
+ 	 */
+ 	{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+ 		     USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_TAB) },
++	{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
++		     USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_TAB2) },
+ 	{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+ 		     USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_TAB3) },
+ 	{ }
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index 93b5c648ef82c9..641292cfdaa6f9 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -2116,12 +2116,18 @@ static const struct hid_device_id mt_devices[] = {
+ 		HID_DEVICE(BUS_I2C, HID_GROUP_GENERIC,
+ 			USB_VENDOR_ID_LG, I2C_DEVICE_ID_LG_7010) },
+ 
+-	/* Lenovo X1 TAB Gen 2 */
++	/* Lenovo X1 TAB Gen 1 */
+ 	{ .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
+ 		HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8,
+ 			   USB_VENDOR_ID_LENOVO,
+ 			   USB_DEVICE_ID_LENOVO_X1_TAB) },
+ 
++	/* Lenovo X1 TAB Gen 2 */
++	{ .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
++		HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8,
++			   USB_VENDOR_ID_LENOVO,
++			   USB_DEVICE_ID_LENOVO_X1_TAB2) },
++
+ 	/* Lenovo X1 TAB Gen 3 */
+ 	{ .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
+ 		HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8,
+diff --git a/drivers/hid/hid-nintendo.c b/drivers/hid/hid-nintendo.c
+index 55153a2f79886b..2a3ae1068739d6 100644
+--- a/drivers/hid/hid-nintendo.c
++++ b/drivers/hid/hid-nintendo.c
+@@ -308,6 +308,7 @@ enum joycon_ctlr_state {
+ 	JOYCON_CTLR_STATE_INIT,
+ 	JOYCON_CTLR_STATE_READ,
+ 	JOYCON_CTLR_STATE_REMOVED,
++	JOYCON_CTLR_STATE_SUSPENDED,
+ };
+ 
+ /* Controller type received as part of device info */
+@@ -2754,14 +2755,46 @@ static void nintendo_hid_remove(struct hid_device *hdev)
+ 
+ static int nintendo_hid_resume(struct hid_device *hdev)
+ {
+-	int ret = joycon_init(hdev);
++	struct joycon_ctlr *ctlr = hid_get_drvdata(hdev);
++	int ret;
++
++	hid_dbg(hdev, "resume\n");
++	if (!joycon_using_usb(ctlr)) {
++		hid_dbg(hdev, "no-op resume for bt ctlr\n");
++		ctlr->ctlr_state = JOYCON_CTLR_STATE_READ;
++		return 0;
++	}
+ 
++	ret = joycon_init(hdev);
+ 	if (ret)
+-		hid_err(hdev, "Failed to restore controller after resume");
++		hid_err(hdev,
++			"Failed to restore controller after resume: %d\n",
++			ret);
++	else
++		ctlr->ctlr_state = JOYCON_CTLR_STATE_READ;
+ 
+ 	return ret;
+ }
+ 
++static int nintendo_hid_suspend(struct hid_device *hdev, pm_message_t message)
++{
++	struct joycon_ctlr *ctlr = hid_get_drvdata(hdev);
++
++	hid_dbg(hdev, "suspend: %d\n", message.event);
++	/*
++	 * Avoid any blocking loops in suspend/resume transitions.
++	 *
++	 * joycon_enforce_subcmd_rate() can result in repeated retries if for
++	 * whatever reason the controller stops providing input reports.
++	 *
++	 * This has been observed with bluetooth controllers which lose
++	 * connectivity prior to suspend (but not long enough to result in
++	 * complete disconnection).
++	 */
++	ctlr->ctlr_state = JOYCON_CTLR_STATE_SUSPENDED;
++	return 0;
++}
++
+ #endif
+ 
+ static const struct hid_device_id nintendo_hid_devices[] = {
+@@ -2800,6 +2833,7 @@ static struct hid_driver nintendo_hid_driver = {
+ 
+ #ifdef CONFIG_PM
+ 	.resume		= nintendo_hid_resume,
++	.suspend	= nintendo_hid_suspend,
+ #endif
+ };
+ static int __init nintendo_init(void)
+diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
+index 73979643315bfd..80372342c176af 100644
+--- a/drivers/hid/hid-quirks.c
++++ b/drivers/hid/hid-quirks.c
+@@ -747,6 +747,8 @@ static const struct hid_device_id hid_ignore_list[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_AVERMEDIA, USB_DEVICE_ID_AVER_FM_MR800) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_AXENTIA, USB_DEVICE_ID_AXENTIA_FM_RADIO) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_BERKSHIRE, USB_DEVICE_ID_BERKSHIRE_PCWD) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_HP_5MP_CAMERA) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_HP_5MP_CAMERA2) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_CIDC, 0x0103) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_RADIO_SI470X) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_RADIO_SI4713) },
+@@ -894,6 +896,7 @@ static const struct hid_device_id hid_ignore_list[] = {
+ #endif
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_YEALINK, USB_DEVICE_ID_YEALINK_P1K_P4K_B2K) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_HP_5MP_CAMERA_5473) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_SMARTLINKTECHNOLOGY, USB_DEVICE_ID_SMARTLINKTECHNOLOGY_4155) },
+ 	{ }
+ };
+ 
+diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
+index c1f30483600859..a799a89195c515 100644
+--- a/drivers/irqchip/Kconfig
++++ b/drivers/irqchip/Kconfig
+@@ -71,6 +71,7 @@ config ARM_VIC_NR
+ 
+ config IRQ_MSI_LIB
+ 	bool
++	select GENERIC_MSI_IRQ
+ 
+ config ARMADA_370_XP_IRQ
+ 	bool
+diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
+index c12359fd3a420c..0da1d0723f882e 100644
+--- a/drivers/md/md-bitmap.c
++++ b/drivers/md/md-bitmap.c
+@@ -2355,8 +2355,7 @@ static int bitmap_get_stats(void *data, struct md_bitmap_stats *stats)
+ 
+ 	if (!bitmap)
+ 		return -ENOENT;
+-	if (!bitmap->mddev->bitmap_info.external &&
+-	    !bitmap->storage.sb_page)
++	if (!bitmap->storage.sb_page)
+ 		return -EINVAL;
+ 	sb = kmap_local_page(bitmap->storage.sb_page);
+ 	stats->sync_size = le64_to_cpu(sb->sync_size);
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index 6b6cd753d61a9a..fe1599db69c848 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -3380,6 +3380,7 @@ static int raid1_reshape(struct mddev *mddev)
+ 	/* ok, everything is stopped */
+ 	oldpool = conf->r1bio_pool;
+ 	conf->r1bio_pool = newpool;
++	init_waitqueue_head(&conf->r1bio_pool.wait);
+ 
+ 	for (d = d2 = 0; d < conf->raid_disks; d++) {
+ 		struct md_rdev *rdev = conf->mirrors[d].rdev;
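The added init_waitqueue_head() is needed because the pool is assigned by value ('conf->r1bio_pool = newpool'): a wait_queue_head_t embeds a list head whose pointers refer to the structure's own address, so after a struct copy they still point into the old object. Any structure containing a waitqueue must be re-initialized at its new address, roughly:

	struct my_ctx new_ctx = old_ctx;	/* value copy */

	/* copied list pointers still reference old_ctx; re-anchor them */
	init_waitqueue_head(&new_ctx.wait);

(my_ctx/old_ctx are illustrative; in the patch the waitqueue lives inside the mempool.)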
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index cc194f6ec18dab..5cdc599fcad3ce 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -1181,8 +1181,11 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
+ 		}
+ 	}
+ 
+-	if (!regular_request_wait(mddev, conf, bio, r10_bio->sectors))
++	if (!regular_request_wait(mddev, conf, bio, r10_bio->sectors)) {
++		raid_end_bio_io(r10_bio);
+ 		return;
++	}
++
+ 	rdev = read_balance(conf, r10_bio, &max_sectors);
+ 	if (!rdev) {
+ 		if (err_rdev) {
+@@ -1368,8 +1371,11 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
+ 	}
+ 
+ 	sectors = r10_bio->sectors;
+-	if (!regular_request_wait(mddev, conf, bio, sectors))
++	if (!regular_request_wait(mddev, conf, bio, sectors)) {
++		raid_end_bio_io(r10_bio);
+ 		return;
++	}
++
+ 	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
+ 	    (mddev->reshape_backwards
+ 	     ? (bio->bi_iter.bi_sector < conf->reshape_safe &&
+diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
+index dbd4d8796f9b06..dbcf17fb3ef256 100644
+--- a/drivers/net/can/m_can/m_can.c
++++ b/drivers/net/can/m_can/m_can.c
+@@ -665,7 +665,7 @@ static int m_can_handle_lost_msg(struct net_device *dev)
+ 	struct can_frame *frame;
+ 	u32 timestamp = 0;
+ 
+-	netdev_err(dev, "msg lost in rxf0\n");
++	netdev_dbg(dev, "msg lost in rxf0\n");
+ 
+ 	stats->rx_errors++;
+ 	stats->rx_over_errors++;
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index ad4aec522f4f84..f4bafc71a7399b 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -11061,11 +11061,9 @@ static void bnxt_free_irq(struct bnxt *bp)
+ 
+ static int bnxt_request_irq(struct bnxt *bp)
+ {
++	struct cpu_rmap *rmap = NULL;
+ 	int i, j, rc = 0;
+ 	unsigned long flags = 0;
+-#ifdef CONFIG_RFS_ACCEL
+-	struct cpu_rmap *rmap;
+-#endif
+ 
+ 	rc = bnxt_setup_int_mode(bp);
+ 	if (rc) {
+@@ -11080,15 +11078,15 @@ static int bnxt_request_irq(struct bnxt *bp)
+ 		int map_idx = bnxt_cp_num_to_irq_num(bp, i);
+ 		struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
+ 
+-#ifdef CONFIG_RFS_ACCEL
+-		if (rmap && bp->bnapi[i]->rx_ring) {
++		if (IS_ENABLED(CONFIG_RFS_ACCEL) &&
++		    rmap && bp->bnapi[i]->rx_ring) {
+ 			rc = irq_cpu_rmap_add(rmap, irq->vector);
+ 			if (rc)
+ 				netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
+ 					    j);
+ 			j++;
+ 		}
+-#endif
++
+ 		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
+ 				 bp->bnapi[i]);
+ 		if (rc)
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+index 0dbb880a7aa0e7..71e14be2507e1e 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+@@ -487,7 +487,9 @@ static int bnxt_ets_validate(struct bnxt *bp, struct ieee_ets *ets, u8 *tc)
+ 
+ 		if ((ets->tc_tx_bw[i] || ets->tc_tsa[i]) && i > bp->max_tc)
+ 			return -EINVAL;
++	}
+ 
++	for (i = 0; i < max_tc; i++) {
+ 		switch (ets->tc_tsa[i]) {
+ 		case IEEE_8021QAZ_TSA_STRICT:
+ 			break;
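Splitting the loop separates validation from interpretation: the first pass bounds-checks every entry the user supplied, and only then does the second pass walk the range the hardware actually supports. A generic validate-then-apply shape, with assumed helpers:

	/* 1) reject anything out of range before acting on any of it */
	for (i = 0; i < n_entries; i++)
		if (entry_in_use(&tbl[i]) && i > hw_max)
			return -EINVAL;

	/* 2) interpret only the supported range */
	for (i = 0; i < hw_max; i++)
		apply(&tbl[i]);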
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+index 8726657f5cb9e0..844812bd653635 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+@@ -115,7 +115,7 @@ static void __bnxt_xmit_xdp_redirect(struct bnxt *bp,
+ 	tx_buf->action = XDP_REDIRECT;
+ 	tx_buf->xdpf = xdpf;
+ 	dma_unmap_addr_set(tx_buf, mapping, mapping);
+-	dma_unmap_len_set(tx_buf, len, 0);
++	dma_unmap_len_set(tx_buf, len, len);
+ }
+ 
+ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
+index a189038d88df03..246ddce753f929 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.h
++++ b/drivers/net/ethernet/ibm/ibmvnic.h
+@@ -211,7 +211,6 @@ struct ibmvnic_statistics {
+ 	u8 reserved[72];
+ } __packed __aligned(8);
+ 
+-#define NUM_TX_STATS 3
+ struct ibmvnic_tx_queue_stats {
+ 	u64 batched_packets;
+ 	u64 direct_packets;
+@@ -219,13 +218,18 @@ struct ibmvnic_tx_queue_stats {
+ 	u64 dropped_packets;
+ };
+ 
+-#define NUM_RX_STATS 3
++#define NUM_TX_STATS \
++	(sizeof(struct ibmvnic_tx_queue_stats) / sizeof(u64))
++
+ struct ibmvnic_rx_queue_stats {
+ 	u64 packets;
+ 	u64 bytes;
+ 	u64 interrupts;
+ };
+ 
++#define NUM_RX_STATS \
++	(sizeof(struct ibmvnic_rx_queue_stats) / sizeof(u64))
++
+ struct ibmvnic_acl_buffer {
+ 	__be32 len;
+ 	__be32 version;
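Deriving the counts from the structures removes magic numbers that can drift out of sync with the struct definitions (the tx stats gained a fourth u64 while NUM_TX_STATS stayed at 3). The sizeof-quotient idiom assumes every member is a u64 with no padding; a compile-time guard can pin that assumption down:

	#include <linux/build_bug.h>

	/* holds only while the struct is purely an array of u64s */
	static_assert(sizeof(struct ibmvnic_tx_queue_stats) % sizeof(u64) == 0);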
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+index 1e8b7d33070144..b5aac0e1a68ecf 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+@@ -18,7 +18,8 @@ enum {
+ 
+ enum {
+ 	MLX5E_TC_PRIO = 0,
+-	MLX5E_NIC_PRIO
++	MLX5E_PROMISC_PRIO,
++	MLX5E_NIC_PRIO,
+ };
+ 
+ struct mlx5e_flow_table {
+@@ -68,9 +69,13 @@ struct mlx5e_l2_table {
+ 				 MLX5_HASH_FIELD_SEL_DST_IP   |\
+ 				 MLX5_HASH_FIELD_SEL_IPSEC_SPI)
+ 
+-/* NIC prio FTS */
++/* NIC promisc FT level */
+ enum {
+ 	MLX5E_PROMISC_FT_LEVEL,
++};
++
++/* NIC prio FTS */
++enum {
+ 	MLX5E_VLAN_FT_LEVEL,
+ 	MLX5E_L2_FT_LEVEL,
+ 	MLX5E_TTC_FT_LEVEL,
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c
+index 298bb74ec5e942..d1d629697e285f 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c
+@@ -113,7 +113,7 @@ int mlx5e_dim_rx_change(struct mlx5e_rq *rq, bool enable)
+ 		__set_bit(MLX5E_RQ_STATE_DIM, &rq->state);
+ 	} else {
+ 		__clear_bit(MLX5E_RQ_STATE_DIM, &rq->state);
+-
++		synchronize_net();
+ 		mlx5e_dim_disable(rq->dim);
+ 		rq->dim = NULL;
+ 	}
+@@ -140,7 +140,7 @@ int mlx5e_dim_tx_change(struct mlx5e_txqsq *sq, bool enable)
+ 		__set_bit(MLX5E_SQ_STATE_DIM, &sq->state);
+ 	} else {
+ 		__clear_bit(MLX5E_SQ_STATE_DIM, &sq->state);
+-
++		synchronize_net();
+ 		mlx5e_dim_disable(sq->dim);
+ 		sq->dim = NULL;
+ 	}
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+index 05058710d2c79d..537e732085b22a 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+@@ -776,7 +776,7 @@ static int mlx5e_create_promisc_table(struct mlx5e_flow_steering *fs)
+ 	ft_attr.max_fte = MLX5E_PROMISC_TABLE_SIZE;
+ 	ft_attr.autogroup.max_num_groups = 1;
+ 	ft_attr.level = MLX5E_PROMISC_FT_LEVEL;
+-	ft_attr.prio = MLX5E_NIC_PRIO;
++	ft_attr.prio = MLX5E_PROMISC_PRIO;
+ 
+ 	ft->t = mlx5_create_auto_grouped_flow_table(fs->ns, &ft_attr);
+ 	if (IS_ERR(ft->t)) {
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+index 1bc88743d2dfa9..7ef0a4af89e48a 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+@@ -113,13 +113,16 @@
+ #define ETHTOOL_PRIO_NUM_LEVELS 1
+ #define ETHTOOL_NUM_PRIOS 11
+ #define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
+-/* Promiscuous, Vlan, mac, ttc, inner ttc, {UDP/ANY/aRFS/accel/{esp, esp_err}}, IPsec policy,
++/* Vlan, mac, ttc, inner ttc, {UDP/ANY/aRFS/accel/{esp, esp_err}}, IPsec policy,
+  * {IPsec RoCE MPV,Alias table},IPsec RoCE policy
+  */
+-#define KERNEL_NIC_PRIO_NUM_LEVELS 11
++#define KERNEL_NIC_PRIO_NUM_LEVELS 10
+ #define KERNEL_NIC_NUM_PRIOS 1
+-/* One more level for tc */
+-#define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)
++/* One more level for tc, and one more for promisc */
++#define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 2)
++
++#define KERNEL_NIC_PROMISC_NUM_PRIOS 1
++#define KERNEL_NIC_PROMISC_NUM_LEVELS 1
+ 
+ #define KERNEL_NIC_TC_NUM_PRIOS  1
+ #define KERNEL_NIC_TC_NUM_LEVELS 3
+@@ -187,6 +190,8 @@ static struct init_tree_node {
+ 			   ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ 				  ADD_MULTIPLE_PRIO(KERNEL_NIC_TC_NUM_PRIOS,
+ 						    KERNEL_NIC_TC_NUM_LEVELS),
++				  ADD_MULTIPLE_PRIO(KERNEL_NIC_PROMISC_NUM_PRIOS,
++						    KERNEL_NIC_PROMISC_NUM_LEVELS),
+ 				  ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS,
+ 						    KERNEL_NIC_PRIO_NUM_LEVELS))),
+ 		  ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
+diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
+index 9bac4083d8a091..876de6db63c4f4 100644
+--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
++++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
+@@ -28,6 +28,9 @@ static void mana_gd_init_pf_regs(struct pci_dev *pdev)
+ 	gc->db_page_base = gc->bar0_va +
+ 				mana_gd_r64(gc, GDMA_PF_REG_DB_PAGE_OFF);
+ 
++	gc->phys_db_page_base = gc->bar0_pa +
++				mana_gd_r64(gc, GDMA_PF_REG_DB_PAGE_OFF);
++
+ 	sriov_base_off = mana_gd_r64(gc, GDMA_SRIOV_REG_CFG_BASE_OFF);
+ 
+ 	sriov_base_va = gc->bar0_va + sriov_base_off;
+diff --git a/drivers/net/ethernet/renesas/rtsn.c b/drivers/net/ethernet/renesas/rtsn.c
+index 6b3f7fca8d1572..05c4b6c8c9c3d0 100644
+--- a/drivers/net/ethernet/renesas/rtsn.c
++++ b/drivers/net/ethernet/renesas/rtsn.c
+@@ -1259,7 +1259,12 @@ static int rtsn_probe(struct platform_device *pdev)
+ 	priv = netdev_priv(ndev);
+ 	priv->pdev = pdev;
+ 	priv->ndev = ndev;
++
+ 	priv->ptp_priv = rcar_gen4_ptp_alloc(pdev);
++	if (!priv->ptp_priv) {
++		ret = -ENOMEM;
++		goto error_free;
++	}
+ 
+ 	spin_lock_init(&priv->lock);
+ 	platform_set_drvdata(pdev, priv);
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
+index 7840bc403788ef..5dcc95bc0ad28b 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
+@@ -364,19 +364,17 @@ static int dwxgmac2_dma_interrupt(struct stmmac_priv *priv,
+ 	}
+ 
+ 	/* TX/RX NORMAL interrupts */
+-	if (likely(intr_status & XGMAC_NIS)) {
+-		if (likely(intr_status & XGMAC_RI)) {
+-			u64_stats_update_begin(&stats->syncp);
+-			u64_stats_inc(&stats->rx_normal_irq_n[chan]);
+-			u64_stats_update_end(&stats->syncp);
+-			ret |= handle_rx;
+-		}
+-		if (likely(intr_status & (XGMAC_TI | XGMAC_TBU))) {
+-			u64_stats_update_begin(&stats->syncp);
+-			u64_stats_inc(&stats->tx_normal_irq_n[chan]);
+-			u64_stats_update_end(&stats->syncp);
+-			ret |= handle_tx;
+-		}
++	if (likely(intr_status & XGMAC_RI)) {
++		u64_stats_update_begin(&stats->syncp);
++		u64_stats_inc(&stats->rx_normal_irq_n[chan]);
++		u64_stats_update_end(&stats->syncp);
++		ret |= handle_rx;
++	}
++	if (likely(intr_status & (XGMAC_TI | XGMAC_TBU))) {
++		u64_stats_update_begin(&stats->syncp);
++		u64_stats_inc(&stats->tx_normal_irq_n[chan]);
++		u64_stats_update_end(&stats->syncp);
++		ret |= handle_tx;
+ 	}
+ 
+ 	/* Clear interrupts */
+diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+index 393cc5192e90d1..6b5cff087686ec 100644
+--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
++++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+@@ -612,8 +612,6 @@ static struct sk_buff *am65_cpsw_build_skb(void *page_addr,
+ {
+ 	struct sk_buff *skb;
+ 
+-	len += AM65_CPSW_HEADROOM;
+-
+ 	skb = build_skb(page_addr, len);
+ 	if (unlikely(!skb))
+ 		return NULL;
+@@ -1217,7 +1215,7 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow,
+ 	}
+ 
+ 	skb = am65_cpsw_build_skb(page_addr, ndev,
+-				  AM65_CPSW_MAX_PACKET_SIZE, headroom);
++				  PAGE_SIZE, headroom);
+ 	if (unlikely(!skb)) {
+ 		new_page = page;
+ 		goto requeue;
+diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
+index ea2123ea6e387c..e711797a3a8cff 100644
+--- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c
++++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
+@@ -1624,7 +1624,7 @@ static void wx_set_num_queues(struct wx *wx)
+  */
+ static int wx_acquire_msix_vectors(struct wx *wx)
+ {
+-	struct irq_affinity affd = { .pre_vectors = 1 };
++	struct irq_affinity affd = { .post_vectors = 1 };
+ 	int nvecs, i;
+ 
+ 	/* We start by asking for one vector per queue pair */
+@@ -1661,16 +1661,17 @@ static int wx_acquire_msix_vectors(struct wx *wx)
+ 		return nvecs;
+ 	}
+ 
+-	wx->msix_entry->entry = 0;
+-	wx->msix_entry->vector = pci_irq_vector(wx->pdev, 0);
+ 	nvecs -= 1;
+ 	for (i = 0; i < nvecs; i++) {
+ 		wx->msix_q_entries[i].entry = i;
+-		wx->msix_q_entries[i].vector = pci_irq_vector(wx->pdev, i + 1);
++		wx->msix_q_entries[i].vector = pci_irq_vector(wx->pdev, i);
+ 	}
+ 
+ 	wx->num_q_vectors = nvecs;
+ 
++	wx->msix_entry->entry = nvecs;
++	wx->msix_entry->vector = pci_irq_vector(wx->pdev, nvecs);
++
+ 	return 0;
+ }
+ 
+@@ -2120,7 +2121,6 @@ static void wx_set_ivar(struct wx *wx, s8 direction,
+ 		wr32(wx, WX_PX_MISC_IVAR, ivar);
+ 	} else {
+ 		/* tx or rx causes */
+-		msix_vector += 1; /* offset for queue vectors */
+ 		msix_vector |= WX_PX_IVAR_ALLOC_VAL;
+ 		index = ((16 * (queue & 1)) + (8 * direction));
+ 		ivar = rd32(wx, WX_PX_IVAR(queue >> 1));
+@@ -2151,7 +2151,7 @@ void wx_write_eitr(struct wx_q_vector *q_vector)
+ 
+ 	itr_reg |= WX_PX_ITR_CNT_WDIS;
+ 
+-	wr32(wx, WX_PX_ITR(v_idx + 1), itr_reg);
++	wr32(wx, WX_PX_ITR(v_idx), itr_reg);
+ }
+ 
+ /**
+@@ -2197,9 +2197,9 @@ void wx_configure_vectors(struct wx *wx)
+ 		wx_write_eitr(q_vector);
+ 	}
+ 
+-	wx_set_ivar(wx, -1, 0, 0);
++	wx_set_ivar(wx, -1, 0, v_idx);
+ 	if (pdev->msix_enabled)
+-		wr32(wx, WX_PX_ITR(0), 1950);
++		wr32(wx, WX_PX_ITR(v_idx), 1950);
+ }
+ EXPORT_SYMBOL(wx_configure_vectors);
+ 
+diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h
+index b54bffda027b40..dbac133eacfc58 100644
+--- a/drivers/net/ethernet/wangxun/libwx/wx_type.h
++++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h
+@@ -1136,7 +1136,7 @@ struct wx {
+ };
+ 
+ #define WX_INTR_ALL (~0ULL)
+-#define WX_INTR_Q(i) BIT((i) + 1)
++#define WX_INTR_Q(i) BIT((i))
+ 
+ /* register operations */
+ #define wr32(a, reg, value)	writel((value), ((a)->hw_addr + (reg)))
+diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
+index 1be2a5cc4a83c3..d2fb77f1d876b3 100644
+--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
++++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
+@@ -154,7 +154,7 @@ static void ngbe_irq_enable(struct wx *wx, bool queues)
+ 	if (queues)
+ 		wx_intr_enable(wx, NGBE_INTR_ALL);
+ 	else
+-		wx_intr_enable(wx, NGBE_INTR_MISC);
++		wx_intr_enable(wx, NGBE_INTR_MISC(wx));
+ }
+ 
+ /**
+diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
+index f48ed7fc1805ab..f4dc4acbedaeaf 100644
+--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
++++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
+@@ -80,7 +80,7 @@
+ 				NGBE_PX_MISC_IEN_GPIO)
+ 
+ #define NGBE_INTR_ALL				0x1FF
+-#define NGBE_INTR_MISC				BIT(0)
++#define NGBE_INTR_MISC(A)			BIT((A)->num_q_vectors)
+ 
+ #define NGBE_PHY_CONFIG(reg_offset)		(0x14000 + ((reg_offset) * 4))
+ #define NGBE_CFG_LAN_SPEED			0x14440
+diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
+index c698f4ec751a2e..76d33c042eee59 100644
+--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
++++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
+@@ -21,7 +21,7 @@ void txgbe_irq_enable(struct wx *wx, bool queues)
+ 	wr32(wx, WX_PX_MISC_IEN, TXGBE_PX_MISC_IEN_MASK);
+ 
+ 	/* unmask interrupt */
+-	wx_intr_enable(wx, TXGBE_INTR_MISC);
++	wx_intr_enable(wx, TXGBE_INTR_MISC(wx));
+ 	if (queues)
+ 		wx_intr_enable(wx, TXGBE_INTR_QALL(wx));
+ }
+@@ -147,7 +147,7 @@ static irqreturn_t txgbe_misc_irq_thread_fn(int irq, void *data)
+ 		nhandled++;
+ 	}
+ 
+-	wx_intr_enable(wx, TXGBE_INTR_MISC);
++	wx_intr_enable(wx, TXGBE_INTR_MISC(wx));
+ 	return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE);
+ }
+ 
+diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
+index 8ea413a7abe9d3..5fe415f3f2ca9c 100644
+--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
++++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
+@@ -264,8 +264,8 @@ struct txgbe_fdir_filter {
+ #define TXGBE_DEFAULT_RX_WORK           128
+ #endif
+ 
+-#define TXGBE_INTR_MISC       BIT(0)
+-#define TXGBE_INTR_QALL(A)    GENMASK((A)->num_q_vectors, 1)
++#define TXGBE_INTR_MISC(A)    BIT((A)->num_q_vectors)
++#define TXGBE_INTR_QALL(A)    (TXGBE_INTR_MISC(A) - 1)
+ 
+ #define TXGBE_MAX_EITR        GENMASK(11, 3)
+ 
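Read together, the wangxun hunks move the miscellaneous (link/PCIe) interrupt from vector 0 to the last vector: queue vectors now start at entry 0, so the '+1' offsets disappear from IVAR programming, WX_PX_ITR indexing and WX_INTR_Q(); irq_affinity reserves a post_vectors slot instead of pre_vectors; and NGBE/TXGBE derive the misc bit from num_q_vectors. The resulting layout, as an assumed sketch:

	/* vectors 0..n-1: queue pairs; vector n: misc/link */
	for (i = 0; i < wx->num_q_vectors; i++)
		wx->msix_q_entries[i].vector = pci_irq_vector(wx->pdev, i);

	wx->msix_entry->entry  = wx->num_q_vectors;
	wx->msix_entry->vector = pci_irq_vector(wx->pdev, wx->num_q_vectors);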
+diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
+index 1072e2210aed32..6b93418224e7e2 100644
+--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
++++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
+@@ -1309,7 +1309,7 @@ ll_temac_ethtools_set_ringparam(struct net_device *ndev,
+ 	if (ering->rx_pending > RX_BD_NUM_MAX ||
+ 	    ering->rx_mini_pending ||
+ 	    ering->rx_jumbo_pending ||
+-	    ering->rx_pending > TX_BD_NUM_MAX)
++	    ering->tx_pending > TX_BD_NUM_MAX)
+ 		return -EINVAL;
+ 
+ 	if (netif_running(ndev))
+diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c
+index e3a5961dced9bb..ffca1cec4ec992 100644
+--- a/drivers/net/phy/microchip.c
++++ b/drivers/net/phy/microchip.c
+@@ -332,7 +332,7 @@ static void lan88xx_link_change_notify(struct phy_device *phydev)
+ 	 * As workaround, set to 10 before setting to 100
+ 	 * at forced 100 F/H mode.
+ 	 */
+-	if (!phydev->autoneg && phydev->speed == 100) {
++	if (phydev->state == PHY_NOLINK && !phydev->autoneg && phydev->speed == 100) {
+ 		/* disable phy interrupt */
+ 		temp = phy_read(phydev, LAN88XX_INT_MASK);
+ 		temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
+@@ -486,6 +486,7 @@ static struct phy_driver microchip_phy_driver[] = {
+ 	.config_init	= lan88xx_config_init,
+ 	.config_aneg	= lan88xx_config_aneg,
+ 	.link_change_notify = lan88xx_link_change_notify,
++	.soft_reset	= genphy_soft_reset,
+ 
+ 	/* Interrupt handling is broken, do not define related
+ 	 * functions to force polling.
+diff --git a/drivers/net/phy/qcom/at803x.c b/drivers/net/phy/qcom/at803x.c
+index 105602581a0336..ac909ad8a87b48 100644
+--- a/drivers/net/phy/qcom/at803x.c
++++ b/drivers/net/phy/qcom/at803x.c
+@@ -26,9 +26,6 @@
+ 
+ #define AT803X_LED_CONTROL			0x18
+ 
+-#define AT803X_PHY_MMD3_WOL_CTRL		0x8012
+-#define AT803X_WOL_EN				BIT(5)
+-
+ #define AT803X_REG_CHIP_CONFIG			0x1f
+ #define AT803X_BT_BX_REG_SEL			0x8000
+ 
+@@ -866,30 +863,6 @@ static int at8031_config_init(struct phy_device *phydev)
+ 	return at803x_config_init(phydev);
+ }
+ 
+-static int at8031_set_wol(struct phy_device *phydev,
+-			  struct ethtool_wolinfo *wol)
+-{
+-	int ret;
+-
+-	/* First setup MAC address and enable WOL interrupt */
+-	ret = at803x_set_wol(phydev, wol);
+-	if (ret)
+-		return ret;
+-
+-	if (wol->wolopts & WAKE_MAGIC)
+-		/* Enable WOL function for 1588 */
+-		ret = phy_modify_mmd(phydev, MDIO_MMD_PCS,
+-				     AT803X_PHY_MMD3_WOL_CTRL,
+-				     0, AT803X_WOL_EN);
+-	else
+-		/* Disable WoL function for 1588 */
+-		ret = phy_modify_mmd(phydev, MDIO_MMD_PCS,
+-				     AT803X_PHY_MMD3_WOL_CTRL,
+-				     AT803X_WOL_EN, 0);
+-
+-	return ret;
+-}
+-
+ static int at8031_config_intr(struct phy_device *phydev)
+ {
+ 	struct at803x_priv *priv = phydev->priv;
+diff --git a/drivers/net/phy/qcom/qca808x.c b/drivers/net/phy/qcom/qca808x.c
+index 5048304ccc9e85..c3aad0e6b700aa 100644
+--- a/drivers/net/phy/qcom/qca808x.c
++++ b/drivers/net/phy/qcom/qca808x.c
+@@ -633,7 +633,7 @@ static struct phy_driver qca808x_driver[] = {
+ 	.handle_interrupt	= at803x_handle_interrupt,
+ 	.get_tunable		= at803x_get_tunable,
+ 	.set_tunable		= at803x_set_tunable,
+-	.set_wol		= at803x_set_wol,
++	.set_wol		= at8031_set_wol,
+ 	.get_wol		= at803x_get_wol,
+ 	.get_features		= qca808x_get_features,
+ 	.config_aneg		= qca808x_config_aneg,
+diff --git a/drivers/net/phy/qcom/qcom-phy-lib.c b/drivers/net/phy/qcom/qcom-phy-lib.c
+index d28815ef56bbf3..af7d0d8e81be5c 100644
+--- a/drivers/net/phy/qcom/qcom-phy-lib.c
++++ b/drivers/net/phy/qcom/qcom-phy-lib.c
+@@ -115,6 +115,31 @@ int at803x_set_wol(struct phy_device *phydev,
+ }
+ EXPORT_SYMBOL_GPL(at803x_set_wol);
+ 
++int at8031_set_wol(struct phy_device *phydev,
++		   struct ethtool_wolinfo *wol)
++{
++	int ret;
++
++	/* First setup MAC address and enable WOL interrupt */
++	ret = at803x_set_wol(phydev, wol);
++	if (ret)
++		return ret;
++
++	if (wol->wolopts & WAKE_MAGIC)
++		/* Enable WOL function for 1588 */
++		ret = phy_modify_mmd(phydev, MDIO_MMD_PCS,
++				     AT803X_PHY_MMD3_WOL_CTRL,
++				     0, AT803X_WOL_EN);
++	else
++		/* Disable WoL function for 1588 */
++		ret = phy_modify_mmd(phydev, MDIO_MMD_PCS,
++				     AT803X_PHY_MMD3_WOL_CTRL,
++				     AT803X_WOL_EN, 0);
++
++	return ret;
++}
++EXPORT_SYMBOL_GPL(at8031_set_wol);
++
+ void at803x_get_wol(struct phy_device *phydev,
+ 		    struct ethtool_wolinfo *wol)
+ {
+diff --git a/drivers/net/phy/qcom/qcom.h b/drivers/net/phy/qcom/qcom.h
+index 4bb541728846d3..7f7151c8bacaa5 100644
+--- a/drivers/net/phy/qcom/qcom.h
++++ b/drivers/net/phy/qcom/qcom.h
+@@ -172,6 +172,9 @@
+ #define AT803X_LOC_MAC_ADDR_16_31_OFFSET	0x804B
+ #define AT803X_LOC_MAC_ADDR_32_47_OFFSET	0x804A
+ 
++#define AT803X_PHY_MMD3_WOL_CTRL		0x8012
++#define AT803X_WOL_EN				BIT(5)
++
+ #define AT803X_DEBUG_ADDR			0x1D
+ #define AT803X_DEBUG_DATA			0x1E
+ 
+@@ -215,6 +218,8 @@ int at803x_debug_reg_mask(struct phy_device *phydev, u16 reg,
+ int at803x_debug_reg_write(struct phy_device *phydev, u16 reg, u16 data);
+ int at803x_set_wol(struct phy_device *phydev,
+ 		   struct ethtool_wolinfo *wol);
++int at8031_set_wol(struct phy_device *phydev,
++		   struct ethtool_wolinfo *wol);
+ void at803x_get_wol(struct phy_device *phydev,
+ 		    struct ethtool_wolinfo *wol);
+ int at803x_ack_interrupt(struct phy_device *phydev);
+diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
+index 150aea7c9c3675..6a43f6d6e85cb3 100644
+--- a/drivers/net/phy/smsc.c
++++ b/drivers/net/phy/smsc.c
+@@ -155,10 +155,29 @@ static int smsc_phy_reset(struct phy_device *phydev)
+ 
+ static int lan87xx_config_aneg(struct phy_device *phydev)
+ {
+-	int rc;
++	u8 mdix_ctrl;
+ 	int val;
++	int rc;
++
++	/* When auto-negotiation is disabled (forced mode), the PHY's
++	 * Auto-MDIX will continue toggling the TX/RX pairs.
++	 *
++	 * To establish a stable link, we must select a fixed MDI mode.
++	 * If the user has not specified a fixed MDI mode (i.e., mdix_ctrl is
++	 * 'auto'), we default to ETH_TP_MDI. This choice of a ETH_TP_MDI mode
++	 * mirrors the behavior the hardware would exhibit if the AUTOMDIX_EN
++	 * strap were configured for a fixed MDI connection.
++	 */
++	if (phydev->autoneg == AUTONEG_DISABLE) {
++		if (phydev->mdix_ctrl == ETH_TP_MDI_AUTO)
++			mdix_ctrl = ETH_TP_MDI;
++		else
++			mdix_ctrl = phydev->mdix_ctrl;
++	} else {
++		mdix_ctrl = phydev->mdix_ctrl;
++	}
+ 
+-	switch (phydev->mdix_ctrl) {
++	switch (mdix_ctrl) {
+ 	case ETH_TP_MDI:
+ 		val = SPECIAL_CTRL_STS_OVRRD_AMDIX_;
+ 		break;
+@@ -167,7 +186,8 @@ static int lan87xx_config_aneg(struct phy_device *phydev)
+ 			SPECIAL_CTRL_STS_AMDIX_STATE_;
+ 		break;
+ 	case ETH_TP_MDI_AUTO:
+-		val = SPECIAL_CTRL_STS_AMDIX_ENABLE_;
++		val = SPECIAL_CTRL_STS_OVRRD_AMDIX_ |
++			SPECIAL_CTRL_STS_AMDIX_ENABLE_;
+ 		break;
+ 	default:
+ 		return genphy_config_aneg(phydev);
+@@ -183,7 +203,7 @@ static int lan87xx_config_aneg(struct phy_device *phydev)
+ 	rc |= val;
+ 	phy_write(phydev, SPECIAL_CTRL_STS, rc);
+ 
+-	phydev->mdix = phydev->mdix_ctrl;
++	phydev->mdix = mdix_ctrl;
+ 	return genphy_config_aneg(phydev);
+ }
+ 
+@@ -261,6 +281,33 @@ int lan87xx_read_status(struct phy_device *phydev)
+ }
+ EXPORT_SYMBOL_GPL(lan87xx_read_status);
+ 
++static int lan87xx_phy_config_init(struct phy_device *phydev)
++{
++	int rc;
++
++	/* The LAN87xx PHY's initial MDI-X mode is determined by the AUTOMDIX_EN
++	 * hardware strap, but the driver cannot read the strap's status. This
++	 * creates an unpredictable initial state.
++	 *
++	 * To ensure consistent and reliable behavior across all boards,
++	 * override the strap configuration on initialization and force the PHY
++	 * into a known state with Auto-MDIX enabled, which is the expected
++	 * default for modern hardware.
++	 */
++	rc = phy_modify(phydev, SPECIAL_CTRL_STS,
++			SPECIAL_CTRL_STS_OVRRD_AMDIX_ |
++			SPECIAL_CTRL_STS_AMDIX_ENABLE_ |
++			SPECIAL_CTRL_STS_AMDIX_STATE_,
++			SPECIAL_CTRL_STS_OVRRD_AMDIX_ |
++			SPECIAL_CTRL_STS_AMDIX_ENABLE_);
++	if (rc < 0)
++		return rc;
++
++	phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
++
++	return smsc_phy_config_init(phydev);
++}
++
+ static int lan874x_phy_config_init(struct phy_device *phydev)
+ {
+ 	u16 val;
+@@ -694,7 +741,7 @@ static struct phy_driver smsc_phy_driver[] = {
+ 
+ 	/* basic functions */
+ 	.read_status	= lan87xx_read_status,
+-	.config_init	= smsc_phy_config_init,
++	.config_init	= lan87xx_phy_config_init,
+ 	.soft_reset	= smsc_phy_reset,
+ 	.config_aneg	= lan87xx_config_aneg,
+ 
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 944a33361dae59..7e0608f5683531 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1426,6 +1426,7 @@ static const struct usb_device_id products[] = {
+ 	{QMI_QUIRK_SET_DTR(0x22de, 0x9051, 2)}, /* Hucom Wireless HM-211S/K */
+ 	{QMI_FIXED_INTF(0x22de, 0x9061, 3)},	/* WeTelecom WPD-600N */
+ 	{QMI_QUIRK_SET_DTR(0x1e0e, 0x9001, 5)},	/* SIMCom 7100E, 7230E, 7600E ++ */
++	{QMI_QUIRK_SET_DTR(0x1e0e, 0x9071, 3)},	/* SIMCom 8230C ++ */
+ 	{QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)},	/* Quectel EC21 Mini PCIe */
+ 	{QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)},	/* Quectel EG91 */
+ 	{QMI_QUIRK_SET_DTR(0x2c7c, 0x0195, 4)},	/* Quectel EG95 */
+diff --git a/drivers/net/wireless/marvell/mwifiex/util.c b/drivers/net/wireless/marvell/mwifiex/util.c
+index 1f1f6280a0f251..86e20edb593b3f 100644
+--- a/drivers/net/wireless/marvell/mwifiex/util.c
++++ b/drivers/net/wireless/marvell/mwifiex/util.c
+@@ -477,7 +477,9 @@ mwifiex_process_mgmt_packet(struct mwifiex_private *priv,
+ 				    "auth: receive authentication from %pM\n",
+ 				    ieee_hdr->addr3);
+ 		} else {
+-			if (!priv->wdev.connected)
++			if (!priv->wdev.connected ||
++			    !ether_addr_equal(ieee_hdr->addr3,
++					      priv->curr_bss_params.bss_descriptor.mac_address))
+ 				return 0;
+ 
+ 			if (ieee80211_is_deauth(ieee_hdr->frame_control)) {
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+index 452579ccc49228..a6324f6ead781f 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+@@ -1696,8 +1696,8 @@ int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
+ 		if (!sreq->ssids[i].ssid_len)
+ 			continue;
+ 
+-		req->ssids[i].ssid_len = cpu_to_le32(sreq->ssids[i].ssid_len);
+-		memcpy(req->ssids[i].ssid, sreq->ssids[i].ssid,
++		req->ssids[n_ssids].ssid_len = cpu_to_le32(sreq->ssids[i].ssid_len);
++		memcpy(req->ssids[n_ssids].ssid, sreq->ssids[i].ssid,
+ 		       sreq->ssids[i].ssid_len);
+ 		n_ssids++;
+ 	}
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+index 9c245c23a2d730..5b832f1aa00d72 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+@@ -1173,6 +1173,9 @@ static void mt7921_sta_set_decap_offload(struct ieee80211_hw *hw,
+ 	struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv;
+ 	struct mt792x_dev *dev = mt792x_hw_dev(hw);
+ 
++	if (!msta->deflink.wcid.sta)
++		return;
++
+ 	mt792x_mutex_acquire(dev);
+ 
+ 	if (enabled)
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/init.c b/drivers/net/wireless/mediatek/mt76/mt7925/init.c
+index 14553dcc61c577..02899320da5c1b 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7925/init.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7925/init.c
+@@ -52,6 +52,8 @@ static int mt7925_thermal_init(struct mt792x_phy *phy)
+ 
+ 	name = devm_kasprintf(&wiphy->dev, GFP_KERNEL, "mt7925_%s",
+ 			      wiphy_name(wiphy));
++	if (!name)
++		return -ENOMEM;
+ 
+ 	hwmon = devm_hwmon_device_register_with_groups(&wiphy->dev, name, phy,
+ 						       mt7925_hwmon_groups);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/main.c b/drivers/net/wireless/mediatek/mt76/mt7925/main.c
+index d2a98c92e1147d..ca5f1dc05815f4 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7925/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7925/main.c
+@@ -1565,6 +1565,9 @@ static void mt7925_sta_set_decap_offload(struct ieee80211_hw *hw,
+ 	unsigned long valid = mvif->valid_links;
+ 	u8 i;
+ 
++	if (!msta->vif)
++		return;
++
+ 	mt792x_mutex_acquire(dev);
+ 
+ 	valid = ieee80211_vif_is_mld(vif) ? mvif->valid_links : BIT(0);
+@@ -1579,6 +1582,9 @@ static void mt7925_sta_set_decap_offload(struct ieee80211_hw *hw,
+ 		else
+ 			clear_bit(MT_WCID_FLAG_HDR_TRANS, &mlink->wcid.flags);
+ 
++		if (!mlink->wcid.sta)
++			continue;
++
+ 		mt7925_mcu_wtbl_update_hdr_trans(dev, vif, sta, i);
+ 	}
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
+index 57a1db394dda46..2aeb9ba4256aba 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
+@@ -2823,8 +2823,8 @@ int mt7925_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
+ 		if (!sreq->ssids[i].ssid_len)
+ 			continue;
+ 
+-		ssid->ssids[i].ssid_len = cpu_to_le32(sreq->ssids[i].ssid_len);
+-		memcpy(ssid->ssids[i].ssid, sreq->ssids[i].ssid,
++		ssid->ssids[n_ssids].ssid_len = cpu_to_le32(sreq->ssids[i].ssid_len);
++		memcpy(ssid->ssids[n_ssids].ssid, sreq->ssids[i].ssid,
+ 		       sreq->ssids[i].ssid_len);
+ 		n_ssids++;
+ 	}
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/regs.h b/drivers/net/wireless/mediatek/mt76/mt7925/regs.h
+index 547489092c2947..341987e47f67a0 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7925/regs.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7925/regs.h
+@@ -58,7 +58,7 @@
+ 
+ #define MT_INT_TX_DONE_MCU		(MT_INT_TX_DONE_MCU_WM |	\
+ 					 MT_INT_TX_DONE_FWDL)
+-#define MT_INT_TX_DONE_ALL		(MT_INT_TX_DONE_MCU_WM |	\
++#define MT_INT_TX_DONE_ALL		(MT_INT_TX_DONE_MCU |	\
+ 					 MT_INT_TX_DONE_BAND0 |	\
+ 					GENMASK(18, 4))
+ 
+diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00soc.c b/drivers/net/wireless/ralink/rt2x00/rt2x00soc.c
+index eface610178d2e..f7f3a2340c3929 100644
+--- a/drivers/net/wireless/ralink/rt2x00/rt2x00soc.c
++++ b/drivers/net/wireless/ralink/rt2x00/rt2x00soc.c
+@@ -108,7 +108,7 @@ int rt2x00soc_probe(struct platform_device *pdev, const struct rt2x00_ops *ops)
+ }
+ EXPORT_SYMBOL_GPL(rt2x00soc_probe);
+ 
+-int rt2x00soc_remove(struct platform_device *pdev)
++void rt2x00soc_remove(struct platform_device *pdev)
+ {
+ 	struct ieee80211_hw *hw = platform_get_drvdata(pdev);
+ 	struct rt2x00_dev *rt2x00dev = hw->priv;
+@@ -119,8 +119,6 @@ int rt2x00soc_remove(struct platform_device *pdev)
+ 	rt2x00lib_remove_dev(rt2x00dev);
+ 	rt2x00soc_free_reg(rt2x00dev);
+ 	ieee80211_free_hw(hw);
+-
+-	return 0;
+ }
+ EXPORT_SYMBOL_GPL(rt2x00soc_remove);
+ 
+diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00soc.h b/drivers/net/wireless/ralink/rt2x00/rt2x00soc.h
+index 021fd06b362723..d6226b8a10e00b 100644
+--- a/drivers/net/wireless/ralink/rt2x00/rt2x00soc.h
++++ b/drivers/net/wireless/ralink/rt2x00/rt2x00soc.h
+@@ -17,7 +17,7 @@
+  * SoC driver handlers.
+  */
+ int rt2x00soc_probe(struct platform_device *pdev, const struct rt2x00_ops *ops);
+-int rt2x00soc_remove(struct platform_device *pdev);
++void rt2x00soc_remove(struct platform_device *pdev);
+ #ifdef CONFIG_PM
+ int rt2x00soc_suspend(struct platform_device *pdev, pm_message_t state);
+ int rt2x00soc_resume(struct platform_device *pdev);
+diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
+index f90c33d19b3993..8fd7be37e209c6 100644
+--- a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
++++ b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
+@@ -583,7 +583,11 @@ void zd_mac_tx_to_dev(struct sk_buff *skb, int error)
+ 
+ 		skb_queue_tail(q, skb);
+ 		while (skb_queue_len(q) > ZD_MAC_MAX_ACK_WAITERS) {
+-			zd_mac_tx_status(hw, skb_dequeue(q),
++			skb = skb_dequeue(q);
++			if (!skb)
++				break;
++
++			zd_mac_tx_status(hw, skb,
+ 					 mac->ack_pending ? mac->ack_signal : 0,
+ 					 NULL);
+ 			mac->ack_pending = 0;
+diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
+index b78e0e41732445..af370628e58393 100644
+--- a/drivers/pci/pci-acpi.c
++++ b/drivers/pci/pci-acpi.c
+@@ -1676,19 +1676,24 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
+ 		return NULL;
+ 
+ 	root_ops = kzalloc(sizeof(*root_ops), GFP_KERNEL);
+-	if (!root_ops)
+-		goto free_ri;
++	if (!root_ops) {
++		kfree(ri);
++		return NULL;
++	}
+ 
+ 	ri->cfg = pci_acpi_setup_ecam_mapping(root);
+-	if (!ri->cfg)
+-		goto free_root_ops;
++	if (!ri->cfg) {
++		kfree(ri);
++		kfree(root_ops);
++		return NULL;
++	}
+ 
+ 	root_ops->release_info = pci_acpi_generic_release_info;
+ 	root_ops->prepare_resources = pci_acpi_root_prepare_resources;
+ 	root_ops->pci_ops = (struct pci_ops *)&ri->cfg->ops->pci_ops;
+ 	bus = acpi_pci_root_create(root, root_ops, &ri->common, ri->cfg);
+ 	if (!bus)
+-		goto free_cfg;
++		return NULL;
+ 
+ 	/* If we must preserve the resource configuration, claim now */
+ 	host = pci_find_host_bridge(bus);
+@@ -1705,14 +1710,6 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
+ 		pcie_bus_configure_settings(child);
+ 
+ 	return bus;
+-
+-free_cfg:
+-	pci_ecam_free(ri->cfg);
+-free_root_ops:
+-	kfree(root_ops);
+-free_ri:
+-	kfree(ri);
+-	return NULL;
+ }
+ 
+ void pcibios_add_bus(struct pci_bus *bus)
+diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
+index a12766b3bc8a73..debf36ce57857e 100644
+--- a/drivers/pinctrl/pinctrl-amd.c
++++ b/drivers/pinctrl/pinctrl-amd.c
+@@ -933,6 +933,17 @@ static int amd_gpio_suspend_hibernate_common(struct device *dev, bool is_suspend
+ 				  pin, is_suspend ? "suspend" : "hibernate");
+ 		}
+ 
++		/*
++		 * debounce enabled over suspend has shown issues with a GPIO
++		 * being unable to wake the system, as we're only interested in
++		 * the actual wakeup event, clear it.
++		 */
++		if (gpio_dev->saved_regs[i] & (DB_CNTRl_MASK << DB_CNTRL_OFF)) {
++			amd_gpio_set_debounce(gpio_dev, pin, 0);
++			pm_pr_dbg("Clearing debounce for GPIO #%d during %s.\n",
++				  pin, is_suspend ? "suspend" : "hibernate");
++		}
++
+ 		raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
+ 	}
+ 
+diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
+index 018e96d921c050..5532328097894e 100644
+--- a/drivers/pinctrl/qcom/pinctrl-msm.c
++++ b/drivers/pinctrl/qcom/pinctrl-msm.c
+@@ -1035,6 +1035,25 @@ static bool msm_gpio_needs_dual_edge_parent_workaround(struct irq_data *d,
+ 	       test_bit(d->hwirq, pctrl->skip_wake_irqs);
+ }
+ 
++static void msm_gpio_irq_init_valid_mask(struct gpio_chip *gc,
++					 unsigned long *valid_mask,
++					 unsigned int ngpios)
++{
++	struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
++	const struct msm_pingroup *g;
++	int i;
++
++	bitmap_fill(valid_mask, ngpios);
++
++	for (i = 0; i < ngpios; i++) {
++		g = &pctrl->soc->groups[i];
++
++		if (g->intr_detection_width != 1 &&
++		    g->intr_detection_width != 2)
++			clear_bit(i, valid_mask);
++	}
++}
++
+ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
+ {
+ 	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+@@ -1438,6 +1457,7 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
+ 	girq->default_type = IRQ_TYPE_NONE;
+ 	girq->handler = handle_bad_irq;
+ 	girq->parents[0] = pctrl->irq;
++	girq->init_valid_mask = msm_gpio_irq_init_valid_mask;
+ 
+ 	ret = gpiochip_add_data(&pctrl->chip, pctrl);
+ 	if (ret) {
+diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
+index 174939359ae3eb..3697781c017932 100644
+--- a/drivers/pwm/core.c
++++ b/drivers/pwm/core.c
+@@ -148,7 +148,7 @@ static bool pwm_state_valid(const struct pwm_state *state)
+ 	 * and supposed to be ignored. So also ignore any strange values and
+ 	 * consider the state ok.
+ 	 */
+-	if (state->enabled)
++	if (!state->enabled)
+ 		return true;
+ 
+ 	if (!state->period)
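The one-character change undoes an inverted test: as the comment above it says, every field of a disabled state is ignored, so a disabled state is always valid and only an enabled one needs its period (and the checks that follow) examined. The intended predicate, with the later checks elided:

	static bool pwm_state_valid(const struct pwm_state *state)
	{
		/* disabled: remaining fields are don't-cares */
		if (!state->enabled)
			return true;

		/* enabled: a zero period can never be produced */
		if (!state->period)
			return false;

		return true;	/* further range checks omitted in this sketch */
	}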
+diff --git a/drivers/pwm/pwm-mediatek.c b/drivers/pwm/pwm-mediatek.c
+index 7eaab58314995c..33d3554b9197ab 100644
+--- a/drivers/pwm/pwm-mediatek.c
++++ b/drivers/pwm/pwm-mediatek.c
+@@ -130,8 +130,10 @@ static int pwm_mediatek_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ 		return ret;
+ 
+ 	clk_rate = clk_get_rate(pc->clk_pwms[pwm->hwpwm]);
+-	if (!clk_rate)
+-		return -EINVAL;
++	if (!clk_rate) {
++		ret = -EINVAL;
++		goto out;
++	}
+ 
+ 	/* Make sure we use the bus clock and not the 26MHz clock */
+ 	if (pc->soc->has_ck_26m_sel)
+@@ -150,9 +152,9 @@ static int pwm_mediatek_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ 	}
+ 
+ 	if (clkdiv > PWM_CLK_DIV_MAX) {
+-		pwm_mediatek_clk_disable(chip, pwm);
+ 		dev_err(pwmchip_parent(chip), "period of %d ns not supported\n", period_ns);
+-		return -EINVAL;
++		ret = -EINVAL;
++		goto out;
+ 	}
+ 
+ 	if (pc->soc->pwm45_fixup && pwm->hwpwm > 2) {
+@@ -169,9 +171,10 @@ static int pwm_mediatek_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ 	pwm_mediatek_writel(pc, pwm->hwpwm, reg_width, cnt_period);
+ 	pwm_mediatek_writel(pc, pwm->hwpwm, reg_thres, cnt_duty);
+ 
++out:
+ 	pwm_mediatek_clk_disable(chip, pwm);
+ 
+-	return 0;
++	return ret;
+ }
+ 
+ static int pwm_mediatek_enable(struct pwm_chip *chip, struct pwm_device *pwm)
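This conversion replaces early returns with a single exit label so that pwm_mediatek_clk_disable() runs on every path that enabled the clock. A generic, runnable sketch of the one-exit cleanup idiom; the clk_* helpers are trivial stubs, not the kernel API:

#include <errno.h>
#include <stdio.h>

static int clk_enabled;                               /* stub clock state */

static void clk_enable_stub(void)   { clk_enabled = 1; }
static void clk_disable_stub(void)  { clk_enabled = 0; }
static long clk_get_rate_stub(void) { return 0; }     /* simulate failure */

static int configure(void)
{
    int ret = 0;

    clk_enable_stub();
    if (!clk_get_rate_stub()) {
        ret = -EINVAL;
        goto out;                 /* early errors now share the cleanup */
    }
    /* ... compute divider, program registers ... */
out:
    clk_disable_stub();           /* the single place that undoes enable */
    return ret;
}

int main(void)
{
    printf("configure() = %d, clock left %s\n",
           configure(), clk_enabled ? "on (leak)" : "off");
    return 0;
}
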
+diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
+index be5564ed8c018a..5b09ce71345b64 100644
+--- a/drivers/tty/vt/vt.c
++++ b/drivers/tty/vt/vt.c
+@@ -4566,6 +4566,7 @@ void do_unblank_screen(int leaving_gfx)
+ 	set_palette(vc);
+ 	set_cursor(vc);
+ 	vt_event_post(VT_EVENT_UNBLANK, vc->vc_num, vc->vc_num);
++	notify_update(vc);
+ }
+ EXPORT_SYMBOL(do_unblank_screen);
+ 
+diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
+index 53d9fc41acc522..2412f81f441201 100644
+--- a/drivers/usb/gadget/function/u_serial.c
++++ b/drivers/usb/gadget/function/u_serial.c
+@@ -294,8 +294,8 @@ __acquires(&port->port_lock)
+ 			break;
+ 	}
+ 
+-	if (do_tty_wake && port->port.tty)
+-		tty_wakeup(port->port.tty);
++	if (do_tty_wake)
++		tty_port_tty_wakeup(&port->port);
+ 	return status;
+ }
+ 
+@@ -543,20 +543,16 @@ static int gs_alloc_requests(struct usb_ep *ep, struct list_head *head,
+ static int gs_start_io(struct gs_port *port)
+ {
+ 	struct list_head	*head = &port->read_pool;
+-	struct usb_ep		*ep;
++	struct usb_ep		*ep = port->port_usb->out;
+ 	int			status;
+ 	unsigned		started;
+ 
+-	if (!port->port_usb || !port->port.tty)
+-		return -EIO;
+-
+ 	/* Allocate RX and TX I/O buffers.  We can't easily do this much
+ 	 * earlier (with GFP_KERNEL) because the requests are coupled to
+ 	 * endpoints, as are the packet sizes we'll be using.  Different
+ 	 * configurations may use different endpoints with a given port;
+ 	 * and high speed vs full speed changes packet sizes too.
+ 	 */
+-	ep = port->port_usb->out;
+ 	status = gs_alloc_requests(ep, head, gs_read_complete,
+ 		&port->read_allocated);
+ 	if (status)
+@@ -577,7 +573,7 @@ static int gs_start_io(struct gs_port *port)
+ 		gs_start_tx(port);
+ 		/* Unblock any pending writes into our circular buffer, in case
+ 		 * we didn't in gs_start_tx() */
+-		tty_wakeup(port->port.tty);
++		tty_port_tty_wakeup(&port->port);
+ 	} else {
+ 		/* Free reqs only if we are still connected */
+ 		if (port->port_usb) {
+diff --git a/fs/btrfs/free-space-tree.c b/fs/btrfs/free-space-tree.c
+index 7ba50e133921a4..308abbf8855b0a 100644
+--- a/fs/btrfs/free-space-tree.c
++++ b/fs/btrfs/free-space-tree.c
+@@ -1104,11 +1104,21 @@ static int populate_free_space_tree(struct btrfs_trans_handle *trans,
+ 	ret = btrfs_search_slot_for_read(extent_root, &key, path, 1, 0);
+ 	if (ret < 0)
+ 		goto out_locked;
+-	ASSERT(ret == 0);
++	/*
++	 * If ret is 1 (no key found), it means this is an empty block group,
++	 * without any extents allocated from it and there's no block group
++	 * item (key BTRFS_BLOCK_GROUP_ITEM_KEY) located in the extent tree
++	 * because we are using the block group tree feature, so block group
++	 * items are stored in the block group tree. It also means there are no
++	 * extents allocated for block groups with a start offset beyond this
++	 * block group's end offset (this is the last, highest, block group).
++	 */
++	if (!btrfs_fs_compat_ro(trans->fs_info, BLOCK_GROUP_TREE))
++		ASSERT(ret == 0);
+ 
+ 	start = block_group->start;
+ 	end = block_group->start + block_group->length;
+-	while (1) {
++	while (ret == 0) {
+ 		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
+ 
+ 		if (key.type == BTRFS_EXTENT_ITEM_KEY ||
+@@ -1138,8 +1148,6 @@ static int populate_free_space_tree(struct btrfs_trans_handle *trans,
+ 		ret = btrfs_next_item(extent_root, path);
+ 		if (ret < 0)
+ 			goto out_locked;
+-		if (ret)
+-			break;
+ 	}
+ 	if (start < end) {
+ 		ret = __add_to_free_space_tree(trans, block_group, path2,
+diff --git a/fs/erofs/data.c b/fs/erofs/data.c
+index 722151d3fee8b4..91182d5e3a66c8 100644
+--- a/fs/erofs/data.c
++++ b/fs/erofs/data.c
+@@ -240,9 +240,11 @@ int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map)
+ 
+ /*
+  * bit 30: I/O error occurred on this folio
++ * bit 29: CPU has dirty data in D-caches (needs aliasing handling)
+  * bit 0 - 29: remaining parts to complete this folio
+  */
+-#define EROFS_ONLINEFOLIO_EIO			(1 << 30)
++#define EROFS_ONLINEFOLIO_EIO		30
++#define EROFS_ONLINEFOLIO_DIRTY		29
+ 
+ void erofs_onlinefolio_init(struct folio *folio)
+ {
+@@ -259,19 +261,23 @@ void erofs_onlinefolio_split(struct folio *folio)
+ 	atomic_inc((atomic_t *)&folio->private);
+ }
+ 
+-void erofs_onlinefolio_end(struct folio *folio, int err)
++void erofs_onlinefolio_end(struct folio *folio, int err, bool dirty)
+ {
+ 	int orig, v;
+ 
+ 	do {
+ 		orig = atomic_read((atomic_t *)&folio->private);
+-		v = (orig - 1) | (err ? EROFS_ONLINEFOLIO_EIO : 0);
++		DBG_BUGON(orig <= 0);
++		v = dirty << EROFS_ONLINEFOLIO_DIRTY;
++		v |= (orig - 1) | (!!err << EROFS_ONLINEFOLIO_EIO);
+ 	} while (atomic_cmpxchg((atomic_t *)&folio->private, orig, v) != orig);
+ 
+-	if (v & ~EROFS_ONLINEFOLIO_EIO)
++	if (v & (BIT(EROFS_ONLINEFOLIO_DIRTY) - 1))
+ 		return;
+ 	folio->private = 0;
+-	folio_end_read(folio, !(v & EROFS_ONLINEFOLIO_EIO));
++	if (v & BIT(EROFS_ONLINEFOLIO_DIRTY))
++		flush_dcache_folio(folio);
++	folio_end_read(folio, !(v & BIT(EROFS_ONLINEFOLIO_EIO)));
+ }
+ 
+ static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
+@@ -378,11 +384,16 @@ int erofs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+  */
+ static int erofs_read_folio(struct file *file, struct folio *folio)
+ {
++	trace_erofs_read_folio(folio, true);
++
+ 	return iomap_read_folio(folio, &erofs_iomap_ops);
+ }
+ 
+ static void erofs_readahead(struct readahead_control *rac)
+ {
++	trace_erofs_readahead(rac->mapping->host, readahead_index(rac),
++					readahead_count(rac), true);
++
+ 	return iomap_readahead(rac, &erofs_iomap_ops);
+ }
+ 
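The reworked folio accounting packs three things into one atomic word: a pending-parts count in the low bits, a dirty flag in bit 29, and an error flag in bit 30, all updated in a cmpxchg loop. A user-space model of that layout using C11 atomics; two parts complete here, the first of them dirty:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define BIT(n)  (1u << (n))
#define F_EIO   30
#define F_DIRTY 29

static _Atomic unsigned int priv;

static void part_end(int err, bool dirty)
{
    unsigned int orig, v;

    do {
        orig = atomic_load(&priv);
        v = (dirty ? BIT(F_DIRTY) : 0) |
            ((orig - 1) | (err ? BIT(F_EIO) : 0));
    } while (!atomic_compare_exchange_strong(&priv, &orig, v));

    if (!(v & (BIT(F_DIRTY) - 1)))     /* count below the flags hit zero */
        printf("folio done: %s%s\n",
               v & BIT(F_EIO) ? "EIO " : "ok ",
               v & BIT(F_DIRTY) ? "(flush dcache)" : "");
}

int main(void)
{
    atomic_store(&priv, 2);            /* two pending parts */
    part_end(0, true);
    part_end(0, false);                /* dirty bit persists to the end */
    return 0;
}
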
+diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c
+index eb318c7ddd80ed..dc61a6a8f69652 100644
+--- a/fs/erofs/decompressor.c
++++ b/fs/erofs/decompressor.c
+@@ -331,13 +331,11 @@ static int z_erofs_transform_plain(struct z_erofs_decompress_req *rq,
+ 		cur = min(cur, rq->outputsize);
+ 		if (cur && rq->out[0]) {
+ 			kin = kmap_local_page(rq->in[nrpages_in - 1]);
+-			if (rq->out[0] == rq->in[nrpages_in - 1]) {
++			if (rq->out[0] == rq->in[nrpages_in - 1])
+ 				memmove(kin + rq->pageofs_out, kin + pi, cur);
+-				flush_dcache_page(rq->out[0]);
+-			} else {
++			else
+ 				memcpy_to_page(rq->out[0], rq->pageofs_out,
+ 					       kin + pi, cur);
+-			}
+ 			kunmap_local(kin);
+ 		}
+ 		rq->outputsize -= cur;
+@@ -355,14 +353,12 @@ static int z_erofs_transform_plain(struct z_erofs_decompress_req *rq,
+ 			po = (rq->pageofs_out + cur + pi) & ~PAGE_MASK;
+ 			DBG_BUGON(no >= nrpages_out);
+ 			cnt = min(insz - pi, PAGE_SIZE - po);
+-			if (rq->out[no] == rq->in[ni]) {
++			if (rq->out[no] == rq->in[ni])
+ 				memmove(kin + po,
+ 					kin + rq->pageofs_in + pi, cnt);
+-				flush_dcache_page(rq->out[no]);
+-			} else if (rq->out[no]) {
++			else if (rq->out[no])
+ 				memcpy_to_page(rq->out[no], po,
+ 					       kin + rq->pageofs_in + pi, cnt);
+-			}
+ 			pi += cnt;
+ 		} while (pi < insz);
+ 		kunmap_local(kin);
+diff --git a/fs/erofs/fileio.c b/fs/erofs/fileio.c
+index 12e709d93445ea..c865a7a610306e 100644
+--- a/fs/erofs/fileio.c
++++ b/fs/erofs/fileio.c
+@@ -38,7 +38,7 @@ static void erofs_fileio_ki_complete(struct kiocb *iocb, long ret)
+ 	} else {
+ 		bio_for_each_folio_all(fi, &rq->bio) {
+ 			DBG_BUGON(folio_test_uptodate(fi.folio));
+-			erofs_onlinefolio_end(fi.folio, ret);
++			erofs_onlinefolio_end(fi.folio, ret, false);
+ 		}
+ 	}
+ 	bio_uninit(&rq->bio);
+@@ -158,7 +158,7 @@ static int erofs_fileio_scan_folio(struct erofs_fileio *io, struct folio *folio)
+ 		}
+ 		cur += len;
+ 	}
+-	erofs_onlinefolio_end(folio, err);
++	erofs_onlinefolio_end(folio, err, false);
+ 	return err;
+ }
+ 
+@@ -180,7 +180,7 @@ static void erofs_fileio_readahead(struct readahead_control *rac)
+ 	struct folio *folio;
+ 	int err;
+ 
+-	trace_erofs_readpages(inode, readahead_index(rac),
++	trace_erofs_readahead(inode, readahead_index(rac),
+ 			      readahead_count(rac), true);
+ 	while ((folio = readahead_folio(rac))) {
+ 		err = erofs_fileio_scan_folio(&io, folio);
+diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h
+index 2c11e8f3048e94..3d06fda70f318b 100644
+--- a/fs/erofs/internal.h
++++ b/fs/erofs/internal.h
+@@ -405,7 +405,7 @@ int erofs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+ int erofs_map_blocks(struct inode *inode, struct erofs_map_blocks *map);
+ void erofs_onlinefolio_init(struct folio *folio);
+ void erofs_onlinefolio_split(struct folio *folio);
+-void erofs_onlinefolio_end(struct folio *folio, int err);
++void erofs_onlinefolio_end(struct folio *folio, int err, bool dirty);
+ struct inode *erofs_iget(struct super_block *sb, erofs_nid_t nid);
+ int erofs_getattr(struct mnt_idmap *idmap, const struct path *path,
+ 		  struct kstat *stat, u32 request_mask,
+diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
+index 74521d7dbee1d8..94c1e2d64df961 100644
+--- a/fs/erofs/zdata.c
++++ b/fs/erofs/zdata.c
+@@ -12,12 +12,6 @@
+ #define Z_EROFS_PCLUSTER_MAX_PAGES	(Z_EROFS_PCLUSTER_MAX_SIZE / PAGE_SIZE)
+ #define Z_EROFS_INLINE_BVECS		2
+ 
+-/*
+- * let's leave a type here in case of introducing
+- * another tagged pointer later.
+- */
+-typedef void *z_erofs_next_pcluster_t;
+-
+ struct z_erofs_bvec {
+ 	struct page *page;
+ 	int offset;
+@@ -48,7 +42,7 @@ struct z_erofs_pcluster {
+ 	struct lockref lockref;
+ 
+ 	/* A: point to next chained pcluster or TAILs */
+-	z_erofs_next_pcluster_t next;
++	struct z_erofs_pcluster *next;
+ 
+ 	/* I: start block address of this pcluster */
+ 	erofs_off_t index;
+@@ -91,12 +85,11 @@ struct z_erofs_pcluster {
+ 
+ /* the end of a chain of pclusters */
+ #define Z_EROFS_PCLUSTER_TAIL           ((void *) 0x700 + POISON_POINTER_DELTA)
+-#define Z_EROFS_PCLUSTER_NIL            (NULL)
+ 
+ struct z_erofs_decompressqueue {
+ 	struct super_block *sb;
++	struct z_erofs_pcluster *head;
+ 	atomic_t pending_bios;
+-	z_erofs_next_pcluster_t head;
+ 
+ 	union {
+ 		struct completion done;
+@@ -460,39 +453,32 @@ int __init z_erofs_init_subsystem(void)
+ }
+ 
+ enum z_erofs_pclustermode {
++	/* It has previously been linked into another processing chain */
+ 	Z_EROFS_PCLUSTER_INFLIGHT,
+ 	/*
+-	 * a weak form of Z_EROFS_PCLUSTER_FOLLOWED, the difference is that it
+-	 * could be dispatched into bypass queue later due to uptodated managed
+-	 * pages. All related online pages cannot be reused for inplace I/O (or
+-	 * bvpage) since it can be directly decoded without I/O submission.
++	 * A weaker form of Z_EROFS_PCLUSTER_FOLLOWED; the difference is that it
++	 * may be dispatched to the bypass queue later due to uptodated managed
++	 * folios.  All file-backed folios related to this pcluster cannot be
++	 * reused for in-place I/O (or bvpage) since the pcluster may be decoded
++	 * in a separate queue (and thus out of order).
+ 	 */
+ 	Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE,
+ 	/*
+-	 * The pcluster was just linked to a decompression chain by us.  It can
+-	 * also be linked with the remaining pclusters, which means if the
+-	 * processing page is the tail page of a pcluster, this pcluster can
+-	 * safely use the whole page (since the previous pcluster is within the
+-	 * same chain) for in-place I/O, as illustrated below:
+-	 *  ___________________________________________________
+-	 * |  tail (partial) page  |    head (partial) page    |
+-	 * |  (of the current pcl) |   (of the previous pcl)   |
+-	 * |___PCLUSTER_FOLLOWED___|_____PCLUSTER_FOLLOWED_____|
+-	 *
+-	 * [  (*) the page above can be used as inplace I/O.   ]
++	 * The pcluster has just been linked to our processing chain.
++	 * File-backed folios (except for the head page) related to it can be
++	 * used for in-place I/O (or bvpage).
+ 	 */
+ 	Z_EROFS_PCLUSTER_FOLLOWED,
+ };
+ 
+-struct z_erofs_decompress_frontend {
++struct z_erofs_frontend {
+ 	struct inode *const inode;
+ 	struct erofs_map_blocks map;
+ 	struct z_erofs_bvec_iter biter;
+ 
+ 	struct page *pagepool;
+ 	struct page *candidate_bvpage;
+-	struct z_erofs_pcluster *pcl;
+-	z_erofs_next_pcluster_t owned_head;
++	struct z_erofs_pcluster *pcl, *head;
+ 	enum z_erofs_pclustermode mode;
+ 
+ 	erofs_off_t headoffset;
+@@ -501,11 +487,11 @@ struct z_erofs_decompress_frontend {
+ 	unsigned int icur;
+ };
+ 
+-#define DECOMPRESS_FRONTEND_INIT(__i) { \
+-	.inode = __i, .owned_head = Z_EROFS_PCLUSTER_TAIL, \
+-	.mode = Z_EROFS_PCLUSTER_FOLLOWED }
++#define Z_EROFS_DEFINE_FRONTEND(fe, i, ho) struct z_erofs_frontend fe = { \
++	.inode = i, .head = Z_EROFS_PCLUSTER_TAIL, \
++	.mode = Z_EROFS_PCLUSTER_FOLLOWED, .headoffset = ho }
+ 
+-static bool z_erofs_should_alloc_cache(struct z_erofs_decompress_frontend *fe)
++static bool z_erofs_should_alloc_cache(struct z_erofs_frontend *fe)
+ {
+ 	unsigned int cachestrategy = EROFS_I_SB(fe->inode)->opt.cache_strategy;
+ 
+@@ -522,7 +508,7 @@ static bool z_erofs_should_alloc_cache(struct z_erofs_decompress_frontend *fe)
+ 	return false;
+ }
+ 
+-static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
++static void z_erofs_bind_cache(struct z_erofs_frontend *fe)
+ {
+ 	struct address_space *mc = MNGD_MAPPING(EROFS_I_SB(fe->inode));
+ 	struct z_erofs_pcluster *pcl = fe->pcl;
+@@ -679,7 +665,7 @@ int z_erofs_init_super(struct super_block *sb)
+ }
+ 
+ /* callers must be with pcluster lock held */
+-static int z_erofs_attach_page(struct z_erofs_decompress_frontend *fe,
++static int z_erofs_attach_page(struct z_erofs_frontend *fe,
+ 			       struct z_erofs_bvec *bvec, bool exclusive)
+ {
+ 	struct z_erofs_pcluster *pcl = fe->pcl;
+@@ -725,7 +711,7 @@ static bool z_erofs_get_pcluster(struct z_erofs_pcluster *pcl)
+ 	return true;
+ }
+ 
+-static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
++static int z_erofs_register_pcluster(struct z_erofs_frontend *fe)
+ {
+ 	struct erofs_map_blocks *map = &fe->map;
+ 	struct super_block *sb = fe->inode->i_sb;
+@@ -750,9 +736,7 @@ static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
+ 	pcl->algorithmformat = map->m_algorithmformat;
+ 	pcl->length = 0;
+ 	pcl->partial = true;
+-
+-	/* new pclusters should be claimed as type 1, primary and followed */
+-	pcl->next = fe->owned_head;
++	pcl->next = fe->head;
+ 	pcl->pageofs_out = map->m_la & ~PAGE_MASK;
+ 	fe->mode = Z_EROFS_PCLUSTER_FOLLOWED;
+ 
+@@ -788,8 +772,7 @@ static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
+ 			goto err_out;
+ 		}
+ 	}
+-	fe->owned_head = &pcl->next;
+-	fe->pcl = pcl;
++	fe->head = fe->pcl = pcl;
+ 	return 0;
+ 
+ err_out:
+@@ -798,7 +781,7 @@ static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
+ 	return err;
+ }
+ 
+-static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe)
++static int z_erofs_pcluster_begin(struct z_erofs_frontend *fe)
+ {
+ 	struct erofs_map_blocks *map = &fe->map;
+ 	struct super_block *sb = fe->inode->i_sb;
+@@ -808,7 +791,7 @@ static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe)
+ 
+ 	DBG_BUGON(fe->pcl);
+ 	/* must be Z_EROFS_PCLUSTER_TAIL or pointed to previous pcluster */
+-	DBG_BUGON(fe->owned_head == Z_EROFS_PCLUSTER_NIL);
++	DBG_BUGON(!fe->head);
+ 
+ 	if (!(map->m_flags & EROFS_MAP_META)) {
+ 		while (1) {
+@@ -836,10 +819,9 @@ static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe)
+ 	if (ret == -EEXIST) {
+ 		mutex_lock(&fe->pcl->lock);
+ 		/* check if this pcluster hasn't been linked into any chain. */
+-		if (cmpxchg(&fe->pcl->next, Z_EROFS_PCLUSTER_NIL,
+-			    fe->owned_head) == Z_EROFS_PCLUSTER_NIL) {
++		if (!cmpxchg(&fe->pcl->next, NULL, fe->head)) {
+ 			/* .. so it can be attached to our submission chain */
+-			fe->owned_head = &fe->pcl->next;
++			fe->head = fe->pcl;
+ 			fe->mode = Z_EROFS_PCLUSTER_FOLLOWED;
+ 		} else {	/* otherwise, it belongs to an inflight chain */
+ 			fe->mode = Z_EROFS_PCLUSTER_INFLIGHT;
+@@ -872,24 +854,16 @@ static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe)
+ 	return 0;
+ }
+ 
+-/*
+- * keep in mind that no referenced pclusters will be freed
+- * only after a RCU grace period.
+- */
+ static void z_erofs_rcu_callback(struct rcu_head *head)
+ {
+-	z_erofs_free_pcluster(container_of(head,
+-			struct z_erofs_pcluster, rcu));
++	z_erofs_free_pcluster(container_of(head, struct z_erofs_pcluster, rcu));
+ }
+ 
+-static bool erofs_try_to_release_pcluster(struct erofs_sb_info *sbi,
++static bool __erofs_try_to_release_pcluster(struct erofs_sb_info *sbi,
+ 					  struct z_erofs_pcluster *pcl)
+ {
+-	int free = false;
+-
+-	spin_lock(&pcl->lockref.lock);
+ 	if (pcl->lockref.count)
+-		goto out;
++		return false;
+ 
+ 	/*
+ 	 * Note that all cached folios should be detached before deleted from
+@@ -897,7 +871,7 @@ static bool erofs_try_to_release_pcluster(struct erofs_sb_info *sbi,
+ 	 * orphan old pcluster when the new one is available in the tree.
+ 	 */
+ 	if (erofs_try_to_free_all_cached_folios(sbi, pcl))
+-		goto out;
++		return false;
+ 
+ 	/*
+ 	 * It's impossible to fail after the pcluster is freezed, but in order
+@@ -906,8 +880,16 @@ static bool erofs_try_to_release_pcluster(struct erofs_sb_info *sbi,
+ 	DBG_BUGON(__xa_erase(&sbi->managed_pslots, pcl->index) != pcl);
+ 
+ 	lockref_mark_dead(&pcl->lockref);
+-	free = true;
+-out:
++	return true;
++}
++
++static bool erofs_try_to_release_pcluster(struct erofs_sb_info *sbi,
++					  struct z_erofs_pcluster *pcl)
++{
++	bool free;
++
++	spin_lock(&pcl->lockref.lock);
++	free = __erofs_try_to_release_pcluster(sbi, pcl);
+ 	spin_unlock(&pcl->lockref.lock);
+ 	if (free) {
+ 		atomic_long_dec(&erofs_global_shrink_cnt);
+@@ -916,8 +898,7 @@ static bool erofs_try_to_release_pcluster(struct erofs_sb_info *sbi,
+ 	return free;
+ }
+ 
+-unsigned long z_erofs_shrink_scan(struct erofs_sb_info *sbi,
+-				  unsigned long nr_shrink)
++unsigned long z_erofs_shrink_scan(struct erofs_sb_info *sbi, unsigned long nr)
+ {
+ 	struct z_erofs_pcluster *pcl;
+ 	unsigned long index, freed = 0;
+@@ -930,7 +911,7 @@ unsigned long z_erofs_shrink_scan(struct erofs_sb_info *sbi,
+ 		xa_unlock(&sbi->managed_pslots);
+ 
+ 		++freed;
+-		if (!--nr_shrink)
++		if (!--nr)
+ 			return freed;
+ 		xa_lock(&sbi->managed_pslots);
+ 	}
+@@ -938,19 +919,28 @@ unsigned long z_erofs_shrink_scan(struct erofs_sb_info *sbi,
+ 	return freed;
+ }
+ 
+-static void z_erofs_put_pcluster(struct z_erofs_pcluster *pcl)
++static void z_erofs_put_pcluster(struct erofs_sb_info *sbi,
++		struct z_erofs_pcluster *pcl, bool try_free)
+ {
++	bool free = false;
++
+ 	if (lockref_put_or_lock(&pcl->lockref))
+ 		return;
+ 
+ 	DBG_BUGON(__lockref_is_dead(&pcl->lockref));
+-	if (pcl->lockref.count == 1)
+-		atomic_long_inc(&erofs_global_shrink_cnt);
+-	--pcl->lockref.count;
++	if (!--pcl->lockref.count) {
++		if (try_free && xa_trylock(&sbi->managed_pslots)) {
++			free = __erofs_try_to_release_pcluster(sbi, pcl);
++			xa_unlock(&sbi->managed_pslots);
++		}
++		atomic_long_add(!free, &erofs_global_shrink_cnt);
++	}
+ 	spin_unlock(&pcl->lockref.lock);
++	if (free)
++		call_rcu(&pcl->rcu, z_erofs_rcu_callback);
+ }
+ 
+-static void z_erofs_pcluster_end(struct z_erofs_decompress_frontend *fe)
++static void z_erofs_pcluster_end(struct z_erofs_frontend *fe)
+ {
+ 	struct z_erofs_pcluster *pcl = fe->pcl;
+ 
+@@ -963,13 +953,9 @@ static void z_erofs_pcluster_end(struct z_erofs_decompress_frontend *fe)
+ 	if (fe->candidate_bvpage)
+ 		fe->candidate_bvpage = NULL;
+ 
+-	/*
+-	 * if all pending pages are added, don't hold its reference
+-	 * any longer if the pcluster isn't hosted by ourselves.
+-	 */
++	/* Drop refcount if it doesn't belong to our processing chain */
+ 	if (fe->mode < Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE)
+-		z_erofs_put_pcluster(pcl);
+-
++		z_erofs_put_pcluster(EROFS_I_SB(fe->inode), pcl, false);
+ 	fe->pcl = NULL;
+ }
+ 
+@@ -998,7 +984,7 @@ static int z_erofs_read_fragment(struct super_block *sb, struct folio *folio,
+ 	return 0;
+ }
+ 
+-static int z_erofs_scan_folio(struct z_erofs_decompress_frontend *f,
++static int z_erofs_scan_folio(struct z_erofs_frontend *f,
+ 			      struct folio *folio, bool ra)
+ {
+ 	struct inode *const inode = f->inode;
+@@ -1087,7 +1073,7 @@ static int z_erofs_scan_folio(struct z_erofs_decompress_frontend *f,
+ 			tight = (bs == PAGE_SIZE);
+ 		}
+ 	} while ((end = cur) > 0);
+-	erofs_onlinefolio_end(folio, err);
++	erofs_onlinefolio_end(folio, err, false);
+ 	return err;
+ }
+ 
+@@ -1111,7 +1097,7 @@ static bool z_erofs_page_is_invalidated(struct page *page)
+ 	return !page_folio(page)->mapping && !z_erofs_is_shortlived_page(page);
+ }
+ 
+-struct z_erofs_decompress_backend {
++struct z_erofs_backend {
+ 	struct page *onstack_pages[Z_EROFS_ONSTACK_PAGES];
+ 	struct super_block *sb;
+ 	struct z_erofs_pcluster *pcl;
+@@ -1132,7 +1118,7 @@ struct z_erofs_bvec_item {
+ 	struct list_head list;
+ };
+ 
+-static void z_erofs_do_decompressed_bvec(struct z_erofs_decompress_backend *be,
++static void z_erofs_do_decompressed_bvec(struct z_erofs_backend *be,
+ 					 struct z_erofs_bvec *bvec)
+ {
+ 	int poff = bvec->offset + be->pcl->pageofs_out;
+@@ -1157,8 +1143,7 @@ static void z_erofs_do_decompressed_bvec(struct z_erofs_decompress_backend *be,
+ 	list_add(&item->list, &be->decompressed_secondary_bvecs);
+ }
+ 
+-static void z_erofs_fill_other_copies(struct z_erofs_decompress_backend *be,
+-				      int err)
++static void z_erofs_fill_other_copies(struct z_erofs_backend *be, int err)
+ {
+ 	unsigned int off0 = be->pcl->pageofs_out;
+ 	struct list_head *p, *n;
+@@ -1193,13 +1178,13 @@ static void z_erofs_fill_other_copies(struct z_erofs_decompress_backend *be,
+ 			cur += len;
+ 		}
+ 		kunmap_local(dst);
+-		erofs_onlinefolio_end(page_folio(bvi->bvec.page), err);
++		erofs_onlinefolio_end(page_folio(bvi->bvec.page), err, true);
+ 		list_del(p);
+ 		kfree(bvi);
+ 	}
+ }
+ 
+-static void z_erofs_parse_out_bvecs(struct z_erofs_decompress_backend *be)
++static void z_erofs_parse_out_bvecs(struct z_erofs_backend *be)
+ {
+ 	struct z_erofs_pcluster *pcl = be->pcl;
+ 	struct z_erofs_bvec_iter biter;
+@@ -1224,8 +1209,7 @@ static void z_erofs_parse_out_bvecs(struct z_erofs_decompress_backend *be)
+ 		z_erofs_put_shortlivedpage(be->pagepool, old_bvpage);
+ }
+ 
+-static int z_erofs_parse_in_bvecs(struct z_erofs_decompress_backend *be,
+-				  bool *overlapped)
++static int z_erofs_parse_in_bvecs(struct z_erofs_backend *be, bool *overlapped)
+ {
+ 	struct z_erofs_pcluster *pcl = be->pcl;
+ 	unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
+@@ -1260,8 +1244,7 @@ static int z_erofs_parse_in_bvecs(struct z_erofs_decompress_backend *be,
+ 	return err;
+ }
+ 
+-static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
+-				       int err)
++static int z_erofs_decompress_pcluster(struct z_erofs_backend *be, int err)
+ {
+ 	struct erofs_sb_info *const sbi = EROFS_SB(be->sb);
+ 	struct z_erofs_pcluster *pcl = be->pcl;
+@@ -1271,6 +1254,7 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
+ 	int i, j, jtop, err2;
+ 	struct page *page;
+ 	bool overlapped;
++	bool try_free = true;
+ 
+ 	mutex_lock(&pcl->lock);
+ 	be->nr_pages = PAGE_ALIGN(pcl->length + pcl->pageofs_out) >> PAGE_SHIFT;
+@@ -1328,9 +1312,12 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
+ 		/* managed folios are still left in compressed_bvecs[] */
+ 		for (i = 0; i < pclusterpages; ++i) {
+ 			page = be->compressed_pages[i];
+-			if (!page ||
+-			    erofs_folio_is_managed(sbi, page_folio(page)))
++			if (!page)
+ 				continue;
++			if (erofs_folio_is_managed(sbi, page_folio(page))) {
++				try_free = false;
++				continue;
++			}
+ 			(void)z_erofs_put_shortlivedpage(be->pagepool, page);
+ 			WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL);
+ 		}
+@@ -1348,7 +1335,7 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
+ 
+ 		DBG_BUGON(z_erofs_page_is_invalidated(page));
+ 		if (!z_erofs_is_shortlived_page(page)) {
+-			erofs_onlinefolio_end(page_folio(page), err);
++			erofs_onlinefolio_end(page_folio(page), err, true);
+ 			continue;
+ 		}
+ 		if (pcl->algorithmformat != Z_EROFS_COMPRESSION_LZ4) {
+@@ -1373,34 +1360,33 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
+ 	pcl->vcnt = 0;
+ 
+ 	/* pcluster lock MUST be taken before the following line */
+-	WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_NIL);
++	WRITE_ONCE(pcl->next, NULL);
+ 	mutex_unlock(&pcl->lock);
++
++	if (z_erofs_is_inline_pcluster(pcl))
++		z_erofs_free_pcluster(pcl);
++	else
++		z_erofs_put_pcluster(sbi, pcl, try_free);
+ 	return err;
+ }
+ 
+ static int z_erofs_decompress_queue(const struct z_erofs_decompressqueue *io,
+ 				    struct page **pagepool)
+ {
+-	struct z_erofs_decompress_backend be = {
++	struct z_erofs_backend be = {
+ 		.sb = io->sb,
+ 		.pagepool = pagepool,
+ 		.decompressed_secondary_bvecs =
+ 			LIST_HEAD_INIT(be.decompressed_secondary_bvecs),
++		.pcl = io->head,
+ 	};
+-	z_erofs_next_pcluster_t owned = io->head;
++	struct z_erofs_pcluster *next;
+ 	int err = io->eio ? -EIO : 0;
+ 
+-	while (owned != Z_EROFS_PCLUSTER_TAIL) {
+-		DBG_BUGON(owned == Z_EROFS_PCLUSTER_NIL);
+-
+-		be.pcl = container_of(owned, struct z_erofs_pcluster, next);
+-		owned = READ_ONCE(be.pcl->next);
+-
++	for (; be.pcl != Z_EROFS_PCLUSTER_TAIL; be.pcl = next) {
++		DBG_BUGON(!be.pcl);
++		next = READ_ONCE(be.pcl->next);
+ 		err = z_erofs_decompress_pcluster(&be, err) ?: err;
+-		if (z_erofs_is_inline_pcluster(be.pcl))
+-			z_erofs_free_pcluster(be.pcl);
+-		else
+-			z_erofs_put_pcluster(be.pcl);
+ 	}
+ 	return err;
+ }
+@@ -1465,7 +1451,7 @@ static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
+ }
+ 
+ static void z_erofs_fill_bio_vec(struct bio_vec *bvec,
+-				 struct z_erofs_decompress_frontend *f,
++				 struct z_erofs_frontend *f,
+ 				 struct z_erofs_pcluster *pcl,
+ 				 unsigned int nr,
+ 				 struct address_space *mc)
+@@ -1609,18 +1595,13 @@ enum {
+ 	NR_JOBQUEUES,
+ };
+ 
+-static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl,
+-				    z_erofs_next_pcluster_t qtail[],
+-				    z_erofs_next_pcluster_t owned_head)
++static void z_erofs_move_to_bypass_queue(struct z_erofs_pcluster *pcl,
++					 struct z_erofs_pcluster *next,
++					 struct z_erofs_pcluster **qtail[])
+ {
+-	z_erofs_next_pcluster_t *const submit_qtail = qtail[JQ_SUBMIT];
+-	z_erofs_next_pcluster_t *const bypass_qtail = qtail[JQ_BYPASS];
+-
+ 	WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_TAIL);
+-
+-	WRITE_ONCE(*submit_qtail, owned_head);
+-	WRITE_ONCE(*bypass_qtail, &pcl->next);
+-
++	WRITE_ONCE(*qtail[JQ_SUBMIT], next);
++	WRITE_ONCE(*qtail[JQ_BYPASS], pcl);
+ 	qtail[JQ_BYPASS] = &pcl->next;
+ }
+ 
+@@ -1649,15 +1630,15 @@ static void z_erofs_endio(struct bio *bio)
+ 		bio_put(bio);
+ }
+ 
+-static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
++static void z_erofs_submit_queue(struct z_erofs_frontend *f,
+ 				 struct z_erofs_decompressqueue *fgq,
+ 				 bool *force_fg, bool readahead)
+ {
+ 	struct super_block *sb = f->inode->i_sb;
+ 	struct address_space *mc = MNGD_MAPPING(EROFS_SB(sb));
+-	z_erofs_next_pcluster_t qtail[NR_JOBQUEUES];
++	struct z_erofs_pcluster **qtail[NR_JOBQUEUES];
+ 	struct z_erofs_decompressqueue *q[NR_JOBQUEUES];
+-	z_erofs_next_pcluster_t owned_head = f->owned_head;
++	struct z_erofs_pcluster *pcl, *next;
+ 	/* bio is NULL initially, so no need to initialize last_{index,bdev} */
+ 	erofs_off_t last_pa;
+ 	unsigned int nr_bios = 0;
+@@ -1673,22 +1654,19 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
+ 	qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head;
+ 
+ 	/* by default, all need io submission */
+-	q[JQ_SUBMIT]->head = owned_head;
++	q[JQ_SUBMIT]->head = next = f->head;
+ 
+ 	do {
+ 		struct erofs_map_dev mdev;
+-		struct z_erofs_pcluster *pcl;
+ 		erofs_off_t cur, end;
+ 		struct bio_vec bvec;
+ 		unsigned int i = 0;
+ 		bool bypass = true;
+ 
+-		DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_NIL);
+-		pcl = container_of(owned_head, struct z_erofs_pcluster, next);
+-		owned_head = READ_ONCE(pcl->next);
+-
++		pcl = next;
++		next = READ_ONCE(pcl->next);
+ 		if (z_erofs_is_inline_pcluster(pcl)) {
+-			move_to_bypass_jobqueue(pcl, qtail, owned_head);
++			z_erofs_move_to_bypass_queue(pcl, next, qtail);
+ 			continue;
+ 		}
+ 
+@@ -1760,8 +1738,8 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
+ 		if (!bypass)
+ 			qtail[JQ_SUBMIT] = &pcl->next;
+ 		else
+-			move_to_bypass_jobqueue(pcl, qtail, owned_head);
+-	} while (owned_head != Z_EROFS_PCLUSTER_TAIL);
++			z_erofs_move_to_bypass_queue(pcl, next, qtail);
++	} while (next != Z_EROFS_PCLUSTER_TAIL);
+ 
+ 	if (bio) {
+ 		if (erofs_is_fileio_mode(EROFS_SB(sb)))
+@@ -1785,17 +1763,16 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
+ 	z_erofs_decompress_kickoff(q[JQ_SUBMIT], nr_bios);
+ }
+ 
+-static int z_erofs_runqueue(struct z_erofs_decompress_frontend *f,
+-			    unsigned int ra_folios)
++static int z_erofs_runqueue(struct z_erofs_frontend *f, unsigned int rapages)
+ {
+ 	struct z_erofs_decompressqueue io[NR_JOBQUEUES];
+ 	struct erofs_sb_info *sbi = EROFS_I_SB(f->inode);
+-	bool force_fg = z_erofs_is_sync_decompress(sbi, ra_folios);
++	bool force_fg = z_erofs_is_sync_decompress(sbi, rapages);
+ 	int err;
+ 
+-	if (f->owned_head == Z_EROFS_PCLUSTER_TAIL)
++	if (f->head == Z_EROFS_PCLUSTER_TAIL)
+ 		return 0;
+-	z_erofs_submit_queue(f, io, &force_fg, !!ra_folios);
++	z_erofs_submit_queue(f, io, &force_fg, !!rapages);
+ 
+ 	/* handle bypass queue (no i/o pclusters) immediately */
+ 	err = z_erofs_decompress_queue(&io[JQ_BYPASS], &f->pagepool);
+@@ -1813,7 +1790,7 @@ static int z_erofs_runqueue(struct z_erofs_decompress_frontend *f,
+  * Since partial uptodate is still unimplemented for now, we have to use
+  * approximate readmore strategies as a start.
+  */
+-static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f,
++static void z_erofs_pcluster_readmore(struct z_erofs_frontend *f,
+ 		struct readahead_control *rac, bool backmost)
+ {
+ 	struct inode *inode = f->inode;
+@@ -1868,12 +1845,10 @@ static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f,
+ static int z_erofs_read_folio(struct file *file, struct folio *folio)
+ {
+ 	struct inode *const inode = folio->mapping->host;
+-	struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
++	Z_EROFS_DEFINE_FRONTEND(f, inode, folio_pos(folio));
+ 	int err;
+ 
+ 	trace_erofs_read_folio(folio, false);
+-	f.headoffset = (erofs_off_t)folio->index << PAGE_SHIFT;
+-
+ 	z_erofs_pcluster_readmore(&f, NULL, true);
+ 	err = z_erofs_scan_folio(&f, folio, false);
+ 	z_erofs_pcluster_readmore(&f, NULL, false);
+@@ -1893,17 +1868,13 @@ static int z_erofs_read_folio(struct file *file, struct folio *folio)
+ static void z_erofs_readahead(struct readahead_control *rac)
+ {
+ 	struct inode *const inode = rac->mapping->host;
+-	struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
++	Z_EROFS_DEFINE_FRONTEND(f, inode, readahead_pos(rac));
++	unsigned int nrpages = readahead_count(rac);
+ 	struct folio *head = NULL, *folio;
+-	unsigned int nr_folios;
+ 	int err;
+ 
+-	f.headoffset = readahead_pos(rac);
+-
++	trace_erofs_readahead(inode, readahead_index(rac), nrpages, false);
+ 	z_erofs_pcluster_readmore(&f, rac, true);
+-	nr_folios = readahead_count(rac);
+-	trace_erofs_readpages(inode, readahead_index(rac), nr_folios, false);
+-
+ 	while ((folio = readahead_folio(rac))) {
+ 		folio->private = head;
+ 		head = folio;
+@@ -1922,7 +1893,7 @@ static void z_erofs_readahead(struct readahead_control *rac)
+ 	z_erofs_pcluster_readmore(&f, rac, false);
+ 	z_erofs_pcluster_end(&f);
+ 
+-	(void)z_erofs_runqueue(&f, nr_folios);
++	(void)z_erofs_runqueue(&f, nrpages);
+ 	erofs_put_metabuf(&f.map.buf);
+ 	erofs_release_pages(&f.pagepool);
+ }
+diff --git a/fs/erofs/zutil.c b/fs/erofs/zutil.c
+index 75704f58ecfa92..0dd65cefce33e2 100644
+--- a/fs/erofs/zutil.c
++++ b/fs/erofs/zutil.c
+@@ -230,9 +230,10 @@ void erofs_shrinker_unregister(struct super_block *sb)
+ 	struct erofs_sb_info *const sbi = EROFS_SB(sb);
+ 
+ 	mutex_lock(&sbi->umount_mutex);
+-	/* clean up all remaining pclusters in memory */
+-	z_erofs_shrink_scan(sbi, ~0UL);
+-
++	while (!xa_empty(&sbi->managed_pslots)) {
++		z_erofs_shrink_scan(sbi, ~0UL);
++		cond_resched();
++	}
+ 	spin_lock(&erofs_sb_list_lock);
+ 	list_del(&sbi->list);
+ 	spin_unlock(&erofs_sb_list_lock);
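The unmount path now keeps invoking the shrinker until the xarray is actually empty, yielding between rounds, rather than trusting a single pass. The generic shape of that drain loop, with trivial stubs in place of the kernel primitives:

#include <stdio.h>

static int remaining = 5;        /* pretend pclusters still cached */

static int cache_empty(void) { return remaining == 0; }

static void shrink_once(void)
{
    remaining -= 2;              /* one pass may free only some entries */
    if (remaining < 0)
        remaining = 0;
}

static void cond_resched_stub(void) { /* kernel yield point; no-op here */ }

int main(void)
{
    while (!cache_empty()) {     /* drain completely, not just one pass */
        shrink_once();
        cond_resched_stub();
    }
    puts("managed_pslots empty, safe to unregister");
    return 0;
}
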
+diff --git a/fs/eventpoll.c b/fs/eventpoll.c
+index 1a06e462b6efba..99eed91d03ebee 100644
+--- a/fs/eventpoll.c
++++ b/fs/eventpoll.c
+@@ -854,7 +854,7 @@ static bool __ep_remove(struct eventpoll *ep, struct epitem *epi, bool force)
+ 	kfree_rcu(epi, rcu);
+ 
+ 	percpu_counter_dec(&ep->user->epoll_watches);
+-	return ep_refcount_dec_and_test(ep);
++	return true;
+ }
+ 
+ /*
+@@ -862,14 +862,14 @@ static bool __ep_remove(struct eventpoll *ep, struct epitem *epi, bool force)
+  */
+ static void ep_remove_safe(struct eventpoll *ep, struct epitem *epi)
+ {
+-	WARN_ON_ONCE(__ep_remove(ep, epi, false));
++	if (__ep_remove(ep, epi, false))
++		WARN_ON_ONCE(ep_refcount_dec_and_test(ep));
+ }
+ 
+ static void ep_clear_and_put(struct eventpoll *ep)
+ {
+ 	struct rb_node *rbp, *next;
+ 	struct epitem *epi;
+-	bool dispose;
+ 
+ 	/* We need to release all tasks waiting for these file */
+ 	if (waitqueue_active(&ep->poll_wait))
+@@ -902,10 +902,8 @@ static void ep_clear_and_put(struct eventpoll *ep)
+ 		cond_resched();
+ 	}
+ 
+-	dispose = ep_refcount_dec_and_test(ep);
+ 	mutex_unlock(&ep->mtx);
+-
+-	if (dispose)
++	if (ep_refcount_dec_and_test(ep))
+ 		ep_free(ep);
+ }
+ 
+@@ -1108,7 +1106,7 @@ void eventpoll_release_file(struct file *file)
+ 		dispose = __ep_remove(ep, epi, true);
+ 		mutex_unlock(&ep->mtx);
+ 
+-		if (dispose)
++		if (dispose && ep_refcount_dec_and_test(ep))
+ 			ep_free(ep);
+ 		goto again;
+ 	}
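The common theme of the eventpoll hunks is that the final reference may only be dropped after ep->mtx is released, because the drop can free the eventpoll along with its mutex. A distilled user-space sketch with pthreads; the obj_* names are invented:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
    pthread_mutex_t mtx;
    atomic_int refs;
};

static void obj_free(struct obj *o)
{
    pthread_mutex_destroy(&o->mtx);
    free(o);
}

static void obj_release(struct obj *o)
{
    pthread_mutex_lock(&o->mtx);
    /* ... tear down items while holding the lock ... */
    pthread_mutex_unlock(&o->mtx);

    /* drop the ref only after unlocking: freeing a locked mutex is UB */
    if (atomic_fetch_sub(&o->refs, 1) == 1)
        obj_free(o);
}

int main(void)
{
    struct obj *o = malloc(sizeof(*o));
    pthread_mutex_init(&o->mtx, NULL);
    atomic_init(&o->refs, 1);
    obj_release(o);
    puts("released safely");
    return 0;
}
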
+diff --git a/fs/netfs/write_collect.c b/fs/netfs/write_collect.c
+index 7cb21da40a0a48..a968688a732342 100644
+--- a/fs/netfs/write_collect.c
++++ b/fs/netfs/write_collect.c
+@@ -285,7 +285,7 @@ static void netfs_retry_write_stream(struct netfs_io_request *wreq,
+ 			trace_netfs_sreq_ref(wreq->debug_id, subreq->debug_index,
+ 					     refcount_read(&subreq->ref),
+ 					     netfs_sreq_trace_new);
+-			netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
++			trace_netfs_sreq(subreq, netfs_sreq_trace_split);
+ 
+ 			list_add(&subreq->rreq_link, &to->rreq_link);
+ 			to = list_next_entry(to, rreq_link);
+diff --git a/fs/proc/inode.c b/fs/proc/inode.c
+index a3eb3b740f7664..3604b616311c27 100644
+--- a/fs/proc/inode.c
++++ b/fs/proc/inode.c
+@@ -42,7 +42,7 @@ static void proc_evict_inode(struct inode *inode)
+ 
+ 	head = ei->sysctl;
+ 	if (head) {
+-		RCU_INIT_POINTER(ei->sysctl, NULL);
++		WRITE_ONCE(ei->sysctl, NULL);
+ 		proc_sys_evict_inode(inode, head);
+ 	}
+ }
+diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
+index d11ebc055ce0dd..e785db5fa499ed 100644
+--- a/fs/proc/proc_sysctl.c
++++ b/fs/proc/proc_sysctl.c
+@@ -911,17 +911,21 @@ static int proc_sys_compare(const struct dentry *dentry,
+ 	struct ctl_table_header *head;
+ 	struct inode *inode;
+ 
+-	/* Although proc doesn't have negative dentries, rcu-walk means
+-	 * that inode here can be NULL */
+-	/* AV: can it, indeed? */
+-	inode = d_inode_rcu(dentry);
+-	if (!inode)
+-		return 1;
+ 	if (name->len != len)
+ 		return 1;
+ 	if (memcmp(name->name, str, len))
+ 		return 1;
+-	head = rcu_dereference(PROC_I(inode)->sysctl);
++
++	// false positive is fine here - we'll recheck anyway
++	if (d_in_lookup(dentry))
++		return 0;
++
++	inode = d_inode_rcu(dentry);
++	// we just might have run into dentry in the middle of __dentry_kill()
++	if (!inode)
++		return 1;
++
++	head = READ_ONCE(PROC_I(inode)->sysctl);
+ 	return !head || !sysctl_is_seen(head);
+ }
+ 
+diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
+index 96fe904b2ac587..72a58681f0316b 100644
+--- a/fs/proc/task_mmu.c
++++ b/fs/proc/task_mmu.c
+@@ -36,9 +36,9 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
+ 	unsigned long text, lib, swap, anon, file, shmem;
+ 	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;
+ 
+-	anon = get_mm_counter(mm, MM_ANONPAGES);
+-	file = get_mm_counter(mm, MM_FILEPAGES);
+-	shmem = get_mm_counter(mm, MM_SHMEMPAGES);
++	anon = get_mm_counter_sum(mm, MM_ANONPAGES);
++	file = get_mm_counter_sum(mm, MM_FILEPAGES);
++	shmem = get_mm_counter_sum(mm, MM_SHMEMPAGES);
+ 
+ 	/*
+ 	 * Note: to minimize their overhead, mm maintains hiwater_vm and
+@@ -59,7 +59,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
+ 	text = min(text, mm->exec_vm << PAGE_SHIFT);
+ 	lib = (mm->exec_vm << PAGE_SHIFT) - text;
+ 
+-	swap = get_mm_counter(mm, MM_SWAPENTS);
++	swap = get_mm_counter_sum(mm, MM_SWAPENTS);
+ 	SEQ_PUT_DEC("VmPeak:\t", hiwater_vm);
+ 	SEQ_PUT_DEC(" kB\nVmSize:\t", total_vm);
+ 	SEQ_PUT_DEC(" kB\nVmLck:\t", mm->locked_vm);
+@@ -92,12 +92,12 @@ unsigned long task_statm(struct mm_struct *mm,
+ 			 unsigned long *shared, unsigned long *text,
+ 			 unsigned long *data, unsigned long *resident)
+ {
+-	*shared = get_mm_counter(mm, MM_FILEPAGES) +
+-			get_mm_counter(mm, MM_SHMEMPAGES);
++	*shared = get_mm_counter_sum(mm, MM_FILEPAGES) +
++			get_mm_counter_sum(mm, MM_SHMEMPAGES);
+ 	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
+ 								>> PAGE_SHIFT;
+ 	*data = mm->data_vm + mm->stack_vm;
+-	*resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
++	*resident = *shared + get_mm_counter_sum(mm, MM_ANONPAGES);
+ 	return mm->total_vm;
+ }
+ 
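Switching from get_mm_counter() to get_mm_counter_sum() trades speed for accuracy: the cheap read returns only the shared count, while the sum also folds in every CPU's unflushed delta. A toy model of why the cheap read can lag; BATCH and the CPU count are invented values:

#include <stdio.h>

#define NR_CPUS 4
#define BATCH   32

static long shared_count;
static long cpu_delta[NR_CPUS];

static void counter_add(int cpu, long v)
{
    cpu_delta[cpu] += v;
    if (cpu_delta[cpu] >= BATCH || cpu_delta[cpu] <= -BATCH) {
        shared_count += cpu_delta[cpu];   /* fold into the shared word */
        cpu_delta[cpu] = 0;
    }
}

static long counter_read(void) { return shared_count; }   /* approximate */

static long counter_sum(void)                             /* exact */
{
    long v = shared_count;
    for (int cpu = 0; cpu < NR_CPUS; cpu++)
        v += cpu_delta[cpu];
    return v;
}

int main(void)
{
    for (int cpu = 0; cpu < NR_CPUS; cpu++)
        counter_add(cpu, 10);             /* 40 pages, all below BATCH */
    printf("read=%ld sum=%ld\n", counter_read(), counter_sum());
    return 0;
}
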
+diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
+index 5d2324c09a0704..a97a2885730da2 100644
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -8517,11 +8517,6 @@ static void smb20_oplock_break_ack(struct ksmbd_work *work)
+ 		goto err_out;
+ 	}
+ 
+-	opinfo->op_state = OPLOCK_STATE_NONE;
+-	wake_up_interruptible_all(&opinfo->oplock_q);
+-	opinfo_put(opinfo);
+-	ksmbd_fd_put(work, fp);
+-
+ 	rsp->StructureSize = cpu_to_le16(24);
+ 	rsp->OplockLevel = rsp_oplevel;
+ 	rsp->Reserved = 0;
+@@ -8529,16 +8524,15 @@ static void smb20_oplock_break_ack(struct ksmbd_work *work)
+ 	rsp->VolatileFid = volatile_id;
+ 	rsp->PersistentFid = persistent_id;
+ 	ret = ksmbd_iov_pin_rsp(work, rsp, sizeof(struct smb2_oplock_break));
+-	if (!ret)
+-		return;
+-
++	if (ret) {
+ err_out:
++		smb2_set_err_rsp(work);
++	}
++
+ 	opinfo->op_state = OPLOCK_STATE_NONE;
+ 	wake_up_interruptible_all(&opinfo->oplock_q);
+-
+ 	opinfo_put(opinfo);
+ 	ksmbd_fd_put(work, fp);
+-	smb2_set_err_rsp(work);
+ }
+ 
+ static int check_lease_state(struct lease *lease, __le32 req_state)
+@@ -8668,11 +8662,6 @@ static void smb21_lease_break_ack(struct ksmbd_work *work)
+ 	}
+ 
+ 	lease_state = lease->state;
+-	opinfo->op_state = OPLOCK_STATE_NONE;
+-	wake_up_interruptible_all(&opinfo->oplock_q);
+-	atomic_dec(&opinfo->breaking_cnt);
+-	wake_up_interruptible_all(&opinfo->oplock_brk);
+-	opinfo_put(opinfo);
+ 
+ 	rsp->StructureSize = cpu_to_le16(36);
+ 	rsp->Reserved = 0;
+@@ -8681,16 +8670,16 @@ static void smb21_lease_break_ack(struct ksmbd_work *work)
+ 	rsp->LeaseState = lease_state;
+ 	rsp->LeaseDuration = 0;
+ 	ret = ksmbd_iov_pin_rsp(work, rsp, sizeof(struct smb2_lease_ack));
+-	if (!ret)
+-		return;
+-
++	if (ret) {
+ err_out:
++		smb2_set_err_rsp(work);
++	}
++
++	opinfo->op_state = OPLOCK_STATE_NONE;
+ 	wake_up_interruptible_all(&opinfo->oplock_q);
+ 	atomic_dec(&opinfo->breaking_cnt);
+ 	wake_up_interruptible_all(&opinfo->oplock_brk);
+-
+ 	opinfo_put(opinfo);
+-	smb2_set_err_rsp(work);
+ }
+ 
+ /**
+diff --git a/fs/smb/server/transport_rdma.c b/fs/smb/server/transport_rdma.c
+index 6921d62934bcb0..3ab8c04f72e48f 100644
+--- a/fs/smb/server/transport_rdma.c
++++ b/fs/smb/server/transport_rdma.c
+@@ -432,7 +432,8 @@ static void free_transport(struct smb_direct_transport *t)
+ 	if (t->qp) {
+ 		ib_drain_qp(t->qp);
+ 		ib_mr_pool_destroy(t->qp, &t->qp->rdma_mrs);
+-		ib_destroy_qp(t->qp);
++		t->qp = NULL;
++		rdma_destroy_qp(t->cm_id);
+ 	}
+ 
+ 	ksmbd_debug(RDMA, "drain the reassembly queue\n");
+@@ -1939,8 +1940,8 @@ static int smb_direct_create_qpair(struct smb_direct_transport *t,
+ 	return 0;
+ err:
+ 	if (t->qp) {
+-		ib_destroy_qp(t->qp);
+ 		t->qp = NULL;
++		rdma_destroy_qp(t->cm_id);
+ 	}
+ 	if (t->recv_cq) {
+ 		ib_destroy_cq(t->recv_cq);
+diff --git a/fs/smb/server/vfs.c b/fs/smb/server/vfs.c
+index 59ae63ab868574..a662aae5126c0a 100644
+--- a/fs/smb/server/vfs.c
++++ b/fs/smb/server/vfs.c
+@@ -1298,6 +1298,7 @@ int ksmbd_vfs_kern_path_locked(struct ksmbd_work *work, char *name,
+ 
+ 		err = ksmbd_vfs_lock_parent(parent_path->dentry, path->dentry);
+ 		if (err) {
++			mnt_drop_write(parent_path->mnt);
+ 			path_put(path);
+ 			path_put(parent_path);
+ 		}
+diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h
+index 8c0030c7730816..2bc88d2d4a84e4 100644
+--- a/include/drm/drm_file.h
++++ b/include/drm/drm_file.h
+@@ -300,6 +300,9 @@ struct drm_file {
+ 	 *
+ 	 * Mapping of mm object handles to object pointers. Used by the GEM
+ 	 * subsystem. Protected by @table_lock.
++	 *
++	 * Note that allocated entries might be NULL as a transient state when
++	 * creating or deleting a handle.
+ 	 */
+ 	struct idr object_idr;
+ 
+diff --git a/include/drm/drm_framebuffer.h b/include/drm/drm_framebuffer.h
+index 668077009fced0..38b24fc8978d34 100644
+--- a/include/drm/drm_framebuffer.h
++++ b/include/drm/drm_framebuffer.h
+@@ -23,6 +23,7 @@
+ #ifndef __DRM_FRAMEBUFFER_H__
+ #define __DRM_FRAMEBUFFER_H__
+ 
++#include <linux/bits.h>
+ #include <linux/ctype.h>
+ #include <linux/list.h>
+ #include <linux/sched.h>
+@@ -100,6 +101,8 @@ struct drm_framebuffer_funcs {
+ 		     unsigned num_clips);
+ };
+ 
++#define DRM_FRAMEBUFFER_HAS_HANDLE_REF(_i)	BIT(0u + (_i))
++
+ /**
+  * struct drm_framebuffer - frame buffer object
+  *
+@@ -188,6 +191,10 @@ struct drm_framebuffer {
+ 	 * DRM_MODE_FB_MODIFIERS.
+ 	 */
+ 	int flags;
++	/**
++	 * @internal_flags: Framebuffer flags like DRM_FRAMEBUFFER_HAS_HANDLE_REF.
++	 */
++	unsigned int internal_flags;
+ 	/**
+ 	 * @filp_head: Placed on &drm_file.fbs, protected by &drm_file.fbs_lock.
+ 	 */
+diff --git a/include/drm/spsc_queue.h b/include/drm/spsc_queue.h
+index 125f096c88cb96..ee9df8cc67b730 100644
+--- a/include/drm/spsc_queue.h
++++ b/include/drm/spsc_queue.h
+@@ -70,9 +70,11 @@ static inline bool spsc_queue_push(struct spsc_queue *queue, struct spsc_node *n
+ 
+ 	preempt_disable();
+ 
++	atomic_inc(&queue->job_count);
++	smp_mb__after_atomic();
++
+ 	tail = (struct spsc_node **)atomic_long_xchg(&queue->tail, (long)&node->next);
+ 	WRITE_ONCE(*tail, node);
+-	atomic_inc(&queue->job_count);
+ 
+ 	/*
+ 	 * In case of first element verify new node will be visible to the consumer
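The reordering makes the job count globally visible before the node is published through the tail exchange, so a consumer that sees the node can never read a count of zero. A simplified C11 model of that publish ordering; release/acquire stand in for the kernel's smp_mb__after_atomic(), and this is not the full spsc queue:

#include <stdatomic.h>
#include <stdio.h>

static _Atomic int job_count;
static _Atomic(void *) tail_slot;

static void push(void *node)
{
    /* count first, then publish: anyone who observes the node through
     * the release store below is guaranteed to see job_count >= 1 */
    atomic_fetch_add_explicit(&job_count, 1, memory_order_relaxed);
    atomic_store_explicit(&tail_slot, node, memory_order_release);
}

static int peek_count(void)
{
    /* acquire pairs with the release store in push() */
    if (atomic_load_explicit(&tail_slot, memory_order_acquire))
        return atomic_load_explicit(&job_count, memory_order_relaxed);
    return 0;
}

int main(void)
{
    int node = 42;
    push(&node);
    printf("jobs visible: %d\n", peek_count());
    return 0;
}
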
+diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
+index d07c1f0ad3de33..7ecdde54e1edda 100644
+--- a/include/linux/ieee80211.h
++++ b/include/linux/ieee80211.h
+@@ -662,18 +662,6 @@ static inline bool ieee80211_s1g_has_cssid(__le16 fc)
+ 		(fc & cpu_to_le16(IEEE80211_S1G_BCN_CSSID));
+ }
+ 
+-/**
+- * ieee80211_is_s1g_short_beacon - check if frame is an S1G short beacon
+- * @fc: frame control bytes in little-endian byteorder
+- * Return: whether or not the frame is an S1G short beacon,
+- *	i.e. it is an S1G beacon with 'next TBTT' flag set
+- */
+-static inline bool ieee80211_is_s1g_short_beacon(__le16 fc)
+-{
+-	return ieee80211_is_s1g_beacon(fc) &&
+-		(fc & cpu_to_le16(IEEE80211_S1G_BCN_NEXT_TBTT));
+-}
+-
+ /**
+  * ieee80211_is_atim - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ATIM
+  * @fc: frame control bytes in little-endian byteorder
+@@ -4863,6 +4851,39 @@ static inline bool ieee80211_is_ftm(struct sk_buff *skb)
+ 	return false;
+ }
+ 
++/**
++ * ieee80211_is_s1g_short_beacon - check if frame is an S1G short beacon
++ * @fc: frame control bytes in little-endian byteorder
++ * @variable: pointer to the beacon frame elements
++ * @variable_len: length of the frame elements
++ * Return: whether or not the frame is an S1G short beacon. As per
++ *	IEEE80211-2024 11.1.3.10.1, the S1G beacon compatibility element shall
++ *	always be present as the first element in beacon frames generated at a
++ *	TBTT (Target Beacon Transmission Time), so any frame not containing
++ *	this element must have been generated at a TSBTT (Target Short Beacon
++ *	Transmission Time) that is not a TBTT. Additionally, short beacons are
++ *	prohibited from containing the S1G beacon compatibility element as per
++ *	IEEE80211-2024 9.3.4.3 Table 9-76, so if we have an S1G beacon with
++ *	either no elements or the first element is not the beacon compatibility
++ *	element, we have a short beacon.
++ */
++static inline bool ieee80211_is_s1g_short_beacon(__le16 fc, const u8 *variable,
++						 size_t variable_len)
++{
++	if (!ieee80211_is_s1g_beacon(fc))
++		return false;
++
++	/*
++	 * If the frame does not contain at least 1 element (this is perfectly
++	 * valid in a short beacon) and is an S1G beacon, we have a short
++	 * beacon.
++	 */
++	if (variable_len < 2)
++		return true;
++
++	return variable[0] != WLAN_EID_S1G_BCN_COMPAT;
++}
++
+ struct element {
+ 	u8 id;
+ 	u8 datalen;
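The replacement helper classifies short beacons by inspecting the element buffer rather than the frame-control 'next TBTT' bit. A stand-alone sketch of the same element check; the numeric element ID is an assumption and should be verified against ieee80211.h:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define WLAN_EID_S1G_BCN_COMPAT 213   /* assumed value; check ieee80211.h */

/* mirrors only the element test; the frame-control check is omitted */
static bool s1g_short_beacon(const uint8_t *elems, size_t len)
{
    if (len < 2)                      /* no complete element at all: short */
        return true;
    return elems[0] != WLAN_EID_S1G_BCN_COMPAT;
}

int main(void)
{
    uint8_t tbtt[] = { WLAN_EID_S1G_BCN_COMPAT, 0x00 };

    printf("empty: %d, tbtt: %d\n",
           s1g_short_beacon(NULL, 0), s1g_short_beacon(tbtt, sizeof(tbtt)));
    return 0;
}
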
+diff --git a/include/linux/math.h b/include/linux/math.h
+index f5f18dc3616b01..0198c92cbe3ef5 100644
+--- a/include/linux/math.h
++++ b/include/linux/math.h
+@@ -34,6 +34,18 @@
+  */
+ #define round_down(x, y) ((x) & ~__round_mask(x, y))
+ 
++/**
++ * DIV_ROUND_UP_POW2 - divide and round up
++ * @n: numerator
++ * @d: denominator (must be a power of 2)
++ *
++ * Divides @n by @d and rounds up to the next multiple of @d (which must be a
++ * power of 2). Avoids integer overflows that may occur with __KERNEL_DIV_ROUND_UP().
++ * Performance is roughly equivalent to __KERNEL_DIV_ROUND_UP().
++ */
++#define DIV_ROUND_UP_POW2(n, d) \
++	((n) / (d) + !!((n) & ((d) - 1)))
++
+ #define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP
+ 
+ #define DIV_ROUND_DOWN_ULL(ll, d) \
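The point of the new macro is overflow safety: the usual (n + d - 1) / d wraps when n is near the top of its type, whereas n / d + !!(n & (d - 1)) never forms a sum larger than n. A quick demonstration:

#include <limits.h>
#include <stdio.h>

#define DIV_ROUND_UP_POW2(n, d) ((n) / (d) + !!((n) & ((d) - 1)))

int main(void)
{
    unsigned int n = UINT_MAX - 1, d = 8;

    printf("wrapping form: %u\n", (n + d - 1) / d);   /* overflows: 0 */
    printf("pow2 form:     %u\n", DIV_ROUND_UP_POW2(n, d)); /* 536870912 */
    return 0;
}
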
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index 059ca4767e148f..deeb535f920c8a 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -2592,6 +2592,11 @@ static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
+ 	return percpu_counter_read_positive(&mm->rss_stat[member]);
+ }
+ 
++static inline unsigned long get_mm_counter_sum(struct mm_struct *mm, int member)
++{
++	return percpu_counter_sum_positive(&mm->rss_stat[member]);
++}
++
+ void mm_trace_rss_stat(struct mm_struct *mm, int member);
+ 
+ static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
+diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h
+index 903ddfea858505..613a8209bed275 100644
+--- a/include/linux/psp-sev.h
++++ b/include/linux/psp-sev.h
+@@ -594,6 +594,7 @@ struct sev_data_snp_addr {
+  * @imi_en: launch flow is launching an IMI (Incoming Migration Image) for the
+  *          purpose of guest-assisted migration.
+  * @rsvd: reserved
++ * @desired_tsc_khz: hypervisor desired mean TSC freq in kHz of the guest
+  * @gosvw: guest OS-visible workarounds, as defined by hypervisor
+  */
+ struct sev_data_snp_launch_start {
+@@ -603,6 +604,7 @@ struct sev_data_snp_launch_start {
+ 	u32 ma_en:1;				/* In */
+ 	u32 imi_en:1;				/* In */
+ 	u32 rsvd:30;
++	u32 desired_tsc_khz;			/* In */
+ 	u8 gosvw[16];				/* In */
+ } __packed;
+ 
+diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h
+index 9e85424c834353..70302c92d329f6 100644
+--- a/include/net/af_vsock.h
++++ b/include/net/af_vsock.h
+@@ -242,8 +242,8 @@ int __vsock_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
+ int vsock_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
+ 			size_t len, int flags);
+ 
+-#ifdef CONFIG_BPF_SYSCALL
+ extern struct proto vsock_proto;
++#ifdef CONFIG_BPF_SYSCALL
+ int vsock_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore);
+ void __init vsock_bpf_build_proto(void);
+ #else
+diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
+index b63d53bb9dd6db..1a6fca0131653d 100644
+--- a/include/net/netfilter/nf_flow_table.h
++++ b/include/net/netfilter/nf_flow_table.h
+@@ -369,7 +369,7 @@ static inline __be16 __nf_flow_pppoe_proto(const struct sk_buff *skb)
+ 
+ static inline bool nf_flow_pppoe_proto(struct sk_buff *skb, __be16 *inner_proto)
+ {
+-	if (!pskb_may_pull(skb, PPPOE_SES_HLEN))
++	if (!pskb_may_pull(skb, ETH_HLEN + PPPOE_SES_HLEN))
+ 		return false;
+ 
+ 	*inner_proto = __nf_flow_pppoe_proto(skb);
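The added ETH_HLEN accounts for skb->data still pointing at the Ethernet header on this path, so the linear area must cover both it and the PPPoE session header before the inner protocol field is read. The byte math, with the constants as defined in if_ether.h and if_pppox.h:

#include <stdio.h>

#define ETH_HLEN       14   /* dst MAC + src MAC + ethertype */
#define PPPOE_SES_HLEN 8    /* 6-byte PPPoE header + 2-byte PPP protocol */

int main(void)
{
    printf("bytes that must be linear before reading the PPP protocol: %d\n",
           ETH_HLEN + PPPOE_SES_HLEN);
    return 0;
}
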
+diff --git a/include/sound/soc-acpi.h b/include/sound/soc-acpi.h
+index 60d3b86a4660ff..6293ab852c142b 100644
+--- a/include/sound/soc-acpi.h
++++ b/include/sound/soc-acpi.h
+@@ -10,6 +10,7 @@
+ #include <linux/acpi.h>
+ #include <linux/mod_devicetable.h>
+ #include <linux/soundwire/sdw.h>
++#include <sound/soc.h>
+ 
+ struct snd_soc_acpi_package_context {
+ 	char *name;           /* package name */
+@@ -189,6 +190,15 @@ struct snd_soc_acpi_link_adr {
+  *  is not constant since this field may be updated at run-time
+  * @sof_tplg_filename: Sound Open Firmware topology file name, if enabled
+  * @tplg_quirk_mask: quirks to select different topology files dynamically
++ * @get_function_tplg_files: Optional callback; if specified then, instead of
++ *	the single sof_tplg_filename, the callback returns the list of function
++ *	topology files to be loaded.
++ *	Return value: the number of files or a negative ERRNO. 0 means that the
++ *		      single topology file should be used, with no function topology split.
++ *	@card: the pointer of the card
++ *	@mach: the pointer of the machine driver
++ *	@prefix: the prefix of the topology file name. Typically, it is the path.
++ *	@tplg_files: the pointer of the array of the topology file names.
+  */
+ /* Descriptor for SST ASoC machine driver */
+ struct snd_soc_acpi_mach {
+@@ -207,6 +217,9 @@ struct snd_soc_acpi_mach {
+ 	struct snd_soc_acpi_mach_params mach_params;
+ 	const char *sof_tplg_filename;
+ 	const u32 tplg_quirk_mask;
++	int (*get_function_tplg_files)(struct snd_soc_card *card,
++				       const struct snd_soc_acpi_mach *mach,
++				       const char *prefix, const char ***tplg_files);
+ };
+ 
+ #define SND_SOC_ACPI_MAX_CODECS 3
+diff --git a/include/trace/events/erofs.h b/include/trace/events/erofs.h
+index ad79f1ca4fb5a3..198a0c644bea19 100644
+--- a/include/trace/events/erofs.h
++++ b/include/trace/events/erofs.h
+@@ -113,7 +113,7 @@ TRACE_EVENT(erofs_read_folio,
+ 		__entry->raw)
+ );
+ 
+-TRACE_EVENT(erofs_readpages,
++TRACE_EVENT(erofs_readahead,
+ 
+ 	TP_PROTO(struct inode *inode, pgoff_t start, unsigned int nrpage,
+ 		bool raw),
+diff --git a/io_uring/opdef.c b/io_uring/opdef.c
+index a2be3bbca5ffa1..5dc1cba158a060 100644
+--- a/io_uring/opdef.c
++++ b/io_uring/opdef.c
+@@ -214,6 +214,7 @@ const struct io_issue_def io_issue_defs[] = {
+ 	},
+ 	[IORING_OP_FALLOCATE] = {
+ 		.needs_file		= 1,
++		.hash_reg_file		= 1,
+ 		.prep			= io_fallocate_prep,
+ 		.issue			= io_fallocate,
+ 	},
+diff --git a/kernel/bpf/bpf_lru_list.c b/kernel/bpf/bpf_lru_list.c
+index 3dabdd137d1021..2d6e1c98d8adc3 100644
+--- a/kernel/bpf/bpf_lru_list.c
++++ b/kernel/bpf/bpf_lru_list.c
+@@ -337,12 +337,12 @@ static void bpf_lru_list_pop_free_to_local(struct bpf_lru *lru,
+ 				 list) {
+ 		__bpf_lru_node_move_to_free(l, node, local_free_list(loc_l),
+ 					    BPF_LRU_LOCAL_LIST_T_FREE);
+-		if (++nfree == LOCAL_FREE_TARGET)
++		if (++nfree == lru->target_free)
+ 			break;
+ 	}
+ 
+-	if (nfree < LOCAL_FREE_TARGET)
+-		__bpf_lru_list_shrink(lru, l, LOCAL_FREE_TARGET - nfree,
++	if (nfree < lru->target_free)
++		__bpf_lru_list_shrink(lru, l, lru->target_free - nfree,
+ 				      local_free_list(loc_l),
+ 				      BPF_LRU_LOCAL_LIST_T_FREE);
+ 
+@@ -577,6 +577,9 @@ static void bpf_common_lru_populate(struct bpf_lru *lru, void *buf,
+ 		list_add(&node->list, &l->lists[BPF_LRU_LIST_T_FREE]);
+ 		buf += elem_size;
+ 	}
++
++	lru->target_free = clamp((nr_elems / num_possible_cpus()) / 2,
++				 1, LOCAL_FREE_TARGET);
+ }
+ 
+ static void bpf_percpu_lru_populate(struct bpf_lru *lru, void *buf,
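The populate hunk sizes each CPU's local refill batch from the map: half of the per-CPU share of elements, clamped to [1, LOCAL_FREE_TARGET]. A quick check of the formula for a few map sizes, assuming 8 possible CPUs and the header's LOCAL_FREE_TARGET of 128:

#include <stdio.h>

#define LOCAL_FREE_TARGET 128

static unsigned int target_free(unsigned int nr_elems, unsigned int cpus)
{
    unsigned int t = (nr_elems / cpus) / 2;

    if (t < 1)                 /* clamp(..., 1, LOCAL_FREE_TARGET) */
        t = 1;
    if (t > LOCAL_FREE_TARGET)
        t = LOCAL_FREE_TARGET;
    return t;
}

int main(void)
{
    unsigned int sizes[] = { 64, 1024, 1u << 20 };

    for (int i = 0; i < 3; i++)         /* prints 4, 64, 128 */
        printf("%u elems -> target_free %u\n", sizes[i],
               target_free(sizes[i], 8));
    return 0;
}
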
+diff --git a/kernel/bpf/bpf_lru_list.h b/kernel/bpf/bpf_lru_list.h
+index cbd8d3720c2bbe..fe2661a58ea94a 100644
+--- a/kernel/bpf/bpf_lru_list.h
++++ b/kernel/bpf/bpf_lru_list.h
+@@ -58,6 +58,7 @@ struct bpf_lru {
+ 	del_from_htab_func del_from_htab;
+ 	void *del_arg;
+ 	unsigned int hash_offset;
++	unsigned int target_free;
+ 	unsigned int nr_scans;
+ 	bool percpu;
+ };
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 7210104b3345ca..dd745485b0f46d 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -905,8 +905,6 @@ static void perf_cgroup_switch(struct task_struct *task)
+ 	if (READ_ONCE(cpuctx->cgrp) == NULL)
+ 		return;
+ 
+-	WARN_ON_ONCE(cpuctx->ctx.nr_cgroups == 0);
+-
+ 	cgrp = perf_cgroup_from_task(task, NULL);
+ 	if (READ_ONCE(cpuctx->cgrp) == cgrp)
+ 		return;
+@@ -918,6 +916,8 @@ static void perf_cgroup_switch(struct task_struct *task)
+ 	if (READ_ONCE(cpuctx->cgrp) == NULL)
+ 		return;
+ 
++	WARN_ON_ONCE(cpuctx->ctx.nr_cgroups == 0);
++
+ 	perf_ctx_disable(&cpuctx->ctx, true);
+ 
+ 	ctx_sched_out(&cpuctx->ctx, NULL, EVENT_ALL|EVENT_CGROUP);
+@@ -10737,7 +10737,7 @@ static int perf_uprobe_event_init(struct perf_event *event)
+ 	if (event->attr.type != perf_uprobe.type)
+ 		return -ENOENT;
+ 
+-	if (!perfmon_capable())
++	if (!capable(CAP_SYS_ADMIN))
+ 		return -EACCES;
+ 
+ 	/*
+diff --git a/kernel/rseq.c b/kernel/rseq.c
+index 9de6e35fe67914..23894ba8250cf9 100644
+--- a/kernel/rseq.c
++++ b/kernel/rseq.c
+@@ -149,6 +149,29 @@ static int rseq_reset_rseq_cpu_node_id(struct task_struct *t)
+ 	return 0;
+ }
+ 
++/*
++ * Get the user-space pointer value stored in the 'rseq_cs' field.
++ */
++static int rseq_get_rseq_cs_ptr_val(struct rseq __user *rseq, u64 *rseq_cs)
++{
++	if (!rseq_cs)
++		return -EFAULT;
++
++#ifdef CONFIG_64BIT
++	if (get_user(*rseq_cs, &rseq->rseq_cs))
++		return -EFAULT;
++#else
++	if (copy_from_user(rseq_cs, &rseq->rseq_cs, sizeof(*rseq_cs)))
++		return -EFAULT;
++#endif
++
++	return 0;
++}
++
++/*
++ * If the rseq_cs field of 'struct rseq' contains a valid pointer to
++ * user-space, copy 'struct rseq_cs' from user-space and validate its fields.
++ */
+ static int rseq_get_rseq_cs(struct task_struct *t, struct rseq_cs *rseq_cs)
+ {
+ 	struct rseq_cs __user *urseq_cs;
+@@ -157,17 +180,16 @@ static int rseq_get_rseq_cs(struct task_struct *t, struct rseq_cs *rseq_cs)
+ 	u32 sig;
+ 	int ret;
+ 
+-#ifdef CONFIG_64BIT
+-	if (get_user(ptr, &t->rseq->rseq_cs))
+-		return -EFAULT;
+-#else
+-	if (copy_from_user(&ptr, &t->rseq->rseq_cs, sizeof(ptr)))
+-		return -EFAULT;
+-#endif
++	ret = rseq_get_rseq_cs_ptr_val(t->rseq, &ptr);
++	if (ret)
++		return ret;
++
++	/* If the rseq_cs pointer is NULL, return a cleared struct rseq_cs. */
+ 	if (!ptr) {
+ 		memset(rseq_cs, 0, sizeof(*rseq_cs));
+ 		return 0;
+ 	}
++	/* Check that the pointer value fits in the user-space process space. */
+ 	if (ptr >= TASK_SIZE)
+ 		return -EINVAL;
+ 	urseq_cs = (struct rseq_cs __user *)(unsigned long)ptr;
+@@ -243,7 +265,7 @@ static int rseq_need_restart(struct task_struct *t, u32 cs_flags)
+ 	return !!event_mask;
+ }
+ 
+-static int clear_rseq_cs(struct task_struct *t)
++static int clear_rseq_cs(struct rseq __user *rseq)
+ {
+ 	/*
+ 	 * The rseq_cs field is set to NULL on preemption or signal
+@@ -254,9 +276,9 @@ static int clear_rseq_cs(struct task_struct *t)
+ 	 * Set rseq_cs to NULL.
+ 	 */
+ #ifdef CONFIG_64BIT
+-	return put_user(0UL, &t->rseq->rseq_cs);
++	return put_user(0UL, &rseq->rseq_cs);
+ #else
+-	if (clear_user(&t->rseq->rseq_cs, sizeof(t->rseq->rseq_cs)))
++	if (clear_user(&rseq->rseq_cs, sizeof(rseq->rseq_cs)))
+ 		return -EFAULT;
+ 	return 0;
+ #endif
+@@ -288,11 +310,11 @@ static int rseq_ip_fixup(struct pt_regs *regs)
+ 	 * Clear the rseq_cs pointer and return.
+ 	 */
+ 	if (!in_rseq_cs(ip, &rseq_cs))
+-		return clear_rseq_cs(t);
++		return clear_rseq_cs(t->rseq);
+ 	ret = rseq_need_restart(t, rseq_cs.flags);
+ 	if (ret <= 0)
+ 		return ret;
+-	ret = clear_rseq_cs(t);
++	ret = clear_rseq_cs(t->rseq);
+ 	if (ret)
+ 		return ret;
+ 	trace_rseq_ip_fixup(ip, rseq_cs.start_ip, rseq_cs.post_commit_offset,
+@@ -366,6 +388,7 @@ SYSCALL_DEFINE4(rseq, struct rseq __user *, rseq, u32, rseq_len,
+ 		int, flags, u32, sig)
+ {
+ 	int ret;
++	u64 rseq_cs;
+ 
+ 	if (flags & RSEQ_FLAG_UNREGISTER) {
+ 		if (flags & ~RSEQ_FLAG_UNREGISTER)
+@@ -420,6 +443,19 @@ SYSCALL_DEFINE4(rseq, struct rseq __user *, rseq, u32, rseq_len,
+ 		return -EINVAL;
+ 	if (!access_ok(rseq, rseq_len))
+ 		return -EFAULT;
++
++	/*
++	 * If the rseq_cs pointer is non-NULL on registration, clear it to
++	 * avoid a potential segfault on return to user-space. The proper thing
++	 * to do would have been to fail the registration but this would break
++	 * older libcs that reuse the rseq area for new threads without
++	 * clearing the fields.
++	 */
++	if (rseq_get_rseq_cs_ptr_val(rseq, &rseq_cs))
++		return -EFAULT;
++	if (rseq_cs && clear_rseq_cs(rseq))
++		return -EFAULT;
++
+ 	current->rseq = rseq;
+ 	current->rseq_len = rseq_len;
+ 	current->rseq_sig = sig;
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 50531e462a4ba8..4b1953b6c76ab4 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -3891,6 +3891,11 @@ static inline bool ttwu_queue_cond(struct task_struct *p, int cpu)
+ 	if (task_on_scx(p))
+ 		return false;
+ 
++#ifdef CONFIG_SMP
++	if (p->sched_class == &stop_sched_class)
++		return false;
++#endif
++
+ 	/*
+ 	 * Do not complicate things with the async wake_list while the CPU is
+ 	 * in hotplug state.
+diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
+index 5e7ae404c8d2a4..0a47e5155897cd 100644
+--- a/kernel/sched/deadline.c
++++ b/kernel/sched/deadline.c
+@@ -1485,7 +1485,9 @@ static void update_curr_dl_se(struct rq *rq, struct sched_dl_entity *dl_se, s64
+ 	if (dl_entity_is_special(dl_se))
+ 		return;
+ 
+-	scaled_delta_exec = dl_scaled_delta_exec(rq, dl_se, delta_exec);
++	scaled_delta_exec = delta_exec;
++	if (!dl_server(dl_se))
++		scaled_delta_exec = dl_scaled_delta_exec(rq, dl_se, delta_exec);
+ 
+ 	dl_se->runtime -= scaled_delta_exec;
+ 
+@@ -1592,7 +1594,7 @@ static void update_curr_dl_se(struct rq *rq, struct sched_dl_entity *dl_se, s64
+  */
+ void dl_server_update_idle_time(struct rq *rq, struct task_struct *p)
+ {
+-	s64 delta_exec, scaled_delta_exec;
++	s64 delta_exec;
+ 
+ 	if (!rq->fair_server.dl_defer)
+ 		return;
+@@ -1605,9 +1607,7 @@ void dl_server_update_idle_time(struct rq *rq, struct task_struct *p)
+ 	if (delta_exec < 0)
+ 		return;
+ 
+-	scaled_delta_exec = dl_scaled_delta_exec(rq, &rq->fair_server, delta_exec);
+-
+-	rq->fair_server.runtime -= scaled_delta_exec;
++	rq->fair_server.runtime -= delta_exec;
+ 
+ 	if (rq->fair_server.runtime < 0) {
+ 		rq->fair_server.dl_defer_running = 0;
+diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
+index da821ce258ea7f..d758e66ad59e40 100644
+--- a/kernel/stop_machine.c
++++ b/kernel/stop_machine.c
+@@ -82,18 +82,15 @@ static void cpu_stop_signal_done(struct cpu_stop_done *done)
+ }
+ 
+ static void __cpu_stop_queue_work(struct cpu_stopper *stopper,
+-					struct cpu_stop_work *work,
+-					struct wake_q_head *wakeq)
++				  struct cpu_stop_work *work)
+ {
+ 	list_add_tail(&work->list, &stopper->works);
+-	wake_q_add(wakeq, stopper->thread);
+ }
+ 
+ /* queue @work to @stopper.  if offline, @work is completed immediately */
+ static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
+ {
+ 	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
+-	DEFINE_WAKE_Q(wakeq);
+ 	unsigned long flags;
+ 	bool enabled;
+ 
+@@ -101,12 +98,13 @@ static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
+ 	raw_spin_lock_irqsave(&stopper->lock, flags);
+ 	enabled = stopper->enabled;
+ 	if (enabled)
+-		__cpu_stop_queue_work(stopper, work, &wakeq);
++		__cpu_stop_queue_work(stopper, work);
+ 	else if (work->done)
+ 		cpu_stop_signal_done(work->done);
+ 	raw_spin_unlock_irqrestore(&stopper->lock, flags);
+ 
+-	wake_up_q(&wakeq);
++	if (enabled)
++		wake_up_process(stopper->thread);
+ 	preempt_enable();
+ 
+ 	return enabled;
+@@ -263,7 +261,6 @@ static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
+ {
+ 	struct cpu_stopper *stopper1 = per_cpu_ptr(&cpu_stopper, cpu1);
+ 	struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2);
+-	DEFINE_WAKE_Q(wakeq);
+ 	int err;
+ 
+ retry:
+@@ -299,8 +296,8 @@ static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
+ 	}
+ 
+ 	err = 0;
+-	__cpu_stop_queue_work(stopper1, work1, &wakeq);
+-	__cpu_stop_queue_work(stopper2, work2, &wakeq);
++	__cpu_stop_queue_work(stopper1, work1);
++	__cpu_stop_queue_work(stopper2, work2);
+ 
+ unlock:
+ 	raw_spin_unlock(&stopper2->lock);
+@@ -315,7 +312,10 @@ static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
+ 		goto retry;
+ 	}
+ 
+-	wake_up_q(&wakeq);
++	if (!err) {
++		wake_up_process(stopper1->thread);
++		wake_up_process(stopper2->thread);
++	}
+ 	preempt_enable();
+ 
+ 	return err;
+diff --git a/lib/alloc_tag.c b/lib/alloc_tag.c
+index 81e5f9a70f2207..e76c40bf29d06f 100644
+--- a/lib/alloc_tag.c
++++ b/lib/alloc_tag.c
+@@ -113,6 +113,9 @@ size_t alloc_tag_top_users(struct codetag_bytes *tags, size_t count, bool can_sl
+ 	struct codetag_bytes n;
+ 	unsigned int i, nr = 0;
+ 
++	if (IS_ERR_OR_NULL(alloc_tag_cttype))
++		return 0;
++
+ 	if (can_sleep)
+ 		codetag_lock_module_list(alloc_tag_cttype, true);
+ 	else if (!codetag_trylock_module_list(alloc_tag_cttype))
+diff --git a/lib/maple_tree.c b/lib/maple_tree.c
+index 44441ec5b0affc..59f83ece202409 100644
+--- a/lib/maple_tree.c
++++ b/lib/maple_tree.c
+@@ -5335,6 +5335,7 @@ static void mt_destroy_walk(struct maple_enode *enode, struct maple_tree *mt,
+ 	struct maple_enode *start;
+ 
+ 	if (mte_is_leaf(enode)) {
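++		/* Mark the node dead so racing readers notice and retry their walk. */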
++		mte_set_node_dead(enode);
+ 		node->type = mte_node_type(enode);
+ 		goto free_leaf;
+ 	}
+diff --git a/mm/kasan/report.c b/mm/kasan/report.c
+index c7c0083203cb73..5675d6a412ef17 100644
+--- a/mm/kasan/report.c
++++ b/mm/kasan/report.c
+@@ -398,17 +398,8 @@ static void print_address_description(void *addr, u8 tag,
+ 	}
+ 
+ 	if (is_vmalloc_addr(addr)) {
+-		struct vm_struct *va = find_vm_area(addr);
+-
+-		if (va) {
+-			pr_err("The buggy address belongs to the virtual mapping at\n"
+-			       " [%px, %px) created by:\n"
+-			       " %pS\n",
+-			       va->addr, va->addr + va->size, va->caller);
+-			pr_err("\n");
+-
+-			page = vmalloc_to_page(addr);
+-		}
++		pr_err("The buggy address %px belongs to a vmalloc virtual mapping\n", addr);
++		page = vmalloc_to_page(addr);
+ 	}
+ 
+ 	if (page) {
+diff --git a/mm/vmalloc.c b/mm/vmalloc.c
+index 7888600b6a795b..3519c4e4f841dd 100644
+--- a/mm/vmalloc.c
++++ b/mm/vmalloc.c
+@@ -487,6 +487,7 @@ static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
+ 		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
+ 		pgtbl_mod_mask *mask)
+ {
++	int err = 0;
+ 	pte_t *pte;
+ 
+ 	/*
+@@ -500,18 +501,25 @@ static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
+ 	do {
+ 		struct page *page = pages[*nr];
+ 
+-		if (WARN_ON(!pte_none(ptep_get(pte))))
+-			return -EBUSY;
+-		if (WARN_ON(!page))
+-			return -ENOMEM;
+-		if (WARN_ON(!pfn_valid(page_to_pfn(page))))
+-			return -EINVAL;
++		if (WARN_ON(!pte_none(ptep_get(pte)))) {
++			err = -EBUSY;
++			break;
++		}
++		if (WARN_ON(!page)) {
++			err = -ENOMEM;
++			break;
++		}
++		if (WARN_ON(!pfn_valid(page_to_pfn(page)))) {
++			err = -EINVAL;
++			break;
++		}
+ 
+ 		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
+ 		(*nr)++;
+ 	} while (pte++, addr += PAGE_SIZE, addr != end);
+ 	*mask |= PGTBL_PTE_MODIFIED;
+-	return 0;
++
++	return err;
+ }
+ 
+ static int vmap_pages_pmd_range(pud_t *pud, unsigned long addr,
+diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
+index b068651984fe3d..fa7f002b14fa3c 100644
+--- a/net/appletalk/ddp.c
++++ b/net/appletalk/ddp.c
+@@ -576,6 +576,7 @@ static int atrtr_create(struct rtentry *r, struct net_device *devhint)
+ 
+ 	/* Fill in the routing entry */
+ 	rt->target  = ta->sat_addr;
++	dev_put(rt->dev); /* Release old device */
+ 	dev_hold(devhint);
+ 	rt->dev     = devhint;
+ 	rt->flags   = r->rt_flags;
+diff --git a/net/atm/clip.c b/net/atm/clip.c
+index 0d7744442b25ac..ebba0d6ae32484 100644
+--- a/net/atm/clip.c
++++ b/net/atm/clip.c
+@@ -45,7 +45,8 @@
+ #include <net/atmclip.h>
+ 
+ static struct net_device *clip_devs;
+-static struct atm_vcc *atmarpd;
++static struct atm_vcc __rcu *atmarpd;
++static DEFINE_MUTEX(atmarpd_lock);
+ static struct timer_list idle_timer;
+ static const struct neigh_ops clip_neigh_ops;
+ 
+@@ -53,24 +54,35 @@ static int to_atmarpd(enum atmarp_ctrl_type type, int itf, __be32 ip)
+ {
+ 	struct sock *sk;
+ 	struct atmarp_ctrl *ctrl;
++	struct atm_vcc *vcc;
+ 	struct sk_buff *skb;
++	int err = 0;
+ 
+ 	pr_debug("(%d)\n", type);
+-	if (!atmarpd)
+-		return -EUNATCH;
++
++	rcu_read_lock();
++	vcc = rcu_dereference(atmarpd);
++	if (!vcc) {
++		err = -EUNATCH;
++		goto unlock;
++	}
+ 	skb = alloc_skb(sizeof(struct atmarp_ctrl), GFP_ATOMIC);
+-	if (!skb)
+-		return -ENOMEM;
++	if (!skb) {
++		err = -ENOMEM;
++		goto unlock;
++	}
+ 	ctrl = skb_put(skb, sizeof(struct atmarp_ctrl));
+ 	ctrl->type = type;
+ 	ctrl->itf_num = itf;
+ 	ctrl->ip = ip;
+-	atm_force_charge(atmarpd, skb->truesize);
++	atm_force_charge(vcc, skb->truesize);
+ 
+-	sk = sk_atm(atmarpd);
++	sk = sk_atm(vcc);
+ 	skb_queue_tail(&sk->sk_receive_queue, skb);
+ 	sk->sk_data_ready(sk);
+-	return 0;
++unlock:
++	rcu_read_unlock();
++	return err;
+ }
+ 
+ static void link_vcc(struct clip_vcc *clip_vcc, struct atmarp_entry *entry)
+@@ -417,6 +429,8 @@ static int clip_mkip(struct atm_vcc *vcc, int timeout)
+ 
+ 	if (!vcc->push)
+ 		return -EBADFD;
++	if (vcc->user_back)
++		return -EINVAL;
+ 	clip_vcc = kmalloc(sizeof(struct clip_vcc), GFP_KERNEL);
+ 	if (!clip_vcc)
+ 		return -ENOMEM;
+@@ -607,17 +621,27 @@ static void atmarpd_close(struct atm_vcc *vcc)
+ {
+ 	pr_debug("\n");
+ 
+-	rtnl_lock();
+-	atmarpd = NULL;
++	mutex_lock(&atmarpd_lock);
++	RCU_INIT_POINTER(atmarpd, NULL);
++	mutex_unlock(&atmarpd_lock);
++
++	synchronize_rcu();
+ 	skb_queue_purge(&sk_atm(vcc)->sk_receive_queue);
+-	rtnl_unlock();
+ 
+ 	pr_debug("(done)\n");
+ 	module_put(THIS_MODULE);
+ }
+ 
++static int atmarpd_send(struct atm_vcc *vcc, struct sk_buff *skb)
++{
++	atm_return_tx(vcc, skb);
++	dev_kfree_skb_any(skb);
++	return 0;
++}
++
+ static const struct atmdev_ops atmarpd_dev_ops = {
+-	.close = atmarpd_close
++	.close = atmarpd_close,
++	.send = atmarpd_send
+ };
+ 
+ 
+@@ -631,15 +655,18 @@ static struct atm_dev atmarpd_dev = {
+ 
+ static int atm_init_atmarp(struct atm_vcc *vcc)
+ {
+-	rtnl_lock();
++	if (vcc->push == clip_push)
++		return -EINVAL;
++
++	mutex_lock(&atmarpd_lock);
+ 	if (atmarpd) {
+-		rtnl_unlock();
++		mutex_unlock(&atmarpd_lock);
+ 		return -EADDRINUSE;
+ 	}
+ 
+ 	mod_timer(&idle_timer, jiffies + CLIP_CHECK_INTERVAL * HZ);
+ 
+-	atmarpd = vcc;
++	rcu_assign_pointer(atmarpd, vcc);
+ 	set_bit(ATM_VF_META, &vcc->flags);
+ 	set_bit(ATM_VF_READY, &vcc->flags);
+ 	    /* allow replies and avoid getting closed if signaling dies */
+@@ -648,13 +675,14 @@ static int atm_init_atmarp(struct atm_vcc *vcc)
+ 	vcc->push = NULL;
+ 	vcc->pop = NULL; /* crash */
+ 	vcc->push_oam = NULL; /* crash */
+-	rtnl_unlock();
++	mutex_unlock(&atmarpd_lock);
+ 	return 0;
+ }
+ 
+ static int clip_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+ {
+ 	struct atm_vcc *vcc = ATM_SD(sock);
++	struct sock *sk = sock->sk;
+ 	int err = 0;
+ 
+ 	switch (cmd) {
+@@ -675,14 +703,18 @@ static int clip_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+ 		err = clip_create(arg);
+ 		break;
+ 	case ATMARPD_CTRL:
++		lock_sock(sk);
+ 		err = atm_init_atmarp(vcc);
+ 		if (!err) {
+ 			sock->state = SS_CONNECTED;
+ 			__module_get(THIS_MODULE);
+ 		}
++		release_sock(sk);
+ 		break;
+ 	case ATMARP_MKIP:
++		lock_sock(sk);
+ 		err = clip_mkip(vcc, arg);
++		release_sock(sk);
+ 		break;
+ 	case ATMARP_SETENTRY:
+ 		err = clip_setentry(vcc, (__force __be32)arg);
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index 7fdf17351e4a2a..b7dcebc701898d 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -6945,7 +6945,10 @@ static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
+ 		bis->iso_qos.bcast.in.sdu = le16_to_cpu(ev->max_pdu);
+ 
+ 		if (!ev->status) {
++			bis->state = BT_CONNECTED;
+ 			set_bit(HCI_CONN_BIG_SYNC, &bis->flags);
++			hci_debugfs_create_conn(bis);
++			hci_conn_add_sysfs(bis);
+ 			hci_iso_setup_path(bis);
+ 		}
+ 	}
+diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
+index 79d1a6ed08b294..bc01135e43f3ea 100644
+--- a/net/bluetooth/hci_sync.c
++++ b/net/bluetooth/hci_sync.c
+@@ -1345,7 +1345,7 @@ int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance)
+ 	 * Command Disallowed error, so we must first disable the
+ 	 * instance if it is active.
+ 	 */
+-	if (adv && !adv->pending) {
++	if (adv) {
+ 		err = hci_disable_ext_adv_instance_sync(hdev, instance);
+ 		if (err)
+ 			return err;
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index b731a4a8f2b0d5..156da81bce068e 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -1145,7 +1145,7 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
+ 		goto do_error;
+ 
+ 	while (msg_data_left(msg)) {
+-		ssize_t copy = 0;
++		int copy = 0;
+ 
+ 		skb = tcp_write_queue_tail(sk);
+ 		if (skb)
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index 16ba3bb12fc4b9..be51b8792b96f4 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -3548,11 +3548,9 @@ static void addrconf_gre_config(struct net_device *dev)
+ 
+ 	ASSERT_RTNL();
+ 
+-	idev = ipv6_find_idev(dev);
+-	if (IS_ERR(idev)) {
+-		pr_debug("%s: add_dev failed\n", __func__);
++	idev = addrconf_add_dev(dev);
++	if (IS_ERR(idev))
+ 		return;
+-	}
+ 
+ 	/* Generate the IPv6 link-local address using addrconf_addr_gen(),
+ 	 * unless we have an IPv4 GRE device not bound to an IP address and
+@@ -3566,9 +3564,6 @@ static void addrconf_gre_config(struct net_device *dev)
+ 	}
+ 
+ 	add_v4_addrs(idev);
+-
+-	if (dev->flags & IFF_POINTOPOINT)
+-		addrconf_add_mroute(dev);
+ }
+ #endif
+ 
+diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
+index 16bb3db67eaac0..fd7434995a475d 100644
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -6702,6 +6702,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link,
+ 	struct ieee80211_bss_conf *bss_conf = link->conf;
+ 	struct ieee80211_vif_cfg *vif_cfg = &sdata->vif.cfg;
+ 	struct ieee80211_mgmt *mgmt = (void *) hdr;
++	struct ieee80211_ext *ext = NULL;
+ 	size_t baselen;
+ 	struct ieee802_11_elems *elems;
+ 	struct ieee80211_local *local = sdata->local;
+@@ -6727,7 +6728,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link,
+ 	/* Process beacon from the current BSS */
+ 	bssid = ieee80211_get_bssid(hdr, len, sdata->vif.type);
+ 	if (ieee80211_is_s1g_beacon(mgmt->frame_control)) {
+-		struct ieee80211_ext *ext = (void *) mgmt;
++		ext = (void *)mgmt;
+ 		variable = ext->u.s1g_beacon.variable +
+ 			   ieee80211_s1g_optional_len(ext->frame_control);
+ 	}
+@@ -6914,7 +6915,9 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link,
+ 	}
+ 
+ 	if ((ncrc == link->u.mgd.beacon_crc && link->u.mgd.beacon_crc_valid) ||
+-	    ieee80211_is_s1g_short_beacon(mgmt->frame_control))
++	    (ext && ieee80211_is_s1g_short_beacon(ext->frame_control,
++						  parse_params.start,
++						  parse_params.len)))
+ 		goto free;
+ 	link->u.mgd.beacon_crc = ncrc;
+ 	link->u.mgd.beacon_crc_valid = true;
+diff --git a/net/mac80211/parse.c b/net/mac80211/parse.c
+index 6da39c864f45ba..922ea9a6e2412c 100644
+--- a/net/mac80211/parse.c
++++ b/net/mac80211/parse.c
+@@ -758,7 +758,6 @@ static size_t ieee802_11_find_bssid_profile(const u8 *start, size_t len,
+ {
+ 	const struct element *elem, *sub;
+ 	size_t profile_len = 0;
+-	bool found = false;
+ 
+ 	if (!bss || !bss->transmitted_bss)
+ 		return profile_len;
+@@ -809,15 +808,14 @@ static size_t ieee802_11_find_bssid_profile(const u8 *start, size_t len,
+ 					       index[2],
+ 					       new_bssid);
+ 			if (ether_addr_equal(new_bssid, bss->bssid)) {
+-				found = true;
+ 				elems->bssid_index_len = index[1];
+ 				elems->bssid_index = (void *)&index[2];
+-				break;
++				return profile_len;
+ 			}
+ 		}
+ 	}
+ 
+-	return found ? profile_len : 0;
++	return 0;
+ }
+ 
+ static void
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index 775d707ec708a7..b02fb75f8d4fd2 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -387,7 +387,6 @@ static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
+ 	WARN_ON(skb->sk != NULL);
+ 	skb->sk = sk;
+ 	skb->destructor = netlink_skb_destructor;
+-	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
+ 	sk_mem_charge(sk, skb->truesize);
+ }
+ 
+@@ -1216,41 +1215,48 @@ struct sk_buff *netlink_alloc_large_skb(unsigned int size, int broadcast)
+ int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
+ 		      long *timeo, struct sock *ssk)
+ {
++	DECLARE_WAITQUEUE(wait, current);
+ 	struct netlink_sock *nlk;
++	unsigned int rmem;
+ 
+ 	nlk = nlk_sk(sk);
++	rmem = atomic_add_return(skb->truesize, &sk->sk_rmem_alloc);
+ 
+-	if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
+-	     test_bit(NETLINK_S_CONGESTED, &nlk->state))) {
+-		DECLARE_WAITQUEUE(wait, current);
+-		if (!*timeo) {
+-			if (!ssk || netlink_is_kernel(ssk))
+-				netlink_overrun(sk);
+-			sock_put(sk);
+-			kfree_skb(skb);
+-			return -EAGAIN;
+-		}
+-
+-		__set_current_state(TASK_INTERRUPTIBLE);
+-		add_wait_queue(&nlk->wait, &wait);
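++	/* rmem == skb->truesize means the queue was empty, so accept at
++	 * least one skb even if it exceeds rcvbuf.
++	 */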
++	if ((rmem == skb->truesize || rmem < READ_ONCE(sk->sk_rcvbuf)) &&
++	    !test_bit(NETLINK_S_CONGESTED, &nlk->state)) {
++		netlink_skb_set_owner_r(skb, sk);
++		return 0;
++	}
+ 
+-		if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
+-		     test_bit(NETLINK_S_CONGESTED, &nlk->state)) &&
+-		    !sock_flag(sk, SOCK_DEAD))
+-			*timeo = schedule_timeout(*timeo);
++	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
+ 
+-		__set_current_state(TASK_RUNNING);
+-		remove_wait_queue(&nlk->wait, &wait);
++	if (!*timeo) {
++		if (!ssk || netlink_is_kernel(ssk))
++			netlink_overrun(sk);
+ 		sock_put(sk);
++		kfree_skb(skb);
++		return -EAGAIN;
++	}
+ 
+-		if (signal_pending(current)) {
+-			kfree_skb(skb);
+-			return sock_intr_errno(*timeo);
+-		}
+-		return 1;
++	__set_current_state(TASK_INTERRUPTIBLE);
++	add_wait_queue(&nlk->wait, &wait);
++	rmem = atomic_read(&sk->sk_rmem_alloc);
++
++	if (((rmem && rmem + skb->truesize > READ_ONCE(sk->sk_rcvbuf)) ||
++	     test_bit(NETLINK_S_CONGESTED, &nlk->state)) &&
++	    !sock_flag(sk, SOCK_DEAD))
++		*timeo = schedule_timeout(*timeo);
++
++	__set_current_state(TASK_RUNNING);
++	remove_wait_queue(&nlk->wait, &wait);
++	sock_put(sk);
++
++	if (signal_pending(current)) {
++		kfree_skb(skb);
++		return sock_intr_errno(*timeo);
+ 	}
+-	netlink_skb_set_owner_r(skb, sk);
+-	return 0;
++
++	return 1;
+ }
+ 
+ static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
+@@ -1310,6 +1316,7 @@ static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb,
+ 	ret = -ECONNREFUSED;
+ 	if (nlk->netlink_rcv != NULL) {
+ 		ret = skb->len;
++		atomic_add(skb->truesize, &sk->sk_rmem_alloc);
+ 		netlink_skb_set_owner_r(skb, sk);
+ 		NETLINK_CB(skb).sk = ssk;
+ 		netlink_deliver_tap_kernel(sk, ssk, skb);
+@@ -1386,13 +1393,19 @@ EXPORT_SYMBOL_GPL(netlink_strict_get_check);
+ static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
+ {
+ 	struct netlink_sock *nlk = nlk_sk(sk);
++	unsigned int rmem, rcvbuf;
+ 
+-	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
++	rmem = atomic_add_return(skb->truesize, &sk->sk_rmem_alloc);
++	rcvbuf = READ_ONCE(sk->sk_rcvbuf);
++
++	if ((rmem == skb->truesize || rmem <= rcvbuf) &&
+ 	    !test_bit(NETLINK_S_CONGESTED, &nlk->state)) {
+ 		netlink_skb_set_owner_r(skb, sk);
+ 		__netlink_sendskb(sk, skb);
+-		return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
++		return rmem > (rcvbuf >> 1);
+ 	}
++
++	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
+ 	return -1;
+ }
+ 
+@@ -2248,6 +2261,7 @@ static int netlink_dump(struct sock *sk, bool lock_taken)
+ 	struct netlink_ext_ack extack = {};
+ 	struct netlink_callback *cb;
+ 	struct sk_buff *skb = NULL;
++	unsigned int rmem, rcvbuf;
+ 	size_t max_recvmsg_len;
+ 	struct module *module;
+ 	int err = -ENOBUFS;
+@@ -2261,9 +2275,6 @@ static int netlink_dump(struct sock *sk, bool lock_taken)
+ 		goto errout_skb;
+ 	}
+ 
+-	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
+-		goto errout_skb;
+-
+ 	/* NLMSG_GOODSIZE is small to avoid high order allocations being
+ 	 * required, but it makes sense to _attempt_ a 16K bytes allocation
+ 	 * to reduce number of system calls on dump operations, if user
+@@ -2286,6 +2297,13 @@ static int netlink_dump(struct sock *sk, bool lock_taken)
+ 	if (!skb)
+ 		goto errout_skb;
+ 
++	rcvbuf = READ_ONCE(sk->sk_rcvbuf);
++	rmem = atomic_add_return(skb->truesize, &sk->sk_rmem_alloc);
++	if (rmem != skb->truesize && rmem >= rcvbuf) {
++		atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
++		goto errout_skb;
++	}
++
+ 	/* Trim skb to allocated size. User is expected to provide buffer as
+ 	 * large as max(min_dump_alloc, 16KiB (mac_recvmsg_len capped at
+ 	 * netlink_recvmsg())). dump will pack as many smaller messages as
+diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c
+index 0f5a1d77b890f8..773bdb2e37dafd 100644
+--- a/net/rxrpc/call_accept.c
++++ b/net/rxrpc/call_accept.c
+@@ -149,6 +149,7 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
+ 
+ id_in_use:
+ 	write_unlock(&rx->call_lock);
++	rxrpc_prefail_call(call, RXRPC_CALL_LOCAL_ERROR, -EBADSLT);
+ 	rxrpc_cleanup_call(call);
+ 	_leave(" = -EBADSLT");
+ 	return -EBADSLT;
+@@ -253,6 +254,9 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
+ 	unsigned short call_tail, conn_tail, peer_tail;
+ 	unsigned short call_count, conn_count;
+ 
++	if (!b)
++		return NULL;
++
+ 	/* #calls >= #conns >= #peers must hold true. */
+ 	call_head = smp_load_acquire(&b->call_backlog_head);
+ 	call_tail = b->call_backlog_tail;
+diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
+index 26378eac1bd08b..c56a01992cb284 100644
+--- a/net/sched/sch_api.c
++++ b/net/sched/sch_api.c
+@@ -334,17 +334,22 @@ struct Qdisc *qdisc_lookup_rcu(struct net_device *dev, u32 handle)
+ 	return q;
+ }
+ 
+-static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
++static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid,
++				struct netlink_ext_ack *extack)
+ {
+ 	unsigned long cl;
+ 	const struct Qdisc_class_ops *cops = p->ops->cl_ops;
+ 
+-	if (cops == NULL)
+-		return NULL;
++	if (cops == NULL) {
++		NL_SET_ERR_MSG(extack, "Parent qdisc is not classful");
++		return ERR_PTR(-EOPNOTSUPP);
++	}
+ 	cl = cops->find(p, classid);
+ 
+-	if (cl == 0)
+-		return NULL;
++	if (cl == 0) {
++		NL_SET_ERR_MSG(extack, "Specified class not found");
++		return ERR_PTR(-ENOENT);
++	}
+ 	return cops->leaf(p, cl);
+ }
+ 
+@@ -1526,7 +1531,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
+ 					NL_SET_ERR_MSG(extack, "Failed to find qdisc with specified classid");
+ 					return -ENOENT;
+ 				}
+-				q = qdisc_leaf(p, clid);
++				q = qdisc_leaf(p, clid, extack);
+ 			} else if (dev_ingress_queue(dev)) {
+ 				q = rtnl_dereference(dev_ingress_queue(dev)->qdisc_sleeping);
+ 			}
+@@ -1537,6 +1542,8 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
+ 			NL_SET_ERR_MSG(extack, "Cannot find specified qdisc on specified device");
+ 			return -ENOENT;
+ 		}
++		if (IS_ERR(q))
++			return PTR_ERR(q);
+ 
+ 		if (tcm->tcm_handle && q->handle != tcm->tcm_handle) {
+ 			NL_SET_ERR_MSG(extack, "Invalid handle");
+@@ -1630,7 +1637,9 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
+ 					NL_SET_ERR_MSG(extack, "Failed to find specified qdisc");
+ 					return -ENOENT;
+ 				}
+-				q = qdisc_leaf(p, clid);
++				q = qdisc_leaf(p, clid, extack);
++				if (IS_ERR(q))
++					return PTR_ERR(q);
+ 			} else if (dev_ingress_queue_create(dev)) {
+ 				q = rtnl_dereference(dev_ingress_queue(dev)->qdisc_sleeping);
+ 			}
+diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c
+index 8ee0c07d00e9bb..ffe577bf6b5155 100644
+--- a/net/tipc/topsrv.c
++++ b/net/tipc/topsrv.c
+@@ -704,8 +704,10 @@ static void tipc_topsrv_stop(struct net *net)
+ 	for (id = 0; srv->idr_in_use; id++) {
+ 		con = idr_find(&srv->conn_idr, id);
+ 		if (con) {
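++			/* Hold a reference so the connection survives the unlocked close. */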
++			conn_get(con);
+ 			spin_unlock_bh(&srv->idr_lock);
+ 			tipc_conn_close(con);
++			conn_put(con);
+ 			spin_lock_bh(&srv->idr_lock);
+ 		}
+ 	}
+diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
+index d08f205b33dccf..08565e41b8e924 100644
+--- a/net/vmw_vsock/af_vsock.c
++++ b/net/vmw_vsock/af_vsock.c
+@@ -407,6 +407,8 @@ EXPORT_SYMBOL_GPL(vsock_enqueue_accept);
+ 
+ static bool vsock_use_local_transport(unsigned int remote_cid)
+ {
++	lockdep_assert_held(&vsock_register_mutex);
++
+ 	if (!transport_local)
+ 		return false;
+ 
+@@ -464,6 +466,8 @@ int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk)
+ 
+ 	remote_flags = vsk->remote_addr.svm_flags;
+ 
++	mutex_lock(&vsock_register_mutex);
++
+ 	switch (sk->sk_type) {
+ 	case SOCK_DGRAM:
+ 		new_transport = transport_dgram;
+@@ -479,12 +483,15 @@ int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk)
+ 			new_transport = transport_h2g;
+ 		break;
+ 	default:
+-		return -ESOCKTNOSUPPORT;
++		ret = -ESOCKTNOSUPPORT;
++		goto err;
+ 	}
+ 
+ 	if (vsk->transport) {
+-		if (vsk->transport == new_transport)
+-			return 0;
++		if (vsk->transport == new_transport) {
++			ret = 0;
++			goto err;
++		}
+ 
+ 		/* transport->release() must be called with sock lock acquired.
+ 		 * This path can only be taken during vsock_connect(), where we
+@@ -508,8 +515,16 @@ int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk)
+ 	/* We increase the module refcnt to prevent the transport unloading
+ 	 * while there are open sockets assigned to it.
+ 	 */
+-	if (!new_transport || !try_module_get(new_transport->module))
+-		return -ENODEV;
++	if (!new_transport || !try_module_get(new_transport->module)) {
++		ret = -ENODEV;
++		goto err;
++	}
++
++	/* It's safe to release the mutex after a successful try_module_get().
++	 * Whichever transport `new_transport` points at, it won't go away until
++	 * the last module_put() below or in vsock_deassign_transport().
++	 */
++	mutex_unlock(&vsock_register_mutex);
+ 
+ 	if (sk->sk_type == SOCK_SEQPACKET) {
+ 		if (!new_transport->seqpacket_allow ||
+@@ -528,12 +543,31 @@ int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk)
+ 	vsk->transport = new_transport;
+ 
+ 	return 0;
++err:
++	mutex_unlock(&vsock_register_mutex);
++	return ret;
+ }
+ EXPORT_SYMBOL_GPL(vsock_assign_transport);
+ 
++/*
++ * Provide safe access to static transport_{h2g,g2h,dgram,local} callbacks.
++ * Otherwise we may race with module removal. Do not use on `vsk->transport`.
++ */
++static u32 vsock_registered_transport_cid(const struct vsock_transport **transport)
++{
++	u32 cid = VMADDR_CID_ANY;
++
++	mutex_lock(&vsock_register_mutex);
++	if (*transport)
++		cid = (*transport)->get_local_cid();
++	mutex_unlock(&vsock_register_mutex);
++
++	return cid;
++}
++
+ bool vsock_find_cid(unsigned int cid)
+ {
+-	if (transport_g2h && cid == transport_g2h->get_local_cid())
++	if (cid == vsock_registered_transport_cid(&transport_g2h))
+ 		return true;
+ 
+ 	if (transport_h2g && cid == VMADDR_CID_HOST)
+@@ -2502,18 +2536,19 @@ static long vsock_dev_do_ioctl(struct file *filp,
+ 			       unsigned int cmd, void __user *ptr)
+ {
+ 	u32 __user *p = ptr;
+-	u32 cid = VMADDR_CID_ANY;
+ 	int retval = 0;
++	u32 cid;
+ 
+ 	switch (cmd) {
+ 	case IOCTL_VM_SOCKETS_GET_LOCAL_CID:
+ 		/* To be compatible with the VMCI behavior, we prioritize the
+ 		 * guest CID instead of well-know host CID (VMADDR_CID_HOST).
+ 		 */
+-		if (transport_g2h)
+-			cid = transport_g2h->get_local_cid();
+-		else if (transport_h2g)
+-			cid = transport_h2g->get_local_cid();
++		cid = vsock_registered_transport_cid(&transport_g2h);
++		if (cid == VMADDR_CID_ANY)
++			cid = vsock_registered_transport_cid(&transport_h2g);
++		if (cid == VMADDR_CID_ANY)
++			cid = vsock_registered_transport_cid(&transport_local);
+ 
+ 		if (put_user(cid, p) != 0)
+ 			retval = -EFAULT;
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index c778ffa1c8efd7..4eb44821c70d3d 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -229,6 +229,7 @@ static int validate_beacon_head(const struct nlattr *attr,
+ 	unsigned int len = nla_len(attr);
+ 	const struct element *elem;
+ 	const struct ieee80211_mgmt *mgmt = (void *)data;
++	const struct ieee80211_ext *ext;
+ 	unsigned int fixedlen, hdrlen;
+ 	bool s1g_bcn;
+ 
+@@ -237,8 +238,10 @@ static int validate_beacon_head(const struct nlattr *attr,
+ 
+ 	s1g_bcn = ieee80211_is_s1g_beacon(mgmt->frame_control);
+ 	if (s1g_bcn) {
+-		fixedlen = offsetof(struct ieee80211_ext,
+-				    u.s1g_beacon.variable);
++		ext = (struct ieee80211_ext *)mgmt;
++		fixedlen =
++			offsetof(struct ieee80211_ext, u.s1g_beacon.variable) +
++			ieee80211_s1g_optional_len(ext->frame_control);
+ 		hdrlen = offsetof(struct ieee80211_ext, u.s1g_beacon);
+ 	} else {
+ 		fixedlen = offsetof(struct ieee80211_mgmt,
+diff --git a/net/wireless/util.c b/net/wireless/util.c
+index 18585b1416c662..b115489a846f8c 100644
+--- a/net/wireless/util.c
++++ b/net/wireless/util.c
+@@ -820,6 +820,52 @@ bool ieee80211_is_valid_amsdu(struct sk_buff *skb, u8 mesh_hdr)
+ }
+ EXPORT_SYMBOL(ieee80211_is_valid_amsdu);
+ 
++
++/*
++ * Detects if an MSDU frame was maliciously converted into an A-MSDU
++ * frame by an adversary. This is done by parsing the received frame
++ * as if it were a regular MSDU, even though the A-MSDU flag is set.
++ *
++ * For non-mesh interfaces, detection involves checking whether the
++ * payload, when interpreted as an MSDU, begins with a valid RFC1042
++ * header. This is done by comparing the A-MSDU subheader's destination
++ * address to the start of the RFC1042 header.
++ *
++ * For mesh interfaces, the MSDU includes a 6-byte Mesh Control field
++ * and an optional variable-length Mesh Address Extension field before
++ * the RFC1042 header. The position of the RFC1042 header must therefore
++ * be calculated based on the mesh header length.
++ *
++ * Since this function intentionally parses an A-MSDU frame as an MSDU,
++ * it only assumes that the A-MSDU subframe header is present, and
++ * beyond this it performs its own bounds checks under the assumption
++ * that the frame is instead parsed as a non-aggregated MSDU.
++ */
++static bool
++is_amsdu_aggregation_attack(struct ethhdr *eth, struct sk_buff *skb,
++			    enum nl80211_iftype iftype)
++{
++	int offset;
++
++	/* Non-mesh case can be directly compared */
++	if (iftype != NL80211_IFTYPE_MESH_POINT)
++		return ether_addr_equal(eth->h_dest, rfc1042_header);
++
++	offset = __ieee80211_get_mesh_hdrlen(eth->h_dest[0]);
++	if (offset == 6) {
++		/* Mesh case with empty address extension field */
++		return ether_addr_equal(eth->h_source, rfc1042_header);
++	} else if (offset + ETH_ALEN <= skb->len) {
++		/* Mesh case with non-empty address extension field */
++		u8 temp[ETH_ALEN];
++
++		skb_copy_bits(skb, offset, temp, ETH_ALEN);
++		return ether_addr_equal(temp, rfc1042_header);
++	}
++
++	return false;
++}
++
+ void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
+ 			      const u8 *addr, enum nl80211_iftype iftype,
+ 			      const unsigned int extra_headroom,
+@@ -861,8 +907,10 @@ void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
+ 		/* the last MSDU has no padding */
+ 		if (subframe_len > remaining)
+ 			goto purge;
+-		/* mitigate A-MSDU aggregation injection attacks */
+-		if (ether_addr_equal(hdr.eth.h_dest, rfc1042_header))
++		/* mitigate A-MSDU aggregation injection attacks, to be
++		 * checked when processing first subframe (offset == 0).
++		 */
++		if (offset == 0 && is_amsdu_aggregation_attack(&hdr.eth, skb, iftype))
+ 			goto purge;
+ 
+ 		offset += sizeof(struct ethhdr);
+diff --git a/rust/kernel/init/macros.rs b/rust/kernel/init/macros.rs
+index b7213962a6a5ac..e530028bb9edb6 100644
+--- a/rust/kernel/init/macros.rs
++++ b/rust/kernel/init/macros.rs
+@@ -924,6 +924,7 @@ impl<'__pin, $($impl_generics)*> ::core::marker::Unpin for $name<$($ty_generics)
+         // We prevent this by creating a trait that will be implemented for all types implementing
+         // `Drop`. Additionally we will implement this trait for the struct leading to a conflict,
+         // if it also implements `Drop`
++        #[allow(dead_code)]
+         trait MustNotImplDrop {}
+         #[expect(drop_bounds)]
+         impl<T: ::core::ops::Drop> MustNotImplDrop for T {}
+@@ -932,6 +933,7 @@ impl<$($impl_generics)*> MustNotImplDrop for $name<$($ty_generics)*>
+         // We also take care to prevent users from writing a useless `PinnedDrop` implementation.
+         // They might implement `PinnedDrop` correctly for the struct, but forget to give
+         // `PinnedDrop` as the parameter to `#[pin_data]`.
++        #[allow(dead_code)]
+         #[expect(non_camel_case_types)]
+         trait UselessPinnedDropImpl_you_need_to_specify_PinnedDrop {}
+         impl<T: $crate::init::PinnedDrop>
+diff --git a/scripts/gdb/linux/constants.py.in b/scripts/gdb/linux/constants.py.in
+index fd6bd69c5096ac..f795302ddfa8b3 100644
+--- a/scripts/gdb/linux/constants.py.in
++++ b/scripts/gdb/linux/constants.py.in
+@@ -20,6 +20,7 @@
+ #include <linux/of_fdt.h>
+ #include <linux/page_ext.h>
+ #include <linux/radix-tree.h>
++#include <linux/maple_tree.h>
+ #include <linux/slab.h>
+ #include <linux/threads.h>
+ #include <linux/vmalloc.h>
+@@ -93,6 +94,12 @@ LX_GDBPARSED(RADIX_TREE_MAP_SIZE)
+ LX_GDBPARSED(RADIX_TREE_MAP_SHIFT)
+ LX_GDBPARSED(RADIX_TREE_MAP_MASK)
+ 
++/* linux/maple_tree.h */
++LX_VALUE(MAPLE_NODE_SLOTS)
++LX_VALUE(MAPLE_RANGE64_SLOTS)
++LX_VALUE(MAPLE_ARANGE64_SLOTS)
++LX_GDBPARSED(MAPLE_NODE_MASK)
++
+ /* linux/vmalloc.h */
+ LX_VALUE(VM_IOREMAP)
+ LX_VALUE(VM_ALLOC)
+diff --git a/scripts/gdb/linux/interrupts.py b/scripts/gdb/linux/interrupts.py
+index 616a5f26377a8c..f4f715a8f0e36e 100644
+--- a/scripts/gdb/linux/interrupts.py
++++ b/scripts/gdb/linux/interrupts.py
+@@ -7,7 +7,7 @@ import gdb
+ from linux import constants
+ from linux import cpus
+ from linux import utils
+-from linux import radixtree
++from linux import mapletree
+ 
+ irq_desc_type = utils.CachedType("struct irq_desc")
+ 
+@@ -23,12 +23,12 @@ def irqd_is_level(desc):
+ def show_irq_desc(prec, irq):
+     text = ""
+ 
+-    desc = radixtree.lookup(gdb.parse_and_eval("&irq_desc_tree"), irq)
++    desc = mapletree.mtree_load(gdb.parse_and_eval("&sparse_irqs"), irq)
+     if desc is None:
+         return text
+ 
+-    desc = desc.cast(irq_desc_type.get_type())
+-    if desc is None:
++    desc = desc.cast(irq_desc_type.get_type().pointer())
++    if desc == 0:
+         return text
+ 
+     if irq_settings_is_hidden(desc):
+@@ -110,7 +110,7 @@ def x86_show_mce(prec, var, pfx, desc):
+     pvar = gdb.parse_and_eval(var)
+     text = "%*s: " % (prec, pfx)
+     for cpu in cpus.each_online_cpu():
+-        text += "%10u " % (cpus.per_cpu(pvar, cpu))
++        text += "%10u " % (cpus.per_cpu(pvar, cpu).dereference())
+     text += "  %s\n" % (desc)
+     return text
+ 
+@@ -142,7 +142,7 @@ def x86_show_interupts(prec):
+ 
+     if constants.LX_CONFIG_X86_MCE:
+         text += x86_show_mce(prec, "&mce_exception_count", "MCE", "Machine check exceptions")
+-        text == x86_show_mce(prec, "&mce_poll_count", "MCP", "Machine check polls")
++        text += x86_show_mce(prec, "&mce_poll_count", "MCP", "Machine check polls")
+ 
+     text += show_irq_err_count(prec)
+ 
+@@ -221,8 +221,8 @@ class LxInterruptList(gdb.Command):
+             gdb.write("CPU%-8d" % cpu)
+         gdb.write("\n")
+ 
+-        if utils.gdb_eval_or_none("&irq_desc_tree") is None:
+-            return
++        if utils.gdb_eval_or_none("&sparse_irqs") is None:
++            raise gdb.GdbError("Unable to find the sparse IRQ tree, is CONFIG_SPARSE_IRQ enabled?")
+ 
+         for irq in range(nr_irqs):
+             gdb.write(show_irq_desc(prec, irq))
+diff --git a/scripts/gdb/linux/mapletree.py b/scripts/gdb/linux/mapletree.py
+new file mode 100644
+index 00000000000000..d52d51c0a03fcb
+--- /dev/null
++++ b/scripts/gdb/linux/mapletree.py
+@@ -0,0 +1,252 @@
++# SPDX-License-Identifier: GPL-2.0
++#
++#  Maple tree helpers
++#
++# Copyright (c) 2025 Broadcom
++#
++# Authors:
++#  Florian Fainelli <florian.fainelli@broadcom.com>
++
++import gdb
++
++from linux import utils
++from linux import constants
++from linux import xarray
++
++maple_tree_root_type = utils.CachedType("struct maple_tree")
++maple_node_type = utils.CachedType("struct maple_node")
++maple_enode_type = utils.CachedType("void")
++
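++# Node type codes, mirroring enum maple_type in include/linux/maple_tree.h.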
++maple_dense = 0
++maple_leaf_64 = 1
++maple_range_64 = 2
++maple_arange_64 = 3
++
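++# Reduced Python port of the kernel's ma_state walker; only the pieces
++# needed for a read-only mtree_load() lookup are implemented.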
++class Mas(object):
++    ma_active = 0
++    ma_start = 1
++    ma_root = 2
++    ma_none = 3
++    ma_pause = 4
++    ma_overflow = 5
++    ma_underflow = 6
++    ma_error = 7
++
++    def __init__(self, mt, first, end):
++        if mt.type == maple_tree_root_type.get_type().pointer():
++            mt = mt.dereference()
++        elif mt.type != maple_tree_root_type.get_type():
++            raise gdb.GdbError("must be {} not {}"
++                               .format(maple_tree_root_type.get_type().pointer(), mt.type))
++        self.tree = mt
++        self.index = first
++        self.last = end
++        self.node = None
++        self.status = self.ma_start
++        self.min = 0
++        self.max = -1
++
++    def is_start(self):
++        # mas_is_start()
++        return self.status == self.ma_start
++
++    def is_ptr(self):
++        # mas_is_ptr()
++        return self.status == self.ma_root
++
++    def is_none(self):
++        # mas_is_none()
++        return self.status == self.ma_none
++
++    def root(self):
++        # mas_root()
++        return self.tree['ma_root'].cast(maple_enode_type.get_type().pointer())
++
++    def start(self):
++        # mas_start()
++        if self.is_start() is False:
++            return None
++
++        self.min = 0
++        self.max = ~0
++
++        while True:
++            self.depth = 0
++            root = self.root()
++            if xarray.xa_is_node(root):
++                self.depth = 0
++                self.status = self.ma_active
++                self.node = mte_safe_root(root)
++                self.offset = 0
++                if mte_dead_node(self.node) is True:
++                    continue
++
++                return None
++
++            self.node = None
++            # Empty tree
++            if root is None:
++                self.status = self.ma_none
++                self.offset = constants.LX_MAPLE_NODE_SLOTS
++                return None
++
++            # Single entry tree
++            self.status = self.ma_root
++            self.offset = constants.LX_MAPLE_NODE_SLOTS
++
++            if self.index != 0:
++                return None
++
++            return root
++
++        return None
++
++    def reset(self):
++        # mas_reset()
++        self.status = self.ma_start
++        self.node = None
++
++def mte_safe_root(node):
++    if node.type != maple_enode_type.get_type().pointer():
++        raise gdb.GdbError("{} must be {} not {}"
++                           .format(mte_safe_root.__name__, maple_enode_type.get_type().pointer(), node.type))
++    ulong_type = utils.get_ulong_type()
++    indirect_ptr = node.cast(ulong_type) & ~0x2
++    val = indirect_ptr.cast(maple_enode_type.get_type().pointer())
++    return val
++
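++# The node type is packed into bits 3..6 of the encoded (tagged) pointer.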
++def mte_node_type(entry):
++    ulong_type = utils.get_ulong_type()
++    val = None
++    if entry.type == maple_enode_type.get_type().pointer():
++        val = entry.cast(ulong_type)
++    elif entry.type == ulong_type:
++        val = entry
++    else:
++        raise gdb.GdbError("{} must be {} not {}"
++                           .format(mte_node_type.__name__, maple_enode_type.get_type().pointer(), entry.type))
++    return (val >> 0x3) & 0xf
++
++def ma_dead_node(node):
++    if node.type != maple_node_type.get_type().pointer():
++        raise gdb.GdbError("{} must be {} not {}"
++                           .format(ma_dead_node.__name__, maple_node_type.get_type().pointer(), node.type))
++    ulong_type = utils.get_ulong_type()
++    parent = node['parent']
++    indirect_ptr = parent.cast(ulong_type) & ~constants.LX_MAPLE_NODE_MASK
++    return indirect_ptr == node
++
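++# Clearing the MAPLE_NODE_MASK tag bits recovers the real struct maple_node
++# address from an encoded node pointer.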
++def mte_to_node(enode):
++    ulong_type = utils.get_ulong_type()
++    if enode.type == maple_enode_type.get_type().pointer():
++        indirect_ptr = enode.cast(ulong_type)
++    elif enode.type == ulong_type:
++        indirect_ptr = enode
++    else:
++        raise gdb.GdbError("{} must be {} not {}"
++                           .format(mte_to_node.__name__, maple_enode_type.get_type().pointer(), enode.type))
++    indirect_ptr = indirect_ptr & ~constants.LX_MAPLE_NODE_MASK
++    return indirect_ptr.cast(maple_node_type.get_type().pointer())
++
++def mte_dead_node(enode):
++    if enode.type != maple_enode_type.get_type().pointer():
++        raise gdb.GdbError("{} must be {} not {}"
++                           .format(mte_dead_node.__name__, maple_enode_type.get_type().pointer(), enode.type))
++    node = mte_to_node(enode)
++    return ma_dead_node(node)
++
++def ma_is_leaf(tp):
++    result = tp < maple_range_64
++    return result
++
++def mt_pivots(t):
++    if t == maple_dense:
++        return 0
++    elif t == maple_leaf_64 or t == maple_range_64:
++        return constants.LX_MAPLE_RANGE64_SLOTS - 1
++    elif t == maple_arange_64:
++        return constants.LX_MAPLE_ARANGE64_SLOTS - 1
++
++def ma_pivots(node, t):
++    if node.type != maple_node_type.get_type().pointer():
++        raise gdb.GdbError("{}: must be {} not {}"
++                           .format(ma_pivots.__name__, maple_node_type.get_type().pointer(), node.type))
++    if t == maple_arange_64:
++        return node['ma64']['pivot']
++    elif t == maple_leaf_64 or t == maple_range_64:
++        return node['mr64']['pivot']
++    else:
++        return None
++
++def ma_slots(node, tp):
++    if node.type != maple_node_type.get_type().pointer():
++        raise gdb.GdbError("{}: must be {} not {}"
++                           .format(ma_slots.__name__, maple_node_type.get_type().pointer(), node.type))
++    if tp == maple_arange_64:
++        return node['ma64']['slot']
++    elif tp == maple_range_64 or tp == maple_leaf_64:
++        return node['mr64']['slot']
++    elif tp == maple_dense:
++        return node['slot']
++    else:
++        return None
++
++def mt_slot(mt, slots, offset):
++    ulong_type = utils.get_ulong_type()
++    return slots[offset].cast(ulong_type)
++
++def mtree_lookup_walk(mas):
++    # Descend from the current node to the leaf slot covering mas.index.
++    n = mas.node
++
++    while True:
++        node = mte_to_node(n)
++        tp = mte_node_type(n)
++        pivots = ma_pivots(node, tp)
++        end = mt_pivots(tp)
++        offset = 0
++        while True:
++            if pivots[offset] >= mas.index:
++                break
++            if offset >= end:
++                break
++            offset += 1
++
++        slots = ma_slots(node, tp)
++        n = mt_slot(mas.tree, slots, offset)
++        # Dead node: the tree changed under us; reset so the caller retries.
++        if ma_dead_node(node) is True:
++            mas.reset()
++            return None
++
++        if ma_is_leaf(tp) is True:
++            break
++
++    return n
++
++def mtree_load(mt, index):
++    # Python equivalent of the kernel's mtree_load() read-side lookup.
++    # MT_STATE(...)
++    mas = Mas(mt, index, index)
++    entry = None
++
++    while True:
++        entry = mas.start()
++        if mas.is_none():
++            return None
++
++        if mas.is_ptr():
++            if index != 0:
++                entry = None
++            return entry
++
++        entry = mtree_lookup_walk(mas)
++        if entry is None and mas.is_start():
++            continue
++        else:
++            break
++
++    if xarray.xa_is_zero(entry):
++        return None
++
++    return entry
+diff --git a/scripts/gdb/linux/xarray.py b/scripts/gdb/linux/xarray.py
+new file mode 100644
+index 00000000000000..f4477b5def75fc
+--- /dev/null
++++ b/scripts/gdb/linux/xarray.py
+@@ -0,0 +1,28 @@
++# SPDX-License-Identifier: GPL-2.0
++#
++#  Xarray helpers
++#
++# Copyright (c) 2025 Broadcom
++#
++# Authors:
++#  Florian Fainelli <florian.fainelli@broadcom.com>
++
++import gdb
++
++from linux import utils
++from linux import constants
++
++def xa_is_internal(entry):
++    ulong_type = utils.get_ulong_type()
++    return ((entry.cast(ulong_type) & 3) == 2)
++
++def xa_mk_internal(v):
++    return ((v << 2) | 2)
++
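++# xa_mk_internal(257) is the kernel's XA_ZERO_ENTRY; readers must treat it
++# as NULL.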
++def xa_is_zero(entry):
++    ulong_type = utils.get_ulong_type()
++    return entry.cast(ulong_type) == xa_mk_internal(257)
++
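++# Internal entries above 4096 are struct xa_node pointers; smaller values
++# encode special marks such as retry and zero entries.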
++def xa_is_node(entry):
++    ulong_type = utils.get_ulong_type()
++    return xa_is_internal(entry) and (entry.cast(ulong_type) > 4096)
+diff --git a/sound/isa/ad1816a/ad1816a.c b/sound/isa/ad1816a/ad1816a.c
+index 99006dc4777e91..5c9e2d41d9005f 100644
+--- a/sound/isa/ad1816a/ad1816a.c
++++ b/sound/isa/ad1816a/ad1816a.c
+@@ -98,7 +98,7 @@ static int snd_card_ad1816a_pnp(int dev, struct pnp_card_link *card,
+ 	pdev = pnp_request_card_device(card, id->devs[1].id, NULL);
+ 	if (pdev == NULL) {
+ 		mpu_port[dev] = -1;
+-		dev_warn(&pdev->dev, "MPU401 device busy, skipping.\n");
++		pr_warn("MPU401 device busy, skipping.\n");
+ 		return 0;
+ 	}
+ 
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 30e9e26c5b2a7d..e98823bd3634f8 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -2658,6 +2658,7 @@ static const struct hda_quirk alc882_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX),
+ 	SND_PCI_QUIRK(0x1558, 0x3702, "Clevo X370SN[VW]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ 	SND_PCI_QUIRK(0x1558, 0x50d3, "Clevo PC50[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
++	SND_PCI_QUIRK(0x1558, 0x5802, "Clevo X58[05]WN[RST]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ 	SND_PCI_QUIRK(0x1558, 0x65d1, "Clevo PB51[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ 	SND_PCI_QUIRK(0x1558, 0x65d2, "Clevo PB51R[CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ 	SND_PCI_QUIRK(0x1558, 0x65e1, "Clevo PB51[ED][DF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+@@ -6611,6 +6612,7 @@ static void alc294_fixup_bass_speaker_15(struct hda_codec *codec,
+ 	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+ 		static const hda_nid_t conn[] = { 0x02, 0x03 };
+ 		snd_hda_override_conn_list(codec, 0x15, ARRAY_SIZE(conn), conn);
++		snd_hda_gen_add_micmute_led_cdev(codec, NULL);
+ 	}
+ }
+ 
+@@ -10654,6 +10656,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8975, "HP EliteBook x360 840 Aero G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x897d, "HP mt440 Mobile Thin Client U74", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8981, "HP Elite Dragonfly G3", ALC245_FIXUP_CS35L41_SPI_4),
++	SND_PCI_QUIRK(0x103c, 0x898a, "HP Pavilion 15-eg100", ALC287_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x898e, "HP EliteBook 835 G9", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x103c, 0x898f, "HP EliteBook 835 G9", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x103c, 0x8991, "HP EliteBook 845 G9", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
+@@ -11044,6 +11047,8 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1558, 0x14a1, "Clevo L141MU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0x2624, "Clevo L240TU", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0x28c1, "Clevo V370VND", ALC2XX_FIXUP_HEADSET_MIC),
++	SND_PCI_QUIRK(0x1558, 0x35a1, "Clevo V3[56]0EN[CDE]", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1558, 0x35b1, "Clevo V3[57]0WN[MNP]Q", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0x4018, "Clevo NV40M[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0x4019, "Clevo NV40MZ", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0x4020, "Clevo NV40MB", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+@@ -11071,6 +11076,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1558, 0x51b1, "Clevo NS50AU", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0x51b3, "Clevo NS70AU", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0x5630, "Clevo NP50RNJS", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1558, 0x5700, "Clevo X560WN[RST]", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0x70a1, "Clevo NB70T[HJK]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0x70b3, "Clevo NK70SB", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0x70f2, "Clevo NH79EPY", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+@@ -11110,6 +11116,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1558, 0xa650, "Clevo NP[567]0SN[CD]", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0xa671, "Clevo NP70SN[CDE]", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0xa741, "Clevo V54x_6x_TNE", ALC245_FIXUP_CLEVO_NOISY_MIC),
++	SND_PCI_QUIRK(0x1558, 0xa743, "Clevo V54x_6x_TU", ALC245_FIXUP_CLEVO_NOISY_MIC),
+ 	SND_PCI_QUIRK(0x1558, 0xa763, "Clevo V54x_6x_TU", ALC245_FIXUP_CLEVO_NOISY_MIC),
+ 	SND_PCI_QUIRK(0x1558, 0xb018, "Clevo NP50D[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0xb019, "Clevo NH77D[BE]Q", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
+index 723cb7bc128516..1689b6b22598e2 100644
+--- a/sound/soc/amd/yc/acp6x-mach.c
++++ b/sound/soc/amd/yc/acp6x-mach.c
+@@ -346,6 +346,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "83Q3"),
+ 		}
+ 	},
++	{
++		.driver_data = &acp6x_card,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "RB"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Nitro ANV15-41"),
++		}
++	},
+ 	{
+ 		.driver_data = &acp6x_card,
+ 		.matches = {
+diff --git a/sound/soc/codecs/cs35l56-shared.c b/sound/soc/codecs/cs35l56-shared.c
+index 195841a567c3d4..9007484b31c71f 100644
+--- a/sound/soc/codecs/cs35l56-shared.c
++++ b/sound/soc/codecs/cs35l56-shared.c
+@@ -811,7 +811,7 @@ int cs35l56_hw_init(struct cs35l56_base *cs35l56_base)
+ 		break;
+ 	default:
+ 		dev_err(cs35l56_base->dev, "Unknown device %x\n", devid);
+-		return ret;
++		return -ENODEV;
+ 	}
+ 
+ 	cs35l56_base->type = devid & 0xFF;
+diff --git a/sound/soc/fsl/fsl_asrc.c b/sound/soc/fsl/fsl_asrc.c
+index bd5c46d763c0ff..ffd4a6ca5f3cb2 100644
+--- a/sound/soc/fsl/fsl_asrc.c
++++ b/sound/soc/fsl/fsl_asrc.c
+@@ -517,7 +517,8 @@ static int fsl_asrc_config_pair(struct fsl_asrc_pair *pair, bool use_ideal_rate)
+ 	regmap_update_bits(asrc->regmap, REG_ASRCTR,
+ 			   ASRCTR_ATSi_MASK(index), ASRCTR_ATS(index));
+ 	regmap_update_bits(asrc->regmap, REG_ASRCTR,
+-			   ASRCTR_USRi_MASK(index), 0);
++			   ASRCTR_IDRi_MASK(index) | ASRCTR_USRi_MASK(index),
++			   ASRCTR_USR(index));
+ 
+ 	/* Set the input and output clock sources */
+ 	regmap_update_bits(asrc->regmap, REG_ASRCSR,
+diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
+index c5efbceb06d1fc..25d4b27f5b7662 100644
+--- a/sound/soc/fsl/fsl_sai.c
++++ b/sound/soc/fsl/fsl_sai.c
+@@ -771,13 +771,15 @@ static void fsl_sai_config_disable(struct fsl_sai *sai, int dir)
+ 	 * anymore. Add software reset to fix this issue.
+ 	 * This is a hardware bug, and will be fix in the
+ 	 * next sai version.
++	 *
++	 * In consumer mode, this can happen even after a
++	 * single open/close, especially if both tx and rx
++	 * are running concurrently.
+ 	 */
+-	if (!sai->is_consumer_mode[tx]) {
+-		/* Software Reset */
+-		regmap_write(sai->regmap, FSL_SAI_xCSR(tx, ofs), FSL_SAI_CSR_SR);
+-		/* Clear SR bit to finish the reset */
+-		regmap_write(sai->regmap, FSL_SAI_xCSR(tx, ofs), 0);
+-	}
++	/* Software Reset */
++	regmap_write(sai->regmap, FSL_SAI_xCSR(tx, ofs), FSL_SAI_CSR_SR);
++	/* Clear SR bit to finish the reset */
++	regmap_write(sai->regmap, FSL_SAI_xCSR(tx, ofs), 0);
+ }
+ 
+ static int fsl_sai_trigger(struct snd_pcm_substream *substream, int cmd,
+diff --git a/sound/soc/intel/boards/Kconfig b/sound/soc/intel/boards/Kconfig
+index cc10ae58b0c7ed..8dee46abf346d6 100644
+--- a/sound/soc/intel/boards/Kconfig
++++ b/sound/soc/intel/boards/Kconfig
+@@ -42,6 +42,7 @@ config SND_SOC_INTEL_SOF_NUVOTON_COMMON
+ 	tristate
+ 
+ config SND_SOC_INTEL_SOF_BOARD_HELPERS
++	select SND_SOC_ACPI_INTEL_MATCH
+ 	tristate
+ 
+ if SND_SOC_INTEL_CATPT
+diff --git a/sound/soc/intel/common/Makefile b/sound/soc/intel/common/Makefile
+index 91e146e2487da2..a9a740e2496984 100644
+--- a/sound/soc/intel/common/Makefile
++++ b/sound/soc/intel/common/Makefile
+@@ -14,7 +14,7 @@ snd-soc-acpi-intel-match-y := soc-acpi-intel-byt-match.o soc-acpi-intel-cht-matc
+ 	soc-acpi-intel-lnl-match.o \
+ 	soc-acpi-intel-ptl-match.o \
+ 	soc-acpi-intel-hda-match.o \
+-	soc-acpi-intel-sdw-mockup-match.o
++	soc-acpi-intel-sdw-mockup-match.o sof-function-topology-lib.o
+ 
+ snd-soc-acpi-intel-match-y += soc-acpi-intel-ssp-common.o
+ 
+diff --git a/sound/soc/intel/common/soc-acpi-intel-arl-match.c b/sound/soc/intel/common/soc-acpi-intel-arl-match.c
+index 24d850df77ca8e..1ad704ca2c5f2b 100644
+--- a/sound/soc/intel/common/soc-acpi-intel-arl-match.c
++++ b/sound/soc/intel/common/soc-acpi-intel-arl-match.c
+@@ -8,6 +8,7 @@
+ #include <sound/soc-acpi.h>
+ #include <sound/soc-acpi-intel-match.h>
+ #include <sound/soc-acpi-intel-ssp-common.h>
++#include "sof-function-topology-lib.h"
+ 
+ static const struct snd_soc_acpi_endpoint single_endpoint = {
+ 	.num = 0,
+@@ -138,7 +139,7 @@ static const struct snd_soc_acpi_adr_device cs35l56_2_r1_adr[] = {
+ 	},
+ };
+ 
+-static const struct snd_soc_acpi_adr_device cs35l56_3_l1_adr[] = {
++static const struct snd_soc_acpi_adr_device cs35l56_3_l3_adr[] = {
+ 	{
+ 		.adr = 0x00033301fa355601ull,
+ 		.num_endpoints = 1,
+@@ -147,6 +148,24 @@ static const struct snd_soc_acpi_adr_device cs35l56_3_l1_adr[] = {
+ 	},
+ };
+ 
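++/*
++ * SoundWire _ADR encoding: link ID in bits 51:48, unique ID in bits 43:40,
++ * followed by the Cirrus manufacturer ID (0x01fa) and CS35L56 part ID
++ * (0x3556).
++ */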
++static const struct snd_soc_acpi_adr_device cs35l56_2_r3_adr[] = {
++	{
++		.adr = 0x00023301fa355601ull,
++		.num_endpoints = 1,
++		.endpoints = &spk_r_endpoint,
++		.name_prefix = "AMP2"
++	},
++};
++
++static const struct snd_soc_acpi_adr_device cs35l56_3_l1_adr[] = {
++	{
++		.adr = 0x00033101fa355601ull,
++		.num_endpoints = 1,
++		.endpoints = &spk_l_endpoint,
++		.name_prefix = "AMP1"
++	},
++};
++
+ static const struct snd_soc_acpi_endpoint cs42l43_endpoints[] = {
+ 	{ /* Jack Playback Endpoint */
+ 		.num = 0,
+@@ -304,6 +323,25 @@ static const struct snd_soc_acpi_link_adr arl_cs42l43_l0_cs35l56_2_l23[] = {
+ 		.num_adr = ARRAY_SIZE(cs35l56_2_r1_adr),
+ 		.adr_d = cs35l56_2_r1_adr,
+ 	},
++	{
++		.mask = BIT(3),
++		.num_adr = ARRAY_SIZE(cs35l56_3_l3_adr),
++		.adr_d = cs35l56_3_l3_adr,
++	},
++	{}
++};
++
++static const struct snd_soc_acpi_link_adr arl_cs42l43_l0_cs35l56_3_l23[] = {
++	{
++		.mask = BIT(0),
++		.num_adr = ARRAY_SIZE(cs42l43_0_adr),
++		.adr_d = cs42l43_0_adr,
++	},
++	{
++		.mask = BIT(2),
++		.num_adr = ARRAY_SIZE(cs35l56_2_r3_adr),
++		.adr_d = cs35l56_2_r3_adr,
++	},
+ 	{
+ 		.mask = BIT(3),
+ 		.num_adr = ARRAY_SIZE(cs35l56_3_l1_adr),
+@@ -399,36 +437,49 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_arl_sdw_machines[] = {
+ 		.links = arl_cs42l43_l0_cs35l56_l23,
+ 		.drv_name = "sof_sdw",
+ 		.sof_tplg_filename = "sof-arl-cs42l43-l0-cs35l56-l23.tplg",
++		.get_function_tplg_files = sof_sdw_get_tplg_files,
+ 	},
+ 	{
+ 		.link_mask = BIT(0) | BIT(2) | BIT(3),
+ 		.links = arl_cs42l43_l0_cs35l56_2_l23,
+ 		.drv_name = "sof_sdw",
+ 		.sof_tplg_filename = "sof-arl-cs42l43-l0-cs35l56-l23.tplg",
++		.get_function_tplg_files = sof_sdw_get_tplg_files,
++	},
++	{
++		.link_mask = BIT(0) | BIT(2) | BIT(3),
++		.links = arl_cs42l43_l0_cs35l56_3_l23,
++		.drv_name = "sof_sdw",
++		.sof_tplg_filename = "sof-arl-cs42l43-l0-cs35l56-l23.tplg",
++		.get_function_tplg_files = sof_sdw_get_tplg_files,
+ 	},
+ 	{
+ 		.link_mask = BIT(0) | BIT(2),
+ 		.links = arl_cs42l43_l0_cs35l56_l2,
+ 		.drv_name = "sof_sdw",
+ 		.sof_tplg_filename = "sof-arl-cs42l43-l0-cs35l56-l2.tplg",
++		.get_function_tplg_files = sof_sdw_get_tplg_files,
+ 	},
+ 	{
+ 		.link_mask = BIT(0),
+ 		.links = arl_cs42l43_l0,
+ 		.drv_name = "sof_sdw",
+ 		.sof_tplg_filename = "sof-arl-cs42l43-l0.tplg",
+-	},
+-	{
+-		.link_mask = BIT(2),
+-		.links = arl_cs42l43_l2,
+-		.drv_name = "sof_sdw",
+-		.sof_tplg_filename = "sof-arl-cs42l43-l2.tplg",
++		.get_function_tplg_files = sof_sdw_get_tplg_files,
+ 	},
+ 	{
+ 		.link_mask = BIT(2) | BIT(3),
+ 		.links = arl_cs42l43_l2_cs35l56_l3,
+ 		.drv_name = "sof_sdw",
+ 		.sof_tplg_filename = "sof-arl-cs42l43-l2-cs35l56-l3.tplg",
++		.get_function_tplg_files = sof_sdw_get_tplg_files,
++	},
++	{
++		.link_mask = BIT(2),
++		.links = arl_cs42l43_l2,
++		.drv_name = "sof_sdw",
++		.sof_tplg_filename = "sof-arl-cs42l43-l2.tplg",
++		.get_function_tplg_files = sof_sdw_get_tplg_files,
+ 	},
+ 	{
+ 		.link_mask = 0x1, /* link0 required */
+@@ -447,6 +498,7 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_arl_sdw_machines[] = {
+ 		.links = arl_rt722_l0_rt1320_l2,
+ 		.drv_name = "sof_sdw",
+ 		.sof_tplg_filename = "sof-arl-rt722-l0_rt1320-l2.tplg",
++		.get_function_tplg_files = sof_sdw_get_tplg_files,
+ 	},
+ 	{},
+ };
+diff --git a/sound/soc/intel/common/sof-function-topology-lib.c b/sound/soc/intel/common/sof-function-topology-lib.c
+new file mode 100644
+index 00000000000000..3cc81dcf047e3a
+--- /dev/null
++++ b/sound/soc/intel/common/sof-function-topology-lib.c
+@@ -0,0 +1,136 @@
++// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
++//
++// This file is provided under a dual BSD/GPLv2 license.  When using or
++// redistributing this file, you may do so under either license.
++//
++// Copyright(c) 2025 Intel Corporation.
++//
++
++#include <linux/device.h>
++#include <linux/errno.h>
++#include <linux/firmware.h>
++#include <sound/soc.h>
++#include <sound/soc-acpi.h>
++#include "sof-function-topology-lib.h"
++
++enum tplg_device_id {
++	TPLG_DEVICE_SDCA_JACK,
++	TPLG_DEVICE_SDCA_AMP,
++	TPLG_DEVICE_SDCA_MIC,
++	TPLG_DEVICE_INTEL_PCH_DMIC,
++	TPLG_DEVICE_HDMI,
++	TPLG_DEVICE_MAX
++};
++
++#define SDCA_DEVICE_MASK (BIT(TPLG_DEVICE_SDCA_JACK) | BIT(TPLG_DEVICE_SDCA_AMP) | \
++			  BIT(TPLG_DEVICE_SDCA_MIC))
++
++#define SOF_INTEL_PLATFORM_NAME_MAX 4
++
++int sof_sdw_get_tplg_files(struct snd_soc_card *card, const struct snd_soc_acpi_mach *mach,
++			   const char *prefix, const char ***tplg_files)
++{
++	struct snd_soc_acpi_mach_params mach_params = mach->mach_params;
++	struct snd_soc_dai_link *dai_link;
++	const struct firmware *fw;
++	char platform[SOF_INTEL_PLATFORM_NAME_MAX];
++	unsigned long tplg_mask = 0;
++	int tplg_num = 0;
++	int tplg_dev;
++	int ret;
++	int i;
++
++	ret = sscanf(mach->sof_tplg_filename, "sof-%3s-*.tplg", platform);
++	if (ret != 1) {
++		dev_err(card->dev, "Invalid platform name %s of tplg %s\n",
++			platform, mach->sof_tplg_filename);
++		return -EINVAL;
++	}
++
++	for_each_card_prelinks(card, i, dai_link) {
++		char *tplg_dev_name;
++
++		dev_dbg(card->dev, "dai_link %s id %d\n", dai_link->name, dai_link->id);
++		if (strstr(dai_link->name, "SimpleJack")) {
++			tplg_dev = TPLG_DEVICE_SDCA_JACK;
++			tplg_dev_name = "sdca-jack";
++		} else if (strstr(dai_link->name, "SmartAmp")) {
++			tplg_dev = TPLG_DEVICE_SDCA_AMP;
++			tplg_dev_name = devm_kasprintf(card->dev, GFP_KERNEL,
++						       "sdca-%damp", dai_link->num_cpus);
++			if (!tplg_dev_name)
++				return -ENOMEM;
++		} else if (strstr(dai_link->name, "SmartMic")) {
++			tplg_dev = TPLG_DEVICE_SDCA_MIC;
++			tplg_dev_name = "sdca-mic";
++		} else if (strstr(dai_link->name, "dmic")) {
++			switch (mach_params.dmic_num) {
++			case 2:
++				tplg_dev_name = "dmic-2ch";
++				break;
++			case 4:
++				tplg_dev_name = "dmic-4ch";
++				break;
++			default:
++				dev_warn(card->dev,
++					 "unsupported number of dmics: %d\n",
++					 mach_params.dmic_num);
++				continue;
++			}
++			tplg_dev = TPLG_DEVICE_INTEL_PCH_DMIC;
++		} else if (strstr(dai_link->name, "iDisp")) {
++			tplg_dev = TPLG_DEVICE_HDMI;
++			tplg_dev_name = "hdmi-pcm5";
++
++		} else {
++			/* The dai link is not supported by separated tplg yet */
++			dev_dbg(card->dev,
++				"dai_link %s is not supported by separated tplg yet\n",
++				dai_link->name);
++			return 0;
++		}
++		if (tplg_mask & BIT(tplg_dev))
++			continue;
++
++		tplg_mask |= BIT(tplg_dev);
++
++		/*
++		 * The tplg file naming rule is sof-<platform>-<function>-id<BE id number>.tplg
++		 * where <platform> is only required for the DMIC function as the nhlt blob
++		 * is platform dependent.
++		 */
++		switch (tplg_dev) {
++		case TPLG_DEVICE_INTEL_PCH_DMIC:
++			(*tplg_files)[tplg_num] = devm_kasprintf(card->dev, GFP_KERNEL,
++								 "%s/sof-%s-%s-id%d.tplg",
++								 prefix, platform,
++								 tplg_dev_name, dai_link->id);
++			break;
++		default:
++			(*tplg_files)[tplg_num] = devm_kasprintf(card->dev, GFP_KERNEL,
++								 "%s/sof-%s-id%d.tplg",
++								 prefix, tplg_dev_name,
++								 dai_link->id);
++			break;
++		}
++		if (!(*tplg_files)[tplg_num])
++			return -ENOMEM;
++		tplg_num++;
++	}
++
++	dev_dbg(card->dev, "tplg_mask %#lx tplg_num %d\n", tplg_mask, tplg_num);
++
++	/* Check presence of sub-topologies */
++	for (i = 0; i < tplg_num; i++) {
++		ret = firmware_request_nowarn(&fw, (*tplg_files)[i], card->dev);
++		if (!ret) {
++			release_firmware(fw);
++		} else {
++			dev_dbg(card->dev, "Failed to open topology file: %s\n", (*tplg_files)[i]);
++			return 0;
++		}
++	}
++
++	return tplg_num;
++}
++
+diff --git a/sound/soc/intel/common/sof-function-topology-lib.h b/sound/soc/intel/common/sof-function-topology-lib.h
+new file mode 100644
+index 00000000000000..e7d0c39d07883c
+--- /dev/null
++++ b/sound/soc/intel/common/sof-function-topology-lib.h
+@@ -0,0 +1,15 @@
++/* SPDX-License-Identifier: GPL-2.0-only */
++/*
++ * soc-acpi-intel-get-tplg.h - get-tplg-files ops
++ *
++ * Copyright (c) 2025, Intel Corporation.
++ *
++ */
++
++#ifndef _SND_SOC_ACPI_INTEL_GET_TPLG_H
++#define _SND_SOC_ACPI_INTEL_GET_TPLG_H
++
++int sof_sdw_get_tplg_files(struct snd_soc_card *card, const struct snd_soc_acpi_mach *mach,
++			   const char *prefix, const char ***tplg_files);
++
++#endif
+diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c
+index 9c8f79e55ec5d8..624598c9e2df80 100644
+--- a/sound/soc/sof/intel/hda.c
++++ b/sound/soc/sof/intel/hda.c
+@@ -1209,11 +1209,11 @@ static int check_tplg_quirk_mask(struct snd_soc_acpi_mach *mach)
+ 	return 0;
+ }
+ 
+-static char *remove_file_ext(const char *tplg_filename)
++static char *remove_file_ext(struct device *dev, const char *tplg_filename)
+ {
+ 	char *filename, *tmp;
+ 
+-	filename = kstrdup(tplg_filename, GFP_KERNEL);
++	filename = devm_kstrdup(dev, tplg_filename, GFP_KERNEL);
+ 	if (!filename)
+ 		return NULL;
+ 
+@@ -1297,7 +1297,7 @@ struct snd_soc_acpi_mach *hda_machine_select(struct snd_sof_dev *sdev)
+ 		 */
+ 		if (!sof_pdata->tplg_filename) {
+ 			/* remove file extension if it exists */
+-			tplg_filename = remove_file_ext(mach->sof_tplg_filename);
++			tplg_filename = remove_file_ext(sdev->dev, mach->sof_tplg_filename);
+ 			if (!tplg_filename)
+ 				return NULL;
+ 
+diff --git a/tools/arch/x86/include/asm/msr-index.h b/tools/arch/x86/include/asm/msr-index.h
+index 3ae84c3b8e6dba..3deb6c11f13441 100644
+--- a/tools/arch/x86/include/asm/msr-index.h
++++ b/tools/arch/x86/include/asm/msr-index.h
+@@ -612,6 +612,7 @@
+ #define MSR_AMD64_OSVW_STATUS		0xc0010141
+ #define MSR_AMD_PPIN_CTL		0xc00102f0
+ #define MSR_AMD_PPIN			0xc00102f1
++#define MSR_AMD64_CPUID_FN_7		0xc0011002
+ #define MSR_AMD64_CPUID_FN_1		0xc0011004
+ #define MSR_AMD64_LS_CFG		0xc0011020
+ #define MSR_AMD64_DC_CFG		0xc0011022
+diff --git a/tools/include/linux/kallsyms.h b/tools/include/linux/kallsyms.h
+index 5a37ccbec54fbc..f61a01dd7eb7c7 100644
+--- a/tools/include/linux/kallsyms.h
++++ b/tools/include/linux/kallsyms.h
+@@ -18,6 +18,7 @@ static inline const char *kallsyms_lookup(unsigned long addr,
+ 	return NULL;
+ }
+ 
++#ifdef HAVE_BACKTRACE_SUPPORT
+ #include <execinfo.h>
+ #include <stdlib.h>
+ static inline void print_ip_sym(const char *loglvl, unsigned long ip)
+@@ -30,5 +31,8 @@ static inline void print_ip_sym(const char *loglvl, unsigned long ip)
+ 
+ 	free(name);
+ }
++#else
++static inline void print_ip_sym(const char *loglvl, unsigned long ip) {}
++#endif
+ 
+ #endif
+diff --git a/tools/testing/selftests/bpf/test_lru_map.c b/tools/testing/selftests/bpf/test_lru_map.c
+index fda7589c50236c..0921939532c6c2 100644
+--- a/tools/testing/selftests/bpf/test_lru_map.c
++++ b/tools/testing/selftests/bpf/test_lru_map.c
+@@ -138,6 +138,18 @@ static int sched_next_online(int pid, int *next_to_try)
+ 	return ret;
+ }
+ 
++/* Derive target_free from map_size, same as bpf_common_lru_populate */
++static unsigned int __tgt_size(unsigned int map_size)
++{
++	return (map_size / nr_cpus) / 2;
++}
++
++/* Inverse of how bpf_common_lru_populate derives target_free from map_size. */
++static unsigned int __map_size(unsigned int tgt_free)
++{
++	return tgt_free * nr_cpus * 2;
++}
++
+ /* Size of the LRU map is 2
+  * Add key=1 (+1 key)
+  * Add key=2 (+1 key)
+@@ -231,11 +243,11 @@ static void test_lru_sanity0(int map_type, int map_flags)
+ 	printf("Pass\n");
+ }
+ 
+-/* Size of the LRU map is 1.5*tgt_free
+- * Insert 1 to tgt_free (+tgt_free keys)
+- * Lookup 1 to tgt_free/2
+- * Insert 1+tgt_free to 2*tgt_free (+tgt_free keys)
+- * => 1+tgt_free/2 to LOCALFREE_TARGET will be removed by LRU
++/* Verify that unreferenced elements are recycled before referenced ones.
++ * Insert elements.
++ * Reference a subset of these.
++ * Insert more, enough to trigger recycling.
++ * Verify that unreferenced are recycled.
+  */
+ static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free)
+ {
+@@ -257,7 +269,7 @@ static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free)
+ 	batch_size = tgt_free / 2;
+ 	assert(batch_size * 2 == tgt_free);
+ 
+-	map_size = tgt_free + batch_size;
++	map_size = __map_size(tgt_free) + batch_size;
+ 	lru_map_fd = create_map(map_type, map_flags, map_size);
+ 	assert(lru_map_fd != -1);
+ 
+@@ -266,13 +278,13 @@ static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free)
+ 
+ 	value[0] = 1234;
+ 
+-	/* Insert 1 to tgt_free (+tgt_free keys) */
+-	end_key = 1 + tgt_free;
++	/* Insert map_size - batch_size keys */
++	end_key = 1 + __map_size(tgt_free);
+ 	for (key = 1; key < end_key; key++)
+ 		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
+ 					    BPF_NOEXIST));
+ 
+-	/* Lookup 1 to tgt_free/2 */
++	/* Lookup 1 to batch_size */
+ 	end_key = 1 + batch_size;
+ 	for (key = 1; key < end_key; key++) {
+ 		assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, key, value));
+@@ -280,12 +292,13 @@ static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free)
+ 					    BPF_NOEXIST));
+ 	}
+ 
+-	/* Insert 1+tgt_free to 2*tgt_free
+-	 * => 1+tgt_free/2 to LOCALFREE_TARGET will be
++	/* Insert another map_size - batch_size keys
++	 * Map will contain 1 to batch_size plus these latest, i.e.,
++	 * => previous 1+batch_size to map_size - batch_size will have been
+ 	 * removed by LRU
+ 	 */
+-	key = 1 + tgt_free;
+-	end_key = key + tgt_free;
++	key = 1 + __map_size(tgt_free);
++	end_key = key + __map_size(tgt_free);
+ 	for (; key < end_key; key++) {
+ 		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
+ 					    BPF_NOEXIST));
+@@ -301,17 +314,8 @@ static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free)
+ 	printf("Pass\n");
+ }
+ 
+-/* Size of the LRU map 1.5 * tgt_free
+- * Insert 1 to tgt_free (+tgt_free keys)
+- * Update 1 to tgt_free/2
+- *   => The original 1 to tgt_free/2 will be removed due to
+- *      the LRU shrink process
+- * Re-insert 1 to tgt_free/2 again and do a lookup immeidately
+- * Insert 1+tgt_free to tgt_free*3/2
+- * Insert 1+tgt_free*3/2 to tgt_free*5/2
+- *   => Key 1+tgt_free to tgt_free*3/2
+- *      will be removed from LRU because it has never
+- *      been lookup and ref bit is not set
++/* Verify that insertions exceeding map size will recycle the oldest.
++ * Verify that unreferenced elements are recycled before referenced.
+  */
+ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
+ {
+@@ -334,7 +338,7 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
+ 	batch_size = tgt_free / 2;
+ 	assert(batch_size * 2 == tgt_free);
+ 
+-	map_size = tgt_free + batch_size;
++	map_size = __map_size(tgt_free) + batch_size;
+ 	lru_map_fd = create_map(map_type, map_flags, map_size);
+ 	assert(lru_map_fd != -1);
+ 
+@@ -343,8 +347,8 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
+ 
+ 	value[0] = 1234;
+ 
+-	/* Insert 1 to tgt_free (+tgt_free keys) */
+-	end_key = 1 + tgt_free;
++	/* Insert map_size - batch_size keys */
++	end_key = 1 + __map_size(tgt_free);
+ 	for (key = 1; key < end_key; key++)
+ 		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
+ 					    BPF_NOEXIST));
+@@ -357,8 +361,7 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
+ 	 * shrink the inactive list to get tgt_free
+ 	 * number of free nodes.
+ 	 *
+-	 * Hence, the oldest key 1 to tgt_free/2
+-	 * are removed from the LRU list.
++	 * Hence, the oldest key is removed from the LRU list.
+ 	 */
+ 	key = 1;
+ 	if (map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
+@@ -370,8 +373,7 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
+ 					   BPF_EXIST));
+ 	}
+ 
+-	/* Re-insert 1 to tgt_free/2 again and do a lookup
+-	 * immeidately.
++	/* Re-insert 1 to batch_size again and do a lookup immediately.
+ 	 */
+ 	end_key = 1 + batch_size;
+ 	value[0] = 4321;
+@@ -387,17 +389,18 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
+ 
+ 	value[0] = 1234;
+ 
+-	/* Insert 1+tgt_free to tgt_free*3/2 */
+-	end_key = 1 + tgt_free + batch_size;
+-	for (key = 1 + tgt_free; key < end_key; key++)
++	/* Insert batch_size new elements */
++	key = 1 + __map_size(tgt_free);
++	end_key = key + batch_size;
++	for (; key < end_key; key++)
+ 		/* These newly added but not referenced keys will be
+ 		 * gone during the next LRU shrink.
+ 		 */
+ 		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
+ 					    BPF_NOEXIST));
+ 
+-	/* Insert 1+tgt_free*3/2 to  tgt_free*5/2 */
+-	end_key = key + tgt_free;
++	/* Insert map_size - batch_size elements */
++	end_key += __map_size(tgt_free);
+ 	for (; key < end_key; key++) {
+ 		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
+ 					    BPF_NOEXIST));
+@@ -413,12 +416,12 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
+ 	printf("Pass\n");
+ }
+ 
+-/* Size of the LRU map is 2*tgt_free
+- * It is to test the active/inactive list rotation
+- * Insert 1 to 2*tgt_free (+2*tgt_free keys)
+- * Lookup key 1 to tgt_free*3/2
+- * Add 1+2*tgt_free to tgt_free*5/2 (+tgt_free/2 keys)
+- *  => key 1+tgt_free*3/2 to 2*tgt_free are removed from LRU
++/* Test the active/inactive list rotation
++ *
++ * Fill the whole map, deplete the free list.
++ * Reference all except the last lru->target_free elements.
++ * Insert lru->target_free new elements. This triggers one shrink.
++ * Verify that the non-referenced elements are replaced.
+  */
+ static void test_lru_sanity3(int map_type, int map_flags, unsigned int tgt_free)
+ {
+@@ -437,8 +440,7 @@ static void test_lru_sanity3(int map_type, int map_flags, unsigned int tgt_free)
+ 
+ 	assert(sched_next_online(0, &next_cpu) != -1);
+ 
+-	batch_size = tgt_free / 2;
+-	assert(batch_size * 2 == tgt_free);
++	batch_size = __tgt_size(tgt_free);
+ 
+ 	map_size = tgt_free * 2;
+ 	lru_map_fd = create_map(map_type, map_flags, map_size);
+@@ -449,23 +451,21 @@ static void test_lru_sanity3(int map_type, int map_flags, unsigned int tgt_free)
+ 
+ 	value[0] = 1234;
+ 
+-	/* Insert 1 to 2*tgt_free (+2*tgt_free keys) */
+-	end_key = 1 + (2 * tgt_free);
++	/* Fill the map */
++	end_key = 1 + map_size;
+ 	for (key = 1; key < end_key; key++)
+ 		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
+ 					    BPF_NOEXIST));
+ 
+-	/* Lookup key 1 to tgt_free*3/2 */
+-	end_key = tgt_free + batch_size;
++	/* Reference all but the last batch_size */
++	end_key = 1 + map_size - batch_size;
+ 	for (key = 1; key < end_key; key++) {
+ 		assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, key, value));
+ 		assert(!bpf_map_update_elem(expected_map_fd, &key, value,
+ 					    BPF_NOEXIST));
+ 	}
+ 
+-	/* Add 1+2*tgt_free to tgt_free*5/2
+-	 * (+tgt_free/2 keys)
+-	 */
++	/* Insert new batch_size: replaces the non-referenced elements */
+ 	key = 2 * tgt_free + 1;
+ 	end_key = key + batch_size;
+ 	for (; key < end_key; key++) {
+@@ -500,7 +500,8 @@ static void test_lru_sanity4(int map_type, int map_flags, unsigned int tgt_free)
+ 		lru_map_fd = create_map(map_type, map_flags,
+ 					3 * tgt_free * nr_cpus);
+ 	else
+-		lru_map_fd = create_map(map_type, map_flags, 3 * tgt_free);
++		lru_map_fd = create_map(map_type, map_flags,
++					3 * __map_size(tgt_free));
+ 	assert(lru_map_fd != -1);
+ 
+ 	expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0,
+diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh
+index c992e385159c0c..195360082d9498 100644
+--- a/tools/testing/selftests/net/forwarding/lib.sh
++++ b/tools/testing/selftests/net/forwarding/lib.sh
+@@ -48,7 +48,6 @@ declare -A NETIFS=(
+ : "${WAIT_TIME:=5}"
+ 
+ # Whether to pause on, respectively, after a failure and before cleanup.
+-: "${PAUSE_ON_FAIL:=no}"
+ : "${PAUSE_ON_CLEANUP:=no}"
+ 
+ # Whether to create virtual interfaces, and what netdevice type they should be.
+@@ -446,22 +445,6 @@ done
+ ##############################################################################
+ # Helpers
+ 
+-# Exit status to return at the end. Set in case one of the tests fails.
+-EXIT_STATUS=0
+-# Per-test return value. Clear at the beginning of each test.
+-RET=0
+-
+-ret_set_ksft_status()
+-{
+-	local ksft_status=$1; shift
+-	local msg=$1; shift
+-
+-	RET=$(ksft_status_merge $RET $ksft_status)
+-	if (( $? )); then
+-		retmsg=$msg
+-	fi
+-}
+-
+ # Whether FAILs should be interpreted as XFAILs. Internal.
+ FAIL_TO_XFAIL=
+ 
+@@ -535,102 +518,6 @@ xfail_on_veth()
+ 	fi
+ }
+ 
+-log_test_result()
+-{
+-	local test_name=$1; shift
+-	local opt_str=$1; shift
+-	local result=$1; shift
+-	local retmsg=$1; shift
+-
+-	printf "TEST: %-60s  [%s]\n" "$test_name $opt_str" "$result"
+-	if [[ $retmsg ]]; then
+-		printf "\t%s\n" "$retmsg"
+-	fi
+-}
+-
+-pause_on_fail()
+-{
+-	if [[ $PAUSE_ON_FAIL == yes ]]; then
+-		echo "Hit enter to continue, 'q' to quit"
+-		read a
+-		[[ $a == q ]] && exit 1
+-	fi
+-}
+-
+-handle_test_result_pass()
+-{
+-	local test_name=$1; shift
+-	local opt_str=$1; shift
+-
+-	log_test_result "$test_name" "$opt_str" " OK "
+-}
+-
+-handle_test_result_fail()
+-{
+-	local test_name=$1; shift
+-	local opt_str=$1; shift
+-
+-	log_test_result "$test_name" "$opt_str" FAIL "$retmsg"
+-	pause_on_fail
+-}
+-
+-handle_test_result_xfail()
+-{
+-	local test_name=$1; shift
+-	local opt_str=$1; shift
+-
+-	log_test_result "$test_name" "$opt_str" XFAIL "$retmsg"
+-	pause_on_fail
+-}
+-
+-handle_test_result_skip()
+-{
+-	local test_name=$1; shift
+-	local opt_str=$1; shift
+-
+-	log_test_result "$test_name" "$opt_str" SKIP "$retmsg"
+-}
+-
+-log_test()
+-{
+-	local test_name=$1
+-	local opt_str=$2
+-
+-	if [[ $# -eq 2 ]]; then
+-		opt_str="($opt_str)"
+-	fi
+-
+-	if ((RET == ksft_pass)); then
+-		handle_test_result_pass "$test_name" "$opt_str"
+-	elif ((RET == ksft_xfail)); then
+-		handle_test_result_xfail "$test_name" "$opt_str"
+-	elif ((RET == ksft_skip)); then
+-		handle_test_result_skip "$test_name" "$opt_str"
+-	else
+-		handle_test_result_fail "$test_name" "$opt_str"
+-	fi
+-
+-	EXIT_STATUS=$(ksft_exit_status_merge $EXIT_STATUS $RET)
+-	return $RET
+-}
+-
+-log_test_skip()
+-{
+-	RET=$ksft_skip retmsg= log_test "$@"
+-}
+-
+-log_test_xfail()
+-{
+-	RET=$ksft_xfail retmsg= log_test "$@"
+-}
+-
+-log_info()
+-{
+-	local msg=$1
+-
+-	echo "INFO: $msg"
+-}
+-
+ not()
+ {
+ 	"$@"
+diff --git a/tools/testing/selftests/net/lib.sh b/tools/testing/selftests/net/lib.sh
+index be8707bfb46e5f..bb4d2f8d50d672 100644
+--- a/tools/testing/selftests/net/lib.sh
++++ b/tools/testing/selftests/net/lib.sh
+@@ -6,6 +6,9 @@
+ 
+ : "${WAIT_TIMEOUT:=20}"
+ 
++# Whether to pause on after a failure.
++: "${PAUSE_ON_FAIL:=no}"
++
+ BUSYWAIT_TIMEOUT=$((WAIT_TIMEOUT * 1000)) # ms
+ 
+ # Kselftest framework constants.
+@@ -17,6 +20,11 @@ ksft_skip=4
+ # namespace list created by setup_ns
+ NS_LIST=()
+ 
++# Exit status to return at the end. Set in case one of the tests fails.
++EXIT_STATUS=0
++# Per-test return value. Clear at the beginning of each test.
++RET=0
++
+ ##############################################################################
+ # Helpers
+ 
+@@ -233,3 +241,110 @@ tc_rule_handle_stats_get()
+ 	    | jq ".[] | select(.options.handle == $handle) | \
+ 		  .options.actions[0].stats$selector"
+ }
++
++ret_set_ksft_status()
++{
++	local ksft_status=$1; shift
++	local msg=$1; shift
++
++	RET=$(ksft_status_merge $RET $ksft_status)
++	if (( $? )); then
++		retmsg=$msg
++	fi
++}
++
++log_test_result()
++{
++	local test_name=$1; shift
++	local opt_str=$1; shift
++	local result=$1; shift
++	local retmsg=$1
++
++	printf "TEST: %-60s  [%s]\n" "$test_name $opt_str" "$result"
++	if [[ $retmsg ]]; then
++		printf "\t%s\n" "$retmsg"
++	fi
++}
++
++pause_on_fail()
++{
++	if [[ $PAUSE_ON_FAIL == yes ]]; then
++		echo "Hit enter to continue, 'q' to quit"
++		read a
++		[[ $a == q ]] && exit 1
++	fi
++}
++
++handle_test_result_pass()
++{
++	local test_name=$1; shift
++	local opt_str=$1; shift
++
++	log_test_result "$test_name" "$opt_str" " OK "
++}
++
++handle_test_result_fail()
++{
++	local test_name=$1; shift
++	local opt_str=$1; shift
++
++	log_test_result "$test_name" "$opt_str" FAIL "$retmsg"
++	pause_on_fail
++}
++
++handle_test_result_xfail()
++{
++	local test_name=$1; shift
++	local opt_str=$1; shift
++
++	log_test_result "$test_name" "$opt_str" XFAIL "$retmsg"
++	pause_on_fail
++}
++
++handle_test_result_skip()
++{
++	local test_name=$1; shift
++	local opt_str=$1; shift
++
++	log_test_result "$test_name" "$opt_str" SKIP "$retmsg"
++}
++
++log_test()
++{
++	local test_name=$1
++	local opt_str=$2
++
++	if [[ $# -eq 2 ]]; then
++		opt_str="($opt_str)"
++	fi
++
++	if ((RET == ksft_pass)); then
++		handle_test_result_pass "$test_name" "$opt_str"
++	elif ((RET == ksft_xfail)); then
++		handle_test_result_xfail "$test_name" "$opt_str"
++	elif ((RET == ksft_skip)); then
++		handle_test_result_skip "$test_name" "$opt_str"
++	else
++		handle_test_result_fail "$test_name" "$opt_str"
++	fi
++
++	EXIT_STATUS=$(ksft_exit_status_merge $EXIT_STATUS $RET)
++	return $RET
++}
++
++log_test_skip()
++{
++	RET=$ksft_skip retmsg= log_test "$@"
++}
++
++log_test_xfail()
++{
++	RET=$ksft_xfail retmsg= log_test "$@"
++}
++
++log_info()
++{
++	local msg=$1
++
++	echo "INFO: $msg"
++}
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index b99de3b5ffbc03..aba4078ae2250e 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -2557,6 +2557,8 @@ static int kvm_vm_set_mem_attributes(struct kvm *kvm, gfn_t start, gfn_t end,
+ 		r = xa_reserve(&kvm->mem_attr_array, i, GFP_KERNEL_ACCOUNT);
+ 		if (r)
+ 			goto out_unlock;
++
++		cond_resched();
+ 	}
+ 
+ 	kvm_handle_gfn_range(kvm, &pre_set_range);
+@@ -2565,6 +2567,7 @@ static int kvm_vm_set_mem_attributes(struct kvm *kvm, gfn_t start, gfn_t end,
+ 		r = xa_err(xa_store(&kvm->mem_attr_array, i, entry,
+ 				    GFP_KERNEL_ACCOUNT));
+ 		KVM_BUG_ON(r, kvm);
++		cond_resched();
+ 	}
+ 
+ 	kvm_handle_gfn_range(kvm, &post_set_range);


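A note on the sub-topology naming rule implemented by sof_sdw_get_tplg_files() in the patch above: DMIC topologies embed the platform name because the NHLT blob is platform dependent, while the other functions do not. The short standalone C sketch below only illustrates the resulting file names; the prefix, platform and BE id values are made-up examples, not taken from the patch.

#include <stdio.h>

int main(void)
{
	const char *prefix = "intel/sof-ace-tplg";	/* assumed example prefix */

	/* DMIC function: the platform name ("arl" here) is part of the
	 * file name because the NHLT blob is platform dependent.
	 */
	printf("%s/sof-%s-%s-id%d.tplg\n", prefix, "arl", "dmic-2ch", 5);

	/* Other functions (jack, amp, mic, HDMI): no platform component. */
	printf("%s/sof-%s-id%d.tplg\n", prefix, "sdca-jack", 0);
	printf("%s/sof-%s-id%d.tplg\n", prefix, "sdca-2amp", 2);

	return 0;
}

This prints, for example, "intel/sof-ace-tplg/sof-arl-dmic-2ch-id5.tplg" and "intel/sof-ace-tplg/sof-sdca-jack-id0.tplg", matching the two devm_kasprintf() formats used in sof-function-topology-lib.c.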
* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2025-07-14 16:20 Arisu Tachibana
  0 siblings, 0 replies; 82+ messages in thread
From: Arisu Tachibana @ 2025-07-14 16:20 UTC (permalink / raw
  To: gentoo-commits

commit:     b573dbd49b00e26ea910847439da70cc311db1f7
Author:     Arisu Tachibana <alicef <AT> gentoo <DOT> org>
AuthorDate: Mon Jul 14 16:20:17 2025 +0000
Commit:     Arisu Tachibana <alicef <AT> gentoo <DOT> org>
CommitDate: Mon Jul 14 16:20:17 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=b573dbd4

Linux patch 6.12.38

Signed-off-by: Arisu Tachibana <alicef <AT> gentoo.org>

 0000_README              |  4 ++++
 1037_linux-6.12.38.patch | 25 +++++++++++++++++++++++++
 2 files changed, 29 insertions(+)

diff --git a/0000_README b/0000_README
index 21e46663..e2568c6a 100644
--- a/0000_README
+++ b/0000_README
@@ -191,6 +191,10 @@ Patch:  1036_linux-6.12.37.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.12.37
 
+Patch:  1037_linux-6.12.38.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.12.38
+
 Patch:  1500_fortify-copy-size-value-range-tracking-fix.patch
 From:   https://git.kernel.org/
 Desc:   fortify: Hide run-time copy size from value range tracking

diff --git a/1037_linux-6.12.38.patch b/1037_linux-6.12.38.patch
new file mode 100644
index 00000000..a6c3c36c
--- /dev/null
+++ b/1037_linux-6.12.38.patch
@@ -0,0 +1,25 @@
+diff --git a/Makefile b/Makefile
+index ca3225cbf130aa..28c9acdd9b3583 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 12
+-SUBLEVEL = 37
++SUBLEVEL = 38
+ EXTRAVERSION =
+ NAME = Baby Opossum Posse
+ 
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index 8a740e92e483ed..b42307200e98f3 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -376,6 +376,7 @@ static bool amd_check_tsa_microcode(void)
+ 
+ 	p.ext_fam	= c->x86 - 0xf;
+ 	p.model		= c->x86_model;
++	p.ext_model	= c->x86_model >> 4;
+ 	p.stepping	= c->x86_stepping;
+ 
+ 	if (cpu_has(c, X86_FEATURE_ZEN3) ||


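The one-line fix above works because the kernel's combined x86_model value packs the CPUID extended model in the high nibble and the base model in the low nibble, so shifting right by four recovers ext_model. A minimal standalone C sketch of that split (example values assumed, not from the patch):

#include <stdio.h>

int main(void)
{
	unsigned int x86_model = 0x21;		/* e.g. a Zen3 model number */
	unsigned int model     = x86_model & 0xf;	/* low nibble  */
	unsigned int ext_model = x86_model >> 4;	/* high nibble */

	printf("x86_model=%#x -> model=%#x ext_model=%#x\n",
	       x86_model, model, ext_model);
	return 0;
}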
* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2025-07-11  2:28 Arisu Tachibana
  0 siblings, 0 replies; 82+ messages in thread
From: Arisu Tachibana @ 2025-07-11  2:28 UTC (permalink / raw
  To: gentoo-commits

commit:     f3c3ddaaf9d4ee41cbc6e718543078624ce3111b
Author:     Arisu Tachibana <alicef <AT> gentoo <DOT> org>
AuthorDate: Fri Jul 11 02:26:41 2025 +0000
Commit:     Arisu Tachibana <alicef <AT> gentoo <DOT> org>
CommitDate: Fri Jul 11 02:26:41 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=f3c3ddaa

Linux patch 6.12.37

Signed-off-by: Arisu Tachibana <alicef <AT> gentoo.org>

 0000_README              |     4 +
 1036_linux-6.12.37.patch | 19098 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 19102 insertions(+)

diff --git a/0000_README b/0000_README
index eda6f446..21e46663 100644
--- a/0000_README
+++ b/0000_README
@@ -187,6 +187,10 @@ Patch:  1035_linux-6.12.36.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.12.36
 
+Patch:  1036_linux-6.12.37.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.12.37
+
 Patch:  1500_fortify-copy-size-value-range-tracking-fix.patch
 From:   https://git.kernel.org/
 Desc:   fortify: Hide run-time copy size from value range tracking

diff --git a/1036_linux-6.12.37.patch b/1036_linux-6.12.37.patch
new file mode 100644
index 00000000..5f9ac7e0
--- /dev/null
+++ b/1036_linux-6.12.37.patch
@@ -0,0 +1,19098 @@
+diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
+index 6a1acabb29d85f..53755b2021ed01 100644
+--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
++++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
+@@ -523,6 +523,7 @@ What:		/sys/devices/system/cpu/vulnerabilities
+ 		/sys/devices/system/cpu/vulnerabilities/spectre_v1
+ 		/sys/devices/system/cpu/vulnerabilities/spectre_v2
+ 		/sys/devices/system/cpu/vulnerabilities/srbds
++		/sys/devices/system/cpu/vulnerabilities/tsa
+ 		/sys/devices/system/cpu/vulnerabilities/tsx_async_abort
+ Date:		January 2018
+ Contact:	Linux kernel mailing list <linux-kernel@vger.kernel.org>
+diff --git a/Documentation/ABI/testing/sysfs-driver-ufs b/Documentation/ABI/testing/sysfs-driver-ufs
+index 5fa6655aee8409..16f17e91ee4969 100644
+--- a/Documentation/ABI/testing/sysfs-driver-ufs
++++ b/Documentation/ABI/testing/sysfs-driver-ufs
+@@ -711,7 +711,7 @@ Description:	This file shows the thin provisioning type. This is one of
+ 
+ 		The file is read only.
+ 
+-What:		/sys/class/scsi_device/*/device/unit_descriptor/physical_memory_resourse_count
++What:		/sys/class/scsi_device/*/device/unit_descriptor/physical_memory_resource_count
+ Date:		February 2018
+ Contact:	Stanislav Nijnikov <stanislav.nijnikov@wdc.com>
+ Description:	This file shows the total physical memory resources. This is
+diff --git a/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst b/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst
+index 1302fd1b55e83c..6dba18dbb9abc8 100644
+--- a/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst
++++ b/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst
+@@ -157,9 +157,7 @@ This is achieved by using the otherwise unused and obsolete VERW instruction in
+ combination with a microcode update. The microcode clears the affected CPU
+ buffers when the VERW instruction is executed.
+ 
+-Kernel reuses the MDS function to invoke the buffer clearing:
+-
+-	mds_clear_cpu_buffers()
++Kernel does the buffer clearing with x86_clear_cpu_buffers().
+ 
+ On MDS affected CPUs, the kernel already invokes CPU buffer clear on
+ kernel/userspace, hypervisor/guest and C-state (idle) transitions. No
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index b5cb3614855413..f402bbaccc8aa3 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -6993,6 +6993,19 @@
+ 			having this key zero'ed is acceptable. E.g. in testing
+ 			scenarios.
+ 
++	tsa=		[X86] Control mitigation for Transient Scheduler
++			Attacks on AMD CPUs. Search the following in your
++			favourite search engine for more details:
++
++			"Technical guidance for mitigating transient scheduler
++			attacks".
++
++			off		- disable the mitigation
++			on		- enable the mitigation (default)
++			user		- mitigate only user/kernel transitions
++			vm		- mitigate only guest/host transitions
++
++
+ 	tsc=		Disable clocksource stability checks for TSC.
+ 			Format: <string>
+ 			[x86] reliable: mark tsc clocksource as reliable, this
+diff --git a/Documentation/arch/x86/mds.rst b/Documentation/arch/x86/mds.rst
+index 5a2e6c0ef04a53..3518671e1a8503 100644
+--- a/Documentation/arch/x86/mds.rst
++++ b/Documentation/arch/x86/mds.rst
+@@ -93,7 +93,7 @@ enters a C-state.
+ 
+ The kernel provides a function to invoke the buffer clearing:
+ 
+-    mds_clear_cpu_buffers()
++    x86_clear_cpu_buffers()
+ 
+ Also macro CLEAR_CPU_BUFFERS can be used in ASM late in exit-to-user path.
+ Other than CFLAGS.ZF, this macro doesn't clobber any registers.
+@@ -185,9 +185,9 @@ Mitigation points
+    idle clearing would be a window dressing exercise and is therefore not
+    activated.
+ 
+-   The invocation is controlled by the static key mds_idle_clear which is
+-   switched depending on the chosen mitigation mode and the SMT state of
+-   the system.
++   The invocation is controlled by the static key cpu_buf_idle_clear which is
++   switched depending on the chosen mitigation mode and the SMT state of the
++   system.
+ 
+    The buffer clear is only invoked before entering the C-State to prevent
+    that stale data from the idling CPU from spilling to the Hyper-Thread
+diff --git a/Documentation/core-api/symbol-namespaces.rst b/Documentation/core-api/symbol-namespaces.rst
+index d1154eb438101a..cca94469fa4149 100644
+--- a/Documentation/core-api/symbol-namespaces.rst
++++ b/Documentation/core-api/symbol-namespaces.rst
+@@ -28,6 +28,9 @@ kernel. As of today, modules that make use of symbols exported into namespaces,
+ are required to import the namespace. Otherwise the kernel will, depending on
+ its configuration, reject loading the module or warn about a missing import.
+ 
++Additionally, it is possible to put symbols into a module namespace, strictly
++limiting which modules are allowed to use these symbols.
++
+ 2. How to define Symbol Namespaces
+ ==================================
+ 
+@@ -84,6 +87,22 @@ unit as preprocessor statement. The above example would then read::
+ within the corresponding compilation unit before any EXPORT_SYMBOL macro is
+ used.
+ 
++2.3 Using the EXPORT_SYMBOL_GPL_FOR_MODULES() macro
++===================================================
++
++Symbols exported using this macro are put into a module namespace. This
++namespace cannot be imported.
++
++The macro takes a comma separated list of module names, allowing only those
++modules to access this symbol. Simple tail-globs are supported.
++
++For example:
++
++  EXPORT_SYMBOL_GPL_FOR_MODULES(preempt_notifier_inc, "kvm,kvm-*")
++
++will limit usage of this symbol to modules whose name matches the given
++patterns.
++
+ 3. How to use Symbols exported in Namespaces
+ ============================================
+ 
+@@ -155,3 +174,6 @@ in-tree modules::
+ You can also run nsdeps for external module builds. A typical usage is::
+ 
+ 	$ make -C <path_to_kernel_src> M=$PWD nsdeps
++
++Note: it will happily generate an import statement for the module namespace,
++which will not work and will generate build and runtime failures.
+diff --git a/Makefile b/Makefile
+index 7012820523fff4..ca3225cbf130aa 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 12
+-SUBLEVEL = 36
++SUBLEVEL = 37
+ EXTRAVERSION =
+ NAME = Baby Opossum Posse
+ 
+diff --git a/arch/arm64/boot/dts/apple/t8103-jxxx.dtsi b/arch/arm64/boot/dts/apple/t8103-jxxx.dtsi
+index 5988a4eb6efaa0..cb78ce7af0b380 100644
+--- a/arch/arm64/boot/dts/apple/t8103-jxxx.dtsi
++++ b/arch/arm64/boot/dts/apple/t8103-jxxx.dtsi
+@@ -71,7 +71,7 @@ hpm1: usb-pd@3f {
+  */
+ &port00 {
+ 	bus-range = <1 1>;
+-	wifi0: network@0,0 {
++	wifi0: wifi@0,0 {
+ 		compatible = "pci14e4,4425";
+ 		reg = <0x10000 0x0 0x0 0x0 0x0>;
+ 		/* To be filled by the loader */
+diff --git a/arch/arm64/boot/dts/qcom/sm8650.dtsi b/arch/arm64/boot/dts/qcom/sm8650.dtsi
+index edde21972f5ac1..bd91624bd3bfc9 100644
+--- a/arch/arm64/boot/dts/qcom/sm8650.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8650.dtsi
+@@ -68,18 +68,18 @@ cpus {
+ 		#address-cells = <2>;
+ 		#size-cells = <0>;
+ 
+-		CPU0: cpu@0 {
++		cpu0: cpu@0 {
+ 			device_type = "cpu";
+ 			compatible = "arm,cortex-a520";
+ 			reg = <0 0>;
+ 
+ 			clocks = <&cpufreq_hw 0>;
+ 
+-			power-domains = <&CPU_PD0>;
++			power-domains = <&cpu_pd0>;
+ 			power-domain-names = "psci";
+ 
+ 			enable-method = "psci";
+-			next-level-cache = <&L2_0>;
++			next-level-cache = <&l2_0>;
+ 			capacity-dmips-mhz = <1024>;
+ 			dynamic-power-coefficient = <100>;
+ 
+@@ -87,13 +87,13 @@ CPU0: cpu@0 {
+ 
+ 			#cooling-cells = <2>;
+ 
+-			L2_0: l2-cache {
++			l2_0: l2-cache {
+ 				compatible = "cache";
+ 				cache-level = <2>;
+ 				cache-unified;
+-				next-level-cache = <&L3_0>;
++				next-level-cache = <&l3_0>;
+ 
+-				L3_0: l3-cache {
++				l3_0: l3-cache {
+ 					compatible = "cache";
+ 					cache-level = <3>;
+ 					cache-unified;
+@@ -101,18 +101,18 @@ L3_0: l3-cache {
+ 			};
+ 		};
+ 
+-		CPU1: cpu@100 {
++		cpu1: cpu@100 {
+ 			device_type = "cpu";
+ 			compatible = "arm,cortex-a520";
+ 			reg = <0 0x100>;
+ 
+ 			clocks = <&cpufreq_hw 0>;
+ 
+-			power-domains = <&CPU_PD1>;
++			power-domains = <&cpu_pd1>;
+ 			power-domain-names = "psci";
+ 
+ 			enable-method = "psci";
+-			next-level-cache = <&L2_0>;
++			next-level-cache = <&l2_0>;
+ 			capacity-dmips-mhz = <1024>;
+ 			dynamic-power-coefficient = <100>;
+ 
+@@ -121,18 +121,18 @@ CPU1: cpu@100 {
+ 			#cooling-cells = <2>;
+ 		};
+ 
+-		CPU2: cpu@200 {
++		cpu2: cpu@200 {
+ 			device_type = "cpu";
+ 			compatible = "arm,cortex-a720";
+ 			reg = <0 0x200>;
+ 
+ 			clocks = <&cpufreq_hw 3>;
+ 
+-			power-domains = <&CPU_PD2>;
++			power-domains = <&cpu_pd2>;
+ 			power-domain-names = "psci";
+ 
+ 			enable-method = "psci";
+-			next-level-cache = <&L2_200>;
++			next-level-cache = <&l2_200>;
+ 			capacity-dmips-mhz = <1792>;
+ 			dynamic-power-coefficient = <238>;
+ 
+@@ -140,46 +140,53 @@ CPU2: cpu@200 {
+ 
+ 			#cooling-cells = <2>;
+ 
+-			L2_200: l2-cache {
++			l2_200: l2-cache {
+ 				compatible = "cache";
+ 				cache-level = <2>;
+ 				cache-unified;
+-				next-level-cache = <&L3_0>;
++				next-level-cache = <&l3_0>;
+ 			};
+ 		};
+ 
+-		CPU3: cpu@300 {
++		cpu3: cpu@300 {
+ 			device_type = "cpu";
+ 			compatible = "arm,cortex-a720";
+ 			reg = <0 0x300>;
+ 
+ 			clocks = <&cpufreq_hw 3>;
+ 
+-			power-domains = <&CPU_PD3>;
++			power-domains = <&cpu_pd3>;
+ 			power-domain-names = "psci";
+ 
+ 			enable-method = "psci";
+-			next-level-cache = <&L2_200>;
++			next-level-cache = <&l2_300>;
+ 			capacity-dmips-mhz = <1792>;
+ 			dynamic-power-coefficient = <238>;
+ 
+ 			qcom,freq-domain = <&cpufreq_hw 3>;
+ 
+ 			#cooling-cells = <2>;
++
++			l2_300: l2-cache {
++				compatible = "cache";
++				cache-level = <2>;
++				cache-unified;
++				next-level-cache = <&l3_0>;
++			};
+ 		};
+ 
+-		CPU4: cpu@400 {
++		cpu4: cpu@400 {
+ 			device_type = "cpu";
+ 			compatible = "arm,cortex-a720";
+ 			reg = <0 0x400>;
+ 
+ 			clocks = <&cpufreq_hw 3>;
+ 
+-			power-domains = <&CPU_PD4>;
++			power-domains = <&cpu_pd4>;
+ 			power-domain-names = "psci";
+ 
+ 			enable-method = "psci";
+-			next-level-cache = <&L2_400>;
++			next-level-cache = <&l2_400>;
+ 			capacity-dmips-mhz = <1792>;
+ 			dynamic-power-coefficient = <238>;
+ 
+@@ -187,26 +194,26 @@ CPU4: cpu@400 {
+ 
+ 			#cooling-cells = <2>;
+ 
+-			L2_400: l2-cache {
++			l2_400: l2-cache {
+ 				compatible = "cache";
+ 				cache-level = <2>;
+ 				cache-unified;
+-				next-level-cache = <&L3_0>;
++				next-level-cache = <&l3_0>;
+ 			};
+ 		};
+ 
+-		CPU5: cpu@500 {
++		cpu5: cpu@500 {
+ 			device_type = "cpu";
+ 			compatible = "arm,cortex-a720";
+ 			reg = <0 0x500>;
+ 
+ 			clocks = <&cpufreq_hw 1>;
+ 
+-			power-domains = <&CPU_PD5>;
++			power-domains = <&cpu_pd5>;
+ 			power-domain-names = "psci";
+ 
+ 			enable-method = "psci";
+-			next-level-cache = <&L2_500>;
++			next-level-cache = <&l2_500>;
+ 			capacity-dmips-mhz = <1792>;
+ 			dynamic-power-coefficient = <238>;
+ 
+@@ -214,26 +221,26 @@ CPU5: cpu@500 {
+ 
+ 			#cooling-cells = <2>;
+ 
+-			L2_500: l2-cache {
++			l2_500: l2-cache {
+ 				compatible = "cache";
+ 				cache-level = <2>;
+ 				cache-unified;
+-				next-level-cache = <&L3_0>;
++				next-level-cache = <&l3_0>;
+ 			};
+ 		};
+ 
+-		CPU6: cpu@600 {
++		cpu6: cpu@600 {
+ 			device_type = "cpu";
+ 			compatible = "arm,cortex-a720";
+ 			reg = <0 0x600>;
+ 
+ 			clocks = <&cpufreq_hw 1>;
+ 
+-			power-domains = <&CPU_PD6>;
++			power-domains = <&cpu_pd6>;
+ 			power-domain-names = "psci";
+ 
+ 			enable-method = "psci";
+-			next-level-cache = <&L2_600>;
++			next-level-cache = <&l2_600>;
+ 			capacity-dmips-mhz = <1792>;
+ 			dynamic-power-coefficient = <238>;
+ 
+@@ -241,26 +248,26 @@ CPU6: cpu@600 {
+ 
+ 			#cooling-cells = <2>;
+ 
+-			L2_600: l2-cache {
++			l2_600: l2-cache {
+ 				compatible = "cache";
+ 				cache-level = <2>;
+ 				cache-unified;
+-				next-level-cache = <&L3_0>;
++				next-level-cache = <&l3_0>;
+ 			};
+ 		};
+ 
+-		CPU7: cpu@700 {
++		cpu7: cpu@700 {
+ 			device_type = "cpu";
+ 			compatible = "arm,cortex-x4";
+ 			reg = <0 0x700>;
+ 
+ 			clocks = <&cpufreq_hw 2>;
+ 
+-			power-domains = <&CPU_PD7>;
++			power-domains = <&cpu_pd7>;
+ 			power-domain-names = "psci";
+ 
+ 			enable-method = "psci";
+-			next-level-cache = <&L2_700>;
++			next-level-cache = <&l2_700>;
+ 			capacity-dmips-mhz = <1894>;
+ 			dynamic-power-coefficient = <588>;
+ 
+@@ -268,46 +275,46 @@ CPU7: cpu@700 {
+ 
+ 			#cooling-cells = <2>;
+ 
+-			L2_700: l2-cache {
++			l2_700: l2-cache {
+ 				compatible = "cache";
+ 				cache-level = <2>;
+ 				cache-unified;
+-				next-level-cache = <&L3_0>;
++				next-level-cache = <&l3_0>;
+ 			};
+ 		};
+ 
+ 		cpu-map {
+ 			cluster0 {
+ 				core0 {
+-					cpu = <&CPU0>;
++					cpu = <&cpu0>;
+ 				};
+ 
+ 				core1 {
+-					cpu = <&CPU1>;
++					cpu = <&cpu1>;
+ 				};
+ 
+ 				core2 {
+-					cpu = <&CPU2>;
++					cpu = <&cpu2>;
+ 				};
+ 
+ 				core3 {
+-					cpu = <&CPU3>;
++					cpu = <&cpu3>;
+ 				};
+ 
+ 				core4 {
+-					cpu = <&CPU4>;
++					cpu = <&cpu4>;
+ 				};
+ 
+ 				core5 {
+-					cpu = <&CPU5>;
++					cpu = <&cpu5>;
+ 				};
+ 
+ 				core6 {
+-					cpu = <&CPU6>;
++					cpu = <&cpu6>;
+ 				};
+ 
+ 				core7 {
+-					cpu = <&CPU7>;
++					cpu = <&cpu7>;
+ 				};
+ 			};
+ 		};
+@@ -315,7 +322,7 @@ core7 {
+ 		idle-states {
+ 			entry-method = "psci";
+ 
+-			SILVER_CPU_SLEEP_0: cpu-sleep-0-0 {
++			silver_cpu_sleep_0: cpu-sleep-0-0 {
+ 				compatible = "arm,idle-state";
+ 				idle-state-name = "silver-rail-power-collapse";
+ 				arm,psci-suspend-param = <0x40000004>;
+@@ -325,7 +332,7 @@ SILVER_CPU_SLEEP_0: cpu-sleep-0-0 {
+ 				local-timer-stop;
+ 			};
+ 
+-			GOLD_CPU_SLEEP_0: cpu-sleep-1-0 {
++			gold_cpu_sleep_0: cpu-sleep-1-0 {
+ 				compatible = "arm,idle-state";
+ 				idle-state-name = "gold-rail-power-collapse";
+ 				arm,psci-suspend-param = <0x40000004>;
+@@ -335,7 +342,7 @@ GOLD_CPU_SLEEP_0: cpu-sleep-1-0 {
+ 				local-timer-stop;
+ 			};
+ 
+-			GOLD_PLUS_CPU_SLEEP_0: cpu-sleep-2-0 {
++			gold_plus_cpu_sleep_0: cpu-sleep-2-0 {
+ 				compatible = "arm,idle-state";
+ 				idle-state-name = "gold-plus-rail-power-collapse";
+ 				arm,psci-suspend-param = <0x40000004>;
+@@ -347,7 +354,7 @@ GOLD_PLUS_CPU_SLEEP_0: cpu-sleep-2-0 {
+ 		};
+ 
+ 		domain-idle-states {
+-			CLUSTER_SLEEP_0: cluster-sleep-0 {
++			cluster_sleep_0: cluster-sleep-0 {
+ 				compatible = "domain-idle-state";
+ 				arm,psci-suspend-param = <0x41000044>;
+ 				entry-latency-us = <750>;
+@@ -355,7 +362,7 @@ CLUSTER_SLEEP_0: cluster-sleep-0 {
+ 				min-residency-us = <9144>;
+ 			};
+ 
+-			CLUSTER_SLEEP_1: cluster-sleep-1 {
++			cluster_sleep_1: cluster-sleep-1 {
+ 				compatible = "domain-idle-state";
+ 				arm,psci-suspend-param = <0x4100c344>;
+ 				entry-latency-us = <2800>;
+@@ -411,58 +418,58 @@ psci {
+ 		compatible = "arm,psci-1.0";
+ 		method = "smc";
+ 
+-		CPU_PD0: power-domain-cpu0 {
++		cpu_pd0: power-domain-cpu0 {
+ 			#power-domain-cells = <0>;
+-			power-domains = <&CLUSTER_PD>;
+-			domain-idle-states = <&SILVER_CPU_SLEEP_0>;
++			power-domains = <&cluster_pd>;
++			domain-idle-states = <&silver_cpu_sleep_0>;
+ 		};
+ 
+-		CPU_PD1: power-domain-cpu1 {
++		cpu_pd1: power-domain-cpu1 {
+ 			#power-domain-cells = <0>;
+-			power-domains = <&CLUSTER_PD>;
+-			domain-idle-states = <&SILVER_CPU_SLEEP_0>;
++			power-domains = <&cluster_pd>;
++			domain-idle-states = <&silver_cpu_sleep_0>;
+ 		};
+ 
+-		CPU_PD2: power-domain-cpu2 {
++		cpu_pd2: power-domain-cpu2 {
+ 			#power-domain-cells = <0>;
+-			power-domains = <&CLUSTER_PD>;
+-			domain-idle-states = <&SILVER_CPU_SLEEP_0>;
++			power-domains = <&cluster_pd>;
++			domain-idle-states = <&gold_cpu_sleep_0>;
+ 		};
+ 
+-		CPU_PD3: power-domain-cpu3 {
++		cpu_pd3: power-domain-cpu3 {
+ 			#power-domain-cells = <0>;
+-			power-domains = <&CLUSTER_PD>;
+-			domain-idle-states = <&GOLD_CPU_SLEEP_0>;
++			power-domains = <&cluster_pd>;
++			domain-idle-states = <&gold_cpu_sleep_0>;
+ 		};
+ 
+-		CPU_PD4: power-domain-cpu4 {
++		cpu_pd4: power-domain-cpu4 {
+ 			#power-domain-cells = <0>;
+-			power-domains = <&CLUSTER_PD>;
+-			domain-idle-states = <&GOLD_CPU_SLEEP_0>;
++			power-domains = <&cluster_pd>;
++			domain-idle-states = <&gold_cpu_sleep_0>;
+ 		};
+ 
+-		CPU_PD5: power-domain-cpu5 {
++		cpu_pd5: power-domain-cpu5 {
+ 			#power-domain-cells = <0>;
+-			power-domains = <&CLUSTER_PD>;
+-			domain-idle-states = <&GOLD_CPU_SLEEP_0>;
++			power-domains = <&cluster_pd>;
++			domain-idle-states = <&gold_cpu_sleep_0>;
+ 		};
+ 
+-		CPU_PD6: power-domain-cpu6 {
++		cpu_pd6: power-domain-cpu6 {
+ 			#power-domain-cells = <0>;
+-			power-domains = <&CLUSTER_PD>;
+-			domain-idle-states = <&GOLD_CPU_SLEEP_0>;
++			power-domains = <&cluster_pd>;
++			domain-idle-states = <&gold_cpu_sleep_0>;
+ 		};
+ 
+-		CPU_PD7: power-domain-cpu7 {
++		cpu_pd7: power-domain-cpu7 {
+ 			#power-domain-cells = <0>;
+-			power-domains = <&CLUSTER_PD>;
+-			domain-idle-states = <&GOLD_PLUS_CPU_SLEEP_0>;
++			power-domains = <&cluster_pd>;
++			domain-idle-states = <&gold_plus_cpu_sleep_0>;
+ 		};
+ 
+-		CLUSTER_PD: power-domain-cluster {
++		cluster_pd: power-domain-cluster {
+ 			#power-domain-cells = <0>;
+-			domain-idle-states = <&CLUSTER_SLEEP_0>,
+-					     <&CLUSTER_SLEEP_1>;
++			domain-idle-states = <&cluster_sleep_0>,
++					     <&cluster_sleep_1>;
+ 		};
+ 	};
+ 
+@@ -5233,7 +5240,7 @@ apps_rsc: rsc@17a00000 {
+ 				     <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
+ 				     <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
+ 
+-			power-domains = <&CLUSTER_PD>;
++			power-domains = <&cluster_pd>;
+ 
+ 			qcom,tcs-offset = <0xd00>;
+ 			qcom,drv-id = <2>;
+diff --git a/arch/arm64/boot/dts/qcom/x1e80100-crd.dts b/arch/arm64/boot/dts/qcom/x1e80100-crd.dts
+index 044a2f1432fe32..2a504a449b0bb8 100644
+--- a/arch/arm64/boot/dts/qcom/x1e80100-crd.dts
++++ b/arch/arm64/boot/dts/qcom/x1e80100-crd.dts
+@@ -419,6 +419,7 @@ vreg_l12b_1p2: ldo12 {
+ 			regulator-min-microvolt = <1200000>;
+ 			regulator-max-microvolt = <1200000>;
+ 			regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
++			regulator-always-on;
+ 		};
+ 
+ 		vreg_l13b_3p0: ldo13 {
+@@ -440,6 +441,7 @@ vreg_l15b_1p8: ldo15 {
+ 			regulator-min-microvolt = <1800000>;
+ 			regulator-max-microvolt = <1800000>;
+ 			regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
++			regulator-always-on;
+ 		};
+ 
+ 		vreg_l16b_2p9: ldo16 {
+diff --git a/arch/arm64/boot/dts/renesas/beacon-renesom-som.dtsi b/arch/arm64/boot/dts/renesas/beacon-renesom-som.dtsi
+index 68b04e56ae5623..5a15a956702a6b 100644
+--- a/arch/arm64/boot/dts/renesas/beacon-renesom-som.dtsi
++++ b/arch/arm64/boot/dts/renesas/beacon-renesom-som.dtsi
+@@ -62,8 +62,7 @@ phy0: ethernet-phy@0 {
+ 		compatible = "ethernet-phy-id0022.1640",
+ 			     "ethernet-phy-ieee802.3-c22";
+ 		reg = <0>;
+-		interrupt-parent = <&gpio2>;
+-		interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
++		interrupts-extended = <&gpio2 11 IRQ_TYPE_LEVEL_LOW>;
+ 		reset-gpios = <&gpio2 10 GPIO_ACTIVE_LOW>;
+ 	};
+ };
+diff --git a/arch/arm64/boot/dts/renesas/cat875.dtsi b/arch/arm64/boot/dts/renesas/cat875.dtsi
+index 8c9da8b4bd60bf..191b051ecfd458 100644
+--- a/arch/arm64/boot/dts/renesas/cat875.dtsi
++++ b/arch/arm64/boot/dts/renesas/cat875.dtsi
+@@ -25,8 +25,7 @@ phy0: ethernet-phy@0 {
+ 		compatible = "ethernet-phy-id001c.c915",
+ 			     "ethernet-phy-ieee802.3-c22";
+ 		reg = <0>;
+-		interrupt-parent = <&gpio2>;
+-		interrupts = <21 IRQ_TYPE_LEVEL_LOW>;
++		interrupts-extended = <&gpio2 21 IRQ_TYPE_LEVEL_LOW>;
+ 		reset-gpios = <&gpio1 20 GPIO_ACTIVE_LOW>;
+ 	};
+ };
+diff --git a/arch/arm64/boot/dts/renesas/condor-common.dtsi b/arch/arm64/boot/dts/renesas/condor-common.dtsi
+index 8b7c0c34eadce5..b2d99dfaa0cdf1 100644
+--- a/arch/arm64/boot/dts/renesas/condor-common.dtsi
++++ b/arch/arm64/boot/dts/renesas/condor-common.dtsi
+@@ -166,8 +166,7 @@ phy0: ethernet-phy@0 {
+ 			     "ethernet-phy-ieee802.3-c22";
+ 		rxc-skew-ps = <1500>;
+ 		reg = <0>;
+-		interrupt-parent = <&gpio4>;
+-		interrupts = <23 IRQ_TYPE_LEVEL_LOW>;
++		interrupts-extended = <&gpio4 23 IRQ_TYPE_LEVEL_LOW>;
+ 		reset-gpios = <&gpio4 22 GPIO_ACTIVE_LOW>;
+ 	};
+ };
+diff --git a/arch/arm64/boot/dts/renesas/draak.dtsi b/arch/arm64/boot/dts/renesas/draak.dtsi
+index 6f133f54ded54e..402112a37d75a8 100644
+--- a/arch/arm64/boot/dts/renesas/draak.dtsi
++++ b/arch/arm64/boot/dts/renesas/draak.dtsi
+@@ -247,8 +247,7 @@ phy0: ethernet-phy@0 {
+ 			     "ethernet-phy-ieee802.3-c22";
+ 		rxc-skew-ps = <1500>;
+ 		reg = <0>;
+-		interrupt-parent = <&gpio5>;
+-		interrupts = <19 IRQ_TYPE_LEVEL_LOW>;
++		interrupts-extended = <&gpio5 19 IRQ_TYPE_LEVEL_LOW>;
+ 		reset-gpios = <&gpio5 18 GPIO_ACTIVE_LOW>;
+ 		/*
+ 		 * TX clock internal delay mode is required for reliable
+diff --git a/arch/arm64/boot/dts/renesas/ebisu.dtsi b/arch/arm64/boot/dts/renesas/ebisu.dtsi
+index cba2fde9dd3688..1aedd093fb41bf 100644
+--- a/arch/arm64/boot/dts/renesas/ebisu.dtsi
++++ b/arch/arm64/boot/dts/renesas/ebisu.dtsi
+@@ -314,8 +314,7 @@ phy0: ethernet-phy@0 {
+ 			     "ethernet-phy-ieee802.3-c22";
+ 		rxc-skew-ps = <1500>;
+ 		reg = <0>;
+-		interrupt-parent = <&gpio2>;
+-		interrupts = <21 IRQ_TYPE_LEVEL_LOW>;
++		interrupts-extended = <&gpio2 21 IRQ_TYPE_LEVEL_LOW>;
+ 		reset-gpios = <&gpio1 20 GPIO_ACTIVE_LOW>;
+ 		/*
+ 		 * TX clock internal delay mode is required for reliable
+diff --git a/arch/arm64/boot/dts/renesas/hihope-rzg2-ex.dtsi b/arch/arm64/boot/dts/renesas/hihope-rzg2-ex.dtsi
+index ad898c6db4e62d..4113710d55226d 100644
+--- a/arch/arm64/boot/dts/renesas/hihope-rzg2-ex.dtsi
++++ b/arch/arm64/boot/dts/renesas/hihope-rzg2-ex.dtsi
+@@ -27,8 +27,7 @@ phy0: ethernet-phy@0 {
+ 		compatible = "ethernet-phy-id001c.c915",
+ 			     "ethernet-phy-ieee802.3-c22";
+ 		reg = <0>;
+-		interrupt-parent = <&gpio2>;
+-		interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
++		interrupts-extended = <&gpio2 11 IRQ_TYPE_LEVEL_LOW>;
+ 		reset-gpios = <&gpio2 10 GPIO_ACTIVE_LOW>;
+ 	};
+ };
+diff --git a/arch/arm64/boot/dts/renesas/r8a77970-eagle.dts b/arch/arm64/boot/dts/renesas/r8a77970-eagle.dts
+index 0608dce92e4059..7dd9e13cf00744 100644
+--- a/arch/arm64/boot/dts/renesas/r8a77970-eagle.dts
++++ b/arch/arm64/boot/dts/renesas/r8a77970-eagle.dts
+@@ -111,8 +111,7 @@ phy0: ethernet-phy@0 {
+ 			     "ethernet-phy-ieee802.3-c22";
+ 		rxc-skew-ps = <1500>;
+ 		reg = <0>;
+-		interrupt-parent = <&gpio1>;
+-		interrupts = <17 IRQ_TYPE_LEVEL_LOW>;
++		interrupts-extended = <&gpio1 17 IRQ_TYPE_LEVEL_LOW>;
+ 		reset-gpios = <&gpio1 16 GPIO_ACTIVE_LOW>;
+ 	};
+ };
+diff --git a/arch/arm64/boot/dts/renesas/r8a77970-v3msk.dts b/arch/arm64/boot/dts/renesas/r8a77970-v3msk.dts
+index e36999e91af533..0a103f93b14d71 100644
+--- a/arch/arm64/boot/dts/renesas/r8a77970-v3msk.dts
++++ b/arch/arm64/boot/dts/renesas/r8a77970-v3msk.dts
+@@ -117,8 +117,7 @@ phy0: ethernet-phy@0 {
+ 			     "ethernet-phy-ieee802.3-c22";
+ 		rxc-skew-ps = <1500>;
+ 		reg = <0>;
+-		interrupt-parent = <&gpio1>;
+-		interrupts = <17 IRQ_TYPE_LEVEL_LOW>;
++		interrupts-extended = <&gpio1 17 IRQ_TYPE_LEVEL_LOW>;
+ 		reset-gpios = <&gpio1 16 GPIO_ACTIVE_LOW>;
+ 	};
+ };
+diff --git a/arch/arm64/boot/dts/renesas/r8a77980-v3hsk.dts b/arch/arm64/boot/dts/renesas/r8a77980-v3hsk.dts
+index 77d22df25fffac..a8a20c748ffcd1 100644
+--- a/arch/arm64/boot/dts/renesas/r8a77980-v3hsk.dts
++++ b/arch/arm64/boot/dts/renesas/r8a77980-v3hsk.dts
+@@ -124,8 +124,7 @@ phy0: ethernet-phy@0 {
+ 			     "ethernet-phy-ieee802.3-c22";
+ 		rxc-skew-ps = <1500>;
+ 		reg = <0>;
+-		interrupt-parent = <&gpio4>;
+-		interrupts = <23 IRQ_TYPE_LEVEL_LOW>;
++		interrupts-extended = <&gpio4 23 IRQ_TYPE_LEVEL_LOW>;
+ 		reset-gpios = <&gpio4 22 GPIO_ACTIVE_LOW>;
+ 	};
+ };
+diff --git a/arch/arm64/boot/dts/renesas/r8a779a0-falcon.dts b/arch/arm64/boot/dts/renesas/r8a779a0-falcon.dts
+index 63db822e5f4662..6bd580737f25d3 100644
+--- a/arch/arm64/boot/dts/renesas/r8a779a0-falcon.dts
++++ b/arch/arm64/boot/dts/renesas/r8a779a0-falcon.dts
+@@ -31,8 +31,7 @@ phy0: ethernet-phy@0 {
+ 			     "ethernet-phy-ieee802.3-c22";
+ 		rxc-skew-ps = <1500>;
+ 		reg = <0>;
+-		interrupt-parent = <&gpio4>;
+-		interrupts = <16 IRQ_TYPE_LEVEL_LOW>;
++		interrupts-extended = <&gpio4 16 IRQ_TYPE_LEVEL_LOW>;
+ 		reset-gpios = <&gpio4 15 GPIO_ACTIVE_LOW>;
+ 	};
+ };
+diff --git a/arch/arm64/boot/dts/renesas/r8a779f0-spider-ethernet.dtsi b/arch/arm64/boot/dts/renesas/r8a779f0-spider-ethernet.dtsi
+index 33c1015e9ab38e..5d38669ed1ec34 100644
+--- a/arch/arm64/boot/dts/renesas/r8a779f0-spider-ethernet.dtsi
++++ b/arch/arm64/boot/dts/renesas/r8a779f0-spider-ethernet.dtsi
+@@ -60,8 +60,7 @@ mdio {
+ 				u101: ethernet-phy@1 {
+ 					reg = <1>;
+ 					compatible = "ethernet-phy-ieee802.3-c45";
+-					interrupt-parent = <&gpio3>;
+-					interrupts = <10 IRQ_TYPE_LEVEL_LOW>;
++					interrupts-extended = <&gpio3 10 IRQ_TYPE_LEVEL_LOW>;
+ 				};
+ 			};
+ 		};
+@@ -78,8 +77,7 @@ mdio {
+ 				u201: ethernet-phy@2 {
+ 					reg = <2>;
+ 					compatible = "ethernet-phy-ieee802.3-c45";
+-					interrupt-parent = <&gpio3>;
+-					interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
++					interrupts-extended = <&gpio3 11 IRQ_TYPE_LEVEL_LOW>;
+ 				};
+ 			};
+ 		};
+@@ -96,8 +94,7 @@ mdio {
+ 				u301: ethernet-phy@3 {
+ 					reg = <3>;
+ 					compatible = "ethernet-phy-ieee802.3-c45";
+-					interrupt-parent = <&gpio3>;
+-					interrupts = <9 IRQ_TYPE_LEVEL_LOW>;
++					interrupts-extended = <&gpio3 9 IRQ_TYPE_LEVEL_LOW>;
+ 				};
+ 			};
+ 		};
+diff --git a/arch/arm64/boot/dts/renesas/r8a779f4-s4sk.dts b/arch/arm64/boot/dts/renesas/r8a779f4-s4sk.dts
+index fa910b85859e99..5d71d52f9c6547 100644
+--- a/arch/arm64/boot/dts/renesas/r8a779f4-s4sk.dts
++++ b/arch/arm64/boot/dts/renesas/r8a779f4-s4sk.dts
+@@ -197,8 +197,7 @@ mdio {
+ 				ic99: ethernet-phy@1 {
+ 					reg = <1>;
+ 					compatible = "ethernet-phy-ieee802.3-c45";
+-					interrupt-parent = <&gpio3>;
+-					interrupts = <10 IRQ_TYPE_LEVEL_LOW>;
++					interrupts-extended = <&gpio3 10 IRQ_TYPE_LEVEL_LOW>;
+ 				};
+ 			};
+ 		};
+@@ -216,8 +215,7 @@ mdio {
+ 				ic102: ethernet-phy@2 {
+ 					reg = <2>;
+ 					compatible = "ethernet-phy-ieee802.3-c45";
+-					interrupt-parent = <&gpio3>;
+-					interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
++					interrupts-extended = <&gpio3 11 IRQ_TYPE_LEVEL_LOW>;
+ 				};
+ 			};
+ 		};
+diff --git a/arch/arm64/boot/dts/renesas/r8a779g2-white-hawk-single.dts b/arch/arm64/boot/dts/renesas/r8a779g2-white-hawk-single.dts
+index 50a428572d9bd9..48befde389376b 100644
+--- a/arch/arm64/boot/dts/renesas/r8a779g2-white-hawk-single.dts
++++ b/arch/arm64/boot/dts/renesas/r8a779g2-white-hawk-single.dts
+@@ -7,71 +7,10 @@
+ 
+ /dts-v1/;
+ #include "r8a779g2.dtsi"
+-#include "white-hawk-cpu-common.dtsi"
+-#include "white-hawk-common.dtsi"
++#include "white-hawk-single.dtsi"
+ 
+ / {
+ 	model = "Renesas White Hawk Single board based on r8a779g2";
+ 	compatible = "renesas,white-hawk-single", "renesas,r8a779g2",
+ 		     "renesas,r8a779g0";
+ };
+-
+-&hscif0 {
+-	uart-has-rtscts;
+-};
+-
+-&hscif0_pins {
+-	groups = "hscif0_data", "hscif0_ctrl";
+-	function = "hscif0";
+-};
+-
+-&pfc {
+-	tsn0_pins: tsn0 {
+-		mux {
+-			groups = "tsn0_link", "tsn0_mdio", "tsn0_rgmii",
+-				 "tsn0_txcrefclk";
+-			function = "tsn0";
+-		};
+-
+-		link {
+-			groups = "tsn0_link";
+-			bias-disable;
+-		};
+-
+-		mdio {
+-			groups = "tsn0_mdio";
+-			drive-strength = <24>;
+-			bias-disable;
+-		};
+-
+-		rgmii {
+-			groups = "tsn0_rgmii";
+-			drive-strength = <24>;
+-			bias-disable;
+-		};
+-	};
+-};
+-
+-&tsn0 {
+-	pinctrl-0 = <&tsn0_pins>;
+-	pinctrl-names = "default";
+-	phy-mode = "rgmii";
+-	phy-handle = <&phy3>;
+-	status = "okay";
+-
+-	mdio {
+-		#address-cells = <1>;
+-		#size-cells = <0>;
+-
+-		reset-gpios = <&gpio1 23 GPIO_ACTIVE_LOW>;
+-		reset-post-delay-us = <4000>;
+-
+-		phy3: ethernet-phy@0 {
+-			compatible = "ethernet-phy-id002b.0980",
+-				     "ethernet-phy-ieee802.3-c22";
+-			reg = <0>;
+-			interrupt-parent = <&gpio4>;
+-			interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
+-		};
+-	};
+-};
+diff --git a/arch/arm64/boot/dts/renesas/r8a779h0-gray-hawk-single.dts b/arch/arm64/boot/dts/renesas/r8a779h0-gray-hawk-single.dts
+index 9a1917b87f6138..f4d721a7f505c1 100644
+--- a/arch/arm64/boot/dts/renesas/r8a779h0-gray-hawk-single.dts
++++ b/arch/arm64/boot/dts/renesas/r8a779h0-gray-hawk-single.dts
+@@ -175,8 +175,7 @@ phy0: ethernet-phy@0 {
+ 			     "ethernet-phy-ieee802.3-c22";
+ 		rxc-skew-ps = <1500>;
+ 		reg = <0>;
+-		interrupt-parent = <&gpio7>;
+-		interrupts = <5 IRQ_TYPE_LEVEL_LOW>;
++		interrupts-extended = <&gpio7 5 IRQ_TYPE_LEVEL_LOW>;
+ 		reset-gpios = <&gpio7 10 GPIO_ACTIVE_LOW>;
+ 	};
+ };
+diff --git a/arch/arm64/boot/dts/renesas/rzg2l-smarc-som.dtsi b/arch/arm64/boot/dts/renesas/rzg2l-smarc-som.dtsi
+index 83f5642d0d35c2..502d9f17bf16d0 100644
+--- a/arch/arm64/boot/dts/renesas/rzg2l-smarc-som.dtsi
++++ b/arch/arm64/boot/dts/renesas/rzg2l-smarc-som.dtsi
+@@ -102,8 +102,7 @@ phy0: ethernet-phy@7 {
+ 		compatible = "ethernet-phy-id0022.1640",
+ 			     "ethernet-phy-ieee802.3-c22";
+ 		reg = <7>;
+-		interrupt-parent = <&irqc>;
+-		interrupts = <RZG2L_IRQ2 IRQ_TYPE_LEVEL_LOW>;
++		interrupts-extended = <&irqc RZG2L_IRQ2 IRQ_TYPE_LEVEL_LOW>;
+ 		rxc-skew-psec = <2400>;
+ 		txc-skew-psec = <2400>;
+ 		rxdv-skew-psec = <0>;
+@@ -130,8 +129,7 @@ phy1: ethernet-phy@7 {
+ 		compatible = "ethernet-phy-id0022.1640",
+ 			     "ethernet-phy-ieee802.3-c22";
+ 		reg = <7>;
+-		interrupt-parent = <&irqc>;
+-		interrupts = <RZG2L_IRQ3 IRQ_TYPE_LEVEL_LOW>;
++		interrupts-extended = <&irqc RZG2L_IRQ3 IRQ_TYPE_LEVEL_LOW>;
+ 		rxc-skew-psec = <2400>;
+ 		txc-skew-psec = <2400>;
+ 		rxdv-skew-psec = <0>;
+diff --git a/arch/arm64/boot/dts/renesas/rzg2lc-smarc-som.dtsi b/arch/arm64/boot/dts/renesas/rzg2lc-smarc-som.dtsi
+index b4ef5ea8a9e345..de39311a77dc2a 100644
+--- a/arch/arm64/boot/dts/renesas/rzg2lc-smarc-som.dtsi
++++ b/arch/arm64/boot/dts/renesas/rzg2lc-smarc-som.dtsi
+@@ -82,8 +82,7 @@ phy0: ethernet-phy@7 {
+ 		compatible = "ethernet-phy-id0022.1640",
+ 			     "ethernet-phy-ieee802.3-c22";
+ 		reg = <7>;
+-		interrupt-parent = <&irqc>;
+-		interrupts = <RZG2L_IRQ0 IRQ_TYPE_LEVEL_LOW>;
++		interrupts-extended = <&irqc RZG2L_IRQ0 IRQ_TYPE_LEVEL_LOW>;
+ 		rxc-skew-psec = <2400>;
+ 		txc-skew-psec = <2400>;
+ 		rxdv-skew-psec = <0>;
+diff --git a/arch/arm64/boot/dts/renesas/rzg2ul-smarc-som.dtsi b/arch/arm64/boot/dts/renesas/rzg2ul-smarc-som.dtsi
+index 79443fb3f58103..1a6fd58bd3682a 100644
+--- a/arch/arm64/boot/dts/renesas/rzg2ul-smarc-som.dtsi
++++ b/arch/arm64/boot/dts/renesas/rzg2ul-smarc-som.dtsi
+@@ -78,8 +78,7 @@ phy0: ethernet-phy@7 {
+ 		compatible = "ethernet-phy-id0022.1640",
+ 			     "ethernet-phy-ieee802.3-c22";
+ 		reg = <7>;
+-		interrupt-parent = <&irqc>;
+-		interrupts = <RZG2L_IRQ2 IRQ_TYPE_LEVEL_LOW>;
++		interrupts-extended = <&irqc RZG2L_IRQ2 IRQ_TYPE_LEVEL_LOW>;
+ 		rxc-skew-psec = <2400>;
+ 		txc-skew-psec = <2400>;
+ 		rxdv-skew-psec = <0>;
+@@ -107,8 +106,7 @@ phy1: ethernet-phy@7 {
+ 		compatible = "ethernet-phy-id0022.1640",
+ 			     "ethernet-phy-ieee802.3-c22";
+ 		reg = <7>;
+-		interrupt-parent = <&irqc>;
+-		interrupts = <RZG2L_IRQ7 IRQ_TYPE_LEVEL_LOW>;
++		interrupts-extended = <&irqc RZG2L_IRQ7 IRQ_TYPE_LEVEL_LOW>;
+ 		rxc-skew-psec = <2400>;
+ 		txc-skew-psec = <2400>;
+ 		rxdv-skew-psec = <0>;
+diff --git a/arch/arm64/boot/dts/renesas/rzg3s-smarc-som.dtsi b/arch/arm64/boot/dts/renesas/rzg3s-smarc-som.dtsi
+index 612cdc7efabbcc..d2d367c09abd4b 100644
+--- a/arch/arm64/boot/dts/renesas/rzg3s-smarc-som.dtsi
++++ b/arch/arm64/boot/dts/renesas/rzg3s-smarc-som.dtsi
+@@ -98,8 +98,7 @@ &eth0 {
+ 
+ 	phy0: ethernet-phy@7 {
+ 		reg = <7>;
+-		interrupt-parent = <&pinctrl>;
+-		interrupts = <RZG2L_GPIO(12, 0) IRQ_TYPE_EDGE_FALLING>;
++		interrupts-extended = <&pinctrl RZG2L_GPIO(12, 0) IRQ_TYPE_EDGE_FALLING>;
+ 		rxc-skew-psec = <0>;
+ 		txc-skew-psec = <0>;
+ 		rxdv-skew-psec = <0>;
+@@ -124,8 +123,7 @@ &eth1 {
+ 
+ 	phy1: ethernet-phy@7 {
+ 		reg = <7>;
+-		interrupt-parent = <&pinctrl>;
+-		interrupts = <RZG2L_GPIO(12, 1) IRQ_TYPE_EDGE_FALLING>;
++		interrupts-extended = <&pinctrl RZG2L_GPIO(12, 1) IRQ_TYPE_EDGE_FALLING>;
+ 		rxc-skew-psec = <0>;
+ 		txc-skew-psec = <0>;
+ 		rxdv-skew-psec = <0>;
+diff --git a/arch/arm64/boot/dts/renesas/salvator-common.dtsi b/arch/arm64/boot/dts/renesas/salvator-common.dtsi
+index 1eb4883b321970..c5035232956a81 100644
+--- a/arch/arm64/boot/dts/renesas/salvator-common.dtsi
++++ b/arch/arm64/boot/dts/renesas/salvator-common.dtsi
+@@ -353,8 +353,7 @@ phy0: ethernet-phy@0 {
+ 			     "ethernet-phy-ieee802.3-c22";
+ 		rxc-skew-ps = <1500>;
+ 		reg = <0>;
+-		interrupt-parent = <&gpio2>;
+-		interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
++		interrupts-extended = <&gpio2 11 IRQ_TYPE_LEVEL_LOW>;
+ 		reset-gpios = <&gpio2 10 GPIO_ACTIVE_LOW>;
+ 	};
+ };
+diff --git a/arch/arm64/boot/dts/renesas/ulcb.dtsi b/arch/arm64/boot/dts/renesas/ulcb.dtsi
+index a2f66f91604849..4cf141a701c062 100644
+--- a/arch/arm64/boot/dts/renesas/ulcb.dtsi
++++ b/arch/arm64/boot/dts/renesas/ulcb.dtsi
+@@ -150,8 +150,7 @@ phy0: ethernet-phy@0 {
+ 			     "ethernet-phy-ieee802.3-c22";
+ 		rxc-skew-ps = <1500>;
+ 		reg = <0>;
+-		interrupt-parent = <&gpio2>;
+-		interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
++		interrupts-extended = <&gpio2 11 IRQ_TYPE_LEVEL_LOW>;
+ 		reset-gpios = <&gpio2 10 GPIO_ACTIVE_LOW>;
+ 	};
+ };
+diff --git a/arch/arm64/boot/dts/renesas/white-hawk-cpu-common.dtsi b/arch/arm64/boot/dts/renesas/white-hawk-cpu-common.dtsi
+index 3845b413bd24cd..69e4fddebd4e4f 100644
+--- a/arch/arm64/boot/dts/renesas/white-hawk-cpu-common.dtsi
++++ b/arch/arm64/boot/dts/renesas/white-hawk-cpu-common.dtsi
+@@ -167,8 +167,7 @@ avb0_phy: ethernet-phy@0 {
+ 				     "ethernet-phy-ieee802.3-c22";
+ 			rxc-skew-ps = <1500>;
+ 			reg = <0>;
+-			interrupt-parent = <&gpio7>;
+-			interrupts = <5 IRQ_TYPE_LEVEL_LOW>;
++			interrupts-extended = <&gpio7 5 IRQ_TYPE_LEVEL_LOW>;
+ 			reset-gpios = <&gpio7 10 GPIO_ACTIVE_LOW>;
+ 		};
+ 	};
+diff --git a/arch/arm64/boot/dts/renesas/white-hawk-ethernet.dtsi b/arch/arm64/boot/dts/renesas/white-hawk-ethernet.dtsi
+index 595ec4ff4cdd01..ad94bf3f5e6c42 100644
+--- a/arch/arm64/boot/dts/renesas/white-hawk-ethernet.dtsi
++++ b/arch/arm64/boot/dts/renesas/white-hawk-ethernet.dtsi
+@@ -29,8 +29,7 @@ mdio {
+ 		avb1_phy: ethernet-phy@0 {
+ 			compatible = "ethernet-phy-ieee802.3-c45";
+ 			reg = <0>;
+-			interrupt-parent = <&gpio6>;
+-			interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
++			interrupts-extended = <&gpio6 3 IRQ_TYPE_LEVEL_LOW>;
+ 		};
+ 	};
+ };
+@@ -51,8 +50,7 @@ mdio {
+ 		avb2_phy: ethernet-phy@0 {
+ 			compatible = "ethernet-phy-ieee802.3-c45";
+ 			reg = <0>;
+-			interrupt-parent = <&gpio5>;
+-			interrupts = <4 IRQ_TYPE_LEVEL_LOW>;
++			interrupts-extended = <&gpio5 4 IRQ_TYPE_LEVEL_LOW>;
+ 		};
+ 	};
+ };
+diff --git a/arch/arm64/boot/dts/renesas/white-hawk-single.dtsi b/arch/arm64/boot/dts/renesas/white-hawk-single.dtsi
+new file mode 100644
+index 00000000000000..976a3ab44e5a52
+--- /dev/null
++++ b/arch/arm64/boot/dts/renesas/white-hawk-single.dtsi
+@@ -0,0 +1,77 @@
++// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
++/*
++ * Device Tree Source for the White Hawk Single board
++ *
++ * Copyright (C) 2023-2024 Glider bv
++ */
++
++#include "white-hawk-cpu-common.dtsi"
++#include "white-hawk-common.dtsi"
++
++/ {
++	model = "Renesas White Hawk Single board";
++	compatible = "renesas,white-hawk-single";
++
++	aliases {
++		ethernet3 = &tsn0;
++	};
++};
++
++&hscif0 {
++	uart-has-rtscts;
++};
++
++&hscif0_pins {
++	groups = "hscif0_data", "hscif0_ctrl";
++	function = "hscif0";
++};
++
++&pfc {
++	tsn0_pins: tsn0 {
++		mux {
++			groups = "tsn0_link", "tsn0_mdio", "tsn0_rgmii",
++				 "tsn0_txcrefclk";
++			function = "tsn0";
++		};
++
++		link {
++			groups = "tsn0_link";
++			bias-disable;
++		};
++
++		mdio {
++			groups = "tsn0_mdio";
++			drive-strength = <24>;
++			bias-disable;
++		};
++
++		rgmii {
++			groups = "tsn0_rgmii";
++			drive-strength = <24>;
++			bias-disable;
++		};
++	};
++};
++
++&tsn0 {
++	pinctrl-0 = <&tsn0_pins>;
++	pinctrl-names = "default";
++	phy-mode = "rgmii";
++	phy-handle = <&tsn0_phy>;
++	status = "okay";
++
++	mdio {
++		#address-cells = <1>;
++		#size-cells = <0>;
++
++		reset-gpios = <&gpio1 23 GPIO_ACTIVE_LOW>;
++		reset-post-delay-us = <4000>;
++
++		tsn0_phy: ethernet-phy@0 {
++			compatible = "ethernet-phy-id002b.0980",
++				     "ethernet-phy-ieee802.3-c22";
++			reg = <0>;
++			interrupts-extended = <&gpio4 3 IRQ_TYPE_LEVEL_LOW>;
++		};
++	};
++};
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
+index 257636d0d2cbb0..0a73218ea37b36 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
+@@ -59,17 +59,7 @@ vcc3v3_sys: vcc3v3-sys {
+ 		vin-supply = <&vcc5v0_sys>;
+ 	};
+ 
+-	vcc5v0_host: vcc5v0-host-regulator {
+-		compatible = "regulator-fixed";
+-		gpio = <&gpio4 RK_PA3 GPIO_ACTIVE_LOW>;
+-		pinctrl-names = "default";
+-		pinctrl-0 = <&vcc5v0_host_en>;
+-		regulator-name = "vcc5v0_host";
+-		regulator-always-on;
+-		vin-supply = <&vcc5v0_sys>;
+-	};
+-
+-	vcc5v0_sys: vcc5v0-sys {
++	vcc5v0_sys: regulator-vcc5v0-sys {
+ 		compatible = "regulator-fixed";
+ 		regulator-name = "vcc5v0_sys";
+ 		regulator-always-on;
+@@ -509,10 +499,10 @@ pmic_int_l: pmic-int-l {
+ 		};
+ 	};
+ 
+-	usb2 {
+-		vcc5v0_host_en: vcc5v0-host-en {
++	usb {
++		cy3304_reset: cy3304-reset {
+ 			rockchip,pins =
+-			  <4 RK_PA3 RK_FUNC_GPIO &pcfg_pull_none>;
++			  <4 RK_PA3 RK_FUNC_GPIO &pcfg_output_high>;
+ 		};
+ 	};
+ 
+@@ -579,7 +569,6 @@ u2phy1_otg: otg-port {
+ 	};
+ 
+ 	u2phy1_host: host-port {
+-		phy-supply = <&vcc5v0_host>;
+ 		status = "okay";
+ 	};
+ };
+@@ -591,6 +580,29 @@ &usbdrd3_1 {
+ &usbdrd_dwc3_1 {
+ 	status = "okay";
+ 	dr_mode = "host";
++	pinctrl-names = "default";
++	pinctrl-0 = <&cy3304_reset>;
++	#address-cells = <1>;
++	#size-cells = <0>;
++
++	hub_2_0: hub@1 {
++		compatible = "usb4b4,6502", "usb4b4,6506";
++		reg = <1>;
++		peer-hub = <&hub_3_0>;
++		reset-gpios = <&gpio4 RK_PA3 GPIO_ACTIVE_HIGH>;
++		vdd-supply = <&vcc1v2_phy>;
++		vdd2-supply = <&vcc3v3_sys>;
++
++	};
++
++	hub_3_0: hub@2 {
++		compatible = "usb4b4,6500", "usb4b4,6504";
++		reg = <2>;
++		peer-hub = <&hub_2_0>;
++		reset-gpios = <&gpio4 RK_PA3 GPIO_ACTIVE_HIGH>;
++		vdd-supply = <&vcc1v2_phy>;
++		vdd2-supply = <&vcc3v3_sys>;
++	};
+ };
+ 
+ &usb_host1_ehci {
+diff --git a/arch/powerpc/include/uapi/asm/ioctls.h b/arch/powerpc/include/uapi/asm/ioctls.h
+index 2c145da3b774a1..b5211e413829a2 100644
+--- a/arch/powerpc/include/uapi/asm/ioctls.h
++++ b/arch/powerpc/include/uapi/asm/ioctls.h
+@@ -23,10 +23,10 @@
+ #define TCSETSW		_IOW('t', 21, struct termios)
+ #define TCSETSF		_IOW('t', 22, struct termios)
+ 
+-#define TCGETA		_IOR('t', 23, struct termio)
+-#define TCSETA		_IOW('t', 24, struct termio)
+-#define TCSETAW		_IOW('t', 25, struct termio)
+-#define TCSETAF		_IOW('t', 28, struct termio)
++#define TCGETA		0x40147417 /* _IOR('t', 23, struct termio) */
++#define TCSETA		0x80147418 /* _IOW('t', 24, struct termio) */
++#define TCSETAW		0x80147419 /* _IOW('t', 25, struct termio) */
++#define TCSETAF		0x8014741c /* _IOW('t', 28, struct termio) */
+ 
+ #define TCSBRK		_IO('t', 29)
+ #define TCXONC		_IO('t', 30)
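The hard-coded values are just the pre-expanded forms of the corresponding _IOR()/_IOW() invocations, presumably so the definitions no longer require struct termio to be in scope where the header is used. A quick way to verify the constants, assuming powerpc's historical _IOC layout (8-bit number, 8-bit type, 13-bit size starting at bit 16, direction at bit 29 with _IOC_READ=2 and _IOC_WRITE=4) and sizeof(struct termio) == 20:

#include <stdio.h>

/* Assumed powerpc _IOC layout -- illustration, not the kernel headers. */
#define PPC_IOC(dir, type, nr, size) \
	(((unsigned int)(dir) << 29) | ((unsigned int)(size) << 16) | \
	 ((unsigned int)(type) << 8) | (unsigned int)(nr))

int main(void)
{
	printf("TCGETA  = %#010x\n", PPC_IOC(2, 't', 23, 20)); /* 0x40147417 */
	printf("TCSETA  = %#010x\n", PPC_IOC(4, 't', 24, 20)); /* 0x80147418 */
	printf("TCSETAW = %#010x\n", PPC_IOC(4, 't', 25, 20)); /* 0x80147419 */
	printf("TCSETAF = %#010x\n", PPC_IOC(4, 't', 28, 20)); /* 0x8014741c */
	return 0;
}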
+diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
+index b4006a4a112161..04d6a1e8ff9a22 100644
+--- a/arch/powerpc/kernel/Makefile
++++ b/arch/powerpc/kernel/Makefile
+@@ -162,9 +162,7 @@ endif
+ 
+ obj64-$(CONFIG_PPC_TRANSACTIONAL_MEM)	+= tm.o
+ 
+-ifneq ($(CONFIG_XMON)$(CONFIG_KEXEC_CORE)$(CONFIG_PPC_BOOK3S),)
+ obj-y				+= ppc_save_regs.o
+-endif
+ 
+ obj-$(CONFIG_EPAPR_PARAVIRT)	+= epapr_paravirt.o epapr_hcalls.o
+ obj-$(CONFIG_KVM_GUEST)		+= kvm.o kvm_emul.o
+diff --git a/arch/riscv/kernel/cpu_ops_sbi.c b/arch/riscv/kernel/cpu_ops_sbi.c
+index e6fbaaf549562d..87d6559448039c 100644
+--- a/arch/riscv/kernel/cpu_ops_sbi.c
++++ b/arch/riscv/kernel/cpu_ops_sbi.c
+@@ -18,10 +18,10 @@ const struct cpu_operations cpu_ops_sbi;
+ 
+ /*
+  * Ordered booting via HSM brings one cpu at a time. However, cpu hotplug can
+- * be invoked from multiple threads in parallel. Define a per cpu data
++ * be invoked from multiple threads in parallel. Define an array of boot data
+  * to handle that.
+  */
+-static DEFINE_PER_CPU(struct sbi_hart_boot_data, boot_data);
++static struct sbi_hart_boot_data boot_data[NR_CPUS];
+ 
+ static int sbi_hsm_hart_start(unsigned long hartid, unsigned long saddr,
+ 			      unsigned long priv)
+@@ -67,7 +67,7 @@ static int sbi_cpu_start(unsigned int cpuid, struct task_struct *tidle)
+ 	unsigned long boot_addr = __pa_symbol(secondary_start_sbi);
+ 	unsigned long hartid = cpuid_to_hartid_map(cpuid);
+ 	unsigned long hsm_data;
+-	struct sbi_hart_boot_data *bdata = &per_cpu(boot_data, cpuid);
++	struct sbi_hart_boot_data *bdata = &boot_data[cpuid];
+ 
+ 	/* Make sure tidle is updated */
+ 	smp_mb();
+diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c
+index 6f48a1073c6e81..ef44feb1a9daa0 100644
+--- a/arch/s390/pci/pci_event.c
++++ b/arch/s390/pci/pci_event.c
+@@ -105,6 +105,10 @@ static pci_ers_result_t zpci_event_do_error_state_clear(struct pci_dev *pdev,
+ 	struct zpci_dev *zdev = to_zpci(pdev);
+ 	int rc;
+ 
++	/* The underlying device may have been disabled by the event */
++	if (!zdev_enabled(zdev))
++		return PCI_ERS_RESULT_NEED_RESET;
++
+ 	pr_info("%s: Unblocking device access for examination\n", pci_name(pdev));
+ 	rc = zpci_reset_load_store_blocked(zdev);
+ 	if (rc) {
+@@ -260,6 +264,8 @@ static void __zpci_event_error(struct zpci_ccdf_err *ccdf)
+ 	struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid);
+ 	struct pci_dev *pdev = NULL;
+ 	pci_ers_result_t ers_res;
++	u32 fh = 0;
++	int rc;
+ 
+ 	zpci_dbg(3, "err fid:%x, fh:%x, pec:%x\n",
+ 		 ccdf->fid, ccdf->fh, ccdf->pec);
+@@ -268,6 +274,15 @@ static void __zpci_event_error(struct zpci_ccdf_err *ccdf)
+ 
+ 	if (zdev) {
+ 		mutex_lock(&zdev->state_lock);
++		rc = clp_refresh_fh(zdev->fid, &fh);
++		if (rc)
++			goto no_pdev;
++		if (!fh || ccdf->fh != fh) {
++			/* Ignore events with stale handles */
++			zpci_dbg(3, "err fid:%x, fh:%x (stale %x)\n",
++				 ccdf->fid, fh, ccdf->fh);
++			goto no_pdev;
++		}
+ 		zpci_update_fh(zdev, ccdf->fh);
+ 		if (zdev->zbus->bus)
+ 			pdev = pci_get_slot(zdev->zbus->bus, zdev->devfn);
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index 15425c9bdc2bcf..dfa334e3d1a033 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -2760,6 +2760,15 @@ config MITIGATION_ITS
+ 	  disabled, mitigation cannot be enabled via cmdline.
+ 	  See <file:Documentation/admin-guide/hw-vuln/indirect-target-selection.rst>
+ 
++config MITIGATION_TSA
++	bool "Mitigate Transient Scheduler Attacks"
++	depends on CPU_SUP_AMD
++	default y
++	help
++	  Enable mitigation for Transient Scheduler Attacks. TSA is a hardware
++	  security vulnerability on AMD CPUs which can lead to forwarding of
++	  invalid info to subsequent instructions and thus can affect their
++	  timing and thereby cause a leakage.
+ endif
+ 
+ config ARCH_HAS_ADD_PAGES
+diff --git a/arch/x86/entry/entry.S b/arch/x86/entry/entry.S
+index 5b96249734ada1..b0d5ab951231cc 100644
+--- a/arch/x86/entry/entry.S
++++ b/arch/x86/entry/entry.S
+@@ -33,20 +33,20 @@ EXPORT_SYMBOL_GPL(entry_ibpb);
+ 
+ /*
+  * Define the VERW operand that is disguised as entry code so that
+- * it can be referenced with KPTI enabled. This ensure VERW can be
++ * it can be referenced with KPTI enabled. This ensures VERW can be
+  * used late in exit-to-user path after page tables are switched.
+  */
+ .pushsection .entry.text, "ax"
+ 
+ .align L1_CACHE_BYTES, 0xcc
+-SYM_CODE_START_NOALIGN(mds_verw_sel)
++SYM_CODE_START_NOALIGN(x86_verw_sel)
+ 	UNWIND_HINT_UNDEFINED
+ 	ANNOTATE_NOENDBR
+ 	.word __KERNEL_DS
+ .align L1_CACHE_BYTES, 0xcc
+-SYM_CODE_END(mds_verw_sel);
++SYM_CODE_END(x86_verw_sel);
+ /* For KVM */
+-EXPORT_SYMBOL_GPL(mds_verw_sel);
++EXPORT_SYMBOL_GPL(x86_verw_sel);
+ 
+ .popsection
+ 
+diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h
+index aa30fd8cad7f52..b6099456477cd7 100644
+--- a/arch/x86/include/asm/cpu.h
++++ b/arch/x86/include/asm/cpu.h
+@@ -69,4 +69,16 @@ int intel_microcode_sanity_check(void *mc, bool print_err, int hdr_type);
+ 
+ extern struct cpumask cpus_stop_mask;
+ 
++union zen_patch_rev {
++	struct {
++		__u32 rev	 : 8,
++		      stepping	 : 4,
++		      model	 : 4,
++		      __reserved : 4,
++		      ext_model	 : 4,
++		      ext_fam	 : 8;
++	};
++	__u32 ucode_rev;
++};
++
+ #endif /* _ASM_X86_CPU_H */
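The bitfield layout mirrors how AMD encodes family/model/stepping into a microcode patch revision; amd_check_tsa_microcode() further down builds such a value from the running CPU and uses ucode_rev >> 8 as the lookup key. A standalone decode sketch, assuming the usual little-endian bitfield ordering on x86:

#include <stdio.h>
#include <stdint.h>

/* Local copy of the union above, for illustration. */
union zen_patch_rev {
	struct {
		uint32_t rev        : 8,
			 stepping   : 4,
			 model      : 4,
			 __reserved : 4,
			 ext_model  : 4,
			 ext_fam    : 8;
	};
	uint32_t ucode_rev;
};

int main(void)
{
	union zen_patch_rev p = { .ucode_rev = 0x0a0011d7 };

	/*
	 * Prints ext_fam=0xa (family 0xf + 0xa = 0x19, i.e. Zen3/Zen4),
	 * ext_model=0, model=0x1, stepping=0x1, rev=0xd7.
	 */
	printf("ext_fam=%#x ext_model=%#x model=%#x stepping=%#x rev=%#x\n",
	       p.ext_fam, p.ext_model, p.model, p.stepping, p.rev);

	/* 0xa0011 -- the switch key used in amd_check_tsa_microcode(). */
	printf("key = %#x\n", p.ucode_rev >> 8);
	return 0;
}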
+diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
+index 308e7d97135cf6..ef5749a0d8c24d 100644
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -455,6 +455,7 @@
+ #define X86_FEATURE_NO_NESTED_DATA_BP	(20*32+ 0) /* No Nested Data Breakpoints */
+ #define X86_FEATURE_WRMSR_XX_BASE_NS	(20*32+ 1) /* WRMSR to {FS,GS,KERNEL_GS}_BASE is non-serializing */
+ #define X86_FEATURE_LFENCE_RDTSC	(20*32+ 2) /* LFENCE always serializing / synchronizes RDTSC */
++#define X86_FEATURE_VERW_CLEAR		(20*32+ 5) /* The memory form of VERW mitigates TSA */
+ #define X86_FEATURE_NULL_SEL_CLR_BASE	(20*32+ 6) /* Null Selector Clears Base */
+ #define X86_FEATURE_AUTOIBRS		(20*32+ 8) /* Automatic IBRS */
+ #define X86_FEATURE_NO_SMM_CTL_MSR	(20*32+ 9) /* SMM_CTL MSR is not present */
+@@ -477,6 +478,10 @@
+ #define X86_FEATURE_FAST_CPPC		(21*32 + 5) /* AMD Fast CPPC */
+ #define X86_FEATURE_INDIRECT_THUNK_ITS	(21*32 + 6) /* Use thunk for indirect branches in lower half of cacheline */
+ 
++#define X86_FEATURE_TSA_SQ_NO          (21*32+11) /* AMD CPU not vulnerable to TSA-SQ */
++#define X86_FEATURE_TSA_L1_NO          (21*32+12) /* AMD CPU not vulnerable to TSA-L1 */
++#define X86_FEATURE_CLEAR_CPU_BUF_VM   (21*32+13) /* Clear CPU buffers using VERW before VMRUN */
++
+ /*
+  * BUG word(s)
+  */
+@@ -529,4 +534,5 @@
+ #define X86_BUG_IBPB_NO_RET	   	X86_BUG(1*32 + 4) /* "ibpb_no_ret" IBPB omits return target predictions */
+ #define X86_BUG_ITS			X86_BUG(1*32 + 5) /* "its" CPU is affected by Indirect Target Selection */
+ #define X86_BUG_ITS_NATIVE_ONLY		X86_BUG(1*32 + 6) /* "its_native_only" CPU is affected by ITS, VMX is not affected */
++#define X86_BUG_TSA			X86_BUG( 1*32+ 9) /* "tsa" CPU is affected by Transient Scheduler Attacks */
+ #endif /* _ASM_X86_CPUFEATURES_H */
+diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
+index 1c2db11a2c3cb9..2b75fe80fcb200 100644
+--- a/arch/x86/include/asm/irqflags.h
++++ b/arch/x86/include/asm/irqflags.h
+@@ -44,13 +44,13 @@ static __always_inline void native_irq_enable(void)
+ 
+ static __always_inline void native_safe_halt(void)
+ {
+-	mds_idle_clear_cpu_buffers();
++	x86_idle_clear_cpu_buffers();
+ 	asm volatile("sti; hlt": : :"memory");
+ }
+ 
+ static __always_inline void native_halt(void)
+ {
+-	mds_idle_clear_cpu_buffers();
++	x86_idle_clear_cpu_buffers();
+ 	asm volatile("hlt": : :"memory");
+ }
+ 
+diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h
+index 3e4e85f71a6ad0..7f9a97c572fe29 100644
+--- a/arch/x86/include/asm/mwait.h
++++ b/arch/x86/include/asm/mwait.h
+@@ -44,8 +44,6 @@ static __always_inline void __monitorx(const void *eax, unsigned long ecx,
+ 
+ static __always_inline void __mwait(unsigned long eax, unsigned long ecx)
+ {
+-	mds_idle_clear_cpu_buffers();
+-
+ 	/* "mwait %eax, %ecx;" */
+ 	asm volatile(".byte 0x0f, 0x01, 0xc9;"
+ 		     :: "a" (eax), "c" (ecx));
+@@ -80,7 +78,7 @@ static __always_inline void __mwait(unsigned long eax, unsigned long ecx)
+ static __always_inline void __mwaitx(unsigned long eax, unsigned long ebx,
+ 				     unsigned long ecx)
+ {
+-	/* No MDS buffer clear as this is AMD/HYGON only */
++	/* No need for TSA buffer clearing on AMD */
+ 
+ 	/* "mwaitx %eax, %ebx, %ecx;" */
+ 	asm volatile(".byte 0x0f, 0x01, 0xfb;"
+@@ -98,7 +96,7 @@ static __always_inline void __mwaitx(unsigned long eax, unsigned long ebx,
+  */
+ static __always_inline void __sti_mwait(unsigned long eax, unsigned long ecx)
+ {
+-	mds_idle_clear_cpu_buffers();
++
+ 	/* "mwait %eax, %ecx;" */
+ 	asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
+ 		     :: "a" (eax), "c" (ecx));
+@@ -116,21 +114,29 @@ static __always_inline void __sti_mwait(unsigned long eax, unsigned long ecx)
+  */
+ static __always_inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
+ {
++	if (need_resched())
++		return;
++
++	x86_idle_clear_cpu_buffers();
++
+ 	if (static_cpu_has_bug(X86_BUG_MONITOR) || !current_set_polling_and_test()) {
+ 		const void *addr = &current_thread_info()->flags;
+ 
+ 		alternative_input("", "clflush (%[addr])", X86_BUG_CLFLUSH_MONITOR, [addr] "a" (addr));
+ 		__monitor(addr, 0, 0);
+ 
+-		if (!need_resched()) {
+-			if (ecx & 1) {
+-				__mwait(eax, ecx);
+-			} else {
+-				__sti_mwait(eax, ecx);
+-				raw_local_irq_disable();
+-			}
++		if (need_resched())
++			goto out;
++
++		if (ecx & 1) {
++			__mwait(eax, ecx);
++		} else {
++			__sti_mwait(eax, ecx);
++			raw_local_irq_disable();
+ 		}
+ 	}
++
++out:
+ 	current_clr_polling();
+ }
+ 
+diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
+index f7bb0016d7d9e5..331f6a05535d4c 100644
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -315,25 +315,31 @@
+ .endm
+ 
+ /*
+- * Macro to execute VERW instruction that mitigate transient data sampling
+- * attacks such as MDS. On affected systems a microcode update overloaded VERW
+- * instruction to also clear the CPU buffers. VERW clobbers CFLAGS.ZF.
+- *
++ * Macro to execute VERW insns that mitigate transient data sampling
++ * attacks such as MDS or TSA. On affected systems a microcode update
++ * overloaded VERW insns to also clear the CPU buffers. VERW clobbers
++ * CFLAGS.ZF.
+  * Note: Only the memory operand variant of VERW clears the CPU buffers.
+  */
+-.macro CLEAR_CPU_BUFFERS
++.macro __CLEAR_CPU_BUFFERS feature
+ #ifdef CONFIG_X86_64
+-	ALTERNATIVE "", "verw mds_verw_sel(%rip)", X86_FEATURE_CLEAR_CPU_BUF
++	ALTERNATIVE "", "verw x86_verw_sel(%rip)", \feature
+ #else
+ 	/*
+ 	 * In 32bit mode, the memory operand must be a %cs reference. The data
+ 	 * segments may not be usable (vm86 mode), and the stack segment may not
+ 	 * be flat (ESPFIX32).
+ 	 */
+-	ALTERNATIVE "", "verw %cs:mds_verw_sel", X86_FEATURE_CLEAR_CPU_BUF
++	ALTERNATIVE "", "verw %cs:x86_verw_sel", \feature
+ #endif
+ .endm
+ 
++#define CLEAR_CPU_BUFFERS \
++	__CLEAR_CPU_BUFFERS X86_FEATURE_CLEAR_CPU_BUF
++
++#define VM_CLEAR_CPU_BUFFERS \
++	__CLEAR_CPU_BUFFERS X86_FEATURE_CLEAR_CPU_BUF_VM
++
+ #ifdef CONFIG_X86_64
+ .macro CLEAR_BRANCH_HISTORY
+ 	ALTERNATIVE "", "call clear_bhb_loop", X86_FEATURE_CLEAR_BHB_LOOP
+@@ -582,24 +588,24 @@ DECLARE_STATIC_KEY_FALSE(switch_to_cond_stibp);
+ DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
+ DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
+ 
+-DECLARE_STATIC_KEY_FALSE(mds_idle_clear);
++DECLARE_STATIC_KEY_FALSE(cpu_buf_idle_clear);
+ 
+ DECLARE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
+ 
+ DECLARE_STATIC_KEY_FALSE(mmio_stale_data_clear);
+ 
+-extern u16 mds_verw_sel;
++extern u16 x86_verw_sel;
+ 
+ #include <asm/segment.h>
+ 
+ /**
+- * mds_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
++ * x86_clear_cpu_buffers - Buffer clearing support for different x86 CPU vulns
+  *
+  * This uses the otherwise unused and obsolete VERW instruction in
+  * combination with microcode which triggers a CPU buffer flush when the
+  * instruction is executed.
+  */
+-static __always_inline void mds_clear_cpu_buffers(void)
++static __always_inline void x86_clear_cpu_buffers(void)
+ {
+ 	static const u16 ds = __KERNEL_DS;
+ 
+@@ -616,14 +622,15 @@ static __always_inline void mds_clear_cpu_buffers(void)
+ }
+ 
+ /**
+- * mds_idle_clear_cpu_buffers - Mitigation for MDS vulnerability
++ * x86_idle_clear_cpu_buffers - Buffer clearing support in idle for the MDS
++ * and TSA vulnerabilities.
+  *
+  * Clear CPU buffers if the corresponding static key is enabled
+  */
+-static __always_inline void mds_idle_clear_cpu_buffers(void)
++static __always_inline void x86_idle_clear_cpu_buffers(void)
+ {
+-	if (static_branch_likely(&mds_idle_clear))
+-		mds_clear_cpu_buffers();
++	if (static_branch_likely(&cpu_buf_idle_clear))
++		x86_clear_cpu_buffers();
+ }
+ 
+ #endif /* __ASSEMBLY__ */
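Both the assembly macro and the C helper reduce, after alternatives patching, to a single memory-operand VERW pointing at a kernel data-segment selector; the selector's value is irrelevant, since the buffer flush is a microcode side effect of the memory form of the instruction. A minimal sketch of the pattern (illustrative only, with a stand-in selector value rather than the kernel's __KERNEL_DS):

static inline void verw_clear_cpu_buffers(void)
{
	/* Stand-in selector value; the kernel uses __KERNEL_DS here. */
	static const unsigned short ds = 0x18;

	/*
	 * Memory-operand VERW: on CPUs with the updated microcode this
	 * flushes the affected buffers. Clobbers only EFLAGS.ZF.
	 */
	asm volatile("verw %0" : : "m" (ds) : "cc");
}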
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index e432910859cb1a..8a740e92e483ed 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -368,6 +368,63 @@ static void bsp_determine_snp(struct cpuinfo_x86 *c)
+ #endif
+ }
+ 
++static bool amd_check_tsa_microcode(void)
++{
++	struct cpuinfo_x86 *c = &boot_cpu_data;
++	union zen_patch_rev p;
++	u32 min_rev = 0;
++
++	p.ext_fam	= c->x86 - 0xf;
++	p.model		= c->x86_model;
++	p.stepping	= c->x86_stepping;
++
++	if (cpu_has(c, X86_FEATURE_ZEN3) ||
++	    cpu_has(c, X86_FEATURE_ZEN4)) {
++		switch (p.ucode_rev >> 8) {
++		case 0xa0011:	min_rev = 0x0a0011d7; break;
++		case 0xa0012:	min_rev = 0x0a00123b; break;
++		case 0xa0082:	min_rev = 0x0a00820d; break;
++		case 0xa1011:	min_rev = 0x0a10114c; break;
++		case 0xa1012:	min_rev = 0x0a10124c; break;
++		case 0xa1081:	min_rev = 0x0a108109; break;
++		case 0xa2010:	min_rev = 0x0a20102e; break;
++		case 0xa2012:	min_rev = 0x0a201211; break;
++		case 0xa4041:	min_rev = 0x0a404108; break;
++		case 0xa5000:	min_rev = 0x0a500012; break;
++		case 0xa6012:	min_rev = 0x0a60120a; break;
++		case 0xa7041:	min_rev = 0x0a704108; break;
++		case 0xa7052:	min_rev = 0x0a705208; break;
++		case 0xa7080:	min_rev = 0x0a708008; break;
++		case 0xa70c0:	min_rev = 0x0a70c008; break;
++		case 0xaa002:	min_rev = 0x0aa00216; break;
++		default:
++			pr_debug("%s: ucode_rev: 0x%x, current revision: 0x%x\n",
++				 __func__, p.ucode_rev, c->microcode);
++			return false;
++		}
++	}
++
++	if (!min_rev)
++		return false;
++
++	return c->microcode >= min_rev;
++}
++
++static void tsa_init(struct cpuinfo_x86 *c)
++{
++	if (cpu_has(c, X86_FEATURE_HYPERVISOR))
++		return;
++
++	if (cpu_has(c, X86_FEATURE_ZEN3) ||
++	    cpu_has(c, X86_FEATURE_ZEN4)) {
++		if (amd_check_tsa_microcode())
++			setup_force_cpu_cap(X86_FEATURE_VERW_CLEAR);
++	} else {
++		setup_force_cpu_cap(X86_FEATURE_TSA_SQ_NO);
++		setup_force_cpu_cap(X86_FEATURE_TSA_L1_NO);
++	}
++}
++
+ static void bsp_init_amd(struct cpuinfo_x86 *c)
+ {
+ 	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
+@@ -475,6 +532,9 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
+ 	}
+ 
+ 	bsp_determine_snp(c);
++
++	tsa_init(c);
++
+ 	return;
+ 
+ warn:
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 0e9ab0b9a4942c..c2c7b76d953f77 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -50,6 +50,7 @@ static void __init l1d_flush_select_mitigation(void);
+ static void __init srso_select_mitigation(void);
+ static void __init gds_select_mitigation(void);
+ static void __init its_select_mitigation(void);
++static void __init tsa_select_mitigation(void);
+ 
+ /* The base value of the SPEC_CTRL MSR without task-specific bits set */
+ u64 x86_spec_ctrl_base;
+@@ -122,9 +123,9 @@ DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
+ /* Control unconditional IBPB in switch_mm() */
+ DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
+ 
+-/* Control MDS CPU buffer clear before idling (halt, mwait) */
+-DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
+-EXPORT_SYMBOL_GPL(mds_idle_clear);
++/* Control CPU buffer clear before idling (halt, mwait) */
++DEFINE_STATIC_KEY_FALSE(cpu_buf_idle_clear);
++EXPORT_SYMBOL_GPL(cpu_buf_idle_clear);
+ 
+ /*
+  * Controls whether l1d flush based mitigations are enabled,
+@@ -185,6 +186,7 @@ void __init cpu_select_mitigations(void)
+ 	srso_select_mitigation();
+ 	gds_select_mitigation();
+ 	its_select_mitigation();
++	tsa_select_mitigation();
+ }
+ 
+ /*
+@@ -448,7 +450,7 @@ static void __init mmio_select_mitigation(void)
+ 	 * is required irrespective of SMT state.
+ 	 */
+ 	if (!(x86_arch_cap_msr & ARCH_CAP_FBSDP_NO))
+-		static_branch_enable(&mds_idle_clear);
++		static_branch_enable(&cpu_buf_idle_clear);
+ 
+ 	/*
+ 	 * Check if the system has the right microcode.
+@@ -2092,10 +2094,10 @@ static void update_mds_branch_idle(void)
+ 		return;
+ 
+ 	if (sched_smt_active()) {
+-		static_branch_enable(&mds_idle_clear);
++		static_branch_enable(&cpu_buf_idle_clear);
+ 	} else if (mmio_mitigation == MMIO_MITIGATION_OFF ||
+ 		   (x86_arch_cap_msr & ARCH_CAP_FBSDP_NO)) {
+-		static_branch_disable(&mds_idle_clear);
++		static_branch_disable(&cpu_buf_idle_clear);
+ 	}
+ }
+ 
+@@ -2103,6 +2105,94 @@ static void update_mds_branch_idle(void)
+ #define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n"
+ #define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n"
+ 
++#undef pr_fmt
++#define pr_fmt(fmt)	"Transient Scheduler Attacks: " fmt
++
++enum tsa_mitigations {
++	TSA_MITIGATION_NONE,
++	TSA_MITIGATION_UCODE_NEEDED,
++	TSA_MITIGATION_USER_KERNEL,
++	TSA_MITIGATION_VM,
++	TSA_MITIGATION_FULL,
++};
++
++static const char * const tsa_strings[] = {
++	[TSA_MITIGATION_NONE]		= "Vulnerable",
++	[TSA_MITIGATION_UCODE_NEEDED]	= "Vulnerable: Clear CPU buffers attempted, no microcode",
++	[TSA_MITIGATION_USER_KERNEL]	= "Mitigation: Clear CPU buffers: user/kernel boundary",
++	[TSA_MITIGATION_VM]		= "Mitigation: Clear CPU buffers: VM",
++	[TSA_MITIGATION_FULL]		= "Mitigation: Clear CPU buffers",
++};
++
++static enum tsa_mitigations tsa_mitigation __ro_after_init =
++	IS_ENABLED(CONFIG_MITIGATION_TSA) ? TSA_MITIGATION_FULL : TSA_MITIGATION_NONE;
++
++static int __init tsa_parse_cmdline(char *str)
++{
++	if (!str)
++		return -EINVAL;
++
++	if (!strcmp(str, "off"))
++		tsa_mitigation = TSA_MITIGATION_NONE;
++	else if (!strcmp(str, "on"))
++		tsa_mitigation = TSA_MITIGATION_FULL;
++	else if (!strcmp(str, "user"))
++		tsa_mitigation = TSA_MITIGATION_USER_KERNEL;
++	else if (!strcmp(str, "vm"))
++		tsa_mitigation = TSA_MITIGATION_VM;
++	else
++		pr_err("Ignoring unknown tsa=%s option.\n", str);
++
++	return 0;
++}
++early_param("tsa", tsa_parse_cmdline);
++
++static void __init tsa_select_mitigation(void)
++{
++	if (tsa_mitigation == TSA_MITIGATION_NONE)
++		return;
++
++	if (cpu_mitigations_off() || !boot_cpu_has_bug(X86_BUG_TSA)) {
++		tsa_mitigation = TSA_MITIGATION_NONE;
++		return;
++	}
++
++	if (!boot_cpu_has(X86_FEATURE_VERW_CLEAR))
++		tsa_mitigation = TSA_MITIGATION_UCODE_NEEDED;
++
++	switch (tsa_mitigation) {
++	case TSA_MITIGATION_USER_KERNEL:
++		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
++		break;
++
++	case TSA_MITIGATION_VM:
++		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
++		break;
++
++	case TSA_MITIGATION_UCODE_NEEDED:
++		if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
++			goto out;
++
++		pr_notice("Forcing mitigation on in a VM\n");
++
++		/*
++		 * On the off-chance that microcode has been updated
++		 * on the host, enable the mitigation in the guest just
++		 * in case.
++		 */
++		fallthrough;
++	case TSA_MITIGATION_FULL:
++		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
++		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
++		break;
++	default:
++		break;
++	}
++
++out:
++	pr_info("%s\n", tsa_strings[tsa_mitigation]);
++}
++
+ void cpu_bugs_smt_update(void)
+ {
+ 	mutex_lock(&spec_ctrl_mutex);
+@@ -2156,6 +2246,24 @@ void cpu_bugs_smt_update(void)
+ 		break;
+ 	}
+ 
++	switch (tsa_mitigation) {
++	case TSA_MITIGATION_USER_KERNEL:
++	case TSA_MITIGATION_VM:
++	case TSA_MITIGATION_FULL:
++	case TSA_MITIGATION_UCODE_NEEDED:
++		/*
++		 * TSA-SQ can potentially lead to info leakage between
++		 * SMT threads.
++		 */
++		if (sched_smt_active())
++			static_branch_enable(&cpu_buf_idle_clear);
++		else
++			static_branch_disable(&cpu_buf_idle_clear);
++		break;
++	case TSA_MITIGATION_NONE:
++		break;
++	}
++
+ 	mutex_unlock(&spec_ctrl_mutex);
+ }
+ 
+@@ -3084,6 +3192,11 @@ static ssize_t gds_show_state(char *buf)
+ 	return sysfs_emit(buf, "%s\n", gds_strings[gds_mitigation]);
+ }
+ 
++static ssize_t tsa_show_state(char *buf)
++{
++	return sysfs_emit(buf, "%s\n", tsa_strings[tsa_mitigation]);
++}
++
+ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
+ 			       char *buf, unsigned int bug)
+ {
+@@ -3145,6 +3258,9 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
+ 	case X86_BUG_ITS:
+ 		return its_show_state(buf);
+ 
++	case X86_BUG_TSA:
++		return tsa_show_state(buf);
++
+ 	default:
+ 		break;
+ 	}
+@@ -3229,6 +3345,11 @@ ssize_t cpu_show_indirect_target_selection(struct device *dev, struct device_att
+ {
+ 	return cpu_show_common(dev, attr, buf, X86_BUG_ITS);
+ }
++
++ssize_t cpu_show_tsa(struct device *dev, struct device_attribute *attr, char *buf)
++{
++	return cpu_show_common(dev, attr, buf, X86_BUG_TSA);
++}
+ #endif
+ 
+ void __warn_thunk(void)
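In practice the new knob is driven from the kernel command line: tsa=off, tsa=on, tsa=user and tsa=vm select the enum states above, with the Kconfig default (CONFIG_MITIGATION_TSA) deciding between FULL and NONE when nothing is passed. Whichever tsa_strings[] entry ends up selected is what cpu_show_tsa() reports through /sys/devices/system/cpu/vulnerabilities/tsa once the sysfs attribute is wired up in drivers/base/cpu.c below.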
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index a11c61fd7d52cf..ed072b126111c3 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -1233,6 +1233,8 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
+ #define ITS		BIT(8)
+ /* CPU is affected by Indirect Target Selection, but guest-host isolation is not affected */
+ #define ITS_NATIVE_ONLY	BIT(9)
++/* CPU is affected by Transient Scheduler Attacks */
++#define TSA		BIT(10)
+ 
+ static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
+ 	VULNBL_INTEL_STEPPINGS(INTEL_IVYBRIDGE,		X86_STEPPING_ANY,		SRBDS),
+@@ -1280,7 +1282,7 @@ static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
+ 	VULNBL_AMD(0x16, RETBLEED),
+ 	VULNBL_AMD(0x17, RETBLEED | SMT_RSB | SRSO),
+ 	VULNBL_HYGON(0x18, RETBLEED | SMT_RSB | SRSO),
+-	VULNBL_AMD(0x19, SRSO),
++	VULNBL_AMD(0x19, SRSO | TSA),
+ 	{}
+ };
+ 
+@@ -1490,6 +1492,16 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+ 			setup_force_cpu_bug(X86_BUG_ITS_NATIVE_ONLY);
+ 	}
+ 
++	if (c->x86_vendor == X86_VENDOR_AMD) {
++		if (!cpu_has(c, X86_FEATURE_TSA_SQ_NO) ||
++		    !cpu_has(c, X86_FEATURE_TSA_L1_NO)) {
++			if (cpu_matches(cpu_vuln_blacklist, TSA) ||
++			    /* Enable bug on Zen guests to allow for live migration. */
++			    (cpu_has(c, X86_FEATURE_HYPERVISOR) && cpu_has(c, X86_FEATURE_ZEN)))
++				setup_force_cpu_bug(X86_BUG_TSA);
++		}
++	}
++
+ 	if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
+ 		return;
+ 
+diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
+index 2f84164b20e011..765b4646648f7b 100644
+--- a/arch/x86/kernel/cpu/microcode/amd.c
++++ b/arch/x86/kernel/cpu/microcode/amd.c
+@@ -94,18 +94,6 @@ static struct equiv_cpu_table {
+ 	struct equiv_cpu_entry *entry;
+ } equiv_table;
+ 
+-union zen_patch_rev {
+-	struct {
+-		__u32 rev	 : 8,
+-		      stepping	 : 4,
+-		      model	 : 4,
+-		      __reserved : 4,
+-		      ext_model	 : 4,
+-		      ext_fam	 : 8;
+-	};
+-	__u32 ucode_rev;
+-};
+-
+ union cpuid_1_eax {
+ 	struct {
+ 		__u32 stepping    : 4,
+diff --git a/arch/x86/kernel/cpu/microcode/amd_shas.c b/arch/x86/kernel/cpu/microcode/amd_shas.c
+index 2a1655b1fdd883..1fd349cfc8024a 100644
+--- a/arch/x86/kernel/cpu/microcode/amd_shas.c
++++ b/arch/x86/kernel/cpu/microcode/amd_shas.c
+@@ -231,6 +231,13 @@ static const struct patch_digest phashes[] = {
+ 		0x0d,0x5b,0x65,0x34,0x69,0xb2,0x62,0x21,
+ 	}
+  },
++ { 0xa0011d7, {
++                0x35,0x07,0xcd,0x40,0x94,0xbc,0x81,0x6b,
++                0xfc,0x61,0x56,0x1a,0xe2,0xdb,0x96,0x12,
++                0x1c,0x1c,0x31,0xb1,0x02,0x6f,0xe5,0xd2,
++                0xfe,0x1b,0x04,0x03,0x2c,0x8f,0x4c,0x36,
++        }
++ },
+  { 0xa001223, {
+ 		0xfb,0x32,0x5f,0xc6,0x83,0x4f,0x8c,0xb8,
+ 		0xa4,0x05,0xf9,0x71,0x53,0x01,0x16,0xc4,
+@@ -294,6 +301,13 @@ static const struct patch_digest phashes[] = {
+ 		0xc0,0xcd,0x33,0xf2,0x8d,0xf9,0xef,0x59,
+ 	}
+  },
++ { 0xa00123b, {
++		0xef,0xa1,0x1e,0x71,0xf1,0xc3,0x2c,0xe2,
++		0xc3,0xef,0x69,0x41,0x7a,0x54,0xca,0xc3,
++		0x8f,0x62,0x84,0xee,0xc2,0x39,0xd9,0x28,
++		0x95,0xa7,0x12,0x49,0x1e,0x30,0x71,0x72,
++	}
++ },
+  { 0xa00820c, {
+ 		0xa8,0x0c,0x81,0xc0,0xa6,0x00,0xe7,0xf3,
+ 		0x5f,0x65,0xd3,0xb9,0x6f,0xea,0x93,0x63,
+@@ -301,6 +315,13 @@ static const struct patch_digest phashes[] = {
+ 		0xe1,0x3b,0x8d,0xb2,0xf8,0x22,0x03,0xe2,
+ 	}
+  },
++ { 0xa00820d, {
++		0xf9,0x2a,0xc0,0xf4,0x9e,0xa4,0x87,0xa4,
++		0x7d,0x87,0x00,0xfd,0xab,0xda,0x19,0xca,
++		0x26,0x51,0x32,0xc1,0x57,0x91,0xdf,0xc1,
++		0x05,0xeb,0x01,0x7c,0x5a,0x95,0x21,0xb7,
++	}
++ },
+  { 0xa10113e, {
+ 		0x05,0x3c,0x66,0xd7,0xa9,0x5a,0x33,0x10,
+ 		0x1b,0xf8,0x9c,0x8f,0xed,0xfc,0xa7,0xa0,
+@@ -322,6 +343,13 @@ static const struct patch_digest phashes[] = {
+ 		0xf1,0x5e,0xb0,0xde,0xb4,0x98,0xae,0xc4,
+ 	}
+  },
++ { 0xa10114c, {
++		0x9e,0xb6,0xa2,0xd9,0x87,0x38,0xc5,0x64,
++		0xd8,0x88,0xfa,0x78,0x98,0xf9,0x6f,0x74,
++		0x39,0x90,0x1b,0xa5,0xcf,0x5e,0xb4,0x2a,
++		0x02,0xff,0xd4,0x8c,0x71,0x8b,0xe2,0xc0,
++	}
++ },
+  { 0xa10123e, {
+ 		0x03,0xb9,0x2c,0x76,0x48,0x93,0xc9,0x18,
+ 		0xfb,0x56,0xfd,0xf7,0xe2,0x1d,0xca,0x4d,
+@@ -343,6 +371,13 @@ static const struct patch_digest phashes[] = {
+ 		0x1b,0x7d,0x64,0x9d,0x4b,0x53,0x13,0x75,
+ 	}
+  },
++ { 0xa10124c, {
++		0x29,0xea,0xf1,0x2c,0xb2,0xe4,0xef,0x90,
++		0xa4,0xcd,0x1d,0x86,0x97,0x17,0x61,0x46,
++		0xfc,0x22,0xcb,0x57,0x75,0x19,0xc8,0xcc,
++		0x0c,0xf5,0xbc,0xac,0x81,0x9d,0x9a,0xd2,
++	}
++ },
+  { 0xa108108, {
+ 		0xed,0xc2,0xec,0xa1,0x15,0xc6,0x65,0xe9,
+ 		0xd0,0xef,0x39,0xaa,0x7f,0x55,0x06,0xc6,
+@@ -350,6 +385,13 @@ static const struct patch_digest phashes[] = {
+ 		0x28,0x1e,0x9c,0x59,0x69,0x99,0x4d,0x16,
+ 	}
+  },
++ { 0xa108109, {
++		0x85,0xb4,0xbd,0x7c,0x49,0xa7,0xbd,0xfa,
++		0x49,0x36,0x80,0x81,0xc5,0xb7,0x39,0x1b,
++		0x9a,0xaa,0x50,0xde,0x9b,0xe9,0x32,0x35,
++		0x42,0x7e,0x51,0x4f,0x52,0x2c,0x28,0x59,
++	}
++ },
+  { 0xa20102d, {
+ 		0xf9,0x6e,0xf2,0x32,0xd3,0x0f,0x5f,0x11,
+ 		0x59,0xa1,0xfe,0xcc,0xcd,0x9b,0x42,0x89,
+@@ -357,6 +399,13 @@ static const struct patch_digest phashes[] = {
+ 		0x8c,0xe9,0x19,0x3e,0xcc,0x3f,0x7b,0xb4,
+ 	}
+  },
++ { 0xa20102e, {
++		0xbe,0x1f,0x32,0x04,0x0d,0x3c,0x9c,0xdd,
++		0xe1,0xa4,0xbf,0x76,0x3a,0xec,0xc2,0xf6,
++		0x11,0x00,0xa7,0xaf,0x0f,0xe5,0x02,0xc5,
++		0x54,0x3a,0x1f,0x8c,0x16,0xb5,0xff,0xbe,
++	}
++ },
+  { 0xa201210, {
+ 		0xe8,0x6d,0x51,0x6a,0x8e,0x72,0xf3,0xfe,
+ 		0x6e,0x16,0xbc,0x62,0x59,0x40,0x17,0xe9,
+@@ -364,6 +413,13 @@ static const struct patch_digest phashes[] = {
+ 		0xf7,0x55,0xf0,0x13,0xbb,0x22,0xf6,0x41,
+ 	}
+  },
++ { 0xa201211, {
++		0x69,0xa1,0x17,0xec,0xd0,0xf6,0x6c,0x95,
++		0xe2,0x1e,0xc5,0x59,0x1a,0x52,0x0a,0x27,
++		0xc4,0xed,0xd5,0x59,0x1f,0xbf,0x00,0xff,
++		0x08,0x88,0xb5,0xe1,0x12,0xb6,0xcc,0x27,
++	}
++ },
+  { 0xa404107, {
+ 		0xbb,0x04,0x4e,0x47,0xdd,0x5e,0x26,0x45,
+ 		0x1a,0xc9,0x56,0x24,0xa4,0x4c,0x82,0xb0,
+@@ -371,6 +427,13 @@ static const struct patch_digest phashes[] = {
+ 		0x13,0xbc,0xc5,0x25,0xe4,0xc5,0xc3,0x99,
+ 	}
+  },
++ { 0xa404108, {
++		0x69,0x67,0x43,0x06,0xf8,0x0c,0x62,0xdc,
++		0xa4,0x21,0x30,0x4f,0x0f,0x21,0x2c,0xcb,
++		0xcc,0x37,0xf1,0x1c,0xc3,0xf8,0x2f,0x19,
++		0xdf,0x53,0x53,0x46,0xb1,0x15,0xea,0x00,
++	}
++ },
+  { 0xa500011, {
+ 		0x23,0x3d,0x70,0x7d,0x03,0xc3,0xc4,0xf4,
+ 		0x2b,0x82,0xc6,0x05,0xda,0x80,0x0a,0xf1,
+@@ -378,6 +441,13 @@ static const struct patch_digest phashes[] = {
+ 		0x11,0x5e,0x96,0x7e,0x71,0xe9,0xfc,0x74,
+ 	}
+  },
++ { 0xa500012, {
++		0xeb,0x74,0x0d,0x47,0xa1,0x8e,0x09,0xe4,
++		0x93,0x4c,0xad,0x03,0x32,0x4c,0x38,0x16,
++		0x10,0x39,0xdd,0x06,0xaa,0xce,0xd6,0x0f,
++		0x62,0x83,0x9d,0x8e,0x64,0x55,0xbe,0x63,
++	}
++ },
+  { 0xa601209, {
+ 		0x66,0x48,0xd4,0x09,0x05,0xcb,0x29,0x32,
+ 		0x66,0xb7,0x9a,0x76,0xcd,0x11,0xf3,0x30,
+@@ -385,6 +455,13 @@ static const struct patch_digest phashes[] = {
+ 		0xe8,0x73,0xe2,0xd6,0xdb,0xd2,0x77,0x1d,
+ 	}
+  },
++ { 0xa60120a, {
++		0x0c,0x8b,0x3d,0xfd,0x52,0x52,0x85,0x7d,
++		0x20,0x3a,0xe1,0x7e,0xa4,0x21,0x3b,0x7b,
++		0x17,0x86,0xae,0xac,0x13,0xb8,0x63,0x9d,
++		0x06,0x01,0xd0,0xa0,0x51,0x9a,0x91,0x2c,
++	}
++ },
+  { 0xa704107, {
+ 		0xf3,0xc6,0x58,0x26,0xee,0xac,0x3f,0xd6,
+ 		0xce,0xa1,0x72,0x47,0x3b,0xba,0x2b,0x93,
+@@ -392,6 +469,13 @@ static const struct patch_digest phashes[] = {
+ 		0x64,0x39,0x71,0x8c,0xce,0xe7,0x41,0x39,
+ 	}
+  },
++ { 0xa704108, {
++		0xd7,0x55,0x15,0x2b,0xfe,0xc4,0xbc,0x93,
++		0xec,0x91,0xa0,0xae,0x45,0xb7,0xc3,0x98,
++		0x4e,0xff,0x61,0x77,0x88,0xc2,0x70,0x49,
++		0xe0,0x3a,0x1d,0x84,0x38,0x52,0xbf,0x5a,
++	}
++ },
+  { 0xa705206, {
+ 		0x8d,0xc0,0x76,0xbd,0x58,0x9f,0x8f,0xa4,
+ 		0x12,0x9d,0x21,0xfb,0x48,0x21,0xbc,0xe7,
+@@ -399,6 +483,13 @@ static const struct patch_digest phashes[] = {
+ 		0x03,0x35,0xe9,0xbe,0xfb,0x06,0xdf,0xfc,
+ 	}
+  },
++ { 0xa705208, {
++		0x30,0x1d,0x55,0x24,0xbc,0x6b,0x5a,0x19,
++		0x0c,0x7d,0x1d,0x74,0xaa,0xd1,0xeb,0xd2,
++		0x16,0x62,0xf7,0x5b,0xe1,0x1f,0x18,0x11,
++		0x5c,0xf0,0x94,0x90,0x26,0xec,0x69,0xff,
++	}
++ },
+  { 0xa708007, {
+ 		0x6b,0x76,0xcc,0x78,0xc5,0x8a,0xa3,0xe3,
+ 		0x32,0x2d,0x79,0xe4,0xc3,0x80,0xdb,0xb2,
+@@ -406,6 +497,13 @@ static const struct patch_digest phashes[] = {
+ 		0xdf,0x92,0x73,0x84,0x87,0x3c,0x73,0x93,
+ 	}
+  },
++ { 0xa708008, {
++		0x08,0x6e,0xf0,0x22,0x4b,0x8e,0xc4,0x46,
++		0x58,0x34,0xe6,0x47,0xa2,0x28,0xfd,0xab,
++		0x22,0x3d,0xdd,0xd8,0x52,0x9e,0x1d,0x16,
++		0xfa,0x01,0x68,0x14,0x79,0x3e,0xe8,0x6b,
++	}
++ },
+  { 0xa70c005, {
+ 		0x88,0x5d,0xfb,0x79,0x64,0xd8,0x46,0x3b,
+ 		0x4a,0x83,0x8e,0x77,0x7e,0xcf,0xb3,0x0f,
+@@ -413,6 +511,13 @@ static const struct patch_digest phashes[] = {
+ 		0xee,0x49,0xac,0xe1,0x8b,0x13,0xc5,0x13,
+ 	}
+  },
++ { 0xa70c008, {
++		0x0f,0xdb,0x37,0xa1,0x10,0xaf,0xd4,0x21,
++		0x94,0x0d,0xa4,0xa2,0xe9,0x86,0x6c,0x0e,
++		0x85,0x7c,0x36,0x30,0xa3,0x3a,0x78,0x66,
++		0x18,0x10,0x60,0x0d,0x78,0x3d,0x44,0xd0,
++	}
++ },
+  { 0xaa00116, {
+ 		0xe8,0x4c,0x2c,0x88,0xa1,0xac,0x24,0x63,
+ 		0x65,0xe5,0xaa,0x2d,0x16,0xa9,0xc3,0xf5,
+@@ -441,4 +546,11 @@ static const struct patch_digest phashes[] = {
+ 		0x68,0x2f,0x46,0xee,0xfe,0xc6,0x6d,0xef,
+ 	}
+  },
++ { 0xaa00216, {
++		0x79,0xfb,0x5b,0x9f,0xb6,0xe6,0xa8,0xf5,
++		0x4e,0x7c,0x4f,0x8e,0x1d,0xad,0xd0,0x08,
++		0xc2,0x43,0x7c,0x8b,0xe6,0xdb,0xd0,0xd2,
++		0xe8,0x39,0x26,0xc1,0xe5,0x5a,0x48,0xf1,
++	}
++ },
+ };
+diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
+index c84c30188fdf24..bc4993aa41edf2 100644
+--- a/arch/x86/kernel/cpu/scattered.c
++++ b/arch/x86/kernel/cpu/scattered.c
+@@ -49,6 +49,8 @@ static const struct cpuid_bit cpuid_bits[] = {
+ 	{ X86_FEATURE_MBA,		CPUID_EBX,  6, 0x80000008, 0 },
+ 	{ X86_FEATURE_SMBA,		CPUID_EBX,  2, 0x80000020, 0 },
+ 	{ X86_FEATURE_BMEC,		CPUID_EBX,  3, 0x80000020, 0 },
++	{ X86_FEATURE_TSA_SQ_NO,	CPUID_ECX,  1, 0x80000021, 0 },
++	{ X86_FEATURE_TSA_L1_NO,	CPUID_ECX,  2, 0x80000021, 0 },
+ 	{ X86_FEATURE_PERFMON_V2,	CPUID_EAX,  0, 0x80000022, 0 },
+ 	{ X86_FEATURE_AMD_LBR_V2,	CPUID_EAX,  1, 0x80000022, 0 },
+ 	{ X86_FEATURE_AMD_LBR_PMC_FREEZE,	CPUID_EAX,  2, 0x80000022, 0 },
+diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
+index 1dbd7a34645c29..4c9c98c5deabda 100644
+--- a/arch/x86/kernel/process.c
++++ b/arch/x86/kernel/process.c
+@@ -911,16 +911,24 @@ static __init bool prefer_mwait_c1_over_halt(void)
+  */
+ static __cpuidle void mwait_idle(void)
+ {
++	if (need_resched())
++		return;
++
++	x86_idle_clear_cpu_buffers();
++
+ 	if (!current_set_polling_and_test()) {
+ 		const void *addr = &current_thread_info()->flags;
+ 
+ 		alternative_input("", "clflush (%[addr])", X86_BUG_CLFLUSH_MONITOR, [addr] "a" (addr));
+ 		__monitor(addr, 0, 0);
+-		if (!need_resched()) {
+-			__sti_mwait(0, 0);
+-			raw_local_irq_disable();
+-		}
++		if (need_resched())
++			goto out;
++
++		__sti_mwait(0, 0);
++		raw_local_irq_disable();
+ 	}
++
++out:
+ 	__current_clr_polling();
+ }
+ 
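This hunk and the mwait.h one above share the same shape: bail out if a reschedule is already pending, issue the buffer clear once via x86_idle_clear_cpu_buffers() before the monitor is armed (rather than inside __mwait()/__sti_mwait() on every call), and re-check need_resched() after MONITOR so that both exit paths fall through the new out: label to clear the polling state.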
+diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
+index c92e43f2d0c4ec..02196db26a0842 100644
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -814,6 +814,7 @@ void kvm_set_cpu_caps(void)
+ 
+ 	kvm_cpu_cap_mask(CPUID_8000_0021_EAX,
+ 		F(NO_NESTED_DATA_BP) | F(LFENCE_RDTSC) | 0 /* SmmPgCfgLock */ |
++		F(VERW_CLEAR) |
+ 		F(NULL_SEL_CLR_BASE) | F(AUTOIBRS) | 0 /* PrefetchCtlMsr */ |
+ 		F(WRMSR_XX_BASE_NS)
+ 	);
+@@ -826,6 +827,10 @@ void kvm_set_cpu_caps(void)
+ 		F(PERFMON_V2)
+ 	);
+ 
++	kvm_cpu_cap_init_kvm_defined(CPUID_8000_0021_ECX,
++		F(TSA_SQ_NO) | F(TSA_L1_NO)
++	);
++
+ 	/*
+ 	 * Synthesize "LFENCE is serializing" into the AMD-defined entry in
+ 	 * KVM's supported CPUID if the feature is reported as supported by the
+@@ -1376,8 +1381,9 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
+ 		entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
+ 		break;
+ 	case 0x80000021:
+-		entry->ebx = entry->ecx = entry->edx = 0;
++		entry->ebx = entry->edx = 0;
+ 		cpuid_entry_override(entry, CPUID_8000_0021_EAX);
++		cpuid_entry_override(entry, CPUID_8000_0021_ECX);
+ 		break;
+ 	/* AMD Extended Performance Monitoring and Debug */
+ 	case 0x80000022: {
+diff --git a/arch/x86/kvm/reverse_cpuid.h b/arch/x86/kvm/reverse_cpuid.h
+index 0d17d6b7063964..0ea847b8235411 100644
+--- a/arch/x86/kvm/reverse_cpuid.h
++++ b/arch/x86/kvm/reverse_cpuid.h
+@@ -18,6 +18,7 @@ enum kvm_only_cpuid_leafs {
+ 	CPUID_8000_0022_EAX,
+ 	CPUID_7_2_EDX,
+ 	CPUID_24_0_EBX,
++	CPUID_8000_0021_ECX,
+ 	NR_KVM_CPU_CAPS,
+ 
+ 	NKVMCAPINTS = NR_KVM_CPU_CAPS - NCAPINTS,
+@@ -68,6 +69,10 @@ enum kvm_only_cpuid_leafs {
+ /* CPUID level 0x80000022 (EAX) */
+ #define KVM_X86_FEATURE_PERFMON_V2	KVM_X86_FEATURE(CPUID_8000_0022_EAX, 0)
+ 
++/* CPUID level 0x80000021 (ECX) */
++#define KVM_X86_FEATURE_TSA_SQ_NO	KVM_X86_FEATURE(CPUID_8000_0021_ECX, 1)
++#define KVM_X86_FEATURE_TSA_L1_NO	KVM_X86_FEATURE(CPUID_8000_0021_ECX, 2)
++
+ struct cpuid_reg {
+ 	u32 function;
+ 	u32 index;
+@@ -98,6 +103,7 @@ static const struct cpuid_reg reverse_cpuid[] = {
+ 	[CPUID_8000_0022_EAX] = {0x80000022, 0, CPUID_EAX},
+ 	[CPUID_7_2_EDX]       = {         7, 2, CPUID_EDX},
+ 	[CPUID_24_0_EBX]      = {      0x24, 0, CPUID_EBX},
++	[CPUID_8000_0021_ECX] = {0x80000021, 0, CPUID_ECX},
+ };
+ 
+ /*
+@@ -137,6 +143,8 @@ static __always_inline u32 __feature_translate(int x86_feature)
+ 	KVM_X86_TRANSLATE_FEATURE(PERFMON_V2);
+ 	KVM_X86_TRANSLATE_FEATURE(RRSBA_CTRL);
+ 	KVM_X86_TRANSLATE_FEATURE(BHI_CTRL);
++	KVM_X86_TRANSLATE_FEATURE(TSA_SQ_NO);
++	KVM_X86_TRANSLATE_FEATURE(TSA_L1_NO);
+ 	default:
+ 		return x86_feature;
+ 	}
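CPUID_8000_0021_ECX is a KVM-only synthetic leaf index: the kernel proper scatters TSA_SQ_NO and TSA_L1_NO into feature word 21 (see the scattered.c hunk above), so __feature_translate() must map those Linux-defined bit positions back to the architectural ones, CPUID 0x80000021 ECX bits 1 and 2, before cpuid_entry_override() in cpuid.c can expose them to guests.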
+diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S
+index 0c61153b275f64..235c4af6b692a4 100644
+--- a/arch/x86/kvm/svm/vmenter.S
++++ b/arch/x86/kvm/svm/vmenter.S
+@@ -169,6 +169,9 @@ SYM_FUNC_START(__svm_vcpu_run)
+ #endif
+ 	mov VCPU_RDI(%_ASM_DI), %_ASM_DI
+ 
++	/* Clobbers EFLAGS.ZF */
++	VM_CLEAR_CPU_BUFFERS
++
+ 	/* Enter guest mode */
+ 3:	vmrun %_ASM_AX
+ 4:
+@@ -335,6 +338,9 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run)
+ 	mov SVM_current_vmcb(%rdi), %rax
+ 	mov KVM_VMCB_pa(%rax), %rax
+ 
++	/* Clobbers EFLAGS.ZF */
++	VM_CLEAR_CPU_BUFFERS
++
+ 	/* Enter guest mode */
+ 1:	vmrun %rax
+ 2:
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index bcbedddacc4804..029fbf3791f17f 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -7313,7 +7313,7 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
+ 		vmx_l1d_flush(vcpu);
+ 	else if (static_branch_unlikely(&mmio_stale_data_clear) &&
+ 		 kvm_arch_has_assigned_device(vcpu->kvm))
+-		mds_clear_cpu_buffers();
++		x86_clear_cpu_buffers();
+ 
+ 	vmx_disable_fb_clear(vmx);
+ 
+diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
+index e809c2aed78aed..a232746d150a75 100644
+--- a/drivers/acpi/acpica/dsmethod.c
++++ b/drivers/acpi/acpica/dsmethod.c
+@@ -483,6 +483,13 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
+ 		return_ACPI_STATUS(AE_NULL_OBJECT);
+ 	}
+ 
++	if (this_walk_state->num_operands < obj_desc->method.param_count) {
++		ACPI_ERROR((AE_INFO, "Missing argument for method [%4.4s]",
++			    acpi_ut_get_node_name(method_node)));
++
++		return_ACPI_STATUS(AE_AML_UNINITIALIZED_ARG);
++	}
++
+ 	/* Init for new method, possibly wait on method mutex */
+ 
+ 	status =
+diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
+index 78db38c7076e45..125d7df8f30ae2 100644
+--- a/drivers/acpi/thermal.c
++++ b/drivers/acpi/thermal.c
+@@ -803,7 +803,13 @@ static int acpi_thermal_add(struct acpi_device *device)
+ 
+ 	acpi_thermal_aml_dependency_fix(tz);
+ 
+-	/* Get trip points [_CRT, _PSV, etc.] (required). */
++	/*
++	 * Set the cooling mode [_SCP] to active cooling. This needs to happen before
++	 * we retrieve the trip point values.
++	 */
++	acpi_execute_simple_method(tz->device->handle, "_SCP", ACPI_THERMAL_MODE_ACTIVE);
++
++	/* Get trip points [_ACi, _PSV, etc.] (required). */
+ 	acpi_thermal_get_trip_points(tz);
+ 
+ 	crit_temp = acpi_thermal_get_critical_trip(tz);
+@@ -814,10 +820,6 @@ static int acpi_thermal_add(struct acpi_device *device)
+ 	if (result)
+ 		goto free_memory;
+ 
+-	/* Set the cooling mode [_SCP] to active cooling. */
+-	acpi_execute_simple_method(tz->device->handle, "_SCP",
+-				   ACPI_THERMAL_MODE_ACTIVE);
+-
+ 	/* Determine the default polling frequency [_TZP]. */
+ 	if (tzp)
+ 		tz->polling_frequency = tzp;
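The reordering matters because _SCP tells the platform which cooling policy the OS intends to use, and firmware may compute the trip points returned by _ACi/_PSV differently depending on that mode; evaluating it only after acpi_thermal_get_trip_points() meant the driver could latch values for the wrong policy. The comment now cites _ACi rather than _CRT among the trips fetched there, since the critical trip is obtained separately via acpi_thermal_get_critical_trip() just below.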
+diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
+index d36e71f475abdc..39a350755a1baf 100644
+--- a/drivers/ata/libata-acpi.c
++++ b/drivers/ata/libata-acpi.c
+@@ -514,15 +514,19 @@ unsigned int ata_acpi_gtm_xfermask(struct ata_device *dev,
+ EXPORT_SYMBOL_GPL(ata_acpi_gtm_xfermask);
+ 
+ /**
+- * ata_acpi_cbl_80wire		-	Check for 80 wire cable
++ * ata_acpi_cbl_pata_type - Return PATA cable type
+  * @ap: Port to check
+- * @gtm: GTM data to use
+  *
+- * Return 1 if the @gtm indicates the BIOS selected an 80wire mode.
++ * Return ATA_CBL_PATA* according to the transfer mode selected by BIOS
+  */
+-int ata_acpi_cbl_80wire(struct ata_port *ap, const struct ata_acpi_gtm *gtm)
++int ata_acpi_cbl_pata_type(struct ata_port *ap)
+ {
+ 	struct ata_device *dev;
++	int ret = ATA_CBL_PATA_UNK;
++	const struct ata_acpi_gtm *gtm = ata_acpi_init_gtm(ap);
++
++	if (!gtm)
++		return ATA_CBL_PATA40;
+ 
+ 	ata_for_each_dev(dev, &ap->link, ENABLED) {
+ 		unsigned int xfer_mask, udma_mask;
+@@ -530,13 +534,17 @@ int ata_acpi_cbl_80wire(struct ata_port *ap, const struct ata_acpi_gtm *gtm)
+ 		xfer_mask = ata_acpi_gtm_xfermask(dev, gtm);
+ 		ata_unpack_xfermask(xfer_mask, NULL, NULL, &udma_mask);
+ 
+-		if (udma_mask & ~ATA_UDMA_MASK_40C)
+-			return 1;
++		ret = ATA_CBL_PATA40;
++
++		if (udma_mask & ~ATA_UDMA_MASK_40C) {
++			ret = ATA_CBL_PATA80;
++			break;
++		}
+ 	}
+ 
+-	return 0;
++	return ret;
+ }
+-EXPORT_SYMBOL_GPL(ata_acpi_cbl_80wire);
++EXPORT_SYMBOL_GPL(ata_acpi_cbl_pata_type);
+ 
+ static void ata_acpi_gtf_to_tf(struct ata_device *dev,
+ 			       const struct ata_acpi_gtf *gtf,
+diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c
+index b811efd2cc346a..73e81e160c91fb 100644
+--- a/drivers/ata/pata_cs5536.c
++++ b/drivers/ata/pata_cs5536.c
+@@ -27,7 +27,7 @@
+ #include <scsi/scsi_host.h>
+ #include <linux/dmi.h>
+ 
+-#ifdef CONFIG_X86_32
++#if defined(CONFIG_X86) && defined(CONFIG_X86_32)
+ #include <asm/msr.h>
+ static int use_msr;
+ module_param_named(msr, use_msr, int, 0644);
+diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
+index d82728a01832b5..bb80e7800dcbe9 100644
+--- a/drivers/ata/pata_via.c
++++ b/drivers/ata/pata_via.c
+@@ -201,11 +201,9 @@ static int via_cable_detect(struct ata_port *ap) {
+ 	   two drives */
+ 	if (ata66 & (0x10100000 >> (16 * ap->port_no)))
+ 		return ATA_CBL_PATA80;
++
+ 	/* Check with ACPI so we can spot BIOS reported SATA bridges */
+-	if (ata_acpi_init_gtm(ap) &&
+-	    ata_acpi_cbl_80wire(ap, ata_acpi_init_gtm(ap)))
+-		return ATA_CBL_PATA80;
+-	return ATA_CBL_PATA40;
++	return ata_acpi_cbl_pata_type(ap);
+ }
+ 
+ static int via_pre_reset(struct ata_link *link, unsigned long deadline)
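The via change shows the intended calling convention for the refactored helper: controller-specific detection runs first, and the ACPI query becomes the single fallback return. For another PATA driver the same shape would look like this (a sketch; example_host_reports_80wire() is a hypothetical host-specific check):

static int example_cable_detect(struct ata_port *ap)
{
	/* Controller-specific detection first, when available. */
	if (example_host_reports_80wire(ap))	/* hypothetical */
		return ATA_CBL_PATA80;

	/*
	 * ata_acpi_cbl_pata_type() returns ATA_CBL_PATA40 when no _GTM
	 * data exists, ATA_CBL_PATA80 once any enabled device's BIOS-
	 * selected UDMA modes exceed the 40-wire set, ATA_CBL_PATA_UNK
	 * if no device is enabled, and ATA_CBL_PATA40 otherwise.
	 */
	return ata_acpi_cbl_pata_type(ap);
}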
+diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
+index d88f721cf68cde..02870e70ed5955 100644
+--- a/drivers/base/cpu.c
++++ b/drivers/base/cpu.c
+@@ -600,6 +600,7 @@ CPU_SHOW_VULN_FALLBACK(spec_rstack_overflow);
+ CPU_SHOW_VULN_FALLBACK(gds);
+ CPU_SHOW_VULN_FALLBACK(reg_file_data_sampling);
+ CPU_SHOW_VULN_FALLBACK(indirect_target_selection);
++CPU_SHOW_VULN_FALLBACK(tsa);
+ 
+ static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
+ static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
+@@ -616,6 +617,7 @@ static DEVICE_ATTR(spec_rstack_overflow, 0444, cpu_show_spec_rstack_overflow, NU
+ static DEVICE_ATTR(gather_data_sampling, 0444, cpu_show_gds, NULL);
+ static DEVICE_ATTR(reg_file_data_sampling, 0444, cpu_show_reg_file_data_sampling, NULL);
+ static DEVICE_ATTR(indirect_target_selection, 0444, cpu_show_indirect_target_selection, NULL);
++static DEVICE_ATTR(tsa, 0444, cpu_show_tsa, NULL);
+ 
+ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
+ 	&dev_attr_meltdown.attr,
+@@ -633,6 +635,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
+ 	&dev_attr_gather_data_sampling.attr,
+ 	&dev_attr_reg_file_data_sampling.attr,
+ 	&dev_attr_indirect_target_selection.attr,
++	&dev_attr_tsa.attr,
+ 	NULL
+ };
+ 
+diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h
+index 749ae1246f4cf8..d35caa3c69e15e 100644
+--- a/drivers/block/aoe/aoe.h
++++ b/drivers/block/aoe/aoe.h
+@@ -80,6 +80,7 @@ enum {
+ 	DEVFL_NEWSIZE = (1<<6),	/* need to update dev size in block layer */
+ 	DEVFL_FREEING = (1<<7),	/* set when device is being cleaned up */
+ 	DEVFL_FREED = (1<<8),	/* device has been cleaned up */
++	DEVFL_DEAD = (1<<9),	/* device has timed out of aoe_deadsecs */
+ };
+ 
+ enum {
+diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
+index 92b06d1de4cc7b..6c94cfd1c480ea 100644
+--- a/drivers/block/aoe/aoecmd.c
++++ b/drivers/block/aoe/aoecmd.c
+@@ -754,7 +754,7 @@ rexmit_timer(struct timer_list *timer)
+ 
+ 	utgts = count_targets(d, NULL);
+ 
+-	if (d->flags & DEVFL_TKILL) {
++	if (d->flags & (DEVFL_TKILL | DEVFL_DEAD)) {
+ 		spin_unlock_irqrestore(&d->lock, flags);
+ 		return;
+ 	}
+@@ -786,7 +786,8 @@ rexmit_timer(struct timer_list *timer)
+ 			 * to clean up.
+ 			 */
+ 			list_splice(&flist, &d->factive[0]);
+-			aoedev_downdev(d);
++			d->flags |= DEVFL_DEAD;
++			queue_work(aoe_wq, &d->work);
+ 			goto out;
+ 		}
+ 
+@@ -898,6 +899,9 @@ aoecmd_sleepwork(struct work_struct *work)
+ {
+ 	struct aoedev *d = container_of(work, struct aoedev, work);
+ 
++	if (d->flags & DEVFL_DEAD)
++		aoedev_downdev(d);
++
+ 	if (d->flags & DEVFL_GDALLOC)
+ 		aoeblk_gdalloc(d);
+ 
+diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
+index 280679bde3a506..4240e11adfb769 100644
+--- a/drivers/block/aoe/aoedev.c
++++ b/drivers/block/aoe/aoedev.c
+@@ -200,8 +200,11 @@ aoedev_downdev(struct aoedev *d)
+ 	struct list_head *head, *pos, *nx;
+ 	struct request *rq, *rqnext;
+ 	int i;
++	unsigned long flags;
+ 
+-	d->flags &= ~DEVFL_UP;
++	spin_lock_irqsave(&d->lock, flags);
++	d->flags &= ~(DEVFL_UP | DEVFL_DEAD);
++	spin_unlock_irqrestore(&d->lock, flags);
+ 
+ 	/* clean out active and to-be-retransmitted buffers */
+ 	for (i = 0; i < NFACTIVE; i++) {
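Taken together, the three aoe hunks are a defer-to-process-context conversion: aoedev_downdev() now takes d->lock itself, so rexmit_timer(), which runs with that lock held, can no longer call it directly; it sets DEVFL_DEAD and kicks the device's work item instead, and aoecmd_sleepwork() performs the actual teardown. Reduced to its essentials, the pattern is (a sketch with illustrative names, not the driver code):

#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

#define DEVFL_DEAD (1 << 9)

struct dev {
	spinlock_t lock;
	unsigned long flags;
	struct timer_list timer;
	struct work_struct work;
};

static bool dev_timed_out(struct dev *d);	/* hypothetical predicate */
static void dev_teardown(struct dev *d);	/* acquires d->lock itself */

static void rexmit_cb(struct timer_list *t)
{
	struct dev *d = from_timer(d, t, timer);
	unsigned long flags;

	spin_lock_irqsave(&d->lock, flags);
	if (dev_timed_out(d)) {
		/* Tearing down here would self-deadlock on d->lock. */
		d->flags |= DEVFL_DEAD;
		queue_work(system_wq, &d->work);
	}
	spin_unlock_irqrestore(&d->lock, flags);
}

static void dev_work(struct work_struct *work)
{
	struct dev *d = container_of(work, struct dev, work);

	if (d->flags & DEVFL_DEAD)
		dev_teardown(d);	/* process context, lock not held */
}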
+diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c
+index e1f60f0f507c96..df2728cccf8b30 100644
+--- a/drivers/crypto/intel/iaa/iaa_crypto_main.c
++++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c
+@@ -1126,8 +1126,7 @@ static int iaa_compress(struct crypto_tfm *tfm,	struct acomp_req *req,
+ 			struct idxd_wq *wq,
+ 			dma_addr_t src_addr, unsigned int slen,
+ 			dma_addr_t dst_addr, unsigned int *dlen,
+-			u32 *compression_crc,
+-			bool disable_async)
++			u32 *compression_crc)
+ {
+ 	struct iaa_device_compression_mode *active_compression_mode;
+ 	struct iaa_compression_ctx *ctx = crypto_tfm_ctx(tfm);
+@@ -1170,7 +1169,7 @@ static int iaa_compress(struct crypto_tfm *tfm,	struct acomp_req *req,
+ 	desc->src2_size = sizeof(struct aecs_comp_table_record);
+ 	desc->completion_addr = idxd_desc->compl_dma;
+ 
+-	if (ctx->use_irq && !disable_async) {
++	if (ctx->use_irq) {
+ 		desc->flags |= IDXD_OP_FLAG_RCI;
+ 
+ 		idxd_desc->crypto.req = req;
+@@ -1183,8 +1182,7 @@ static int iaa_compress(struct crypto_tfm *tfm,	struct acomp_req *req,
+ 			" src_addr %llx, dst_addr %llx\n", __func__,
+ 			active_compression_mode->name,
+ 			src_addr, dst_addr);
+-	} else if (ctx->async_mode && !disable_async)
+-		req->base.data = idxd_desc;
++	}
+ 
+ 	dev_dbg(dev, "%s: compression mode %s,"
+ 		" desc->src1_addr %llx, desc->src1_size %d,"
+@@ -1204,7 +1202,7 @@ static int iaa_compress(struct crypto_tfm *tfm,	struct acomp_req *req,
+ 	update_total_comp_calls();
+ 	update_wq_comp_calls(wq);
+ 
+-	if (ctx->async_mode && !disable_async) {
++	if (ctx->async_mode) {
+ 		ret = -EINPROGRESS;
+ 		dev_dbg(dev, "%s: returning -EINPROGRESS\n", __func__);
+ 		goto out;
+@@ -1224,7 +1222,7 @@ static int iaa_compress(struct crypto_tfm *tfm,	struct acomp_req *req,
+ 
+ 	*compression_crc = idxd_desc->iax_completion->crc;
+ 
+-	if (!ctx->async_mode || disable_async)
++	if (!ctx->async_mode)
+ 		idxd_free_desc(wq, idxd_desc);
+ out:
+ 	return ret;
+@@ -1421,8 +1419,7 @@ static int iaa_decompress(struct crypto_tfm *tfm, struct acomp_req *req,
+ 			" src_addr %llx, dst_addr %llx\n", __func__,
+ 			active_compression_mode->name,
+ 			src_addr, dst_addr);
+-	} else if (ctx->async_mode && !disable_async)
+-		req->base.data = idxd_desc;
++	}
+ 
+ 	dev_dbg(dev, "%s: decompression mode %s,"
+ 		" desc->src1_addr %llx, desc->src1_size %d,"
+@@ -1490,13 +1487,11 @@ static int iaa_comp_acompress(struct acomp_req *req)
+ 	struct iaa_compression_ctx *compression_ctx;
+ 	struct crypto_tfm *tfm = req->base.tfm;
+ 	dma_addr_t src_addr, dst_addr;
+-	bool disable_async = false;
+ 	int nr_sgs, cpu, ret = 0;
+ 	struct iaa_wq *iaa_wq;
+ 	u32 compression_crc;
+ 	struct idxd_wq *wq;
+ 	struct device *dev;
+-	int order = -1;
+ 
+ 	compression_ctx = crypto_tfm_ctx(tfm);
+ 
+@@ -1526,21 +1521,6 @@ static int iaa_comp_acompress(struct acomp_req *req)
+ 
+ 	iaa_wq = idxd_wq_get_private(wq);
+ 
+-	if (!req->dst) {
+-		gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
+-
+-		/* incompressible data will always be < 2 * slen */
+-		req->dlen = 2 * req->slen;
+-		order = order_base_2(round_up(req->dlen, PAGE_SIZE) / PAGE_SIZE);
+-		req->dst = sgl_alloc_order(req->dlen, order, false, flags, NULL);
+-		if (!req->dst) {
+-			ret = -ENOMEM;
+-			order = -1;
+-			goto out;
+-		}
+-		disable_async = true;
+-	}
+-
+ 	dev = &wq->idxd->pdev->dev;
+ 
+ 	nr_sgs = dma_map_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE);
+@@ -1570,7 +1550,7 @@ static int iaa_comp_acompress(struct acomp_req *req)
+ 		req->dst, req->dlen, sg_dma_len(req->dst));
+ 
+ 	ret = iaa_compress(tfm, req, wq, src_addr, req->slen, dst_addr,
+-			   &req->dlen, &compression_crc, disable_async);
++			   &req->dlen, &compression_crc);
+ 	if (ret == -EINPROGRESS)
+ 		return ret;
+ 
+@@ -1601,100 +1581,6 @@ static int iaa_comp_acompress(struct acomp_req *req)
+ out:
+ 	iaa_wq_put(wq);
+ 
+-	if (order >= 0)
+-		sgl_free_order(req->dst, order);
+-
+-	return ret;
+-}
+-
+-static int iaa_comp_adecompress_alloc_dest(struct acomp_req *req)
+-{
+-	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
+-		GFP_KERNEL : GFP_ATOMIC;
+-	struct crypto_tfm *tfm = req->base.tfm;
+-	dma_addr_t src_addr, dst_addr;
+-	int nr_sgs, cpu, ret = 0;
+-	struct iaa_wq *iaa_wq;
+-	struct device *dev;
+-	struct idxd_wq *wq;
+-	int order = -1;
+-
+-	cpu = get_cpu();
+-	wq = wq_table_next_wq(cpu);
+-	put_cpu();
+-	if (!wq) {
+-		pr_debug("no wq configured for cpu=%d\n", cpu);
+-		return -ENODEV;
+-	}
+-
+-	ret = iaa_wq_get(wq);
+-	if (ret) {
+-		pr_debug("no wq available for cpu=%d\n", cpu);
+-		return -ENODEV;
+-	}
+-
+-	iaa_wq = idxd_wq_get_private(wq);
+-
+-	dev = &wq->idxd->pdev->dev;
+-
+-	nr_sgs = dma_map_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE);
+-	if (nr_sgs <= 0 || nr_sgs > 1) {
+-		dev_dbg(dev, "couldn't map src sg for iaa device %d,"
+-			" wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id,
+-			iaa_wq->wq->id, ret);
+-		ret = -EIO;
+-		goto out;
+-	}
+-	src_addr = sg_dma_address(req->src);
+-	dev_dbg(dev, "dma_map_sg, src_addr %llx, nr_sgs %d, req->src %p,"
+-		" req->slen %d, sg_dma_len(sg) %d\n", src_addr, nr_sgs,
+-		req->src, req->slen, sg_dma_len(req->src));
+-
+-	req->dlen = 4 * req->slen; /* start with ~avg comp rato */
+-alloc_dest:
+-	order = order_base_2(round_up(req->dlen, PAGE_SIZE) / PAGE_SIZE);
+-	req->dst = sgl_alloc_order(req->dlen, order, false, flags, NULL);
+-	if (!req->dst) {
+-		ret = -ENOMEM;
+-		order = -1;
+-		goto out;
+-	}
+-
+-	nr_sgs = dma_map_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE);
+-	if (nr_sgs <= 0 || nr_sgs > 1) {
+-		dev_dbg(dev, "couldn't map dst sg for iaa device %d,"
+-			" wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id,
+-			iaa_wq->wq->id, ret);
+-		ret = -EIO;
+-		goto err_map_dst;
+-	}
+-
+-	dst_addr = sg_dma_address(req->dst);
+-	dev_dbg(dev, "dma_map_sg, dst_addr %llx, nr_sgs %d, req->dst %p,"
+-		" req->dlen %d, sg_dma_len(sg) %d\n", dst_addr, nr_sgs,
+-		req->dst, req->dlen, sg_dma_len(req->dst));
+-	ret = iaa_decompress(tfm, req, wq, src_addr, req->slen,
+-			     dst_addr, &req->dlen, true);
+-	if (ret == -EOVERFLOW) {
+-		dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE);
+-		req->dlen *= 2;
+-		if (req->dlen > CRYPTO_ACOMP_DST_MAX)
+-			goto err_map_dst;
+-		goto alloc_dest;
+-	}
+-
+-	if (ret != 0)
+-		dev_dbg(dev, "asynchronous decompress failed ret=%d\n", ret);
+-
+-	dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE);
+-err_map_dst:
+-	dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE);
+-out:
+-	iaa_wq_put(wq);
+-
+-	if (order >= 0)
+-		sgl_free_order(req->dst, order);
+-
+ 	return ret;
+ }
+ 
+@@ -1717,9 +1603,6 @@ static int iaa_comp_adecompress(struct acomp_req *req)
+ 		return -EINVAL;
+ 	}
+ 
+-	if (!req->dst)
+-		return iaa_comp_adecompress_alloc_dest(req);
+-
+ 	cpu = get_cpu();
+ 	wq = wq_table_next_wq(cpu);
+ 	put_cpu();
+@@ -1800,19 +1683,10 @@ static int iaa_comp_init_fixed(struct crypto_acomp *acomp_tfm)
+ 	return 0;
+ }
+ 
+-static void dst_free(struct scatterlist *sgl)
+-{
+-	/*
+-	 * Called for req->dst = NULL cases but we free elsewhere
+-	 * using sgl_free_order().
+-	 */
+-}
+-
+ static struct acomp_alg iaa_acomp_fixed_deflate = {
+ 	.init			= iaa_comp_init_fixed,
+ 	.compress		= iaa_comp_acompress,
+ 	.decompress		= iaa_comp_adecompress,
+-	.dst_free               = dst_free,
+ 	.base			= {
+ 		.cra_name		= "deflate",
+ 		.cra_driver_name	= "deflate-iaa",
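
The hunks above strip the driver's fallback of allocating req->dst
itself, along with the now-unused dst_free() stub and the whole
iaa_comp_adecompress_alloc_dest() path: with this kernel's acomp API a
NULL destination is no longer supported, so callers own the output
scatterlist. A caller-side sketch under that assumption; the 2 * slen
sizing mirrors the heuristic the removed code used and is not an API
guarantee:

	#include <crypto/acompress.h>
	#include <linux/scatterlist.h>

	static int compress_with_own_dst(struct crypto_acomp *tfm,
					 struct scatterlist *src,
					 unsigned int slen)
	{
		unsigned int dlen = 2 * slen;	/* heuristic, see above */
		struct scatterlist *dst;
		struct acomp_req *req;
		int ret;

		dst = sgl_alloc(dlen, GFP_KERNEL, NULL);
		if (!dst)
			return -ENOMEM;

		req = acomp_request_alloc(tfm);
		if (!req) {
			sgl_free(dst);
			return -ENOMEM;
		}

		acomp_request_set_params(req, src, dst, slen, dlen);
		ret = crypto_acomp_compress(req);	/* -EINPROGRESS on async tfms */

		/* real code would consume dst before freeing it */
		acomp_request_free(req);
		sgl_free(dst);
		return ret;
	}
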
+diff --git a/drivers/crypto/xilinx/zynqmp-sha.c b/drivers/crypto/xilinx/zynqmp-sha.c
+index 1bcec6f46c9c75..9b5345068604f4 100644
+--- a/drivers/crypto/xilinx/zynqmp-sha.c
++++ b/drivers/crypto/xilinx/zynqmp-sha.c
+@@ -3,18 +3,19 @@
+  * Xilinx ZynqMP SHA Driver.
+  * Copyright (c) 2022 Xilinx Inc.
+  */
+-#include <linux/cacheflush.h>
+ #include <crypto/hash.h>
+ #include <crypto/internal/hash.h>
+ #include <crypto/sha3.h>
+-#include <linux/crypto.h>
++#include <linux/cacheflush.h>
++#include <linux/cleanup.h>
+ #include <linux/device.h>
+ #include <linux/dma-mapping.h>
++#include <linux/err.h>
+ #include <linux/firmware/xlnx-zynqmp.h>
+-#include <linux/init.h>
+ #include <linux/io.h>
+ #include <linux/kernel.h>
+ #include <linux/module.h>
++#include <linux/spinlock.h>
+ #include <linux/platform_device.h>
+ 
+ #define ZYNQMP_DMA_BIT_MASK		32U
+@@ -43,6 +44,8 @@ struct zynqmp_sha_desc_ctx {
+ static dma_addr_t update_dma_addr, final_dma_addr;
+ static char *ubuf, *fbuf;
+ 
++static DEFINE_SPINLOCK(zynqmp_sha_lock);
++
+ static int zynqmp_sha_init_tfm(struct crypto_shash *hash)
+ {
+ 	const char *fallback_driver_name = crypto_shash_alg_name(hash);
+@@ -124,7 +127,8 @@ static int zynqmp_sha_export(struct shash_desc *desc, void *out)
+ 	return crypto_shash_export(&dctx->fbk_req, out);
+ }
+ 
+-static int zynqmp_sha_digest(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out)
++static int __zynqmp_sha_digest(struct shash_desc *desc, const u8 *data,
++			       unsigned int len, u8 *out)
+ {
+ 	unsigned int remaining_len = len;
+ 	int update_size;
+@@ -159,6 +163,12 @@ static int zynqmp_sha_digest(struct shash_desc *desc, const u8 *data, unsigned i
+ 	return ret;
+ }
+ 
++static int zynqmp_sha_digest(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out)
++{
++	scoped_guard(spinlock_bh, &zynqmp_sha_lock)
++		return __zynqmp_sha_digest(desc, data, len, out);
++}
++
+ static struct zynqmp_sha_drv_ctx sha3_drv_ctx = {
+ 	.sha3_384 = {
+ 		.init = zynqmp_sha_init,
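
The zynqmp-sha fix serializes the whole digest: the driver funnels every
request through the single static ubuf/fbuf DMA buffers, so two
concurrent digests could corrupt each other's data. Wrapping the work in
scoped_guard() from <linux/cleanup.h> drops the lock automatically on
every return path. A minimal sketch of the construct (shared_buf and
do_hw_op() are illustrative):

	#include <linux/cleanup.h>
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(hw_lock);
	static u8 shared_buf[256];	/* stands in for the static ubuf/fbuf */

	static int do_hw_op(u8 *buf, const u8 *data, unsigned int len, u8 *out);

	static int hw_digest(const u8 *data, unsigned int len, u8 *out)
	{
		/* lock held for the statement below, released on any exit */
		scoped_guard(spinlock_bh, &hw_lock)
			return do_hw_op(shared_buf, data, len, out);
	}
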
+diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
+index b1ef4546346d44..bea3e9858aca56 100644
+--- a/drivers/dma-buf/dma-resv.c
++++ b/drivers/dma-buf/dma-resv.c
+@@ -685,11 +685,13 @@ long dma_resv_wait_timeout(struct dma_resv *obj, enum dma_resv_usage usage,
+ 	dma_resv_iter_begin(&cursor, obj, usage);
+ 	dma_resv_for_each_fence_unlocked(&cursor, fence) {
+ 
+-		ret = dma_fence_wait_timeout(fence, intr, ret);
+-		if (ret <= 0) {
+-			dma_resv_iter_end(&cursor);
+-			return ret;
+-		}
++		ret = dma_fence_wait_timeout(fence, intr, timeout);
++		if (ret <= 0)
++			break;
++
++		/* Even for zero timeout the return value is 1 */
++		if (timeout)
++			timeout = ret;
+ 	}
+ 	dma_resv_iter_end(&cursor);
+ 
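
In the removed lines the shrinking return value was fed back in as the
next timeout, so after one signaled fence returned 1 the following fence
was waited on for a single jiffy, and a pure poll (timeout == 0) turned
into a blocking wait. The fix hands every fence the caller's remaining
budget and only folds the result back in timed mode, because
dma_fence_wait_timeout() returns 1 for an already-signaled fence even
when called with a zero timeout. The loop's semantics, sketched with a
hypothetical wait_one() in place of dma_fence_wait_timeout():

	struct fence;		/* opaque, like struct dma_fence */
	long wait_one(struct fence *f, long budget); /* <0 err, 0 timeout, else time left */

	static long wait_all(struct fence **fences, int n, long budget)
	{
		long ret = budget ? budget : 1;	/* poll mode reports "signaled" as 1 */

		for (int i = 0; i < n; i++) {
			ret = wait_one(fences[i], budget);
			if (ret <= 0)
				break;		/* error or ran out of time */
			if (budget)
				budget = ret;	/* carry remaining time, timed mode only */
		}
		return ret;
	}
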
+diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c
+index 47751b2c057ae6..83dad9c2da0641 100644
+--- a/drivers/firmware/arm_ffa/driver.c
++++ b/drivers/firmware/arm_ffa/driver.c
+@@ -110,7 +110,7 @@ struct ffa_drv_info {
+ 	struct work_struct sched_recv_irq_work;
+ 	struct xarray partition_info;
+ 	DECLARE_HASHTABLE(notifier_hash, ilog2(FFA_MAX_NOTIFICATIONS));
+-	struct mutex notify_lock; /* lock to protect notifier hashtable  */
++	rwlock_t notify_lock; /* lock to protect notifier hashtable  */
+ };
+ 
+ static struct ffa_drv_info *drv_info;
+@@ -1141,12 +1141,11 @@ notifier_hash_node_get(u16 notify_id, enum notify_type type)
+ 	return NULL;
+ }
+ 
+-static int
+-update_notifier_cb(int notify_id, enum notify_type type, ffa_notifier_cb cb,
+-		   void *cb_data, bool is_registration)
++static int update_notifier_cb(int notify_id, enum notify_type type,
++			      struct notifier_cb_info *cb)
+ {
+ 	struct notifier_cb_info *cb_info = NULL;
+-	bool cb_found;
++	bool cb_found, is_registration = !!cb;
+ 
+ 	cb_info = notifier_hash_node_get(notify_id, type);
+ 	cb_found = !!cb_info;
+@@ -1155,17 +1154,10 @@ update_notifier_cb(int notify_id, enum notify_type type, ffa_notifier_cb cb,
+ 		return -EINVAL;
+ 
+ 	if (is_registration) {
+-		cb_info = kzalloc(sizeof(*cb_info), GFP_KERNEL);
+-		if (!cb_info)
+-			return -ENOMEM;
+-
+-		cb_info->type = type;
+-		cb_info->cb = cb;
+-		cb_info->cb_data = cb_data;
+-
+-		hash_add(drv_info->notifier_hash, &cb_info->hnode, notify_id);
++		hash_add(drv_info->notifier_hash, &cb->hnode, notify_id);
+ 	} else {
+ 		hash_del(&cb_info->hnode);
++		kfree(cb_info);
+ 	}
+ 
+ 	return 0;
+@@ -1190,18 +1182,18 @@ static int ffa_notify_relinquish(struct ffa_device *dev, int notify_id)
+ 	if (notify_id >= FFA_MAX_NOTIFICATIONS)
+ 		return -EINVAL;
+ 
+-	mutex_lock(&drv_info->notify_lock);
++	write_lock(&drv_info->notify_lock);
+ 
+-	rc = update_notifier_cb(notify_id, type, NULL, NULL, false);
++	rc = update_notifier_cb(notify_id, type, NULL);
+ 	if (rc) {
+ 		pr_err("Could not unregister notification callback\n");
+-		mutex_unlock(&drv_info->notify_lock);
++		write_unlock(&drv_info->notify_lock);
+ 		return rc;
+ 	}
+ 
+ 	rc = ffa_notification_unbind(dev->vm_id, BIT(notify_id));
+ 
+-	mutex_unlock(&drv_info->notify_lock);
++	write_unlock(&drv_info->notify_lock);
+ 
+ 	return rc;
+ }
+@@ -1211,6 +1203,7 @@ static int ffa_notify_request(struct ffa_device *dev, bool is_per_vcpu,
+ {
+ 	int rc;
+ 	u32 flags = 0;
++	struct notifier_cb_info *cb_info = NULL;
+ 	enum notify_type type = ffa_notify_type_get(dev->vm_id);
+ 
+ 	if (ffa_notifications_disabled())
+@@ -1219,24 +1212,34 @@ static int ffa_notify_request(struct ffa_device *dev, bool is_per_vcpu,
+ 	if (notify_id >= FFA_MAX_NOTIFICATIONS)
+ 		return -EINVAL;
+ 
+-	mutex_lock(&drv_info->notify_lock);
++	cb_info = kzalloc(sizeof(*cb_info), GFP_KERNEL);
++	if (!cb_info)
++		return -ENOMEM;
++
++	cb_info->type = type;
++	cb_info->cb_data = cb_data;
++	cb_info->cb = cb;
++
++	write_lock(&drv_info->notify_lock);
+ 
+ 	if (is_per_vcpu)
+ 		flags = PER_VCPU_NOTIFICATION_FLAG;
+ 
+ 	rc = ffa_notification_bind(dev->vm_id, BIT(notify_id), flags);
+-	if (rc) {
+-		mutex_unlock(&drv_info->notify_lock);
+-		return rc;
+-	}
++	if (rc)
++		goto out_unlock_free;
+ 
+-	rc = update_notifier_cb(notify_id, type, cb, cb_data, true);
++	rc = update_notifier_cb(notify_id, type, cb_info);
+ 	if (rc) {
+ 		pr_err("Failed to register callback for %d - %d\n",
+ 		       notify_id, rc);
+ 		ffa_notification_unbind(dev->vm_id, BIT(notify_id));
+ 	}
+-	mutex_unlock(&drv_info->notify_lock);
++
++out_unlock_free:
++	write_unlock(&drv_info->notify_lock);
++	if (rc)
++		kfree(cb_info);
+ 
+ 	return rc;
+ }
+@@ -1266,9 +1269,9 @@ static void handle_notif_callbacks(u64 bitmap, enum notify_type type)
+ 		if (!(bitmap & 1))
+ 			continue;
+ 
+-		mutex_lock(&drv_info->notify_lock);
++		read_lock(&drv_info->notify_lock);
+ 		cb_info = notifier_hash_node_get(notify_id, type);
+-		mutex_unlock(&drv_info->notify_lock);
++		read_unlock(&drv_info->notify_lock);
+ 
+ 		if (cb_info && cb_info->cb)
+ 			cb_info->cb(notify_id, cb_info->cb_data);
+@@ -1718,7 +1721,7 @@ static void ffa_notifications_setup(void)
+ 		goto cleanup;
+ 
+ 	hash_init(drv_info->notifier_hash);
+-	mutex_init(&drv_info->notify_lock);
++	rwlock_init(&drv_info->notify_lock);
+ 
+ 	drv_info->notif_enabled = true;
+ 	return;
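
Two changes are folded together in the FF-A hunks above. The hashtable
lock becomes an rwlock so the notification delivery path
(handle_notif_callbacks()) only takes a read lock, and, since
kzalloc(GFP_KERNEL) may sleep and therefore cannot run under a spinning
lock, ffa_notify_request() now builds the callback node before
write_lock() and frees it after unlock on failure; update_notifier_cb()
also gains the kfree() the unregister path was missing. A sketch of the
allocate-outside-the-lock shape (reg_entry, table_insert() and
table_find() are illustrative):

	#include <linux/slab.h>
	#include <linux/spinlock.h>

	struct reg_entry {
		int id;
		void (*cb)(int id, void *data);
		void *data;
	};

	static DEFINE_RWLOCK(tbl_lock);

	static int register_cb(int id, void (*cb)(int, void *), void *data)
	{
		struct reg_entry *e;
		int err;

		e = kzalloc(sizeof(*e), GFP_KERNEL); /* may sleep: allocate unlocked */
		if (!e)
			return -ENOMEM;
		e->id = id;
		e->cb = cb;
		e->data = data;

		write_lock(&tbl_lock);
		err = table_insert(e);		/* nothing sleeps under the lock */
		write_unlock(&tbl_lock);

		if (err)
			kfree(e);		/* undo on failure, again unlocked */
		return err;
	}

	static void dispatch(int id)
	{
		struct reg_entry *e;

		read_lock(&tbl_lock);		/* readers do not exclude each other */
		e = table_find(id);
		read_unlock(&tbl_lock);

		if (e)
			e->cb(id, e->data);
	}
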
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+index 7d4b540340e021..41b88e0ea98b89 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+@@ -860,7 +860,9 @@ int amdgpu_mes_map_legacy_queue(struct amdgpu_device *adev,
+ 	queue_input.mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
+ 	queue_input.wptr_addr = ring->wptr_gpu_addr;
+ 
++	amdgpu_mes_lock(&adev->mes);
+ 	r = adev->mes.funcs->map_legacy_queue(&adev->mes, &queue_input);
++	amdgpu_mes_unlock(&adev->mes);
+ 	if (r)
+ 		DRM_ERROR("failed to map legacy queue\n");
+ 
+@@ -883,7 +885,9 @@ int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
+ 	queue_input.trail_fence_addr = gpu_addr;
+ 	queue_input.trail_fence_data = seq;
+ 
++	amdgpu_mes_lock(&adev->mes);
+ 	r = adev->mes.funcs->unmap_legacy_queue(&adev->mes, &queue_input);
++	amdgpu_mes_unlock(&adev->mes);
+ 	if (r)
+ 		DRM_ERROR("failed to unmap legacy queue\n");
+ 
+@@ -910,7 +914,9 @@ int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev,
+ 	queue_input.vmid = vmid;
+ 	queue_input.use_mmio = use_mmio;
+ 
++	amdgpu_mes_lock(&adev->mes);
+ 	r = adev->mes.funcs->reset_legacy_queue(&adev->mes, &queue_input);
++	amdgpu_mes_unlock(&adev->mes);
+ 	if (r)
+ 		DRM_ERROR("failed to reset legacy queue\n");
+ 
+@@ -931,7 +937,9 @@ uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg)
+ 		goto error;
+ 	}
+ 
++	amdgpu_mes_lock(&adev->mes);
+ 	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
++	amdgpu_mes_unlock(&adev->mes);
+ 	if (r)
+ 		DRM_ERROR("failed to read reg (0x%x)\n", reg);
+ 	else
+@@ -957,7 +965,9 @@ int amdgpu_mes_wreg(struct amdgpu_device *adev,
+ 		goto error;
+ 	}
+ 
++	amdgpu_mes_lock(&adev->mes);
+ 	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
++	amdgpu_mes_unlock(&adev->mes);
+ 	if (r)
+ 		DRM_ERROR("failed to write reg (0x%x)\n", reg);
+ 
+@@ -984,7 +994,9 @@ int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
+ 		goto error;
+ 	}
+ 
++	amdgpu_mes_lock(&adev->mes);
+ 	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
++	amdgpu_mes_unlock(&adev->mes);
+ 	if (r)
+ 		DRM_ERROR("failed to reg_write_reg_wait\n");
+ 
+@@ -1009,7 +1021,9 @@ int amdgpu_mes_reg_wait(struct amdgpu_device *adev, uint32_t reg,
+ 		goto error;
+ 	}
+ 
++	amdgpu_mes_lock(&adev->mes);
+ 	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
++	amdgpu_mes_unlock(&adev->mes);
+ 	if (r)
+ 		DRM_ERROR("failed to reg_write_reg_wait\n");
+ 
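
Every direct call into adev->mes.funcs above now takes the MES lock;
these entry points submit packets to the MES scheduler firmware and
previously raced against concurrently submitted MES operations. The
pattern is uniform, e.g. a hypothetical wrapper (not part of the patch)
would read:

	static int mes_misc_op_locked(struct amdgpu_device *adev,
				      struct mes_misc_op_input *op_input)
	{
		int r;

		amdgpu_mes_lock(&adev->mes);
		r = adev->mes.funcs->misc_op(&adev->mes, op_input);
		amdgpu_mes_unlock(&adev->mes);

		return r;
	}
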
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+index 48e30e5f833891..3d42f6c3308ed3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+@@ -3430,7 +3430,10 @@ int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name)
+ 	uint8_t *ucode_array_start_addr;
+ 	int err = 0;
+ 
+-	err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, "amdgpu/%s_sos.bin", chip_name);
++	if (amdgpu_is_kicker_fw(adev))
++		err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, "amdgpu/%s_sos_kicker.bin", chip_name);
++	else
++		err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, "amdgpu/%s_sos.bin", chip_name);
+ 	if (err)
+ 		goto out;
+ 
+@@ -3672,7 +3675,10 @@ int psp_init_ta_microcode(struct psp_context *psp, const char *chip_name)
+ 	struct amdgpu_device *adev = psp->adev;
+ 	int err;
+ 
+-	err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, "amdgpu/%s_ta.bin", chip_name);
++	if (amdgpu_is_kicker_fw(adev))
++		err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, "amdgpu/%s_ta_kicker.bin", chip_name);
++	else
++		err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, "amdgpu/%s_ta.bin", chip_name);
+ 	if (err)
+ 		return err;
+ 
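
"Kicker" board revisions ship separately signed firmware, so the PSP
loader selects the *_sos_kicker.bin/*_ta_kicker.bin images when
amdgpu_is_kicker_fw() reports such hardware; the gfx_v11, imu_v11 and
psp_v13 hunks below extend the same branch to the RLC and IMU images and
declare the matching MODULE_FIRMWARE() entries. The repeated shape could
be captured by a helper like this (hypothetical, not part of the patch):

	static int request_fw_variant(struct amdgpu_device *adev,
				      const struct firmware **fw,
				      const char *chip, const char *kind)
	{
		if (amdgpu_is_kicker_fw(adev))
			return amdgpu_ucode_request(adev, fw,
						    "amdgpu/%s_%s_kicker.bin",
						    chip, kind);

		return amdgpu_ucode_request(adev, fw,
					    "amdgpu/%s_%s.bin", chip, kind);
	}
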
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+index 1f06b22dbe7c63..96e5c520af3168 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+@@ -84,6 +84,7 @@ MODULE_FIRMWARE("amdgpu/gc_11_0_0_pfp.bin");
+ MODULE_FIRMWARE("amdgpu/gc_11_0_0_me.bin");
+ MODULE_FIRMWARE("amdgpu/gc_11_0_0_mec.bin");
+ MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc.bin");
++MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc_kicker.bin");
+ MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc_1.bin");
+ MODULE_FIRMWARE("amdgpu/gc_11_0_0_toc.bin");
+ MODULE_FIRMWARE("amdgpu/gc_11_0_1_pfp.bin");
+@@ -734,6 +735,9 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
+ 		    adev->pdev->revision == 0xCE)
+ 			err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
+ 						   "amdgpu/gc_11_0_0_rlc_1.bin");
++		else if (amdgpu_is_kicker_fw(adev))
++			err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
++						   "amdgpu/%s_rlc_kicker.bin", ucode_prefix);
+ 		else
+ 			err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
+ 						   "amdgpu/%s_rlc.bin", ucode_prefix);
+diff --git a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
+index d4f72e47ae9e20..c4f5cbf1ecd7d1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
+@@ -32,6 +32,7 @@
+ #include "gc/gc_11_0_0_sh_mask.h"
+ 
+ MODULE_FIRMWARE("amdgpu/gc_11_0_0_imu.bin");
++MODULE_FIRMWARE("amdgpu/gc_11_0_0_imu_kicker.bin");
+ MODULE_FIRMWARE("amdgpu/gc_11_0_1_imu.bin");
+ MODULE_FIRMWARE("amdgpu/gc_11_0_2_imu.bin");
+ MODULE_FIRMWARE("amdgpu/gc_11_0_3_imu.bin");
+@@ -50,7 +51,10 @@ static int imu_v11_0_init_microcode(struct amdgpu_device *adev)
+ 	DRM_DEBUG("\n");
+ 
+ 	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
+-	err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, "amdgpu/%s_imu.bin", ucode_prefix);
++	if (amdgpu_is_kicker_fw(adev))
++		err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, "amdgpu/%s_imu_kicker.bin", ucode_prefix);
++	else
++		err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, "amdgpu/%s_imu.bin", ucode_prefix);
+ 	if (err)
+ 		goto out;
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
+index bf00de763acb0e..124f74e862d7ff 100644
+--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
+@@ -42,7 +42,9 @@ MODULE_FIRMWARE("amdgpu/psp_13_0_5_ta.bin");
+ MODULE_FIRMWARE("amdgpu/psp_13_0_8_toc.bin");
+ MODULE_FIRMWARE("amdgpu/psp_13_0_8_ta.bin");
+ MODULE_FIRMWARE("amdgpu/psp_13_0_0_sos.bin");
++MODULE_FIRMWARE("amdgpu/psp_13_0_0_sos_kicker.bin");
+ MODULE_FIRMWARE("amdgpu/psp_13_0_0_ta.bin");
++MODULE_FIRMWARE("amdgpu/psp_13_0_0_ta_kicker.bin");
+ MODULE_FIRMWARE("amdgpu/psp_13_0_7_sos.bin");
+ MODULE_FIRMWARE("amdgpu/psp_13_0_7_ta.bin");
+ MODULE_FIRMWARE("amdgpu/psp_13_0_10_sos.bin");
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
+new file mode 100644
+index 00000000000000..cdefd7fcb0da60
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
+@@ -0,0 +1,1624 @@
++/*
++ * Copyright 2024 Advanced Micro Devices, Inc. All rights reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include <linux/firmware.h>
++#include "amdgpu.h"
++#include "amdgpu_vcn.h"
++#include "amdgpu_pm.h"
++#include "soc15.h"
++#include "soc15d.h"
++#include "soc15_hw_ip.h"
++#include "vcn_v2_0.h"
++#include "vcn_v4_0_3.h"
++#include "mmsch_v5_0.h"
++
++#include "vcn/vcn_5_0_0_offset.h"
++#include "vcn/vcn_5_0_0_sh_mask.h"
++#include "ivsrcid/vcn/irqsrcs_vcn_5_0.h"
++#include "vcn_v5_0_0.h"
++#include "vcn_v5_0_1.h"
++
++#include <drm/drm_drv.h>
++
++static int vcn_v5_0_1_start_sriov(struct amdgpu_device *adev);
++static void vcn_v5_0_1_set_unified_ring_funcs(struct amdgpu_device *adev);
++static void vcn_v5_0_1_set_irq_funcs(struct amdgpu_device *adev);
++static int vcn_v5_0_1_set_pg_state(struct amdgpu_vcn_inst *vinst,
++				   enum amd_powergating_state state);
++static void vcn_v5_0_1_unified_ring_set_wptr(struct amdgpu_ring *ring);
++static void vcn_v5_0_1_set_ras_funcs(struct amdgpu_device *adev);
++/**
++ * vcn_v5_0_1_early_init - set function pointers and load microcode
++ *
++ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
++ *
++ * Set ring and irq function pointers
++ * Load microcode from filesystem
++ */
++static int vcn_v5_0_1_early_init(struct amdgpu_ip_block *ip_block)
++{
++	struct amdgpu_device *adev = ip_block->adev;
++	int i, r;
++
++	for (i = 0; i < adev->vcn.num_vcn_inst; ++i)
++		/* re-use enc ring as unified ring */
++		adev->vcn.inst[i].num_enc_rings = 1;
++
++	vcn_v5_0_1_set_unified_ring_funcs(adev);
++	vcn_v5_0_1_set_irq_funcs(adev);
++	vcn_v5_0_1_set_ras_funcs(adev);
++
++	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
++		adev->vcn.inst[i].set_pg_state = vcn_v5_0_1_set_pg_state;
++
++		r = amdgpu_vcn_early_init(adev, i);
++		if (r)
++			return r;
++	}
++
++	return 0;
++}
++
++static void vcn_v5_0_1_fw_shared_init(struct amdgpu_device *adev, int inst_idx)
++{
++	struct amdgpu_vcn5_fw_shared *fw_shared;
++
++	fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
++
++	if (fw_shared->sq.is_enabled)
++		return;
++	fw_shared->present_flag_0 =
++		cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
++	fw_shared->sq.is_enabled = 1;
++
++	if (amdgpu_vcnfw_log)
++		amdgpu_vcn_fwlog_init(&adev->vcn.inst[inst_idx]);
++}
++
++/**
++ * vcn_v5_0_1_sw_init - sw init for VCN block
++ *
++ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
++ *
++ * Load firmware and sw initialization
++ */
++static int vcn_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block)
++{
++	struct amdgpu_device *adev = ip_block->adev;
++	struct amdgpu_ring *ring;
++	int i, r, vcn_inst;
++
++	/* VCN UNIFIED TRAP */
++	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
++		VCN_5_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst->irq);
++	if (r)
++		return r;
++
++	/* VCN POISON TRAP */
++	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
++		VCN_5_0__SRCID_UVD_POISON, &adev->vcn.inst->ras_poison_irq);
++
++	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
++		vcn_inst = GET_INST(VCN, i);
++
++		r = amdgpu_vcn_sw_init(adev, i);
++		if (r)
++			return r;
++
++		amdgpu_vcn_setup_ucode(adev, i);
++
++		r = amdgpu_vcn_resume(adev, i);
++		if (r)
++			return r;
++
++		ring = &adev->vcn.inst[i].ring_enc[0];
++		ring->use_doorbell = true;
++		if (!amdgpu_sriov_vf(adev))
++			ring->doorbell_index =
++				(adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
++				11 * vcn_inst;
++		else
++			ring->doorbell_index =
++				(adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
++				32 * vcn_inst;
++
++		ring->vm_hub = AMDGPU_MMHUB0(adev->vcn.inst[i].aid_id);
++		sprintf(ring->name, "vcn_unified_%d", adev->vcn.inst[i].aid_id);
++
++		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
++					AMDGPU_RING_PRIO_DEFAULT, &adev->vcn.inst[i].sched_score);
++		if (r)
++			return r;
++
++		vcn_v5_0_1_fw_shared_init(adev, i);
++	}
++
++	/* TODO: Add queue reset mask when FW fully supports it */
++	adev->vcn.supported_reset =
++		amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
++
++	if (amdgpu_sriov_vf(adev)) {
++		r = amdgpu_virt_alloc_mm_table(adev);
++		if (r)
++			return r;
++	}
++
++	vcn_v5_0_0_alloc_ip_dump(adev);
++
++	return amdgpu_vcn_sysfs_reset_mask_init(adev);
++}
++
++/**
++ * vcn_v5_0_1_sw_fini - sw fini for VCN block
++ *
++ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
++ *
++ * VCN suspend and free up sw allocation
++ */
++static int vcn_v5_0_1_sw_fini(struct amdgpu_ip_block *ip_block)
++{
++	struct amdgpu_device *adev = ip_block->adev;
++	int i, r, idx;
++
++	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
++		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
++			volatile struct amdgpu_vcn5_fw_shared *fw_shared;
++
++			fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
++			fw_shared->present_flag_0 = 0;
++			fw_shared->sq.is_enabled = 0;
++		}
++
++		drm_dev_exit(idx);
++	}
++
++	if (amdgpu_sriov_vf(adev))
++		amdgpu_virt_free_mm_table(adev);
++
++	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
++		r = amdgpu_vcn_suspend(adev, i);
++		if (r)
++			return r;
++	}
++
++	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
++		r = amdgpu_vcn_sw_fini(adev, i);
++		if (r)
++			return r;
++	}
++
++	amdgpu_vcn_sysfs_reset_mask_fini(adev);
++
++	kfree(adev->vcn.ip_dump);
++
++	return 0;
++}
++
++/**
++ * vcn_v5_0_1_hw_init - start and test VCN block
++ *
++ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
++ *
++ * Initialize the hardware, boot up the VCPU and do some testing
++ */
++static int vcn_v5_0_1_hw_init(struct amdgpu_ip_block *ip_block)
++{
++	struct amdgpu_device *adev = ip_block->adev;
++	struct amdgpu_ring *ring;
++	int i, r, vcn_inst;
++
++	if (amdgpu_sriov_vf(adev)) {
++		r = vcn_v5_0_1_start_sriov(adev);
++		if (r)
++			return r;
++
++		for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
++			ring = &adev->vcn.inst[i].ring_enc[0];
++			ring->wptr = 0;
++			ring->wptr_old = 0;
++			vcn_v5_0_1_unified_ring_set_wptr(ring);
++			ring->sched.ready = true;
++		}
++	} else {
++		if (RREG32_SOC15(VCN, GET_INST(VCN, 0), regVCN_RRMT_CNTL) & 0x100)
++			adev->vcn.caps |= AMDGPU_VCN_CAPS(RRMT_ENABLED);
++		for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
++			vcn_inst = GET_INST(VCN, i);
++			ring = &adev->vcn.inst[i].ring_enc[0];
++
++			if (ring->use_doorbell)
++				adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
++					((adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
++					11 * vcn_inst),
++					adev->vcn.inst[i].aid_id);
++
++			/* Re-init fw_shared, if required */
++			vcn_v5_0_1_fw_shared_init(adev, i);
++
++			r = amdgpu_ring_test_helper(ring);
++			if (r)
++				return r;
++		}
++	}
++
++	return 0;
++}
++
++/**
++ * vcn_v5_0_1_hw_fini - stop the hardware block
++ *
++ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
++ *
++ * Stop the VCN block, mark ring as not ready any more
++ */
++static int vcn_v5_0_1_hw_fini(struct amdgpu_ip_block *ip_block)
++{
++	struct amdgpu_device *adev = ip_block->adev;
++	int i;
++
++	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
++		struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
++
++		cancel_delayed_work_sync(&adev->vcn.inst[i].idle_work);
++		if (vinst->cur_state != AMD_PG_STATE_GATE)
++			vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);
++	}
++
++	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
++		amdgpu_irq_put(adev, &adev->vcn.inst->ras_poison_irq, 0);
++
++	return 0;
++}
++
++/**
++ * vcn_v5_0_1_suspend - suspend VCN block
++ *
++ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
++ *
++ * HW fini and suspend VCN block
++ */
++static int vcn_v5_0_1_suspend(struct amdgpu_ip_block *ip_block)
++{
++	struct amdgpu_device *adev = ip_block->adev;
++	int r, i;
++
++	r = vcn_v5_0_1_hw_fini(ip_block);
++	if (r)
++		return r;
++
++	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
++		r = amdgpu_vcn_suspend(ip_block->adev, i);
++		if (r)
++			return r;
++	}
++
++	return r;
++}
++
++/**
++ * vcn_v5_0_1_resume - resume VCN block
++ *
++ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
++ *
++ * Resume firmware and hw init VCN block
++ */
++static int vcn_v5_0_1_resume(struct amdgpu_ip_block *ip_block)
++{
++	struct amdgpu_device *adev = ip_block->adev;
++	int r, i;
++
++	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
++		struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
++
++		if (amdgpu_in_reset(adev))
++			vinst->cur_state = AMD_PG_STATE_GATE;
++
++		r = amdgpu_vcn_resume(ip_block->adev, i);
++		if (r)
++			return r;
++	}
++
++	r = vcn_v5_0_1_hw_init(ip_block);
++
++	return r;
++}
++
++/**
++ * vcn_v5_0_1_mc_resume - memory controller programming
++ *
++ * @vinst: VCN instance
++ *
++ * Let the VCN memory controller know its offsets
++ */
++static void vcn_v5_0_1_mc_resume(struct amdgpu_vcn_inst *vinst)
++{
++	struct amdgpu_device *adev = vinst->adev;
++	int inst = vinst->inst;
++	uint32_t offset, size, vcn_inst;
++	const struct common_firmware_header *hdr;
++
++	hdr = (const struct common_firmware_header *)adev->vcn.inst[inst].fw->data;
++	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
++
++	vcn_inst = GET_INST(VCN, inst);
++	/* cache window 0: fw */
++	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
++		WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
++			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_lo));
++		WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
++			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_hi));
++		WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET0, 0);
++		offset = 0;
++	} else {
++		WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
++			lower_32_bits(adev->vcn.inst[inst].gpu_addr));
++		WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
++			upper_32_bits(adev->vcn.inst[inst].gpu_addr));
++		offset = size;
++		WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET0,
++				AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
++	}
++	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_SIZE0, size);
++
++	/* cache window 1: stack */
++	WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
++		lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
++	WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
++		upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
++	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET1, 0);
++	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);
++
++	/* cache window 2: context */
++	WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
++		lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
++	WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
++		upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
++	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET2, 0);
++	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
++
++	/* non-cache window */
++	WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
++		lower_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
++	WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
++		upper_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
++	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_NONCACHE_OFFSET0, 0);
++	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_NONCACHE_SIZE0,
++		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn5_fw_shared)));
++}
++
++/**
++ * vcn_v5_0_1_mc_resume_dpg_mode - memory controller programming for dpg mode
++ *
++ * @vinst: VCN instance
++ * @indirect: indirectly write sram
++ *
++ * Let the VCN memory controller know its offsets with dpg mode
++ */
++static void vcn_v5_0_1_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst,
++					  bool indirect)
++{
++	struct amdgpu_device *adev = vinst->adev;
++	int inst_idx = vinst->inst;
++	uint32_t offset, size;
++	const struct common_firmware_header *hdr;
++
++	hdr = (const struct common_firmware_header *)adev->vcn.inst[inst_idx].fw->data;
++	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
++
++	/* cache window 0: fw */
++	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
++		if (!indirect) {
++			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
++				VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
++				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN +
++				 inst_idx].tmr_mc_addr_lo), 0, indirect);
++			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
++				VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
++				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN +
++				 inst_idx].tmr_mc_addr_hi), 0, indirect);
++			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
++				VCN, 0, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
++		} else {
++			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
++				VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
++			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
++				VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
++			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
++				VCN, 0, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
++		}
++		offset = 0;
++	} else {
++		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
++			VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
++			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
++		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
++			VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
++			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
++		offset = size;
++		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
++			VCN, 0, regUVD_VCPU_CACHE_OFFSET0),
++			AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
++	}
++
++	if (!indirect)
++		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
++			VCN, 0, regUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
++	else
++		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
++			VCN, 0, regUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);
++
++	/* cache window 1: stack */
++	if (!indirect) {
++		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
++			VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
++			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
++		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
++			VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
++			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
++		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
++			VCN, 0, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
++	} else {
++		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
++			VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
++		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
++			VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
++		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
++			VCN, 0, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
++	}
++	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
++			VCN, 0, regUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);
++
++	/* cache window 2: context */
++	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
++		VCN, 0, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
++		lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset +
++			AMDGPU_VCN_STACK_SIZE), 0, indirect);
++	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
++		VCN, 0, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
++		upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset +
++			AMDGPU_VCN_STACK_SIZE), 0, indirect);
++	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
++		VCN, 0, regUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
++	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
++		VCN, 0, regUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);
++
++	/* non-cache window */
++	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
++		VCN, 0, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
++		lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
++	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
++		VCN, 0, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
++		upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
++	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
++		VCN, 0, regUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
++	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
++		VCN, 0, regUVD_VCPU_NONCACHE_SIZE0),
++		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn5_fw_shared)), 0, indirect);
++
++	/* VCN global tiling registers */
++	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
++		VCN, 0, regUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
++}
++
++/**
++ * vcn_v5_0_1_disable_clock_gating - disable VCN clock gating
++ *
++ * @vinst: VCN instance
++ *
++ * Disable clock gating for VCN block
++ */
++static void vcn_v5_0_1_disable_clock_gating(struct amdgpu_vcn_inst *vinst)
++{
++}
++
++/**
++ * vcn_v5_0_1_enable_clock_gating - enable VCN clock gating
++ *
++ * @vinst: VCN instance
++ *
++ * Enable clock gating for VCN block
++ */
++static void vcn_v5_0_1_enable_clock_gating(struct amdgpu_vcn_inst *vinst)
++{
++}
++
++/**
++ * vcn_v5_0_1_pause_dpg_mode - VCN pause with dpg mode
++ *
++ * @vinst: VCN instance
++ * @new_state: pause state
++ *
++ * Pause dpg mode for VCN block
++ */
++static int vcn_v5_0_1_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
++				     struct dpg_pause_state *new_state)
++{
++	struct amdgpu_device *adev = vinst->adev;
++	uint32_t reg_data = 0;
++	int vcn_inst;
++
++	vcn_inst = GET_INST(VCN, vinst->inst);
++
++	/* pause/unpause if state is changed */
++	if (vinst->pause_state.fw_based != new_state->fw_based) {
++		DRM_DEV_DEBUG(adev->dev, "dpg pause state changed %d -> %d %s\n",
++			vinst->pause_state.fw_based, new_state->fw_based,
++			new_state->fw_based ? "VCN_DPG_STATE__PAUSE" : "VCN_DPG_STATE__UNPAUSE");
++		reg_data = RREG32_SOC15(VCN, vcn_inst, regUVD_DPG_PAUSE) &
++			(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
++
++		if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
++			/* pause DPG */
++			reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
++			WREG32_SOC15(VCN, vcn_inst, regUVD_DPG_PAUSE, reg_data);
++
++			/* wait for ACK */
++			SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_DPG_PAUSE,
++					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
++					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
++		} else {
++			/* unpause DPG, no need to wait */
++			reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
++			WREG32_SOC15(VCN, vcn_inst, regUVD_DPG_PAUSE, reg_data);
++		}
++		vinst->pause_state.fw_based = new_state->fw_based;
++	}
++
++	return 0;
++}
++
++
++/**
++ * vcn_v5_0_1_start_dpg_mode - VCN start with dpg mode
++ *
++ * @vinst: VCN instance
++ * @indirect: indirectly write sram
++ *
++ * Start VCN block with dpg mode
++ */
++static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
++				     bool indirect)
++{
++	struct amdgpu_device *adev = vinst->adev;
++	int inst_idx = vinst->inst;
++	volatile struct amdgpu_vcn5_fw_shared *fw_shared =
++		adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
++	struct amdgpu_ring *ring;
++	struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__PAUSE};
++	int vcn_inst;
++	uint32_t tmp;
++
++	vcn_inst = GET_INST(VCN, inst_idx);
++
++	/* disable register anti-hang mechanism */
++	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_POWER_STATUS), 1,
++		~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
++
++	/* enable dynamic power gating mode */
++	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_POWER_STATUS);
++	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
++	WREG32_SOC15(VCN, vcn_inst, regUVD_POWER_STATUS, tmp);
++
++	if (indirect) {
++		adev->vcn.inst[inst_idx].dpg_sram_curr_addr =
++			(uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;
++		/* Use dummy register 0xDEADBEEF passing AID selection to PSP FW */
++		WREG32_SOC24_DPG_MODE(inst_idx, 0xDEADBEEF,
++				adev->vcn.inst[inst_idx].aid_id, 0, true);
++	}
++
++	/* enable VCPU clock */
++	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
++	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK | UVD_VCPU_CNTL__BLK_RST_MASK;
++	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
++		VCN, 0, regUVD_VCPU_CNTL), tmp, 0, indirect);
++
++	/* disable master interrupt */
++	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
++		VCN, 0, regUVD_MASTINT_EN), 0, 0, indirect);
++
++	/* setup regUVD_LMI_CTRL */
++	tmp = (UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
++		UVD_LMI_CTRL__REQ_MODE_MASK |
++		UVD_LMI_CTRL__CRC_RESET_MASK |
++		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
++		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
++		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
++		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
++		0x00100000L);
++	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
++		VCN, 0, regUVD_LMI_CTRL), tmp, 0, indirect);
++
++	vcn_v5_0_1_mc_resume_dpg_mode(vinst, indirect);
++
++	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
++	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
++	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
++		VCN, 0, regUVD_VCPU_CNTL), tmp, 0, indirect);
++
++	/* enable LMI MC and UMC channels */
++	tmp = 0x1f << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT;
++	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
++		VCN, 0, regUVD_LMI_CTRL2), tmp, 0, indirect);
++
++	/* enable master interrupt */
++	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
++		VCN, 0, regUVD_MASTINT_EN),
++		UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
++
++	if (indirect)
++		amdgpu_vcn_psp_update_sram(adev, inst_idx, AMDGPU_UCODE_ID_VCN0_RAM);
++
++	/* resetting ring, fw should not check RB ring */
++	fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
++
++	/* Pause dpg */
++	vcn_v5_0_1_pause_dpg_mode(vinst, &state);
++
++	ring = &adev->vcn.inst[inst_idx].ring_enc[0];
++
++	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_LO, lower_32_bits(ring->gpu_addr));
++	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
++	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_SIZE, ring->ring_size / sizeof(uint32_t));
++
++	tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
++	tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
++	WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
++
++	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR, 0);
++	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, 0);
++
++	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR);
++	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, tmp);
++	ring->wptr = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR);
++
++	tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
++	tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
++	WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
++	/* resetting done, fw can check RB ring */
++	fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
++
++	WREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL,
++		ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
++		VCN_RB1_DB_CTRL__EN_MASK);
++	/* Read DB_CTRL to flush the write DB_CTRL command. */
++	RREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL);
++
++	return 0;
++}
++
++static int vcn_v5_0_1_start_sriov(struct amdgpu_device *adev)
++{
++	int i, vcn_inst;
++	struct amdgpu_ring *ring_enc;
++	uint64_t cache_addr;
++	uint64_t rb_enc_addr;
++	uint64_t ctx_addr;
++	uint32_t param, resp, expected;
++	uint32_t offset, cache_size;
++	uint32_t tmp, timeout;
++
++	struct amdgpu_mm_table *table = &adev->virt.mm_table;
++	uint32_t *table_loc;
++	uint32_t table_size;
++	uint32_t size, size_dw;
++	uint32_t init_status;
++	uint32_t enabled_vcn;
++
++	struct mmsch_v5_0_cmd_direct_write
++		direct_wt = { {0} };
++	struct mmsch_v5_0_cmd_direct_read_modify_write
++		direct_rd_mod_wt = { {0} };
++	struct mmsch_v5_0_cmd_end end = { {0} };
++	struct mmsch_v5_0_init_header header;
++
++	volatile struct amdgpu_vcn5_fw_shared *fw_shared;
++	volatile struct amdgpu_fw_shared_rb_setup *rb_setup;
++
++	direct_wt.cmd_header.command_type =
++		MMSCH_COMMAND__DIRECT_REG_WRITE;
++	direct_rd_mod_wt.cmd_header.command_type =
++		MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
++	end.cmd_header.command_type = MMSCH_COMMAND__END;
++
++	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
++		vcn_inst = GET_INST(VCN, i);
++
++		vcn_v5_0_1_fw_shared_init(adev, vcn_inst);
++
++		memset(&header, 0, sizeof(struct mmsch_v5_0_init_header));
++		header.version = MMSCH_VERSION;
++		header.total_size = sizeof(struct mmsch_v5_0_init_header) >> 2;
++
++		table_loc = (uint32_t *)table->cpu_addr;
++		table_loc += header.total_size;
++
++		table_size = 0;
++
++		MMSCH_V5_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCN, 0, regUVD_STATUS),
++			~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);
++
++		cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[i].fw->size + 4);
++
++		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
++			MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
++				regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
++				adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo);
++
++			MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
++				regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
++				adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi);
++
++			offset = 0;
++			MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
++				regUVD_VCPU_CACHE_OFFSET0), 0);
++		} else {
++			MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
++				regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
++				lower_32_bits(adev->vcn.inst[i].gpu_addr));
++			MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
++				regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
++				upper_32_bits(adev->vcn.inst[i].gpu_addr));
++			offset = cache_size;
++			MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
++				regUVD_VCPU_CACHE_OFFSET0),
++				AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
++		}
++
++		MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
++			regUVD_VCPU_CACHE_SIZE0),
++			cache_size);
++
++		cache_addr = adev->vcn.inst[vcn_inst].gpu_addr + offset;
++		MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
++			regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), lower_32_bits(cache_addr));
++		MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
++			regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), upper_32_bits(cache_addr));
++		MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
++			regUVD_VCPU_CACHE_OFFSET1), 0);
++		MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
++			regUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE);
++
++		cache_addr = adev->vcn.inst[vcn_inst].gpu_addr + offset +
++			AMDGPU_VCN_STACK_SIZE;
++
++		MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
++			regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW), lower_32_bits(cache_addr));
++
++		MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
++			regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH), upper_32_bits(cache_addr));
++
++		MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
++			regUVD_VCPU_CACHE_OFFSET2), 0);
++
++		MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
++			regUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE);
++
++		fw_shared = adev->vcn.inst[vcn_inst].fw_shared.cpu_addr;
++		rb_setup = &fw_shared->rb_setup;
++
++		ring_enc = &adev->vcn.inst[vcn_inst].ring_enc[0];
++		ring_enc->wptr = 0;
++		rb_enc_addr = ring_enc->gpu_addr;
++
++		rb_setup->is_rb_enabled_flags |= RB_ENABLED;
++		rb_setup->rb_addr_lo = lower_32_bits(rb_enc_addr);
++		rb_setup->rb_addr_hi = upper_32_bits(rb_enc_addr);
++		rb_setup->rb_size = ring_enc->ring_size / 4;
++		fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_SETUP_FLAG);
++
++		MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
++			regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
++			lower_32_bits(adev->vcn.inst[vcn_inst].fw_shared.gpu_addr));
++		MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
++			regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
++			upper_32_bits(adev->vcn.inst[vcn_inst].fw_shared.gpu_addr));
++		MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
++			regUVD_VCPU_NONCACHE_SIZE0),
++			AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)));
++		MMSCH_V5_0_INSERT_END();
++
++		header.vcn0.init_status = 0;
++		header.vcn0.table_offset = header.total_size;
++		header.vcn0.table_size = table_size;
++		header.total_size += table_size;
++
++		/* Send init table to mmsch */
++		size = sizeof(struct mmsch_v5_0_init_header);
++		table_loc = (uint32_t *)table->cpu_addr;
++		memcpy((void *)table_loc, &header, size);
++
++		ctx_addr = table->gpu_addr;
++		WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_CTX_ADDR_LO, lower_32_bits(ctx_addr));
++		WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_CTX_ADDR_HI, upper_32_bits(ctx_addr));
++
++		tmp = RREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_VMID);
++		tmp &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
++		tmp |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
++		WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_VMID, tmp);
++
++		size = header.total_size;
++		WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_CTX_SIZE, size);
++
++		WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_MAILBOX_RESP, 0);
++
++		param = 0x00000001;
++		WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_MAILBOX_HOST, param);
++		tmp = 0;
++		timeout = 1000;
++		resp = 0;
++		expected = MMSCH_VF_MAILBOX_RESP__OK;
++		while (resp != expected) {
++			resp = RREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_MAILBOX_RESP);
++			if (resp != 0)
++				break;
++
++			udelay(10);
++			tmp = tmp + 10;
++			if (tmp >= timeout) {
++				DRM_ERROR("failed to init MMSCH. TIME-OUT after %d usec"\
++					" waiting for regMMSCH_VF_MAILBOX_RESP "\
++					"(expected=0x%08x, readback=0x%08x)\n",
++					tmp, expected, resp);
++				return -EBUSY;
++			}
++		}
++
++		enabled_vcn = amdgpu_vcn_is_disabled_vcn(adev, VCN_DECODE_RING, 0) ? 1 : 0;
++		init_status = ((struct mmsch_v5_0_init_header *)(table_loc))->vcn0.init_status;
++		if (resp != expected && resp != MMSCH_VF_MAILBOX_RESP__INCOMPLETE
++					&& init_status != MMSCH_VF_ENGINE_STATUS__PASS) {
++			DRM_ERROR("MMSCH init status is incorrect! readback=0x%08x, header init "\
++				"status for VCN%x: 0x%x\n", resp, enabled_vcn, init_status);
++		}
++	}
++
++	return 0;
++}
++
++/**
++ * vcn_v5_0_1_start - VCN start
++ *
++ * @vinst: VCN instance
++ *
++ * Start VCN block
++ */
++static int vcn_v5_0_1_start(struct amdgpu_vcn_inst *vinst)
++{
++	struct amdgpu_device *adev = vinst->adev;
++	int i = vinst->inst;
++	volatile struct amdgpu_vcn5_fw_shared *fw_shared;
++	struct amdgpu_ring *ring;
++	uint32_t tmp;
++	int j, k, r, vcn_inst;
++
++	fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
++
++	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
++		return vcn_v5_0_1_start_dpg_mode(vinst, adev->vcn.inst[i].indirect_sram);
++
++	vcn_inst = GET_INST(VCN, i);
++
++	/* set VCN status busy */
++	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS) | UVD_STATUS__UVD_BUSY;
++	WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, tmp);
++
++	/* enable VCPU clock */
++	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
++		 UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);
++
++	/* disable master interrupt */
++	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN), 0,
++		 ~UVD_MASTINT_EN__VCPU_EN_MASK);
++
++	/* enable LMI MC and UMC channels */
++	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_LMI_CTRL2), 0,
++		 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
++
++	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
++	tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
++	tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
++	WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
++
++	/* setup regUVD_LMI_CTRL */
++	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL);
++	WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL, tmp |
++		     UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
++		     UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
++		     UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
++		     UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
++
++	vcn_v5_0_1_mc_resume(vinst);
++
++	/* VCN global tiling registers */
++	WREG32_SOC15(VCN, vcn_inst, regUVD_GFX10_ADDR_CONFIG,
++		     adev->gfx.config.gb_addr_config);
++
++	/* unblock VCPU register access */
++	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL), 0,
++		 ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
++
++	/* release VCPU reset to boot */
++	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
++		 ~UVD_VCPU_CNTL__BLK_RST_MASK);
++
++	for (j = 0; j < 10; ++j) {
++		uint32_t status;
++
++		for (k = 0; k < 100; ++k) {
++			status = RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS);
++			if (status & 2)
++				break;
++			mdelay(100);
++			if (amdgpu_emu_mode == 1)
++				msleep(20);
++		}
++
++		if (amdgpu_emu_mode == 1) {
++			r = -1;
++			if (status & 2) {
++				r = 0;
++				break;
++			}
++		} else {
++			r = 0;
++			if (status & 2)
++				break;
++
++			dev_err(adev->dev,
++				"VCN[%d] is not responding, trying to reset the VCPU!!!\n", i);
++			WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
++				 UVD_VCPU_CNTL__BLK_RST_MASK,
++				 ~UVD_VCPU_CNTL__BLK_RST_MASK);
++			mdelay(10);
++			WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
++				 ~UVD_VCPU_CNTL__BLK_RST_MASK);
++
++			mdelay(10);
++			r = -1;
++		}
++	}
++
++	if (r) {
++		dev_err(adev->dev, "VCN[%d] is not responding, giving up!!!\n", i);
++		return r;
++	}
++
++	/* enable master interrupt */
++	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN),
++		 UVD_MASTINT_EN__VCPU_EN_MASK,
++		 ~UVD_MASTINT_EN__VCPU_EN_MASK);
++
++	/* clear the busy bit of VCN_STATUS */
++	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_STATUS), 0,
++		 ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
++
++	ring = &adev->vcn.inst[i].ring_enc[0];
++
++	WREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL,
++		     ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
++		     VCN_RB1_DB_CTRL__EN_MASK);
++
++	/* Read DB_CTRL to flush the write DB_CTRL command. */
++	RREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL);
++
++	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_LO, ring->gpu_addr);
++	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
++	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_SIZE, ring->ring_size / 4);
++
++	tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
++	tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
++	WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
++	fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
++	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR, 0);
++	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, 0);
++
++	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR);
++	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, tmp);
++	ring->wptr = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR);
++
++	tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
++	tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
++	WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
++	fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
++
++	/* Keeping one read-back to ensure all register writes are done,
++	 * otherwise it may introduce race conditions.
++	 */
++	RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS);
++
++	return 0;
++}
++
++/**
++ * vcn_v5_0_1_stop_dpg_mode - VCN stop with dpg mode
++ *
++ * @vinst: VCN instance
++ *
++ * Stop VCN block with dpg mode
++ */
++static void vcn_v5_0_1_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
++{
++	struct amdgpu_device *adev = vinst->adev;
++	int inst_idx = vinst->inst;
++	uint32_t tmp;
++	int vcn_inst;
++	struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE};
++
++	vcn_inst = GET_INST(VCN, inst_idx);
++
++	/* Unpause dpg */
++	vcn_v5_0_1_pause_dpg_mode(vinst, &state);
++
++	/* Wait for power status to be 1 */
++	SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_POWER_STATUS, 1,
++		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
++
++	/* wait for read ptr to be equal to write ptr */
++	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR);
++	SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_RB_RPTR, tmp, 0xFFFFFFFF);
++
++	/* disable dynamic power gating mode */
++	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_POWER_STATUS), 0,
++		~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
++
++	/* Keeping one read-back to ensure all register writes are done,
++	 * otherwise it may introduce race conditions.
++	 */
++	RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS);
++}
++
++/**
++ * vcn_v5_0_1_stop - VCN stop
++ *
++ * @vinst: VCN instance
++ *
++ * Stop VCN block
++ */
++static int vcn_v5_0_1_stop(struct amdgpu_vcn_inst *vinst)
++{
++	struct amdgpu_device *adev = vinst->adev;
++	int i = vinst->inst;
++	volatile struct amdgpu_vcn5_fw_shared *fw_shared;
++	uint32_t tmp;
++	int r = 0, vcn_inst;
++
++	vcn_inst = GET_INST(VCN, i);
++
++	fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
++	fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;
++
++	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
++		vcn_v5_0_1_stop_dpg_mode(vinst);
++		return 0;
++	}
++
++	/* wait for vcn idle */
++	r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_STATUS, UVD_STATUS__IDLE, 0x7);
++	if (r)
++		return r;
++
++	tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
++		UVD_LMI_STATUS__READ_CLEAN_MASK |
++		UVD_LMI_STATUS__WRITE_CLEAN_MASK |
++		UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
++	r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp, tmp);
++	if (r)
++		return r;
++
++	/* disable LMI UMC channel */
++	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2);
++	tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
++	WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2, tmp);
++	tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
++		UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
++	r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp, tmp);
++	if (r)
++		return r;
++
++	/* block VCPU register access */
++	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL),
++		 UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
++		 ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
++
++	/* reset VCPU */
++	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
++		 UVD_VCPU_CNTL__BLK_RST_MASK,
++		 ~UVD_VCPU_CNTL__BLK_RST_MASK);
++
++	/* disable VCPU clock */
++	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
++		 ~(UVD_VCPU_CNTL__CLK_EN_MASK));
++
++	/* apply soft reset */
++	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
++	tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
++	WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
++	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
++	tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
++	WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
++
++	/* clear status */
++	WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, 0);
++
++	/* Keeping one read-back to ensure all register writes are done,
++	 * otherwise it may introduce race conditions.
++	 */
++	RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS);
++
++	return 0;
++}
++
++/**
++ * vcn_v5_0_1_unified_ring_get_rptr - get unified read pointer
++ *
++ * @ring: amdgpu_ring pointer
++ *
++ * Returns the current hardware unified read pointer
++ */
++static uint64_t vcn_v5_0_1_unified_ring_get_rptr(struct amdgpu_ring *ring)
++{
++	struct amdgpu_device *adev = ring->adev;
++
++	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
++		DRM_ERROR("wrong ring id is identified in %s", __func__);
++
++	return RREG32_SOC15(VCN, GET_INST(VCN, ring->me), regUVD_RB_RPTR);
++}
++
++/**
++ * vcn_v5_0_1_unified_ring_get_wptr - get unified write pointer
++ *
++ * @ring: amdgpu_ring pointer
++ *
++ * Returns the current hardware unified write pointer
++ */
++static uint64_t vcn_v5_0_1_unified_ring_get_wptr(struct amdgpu_ring *ring)
++{
++	struct amdgpu_device *adev = ring->adev;
++
++	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
++		DRM_ERROR("wrong ring id is identified in %s", __func__);
++
++	if (ring->use_doorbell)
++		return *ring->wptr_cpu_addr;
++	else
++		return RREG32_SOC15(VCN, GET_INST(VCN, ring->me), regUVD_RB_WPTR);
++}
++
++/**
++ * vcn_v5_0_1_unified_ring_set_wptr - set enc write pointer
++ *
++ * @ring: amdgpu_ring pointer
++ *
++ * Commits the enc write pointer to the hardware
++ */
++static void vcn_v5_0_1_unified_ring_set_wptr(struct amdgpu_ring *ring)
++{
++	struct amdgpu_device *adev = ring->adev;
++
++	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
++		DRM_ERROR("wrong ring id is identified in %s", __func__);
++
++	if (ring->use_doorbell) {
++		*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
++		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
++	} else {
++		WREG32_SOC15(VCN, GET_INST(VCN, ring->me), regUVD_RB_WPTR,
++				lower_32_bits(ring->wptr));
++	}
++}
++
++static const struct amdgpu_ring_funcs vcn_v5_0_1_unified_ring_vm_funcs = {
++	.type = AMDGPU_RING_TYPE_VCN_ENC,
++	.align_mask = 0x3f,
++	.nop = VCN_ENC_CMD_NO_OP,
++	.get_rptr = vcn_v5_0_1_unified_ring_get_rptr,
++	.get_wptr = vcn_v5_0_1_unified_ring_get_wptr,
++	.set_wptr = vcn_v5_0_1_unified_ring_set_wptr,
++	.emit_frame_size = SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
++			   SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
++			   4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
++			   5 +
++			   5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
++			   1, /* vcn_v2_0_enc_ring_insert_end */
++	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
++	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
++	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
++	.emit_vm_flush = vcn_v4_0_3_enc_ring_emit_vm_flush,
++	.emit_hdp_flush = vcn_v4_0_3_ring_emit_hdp_flush,
++	.test_ring = amdgpu_vcn_enc_ring_test_ring,
++	.test_ib = amdgpu_vcn_unified_ring_test_ib,
++	.insert_nop = amdgpu_ring_insert_nop,
++	.insert_end = vcn_v2_0_enc_ring_insert_end,
++	.pad_ib = amdgpu_ring_generic_pad_ib,
++	.begin_use = amdgpu_vcn_ring_begin_use,
++	.end_use = amdgpu_vcn_ring_end_use,
++	.emit_wreg = vcn_v4_0_3_enc_ring_emit_wreg,
++	.emit_reg_wait = vcn_v4_0_3_enc_ring_emit_reg_wait,
++	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
++};
++
++/**
++ * vcn_v5_0_1_set_unified_ring_funcs - set unified ring functions
++ *
++ * @adev: amdgpu_device pointer
++ *
++ * Set unified ring functions
++ */
++static void vcn_v5_0_1_set_unified_ring_funcs(struct amdgpu_device *adev)
++{
++	int i, vcn_inst;
++
++	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
++		adev->vcn.inst[i].ring_enc[0].funcs = &vcn_v5_0_1_unified_ring_vm_funcs;
++		adev->vcn.inst[i].ring_enc[0].me = i;
++		vcn_inst = GET_INST(VCN, i);
++		adev->vcn.inst[i].aid_id = vcn_inst / adev->vcn.num_inst_per_aid;
++	}
++}
++
++/**
++ * vcn_v5_0_1_is_idle - check VCN block is idle
++ *
++ * @ip_block: Pointer to the amdgpu_ip_block structure
++ *
++ * Check whether VCN block is idle
++ */
++static bool vcn_v5_0_1_is_idle(struct amdgpu_ip_block *ip_block)
++{
++	struct amdgpu_device *adev = ip_block->adev;
++	int i, ret = 1;
++
++	for (i = 0; i < adev->vcn.num_vcn_inst; ++i)
++		ret &= (RREG32_SOC15(VCN, GET_INST(VCN, i), regUVD_STATUS) == UVD_STATUS__IDLE);
++
++	return ret;
++}
++
++/**
++ * vcn_v5_0_1_wait_for_idle - wait for VCN block idle
++ *
++ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
++ *
++ * Wait for VCN block idle
++ */
++static int vcn_v5_0_1_wait_for_idle(struct amdgpu_ip_block *ip_block)
++{
++	struct amdgpu_device *adev = ip_block->adev;
++	int i, ret = 0;
++
++	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
++		ret = SOC15_WAIT_ON_RREG(VCN, GET_INST(VCN, i), regUVD_STATUS, UVD_STATUS__IDLE,
++			UVD_STATUS__IDLE);
++		if (ret)
++			return ret;
++	}
++
++	return ret;
++}
++
++/**
++ * vcn_v5_0_1_set_clockgating_state - set VCN block clockgating state
++ *
++ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
++ * @state: clock gating state
++ *
++ * Set VCN block clockgating state
++ */
++static int vcn_v5_0_1_set_clockgating_state(struct amdgpu_ip_block *ip_block,
++					    enum amd_clockgating_state state)
++{
++	struct amdgpu_device *adev = ip_block->adev;
++	bool enable = state == AMD_CG_STATE_GATE;
++	int i;
++
++	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
++		struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
++
++		if (enable) {
++			if (RREG32_SOC15(VCN, GET_INST(VCN, i), regUVD_STATUS) != UVD_STATUS__IDLE)
++				return -EBUSY;
++			vcn_v5_0_1_enable_clock_gating(vinst);
++		} else {
++			vcn_v5_0_1_disable_clock_gating(vinst);
++		}
++	}
++
++	return 0;
++}
++
++static int vcn_v5_0_1_set_pg_state(struct amdgpu_vcn_inst *vinst,
++				   enum amd_powergating_state state)
++{
++	struct amdgpu_device *adev = vinst->adev;
++	int ret = 0;
++
++	/* for SRIOV, guest should not control VCN Power-gating
++	 * MMSCH FW should control Power-gating and clock-gating
++	 * guest should avoid touching CGC and PG
++	 */
++	if (amdgpu_sriov_vf(adev)) {
++		vinst->cur_state = AMD_PG_STATE_UNGATE;
++		return 0;
++	}
++
++	if (state == vinst->cur_state)
++		return 0;
++
++	if (state == AMD_PG_STATE_GATE)
++		ret = vcn_v5_0_1_stop(vinst);
++	else
++		ret = vcn_v5_0_1_start(vinst);
++
++	if (!ret)
++		vinst->cur_state = state;
++
++	return ret;
++}
++
++/**
++ * vcn_v5_0_1_process_interrupt - process VCN block interrupt
++ *
++ * @adev: amdgpu_device pointer
++ * @source: interrupt sources
++ * @entry: interrupt entry from clients and sources
++ *
++ * Process VCN block interrupt
++ */
++static int vcn_v5_0_1_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source,
++	struct amdgpu_iv_entry *entry)
++{
++	uint32_t i, inst;
++
++	i = node_id_to_phys_map[entry->node_id];
++
++	DRM_DEV_DEBUG(adev->dev, "IH: VCN TRAP\n");
++
++	for (inst = 0; inst < adev->vcn.num_vcn_inst; ++inst)
++		if (adev->vcn.inst[inst].aid_id == i)
++			break;
++	if (inst >= adev->vcn.num_vcn_inst) {
++		dev_WARN_ONCE(adev->dev, 1,
++				"Interrupt received for unknown VCN instance %d",
++				entry->node_id);
++		return 0;
++	}
++
++	switch (entry->src_id) {
++	case VCN_5_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
++		amdgpu_fence_process(&adev->vcn.inst[inst].ring_enc[0]);
++		break;
++	default:
++		DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n",
++			  entry->src_id, entry->src_data[0]);
++		break;
++	}
++
++	return 0;
++}
++
++static int vcn_v5_0_1_set_ras_interrupt_state(struct amdgpu_device *adev,
++					struct amdgpu_irq_src *source,
++					unsigned int type,
++					enum amdgpu_interrupt_state state)
++{
++	return 0;
++}
++
++static const struct amdgpu_irq_src_funcs vcn_v5_0_1_irq_funcs = {
++	.process = vcn_v5_0_1_process_interrupt,
++};
++
++static const struct amdgpu_irq_src_funcs vcn_v5_0_1_ras_irq_funcs = {
++	.set = vcn_v5_0_1_set_ras_interrupt_state,
++	.process = amdgpu_vcn_process_poison_irq,
++};
++
++
++/**
++ * vcn_v5_0_1_set_irq_funcs - set VCN block interrupt irq functions
++ *
++ * @adev: amdgpu_device pointer
++ *
++ * Set VCN block interrupt irq functions
++ */
++static void vcn_v5_0_1_set_irq_funcs(struct amdgpu_device *adev)
++{
++	int i;
++
++	for (i = 0; i < adev->vcn.num_vcn_inst; ++i)
++		adev->vcn.inst->irq.num_types++;
++
++	adev->vcn.inst->irq.funcs = &vcn_v5_0_1_irq_funcs;
++
++	adev->vcn.inst->ras_poison_irq.num_types = 1;
++	adev->vcn.inst->ras_poison_irq.funcs = &vcn_v5_0_1_ras_irq_funcs;
++
++}
++
++static const struct amd_ip_funcs vcn_v5_0_1_ip_funcs = {
++	.name = "vcn_v5_0_1",
++	.early_init = vcn_v5_0_1_early_init,
++	.late_init = NULL,
++	.sw_init = vcn_v5_0_1_sw_init,
++	.sw_fini = vcn_v5_0_1_sw_fini,
++	.hw_init = vcn_v5_0_1_hw_init,
++	.hw_fini = vcn_v5_0_1_hw_fini,
++	.suspend = vcn_v5_0_1_suspend,
++	.resume = vcn_v5_0_1_resume,
++	.is_idle = vcn_v5_0_1_is_idle,
++	.wait_for_idle = vcn_v5_0_1_wait_for_idle,
++	.check_soft_reset = NULL,
++	.pre_soft_reset = NULL,
++	.soft_reset = NULL,
++	.post_soft_reset = NULL,
++	.set_clockgating_state = vcn_v5_0_1_set_clockgating_state,
++	.set_powergating_state = vcn_set_powergating_state,
++	.dump_ip_state = vcn_v5_0_0_dump_ip_state,
++	.print_ip_state = vcn_v5_0_0_print_ip_state,
++};
++
++const struct amdgpu_ip_block_version vcn_v5_0_1_ip_block = {
++	.type = AMD_IP_BLOCK_TYPE_VCN,
++	.major = 5,
++	.minor = 0,
++	.rev = 1,
++	.funcs = &vcn_v5_0_1_ip_funcs,
++};
++
++static uint32_t vcn_v5_0_1_query_poison_by_instance(struct amdgpu_device *adev,
++			uint32_t instance, uint32_t sub_block)
++{
++	uint32_t poison_stat = 0, reg_value = 0;
++
++	switch (sub_block) {
++	case AMDGPU_VCN_V5_0_1_VCPU_VCODEC:
++		reg_value = RREG32_SOC15(VCN, instance, regUVD_RAS_VCPU_VCODEC_STATUS);
++		poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_VCPU_VCODEC_STATUS, POISONED_PF);
++		break;
++	default:
++		break;
++	}
++
++	if (poison_stat)
++		dev_info(adev->dev, "Poison detected in VCN%d, sub_block%d\n",
++			instance, sub_block);
++
++	return poison_stat;
++}
++
++static bool vcn_v5_0_1_query_poison_status(struct amdgpu_device *adev)
++{
++	uint32_t inst, sub;
++	uint32_t poison_stat = 0;
++
++	for (inst = 0; inst < adev->vcn.num_vcn_inst; inst++)
++		for (sub = 0; sub < AMDGPU_VCN_V5_0_1_MAX_SUB_BLOCK; sub++)
++			poison_stat +=
++			vcn_v5_0_1_query_poison_by_instance(adev, inst, sub);
++
++	return !!poison_stat;
++}
++
++static const struct amdgpu_ras_block_hw_ops vcn_v5_0_1_ras_hw_ops = {
++	.query_poison_status = vcn_v5_0_1_query_poison_status,
++};
++
++static int vcn_v5_0_1_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank,
++				      enum aca_smu_type type, void *data)
++{
++	struct aca_bank_info info;
++	u64 misc0;
++	int ret;
++
++	ret = aca_bank_info_decode(bank, &info);
++	if (ret)
++		return ret;
++
++	misc0 = bank->regs[ACA_REG_IDX_MISC0];
++	switch (type) {
++	case ACA_SMU_TYPE_UE:
++		bank->aca_err_type = ACA_ERROR_TYPE_UE;
++		ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_UE,
++						     1ULL);
++		break;
++	case ACA_SMU_TYPE_CE:
++		bank->aca_err_type = ACA_ERROR_TYPE_CE;
++		ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type,
++						     ACA_REG__MISC0__ERRCNT(misc0));
++		break;
++	default:
++		return -EINVAL;
++	}
++
++	return ret;
++}
++
++/* reference to smu driver interface header file */
++static int vcn_v5_0_1_err_codes[] = {
++	14, 15, /* VCN */
++};
++
++static bool vcn_v5_0_1_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
++					 enum aca_smu_type type, void *data)
++{
++	u32 instlo;
++
++	instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
++	instlo &= GENMASK(31, 1);
++
++	if (instlo != mmSMNAID_AID0_MCA_SMU)
++		return false;
++
++	if (aca_bank_check_error_codes(handle->adev, bank,
++				       vcn_v5_0_1_err_codes,
++				       ARRAY_SIZE(vcn_v5_0_1_err_codes)))
++		return false;
++
++	return true;
++}
++
++static const struct aca_bank_ops vcn_v5_0_1_aca_bank_ops = {
++	.aca_bank_parser = vcn_v5_0_1_aca_bank_parser,
++	.aca_bank_is_valid = vcn_v5_0_1_aca_bank_is_valid,
++};
++
++static const struct aca_info vcn_v5_0_1_aca_info = {
++	.hwip = ACA_HWIP_TYPE_SMU,
++	.mask = ACA_ERROR_UE_MASK,
++	.bank_ops = &vcn_v5_0_1_aca_bank_ops,
++};
++
++static int vcn_v5_0_1_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
++{
++	int r;
++
++	r = amdgpu_ras_block_late_init(adev, ras_block);
++	if (r)
++		return r;
++
++	r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__VCN,
++				&vcn_v5_0_1_aca_info, NULL);
++	if (r)
++		goto late_fini;
++
++	return 0;
++
++late_fini:
++	amdgpu_ras_block_late_fini(adev, ras_block);
++
++	return r;
++}
++
++static struct amdgpu_vcn_ras vcn_v5_0_1_ras = {
++	.ras_block = {
++		.hw_ops = &vcn_v5_0_1_ras_hw_ops,
++		.ras_late_init = vcn_v5_0_1_ras_late_init,
++	},
++};
++
++static void vcn_v5_0_1_set_ras_funcs(struct amdgpu_device *adev)
++{
++	adev->vcn.ras = &vcn_v5_0_1_ras;
++}
+diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
+index ca446e08f6a270..21aff7fa6375d8 100644
+--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
+@@ -1019,8 +1019,22 @@ void dcn35_calc_blocks_to_gate(struct dc *dc, struct dc_state *context,
+ 		if (pipe_ctx->plane_res.dpp || pipe_ctx->stream_res.opp)
+ 			update_state->pg_pipe_res_update[PG_MPCC][pipe_ctx->plane_res.mpcc_inst] = false;
+ 
+-		if (pipe_ctx->stream_res.dsc)
++		if (pipe_ctx->stream_res.dsc) {
+ 			update_state->pg_pipe_res_update[PG_DSC][pipe_ctx->stream_res.dsc->inst] = false;
++			if (dc->caps.sequential_ono) {
++				update_state->pg_pipe_res_update[PG_HUBP][pipe_ctx->stream_res.dsc->inst] = false;
++				update_state->pg_pipe_res_update[PG_DPP][pipe_ctx->stream_res.dsc->inst] = false;
++
++				/* All HUBP/DPP instances must be powered if the DSC inst != HUBP inst */
++				if (!pipe_ctx->top_pipe && pipe_ctx->plane_res.hubp &&
++				    pipe_ctx->plane_res.hubp->inst != pipe_ctx->stream_res.dsc->inst) {
++					for (j = 0; j < dc->res_pool->pipe_count; ++j) {
++						update_state->pg_pipe_res_update[PG_HUBP][j] = false;
++						update_state->pg_pipe_res_update[PG_DPP][j] = false;
++					}
++				}
++			}
++		}
+ 
+ 		if (pipe_ctx->stream_res.opp)
+ 			update_state->pg_pipe_res_update[PG_OPP][pipe_ctx->stream_res.opp->inst] = false;
+@@ -1165,6 +1179,25 @@ void dcn35_calc_blocks_to_ungate(struct dc *dc, struct dc_state *context,
+ 		update_state->pg_pipe_res_update[PG_HDMISTREAM][0] = true;
+ 
+ 	if (dc->caps.sequential_ono) {
++		for (i = 0; i < dc->res_pool->pipe_count; i++) {
++			struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
++
++			if (new_pipe->stream_res.dsc && !new_pipe->top_pipe &&
++			    update_state->pg_pipe_res_update[PG_DSC][new_pipe->stream_res.dsc->inst]) {
++				update_state->pg_pipe_res_update[PG_HUBP][new_pipe->stream_res.dsc->inst] = true;
++				update_state->pg_pipe_res_update[PG_DPP][new_pipe->stream_res.dsc->inst] = true;
++
++				/* All HUBP/DPP instances must be powered if the DSC inst != HUBP inst */
++				if (new_pipe->plane_res.hubp &&
++				    new_pipe->plane_res.hubp->inst != new_pipe->stream_res.dsc->inst) {
++					for (j = 0; j < dc->res_pool->pipe_count; ++j) {
++						update_state->pg_pipe_res_update[PG_HUBP][j] = true;
++						update_state->pg_pipe_res_update[PG_DPP][j] = true;
++					}
++				}
++			}
++		}
++
+ 		for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
+ 			if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
+ 			    update_state->pg_pipe_res_update[PG_DPP][i]) {
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+index 4f78c84da780c7..c5bca3019de070 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+@@ -58,6 +58,7 @@
+ 
+ MODULE_FIRMWARE("amdgpu/aldebaran_smc.bin");
+ MODULE_FIRMWARE("amdgpu/smu_13_0_0.bin");
++MODULE_FIRMWARE("amdgpu/smu_13_0_0_kicker.bin");
+ MODULE_FIRMWARE("amdgpu/smu_13_0_7.bin");
+ MODULE_FIRMWARE("amdgpu/smu_13_0_10.bin");
+ 
+@@ -92,7 +93,7 @@ const int pmfw_decoded_link_width[7] = {0, 1, 2, 4, 8, 12, 16};
+ int smu_v13_0_init_microcode(struct smu_context *smu)
+ {
+ 	struct amdgpu_device *adev = smu->adev;
+-	char ucode_prefix[15];
++	char ucode_prefix[30];
+ 	int err = 0;
+ 	const struct smc_firmware_header_v1_0 *hdr;
+ 	const struct common_firmware_header *header;
+@@ -103,7 +104,10 @@ int smu_v13_0_init_microcode(struct smu_context *smu)
+ 		return 0;
+ 
+ 	amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix));
+-	err = amdgpu_ucode_request(adev, &adev->pm.fw, "amdgpu/%s.bin", ucode_prefix);
++	if (amdgpu_is_kicker_fw(adev))
++		err = amdgpu_ucode_request(adev, &adev->pm.fw, "amdgpu/%s_kicker.bin", ucode_prefix);
++	else
++		err = amdgpu_ucode_request(adev, &adev->pm.fw, "amdgpu/%s.bin", ucode_prefix);
+ 	if (err)
+ 		goto out;
+ 
+diff --git a/drivers/gpu/drm/bridge/aux-hpd-bridge.c b/drivers/gpu/drm/bridge/aux-hpd-bridge.c
+index 6886db2d9e00c4..8e889a38fad005 100644
+--- a/drivers/gpu/drm/bridge/aux-hpd-bridge.c
++++ b/drivers/gpu/drm/bridge/aux-hpd-bridge.c
+@@ -64,10 +64,11 @@ struct auxiliary_device *devm_drm_dp_hpd_bridge_alloc(struct device *parent, str
+ 	adev->id = ret;
+ 	adev->name = "dp_hpd_bridge";
+ 	adev->dev.parent = parent;
+-	adev->dev.of_node = of_node_get(parent->of_node);
+ 	adev->dev.release = drm_aux_hpd_bridge_release;
+ 	adev->dev.platform_data = of_node_get(np);
+ 
++	device_set_of_node_from_dev(&adev->dev, parent);
++
+ 	ret = auxiliary_device_init(adev);
+ 	if (ret) {
+ 		of_node_put(adev->dev.platform_data);
+diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+index f57df8c4813911..05e4a5a63f5d80 100644
+--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
++++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+@@ -187,6 +187,7 @@ struct fimd_context {
+ 	u32				i80ifcon;
+ 	bool				i80_if;
+ 	bool				suspended;
++	bool				dp_clk_enabled;
+ 	wait_queue_head_t		wait_vsync_queue;
+ 	atomic_t			wait_vsync_event;
+ 	atomic_t			win_updated;
+@@ -1047,7 +1048,18 @@ static void fimd_dp_clock_enable(struct exynos_drm_clk *clk, bool enable)
+ 	struct fimd_context *ctx = container_of(clk, struct fimd_context,
+ 						dp_clk);
+ 	u32 val = enable ? DP_MIE_CLK_DP_ENABLE : DP_MIE_CLK_DISABLE;
++
++	if (enable == ctx->dp_clk_enabled)
++		return;
++
++	if (enable)
++		pm_runtime_resume_and_get(ctx->dev);
++
++	ctx->dp_clk_enabled = enable;
+ 	writel(val, ctx->regs + DP_MIE_CLKCON);
++
++	if (!enable)
++		pm_runtime_put(ctx->dev);
+ }
+ 
+ static const struct exynos_drm_crtc_ops fimd_crtc_ops = {
+diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
+index 45cca965c11b48..ca9e0c730013d7 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp.c
++++ b/drivers/gpu/drm/i915/display/intel_dp.c
+@@ -4300,6 +4300,24 @@ intel_dp_mst_disconnect(struct intel_dp *intel_dp)
+ static bool
+ intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *esi)
+ {
++	struct intel_display *display = to_intel_display(intel_dp);
++	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
++
++	/*
++	 * Display WA for HSD #13013007775: mtl/arl/lnl
++	 * Read the sink count and link service IRQ registers in separate
++	 * transactions to prevent disconnecting the sink on a TBT link
++	 * inadvertently.
++	 */
++	if (IS_DISPLAY_VER(display, 14, 20) && !IS_BATTLEMAGE(i915)) {
++		if (drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI, esi, 3) != 3)
++			return false;
++
++		/* DP_SINK_COUNT_ESI + 3 == DP_LINK_SERVICE_IRQ_VECTOR_ESI0 */
++		return drm_dp_dpcd_readb(&intel_dp->aux, DP_LINK_SERVICE_IRQ_VECTOR_ESI0,
++					 &esi[3]) == 1;
++	}
++
+ 	return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI, esi, 4) == 4;
+ }
+ 
+diff --git a/drivers/gpu/drm/i915/gt/intel_gsc.c b/drivers/gpu/drm/i915/gt/intel_gsc.c
+index 1e925c75fb080d..c43febc862dc3d 100644
+--- a/drivers/gpu/drm/i915/gt/intel_gsc.c
++++ b/drivers/gpu/drm/i915/gt/intel_gsc.c
+@@ -284,7 +284,7 @@ static void gsc_irq_handler(struct intel_gt *gt, unsigned int intf_id)
+ 	if (gt->gsc.intf[intf_id].irq < 0)
+ 		return;
+ 
+-	ret = generic_handle_irq(gt->gsc.intf[intf_id].irq);
++	ret = generic_handle_irq_safe(gt->gsc.intf[intf_id].irq);
+ 	if (ret)
+ 		gt_err_ratelimited(gt, "error handling GSC irq: %d\n", ret);
+ }
+diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+index 72277bc8322e81..f84fa09cdb3394 100644
+--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
++++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+@@ -575,7 +575,6 @@ static int ring_context_alloc(struct intel_context *ce)
+ 	/* One ringbuffer to rule them all */
+ 	GEM_BUG_ON(!engine->legacy.ring);
+ 	ce->ring = engine->legacy.ring;
+-	ce->timeline = intel_timeline_get(engine->legacy.timeline);
+ 
+ 	GEM_BUG_ON(ce->state);
+ 	if (engine->context_size) {
+@@ -588,6 +587,8 @@ static int ring_context_alloc(struct intel_context *ce)
+ 		ce->state = vma;
+ 	}
+ 
++	ce->timeline = intel_timeline_get(engine->legacy.timeline);
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
+index acae30a04a947c..0122719ee9218e 100644
+--- a/drivers/gpu/drm/i915/selftests/i915_request.c
++++ b/drivers/gpu/drm/i915/selftests/i915_request.c
+@@ -73,8 +73,8 @@ static int igt_add_request(void *arg)
+ 	/* Basic preliminary test to create a request and let it loose! */
+ 
+ 	request = mock_request(rcs0(i915)->kernel_context, HZ / 10);
+-	if (!request)
+-		return -ENOMEM;
++	if (IS_ERR(request))
++		return PTR_ERR(request);
+ 
+ 	i915_request_add(request);
+ 
+@@ -91,8 +91,8 @@ static int igt_wait_request(void *arg)
+ 	/* Submit a request, then wait upon it */
+ 
+ 	request = mock_request(rcs0(i915)->kernel_context, T);
+-	if (!request)
+-		return -ENOMEM;
++	if (IS_ERR(request))
++		return PTR_ERR(request);
+ 
+ 	i915_request_get(request);
+ 
+@@ -160,8 +160,8 @@ static int igt_fence_wait(void *arg)
+ 	/* Submit a request, treat it as a fence and wait upon it */
+ 
+ 	request = mock_request(rcs0(i915)->kernel_context, T);
+-	if (!request)
+-		return -ENOMEM;
++	if (IS_ERR(request))
++		return PTR_ERR(request);
+ 
+ 	if (dma_fence_wait_timeout(&request->fence, false, T) != -ETIME) {
+ 		pr_err("fence wait success before submit (expected timeout)!\n");
+@@ -219,8 +219,8 @@ static int igt_request_rewind(void *arg)
+ 	GEM_BUG_ON(IS_ERR(ce));
+ 	request = mock_request(ce, 2 * HZ);
+ 	intel_context_put(ce);
+-	if (!request) {
+-		err = -ENOMEM;
++	if (IS_ERR(request)) {
++		err = PTR_ERR(request);
+ 		goto err_context_0;
+ 	}
+ 
+@@ -237,8 +237,8 @@ static int igt_request_rewind(void *arg)
+ 	GEM_BUG_ON(IS_ERR(ce));
+ 	vip = mock_request(ce, 0);
+ 	intel_context_put(ce);
+-	if (!vip) {
+-		err = -ENOMEM;
++	if (IS_ERR(vip)) {
++		err = PTR_ERR(vip);
+ 		goto err_context_1;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/i915/selftests/mock_request.c b/drivers/gpu/drm/i915/selftests/mock_request.c
+index 09f747228dff57..1b0cf073e9643f 100644
+--- a/drivers/gpu/drm/i915/selftests/mock_request.c
++++ b/drivers/gpu/drm/i915/selftests/mock_request.c
+@@ -35,7 +35,7 @@ mock_request(struct intel_context *ce, unsigned long delay)
+ 	/* NB the i915->requests slab cache is enlarged to fit mock_request */
+ 	request = intel_context_create_request(ce);
+ 	if (IS_ERR(request))
+-		return NULL;
++		return request;
+ 
+ 	request->mock.delay = delay;
+ 	return request;
+diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
+index f775638d239a5c..4b3a8ee8e278f0 100644
+--- a/drivers/gpu/drm/msm/msm_gem_submit.c
++++ b/drivers/gpu/drm/msm/msm_gem_submit.c
+@@ -85,6 +85,15 @@ void __msm_gem_submit_destroy(struct kref *kref)
+ 			container_of(kref, struct msm_gem_submit, ref);
+ 	unsigned i;
+ 
++	/*
++	 * In error paths, we could unref the submit without calling
++	 * drm_sched_entity_push_job(), so msm_job_free() will never
++	 * get called.  Since drm_sched_job_cleanup() will NULL out
++	 * s_fence, we can use that to detect this case.
++	 */
++	if (submit->base.s_fence)
++		drm_sched_job_cleanup(&submit->base);
++
+ 	if (submit->fence_id) {
+ 		spin_lock(&submit->queue->idr_lock);
+ 		idr_remove(&submit->queue->fence_idr, submit->fence_id);
+@@ -658,6 +667,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
+ 	struct msm_ringbuffer *ring;
+ 	struct msm_submit_post_dep *post_deps = NULL;
+ 	struct drm_syncobj **syncobjs_to_reset = NULL;
++	struct sync_file *sync_file = NULL;
+ 	int out_fence_fd = -1;
+ 	unsigned i;
+ 	int ret;
+@@ -868,7 +878,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
+ 	}
+ 
+ 	if (ret == 0 && args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
+-		struct sync_file *sync_file = sync_file_create(submit->user_fence);
++		sync_file = sync_file_create(submit->user_fence);
+ 		if (!sync_file) {
+ 			ret = -ENOMEM;
+ 		} else {
+@@ -902,8 +912,11 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
+ out_unlock:
+ 	mutex_unlock(&queue->lock);
+ out_post_unlock:
+-	if (ret && (out_fence_fd >= 0))
++	if (ret && (out_fence_fd >= 0)) {
+ 		put_unused_fd(out_fence_fd);
++		if (sync_file)
++			fput(sync_file->file);
++	}
+ 
+ 	if (!IS_ERR_OR_NULL(submit)) {
+ 		msm_gem_submit_put(submit);
+diff --git a/drivers/gpu/drm/tiny/simpledrm.c b/drivers/gpu/drm/tiny/simpledrm.c
+index d19e102894282f..07abaf27315f78 100644
+--- a/drivers/gpu/drm/tiny/simpledrm.c
++++ b/drivers/gpu/drm/tiny/simpledrm.c
+@@ -284,7 +284,7 @@ static struct simpledrm_device *simpledrm_device_of_dev(struct drm_device *dev)
+ 
+ static void simpledrm_device_release_clocks(void *res)
+ {
+-	struct simpledrm_device *sdev = simpledrm_device_of_dev(res);
++	struct simpledrm_device *sdev = res;
+ 	unsigned int i;
+ 
+ 	for (i = 0; i < sdev->clk_count; ++i) {
+@@ -382,7 +382,7 @@ static int simpledrm_device_init_clocks(struct simpledrm_device *sdev)
+ 
+ static void simpledrm_device_release_regulators(void *res)
+ {
+-	struct simpledrm_device *sdev = simpledrm_device_of_dev(res);
++	struct simpledrm_device *sdev = res;
+ 	unsigned int i;
+ 
+ 	for (i = 0; i < sdev->regulator_count; ++i) {
+diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
+index 75b4725d49c7e1..d4b0549205c29e 100644
+--- a/drivers/gpu/drm/v3d/v3d_drv.h
++++ b/drivers/gpu/drm/v3d/v3d_drv.h
+@@ -95,6 +95,12 @@ struct v3d_perfmon {
+ 	u64 values[] __counted_by(ncounters);
+ };
+ 
++enum v3d_irq {
++	V3D_CORE_IRQ,
++	V3D_HUB_IRQ,
++	V3D_MAX_IRQS,
++};
++
+ struct v3d_dev {
+ 	struct drm_device drm;
+ 
+@@ -106,6 +112,8 @@ struct v3d_dev {
+ 
+ 	bool single_irq_line;
+ 
++	int irq[V3D_MAX_IRQS];
++
+ 	struct v3d_perfmon_info perfmon_info;
+ 
+ 	void __iomem *hub_regs;
+diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
+index da8faf3b901165..6b6ba7a68fcb40 100644
+--- a/drivers/gpu/drm/v3d/v3d_gem.c
++++ b/drivers/gpu/drm/v3d/v3d_gem.c
+@@ -118,6 +118,8 @@ v3d_reset(struct v3d_dev *v3d)
+ 	if (false)
+ 		v3d_idle_axi(v3d, 0);
+ 
++	v3d_irq_disable(v3d);
++
+ 	v3d_idle_gca(v3d);
+ 	v3d_reset_v3d(v3d);
+ 
+diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c
+index 72b6a119412fa7..b98e1a4b33c71c 100644
+--- a/drivers/gpu/drm/v3d/v3d_irq.c
++++ b/drivers/gpu/drm/v3d/v3d_irq.c
+@@ -228,7 +228,7 @@ v3d_hub_irq(int irq, void *arg)
+ int
+ v3d_irq_init(struct v3d_dev *v3d)
+ {
+-	int irq1, ret, core;
++	int irq, ret, core;
+ 
+ 	INIT_WORK(&v3d->overflow_mem_work, v3d_overflow_mem_work);
+ 
+@@ -239,17 +239,24 @@ v3d_irq_init(struct v3d_dev *v3d)
+ 		V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS(v3d->ver));
+ 	V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS(v3d->ver));
+ 
+-	irq1 = platform_get_irq_optional(v3d_to_pdev(v3d), 1);
+-	if (irq1 == -EPROBE_DEFER)
+-		return irq1;
+-	if (irq1 > 0) {
+-		ret = devm_request_irq(v3d->drm.dev, irq1,
++	irq = platform_get_irq_optional(v3d_to_pdev(v3d), 1);
++	if (irq == -EPROBE_DEFER)
++		return irq;
++	if (irq > 0) {
++		v3d->irq[V3D_CORE_IRQ] = irq;
++
++		ret = devm_request_irq(v3d->drm.dev, v3d->irq[V3D_CORE_IRQ],
+ 				       v3d_irq, IRQF_SHARED,
+ 				       "v3d_core0", v3d);
+ 		if (ret)
+ 			goto fail;
+-		ret = devm_request_irq(v3d->drm.dev,
+-				       platform_get_irq(v3d_to_pdev(v3d), 0),
++
++		irq = platform_get_irq(v3d_to_pdev(v3d), 0);
++		if (irq < 0)
++			return irq;
++		v3d->irq[V3D_HUB_IRQ] = irq;
++
++		ret = devm_request_irq(v3d->drm.dev, v3d->irq[V3D_HUB_IRQ],
+ 				       v3d_hub_irq, IRQF_SHARED,
+ 				       "v3d_hub", v3d);
+ 		if (ret)
+@@ -257,8 +264,12 @@ v3d_irq_init(struct v3d_dev *v3d)
+ 	} else {
+ 		v3d->single_irq_line = true;
+ 
+-		ret = devm_request_irq(v3d->drm.dev,
+-				       platform_get_irq(v3d_to_pdev(v3d), 0),
++		irq = platform_get_irq(v3d_to_pdev(v3d), 0);
++		if (irq < 0)
++			return irq;
++		v3d->irq[V3D_CORE_IRQ] = irq;
++
++		ret = devm_request_irq(v3d->drm.dev, v3d->irq[V3D_CORE_IRQ],
+ 				       v3d_irq, IRQF_SHARED,
+ 				       "v3d", v3d);
+ 		if (ret)
+@@ -299,6 +310,12 @@ v3d_irq_disable(struct v3d_dev *v3d)
+ 		V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_SET, ~0);
+ 	V3D_WRITE(V3D_HUB_INT_MSK_SET, ~0);
+ 
++	/* Finish any interrupt handler still in flight. */
++	for (int i = 0; i < V3D_MAX_IRQS; i++) {
++		if (v3d->irq[i])
++			synchronize_irq(v3d->irq[i]);
++	}
++
+ 	/* Clear any pending interrupts we might have left. */
+ 	for (core = 0; core < v3d->cores; core++)
+ 		V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS(v3d->ver));
+diff --git a/drivers/gpu/drm/xe/Kconfig b/drivers/gpu/drm/xe/Kconfig
+index 7bbe46a98ff1f4..93e742c1f21e74 100644
+--- a/drivers/gpu/drm/xe/Kconfig
++++ b/drivers/gpu/drm/xe/Kconfig
+@@ -1,7 +1,8 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+ config DRM_XE
+ 	tristate "Intel Xe Graphics"
+-	depends on DRM && PCI && MMU && (m || (y && KUNIT=y))
++	depends on DRM && PCI && MMU
++	depends on KUNIT || !KUNIT
+ 	select INTERVAL_TREE
+ 	# we need shmfs for the swappable backing store, and in particular
+ 	# the shmem_readpage() which depends upon tmpfs
+diff --git a/drivers/gpu/drm/xe/abi/guc_communication_ctb_abi.h b/drivers/gpu/drm/xe/abi/guc_communication_ctb_abi.h
+index 8f86a16dc5777c..f58198cf2cf639 100644
+--- a/drivers/gpu/drm/xe/abi/guc_communication_ctb_abi.h
++++ b/drivers/gpu/drm/xe/abi/guc_communication_ctb_abi.h
+@@ -52,6 +52,7 @@ struct guc_ct_buffer_desc {
+ #define GUC_CTB_STATUS_OVERFLOW				(1 << 0)
+ #define GUC_CTB_STATUS_UNDERFLOW			(1 << 1)
+ #define GUC_CTB_STATUS_MISMATCH				(1 << 2)
++#define GUC_CTB_STATUS_DISABLED				(1 << 3)
+ 	u32 reserved[13];
+ } __packed;
+ static_assert(sizeof(struct guc_ct_buffer_desc) == 64);
+diff --git a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h
+index cb6c7598824be3..9c4cf050059ac2 100644
+--- a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h
++++ b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h
+@@ -29,7 +29,7 @@ static inline int i915_gem_stolen_insert_node_in_range(struct xe_device *xe,
+ 
+ 	bo = xe_bo_create_locked_range(xe, xe_device_get_root_tile(xe),
+ 				       NULL, size, start, end,
+-				       ttm_bo_type_kernel, flags);
++				       ttm_bo_type_kernel, flags, 0);
+ 	if (IS_ERR(bo)) {
+ 		err = PTR_ERR(bo);
+ 		bo = NULL;
+diff --git a/drivers/gpu/drm/xe/display/xe_dsb_buffer.c b/drivers/gpu/drm/xe/display/xe_dsb_buffer.c
+index f99d901a3214f9..9f941fc2e36bb2 100644
+--- a/drivers/gpu/drm/xe/display/xe_dsb_buffer.c
++++ b/drivers/gpu/drm/xe/display/xe_dsb_buffer.c
+@@ -17,10 +17,7 @@ u32 intel_dsb_buffer_ggtt_offset(struct intel_dsb_buffer *dsb_buf)
+ 
+ void intel_dsb_buffer_write(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val)
+ {
+-	struct xe_device *xe = dsb_buf->vma->bo->tile->xe;
+-
+ 	iosys_map_wr(&dsb_buf->vma->bo->vmap, idx * 4, u32, val);
+-	xe_device_l2_flush(xe);
+ }
+ 
+ u32 intel_dsb_buffer_read(struct intel_dsb_buffer *dsb_buf, u32 idx)
+@@ -30,12 +27,9 @@ u32 intel_dsb_buffer_read(struct intel_dsb_buffer *dsb_buf, u32 idx)
+ 
+ void intel_dsb_buffer_memset(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val, size_t size)
+ {
+-	struct xe_device *xe = dsb_buf->vma->bo->tile->xe;
+-
+ 	WARN_ON(idx > (dsb_buf->buf_size - size) / sizeof(*dsb_buf->cmd_buf));
+ 
+ 	iosys_map_memset(&dsb_buf->vma->bo->vmap, idx * 4, val, size);
+-	xe_device_l2_flush(xe);
+ }
+ 
+ bool intel_dsb_buffer_create(struct intel_crtc *crtc, struct intel_dsb_buffer *dsb_buf, size_t size)
+@@ -48,11 +42,12 @@ bool intel_dsb_buffer_create(struct intel_crtc *crtc, struct intel_dsb_buffer *d
+ 	if (!vma)
+ 		return false;
+ 
++	/* Set scanout flag for WC mapping */
+ 	obj = xe_bo_create_pin_map(xe, xe_device_get_root_tile(xe),
+ 				   NULL, PAGE_ALIGN(size),
+ 				   ttm_bo_type_kernel,
+ 				   XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) |
+-				   XE_BO_FLAG_GGTT);
++				   XE_BO_FLAG_SCANOUT | XE_BO_FLAG_GGTT);
+ 	if (IS_ERR(obj)) {
+ 		kfree(vma);
+ 		return false;
+@@ -73,5 +68,12 @@ void intel_dsb_buffer_cleanup(struct intel_dsb_buffer *dsb_buf)
+ 
+ void intel_dsb_buffer_flush_map(struct intel_dsb_buffer *dsb_buf)
+ {
+-	/* TODO: add xe specific flush_map() for dsb buffer object. */
++	struct xe_device *xe = dsb_buf->vma->bo->tile->xe;
++
++	/*
++	 * The memory barrier here is to ensure coherency of DSB vs MMIO,
++	 * both for weak ordering archs and discrete cards.
++	 */
++	xe_device_wmb(xe);
++	xe_device_l2_flush(xe);
+ }
+diff --git a/drivers/gpu/drm/xe/display/xe_fb_pin.c b/drivers/gpu/drm/xe/display/xe_fb_pin.c
+index b58fc4ba2aacb5..0558b106f8b602 100644
+--- a/drivers/gpu/drm/xe/display/xe_fb_pin.c
++++ b/drivers/gpu/drm/xe/display/xe_fb_pin.c
+@@ -153,7 +153,10 @@ static int __xe_pin_fb_vma_dpt(const struct intel_framebuffer *fb,
+ 	}
+ 
+ 	vma->dpt = dpt;
+-	vma->node = dpt->ggtt_node;
++	vma->node = dpt->ggtt_node[tile0->id];
++
++	/* Ensure DPT writes are flushed */
++	xe_device_l2_flush(xe);
+ 	return 0;
+ }
+ 
+@@ -203,8 +206,8 @@ static int __xe_pin_fb_vma_ggtt(const struct intel_framebuffer *fb,
+ 	if (xe_bo_is_vram(bo) && ggtt->flags & XE_GGTT_FLAGS_64K)
+ 		align = max_t(u32, align, SZ_64K);
+ 
+-	if (bo->ggtt_node && view->type == I915_GTT_VIEW_NORMAL) {
+-		vma->node = bo->ggtt_node;
++	if (bo->ggtt_node[ggtt->tile->id] && view->type == I915_GTT_VIEW_NORMAL) {
++		vma->node = bo->ggtt_node[ggtt->tile->id];
+ 	} else if (view->type == I915_GTT_VIEW_NORMAL) {
+ 		u32 x, size = bo->ttm.base.size;
+ 
+@@ -318,8 +321,6 @@ static struct i915_vma *__xe_pin_fb_vma(const struct intel_framebuffer *fb,
+ 	if (ret)
+ 		goto err_unpin;
+ 
+-	/* Ensure DPT writes are flushed */
+-	xe_device_l2_flush(xe);
+ 	return vma;
+ 
+ err_unpin:
+@@ -333,10 +334,12 @@ static struct i915_vma *__xe_pin_fb_vma(const struct intel_framebuffer *fb,
+ 
+ static void __xe_unpin_fb_vma(struct i915_vma *vma)
+ {
++	u8 tile_id = vma->node->ggtt->tile->id;
++
+ 	if (vma->dpt)
+ 		xe_bo_unpin_map_no_vm(vma->dpt);
+-	else if (!xe_ggtt_node_allocated(vma->bo->ggtt_node) ||
+-		 vma->bo->ggtt_node->base.start != vma->node->base.start)
++	else if (!xe_ggtt_node_allocated(vma->bo->ggtt_node[tile_id]) ||
++		 vma->bo->ggtt_node[tile_id]->base.start != vma->node->base.start)
+ 		xe_ggtt_node_remove(vma->node, false);
+ 
+ 	ttm_bo_reserve(&vma->bo->ttm, false, false, NULL);
+diff --git a/drivers/gpu/drm/xe/regs/xe_reg_defs.h b/drivers/gpu/drm/xe/regs/xe_reg_defs.h
+index 23f7dc5bbe995b..51fd40ffafcb93 100644
+--- a/drivers/gpu/drm/xe/regs/xe_reg_defs.h
++++ b/drivers/gpu/drm/xe/regs/xe_reg_defs.h
+@@ -128,7 +128,7 @@ struct xe_reg_mcr {
+  *       options.
+  */
+ #define XE_REG_MCR(r_, ...)	((const struct xe_reg_mcr){					\
+-				 .__reg = XE_REG_INITIALIZER(r_,  ##__VA_ARGS__, .mcr = 1)	\
++				 .__reg = XE_REG_INITIALIZER(r_, ##__VA_ARGS__, .mcr = 1)	\
+ 				 })
+ 
+ static inline bool xe_reg_is_valid(struct xe_reg r)
+diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
+index 8acc4640f0a285..5f745d9ed6cc25 100644
+--- a/drivers/gpu/drm/xe/xe_bo.c
++++ b/drivers/gpu/drm/xe/xe_bo.c
+@@ -1130,6 +1130,8 @@ static void xe_ttm_bo_destroy(struct ttm_buffer_object *ttm_bo)
+ {
+ 	struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
+ 	struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
++	struct xe_tile *tile;
++	u8 id;
+ 
+ 	if (bo->ttm.base.import_attach)
+ 		drm_prime_gem_destroy(&bo->ttm.base, NULL);
+@@ -1137,8 +1139,9 @@ static void xe_ttm_bo_destroy(struct ttm_buffer_object *ttm_bo)
+ 
+ 	xe_assert(xe, list_empty(&ttm_bo->base.gpuva.list));
+ 
+-	if (bo->ggtt_node && bo->ggtt_node->base.size)
+-		xe_ggtt_remove_bo(bo->tile->mem.ggtt, bo);
++	for_each_tile(tile, xe, id)
++		if (bo->ggtt_node[id] && bo->ggtt_node[id]->base.size)
++			xe_ggtt_remove_bo(tile->mem.ggtt, bo);
+ 
+ #ifdef CONFIG_PROC_FS
+ 	if (bo->client)
+@@ -1308,6 +1311,10 @@ struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
+ 		return ERR_PTR(-EINVAL);
+ 	}
+ 
++	/* XE_BO_FLAG_GGTTx requires XE_BO_FLAG_GGTT also be set */
++	if ((flags & XE_BO_FLAG_GGTT_ALL) && !(flags & XE_BO_FLAG_GGTT))
++		return ERR_PTR(-EINVAL);
++
+ 	if (flags & (XE_BO_FLAG_VRAM_MASK | XE_BO_FLAG_STOLEN) &&
+ 	    !(flags & XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE) &&
+ 	    ((xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) ||
+@@ -1454,7 +1461,8 @@ static struct xe_bo *
+ __xe_bo_create_locked(struct xe_device *xe,
+ 		      struct xe_tile *tile, struct xe_vm *vm,
+ 		      size_t size, u64 start, u64 end,
+-		      u16 cpu_caching, enum ttm_bo_type type, u32 flags)
++		      u16 cpu_caching, enum ttm_bo_type type, u32 flags,
++		      u64 alignment)
+ {
+ 	struct xe_bo *bo = NULL;
+ 	int err;
+@@ -1483,6 +1491,8 @@ __xe_bo_create_locked(struct xe_device *xe,
+ 	if (IS_ERR(bo))
+ 		return bo;
+ 
++	bo->min_align = alignment;
++
+ 	/*
+ 	 * Note that instead of taking a reference no the drm_gpuvm_resv_bo(),
+ 	 * to ensure the shared resv doesn't disappear under the bo, the bo
+@@ -1495,19 +1505,29 @@ __xe_bo_create_locked(struct xe_device *xe,
+ 	bo->vm = vm;
+ 
+ 	if (bo->flags & XE_BO_FLAG_GGTT) {
+-		if (!tile && flags & XE_BO_FLAG_STOLEN)
+-			tile = xe_device_get_root_tile(xe);
++		struct xe_tile *t;
++		u8 id;
+ 
+-		xe_assert(xe, tile);
++		if (!(bo->flags & XE_BO_FLAG_GGTT_ALL)) {
++			if (!tile && flags & XE_BO_FLAG_STOLEN)
++				tile = xe_device_get_root_tile(xe);
+ 
+-		if (flags & XE_BO_FLAG_FIXED_PLACEMENT) {
+-			err = xe_ggtt_insert_bo_at(tile->mem.ggtt, bo,
+-						   start + bo->size, U64_MAX);
+-		} else {
+-			err = xe_ggtt_insert_bo(tile->mem.ggtt, bo);
++			xe_assert(xe, tile);
++		}
++
++		for_each_tile(t, xe, id) {
++			if (t != tile && !(bo->flags & XE_BO_FLAG_GGTTx(t)))
++				continue;
++
++			if (flags & XE_BO_FLAG_FIXED_PLACEMENT) {
++				err = xe_ggtt_insert_bo_at(t->mem.ggtt, bo,
++							   start + bo->size, U64_MAX);
++			} else {
++				err = xe_ggtt_insert_bo(t->mem.ggtt, bo);
++			}
++			if (err)
++				goto err_unlock_put_bo;
+ 		}
+-		if (err)
+-			goto err_unlock_put_bo;
+ 	}
+ 
+ 	return bo;
+@@ -1523,16 +1543,18 @@ struct xe_bo *
+ xe_bo_create_locked_range(struct xe_device *xe,
+ 			  struct xe_tile *tile, struct xe_vm *vm,
+ 			  size_t size, u64 start, u64 end,
+-			  enum ttm_bo_type type, u32 flags)
++			  enum ttm_bo_type type, u32 flags, u64 alignment)
+ {
+-	return __xe_bo_create_locked(xe, tile, vm, size, start, end, 0, type, flags);
++	return __xe_bo_create_locked(xe, tile, vm, size, start, end, 0, type,
++				     flags, alignment);
+ }
+ 
+ struct xe_bo *xe_bo_create_locked(struct xe_device *xe, struct xe_tile *tile,
+ 				  struct xe_vm *vm, size_t size,
+ 				  enum ttm_bo_type type, u32 flags)
+ {
+-	return __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL, 0, type, flags);
++	return __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL, 0, type,
++				     flags, 0);
+ }
+ 
+ struct xe_bo *xe_bo_create_user(struct xe_device *xe, struct xe_tile *tile,
+@@ -1542,7 +1564,7 @@ struct xe_bo *xe_bo_create_user(struct xe_device *xe, struct xe_tile *tile,
+ {
+ 	struct xe_bo *bo = __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL,
+ 						 cpu_caching, ttm_bo_type_device,
+-						 flags | XE_BO_FLAG_USER);
++						 flags | XE_BO_FLAG_USER, 0);
+ 	if (!IS_ERR(bo))
+ 		xe_bo_unlock_vm_held(bo);
+ 
+@@ -1565,6 +1587,17 @@ struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_tile *tile
+ 				      struct xe_vm *vm,
+ 				      size_t size, u64 offset,
+ 				      enum ttm_bo_type type, u32 flags)
++{
++	return xe_bo_create_pin_map_at_aligned(xe, tile, vm, size, offset,
++					       type, flags, 0);
++}
++
++struct xe_bo *xe_bo_create_pin_map_at_aligned(struct xe_device *xe,
++					      struct xe_tile *tile,
++					      struct xe_vm *vm,
++					      size_t size, u64 offset,
++					      enum ttm_bo_type type, u32 flags,
++					      u64 alignment)
+ {
+ 	struct xe_bo *bo;
+ 	int err;
+@@ -1576,7 +1609,8 @@ struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_tile *tile
+ 		flags |= XE_BO_FLAG_GGTT;
+ 
+ 	bo = xe_bo_create_locked_range(xe, tile, vm, size, start, end, type,
+-				       flags | XE_BO_FLAG_NEEDS_CPU_ACCESS);
++				       flags | XE_BO_FLAG_NEEDS_CPU_ACCESS,
++				       alignment);
+ 	if (IS_ERR(bo))
+ 		return bo;
+ 
+@@ -2355,14 +2389,18 @@ void xe_bo_put_commit(struct llist_head *deferred)
+ 
+ void xe_bo_put(struct xe_bo *bo)
+ {
++	struct xe_tile *tile;
++	u8 id;
++
+ 	might_sleep();
+ 	if (bo) {
+ #ifdef CONFIG_PROC_FS
+ 		if (bo->client)
+ 			might_lock(&bo->client->bos_lock);
+ #endif
+-		if (bo->ggtt_node && bo->ggtt_node->ggtt)
+-			might_lock(&bo->ggtt_node->ggtt->lock);
++		for_each_tile(tile, xe_bo_device(bo), id)
++			if (bo->ggtt_node[id] && bo->ggtt_node[id]->ggtt)
++				might_lock(&bo->ggtt_node[id]->ggtt->lock);
+ 		drm_gem_object_put(&bo->ttm.base);
+ 	}
+ }
+diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h
+index d22269a230aa19..d04159c598465a 100644
+--- a/drivers/gpu/drm/xe/xe_bo.h
++++ b/drivers/gpu/drm/xe/xe_bo.h
+@@ -39,10 +39,22 @@
+ #define XE_BO_FLAG_NEEDS_64K		BIT(15)
+ #define XE_BO_FLAG_NEEDS_2M		BIT(16)
+ #define XE_BO_FLAG_GGTT_INVALIDATE	BIT(17)
++#define XE_BO_FLAG_GGTT0                BIT(18)
++#define XE_BO_FLAG_GGTT1                BIT(19)
++#define XE_BO_FLAG_GGTT2                BIT(20)
++#define XE_BO_FLAG_GGTT3                BIT(21)
++#define XE_BO_FLAG_GGTT_ALL             (XE_BO_FLAG_GGTT0 | \
++					 XE_BO_FLAG_GGTT1 | \
++					 XE_BO_FLAG_GGTT2 | \
++					 XE_BO_FLAG_GGTT3)
++
+ /* this one is trigger internally only */
+ #define XE_BO_FLAG_INTERNAL_TEST	BIT(30)
+ #define XE_BO_FLAG_INTERNAL_64K		BIT(31)
+ 
++#define XE_BO_FLAG_GGTTx(tile) \
++	(XE_BO_FLAG_GGTT0 << (tile)->id)
++
+ #define XE_PTE_SHIFT			12
+ #define XE_PAGE_SIZE			(1 << XE_PTE_SHIFT)
+ #define XE_PTE_MASK			(XE_PAGE_SIZE - 1)
+@@ -77,7 +89,7 @@ struct xe_bo *
+ xe_bo_create_locked_range(struct xe_device *xe,
+ 			  struct xe_tile *tile, struct xe_vm *vm,
+ 			  size_t size, u64 start, u64 end,
+-			  enum ttm_bo_type type, u32 flags);
++			  enum ttm_bo_type type, u32 flags, u64 alignment);
+ struct xe_bo *xe_bo_create_locked(struct xe_device *xe, struct xe_tile *tile,
+ 				  struct xe_vm *vm, size_t size,
+ 				  enum ttm_bo_type type, u32 flags);
+@@ -94,6 +106,12 @@ struct xe_bo *xe_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile,
+ struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_tile *tile,
+ 				      struct xe_vm *vm, size_t size, u64 offset,
+ 				      enum ttm_bo_type type, u32 flags);
++struct xe_bo *xe_bo_create_pin_map_at_aligned(struct xe_device *xe,
++					      struct xe_tile *tile,
++					      struct xe_vm *vm,
++					      size_t size, u64 offset,
++					      enum ttm_bo_type type, u32 flags,
++					      u64 alignment);
+ struct xe_bo *xe_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile,
+ 				     const void *data, size_t size,
+ 				     enum ttm_bo_type type, u32 flags);
+@@ -188,14 +206,24 @@ xe_bo_main_addr(struct xe_bo *bo, size_t page_size)
+ }
+ 
+ static inline u32
+-xe_bo_ggtt_addr(struct xe_bo *bo)
++__xe_bo_ggtt_addr(struct xe_bo *bo, u8 tile_id)
+ {
+-	if (XE_WARN_ON(!bo->ggtt_node))
++	struct xe_ggtt_node *ggtt_node = bo->ggtt_node[tile_id];
++
++	if (XE_WARN_ON(!ggtt_node))
+ 		return 0;
+ 
+-	XE_WARN_ON(bo->ggtt_node->base.size > bo->size);
+-	XE_WARN_ON(bo->ggtt_node->base.start + bo->ggtt_node->base.size > (1ull << 32));
+-	return bo->ggtt_node->base.start;
++	XE_WARN_ON(ggtt_node->base.size > bo->size);
++	XE_WARN_ON(ggtt_node->base.start + ggtt_node->base.size > (1ull << 32));
++	return ggtt_node->base.start;
++}
++
++static inline u32
++xe_bo_ggtt_addr(struct xe_bo *bo)
++{
++	xe_assert(xe_bo_device(bo), bo->tile);
++
++	return __xe_bo_ggtt_addr(bo, bo->tile->id);
+ }
+ 
+ int xe_bo_vmap(struct xe_bo *bo);
+diff --git a/drivers/gpu/drm/xe/xe_bo_evict.c b/drivers/gpu/drm/xe/xe_bo_evict.c
+index 8fb2be0610035b..6a40eedd9db104 100644
+--- a/drivers/gpu/drm/xe/xe_bo_evict.c
++++ b/drivers/gpu/drm/xe/xe_bo_evict.c
+@@ -152,11 +152,17 @@ int xe_bo_restore_kernel(struct xe_device *xe)
+ 		}
+ 
+ 		if (bo->flags & XE_BO_FLAG_GGTT) {
+-			struct xe_tile *tile = bo->tile;
++			struct xe_tile *tile;
++			u8 id;
+ 
+-			mutex_lock(&tile->mem.ggtt->lock);
+-			xe_ggtt_map_bo(tile->mem.ggtt, bo);
+-			mutex_unlock(&tile->mem.ggtt->lock);
++			for_each_tile(tile, xe, id) {
++				if (tile != bo->tile && !(bo->flags & XE_BO_FLAG_GGTTx(tile)))
++					continue;
++
++				mutex_lock(&tile->mem.ggtt->lock);
++				xe_ggtt_map_bo(tile->mem.ggtt, bo);
++				mutex_unlock(&tile->mem.ggtt->lock);
++			}
+ 		}
+ 
+ 		/*
+diff --git a/drivers/gpu/drm/xe/xe_bo_types.h b/drivers/gpu/drm/xe/xe_bo_types.h
+index 2ed558ac2264a6..aa298d33c2508f 100644
+--- a/drivers/gpu/drm/xe/xe_bo_types.h
++++ b/drivers/gpu/drm/xe/xe_bo_types.h
+@@ -13,6 +13,7 @@
+ #include <drm/ttm/ttm_execbuf_util.h>
+ #include <drm/ttm/ttm_placement.h>
+ 
++#include "xe_device_types.h"
+ #include "xe_ggtt_types.h"
+ 
+ struct xe_device;
+@@ -39,8 +40,8 @@ struct xe_bo {
+ 	struct ttm_place placements[XE_BO_MAX_PLACEMENTS];
+ 	/** @placement: current placement for this BO */
+ 	struct ttm_placement placement;
+-	/** @ggtt_node: GGTT node if this BO is mapped in the GGTT */
+-	struct xe_ggtt_node *ggtt_node;
++	/** @ggtt_node: Array of GGTT nodes if this BO is mapped in the GGTTs */
++	struct xe_ggtt_node *ggtt_node[XE_MAX_TILES_PER_DEVICE];
+ 	/** @vmap: iosys map of this buffer */
+ 	struct iosys_map vmap;
+ 	/** @ttm_kmap: TTM bo kmap object for internal use only. Keep off. */
+@@ -76,6 +77,11 @@ struct xe_bo {
+ 
+ 	/** @vram_userfault_link: Link into @mem_access.vram_userfault.list */
+ 		struct list_head vram_userfault_link;
++
++	/**
++	 * @min_align: minimum alignment needed for this BO if different from default
++	 */
++	u64 min_align;
+ };
+ 
+ #define intel_bo_to_drm_bo(bo) (&(bo)->ttm.base)
+diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
+index 0c3db53b93d8a9..82da51a6616a18 100644
+--- a/drivers/gpu/drm/xe/xe_device.c
++++ b/drivers/gpu/drm/xe/xe_device.c
+@@ -37,6 +37,7 @@
+ #include "xe_gt_printk.h"
+ #include "xe_gt_sriov_vf.h"
+ #include "xe_guc.h"
++#include "xe_guc_pc.h"
+ #include "xe_hw_engine_group.h"
+ #include "xe_hwmon.h"
+ #include "xe_irq.h"
+@@ -871,31 +872,37 @@ void xe_device_td_flush(struct xe_device *xe)
+ 	if (!IS_DGFX(xe) || GRAPHICS_VER(xe) < 20)
+ 		return;
+ 
+-	if (XE_WA(xe_root_mmio_gt(xe), 16023588340)) {
++	gt = xe_root_mmio_gt(xe);
++	if (XE_WA(gt, 16023588340)) {
++		/* A transient flush is not sufficient: flush the L2 */
+ 		xe_device_l2_flush(xe);
+-		return;
+-	}
+-
+-	for_each_gt(gt, xe, id) {
+-		if (xe_gt_is_media_type(gt))
+-			continue;
+-
+-		if (xe_force_wake_get(gt_to_fw(gt), XE_FW_GT))
+-			return;
+-
+-		xe_mmio_write32(gt, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST);
+-		/*
+-		 * FIXME: We can likely do better here with our choice of
+-		 * timeout. Currently we just assume the worst case, i.e. 150us,
+-		 * which is believed to be sufficient to cover the worst case
+-		 * scenario on current platforms if all cache entries are
+-		 * transient and need to be flushed..
+-		 */
+-		if (xe_mmio_wait32(gt, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST, 0,
+-				   150, NULL, false))
+-			xe_gt_err_once(gt, "TD flush timeout\n");
+-
+-		xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
++	} else {
++		xe_guc_pc_apply_flush_freq_limit(&gt->uc.guc.pc);
++		
++		/* Execute TDF flush on all graphics GTs */
++		for_each_gt(gt, xe, id) {
++			if (xe_gt_is_media_type(gt))
++				continue;
++
++			if (xe_force_wake_get(gt_to_fw(gt), XE_FW_GT))
++				return;
++
++			xe_mmio_write32(gt, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST);
++			/*
++			 * FIXME: We can likely do better here with our choice of
++			 * timeout. Currently we just assume the worst case, i.e. 150us,
++			 * which is believed to be sufficient to cover the worst case
++			 * scenario on current platforms if all cache entries are
++			 * transient and need to be flushed.
++			 */
++			if (xe_mmio_wait32(gt, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST, 0,
++					   150, NULL, false))
++				xe_gt_err_once(gt, "TD flush timeout\n");
++
++			xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
++		}
++		
++		xe_guc_pc_remove_flush_freq_limit(&xe_root_mmio_gt(xe)->uc.guc.pc);
+ 	}
+ }
+ 
+diff --git a/drivers/gpu/drm/xe/xe_ggtt.c b/drivers/gpu/drm/xe/xe_ggtt.c
+index e9820126feb969..76e1092f51d923 100644
+--- a/drivers/gpu/drm/xe/xe_ggtt.c
++++ b/drivers/gpu/drm/xe/xe_ggtt.c
+@@ -605,10 +605,10 @@ void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
+ 	u64 start;
+ 	u64 offset, pte;
+ 
+-	if (XE_WARN_ON(!bo->ggtt_node))
++	if (XE_WARN_ON(!bo->ggtt_node[ggtt->tile->id]))
+ 		return;
+ 
+-	start = bo->ggtt_node->base.start;
++	start = bo->ggtt_node[ggtt->tile->id]->base.start;
+ 
+ 	for (offset = 0; offset < bo->size; offset += XE_PAGE_SIZE) {
+ 		pte = ggtt->pt_ops->pte_encode_bo(bo, offset, pat_index);
+@@ -619,15 +619,16 @@ void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
+ static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
+ 				  u64 start, u64 end)
+ {
++	u64 alignment = bo->min_align > 0 ? bo->min_align : XE_PAGE_SIZE;
++	u8 tile_id = ggtt->tile->id;
+ 	int err;
+-	u64 alignment = XE_PAGE_SIZE;
+ 
+ 	if (xe_bo_is_vram(bo) && ggtt->flags & XE_GGTT_FLAGS_64K)
+ 		alignment = SZ_64K;
+ 
+-	if (XE_WARN_ON(bo->ggtt_node)) {
++	if (XE_WARN_ON(bo->ggtt_node[tile_id])) {
+ 		/* Someone's already inserted this BO in the GGTT */
+-		xe_tile_assert(ggtt->tile, bo->ggtt_node->base.size == bo->size);
++		xe_tile_assert(ggtt->tile, bo->ggtt_node[tile_id]->base.size == bo->size);
+ 		return 0;
+ 	}
+ 
+@@ -637,19 +638,19 @@ static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
+ 
+ 	xe_pm_runtime_get_noresume(tile_to_xe(ggtt->tile));
+ 
+-	bo->ggtt_node = xe_ggtt_node_init(ggtt);
+-	if (IS_ERR(bo->ggtt_node)) {
+-		err = PTR_ERR(bo->ggtt_node);
+-		bo->ggtt_node = NULL;
++	bo->ggtt_node[tile_id] = xe_ggtt_node_init(ggtt);
++	if (IS_ERR(bo->ggtt_node[tile_id])) {
++		err = PTR_ERR(bo->ggtt_node[tile_id]);
++		bo->ggtt_node[tile_id] = NULL;
+ 		goto out;
+ 	}
+ 
+ 	mutex_lock(&ggtt->lock);
+-	err = drm_mm_insert_node_in_range(&ggtt->mm, &bo->ggtt_node->base, bo->size,
+-					  alignment, 0, start, end, 0);
++	err = drm_mm_insert_node_in_range(&ggtt->mm, &bo->ggtt_node[tile_id]->base,
++					  bo->size, alignment, 0, start, end, 0);
+ 	if (err) {
+-		xe_ggtt_node_fini(bo->ggtt_node);
+-		bo->ggtt_node = NULL;
++		xe_ggtt_node_fini(bo->ggtt_node[tile_id]);
++		bo->ggtt_node[tile_id] = NULL;
+ 	} else {
+ 		xe_ggtt_map_bo(ggtt, bo);
+ 	}
+@@ -698,13 +699,15 @@ int xe_ggtt_insert_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
+  */
+ void xe_ggtt_remove_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
+ {
+-	if (XE_WARN_ON(!bo->ggtt_node))
++	u8 tile_id = ggtt->tile->id;
++
++	if (XE_WARN_ON(!bo->ggtt_node[tile_id]))
+ 		return;
+ 
+ 	/* This BO is not currently in the GGTT */
+-	xe_tile_assert(ggtt->tile, bo->ggtt_node->base.size == bo->size);
++	xe_tile_assert(ggtt->tile, bo->ggtt_node[tile_id]->base.size == bo->size);
+ 
+-	xe_ggtt_node_remove(bo->ggtt_node,
++	xe_ggtt_node_remove(bo->ggtt_node[tile_id],
+ 			    bo->flags & XE_BO_FLAG_GGTT_INVALIDATE);
+ }
+ 
+diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c
+index 52df28032a6ffe..96373cdb366be5 100644
+--- a/drivers/gpu/drm/xe/xe_guc.c
++++ b/drivers/gpu/drm/xe/xe_guc.c
+@@ -985,7 +985,7 @@ int xe_guc_mmio_send_recv(struct xe_guc *guc, const u32 *request,
+ 		BUILD_BUG_ON(FIELD_MAX(GUC_HXG_MSG_0_TYPE) != GUC_HXG_TYPE_RESPONSE_SUCCESS);
+ 		BUILD_BUG_ON((GUC_HXG_TYPE_RESPONSE_SUCCESS ^ GUC_HXG_TYPE_RESPONSE_FAILURE) != 1);
+ 
+-		ret = xe_mmio_wait32(gt, reply_reg,  resp_mask, resp_mask,
++		ret = xe_mmio_wait32(gt, reply_reg, resp_mask, resp_mask,
+ 				     1000000, &header, false);
+ 
+ 		if (unlikely(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, header) !=
+@@ -1175,7 +1175,7 @@ void xe_guc_print_info(struct xe_guc *guc, struct drm_printer *p)
+ 
+ 	xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+ 
+-	xe_guc_ct_print(&guc->ct, p, false);
++	xe_guc_ct_print(&guc->ct, p);
+ 	xe_guc_submit_print(guc, p);
+ }
+ 
+diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
+index 1f74f6bd50f319..f1ce4e14dcb5f0 100644
+--- a/drivers/gpu/drm/xe/xe_guc_ct.c
++++ b/drivers/gpu/drm/xe/xe_guc_ct.c
+@@ -25,12 +25,53 @@
+ #include "xe_gt_sriov_pf_monitor.h"
+ #include "xe_gt_tlb_invalidation.h"
+ #include "xe_guc.h"
++#include "xe_guc_log.h"
+ #include "xe_guc_relay.h"
+ #include "xe_guc_submit.h"
+ #include "xe_map.h"
+ #include "xe_pm.h"
+ #include "xe_trace_guc.h"
+ 
++static void receive_g2h(struct xe_guc_ct *ct);
++static void g2h_worker_func(struct work_struct *w);
++static void safe_mode_worker_func(struct work_struct *w);
++static void ct_exit_safe_mode(struct xe_guc_ct *ct);
++
++#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
++enum {
++	/* Internal states, not error conditions */
++	CT_DEAD_STATE_REARM,			/* 0x0001 */
++	CT_DEAD_STATE_CAPTURE,			/* 0x0002 */
++
++	/* Error conditions */
++	CT_DEAD_SETUP,				/* 0x0004 */
++	CT_DEAD_H2G_WRITE,			/* 0x0008 */
++	CT_DEAD_H2G_HAS_ROOM,			/* 0x0010 */
++	CT_DEAD_G2H_READ,			/* 0x0020 */
++	CT_DEAD_G2H_RECV,			/* 0x0040 */
++	CT_DEAD_G2H_RELEASE,			/* 0x0080 */
++	CT_DEAD_DEADLOCK,			/* 0x0100 */
++	CT_DEAD_PROCESS_FAILED,			/* 0x0200 */
++	CT_DEAD_FAST_G2H,			/* 0x0400 */
++	CT_DEAD_PARSE_G2H_RESPONSE,		/* 0x0800 */
++	CT_DEAD_PARSE_G2H_UNKNOWN,		/* 0x1000 */
++	CT_DEAD_PARSE_G2H_ORIGIN,		/* 0x2000 */
++	CT_DEAD_PARSE_G2H_TYPE,			/* 0x4000 */
++};
++
++static void ct_dead_worker_func(struct work_struct *w);
++static void ct_dead_capture(struct xe_guc_ct *ct, struct guc_ctb *ctb, u32 reason_code);
++
++#define CT_DEAD(ct, ctb, reason_code)		ct_dead_capture((ct), (ctb), CT_DEAD_##reason_code)
++#else
++#define CT_DEAD(ct, ctb, reason)			\
++	do {						\
++		struct guc_ctb *_ctb = (ctb);		\
++		if (_ctb)				\
++			_ctb->info.broken = true;	\
++	} while (0)
++#endif
++
+ /* Used when a CT send wants to block and / or receive data */
+ struct g2h_fence {
+ 	u32 *response_buffer;
+@@ -147,14 +188,11 @@ static void guc_ct_fini(struct drm_device *drm, void *arg)
+ {
+ 	struct xe_guc_ct *ct = arg;
+ 
++	ct_exit_safe_mode(ct);
+ 	destroy_workqueue(ct->g2h_wq);
+ 	xa_destroy(&ct->fence_lookup);
+ }
+ 
+-static void receive_g2h(struct xe_guc_ct *ct);
+-static void g2h_worker_func(struct work_struct *w);
+-static void safe_mode_worker_func(struct work_struct *w);
+-
+ static void primelockdep(struct xe_guc_ct *ct)
+ {
+ 	if (!IS_ENABLED(CONFIG_LOCKDEP))
+@@ -182,7 +220,11 @@ int xe_guc_ct_init(struct xe_guc_ct *ct)
+ 	spin_lock_init(&ct->fast_lock);
+ 	xa_init(&ct->fence_lookup);
+ 	INIT_WORK(&ct->g2h_worker, g2h_worker_func);
+-	INIT_DELAYED_WORK(&ct->safe_mode_worker,  safe_mode_worker_func);
++	INIT_DELAYED_WORK(&ct->safe_mode_worker, safe_mode_worker_func);
++#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
++	spin_lock_init(&ct->dead.lock);
++	INIT_WORK(&ct->dead.worker, ct_dead_worker_func);
++#endif
+ 	init_waitqueue_head(&ct->wq);
+ 	init_waitqueue_head(&ct->g2h_fence_wq);
+ 
+@@ -419,10 +461,22 @@ int xe_guc_ct_enable(struct xe_guc_ct *ct)
+ 	if (ct_needs_safe_mode(ct))
+ 		ct_enter_safe_mode(ct);
+ 
++#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
++	/*
++	 * The CT has now been reset so the dumper can be re-armed
++	 * after any existing dead state has been dumped.
++	 */
++	spin_lock_irq(&ct->dead.lock);
++	if (ct->dead.reason)
++		ct->dead.reason |= (1 << CT_DEAD_STATE_REARM);
++	spin_unlock_irq(&ct->dead.lock);
++#endif
++
+ 	return 0;
+ 
+ err_out:
+ 	xe_gt_err(gt, "Failed to enable GuC CT (%pe)\n", ERR_PTR(err));
++	CT_DEAD(ct, NULL, SETUP);
+ 
+ 	return err;
+ }
+@@ -469,6 +523,19 @@ static bool h2g_has_room(struct xe_guc_ct *ct, u32 cmd_len)
+ 
+ 	if (cmd_len > h2g->info.space) {
+ 		h2g->info.head = desc_read(ct_to_xe(ct), h2g, head);
++
++		if (h2g->info.head > h2g->info.size) {
++			struct xe_device *xe = ct_to_xe(ct);
++			u32 desc_status = desc_read(xe, h2g, status);
++
++			desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
++
++			xe_gt_err(ct_to_gt(ct), "CT: invalid head offset %u >= %u\n",
++				  h2g->info.head, h2g->info.size);
++			CT_DEAD(ct, h2g, H2G_HAS_ROOM);
++			return false;
++		}
++
+ 		h2g->info.space = CIRC_SPACE(h2g->info.tail, h2g->info.head,
+ 					     h2g->info.size) -
+ 				  h2g->info.resv_space;
+@@ -524,10 +591,24 @@ static void __g2h_reserve_space(struct xe_guc_ct *ct, u32 g2h_len, u32 num_g2h)
+ 
+ static void __g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len)
+ {
++	bool bad = false;
++
+ 	lockdep_assert_held(&ct->fast_lock);
+-	xe_gt_assert(ct_to_gt(ct), ct->ctbs.g2h.info.space + g2h_len <=
+-		     ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space);
+-	xe_gt_assert(ct_to_gt(ct), ct->g2h_outstanding);
++
++	bad = ct->ctbs.g2h.info.space + g2h_len >
++		     ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space;
++	bad |= !ct->g2h_outstanding;
++
++	if (bad) {
++		xe_gt_err(ct_to_gt(ct), "Invalid G2H release: %d + %d vs %d - %d -> %d vs %d, outstanding = %d!\n",
++			  ct->ctbs.g2h.info.space, g2h_len,
++			  ct->ctbs.g2h.info.size, ct->ctbs.g2h.info.resv_space,
++			  ct->ctbs.g2h.info.space + g2h_len,
++			  ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space,
++			  ct->g2h_outstanding);
++		CT_DEAD(ct, &ct->ctbs.g2h, G2H_RELEASE);
++		return;
++	}
+ 
+ 	ct->ctbs.g2h.info.space += g2h_len;
+ 	if (!--ct->g2h_outstanding)
+@@ -554,12 +635,43 @@ static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len,
+ 	u32 full_len;
+ 	struct iosys_map map = IOSYS_MAP_INIT_OFFSET(&h2g->cmds,
+ 							 tail * sizeof(u32));
++	u32 desc_status;
+ 
+ 	full_len = len + GUC_CTB_HDR_LEN;
+ 
+ 	lockdep_assert_held(&ct->lock);
+ 	xe_gt_assert(gt, full_len <= GUC_CTB_MSG_MAX_LEN);
+-	xe_gt_assert(gt, tail <= h2g->info.size);
++
++	desc_status = desc_read(xe, h2g, status);
++	if (desc_status) {
++		xe_gt_err(gt, "CT write: non-zero status: %u\n", desc_status);
++		goto corrupted;
++	}
++
++	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) {
++		u32 desc_tail = desc_read(xe, h2g, tail);
++		u32 desc_head = desc_read(xe, h2g, head);
++
++		if (tail != desc_tail) {
++			desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_MISMATCH);
++			xe_gt_err(gt, "CT write: tail was modified %u != %u\n", desc_tail, tail);
++			goto corrupted;
++		}
++
++		if (tail > h2g->info.size) {
++			desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
++			xe_gt_err(gt, "CT write: tail out of range: %u vs %u\n",
++				  tail, h2g->info.size);
++			goto corrupted;
++		}
++
++		if (desc_head >= h2g->info.size) {
++			desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
++			xe_gt_err(gt, "CT write: invalid head offset %u >= %u\n",
++				  desc_head, h2g->info.size);
++			goto corrupted;
++		}
++	}
+ 
+ 	/* Command will wrap, zero fill (NOPs), return and check credits again */
+ 	if (tail + full_len > h2g->info.size) {
+@@ -612,6 +724,10 @@ static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len,
+ 			     desc_read(xe, h2g, head), h2g->info.tail);
+ 
+ 	return 0;
++
++corrupted:
++	CT_DEAD(ct, &ct->ctbs.h2g, H2G_WRITE);
++	return -EPIPE;
+ }
+ 
+ /*
+@@ -719,7 +835,6 @@ static int guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len,
+ {
+ 	struct xe_device *xe = ct_to_xe(ct);
+ 	struct xe_gt *gt = ct_to_gt(ct);
+-	struct drm_printer p = xe_gt_info_printer(gt);
+ 	unsigned int sleep_period_ms = 1;
+ 	int ret;
+ 
+@@ -772,8 +887,13 @@ static int guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len,
+ 			goto broken;
+ #undef g2h_avail
+ 
+-		if (dequeue_one_g2h(ct) < 0)
++		ret = dequeue_one_g2h(ct);
++		if (ret < 0) {
++			if (ret != -ECANCELED)
++				xe_gt_err(ct_to_gt(ct), "CTB receive failed (%pe)",
++					  ERR_PTR(ret));
+ 			goto broken;
++		}
+ 
+ 		goto try_again;
+ 	}
+@@ -782,8 +902,7 @@ static int guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len,
+ 
+ broken:
+ 	xe_gt_err(gt, "No forward progress on H2G, reset required\n");
+-	xe_guc_ct_print(ct, &p, true);
+-	ct->ctbs.h2g.info.broken = true;
++	CT_DEAD(ct, &ct->ctbs.h2g, DEADLOCK);
+ 
+ 	return -EDEADLK;
+ }
+@@ -851,7 +970,7 @@ static bool retry_failure(struct xe_guc_ct *ct, int ret)
+ #define ct_alive(ct)	\
+ 	(xe_guc_ct_enabled(ct) && !ct->ctbs.h2g.info.broken && \
+ 	 !ct->ctbs.g2h.info.broken)
+-	if (!wait_event_interruptible_timeout(ct->wq, ct_alive(ct),  HZ * 5))
++	if (!wait_event_interruptible_timeout(ct->wq, ct_alive(ct), HZ * 5))
+ 		return false;
+ #undef ct_alive
+ 
+@@ -1049,6 +1168,7 @@ static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len)
+ 		else
+ 			xe_gt_err(gt, "unexpected response %u for FAST_REQ H2G fence 0x%x!\n",
+ 				  type, fence);
++		CT_DEAD(ct, NULL, PARSE_G2H_RESPONSE);
+ 
+ 		return -EPROTO;
+ 	}
+@@ -1056,6 +1176,7 @@ static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len)
+ 	g2h_fence = xa_erase(&ct->fence_lookup, fence);
+ 	if (unlikely(!g2h_fence)) {
+ 		/* Don't tear down channel, as send could've timed out */
++		/* CT_DEAD(ct, NULL, PARSE_G2H_UNKNOWN); */
+ 		xe_gt_warn(gt, "G2H fence (%u) not found!\n", fence);
+ 		g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
+ 		return 0;
+@@ -1100,7 +1221,7 @@ static int parse_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
+ 	if (unlikely(origin != GUC_HXG_ORIGIN_GUC)) {
+ 		xe_gt_err(gt, "G2H channel broken on read, origin=%u, reset required\n",
+ 			  origin);
+-		ct->ctbs.g2h.info.broken = true;
++		CT_DEAD(ct, &ct->ctbs.g2h, PARSE_G2H_ORIGIN);
+ 
+ 		return -EPROTO;
+ 	}
+@@ -1118,7 +1239,7 @@ static int parse_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
+ 	default:
+ 		xe_gt_err(gt, "G2H channel broken on read, type=%u, reset required\n",
+ 			  type);
+-		ct->ctbs.g2h.info.broken = true;
++		CT_DEAD(ct, &ct->ctbs.g2h, PARSE_G2H_TYPE);
+ 
+ 		ret = -EOPNOTSUPP;
+ 	}
+@@ -1195,9 +1316,11 @@ static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
+ 		xe_gt_err(gt, "unexpected G2H action 0x%04x\n", action);
+ 	}
+ 
+-	if (ret)
++	if (ret) {
+ 		xe_gt_err(gt, "G2H action 0x%04x failed (%pe)\n",
+ 			  action, ERR_PTR(ret));
++		CT_DEAD(ct, NULL, PROCESS_FAILED);
++	}
+ 
+ 	return 0;
+ }
+@@ -1207,7 +1330,7 @@ static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path)
+ 	struct xe_device *xe = ct_to_xe(ct);
+ 	struct xe_gt *gt = ct_to_gt(ct);
+ 	struct guc_ctb *g2h = &ct->ctbs.g2h;
+-	u32 tail, head, len;
++	u32 tail, head, len, desc_status;
+ 	s32 avail;
+ 	u32 action;
+ 	u32 *hxg;
+@@ -1226,6 +1349,63 @@ static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path)
+ 
+ 	xe_gt_assert(gt, xe_guc_ct_enabled(ct));
+ 
++	desc_status = desc_read(xe, g2h, status);
++	if (desc_status) {
++		if (desc_status & GUC_CTB_STATUS_DISABLED) {
++			/*
++			 * Potentially valid if a CLIENT_RESET request resulted in
++			 * contexts/engines being reset. But should never happen as
++			 * no contexts should be active when CLIENT_RESET is sent.
++			 */
++			xe_gt_err(gt, "CT read: unexpected G2H after GuC has stopped!\n");
++			desc_status &= ~GUC_CTB_STATUS_DISABLED;
++		}
++
++		if (desc_status) {
++			xe_gt_err(gt, "CT read: non-zero status: %u\n", desc_status);
++			goto corrupted;
++		}
++	}
++
++	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) {
++		u32 desc_tail = desc_read(xe, g2h, tail);
++		/*
++		u32 desc_head = desc_read(xe, g2h, head);
++
++		 * info.head and desc_head are updated back-to-back at the end of
++		 * this function and nowhere else. Hence, they cannot be different
++		 * unless two g2h_read calls are running concurrently. Which is not
++		 * possible because it is guarded by ct->fast_lock. And yet, some
++	 * discrete platforms are regularly hitting this error :(.
++		 *
++		 * desc_head rolling backwards shouldn't cause any noticeable
++		 * problems - just a delay in GuC being allowed to proceed past that
++		 * point in the queue. So for now, just disable the error until it
++		 * can be root caused.
++		 *
++		if (g2h->info.head != desc_head) {
++			desc_write(xe, g2h, status, desc_status | GUC_CTB_STATUS_MISMATCH);
++			xe_gt_err(gt, "CT read: head was modified %u != %u\n",
++				  desc_head, g2h->info.head);
++			goto corrupted;
++		}
++		 */
++
++		if (g2h->info.head > g2h->info.size) {
++			desc_write(xe, g2h, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
++			xe_gt_err(gt, "CT read: head out of range: %u vs %u\n",
++				  g2h->info.head, g2h->info.size);
++			goto corrupted;
++		}
++
++		if (desc_tail >= g2h->info.size) {
++			desc_write(xe, g2h, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
++			xe_gt_err(gt, "CT read: invalid tail offset %u >= %u\n",
++				  desc_tail, g2h->info.size);
++			goto corrupted;
++		}
++	}
++
+ 	/* Calculate DW available to read */
+ 	tail = desc_read(xe, g2h, tail);
+ 	avail = tail - g2h->info.head;
+@@ -1242,9 +1422,7 @@ static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path)
+ 	if (len > avail) {
+ 		xe_gt_err(gt, "G2H channel broken on read, avail=%d, len=%d, reset required\n",
+ 			  avail, len);
+-		g2h->info.broken = true;
+-
+-		return -EPROTO;
++		goto corrupted;
+ 	}
+ 
+ 	head = (g2h->info.head + 1) % g2h->info.size;
+@@ -1290,6 +1468,10 @@ static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path)
+ 			     action, len, g2h->info.head, tail);
+ 
+ 	return len;
++
++corrupted:
++	CT_DEAD(ct, &ct->ctbs.g2h, G2H_READ);
++	return -EPROTO;
+ }
+ 
+ static void g2h_fast_path(struct xe_guc_ct *ct, u32 *msg, u32 len)
+@@ -1316,9 +1498,11 @@ static void g2h_fast_path(struct xe_guc_ct *ct, u32 *msg, u32 len)
+ 		xe_gt_warn(gt, "NOT_POSSIBLE");
+ 	}
+ 
+-	if (ret)
++	if (ret) {
+ 		xe_gt_err(gt, "G2H action 0x%04x failed (%pe)\n",
+ 			  action, ERR_PTR(ret));
++		CT_DEAD(ct, NULL, FAST_G2H);
++	}
+ }
+ 
+ /**
+@@ -1378,7 +1562,6 @@ static int dequeue_one_g2h(struct xe_guc_ct *ct)
+ 
+ static void receive_g2h(struct xe_guc_ct *ct)
+ {
+-	struct xe_gt *gt = ct_to_gt(ct);
+ 	bool ongoing;
+ 	int ret;
+ 
+@@ -1415,9 +1598,8 @@ static void receive_g2h(struct xe_guc_ct *ct)
+ 		mutex_unlock(&ct->lock);
+ 
+ 		if (unlikely(ret == -EPROTO || ret == -EOPNOTSUPP)) {
+-			struct drm_printer p = xe_gt_info_printer(gt);
+-
+-			xe_guc_ct_print(ct, &p, false);
++			xe_gt_err(ct_to_gt(ct), "CT dequeue failed: %d", ret);
++			CT_DEAD(ct, NULL, G2H_RECV);
+ 			kick_reset(ct);
+ 		}
+ 	} while (ret == 1);
+@@ -1445,9 +1627,8 @@ static void guc_ctb_snapshot_capture(struct xe_device *xe, struct guc_ctb *ctb,
+ 
+ 	snapshot->cmds = kmalloc_array(ctb->info.size, sizeof(u32),
+ 				       atomic ? GFP_ATOMIC : GFP_KERNEL);
+-
+ 	if (!snapshot->cmds) {
+-		drm_err(&xe->drm, "Skipping CTB commands snapshot. Only CTB info will be available.\n");
++		drm_err(&xe->drm, "Skipping CTB commands snapshot. Only CT info will be available.\n");
+ 		return;
+ 	}
+ 
+@@ -1528,7 +1709,7 @@ struct xe_guc_ct_snapshot *xe_guc_ct_snapshot_capture(struct xe_guc_ct *ct,
+ 			   atomic ? GFP_ATOMIC : GFP_KERNEL);
+ 
+ 	if (!snapshot) {
+-		drm_err(&xe->drm, "Skipping CTB snapshot entirely.\n");
++		xe_gt_err(ct_to_gt(ct), "Skipping CTB snapshot entirely.\n");
+ 		return NULL;
+ 	}
+ 
+@@ -1592,16 +1773,119 @@ void xe_guc_ct_snapshot_free(struct xe_guc_ct_snapshot *snapshot)
+  * xe_guc_ct_print - GuC CT Print.
+  * @ct: GuC CT.
+  * @p: drm_printer where it will be printed out.
+- * @atomic: Boolean to indicate if this is called from atomic context like
+- * reset or CTB handler or from some regular path like debugfs.
+  *
+  * This function quickly captures a snapshot and immediately prints it out.
+  */
+-void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool atomic)
++void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p)
+ {
+ 	struct xe_guc_ct_snapshot *snapshot;
+ 
+-	snapshot = xe_guc_ct_snapshot_capture(ct, atomic);
++	snapshot = xe_guc_ct_snapshot_capture(ct, false);
+ 	xe_guc_ct_snapshot_print(snapshot, p);
+ 	xe_guc_ct_snapshot_free(snapshot);
+ }
++
++#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
++static void ct_dead_capture(struct xe_guc_ct *ct, struct guc_ctb *ctb, u32 reason_code)
++{
++	struct xe_guc_log_snapshot *snapshot_log;
++	struct xe_guc_ct_snapshot *snapshot_ct;
++	struct xe_guc *guc = ct_to_guc(ct);
++	unsigned long flags;
++	bool have_capture;
++
++	if (ctb)
++		ctb->info.broken = true;
++
++	/* Ignore further errors after the first dump until a reset */
++	if (ct->dead.reported)
++		return;
++
++	spin_lock_irqsave(&ct->dead.lock, flags);
++
++	/* And only capture one dump at a time */
++	have_capture = ct->dead.reason & (1 << CT_DEAD_STATE_CAPTURE);
++	ct->dead.reason |= (1 << reason_code) |
++			   (1 << CT_DEAD_STATE_CAPTURE);
++
++	spin_unlock_irqrestore(&ct->dead.lock, flags);
++
++	if (have_capture)
++		return;
++
++	snapshot_log = xe_guc_log_snapshot_capture(&guc->log, true);
++	snapshot_ct = xe_guc_ct_snapshot_capture((ct), true);
++
++	spin_lock_irqsave(&ct->dead.lock, flags);
++
++	if (ct->dead.snapshot_log || ct->dead.snapshot_ct) {
++		xe_gt_err(ct_to_gt(ct), "Got unexpected dead CT capture!\n");
++		xe_guc_log_snapshot_free(snapshot_log);
++		xe_guc_ct_snapshot_free(snapshot_ct);
++	} else {
++		ct->dead.snapshot_log = snapshot_log;
++		ct->dead.snapshot_ct = snapshot_ct;
++	}
++
++	spin_unlock_irqrestore(&ct->dead.lock, flags);
++
++	queue_work(system_unbound_wq, &(ct)->dead.worker);
++}
++
++static void ct_dead_print(struct xe_dead_ct *dead)
++{
++	struct xe_guc_ct *ct = container_of(dead, struct xe_guc_ct, dead);
++	struct xe_device *xe = ct_to_xe(ct);
++	struct xe_gt *gt = ct_to_gt(ct);
++	static int g_count;
++	struct drm_printer ip = xe_gt_info_printer(gt);
++	struct drm_printer lp = drm_line_printer(&ip, "Capture", ++g_count);
++
++	if (!dead->reason) {
++		xe_gt_err(gt, "CTB is dead for no reason!?\n");
++		return;
++	}
++
++	drm_printf(&lp, "CTB is dead - reason=0x%X\n", dead->reason);
++
++	/* Can't generate a genuine core dump at this point, so just do the good bits */
++	drm_puts(&lp, "**** Xe Device Coredump ****\n");
++	xe_device_snapshot_print(xe, &lp);
++
++	drm_printf(&lp, "**** GT #%d ****\n", gt->info.id);
++	drm_printf(&lp, "\tTile: %d\n", gt->tile->id);
++
++	drm_puts(&lp, "**** GuC Log ****\n");
++	xe_guc_log_snapshot_print(dead->snapshot_log, &lp);
++
++	drm_puts(&lp, "**** GuC CT ****\n");
++	xe_guc_ct_snapshot_print(dead->snapshot_ct, &lp);
++
++	drm_puts(&lp, "Done.\n");
++}
++
++static void ct_dead_worker_func(struct work_struct *w)
++{
++	struct xe_guc_ct *ct = container_of(w, struct xe_guc_ct, dead.worker);
++
++	if (!ct->dead.reported) {
++		ct->dead.reported = true;
++		ct_dead_print(&ct->dead);
++	}
++
++	spin_lock_irq(&ct->dead.lock);
++
++	xe_guc_log_snapshot_free(ct->dead.snapshot_log);
++	ct->dead.snapshot_log = NULL;
++	xe_guc_ct_snapshot_free(ct->dead.snapshot_ct);
++	ct->dead.snapshot_ct = NULL;
++
++	if (ct->dead.reason & (1 << CT_DEAD_STATE_REARM)) {
++		/* A reset has occurred so re-arm the error reporting */
++		ct->dead.reason = 0;
++		ct->dead.reported = false;
++	}
++
++	spin_unlock_irq(&ct->dead.lock);
++}
++#endif
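
Taken together, the xe_guc_ct.c additions above implement a capture-once crash dumper: error sites OR a reason bit into a mask under a spinlock, only the first error takes (possibly atomic-context) snapshots, a worker does the slow printing, and a reset re-arms the mechanism. The sketch below models just that shape; struct dead_state and the REARM_/CAPTURE_/REASON_ names are invented, and this is an illustration of the pattern rather than the xe code.

#include <linux/bits.h>
#include <linux/container_of.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

enum { REARM_BIT, CAPTURE_BIT, REASON_FIRST /* real reason codes follow */ };

struct dead_state {
	spinlock_t lock;		/* protects @reason and snapshot handoff */
	unsigned int reason;		/* bit mask of reason codes */
	bool reported;			/* one report per error sequence */
	struct work_struct worker;
};

static void dead_capture(struct dead_state *d, unsigned int reason_code)
{
	unsigned long flags;
	bool already;

	if (d->reported)		/* ignore follow-on errors until re-armed */
		return;

	spin_lock_irqsave(&d->lock, flags);
	already = d->reason & BIT(CAPTURE_BIT);
	d->reason |= BIT(reason_code) | BIT(CAPTURE_BIT);
	spin_unlock_irqrestore(&d->lock, flags);

	if (already)			/* only the first error snapshots state */
		return;

	/* take GFP_ATOMIC snapshots here, then defer the slow dump */
	queue_work(system_unbound_wq, &d->worker);
}

static void dead_worker(struct work_struct *w)
{
	struct dead_state *d = container_of(w, struct dead_state, worker);

	if (!d->reported) {
		d->reported = true;
		/* print the captured snapshots from process context */
	}

	spin_lock_irq(&d->lock);
	if (d->reason & BIT(REARM_BIT)) {	/* a reset happened: re-arm */
		d->reason = 0;
		d->reported = false;
	}
	spin_unlock_irq(&d->lock);
}
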
+diff --git a/drivers/gpu/drm/xe/xe_guc_ct.h b/drivers/gpu/drm/xe/xe_guc_ct.h
+index 13e316668e9012..c7ac9407b861ea 100644
+--- a/drivers/gpu/drm/xe/xe_guc_ct.h
++++ b/drivers/gpu/drm/xe/xe_guc_ct.h
+@@ -21,7 +21,7 @@ xe_guc_ct_snapshot_capture(struct xe_guc_ct *ct, bool atomic);
+ void xe_guc_ct_snapshot_print(struct xe_guc_ct_snapshot *snapshot,
+ 			      struct drm_printer *p);
+ void xe_guc_ct_snapshot_free(struct xe_guc_ct_snapshot *snapshot);
+-void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool atomic);
++void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p);
+ 
+ static inline bool xe_guc_ct_initialized(struct xe_guc_ct *ct)
+ {
+diff --git a/drivers/gpu/drm/xe/xe_guc_ct_types.h b/drivers/gpu/drm/xe/xe_guc_ct_types.h
+index 761cb903129843..85e127ec91d7af 100644
+--- a/drivers/gpu/drm/xe/xe_guc_ct_types.h
++++ b/drivers/gpu/drm/xe/xe_guc_ct_types.h
+@@ -86,6 +86,24 @@ enum xe_guc_ct_state {
+ 	XE_GUC_CT_STATE_ENABLED,
+ };
+ 
++#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
++/** struct xe_dead_ct - Information for debugging a dead CT */
++struct xe_dead_ct {
++	/** @lock: protects memory allocation/free operations, and @reason updates */
++	spinlock_t lock;
++	/** @reason: bit mask of CT_DEAD_* reason codes */
++	unsigned int reason;
++	/** @reported: for preventing multiple dumps per error sequence */
++	bool reported;
++	/** @worker: worker thread to get out of interrupt context before dumping */
++	struct work_struct worker;
++	/** @snapshot_ct: copy of CT state and CTB content at point of error */
++	struct xe_guc_ct_snapshot *snapshot_ct;
++	/** @snapshot_log: copy of GuC log at point of error */
++	struct xe_guc_log_snapshot *snapshot_log;
++};
++#endif
++
+ /**
+  * struct xe_guc_ct - GuC command transport (CT) layer
+  *
+@@ -128,6 +146,11 @@ struct xe_guc_ct {
+ 	u32 msg[GUC_CTB_MSG_MAX_LEN];
+ 	/** @fast_msg: Message buffer */
+ 	u32 fast_msg[GUC_CTB_MSG_MAX_LEN];
++
++#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
++	/** @dead: information for debugging dead CTs */
++	struct xe_dead_ct dead;
++#endif
+ };
+ 
+ #endif
+diff --git a/drivers/gpu/drm/xe/xe_guc_pc.c b/drivers/gpu/drm/xe/xe_guc_pc.c
+index f978da8be35c24..af02803c145bf1 100644
+--- a/drivers/gpu/drm/xe/xe_guc_pc.c
++++ b/drivers/gpu/drm/xe/xe_guc_pc.c
+@@ -6,6 +6,9 @@
+ #include "xe_guc_pc.h"
+ 
+ #include <linux/delay.h>
++#include <linux/jiffies.h>
++#include <linux/ktime.h>
++#include <linux/wait_bit.h>
+ 
+ #include <drm/drm_managed.h>
+ #include <generated/xe_wa_oob.h>
+@@ -47,6 +50,12 @@
+ 
+ #define LNL_MERT_FREQ_CAP	800
+ #define BMG_MERT_FREQ_CAP	2133
++#define BMG_MIN_FREQ		1200
++#define BMG_MERT_FLUSH_FREQ_CAP	2600
++
++#define SLPC_RESET_TIMEOUT_MS 5 /* roughly 5ms, but no need for precision */
++#define SLPC_RESET_EXTENDED_TIMEOUT_MS 1000 /* To be used only at pc_start */
++#define SLPC_ACT_FREQ_TIMEOUT_MS 100
+ 
+ /**
+  * DOC: GuC Power Conservation (PC)
+@@ -133,6 +142,36 @@ static int wait_for_pc_state(struct xe_guc_pc *pc,
+ 	return -ETIMEDOUT;
+ }
+ 
++static int wait_for_flush_complete(struct xe_guc_pc *pc)
++{
++	const unsigned long timeout = msecs_to_jiffies(30);
++
++	if (!wait_var_event_timeout(&pc->flush_freq_limit,
++				    !atomic_read(&pc->flush_freq_limit),
++				    timeout))
++		return -ETIMEDOUT;
++
++	return 0;
++}
++
++static int wait_for_act_freq_limit(struct xe_guc_pc *pc, u32 freq)
++{
++	int timeout_us = SLPC_ACT_FREQ_TIMEOUT_MS * USEC_PER_MSEC;
++	int slept, wait = 10;
++
++	for (slept = 0; slept < timeout_us;) {
++		if (xe_guc_pc_get_act_freq(pc) <= freq)
++			return 0;
++
++		usleep_range(wait, wait << 1);
++		slept += wait;
++		wait <<= 1;
++		if (slept + wait > timeout_us)
++			wait = timeout_us - slept;
++	}
++
++	return -ETIMEDOUT;
++}
+ static int pc_action_reset(struct xe_guc_pc *pc)
+ {
+ 	struct xe_guc_ct *ct = pc_to_ct(pc);
+@@ -584,6 +623,11 @@ int xe_guc_pc_set_max_freq(struct xe_guc_pc *pc, u32 freq)
+ {
+ 	int ret;
+ 
++	if (XE_WA(pc_to_gt(pc), 22019338487)) {
++		if (wait_for_flush_complete(pc) != 0)
++			return -EAGAIN;
++	}
++
+ 	mutex_lock(&pc->freq_lock);
+ 	if (!pc->freq_ready) {
+ 		/* Might be in the middle of a gt reset */
+@@ -793,6 +837,106 @@ static int pc_adjust_requested_freq(struct xe_guc_pc *pc)
+ 	return ret;
+ }
+ 
++static bool needs_flush_freq_limit(struct xe_guc_pc *pc)
++{
++	struct xe_gt *gt = pc_to_gt(pc);
++
++	return XE_WA(gt, 22019338487) &&
++		pc->rp0_freq > BMG_MERT_FLUSH_FREQ_CAP;
++}
++
++/**
++ * xe_guc_pc_apply_flush_freq_limit() - Limit max GT freq during L2 flush
++ * @pc: the xe_guc_pc object
++ *
++ * As per the WA, reduce max GT frequency during L2 cache flush
++ */
++void xe_guc_pc_apply_flush_freq_limit(struct xe_guc_pc *pc)
++{
++	struct xe_gt *gt = pc_to_gt(pc);
++	u32 max_freq;
++	int ret;
++
++	if (!needs_flush_freq_limit(pc))
++		return;
++
++	mutex_lock(&pc->freq_lock);
++
++	if (!pc->freq_ready) {
++		mutex_unlock(&pc->freq_lock);
++		return;
++	}
++
++	ret = pc_action_query_task_state(pc);
++	if (ret) {
++		mutex_unlock(&pc->freq_lock);
++		return;
++	}
++
++	max_freq = pc_get_max_freq(pc);
++	if (max_freq > BMG_MERT_FLUSH_FREQ_CAP) {
++		ret = pc_set_max_freq(pc, BMG_MERT_FLUSH_FREQ_CAP);
++		if (ret) {
++			xe_gt_err_once(gt, "Failed to cap max freq on flush to %u, %pe\n",
++				       BMG_MERT_FLUSH_FREQ_CAP, ERR_PTR(ret));
++			mutex_unlock(&pc->freq_lock);
++			return;
++		}
++
++		atomic_set(&pc->flush_freq_limit, 1);
++
++		/*
++		 * If user has previously changed max freq, stash that value to
++		 * restore later, otherwise use the current max. New user
++		 * requests wait on flush.
++		 */
++		if (pc->user_requested_max != 0)
++			pc->stashed_max_freq = pc->user_requested_max;
++		else
++			pc->stashed_max_freq = max_freq;
++	}
++
++	mutex_unlock(&pc->freq_lock);
++
++	/*
++	 * Wait for actual freq to go below the flush cap: even if the previous
++	 * max was below cap, the current one might still be above it
++	 */
++	ret = wait_for_act_freq_limit(pc, BMG_MERT_FLUSH_FREQ_CAP);
++	if (ret)
++		xe_gt_err_once(gt, "Actual freq did not reduce to %u, %pe\n",
++			       BMG_MERT_FLUSH_FREQ_CAP, ERR_PTR(ret));
++}
++
++/**
++ * xe_guc_pc_remove_flush_freq_limit() - Remove max GT freq limit after L2 flush completes.
++ * @pc: the xe_guc_pc object
++ *
++ * Restore the previous GT max frequency value.
++ */
++void xe_guc_pc_remove_flush_freq_limit(struct xe_guc_pc *pc)
++{
++	struct xe_gt *gt = pc_to_gt(pc);
++	int ret = 0;
++
++	if (!needs_flush_freq_limit(pc))
++		return;
++
++	if (!atomic_read(&pc->flush_freq_limit))
++		return;
++
++	mutex_lock(&pc->freq_lock);
++
++	ret = pc_set_max_freq(&gt->uc.guc.pc, pc->stashed_max_freq);
++	if (ret)
++		xe_gt_err_once(gt, "Failed to restore max freq %u:%d",
++			       pc->stashed_max_freq, ret);
++
++	atomic_set(&pc->flush_freq_limit, 0);
++	mutex_unlock(&pc->freq_lock);
++	wake_up_var(&pc->flush_freq_limit);
++}
++
+ static int pc_set_mert_freq_cap(struct xe_guc_pc *pc)
+ {
+ 	int ret = 0;
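
wait_for_act_freq_limit() above is a bounded poll with exponential backoff: the sleep interval doubles each round, and the final interval is clamped so the loop never sleeps past its total budget. Here is the same loop shape as self-contained userspace C; wait_with_backoff() and the ready() callback are invented stand-ins, with plain usleep() in place of the kernel's usleep_range().

#include <stdbool.h>
#include <unistd.h>

static int wait_with_backoff(bool (*ready)(void), int timeout_us)
{
	int slept, wait = 10;

	for (slept = 0; slept < timeout_us;) {
		if (ready())
			return 0;

		usleep(wait);
		slept += wait;
		wait <<= 1;			/* back off: 10, 20, 40, ... us */
		if (slept + wait > timeout_us)
			wait = timeout_us - slept;	/* clamp the last sleep */
	}

	return -1;				/* timed out */
}

static bool never_ready(void) { return false; }

int main(void)
{
	/* With a 1000us budget this sleeps ~10+20+40+... then gives up,
	 * so the expected result here is the timeout return.
	 */
	return wait_with_backoff(never_ready, 1000) ? 0 : 1;
}
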
+diff --git a/drivers/gpu/drm/xe/xe_guc_pc.h b/drivers/gpu/drm/xe/xe_guc_pc.h
+index efda432fadfc8e..7154b3aab0d84c 100644
+--- a/drivers/gpu/drm/xe/xe_guc_pc.h
++++ b/drivers/gpu/drm/xe/xe_guc_pc.h
+@@ -34,5 +34,7 @@ u64 xe_guc_pc_mc6_residency(struct xe_guc_pc *pc);
+ void xe_guc_pc_init_early(struct xe_guc_pc *pc);
+ int xe_guc_pc_restore_stashed_freq(struct xe_guc_pc *pc);
+ void xe_guc_pc_raise_unslice(struct xe_guc_pc *pc);
++void xe_guc_pc_apply_flush_freq_limit(struct xe_guc_pc *pc);
++void xe_guc_pc_remove_flush_freq_limit(struct xe_guc_pc *pc);
+ 
+ #endif /* _XE_GUC_PC_H_ */
+diff --git a/drivers/gpu/drm/xe/xe_guc_pc_types.h b/drivers/gpu/drm/xe/xe_guc_pc_types.h
+index 13810be015db54..5b86d91296cb96 100644
+--- a/drivers/gpu/drm/xe/xe_guc_pc_types.h
++++ b/drivers/gpu/drm/xe/xe_guc_pc_types.h
+@@ -15,6 +15,8 @@
+ struct xe_guc_pc {
+ 	/** @bo: GGTT buffer object that is shared with GuC PC */
+ 	struct xe_bo *bo;
++	/** @flush_freq_limit: 1 when max freq changes are limited by driver */
++	atomic_t flush_freq_limit;
+ 	/** @rp0_freq: HW RP0 frequency - The Maximum one */
+ 	u32 rp0_freq;
+ 	/** @rpe_freq: HW RPe frequency - The Efficient one */
+diff --git a/drivers/gpu/drm/xe/xe_irq.c b/drivers/gpu/drm/xe/xe_irq.c
+index 5f2c368c35adb1..14c3a476597a74 100644
+--- a/drivers/gpu/drm/xe/xe_irq.c
++++ b/drivers/gpu/drm/xe/xe_irq.c
+@@ -173,7 +173,7 @@ void xe_irq_enable_hwe(struct xe_gt *gt)
+ 		if (ccs_mask & (BIT(0)|BIT(1)))
+ 			xe_mmio_write32(gt, CCS0_CCS1_INTR_MASK, ~dmask);
+ 		if (ccs_mask & (BIT(2)|BIT(3)))
+-			xe_mmio_write32(gt,  CCS2_CCS3_INTR_MASK, ~dmask);
++			xe_mmio_write32(gt, CCS2_CCS3_INTR_MASK, ~dmask);
+ 	}
+ 
+ 	if (xe_gt_is_media_type(gt) || MEDIA_VER(xe) < 13) {
+@@ -504,7 +504,7 @@ static void gt_irq_reset(struct xe_tile *tile)
+ 	if (ccs_mask & (BIT(0)|BIT(1)))
+ 		xe_mmio_write32(mmio, CCS0_CCS1_INTR_MASK, ~0);
+ 	if (ccs_mask & (BIT(2)|BIT(3)))
+-		xe_mmio_write32(mmio,  CCS2_CCS3_INTR_MASK, ~0);
++		xe_mmio_write32(mmio, CCS2_CCS3_INTR_MASK, ~0);
+ 
+ 	if ((tile->media_gt &&
+ 	     xe_hw_engine_mask_per_class(tile->media_gt, XE_ENGINE_CLASS_OTHER)) ||
+diff --git a/drivers/gpu/drm/xe/xe_trace_bo.h b/drivers/gpu/drm/xe/xe_trace_bo.h
+index ba0f61e7d2d6b9..4ff023b5d040de 100644
+--- a/drivers/gpu/drm/xe/xe_trace_bo.h
++++ b/drivers/gpu/drm/xe/xe_trace_bo.h
+@@ -189,7 +189,7 @@ DECLARE_EVENT_CLASS(xe_vm,
+ 			   ),
+ 
+ 		    TP_printk("dev=%s, vm=%p, asid=0x%05x", __get_str(dev),
+-			      __entry->vm,  __entry->asid)
++			      __entry->vm, __entry->asid)
+ );
+ 
+ DEFINE_EVENT(xe_vm, xe_vm_kill,
+diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
+index 28188c6d0555e0..52dc666c3ef42e 100644
+--- a/drivers/i2c/busses/i2c-designware-master.c
++++ b/drivers/i2c/busses/i2c-designware-master.c
+@@ -346,6 +346,7 @@ static int amd_i2c_dw_xfer_quirk(struct i2c_adapter *adap, struct i2c_msg *msgs,
+ 
+ 	dev->msgs = msgs;
+ 	dev->msgs_num = num_msgs;
++	dev->msg_write_idx = 0;
+ 	i2c_dw_xfer_init(dev);
+ 
+ 	/* Initiate messages read/write transaction */
+diff --git a/drivers/infiniband/hw/mlx5/counters.c b/drivers/infiniband/hw/mlx5/counters.c
+index 81cfa74147a183..ad6c195d077bb2 100644
+--- a/drivers/infiniband/hw/mlx5/counters.c
++++ b/drivers/infiniband/hw/mlx5/counters.c
+@@ -391,7 +391,7 @@ static int do_get_hw_stats(struct ib_device *ibdev,
+ 		return ret;
+ 
+ 	/* We don't expose device counters over Vports */
+-	if (is_mdev_switchdev_mode(dev->mdev) && port_num != 0)
++	if (is_mdev_switchdev_mode(dev->mdev) && dev->is_rep && port_num != 0)
+ 		goto done;
+ 
+ 	if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) {
+@@ -411,7 +411,7 @@ static int do_get_hw_stats(struct ib_device *ibdev,
+ 			 */
+ 			goto done;
+ 		}
+-		ret = mlx5_lag_query_cong_counters(dev->mdev,
++		ret = mlx5_lag_query_cong_counters(mdev,
+ 						   stats->value +
+ 						   cnts->num_q_counters,
+ 						   cnts->num_cong_counters,
+diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
+index 69999d8d24f379..f49f78b69ab9c8 100644
+--- a/drivers/infiniband/hw/mlx5/devx.c
++++ b/drivers/infiniband/hw/mlx5/devx.c
+@@ -1914,6 +1914,7 @@ subscribe_event_xa_alloc(struct mlx5_devx_event_table *devx_event_table,
+ 			/* Level1 is valid for future use, no need to free */
+ 			return -ENOMEM;
+ 
++		INIT_LIST_HEAD(&obj_event->obj_sub_list);
+ 		err = xa_insert(&event->object_ids,
+ 				key_level2,
+ 				obj_event,
+@@ -1922,7 +1923,6 @@ subscribe_event_xa_alloc(struct mlx5_devx_event_table *devx_event_table,
+ 			kfree(obj_event);
+ 			return err;
+ 		}
+-		INIT_LIST_HEAD(&obj_event->obj_sub_list);
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index 8c47cb4edd0a0a..435c456a4fd5b4 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -1766,6 +1766,33 @@ static void deallocate_uars(struct mlx5_ib_dev *dev,
+ 					     context->devx_uid);
+ }
+ 
++static int mlx5_ib_enable_lb_mp(struct mlx5_core_dev *master,
++				struct mlx5_core_dev *slave)
++{
++	int err;
++
++	err = mlx5_nic_vport_update_local_lb(master, true);
++	if (err)
++		return err;
++
++	err = mlx5_nic_vport_update_local_lb(slave, true);
++	if (err)
++		goto out;
++
++	return 0;
++
++out:
++	mlx5_nic_vport_update_local_lb(master, false);
++	return err;
++}
++
++static void mlx5_ib_disable_lb_mp(struct mlx5_core_dev *master,
++				  struct mlx5_core_dev *slave)
++{
++	mlx5_nic_vport_update_local_lb(slave, false);
++	mlx5_nic_vport_update_local_lb(master, false);
++}
++
+ int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
+ {
+ 	int err = 0;
+@@ -3448,6 +3475,8 @@ static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
+ 
+ 	lockdep_assert_held(&mlx5_ib_multiport_mutex);
+ 
++	mlx5_ib_disable_lb_mp(ibdev->mdev, mpi->mdev);
++
+ 	mlx5_core_mp_event_replay(ibdev->mdev,
+ 				  MLX5_DRIVER_EVENT_AFFILIATION_REMOVED,
+ 				  NULL);
+@@ -3543,6 +3572,10 @@ static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev,
+ 				  MLX5_DRIVER_EVENT_AFFILIATION_DONE,
+ 				  &key);
+ 
++	err = mlx5_ib_enable_lb_mp(ibdev->mdev, mpi->mdev);
++	if (err)
++		goto unbind;
++
+ 	return true;
+ 
+ unbind:
+diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
+index 068eac3bdb50ba..726b81b6330c65 100644
+--- a/drivers/infiniband/hw/mlx5/mr.c
++++ b/drivers/infiniband/hw/mlx5/mr.c
+@@ -1968,7 +1968,6 @@ static int cache_ent_find_and_store(struct mlx5_ib_dev *dev,
+ 
+ 	if (mr->mmkey.cache_ent) {
+ 		spin_lock_irq(&mr->mmkey.cache_ent->mkeys_queue.lock);
+-		mr->mmkey.cache_ent->in_use--;
+ 		goto end;
+ 	}
+ 
+@@ -2029,32 +2028,62 @@ void mlx5_ib_revoke_data_direct_mrs(struct mlx5_ib_dev *dev)
+ 	}
+ }
+ 
+-static int mlx5_revoke_mr(struct mlx5_ib_mr *mr)
++static int mlx5_umr_revoke_mr_with_lock(struct mlx5_ib_mr *mr)
+ {
+-	struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
+-	struct mlx5_cache_ent *ent = mr->mmkey.cache_ent;
+-	bool is_odp = is_odp_mr(mr);
+ 	bool is_odp_dma_buf = is_dmabuf_mr(mr) &&
+-			!to_ib_umem_dmabuf(mr->umem)->pinned;
+-	int ret = 0;
++			      !to_ib_umem_dmabuf(mr->umem)->pinned;
++	bool is_odp = is_odp_mr(mr);
++	int ret;
+ 
+ 	if (is_odp)
+ 		mutex_lock(&to_ib_umem_odp(mr->umem)->umem_mutex);
+ 
+ 	if (is_odp_dma_buf)
+-		dma_resv_lock(to_ib_umem_dmabuf(mr->umem)->attach->dmabuf->resv, NULL);
++		dma_resv_lock(to_ib_umem_dmabuf(mr->umem)->attach->dmabuf->resv,
++			      NULL);
++
++	ret = mlx5r_umr_revoke_mr(mr);
+ 
+-	if (mr->mmkey.cacheable && !mlx5r_umr_revoke_mr(mr) && !cache_ent_find_and_store(dev, mr)) {
++	if (is_odp) {
++		if (!ret)
++			to_ib_umem_odp(mr->umem)->private = NULL;
++		mutex_unlock(&to_ib_umem_odp(mr->umem)->umem_mutex);
++	}
++
++	if (is_odp_dma_buf) {
++		if (!ret)
++			to_ib_umem_dmabuf(mr->umem)->private = NULL;
++		dma_resv_unlock(
++			to_ib_umem_dmabuf(mr->umem)->attach->dmabuf->resv);
++	}
++
++	return ret;
++}
++
++static int mlx5r_handle_mkey_cleanup(struct mlx5_ib_mr *mr)
++{
++	bool is_odp_dma_buf = is_dmabuf_mr(mr) &&
++			      !to_ib_umem_dmabuf(mr->umem)->pinned;
++	struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
++	struct mlx5_cache_ent *ent = mr->mmkey.cache_ent;
++	bool is_odp = is_odp_mr(mr);
++	bool from_cache = !!ent;
++	int ret;
++
++	if (mr->mmkey.cacheable && !mlx5_umr_revoke_mr_with_lock(mr) &&
++	    !cache_ent_find_and_store(dev, mr)) {
+ 		ent = mr->mmkey.cache_ent;
+ 		/* upon storing to a clean temp entry - schedule its cleanup */
+ 		spin_lock_irq(&ent->mkeys_queue.lock);
++		if (from_cache)
++			ent->in_use--;
+ 		if (ent->is_tmp && !ent->tmp_cleanup_scheduled) {
+ 			mod_delayed_work(ent->dev->cache.wq, &ent->dwork,
+ 					 msecs_to_jiffies(30 * 1000));
+ 			ent->tmp_cleanup_scheduled = true;
+ 		}
+ 		spin_unlock_irq(&ent->mkeys_queue.lock);
+-		goto out;
++		return 0;
+ 	}
+ 
+ 	if (ent) {
+@@ -2063,8 +2092,14 @@ static int mlx5_revoke_mr(struct mlx5_ib_mr *mr)
+ 		mr->mmkey.cache_ent = NULL;
+ 		spin_unlock_irq(&ent->mkeys_queue.lock);
+ 	}
++
++	if (is_odp)
++		mutex_lock(&to_ib_umem_odp(mr->umem)->umem_mutex);
++
++	if (is_odp_dma_buf)
++		dma_resv_lock(to_ib_umem_dmabuf(mr->umem)->attach->dmabuf->resv,
++			      NULL);
+ 	ret = destroy_mkey(dev, mr);
+-out:
+ 	if (is_odp) {
+ 		if (!ret)
+ 			to_ib_umem_odp(mr->umem)->private = NULL;
+@@ -2074,9 +2109,9 @@ static int mlx5_revoke_mr(struct mlx5_ib_mr *mr)
+ 	if (is_odp_dma_buf) {
+ 		if (!ret)
+ 			to_ib_umem_dmabuf(mr->umem)->private = NULL;
+-		dma_resv_unlock(to_ib_umem_dmabuf(mr->umem)->attach->dmabuf->resv);
++		dma_resv_unlock(
++			to_ib_umem_dmabuf(mr->umem)->attach->dmabuf->resv);
+ 	}
+-
+ 	return ret;
+ }
+ 
+@@ -2125,7 +2160,7 @@ static int __mlx5_ib_dereg_mr(struct ib_mr *ibmr)
+ 	}
+ 
+ 	/* Stop DMA */
+-	rc = mlx5_revoke_mr(mr);
++	rc = mlx5r_handle_mkey_cleanup(mr);
+ 	if (rc)
+ 		return rc;
+ 
+diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
+index e158d5b1ab17b1..98a76c9db7aba8 100644
+--- a/drivers/infiniband/hw/mlx5/odp.c
++++ b/drivers/infiniband/hw/mlx5/odp.c
+@@ -247,8 +247,8 @@ static void destroy_unused_implicit_child_mr(struct mlx5_ib_mr *mr)
+ 	}
+ 
+ 	if (MLX5_CAP_ODP(mr_to_mdev(mr)->mdev, mem_page_fault))
+-		__xa_erase(&mr_to_mdev(mr)->odp_mkeys,
+-			   mlx5_base_mkey(mr->mmkey.key));
++		xa_erase(&mr_to_mdev(mr)->odp_mkeys,
++			 mlx5_base_mkey(mr->mmkey.key));
+ 	xa_unlock(&imr->implicit_children);
+ 
+ 	/* Freeing a MR is a sleeping operation, so bounce to a work queue */
+@@ -521,8 +521,8 @@ static struct mlx5_ib_mr *implicit_get_child_mr(struct mlx5_ib_mr *imr,
+ 	}
+ 
+ 	if (MLX5_CAP_ODP(dev->mdev, mem_page_fault)) {
+-		ret = __xa_store(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key),
+-				 &mr->mmkey, GFP_KERNEL);
++		ret = xa_store(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key),
++			       &mr->mmkey, GFP_KERNEL);
+ 		if (xa_is_err(ret)) {
+ 			ret = ERR_PTR(xa_err(ret));
+ 			__xa_erase(&imr->implicit_children, idx);
+diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
+index 91d329e903083c..8b805b16136e5f 100644
+--- a/drivers/infiniband/sw/rxe/rxe_qp.c
++++ b/drivers/infiniband/sw/rxe/rxe_qp.c
+@@ -811,7 +811,12 @@ static void rxe_qp_do_cleanup(struct work_struct *work)
+ 	spin_unlock_irqrestore(&qp->state_lock, flags);
+ 	qp->qp_timeout_jiffies = 0;
+ 
+-	if (qp_type(qp) == IB_QPT_RC) {
++	/* timer_setup() is what initializes .function, so a NULL .function
++	 * means timer_setup() was never called and the timer was never
++	 * initialized; otherwise the timer is valid to delete.
++	 */
++	if (qp_type(qp) == IB_QPT_RC && qp->retrans_timer.function &&
++		qp->rnr_nak_timer.function) {
+ 		del_timer_sync(&qp->retrans_timer);
+ 		del_timer_sync(&qp->rnr_nak_timer);
+ 	}
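
The rxe change above relies on a property of the timer API: timer_setup() is what populates timer->function, so a NULL function pointer means the timer was never initialized and del_timer_sync() on it would be unsafe. A minimal sketch of that guard; struct demo_qp and the demo_ functions are invented, not the rxe code:

#include <linux/timer.h>

struct demo_qp {
	struct timer_list retrans_timer;
};

static void demo_retrans_timeout(struct timer_list *t)
{
	/* retransmit handling would go here */
}

static void demo_qp_init(struct demo_qp *qp)
{
	/* timer_setup() populates .function */
	timer_setup(&qp->retrans_timer, demo_retrans_timeout, 0);
}

static void demo_qp_cleanup(struct demo_qp *qp)
{
	/* Only tear down a timer that was actually set up: cleanup can
	 * run on error paths that never reached demo_qp_init().
	 */
	if (qp->retrans_timer.function)
		del_timer_sync(&qp->retrans_timer);
}
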
+diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
+index da5b14602a761d..6d679e235af6cc 100644
+--- a/drivers/input/joystick/xpad.c
++++ b/drivers/input/joystick/xpad.c
+@@ -174,6 +174,7 @@ static const struct xpad_device {
+ 	{ 0x05fd, 0x107a, "InterAct 'PowerPad Pro' X-Box pad (Germany)", 0, XTYPE_XBOX },
+ 	{ 0x05fe, 0x3030, "Chic Controller", 0, XTYPE_XBOX },
+ 	{ 0x05fe, 0x3031, "Chic Controller", 0, XTYPE_XBOX },
++	{ 0x0502, 0x1305, "Acer NGR200", 0, XTYPE_XBOX },
+ 	{ 0x062a, 0x0020, "Logic3 Xbox GamePad", 0, XTYPE_XBOX },
+ 	{ 0x062a, 0x0033, "Competition Pro Steering Wheel", 0, XTYPE_XBOX },
+ 	{ 0x06a3, 0x0200, "Saitek Racing Wheel", 0, XTYPE_XBOX },
+@@ -515,6 +516,7 @@ static const struct usb_device_id xpad_table[] = {
+ 	XPAD_XBOX360_VENDOR(0x045e),		/* Microsoft Xbox 360 controllers */
+ 	XPAD_XBOXONE_VENDOR(0x045e),		/* Microsoft Xbox One controllers */
+ 	XPAD_XBOX360_VENDOR(0x046d),		/* Logitech Xbox 360-style controllers */
++	XPAD_XBOX360_VENDOR(0x0502),		/* Acer Inc. Xbox 360 style controllers */
+ 	XPAD_XBOX360_VENDOR(0x056e),		/* Elecom JC-U3613M */
+ 	XPAD_XBOX360_VENDOR(0x06a3),		/* Saitek P3600 */
+ 	XPAD_XBOX360_VENDOR(0x0738),		/* Mad Catz Xbox 360 controllers */
+diff --git a/drivers/input/misc/cs40l50-vibra.c b/drivers/input/misc/cs40l50-vibra.c
+index dce3b0ec8cf368..330f0912363183 100644
+--- a/drivers/input/misc/cs40l50-vibra.c
++++ b/drivers/input/misc/cs40l50-vibra.c
+@@ -238,6 +238,8 @@ static int cs40l50_upload_owt(struct cs40l50_work *work_data)
+ 	header.data_words = len / sizeof(u32);
+ 
+ 	new_owt_effect_data = kmalloc(sizeof(header) + len, GFP_KERNEL);
++	if (!new_owt_effect_data)
++		return -ENOMEM;
+ 
+ 	memcpy(new_owt_effect_data, &header, sizeof(header));
+ 	memcpy(new_owt_effect_data + sizeof(header), work_data->custom_data, len);
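
The cs40l50 fix above is the textbook missing-allocation-check: kmalloc() can return NULL, and the old code went straight into memcpy() on the result. Condensed shape of the corrected path, with pack_effect() invented for illustration:

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

static void *pack_effect(const void *header, size_t hdr_len,
			 const void *payload, size_t len)
{
	u8 *buf = kmalloc(hdr_len + len, GFP_KERNEL);

	if (!buf)		/* allocation failure must be handled */
		return NULL;	/* caller translates this to -ENOMEM */

	memcpy(buf, header, hdr_len);
	memcpy(buf + hdr_len, payload, len);
	return buf;
}
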
+diff --git a/drivers/input/misc/iqs7222.c b/drivers/input/misc/iqs7222.c
+index 01c4009fd53e7b..846aac9a5c9df2 100644
+--- a/drivers/input/misc/iqs7222.c
++++ b/drivers/input/misc/iqs7222.c
+@@ -301,6 +301,7 @@ struct iqs7222_dev_desc {
+ 	int allow_offset;
+ 	int event_offset;
+ 	int comms_offset;
++	int ext_chan;
+ 	bool legacy_gesture;
+ 	struct iqs7222_reg_grp_desc reg_grps[IQS7222_NUM_REG_GRPS];
+ };
+@@ -315,6 +316,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = {
+ 		.allow_offset = 9,
+ 		.event_offset = 10,
+ 		.comms_offset = 12,
++		.ext_chan = 10,
+ 		.reg_grps = {
+ 			[IQS7222_REG_GRP_STAT] = {
+ 				.base = IQS7222_SYS_STATUS,
+@@ -373,6 +375,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = {
+ 		.allow_offset = 9,
+ 		.event_offset = 10,
+ 		.comms_offset = 12,
++		.ext_chan = 10,
+ 		.legacy_gesture = true,
+ 		.reg_grps = {
+ 			[IQS7222_REG_GRP_STAT] = {
+@@ -2244,7 +2247,7 @@ static int iqs7222_parse_chan(struct iqs7222_private *iqs7222,
+ 	const struct iqs7222_dev_desc *dev_desc = iqs7222->dev_desc;
+ 	struct i2c_client *client = iqs7222->client;
+ 	int num_chan = dev_desc->reg_grps[IQS7222_REG_GRP_CHAN].num_row;
+-	int ext_chan = rounddown(num_chan, 10);
++	int ext_chan = dev_desc->ext_chan ? : num_chan;
+ 	int error, i;
+ 	u16 *chan_setup = iqs7222->chan_setup[chan_index];
+ 	u16 *sys_setup = iqs7222->sys_setup;
+@@ -2448,7 +2451,7 @@ static int iqs7222_parse_sldr(struct iqs7222_private *iqs7222,
+ 	const struct iqs7222_dev_desc *dev_desc = iqs7222->dev_desc;
+ 	struct i2c_client *client = iqs7222->client;
+ 	int num_chan = dev_desc->reg_grps[IQS7222_REG_GRP_CHAN].num_row;
+-	int ext_chan = rounddown(num_chan, 10);
++	int ext_chan = dev_desc->ext_chan ? : num_chan;
+ 	int count, error, reg_offset, i;
+ 	u16 *event_mask = &iqs7222->sys_setup[dev_desc->event_offset];
+ 	u16 *sldr_setup = iqs7222->sldr_setup[sldr_index];
+diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
+index ff55b8c3071269..ae69691471e9fe 100644
+--- a/drivers/iommu/ipmmu-vmsa.c
++++ b/drivers/iommu/ipmmu-vmsa.c
+@@ -1087,7 +1087,7 @@ static int ipmmu_probe(struct platform_device *pdev)
+ 	 * - R-Car Gen3 IPMMU (leaf devices only - skip root IPMMU-MM device)
+ 	 */
+ 	if (!mmu->features->has_cache_leaf_nodes || !ipmmu_is_root(mmu)) {
+-		ret = iommu_device_sysfs_add(&mmu->iommu, &pdev->dev, NULL,
++		ret = iommu_device_sysfs_add(&mmu->iommu, &pdev->dev, NULL, "%s",
+ 					     dev_name(&pdev->dev));
+ 		if (ret)
+ 			return ret;
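
The ipmmu-vmsa change above fixes a format-string hazard: iommu_device_sysfs_add() takes a printf-style format, and passing dev_name() directly means any '%' in a device-tree node name gets interpreted as a conversion. Externally influenced text must go through a "%s" format. The same rule in miniature, with log_device() invented for illustration:

#include <linux/device.h>

static void log_device(struct device *dev)
{
	/* BAD:  dev_info(dev, dev_name(dev));  the name is parsed as a format */
	/* GOOD: the name is data, "%s" is the format */
	dev_info(dev, "%s\n", dev_name(dev));
}
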
+diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
+index 4b369419b32ce1..4c7f470a4752f2 100644
+--- a/drivers/iommu/rockchip-iommu.c
++++ b/drivers/iommu/rockchip-iommu.c
+@@ -1154,7 +1154,6 @@ static int rk_iommu_of_xlate(struct device *dev,
+ 	iommu_dev = of_find_device_by_node(args->np);
+ 
+ 	data->iommu = platform_get_drvdata(iommu_dev);
+-	data->iommu->domain = &rk_identity_domain;
+ 	dev_iommu_priv_set(dev, data);
+ 
+ 	platform_device_put(iommu_dev);
+@@ -1192,6 +1191,8 @@ static int rk_iommu_probe(struct platform_device *pdev)
+ 	if (!iommu)
+ 		return -ENOMEM;
+ 
++	iommu->domain = &rk_identity_domain;
++
+ 	platform_set_drvdata(pdev, iommu);
+ 	iommu->dev = dev;
+ 	iommu->num_mmu = 0;
+diff --git a/drivers/mfd/exynos-lpass.c b/drivers/mfd/exynos-lpass.c
+index e36805f07282ef..8b5fed4760394f 100644
+--- a/drivers/mfd/exynos-lpass.c
++++ b/drivers/mfd/exynos-lpass.c
+@@ -104,11 +104,22 @@ static const struct regmap_config exynos_lpass_reg_conf = {
+ 	.fast_io	= true,
+ };
+ 
++static void exynos_lpass_disable_lpass(void *data)
++{
++	struct platform_device *pdev = data;
++	struct exynos_lpass *lpass = platform_get_drvdata(pdev);
++
++	pm_runtime_disable(&pdev->dev);
++	if (!pm_runtime_status_suspended(&pdev->dev))
++		exynos_lpass_disable(lpass);
++}
++
+ static int exynos_lpass_probe(struct platform_device *pdev)
+ {
+ 	struct device *dev = &pdev->dev;
+ 	struct exynos_lpass *lpass;
+ 	void __iomem *base_top;
++	int ret;
+ 
+ 	lpass = devm_kzalloc(dev, sizeof(*lpass), GFP_KERNEL);
+ 	if (!lpass)
+@@ -134,16 +145,11 @@ static int exynos_lpass_probe(struct platform_device *pdev)
+ 	pm_runtime_enable(dev);
+ 	exynos_lpass_enable(lpass);
+ 
+-	return devm_of_platform_populate(dev);
+-}
+-
+-static void exynos_lpass_remove(struct platform_device *pdev)
+-{
+-	struct exynos_lpass *lpass = platform_get_drvdata(pdev);
++	ret = devm_add_action_or_reset(dev, exynos_lpass_disable_lpass, pdev);
++	if (ret)
++		return ret;
+ 
+-	pm_runtime_disable(&pdev->dev);
+-	if (!pm_runtime_status_suspended(&pdev->dev))
+-		exynos_lpass_disable(lpass);
++	return devm_of_platform_populate(dev);
+ }
+ 
+ static int __maybe_unused exynos_lpass_suspend(struct device *dev)
+@@ -183,7 +189,6 @@ static struct platform_driver exynos_lpass_driver = {
+ 		.of_match_table	= exynos_lpass_of_match,
+ 	},
+ 	.probe	= exynos_lpass_probe,
+-	.remove_new = exynos_lpass_remove,
+ };
+ module_platform_driver(exynos_lpass_driver);
+ 
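
The exynos-lpass rework above drops the .remove callback in favor of devm_add_action_or_reset(), which registers teardown at probe time: the action runs automatically at unbind, and if registration itself fails it runs immediately, keeping probe error paths balanced. A hedged sketch of the pattern; demo_disable() and demo_probe_tail() are invented:

#include <linux/device.h>
#include <linux/pm_runtime.h>

static void demo_disable(void *data)
{
	struct device *dev = data;

	pm_runtime_disable(dev);
	/* power down the hardware here if it is still running */
}

static int demo_probe_tail(struct device *dev)
{
	pm_runtime_enable(dev);

	/* On success this runs at unbind; on failure it runs right now,
	 * so no separate .remove handler is needed.
	 */
	return devm_add_action_or_reset(dev, demo_disable, dev);
}
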
+diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h
+index 7f893bafaa607d..c417ed34c05767 100644
+--- a/drivers/mmc/core/quirks.h
++++ b/drivers/mmc/core/quirks.h
+@@ -44,6 +44,12 @@ static const struct mmc_fixup __maybe_unused mmc_sd_fixups[] = {
+ 		   0, -1ull, SDIO_ANY_ID, SDIO_ANY_ID, add_quirk_sd,
+ 		   MMC_QUIRK_NO_UHS_DDR50_TUNING, EXT_CSD_REV_ANY),
+ 
++	/*
++	 * Some SD cards report discard support while they don't
++	 */
++	MMC_FIXUP(CID_NAME_ANY, CID_MANFID_SANDISK_SD, 0x5344, add_quirk_sd,
++		  MMC_QUIRK_BROKEN_SD_DISCARD),
++
+ 	END_FIXUP
+ };
+ 
+@@ -147,12 +153,6 @@ static const struct mmc_fixup __maybe_unused mmc_blk_fixups[] = {
+ 	MMC_FIXUP("M62704", CID_MANFID_KINGSTON, 0x0100, add_quirk_mmc,
+ 		  MMC_QUIRK_TRIM_BROKEN),
+ 
+-	/*
+-	 * Some SD cards reports discard support while they don't
+-	 */
+-	MMC_FIXUP(CID_NAME_ANY, CID_MANFID_SANDISK_SD, 0x5344, add_quirk_sd,
+-		  MMC_QUIRK_BROKEN_SD_DISCARD),
+-
+ 	END_FIXUP
+ };
+ 
+diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
+index d5d868cb4edc7b..be9954f5bc0a6c 100644
+--- a/drivers/mmc/host/mtk-sd.c
++++ b/drivers/mmc/host/mtk-sd.c
+@@ -776,12 +776,18 @@ static inline void msdc_dma_setup(struct msdc_host *host, struct msdc_dma *dma,
+ static void msdc_prepare_data(struct msdc_host *host, struct mmc_data *data)
+ {
+ 	if (!(data->host_cookie & MSDC_PREPARE_FLAG)) {
+-		data->host_cookie |= MSDC_PREPARE_FLAG;
+ 		data->sg_count = dma_map_sg(host->dev, data->sg, data->sg_len,
+ 					    mmc_get_dma_dir(data));
++		if (data->sg_count)
++			data->host_cookie |= MSDC_PREPARE_FLAG;
+ 	}
+ }
+ 
++static bool msdc_data_prepared(struct mmc_data *data)
++{
++	return data->host_cookie & MSDC_PREPARE_FLAG;
++}
++
+ static void msdc_unprepare_data(struct msdc_host *host, struct mmc_data *data)
+ {
+ 	if (data->host_cookie & MSDC_ASYNC_FLAG)
+@@ -1345,8 +1351,19 @@ static void msdc_ops_request(struct mmc_host *mmc, struct mmc_request *mrq)
+ 	WARN_ON(host->mrq);
+ 	host->mrq = mrq;
+ 
+-	if (mrq->data)
++	if (mrq->data) {
+ 		msdc_prepare_data(host, mrq->data);
++		if (!msdc_data_prepared(mrq->data)) {
++			host->mrq = NULL;
++			/*
++			 * Failed to prepare DMA area, fail fast before
++			 * starting any commands.
++			 */
++			mrq->cmd->error = -ENOSPC;
++			mmc_request_done(mmc_from_priv(host), mrq);
++			return;
++		}
++	}
+ 
+ 	/* if SBC is required, we have HW option and SW option.
+ 	 * if HW option is enabled, and SBC does not have "special" flags,
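
The mtk-sd fix above makes the "prepared" cookie conditional on dma_map_sg() succeeding (it returns 0 on failure) and fails the request before any command is issued, rather than starting a transfer with an unmapped buffer. Sketch of the check, with struct demo_req and the demo_ helpers invented:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

struct demo_req {
	struct scatterlist *sg;
	int sg_len;
	int sg_count;
	bool prepared;
};

static bool demo_prepare(struct device *dev, struct demo_req *req)
{
	req->sg_count = dma_map_sg(dev, req->sg, req->sg_len, DMA_TO_DEVICE);

	/* dma_map_sg() returns 0 on failure: only mark the request
	 * prepared when the mapping really happened.
	 */
	req->prepared = req->sg_count != 0;
	return req->prepared;
}

static int demo_submit(struct device *dev, struct demo_req *req)
{
	if (!demo_prepare(dev, req))
		return -ENOSPC;	/* fail fast, before touching the hardware */
	/* start the transfer here */
	return 0;
}
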
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index 8ae76300d157d0..4b91c9e9663575 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -2035,15 +2035,10 @@ void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
+ 
+ 	host->mmc->actual_clock = 0;
+ 
+-	clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+-	if (clk & SDHCI_CLOCK_CARD_EN)
+-		sdhci_writew(host, clk & ~SDHCI_CLOCK_CARD_EN,
+-			SDHCI_CLOCK_CONTROL);
++	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
+ 
+-	if (clock == 0) {
+-		sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
++	if (clock == 0)
+ 		return;
+-	}
+ 
+ 	clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
+ 	sdhci_enable_clk(host, clk);
+diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
+index f531b617f28d77..da1b0a46b1d9c3 100644
+--- a/drivers/mmc/host/sdhci.h
++++ b/drivers/mmc/host/sdhci.h
+@@ -825,4 +825,20 @@ void sdhci_switch_external_dma(struct sdhci_host *host, bool en);
+ void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable);
+ void __sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd);
+ 
++#if defined(CONFIG_DYNAMIC_DEBUG) || \
++	(defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
++#define SDHCI_DBG_ANYWAY 0
++#elif defined(DEBUG)
++#define SDHCI_DBG_ANYWAY 1
++#else
++#define SDHCI_DBG_ANYWAY 0
++#endif
++
++#define sdhci_dbg_dumpregs(host, fmt)					\
++do {									\
++	DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt);			\
++	if (DYNAMIC_DEBUG_BRANCH(descriptor) ||	SDHCI_DBG_ANYWAY)	\
++		sdhci_dumpregs(host);					\
++} while (0)
++
+ #endif /* __SDHCI_HW_H */
+diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
+index 4d76f9f71a0e9e..241f6a4df16c1f 100644
+--- a/drivers/mtd/nand/spi/core.c
++++ b/drivers/mtd/nand/spi/core.c
+@@ -1496,6 +1496,7 @@ static void spinand_cleanup(struct spinand_device *spinand)
+ {
+ 	struct nand_device *nand = spinand_to_nand(spinand);
+ 
++	nanddev_ecc_engine_cleanup(nand);
+ 	nanddev_cleanup(nand);
+ 	spinand_manufacturer_cleanup(spinand);
+ 	kfree(spinand->databuf);
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 2a513dbbd97566..52ff0f9e04e079 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -497,9 +497,9 @@ static int bond_ipsec_add_sa(struct xfrm_state *xs,
+ 		goto out;
+ 	}
+ 
+-	xs->xso.real_dev = real_dev;
+ 	err = real_dev->xfrmdev_ops->xdo_dev_state_add(xs, extack);
+ 	if (!err) {
++		xs->xso.real_dev = real_dev;
+ 		ipsec->xs = xs;
+ 		INIT_LIST_HEAD(&ipsec->list);
+ 		mutex_lock(&bond->ipsec_lock);
+@@ -541,11 +541,11 @@ static void bond_ipsec_add_sa_all(struct bonding *bond)
+ 		if (ipsec->xs->xso.real_dev == real_dev)
+ 			continue;
+ 
+-		ipsec->xs->xso.real_dev = real_dev;
+ 		if (real_dev->xfrmdev_ops->xdo_dev_state_add(ipsec->xs, NULL)) {
+ 			slave_warn(bond_dev, real_dev, "%s: failed to add SA\n", __func__);
+-			ipsec->xs->xso.real_dev = NULL;
++			continue;
+ 		}
++		ipsec->xs->xso.real_dev = real_dev;
+ 	}
+ out:
+ 	mutex_unlock(&bond->ipsec_lock);
+@@ -627,6 +627,7 @@ static void bond_ipsec_del_sa_all(struct bonding *bond)
+ 				   "%s: no slave xdo_dev_state_delete\n",
+ 				   __func__);
+ 		} else {
++			ipsec->xs->xso.real_dev = NULL;
+ 			real_dev->xfrmdev_ops->xdo_dev_state_delete(ipsec->xs);
+ 			if (real_dev->xfrmdev_ops->xdo_dev_state_free)
+ 				real_dev->xfrmdev_ops->xdo_dev_state_free(ipsec->xs);
+@@ -661,6 +662,7 @@ static void bond_ipsec_free_sa(struct xfrm_state *xs)
+ 
+ 	WARN_ON(xs->xso.real_dev != real_dev);
+ 
++	xs->xso.real_dev = NULL;
+ 	if (real_dev && real_dev->xfrmdev_ops &&
+ 	    real_dev->xfrmdev_ops->xdo_dev_state_free)
+ 		real_dev->xfrmdev_ops->xdo_dev_state_free(xs);
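
All three bonding hunks above enforce one invariant: xs->xso.real_dev is assigned only after xdo_dev_state_add() succeeds, and cleared before the state is deleted or freed, so the pointer never names a device that does not actually hold the offload. Generic assign-after-success shape, with backend_add() standing in for the add hook and all demo names invented:

#include <linux/errno.h>

struct demo_state { void *real_dev; };

/* Stand-in for a device's add hook; not a real API. */
static int backend_add(void *dev, struct demo_state *st)
{
	return dev && st ? 0 : -ENODEV;
}

static int demo_offload(void *dev, struct demo_state *st)
{
	int err = backend_add(dev, st);

	if (err)
		return err;	/* st->real_dev stays NULL: nothing to undo */

	st->real_dev = dev;	/* publish only after the add succeeded */
	return 0;
}
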
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+index 3b70f67376331e..aa25a8a0a106f6 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+@@ -1373,6 +1373,8 @@
+ #define MDIO_VEND2_CTRL1_SS13		BIT(13)
+ #endif
+ 
++#define XGBE_VEND2_MAC_AUTO_SW		BIT(9)
++
+ /* MDIO mask values */
+ #define XGBE_AN_CL73_INT_CMPLT		BIT(0)
+ #define XGBE_AN_CL73_INC_LINK		BIT(1)
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+index 07f4f3418d0187..ed76a8df6ec6ed 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+@@ -375,6 +375,10 @@ static void xgbe_an37_set(struct xgbe_prv_data *pdata, bool enable,
+ 		reg |= MDIO_VEND2_CTRL1_AN_RESTART;
+ 
+ 	XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_CTRL1, reg);
++
++	reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_PCS_DIG_CTRL);
++	reg |= XGBE_VEND2_MAC_AUTO_SW;
++	XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_PCS_DIG_CTRL, reg);
+ }
+ 
+ static void xgbe_an37_restart(struct xgbe_prv_data *pdata)
+@@ -1003,6 +1007,11 @@ static void xgbe_an37_init(struct xgbe_prv_data *pdata)
+ 
+ 	netif_dbg(pdata, link, pdata->netdev, "CL37 AN (%s) initialized\n",
+ 		  (pdata->an_mode == XGBE_AN_MODE_CL37) ? "BaseX" : "SGMII");
++
++	reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1);
++	reg &= ~MDIO_AN_CTRL1_ENABLE;
++	XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_CTRL1, reg);
++
+ }
+ 
+ static void xgbe_an73_init(struct xgbe_prv_data *pdata)
+@@ -1404,6 +1413,10 @@ static void xgbe_phy_status(struct xgbe_prv_data *pdata)
+ 
+ 	pdata->phy.link = pdata->phy_if.phy_impl.link_status(pdata,
+ 							     &an_restart);
++	/* bail out if the link status register read fails */
++	if (pdata->phy.link < 0)
++		return;
++
+ 	if (an_restart) {
+ 		xgbe_phy_config_aneg(pdata);
+ 		goto adjust_link;
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+index 268399dfcf22f0..32e633d1134843 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+@@ -2855,8 +2855,7 @@ static bool xgbe_phy_valid_speed(struct xgbe_prv_data *pdata, int speed)
+ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
+ {
+ 	struct xgbe_phy_data *phy_data = pdata->phy_data;
+-	unsigned int reg;
+-	int ret;
++	int reg, ret;
+ 
+ 	*an_restart = 0;
+ 
+@@ -2890,11 +2889,20 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
+ 			return 0;
+ 	}
+ 
+-	/* Link status is latched low, so read once to clear
+-	 * and then read again to get current state
+-	 */
+-	reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+ 	reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
++	if (reg < 0)
++		return reg;
++
++	/* Link status is latched low so that momentary link drops
++	 * can be detected. If link was already down, read again
++	 * to get the latest state.
++	 */
++
++	if (!pdata->phy.link && !(reg & MDIO_STAT1_LSTATUS)) {
++		reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
++		if (reg < 0)
++			return reg;
++	}
+ 
+ 	if (pdata->en_rx_adap) {
+ 		/* if the link is available and adaptation is done,
+@@ -2913,9 +2921,7 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
+ 			xgbe_phy_set_mode(pdata, phy_data->cur_mode);
+ 		}
+ 
+-		/* check again for the link and adaptation status */
+-		reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+-		if ((reg & MDIO_STAT1_LSTATUS) && pdata->rx_adapt_done)
++		if (pdata->rx_adapt_done)
+ 			return 1;
+ 	} else if (reg & MDIO_STAT1_LSTATUS)
+ 		return 1;
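
The xgbe-phy-v2 rework above deals with a latched-low status register: the link bit latches a momentary drop and clears on read, so a single read can report "down" for a link that has already recovered. The fix reads once, re-reads only when the link was already believed down, and propagates negative (failed) register reads instead of treating them as status. A userspace-style sketch; LINK_UP and the read_status() callback are invented:

#include <stdbool.h>

#define LINK_UP 0x04	/* invented latched-low link bit */

static int demo_link_status(int (*read_status)(void), bool link_was_up)
{
	int reg = read_status();

	if (reg < 0)
		return reg;	/* propagate register read failures */

	/* Latched low: a clear bit may just record a momentary drop.
	 * If we already thought the link was down, read again for the
	 * current state; if it was up, believe the drop.
	 */
	if (!link_was_up && !(reg & LINK_UP)) {
		reg = read_status();
		if (reg < 0)
			return reg;
	}

	return !!(reg & LINK_UP);
}
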
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
+index ed5d43c16d0e23..7526a0906b3914 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
++++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
+@@ -292,12 +292,12 @@
+ #define XGBE_LINK_TIMEOUT		5
+ #define XGBE_KR_TRAINING_WAIT_ITER	50
+ 
+-#define XGBE_SGMII_AN_LINK_STATUS	BIT(1)
++#define XGBE_SGMII_AN_LINK_DUPLEX	BIT(1)
+ #define XGBE_SGMII_AN_LINK_SPEED	(BIT(2) | BIT(3))
+ #define XGBE_SGMII_AN_LINK_SPEED_10	0x00
+ #define XGBE_SGMII_AN_LINK_SPEED_100	0x04
+ #define XGBE_SGMII_AN_LINK_SPEED_1000	0x08
+-#define XGBE_SGMII_AN_LINK_DUPLEX	BIT(4)
++#define XGBE_SGMII_AN_LINK_STATUS	BIT(4)
+ 
+ /* ECC correctable error notification window (seconds) */
+ #define XGBE_ECC_LIMIT			60
+diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
+index 3afd3627ce485b..9c5d619909045a 100644
+--- a/drivers/net/ethernet/atheros/atlx/atl1.c
++++ b/drivers/net/ethernet/atheros/atlx/atl1.c
+@@ -1861,14 +1861,21 @@ static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter)
+ 			break;
+ 		}
+ 
+-		buffer_info->alloced = 1;
+-		buffer_info->skb = skb;
+-		buffer_info->length = (u16) adapter->rx_buffer_len;
+ 		page = virt_to_page(skb->data);
+ 		offset = offset_in_page(skb->data);
+ 		buffer_info->dma = dma_map_page(&pdev->dev, page, offset,
+ 						adapter->rx_buffer_len,
+ 						DMA_FROM_DEVICE);
++		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
++			kfree_skb(skb);
++			adapter->soft_stats.rx_dropped++;
++			break;
++		}
++
++		buffer_info->alloced = 1;
++		buffer_info->skb = skb;
++		buffer_info->length = (u16)adapter->rx_buffer_len;
++
+ 		rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
+ 		rfd_desc->buf_len = cpu_to_le16(adapter->rx_buffer_len);
+ 		rfd_desc->coalese = 0;
+@@ -2183,8 +2190,8 @@ static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb,
+ 	return 0;
+ }
+ 
+-static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
+-	struct tx_packet_desc *ptpd)
++static bool atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
++			struct tx_packet_desc *ptpd)
+ {
+ 	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
+ 	struct atl1_buffer *buffer_info;
+@@ -2194,6 +2201,7 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
+ 	unsigned int nr_frags;
+ 	unsigned int f;
+ 	int retval;
++	u16 first_mapped;
+ 	u16 next_to_use;
+ 	u16 data_len;
+ 	u8 hdr_len;
+@@ -2201,6 +2209,7 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
+ 	buf_len -= skb->data_len;
+ 	nr_frags = skb_shinfo(skb)->nr_frags;
+ 	next_to_use = atomic_read(&tpd_ring->next_to_use);
++	first_mapped = next_to_use;
+ 	buffer_info = &tpd_ring->buffer_info[next_to_use];
+ 	BUG_ON(buffer_info->skb);
+ 	/* put skb in last TPD */
+@@ -2216,6 +2225,8 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
+ 		buffer_info->dma = dma_map_page(&adapter->pdev->dev, page,
+ 						offset, hdr_len,
+ 						DMA_TO_DEVICE);
++		if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma))
++			goto dma_err;
+ 
+ 		if (++next_to_use == tpd_ring->count)
+ 			next_to_use = 0;
+@@ -2242,6 +2253,9 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
+ 								page, offset,
+ 								buffer_info->length,
+ 								DMA_TO_DEVICE);
++				if (dma_mapping_error(&adapter->pdev->dev,
++						      buffer_info->dma))
++					goto dma_err;
+ 				if (++next_to_use == tpd_ring->count)
+ 					next_to_use = 0;
+ 			}
+@@ -2254,6 +2268,8 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
+ 		buffer_info->dma = dma_map_page(&adapter->pdev->dev, page,
+ 						offset, buf_len,
+ 						DMA_TO_DEVICE);
++		if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma))
++			goto dma_err;
+ 		if (++next_to_use == tpd_ring->count)
+ 			next_to_use = 0;
+ 	}
+@@ -2277,6 +2293,9 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
+ 			buffer_info->dma = skb_frag_dma_map(&adapter->pdev->dev,
+ 				frag, i * ATL1_MAX_TX_BUF_LEN,
+ 				buffer_info->length, DMA_TO_DEVICE);
++			if (dma_mapping_error(&adapter->pdev->dev,
++					      buffer_info->dma))
++				goto dma_err;
+ 
+ 			if (++next_to_use == tpd_ring->count)
+ 				next_to_use = 0;
+@@ -2285,6 +2304,22 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
+ 
+ 	/* last tpd's buffer-info */
+ 	buffer_info->skb = skb;
++
++	return true;
++
++ dma_err:
++	while (first_mapped != next_to_use) {
++		buffer_info = &tpd_ring->buffer_info[first_mapped];
++		dma_unmap_page(&adapter->pdev->dev,
++			       buffer_info->dma,
++			       buffer_info->length,
++			       DMA_TO_DEVICE);
++		buffer_info->dma = 0;
++
++		if (++first_mapped == tpd_ring->count)
++			first_mapped = 0;
++	}
++	return false;
+ }
+ 
+ static void atl1_tx_queue(struct atl1_adapter *adapter, u16 count,
+@@ -2355,10 +2390,8 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
+ 
+ 	len = skb_headlen(skb);
+ 
+-	if (unlikely(skb->len <= 0)) {
+-		dev_kfree_skb_any(skb);
+-		return NETDEV_TX_OK;
+-	}
++	if (unlikely(skb->len <= 0))
++		goto drop_packet;
+ 
+ 	nr_frags = skb_shinfo(skb)->nr_frags;
+ 	for (f = 0; f < nr_frags; f++) {
+@@ -2371,10 +2404,9 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
+ 	if (mss) {
+ 		if (skb->protocol == htons(ETH_P_IP)) {
+ 			proto_hdr_len = skb_tcp_all_headers(skb);
+-			if (unlikely(proto_hdr_len > len)) {
+-				dev_kfree_skb_any(skb);
+-				return NETDEV_TX_OK;
+-			}
++			if (unlikely(proto_hdr_len > len))
++				goto drop_packet;
++
+ 			/* need additional TPD ? */
+ 			if (proto_hdr_len != len)
+ 				count += (len - proto_hdr_len +
+@@ -2406,23 +2438,26 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
+ 	}
+ 
+ 	tso = atl1_tso(adapter, skb, ptpd);
+-	if (tso < 0) {
+-		dev_kfree_skb_any(skb);
+-		return NETDEV_TX_OK;
+-	}
++	if (tso < 0)
++		goto drop_packet;
+ 
+ 	if (!tso) {
+ 		ret_val = atl1_tx_csum(adapter, skb, ptpd);
+-		if (ret_val < 0) {
+-			dev_kfree_skb_any(skb);
+-			return NETDEV_TX_OK;
+-		}
++		if (ret_val < 0)
++			goto drop_packet;
+ 	}
+ 
+-	atl1_tx_map(adapter, skb, ptpd);
++	if (!atl1_tx_map(adapter, skb, ptpd))
++		goto drop_packet;
++
+ 	atl1_tx_queue(adapter, count, ptpd);
+ 	atl1_update_mailbox(adapter);
+ 	return NETDEV_TX_OK;
++
++drop_packet:
++	adapter->soft_stats.tx_errors++;
++	dev_kfree_skb_any(skb);
++	return NETDEV_TX_OK;
+ }
+ 
+ static int atl1_rings_clean(struct napi_struct *napi, int budget)
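The atl1 change above is an instance of a pattern this series applies to several drivers: every dma_map_*() result must be checked with dma_mapping_error(), and on failure everything mapped so far has to be unwound before the packet is dropped. A minimal sketch of that pattern, with a hypothetical ring structure and helper name rather than atl1's actual ones:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

struct tx_slot {
	dma_addr_t dma;
	unsigned int len;
};

/* Map the skb head into slot 'first'; on any mapping failure, unmap
 * every slot populated so far and report failure to the caller.
 */
static bool tx_map(struct device *dev, struct tx_slot *ring, u16 count,
		   u16 first, struct sk_buff *skb)
{
	u16 i = first;

	ring[i].len = skb_headlen(skb);
	ring[i].dma = dma_map_single(dev, skb->data, ring[i].len,
				     DMA_TO_DEVICE);
	if (dma_mapping_error(dev, ring[i].dma))
		goto unwind;	/* nothing mapped yet: loop below is a no-op */

	/* ... map each fragment, advancing i modulo count, jumping to
	 * unwind on any dma_mapping_error() ...
	 */

	return true;

unwind:
	while (first != i) {	/* walk forward up to the failed slot */
		dma_unmap_single(dev, ring[first].dma, ring[first].len,
				 DMA_TO_DEVICE);
		if (++first == count)
			first = 0;
	}
	return false;
}

The caller then frees the skb and bumps a drop counter instead of handing a half-mapped descriptor chain to the hardware, which is exactly what the new drop_packet label in atl1_xmit_frame() does.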
+diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
+index ffed14b63d41d1..a432783756d8c4 100644
+--- a/drivers/net/ethernet/cisco/enic/enic_main.c
++++ b/drivers/net/ethernet/cisco/enic/enic_main.c
+@@ -2127,10 +2127,10 @@ static int enic_change_mtu(struct net_device *netdev, int new_mtu)
+ 	if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
+ 		return -EOPNOTSUPP;
+ 
+-	if (netdev->mtu > enic->port_mtu)
++	if (new_mtu > enic->port_mtu)
+ 		netdev_warn(netdev,
+ 			    "interface MTU (%d) set higher than port MTU (%d)\n",
+-			    netdev->mtu, enic->port_mtu);
++			    new_mtu, enic->port_mtu);
+ 
+ 	return _enic_change_mtu(netdev, new_mtu);
+ }
+diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+index 29886a8ba73f33..efd0048acd3b2d 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+@@ -3928,6 +3928,7 @@ static int dpaa2_eth_setup_rx_flow(struct dpaa2_eth_priv *priv,
+ 					 MEM_TYPE_PAGE_ORDER0, NULL);
+ 	if (err) {
+ 		dev_err(dev, "xdp_rxq_info_reg_mem_model failed\n");
++		xdp_rxq_info_unreg(&fq->channel->xdp_rxq);
+ 		return err;
+ 	}
+ 
+@@ -4421,17 +4422,25 @@ static int dpaa2_eth_bind_dpni(struct dpaa2_eth_priv *priv)
+ 			return -EINVAL;
+ 		}
+ 		if (err)
+-			return err;
++			goto out;
+ 	}
+ 
+ 	err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token,
+ 			    DPNI_QUEUE_TX, &priv->tx_qdid);
+ 	if (err) {
+ 		dev_err(dev, "dpni_get_qdid() failed\n");
+-		return err;
++		goto out;
+ 	}
+ 
+ 	return 0;
++
++out:
++	while (i--) {
++		if (priv->fq[i].type == DPAA2_RX_FQ &&
++		    xdp_rxq_info_is_reg(&priv->fq[i].channel->xdp_rxq))
++			xdp_rxq_info_unreg(&priv->fq[i].channel->xdp_rxq);
++	}
++	return err;
+ }
+ 
+ /* Allocate rings for storing incoming frame descriptors */
+@@ -4814,6 +4823,17 @@ static void dpaa2_eth_del_ch_napi(struct dpaa2_eth_priv *priv)
+ 	}
+ }
+ 
++static void dpaa2_eth_free_rx_xdp_rxq(struct dpaa2_eth_priv *priv)
++{
++	int i;
++
++	for (i = 0; i < priv->num_fqs; i++) {
++		if (priv->fq[i].type == DPAA2_RX_FQ &&
++		    xdp_rxq_info_is_reg(&priv->fq[i].channel->xdp_rxq))
++			xdp_rxq_info_unreg(&priv->fq[i].channel->xdp_rxq);
++	}
++}
++
+ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
+ {
+ 	struct device *dev;
+@@ -5017,6 +5037,7 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
+ 	free_percpu(priv->percpu_stats);
+ err_alloc_percpu_stats:
+ 	dpaa2_eth_del_ch_napi(priv);
++	dpaa2_eth_free_rx_xdp_rxq(priv);
+ err_bind:
+ 	dpaa2_eth_free_dpbps(priv);
+ err_dpbp_setup:
+@@ -5069,6 +5090,7 @@ static void dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
+ 	free_percpu(priv->percpu_extras);
+ 
+ 	dpaa2_eth_del_ch_napi(priv);
++	dpaa2_eth_free_rx_xdp_rxq(priv);
+ 	dpaa2_eth_free_dpbps(priv);
+ 	dpaa2_eth_free_dpio(priv);
+ 	dpaa2_eth_free_dpni(priv);
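The dpaa2 fix above pairs every successful xdp_rxq_info_reg() with an xdp_rxq_info_unreg() on all error and teardown paths, using xdp_rxq_info_is_reg() to skip queues whose registration never completed. A hedged sketch of that pairing (the queue array and loop are hypothetical, not dpaa2's actual layout):

#include <net/xdp.h>

static int register_rxqs(struct net_device *dev, struct xdp_rxq_info *rxqs,
			 int n)
{
	int i, err;

	for (i = 0; i < n; i++) {
		err = xdp_rxq_info_reg(&rxqs[i], dev, i, 0);
		if (err)
			goto unwind;

		err = xdp_rxq_info_reg_mem_model(&rxqs[i],
						 MEM_TYPE_PAGE_ORDER0, NULL);
		if (err) {
			xdp_rxq_info_unreg(&rxqs[i]);
			goto unwind;
		}
	}
	return 0;

unwind:
	while (i--)
		if (xdp_rxq_info_is_reg(&rxqs[i]))
			xdp_rxq_info_unreg(&rxqs[i]);
	return err;
}

Without the unwind, a failure partway through probe leaves registered xdp_rxq_info structures behind, which can trip the re-registration checks in the XDP core on the next probe attempt.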
+diff --git a/drivers/net/ethernet/intel/idpf/idpf_controlq.c b/drivers/net/ethernet/intel/idpf/idpf_controlq.c
+index b28991dd187036..48b8e184f3db63 100644
+--- a/drivers/net/ethernet/intel/idpf/idpf_controlq.c
++++ b/drivers/net/ethernet/intel/idpf/idpf_controlq.c
+@@ -96,7 +96,7 @@ static void idpf_ctlq_init_rxq_bufs(struct idpf_ctlq_info *cq)
+  */
+ static void idpf_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
+ {
+-	mutex_lock(&cq->cq_lock);
++	spin_lock(&cq->cq_lock);
+ 
+ 	/* free ring buffers and the ring itself */
+ 	idpf_ctlq_dealloc_ring_res(hw, cq);
+@@ -104,8 +104,7 @@ static void idpf_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
+ 	/* Set ring_size to 0 to indicate uninitialized queue */
+ 	cq->ring_size = 0;
+ 
+-	mutex_unlock(&cq->cq_lock);
+-	mutex_destroy(&cq->cq_lock);
++	spin_unlock(&cq->cq_lock);
+ }
+ 
+ /**
+@@ -173,7 +172,7 @@ int idpf_ctlq_add(struct idpf_hw *hw,
+ 
+ 	idpf_ctlq_init_regs(hw, cq, is_rxq);
+ 
+-	mutex_init(&cq->cq_lock);
++	spin_lock_init(&cq->cq_lock);
+ 
+ 	list_add(&cq->cq_list, &hw->cq_list_head);
+ 
+@@ -272,7 +271,7 @@ int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+ 	int err = 0;
+ 	int i;
+ 
+-	mutex_lock(&cq->cq_lock);
++	spin_lock(&cq->cq_lock);
+ 
+ 	/* Ensure there are enough descriptors to send all messages */
+ 	num_desc_avail = IDPF_CTLQ_DESC_UNUSED(cq);
+@@ -332,7 +331,7 @@ int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+ 	wr32(hw, cq->reg.tail, cq->next_to_use);
+ 
+ err_unlock:
+-	mutex_unlock(&cq->cq_lock);
++	spin_unlock(&cq->cq_lock);
+ 
+ 	return err;
+ }
+@@ -364,7 +363,7 @@ int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
+ 	if (*clean_count > cq->ring_size)
+ 		return -EBADR;
+ 
+-	mutex_lock(&cq->cq_lock);
++	spin_lock(&cq->cq_lock);
+ 
+ 	ntc = cq->next_to_clean;
+ 
+@@ -397,7 +396,7 @@ int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
+ 
+ 	cq->next_to_clean = ntc;
+ 
+-	mutex_unlock(&cq->cq_lock);
++	spin_unlock(&cq->cq_lock);
+ 
+ 	/* Return number of descriptors actually cleaned */
+ 	*clean_count = i;
+@@ -435,7 +434,7 @@ int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+ 	if (*buff_count > 0)
+ 		buffs_avail = true;
+ 
+-	mutex_lock(&cq->cq_lock);
++	spin_lock(&cq->cq_lock);
+ 
+ 	if (tbp >= cq->ring_size)
+ 		tbp = 0;
+@@ -524,7 +523,7 @@ int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+ 		wr32(hw, cq->reg.tail, cq->next_to_post);
+ 	}
+ 
+-	mutex_unlock(&cq->cq_lock);
++	spin_unlock(&cq->cq_lock);
+ 
+ 	/* return the number of buffers that were not posted */
+ 	*buff_count = *buff_count - i;
+@@ -552,7 +551,7 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
+ 	u16 i;
+ 
+ 	/* take the lock before we start messing with the ring */
+-	mutex_lock(&cq->cq_lock);
++	spin_lock(&cq->cq_lock);
+ 
+ 	ntc = cq->next_to_clean;
+ 
+@@ -614,7 +613,7 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
+ 
+ 	cq->next_to_clean = ntc;
+ 
+-	mutex_unlock(&cq->cq_lock);
++	spin_unlock(&cq->cq_lock);
+ 
+ 	*num_q_msg = i;
+ 	if (*num_q_msg == 0)
+diff --git a/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h b/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h
+index e8e046ef2f0d76..5890d8adca4a87 100644
+--- a/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h
++++ b/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h
+@@ -99,7 +99,7 @@ struct idpf_ctlq_info {
+ 
+ 	enum idpf_ctlq_type cq_type;
+ 	int q_id;
+-	struct mutex cq_lock;		/* control queue lock */
++	spinlock_t cq_lock;		/* control queue lock */
+ 	/* used for interrupt processing */
+ 	u16 next_to_use;
+ 	u16 next_to_clean;
+diff --git a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
+index 59b1a1a099967f..f72420cf68216c 100644
+--- a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
++++ b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
+@@ -46,7 +46,7 @@ static u32 idpf_get_rxfh_key_size(struct net_device *netdev)
+ 	struct idpf_vport_user_config_data *user_config;
+ 
+ 	if (!idpf_is_cap_ena_all(np->adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS))
+-		return -EOPNOTSUPP;
++		return 0;
+ 
+ 	user_config = &np->adapter->vport_config[np->vport_idx]->user_config;
+ 
+@@ -65,7 +65,7 @@ static u32 idpf_get_rxfh_indir_size(struct net_device *netdev)
+ 	struct idpf_vport_user_config_data *user_config;
+ 
+ 	if (!idpf_is_cap_ena_all(np->adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS))
+-		return -EOPNOTSUPP;
++		return 0;
+ 
+ 	user_config = &np->adapter->vport_config[np->vport_idx]->user_config;
+ 
+diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
+index ba645ab22d394a..746b655337275f 100644
+--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
++++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
+@@ -2315,8 +2315,12 @@ void *idpf_alloc_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem, u64 size)
+ 	struct idpf_adapter *adapter = hw->back;
+ 	size_t sz = ALIGN(size, 4096);
+ 
+-	mem->va = dma_alloc_coherent(&adapter->pdev->dev, sz,
+-				     &mem->pa, GFP_KERNEL);
++	/* The control queue resources are freed under a spinlock; contiguous
++	 * pages avoid IOMMU remapping and the use of vmap (and vunmap in the
++	 * dma_free_*() path).
++	 */
++	mem->va = dma_alloc_attrs(&adapter->pdev->dev, sz, &mem->pa,
++				  GFP_KERNEL, DMA_ATTR_FORCE_CONTIGUOUS);
+ 	mem->size = sz;
+ 
+ 	return mem->va;
+@@ -2331,8 +2335,8 @@ void idpf_free_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem)
+ {
+ 	struct idpf_adapter *adapter = hw->back;
+ 
+-	dma_free_coherent(&adapter->pdev->dev, mem->size,
+-			  mem->va, mem->pa);
++	dma_free_attrs(&adapter->pdev->dev, mem->size,
++		       mem->va, mem->pa, DMA_ATTR_FORCE_CONTIGUOUS);
+ 	mem->size = 0;
+ 	mem->va = NULL;
+ 	mem->pa = 0;
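The two idpf hunks above go together: once cq_lock is a spinlock, the control-queue ring can be freed in atomic context, and dma_free_coherent() on IOMMU-remapped (vmap'ed) memory may sleep in vunmap(). Forcing physically contiguous pages with DMA_ATTR_FORCE_CONTIGUOUS removes the vmap/vunmap step. A sketch of the combination, with hypothetical names:

#include <linux/dma-mapping.h>
#include <linux/spinlock.h>

struct ctlq {
	spinlock_t lock;
	void *va;
	dma_addr_t pa;
	size_t size;
};

static int ctlq_alloc(struct device *dev, struct ctlq *cq, size_t size)
{
	spin_lock_init(&cq->lock);
	cq->size = ALIGN(size, 4096);
	/* contiguous pages: no vmap, so freeing later under the spinlock
	 * cannot end up in a sleeping vunmap()
	 */
	cq->va = dma_alloc_attrs(dev, cq->size, &cq->pa, GFP_KERNEL,
				 DMA_ATTR_FORCE_CONTIGUOUS);
	return cq->va ? 0 : -ENOMEM;
}

static void ctlq_free(struct device *dev, struct ctlq *cq)
{
	spin_lock(&cq->lock);
	dma_free_attrs(dev, cq->size, cq->va, cq->pa,
		       DMA_ATTR_FORCE_CONTIGUOUS);
	cq->va = NULL;
	spin_unlock(&cq->lock);
}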
+diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
+index 082b0baf5d37c5..2a0c5a343e4727 100644
+--- a/drivers/net/ethernet/intel/igc/igc_main.c
++++ b/drivers/net/ethernet/intel/igc/igc_main.c
+@@ -6987,6 +6987,10 @@ static int igc_probe(struct pci_dev *pdev,
+ 	adapter->port_num = hw->bus.func;
+ 	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
+ 
++	/* Disable ASPM L1.2 on I226 devices to avoid packet loss */
++	if (igc_is_device_id_i226(hw))
++		pci_disable_link_state(pdev, PCIE_LINK_STATE_L1_2);
++
+ 	err = pci_save_state(pdev);
+ 	if (err)
+ 		goto err_ioremap;
+@@ -7368,6 +7372,9 @@ static int igc_resume(struct device *dev)
+ 	pci_enable_wake(pdev, PCI_D3hot, 0);
+ 	pci_enable_wake(pdev, PCI_D3cold, 0);
+ 
++	if (igc_is_device_id_i226(hw))
++		pci_disable_link_state(pdev, PCIE_LINK_STATE_L1_2);
++
+ 	if (igc_init_interrupt_scheme(adapter, true)) {
+ 		netdev_err(netdev, "Unable to allocate memory for queues\n");
+ 		return -ENOMEM;
+@@ -7480,6 +7487,9 @@ static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev)
+ 		pci_enable_wake(pdev, PCI_D3hot, 0);
+ 		pci_enable_wake(pdev, PCI_D3cold, 0);
+ 
++		if (igc_is_device_id_i226(hw))
++			pci_disable_link_state_locked(pdev, PCIE_LINK_STATE_L1_2);
++
+ 		/* In case of PCI error, adapter loses its HW address
+ 		 * so we should re-assign it here.
+ 		 */
+diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
+index f5449b73b9a76b..1e4cf89bd79ad2 100644
+--- a/drivers/net/ethernet/sun/niu.c
++++ b/drivers/net/ethernet/sun/niu.c
+@@ -3336,7 +3336,7 @@ static int niu_rbr_add_page(struct niu *np, struct rx_ring_info *rp,
+ 
+ 	addr = np->ops->map_page(np->device, page, 0,
+ 				 PAGE_SIZE, DMA_FROM_DEVICE);
+-	if (!addr) {
++	if (np->ops->mapping_error(np->device, addr)) {
+ 		__free_page(page);
+ 		return -ENOMEM;
+ 	}
+@@ -6672,6 +6672,8 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
+ 	len = skb_headlen(skb);
+ 	mapping = np->ops->map_single(np->device, skb->data,
+ 				      len, DMA_TO_DEVICE);
++	if (np->ops->mapping_error(np->device, mapping))
++		goto out_drop;
+ 
+ 	prod = rp->prod;
+ 
+@@ -6713,6 +6715,8 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
+ 		mapping = np->ops->map_page(np->device, skb_frag_page(frag),
+ 					    skb_frag_off(frag), len,
+ 					    DMA_TO_DEVICE);
++		if (np->ops->mapping_error(np->device, mapping))
++			goto out_unmap;
+ 
+ 		rp->tx_buffs[prod].skb = NULL;
+ 		rp->tx_buffs[prod].mapping = mapping;
+@@ -6737,6 +6741,19 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
+ out:
+ 	return NETDEV_TX_OK;
+ 
++out_unmap:
++	while (i--) {
++		const skb_frag_t *frag;
++
++		prod = PREVIOUS_TX(rp, prod);
++		frag = &skb_shinfo(skb)->frags[i];
++		np->ops->unmap_page(np->device, rp->tx_buffs[prod].mapping,
++				    skb_frag_size(frag), DMA_TO_DEVICE);
++	}
++
++	np->ops->unmap_single(np->device, rp->tx_buffs[rp->prod].mapping,
++			      skb_headlen(skb), DMA_TO_DEVICE);
++
+ out_drop:
+ 	rp->tx_errors++;
+ 	kfree_skb(skb);
+@@ -9638,6 +9655,11 @@ static void niu_pci_unmap_single(struct device *dev, u64 dma_address,
+ 	dma_unmap_single(dev, dma_address, size, direction);
+ }
+ 
++static int niu_pci_mapping_error(struct device *dev, u64 addr)
++{
++	return dma_mapping_error(dev, addr);
++}
++
+ static const struct niu_ops niu_pci_ops = {
+ 	.alloc_coherent	= niu_pci_alloc_coherent,
+ 	.free_coherent	= niu_pci_free_coherent,
+@@ -9645,6 +9667,7 @@ static const struct niu_ops niu_pci_ops = {
+ 	.unmap_page	= niu_pci_unmap_page,
+ 	.map_single	= niu_pci_map_single,
+ 	.unmap_single	= niu_pci_unmap_single,
++	.mapping_error	= niu_pci_mapping_error,
+ };
+ 
+ static void niu_driver_version(void)
+@@ -10011,6 +10034,11 @@ static void niu_phys_unmap_single(struct device *dev, u64 dma_address,
+ 	/* Nothing to do.  */
+ }
+ 
++static int niu_phys_mapping_error(struct device *dev, u64 dma_address)
++{
++	return false;
++}
++
+ static const struct niu_ops niu_phys_ops = {
+ 	.alloc_coherent	= niu_phys_alloc_coherent,
+ 	.free_coherent	= niu_phys_free_coherent,
+@@ -10018,6 +10046,7 @@ static const struct niu_ops niu_phys_ops = {
+ 	.unmap_page	= niu_phys_unmap_page,
+ 	.map_single	= niu_phys_map_single,
+ 	.unmap_single	= niu_phys_unmap_single,
++	.mapping_error	= niu_phys_mapping_error,
+ };
+ 
+ static int niu_of_probe(struct platform_device *op)
+diff --git a/drivers/net/ethernet/sun/niu.h b/drivers/net/ethernet/sun/niu.h
+index 04c215f91fc08e..0b169c08b0f2d1 100644
+--- a/drivers/net/ethernet/sun/niu.h
++++ b/drivers/net/ethernet/sun/niu.h
+@@ -2879,6 +2879,9 @@ struct tx_ring_info {
+ #define NEXT_TX(tp, index) \
+ 	(((index) + 1) < (tp)->pending ? ((index) + 1) : 0)
+ 
++#define PREVIOUS_TX(tp, index) \
++	(((index) - 1) >= 0 ? ((index) - 1) : (((tp)->pending) - 1))
++
+ static inline u32 niu_tx_avail(struct tx_ring_info *tp)
+ {
+ 	return (tp->pending -
+@@ -3140,6 +3143,7 @@ struct niu_ops {
+ 			  enum dma_data_direction direction);
+ 	void (*unmap_single)(struct device *dev, u64 dma_address,
+ 			     size_t size, enum dma_data_direction direction);
++	int (*mapping_error)(struct device *dev, u64 dma_address);
+ };
+ 
+ struct niu_link_config {
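PREVIOUS_TX() is plain modular ring arithmetic mirroring the existing NEXT_TX(); it is what the new out_unmap path in niu_start_xmit() uses to revisit already-mapped descriptors. A short worked example, assuming tp->pending == 4:

/* NEXT_TX(tp, 3)     -> 0   forward wrap at the end of the ring
 * PREVIOUS_TX(tp, 0) -> 3   backward wrap at the start of the ring
 * Stepping PREVIOUS_TX from the slot that failed to map visits the
 * already-mapped slots in reverse order, so each is unmapped once.
 */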
+diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
+index d8a6fea961c02b..ea2123ea6e387c 100644
+--- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c
++++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
+@@ -1585,6 +1585,7 @@ static void wx_set_rss_queues(struct wx *wx)
+ 
+ 	clear_bit(WX_FLAG_FDIR_HASH, wx->flags);
+ 
++	wx->ring_feature[RING_F_FDIR].indices = 1;
+ 	/* Use Flow Director in addition to RSS to ensure the best
+ 	 * distribution of flows across cores, even when an FDIR flow
+ 	 * isn't matched.
+diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
+index 0ee73a265545c3..c698f4ec751a2e 100644
+--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
++++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
+@@ -68,7 +68,6 @@ int txgbe_request_queue_irqs(struct wx *wx)
+ 		free_irq(wx->msix_q_entries[vector].vector,
+ 			 wx->q_vector[vector]);
+ 	}
+-	wx_reset_interrupt_capability(wx);
+ 	return err;
+ }
+ 
+@@ -169,6 +168,7 @@ void txgbe_free_misc_irq(struct txgbe *txgbe)
+ 	free_irq(txgbe->link_irq, txgbe);
+ 	free_irq(txgbe->misc.irq, txgbe);
+ 	txgbe_del_irq_domain(txgbe);
++	txgbe->wx->misc_irq_domain = false;
+ }
+ 
+ int txgbe_setup_misc_irq(struct txgbe *txgbe)
+diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+index 7e352837184fad..9ede260b85dcbf 100644
+--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
++++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+@@ -308,10 +308,14 @@ static int txgbe_open(struct net_device *netdev)
+ 
+ 	wx_configure(wx);
+ 
+-	err = txgbe_request_queue_irqs(wx);
++	err = txgbe_setup_misc_irq(wx->priv);
+ 	if (err)
+ 		goto err_free_resources;
+ 
++	err = txgbe_request_queue_irqs(wx);
++	if (err)
++		goto err_free_misc_irq;
++
+ 	/* Notify the stack of the actual queue counts. */
+ 	err = netif_set_real_num_tx_queues(netdev, wx->num_tx_queues);
+ 	if (err)
+@@ -327,6 +331,9 @@ static int txgbe_open(struct net_device *netdev)
+ 
+ err_free_irq:
+ 	wx_free_irq(wx);
++err_free_misc_irq:
++	txgbe_free_misc_irq(wx->priv);
++	wx_reset_interrupt_capability(wx);
+ err_free_resources:
+ 	wx_free_resources(wx);
+ err_reset:
+@@ -365,6 +372,7 @@ static int txgbe_close(struct net_device *netdev)
+ 
+ 	txgbe_down(wx);
+ 	wx_free_irq(wx);
++	txgbe_free_misc_irq(wx->priv);
+ 	wx_free_resources(wx);
+ 	txgbe_fdir_filter_exit(wx);
+ 	wx_control_hw(wx, false);
+@@ -410,7 +418,6 @@ static void txgbe_shutdown(struct pci_dev *pdev)
+ int txgbe_setup_tc(struct net_device *dev, u8 tc)
+ {
+ 	struct wx *wx = netdev_priv(dev);
+-	struct txgbe *txgbe = wx->priv;
+ 
+ 	/* Hardware has to reinitialize queues and interrupts to
+ 	 * match packet buffer alignment. Unfortunately, the
+@@ -421,7 +428,6 @@ int txgbe_setup_tc(struct net_device *dev, u8 tc)
+ 	else
+ 		txgbe_reset(wx);
+ 
+-	txgbe_free_misc_irq(txgbe);
+ 	wx_clear_interrupt_scheme(wx);
+ 
+ 	if (tc)
+@@ -430,7 +436,6 @@ int txgbe_setup_tc(struct net_device *dev, u8 tc)
+ 		netdev_reset_tc(dev);
+ 
+ 	wx_init_interrupt_scheme(wx);
+-	txgbe_setup_misc_irq(txgbe);
+ 
+ 	if (netif_running(dev))
+ 		txgbe_open(dev);
+@@ -677,13 +682,9 @@ static int txgbe_probe(struct pci_dev *pdev,
+ 
+ 	txgbe_init_fdir(txgbe);
+ 
+-	err = txgbe_setup_misc_irq(txgbe);
+-	if (err)
+-		goto err_release_hw;
+-
+ 	err = txgbe_init_phy(txgbe);
+ 	if (err)
+-		goto err_free_misc_irq;
++		goto err_release_hw;
+ 
+ 	err = register_netdev(netdev);
+ 	if (err)
+@@ -711,8 +712,6 @@ static int txgbe_probe(struct pci_dev *pdev,
+ 
+ err_remove_phy:
+ 	txgbe_remove_phy(txgbe);
+-err_free_misc_irq:
+-	txgbe_free_misc_irq(txgbe);
+ err_release_hw:
+ 	wx_clear_interrupt_scheme(wx);
+ 	wx_control_hw(wx, false);
+@@ -746,7 +745,6 @@ static void txgbe_remove(struct pci_dev *pdev)
+ 	unregister_netdev(netdev);
+ 
+ 	txgbe_remove_phy(txgbe);
+-	txgbe_free_misc_irq(txgbe);
+ 	wx_free_isb_resources(wx);
+ 
+ 	pci_release_selected_regions(pdev,
+diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
+index 531b1b6a37d190..2f8637224b69e3 100644
+--- a/drivers/net/usb/lan78xx.c
++++ b/drivers/net/usb/lan78xx.c
+@@ -4229,8 +4229,6 @@ static void lan78xx_disconnect(struct usb_interface *intf)
+ 	if (!dev)
+ 		return;
+ 
+-	netif_napi_del(&dev->napi);
+-
+ 	udev = interface_to_usbdev(intf);
+ 	net = dev->net;
+ 
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index 6d36cb204f9bc5..54c5d9a14c6724 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -765,6 +765,26 @@ static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx)
+ 	return (unsigned long)mrg_ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1);
+ }
+ 
++static int check_mergeable_len(struct net_device *dev, void *mrg_ctx,
++			       unsigned int len)
++{
++	unsigned int headroom, tailroom, room, truesize;
++
++	truesize = mergeable_ctx_to_truesize(mrg_ctx);
++	headroom = mergeable_ctx_to_headroom(mrg_ctx);
++	tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
++	room = SKB_DATA_ALIGN(headroom + tailroom);
++
++	if (len > truesize - room) {
++		pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
++			 dev->name, len, (unsigned long)(truesize - room));
++		DEV_STATS_INC(dev, rx_length_errors);
++		return -1;
++	}
++
++	return 0;
++}
++
+ static struct sk_buff *virtnet_build_skb(void *buf, unsigned int buflen,
+ 					 unsigned int headroom,
+ 					 unsigned int len)
+@@ -1098,15 +1118,29 @@ static void sg_fill_dma(struct scatterlist *sg, dma_addr_t addr, u32 len)
+ 	sg->length = len;
+ }
+ 
++/* Note that @len is the length of received data without virtio header */
+ static struct xdp_buff *buf_to_xdp(struct virtnet_info *vi,
+-				   struct receive_queue *rq, void *buf, u32 len)
++				   struct receive_queue *rq, void *buf,
++				   u32 len, bool first_buf)
+ {
+ 	struct xdp_buff *xdp;
+ 	u32 bufsize;
+ 
+ 	xdp = (struct xdp_buff *)buf;
+ 
+-	bufsize = xsk_pool_get_rx_frame_size(rq->xsk_pool) + vi->hdr_len;
++	/* In virtnet_add_recvbuf_xsk, we use part of XDP_PACKET_HEADROOM for
++	 * the virtio header and ask the vhost to fill data from
++	 *         hard_start + XDP_PACKET_HEADROOM - vi->hdr_len
++	 * The first buffer carries the virtio header, so the region left for
++	 * frame data is
++	 *         xsk_pool_get_rx_frame_size()
++	 * Buffers other than the first do not carry the virtio header, so
++	 * their maximum frame data length is
++	 *         xsk_pool_get_rx_frame_size() + vi->hdr_len
++	 */
++	bufsize = xsk_pool_get_rx_frame_size(rq->xsk_pool);
++	if (!first_buf)
++		bufsize += vi->hdr_len;
+ 
+ 	if (unlikely(len > bufsize)) {
+ 		pr_debug("%s: rx error: len %u exceeds truesize %u\n",
+@@ -1231,7 +1265,7 @@ static int xsk_append_merge_buffer(struct virtnet_info *vi,
+ 
+ 		u64_stats_add(&stats->bytes, len);
+ 
+-		xdp = buf_to_xdp(vi, rq, buf, len);
++		xdp = buf_to_xdp(vi, rq, buf, len, false);
+ 		if (!xdp)
+ 			goto err;
+ 
+@@ -1329,7 +1363,7 @@ static void virtnet_receive_xsk_buf(struct virtnet_info *vi, struct receive_queu
+ 
+ 	u64_stats_add(&stats->bytes, len);
+ 
+-	xdp = buf_to_xdp(vi, rq, buf, len);
++	xdp = buf_to_xdp(vi, rq, buf, len, true);
+ 	if (!xdp)
+ 		return;
+ 
+@@ -1649,7 +1683,8 @@ static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
+  * across multiple buffers (num_buf > 1), and we make sure buffers
+  * have enough headroom.
+  */
+-static struct page *xdp_linearize_page(struct receive_queue *rq,
++static struct page *xdp_linearize_page(struct net_device *dev,
++				       struct receive_queue *rq,
+ 				       int *num_buf,
+ 				       struct page *p,
+ 				       int offset,
+@@ -1669,18 +1704,27 @@ static struct page *xdp_linearize_page(struct receive_queue *rq,
+ 	memcpy(page_address(page) + page_off, page_address(p) + offset, *len);
+ 	page_off += *len;
+ 
++	/* Only mergeable mode can enter this while loop. In small mode,
++	 * *num_buf == 1, so the loop body never executes.
++	 */
+ 	while (--*num_buf) {
+ 		unsigned int buflen;
+ 		void *buf;
++		void *ctx;
+ 		int off;
+ 
+-		buf = virtnet_rq_get_buf(rq, &buflen, NULL);
++		buf = virtnet_rq_get_buf(rq, &buflen, &ctx);
+ 		if (unlikely(!buf))
+ 			goto err_buf;
+ 
+ 		p = virt_to_head_page(buf);
+ 		off = buf - page_address(p);
+ 
++		if (check_mergeable_len(dev, ctx, buflen)) {
++			put_page(p);
++			goto err_buf;
++		}
++
+ 		/* guard against a misconfigured or uncooperative backend that
+ 		 * is sending packet larger than the MTU.
+ 		 */
+@@ -1769,7 +1813,7 @@ static struct sk_buff *receive_small_xdp(struct net_device *dev,
+ 		headroom = vi->hdr_len + header_offset;
+ 		buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
+ 			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+-		xdp_page = xdp_linearize_page(rq, &num_buf, page,
++		xdp_page = xdp_linearize_page(dev, rq, &num_buf, page,
+ 					      offset, header_offset,
+ 					      &tlen);
+ 		if (!xdp_page)
+@@ -2104,7 +2148,7 @@ static void *mergeable_xdp_get_buf(struct virtnet_info *vi,
+ 	 */
+ 	if (!xdp_prog->aux->xdp_has_frags) {
+ 		/* linearize data for XDP */
+-		xdp_page = xdp_linearize_page(rq, num_buf,
++		xdp_page = xdp_linearize_page(vi->dev, rq, num_buf,
+ 					      *page, offset,
+ 					      XDP_PACKET_HEADROOM,
+ 					      len);
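check_mergeable_len() above bounds the device-reported length by the space the driver actually offered: truesize minus the headroom and tailroom reserved around the frame. A worked sketch of the arithmetic (the headroom value is illustrative, and sizeof(struct skb_shared_info) depends on kernel config):

/* With XDP enabled (headroom != 0), e.g. truesize = 4096, headroom = 256:
 *   tailroom = sizeof(struct skb_shared_info);
 *   room     = SKB_DATA_ALIGN(headroom + tailroom);
 *   bound    = truesize - room;
 * With headroom == 0 the tailroom is 0 too, so the full truesize is
 * usable. Any len above the bound means the backend wrote past the
 * buffer it was given; the frame is counted in rx_length_errors and
 * dropped.
 */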
+diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.c b/drivers/net/wireless/ath/ath12k/dp_rx.c
+index 1623298ba2c47e..eebdcc16e8fc42 100644
+--- a/drivers/net/wireless/ath/ath12k/dp_rx.c
++++ b/drivers/net/wireless/ath/ath12k/dp_rx.c
+@@ -1868,8 +1868,7 @@ static void ath12k_dp_rx_h_csum_offload(struct ath12k *ar, struct sk_buff *msdu)
+ 			  CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
+ }
+ 
+-static int ath12k_dp_rx_crypto_mic_len(struct ath12k *ar,
+-				       enum hal_encrypt_type enctype)
++int ath12k_dp_rx_crypto_mic_len(struct ath12k *ar, enum hal_encrypt_type enctype)
+ {
+ 	switch (enctype) {
+ 	case HAL_ENCRYPT_TYPE_OPEN:
+diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.h b/drivers/net/wireless/ath/ath12k/dp_rx.h
+index eb1f92559179bb..4232091d9e3289 100644
+--- a/drivers/net/wireless/ath/ath12k/dp_rx.h
++++ b/drivers/net/wireless/ath/ath12k/dp_rx.h
+@@ -143,4 +143,7 @@ int ath12k_dp_htt_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len,
+ 			   int (*iter)(struct ath12k_base *ar, u16 tag, u16 len,
+ 				       const void *ptr, void *data),
+ 			   void *data);
++
++int ath12k_dp_rx_crypto_mic_len(struct ath12k *ar, enum hal_encrypt_type enctype);
++
+ #endif /* ATH12K_DP_RX_H */
+diff --git a/drivers/net/wireless/ath/ath12k/dp_tx.c b/drivers/net/wireless/ath/ath12k/dp_tx.c
+index 734e3da4cbf191..21e07b5cee5705 100644
+--- a/drivers/net/wireless/ath/ath12k/dp_tx.c
++++ b/drivers/net/wireless/ath/ath12k/dp_tx.c
+@@ -227,7 +227,7 @@ int ath12k_dp_tx(struct ath12k *ar, struct ath12k_vif *arvif,
+ 	struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb);
+ 	struct hal_tcl_data_cmd *hal_tcl_desc;
+ 	struct hal_tx_msdu_ext_desc *msg;
+-	struct sk_buff *skb_ext_desc;
++	struct sk_buff *skb_ext_desc = NULL;
+ 	struct hal_srng *tcl_ring;
+ 	struct ieee80211_hdr *hdr = (void *)skb->data;
+ 	struct dp_tx_ring *tx_ring;
+@@ -397,17 +397,15 @@ int ath12k_dp_tx(struct ath12k *ar, struct ath12k_vif *arvif,
+ 			if (ret < 0) {
+ 				ath12k_dbg(ab, ATH12K_DBG_DP_TX,
+ 					   "Failed to add HTT meta data, dropping packet\n");
+-				goto fail_unmap_dma;
++				goto fail_free_ext_skb;
+ 			}
+ 		}
+ 
+ 		ti.paddr = dma_map_single(ab->dev, skb_ext_desc->data,
+ 					  skb_ext_desc->len, DMA_TO_DEVICE);
+ 		ret = dma_mapping_error(ab->dev, ti.paddr);
+-		if (ret) {
+-			kfree_skb(skb_ext_desc);
+-			goto fail_unmap_dma;
+-		}
++		if (ret)
++			goto fail_free_ext_skb;
+ 
+ 		ti.data_len = skb_ext_desc->len;
+ 		ti.type = HAL_TCL_DESC_TYPE_EXT_DESC;
+@@ -443,7 +441,7 @@ int ath12k_dp_tx(struct ath12k *ar, struct ath12k_vif *arvif,
+ 			ring_selector++;
+ 		}
+ 
+-		goto fail_unmap_dma;
++		goto fail_unmap_dma_ext;
+ 	}
+ 
+ 	ath12k_hal_tx_cmd_desc_setup(ab, hal_tcl_desc, &ti);
+@@ -459,13 +457,16 @@ int ath12k_dp_tx(struct ath12k *ar, struct ath12k_vif *arvif,
+ 
+ 	return 0;
+ 
+-fail_unmap_dma:
+-	dma_unmap_single(ab->dev, ti.paddr, ti.data_len, DMA_TO_DEVICE);
+-
++fail_unmap_dma_ext:
+ 	if (skb_cb->paddr_ext_desc)
+ 		dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
+ 				 sizeof(struct hal_tx_msdu_ext_desc),
+ 				 DMA_TO_DEVICE);
++fail_free_ext_skb:
++	kfree_skb(skb_ext_desc);
++
++fail_unmap_dma:
++	dma_unmap_single(ab->dev, ti.paddr, ti.data_len, DMA_TO_DEVICE);
+ 
+ fail_remove_tx_buf:
+ 	ath12k_dp_tx_release_txbuf(dp, tx_desc, pool_id);
+diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c
+index fbf5d57283576f..4ca684278c3672 100644
+--- a/drivers/net/wireless/ath/ath12k/mac.c
++++ b/drivers/net/wireless/ath/ath12k/mac.c
+@@ -3864,8 +3864,8 @@ static int ath12k_install_key(struct ath12k_vif *arvif,
+ 
+ 	switch (key->cipher) {
+ 	case WLAN_CIPHER_SUITE_CCMP:
++	case WLAN_CIPHER_SUITE_CCMP_256:
+ 		arg.key_cipher = WMI_CIPHER_AES_CCM;
+-		/* TODO: Re-check if flag is valid */
+ 		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
+ 		break;
+ 	case WLAN_CIPHER_SUITE_TKIP:
+@@ -3873,12 +3873,10 @@ static int ath12k_install_key(struct ath12k_vif *arvif,
+ 		arg.key_txmic_len = 8;
+ 		arg.key_rxmic_len = 8;
+ 		break;
+-	case WLAN_CIPHER_SUITE_CCMP_256:
+-		arg.key_cipher = WMI_CIPHER_AES_CCM;
+-		break;
+ 	case WLAN_CIPHER_SUITE_GCMP:
+ 	case WLAN_CIPHER_SUITE_GCMP_256:
+ 		arg.key_cipher = WMI_CIPHER_AES_GCM;
++		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
+ 		break;
+ 	default:
+ 		ath12k_warn(ar->ab, "cipher %d is not supported\n", key->cipher);
+@@ -5725,6 +5723,8 @@ static int ath12k_mac_mgmt_tx_wmi(struct ath12k *ar, struct ath12k_vif *arvif,
+ 	struct ath12k_base *ab = ar->ab;
+ 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ 	struct ieee80211_tx_info *info;
++	enum hal_encrypt_type enctype;
++	unsigned int mic_len;
+ 	dma_addr_t paddr;
+ 	int buf_id;
+ 	int ret;
+@@ -5738,12 +5738,16 @@ static int ath12k_mac_mgmt_tx_wmi(struct ath12k *ar, struct ath12k_vif *arvif,
+ 		return -ENOSPC;
+ 
+ 	info = IEEE80211_SKB_CB(skb);
+-	if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)) {
++	if ((ATH12K_SKB_CB(skb)->flags & ATH12K_SKB_CIPHER_SET) &&
++	    !(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)) {
+ 		if ((ieee80211_is_action(hdr->frame_control) ||
+ 		     ieee80211_is_deauth(hdr->frame_control) ||
+ 		     ieee80211_is_disassoc(hdr->frame_control)) &&
+ 		     ieee80211_has_protected(hdr->frame_control)) {
+-			skb_put(skb, IEEE80211_CCMP_MIC_LEN);
++			enctype =
++			    ath12k_dp_tx_get_encrypt_type(ATH12K_SKB_CB(skb)->cipher);
++			mic_len = ath12k_dp_rx_crypto_mic_len(ar, enctype);
++			skb_put(skb, mic_len);
+ 		}
+ 	}
+ 
+diff --git a/drivers/net/wireless/ath/ath6kl/bmi.c b/drivers/net/wireless/ath/ath6kl/bmi.c
+index af98e871199d31..5a9e93fd1ef42a 100644
+--- a/drivers/net/wireless/ath/ath6kl/bmi.c
++++ b/drivers/net/wireless/ath/ath6kl/bmi.c
+@@ -87,7 +87,9 @@ int ath6kl_bmi_get_target_info(struct ath6kl *ar,
+ 		 * We need to do some backwards compatibility to make this work.
+ 		 */
+ 		if (le32_to_cpu(targ_info->byte_count) != sizeof(*targ_info)) {
+-			WARN_ON(1);
++			ath6kl_err("mismatched byte count %d vs. expected %zd\n",
++				   le32_to_cpu(targ_info->byte_count),
++				   sizeof(*targ_info));
+ 			return -EINVAL;
+ 		}
+ 
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index eca764fede48f2..abd42598fc78b6 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -380,7 +380,7 @@ static void nvme_log_err_passthru(struct request *req)
+ 		nr->cmd->common.cdw12,
+ 		nr->cmd->common.cdw13,
+ 		nr->cmd->common.cdw14,
+-		nr->cmd->common.cdw14);
++		nr->cmd->common.cdw15);
+ }
+ 
+ enum nvme_disposition {
+diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
+index 190f55e6d7532e..3062562c096a13 100644
+--- a/drivers/nvme/target/nvmet.h
++++ b/drivers/nvme/target/nvmet.h
+@@ -714,6 +714,8 @@ static inline void nvmet_req_bio_put(struct nvmet_req *req, struct bio *bio)
+ {
+ 	if (bio != &req->b.inline_bio)
+ 		bio_put(bio);
++	else
++		bio_uninit(bio);
+ }
+ 
+ #ifdef CONFIG_NVME_TARGET_AUTH
+diff --git a/drivers/platform/mellanox/mlxbf-pmc.c b/drivers/platform/mellanox/mlxbf-pmc.c
+index 9ff7b487dc4892..fbb8128d19de48 100644
+--- a/drivers/platform/mellanox/mlxbf-pmc.c
++++ b/drivers/platform/mellanox/mlxbf-pmc.c
+@@ -710,7 +710,7 @@ static const struct mlxbf_pmc_events mlxbf_pmc_llt_events[] = {
+ 	{101, "GDC_BANK0_HIT_DCL_PARTIAL"},
+ 	{102, "GDC_BANK0_EVICT_DCL"},
+ 	{103, "GDC_BANK0_G_RSE_PIPE_CACHE_DATA0"},
+-	{103, "GDC_BANK0_G_RSE_PIPE_CACHE_DATA1"},
++	{104, "GDC_BANK0_G_RSE_PIPE_CACHE_DATA1"},
+ 	{105, "GDC_BANK0_ARB_STRB"},
+ 	{106, "GDC_BANK0_ARB_WAIT"},
+ 	{107, "GDC_BANK0_GGA_STRB"},
+diff --git a/drivers/platform/mellanox/mlxbf-tmfifo.c b/drivers/platform/mellanox/mlxbf-tmfifo.c
+index 6c834e39352d61..d2c27cc0733bbb 100644
+--- a/drivers/platform/mellanox/mlxbf-tmfifo.c
++++ b/drivers/platform/mellanox/mlxbf-tmfifo.c
+@@ -281,7 +281,8 @@ static int mlxbf_tmfifo_alloc_vrings(struct mlxbf_tmfifo *fifo,
+ 		vring->align = SMP_CACHE_BYTES;
+ 		vring->index = i;
+ 		vring->vdev_id = tm_vdev->vdev.id.device;
+-		vring->drop_desc.len = VRING_DROP_DESC_MAX_LEN;
++		vring->drop_desc.len = cpu_to_virtio32(&tm_vdev->vdev,
++						       VRING_DROP_DESC_MAX_LEN);
+ 		dev = &tm_vdev->vdev.dev;
+ 
+ 		size = vring_size(vring->num, vring->align);
+diff --git a/drivers/platform/mellanox/mlxreg-lc.c b/drivers/platform/mellanox/mlxreg-lc.c
+index 43d119e3a47342..99152676dbd282 100644
+--- a/drivers/platform/mellanox/mlxreg-lc.c
++++ b/drivers/platform/mellanox/mlxreg-lc.c
+@@ -688,7 +688,7 @@ static int mlxreg_lc_completion_notify(void *handle, struct i2c_adapter *parent,
+ 	if (regval & mlxreg_lc->data->mask) {
+ 		mlxreg_lc->state |= MLXREG_LC_SYNCED;
+ 		mlxreg_lc_state_update_locked(mlxreg_lc, MLXREG_LC_SYNCED, 1);
+-		if (mlxreg_lc->state & ~MLXREG_LC_POWERED) {
++		if (!(mlxreg_lc->state & MLXREG_LC_POWERED)) {
+ 			err = mlxreg_lc_power_on_off(mlxreg_lc, 1);
+ 			if (err)
+ 				goto mlxreg_lc_regmap_power_on_off_fail;
+diff --git a/drivers/platform/mellanox/nvsw-sn2201.c b/drivers/platform/mellanox/nvsw-sn2201.c
+index abe7be602f846e..e708521e52740a 100644
+--- a/drivers/platform/mellanox/nvsw-sn2201.c
++++ b/drivers/platform/mellanox/nvsw-sn2201.c
+@@ -1088,7 +1088,7 @@ static int nvsw_sn2201_i2c_completion_notify(void *handle, int id)
+ 	if (!nvsw_sn2201->main_mux_devs->adapter) {
+ 		err = -ENODEV;
+ 		dev_err(nvsw_sn2201->dev, "Failed to get adapter for bus %d\n",
+-			nvsw_sn2201->cpld_devs->nr);
++			nvsw_sn2201->main_mux_devs->nr);
+ 		goto i2c_get_adapter_main_fail;
+ 	}
+ 
+diff --git a/drivers/platform/x86/amd/pmc/pmc-quirks.c b/drivers/platform/x86/amd/pmc/pmc-quirks.c
+index 2e3f6fc67c568d..7ed12c1d3b34c0 100644
+--- a/drivers/platform/x86/amd/pmc/pmc-quirks.c
++++ b/drivers/platform/x86/amd/pmc/pmc-quirks.c
+@@ -224,6 +224,15 @@ static const struct dmi_system_id fwbug_list[] = {
+ 			DMI_MATCH(DMI_BOARD_NAME, "WUJIE14-GX4HRXL"),
+ 		}
+ 	},
++	/* https://bugzilla.kernel.org/show_bug.cgi?id=220116 */
++	{
++		.ident = "PCSpecialist Lafite Pro V 14M",
++		.driver_data = &quirk_spurious_8042,
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "PCSpecialist"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Lafite Pro V 14M"),
++		}
++	},
+ 	{}
+ };
+ 
+diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/dell-wmi-sysman.h b/drivers/platform/x86/dell/dell-wmi-sysman/dell-wmi-sysman.h
+index 3ad33a094588c6..817ee7ba07ca08 100644
+--- a/drivers/platform/x86/dell/dell-wmi-sysman/dell-wmi-sysman.h
++++ b/drivers/platform/x86/dell/dell-wmi-sysman/dell-wmi-sysman.h
+@@ -89,6 +89,11 @@ extern struct wmi_sysman_priv wmi_priv;
+ 
+ enum { ENUM, INT, STR, PO };
+ 
++#define ENUM_MIN_ELEMENTS		8
++#define INT_MIN_ELEMENTS		9
++#define STR_MIN_ELEMENTS		8
++#define PO_MIN_ELEMENTS			4
++
+ enum {
+ 	ATTR_NAME,
+ 	DISPL_NAME_LANG_CODE,
+diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/enum-attributes.c b/drivers/platform/x86/dell/dell-wmi-sysman/enum-attributes.c
+index 8cc212c8526683..fc2f58b4cbc6ef 100644
+--- a/drivers/platform/x86/dell/dell-wmi-sysman/enum-attributes.c
++++ b/drivers/platform/x86/dell/dell-wmi-sysman/enum-attributes.c
+@@ -23,9 +23,10 @@ static ssize_t current_value_show(struct kobject *kobj, struct kobj_attribute *a
+ 	obj = get_wmiobj_pointer(instance_id, DELL_WMI_BIOS_ENUMERATION_ATTRIBUTE_GUID);
+ 	if (!obj)
+ 		return -EIO;
+-	if (obj->package.elements[CURRENT_VAL].type != ACPI_TYPE_STRING) {
++	if (obj->type != ACPI_TYPE_PACKAGE || obj->package.count < ENUM_MIN_ELEMENTS ||
++	    obj->package.elements[CURRENT_VAL].type != ACPI_TYPE_STRING) {
+ 		kfree(obj);
+-		return -EINVAL;
++		return -EIO;
+ 	}
+ 	ret = snprintf(buf, PAGE_SIZE, "%s\n", obj->package.elements[CURRENT_VAL].string.pointer);
+ 	kfree(obj);
+diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/int-attributes.c b/drivers/platform/x86/dell/dell-wmi-sysman/int-attributes.c
+index 951e75b538fad4..73524806423914 100644
+--- a/drivers/platform/x86/dell/dell-wmi-sysman/int-attributes.c
++++ b/drivers/platform/x86/dell/dell-wmi-sysman/int-attributes.c
+@@ -25,9 +25,10 @@ static ssize_t current_value_show(struct kobject *kobj, struct kobj_attribute *a
+ 	obj = get_wmiobj_pointer(instance_id, DELL_WMI_BIOS_INTEGER_ATTRIBUTE_GUID);
+ 	if (!obj)
+ 		return -EIO;
+-	if (obj->package.elements[CURRENT_VAL].type != ACPI_TYPE_INTEGER) {
++	if (obj->type != ACPI_TYPE_PACKAGE || obj->package.count < INT_MIN_ELEMENTS ||
++	    obj->package.elements[CURRENT_VAL].type != ACPI_TYPE_INTEGER) {
+ 		kfree(obj);
+-		return -EINVAL;
++		return -EIO;
+ 	}
+ 	ret = snprintf(buf, PAGE_SIZE, "%lld\n", obj->package.elements[CURRENT_VAL].integer.value);
+ 	kfree(obj);
+diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c b/drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c
+index d8f1bf5e58a0f4..3167e06d416ede 100644
+--- a/drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c
++++ b/drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c
+@@ -26,9 +26,10 @@ static ssize_t is_enabled_show(struct kobject *kobj, struct kobj_attribute *attr
+ 	obj = get_wmiobj_pointer(instance_id, DELL_WMI_BIOS_PASSOBJ_ATTRIBUTE_GUID);
+ 	if (!obj)
+ 		return -EIO;
+-	if (obj->package.elements[IS_PASS_SET].type != ACPI_TYPE_INTEGER) {
++	if (obj->type != ACPI_TYPE_PACKAGE || obj->package.count < PO_MIN_ELEMENTS ||
++	    obj->package.elements[IS_PASS_SET].type != ACPI_TYPE_INTEGER) {
+ 		kfree(obj);
+-		return -EINVAL;
++		return -EIO;
+ 	}
+ 	ret = snprintf(buf, PAGE_SIZE, "%lld\n", obj->package.elements[IS_PASS_SET].integer.value);
+ 	kfree(obj);
+diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/string-attributes.c b/drivers/platform/x86/dell/dell-wmi-sysman/string-attributes.c
+index c392f0ecf8b55b..0d2c74f8d1aad7 100644
+--- a/drivers/platform/x86/dell/dell-wmi-sysman/string-attributes.c
++++ b/drivers/platform/x86/dell/dell-wmi-sysman/string-attributes.c
+@@ -25,9 +25,10 @@ static ssize_t current_value_show(struct kobject *kobj, struct kobj_attribute *a
+ 	obj = get_wmiobj_pointer(instance_id, DELL_WMI_BIOS_STRING_ATTRIBUTE_GUID);
+ 	if (!obj)
+ 		return -EIO;
+-	if (obj->package.elements[CURRENT_VAL].type != ACPI_TYPE_STRING) {
++	if (obj->type != ACPI_TYPE_PACKAGE || obj->package.count < STR_MIN_ELEMENTS ||
++	    obj->package.elements[CURRENT_VAL].type != ACPI_TYPE_STRING) {
+ 		kfree(obj);
+-		return -EINVAL;
++		return -EIO;
+ 	}
+ 	ret = snprintf(buf, PAGE_SIZE, "%s\n", obj->package.elements[CURRENT_VAL].string.pointer);
+ 	kfree(obj);
+diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c b/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
+index 40ddc6eb75624e..f5402b71465729 100644
+--- a/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
++++ b/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
+@@ -25,7 +25,6 @@ struct wmi_sysman_priv wmi_priv = {
+ /* reset bios to defaults */
+ static const char * const reset_types[] = {"builtinsafe", "lastknowngood", "factory", "custom"};
+ static int reset_option = -1;
+-static const struct class *fw_attr_class;
+ 
+ 
+ /**
+@@ -408,10 +407,10 @@ static int init_bios_attributes(int attr_type, const char *guid)
+ 		return retval;
+ 
+ 	switch (attr_type) {
+-	case ENUM:	min_elements = 8;	break;
+-	case INT:	min_elements = 9;	break;
+-	case STR:	min_elements = 8;	break;
+-	case PO:	min_elements = 4;	break;
++	case ENUM:	min_elements = ENUM_MIN_ELEMENTS;	break;
++	case INT:	min_elements = INT_MIN_ELEMENTS;	break;
++	case STR:	min_elements = STR_MIN_ELEMENTS;	break;
++	case PO:	min_elements = PO_MIN_ELEMENTS;		break;
+ 	default:
+ 		pr_err("Error: Unknown attr_type: %d\n", attr_type);
+ 		return -EINVAL;
+@@ -541,15 +540,11 @@ static int __init sysman_init(void)
+ 		goto err_exit_bios_attr_pass_interface;
+ 	}
+ 
+-	ret = fw_attributes_class_get(&fw_attr_class);
+-	if (ret)
+-		goto err_exit_bios_attr_pass_interface;
+-
+-	wmi_priv.class_dev = device_create(fw_attr_class, NULL, MKDEV(0, 0),
++	wmi_priv.class_dev = device_create(&firmware_attributes_class, NULL, MKDEV(0, 0),
+ 				  NULL, "%s", DRIVER_NAME);
+ 	if (IS_ERR(wmi_priv.class_dev)) {
+ 		ret = PTR_ERR(wmi_priv.class_dev);
+-		goto err_unregister_class;
++		goto err_exit_bios_attr_pass_interface;
+ 	}
+ 
+ 	wmi_priv.main_dir_kset = kset_create_and_add("attributes", NULL,
+@@ -602,10 +597,7 @@ static int __init sysman_init(void)
+ 	release_attributes_data();
+ 
+ err_destroy_classdev:
+-	device_destroy(fw_attr_class, MKDEV(0, 0));
+-
+-err_unregister_class:
+-	fw_attributes_class_put();
++	device_unregister(wmi_priv.class_dev);
+ 
+ err_exit_bios_attr_pass_interface:
+ 	exit_bios_attr_pass_interface();
+@@ -619,8 +611,7 @@ static int __init sysman_init(void)
+ static void __exit sysman_exit(void)
+ {
+ 	release_attributes_data();
+-	device_destroy(fw_attr_class, MKDEV(0, 0));
+-	fw_attributes_class_put();
++	device_unregister(wmi_priv.class_dev);
+ 	exit_bios_attr_set_interface();
+ 	exit_bios_attr_pass_interface();
+ }
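Each dell-wmi-sysman show() handler above now validates the whole WMI object before dereferencing package elements: the object must be an ACPI package with at least the per-type minimum element count, and only then is the element's ACPI type checked. A hedged sketch of that validation as one helper (the helper itself is hypothetical; the drivers open-code it):

#include <linux/acpi.h>

/* Return 0 if obj is a package with at least 'min' elements and element
 * 'idx' has the expected ACPI type; -EIO otherwise, matching the sysfs
 * handlers' new error code.
 */
static int check_wmi_package(const union acpi_object *obj, u32 min,
			     u32 idx, acpi_object_type type)
{
	if (!obj || obj->type != ACPI_TYPE_PACKAGE ||
	    obj->package.count < min ||
	    obj->package.elements[idx].type != type)
		return -EIO;
	return 0;
}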
+diff --git a/drivers/platform/x86/firmware_attributes_class.c b/drivers/platform/x86/firmware_attributes_class.c
+index 182a07d8ae3dfa..87672c49e86ae3 100644
+--- a/drivers/platform/x86/firmware_attributes_class.c
++++ b/drivers/platform/x86/firmware_attributes_class.c
+@@ -2,48 +2,35 @@
+ 
+ /* Firmware attributes class helper module */
+ 
+-#include <linux/mutex.h>
+-#include <linux/device/class.h>
+ #include <linux/module.h>
+ #include "firmware_attributes_class.h"
+ 
+-static DEFINE_MUTEX(fw_attr_lock);
+-static int fw_attr_inuse;
+-
+-static const struct class firmware_attributes_class = {
++const struct class firmware_attributes_class = {
+ 	.name = "firmware-attributes",
+ };
++EXPORT_SYMBOL_GPL(firmware_attributes_class);
++
++static __init int fw_attributes_class_init(void)
++{
++	return class_register(&firmware_attributes_class);
++}
++module_init(fw_attributes_class_init);
++
++static __exit void fw_attributes_class_exit(void)
++{
++	class_unregister(&firmware_attributes_class);
++}
++module_exit(fw_attributes_class_exit);
+ 
+ int fw_attributes_class_get(const struct class **fw_attr_class)
+ {
+-	int err;
+-
+-	mutex_lock(&fw_attr_lock);
+-	if (!fw_attr_inuse) { /*first time class is being used*/
+-		err = class_register(&firmware_attributes_class);
+-		if (err) {
+-			mutex_unlock(&fw_attr_lock);
+-			return err;
+-		}
+-	}
+-	fw_attr_inuse++;
+ 	*fw_attr_class = &firmware_attributes_class;
+-	mutex_unlock(&fw_attr_lock);
+ 	return 0;
+ }
+ EXPORT_SYMBOL_GPL(fw_attributes_class_get);
+ 
+ int fw_attributes_class_put(void)
+ {
+-	mutex_lock(&fw_attr_lock);
+-	if (!fw_attr_inuse) {
+-		mutex_unlock(&fw_attr_lock);
+-		return -EINVAL;
+-	}
+-	fw_attr_inuse--;
+-	if (!fw_attr_inuse) /* No more consumers */
+-		class_unregister(&firmware_attributes_class);
+-	mutex_unlock(&fw_attr_lock);
+ 	return 0;
+ }
+ EXPORT_SYMBOL_GPL(fw_attributes_class_put);
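With the class registered once at module init, consumers no longer need the fw_attributes_class_get()/fw_attributes_class_put() refcounting and can reference firmware_attributes_class directly, as the dell, hp and think-lmi hunks in this patch do. A sketch of the resulting consumer pattern (device name hypothetical):

#include <linux/device.h>
#include "firmware_attributes_class.h"

static struct device *attr_dev;

static int consumer_init(void)
{
	attr_dev = device_create(&firmware_attributes_class, NULL,
				 MKDEV(0, 0), NULL, "example-driver");
	return PTR_ERR_OR_ZERO(attr_dev);
}

static void consumer_exit(void)
{
	device_unregister(attr_dev);
}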
+diff --git a/drivers/platform/x86/firmware_attributes_class.h b/drivers/platform/x86/firmware_attributes_class.h
+index 363c75f1ac1b89..ef6c3764a83497 100644
+--- a/drivers/platform/x86/firmware_attributes_class.h
++++ b/drivers/platform/x86/firmware_attributes_class.h
+@@ -5,6 +5,9 @@
+ #ifndef FW_ATTR_CLASS_H
+ #define FW_ATTR_CLASS_H
+ 
++#include <linux/device/class.h>
++
++extern const struct class firmware_attributes_class;
+ int fw_attributes_class_get(const struct class **fw_attr_class);
+ int fw_attributes_class_put(void);
+ 
+diff --git a/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c b/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c
+index 2dc50152158a3d..00b04adb4f191b 100644
+--- a/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c
++++ b/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c
+@@ -24,8 +24,6 @@ struct bioscfg_priv bioscfg_drv = {
+ 	.mutex = __MUTEX_INITIALIZER(bioscfg_drv.mutex),
+ };
+ 
+-static const struct class *fw_attr_class;
+-
+ ssize_t display_name_language_code_show(struct kobject *kobj,
+ 					struct kobj_attribute *attr,
+ 					char *buf)
+@@ -972,11 +970,7 @@ static int __init hp_init(void)
+ 	if (ret)
+ 		return ret;
+ 
+-	ret = fw_attributes_class_get(&fw_attr_class);
+-	if (ret)
+-		goto err_unregister_class;
+-
+-	bioscfg_drv.class_dev = device_create(fw_attr_class, NULL, MKDEV(0, 0),
++	bioscfg_drv.class_dev = device_create(&firmware_attributes_class, NULL, MKDEV(0, 0),
+ 					      NULL, "%s", DRIVER_NAME);
+ 	if (IS_ERR(bioscfg_drv.class_dev)) {
+ 		ret = PTR_ERR(bioscfg_drv.class_dev);
+@@ -1043,10 +1037,9 @@ static int __init hp_init(void)
+ 	release_attributes_data();
+ 
+ err_destroy_classdev:
+-	device_destroy(fw_attr_class, MKDEV(0, 0));
++	device_unregister(bioscfg_drv.class_dev);
+ 
+ err_unregister_class:
+-	fw_attributes_class_put();
+ 	hp_exit_attr_set_interface();
+ 
+ 	return ret;
+@@ -1055,9 +1048,8 @@ static int __init hp_init(void)
+ static void __exit hp_exit(void)
+ {
+ 	release_attributes_data();
+-	device_destroy(fw_attr_class, MKDEV(0, 0));
++	device_unregister(bioscfg_drv.class_dev);
+ 
+-	fw_attributes_class_put();
+ 	hp_exit_attr_set_interface();
+ }
+ 
+diff --git a/drivers/platform/x86/think-lmi.c b/drivers/platform/x86/think-lmi.c
+index 1abd8378f158d3..6ad2af46248b91 100644
+--- a/drivers/platform/x86/think-lmi.c
++++ b/drivers/platform/x86/think-lmi.c
+@@ -192,7 +192,6 @@ static const char * const level_options[] = {
+ 	[TLMI_LEVEL_MASTER] = "master",
+ };
+ static struct think_lmi tlmi_priv;
+-static const struct class *fw_attr_class;
+ static DEFINE_MUTEX(tlmi_mutex);
+ 
+ static inline struct tlmi_pwd_setting *to_tlmi_pwd_setting(struct kobject *kobj)
+@@ -907,6 +906,7 @@ static const struct attribute_group auth_attr_group = {
+ 	.is_visible = auth_attr_is_visible,
+ 	.attrs = auth_attrs,
+ };
++__ATTRIBUTE_GROUPS(auth_attr);
+ 
+ /* ---- Attributes sysfs --------------------------------------------------------- */
+ static ssize_t display_name_show(struct kobject *kobj, struct kobj_attribute *attr,
+@@ -1122,6 +1122,7 @@ static const struct attribute_group tlmi_attr_group = {
+ 	.is_visible = attr_is_visible,
+ 	.attrs = tlmi_attrs,
+ };
++__ATTRIBUTE_GROUPS(tlmi_attr);
+ 
+ static void tlmi_attr_setting_release(struct kobject *kobj)
+ {
+@@ -1141,11 +1142,13 @@ static void tlmi_pwd_setting_release(struct kobject *kobj)
+ static const struct kobj_type tlmi_attr_setting_ktype = {
+ 	.release        = &tlmi_attr_setting_release,
+ 	.sysfs_ops	= &kobj_sysfs_ops,
++	.default_groups = tlmi_attr_groups,
+ };
+ 
+ static const struct kobj_type tlmi_pwd_setting_ktype = {
+ 	.release        = &tlmi_pwd_setting_release,
+ 	.sysfs_ops	= &kobj_sysfs_ops,
++	.default_groups = auth_attr_groups,
+ };
+ 
+ static ssize_t pending_reboot_show(struct kobject *kobj, struct kobj_attribute *attr,
+@@ -1314,21 +1317,18 @@ static struct kobj_attribute debug_cmd = __ATTR_WO(debug_cmd);
+ /* ---- Initialisation --------------------------------------------------------- */
+ static void tlmi_release_attr(void)
+ {
+-	int i;
++	struct kobject *pos, *n;
+ 
+ 	/* Attribute structures */
+-	for (i = 0; i < TLMI_SETTINGS_COUNT; i++) {
+-		if (tlmi_priv.setting[i]) {
+-			sysfs_remove_group(&tlmi_priv.setting[i]->kobj, &tlmi_attr_group);
+-			kobject_put(&tlmi_priv.setting[i]->kobj);
+-		}
+-	}
+ 	sysfs_remove_file(&tlmi_priv.attribute_kset->kobj, &pending_reboot.attr);
+ 	sysfs_remove_file(&tlmi_priv.attribute_kset->kobj, &save_settings.attr);
+ 
+ 	if (tlmi_priv.can_debug_cmd && debug_support)
+ 		sysfs_remove_file(&tlmi_priv.attribute_kset->kobj, &debug_cmd.attr);
+ 
++	list_for_each_entry_safe(pos, n, &tlmi_priv.attribute_kset->list, entry)
++		kobject_put(pos);
++
+ 	kset_unregister(tlmi_priv.attribute_kset);
+ 
+ 	/* Free up any saved signatures */
+@@ -1336,19 +1336,8 @@ static void tlmi_release_attr(void)
+ 	kfree(tlmi_priv.pwd_admin->save_signature);
+ 
+ 	/* Authentication structures */
+-	sysfs_remove_group(&tlmi_priv.pwd_admin->kobj, &auth_attr_group);
+-	kobject_put(&tlmi_priv.pwd_admin->kobj);
+-	sysfs_remove_group(&tlmi_priv.pwd_power->kobj, &auth_attr_group);
+-	kobject_put(&tlmi_priv.pwd_power->kobj);
+-
+-	if (tlmi_priv.opcode_support) {
+-		sysfs_remove_group(&tlmi_priv.pwd_system->kobj, &auth_attr_group);
+-		kobject_put(&tlmi_priv.pwd_system->kobj);
+-		sysfs_remove_group(&tlmi_priv.pwd_hdd->kobj, &auth_attr_group);
+-		kobject_put(&tlmi_priv.pwd_hdd->kobj);
+-		sysfs_remove_group(&tlmi_priv.pwd_nvme->kobj, &auth_attr_group);
+-		kobject_put(&tlmi_priv.pwd_nvme->kobj);
+-	}
++	list_for_each_entry_safe(pos, n, &tlmi_priv.authentication_kset->list, entry)
++		kobject_put(pos);
+ 
+ 	kset_unregister(tlmi_priv.authentication_kset);
+ }
+@@ -1375,11 +1364,7 @@ static int tlmi_sysfs_init(void)
+ {
+ 	int i, ret;
+ 
+-	ret = fw_attributes_class_get(&fw_attr_class);
+-	if (ret)
+-		return ret;
+-
+-	tlmi_priv.class_dev = device_create(fw_attr_class, NULL, MKDEV(0, 0),
++	tlmi_priv.class_dev = device_create(&firmware_attributes_class, NULL, MKDEV(0, 0),
+ 			NULL, "%s", "thinklmi");
+ 	if (IS_ERR(tlmi_priv.class_dev)) {
+ 		ret = PTR_ERR(tlmi_priv.class_dev);
+@@ -1393,6 +1378,14 @@ static int tlmi_sysfs_init(void)
+ 		goto fail_device_created;
+ 	}
+ 
++	tlmi_priv.authentication_kset = kset_create_and_add("authentication", NULL,
++							    &tlmi_priv.class_dev->kobj);
++	if (!tlmi_priv.authentication_kset) {
++		kset_unregister(tlmi_priv.attribute_kset);
++		ret = -ENOMEM;
++		goto fail_device_created;
++	}
++
+ 	for (i = 0; i < TLMI_SETTINGS_COUNT; i++) {
+ 		/* Check if index is a valid setting - skip if it isn't */
+ 		if (!tlmi_priv.setting[i])
+@@ -1409,12 +1402,8 @@ static int tlmi_sysfs_init(void)
+ 
+ 		/* Build attribute */
+ 		tlmi_priv.setting[i]->kobj.kset = tlmi_priv.attribute_kset;
+-		ret = kobject_add(&tlmi_priv.setting[i]->kobj, NULL,
+-				  "%s", tlmi_priv.setting[i]->display_name);
+-		if (ret)
+-			goto fail_create_attr;
+-
+-		ret = sysfs_create_group(&tlmi_priv.setting[i]->kobj, &tlmi_attr_group);
++		ret = kobject_init_and_add(&tlmi_priv.setting[i]->kobj, &tlmi_attr_setting_ktype,
++					   NULL, "%s", tlmi_priv.setting[i]->display_name);
+ 		if (ret)
+ 			goto fail_create_attr;
+ 	}
+@@ -1434,55 +1423,34 @@ static int tlmi_sysfs_init(void)
+ 	}
+ 
+ 	/* Create authentication entries */
+-	tlmi_priv.authentication_kset = kset_create_and_add("authentication", NULL,
+-								&tlmi_priv.class_dev->kobj);
+-	if (!tlmi_priv.authentication_kset) {
+-		ret = -ENOMEM;
+-		goto fail_create_attr;
+-	}
+ 	tlmi_priv.pwd_admin->kobj.kset = tlmi_priv.authentication_kset;
+-	ret = kobject_add(&tlmi_priv.pwd_admin->kobj, NULL, "%s", "Admin");
+-	if (ret)
+-		goto fail_create_attr;
+-
+-	ret = sysfs_create_group(&tlmi_priv.pwd_admin->kobj, &auth_attr_group);
++	ret = kobject_init_and_add(&tlmi_priv.pwd_admin->kobj, &tlmi_pwd_setting_ktype,
++				   NULL, "%s", "Admin");
+ 	if (ret)
+ 		goto fail_create_attr;
+ 
+ 	tlmi_priv.pwd_power->kobj.kset = tlmi_priv.authentication_kset;
+-	ret = kobject_add(&tlmi_priv.pwd_power->kobj, NULL, "%s", "Power-on");
+-	if (ret)
+-		goto fail_create_attr;
+-
+-	ret = sysfs_create_group(&tlmi_priv.pwd_power->kobj, &auth_attr_group);
++	ret = kobject_init_and_add(&tlmi_priv.pwd_power->kobj, &tlmi_pwd_setting_ktype,
++				   NULL, "%s", "Power-on");
+ 	if (ret)
+ 		goto fail_create_attr;
+ 
+ 	if (tlmi_priv.opcode_support) {
+ 		tlmi_priv.pwd_system->kobj.kset = tlmi_priv.authentication_kset;
+-		ret = kobject_add(&tlmi_priv.pwd_system->kobj, NULL, "%s", "System");
+-		if (ret)
+-			goto fail_create_attr;
+-
+-		ret = sysfs_create_group(&tlmi_priv.pwd_system->kobj, &auth_attr_group);
++		ret = kobject_init_and_add(&tlmi_priv.pwd_system->kobj, &tlmi_pwd_setting_ktype,
++					   NULL, "%s", "System");
+ 		if (ret)
+ 			goto fail_create_attr;
+ 
+ 		tlmi_priv.pwd_hdd->kobj.kset = tlmi_priv.authentication_kset;
+-		ret = kobject_add(&tlmi_priv.pwd_hdd->kobj, NULL, "%s", "HDD");
+-		if (ret)
+-			goto fail_create_attr;
+-
+-		ret = sysfs_create_group(&tlmi_priv.pwd_hdd->kobj, &auth_attr_group);
++		ret = kobject_init_and_add(&tlmi_priv.pwd_hdd->kobj, &tlmi_pwd_setting_ktype,
++					   NULL, "%s", "HDD");
+ 		if (ret)
+ 			goto fail_create_attr;
+ 
+ 		tlmi_priv.pwd_nvme->kobj.kset = tlmi_priv.authentication_kset;
+-		ret = kobject_add(&tlmi_priv.pwd_nvme->kobj, NULL, "%s", "NVMe");
+-		if (ret)
+-			goto fail_create_attr;
+-
+-		ret = sysfs_create_group(&tlmi_priv.pwd_nvme->kobj, &auth_attr_group);
++		ret = kobject_init_and_add(&tlmi_priv.pwd_nvme->kobj, &tlmi_pwd_setting_ktype,
++					   NULL, "%s", "NVMe");
+ 		if (ret)
+ 			goto fail_create_attr;
+ 	}
+@@ -1492,9 +1460,8 @@ static int tlmi_sysfs_init(void)
+ fail_create_attr:
+ 	tlmi_release_attr();
+ fail_device_created:
+-	device_destroy(fw_attr_class, MKDEV(0, 0));
++	device_unregister(tlmi_priv.class_dev);
+ fail_class_created:
+-	fw_attributes_class_put();
+ 	return ret;
+ }
+ 
+@@ -1516,8 +1483,6 @@ static struct tlmi_pwd_setting *tlmi_create_auth(const char *pwd_type,
+ 	new_pwd->maxlen = tlmi_priv.pwdcfg.core.max_length;
+ 	new_pwd->index = 0;
+ 
+-	kobject_init(&new_pwd->kobj, &tlmi_pwd_setting_ktype);
+-
+ 	return new_pwd;
+ }
+ 
+@@ -1621,7 +1586,6 @@ static int tlmi_analyze(void)
+ 		if (setting->possible_values)
+ 			strreplace(setting->possible_values, ',', ';');
+ 
+-		kobject_init(&setting->kobj, &tlmi_attr_setting_ktype);
+ 		tlmi_priv.setting[i] = setting;
+ 		kfree(item);
+ 	}
+@@ -1717,8 +1681,7 @@ static int tlmi_analyze(void)
+ static void tlmi_remove(struct wmi_device *wdev)
+ {
+ 	tlmi_release_attr();
+-	device_destroy(fw_attr_class, MKDEV(0, 0));
+-	fw_attributes_class_put();
++	device_unregister(tlmi_priv.class_dev);
+ }
+ 
+ static int tlmi_probe(struct wmi_device *wdev, const void *context)
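The think-lmi rework above leans on kobj_type->default_groups: groups listed there are created by kobject_init_and_add() and removed automatically when the kobject is released, which is why tlmi_release_attr() can now simply kobject_put() every child of a kset instead of removing each group by hand. A minimal sketch of the pattern (attribute and type names hypothetical):

#include <linux/kobject.h>
#include <linux/sysfs.h>

static ssize_t name_show(struct kobject *kobj, struct kobj_attribute *attr,
			 char *buf)
{
	return sysfs_emit(buf, "example\n");
}

static struct kobj_attribute name_attr = __ATTR_RO(name);

static struct attribute *example_attrs[] = {
	&name_attr.attr,
	NULL
};
ATTRIBUTE_GROUPS(example);

static void example_release(struct kobject *kobj)
{
	/* real code would kfree(container_of(kobj, ..., kobj)) here */
}

static const struct kobj_type example_ktype = {
	.release	= example_release,
	.sysfs_ops	= &kobj_sysfs_ops,
	.default_groups	= example_groups,	/* created and removed for us */
};

/* One call creates the kobject and all its default groups; the final
 * kobject_put() tears them down:
 *
 *	kobject_init_and_add(&obj->kobj, &example_ktype, parent, "%s", "demo");
 */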
+diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c
+index 5e793b80fd6b6b..c3de52e78e0166 100644
+--- a/drivers/powercap/intel_rapl_common.c
++++ b/drivers/powercap/intel_rapl_common.c
+@@ -340,12 +340,28 @@ static int set_domain_enable(struct powercap_zone *power_zone, bool mode)
+ {
+ 	struct rapl_domain *rd = power_zone_to_rapl_domain(power_zone);
+ 	struct rapl_defaults *defaults = get_defaults(rd->rp);
++	u64 val;
+ 	int ret;
+ 
+ 	cpus_read_lock();
+ 	ret = rapl_write_pl_data(rd, POWER_LIMIT1, PL_ENABLE, mode);
+-	if (!ret && defaults->set_floor_freq)
++	if (ret)
++		goto end;
++
++	ret = rapl_read_pl_data(rd, POWER_LIMIT1, PL_ENABLE, false, &val);
++	if (ret)
++		goto end;
++
++	if (mode != val) {
++		pr_debug("%s cannot be %s\n", power_zone->name,
++			 str_enabled_disabled(mode));
++		goto end;
++	}
++
++	if (defaults->set_floor_freq)
+ 		defaults->set_floor_freq(rd, mode);
++
++end:
+ 	cpus_read_unlock();
+ 
+ 	return ret;
+diff --git a/drivers/regulator/fan53555.c b/drivers/regulator/fan53555.c
+index bd9447dac5967a..c282236959b180 100644
+--- a/drivers/regulator/fan53555.c
++++ b/drivers/regulator/fan53555.c
+@@ -147,6 +147,7 @@ struct fan53555_device_info {
+ 	unsigned int slew_mask;
+ 	const unsigned int *ramp_delay_table;
+ 	unsigned int n_ramp_values;
++	unsigned int enable_time;
+ 	unsigned int slew_rate;
+ };
+ 
+@@ -282,6 +283,7 @@ static int fan53526_voltages_setup_fairchild(struct fan53555_device_info *di)
+ 	di->slew_mask = CTL_SLEW_MASK;
+ 	di->ramp_delay_table = slew_rates;
+ 	di->n_ramp_values = ARRAY_SIZE(slew_rates);
++	di->enable_time = 250;
+ 	di->vsel_count = FAN53526_NVOLTAGES;
+ 
+ 	return 0;
+@@ -296,10 +298,12 @@ static int fan53555_voltages_setup_fairchild(struct fan53555_device_info *di)
+ 		case FAN53555_CHIP_REV_00:
+ 			di->vsel_min = 600000;
+ 			di->vsel_step = 10000;
++			di->enable_time = 400;
+ 			break;
+ 		case FAN53555_CHIP_REV_13:
+ 			di->vsel_min = 800000;
+ 			di->vsel_step = 10000;
++			di->enable_time = 400;
+ 			break;
+ 		default:
+ 			dev_err(di->dev,
+@@ -311,13 +315,19 @@ static int fan53555_voltages_setup_fairchild(struct fan53555_device_info *di)
+ 	case FAN53555_CHIP_ID_01:
+ 	case FAN53555_CHIP_ID_03:
+ 	case FAN53555_CHIP_ID_05:
++		di->vsel_min = 600000;
++		di->vsel_step = 10000;
++		di->enable_time = 400;
++		break;
+ 	case FAN53555_CHIP_ID_08:
+ 		di->vsel_min = 600000;
+ 		di->vsel_step = 10000;
++		di->enable_time = 175;
+ 		break;
+ 	case FAN53555_CHIP_ID_04:
+ 		di->vsel_min = 603000;
+ 		di->vsel_step = 12826;
++		di->enable_time = 400;
+ 		break;
+ 	default:
+ 		dev_err(di->dev,
+@@ -350,6 +360,7 @@ static int fan53555_voltages_setup_rockchip(struct fan53555_device_info *di)
+ 	di->slew_mask = CTL_SLEW_MASK;
+ 	di->ramp_delay_table = slew_rates;
+ 	di->n_ramp_values = ARRAY_SIZE(slew_rates);
++	di->enable_time = 360;
+ 	di->vsel_count = FAN53555_NVOLTAGES;
+ 
+ 	return 0;
+@@ -372,6 +383,7 @@ static int rk8602_voltages_setup_rockchip(struct fan53555_device_info *di)
+ 	di->slew_mask = CTL_SLEW_MASK;
+ 	di->ramp_delay_table = slew_rates;
+ 	di->n_ramp_values = ARRAY_SIZE(slew_rates);
++	di->enable_time = 360;
+ 	di->vsel_count = RK8602_NVOLTAGES;
+ 
+ 	return 0;
+@@ -395,6 +407,7 @@ static int fan53555_voltages_setup_silergy(struct fan53555_device_info *di)
+ 	di->slew_mask = CTL_SLEW_MASK;
+ 	di->ramp_delay_table = slew_rates;
+ 	di->n_ramp_values = ARRAY_SIZE(slew_rates);
++	di->enable_time = 400;
+ 	di->vsel_count = FAN53555_NVOLTAGES;
+ 
+ 	return 0;
+@@ -594,6 +607,7 @@ static int fan53555_regulator_register(struct fan53555_device_info *di,
+ 	rdesc->ramp_mask = di->slew_mask;
+ 	rdesc->ramp_delay_table = di->ramp_delay_table;
+ 	rdesc->n_ramp_values = di->n_ramp_values;
++	rdesc->enable_time = di->enable_time;
+ 	rdesc->owner = THIS_MODULE;
+ 
+ 	rdev = devm_regulator_register(di->dev, &di->desc, config);
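
The fan53555 hunks thread a per-variant soft-start delay into regulator_desc.enable_time, which the regulator core honors as a post-enable settle time in microseconds. Sketched below with illustrative values and a hypothetical ops table (demo_buck_ops is assumed, not from the driver):

#include <linux/regulator/driver.h>

static const struct regulator_desc demo_buck_desc = {
	.name		= "demo-buck",
	.ops		= &demo_buck_ops,	/* assumed ops table */
	.type		= REGULATOR_VOLTAGE,
	.owner		= THIS_MODULE,
	/* Consumed by the core: wait this many microseconds after
	 * .enable() before the output is considered good. */
	.enable_time	= 400,
};
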
+diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c
+index 65927fa2ef161c..1bdd494cf8821e 100644
+--- a/drivers/regulator/gpio-regulator.c
++++ b/drivers/regulator/gpio-regulator.c
+@@ -260,8 +260,10 @@ static int gpio_regulator_probe(struct platform_device *pdev)
+ 		return -ENOMEM;
+ 	}
+ 
+-	drvdata->gpiods = devm_kzalloc(dev, sizeof(struct gpio_desc *),
+-				       GFP_KERNEL);
++	drvdata->gpiods = devm_kcalloc(dev, config->ngpios,
++				       sizeof(struct gpio_desc *), GFP_KERNEL);
++	if (!drvdata->gpiods)
++		return -ENOMEM;
+ 
+ 	if (config->input_supply) {
+ 		drvdata->desc.supply_name = devm_kstrdup(&pdev->dev,
+@@ -274,8 +276,6 @@ static int gpio_regulator_probe(struct platform_device *pdev)
+ 		}
+ 	}
+ 
+-	if (!drvdata->gpiods)
+-		return -ENOMEM;
+ 	for (i = 0; i < config->ngpios; i++) {
+ 		drvdata->gpiods[i] = devm_gpiod_get_index(dev,
+ 							  NULL,
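
The gpio-regulator hunk fixes two related defects: the array holds config->ngpios descriptor pointers but was sized for exactly one, and the NULL check sat several statements away from the allocation. A sketch of the corrected allocate-and-check idiom:

#include <linux/device.h>
#include <linux/gpio/consumer.h>

static struct gpio_desc **alloc_gpio_array(struct device *dev, size_t ngpios)
{
	struct gpio_desc **gpiods;

	/* One slot per GPIO; devm_kcalloc() also checks the
	 * ngpios * sizeof(*gpiods) multiplication for overflow. */
	gpiods = devm_kcalloc(dev, ngpios, sizeof(*gpiods), GFP_KERNEL);
	if (!gpiods)
		return NULL;	/* caller translates to -ENOMEM */

	return gpiods;
}
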
+diff --git a/drivers/remoteproc/ti_k3_dsp_remoteproc.c b/drivers/remoteproc/ti_k3_dsp_remoteproc.c
+index 2ae0655ddf1d22..73be3d21679148 100644
+--- a/drivers/remoteproc/ti_k3_dsp_remoteproc.c
++++ b/drivers/remoteproc/ti_k3_dsp_remoteproc.c
+@@ -568,11 +568,9 @@ static int k3_dsp_reserved_mem_init(struct k3_dsp_rproc *kproc)
+ 			return -EINVAL;
+ 
+ 		rmem = of_reserved_mem_lookup(rmem_np);
+-		if (!rmem) {
+-			of_node_put(rmem_np);
+-			return -EINVAL;
+-		}
+ 		of_node_put(rmem_np);
++		if (!rmem)
++			return -EINVAL;
+ 
+ 		kproc->rmem[i].bus_addr = rmem->base;
+ 		/* 64-bit address regions currently not supported */
+diff --git a/drivers/remoteproc/ti_k3_m4_remoteproc.c b/drivers/remoteproc/ti_k3_m4_remoteproc.c
+index fba6e393635e36..6cd50b16a8e82a 100644
+--- a/drivers/remoteproc/ti_k3_m4_remoteproc.c
++++ b/drivers/remoteproc/ti_k3_m4_remoteproc.c
+@@ -433,11 +433,9 @@ static int k3_m4_reserved_mem_init(struct k3_m4_rproc *kproc)
+ 			return -EINVAL;
+ 
+ 		rmem = of_reserved_mem_lookup(rmem_np);
+-		if (!rmem) {
+-			of_node_put(rmem_np);
+-			return -EINVAL;
+-		}
+ 		of_node_put(rmem_np);
++		if (!rmem)
++			return -EINVAL;
+ 
+ 		kproc->rmem[i].bus_addr = rmem->base;
+ 		/* 64-bit address regions currently not supported */
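
Both TI K3 hunks above hoist of_node_put() ahead of the error check: of_reserved_mem_lookup() does not take over the phandle reference, so the node must be put exactly once on success and failure alike. The reshaped lookup in sketch form:

#include <linux/of.h>
#include <linux/of_reserved_mem.h>

static struct reserved_mem *lookup_rmem(struct device_node *np, int idx)
{
	struct device_node *rmem_np;
	struct reserved_mem *rmem;

	rmem_np = of_parse_phandle(np, "memory-region", idx);
	if (!rmem_np)
		return NULL;

	rmem = of_reserved_mem_lookup(rmem_np);
	/* Unconditional put: the lookup result does not pin rmem_np. */
	of_node_put(rmem_np);

	return rmem;	/* NULL maps to -EINVAL in the caller */
}
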
+diff --git a/drivers/remoteproc/ti_k3_r5_remoteproc.c b/drivers/remoteproc/ti_k3_r5_remoteproc.c
+index 4894461aa65f3b..941bb130c85c40 100644
+--- a/drivers/remoteproc/ti_k3_r5_remoteproc.c
++++ b/drivers/remoteproc/ti_k3_r5_remoteproc.c
+@@ -440,13 +440,36 @@ static int k3_r5_rproc_prepare(struct rproc *rproc)
+ {
+ 	struct k3_r5_rproc *kproc = rproc->priv;
+ 	struct k3_r5_cluster *cluster = kproc->cluster;
+-	struct k3_r5_core *core = kproc->core;
++	struct k3_r5_core *core = kproc->core, *core0, *core1;
+ 	struct device *dev = kproc->dev;
+ 	u32 ctrl = 0, cfg = 0, stat = 0;
+ 	u64 boot_vec = 0;
+ 	bool mem_init_dis;
+ 	int ret;
+ 
++	/*
++	 * R5 cores must be powered on sequentially; core0 should be in a
++	 * higher power state than core1 in a cluster. So, wait for core0 to
++	 * power up before proceeding to core1, with a 2-second timeout. This
++	 * waiting mechanism is necessary because rproc_auto_boot_callback() for
++	 * core1 can be called before core0 due to thread execution order.
++	 *
++	 * By placing the wait mechanism here in .prepare() ops, this condition
++	 * is enforced for rproc boot requests from sysfs as well.
++	 */
++	core0 = list_first_entry(&cluster->cores, struct k3_r5_core, elem);
++	core1 = list_last_entry(&cluster->cores, struct k3_r5_core, elem);
++	if (cluster->mode == CLUSTER_MODE_SPLIT && core == core1 &&
++	    !core0->released_from_reset) {
++		ret = wait_event_interruptible_timeout(cluster->core_transition,
++						       core0->released_from_reset,
++						       msecs_to_jiffies(2000));
++		if (ret <= 0) {
++			dev_err(dev, "cannot power up core1 before core0\n");
++			return -EPERM;
++		}
++	}
++
+ 	ret = ti_sci_proc_get_status(core->tsp, &boot_vec, &cfg, &ctrl, &stat);
+ 	if (ret < 0)
+ 		return ret;
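
The comment above describes the handshake this patch moves into .prepare()/.unprepare(): the dependent core blocks on a condition with a timeout while the other core publishes its state change and wakes the queue. Reduced to its essentials (simplified two-core state, names are illustrative):

#include <linux/wait.h>
#include <linux/jiffies.h>

static DECLARE_WAIT_QUEUE_HEAD(core_transition);
static bool core0_released;

/* Core1 side: wait_event_interruptible_timeout() returns > 0
 * (remaining jiffies) once the condition holds, and <= 0 on
 * timeout or signal. */
static int core1_wait_for_core0(void)
{
	long ret;

	ret = wait_event_interruptible_timeout(core_transition,
					       core0_released,
					       msecs_to_jiffies(2000));
	return ret <= 0 ? -EPERM : 0;
}

/* Core0 side: publish the state change, then wake all waiters. */
static void core0_mark_released(void)
{
	core0_released = true;
	wake_up_interruptible(&core_transition);
}
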
+@@ -462,6 +485,14 @@ static int k3_r5_rproc_prepare(struct rproc *rproc)
+ 		return ret;
+ 	}
+ 
++	/*
++	 * Notify all threads in the wait queue when the core0 state has
++	 * changed so that threads waiting on this condition can proceed.
++	 */
++	core->released_from_reset = true;
++	if (core == core0)
++		wake_up_interruptible(&cluster->core_transition);
++
+ 	/*
+ 	 * Newer IP revisions like on J7200 SoCs support h/w auto-initialization
+ 	 * of TCMs, so there is no need to perform the s/w memzero. This bit is
+@@ -507,10 +538,30 @@ static int k3_r5_rproc_unprepare(struct rproc *rproc)
+ {
+ 	struct k3_r5_rproc *kproc = rproc->priv;
+ 	struct k3_r5_cluster *cluster = kproc->cluster;
+-	struct k3_r5_core *core = kproc->core;
++	struct k3_r5_core *core = kproc->core, *core0, *core1;
+ 	struct device *dev = kproc->dev;
+ 	int ret;
+ 
++	/*
++	 * Ensure power-down of cores is sequential in split mode. Core1 must
++	 * power down before Core0 to maintain the expected state. By placing
++	 * the wait mechanism here in .unprepare() ops, this condition is
++	 * enforced for rproc stop or shutdown requests from sysfs and device
++	 * removal as well.
++	 */
++	core0 = list_first_entry(&cluster->cores, struct k3_r5_core, elem);
++	core1 = list_last_entry(&cluster->cores, struct k3_r5_core, elem);
++	if (cluster->mode == CLUSTER_MODE_SPLIT && core == core0 &&
++	    core1->released_from_reset) {
++		ret = wait_event_interruptible_timeout(cluster->core_transition,
++						       !core1->released_from_reset,
++						       msecs_to_jiffies(2000));
++		if (ret <= 0) {
++			dev_err(dev, "cannot power down core0 before core1\n");
++			return -EPERM;
++		}
++	}
++
+ 	/* Re-use LockStep-mode reset logic for Single-CPU mode */
+ 	ret = (cluster->mode == CLUSTER_MODE_LOCKSTEP ||
+ 	       cluster->mode == CLUSTER_MODE_SINGLECPU) ?
+@@ -518,6 +569,14 @@ static int k3_r5_rproc_unprepare(struct rproc *rproc)
+ 	if (ret)
+ 		dev_err(dev, "unable to disable cores, ret = %d\n", ret);
+ 
++	/*
++	 * Notify all threads in the wait queue when the core1 state has
++	 * changed so that threads waiting on this condition can proceed.
++	 */
++	core->released_from_reset = false;
++	if (core == core1)
++		wake_up_interruptible(&cluster->core_transition);
++
+ 	return ret;
+ }
+ 
+@@ -543,7 +602,7 @@ static int k3_r5_rproc_start(struct rproc *rproc)
+ 	struct k3_r5_rproc *kproc = rproc->priv;
+ 	struct k3_r5_cluster *cluster = kproc->cluster;
+ 	struct device *dev = kproc->dev;
+-	struct k3_r5_core *core0, *core;
++	struct k3_r5_core *core;
+ 	u32 boot_addr;
+ 	int ret;
+ 
+@@ -565,21 +624,9 @@ static int k3_r5_rproc_start(struct rproc *rproc)
+ 				goto unroll_core_run;
+ 		}
+ 	} else {
+-		/* do not allow core 1 to start before core 0 */
+-		core0 = list_first_entry(&cluster->cores, struct k3_r5_core,
+-					 elem);
+-		if (core != core0 && core0->rproc->state == RPROC_OFFLINE) {
+-			dev_err(dev, "%s: can not start core 1 before core 0\n",
+-				__func__);
+-			return -EPERM;
+-		}
+-
+ 		ret = k3_r5_core_run(core);
+ 		if (ret)
+ 			return ret;
+-
+-		core->released_from_reset = true;
+-		wake_up_interruptible(&cluster->core_transition);
+ 	}
+ 
+ 	return 0;
+@@ -620,8 +667,7 @@ static int k3_r5_rproc_stop(struct rproc *rproc)
+ {
+ 	struct k3_r5_rproc *kproc = rproc->priv;
+ 	struct k3_r5_cluster *cluster = kproc->cluster;
+-	struct device *dev = kproc->dev;
+-	struct k3_r5_core *core1, *core = kproc->core;
++	struct k3_r5_core *core = kproc->core;
+ 	int ret;
+ 
+ 	/* halt all applicable cores */
+@@ -634,16 +680,6 @@ static int k3_r5_rproc_stop(struct rproc *rproc)
+ 			}
+ 		}
+ 	} else {
+-		/* do not allow core 0 to stop before core 1 */
+-		core1 = list_last_entry(&cluster->cores, struct k3_r5_core,
+-					elem);
+-		if (core != core1 && core1->rproc->state != RPROC_OFFLINE) {
+-			dev_err(dev, "%s: can not stop core 0 before core 1\n",
+-				__func__);
+-			ret = -EPERM;
+-			goto out;
+-		}
+-
+ 		ret = k3_r5_core_halt(core);
+ 		if (ret)
+ 			goto out;
+@@ -947,6 +983,13 @@ static int k3_r5_rproc_configure(struct k3_r5_rproc *kproc)
+ 	return ret;
+ }
+ 
++static void k3_r5_mem_release(void *data)
++{
++	struct device *dev = data;
++
++	of_reserved_mem_device_release(dev);
++}
++
+ static int k3_r5_reserved_mem_init(struct k3_r5_rproc *kproc)
+ {
+ 	struct device *dev = kproc->dev;
+@@ -977,28 +1020,25 @@ static int k3_r5_reserved_mem_init(struct k3_r5_rproc *kproc)
+ 		return ret;
+ 	}
+ 
++	ret = devm_add_action_or_reset(dev, k3_r5_mem_release, dev);
++	if (ret)
++		return ret;
++
+ 	num_rmems--;
+-	kproc->rmem = kcalloc(num_rmems, sizeof(*kproc->rmem), GFP_KERNEL);
+-	if (!kproc->rmem) {
+-		ret = -ENOMEM;
+-		goto release_rmem;
+-	}
++	kproc->rmem = devm_kcalloc(dev, num_rmems, sizeof(*kproc->rmem), GFP_KERNEL);
++	if (!kproc->rmem)
++		return -ENOMEM;
+ 
+ 	/* use remaining reserved memory regions for static carveouts */
+ 	for (i = 0; i < num_rmems; i++) {
+ 		rmem_np = of_parse_phandle(np, "memory-region", i + 1);
+-		if (!rmem_np) {
+-			ret = -EINVAL;
+-			goto unmap_rmem;
+-		}
++		if (!rmem_np)
++			return -EINVAL;
+ 
+ 		rmem = of_reserved_mem_lookup(rmem_np);
+-		if (!rmem) {
+-			of_node_put(rmem_np);
+-			ret = -EINVAL;
+-			goto unmap_rmem;
+-		}
+ 		of_node_put(rmem_np);
++		if (!rmem)
++			return -EINVAL;
+ 
+ 		kproc->rmem[i].bus_addr = rmem->base;
+ 		/*
+@@ -1013,12 +1053,11 @@ static int k3_r5_reserved_mem_init(struct k3_r5_rproc *kproc)
+ 		 */
+ 		kproc->rmem[i].dev_addr = (u32)rmem->base;
+ 		kproc->rmem[i].size = rmem->size;
+-		kproc->rmem[i].cpu_addr = ioremap_wc(rmem->base, rmem->size);
++		kproc->rmem[i].cpu_addr = devm_ioremap_wc(dev, rmem->base, rmem->size);
+ 		if (!kproc->rmem[i].cpu_addr) {
+ 			dev_err(dev, "failed to map reserved memory#%d at %pa of size %pa\n",
+ 				i + 1, &rmem->base, &rmem->size);
+-			ret = -ENOMEM;
+-			goto unmap_rmem;
++			return -ENOMEM;
+ 		}
+ 
+ 		dev_dbg(dev, "reserved memory%d: bus addr %pa size 0x%zx va %pK da 0x%x\n",
+@@ -1029,25 +1068,6 @@ static int k3_r5_reserved_mem_init(struct k3_r5_rproc *kproc)
+ 	kproc->num_rmems = num_rmems;
+ 
+ 	return 0;
+-
+-unmap_rmem:
+-	for (i--; i >= 0; i--)
+-		iounmap(kproc->rmem[i].cpu_addr);
+-	kfree(kproc->rmem);
+-release_rmem:
+-	of_reserved_mem_device_release(dev);
+-	return ret;
+-}
+-
+-static void k3_r5_reserved_mem_exit(struct k3_r5_rproc *kproc)
+-{
+-	int i;
+-
+-	for (i = 0; i < kproc->num_rmems; i++)
+-		iounmap(kproc->rmem[i].cpu_addr);
+-	kfree(kproc->rmem);
+-
+-	of_reserved_mem_device_release(kproc->dev);
+ }
+ 
+ /*
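The k3_r5_mem_release() helper registered above via devm_add_action_or_reset() is what lets the error paths in this function collapse into plain returns: the release runs automatically on any later probe failure and on device detach. The idiom in isolation, with hypothetical names:

#include <linux/device.h>
#include <linux/of_reserved_mem.h>

static void demo_mem_release(void *data)
{
	of_reserved_mem_device_release(data);
}

static int demo_mem_init(struct device *dev, struct device_node *np)
{
	int ret;

	ret = of_reserved_mem_device_init_by_idx(dev, np, 0);
	if (ret)
		return ret;

	/* On registration failure the action runs immediately; on
	 * success it is queued, so every later error return unwinds
	 * the init without an explicit goto ladder. */
	return devm_add_action_or_reset(dev, demo_mem_release, dev);
}
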
+@@ -1274,10 +1294,10 @@ static int k3_r5_cluster_rproc_init(struct platform_device *pdev)
+ 			goto out;
+ 		}
+ 
+-		ret = rproc_add(rproc);
++		ret = devm_rproc_add(dev, rproc);
+ 		if (ret) {
+-			dev_err(dev, "rproc_add failed, ret = %d\n", ret);
+-			goto err_add;
++			dev_err_probe(dev, ret, "rproc_add failed\n");
++			goto out;
+ 		}
+ 
+ 		/* create only one rproc in lockstep, single-cpu or
+@@ -1287,26 +1307,6 @@ static int k3_r5_cluster_rproc_init(struct platform_device *pdev)
+ 		    cluster->mode == CLUSTER_MODE_SINGLECPU ||
+ 		    cluster->mode == CLUSTER_MODE_SINGLECORE)
+ 			break;
+-
+-		/*
+-		 * R5 cores require to be powered on sequentially, core0
+-		 * should be in higher power state than core1 in a cluster
+-		 * So, wait for current core to power up before proceeding
+-		 * to next core and put timeout of 2sec for each core.
+-		 *
+-		 * This waiting mechanism is necessary because
+-		 * rproc_auto_boot_callback() for core1 can be called before
+-		 * core0 due to thread execution order.
+-		 */
+-		ret = wait_event_interruptible_timeout(cluster->core_transition,
+-						       core->released_from_reset,
+-						       msecs_to_jiffies(2000));
+-		if (ret <= 0) {
+-			dev_err(dev,
+-				"Timed out waiting for %s core to power up!\n",
+-				rproc->name);
+-			goto err_powerup;
+-		}
+ 	}
+ 
+ 	return 0;
+@@ -1321,10 +1321,6 @@ static int k3_r5_cluster_rproc_init(struct platform_device *pdev)
+ 		}
+ 	}
+ 
+-err_powerup:
+-	rproc_del(rproc);
+-err_add:
+-	k3_r5_reserved_mem_exit(kproc);
+ out:
+ 	/* undo core0 upon any failures on core1 in split-mode */
+ 	if (cluster->mode == CLUSTER_MODE_SPLIT && core == core1) {
+@@ -1367,10 +1363,6 @@ static void k3_r5_cluster_rproc_exit(void *data)
+ 		}
+ 
+ 		mbox_free_channel(kproc->mbox);
+-
+-		rproc_del(rproc);
+-
+-		k3_r5_reserved_mem_exit(kproc);
+ 	}
+ }
+ 
+diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
+index 5849d2970bba45..095de4e0e4f388 100644
+--- a/drivers/rtc/rtc-cmos.c
++++ b/drivers/rtc/rtc-cmos.c
+@@ -697,8 +697,12 @@ static irqreturn_t cmos_interrupt(int irq, void *p)
+ {
+ 	u8		irqstat;
+ 	u8		rtc_control;
++	unsigned long	flags;
+ 
+-	spin_lock(&rtc_lock);
++	/* We cannot use spin_lock() here, as cmos_interrupt() is also called
++	 * in a non-irq context.
++	 */
++	spin_lock_irqsave(&rtc_lock, flags);
+ 
+ 	/* When the HPET interrupt handler calls us, the interrupt
+ 	 * status is passed as arg1 instead of the irq number.  But
+@@ -732,7 +736,7 @@ static irqreturn_t cmos_interrupt(int irq, void *p)
+ 			hpet_mask_rtc_irq_bit(RTC_AIE);
+ 		CMOS_READ(RTC_INTR_FLAGS);
+ 	}
+-	spin_unlock(&rtc_lock);
++	spin_unlock_irqrestore(&rtc_lock, flags);
+ 
+ 	if (is_intr(irqstat)) {
+ 		rtc_update_irq(p, 1, irqstat);
+@@ -1300,9 +1304,7 @@ static void cmos_check_wkalrm(struct device *dev)
+ 	 * ACK the rtc irq here
+ 	 */
+ 	if (t_now >= cmos->alarm_expires && cmos_use_acpi_alarm()) {
+-		local_irq_disable();
+ 		cmos_interrupt(0, (void *)cmos->rtc);
+-		local_irq_enable();
+ 		return;
+ 	}
+ 
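The rtc-cmos change matters because cmos_interrupt() runs both as a hardirq handler and, after this patch, directly from process context in cmos_check_wkalrm() (the local_irq_disable()/enable() wrapper is dropped). Plain spin_lock() from process context could deadlock against the real interrupt on the same CPU; spin_lock_irqsave() is the context-agnostic form. A minimal sketch:

#include <linux/spinlock.h>
#include <linux/interrupt.h>

static DEFINE_SPINLOCK(demo_lock);

/* Safe from hardirq and process context alike: irqsave disables
 * local interrupts while held and restores the caller's previous
 * interrupt state on unlock. */
static irqreturn_t demo_handler(int irq, void *dev_id)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	/* ... touch state shared between the contexts ... */
	spin_unlock_irqrestore(&demo_lock, flags);

	return IRQ_HANDLED;
}
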
+diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c
+index 9c04c4e1a49c37..fc079b9dcf7192 100644
+--- a/drivers/rtc/rtc-pcf2127.c
++++ b/drivers/rtc/rtc-pcf2127.c
+@@ -1383,6 +1383,11 @@ static int pcf2127_i2c_probe(struct i2c_client *client)
+ 		variant = &pcf21xx_cfg[type];
+ 	}
+ 
++	if (variant->type == PCF2131) {
++		config.read_flag_mask = 0x0;
++		config.write_flag_mask = 0x0;
++	}
++
+ 	config.max_register = variant->max_register,
+ 
+ 	regmap = devm_regmap_init(&client->dev, &pcf2127_i2c_regmap,
+@@ -1456,7 +1461,7 @@ static int pcf2127_spi_probe(struct spi_device *spi)
+ 		variant = &pcf21xx_cfg[type];
+ 	}
+ 
+-	config.max_register = variant->max_register,
++	config.max_register = variant->max_register;
+ 
+ 	regmap = devm_regmap_init_spi(spi, &config);
+ 	if (IS_ERR(regmap)) {
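
The one-character pcf2127 fix above swaps a trailing comma for a semicolon. The comma operator made the original legal C, so it compiled silently, and here it even behaved identically because the comma sequences its operands left to right; the hazard is that it fuses the next statement into the same expression. A tiny illustration:

void demo(int *reg, int max_register)
{
	*reg = max_register,	/* legal: comma operator, statement continues */
	max_register = 0;	/* ...through here; both assignments still run */
}

Note the I2C probe path still carries the comma, visible as unchanged context earlier in this diff.
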
+diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
+index 85059b83ea6b4f..1c6b024160da76 100644
+--- a/drivers/scsi/lpfc/lpfc_bsg.c
++++ b/drivers/scsi/lpfc/lpfc_bsg.c
+@@ -398,7 +398,11 @@ lpfc_bsg_send_mgmt_cmd(struct bsg_job *job)
+ 	/* in case no data is transferred */
+ 	bsg_reply->reply_payload_rcv_len = 0;
+ 
+-	if (ndlp->nlp_flag & NLP_ELS_SND_MASK)
++	if (test_bit(NLP_PLOGI_SND, &ndlp->nlp_flag) ||
++	    test_bit(NLP_PRLI_SND, &ndlp->nlp_flag) ||
++	    test_bit(NLP_ADISC_SND, &ndlp->nlp_flag) ||
++	    test_bit(NLP_LOGO_SND, &ndlp->nlp_flag) ||
++	    test_bit(NLP_RNID_SND, &ndlp->nlp_flag))
+ 		return -ENODEV;
+ 
+ 	/* allocate our bsg tracking structure */
+diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
+index d4e46a08f94daa..36470bd7161733 100644
+--- a/drivers/scsi/lpfc/lpfc_crtn.h
++++ b/drivers/scsi/lpfc/lpfc_crtn.h
+@@ -571,7 +571,7 @@ int lpfc_issue_reg_vfi(struct lpfc_vport *);
+ int lpfc_issue_unreg_vfi(struct lpfc_vport *);
+ int lpfc_selective_reset(struct lpfc_hba *);
+ int lpfc_sli4_read_config(struct lpfc_hba *);
+-void lpfc_sli4_node_prep(struct lpfc_hba *);
++void lpfc_sli4_node_rpi_restore(struct lpfc_hba *phba);
+ int lpfc_sli4_els_sgl_update(struct lpfc_hba *phba);
+ int lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba);
+ int lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *sglist);
+diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
+index ce3a1f42713dd8..30891ad17e2a41 100644
+--- a/drivers/scsi/lpfc/lpfc_ct.c
++++ b/drivers/scsi/lpfc/lpfc_ct.c
+@@ -735,7 +735,7 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
+ 
+ 			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ 					 "0238 Process x%06x NameServer Rsp "
+-					 "Data: x%x x%x x%x x%lx x%x\n", Did,
++					 "Data: x%lx x%x x%x x%lx x%x\n", Did,
+ 					 ndlp->nlp_flag, ndlp->nlp_fc4_type,
+ 					 ndlp->nlp_state, vport->fc_flag,
+ 					 vport->fc_rscn_id_cnt);
+@@ -744,7 +744,7 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
+ 			 * state of ndlp hit devloss, change state to
+ 			 * allow rediscovery.
+ 			 */
+-			if (ndlp->nlp_flag & NLP_NPR_2B_DISC &&
++			if (test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag) &&
+ 			    ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
+ 				lpfc_nlp_set_state(vport, ndlp,
+ 						   NLP_STE_NPR_NODE);
+@@ -832,12 +832,10 @@ lpfc_ns_rsp_audit_did(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
+ 			if (ndlp->nlp_type != NLP_NVME_INITIATOR ||
+ 			    ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
+ 				continue;
+-			spin_lock_irq(&ndlp->lock);
+ 			if (ndlp->nlp_DID == Did)
+-				ndlp->nlp_flag &= ~NLP_NVMET_RECOV;
++				clear_bit(NLP_NVMET_RECOV, &ndlp->nlp_flag);
+ 			else
+-				ndlp->nlp_flag |= NLP_NVMET_RECOV;
+-			spin_unlock_irq(&ndlp->lock);
++				set_bit(NLP_NVMET_RECOV, &ndlp->nlp_flag);
+ 		}
+ 	}
+ }
+@@ -894,13 +892,11 @@ lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint8_t fc4_type,
+ 	 */
+ 	if (vport->phba->nvmet_support) {
+ 		list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+-			if (!(ndlp->nlp_flag & NLP_NVMET_RECOV))
++			if (!test_bit(NLP_NVMET_RECOV, &ndlp->nlp_flag))
+ 				continue;
+ 			lpfc_disc_state_machine(vport, ndlp, NULL,
+ 						NLP_EVT_DEVICE_RECOVERY);
+-			spin_lock_irq(&ndlp->lock);
+-			ndlp->nlp_flag &= ~NLP_NVMET_RECOV;
+-			spin_unlock_irq(&ndlp->lock);
++			clear_bit(NLP_NVMET_RECOV, &ndlp->nlp_flag);
+ 		}
+ 	}
+ 
+@@ -1440,7 +1436,7 @@ lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ 	if (ndlp) {
+ 		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ 				 "0242 Process x%x GFF "
+-				 "NameServer Rsp Data: x%x x%lx x%x\n",
++				 "NameServer Rsp Data: x%lx x%lx x%x\n",
+ 				 did, ndlp->nlp_flag, vport->fc_flag,
+ 				 vport->fc_rscn_id_cnt);
+ 	} else {
+diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
+index a2d2b02b34187f..3fd1aa5cc78cc8 100644
+--- a/drivers/scsi/lpfc/lpfc_debugfs.c
++++ b/drivers/scsi/lpfc/lpfc_debugfs.c
+@@ -870,8 +870,8 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
+ 				wwn_to_u64(ndlp->nlp_nodename.u.wwn));
+ 		len += scnprintf(buf+len, size-len, "RPI:x%04x ",
+ 				 ndlp->nlp_rpi);
+-		len +=  scnprintf(buf+len, size-len, "flag:x%08x ",
+-			ndlp->nlp_flag);
++		len += scnprintf(buf+len, size-len, "flag:x%08lx ",
++				 ndlp->nlp_flag);
+ 		if (!ndlp->nlp_type)
+ 			len += scnprintf(buf+len, size-len, "UNKNOWN_TYPE ");
+ 		if (ndlp->nlp_type & NLP_FC_NODE)
+diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
+index f5ae8cc158205c..af5d5bd75642c3 100644
+--- a/drivers/scsi/lpfc/lpfc_disc.h
++++ b/drivers/scsi/lpfc/lpfc_disc.h
+@@ -102,7 +102,7 @@ struct lpfc_nodelist {
+ 
+ 	spinlock_t	lock;			/* Node management lock */
+ 
+-	uint32_t         nlp_flag;		/* entry flags */
++	unsigned long    nlp_flag;		/* entry flags */
+ 	uint32_t         nlp_DID;		/* FC D_ID of entry */
+ 	uint32_t         nlp_last_elscmd;	/* Last ELS cmd sent */
+ 	uint16_t         nlp_type;
+@@ -182,37 +182,37 @@ struct lpfc_node_rrq {
+ #define lpfc_ndlp_check_qdepth(phba, ndlp) \
+ 	(ndlp->cmd_qdepth < phba->sli4_hba.max_cfg_param.max_xri)
+ 
+-/* Defines for nlp_flag (uint32) */
+-#define NLP_IGNR_REG_CMPL  0x00000001 /* Rcvd rscn before we cmpl reg login */
+-#define NLP_REG_LOGIN_SEND 0x00000002   /* sent reglogin to adapter */
+-#define NLP_RELEASE_RPI    0x00000004   /* Release RPI to free pool */
+-#define NLP_SUPPRESS_RSP   0x00000010	/* Remote NPort supports suppress rsp */
+-#define NLP_PLOGI_SND      0x00000020	/* sent PLOGI request for this entry */
+-#define NLP_PRLI_SND       0x00000040	/* sent PRLI request for this entry */
+-#define NLP_ADISC_SND      0x00000080	/* sent ADISC request for this entry */
+-#define NLP_LOGO_SND       0x00000100	/* sent LOGO request for this entry */
+-#define NLP_RNID_SND       0x00000400	/* sent RNID request for this entry */
+-#define NLP_ELS_SND_MASK   0x000007e0	/* sent ELS request for this entry */
+-#define NLP_NVMET_RECOV    0x00001000   /* NVMET auditing node for recovery. */
+-#define NLP_UNREG_INP      0x00008000	/* UNREG_RPI cmd is in progress */
+-#define NLP_DROPPED        0x00010000	/* Init ref count has been dropped */
+-#define NLP_DELAY_TMO      0x00020000	/* delay timeout is running for node */
+-#define NLP_NPR_2B_DISC    0x00040000	/* node is included in num_disc_nodes */
+-#define NLP_RCV_PLOGI      0x00080000	/* Rcv'ed PLOGI from remote system */
+-#define NLP_LOGO_ACC       0x00100000	/* Process LOGO after ACC completes */
+-#define NLP_TGT_NO_SCSIID  0x00200000	/* good PRLI but no binding for scsid */
+-#define NLP_ISSUE_LOGO     0x00400000	/* waiting to issue a LOGO */
+-#define NLP_IN_DEV_LOSS    0x00800000	/* devloss in progress */
+-#define NLP_ACC_REGLOGIN   0x01000000	/* Issue Reg Login after successful
++/* nlp_flag mask bits */
++enum lpfc_nlp_flag {
++	NLP_IGNR_REG_CMPL  = 0,         /* Rcvd rscn before we cmpl reg login */
++	NLP_REG_LOGIN_SEND = 1,         /* sent reglogin to adapter */
++	NLP_SUPPRESS_RSP   = 4,         /* Remote NPort supports suppress rsp */
++	NLP_PLOGI_SND      = 5,         /* sent PLOGI request for this entry */
++	NLP_PRLI_SND       = 6,         /* sent PRLI request for this entry */
++	NLP_ADISC_SND      = 7,         /* sent ADISC request for this entry */
++	NLP_LOGO_SND       = 8,         /* sent LOGO request for this entry */
++	NLP_RNID_SND       = 10,        /* sent RNID request for this entry */
++	NLP_NVMET_RECOV    = 12,        /* NVMET auditing node for recovery. */
++	NLP_UNREG_INP      = 15,        /* UNREG_RPI cmd is in progress */
++	NLP_DROPPED        = 16,        /* Init ref count has been dropped */
++	NLP_DELAY_TMO      = 17,        /* delay timeout is running for node */
++	NLP_NPR_2B_DISC    = 18,        /* node is included in num_disc_nodes */
++	NLP_RCV_PLOGI      = 19,        /* Rcv'ed PLOGI from remote system */
++	NLP_LOGO_ACC       = 20,        /* Process LOGO after ACC completes */
++	NLP_TGT_NO_SCSIID  = 21,        /* good PRLI but no binding for scsid */
++	NLP_ISSUE_LOGO     = 22,        /* waiting to issue a LOGO */
++	NLP_IN_DEV_LOSS    = 23,        /* devloss in progress */
++	NLP_ACC_REGLOGIN   = 24,        /* Issue Reg Login after successful
+ 					   ACC */
+-#define NLP_NPR_ADISC      0x02000000	/* Issue ADISC when dq'ed from
++	NLP_NPR_ADISC      = 25,        /* Issue ADISC when dq'ed from
+ 					   NPR list */
+-#define NLP_RM_DFLT_RPI    0x04000000	/* need to remove leftover dflt RPI */
+-#define NLP_NODEV_REMOVE   0x08000000	/* Defer removal till discovery ends */
+-#define NLP_TARGET_REMOVE  0x10000000   /* Target remove in process */
+-#define NLP_SC_REQ         0x20000000	/* Target requires authentication */
+-#define NLP_FIRSTBURST     0x40000000	/* Target supports FirstBurst */
+-#define NLP_RPI_REGISTERED 0x80000000	/* nlp_rpi is valid */
++	NLP_RM_DFLT_RPI    = 26,        /* need to remove leftover dflt RPI */
++	NLP_NODEV_REMOVE   = 27,        /* Defer removal till discovery ends */
++	NLP_TARGET_REMOVE  = 28,        /* Target remove in process */
++	NLP_SC_REQ         = 29,        /* Target requires authentication */
++	NLP_FIRSTBURST     = 30,        /* Target supports FirstBurst */
++	NLP_RPI_REGISTERED = 31         /* nlp_rpi is valid */
++};
+ 
+ /* There are 4 different double linked lists nodelist entries can reside on.
+  * The Port Login (PLOGI) list and Address Discovery (ADISC) list are used
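
Everything that follows in the lpfc diff flows from the type change above: nlp_flag becomes an unsigned long keyed by this enum of bit numbers, so single-flag updates switch from lock/modify/unlock sequences to atomic bitops, and only multi-field updates (such as the prli_sent counters) keep taking ndlp->lock. The core of that conversion in sketch form:

#include <linux/bitops.h>
#include <linux/printk.h>

enum demo_flags {
	DEMO_BUSY = 0,
	DEMO_DISC = 1,
};

static unsigned long demo_flags;	/* bitops operate on unsigned long */

static void demo_update(void)
{
	set_bit(DEMO_BUSY, &demo_flags);	/* atomic RMW, no lock needed */

	if (test_bit(DEMO_DISC, &demo_flags))
		clear_bit(DEMO_DISC, &demo_flags);

	/* Read-and-clear in one atomic step, replacing the old
	 * spin_lock / test / clear / spin_unlock sequence. */
	if (test_and_clear_bit(DEMO_BUSY, &demo_flags))
		pr_debug("flag was set and is now cleared\n");
}
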
+diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
+index d737b897ddd821..b5fa5054e952e5 100644
+--- a/drivers/scsi/lpfc/lpfc_els.c
++++ b/drivers/scsi/lpfc/lpfc_els.c
+@@ -725,11 +725,9 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 		list_for_each_entry_safe(np, next_np,
+ 					&vport->fc_nodes, nlp_listp) {
+ 			if ((np->nlp_state != NLP_STE_NPR_NODE) ||
+-				   !(np->nlp_flag & NLP_NPR_ADISC))
++			    !test_bit(NLP_NPR_ADISC, &np->nlp_flag))
+ 				continue;
+-			spin_lock_irq(&np->lock);
+-			np->nlp_flag &= ~NLP_NPR_ADISC;
+-			spin_unlock_irq(&np->lock);
++			clear_bit(NLP_NPR_ADISC, &np->nlp_flag);
+ 			lpfc_unreg_rpi(vport, np);
+ 		}
+ 		lpfc_cleanup_pending_mbox(vport);
+@@ -864,9 +862,7 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 		       sizeof(struct lpfc_name));
+ 		/* Set state will put ndlp onto node list if not already done */
+ 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+-		spin_lock_irq(&ndlp->lock);
+-		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+-		spin_unlock_irq(&ndlp->lock);
++		set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag);
+ 
+ 		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ 		if (!mbox)
+@@ -1018,7 +1014,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ 		 * registered with the SCSI transport, remove the initial
+ 		 * reference to trigger node release.
+ 		 */
+-		if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS) &&
++		if (!test_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag) &&
+ 		    !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD))
+ 			lpfc_nlp_put(ndlp);
+ 
+@@ -1548,7 +1544,7 @@ lpfc_initial_flogi(struct lpfc_vport *vport)
+ 		 * Otherwise, decrement node reference to trigger release.
+ 		 */
+ 		if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) &&
+-		    !(ndlp->nlp_flag & NLP_IN_DEV_LOSS))
++		    !test_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag))
+ 			lpfc_nlp_put(ndlp);
+ 		return 0;
+ 	}
+@@ -1597,7 +1593,7 @@ lpfc_initial_fdisc(struct lpfc_vport *vport)
+ 		 * Otherwise, decrement node reference to trigger release.
+ 		 */
+ 		if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) &&
+-		    !(ndlp->nlp_flag & NLP_IN_DEV_LOSS))
++		    !test_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag))
+ 			lpfc_nlp_put(ndlp);
+ 		return 0;
+ 	}
+@@ -1675,9 +1671,9 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
+ 	struct lpfc_nodelist *new_ndlp;
+ 	struct serv_parm *sp;
+ 	uint8_t  name[sizeof(struct lpfc_name)];
+-	uint32_t keepDID = 0, keep_nlp_flag = 0;
++	uint32_t keepDID = 0;
+ 	int rc;
+-	uint32_t keep_new_nlp_flag = 0;
++	unsigned long keep_nlp_flag = 0, keep_new_nlp_flag = 0;
+ 	uint16_t keep_nlp_state;
+ 	u32 keep_nlp_fc4_type = 0;
+ 	struct lpfc_nvme_rport *keep_nrport = NULL;
+@@ -1704,8 +1700,8 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
+ 	}
+ 
+ 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE,
+-			 "3178 PLOGI confirm: ndlp x%x x%x x%x: "
+-			 "new_ndlp x%x x%x x%x\n",
++			 "3178 PLOGI confirm: ndlp x%x x%lx x%x: "
++			 "new_ndlp x%x x%lx x%x\n",
+ 			 ndlp->nlp_DID, ndlp->nlp_flag,  ndlp->nlp_fc4_type,
+ 			 (new_ndlp ? new_ndlp->nlp_DID : 0),
+ 			 (new_ndlp ? new_ndlp->nlp_flag : 0),
+@@ -1769,48 +1765,48 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
+ 	new_ndlp->nlp_flag = ndlp->nlp_flag;
+ 
+ 	/* if new_ndlp had NLP_UNREG_INP set, keep it */
+-	if (keep_new_nlp_flag & NLP_UNREG_INP)
+-		new_ndlp->nlp_flag |= NLP_UNREG_INP;
++	if (test_bit(NLP_UNREG_INP, &keep_new_nlp_flag))
++		set_bit(NLP_UNREG_INP, &new_ndlp->nlp_flag);
+ 	else
+-		new_ndlp->nlp_flag &= ~NLP_UNREG_INP;
++		clear_bit(NLP_UNREG_INP, &new_ndlp->nlp_flag);
+ 
+ 	/* if new_ndlp had NLP_RPI_REGISTERED set, keep it */
+-	if (keep_new_nlp_flag & NLP_RPI_REGISTERED)
+-		new_ndlp->nlp_flag |= NLP_RPI_REGISTERED;
++	if (test_bit(NLP_RPI_REGISTERED, &keep_new_nlp_flag))
++		set_bit(NLP_RPI_REGISTERED, &new_ndlp->nlp_flag);
+ 	else
+-		new_ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;
++		clear_bit(NLP_RPI_REGISTERED, &new_ndlp->nlp_flag);
+ 
+ 	/*
+ 	 * Retain the DROPPED flag. This will take care of the init
+ 	 * refcount when affecting the state change
+ 	 */
+-	if (keep_new_nlp_flag & NLP_DROPPED)
+-		new_ndlp->nlp_flag |= NLP_DROPPED;
++	if (test_bit(NLP_DROPPED, &keep_new_nlp_flag))
++		set_bit(NLP_DROPPED, &new_ndlp->nlp_flag);
+ 	else
+-		new_ndlp->nlp_flag &= ~NLP_DROPPED;
++		clear_bit(NLP_DROPPED, &new_ndlp->nlp_flag);
+ 
+ 	ndlp->nlp_flag = keep_new_nlp_flag;
+ 
+ 	/* if ndlp had NLP_UNREG_INP set, keep it */
+-	if (keep_nlp_flag & NLP_UNREG_INP)
+-		ndlp->nlp_flag |= NLP_UNREG_INP;
++	if (test_bit(NLP_UNREG_INP, &keep_nlp_flag))
++		set_bit(NLP_UNREG_INP, &ndlp->nlp_flag);
+ 	else
+-		ndlp->nlp_flag &= ~NLP_UNREG_INP;
++		clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag);
+ 
+ 	/* if ndlp had NLP_RPI_REGISTERED set, keep it */
+-	if (keep_nlp_flag & NLP_RPI_REGISTERED)
+-		ndlp->nlp_flag |= NLP_RPI_REGISTERED;
++	if (test_bit(NLP_RPI_REGISTERED, &keep_nlp_flag))
++		set_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag);
+ 	else
+-		ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;
++		clear_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag);
+ 
+ 	/*
+ 	 * Retain the DROPPED flag. This will take care of the init
+ 	 * refcount when affecting the state change
+ 	 */
+-	if (keep_nlp_flag & NLP_DROPPED)
+-		ndlp->nlp_flag |= NLP_DROPPED;
++	if (test_bit(NLP_DROPPED, &keep_nlp_flag))
++		set_bit(NLP_DROPPED, &ndlp->nlp_flag);
+ 	else
+-		ndlp->nlp_flag &= ~NLP_DROPPED;
++		clear_bit(NLP_DROPPED, &ndlp->nlp_flag);
+ 
+ 	spin_unlock_irq(&new_ndlp->lock);
+ 	spin_unlock_irq(&ndlp->lock);
+@@ -1888,7 +1884,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
+ 			     phba->active_rrq_pool);
+ 
+ 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE,
+-			 "3173 PLOGI confirm exit: new_ndlp x%x x%x x%x\n",
++			 "3173 PLOGI confirm exit: new_ndlp x%x x%lx x%x\n",
+ 			 new_ndlp->nlp_DID, new_ndlp->nlp_flag,
+ 			 new_ndlp->nlp_fc4_type);
+ 
+@@ -2009,7 +2005,7 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ 	IOCB_t *irsp;
+ 	struct lpfc_nodelist *ndlp, *free_ndlp;
+ 	struct lpfc_dmabuf *prsp;
+-	int disc;
++	bool disc;
+ 	struct serv_parm *sp = NULL;
+ 	u32 ulp_status, ulp_word4, did, iotag;
+ 	bool release_node = false;
+@@ -2044,10 +2040,7 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ 	/* Since ndlp can be freed in the disc state machine, note if this node
+ 	 * is being used during discovery.
+ 	 */
+-	spin_lock_irq(&ndlp->lock);
+-	disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
+-	ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+-	spin_unlock_irq(&ndlp->lock);
++	disc = test_and_clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag);
+ 
+ 	/* PLOGI completes to NPort <nlp_DID> */
+ 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+@@ -2060,9 +2053,7 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ 
+ 	/* Check to see if link went down during discovery */
+ 	if (lpfc_els_chk_latt(vport)) {
+-		spin_lock_irq(&ndlp->lock);
+-		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+-		spin_unlock_irq(&ndlp->lock);
++		set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag);
+ 		goto out;
+ 	}
+ 
+@@ -2070,11 +2061,8 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ 		/* Check for retry */
+ 		if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
+ 			/* ELS command is being retried */
+-			if (disc) {
+-				spin_lock_irq(&ndlp->lock);
+-				ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+-				spin_unlock_irq(&ndlp->lock);
+-			}
++			if (disc)
++				set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag);
+ 			goto out;
+ 		}
+ 		/* Warn PLOGI status Don't print the vport to vport rjts */
+@@ -2097,7 +2085,8 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ 		 * with the reglogin process.
+ 		 */
+ 		spin_lock_irq(&ndlp->lock);
+-		if ((ndlp->nlp_flag & (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI)) &&
++		if ((test_bit(NLP_ACC_REGLOGIN, &ndlp->nlp_flag) ||
++		     test_bit(NLP_RCV_PLOGI, &ndlp->nlp_flag)) &&
+ 		    ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE) {
+ 			spin_unlock_irq(&ndlp->lock);
+ 			goto out;
+@@ -2108,8 +2097,8 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ 		 * start the device remove process.
+ 		 */
+ 		if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) {
+-			ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+-			if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS))
++			clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag);
++			if (!test_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag))
+ 				release_node = true;
+ 		}
+ 		spin_unlock_irq(&ndlp->lock);
+@@ -2212,12 +2201,13 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
+ 	 * outstanding UNREG_RPI mbox command completes, unless we
+ 	 * are going offline. This logic does not apply for Fabric DIDs
+ 	 */
+-	if ((ndlp->nlp_flag & (NLP_IGNR_REG_CMPL | NLP_UNREG_INP)) &&
++	if ((test_bit(NLP_IGNR_REG_CMPL, &ndlp->nlp_flag) ||
++	     test_bit(NLP_UNREG_INP, &ndlp->nlp_flag)) &&
+ 	    ((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) &&
+ 	    !test_bit(FC_OFFLINE_MODE, &vport->fc_flag)) {
+ 		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ 				 "4110 Issue PLOGI x%x deferred "
+-				 "on NPort x%x rpi x%x flg x%x Data:"
++				 "on NPort x%x rpi x%x flg x%lx Data:"
+ 				 " x%px\n",
+ 				 ndlp->nlp_defer_did, ndlp->nlp_DID,
+ 				 ndlp->nlp_rpi, ndlp->nlp_flag, ndlp);
+@@ -2335,10 +2325,10 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ 	ulp_status = get_job_ulpstatus(phba, rspiocb);
+ 	ulp_word4 = get_job_word4(phba, rspiocb);
+ 
+-	spin_lock_irq(&ndlp->lock);
+-	ndlp->nlp_flag &= ~NLP_PRLI_SND;
++	clear_bit(NLP_PRLI_SND, &ndlp->nlp_flag);
+ 
+ 	/* Driver supports multiple FC4 types.  Counters matter. */
++	spin_lock_irq(&ndlp->lock);
+ 	vport->fc_prli_sent--;
+ 	ndlp->fc4_prli_sent--;
+ 	spin_unlock_irq(&ndlp->lock);
+@@ -2379,7 +2369,7 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ 		/* Warn PRLI status */
+ 		lpfc_printf_vlog(vport, mode, LOG_ELS,
+ 				 "2754 PRLI DID:%06X Status:x%x/x%x, "
+-				 "data: x%x x%x x%x\n",
++				 "data: x%x x%x x%lx\n",
+ 				 ndlp->nlp_DID, ulp_status,
+ 				 ulp_word4, ndlp->nlp_state,
+ 				 ndlp->fc4_prli_sent, ndlp->nlp_flag);
+@@ -2396,10 +2386,10 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ 		if ((ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE &&
+ 		     ndlp->nlp_state <= NLP_STE_REG_LOGIN_ISSUE) ||
+ 		    (ndlp->nlp_state == NLP_STE_NPR_NODE &&
+-		     ndlp->nlp_flag & NLP_DELAY_TMO)) {
+-			lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
++		     test_bit(NLP_DELAY_TMO, &ndlp->nlp_flag))) {
++			lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
+ 					 "2784 PRLI cmpl: Allow Node recovery "
+-					 "DID x%06x nstate x%x nflag x%x\n",
++					 "DID x%06x nstate x%x nflag x%lx\n",
+ 					 ndlp->nlp_DID, ndlp->nlp_state,
+ 					 ndlp->nlp_flag);
+ 			goto out;
+@@ -2420,8 +2410,8 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ 		spin_lock_irq(&ndlp->lock);
+ 		if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) &&
+ 		    !ndlp->fc4_prli_sent) {
+-			ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+-			if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS))
++			clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag);
++			if (!test_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag))
+ 				release_node = true;
+ 		}
+ 		spin_unlock_irq(&ndlp->lock);
+@@ -2496,7 +2486,8 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 	ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
+ 	ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR);
+ 	ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+-	ndlp->nlp_flag &= ~(NLP_FIRSTBURST | NLP_NPR_2B_DISC);
++	clear_bit(NLP_FIRSTBURST, &ndlp->nlp_flag);
++	clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag);
+ 	ndlp->nvme_fb_size = 0;
+ 
+  send_next_prli:
+@@ -2627,8 +2618,8 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 	 * the ndlp is used to track outstanding PRLIs for different
+ 	 * FC4 types.
+ 	 */
++	set_bit(NLP_PRLI_SND, &ndlp->nlp_flag);
+ 	spin_lock_irq(&ndlp->lock);
+-	ndlp->nlp_flag |= NLP_PRLI_SND;
+ 	vport->fc_prli_sent++;
+ 	ndlp->fc4_prli_sent++;
+ 	spin_unlock_irq(&ndlp->lock);
+@@ -2789,7 +2780,7 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ 	struct lpfc_vport *vport = cmdiocb->vport;
+ 	IOCB_t *irsp;
+ 	struct lpfc_nodelist *ndlp;
+-	int  disc;
++	bool  disc;
+ 	u32 ulp_status, ulp_word4, tmo, iotag;
+ 	bool release_node = false;
+ 
+@@ -2818,10 +2809,8 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ 	/* Since ndlp can be freed in the disc state machine, note if this node
+ 	 * is being used during discovery.
+ 	 */
+-	spin_lock_irq(&ndlp->lock);
+-	disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
+-	ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC);
+-	spin_unlock_irq(&ndlp->lock);
++	disc = test_and_clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag);
++	clear_bit(NLP_ADISC_SND, &ndlp->nlp_flag);
+ 	/* ADISC completes to NPort <nlp_DID> */
+ 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ 			 "0104 ADISC completes to NPort x%x "
+@@ -2832,9 +2821,7 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ 
+ 	/* Check to see if link went down during discovery */
+ 	if (lpfc_els_chk_latt(vport)) {
+-		spin_lock_irq(&ndlp->lock);
+-		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+-		spin_unlock_irq(&ndlp->lock);
++		set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag);
+ 		goto out;
+ 	}
+ 
+@@ -2843,9 +2830,7 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ 		if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
+ 			/* ELS command is being retried */
+ 			if (disc) {
+-				spin_lock_irq(&ndlp->lock);
+-				ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+-				spin_unlock_irq(&ndlp->lock);
++				set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag);
+ 				lpfc_set_disctmo(vport);
+ 			}
+ 			goto out;
+@@ -2864,8 +2849,8 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ 		 */
+ 		spin_lock_irq(&ndlp->lock);
+ 		if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) {
+-			ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+-			if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS))
++			clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag);
++			if (!test_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag))
+ 				release_node = true;
+ 		}
+ 		spin_unlock_irq(&ndlp->lock);
+@@ -2938,9 +2923,7 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 
+ 	phba->fc_stat.elsXmitADISC++;
+ 	elsiocb->cmd_cmpl = lpfc_cmpl_els_adisc;
+-	spin_lock_irq(&ndlp->lock);
+-	ndlp->nlp_flag |= NLP_ADISC_SND;
+-	spin_unlock_irq(&ndlp->lock);
++	set_bit(NLP_ADISC_SND, &ndlp->nlp_flag);
+ 	elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ 	if (!elsiocb->ndlp) {
+ 		lpfc_els_free_iocb(phba, elsiocb);
+@@ -2961,9 +2944,7 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 	return 0;
+ 
+ err:
+-	spin_lock_irq(&ndlp->lock);
+-	ndlp->nlp_flag &= ~NLP_ADISC_SND;
+-	spin_unlock_irq(&ndlp->lock);
++	clear_bit(NLP_ADISC_SND, &ndlp->nlp_flag);
+ 	return 1;
+ }
+ 
+@@ -2985,7 +2966,6 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ 	struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
+ 	struct lpfc_vport *vport = ndlp->vport;
+ 	IOCB_t *irsp;
+-	unsigned long flags;
+ 	uint32_t skip_recovery = 0;
+ 	int wake_up_waiter = 0;
+ 	u32 ulp_status;
+@@ -3007,8 +2987,8 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ 		iotag = irsp->ulpIoTag;
+ 	}
+ 
++	clear_bit(NLP_LOGO_SND, &ndlp->nlp_flag);
+ 	spin_lock_irq(&ndlp->lock);
+-	ndlp->nlp_flag &= ~NLP_LOGO_SND;
+ 	if (ndlp->save_flags & NLP_WAIT_FOR_LOGO) {
+ 		wake_up_waiter = 1;
+ 		ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO;
+@@ -3023,7 +3003,7 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ 	/* LOGO completes to NPort <nlp_DID> */
+ 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ 			 "0105 LOGO completes to NPort x%x "
+-			 "IoTag x%x refcnt %d nflags x%x xflags x%x "
++			 "IoTag x%x refcnt %d nflags x%lx xflags x%x "
+ 			 "Data: x%x x%x x%x x%x\n",
+ 			 ndlp->nlp_DID, iotag,
+ 			 kref_read(&ndlp->kref), ndlp->nlp_flag,
+@@ -3061,12 +3041,8 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ 	/* The driver sets this flag for an NPIV instance that doesn't want to
+ 	 * log into the remote port.
+ 	 */
+-	if (ndlp->nlp_flag & NLP_TARGET_REMOVE) {
+-		spin_lock_irq(&ndlp->lock);
+-		if (phba->sli_rev == LPFC_SLI_REV4)
+-			ndlp->nlp_flag |= NLP_RELEASE_RPI;
+-		ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+-		spin_unlock_irq(&ndlp->lock);
++	if (test_bit(NLP_TARGET_REMOVE, &ndlp->nlp_flag)) {
++		clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag);
+ 		lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+ 					NLP_EVT_DEVICE_RM);
+ 		goto out_rsrc_free;
+@@ -3089,9 +3065,7 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ 	if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET) &&
+ 	    skip_recovery == 0) {
+ 		lpfc_cancel_retry_delay_tmo(vport, ndlp);
+-		spin_lock_irqsave(&ndlp->lock, flags);
+-		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+-		spin_unlock_irqrestore(&ndlp->lock, flags);
++		set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag);
+ 
+ 		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ 				 "3187 LOGO completes to NPort x%x: Start "
+@@ -3113,9 +3087,7 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ 	 * register with the transport.
+ 	 */
+ 	if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) {
+-		spin_lock_irq(&ndlp->lock);
+-		ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+-		spin_unlock_irq(&ndlp->lock);
++		clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag);
+ 		lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+ 					NLP_EVT_DEVICE_RM);
+ 	}
+@@ -3156,12 +3128,8 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 	uint16_t cmdsize;
+ 	int rc;
+ 
+-	spin_lock_irq(&ndlp->lock);
+-	if (ndlp->nlp_flag & NLP_LOGO_SND) {
+-		spin_unlock_irq(&ndlp->lock);
++	if (test_bit(NLP_LOGO_SND, &ndlp->nlp_flag))
+ 		return 0;
+-	}
+-	spin_unlock_irq(&ndlp->lock);
+ 
+ 	cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name);
+ 	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
+@@ -3180,10 +3148,8 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 
+ 	phba->fc_stat.elsXmitLOGO++;
+ 	elsiocb->cmd_cmpl = lpfc_cmpl_els_logo;
+-	spin_lock_irq(&ndlp->lock);
+-	ndlp->nlp_flag |= NLP_LOGO_SND;
+-	ndlp->nlp_flag &= ~NLP_ISSUE_LOGO;
+-	spin_unlock_irq(&ndlp->lock);
++	set_bit(NLP_LOGO_SND, &ndlp->nlp_flag);
++	clear_bit(NLP_ISSUE_LOGO, &ndlp->nlp_flag);
+ 	elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ 	if (!elsiocb->ndlp) {
+ 		lpfc_els_free_iocb(phba, elsiocb);
+@@ -3208,9 +3174,7 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 	return 0;
+ 
+ err:
+-	spin_lock_irq(&ndlp->lock);
+-	ndlp->nlp_flag &= ~NLP_LOGO_SND;
+-	spin_unlock_irq(&ndlp->lock);
++	clear_bit(NLP_LOGO_SND, &ndlp->nlp_flag);
+ 	return 1;
+ }
+ 
+@@ -3286,13 +3250,13 @@ lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ static int
+ lpfc_reg_fab_ctrl_node(struct lpfc_vport *vport, struct lpfc_nodelist *fc_ndlp)
+ {
+-	int rc = 0;
++	int rc;
+ 	struct lpfc_hba *phba = vport->phba;
+ 	struct lpfc_nodelist *ns_ndlp;
+ 	LPFC_MBOXQ_t *mbox;
+ 
+-	if (fc_ndlp->nlp_flag & NLP_RPI_REGISTERED)
+-		return rc;
++	if (test_bit(NLP_RPI_REGISTERED, &fc_ndlp->nlp_flag))
++		return 0;
+ 
+ 	ns_ndlp = lpfc_findnode_did(vport, NameServer_DID);
+ 	if (!ns_ndlp)
+@@ -3309,7 +3273,7 @@ lpfc_reg_fab_ctrl_node(struct lpfc_vport *vport, struct lpfc_nodelist *fc_ndlp)
+ 	if (!mbox) {
+ 		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
+ 				 "0936 %s: no memory for reg_login "
+-				 "Data: x%x x%x x%x x%x\n", __func__,
++				 "Data: x%x x%x x%lx x%x\n", __func__,
+ 				 fc_ndlp->nlp_DID, fc_ndlp->nlp_state,
+ 				 fc_ndlp->nlp_flag, fc_ndlp->nlp_rpi);
+ 		return -ENOMEM;
+@@ -3321,7 +3285,7 @@ lpfc_reg_fab_ctrl_node(struct lpfc_vport *vport, struct lpfc_nodelist *fc_ndlp)
+ 		goto out;
+ 	}
+ 
+-	fc_ndlp->nlp_flag |= NLP_REG_LOGIN_SEND;
++	set_bit(NLP_REG_LOGIN_SEND, &fc_ndlp->nlp_flag);
+ 	mbox->mbox_cmpl = lpfc_mbx_cmpl_fc_reg_login;
+ 	mbox->ctx_ndlp = lpfc_nlp_get(fc_ndlp);
+ 	if (!mbox->ctx_ndlp) {
+@@ -3345,7 +3309,7 @@ lpfc_reg_fab_ctrl_node(struct lpfc_vport *vport, struct lpfc_nodelist *fc_ndlp)
+ 	lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
+ 	lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
+ 			 "0938 %s: failed to format reg_login "
+-			 "Data: x%x x%x x%x x%x\n", __func__,
++			 "Data: x%x x%x x%lx x%x\n", __func__,
+ 			 fc_ndlp->nlp_DID, fc_ndlp->nlp_state,
+ 			 fc_ndlp->nlp_flag, fc_ndlp->nlp_rpi);
+ 	return rc;
+@@ -4384,11 +4348,8 @@ lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp)
+ {
+ 	struct lpfc_work_evt *evtp;
+ 
+-	if (!(nlp->nlp_flag & NLP_DELAY_TMO))
++	if (!test_and_clear_bit(NLP_DELAY_TMO, &nlp->nlp_flag))
+ 		return;
+-	spin_lock_irq(&nlp->lock);
+-	nlp->nlp_flag &= ~NLP_DELAY_TMO;
+-	spin_unlock_irq(&nlp->lock);
+ 	del_timer_sync(&nlp->nlp_delayfunc);
+ 	nlp->nlp_last_elscmd = 0;
+ 	if (!list_empty(&nlp->els_retry_evt.evt_listp)) {
+@@ -4397,10 +4358,7 @@ lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp)
+ 		evtp = &nlp->els_retry_evt;
+ 		lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1);
+ 	}
+-	if (nlp->nlp_flag & NLP_NPR_2B_DISC) {
+-		spin_lock_irq(&nlp->lock);
+-		nlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+-		spin_unlock_irq(&nlp->lock);
++	if (test_and_clear_bit(NLP_NPR_2B_DISC, &nlp->nlp_flag)) {
+ 		if (vport->num_disc_nodes) {
+ 			if (vport->port_state < LPFC_VPORT_READY) {
+ 				/* Check if there are more ADISCs to be sent */
+@@ -4480,14 +4438,11 @@ lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
+ 	spin_lock_irq(&ndlp->lock);
+ 	cmd = ndlp->nlp_last_elscmd;
+ 	ndlp->nlp_last_elscmd = 0;
++	spin_unlock_irq(&ndlp->lock);
+ 
+-	if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
+-		spin_unlock_irq(&ndlp->lock);
++	if (!test_and_clear_bit(NLP_DELAY_TMO, &ndlp->nlp_flag))
+ 		return;
+-	}
+ 
+-	ndlp->nlp_flag &= ~NLP_DELAY_TMO;
+-	spin_unlock_irq(&ndlp->lock);
+ 	/*
+ 	 * If a discovery event readded nlp_delayfunc after timer
+ 	 * firing and before processing the timer, cancel the
+@@ -5010,9 +4965,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ 			/* delay is specified in milliseconds */
+ 			mod_timer(&ndlp->nlp_delayfunc,
+ 				jiffies + msecs_to_jiffies(delay));
+-			spin_lock_irq(&ndlp->lock);
+-			ndlp->nlp_flag |= NLP_DELAY_TMO;
+-			spin_unlock_irq(&ndlp->lock);
++			set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag);
+ 
+ 			ndlp->nlp_prev_state = ndlp->nlp_state;
+ 			if ((cmd == ELS_CMD_PRLI) ||
+@@ -5072,7 +5025,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ 		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ 				 "0108 No retry ELS command x%x to remote "
+ 				 "NPORT x%x Retried:%d Error:x%x/%x "
+-				 "IoTag x%x nflags x%x\n",
++				 "IoTag x%x nflags x%lx\n",
+ 				 cmd, did, cmdiocb->retry, ulp_status,
+ 				 ulp_word4, cmdiocb->iotag,
+ 				 (ndlp ? ndlp->nlp_flag : 0));
+@@ -5239,7 +5192,7 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ 	/* ACC to LOGO completes to NPort <nlp_DID> */
+ 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ 			 "0109 ACC to LOGO completes to NPort x%x refcnt %d "
+-			 "last els x%x Data: x%x x%x x%x\n",
++			 "last els x%x Data: x%lx x%x x%x\n",
+ 			 ndlp->nlp_DID, kref_read(&ndlp->kref),
+ 			 ndlp->nlp_last_elscmd, ndlp->nlp_flag, ndlp->nlp_state,
+ 			 ndlp->nlp_rpi);
+@@ -5254,16 +5207,14 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ 		goto out;
+ 
+ 	if (ndlp->nlp_state == NLP_STE_NPR_NODE) {
+-		if (ndlp->nlp_flag & NLP_RPI_REGISTERED)
++		if (test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag))
+ 			lpfc_unreg_rpi(vport, ndlp);
+ 
+ 		/* If came from PRLO, then PRLO_ACC is done.
+ 		 * Start rediscovery now.
+ 		 */
+ 		if (ndlp->nlp_last_elscmd == ELS_CMD_PRLO) {
+-			spin_lock_irq(&ndlp->lock);
+-			ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+-			spin_unlock_irq(&ndlp->lock);
++			set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag);
+ 			ndlp->nlp_prev_state = ndlp->nlp_state;
+ 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
+ 			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
+@@ -5300,7 +5251,7 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+ 
+ 	if (ndlp) {
+ 		lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
+-				 "0006 rpi x%x DID:%x flg:%x %d x%px "
++				 "0006 rpi x%x DID:%x flg:%lx %d x%px "
+ 				 "mbx_cmd x%x mbx_flag x%x x%px\n",
+ 				 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
+ 				 kref_read(&ndlp->kref), ndlp, mbx_cmd,
+@@ -5311,11 +5262,9 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+ 		 * first on an UNREG_LOGIN and then release the final
+ 		 * references.
+ 		 */
+-		spin_lock_irq(&ndlp->lock);
+-		ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
++		clear_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag);
+ 		if (mbx_cmd == MBX_UNREG_LOGIN)
+-			ndlp->nlp_flag &= ~NLP_UNREG_INP;
+-		spin_unlock_irq(&ndlp->lock);
++			clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag);
+ 		lpfc_nlp_put(ndlp);
+ 		lpfc_drop_node(ndlp->vport, ndlp);
+ 	}
+@@ -5381,23 +5330,23 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ 	/* ELS response tag <ulpIoTag> completes */
+ 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ 			 "0110 ELS response tag x%x completes "
+-			 "Data: x%x x%x x%x x%x x%x x%x x%x x%x %p %p\n",
++			 "Data: x%x x%x x%x x%x x%lx x%x x%x x%x %p %p\n",
+ 			 iotag, ulp_status, ulp_word4, tmo,
+ 			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+ 			 ndlp->nlp_rpi, kref_read(&ndlp->kref), mbox, ndlp);
+ 	if (mbox) {
+-		if (ulp_status == 0
+-		    && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
++		if (ulp_status == 0 &&
++		    test_bit(NLP_ACC_REGLOGIN, &ndlp->nlp_flag)) {
+ 			if (!lpfc_unreg_rpi(vport, ndlp) &&
+ 			    !test_bit(FC_PT2PT, &vport->fc_flag)) {
+-				if (ndlp->nlp_state ==  NLP_STE_PLOGI_ISSUE ||
++				if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
+ 				    ndlp->nlp_state ==
+ 				     NLP_STE_REG_LOGIN_ISSUE) {
+ 					lpfc_printf_vlog(vport, KERN_INFO,
+ 							 LOG_DISCOVERY,
+ 							 "0314 PLOGI recov "
+ 							 "DID x%x "
+-							 "Data: x%x x%x x%x\n",
++							 "Data: x%x x%x x%lx\n",
+ 							 ndlp->nlp_DID,
+ 							 ndlp->nlp_state,
+ 							 ndlp->nlp_rpi,
+@@ -5414,18 +5363,17 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ 				goto out_free_mbox;
+ 
+ 			mbox->vport = vport;
+-			if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) {
++			if (test_bit(NLP_RM_DFLT_RPI, &ndlp->nlp_flag)) {
+ 				mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
+ 				mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
+-			}
+-			else {
++			} else {
+ 				mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
+ 				ndlp->nlp_prev_state = ndlp->nlp_state;
+ 				lpfc_nlp_set_state(vport, ndlp,
+ 					   NLP_STE_REG_LOGIN_ISSUE);
+ 			}
+ 
+-			ndlp->nlp_flag |= NLP_REG_LOGIN_SEND;
++			set_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag);
+ 			if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
+ 			    != MBX_NOT_FINISHED)
+ 				goto out;
+@@ -5434,12 +5382,12 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ 			 * set for this failed mailbox command.
+ 			 */
+ 			lpfc_nlp_put(ndlp);
+-			ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
++			clear_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag);
+ 
+ 			/* ELS rsp: Cannot issue reg_login for <NPortid> */
+ 			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ 				"0138 ELS rsp: Cannot issue reg_login for x%x "
+-				"Data: x%x x%x x%x\n",
++				"Data: x%lx x%x x%x\n",
+ 				ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+ 				ndlp->nlp_rpi);
+ 		}
+@@ -5448,32 +5396,20 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ 	}
+ out:
+ 	if (ndlp && shost) {
+-		spin_lock_irq(&ndlp->lock);
+ 		if (mbox)
+-			ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN;
+-		ndlp->nlp_flag &= ~NLP_RM_DFLT_RPI;
+-		spin_unlock_irq(&ndlp->lock);
++			clear_bit(NLP_ACC_REGLOGIN, &ndlp->nlp_flag);
++		clear_bit(NLP_RM_DFLT_RPI, &ndlp->nlp_flag);
+ 	}
+ 
+ 	/* An SLI4 NPIV instance wants to drop the node at this point under
+-	 * these conditions and release the RPI.
++	 * these conditions because it doesn't need the login.
+ 	 */
+ 	if (phba->sli_rev == LPFC_SLI_REV4 &&
+ 	    vport && vport->port_type == LPFC_NPIV_PORT &&
+ 	    !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) {
+-		if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
+-			if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE &&
+-			    ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) {
+-				lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
+-				spin_lock_irq(&ndlp->lock);
+-				ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
+-				ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
+-				spin_unlock_irq(&ndlp->lock);
+-			}
+-			lpfc_drop_node(vport, ndlp);
+-		} else if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE &&
+-			   ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE &&
+-			   ndlp->nlp_state != NLP_STE_PRLI_ISSUE) {
++		if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE &&
++		    ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE &&
++		    ndlp->nlp_state != NLP_STE_PRLI_ISSUE) {
+ 			/* Drop ndlp if there is no planned or outstanding
+ 			 * issued PRLI.
+ 			 *
+@@ -5540,9 +5476,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
+ 		elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
+ 					     ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
+ 		if (!elsiocb) {
+-			spin_lock_irq(&ndlp->lock);
+-			ndlp->nlp_flag &= ~NLP_LOGO_ACC;
+-			spin_unlock_irq(&ndlp->lock);
++			clear_bit(NLP_LOGO_ACC, &ndlp->nlp_flag);
+ 			return 1;
+ 		}
+ 
+@@ -5570,7 +5504,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
+ 		pcmd += sizeof(uint32_t);
+ 
+ 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+-			"Issue ACC:       did:x%x flg:x%x",
++			"Issue ACC:       did:x%x flg:x%lx",
+ 			ndlp->nlp_DID, ndlp->nlp_flag, 0);
+ 		break;
+ 	case ELS_CMD_FLOGI:
+@@ -5649,7 +5583,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
+ 		}
+ 
+ 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+-			"Issue ACC FLOGI/PLOGI: did:x%x flg:x%x",
++			"Issue ACC FLOGI/PLOGI: did:x%x flg:x%lx",
+ 			ndlp->nlp_DID, ndlp->nlp_flag, 0);
+ 		break;
+ 	case ELS_CMD_PRLO:
+@@ -5687,7 +5621,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
+ 		els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED;
+ 
+ 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+-			"Issue ACC PRLO:  did:x%x flg:x%x",
++			"Issue ACC PRLO:  did:x%x flg:x%lx",
+ 			ndlp->nlp_DID, ndlp->nlp_flag, 0);
+ 		break;
+ 	case ELS_CMD_RDF:
+@@ -5732,12 +5666,10 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
+ 	default:
+ 		return 1;
+ 	}
+-	if (ndlp->nlp_flag & NLP_LOGO_ACC) {
+-		spin_lock_irq(&ndlp->lock);
+-		if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED ||
+-			ndlp->nlp_flag & NLP_REG_LOGIN_SEND))
+-			ndlp->nlp_flag &= ~NLP_LOGO_ACC;
+-		spin_unlock_irq(&ndlp->lock);
++	if (test_bit(NLP_LOGO_ACC, &ndlp->nlp_flag)) {
++		if (!test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag) &&
++		    !test_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag))
++			clear_bit(NLP_LOGO_ACC, &ndlp->nlp_flag);
+ 		elsiocb->cmd_cmpl = lpfc_cmpl_els_logo_acc;
+ 	} else {
+ 		elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
+@@ -5760,7 +5692,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
+ 	/* Xmit ELS ACC response tag <ulpIoTag> */
+ 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ 			 "0128 Xmit ELS ACC response Status: x%x, IoTag: x%x, "
+-			 "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x "
++			 "XRI: x%x, DID: x%x, nlp_flag: x%lx nlp_state: x%x "
+ 			 "RPI: x%x, fc_flag x%lx refcnt %d\n",
+ 			 rc, elsiocb->iotag, elsiocb->sli4_xritag,
+ 			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+@@ -5835,13 +5767,13 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
+ 	/* Xmit ELS RJT <err> response tag <ulpIoTag> */
+ 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ 			 "0129 Xmit ELS RJT x%x response tag x%x "
+-			 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
++			 "xri x%x, did x%x, nlp_flag x%lx, nlp_state x%x, "
+ 			 "rpi x%x\n",
+ 			 rejectError, elsiocb->iotag,
+ 			 get_job_ulpcontext(phba, elsiocb), ndlp->nlp_DID,
+ 			 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
+ 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+-		"Issue LS_RJT:    did:x%x flg:x%x err:x%x",
++		"Issue LS_RJT:    did:x%x flg:x%lx err:x%x",
+ 		ndlp->nlp_DID, ndlp->nlp_flag, rejectError);
+ 
+ 	phba->fc_stat.elsXmitLSRJT++;
+@@ -5852,18 +5784,6 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
+ 		return 1;
+ 	}
+ 
+-	/* The NPIV instance is rejecting this unsolicited ELS. Make sure the
+-	 * node's assigned RPI gets released provided this node is not already
+-	 * registered with the transport.
+-	 */
+-	if (phba->sli_rev == LPFC_SLI_REV4 &&
+-	    vport->port_type == LPFC_NPIV_PORT &&
+-	    !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) {
+-		spin_lock_irq(&ndlp->lock);
+-		ndlp->nlp_flag |= NLP_RELEASE_RPI;
+-		spin_unlock_irq(&ndlp->lock);
+-	}
+-
+ 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+ 	if (rc == IOCB_ERROR) {
+ 		lpfc_els_free_iocb(phba, elsiocb);
+@@ -5944,7 +5864,7 @@ lpfc_issue_els_edc_rsp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+ 		lpfc_format_edc_lft_desc(phba, tlv);
+ 
+ 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+-			      "Issue EDC ACC:      did:x%x flg:x%x refcnt %d",
++			      "Issue EDC ACC:      did:x%x flg:x%lx refcnt %d",
+ 			      ndlp->nlp_DID, ndlp->nlp_flag,
+ 			      kref_read(&ndlp->kref));
+ 	elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
+@@ -5966,7 +5886,7 @@ lpfc_issue_els_edc_rsp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+ 	/* Xmit ELS ACC response tag <ulpIoTag> */
+ 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ 			 "0152 Xmit EDC ACC response Status: x%x, IoTag: x%x, "
+-			 "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x "
++			 "XRI: x%x, DID: x%x, nlp_flag: x%lx nlp_state: x%x "
+ 			 "RPI: x%x, fc_flag x%lx\n",
+ 			 rc, elsiocb->iotag, elsiocb->sli4_xritag,
+ 			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+@@ -6035,7 +5955,7 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
+ 	/* Xmit ADISC ACC response tag <ulpIoTag> */
+ 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ 			 "0130 Xmit ADISC ACC response iotag x%x xri: "
+-			 "x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n",
++			 "x%x, did x%x, nlp_flag x%lx, nlp_state x%x rpi x%x\n",
+ 			 elsiocb->iotag, ulp_context,
+ 			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+ 			 ndlp->nlp_rpi);
+@@ -6051,7 +5971,7 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
+ 	ap->DID = be32_to_cpu(vport->fc_myDID);
+ 
+ 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+-		      "Issue ACC ADISC: did:x%x flg:x%x refcnt %d",
++		      "Issue ACC ADISC: did:x%x flg:x%lx refcnt %d",
+ 		      ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref));
+ 
+ 	phba->fc_stat.elsXmitACC++;
+@@ -6157,7 +6077,7 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
+ 	/* Xmit PRLI ACC response tag <ulpIoTag> */
+ 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ 			 "0131 Xmit PRLI ACC response tag x%x xri x%x, "
+-			 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
++			 "did x%x, nlp_flag x%lx, nlp_state x%x, rpi x%x\n",
+ 			 elsiocb->iotag, ulp_context,
+ 			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+ 			 ndlp->nlp_rpi);
+@@ -6228,7 +6148,7 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
+ 
+ 		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
+ 				 "6015 NVME issue PRLI ACC word1 x%08x "
+-				 "word4 x%08x word5 x%08x flag x%x, "
++				 "word4 x%08x word5 x%08x flag x%lx, "
+ 				 "fcp_info x%x nlp_type x%x\n",
+ 				 npr_nvme->word1, npr_nvme->word4,
+ 				 npr_nvme->word5, ndlp->nlp_flag,
+@@ -6243,7 +6163,7 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
+ 				 ndlp->nlp_DID);
+ 
+ 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+-		      "Issue ACC PRLI:  did:x%x flg:x%x",
++		      "Issue ACC PRLI:  did:x%x flg:x%lx",
+ 		      ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref));
+ 
+ 	phba->fc_stat.elsXmitACC++;
+@@ -6357,7 +6277,7 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
+ 	}
+ 
+ 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+-		      "Issue ACC RNID:  did:x%x flg:x%x refcnt %d",
++		      "Issue ACC RNID:  did:x%x flg:x%lx refcnt %d",
+ 		      ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref));
+ 
+ 	phba->fc_stat.elsXmitACC++;
+@@ -6414,7 +6334,7 @@ lpfc_els_clear_rrq(struct lpfc_vport *vport,
+ 			get_job_ulpcontext(phba, iocb));
+ 
+ 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+-		"Clear RRQ:  did:x%x flg:x%x exchg:x%.08x",
++		"Clear RRQ:  did:x%x flg:x%lx exchg:x%.08x",
+ 		ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg);
+ 	if (vport->fc_myDID == be32_to_cpu(bf_get(rrq_did, rrq)))
+ 		xri = bf_get(rrq_oxid, rrq);
+@@ -6491,7 +6411,7 @@ lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data,
+ 	memcpy(pcmd, data, cmdsize - sizeof(uint32_t));
+ 
+ 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+-		      "Issue ACC ECHO:  did:x%x flg:x%x refcnt %d",
++		      "Issue ACC ECHO:  did:x%x flg:x%lx refcnt %d",
+ 		      ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref));
+ 
+ 	phba->fc_stat.elsXmitACC++;
+@@ -6541,14 +6461,12 @@ lpfc_els_disc_adisc(struct lpfc_vport *vport)
+ 	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
+ 
+ 		if (ndlp->nlp_state != NLP_STE_NPR_NODE ||
+-		    !(ndlp->nlp_flag & NLP_NPR_ADISC))
++		    !test_bit(NLP_NPR_ADISC, &ndlp->nlp_flag))
+ 			continue;
+ 
+-		spin_lock_irq(&ndlp->lock);
+-		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+-		spin_unlock_irq(&ndlp->lock);
++		clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag);
+ 
+-		if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
++		if (!test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag)) {
+ 			/* This node was marked for ADISC but was not picked
+ 			 * for discovery. This is possible if the node was
+ 			 * missing in gidft response.
+@@ -6606,9 +6524,9 @@ lpfc_els_disc_plogi(struct lpfc_vport *vport)
+ 	/* go thru NPR nodes and issue any remaining ELS PLOGIs */
+ 	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
+ 		if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
+-				(ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
+-				(ndlp->nlp_flag & NLP_DELAY_TMO) == 0 &&
+-				(ndlp->nlp_flag & NLP_NPR_ADISC) == 0) {
++		    test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag) &&
++		    !test_bit(NLP_DELAY_TMO, &ndlp->nlp_flag) &&
++		    !test_bit(NLP_NPR_ADISC, &ndlp->nlp_flag)) {
+ 			ndlp->nlp_prev_state = ndlp->nlp_state;
+ 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
+ 			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
+@@ -7104,7 +7022,7 @@ lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context,
+ 
+ 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ 			"2171 Xmit RDP response tag x%x xri x%x, "
+-			"did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x",
++			"did x%x, nlp_flag x%lx, nlp_state x%x, rpi x%x",
+ 			elsiocb->iotag, ulp_context,
+ 			ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+ 			ndlp->nlp_rpi);
+@@ -8078,7 +7996,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+ 	 */
+ 	if (vport->port_state <= LPFC_NS_QRY) {
+ 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+-			"RCV RSCN ignore: did:x%x/ste:x%x flg:x%x",
++			"RCV RSCN ignore: did:x%x/ste:x%x flg:x%lx",
+ 			ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
+ 
+ 		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
+@@ -8108,7 +8026,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+ 					 vport->fc_flag, payload_len,
+ 					 *lp, vport->fc_rscn_id_cnt);
+ 			lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+-				"RCV RSCN vport:  did:x%x/ste:x%x flg:x%x",
++				"RCV RSCN vport:  did:x%x/ste:x%x flg:x%lx",
+ 				ndlp->nlp_DID, vport->port_state,
+ 				ndlp->nlp_flag);
+ 
+@@ -8145,7 +8063,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+ 	if (test_bit(FC_RSCN_MODE, &vport->fc_flag) ||
+ 	    test_bit(FC_NDISC_ACTIVE, &vport->fc_flag)) {
+ 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+-			"RCV RSCN defer:  did:x%x/ste:x%x flg:x%x",
++			"RCV RSCN defer:  did:x%x/ste:x%x flg:x%lx",
+ 			ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
+ 
+ 		set_bit(FC_RSCN_DEFERRED, &vport->fc_flag);
+@@ -8201,7 +8119,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+ 		return 0;
+ 	}
+ 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+-		"RCV RSCN:        did:x%x/ste:x%x flg:x%x",
++		"RCV RSCN:        did:x%x/ste:x%x flg:x%lx",
+ 		ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
+ 
+ 	set_bit(FC_RSCN_MODE, &vport->fc_flag);
+@@ -8707,7 +8625,7 @@ lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+ 	/* Xmit ELS RLS ACC response tag <ulpIoTag> */
+ 	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
+ 			 "2874 Xmit ELS RLS ACC response tag x%x xri x%x, "
+-			 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
++			 "did x%x, nlp_flag x%lx, nlp_state x%x, rpi x%x\n",
+ 			 elsiocb->iotag, ulp_context,
+ 			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+ 			 ndlp->nlp_rpi);
+@@ -8869,7 +8787,7 @@ lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+ 	/* Xmit ELS RLS ACC response tag <ulpIoTag> */
+ 	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
+ 			 "2875 Xmit ELS RTV ACC response tag x%x xri x%x, "
+-			 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x, "
++			 "did x%x, nlp_flag x%lx, nlp_state x%x, rpi x%x, "
+ 			 "Data: x%x x%x x%x\n",
+ 			 elsiocb->iotag, ulp_context,
+ 			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+@@ -9066,7 +8984,7 @@ lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
+ 	/* Xmit ELS RPL ACC response tag <ulpIoTag> */
+ 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ 			 "0120 Xmit ELS RPL ACC response tag x%x "
+-			 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
++			 "xri x%x, did x%x, nlp_flag x%lx, nlp_state x%x, "
+ 			 "rpi x%x\n",
+ 			 elsiocb->iotag, ulp_context,
+ 			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+@@ -10411,14 +10329,11 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ 	 * Do not process any unsolicited ELS commands
+ 	 * if the ndlp is in DEV_LOSS
+ 	 */
+-	spin_lock_irq(&ndlp->lock);
+-	if (ndlp->nlp_flag & NLP_IN_DEV_LOSS) {
+-		spin_unlock_irq(&ndlp->lock);
++	if (test_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag)) {
+ 		if (newnode)
+ 			lpfc_nlp_put(ndlp);
+ 		goto dropit;
+ 	}
+-	spin_unlock_irq(&ndlp->lock);
+ 
+ 	elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ 	if (!elsiocb->ndlp)
+@@ -10447,7 +10362,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ 	switch (cmd) {
+ 	case ELS_CMD_PLOGI:
+ 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+-			"RCV PLOGI:       did:x%x/ste:x%x flg:x%x",
++			"RCV PLOGI:       did:x%x/ste:x%x flg:x%lx",
+ 			did, vport->port_state, ndlp->nlp_flag);
+ 
+ 		phba->fc_stat.elsRcvPLOGI++;
+@@ -10486,9 +10401,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ 			}
+ 		}
+ 
+-		spin_lock_irq(&ndlp->lock);
+-		ndlp->nlp_flag &= ~NLP_TARGET_REMOVE;
+-		spin_unlock_irq(&ndlp->lock);
++		clear_bit(NLP_TARGET_REMOVE, &ndlp->nlp_flag);
+ 
+ 		lpfc_disc_state_machine(vport, ndlp, elsiocb,
+ 					NLP_EVT_RCV_PLOGI);
+@@ -10496,7 +10409,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ 		break;
+ 	case ELS_CMD_FLOGI:
+ 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+-			"RCV FLOGI:       did:x%x/ste:x%x flg:x%x",
++			"RCV FLOGI:       did:x%x/ste:x%x flg:x%lx",
+ 			did, vport->port_state, ndlp->nlp_flag);
+ 
+ 		phba->fc_stat.elsRcvFLOGI++;
+@@ -10523,7 +10436,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ 		break;
+ 	case ELS_CMD_LOGO:
+ 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+-			"RCV LOGO:        did:x%x/ste:x%x flg:x%x",
++			"RCV LOGO:        did:x%x/ste:x%x flg:x%lx",
+ 			did, vport->port_state, ndlp->nlp_flag);
+ 
+ 		phba->fc_stat.elsRcvLOGO++;
+@@ -10540,7 +10453,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ 		break;
+ 	case ELS_CMD_PRLO:
+ 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+-			"RCV PRLO:        did:x%x/ste:x%x flg:x%x",
++			"RCV PRLO:        did:x%x/ste:x%x flg:x%lx",
+ 			did, vport->port_state, ndlp->nlp_flag);
+ 
+ 		phba->fc_stat.elsRcvPRLO++;
+@@ -10569,7 +10482,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ 		break;
+ 	case ELS_CMD_ADISC:
+ 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+-			"RCV ADISC:       did:x%x/ste:x%x flg:x%x",
++			"RCV ADISC:       did:x%x/ste:x%x flg:x%lx",
+ 			did, vport->port_state, ndlp->nlp_flag);
+ 
+ 		lpfc_send_els_event(vport, ndlp, payload);
+@@ -10584,7 +10497,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ 		break;
+ 	case ELS_CMD_PDISC:
+ 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+-			"RCV PDISC:       did:x%x/ste:x%x flg:x%x",
++			"RCV PDISC:       did:x%x/ste:x%x flg:x%lx",
+ 			did, vport->port_state, ndlp->nlp_flag);
+ 
+ 		phba->fc_stat.elsRcvPDISC++;
+@@ -10598,7 +10511,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ 		break;
+ 	case ELS_CMD_FARPR:
+ 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+-			"RCV FARPR:       did:x%x/ste:x%x flg:x%x",
++			"RCV FARPR:       did:x%x/ste:x%x flg:x%lx",
+ 			did, vport->port_state, ndlp->nlp_flag);
+ 
+ 		phba->fc_stat.elsRcvFARPR++;
+@@ -10606,7 +10519,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ 		break;
+ 	case ELS_CMD_FARP:
+ 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+-			"RCV FARP:        did:x%x/ste:x%x flg:x%x",
++			"RCV FARP:        did:x%x/ste:x%x flg:x%lx",
+ 			did, vport->port_state, ndlp->nlp_flag);
+ 
+ 		phba->fc_stat.elsRcvFARP++;
+@@ -10614,7 +10527,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ 		break;
+ 	case ELS_CMD_FAN:
+ 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+-			"RCV FAN:         did:x%x/ste:x%x flg:x%x",
++			"RCV FAN:         did:x%x/ste:x%x flg:x%lx",
+ 			did, vport->port_state, ndlp->nlp_flag);
+ 
+ 		phba->fc_stat.elsRcvFAN++;
+@@ -10623,7 +10536,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ 	case ELS_CMD_PRLI:
+ 	case ELS_CMD_NVMEPRLI:
+ 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+-			"RCV PRLI:        did:x%x/ste:x%x flg:x%x",
++			"RCV PRLI:        did:x%x/ste:x%x flg:x%lx",
+ 			did, vport->port_state, ndlp->nlp_flag);
+ 
+ 		phba->fc_stat.elsRcvPRLI++;
+@@ -10637,7 +10550,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ 		break;
+ 	case ELS_CMD_LIRR:
+ 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+-			"RCV LIRR:        did:x%x/ste:x%x flg:x%x",
++			"RCV LIRR:        did:x%x/ste:x%x flg:x%lx",
+ 			did, vport->port_state, ndlp->nlp_flag);
+ 
+ 		phba->fc_stat.elsRcvLIRR++;
+@@ -10648,7 +10561,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ 		break;
+ 	case ELS_CMD_RLS:
+ 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+-			"RCV RLS:         did:x%x/ste:x%x flg:x%x",
++			"RCV RLS:         did:x%x/ste:x%x flg:x%lx",
+ 			did, vport->port_state, ndlp->nlp_flag);
+ 
+ 		phba->fc_stat.elsRcvRLS++;
+@@ -10659,7 +10572,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ 		break;
+ 	case ELS_CMD_RPL:
+ 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+-			"RCV RPL:         did:x%x/ste:x%x flg:x%x",
++			"RCV RPL:         did:x%x/ste:x%x flg:x%lx",
+ 			did, vport->port_state, ndlp->nlp_flag);
+ 
+ 		phba->fc_stat.elsRcvRPL++;
+@@ -10670,7 +10583,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ 		break;
+ 	case ELS_CMD_RNID:
+ 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+-			"RCV RNID:        did:x%x/ste:x%x flg:x%x",
++			"RCV RNID:        did:x%x/ste:x%x flg:x%lx",
+ 			did, vport->port_state, ndlp->nlp_flag);
+ 
+ 		phba->fc_stat.elsRcvRNID++;
+@@ -10681,7 +10594,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ 		break;
+ 	case ELS_CMD_RTV:
+ 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+-			"RCV RTV:        did:x%x/ste:x%x flg:x%x",
++			"RCV RTV:        did:x%x/ste:x%x flg:x%lx",
+ 			did, vport->port_state, ndlp->nlp_flag);
+ 		phba->fc_stat.elsRcvRTV++;
+ 		lpfc_els_rcv_rtv(vport, elsiocb, ndlp);
+@@ -10691,7 +10604,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ 		break;
+ 	case ELS_CMD_RRQ:
+ 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+-			"RCV RRQ:         did:x%x/ste:x%x flg:x%x",
++			"RCV RRQ:         did:x%x/ste:x%x flg:x%lx",
+ 			did, vport->port_state, ndlp->nlp_flag);
+ 
+ 		phba->fc_stat.elsRcvRRQ++;
+@@ -10702,7 +10615,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ 		break;
+ 	case ELS_CMD_ECHO:
+ 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+-			"RCV ECHO:        did:x%x/ste:x%x flg:x%x",
++			"RCV ECHO:        did:x%x/ste:x%x flg:x%lx",
+ 			did, vport->port_state, ndlp->nlp_flag);
+ 
+ 		phba->fc_stat.elsRcvECHO++;
+@@ -10718,7 +10631,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ 		break;
+ 	case ELS_CMD_FPIN:
+ 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+-				      "RCV FPIN:       did:x%x/ste:x%x flg:x%x",
++				      "RCV FPIN:       did:x%x/ste:x%x "
++				      "flg:x%lx",
+ 				      did, vport->port_state, ndlp->nlp_flag);
+ 
+ 		lpfc_els_rcv_fpin(vport, (struct fc_els_fpin *)payload,
+@@ -11226,9 +11140,7 @@ lpfc_retry_pport_discovery(struct lpfc_hba *phba)
+ 		return;
+ 
+ 	mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000));
+-	spin_lock_irq(&ndlp->lock);
+-	ndlp->nlp_flag |= NLP_DELAY_TMO;
+-	spin_unlock_irq(&ndlp->lock);
++	set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag);
+ 	ndlp->nlp_last_elscmd = ELS_CMD_FLOGI;
+ 	phba->pport->port_state = LPFC_FLOGI;
+ 	return;
+@@ -11359,11 +11271,9 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ 		list_for_each_entry_safe(np, next_np,
+ 			&vport->fc_nodes, nlp_listp) {
+ 			if ((np->nlp_state != NLP_STE_NPR_NODE) ||
+-			    !(np->nlp_flag & NLP_NPR_ADISC))
++			    !test_bit(NLP_NPR_ADISC, &np->nlp_flag))
+ 				continue;
+-			spin_lock_irq(&ndlp->lock);
+-			np->nlp_flag &= ~NLP_NPR_ADISC;
+-			spin_unlock_irq(&ndlp->lock);
++			clear_bit(NLP_NPR_ADISC, &np->nlp_flag);
+ 			lpfc_unreg_rpi(vport, np);
+ 		}
+ 		lpfc_cleanup_pending_mbox(vport);
+@@ -11566,7 +11476,7 @@ lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ 	/* NPIV LOGO completes to NPort <nlp_DID> */
+ 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ 			 "2928 NPIV LOGO completes to NPort x%x "
+-			 "Data: x%x x%x x%x x%x x%x x%x x%x\n",
++			 "Data: x%x x%x x%x x%x x%x x%lx x%x\n",
+ 			 ndlp->nlp_DID, ulp_status, ulp_word4,
+ 			 tmo, vport->num_disc_nodes,
+ 			 kref_read(&ndlp->kref), ndlp->nlp_flag,
+@@ -11582,8 +11492,9 @@ lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ 		/* Wake up lpfc_vport_delete if waiting...*/
+ 		if (ndlp->logo_waitq)
+ 			wake_up(ndlp->logo_waitq);
++		clear_bit(NLP_ISSUE_LOGO, &ndlp->nlp_flag);
++		clear_bit(NLP_LOGO_SND, &ndlp->nlp_flag);
+ 		spin_lock_irq(&ndlp->lock);
+-		ndlp->nlp_flag &= ~(NLP_ISSUE_LOGO | NLP_LOGO_SND);
+ 		ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO;
+ 		spin_unlock_irq(&ndlp->lock);
+ 	}
+@@ -11633,13 +11544,11 @@ lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+ 	memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
+ 
+ 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+-		"Issue LOGO npiv  did:x%x flg:x%x",
++		"Issue LOGO npiv  did:x%x flg:x%lx",
+ 		ndlp->nlp_DID, ndlp->nlp_flag, 0);
+ 
+ 	elsiocb->cmd_cmpl = lpfc_cmpl_els_npiv_logo;
+-	spin_lock_irq(&ndlp->lock);
+-	ndlp->nlp_flag |= NLP_LOGO_SND;
+-	spin_unlock_irq(&ndlp->lock);
++	set_bit(NLP_LOGO_SND, &ndlp->nlp_flag);
+ 	elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ 	if (!elsiocb->ndlp) {
+ 		lpfc_els_free_iocb(phba, elsiocb);
+@@ -11655,9 +11564,7 @@ lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+ 	return 0;
+ 
+ err:
+-	spin_lock_irq(&ndlp->lock);
+-	ndlp->nlp_flag &= ~NLP_LOGO_SND;
+-	spin_unlock_irq(&ndlp->lock);
++	clear_bit(NLP_LOGO_SND, &ndlp->nlp_flag);
+ 	return 1;
+ }
+ 
+@@ -12138,7 +12045,7 @@ lpfc_sli_abts_recover_port(struct lpfc_vport *vport,
+ 	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ 			"3094 Start rport recovery on shost id 0x%x "
+ 			"fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x "
+-			"flags 0x%x\n",
++			"flag 0x%lx\n",
+ 			shost->host_no, ndlp->nlp_DID,
+ 			vport->vpi, ndlp->nlp_rpi, ndlp->nlp_state,
+ 			ndlp->nlp_flag);
+@@ -12148,8 +12055,8 @@ lpfc_sli_abts_recover_port(struct lpfc_vport *vport,
+ 	 */
+ 	spin_lock_irqsave(&ndlp->lock, flags);
+ 	ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+-	ndlp->nlp_flag |= NLP_ISSUE_LOGO;
+ 	spin_unlock_irqrestore(&ndlp->lock, flags);
++	set_bit(NLP_ISSUE_LOGO, &ndlp->nlp_flag);
+ 	lpfc_unreg_rpi(vport, ndlp);
+ }
+ 
+diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
+index 34f77b250387c0..b5dd17eecf82da 100644
+--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
++++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
+@@ -137,7 +137,7 @@ lpfc_terminate_rport_io(struct fc_rport *rport)
+ 	ndlp = rdata->pnode;
+ 	vport = ndlp->vport;
+ 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
+-			      "rport terminate: sid:x%x did:x%x flg:x%x",
++			      "rport terminate: sid:x%x did:x%x flg:x%lx",
+ 			      ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
+ 
+ 	if (ndlp->nlp_sid != NLP_NO_SID)
+@@ -155,7 +155,7 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
+ 	struct lpfc_hba   *phba;
+ 	struct lpfc_work_evt *evtp;
+ 	unsigned long iflags;
+-	bool nvme_reg = false;
++	bool drop_initial_node_ref = false;
+ 
+ 	ndlp = ((struct lpfc_rport_data *)rport->dd_data)->pnode;
+ 	if (!ndlp)
+@@ -165,11 +165,11 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
+ 	phba  = vport->phba;
+ 
+ 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
+-		"rport devlosscb: sid:x%x did:x%x flg:x%x",
++		"rport devlosscb: sid:x%x did:x%x flg:x%lx",
+ 		ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
+ 
+ 	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
+-			 "3181 dev_loss_callbk x%06x, rport x%px flg x%x "
++			 "3181 dev_loss_callbk x%06x, rport x%px flg x%lx "
+ 			 "load_flag x%lx refcnt %u state %d xpt x%x\n",
+ 			 ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag,
+ 			 vport->load_flag, kref_read(&ndlp->kref),
+@@ -182,8 +182,13 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
+ 		spin_lock_irqsave(&ndlp->lock, iflags);
+ 		ndlp->rport = NULL;
+ 
+-		if (ndlp->fc4_xpt_flags & NVME_XPT_REGD)
+-			nvme_reg = true;
++		/* Only 1 thread can drop the initial node reference.
++		 * If not registered for NVME and NLP_DROPPED flag is
++		 * clear, remove the initial reference.
++		 */
++		if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD))
++			if (!test_and_set_bit(NLP_DROPPED, &ndlp->nlp_flag))
++				drop_initial_node_ref = true;
+ 
+ 		/* The scsi_transport is done with the rport so lpfc cannot
+ 		 * call to unregister.
+@@ -194,13 +199,16 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
+ 			/* If NLP_XPT_REGD was cleared in lpfc_nlp_unreg_node,
+ 			 * unregister calls were made to the scsi and nvme
+ 			 * transports and refcnt was already decremented. Clear
+-			 * the NLP_XPT_REGD flag only if the NVME Rport is
++			 * the NLP_XPT_REGD flag only if the NVME nrport is
+ 			 * confirmed unregistered.
+ 			 */
+-			if (!nvme_reg && ndlp->fc4_xpt_flags & NLP_XPT_REGD) {
+-				ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD;
++			if (ndlp->fc4_xpt_flags & NLP_XPT_REGD) {
++				if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD))
++					ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD;
+ 				spin_unlock_irqrestore(&ndlp->lock, iflags);
+-				lpfc_nlp_put(ndlp); /* may free ndlp */
++
++				/* Release scsi transport reference */
++				lpfc_nlp_put(ndlp);
+ 			} else {
+ 				spin_unlock_irqrestore(&ndlp->lock, iflags);
+ 			}
+@@ -208,19 +216,8 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
+ 			spin_unlock_irqrestore(&ndlp->lock, iflags);
+ 		}
+ 
+-		spin_lock_irqsave(&ndlp->lock, iflags);
+-
+-		/* Only 1 thread can drop the initial node reference.  If
+-		 * another thread has set NLP_DROPPED, this thread is done.
+-		 */
+-		if (nvme_reg || (ndlp->nlp_flag & NLP_DROPPED)) {
+-			spin_unlock_irqrestore(&ndlp->lock, iflags);
+-			return;
+-		}
+-
+-		ndlp->nlp_flag |= NLP_DROPPED;
+-		spin_unlock_irqrestore(&ndlp->lock, iflags);
+-		lpfc_nlp_put(ndlp);
++		if (drop_initial_node_ref)
++			lpfc_nlp_put(ndlp);
+ 		return;
+ 	}
+ 
+@@ -253,14 +250,14 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
+ 		return;
+ 	}
+ 
+-	spin_lock_irqsave(&ndlp->lock, iflags);
+-	ndlp->nlp_flag |= NLP_IN_DEV_LOSS;
++	set_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag);
+ 
++	spin_lock_irqsave(&ndlp->lock, iflags);
+ 	/* If there is a PLOGI in progress, and we are in a
+ 	 * NLP_NPR_2B_DISC state, don't turn off the flag.
+ 	 */
+ 	if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE)
+-		ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
++		clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag);
+ 
+ 	/*
+ 	 * The backend does not expect any more calls associated with this
+@@ -289,15 +286,13 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
+ 	} else {
+ 		lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
+ 				 "3188 worker thread is stopped %s x%06x, "
+-				 " rport x%px flg x%x load_flag x%lx refcnt "
++				 " rport x%px flg x%lx load_flag x%lx refcnt "
+ 				 "%d\n", __func__, ndlp->nlp_DID,
+ 				 ndlp->rport, ndlp->nlp_flag,
+ 				 vport->load_flag, kref_read(&ndlp->kref));
+ 		if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD)) {
+-			spin_lock_irqsave(&ndlp->lock, iflags);
+ 			/* Node is in dev loss.  No further transaction. */
+-			ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
+-			spin_unlock_irqrestore(&ndlp->lock, iflags);
++			clear_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag);
+ 			lpfc_disc_state_machine(vport, ndlp, NULL,
+ 						NLP_EVT_DEVICE_RM);
+ 		}
+@@ -430,7 +425,7 @@ lpfc_check_nlp_post_devloss(struct lpfc_vport *vport,
+ 		lpfc_nlp_get(ndlp);
+ 		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY | LOG_NODE,
+ 				 "8438 Devloss timeout reversed on DID x%x "
+-				 "refcnt %d ndlp %p flag x%x "
++				 "refcnt %d ndlp %p flag x%lx "
+ 				 "port_state = x%x\n",
+ 				 ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp,
+ 				 ndlp->nlp_flag, vport->port_state);
+@@ -473,7 +468,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
+ 			      ndlp->nlp_DID, ndlp->nlp_type, ndlp->nlp_sid);
+ 
+ 	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
+-			 "3182 %s x%06x, nflag x%x xflags x%x refcnt %d\n",
++			 "3182 %s x%06x, nflag x%lx xflags x%x refcnt %d\n",
+ 			 __func__, ndlp->nlp_DID, ndlp->nlp_flag,
+ 			 ndlp->fc4_xpt_flags, kref_read(&ndlp->kref));
+ 
+@@ -487,9 +482,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
+ 				 *(name+4), *(name+5), *(name+6), *(name+7),
+ 				 ndlp->nlp_DID);
+ 
+-		spin_lock_irqsave(&ndlp->lock, iflags);
+-		ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
+-		spin_unlock_irqrestore(&ndlp->lock, iflags);
++		clear_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag);
+ 		return fcf_inuse;
+ 	}
+ 
+@@ -517,7 +510,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
+ 			}
+ 			break;
+ 		case Fabric_Cntl_DID:
+-			if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
++			if (test_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag))
+ 				recovering = true;
+ 			break;
+ 		case FDMI_DID:
+@@ -545,15 +538,13 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
+ 		 * the following lpfc_nlp_put is necessary after fabric node is
+ 		 * recovered.
+ 		 */
+-		spin_lock_irqsave(&ndlp->lock, iflags);
+-		ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
+-		spin_unlock_irqrestore(&ndlp->lock, iflags);
++		clear_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag);
+ 		if (recovering) {
+ 			lpfc_printf_vlog(vport, KERN_INFO,
+ 					 LOG_DISCOVERY | LOG_NODE,
+ 					 "8436 Devloss timeout marked on "
+ 					 "DID x%x refcnt %d ndlp %p "
+-					 "flag x%x port_state = x%x\n",
++					 "flag x%lx port_state = x%x\n",
+ 					 ndlp->nlp_DID, kref_read(&ndlp->kref),
+ 					 ndlp, ndlp->nlp_flag,
+ 					 vport->port_state);
+@@ -570,7 +561,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
+ 					 LOG_DISCOVERY | LOG_NODE,
+ 					 "8437 Devloss timeout ignored on "
+ 					 "DID x%x refcnt %d ndlp %p "
+-					 "flag x%x port_state = x%x\n",
++					 "flag x%lx port_state = x%x\n",
+ 					 ndlp->nlp_DID, kref_read(&ndlp->kref),
+ 					 ndlp, ndlp->nlp_flag,
+ 					 vport->port_state);
+@@ -590,7 +581,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
+ 		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ 				 "0203 Devloss timeout on "
+ 				 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
+-				 "NPort x%06x Data: x%x x%x x%x refcnt %d\n",
++				 "NPort x%06x Data: x%lx x%x x%x refcnt %d\n",
+ 				 *name, *(name+1), *(name+2), *(name+3),
+ 				 *(name+4), *(name+5), *(name+6), *(name+7),
+ 				 ndlp->nlp_DID, ndlp->nlp_flag,
+@@ -600,15 +591,13 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
+ 		lpfc_printf_vlog(vport, KERN_INFO, LOG_TRACE_EVENT,
+ 				 "0204 Devloss timeout on "
+ 				 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
+-				 "NPort x%06x Data: x%x x%x x%x\n",
++				 "NPort x%06x Data: x%lx x%x x%x\n",
+ 				 *name, *(name+1), *(name+2), *(name+3),
+ 				 *(name+4), *(name+5), *(name+6), *(name+7),
+ 				 ndlp->nlp_DID, ndlp->nlp_flag,
+ 				 ndlp->nlp_state, ndlp->nlp_rpi);
+ 	}
+-	spin_lock_irqsave(&ndlp->lock, iflags);
+-	ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
+-	spin_unlock_irqrestore(&ndlp->lock, iflags);
++	clear_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag);
+ 
+ 	/* If we are devloss, but we are in the process of rediscovering the
+ 	 * ndlp, don't issue a NLP_EVT_DEVICE_RM event.
+@@ -1373,7 +1362,7 @@ lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport)
+ 			if (ndlp->nlp_DID != Fabric_DID)
+ 				lpfc_unreg_rpi(vport, ndlp);
+ 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+-		} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
++		} else if (!test_bit(NLP_NPR_ADISC, &ndlp->nlp_flag)) {
+ 			/* Fail outstanding IO now since device is
+ 			 * marked for PLOGI.
+ 			 */
+@@ -3882,14 +3871,13 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+ 	pmb->ctx_ndlp = NULL;
+ 
+ 	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI | LOG_NODE | LOG_DISCOVERY,
+-			 "0002 rpi:%x DID:%x flg:%x %d x%px\n",
++			 "0002 rpi:%x DID:%x flg:%lx %d x%px\n",
+ 			 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
+ 			 kref_read(&ndlp->kref),
+ 			 ndlp);
+-	if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
+-		ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
++	clear_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag);
+ 
+-	if (ndlp->nlp_flag & NLP_IGNR_REG_CMPL ||
++	if (test_bit(NLP_IGNR_REG_CMPL, &ndlp->nlp_flag) ||
+ 	    ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) {
+ 		/* We rcvd a rscn after issuing this
+ 		 * mbox reg login, we may have cycled
+@@ -3899,16 +3887,14 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+ 		 * there is another reg login in
+ 		 * process.
+ 		 */
+-		spin_lock_irq(&ndlp->lock);
+-		ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
+-		spin_unlock_irq(&ndlp->lock);
++		clear_bit(NLP_IGNR_REG_CMPL, &ndlp->nlp_flag);
+ 
+ 		/*
+ 		 * We cannot leave the RPI registered because
+ 		 * if we go thru discovery again for this ndlp
+ 		 * a subsequent REG_RPI will fail.
+ 		 */
+-		ndlp->nlp_flag |= NLP_RPI_REGISTERED;
++		set_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag);
+ 		lpfc_unreg_rpi(vport, ndlp);
+ 	}
+ 
+@@ -4221,7 +4207,7 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+ 
+ 	if (phba->sli_rev < LPFC_SLI_REV4)
+ 		ndlp->nlp_rpi = mb->un.varWords[0];
+-	ndlp->nlp_flag |= NLP_RPI_REGISTERED;
++	set_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag);
+ 	ndlp->nlp_type |= NLP_FABRIC;
+ 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+ 
+@@ -4352,9 +4338,7 @@ lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+ 		 * reference.
+ 		 */
+ 		if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) {
+-			spin_lock_irq(&ndlp->lock);
+-			ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+-			spin_unlock_irq(&ndlp->lock);
++			clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag);
+ 			lpfc_nlp_put(ndlp);
+ 		}
+ 
+@@ -4375,11 +4359,11 @@ lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+ 
+ 	if (phba->sli_rev < LPFC_SLI_REV4)
+ 		ndlp->nlp_rpi = mb->un.varWords[0];
+-	ndlp->nlp_flag |= NLP_RPI_REGISTERED;
++	set_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag);
+ 	ndlp->nlp_type |= NLP_FABRIC;
+ 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+ 	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY,
+-			 "0003 rpi:%x DID:%x flg:%x %d x%px\n",
++			 "0003 rpi:%x DID:%x flg:%lx %d x%px\n",
+ 			 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
+ 			 kref_read(&ndlp->kref),
+ 			 ndlp);
+@@ -4471,8 +4455,8 @@ lpfc_mbx_cmpl_fc_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+ 			 __func__, ndlp->nlp_DID, ndlp->nlp_rpi,
+ 			 ndlp->nlp_state);
+ 
+-	ndlp->nlp_flag |= NLP_RPI_REGISTERED;
+-	ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
++	set_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag);
++	clear_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag);
+ 	ndlp->nlp_type |= NLP_FABRIC;
+ 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+ 
+@@ -4506,7 +4490,7 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+ 
+ 
+ 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
+-			      "rport add:       did:x%x flg:x%x type x%x",
++			      "rport add:       did:x%x flg:x%lx type x%x",
+ 			      ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
+ 
+ 	/* Don't add the remote port if unloading. */
+@@ -4574,7 +4558,7 @@ lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
+ 		return;
+ 
+ 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
+-		"rport delete:    did:x%x flg:x%x type x%x",
++		"rport delete:    did:x%x flg:x%lx type x%x",
+ 		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
+ 
+ 	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+@@ -4690,7 +4674,7 @@ lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+ 		lpfc_printf_vlog(vport, KERN_INFO,
+ 				 LOG_ELS | LOG_NODE | LOG_DISCOVERY,
+ 				 "0999 %s Not regd: ndlp x%px rport x%px DID "
+-				 "x%x FLG x%x XPT x%x\n",
++				 "x%x FLG x%lx XPT x%x\n",
+ 				  __func__, ndlp, ndlp->rport, ndlp->nlp_DID,
+ 				  ndlp->nlp_flag, ndlp->fc4_xpt_flags);
+ 		return;
+@@ -4706,7 +4690,7 @@ lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+ 	} else if (!ndlp->rport) {
+ 		lpfc_printf_vlog(vport, KERN_INFO,
+ 				 LOG_ELS | LOG_NODE | LOG_DISCOVERY,
+-				 "1999 %s NDLP in devloss x%px DID x%x FLG x%x"
++				 "1999 %s NDLP in devloss x%px DID x%x FLG x%lx"
+ 				 " XPT x%x refcnt %u\n",
+ 				 __func__, ndlp, ndlp->nlp_DID, ndlp->nlp_flag,
+ 				 ndlp->fc4_xpt_flags,
+@@ -4751,7 +4735,7 @@ lpfc_handle_adisc_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 		ndlp->nlp_type |= NLP_FC_NODE;
+ 		fallthrough;
+ 	case NLP_STE_MAPPED_NODE:
+-		ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
++		clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag);
+ 		lpfc_nlp_reg_node(vport, ndlp);
+ 		break;
+ 
+@@ -4762,7 +4746,7 @@ lpfc_handle_adisc_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 	 * backend, attempt it now
+ 	 */
+ 	case NLP_STE_NPR_NODE:
+-		ndlp->nlp_flag &= ~NLP_RCV_PLOGI;
++		clear_bit(NLP_RCV_PLOGI, &ndlp->nlp_flag);
+ 		fallthrough;
+ 	default:
+ 		lpfc_nlp_unreg_node(vport, ndlp);
+@@ -4783,13 +4767,13 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 	}
+ 
+ 	if (new_state == NLP_STE_UNMAPPED_NODE) {
+-		ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
++		clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag);
+ 		ndlp->nlp_type |= NLP_FC_NODE;
+ 	}
+ 	if (new_state == NLP_STE_MAPPED_NODE)
+-		ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
++		clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag);
+ 	if (new_state == NLP_STE_NPR_NODE)
+-		ndlp->nlp_flag &= ~NLP_RCV_PLOGI;
++		clear_bit(NLP_RCV_PLOGI, &ndlp->nlp_flag);
+ 
+ 	/* Reg/Unreg for FCP and NVME Transport interface */
+ 	if ((old_state == NLP_STE_MAPPED_NODE ||
+@@ -4797,7 +4781,7 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 		/* For nodes marked for ADISC, Handle unreg in ADISC cmpl
+ 		 * if linkup. In linkdown do unreg_node
+ 		 */
+-		if (!(ndlp->nlp_flag & NLP_NPR_ADISC) ||
++		if (!test_bit(NLP_NPR_ADISC, &ndlp->nlp_flag) ||
+ 		    !lpfc_is_link_up(vport->phba))
+ 			lpfc_nlp_unreg_node(vport, ndlp);
+ 	}
+@@ -4817,9 +4801,7 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 	    (!ndlp->rport ||
+ 	     ndlp->rport->scsi_target_id == -1 ||
+ 	     ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) {
+-		spin_lock_irq(&ndlp->lock);
+-		ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
+-		spin_unlock_irq(&ndlp->lock);
++		set_bit(NLP_TGT_NO_SCSIID, &ndlp->nlp_flag);
+ 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+ 	}
+ }
+@@ -4851,7 +4833,7 @@ lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 		   int state)
+ {
+ 	int  old_state = ndlp->nlp_state;
+-	int node_dropped = ndlp->nlp_flag & NLP_DROPPED;
++	bool node_dropped = test_bit(NLP_DROPPED, &ndlp->nlp_flag);
+ 	char name1[16], name2[16];
+ 	unsigned long iflags;
+ 
+@@ -4867,7 +4849,7 @@ lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 
+ 	if (node_dropped && old_state == NLP_STE_UNUSED_NODE &&
+ 	    state != NLP_STE_UNUSED_NODE) {
+-		ndlp->nlp_flag &= ~NLP_DROPPED;
++		clear_bit(NLP_DROPPED, &ndlp->nlp_flag);
+ 		lpfc_nlp_get(ndlp);
+ 	}
+ 
+@@ -4875,7 +4857,7 @@ lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 	    state != NLP_STE_NPR_NODE)
+ 		lpfc_cancel_retry_delay_tmo(vport, ndlp);
+ 	if (old_state == NLP_STE_UNMAPPED_NODE) {
+-		ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
++		clear_bit(NLP_TGT_NO_SCSIID, &ndlp->nlp_flag);
+ 		ndlp->nlp_type &= ~NLP_FC_NODE;
+ 	}
+ 
+@@ -4972,14 +4954,8 @@ lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+ 	 * reference from lpfc_nlp_init.  If set, don't drop it again and
+ 	 * introduce an imbalance.
+ 	 */
+-	spin_lock_irq(&ndlp->lock);
+-	if (!(ndlp->nlp_flag & NLP_DROPPED)) {
+-		ndlp->nlp_flag |= NLP_DROPPED;
+-		spin_unlock_irq(&ndlp->lock);
++	if (!test_and_set_bit(NLP_DROPPED, &ndlp->nlp_flag))
+ 		lpfc_nlp_put(ndlp);
+-		return;
+-	}
+-	spin_unlock_irq(&ndlp->lock);
+ }
+ 
+ /*
+@@ -5094,9 +5070,9 @@ lpfc_check_sli_ndlp(struct lpfc_hba *phba,
+ 	} else if (pring->ringno == LPFC_FCP_RING) {
+ 		/* Skip match check if waiting to relogin to FCP target */
+ 		if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
+-		    (ndlp->nlp_flag & NLP_DELAY_TMO)) {
++		    test_bit(NLP_DELAY_TMO, &ndlp->nlp_flag))
+ 			return 0;
+-		}
++
+ 		if (ulp_context == ndlp->nlp_rpi)
+ 			return 1;
+ 	}
+@@ -5166,7 +5142,7 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
+ 	 * Everything that matches on txcmplq will be returned
+ 	 * by firmware with a no rpi error.
+ 	 */
+-	if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
++	if (test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag)) {
+ 		if (phba->sli_rev != LPFC_SLI_REV4)
+ 			lpfc_sli3_dequeue_nport_iocbs(phba, ndlp, &completions);
+ 		else
+@@ -5200,29 +5176,19 @@ lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+ 	lpfc_issue_els_logo(vport, ndlp, 0);
+ 
+ 	/* Check to see if there are any deferred events to process */
+-	if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
+-	    (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
++	if (test_bit(NLP_UNREG_INP, &ndlp->nlp_flag) &&
++	    ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING) {
+ 		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ 				 "1434 UNREG cmpl deferred logo x%x "
+ 				 "on NPort x%x Data: x%x x%px\n",
+ 				 ndlp->nlp_rpi, ndlp->nlp_DID,
+ 				 ndlp->nlp_defer_did, ndlp);
+ 
+-		ndlp->nlp_flag &= ~NLP_UNREG_INP;
++		clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag);
+ 		ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
+ 		lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
+ 	} else {
+-		/* NLP_RELEASE_RPI is only set for SLI4 ports. */
+-		if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
+-			lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
+-			spin_lock_irq(&ndlp->lock);
+-			ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
+-			ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
+-			spin_unlock_irq(&ndlp->lock);
+-		}
+-		spin_lock_irq(&ndlp->lock);
+-		ndlp->nlp_flag &= ~NLP_UNREG_INP;
+-		spin_unlock_irq(&ndlp->lock);
++		clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag);
+ 	}
+ 
+ 	/* The node has an outstanding reference for the unreg. Now
+@@ -5242,8 +5208,6 @@ static void
+ lpfc_set_unreg_login_mbx_cmpl(struct lpfc_hba *phba, struct lpfc_vport *vport,
+ 	struct lpfc_nodelist *ndlp, LPFC_MBOXQ_t *mbox)
+ {
+-	unsigned long iflags;
+-
+ 	/* Driver always gets a reference on the mailbox job
+ 	 * in support of async jobs.
+ 	 */
+@@ -5251,9 +5215,8 @@ lpfc_set_unreg_login_mbx_cmpl(struct lpfc_hba *phba, struct lpfc_vport *vport,
+ 	if (!mbox->ctx_ndlp)
+ 		return;
+ 
+-	if (ndlp->nlp_flag & NLP_ISSUE_LOGO) {
++	if (test_bit(NLP_ISSUE_LOGO, &ndlp->nlp_flag)) {
+ 		mbox->mbox_cmpl = lpfc_nlp_logo_unreg;
+-
+ 	} else if (phba->sli_rev == LPFC_SLI_REV4 &&
+ 		   !test_bit(FC_UNLOADING, &vport->load_flag) &&
+ 		    (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
+@@ -5261,13 +5224,6 @@ lpfc_set_unreg_login_mbx_cmpl(struct lpfc_hba *phba, struct lpfc_vport *vport,
+ 		    (kref_read(&ndlp->kref) > 0)) {
+ 		mbox->mbox_cmpl = lpfc_sli4_unreg_rpi_cmpl_clr;
+ 	} else {
+-		if (test_bit(FC_UNLOADING, &vport->load_flag)) {
+-			if (phba->sli_rev == LPFC_SLI_REV4) {
+-				spin_lock_irqsave(&ndlp->lock, iflags);
+-				ndlp->nlp_flag |= NLP_RELEASE_RPI;
+-				spin_unlock_irqrestore(&ndlp->lock, iflags);
+-			}
+-		}
+ 		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ 	}
+ }
+@@ -5289,13 +5245,13 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+ 	int rc, acc_plogi = 1;
+ 	uint16_t rpi;
+ 
+-	if (ndlp->nlp_flag & NLP_RPI_REGISTERED ||
+-	    ndlp->nlp_flag & NLP_REG_LOGIN_SEND) {
+-		if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
++	if (test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag) ||
++	    test_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag)) {
++		if (test_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag))
+ 			lpfc_printf_vlog(vport, KERN_INFO,
+ 					 LOG_NODE | LOG_DISCOVERY,
+ 					 "3366 RPI x%x needs to be "
+-					 "unregistered nlp_flag x%x "
++					 "unregistered nlp_flag x%lx "
+ 					 "did x%x\n",
+ 					 ndlp->nlp_rpi, ndlp->nlp_flag,
+ 					 ndlp->nlp_DID);
+@@ -5303,11 +5259,11 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+ 		/* If there is already an UNREG in progress for this ndlp,
+ 		 * no need to queue up another one.
+ 		 */
+-		if (ndlp->nlp_flag & NLP_UNREG_INP) {
++		if (test_bit(NLP_UNREG_INP, &ndlp->nlp_flag)) {
+ 			lpfc_printf_vlog(vport, KERN_INFO,
+ 					 LOG_NODE | LOG_DISCOVERY,
+ 					 "1436 unreg_rpi SKIP UNREG x%x on "
+-					 "NPort x%x deferred x%x  flg x%x "
++					 "NPort x%x deferred x%x flg x%lx "
+ 					 "Data: x%px\n",
+ 					 ndlp->nlp_rpi, ndlp->nlp_DID,
+ 					 ndlp->nlp_defer_did,
+@@ -5330,27 +5286,24 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+ 				return 1;
+ 			}
+ 
++			/* Accept PLOGIs after unreg_rpi_cmpl. */
+ 			if (mbox->mbox_cmpl == lpfc_sli4_unreg_rpi_cmpl_clr)
+-				/*
+-				 * accept PLOGIs after unreg_rpi_cmpl
+-				 */
+ 				acc_plogi = 0;
+-			if (((ndlp->nlp_DID & Fabric_DID_MASK) !=
+-			    Fabric_DID_MASK) &&
+-			    (!test_bit(FC_OFFLINE_MODE, &vport->fc_flag)))
+-				ndlp->nlp_flag |= NLP_UNREG_INP;
++
++			if (!test_bit(FC_OFFLINE_MODE, &vport->fc_flag))
++				set_bit(NLP_UNREG_INP, &ndlp->nlp_flag);
+ 
+ 			lpfc_printf_vlog(vport, KERN_INFO,
+ 					 LOG_NODE | LOG_DISCOVERY,
+ 					 "1433 unreg_rpi UNREG x%x on "
+-					 "NPort x%x deferred flg x%x "
++					 "NPort x%x deferred flg x%lx "
+ 					 "Data:x%px\n",
+ 					 ndlp->nlp_rpi, ndlp->nlp_DID,
+ 					 ndlp->nlp_flag, ndlp);
+ 
+ 			rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+ 			if (rc == MBX_NOT_FINISHED) {
+-				ndlp->nlp_flag &= ~NLP_UNREG_INP;
++				clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag);
+ 				mempool_free(mbox, phba->mbox_mem_pool);
+ 				acc_plogi = 1;
+ 				lpfc_nlp_put(ndlp);
+@@ -5360,7 +5313,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+ 					 LOG_NODE | LOG_DISCOVERY,
+ 					 "1444 Failed to allocate mempool "
+ 					 "unreg_rpi UNREG x%x, "
+-					 "DID x%x, flag x%x, "
++					 "DID x%x, flag x%lx, "
+ 					 "ndlp x%px\n",
+ 					 ndlp->nlp_rpi, ndlp->nlp_DID,
+ 					 ndlp->nlp_flag, ndlp);
+@@ -5370,7 +5323,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+ 			 * not unloading.
+ 			 */
+ 			if (!test_bit(FC_UNLOADING, &vport->load_flag)) {
+-				ndlp->nlp_flag &= ~NLP_UNREG_INP;
++				clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag);
+ 				lpfc_issue_els_logo(vport, ndlp, 0);
+ 				ndlp->nlp_prev_state = ndlp->nlp_state;
+ 				lpfc_nlp_set_state(vport, ndlp,
+@@ -5383,13 +5336,13 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+ out:
+ 		if (phba->sli_rev != LPFC_SLI_REV4)
+ 			ndlp->nlp_rpi = 0;
+-		ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;
+-		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
++		clear_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag);
++		clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag);
+ 		if (acc_plogi)
+-			ndlp->nlp_flag &= ~NLP_LOGO_ACC;
++			clear_bit(NLP_LOGO_ACC, &ndlp->nlp_flag);
+ 		return 1;
+ 	}
+-	ndlp->nlp_flag &= ~NLP_LOGO_ACC;
++	clear_bit(NLP_LOGO_ACC, &ndlp->nlp_flag);
+ 	return 0;
+ }
+ 
+@@ -5417,7 +5370,7 @@ lpfc_unreg_hba_rpis(struct lpfc_hba *phba)
+ 	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+ 		spin_lock_irqsave(&vports[i]->fc_nodes_list_lock, iflags);
+ 		list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
+-			if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
++			if (test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag)) {
+ 				/* The mempool_alloc might sleep */
+ 				spin_unlock_irqrestore(&vports[i]->fc_nodes_list_lock,
+ 						       iflags);
+@@ -5505,7 +5458,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+ 	/* Cleanup node for NPort <nlp_DID> */
+ 	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+ 			 "0900 Cleanup node for NPort x%x "
+-			 "Data: x%x x%x x%x\n",
++			 "Data: x%lx x%x x%x\n",
+ 			 ndlp->nlp_DID, ndlp->nlp_flag,
+ 			 ndlp->nlp_state, ndlp->nlp_rpi);
+ 	lpfc_dequeue_node(vport, ndlp);
+@@ -5550,9 +5503,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+ 
+ 	lpfc_els_abort(phba, ndlp);
+ 
+-	spin_lock_irq(&ndlp->lock);
+-	ndlp->nlp_flag &= ~NLP_DELAY_TMO;
+-	spin_unlock_irq(&ndlp->lock);
++	clear_bit(NLP_DELAY_TMO, &ndlp->nlp_flag);
+ 
+ 	ndlp->nlp_last_elscmd = 0;
+ 	del_timer_sync(&ndlp->nlp_delayfunc);
+@@ -5561,10 +5512,6 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+ 	list_del_init(&ndlp->dev_loss_evt.evt_listp);
+ 	list_del_init(&ndlp->recovery_evt.evt_listp);
+ 	lpfc_cleanup_vports_rrqs(vport, ndlp);
+-
+-	if (phba->sli_rev == LPFC_SLI_REV4)
+-		ndlp->nlp_flag |= NLP_RELEASE_RPI;
+-
+ 	return 0;
+ }
+ 
+@@ -5639,7 +5586,7 @@ __lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
+ 				 );
+ 			lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE_VERBOSE,
+ 					 "0929 FIND node DID "
+-					 "Data: x%px x%x x%x x%x x%x x%px\n",
++					 "Data: x%px x%x x%lx x%x x%x x%px\n",
+ 					 ndlp, ndlp->nlp_DID,
+ 					 ndlp->nlp_flag, data1, ndlp->nlp_rpi,
+ 					 ndlp->active_rrqs_xri_bitmap);
+@@ -5692,7 +5639,7 @@ lpfc_findnode_mapped(struct lpfc_vport *vport)
+ 					       iflags);
+ 			lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE_VERBOSE,
+ 					 "2025 FIND node DID MAPPED "
+-					 "Data: x%px x%x x%x x%x x%px\n",
++					 "Data: x%px x%x x%lx x%x x%px\n",
+ 					 ndlp, ndlp->nlp_DID,
+ 					 ndlp->nlp_flag, data1,
+ 					 ndlp->active_rrqs_xri_bitmap);
+@@ -5726,13 +5673,11 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
+ 
+ 		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ 				 "6453 Setup New Node 2B_DISC x%x "
+-				 "Data:x%x x%x x%lx\n",
++				 "Data:x%lx x%x x%lx\n",
+ 				 ndlp->nlp_DID, ndlp->nlp_flag,
+ 				 ndlp->nlp_state, vport->fc_flag);
+ 
+-		spin_lock_irq(&ndlp->lock);
+-		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+-		spin_unlock_irq(&ndlp->lock);
++		set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag);
+ 		return ndlp;
+ 	}
+ 
+@@ -5751,7 +5696,7 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
+ 
+ 			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ 					 "6455 Setup RSCN Node 2B_DISC x%x "
+-					 "Data:x%x x%x x%lx\n",
++					 "Data:x%lx x%x x%lx\n",
+ 					 ndlp->nlp_DID, ndlp->nlp_flag,
+ 					 ndlp->nlp_state, vport->fc_flag);
+ 
+@@ -5769,13 +5714,11 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
+ 							NLP_EVT_DEVICE_RECOVERY);
+ 			}
+ 
+-			spin_lock_irq(&ndlp->lock);
+-			ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+-			spin_unlock_irq(&ndlp->lock);
++			set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag);
+ 		} else {
+ 			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ 					 "6456 Skip Setup RSCN Node x%x "
+-					 "Data:x%x x%x x%lx\n",
++					 "Data:x%lx x%x x%lx\n",
+ 					 ndlp->nlp_DID, ndlp->nlp_flag,
+ 					 ndlp->nlp_state, vport->fc_flag);
+ 			ndlp = NULL;
+@@ -5783,7 +5726,7 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
+ 	} else {
+ 		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ 				 "6457 Setup Active Node 2B_DISC x%x "
+-				 "Data:x%x x%x x%lx\n",
++				 "Data:x%lx x%x x%lx\n",
+ 				 ndlp->nlp_DID, ndlp->nlp_flag,
+ 				 ndlp->nlp_state, vport->fc_flag);
+ 
+@@ -5794,7 +5737,7 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
+ 		if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE ||
+ 		    ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
+ 		    (!vport->phba->nvmet_support &&
+-		     ndlp->nlp_flag & NLP_RCV_PLOGI))
++		     test_bit(NLP_RCV_PLOGI, &ndlp->nlp_flag)))
+ 			return NULL;
+ 
+ 		if (vport->phba->nvmet_support)
+@@ -5804,10 +5747,7 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
+ 		 * allows for rediscovery
+ 		 */
+ 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+-
+-		spin_lock_irq(&ndlp->lock);
+-		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+-		spin_unlock_irq(&ndlp->lock);
++		set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag);
+ 	}
+ 	return ndlp;
+ }
+@@ -6178,7 +6118,7 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
+ 				/* Clean up the ndlp on Fabric connections */
+ 				lpfc_drop_node(vport, ndlp);
+ 
+-			} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
++			} else if (!test_bit(NLP_NPR_ADISC, &ndlp->nlp_flag)) {
+ 				/* Fail outstanding IO now since device
+ 				 * is marked for PLOGI.
+ 				 */
+@@ -6391,11 +6331,11 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+ 
+ 	if (phba->sli_rev < LPFC_SLI_REV4)
+ 		ndlp->nlp_rpi = mb->un.varWords[0];
+-	ndlp->nlp_flag |= NLP_RPI_REGISTERED;
++	set_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag);
+ 	ndlp->nlp_type |= NLP_FABRIC;
+ 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+ 	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY,
+-			 "0004 rpi:%x DID:%x flg:%x %d x%px\n",
++			 "0004 rpi:%x DID:%x flg:%lx %d x%px\n",
+ 			 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
+ 			 kref_read(&ndlp->kref),
+ 			 ndlp);
+@@ -6445,7 +6385,7 @@ __lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
+ 		if (filter(ndlp, param)) {
+ 			lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE_VERBOSE,
+ 					 "3185 FIND node filter %ps DID "
+-					 "ndlp x%px did x%x flg x%x st x%x "
++					 "ndlp x%px did x%x flg x%lx st x%x "
+ 					 "xri x%x type x%x rpi x%x\n",
+ 					 filter, ndlp, ndlp->nlp_DID,
+ 					 ndlp->nlp_flag, ndlp->nlp_state,
+@@ -6580,9 +6520,10 @@ lpfc_nlp_init(struct lpfc_vport *vport, uint32_t did)
+ 	INIT_LIST_HEAD(&ndlp->nlp_listp);
+ 	if (vport->phba->sli_rev == LPFC_SLI_REV4) {
+ 		ndlp->nlp_rpi = rpi;
+-		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY,
+-				 "0007 Init New ndlp x%px, rpi:x%x DID:%x "
+-				 "flg:x%x refcnt:%d\n",
++		lpfc_printf_vlog(vport, KERN_INFO,
++				 LOG_ELS | LOG_NODE | LOG_DISCOVERY,
++				 "0007 Init New ndlp x%px, rpi:x%x DID:x%x "
++				 "flg:x%lx refcnt:%d\n",
+ 				 ndlp, ndlp->nlp_rpi, ndlp->nlp_DID,
+ 				 ndlp->nlp_flag, kref_read(&ndlp->kref));
+ 
+@@ -6614,7 +6555,7 @@ lpfc_nlp_release(struct kref *kref)
+ 	struct lpfc_vport *vport = ndlp->vport;
+ 
+ 	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
+-		"node release:    did:x%x flg:x%x type:x%x",
++		"node release:    did:x%x flg:x%lx type:x%x",
+ 		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
+ 
+ 	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+@@ -6626,19 +6567,12 @@ lpfc_nlp_release(struct kref *kref)
+ 	lpfc_cancel_retry_delay_tmo(vport, ndlp);
+ 	lpfc_cleanup_node(vport, ndlp);
+ 
+-	/* Not all ELS transactions have registered the RPI with the port.
+-	 * In these cases the rpi usage is temporary and the node is
+-	 * released when the WQE is completed.  Catch this case to free the
+-	 * RPI to the pool.  Because this node is in the release path, a lock
+-	 * is unnecessary.  All references are gone and the node has been
+-	 * dequeued.
++	/* All nodes are initialized with an RPI that needs to be released
++	 * now. All references are gone and the node has been dequeued.
+ 	 */
+-	if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
+-		if (ndlp->nlp_rpi != LPFC_RPI_ALLOC_ERROR &&
+-		    !(ndlp->nlp_flag & (NLP_RPI_REGISTERED | NLP_UNREG_INP))) {
+-			lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
+-			ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
+-		}
++	if (vport->phba->sli_rev == LPFC_SLI_REV4) {
++		lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
++		ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
+ 	}
+ 
+ 	/* The node is not freed back to memory, it is released to a pool so
+@@ -6667,7 +6601,7 @@ lpfc_nlp_get(struct lpfc_nodelist *ndlp)
+ 
+ 	if (ndlp) {
+ 		lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
+-			"node get:        did:x%x flg:x%x refcnt:x%x",
++			"node get:        did:x%x flg:x%lx refcnt:x%x",
+ 			ndlp->nlp_DID, ndlp->nlp_flag,
+ 			kref_read(&ndlp->kref));
+ 
+@@ -6699,7 +6633,7 @@ lpfc_nlp_put(struct lpfc_nodelist *ndlp)
+ {
+ 	if (ndlp) {
+ 		lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
+-				"node put:        did:x%x flg:x%x refcnt:x%x",
++				"node put:        did:x%x flg:x%lx refcnt:x%x",
+ 				ndlp->nlp_DID, ndlp->nlp_flag,
+ 				kref_read(&ndlp->kref));
+ 	} else {
+@@ -6752,11 +6686,12 @@ lpfc_fcf_inuse(struct lpfc_hba *phba)
+ 				spin_unlock_irqrestore(&vports[i]->fc_nodes_list_lock,
+ 						       iflags);
+ 				goto out;
+-			} else if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
++			} else if (test_bit(NLP_RPI_REGISTERED,
++					    &ndlp->nlp_flag)) {
+ 				ret = 1;
+ 				lpfc_printf_log(phba, KERN_INFO,
+ 						LOG_NODE | LOG_DISCOVERY,
+-						"2624 RPI %x DID %x flag %x "
++						"2624 RPI %x DID %x flag %lx "
+ 						"still logged in\n",
+ 						ndlp->nlp_rpi, ndlp->nlp_DID,
+ 						ndlp->nlp_flag);
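
The lpfc_hbadisc.c hunks above carry the central conversion of this series: ndlp->nlp_flag changes from a u32 manipulated under ndlp->lock into an unsigned long driven by atomic bitops, and every log format moves from x%x to x%lx to match the wider type. A minimal sketch of the before/after pattern, using hypothetical names (my_node, MY_FLAG_REGISTERED), not the driver's own:

        #include <linux/types.h>
        #include <linux/bitops.h>
        #include <linux/spinlock.h>

        #define MY_FLAG_REGISTERED	0	/* a bit number now, not a bit mask */

        struct my_node {
        	spinlock_t lock;	/* still guards the non-flag fields */
        	unsigned long flags;	/* was: u32 flags, touched only under lock */
        };

        static void mark_registered(struct my_node *n)
        {
        	/* before: spin_lock_irq(&n->lock); n->flags |= MASK; spin_unlock_irq(&n->lock); */
        	set_bit(MY_FLAG_REGISTERED, &n->flags);
        }

        static bool is_registered(struct my_node *n)
        {
        	return test_bit(MY_FLAG_REGISTERED, &n->flags);
        }

Because set_bit()/clear_bit()/test_bit() operate on a single unsigned long word, printing the word needs the x%lx specifier, which is exactly the format churn visible throughout these hunks.
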
+diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
+index 50c761991191ff..3ddcaa864f0752 100644
+--- a/drivers/scsi/lpfc/lpfc_init.c
++++ b/drivers/scsi/lpfc/lpfc_init.c
+@@ -3092,7 +3092,8 @@ lpfc_cleanup(struct lpfc_vport *vport)
+ 				lpfc_printf_vlog(ndlp->vport, KERN_ERR,
+ 						 LOG_DISCOVERY,
+ 						 "0282 did:x%x ndlp:x%px "
+-						 "refcnt:%d xflags x%x nflag x%x\n",
++						 "refcnt:%d xflags x%x "
++						 "nflag x%lx\n",
+ 						 ndlp->nlp_DID, (void *)ndlp,
+ 						 kref_read(&ndlp->kref),
+ 						 ndlp->fc4_xpt_flags,
+@@ -3379,7 +3380,7 @@ lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
+ }
+ 
+ /**
+- * lpfc_sli4_node_prep - Assign RPIs for active nodes.
++ * lpfc_sli4_node_rpi_restore - Recover assigned RPIs for active nodes.
+  * @phba: pointer to lpfc hba data structure.
+  *
+  * Allocate RPIs for all active remote nodes. This is needed whenever
+@@ -3387,7 +3388,7 @@ lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
+  * is to fixup the temporary rpi assignments.
+  **/
+ void
+-lpfc_sli4_node_prep(struct lpfc_hba *phba)
++lpfc_sli4_node_rpi_restore(struct lpfc_hba *phba)
+ {
+ 	struct lpfc_nodelist  *ndlp, *next_ndlp;
+ 	struct lpfc_vport **vports;
+@@ -3397,10 +3398,10 @@ lpfc_sli4_node_prep(struct lpfc_hba *phba)
+ 		return;
+ 
+ 	vports = lpfc_create_vport_work_array(phba);
+-	if (vports == NULL)
++	if (!vports)
+ 		return;
+ 
+-	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
++	for (i = 0; i <= phba->max_vports && vports[i]; i++) {
+ 		if (test_bit(FC_UNLOADING, &vports[i]->load_flag))
+ 			continue;
+ 
+@@ -3409,14 +3410,20 @@ lpfc_sli4_node_prep(struct lpfc_hba *phba)
+ 					 nlp_listp) {
+ 			rpi = lpfc_sli4_alloc_rpi(phba);
+ 			if (rpi == LPFC_RPI_ALLOC_ERROR) {
+-				/* TODO print log? */
++				lpfc_printf_vlog(ndlp->vport, KERN_INFO,
++						 LOG_NODE | LOG_DISCOVERY,
++						 "0099 RPI alloc error for "
++						 "ndlp x%px DID:x%06x "
++						 "flg:x%lx\n",
++						 ndlp, ndlp->nlp_DID,
++						 ndlp->nlp_flag);
+ 				continue;
+ 			}
+ 			ndlp->nlp_rpi = rpi;
+ 			lpfc_printf_vlog(ndlp->vport, KERN_INFO,
+ 					 LOG_NODE | LOG_DISCOVERY,
+ 					 "0009 Assign RPI x%x to ndlp x%px "
+-					 "DID:x%06x flg:x%x\n",
++					 "DID:x%06x flg:x%lx\n",
+ 					 ndlp->nlp_rpi, ndlp, ndlp->nlp_DID,
+ 					 ndlp->nlp_flag);
+ 		}
+@@ -3820,35 +3827,12 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
+ 						 &vports[i]->fc_nodes,
+ 						 nlp_listp) {
+ 
+-				spin_lock_irq(&ndlp->lock);
+-				ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+-				spin_unlock_irq(&ndlp->lock);
+-
++				clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag);
+ 				if (offline || hba_pci_err) {
+-					spin_lock_irq(&ndlp->lock);
+-					ndlp->nlp_flag &= ~(NLP_UNREG_INP |
+-							    NLP_RPI_REGISTERED);
+-					spin_unlock_irq(&ndlp->lock);
+-					if (phba->sli_rev == LPFC_SLI_REV4)
+-						lpfc_sli_rpi_release(vports[i],
+-								     ndlp);
+-				} else {
+-					lpfc_unreg_rpi(vports[i], ndlp);
+-				}
+-				/*
+-				 * Whenever an SLI4 port goes offline, free the
+-				 * RPI. Get a new RPI when the adapter port
+-				 * comes back online.
+-				 */
+-				if (phba->sli_rev == LPFC_SLI_REV4) {
+-					lpfc_printf_vlog(vports[i], KERN_INFO,
+-						 LOG_NODE | LOG_DISCOVERY,
+-						 "0011 Free RPI x%x on "
+-						 "ndlp: x%px did x%x\n",
+-						 ndlp->nlp_rpi, ndlp,
+-						 ndlp->nlp_DID);
+-					lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
+-					ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
++					clear_bit(NLP_UNREG_INP,
++						  &ndlp->nlp_flag);
++					clear_bit(NLP_RPI_REGISTERED,
++						  &ndlp->nlp_flag);
+ 				}
+ 
+ 				if (ndlp->nlp_type & NLP_FABRIC) {
+@@ -6925,9 +6909,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
+ 			 */
+ 			mod_timer(&ndlp->nlp_delayfunc,
+ 				  jiffies + msecs_to_jiffies(1000));
+-			spin_lock_irq(&ndlp->lock);
+-			ndlp->nlp_flag |= NLP_DELAY_TMO;
+-			spin_unlock_irq(&ndlp->lock);
++			set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag);
+ 			ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
+ 			vport->port_state = LPFC_FDISC;
+ 		} else {
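
The tail of the lpfc_init.c diff also shows the driver's standard delayed-retry idiom: arm a one-second timer and set a "delay timeout pending" bit that the timer handler consumes. A hedged sketch of that idiom, with hypothetical names:

        #include <linux/timer.h>
        #include <linux/jiffies.h>
        #include <linux/bitops.h>

        #define MY_FLAG_DELAY_TMO	1

        struct my_node {
        	struct timer_list delay_timer;	/* timer_setup() run at node init */
        	unsigned long flags;
        };

        static void delay_fn(struct timer_list *t)
        {
        	struct my_node *n = from_timer(n, t, delay_timer);

        	clear_bit(MY_FLAG_DELAY_TMO, &n->flags);
        	/* re-issue the deferred command (FDISC/PLOGI in the driver) here */
        }

        static void arm_one_second_retry(struct my_node *n)
        {
        	mod_timer(&n->delay_timer, jiffies + msecs_to_jiffies(1000));
        	set_bit(MY_FLAG_DELAY_TMO, &n->flags);
        }
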
+diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
+index 4574716c8764fb..4d88cfe71caed3 100644
+--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
++++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
+@@ -65,7 +65,7 @@ lpfc_check_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 		 struct lpfc_name *nn, struct lpfc_name *pn)
+ {
+ 	/* First, we MUST have a RPI registered */
+-	if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED))
++	if (!test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag))
+ 		return 0;
+ 
+ 	/* Compare the ADISC rsp WWNN / WWPN matches our internal node
+@@ -239,7 +239,7 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
+ 	/* Abort outstanding I/O on NPort <nlp_DID> */
+ 	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_DISCOVERY,
+ 			 "2819 Abort outstanding I/O on NPort x%x "
+-			 "Data: x%x x%x x%x\n",
++			 "Data: x%lx x%x x%x\n",
+ 			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+ 			 ndlp->nlp_rpi);
+ 	/* Clean up all fabric IOs first.*/
+@@ -340,7 +340,7 @@ lpfc_defer_plogi_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *login_mbox)
+ 
+ 	/* Now process the REG_RPI cmpl */
+ 	lpfc_mbx_cmpl_reg_login(phba, login_mbox);
+-	ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN;
++	clear_bit(NLP_ACC_REGLOGIN, &ndlp->nlp_flag);
+ 	kfree(save_iocb);
+ }
+ 
+@@ -404,7 +404,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 
+ 	/* PLOGI chkparm OK */
+ 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+-			 "0114 PLOGI chkparm OK Data: x%x x%x x%x "
++			 "0114 PLOGI chkparm OK Data: x%x x%x x%lx "
+ 			 "x%x x%x x%lx\n",
+ 			 ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag,
+ 			 ndlp->nlp_rpi, vport->port_state,
+@@ -429,7 +429,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 	/* if already logged in, do implicit logout */
+ 	switch (ndlp->nlp_state) {
+ 	case  NLP_STE_NPR_NODE:
+-		if (!(ndlp->nlp_flag & NLP_NPR_ADISC))
++		if (!test_bit(NLP_NPR_ADISC, &ndlp->nlp_flag))
+ 			break;
+ 		fallthrough;
+ 	case  NLP_STE_REG_LOGIN_ISSUE:
+@@ -449,7 +449,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 			ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR);
+ 			ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+ 			ndlp->nlp_nvme_info &= ~NLP_NVME_NSLER;
+-			ndlp->nlp_flag &= ~NLP_FIRSTBURST;
++			clear_bit(NLP_FIRSTBURST, &ndlp->nlp_flag);
+ 
+ 			lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb,
+ 					 ndlp, NULL);
+@@ -480,7 +480,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 	ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR);
+ 	ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+ 	ndlp->nlp_nvme_info &= ~NLP_NVME_NSLER;
+-	ndlp->nlp_flag &= ~NLP_FIRSTBURST;
++	clear_bit(NLP_FIRSTBURST, &ndlp->nlp_flag);
+ 
+ 	login_mbox = NULL;
+ 	link_mbox = NULL;
+@@ -552,13 +552,13 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 		lpfc_can_disctmo(vport);
+ 	}
+ 
+-	ndlp->nlp_flag &= ~NLP_SUPPRESS_RSP;
++	clear_bit(NLP_SUPPRESS_RSP, &ndlp->nlp_flag);
+ 	if ((phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) &&
+ 	    sp->cmn.valid_vendor_ver_level) {
+ 		vid = be32_to_cpu(sp->un.vv.vid);
+ 		flag = be32_to_cpu(sp->un.vv.flags);
+ 		if ((vid == LPFC_VV_EMLX_ID) && (flag & LPFC_VV_SUPPRESS_RSP))
+-			ndlp->nlp_flag |= NLP_SUPPRESS_RSP;
++			set_bit(NLP_SUPPRESS_RSP, &ndlp->nlp_flag);
+ 	}
+ 
+ 	login_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+@@ -627,10 +627,9 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 			 * this ELS request. The only way to do this is
+ 			 * to register, then unregister the RPI.
+ 			 */
+-			spin_lock_irq(&ndlp->lock);
+-			ndlp->nlp_flag |= (NLP_RM_DFLT_RPI | NLP_ACC_REGLOGIN |
+-					   NLP_RCV_PLOGI);
+-			spin_unlock_irq(&ndlp->lock);
++			set_bit(NLP_RM_DFLT_RPI, &ndlp->nlp_flag);
++			set_bit(NLP_ACC_REGLOGIN, &ndlp->nlp_flag);
++			set_bit(NLP_RCV_PLOGI, &ndlp->nlp_flag);
+ 		}
+ 
+ 		stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD;
+@@ -665,9 +664,8 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 
+ 	login_mbox->ctx_u.save_iocb = save_iocb; /* For PLOGI ACC */
+ 
+-	spin_lock_irq(&ndlp->lock);
+-	ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);
+-	spin_unlock_irq(&ndlp->lock);
++	set_bit(NLP_ACC_REGLOGIN, &ndlp->nlp_flag);
++	set_bit(NLP_RCV_PLOGI, &ndlp->nlp_flag);
+ 
+ 	/* Start the ball rolling by issuing REG_LOGIN here */
+ 	rc = lpfc_sli_issue_mbox(phba, login_mbox, MBX_NOWAIT);
+@@ -797,7 +795,7 @@ lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 		 */
+ 		if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET)) {
+ 			if ((ndlp->nlp_state != NLP_STE_MAPPED_NODE) &&
+-			    !(ndlp->nlp_flag & NLP_NPR_ADISC))
++			    !test_bit(NLP_NPR_ADISC, &ndlp->nlp_flag))
+ 				lpfc_nlp_set_state(vport, ndlp,
+ 						   NLP_STE_MAPPED_NODE);
+ 		}
+@@ -814,9 +812,7 @@ lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 	/* 1 sec timeout */
+ 	mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000));
+ 
+-	spin_lock_irq(&ndlp->lock);
+-	ndlp->nlp_flag |= NLP_DELAY_TMO;
+-	spin_unlock_irq(&ndlp->lock);
++	set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag);
+ 	ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
+ 	ndlp->nlp_prev_state = ndlp->nlp_state;
+ 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+@@ -835,9 +831,7 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 	/* Only call LOGO ACC for first LOGO, this avoids sending unnecessary
+ 	 * PLOGIs during LOGO storms from a device.
+ 	 */
+-	spin_lock_irq(&ndlp->lock);
+-	ndlp->nlp_flag |= NLP_LOGO_ACC;
+-	spin_unlock_irq(&ndlp->lock);
++	set_bit(NLP_LOGO_ACC, &ndlp->nlp_flag);
+ 	if (els_cmd == ELS_CMD_PRLO)
+ 		lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
+ 	else
+@@ -890,9 +884,7 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 			 */
+ 			mod_timer(&ndlp->nlp_delayfunc,
+ 				  jiffies + msecs_to_jiffies(1000));
+-			spin_lock_irq(&ndlp->lock);
+-			ndlp->nlp_flag |= NLP_DELAY_TMO;
+-			spin_unlock_irq(&ndlp->lock);
++			set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag);
+ 			ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
+ 			vport->port_state = LPFC_FDISC;
+ 		} else {
+@@ -915,14 +907,12 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 		     ndlp->nlp_state <= NLP_STE_PRLI_ISSUE)) {
+ 			mod_timer(&ndlp->nlp_delayfunc,
+ 				  jiffies + msecs_to_jiffies(1000 * 1));
+-			spin_lock_irq(&ndlp->lock);
+-			ndlp->nlp_flag |= NLP_DELAY_TMO;
+-			spin_unlock_irq(&ndlp->lock);
++			set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag);
+ 			ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
+ 			lpfc_printf_vlog(vport, KERN_INFO,
+ 					 LOG_NODE | LOG_ELS | LOG_DISCOVERY,
+ 					 "3204 Start nlpdelay on DID x%06x "
+-					 "nflag x%x lastels x%x ref cnt %u",
++					 "nflag x%lx lastels x%x ref cnt %u",
+ 					 ndlp->nlp_DID, ndlp->nlp_flag,
+ 					 ndlp->nlp_last_elscmd,
+ 					 kref_read(&ndlp->kref));
+@@ -935,9 +925,7 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 	ndlp->nlp_prev_state = ndlp->nlp_state;
+ 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ 
+-	spin_lock_irq(&ndlp->lock);
+-	ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+-	spin_unlock_irq(&ndlp->lock);
++	clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag);
+ 	/* The driver has to wait until the ACC completes before it continues
+ 	 * processing the LOGO.  The action will resume in
+ 	 * lpfc_cmpl_els_logo_acc routine. Since part of processing includes an
+@@ -978,7 +966,7 @@ lpfc_rcv_prli_support_check(struct lpfc_vport *vport,
+ out:
+ 	lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY,
+ 			 "6115 Rcv PRLI (%x) check failed: ndlp rpi %d "
+-			 "state x%x flags x%x port_type: x%x "
++			 "state x%x flags x%lx port_type: x%x "
+ 			 "npr->initfcn: x%x npr->tgtfcn: x%x\n",
+ 			 cmd, ndlp->nlp_rpi, ndlp->nlp_state,
+ 			 ndlp->nlp_flag, vport->port_type,
+@@ -1020,7 +1008,7 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 			if (npr->prliType == PRLI_NVME_TYPE)
+ 				ndlp->nlp_type |= NLP_NVME_TARGET;
+ 			if (npr->writeXferRdyDis)
+-				ndlp->nlp_flag |= NLP_FIRSTBURST;
++				set_bit(NLP_FIRSTBURST, &ndlp->nlp_flag);
+ 		}
+ 		if (npr->Retry && ndlp->nlp_type &
+ 					(NLP_FCP_INITIATOR | NLP_FCP_TARGET))
+@@ -1057,7 +1045,7 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 			roles |= FC_RPORT_ROLE_FCP_TARGET;
+ 
+ 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
+-			"rport rolechg:   role:x%x did:x%x flg:x%x",
++			"rport rolechg:   role:x%x did:x%x flg:x%lx",
+ 			roles, ndlp->nlp_DID, ndlp->nlp_flag);
+ 
+ 		if (vport->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
+@@ -1068,10 +1056,8 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ static uint32_t
+ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+ {
+-	if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED)) {
+-		spin_lock_irq(&ndlp->lock);
+-		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+-		spin_unlock_irq(&ndlp->lock);
++	if (!test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag)) {
++		clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag);
+ 		return 0;
+ 	}
+ 
+@@ -1081,16 +1067,12 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+ 		    (test_bit(FC_RSCN_MODE, &vport->fc_flag) ||
+ 		    ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) &&
+ 		     (ndlp->nlp_type & NLP_FCP_TARGET)))) {
+-			spin_lock_irq(&ndlp->lock);
+-			ndlp->nlp_flag |= NLP_NPR_ADISC;
+-			spin_unlock_irq(&ndlp->lock);
++			set_bit(NLP_NPR_ADISC, &ndlp->nlp_flag);
+ 			return 1;
+ 		}
+ 	}
+ 
+-	spin_lock_irq(&ndlp->lock);
+-	ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+-	spin_unlock_irq(&ndlp->lock);
++	clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag);
+ 	lpfc_unreg_rpi(vport, ndlp);
+ 	return 0;
+ }
+@@ -1115,10 +1097,10 @@ lpfc_release_rpi(struct lpfc_hba *phba, struct lpfc_vport *vport,
+ 	/* If there is already an UNREG in progress for this ndlp,
+ 	 * no need to queue up another one.
+ 	 */
+-	if (ndlp->nlp_flag & NLP_UNREG_INP) {
++	if (test_bit(NLP_UNREG_INP, &ndlp->nlp_flag)) {
+ 		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ 				 "1435 release_rpi SKIP UNREG x%x on "
+-				 "NPort x%x deferred x%x  flg x%x "
++				 "NPort x%x deferred x%x  flg x%lx "
+ 				 "Data: x%px\n",
+ 				 ndlp->nlp_rpi, ndlp->nlp_DID,
+ 				 ndlp->nlp_defer_did,
+@@ -1143,11 +1125,11 @@ lpfc_release_rpi(struct lpfc_hba *phba, struct lpfc_vport *vport,
+ 
+ 		if (((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) &&
+ 		    (!test_bit(FC_OFFLINE_MODE, &vport->fc_flag)))
+-			ndlp->nlp_flag |= NLP_UNREG_INP;
++			set_bit(NLP_UNREG_INP, &ndlp->nlp_flag);
+ 
+ 		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ 				 "1437 release_rpi UNREG x%x "
+-				 "on NPort x%x flg x%x\n",
++				 "on NPort x%x flg x%lx\n",
+ 				 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag);
+ 
+ 		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+@@ -1175,7 +1157,7 @@ lpfc_disc_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 	}
+ 	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ 			 "0271 Illegal State Transition: node x%x "
+-			 "event x%x, state x%x Data: x%x x%x\n",
++			 "event x%x, state x%x Data: x%x x%lx\n",
+ 			 ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
+ 			 ndlp->nlp_flag);
+ 	return ndlp->nlp_state;
+@@ -1190,13 +1172,12 @@ lpfc_cmpl_plogi_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 	 * working on the same NPortID, do nothing for this thread
+ 	 * to stop it.
+ 	 */
+-	if (!(ndlp->nlp_flag & NLP_RCV_PLOGI)) {
++	if (!test_bit(NLP_RCV_PLOGI, &ndlp->nlp_flag))
+ 		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ 				 "0272 Illegal State Transition: node x%x "
+-				 "event x%x, state x%x Data: x%x x%x\n",
++				 "event x%x, state x%x Data: x%x x%lx\n",
+ 				  ndlp->nlp_DID, evt, ndlp->nlp_state,
+ 				  ndlp->nlp_rpi, ndlp->nlp_flag);
+-	}
+ 	return ndlp->nlp_state;
+ }
+ 
+@@ -1230,9 +1211,7 @@ lpfc_rcv_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ {
+ 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+ 
+-	spin_lock_irq(&ndlp->lock);
+-	ndlp->nlp_flag |= NLP_LOGO_ACC;
+-	spin_unlock_irq(&ndlp->lock);
++	set_bit(NLP_LOGO_ACC, &ndlp->nlp_flag);
+ 	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
+ 
+ 	return ndlp->nlp_state;
+@@ -1290,11 +1269,9 @@ lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 			NULL);
+ 	} else {
+ 		if (lpfc_rcv_plogi(vport, ndlp, cmdiocb) &&
+-		    (ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
+-		    (vport->num_disc_nodes)) {
+-			spin_lock_irq(&ndlp->lock);
+-			ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+-			spin_unlock_irq(&ndlp->lock);
++		    test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag) &&
++		    vport->num_disc_nodes) {
++			clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag);
+ 			/* Check if there are more PLOGIs to be sent */
+ 			lpfc_more_plogi(vport);
+ 			if (vport->num_disc_nodes == 0) {
+@@ -1356,9 +1333,7 @@ lpfc_rcv_els_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 
+ 	/* Put ndlp in npr state set plogi timer for 1 sec */
+ 	mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000 * 1));
+-	spin_lock_irq(&ndlp->lock);
+-	ndlp->nlp_flag |= NLP_DELAY_TMO;
+-	spin_unlock_irq(&ndlp->lock);
++	set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag);
+ 	ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
+ 	ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
+ 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+@@ -1389,7 +1364,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
+ 
+ 	ulp_status = get_job_ulpstatus(phba, rspiocb);
+ 
+-	if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
++	if (test_bit(NLP_ACC_REGLOGIN, &ndlp->nlp_flag)) {
+ 		/* Recovery from PLOGI collision logic */
+ 		return ndlp->nlp_state;
+ 	}
+@@ -1418,7 +1393,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
+ 		goto out;
+ 	/* PLOGI chkparm OK */
+ 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+-			 "0121 PLOGI chkparm OK Data: x%x x%x x%x x%x\n",
++			 "0121 PLOGI chkparm OK Data: x%x x%x x%lx x%x\n",
+ 			 ndlp->nlp_DID, ndlp->nlp_state,
+ 			 ndlp->nlp_flag, ndlp->nlp_rpi);
+ 	if (vport->cfg_fcp_class == 2 && (sp->cls2.classValid))
+@@ -1446,14 +1421,14 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
+ 			ed_tov = (phba->fc_edtov + 999999) / 1000000;
+ 		}
+ 
+-		ndlp->nlp_flag &= ~NLP_SUPPRESS_RSP;
++		clear_bit(NLP_SUPPRESS_RSP, &ndlp->nlp_flag);
+ 		if ((phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) &&
+ 		    sp->cmn.valid_vendor_ver_level) {
+ 			vid = be32_to_cpu(sp->un.vv.vid);
+ 			flag = be32_to_cpu(sp->un.vv.flags);
+ 			if ((vid == LPFC_VV_EMLX_ID) &&
+ 			    (flag & LPFC_VV_SUPPRESS_RSP))
+-				ndlp->nlp_flag |= NLP_SUPPRESS_RSP;
++				set_bit(NLP_SUPPRESS_RSP, &ndlp->nlp_flag);
+ 		}
+ 
+ 		/*
+@@ -1476,7 +1451,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
+ 						 LOG_TRACE_EVENT,
+ 						 "0133 PLOGI: no memory "
+ 						 "for config_link "
+-						 "Data: x%x x%x x%x x%x\n",
++						 "Data: x%x x%x x%lx x%x\n",
+ 						 ndlp->nlp_DID, ndlp->nlp_state,
+ 						 ndlp->nlp_flag, ndlp->nlp_rpi);
+ 				goto out;
+@@ -1500,7 +1475,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
+ 	if (!mbox) {
+ 		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ 				 "0018 PLOGI: no memory for reg_login "
+-				 "Data: x%x x%x x%x x%x\n",
++				 "Data: x%x x%x x%lx x%x\n",
+ 				 ndlp->nlp_DID, ndlp->nlp_state,
+ 				 ndlp->nlp_flag, ndlp->nlp_rpi);
+ 		goto out;
+@@ -1520,7 +1495,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
+ 			mbox->mbox_cmpl = lpfc_mbx_cmpl_fdmi_reg_login;
+ 			break;
+ 		default:
+-			ndlp->nlp_flag |= NLP_REG_LOGIN_SEND;
++			set_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag);
+ 			mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
+ 		}
+ 
+@@ -1535,8 +1510,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
+ 					   NLP_STE_REG_LOGIN_ISSUE);
+ 			return ndlp->nlp_state;
+ 		}
+-		if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
+-			ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
++		clear_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag);
+ 		/* decrement node reference count to the failed mbox
+ 		 * command
+ 		 */
+@@ -1544,7 +1518,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
+ 		lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
+ 		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ 				 "0134 PLOGI: cannot issue reg_login "
+-				 "Data: x%x x%x x%x x%x\n",
++				 "Data: x%x x%x x%lx x%x\n",
+ 				 ndlp->nlp_DID, ndlp->nlp_state,
+ 				 ndlp->nlp_flag, ndlp->nlp_rpi);
+ 	} else {
+@@ -1552,7 +1526,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
+ 
+ 		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ 				 "0135 PLOGI: cannot format reg_login "
+-				 "Data: x%x x%x x%x x%x\n",
++				 "Data: x%x x%x x%lx x%x\n",
+ 				 ndlp->nlp_DID, ndlp->nlp_state,
+ 				 ndlp->nlp_flag, ndlp->nlp_rpi);
+ 	}
+@@ -1605,18 +1579,15 @@ static uint32_t
+ lpfc_device_rm_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 			   void *arg, uint32_t evt)
+ {
+-	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
+-		spin_lock_irq(&ndlp->lock);
+-		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
+-		spin_unlock_irq(&ndlp->lock);
++	if (test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag)) {
++		set_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag);
+ 		return ndlp->nlp_state;
+-	} else {
+-		/* software abort outstanding PLOGI */
+-		lpfc_els_abort(vport->phba, ndlp);
+-
+-		lpfc_drop_node(vport, ndlp);
+-		return NLP_STE_FREED_NODE;
+ 	}
++	/* software abort outstanding PLOGI */
++	lpfc_els_abort(vport->phba, ndlp);
++
++	lpfc_drop_node(vport, ndlp);
++	return NLP_STE_FREED_NODE;
+ }
+ 
+ static uint32_t
+@@ -1636,9 +1607,8 @@ lpfc_device_recov_plogi_issue(struct lpfc_vport *vport,
+ 
+ 	ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
+ 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+-	spin_lock_irq(&ndlp->lock);
+-	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
+-	spin_unlock_irq(&ndlp->lock);
++	clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag);
++	clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag);
+ 
+ 	return ndlp->nlp_state;
+ }
+@@ -1656,10 +1626,7 @@ lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 	cmdiocb = (struct lpfc_iocbq *) arg;
+ 
+ 	if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
+-		if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
+-			spin_lock_irq(&ndlp->lock);
+-			ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+-			spin_unlock_irq(&ndlp->lock);
++		if (test_and_clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag)) {
+ 			if (vport->num_disc_nodes)
+ 				lpfc_more_adisc(vport);
+ 		}
+@@ -1748,9 +1715,7 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
+ 		/* 1 sec timeout */
+ 		mod_timer(&ndlp->nlp_delayfunc,
+ 			  jiffies + msecs_to_jiffies(1000));
+-		spin_lock_irq(&ndlp->lock);
+-		ndlp->nlp_flag |= NLP_DELAY_TMO;
+-		spin_unlock_irq(&ndlp->lock);
++		set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag);
+ 		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
+ 
+ 		ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
+@@ -1789,18 +1754,15 @@ static uint32_t
+ lpfc_device_rm_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 			   void *arg, uint32_t evt)
+ {
+-	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
+-		spin_lock_irq(&ndlp->lock);
+-		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
+-		spin_unlock_irq(&ndlp->lock);
++	if (test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag)) {
++		set_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag);
+ 		return ndlp->nlp_state;
+-	} else {
+-		/* software abort outstanding ADISC */
+-		lpfc_els_abort(vport->phba, ndlp);
+-
+-		lpfc_drop_node(vport, ndlp);
+-		return NLP_STE_FREED_NODE;
+ 	}
++	/* software abort outstanding ADISC */
++	lpfc_els_abort(vport->phba, ndlp);
++
++	lpfc_drop_node(vport, ndlp);
++	return NLP_STE_FREED_NODE;
+ }
+ 
+ static uint32_t
+@@ -1820,9 +1782,8 @@ lpfc_device_recov_adisc_issue(struct lpfc_vport *vport,
+ 
+ 	ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
+ 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+-	spin_lock_irq(&ndlp->lock);
+-	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
+-	spin_unlock_irq(&ndlp->lock);
++	clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag);
++	clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag);
+ 	lpfc_disc_set_adisc(vport, ndlp);
+ 	return ndlp->nlp_state;
+ }
+@@ -1856,7 +1817,7 @@ lpfc_rcv_prli_reglogin_issue(struct lpfc_vport *vport,
+ 		 * transition to UNMAPPED provided the RPI has completed
+ 		 * registration.
+ 		 */
+-		if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
++		if (test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag)) {
+ 			lpfc_rcv_prli(vport, ndlp, cmdiocb);
+ 			lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
+ 		} else {
+@@ -1895,7 +1856,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
+ 	if ((mb = phba->sli.mbox_active)) {
+ 		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
+ 		   (ndlp == mb->ctx_ndlp)) {
+-			ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
++			clear_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag);
+ 			lpfc_nlp_put(ndlp);
+ 			mb->ctx_ndlp = NULL;
+ 			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+@@ -1906,7 +1867,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
+ 	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
+ 		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
+ 		   (ndlp == mb->ctx_ndlp)) {
+-			ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
++			clear_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag);
+ 			lpfc_nlp_put(ndlp);
+ 			list_del(&mb->list);
+ 			phba->sli.mboxq_cnt--;
+@@ -1976,9 +1937,7 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
+ 		/* Put ndlp in npr state set plogi timer for 1 sec */
+ 		mod_timer(&ndlp->nlp_delayfunc,
+ 			  jiffies + msecs_to_jiffies(1000 * 1));
+-		spin_lock_irq(&ndlp->lock);
+-		ndlp->nlp_flag |= NLP_DELAY_TMO;
+-		spin_unlock_irq(&ndlp->lock);
++		set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag);
+ 		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
+ 
+ 		lpfc_issue_els_logo(vport, ndlp, 0);
+@@ -1989,7 +1948,7 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
+ 	if (phba->sli_rev < LPFC_SLI_REV4)
+ 		ndlp->nlp_rpi = mb->un.varWords[0];
+ 
+-	ndlp->nlp_flag |= NLP_RPI_REGISTERED;
++	set_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag);
+ 
+ 	/* Only if we are not a fabric nport do we issue PRLI */
+ 	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+@@ -2061,15 +2020,12 @@ lpfc_device_rm_reglogin_issue(struct lpfc_vport *vport,
+ 			      void *arg,
+ 			      uint32_t evt)
+ {
+-	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
+-		spin_lock_irq(&ndlp->lock);
+-		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
+-		spin_unlock_irq(&ndlp->lock);
++	if (test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag)) {
++		set_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag);
+ 		return ndlp->nlp_state;
+-	} else {
+-		lpfc_drop_node(vport, ndlp);
+-		return NLP_STE_FREED_NODE;
+ 	}
++	lpfc_drop_node(vport, ndlp);
++	return NLP_STE_FREED_NODE;
+ }
+ 
+ static uint32_t
+@@ -2084,17 +2040,16 @@ lpfc_device_recov_reglogin_issue(struct lpfc_vport *vport,
+ 
+ 	ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
+ 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+-	spin_lock_irq(&ndlp->lock);
+ 
+ 	/* If we are a target we won't immediately transition into PRLI,
+ 	 * so if REG_LOGIN already completed we don't need to ignore it.
+ 	 */
+-	if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED) ||
++	if (!test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag) ||
+ 	    !vport->phba->nvmet_support)
+-		ndlp->nlp_flag |= NLP_IGNR_REG_CMPL;
++		set_bit(NLP_IGNR_REG_CMPL, &ndlp->nlp_flag);
+ 
+-	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
+-	spin_unlock_irq(&ndlp->lock);
++	clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag);
++	clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag);
+ 	lpfc_disc_set_adisc(vport, ndlp);
+ 	return ndlp->nlp_state;
+ }
+@@ -2228,7 +2183,8 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 			if (npr->targetFunc) {
+ 				ndlp->nlp_type |= NLP_FCP_TARGET;
+ 				if (npr->writeXferRdyDis)
+-					ndlp->nlp_flag |= NLP_FIRSTBURST;
++					set_bit(NLP_FIRSTBURST,
++						&ndlp->nlp_flag);
+ 			}
+ 			if (npr->Retry)
+ 				ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
+@@ -2272,7 +2228,7 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 				/* Both sides support FB. The target's first
+ 				 * burst size is a 512 byte encoded value.
+ 				 */
+-				ndlp->nlp_flag |= NLP_FIRSTBURST;
++				set_bit(NLP_FIRSTBURST, &ndlp->nlp_flag);
+ 				ndlp->nvme_fb_size = bf_get_be32(prli_fb_sz,
+ 								 nvpr);
+ 
+@@ -2287,7 +2243,7 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 
+ 		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
+ 				 "6029 NVME PRLI Cmpl w1 x%08x "
+-				 "w4 x%08x w5 x%08x flag x%x, "
++				 "w4 x%08x w5 x%08x flag x%lx, "
+ 				 "fcp_info x%x nlp_type x%x\n",
+ 				 be32_to_cpu(nvpr->word1),
+ 				 be32_to_cpu(nvpr->word4),
+@@ -2299,9 +2255,7 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 	    (vport->port_type == LPFC_NPIV_PORT) &&
+ 	     vport->cfg_restrict_login) {
+ out:
+-		spin_lock_irq(&ndlp->lock);
+-		ndlp->nlp_flag |= NLP_TARGET_REMOVE;
+-		spin_unlock_irq(&ndlp->lock);
++		set_bit(NLP_TARGET_REMOVE, &ndlp->nlp_flag);
+ 		lpfc_issue_els_logo(vport, ndlp, 0);
+ 
+ 		ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
+@@ -2353,18 +2307,15 @@ static uint32_t
+ lpfc_device_rm_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 			  void *arg, uint32_t evt)
+ {
+-	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
+-		spin_lock_irq(&ndlp->lock);
+-		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
+-		spin_unlock_irq(&ndlp->lock);
++	if (test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag)) {
++		set_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag);
+ 		return ndlp->nlp_state;
+-	} else {
+-		/* software abort outstanding PLOGI */
+-		lpfc_els_abort(vport->phba, ndlp);
+-
+-		lpfc_drop_node(vport, ndlp);
+-		return NLP_STE_FREED_NODE;
+ 	}
++	/* software abort outstanding PLOGI */
++	lpfc_els_abort(vport->phba, ndlp);
++
++	lpfc_drop_node(vport, ndlp);
++	return NLP_STE_FREED_NODE;
+ }
+ 
+ 
+@@ -2401,9 +2352,8 @@ lpfc_device_recov_prli_issue(struct lpfc_vport *vport,
+ 
+ 	ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
+ 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+-	spin_lock_irq(&ndlp->lock);
+-	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
+-	spin_unlock_irq(&ndlp->lock);
++	clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag);
++	clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag);
+ 	lpfc_disc_set_adisc(vport, ndlp);
+ 	return ndlp->nlp_state;
+ }
+@@ -2442,9 +2392,7 @@ lpfc_rcv_logo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ {
+ 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
+ 
+-	spin_lock_irq(&ndlp->lock);
+-	ndlp->nlp_flag |= NLP_LOGO_ACC;
+-	spin_unlock_irq(&ndlp->lock);
++	set_bit(NLP_LOGO_ACC, &ndlp->nlp_flag);
+ 	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
+ 	return ndlp->nlp_state;
+ }
+@@ -2483,9 +2431,8 @@ lpfc_cmpl_logo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ {
+ 	ndlp->nlp_prev_state = NLP_STE_LOGO_ISSUE;
+ 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+-	spin_lock_irq(&ndlp->lock);
+-	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
+-	spin_unlock_irq(&ndlp->lock);
++	clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag);
++	clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag);
+ 	lpfc_disc_set_adisc(vport, ndlp);
+ 	return ndlp->nlp_state;
+ }
+@@ -2591,8 +2538,9 @@ lpfc_device_recov_unmap_node(struct lpfc_vport *vport,
+ {
+ 	ndlp->nlp_prev_state = NLP_STE_UNMAPPED_NODE;
+ 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
++	clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag);
++	clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag);
+ 	spin_lock_irq(&ndlp->lock);
+-	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
+ 	ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
+ 	spin_unlock_irq(&ndlp->lock);
+ 	lpfc_disc_set_adisc(vport, ndlp);
+@@ -2653,9 +2601,7 @@ lpfc_rcv_prlo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 	lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT);
+ 
+ 	/* Send PRLO_ACC */
+-	spin_lock_irq(&ndlp->lock);
+-	ndlp->nlp_flag |= NLP_LOGO_ACC;
+-	spin_unlock_irq(&ndlp->lock);
++	set_bit(NLP_LOGO_ACC, &ndlp->nlp_flag);
+ 	lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
+ 
+ 	/* Save ELS_CMD_PRLO as the last elscmd and then set to NPR.
+@@ -2665,7 +2611,7 @@ lpfc_rcv_prlo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 	ndlp->nlp_prev_state = ndlp->nlp_state;
+ 
+ 	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_ELS | LOG_DISCOVERY,
+-			 "3422 DID x%06x nflag x%x lastels x%x ref cnt %u\n",
++			 "3422 DID x%06x nflag x%lx lastels x%x ref cnt %u\n",
+ 			 ndlp->nlp_DID, ndlp->nlp_flag,
+ 			 ndlp->nlp_last_elscmd,
+ 			 kref_read(&ndlp->kref));
+@@ -2685,8 +2631,9 @@ lpfc_device_recov_mapped_node(struct lpfc_vport *vport,
+ 
+ 	ndlp->nlp_prev_state = NLP_STE_MAPPED_NODE;
+ 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
++	clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag);
++	clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag);
+ 	spin_lock_irq(&ndlp->lock);
+-	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
+ 	ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
+ 	spin_unlock_irq(&ndlp->lock);
+ 	return ndlp->nlp_state;
+@@ -2699,16 +2646,16 @@ lpfc_rcv_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 	struct lpfc_iocbq *cmdiocb  = (struct lpfc_iocbq *) arg;
+ 
+ 	/* Ignore PLOGI if we have an outstanding LOGO */
+-	if (ndlp->nlp_flag & (NLP_LOGO_SND | NLP_LOGO_ACC))
++	if (test_bit(NLP_LOGO_SND, &ndlp->nlp_flag) ||
++	    test_bit(NLP_LOGO_ACC, &ndlp->nlp_flag))
+ 		return ndlp->nlp_state;
+ 	if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
+ 		lpfc_cancel_retry_delay_tmo(vport, ndlp);
+-		spin_lock_irq(&ndlp->lock);
+-		ndlp->nlp_flag &= ~(NLP_NPR_ADISC | NLP_NPR_2B_DISC);
+-		spin_unlock_irq(&ndlp->lock);
+-	} else if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
++		clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag);
++		clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag);
++	} else if (!test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag)) {
+ 		/* send PLOGI immediately, move to PLOGI issue state */
+-		if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
++		if (!test_bit(NLP_DELAY_TMO, &ndlp->nlp_flag)) {
+ 			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
+ 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
+ 			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
+@@ -2729,14 +2676,14 @@ lpfc_rcv_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
+ 	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
+ 
+-	if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
++	if (!test_bit(NLP_DELAY_TMO, &ndlp->nlp_flag)) {
+ 		/*
+ 		 * ADISC nodes will be handled in regular discovery path after
+ 		 * receiving response from NS.
+ 		 *
+ 		 * For other nodes, Send PLOGI to trigger an implicit LOGO.
+ 		 */
+-		if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
++		if (!test_bit(NLP_NPR_ADISC, &ndlp->nlp_flag)) {
+ 			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
+ 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
+ 			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
+@@ -2767,15 +2714,15 @@ lpfc_rcv_padisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 	 * or discovery in progress for this node. Starting discovery
+ 	 * here will affect the counting of discovery threads.
+ 	 */
+-	if (!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
+-	    !(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
++	if (!test_bit(NLP_DELAY_TMO, &ndlp->nlp_flag) &&
++	    !test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag)) {
+ 		/*
+ 		 * ADISC nodes will be handled in regular discovery path after
+ 		 * receiving response from NS.
+ 		 *
+ 		 * For other nodes, Send PLOGI to trigger an implicit LOGO.
+ 		 */
+-		if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
++		if (!test_bit(NLP_NPR_ADISC, &ndlp->nlp_flag)) {
+ 			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
+ 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
+ 			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
+@@ -2790,24 +2737,18 @@ lpfc_rcv_prlo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ {
+ 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+ 
+-	spin_lock_irq(&ndlp->lock);
+-	ndlp->nlp_flag |= NLP_LOGO_ACC;
+-	spin_unlock_irq(&ndlp->lock);
++	set_bit(NLP_LOGO_ACC, &ndlp->nlp_flag);
+ 
+ 	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
+ 
+-	if ((ndlp->nlp_flag & NLP_DELAY_TMO) == 0) {
++	if (!test_bit(NLP_DELAY_TMO, &ndlp->nlp_flag)) {
+ 		mod_timer(&ndlp->nlp_delayfunc,
+ 			  jiffies + msecs_to_jiffies(1000 * 1));
+-		spin_lock_irq(&ndlp->lock);
+-		ndlp->nlp_flag |= NLP_DELAY_TMO;
+-		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+-		spin_unlock_irq(&ndlp->lock);
++		set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag);
++		clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag);
+ 		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
+ 	} else {
+-		spin_lock_irq(&ndlp->lock);
+-		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+-		spin_unlock_irq(&ndlp->lock);
++		clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag);
+ 	}
+ 	return ndlp->nlp_state;
+ }
+@@ -2844,7 +2785,7 @@ lpfc_cmpl_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 
+ 	ulp_status = get_job_ulpstatus(phba, rspiocb);
+ 
+-	if (ulp_status && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
++	if (ulp_status && test_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag)) {
+ 		lpfc_drop_node(vport, ndlp);
+ 		return NLP_STE_FREED_NODE;
+ 	}
+@@ -2877,7 +2818,7 @@ lpfc_cmpl_adisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 
+ 	ulp_status = get_job_ulpstatus(phba, rspiocb);
+ 
+-	if (ulp_status && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
++	if (ulp_status && test_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag)) {
+ 		lpfc_drop_node(vport, ndlp);
+ 		return NLP_STE_FREED_NODE;
+ 	}
+@@ -2896,12 +2837,11 @@ lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport,
+ 		/* SLI4 ports have preallocated logical rpis. */
+ 		if (vport->phba->sli_rev < LPFC_SLI_REV4)
+ 			ndlp->nlp_rpi = mb->un.varWords[0];
+-		ndlp->nlp_flag |= NLP_RPI_REGISTERED;
+-		if (ndlp->nlp_flag & NLP_LOGO_ACC) {
++		set_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag);
++		if (test_bit(NLP_LOGO_ACC, &ndlp->nlp_flag))
+ 			lpfc_unreg_rpi(vport, ndlp);
+-		}
+ 	} else {
+-		if (ndlp->nlp_flag & NLP_NODEV_REMOVE) {
++		if (test_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag)) {
+ 			lpfc_drop_node(vport, ndlp);
+ 			return NLP_STE_FREED_NODE;
+ 		}
+@@ -2913,10 +2853,8 @@ static uint32_t
+ lpfc_device_rm_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 			void *arg, uint32_t evt)
+ {
+-	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
+-		spin_lock_irq(&ndlp->lock);
+-		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
+-		spin_unlock_irq(&ndlp->lock);
++	if (test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag)) {
++		set_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag);
+ 		return ndlp->nlp_state;
+ 	}
+ 	lpfc_drop_node(vport, ndlp);
+@@ -2932,8 +2870,9 @@ lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 		return ndlp->nlp_state;
+ 
+ 	lpfc_cancel_retry_delay_tmo(vport, ndlp);
++	clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag);
++	clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag);
+ 	spin_lock_irq(&ndlp->lock);
+-	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
+ 	ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
+ 	spin_unlock_irq(&ndlp->lock);
+ 	return ndlp->nlp_state;
+@@ -3146,7 +3085,7 @@ lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 	/* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */
+ 	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ 			 "0211 DSM in event x%x on NPort x%x in "
+-			 "state %d rpi x%x Data: x%x x%x\n",
++			 "state %d rpi x%x Data: x%lx x%x\n",
+ 			 evt, ndlp->nlp_DID, cur_state, ndlp->nlp_rpi,
+ 			 ndlp->nlp_flag, data1);
+ 
+@@ -3163,12 +3102,12 @@ lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 			((uint32_t)ndlp->nlp_type));
+ 		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ 			 "0212 DSM out state %d on NPort x%x "
+-			 "rpi x%x Data: x%x x%x\n",
++			 "rpi x%x Data: x%lx x%x\n",
+ 			 rc, ndlp->nlp_DID, ndlp->nlp_rpi, ndlp->nlp_flag,
+ 			 data1);
+ 
+ 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
+-			"DSM out:         ste:%d did:x%x flg:x%x",
++			"DSM out:         ste:%d did:x%x flg:x%lx",
+ 			rc, ndlp->nlp_DID, ndlp->nlp_flag);
+ 		/* Decrement the ndlp reference count held for this function */
+ 		lpfc_nlp_put(ndlp);
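
Several lpfc_nportdisc.c hunks go a step further than plain set/clear: where the old code tested a flag and then cleared it under the lock, the new code collapses both into one atomic test_and_clear_bit(). A sketch of the collapsed form, names hypothetical:

        /* before:
         *	if (n->flags & MASK_2B_DISC) {
         *		spin_lock_irq(&n->lock);
         *		n->flags &= ~MASK_2B_DISC;
         *		spin_unlock_irq(&n->lock);
         *		continue_discovery(n);
         *	}
         */
        if (test_and_clear_bit(MY_FLAG_2B_DISC, &n->flags))
        	continue_discovery(n);	/* bit read and cleared in one atomic step */
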
+diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
+index fec23c7237304b..e9d9884830f302 100644
+--- a/drivers/scsi/lpfc/lpfc_nvme.c
++++ b/drivers/scsi/lpfc/lpfc_nvme.c
+@@ -1232,7 +1232,7 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
+ 
+ 			/* Word 5 */
+ 			if ((phba->cfg_nvme_enable_fb) &&
+-			    (pnode->nlp_flag & NLP_FIRSTBURST)) {
++			    test_bit(NLP_FIRSTBURST, &pnode->nlp_flag)) {
+ 				req_len = lpfc_ncmd->nvmeCmd->payload_length;
+ 				if (req_len < pnode->nvme_fb_size)
+ 					wqe->fcp_iwrite.initial_xfer_len =
+@@ -2644,14 +2644,11 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+ 				 * reference. Check if another thread has set
+ 				 * NLP_DROPPED.
+ 				 */
+-				spin_lock_irq(&ndlp->lock);
+-				if (!(ndlp->nlp_flag & NLP_DROPPED)) {
+-					ndlp->nlp_flag |= NLP_DROPPED;
+-					spin_unlock_irq(&ndlp->lock);
++				if (!test_and_set_bit(NLP_DROPPED,
++						      &ndlp->nlp_flag)) {
+ 					lpfc_nlp_put(ndlp);
+ 					return;
+ 				}
+-				spin_unlock_irq(&ndlp->lock);
+ 			}
+ 		}
+ 	}
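
The lpfc_nvme.c hunk is the mirror image: the old code took the node lock so that only one thread could mark the node dropped and release the reference, and test_and_set_bit() gives the same exactly-once guarantee without the lock. Sketch, names hypothetical:

        /* Only the thread that flips the bit 0 -> 1 gets "false" back, so the
         * final reference is dropped exactly once even under racing callers.
         */
        if (!test_and_set_bit(MY_FLAG_DROPPED, &n->flags))
        	my_node_put(n);		/* hypothetical kref_put() wrapper */
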
+diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
+index 55c3e2c2bf8f7e..e6c9112a886275 100644
+--- a/drivers/scsi/lpfc/lpfc_nvmet.c
++++ b/drivers/scsi/lpfc/lpfc_nvmet.c
+@@ -2854,7 +2854,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
+ 			/* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */
+ 
+ 			if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) {
+-				if (ndlp->nlp_flag & NLP_SUPPRESS_RSP)
++				if (test_bit(NLP_SUPPRESS_RSP, &ndlp->nlp_flag))
+ 					bf_set(wqe_sup,
+ 					       &wqe->fcp_tsend.wqe_com, 1);
+ 			} else {
+diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
+index 11c974bffa7209..905026a4782cf9 100644
+--- a/drivers/scsi/lpfc/lpfc_scsi.c
++++ b/drivers/scsi/lpfc/lpfc_scsi.c
+@@ -4629,7 +4629,7 @@ static int lpfc_scsi_prep_cmnd_buf_s3(struct lpfc_vport *vport,
+ 			iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
+ 			iocb_cmd->ulpPU = PARM_READ_CHECK;
+ 			if (vport->cfg_first_burst_size &&
+-			    (pnode->nlp_flag & NLP_FIRSTBURST)) {
++			    test_bit(NLP_FIRSTBURST, &pnode->nlp_flag)) {
+ 				u32 xrdy_len;
+ 
+ 				fcpdl = scsi_bufflen(scsi_cmnd);
+@@ -5829,7 +5829,7 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct fc_rport *rport,
+ 
+ 	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+ 			 "0702 Issue %s to TGT %d LUN %llu "
+-			 "rpi x%x nlp_flag x%x Data: x%x x%x\n",
++			 "rpi x%x nlp_flag x%lx Data: x%x x%x\n",
+ 			 lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
+ 			 pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag,
+ 			 iocbq->cmd_flag);
+@@ -6094,8 +6094,8 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
+ 		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ 			"0722 Target Reset rport failure: rdata x%px\n", rdata);
+ 		if (pnode) {
++			clear_bit(NLP_NPR_ADISC, &pnode->nlp_flag);
+ 			spin_lock_irqsave(&pnode->lock, flags);
+-			pnode->nlp_flag &= ~NLP_NPR_ADISC;
+ 			pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+ 			spin_unlock_irqrestore(&pnode->lock, flags);
+ 		}
+@@ -6124,7 +6124,7 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
+ 		    !pnode->logo_waitq) {
+ 			pnode->logo_waitq = &waitq;
+ 			pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+-			pnode->nlp_flag |= NLP_ISSUE_LOGO;
++			set_bit(NLP_ISSUE_LOGO, &pnode->nlp_flag);
+ 			pnode->save_flags |= NLP_WAIT_FOR_LOGO;
+ 			spin_unlock_irqrestore(&pnode->lock, flags);
+ 			lpfc_unreg_rpi(vport, pnode);
+diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
+index 4dccbaeb632835..c4acf594286e5d 100644
+--- a/drivers/scsi/lpfc/lpfc_sli.c
++++ b/drivers/scsi/lpfc/lpfc_sli.c
+@@ -2842,27 +2842,6 @@ lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
+ 	return;
+ }
+ 
+-static void
+-__lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+-{
+-	unsigned long iflags;
+-
+-	if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
+-		lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
+-		spin_lock_irqsave(&ndlp->lock, iflags);
+-		ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
+-		ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
+-		spin_unlock_irqrestore(&ndlp->lock, iflags);
+-	}
+-	ndlp->nlp_flag &= ~NLP_UNREG_INP;
+-}
+-
+-void
+-lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+-{
+-	__lpfc_sli_rpi_release(vport, ndlp);
+-}
+-
+ /**
+  * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
+  * @phba: Pointer to HBA context object.
+@@ -2932,18 +2911,18 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+ 				vport,
+ 				KERN_INFO, LOG_MBOX | LOG_DISCOVERY,
+ 				"1438 UNREG cmpl deferred mbox x%x "
+-				"on NPort x%x Data: x%x x%x x%px x%lx x%x\n",
++				"on NPort x%x Data: x%lx x%x x%px x%lx x%x\n",
+ 				ndlp->nlp_rpi, ndlp->nlp_DID,
+ 				ndlp->nlp_flag, ndlp->nlp_defer_did,
+ 				ndlp, vport->load_flag, kref_read(&ndlp->kref));
+ 
+-			if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
+-			    (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
+-				ndlp->nlp_flag &= ~NLP_UNREG_INP;
++			if (test_bit(NLP_UNREG_INP, &ndlp->nlp_flag) &&
++			    ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING) {
++				clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag);
+ 				ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
+ 				lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
+ 			} else {
+-				__lpfc_sli_rpi_release(vport, ndlp);
++				clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag);
+ 			}
+ 
+ 			/* The unreg_login mailbox is complete and had a
+@@ -2991,6 +2970,7 @@ lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+ {
+ 	struct lpfc_vport  *vport = pmb->vport;
+ 	struct lpfc_nodelist *ndlp;
++	bool unreg_inp;
+ 
+ 	ndlp = pmb->ctx_ndlp;
+ 	if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
+@@ -3003,20 +2983,26 @@ lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+ 					 vport, KERN_INFO,
+ 					 LOG_MBOX | LOG_SLI | LOG_NODE,
+ 					 "0010 UNREG_LOGIN vpi:x%x "
+-					 "rpi:%x DID:%x defer x%x flg x%x "
++					 "rpi:%x DID:%x defer x%x flg x%lx "
+ 					 "x%px\n",
+ 					 vport->vpi, ndlp->nlp_rpi,
+ 					 ndlp->nlp_DID, ndlp->nlp_defer_did,
+ 					 ndlp->nlp_flag,
+ 					 ndlp);
+-				ndlp->nlp_flag &= ~NLP_LOGO_ACC;
++
++				/* Cleanup the nlp_flag now that the UNREG RPI
++				 * has completed.
++				 */
++				unreg_inp = test_and_clear_bit(NLP_UNREG_INP,
++							       &ndlp->nlp_flag);
++				clear_bit(NLP_LOGO_ACC, &ndlp->nlp_flag);
+ 
+ 				/* Check to see if there are any deferred
+ 				 * events to process
+ 				 */
+-				if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
+-				    (ndlp->nlp_defer_did !=
+-				    NLP_EVT_NOTHING_PENDING)) {
++				if (unreg_inp &&
++				    ndlp->nlp_defer_did !=
++				    NLP_EVT_NOTHING_PENDING) {
+ 					lpfc_printf_vlog(
+ 						vport, KERN_INFO,
+ 						LOG_MBOX | LOG_SLI | LOG_NODE,
+@@ -3025,14 +3011,12 @@ lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+ 						"NPort x%x Data: x%x x%px\n",
+ 						ndlp->nlp_rpi, ndlp->nlp_DID,
+ 						ndlp->nlp_defer_did, ndlp);
+-					ndlp->nlp_flag &= ~NLP_UNREG_INP;
+ 					ndlp->nlp_defer_did =
+ 						NLP_EVT_NOTHING_PENDING;
+ 					lpfc_issue_els_plogi(
+ 						vport, ndlp->nlp_DID, 0);
+-				} else {
+-					__lpfc_sli_rpi_release(vport, ndlp);
+ 				}
++
+ 				lpfc_nlp_put(ndlp);
+ 			}
+ 		}
+@@ -8750,6 +8734,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
+ 				lpfc_sli_config_mbox_opcode_get(
+ 					phba, mboxq),
+ 				rc, dd);
++
+ 	/*
+ 	 * Allocate all resources (xri,rpi,vpi,vfi) now.  Subsequent
+ 	 * calls depends on these resources to complete port setup.
+@@ -8762,6 +8747,8 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
+ 		goto out_free_mbox;
+ 	}
+ 
++	lpfc_sli4_node_rpi_restore(phba);
++
+ 	lpfc_set_host_data(phba, mboxq);
+ 
+ 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+@@ -8949,7 +8936,6 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
+ 		rc = -ENODEV;
+ 		goto out_free_iocblist;
+ 	}
+-	lpfc_sli4_node_prep(phba);
+ 
+ 	if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag)) {
+ 		if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) {
+@@ -14354,9 +14340,7 @@ lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
+ 			 * an unsolicited PLOGI from the same NPortId from
+ 			 * starting another mailbox transaction.
+ 			 */
+-			spin_lock_irqsave(&ndlp->lock, iflags);
+-			ndlp->nlp_flag |= NLP_UNREG_INP;
+-			spin_unlock_irqrestore(&ndlp->lock, iflags);
++			set_bit(NLP_UNREG_INP, &ndlp->nlp_flag);
+ 			lpfc_unreg_login(phba, vport->vpi,
+ 					 pmbox->un.varWords[0], pmb);
+ 			pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
+@@ -19105,9 +19089,9 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
+ 	 * to free ndlp when transmit completes
+ 	 */
+ 	if (ndlp->nlp_state == NLP_STE_UNUSED_NODE &&
+-	    !(ndlp->nlp_flag & NLP_DROPPED) &&
++	    !test_bit(NLP_DROPPED, &ndlp->nlp_flag) &&
+ 	    !(ndlp->fc4_xpt_flags & (NVME_XPT_REGD | SCSI_XPT_REGD))) {
+-		ndlp->nlp_flag |= NLP_DROPPED;
++		set_bit(NLP_DROPPED, &ndlp->nlp_flag);
+ 		lpfc_nlp_put(ndlp);
+ 	}
+ }
+@@ -21125,11 +21109,7 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
+ 				/* Unregister the RPI when mailbox complete */
+ 				mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
+ 				restart_loop = 1;
+-				spin_unlock_irq(&phba->hbalock);
+-				spin_lock(&ndlp->lock);
+-				ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
+-				spin_unlock(&ndlp->lock);
+-				spin_lock_irq(&phba->hbalock);
++				clear_bit(NLP_IGNR_REG_CMPL, &ndlp->nlp_flag);
+ 				break;
+ 			}
+ 		}
+@@ -21144,9 +21124,7 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
+ 			ndlp = mb->ctx_ndlp;
+ 			mb->ctx_ndlp = NULL;
+ 			if (ndlp) {
+-				spin_lock(&ndlp->lock);
+-				ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
+-				spin_unlock(&ndlp->lock);
++				clear_bit(NLP_IGNR_REG_CMPL, &ndlp->nlp_flag);
+ 				lpfc_nlp_put(ndlp);
+ 			}
+ 		}
+@@ -21155,9 +21133,7 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
+ 
+ 	/* Release the ndlp with the cleaned-up active mailbox command */
+ 	if (act_mbx_ndlp) {
+-		spin_lock(&act_mbx_ndlp->lock);
+-		act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
+-		spin_unlock(&act_mbx_ndlp->lock);
++		clear_bit(NLP_IGNR_REG_CMPL, &act_mbx_ndlp->nlp_flag);
+ 		lpfc_nlp_put(act_mbx_ndlp);
+ 	}
+ }
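
The lpfc_sli4_unreg_rpi_cmpl_clr() hunk above shows a third bitop shape: capture the old value while clearing, then branch on it after further cleanup. Sketch with hypothetical names:

        bool unreg_was_pending = test_and_clear_bit(MY_FLAG_UNREG_INP, &n->flags);

        clear_bit(MY_FLAG_LOGO_ACC, &n->flags);

        /* branch later on the value the flag held when the completion arrived */
        if (unreg_was_pending && n->defer_did != NOTHING_PENDING)
        	reissue_plogi(n);	/* hypothetical helper */
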
+diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
+index 7a4d4d8e2ad55f..9e0e357633779f 100644
+--- a/drivers/scsi/lpfc/lpfc_vport.c
++++ b/drivers/scsi/lpfc/lpfc_vport.c
+@@ -496,7 +496,7 @@ lpfc_send_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+ 	    !ndlp->logo_waitq) {
+ 		ndlp->logo_waitq = &waitq;
+ 		ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+-		ndlp->nlp_flag |= NLP_ISSUE_LOGO;
++		set_bit(NLP_ISSUE_LOGO, &ndlp->nlp_flag);
+ 		ndlp->save_flags |= NLP_WAIT_FOR_LOGO;
+ 	}
+ 	spin_unlock_irq(&ndlp->lock);
+@@ -515,8 +515,8 @@ lpfc_send_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+ 	}
+ 
+ 	/* Error - clean up node flags. */
++	clear_bit(NLP_ISSUE_LOGO, &ndlp->nlp_flag);
+ 	spin_lock_irq(&ndlp->lock);
+-	ndlp->nlp_flag &= ~NLP_ISSUE_LOGO;
+ 	ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO;
+ 	spin_unlock_irq(&ndlp->lock);
+ 
+@@ -708,7 +708,7 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
+ 
+ 			lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT | LOG_ELS,
+ 					 "1829 DA_ID issue status %d. "
+-					 "SFlag x%x NState x%x, NFlag x%x "
++					 "SFlag x%x NState x%x, NFlag x%lx "
+ 					 "Rpi x%x\n",
+ 					 rc, ndlp->save_flags, ndlp->nlp_state,
+ 					 ndlp->nlp_flag, ndlp->nlp_rpi);
+diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
+index 0cd6f3e1488249..13b6cb1b93acd9 100644
+--- a/drivers/scsi/qla2xxx/qla_mbx.c
++++ b/drivers/scsi/qla2xxx/qla_mbx.c
+@@ -2147,7 +2147,7 @@ qla24xx_get_port_database(scsi_qla_host_t *vha, u16 nport_handle,
+ 
+ 	pdb_dma = dma_map_single(&vha->hw->pdev->dev, pdb,
+ 	    sizeof(*pdb), DMA_FROM_DEVICE);
+-	if (!pdb_dma) {
++	if (dma_mapping_error(&vha->hw->pdev->dev, pdb_dma)) {
+ 		ql_log(ql_log_warn, vha, 0x1116, "Failed to map dma buffer.\n");
+ 		return QLA_MEMORY_ALLOC_FAILED;
+ 	}
+diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
+index d91f54a6e752f2..97e9ca5a2a02c3 100644
+--- a/drivers/scsi/qla4xxx/ql4_os.c
++++ b/drivers/scsi/qla4xxx/ql4_os.c
+@@ -3420,6 +3420,8 @@ static int qla4xxx_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
+ 		task_data->data_dma = dma_map_single(&ha->pdev->dev, task->data,
+ 						     task->data_count,
+ 						     DMA_TO_DEVICE);
++		if (dma_mapping_error(&ha->pdev->dev, task_data->data_dma))
++			return -ENOMEM;
+ 	}
+ 
+ 	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hrd %d\n",
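
The qla2xxx and qla4xxx hunks above fix the same bug class: dma_map_single() returns a dma_addr_t, and a value of 0 can be a perfectly valid bus address, so testing the handle with `!` (or not testing at all) is wrong; dma_mapping_error() is the one correct check. A self-contained sketch of the corrected shape, with a hypothetical wrapper:

        #include <linux/dma-mapping.h>

        static int map_for_device(struct device *dev, void *buf, size_t len,
        			  dma_addr_t *handle)
        {
        	*handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

        	/* "if (!*handle)" would be wrong: 0 can be a valid bus address */
        	if (dma_mapping_error(dev, *handle))
        		return -ENOMEM;	/* as the qla4xxx hunk now returns */
        	return 0;
        }
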
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index 8947dab132d789..86dde3e7debba4 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -3388,7 +3388,7 @@ static void sd_read_block_limits_ext(struct scsi_disk *sdkp)
+ 
+ 	rcu_read_lock();
+ 	vpd = rcu_dereference(sdkp->device->vpd_pgb7);
+-	if (vpd && vpd->len >= 2)
++	if (vpd && vpd->len >= 6)
+ 		sdkp->rscs = vpd->data[5] & 1;
+ 	rcu_read_unlock();
+ }
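
The sd.c one-liner is a bounds fix with simple arithmetic behind it: the code reads vpd->data[5], the sixth byte, so the guard must require at least 6 valid bytes; the old `len >= 2` permitted a 4-byte out-of-bounds read on short VPD pages. In general, reading data[N] needs len >= N + 1:

        if (vpd && vpd->len >= 6)
        	sdkp->rscs = vpd->data[5] & 1;
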
+diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
+index 7c43df252328dc..e26363ae748909 100644
+--- a/drivers/spi/spi-fsl-dspi.c
++++ b/drivers/spi/spi-fsl-dspi.c
+@@ -983,11 +983,20 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr,
+ 		if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
+ 			status = dspi_dma_xfer(dspi);
+ 		} else {
++			/*
++			 * Reinitialize the completion before transferring data
++			 * to avoid the case where it might remain in the done
++			 * state due to a spurious interrupt from a previous
++			 * transfer. This could falsely signal that the current
++			 * transfer has completed.
++			 */
++			if (dspi->irq)
++				reinit_completion(&dspi->xfer_done);
++
+ 			dspi_fifo_write(dspi);
+ 
+ 			if (dspi->irq) {
+ 				wait_for_completion(&dspi->xfer_done);
+-				reinit_completion(&dspi->xfer_done);
+ 			} else {
+ 				do {
+ 					status = dspi_poll(dspi);
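
The spi-fsl-dspi fix encodes an ordering rule worth stating on its own: reinit_completion() must run before the operation that will complete() it is kicked off, otherwise a stale completion left over from a spurious interrupt satisfies the next wait immediately. Sketch of the corrected order, helper names hypothetical:

        #include <linux/completion.h>

        reinit_completion(&xfer_done);	/* clear any stale "done" state first */
        kick_off_transfer();		/* hypothetical; IRQ handler calls complete(&xfer_done) */
        wait_for_completion(&xfer_done);
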
+diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
+index 4f4ad6af416c8f..47fe50b80c2294 100644
+--- a/drivers/target/target_core_pr.c
++++ b/drivers/target/target_core_pr.c
+@@ -1842,7 +1842,9 @@ core_scsi3_decode_spec_i_port(
+ 		}
+ 
+ 		kmem_cache_free(t10_pr_reg_cache, dest_pr_reg);
+-		core_scsi3_lunacl_undepend_item(dest_se_deve);
++
++		if (dest_se_deve)
++			core_scsi3_lunacl_undepend_item(dest_se_deve);
+ 
+ 		if (is_local)
+ 			continue;
+diff --git a/drivers/tee/optee/ffa_abi.c b/drivers/tee/optee/ffa_abi.c
+index f3af5666bb1182..f9ef7d94cebd7a 100644
+--- a/drivers/tee/optee/ffa_abi.c
++++ b/drivers/tee/optee/ffa_abi.c
+@@ -728,12 +728,21 @@ static bool optee_ffa_exchange_caps(struct ffa_device *ffa_dev,
+ 	return true;
+ }
+ 
++static void notif_work_fn(struct work_struct *work)
++{
++	struct optee_ffa *optee_ffa = container_of(work, struct optee_ffa,
++						   notif_work);
++	struct optee *optee = container_of(optee_ffa, struct optee, ffa);
++
++	optee_do_bottom_half(optee->ctx);
++}
++
+ static void notif_callback(int notify_id, void *cb_data)
+ {
+ 	struct optee *optee = cb_data;
+ 
+ 	if (notify_id == optee->ffa.bottom_half_value)
+-		optee_do_bottom_half(optee->ctx);
++		queue_work(optee->ffa.notif_wq, &optee->ffa.notif_work);
+ 	else
+ 		optee_notif_send(optee, notify_id);
+ }
+@@ -817,9 +826,11 @@ static void optee_ffa_remove(struct ffa_device *ffa_dev)
+ 	struct optee *optee = ffa_dev_get_drvdata(ffa_dev);
+ 	u32 bottom_half_id = optee->ffa.bottom_half_value;
+ 
+-	if (bottom_half_id != U32_MAX)
++	if (bottom_half_id != U32_MAX) {
+ 		ffa_dev->ops->notifier_ops->notify_relinquish(ffa_dev,
+ 							      bottom_half_id);
++		destroy_workqueue(optee->ffa.notif_wq);
++	}
+ 	optee_remove_common(optee);
+ 
+ 	mutex_destroy(&optee->ffa.mutex);
+@@ -835,6 +846,13 @@ static int optee_ffa_async_notif_init(struct ffa_device *ffa_dev,
+ 	u32 notif_id = 0;
+ 	int rc;
+ 
++	INIT_WORK(&optee->ffa.notif_work, notif_work_fn);
++	optee->ffa.notif_wq = create_workqueue("optee_notification");
++	if (!optee->ffa.notif_wq) {
++		rc = -EINVAL;
++		goto err;
++	}
++
+ 	while (true) {
+ 		rc = ffa_dev->ops->notifier_ops->notify_request(ffa_dev,
+ 								is_per_vcpu,
+@@ -851,19 +869,24 @@ static int optee_ffa_async_notif_init(struct ffa_device *ffa_dev,
+ 		 * notifications in that case.
+ 		 */
+ 		if (rc != -EACCES)
+-			return rc;
++			goto err_wq;
+ 		notif_id++;
+ 		if (notif_id >= OPTEE_FFA_MAX_ASYNC_NOTIF_VALUE)
+-			return rc;
++			goto err_wq;
+ 	}
+ 	optee->ffa.bottom_half_value = notif_id;
+ 
+ 	rc = enable_async_notif(optee);
+-	if (rc < 0) {
+-		ffa_dev->ops->notifier_ops->notify_relinquish(ffa_dev,
+-							      notif_id);
+-		optee->ffa.bottom_half_value = U32_MAX;
+-	}
++	if (rc < 0)
++		goto err_rel;
++
++	return 0;
++err_rel:
++	ffa_dev->ops->notifier_ops->notify_relinquish(ffa_dev, notif_id);
++err_wq:
++	destroy_workqueue(optee->ffa.notif_wq);
++err:
++	optee->ffa.bottom_half_value = U32_MAX;
+ 
+ 	return rc;
+ }
+diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h
+index dc0f355ef72aae..9526087f0e680f 100644
+--- a/drivers/tee/optee/optee_private.h
++++ b/drivers/tee/optee/optee_private.h
+@@ -165,6 +165,8 @@ struct optee_ffa {
+ 	/* Serializes access to @global_ids */
+ 	struct mutex mutex;
+ 	struct rhashtable global_ids;
++	struct workqueue_struct *notif_wq;
++	struct work_struct notif_work;
+ };
+ 
+ struct optee;
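
The OP-TEE hunks above stop calling optee_do_bottom_half() directly from notif_callback() and defer it to a dedicated workqueue instead, tying the queue's lifetime to the async-notification setup and the remove/error paths. Presumably the callback can fire in a context where the bottom half must not run inline; the deferral pattern, reduced to a sketch with hypothetical names, is:

#include <linux/workqueue.h>

struct notif_ctx {
	struct workqueue_struct *wq;	/* from alloc/create_workqueue() */
	struct work_struct work;	/* INIT_WORK()ed at setup time */
};

static void bottom_half_fn(struct work_struct *work)
{
	struct notif_ctx *ctx = container_of(work, struct notif_ctx, work);

	/* Sleeping is safe here, unlike in the notification callback. */
	(void)ctx;
}

static void notif_cb(struct notif_ctx *ctx)
{
	queue_work(ctx->wq, &ctx->work);	/* defer, don't run inline */
}
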
+diff --git a/drivers/ufs/core/ufs-sysfs.c b/drivers/ufs/core/ufs-sysfs.c
+index 796e37a1d859f2..f8397ef3cf8dfb 100644
+--- a/drivers/ufs/core/ufs-sysfs.c
++++ b/drivers/ufs/core/ufs-sysfs.c
+@@ -1608,7 +1608,7 @@ UFS_UNIT_DESC_PARAM(logical_block_size, _LOGICAL_BLK_SIZE, 1);
+ UFS_UNIT_DESC_PARAM(logical_block_count, _LOGICAL_BLK_COUNT, 8);
+ UFS_UNIT_DESC_PARAM(erase_block_size, _ERASE_BLK_SIZE, 4);
+ UFS_UNIT_DESC_PARAM(provisioning_type, _PROVISIONING_TYPE, 1);
+-UFS_UNIT_DESC_PARAM(physical_memory_resourse_count, _PHY_MEM_RSRC_CNT, 8);
++UFS_UNIT_DESC_PARAM(physical_memory_resource_count, _PHY_MEM_RSRC_CNT, 8);
+ UFS_UNIT_DESC_PARAM(context_capabilities, _CTX_CAPABILITIES, 2);
+ UFS_UNIT_DESC_PARAM(large_unit_granularity, _LARGE_UNIT_SIZE_M1, 1);
+ UFS_UNIT_DESC_PARAM(wb_buf_alloc_units, _WB_BUF_ALLOC_UNITS, 4);
+@@ -1625,7 +1625,7 @@ static struct attribute *ufs_sysfs_unit_descriptor[] = {
+ 	&dev_attr_logical_block_count.attr,
+ 	&dev_attr_erase_block_size.attr,
+ 	&dev_attr_provisioning_type.attr,
+-	&dev_attr_physical_memory_resourse_count.attr,
++	&dev_attr_physical_memory_resource_count.attr,
+ 	&dev_attr_context_capabilities.attr,
+ 	&dev_attr_large_unit_granularity.attr,
+ 	&dev_attr_wb_buf_alloc_units.attr,
+diff --git a/drivers/usb/cdns3/cdnsp-debug.h b/drivers/usb/cdns3/cdnsp-debug.h
+index cd138acdcce165..86860686d8363e 100644
+--- a/drivers/usb/cdns3/cdnsp-debug.h
++++ b/drivers/usb/cdns3/cdnsp-debug.h
+@@ -327,12 +327,13 @@ static inline const char *cdnsp_decode_trb(char *str, size_t size, u32 field0,
+ 	case TRB_RESET_EP:
+ 	case TRB_HALT_ENDPOINT:
+ 		ret = scnprintf(str, size,
+-				"%s: ep%d%s(%d) ctx %08x%08x slot %ld flags %c",
++				"%s: ep%d%s(%d) ctx %08x%08x slot %ld flags %c %c",
+ 				cdnsp_trb_type_string(type),
+ 				ep_num, ep_id % 2 ? "out" : "in",
+ 				TRB_TO_EP_INDEX(field3), field1, field0,
+ 				TRB_TO_SLOT_ID(field3),
+-				field3 & TRB_CYCLE ? 'C' : 'c');
++				field3 & TRB_CYCLE ? 'C' : 'c',
++				field3 & TRB_ESP ? 'P' : 'p');
+ 		break;
+ 	case TRB_STOP_RING:
+ 		ret = scnprintf(str, size,
+diff --git a/drivers/usb/cdns3/cdnsp-ep0.c b/drivers/usb/cdns3/cdnsp-ep0.c
+index f317d3c8478108..5cd9b898ce971f 100644
+--- a/drivers/usb/cdns3/cdnsp-ep0.c
++++ b/drivers/usb/cdns3/cdnsp-ep0.c
+@@ -414,6 +414,7 @@ static int cdnsp_ep0_std_request(struct cdnsp_device *pdev,
+ void cdnsp_setup_analyze(struct cdnsp_device *pdev)
+ {
+ 	struct usb_ctrlrequest *ctrl = &pdev->setup;
++	struct cdnsp_ep *pep;
+ 	int ret = -EINVAL;
+ 	u16 len;
+ 
+@@ -427,10 +428,21 @@ void cdnsp_setup_analyze(struct cdnsp_device *pdev)
+ 		goto out;
+ 	}
+ 
++	pep = &pdev->eps[0];
++
+ 	/* Restore the ep0 to Stopped/Running state. */
+-	if (pdev->eps[0].ep_state & EP_HALTED) {
+-		trace_cdnsp_ep0_halted("Restore to normal state");
+-		cdnsp_halt_endpoint(pdev, &pdev->eps[0], 0);
++	if (pep->ep_state & EP_HALTED) {
++		if (GET_EP_CTX_STATE(pep->out_ctx) == EP_STATE_HALTED)
++			cdnsp_halt_endpoint(pdev, pep, 0);
++
++		/*
++		 * The Halt Endpoint Command for ep0 on SSP2 preserves the
++		 * current endpoint state, and the driver has to synchronize
++		 * the software endpoint state with the endpoint output
++		 * context state.

++		 */
++		pep->ep_state &= ~EP_HALTED;
++		pep->ep_state |= EP_STOPPED;
+ 	}
+ 
+ 	/*
+diff --git a/drivers/usb/cdns3/cdnsp-gadget.h b/drivers/usb/cdns3/cdnsp-gadget.h
+index 2afa3e558f85ca..a91cca509db080 100644
+--- a/drivers/usb/cdns3/cdnsp-gadget.h
++++ b/drivers/usb/cdns3/cdnsp-gadget.h
+@@ -987,6 +987,12 @@ enum cdnsp_setup_dev {
+ #define STREAM_ID_FOR_TRB(p)		((((p)) << 16) & GENMASK(31, 16))
+ #define SCT_FOR_TRB(p)			(((p) << 1) & 0x7)
+ 
++/*
++ * Halt Endpoint Command TRB field.
++ * The ESP bit only exists in the SSP2 controller.
++ */
++#define TRB_ESP				BIT(9)
++
+ /* Link TRB specific fields. */
+ #define TRB_TC				BIT(1)
+ 
+diff --git a/drivers/usb/cdns3/cdnsp-ring.c b/drivers/usb/cdns3/cdnsp-ring.c
+index fd06cb85c4ea84..0758f171f73ecf 100644
+--- a/drivers/usb/cdns3/cdnsp-ring.c
++++ b/drivers/usb/cdns3/cdnsp-ring.c
+@@ -772,7 +772,9 @@ static int cdnsp_update_port_id(struct cdnsp_device *pdev, u32 port_id)
+ 	}
+ 
+ 	if (port_id != old_port) {
+-		cdnsp_disable_slot(pdev);
++		if (pdev->slot_id)
++			cdnsp_disable_slot(pdev);
++
+ 		pdev->active_port = port;
+ 		cdnsp_enable_slot(pdev);
+ 	}
+@@ -2483,7 +2485,8 @@ void cdnsp_queue_halt_endpoint(struct cdnsp_device *pdev, unsigned int ep_index)
+ {
+ 	cdnsp_queue_command(pdev, 0, 0, 0, TRB_TYPE(TRB_HALT_ENDPOINT) |
+ 			    SLOT_ID_FOR_TRB(pdev->slot_id) |
+-			    EP_ID_FOR_TRB(ep_index));
++			    EP_ID_FOR_TRB(ep_index) |
++			    (!ep_index ? TRB_ESP : 0));
+ }
+ 
+ void cdnsp_force_header_wakeup(struct cdnsp_device *pdev, int intf_num)
+diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
+index fd6032874bf33a..8f73bd5057a645 100644
+--- a/drivers/usb/chipidea/udc.c
++++ b/drivers/usb/chipidea/udc.c
+@@ -2362,6 +2362,10 @@ static void udc_suspend(struct ci_hdrc *ci)
+ 	 */
+ 	if (hw_read(ci, OP_ENDPTLISTADDR, ~0) == 0)
+ 		hw_write(ci, OP_ENDPTLISTADDR, ~0, ~0);
++
++	if (ci->gadget.connected &&
++	    (!ci->suspended || !device_may_wakeup(ci->dev)))
++		usb_gadget_disconnect(&ci->gadget);
+ }
+ 
+ static void udc_resume(struct ci_hdrc *ci, bool power_lost)
+@@ -2372,6 +2376,9 @@ static void udc_resume(struct ci_hdrc *ci, bool power_lost)
+ 					OTGSC_BSVIS | OTGSC_BSVIE);
+ 		if (ci->vbus_active)
+ 			usb_gadget_vbus_disconnect(&ci->gadget);
++	} else if (ci->vbus_active && ci->driver &&
++		   !ci->gadget.connected) {
++		usb_gadget_connect(&ci->gadget);
+ 	}
+ 
+ 	/* Restore value 0 if it was set for power lost check */
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index da3d0e525b64e9..da6da5ec42372f 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -2336,6 +2336,9 @@ void usb_disconnect(struct usb_device **pdev)
+ 	usb_remove_ep_devs(&udev->ep0);
+ 	usb_unlock_device(udev);
+ 
++	if (udev->usb4_link)
++		device_link_del(udev->usb4_link);
++
+ 	/* Unregister the device.  The device driver is responsible
+ 	 * for de-configuring the device and invoking the remove-device
+ 	 * notifier chain (used by usbfs and possibly others).
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index c979ecd0169a2d..46db600fdd824e 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -227,7 +227,8 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	{ USB_DEVICE(0x046a, 0x0023), .driver_info = USB_QUIRK_RESET_RESUME },
+ 
+ 	/* Logitech HD Webcam C270 */
+-	{ USB_DEVICE(0x046d, 0x0825), .driver_info = USB_QUIRK_RESET_RESUME },
++	{ USB_DEVICE(0x046d, 0x0825), .driver_info = USB_QUIRK_RESET_RESUME |
++		USB_QUIRK_NO_LPM},
+ 
+ 	/* Logitech HD Pro Webcams C920, C920-C, C922, C925e and C930e */
+ 	{ USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
+diff --git a/drivers/usb/core/usb-acpi.c b/drivers/usb/core/usb-acpi.c
+index 494e21a11cd265..3bc68534dbcd37 100644
+--- a/drivers/usb/core/usb-acpi.c
++++ b/drivers/usb/core/usb-acpi.c
+@@ -157,7 +157,7 @@ EXPORT_SYMBOL_GPL(usb_acpi_set_power_state);
+  */
+ static int usb_acpi_add_usb4_devlink(struct usb_device *udev)
+ {
+-	const struct device_link *link;
++	struct device_link *link;
+ 	struct usb_port *port_dev;
+ 	struct usb_hub *hub;
+ 
+@@ -188,6 +188,8 @@ static int usb_acpi_add_usb4_devlink(struct usb_device *udev)
+ 	dev_dbg(&port_dev->dev, "Created device link from %s to %s\n",
+ 		dev_name(&port_dev->child->dev), dev_name(nhi_fwnode->dev));
+ 
++	udev->usb4_link = link;
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
+index 7820d6815bedd5..f4209726f8ecfd 100644
+--- a/drivers/usb/dwc3/core.c
++++ b/drivers/usb/dwc3/core.c
+@@ -2364,6 +2364,7 @@ static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
+ {
+ 	u32 reg;
+ 	int i;
++	int ret;
+ 
+ 	if (!pm_runtime_suspended(dwc->dev) && !PMSG_IS_AUTO(msg)) {
+ 		dwc->susphy_state = (dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)) &
+@@ -2382,7 +2383,9 @@ static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
+ 	case DWC3_GCTL_PRTCAP_DEVICE:
+ 		if (pm_runtime_suspended(dwc->dev))
+ 			break;
+-		dwc3_gadget_suspend(dwc);
++		ret = dwc3_gadget_suspend(dwc);
++		if (ret)
++			return ret;
+ 		synchronize_irq(dwc->irq_gadget);
+ 		dwc3_core_exit(dwc);
+ 		break;
+@@ -2417,7 +2420,9 @@ static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
+ 			break;
+ 
+ 		if (dwc->current_otg_role == DWC3_OTG_ROLE_DEVICE) {
+-			dwc3_gadget_suspend(dwc);
++			ret = dwc3_gadget_suspend(dwc);
++			if (ret)
++				return ret;
+ 			synchronize_irq(dwc->irq_gadget);
+ 		}
+ 
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index 76e6000c65c789..37ae1dd3345d0d 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -4788,26 +4788,22 @@ int dwc3_gadget_suspend(struct dwc3 *dwc)
+ 	int ret;
+ 
+ 	ret = dwc3_gadget_soft_disconnect(dwc);
+-	if (ret)
+-		goto err;
+-
+-	spin_lock_irqsave(&dwc->lock, flags);
+-	if (dwc->gadget_driver)
+-		dwc3_disconnect_gadget(dwc);
+-	spin_unlock_irqrestore(&dwc->lock, flags);
+-
+-	return 0;
+-
+-err:
+ 	/*
+ 	 * Attempt to reset the controller's state. Likely no
+ 	 * communication can be established until the host
+ 	 * performs a port reset.
+ 	 */
+-	if (dwc->softconnect)
++	if (ret && dwc->softconnect) {
+ 		dwc3_gadget_soft_connect(dwc);
++		return -EAGAIN;
++	}
+ 
+-	return ret;
++	spin_lock_irqsave(&dwc->lock, flags);
++	if (dwc->gadget_driver)
++		dwc3_disconnect_gadget(dwc);
++	spin_unlock_irqrestore(&dwc->lock, flags);
++
++	return 0;
+ }
+ 
+ int dwc3_gadget_resume(struct dwc3 *dwc)
+diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c
+index d35f3a18dd13b0..bdc664ad6a934c 100644
+--- a/drivers/usb/host/xhci-dbgcap.c
++++ b/drivers/usb/host/xhci-dbgcap.c
+@@ -651,6 +651,10 @@ static void xhci_dbc_stop(struct xhci_dbc *dbc)
+ 	case DS_DISABLED:
+ 		return;
+ 	case DS_CONFIGURED:
++		spin_lock(&dbc->lock);
++		xhci_dbc_flush_requests(dbc);
++		spin_unlock(&dbc->lock);
++
+ 		if (dbc->driver->disconnect)
+ 			dbc->driver->disconnect(dbc);
+ 		break;
+diff --git a/drivers/usb/host/xhci-dbgtty.c b/drivers/usb/host/xhci-dbgtty.c
+index d719c16ea30b57..2b8558005cbb00 100644
+--- a/drivers/usb/host/xhci-dbgtty.c
++++ b/drivers/usb/host/xhci-dbgtty.c
+@@ -585,6 +585,7 @@ int dbc_tty_init(void)
+ 	dbc_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
+ 	dbc_tty_driver->subtype = SERIAL_TYPE_NORMAL;
+ 	dbc_tty_driver->init_termios = tty_std_termios;
++	dbc_tty_driver->init_termios.c_lflag &= ~ECHO;
+ 	dbc_tty_driver->init_termios.c_cflag =
+ 			B9600 | CS8 | CREAD | HUPCL | CLOCAL;
+ 	dbc_tty_driver->init_termios.c_ispeed = 9600;
+diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
+index f9c51e0f2e37c6..91178b8dbbf086 100644
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -1426,6 +1426,10 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
+ 	/* Periodic endpoint bInterval limit quirk */
+ 	if (usb_endpoint_xfer_int(&ep->desc) ||
+ 	    usb_endpoint_xfer_isoc(&ep->desc)) {
++		if ((xhci->quirks & XHCI_LIMIT_ENDPOINT_INTERVAL_9) &&
++		    interval >= 9) {
++			interval = 8;
++		}
+ 		if ((xhci->quirks & XHCI_LIMIT_ENDPOINT_INTERVAL_7) &&
+ 		    udev->speed >= USB_SPEED_HIGH &&
+ 		    interval >= 7) {
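
For context on the new XHCI_LIMIT_ENDPOINT_INTERVAL_9 quirk: xHCI encodes periodic endpoint intervals as 2^n microframes of 125 us each, so clamping n from 9 to 8 halves the service interval from 64 ms to 32 ms on hosts that set the quirk (the AMD and ATI IDs added in xhci-pci.c below):

/* xHCI periodic interval: 2^n microframes of 125 us. */
static unsigned int esit_us(unsigned int n)
{
	return (1u << n) * 125;	/* n = 9 -> 64000 us, n = 8 -> 32000 us */
}
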
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index 1b033c8ce188ef..234efb9731b2cf 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -71,12 +71,22 @@
+ #define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_XHCI		0x15ec
+ #define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_XHCI		0x15f0
+ 
++#define PCI_DEVICE_ID_AMD_ARIEL_TYPEC_XHCI		0x13ed
++#define PCI_DEVICE_ID_AMD_ARIEL_TYPEA_XHCI		0x13ee
++#define PCI_DEVICE_ID_AMD_STARSHIP_XHCI			0x148c
++#define PCI_DEVICE_ID_AMD_FIREFLIGHT_15D4_XHCI		0x15d4
++#define PCI_DEVICE_ID_AMD_FIREFLIGHT_15D5_XHCI		0x15d5
++#define PCI_DEVICE_ID_AMD_RAVEN_15E0_XHCI		0x15e0
++#define PCI_DEVICE_ID_AMD_RAVEN_15E1_XHCI		0x15e1
++#define PCI_DEVICE_ID_AMD_RAVEN2_XHCI			0x15e5
+ #define PCI_DEVICE_ID_AMD_RENOIR_XHCI			0x1639
+ #define PCI_DEVICE_ID_AMD_PROMONTORYA_4			0x43b9
+ #define PCI_DEVICE_ID_AMD_PROMONTORYA_3			0x43ba
+ #define PCI_DEVICE_ID_AMD_PROMONTORYA_2			0x43bb
+ #define PCI_DEVICE_ID_AMD_PROMONTORYA_1			0x43bc
+ 
++#define PCI_DEVICE_ID_ATI_NAVI10_7316_XHCI		0x7316
++
+ #define PCI_DEVICE_ID_ASMEDIA_1042_XHCI			0x1042
+ #define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI		0x1142
+ #define PCI_DEVICE_ID_ASMEDIA_1142_XHCI			0x1242
+@@ -286,6 +296,21 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ 	if (pdev->vendor == PCI_VENDOR_ID_NEC)
+ 		xhci->quirks |= XHCI_NEC_HOST;
+ 
++	if (pdev->vendor == PCI_VENDOR_ID_AMD &&
++	    (pdev->device == PCI_DEVICE_ID_AMD_ARIEL_TYPEC_XHCI ||
++	     pdev->device == PCI_DEVICE_ID_AMD_ARIEL_TYPEA_XHCI ||
++	     pdev->device == PCI_DEVICE_ID_AMD_STARSHIP_XHCI ||
++	     pdev->device == PCI_DEVICE_ID_AMD_FIREFLIGHT_15D4_XHCI ||
++	     pdev->device == PCI_DEVICE_ID_AMD_FIREFLIGHT_15D5_XHCI ||
++	     pdev->device == PCI_DEVICE_ID_AMD_RAVEN_15E0_XHCI ||
++	     pdev->device == PCI_DEVICE_ID_AMD_RAVEN_15E1_XHCI ||
++	     pdev->device == PCI_DEVICE_ID_AMD_RAVEN2_XHCI))
++		xhci->quirks |= XHCI_LIMIT_ENDPOINT_INTERVAL_9;
++
++	if (pdev->vendor == PCI_VENDOR_ID_ATI &&
++	    pdev->device == PCI_DEVICE_ID_ATI_NAVI10_7316_XHCI)
++		xhci->quirks |= XHCI_LIMIT_ENDPOINT_INTERVAL_9;
++
+ 	if (pdev->vendor == PCI_VENDOR_ID_AMD && xhci->hci_version == 0x96)
+ 		xhci->quirks |= XHCI_AMD_0x96_HOST;
+ 
+diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
+index 2379a67e34e125..3a9bdf91675568 100644
+--- a/drivers/usb/host/xhci-plat.c
++++ b/drivers/usb/host/xhci-plat.c
+@@ -326,7 +326,8 @@ int xhci_plat_probe(struct platform_device *pdev, struct device *sysdev, const s
+ 	}
+ 
+ 	usb3_hcd = xhci_get_usb3_hcd(xhci);
+-	if (usb3_hcd && HCC_MAX_PSA(xhci->hcc_params) >= 4)
++	if (usb3_hcd && HCC_MAX_PSA(xhci->hcc_params) >= 4 &&
++	    !(xhci->quirks & XHCI_BROKEN_STREAMS))
+ 		usb3_hcd->can_do_streams = 1;
+ 
+ 	if (xhci->shared_hcd) {
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index fbc8419a547303..2ff8787f753c90 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -461,9 +461,8 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags)
+ 	 * In the future we should distinguish between -ENODEV and -ETIMEDOUT
+ 	 * and try to recover a -ETIMEDOUT with a host controller reset.
+ 	 */
+-	ret = xhci_handshake_check_state(xhci, &xhci->op_regs->cmd_ring,
+-			CMD_RING_RUNNING, 0, 5 * 1000 * 1000,
+-			XHCI_STATE_REMOVING);
++	ret = xhci_handshake(&xhci->op_regs->cmd_ring,
++			CMD_RING_RUNNING, 0, 5 * 1000 * 1000);
+ 	if (ret < 0) {
+ 		xhci_err(xhci, "Abort failed to stop command ring: %d\n", ret);
+ 		xhci_halt(xhci);
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 799941b6ad6c6a..09a5a660496205 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -82,29 +82,6 @@ int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, u64 timeout_us)
+ 	return ret;
+ }
+ 
+-/*
+- * xhci_handshake_check_state - same as xhci_handshake but takes an additional
+- * exit_state parameter, and bails out with an error immediately when xhc_state
+- * has exit_state flag set.
+- */
+-int xhci_handshake_check_state(struct xhci_hcd *xhci, void __iomem *ptr,
+-		u32 mask, u32 done, int usec, unsigned int exit_state)
+-{
+-	u32	result;
+-	int	ret;
+-
+-	ret = readl_poll_timeout_atomic(ptr, result,
+-				(result & mask) == done ||
+-				result == U32_MAX ||
+-				xhci->xhc_state & exit_state,
+-				1, usec);
+-
+-	if (result == U32_MAX || xhci->xhc_state & exit_state)
+-		return -ENODEV;
+-
+-	return ret;
+-}
+-
+ /*
+  * Disable interrupts and begin the xHCI halting process.
+  */
+@@ -225,8 +202,7 @@ int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us)
+ 	if (xhci->quirks & XHCI_INTEL_HOST)
+ 		udelay(1000);
+ 
+-	ret = xhci_handshake_check_state(xhci, &xhci->op_regs->command,
+-				CMD_RESET, 0, timeout_us, XHCI_STATE_REMOVING);
++	ret = xhci_handshake(&xhci->op_regs->command, CMD_RESET, 0, timeout_us);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -1094,7 +1070,10 @@ int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg)
+ 		xhci_dbg(xhci, "Stop HCD\n");
+ 		xhci_halt(xhci);
+ 		xhci_zero_64b_regs(xhci);
+-		retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC);
++		if (xhci->xhc_state & XHCI_STATE_REMOVING)
++			retval = -ENODEV;
++		else
++			retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC);
+ 		spin_unlock_irq(&xhci->lock);
+ 		if (retval)
+ 			return retval;
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index c4d5b90ef90a86..11580495e09c12 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -1626,6 +1626,7 @@ struct xhci_hcd {
+ #define XHCI_WRITE_64_HI_LO	BIT_ULL(47)
+ #define XHCI_CDNS_SCTX_QUIRK	BIT_ULL(48)
+ #define XHCI_ETRON_HOST	BIT_ULL(49)
++#define XHCI_LIMIT_ENDPOINT_INTERVAL_9 BIT_ULL(50)
+ 
+ 	unsigned int		num_active_eps;
+ 	unsigned int		limit_active_eps;
+@@ -1846,8 +1847,6 @@ void xhci_remove_secondary_interrupter(struct usb_hcd
+ /* xHCI host controller glue */
+ typedef void (*xhci_get_quirks_t)(struct device *, struct xhci_hcd *);
+ int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, u64 timeout_us);
+-int xhci_handshake_check_state(struct xhci_hcd *xhci, void __iomem *ptr,
+-		u32 mask, u32 done, int usec, unsigned int exit_state);
+ void xhci_quiesce(struct xhci_hcd *xhci);
+ int xhci_halt(struct xhci_hcd *xhci);
+ int xhci_start(struct xhci_hcd *xhci);
+diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c
+index 4976a7238b287d..6964f403a2d535 100644
+--- a/drivers/usb/typec/altmodes/displayport.c
++++ b/drivers/usb/typec/altmodes/displayport.c
+@@ -394,8 +394,7 @@ static int dp_altmode_vdm(struct typec_altmode *alt,
+ 	case CMDT_RSP_NAK:
+ 		switch (cmd) {
+ 		case DP_CMD_STATUS_UPDATE:
+-			if (typec_altmode_exit(alt))
+-				dev_err(&dp->alt->dev, "Exit Mode Failed!\n");
++			dp->state = DP_STATE_EXIT;
+ 			break;
+ 		case DP_CMD_CONFIGURE:
+ 			dp->data.conf = 0;
+@@ -677,7 +676,7 @@ static ssize_t pin_assignment_show(struct device *dev,
+ 
+ 	assignments = get_current_pin_assignments(dp);
+ 
+-	for (i = 0; assignments; assignments >>= 1, i++) {
++	for (i = 0; assignments && i < DP_PIN_ASSIGN_MAX; assignments >>= 1, i++) {
+ 		if (assignments & 1) {
+ 			if (i == cur)
+ 				len += sprintf(buf + len, "[%s] ",
+diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
+index 68300fcd3c41b5..dda8cb3262e0b2 100644
+--- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
++++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
+@@ -554,12 +554,6 @@ static int vf_qm_state_save(struct hisi_acc_vf_core_device *hisi_acc_vdev,
+ 	vf_data->vf_qm_state = QM_READY;
+ 	hisi_acc_vdev->vf_qm_state = vf_data->vf_qm_state;
+ 
+-	ret = vf_qm_cache_wb(vf_qm);
+-	if (ret) {
+-		dev_err(dev, "failed to writeback QM Cache!\n");
+-		return ret;
+-	}
+-
+ 	ret = qm_get_regs(vf_qm, vf_data);
+ 	if (ret)
+ 		return -EINVAL;
+@@ -985,6 +979,13 @@ static int hisi_acc_vf_stop_device(struct hisi_acc_vf_core_device *hisi_acc_vdev
+ 		dev_err(dev, "failed to check QM INT state!\n");
+ 		return ret;
+ 	}
++
++	ret = vf_qm_cache_wb(vf_qm);
++	if (ret) {
++		dev_err(dev, "failed to writeback QM cache!\n");
++		return ret;
++	}
++
+ 	return 0;
+ }
+ 
+@@ -1358,6 +1359,7 @@ static void hisi_acc_vfio_pci_close_device(struct vfio_device *core_vdev)
+ 			struct hisi_acc_vf_core_device, core_device.vdev);
+ 	struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
+ 
++	hisi_acc_vf_disable_fds(hisi_acc_vdev);
+ 	iounmap(vf_qm->io_base);
+ 	vfio_pci_core_close_device(core_vdev);
+ }
+diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c
+index 42bd1cb7c9cdd4..35f765610802ab 100644
+--- a/fs/anon_inodes.c
++++ b/fs/anon_inodes.c
+@@ -55,25 +55,37 @@ static struct file_system_type anon_inode_fs_type = {
+ 	.kill_sb	= kill_anon_super,
+ };
+ 
+-static struct inode *anon_inode_make_secure_inode(
+-	const char *name,
+-	const struct inode *context_inode)
++/**
++ * anon_inode_make_secure_inode - allocate an anonymous inode with security context
++ * @sb:		[in]	Superblock to allocate from
++ * @name:	[in]	Name of the class of the new file (e.g., "secretmem")
++ * @context_inode:
++ *		[in]	Optional parent inode for security inheritance
++ *
++ * The function ensures proper security initialization through the LSM hook
++ * security_inode_init_security_anon().
++ *
++ * Return:	Pointer to new inode on success, ERR_PTR on failure.
++ */
++struct inode *anon_inode_make_secure_inode(struct super_block *sb, const char *name,
++					   const struct inode *context_inode)
+ {
+ 	struct inode *inode;
+-	const struct qstr qname = QSTR_INIT(name, strlen(name));
+ 	int error;
+ 
+-	inode = alloc_anon_inode(anon_inode_mnt->mnt_sb);
++	inode = alloc_anon_inode(sb);
+ 	if (IS_ERR(inode))
+ 		return inode;
+ 	inode->i_flags &= ~S_PRIVATE;
+-	error =	security_inode_init_security_anon(inode, &qname, context_inode);
++	error =	security_inode_init_security_anon(inode, &QSTR(name),
++						  context_inode);
+ 	if (error) {
+ 		iput(inode);
+ 		return ERR_PTR(error);
+ 	}
+ 	return inode;
+ }
++EXPORT_SYMBOL_GPL_FOR_MODULES(anon_inode_make_secure_inode, "kvm");
+ 
+ static struct file *__anon_inode_getfile(const char *name,
+ 					 const struct file_operations *fops,
+@@ -88,7 +100,8 @@ static struct file *__anon_inode_getfile(const char *name,
+ 		return ERR_PTR(-ENOENT);
+ 
+ 	if (make_inode) {
+-		inode =	anon_inode_make_secure_inode(name, context_inode);
++		inode =	anon_inode_make_secure_inode(anon_inode_mnt->mnt_sb,
++						     name, context_inode);
+ 		if (IS_ERR(inode)) {
+ 			file = ERR_CAST(inode);
+ 			goto err;
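
The anon_inodes.c change promotes the old static helper into a documented, exported function (restricted to the "kvm" module namespace via EXPORT_SYMBOL_GPL_FOR_MODULES) that takes the superblock explicitly instead of assuming anon_inode_mnt. A hedged usage sketch, assuming a caller that already holds a suitable superblock sb:

static int make_example_inode(struct super_block *sb)
{
	struct inode *inode;

	inode = anon_inode_make_secure_inode(sb, "example-class", NULL);
	if (IS_ERR(inode))
		return PTR_ERR(inode);
	/* ... wrap the inode in a struct file, etc. ... */
	iput(inode);
	return 0;
}
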
+diff --git a/fs/bcachefs/fsck.c b/fs/bcachefs/fsck.c
+index 75c8a97a6954c6..7b3b63ed747cf5 100644
+--- a/fs/bcachefs/fsck.c
++++ b/fs/bcachefs/fsck.c
+@@ -405,7 +405,7 @@ static int reattach_inode(struct btree_trans *trans, struct bch_inode_unpacked *
+ 		return ret;
+ 
+ 	struct bch_hash_info dir_hash = bch2_hash_info_init(c, &lostfound);
+-	struct qstr name = (struct qstr) QSTR(name_buf);
++	struct qstr name = QSTR(name_buf);
+ 
+ 	inode->bi_dir = lostfound.bi_inum;
+ 
+diff --git a/fs/bcachefs/recovery.c b/fs/bcachefs/recovery.c
+index 3c7f941dde39ad..ebabba2968821b 100644
+--- a/fs/bcachefs/recovery.c
++++ b/fs/bcachefs/recovery.c
+@@ -32,8 +32,6 @@
+ #include <linux/sort.h>
+ #include <linux/stat.h>
+ 
+-#define QSTR(n) { { { .len = strlen(n) } }, .name = n }
+-
+ void bch2_btree_lost_data(struct bch_fs *c, enum btree_id btree)
+ {
+ 	if (btree >= BTREE_ID_NR_MAX)
+diff --git a/fs/bcachefs/util.h b/fs/bcachefs/util.h
+index fb02c1c3600443..a27f4b84fe7752 100644
+--- a/fs/bcachefs/util.h
++++ b/fs/bcachefs/util.h
+@@ -647,8 +647,6 @@ static inline int cmp_le32(__le32 l, __le32 r)
+ 
+ #include <linux/uuid.h>
+ 
+-#define QSTR(n) { { { .len = strlen(n) } }, .name = n }
+-
+ static inline bool qstr_eq(const struct qstr l, const struct qstr r)
+ {
+ 	return l.len == r.len && !memcmp(l.name, r.name, l.len);
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index eaa991e6980492..0e63603ac5c78e 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -1912,6 +1912,7 @@ static vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf)
+ 	struct extent_changeset *data_reserved = NULL;
+ 	unsigned long zero_start;
+ 	loff_t size;
++	size_t fsize = folio_size(folio);
+ 	vm_fault_t ret;
+ 	int ret2;
+ 	int reserved = 0;
+@@ -1922,7 +1923,7 @@ static vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf)
+ 
+ 	ASSERT(folio_order(folio) == 0);
+ 
+-	reserved_space = PAGE_SIZE;
++	reserved_space = fsize;
+ 
+ 	sb_start_pagefault(inode->i_sb);
+ 	page_start = folio_pos(folio);
+@@ -1976,7 +1977,7 @@ static vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf)
+ 	 * We can't set the delalloc bits if there are pending ordered
+ 	 * extents.  Drop our locks and wait for them to finish.
+ 	 */
+-	ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start, PAGE_SIZE);
++	ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start, fsize);
+ 	if (ordered) {
+ 		unlock_extent(io_tree, page_start, page_end, &cached_state);
+ 		folio_unlock(folio);
+@@ -1988,11 +1989,11 @@ static vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf)
+ 
+ 	if (folio->index == ((size - 1) >> PAGE_SHIFT)) {
+ 		reserved_space = round_up(size - page_start, fs_info->sectorsize);
+-		if (reserved_space < PAGE_SIZE) {
++		if (reserved_space < fsize) {
+ 			end = page_start + reserved_space - 1;
+ 			btrfs_delalloc_release_space(BTRFS_I(inode),
+-					data_reserved, page_start,
+-					PAGE_SIZE - reserved_space, true);
++					data_reserved, end + 1,
++					fsize - reserved_space, true);
+ 		}
+ 	}
+ 
+@@ -2019,12 +2020,12 @@ static vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf)
+ 	if (page_start + folio_size(folio) > size)
+ 		zero_start = offset_in_folio(folio, size);
+ 	else
+-		zero_start = PAGE_SIZE;
++		zero_start = fsize;
+ 
+-	if (zero_start != PAGE_SIZE)
++	if (zero_start != fsize)
+ 		folio_zero_range(folio, zero_start, folio_size(folio) - zero_start);
+ 
+-	btrfs_folio_clear_checked(fs_info, folio, page_start, PAGE_SIZE);
++	btrfs_folio_clear_checked(fs_info, folio, page_start, fsize);
+ 	btrfs_folio_set_dirty(fs_info, folio, page_start, end + 1 - page_start);
+ 	btrfs_folio_set_uptodate(fs_info, folio, page_start, end + 1 - page_start);
+ 
+@@ -2033,7 +2034,7 @@ static vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf)
+ 	unlock_extent(io_tree, page_start, page_end, &cached_state);
+ 	up_read(&BTRFS_I(inode)->i_mmap_lock);
+ 
+-	btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
++	btrfs_delalloc_release_extents(BTRFS_I(inode), fsize);
+ 	sb_end_pagefault(inode->i_sb);
+ 	extent_changeset_free(data_reserved);
+ 	return VM_FAULT_LOCKED;
+@@ -2042,7 +2043,7 @@ static vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf)
+ 	folio_unlock(folio);
+ 	up_read(&BTRFS_I(inode)->i_mmap_lock);
+ out:
+-	btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
++	btrfs_delalloc_release_extents(BTRFS_I(inode), fsize);
+ 	btrfs_delalloc_release_space(BTRFS_I(inode), data_reserved, page_start,
+ 				     reserved_space, (ret != 0));
+ out_noreserve:
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 921ec3802648b0..f84e3f9fad84aa 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -4734,7 +4734,6 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
+ 	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
+ 	int ret = 0;
+ 	struct btrfs_trans_handle *trans;
+-	u64 last_unlink_trans;
+ 	struct fscrypt_name fname;
+ 
+ 	if (inode->i_size > BTRFS_EMPTY_DIR_SIZE)
+@@ -4760,6 +4759,23 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
+ 		goto out_notrans;
+ 	}
+ 
++	/*
++	 * Propagate the last_unlink_trans value of the deleted dir to its
++	 * parent directory. This is to prevent an unrecoverable log tree in the
++	 * case we do something like this:
++	 * 1) create dir foo
++	 * 2) create snapshot under dir foo
++	 * 3) delete the snapshot
++	 * 4) rmdir foo
++	 * 5) mkdir foo
++	 * 6) fsync foo or some file inside foo
++	 *
++	 * This is because we can't unlink other roots when replaying the dir
++	 * deletes for directory foo.
++	 */
++	if (BTRFS_I(inode)->last_unlink_trans >= trans->transid)
++		btrfs_record_snapshot_destroy(trans, BTRFS_I(dir));
++
+ 	if (unlikely(btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
+ 		ret = btrfs_unlink_subvol(trans, BTRFS_I(dir), dentry);
+ 		goto out;
+@@ -4769,27 +4785,11 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
+ 	if (ret)
+ 		goto out;
+ 
+-	last_unlink_trans = BTRFS_I(inode)->last_unlink_trans;
+-
+ 	/* now the directory is empty */
+ 	ret = btrfs_unlink_inode(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)),
+ 				 &fname.disk_name);
+-	if (!ret) {
++	if (!ret)
+ 		btrfs_i_size_write(BTRFS_I(inode), 0);
+-		/*
+-		 * Propagate the last_unlink_trans value of the deleted dir to
+-		 * its parent directory. This is to prevent an unrecoverable
+-		 * log tree in the case we do something like this:
+-		 * 1) create dir foo
+-		 * 2) create snapshot under dir foo
+-		 * 3) delete the snapshot
+-		 * 4) rmdir foo
+-		 * 5) mkdir foo
+-		 * 6) fsync foo or some file inside foo
+-		 */
+-		if (last_unlink_trans >= trans->transid)
+-			BTRFS_I(dir)->last_unlink_trans = last_unlink_trans;
+-	}
+ out:
+ 	btrfs_end_transaction(trans);
+ out_notrans:
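
Worth spelling out what the btrfs_rmdir() reordering buys: the old code propagated last_unlink_trans only on the successful-unlink path, which the early goto taken for empty-subvolume stubs bypassed entirely; hoisting the check above that branch covers both paths. The surviving logic is just:

if (BTRFS_I(inode)->last_unlink_trans >= trans->transid)
	btrfs_record_snapshot_destroy(trans, BTRFS_I(dir));
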
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 3e3722a7323936..1706f6d9b12e68 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -758,14 +758,14 @@ static noinline int create_subvol(struct mnt_idmap *idmap,
+ 		goto out;
+ 	}
+ 
++	btrfs_record_new_subvolume(trans, BTRFS_I(dir));
++
+ 	ret = btrfs_create_new_inode(trans, &new_inode_args);
+ 	if (ret) {
+ 		btrfs_abort_transaction(trans, ret);
+ 		goto out;
+ 	}
+ 
+-	btrfs_record_new_subvolume(trans, BTRFS_I(dir));
+-
+ 	d_instantiate_new(dentry, new_inode_args.inode);
+ 	new_inode_args.inode = NULL;
+ 
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index 9637c7cdc0cf92..16b4474ded4bc3 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -138,11 +138,14 @@ static void wait_log_commit(struct btrfs_root *root, int transid);
+  * and once to do all the other items.
+  */
+ 
+-static struct inode *btrfs_iget_logging(u64 objectid, struct btrfs_root *root)
++static struct btrfs_inode *btrfs_iget_logging(u64 objectid, struct btrfs_root *root)
+ {
+ 	unsigned int nofs_flag;
+ 	struct inode *inode;
+ 
++	/* Only meant to be called for subvolume roots and not for log roots. */
++	ASSERT(is_fstree(btrfs_root_id(root)));
++
+ 	/*
+ 	 * We're holding a transaction handle whether we are logging or
+ 	 * replaying a log tree, so we must make sure NOFS semantics apply
+@@ -154,7 +157,10 @@ static struct inode *btrfs_iget_logging(u64 objectid, struct btrfs_root *root)
+ 	inode = btrfs_iget(objectid, root);
+ 	memalloc_nofs_restore(nofs_flag);
+ 
+-	return inode;
++	if (IS_ERR(inode))
++		return ERR_CAST(inode);
++
++	return BTRFS_I(inode);
+ }
+ 
+ /*
+@@ -610,21 +616,6 @@ static int read_alloc_one_name(struct extent_buffer *eb, void *start, int len,
+ 	return 0;
+ }
+ 
+-/*
+- * simple helper to read an inode off the disk from a given root
+- * This can only be called for subvolume roots and not for the log
+- */
+-static noinline struct inode *read_one_inode(struct btrfs_root *root,
+-					     u64 objectid)
+-{
+-	struct inode *inode;
+-
+-	inode = btrfs_iget_logging(objectid, root);
+-	if (IS_ERR(inode))
+-		inode = NULL;
+-	return inode;
+-}
+-
+ /* replays a single extent in 'eb' at 'slot' with 'key' into the
+  * subvolume 'root'.  path is released on entry and should be released
+  * on exit.
+@@ -650,7 +641,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
+ 	u64 start = key->offset;
+ 	u64 nbytes = 0;
+ 	struct btrfs_file_extent_item *item;
+-	struct inode *inode = NULL;
++	struct btrfs_inode *inode = NULL;
+ 	unsigned long size;
+ 	int ret = 0;
+ 
+@@ -674,23 +665,19 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
+ 		extent_end = ALIGN(start + size,
+ 				   fs_info->sectorsize);
+ 	} else {
+-		ret = 0;
+-		goto out;
++		return 0;
+ 	}
+ 
+-	inode = read_one_inode(root, key->objectid);
+-	if (!inode) {
+-		ret = -EIO;
+-		goto out;
+-	}
++	inode = btrfs_iget_logging(key->objectid, root);
++	if (IS_ERR(inode))
++		return PTR_ERR(inode);
+ 
+ 	/*
+ 	 * first check to see if we already have this extent in the
+ 	 * file.  This must be done before the btrfs_drop_extents run
+ 	 * so we don't try to drop this extent.
+ 	 */
+-	ret = btrfs_lookup_file_extent(trans, root, path,
+-			btrfs_ino(BTRFS_I(inode)), start, 0);
++	ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(inode), start, 0);
+ 
+ 	if (ret == 0 &&
+ 	    (found_type == BTRFS_FILE_EXTENT_REG ||
+@@ -724,7 +711,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
+ 	drop_args.start = start;
+ 	drop_args.end = extent_end;
+ 	drop_args.drop_cache = true;
+-	ret = btrfs_drop_extents(trans, root, BTRFS_I(inode), &drop_args);
++	ret = btrfs_drop_extents(trans, root, inode, &drop_args);
+ 	if (ret)
+ 		goto out;
+ 
+@@ -902,16 +889,15 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
+ 			goto out;
+ 	}
+ 
+-	ret = btrfs_inode_set_file_extent_range(BTRFS_I(inode), start,
+-						extent_end - start);
++	ret = btrfs_inode_set_file_extent_range(inode, start, extent_end - start);
+ 	if (ret)
+ 		goto out;
+ 
+ update_inode:
+-	btrfs_update_inode_bytes(BTRFS_I(inode), nbytes, drop_args.bytes_found);
+-	ret = btrfs_update_inode(trans, BTRFS_I(inode));
++	btrfs_update_inode_bytes(inode, nbytes, drop_args.bytes_found);
++	ret = btrfs_update_inode(trans, inode);
+ out:
+-	iput(inode);
++	iput(&inode->vfs_inode);
+ 	return ret;
+ }
+ 
+@@ -948,7 +934,7 @@ static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
+ 				      struct btrfs_dir_item *di)
+ {
+ 	struct btrfs_root *root = dir->root;
+-	struct inode *inode;
++	struct btrfs_inode *inode;
+ 	struct fscrypt_str name;
+ 	struct extent_buffer *leaf;
+ 	struct btrfs_key location;
+@@ -963,9 +949,10 @@ static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
+ 
+ 	btrfs_release_path(path);
+ 
+-	inode = read_one_inode(root, location.objectid);
+-	if (!inode) {
+-		ret = -EIO;
++	inode = btrfs_iget_logging(location.objectid, root);
++	if (IS_ERR(inode)) {
++		ret = PTR_ERR(inode);
++		inode = NULL;
+ 		goto out;
+ 	}
+ 
+@@ -973,10 +960,11 @@ static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
+ 	if (ret)
+ 		goto out;
+ 
+-	ret = unlink_inode_for_log_replay(trans, dir, BTRFS_I(inode), &name);
++	ret = unlink_inode_for_log_replay(trans, dir, inode, &name);
+ out:
+ 	kfree(name.name);
+-	iput(inode);
++	if (inode)
++		iput(&inode->vfs_inode);
+ 	return ret;
+ }
+ 
+@@ -1087,7 +1075,9 @@ static inline int __add_inode_ref(struct btrfs_trans_handle *trans,
+ 	search_key.type = BTRFS_INODE_REF_KEY;
+ 	search_key.offset = parent_objectid;
+ 	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
+-	if (ret == 0) {
++	if (ret < 0) {
++		return ret;
++	} else if (ret == 0) {
+ 		struct btrfs_inode_ref *victim_ref;
+ 		unsigned long ptr;
+ 		unsigned long ptr_end;
+@@ -1149,7 +1139,7 @@ static inline int __add_inode_ref(struct btrfs_trans_handle *trans,
+ 		u32 item_size;
+ 		u32 cur_offset = 0;
+ 		unsigned long base;
+-		struct inode *victim_parent;
++		struct btrfs_inode *victim_parent;
+ 
+ 		leaf = path->nodes[0];
+ 
+@@ -1160,13 +1150,13 @@ static inline int __add_inode_ref(struct btrfs_trans_handle *trans,
+ 			struct fscrypt_str victim_name;
+ 
+ 			extref = (struct btrfs_inode_extref *)(base + cur_offset);
++			victim_name.len = btrfs_inode_extref_name_len(leaf, extref);
+ 
+ 			if (btrfs_inode_extref_parent(leaf, extref) != parent_objectid)
+ 				goto next;
+ 
+ 			ret = read_alloc_one_name(leaf, &extref->name,
+-				 btrfs_inode_extref_name_len(leaf, extref),
+-				 &victim_name);
++						  victim_name.len, &victim_name);
+ 			if (ret)
+ 				return ret;
+ 
+@@ -1181,18 +1171,18 @@ static inline int __add_inode_ref(struct btrfs_trans_handle *trans,
+ 				kfree(victim_name.name);
+ 				return ret;
+ 			} else if (!ret) {
+-				ret = -ENOENT;
+-				victim_parent = read_one_inode(root,
+-						parent_objectid);
+-				if (victim_parent) {
++				victim_parent = btrfs_iget_logging(parent_objectid, root);
++				if (IS_ERR(victim_parent)) {
++					ret = PTR_ERR(victim_parent);
++				} else {
+ 					inc_nlink(&inode->vfs_inode);
+ 					btrfs_release_path(path);
+ 
+ 					ret = unlink_inode_for_log_replay(trans,
+-							BTRFS_I(victim_parent),
++							victim_parent,
+ 							inode, &victim_name);
++					iput(&victim_parent->vfs_inode);
+ 				}
+-				iput(victim_parent);
+ 				kfree(victim_name.name);
+ 				if (ret)
+ 					return ret;
+@@ -1326,19 +1316,18 @@ static int unlink_old_inode_refs(struct btrfs_trans_handle *trans,
+ 			ret = !!btrfs_find_name_in_backref(log_eb, log_slot, &name);
+ 
+ 		if (!ret) {
+-			struct inode *dir;
++			struct btrfs_inode *dir;
+ 
+ 			btrfs_release_path(path);
+-			dir = read_one_inode(root, parent_id);
+-			if (!dir) {
+-				ret = -ENOENT;
++			dir = btrfs_iget_logging(parent_id, root);
++			if (IS_ERR(dir)) {
++				ret = PTR_ERR(dir);
+ 				kfree(name.name);
+ 				goto out;
+ 			}
+-			ret = unlink_inode_for_log_replay(trans, BTRFS_I(dir),
+-						 inode, &name);
++			ret = unlink_inode_for_log_replay(trans, dir, inode, &name);
+ 			kfree(name.name);
+-			iput(dir);
++			iput(&dir->vfs_inode);
+ 			if (ret)
+ 				goto out;
+ 			goto again;
+@@ -1370,8 +1359,8 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
+ 				  struct extent_buffer *eb, int slot,
+ 				  struct btrfs_key *key)
+ {
+-	struct inode *dir = NULL;
+-	struct inode *inode = NULL;
++	struct btrfs_inode *dir = NULL;
++	struct btrfs_inode *inode = NULL;
+ 	unsigned long ref_ptr;
+ 	unsigned long ref_end;
+ 	struct fscrypt_str name = { 0 };
+@@ -1404,15 +1393,17 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
+ 	 * copy the back ref in.  The link count fixup code will take
+ 	 * care of the rest
+ 	 */
+-	dir = read_one_inode(root, parent_objectid);
+-	if (!dir) {
+-		ret = -ENOENT;
++	dir = btrfs_iget_logging(parent_objectid, root);
++	if (IS_ERR(dir)) {
++		ret = PTR_ERR(dir);
++		dir = NULL;
+ 		goto out;
+ 	}
+ 
+-	inode = read_one_inode(root, inode_objectid);
+-	if (!inode) {
+-		ret = -EIO;
++	inode = btrfs_iget_logging(inode_objectid, root);
++	if (IS_ERR(inode)) {
++		ret = PTR_ERR(inode);
++		inode = NULL;
+ 		goto out;
+ 	}
+ 
+@@ -1424,11 +1415,13 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
+ 			 * parent object can change from one array
+ 			 * item to another.
+ 			 */
+-			if (!dir)
+-				dir = read_one_inode(root, parent_objectid);
+ 			if (!dir) {
+-				ret = -ENOENT;
+-				goto out;
++				dir = btrfs_iget_logging(parent_objectid, root);
++				if (IS_ERR(dir)) {
++					ret = PTR_ERR(dir);
++					dir = NULL;
++					goto out;
++				}
+ 			}
+ 		} else {
+ 			ret = ref_get_fields(eb, ref_ptr, &name, &ref_index);
+@@ -1436,8 +1429,8 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
+ 		if (ret)
+ 			goto out;
+ 
+-		ret = inode_in_dir(root, path, btrfs_ino(BTRFS_I(dir)),
+-				   btrfs_ino(BTRFS_I(inode)), ref_index, &name);
++		ret = inode_in_dir(root, path, btrfs_ino(dir), btrfs_ino(inode),
++				   ref_index, &name);
+ 		if (ret < 0) {
+ 			goto out;
+ 		} else if (ret == 0) {
+@@ -1448,8 +1441,7 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
+ 			 * overwrite any existing back reference, and we don't
+ 			 * want to create dangling pointers in the directory.
+ 			 */
+-			ret = __add_inode_ref(trans, root, path, log,
+-					      BTRFS_I(dir), BTRFS_I(inode),
++			ret = __add_inode_ref(trans, root, path, log, dir, inode,
+ 					      inode_objectid, parent_objectid,
+ 					      ref_index, &name);
+ 			if (ret) {
+@@ -1459,12 +1451,11 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
+ 			}
+ 
+ 			/* insert our name */
+-			ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),
+-					     &name, 0, ref_index);
++			ret = btrfs_add_link(trans, dir, inode, &name, 0, ref_index);
+ 			if (ret)
+ 				goto out;
+ 
+-			ret = btrfs_update_inode(trans, BTRFS_I(inode));
++			ret = btrfs_update_inode(trans, inode);
+ 			if (ret)
+ 				goto out;
+ 		}
+@@ -1474,7 +1465,7 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
+ 		kfree(name.name);
+ 		name.name = NULL;
+ 		if (log_ref_ver) {
+-			iput(dir);
++			iput(&dir->vfs_inode);
+ 			dir = NULL;
+ 		}
+ 	}
+@@ -1487,8 +1478,7 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
+ 	 * dir index entries exist for a name but there is no inode reference
+ 	 * item with the same name.
+ 	 */
+-	ret = unlink_old_inode_refs(trans, root, path, BTRFS_I(inode), eb, slot,
+-				    key);
++	ret = unlink_old_inode_refs(trans, root, path, inode, eb, slot, key);
+ 	if (ret)
+ 		goto out;
+ 
+@@ -1497,8 +1487,10 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
+ out:
+ 	btrfs_release_path(path);
+ 	kfree(name.name);
+-	iput(dir);
+-	iput(inode);
++	if (dir)
++		iput(&dir->vfs_inode);
++	if (inode)
++		iput(&inode->vfs_inode);
+ 	return ret;
+ }
+ 
+@@ -1670,12 +1662,13 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
+ {
+ 	int ret;
+ 	struct btrfs_key key;
+-	struct inode *inode;
+ 
+ 	key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
+ 	key.type = BTRFS_ORPHAN_ITEM_KEY;
+ 	key.offset = (u64)-1;
+ 	while (1) {
++		struct btrfs_inode *inode;
++
+ 		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
+ 		if (ret < 0)
+ 			break;
+@@ -1697,14 +1690,14 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
+ 			break;
+ 
+ 		btrfs_release_path(path);
+-		inode = read_one_inode(root, key.offset);
+-		if (!inode) {
+-			ret = -EIO;
++		inode = btrfs_iget_logging(key.offset, root);
++		if (IS_ERR(inode)) {
++			ret = PTR_ERR(inode);
+ 			break;
+ 		}
+ 
+-		ret = fixup_inode_link_count(trans, inode);
+-		iput(inode);
++		ret = fixup_inode_link_count(trans, &inode->vfs_inode);
++		iput(&inode->vfs_inode);
+ 		if (ret)
+ 			break;
+ 
+@@ -1732,12 +1725,14 @@ static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
+ {
+ 	struct btrfs_key key;
+ 	int ret = 0;
+-	struct inode *inode;
++	struct btrfs_inode *inode;
++	struct inode *vfs_inode;
+ 
+-	inode = read_one_inode(root, objectid);
+-	if (!inode)
+-		return -EIO;
++	inode = btrfs_iget_logging(objectid, root);
++	if (IS_ERR(inode))
++		return PTR_ERR(inode);
+ 
++	vfs_inode = &inode->vfs_inode;
+ 	key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
+ 	key.type = BTRFS_ORPHAN_ITEM_KEY;
+ 	key.offset = objectid;
+@@ -1746,15 +1741,15 @@ static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
+ 
+ 	btrfs_release_path(path);
+ 	if (ret == 0) {
+-		if (!inode->i_nlink)
+-			set_nlink(inode, 1);
++		if (!vfs_inode->i_nlink)
++			set_nlink(vfs_inode, 1);
+ 		else
+-			inc_nlink(inode);
+-		ret = btrfs_update_inode(trans, BTRFS_I(inode));
++			inc_nlink(vfs_inode);
++		ret = btrfs_update_inode(trans, inode);
+ 	} else if (ret == -EEXIST) {
+ 		ret = 0;
+ 	}
+-	iput(inode);
++	iput(vfs_inode);
+ 
+ 	return ret;
+ }
+@@ -1770,27 +1765,26 @@ static noinline int insert_one_name(struct btrfs_trans_handle *trans,
+ 				    const struct fscrypt_str *name,
+ 				    struct btrfs_key *location)
+ {
+-	struct inode *inode;
+-	struct inode *dir;
++	struct btrfs_inode *inode;
++	struct btrfs_inode *dir;
+ 	int ret;
+ 
+-	inode = read_one_inode(root, location->objectid);
+-	if (!inode)
+-		return -ENOENT;
++	inode = btrfs_iget_logging(location->objectid, root);
++	if (IS_ERR(inode))
++		return PTR_ERR(inode);
+ 
+-	dir = read_one_inode(root, dirid);
+-	if (!dir) {
+-		iput(inode);
+-		return -EIO;
++	dir = btrfs_iget_logging(dirid, root);
++	if (IS_ERR(dir)) {
++		iput(&inode->vfs_inode);
++		return PTR_ERR(dir);
+ 	}
+ 
+-	ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name,
+-			     1, index);
++	ret = btrfs_add_link(trans, dir, inode, name, 1, index);
+ 
+ 	/* FIXME, put inode into FIXUP list */
+ 
+-	iput(inode);
+-	iput(dir);
++	iput(&inode->vfs_inode);
++	iput(&dir->vfs_inode);
+ 	return ret;
+ }
+ 
+@@ -1852,16 +1846,16 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
+ 	bool index_dst_matches = false;
+ 	struct btrfs_key log_key;
+ 	struct btrfs_key search_key;
+-	struct inode *dir;
++	struct btrfs_inode *dir;
+ 	u8 log_flags;
+ 	bool exists;
+ 	int ret;
+ 	bool update_size = true;
+ 	bool name_added = false;
+ 
+-	dir = read_one_inode(root, key->objectid);
+-	if (!dir)
+-		return -EIO;
++	dir = btrfs_iget_logging(key->objectid, root);
++	if (IS_ERR(dir))
++		return PTR_ERR(dir);
+ 
+ 	ret = read_alloc_one_name(eb, di + 1, btrfs_dir_name_len(eb, di), &name);
+ 	if (ret)
+@@ -1882,9 +1876,8 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
+ 		ret = PTR_ERR(dir_dst_di);
+ 		goto out;
+ 	} else if (dir_dst_di) {
+-		ret = delete_conflicting_dir_entry(trans, BTRFS_I(dir), path,
+-						   dir_dst_di, &log_key,
+-						   log_flags, exists);
++		ret = delete_conflicting_dir_entry(trans, dir, path, dir_dst_di,
++						   &log_key, log_flags, exists);
+ 		if (ret < 0)
+ 			goto out;
+ 		dir_dst_matches = (ret == 1);
+@@ -1899,9 +1892,8 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
+ 		ret = PTR_ERR(index_dst_di);
+ 		goto out;
+ 	} else if (index_dst_di) {
+-		ret = delete_conflicting_dir_entry(trans, BTRFS_I(dir), path,
+-						   index_dst_di, &log_key,
+-						   log_flags, exists);
++		ret = delete_conflicting_dir_entry(trans, dir, path, index_dst_di,
++						   &log_key, log_flags, exists);
+ 		if (ret < 0)
+ 			goto out;
+ 		index_dst_matches = (ret == 1);
+@@ -1956,11 +1948,11 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
+ 
+ out:
+ 	if (!ret && update_size) {
+-		btrfs_i_size_write(BTRFS_I(dir), dir->i_size + name.len * 2);
+-		ret = btrfs_update_inode(trans, BTRFS_I(dir));
++		btrfs_i_size_write(dir, dir->vfs_inode.i_size + name.len * 2);
++		ret = btrfs_update_inode(trans, dir);
+ 	}
+ 	kfree(name.name);
+-	iput(dir);
++	iput(&dir->vfs_inode);
+ 	if (!ret && name_added)
+ 		ret = 1;
+ 	return ret;
+@@ -2117,16 +2109,16 @@ static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
+ 				      struct btrfs_root *log,
+ 				      struct btrfs_path *path,
+ 				      struct btrfs_path *log_path,
+-				      struct inode *dir,
++				      struct btrfs_inode *dir,
+ 				      struct btrfs_key *dir_key)
+ {
+-	struct btrfs_root *root = BTRFS_I(dir)->root;
++	struct btrfs_root *root = dir->root;
+ 	int ret;
+ 	struct extent_buffer *eb;
+ 	int slot;
+ 	struct btrfs_dir_item *di;
+ 	struct fscrypt_str name = { 0 };
+-	struct inode *inode = NULL;
++	struct btrfs_inode *inode = NULL;
+ 	struct btrfs_key location;
+ 
+ 	/*
+@@ -2163,9 +2155,10 @@ static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
+ 	btrfs_dir_item_key_to_cpu(eb, di, &location);
+ 	btrfs_release_path(path);
+ 	btrfs_release_path(log_path);
+-	inode = read_one_inode(root, location.objectid);
+-	if (!inode) {
+-		ret = -EIO;
++	inode = btrfs_iget_logging(location.objectid, root);
++	if (IS_ERR(inode)) {
++		ret = PTR_ERR(inode);
++		inode = NULL;
+ 		goto out;
+ 	}
+ 
+@@ -2173,9 +2166,8 @@ static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
+ 	if (ret)
+ 		goto out;
+ 
+-	inc_nlink(inode);
+-	ret = unlink_inode_for_log_replay(trans, BTRFS_I(dir), BTRFS_I(inode),
+-					  &name);
++	inc_nlink(&inode->vfs_inode);
++	ret = unlink_inode_for_log_replay(trans, dir, inode, &name);
+ 	/*
+ 	 * Unlike dir item keys, dir index keys can only have one name (entry) in
+ 	 * them, as there are no key collisions since each key has a unique offset
+@@ -2185,7 +2177,8 @@ static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
+ 	btrfs_release_path(path);
+ 	btrfs_release_path(log_path);
+ 	kfree(name.name);
+-	iput(inode);
++	if (inode)
++		iput(&inode->vfs_inode);
+ 	return ret;
+ }
+ 
+@@ -2309,7 +2302,7 @@ static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
+ 	struct btrfs_key dir_key;
+ 	struct btrfs_key found_key;
+ 	struct btrfs_path *log_path;
+-	struct inode *dir;
++	struct btrfs_inode *dir;
+ 
+ 	dir_key.objectid = dirid;
+ 	dir_key.type = BTRFS_DIR_INDEX_KEY;
+@@ -2317,14 +2310,17 @@ static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
+ 	if (!log_path)
+ 		return -ENOMEM;
+ 
+-	dir = read_one_inode(root, dirid);
+-	/* it isn't an error if the inode isn't there, that can happen
+-	 * because we replay the deletes before we copy in the inode item
+-	 * from the log
++	dir = btrfs_iget_logging(dirid, root);
++	/*
++	 * It isn't an error if the inode isn't there; that can happen because
++	 * we replay the deletes before we copy in the inode item from the log.
+ 	 */
+-	if (!dir) {
++	if (IS_ERR(dir)) {
+ 		btrfs_free_path(log_path);
+-		return 0;
++		ret = PTR_ERR(dir);
++		if (ret == -ENOENT)
++			ret = 0;
++		return ret;
+ 	}
+ 
+ 	range_start = 0;
+@@ -2386,7 +2382,7 @@ static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
+ out:
+ 	btrfs_release_path(path);
+ 	btrfs_free_path(log_path);
+-	iput(dir);
++	iput(&dir->vfs_inode);
+ 	return ret;
+ }
+ 
+@@ -2480,30 +2476,28 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
+ 			 */
+ 			if (S_ISREG(mode)) {
+ 				struct btrfs_drop_extents_args drop_args = { 0 };
+-				struct inode *inode;
++				struct btrfs_inode *inode;
+ 				u64 from;
+ 
+-				inode = read_one_inode(root, key.objectid);
+-				if (!inode) {
+-					ret = -EIO;
++				inode = btrfs_iget_logging(key.objectid, root);
++				if (IS_ERR(inode)) {
++					ret = PTR_ERR(inode);
+ 					break;
+ 				}
+-				from = ALIGN(i_size_read(inode),
++				from = ALIGN(i_size_read(&inode->vfs_inode),
+ 					     root->fs_info->sectorsize);
+ 				drop_args.start = from;
+ 				drop_args.end = (u64)-1;
+ 				drop_args.drop_cache = true;
+-				ret = btrfs_drop_extents(wc->trans, root,
+-							 BTRFS_I(inode),
++				ret = btrfs_drop_extents(wc->trans, root, inode,
+ 							 &drop_args);
+ 				if (!ret) {
+-					inode_sub_bytes(inode,
++					inode_sub_bytes(&inode->vfs_inode,
+ 							drop_args.bytes_found);
+ 					/* Update the inode's nbytes. */
+-					ret = btrfs_update_inode(wc->trans,
+-								 BTRFS_I(inode));
++					ret = btrfs_update_inode(wc->trans, inode);
+ 				}
+-				iput(inode);
++				iput(&inode->vfs_inode);
+ 				if (ret)
+ 					break;
+ 			}
+@@ -5485,7 +5479,6 @@ static int log_new_dir_dentries(struct btrfs_trans_handle *trans,
+ 	ihold(&curr_inode->vfs_inode);
+ 
+ 	while (true) {
+-		struct inode *vfs_inode;
+ 		struct btrfs_key key;
+ 		struct btrfs_key found_key;
+ 		u64 next_index;
+@@ -5501,7 +5494,7 @@ static int log_new_dir_dentries(struct btrfs_trans_handle *trans,
+ 			struct extent_buffer *leaf = path->nodes[0];
+ 			struct btrfs_dir_item *di;
+ 			struct btrfs_key di_key;
+-			struct inode *di_inode;
++			struct btrfs_inode *di_inode;
+ 			int log_mode = LOG_INODE_EXISTS;
+ 			int type;
+ 
+@@ -5528,17 +5521,16 @@ static int log_new_dir_dentries(struct btrfs_trans_handle *trans,
+ 				goto out;
+ 			}
+ 
+-			if (!need_log_inode(trans, BTRFS_I(di_inode))) {
+-				btrfs_add_delayed_iput(BTRFS_I(di_inode));
++			if (!need_log_inode(trans, di_inode)) {
++				btrfs_add_delayed_iput(di_inode);
+ 				break;
+ 			}
+ 
+ 			ctx->log_new_dentries = false;
+ 			if (type == BTRFS_FT_DIR)
+ 				log_mode = LOG_INODE_ALL;
+-			ret = btrfs_log_inode(trans, BTRFS_I(di_inode),
+-					      log_mode, ctx);
+-			btrfs_add_delayed_iput(BTRFS_I(di_inode));
++			ret = btrfs_log_inode(trans, di_inode, log_mode, ctx);
++			btrfs_add_delayed_iput(di_inode);
+ 			if (ret)
+ 				goto out;
+ 			if (ctx->log_new_dentries) {
+@@ -5580,14 +5572,13 @@ static int log_new_dir_dentries(struct btrfs_trans_handle *trans,
+ 		kfree(dir_elem);
+ 
+ 		btrfs_add_delayed_iput(curr_inode);
+-		curr_inode = NULL;
+ 
+-		vfs_inode = btrfs_iget_logging(ino, root);
+-		if (IS_ERR(vfs_inode)) {
+-			ret = PTR_ERR(vfs_inode);
++		curr_inode = btrfs_iget_logging(ino, root);
++		if (IS_ERR(curr_inode)) {
++			ret = PTR_ERR(curr_inode);
++			curr_inode = NULL;
+ 			break;
+ 		}
+-		curr_inode = BTRFS_I(vfs_inode);
+ 	}
+ out:
+ 	btrfs_free_path(path);
+@@ -5665,7 +5656,7 @@ static int add_conflicting_inode(struct btrfs_trans_handle *trans,
+ 				 struct btrfs_log_ctx *ctx)
+ {
+ 	struct btrfs_ino_list *ino_elem;
+-	struct inode *inode;
++	struct btrfs_inode *inode;
+ 
+ 	/*
+ 	 * It's rare to have a lot of conflicting inodes, in practice it is not
+@@ -5756,12 +5747,12 @@ static int add_conflicting_inode(struct btrfs_trans_handle *trans,
+ 	 * inode in LOG_INODE_EXISTS mode and rename operations update the log,
+ 	 * so that the log ends up with the new name and without the old name.
+ 	 */
+-	if (!need_log_inode(trans, BTRFS_I(inode))) {
+-		btrfs_add_delayed_iput(BTRFS_I(inode));
++	if (!need_log_inode(trans, inode)) {
++		btrfs_add_delayed_iput(inode);
+ 		return 0;
+ 	}
+ 
+-	btrfs_add_delayed_iput(BTRFS_I(inode));
++	btrfs_add_delayed_iput(inode);
+ 
+ 	ino_elem = kmalloc(sizeof(*ino_elem), GFP_NOFS);
+ 	if (!ino_elem)
+@@ -5797,7 +5788,7 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
+ 	 */
+ 	while (!list_empty(&ctx->conflict_inodes)) {
+ 		struct btrfs_ino_list *curr;
+-		struct inode *inode;
++		struct btrfs_inode *inode;
+ 		u64 ino;
+ 		u64 parent;
+ 
+@@ -5833,9 +5824,8 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
+ 			 * dir index key range logged for the directory. So we
+ 			 * must make sure the deletion is recorded.
+ 			 */
+-			ret = btrfs_log_inode(trans, BTRFS_I(inode),
+-					      LOG_INODE_ALL, ctx);
+-			btrfs_add_delayed_iput(BTRFS_I(inode));
++			ret = btrfs_log_inode(trans, inode, LOG_INODE_ALL, ctx);
++			btrfs_add_delayed_iput(inode);
+ 			if (ret)
+ 				break;
+ 			continue;
+@@ -5851,8 +5841,8 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
+ 		 * it again because if some other task logged the inode after
+ 		 * that, we can avoid doing it again.
+ 		 */
+-		if (!need_log_inode(trans, BTRFS_I(inode))) {
+-			btrfs_add_delayed_iput(BTRFS_I(inode));
++		if (!need_log_inode(trans, inode)) {
++			btrfs_add_delayed_iput(inode);
+ 			continue;
+ 		}
+ 
+@@ -5863,8 +5853,8 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
+ 		 * well because during a rename we pin the log and update the
+ 		 * log with the new name before we unpin it.
+ 		 */
+-		ret = btrfs_log_inode(trans, BTRFS_I(inode), LOG_INODE_EXISTS, ctx);
+-		btrfs_add_delayed_iput(BTRFS_I(inode));
++		ret = btrfs_log_inode(trans, inode, LOG_INODE_EXISTS, ctx);
++		btrfs_add_delayed_iput(inode);
+ 		if (ret)
+ 			break;
+ 	}
+@@ -6356,7 +6346,7 @@ static int log_new_delayed_dentries(struct btrfs_trans_handle *trans,
+ 
+ 	list_for_each_entry(item, delayed_ins_list, log_list) {
+ 		struct btrfs_dir_item *dir_item;
+-		struct inode *di_inode;
++		struct btrfs_inode *di_inode;
+ 		struct btrfs_key key;
+ 		int log_mode = LOG_INODE_EXISTS;
+ 
+@@ -6372,8 +6362,8 @@ static int log_new_delayed_dentries(struct btrfs_trans_handle *trans,
+ 			break;
+ 		}
+ 
+-		if (!need_log_inode(trans, BTRFS_I(di_inode))) {
+-			btrfs_add_delayed_iput(BTRFS_I(di_inode));
++		if (!need_log_inode(trans, di_inode)) {
++			btrfs_add_delayed_iput(di_inode);
+ 			continue;
+ 		}
+ 
+@@ -6381,12 +6371,12 @@ static int log_new_delayed_dentries(struct btrfs_trans_handle *trans,
+ 			log_mode = LOG_INODE_ALL;
+ 
+ 		ctx->log_new_dentries = false;
+-		ret = btrfs_log_inode(trans, BTRFS_I(di_inode), log_mode, ctx);
++		ret = btrfs_log_inode(trans, di_inode, log_mode, ctx);
+ 
+ 		if (!ret && ctx->log_new_dentries)
+-			ret = log_new_dir_dentries(trans, BTRFS_I(di_inode), ctx);
++			ret = log_new_dir_dentries(trans, di_inode, ctx);
+ 
+-		btrfs_add_delayed_iput(BTRFS_I(di_inode));
++		btrfs_add_delayed_iput(di_inode);
+ 
+ 		if (ret)
+ 			break;
+@@ -6794,7 +6784,7 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
+ 		ptr = btrfs_item_ptr_offset(leaf, slot);
+ 		while (cur_offset < item_size) {
+ 			struct btrfs_key inode_key;
+-			struct inode *dir_inode;
++			struct btrfs_inode *dir_inode;
+ 
+ 			inode_key.type = BTRFS_INODE_ITEM_KEY;
+ 			inode_key.offset = 0;
+@@ -6843,18 +6833,16 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
+ 				goto out;
+ 			}
+ 
+-			if (!need_log_inode(trans, BTRFS_I(dir_inode))) {
+-				btrfs_add_delayed_iput(BTRFS_I(dir_inode));
++			if (!need_log_inode(trans, dir_inode)) {
++				btrfs_add_delayed_iput(dir_inode);
+ 				continue;
+ 			}
+ 
+ 			ctx->log_new_dentries = false;
+-			ret = btrfs_log_inode(trans, BTRFS_I(dir_inode),
+-					      LOG_INODE_ALL, ctx);
++			ret = btrfs_log_inode(trans, dir_inode, LOG_INODE_ALL, ctx);
+ 			if (!ret && ctx->log_new_dentries)
+-				ret = log_new_dir_dentries(trans,
+-						   BTRFS_I(dir_inode), ctx);
+-			btrfs_add_delayed_iput(BTRFS_I(dir_inode));
++				ret = log_new_dir_dentries(trans, dir_inode, ctx);
++			btrfs_add_delayed_iput(dir_inode);
+ 			if (ret)
+ 				goto out;
+ 		}
+@@ -6879,7 +6867,7 @@ static int log_new_ancestors(struct btrfs_trans_handle *trans,
+ 		struct extent_buffer *leaf;
+ 		int slot;
+ 		struct btrfs_key search_key;
+-		struct inode *inode;
++		struct btrfs_inode *inode;
+ 		u64 ino;
+ 		int ret = 0;
+ 
+@@ -6894,11 +6882,10 @@ static int log_new_ancestors(struct btrfs_trans_handle *trans,
+ 		if (IS_ERR(inode))
+ 			return PTR_ERR(inode);
+ 
+-		if (BTRFS_I(inode)->generation >= trans->transid &&
+-		    need_log_inode(trans, BTRFS_I(inode)))
+-			ret = btrfs_log_inode(trans, BTRFS_I(inode),
+-					      LOG_INODE_EXISTS, ctx);
+-		btrfs_add_delayed_iput(BTRFS_I(inode));
++		if (inode->generation >= trans->transid &&
++		    need_log_inode(trans, inode))
++			ret = btrfs_log_inode(trans, inode, LOG_INODE_EXISTS, ctx);
++		btrfs_add_delayed_iput(inode);
+ 		if (ret)
+ 			return ret;
+ 
+@@ -7476,6 +7463,8 @@ void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans,
+  * full log sync.
+  * Also we don't need to worry about renames, since btrfs_rename() marks the log
+  * for full commit when renaming a subvolume.
++ *
++ * Must be called before creating the subvolume entry in its parent directory.
+  */
+ void btrfs_record_new_subvolume(const struct btrfs_trans_handle *trans,
+ 				struct btrfs_inode *dir)
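
The btrfs churn above is mechanical: helpers that used to take a VFS struct inode * and convert with BTRFS_I() at every use now take struct btrfs_inode * directly, so the container_of() conversion happens once at the boundary instead of at every call site. For reference, the embedding pattern behind a BTRFS_I()-style accessor looks like this in isolation; this is a userspace stand-in with illustrative names, not the kernel's definitions:

/* Minimal sketch of the "embedded VFS inode" pattern the refactor
 * above leans on. All names here are illustrative.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct vfs_inode {
	unsigned long i_ino;
};

struct fs_inode {
	unsigned long long generation;
	struct vfs_inode vfs_inode;	/* embedded, like btrfs_inode */
};

/* One conversion at the VFS boundary, mirroring BTRFS_I(). */
static struct fs_inode *FS_I(struct vfs_inode *inode)
{
	return container_of(inode, struct fs_inode, vfs_inode);
}

/* Internal helpers take the fs-specific type directly. */
static void log_inode(struct fs_inode *inode)
{
	printf("ino=%lu gen=%llu\n", inode->vfs_inode.i_ino,
	       inode->generation);
}

int main(void)
{
	struct fs_inode fi = { .generation = 42, .vfs_inode = { .i_ino = 7 } };
	struct vfs_inode *vfs = &fi.vfs_inode;	/* what the VFS hands back */

	log_inode(FS_I(vfs));	/* convert once, then stay typed */
	return 0;
}

Converting once keeps the rest of the call chain strongly typed and drops the repeated BTRFS_I() casts the old hunks carried.
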
+diff --git a/fs/erofs/xattr.c b/fs/erofs/xattr.c
+index a90d7d64973905..60d2cf26e837e2 100644
+--- a/fs/erofs/xattr.c
++++ b/fs/erofs/xattr.c
+@@ -407,7 +407,7 @@ int erofs_getxattr(struct inode *inode, int index, const char *name,
+ 	}
+ 
+ 	it.index = index;
+-	it.name = (struct qstr)QSTR_INIT(name, strlen(name));
++	it.name = QSTR(name);
+ 	if (it.name.len > EROFS_NAME_LEN)
+ 		return -ERANGE;
+ 
+diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
+index 62c7fd1168a15a..654f672639b3c7 100644
+--- a/fs/f2fs/data.c
++++ b/fs/f2fs/data.c
+@@ -3986,7 +3986,7 @@ static int check_swap_activate(struct swap_info_struct *sis,
+ 
+ 		if ((pblock - SM_I(sbi)->main_blkaddr) % blks_per_sec ||
+ 				nr_pblocks % blks_per_sec ||
+-				!f2fs_valid_pinned_area(sbi, pblock)) {
++				f2fs_is_sequential_zone_area(sbi, pblock)) {
+ 			bool last_extent = false;
+ 
+ 			not_aligned++;
+diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
+index 61b715cc2e231b..a435550b2839b1 100644
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -1762,6 +1762,7 @@ struct f2fs_sb_info {
+ 	unsigned int dirty_device;		/* for checkpoint data flush */
+ 	spinlock_t dev_lock;			/* protect dirty_device */
+ 	bool aligned_blksize;			/* all devices have the same logical blksize */
++	unsigned int first_seq_zone_segno;	/* first segno in sequential zone */
+ 
+ 	/* For write statistics */
+ 	u64 sectors_written_start;
+@@ -4556,12 +4557,16 @@ F2FS_FEATURE_FUNCS(compression, COMPRESSION);
+ F2FS_FEATURE_FUNCS(readonly, RO);
+ 
+ #ifdef CONFIG_BLK_DEV_ZONED
+-static inline bool f2fs_blkz_is_seq(struct f2fs_sb_info *sbi, int devi,
+-				    block_t blkaddr)
++static inline bool f2fs_zone_is_seq(struct f2fs_sb_info *sbi, int devi,
++							unsigned int zone)
+ {
+-	unsigned int zno = blkaddr / sbi->blocks_per_blkz;
++	return test_bit(zone, FDEV(devi).blkz_seq);
++}
+ 
+-	return test_bit(zno, FDEV(devi).blkz_seq);
++static inline bool f2fs_blkz_is_seq(struct f2fs_sb_info *sbi, int devi,
++								block_t blkaddr)
++{
++	return f2fs_zone_is_seq(sbi, devi, blkaddr / sbi->blocks_per_blkz);
+ }
+ #endif
+ 
+@@ -4633,15 +4638,31 @@ static inline bool f2fs_lfs_mode(struct f2fs_sb_info *sbi)
+ 	return F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS;
+ }
+ 
+-static inline bool f2fs_valid_pinned_area(struct f2fs_sb_info *sbi,
++static inline bool f2fs_is_sequential_zone_area(struct f2fs_sb_info *sbi,
+ 					  block_t blkaddr)
+ {
+ 	if (f2fs_sb_has_blkzoned(sbi)) {
++#ifdef CONFIG_BLK_DEV_ZONED
+ 		int devi = f2fs_target_device_index(sbi, blkaddr);
+ 
+-		return !bdev_is_zoned(FDEV(devi).bdev);
++		if (!bdev_is_zoned(FDEV(devi).bdev))
++			return false;
++
++		if (f2fs_is_multi_device(sbi)) {
++			if (blkaddr < FDEV(devi).start_blk ||
++				blkaddr > FDEV(devi).end_blk) {
++				f2fs_err(sbi, "Invalid block %x", blkaddr);
++				return false;
++			}
++			blkaddr -= FDEV(devi).start_blk;
++		}
++
++		return f2fs_blkz_is_seq(sbi, devi, blkaddr);
++#else
++		return false;
++#endif
+ 	}
+-	return true;
++	return false;
+ }
+ 
+ static inline bool f2fs_low_mem_mode(struct f2fs_sb_info *sbi)
+diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
+index 02f438cd6bfaf6..d9037e74631c0a 100644
+--- a/fs/f2fs/file.c
++++ b/fs/f2fs/file.c
+@@ -1828,7 +1828,8 @@ static int f2fs_expand_inode_data(struct inode *inode, loff_t offset,
+ 
+ 		map.m_len = sec_blks;
+ next_alloc:
+-		if (has_not_enough_free_secs(sbi, 0,
++		if (has_not_enough_free_secs(sbi, 0, f2fs_sb_has_blkzoned(sbi) ?
++			ZONED_PIN_SEC_REQUIRED_COUNT :
+ 			GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
+ 			f2fs_down_write(&sbi->gc_lock);
+ 			stat_inc_gc_call_count(sbi, FOREGROUND);
+diff --git a/fs/f2fs/gc.h b/fs/f2fs/gc.h
+index 2914b678bf8fb9..5c1eaf55e1277b 100644
+--- a/fs/f2fs/gc.h
++++ b/fs/f2fs/gc.h
+@@ -35,6 +35,7 @@
+ #define LIMIT_BOOST_ZONED_GC	25 /* percentage over total user space of boosted gc for zoned devices */
+ #define DEF_MIGRATION_WINDOW_GRANULARITY_ZONED	3
+ #define BOOST_GC_MULTIPLE	5
++#define ZONED_PIN_SEC_REQUIRED_COUNT	1
+ 
+ #define DEF_GC_FAILED_PINNED_FILES	2048
+ #define MAX_GC_FAILED_PINNED_FILES	USHRT_MAX
+diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
+index 449c0acbfabc03..e48b5e2efea281 100644
+--- a/fs/f2fs/segment.c
++++ b/fs/f2fs/segment.c
+@@ -2719,7 +2719,7 @@ static int get_new_segment(struct f2fs_sb_info *sbi,
+ 		if (sbi->blkzone_alloc_policy == BLKZONE_ALLOC_PRIOR_CONV || pinning)
+ 			segno = 0;
+ 		else
+-			segno = max(first_zoned_segno(sbi), *newseg);
++			segno = max(sbi->first_seq_zone_segno, *newseg);
+ 		hint = GET_SEC_FROM_SEG(sbi, segno);
+ 	}
+ #endif
+@@ -2731,7 +2731,7 @@ static int get_new_segment(struct f2fs_sb_info *sbi,
+ 	if (secno >= MAIN_SECS(sbi) && f2fs_sb_has_blkzoned(sbi)) {
+ 		/* Write only to sequential zones */
+ 		if (sbi->blkzone_alloc_policy == BLKZONE_ALLOC_ONLY_SEQ) {
+-			hint = GET_SEC_FROM_SEG(sbi, first_zoned_segno(sbi));
++			hint = GET_SEC_FROM_SEG(sbi, sbi->first_seq_zone_segno);
+ 			secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint);
+ 		} else
+ 			secno = find_first_zero_bit(free_i->free_secmap,
+@@ -2784,9 +2784,9 @@ static int get_new_segment(struct f2fs_sb_info *sbi,
+ 		goto out_unlock;
+ 	}
+ 
+-	/* no free section in conventional zone */
++	/* no free section in conventional device or conventional zone */
+ 	if (new_sec && pinning &&
+-		!f2fs_valid_pinned_area(sbi, START_BLOCK(sbi, segno))) {
++		f2fs_is_sequential_zone_area(sbi, START_BLOCK(sbi, segno))) {
+ 		ret = -EAGAIN;
+ 		goto out_unlock;
+ 	}
+@@ -3250,7 +3250,8 @@ int f2fs_allocate_pinning_section(struct f2fs_sb_info *sbi)
+ 
+ 	if (f2fs_sb_has_blkzoned(sbi) && err == -EAGAIN && gc_required) {
+ 		f2fs_down_write(&sbi->gc_lock);
+-		err = f2fs_gc_range(sbi, 0, GET_SEGNO(sbi, FDEV(0).end_blk), true, 1);
++		err = f2fs_gc_range(sbi, 0, sbi->first_seq_zone_segno - 1,
++				true, ZONED_PIN_SEC_REQUIRED_COUNT);
+ 		f2fs_up_write(&sbi->gc_lock);
+ 
+ 		gc_required = false;
+diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
+index 05a342933f98f6..52bb1a28193573 100644
+--- a/fs/f2fs/segment.h
++++ b/fs/f2fs/segment.h
+@@ -992,13 +992,3 @@ static inline void wake_up_discard_thread(struct f2fs_sb_info *sbi, bool force)
+ 	dcc->discard_wake = true;
+ 	wake_up_interruptible_all(&dcc->discard_wait_queue);
+ }
+-
+-static inline unsigned int first_zoned_segno(struct f2fs_sb_info *sbi)
+-{
+-	int devi;
+-
+-	for (devi = 0; devi < sbi->s_ndevs; devi++)
+-		if (bdev_is_zoned(FDEV(devi).bdev))
+-			return GET_SEGNO(sbi, FDEV(devi).start_blk);
+-	return 0;
+-}
+diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
+index f0e83ea56e38c4..3f2c6fa3623ba6 100644
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -4260,6 +4260,37 @@ static void f2fs_record_error_work(struct work_struct *work)
+ 	f2fs_record_stop_reason(sbi);
+ }
+ 
++static inline unsigned int get_first_seq_zone_segno(struct f2fs_sb_info *sbi)
++{
++#ifdef CONFIG_BLK_DEV_ZONED
++	unsigned int zoneno, total_zones;
++	int devi;
++
++	if (!f2fs_sb_has_blkzoned(sbi))
++		return NULL_SEGNO;
++
++	for (devi = 0; devi < sbi->s_ndevs; devi++) {
++		if (!bdev_is_zoned(FDEV(devi).bdev))
++			continue;
++
++		total_zones = GET_ZONE_FROM_SEG(sbi, FDEV(devi).total_segments);
++
++		for (zoneno = 0; zoneno < total_zones; zoneno++) {
++			unsigned int segs, blks;
++
++			if (!f2fs_zone_is_seq(sbi, devi, zoneno))
++				continue;
++
++			segs = GET_SEG_FROM_SEC(sbi,
++					zoneno * sbi->secs_per_zone);
++			blks = SEGS_TO_BLKS(sbi, segs);
++			return GET_SEGNO(sbi, FDEV(devi).start_blk + blks);
++		}
++	}
++#endif
++	return NULL_SEGNO;
++}
++
+ static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
+ {
+ 	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
+@@ -4294,6 +4325,14 @@ static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
+ #endif
+ 
+ 	for (i = 0; i < max_devices; i++) {
++		if (max_devices == 1) {
++			FDEV(i).total_segments =
++				le32_to_cpu(raw_super->segment_count_main);
++			FDEV(i).start_blk = 0;
++			FDEV(i).end_blk = FDEV(i).total_segments *
++						BLKS_PER_SEG(sbi);
++		}
++
+ 		if (i == 0)
+ 			FDEV(0).bdev_file = sbi->sb->s_bdev_file;
+ 		else if (!RDEV(i).path[0])
+@@ -4660,6 +4699,9 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
+ 	/* For write statistics */
+ 	sbi->sectors_written_start = f2fs_get_sectors_written(sbi);
+ 
++	/* get segno of first zoned block device */
++	sbi->first_seq_zone_segno = get_first_seq_zone_segno(sbi);
++
+ 	/* Read accumulated write IO statistics if exists */
+ 	seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
+ 	if (__exist_node_summaries(sbi))
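
The new get_first_seq_zone_segno() walk above reduces to unit conversions: zone number to sections, sections to segments, segments to blocks, then back to a global segment number. A toy version with the f2fs macros inlined as plain parameters; the values are invented for illustration:

#include <stdio.h>

int main(void)
{
	unsigned int segs_per_sec = 2, secs_per_zone = 4, blks_per_seg = 512;
	unsigned int dev_start_blk = 0;
	unsigned int first_seq_zone = 3;	/* zones 0-2 conventional */

	/* GET_SEG_FROM_SEC: sections -> segments */
	unsigned int segs = first_seq_zone * secs_per_zone * segs_per_sec;
	/* SEGS_TO_BLKS: segments -> blocks */
	unsigned int blks = segs * blks_per_seg;
	/* GET_SEGNO: block address back to a global segment number */
	unsigned int segno = (dev_start_blk + blks) / blks_per_seg;

	printf("first sequential segno = %u\n", segno);	/* prints 24 */
	return 0;
}
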
+diff --git a/fs/file_table.c b/fs/file_table.c
+index 18735dc8269a10..cf3422edf737ca 100644
+--- a/fs/file_table.c
++++ b/fs/file_table.c
+@@ -332,9 +332,7 @@ static struct file *alloc_file(const struct path *path, int flags,
+ static inline int alloc_path_pseudo(const char *name, struct inode *inode,
+ 				    struct vfsmount *mnt, struct path *path)
+ {
+-	struct qstr this = QSTR_INIT(name, strlen(name));
+-
+-	path->dentry = d_alloc_pseudo(mnt->mnt_sb, &this);
++	path->dentry = d_alloc_pseudo(mnt->mnt_sb, &QSTR(name));
+ 	if (!path->dentry)
+ 		return -ENOMEM;
+ 	path->mnt = mntget(mnt);
+diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
+index 68fc8af14700d3..eb4270e82ef8ee 100644
+--- a/fs/gfs2/aops.c
++++ b/fs/gfs2/aops.c
+@@ -37,27 +37,6 @@
+ #include "aops.h"
+ 
+ 
+-void gfs2_trans_add_databufs(struct gfs2_inode *ip, struct folio *folio,
+-			     size_t from, size_t len)
+-{
+-	struct buffer_head *head = folio_buffers(folio);
+-	unsigned int bsize = head->b_size;
+-	struct buffer_head *bh;
+-	size_t to = from + len;
+-	size_t start, end;
+-
+-	for (bh = head, start = 0; bh != head || !start;
+-	     bh = bh->b_this_page, start = end) {
+-		end = start + bsize;
+-		if (end <= from)
+-			continue;
+-		if (start >= to)
+-			break;
+-		set_buffer_uptodate(bh);
+-		gfs2_trans_add_data(ip->i_gl, bh);
+-	}
+-}
+-
+ /**
+  * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
+  * @inode: The inode
+@@ -133,11 +112,42 @@ static int __gfs2_jdata_write_folio(struct folio *folio,
+ 					inode->i_sb->s_blocksize,
+ 					BIT(BH_Dirty)|BIT(BH_Uptodate));
+ 		}
+-		gfs2_trans_add_databufs(ip, folio, 0, folio_size(folio));
++		gfs2_trans_add_databufs(ip->i_gl, folio, 0, folio_size(folio));
+ 	}
+ 	return gfs2_write_jdata_folio(folio, wbc);
+ }
+ 
++/**
++ * gfs2_jdata_writeback - Write jdata folios to the log
++ * @mapping: The mapping to write
++ * @wbc: The writeback control
++ *
++ * Returns: errno
++ */
++int gfs2_jdata_writeback(struct address_space *mapping, struct writeback_control *wbc)
++{
++	struct inode *inode = mapping->host;
++	struct gfs2_inode *ip = GFS2_I(inode);
++	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
++	struct folio *folio = NULL;
++	int error;
++
++	BUG_ON(current->journal_info);
++	if (gfs2_assert_withdraw(sdp, ip->i_gl->gl_state == LM_ST_EXCLUSIVE))
++		return 0;
++
++	while ((folio = writeback_iter(mapping, wbc, folio, &error))) {
++		if (folio_test_checked(folio)) {
++			folio_redirty_for_writepage(wbc, folio);
++			folio_unlock(folio);
++			continue;
++		}
++		error = __gfs2_jdata_write_folio(folio, wbc);
++	}
++
++	return error;
++}
++
+ /**
+  * gfs2_writepages - Write a bunch of dirty pages back to disk
+  * @mapping: The mapping to write
+diff --git a/fs/gfs2/aops.h b/fs/gfs2/aops.h
+index a10c4334d24893..bf002522a78220 100644
+--- a/fs/gfs2/aops.h
++++ b/fs/gfs2/aops.h
+@@ -9,7 +9,6 @@
+ #include "incore.h"
+ 
+ void adjust_fs_space(struct inode *inode);
+-void gfs2_trans_add_databufs(struct gfs2_inode *ip, struct folio *folio,
+-			     size_t from, size_t len);
++int gfs2_jdata_writeback(struct address_space *mapping, struct writeback_control *wbc);
+ 
+ #endif /* __AOPS_DOT_H__ */
+diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
+index 1795c4e8dbf66a..28ad07b0034844 100644
+--- a/fs/gfs2/bmap.c
++++ b/fs/gfs2/bmap.c
+@@ -988,7 +988,8 @@ static void gfs2_iomap_put_folio(struct inode *inode, loff_t pos,
+ 	struct gfs2_sbd *sdp = GFS2_SB(inode);
+ 
+ 	if (!gfs2_is_stuffed(ip))
+-		gfs2_trans_add_databufs(ip, folio, offset_in_folio(folio, pos),
++		gfs2_trans_add_databufs(ip->i_gl, folio,
++					offset_in_folio(folio, pos),
+ 					copied);
+ 
+ 	folio_unlock(folio);
+diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
+index aecce4bb5e1a9c..161fc76ed5b0ed 100644
+--- a/fs/gfs2/glock.c
++++ b/fs/gfs2/glock.c
+@@ -807,6 +807,7 @@ __acquires(&gl->gl_lockref.lock)
+ 	}
+ 
+ 	if (ls->ls_ops->lm_lock) {
++		set_bit(GLF_PENDING_REPLY, &gl->gl_flags);
+ 		spin_unlock(&gl->gl_lockref.lock);
+ 		ret = ls->ls_ops->lm_lock(gl, target, lck_flags);
+ 		spin_lock(&gl->gl_lockref.lock);
+@@ -825,6 +826,7 @@ __acquires(&gl->gl_lockref.lock)
+ 			/* The operation will be completed asynchronously. */
+ 			return;
+ 		}
++		clear_bit(GLF_PENDING_REPLY, &gl->gl_flags);
+ 	}
+ 
+ 	/* Complete the operation now. */
+@@ -985,16 +987,22 @@ static bool gfs2_try_evict(struct gfs2_glock *gl)
+ 		ip = NULL;
+ 	spin_unlock(&gl->gl_lockref.lock);
+ 	if (ip) {
+-		gl->gl_no_formal_ino = ip->i_no_formal_ino;
+-		set_bit(GIF_DEFERRED_DELETE, &ip->i_flags);
++		wait_on_inode(&ip->i_inode);
++		if (is_bad_inode(&ip->i_inode)) {
++			iput(&ip->i_inode);
++			ip = NULL;
++		}
++	}
++	if (ip) {
++		set_bit(GLF_DEFER_DELETE, &gl->gl_flags);
+ 		d_prune_aliases(&ip->i_inode);
+ 		iput(&ip->i_inode);
++		clear_bit(GLF_DEFER_DELETE, &gl->gl_flags);
+ 
+ 		/* If the inode was evicted, gl->gl_object will now be NULL. */
+ 		spin_lock(&gl->gl_lockref.lock);
+ 		ip = gl->gl_object;
+ 		if (ip) {
+-			clear_bit(GIF_DEFERRED_DELETE, &ip->i_flags);
+ 			if (!igrab(&ip->i_inode))
+ 				ip = NULL;
+ 		}
+@@ -1954,6 +1962,7 @@ void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
+ 	struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;
+ 
+ 	spin_lock(&gl->gl_lockref.lock);
++	clear_bit(GLF_PENDING_REPLY, &gl->gl_flags);
+ 	gl->gl_reply = ret;
+ 
+ 	if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))) {
+@@ -2354,6 +2363,8 @@ static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
+ 		*p++ = 'f';
+ 	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
+ 		*p++ = 'i';
++	if (test_bit(GLF_PENDING_REPLY, gflags))
++		*p++ = 'R';
+ 	if (test_bit(GLF_HAVE_REPLY, gflags))
+ 		*p++ = 'r';
+ 	if (test_bit(GLF_INITIAL, gflags))
+@@ -2378,6 +2389,8 @@ static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
+ 		*p++ = 'e';
+ 	if (test_bit(GLF_VERIFY_DELETE, gflags))
+ 		*p++ = 'E';
++	if (test_bit(GLF_DEFER_DELETE, gflags))
++		*p++ = 's';
+ 	*p = 0;
+ 	return buf;
+ }
+diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
+index 72a0601ce65e2c..4b6b23c638e296 100644
+--- a/fs/gfs2/glops.c
++++ b/fs/gfs2/glops.c
+@@ -494,11 +494,18 @@ int gfs2_inode_refresh(struct gfs2_inode *ip)
+ static int inode_go_instantiate(struct gfs2_glock *gl)
+ {
+ 	struct gfs2_inode *ip = gl->gl_object;
++	struct gfs2_glock *io_gl;
++	int error;
+ 
+ 	if (!ip) /* no inode to populate - read it in later */
+ 		return 0;
+ 
+-	return gfs2_inode_refresh(ip);
++	error = gfs2_inode_refresh(ip);
++	if (error)
++		return error;
++	io_gl = ip->i_iopen_gh.gh_gl;
++	io_gl->gl_no_formal_ino = ip->i_no_formal_ino;
++	return 0;
+ }
+ 
+ static int inode_go_held(struct gfs2_holder *gh)
+diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
+index e5535d7b465925..142f61228d15eb 100644
+--- a/fs/gfs2/incore.h
++++ b/fs/gfs2/incore.h
+@@ -330,6 +330,8 @@ enum {
+ 	GLF_UNLOCKED			= 16, /* Wait for glock to be unlocked */
+ 	GLF_TRY_TO_EVICT		= 17, /* iopen glocks only */
+ 	GLF_VERIFY_DELETE		= 18, /* iopen glocks only */
++	GLF_PENDING_REPLY		= 19,
++	GLF_DEFER_DELETE		= 20, /* iopen glocks only */
+ };
+ 
+ struct gfs2_glock {
+@@ -376,7 +378,6 @@ enum {
+ 	GIF_SW_PAGED		= 3,
+ 	GIF_FREE_VFS_INODE      = 5,
+ 	GIF_GLOP_PENDING	= 6,
+-	GIF_DEFERRED_DELETE	= 7,
+ };
+ 
+ struct gfs2_inode {
+diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
+index 3be24285ab01da..0b546024f5ef7e 100644
+--- a/fs/gfs2/inode.c
++++ b/fs/gfs2/inode.c
+@@ -439,6 +439,74 @@ static int alloc_dinode(struct gfs2_inode *ip, u32 flags, unsigned *dblocks)
+ 	return error;
+ }
+ 
++static void gfs2_final_release_pages(struct gfs2_inode *ip)
++{
++	struct inode *inode = &ip->i_inode;
++	struct gfs2_glock *gl = ip->i_gl;
++
++	if (unlikely(!gl)) {
++		/* This can only happen during incomplete inode creation. */
++		BUG_ON(!test_bit(GIF_ALLOC_FAILED, &ip->i_flags));
++		return;
++	}
++
++	truncate_inode_pages(gfs2_glock2aspace(gl), 0);
++	truncate_inode_pages(&inode->i_data, 0);
++
++	if (atomic_read(&gl->gl_revokes) == 0) {
++		clear_bit(GLF_LFLUSH, &gl->gl_flags);
++		clear_bit(GLF_DIRTY, &gl->gl_flags);
++	}
++}
++
++int gfs2_dinode_dealloc(struct gfs2_inode *ip)
++{
++	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
++	struct gfs2_rgrpd *rgd;
++	struct gfs2_holder gh;
++	int error;
++
++	if (gfs2_get_inode_blocks(&ip->i_inode) != 1) {
++		gfs2_consist_inode(ip);
++		return -EIO;
++	}
++
++	gfs2_rindex_update(sdp);
++
++	error = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
++	if (error)
++		return error;
++
++	rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr, 1);
++	if (!rgd) {
++		gfs2_consist_inode(ip);
++		error = -EIO;
++		goto out_qs;
++	}
++
++	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
++				   LM_FLAG_NODE_SCOPE, &gh);
++	if (error)
++		goto out_qs;
++
++	error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS + RES_QUOTA,
++				 sdp->sd_jdesc->jd_blocks);
++	if (error)
++		goto out_rg_gunlock;
++
++	gfs2_free_di(rgd, ip);
++
++	gfs2_final_release_pages(ip);
++
++	gfs2_trans_end(sdp);
++
++out_rg_gunlock:
++	gfs2_glock_dq_uninit(&gh);
++out_qs:
++	gfs2_quota_unhold(ip);
++	return error;
++}
++
+ static void gfs2_init_dir(struct buffer_head *dibh,
+ 			  const struct gfs2_inode *parent)
+ {
+@@ -629,10 +697,11 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
+ 	struct gfs2_inode *dip = GFS2_I(dir), *ip;
+ 	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
+ 	struct gfs2_glock *io_gl;
+-	int error;
++	int error, dealloc_error;
+ 	u32 aflags = 0;
+ 	unsigned blocks = 1;
+ 	struct gfs2_diradd da = { .bh = NULL, .save_loc = 1, };
++	bool xattr_initialized = false;
+ 
+ 	if (!name->len || name->len > GFS2_FNAMESIZE)
+ 		return -ENAMETOOLONG;
+@@ -745,12 +814,13 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
+ 
+ 	error = gfs2_glock_get(sdp, ip->i_no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl);
+ 	if (error)
+-		goto fail_free_inode;
++		goto fail_dealloc_inode;
+ 
+ 	error = gfs2_glock_get(sdp, ip->i_no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
+ 	if (error)
+-		goto fail_free_inode;
++		goto fail_dealloc_inode;
+ 	gfs2_cancel_delete_work(io_gl);
++	io_gl->gl_no_formal_ino = ip->i_no_formal_ino;
+ 
+ retry:
+ 	error = insert_inode_locked4(inode, ip->i_no_addr, iget_test, &ip->i_no_addr);
+@@ -772,8 +842,10 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
+ 	if (error)
+ 		goto fail_gunlock3;
+ 
+-	if (blocks > 1)
++	if (blocks > 1) {
+ 		gfs2_init_xattr(ip);
++		xattr_initialized = true;
++	}
+ 	init_dinode(dip, ip, symname);
+ 	gfs2_trans_end(sdp);
+ 
+@@ -828,6 +900,18 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
+ 	gfs2_glock_dq_uninit(&ip->i_iopen_gh);
+ fail_gunlock2:
+ 	gfs2_glock_put(io_gl);
++fail_dealloc_inode:
++	set_bit(GIF_ALLOC_FAILED, &ip->i_flags);
++	dealloc_error = 0;
++	if (ip->i_eattr)
++		dealloc_error = gfs2_ea_dealloc(ip, xattr_initialized);
++	clear_nlink(inode);
++	mark_inode_dirty(inode);
++	if (!dealloc_error)
++		dealloc_error = gfs2_dinode_dealloc(ip);
++	if (dealloc_error)
++		fs_warn(sdp, "%s: %d\n", __func__, dealloc_error);
++	ip->i_no_addr = 0;
+ fail_free_inode:
+ 	if (ip->i_gl) {
+ 		gfs2_glock_put(ip->i_gl);
+@@ -842,10 +926,6 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
+ 	gfs2_dir_no_add(&da);
+ 	gfs2_glock_dq_uninit(&d_gh);
+ 	if (!IS_ERR_OR_NULL(inode)) {
+-		set_bit(GIF_ALLOC_FAILED, &ip->i_flags);
+-		clear_nlink(inode);
+-		if (ip->i_no_addr)
+-			mark_inode_dirty(inode);
+ 		if (inode->i_state & I_NEW)
+ 			iget_failed(inode);
+ 		else
+diff --git a/fs/gfs2/inode.h b/fs/gfs2/inode.h
+index fd15d1c6b6fb1e..225b9d0038cd09 100644
+--- a/fs/gfs2/inode.h
++++ b/fs/gfs2/inode.h
+@@ -92,6 +92,7 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned type,
+ struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr,
+ 				  u64 no_formal_ino,
+ 				  unsigned int blktype);
++int gfs2_dinode_dealloc(struct gfs2_inode *ip);
+ 
+ int gfs2_inode_refresh(struct gfs2_inode *ip);
+ 
+diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
+index f9c5089783d24c..115c4ac457e90a 100644
+--- a/fs/gfs2/log.c
++++ b/fs/gfs2/log.c
+@@ -31,6 +31,7 @@
+ #include "dir.h"
+ #include "trace_gfs2.h"
+ #include "trans.h"
++#include "aops.h"
+ 
+ static void gfs2_log_shutdown(struct gfs2_sbd *sdp);
+ 
+@@ -131,7 +132,11 @@ __acquires(&sdp->sd_ail_lock)
+ 		if (!mapping)
+ 			continue;
+ 		spin_unlock(&sdp->sd_ail_lock);
+-		ret = mapping->a_ops->writepages(mapping, wbc);
++		BUG_ON(GFS2_SB(mapping->host) != sdp);
++		if (gfs2_is_jdata(GFS2_I(mapping->host)))
++			ret = gfs2_jdata_writeback(mapping, wbc);
++		else
++			ret = mapping->a_ops->writepages(mapping, wbc);
+ 		if (need_resched()) {
+ 			blk_finish_plug(plug);
+ 			cond_resched();
+diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
+index 5ecb857cf74e30..3b1303f97a3bc6 100644
+--- a/fs/gfs2/super.c
++++ b/fs/gfs2/super.c
+@@ -44,10 +44,10 @@
+ #include "xattr.h"
+ #include "lops.h"
+ 
+-enum dinode_demise {
+-	SHOULD_DELETE_DINODE,
+-	SHOULD_NOT_DELETE_DINODE,
+-	SHOULD_DEFER_EVICTION,
++enum evict_behavior {
++	EVICT_SHOULD_DELETE,
++	EVICT_SHOULD_SKIP_DELETE,
++	EVICT_SHOULD_DEFER_DELETE,
+ };
+ 
+ /**
+@@ -1175,74 +1175,6 @@ static int gfs2_show_options(struct seq_file *s, struct dentry *root)
+ 	return 0;
+ }
+ 
+-static void gfs2_final_release_pages(struct gfs2_inode *ip)
+-{
+-	struct inode *inode = &ip->i_inode;
+-	struct gfs2_glock *gl = ip->i_gl;
+-
+-	if (unlikely(!gl)) {
+-		/* This can only happen during incomplete inode creation. */
+-		BUG_ON(!test_bit(GIF_ALLOC_FAILED, &ip->i_flags));
+-		return;
+-	}
+-
+-	truncate_inode_pages(gfs2_glock2aspace(gl), 0);
+-	truncate_inode_pages(&inode->i_data, 0);
+-
+-	if (atomic_read(&gl->gl_revokes) == 0) {
+-		clear_bit(GLF_LFLUSH, &gl->gl_flags);
+-		clear_bit(GLF_DIRTY, &gl->gl_flags);
+-	}
+-}
+-
+-static int gfs2_dinode_dealloc(struct gfs2_inode *ip)
+-{
+-	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
+-	struct gfs2_rgrpd *rgd;
+-	struct gfs2_holder gh;
+-	int error;
+-
+-	if (gfs2_get_inode_blocks(&ip->i_inode) != 1) {
+-		gfs2_consist_inode(ip);
+-		return -EIO;
+-	}
+-
+-	gfs2_rindex_update(sdp);
+-
+-	error = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
+-	if (error)
+-		return error;
+-
+-	rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr, 1);
+-	if (!rgd) {
+-		gfs2_consist_inode(ip);
+-		error = -EIO;
+-		goto out_qs;
+-	}
+-
+-	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
+-				   LM_FLAG_NODE_SCOPE, &gh);
+-	if (error)
+-		goto out_qs;
+-
+-	error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS + RES_QUOTA,
+-				 sdp->sd_jdesc->jd_blocks);
+-	if (error)
+-		goto out_rg_gunlock;
+-
+-	gfs2_free_di(rgd, ip);
+-
+-	gfs2_final_release_pages(ip);
+-
+-	gfs2_trans_end(sdp);
+-
+-out_rg_gunlock:
+-	gfs2_glock_dq_uninit(&gh);
+-out_qs:
+-	gfs2_quota_unhold(ip);
+-	return error;
+-}
+-
+ /**
+  * gfs2_glock_put_eventually
+  * @gl:	The glock to put
+@@ -1315,23 +1247,21 @@ static bool gfs2_upgrade_iopen_glock(struct inode *inode)
+  *
+  * Returns: the fate of the dinode
+  */
+-static enum dinode_demise evict_should_delete(struct inode *inode,
+-					      struct gfs2_holder *gh)
++static enum evict_behavior evict_should_delete(struct inode *inode,
++					       struct gfs2_holder *gh)
+ {
+ 	struct gfs2_inode *ip = GFS2_I(inode);
+ 	struct super_block *sb = inode->i_sb;
+ 	struct gfs2_sbd *sdp = sb->s_fs_info;
+ 	int ret;
+ 
+-	if (unlikely(test_bit(GIF_ALLOC_FAILED, &ip->i_flags)))
+-		goto should_delete;
+-
+-	if (test_bit(GIF_DEFERRED_DELETE, &ip->i_flags))
+-		return SHOULD_DEFER_EVICTION;
++	if (gfs2_holder_initialized(&ip->i_iopen_gh) &&
++	    test_bit(GLF_DEFER_DELETE, &ip->i_iopen_gh.gh_gl->gl_flags))
++		return EVICT_SHOULD_DEFER_DELETE;
+ 
+ 	/* Deletes should never happen under memory pressure anymore.  */
+ 	if (WARN_ON_ONCE(current->flags & PF_MEMALLOC))
+-		return SHOULD_DEFER_EVICTION;
++		return EVICT_SHOULD_DEFER_DELETE;
+ 
+ 	/* Must not read inode block until block type has been verified */
+ 	ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, gh);
+@@ -1339,34 +1269,33 @@ static enum dinode_demise evict_should_delete(struct inode *inode,
+ 		glock_clear_object(ip->i_iopen_gh.gh_gl, ip);
+ 		ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
+ 		gfs2_glock_dq_uninit(&ip->i_iopen_gh);
+-		return SHOULD_DEFER_EVICTION;
++		return EVICT_SHOULD_DEFER_DELETE;
+ 	}
+ 
+ 	if (gfs2_inode_already_deleted(ip->i_gl, ip->i_no_formal_ino))
+-		return SHOULD_NOT_DELETE_DINODE;
++		return EVICT_SHOULD_SKIP_DELETE;
+ 	ret = gfs2_check_blk_type(sdp, ip->i_no_addr, GFS2_BLKST_UNLINKED);
+ 	if (ret)
+-		return SHOULD_NOT_DELETE_DINODE;
++		return EVICT_SHOULD_SKIP_DELETE;
+ 
+ 	ret = gfs2_instantiate(gh);
+ 	if (ret)
+-		return SHOULD_NOT_DELETE_DINODE;
++		return EVICT_SHOULD_SKIP_DELETE;
+ 
+ 	/*
+ 	 * The inode may have been recreated in the meantime.
+ 	 */
+ 	if (inode->i_nlink)
+-		return SHOULD_NOT_DELETE_DINODE;
++		return EVICT_SHOULD_SKIP_DELETE;
+ 
+-should_delete:
+ 	if (gfs2_holder_initialized(&ip->i_iopen_gh) &&
+ 	    test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
+ 		if (!gfs2_upgrade_iopen_glock(inode)) {
+ 			gfs2_holder_uninit(&ip->i_iopen_gh);
+-			return SHOULD_NOT_DELETE_DINODE;
++			return EVICT_SHOULD_SKIP_DELETE;
+ 		}
+ 	}
+-	return SHOULD_DELETE_DINODE;
++	return EVICT_SHOULD_DELETE;
+ }
+ 
+ /**
+@@ -1386,7 +1315,7 @@ static int evict_unlinked_inode(struct inode *inode)
+ 	}
+ 
+ 	if (ip->i_eattr) {
+-		ret = gfs2_ea_dealloc(ip);
++		ret = gfs2_ea_dealloc(ip, true);
+ 		if (ret)
+ 			goto out;
+ 	}
+@@ -1477,6 +1406,7 @@ static void gfs2_evict_inode(struct inode *inode)
+ 	struct gfs2_sbd *sdp = sb->s_fs_info;
+ 	struct gfs2_inode *ip = GFS2_I(inode);
+ 	struct gfs2_holder gh;
++	enum evict_behavior behavior;
+ 	int ret;
+ 
+ 	if (inode->i_nlink || sb_rdonly(sb) || !ip->i_no_addr)
+@@ -1491,10 +1421,10 @@ static void gfs2_evict_inode(struct inode *inode)
+ 		goto out;
+ 
+ 	gfs2_holder_mark_uninitialized(&gh);
+-	ret = evict_should_delete(inode, &gh);
+-	if (ret == SHOULD_DEFER_EVICTION)
++	behavior = evict_should_delete(inode, &gh);
++	if (behavior == EVICT_SHOULD_DEFER_DELETE)
+ 		goto out;
+-	if (ret == SHOULD_DELETE_DINODE)
++	if (behavior == EVICT_SHOULD_DELETE)
+ 		ret = evict_unlinked_inode(inode);
+ 	else
+ 		ret = evict_linked_inode(inode);
+diff --git a/fs/gfs2/trace_gfs2.h b/fs/gfs2/trace_gfs2.h
+index 8eae8d62a41322..43de603ab347e0 100644
+--- a/fs/gfs2/trace_gfs2.h
++++ b/fs/gfs2/trace_gfs2.h
+@@ -53,12 +53,19 @@
+ 	{(1UL << GLF_DIRTY),			"y" },		\
+ 	{(1UL << GLF_LFLUSH),			"f" },		\
+ 	{(1UL << GLF_INVALIDATE_IN_PROGRESS),	"i" },		\
++	{(1UL << GLF_PENDING_REPLY),		"R" },		\
+ 	{(1UL << GLF_HAVE_REPLY),		"r" },		\
+ 	{(1UL << GLF_INITIAL),			"a" },		\
+ 	{(1UL << GLF_HAVE_FROZEN_REPLY),	"F" },		\
+ 	{(1UL << GLF_LRU),			"L" },		\
+ 	{(1UL << GLF_OBJECT),			"o" },		\
+-	{(1UL << GLF_BLOCKING),			"b" })
++	{(1UL << GLF_BLOCKING),			"b" },		\
++	{(1UL << GLF_UNLOCKED),			"x" },		\
++	{(1UL << GLF_INSTANTIATE_NEEDED),	"n" },		\
++	{(1UL << GLF_INSTANTIATE_IN_PROG),	"N" },		\
++	{(1UL << GLF_TRY_TO_EVICT),		"e" },		\
++	{(1UL << GLF_VERIFY_DELETE),		"E" },		\
++	{(1UL << GLF_DEFER_DELETE),		"s" })
+ 
+ #ifndef NUMPTY
+ #define NUMPTY
+diff --git a/fs/gfs2/trans.c b/fs/gfs2/trans.c
+index 192213c7359af1..42cf8c5204db44 100644
+--- a/fs/gfs2/trans.c
++++ b/fs/gfs2/trans.c
+@@ -226,6 +226,27 @@ void gfs2_trans_add_data(struct gfs2_glock *gl, struct buffer_head *bh)
+ 	unlock_buffer(bh);
+ }
+ 
++void gfs2_trans_add_databufs(struct gfs2_glock *gl, struct folio *folio,
++			     size_t from, size_t len)
++{
++	struct buffer_head *head = folio_buffers(folio);
++	unsigned int bsize = head->b_size;
++	struct buffer_head *bh;
++	size_t to = from + len;
++	size_t start, end;
++
++	for (bh = head, start = 0; bh != head || !start;
++	     bh = bh->b_this_page, start = end) {
++		end = start + bsize;
++		if (end <= from)
++			continue;
++		if (start >= to)
++			break;
++		set_buffer_uptodate(bh);
++		gfs2_trans_add_data(gl, bh);
++	}
++}
++
+ void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh)
+ {
+ 
+diff --git a/fs/gfs2/trans.h b/fs/gfs2/trans.h
+index f8ce5302280d31..790c55f59e6121 100644
+--- a/fs/gfs2/trans.h
++++ b/fs/gfs2/trans.h
+@@ -42,6 +42,8 @@ int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks,
+ 
+ void gfs2_trans_end(struct gfs2_sbd *sdp);
+ void gfs2_trans_add_data(struct gfs2_glock *gl, struct buffer_head *bh);
++void gfs2_trans_add_databufs(struct gfs2_glock *gl, struct folio *folio,
++			     size_t from, size_t len);
+ void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh);
+ void gfs2_trans_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd);
+ void gfs2_trans_remove_revoke(struct gfs2_sbd *sdp, u64 blkno, unsigned int len);
+diff --git a/fs/gfs2/xattr.c b/fs/gfs2/xattr.c
+index 17ae5070a90e67..df9c93de94c793 100644
+--- a/fs/gfs2/xattr.c
++++ b/fs/gfs2/xattr.c
+@@ -1383,7 +1383,7 @@ static int ea_dealloc_indirect(struct gfs2_inode *ip)
+ 	return error;
+ }
+ 
+-static int ea_dealloc_block(struct gfs2_inode *ip)
++static int ea_dealloc_block(struct gfs2_inode *ip, bool initialized)
+ {
+ 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
+ 	struct gfs2_rgrpd *rgd;
+@@ -1416,7 +1416,7 @@ static int ea_dealloc_block(struct gfs2_inode *ip)
+ 	ip->i_eattr = 0;
+ 	gfs2_add_inode_blocks(&ip->i_inode, -1);
+ 
+-	if (likely(!test_bit(GIF_ALLOC_FAILED, &ip->i_flags))) {
++	if (initialized) {
+ 		error = gfs2_meta_inode_buffer(ip, &dibh);
+ 		if (!error) {
+ 			gfs2_trans_add_meta(ip->i_gl, dibh);
+@@ -1435,11 +1435,12 @@ static int ea_dealloc_block(struct gfs2_inode *ip)
+ /**
+  * gfs2_ea_dealloc - deallocate the extended attribute fork
+  * @ip: the inode
++ * @initialized: xattrs have been initialized
+  *
+  * Returns: errno
+  */
+ 
+-int gfs2_ea_dealloc(struct gfs2_inode *ip)
++int gfs2_ea_dealloc(struct gfs2_inode *ip, bool initialized)
+ {
+ 	int error;
+ 
+@@ -1451,7 +1452,7 @@ int gfs2_ea_dealloc(struct gfs2_inode *ip)
+ 	if (error)
+ 		return error;
+ 
+-	if (likely(!test_bit(GIF_ALLOC_FAILED, &ip->i_flags))) {
++	if (initialized) {
+ 		error = ea_foreach(ip, ea_dealloc_unstuffed, NULL);
+ 		if (error)
+ 			goto out_quota;
+@@ -1463,7 +1464,7 @@ int gfs2_ea_dealloc(struct gfs2_inode *ip)
+ 		}
+ 	}
+ 
+-	error = ea_dealloc_block(ip);
++	error = ea_dealloc_block(ip, initialized);
+ 
+ out_quota:
+ 	gfs2_quota_unhold(ip);
+diff --git a/fs/gfs2/xattr.h b/fs/gfs2/xattr.h
+index eb12eb7e37c194..3c9788e0e13750 100644
+--- a/fs/gfs2/xattr.h
++++ b/fs/gfs2/xattr.h
+@@ -54,7 +54,7 @@ int __gfs2_xattr_set(struct inode *inode, const char *name,
+ 		     const void *value, size_t size,
+ 		     int flags, int type);
+ ssize_t gfs2_listxattr(struct dentry *dentry, char *buffer, size_t size);
+-int gfs2_ea_dealloc(struct gfs2_inode *ip);
++int gfs2_ea_dealloc(struct gfs2_inode *ip, bool initialized);
+ 
+ /* Exported to acl.c */
+ 
+diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
+index 1943c8bd479bf6..2d9d5dfa19b87c 100644
+--- a/fs/kernfs/file.c
++++ b/fs/kernfs/file.c
+@@ -928,7 +928,7 @@ static void kernfs_notify_workfn(struct work_struct *work)
+ 		if (!inode)
+ 			continue;
+ 
+-		name = (struct qstr)QSTR_INIT(kn->name, strlen(kn->name));
++		name = QSTR(kn->name);
+ 		parent = kernfs_get_parent(kn);
+ 		if (parent) {
+ 			p_inode = ilookup(info->sb, kernfs_ino(parent));
+diff --git a/fs/netfs/buffered_write.c b/fs/netfs/buffered_write.c
+index b3910dfcb56d3e..896d1d4219ed9f 100644
+--- a/fs/netfs/buffered_write.c
++++ b/fs/netfs/buffered_write.c
+@@ -64,6 +64,7 @@ static void netfs_update_i_size(struct netfs_inode *ctx, struct inode *inode,
+ 		return;
+ 	}
+ 
++	spin_lock(&inode->i_lock);
+ 	i_size_write(inode, pos);
+ #if IS_ENABLED(CONFIG_FSCACHE)
+ 	fscache_update_cookie(ctx->cache, NULL, &pos);
+@@ -77,6 +78,7 @@ static void netfs_update_i_size(struct netfs_inode *ctx, struct inode *inode,
+ 					DIV_ROUND_UP(pos, SECTOR_SIZE),
+ 					inode->i_blocks + add);
+ 	}
++	spin_unlock(&inode->i_lock);
+ }
+ 
+ /**
+diff --git a/fs/netfs/direct_write.c b/fs/netfs/direct_write.c
+index 26cf9c94deebb3..8fbfaf71c154cc 100644
+--- a/fs/netfs/direct_write.c
++++ b/fs/netfs/direct_write.c
+@@ -14,13 +14,17 @@ static void netfs_cleanup_dio_write(struct netfs_io_request *wreq)
+ 	struct inode *inode = wreq->inode;
+ 	unsigned long long end = wreq->start + wreq->transferred;
+ 
+-	if (!wreq->error &&
+-	    i_size_read(inode) < end) {
++	if (wreq->error || end <= i_size_read(inode))
++		return;
++
++	spin_lock(&inode->i_lock);
++	if (end > i_size_read(inode)) {
+ 		if (wreq->netfs_ops->update_i_size)
+ 			wreq->netfs_ops->update_i_size(inode, end);
+ 		else
+ 			i_size_write(inode, end);
+ 	}
++	spin_unlock(&inode->i_lock);
+ }
+ 
+ /*
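
Both netfs hunks above use the same check/lock/recheck shape when extending i_size: a cheap unlocked comparison filters the common case, and the comparison is repeated under inode->i_lock so racing writers can never move the size backwards. A minimal userspace sketch of that pattern, with a pthread mutex standing in for i_lock and illustrative names throughout:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t i_lock = PTHREAD_MUTEX_INITIALIZER;
static long long i_size;	/* stands in for inode->i_size */

static void update_i_size(long long end)
{
	if (end <= i_size)	/* unlocked fast path, as in the patch */
		return;

	pthread_mutex_lock(&i_lock);
	if (end > i_size)	/* recheck: another writer may have won */
		i_size = end;
	pthread_mutex_unlock(&i_lock);
}

int main(void)
{
	update_i_size(4096);
	update_i_size(1024);	/* no-op: would shrink the size */
	printf("i_size=%lld\n", i_size);	/* prints 4096 */
	return 0;
}
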
+diff --git a/fs/netfs/write_collect.c b/fs/netfs/write_collect.c
+index 412d4da7422701..7cb21da40a0a48 100644
+--- a/fs/netfs/write_collect.c
++++ b/fs/netfs/write_collect.c
+@@ -176,9 +176,10 @@ static void netfs_retry_write_stream(struct netfs_io_request *wreq,
+ 			if (test_bit(NETFS_SREQ_FAILED, &subreq->flags))
+ 				break;
+ 			if (__test_and_clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) {
+-				struct iov_iter source = subreq->io_iter;
++				struct iov_iter source;
+ 
+-				iov_iter_revert(&source, subreq->len - source.count);
++				netfs_reset_iter(subreq);
++				source = subreq->io_iter;
+ 				__set_bit(NETFS_SREQ_RETRYING, &subreq->flags);
+ 				netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
+ 				netfs_reissue_write(stream, subreq, &source);
+diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
+index 8f7ea4076653db..bf96f7a8900c10 100644
+--- a/fs/nfs/flexfilelayout/flexfilelayout.c
++++ b/fs/nfs/flexfilelayout/flexfilelayout.c
+@@ -1104,6 +1104,7 @@ static void ff_layout_reset_read(struct nfs_pgio_header *hdr)
+ }
+ 
+ static int ff_layout_async_handle_error_v4(struct rpc_task *task,
++					   u32 op_status,
+ 					   struct nfs4_state *state,
+ 					   struct nfs_client *clp,
+ 					   struct pnfs_layout_segment *lseg,
+@@ -1114,32 +1115,42 @@ static int ff_layout_async_handle_error_v4(struct rpc_task *task,
+ 	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
+ 	struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table;
+ 
+-	switch (task->tk_status) {
+-	case -NFS4ERR_BADSESSION:
+-	case -NFS4ERR_BADSLOT:
+-	case -NFS4ERR_BAD_HIGH_SLOT:
+-	case -NFS4ERR_DEADSESSION:
+-	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
+-	case -NFS4ERR_SEQ_FALSE_RETRY:
+-	case -NFS4ERR_SEQ_MISORDERED:
++	switch (op_status) {
++	case NFS4_OK:
++	case NFS4ERR_NXIO:
++		break;
++	case NFSERR_PERM:
++		if (!task->tk_xprt)
++			break;
++		xprt_force_disconnect(task->tk_xprt);
++		goto out_retry;
++	case NFS4ERR_BADSESSION:
++	case NFS4ERR_BADSLOT:
++	case NFS4ERR_BAD_HIGH_SLOT:
++	case NFS4ERR_DEADSESSION:
++	case NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
++	case NFS4ERR_SEQ_FALSE_RETRY:
++	case NFS4ERR_SEQ_MISORDERED:
+ 		dprintk("%s ERROR %d, Reset session. Exchangeid "
+ 			"flags 0x%x\n", __func__, task->tk_status,
+ 			clp->cl_exchange_flags);
+ 		nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
+-		break;
+-	case -NFS4ERR_DELAY:
+-	case -NFS4ERR_GRACE:
++		goto out_retry;
++	case NFS4ERR_DELAY:
++		nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
++		fallthrough;
++	case NFS4ERR_GRACE:
+ 		rpc_delay(task, FF_LAYOUT_POLL_RETRY_MAX);
+-		break;
+-	case -NFS4ERR_RETRY_UNCACHED_REP:
+-		break;
++		goto out_retry;
++	case NFS4ERR_RETRY_UNCACHED_REP:
++		goto out_retry;
+ 	/* Invalidate Layout errors */
+-	case -NFS4ERR_PNFS_NO_LAYOUT:
+-	case -ESTALE:           /* mapped NFS4ERR_STALE */
+-	case -EBADHANDLE:       /* mapped NFS4ERR_BADHANDLE */
+-	case -EISDIR:           /* mapped NFS4ERR_ISDIR */
+-	case -NFS4ERR_FHEXPIRED:
+-	case -NFS4ERR_WRONG_TYPE:
++	case NFS4ERR_PNFS_NO_LAYOUT:
++	case NFS4ERR_STALE:
++	case NFS4ERR_BADHANDLE:
++	case NFS4ERR_ISDIR:
++	case NFS4ERR_FHEXPIRED:
++	case NFS4ERR_WRONG_TYPE:
+ 		dprintk("%s Invalid layout error %d\n", __func__,
+ 			task->tk_status);
+ 		/*
+@@ -1152,6 +1163,11 @@ static int ff_layout_async_handle_error_v4(struct rpc_task *task,
+ 		pnfs_destroy_layout(NFS_I(inode));
+ 		rpc_wake_up(&tbl->slot_tbl_waitq);
+ 		goto reset;
++	default:
++		break;
++	}
++
++	switch (task->tk_status) {
+ 	/* RPC connection errors */
+ 	case -ECONNREFUSED:
+ 	case -EHOSTDOWN:
+@@ -1167,26 +1183,56 @@ static int ff_layout_async_handle_error_v4(struct rpc_task *task,
+ 		nfs4_delete_deviceid(devid->ld, devid->nfs_client,
+ 				&devid->deviceid);
+ 		rpc_wake_up(&tbl->slot_tbl_waitq);
+-		fallthrough;
++		break;
+ 	default:
+-		if (ff_layout_avoid_mds_available_ds(lseg))
+-			return -NFS4ERR_RESET_TO_PNFS;
+-reset:
+-		dprintk("%s Retry through MDS. Error %d\n", __func__,
+-			task->tk_status);
+-		return -NFS4ERR_RESET_TO_MDS;
++		break;
+ 	}
++
++	if (ff_layout_avoid_mds_available_ds(lseg))
++		return -NFS4ERR_RESET_TO_PNFS;
++reset:
++	dprintk("%s Retry through MDS. Error %d\n", __func__,
++		task->tk_status);
++	return -NFS4ERR_RESET_TO_MDS;
++
++out_retry:
+ 	task->tk_status = 0;
+ 	return -EAGAIN;
+ }
+ 
+ /* Retry all errors through either pNFS or MDS except for -EJUKEBOX */
+ static int ff_layout_async_handle_error_v3(struct rpc_task *task,
++					   u32 op_status,
++					   struct nfs_client *clp,
+ 					   struct pnfs_layout_segment *lseg,
+ 					   u32 idx)
+ {
+ 	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
+ 
++	switch (op_status) {
++	case NFS_OK:
++	case NFSERR_NXIO:
++		break;
++	case NFSERR_PERM:
++		if (!task->tk_xprt)
++			break;
++		xprt_force_disconnect(task->tk_xprt);
++		goto out_retry;
++	case NFSERR_ACCES:
++	case NFSERR_BADHANDLE:
++	case NFSERR_FBIG:
++	case NFSERR_IO:
++	case NFSERR_NOSPC:
++	case NFSERR_ROFS:
++	case NFSERR_STALE:
++		goto out_reset_to_pnfs;
++	case NFSERR_JUKEBOX:
++		nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
++		goto out_retry;
++	default:
++		break;
++	}
++
+ 	switch (task->tk_status) {
+ 	/* File access problems. Don't mark the device as unavailable */
+ 	case -EACCES:
+@@ -1205,6 +1251,7 @@ static int ff_layout_async_handle_error_v3(struct rpc_task *task,
+ 		nfs4_delete_deviceid(devid->ld, devid->nfs_client,
+ 				&devid->deviceid);
+ 	}
++out_reset_to_pnfs:
+ 	/* FIXME: Need to prevent infinite looping here. */
+ 	return -NFS4ERR_RESET_TO_PNFS;
+ out_retry:
+@@ -1215,6 +1262,7 @@ static int ff_layout_async_handle_error_v3(struct rpc_task *task,
+ }
+ 
+ static int ff_layout_async_handle_error(struct rpc_task *task,
++					u32 op_status,
+ 					struct nfs4_state *state,
+ 					struct nfs_client *clp,
+ 					struct pnfs_layout_segment *lseg,
+@@ -1233,10 +1281,11 @@ static int ff_layout_async_handle_error(struct rpc_task *task,
+ 
+ 	switch (vers) {
+ 	case 3:
+-		return ff_layout_async_handle_error_v3(task, lseg, idx);
+-	case 4:
+-		return ff_layout_async_handle_error_v4(task, state, clp,
++		return ff_layout_async_handle_error_v3(task, op_status, clp,
+ 						       lseg, idx);
++	case 4:
++		return ff_layout_async_handle_error_v4(task, op_status, state,
++						       clp, lseg, idx);
+ 	default:
+ 		/* should never happen */
+ 		WARN_ON_ONCE(1);
+@@ -1289,6 +1338,7 @@ static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
+ 	switch (status) {
+ 	case NFS4ERR_DELAY:
+ 	case NFS4ERR_GRACE:
++	case NFS4ERR_PERM:
+ 		break;
+ 	case NFS4ERR_NXIO:
+ 		ff_layout_mark_ds_unreachable(lseg, idx);
+@@ -1321,7 +1371,8 @@ static int ff_layout_read_done_cb(struct rpc_task *task,
+ 		trace_ff_layout_read_error(hdr);
+ 	}
+ 
+-	err = ff_layout_async_handle_error(task, hdr->args.context->state,
++	err = ff_layout_async_handle_error(task, hdr->res.op_status,
++					   hdr->args.context->state,
+ 					   hdr->ds_clp, hdr->lseg,
+ 					   hdr->pgio_mirror_idx);
+ 
+@@ -1491,7 +1542,8 @@ static int ff_layout_write_done_cb(struct rpc_task *task,
+ 		trace_ff_layout_write_error(hdr);
+ 	}
+ 
+-	err = ff_layout_async_handle_error(task, hdr->args.context->state,
++	err = ff_layout_async_handle_error(task, hdr->res.op_status,
++					   hdr->args.context->state,
+ 					   hdr->ds_clp, hdr->lseg,
+ 					   hdr->pgio_mirror_idx);
+ 
+@@ -1537,8 +1589,9 @@ static int ff_layout_commit_done_cb(struct rpc_task *task,
+ 		trace_ff_layout_commit_error(data);
+ 	}
+ 
+-	err = ff_layout_async_handle_error(task, NULL, data->ds_clp,
+-					   data->lseg, data->ds_commit_index);
++	err = ff_layout_async_handle_error(task, data->res.op_status,
++					   NULL, data->ds_clp, data->lseg,
++					   data->ds_commit_index);
+ 
+ 	trace_nfs4_pnfs_commit_ds(data, err);
+ 	switch (err) {
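
The flexfiles rework above changes the dispatch order: the NFS-level op_status is classified first, and only when it does not decide the outcome does the code fall through to the RPC transport status in task->tk_status. A compact stand-in for that two-stage shape; the error codes and actions here are invented for illustration:

#include <stdio.h>

enum action { ACT_OK, ACT_RETRY, ACT_RESET };

static enum action handle_error(int op_status, int tk_status)
{
	switch (op_status) {		/* protocol-level result first */
	case 0:				/* NFS4_OK: defer to the RPC layer */
		break;
	case 10008:			/* e.g. NFS4ERR_DELAY: back off, retry */
		return ACT_RETRY;
	default:
		return ACT_RESET;	/* invalidating server error: reset */
	}

	switch (tk_status) {		/* then transport-level result */
	case 0:
		return ACT_OK;
	case -111:			/* e.g. -ECONNREFUSED */
	default:
		return ACT_RESET;
	}
}

int main(void)
{
	printf("%d %d %d\n", handle_error(0, 0),
	       handle_error(10008, 0), handle_error(0, -111));
	return 0;
}
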
+diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
+index 16607b24ab9c15..8827cb00f86d52 100644
+--- a/fs/nfs/inode.c
++++ b/fs/nfs/inode.c
+@@ -2586,15 +2586,26 @@ EXPORT_SYMBOL_GPL(nfs_net_id);
+ static int nfs_net_init(struct net *net)
+ {
+ 	struct nfs_net *nn = net_generic(net, nfs_net_id);
++	int err;
+ 
+ 	nfs_clients_init(net);
+ 
+ 	if (!rpc_proc_register(net, &nn->rpcstats)) {
+-		nfs_clients_exit(net);
+-		return -ENOMEM;
++		err = -ENOMEM;
++		goto err_proc_rpc;
+ 	}
+ 
+-	return nfs_fs_proc_net_init(net);
++	err = nfs_fs_proc_net_init(net);
++	if (err)
++		goto err_proc_nfs;
++
++	return 0;
++
++err_proc_nfs:
++	rpc_proc_unregister(net, "nfs");
++err_proc_rpc:
++	nfs_clients_exit(net);
++	return err;
+ }
+ 
+ static void nfs_net_exit(struct net *net)
+diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
+index 683e09be25adf3..6b888e9ff394a5 100644
+--- a/fs/nfs/pnfs.c
++++ b/fs/nfs/pnfs.c
+@@ -2051,8 +2051,10 @@ static void nfs_layoutget_begin(struct pnfs_layout_hdr *lo)
+ static void nfs_layoutget_end(struct pnfs_layout_hdr *lo)
+ {
+ 	if (atomic_dec_and_test(&lo->plh_outstanding) &&
+-	    test_and_clear_bit(NFS_LAYOUT_DRAIN, &lo->plh_flags))
++	    test_and_clear_bit(NFS_LAYOUT_DRAIN, &lo->plh_flags)) {
++		smp_mb__after_atomic();
+ 		wake_up_bit(&lo->plh_flags, NFS_LAYOUT_DRAIN);
++	}
+ }
+ 
+ static bool pnfs_is_first_layoutget(struct pnfs_layout_hdr *lo)
+diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
+index c66655adecb2c9..b74637ae9085a5 100644
+--- a/fs/smb/client/cifsglob.h
++++ b/fs/smb/client/cifsglob.h
+@@ -743,6 +743,7 @@ struct TCP_Server_Info {
+ 	__le32 session_key_id; /* retrieved from negotiate response and send in session setup request */
+ 	struct session_key session_key;
+ 	unsigned long lstrp; /* when we got last response from this server */
++	unsigned long neg_start; /* when negotiate started (jiffies) */
+ 	struct cifs_secmech secmech; /* crypto sec mech functs, descriptors */
+ #define	CIFS_NEGFLAVOR_UNENCAP	1	/* wct == 17, but no ext_sec */
+ #define	CIFS_NEGFLAVOR_EXTENDED	2	/* wct == 17, ext_sec bit set */
+@@ -1275,6 +1276,7 @@ struct cifs_tcon {
+ 	bool use_persistent:1; /* use persistent instead of durable handles */
+ 	bool no_lease:1;    /* Do not request leases on files or directories */
+ 	bool use_witness:1; /* use witness protocol */
++	bool dummy:1; /* dummy tcon used for reconnecting channels */
+ 	__le32 capabilities;
+ 	__u32 share_flags;
+ 	__u32 maximal_access;
+diff --git a/fs/smb/client/cifsproto.h b/fs/smb/client/cifsproto.h
+index 6e938b17875f5c..fee7bc9848a36a 100644
+--- a/fs/smb/client/cifsproto.h
++++ b/fs/smb/client/cifsproto.h
+@@ -136,6 +136,7 @@ extern int SendReceiveBlockingLock(const unsigned int xid,
+ 			struct smb_hdr *out_buf,
+ 			int *bytes_returned);
+ 
++void smb2_query_server_interfaces(struct work_struct *work);
+ void
+ cifs_signal_cifsd_for_reconnect(struct TCP_Server_Info *server,
+ 				      bool all_channels);
+diff --git a/fs/smb/client/cifssmb.c b/fs/smb/client/cifssmb.c
+index d6ba55d4720d20..e3d9367eaec373 100644
+--- a/fs/smb/client/cifssmb.c
++++ b/fs/smb/client/cifssmb.c
+@@ -1310,6 +1310,7 @@ cifs_readv_callback(struct mid_q_entry *mid)
+ 		break;
+ 	case MID_REQUEST_SUBMITTED:
+ 	case MID_RETRY_NEEDED:
++		__set_bit(NETFS_SREQ_NEED_RETRY, &rdata->subreq.flags);
+ 		rdata->result = -EAGAIN;
+ 		if (server->sign && rdata->got_bytes)
+ 			/* reset byte count since we cannot verify the signature */
+@@ -1681,6 +1682,7 @@ cifs_writev_callback(struct mid_q_entry *mid)
+ 		break;
+ 	case MID_REQUEST_SUBMITTED:
+ 	case MID_RETRY_NEEDED:
++		__set_bit(NETFS_SREQ_NEED_RETRY, &wdata->subreq.flags);
+ 		result = -EAGAIN;
+ 		break;
+ 	default:
+diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
+index 9275e0d1e2f640..ebc380b18da737 100644
+--- a/fs/smb/client/connect.c
++++ b/fs/smb/client/connect.c
+@@ -113,7 +113,7 @@ static int reconn_set_ipaddr_from_hostname(struct TCP_Server_Info *server)
+ 	return rc;
+ }
+ 
+-static void smb2_query_server_interfaces(struct work_struct *work)
++void smb2_query_server_interfaces(struct work_struct *work)
+ {
+ 	int rc;
+ 	int xid;
+@@ -677,12 +677,12 @@ server_unresponsive(struct TCP_Server_Info *server)
+ 	/*
+ 	 * If we're in the process of mounting a share or reconnecting a session
+ 	 * and the server abruptly shut down (e.g. socket wasn't closed, packet
+-	 * had been ACK'ed but no SMB response), don't wait longer than 20s to
+-	 * negotiate protocol.
++	 * had been ACK'ed but no SMB response), don't wait longer than 20s from
++	 * when negotiate actually started.
+ 	 */
+ 	spin_lock(&server->srv_lock);
+ 	if (server->tcpStatus == CifsInNegotiate &&
+-	    time_after(jiffies, server->lstrp + 20 * HZ)) {
++	    time_after(jiffies, server->neg_start + 20 * HZ)) {
+ 		spin_unlock(&server->srv_lock);
+ 		cifs_reconnect(server, false);
+ 		return true;
+@@ -2819,20 +2819,14 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
+ 	tcon->max_cached_dirs = ctx->max_cached_dirs;
+ 	tcon->nodelete = ctx->nodelete;
+ 	tcon->local_lease = ctx->local_lease;
+-	INIT_LIST_HEAD(&tcon->pending_opens);
+ 	tcon->status = TID_GOOD;
+ 
+-	INIT_DELAYED_WORK(&tcon->query_interfaces,
+-			  smb2_query_server_interfaces);
+ 	if (ses->server->dialect >= SMB30_PROT_ID &&
+ 	    (ses->server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
+ 		/* schedule query interfaces poll */
+ 		queue_delayed_work(cifsiod_wq, &tcon->query_interfaces,
+ 				   (SMB_INTERFACE_POLL_INTERVAL * HZ));
+ 	}
+-#ifdef CONFIG_CIFS_DFS_UPCALL
+-	INIT_DELAYED_WORK(&tcon->dfs_cache_work, dfs_cache_refresh);
+-#endif
+ 	spin_lock(&cifs_tcp_ses_lock);
+ 	list_add(&tcon->tcon_list, &ses->tcon_list);
+ 	spin_unlock(&cifs_tcp_ses_lock);
+@@ -4009,6 +4003,7 @@ cifs_negotiate_protocol(const unsigned int xid, struct cifs_ses *ses,
+ 
+ 	server->lstrp = jiffies;
+ 	server->tcpStatus = CifsInNegotiate;
++	server->neg_start = jiffies;
+ 	spin_unlock(&server->srv_lock);
+ 
+ 	rc = server->ops->negotiate(xid, ses, server);
+diff --git a/fs/smb/client/misc.c b/fs/smb/client/misc.c
+index 5122f3895dfc29..57b6b191293eea 100644
+--- a/fs/smb/client/misc.c
++++ b/fs/smb/client/misc.c
+@@ -148,6 +148,12 @@ tcon_info_alloc(bool dir_leases_enabled, enum smb3_tcon_ref_trace trace)
+ #ifdef CONFIG_CIFS_DFS_UPCALL
+ 	INIT_LIST_HEAD(&ret_buf->dfs_ses_list);
+ #endif
++	INIT_LIST_HEAD(&ret_buf->pending_opens);
++	INIT_DELAYED_WORK(&ret_buf->query_interfaces,
++			  smb2_query_server_interfaces);
++#ifdef CONFIG_CIFS_DFS_UPCALL
++	INIT_DELAYED_WORK(&ret_buf->dfs_cache_work, dfs_cache_refresh);
++#endif
+ 
+ 	return ret_buf;
+ }
+diff --git a/fs/smb/client/readdir.c b/fs/smb/client/readdir.c
+index c3feb26fcfd03a..7bf3214117a91e 100644
+--- a/fs/smb/client/readdir.c
++++ b/fs/smb/client/readdir.c
+@@ -263,7 +263,7 @@ cifs_posix_to_fattr(struct cifs_fattr *fattr, struct smb2_posix_info *info,
+ 	/* The Mode field in the response can now include the file type as well */
+ 	fattr->cf_mode = wire_mode_to_posix(le32_to_cpu(info->Mode),
+ 					    fattr->cf_cifsattrs & ATTR_DIRECTORY);
+-	fattr->cf_dtype = S_DT(le32_to_cpu(info->Mode));
++	fattr->cf_dtype = S_DT(fattr->cf_mode);
+ 
+ 	switch (fattr->cf_mode & S_IFMT) {
+ 	case S_IFLNK:
+diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
+index c6ae395a46925c..d514f95deb7e76 100644
+--- a/fs/smb/client/smb2pdu.c
++++ b/fs/smb/client/smb2pdu.c
+@@ -440,9 +440,9 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
+ 		free_xid(xid);
+ 		ses->flags &= ~CIFS_SES_FLAGS_PENDING_QUERY_INTERFACES;
+ 
+-		/* regardless of rc value, setup polling */
+-		queue_delayed_work(cifsiod_wq, &tcon->query_interfaces,
+-				   (SMB_INTERFACE_POLL_INTERVAL * HZ));
++		if (!tcon->ipc && !tcon->dummy)
++			queue_delayed_work(cifsiod_wq, &tcon->query_interfaces,
++					   (SMB_INTERFACE_POLL_INTERVAL * HZ));
+ 
+ 		mutex_unlock(&ses->session_mutex);
+ 
+@@ -4234,10 +4234,8 @@ void smb2_reconnect_server(struct work_struct *work)
+ 		}
+ 		goto done;
+ 	}
+-
+ 	tcon->status = TID_GOOD;
+-	tcon->retry = false;
+-	tcon->need_reconnect = false;
++	tcon->dummy = true;
+ 
+ 	/* now reconnect sessions for necessary channels */
+ 	list_for_each_entry_safe(ses, ses2, &tmp_ses_list, rlist) {
+@@ -4871,6 +4869,7 @@ smb2_writev_callback(struct mid_q_entry *mid)
+ 		break;
+ 	case MID_REQUEST_SUBMITTED:
+ 	case MID_RETRY_NEEDED:
++		__set_bit(NETFS_SREQ_NEED_RETRY, &wdata->subreq.flags);
+ 		result = -EAGAIN;
+ 		break;
+ 	case MID_RESPONSE_MALFORMED:
+diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
+index 50eeb5b86ed70b..fb33458f2fc77b 100644
+--- a/include/linux/bpf_verifier.h
++++ b/include/linux/bpf_verifier.h
+@@ -349,7 +349,7 @@ struct bpf_func_state {
+ 
+ #define MAX_CALL_FRAMES 8
+ 
+-/* instruction history flags, used in bpf_jmp_history_entry.flags field */
++/* instruction history flags, used in bpf_insn_hist_entry.flags field */
+ enum {
+ 	/* instruction references stack slot through PTR_TO_STACK register;
+ 	 * we also store stack's frame number in lower 3 bits (MAX_CALL_FRAMES is 8)
+@@ -361,18 +361,22 @@ enum {
+ 	INSN_F_SPI_MASK = 0x3f, /* 6 bits */
+ 	INSN_F_SPI_SHIFT = 3, /* shifted 3 bits to the left */
+ 
+-	INSN_F_STACK_ACCESS = BIT(9), /* we need 10 bits total */
++	INSN_F_STACK_ACCESS = BIT(9),
++
++	INSN_F_DST_REG_STACK = BIT(10), /* dst_reg is PTR_TO_STACK */
++	INSN_F_SRC_REG_STACK = BIT(11), /* src_reg is PTR_TO_STACK */
++	/* total 12 bits are used now. */
+ };
+ 
+ static_assert(INSN_F_FRAMENO_MASK + 1 >= MAX_CALL_FRAMES);
+ static_assert(INSN_F_SPI_MASK + 1 >= MAX_BPF_STACK / 8);
+ 
+-struct bpf_jmp_history_entry {
++struct bpf_insn_hist_entry {
+ 	u32 idx;
+ 	/* insn idx can't be bigger than 1 million */
+-	u32 prev_idx : 22;
+-	/* special flags, e.g., whether insn is doing register stack spill/load */
+-	u32 flags : 10;
++	u32 prev_idx : 20;
++	/* special INSN_F_xxx flags */
++	u32 flags : 12;
+ 	/* additional registers that need precision tracking when this
+ 	 * jump is backtracked, vector of six 10-bit records
+ 	 */
+@@ -458,13 +462,14 @@ struct bpf_verifier_state {
+ 	 * See get_loop_entry() for more information.
+ 	 */
+ 	struct bpf_verifier_state *loop_entry;
+-	/* jmp history recorded from first to last.
+-	 * backtracking is using it to go from last to first.
+-	 * For most states jmp_history_cnt is [0-3].
++	/* Sub-range of env->insn_hist[] corresponding to this state's
++	 * instruction history.
++	 * Backtracking is using it to go from last to first.
++	 * For most states instruction history is short, 0-3 instructions.
+ 	 * For loops can go up to ~40.
+ 	 */
+-	struct bpf_jmp_history_entry *jmp_history;
+-	u32 jmp_history_cnt;
++	u32 insn_hist_start;
++	u32 insn_hist_end;
+ 	u32 dfs_depth;
+ 	u32 callback_unroll_depth;
+ 	u32 may_goto_depth;
+@@ -748,7 +753,9 @@ struct bpf_verifier_env {
+ 		int cur_stack;
+ 	} cfg;
+ 	struct backtrack_state bt;
+-	struct bpf_jmp_history_entry *cur_hist_ent;
++	struct bpf_insn_hist_entry *insn_hist;
++	struct bpf_insn_hist_entry *cur_hist_ent;
++	u32 insn_hist_cap;
+ 	u32 pass_cnt; /* number of times do_check() was called */
+ 	u32 subprog_cnt;
+ 	/* number of instructions analyzed by the verifier */
+diff --git a/include/linux/cpu.h b/include/linux/cpu.h
+index cc668a054d0960..4342b569490952 100644
+--- a/include/linux/cpu.h
++++ b/include/linux/cpu.h
+@@ -79,6 +79,7 @@ extern ssize_t cpu_show_reg_file_data_sampling(struct device *dev,
+ 					       struct device_attribute *attr, char *buf);
+ extern ssize_t cpu_show_indirect_target_selection(struct device *dev,
+ 						  struct device_attribute *attr, char *buf);
++extern ssize_t cpu_show_tsa(struct device *dev, struct device_attribute *attr, char *buf);
+ 
+ extern __printf(4, 5)
+ struct device *cpu_device_create(struct device *parent, void *drvdata,
+diff --git a/include/linux/dcache.h b/include/linux/dcache.h
+index bff956f7b2b984..3d53a60145911e 100644
+--- a/include/linux/dcache.h
++++ b/include/linux/dcache.h
+@@ -57,6 +57,7 @@ struct qstr {
+ };
+ 
+ #define QSTR_INIT(n,l) { { { .len = l } }, .name = n }
++#define QSTR(n) (struct qstr)QSTR_INIT(n, strlen(n))
+ 
+ extern const struct qstr empty_name;
+ extern const struct qstr slash_name;
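
The new QSTR() macro is what lets the erofs, file_table and kernfs call sites earlier in this patch collapse their separate QSTR_INIT(name, strlen(name)) temporaries into one expression. A self-contained illustration follows; the struct is trimmed to the two fields the macro touches (the kernel's qstr also carries a hash/len union):

#include <stdio.h>
#include <string.h>

struct qstr {
	unsigned int len;
	const char *name;
};

#define QSTR_INIT(n, l)	{ .len = (l), .name = (n) }
#define QSTR(n)		(struct qstr)QSTR_INIT(n, strlen(n))

int main(void)
{
	/* before: struct qstr this = QSTR_INIT(name, strlen(name)); */
	struct qstr q = QSTR("trusted.overlay.opaque");

	printf("%.*s (%u bytes)\n", (int)q.len, q.name, q.len);
	return 0;
}
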
+diff --git a/include/linux/export.h b/include/linux/export.h
+index 1e04dbc675c2fa..b40ae79b767da1 100644
+--- a/include/linux/export.h
++++ b/include/linux/export.h
+@@ -24,11 +24,17 @@
+ 	.long sym
+ #endif
+ 
+-#define ___EXPORT_SYMBOL(sym, license, ns)		\
++/*
++ * LLVM integrated assembler can merge adjacent string literals (like
++ * C and GNU-as) passed to '.ascii', but not to '.asciz' and chokes on:
++ *
++ *   .asciz "MODULE_" "kvm" ;
++ */
++#define ___EXPORT_SYMBOL(sym, license, ns...)		\
+ 	.section ".export_symbol","a"		ASM_NL	\
+ 	__export_symbol_##sym:			ASM_NL	\
+ 		.asciz license			ASM_NL	\
+-		.asciz ns			ASM_NL	\
++		.ascii ns "\0"			ASM_NL	\
+ 		__EXPORT_SYMBOL_REF(sym)	ASM_NL	\
+ 	.previous
+ 
+@@ -70,4 +76,6 @@
+ #define EXPORT_SYMBOL_NS(sym, ns)	__EXPORT_SYMBOL(sym, "", __stringify(ns))
+ #define EXPORT_SYMBOL_NS_GPL(sym, ns)	__EXPORT_SYMBOL(sym, "GPL", __stringify(ns))
+ 
++#define EXPORT_SYMBOL_GPL_FOR_MODULES(sym, mods) __EXPORT_SYMBOL(sym, "GPL", "module:" mods)
++
+ #endif /* _LINUX_EXPORT_H */
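
The rewritten ___EXPORT_SYMBOL depends on adjacent string literals being concatenated, which works for '.ascii' but, per the comment above, not for '.asciz' under LLVM's integrated assembler; appending "\0" by hand restores the terminator. C has the same concatenation rule, so the effect can be shown standalone ("kvm" here is only an example module name, not taken from this patch):

#include <stdio.h>

int main(void)
{
	/* "module:" and "kvm" concatenate at compile time; the explicit
	 * "\0" mirrors what .ascii ns "\0" emits for the namespace string. */
	static const char ns[] = "module:" "kvm" "\0";

	printf("%s (%zu bytes including both NULs)\n", ns, sizeof(ns));
	return 0;
}
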
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index b98f128c9afa78..a6de8d93838d1c 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -3407,6 +3407,8 @@ extern int simple_write_begin(struct file *file, struct address_space *mapping,
+ extern const struct address_space_operations ram_aops;
+ extern int always_delete_dentry(const struct dentry *);
+ extern struct inode *alloc_anon_inode(struct super_block *);
++struct inode *anon_inode_make_secure_inode(struct super_block *sb, const char *name,
++					   const struct inode *context_inode);
+ extern int simple_nosetlease(struct file *, int, struct file_lease **, void **);
+ extern const struct dentry_operations simple_dentry_operations;
+ 
+diff --git a/include/linux/libata.h b/include/linux/libata.h
+index 79974a99265fc2..2d3bfec568ebe5 100644
+--- a/include/linux/libata.h
++++ b/include/linux/libata.h
+@@ -1366,7 +1366,7 @@ int ata_acpi_stm(struct ata_port *ap, const struct ata_acpi_gtm *stm);
+ int ata_acpi_gtm(struct ata_port *ap, struct ata_acpi_gtm *stm);
+ unsigned int ata_acpi_gtm_xfermask(struct ata_device *dev,
+ 				   const struct ata_acpi_gtm *gtm);
+-int ata_acpi_cbl_80wire(struct ata_port *ap, const struct ata_acpi_gtm *gtm);
++int ata_acpi_cbl_pata_type(struct ata_port *ap);
+ #else
+ static inline const struct ata_acpi_gtm *ata_acpi_init_gtm(struct ata_port *ap)
+ {
+@@ -1391,10 +1391,9 @@ static inline unsigned int ata_acpi_gtm_xfermask(struct ata_device *dev,
+ 	return 0;
+ }
+ 
+-static inline int ata_acpi_cbl_80wire(struct ata_port *ap,
+-				      const struct ata_acpi_gtm *gtm)
++static inline int ata_acpi_cbl_pata_type(struct ata_port *ap)
+ {
+-	return 0;
++	return ATA_CBL_PATA40;
+ }
+ #endif
+ 
+diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
+index 63dd8cf3c3c2b6..d3561c4a080e2c 100644
+--- a/include/linux/spinlock.h
++++ b/include/linux/spinlock.h
+@@ -548,6 +548,12 @@ DEFINE_LOCK_GUARD_1(raw_spinlock_irq, raw_spinlock_t,
+ 
+ DEFINE_LOCK_GUARD_1_COND(raw_spinlock_irq, _try, raw_spin_trylock_irq(_T->lock))
+ 
++DEFINE_LOCK_GUARD_1(raw_spinlock_bh, raw_spinlock_t,
++		    raw_spin_lock_bh(_T->lock),
++		    raw_spin_unlock_bh(_T->lock))
++
++DEFINE_LOCK_GUARD_1_COND(raw_spinlock_bh, _try, raw_spin_trylock_bh(_T->lock))
++
+ DEFINE_LOCK_GUARD_1(raw_spinlock_irqsave, raw_spinlock_t,
+ 		    raw_spin_lock_irqsave(_T->lock, _T->flags),
+ 		    raw_spin_unlock_irqrestore(_T->lock, _T->flags),
+@@ -569,6 +575,13 @@ DEFINE_LOCK_GUARD_1(spinlock_irq, spinlock_t,
+ DEFINE_LOCK_GUARD_1_COND(spinlock_irq, _try,
+ 			 spin_trylock_irq(_T->lock))
+ 
++DEFINE_LOCK_GUARD_1(spinlock_bh, spinlock_t,
++		    spin_lock_bh(_T->lock),
++		    spin_unlock_bh(_T->lock))
++
++DEFINE_LOCK_GUARD_1_COND(spinlock_bh, _try,
++			 spin_trylock_bh(_T->lock))
++
+ DEFINE_LOCK_GUARD_1(spinlock_irqsave, spinlock_t,
+ 		    spin_lock_irqsave(_T->lock, _T->flags),
+ 		    spin_unlock_irqrestore(_T->lock, _T->flags),
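
With the guard definitions added above, a BH-disabling critical section can be written as a scope instead of a lock/unlock pair. Usage sketch (kernel context; stats_lock and bump_counter are hypothetical, not from this patch):

static DEFINE_SPINLOCK(stats_lock);

static void bump_counter(unsigned long *counter)
{
	guard(spinlock_bh)(&stats_lock);	/* spin_lock_bh(&stats_lock) */

	(*counter)++;
}	/* spin_unlock_bh() runs automatically here, even on early return */

The _try variants added alongside serve the trylock-style scoped forms in the same way.
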
+diff --git a/include/linux/usb.h b/include/linux/usb.h
+index 672d8fc2abdb02..e76e3515a1da0d 100644
+--- a/include/linux/usb.h
++++ b/include/linux/usb.h
+@@ -612,6 +612,7 @@ struct usb3_lpm_parameters {
+  *	FIXME -- complete doc
+  * @authenticated: Crypto authentication passed
+  * @tunnel_mode: Connection native or tunneled over USB4
++ * @usb4_link: device link to the USB4 host interface
+  * @lpm_capable: device supports LPM
+  * @lpm_devinit_allow: Allow USB3 device initiated LPM, exit latency is in range
+  * @usb2_hw_lpm_capable: device can perform USB2 hardware LPM
+@@ -722,6 +723,7 @@ struct usb_device {
+ 	unsigned reset_resume:1;
+ 	unsigned port_is_suspended:1;
+ 	enum usb_link_tunnel_mode tunnel_mode;
++	struct device_link *usb4_link;
+ 
+ 	int slot_id;
+ 	struct usb2_lpm_parameters l1_params;
+diff --git a/include/linux/usb/typec_dp.h b/include/linux/usb/typec_dp.h
+index f2da264d9c140c..acb0ad03bdacbd 100644
+--- a/include/linux/usb/typec_dp.h
++++ b/include/linux/usb/typec_dp.h
+@@ -57,6 +57,7 @@ enum {
+ 	DP_PIN_ASSIGN_D,
+ 	DP_PIN_ASSIGN_E,
+ 	DP_PIN_ASSIGN_F, /* Not supported after v1.0b */
++	DP_PIN_ASSIGN_MAX,
+ };
+ 
+ /* DisplayPort alt mode specific commands */
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 39a3d750f2ff94..f01477cecf3934 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -1376,13 +1376,6 @@ static void free_func_state(struct bpf_func_state *state)
+ 	kfree(state);
+ }
+ 
+-static void clear_jmp_history(struct bpf_verifier_state *state)
+-{
+-	kfree(state->jmp_history);
+-	state->jmp_history = NULL;
+-	state->jmp_history_cnt = 0;
+-}
+-
+ static void free_verifier_state(struct bpf_verifier_state *state,
+ 				bool free_self)
+ {
+@@ -1392,7 +1385,6 @@ static void free_verifier_state(struct bpf_verifier_state *state,
+ 		free_func_state(state->frame[i]);
+ 		state->frame[i] = NULL;
+ 	}
+-	clear_jmp_history(state);
+ 	if (free_self)
+ 		kfree(state);
+ }
+@@ -1418,13 +1410,6 @@ static int copy_verifier_state(struct bpf_verifier_state *dst_state,
+ 	struct bpf_func_state *dst;
+ 	int i, err;
+ 
+-	dst_state->jmp_history = copy_array(dst_state->jmp_history, src->jmp_history,
+-					  src->jmp_history_cnt, sizeof(*dst_state->jmp_history),
+-					  GFP_USER);
+-	if (!dst_state->jmp_history)
+-		return -ENOMEM;
+-	dst_state->jmp_history_cnt = src->jmp_history_cnt;
+-
+ 	/* if dst has more stack frames then src frame, free them, this is also
+ 	 * necessary in case of exceptional exits using bpf_throw.
+ 	 */
+@@ -1443,6 +1428,8 @@ static int copy_verifier_state(struct bpf_verifier_state *dst_state,
+ 	dst_state->parent = src->parent;
+ 	dst_state->first_insn_idx = src->first_insn_idx;
+ 	dst_state->last_insn_idx = src->last_insn_idx;
++	dst_state->insn_hist_start = src->insn_hist_start;
++	dst_state->insn_hist_end = src->insn_hist_end;
+ 	dst_state->dfs_depth = src->dfs_depth;
+ 	dst_state->callback_unroll_depth = src->callback_unroll_depth;
+ 	dst_state->used_as_loop_entry = src->used_as_loop_entry;
+@@ -2496,9 +2483,14 @@ static struct bpf_verifier_state *push_async_cb(struct bpf_verifier_env *env,
+ 	 * The caller state doesn't matter.
+ 	 * This is async callback. It starts in a fresh stack.
+ 	 * Initialize it similar to do_check_common().
++	 * But we do need to make sure to not clobber insn_hist, so we keep
++	 * chaining insn_hist_start/insn_hist_end indices as for a normal
++	 * child state.
+ 	 */
+ 	elem->st.branches = 1;
+ 	elem->st.in_sleepable = is_sleepable;
++	elem->st.insn_hist_start = env->cur_state->insn_hist_end;
++	elem->st.insn_hist_end = elem->st.insn_hist_start;
+ 	frame = kzalloc(sizeof(*frame), GFP_KERNEL);
+ 	if (!frame)
+ 		goto err;
+@@ -3513,11 +3505,10 @@ static void linked_regs_unpack(u64 val, struct linked_regs *s)
+ }
+ 
+ /* for any branch, call, exit record the history of jmps in the given state */
+-static int push_jmp_history(struct bpf_verifier_env *env, struct bpf_verifier_state *cur,
+-			    int insn_flags, u64 linked_regs)
++static int push_insn_history(struct bpf_verifier_env *env, struct bpf_verifier_state *cur,
++			     int insn_flags, u64 linked_regs)
+ {
+-	u32 cnt = cur->jmp_history_cnt;
+-	struct bpf_jmp_history_entry *p;
++	struct bpf_insn_hist_entry *p;
+ 	size_t alloc_size;
+ 
+ 	/* combine instruction flags if we already recorded this instruction */
+@@ -3537,29 +3528,32 @@ static int push_jmp_history(struct bpf_verifier_env *env, struct bpf_verifier_st
+ 		return 0;
+ 	}
+ 
+-	cnt++;
+-	alloc_size = kmalloc_size_roundup(size_mul(cnt, sizeof(*p)));
+-	p = krealloc(cur->jmp_history, alloc_size, GFP_USER);
+-	if (!p)
+-		return -ENOMEM;
+-	cur->jmp_history = p;
++	if (cur->insn_hist_end + 1 > env->insn_hist_cap) {
++		alloc_size = size_mul(cur->insn_hist_end + 1, sizeof(*p));
++		p = kvrealloc(env->insn_hist, alloc_size, GFP_USER);
++		if (!p)
++			return -ENOMEM;
++		env->insn_hist = p;
++		env->insn_hist_cap = alloc_size / sizeof(*p);
++	}
+ 
+-	p = &cur->jmp_history[cnt - 1];
++	p = &env->insn_hist[cur->insn_hist_end];
+ 	p->idx = env->insn_idx;
+ 	p->prev_idx = env->prev_insn_idx;
+ 	p->flags = insn_flags;
+ 	p->linked_regs = linked_regs;
+-	cur->jmp_history_cnt = cnt;
++
++	cur->insn_hist_end++;
+ 	env->cur_hist_ent = p;
+ 
+ 	return 0;
+ }
+ 
+-static struct bpf_jmp_history_entry *get_jmp_hist_entry(struct bpf_verifier_state *st,
+-						        u32 hist_end, int insn_idx)
++static struct bpf_insn_hist_entry *get_insn_hist_entry(struct bpf_verifier_env *env,
++						       u32 hist_start, u32 hist_end, int insn_idx)
+ {
+-	if (hist_end > 0 && st->jmp_history[hist_end - 1].idx == insn_idx)
+-		return &st->jmp_history[hist_end - 1];
++	if (hist_end > hist_start && env->insn_hist[hist_end - 1].idx == insn_idx)
++		return &env->insn_hist[hist_end - 1];
+ 	return NULL;
+ }
+ 
+@@ -3576,25 +3570,26 @@ static struct bpf_jmp_history_entry *get_jmp_hist_entry(struct bpf_verifier_stat
+  * history entry recording a jump from last instruction of parent state and
+  * first instruction of given state.
+  */
+-static int get_prev_insn_idx(struct bpf_verifier_state *st, int i,
+-			     u32 *history)
++static int get_prev_insn_idx(const struct bpf_verifier_env *env,
++			     struct bpf_verifier_state *st,
++			     int insn_idx, u32 hist_start, u32 *hist_endp)
+ {
+-	u32 cnt = *history;
++	u32 hist_end = *hist_endp;
++	u32 cnt = hist_end - hist_start;
+ 
+-	if (i == st->first_insn_idx) {
++	if (insn_idx == st->first_insn_idx) {
+ 		if (cnt == 0)
+ 			return -ENOENT;
+-		if (cnt == 1 && st->jmp_history[0].idx == i)
++		if (cnt == 1 && env->insn_hist[hist_start].idx == insn_idx)
+ 			return -ENOENT;
+ 	}
+ 
+-	if (cnt && st->jmp_history[cnt - 1].idx == i) {
+-		i = st->jmp_history[cnt - 1].prev_idx;
+-		(*history)--;
++	if (cnt && env->insn_hist[hist_end - 1].idx == insn_idx) {
++		(*hist_endp)--;
++		return env->insn_hist[hist_end - 1].prev_idx;
+ 	} else {
+-		i--;
++		return insn_idx - 1;
+ 	}
+-	return i;
+ }
+ 
+ static const char *disasm_kfunc_name(void *data, const struct bpf_insn *insn)
+@@ -3766,7 +3761,7 @@ static void fmt_stack_mask(char *buf, ssize_t buf_sz, u64 stack_mask)
+ /* If any register R in hist->linked_regs is marked as precise in bt,
+  * do bt_set_frame_{reg,slot}(bt, R) for all registers in hist->linked_regs.
+  */
+-static void bt_sync_linked_regs(struct backtrack_state *bt, struct bpf_jmp_history_entry *hist)
++static void bt_sync_linked_regs(struct backtrack_state *bt, struct bpf_insn_hist_entry *hist)
+ {
+ 	struct linked_regs linked_regs;
+ 	bool some_precise = false;
+@@ -3811,7 +3806,7 @@ static bool calls_callback(struct bpf_verifier_env *env, int insn_idx);
+  *   - *was* processed previously during backtracking.
+  */
+ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
+-			  struct bpf_jmp_history_entry *hist, struct backtrack_state *bt)
++			  struct bpf_insn_hist_entry *hist, struct backtrack_state *bt)
+ {
+ 	const struct bpf_insn_cbs cbs = {
+ 		.cb_call	= disasm_kfunc_name,
+@@ -4071,8 +4066,10 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
+ 			 * before it would be equally necessary to
+ 			 * propagate it to dreg.
+ 			 */
+-			bt_set_reg(bt, dreg);
+-			bt_set_reg(bt, sreg);
++			if (!hist || !(hist->flags & INSN_F_SRC_REG_STACK))
++				bt_set_reg(bt, sreg);
++			if (!hist || !(hist->flags & INSN_F_DST_REG_STACK))
++				bt_set_reg(bt, dreg);
+ 		} else if (BPF_SRC(insn->code) == BPF_K) {
+ 			 /* dreg <cond> K
+ 			  * Only dreg still needs precision before
+@@ -4230,7 +4227,7 @@ static void mark_all_scalars_imprecise(struct bpf_verifier_env *env, struct bpf_
+  * SCALARS, as well as any other registers and slots that contribute to
+  * a tracked state of given registers/stack slots, depending on specific BPF
+  * assembly instructions (see backtrack_insns() for exact instruction handling
+- * logic). This backtracking relies on recorded jmp_history and is able to
++ * logic). This backtracking relies on recorded insn_hist and is able to
+  * traverse entire chain of parent states. This process ends only when all the
+  * necessary registers/slots and their transitive dependencies are marked as
+  * precise.
+@@ -4347,8 +4344,9 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno)
+ 
+ 	for (;;) {
+ 		DECLARE_BITMAP(mask, 64);
+-		u32 history = st->jmp_history_cnt;
+-		struct bpf_jmp_history_entry *hist;
++		u32 hist_start = st->insn_hist_start;
++		u32 hist_end = st->insn_hist_end;
++		struct bpf_insn_hist_entry *hist;
+ 
+ 		if (env->log.level & BPF_LOG_LEVEL2) {
+ 			verbose(env, "mark_precise: frame%d: last_idx %d first_idx %d subseq_idx %d \n",
+@@ -4387,7 +4385,7 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno)
+ 				err = 0;
+ 				skip_first = false;
+ 			} else {
+-				hist = get_jmp_hist_entry(st, history, i);
++				hist = get_insn_hist_entry(env, hist_start, hist_end, i);
+ 				err = backtrack_insn(env, i, subseq_idx, hist, bt);
+ 			}
+ 			if (err == -ENOTSUPP) {
+@@ -4404,7 +4402,7 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno)
+ 				 */
+ 				return 0;
+ 			subseq_idx = i;
+-			i = get_prev_insn_idx(st, i, &history);
++			i = get_prev_insn_idx(env, st, i, hist_start, &hist_end);
+ 			if (i == -ENOENT)
+ 				break;
+ 			if (i >= env->prog->len) {
+@@ -4771,7 +4769,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
+ 	}
+ 
+ 	if (insn_flags)
+-		return push_jmp_history(env, env->cur_state, insn_flags, 0);
++		return push_insn_history(env, env->cur_state, insn_flags, 0);
+ 	return 0;
+ }
+ 
+@@ -5078,7 +5076,7 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
+ 		insn_flags = 0; /* we are not restoring spilled register */
+ 	}
+ 	if (insn_flags)
+-		return push_jmp_history(env, env->cur_state, insn_flags, 0);
++		return push_insn_history(env, env->cur_state, insn_flags, 0);
+ 	return 0;
+ }
+ 
+@@ -15419,6 +15417,7 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
+ 	struct bpf_reg_state *eq_branch_regs;
+ 	struct linked_regs linked_regs = {};
+ 	u8 opcode = BPF_OP(insn->code);
++	int insn_flags = 0;
+ 	bool is_jmp32;
+ 	int pred = -1;
+ 	int err;
+@@ -15478,6 +15477,9 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
+ 				insn->src_reg);
+ 			return -EACCES;
+ 		}
++
++		if (src_reg->type == PTR_TO_STACK)
++			insn_flags |= INSN_F_SRC_REG_STACK;
+ 	} else {
+ 		if (insn->src_reg != BPF_REG_0) {
+ 			verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
+@@ -15489,6 +15491,14 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
+ 		__mark_reg_known(src_reg, insn->imm);
+ 	}
+ 
++	if (dst_reg->type == PTR_TO_STACK)
++		insn_flags |= INSN_F_DST_REG_STACK;
++	if (insn_flags) {
++		err = push_insn_history(env, this_branch, insn_flags, 0);
++		if (err)
++			return err;
++	}
++
+ 	is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
+ 	pred = is_branch_taken(dst_reg, src_reg, opcode, is_jmp32);
+ 	if (pred >= 0) {
+@@ -15542,7 +15552,7 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
+ 	if (dst_reg->type == SCALAR_VALUE && dst_reg->id)
+ 		collect_linked_regs(this_branch, dst_reg->id, &linked_regs);
+ 	if (linked_regs.cnt > 1) {
+-		err = push_jmp_history(env, this_branch, 0, linked_regs_pack(&linked_regs));
++		err = push_insn_history(env, this_branch, 0, linked_regs_pack(&linked_regs));
+ 		if (err)
+ 			return err;
+ 	}
+@@ -17984,7 +17994,7 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
+ 
+ 	force_new_state = env->test_state_freq || is_force_checkpoint(env, insn_idx) ||
+ 			  /* Avoid accumulating infinitely long jmp history */
+-			  cur->jmp_history_cnt > 40;
++			  cur->insn_hist_end - cur->insn_hist_start > 40;
+ 
+ 	/* bpf progs typically have pruning point every 4 instructions
+ 	 * http://vger.kernel.org/bpfconf2019.html#session-1
+@@ -18182,7 +18192,7 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
+ 			 * the current state.
+ 			 */
+ 			if (is_jmp_point(env, env->insn_idx))
+-				err = err ? : push_jmp_history(env, cur, 0, 0);
++				err = err ? : push_insn_history(env, cur, 0, 0);
+ 			err = err ? : propagate_precision(env, &sl->state);
+ 			if (err)
+ 				return err;
+@@ -18281,8 +18291,8 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
+ 
+ 	cur->parent = new;
+ 	cur->first_insn_idx = insn_idx;
++	cur->insn_hist_start = cur->insn_hist_end;
+ 	cur->dfs_depth = new->dfs_depth + 1;
+-	clear_jmp_history(cur);
+ 	new_sl->next = *explored_state(env, insn_idx);
+ 	*explored_state(env, insn_idx) = new_sl;
+ 	/* connect new state to parentage chain. Current frame needs all
+@@ -18450,7 +18460,7 @@ static int do_check(struct bpf_verifier_env *env)
+ 		}
+ 
+ 		if (is_jmp_point(env, env->insn_idx)) {
+-			err = push_jmp_history(env, state, 0, 0);
++			err = push_insn_history(env, state, 0, 0);
+ 			if (err)
+ 				return err;
+ 		}
+@@ -22716,6 +22726,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u3
+ 	if (!is_priv)
+ 		mutex_unlock(&bpf_verifier_lock);
+ 	vfree(env->insn_aux_data);
++	kvfree(env->insn_hist);
+ err_free_env:
+ 	kvfree(env);
+ 	return ret;
+diff --git a/kernel/irq/irq_sim.c b/kernel/irq/irq_sim.c
+index 1a3d483548e2f3..ae4c9cbd1b4b9e 100644
+--- a/kernel/irq/irq_sim.c
++++ b/kernel/irq/irq_sim.c
+@@ -202,7 +202,7 @@ struct irq_domain *irq_domain_create_sim_full(struct fwnode_handle *fwnode,
+ 					      void *data)
+ {
+ 	struct irq_sim_work_ctx *work_ctx __free(kfree) =
+-				kmalloc(sizeof(*work_ctx), GFP_KERNEL);
++				kzalloc(sizeof(*work_ctx), GFP_KERNEL);
+ 
+ 	if (!work_ctx)
+ 		return ERR_PTR(-ENOMEM);
+diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
+index cefa831c8cb322..552464dcffe270 100644
+--- a/kernel/rcu/tree.c
++++ b/kernel/rcu/tree.c
+@@ -3076,6 +3076,10 @@ __call_rcu_common(struct rcu_head *head, rcu_callback_t func, bool lazy_in)
+ 	/* Misaligned rcu_head! */
+ 	WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1));
+ 
++	/* Avoid NULL dereference if callback is NULL. */
++	if (WARN_ON_ONCE(!func))
++		return;
++
+ 	if (debug_rcu_head_queue(head)) {
+ 		/*
+ 		 * Probable double call_rcu(), so leak the callback.
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index d4948a8629929c..50531e462a4ba8 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -1303,7 +1303,7 @@ bool sched_can_stop_tick(struct rq *rq)
+ 	if (scx_enabled() && !scx_can_stop_tick(rq))
+ 		return false;
+ 
+-	if (rq->cfs.h_nr_running > 1)
++	if (rq->cfs.h_nr_queued > 1)
+ 		return false;
+ 
+ 	/*
+@@ -5976,7 +5976,7 @@ __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
+ 	 * opportunity to pull in more work from other CPUs.
+ 	 */
+ 	if (likely(!sched_class_above(prev->sched_class, &fair_sched_class) &&
+-		   rq->nr_running == rq->cfs.h_nr_running)) {
++		   rq->nr_running == rq->cfs.h_nr_queued)) {
+ 
+ 		p = pick_next_task_fair(rq, prev, rf);
+ 		if (unlikely(p == RETRY_TASK))
+diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
+index 1e3bc0774efd51..9815f9a0cd592c 100644
+--- a/kernel/sched/debug.c
++++ b/kernel/sched/debug.c
+@@ -378,7 +378,7 @@ static ssize_t sched_fair_server_write(struct file *filp, const char __user *ubu
+ 			return  -EINVAL;
+ 		}
+ 
+-		if (rq->cfs.h_nr_running) {
++		if (rq->cfs.h_nr_queued) {
+ 			update_rq_clock(rq);
+ 			dl_server_stop(&rq->fair_server);
+ 		}
+@@ -391,7 +391,7 @@ static ssize_t sched_fair_server_write(struct file *filp, const char __user *ubu
+ 			printk_deferred("Fair server disabled in CPU %d, system may crash due to starvation.\n",
+ 					cpu_of(rq));
+ 
+-		if (rq->cfs.h_nr_running)
++		if (rq->cfs.h_nr_queued)
+ 			dl_server_start(&rq->fair_server);
+ 	}
+ 
+@@ -843,7 +843,8 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
+ 	spread = right_vruntime - left_vruntime;
+ 	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread", SPLIT_NS(spread));
+ 	SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
+-	SEQ_printf(m, "  .%-30s: %d\n", "h_nr_running", cfs_rq->h_nr_running);
++	SEQ_printf(m, "  .%-30s: %d\n", "h_nr_runnable", cfs_rq->h_nr_runnable);
++	SEQ_printf(m, "  .%-30s: %d\n", "h_nr_queued", cfs_rq->h_nr_queued);
+ 	SEQ_printf(m, "  .%-30s: %d\n", "h_nr_delayed", cfs_rq->h_nr_delayed);
+ 	SEQ_printf(m, "  .%-30s: %d\n", "idle_nr_running",
+ 			cfs_rq->idle_nr_running);
+diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
+index ddd4fa785264eb..c801dd20c63d93 100644
+--- a/kernel/sched/ext.c
++++ b/kernel/sched/ext.c
+@@ -4058,12 +4058,12 @@ void scx_group_set_weight(struct task_group *tg, unsigned long weight)
+ {
+ 	percpu_down_read(&scx_cgroup_rwsem);
+ 
+-	if (scx_cgroup_enabled && tg->scx_weight != weight) {
+-		if (SCX_HAS_OP(cgroup_set_weight))
+-			SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_set_weight,
+-				    tg_cgrp(tg), weight);
+-		tg->scx_weight = weight;
+-	}
++	if (scx_cgroup_enabled && SCX_HAS_OP(cgroup_set_weight) &&
++	    tg->scx_weight != weight)
++		SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_set_weight,
++			    tg_cgrp(tg), weight);
++
++	tg->scx_weight = weight;
+ 
+ 	percpu_up_read(&scx_cgroup_rwsem);
+ }
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 443f6a9ef3f8f6..7280ed04c96cef 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -2147,7 +2147,7 @@ static void update_numa_stats(struct task_numa_env *env,
+ 		ns->load += cpu_load(rq);
+ 		ns->runnable += cpu_runnable(rq);
+ 		ns->util += cpu_util_cfs(cpu);
+-		ns->nr_running += rq->cfs.h_nr_running;
++		ns->nr_running += rq->cfs.h_nr_queued;
+ 		ns->compute_capacity += capacity_of(cpu);
+ 
+ 		if (find_idle && idle_core < 0 && !rq->nr_running && idle_cpu(cpu)) {
+@@ -5427,7 +5427,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+ 	 * When enqueuing a sched_entity, we must:
+ 	 *   - Update loads to have both entity and cfs_rq synced with now.
+ 	 *   - For group_entity, update its runnable_weight to reflect the new
+-	 *     h_nr_running of its group cfs_rq.
++	 *     h_nr_queued of its group cfs_rq.
+ 	 *   - For group_entity, update its weight to reflect the new share of
+ 	 *     its group cfs_rq
+ 	 *   - Add its new weight to cfs_rq->load.weight
+@@ -5511,6 +5511,7 @@ static void set_delayed(struct sched_entity *se)
+ 	for_each_sched_entity(se) {
+ 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
+ 
++		cfs_rq->h_nr_runnable--;
+ 		cfs_rq->h_nr_delayed++;
+ 		if (cfs_rq_throttled(cfs_rq))
+ 			break;
+@@ -5533,6 +5534,7 @@ static void clear_delayed(struct sched_entity *se)
+ 	for_each_sched_entity(se) {
+ 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
+ 
++		cfs_rq->h_nr_runnable++;
+ 		cfs_rq->h_nr_delayed--;
+ 		if (cfs_rq_throttled(cfs_rq))
+ 			break;
+@@ -5583,7 +5585,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+ 	 * When dequeuing a sched_entity, we must:
+ 	 *   - Update loads to have both entity and cfs_rq synced with now.
+ 	 *   - For group_entity, update its runnable_weight to reflect the new
+-	 *     h_nr_running of its group cfs_rq.
++	 *     h_nr_queued of its group cfs_rq.
+ 	 *   - Subtract its previous weight from cfs_rq->load.weight.
+ 	 *   - For group entity, update its weight to reflect the new share
+ 	 *     of its group cfs_rq.
+@@ -5985,8 +5987,8 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
+ 	struct rq *rq = rq_of(cfs_rq);
+ 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
+ 	struct sched_entity *se;
+-	long task_delta, idle_task_delta, delayed_delta, dequeue = 1;
+-	long rq_h_nr_running = rq->cfs.h_nr_running;
++	long queued_delta, runnable_delta, idle_task_delta, delayed_delta, dequeue = 1;
++	long rq_h_nr_queued = rq->cfs.h_nr_queued;
+ 
+ 	raw_spin_lock(&cfs_b->lock);
+ 	/* This will start the period timer if necessary */
+@@ -6016,7 +6018,8 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
+ 	walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
+ 	rcu_read_unlock();
+ 
+-	task_delta = cfs_rq->h_nr_running;
++	queued_delta = cfs_rq->h_nr_queued;
++	runnable_delta = cfs_rq->h_nr_runnable;
+ 	idle_task_delta = cfs_rq->idle_h_nr_running;
+ 	delayed_delta = cfs_rq->h_nr_delayed;
+ 	for_each_sched_entity(se) {
+@@ -6038,9 +6041,10 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
+ 		dequeue_entity(qcfs_rq, se, flags);
+ 
+ 		if (cfs_rq_is_idle(group_cfs_rq(se)))
+-			idle_task_delta = cfs_rq->h_nr_running;
++			idle_task_delta = cfs_rq->h_nr_queued;
+ 
+-		qcfs_rq->h_nr_running -= task_delta;
++		qcfs_rq->h_nr_queued -= queued_delta;
++		qcfs_rq->h_nr_runnable -= runnable_delta;
+ 		qcfs_rq->idle_h_nr_running -= idle_task_delta;
+ 		qcfs_rq->h_nr_delayed -= delayed_delta;
+ 
+@@ -6061,18 +6065,19 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
+ 		se_update_runnable(se);
+ 
+ 		if (cfs_rq_is_idle(group_cfs_rq(se)))
+-			idle_task_delta = cfs_rq->h_nr_running;
++			idle_task_delta = cfs_rq->h_nr_queued;
+ 
+-		qcfs_rq->h_nr_running -= task_delta;
++		qcfs_rq->h_nr_queued -= queued_delta;
++		qcfs_rq->h_nr_runnable -= runnable_delta;
+ 		qcfs_rq->idle_h_nr_running -= idle_task_delta;
+ 		qcfs_rq->h_nr_delayed -= delayed_delta;
+ 	}
+ 
+ 	/* At this point se is NULL and we are at root level*/
+-	sub_nr_running(rq, task_delta);
++	sub_nr_running(rq, queued_delta);
+ 
+ 	/* Stop the fair server if throttling resulted in no runnable tasks */
+-	if (rq_h_nr_running && !rq->cfs.h_nr_running)
++	if (rq_h_nr_queued && !rq->cfs.h_nr_queued)
+ 		dl_server_stop(&rq->fair_server);
+ done:
+ 	/*
+@@ -6091,8 +6096,8 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
+ 	struct rq *rq = rq_of(cfs_rq);
+ 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
+ 	struct sched_entity *se;
+-	long task_delta, idle_task_delta, delayed_delta;
+-	long rq_h_nr_running = rq->cfs.h_nr_running;
++	long queued_delta, runnable_delta, idle_task_delta, delayed_delta;
++	long rq_h_nr_queued = rq->cfs.h_nr_queued;
+ 
+ 	se = cfs_rq->tg->se[cpu_of(rq)];
+ 
+@@ -6125,7 +6130,8 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
+ 		goto unthrottle_throttle;
+ 	}
+ 
+-	task_delta = cfs_rq->h_nr_running;
++	queued_delta = cfs_rq->h_nr_queued;
++	runnable_delta = cfs_rq->h_nr_runnable;
+ 	idle_task_delta = cfs_rq->idle_h_nr_running;
+ 	delayed_delta = cfs_rq->h_nr_delayed;
+ 	for_each_sched_entity(se) {
+@@ -6141,9 +6147,10 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
+ 		enqueue_entity(qcfs_rq, se, ENQUEUE_WAKEUP);
+ 
+ 		if (cfs_rq_is_idle(group_cfs_rq(se)))
+-			idle_task_delta = cfs_rq->h_nr_running;
++			idle_task_delta = cfs_rq->h_nr_queued;
+ 
+-		qcfs_rq->h_nr_running += task_delta;
++		qcfs_rq->h_nr_queued += queued_delta;
++		qcfs_rq->h_nr_runnable += runnable_delta;
+ 		qcfs_rq->idle_h_nr_running += idle_task_delta;
+ 		qcfs_rq->h_nr_delayed += delayed_delta;
+ 
+@@ -6159,9 +6166,10 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
+ 		se_update_runnable(se);
+ 
+ 		if (cfs_rq_is_idle(group_cfs_rq(se)))
+-			idle_task_delta = cfs_rq->h_nr_running;
++			idle_task_delta = cfs_rq->h_nr_queued;
+ 
+-		qcfs_rq->h_nr_running += task_delta;
++		qcfs_rq->h_nr_queued += queued_delta;
++		qcfs_rq->h_nr_runnable += runnable_delta;
+ 		qcfs_rq->idle_h_nr_running += idle_task_delta;
+ 		qcfs_rq->h_nr_delayed += delayed_delta;
+ 
+@@ -6171,11 +6179,11 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
+ 	}
+ 
+ 	/* Start the fair server if un-throttling resulted in new runnable tasks */
+-	if (!rq_h_nr_running && rq->cfs.h_nr_running)
++	if (!rq_h_nr_queued && rq->cfs.h_nr_queued)
+ 		dl_server_start(&rq->fair_server);
+ 
+ 	/* At this point se is NULL and we are at root level*/
+-	add_nr_running(rq, task_delta);
++	add_nr_running(rq, queued_delta);
+ 
+ unthrottle_throttle:
+ 	assert_list_leaf_cfs_rq(rq);
+@@ -6890,7 +6898,7 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
+ 
+ 	SCHED_WARN_ON(task_rq(p) != rq);
+ 
+-	if (rq->cfs.h_nr_running > 1) {
++	if (rq->cfs.h_nr_queued > 1) {
+ 		u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
+ 		u64 slice = se->slice;
+ 		s64 delta = slice - ran;
+@@ -7033,7 +7041,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+ 	int idle_h_nr_running = task_has_idle_policy(p);
+ 	int h_nr_delayed = 0;
+ 	int task_new = !(flags & ENQUEUE_WAKEUP);
+-	int rq_h_nr_running = rq->cfs.h_nr_running;
++	int rq_h_nr_queued = rq->cfs.h_nr_queued;
+ 	u64 slice = 0;
+ 
+ 	/*
+@@ -7081,7 +7089,9 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+ 		enqueue_entity(cfs_rq, se, flags);
+ 		slice = cfs_rq_min_slice(cfs_rq);
+ 
+-		cfs_rq->h_nr_running++;
++		if (!h_nr_delayed)
++			cfs_rq->h_nr_runnable++;
++		cfs_rq->h_nr_queued++;
+ 		cfs_rq->idle_h_nr_running += idle_h_nr_running;
+ 		cfs_rq->h_nr_delayed += h_nr_delayed;
+ 
+@@ -7107,7 +7117,9 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+ 			min_vruntime_cb_propagate(&se->run_node, NULL);
+ 		slice = cfs_rq_min_slice(cfs_rq);
+ 
+-		cfs_rq->h_nr_running++;
++		if (!h_nr_delayed)
++			cfs_rq->h_nr_runnable++;
++		cfs_rq->h_nr_queued++;
+ 		cfs_rq->idle_h_nr_running += idle_h_nr_running;
+ 		cfs_rq->h_nr_delayed += h_nr_delayed;
+ 
+@@ -7119,7 +7131,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+ 			goto enqueue_throttle;
+ 	}
+ 
+-	if (!rq_h_nr_running && rq->cfs.h_nr_running) {
++	if (!rq_h_nr_queued && rq->cfs.h_nr_queued) {
+ 		/* Account for idle runtime */
+ 		if (!rq->nr_running)
+ 			dl_server_update_idle_time(rq, rq->curr);
+@@ -7166,19 +7178,19 @@ static void set_next_buddy(struct sched_entity *se);
+ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
+ {
+ 	bool was_sched_idle = sched_idle_rq(rq);
+-	int rq_h_nr_running = rq->cfs.h_nr_running;
++	int rq_h_nr_queued = rq->cfs.h_nr_queued;
+ 	bool task_sleep = flags & DEQUEUE_SLEEP;
+ 	bool task_delayed = flags & DEQUEUE_DELAYED;
+ 	struct task_struct *p = NULL;
+ 	int idle_h_nr_running = 0;
+-	int h_nr_running = 0;
++	int h_nr_queued = 0;
+ 	int h_nr_delayed = 0;
+ 	struct cfs_rq *cfs_rq;
+ 	u64 slice = 0;
+ 
+ 	if (entity_is_task(se)) {
+ 		p = task_of(se);
+-		h_nr_running = 1;
++		h_nr_queued = 1;
+ 		idle_h_nr_running = task_has_idle_policy(p);
+ 		if (!task_sleep && !task_delayed)
+ 			h_nr_delayed = !!se->sched_delayed;
+@@ -7195,12 +7207,14 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
+ 			break;
+ 		}
+ 
+-		cfs_rq->h_nr_running -= h_nr_running;
++		if (!h_nr_delayed)
++			cfs_rq->h_nr_runnable -= h_nr_queued;
++		cfs_rq->h_nr_queued -= h_nr_queued;
+ 		cfs_rq->idle_h_nr_running -= idle_h_nr_running;
+ 		cfs_rq->h_nr_delayed -= h_nr_delayed;
+ 
+ 		if (cfs_rq_is_idle(cfs_rq))
+-			idle_h_nr_running = h_nr_running;
++			idle_h_nr_running = h_nr_queued;
+ 
+ 		/* end evaluation on encountering a throttled cfs_rq */
+ 		if (cfs_rq_throttled(cfs_rq))
+@@ -7236,21 +7250,23 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
+ 			min_vruntime_cb_propagate(&se->run_node, NULL);
+ 		slice = cfs_rq_min_slice(cfs_rq);
+ 
+-		cfs_rq->h_nr_running -= h_nr_running;
++		if (!h_nr_delayed)
++			cfs_rq->h_nr_runnable -= h_nr_queued;
++		cfs_rq->h_nr_queued -= h_nr_queued;
+ 		cfs_rq->idle_h_nr_running -= idle_h_nr_running;
+ 		cfs_rq->h_nr_delayed -= h_nr_delayed;
+ 
+ 		if (cfs_rq_is_idle(cfs_rq))
+-			idle_h_nr_running = h_nr_running;
++			idle_h_nr_running = h_nr_queued;
+ 
+ 		/* end evaluation on encountering a throttled cfs_rq */
+ 		if (cfs_rq_throttled(cfs_rq))
+ 			return 0;
+ 	}
+ 
+-	sub_nr_running(rq, h_nr_running);
++	sub_nr_running(rq, h_nr_queued);
+ 
+-	if (rq_h_nr_running && !rq->cfs.h_nr_running)
++	if (rq_h_nr_queued && !rq->cfs.h_nr_queued)
+ 		dl_server_stop(&rq->fair_server);
+ 
+ 	/* balance early to pull high priority tasks */
+@@ -7297,6 +7313,11 @@ static bool dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+ 	return true;
+ }
+ 
++static inline unsigned int cfs_h_nr_delayed(struct rq *rq)
++{
++	return (rq->cfs.h_nr_queued - rq->cfs.h_nr_runnable);
++}
++
+ #ifdef CONFIG_SMP
+ 
+ /* Working cpumask for: sched_balance_rq(), sched_balance_newidle(). */
+@@ -7458,8 +7479,12 @@ wake_affine_idle(int this_cpu, int prev_cpu, int sync)
+ 	if (available_idle_cpu(this_cpu) && cpus_share_cache(this_cpu, prev_cpu))
+ 		return available_idle_cpu(prev_cpu) ? prev_cpu : this_cpu;
+ 
+-	if (sync && cpu_rq(this_cpu)->nr_running == 1)
+-		return this_cpu;
++	if (sync) {
++		struct rq *rq = cpu_rq(this_cpu);
++
++		if ((rq->nr_running - cfs_h_nr_delayed(rq)) == 1)
++			return this_cpu;
++	}
+ 
+ 	if (available_idle_cpu(prev_cpu))
+ 		return prev_cpu;
+@@ -10394,7 +10419,7 @@ sched_reduced_capacity(struct rq *rq, struct sched_domain *sd)
+ 	 * When there is more than 1 task, the group_overloaded case already
+ 	 * takes care of cpu with reduced capacity
+ 	 */
+-	if (rq->cfs.h_nr_running != 1)
++	if (rq->cfs.h_nr_queued != 1)
+ 		return false;
+ 
+ 	return check_cpu_capacity(rq, sd);
+@@ -10429,7 +10454,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
+ 		sgs->group_load += load;
+ 		sgs->group_util += cpu_util_cfs(i);
+ 		sgs->group_runnable += cpu_runnable(rq);
+-		sgs->sum_h_nr_running += rq->cfs.h_nr_running;
++		sgs->sum_h_nr_running += rq->cfs.h_nr_queued;
+ 
+ 		nr_running = rq->nr_running;
+ 		sgs->sum_nr_running += nr_running;
+@@ -10744,7 +10769,7 @@ static inline void update_sg_wakeup_stats(struct sched_domain *sd,
+ 		sgs->group_util += cpu_util_without(i, p);
+ 		sgs->group_runnable += cpu_runnable_without(rq, p);
+ 		local = task_running_on_cpu(i, p);
+-		sgs->sum_h_nr_running += rq->cfs.h_nr_running - local;
++		sgs->sum_h_nr_running += rq->cfs.h_nr_queued - local;
+ 
+ 		nr_running = rq->nr_running - local;
+ 		sgs->sum_nr_running += nr_running;
+@@ -11526,7 +11551,7 @@ static struct rq *sched_balance_find_src_rq(struct lb_env *env,
+ 		if (rt > env->fbq_type)
+ 			continue;
+ 
+-		nr_running = rq->cfs.h_nr_running;
++		nr_running = rq->cfs.h_nr_queued;
+ 		if (!nr_running)
+ 			continue;
+ 
+@@ -11685,7 +11710,7 @@ static int need_active_balance(struct lb_env *env)
+ 	 * available on dst_cpu.
+ 	 */
+ 	if (env->idle &&
+-	    (env->src_rq->cfs.h_nr_running == 1)) {
++	    (env->src_rq->cfs.h_nr_queued == 1)) {
+ 		if ((check_cpu_capacity(env->src_rq, sd)) &&
+ 		    (capacity_of(env->src_cpu)*sd->imbalance_pct < capacity_of(env->dst_cpu)*100))
+ 			return 1;
+@@ -12428,7 +12453,7 @@ static void nohz_balancer_kick(struct rq *rq)
+ 		 * If there's a runnable CFS task and the current CPU has reduced
+ 		 * capacity, kick the ILB to see if there's a better CPU to run on:
+ 		 */
+-		if (rq->cfs.h_nr_running >= 1 && check_cpu_capacity(rq, sd)) {
++		if (rq->cfs.h_nr_queued >= 1 && check_cpu_capacity(rq, sd)) {
+ 			flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK;
+ 			goto unlock;
+ 		}
+@@ -12926,11 +12951,11 @@ static int sched_balance_newidle(struct rq *this_rq, struct rq_flags *rf)
+ 	 * have been enqueued in the meantime. Since we're not going idle,
+ 	 * pretend we pulled a task.
+ 	 */
+-	if (this_rq->cfs.h_nr_running && !pulled_task)
++	if (this_rq->cfs.h_nr_queued && !pulled_task)
+ 		pulled_task = 1;
+ 
+ 	/* Is there a task of a high priority class? */
+-	if (this_rq->nr_running != this_rq->cfs.h_nr_running)
++	if (this_rq->nr_running != this_rq->cfs.h_nr_queued)
+ 		pulled_task = -1;
+ 
+ out:
+@@ -13617,7 +13642,7 @@ int sched_group_set_idle(struct task_group *tg, long idle)
+ 				parent_cfs_rq->idle_nr_running--;
+ 		}
+ 
+-		idle_task_delta = grp_cfs_rq->h_nr_running -
++		idle_task_delta = grp_cfs_rq->h_nr_queued -
+ 				  grp_cfs_rq->idle_h_nr_running;
+ 		if (!cfs_rq_is_idle(grp_cfs_rq))
+ 			idle_task_delta *= -1;
+diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
+index 171a802420a10a..8189a35e53fe14 100644
+--- a/kernel/sched/pelt.c
++++ b/kernel/sched/pelt.c
+@@ -275,7 +275,7 @@ ___update_load_avg(struct sched_avg *sa, unsigned long load)
+  *
+  *   group: [ see update_cfs_group() ]
+  *     se_weight()   = tg->weight * grq->load_avg / tg->load_avg
+- *     se_runnable() = grq->h_nr_running
++ *     se_runnable() = grq->h_nr_queued
+  *
+  *   runnable_sum = se_runnable() * runnable = grq->runnable_sum
+  *   runnable_avg = runnable_sum
+@@ -321,7 +321,7 @@ int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq)
+ {
+ 	if (___update_load_sum(now, &cfs_rq->avg,
+ 				scale_load_down(cfs_rq->load.weight),
+-				cfs_rq->h_nr_running - cfs_rq->h_nr_delayed,
++				cfs_rq->h_nr_queued - cfs_rq->h_nr_delayed,
+ 				cfs_rq->curr != NULL)) {
+ 
+ 		___update_load_avg(&cfs_rq->avg, 1);
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index d79de755c1c269..e7f5ab21221c48 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -651,7 +651,8 @@ struct balance_callback {
+ struct cfs_rq {
+ 	struct load_weight	load;
+ 	unsigned int		nr_running;
+-	unsigned int		h_nr_running;      /* SCHED_{NORMAL,BATCH,IDLE} */
++	unsigned int		h_nr_queued;       /* SCHED_{NORMAL,BATCH,IDLE} */
++	unsigned int		h_nr_runnable;     /* SCHED_{NORMAL,BATCH,IDLE} */
+ 	unsigned int		idle_nr_running;   /* SCHED_IDLE */
+ 	unsigned int		idle_h_nr_running; /* SCHED_IDLE */
+ 	unsigned int		h_nr_delayed;
+@@ -907,7 +908,7 @@ static inline void se_update_runnable(struct sched_entity *se)
+ 	if (!entity_is_task(se)) {
+ 		struct cfs_rq *cfs_rq = se->my_q;
+ 
+-		se->runnable_weight = cfs_rq->h_nr_running - cfs_rq->h_nr_delayed;
++		se->runnable_weight = cfs_rq->h_nr_queued - cfs_rq->h_nr_delayed;
+ 	}
+ }
+ 
+diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan
+index 37655f58b8554e..4e4dc430614a42 100644
+--- a/lib/Kconfig.ubsan
++++ b/lib/Kconfig.ubsan
+@@ -118,6 +118,8 @@ config UBSAN_UNREACHABLE
+ 
+ config UBSAN_SIGNED_WRAP
+ 	bool "Perform checking for signed arithmetic wrap-around"
++	# This is very experimental so drop the next line if you really want it
++	depends on BROKEN
+ 	depends on !COMPILE_TEST
+ 	# The no_sanitize attribute was introduced in GCC with version 8.
+ 	depends on !CC_IS_GCC || GCC_VERSION >= 80000
+diff --git a/lib/test_objagg.c b/lib/test_objagg.c
+index d34df4306b874f..222b39fc2629e2 100644
+--- a/lib/test_objagg.c
++++ b/lib/test_objagg.c
+@@ -899,8 +899,10 @@ static int check_expect_hints_stats(struct objagg_hints *objagg_hints,
+ 	int err;
+ 
+ 	stats = objagg_hints_stats_get(objagg_hints);
+-	if (IS_ERR(stats))
++	if (IS_ERR(stats)) {
++		*errmsg = "objagg_hints_stats_get() failed.";
+ 		return PTR_ERR(stats);
++	}
+ 	err = __check_expect_stats(stats, expect_stats, errmsg);
+ 	objagg_stats_put(stats);
+ 	return err;
+diff --git a/mm/secretmem.c b/mm/secretmem.c
+index 399552814fd0ff..4662f2510ae5f7 100644
+--- a/mm/secretmem.c
++++ b/mm/secretmem.c
+@@ -195,19 +195,11 @@ static struct file *secretmem_file_create(unsigned long flags)
+ 	struct file *file;
+ 	struct inode *inode;
+ 	const char *anon_name = "[secretmem]";
+-	const struct qstr qname = QSTR_INIT(anon_name, strlen(anon_name));
+-	int err;
+ 
+-	inode = alloc_anon_inode(secretmem_mnt->mnt_sb);
++	inode = anon_inode_make_secure_inode(secretmem_mnt->mnt_sb, anon_name, NULL);
+ 	if (IS_ERR(inode))
+ 		return ERR_CAST(inode);
+ 
+-	err = security_inode_init_security_anon(inode, &qname, NULL);
+-	if (err) {
+-		file = ERR_PTR(err);
+-		goto err_free_inode;
+-	}
+-
+ 	file = alloc_file_pseudo(inode, secretmem_mnt, "secretmem",
+ 				 O_RDWR, &secretmem_fops);
+ 	if (IS_ERR(file))
+diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
+index e06e3d2709610c..2646b75163d5ff 100644
+--- a/mm/userfaultfd.c
++++ b/mm/userfaultfd.c
+@@ -1078,8 +1078,18 @@ static int move_swap_pte(struct mm_struct *mm, struct vm_area_struct *dst_vma,
+ 			 pte_t *dst_pte, pte_t *src_pte,
+ 			 pte_t orig_dst_pte, pte_t orig_src_pte,
+ 			 spinlock_t *dst_ptl, spinlock_t *src_ptl,
+-			 struct folio *src_folio)
++			 struct folio *src_folio,
++			 struct swap_info_struct *si, swp_entry_t entry)
+ {
++	/*
++	 * Check if the folio still belongs to the target swap entry after
++	 * acquiring the lock. Folio can be freed in the swap cache while
++	 * not locked.
++	 */
++	if (src_folio && unlikely(!folio_test_swapcache(src_folio) ||
++				  entry.val != src_folio->swap.val))
++		return -EAGAIN;
++
+ 	double_pt_lock(dst_ptl, src_ptl);
+ 
+ 	if (!pte_same(ptep_get(src_pte), orig_src_pte) ||
+@@ -1096,6 +1106,25 @@ static int move_swap_pte(struct mm_struct *mm, struct vm_area_struct *dst_vma,
+ 	if (src_folio) {
+ 		folio_move_anon_rmap(src_folio, dst_vma);
+ 		src_folio->index = linear_page_index(dst_vma, dst_addr);
++	} else {
++		/*
++		 * Check if the swap entry is cached after acquiring the src_pte
++		 * lock. Otherwise, we might miss a newly loaded swap cache folio.
++		 *
++		 * Check swap_map directly to minimize overhead, READ_ONCE is sufficient.
++		 * We are trying to catch newly added swap cache, the only possible case is
++		 * when a folio is swapped in and out again staying in swap cache, using the
++		 * same entry before the PTE check above. The PTL is acquired and released
++		 * twice, each time after updating the swap_map's flag. So holding
++		 * the PTL here ensures we see the updated value. False positive is possible,
++		 * e.g. SWP_SYNCHRONOUS_IO swapin may set the flag without touching the
++		 * cache, or during the tiny synchronization window between swap cache and
++		 * swap_map, but it will be gone very quickly, worst result is retry jitters.
++		 */
++		if (READ_ONCE(si->swap_map[swp_offset(entry)]) & SWAP_HAS_CACHE) {
++			double_pt_unlock(dst_ptl, src_ptl);
++			return -EAGAIN;
++		}
+ 	}
+ 
+ 	orig_src_pte = ptep_get_and_clear(mm, src_addr, src_pte);
+@@ -1391,7 +1420,7 @@ static int move_pages_pte(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd,
+ 		}
+ 		err = move_swap_pte(mm, dst_vma, dst_addr, src_addr, dst_pte, src_pte,
+ 				orig_dst_pte, orig_src_pte,
+-				dst_ptl, src_ptl, src_folio);
++				dst_ptl, src_ptl, src_folio, si, entry);
+ 	}
+ 
+ out:
+diff --git a/mm/vmalloc.c b/mm/vmalloc.c
+index cc04e501b1c531..7888600b6a795b 100644
+--- a/mm/vmalloc.c
++++ b/mm/vmalloc.c
+@@ -3095,7 +3095,7 @@ static void clear_vm_uninitialized_flag(struct vm_struct *vm)
+ 	/*
+ 	 * Before removing VM_UNINITIALIZED,
+ 	 * we should make sure that vm has proper values.
+-	 * Pair with smp_rmb() in show_numa_info().
++	 * Pair with smp_rmb() in vread_iter() and vmalloc_info_show().
+ 	 */
+ 	smp_wmb();
+ 	vm->flags &= ~VM_UNINITIALIZED;
+@@ -4938,28 +4938,29 @@ bool vmalloc_dump_obj(void *object)
+ #endif
+ 
+ #ifdef CONFIG_PROC_FS
+-static void show_numa_info(struct seq_file *m, struct vm_struct *v)
+-{
+-	if (IS_ENABLED(CONFIG_NUMA)) {
+-		unsigned int nr, *counters = m->private;
+-		unsigned int step = 1U << vm_area_page_order(v);
+ 
+-		if (!counters)
+-			return;
++/*
++ * Print number of pages allocated on each memory node.
++ *
++ * This function can only be called if CONFIG_NUMA is enabled
++ * and VM_UNINITIALIZED bit in v->flags is disabled.
++ */
++static void show_numa_info(struct seq_file *m, struct vm_struct *v,
++				 unsigned int *counters)
++{
++	unsigned int nr;
++	unsigned int step = 1U << vm_area_page_order(v);
+ 
+-		if (v->flags & VM_UNINITIALIZED)
+-			return;
+-		/* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
+-		smp_rmb();
++	if (!counters)
++		return;
+ 
+-		memset(counters, 0, nr_node_ids * sizeof(unsigned int));
++	memset(counters, 0, nr_node_ids * sizeof(unsigned int));
+ 
+-		for (nr = 0; nr < v->nr_pages; nr += step)
+-			counters[page_to_nid(v->pages[nr])] += step;
+-		for_each_node_state(nr, N_HIGH_MEMORY)
+-			if (counters[nr])
+-				seq_printf(m, " N%u=%u", nr, counters[nr]);
+-	}
++	for (nr = 0; nr < v->nr_pages; nr += step)
++		counters[page_to_nid(v->pages[nr])] += step;
++	for_each_node_state(nr, N_HIGH_MEMORY)
++		if (counters[nr])
++			seq_printf(m, " N%u=%u", nr, counters[nr]);
+ }
+ 
+ static void show_purge_info(struct seq_file *m)
+@@ -4987,6 +4988,10 @@ static int vmalloc_info_show(struct seq_file *m, void *p)
+ 	struct vmap_area *va;
+ 	struct vm_struct *v;
+ 	int i;
++	unsigned int *counters;
++
++	if (IS_ENABLED(CONFIG_NUMA))
++		counters = kmalloc(nr_node_ids * sizeof(unsigned int), GFP_KERNEL);
+ 
+ 	for (i = 0; i < nr_vmap_nodes; i++) {
+ 		vn = &vmap_nodes[i];
+@@ -5003,6 +5008,11 @@ static int vmalloc_info_show(struct seq_file *m, void *p)
+ 			}
+ 
+ 			v = va->vm;
++			if (v->flags & VM_UNINITIALIZED)
++				continue;
++
++			/* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
++			smp_rmb();
+ 
+ 			seq_printf(m, "0x%pK-0x%pK %7ld",
+ 				v->addr, v->addr + v->size, v->size);
+@@ -5037,7 +5047,9 @@ static int vmalloc_info_show(struct seq_file *m, void *p)
+ 			if (is_vmalloc_addr(v->pages))
+ 				seq_puts(m, " vpages");
+ 
+-			show_numa_info(m, v);
++			if (IS_ENABLED(CONFIG_NUMA))
++				show_numa_info(m, v, counters);
++
+ 			seq_putc(m, '\n');
+ 		}
+ 		spin_unlock(&vn->busy.lock);
+@@ -5047,19 +5059,14 @@ static int vmalloc_info_show(struct seq_file *m, void *p)
+ 	 * As a final step, dump "unpurged" areas.
+ 	 */
+ 	show_purge_info(m);
++	if (IS_ENABLED(CONFIG_NUMA))
++		kfree(counters);
+ 	return 0;
+ }
+ 
+ static int __init proc_vmalloc_init(void)
+ {
+-	void *priv_data = NULL;
+-
+-	if (IS_ENABLED(CONFIG_NUMA))
+-		priv_data = kmalloc(nr_node_ids * sizeof(unsigned int), GFP_KERNEL);
+-
+-	proc_create_single_data("vmallocinfo",
+-		0400, NULL, vmalloc_info_show, priv_data);
+-
++	proc_create_single("vmallocinfo", 0400, NULL, vmalloc_info_show);
+ 	return 0;
+ }
+ module_init(proc_vmalloc_init);
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index 5c4c3d04d8b934..7fdf17351e4a2a 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -2141,40 +2141,6 @@ static u8 hci_cc_set_adv_param(struct hci_dev *hdev, void *data,
+ 	return rp->status;
+ }
+ 
+-static u8 hci_cc_set_ext_adv_param(struct hci_dev *hdev, void *data,
+-				   struct sk_buff *skb)
+-{
+-	struct hci_rp_le_set_ext_adv_params *rp = data;
+-	struct hci_cp_le_set_ext_adv_params *cp;
+-	struct adv_info *adv_instance;
+-
+-	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
+-
+-	if (rp->status)
+-		return rp->status;
+-
+-	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
+-	if (!cp)
+-		return rp->status;
+-
+-	hci_dev_lock(hdev);
+-	hdev->adv_addr_type = cp->own_addr_type;
+-	if (!cp->handle) {
+-		/* Store in hdev for instance 0 */
+-		hdev->adv_tx_power = rp->tx_power;
+-	} else {
+-		adv_instance = hci_find_adv_instance(hdev, cp->handle);
+-		if (adv_instance)
+-			adv_instance->tx_power = rp->tx_power;
+-	}
+-	/* Update adv data as tx power is known now */
+-	hci_update_adv_data(hdev, cp->handle);
+-
+-	hci_dev_unlock(hdev);
+-
+-	return rp->status;
+-}
+-
+ static u8 hci_cc_read_rssi(struct hci_dev *hdev, void *data,
+ 			   struct sk_buff *skb)
+ {
+@@ -4155,8 +4121,6 @@ static const struct hci_cc {
+ 	HCI_CC(HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
+ 	       hci_cc_le_read_num_adv_sets,
+ 	       sizeof(struct hci_rp_le_read_num_supported_adv_sets)),
+-	HCI_CC(HCI_OP_LE_SET_EXT_ADV_PARAMS, hci_cc_set_ext_adv_param,
+-	       sizeof(struct hci_rp_le_set_ext_adv_params)),
+ 	HCI_CC_STATUS(HCI_OP_LE_SET_EXT_ADV_ENABLE,
+ 		      hci_cc_le_set_ext_adv_enable),
+ 	HCI_CC_STATUS(HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
+diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
+index a00316d79dbf56..79d1a6ed08b294 100644
+--- a/net/bluetooth/hci_sync.c
++++ b/net/bluetooth/hci_sync.c
+@@ -1205,9 +1205,126 @@ static int hci_set_adv_set_random_addr_sync(struct hci_dev *hdev, u8 instance,
+ 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+ }
+ 
++static int
++hci_set_ext_adv_params_sync(struct hci_dev *hdev, struct adv_info *adv,
++			    const struct hci_cp_le_set_ext_adv_params *cp,
++			    struct hci_rp_le_set_ext_adv_params *rp)
++{
++	struct sk_buff *skb;
++
++	skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(*cp),
++			     cp, HCI_CMD_TIMEOUT);
++
++	/* If the command returns a status event, skb will be set to -ENODATA */
++	if (skb == ERR_PTR(-ENODATA))
++		return 0;
++
++	if (IS_ERR(skb)) {
++		bt_dev_err(hdev, "Opcode 0x%4.4x failed: %ld",
++			   HCI_OP_LE_SET_EXT_ADV_PARAMS, PTR_ERR(skb));
++		return PTR_ERR(skb);
++	}
++
++	if (skb->len != sizeof(*rp)) {
++		bt_dev_err(hdev, "Invalid response length for 0x%4.4x: %u",
++			   HCI_OP_LE_SET_EXT_ADV_PARAMS, skb->len);
++		kfree_skb(skb);
++		return -EIO;
++	}
++
++	memcpy(rp, skb->data, sizeof(*rp));
++	kfree_skb(skb);
++
++	if (!rp->status) {
++		hdev->adv_addr_type = cp->own_addr_type;
++		if (!cp->handle) {
++			/* Store in hdev for instance 0 */
++			hdev->adv_tx_power = rp->tx_power;
++		} else if (adv) {
++			adv->tx_power = rp->tx_power;
++		}
++	}
++
++	return rp->status;
++}
++
++static int hci_set_ext_adv_data_sync(struct hci_dev *hdev, u8 instance)
++{
++	DEFINE_FLEX(struct hci_cp_le_set_ext_adv_data, pdu, data, length,
++		    HCI_MAX_EXT_AD_LENGTH);
++	u8 len;
++	struct adv_info *adv = NULL;
++	int err;
++
++	if (instance) {
++		adv = hci_find_adv_instance(hdev, instance);
++		if (!adv || !adv->adv_data_changed)
++			return 0;
++	}
++
++	len = eir_create_adv_data(hdev, instance, pdu->data,
++				  HCI_MAX_EXT_AD_LENGTH);
++
++	pdu->length = len;
++	pdu->handle = adv ? adv->handle : instance;
++	pdu->operation = LE_SET_ADV_DATA_OP_COMPLETE;
++	pdu->frag_pref = LE_SET_ADV_DATA_NO_FRAG;
++
++	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_DATA,
++				    struct_size(pdu, data, len), pdu,
++				    HCI_CMD_TIMEOUT);
++	if (err)
++		return err;
++
++	/* Update data if the command succeeds */
++	if (adv) {
++		adv->adv_data_changed = false;
++	} else {
++		memcpy(hdev->adv_data, pdu->data, len);
++		hdev->adv_data_len = len;
++	}
++
++	return 0;
++}
++
++static int hci_set_adv_data_sync(struct hci_dev *hdev, u8 instance)
++{
++	struct hci_cp_le_set_adv_data cp;
++	u8 len;
++
++	memset(&cp, 0, sizeof(cp));
++
++	len = eir_create_adv_data(hdev, instance, cp.data, sizeof(cp.data));
++
++	/* There's nothing to do if the data hasn't changed */
++	if (hdev->adv_data_len == len &&
++	    memcmp(cp.data, hdev->adv_data, len) == 0)
++		return 0;
++
++	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
++	hdev->adv_data_len = len;
++
++	cp.length = len;
++
++	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_DATA,
++				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
++}
++
++int hci_update_adv_data_sync(struct hci_dev *hdev, u8 instance)
++{
++	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
++		return 0;
++
++	if (ext_adv_capable(hdev))
++		return hci_set_ext_adv_data_sync(hdev, instance);
++
++	return hci_set_adv_data_sync(hdev, instance);
++}
++
+ int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance)
+ {
+ 	struct hci_cp_le_set_ext_adv_params cp;
++	struct hci_rp_le_set_ext_adv_params rp;
+ 	bool connectable;
+ 	u32 flags;
+ 	bdaddr_t random_addr;
+@@ -1314,8 +1431,12 @@ int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance)
+ 		cp.secondary_phy = HCI_ADV_PHY_1M;
+ 	}
+ 
+-	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS,
+-				    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
++	err = hci_set_ext_adv_params_sync(hdev, adv, &cp, &rp);
++	if (err)
++		return err;
++
++	/* Update adv data as tx power is known now */
++	err = hci_set_ext_adv_data_sync(hdev, cp.handle);
+ 	if (err)
+ 		return err;
+ 
+@@ -1832,79 +1953,6 @@ int hci_le_terminate_big_sync(struct hci_dev *hdev, u8 handle, u8 reason)
+ 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+ }
+ 
+-static int hci_set_ext_adv_data_sync(struct hci_dev *hdev, u8 instance)
+-{
+-	DEFINE_FLEX(struct hci_cp_le_set_ext_adv_data, pdu, data, length,
+-		    HCI_MAX_EXT_AD_LENGTH);
+-	u8 len;
+-	struct adv_info *adv = NULL;
+-	int err;
+-
+-	if (instance) {
+-		adv = hci_find_adv_instance(hdev, instance);
+-		if (!adv || !adv->adv_data_changed)
+-			return 0;
+-	}
+-
+-	len = eir_create_adv_data(hdev, instance, pdu->data,
+-				  HCI_MAX_EXT_AD_LENGTH);
+-
+-	pdu->length = len;
+-	pdu->handle = adv ? adv->handle : instance;
+-	pdu->operation = LE_SET_ADV_DATA_OP_COMPLETE;
+-	pdu->frag_pref = LE_SET_ADV_DATA_NO_FRAG;
+-
+-	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_DATA,
+-				    struct_size(pdu, data, len), pdu,
+-				    HCI_CMD_TIMEOUT);
+-	if (err)
+-		return err;
+-
+-	/* Update data if the command succeed */
+-	if (adv) {
+-		adv->adv_data_changed = false;
+-	} else {
+-		memcpy(hdev->adv_data, pdu->data, len);
+-		hdev->adv_data_len = len;
+-	}
+-
+-	return 0;
+-}
+-
+-static int hci_set_adv_data_sync(struct hci_dev *hdev, u8 instance)
+-{
+-	struct hci_cp_le_set_adv_data cp;
+-	u8 len;
+-
+-	memset(&cp, 0, sizeof(cp));
+-
+-	len = eir_create_adv_data(hdev, instance, cp.data, sizeof(cp.data));
+-
+-	/* There's nothing to do if the data hasn't changed */
+-	if (hdev->adv_data_len == len &&
+-	    memcmp(cp.data, hdev->adv_data, len) == 0)
+-		return 0;
+-
+-	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
+-	hdev->adv_data_len = len;
+-
+-	cp.length = len;
+-
+-	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_DATA,
+-				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+-}
+-
+-int hci_update_adv_data_sync(struct hci_dev *hdev, u8 instance)
+-{
+-	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
+-		return 0;
+-
+-	if (ext_adv_capable(hdev))
+-		return hci_set_ext_adv_data_sync(hdev, instance);
+-
+-	return hci_set_adv_data_sync(hdev, instance);
+-}
+-
+ int hci_schedule_adv_instance_sync(struct hci_dev *hdev, u8 instance,
+ 				   bool force)
+ {
+@@ -1980,13 +2028,10 @@ static int hci_clear_adv_sets_sync(struct hci_dev *hdev, struct sock *sk)
+ static int hci_clear_adv_sync(struct hci_dev *hdev, struct sock *sk, bool force)
+ {
+ 	struct adv_info *adv, *n;
+-	int err = 0;
+ 
+ 	if (ext_adv_capable(hdev))
+ 		/* Remove all existing sets */
+-		err = hci_clear_adv_sets_sync(hdev, sk);
+-	if (ext_adv_capable(hdev))
+-		return err;
++		return hci_clear_adv_sets_sync(hdev, sk);
+ 
+ 	/* This is safe as long as there is no command send while the lock is
+ 	 * held.
+@@ -2014,13 +2059,11 @@ static int hci_clear_adv_sync(struct hci_dev *hdev, struct sock *sk, bool force)
+ static int hci_remove_adv_sync(struct hci_dev *hdev, u8 instance,
+ 			       struct sock *sk)
+ {
+-	int err = 0;
++	int err;
+ 
+ 	/* If we use extended advertising, instance has to be removed first. */
+ 	if (ext_adv_capable(hdev))
+-		err = hci_remove_ext_adv_instance_sync(hdev, instance, sk);
+-	if (ext_adv_capable(hdev))
+-		return err;
++		return hci_remove_ext_adv_instance_sync(hdev, instance, sk);
+ 
+ 	/* This is safe as long as there is no command send while the lock is
+ 	 * held.
+@@ -2119,16 +2162,13 @@ int hci_read_tx_power_sync(struct hci_dev *hdev, __le16 handle, u8 type)
+ int hci_disable_advertising_sync(struct hci_dev *hdev)
+ {
+ 	u8 enable = 0x00;
+-	int err = 0;
+ 
+ 	/* If controller is not advertising we are done. */
+ 	if (!hci_dev_test_flag(hdev, HCI_LE_ADV))
+ 		return 0;
+ 
+ 	if (ext_adv_capable(hdev))
+-		err = hci_disable_ext_adv_instance_sync(hdev, 0x00);
+-	if (ext_adv_capable(hdev))
+-		return err;
++		return hci_disable_ext_adv_instance_sync(hdev, 0x00);
+ 
+ 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE,
+ 				     sizeof(enable), &enable, HCI_CMD_TIMEOUT);
+@@ -2491,6 +2531,10 @@ static int hci_pause_advertising_sync(struct hci_dev *hdev)
+ 	int err;
+ 	int old_state;
+ 
++	/* If controller is not advertising we are done. */
++	if (!hci_dev_test_flag(hdev, HCI_LE_ADV))
++		return 0;
++
+ 	/* If already been paused there is nothing to do. */
+ 	if (hdev->advertising_paused)
+ 		return 0;
+@@ -6251,6 +6295,7 @@ static int hci_le_ext_directed_advertising_sync(struct hci_dev *hdev,
+ 						struct hci_conn *conn)
+ {
+ 	struct hci_cp_le_set_ext_adv_params cp;
++	struct hci_rp_le_set_ext_adv_params rp;
+ 	int err;
+ 	bdaddr_t random_addr;
+ 	u8 own_addr_type;
+@@ -6292,8 +6337,12 @@ static int hci_le_ext_directed_advertising_sync(struct hci_dev *hdev,
+ 	if (err)
+ 		return err;
+ 
+-	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS,
+-				    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
++	err = hci_set_ext_adv_params_sync(hdev, NULL, &cp, &rp);
++	if (err)
++		return err;
++
++	/* Update adv data as tx power is known now */
++	err = hci_set_ext_adv_data_sync(hdev, cp.handle);
+ 	if (err)
+ 		return err;
+ 
+diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
+index 7664e7ba372cee..ade93532db34b5 100644
+--- a/net/bluetooth/mgmt.c
++++ b/net/bluetooth/mgmt.c
+@@ -1073,7 +1073,8 @@ static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
+ 	struct mgmt_mesh_tx *mesh_tx;
+ 
+ 	hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
+-	hci_disable_advertising_sync(hdev);
++	if (list_empty(&hdev->adv_instances))
++		hci_disable_advertising_sync(hdev);
+ 	mesh_tx = mgmt_mesh_next(hdev, NULL);
+ 
+ 	if (mesh_tx)
+@@ -2146,6 +2147,9 @@ static int set_mesh_sync(struct hci_dev *hdev, void *data)
+ 	else
+ 		hci_dev_clear_flag(hdev, HCI_MESH);
+ 
++	hdev->le_scan_interval = __le16_to_cpu(cp->period);
++	hdev->le_scan_window = __le16_to_cpu(cp->window);
++
+ 	len -= sizeof(*cp);
+ 
+ 	/* If filters don't fit, forward all adv pkts */
+@@ -2160,6 +2164,7 @@ static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
+ {
+ 	struct mgmt_cp_set_mesh *cp = data;
+ 	struct mgmt_pending_cmd *cmd;
++	__u16 period, window;
+ 	int err = 0;
+ 
+ 	bt_dev_dbg(hdev, "sock %p", sk);
+@@ -2173,6 +2178,23 @@ static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
+ 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
+ 				       MGMT_STATUS_INVALID_PARAMS);
+ 
++	/* Keep allowed ranges in sync with set_scan_params() */
++	period = __le16_to_cpu(cp->period);
++
++	if (period < 0x0004 || period > 0x4000)
++		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
++				       MGMT_STATUS_INVALID_PARAMS);
++
++	window = __le16_to_cpu(cp->window);
++
++	if (window < 0x0004 || window > 0x4000)
++		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
++				       MGMT_STATUS_INVALID_PARAMS);
++
++	if (window > period)
++		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
++				       MGMT_STATUS_INVALID_PARAMS);
++
+ 	hci_dev_lock(hdev);
+ 
+ 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
+@@ -6536,6 +6558,7 @@ static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
+ 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
+ 				       MGMT_STATUS_NOT_SUPPORTED);
+ 
++	/* Keep allowed ranges in sync with set_mesh() */
+ 	interval = __le16_to_cpu(cp->interval);
+ 
+ 	if (interval < 0x0004 || interval > 0x4000)
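
The two hunks above duplicate one rule on purpose, so SET_MESH_RECEIVER and SET_SCAN_PARAMS stay in sync: period/interval and window must each lie in 0x0004-0x4000 (0.625 ms units), and the window may not exceed the period. A standalone sketch of that shared check in plain C (names hypothetical, not kernel APIs):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical helper mirroring the range checks added above. */
    static bool scan_params_valid(uint16_t interval, uint16_t window)
    {
        if (interval < 0x0004 || interval > 0x4000)
            return false;
        if (window < 0x0004 || window > 0x4000)
            return false;
        return window <= interval;  /* window is carved out of the interval */
    }

    int main(void)
    {
        printf("%d\n", scan_params_valid(0x0010, 0x0010)); /* 1 */
        printf("%d\n", scan_params_valid(0x0010, 0x0020)); /* 0: window > interval */
        printf("%d\n", scan_params_valid(0x0002, 0x0002)); /* 0: below 0x0004 */
        return 0;
    }
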
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index 8e1fbdd3bff10b..8e1d00efa62e5c 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -4481,6 +4481,10 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
+ 		if (!multicast &&
+ 		    !ether_addr_equal(sdata->dev->dev_addr, hdr->addr1))
+ 			return false;
++		/* reject invalid/our STA address */
++		if (!is_valid_ether_addr(hdr->addr2) ||
++		    ether_addr_equal(sdata->dev->dev_addr, hdr->addr2))
++			return false;
+ 		if (!rx->sta) {
+ 			int rate_idx;
+ 			if (status->encoding != RX_ENC_LEGACY)
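
The new mac80211 check drops frames whose transmitter address (addr2) is invalid or equal to our own address. A rough userspace model of that predicate, assuming 6-byte addresses with the multicast I/G bit in bit 0 of the first octet (this approximates, not reproduces, the kernel's is_valid_ether_addr()):

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    static bool is_zero_ether(const uint8_t *a)
    {
        static const uint8_t zero[6];
        return memcmp(a, zero, 6) == 0;
    }

    static bool is_valid_ether(const uint8_t *a)
    {
        return !(a[0] & 0x01) && !is_zero_ether(a); /* not multicast, not all-zero */
    }

    /* Mirrors the added check: reject invalid senders and frames that
     * claim to come from our own address. */
    static bool accept_sender(const uint8_t *own, const uint8_t *addr2)
    {
        return is_valid_ether(addr2) && memcmp(own, addr2, 6) != 0;
    }

    int main(void)
    {
        uint8_t own[6]   = {0x02, 0, 0, 0, 0, 0x01};
        uint8_t peer[6]  = {0x02, 0, 0, 0, 0, 0x02};
        uint8_t mcast[6] = {0x01, 0, 0x5e, 0, 0, 0x01};

        return !(accept_sender(own, peer) &&
                 !accept_sender(own, mcast) &&
                 !accept_sender(own, own));
    }
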
+diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
+index fee772b4637c88..a7054546f52dfa 100644
+--- a/net/rose/rose_route.c
++++ b/net/rose/rose_route.c
+@@ -497,22 +497,15 @@ void rose_rt_device_down(struct net_device *dev)
+ 			t         = rose_node;
+ 			rose_node = rose_node->next;
+ 
+-			for (i = 0; i < t->count; i++) {
++			for (i = t->count - 1; i >= 0; i--) {
+ 				if (t->neighbour[i] != s)
+ 					continue;
+ 
+ 				t->count--;
+ 
+-				switch (i) {
+-				case 0:
+-					t->neighbour[0] = t->neighbour[1];
+-					fallthrough;
+-				case 1:
+-					t->neighbour[1] = t->neighbour[2];
+-					break;
+-				case 2:
+-					break;
+-				}
++				memmove(&t->neighbour[i], &t->neighbour[i + 1],
++					sizeof(t->neighbour[0]) *
++						(t->count - i));
+ 			}
+ 
+ 			if (t->count <= 0)
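
The rose_route rewrite replaces a hard-coded three-entry switch with memmove(), which slides the tail of the array down over the removed slot and keeps working if the neighbour array ever grows. Iterating downward also matters: after a removal the following entries shift into the current index, which a forward loop would skip. The deletion idiom in isolation, as a self-contained sketch:

    #include <stdio.h>
    #include <string.h>

    /* Remove element i from arr[0..count-1] by sliding the tail left,
     * as the memmove() above does for t->neighbour. Returns the new count. */
    static int remove_at(int *arr, int count, int i)
    {
        count--;
        memmove(&arr[i], &arr[i + 1], sizeof(arr[0]) * (count - i));
        return count;
    }

    int main(void)
    {
        int a[] = { 10, 20, 30, 40 };
        int n = remove_at(a, 4, 1);     /* drop 20 */

        for (int i = 0; i < n; i++)
            printf("%d ", a[i]);        /* 10 30 40 */
        printf("\n");
        return 0;
    }
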
+diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
+index 518f52f65a49d7..26378eac1bd08b 100644
+--- a/net/sched/sch_api.c
++++ b/net/sched/sch_api.c
+@@ -779,15 +779,12 @@ static u32 qdisc_alloc_handle(struct net_device *dev)
+ 
+ void qdisc_tree_reduce_backlog(struct Qdisc *sch, int n, int len)
+ {
+-	bool qdisc_is_offloaded = sch->flags & TCQ_F_OFFLOADED;
+ 	const struct Qdisc_class_ops *cops;
+ 	unsigned long cl;
+ 	u32 parentid;
+ 	bool notify;
+ 	int drops;
+ 
+-	if (n == 0 && len == 0)
+-		return;
+ 	drops = max_t(int, n, 0);
+ 	rcu_read_lock();
+ 	while ((parentid = sch->parent)) {
+@@ -796,17 +793,8 @@ void qdisc_tree_reduce_backlog(struct Qdisc *sch, int n, int len)
+ 
+ 		if (sch->flags & TCQ_F_NOPARENT)
+ 			break;
+-		/* Notify parent qdisc only if child qdisc becomes empty.
+-		 *
+-		 * If child was empty even before update then backlog
+-		 * counter is screwed and we skip notification because
+-		 * parent class is already passive.
+-		 *
+-		 * If the original child was offloaded then it is allowed
+-		 * to be seem as empty, so the parent is notified anyway.
+-		 */
+-		notify = !sch->q.qlen && !WARN_ON_ONCE(!n &&
+-						       !qdisc_is_offloaded);
++		/* Notify parent qdisc only if child qdisc becomes empty. */
++		notify = !sch->q.qlen;
+ 		/* TODO: perform the search on a per txq basis */
+ 		sch = qdisc_lookup_rcu(qdisc_dev(sch), TC_H_MAJ(parentid));
+ 		if (sch == NULL) {
+@@ -815,6 +803,9 @@ void qdisc_tree_reduce_backlog(struct Qdisc *sch, int n, int len)
+ 		}
+ 		cops = sch->ops->cl_ops;
+ 		if (notify && cops->qlen_notify) {
++			/* Note that qlen_notify must be idempotent as it may get called
++			 * multiple times.
++			 */
+ 			cl = cops->find(sch, parentid);
+ 			cops->qlen_notify(sch, cl);
+ 		}
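
The simplified rule in the hunk above is: after reducing a child's backlog, walk up the hierarchy and notify a parent class only when its child queue has just become empty, and require qlen_notify() to be idempotent because the walk may fire more than once. A toy model of that parent walk (hypothetical types, nothing like the kernel's real Qdisc machinery):

    #include <stdio.h>

    struct toy_qdisc {
        struct toy_qdisc *parent;
        int qlen;
        const char *name;
    };

    /* Idempotent by construction: re-notifying about an already-empty
     * child is harmless, matching the comment added above. */
    static void qlen_notify(struct toy_qdisc *parent, struct toy_qdisc *child)
    {
        printf("%s: child %s is now empty\n", parent->name, child->name);
    }

    static void reduce_backlog(struct toy_qdisc *sch, int n)
    {
        while (sch->parent) {
            sch->qlen -= n;
            if (sch->qlen == 0)
                qlen_notify(sch->parent, sch);
            sch = sch->parent;
        }
    }

    int main(void)
    {
        struct toy_qdisc root = { NULL, 3, "root" };
        struct toy_qdisc leaf = { &root, 2, "leaf" };

        reduce_backlog(&leaf, 2);   /* leaf empties, root gets notified */
        return 0;
    }
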
+diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
+index 7ce3721c06ca5b..eadc00410ebc51 100644
+--- a/net/sunrpc/rpc_pipe.c
++++ b/net/sunrpc/rpc_pipe.c
+@@ -630,7 +630,7 @@ static int __rpc_rmpipe(struct inode *dir, struct dentry *dentry)
+ static struct dentry *__rpc_lookup_create_exclusive(struct dentry *parent,
+ 					  const char *name)
+ {
+-	struct qstr q = QSTR_INIT(name, strlen(name));
++	struct qstr q = QSTR(name);
+ 	struct dentry *dentry = d_hash_and_lookup(parent, &q);
+ 	if (!dentry) {
+ 		dentry = d_alloc(parent, &q);
+@@ -1190,8 +1190,7 @@ static const struct rpc_filelist files[] = {
+ struct dentry *rpc_d_lookup_sb(const struct super_block *sb,
+ 			       const unsigned char *dir_name)
+ {
+-	struct qstr dir = QSTR_INIT(dir_name, strlen(dir_name));
+-	return d_hash_and_lookup(sb->s_root, &dir);
++	return d_hash_and_lookup(sb->s_root, &QSTR(dir_name));
+ }
+ EXPORT_SYMBOL_GPL(rpc_d_lookup_sb);
+ 
+@@ -1300,11 +1299,9 @@ rpc_gssd_dummy_populate(struct dentry *root, struct rpc_pipe *pipe_data)
+ 	struct dentry *gssd_dentry;
+ 	struct dentry *clnt_dentry = NULL;
+ 	struct dentry *pipe_dentry = NULL;
+-	struct qstr q = QSTR_INIT(files[RPCAUTH_gssd].name,
+-				  strlen(files[RPCAUTH_gssd].name));
+ 
+ 	/* We should never get this far if "gssd" doesn't exist */
+-	gssd_dentry = d_hash_and_lookup(root, &q);
++	gssd_dentry = d_hash_and_lookup(root, &QSTR(files[RPCAUTH_gssd].name));
+ 	if (!gssd_dentry)
+ 		return ERR_PTR(-ENOENT);
+ 
+@@ -1314,9 +1311,8 @@ rpc_gssd_dummy_populate(struct dentry *root, struct rpc_pipe *pipe_data)
+ 		goto out;
+ 	}
+ 
+-	q.name = gssd_dummy_clnt_dir[0].name;
+-	q.len = strlen(gssd_dummy_clnt_dir[0].name);
+-	clnt_dentry = d_hash_and_lookup(gssd_dentry, &q);
++	clnt_dentry = d_hash_and_lookup(gssd_dentry,
++					&QSTR(gssd_dummy_clnt_dir[0].name));
+ 	if (!clnt_dentry) {
+ 		__rpc_depopulate(gssd_dentry, gssd_dummy_clnt_dir, 0, 1);
+ 		pipe_dentry = ERR_PTR(-ENOENT);
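
These rpc_pipe hunks replace the two-argument QSTR_INIT(name, strlen(name)) pattern with the newer QSTR() helper, which derives the length itself at each call site. A minimal approximation of what such a constructor looks like (a sketch, not the exact dcache.h definition, which also packs a hash into the length field):

    #include <stdio.h>
    #include <string.h>

    struct qstr {
        const char *name;
        size_t len;
    };

    /* Compound-literal constructor: one argument instead of repeating
     * strlen(name) at every call site. */
    #define QSTR(n) ((struct qstr){ .name = (n), .len = strlen(n) })

    int main(void)
    {
        struct qstr q = QSTR("gssd");

        printf("%s has length %zu\n", q.name, q.len);
        return 0;
    }
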
+diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
+index b370070194fa4a..7eccd6708d6649 100644
+--- a/net/vmw_vsock/vmci_transport.c
++++ b/net/vmw_vsock/vmci_transport.c
+@@ -119,6 +119,8 @@ vmci_transport_packet_init(struct vmci_transport_packet *pkt,
+ 			   u16 proto,
+ 			   struct vmci_handle handle)
+ {
++	memset(pkt, 0, sizeof(*pkt));
++
+ 	/* We register the stream control handler as an any cid handle so we
+ 	 * must always send from a source address of VMADDR_CID_ANY
+ 	 */
+@@ -131,8 +133,6 @@ vmci_transport_packet_init(struct vmci_transport_packet *pkt,
+ 	pkt->type = type;
+ 	pkt->src_port = src->svm_port;
+ 	pkt->dst_port = dst->svm_port;
+-	memset(&pkt->proto, 0, sizeof(pkt->proto));
+-	memset(&pkt->_reserved2, 0, sizeof(pkt->_reserved2));
+ 
+ 	switch (pkt->type) {
+ 	case VMCI_TRANSPORT_PACKET_TYPE_INVALID:
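
Zeroing the whole packet up front replaces the two targeted memset() calls and, more importantly, clears padding bytes and any field a later revision might add, so nothing uninitialized ever leaves the host. The pattern in isolation, with a made-up struct:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct pkt {
        uint8_t  type;          /* 3 bytes of padding follow here */
        uint32_t src_port;
        uint32_t dst_port;
        uint16_t proto;
        uint16_t _reserved;
    };

    static void pkt_init(struct pkt *p, uint8_t type, uint32_t src, uint32_t dst)
    {
        /* Clear everything first: padding, reserved fields, and any
         * member this function does not explicitly set. */
        memset(p, 0, sizeof(*p));
        p->type = type;
        p->src_port = src;
        p->dst_port = dst;
    }

    int main(void)
    {
        struct pkt p;

        pkt_init(&p, 1, 1024, 2048);
        printf("proto=%u reserved=%u\n", p.proto, p._reserved); /* 0 0 */
        return 0;
    }
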
+diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
+index 88850405ded929..f36332e64c4d1a 100644
+--- a/security/selinux/ss/services.c
++++ b/security/selinux/ss/services.c
+@@ -1884,11 +1884,17 @@ static int security_compute_sid(u32 ssid,
+ 			goto out_unlock;
+ 	}
+ 	/* Obtain the sid for the context. */
+-	rc = sidtab_context_to_sid(sidtab, &newcontext, out_sid);
+-	if (rc == -ESTALE) {
+-		rcu_read_unlock();
+-		context_destroy(&newcontext);
+-		goto retry;
++	if (context_cmp(scontext, &newcontext))
++		*out_sid = ssid;
++	else if (context_cmp(tcontext, &newcontext))
++		*out_sid = tsid;
++	else {
++		rc = sidtab_context_to_sid(sidtab, &newcontext, out_sid);
++		if (rc == -ESTALE) {
++			rcu_read_unlock();
++			context_destroy(&newcontext);
++			goto retry;
++		}
+ 	}
+ out_unlock:
+ 	rcu_read_unlock();
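
The SELinux change short-circuits sidtab insertion: when the computed context is identical to the source or target context, the existing SID is returned instead of interning a duplicate, so transitions that change nothing no longer grow the table. The general intern-or-reuse shape, reduced to strings (intern() here is a hypothetical stand-in for sidtab_context_to_sid()):

    #include <stdio.h>
    #include <string.h>

    #define MAX_IDS 8

    static const char *table[MAX_IDS];
    static int table_len;

    static int intern(const char *ctx)
    {
        table[table_len] = ctx;
        return table_len++;
    }

    /* Reuse the caller's IDs when the result equals what it already has,
     * mirroring the context_cmp() fast paths added above. */
    static int compute_id(int sid, const char *sctx,
                          int tid, const char *tctx, const char *newctx)
    {
        if (strcmp(sctx, newctx) == 0)
            return sid;
        if (strcmp(tctx, newctx) == 0)
            return tid;
        return intern(newctx);
    }

    int main(void)
    {
        int s = intern("user_u:user_r:user_t");
        int t = intern("system_u:object_r:file_t");

        /* Result equals the source context: no new table entry. */
        printf("%d\n", compute_id(s, table[s], t, table[t], "user_u:user_r:user_t"));
        printf("table_len=%d\n", table_len);    /* still 2 */
        return 0;
    }
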
+diff --git a/sound/isa/sb/sb16_main.c b/sound/isa/sb/sb16_main.c
+index 74db115250030e..5a083eecaa6b99 100644
+--- a/sound/isa/sb/sb16_main.c
++++ b/sound/isa/sb/sb16_main.c
+@@ -703,6 +703,9 @@ static int snd_sb16_dma_control_put(struct snd_kcontrol *kcontrol, struct snd_ct
+ 	unsigned char nval, oval;
+ 	int change;
+ 	
++	if (chip->mode & (SB_MODE_PLAYBACK | SB_MODE_CAPTURE))
++		return -EBUSY;
++
+ 	nval = ucontrol->value.enumerated.item[0];
+ 	if (nval > 2)
+ 		return -EINVAL;
+@@ -711,6 +714,10 @@ static int snd_sb16_dma_control_put(struct snd_kcontrol *kcontrol, struct snd_ct
+ 	change = nval != oval;
+ 	snd_sb16_set_dma_mode(chip, nval);
+ 	spin_unlock_irqrestore(&chip->reg_lock, flags);
++	if (change) {
++		snd_dma_disable(chip->dma8);
++		snd_dma_disable(chip->dma16);
++	}
+ 	return change;
+ }
+ 
+diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
+index b27966f82c8b65..723cb7bc128516 100644
+--- a/sound/soc/amd/yc/acp6x-mach.c
++++ b/sound/soc/amd/yc/acp6x-mach.c
+@@ -451,6 +451,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "Bravo 17 D7VEK"),
+ 		}
+ 	},
++	{
++		.driver_data = &acp6x_card,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "Micro-Star International Co., Ltd."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Bravo 17 D7VF"),
++		}
++	},
+ 	{
+ 		.driver_data = &acp6x_card,
+ 		.matches = {
+@@ -514,6 +521,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "OMEN by HP Gaming Laptop 16z-n000"),
+ 		}
+ 	},
++	{
++		.driver_data = &acp6x_card,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Victus by HP Gaming Laptop 15-fb2xxx"),
++		}
++	},
+ 	{
+ 		.driver_data = &acp6x_card,
+ 		.matches = {
+diff --git a/sound/soc/codecs/tas2764.c b/sound/soc/codecs/tas2764.c
+index 4326555aac032d..e8fbe8a399f6d7 100644
+--- a/sound/soc/codecs/tas2764.c
++++ b/sound/soc/codecs/tas2764.c
+@@ -14,6 +14,7 @@
+ #include <linux/regulator/consumer.h>
+ #include <linux/regmap.h>
+ #include <linux/of.h>
++#include <linux/of_device.h>
+ #include <linux/slab.h>
+ #include <sound/soc.h>
+ #include <sound/pcm.h>
+@@ -23,6 +24,11 @@
+ 
+ #include "tas2764.h"
+ 
++enum tas2764_devid {
++	DEVID_TAS2764  = 0,
++	DEVID_SN012776 = 1
++};
++
+ struct tas2764_priv {
+ 	struct snd_soc_component *component;
+ 	struct gpio_desc *reset_gpio;
+@@ -30,7 +36,8 @@ struct tas2764_priv {
+ 	struct regmap *regmap;
+ 	struct device *dev;
+ 	int irq;
+-	
++	enum tas2764_devid devid;
++
+ 	int v_sense_slot;
+ 	int i_sense_slot;
+ 
+@@ -525,10 +532,18 @@ static struct snd_soc_dai_driver tas2764_dai_driver[] = {
+ 	},
+ };
+ 
++static uint8_t sn012776_bop_presets[] = {
++	0x01, 0x32, 0x02, 0x22, 0x83, 0x2d, 0x80, 0x02, 0x06,
++	0x32, 0x46, 0x30, 0x02, 0x06, 0x38, 0x40, 0x30, 0x02,
++	0x06, 0x3e, 0x37, 0x30, 0xff, 0xe6
++};
++
++static const struct regmap_config tas2764_i2c_regmap;
++
+ static int tas2764_codec_probe(struct snd_soc_component *component)
+ {
+ 	struct tas2764_priv *tas2764 = snd_soc_component_get_drvdata(component);
+-	int ret;
++	int ret, i;
+ 
+ 	tas2764->component = component;
+ 
+@@ -538,6 +553,7 @@ static int tas2764_codec_probe(struct snd_soc_component *component)
+ 	}
+ 
+ 	tas2764_reset(tas2764);
++	regmap_reinit_cache(tas2764->regmap, &tas2764_i2c_regmap);
+ 
+ 	if (tas2764->irq) {
+ 		ret = snd_soc_component_write(tas2764->component, TAS2764_INT_MASK0, 0x00);
+@@ -577,6 +593,27 @@ static int tas2764_codec_probe(struct snd_soc_component *component)
+ 	if (ret < 0)
+ 		return ret;
+ 
++	switch (tas2764->devid) {
++	case DEVID_SN012776:
++		ret = snd_soc_component_update_bits(component, TAS2764_PWR_CTRL,
++					TAS2764_PWR_CTRL_BOP_SRC,
++					TAS2764_PWR_CTRL_BOP_SRC);
++		if (ret < 0)
++			return ret;
++
++		for (i = 0; i < ARRAY_SIZE(sn012776_bop_presets); i++) {
++			ret = snd_soc_component_write(component,
++						TAS2764_BOP_CFG0 + i,
++						sn012776_bop_presets[i]);
++
++			if (ret < 0)
++				return ret;
++		}
++		break;
++	default:
++		break;
++	}
++
+ 	return 0;
+ }
+ 
+@@ -708,6 +745,8 @@ static int tas2764_i2c_probe(struct i2c_client *client)
+ 	if (!tas2764)
+ 		return -ENOMEM;
+ 
++	tas2764->devid = (enum tas2764_devid)of_device_get_match_data(&client->dev);
++
+ 	tas2764->dev = &client->dev;
+ 	tas2764->irq = client->irq;
+ 	i2c_set_clientdata(client, tas2764);
+@@ -744,7 +783,8 @@ MODULE_DEVICE_TABLE(i2c, tas2764_i2c_id);
+ 
+ #if defined(CONFIG_OF)
+ static const struct of_device_id tas2764_of_match[] = {
+-	{ .compatible = "ti,tas2764" },
++	{ .compatible = "ti,tas2764",  .data = (void *)DEVID_TAS2764 },
++	{ .compatible = "ti,sn012776", .data = (void *)DEVID_SN012776 },
+ 	{},
+ };
+ MODULE_DEVICE_TABLE(of, tas2764_of_match);
+diff --git a/sound/soc/codecs/tas2764.h b/sound/soc/codecs/tas2764.h
+index 9490f2686e3891..69c0f91cb42398 100644
+--- a/sound/soc/codecs/tas2764.h
++++ b/sound/soc/codecs/tas2764.h
+@@ -29,6 +29,7 @@
+ #define TAS2764_PWR_CTRL_ACTIVE		0x0
+ #define TAS2764_PWR_CTRL_MUTE		BIT(0)
+ #define TAS2764_PWR_CTRL_SHUTDOWN	BIT(1)
++#define TAS2764_PWR_CTRL_BOP_SRC	BIT(7)
+ 
+ #define TAS2764_VSENSE_POWER_EN		3
+ #define TAS2764_ISENSE_POWER_EN		4
+@@ -116,4 +117,6 @@
+ #define TAS2764_INT_CLK_CFG             TAS2764_REG(0x0, 0x5c)
+ #define TAS2764_INT_CLK_CFG_IRQZ_CLR    BIT(2)
+ 
++#define TAS2764_BOP_CFG0                TAS2764_REG(0X0, 0x1d)
++
+ #endif /* __TAS2764__ */
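
The tas2764 driver distinguishes the TI TAS2764 from the SN012776 variant by stashing an enum in the .data field of the of_device_id table and reading it back with of_device_get_match_data() at probe time; casting a small integer through void * (or uintptr_t) is the usual way to smuggle it through that pointer. A userspace model of the same compatible-string lookup (all names hypothetical):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    enum devid { DEVID_A = 0, DEVID_B = 1 };

    struct match_entry {
        const char *compatible;
        const void *data;       /* small integer smuggled as a pointer */
    };

    static const struct match_entry match_table[] = {
        { "vendor,chip-a", (void *)(uintptr_t)DEVID_A },
        { "vendor,chip-b", (void *)(uintptr_t)DEVID_B },
        { NULL, NULL },
    };

    static const void *get_match_data(const char *compatible)
    {
        for (const struct match_entry *m = match_table; m->compatible; m++)
            if (strcmp(m->compatible, compatible) == 0)
                return m->data;
        return NULL;
    }

    int main(void)
    {
        enum devid id = (enum devid)(uintptr_t)get_match_data("vendor,chip-b");

        printf("devid=%d\n", id);   /* 1 */
        return 0;
    }
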
+diff --git a/tools/testing/kunit/qemu_configs/sparc.py b/tools/testing/kunit/qemu_configs/sparc.py
+index e975c4331a7c2a..2019550a1b692e 100644
+--- a/tools/testing/kunit/qemu_configs/sparc.py
++++ b/tools/testing/kunit/qemu_configs/sparc.py
+@@ -2,8 +2,11 @@ from ..qemu_config import QemuArchParams
+ 
+ QEMU_ARCH = QemuArchParams(linux_arch='sparc',
+ 			   kconfig='''
+-CONFIG_SERIAL_8250=y
+-CONFIG_SERIAL_8250_CONSOLE=y''',
++CONFIG_KUNIT_FAULT_TEST=n
++CONFIG_SPARC32=y
++CONFIG_SERIAL_SUNZILOG=y
++CONFIG_SERIAL_SUNZILOG_CONSOLE=y
++''',
+ 			   qemu_arch='sparc',
+ 			   kernel_path='arch/sparc/boot/zImage',
+ 			   kernel_command_line='console=ttyS0 mem=256M',
+diff --git a/tools/testing/selftests/iommu/iommufd.c b/tools/testing/selftests/iommu/iommufd.c
+index 06f252733660a1..a81c22d520070d 100644
+--- a/tools/testing/selftests/iommu/iommufd.c
++++ b/tools/testing/selftests/iommu/iommufd.c
+@@ -1748,6 +1748,7 @@ FIXTURE_VARIANT(iommufd_dirty_tracking)
+ 
+ FIXTURE_SETUP(iommufd_dirty_tracking)
+ {
++	size_t mmap_buffer_size;
+ 	unsigned long size;
+ 	int mmap_flags;
+ 	void *vrc;
+@@ -1762,22 +1763,33 @@ FIXTURE_SETUP(iommufd_dirty_tracking)
+ 	self->fd = open("/dev/iommu", O_RDWR);
+ 	ASSERT_NE(-1, self->fd);
+ 
+-	rc = posix_memalign(&self->buffer, HUGEPAGE_SIZE, variant->buffer_size);
+-	if (rc || !self->buffer) {
+-		SKIP(return, "Skipping buffer_size=%lu due to errno=%d",
+-			   variant->buffer_size, rc);
+-	}
+-
+ 	mmap_flags = MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED;
++	mmap_buffer_size = variant->buffer_size;
+ 	if (variant->hugepages) {
+ 		/*
+ 		 * MAP_POPULATE will cause the kernel to fail mmap if THPs are
+ 		 * not available.
+ 		 */
+ 		mmap_flags |= MAP_HUGETLB | MAP_POPULATE;
++
++		/*
++		 * Allocation must be aligned to the HUGEPAGE_SIZE, because the
++		 * following mmap() will automatically align the length to be a
++		 * multiple of the underlying huge page size. Failing to do the
++		 * same at this allocation will result in a memory overwrite by
++		 * the mmap().
++		 */
++		if (mmap_buffer_size < HUGEPAGE_SIZE)
++			mmap_buffer_size = HUGEPAGE_SIZE;
++	}
++
++	rc = posix_memalign(&self->buffer, HUGEPAGE_SIZE, mmap_buffer_size);
++	if (rc || !self->buffer) {
++		SKIP(return, "Skipping buffer_size=%lu due to errno=%d",
++			   mmap_buffer_size, rc);
+ 	}
+ 	assert((uintptr_t)self->buffer % HUGEPAGE_SIZE == 0);
+-	vrc = mmap(self->buffer, variant->buffer_size, PROT_READ | PROT_WRITE,
++	vrc = mmap(self->buffer, mmap_buffer_size, PROT_READ | PROT_WRITE,
+ 		   mmap_flags, -1, 0);
+ 	assert(vrc == self->buffer);
+ 
+@@ -1806,8 +1818,8 @@ FIXTURE_SETUP(iommufd_dirty_tracking)
+ 
+ FIXTURE_TEARDOWN(iommufd_dirty_tracking)
+ {
+-	munmap(self->buffer, variant->buffer_size);
+-	munmap(self->bitmap, DIV_ROUND_UP(self->bitmap_size, BITS_PER_BYTE));
++	free(self->buffer);
++	free(self->bitmap);
+ 	teardown_iommufd(self->fd, _metadata);
+ }
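
The iommufd selftest fix rounds the posix_memalign() size up to HUGEPAGE_SIZE before handing the buffer to mmap(MAP_FIXED | MAP_HUGETLB), because the kernel rounds the mapping length up to the huge page size and would otherwise clobber whatever the allocator placed after an undersized buffer. The sizing logic on its own (assumes a 2 MiB huge page; the mmap itself needs reserved huge pages to succeed):

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/mman.h>

    #define HUGEPAGE_SIZE (2UL * 1024 * 1024)

    int main(void)
    {
        size_t buffer_size = 4096;      /* requested test size */
        size_t mmap_size = buffer_size;
        void *buf;

        /* Round up so the MAP_FIXED mapping cannot extend past the
         * allocation when the kernel aligns its length. */
        if (mmap_size < HUGEPAGE_SIZE)
            mmap_size = HUGEPAGE_SIZE;

        if (posix_memalign(&buf, HUGEPAGE_SIZE, mmap_size))
            return 1;

        void *p = mmap(buf, mmap_size, PROT_READ | PROT_WRITE,
                       MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED |
                       MAP_HUGETLB | MAP_POPULATE, -1, 0);
        if (p == MAP_FAILED)
            perror("mmap");     /* expected without reserved huge pages */
        else
            printf("mapped %zu bytes at %p\n", mmap_size, p);
        return 0;
    }
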
+ 


^ permalink raw reply related	[flat|nested] 82+ messages in thread
* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2025-07-06 13:42 Arisu Tachibana
  0 siblings, 0 replies; 82+ messages in thread
From: Arisu Tachibana @ 2025-07-06 13:42 UTC (permalink / raw
  To: gentoo-commits

commit:     3dfd79410f86ab8ddccf7faf222d0c9fb40afd7e
Author:     Arisu Tachibana <alicef <AT> gentoo <DOT> org>
AuthorDate: Sun Jul  6 13:22:44 2025 +0000
Commit:     Arisu Tachibana <alicef <AT> gentoo <DOT> org>
CommitDate: Sun Jul  6 13:22:44 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=3dfd7941

Linux patch 6.12.36

Signed-off-by: Arisu Tachibana <alicef <AT> gentoo.org>

 0000_README              |     6 +-
 1035_linux-6.12.36.patch | 11930 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 11935 insertions(+), 1 deletion(-)

diff --git a/0000_README b/0000_README
index baad7561..eda6f446 100644
--- a/0000_README
+++ b/0000_README
@@ -43,7 +43,7 @@ EXPERIMENTAL
 Individual Patch Descriptions:
 --------------------------------------------------------------------------
 
-Patch:  1000_linux-6.12.1.patch
+Patch:  1001_linux-6.12.1.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.12.1
 
@@ -183,6 +183,10 @@ Patch:  1034_linux-6.12.35.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.12.35
 
+Patch:  1035_linux-6.12.36.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.12.36
+
 Patch:  1500_fortify-copy-size-value-range-tracking-fix.patch
 From:   https://git.kernel.org/
 Desc:   fortify: Hide run-time copy size from value range tracking

diff --git a/1035_linux-6.12.36.patch b/1035_linux-6.12.36.patch
new file mode 100644
index 00000000..e7b42f06
--- /dev/null
+++ b/1035_linux-6.12.36.patch
@@ -0,0 +1,11930 @@
+diff --git a/Documentation/devicetree/bindings/serial/8250.yaml b/Documentation/devicetree/bindings/serial/8250.yaml
+index 692aa05500fd53..6ba0325039be21 100644
+--- a/Documentation/devicetree/bindings/serial/8250.yaml
++++ b/Documentation/devicetree/bindings/serial/8250.yaml
+@@ -45,7 +45,7 @@ allOf:
+                   - ns16550
+                   - ns16550a
+     then:
+-      anyOf:
++      oneOf:
+         - required: [ clock-frequency ]
+         - required: [ clocks ]
+ 
+diff --git a/Documentation/netlink/specs/tc.yaml b/Documentation/netlink/specs/tc.yaml
+index c5579a5412fc9a..043f205bc1ae7d 100644
+--- a/Documentation/netlink/specs/tc.yaml
++++ b/Documentation/netlink/specs/tc.yaml
+@@ -227,7 +227,7 @@ definitions:
+         type: u8
+         doc: log(P_max / (qth-max - qth-min))
+       -
+-        name: Scell_log
++        name: Scell-log
+         type: u8
+         doc: cell size for idle damping
+       -
+@@ -248,7 +248,7 @@ definitions:
+         name: DPs
+         type: u32
+       -
+-        name: def_DP
++        name: def-DP
+         type: u32
+       -
+         name: grio
+diff --git a/Makefile b/Makefile
+index 535df76f6f78c8..7012820523fff4 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 12
+-SUBLEVEL = 35
++SUBLEVEL = 36
+ EXTRAVERSION =
+ NAME = Baby Opossum Posse
+ 
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dtsi
+index 11d99d8b34a2be..66d010a9e8c31d 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dtsi
+@@ -227,6 +227,16 @@ vcc5v0_usb: vcc5v0-usb {
+ 		vin-supply = <&vcc12v_dcin>;
+ 	};
+ 
++	vcca_0v9: vcca-0v9 {
++		compatible = "regulator-fixed";
++		regulator-name = "vcca_0v9";
++		regulator-always-on;
++		regulator-boot-on;
++		regulator-min-microvolt = <900000>;
++		regulator-max-microvolt = <900000>;
++		vin-supply = <&vcc3v3_sys>;
++	};
++
+ 	vdd_log: vdd-log {
+ 		compatible = "pwm-regulator";
+ 		pwms = <&pwm2 0 25000 1>;
+@@ -312,6 +322,8 @@ &gmac {
+ };
+ 
+ &hdmi {
++	avdd-0v9-supply = <&vcca_0v9>;
++	avdd-1v8-supply = <&vcc1v8_dvp>;
+ 	ddc-i2c-bus = <&i2c3>;
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&hdmi_cec>;
+diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h
+index ebbce134917ccd..6efa95ad033ab6 100644
+--- a/arch/riscv/include/asm/cmpxchg.h
++++ b/arch/riscv/include/asm/cmpxchg.h
+@@ -169,7 +169,7 @@
+ 		break;							\
+ 	case 4:								\
+ 		__arch_cmpxchg(".w", ".w" sc_sfx, prepend, append,	\
+-				__ret, __ptr, (long), __old, __new);	\
++				__ret, __ptr, (long)(int)(long), __old, __new);	\
+ 		break;							\
+ 	case 8:								\
+ 		__arch_cmpxchg(".d", ".d" sc_sfx, prepend, append,	\
+diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
+index 479550cdb440f9..03881122506a75 100644
+--- a/arch/riscv/include/asm/pgtable.h
++++ b/arch/riscv/include/asm/pgtable.h
+@@ -916,7 +916,6 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
+  */
+ #ifdef CONFIG_64BIT
+ #define TASK_SIZE_64	(PGDIR_SIZE * PTRS_PER_PGD / 2)
+-#define TASK_SIZE_MAX	LONG_MAX
+ 
+ #ifdef CONFIG_COMPAT
+ #define TASK_SIZE_32	(_AC(0x80000000, UL) - PAGE_SIZE)
+diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c
+index 36ac96eac9c9e4..d14bfc23e315b0 100644
+--- a/arch/riscv/kernel/traps_misaligned.c
++++ b/arch/riscv/kernel/traps_misaligned.c
+@@ -429,7 +429,7 @@ int handle_misaligned_load(struct pt_regs *regs)
+ 
+ 	val.data_u64 = 0;
+ 	if (user_mode(regs)) {
+-		if (copy_from_user_nofault(&val, (u8 __user *)addr, len))
++		if (copy_from_user(&val, (u8 __user *)addr, len))
+ 			return -1;
+ 	} else {
+ 		memcpy(&val, (u8 *)addr, len);
+@@ -530,7 +530,7 @@ int handle_misaligned_store(struct pt_regs *regs)
+ 		return -EOPNOTSUPP;
+ 
+ 	if (user_mode(regs)) {
+-		if (copy_to_user_nofault((u8 __user *)addr, &val, len))
++		if (copy_to_user((u8 __user *)addr, &val, len))
+ 			return -1;
+ 	} else {
+ 		memcpy((u8 *)addr, &val, len);
+diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c
+index b8167272988723..b2e4b81763f888 100644
+--- a/arch/riscv/mm/cacheflush.c
++++ b/arch/riscv/mm/cacheflush.c
+@@ -24,7 +24,20 @@ void flush_icache_all(void)
+ 
+ 	if (num_online_cpus() < 2)
+ 		return;
+-	else if (riscv_use_sbi_for_rfence())
++
++	/*
++	 * Make sure all previous writes to the D$ are ordered before making
++	 * the IPI. The RISC-V spec states that a hart must execute a data fence
++	 * before triggering a remote fence.i in order to make the modification
++	 * visible for remote harts.
++	 *
++	 * IPIs on RISC-V are triggered by MMIO writes to either CLINT or
++	 * S-IMSIC, so the fence ensures previous data writes "happen before"
++	 * the MMIO.
++	 */
++	RISCV_FENCE(w, o);
++
++	if (riscv_use_sbi_for_rfence())
+ 		sbi_remote_fence_i(NULL);
+ 	else
+ 		on_each_cpu(ipi_remote_fence_i, NULL, 1);
+diff --git a/arch/um/drivers/ubd_user.c b/arch/um/drivers/ubd_user.c
+index b4f8b8e605644a..592b899820d649 100644
+--- a/arch/um/drivers/ubd_user.c
++++ b/arch/um/drivers/ubd_user.c
+@@ -41,7 +41,7 @@ int start_io_thread(unsigned long sp, int *fd_out)
+ 	*fd_out = fds[1];
+ 
+ 	err = os_set_fd_block(*fd_out, 0);
+-	err = os_set_fd_block(kernel_fd, 0);
++	err |= os_set_fd_block(kernel_fd, 0);
+ 	if (err) {
+ 		printk("start_io_thread - failed to set nonblocking I/O.\n");
+ 		goto out_close;
+diff --git a/arch/um/include/asm/asm-prototypes.h b/arch/um/include/asm/asm-prototypes.h
+index 5898a26daa0dd4..408b31d591279d 100644
+--- a/arch/um/include/asm/asm-prototypes.h
++++ b/arch/um/include/asm/asm-prototypes.h
+@@ -1 +1,6 @@
+ #include <asm-generic/asm-prototypes.h>
++#include <asm/checksum.h>
++
++#ifdef CONFIG_UML_X86
++extern void cmpxchg8b_emu(void);
++#endif
+diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
+index 97c8df9c44017c..9077bdb26cc35f 100644
+--- a/arch/um/kernel/trap.c
++++ b/arch/um/kernel/trap.c
+@@ -17,6 +17,122 @@
+ #include <os.h>
+ #include <skas.h>
+ 
++/*
++ * NOTE: UML does not have exception tables. As such, this is almost a copy
++ * of the code in mm/memory.c, only adjusting the logic to simply check whether
++ * we are coming from the kernel instead of doing an additional lookup in the
++ * exception table.
++ * We can do this simplification because we never get here if the exception was
++ * fixable.
++ */
++static inline bool get_mmap_lock_carefully(struct mm_struct *mm, bool is_user)
++{
++	if (likely(mmap_read_trylock(mm)))
++		return true;
++
++	if (!is_user)
++		return false;
++
++	return !mmap_read_lock_killable(mm);
++}
++
++static inline bool mmap_upgrade_trylock(struct mm_struct *mm)
++{
++	/*
++	 * We don't have this operation yet.
++	 *
++	 * It should be easy enough to do: it's basically a
++	 *    atomic_long_try_cmpxchg_acquire()
++	 * from RWSEM_READER_BIAS -> RWSEM_WRITER_LOCKED, but
++	 * it also needs the proper lockdep magic etc.
++	 */
++	return false;
++}
++
++static inline bool upgrade_mmap_lock_carefully(struct mm_struct *mm, bool is_user)
++{
++	mmap_read_unlock(mm);
++	if (!is_user)
++		return false;
++
++	return !mmap_write_lock_killable(mm);
++}
++
++/*
++ * Helper for page fault handling.
++ *
++ * This is kind of equivalent to "mmap_read_lock()" followed
++ * by "find_extend_vma()", except it's a lot more careful about
++ * the locking (and will drop the lock on failure).
++ *
++ * For example, if we have a kernel bug that causes a page
++ * fault, we don't want to just use mmap_read_lock() to get
++ * the mm lock, because that would deadlock if the bug were
++ * to happen while we're holding the mm lock for writing.
++ *
++ * So this checks the exception tables on kernel faults in
++ * order to only do this all for instructions that are actually
++ * expected to fault.
++ *
++ * We can also actually take the mm lock for writing if we
++ * need to extend the vma, which helps the VM layer a lot.
++ */
++static struct vm_area_struct *
++um_lock_mm_and_find_vma(struct mm_struct *mm,
++			unsigned long addr, bool is_user)
++{
++	struct vm_area_struct *vma;
++
++	if (!get_mmap_lock_carefully(mm, is_user))
++		return NULL;
++
++	vma = find_vma(mm, addr);
++	if (likely(vma && (vma->vm_start <= addr)))
++		return vma;
++
++	/*
++	 * Well, dang. We might still be successful, but only
++	 * if we can extend a vma to do so.
++	 */
++	if (!vma || !(vma->vm_flags & VM_GROWSDOWN)) {
++		mmap_read_unlock(mm);
++		return NULL;
++	}
++
++	/*
++	 * We can try to upgrade the mmap lock atomically,
++	 * in which case we can continue to use the vma
++	 * we already looked up.
++	 *
++	 * Otherwise we'll have to drop the mmap lock and
++	 * re-take it, and also look up the vma again,
++	 * re-checking it.
++	 */
++	if (!mmap_upgrade_trylock(mm)) {
++		if (!upgrade_mmap_lock_carefully(mm, is_user))
++			return NULL;
++
++		vma = find_vma(mm, addr);
++		if (!vma)
++			goto fail;
++		if (vma->vm_start <= addr)
++			goto success;
++		if (!(vma->vm_flags & VM_GROWSDOWN))
++			goto fail;
++	}
++
++	if (expand_stack_locked(vma, addr))
++		goto fail;
++
++success:
++	mmap_write_downgrade(mm);
++	return vma;
++
++fail:
++	mmap_write_unlock(mm);
++	return NULL;
++}
++
+ /*
+  * Note this is constrained to return 0, -EFAULT, -EACCES, -ENOMEM by
+  * segv().
+@@ -43,21 +159,10 @@ int handle_page_fault(unsigned long address, unsigned long ip,
+ 	if (is_user)
+ 		flags |= FAULT_FLAG_USER;
+ retry:
+-	mmap_read_lock(mm);
+-	vma = find_vma(mm, address);
+-	if (!vma)
+-		goto out;
+-	if (vma->vm_start <= address)
+-		goto good_area;
+-	if (!(vma->vm_flags & VM_GROWSDOWN))
+-		goto out;
+-	if (is_user && !ARCH_IS_STACKGROW(address))
+-		goto out;
+-	vma = expand_stack(mm, address);
++	vma = um_lock_mm_and_find_vma(mm, address, is_user);
+ 	if (!vma)
+ 		goto out_nosemaphore;
+ 
+-good_area:
+ 	*code_out = SEGV_ACCERR;
+ 	if (is_write) {
+ 		if (!(vma->vm_flags & VM_WRITE))
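
The new um_lock_mm_and_find_vma() copies the core kernel's careful pattern: try the read lock first, and only for a userspace fault fall back to a blocking (in the kernel, killable) acquisition, so a kernel-mode fault can never deadlock on an mmap lock it already holds. The try-then-block shape, modelled with a pthread rwlock (link with -lpthread; a sketch, not the kernel primitive):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_rwlock_t mmap_lock = PTHREAD_RWLOCK_INITIALIZER;

    /* Mirrors get_mmap_lock_carefully(): trylock always, blocking
     * acquisition only for user-mode faults. */
    static bool lock_carefully(bool is_user)
    {
        if (pthread_rwlock_tryrdlock(&mmap_lock) == 0)
            return true;
        if (!is_user)
            return false;   /* kernel fault: bail out, don't deadlock */
        return pthread_rwlock_rdlock(&mmap_lock) == 0;
    }

    int main(void)
    {
        pthread_rwlock_wrlock(&mmap_lock);  /* simulate a held lock */
        printf("kernel fault takes lock: %d\n", lock_carefully(false)); /* 0 */
        pthread_rwlock_unlock(&mmap_lock);
        printf("after unlock: %d\n", lock_carefully(true));             /* 1 */
        return 0;
    }
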
+diff --git a/arch/x86/include/uapi/asm/debugreg.h b/arch/x86/include/uapi/asm/debugreg.h
+index 0007ba077c0c2b..41da492dfb01f0 100644
+--- a/arch/x86/include/uapi/asm/debugreg.h
++++ b/arch/x86/include/uapi/asm/debugreg.h
+@@ -15,7 +15,26 @@
+    which debugging register was responsible for the trap.  The other bits
+    are either reserved or not of interest to us. */
+ 
+-/* Define reserved bits in DR6 which are always set to 1 */
++/*
++ * Define bits in DR6 which are set to 1 by default.
++ *
++ * This is also the DR6 architectural value following Power-up, Reset or INIT.
++ *
++ * Note, with the introduction of Bus Lock Detection (BLD) and Restricted
++ * Transactional Memory (RTM), the DR6 register has been modified:
++ *
++ * 1) BLD flag (bit 11) is no longer reserved to 1 if the CPU supports
++ *    Bus Lock Detection.  The assertion of a bus lock could clear it.
++ *
++ * 2) RTM flag (bit 16) is no longer reserved to 1 if the CPU supports
++ *    restricted transactional memory.  #DB occurred inside an RTM region
++ *    could clear it.
++ *
++ * Apparently, DR6.BLD and DR6.RTM are active low bits.
++ *
++ * As a result, DR6_RESERVED is an incorrect name now, but it is kept for
++ * compatibility.
++ */
+ #define DR6_RESERVED	(0xFFFF0FF0)
+ 
+ #define DR_TRAP0	(0x1)		/* db0 */
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index b4877544552369..a11c61fd7d52cf 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -2145,20 +2145,16 @@ EXPORT_PER_CPU_SYMBOL(__stack_chk_guard);
+ 
+ #endif	/* CONFIG_X86_64 */
+ 
+-/*
+- * Clear all 6 debug registers:
+- */
+-static void clear_all_debug_regs(void)
++static void initialize_debug_regs(void)
+ {
+-	int i;
+-
+-	for (i = 0; i < 8; i++) {
+-		/* Ignore db4, db5 */
+-		if ((i == 4) || (i == 5))
+-			continue;
+-
+-		set_debugreg(0, i);
+-	}
++	/* Control register first -- to make sure everything is disabled. */
++	set_debugreg(0, 7);
++	set_debugreg(DR6_RESERVED, 6);
++	/* dr5 and dr4 don't exist */
++	set_debugreg(0, 3);
++	set_debugreg(0, 2);
++	set_debugreg(0, 1);
++	set_debugreg(0, 0);
+ }
+ 
+ #ifdef CONFIG_KGDB
+@@ -2319,7 +2315,7 @@ void cpu_init(void)
+ 
+ 	load_mm_ldt(&init_mm);
+ 
+-	clear_all_debug_regs();
++	initialize_debug_regs();
+ 	dbg_restore_debug_regs();
+ 
+ 	doublefault_init_cpu_tss();
+diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
+index 8f62e0666dea51..8abe60919e2f9a 100644
+--- a/arch/x86/kernel/fpu/signal.c
++++ b/arch/x86/kernel/fpu/signal.c
+@@ -119,7 +119,6 @@ static inline bool save_xstate_epilog(void __user *buf, int ia32_frame,
+ {
+ 	struct xregs_state __user *x = buf;
+ 	struct _fpx_sw_bytes sw_bytes = {};
+-	u32 xfeatures;
+ 	int err;
+ 
+ 	/* Setup the bytes not touched by the [f]xsave and reserved for SW. */
+@@ -132,12 +131,6 @@ static inline bool save_xstate_epilog(void __user *buf, int ia32_frame,
+ 	err |= __put_user(FP_XSTATE_MAGIC2,
+ 			  (__u32 __user *)(buf + fpstate->user_size));
+ 
+-	/*
+-	 * Read the xfeatures which we copied (directly from the cpu or
+-	 * from the state in task struct) to the user buffers.
+-	 */
+-	err |= __get_user(xfeatures, (__u32 __user *)&x->header.xfeatures);
+-
+ 	/*
+ 	 * For legacy compatible, we always set FP/SSE bits in the bit
+ 	 * vector while saving the state to the user context. This will
+@@ -149,9 +142,7 @@ static inline bool save_xstate_epilog(void __user *buf, int ia32_frame,
+ 	 * header as well as change any contents in the memory layout.
+ 	 * xrestore as part of sigreturn will capture all the changes.
+ 	 */
+-	xfeatures |= XFEATURE_MASK_FPSSE;
+-
+-	err |= __put_user(xfeatures, (__u32 __user *)&x->header.xfeatures);
++	err |= set_xfeature_in_sigframe(x, XFEATURE_MASK_FPSSE);
+ 
+ 	return !err;
+ }
+diff --git a/arch/x86/kernel/fpu/xstate.h b/arch/x86/kernel/fpu/xstate.h
+index aa16f1a1bbcf17..f7d8f3d73599e2 100644
+--- a/arch/x86/kernel/fpu/xstate.h
++++ b/arch/x86/kernel/fpu/xstate.h
+@@ -69,21 +69,31 @@ static inline u64 xfeatures_mask_independent(void)
+ 	return fpu_kernel_cfg.independent_features;
+ }
+ 
++static inline int set_xfeature_in_sigframe(struct xregs_state __user *xbuf, u64 mask)
++{
++	u64 xfeatures;
++	int err;
++
++	/* Read the xfeatures value already saved in the user buffer */
++	err  = __get_user(xfeatures, &xbuf->header.xfeatures);
++	xfeatures |= mask;
++	err |= __put_user(xfeatures, &xbuf->header.xfeatures);
++
++	return err;
++}
++
+ /*
+  * Update the value of PKRU register that was already pushed onto the signal frame.
+  */
+-static inline int update_pkru_in_sigframe(struct xregs_state __user *buf, u64 mask, u32 pkru)
++static inline int update_pkru_in_sigframe(struct xregs_state __user *buf, u32 pkru)
+ {
+-	u64 xstate_bv;
+ 	int err;
+ 
+ 	if (unlikely(!cpu_feature_enabled(X86_FEATURE_OSPKE)))
+ 		return 0;
+ 
+ 	/* Mark PKRU as in-use so that it is restored correctly. */
+-	xstate_bv = (mask & xfeatures_in_use()) | XFEATURE_MASK_PKRU;
+-
+-	err =  __put_user(xstate_bv, &buf->header.xfeatures);
++	err = set_xfeature_in_sigframe(buf, XFEATURE_MASK_PKRU);
+ 	if (err)
+ 		return err;
+ 
+@@ -304,7 +314,7 @@ static inline int xsave_to_user_sigframe(struct xregs_state __user *buf, u32 pkr
+ 	clac();
+ 
+ 	if (!err)
+-		err = update_pkru_in_sigframe(buf, mask, pkru);
++		err = update_pkru_in_sigframe(buf, pkru);
+ 
+ 	return err;
+ }
+diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
+index b18fc7539b8d7b..243f3bc9b4dc53 100644
+--- a/arch/x86/kernel/traps.c
++++ b/arch/x86/kernel/traps.c
+@@ -977,24 +977,32 @@ static bool is_sysenter_singlestep(struct pt_regs *regs)
+ #endif
+ }
+ 
+-static __always_inline unsigned long debug_read_clear_dr6(void)
++static __always_inline unsigned long debug_read_reset_dr6(void)
+ {
+ 	unsigned long dr6;
+ 
++	get_debugreg(dr6, 6);
++	dr6 ^= DR6_RESERVED; /* Flip to positive polarity */
++
+ 	/*
+ 	 * The Intel SDM says:
+ 	 *
+-	 *   Certain debug exceptions may clear bits 0-3. The remaining
+-	 *   contents of the DR6 register are never cleared by the
+-	 *   processor. To avoid confusion in identifying debug
+-	 *   exceptions, debug handlers should clear the register before
+-	 *   returning to the interrupted task.
++	 *   Certain debug exceptions may clear bits 0-3 of DR6.
++	 *
++	 *   BLD induced #DB clears DR6.BLD and any other debug
++	 *   exception doesn't modify DR6.BLD.
+ 	 *
+-	 * Keep it simple: clear DR6 immediately.
++	 *   RTM induced #DB clears DR6.RTM and any other debug
++	 *   exception sets DR6.RTM.
++	 *
++	 *   To avoid confusion in identifying debug exceptions,
++	 *   debug handlers should set DR6.BLD and DR6.RTM, and
++	 *   clear other DR6 bits before returning.
++	 *
++	 * Keep it simple: write DR6 with its architectural reset
++	 * value 0xFFFF0FF0, defined as DR6_RESERVED, immediately.
+ 	 */
+-	get_debugreg(dr6, 6);
+ 	set_debugreg(DR6_RESERVED, 6);
+-	dr6 ^= DR6_RESERVED; /* Flip to positive polarity */
+ 
+ 	return dr6;
+ }
+@@ -1194,13 +1202,13 @@ static noinstr void exc_debug_user(struct pt_regs *regs, unsigned long dr6)
+ /* IST stack entry */
+ DEFINE_IDTENTRY_DEBUG(exc_debug)
+ {
+-	exc_debug_kernel(regs, debug_read_clear_dr6());
++	exc_debug_kernel(regs, debug_read_reset_dr6());
+ }
+ 
+ /* User entry, runs on regular task stack */
+ DEFINE_IDTENTRY_DEBUG_USER(exc_debug)
+ {
+-	exc_debug_user(regs, debug_read_clear_dr6());
++	exc_debug_user(regs, debug_read_reset_dr6());
+ }
+ 
+ #ifdef CONFIG_X86_FRED
+@@ -1219,7 +1227,7 @@ DEFINE_FREDENTRY_DEBUG(exc_debug)
+ {
+ 	/*
+ 	 * FRED #DB stores DR6 on the stack in the format which
+-	 * debug_read_clear_dr6() returns for the IDT entry points.
++	 * debug_read_reset_dr6() returns for the IDT entry points.
+ 	 */
+ 	unsigned long dr6 = fred_event_data(regs);
+ 
+@@ -1234,7 +1242,7 @@ DEFINE_FREDENTRY_DEBUG(exc_debug)
+ /* 32 bit does not have separate entry points. */
+ DEFINE_IDTENTRY_RAW(exc_debug)
+ {
+-	unsigned long dr6 = debug_read_clear_dr6();
++	unsigned long dr6 = debug_read_reset_dr6();
+ 
+ 	if (user_mode(regs))
+ 		exc_debug_user(regs, dr6);
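
Because DR6.BLD and DR6.RTM are active-low, XOR-ing the raw register with DR6_RESERVED (0xFFFF0FF0) flips every default-1 bit so that all reported conditions read as 1, and writing DR6_RESERVED back restores the architectural reset value. The arithmetic, checked in plain C:

    #include <stdint.h>
    #include <stdio.h>

    #define DR6_RESERVED 0xFFFF0FF0u
    #define DR6_BLD      (1u << 11)    /* active-low: 0 means "bus lock hit" */
    #define DR6_TRAP0    (1u << 0)     /* active-high */

    int main(void)
    {
        /* Raw DR6 after a BLD #DB that also hit breakpoint 0:
         * BLD cleared (active low), trap0 set. */
        uint32_t raw = (DR6_RESERVED & ~DR6_BLD) | DR6_TRAP0;
        uint32_t dr6 = raw ^ DR6_RESERVED;  /* flip to positive polarity */

        printf("BLD reported:   %u\n", !!(dr6 & DR6_BLD));      /* 1 */
        printf("trap0 reported: %u\n", !!(dr6 & DR6_TRAP0));    /* 1 */
        return 0;
    }
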
+diff --git a/arch/x86/um/asm/checksum.h b/arch/x86/um/asm/checksum.h
+index b07824500363fa..ddc144657efad9 100644
+--- a/arch/x86/um/asm/checksum.h
++++ b/arch/x86/um/asm/checksum.h
+@@ -20,6 +20,9 @@
+  */
+ extern __wsum csum_partial(const void *buff, int len, __wsum sum);
+ 
++/* Do not call this directly. Declared for export type visibility. */
++extern __visible __wsum csum_partial_copy_generic(const void *src, void *dst, int len);
++
+ /**
+  * csum_fold - Fold and invert a 32bit checksum.
+  * sum: 32bit unfolded sum
+diff --git a/drivers/accel/ivpu/ivpu_debugfs.c b/drivers/accel/ivpu/ivpu_debugfs.c
+index 05a0d99ce95c4b..1edf6e56440261 100644
+--- a/drivers/accel/ivpu/ivpu_debugfs.c
++++ b/drivers/accel/ivpu/ivpu_debugfs.c
+@@ -423,6 +423,88 @@ static int dct_active_set(void *data, u64 active_percent)
+ 
+ DEFINE_DEBUGFS_ATTRIBUTE(ivpu_dct_fops, dct_active_get, dct_active_set, "%llu\n");
+ 
++static int priority_bands_show(struct seq_file *s, void *v)
++{
++	struct ivpu_device *vdev = s->private;
++	struct ivpu_hw_info *hw = vdev->hw;
++
++	for (int band = VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE;
++	     band < VPU_JOB_SCHEDULING_PRIORITY_BAND_COUNT; band++) {
++		switch (band) {
++		case VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE:
++			seq_puts(s, "Idle:     ");
++			break;
++
++		case VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL:
++			seq_puts(s, "Normal:   ");
++			break;
++
++		case VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS:
++			seq_puts(s, "Focus:    ");
++			break;
++
++		case VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME:
++			seq_puts(s, "Realtime: ");
++			break;
++		}
++
++		seq_printf(s, "grace_period %9u process_grace_period %9u process_quantum %9u\n",
++			   hw->hws.grace_period[band], hw->hws.process_grace_period[band],
++			   hw->hws.process_quantum[band]);
++	}
++
++	return 0;
++}
++
++static int priority_bands_fops_open(struct inode *inode, struct file *file)
++{
++	return single_open(file, priority_bands_show, inode->i_private);
++}
++
++static ssize_t
++priority_bands_fops_write(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
++{
++	struct seq_file *s = file->private_data;
++	struct ivpu_device *vdev = s->private;
++	char buf[64];
++	u32 grace_period;
++	u32 process_grace_period;
++	u32 process_quantum;
++	u32 band;
++	int ret;
++
++	if (size >= sizeof(buf))
++		return -EINVAL;
++
++	ret = simple_write_to_buffer(buf, sizeof(buf) - 1, pos, user_buf, size);
++	if (ret < 0)
++		return ret;
++
++	buf[size] = '\0';
++	ret = sscanf(buf, "%u %u %u %u", &band, &grace_period, &process_grace_period,
++		     &process_quantum);
++	if (ret != 4)
++		return -EINVAL;
++
++	if (band >= VPU_JOB_SCHEDULING_PRIORITY_BAND_COUNT)
++		return -EINVAL;
++
++	vdev->hw->hws.grace_period[band] = grace_period;
++	vdev->hw->hws.process_grace_period[band] = process_grace_period;
++	vdev->hw->hws.process_quantum[band] = process_quantum;
++
++	return size;
++}
++
++static const struct file_operations ivpu_hws_priority_bands_fops = {
++	.owner = THIS_MODULE,
++	.open = priority_bands_fops_open,
++	.write = priority_bands_fops_write,
++	.read = seq_read,
++	.llseek = seq_lseek,
++	.release = single_release,
++};
++
+ void ivpu_debugfs_init(struct ivpu_device *vdev)
+ {
+ 	struct dentry *debugfs_root = vdev->drm.debugfs_root;
+@@ -445,6 +527,8 @@ void ivpu_debugfs_init(struct ivpu_device *vdev)
+ 			    &fw_trace_hw_comp_mask_fops);
+ 	debugfs_create_file("fw_trace_level", 0200, debugfs_root, vdev,
+ 			    &fw_trace_level_fops);
++	debugfs_create_file("hws_priority_bands", 0200, debugfs_root, vdev,
++			    &ivpu_hws_priority_bands_fops);
+ 
+ 	debugfs_create_file("reset_engine", 0200, debugfs_root, vdev,
+ 			    &ivpu_reset_engine_fops);
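
For reference, the write handler above expects four space-separated integers: the band index followed by grace_period, process_grace_period and process_quantum, so writing "1 50000 50000 300000" to hws_priority_bands retunes the Normal band. The exact location of the file depends on the accel device's debugfs root, typically somewhere under /sys/kernel/debug/accel/.
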
+diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c
+index 67d56a944d5495..00208c4a658073 100644
+--- a/drivers/accel/ivpu/ivpu_drv.c
++++ b/drivers/accel/ivpu/ivpu_drv.c
+@@ -102,6 +102,8 @@ static void file_priv_release(struct kref *ref)
+ 	pm_runtime_get_sync(vdev->drm.dev);
+ 	mutex_lock(&vdev->context_list_lock);
+ 	file_priv_unbind(vdev, file_priv);
++	drm_WARN_ON(&vdev->drm, !xa_empty(&file_priv->cmdq_xa));
++	xa_destroy(&file_priv->cmdq_xa);
+ 	mutex_unlock(&vdev->context_list_lock);
+ 	pm_runtime_put_autosuspend(vdev->drm.dev);
+ 
+@@ -261,6 +263,10 @@ static int ivpu_open(struct drm_device *dev, struct drm_file *file)
+ 	file_priv->job_limit.min = FIELD_PREP(IVPU_JOB_ID_CONTEXT_MASK, (file_priv->ctx.id - 1));
+ 	file_priv->job_limit.max = file_priv->job_limit.min | IVPU_JOB_ID_JOB_MASK;
+ 
++	xa_init_flags(&file_priv->cmdq_xa, XA_FLAGS_ALLOC1);
++	file_priv->cmdq_limit.min = IVPU_CMDQ_MIN_ID;
++	file_priv->cmdq_limit.max = IVPU_CMDQ_MAX_ID;
++
+ 	mutex_unlock(&vdev->context_list_lock);
+ 	drm_dev_exit(idx);
+ 
+diff --git a/drivers/accel/ivpu/ivpu_drv.h b/drivers/accel/ivpu/ivpu_drv.h
+index 1fe6a3bd4e36b7..f2ba3ed8b3fc52 100644
+--- a/drivers/accel/ivpu/ivpu_drv.h
++++ b/drivers/accel/ivpu/ivpu_drv.h
+@@ -50,11 +50,11 @@
+ #define IVPU_JOB_ID_JOB_MASK		GENMASK(7, 0)
+ #define IVPU_JOB_ID_CONTEXT_MASK	GENMASK(31, 8)
+ 
+-#define IVPU_NUM_ENGINES       2
+ #define IVPU_NUM_PRIORITIES    4
+-#define IVPU_NUM_CMDQS_PER_CTX (IVPU_NUM_ENGINES * IVPU_NUM_PRIORITIES)
++#define IVPU_NUM_CMDQS_PER_CTX (IVPU_NUM_PRIORITIES)
+ 
+-#define IVPU_CMDQ_INDEX(engine, priority) ((engine) * IVPU_NUM_PRIORITIES + (priority))
++#define IVPU_CMDQ_MIN_ID 1
++#define IVPU_CMDQ_MAX_ID 255
+ 
+ #define IVPU_PLATFORM_SILICON 0
+ #define IVPU_PLATFORM_SIMICS  2
+@@ -174,13 +174,15 @@ struct ivpu_file_priv {
+ 	struct kref ref;
+ 	struct ivpu_device *vdev;
+ 	struct mutex lock; /* Protects cmdq */
+-	struct ivpu_cmdq *cmdq[IVPU_NUM_CMDQS_PER_CTX];
++	struct xarray cmdq_xa;
+ 	struct ivpu_mmu_context ctx;
+ 	struct mutex ms_lock; /* Protects ms_instance_list, ms_info_bo */
+ 	struct list_head ms_instance_list;
+ 	struct ivpu_bo *ms_info_bo;
+ 	struct xa_limit job_limit;
+ 	u32 job_id_next;
++	struct xa_limit cmdq_limit;
++	u32 cmdq_id_next;
+ 	bool has_mmu_faults;
+ 	bool bound;
+ 	bool aborted;
+diff --git a/drivers/accel/ivpu/ivpu_hw.c b/drivers/accel/ivpu/ivpu_hw.c
+index 1214f155afa111..37ef8ce642109b 100644
+--- a/drivers/accel/ivpu/ivpu_hw.c
++++ b/drivers/accel/ivpu/ivpu_hw.c
+@@ -110,6 +110,26 @@ static void timeouts_init(struct ivpu_device *vdev)
+ 	}
+ }
+ 
++static void priority_bands_init(struct ivpu_device *vdev)
++{
++	/* Idle */
++	vdev->hw->hws.grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE] = 0;
++	vdev->hw->hws.process_grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE] = 50000;
++	vdev->hw->hws.process_quantum[VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE] = 160000;
++	/* Normal */
++	vdev->hw->hws.grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL] = 50000;
++	vdev->hw->hws.process_grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL] = 50000;
++	vdev->hw->hws.process_quantum[VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL] = 300000;
++	/* Focus */
++	vdev->hw->hws.grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS] = 50000;
++	vdev->hw->hws.process_grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS] = 50000;
++	vdev->hw->hws.process_quantum[VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS] = 200000;
++	/* Realtime */
++	vdev->hw->hws.grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME] = 0;
++	vdev->hw->hws.process_grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME] = 50000;
++	vdev->hw->hws.process_quantum[VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME] = 200000;
++}
++
+ static void memory_ranges_init(struct ivpu_device *vdev)
+ {
+ 	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) {
+@@ -248,6 +268,7 @@ int ivpu_hw_init(struct ivpu_device *vdev)
+ {
+ 	ivpu_hw_btrs_info_init(vdev);
+ 	ivpu_hw_btrs_freq_ratios_init(vdev);
++	priority_bands_init(vdev);
+ 	memory_ranges_init(vdev);
+ 	platform_init(vdev);
+ 	wa_init(vdev);
+diff --git a/drivers/accel/ivpu/ivpu_hw.h b/drivers/accel/ivpu/ivpu_hw.h
+index 1e85306bcd0653..1c016b99a0fddf 100644
+--- a/drivers/accel/ivpu/ivpu_hw.h
++++ b/drivers/accel/ivpu/ivpu_hw.h
+@@ -45,6 +45,11 @@ struct ivpu_hw_info {
+ 		u8 pn_ratio;
+ 		u32 profiling_freq;
+ 	} pll;
++	struct {
++		u32 grace_period[VPU_HWS_NUM_PRIORITY_BANDS];
++		u32 process_quantum[VPU_HWS_NUM_PRIORITY_BANDS];
++		u32 process_grace_period[VPU_HWS_NUM_PRIORITY_BANDS];
++	} hws;
+ 	u32 tile_fuse;
+ 	u32 sku;
+ 	u16 config;
+diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c
+index 27121c66e48f81..e631098718b151 100644
+--- a/drivers/accel/ivpu/ivpu_job.c
++++ b/drivers/accel/ivpu/ivpu_job.c
+@@ -60,6 +60,7 @@ static int ivpu_preemption_buffers_create(struct ivpu_device *vdev,
+ 
+ err_free_primary:
+ 	ivpu_bo_free(cmdq->primary_preempt_buf);
++	cmdq->primary_preempt_buf = NULL;
+ 	return -ENOMEM;
+ }
+ 
+@@ -69,10 +70,10 @@ static void ivpu_preemption_buffers_free(struct ivpu_device *vdev,
+ 	if (vdev->fw->sched_mode != VPU_SCHEDULING_MODE_HW)
+ 		return;
+ 
+-	drm_WARN_ON(&vdev->drm, !cmdq->primary_preempt_buf);
+-	drm_WARN_ON(&vdev->drm, !cmdq->secondary_preempt_buf);
+-	ivpu_bo_free(cmdq->primary_preempt_buf);
+-	ivpu_bo_free(cmdq->secondary_preempt_buf);
++	if (cmdq->primary_preempt_buf)
++		ivpu_bo_free(cmdq->primary_preempt_buf);
++	if (cmdq->secondary_preempt_buf)
++		ivpu_bo_free(cmdq->secondary_preempt_buf);
+ }
+ 
+ static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv)
+@@ -85,27 +86,16 @@ static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv)
+ 	if (!cmdq)
+ 		return NULL;
+ 
+-	ret = xa_alloc_cyclic(&vdev->db_xa, &cmdq->db_id, NULL, vdev->db_limit, &vdev->db_next,
+-			      GFP_KERNEL);
+-	if (ret < 0) {
+-		ivpu_err(vdev, "Failed to allocate doorbell id: %d\n", ret);
+-		goto err_free_cmdq;
+-	}
+-
+ 	cmdq->mem = ivpu_bo_create_global(vdev, SZ_4K, DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE);
+ 	if (!cmdq->mem)
+-		goto err_erase_xa;
++		goto err_free_cmdq;
+ 
+ 	ret = ivpu_preemption_buffers_create(vdev, file_priv, cmdq);
+ 	if (ret)
+-		goto err_free_cmdq_mem;
++		ivpu_warn(vdev, "Failed to allocate preemption buffers, preemption limited\n");
+ 
+ 	return cmdq;
+ 
+-err_free_cmdq_mem:
+-	ivpu_bo_free(cmdq->mem);
+-err_erase_xa:
+-	xa_erase(&vdev->db_xa, cmdq->db_id);
+ err_free_cmdq:
+ 	kfree(cmdq);
+ 	return NULL;
+@@ -128,13 +118,13 @@ static int ivpu_hws_cmdq_init(struct ivpu_file_priv *file_priv, struct ivpu_cmdq
+ 	struct ivpu_device *vdev = file_priv->vdev;
+ 	int ret;
+ 
+-	ret = ivpu_jsm_hws_create_cmdq(vdev, file_priv->ctx.id, file_priv->ctx.id, cmdq->db_id,
++	ret = ivpu_jsm_hws_create_cmdq(vdev, file_priv->ctx.id, file_priv->ctx.id, cmdq->id,
+ 				       task_pid_nr(current), engine,
+ 				       cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem));
+ 	if (ret)
+ 		return ret;
+ 
+-	ret = ivpu_jsm_hws_set_context_sched_properties(vdev, file_priv->ctx.id, cmdq->db_id,
++	ret = ivpu_jsm_hws_set_context_sched_properties(vdev, file_priv->ctx.id, cmdq->id,
+ 							priority);
+ 	if (ret)
+ 		return ret;
+@@ -148,20 +138,21 @@ static int ivpu_register_db(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *
+ 	int ret;
+ 
+ 	if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW)
+-		ret = ivpu_jsm_hws_register_db(vdev, file_priv->ctx.id, cmdq->db_id, cmdq->db_id,
++		ret = ivpu_jsm_hws_register_db(vdev, file_priv->ctx.id, cmdq->id, cmdq->db_id,
+ 					       cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem));
+ 	else
+ 		ret = ivpu_jsm_register_db(vdev, file_priv->ctx.id, cmdq->db_id,
+ 					   cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem));
+ 
+ 	if (!ret)
+-		ivpu_dbg(vdev, JOB, "DB %d registered to ctx %d\n", cmdq->db_id, file_priv->ctx.id);
++		ivpu_dbg(vdev, JOB, "DB %d registered to cmdq %d ctx %d\n",
++			 cmdq->db_id, cmdq->id, file_priv->ctx.id);
+ 
+ 	return ret;
+ }
+ 
+ static int
+-ivpu_cmdq_init(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq, u16 engine, u8 priority)
++ivpu_cmdq_init(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq, u8 priority)
+ {
+ 	struct ivpu_device *vdev = file_priv->vdev;
+ 	struct vpu_job_queue_header *jobq_header;
+@@ -177,13 +168,13 @@ ivpu_cmdq_init(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq, u16 eng
+ 
+ 	cmdq->jobq = (struct vpu_job_queue *)ivpu_bo_vaddr(cmdq->mem);
+ 	jobq_header = &cmdq->jobq->header;
+-	jobq_header->engine_idx = engine;
++	jobq_header->engine_idx = VPU_ENGINE_COMPUTE;
+ 	jobq_header->head = 0;
+ 	jobq_header->tail = 0;
+ 	wmb(); /* Flush WC buffer for jobq->header */
+ 
+ 	if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) {
+-		ret = ivpu_hws_cmdq_init(file_priv, cmdq, engine, priority);
++		ret = ivpu_hws_cmdq_init(file_priv, cmdq, VPU_ENGINE_COMPUTE, priority);
+ 		if (ret)
+ 			return ret;
+ 	}
+@@ -210,9 +201,9 @@ static int ivpu_cmdq_fini(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cm
+ 	cmdq->db_registered = false;
+ 
+ 	if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) {
+-		ret = ivpu_jsm_hws_destroy_cmdq(vdev, file_priv->ctx.id, cmdq->db_id);
++		ret = ivpu_jsm_hws_destroy_cmdq(vdev, file_priv->ctx.id, cmdq->id);
+ 		if (!ret)
+-			ivpu_dbg(vdev, JOB, "Command queue %d destroyed\n", cmdq->db_id);
++			ivpu_dbg(vdev, JOB, "Command queue %d destroyed\n", cmdq->id);
+ 	}
+ 
+ 	ret = ivpu_jsm_unregister_db(vdev, cmdq->db_id);
+@@ -222,55 +213,104 @@ static int ivpu_cmdq_fini(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cm
+ 	return 0;
+ }
+ 
+-static struct ivpu_cmdq *ivpu_cmdq_acquire(struct ivpu_file_priv *file_priv, u16 engine,
+-					   u8 priority)
++static int ivpu_db_id_alloc(struct ivpu_device *vdev, u32 *db_id)
++{
++	int ret;
++	u32 id;
++
++	ret = xa_alloc_cyclic(&vdev->db_xa, &id, NULL, vdev->db_limit, &vdev->db_next, GFP_KERNEL);
++	if (ret < 0)
++		return ret;
++
++	*db_id = id;
++	return 0;
++}
++
++static int ivpu_cmdq_id_alloc(struct ivpu_file_priv *file_priv, u32 *cmdq_id)
+ {
+-	int cmdq_idx = IVPU_CMDQ_INDEX(engine, priority);
+-	struct ivpu_cmdq *cmdq = file_priv->cmdq[cmdq_idx];
++	int ret;
++	u32 id;
++
++	ret = xa_alloc_cyclic(&file_priv->cmdq_xa, &id, NULL, file_priv->cmdq_limit,
++			      &file_priv->cmdq_id_next, GFP_KERNEL);
++	if (ret < 0)
++		return ret;
++
++	*cmdq_id = id;
++	return 0;
++}
++
++static struct ivpu_cmdq *ivpu_cmdq_acquire(struct ivpu_file_priv *file_priv, u8 priority)
++{
++	struct ivpu_device *vdev = file_priv->vdev;
++	struct ivpu_cmdq *cmdq;
++	unsigned long id;
+ 	int ret;
+ 
+ 	lockdep_assert_held(&file_priv->lock);
+ 
++	xa_for_each(&file_priv->cmdq_xa, id, cmdq)
++		if (cmdq->priority == priority)
++			break;
++
+ 	if (!cmdq) {
+ 		cmdq = ivpu_cmdq_alloc(file_priv);
+-		if (!cmdq)
++		if (!cmdq) {
++			ivpu_err(vdev, "Failed to allocate command queue\n");
+ 			return NULL;
+-		file_priv->cmdq[cmdq_idx] = cmdq;
++		}
++
++		ret = ivpu_db_id_alloc(vdev, &cmdq->db_id);
++		if (ret) {
++			ivpu_err(file_priv->vdev, "Failed to allocate doorbell ID: %d\n", ret);
++			goto err_free_cmdq;
++		}
++
++		ret = ivpu_cmdq_id_alloc(file_priv, &cmdq->id);
++		if (ret) {
++			ivpu_err(vdev, "Failed to allocate command queue ID: %d\n", ret);
++			goto err_erase_db_id;
++		}
++
++		cmdq->priority = priority;
++		ret = xa_err(xa_store(&file_priv->cmdq_xa, cmdq->id, cmdq, GFP_KERNEL));
++		if (ret) {
++			ivpu_err(vdev, "Failed to store command queue in cmdq_xa: %d\n", ret);
++			goto err_erase_cmdq_id;
++		}
+ 	}
+ 
+-	ret = ivpu_cmdq_init(file_priv, cmdq, engine, priority);
+-	if (ret)
+-		return NULL;
++	ret = ivpu_cmdq_init(file_priv, cmdq, priority);
++	if (ret) {
++		ivpu_err(vdev, "Failed to initialize command queue: %d\n", ret);
++		goto err_free_cmdq;
++	}
+ 
+ 	return cmdq;
++
++err_erase_cmdq_id:
++	xa_erase(&file_priv->cmdq_xa, cmdq->id);
++err_erase_db_id:
++	xa_erase(&vdev->db_xa, cmdq->db_id);
++err_free_cmdq:
++	ivpu_cmdq_free(file_priv, cmdq);
++	return NULL;
+ }
+ 
+-static void ivpu_cmdq_release_locked(struct ivpu_file_priv *file_priv, u16 engine, u8 priority)
++void ivpu_cmdq_release_all_locked(struct ivpu_file_priv *file_priv)
+ {
+-	int cmdq_idx = IVPU_CMDQ_INDEX(engine, priority);
+-	struct ivpu_cmdq *cmdq = file_priv->cmdq[cmdq_idx];
++	struct ivpu_cmdq *cmdq;
++	unsigned long cmdq_id;
+ 
+ 	lockdep_assert_held(&file_priv->lock);
+ 
+-	if (cmdq) {
+-		file_priv->cmdq[cmdq_idx] = NULL;
++	xa_for_each(&file_priv->cmdq_xa, cmdq_id, cmdq) {
++		xa_erase(&file_priv->cmdq_xa, cmdq_id);
+ 		ivpu_cmdq_fini(file_priv, cmdq);
+ 		ivpu_cmdq_free(file_priv, cmdq);
+ 	}
+ }
+ 
+-void ivpu_cmdq_release_all_locked(struct ivpu_file_priv *file_priv)
+-{
+-	u16 engine;
+-	u8 priority;
+-
+-	lockdep_assert_held(&file_priv->lock);
+-
+-	for (engine = 0; engine < IVPU_NUM_ENGINES; engine++)
+-		for (priority = 0; priority < IVPU_NUM_PRIORITIES; priority++)
+-			ivpu_cmdq_release_locked(file_priv, engine, priority);
+-}
+-
+ /*
+  * Mark the doorbell as unregistered
+  * This function needs to be called when the VPU hardware is restarted
+@@ -279,20 +319,13 @@ void ivpu_cmdq_release_all_locked(struct ivpu_file_priv *file_priv)
+  */
+ static void ivpu_cmdq_reset(struct ivpu_file_priv *file_priv)
+ {
+-	u16 engine;
+-	u8 priority;
++	struct ivpu_cmdq *cmdq;
++	unsigned long cmdq_id;
+ 
+ 	mutex_lock(&file_priv->lock);
+ 
+-	for (engine = 0; engine < IVPU_NUM_ENGINES; engine++) {
+-		for (priority = 0; priority < IVPU_NUM_PRIORITIES; priority++) {
+-			int cmdq_idx = IVPU_CMDQ_INDEX(engine, priority);
+-			struct ivpu_cmdq *cmdq = file_priv->cmdq[cmdq_idx];
+-
+-			if (cmdq)
+-				cmdq->db_registered = false;
+-		}
+-	}
++	xa_for_each(&file_priv->cmdq_xa, cmdq_id, cmdq)
++		cmdq->db_registered = false;
+ 
+ 	mutex_unlock(&file_priv->lock);
+ }
+@@ -312,17 +345,11 @@ void ivpu_cmdq_reset_all_contexts(struct ivpu_device *vdev)
+ 
+ static void ivpu_cmdq_fini_all(struct ivpu_file_priv *file_priv)
+ {
+-	u16 engine;
+-	u8 priority;
+-
+-	for (engine = 0; engine < IVPU_NUM_ENGINES; engine++) {
+-		for (priority = 0; priority < IVPU_NUM_PRIORITIES; priority++) {
+-			int cmdq_idx = IVPU_CMDQ_INDEX(engine, priority);
++	struct ivpu_cmdq *cmdq;
++	unsigned long cmdq_id;
+ 
+-			if (file_priv->cmdq[cmdq_idx])
+-				ivpu_cmdq_fini(file_priv, file_priv->cmdq[cmdq_idx]);
+-		}
+-	}
++	xa_for_each(&file_priv->cmdq_xa, cmdq_id, cmdq)
++		ivpu_cmdq_fini(file_priv, cmdq);
+ }
+ 
+ void ivpu_context_abort_locked(struct ivpu_file_priv *file_priv)
+@@ -349,8 +376,8 @@ static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job)
+ 
+ 	/* Check if there is space left in job queue */
+ 	if (next_entry == header->head) {
+-		ivpu_dbg(vdev, JOB, "Job queue full: ctx %d engine %d db %d head %d tail %d\n",
+-			 job->file_priv->ctx.id, job->engine_idx, cmdq->db_id, header->head, tail);
++		ivpu_dbg(vdev, JOB, "Job queue full: ctx %d cmdq %d db %d head %d tail %d\n",
++			 job->file_priv->ctx.id, cmdq->id, cmdq->db_id, header->head, tail);
+ 		return -EBUSY;
+ 	}
+ 
+@@ -363,10 +390,16 @@ static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job)
+ 
+ 	if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW &&
+ 	    (unlikely(!(ivpu_test_mode & IVPU_TEST_MODE_PREEMPTION_DISABLE)))) {
+-		entry->primary_preempt_buf_addr = cmdq->primary_preempt_buf->vpu_addr;
+-		entry->primary_preempt_buf_size = ivpu_bo_size(cmdq->primary_preempt_buf);
+-		entry->secondary_preempt_buf_addr = cmdq->secondary_preempt_buf->vpu_addr;
+-		entry->secondary_preempt_buf_size = ivpu_bo_size(cmdq->secondary_preempt_buf);
++		if (cmdq->primary_preempt_buf) {
++			entry->primary_preempt_buf_addr = cmdq->primary_preempt_buf->vpu_addr;
++			entry->primary_preempt_buf_size = ivpu_bo_size(cmdq->primary_preempt_buf);
++		}
++
++		if (cmdq->secondary_preempt_buf) {
++			entry->secondary_preempt_buf_addr = cmdq->secondary_preempt_buf->vpu_addr;
++			entry->secondary_preempt_buf_size =
++				ivpu_bo_size(cmdq->secondary_preempt_buf);
++		}
+ 	}
+ 
+ 	wmb(); /* Ensure that tail is updated after filling entry */
+@@ -558,7 +591,7 @@ static int ivpu_job_submit(struct ivpu_job *job, u8 priority)
+ 	mutex_lock(&vdev->submitted_jobs_lock);
+ 	mutex_lock(&file_priv->lock);
+ 
+-	cmdq = ivpu_cmdq_acquire(file_priv, job->engine_idx, priority);
++	cmdq = ivpu_cmdq_acquire(file_priv, priority);
+ 	if (!cmdq) {
+ 		ivpu_warn_ratelimited(vdev, "Failed to get job queue, ctx %d engine %d prio %d\n",
+ 				      file_priv->ctx.id, job->engine_idx, priority);
+@@ -698,7 +731,7 @@ int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+ 	int idx, ret;
+ 	u8 priority;
+ 
+-	if (params->engine > DRM_IVPU_ENGINE_COPY)
++	if (params->engine != DRM_IVPU_ENGINE_COMPUTE)
+ 		return -EINVAL;
+ 
+ 	if (params->priority > DRM_IVPU_JOB_PRIORITY_REALTIME)
+@@ -816,7 +849,8 @@ void ivpu_context_abort_thread_handler(struct work_struct *work)
+ 	unsigned long id;
+ 
+ 	if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW)
+-		ivpu_jsm_reset_engine(vdev, 0);
++		if (ivpu_jsm_reset_engine(vdev, 0))
++			return;
+ 
+ 	mutex_lock(&vdev->context_list_lock);
+ 	xa_for_each(&vdev->context_xa, ctx_id, file_priv) {
+@@ -832,7 +866,8 @@ void ivpu_context_abort_thread_handler(struct work_struct *work)
+ 	if (vdev->fw->sched_mode != VPU_SCHEDULING_MODE_HW)
+ 		return;
+ 
+-	ivpu_jsm_hws_resume_engine(vdev, 0);
++	if (ivpu_jsm_hws_resume_engine(vdev, 0))
++		return;
+ 	/*
+ 	 * In hardware scheduling mode NPU already has stopped processing jobs
+ 	 * and won't send us any further notifications, thus we have to free job related resources
+diff --git a/drivers/accel/ivpu/ivpu_job.h b/drivers/accel/ivpu/ivpu_job.h
+index 0ae77f0638fadc..af1ed039569cd6 100644
+--- a/drivers/accel/ivpu/ivpu_job.h
++++ b/drivers/accel/ivpu/ivpu_job.h
+@@ -28,8 +28,10 @@ struct ivpu_cmdq {
+ 	struct ivpu_bo *secondary_preempt_buf;
+ 	struct ivpu_bo *mem;
+ 	u32 entry_count;
++	u32 id;
+ 	u32 db_id;
+ 	bool db_registered;
++	u8 priority;
+ };
+ 
+ /**
+diff --git a/drivers/accel/ivpu/ivpu_jsm_msg.c b/drivers/accel/ivpu/ivpu_jsm_msg.c
+index ae91ad24d10d86..7c08308d5725dd 100644
+--- a/drivers/accel/ivpu/ivpu_jsm_msg.c
++++ b/drivers/accel/ivpu/ivpu_jsm_msg.c
+@@ -7,6 +7,8 @@
+ #include "ivpu_hw.h"
+ #include "ivpu_ipc.h"
+ #include "ivpu_jsm_msg.h"
++#include "ivpu_pm.h"
++#include "vpu_jsm_api.h"
+ 
+ const char *ivpu_jsm_msg_type_to_str(enum vpu_ipc_msg_type type)
+ {
+@@ -132,7 +134,7 @@ int ivpu_jsm_get_heartbeat(struct ivpu_device *vdev, u32 engine, u64 *heartbeat)
+ 	struct vpu_jsm_msg resp;
+ 	int ret;
+ 
+-	if (engine > VPU_ENGINE_COPY)
++	if (engine != VPU_ENGINE_COMPUTE)
+ 		return -EINVAL;
+ 
+ 	req.payload.query_engine_hb.engine_idx = engine;
+@@ -155,15 +157,17 @@ int ivpu_jsm_reset_engine(struct ivpu_device *vdev, u32 engine)
+ 	struct vpu_jsm_msg resp;
+ 	int ret;
+ 
+-	if (engine > VPU_ENGINE_COPY)
++	if (engine != VPU_ENGINE_COMPUTE)
+ 		return -EINVAL;
+ 
+ 	req.payload.engine_reset.engine_idx = engine;
+ 
+ 	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_ENGINE_RESET_DONE, &resp,
+ 				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
+-	if (ret)
++	if (ret) {
+ 		ivpu_err_ratelimited(vdev, "Failed to reset engine %d: %d\n", engine, ret);
++		ivpu_pm_trigger_recovery(vdev, "Engine reset failed");
++	}
+ 
+ 	return ret;
+ }
+@@ -174,7 +178,7 @@ int ivpu_jsm_preempt_engine(struct ivpu_device *vdev, u32 engine, u32 preempt_id
+ 	struct vpu_jsm_msg resp;
+ 	int ret;
+ 
+-	if (engine > VPU_ENGINE_COPY)
++	if (engine != VPU_ENGINE_COMPUTE)
+ 		return -EINVAL;
+ 
+ 	req.payload.engine_preempt.engine_idx = engine;
+@@ -346,15 +350,17 @@ int ivpu_jsm_hws_resume_engine(struct ivpu_device *vdev, u32 engine)
+ 	struct vpu_jsm_msg resp;
+ 	int ret;
+ 
+-	if (engine >= VPU_ENGINE_NB)
++	if (engine != VPU_ENGINE_COMPUTE)
+ 		return -EINVAL;
+ 
+ 	req.payload.hws_resume_engine.engine_idx = engine;
+ 
+ 	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE, &resp,
+ 				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
+-	if (ret)
++	if (ret) {
+ 		ivpu_err_ratelimited(vdev, "Failed to resume engine %d: %d\n", engine, ret);
++		ivpu_pm_trigger_recovery(vdev, "Engine resume failed");
++	}
+ 
+ 	return ret;
+ }
+@@ -409,26 +415,18 @@ int ivpu_jsm_hws_setup_priority_bands(struct ivpu_device *vdev)
+ {
+ 	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP };
+ 	struct vpu_jsm_msg resp;
++	struct ivpu_hw_info *hw = vdev->hw;
++	struct vpu_ipc_msg_payload_hws_priority_band_setup *setup =
++		&req.payload.hws_priority_band_setup;
+ 	int ret;
+ 
+-	/* Idle */
+-	req.payload.hws_priority_band_setup.grace_period[0] = 0;
+-	req.payload.hws_priority_band_setup.process_grace_period[0] = 50000;
+-	req.payload.hws_priority_band_setup.process_quantum[0] = 160000;
+-	/* Normal */
+-	req.payload.hws_priority_band_setup.grace_period[1] = 50000;
+-	req.payload.hws_priority_band_setup.process_grace_period[1] = 50000;
+-	req.payload.hws_priority_band_setup.process_quantum[1] = 300000;
+-	/* Focus */
+-	req.payload.hws_priority_band_setup.grace_period[2] = 50000;
+-	req.payload.hws_priority_band_setup.process_grace_period[2] = 50000;
+-	req.payload.hws_priority_band_setup.process_quantum[2] = 200000;
+-	/* Realtime */
+-	req.payload.hws_priority_band_setup.grace_period[3] = 0;
+-	req.payload.hws_priority_band_setup.process_grace_period[3] = 50000;
+-	req.payload.hws_priority_band_setup.process_quantum[3] = 200000;
+-
+-	req.payload.hws_priority_band_setup.normal_band_percentage = 10;
++	for (int band = VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE;
++	     band < VPU_JOB_SCHEDULING_PRIORITY_BAND_COUNT; band++) {
++		setup->grace_period[band] = hw->hws.grace_period[band];
++		setup->process_grace_period[band] = hw->hws.process_grace_period[band];
++		setup->process_quantum[band] = hw->hws.process_quantum[band];
++	}
++	setup->normal_band_percentage = 10;
+ 
+ 	ret = ivpu_ipc_send_receive_internal(vdev, &req, VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP_RSP,
+ 					     &resp, VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index 395240cb3666ee..a6a66d79476386 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -1456,7 +1456,7 @@ static bool ahci_broken_lpm(struct pci_dev *pdev)
+ 		{
+ 			.matches = {
+ 				DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+-				DMI_MATCH(DMI_PRODUCT_VERSION, "ASUSPRO D840MB_M840SA"),
++				DMI_MATCH(DMI_PRODUCT_NAME, "ASUSPRO D840MB_M840SA"),
+ 			},
+ 			/* 320 is broken, there is no known good version. */
+ 		},
+diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
+index a0d6e8d7f42c8a..f5429666822f0c 100644
+--- a/drivers/cxl/core/region.c
++++ b/drivers/cxl/core/region.c
+@@ -1781,6 +1781,13 @@ static int find_pos_and_ways(struct cxl_port *port, struct range *range,
+ 	}
+ 	put_device(dev);
+ 
++	if (rc)
++		dev_err(port->uport_dev,
++			"failed to find %s:%s in target list of %s\n",
++			dev_name(&port->dev),
++			dev_name(port->parent_dport->dport_dev),
++			dev_name(&cxlsd->cxld.dev));
++
+ 	return rc;
+ }
+ 
+diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
+index 19a58c4ecef3f8..8b27bd545685a0 100644
+--- a/drivers/dma/idxd/cdev.c
++++ b/drivers/dma/idxd/cdev.c
+@@ -354,7 +354,9 @@ static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid)
+ 			set_bit(h, evl->bmap);
+ 		h = (h + 1) % size;
+ 	}
+-	drain_workqueue(wq->wq);
++	if (wq->wq)
++		drain_workqueue(wq->wq);
++
+ 	mutex_unlock(&evl->lock);
+ }
+ 
+diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
+index 5eb51ae93e89d0..aa59b62cd83fb7 100644
+--- a/drivers/dma/xilinx/xilinx_dma.c
++++ b/drivers/dma/xilinx/xilinx_dma.c
+@@ -2906,6 +2906,8 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
+ 		return -EINVAL;
+ 	}
+ 
++	xdev->common.directions |= chan->direction;
++
+ 	/* Request the interrupt */
+ 	chan->irq = of_irq_get(node, chan->tdest);
+ 	if (chan->irq < 0)
+diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
+index c0a8f9c8d4f0bf..322ba16b31bf20 100644
+--- a/drivers/edac/amd64_edac.c
++++ b/drivers/edac/amd64_edac.c
+@@ -1208,7 +1208,9 @@ static int umc_get_cs_mode(int dimm, u8 ctrl, struct amd64_pvt *pvt)
+ 	if (csrow_enabled(2 * dimm + 1, ctrl, pvt))
+ 		cs_mode |= CS_ODD_PRIMARY;
+ 
+-	/* Asymmetric dual-rank DIMM support. */
++	if (csrow_sec_enabled(2 * dimm, ctrl, pvt))
++		cs_mode |= CS_EVEN_SECONDARY;
++
+ 	if (csrow_sec_enabled(2 * dimm + 1, ctrl, pvt))
+ 		cs_mode |= CS_ODD_SECONDARY;
+ 
+@@ -1229,12 +1231,13 @@ static int umc_get_cs_mode(int dimm, u8 ctrl, struct amd64_pvt *pvt)
+ 	return cs_mode;
+ }
+ 
+-static int __addr_mask_to_cs_size(u32 addr_mask_orig, unsigned int cs_mode,
+-				  int csrow_nr, int dimm)
++static int calculate_cs_size(u32 mask, unsigned int cs_mode)
+ {
+-	u32 msb, weight, num_zero_bits;
+-	u32 addr_mask_deinterleaved;
+-	int size = 0;
++	int msb, weight, num_zero_bits;
++	u32 deinterleaved_mask;
++
++	if (!mask)
++		return 0;
+ 
+ 	/*
+ 	 * The number of zero bits in the mask is equal to the number of bits
+@@ -1247,19 +1250,30 @@ static int __addr_mask_to_cs_size(u32 addr_mask_orig, unsigned int cs_mode,
+ 	 * without swapping with the most significant bit. This can be handled
+ 	 * by keeping the MSB where it is and ignoring the single zero bit.
+ 	 */
+-	msb = fls(addr_mask_orig) - 1;
+-	weight = hweight_long(addr_mask_orig);
++	msb = fls(mask) - 1;
++	weight = hweight_long(mask);
+ 	num_zero_bits = msb - weight - !!(cs_mode & CS_3R_INTERLEAVE);
+ 
+ 	/* Take the number of zero bits off from the top of the mask. */
+-	addr_mask_deinterleaved = GENMASK_ULL(msb - num_zero_bits, 1);
++	deinterleaved_mask = GENMASK(msb - num_zero_bits, 1);
++	edac_dbg(1, "  Deinterleaved AddrMask: 0x%x\n", deinterleaved_mask);
++
++	return (deinterleaved_mask >> 2) + 1;
++}
++
++static int __addr_mask_to_cs_size(u32 addr_mask, u32 addr_mask_sec,
++				  unsigned int cs_mode, int csrow_nr, int dimm)
++{
++	int size;
+ 
+ 	edac_dbg(1, "CS%d DIMM%d AddrMasks:\n", csrow_nr, dimm);
+-	edac_dbg(1, "  Original AddrMask: 0x%x\n", addr_mask_orig);
+-	edac_dbg(1, "  Deinterleaved AddrMask: 0x%x\n", addr_mask_deinterleaved);
++	edac_dbg(1, "  Primary AddrMask: 0x%x\n", addr_mask);
+ 
+ 	/* Register [31:1] = Address [39:9]. Size is in kBs here. */
+-	size = (addr_mask_deinterleaved >> 2) + 1;
++	size = calculate_cs_size(addr_mask, cs_mode);
++
++	edac_dbg(1, "  Secondary AddrMask: 0x%x\n", addr_mask_sec);
++	size += calculate_cs_size(addr_mask_sec, cs_mode);
+ 
+ 	/* Return size in MBs. */
+ 	return size >> 10;
+@@ -1268,8 +1282,8 @@ static int __addr_mask_to_cs_size(u32 addr_mask_orig, unsigned int cs_mode,
+ static int umc_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
+ 				    unsigned int cs_mode, int csrow_nr)
+ {
++	u32 addr_mask = 0, addr_mask_sec = 0;
+ 	int cs_mask_nr = csrow_nr;
+-	u32 addr_mask_orig;
+ 	int dimm, size = 0;
+ 
+ 	/* No Chip Selects are enabled. */
+@@ -1307,13 +1321,13 @@ static int umc_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
+ 	if (!pvt->flags.zn_regs_v2)
+ 		cs_mask_nr >>= 1;
+ 
+-	/* Asymmetric dual-rank DIMM support. */
+-	if ((csrow_nr & 1) && (cs_mode & CS_ODD_SECONDARY))
+-		addr_mask_orig = pvt->csels[umc].csmasks_sec[cs_mask_nr];
+-	else
+-		addr_mask_orig = pvt->csels[umc].csmasks[cs_mask_nr];
++	if (cs_mode & (CS_EVEN_PRIMARY | CS_ODD_PRIMARY))
++		addr_mask = pvt->csels[umc].csmasks[cs_mask_nr];
++
++	if (cs_mode & (CS_EVEN_SECONDARY | CS_ODD_SECONDARY))
++		addr_mask_sec = pvt->csels[umc].csmasks_sec[cs_mask_nr];
+ 
+-	return __addr_mask_to_cs_size(addr_mask_orig, cs_mode, csrow_nr, dimm);
++	return __addr_mask_to_cs_size(addr_mask, addr_mask_sec, cs_mode, csrow_nr, dimm);
+ }
+ 
+ static void umc_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
+@@ -3515,9 +3529,10 @@ static void gpu_get_err_info(struct mce *m, struct err_info *err)
+ static int gpu_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
+ 				    unsigned int cs_mode, int csrow_nr)
+ {
+-	u32 addr_mask_orig = pvt->csels[umc].csmasks[csrow_nr];
++	u32 addr_mask		= pvt->csels[umc].csmasks[csrow_nr];
++	u32 addr_mask_sec	= pvt->csels[umc].csmasks_sec[csrow_nr];
+ 
+-	return __addr_mask_to_cs_size(addr_mask_orig, cs_mode, csrow_nr, csrow_nr >> 1);
++	return __addr_mask_to_cs_size(addr_mask, addr_mask_sec, cs_mode, csrow_nr, csrow_nr >> 1);
+ }
+ 
+ static void gpu_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+index 9da4414de6177d..81f16e4447f8ea 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+@@ -1902,7 +1902,7 @@ static void amdgpu_ib_preempt_mark_partial_job(struct amdgpu_ring *ring)
+ 			continue;
+ 		}
+ 		job = to_amdgpu_job(s_job);
+-		if (preempted && (&job->hw_fence) == fence)
++		if (preempted && (&job->hw_fence.base) == fence)
+ 			/* mark the job as preempted */
+ 			job->preemption_status |= AMDGPU_IB_PREEMPTED;
+ 	}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index ca0411c9500e7c..a55e611605fcab 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -5861,7 +5861,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+ 	 *
+ 	 * job->base holds a reference to parent fence
+ 	 */
+-	if (job && dma_fence_is_signaled(&job->hw_fence)) {
++	if (job && dma_fence_is_signaled(&job->hw_fence.base)) {
+ 		job_signaled = true;
+ 		dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
+ 		goto skip_hw_reset;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+index 018240a2ab96a4..34d41e3ce34746 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+@@ -301,10 +301,12 @@ static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev,
+ 	const struct firmware *fw;
+ 	int r;
+ 
+-	r = request_firmware(&fw, fw_name, adev->dev);
++	r = firmware_request_nowarn(&fw, fw_name, adev->dev);
+ 	if (r) {
+-		dev_err(adev->dev, "can't load firmware \"%s\"\n",
+-			fw_name);
++		if (amdgpu_discovery == 2)
++			dev_err(adev->dev, "can't load firmware \"%s\"\n", fw_name);
++		else
++			drm_info(&adev->ddev, "Optional firmware \"%s\" was not found\n", fw_name);
+ 		return r;
+ 	}
+ 
+@@ -419,16 +421,12 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
+ 	/* Read from file if it is the preferred option */
+ 	fw_name = amdgpu_discovery_get_fw_name(adev);
+ 	if (fw_name != NULL) {
+-		dev_info(adev->dev, "use ip discovery information from file");
++		drm_dbg(&adev->ddev, "use ip discovery information from file");
+ 		r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin, fw_name);
+-
+-		if (r) {
+-			dev_err(adev->dev, "failed to read ip discovery binary from file\n");
+-			r = -EINVAL;
++		if (r)
+ 			goto out;
+-		}
+-
+ 	} else {
++		drm_dbg(&adev->ddev, "use ip discovery information from memory");
+ 		r = amdgpu_discovery_read_binary_from_mem(
+ 			adev, adev->mman.discovery_bin);
+ 		if (r)
+@@ -1286,10 +1284,8 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
+ 	int r;
+ 
+ 	r = amdgpu_discovery_init(adev);
+-	if (r) {
+-		DRM_ERROR("amdgpu_discovery_init failed\n");
++	if (r)
+ 		return r;
+-	}
+ 
+ 	adev->gfx.xcc_mask = 0;
+ 	adev->sdma.sdma_mask = 0;
+@@ -2429,6 +2425,40 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
+ {
+ 	int r;
+ 
++	switch (adev->asic_type) {
++	case CHIP_VEGA10:
++	case CHIP_VEGA12:
++	case CHIP_RAVEN:
++	case CHIP_VEGA20:
++	case CHIP_ARCTURUS:
++	case CHIP_ALDEBARAN:
++		/* This is not fatal. We have a fallback below
++		 * if the new firmware files are not present. Some of
++		 * this will be overridden below to keep things
++		 * consistent with the current behavior.
++		 */
++		r = amdgpu_discovery_reg_base_init(adev);
++		if (!r) {
++			amdgpu_discovery_harvest_ip(adev);
++			amdgpu_discovery_get_gfx_info(adev);
++			amdgpu_discovery_get_mall_info(adev);
++			amdgpu_discovery_get_vcn_info(adev);
++		}
++		break;
++	default:
++		r = amdgpu_discovery_reg_base_init(adev);
++		if (r) {
++			drm_err(&adev->ddev, "discovery failed: %d\n", r);
++			return r;
++		}
++
++		amdgpu_discovery_harvest_ip(adev);
++		amdgpu_discovery_get_gfx_info(adev);
++		amdgpu_discovery_get_mall_info(adev);
++		amdgpu_discovery_get_vcn_info(adev);
++		break;
++	}
++
+ 	switch (adev->asic_type) {
+ 	case CHIP_VEGA10:
+ 		vega10_reg_base_init(adev);
+@@ -2591,14 +2621,6 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
+ 		adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0);
+ 		break;
+ 	default:
+-		r = amdgpu_discovery_reg_base_init(adev);
+-		if (r)
+-			return -EINVAL;
+-
+-		amdgpu_discovery_harvest_ip(adev);
+-		amdgpu_discovery_get_gfx_info(adev);
+-		amdgpu_discovery_get_mall_info(adev);
+-		amdgpu_discovery_get_vcn_info(adev);
+ 		break;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+index 2f24a6aa13bf6e..569e0e53739277 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+@@ -41,22 +41,6 @@
+ #include "amdgpu_trace.h"
+ #include "amdgpu_reset.h"
+ 
+-/*
+- * Fences mark an event in the GPUs pipeline and are used
+- * for GPU/CPU synchronization.  When the fence is written,
+- * it is expected that all buffers associated with that fence
+- * are no longer in use by the associated ring on the GPU and
+- * that the relevant GPU caches have been flushed.
+- */
+-
+-struct amdgpu_fence {
+-	struct dma_fence base;
+-
+-	/* RB, DMA, etc. */
+-	struct amdgpu_ring		*ring;
+-	ktime_t				start_timestamp;
+-};
+-
+ static struct kmem_cache *amdgpu_fence_slab;
+ 
+ int amdgpu_fence_slab_init(void)
+@@ -151,12 +135,12 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amd
+ 		am_fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_ATOMIC);
+ 		if (am_fence == NULL)
+ 			return -ENOMEM;
+-		fence = &am_fence->base;
+-		am_fence->ring = ring;
+ 	} else {
+ 		/* take use of job-embedded fence */
+-		fence = &job->hw_fence;
++		am_fence = &job->hw_fence;
+ 	}
++	fence = &am_fence->base;
++	am_fence->ring = ring;
+ 
+ 	seq = ++ring->fence_drv.sync_seq;
+ 	if (job && job->job_run_counter) {
+@@ -718,7 +702,7 @@ void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring)
+ 			 * it right here or we won't be able to track them in fence_drv
+ 			 * and they will remain unsignaled during sa_bo free.
+ 			 */
+-			job = container_of(old, struct amdgpu_job, hw_fence);
++			job = container_of(old, struct amdgpu_job, hw_fence.base);
+ 			if (!job->base.s_fence && !dma_fence_is_signaled(old))
+ 				dma_fence_signal(old);
+ 			RCU_INIT_POINTER(*ptr, NULL);
+@@ -780,7 +764,7 @@ static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
+ 
+ static const char *amdgpu_job_fence_get_timeline_name(struct dma_fence *f)
+ {
+-	struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
++	struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence.base);
+ 
+ 	return (const char *)to_amdgpu_ring(job->base.sched)->name;
+ }
+@@ -810,7 +794,7 @@ static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
+  */
+ static bool amdgpu_job_fence_enable_signaling(struct dma_fence *f)
+ {
+-	struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
++	struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence.base);
+ 
+ 	if (!timer_pending(&to_amdgpu_ring(job->base.sched)->fence_drv.fallback_timer))
+ 		amdgpu_fence_schedule_fallback(to_amdgpu_ring(job->base.sched));
+@@ -845,7 +829,7 @@ static void amdgpu_job_fence_free(struct rcu_head *rcu)
+ 	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
+ 
+ 	/* free job if fence has a parent job */
+-	kfree(container_of(f, struct amdgpu_job, hw_fence));
++	kfree(container_of(f, struct amdgpu_job, hw_fence.base));
+ }
+ 
+ /**
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+index 1ce20a19be8ba9..7e6057a6e7f173 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+@@ -259,8 +259,8 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
+ 	/* Check if any fences where initialized */
+ 	if (job->base.s_fence && job->base.s_fence->finished.ops)
+ 		f = &job->base.s_fence->finished;
+-	else if (job->hw_fence.ops)
+-		f = &job->hw_fence;
++	else if (job->hw_fence.base.ops)
++		f = &job->hw_fence.base;
+ 	else
+ 		f = NULL;
+ 
+@@ -277,10 +277,10 @@ static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
+ 	amdgpu_sync_free(&job->explicit_sync);
+ 
+ 	/* only put the hw fence if has embedded fence */
+-	if (!job->hw_fence.ops)
++	if (!job->hw_fence.base.ops)
+ 		kfree(job);
+ 	else
+-		dma_fence_put(&job->hw_fence);
++		dma_fence_put(&job->hw_fence.base);
+ }
+ 
+ void amdgpu_job_set_gang_leader(struct amdgpu_job *job,
+@@ -309,10 +309,10 @@ void amdgpu_job_free(struct amdgpu_job *job)
+ 	if (job->gang_submit != &job->base.s_fence->scheduled)
+ 		dma_fence_put(job->gang_submit);
+ 
+-	if (!job->hw_fence.ops)
++	if (!job->hw_fence.base.ops)
+ 		kfree(job);
+ 	else
+-		dma_fence_put(&job->hw_fence);
++		dma_fence_put(&job->hw_fence.base);
+ }
+ 
+ struct dma_fence *amdgpu_job_submit(struct amdgpu_job *job)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+index ce6b9ba967fff0..4fe033d8f35683 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+@@ -48,7 +48,7 @@ struct amdgpu_job {
+ 	struct drm_sched_job    base;
+ 	struct amdgpu_vm	*vm;
+ 	struct amdgpu_sync	explicit_sync;
+-	struct dma_fence	hw_fence;
++	struct amdgpu_fence	hw_fence;
+ 	struct dma_fence	*gang_submit;
+ 	uint32_t		preamble_status;
+ 	uint32_t                preemption_status;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+index f93f5100220182..9af2cda676ad7e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+@@ -126,6 +126,22 @@ struct amdgpu_fence_driver {
+ 	struct dma_fence		**fences;
+ };
+ 
++/*
++ * Fences mark an event in the GPUs pipeline and are used
++ * for GPU/CPU synchronization.  When the fence is written,
++ * it is expected that all buffers associated with that fence
++ * are no longer in use by the associated ring on the GPU and
++ * that the relevant GPU caches have been flushed.
++ */
++
++struct amdgpu_fence {
++	struct dma_fence base;
++
++	/* RB, DMA, etc. */
++	struct amdgpu_ring		*ring;
++	ktime_t				start_timestamp;
++};
++
+ extern const struct drm_sched_backend_ops amdgpu_sched_ops;
+ 
+ void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c
+index e22cb2b5cd9264..dba8051b8c14b2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c
+@@ -133,7 +133,7 @@ void amdgpu_seq64_unmap(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv)
+ 
+ 	vm = &fpriv->vm;
+ 
+-	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
++	drm_exec_init(&exec, 0, 0);
+ 	drm_exec_until_all_locked(&exec) {
+ 		r = amdgpu_vm_lock_pd(vm, &exec, 0);
+ 		if (likely(!r))
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+index 4c7b53648a507a..eb83d7c1f784c1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+@@ -30,6 +30,10 @@
+ 
+ #define AMDGPU_UCODE_NAME_MAX		(128)
+ 
++static const struct kicker_device kicker_device_list[] = {
++	{0x744B, 0x00},
++};
++
+ static void amdgpu_ucode_print_common_hdr(const struct common_firmware_header *hdr)
+ {
+ 	DRM_DEBUG("size_bytes: %u\n", le32_to_cpu(hdr->size_bytes));
+@@ -1383,6 +1387,19 @@ static const char *amdgpu_ucode_legacy_naming(struct amdgpu_device *adev, int bl
+ 	return NULL;
+ }
+ 
++bool amdgpu_is_kicker_fw(struct amdgpu_device *adev)
++{
++	int i;
++
++	for (i = 0; i < ARRAY_SIZE(kicker_device_list); i++) {
++		if (adev->pdev->device == kicker_device_list[i].device &&
++		    adev->pdev->revision == kicker_device_list[i].revision)
++			return true;
++	}
++
++	return false;
++}
++
+ void amdgpu_ucode_ip_version_decode(struct amdgpu_device *adev, int block_type, char *ucode_prefix, int len)
+ {
+ 	int maj, min, rev;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+index 4e23419b92d4eb..fd08b015b2a7ac 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+@@ -595,6 +595,11 @@ struct amdgpu_firmware {
+ 	uint64_t fw_buf_mc;
+ };
+ 
++struct kicker_device {
++	unsigned short device;
++	u8 revision;
++};
++
+ void amdgpu_ucode_print_mc_hdr(const struct common_firmware_header *hdr);
+ void amdgpu_ucode_print_smc_hdr(const struct common_firmware_header *hdr);
+ void amdgpu_ucode_print_imu_hdr(const struct common_firmware_header *hdr);
+@@ -622,5 +627,6 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type);
+ const char *amdgpu_ucode_name(enum AMDGPU_UCODE_ID ucode_id);
+ 
+ void amdgpu_ucode_ip_version_decode(struct amdgpu_device *adev, int block_type, char *ucode_prefix, int len);
++bool amdgpu_is_kicker_fw(struct amdgpu_device *adev);
+ 
+ #endif
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+index ff5e52025266cd..8f58ec6f140093 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+@@ -463,7 +463,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
+ 	int r;
+ 
+ 	lpfn = (u64)place->lpfn << PAGE_SHIFT;
+-	if (!lpfn)
++	if (!lpfn || lpfn > man->size)
+ 		lpfn = man->size;
+ 
+ 	fpfn = (u64)place->fpfn << PAGE_SHIFT;
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
+index 4dd86c682ee6a2..1e4ce06f5f2c39 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
+@@ -485,7 +485,7 @@ static void sdma_v4_4_2_inst_gfx_stop(struct amdgpu_device *adev,
+ {
+ 	struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES];
+ 	u32 doorbell_offset, doorbell;
+-	u32 rb_cntl, ib_cntl;
++	u32 rb_cntl, ib_cntl, sdma_cntl;
+ 	int i;
+ 
+ 	for_each_inst(i, inst_mask) {
+@@ -497,6 +497,9 @@ static void sdma_v4_4_2_inst_gfx_stop(struct amdgpu_device *adev,
+ 		ib_cntl = RREG32_SDMA(i, regSDMA_GFX_IB_CNTL);
+ 		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA_GFX_IB_CNTL, IB_ENABLE, 0);
+ 		WREG32_SDMA(i, regSDMA_GFX_IB_CNTL, ib_cntl);
++		sdma_cntl = RREG32_SDMA(i, regSDMA_CNTL);
++		sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA_CNTL, UTC_L1_ENABLE, 0);
++		WREG32_SDMA(i, regSDMA_CNTL, sdma_cntl);
+ 
+ 		if (sdma[i]->use_doorbell) {
+ 			doorbell = RREG32_SDMA(i, regSDMA_GFX_DOORBELL);
+@@ -953,6 +956,7 @@ static int sdma_v4_4_2_inst_start(struct amdgpu_device *adev,
+ 		/* set utc l1 enable flag always to 1 */
+ 		temp = RREG32_SDMA(i, regSDMA_CNTL);
+ 		temp = REG_SET_FIELD(temp, SDMA_CNTL, UTC_L1_ENABLE, 1);
++		WREG32_SDMA(i, regSDMA_CNTL, temp);
+ 
+ 		if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) < IP_VERSION(4, 4, 5)) {
+ 			/* enable context empty interrupt during initialization */
+diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
+index 7062f12b5b7511..6c8c9935a0f2e2 100644
+--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
++++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
+@@ -3640,7 +3640,7 @@ static const uint32_t cwsr_trap_gfx9_4_3_hex[] = {
+ };
+ 
+ static const uint32_t cwsr_trap_gfx12_hex[] = {
+-	0xbfa00001, 0xbfa0024b,
++	0xbfa00001, 0xbfa002a2,
+ 	0xb0804009, 0xb8f8f804,
+ 	0x9178ff78, 0x00008c00,
+ 	0xb8fbf811, 0x8b6eff78,
+@@ -3714,7 +3714,15 @@ static const uint32_t cwsr_trap_gfx12_hex[] = {
+ 	0x00011677, 0xd7610000,
+ 	0x00011a79, 0xd7610000,
+ 	0x00011c7e, 0xd7610000,
+-	0x00011e7f, 0xbefe00ff,
++	0x00011e7f, 0xd8500000,
++	0x00000000, 0xd8500000,
++	0x00000000, 0xd8500000,
++	0x00000000, 0xd8500000,
++	0x00000000, 0xd8500000,
++	0x00000000, 0xd8500000,
++	0x00000000, 0xd8500000,
++	0x00000000, 0xd8500000,
++	0x00000000, 0xbefe00ff,
+ 	0x00003fff, 0xbeff0080,
+ 	0xee0a407a, 0x000c0000,
+ 	0x00004000, 0xd760007a,
+@@ -3751,38 +3759,46 @@ static const uint32_t cwsr_trap_gfx12_hex[] = {
+ 	0x00000200, 0xbef600ff,
+ 	0x01000000, 0x7e000280,
+ 	0x7e020280, 0x7e040280,
+-	0xbefd0080, 0xbe804ec2,
+-	0xbf94fffe, 0xb8faf804,
+-	0x8b7a847a, 0x91788478,
+-	0x8c787a78, 0xd7610002,
+-	0x0000fa71, 0x807d817d,
+-	0xd7610002, 0x0000fa6c,
+-	0x807d817d, 0x917aff6d,
+-	0x80000000, 0xd7610002,
+-	0x0000fa7a, 0x807d817d,
+-	0xd7610002, 0x0000fa6e,
+-	0x807d817d, 0xd7610002,
+-	0x0000fa6f, 0x807d817d,
+-	0xd7610002, 0x0000fa78,
+-	0x807d817d, 0xb8faf811,
+-	0xd7610002, 0x0000fa7a,
+-	0x807d817d, 0xd7610002,
+-	0x0000fa7b, 0x807d817d,
+-	0xb8f1f801, 0xd7610002,
+-	0x0000fa71, 0x807d817d,
+-	0xb8f1f814, 0xd7610002,
+-	0x0000fa71, 0x807d817d,
+-	0xb8f1f815, 0xd7610002,
+-	0x0000fa71, 0x807d817d,
+-	0xb8f1f812, 0xd7610002,
+-	0x0000fa71, 0x807d817d,
+-	0xb8f1f813, 0xd7610002,
+-	0x0000fa71, 0x807d817d,
++	0xbe804ec2, 0xbf94fffe,
++	0xb8faf804, 0x8b7a847a,
++	0x91788478, 0x8c787a78,
++	0x917aff6d, 0x80000000,
++	0xd7610002, 0x00010071,
++	0xd7610002, 0x0001026c,
++	0xd7610002, 0x0001047a,
++	0xd7610002, 0x0001066e,
++	0xd7610002, 0x0001086f,
++	0xd7610002, 0x00010a78,
++	0xd7610002, 0x00010e7b,
++	0xd8500000, 0x00000000,
++	0xd8500000, 0x00000000,
++	0xd8500000, 0x00000000,
++	0xd8500000, 0x00000000,
++	0xd8500000, 0x00000000,
++	0xd8500000, 0x00000000,
++	0xd8500000, 0x00000000,
++	0xd8500000, 0x00000000,
++	0xb8faf811, 0xd7610002,
++	0x00010c7a, 0xb8faf801,
++	0xd7610002, 0x0001107a,
++	0xb8faf814, 0xd7610002,
++	0x0001127a, 0xb8faf815,
++	0xd7610002, 0x0001147a,
++	0xb8faf812, 0xd7610002,
++	0x0001167a, 0xb8faf813,
++	0xd7610002, 0x0001187a,
+ 	0xb8faf802, 0xd7610002,
+-	0x0000fa7a, 0x807d817d,
+-	0xbefa50c1, 0xbfc70000,
+-	0xd7610002, 0x0000fa7a,
+-	0x807d817d, 0xbefe00ff,
++	0x00011a7a, 0xbefa50c1,
++	0xbfc70000, 0xd7610002,
++	0x00011c7a, 0xd8500000,
++	0x00000000, 0xd8500000,
++	0x00000000, 0xd8500000,
++	0x00000000, 0xd8500000,
++	0x00000000, 0xd8500000,
++	0x00000000, 0xd8500000,
++	0x00000000, 0xd8500000,
++	0x00000000, 0xd8500000,
++	0x00000000, 0xbefe00ff,
+ 	0x0000ffff, 0xbeff0080,
+ 	0xc4068070, 0x008ce802,
+ 	0x00000000, 0xbefe00c1,
+@@ -3797,329 +3813,356 @@ static const uint32_t cwsr_trap_gfx12_hex[] = {
+ 	0xbe824102, 0xbe844104,
+ 	0xbe864106, 0xbe884108,
+ 	0xbe8a410a, 0xbe8c410c,
+-	0xbe8e410e, 0xd7610002,
+-	0x0000f200, 0x80798179,
+-	0xd7610002, 0x0000f201,
+-	0x80798179, 0xd7610002,
+-	0x0000f202, 0x80798179,
+-	0xd7610002, 0x0000f203,
+-	0x80798179, 0xd7610002,
+-	0x0000f204, 0x80798179,
+-	0xd7610002, 0x0000f205,
+-	0x80798179, 0xd7610002,
+-	0x0000f206, 0x80798179,
+-	0xd7610002, 0x0000f207,
+-	0x80798179, 0xd7610002,
+-	0x0000f208, 0x80798179,
+-	0xd7610002, 0x0000f209,
+-	0x80798179, 0xd7610002,
+-	0x0000f20a, 0x80798179,
+-	0xd7610002, 0x0000f20b,
+-	0x80798179, 0xd7610002,
+-	0x0000f20c, 0x80798179,
+-	0xd7610002, 0x0000f20d,
+-	0x80798179, 0xd7610002,
+-	0x0000f20e, 0x80798179,
+-	0xd7610002, 0x0000f20f,
+-	0x80798179, 0xbf06a079,
+-	0xbfa10007, 0xc4068070,
++	0xbe8e410e, 0xbf068079,
++	0xbfa10032, 0xd7610002,
++	0x00010000, 0xd7610002,
++	0x00010201, 0xd7610002,
++	0x00010402, 0xd7610002,
++	0x00010603, 0xd7610002,
++	0x00010804, 0xd7610002,
++	0x00010a05, 0xd7610002,
++	0x00010c06, 0xd7610002,
++	0x00010e07, 0xd7610002,
++	0x00011008, 0xd7610002,
++	0x00011209, 0xd7610002,
++	0x0001140a, 0xd7610002,
++	0x0001160b, 0xd7610002,
++	0x0001180c, 0xd7610002,
++	0x00011a0d, 0xd7610002,
++	0x00011c0e, 0xd7610002,
++	0x00011e0f, 0xd8500000,
++	0x00000000, 0xd8500000,
++	0x00000000, 0xd8500000,
++	0x00000000, 0xd8500000,
++	0x00000000, 0xd8500000,
++	0x00000000, 0xd8500000,
++	0x00000000, 0xd8500000,
++	0x00000000, 0xd8500000,
++	0x00000000, 0x80799079,
++	0xbfa00038, 0xd7610002,
++	0x00012000, 0xd7610002,
++	0x00012201, 0xd7610002,
++	0x00012402, 0xd7610002,
++	0x00012603, 0xd7610002,
++	0x00012804, 0xd7610002,
++	0x00012a05, 0xd7610002,
++	0x00012c06, 0xd7610002,
++	0x00012e07, 0xd7610002,
++	0x00013008, 0xd7610002,
++	0x00013209, 0xd7610002,
++	0x0001340a, 0xd7610002,
++	0x0001360b, 0xd7610002,
++	0x0001380c, 0xd7610002,
++	0x00013a0d, 0xd7610002,
++	0x00013c0e, 0xd7610002,
++	0x00013e0f, 0xd8500000,
++	0x00000000, 0xd8500000,
++	0x00000000, 0xd8500000,
++	0x00000000, 0xd8500000,
++	0x00000000, 0xd8500000,
++	0x00000000, 0xd8500000,
++	0x00000000, 0xd8500000,
++	0x00000000, 0xd8500000,
++	0x00000000, 0x80799079,
++	0xc4068070, 0x008ce802,
++	0x00000000, 0x8070ff70,
++	0x00000080, 0xbef90080,
++	0x7e040280, 0x807d907d,
++	0xbf0aff7d, 0x00000060,
++	0xbfa2ff88, 0xbe804100,
++	0xbe824102, 0xbe844104,
++	0xbe864106, 0xbe884108,
++	0xbe8a410a, 0xd7610002,
++	0x00010000, 0xd7610002,
++	0x00010201, 0xd7610002,
++	0x00010402, 0xd7610002,
++	0x00010603, 0xd7610002,
++	0x00010804, 0xd7610002,
++	0x00010a05, 0xd7610002,
++	0x00010c06, 0xd7610002,
++	0x00010e07, 0xd7610002,
++	0x00011008, 0xd7610002,
++	0x00011209, 0xd7610002,
++	0x0001140a, 0xd7610002,
++	0x0001160b, 0xd8500000,
++	0x00000000, 0xd8500000,
++	0x00000000, 0xd8500000,
++	0x00000000, 0xd8500000,
++	0x00000000, 0xd8500000,
++	0x00000000, 0xd8500000,
++	0x00000000, 0xd8500000,
++	0x00000000, 0xd8500000,
++	0x00000000, 0xc4068070,
+ 	0x008ce802, 0x00000000,
++	0xbefe00c1, 0x857d9973,
++	0x8b7d817d, 0xbf06817d,
++	0xbfa20002, 0xbeff0080,
++	0xbfa00001, 0xbeff00c1,
++	0xb8fb4306, 0x8b7bc17b,
++	0xbfa10044, 0x8b7aff6d,
++	0x80000000, 0xbfa10041,
++	0x847b897b, 0xbef6007b,
++	0xb8f03b05, 0x80708170,
++	0xbf0d9973, 0xbfa20002,
++	0x84708970, 0xbfa00001,
++	0x84708a70, 0xb8fa1e06,
++	0x847a8a7a, 0x80707a70,
++	0x8070ff70, 0x00000200,
+ 	0x8070ff70, 0x00000080,
+-	0xbef90080, 0x7e040280,
+-	0x807d907d, 0xbf0aff7d,
+-	0x00000060, 0xbfa2ffbb,
+-	0xbe804100, 0xbe824102,
+-	0xbe844104, 0xbe864106,
+-	0xbe884108, 0xbe8a410a,
+-	0xd7610002, 0x0000f200,
+-	0x80798179, 0xd7610002,
+-	0x0000f201, 0x80798179,
+-	0xd7610002, 0x0000f202,
+-	0x80798179, 0xd7610002,
+-	0x0000f203, 0x80798179,
+-	0xd7610002, 0x0000f204,
+-	0x80798179, 0xd7610002,
+-	0x0000f205, 0x80798179,
+-	0xd7610002, 0x0000f206,
+-	0x80798179, 0xd7610002,
+-	0x0000f207, 0x80798179,
+-	0xd7610002, 0x0000f208,
+-	0x80798179, 0xd7610002,
+-	0x0000f209, 0x80798179,
+-	0xd7610002, 0x0000f20a,
+-	0x80798179, 0xd7610002,
+-	0x0000f20b, 0x80798179,
+-	0xc4068070, 0x008ce802,
+-	0x00000000, 0xbefe00c1,
+-	0x857d9973, 0x8b7d817d,
+-	0xbf06817d, 0xbfa20002,
+-	0xbeff0080, 0xbfa00001,
+-	0xbeff00c1, 0xb8fb4306,
+-	0x8b7bc17b, 0xbfa10044,
+-	0x8b7aff6d, 0x80000000,
+-	0xbfa10041, 0x847b897b,
+-	0xbef6007b, 0xb8f03b05,
+-	0x80708170, 0xbf0d9973,
+-	0xbfa20002, 0x84708970,
+-	0xbfa00001, 0x84708a70,
+-	0xb8fa1e06, 0x847a8a7a,
+-	0x80707a70, 0x8070ff70,
+-	0x00000200, 0x8070ff70,
+-	0x00000080, 0xbef600ff,
+-	0x01000000, 0xd71f0000,
+-	0x000100c1, 0xd7200000,
+-	0x000200c1, 0x16000084,
+-	0x857d9973, 0x8b7d817d,
+-	0xbf06817d, 0xbefd0080,
+-	0xbfa20013, 0xbe8300ff,
+-	0x00000080, 0xbf800000,
+-	0xbf800000, 0xbf800000,
+-	0xd8d80000, 0x01000000,
+-	0xbf8a0000, 0xc4068070,
+-	0x008ce801, 0x00000000,
+-	0x807d037d, 0x80700370,
+-	0xd5250000, 0x0001ff00,
+-	0x00000080, 0xbf0a7b7d,
+-	0xbfa2fff3, 0xbfa00012,
+-	0xbe8300ff, 0x00000100,
++	0xbef600ff, 0x01000000,
++	0xd71f0000, 0x000100c1,
++	0xd7200000, 0x000200c1,
++	0x16000084, 0x857d9973,
++	0x8b7d817d, 0xbf06817d,
++	0xbefd0080, 0xbfa20013,
++	0xbe8300ff, 0x00000080,
+ 	0xbf800000, 0xbf800000,
+ 	0xbf800000, 0xd8d80000,
+ 	0x01000000, 0xbf8a0000,
+ 	0xc4068070, 0x008ce801,
+ 	0x00000000, 0x807d037d,
+ 	0x80700370, 0xd5250000,
+-	0x0001ff00, 0x00000100,
++	0x0001ff00, 0x00000080,
+ 	0xbf0a7b7d, 0xbfa2fff3,
+-	0xbefe00c1, 0x857d9973,
+-	0x8b7d817d, 0xbf06817d,
+-	0xbfa20004, 0xbef000ff,
+-	0x00000200, 0xbeff0080,
+-	0xbfa00003, 0xbef000ff,
+-	0x00000400, 0xbeff00c1,
+-	0xb8fb3b05, 0x807b817b,
+-	0x847b827b, 0x857d9973,
+-	0x8b7d817d, 0xbf06817d,
+-	0xbfa2001b, 0xbef600ff,
+-	0x01000000, 0xbefd0084,
+-	0xbf0a7b7d, 0xbfa10040,
+-	0x7e008700, 0x7e028701,
+-	0x7e048702, 0x7e068703,
+-	0xc4068070, 0x008ce800,
+-	0x00000000, 0xc4068070,
+-	0x008ce801, 0x00008000,
+-	0xc4068070, 0x008ce802,
+-	0x00010000, 0xc4068070,
+-	0x008ce803, 0x00018000,
+-	0x807d847d, 0x8070ff70,
+-	0x00000200, 0xbf0a7b7d,
+-	0xbfa2ffeb, 0xbfa0002a,
++	0xbfa00012, 0xbe8300ff,
++	0x00000100, 0xbf800000,
++	0xbf800000, 0xbf800000,
++	0xd8d80000, 0x01000000,
++	0xbf8a0000, 0xc4068070,
++	0x008ce801, 0x00000000,
++	0x807d037d, 0x80700370,
++	0xd5250000, 0x0001ff00,
++	0x00000100, 0xbf0a7b7d,
++	0xbfa2fff3, 0xbefe00c1,
++	0x857d9973, 0x8b7d817d,
++	0xbf06817d, 0xbfa20004,
++	0xbef000ff, 0x00000200,
++	0xbeff0080, 0xbfa00003,
++	0xbef000ff, 0x00000400,
++	0xbeff00c1, 0xb8fb3b05,
++	0x807b817b, 0x847b827b,
++	0x857d9973, 0x8b7d817d,
++	0xbf06817d, 0xbfa2001b,
+ 	0xbef600ff, 0x01000000,
+ 	0xbefd0084, 0xbf0a7b7d,
+-	0xbfa10015, 0x7e008700,
++	0xbfa10040, 0x7e008700,
+ 	0x7e028701, 0x7e048702,
+ 	0x7e068703, 0xc4068070,
+ 	0x008ce800, 0x00000000,
+ 	0xc4068070, 0x008ce801,
+-	0x00010000, 0xc4068070,
+-	0x008ce802, 0x00020000,
++	0x00008000, 0xc4068070,
++	0x008ce802, 0x00010000,
+ 	0xc4068070, 0x008ce803,
+-	0x00030000, 0x807d847d,
+-	0x8070ff70, 0x00000400,
++	0x00018000, 0x807d847d,
++	0x8070ff70, 0x00000200,
+ 	0xbf0a7b7d, 0xbfa2ffeb,
+-	0xb8fb1e06, 0x8b7bc17b,
+-	0xbfa1000d, 0x847b837b,
+-	0x807b7d7b, 0xbefe00c1,
+-	0xbeff0080, 0x7e008700,
++	0xbfa0002a, 0xbef600ff,
++	0x01000000, 0xbefd0084,
++	0xbf0a7b7d, 0xbfa10015,
++	0x7e008700, 0x7e028701,
++	0x7e048702, 0x7e068703,
+ 	0xc4068070, 0x008ce800,
+-	0x00000000, 0x807d817d,
+-	0x8070ff70, 0x00000080,
+-	0xbf0a7b7d, 0xbfa2fff7,
+-	0xbfa0016e, 0xbef4007e,
+-	0x8b75ff7f, 0x0000ffff,
+-	0x8c75ff75, 0x00040000,
+-	0xbef60080, 0xbef700ff,
+-	0x10807fac, 0xbef1007f,
+-	0xb8f20742, 0x84729972,
+-	0x8b6eff7f, 0x04000000,
+-	0xbfa1003b, 0xbefe00c1,
+-	0x857d9972, 0x8b7d817d,
+-	0xbf06817d, 0xbfa20002,
+-	0xbeff0080, 0xbfa00001,
+-	0xbeff00c1, 0xb8ef4306,
+-	0x8b6fc16f, 0xbfa10030,
+-	0x846f896f, 0xbef6006f,
++	0x00000000, 0xc4068070,
++	0x008ce801, 0x00010000,
++	0xc4068070, 0x008ce802,
++	0x00020000, 0xc4068070,
++	0x008ce803, 0x00030000,
++	0x807d847d, 0x8070ff70,
++	0x00000400, 0xbf0a7b7d,
++	0xbfa2ffeb, 0xb8fb1e06,
++	0x8b7bc17b, 0xbfa1000d,
++	0x847b837b, 0x807b7d7b,
++	0xbefe00c1, 0xbeff0080,
++	0x7e008700, 0xc4068070,
++	0x008ce800, 0x00000000,
++	0x807d817d, 0x8070ff70,
++	0x00000080, 0xbf0a7b7d,
++	0xbfa2fff7, 0xbfa0016e,
++	0xbef4007e, 0x8b75ff7f,
++	0x0000ffff, 0x8c75ff75,
++	0x00040000, 0xbef60080,
++	0xbef700ff, 0x10807fac,
++	0xbef1007f, 0xb8f20742,
++	0x84729972, 0x8b6eff7f,
++	0x04000000, 0xbfa1003b,
++	0xbefe00c1, 0x857d9972,
++	0x8b7d817d, 0xbf06817d,
++	0xbfa20002, 0xbeff0080,
++	0xbfa00001, 0xbeff00c1,
++	0xb8ef4306, 0x8b6fc16f,
++	0xbfa10030, 0x846f896f,
++	0xbef6006f, 0xb8f83b05,
++	0x80788178, 0xbf0d9972,
++	0xbfa20002, 0x84788978,
++	0xbfa00001, 0x84788a78,
++	0xb8ee1e06, 0x846e8a6e,
++	0x80786e78, 0x8078ff78,
++	0x00000200, 0x8078ff78,
++	0x00000080, 0xbef600ff,
++	0x01000000, 0x857d9972,
++	0x8b7d817d, 0xbf06817d,
++	0xbefd0080, 0xbfa2000d,
++	0xc4050078, 0x0080e800,
++	0x00000000, 0xbf8a0000,
++	0xdac00000, 0x00000000,
++	0x807dff7d, 0x00000080,
++	0x8078ff78, 0x00000080,
++	0xbf0a6f7d, 0xbfa2fff4,
++	0xbfa0000c, 0xc4050078,
++	0x0080e800, 0x00000000,
++	0xbf8a0000, 0xdac00000,
++	0x00000000, 0x807dff7d,
++	0x00000100, 0x8078ff78,
++	0x00000100, 0xbf0a6f7d,
++	0xbfa2fff4, 0xbef80080,
++	0xbefe00c1, 0x857d9972,
++	0x8b7d817d, 0xbf06817d,
++	0xbfa20002, 0xbeff0080,
++	0xbfa00001, 0xbeff00c1,
++	0xb8ef3b05, 0x806f816f,
++	0x846f826f, 0x857d9972,
++	0x8b7d817d, 0xbf06817d,
++	0xbfa2002c, 0xbef600ff,
++	0x01000000, 0xbeee0078,
++	0x8078ff78, 0x00000200,
++	0xbefd0084, 0xbf0a6f7d,
++	0xbfa10061, 0xc4050078,
++	0x008ce800, 0x00000000,
++	0xc4050078, 0x008ce801,
++	0x00008000, 0xc4050078,
++	0x008ce802, 0x00010000,
++	0xc4050078, 0x008ce803,
++	0x00018000, 0xbf8a0000,
++	0x7e008500, 0x7e028501,
++	0x7e048502, 0x7e068503,
++	0x807d847d, 0x8078ff78,
++	0x00000200, 0xbf0a6f7d,
++	0xbfa2ffea, 0xc405006e,
++	0x008ce800, 0x00000000,
++	0xc405006e, 0x008ce801,
++	0x00008000, 0xc405006e,
++	0x008ce802, 0x00010000,
++	0xc405006e, 0x008ce803,
++	0x00018000, 0xbf8a0000,
++	0xbfa0003d, 0xbef600ff,
++	0x01000000, 0xbeee0078,
++	0x8078ff78, 0x00000400,
++	0xbefd0084, 0xbf0a6f7d,
++	0xbfa10016, 0xc4050078,
++	0x008ce800, 0x00000000,
++	0xc4050078, 0x008ce801,
++	0x00010000, 0xc4050078,
++	0x008ce802, 0x00020000,
++	0xc4050078, 0x008ce803,
++	0x00030000, 0xbf8a0000,
++	0x7e008500, 0x7e028501,
++	0x7e048502, 0x7e068503,
++	0x807d847d, 0x8078ff78,
++	0x00000400, 0xbf0a6f7d,
++	0xbfa2ffea, 0xb8ef1e06,
++	0x8b6fc16f, 0xbfa1000f,
++	0x846f836f, 0x806f7d6f,
++	0xbefe00c1, 0xbeff0080,
++	0xc4050078, 0x008ce800,
++	0x00000000, 0xbf8a0000,
++	0x7e008500, 0x807d817d,
++	0x8078ff78, 0x00000080,
++	0xbf0a6f7d, 0xbfa2fff6,
++	0xbeff00c1, 0xc405006e,
++	0x008ce800, 0x00000000,
++	0xc405006e, 0x008ce801,
++	0x00010000, 0xc405006e,
++	0x008ce802, 0x00020000,
++	0xc405006e, 0x008ce803,
++	0x00030000, 0xbf8a0000,
+ 	0xb8f83b05, 0x80788178,
+ 	0xbf0d9972, 0xbfa20002,
+ 	0x84788978, 0xbfa00001,
+ 	0x84788a78, 0xb8ee1e06,
+ 	0x846e8a6e, 0x80786e78,
+ 	0x8078ff78, 0x00000200,
+-	0x8078ff78, 0x00000080,
+-	0xbef600ff, 0x01000000,
+-	0x857d9972, 0x8b7d817d,
+-	0xbf06817d, 0xbefd0080,
+-	0xbfa2000d, 0xc4050078,
+-	0x0080e800, 0x00000000,
+-	0xbf8a0000, 0xdac00000,
+-	0x00000000, 0x807dff7d,
+-	0x00000080, 0x8078ff78,
+-	0x00000080, 0xbf0a6f7d,
+-	0xbfa2fff4, 0xbfa0000c,
+-	0xc4050078, 0x0080e800,
+-	0x00000000, 0xbf8a0000,
+-	0xdac00000, 0x00000000,
+-	0x807dff7d, 0x00000100,
+-	0x8078ff78, 0x00000100,
+-	0xbf0a6f7d, 0xbfa2fff4,
+-	0xbef80080, 0xbefe00c1,
+-	0x857d9972, 0x8b7d817d,
+-	0xbf06817d, 0xbfa20002,
+-	0xbeff0080, 0xbfa00001,
+-	0xbeff00c1, 0xb8ef3b05,
+-	0x806f816f, 0x846f826f,
+-	0x857d9972, 0x8b7d817d,
+-	0xbf06817d, 0xbfa2002c,
++	0x80f8ff78, 0x00000050,
+ 	0xbef600ff, 0x01000000,
+-	0xbeee0078, 0x8078ff78,
+-	0x00000200, 0xbefd0084,
+-	0xbf0a6f7d, 0xbfa10061,
+-	0xc4050078, 0x008ce800,
+-	0x00000000, 0xc4050078,
+-	0x008ce801, 0x00008000,
+-	0xc4050078, 0x008ce802,
+-	0x00010000, 0xc4050078,
+-	0x008ce803, 0x00018000,
+-	0xbf8a0000, 0x7e008500,
+-	0x7e028501, 0x7e048502,
+-	0x7e068503, 0x807d847d,
++	0xbefd00ff, 0x0000006c,
++	0x80f89078, 0xf462403a,
++	0xf0000000, 0xbf8a0000,
++	0x80fd847d, 0xbf800000,
++	0xbe804300, 0xbe824302,
++	0x80f8a078, 0xf462603a,
++	0xf0000000, 0xbf8a0000,
++	0x80fd887d, 0xbf800000,
++	0xbe804300, 0xbe824302,
++	0xbe844304, 0xbe864306,
++	0x80f8c078, 0xf462803a,
++	0xf0000000, 0xbf8a0000,
++	0x80fd907d, 0xbf800000,
++	0xbe804300, 0xbe824302,
++	0xbe844304, 0xbe864306,
++	0xbe884308, 0xbe8a430a,
++	0xbe8c430c, 0xbe8e430e,
++	0xbf06807d, 0xbfa1fff0,
++	0xb980f801, 0x00000000,
++	0xb8f83b05, 0x80788178,
++	0xbf0d9972, 0xbfa20002,
++	0x84788978, 0xbfa00001,
++	0x84788a78, 0xb8ee1e06,
++	0x846e8a6e, 0x80786e78,
+ 	0x8078ff78, 0x00000200,
+-	0xbf0a6f7d, 0xbfa2ffea,
+-	0xc405006e, 0x008ce800,
+-	0x00000000, 0xc405006e,
+-	0x008ce801, 0x00008000,
+-	0xc405006e, 0x008ce802,
+-	0x00010000, 0xc405006e,
+-	0x008ce803, 0x00018000,
+-	0xbf8a0000, 0xbfa0003d,
+ 	0xbef600ff, 0x01000000,
+-	0xbeee0078, 0x8078ff78,
+-	0x00000400, 0xbefd0084,
+-	0xbf0a6f7d, 0xbfa10016,
+-	0xc4050078, 0x008ce800,
+-	0x00000000, 0xc4050078,
+-	0x008ce801, 0x00010000,
+-	0xc4050078, 0x008ce802,
+-	0x00020000, 0xc4050078,
+-	0x008ce803, 0x00030000,
+-	0xbf8a0000, 0x7e008500,
+-	0x7e028501, 0x7e048502,
+-	0x7e068503, 0x807d847d,
+-	0x8078ff78, 0x00000400,
+-	0xbf0a6f7d, 0xbfa2ffea,
+-	0xb8ef1e06, 0x8b6fc16f,
+-	0xbfa1000f, 0x846f836f,
+-	0x806f7d6f, 0xbefe00c1,
+-	0xbeff0080, 0xc4050078,
+-	0x008ce800, 0x00000000,
+-	0xbf8a0000, 0x7e008500,
+-	0x807d817d, 0x8078ff78,
+-	0x00000080, 0xbf0a6f7d,
+-	0xbfa2fff6, 0xbeff00c1,
+-	0xc405006e, 0x008ce800,
+-	0x00000000, 0xc405006e,
+-	0x008ce801, 0x00010000,
+-	0xc405006e, 0x008ce802,
+-	0x00020000, 0xc405006e,
+-	0x008ce803, 0x00030000,
+-	0xbf8a0000, 0xb8f83b05,
+-	0x80788178, 0xbf0d9972,
+-	0xbfa20002, 0x84788978,
+-	0xbfa00001, 0x84788a78,
+-	0xb8ee1e06, 0x846e8a6e,
+-	0x80786e78, 0x8078ff78,
+-	0x00000200, 0x80f8ff78,
+-	0x00000050, 0xbef600ff,
+-	0x01000000, 0xbefd00ff,
+-	0x0000006c, 0x80f89078,
+-	0xf462403a, 0xf0000000,
+-	0xbf8a0000, 0x80fd847d,
+-	0xbf800000, 0xbe804300,
+-	0xbe824302, 0x80f8a078,
+-	0xf462603a, 0xf0000000,
+-	0xbf8a0000, 0x80fd887d,
+-	0xbf800000, 0xbe804300,
+-	0xbe824302, 0xbe844304,
+-	0xbe864306, 0x80f8c078,
+-	0xf462803a, 0xf0000000,
+-	0xbf8a0000, 0x80fd907d,
+-	0xbf800000, 0xbe804300,
+-	0xbe824302, 0xbe844304,
+-	0xbe864306, 0xbe884308,
+-	0xbe8a430a, 0xbe8c430c,
+-	0xbe8e430e, 0xbf06807d,
+-	0xbfa1fff0, 0xb980f801,
+-	0x00000000, 0xb8f83b05,
+-	0x80788178, 0xbf0d9972,
+-	0xbfa20002, 0x84788978,
+-	0xbfa00001, 0x84788a78,
+-	0xb8ee1e06, 0x846e8a6e,
+-	0x80786e78, 0x8078ff78,
+-	0x00000200, 0xbef600ff,
+-	0x01000000, 0xbeff0071,
+-	0xf4621bfa, 0xf0000000,
+-	0x80788478, 0xf4621b3a,
++	0xbeff0071, 0xf4621bfa,
+ 	0xf0000000, 0x80788478,
+-	0xf4621b7a, 0xf0000000,
+-	0x80788478, 0xf4621c3a,
++	0xf4621b3a, 0xf0000000,
++	0x80788478, 0xf4621b7a,
+ 	0xf0000000, 0x80788478,
+-	0xf4621c7a, 0xf0000000,
+-	0x80788478, 0xf4621eba,
++	0xf4621c3a, 0xf0000000,
++	0x80788478, 0xf4621c7a,
+ 	0xf0000000, 0x80788478,
+-	0xf4621efa, 0xf0000000,
+-	0x80788478, 0xf4621e7a,
++	0xf4621eba, 0xf0000000,
++	0x80788478, 0xf4621efa,
+ 	0xf0000000, 0x80788478,
+-	0xf4621cfa, 0xf0000000,
+-	0x80788478, 0xf4621bba,
++	0xf4621e7a, 0xf0000000,
++	0x80788478, 0xf4621cfa,
+ 	0xf0000000, 0x80788478,
+-	0xbf8a0000, 0xb96ef814,
+ 	0xf4621bba, 0xf0000000,
+ 	0x80788478, 0xbf8a0000,
+-	0xb96ef815, 0xf4621bba,
++	0xb96ef814, 0xf4621bba,
+ 	0xf0000000, 0x80788478,
+-	0xbf8a0000, 0xb96ef812,
++	0xbf8a0000, 0xb96ef815,
+ 	0xf4621bba, 0xf0000000,
+ 	0x80788478, 0xbf8a0000,
+-	0xb96ef813, 0x8b6eff7f,
+-	0x04000000, 0xbfa1000d,
+-	0x80788478, 0xf4621bba,
++	0xb96ef812, 0xf4621bba,
+ 	0xf0000000, 0x80788478,
+-	0xbf8a0000, 0xbf0d806e,
+-	0xbfa10006, 0x856e906e,
+-	0x8b6e6e6e, 0xbfa10003,
+-	0xbe804ec1, 0x816ec16e,
+-	0xbfa0fffb, 0xbefd006f,
+-	0xbefe0070, 0xbeff0071,
+-	0xb97b2011, 0x857b867b,
+-	0xb97b0191, 0x857b827b,
+-	0xb97bba11, 0xb973f801,
+-	0xb8ee3b05, 0x806e816e,
+-	0xbf0d9972, 0xbfa20002,
+-	0x846e896e, 0xbfa00001,
+-	0x846e8a6e, 0xb8ef1e06,
+-	0x846f8a6f, 0x806e6f6e,
+-	0x806eff6e, 0x00000200,
+-	0x806e746e, 0x826f8075,
+-	0x8b6fff6f, 0x0000ffff,
+-	0xf4605c37, 0xf8000050,
+-	0xf4605d37, 0xf8000060,
+-	0xf4601e77, 0xf8000074,
+-	0xbf8a0000, 0x8b6dff6d,
+-	0x0000ffff, 0x8bfe7e7e,
+-	0x8bea6a6a, 0xb97af804,
++	0xbf8a0000, 0xb96ef813,
++	0x8b6eff7f, 0x04000000,
++	0xbfa1000d, 0x80788478,
++	0xf4621bba, 0xf0000000,
++	0x80788478, 0xbf8a0000,
++	0xbf0d806e, 0xbfa10006,
++	0x856e906e, 0x8b6e6e6e,
++	0xbfa10003, 0xbe804ec1,
++	0x816ec16e, 0xbfa0fffb,
++	0xbefd006f, 0xbefe0070,
++	0xbeff0071, 0xb97b2011,
++	0x857b867b, 0xb97b0191,
++	0x857b827b, 0xb97bba11,
++	0xb973f801, 0xb8ee3b05,
++	0x806e816e, 0xbf0d9972,
++	0xbfa20002, 0x846e896e,
++	0xbfa00001, 0x846e8a6e,
++	0xb8ef1e06, 0x846f8a6f,
++	0x806e6f6e, 0x806eff6e,
++	0x00000200, 0x806e746e,
++	0x826f8075, 0x8b6fff6f,
++	0x0000ffff, 0xf4605c37,
++	0xf8000050, 0xf4605d37,
++	0xf8000060, 0xf4601e77,
++	0xf8000074, 0xbf8a0000,
++	0x8b6dff6d, 0x0000ffff,
++	0x8bfe7e7e, 0x8bea6a6a,
++	0xb97af804, 0xbe804ec2,
++	0xbf94fffe, 0xbe804a6c,
+ 	0xbe804ec2, 0xbf94fffe,
+-	0xbe804a6c, 0xbe804ec2,
+-	0xbf94fffe, 0xbfb10000,
++	0xbfb10000, 0xbf9f0000,
+ 	0xbf9f0000, 0xbf9f0000,
+ 	0xbf9f0000, 0xbf9f0000,
+-	0xbf9f0000, 0x00000000,
+ };
+diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm
+index 7b9d36e5fa4372..5a1a1b1f897fe3 100644
+--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm
++++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm
+@@ -30,6 +30,7 @@
+ #define CHIP_GFX12 37
+ 
+ #define SINGLE_STEP_MISSED_WORKAROUND 1	//workaround for lost TRAP_AFTER_INST exception when SAVECTX raised
++#define HAVE_VALU_SGPR_HAZARD (ASIC_FAMILY == CHIP_GFX12)
+ 
+ var SQ_WAVE_STATE_PRIV_BARRIER_COMPLETE_MASK	= 0x4
+ var SQ_WAVE_STATE_PRIV_SCC_SHIFT		= 9
+@@ -351,6 +352,7 @@ L_HAVE_VGPRS:
+ 	v_writelane_b32	v0, ttmp13, 0xD
+ 	v_writelane_b32	v0, exec_lo, 0xE
+ 	v_writelane_b32	v0, exec_hi, 0xF
++	valu_sgpr_hazard()
+ 
+ 	s_mov_b32	exec_lo, 0x3FFF
+ 	s_mov_b32	exec_hi, 0x0
+@@ -417,7 +419,6 @@ L_SAVE_HWREG:
+ 	v_mov_b32	v0, 0x0							//Offset[31:0] from buffer resource
+ 	v_mov_b32	v1, 0x0							//Offset[63:32] from buffer resource
+ 	v_mov_b32	v2, 0x0							//Set of SGPRs for TCP store
+-	s_mov_b32	m0, 0x0							//Next lane of v2 to write to
+ 
+ 	// Ensure no further changes to barrier or LDS state.
+ 	// STATE_PRIV.BARRIER_COMPLETE may change up to this point.
+@@ -430,40 +431,41 @@ L_SAVE_HWREG:
+ 	s_andn2_b32	s_save_state_priv, s_save_state_priv, SQ_WAVE_STATE_PRIV_BARRIER_COMPLETE_MASK
+ 	s_or_b32	s_save_state_priv, s_save_state_priv, s_save_tmp
+ 
+-	write_hwreg_to_v2(s_save_m0)
+-	write_hwreg_to_v2(s_save_pc_lo)
+ 	s_andn2_b32	s_save_tmp, s_save_pc_hi, S_SAVE_PC_HI_FIRST_WAVE_MASK
+-	write_hwreg_to_v2(s_save_tmp)
+-	write_hwreg_to_v2(s_save_exec_lo)
+-	write_hwreg_to_v2(s_save_exec_hi)
+-	write_hwreg_to_v2(s_save_state_priv)
++	v_writelane_b32	v2, s_save_m0, 0x0
++	v_writelane_b32	v2, s_save_pc_lo, 0x1
++	v_writelane_b32	v2, s_save_tmp, 0x2
++	v_writelane_b32	v2, s_save_exec_lo, 0x3
++	v_writelane_b32	v2, s_save_exec_hi, 0x4
++	v_writelane_b32	v2, s_save_state_priv, 0x5
++	v_writelane_b32	v2, s_save_xnack_mask, 0x7
++	valu_sgpr_hazard()
+ 
+ 	s_getreg_b32	s_save_tmp, hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV)
+-	write_hwreg_to_v2(s_save_tmp)
++	v_writelane_b32	v2, s_save_tmp, 0x6
+ 
+-	write_hwreg_to_v2(s_save_xnack_mask)
++	s_getreg_b32	s_save_tmp, hwreg(HW_REG_WAVE_MODE)
++	v_writelane_b32	v2, s_save_tmp, 0x8
+ 
+-	s_getreg_b32	s_save_m0, hwreg(HW_REG_WAVE_MODE)
+-	write_hwreg_to_v2(s_save_m0)
++	s_getreg_b32	s_save_tmp, hwreg(HW_REG_WAVE_SCRATCH_BASE_LO)
++	v_writelane_b32	v2, s_save_tmp, 0x9
+ 
+-	s_getreg_b32	s_save_m0, hwreg(HW_REG_WAVE_SCRATCH_BASE_LO)
+-	write_hwreg_to_v2(s_save_m0)
++	s_getreg_b32	s_save_tmp, hwreg(HW_REG_WAVE_SCRATCH_BASE_HI)
++	v_writelane_b32	v2, s_save_tmp, 0xA
+ 
+-	s_getreg_b32	s_save_m0, hwreg(HW_REG_WAVE_SCRATCH_BASE_HI)
+-	write_hwreg_to_v2(s_save_m0)
++	s_getreg_b32	s_save_tmp, hwreg(HW_REG_WAVE_EXCP_FLAG_USER)
++	v_writelane_b32	v2, s_save_tmp, 0xB
+ 
+-	s_getreg_b32	s_save_m0, hwreg(HW_REG_WAVE_EXCP_FLAG_USER)
+-	write_hwreg_to_v2(s_save_m0)
+-
+-	s_getreg_b32	s_save_m0, hwreg(HW_REG_WAVE_TRAP_CTRL)
+-	write_hwreg_to_v2(s_save_m0)
++	s_getreg_b32	s_save_tmp, hwreg(HW_REG_WAVE_TRAP_CTRL)
++	v_writelane_b32	v2, s_save_tmp, 0xC
+ 
+ 	s_getreg_b32	s_save_tmp, hwreg(HW_REG_WAVE_STATUS)
+-	write_hwreg_to_v2(s_save_tmp)
++	v_writelane_b32	v2, s_save_tmp, 0xD
+ 
+ 	s_get_barrier_state s_save_tmp, -1
+ 	s_wait_kmcnt (0)
+-	write_hwreg_to_v2(s_save_tmp)
++	v_writelane_b32	v2, s_save_tmp, 0xE
++	valu_sgpr_hazard()
+ 
+ 	// Write HWREGs with 16 VGPR lanes. TTMPs occupy space after this.
+ 	s_mov_b32       exec_lo, 0xFFFF
+@@ -497,10 +499,12 @@ L_SAVE_SGPR_LOOP:
+ 	s_movrels_b64	s12, s12						//s12 = s[12+m0], s13 = s[13+m0]
+ 	s_movrels_b64	s14, s14						//s14 = s[14+m0], s15 = s[15+m0]
+ 
+-	write_16sgpr_to_v2(s0)
+-
+-	s_cmp_eq_u32	ttmp13, 0x20						//have 32 VGPR lanes filled?
+-	s_cbranch_scc0	L_SAVE_SGPR_SKIP_TCP_STORE
++	s_cmp_eq_u32	ttmp13, 0x0
++	s_cbranch_scc0	L_WRITE_V2_SECOND_HALF
++	write_16sgpr_to_v2(s0, 0x0)
++	s_branch	L_SAVE_SGPR_SKIP_TCP_STORE
++L_WRITE_V2_SECOND_HALF:
++	write_16sgpr_to_v2(s0, 0x10)
+ 
+ 	buffer_store_dword	v2, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS
+ 	s_add_u32	s_save_mem_offset, s_save_mem_offset, 0x80
+@@ -1056,27 +1060,21 @@ L_END_PGM:
+ 	s_endpgm_saved
+ end
+ 
+-function write_hwreg_to_v2(s)
+-	// Copy into VGPR for later TCP store.
+-	v_writelane_b32	v2, s, m0
+-	s_add_u32	m0, m0, 0x1
+-end
+-
+-
+-function write_16sgpr_to_v2(s)
++function write_16sgpr_to_v2(s, lane_offset)
+ 	// Copy into VGPR for later TCP store.
+ 	for var sgpr_idx = 0; sgpr_idx < 16; sgpr_idx ++
+-		v_writelane_b32	v2, s[sgpr_idx], ttmp13
+-		s_add_u32	ttmp13, ttmp13, 0x1
++		v_writelane_b32	v2, s[sgpr_idx], sgpr_idx + lane_offset
+ 	end
++	valu_sgpr_hazard()
++	s_add_u32	ttmp13, ttmp13, 0x10
+ end
+ 
+ function write_12sgpr_to_v2(s)
+ 	// Copy into VGPR for later TCP store.
+ 	for var sgpr_idx = 0; sgpr_idx < 12; sgpr_idx ++
+-		v_writelane_b32	v2, s[sgpr_idx], ttmp13
+-		s_add_u32	ttmp13, ttmp13, 0x1
++		v_writelane_b32	v2, s[sgpr_idx], sgpr_idx
+ 	end
++	valu_sgpr_hazard()
+ end
+ 
+ function read_hwreg_from_mem(s, s_rsrc, s_mem_offset)
+@@ -1128,3 +1126,11 @@ function get_wave_size2(s_reg)
+ 	s_getreg_b32	s_reg, hwreg(HW_REG_WAVE_STATUS,SQ_WAVE_STATUS_WAVE64_SHIFT,SQ_WAVE_STATUS_WAVE64_SIZE)
+ 	s_lshl_b32	s_reg, s_reg, S_WAVE_SIZE
+ end
++
++function valu_sgpr_hazard
++#if HAVE_VALU_SGPR_HAZARD
++	for var rep = 0; rep < 8; rep ++
++		ds_nop
++	end
++#endif
++end
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+index 9186ef0bd2a32a..07eadab4c1c4d8 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+@@ -537,7 +537,8 @@ static void kfd_cwsr_init(struct kfd_dev *kfd)
+ 			kfd->cwsr_isa = cwsr_trap_gfx11_hex;
+ 			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx11_hex);
+ 		} else {
+-			BUILD_BUG_ON(sizeof(cwsr_trap_gfx12_hex) > PAGE_SIZE);
++			BUILD_BUG_ON(sizeof(cwsr_trap_gfx12_hex)
++					     > KFD_CWSR_TMA_OFFSET);
+ 			kfd->cwsr_isa = cwsr_trap_gfx12_hex;
+ 			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx12_hex);
+ 		}
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+index ea379224920935..6798510c4a707d 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+@@ -1315,6 +1315,7 @@ void kfd_signal_poison_consumed_event(struct kfd_node *dev, u32 pasid)
+ 	user_gpu_id = kfd_process_get_user_gpu_id(p, dev->id);
+ 	if (unlikely(user_gpu_id == -EINVAL)) {
+ 		WARN_ONCE(1, "Could not get user_gpu_id from dev->id:%x\n", dev->id);
++		kfd_unref_process(p);
+ 		return;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
+index 1f9f5bfeaf8680..d87b895660c21c 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
+@@ -237,7 +237,7 @@ static int pm_map_queues_v9(struct packet_manager *pm, uint32_t *buffer,
+ 
+ 	packet->bitfields2.engine_sel =
+ 		engine_sel__mes_map_queues__compute_vi;
+-	packet->bitfields2.gws_control_queue = q->gws ? 1 : 0;
++	packet->bitfields2.gws_control_queue = q->properties.is_gws ? 1 : 0;
+ 	packet->bitfields2.extended_engine_sel =
+ 		extended_engine_sel__mes_map_queues__legacy_engine_sel;
+ 	packet->bitfields2.queue_type =
+diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
+index 7e3d506bb79b9f..f3aa93ddbf9c92 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
++++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
+@@ -762,6 +762,7 @@ static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dm
+ 		plane->pixel_format = dml2_420_10;
+ 		break;
+ 	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
++	case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
+ 	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
+ 	case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
+ 		plane->pixel_format = dml2_444_64;
+diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
+index 0090b7bc232bfd..157903115f3b4c 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
++++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
+@@ -4651,7 +4651,10 @@ static void calculate_tdlut_setting(
+ 	//the tdlut is fetched during the 2 row times of prefetch.
+ 	if (p->setup_for_tdlut) {
+ 		*p->tdlut_groups_per_2row_ub = (unsigned int)math_ceil2((double) *p->tdlut_bytes_per_frame / *p->tdlut_bytes_per_group, 1);
+-		*p->tdlut_opt_time = (*p->tdlut_bytes_per_frame - p->cursor_buffer_size * 1024) / tdlut_drain_rate;
++		if (*p->tdlut_bytes_per_frame > p->cursor_buffer_size * 1024)
++			*p->tdlut_opt_time = (*p->tdlut_bytes_per_frame - p->cursor_buffer_size * 1024) / tdlut_drain_rate;
++		else
++			*p->tdlut_opt_time = 0;
+ 		*p->tdlut_drain_time = p->cursor_buffer_size * 1024 / tdlut_drain_rate;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
+index 92a3fff1e26165..405aefd14d9b4b 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
++++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
+@@ -909,6 +909,7 @@ static void populate_dml_surface_cfg_from_plane_state(enum dml_project_id dml2_p
+ 		out->SourcePixelFormat[location] = dml_420_10;
+ 		break;
+ 	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
++	case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
+ 	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
+ 	case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
+ 		out->SourcePixelFormat[location] = dml_444_64;
+diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
+index 59457ca24e1dc3..03b22e9115ea8a 100644
+--- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
+@@ -951,8 +951,8 @@ void dce110_edp_backlight_control(
+ 	struct dc_context *ctx = link->ctx;
+ 	struct bp_transmitter_control cntl = { 0 };
+ 	uint8_t pwrseq_instance = 0;
+-	unsigned int pre_T11_delay = OLED_PRE_T11_DELAY;
+-	unsigned int post_T7_delay = OLED_POST_T7_DELAY;
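++	/* The T7/T11 power-sequencing delays apply to OLED panels only. */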
++	unsigned int pre_T11_delay = (link->dpcd_sink_ext_caps.bits.oled ? OLED_PRE_T11_DELAY : 0);
++	unsigned int post_T7_delay = (link->dpcd_sink_ext_caps.bits.oled ? OLED_POST_T7_DELAY : 0);
+ 
+ 	if (dal_graphics_object_id_get_connector_id(link->link_enc->connector)
+ 		!= CONNECTOR_ID_EDP) {
+@@ -1067,7 +1067,8 @@ void dce110_edp_backlight_control(
+ 	if (!enable) {
+ 		/*follow oem panel config's requirement*/
+ 		pre_T11_delay += link->panel_config.pps.extra_pre_t11_ms;
+-		msleep(pre_T11_delay);
++		if (pre_T11_delay)
++			msleep(pre_T11_delay);
+ 	}
+ }
+ 
+@@ -1216,7 +1217,7 @@ void dce110_blank_stream(struct pipe_ctx *pipe_ctx)
+ 	struct dce_hwseq *hws = link->dc->hwseq;
+ 
+ 	if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
+-		if (!link->skip_implict_edp_power_control)
++		if (!link->skip_implict_edp_power_control && hws)
+ 			hws->funcs.edp_backlight_control(link, false);
+ 		link->dc->hwss.set_abm_immediate_disable(pipe_ctx);
+ 	}
+diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+index 8c137d7c032e1f..e58e7b93810be7 100644
+--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
++++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+@@ -368,6 +368,9 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_encryption(struct mod_hdcp *hdcp)
+ 	struct mod_hdcp_display *display = get_first_active_display(hdcp);
+ 	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ 
++	if (!display)
++		return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
++
+ 	mutex_lock(&psp->hdcp_context.mutex);
+ 	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
+ 	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
+index 24ed1cd3caf17a..162dc0698f4ac3 100644
+--- a/drivers/gpu/drm/ast/ast_mode.c
++++ b/drivers/gpu/drm/ast/ast_mode.c
+@@ -1319,9 +1319,9 @@ static void ast_mode_config_helper_atomic_commit_tail(struct drm_atomic_state *s
+ 
+ 	/*
+ 	 * Concurrent operations could possibly trigger a call to
+-	 * drm_connector_helper_funcs.get_modes by trying to read the
+-	 * display modes. Protect access to I/O registers by acquiring
+-	 * the I/O-register lock. Released in atomic_flush().
++	 * drm_connector_helper_funcs.get_modes by reading the display
++	 * modes. Protect access to registers by acquiring the modeset
++	 * lock.
+ 	 */
+ 	mutex_lock(&ast->modeset_lock);
+ 	drm_atomic_helper_commit_tail(state);
+diff --git a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c
+index 7457d38622b0c7..89eed0668bfb24 100644
+--- a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c
++++ b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c
+@@ -568,15 +568,18 @@ static int cdns_dsi_check_conf(struct cdns_dsi *dsi,
+ 	struct phy_configure_opts_mipi_dphy *phy_cfg = &output->phy_opts.mipi_dphy;
+ 	unsigned long dsi_hss_hsa_hse_hbp;
+ 	unsigned int nlanes = output->dev->lanes;
++	int mode_clock = (mode_valid_check ? mode->clock : mode->crtc_clock);
+ 	int ret;
+ 
+ 	ret = cdns_dsi_mode2cfg(dsi, mode, dsi_cfg, mode_valid_check);
+ 	if (ret)
+ 		return ret;
+ 
+-	phy_mipi_dphy_get_default_config(mode->crtc_clock * 1000,
+-					 mipi_dsi_pixel_format_to_bpp(output->dev->format),
+-					 nlanes, phy_cfg);
++	ret = phy_mipi_dphy_get_default_config(mode_clock * 1000,
++					       mipi_dsi_pixel_format_to_bpp(output->dev->format),
++					       nlanes, phy_cfg);
++	if (ret)
++		return ret;
+ 
+ 	ret = cdns_dsi_adjust_phy_config(dsi, dsi_cfg, phy_cfg, mode, mode_valid_check);
+ 	if (ret)
+@@ -680,6 +683,11 @@ static void cdns_dsi_bridge_post_disable(struct drm_bridge *bridge)
+ 	struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
+ 	struct cdns_dsi *dsi = input_to_dsi(input);
+ 
++	dsi->phy_initialized = false;
++	dsi->link_initialized = false;
++	phy_power_off(dsi->dphy);
++	phy_exit(dsi->dphy);
++
+ 	pm_runtime_put(dsi->base.dev);
+ }
+ 
+@@ -761,7 +769,7 @@ static void cdns_dsi_bridge_enable(struct drm_bridge *bridge)
+ 	struct phy_configure_opts_mipi_dphy *phy_cfg = &output->phy_opts.mipi_dphy;
+ 	unsigned long tx_byte_period;
+ 	struct cdns_dsi_cfg dsi_cfg;
+-	u32 tmp, reg_wakeup, div;
++	u32 tmp, reg_wakeup, div, status;
+ 	int nlanes;
+ 
+ 	if (WARN_ON(pm_runtime_get_sync(dsi->base.dev) < 0))
+@@ -778,6 +786,19 @@ static void cdns_dsi_bridge_enable(struct drm_bridge *bridge)
+ 	cdns_dsi_hs_init(dsi);
+ 	cdns_dsi_init_link(dsi);
+ 
++	/*
++	 * Now that the DSI Link and DSI Phy are initialized,
++	 * wait for the CLK and Data Lanes to be ready.
++	 */
++	tmp = CLK_LANE_RDY;
++	for (int i = 0; i < nlanes; i++)
++		tmp |= DATA_LANE_RDY(i);
++
++	if (readl_poll_timeout(dsi->regs + MCTL_MAIN_STS, status,
++			       (tmp == (status & tmp)), 100, 500000))
++		dev_err(dsi->base.dev,
++			"Timed out: DSI D-PHY clock and data lanes not ready\n");
++
+ 	writel(HBP_LEN(dsi_cfg.hbp) | HSA_LEN(dsi_cfg.hsa),
+ 	       dsi->regs + VID_HSIZE1);
+ 	writel(HFP_LEN(dsi_cfg.hfp) | HACT_LEN(dsi_cfg.hact),
+@@ -952,7 +973,7 @@ static int cdns_dsi_attach(struct mipi_dsi_host *host,
+ 		bridge = drm_panel_bridge_add_typed(panel,
+ 						    DRM_MODE_CONNECTOR_DSI);
+ 	} else {
+-		bridge = of_drm_find_bridge(dev->dev.of_node);
++		bridge = of_drm_find_bridge(np);
+ 		if (!bridge)
+ 			bridge = ERR_PTR(-EINVAL);
+ 	}
+@@ -1152,7 +1173,6 @@ static int __maybe_unused cdns_dsi_suspend(struct device *dev)
+ 	clk_disable_unprepare(dsi->dsi_sys_clk);
+ 	clk_disable_unprepare(dsi->dsi_p_clk);
+ 	reset_control_assert(dsi->dsi_p_rst);
+-	dsi->link_initialized = false;
+ 	return 0;
+ }
+ 
+diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+index 95ce50ed53acf6..5500767cda7e4f 100644
+--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
++++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+@@ -331,12 +331,18 @@ static void ti_sn65dsi86_enable_comms(struct ti_sn65dsi86 *pdata)
+ 	 * 200 ms.  We'll assume that the panel driver will have the hardcoded
+ 	 * delay in its prepare and always disable HPD.
+ 	 *
+-	 * If HPD somehow makes sense on some future panel we'll have to
+-	 * change this to be conditional on someone specifying that HPD should
+-	 * be used.
++	 * For DisplayPort bridge type, we need HPD. So we use the bridge type
++	 * to conditionally disable HPD.
++	 * NOTE: The bridge type is set in ti_sn_bridge_probe() but enable_comms()
++	 * can be called before. So for DisplayPort, HPD will be enabled once
++	 * bridge type is set. We are using bridge type instead of "no-hpd"
++	 * property because it is not used properly in devicetree description
++	 * and hence is unreliable.
+ 	 */
+-	regmap_update_bits(pdata->regmap, SN_HPD_DISABLE_REG, HPD_DISABLE,
+-			   HPD_DISABLE);
++
++	if (pdata->bridge.type != DRM_MODE_CONNECTOR_DisplayPort)
++		regmap_update_bits(pdata->regmap, SN_HPD_DISABLE_REG, HPD_DISABLE,
++				   HPD_DISABLE);
+ 
+ 	pdata->comms_enabled = true;
+ 
+@@ -424,36 +430,8 @@ static int status_show(struct seq_file *s, void *data)
+ 
+ 	return 0;
+ }
+-
+ DEFINE_SHOW_ATTRIBUTE(status);
+ 
+-static void ti_sn65dsi86_debugfs_remove(void *data)
+-{
+-	debugfs_remove_recursive(data);
+-}
+-
+-static void ti_sn65dsi86_debugfs_init(struct ti_sn65dsi86 *pdata)
+-{
+-	struct device *dev = pdata->dev;
+-	struct dentry *debugfs;
+-	int ret;
+-
+-	debugfs = debugfs_create_dir(dev_name(dev), NULL);
+-
+-	/*
+-	 * We might get an error back if debugfs wasn't enabled in the kernel
+-	 * so let's just silently return upon failure.
+-	 */
+-	if (IS_ERR_OR_NULL(debugfs))
+-		return;
+-
+-	ret = devm_add_action_or_reset(dev, ti_sn65dsi86_debugfs_remove, debugfs);
+-	if (ret)
+-		return;
+-
+-	debugfs_create_file("status", 0600, debugfs, pdata, &status_fops);
+-}
+-
+ /* -----------------------------------------------------------------------------
+  * Auxiliary Devices (*not* AUX)
+  */
+@@ -1201,9 +1179,14 @@ static enum drm_connector_status ti_sn_bridge_detect(struct drm_bridge *bridge)
+ 	struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
+ 	int val = 0;
+ 
+-	pm_runtime_get_sync(pdata->dev);
++	/*
++	 * Runtime reference is grabbed in ti_sn_bridge_hpd_enable()
++	 * as the chip won't report HPD just after being powered on.
++	 * HPD_DEBOUNCED_STATE reflects correct state only after the
++	 * debounce time (~100-400 ms).
++	 */
++
+ 	regmap_read(pdata->regmap, SN_HPD_DISABLE_REG, &val);
+-	pm_runtime_put_autosuspend(pdata->dev);
+ 
+ 	return val & HPD_DEBOUNCED_STATE ? connector_status_connected
+ 					 : connector_status_disconnected;
+@@ -1217,6 +1200,35 @@ static const struct drm_edid *ti_sn_bridge_edid_read(struct drm_bridge *bridge,
+ 	return drm_edid_read_ddc(connector, &pdata->aux.ddc);
+ }
+ 
++static void ti_sn65dsi86_debugfs_init(struct drm_bridge *bridge, struct dentry *root)
++{
++	struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
++	struct dentry *debugfs;
++
++	debugfs = debugfs_create_dir(dev_name(pdata->dev), root);
++	debugfs_create_file("status", 0600, debugfs, pdata, &status_fops);
++}
++
++static void ti_sn_bridge_hpd_enable(struct drm_bridge *bridge)
++{
++	struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
++
++	/*
++	 * Device needs to be powered on before reading the HPD state
++	 * for reliable hpd detection in ti_sn_bridge_detect() due to
++	 * the high debounce time.
++	 */
++
++	pm_runtime_get_sync(pdata->dev);
++}
++
++static void ti_sn_bridge_hpd_disable(struct drm_bridge *bridge)
++{
++	struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
++
++	pm_runtime_put_autosuspend(pdata->dev);
++}
++
+ static const struct drm_bridge_funcs ti_sn_bridge_funcs = {
+ 	.attach = ti_sn_bridge_attach,
+ 	.detach = ti_sn_bridge_detach,
+@@ -1230,6 +1242,9 @@ static const struct drm_bridge_funcs ti_sn_bridge_funcs = {
+ 	.atomic_reset = drm_atomic_helper_bridge_reset,
+ 	.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ 	.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
++	.debugfs_init = ti_sn65dsi86_debugfs_init,
++	.hpd_enable = ti_sn_bridge_hpd_enable,
++	.hpd_disable = ti_sn_bridge_hpd_disable,
+ };
+ 
+ static void ti_sn_bridge_parse_lanes(struct ti_sn65dsi86 *pdata,
+@@ -1318,8 +1333,26 @@ static int ti_sn_bridge_probe(struct auxiliary_device *adev,
+ 	pdata->bridge.type = pdata->next_bridge->type == DRM_MODE_CONNECTOR_DisplayPort
+ 			   ? DRM_MODE_CONNECTOR_DisplayPort : DRM_MODE_CONNECTOR_eDP;
+ 
+-	if (pdata->bridge.type == DRM_MODE_CONNECTOR_DisplayPort)
+-		pdata->bridge.ops = DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_DETECT;
++	if (pdata->bridge.type == DRM_MODE_CONNECTOR_DisplayPort) {
++		pdata->bridge.ops = DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_DETECT |
++				    DRM_BRIDGE_OP_HPD;
++		/*
++		 * If comms were already enabled they would have been enabled
++		 * with the wrong value of HPD_DISABLE. Update it now. Comms
++		 * could be enabled if anyone is holding a pm_runtime reference
++		 * (like if a GPIO is in use). Note that in most cases nobody
++		 * is doing AUX channel xfers before the bridge is added so
++		 * HPD doesn't _really_ matter then. The only exception is in
++		 * the eDP case where the panel wants to read the EDID before
++		 * the bridge is added. We always consistently have HPD disabled
++		 * for eDP.
++		 */
++		mutex_lock(&pdata->comms_mutex);
++		if (pdata->comms_enabled)
++			regmap_update_bits(pdata->regmap, SN_HPD_DISABLE_REG,
++					   HPD_DISABLE, 0);
++		mutex_unlock(&pdata->comms_mutex);
++	}
+ 
+ 	drm_bridge_add(&pdata->bridge);
+ 
+@@ -1938,8 +1971,6 @@ static int ti_sn65dsi86_probe(struct i2c_client *client)
+ 	if (ret)
+ 		return ret;
+ 
+-	ti_sn65dsi86_debugfs_init(pdata);
+-
+ 	/*
+ 	 * Break ourselves up into a collection of aux devices. The only real
+ 	 * motivation here is to solve the chicken-and-egg problem of probe
+diff --git a/drivers/gpu/drm/drm_fbdev_dma.c b/drivers/gpu/drm/drm_fbdev_dma.c
+index 7c8287c18e381f..6fcf2a8bf67627 100644
+--- a/drivers/gpu/drm/drm_fbdev_dma.c
++++ b/drivers/gpu/drm/drm_fbdev_dma.c
+@@ -1,6 +1,7 @@
+ // SPDX-License-Identifier: MIT
+ 
+ #include <linux/fb.h>
++#include <linux/vmalloc.h>
+ 
+ #include <drm/drm_crtc_helper.h>
+ #include <drm/drm_drv.h>
+@@ -72,43 +73,108 @@ static const struct fb_ops drm_fbdev_dma_fb_ops = {
+ 	.fb_destroy = drm_fbdev_dma_fb_destroy,
+ };
+ 
+-FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(drm_fbdev_dma,
++FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(drm_fbdev_dma_shadowed,
+ 				   drm_fb_helper_damage_range,
+ 				   drm_fb_helper_damage_area);
+ 
+-static int drm_fbdev_dma_deferred_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
++static void drm_fbdev_dma_shadowed_fb_destroy(struct fb_info *info)
+ {
+ 	struct drm_fb_helper *fb_helper = info->par;
+-	struct drm_framebuffer *fb = fb_helper->fb;
+-	struct drm_gem_dma_object *dma = drm_fb_dma_get_gem_obj(fb, 0);
++	void *shadow = info->screen_buffer;
++
++	if (!fb_helper->dev)
++		return;
+ 
+-	if (!dma->map_noncoherent)
+-		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
++	if (info->fbdefio)
++		fb_deferred_io_cleanup(info);
++	drm_fb_helper_fini(fb_helper);
++	vfree(shadow);
+ 
+-	return fb_deferred_io_mmap(info, vma);
++	drm_client_buffer_vunmap(fb_helper->buffer);
++	drm_client_framebuffer_delete(fb_helper->buffer);
++	drm_client_release(&fb_helper->client);
++	drm_fb_helper_unprepare(fb_helper);
++	kfree(fb_helper);
+ }
+ 
+-static const struct fb_ops drm_fbdev_dma_deferred_fb_ops = {
++static const struct fb_ops drm_fbdev_dma_shadowed_fb_ops = {
+ 	.owner = THIS_MODULE,
+ 	.fb_open = drm_fbdev_dma_fb_open,
+ 	.fb_release = drm_fbdev_dma_fb_release,
+-	__FB_DEFAULT_DEFERRED_OPS_RDWR(drm_fbdev_dma),
++	FB_DEFAULT_DEFERRED_OPS(drm_fbdev_dma_shadowed),
+ 	DRM_FB_HELPER_DEFAULT_OPS,
+-	__FB_DEFAULT_DEFERRED_OPS_DRAW(drm_fbdev_dma),
+-	.fb_mmap = drm_fbdev_dma_deferred_fb_mmap,
+-	.fb_destroy = drm_fbdev_dma_fb_destroy,
++	.fb_destroy = drm_fbdev_dma_shadowed_fb_destroy,
+ };
+ 
+ /*
+  * struct drm_fb_helper
+  */
+ 
++static void drm_fbdev_dma_damage_blit_real(struct drm_fb_helper *fb_helper,
++					   struct drm_clip_rect *clip,
++					   struct iosys_map *dst)
++{
++	struct drm_framebuffer *fb = fb_helper->fb;
++	size_t offset = clip->y1 * fb->pitches[0];
++	size_t len = clip->x2 - clip->x1;
++	unsigned int y;
++	void *src;
++
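++
++	/*
++	 * Sub-byte formats pack several pixels into one byte, so derive the
++	 * byte offset and span length from the bits-per-pixel of plane 0.
++	 */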
++	switch (drm_format_info_bpp(fb->format, 0)) {
++	case 1:
++		offset += clip->x1 / 8;
++		len = DIV_ROUND_UP(len + clip->x1 % 8, 8);
++		break;
++	case 2:
++		offset += clip->x1 / 4;
++		len = DIV_ROUND_UP(len + clip->x1 % 4, 4);
++		break;
++	case 4:
++		offset += clip->x1 / 2;
++		len = DIV_ROUND_UP(len + clip->x1 % 2, 2);
++		break;
++	default:
++		offset += clip->x1 * fb->format->cpp[0];
++		len *= fb->format->cpp[0];
++		break;
++	}
++
++	src = fb_helper->info->screen_buffer + offset;
++	iosys_map_incr(dst, offset); /* go to first pixel within clip rect */
++
++	for (y = clip->y1; y < clip->y2; y++) {
++		iosys_map_memcpy_to(dst, 0, src, len);
++		iosys_map_incr(dst, fb->pitches[0]);
++		src += fb->pitches[0];
++	}
++}
++
++static int drm_fbdev_dma_damage_blit(struct drm_fb_helper *fb_helper,
++				     struct drm_clip_rect *clip)
++{
++	struct drm_client_buffer *buffer = fb_helper->buffer;
++	struct iosys_map dst;
++
++	/*
++	 * For fbdev emulation, we only have to protect against fbdev modeset
++	 * operations. Nothing else will involve the client buffer's BO. So it
++	 * is sufficient to acquire struct drm_fb_helper.lock here.
++	 */
++	mutex_lock(&fb_helper->lock);
++
++	dst = buffer->map;
++	drm_fbdev_dma_damage_blit_real(fb_helper, clip, &dst);
++
++	mutex_unlock(&fb_helper->lock);
++
++	return 0;
++}
++
+ static int drm_fbdev_dma_helper_fb_probe(struct drm_fb_helper *fb_helper,
+ 					 struct drm_fb_helper_surface_size *sizes)
+ {
+ 	return drm_fbdev_dma_driver_fbdev_probe(fb_helper, sizes);
+ }
+-
+ static int drm_fbdev_dma_helper_fb_dirty(struct drm_fb_helper *helper,
+ 					 struct drm_clip_rect *clip)
+ {
+@@ -120,6 +186,10 @@ static int drm_fbdev_dma_helper_fb_dirty(struct drm_fb_helper *helper,
+ 		return 0;
+ 
+ 	if (helper->fb->funcs->dirty) {
++		ret = drm_fbdev_dma_damage_blit(helper, clip);
++		if (drm_WARN_ONCE(dev, ret, "Damage blitter failed: ret=%d\n", ret))
++			return ret;
++
+ 		ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
+ 		if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret))
+ 			return ret;
+@@ -137,14 +207,80 @@ static const struct drm_fb_helper_funcs drm_fbdev_dma_helper_funcs = {
+  * struct drm_fb_helper
+  */
+ 
++static int drm_fbdev_dma_driver_fbdev_probe_tail(struct drm_fb_helper *fb_helper,
++						 struct drm_fb_helper_surface_size *sizes)
++{
++	struct drm_device *dev = fb_helper->dev;
++	struct drm_client_buffer *buffer = fb_helper->buffer;
++	struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(buffer->gem);
++	struct drm_framebuffer *fb = fb_helper->fb;
++	struct fb_info *info = fb_helper->info;
++	struct iosys_map map = buffer->map;
++
++	info->fbops = &drm_fbdev_dma_fb_ops;
++
++	/* screen */
++	info->flags |= FBINFO_VIRTFB; /* system memory */
++	if (dma_obj->map_noncoherent)
++		info->flags |= FBINFO_READS_FAST; /* signal caching */
++	info->screen_size = sizes->surface_height * fb->pitches[0];
++	info->screen_buffer = map.vaddr;
++	if (!(info->flags & FBINFO_HIDE_SMEM_START)) {
++		if (!drm_WARN_ON(dev, is_vmalloc_addr(info->screen_buffer)))
++			info->fix.smem_start = page_to_phys(virt_to_page(info->screen_buffer));
++	}
++	info->fix.smem_len = info->screen_size;
++
++	return 0;
++}
++
++static int drm_fbdev_dma_driver_fbdev_probe_tail_shadowed(struct drm_fb_helper *fb_helper,
++							  struct drm_fb_helper_surface_size *sizes)
++{
++	struct drm_client_buffer *buffer = fb_helper->buffer;
++	struct fb_info *info = fb_helper->info;
++	size_t screen_size = buffer->gem->size;
++	void *screen_buffer;
++	int ret;
++
++	/*
++	 * Deferred I/O requires struct page for framebuffer memory,
++	 * which is not guaranteed for all DMA ranges. We thus create
++	 * a shadow buffer in system memory.
++	 */
++	screen_buffer = vzalloc(screen_size);
++	if (!screen_buffer)
++		return -ENOMEM;
++
++	info->fbops = &drm_fbdev_dma_shadowed_fb_ops;
++
++	/* screen */
++	info->flags |= FBINFO_VIRTFB; /* system memory */
++	info->flags |= FBINFO_READS_FAST; /* signal caching */
++	info->screen_buffer = screen_buffer;
++	info->fix.smem_len = screen_size;
++
++	fb_helper->fbdefio.delay = HZ / 20;
++	fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;
++
++	info->fbdefio = &fb_helper->fbdefio;
++	ret = fb_deferred_io_init(info);
++	if (ret)
++		goto err_vfree;
++
++	return 0;
++
++err_vfree:
++	vfree(screen_buffer);
++	return ret;
++}
++
+ int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
+ 				     struct drm_fb_helper_surface_size *sizes)
+ {
+ 	struct drm_client_dev *client = &fb_helper->client;
+ 	struct drm_device *dev = fb_helper->dev;
+-	bool use_deferred_io = false;
+ 	struct drm_client_buffer *buffer;
+-	struct drm_gem_dma_object *dma_obj;
+ 	struct drm_framebuffer *fb;
+ 	struct fb_info *info;
+ 	u32 format;
+@@ -161,19 +297,9 @@ int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
+ 					       sizes->surface_height, format);
+ 	if (IS_ERR(buffer))
+ 		return PTR_ERR(buffer);
+-	dma_obj = to_drm_gem_dma_obj(buffer->gem);
+ 
+ 	fb = buffer->fb;
+ 
+-	/*
+-	 * Deferred I/O requires struct page for framebuffer memory,
+-	 * which is not guaranteed for all DMA ranges. We thus only
+-	 * install deferred I/O if we have a framebuffer that requires
+-	 * it.
+-	 */
+-	if (fb->funcs->dirty)
+-		use_deferred_io = true;
+-
+ 	ret = drm_client_buffer_vmap(buffer, &map);
+ 	if (ret) {
+ 		goto err_drm_client_buffer_delete;
+@@ -194,45 +320,12 @@ int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
+ 
+ 	drm_fb_helper_fill_info(info, fb_helper, sizes);
+ 
+-	if (use_deferred_io)
+-		info->fbops = &drm_fbdev_dma_deferred_fb_ops;
++	if (fb->funcs->dirty)
++		ret = drm_fbdev_dma_driver_fbdev_probe_tail_shadowed(fb_helper, sizes);
+ 	else
+-		info->fbops = &drm_fbdev_dma_fb_ops;
+-
+-	/* screen */
+-	info->flags |= FBINFO_VIRTFB; /* system memory */
+-	if (dma_obj->map_noncoherent)
+-		info->flags |= FBINFO_READS_FAST; /* signal caching */
+-	info->screen_size = sizes->surface_height * fb->pitches[0];
+-	info->screen_buffer = map.vaddr;
+-	if (!(info->flags & FBINFO_HIDE_SMEM_START)) {
+-		if (!drm_WARN_ON(dev, is_vmalloc_addr(info->screen_buffer)))
+-			info->fix.smem_start = page_to_phys(virt_to_page(info->screen_buffer));
+-	}
+-	info->fix.smem_len = info->screen_size;
+-
+-	/*
+-	 * Only set up deferred I/O if the screen buffer supports
+-	 * it. If this disagrees with the previous test for ->dirty,
+-	 * mmap on the /dev/fb file might not work correctly.
+-	 */
+-	if (!is_vmalloc_addr(info->screen_buffer) && info->fix.smem_start) {
+-		unsigned long pfn = info->fix.smem_start >> PAGE_SHIFT;
+-
+-		if (drm_WARN_ON(dev, !pfn_to_page(pfn)))
+-			use_deferred_io = false;
+-	}
+-
+-	/* deferred I/O */
+-	if (use_deferred_io) {
+-		fb_helper->fbdefio.delay = HZ / 20;
+-		fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;
+-
+-		info->fbdefio = &fb_helper->fbdefio;
+-		ret = fb_deferred_io_init(info);
+-		if (ret)
+-			goto err_drm_fb_helper_release_info;
+-	}
++		ret = drm_fbdev_dma_driver_fbdev_probe_tail(fb_helper, sizes);
++	if (ret)
++		goto err_drm_fb_helper_release_info;
+ 
+ 	return 0;
+ 
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+index ab9ca4824b62e1..e60288af350277 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+@@ -34,6 +34,7 @@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job
+ 							  *sched_job)
+ {
+ 	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
++	struct drm_gpu_scheduler *sched = sched_job->sched;
+ 	struct etnaviv_gpu *gpu = submit->gpu;
+ 	u32 dma_addr;
+ 	int change;
+@@ -76,7 +77,9 @@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job
+ 	return DRM_GPU_SCHED_STAT_NOMINAL;
+ 
+ out_no_timeout:
+-	list_add(&sched_job->list, &sched_job->sched->pending_list);
++	spin_lock(&sched->job_list_lock);
++	list_add(&sched_job->list, &sched->pending_list);
++	spin_unlock(&sched->job_list_lock);
+ 	return DRM_GPU_SCHED_STAT_NOMINAL;
+ }
+ 
+diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
+index d21f3fb397060c..3c7789ca62075a 100644
+--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
++++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
+@@ -1059,7 +1059,7 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
+ 				              BXT_MIPI_TRANS_VACTIVE(port));
+ 	adjusted_mode->crtc_vtotal =
+ 				intel_de_read(display,
+-				              BXT_MIPI_TRANS_VTOTAL(port));
++				              BXT_MIPI_TRANS_VTOTAL(port)) + 1;
+ 
+ 	hactive = adjusted_mode->crtc_hdisplay;
+ 	hfp = intel_de_read(display, MIPI_HFP_COUNT(display, port));
+@@ -1264,7 +1264,7 @@ static void set_dsi_timings(struct intel_encoder *encoder,
+ 			intel_de_write(display, BXT_MIPI_TRANS_VACTIVE(port),
+ 				       adjusted_mode->crtc_vdisplay);
+ 			intel_de_write(display, BXT_MIPI_TRANS_VTOTAL(port),
+-				       adjusted_mode->crtc_vtotal);
++				       adjusted_mode->crtc_vtotal - 1);
+ 		}
+ 
+ 		intel_de_write(display, MIPI_HACTIVE_AREA_COUNT(display, port),
+diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
+index c43223916a1b11..5cc302ad13e164 100644
+--- a/drivers/gpu/drm/i915/i915_pmu.c
++++ b/drivers/gpu/drm/i915/i915_pmu.c
+@@ -111,7 +111,7 @@ static unsigned int config_bit(const u64 config)
+ 		return other_bit(config);
+ }
+ 
+-static u32 config_mask(const u64 config)
++static __always_inline u32 config_mask(const u64 config)
+ {
+ 	unsigned int bit = config_bit(config);
+ 
+diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
+index e1228fb093ee01..a5c1534eafdb1a 100644
+--- a/drivers/gpu/drm/msm/dp/dp_display.c
++++ b/drivers/gpu/drm/msm/dp/dp_display.c
+@@ -928,16 +928,17 @@ enum drm_mode_status dp_bridge_mode_valid(struct drm_bridge *bridge,
+ 		return -EINVAL;
+ 	}
+ 
+-	if (mode->clock > DP_MAX_PIXEL_CLK_KHZ)
+-		return MODE_CLOCK_HIGH;
+-
+ 	dp_display = container_of(dp, struct dp_display_private, dp_display);
+ 	link_info = &dp_display->panel->link_info;
+ 
+-	if (drm_mode_is_420_only(&dp->connector->display_info, mode) &&
+-	    dp_display->panel->vsc_sdp_supported)
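++	/*
++	 * Wide-bus and YUV420 modes clock out two pixels (or components) per
++	 * cycle, so validate half the mode clock against the pixel-clock cap.
++	 */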
++	if ((drm_mode_is_420_only(&dp->connector->display_info, mode) &&
++	     dp_display->panel->vsc_sdp_supported) ||
++	     msm_dp_wide_bus_available(dp))
+ 		mode_pclk_khz /= 2;
+ 
++	if (mode_pclk_khz > DP_MAX_PIXEL_CLK_KHZ)
++		return MODE_CLOCK_HIGH;
++
+ 	mode_bpp = dp->connector->display_info.bpc * num_components;
+ 	if (!mode_bpp)
+ 		mode_bpp = default_bpp;
+diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c
+index 1b9be5bd97f12d..da0176eae3fe30 100644
+--- a/drivers/gpu/drm/msm/dp/dp_drm.c
++++ b/drivers/gpu/drm/msm/dp/dp_drm.c
+@@ -257,7 +257,10 @@ static enum drm_mode_status edp_bridge_mode_valid(struct drm_bridge *bridge,
+ 		return -EINVAL;
+ 	}
+ 
+-	if (mode->clock > DP_MAX_PIXEL_CLK_KHZ)
++	if (msm_dp_wide_bus_available(dp))
++		mode_pclk_khz /= 2;
++
++	if (mode_pclk_khz > DP_MAX_PIXEL_CLK_KHZ)
+ 		return MODE_CLOCK_HIGH;
+ 
+ 	/*
+diff --git a/drivers/gpu/drm/msm/msm_gpu_devfreq.c b/drivers/gpu/drm/msm/msm_gpu_devfreq.c
+index 6970b0f7f457c8..2e1d5c3432728c 100644
+--- a/drivers/gpu/drm/msm/msm_gpu_devfreq.c
++++ b/drivers/gpu/drm/msm/msm_gpu_devfreq.c
+@@ -156,6 +156,7 @@ void msm_devfreq_init(struct msm_gpu *gpu)
+ 	priv->gpu_devfreq_config.downdifferential = 10;
+ 
+ 	mutex_init(&df->lock);
++	df->suspended = true;
+ 
+ 	ret = dev_pm_qos_add_request(&gpu->pdev->dev, &df->boost_freq,
+ 				     DEV_PM_QOS_MIN_FREQUENCY, 0);
+diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
+index 002057be0d84a2..c9c50e3b18a23e 100644
+--- a/drivers/gpu/drm/scheduler/sched_entity.c
++++ b/drivers/gpu/drm/scheduler/sched_entity.c
+@@ -189,6 +189,7 @@ static void drm_sched_entity_kill_jobs_work(struct work_struct *wrk)
+ {
+ 	struct drm_sched_job *job = container_of(wrk, typeof(*job), work);
+ 
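++	/* The scheduled fence must signal before the finished fence. */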
++	drm_sched_fence_scheduled(job->s_fence, NULL);
+ 	drm_sched_fence_finished(job->s_fence, -ESRCH);
+ 	WARN_ON(job->s_fence->parent);
+ 	job->sched->ops->free_job(job);
+diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
+index be61c9d1a4f0e8..51ca78551b57e1 100644
+--- a/drivers/gpu/drm/tegra/dc.c
++++ b/drivers/gpu/drm/tegra/dc.c
+@@ -1320,10 +1320,16 @@ static struct drm_plane *tegra_dc_add_shared_planes(struct drm_device *drm,
+ 		if (wgrp->dc == dc->pipe) {
+ 			for (j = 0; j < wgrp->num_windows; j++) {
+ 				unsigned int index = wgrp->windows[j];
++				enum drm_plane_type type;
++
++				if (primary)
++					type = DRM_PLANE_TYPE_OVERLAY;
++				else
++					type = DRM_PLANE_TYPE_PRIMARY;
+ 
+ 				plane = tegra_shared_plane_create(drm, dc,
+ 								  wgrp->index,
+-								  index);
++								  index, type);
+ 				if (IS_ERR(plane))
+ 					return plane;
+ 
+@@ -1331,10 +1337,8 @@ static struct drm_plane *tegra_dc_add_shared_planes(struct drm_device *drm,
+ 				 * Choose the first shared plane owned by this
+ 				 * head as the primary plane.
+ 				 */
+-				if (!primary) {
+-					plane->type = DRM_PLANE_TYPE_PRIMARY;
++				if (!primary)
+ 					primary = plane;
+-				}
+ 			}
+ 		}
+ 	}
+@@ -1388,7 +1392,10 @@ static void tegra_crtc_reset(struct drm_crtc *crtc)
+ 	if (crtc->state)
+ 		tegra_crtc_atomic_destroy_state(crtc, crtc->state);
+ 
+-	__drm_atomic_helper_crtc_reset(crtc, &state->base);
++	if (state)
++		__drm_atomic_helper_crtc_reset(crtc, &state->base);
++	else
++		__drm_atomic_helper_crtc_reset(crtc, NULL);
+ }
+ 
+ static struct drm_crtc_state *
+diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c
+index e0c2019a591b18..3507dd6e90234e 100644
+--- a/drivers/gpu/drm/tegra/hub.c
++++ b/drivers/gpu/drm/tegra/hub.c
+@@ -755,9 +755,9 @@ static const struct drm_plane_helper_funcs tegra_shared_plane_helper_funcs = {
+ struct drm_plane *tegra_shared_plane_create(struct drm_device *drm,
+ 					    struct tegra_dc *dc,
+ 					    unsigned int wgrp,
+-					    unsigned int index)
++					    unsigned int index,
++					    enum drm_plane_type type)
+ {
+-	enum drm_plane_type type = DRM_PLANE_TYPE_OVERLAY;
+ 	struct tegra_drm *tegra = drm->dev_private;
+ 	struct tegra_display_hub *hub = tegra->hub;
+ 	struct tegra_shared_plane *plane;
+diff --git a/drivers/gpu/drm/tegra/hub.h b/drivers/gpu/drm/tegra/hub.h
+index 23c4b2115ed1e3..a66f18c4facc9d 100644
+--- a/drivers/gpu/drm/tegra/hub.h
++++ b/drivers/gpu/drm/tegra/hub.h
+@@ -80,7 +80,8 @@ void tegra_display_hub_cleanup(struct tegra_display_hub *hub);
+ struct drm_plane *tegra_shared_plane_create(struct drm_device *drm,
+ 					    struct tegra_dc *dc,
+ 					    unsigned int wgrp,
+-					    unsigned int index);
++					    unsigned int index,
++					    enum drm_plane_type type);
+ 
+ int tegra_display_hub_atomic_check(struct drm_device *drm,
+ 				   struct drm_atomic_state *state);
+diff --git a/drivers/gpu/drm/tiny/cirrus.c b/drivers/gpu/drm/tiny/cirrus.c
+index 751326e3d9c374..c7e81f2610f8cc 100644
+--- a/drivers/gpu/drm/tiny/cirrus.c
++++ b/drivers/gpu/drm/tiny/cirrus.c
+@@ -318,7 +318,6 @@ static void cirrus_pitch_set(struct cirrus_device *cirrus, unsigned int pitch)
+ 	/* Enable extended blanking and pitch bits, and enable full memory */
+ 	cr1b = 0x22;
+ 	cr1b |= (pitch >> 7) & 0x10;
+-	cr1b |= (pitch >> 6) & 0x40;
+ 	wreg_crt(cirrus, 0x1b, cr1b);
+ 
+ 	cirrus_set_start_address(cirrus, 0);
+diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
+index 280a09a6e2ad76..0f712eb685ba2c 100644
+--- a/drivers/gpu/drm/udl/udl_drv.c
++++ b/drivers/gpu/drm/udl/udl_drv.c
+@@ -126,9 +126,9 @@ static void udl_usb_disconnect(struct usb_interface *interface)
+ {
+ 	struct drm_device *dev = usb_get_intfdata(interface);
+ 
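++	/* Mark the device unplugged first so no new device I/O can start. */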
++	drm_dev_unplug(dev);
+ 	drm_kms_helper_poll_fini(dev);
+ 	udl_drop_usb(dev);
+-	drm_dev_unplug(dev);
+ }
+ 
+ /*
+diff --git a/drivers/gpu/drm/xe/display/xe_display.c b/drivers/gpu/drm/xe/display/xe_display.c
+index a1928cedc7ddf0..e164e2d71e1157 100644
+--- a/drivers/gpu/drm/xe/display/xe_display.c
++++ b/drivers/gpu/drm/xe/display/xe_display.c
+@@ -96,6 +96,8 @@ int xe_display_create(struct xe_device *xe)
+ 	spin_lock_init(&xe->display.fb_tracking.lock);
+ 
+ 	xe->display.hotplug.dp_wq = alloc_ordered_workqueue("xe-dp", 0);
++	if (!xe->display.hotplug.dp_wq)
++		return -ENOMEM;
+ 
+ 	return drmm_add_action_or_reset(&xe->drm, display_destroy, NULL);
+ }
+diff --git a/drivers/gpu/drm/xe/xe_ggtt.c b/drivers/gpu/drm/xe/xe_ggtt.c
+index ff19eca5d358b3..e9820126feb969 100644
+--- a/drivers/gpu/drm/xe/xe_ggtt.c
++++ b/drivers/gpu/drm/xe/xe_ggtt.c
+@@ -198,6 +198,13 @@ static const struct xe_ggtt_pt_ops xelpg_pt_wa_ops = {
+ 	.ggtt_set_pte = xe_ggtt_set_pte_and_flush,
+ };
+ 
++static void dev_fini_ggtt(void *arg)
++{
++	struct xe_ggtt *ggtt = arg;
++
++	drain_workqueue(ggtt->wq);
++}
++
+ /**
+  * xe_ggtt_init_early - Early GGTT initialization
+  * @ggtt: the &xe_ggtt to be initialized
+@@ -254,6 +261,10 @@ int xe_ggtt_init_early(struct xe_ggtt *ggtt)
+ 	if (err)
+ 		return err;
+ 
++	err = devm_add_action_or_reset(xe->drm.dev, dev_fini_ggtt, ggtt);
++	if (err)
++		return err;
++
+ 	if (IS_SRIOV_VF(xe)) {
+ 		err = xe_gt_sriov_vf_prepare_ggtt(xe_tile_get_gt(ggtt->tile, 0));
+ 		if (err)
+diff --git a/drivers/gpu/drm/xe/xe_gpu_scheduler.h b/drivers/gpu/drm/xe/xe_gpu_scheduler.h
+index 64b2ae6839db26..400b2e9e89ab9c 100644
+--- a/drivers/gpu/drm/xe/xe_gpu_scheduler.h
++++ b/drivers/gpu/drm/xe/xe_gpu_scheduler.h
+@@ -51,7 +51,15 @@ static inline void xe_sched_tdr_queue_imm(struct xe_gpu_scheduler *sched)
+ 
+ static inline void xe_sched_resubmit_jobs(struct xe_gpu_scheduler *sched)
+ {
+-	drm_sched_resubmit_jobs(&sched->base);
++	struct drm_sched_job *s_job;
++
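++	/*
++	 * Re-run only the pending jobs whose hardware fence has not signaled
++	 * yet; jobs that already completed need no replay after a reset.
++	 */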
++	list_for_each_entry(s_job, &sched->base.pending_list, list) {
++		struct drm_sched_fence *s_fence = s_job->s_fence;
++		struct dma_fence *hw_fence = s_fence->parent;
++
++		if (hw_fence && !dma_fence_is_signaled(hw_fence))
++			sched->base.ops->run_job(s_job);
++	}
+ }
+ 
+ static inline bool
+diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
+index 3155825fa46ad3..9deb9b44c3c3ea 100644
+--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
++++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
+@@ -137,6 +137,14 @@ void xe_gt_tlb_invalidation_reset(struct xe_gt *gt)
+ 	struct xe_gt_tlb_invalidation_fence *fence, *next;
+ 	int pending_seqno;
+ 
++	/*
++	 * We can get here before the CTs are even initialized if we're wedging
++	 * very early, in which case there are not going to be any pending
++	 * fences so we can bail immediately.
++	 */
++	if (!xe_guc_ct_initialized(&gt->uc.guc.ct))
++		return;
++
+ 	/*
+ 	 * CT channel is already disabled at this point. No new TLB requests can
+ 	 * appear.
+diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
+index cd6a5f09d631e4..1f74f6bd50f319 100644
+--- a/drivers/gpu/drm/xe/xe_guc_ct.c
++++ b/drivers/gpu/drm/xe/xe_guc_ct.c
+@@ -454,6 +454,9 @@ void xe_guc_ct_disable(struct xe_guc_ct *ct)
+  */
+ void xe_guc_ct_stop(struct xe_guc_ct *ct)
+ {
++	if (!xe_guc_ct_initialized(ct))
++		return;
++
+ 	xe_guc_ct_set_state(ct, XE_GUC_CT_STATE_STOPPED);
+ 	stop_g2h_handler(ct);
+ }
+@@ -638,7 +641,7 @@ static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action,
+ 	u16 seqno;
+ 	int ret;
+ 
+-	xe_gt_assert(gt, ct->state != XE_GUC_CT_STATE_NOT_INITIALIZED);
++	xe_gt_assert(gt, xe_guc_ct_initialized(ct));
+ 	xe_gt_assert(gt, !g2h_len || !g2h_fence);
+ 	xe_gt_assert(gt, !num_g2h || !g2h_fence);
+ 	xe_gt_assert(gt, !g2h_len || num_g2h);
+@@ -1209,7 +1212,7 @@ static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path)
+ 	u32 action;
+ 	u32 *hxg;
+ 
+-	xe_gt_assert(gt, ct->state != XE_GUC_CT_STATE_NOT_INITIALIZED);
++	xe_gt_assert(gt, xe_guc_ct_initialized(ct));
+ 	lockdep_assert_held(&ct->fast_lock);
+ 
+ 	if (ct->state == XE_GUC_CT_STATE_DISABLED)
+diff --git a/drivers/gpu/drm/xe/xe_guc_ct.h b/drivers/gpu/drm/xe/xe_guc_ct.h
+index 190202fce2d048..13e316668e9012 100644
+--- a/drivers/gpu/drm/xe/xe_guc_ct.h
++++ b/drivers/gpu/drm/xe/xe_guc_ct.h
+@@ -23,6 +23,11 @@ void xe_guc_ct_snapshot_print(struct xe_guc_ct_snapshot *snapshot,
+ void xe_guc_ct_snapshot_free(struct xe_guc_ct_snapshot *snapshot);
+ void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool atomic);
+ 
++static inline bool xe_guc_ct_initialized(struct xe_guc_ct *ct)
++{
++	return ct->state != XE_GUC_CT_STATE_NOT_INITIALIZED;
++}
++
+ static inline bool xe_guc_ct_enabled(struct xe_guc_ct *ct)
+ {
+ 	return ct->state == XE_GUC_CT_STATE_ENABLED;
+diff --git a/drivers/gpu/drm/xe/xe_guc_pc.c b/drivers/gpu/drm/xe/xe_guc_pc.c
+index 034b29984d5ed4..f978da8be35c24 100644
+--- a/drivers/gpu/drm/xe/xe_guc_pc.c
++++ b/drivers/gpu/drm/xe/xe_guc_pc.c
+@@ -975,7 +975,7 @@ int xe_guc_pc_start(struct xe_guc_pc *pc)
+ 		goto out;
+ 	}
+ 
+-	memset(pc->bo->vmap.vaddr, 0, size);
++	xe_map_memset(xe, &pc->bo->vmap, 0, 0, size);
+ 	slpc_shared_data_write(pc, header.size, size);
+ 
+ 	ret = pc_action_reset(pc);
+diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
+index 20d05efdd406e6..0e17820a35e2ce 100644
+--- a/drivers/gpu/drm/xe/xe_guc_submit.c
++++ b/drivers/gpu/drm/xe/xe_guc_submit.c
+@@ -227,6 +227,17 @@ static bool exec_queue_killed_or_banned_or_wedged(struct xe_exec_queue *q)
+ static void guc_submit_fini(struct drm_device *drm, void *arg)
+ {
+ 	struct xe_guc *guc = arg;
++	struct xe_device *xe = guc_to_xe(guc);
++	struct xe_gt *gt = guc_to_gt(guc);
++	int ret;
++
++	ret = wait_event_timeout(guc->submission_state.fini_wq,
++				 xa_empty(&guc->submission_state.exec_queue_lookup),
++				 HZ * 5);
++
++	drain_workqueue(xe->destroy_wq);
++
++	xe_gt_assert(gt, ret);
+ 
+ 	xa_destroy(&guc->submission_state.exec_queue_lookup);
+ }
+@@ -298,6 +309,8 @@ int xe_guc_submit_init(struct xe_guc *guc, unsigned int num_ids)
+ 
+ 	primelockdep(guc);
+ 
++	guc->submission_state.initialized = true;
++
+ 	return drmm_add_action_or_reset(&xe->drm, guc_submit_fini, guc);
+ }
+ 
+@@ -826,6 +839,13 @@ void xe_guc_submit_wedge(struct xe_guc *guc)
+ 
+ 	xe_gt_assert(guc_to_gt(guc), guc_to_xe(guc)->wedged.mode);
+ 
++	/*
++	 * If device is being wedged even before submission_state is
++	 * initialized, there's nothing to do here.
++	 */
++	if (!guc->submission_state.initialized)
++		return;
++
+ 	err = devm_add_action_or_reset(guc_to_xe(guc)->drm.dev,
+ 				       guc_submit_wedged_fini, guc);
+ 	if (err) {
+@@ -1702,6 +1722,9 @@ int xe_guc_submit_reset_prepare(struct xe_guc *guc)
+ {
+ 	int ret;
+ 
++	if (!guc->submission_state.initialized)
++		return 0;
++
+ 	/*
+ 	 * Using an atomic here rather than submission_state.lock as this
+ 	 * function can be called while holding the CT lock (engine reset
+diff --git a/drivers/gpu/drm/xe/xe_guc_types.h b/drivers/gpu/drm/xe/xe_guc_types.h
+index ed150fc09ad04f..7842b71e68beb2 100644
+--- a/drivers/gpu/drm/xe/xe_guc_types.h
++++ b/drivers/gpu/drm/xe/xe_guc_types.h
+@@ -74,6 +74,11 @@ struct xe_guc {
+ 		struct mutex lock;
+ 		/** @submission_state.enabled: submission is enabled */
+ 		bool enabled;
++		/**
++		 * @submission_state.initialized: set once the submission state
++		 * has been initialized - before that, not even the lock is valid
++		 */
++		bool initialized;
+ 		/** @submission_state.fini_wq: submit fini wait queue */
+ 		wait_queue_head_t fini_wq;
+ 	} submission_state;
+diff --git a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
+index ef84fa757b26f1..34e38bb167bac6 100644
+--- a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
++++ b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
+@@ -57,12 +57,35 @@ bool xe_ttm_stolen_cpu_access_needs_ggtt(struct xe_device *xe)
+ 	return GRAPHICS_VERx100(xe) < 1270 && !IS_DGFX(xe);
+ }
+ 
++static u32 get_wopcm_size(struct xe_device *xe)
++{
++	u32 wopcm_size;
++	u64 val;
++
++	val = xe_mmio_read64_2x32(xe_root_mmio_gt(xe), STOLEN_RESERVED);
++	val = REG_FIELD_GET64(WOPCM_SIZE_MASK, val);
++
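++	/*
++	 * Decode the WOPCM_SIZE field: raw values 0-3 select 1/2/4/8 MiB
++	 * directly, while 5 and 6 are decremented first to yield 16 and 32 MiB.
++	 */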
++	switch (val) {
++	case 0x5 ... 0x6:
++		val--;
++		fallthrough;
++	case 0x0 ... 0x3:
++		wopcm_size = (1U << val) * SZ_1M;
++		break;
++	default:
++		WARN(1, "Missing case wopcm_size=%llx\n", val);
++		wopcm_size = 0;
++	}
++
++	return wopcm_size;
++}
++
+ static s64 detect_bar2_dgfx(struct xe_device *xe, struct xe_ttm_stolen_mgr *mgr)
+ {
+ 	struct xe_tile *tile = xe_device_get_root_tile(xe);
+ 	struct xe_gt *mmio = xe_root_mmio_gt(xe);
+ 	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+-	u64 stolen_size;
++	u64 stolen_size, wopcm_size;
+ 	u64 tile_offset;
+ 	u64 tile_size;
+ 
+@@ -74,7 +97,13 @@ static s64 detect_bar2_dgfx(struct xe_device *xe, struct xe_ttm_stolen_mgr *mgr)
+ 	if (drm_WARN_ON(&xe->drm, tile_size < mgr->stolen_base))
+ 		return 0;
+ 
++	/* Carve out the top of DSM as it contains the reserved WOPCM region */
++	wopcm_size = get_wopcm_size(xe);
++	if (drm_WARN_ON(&xe->drm, !wopcm_size))
++		return 0;
++
+ 	stolen_size = tile_size - mgr->stolen_base;
++	stolen_size -= wopcm_size;
+ 
+ 	/* Verify usage fits in the actual resource available */
+ 	if (mgr->stolen_base + stolen_size <= pci_resource_len(pdev, LMEM_BAR))
+@@ -89,29 +118,6 @@ static s64 detect_bar2_dgfx(struct xe_device *xe, struct xe_ttm_stolen_mgr *mgr)
+ 	return ALIGN_DOWN(stolen_size, SZ_1M);
+ }
+ 
+-static u32 get_wopcm_size(struct xe_device *xe)
+-{
+-	u32 wopcm_size;
+-	u64 val;
+-
+-	val = xe_mmio_read64_2x32(xe_root_mmio_gt(xe), STOLEN_RESERVED);
+-	val = REG_FIELD_GET64(WOPCM_SIZE_MASK, val);
+-
+-	switch (val) {
+-	case 0x5 ... 0x6:
+-		val--;
+-		fallthrough;
+-	case 0x0 ... 0x3:
+-		wopcm_size = (1U << val) * SZ_1M;
+-		break;
+-	default:
+-		WARN(1, "Missing case wopcm_size=%llx\n", val);
+-		wopcm_size = 0;
+-	}
+-
+-	return wopcm_size;
+-}
+-
+ static u32 detect_bar2_integrated(struct xe_device *xe, struct xe_ttm_stolen_mgr *mgr)
+ {
+ 	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
+index de257a032225ff..15fd497c920c8e 100644
+--- a/drivers/gpu/drm/xe/xe_vm.c
++++ b/drivers/gpu/drm/xe/xe_vm.c
+@@ -1477,8 +1477,10 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
+ 	 * scheduler drops all the references of it, hence protecting the VM
+ 	 * for this case is necessary.
+ 	 */
+-	if (flags & XE_VM_FLAG_LR_MODE)
++	if (flags & XE_VM_FLAG_LR_MODE) {
++		INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func);
+ 		xe_pm_runtime_get_noresume(xe);
++	}
+ 
+ 	vm_resv_obj = drm_gpuvm_resv_object_alloc(&xe->drm);
+ 	if (!vm_resv_obj) {
+@@ -1523,10 +1525,8 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
+ 		vm->batch_invalidate_tlb = true;
+ 	}
+ 
+-	if (vm->flags & XE_VM_FLAG_LR_MODE) {
+-		INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func);
++	if (vm->flags & XE_VM_FLAG_LR_MODE)
+ 		vm->batch_invalidate_tlb = false;
+-	}
+ 
+ 	/* Fill pt_root after allocating scratch tables */
+ 	for_each_tile(tile, xe, id) {
+diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c
+index f66194fde8912a..56e530860caefb 100644
+--- a/drivers/hid/hid-lenovo.c
++++ b/drivers/hid/hid-lenovo.c
+@@ -529,11 +529,14 @@ static void lenovo_features_set_cptkbd(struct hid_device *hdev)
+ 
+ 	/*
+ 	 * Tell the keyboard a driver understands it, and turn F7, F9, F11 into
+-	 * regular keys
++	 * regular keys (Compact only)
+ 	 */
+-	ret = lenovo_send_cmd_cptkbd(hdev, 0x01, 0x03);
+-	if (ret)
+-		hid_warn(hdev, "Failed to switch F7/9/11 mode: %d\n", ret);
++	if (hdev->product == USB_DEVICE_ID_LENOVO_CUSBKBD ||
++	    hdev->product == USB_DEVICE_ID_LENOVO_CBTKBD) {
++		ret = lenovo_send_cmd_cptkbd(hdev, 0x01, 0x03);
++		if (ret)
++			hid_warn(hdev, "Failed to switch F7/9/11 mode: %d\n", ret);
++	}
+ 
+ 	/* Switch middle button to native mode */
+ 	ret = lenovo_send_cmd_cptkbd(hdev, 0x09, 0x01);
+diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
+index 34428349fa3118..1b1112772777ca 100644
+--- a/drivers/hid/wacom_sys.c
++++ b/drivers/hid/wacom_sys.c
+@@ -2021,14 +2021,18 @@ static int wacom_initialize_remotes(struct wacom *wacom)
+ 
+ 	remote->remote_dir = kobject_create_and_add("wacom_remote",
+ 						    &wacom->hdev->dev.kobj);
+-	if (!remote->remote_dir)
++	if (!remote->remote_dir) {
++		kfifo_free(&remote->remote_fifo);
+ 		return -ENOMEM;
++	}
+ 
+ 	error = sysfs_create_files(remote->remote_dir, remote_unpair_attrs);
+ 
+ 	if (error) {
+ 		hid_err(wacom->hdev,
+ 			"cannot create sysfs group err: %d\n", error);
++		kfifo_free(&remote->remote_fifo);
++		kobject_put(remote->remote_dir);
+ 		return error;
+ 	}
+ 
+@@ -2874,6 +2878,7 @@ static void wacom_remove(struct hid_device *hdev)
+ 	hid_hw_stop(hdev);
+ 
+ 	cancel_delayed_work_sync(&wacom->init_work);
++	cancel_delayed_work_sync(&wacom->aes_battery_work);
+ 	cancel_work_sync(&wacom->wireless_work);
+ 	cancel_work_sync(&wacom->battery_work);
+ 	cancel_work_sync(&wacom->remote_work);
+diff --git a/drivers/hwmon/pmbus/max34440.c b/drivers/hwmon/pmbus/max34440.c
+index fe7f6b1b09851e..e14be8ebaad30e 100644
+--- a/drivers/hwmon/pmbus/max34440.c
++++ b/drivers/hwmon/pmbus/max34440.c
+@@ -34,16 +34,21 @@ enum chips { max34440, max34441, max34446, max34451, max34460, max34461 };
+ /*
+  * The whole max344* family have IOUT_OC_WARN_LIMIT and IOUT_OC_FAULT_LIMIT
+  * swapped from the standard pmbus spec addresses.
++ * For max34451, version MAX34451ETNA6+ and later has this issue fixed.
+  */
+ #define MAX34440_IOUT_OC_WARN_LIMIT	0x46
+ #define MAX34440_IOUT_OC_FAULT_LIMIT	0x4A
+ 
++#define MAX34451ETNA6_MFR_REV		0x0012
++
+ #define MAX34451_MFR_CHANNEL_CONFIG	0xe4
+ #define MAX34451_MFR_CHANNEL_CONFIG_SEL_MASK	0x3f
+ 
+ struct max34440_data {
+ 	int id;
+ 	struct pmbus_driver_info info;
++	u8 iout_oc_warn_limit;
++	u8 iout_oc_fault_limit;
+ };
+ 
+ #define to_max34440_data(x)  container_of(x, struct max34440_data, info)
+@@ -60,11 +65,11 @@ static int max34440_read_word_data(struct i2c_client *client, int page,
+ 	switch (reg) {
+ 	case PMBUS_IOUT_OC_FAULT_LIMIT:
+ 		ret = pmbus_read_word_data(client, page, phase,
+-					   MAX34440_IOUT_OC_FAULT_LIMIT);
++					   data->iout_oc_fault_limit);
+ 		break;
+ 	case PMBUS_IOUT_OC_WARN_LIMIT:
+ 		ret = pmbus_read_word_data(client, page, phase,
+-					   MAX34440_IOUT_OC_WARN_LIMIT);
++					   data->iout_oc_warn_limit);
+ 		break;
+ 	case PMBUS_VIRT_READ_VOUT_MIN:
+ 		ret = pmbus_read_word_data(client, page, phase,
+@@ -133,11 +138,11 @@ static int max34440_write_word_data(struct i2c_client *client, int page,
+ 
+ 	switch (reg) {
+ 	case PMBUS_IOUT_OC_FAULT_LIMIT:
+-		ret = pmbus_write_word_data(client, page, MAX34440_IOUT_OC_FAULT_LIMIT,
++		ret = pmbus_write_word_data(client, page, data->iout_oc_fault_limit,
+ 					    word);
+ 		break;
+ 	case PMBUS_IOUT_OC_WARN_LIMIT:
+-		ret = pmbus_write_word_data(client, page, MAX34440_IOUT_OC_WARN_LIMIT,
++		ret = pmbus_write_word_data(client, page, data->iout_oc_warn_limit,
+ 					    word);
+ 		break;
+ 	case PMBUS_VIRT_RESET_POUT_HISTORY:
+@@ -235,6 +240,25 @@ static int max34451_set_supported_funcs(struct i2c_client *client,
+ 	 */
+ 
+ 	int page, rv;
++	bool max34451_na6 = false;
++
++	rv = i2c_smbus_read_word_data(client, PMBUS_MFR_REVISION);
++	if (rv < 0)
++		return rv;
++
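++	/*
++	 * MAX34451ETNA6+ and later report VIN/IIN in direct format and use
++	 * the standard PMBus IOUT limit register addresses.
++	 */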
++	if (rv >= MAX34451ETNA6_MFR_REV) {
++		max34451_na6 = true;
++		data->info.format[PSC_VOLTAGE_IN] = direct;
++		data->info.format[PSC_CURRENT_IN] = direct;
++		data->info.m[PSC_VOLTAGE_IN] = 1;
++		data->info.b[PSC_VOLTAGE_IN] = 0;
++		data->info.R[PSC_VOLTAGE_IN] = 3;
++		data->info.m[PSC_CURRENT_IN] = 1;
++		data->info.b[PSC_CURRENT_IN] = 0;
++		data->info.R[PSC_CURRENT_IN] = 2;
++		data->iout_oc_fault_limit = PMBUS_IOUT_OC_FAULT_LIMIT;
++		data->iout_oc_warn_limit = PMBUS_IOUT_OC_WARN_LIMIT;
++	}
+ 
+ 	for (page = 0; page < 16; page++) {
+ 		rv = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page);
+@@ -251,16 +275,30 @@ static int max34451_set_supported_funcs(struct i2c_client *client,
+ 		case 0x20:
+ 			data->info.func[page] = PMBUS_HAVE_VOUT |
+ 				PMBUS_HAVE_STATUS_VOUT;
++
++			if (max34451_na6)
++				data->info.func[page] |= PMBUS_HAVE_VIN |
++					PMBUS_HAVE_STATUS_INPUT;
+ 			break;
+ 		case 0x21:
+ 			data->info.func[page] = PMBUS_HAVE_VOUT;
++
++			if (max34451_na6)
++				data->info.func[page] |= PMBUS_HAVE_VIN;
+ 			break;
+ 		case 0x22:
+ 			data->info.func[page] = PMBUS_HAVE_IOUT |
+ 				PMBUS_HAVE_STATUS_IOUT;
++
++			if (max34451_na6)
++				data->info.func[page] |= PMBUS_HAVE_IIN |
++					PMBUS_HAVE_STATUS_INPUT;
+ 			break;
+ 		case 0x23:
+ 			data->info.func[page] = PMBUS_HAVE_IOUT;
++
++			if (max34451_na6)
++				data->info.func[page] |= PMBUS_HAVE_IIN;
+ 			break;
+ 		default:
+ 			break;
+@@ -494,6 +532,8 @@ static int max34440_probe(struct i2c_client *client)
+ 		return -ENOMEM;
+ 	data->id = i2c_match_id(max34440_id, client)->driver_data;
+ 	data->info = max34440_info[data->id];
++	data->iout_oc_fault_limit = MAX34440_IOUT_OC_FAULT_LIMIT;
++	data->iout_oc_warn_limit = MAX34440_IOUT_OC_WARN_LIMIT;
+ 
+ 	if (data->id == max34451) {
+ 		rv = max34451_set_supported_funcs(client, data);
+diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c
+index c7e35a431ab002..b7941d8abbfe7a 100644
+--- a/drivers/hwtracing/coresight/coresight-core.c
++++ b/drivers/hwtracing/coresight/coresight-core.c
+@@ -97,7 +97,8 @@ coresight_find_out_connection(struct coresight_device *src_dev,
+ 
+ static inline u32 coresight_read_claim_tags(struct coresight_device *csdev)
+ {
+-	return csdev_access_relaxed_read32(&csdev->access, CORESIGHT_CLAIMCLR);
++	return FIELD_GET(CORESIGHT_CLAIM_MASK,
++			 csdev_access_relaxed_read32(&csdev->access, CORESIGHT_CLAIMCLR));
+ }
+ 
+ static inline bool coresight_is_claimed_self_hosted(struct coresight_device *csdev)
+diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
+index 05f891ca6b5c9d..cc7ff1e36ef421 100644
+--- a/drivers/hwtracing/coresight/coresight-priv.h
++++ b/drivers/hwtracing/coresight/coresight-priv.h
+@@ -35,6 +35,7 @@ extern const struct device_type coresight_dev_type[];
+  * Coresight device CLAIM protocol.
+  * See PSCI - ARM DEN 0022D, Section: 6.8.1 Debug and Trace save and restore.
+  */
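++/* Only the two low claim-tag bits are defined; mask out the rest on read. */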
++#define CORESIGHT_CLAIM_MASK		GENMASK(1, 0)
+ #define CORESIGHT_CLAIM_SELF_HOSTED	BIT(1)
+ 
+ #define TIMEOUT_US		100
+diff --git a/drivers/i2c/busses/i2c-robotfuzz-osif.c b/drivers/i2c/busses/i2c-robotfuzz-osif.c
+index 80d45079b763c0..e0a76fb5bc31f5 100644
+--- a/drivers/i2c/busses/i2c-robotfuzz-osif.c
++++ b/drivers/i2c/busses/i2c-robotfuzz-osif.c
+@@ -111,6 +111,11 @@ static u32 osif_func(struct i2c_adapter *adapter)
+ 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+ }
+ 
++/* prevent invalid 0-length usb_control_msg */
++static const struct i2c_adapter_quirks osif_quirks = {
++	.flags = I2C_AQ_NO_ZERO_LEN_READ,
++};
++
+ static const struct i2c_algorithm osif_algorithm = {
+ 	.xfer = osif_xfer,
+ 	.functionality = osif_func,
+@@ -143,6 +148,7 @@ static int osif_probe(struct usb_interface *interface,
+ 
+ 	priv->adapter.owner = THIS_MODULE;
+ 	priv->adapter.class = I2C_CLASS_HWMON;
++	priv->adapter.quirks = &osif_quirks;
+ 	priv->adapter.algo = &osif_algorithm;
+ 	priv->adapter.algo_data = priv;
+ 	snprintf(priv->adapter.name, sizeof(priv->adapter.name),
+diff --git a/drivers/i2c/busses/i2c-tiny-usb.c b/drivers/i2c/busses/i2c-tiny-usb.c
+index 0f2ed181b2665c..0cc7c0a816fc02 100644
+--- a/drivers/i2c/busses/i2c-tiny-usb.c
++++ b/drivers/i2c/busses/i2c-tiny-usb.c
+@@ -138,6 +138,11 @@ static u32 usb_func(struct i2c_adapter *adapter)
+ 	return ret;
+ }
+ 
++/* prevent invalid 0-length usb_control_msg */
++static const struct i2c_adapter_quirks usb_quirks = {
++	.flags = I2C_AQ_NO_ZERO_LEN_READ,
++};
++
+ /* This is the actual algorithm we define */
+ static const struct i2c_algorithm usb_algorithm = {
+ 	.xfer = usb_xfer,
+@@ -246,6 +251,7 @@ static int i2c_tiny_usb_probe(struct usb_interface *interface,
+ 	/* setup i2c adapter description */
+ 	dev->adapter.owner = THIS_MODULE;
+ 	dev->adapter.class = I2C_CLASS_HWMON;
++	dev->adapter.quirks = &usb_quirks;
+ 	dev->adapter.algo = &usb_algorithm;
+ 	dev->adapter.algo_data = dev;
+ 	snprintf(dev->adapter.name, sizeof(dev->adapter.name),
+diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c
+index ea4aabd3960a08..3df1d4f6bc959e 100644
+--- a/drivers/iio/adc/ad_sigma_delta.c
++++ b/drivers/iio/adc/ad_sigma_delta.c
+@@ -477,6 +477,10 @@ static irqreturn_t ad_sd_trigger_handler(int irq, void *p)
+ 		 * byte set to zero. */
+ 		ad_sd_read_reg_raw(sigma_delta, data_reg, transfer_size, &data[1]);
+ 		break;
++
++	default:
++		dev_err_ratelimited(&indio_dev->dev, "Unsupported reg_size: %u\n", reg_size);
++		goto irq_handled;
+ 	}
+ 
+ 	/*
+diff --git a/drivers/iio/dac/Makefile b/drivers/iio/dac/Makefile
+index 2cf148f16306db..56a125f56284f1 100644
+--- a/drivers/iio/dac/Makefile
++++ b/drivers/iio/dac/Makefile
+@@ -4,7 +4,7 @@
+ #
+ 
+ # When adding new entries keep the list in alphabetical order
+-obj-$(CONFIG_AD3552R) += ad3552r.o
++obj-$(CONFIG_AD3552R) += ad3552r.o ad3552r-common.o
+ obj-$(CONFIG_AD5360) += ad5360.o
+ obj-$(CONFIG_AD5380) += ad5380.o
+ obj-$(CONFIG_AD5421) += ad5421.o
+diff --git a/drivers/iio/dac/ad3552r-common.c b/drivers/iio/dac/ad3552r-common.c
+new file mode 100644
+index 00000000000000..94869ad15c27ed
+--- /dev/null
++++ b/drivers/iio/dac/ad3552r-common.c
+@@ -0,0 +1,248 @@
++// SPDX-License-Identifier: GPL-2.0+
++//
++// Copyright (c) 2010-2024 Analog Devices Inc.
++// Copyright (c) 2024 Baylibre, SAS
++
++#include <linux/bitfield.h>
++#include <linux/device.h>
++#include <linux/module.h>
++#include <linux/property.h>
++#include <linux/regulator/consumer.h>
++
++#include "ad3552r.h"
++
++const s32 ad3552r_ch_ranges[AD3552R_MAX_RANGES][2] = {
++	[AD3552R_CH_OUTPUT_RANGE_0__2P5V]	= { 0, 2500 },
++	[AD3552R_CH_OUTPUT_RANGE_0__5V]		= { 0, 5000 },
++	[AD3552R_CH_OUTPUT_RANGE_0__10V]	= { 0, 10000 },
++	[AD3552R_CH_OUTPUT_RANGE_NEG_5__5V]	= { -5000, 5000 },
++	[AD3552R_CH_OUTPUT_RANGE_NEG_10__10V]	= { -10000, 10000 }
++};
++EXPORT_SYMBOL_NS_GPL(ad3552r_ch_ranges, IIO_AD3552R);
++
++const s32 ad3542r_ch_ranges[AD3542R_MAX_RANGES][2] = {
++	[AD3542R_CH_OUTPUT_RANGE_0__2P5V]	= { 0, 2500 },
++	[AD3542R_CH_OUTPUT_RANGE_0__5V]		= { 0, 5000 },
++	[AD3542R_CH_OUTPUT_RANGE_0__10V]	= { 0, 10000 },
++	[AD3542R_CH_OUTPUT_RANGE_NEG_5__5V]	= { -5000, 5000 },
++	[AD3542R_CH_OUTPUT_RANGE_NEG_2P5__7P5V]	= { -2500, 7500 }
++};
++EXPORT_SYMBOL_NS_GPL(ad3542r_ch_ranges, IIO_AD3552R);
++
++/* Gain * AD3552R_GAIN_SCALE */
++static const s32 gains_scaling_table[] = {
++	[AD3552R_CH_GAIN_SCALING_1]		= 1000,
++	[AD3552R_CH_GAIN_SCALING_0_5]		= 500,
++	[AD3552R_CH_GAIN_SCALING_0_25]		= 250,
++	[AD3552R_CH_GAIN_SCALING_0_125]		= 125
++};
++
++u16 ad3552r_calc_custom_gain(u8 p, u8 n, s16 goffs)
++{
++	return FIELD_PREP(AD3552R_MASK_CH_RANGE_OVERRIDE, 1) |
++	       FIELD_PREP(AD3552R_MASK_CH_GAIN_SCALING_P, p) |
++	       FIELD_PREP(AD3552R_MASK_CH_GAIN_SCALING_N, n) |
++	       FIELD_PREP(AD3552R_MASK_CH_OFFSET_BIT_8, abs(goffs)) |
++	       FIELD_PREP(AD3552R_MASK_CH_OFFSET_POLARITY, goffs < 0);
++}
++EXPORT_SYMBOL_NS_GPL(ad3552r_calc_custom_gain, IIO_AD3552R);
++
++static void ad3552r_get_custom_range(struct ad3552r_ch_data *ch_data,
++				     s32 *v_min, s32 *v_max)
++{
++	s64 vref, tmp, common, offset, gn, gp;
++	/*
++	 * From datasheet formula (In Volts):
++	 *	Vmin = 2.5 + [(GainN + Offset / 1024) * 2.5 * Rfb * 1.03]
++	 *	Vmax = 2.5 - [(GainP + Offset / 1024) * 2.5 * Rfb * 1.03]
++	 * Calculations are converted to millivolts
++	 */
++	vref = 2500;
++	/* 2.5 * 1.03 * 1000 (To mV) */
++	common = 2575 * ch_data->rfb;
++	offset = ch_data->gain_offset;
++
++	gn = gains_scaling_table[ch_data->n];
++	tmp = (1024 * gn + AD3552R_GAIN_SCALE * offset) * common;
++	tmp = div_s64(tmp, 1024  * AD3552R_GAIN_SCALE);
++	*v_max = vref + tmp;
++
++	gp = gains_scaling_table[ch_data->p];
++	tmp = (1024 * gp - AD3552R_GAIN_SCALE * offset) * common;
++	tmp = div_s64(tmp, 1024 * AD3552R_GAIN_SCALE);
++	*v_min = vref - tmp;
++}
++
++void ad3552r_calc_gain_and_offset(struct ad3552r_ch_data *ch_data,
++				  const struct ad3552r_model_data *model_data)
++{
++	s32 idx, v_max, v_min, span, rem;
++	s64 tmp;
++
++	if (ch_data->range_override) {
++		ad3552r_get_custom_range(ch_data, &v_min, &v_max);
++	} else {
++		/* Normal range */
++		idx = ch_data->range;
++		v_min = model_data->ranges_table[idx][0];
++		v_max = model_data->ranges_table[idx][1];
++	}
++
++	/*
++	 * From datasheet formula:
++	 *	Vout = Span * (D / 65536) + Vmin
++	 * Converted to scale and offset:
++	 *	Scale = Span / 65536
++	 *	Offset = 65536 * Vmin / Span
++	 *
++	 * Remainders are in micros in order to be printed as
++	 * IIO_VAL_INT_PLUS_MICRO
++	 */
++	span = v_max - v_min;
++	ch_data->scale_int = div_s64_rem(span, 65536, &rem);
++	/* Do operations in microvolts */
++	ch_data->scale_dec = DIV_ROUND_CLOSEST((s64)rem * 1000000, 65536);
++
++	ch_data->offset_int = div_s64_rem(v_min * 65536, span, &rem);
++	tmp = (s64)rem * 1000000;
++	ch_data->offset_dec = div_s64(tmp, span);
++}
++EXPORT_SYMBOL_NS_GPL(ad3552r_calc_gain_and_offset, IIO_AD3552R);
++
++int ad3552r_get_ref_voltage(struct device *dev, u32 *val)
++{
++	int voltage;
++	int delta = 100000;
++
++	voltage = devm_regulator_get_enable_read_voltage(dev, "vref");
++	if (voltage < 0 && voltage != -ENODEV)
++		return dev_err_probe(dev, voltage,
++				     "Error getting vref voltage\n");
++
++	if (voltage == -ENODEV) {
++		if (device_property_read_bool(dev, "adi,vref-out-en"))
++			*val = AD3552R_INTERNAL_VREF_PIN_2P5V;
++		else
++			*val = AD3552R_INTERNAL_VREF_PIN_FLOATING;
++
++		return 0;
++	}
++
++	if (voltage > 2500000 + delta || voltage < 2500000 - delta) {
++		dev_warn(dev, "vref-supply must be 2.5V");
++		return -EINVAL;
++	}
++
++	*val = AD3552R_EXTERNAL_VREF_PIN_INPUT;
++
++	return 0;
++}
++EXPORT_SYMBOL_NS_GPL(ad3552r_get_ref_voltage, IIO_AD3552R);
++
++int ad3552r_get_drive_strength(struct device *dev, u32 *val)
++{
++	int err;
++	u32 drive_strength;
++
++	err = device_property_read_u32(dev, "adi,sdo-drive-strength",
++				       &drive_strength);
++	if (err)
++		return err;
++
++	if (drive_strength > 3) {
++		dev_err_probe(dev, -EINVAL,
++			      "adi,sdo-drive-strength must be less than 4\n");
++		return -EINVAL;
++	}
++
++	*val = drive_strength;
++
++	return 0;
++}
++EXPORT_SYMBOL_NS_GPL(ad3552r_get_drive_strength, IIO_AD3552R);
++
++int ad3552r_get_custom_gain(struct device *dev, struct fwnode_handle *child,
++			    u8 *gs_p, u8 *gs_n, u16 *rfb, s16 *goffs)
++{
++	int err;
++	u32 val;
++	struct fwnode_handle *gain_child __free(fwnode_handle) =
++		fwnode_get_named_child_node(child,
++					    "custom-output-range-config");
++
++	if (!gain_child)
++		return dev_err_probe(dev, -EINVAL,
++				     "custom-output-range-config mandatory\n");
++
++	err = fwnode_property_read_u32(gain_child, "adi,gain-scaling-p", &val);
++	if (err)
++		return dev_err_probe(dev, err,
++				     "adi,gain-scaling-p mandatory\n");
++	*gs_p = val;
++
++	err = fwnode_property_read_u32(gain_child, "adi,gain-scaling-n", &val);
++	if (err)
++		return dev_err_probe(dev, err,
++				     "adi,gain-scaling-n property mandatory\n");
++	*gs_n = val;
++
++	err = fwnode_property_read_u32(gain_child, "adi,rfb-ohms", &val);
++	if (err)
++		return dev_err_probe(dev, err,
++				     "adi,rfb-ohms mandatory\n");
++	*rfb = val;
++
++	err = fwnode_property_read_u32(gain_child, "adi,gain-offset", &val);
++	if (err)
++		return dev_err_probe(dev, err,
++				     "adi,gain-offset mandatory\n");
++	*goffs = val;
++
++	return 0;
++}
++EXPORT_SYMBOL_NS_GPL(ad3552r_get_custom_gain, IIO_AD3552R);
++
++static int ad3552r_find_range(const struct ad3552r_model_data *model_info,
++			      s32 *vals)
++{
++	int i;
++
++	for (i = 0; i < model_info->num_ranges; i++)
++		if (vals[0] == model_info->ranges_table[i][0] * 1000 &&
++		    vals[1] == model_info->ranges_table[i][1] * 1000)
++			return i;
++
++	return -EINVAL;
++}
++
++int ad3552r_get_output_range(struct device *dev,
++			     const struct ad3552r_model_data *model_info,
++			     struct fwnode_handle *child, u32 *val)
++{
++	int ret;
++	s32 vals[2];
++
++	/* This property is optional, so return -ENOENT if missing */
++	if (!fwnode_property_present(child, "adi,output-range-microvolt"))
++		return -ENOENT;
++
++	ret = fwnode_property_read_u32_array(child,
++					     "adi,output-range-microvolt",
++					     vals, 2);
++	if (ret)
++		return dev_err_probe(dev, ret,
++				"invalid adi,output-range-microvolt\n");
++
++	ret = ad3552r_find_range(model_info, vals);
++	if (ret < 0)
++		return dev_err_probe(dev, ret,
++			"invalid adi,output-range-microvolt value\n");
++
++	*val = ret;
++
++	return 0;
++}
++EXPORT_SYMBOL_NS_GPL(ad3552r_get_output_range, IIO_AD3552R);
++
++MODULE_DESCRIPTION("ad3552r common functions");
++MODULE_LICENSE("GPL");
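
For reference, the scale/offset conversion in ad3552r_calc_gain_and_offset() above can be checked with a minimal userspace sketch; the plain C division below stands in for the kernel's div_s64_rem() and DIV_ROUND_CLOSEST(), and the -5 V..+5 V range is just an example:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* Example: the -5 V .. +5 V range, values in millivolts */
	int64_t v_min = -5000, v_max = 5000;
	int64_t span = v_max - v_min;

	/* Scale = Span / 65536, remainder reported in micros */
	int64_t scale_int = span / 65536;
	int64_t scale_dec = ((span % 65536) * 1000000 + 32768) / 65536;
	/* Offset = 65536 * Vmin / Span */
	int64_t offset_int = (v_min * 65536) / span;
	int64_t offset_dec = llabs(((v_min * 65536) % span) * 1000000 / span);

	printf("scale  = %lld.%06lld mV/LSB\n",
	       (long long)scale_int, (long long)scale_dec);	/* 0.152588 */
	printf("offset = %lld.%06lld codes\n",
	       (long long)offset_int, (long long)offset_dec);	/* -32768 */
	return 0;
}
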
+diff --git a/drivers/iio/dac/ad3552r.c b/drivers/iio/dac/ad3552r.c
+index 390d3fab21478f..5b2ce2aa67a47d 100644
+--- a/drivers/iio/dac/ad3552r.c
++++ b/drivers/iio/dac/ad3552r.c
+@@ -6,271 +6,15 @@
+  * Copyright 2021 Analog Devices Inc.
+  */
+ #include <linux/unaligned.h>
++#include <linux/bitfield.h>
+ #include <linux/device.h>
+ #include <linux/iio/triggered_buffer.h>
+ #include <linux/iio/trigger_consumer.h>
+ #include <linux/iopoll.h>
+ #include <linux/kernel.h>
+-#include <linux/regulator/consumer.h>
+ #include <linux/spi/spi.h>
+ 
+-/* Register addresses */
+-/* Primary address space */
+-#define AD3552R_REG_ADDR_INTERFACE_CONFIG_A		0x00
+-#define   AD3552R_MASK_SOFTWARE_RESET			(BIT(7) | BIT(0))
+-#define   AD3552R_MASK_ADDR_ASCENSION			BIT(5)
+-#define   AD3552R_MASK_SDO_ACTIVE			BIT(4)
+-#define AD3552R_REG_ADDR_INTERFACE_CONFIG_B		0x01
+-#define   AD3552R_MASK_SINGLE_INST			BIT(7)
+-#define   AD3552R_MASK_SHORT_INSTRUCTION		BIT(3)
+-#define AD3552R_REG_ADDR_DEVICE_CONFIG			0x02
+-#define   AD3552R_MASK_DEVICE_STATUS(n)			BIT(4 + (n))
+-#define   AD3552R_MASK_CUSTOM_MODES			GENMASK(3, 2)
+-#define   AD3552R_MASK_OPERATING_MODES			GENMASK(1, 0)
+-#define AD3552R_REG_ADDR_CHIP_TYPE			0x03
+-#define   AD3552R_MASK_CLASS				GENMASK(7, 0)
+-#define AD3552R_REG_ADDR_PRODUCT_ID_L			0x04
+-#define AD3552R_REG_ADDR_PRODUCT_ID_H			0x05
+-#define AD3552R_REG_ADDR_CHIP_GRADE			0x06
+-#define   AD3552R_MASK_GRADE				GENMASK(7, 4)
+-#define   AD3552R_MASK_DEVICE_REVISION			GENMASK(3, 0)
+-#define AD3552R_REG_ADDR_SCRATCH_PAD			0x0A
+-#define AD3552R_REG_ADDR_SPI_REVISION			0x0B
+-#define AD3552R_REG_ADDR_VENDOR_L			0x0C
+-#define AD3552R_REG_ADDR_VENDOR_H			0x0D
+-#define AD3552R_REG_ADDR_STREAM_MODE			0x0E
+-#define   AD3552R_MASK_LENGTH				GENMASK(7, 0)
+-#define AD3552R_REG_ADDR_TRANSFER_REGISTER		0x0F
+-#define   AD3552R_MASK_MULTI_IO_MODE			GENMASK(7, 6)
+-#define   AD3552R_MASK_STREAM_LENGTH_KEEP_VALUE		BIT(2)
+-#define AD3552R_REG_ADDR_INTERFACE_CONFIG_C		0x10
+-#define   AD3552R_MASK_CRC_ENABLE			(GENMASK(7, 6) |\
+-							 GENMASK(1, 0))
+-#define   AD3552R_MASK_STRICT_REGISTER_ACCESS		BIT(5)
+-#define AD3552R_REG_ADDR_INTERFACE_STATUS_A		0x11
+-#define   AD3552R_MASK_INTERFACE_NOT_READY		BIT(7)
+-#define   AD3552R_MASK_CLOCK_COUNTING_ERROR		BIT(5)
+-#define   AD3552R_MASK_INVALID_OR_NO_CRC		BIT(3)
+-#define   AD3552R_MASK_WRITE_TO_READ_ONLY_REGISTER	BIT(2)
+-#define   AD3552R_MASK_PARTIAL_REGISTER_ACCESS		BIT(1)
+-#define   AD3552R_MASK_REGISTER_ADDRESS_INVALID		BIT(0)
+-#define AD3552R_REG_ADDR_INTERFACE_CONFIG_D		0x14
+-#define   AD3552R_MASK_ALERT_ENABLE_PULLUP		BIT(6)
+-#define   AD3552R_MASK_MEM_CRC_EN			BIT(4)
+-#define   AD3552R_MASK_SDO_DRIVE_STRENGTH		GENMASK(3, 2)
+-#define   AD3552R_MASK_DUAL_SPI_SYNCHROUNOUS_EN		BIT(1)
+-#define   AD3552R_MASK_SPI_CONFIG_DDR			BIT(0)
+-#define AD3552R_REG_ADDR_SH_REFERENCE_CONFIG		0x15
+-#define   AD3552R_MASK_IDUMP_FAST_MODE			BIT(6)
+-#define   AD3552R_MASK_SAMPLE_HOLD_DIFFERENTIAL_USER_EN	BIT(5)
+-#define   AD3552R_MASK_SAMPLE_HOLD_USER_TRIM		GENMASK(4, 3)
+-#define   AD3552R_MASK_SAMPLE_HOLD_USER_ENABLE		BIT(2)
+-#define   AD3552R_MASK_REFERENCE_VOLTAGE_SEL		GENMASK(1, 0)
+-#define AD3552R_REG_ADDR_ERR_ALARM_MASK			0x16
+-#define   AD3552R_MASK_REF_RANGE_ALARM			BIT(6)
+-#define   AD3552R_MASK_CLOCK_COUNT_ERR_ALARM		BIT(5)
+-#define   AD3552R_MASK_MEM_CRC_ERR_ALARM		BIT(4)
+-#define   AD3552R_MASK_SPI_CRC_ERR_ALARM		BIT(3)
+-#define   AD3552R_MASK_WRITE_TO_READ_ONLY_ALARM		BIT(2)
+-#define   AD3552R_MASK_PARTIAL_REGISTER_ACCESS_ALARM	BIT(1)
+-#define   AD3552R_MASK_REGISTER_ADDRESS_INVALID_ALARM	BIT(0)
+-#define AD3552R_REG_ADDR_ERR_STATUS			0x17
+-#define   AD3552R_MASK_REF_RANGE_ERR_STATUS			BIT(6)
+-#define   AD3552R_MASK_DUAL_SPI_STREAM_EXCEEDS_DAC_ERR_STATUS	BIT(5)
+-#define   AD3552R_MASK_MEM_CRC_ERR_STATUS			BIT(4)
+-#define   AD3552R_MASK_RESET_STATUS				BIT(0)
+-#define AD3552R_REG_ADDR_POWERDOWN_CONFIG		0x18
+-#define   AD3552R_MASK_CH_DAC_POWERDOWN(ch)		BIT(4 + (ch))
+-#define   AD3552R_MASK_CH_AMPLIFIER_POWERDOWN(ch)	BIT(ch)
+-#define AD3552R_REG_ADDR_CH0_CH1_OUTPUT_RANGE		0x19
+-#define   AD3552R_MASK_CH_OUTPUT_RANGE_SEL(ch)		((ch) ? GENMASK(7, 4) :\
+-							 GENMASK(3, 0))
+-#define AD3552R_REG_ADDR_CH_OFFSET(ch)			(0x1B + (ch) * 2)
+-#define   AD3552R_MASK_CH_OFFSET_BITS_0_7		GENMASK(7, 0)
+-#define AD3552R_REG_ADDR_CH_GAIN(ch)			(0x1C + (ch) * 2)
+-#define   AD3552R_MASK_CH_RANGE_OVERRIDE		BIT(7)
+-#define   AD3552R_MASK_CH_GAIN_SCALING_N		GENMASK(6, 5)
+-#define   AD3552R_MASK_CH_GAIN_SCALING_P		GENMASK(4, 3)
+-#define   AD3552R_MASK_CH_OFFSET_POLARITY		BIT(2)
+-#define   AD3552R_MASK_CH_OFFSET_BIT_8			BIT(0)
+-/*
+- * Secondary region
+- * For multibyte registers specify the highest address because the access is
+- * done in descending order
+- */
+-#define AD3552R_SECONDARY_REGION_START			0x28
+-#define AD3552R_REG_ADDR_HW_LDAC_16B			0x28
+-#define AD3552R_REG_ADDR_CH_DAC_16B(ch)			(0x2C - (1 - ch) * 2)
+-#define AD3552R_REG_ADDR_DAC_PAGE_MASK_16B		0x2E
+-#define AD3552R_REG_ADDR_CH_SELECT_16B			0x2F
+-#define AD3552R_REG_ADDR_INPUT_PAGE_MASK_16B		0x31
+-#define AD3552R_REG_ADDR_SW_LDAC_16B			0x32
+-#define AD3552R_REG_ADDR_CH_INPUT_16B(ch)		(0x36 - (1 - ch) * 2)
+-/* 3 bytes registers */
+-#define AD3552R_REG_START_24B				0x37
+-#define AD3552R_REG_ADDR_HW_LDAC_24B			0x37
+-#define AD3552R_REG_ADDR_CH_DAC_24B(ch)			(0x3D - (1 - ch) * 3)
+-#define AD3552R_REG_ADDR_DAC_PAGE_MASK_24B		0x40
+-#define AD3552R_REG_ADDR_CH_SELECT_24B			0x41
+-#define AD3552R_REG_ADDR_INPUT_PAGE_MASK_24B		0x44
+-#define AD3552R_REG_ADDR_SW_LDAC_24B			0x45
+-#define AD3552R_REG_ADDR_CH_INPUT_24B(ch)		(0x4B - (1 - ch) * 3)
+-
+-/* Useful defines */
+-#define AD3552R_MAX_CH					2
+-#define AD3552R_MASK_CH(ch)				BIT(ch)
+-#define AD3552R_MASK_ALL_CH				GENMASK(1, 0)
+-#define AD3552R_MAX_REG_SIZE				3
+-#define AD3552R_READ_BIT				BIT(7)
+-#define AD3552R_ADDR_MASK				GENMASK(6, 0)
+-#define AD3552R_MASK_DAC_12B				0xFFF0
+-#define AD3552R_DEFAULT_CONFIG_B_VALUE			0x8
+-#define AD3552R_SCRATCH_PAD_TEST_VAL1			0x34
+-#define AD3552R_SCRATCH_PAD_TEST_VAL2			0xB2
+-#define AD3552R_GAIN_SCALE				1000
+-#define AD3552R_LDAC_PULSE_US				100
+-
+-enum ad3552r_ch_vref_select {
+-	/* Internal source with Vref I/O floating */
+-	AD3552R_INTERNAL_VREF_PIN_FLOATING,
+-	/* Internal source with Vref I/O at 2.5V */
+-	AD3552R_INTERNAL_VREF_PIN_2P5V,
+-	/* External source with Vref I/O as input */
+-	AD3552R_EXTERNAL_VREF_PIN_INPUT
+-};
+-
+-enum ad3552r_id {
+-	AD3541R_ID = 0x400b,
+-	AD3542R_ID = 0x4009,
+-	AD3551R_ID = 0x400a,
+-	AD3552R_ID = 0x4008,
+-};
+-
+-enum ad3552r_ch_output_range {
+-	/* Range from 0 V to 2.5 V. Requires Rfb1x connection */
+-	AD3552R_CH_OUTPUT_RANGE_0__2P5V,
+-	/* Range from 0 V to 5 V. Requires Rfb1x connection  */
+-	AD3552R_CH_OUTPUT_RANGE_0__5V,
+-	/* Range from 0 V to 10 V. Requires Rfb2x connection  */
+-	AD3552R_CH_OUTPUT_RANGE_0__10V,
+-	/* Range from -5 V to 5 V. Requires Rfb2x connection  */
+-	AD3552R_CH_OUTPUT_RANGE_NEG_5__5V,
+-	/* Range from -10 V to 10 V. Requires Rfb4x connection  */
+-	AD3552R_CH_OUTPUT_RANGE_NEG_10__10V,
+-};
+-
+-static const s32 ad3552r_ch_ranges[][2] = {
+-	[AD3552R_CH_OUTPUT_RANGE_0__2P5V]	= {0, 2500},
+-	[AD3552R_CH_OUTPUT_RANGE_0__5V]		= {0, 5000},
+-	[AD3552R_CH_OUTPUT_RANGE_0__10V]	= {0, 10000},
+-	[AD3552R_CH_OUTPUT_RANGE_NEG_5__5V]	= {-5000, 5000},
+-	[AD3552R_CH_OUTPUT_RANGE_NEG_10__10V]	= {-10000, 10000}
+-};
+-
+-enum ad3542r_ch_output_range {
+-	/* Range from 0 V to 2.5 V. Requires Rfb1x connection */
+-	AD3542R_CH_OUTPUT_RANGE_0__2P5V,
+-	/* Range from 0 V to 3 V. Requires Rfb1x connection  */
+-	AD3542R_CH_OUTPUT_RANGE_0__3V,
+-	/* Range from 0 V to 5 V. Requires Rfb1x connection  */
+-	AD3542R_CH_OUTPUT_RANGE_0__5V,
+-	/* Range from 0 V to 10 V. Requires Rfb2x connection  */
+-	AD3542R_CH_OUTPUT_RANGE_0__10V,
+-	/* Range from -2.5 V to 7.5 V. Requires Rfb2x connection  */
+-	AD3542R_CH_OUTPUT_RANGE_NEG_2P5__7P5V,
+-	/* Range from -5 V to 5 V. Requires Rfb2x connection  */
+-	AD3542R_CH_OUTPUT_RANGE_NEG_5__5V,
+-};
+-
+-static const s32 ad3542r_ch_ranges[][2] = {
+-	[AD3542R_CH_OUTPUT_RANGE_0__2P5V]	= {0, 2500},
+-	[AD3542R_CH_OUTPUT_RANGE_0__3V]		= {0, 3000},
+-	[AD3542R_CH_OUTPUT_RANGE_0__5V]		= {0, 5000},
+-	[AD3542R_CH_OUTPUT_RANGE_0__10V]	= {0, 10000},
+-	[AD3542R_CH_OUTPUT_RANGE_NEG_2P5__7P5V]	= {-2500, 7500},
+-	[AD3542R_CH_OUTPUT_RANGE_NEG_5__5V]	= {-5000, 5000}
+-};
+-
+-enum ad3552r_ch_gain_scaling {
+-	/* Gain scaling of 1 */
+-	AD3552R_CH_GAIN_SCALING_1,
+-	/* Gain scaling of 0.5 */
+-	AD3552R_CH_GAIN_SCALING_0_5,
+-	/* Gain scaling of 0.25 */
+-	AD3552R_CH_GAIN_SCALING_0_25,
+-	/* Gain scaling of 0.125 */
+-	AD3552R_CH_GAIN_SCALING_0_125,
+-};
+-
+-/* Gain * AD3552R_GAIN_SCALE */
+-static const s32 gains_scaling_table[] = {
+-	[AD3552R_CH_GAIN_SCALING_1]		= 1000,
+-	[AD3552R_CH_GAIN_SCALING_0_5]		= 500,
+-	[AD3552R_CH_GAIN_SCALING_0_25]		= 250,
+-	[AD3552R_CH_GAIN_SCALING_0_125]		= 125
+-};
+-
+-enum ad3552r_dev_attributes {
+-	/* - Direct register values */
+-	/* From 0-3 */
+-	AD3552R_SDO_DRIVE_STRENGTH,
+-	/*
+-	 * 0 -> Internal Vref, vref_io pin floating (default)
+-	 * 1 -> Internal Vref, vref_io driven by internal vref
+-	 * 2 or 3 -> External Vref
+-	 */
+-	AD3552R_VREF_SELECT,
+-	/* Read registers in ascending order if set. Else descending */
+-	AD3552R_ADDR_ASCENSION,
+-};
+-
+-enum ad3552r_ch_attributes {
+-	/* DAC powerdown */
+-	AD3552R_CH_DAC_POWERDOWN,
+-	/* DAC amplifier powerdown */
+-	AD3552R_CH_AMPLIFIER_POWERDOWN,
+-	/* Select the output range. Select from enum ad3552r_ch_output_range */
+-	AD3552R_CH_OUTPUT_RANGE_SEL,
+-	/*
+-	 * Over-rider the range selector in order to manually set the output
+-	 * voltage range
+-	 */
+-	AD3552R_CH_RANGE_OVERRIDE,
+-	/* Manually set the offset voltage */
+-	AD3552R_CH_GAIN_OFFSET,
+-	/* Sets the polarity of the offset. */
+-	AD3552R_CH_GAIN_OFFSET_POLARITY,
+-	/* PDAC gain scaling */
+-	AD3552R_CH_GAIN_SCALING_P,
+-	/* NDAC gain scaling */
+-	AD3552R_CH_GAIN_SCALING_N,
+-	/* Rfb value */
+-	AD3552R_CH_RFB,
+-	/* Channel select. When set allow Input -> DAC and Mask -> DAC */
+-	AD3552R_CH_SELECT,
+-};
+-
+-struct ad3552r_ch_data {
+-	s32	scale_int;
+-	s32	scale_dec;
+-	s32	offset_int;
+-	s32	offset_dec;
+-	s16	gain_offset;
+-	u16	rfb;
+-	u8	n;
+-	u8	p;
+-	u8	range;
+-	bool	range_override;
+-};
+-
+-struct ad3552r_model_data {
+-	const char *model_name;
+-	enum ad3552r_id chip_id;
+-	unsigned int num_hw_channels;
+-	const s32 (*ranges_table)[2];
+-	int num_ranges;
+-	bool requires_output_range;
+-};
++#include "ad3552r.h"
+ 
+ struct ad3552r_desc {
+ 	const struct ad3552r_model_data *model_data;
+@@ -285,45 +29,6 @@ struct ad3552r_desc {
+ 	unsigned int		num_ch;
+ };
+ 
+-static const u16 addr_mask_map[][2] = {
+-	[AD3552R_ADDR_ASCENSION] = {
+-			AD3552R_REG_ADDR_INTERFACE_CONFIG_A,
+-			AD3552R_MASK_ADDR_ASCENSION
+-	},
+-	[AD3552R_SDO_DRIVE_STRENGTH] = {
+-			AD3552R_REG_ADDR_INTERFACE_CONFIG_D,
+-			AD3552R_MASK_SDO_DRIVE_STRENGTH
+-	},
+-	[AD3552R_VREF_SELECT] = {
+-			AD3552R_REG_ADDR_SH_REFERENCE_CONFIG,
+-			AD3552R_MASK_REFERENCE_VOLTAGE_SEL
+-	},
+-};
+-
+-/* 0 -> reg addr, 1->ch0 mask, 2->ch1 mask */
+-static const u16 addr_mask_map_ch[][3] = {
+-	[AD3552R_CH_DAC_POWERDOWN] = {
+-			AD3552R_REG_ADDR_POWERDOWN_CONFIG,
+-			AD3552R_MASK_CH_DAC_POWERDOWN(0),
+-			AD3552R_MASK_CH_DAC_POWERDOWN(1)
+-	},
+-	[AD3552R_CH_AMPLIFIER_POWERDOWN] = {
+-			AD3552R_REG_ADDR_POWERDOWN_CONFIG,
+-			AD3552R_MASK_CH_AMPLIFIER_POWERDOWN(0),
+-			AD3552R_MASK_CH_AMPLIFIER_POWERDOWN(1)
+-	},
+-	[AD3552R_CH_OUTPUT_RANGE_SEL] = {
+-			AD3552R_REG_ADDR_CH0_CH1_OUTPUT_RANGE,
+-			AD3552R_MASK_CH_OUTPUT_RANGE_SEL(0),
+-			AD3552R_MASK_CH_OUTPUT_RANGE_SEL(1)
+-	},
+-	[AD3552R_CH_SELECT] = {
+-			AD3552R_REG_ADDR_CH_SELECT_16B,
+-			AD3552R_MASK_CH(0),
+-			AD3552R_MASK_CH(1)
+-	}
+-};
+-
+ static u8 _ad3552r_reg_len(u8 addr)
+ {
+ 	switch (addr) {
+@@ -399,11 +104,6 @@ static int ad3552r_read_reg(struct ad3552r_desc *dac, u8 addr, u16 *val)
+ 	return 0;
+ }
+ 
+-static u16 ad3552r_field_prep(u16 val, u16 mask)
+-{
+-	return (val << __ffs(mask)) & mask;
+-}
+-
+ /* Update field of a register, shift val if needed */
+ static int ad3552r_update_reg_field(struct ad3552r_desc *dac, u8 addr, u16 mask,
+ 				    u16 val)
+@@ -416,21 +116,11 @@ static int ad3552r_update_reg_field(struct ad3552r_desc *dac, u8 addr, u16 mask,
+ 		return ret;
+ 
+ 	reg &= ~mask;
+-	reg |= ad3552r_field_prep(val, mask);
++	reg |= val;
+ 
+ 	return ad3552r_write_reg(dac, addr, reg);
+ }
+ 
+-static int ad3552r_set_ch_value(struct ad3552r_desc *dac,
+-				enum ad3552r_ch_attributes attr,
+-				u8 ch,
+-				u16 val)
+-{
+-	/* Update register related to attributes in chip */
+-	return ad3552r_update_reg_field(dac, addr_mask_map_ch[attr][0],
+-				       addr_mask_map_ch[attr][ch + 1], val);
+-}
+-
+ #define AD3552R_CH_DAC(_idx) ((struct iio_chan_spec) {		\
+ 	.type = IIO_VOLTAGE,					\
+ 	.output = true,						\
+@@ -510,8 +200,14 @@ static int ad3552r_write_raw(struct iio_dev *indio_dev,
+ 					val);
+ 		break;
+ 	case IIO_CHAN_INFO_ENABLE:
+-		err = ad3552r_set_ch_value(dac, AD3552R_CH_DAC_POWERDOWN,
+-					   chan->channel, !val);
++		if (chan->channel == 0)
++			val = FIELD_PREP(AD3552R_MASK_CH_DAC_POWERDOWN(0), !val);
++		else
++			val = FIELD_PREP(AD3552R_MASK_CH_DAC_POWERDOWN(1), !val);
++
++		err = ad3552r_update_reg_field(dac, AD3552R_REG_ADDR_POWERDOWN_CONFIG,
++					       AD3552R_MASK_CH_DAC_POWERDOWN(chan->channel),
++					       val);
+ 		break;
+ 	default:
+ 		err = -EINVAL;
+@@ -721,83 +417,9 @@ static int ad3552r_reset(struct ad3552r_desc *dac)
+ 		return ret;
+ 
+ 	return ad3552r_update_reg_field(dac,
+-					addr_mask_map[AD3552R_ADDR_ASCENSION][0],
+-					addr_mask_map[AD3552R_ADDR_ASCENSION][1],
+-					val);
+-}
+-
+-static void ad3552r_get_custom_range(struct ad3552r_desc *dac, s32 i, s32 *v_min,
+-				     s32 *v_max)
+-{
+-	s64 vref, tmp, common, offset, gn, gp;
+-	/*
+-	 * From datasheet formula (In Volts):
+-	 *	Vmin = 2.5 + [(GainN + Offset / 1024) * 2.5 * Rfb * 1.03]
+-	 *	Vmax = 2.5 - [(GainP + Offset / 1024) * 2.5 * Rfb * 1.03]
+-	 * Calculus are converted to milivolts
+-	 */
+-	vref = 2500;
+-	/* 2.5 * 1.03 * 1000 (To mV) */
+-	common = 2575 * dac->ch_data[i].rfb;
+-	offset = dac->ch_data[i].gain_offset;
+-
+-	gn = gains_scaling_table[dac->ch_data[i].n];
+-	tmp = (1024 * gn + AD3552R_GAIN_SCALE * offset) * common;
+-	tmp = div_s64(tmp, 1024  * AD3552R_GAIN_SCALE);
+-	*v_max = vref + tmp;
+-
+-	gp = gains_scaling_table[dac->ch_data[i].p];
+-	tmp = (1024 * gp - AD3552R_GAIN_SCALE * offset) * common;
+-	tmp = div_s64(tmp, 1024 * AD3552R_GAIN_SCALE);
+-	*v_min = vref - tmp;
+-}
+-
+-static void ad3552r_calc_gain_and_offset(struct ad3552r_desc *dac, s32 ch)
+-{
+-	s32 idx, v_max, v_min, span, rem;
+-	s64 tmp;
+-
+-	if (dac->ch_data[ch].range_override) {
+-		ad3552r_get_custom_range(dac, ch, &v_min, &v_max);
+-	} else {
+-		/* Normal range */
+-		idx = dac->ch_data[ch].range;
+-		v_min = dac->model_data->ranges_table[idx][0];
+-		v_max = dac->model_data->ranges_table[idx][1];
+-	}
+-
+-	/*
+-	 * From datasheet formula:
+-	 *	Vout = Span * (D / 65536) + Vmin
+-	 * Converted to scale and offset:
+-	 *	Scale = Span / 65536
+-	 *	Offset = 65536 * Vmin / Span
+-	 *
+-	 * Reminders are in micros in order to be printed as
+-	 * IIO_VAL_INT_PLUS_MICRO
+-	 */
+-	span = v_max - v_min;
+-	dac->ch_data[ch].scale_int = div_s64_rem(span, 65536, &rem);
+-	/* Do operations in microvolts */
+-	dac->ch_data[ch].scale_dec = DIV_ROUND_CLOSEST((s64)rem * 1000000,
+-							65536);
+-
+-	dac->ch_data[ch].offset_int = div_s64_rem(v_min * 65536, span, &rem);
+-	tmp = (s64)rem * 1000000;
+-	dac->ch_data[ch].offset_dec = div_s64(tmp, span);
+-}
+-
+-static int ad3552r_find_range(const struct ad3552r_model_data *model_data,
+-			      s32 *vals)
+-{
+-	int i;
+-
+-	for (i = 0; i < model_data->num_ranges; i++)
+-		if (vals[0] == model_data->ranges_table[i][0] * 1000 &&
+-		    vals[1] == model_data->ranges_table[i][1] * 1000)
+-			return i;
+-
+-	return -EINVAL;
++					AD3552R_REG_ADDR_INTERFACE_CONFIG_A,
++					AD3552R_MASK_ADDR_ASCENSION,
++					FIELD_PREP(AD3552R_MASK_ADDR_ASCENSION, val));
+ }
+ 
+ static int ad3552r_configure_custom_gain(struct ad3552r_desc *dac,
+@@ -805,57 +427,30 @@ static int ad3552r_configure_custom_gain(struct ad3552r_desc *dac,
+ 					 u32 ch)
+ {
+ 	struct device *dev = &dac->spi->dev;
+-	u32 val;
+ 	int err;
+ 	u8 addr;
+-	u16 reg = 0, offset;
+-
+-	struct fwnode_handle *gain_child __free(fwnode_handle)
+-		= fwnode_get_named_child_node(child,
+-					      "custom-output-range-config");
+-	if (!gain_child)
+-		return dev_err_probe(dev, -EINVAL,
+-				     "mandatory custom-output-range-config property missing\n");
+-
+-	dac->ch_data[ch].range_override = 1;
+-	reg |= ad3552r_field_prep(1, AD3552R_MASK_CH_RANGE_OVERRIDE);
+-
+-	err = fwnode_property_read_u32(gain_child, "adi,gain-scaling-p", &val);
+-	if (err)
+-		return dev_err_probe(dev, err,
+-				     "mandatory adi,gain-scaling-p property missing\n");
+-	reg |= ad3552r_field_prep(val, AD3552R_MASK_CH_GAIN_SCALING_P);
+-	dac->ch_data[ch].p = val;
+-
+-	err = fwnode_property_read_u32(gain_child, "adi,gain-scaling-n", &val);
+-	if (err)
+-		return dev_err_probe(dev, err,
+-				     "mandatory adi,gain-scaling-n property missing\n");
+-	reg |= ad3552r_field_prep(val, AD3552R_MASK_CH_GAIN_SCALING_N);
+-	dac->ch_data[ch].n = val;
+-
+-	err = fwnode_property_read_u32(gain_child, "adi,rfb-ohms", &val);
+-	if (err)
+-		return dev_err_probe(dev, err,
+-				     "mandatory adi,rfb-ohms property missing\n");
+-	dac->ch_data[ch].rfb = val;
++	u16 reg;
+ 
+-	err = fwnode_property_read_u32(gain_child, "adi,gain-offset", &val);
++	err = ad3552r_get_custom_gain(dev, child,
++				      &dac->ch_data[ch].p,
++				      &dac->ch_data[ch].n,
++				      &dac->ch_data[ch].rfb,
++				      &dac->ch_data[ch].gain_offset);
+ 	if (err)
+-		return dev_err_probe(dev, err,
+-				     "mandatory adi,gain-offset property missing\n");
+-	dac->ch_data[ch].gain_offset = val;
++		return err;
+ 
+-	offset = abs((s32)val);
+-	reg |= ad3552r_field_prep((offset >> 8), AD3552R_MASK_CH_OFFSET_BIT_8);
++	dac->ch_data[ch].range_override = 1;
+ 
+-	reg |= ad3552r_field_prep((s32)val < 0, AD3552R_MASK_CH_OFFSET_POLARITY);
+ 	addr = AD3552R_REG_ADDR_CH_GAIN(ch);
+ 	err = ad3552r_write_reg(dac, addr,
+-				offset & AD3552R_MASK_CH_OFFSET_BITS_0_7);
++				abs((s32)dac->ch_data[ch].gain_offset) &
++				AD3552R_MASK_CH_OFFSET_BITS_0_7);
+ 	if (err)
+ 		return dev_err_probe(dev, err, "Error writing register\n");
+ 
++	reg = ad3552r_calc_custom_gain(dac->ch_data[ch].p, dac->ch_data[ch].n,
++				       dac->ch_data[ch].gain_offset);
++
+ 	err = ad3552r_write_reg(dac, addr, reg);
+ 	if (err)
+ 		return dev_err_probe(dev, err, "Error writing register\n");
+@@ -866,49 +461,31 @@ static int ad3552r_configure_custom_gain(struct ad3552r_desc *dac,
+ static int ad3552r_configure_device(struct ad3552r_desc *dac)
+ {
+ 	struct device *dev = &dac->spi->dev;
+-	int err, cnt = 0, voltage, delta = 100000;
+-	u32 vals[2], val, ch;
++	int err, cnt = 0;
++	u32 val, ch;
+ 
+ 	dac->gpio_ldac = devm_gpiod_get_optional(dev, "ldac", GPIOD_OUT_HIGH);
+ 	if (IS_ERR(dac->gpio_ldac))
+ 		return dev_err_probe(dev, PTR_ERR(dac->gpio_ldac),
+ 				     "Error getting gpio ldac");
+ 
+-	voltage = devm_regulator_get_enable_read_voltage(dev, "vref");
+-	if (voltage < 0 && voltage != -ENODEV)
+-		return dev_err_probe(dev, voltage, "Error getting vref voltage\n");
+-
+-	if (voltage == -ENODEV) {
+-		if (device_property_read_bool(dev, "adi,vref-out-en"))
+-			val = AD3552R_INTERNAL_VREF_PIN_2P5V;
+-		else
+-			val = AD3552R_INTERNAL_VREF_PIN_FLOATING;
+-	} else {
+-		if (voltage > 2500000 + delta || voltage < 2500000 - delta) {
+-			dev_warn(dev, "vref-supply must be 2.5V");
+-			return -EINVAL;
+-		}
+-		val = AD3552R_EXTERNAL_VREF_PIN_INPUT;
+-	}
++	err = ad3552r_get_ref_voltage(dev, &val);
++	if (err < 0)
++		return err;
+ 
+ 	err = ad3552r_update_reg_field(dac,
+-				       addr_mask_map[AD3552R_VREF_SELECT][0],
+-				       addr_mask_map[AD3552R_VREF_SELECT][1],
+-				       val);
++				       AD3552R_REG_ADDR_SH_REFERENCE_CONFIG,
++				       AD3552R_MASK_REFERENCE_VOLTAGE_SEL,
++				       FIELD_PREP(AD3552R_MASK_REFERENCE_VOLTAGE_SEL, val));
+ 	if (err)
+ 		return err;
+ 
+-	err = device_property_read_u32(dev, "adi,sdo-drive-strength", &val);
++	err = ad3552r_get_drive_strength(dev, &val);
+ 	if (!err) {
+-		if (val > 3) {
+-			dev_err(dev, "adi,sdo-drive-strength must be less than 4\n");
+-			return -EINVAL;
+-		}
+-
+ 		err = ad3552r_update_reg_field(dac,
+-					       addr_mask_map[AD3552R_SDO_DRIVE_STRENGTH][0],
+-					       addr_mask_map[AD3552R_SDO_DRIVE_STRENGTH][1],
+-					       val);
++					       AD3552R_REG_ADDR_INTERFACE_CONFIG_D,
++					       AD3552R_MASK_SDO_DRIVE_STRENGTH,
++					       FIELD_PREP(AD3552R_MASK_SDO_DRIVE_STRENGTH, val));
+ 		if (err)
+ 			return err;
+ 	}
+@@ -929,24 +506,21 @@ static int ad3552r_configure_device(struct ad3552r_desc *dac)
+ 					     "reg must be less than %d\n",
+ 					     dac->model_data->num_hw_channels);
+ 
+-		if (fwnode_property_present(child, "adi,output-range-microvolt")) {
+-			err = fwnode_property_read_u32_array(child,
+-							     "adi,output-range-microvolt",
+-							     vals,
+-							     2);
+-			if (err)
+-				return dev_err_probe(dev, err,
+-					"adi,output-range-microvolt property could not be parsed\n");
+-
+-			err = ad3552r_find_range(dac->model_data, vals);
+-			if (err < 0)
+-				return dev_err_probe(dev, err,
+-						     "Invalid adi,output-range-microvolt value\n");
+-
+-			val = err;
+-			err = ad3552r_set_ch_value(dac,
+-						   AD3552R_CH_OUTPUT_RANGE_SEL,
+-						   ch, val);
++		err = ad3552r_get_output_range(dev, dac->model_data,
++					       child, &val);
++		if (err && err != -ENOENT)
++			return err;
++
++		if (!err) {
++			if (ch == 0)
++				val = FIELD_PREP(AD3552R_MASK_CH_OUTPUT_RANGE_SEL(0), val);
++			else
++				val = FIELD_PREP(AD3552R_MASK_CH_OUTPUT_RANGE_SEL(1), val);
++
++			err = ad3552r_update_reg_field(dac,
++						       AD3552R_REG_ADDR_CH0_CH1_OUTPUT_RANGE,
++						       AD3552R_MASK_CH_OUTPUT_RANGE_SEL(ch),
++						       val);
+ 			if (err)
+ 				return err;
+ 
+@@ -961,10 +535,17 @@ static int ad3552r_configure_device(struct ad3552r_desc *dac)
+ 				return err;
+ 		}
+ 
+-		ad3552r_calc_gain_and_offset(dac, ch);
++		ad3552r_calc_gain_and_offset(&dac->ch_data[ch], dac->model_data);
+ 		dac->enabled_ch |= BIT(ch);
+ 
+-		err = ad3552r_set_ch_value(dac, AD3552R_CH_SELECT, ch, 1);
++		if (ch == 0)
++			val = FIELD_PREP(AD3552R_MASK_CH(0), 1);
++		else
++			val = FIELD_PREP(AD3552R_MASK_CH(1), 1);
++
++		err = ad3552r_update_reg_field(dac,
++					       AD3552R_REG_ADDR_CH_SELECT_16B,
++					       AD3552R_MASK_CH(ch), val);
+ 		if (err < 0)
+ 			return err;
+ 
+@@ -976,8 +557,15 @@ static int ad3552r_configure_device(struct ad3552r_desc *dac)
+ 	/* Disable unused channels */
+ 	for_each_clear_bit(ch, &dac->enabled_ch,
+ 			   dac->model_data->num_hw_channels) {
+-		err = ad3552r_set_ch_value(dac, AD3552R_CH_AMPLIFIER_POWERDOWN,
+-					   ch, 1);
++		if (ch == 0)
++			val = FIELD_PREP(AD3552R_MASK_CH_AMPLIFIER_POWERDOWN(0), 1);
++		else
++			val = FIELD_PREP(AD3552R_MASK_CH_AMPLIFIER_POWERDOWN(1), 1);
++
++		err = ad3552r_update_reg_field(dac,
++					       AD3552R_REG_ADDR_POWERDOWN_CONFIG,
++					       AD3552R_MASK_CH_AMPLIFIER_POWERDOWN(ch),
++					       val);
+ 		if (err)
+ 			return err;
+ 	}
+@@ -1146,3 +734,4 @@ module_spi_driver(ad3552r_driver);
+ MODULE_AUTHOR("Mihail Chindris <mihail.chindris@analog.com>");
+ MODULE_DESCRIPTION("Analog Device AD3552R DAC");
+ MODULE_LICENSE("GPL v2");
++MODULE_IMPORT_NS(IIO_AD3552R);
+diff --git a/drivers/iio/dac/ad3552r.h b/drivers/iio/dac/ad3552r.h
+new file mode 100644
+index 00000000000000..c20f64f80d5db3
+--- /dev/null
++++ b/drivers/iio/dac/ad3552r.h
+@@ -0,0 +1,223 @@
++/* SPDX-License-Identifier: GPL-2.0-only */
++/*
++ * AD3552R Digital <-> Analog converters common header
++ *
++ * Copyright 2021-2024 Analog Devices Inc.
++ * Author: Angelo Dureghello <adureghello@baylibre.com>
++ */
++
++#ifndef __DRIVERS_IIO_DAC_AD3552R_H__
++#define __DRIVERS_IIO_DAC_AD3552R_H__
++
++/* Register addresses */
++/* Primary address space */
++#define AD3552R_REG_ADDR_INTERFACE_CONFIG_A		0x00
++#define   AD3552R_MASK_SOFTWARE_RESET			(BIT(7) | BIT(0))
++#define   AD3552R_MASK_ADDR_ASCENSION			BIT(5)
++#define   AD3552R_MASK_SDO_ACTIVE			BIT(4)
++#define AD3552R_REG_ADDR_INTERFACE_CONFIG_B		0x01
++#define   AD3552R_MASK_SINGLE_INST			BIT(7)
++#define   AD3552R_MASK_SHORT_INSTRUCTION		BIT(3)
++#define AD3552R_REG_ADDR_DEVICE_CONFIG			0x02
++#define   AD3552R_MASK_DEVICE_STATUS(n)			BIT(4 + (n))
++#define   AD3552R_MASK_CUSTOM_MODES			GENMASK(3, 2)
++#define   AD3552R_MASK_OPERATING_MODES			GENMASK(1, 0)
++#define AD3552R_REG_ADDR_CHIP_TYPE			0x03
++#define   AD3552R_MASK_CLASS				GENMASK(7, 0)
++#define AD3552R_REG_ADDR_PRODUCT_ID_L			0x04
++#define AD3552R_REG_ADDR_PRODUCT_ID_H			0x05
++#define AD3552R_REG_ADDR_CHIP_GRADE			0x06
++#define   AD3552R_MASK_GRADE				GENMASK(7, 4)
++#define   AD3552R_MASK_DEVICE_REVISION			GENMASK(3, 0)
++#define AD3552R_REG_ADDR_SCRATCH_PAD			0x0A
++#define AD3552R_REG_ADDR_SPI_REVISION			0x0B
++#define AD3552R_REG_ADDR_VENDOR_L			0x0C
++#define AD3552R_REG_ADDR_VENDOR_H			0x0D
++#define AD3552R_REG_ADDR_STREAM_MODE			0x0E
++#define   AD3552R_MASK_LENGTH				GENMASK(7, 0)
++#define AD3552R_REG_ADDR_TRANSFER_REGISTER		0x0F
++#define   AD3552R_MASK_MULTI_IO_MODE			GENMASK(7, 6)
++#define   AD3552R_MASK_STREAM_LENGTH_KEEP_VALUE		BIT(2)
++#define AD3552R_REG_ADDR_INTERFACE_CONFIG_C		0x10
++#define   AD3552R_MASK_CRC_ENABLE \
++		(GENMASK(7, 6) | GENMASK(1, 0))
++#define   AD3552R_MASK_STRICT_REGISTER_ACCESS		BIT(5)
++#define AD3552R_REG_ADDR_INTERFACE_STATUS_A		0x11
++#define   AD3552R_MASK_INTERFACE_NOT_READY		BIT(7)
++#define   AD3552R_MASK_CLOCK_COUNTING_ERROR		BIT(5)
++#define   AD3552R_MASK_INVALID_OR_NO_CRC		BIT(3)
++#define   AD3552R_MASK_WRITE_TO_READ_ONLY_REGISTER	BIT(2)
++#define   AD3552R_MASK_PARTIAL_REGISTER_ACCESS		BIT(1)
++#define   AD3552R_MASK_REGISTER_ADDRESS_INVALID		BIT(0)
++#define AD3552R_REG_ADDR_INTERFACE_CONFIG_D		0x14
++#define   AD3552R_MASK_ALERT_ENABLE_PULLUP		BIT(6)
++#define   AD3552R_MASK_MEM_CRC_EN			BIT(4)
++#define   AD3552R_MASK_SDO_DRIVE_STRENGTH		GENMASK(3, 2)
++#define   AD3552R_MASK_DUAL_SPI_SYNCHROUNOUS_EN		BIT(1)
++#define   AD3552R_MASK_SPI_CONFIG_DDR			BIT(0)
++#define AD3552R_REG_ADDR_SH_REFERENCE_CONFIG		0x15
++#define   AD3552R_MASK_IDUMP_FAST_MODE			BIT(6)
++#define   AD3552R_MASK_SAMPLE_HOLD_DIFF_USER_EN		BIT(5)
++#define   AD3552R_MASK_SAMPLE_HOLD_USER_TRIM		GENMASK(4, 3)
++#define   AD3552R_MASK_SAMPLE_HOLD_USER_ENABLE		BIT(2)
++#define   AD3552R_MASK_REFERENCE_VOLTAGE_SEL		GENMASK(1, 0)
++#define AD3552R_REG_ADDR_ERR_ALARM_MASK			0x16
++#define   AD3552R_MASK_REF_RANGE_ALARM			BIT(6)
++#define   AD3552R_MASK_CLOCK_COUNT_ERR_ALARM		BIT(5)
++#define   AD3552R_MASK_MEM_CRC_ERR_ALARM		BIT(4)
++#define   AD3552R_MASK_SPI_CRC_ERR_ALARM		BIT(3)
++#define   AD3552R_MASK_WRITE_TO_READ_ONLY_ALARM		BIT(2)
++#define   AD3552R_MASK_PARTIAL_REGISTER_ACCESS_ALARM	BIT(1)
++#define   AD3552R_MASK_REGISTER_ADDRESS_INVALID_ALARM	BIT(0)
++#define AD3552R_REG_ADDR_ERR_STATUS			0x17
++#define   AD3552R_MASK_REF_RANGE_ERR_STATUS		BIT(6)
++#define   AD3552R_MASK_STREAM_EXCEEDS_DAC_ERR_STATUS	BIT(5)
++#define   AD3552R_MASK_MEM_CRC_ERR_STATUS		BIT(4)
++#define   AD3552R_MASK_RESET_STATUS			BIT(0)
++#define AD3552R_REG_ADDR_POWERDOWN_CONFIG		0x18
++#define   AD3552R_MASK_CH_DAC_POWERDOWN(ch)		BIT(4 + (ch))
++#define   AD3552R_MASK_CH_AMPLIFIER_POWERDOWN(ch)	BIT(ch)
++#define AD3552R_REG_ADDR_CH0_CH1_OUTPUT_RANGE		0x19
++#define   AD3552R_MASK_CH0_RANGE			GENMASK(2, 0)
++#define   AD3552R_MASK_CH1_RANGE			GENMASK(6, 4)
++#define   AD3552R_MASK_CH_OUTPUT_RANGE			GENMASK(7, 0)
++#define   AD3552R_MASK_CH_OUTPUT_RANGE_SEL(ch) \
++		((ch) ? GENMASK(7, 4) : GENMASK(3, 0))
++#define AD3552R_REG_ADDR_CH_OFFSET(ch)			(0x1B + (ch) * 2)
++#define   AD3552R_MASK_CH_OFFSET_BITS_0_7		GENMASK(7, 0)
++#define AD3552R_REG_ADDR_CH_GAIN(ch)			(0x1C + (ch) * 2)
++#define   AD3552R_MASK_CH_RANGE_OVERRIDE		BIT(7)
++#define   AD3552R_MASK_CH_GAIN_SCALING_N		GENMASK(6, 5)
++#define   AD3552R_MASK_CH_GAIN_SCALING_P		GENMASK(4, 3)
++#define   AD3552R_MASK_CH_OFFSET_POLARITY		BIT(2)
++#define   AD3552R_MASK_CH_OFFSET_BIT_8			BIT(8)
++/*
++ * Secondary region
++ * For multibyte registers specify the highest address because the access is
++ * done in descending order
++ */
++#define AD3552R_SECONDARY_REGION_START			0x28
++#define AD3552R_REG_ADDR_HW_LDAC_16B			0x28
++#define AD3552R_REG_ADDR_CH_DAC_16B(ch)			(0x2C - (1 - (ch)) * 2)
++#define AD3552R_REG_ADDR_DAC_PAGE_MASK_16B		0x2E
++#define AD3552R_REG_ADDR_CH_SELECT_16B			0x2F
++#define AD3552R_REG_ADDR_INPUT_PAGE_MASK_16B		0x31
++#define AD3552R_REG_ADDR_SW_LDAC_16B			0x32
++#define AD3552R_REG_ADDR_CH_INPUT_16B(ch)		(0x36 - (1 - (ch)) * 2)
++/* 3 bytes registers */
++#define AD3552R_REG_START_24B				0x37
++#define AD3552R_REG_ADDR_HW_LDAC_24B			0x37
++#define AD3552R_REG_ADDR_CH_DAC_24B(ch)			(0x3D - (1 - (ch)) * 3)
++#define AD3552R_REG_ADDR_DAC_PAGE_MASK_24B		0x40
++#define AD3552R_REG_ADDR_CH_SELECT_24B			0x41
++#define AD3552R_REG_ADDR_INPUT_PAGE_MASK_24B		0x44
++#define AD3552R_REG_ADDR_SW_LDAC_24B			0x45
++#define AD3552R_REG_ADDR_CH_INPUT_24B(ch)		(0x4B - (1 - (ch)) * 3)
++
++#define AD3552R_MAX_CH					2
++#define AD3552R_MASK_CH(ch)				BIT(ch)
++#define AD3552R_MASK_ALL_CH				GENMASK(1, 0)
++#define AD3552R_MAX_REG_SIZE				3
++#define AD3552R_READ_BIT				BIT(7)
++#define AD3552R_ADDR_MASK				GENMASK(6, 0)
++#define AD3552R_MASK_DAC_12B				GENMASK(15, 4)
++#define AD3552R_DEFAULT_CONFIG_B_VALUE			0x8
++#define AD3552R_SCRATCH_PAD_TEST_VAL1			0x34
++#define AD3552R_SCRATCH_PAD_TEST_VAL2			0xB2
++#define AD3552R_GAIN_SCALE				1000
++#define AD3552R_LDAC_PULSE_US				100
++
++#define AD3552R_MAX_RANGES	5
++#define AD3542R_MAX_RANGES	5
++#define AD3552R_QUAD_SPI	2
++
++extern const s32 ad3552r_ch_ranges[AD3552R_MAX_RANGES][2];
++extern const s32 ad3542r_ch_ranges[AD3542R_MAX_RANGES][2];
++
++enum ad3552r_id {
++	AD3541R_ID = 0x400b,
++	AD3542R_ID = 0x4009,
++	AD3551R_ID = 0x400a,
++	AD3552R_ID = 0x4008,
++};
++
++struct ad3552r_model_data {
++	const char *model_name;
++	enum ad3552r_id chip_id;
++	unsigned int num_hw_channels;
++	const s32 (*ranges_table)[2];
++	int num_ranges;
++	bool requires_output_range;
++};
++
++struct ad3552r_ch_data {
++	s32	scale_int;
++	s32	scale_dec;
++	s32	offset_int;
++	s32	offset_dec;
++	s16	gain_offset;
++	u16	rfb;
++	u8	n;
++	u8	p;
++	u8	range;
++	bool	range_override;
++};
++
++enum ad3552r_ch_gain_scaling {
++	/* Gain scaling of 1 */
++	AD3552R_CH_GAIN_SCALING_1,
++	/* Gain scaling of 0.5 */
++	AD3552R_CH_GAIN_SCALING_0_5,
++	/* Gain scaling of 0.25 */
++	AD3552R_CH_GAIN_SCALING_0_25,
++	/* Gain scaling of 0.125 */
++	AD3552R_CH_GAIN_SCALING_0_125,
++};
++
++enum ad3552r_ch_vref_select {
++	/* Internal source with Vref I/O floating */
++	AD3552R_INTERNAL_VREF_PIN_FLOATING,
++	/* Internal source with Vref I/O at 2.5V */
++	AD3552R_INTERNAL_VREF_PIN_2P5V,
++	/* External source with Vref I/O as input */
++	AD3552R_EXTERNAL_VREF_PIN_INPUT
++};
++
++enum ad3542r_ch_output_range {
++	/* Range from 0 V to 2.5 V. Requires Rfb1x connection */
++	AD3542R_CH_OUTPUT_RANGE_0__2P5V,
++	/* Range from 0 V to 5 V. Requires Rfb1x connection  */
++	AD3542R_CH_OUTPUT_RANGE_0__5V,
++	/* Range from 0 V to 10 V. Requires Rfb2x connection  */
++	AD3542R_CH_OUTPUT_RANGE_0__10V,
++	/* Range from -5 V to 5 V. Requires Rfb2x connection  */
++	AD3542R_CH_OUTPUT_RANGE_NEG_5__5V,
++	/* Range from -2.5 V to 7.5 V. Requires Rfb2x connection  */
++	AD3542R_CH_OUTPUT_RANGE_NEG_2P5__7P5V,
++};
++
++enum ad3552r_ch_output_range {
++	/* Range from 0 V to 2.5 V. Requires Rfb1x connection */
++	AD3552R_CH_OUTPUT_RANGE_0__2P5V,
++	/* Range from 0 V to 5 V. Requires Rfb1x connection  */
++	AD3552R_CH_OUTPUT_RANGE_0__5V,
++	/* Range from 0 V to 10 V. Requires Rfb2x connection  */
++	AD3552R_CH_OUTPUT_RANGE_0__10V,
++	/* Range from -5 V to 5 V. Requires Rfb2x connection  */
++	AD3552R_CH_OUTPUT_RANGE_NEG_5__5V,
++	/* Range from -10 V to 10 V. Requires Rfb4x connection  */
++	AD3552R_CH_OUTPUT_RANGE_NEG_10__10V,
++};
++
++int ad3552r_get_output_range(struct device *dev,
++			     const struct ad3552r_model_data *model_info,
++			     struct fwnode_handle *child, u32 *val);
++int ad3552r_get_custom_gain(struct device *dev, struct fwnode_handle *child,
++			    u8 *gs_p, u8 *gs_n, u16 *rfb, s16 *goffs);
++u16 ad3552r_calc_custom_gain(u8 p, u8 n, s16 goffs);
++int ad3552r_get_ref_voltage(struct device *dev, u32 *val);
++int ad3552r_get_drive_strength(struct device *dev, u32 *val);
++void ad3552r_calc_gain_and_offset(struct ad3552r_ch_data *ch_data,
++				  const struct ad3552r_model_data *model_data);
++
++#endif /* __DRIVERS_IIO_DAC_AD3552R_H__ */
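
Much of this patch replaces the driver-local ad3552r_field_prep() with the generic FIELD_PREP() from <linux/bitfield.h>. A hypothetical userspace illustration, with GENMASK()/FIELD_PREP() re-defined below as simplified stand-ins for the kernel macros:

#include <stdio.h>

/* simplified stand-ins for <linux/bits.h> and <linux/bitfield.h> */
#define GENMASK(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(mask, val) \
	(((unsigned int)(val) << __builtin_ctz(mask)) & (mask))

int main(void)
{
	unsigned int reg = 0;

	/* AD3552R_MASK_CH_RANGE_OVERRIDE is BIT(7) */
	reg |= FIELD_PREP(1u << 7, 1);
	/* AD3552R_MASK_CH_GAIN_SCALING_P is GENMASK(4, 3): scaling 0.25 */
	reg |= FIELD_PREP(GENMASK(4, 3), 2);

	printf("gain reg = 0x%02x\n", reg);	/* prints 0x90 */
	return 0;
}
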
+diff --git a/drivers/iio/pressure/zpa2326.c b/drivers/iio/pressure/zpa2326.c
+index b4c6c7c4725694..8fae58db1d6393 100644
+--- a/drivers/iio/pressure/zpa2326.c
++++ b/drivers/iio/pressure/zpa2326.c
+@@ -582,7 +582,7 @@ static int zpa2326_fill_sample_buffer(struct iio_dev               *indio_dev,
+ 	struct {
+ 		u32 pressure;
+ 		u16 temperature;
+-		u64 timestamp;
++		aligned_s64 timestamp;
+ 	}   sample;
+ 	int err;
+ 
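
The aligned_s64 change matters because IIO expects the trailing timestamp in a scan buffer to be naturally aligned to 8 bytes, while a plain u64/s64 is only guaranteed 4-byte alignment on some 32-bit ABIs. A small sketch, using C11 alignas as a stand-in for the kernel's aligned_s64:

#include <stdalign.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct sample {
	uint32_t pressure;
	uint16_t temperature;
	alignas(8) int64_t timestamp;	/* plays the role of aligned_s64 */
};

int main(void)
{
	/* offset 8 and size 16 on all ABIs once alignment is forced */
	printf("timestamp offset = %zu, sizeof = %zu\n",
	       offsetof(struct sample, timestamp), sizeof(struct sample));
	return 0;
}
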
+diff --git a/drivers/leds/led-class-multicolor.c b/drivers/leds/led-class-multicolor.c
+index 30c1ecb5f361ee..c707be97049b71 100644
+--- a/drivers/leds/led-class-multicolor.c
++++ b/drivers/leds/led-class-multicolor.c
+@@ -61,7 +61,8 @@ static ssize_t multi_intensity_store(struct device *dev,
+ 	for (i = 0; i < mcled_cdev->num_colors; i++)
+ 		mcled_cdev->subled_info[i].intensity = intensity_value[i];
+ 
+-	led_set_brightness(led_cdev, led_cdev->brightness);
++	if (!test_bit(LED_BLINK_SW, &led_cdev->work_flags))
++		led_set_brightness(led_cdev, led_cdev->brightness);
+ 	ret = size;
+ err_out:
+ 	mutex_unlock(&led_cdev->led_access);
+diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
+index cb174e788a96c2..92c2fb618c8e1b 100644
+--- a/drivers/mailbox/mailbox.c
++++ b/drivers/mailbox/mailbox.c
+@@ -490,8 +490,8 @@ void mbox_free_channel(struct mbox_chan *chan)
+ 	if (chan->txdone_method == TXDONE_BY_ACK)
+ 		chan->txdone_method = TXDONE_BY_POLL;
+ 
+-	module_put(chan->mbox->dev->driver->owner);
+ 	spin_unlock_irqrestore(&chan->lock, flags);
++	module_put(chan->mbox->dev->driver->owner);
+ }
+ EXPORT_SYMBOL_GPL(mbox_free_channel);
+ 
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index e42f1400cea9d7..f5171167819b51 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -1733,7 +1733,12 @@ static CLOSURE_CALLBACK(cache_set_flush)
+ 			mutex_unlock(&b->write_lock);
+ 		}
+ 
+-	if (ca->alloc_thread)
++	/*
++	 * If the register_cache_set() call to bch_cache_set_alloc() failed,
++	 * ca has not been assigned a value and an error is returned.
++	 * So we need to check that ca is not NULL during bch_cache_set_unregister().
++	 */
++	if (ca && ca->alloc_thread)
+ 		kthread_stop(ca->alloc_thread);
+ 
+ 	if (c->journal.cur) {
+diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
+index 1e0d3b9b75d6fe..163a5bbd485f97 100644
+--- a/drivers/md/dm-raid.c
++++ b/drivers/md/dm-raid.c
+@@ -2410,7 +2410,7 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
+ 	 */
+ 	sb_retrieve_failed_devices(sb, failed_devices);
+ 	rdev_for_each(r, mddev) {
+-		if (test_bit(Journal, &rdev->flags) ||
++		if (test_bit(Journal, &r->flags) ||
+ 		    !r->sb_page)
+ 			continue;
+ 		sb2 = page_address(r->sb_page);
+diff --git a/drivers/md/dm-vdo/indexer/volume.c b/drivers/md/dm-vdo/indexer/volume.c
+index 655453bb276bed..425b3a74f4dbae 100644
+--- a/drivers/md/dm-vdo/indexer/volume.c
++++ b/drivers/md/dm-vdo/indexer/volume.c
+@@ -754,10 +754,11 @@ static int get_volume_page_protected(struct volume *volume, struct uds_request *
+ 				     u32 physical_page, struct cached_page **page_ptr)
+ {
+ 	struct cached_page *page;
++	unsigned int zone_number = request->zone_number;
+ 
+ 	get_page_from_cache(&volume->page_cache, physical_page, &page);
+ 	if (page != NULL) {
+-		if (request->zone_number == 0) {
++		if (zone_number == 0) {
+ 			/* Only one zone is allowed to update the LRU. */
+ 			make_page_most_recent(&volume->page_cache, page);
+ 		}
+@@ -767,7 +768,7 @@ static int get_volume_page_protected(struct volume *volume, struct uds_request *
+ 	}
+ 
+ 	/* Prepare to enqueue a read for the page. */
+-	end_pending_search(&volume->page_cache, request->zone_number);
++	end_pending_search(&volume->page_cache, zone_number);
+ 	mutex_lock(&volume->read_threads_mutex);
+ 
+ 	/*
+@@ -787,8 +788,7 @@ static int get_volume_page_protected(struct volume *volume, struct uds_request *
+ 		 * the order does not matter for correctness as it does below.
+ 		 */
+ 		mutex_unlock(&volume->read_threads_mutex);
+-		begin_pending_search(&volume->page_cache, physical_page,
+-				     request->zone_number);
++		begin_pending_search(&volume->page_cache, physical_page, zone_number);
+ 		return UDS_QUEUED;
+ 	}
+ 
+@@ -797,7 +797,7 @@ static int get_volume_page_protected(struct volume *volume, struct uds_request *
+ 	 * "search pending" state in careful order so no other thread can mess with the data before
+ 	 * the caller gets to look at it.
+ 	 */
+-	begin_pending_search(&volume->page_cache, physical_page, request->zone_number);
++	begin_pending_search(&volume->page_cache, physical_page, zone_number);
+ 	mutex_unlock(&volume->read_threads_mutex);
+ 	*page_ptr = page;
+ 	return UDS_SUCCESS;
+@@ -849,6 +849,7 @@ static int search_cached_index_page(struct volume *volume, struct uds_request *r
+ {
+ 	int result;
+ 	struct cached_page *page = NULL;
++	unsigned int zone_number = request->zone_number;
+ 	u32 physical_page = map_to_physical_page(volume->geometry, chapter,
+ 						 index_page_number);
+ 
+@@ -858,18 +859,18 @@ static int search_cached_index_page(struct volume *volume, struct uds_request *r
+ 	 * invalidation by the reader thread, before the reader thread has noticed that the
+ 	 * invalidate_counter has been incremented.
+ 	 */
+-	begin_pending_search(&volume->page_cache, physical_page, request->zone_number);
++	begin_pending_search(&volume->page_cache, physical_page, zone_number);
+ 
+ 	result = get_volume_page_protected(volume, request, physical_page, &page);
+ 	if (result != UDS_SUCCESS) {
+-		end_pending_search(&volume->page_cache, request->zone_number);
++		end_pending_search(&volume->page_cache, zone_number);
+ 		return result;
+ 	}
+ 
+ 	result = uds_search_chapter_index_page(&page->index_page, volume->geometry,
+ 					       &request->record_name,
+ 					       record_page_number);
+-	end_pending_search(&volume->page_cache, request->zone_number);
++	end_pending_search(&volume->page_cache, zone_number);
+ 	return result;
+ }
+ 
+@@ -882,6 +883,7 @@ int uds_search_cached_record_page(struct volume *volume, struct uds_request *req
+ {
+ 	struct cached_page *record_page;
+ 	struct index_geometry *geometry = volume->geometry;
++	unsigned int zone_number = request->zone_number;
+ 	int result;
+ 	u32 physical_page, page_number;
+ 
+@@ -905,11 +907,11 @@ int uds_search_cached_record_page(struct volume *volume, struct uds_request *req
+ 	 * invalidation by the reader thread, before the reader thread has noticed that the
+ 	 * invalidate_counter has been incremented.
+ 	 */
+-	begin_pending_search(&volume->page_cache, physical_page, request->zone_number);
++	begin_pending_search(&volume->page_cache, physical_page, zone_number);
+ 
+ 	result = get_volume_page_protected(volume, request, physical_page, &record_page);
+ 	if (result != UDS_SUCCESS) {
+-		end_pending_search(&volume->page_cache, request->zone_number);
++		end_pending_search(&volume->page_cache, zone_number);
+ 		return result;
+ 	}
+ 
+@@ -917,7 +919,7 @@ int uds_search_cached_record_page(struct volume *volume, struct uds_request *req
+ 			       &request->record_name, geometry, &request->old_metadata))
+ 		*found = true;
+ 
+-	end_pending_search(&volume->page_cache, request->zone_number);
++	end_pending_search(&volume->page_cache, zone_number);
+ 	return UDS_SUCCESS;
+ }
+ 
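
The hunks above snapshot request->zone_number before the request can be handed off to a reader thread. A minimal single-threaded sketch of that rule (all names hypothetical): copy what you still need into locals before the handoff, then use only the locals.

#include <stdio.h>

struct request { unsigned int zone_number; };

static struct request *queued;

static void enqueue(struct request *r)
{
	queued = r;	/* another thread may now free or reuse *r */
}

int main(void)
{
	struct request req = { .zone_number = 3 };
	unsigned int zone = req.zone_number;	/* snapshot before handoff */

	enqueue(&req);
	printf("still safe to use zone %u\n", zone);	/* not req.zone_number */
	return 0;
}
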
+diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
+index fbb4f57010da69..c12359fd3a420c 100644
+--- a/drivers/md/md-bitmap.c
++++ b/drivers/md/md-bitmap.c
+@@ -787,7 +787,7 @@ static int md_bitmap_new_disk_sb(struct bitmap *bitmap)
+ 	 * is a good choice?  We choose COUNTER_MAX / 2 arbitrarily.
+ 	 */
+ 	write_behind = bitmap->mddev->bitmap_info.max_write_behind;
+-	if (write_behind > COUNTER_MAX)
++	if (write_behind > COUNTER_MAX / 2)
+ 		write_behind = COUNTER_MAX / 2;
+ 	sb->write_behind = cpu_to_le32(write_behind);
+ 	bitmap->mddev->bitmap_info.max_write_behind = write_behind;
+diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
+index c70d9c24c6fb34..957d620ad671c1 100644
+--- a/drivers/media/usb/uvc/uvc_ctrl.c
++++ b/drivers/media/usb/uvc/uvc_ctrl.c
+@@ -1848,7 +1848,7 @@ static int uvc_ctrl_commit_entity(struct uvc_device *dev,
+ 	unsigned int processed_ctrls = 0;
+ 	struct uvc_control *ctrl;
+ 	unsigned int i;
+-	int ret;
++	int ret = 0;
+ 
+ 	if (entity == NULL)
+ 		return 0;
+@@ -1877,8 +1877,6 @@ static int uvc_ctrl_commit_entity(struct uvc_device *dev,
+ 				dev->intfnum, ctrl->info.selector,
+ 				uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT),
+ 				ctrl->info.size);
+-		else
+-			ret = 0;
+ 
+ 		if (!ret)
+ 			processed_ctrls++;
+@@ -1890,17 +1888,24 @@ static int uvc_ctrl_commit_entity(struct uvc_device *dev,
+ 
+ 		ctrl->dirty = 0;
+ 
+-		if (ret < 0) {
++		if (ret < 0 && !rollback) {
+ 			if (err_ctrl)
+ 				*err_ctrl = ctrl;
+-			return ret;
++			/*
++			 * If we fail to set a control, we need to roll back
++			 * the next ones.
++			 */
++			rollback = 1;
+ 		}
+ 
+-		if (!rollback && handle &&
++		if (!rollback && handle && !ret &&
+ 		    ctrl->info.flags & UVC_CTRL_FLAG_ASYNCHRONOUS)
+ 			uvc_ctrl_set_handle(handle, ctrl, handle);
+ 	}
+ 
++	if (ret)
++		return ret;
++
+ 	return processed_ctrls;
+ }
+ 
+@@ -1931,7 +1936,8 @@ int __uvc_ctrl_commit(struct uvc_fh *handle, int rollback,
+ 	struct uvc_video_chain *chain = handle->chain;
+ 	struct uvc_control *err_ctrl;
+ 	struct uvc_entity *entity;
+-	int ret = 0;
++	int ret_out = 0;
++	int ret;
+ 
+ 	/* Find the control. */
+ 	list_for_each_entry(entity, &chain->entities, chain) {
+@@ -1942,17 +1948,23 @@ int __uvc_ctrl_commit(struct uvc_fh *handle, int rollback,
+ 				ctrls->error_idx =
+ 					uvc_ctrl_find_ctrl_idx(entity, ctrls,
+ 							       err_ctrl);
+-			goto done;
++			/*
++			 * When we fail to commit an entity, we need to
++			 * restore the UVC_CTRL_DATA_BACKUP for all the
++			 * controls in the other entities, otherwise our cache
++			 * and the hardware will be out of sync.
++			 */
++			rollback = 1;
++
++			ret_out = ret;
+ 		} else if (ret > 0 && !rollback) {
+ 			uvc_ctrl_send_events(handle, entity,
+ 					     ctrls->controls, ctrls->count);
+ 		}
+ 	}
+ 
+-	ret = 0;
+-done:
+ 	mutex_unlock(&chain->ctrl_mutex);
+-	return ret;
++	return ret_out;
+ }
+ 
+ int uvc_ctrl_get(struct uvc_video_chain *chain,
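
The reworked commit path above keeps the first error, switches to rollback mode for the remaining controls, and reports that first error at the end. A tiny sketch of the shape, with a hypothetical commit_one() that fails on the third entry:

#include <stdio.h>

static int commit_one(int i)
{
	return i == 2 ? -5 : 0;	/* third entry fails */
}

int main(void)
{
	int ret_out = 0, rollback = 0;

	for (int i = 0; i < 4; i++) {
		int ret = commit_one(i);

		if (ret < 0 && !rollback) {
			rollback = 1;	/* restore the remaining entries */
			ret_out = ret;	/* remember the first error */
		}
	}
	printf("%d\n", ret_out);	/* prints -5 */
	return 0;
}
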
+diff --git a/drivers/mfd/max14577.c b/drivers/mfd/max14577.c
+index 6fce79ec2dc646..7e7e8af9af2246 100644
+--- a/drivers/mfd/max14577.c
++++ b/drivers/mfd/max14577.c
+@@ -456,6 +456,7 @@ static void max14577_i2c_remove(struct i2c_client *i2c)
+ {
+ 	struct max14577 *max14577 = i2c_get_clientdata(i2c);
+ 
++	device_init_wakeup(max14577->dev, false);
+ 	mfd_remove_devices(max14577->dev);
+ 	regmap_del_irq_chip(max14577->irq, max14577->irq_data);
+ 	if (max14577->dev_type == MAXIM_DEVICE_TYPE_MAX77836)
+diff --git a/drivers/misc/tps6594-pfsm.c b/drivers/misc/tps6594-pfsm.c
+index 9bcca1856bfeea..db3d6a21a21222 100644
+--- a/drivers/misc/tps6594-pfsm.c
++++ b/drivers/misc/tps6594-pfsm.c
+@@ -281,6 +281,9 @@ static int tps6594_pfsm_probe(struct platform_device *pdev)
+ 	pfsm->miscdev.minor = MISC_DYNAMIC_MINOR;
+ 	pfsm->miscdev.name = devm_kasprintf(dev, GFP_KERNEL, "pfsm-%ld-0x%02x",
+ 					    tps->chip_id, tps->reg);
++	if (!pfsm->miscdev.name)
++		return -ENOMEM;
++
+ 	pfsm->miscdev.fops = &tps6594_pfsm_fops;
+ 	pfsm->miscdev.parent = dev->parent;
+ 	pfsm->chip_id = tps->chip_id;
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 154f73f121ecaa..ad4aec522f4f84 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -2871,6 +2871,7 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+ {
+ 	struct bnxt_napi *bnapi = cpr->bnapi;
+ 	u32 raw_cons = cpr->cp_raw_cons;
++	bool flush_xdp = false;
+ 	u32 cons;
+ 	int rx_pkts = 0;
+ 	u8 event = 0;
+@@ -2924,6 +2925,8 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+ 			else
+ 				rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
+ 							   &event);
++			if (event & BNXT_REDIRECT_EVENT)
++				flush_xdp = true;
+ 			if (likely(rc >= 0))
+ 				rx_pkts += rc;
+ 			/* Increment rx_pkts when rc is -ENOMEM to count towards
+@@ -2948,7 +2951,7 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+ 		}
+ 	}
+ 
+-	if (event & BNXT_REDIRECT_EVENT) {
++	if (flush_xdp) {
+ 		xdp_do_flush();
+ 		event &= ~BNXT_REDIRECT_EVENT;
+ 	}
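
Here the per-iteration event mask can be overwritten on later loop passes, so a sticky flush_xdp local latches that at least one redirect happened. A minimal sketch of the latch, names hypothetical:

#include <stdio.h>

#define REDIRECT_EVENT	0x2

static unsigned int poll_one(int i)
{
	return i == 0 ? REDIRECT_EVENT : 0;	/* only the first packet redirects */
}

int main(void)
{
	int flush_xdp = 0;

	for (int i = 0; i < 3; i++) {
		unsigned int event = poll_one(i);	/* reset every iteration */

		if (event & REDIRECT_EVENT)
			flush_xdp = 1;			/* latch it */
	}
	if (flush_xdp)
		printf("xdp_do_flush()\n");		/* runs exactly once */
	return 0;
}
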
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+index 1619943fb2637a..4e8881b479e487 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h
++++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+@@ -485,7 +485,7 @@ static inline u64 _enetc_rd_reg64(void __iomem *reg)
+ 		tmp = ioread32(reg + 4);
+ 	} while (high != tmp);
+ 
+-	return le64_to_cpu((__le64)high << 32 | low);
++	return (u64)high << 32 | low;
+ }
+ #endif
+ 
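
_enetc_rd_reg64() reads a 64-bit counter as two 32-bit halves and re-reads the high word to detect a carry in between; the halves are already in CPU byte order, hence the dropped le64_to_cpu(). A userspace sketch of the loop, with read32() standing in for ioread32():

#include <stdint.h>
#include <stdio.h>

static uint32_t read32(const volatile uint32_t *reg)
{
	return *reg;	/* stand-in for ioread32() */
}

static uint64_t rd_reg64(const volatile uint32_t *reg)
{
	uint32_t low, high, tmp;

	do {
		high = read32(reg + 1);	/* high word, byte offset 4 */
		low  = read32(reg);
		tmp  = read32(reg + 1);	/* re-read to catch a carry */
	} while (high != tmp);

	return (uint64_t)high << 32 | low;	/* already CPU byte order */
}

int main(void)
{
	volatile uint32_t regs[2] = { 0xdeadbeef, 0x00000001 };

	printf("0x%016llx\n", (unsigned long long)rd_reg64(regs));
	return 0;
}
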
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+index 0eeda7e502db26..0f5758c273c229 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
++++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+@@ -321,7 +321,7 @@ static int ionic_xdp_post_frame(struct ionic_queue *q, struct xdp_frame *frame,
+ 					   len, DMA_TO_DEVICE);
+ 	} else /* XDP_REDIRECT */ {
+ 		dma_addr = ionic_tx_map_single(q, frame->data, len);
+-		if (!dma_addr)
++		if (dma_addr == DMA_MAPPING_ERROR)
+ 			return -EIO;
+ 	}
+ 
+@@ -357,7 +357,7 @@ static int ionic_xdp_post_frame(struct ionic_queue *q, struct xdp_frame *frame,
+ 			} else {
+ 				dma_addr = ionic_tx_map_frag(q, frag, 0,
+ 							     skb_frag_size(frag));
+-				if (dma_mapping_error(q->dev, dma_addr)) {
++				if (dma_addr == DMA_MAPPING_ERROR) {
+ 					ionic_tx_desc_unmap_bufs(q, desc_info);
+ 					return -EIO;
+ 				}
+@@ -1083,7 +1083,7 @@ static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
+ 		net_warn_ratelimited("%s: DMA single map failed on %s!\n",
+ 				     dev_name(dev), q->name);
+ 		q_to_tx_stats(q)->dma_map_err++;
+-		return 0;
++		return DMA_MAPPING_ERROR;
+ 	}
+ 	return dma_addr;
+ }
+@@ -1100,7 +1100,7 @@ static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
+ 		net_warn_ratelimited("%s: DMA frag map failed on %s!\n",
+ 				     dev_name(dev), q->name);
+ 		q_to_tx_stats(q)->dma_map_err++;
+-		return 0;
++		return DMA_MAPPING_ERROR;
+ 	}
+ 	return dma_addr;
+ }
+@@ -1116,7 +1116,7 @@ static int ionic_tx_map_skb(struct ionic_queue *q, struct sk_buff *skb,
+ 	int frag_idx;
+ 
+ 	dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb));
+-	if (!dma_addr)
++	if (dma_addr == DMA_MAPPING_ERROR)
+ 		return -EIO;
+ 	buf_info->dma_addr = dma_addr;
+ 	buf_info->len = skb_headlen(skb);
+@@ -1126,7 +1126,7 @@ static int ionic_tx_map_skb(struct ionic_queue *q, struct sk_buff *skb,
+ 	nfrags = skb_shinfo(skb)->nr_frags;
+ 	for (frag_idx = 0; frag_idx < nfrags; frag_idx++, frag++) {
+ 		dma_addr = ionic_tx_map_frag(q, frag, 0, skb_frag_size(frag));
+-		if (!dma_addr)
++		if (dma_addr == DMA_MAPPING_ERROR)
+ 			goto dma_fail;
+ 		buf_info->dma_addr = dma_addr;
+ 		buf_info->len = skb_frag_size(frag);
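
The ionic fix exists because 0 can be a perfectly valid DMA address, so mapping failure must be signalled with the all-ones DMA_MAPPING_ERROR sentinel instead of 0. A minimal sketch; the sentinel mirrors the kernel's definition in <linux/dma-mapping.h> and map_single() is hypothetical:

#include <stdint.h>

typedef uint64_t dma_addr_t;
#define DMA_MAPPING_ERROR	(~(dma_addr_t)0)

static dma_addr_t map_single(void *buf)
{
	/* hypothetical mapping that may legitimately return 0 */
	return buf ? (dma_addr_t)(uintptr_t)buf : DMA_MAPPING_ERROR;
}

int main(void)
{
	dma_addr_t addr = map_single((void *)0);	/* forced failure */

	return addr == DMA_MAPPING_ERROR ? 0 : 1;	/* detect it, not !addr */
}
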
+diff --git a/drivers/net/ethernet/realtek/r8169.h b/drivers/net/ethernet/realtek/r8169.h
+index e2db944e6fa8bd..be4c9622618d84 100644
+--- a/drivers/net/ethernet/realtek/r8169.h
++++ b/drivers/net/ethernet/realtek/r8169.h
+@@ -68,6 +68,7 @@ enum mac_version {
+ 	/* support for RTL_GIGA_MAC_VER_60 has been removed */
+ 	RTL_GIGA_MAC_VER_61,
+ 	RTL_GIGA_MAC_VER_63,
++	RTL_GIGA_MAC_VER_64,
+ 	RTL_GIGA_MAC_VER_65,
+ 	RTL_GIGA_MAC_VER_66,
+ 	RTL_GIGA_MAC_NONE
+diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
+index 85bb5121cd245d..7b82779e4cd5d2 100644
+--- a/drivers/net/ethernet/realtek/r8169_main.c
++++ b/drivers/net/ethernet/realtek/r8169_main.c
+@@ -55,6 +55,7 @@
+ #define FIRMWARE_8107E_2	"rtl_nic/rtl8107e-2.fw"
+ #define FIRMWARE_8125A_3	"rtl_nic/rtl8125a-3.fw"
+ #define FIRMWARE_8125B_2	"rtl_nic/rtl8125b-2.fw"
++#define FIRMWARE_8125D_1	"rtl_nic/rtl8125d-1.fw"
+ #define FIRMWARE_8126A_2	"rtl_nic/rtl8126a-2.fw"
+ #define FIRMWARE_8126A_3	"rtl_nic/rtl8126a-3.fw"
+ 
+@@ -138,6 +139,7 @@ static const struct {
+ 	[RTL_GIGA_MAC_VER_61] = {"RTL8125A",		FIRMWARE_8125A_3},
+ 	/* reserve 62 for CFG_METHOD_4 in the vendor driver */
+ 	[RTL_GIGA_MAC_VER_63] = {"RTL8125B",		FIRMWARE_8125B_2},
++	[RTL_GIGA_MAC_VER_64] = {"RTL8125D",		FIRMWARE_8125D_1},
+ 	[RTL_GIGA_MAC_VER_65] = {"RTL8126A",		FIRMWARE_8126A_2},
+ 	[RTL_GIGA_MAC_VER_66] = {"RTL8126A",		FIRMWARE_8126A_3},
+ };
+@@ -707,6 +709,7 @@ MODULE_FIRMWARE(FIRMWARE_8168FP_3);
+ MODULE_FIRMWARE(FIRMWARE_8107E_2);
+ MODULE_FIRMWARE(FIRMWARE_8125A_3);
+ MODULE_FIRMWARE(FIRMWARE_8125B_2);
++MODULE_FIRMWARE(FIRMWARE_8125D_1);
+ MODULE_FIRMWARE(FIRMWARE_8126A_2);
+ MODULE_FIRMWARE(FIRMWARE_8126A_3);
+ 
+@@ -2098,10 +2101,7 @@ static void rtl_set_eee_txidle_timer(struct rtl8169_private *tp)
+ 		tp->tx_lpi_timer = timer_val;
+ 		r8168_mac_ocp_write(tp, 0xe048, timer_val);
+ 		break;
+-	case RTL_GIGA_MAC_VER_61:
+-	case RTL_GIGA_MAC_VER_63:
+-	case RTL_GIGA_MAC_VER_65:
+-	case RTL_GIGA_MAC_VER_66:
++	case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_66:
+ 		tp->tx_lpi_timer = timer_val;
+ 		RTL_W16(tp, EEE_TXIDLE_TIMER_8125, timer_val);
+ 		break;
+@@ -2233,6 +2233,9 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
+ 		{ 0x7cf, 0x64a,	RTL_GIGA_MAC_VER_66 },
+ 		{ 0x7cf, 0x649,	RTL_GIGA_MAC_VER_65 },
+ 
++		/* 8125D family. */
++		{ 0x7cf, 0x688,	RTL_GIGA_MAC_VER_64 },
++
+ 		/* 8125B family. */
+ 		{ 0x7cf, 0x641,	RTL_GIGA_MAC_VER_63 },
+ 
+@@ -2500,9 +2503,7 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
+ 	case RTL_GIGA_MAC_VER_61:
+ 		RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST);
+ 		break;
+-	case RTL_GIGA_MAC_VER_63:
+-	case RTL_GIGA_MAC_VER_65:
+-	case RTL_GIGA_MAC_VER_66:
++	case RTL_GIGA_MAC_VER_63 ... RTL_GIGA_MAC_VER_66:
+ 		RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST |
+ 			RX_PAUSE_SLOT_ON);
+ 		break;
+@@ -3840,6 +3841,12 @@ static void rtl_hw_start_8125b(struct rtl8169_private *tp)
+ 	rtl_hw_start_8125_common(tp);
+ }
+ 
++static void rtl_hw_start_8125d(struct rtl8169_private *tp)
++{
++	rtl_set_def_aspm_entry_latency(tp);
++	rtl_hw_start_8125_common(tp);
++}
++
+ static void rtl_hw_start_8126a(struct rtl8169_private *tp)
+ {
+ 	rtl_disable_zrxdc_timeout(tp);
+@@ -3889,6 +3896,7 @@ static void rtl_hw_config(struct rtl8169_private *tp)
+ 		[RTL_GIGA_MAC_VER_53] = rtl_hw_start_8117,
+ 		[RTL_GIGA_MAC_VER_61] = rtl_hw_start_8125a_2,
+ 		[RTL_GIGA_MAC_VER_63] = rtl_hw_start_8125b,
++		[RTL_GIGA_MAC_VER_64] = rtl_hw_start_8125d,
+ 		[RTL_GIGA_MAC_VER_65] = rtl_hw_start_8126a,
+ 		[RTL_GIGA_MAC_VER_66] = rtl_hw_start_8126a,
+ 	};
+@@ -3906,6 +3914,7 @@ static void rtl_hw_start_8125(struct rtl8169_private *tp)
+ 	/* disable interrupt coalescing */
+ 	switch (tp->mac_version) {
+ 	case RTL_GIGA_MAC_VER_61:
++	case RTL_GIGA_MAC_VER_64:
+ 		for (i = 0xa00; i < 0xb00; i += 4)
+ 			RTL_W32(tp, i, 0);
+ 		break;
+diff --git a/drivers/net/ethernet/realtek/r8169_phy_config.c b/drivers/net/ethernet/realtek/r8169_phy_config.c
+index cf29b120848269..d09b2a41cd062e 100644
+--- a/drivers/net/ethernet/realtek/r8169_phy_config.c
++++ b/drivers/net/ethernet/realtek/r8169_phy_config.c
+@@ -1104,6 +1104,15 @@ static void rtl8125b_hw_phy_config(struct rtl8169_private *tp,
+ 	rtl8125b_config_eee_phy(phydev);
+ }
+ 
++static void rtl8125d_hw_phy_config(struct rtl8169_private *tp,
++				   struct phy_device *phydev)
++{
++	r8169_apply_firmware(tp);
++	rtl8125_legacy_force_mode(phydev);
++	rtl8168g_disable_aldps(phydev);
++	rtl8125b_config_eee_phy(phydev);
++}
++
+ static void rtl8126a_hw_phy_config(struct rtl8169_private *tp,
+ 				   struct phy_device *phydev)
+ {
+@@ -1160,6 +1169,7 @@ void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev,
+ 		[RTL_GIGA_MAC_VER_53] = rtl8117_hw_phy_config,
+ 		[RTL_GIGA_MAC_VER_61] = rtl8125a_2_hw_phy_config,
+ 		[RTL_GIGA_MAC_VER_63] = rtl8125b_hw_phy_config,
++		[RTL_GIGA_MAC_VER_64] = rtl8125d_hw_phy_config,
+ 		[RTL_GIGA_MAC_VER_65] = rtl8126a_hw_phy_config,
+ 		[RTL_GIGA_MAC_VER_66] = rtl8126a_hw_phy_config,
+ 	};
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 0250c5cb28ff21..36328298dc9b81 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -3603,7 +3603,6 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev)
+ {
+ 	struct stmmac_priv *priv = netdev_priv(dev);
+ 	enum request_irq_err irq_err;
+-	cpumask_t cpu_mask;
+ 	int irq_idx = 0;
+ 	char *int_name;
+ 	int ret;
+@@ -3732,9 +3731,8 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev)
+ 			irq_idx = i;
+ 			goto irq_error;
+ 		}
+-		cpumask_clear(&cpu_mask);
+-		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
+-		irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
++		irq_set_affinity_hint(priv->rx_irq[i],
++				      cpumask_of(i % num_online_cpus()));
+ 	}
+ 
+ 	/* Request Tx MSI irq */
+@@ -3757,9 +3755,8 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev)
+ 			irq_idx = i;
+ 			goto irq_error;
+ 		}
+-		cpumask_clear(&cpu_mask);
+-		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
+-		irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
++		irq_set_affinity_hint(priv->tx_irq[i],
++				      cpumask_of(i % num_online_cpus()));
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
+index 71c891d14fb626..d8a6fea961c02b 100644
+--- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c
++++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
+@@ -1336,6 +1336,7 @@ static void wx_tx_csum(struct wx_ring *tx_ring, struct wx_tx_buffer *first,
+ 	u8 tun_prot = 0;
+ 
+ 	if (skb->ip_summed != CHECKSUM_PARTIAL) {
++csum_failed:
+ 		if (!(first->tx_flags & WX_TX_FLAGS_HW_VLAN) &&
+ 		    !(first->tx_flags & WX_TX_FLAGS_CC))
+ 			return;
+@@ -1429,7 +1430,8 @@ static void wx_tx_csum(struct wx_ring *tx_ring, struct wx_tx_buffer *first,
+ 					WX_TXD_L4LEN_SHIFT;
+ 			break;
+ 		default:
+-			break;
++			skb_checksum_help(skb);
++			goto csum_failed;
+ 		}
+ 
+ 		/* update TX checksum flag */
+@@ -2425,7 +2427,7 @@ static int wx_alloc_page_pool(struct wx_ring *rx_ring)
+ 	struct page_pool_params pp_params = {
+ 		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+ 		.order = 0,
+-		.pool_size = rx_ring->size,
++		.pool_size = rx_ring->count,
+ 		.nid = dev_to_node(rx_ring->dev),
+ 		.dev = rx_ring->dev,
+ 		.dma_dir = DMA_FROM_DEVICE,
+diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
+index 166f6a7283731e..8ce5705af69c59 100644
+--- a/drivers/net/phy/realtek.c
++++ b/drivers/net/phy/realtek.c
+@@ -92,6 +92,7 @@
+ 
+ #define RTL_GENERIC_PHYID			0x001cc800
+ #define RTL_8211FVD_PHYID			0x001cc878
++#define RTL_8221B				0x001cc840
+ #define RTL_8221B_VB_CG				0x001cc849
+ #define RTL_8221B_VN_CG				0x001cc84a
+ #define RTL_8251B				0x001cc862
+@@ -1040,6 +1041,23 @@ static bool rtlgen_supports_2_5gbps(struct phy_device *phydev)
+ 	return val >= 0 && val & MDIO_PMA_SPEED_2_5G;
+ }
+ 
++/* On internal PHYs, MMD reads over C22 always return 0.
++ * Check an MMD register which is known to be non-zero.
++ */
++static bool rtlgen_supports_mmd(struct phy_device *phydev)
++{
++	int val;
++
++	phy_lock_mdio_bus(phydev);
++	__phy_write(phydev, MII_MMD_CTRL, MDIO_MMD_PCS);
++	__phy_write(phydev, MII_MMD_DATA, MDIO_PCS_EEE_ABLE);
++	__phy_write(phydev, MII_MMD_CTRL, MDIO_MMD_PCS | MII_MMD_CTRL_NOINCR);
++	val = __phy_read(phydev, MII_MMD_DATA);
++	phy_unlock_mdio_bus(phydev);
++
++	return val > 0;
++}
++
+ static int rtlgen_match_phy_device(struct phy_device *phydev)
+ {
+ 	return phydev->phy_id == RTL_GENERIC_PHYID &&
+@@ -1049,7 +1067,8 @@ static int rtlgen_match_phy_device(struct phy_device *phydev)
+ static int rtl8226_match_phy_device(struct phy_device *phydev)
+ {
+ 	return phydev->phy_id == RTL_GENERIC_PHYID &&
+-	       rtlgen_supports_2_5gbps(phydev);
++	       rtlgen_supports_2_5gbps(phydev) &&
++	       rtlgen_supports_mmd(phydev);
+ }
+ 
+ static int rtlgen_is_c45_match(struct phy_device *phydev, unsigned int id,
+@@ -1061,6 +1080,11 @@ static int rtlgen_is_c45_match(struct phy_device *phydev, unsigned int id,
+ 		return !is_c45 && (id == phydev->phy_id);
+ }
+ 
++static int rtl8221b_match_phy_device(struct phy_device *phydev)
++{
++	return phydev->phy_id == RTL_8221B && rtlgen_supports_mmd(phydev);
++}
++
+ static int rtl8221b_vb_cg_c22_match_phy_device(struct phy_device *phydev)
+ {
+ 	return rtlgen_is_c45_match(phydev, RTL_8221B_VB_CG, false);
+@@ -1081,9 +1105,22 @@ static int rtl8221b_vn_cg_c45_match_phy_device(struct phy_device *phydev)
+ 	return rtlgen_is_c45_match(phydev, RTL_8221B_VN_CG, true);
+ }
+ 
+-static int rtl8251b_c22_match_phy_device(struct phy_device *phydev)
++static int rtl_internal_nbaset_match_phy_device(struct phy_device *phydev)
+ {
+-	return rtlgen_is_c45_match(phydev, RTL_8251B, false);
++	if (phydev->is_c45)
++		return false;
++
++	switch (phydev->phy_id) {
++	case RTL_GENERIC_PHYID:
++	case RTL_8221B:
++	case RTL_8251B:
++	case 0x001cc841:
++		break;
++	default:
++		return false;
++	}
++
++	return rtlgen_supports_2_5gbps(phydev) && !rtlgen_supports_mmd(phydev);
+ }
+ 
+ static int rtl8251b_c45_match_phy_device(struct phy_device *phydev)
+@@ -1345,10 +1382,8 @@ static struct phy_driver realtek_drvs[] = {
+ 		.resume		= rtlgen_resume,
+ 		.read_page	= rtl821x_read_page,
+ 		.write_page	= rtl821x_write_page,
+-		.read_mmd	= rtl822x_read_mmd,
+-		.write_mmd	= rtl822x_write_mmd,
+ 	}, {
+-		PHY_ID_MATCH_EXACT(0x001cc840),
++		.match_phy_device = rtl8221b_match_phy_device,
+ 		.name		= "RTL8226B_RTL8221B 2.5Gbps PHY",
+ 		.get_features	= rtl822x_get_features,
+ 		.config_aneg	= rtl822x_config_aneg,
+@@ -1359,8 +1394,6 @@ static struct phy_driver realtek_drvs[] = {
+ 		.resume		= rtlgen_resume,
+ 		.read_page	= rtl821x_read_page,
+ 		.write_page	= rtl821x_write_page,
+-		.read_mmd	= rtl822x_read_mmd,
+-		.write_mmd	= rtl822x_write_mmd,
+ 	}, {
+ 		PHY_ID_MATCH_EXACT(0x001cc838),
+ 		.name           = "RTL8226-CG 2.5Gbps PHY",
+@@ -1438,8 +1471,9 @@ static struct phy_driver realtek_drvs[] = {
+ 		.read_page      = rtl821x_read_page,
+ 		.write_page     = rtl821x_write_page,
+ 	}, {
+-		.match_phy_device = rtl8251b_c22_match_phy_device,
+-		.name           = "RTL8126A-internal 5Gbps PHY",
++		.match_phy_device = rtl_internal_nbaset_match_phy_device,
++		.name           = "Realtek Internal NBASE-T PHY",
++		.flags		= PHY_IS_INTERNAL,
+ 		.get_features   = rtl822x_get_features,
+ 		.config_aneg    = rtl822x_config_aneg,
+ 		.read_status    = rtl822x_read_status,
+diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
+index 4cc72be28c7319..25e486e6e8054a 100644
+--- a/drivers/nvme/host/tcp.c
++++ b/drivers/nvme/host/tcp.c
+@@ -453,7 +453,8 @@ nvme_tcp_fetch_request(struct nvme_tcp_queue *queue)
+ 			return NULL;
+ 	}
+ 
+-	list_del(&req->entry);
++	list_del_init(&req->entry);
++	init_llist_node(&req->lentry);
+ 	return req;
+ }
+ 
+@@ -561,6 +562,8 @@ static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
+ 	req->queue = queue;
+ 	nvme_req(rq)->ctrl = &ctrl->ctrl;
+ 	nvme_req(rq)->cmd = &pdu->cmd;
++	init_llist_node(&req->lentry);
++	INIT_LIST_HEAD(&req->entry);
+ 
+ 	return 0;
+ }
+@@ -765,6 +768,14 @@ static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
+ 		return -EPROTO;
+ 	}
+ 
++	if (llist_on_list(&req->lentry) ||
++	    !list_empty(&req->entry)) {
++		dev_err(queue->ctrl->ctrl.device,
++			"req %d unexpected r2t while processing request\n",
++			rq->tag);
++		return -EPROTO;
++	}
++
+ 	req->pdu_len = 0;
+ 	req->h2cdata_left = r2t_length;
+ 	req->h2cdata_offset = r2t_offset;
+@@ -1349,7 +1360,7 @@ static int nvme_tcp_try_recv(struct nvme_tcp_queue *queue)
+ 	queue->nr_cqe = 0;
+ 	consumed = sock->ops->read_sock(sk, &rd_desc, nvme_tcp_recv_skb);
+ 	release_sock(sk);
+-	return consumed;
++	return consumed == -EAGAIN ? 0 : consumed;
+ }
+ 
+ static void nvme_tcp_io_work(struct work_struct *w)
+@@ -1377,6 +1388,11 @@ static void nvme_tcp_io_work(struct work_struct *w)
+ 		else if (unlikely(result < 0))
+ 			return;
+ 
++		/* did we get some space after spending time in recv? */
++		if (nvme_tcp_queue_has_pending(queue) &&
++		    sk_stream_is_writeable(queue->sock->sk))
++			pending = true;
++
+ 		if (!pending || !queue->rd_enabled)
+ 			return;
+ 
+@@ -2594,6 +2610,8 @@ static void nvme_tcp_submit_async_event(struct nvme_ctrl *arg)
+ 	ctrl->async_req.offset = 0;
+ 	ctrl->async_req.curr_bio = NULL;
+ 	ctrl->async_req.data_len = 0;
++	init_llist_node(&ctrl->async_req.lentry);
++	INIT_LIST_HEAD(&ctrl->async_req.entry);
+ 
+ 	nvme_tcp_queue_request(&ctrl->async_req, true, true);
+ }
+diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
+index ad3028b755d16a..3b24fed3177de8 100644
+--- a/drivers/pci/controller/dwc/pci-imx6.c
++++ b/drivers/pci/controller/dwc/pci-imx6.c
+@@ -48,6 +48,8 @@
+ #define IMX95_PCIE_SS_RW_REG_0			0xf0
+ #define IMX95_PCIE_REF_CLKEN			BIT(23)
+ #define IMX95_PCIE_PHY_CR_PARA_SEL		BIT(9)
++#define IMX95_PCIE_SS_RW_REG_1			0xf4
++#define IMX95_PCIE_SYS_AUX_PWR_DET		BIT(31)
+ 
+ #define IMX95_PE0_GEN_CTRL_1			0x1050
+ #define IMX95_PCIE_DEVICE_TYPE			GENMASK(3, 0)
+@@ -206,6 +208,19 @@ static unsigned int imx_pcie_grp_offset(const struct imx_pcie *imx_pcie)
+ 
+ static int imx95_pcie_init_phy(struct imx_pcie *imx_pcie)
+ {
++	/*
++	 * ERR051624: The Controller Without Vaux Cannot Exit L23 Ready
++	 * Through Beacon or PERST# De-assertion
++	 *
++	 * When the auxiliary power is not available, the controller
++	 * cannot exit from L23 Ready with beacon or PERST# de-assertion
++	 * when main power is not removed.
++	 *
++	 * Workaround: Set SS_RW_REG_1[SYS_AUX_PWR_DET] to 1.
++	 */
++	regmap_set_bits(imx_pcie->iomuxc_gpr, IMX95_PCIE_SS_RW_REG_1,
++			IMX95_PCIE_SYS_AUX_PWR_DET);
++
+ 	regmap_update_bits(imx_pcie->iomuxc_gpr,
+ 			IMX95_PCIE_SS_RW_REG_0,
+ 			IMX95_PCIE_PHY_CR_PARA_SEL,
+diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
+index 6d6cbc8b5b2c67..d40afe74ddd1a3 100644
+--- a/drivers/pci/controller/dwc/pcie-designware.c
++++ b/drivers/pci/controller/dwc/pcie-designware.c
+@@ -752,22 +752,19 @@ static void dw_pcie_link_set_max_link_width(struct dw_pcie *pci, u32 num_lanes)
+ 	/* Set link width speed control register */
+ 	lwsc = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
+ 	lwsc &= ~PORT_LOGIC_LINK_WIDTH_MASK;
++	lwsc |= PORT_LOGIC_LINK_WIDTH_1_LANES;
+ 	switch (num_lanes) {
+ 	case 1:
+ 		plc |= PORT_LINK_MODE_1_LANES;
+-		lwsc |= PORT_LOGIC_LINK_WIDTH_1_LANES;
+ 		break;
+ 	case 2:
+ 		plc |= PORT_LINK_MODE_2_LANES;
+-		lwsc |= PORT_LOGIC_LINK_WIDTH_2_LANES;
+ 		break;
+ 	case 4:
+ 		plc |= PORT_LINK_MODE_4_LANES;
+-		lwsc |= PORT_LOGIC_LINK_WIDTH_4_LANES;
+ 		break;
+ 	case 8:
+ 		plc |= PORT_LINK_MODE_8_LANES;
+-		lwsc |= PORT_LOGIC_LINK_WIDTH_8_LANES;
+ 		break;
+ 	default:
+ 		dev_err(pci->dev, "num-lanes %u: invalid value\n", num_lanes);
+diff --git a/drivers/pci/controller/pcie-apple.c b/drivers/pci/controller/pcie-apple.c
+index ddc65368e77d19..16725f9536f657 100644
+--- a/drivers/pci/controller/pcie-apple.c
++++ b/drivers/pci/controller/pcie-apple.c
+@@ -585,6 +585,9 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie,
+ 	list_add_tail(&port->entry, &pcie->ports);
+ 	init_completion(&pcie->event);
+ 
++	/* In the success path, we keep a reference to np around */
++	of_node_get(np);
++
+ 	ret = apple_pcie_port_register_irqs(port);
+ 	WARN_ON(ret);
+ 
+@@ -764,7 +767,6 @@ static int apple_pcie_init(struct pci_config_window *cfg)
+ {
+ 	struct device *dev = cfg->parent;
+ 	struct platform_device *platform = to_platform_device(dev);
+-	struct device_node *of_port;
+ 	struct apple_pcie *pcie;
+ 	int ret;
+ 
+@@ -787,11 +789,10 @@ static int apple_pcie_init(struct pci_config_window *cfg)
+ 	if (ret)
+ 		return ret;
+ 
+-	for_each_child_of_node(dev->of_node, of_port) {
++	for_each_available_child_of_node_scoped(dev->of_node, of_port) {
+ 		ret = apple_pcie_setup_port(pcie, of_port);
+ 		if (ret) {
+ 			dev_err(pcie->dev, "Port %pOF setup fail: %d\n", of_port, ret);
+-			of_node_put(of_port);
+ 			return ret;
+ 		}
+ 	}
+diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
+index 3a39e167bdbff8..d62fea0fbdfc1f 100644
+--- a/drivers/s390/crypto/pkey_api.c
++++ b/drivers/s390/crypto/pkey_api.c
+@@ -85,7 +85,7 @@ static void *_copy_apqns_from_user(void __user *uapqns, size_t nr_apqns)
+ 	if (!uapqns || nr_apqns == 0)
+ 		return NULL;
+ 
+-	return memdup_user(uapqns, nr_apqns * sizeof(struct pkey_apqn));
++	return memdup_array_user(uapqns, nr_apqns, sizeof(struct pkey_apqn));
+ }
+ 
+ static int pkey_ioctl_genseck(struct pkey_genseck __user *ugs)
+diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
+index 21f22e913cd08d..8a44e01ebf9b63 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_base.c
++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
+@@ -5910,7 +5910,11 @@ megasas_set_high_iops_queue_affinity_and_hint(struct megasas_instance *instance)
+ 	const struct cpumask *mask;
+ 
+ 	if (instance->perf_mode == MR_BALANCED_PERF_MODE) {
+-		mask = cpumask_of_node(dev_to_node(&instance->pdev->dev));
++		int nid = dev_to_node(&instance->pdev->dev);
++
++		if (nid == NUMA_NO_NODE)
++			nid = 0;
++		mask = cpumask_of_node(nid);
+ 
+ 		for (i = 0; i < instance->low_latency_index_start; i++) {
+ 			irq = pci_irq_vector(instance->pdev, i);
+diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
+index f9463f263fba16..12f8073cb5968e 100644
+--- a/drivers/spi/spi-cadence-quadspi.c
++++ b/drivers/spi/spi-cadence-quadspi.c
+@@ -1929,10 +1929,10 @@ static int cqspi_probe(struct platform_device *pdev)
+ 			goto probe_setup_failed;
+ 	}
+ 
+-	ret = devm_pm_runtime_enable(dev);
+-	if (ret) {
+-		if (cqspi->rx_chan)
+-			dma_release_channel(cqspi->rx_chan);
++	pm_runtime_enable(dev);
++
++	if (cqspi->rx_chan) {
++		dma_release_channel(cqspi->rx_chan);
+ 		goto probe_setup_failed;
+ 	}
+ 
+@@ -1952,6 +1952,7 @@ static int cqspi_probe(struct platform_device *pdev)
+ 	return 0;
+ probe_setup_failed:
+ 	cqspi_controller_enable(cqspi, 0);
++	pm_runtime_disable(dev);
+ probe_reset_failed:
+ 	if (cqspi->is_jh7110)
+ 		cqspi_jh7110_disable_clk(pdev, cqspi);
+@@ -1970,7 +1971,8 @@ static void cqspi_remove(struct platform_device *pdev)
+ 	if (cqspi->rx_chan)
+ 		dma_release_channel(cqspi->rx_chan);
+ 
+-	clk_disable_unprepare(cqspi->clk);
++	if (pm_runtime_get_sync(&pdev->dev) >= 0)
++		clk_disable(cqspi->clk);
+ 
+ 	if (cqspi->is_jh7110)
+ 		cqspi_jh7110_disable_clk(pdev, cqspi);
+diff --git a/drivers/spi/spi-fsl-qspi.c b/drivers/spi/spi-fsl-qspi.c
+index 79bac30e79af64..21e357966d2a22 100644
+--- a/drivers/spi/spi-fsl-qspi.c
++++ b/drivers/spi/spi-fsl-qspi.c
+@@ -839,6 +839,19 @@ static const struct spi_controller_mem_ops fsl_qspi_mem_ops = {
+ 	.get_name = fsl_qspi_get_name,
+ };
+ 
++static void fsl_qspi_cleanup(void *data)
++{
++	struct fsl_qspi *q = data;
++
++	/* disable the hardware */
++	qspi_writel(q, QUADSPI_MCR_MDIS_MASK, q->iobase + QUADSPI_MCR);
++	qspi_writel(q, 0x0, q->iobase + QUADSPI_RSER);
++
++	fsl_qspi_clk_disable_unprep(q);
++
++	mutex_destroy(&q->lock);
++}
++
+ static int fsl_qspi_probe(struct platform_device *pdev)
+ {
+ 	struct spi_controller *ctlr;
+@@ -928,15 +941,16 @@ static int fsl_qspi_probe(struct platform_device *pdev)
+ 
+ 	ctlr->dev.of_node = np;
+ 
++	ret = devm_add_action_or_reset(dev, fsl_qspi_cleanup, q);
++	if (ret)
++		goto err_put_ctrl;
++
+ 	ret = devm_spi_register_controller(dev, ctlr);
+ 	if (ret)
+-		goto err_destroy_mutex;
++		goto err_put_ctrl;
+ 
+ 	return 0;
+ 
+-err_destroy_mutex:
+-	mutex_destroy(&q->lock);
+-
+ err_disable_clk:
+ 	fsl_qspi_clk_disable_unprep(q);
+ 
+@@ -947,19 +961,6 @@ static int fsl_qspi_probe(struct platform_device *pdev)
+ 	return ret;
+ }
+ 
+-static void fsl_qspi_remove(struct platform_device *pdev)
+-{
+-	struct fsl_qspi *q = platform_get_drvdata(pdev);
+-
+-	/* disable the hardware */
+-	qspi_writel(q, QUADSPI_MCR_MDIS_MASK, q->iobase + QUADSPI_MCR);
+-	qspi_writel(q, 0x0, q->iobase + QUADSPI_RSER);
+-
+-	fsl_qspi_clk_disable_unprep(q);
+-
+-	mutex_destroy(&q->lock);
+-}
+-
+ static int fsl_qspi_suspend(struct device *dev)
+ {
+ 	return 0;
+@@ -997,7 +998,6 @@ static struct platform_driver fsl_qspi_driver = {
+ 		.pm =   &fsl_qspi_pm_ops,
+ 	},
+ 	.probe          = fsl_qspi_probe,
+-	.remove_new	= fsl_qspi_remove,
+ };
+ module_platform_driver(fsl_qspi_driver);
+ 
+diff --git a/drivers/staging/rtl8723bs/core/rtw_security.c b/drivers/staging/rtl8723bs/core/rtw_security.c
+index 1e9eff01b1aa52..e9f382c280d9b0 100644
+--- a/drivers/staging/rtl8723bs/core/rtw_security.c
++++ b/drivers/staging/rtl8723bs/core/rtw_security.c
+@@ -868,29 +868,21 @@ static signed int aes_cipher(u8 *key, uint	hdrlen,
+ 		num_blocks, payload_index;
+ 
+ 	u8 pn_vector[6];
+-	u8 mic_iv[16];
+-	u8 mic_header1[16];
+-	u8 mic_header2[16];
+-	u8 ctr_preload[16];
++	u8 mic_iv[16] = {};
++	u8 mic_header1[16] = {};
++	u8 mic_header2[16] = {};
++	u8 ctr_preload[16] = {};
+ 
+ 	/* Intermediate Buffers */
+-	u8 chain_buffer[16];
+-	u8 aes_out[16];
+-	u8 padded_buffer[16];
++	u8 chain_buffer[16] = {};
++	u8 aes_out[16] = {};
++	u8 padded_buffer[16] = {};
+ 	u8 mic[8];
+ 	uint	frtype  = GetFrameType(pframe);
+ 	uint	frsubtype  = GetFrameSubType(pframe);
+ 
+ 	frsubtype = frsubtype>>4;
+ 
+-	memset((void *)mic_iv, 0, 16);
+-	memset((void *)mic_header1, 0, 16);
+-	memset((void *)mic_header2, 0, 16);
+-	memset((void *)ctr_preload, 0, 16);
+-	memset((void *)chain_buffer, 0, 16);
+-	memset((void *)aes_out, 0, 16);
+-	memset((void *)padded_buffer, 0, 16);
+-
+ 	if ((hdrlen == WLAN_HDR_A3_LEN) || (hdrlen ==  WLAN_HDR_A3_QOS_LEN))
+ 		a4_exists = 0;
+ 	else
+@@ -1080,15 +1072,15 @@ static signed int aes_decipher(u8 *key, uint	hdrlen,
+ 			num_blocks, payload_index;
+ 	signed int res = _SUCCESS;
+ 	u8 pn_vector[6];
+-	u8 mic_iv[16];
+-	u8 mic_header1[16];
+-	u8 mic_header2[16];
+-	u8 ctr_preload[16];
++	u8 mic_iv[16] = {};
++	u8 mic_header1[16] = {};
++	u8 mic_header2[16] = {};
++	u8 ctr_preload[16] = {};
+ 
+ 		/* Intermediate Buffers */
+-	u8 chain_buffer[16];
+-	u8 aes_out[16];
+-	u8 padded_buffer[16];
++	u8 chain_buffer[16] = {};
++	u8 aes_out[16] = {};
++	u8 padded_buffer[16] = {};
+ 	u8 mic[8];
+ 
+ 	uint frtype  = GetFrameType(pframe);
+@@ -1096,14 +1088,6 @@ static signed int aes_decipher(u8 *key, uint	hdrlen,
+ 
+ 	frsubtype = frsubtype>>4;
+ 
+-	memset((void *)mic_iv, 0, 16);
+-	memset((void *)mic_header1, 0, 16);
+-	memset((void *)mic_header2, 0, 16);
+-	memset((void *)ctr_preload, 0, 16);
+-	memset((void *)chain_buffer, 0, 16);
+-	memset((void *)aes_out, 0, 16);
+-	memset((void *)padded_buffer, 0, 16);
+-
+ 	/* start to decrypt the payload */
+ 
+ 	num_blocks = (plen-8) / 16; /* plen includes LLC, payload_length and mic */
+diff --git a/drivers/tty/serial/8250/8250_pci1xxxx.c b/drivers/tty/serial/8250/8250_pci1xxxx.c
+index f462b3d1c104ce..d6b01e015a96b6 100644
+--- a/drivers/tty/serial/8250/8250_pci1xxxx.c
++++ b/drivers/tty/serial/8250/8250_pci1xxxx.c
+@@ -115,6 +115,7 @@
+ 
+ #define UART_RESET_REG				0x94
+ #define UART_RESET_D3_RESET_DISABLE		BIT(16)
++#define UART_RESET_HOT_RESET_DISABLE		BIT(17)
+ 
+ #define UART_BURST_STATUS_REG			0x9C
+ #define UART_TX_BURST_FIFO			0xA0
+@@ -620,6 +621,10 @@ static int pci1xxxx_suspend(struct device *dev)
+ 	}
+ 
+ 	data = readl(p + UART_RESET_REG);
++
++	if (priv->dev_rev >= 0xC0)
++		data |= UART_RESET_HOT_RESET_DISABLE;
++
+ 	writel(data | UART_RESET_D3_RESET_DISABLE, p + UART_RESET_REG);
+ 
+ 	if (wakeup)
+@@ -647,7 +652,12 @@ static int pci1xxxx_resume(struct device *dev)
+ 	}
+ 
+ 	data = readl(p + UART_RESET_REG);
++
++	if (priv->dev_rev >= 0xC0)
++		data &= ~UART_RESET_HOT_RESET_DISABLE;
++
+ 	writel(data & ~UART_RESET_D3_RESET_DISABLE, p + UART_RESET_REG);
++
+ 	iounmap(p);
+ 
+ 	for (i = 0; i < priv->nr; i++) {
+diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
+index 8e3b15534bc72c..deb9635cb48dc2 100644
+--- a/drivers/tty/serial/imx.c
++++ b/drivers/tty/serial/imx.c
+@@ -233,6 +233,7 @@ struct imx_port {
+ 	enum imx_tx_state	tx_state;
+ 	struct hrtimer		trigger_start_tx;
+ 	struct hrtimer		trigger_stop_tx;
++	unsigned int		rxtl;
+ };
+ 
+ struct imx_port_ucrs {
+@@ -1328,6 +1329,7 @@ static void imx_uart_clear_rx_errors(struct imx_port *sport)
+ 
+ #define TXTL_DEFAULT 8
+ #define RXTL_DEFAULT 8 /* 8 characters or aging timer */
++#define RXTL_CONSOLE_DEFAULT 1
+ #define TXTL_DMA 8 /* DMA burst setting */
+ #define RXTL_DMA 9 /* DMA burst setting */
+ 
+@@ -1445,7 +1447,7 @@ static void imx_uart_disable_dma(struct imx_port *sport)
+ 	ucr1 &= ~(UCR1_RXDMAEN | UCR1_TXDMAEN | UCR1_ATDMAEN);
+ 	imx_uart_writel(sport, ucr1, UCR1);
+ 
+-	imx_uart_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT);
++	imx_uart_setup_ufcr(sport, TXTL_DEFAULT, sport->rxtl);
+ 
+ 	sport->dma_is_enabled = 0;
+ }
+@@ -1470,7 +1472,12 @@ static int imx_uart_startup(struct uart_port *port)
+ 		return retval;
+ 	}
+ 
+-	imx_uart_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT);
++	if (uart_console(&sport->port))
++		sport->rxtl = RXTL_CONSOLE_DEFAULT;
++	else
++		sport->rxtl = RXTL_DEFAULT;
++
++	imx_uart_setup_ufcr(sport, TXTL_DEFAULT, sport->rxtl);
+ 
+ 	/* disable the DREN bit (Data Ready interrupt enable) before
+ 	 * requesting IRQs
+@@ -1936,7 +1943,7 @@ static int imx_uart_poll_init(struct uart_port *port)
+ 	if (retval)
+ 		clk_disable_unprepare(sport->clk_ipg);
+ 
+-	imx_uart_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT);
++	imx_uart_setup_ufcr(sport, TXTL_DEFAULT, sport->rxtl);
+ 
+ 	uart_port_lock_irqsave(&sport->port, &flags);
+ 
+@@ -2028,7 +2035,7 @@ static int imx_uart_rs485_config(struct uart_port *port, struct ktermios *termio
+ 		/* If the receiver trigger is 0, set it to a default value */
+ 		ufcr = imx_uart_readl(sport, UFCR);
+ 		if ((ufcr & UFCR_RXTL_MASK) == 0)
+-			imx_uart_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT);
++			imx_uart_setup_ufcr(sport, TXTL_DEFAULT, sport->rxtl);
+ 		imx_uart_start_rx(port);
+ 	}
+ 
+@@ -2213,7 +2220,7 @@ imx_uart_console_setup(struct console *co, char *options)
+ 	else
+ 		imx_uart_console_get_options(sport, &baud, &parity, &bits);
+ 
+-	imx_uart_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT);
++	imx_uart_setup_ufcr(sport, TXTL_DEFAULT, sport->rxtl);
+ 
+ 	retval = uart_set_options(&sport->port, co, baud, parity, bits, flow);
+ 
+diff --git a/drivers/tty/serial/serial_base_bus.c b/drivers/tty/serial/serial_base_bus.c
+index 5d1677f1b651c2..cb3b127b06b613 100644
+--- a/drivers/tty/serial/serial_base_bus.c
++++ b/drivers/tty/serial/serial_base_bus.c
+@@ -72,6 +72,7 @@ static int serial_base_device_init(struct uart_port *port,
+ 	dev->parent = parent_dev;
+ 	dev->bus = &serial_base_bus_type;
+ 	dev->release = release;
++	device_set_of_node_from_dev(dev, parent_dev);
+ 
+ 	if (!serial_base_initialized) {
+ 		dev_dbg(port->dev, "uart_add_one_port() called before arch_initcall()?\n");
+diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c
+index 68357ac8ffe3ca..71890f3244a0fe 100644
+--- a/drivers/tty/serial/uartlite.c
++++ b/drivers/tty/serial/uartlite.c
+@@ -880,16 +880,6 @@ static int ulite_probe(struct platform_device *pdev)
+ 	pm_runtime_set_active(&pdev->dev);
+ 	pm_runtime_enable(&pdev->dev);
+ 
+-	if (!ulite_uart_driver.state) {
+-		dev_dbg(&pdev->dev, "uartlite: calling uart_register_driver()\n");
+-		ret = uart_register_driver(&ulite_uart_driver);
+-		if (ret < 0) {
+-			dev_err(&pdev->dev, "Failed to register driver\n");
+-			clk_disable_unprepare(pdata->clk);
+-			return ret;
+-		}
+-	}
+-
+ 	ret = ulite_assign(&pdev->dev, id, res->start, irq, pdata);
+ 
+ 	pm_runtime_mark_last_busy(&pdev->dev);
+@@ -929,16 +919,25 @@ static struct platform_driver ulite_platform_driver = {
+ 
+ static int __init ulite_init(void)
+ {
++	int ret;
++
++	pr_debug("uartlite: calling uart_register_driver()\n");
++	ret = uart_register_driver(&ulite_uart_driver);
++	if (ret)
++		return ret;
+ 
+ 	pr_debug("uartlite: calling platform_driver_register()\n");
+-	return platform_driver_register(&ulite_platform_driver);
++	ret = platform_driver_register(&ulite_platform_driver);
++	if (ret)
++		uart_unregister_driver(&ulite_uart_driver);
++
++	return ret;
+ }
+ 
+ static void __exit ulite_exit(void)
+ {
+ 	platform_driver_unregister(&ulite_platform_driver);
+-	if (ulite_uart_driver.state)
+-		uart_unregister_driver(&ulite_uart_driver);
++	uart_unregister_driver(&ulite_uart_driver);
+ }
+ 
+ module_init(ulite_init);
+diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
+index 374f505fec3d13..a6299cb19237c0 100644
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -1392,6 +1392,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us)
+ 	 * make sure that there are no outstanding requests when
+ 	 * clock scaling is in progress
+ 	 */
++	mutex_lock(&hba->host->scan_mutex);
+ 	blk_mq_quiesce_tagset(&hba->host->tag_set);
+ 	mutex_lock(&hba->wb_mutex);
+ 	down_write(&hba->clk_scaling_lock);
+@@ -1402,6 +1403,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us)
+ 		up_write(&hba->clk_scaling_lock);
+ 		mutex_unlock(&hba->wb_mutex);
+ 		blk_mq_unquiesce_tagset(&hba->host->tag_set);
++		mutex_unlock(&hba->host->scan_mutex);
+ 		goto out;
+ 	}
+ 
+@@ -1423,6 +1425,7 @@ static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err, bool sc
+ 	mutex_unlock(&hba->wb_mutex);
+ 
+ 	blk_mq_unquiesce_tagset(&hba->host->tag_set);
++	mutex_unlock(&hba->host->scan_mutex);
+ 	ufshcd_release(hba);
+ }
+ 
+@@ -7740,7 +7743,8 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
+ 	hba->silence_err_logs = false;
+ 
+ 	/* scale up clocks to max frequency before full reinitialization */
+-	ufshcd_scale_clks(hba, ULONG_MAX, true);
++	if (ufshcd_is_clkscaling_supported(hba))
++		ufshcd_scale_clks(hba, ULONG_MAX, true);
+ 
+ 	err = ufshcd_hba_enable(hba);
+ 
+diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
+index 16e7fa4d488d37..ecd6d1f39e4984 100644
+--- a/drivers/usb/class/cdc-wdm.c
++++ b/drivers/usb/class/cdc-wdm.c
+@@ -92,7 +92,6 @@ struct wdm_device {
+ 	u16			wMaxCommand;
+ 	u16			wMaxPacketSize;
+ 	__le16			inum;
+-	int			reslength;
+ 	int			length;
+ 	int			read;
+ 	int			count;
+@@ -214,6 +213,11 @@ static void wdm_in_callback(struct urb *urb)
+ 	if (desc->rerr == 0 && status != -EPIPE)
+ 		desc->rerr = status;
+ 
++	if (length == 0) {
++		dev_dbg(&desc->intf->dev, "received ZLP\n");
++		goto skip_zlp;
++	}
++
+ 	if (length + desc->length > desc->wMaxCommand) {
+ 		/* The buffer would overflow */
+ 		set_bit(WDM_OVERFLOW, &desc->flags);
+@@ -222,18 +226,18 @@ static void wdm_in_callback(struct urb *urb)
+ 		if (!test_bit(WDM_OVERFLOW, &desc->flags)) {
+ 			memmove(desc->ubuf + desc->length, desc->inbuf, length);
+ 			desc->length += length;
+-			desc->reslength = length;
+ 		}
+ 	}
+ skip_error:
+ 
+ 	if (desc->rerr) {
+ 		/*
+-		 * Since there was an error, userspace may decide to not read
+-		 * any data after poll'ing.
++		 * If there was a ZLP or an error, userspace may decide not to
++		 * read any data after polling.
+ 		 * We should respond to further attempts from the device to send
+ 		 * data, so that we can get unstuck.
+ 		 */
++skip_zlp:
+ 		schedule_work(&desc->service_outs_intr);
+ 	} else {
+ 		set_bit(WDM_READ, &desc->flags);
+@@ -585,15 +589,6 @@ static ssize_t wdm_read
+ 			goto retry;
+ 		}
+ 
+-		if (!desc->reslength) { /* zero length read */
+-			dev_dbg(&desc->intf->dev, "zero length - clearing WDM_READ\n");
+-			clear_bit(WDM_READ, &desc->flags);
+-			rv = service_outstanding_interrupt(desc);
+-			spin_unlock_irq(&desc->iuspin);
+-			if (rv < 0)
+-				goto err;
+-			goto retry;
+-		}
+ 		cntr = desc->length;
+ 		spin_unlock_irq(&desc->iuspin);
+ 	}
+@@ -1016,7 +1011,7 @@ static void service_interrupt_work(struct work_struct *work)
+ 
+ 	spin_lock_irq(&desc->iuspin);
+ 	service_outstanding_interrupt(desc);
+-	if (!desc->resp_count) {
++	if (!desc->resp_count && (desc->length || desc->rerr)) {
+ 		set_bit(WDM_READ, &desc->flags);
+ 		wake_up(&desc->wait);
+ 	}
+diff --git a/drivers/usb/common/usb-conn-gpio.c b/drivers/usb/common/usb-conn-gpio.c
+index 501e8bc9738eba..1096a884c8d705 100644
+--- a/drivers/usb/common/usb-conn-gpio.c
++++ b/drivers/usb/common/usb-conn-gpio.c
+@@ -20,6 +20,9 @@
+ #include <linux/power_supply.h>
+ #include <linux/regulator/consumer.h>
+ #include <linux/usb/role.h>
++#include <linux/idr.h>
++
++static DEFINE_IDA(usb_conn_ida);
+ 
+ #define USB_GPIO_DEB_MS		20	/* ms */
+ #define USB_GPIO_DEB_US		((USB_GPIO_DEB_MS) * 1000)	/* us */
+@@ -29,6 +32,7 @@
+ 
+ struct usb_conn_info {
+ 	struct device *dev;
++	int conn_id; /* store the IDA-allocated ID */
+ 	struct usb_role_switch *role_sw;
+ 	enum usb_role last_role;
+ 	struct regulator *vbus;
+@@ -160,7 +164,17 @@ static int usb_conn_psy_register(struct usb_conn_info *info)
+ 		.of_node = dev->of_node,
+ 	};
+ 
+-	desc->name = "usb-charger";
++	info->conn_id = ida_alloc(&usb_conn_ida, GFP_KERNEL);
++	if (info->conn_id < 0)
++		return info->conn_id;
++
++	desc->name = devm_kasprintf(dev, GFP_KERNEL, "usb-charger-%d",
++				    info->conn_id);
++	if (!desc->name) {
++		ida_free(&usb_conn_ida, info->conn_id);
++		return -ENOMEM;
++	}
++
+ 	desc->properties = usb_charger_properties;
+ 	desc->num_properties = ARRAY_SIZE(usb_charger_properties);
+ 	desc->get_property = usb_charger_get_property;
+@@ -168,8 +182,10 @@ static int usb_conn_psy_register(struct usb_conn_info *info)
+ 	cfg.drv_data = info;
+ 
+ 	info->charger = devm_power_supply_register(dev, desc, &cfg);
+-	if (IS_ERR(info->charger))
+-		dev_err(dev, "Unable to register charger\n");
++	if (IS_ERR(info->charger)) {
++		dev_err(dev, "Unable to register charger %d\n", info->conn_id);
++		ida_free(&usb_conn_ida, info->conn_id);
++	}
+ 
+ 	return PTR_ERR_OR_ZERO(info->charger);
+ }
+@@ -277,6 +293,9 @@ static void usb_conn_remove(struct platform_device *pdev)
+ 
+ 	cancel_delayed_work_sync(&info->dw_det);
+ 
++	if (info->charger)
++		ida_free(&usb_conn_ida, info->conn_id);
++
+ 	if (info->last_role == USB_ROLE_HOST && info->vbus)
+ 		regulator_disable(info->vbus);
+ 
+diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
+index 0b4685aad2d503..118fa4c93a7956 100644
+--- a/drivers/usb/core/usb.c
++++ b/drivers/usb/core/usb.c
+@@ -695,15 +695,16 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
+ 		device_set_of_node_from_dev(&dev->dev, bus->sysdev);
+ 		dev_set_name(&dev->dev, "usb%d", bus->busnum);
+ 	} else {
++		int n;
++
+ 		/* match any labeling on the hubs; it's one-based */
+ 		if (parent->devpath[0] == '0') {
+-			snprintf(dev->devpath, sizeof dev->devpath,
+-				"%d", port1);
++			n = snprintf(dev->devpath, sizeof(dev->devpath), "%d", port1);
+ 			/* Root ports are not counted in route string */
+ 			dev->route = 0;
+ 		} else {
+-			snprintf(dev->devpath, sizeof dev->devpath,
+-				"%s.%d", parent->devpath, port1);
++			n = snprintf(dev->devpath, sizeof(dev->devpath), "%s.%d",
++				     parent->devpath, port1);
+ 			/* Route string assumes hubs have less than 16 ports */
+ 			if (port1 < 15)
+ 				dev->route = parent->route +
+@@ -712,6 +713,11 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
+ 				dev->route = parent->route +
+ 					(15 << ((parent->level - 1)*4));
+ 		}
++		if (n >= sizeof(dev->devpath)) {
++			usb_put_hcd(bus_to_hcd(bus));
++			usb_put_dev(dev);
++			return NULL;
++		}
+ 
+ 		dev->dev.parent = &parent->dev;
+ 		dev_set_name(&dev->dev, "%d-%s", bus->busnum, dev->devpath);
+diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
+index bd4c788f03bc14..d3d0d75ab1f594 100644
+--- a/drivers/usb/dwc2/gadget.c
++++ b/drivers/usb/dwc2/gadget.c
+@@ -4604,6 +4604,12 @@ static int dwc2_hsotg_udc_stop(struct usb_gadget *gadget)
+ 	if (!hsotg)
+ 		return -ENODEV;
+ 
++	/* Exit clock gating when driver is stopped. */
++	if (hsotg->params.power_down == DWC2_POWER_DOWN_PARAM_NONE &&
++	    hsotg->bus_suspended && !hsotg->params.no_clock_gating) {
++		dwc2_gadget_exit_clock_gating(hsotg, 0);
++	}
++
+ 	/* all endpoints should be shutdown */
+ 	for (ep = 1; ep < hsotg->num_of_eps; ep++) {
+ 		if (hsotg->eps_in[ep])
+diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
+index c7a05f842745bc..d8bd2d82e9ec63 100644
+--- a/drivers/usb/gadget/function/f_hid.c
++++ b/drivers/usb/gadget/function/f_hid.c
+@@ -75,6 +75,7 @@ struct f_hidg {
+ 	/* recv report */
+ 	spinlock_t			read_spinlock;
+ 	wait_queue_head_t		read_queue;
++	bool				disabled;
+ 	/* recv report - interrupt out only (use_out_ep == 1) */
+ 	struct list_head		completed_out_req;
+ 	unsigned int			qlen;
+@@ -329,7 +330,7 @@ static ssize_t f_hidg_intout_read(struct file *file, char __user *buffer,
+ 
+ 	spin_lock_irqsave(&hidg->read_spinlock, flags);
+ 
+-#define READ_COND_INTOUT (!list_empty(&hidg->completed_out_req))
++#define READ_COND_INTOUT (!list_empty(&hidg->completed_out_req) || hidg->disabled)
+ 
+ 	/* wait for at least one buffer to complete */
+ 	while (!READ_COND_INTOUT) {
+@@ -343,6 +344,11 @@ static ssize_t f_hidg_intout_read(struct file *file, char __user *buffer,
+ 		spin_lock_irqsave(&hidg->read_spinlock, flags);
+ 	}
+ 
++	if (hidg->disabled) {
++		spin_unlock_irqrestore(&hidg->read_spinlock, flags);
++		return -ESHUTDOWN;
++	}
++
+ 	/* pick the first one */
+ 	list = list_first_entry(&hidg->completed_out_req,
+ 				struct f_hidg_req_list, list);
+@@ -387,7 +393,7 @@ static ssize_t f_hidg_intout_read(struct file *file, char __user *buffer,
+ 	return count;
+ }
+ 
+-#define READ_COND_SSREPORT (hidg->set_report_buf != NULL)
++#define READ_COND_SSREPORT (hidg->set_report_buf != NULL || hidg->disabled)
+ 
+ static ssize_t f_hidg_ssreport_read(struct file *file, char __user *buffer,
+ 				    size_t count, loff_t *ptr)
+@@ -1012,6 +1018,11 @@ static void hidg_disable(struct usb_function *f)
+ 	}
+ 	spin_unlock_irqrestore(&hidg->get_report_spinlock, flags);
+ 
++	spin_lock_irqsave(&hidg->read_spinlock, flags);
++	hidg->disabled = true;
++	spin_unlock_irqrestore(&hidg->read_spinlock, flags);
++	wake_up(&hidg->read_queue);
++
+ 	spin_lock_irqsave(&hidg->write_spinlock, flags);
+ 	if (!hidg->write_pending) {
+ 		free_ep_req(hidg->in_ep, hidg->req);
+@@ -1097,6 +1108,10 @@ static int hidg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+ 		}
+ 	}
+ 
++	spin_lock_irqsave(&hidg->read_spinlock, flags);
++	hidg->disabled = false;
++	spin_unlock_irqrestore(&hidg->read_spinlock, flags);
++
+ 	if (hidg->in_ep != NULL) {
+ 		spin_lock_irqsave(&hidg->write_spinlock, flags);
+ 		hidg->req = req_in;
+diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c
+index 7b23631f47449b..6ad205046032cd 100644
+--- a/drivers/usb/gadget/function/f_tcm.c
++++ b/drivers/usb/gadget/function/f_tcm.c
+@@ -1297,14 +1297,14 @@ static struct se_portal_group *usbg_make_tpg(struct se_wwn *wwn,
+ 	struct usbg_tport *tport = container_of(wwn, struct usbg_tport,
+ 			tport_wwn);
+ 	struct usbg_tpg *tpg;
+-	unsigned long tpgt;
++	u16 tpgt;
+ 	int ret;
+ 	struct f_tcm_opts *opts;
+ 	unsigned i;
+ 
+ 	if (strstr(name, "tpgt_") != name)
+ 		return ERR_PTR(-EINVAL);
+-	if (kstrtoul(name + 5, 0, &tpgt) || tpgt > UINT_MAX)
++	if (kstrtou16(name + 5, 0, &tpgt))
+ 		return ERR_PTR(-EINVAL);
+ 	ret = -ENODEV;
+ 	mutex_lock(&tpg_instances_lock);
+diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c
+index 92cc1b13612084..4976a7238b287d 100644
+--- a/drivers/usb/typec/altmodes/displayport.c
++++ b/drivers/usb/typec/altmodes/displayport.c
+@@ -393,6 +393,10 @@ static int dp_altmode_vdm(struct typec_altmode *alt,
+ 		break;
+ 	case CMDT_RSP_NAK:
+ 		switch (cmd) {
++		case DP_CMD_STATUS_UPDATE:
++			if (typec_altmode_exit(alt))
++				dev_err(&dp->alt->dev, "Exit Mode Failed!\n");
++			break;
+ 		case DP_CMD_CONFIGURE:
+ 			dp->data.conf = 0;
+ 			ret = dp_altmode_configured(dp);
+diff --git a/drivers/usb/typec/mux.c b/drivers/usb/typec/mux.c
+index 49926d6e72c71b..182c902c42f61c 100644
+--- a/drivers/usb/typec/mux.c
++++ b/drivers/usb/typec/mux.c
+@@ -214,7 +214,7 @@ int typec_switch_set(struct typec_switch *sw,
+ 		sw_dev = sw->sw_devs[i];
+ 
+ 		ret = sw_dev->set(sw_dev, orientation);
+-		if (ret)
++		if (ret && ret != -EOPNOTSUPP)
+ 			return ret;
+ 	}
+ 
+@@ -378,7 +378,7 @@ int typec_mux_set(struct typec_mux *mux, struct typec_mux_state *state)
+ 		mux_dev = mux->mux_devs[i];
+ 
+ 		ret = mux_dev->set(mux_dev, state);
+-		if (ret)
++		if (ret && ret != -EOPNOTSUPP)
+ 			return ret;
+ 	}
+ 
+diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
+index 1d8e760df483cc..9838a2c8c1b857 100644
+--- a/drivers/usb/typec/tcpm/tcpm.c
++++ b/drivers/usb/typec/tcpm/tcpm.c
+@@ -5566,8 +5566,7 @@ static void run_state_machine(struct tcpm_port *port)
+ 		tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB,
+ 						       port->pps_data.active, 0);
+ 		tcpm_set_charge(port, false);
+-		tcpm_set_state(port, hard_reset_state(port),
+-			       PD_T_PS_SOURCE_OFF);
++		tcpm_set_state(port, ERROR_RECOVERY, PD_T_PS_SOURCE_OFF);
+ 		break;
+ 	case PR_SWAP_SNK_SRC_SOURCE_ON:
+ 		tcpm_enable_auto_vbus_discharge(port, true);
+diff --git a/fs/btrfs/backref.h b/fs/btrfs/backref.h
+index e8c22cccb5c132..7dfcc9351bce5f 100644
+--- a/fs/btrfs/backref.h
++++ b/fs/btrfs/backref.h
+@@ -427,8 +427,8 @@ struct btrfs_backref_node *btrfs_backref_alloc_node(
+ struct btrfs_backref_edge *btrfs_backref_alloc_edge(
+ 		struct btrfs_backref_cache *cache);
+ 
+-#define		LINK_LOWER	(1 << 0)
+-#define		LINK_UPPER	(1 << 1)
++#define		LINK_LOWER	(1U << 0)
++#define		LINK_UPPER	(1U << 1)
+ 
+ void btrfs_backref_link_edge(struct btrfs_backref_edge *edge,
+ 			     struct btrfs_backref_node *lower,
+diff --git a/fs/btrfs/direct-io.c b/fs/btrfs/direct-io.c
+index bd38df5647e35b..71984d7db839b4 100644
+--- a/fs/btrfs/direct-io.c
++++ b/fs/btrfs/direct-io.c
+@@ -151,8 +151,8 @@ static struct extent_map *btrfs_create_dio_extent(struct btrfs_inode *inode,
+ 	}
+ 
+ 	ordered = btrfs_alloc_ordered_extent(inode, start, file_extent,
+-					     (1 << type) |
+-					     (1 << BTRFS_ORDERED_DIRECT));
++					     (1U << type) |
++					     (1U << BTRFS_ORDERED_DIRECT));
+ 	if (IS_ERR(ordered)) {
+ 		if (em) {
+ 			free_extent_map(em);
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 147c50ef912acf..e655fa3bfd9be7 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -2168,8 +2168,7 @@ static int load_global_roots_objectid(struct btrfs_root *tree_root,
+ 		found = true;
+ 		root = read_tree_root_path(tree_root, path, &key);
+ 		if (IS_ERR(root)) {
+-			if (!btrfs_test_opt(fs_info, IGNOREBADROOTS))
+-				ret = PTR_ERR(root);
++			ret = PTR_ERR(root);
+ 			break;
+ 		}
+ 		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
+@@ -2786,6 +2785,7 @@ void btrfs_init_fs_info(struct btrfs_fs_info *fs_info)
+ 	btrfs_init_scrub(fs_info);
+ 	btrfs_init_balance(fs_info);
+ 	btrfs_init_async_reclaim_work(fs_info);
++	btrfs_init_extent_map_shrinker_work(fs_info);
+ 
+ 	rwlock_init(&fs_info->block_group_cache_lock);
+ 	fs_info->block_group_cache_tree = RB_ROOT_CACHED;
+@@ -4335,6 +4335,7 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
+ 	cancel_work_sync(&fs_info->async_reclaim_work);
+ 	cancel_work_sync(&fs_info->async_data_reclaim_work);
+ 	cancel_work_sync(&fs_info->preempt_reclaim_work);
++	cancel_work_sync(&fs_info->extent_map_shrinker_work);
+ 
+ 	/* Cancel or finish ongoing discard work */
+ 	btrfs_discard_cleanup(fs_info);
+diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
+index fcb60837d7dc62..039a73731135a7 100644
+--- a/fs/btrfs/extent_io.h
++++ b/fs/btrfs/extent_io.h
+@@ -79,7 +79,7 @@ enum {
+  *    single word in a bitmap may straddle two pages in the extent buffer.
+  */
+ #define BIT_BYTE(nr) ((nr) / BITS_PER_BYTE)
+-#define BYTE_MASK ((1 << BITS_PER_BYTE) - 1)
++#define BYTE_MASK ((1U << BITS_PER_BYTE) - 1)
+ #define BITMAP_FIRST_BYTE_MASK(start) \
+ 	((BYTE_MASK << ((start) & (BITS_PER_BYTE - 1))) & BYTE_MASK)
+ #define BITMAP_LAST_BYTE_MASK(nbits) \
+diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
+index 1d93e1202c3394..36af9aa9aab13f 100644
+--- a/fs/btrfs/extent_map.c
++++ b/fs/btrfs/extent_map.c
+@@ -1128,11 +1128,14 @@ struct btrfs_em_shrink_ctx {
+ 
+ static long btrfs_scan_inode(struct btrfs_inode *inode, struct btrfs_em_shrink_ctx *ctx)
+ {
+-	const u64 cur_fs_gen = btrfs_get_fs_generation(inode->root->fs_info);
++	struct btrfs_fs_info *fs_info = inode->root->fs_info;
++	const u64 cur_fs_gen = btrfs_get_fs_generation(fs_info);
+ 	struct extent_map_tree *tree = &inode->extent_tree;
+ 	long nr_dropped = 0;
+ 	struct rb_node *node;
+ 
++	lockdep_assert_held_write(&tree->lock);
++
+ 	/*
+ 	 * Take the mmap lock so that we serialize with the inode logging phase
+ 	 * of fsync because we may need to set the full sync flag on the inode,
+@@ -1144,28 +1147,12 @@ static long btrfs_scan_inode(struct btrfs_inode *inode, struct btrfs_em_shrink_c
+ 	 * to find new extents, which may not be there yet because ordered
+ 	 * extents haven't completed yet.
+ 	 *
+-	 * We also do a try lock because otherwise we could deadlock. This is
+-	 * because the shrinker for this filesystem may be invoked while we are
+-	 * in a path that is holding the mmap lock in write mode. For example in
+-	 * a reflink operation while COWing an extent buffer, when allocating
+-	 * pages for a new extent buffer and under memory pressure, the shrinker
+-	 * may be invoked, and therefore we would deadlock by attempting to read
+-	 * lock the mmap lock while we are holding already a write lock on it.
++	 * We also do a try lock because we don't want to block for too long and
++	 * we are holding the extent map tree's lock in write mode.
+ 	 */
+ 	if (!down_read_trylock(&inode->i_mmap_lock))
+ 		return 0;
+ 
+-	/*
+-	 * We want to be fast so if the lock is busy we don't want to spend time
+-	 * waiting for it - either some task is about to do IO for the inode or
+-	 * we may have another task shrinking extent maps, here in this code, so
+-	 * skip this inode.
+-	 */
+-	if (!write_trylock(&tree->lock)) {
+-		up_read(&inode->i_mmap_lock);
+-		return 0;
+-	}
+-
+ 	node = rb_first(&tree->root);
+ 	while (node) {
+ 		struct rb_node *next = rb_next(node);
+@@ -1201,36 +1188,89 @@ static long btrfs_scan_inode(struct btrfs_inode *inode, struct btrfs_em_shrink_c
+ 		 * lock. This is to avoid slowing other tasks trying to take the
+ 		 * lock.
+ 		 */
+-		if (need_resched() || rwlock_needbreak(&tree->lock))
++		if (need_resched() || rwlock_needbreak(&tree->lock) ||
++		    btrfs_fs_closing(fs_info))
+ 			break;
+ 		node = next;
+ 	}
+-	write_unlock(&tree->lock);
+ 	up_read(&inode->i_mmap_lock);
+ 
+ 	return nr_dropped;
+ }
+ 
++static struct btrfs_inode *find_first_inode_to_shrink(struct btrfs_root *root,
++						      u64 min_ino)
++{
++	struct btrfs_inode *inode;
++	unsigned long from = min_ino;
++
++	xa_lock(&root->inodes);
++	while (true) {
++		struct extent_map_tree *tree;
++
++		inode = xa_find(&root->inodes, &from, ULONG_MAX, XA_PRESENT);
++		if (!inode)
++			break;
++
++		tree = &inode->extent_tree;
++
++		/*
++		 * We want to be fast so if the lock is busy we don't want to
++		 * spend time waiting for it (some task is about to do IO for
++		 * the inode).
++		 */
++		if (!write_trylock(&tree->lock))
++			goto next;
++
++		/*
++		 * Skip inode if it doesn't have loaded extent maps, so we avoid
++		 * getting a reference and doing an iput later. This includes
++		 * cases like files that were opened for things like stat(2), or
++		 * files with all extent maps previously released through the
++		 * release folio callback (btrfs_release_folio()) or released in
++		 * a previous run, or directories which never have extent maps.
++		 */
++		if (RB_EMPTY_ROOT(&tree->root)) {
++			write_unlock(&tree->lock);
++			goto next;
++		}
++
++		if (igrab(&inode->vfs_inode))
++			break;
++
++		write_unlock(&tree->lock);
++next:
++		from = btrfs_ino(inode) + 1;
++		cond_resched_lock(&root->inodes.xa_lock);
++	}
++	xa_unlock(&root->inodes);
++
++	return inode;
++}
++
+ static long btrfs_scan_root(struct btrfs_root *root, struct btrfs_em_shrink_ctx *ctx)
+ {
++	struct btrfs_fs_info *fs_info = root->fs_info;
+ 	struct btrfs_inode *inode;
+ 	long nr_dropped = 0;
+ 	u64 min_ino = ctx->last_ino + 1;
+ 
+-	inode = btrfs_find_first_inode(root, min_ino);
++	inode = find_first_inode_to_shrink(root, min_ino);
+ 	while (inode) {
+ 		nr_dropped += btrfs_scan_inode(inode, ctx);
++		write_unlock(&inode->extent_tree.lock);
+ 
+ 		min_ino = btrfs_ino(inode) + 1;
+ 		ctx->last_ino = btrfs_ino(inode);
+-		btrfs_add_delayed_iput(inode);
++		iput(&inode->vfs_inode);
+ 
+-		if (ctx->scanned >= ctx->nr_to_scan)
++		if (ctx->scanned >= ctx->nr_to_scan ||
++		    btrfs_fs_closing(fs_info))
+ 			break;
+ 
+ 		cond_resched();
+ 
+-		inode = btrfs_find_first_inode(root, min_ino);
++		inode = find_first_inode_to_shrink(root, min_ino);
+ 	}
+ 
+ 	if (inode) {
+@@ -1254,16 +1294,19 @@ static long btrfs_scan_root(struct btrfs_root *root, struct btrfs_em_shrink_ctx
+ 	return nr_dropped;
+ }
+ 
+-long btrfs_free_extent_maps(struct btrfs_fs_info *fs_info, long nr_to_scan)
++static void btrfs_extent_map_shrinker_worker(struct work_struct *work)
+ {
++	struct btrfs_fs_info *fs_info;
+ 	struct btrfs_em_shrink_ctx ctx;
+ 	u64 start_root_id;
+ 	u64 next_root_id;
+ 	bool cycled = false;
+ 	long nr_dropped = 0;
+ 
++	fs_info = container_of(work, struct btrfs_fs_info, extent_map_shrinker_work);
++
+ 	ctx.scanned = 0;
+-	ctx.nr_to_scan = nr_to_scan;
++	ctx.nr_to_scan = atomic64_read(&fs_info->extent_map_shrinker_nr_to_scan);
+ 
+ 	/*
+ 	 * In case we have multiple tasks running this shrinker, make the next
+@@ -1281,12 +1324,12 @@ long btrfs_free_extent_maps(struct btrfs_fs_info *fs_info, long nr_to_scan)
+ 	if (trace_btrfs_extent_map_shrinker_scan_enter_enabled()) {
+ 		s64 nr = percpu_counter_sum_positive(&fs_info->evictable_extent_maps);
+ 
+-		trace_btrfs_extent_map_shrinker_scan_enter(fs_info, nr_to_scan,
++		trace_btrfs_extent_map_shrinker_scan_enter(fs_info, ctx.nr_to_scan,
+ 							   nr, ctx.last_root,
+ 							   ctx.last_ino);
+ 	}
+ 
+-	while (ctx.scanned < ctx.nr_to_scan) {
++	while (ctx.scanned < ctx.nr_to_scan && !btrfs_fs_closing(fs_info)) {
+ 		struct btrfs_root *root;
+ 		unsigned long count;
+ 
+@@ -1344,5 +1387,34 @@ long btrfs_free_extent_maps(struct btrfs_fs_info *fs_info, long nr_to_scan)
+ 							  ctx.last_ino);
+ 	}
+ 
+-	return nr_dropped;
++	atomic64_set(&fs_info->extent_map_shrinker_nr_to_scan, 0);
++}
++
++void btrfs_free_extent_maps(struct btrfs_fs_info *fs_info, long nr_to_scan)
++{
++	/*
++	 * Do nothing if the shrinker is already running. In case of high memory
++	 * pressure we can have a lot of tasks calling us and all passing the
++	 * same nr_to_scan value, but in reality we may only need to free
++	 * nr_to_scan extent maps (or fewer). In case we need to free more than
++	 * that, we will be called again by the fs shrinker, so no worries about
++	 * not doing enough work to reclaim memory from extent maps.
++	 * We can also be repeatedly called with the same nr_to_scan value
++	 * simply because the shrinker runs asynchronously and multiple calls
++	 * to this function are made before the shrinker makes enough progress.
++	 *
++	 * That's why we set the atomic counter to nr_to_scan only if its
++	 * current value is zero, instead of incrementing the counter by
++	 * nr_to_scan.
++	 */
++	if (atomic64_cmpxchg(&fs_info->extent_map_shrinker_nr_to_scan, 0, nr_to_scan) != 0)
++		return;
++
++	queue_work(system_unbound_wq, &fs_info->extent_map_shrinker_work);
++}
++
++void btrfs_init_extent_map_shrinker_work(struct btrfs_fs_info *fs_info)
++{
++	atomic64_set(&fs_info->extent_map_shrinker_nr_to_scan, 0);
++	INIT_WORK(&fs_info->extent_map_shrinker_work, btrfs_extent_map_shrinker_worker);
+ }
+diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h
+index 5154a8f1d26c94..cd123b266b6416 100644
+--- a/fs/btrfs/extent_map.h
++++ b/fs/btrfs/extent_map.h
+@@ -189,6 +189,7 @@ void btrfs_drop_extent_map_range(struct btrfs_inode *inode,
+ int btrfs_replace_extent_map_range(struct btrfs_inode *inode,
+ 				   struct extent_map *new_em,
+ 				   bool modified);
+-long btrfs_free_extent_maps(struct btrfs_fs_info *fs_info, long nr_to_scan);
++void btrfs_free_extent_maps(struct btrfs_fs_info *fs_info, long nr_to_scan);
++void btrfs_init_extent_map_shrinker_work(struct btrfs_fs_info *fs_info);
+ 
+ #endif
+diff --git a/fs/btrfs/fs.h b/fs/btrfs/fs.h
+index bb822e425d7fa0..374843aca60d88 100644
+--- a/fs/btrfs/fs.h
++++ b/fs/btrfs/fs.h
+@@ -639,6 +639,8 @@ struct btrfs_fs_info {
+ 	spinlock_t extent_map_shrinker_lock;
+ 	u64 extent_map_shrinker_last_root;
+ 	u64 extent_map_shrinker_last_ino;
++	atomic64_t extent_map_shrinker_nr_to_scan;
++	struct work_struct extent_map_shrinker_work;
+ 
+ 	/* Protected by 'trans_lock'. */
+ 	struct list_head dirty_cowonly_roots;
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 1ab5b0c1b9b76a..921ec3802648b0 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -1249,7 +1249,7 @@ static void submit_one_async_extent(struct async_chunk *async_chunk,
+ 	free_extent_map(em);
+ 
+ 	ordered = btrfs_alloc_ordered_extent(inode, start, &file_extent,
+-					     1 << BTRFS_ORDERED_COMPRESSED);
++					     1U << BTRFS_ORDERED_COMPRESSED);
+ 	if (IS_ERR(ordered)) {
+ 		btrfs_drop_extent_map_range(inode, start, end, false);
+ 		ret = PTR_ERR(ordered);
+@@ -1408,6 +1408,17 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
+ 
+ 	alloc_hint = btrfs_get_extent_allocation_hint(inode, start, num_bytes);
+ 
++	/*
++	 * We're not doing compressed IO, don't unlock the first page (which
++	 * the caller expects to stay locked), don't clear any dirty bits and
++	 * don't set any writeback bits.
++	 *
++	 * Do set the Ordered (Private2) bit so we know this page was properly
++	 * setup for writepage.
++	 */
++	page_ops = (keep_locked ? 0 : PAGE_UNLOCK);
++	page_ops |= PAGE_SET_ORDERED;
++
+ 	/*
+ 	 * Relocation relies on the relocated extents to have exactly the same
+ 	 * size as the original extents. Normally writeback for relocation data
+@@ -1452,8 +1463,13 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
+ 				continue;
+ 			}
+ 			if (done_offset) {
+-				*done_offset = start - 1;
+-				return 0;
++				/*
++				 * Move @end to the end of the processed range,
++				 * and exit the loop to unlock the processed extents.
++				 */
++				end = start - 1;
++				ret = 0;
++				break;
+ 			}
+ 			ret = -ENOSPC;
+ 		}
+@@ -1470,6 +1486,10 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
+ 		file_extent.offset = 0;
+ 		file_extent.compression = BTRFS_COMPRESS_NONE;
+ 
++		/*
++		 * Locked range will be released either during error clean up or
++		 * after the whole range is finished.
++		 */
+ 		lock_extent(&inode->io_tree, start, start + ram_size - 1,
+ 			    &cached);
+ 
+@@ -1484,7 +1504,7 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
+ 		free_extent_map(em);
+ 
+ 		ordered = btrfs_alloc_ordered_extent(inode, start, &file_extent,
+-						     1 << BTRFS_ORDERED_REGULAR);
++						     1U << BTRFS_ORDERED_REGULAR);
+ 		if (IS_ERR(ordered)) {
+ 			unlock_extent(&inode->io_tree, start,
+ 				      start + ram_size - 1, &cached);
+@@ -1515,27 +1535,12 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
+ 
+ 		btrfs_dec_block_group_reservations(fs_info, ins.objectid);
+ 
+-		/*
+-		 * We're not doing compressed IO, don't unlock the first page
+-		 * (which the caller expects to stay locked), don't clear any
+-		 * dirty bits and don't set any writeback bits
+-		 *
+-		 * Do set the Ordered (Private2) bit so we know this page was
+-		 * properly setup for writepage.
+-		 */
+-		page_ops = (keep_locked ? 0 : PAGE_UNLOCK);
+-		page_ops |= PAGE_SET_ORDERED;
+-
+-		extent_clear_unlock_delalloc(inode, start, start + ram_size - 1,
+-					     locked_folio, &cached,
+-					     EXTENT_LOCKED | EXTENT_DELALLOC,
+-					     page_ops);
+-		if (num_bytes < cur_alloc_size)
++		if (num_bytes < ram_size)
+ 			num_bytes = 0;
+ 		else
+-			num_bytes -= cur_alloc_size;
++			num_bytes -= ram_size;
+ 		alloc_hint = ins.objectid + ins.offset;
+-		start += cur_alloc_size;
++		start += ram_size;
+ 		extent_reserved = false;
+ 
+ 		/*
+@@ -1546,6 +1551,8 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
+ 		if (ret)
+ 			goto out_unlock;
+ 	}
++	extent_clear_unlock_delalloc(inode, orig_start, end, locked_folio, &cached,
++				     EXTENT_LOCKED | EXTENT_DELALLOC, page_ops);
+ done:
+ 	if (done_offset)
+ 		*done_offset = end;
+@@ -1561,40 +1568,35 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
+ 	 * Now, we have three regions to clean up:
+ 	 *
+ 	 * |-------(1)----|---(2)---|-------------(3)----------|
+-	 * `- orig_start  `- start  `- start + cur_alloc_size  `- end
++	 * `- orig_start  `- start  `- start + ram_size  `- end
+ 	 *
+ 	 * We process each region below.
+ 	 */
+ 
+-	clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
+-		EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV;
+-	page_ops = PAGE_UNLOCK | PAGE_START_WRITEBACK | PAGE_END_WRITEBACK;
+-
+ 	/*
+ 	 * For the range (1). We have already instantiated the ordered extents
+ 	 * for this region. They are cleaned up by
+ 	 * btrfs_cleanup_ordered_extents() in e.g,
+-	 * btrfs_run_delalloc_range(). EXTENT_LOCKED | EXTENT_DELALLOC are
+-	 * already cleared in the above loop. And, EXTENT_DELALLOC_NEW |
+-	 * EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV are handled by the cleanup
+-	 * function.
++	 * btrfs_run_delalloc_range().
++	 * EXTENT_DELALLOC_NEW | EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV
++	 * are also handled by the cleanup function.
+ 	 *
+-	 * However, in case of @keep_locked, we still need to unlock the pages
+-	 * (except @locked_folio) to ensure all the pages are unlocked.
++	 * So here we only clear the EXTENT_LOCKED and EXTENT_DELALLOC flags, and
++	 * finish the writeback of the involved folios, which will never be submitted.
+ 	 */
+-	if (keep_locked && orig_start < start) {
++	if (orig_start < start) {
++		clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC;
++		page_ops = PAGE_UNLOCK | PAGE_START_WRITEBACK | PAGE_END_WRITEBACK;
++
+ 		if (!locked_folio)
+ 			mapping_set_error(inode->vfs_inode.i_mapping, ret);
+ 		extent_clear_unlock_delalloc(inode, orig_start, start - 1,
+-					     locked_folio, NULL, 0, page_ops);
++					     locked_folio, NULL, clear_bits, page_ops);
+ 	}
+ 
+-	/*
+-	 * At this point we're unlocked, we want to make sure we're only
+-	 * clearing these flags under the extent lock, so lock the rest of the
+-	 * range and clear everything up.
+-	 */
+-	lock_extent(&inode->io_tree, start, end, NULL);
++	clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
++		     EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV;
++	page_ops = PAGE_UNLOCK | PAGE_START_WRITEBACK | PAGE_END_WRITEBACK;
+ 
+ 	/*
+ 	 * For the range (2). If we reserved an extent for our delalloc range
+@@ -1608,11 +1610,11 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
+ 	 */
+ 	if (extent_reserved) {
+ 		extent_clear_unlock_delalloc(inode, start,
+-					     start + cur_alloc_size - 1,
++					     start + ram_size - 1,
+ 					     locked_folio, &cached, clear_bits,
+ 					     page_ops);
+-		btrfs_qgroup_free_data(inode, NULL, start, cur_alloc_size, NULL);
+-		start += cur_alloc_size;
++		btrfs_qgroup_free_data(inode, NULL, start, ram_size, NULL);
++		start += ram_size;
+ 	}
+ 
+ 	/*
+@@ -2055,6 +2057,63 @@ static void cleanup_dirty_folios(struct btrfs_inode *inode,
+ 	mapping_set_error(mapping, error);
+ }
+ 
++static int nocow_one_range(struct btrfs_inode *inode, struct folio *locked_folio,
++			   struct extent_state **cached,
++			   struct can_nocow_file_extent_args *nocow_args,
++			   u64 file_pos, bool is_prealloc)
++{
++	struct btrfs_ordered_extent *ordered;
++	u64 len = nocow_args->file_extent.num_bytes;
++	u64 end = file_pos + len - 1;
++	int ret = 0;
++
++	lock_extent(&inode->io_tree, file_pos, end, cached);
++
++	if (is_prealloc) {
++		struct extent_map *em;
++
++		em = btrfs_create_io_em(inode, file_pos, &nocow_args->file_extent,
++					BTRFS_ORDERED_PREALLOC);
++		if (IS_ERR(em)) {
++			unlock_extent(&inode->io_tree, file_pos, end, cached);
++			return PTR_ERR(em);
++		}
++		free_extent_map(em);
++	}
++
++	ordered = btrfs_alloc_ordered_extent(inode, file_pos, &nocow_args->file_extent,
++					     is_prealloc
++					     ? (1U << BTRFS_ORDERED_PREALLOC)
++					     : (1U << BTRFS_ORDERED_NOCOW));
++	if (IS_ERR(ordered)) {
++		if (is_prealloc)
++			btrfs_drop_extent_map_range(inode, file_pos, end, false);
++		unlock_extent(&inode->io_tree, file_pos, end, cached);
++		return PTR_ERR(ordered);
++	}
++
++	if (btrfs_is_data_reloc_root(inode->root))
++		/*
++		 * Errors are handled later, as we must prevent
++		 * extent_clear_unlock_delalloc() in the error handler from freeing
++		 * metadata of the created ordered extent.
++		 */
++		ret = btrfs_reloc_clone_csums(ordered);
++	btrfs_put_ordered_extent(ordered);
++
++	extent_clear_unlock_delalloc(inode, file_pos, end, locked_folio, cached,
++				     EXTENT_LOCKED | EXTENT_DELALLOC |
++				     EXTENT_CLEAR_DATA_RESV,
++				     PAGE_UNLOCK | PAGE_SET_ORDERED);
++
++	/*
++	 * If btrfs_reloc_clone_csums() failed, it is now safe to call the error
++	 * handler, as metadata for the created ordered extent will only be freed by
++	 * btrfs_finish_ordered_io().
++	 */
++	return ret;
++}
++
+ /*
+  * when nocow writeback calls back.  This checks for snapshots or COW copies
+  * of the extents that exist in the file, and COWs the file as required.
+@@ -2099,15 +2158,12 @@ static noinline int run_delalloc_nocow(struct btrfs_inode *inode,
+ 
+ 	while (cur_offset <= end) {
+ 		struct btrfs_block_group *nocow_bg = NULL;
+-		struct btrfs_ordered_extent *ordered;
+ 		struct btrfs_key found_key;
+ 		struct btrfs_file_extent_item *fi;
+ 		struct extent_buffer *leaf;
+ 		struct extent_state *cached_state = NULL;
+ 		u64 extent_end;
+-		u64 nocow_end;
+ 		int extent_type;
+-		bool is_prealloc;
+ 
+ 		ret = btrfs_lookup_file_extent(NULL, root, path, ino,
+ 					       cur_offset, 0);
+@@ -2242,67 +2298,13 @@ static noinline int run_delalloc_nocow(struct btrfs_inode *inode,
+ 			}
+ 		}
+ 
+-		nocow_end = cur_offset + nocow_args.file_extent.num_bytes - 1;
+-		lock_extent(&inode->io_tree, cur_offset, nocow_end, &cached_state);
+-
+-		is_prealloc = extent_type == BTRFS_FILE_EXTENT_PREALLOC;
+-		if (is_prealloc) {
+-			struct extent_map *em;
+-
+-			em = btrfs_create_io_em(inode, cur_offset,
+-						&nocow_args.file_extent,
+-						BTRFS_ORDERED_PREALLOC);
+-			if (IS_ERR(em)) {
+-				unlock_extent(&inode->io_tree, cur_offset,
+-					      nocow_end, &cached_state);
+-				btrfs_dec_nocow_writers(nocow_bg);
+-				ret = PTR_ERR(em);
+-				goto error;
+-			}
+-			free_extent_map(em);
+-		}
+-
+-		ordered = btrfs_alloc_ordered_extent(inode, cur_offset,
+-				&nocow_args.file_extent,
+-				is_prealloc
+-				? (1 << BTRFS_ORDERED_PREALLOC)
+-				: (1 << BTRFS_ORDERED_NOCOW));
++		ret = nocow_one_range(inode, locked_folio, &cached_state,
++				      &nocow_args, cur_offset,
++				      extent_type == BTRFS_FILE_EXTENT_PREALLOC);
+ 		btrfs_dec_nocow_writers(nocow_bg);
+-		if (IS_ERR(ordered)) {
+-			if (is_prealloc) {
+-				btrfs_drop_extent_map_range(inode, cur_offset,
+-							    nocow_end, false);
+-			}
+-			unlock_extent(&inode->io_tree, cur_offset,
+-				      nocow_end, &cached_state);
+-			ret = PTR_ERR(ordered);
++		if (ret < 0)
+ 			goto error;
+-		}
+-
+-		if (btrfs_is_data_reloc_root(root))
+-			/*
+-			 * Error handled later, as we must prevent
+-			 * extent_clear_unlock_delalloc() in error handler
+-			 * from freeing metadata of created ordered extent.
+-			 */
+-			ret = btrfs_reloc_clone_csums(ordered);
+-		btrfs_put_ordered_extent(ordered);
+-
+-		extent_clear_unlock_delalloc(inode, cur_offset, nocow_end,
+-					     locked_folio, &cached_state,
+-					     EXTENT_LOCKED | EXTENT_DELALLOC |
+-					     EXTENT_CLEAR_DATA_RESV,
+-					     PAGE_UNLOCK | PAGE_SET_ORDERED);
+-
+ 		cur_offset = extent_end;
+-
+-		/*
+-		 * btrfs_reloc_clone_csums() error, now we're OK to call error
+-		 * handler, as metadata for created ordered extent will only
+-		 * be freed by btrfs_finish_ordered_io().
+-		 */
+-		if (ret)
+-			goto error;
+ 	}
+ 	btrfs_release_path(path);
+ 
+@@ -7997,6 +7999,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
+ 	int ret;
+ 	int ret2;
+ 	bool need_abort = false;
++	bool logs_pinned = false;
+ 	struct fscrypt_name old_fname, new_fname;
+ 	struct fscrypt_str *old_name, *new_name;
+ 
+@@ -8120,6 +8123,31 @@ static int btrfs_rename_exchange(struct inode *old_dir,
+ 	inode_inc_iversion(new_inode);
+ 	simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry);
+ 
++	if (old_ino != BTRFS_FIRST_FREE_OBJECTID &&
++	    new_ino != BTRFS_FIRST_FREE_OBJECTID) {
++		/*
++		 * If we are renaming in the same directory (and it's not for
++		 * root entries) pin the log early to prevent any concurrent
++		 * task from logging the directory after we removed the old
++		 * entries and before we add the new entries, otherwise that
++		 * task can sync a log without any entry for the inodes we are
++		 * renaming and therefore replaying that log, if a power failure
++		 * happens after syncing the log, would result in deleting the
++		 * inodes.
++		 *
++		 * If the rename affects two different directories, we want to
++		 * make sure that there's no log commit that contains
++		 * updates for only one of the directories but not for the
++		 * other.
++		 *
++		 * If we are renaming an entry for a root, we don't care about
++		 * log updates since we called btrfs_set_log_full_commit().
++		 */
++		btrfs_pin_log_trans(root);
++		btrfs_pin_log_trans(dest);
++		logs_pinned = true;
++	}
++
+ 	if (old_dentry->d_parent != new_dentry->d_parent) {
+ 		btrfs_record_unlink_dir(trans, BTRFS_I(old_dir),
+ 					BTRFS_I(old_inode), true);
+@@ -8177,30 +8205,23 @@ static int btrfs_rename_exchange(struct inode *old_dir,
+ 		BTRFS_I(new_inode)->dir_index = new_idx;
+ 
+ 	/*
+-	 * Now pin the logs of the roots. We do it to ensure that no other task
+-	 * can sync the logs while we are in progress with the rename, because
+-	 * that could result in an inconsistency in case any of the inodes that
+-	 * are part of this rename operation were logged before.
++	 * Do the log updates for all inodes.
++	 *
++	 * If either entry is for a root we don't need to update the logs since
++	 * we've called btrfs_set_log_full_commit() before.
+ 	 */
+-	if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
+-		btrfs_pin_log_trans(root);
+-	if (new_ino != BTRFS_FIRST_FREE_OBJECTID)
+-		btrfs_pin_log_trans(dest);
+-
+-	/* Do the log updates for all inodes. */
+-	if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
++	if (logs_pinned) {
+ 		btrfs_log_new_name(trans, old_dentry, BTRFS_I(old_dir),
+ 				   old_rename_ctx.index, new_dentry->d_parent);
+-	if (new_ino != BTRFS_FIRST_FREE_OBJECTID)
+ 		btrfs_log_new_name(trans, new_dentry, BTRFS_I(new_dir),
+ 				   new_rename_ctx.index, old_dentry->d_parent);
++	}
+ 
+-	/* Now unpin the logs. */
+-	if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
++out_fail:
++	if (logs_pinned) {
+ 		btrfs_end_log_trans(root);
+-	if (new_ino != BTRFS_FIRST_FREE_OBJECTID)
+ 		btrfs_end_log_trans(dest);
+-out_fail:
++	}
+ 	ret2 = btrfs_end_transaction(trans);
+ 	ret = ret ? ret : ret2;
+ out_notrans:
+@@ -8250,6 +8271,7 @@ static int btrfs_rename(struct mnt_idmap *idmap,
+ 	int ret2;
+ 	u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
+ 	struct fscrypt_name old_fname, new_fname;
++	bool logs_pinned = false;
+ 
+ 	if (btrfs_ino(BTRFS_I(new_dir)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
+ 		return -EPERM;
+@@ -8384,6 +8406,29 @@ static int btrfs_rename(struct mnt_idmap *idmap,
+ 	inode_inc_iversion(old_inode);
+ 	simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry);
+ 
++	if (old_ino != BTRFS_FIRST_FREE_OBJECTID) {
++		/*
++		 * If we are renaming in the same directory (and it's not a
++		 * root entry) pin the log to prevent any concurrent task from
++		 * logging the directory after we removed the old entry and
++		 * before we add the new entry, otherwise that task can sync
++		 * a log without any entry for the inode we are renaming and
++		 * therefore replaying that log, if a power failure happens
++		 * after syncing the log, would result in deleting the inode.
++		 *
++		 * If the rename affects two different directories, we want to
++		 * make sure that there's no log commit that contains
++		 * updates for only one of the directories but not for the
++		 * other.
++		 *
++		 * If we are renaming an entry for a root, we don't care about
++		 * log updates since we called btrfs_set_log_full_commit().
++		 */
++		btrfs_pin_log_trans(root);
++		btrfs_pin_log_trans(dest);
++		logs_pinned = true;
++	}
++
+ 	if (old_dentry->d_parent != new_dentry->d_parent)
+ 		btrfs_record_unlink_dir(trans, BTRFS_I(old_dir),
+ 					BTRFS_I(old_inode), true);
+@@ -8432,7 +8477,7 @@ static int btrfs_rename(struct mnt_idmap *idmap,
+ 	if (old_inode->i_nlink == 1)
+ 		BTRFS_I(old_inode)->dir_index = index;
+ 
+-	if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
++	if (logs_pinned)
+ 		btrfs_log_new_name(trans, old_dentry, BTRFS_I(old_dir),
+ 				   rename_ctx.index, new_dentry->d_parent);
+ 
+@@ -8448,6 +8493,10 @@ static int btrfs_rename(struct mnt_idmap *idmap,
+ 		}
+ 	}
+ out_fail:
++	if (logs_pinned) {
++		btrfs_end_log_trans(root);
++		btrfs_end_log_trans(dest);
++	}
+ 	ret2 = btrfs_end_transaction(trans);
+ 	ret = ret ? ret : ret2;
+ out_notrans:
+@@ -9683,8 +9732,8 @@ ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
+ 	free_extent_map(em);
+ 
+ 	ordered = btrfs_alloc_ordered_extent(inode, start, &file_extent,
+-				       (1 << BTRFS_ORDERED_ENCODED) |
+-				       (1 << BTRFS_ORDERED_COMPRESSED));
++				       (1U << BTRFS_ORDERED_ENCODED) |
++				       (1U << BTRFS_ORDERED_COMPRESSED));
+ 	if (IS_ERR(ordered)) {
+ 		btrfs_drop_extent_map_range(inode, start, end, false);
+ 		ret = PTR_ERR(ordered);
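
A note on the two rename hunks above: both now follow the same pin-early,
unpin-on-every-exit discipline, tracked by the logs_pinned flag. A minimal
userspace sketch of that pattern, assuming hypothetical pin_log()/end_log()
stand-ins for btrfs_pin_log_trans()/btrfs_end_log_trans():

  #include <stdbool.h>
  #include <stdio.h>

  static void pin_log(const char *w) { printf("pin %s\n", w); }
  static void end_log(const char *w) { printf("unpin %s\n", w); }

  static int do_rename(bool fail_midway)
  {
          bool logs_pinned = false;
          int ret = 0;

          /* Pin both logs before the old directory entries go away. */
          pin_log("src");
          pin_log("dst");
          logs_pinned = true;

          /* ... remove old entries, add new entries, log new names ... */
          if (fail_midway)
                  ret = -1;

          /* "out_fail:" in the patch; runs on success and on error. */
          if (logs_pinned) {
                  end_log("src");
                  end_log("dst");
          }
          return ret;
  }

  int main(void)
  {
          do_rename(false);
          do_rename(true);
          return 0;
  }

The point of the flag is that the unpin in the shared exit path stays
balanced no matter which goto reached it.
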
+diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
+index 4ed11b089ea95a..880f9553d79d3e 100644
+--- a/fs/btrfs/ordered-data.c
++++ b/fs/btrfs/ordered-data.c
+@@ -153,9 +153,10 @@ static struct btrfs_ordered_extent *alloc_ordered_extent(
+ 	struct btrfs_ordered_extent *entry;
+ 	int ret;
+ 	u64 qgroup_rsv = 0;
++	const bool is_nocow = (flags &
++	       ((1U << BTRFS_ORDERED_NOCOW) | (1U << BTRFS_ORDERED_PREALLOC)));
+ 
+-	if (flags &
+-	    ((1 << BTRFS_ORDERED_NOCOW) | (1 << BTRFS_ORDERED_PREALLOC))) {
++	if (is_nocow) {
+ 		/* For nocow write, we can release the qgroup rsv right now */
+ 		ret = btrfs_qgroup_free_data(inode, NULL, file_offset, num_bytes, &qgroup_rsv);
+ 		if (ret < 0)
+@@ -170,8 +171,13 @@ static struct btrfs_ordered_extent *alloc_ordered_extent(
+ 			return ERR_PTR(ret);
+ 	}
+ 	entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
+-	if (!entry)
++	if (!entry) {
++		if (!is_nocow)
++			btrfs_qgroup_free_refroot(inode->root->fs_info,
++						  btrfs_root_id(inode->root),
++						  qgroup_rsv, BTRFS_QGROUP_RSV_DATA);
+ 		return ERR_PTR(-ENOMEM);
++	}
+ 
+ 	entry->file_offset = file_offset;
+ 	entry->num_bytes = num_bytes;
+@@ -253,7 +259,7 @@ static void insert_ordered_extent(struct btrfs_ordered_extent *entry)
+  * @disk_bytenr:     Offset of extent on disk.
+  * @disk_num_bytes:  Size of extent on disk.
+  * @offset:          Offset into unencoded data where file data starts.
+- * @flags:           Flags specifying type of extent (1 << BTRFS_ORDERED_*).
++ * @flags:           Flags specifying type of extent (1U << BTRFS_ORDERED_*).
+  * @compress_type:   Compression algorithm used for data.
+  *
+  * Most of these parameters correspond to &struct btrfs_file_extent_item. The
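
The alloc_ordered_extent() hunk plugs a reservation leak on the ENOMEM
path: for COW writes, the qgroup reservation converted earlier in the
function was not returned when kmem_cache_zalloc() failed. A hedged
userspace sketch of the unwind (reserve()/unreserve() are placeholders,
not the btrfs qgroup API):

  #include <stdbool.h>
  #include <stdio.h>
  #include <stdlib.h>

  static long reserved;   /* stands in for the data qgroup reservation */

  static void reserve(long n)   { reserved += n; }
  static void unreserve(long n) { reserved -= n; }

  static void *alloc_entry(long rsv, bool simulate_oom)
  {
          void *entry;

          reserve(rsv);
          entry = simulate_oom ? NULL : malloc(16);
          if (!entry) {
                  /* Hand the reservation back before bailing out. */
                  unreserve(rsv);
                  return NULL;
          }
          return entry;   /* reservation is now owned by the entry */
  }

  int main(void)
  {
          alloc_entry(4096, true);
          printf("outstanding after failed alloc: %ld\n", reserved);
          return 0;
  }

In the nocow/prealloc case the reservation was already freed a few lines
earlier, which is why the new code guards the rollback with !is_nocow.
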
+diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
+index 39bec672df0cc0..8afadf994b8c81 100644
+--- a/fs/btrfs/raid56.c
++++ b/fs/btrfs/raid56.c
+@@ -200,8 +200,7 @@ int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
+ 	struct btrfs_stripe_hash_table *x;
+ 	struct btrfs_stripe_hash *cur;
+ 	struct btrfs_stripe_hash *h;
+-	int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS;
+-	int i;
++	unsigned int num_entries = 1U << BTRFS_STRIPE_HASH_TABLE_BITS;
+ 
+ 	if (info->stripe_hash_table)
+ 		return 0;
+@@ -222,7 +221,7 @@ int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
+ 
+ 	h = table->table;
+ 
+-	for (i = 0; i < num_entries; i++) {
++	for (unsigned int i = 0; i < num_entries; i++) {
+ 		cur = h + i;
+ 		INIT_LIST_HEAD(&cur->hash_list);
+ 		spin_lock_init(&cur->lock);
+diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
+index bcb8def4ade203..6119a06b056938 100644
+--- a/fs/btrfs/super.c
++++ b/fs/btrfs/super.c
+@@ -28,7 +28,6 @@
+ #include <linux/btrfs.h>
+ #include <linux/security.h>
+ #include <linux/fs_parser.h>
+-#include <linux/swap.h>
+ #include "messages.h"
+ #include "delayed-inode.h"
+ #include "ctree.h"
+@@ -2399,16 +2398,10 @@ static long btrfs_free_cached_objects(struct super_block *sb, struct shrink_cont
+ 	const long nr_to_scan = min_t(unsigned long, LONG_MAX, sc->nr_to_scan);
+ 	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
+ 
+-	/*
+-	 * We may be called from any task trying to allocate memory and we don't
+-	 * want to slow it down with scanning and dropping extent maps. It would
+-	 * also cause heavy lock contention if many tasks concurrently enter
+-	 * here. Therefore only allow kswapd tasks to scan and drop extent maps.
+-	 */
+-	if (!current_is_kswapd())
+-		return 0;
++	btrfs_free_extent_maps(fs_info, nr_to_scan);
+ 
+-	return btrfs_free_extent_maps(fs_info, nr_to_scan);
++	/* The extent map shrinker runs asynchronously, so always return 0. */
++	return 0;
+ }
+ 
+ static const struct super_operations btrfs_super_ops = {
+diff --git a/fs/btrfs/tests/extent-io-tests.c b/fs/btrfs/tests/extent-io-tests.c
+index 0a2dbfaaf49e29..de226209220fe6 100644
+--- a/fs/btrfs/tests/extent-io-tests.c
++++ b/fs/btrfs/tests/extent-io-tests.c
+@@ -14,9 +14,9 @@
+ #include "../disk-io.h"
+ #include "../btrfs_inode.h"
+ 
+-#define PROCESS_UNLOCK		(1 << 0)
+-#define PROCESS_RELEASE		(1 << 1)
+-#define PROCESS_TEST_LOCKED	(1 << 2)
++#define PROCESS_UNLOCK		(1U << 0)
++#define PROCESS_RELEASE		(1U << 1)
++#define PROCESS_TEST_LOCKED	(1U << 2)
+ 
+ static noinline int process_page_range(struct inode *inode, u64 start, u64 end,
+ 				       unsigned long flags)
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index 8e65018600010b..58e0cac5779dd5 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -3268,6 +3268,12 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
+ 					device->bytes_used - dev_extent_len);
+ 			atomic64_add(dev_extent_len, &fs_info->free_chunk_space);
+ 			btrfs_clear_space_info_full(fs_info);
++
++			if (list_empty(&device->post_commit_list)) {
++				list_add_tail(&device->post_commit_list,
++					      &trans->transaction->dev_update_list);
++			}
++
+ 			mutex_unlock(&fs_info->chunk_mutex);
+ 		}
+ 	}
+diff --git a/fs/btrfs/zstd.c b/fs/btrfs/zstd.c
+index 866607fd3e5884..c9ea37fabf6591 100644
+--- a/fs/btrfs/zstd.c
++++ b/fs/btrfs/zstd.c
+@@ -24,7 +24,7 @@
+ #include "super.h"
+ 
+ #define ZSTD_BTRFS_MAX_WINDOWLOG 17
+-#define ZSTD_BTRFS_MAX_INPUT (1 << ZSTD_BTRFS_MAX_WINDOWLOG)
++#define ZSTD_BTRFS_MAX_INPUT (1U << ZSTD_BTRFS_MAX_WINDOWLOG)
+ #define ZSTD_BTRFS_DEFAULT_LEVEL 3
+ #define ZSTD_BTRFS_MAX_LEVEL 15
+ /* 307s to avoid pathologically clashing with transaction commit */
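
Several hunks above (the ordered extent flags, raid56.c, the extent-io
test masks, and this ZSTD macro) switch 1 << x to 1U << x. For these
particular shift counts both forms yield the same value; my reading is
that the unsigned literal is defensive, since shifting a signed int into
the sign bit is undefined behaviour. A short demo:

  #include <stdio.h>

  int main(void)
  {
          /* 1 << 31 would shift into the sign bit of int (undefined);
           * the unsigned literal keeps the same idiom well-defined. */
          printf("%x\n", 1U << 31);

          /* Small shifts like ZSTD_BTRFS_MAX_WINDOWLOG (17) are safe
           * either way and produce identical values. */
          printf("%x %x\n", 1 << 17, 1U << 17);
          return 0;
  }
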
+diff --git a/fs/ceph/file.c b/fs/ceph/file.c
+index 851d70200c6b8f..a7254cab44cc2e 100644
+--- a/fs/ceph/file.c
++++ b/fs/ceph/file.c
+@@ -2616,7 +2616,7 @@ static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length)
+ 	s32 stripe_unit = ci->i_layout.stripe_unit;
+ 	s32 stripe_count = ci->i_layout.stripe_count;
+ 	s32 object_size = ci->i_layout.object_size;
+-	u64 object_set_size = object_size * stripe_count;
++	u64 object_set_size = (u64) object_size * stripe_count;
+ 	u64 nearly, t;
+ 
+ 	/* round offset up to next period boundary */
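
The ceph hunk is a classic 32-bit multiplication overflow: both operands
are s32, so the product is computed in 32 bits and only then widened into
the u64. A small demo of why the cast on one operand matters (the wrap is
emulated with uint32_t here, since overflowing a signed multiply would
itself be undefined):

  #include <stdint.h>
  #include <stdio.h>

  int main(void)
  {
          int32_t object_size  = 1 << 26;   /* 64 MiB objects */
          int32_t stripe_count = 64;

          /* 32-bit product wraps to 0 before the widening assignment. */
          uint64_t wrong = (uint32_t)object_size * (uint32_t)stripe_count;
          /* Casting first keeps the whole multiply in 64 bits. */
          uint64_t right = (uint64_t)object_size * stripe_count;

          printf("wrong=%llu right=%llu\n",
                 (unsigned long long)wrong, (unsigned long long)right);
          return 0;
  }
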
+diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
+index 71ddecaf771f81..02f438cd6bfaf6 100644
+--- a/fs/f2fs/file.c
++++ b/fs/f2fs/file.c
+@@ -35,6 +35,17 @@
+ #include <trace/events/f2fs.h>
+ #include <uapi/linux/f2fs.h>
+ 
++static void f2fs_zero_post_eof_page(struct inode *inode, loff_t new_size)
++{
++	loff_t old_size = i_size_read(inode);
++
++	if (old_size >= new_size)
++		return;
++
++	/* zero or drop pages only in the range [old_size, new_size] */
++	truncate_pagecache(inode, old_size);
++}
++
+ static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
+ {
+ 	struct inode *inode = file_inode(vmf->vma->vm_file);
+@@ -103,8 +114,13 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
+ 
+ 	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));
+ 
++	filemap_invalidate_lock(inode->i_mapping);
++	f2fs_zero_post_eof_page(inode, (folio->index + 1) << PAGE_SHIFT);
++	filemap_invalidate_unlock(inode->i_mapping);
++
+ 	file_update_time(vmf->vma->vm_file);
+ 	filemap_invalidate_lock_shared(inode->i_mapping);
++
+ 	folio_lock(folio);
+ 	if (unlikely(folio->mapping != inode->i_mapping ||
+ 			folio_pos(folio) > i_size_read(inode) ||
+@@ -1064,6 +1080,8 @@ int f2fs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
+ 		f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
+ 		filemap_invalidate_lock(inode->i_mapping);
+ 
++		if (attr->ia_size > old_size)
++			f2fs_zero_post_eof_page(inode, attr->ia_size);
+ 		truncate_setsize(inode, attr->ia_size);
+ 
+ 		if (attr->ia_size <= old_size)
+@@ -1182,6 +1200,10 @@ static int f2fs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
+ 	if (ret)
+ 		return ret;
+ 
++	filemap_invalidate_lock(inode->i_mapping);
++	f2fs_zero_post_eof_page(inode, offset + len);
++	filemap_invalidate_unlock(inode->i_mapping);
++
+ 	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
+ 	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
+ 
+@@ -1465,6 +1487,8 @@ static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
+ 	f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ 	filemap_invalidate_lock(inode->i_mapping);
+ 
++	f2fs_zero_post_eof_page(inode, offset + len);
++
+ 	f2fs_lock_op(sbi);
+ 	f2fs_drop_extent_tree(inode);
+ 	truncate_pagecache(inode, offset);
+@@ -1586,6 +1610,10 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
+ 	if (ret)
+ 		return ret;
+ 
++	filemap_invalidate_lock(mapping);
++	f2fs_zero_post_eof_page(inode, offset + len);
++	filemap_invalidate_unlock(mapping);
++
+ 	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
+ 	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
+ 
+@@ -1717,6 +1745,8 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
+ 	/* avoid gc operation during block exchange */
+ 	f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ 	filemap_invalidate_lock(mapping);
++
++	f2fs_zero_post_eof_page(inode, offset + len);
+ 	truncate_pagecache(inode, offset);
+ 
+ 	while (!ret && idx > pg_start) {
+@@ -1774,6 +1804,10 @@ static int f2fs_expand_inode_data(struct inode *inode, loff_t offset,
+ 	if (err)
+ 		return err;
+ 
++	filemap_invalidate_lock(inode->i_mapping);
++	f2fs_zero_post_eof_page(inode, offset + len);
++	filemap_invalidate_unlock(inode->i_mapping);
++
+ 	f2fs_balance_fs(sbi, true);
+ 
+ 	pg_start = ((unsigned long long)offset) >> PAGE_SHIFT;
+@@ -4715,6 +4749,10 @@ static ssize_t f2fs_write_checks(struct kiocb *iocb, struct iov_iter *from)
+ 	err = file_modified(file);
+ 	if (err)
+ 		return err;
++
++	filemap_invalidate_lock(inode->i_mapping);
++	f2fs_zero_post_eof_page(inode, iocb->ki_pos + iov_iter_count(from));
++	filemap_invalidate_unlock(inode->i_mapping);
+ 	return count;
+ }
+ 
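
All of the f2fs call sites above funnel through the same small guard:
only when an operation extends the file does the stale page cache beyond
the old EOF need to be dropped. A sketch of just that guard, with a
hypothetical drop_cache_from() in place of truncate_pagecache():

  #include <stdio.h>

  static void drop_cache_from(long off)
  {
          printf("drop page cache at and beyond offset %ld\n", off);
  }

  static void zero_post_eof(long old_size, long new_size)
  {
          if (old_size >= new_size)
                  return;   /* shrinking or no-op: nothing beyond EOF */
          drop_cache_from(old_size);
  }

  int main(void)
  {
          zero_post_eof(4096, 8192);   /* extends: stale pages dropped */
          zero_post_eof(8192, 4096);   /* shrinks: handled elsewhere */
          return 0;
  }

Note the callers also wrap the call in filemap_invalidate_lock(), so the
check and the drop happen atomically with respect to page faults.
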
+diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
+index 330f89ddb5c8f3..f0e83ea56e38c4 100644
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -1787,26 +1787,32 @@ static int f2fs_statfs_project(struct super_block *sb,
+ 
+ 	limit = min_not_zero(dquot->dq_dqb.dqb_bsoftlimit,
+ 					dquot->dq_dqb.dqb_bhardlimit);
+-	if (limit)
+-		limit >>= sb->s_blocksize_bits;
++	limit >>= sb->s_blocksize_bits;
++
++	if (limit) {
++		uint64_t remaining = 0;
+ 
+-	if (limit && buf->f_blocks > limit) {
+ 		curblock = (dquot->dq_dqb.dqb_curspace +
+ 			    dquot->dq_dqb.dqb_rsvspace) >> sb->s_blocksize_bits;
+-		buf->f_blocks = limit;
+-		buf->f_bfree = buf->f_bavail =
+-			(buf->f_blocks > curblock) ?
+-			 (buf->f_blocks - curblock) : 0;
++		if (limit > curblock)
++			remaining = limit - curblock;
++
++		buf->f_blocks = min(buf->f_blocks, limit);
++		buf->f_bfree = min(buf->f_bfree, remaining);
++		buf->f_bavail = min(buf->f_bavail, remaining);
+ 	}
+ 
+ 	limit = min_not_zero(dquot->dq_dqb.dqb_isoftlimit,
+ 					dquot->dq_dqb.dqb_ihardlimit);
+ 
+-	if (limit && buf->f_files > limit) {
+-		buf->f_files = limit;
+-		buf->f_ffree =
+-			(buf->f_files > dquot->dq_dqb.dqb_curinodes) ?
+-			 (buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0;
++	if (limit) {
++		uint64_t remaining = 0;
++
++		if (limit > dquot->dq_dqb.dqb_curinodes)
++			remaining = limit - dquot->dq_dqb.dqb_curinodes;
++
++		buf->f_files = min(buf->f_files, limit);
++		buf->f_ffree = min(buf->f_ffree, remaining);
+ 	}
+ 
+ 	spin_unlock(&dquot->dq_dqb_lock);
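
The reworked f2fs_statfs_project() arithmetic clamps unconditionally with
min() instead of only when the total exceeds the limit, which also fixes
the free counts when current usage already exceeds the quota. A
self-contained sketch of the block-count half:

  #include <stdint.h>
  #include <stdio.h>

  #define MIN(a, b) ((a) < (b) ? (a) : (b))

  static void clamp_to_quota(uint64_t *blocks, uint64_t *bfree,
                             uint64_t limit, uint64_t cur)
  {
          uint64_t remaining;

          if (!limit)
                  return;   /* no quota configured */
          remaining = limit > cur ? limit - cur : 0;
          *blocks = MIN(*blocks, limit);
          *bfree  = MIN(*bfree, remaining);
  }

  int main(void)
  {
          uint64_t blocks = 1000000, bfree = 900000;

          clamp_to_quota(&blocks, &bfree, 5000, 1200);
          /* prints f_blocks=5000 f_bfree=3800 */
          printf("f_blocks=%llu f_bfree=%llu\n",
                 (unsigned long long)blocks, (unsigned long long)bfree);
          return 0;
  }
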
+diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
+index ff543dc09130e1..ce7324d0d9ed1d 100644
+--- a/fs/fuse/dir.c
++++ b/fs/fuse/dir.c
+@@ -1921,6 +1921,7 @@ int fuse_do_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
+ 	int err;
+ 	bool trust_local_cmtime = is_wb;
+ 	bool fault_blocked = false;
++	u64 attr_version;
+ 
+ 	if (!fc->default_permissions)
+ 		attr->ia_valid |= ATTR_FORCE;
+@@ -2005,6 +2006,8 @@ int fuse_do_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
+ 		if (fc->handle_killpriv_v2 && !capable(CAP_FSETID))
+ 			inarg.valid |= FATTR_KILL_SUIDGID;
+ 	}
++
++	attr_version = fuse_get_attr_version(fm->fc);
+ 	fuse_setattr_fill(fc, &args, inode, &inarg, &outarg);
+ 	err = fuse_simple_request(fm, &args);
+ 	if (err) {
+@@ -2030,6 +2033,14 @@ int fuse_do_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
+ 		/* FIXME: clear I_DIRTY_SYNC? */
+ 	}
+ 
++	if (fi->attr_version > attr_version) {
++		/*
++		 * Apply attributes, for example for fsnotify_change(), but set
++		 * attribute timeout to zero.
++		 */
++		outarg.attr_valid = outarg.attr_valid_nsec = 0;
++	}
++
+ 	fuse_change_attributes_common(inode, &outarg.attr, NULL,
+ 				      ATTR_TIMEOUT(&outarg),
+ 				      fuse_get_cache_mask(inode));
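
The fuse hunk samples the inode's attribute version before issuing the
SETATTR and, if it grew while the request was in flight, applies the
returned attributes with a zero cache timeout. The comparison itself is
tiny; a sketch with hypothetical field names:

  #include <stdint.h>
  #include <stdio.h>

  struct attr_reply {
          uint64_t valid_sec;
          uint32_t valid_nsec;
  };

  static void fixup_timeout(uint64_t ver_before, uint64_t ver_now,
                            struct attr_reply *out)
  {
          /* Someone changed the attributes while we were waiting:
           * keep the values (e.g. for fsnotify) but do not cache them. */
          if (ver_now > ver_before)
                  out->valid_sec = out->valid_nsec = 0;
  }

  int main(void)
  {
          struct attr_reply r = { .valid_sec = 60, .valid_nsec = 0 };

          fixup_timeout(5, 7, &r);   /* raced: timeout forced to zero */
          printf("valid=%llu.%09u\n",
                 (unsigned long long)r.valid_sec, r.valid_nsec);
          return 0;
  }
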
+diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
+index 0e1019382cf519..35e063c9f3a42e 100644
+--- a/fs/jfs/jfs_dmap.c
++++ b/fs/jfs/jfs_dmap.c
+@@ -178,45 +178,30 @@ int dbMount(struct inode *ipbmap)
+ 	dbmp_le = (struct dbmap_disk *) mp->data;
+ 	bmp->db_mapsize = le64_to_cpu(dbmp_le->dn_mapsize);
+ 	bmp->db_nfree = le64_to_cpu(dbmp_le->dn_nfree);
+-
+ 	bmp->db_l2nbperpage = le32_to_cpu(dbmp_le->dn_l2nbperpage);
+-	if (bmp->db_l2nbperpage > L2PSIZE - L2MINBLOCKSIZE ||
+-		bmp->db_l2nbperpage < 0) {
+-		err = -EINVAL;
+-		goto err_release_metapage;
+-	}
+-
+ 	bmp->db_numag = le32_to_cpu(dbmp_le->dn_numag);
+-	if (!bmp->db_numag || bmp->db_numag > MAXAG) {
+-		err = -EINVAL;
+-		goto err_release_metapage;
+-	}
+-
+ 	bmp->db_maxlevel = le32_to_cpu(dbmp_le->dn_maxlevel);
+ 	bmp->db_maxag = le32_to_cpu(dbmp_le->dn_maxag);
+ 	bmp->db_agpref = le32_to_cpu(dbmp_le->dn_agpref);
+-	if (bmp->db_maxag >= MAXAG || bmp->db_maxag < 0 ||
+-		bmp->db_agpref >= MAXAG || bmp->db_agpref < 0) {
+-		err = -EINVAL;
+-		goto err_release_metapage;
+-	}
+-
+ 	bmp->db_aglevel = le32_to_cpu(dbmp_le->dn_aglevel);
+ 	bmp->db_agheight = le32_to_cpu(dbmp_le->dn_agheight);
+ 	bmp->db_agwidth = le32_to_cpu(dbmp_le->dn_agwidth);
+-	if (!bmp->db_agwidth) {
+-		err = -EINVAL;
+-		goto err_release_metapage;
+-	}
+ 	bmp->db_agstart = le32_to_cpu(dbmp_le->dn_agstart);
+ 	bmp->db_agl2size = le32_to_cpu(dbmp_le->dn_agl2size);
+-	if (bmp->db_agl2size > L2MAXL2SIZE - L2MAXAG ||
+-	    bmp->db_agl2size < 0) {
+-		err = -EINVAL;
+-		goto err_release_metapage;
+-	}
+ 
+-	if (((bmp->db_mapsize - 1) >> bmp->db_agl2size) > MAXAG) {
++	if ((bmp->db_l2nbperpage > L2PSIZE - L2MINBLOCKSIZE) ||
++	    (bmp->db_l2nbperpage < 0) ||
++	    !bmp->db_numag || (bmp->db_numag > MAXAG) ||
++	    (bmp->db_maxag >= MAXAG) || (bmp->db_maxag < 0) ||
++	    (bmp->db_agpref >= MAXAG) || (bmp->db_agpref < 0) ||
++	    (bmp->db_agheight < 0) || (bmp->db_agheight > (L2LPERCTL >> 1)) ||
++	    (bmp->db_agwidth < 1) || (bmp->db_agwidth > (LPERCTL / MAXAG)) ||
++	    (bmp->db_agwidth > (1 << (L2LPERCTL - (bmp->db_agheight << 1)))) ||
++	    (bmp->db_agstart < 0) ||
++	    (bmp->db_agstart > (CTLTREESIZE - 1 - bmp->db_agwidth * (MAXAG - 1))) ||
++	    (bmp->db_agl2size > L2MAXL2SIZE - L2MAXAG) ||
++	    (bmp->db_agl2size < 0) ||
++	    ((bmp->db_mapsize - 1) >> bmp->db_agl2size) > MAXAG) {
+ 		err = -EINVAL;
+ 		goto err_release_metapage;
+ 	}
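
The jfs hunk folds the previously scattered dbMount() sanity checks into
one compound condition and adds bounds for db_agheight, db_agwidth and
db_agstart. A sketch of the shape of the new checks; the limit macros
below are placeholder values, not the real jfs_dmap.h constants:

  #include <stdio.h>

  #define MAXAG       128     /* placeholder */
  #define L2LPERCTL   10      /* placeholder */
  #define LPERCTL     1024    /* placeholder */
  #define CTLTREESIZE 1365    /* placeholder */

  static int dmap_sane(int numag, int agheight, int agwidth, int agstart)
  {
          if (!numag || numag > MAXAG)
                  return 0;
          if (agheight < 0 || agheight > (L2LPERCTL >> 1))
                  return 0;
          if (agwidth < 1 || agwidth > (LPERCTL / MAXAG) ||
              agwidth > (1 << (L2LPERCTL - (agheight << 1))))
                  return 0;
          if (agstart < 0 ||
              agstart > (CTLTREESIZE - 1 - agwidth * (MAXAG - 1)))
                  return 0;
          return 1;
  }

  int main(void)
  {
          printf("%d %d\n", dmap_sane(4, 2, 2, 0),
                            dmap_sane(0, 99, 0, -1));
          return 0;
  }

Rejecting everything up front keeps the error path to a single
err_release_metapage label, as in the patched function.
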
+diff --git a/fs/namespace.c b/fs/namespace.c
+index 843bc6191f30b4..b5c5cf01d0c40a 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -2521,14 +2521,14 @@ static int attach_recursive_mnt(struct mount *source_mnt,
+ 	hlist_for_each_entry_safe(child, n, &tree_list, mnt_hash) {
+ 		struct mount *q;
+ 		hlist_del_init(&child->mnt_hash);
+-		q = __lookup_mnt(&child->mnt_parent->mnt,
+-				 child->mnt_mountpoint);
+-		if (q)
+-			mnt_change_mountpoint(child, smp, q);
+ 		/* Notice when we are propagating across user namespaces */
+ 		if (child->mnt_parent->mnt_ns->user_ns != user_ns)
+ 			lock_mnt_tree(child);
+ 		child->mnt.mnt_flags &= ~MNT_LOCKED;
++		q = __lookup_mnt(&child->mnt_parent->mnt,
++				 child->mnt_mountpoint);
++		if (q)
++			mnt_change_mountpoint(child, smp, q);
+ 		commit_tree(child);
+ 	}
+ 	put_mountpoint(smp);
+diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
+index 330273cf945316..16607b24ab9c15 100644
+--- a/fs/nfs/inode.c
++++ b/fs/nfs/inode.c
+@@ -557,6 +557,8 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
+ 			set_nlink(inode, fattr->nlink);
+ 		else if (fattr_supported & NFS_ATTR_FATTR_NLINK)
+ 			nfs_set_cache_invalid(inode, NFS_INO_INVALID_NLINK);
++		else
++			set_nlink(inode, 1);
+ 		if (fattr->valid & NFS_ATTR_FATTR_OWNER)
+ 			inode->i_uid = fattr->uid;
+ 		else if (fattr_supported & NFS_ATTR_FATTR_OWNER)
+@@ -633,6 +635,34 @@ nfs_fattr_fixup_delegated(struct inode *inode, struct nfs_fattr *fattr)
+ 	}
+ }
+ 
++static void nfs_set_timestamps_to_ts(struct inode *inode, struct iattr *attr)
++{
++	unsigned int cache_flags = 0;
++
++	if (attr->ia_valid & ATTR_MTIME_SET) {
++		struct timespec64 ctime = inode_get_ctime(inode);
++		struct timespec64 mtime = inode_get_mtime(inode);
++		struct timespec64 now;
++		int updated = 0;
++
++		now = inode_set_ctime_current(inode);
++		if (!timespec64_equal(&now, &ctime))
++			updated |= S_CTIME;
++
++		inode_set_mtime_to_ts(inode, attr->ia_mtime);
++		if (!timespec64_equal(&now, &mtime))
++			updated |= S_MTIME;
++
++		inode_maybe_inc_iversion(inode, updated);
++		cache_flags |= NFS_INO_INVALID_CTIME | NFS_INO_INVALID_MTIME;
++	}
++	if (attr->ia_valid & ATTR_ATIME_SET) {
++		inode_set_atime_to_ts(inode, attr->ia_atime);
++		cache_flags |= NFS_INO_INVALID_ATIME;
++	}
++	NFS_I(inode)->cache_validity &= ~cache_flags;
++}
++
+ static void nfs_update_timestamps(struct inode *inode, unsigned int ia_valid)
+ {
+ 	enum file_time_flags time_flags = 0;
+@@ -701,14 +731,27 @@ nfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
+ 
+ 	if (nfs_have_delegated_mtime(inode) && attr->ia_valid & ATTR_MTIME) {
+ 		spin_lock(&inode->i_lock);
+-		nfs_update_timestamps(inode, attr->ia_valid);
++		if (attr->ia_valid & ATTR_MTIME_SET) {
++			nfs_set_timestamps_to_ts(inode, attr);
++			attr->ia_valid &= ~(ATTR_MTIME|ATTR_MTIME_SET|
++						ATTR_ATIME|ATTR_ATIME_SET);
++		} else {
++			nfs_update_timestamps(inode, attr->ia_valid);
++			attr->ia_valid &= ~(ATTR_MTIME|ATTR_ATIME);
++		}
+ 		spin_unlock(&inode->i_lock);
+-		attr->ia_valid &= ~(ATTR_MTIME | ATTR_ATIME);
+ 	} else if (nfs_have_delegated_atime(inode) &&
+ 		   attr->ia_valid & ATTR_ATIME &&
+ 		   !(attr->ia_valid & ATTR_MTIME)) {
+-		nfs_update_delegated_atime(inode);
+-		attr->ia_valid &= ~ATTR_ATIME;
++		if (attr->ia_valid & ATTR_ATIME_SET) {
++			spin_lock(&inode->i_lock);
++			nfs_set_timestamps_to_ts(inode, attr);
++			spin_unlock(&inode->i_lock);
++			attr->ia_valid &= ~(ATTR_ATIME|ATTR_ATIME_SET);
++		} else {
++			nfs_update_delegated_atime(inode);
++			attr->ia_valid &= ~ATTR_ATIME;
++		}
+ 	}
+ 
+ 	/* Optimization: if the end result is no change, don't RPC */
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 57d49e874f51fa..77b239b10d4187 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -313,14 +313,14 @@ static void nfs4_bitmap_copy_adjust(__u32 *dst, const __u32 *src,
+ 
+ 	if (nfs_have_delegated_mtime(inode)) {
+ 		if (!(cache_validity & NFS_INO_INVALID_ATIME))
+-			dst[1] &= ~FATTR4_WORD1_TIME_ACCESS;
++			dst[1] &= ~(FATTR4_WORD1_TIME_ACCESS|FATTR4_WORD1_TIME_ACCESS_SET);
+ 		if (!(cache_validity & NFS_INO_INVALID_MTIME))
+-			dst[1] &= ~FATTR4_WORD1_TIME_MODIFY;
++			dst[1] &= ~(FATTR4_WORD1_TIME_MODIFY|FATTR4_WORD1_TIME_MODIFY_SET);
+ 		if (!(cache_validity & NFS_INO_INVALID_CTIME))
+-			dst[1] &= ~FATTR4_WORD1_TIME_METADATA;
++			dst[1] &= ~(FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY_SET);
+ 	} else if (nfs_have_delegated_atime(inode)) {
+ 		if (!(cache_validity & NFS_INO_INVALID_ATIME))
+-			dst[1] &= ~FATTR4_WORD1_TIME_ACCESS;
++			dst[1] &= ~(FATTR4_WORD1_TIME_ACCESS|FATTR4_WORD1_TIME_ACCESS_SET);
+ 	}
+ }
+ 
+@@ -6174,6 +6174,8 @@ static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen,
+ 	struct nfs_server *server = NFS_SERVER(inode);
+ 	int ret;
+ 
++	if (unlikely(NFS_FH(inode)->size == 0))
++		return -ENODATA;
+ 	if (!nfs4_server_supports_acls(server, type))
+ 		return -EOPNOTSUPP;
+ 	ret = nfs_revalidate_inode(inode, NFS_INO_INVALID_CHANGE);
+@@ -6248,6 +6250,9 @@ static int nfs4_proc_set_acl(struct inode *inode, const void *buf,
+ {
+ 	struct nfs4_exception exception = { };
+ 	int err;
++
++	if (unlikely(NFS_FH(inode)->size == 0))
++		return -ENODATA;
+ 	do {
+ 		err = __nfs4_proc_set_acl(inode, buf, buflen, type);
+ 		trace_nfs4_set_acl(inode, err);
+@@ -10814,7 +10819,7 @@ const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = {
+ 
+ static ssize_t nfs4_listxattr(struct dentry *dentry, char *list, size_t size)
+ {
+-	ssize_t error, error2, error3;
++	ssize_t error, error2, error3, error4;
+ 	size_t left = size;
+ 
+ 	error = generic_listxattr(dentry, list, left);
+@@ -10837,8 +10842,16 @@ static ssize_t nfs4_listxattr(struct dentry *dentry, char *list, size_t size)
+ 	error3 = nfs4_listxattr_nfs4_user(d_inode(dentry), list, left);
+ 	if (error3 < 0)
+ 		return error3;
++	if (list) {
++		list += error3;
++		left -= error3;
++	}
++
++	error4 = security_inode_listsecurity(d_inode(dentry), list, left);
++	if (error4 < 0)
++		return error4;
+ 
+-	error += error2 + error3;
++	error += error2 + error3 + error4;
+ 	if (size && error > size)
+ 		return -ERANGE;
+ 	return error;
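
The nfs4_listxattr() hunk adds the security-label names and, with them,
the missing pointer/space bookkeeping between listers: each lister writes
NUL-terminated names into the buffer and returns the bytes used, so the
caller must advance the pointer and shrink the remaining space before the
next call. A sketch of that contract:

  #include <stdio.h>
  #include <string.h>

  /* Returns bytes consumed, or -1 when the name does not fit
   * (the kernel returns -ERANGE in that case). */
  static long add_names(char *buf, size_t left, const char *name)
  {
          size_t need = strlen(name) + 1;

          if (buf) {
                  if (need > left)
                          return -1;
                  memcpy(buf, name, need);
          }
          return (long)need;
  }

  int main(void)
  {
          char list[64], *p = list;
          size_t left = sizeof(list);
          long n, total = 0;

          n = add_names(p, left, "user.a");   p += n; left -= n; total += n;
          n = add_names(p, left, "system.b"); p += n; left -= n; total += n;
          printf("total=%ld\n", total);   /* 16 */
          return 0;
  }
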
+diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c
+index 8f080046c59d9a..99571de665dde9 100644
+--- a/fs/overlayfs/util.c
++++ b/fs/overlayfs/util.c
+@@ -300,7 +300,9 @@ enum ovl_path_type ovl_path_realdata(struct dentry *dentry, struct path *path)
+ 
+ struct dentry *ovl_dentry_upper(struct dentry *dentry)
+ {
+-	return ovl_upperdentry_dereference(OVL_I(d_inode(dentry)));
++	struct inode *inode = d_inode(dentry);
++
++	return inode ? ovl_upperdentry_dereference(OVL_I(inode)) : NULL;
+ }
+ 
+ struct dentry *ovl_dentry_lower(struct dentry *dentry)
+diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
+index 536b7dc4538182..96fe904b2ac587 100644
+--- a/fs/proc/task_mmu.c
++++ b/fs/proc/task_mmu.c
+@@ -2155,7 +2155,7 @@ static unsigned long pagemap_thp_category(struct pagemap_scan_private *p,
+ 				categories |= PAGE_IS_FILE;
+ 		}
+ 
+-		if (is_zero_pfn(pmd_pfn(pmd)))
++		if (is_huge_zero_pmd(pmd))
+ 			categories |= PAGE_IS_PFNZERO;
+ 		if (pmd_soft_dirty(pmd))
+ 			categories |= PAGE_IS_SOFT_DIRTY;
+diff --git a/fs/smb/client/cifs_debug.c b/fs/smb/client/cifs_debug.c
+index e03c890de0a068..c0196be0e65fc0 100644
+--- a/fs/smb/client/cifs_debug.c
++++ b/fs/smb/client/cifs_debug.c
+@@ -362,6 +362,10 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
+ 	c = 0;
+ 	spin_lock(&cifs_tcp_ses_lock);
+ 	list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
++#ifdef CONFIG_CIFS_SMB_DIRECT
++		struct smbdirect_socket_parameters *sp;
++#endif
++
+ 		/* channel info will be printed as a part of sessions below */
+ 		if (SERVER_IS_CHAN(server))
+ 			continue;
+@@ -383,25 +387,26 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
+ 			seq_printf(m, "\nSMBDirect transport not available");
+ 			goto skip_rdma;
+ 		}
++		sp = &server->smbd_conn->socket.parameters;
+ 
+ 		seq_printf(m, "\nSMBDirect (in hex) protocol version: %x "
+ 			"transport status: %x",
+ 			server->smbd_conn->protocol,
+-			server->smbd_conn->transport_status);
++			server->smbd_conn->socket.status);
+ 		seq_printf(m, "\nConn receive_credit_max: %x "
+ 			"send_credit_target: %x max_send_size: %x",
+-			server->smbd_conn->receive_credit_max,
+-			server->smbd_conn->send_credit_target,
+-			server->smbd_conn->max_send_size);
++			sp->recv_credit_max,
++			sp->send_credit_target,
++			sp->max_send_size);
+ 		seq_printf(m, "\nConn max_fragmented_recv_size: %x "
+ 			"max_fragmented_send_size: %x max_receive_size:%x",
+-			server->smbd_conn->max_fragmented_recv_size,
+-			server->smbd_conn->max_fragmented_send_size,
+-			server->smbd_conn->max_receive_size);
++			sp->max_fragmented_recv_size,
++			sp->max_fragmented_send_size,
++			sp->max_recv_size);
+ 		seq_printf(m, "\nConn keep_alive_interval: %x "
+ 			"max_readwrite_size: %x rdma_readwrite_threshold: %x",
+-			server->smbd_conn->keep_alive_interval,
+-			server->smbd_conn->max_readwrite_size,
++			sp->keepalive_interval_msec * 1000,
++			sp->max_read_write_size,
+ 			server->smbd_conn->rdma_readwrite_threshold);
+ 		seq_printf(m, "\nDebug count_get_receive_buffer: %x "
+ 			"count_put_receive_buffer: %x count_send_empty: %x",
+diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
+index e0faee22be07e8..c66655adecb2c9 100644
+--- a/fs/smb/client/cifsglob.h
++++ b/fs/smb/client/cifsglob.h
+@@ -677,6 +677,7 @@ inc_rfc1001_len(void *buf, int count)
+ struct TCP_Server_Info {
+ 	struct list_head tcp_ses_list;
+ 	struct list_head smb_ses_list;
++	struct list_head rlist; /* reconnect list */
+ 	spinlock_t srv_lock;  /* protect anything here that is not protected */
+ 	__u64 conn_id; /* connection identifier (useful for debugging) */
+ 	int srv_count; /* reference counter */
+@@ -739,6 +740,7 @@ struct TCP_Server_Info {
+ 	char workstation_RFC1001_name[RFC1001_NAME_LEN_WITH_NULL];
+ 	__u32 sequence_number; /* for signing, protected by srv_mutex */
+ 	__u32 reconnect_instance; /* incremented on each reconnect */
++	__le32 session_key_id; /* retrieved from negotiate response and send in session setup request */
+ 	struct session_key session_key;
+ 	unsigned long lstrp; /* when we got last response from this server */
+ 	struct cifs_secmech secmech; /* crypto sec mech functs, descriptors */
+diff --git a/fs/smb/client/cifspdu.h b/fs/smb/client/cifspdu.h
+index 28f8ca470770d8..688a26aeef3b40 100644
+--- a/fs/smb/client/cifspdu.h
++++ b/fs/smb/client/cifspdu.h
+@@ -557,7 +557,7 @@ typedef union smb_com_session_setup_andx {
+ 		__le16 MaxBufferSize;
+ 		__le16 MaxMpxCount;
+ 		__le16 VcNumber;
+-		__u32 SessionKey;
++		__le32 SessionKey;
+ 		__le16 SecurityBlobLength;
+ 		__u32 Reserved;
+ 		__le32 Capabilities;	/* see below */
+@@ -576,7 +576,7 @@ typedef union smb_com_session_setup_andx {
+ 		__le16 MaxBufferSize;
+ 		__le16 MaxMpxCount;
+ 		__le16 VcNumber;
+-		__u32 SessionKey;
++		__le32 SessionKey;
+ 		__le16 CaseInsensitivePasswordLength; /* ASCII password len */
+ 		__le16 CaseSensitivePasswordLength; /* Unicode password length*/
+ 		__u32 Reserved;	/* see below */
+@@ -614,7 +614,7 @@ typedef union smb_com_session_setup_andx {
+ 		__le16 MaxBufferSize;
+ 		__le16 MaxMpxCount;
+ 		__le16 VcNumber;
+-		__u32 SessionKey;
++		__le32 SessionKey;
+ 		__le16 PasswordLength;
+ 		__u32 Reserved; /* encrypt key len and offset */
+ 		__le16 ByteCount;
+diff --git a/fs/smb/client/cifssmb.c b/fs/smb/client/cifssmb.c
+index cf8d9de2298fcc..d6ba55d4720d20 100644
+--- a/fs/smb/client/cifssmb.c
++++ b/fs/smb/client/cifssmb.c
+@@ -481,6 +481,7 @@ CIFSSMBNegotiate(const unsigned int xid,
+ 	server->max_rw = le32_to_cpu(pSMBr->MaxRawSize);
+ 	cifs_dbg(NOISY, "Max buf = %d\n", ses->server->maxBuf);
+ 	server->capabilities = le32_to_cpu(pSMBr->Capabilities);
++	server->session_key_id = pSMBr->SessionKey;
+ 	server->timeAdj = (int)(__s16)le16_to_cpu(pSMBr->ServerTimeZone);
+ 	server->timeAdj *= 60;
+ 
+diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
+index 91f5fd818cbf4a..9275e0d1e2f640 100644
+--- a/fs/smb/client/connect.c
++++ b/fs/smb/client/connect.c
+@@ -140,6 +140,14 @@ static void smb2_query_server_interfaces(struct work_struct *work)
+ 			   (SMB_INTERFACE_POLL_INTERVAL * HZ));
+ }
+ 
++#define set_need_reco(server) \
++do { \
++	spin_lock(&server->srv_lock); \
++	if (server->tcpStatus != CifsExiting) \
++		server->tcpStatus = CifsNeedReconnect; \
++	spin_unlock(&server->srv_lock); \
++} while (0)
++
+ /*
+  * Update the tcpStatus for the server.
+  * This is used to signal the cifsd thread to call cifs_reconnect
+@@ -153,39 +161,45 @@ void
+ cifs_signal_cifsd_for_reconnect(struct TCP_Server_Info *server,
+ 				bool all_channels)
+ {
+-	struct TCP_Server_Info *pserver;
++	struct TCP_Server_Info *nserver;
+ 	struct cifs_ses *ses;
++	LIST_HEAD(reco);
+ 	int i;
+ 
+-	/* If server is a channel, select the primary channel */
+-	pserver = SERVER_IS_CHAN(server) ? server->primary_server : server;
+-
+ 	/* if we need to signal just this channel */
+ 	if (!all_channels) {
+-		spin_lock(&server->srv_lock);
+-		if (server->tcpStatus != CifsExiting)
+-			server->tcpStatus = CifsNeedReconnect;
+-		spin_unlock(&server->srv_lock);
++		set_need_reco(server);
+ 		return;
+ 	}
+ 
+-	spin_lock(&cifs_tcp_ses_lock);
+-	list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
+-		if (cifs_ses_exiting(ses))
+-			continue;
+-		spin_lock(&ses->chan_lock);
+-		for (i = 0; i < ses->chan_count; i++) {
+-			if (!ses->chans[i].server)
++	if (SERVER_IS_CHAN(server))
++		server = server->primary_server;
++	scoped_guard(spinlock, &cifs_tcp_ses_lock) {
++		set_need_reco(server);
++		list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
++			spin_lock(&ses->ses_lock);
++			if (ses->ses_status == SES_EXITING) {
++				spin_unlock(&ses->ses_lock);
+ 				continue;
+-
+-			spin_lock(&ses->chans[i].server->srv_lock);
+-			if (ses->chans[i].server->tcpStatus != CifsExiting)
+-				ses->chans[i].server->tcpStatus = CifsNeedReconnect;
+-			spin_unlock(&ses->chans[i].server->srv_lock);
++			}
++			spin_lock(&ses->chan_lock);
++			for (i = 1; i < ses->chan_count; i++) {
++				nserver = ses->chans[i].server;
++				if (!nserver)
++					continue;
++				nserver->srv_count++;
++				list_add(&nserver->rlist, &reco);
++			}
++			spin_unlock(&ses->chan_lock);
++			spin_unlock(&ses->ses_lock);
+ 		}
+-		spin_unlock(&ses->chan_lock);
+ 	}
+-	spin_unlock(&cifs_tcp_ses_lock);
++
++	list_for_each_entry_safe(server, nserver, &reco, rlist) {
++		list_del_init(&server->rlist);
++		set_need_reco(server);
++		cifs_put_tcp_session(server, 0);
++	}
+ }
+ 
+ /*
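
The reworked cifs_signal_cifsd_for_reconnect() collects the secondary
channels on a local list while holding the lock (taking a reference on
each), then marks them and drops the references only after unlocking. A
sketch of that collect-then-process shape, with trivial stand-ins for the
spinlock and the rlist list_head:

  #include <stdio.h>

  static void lock(void)   { /* spin_lock(&cifs_tcp_ses_lock) */ }
  static void unlock(void) { /* spin_unlock(...) */ }

  struct chan { int refs; int need_reconnect; };

  int main(void)
  {
          struct chan chans[3] = { {1, 0}, {1, 0}, {1, 0} };
          struct chan *todo[3];
          int n = 0, i;

          lock();
          for (i = 0; i < 3; i++) {
                  chans[i].refs++;        /* srv_count++ under the lock */
                  todo[n++] = &chans[i];
          }
          unlock();

          /* Heavier work happens outside the lock. */
          for (i = 0; i < n; i++) {
                  todo[i]->need_reconnect = 1;
                  todo[i]->refs--;        /* cifs_put_tcp_session() */
          }
          printf("marked %d channels\n", n);
          return 0;
  }

The extra reference is what keeps each server alive between dropping the
lock and the deferred cifs_put_tcp_session().
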
+diff --git a/fs/smb/client/misc.c b/fs/smb/client/misc.c
+index 4373dd64b66d4f..5122f3895dfc29 100644
+--- a/fs/smb/client/misc.c
++++ b/fs/smb/client/misc.c
+@@ -323,6 +323,14 @@ check_smb_hdr(struct smb_hdr *smb)
+ 	if (smb->Command == SMB_COM_LOCKING_ANDX)
+ 		return 0;
+ 
++	/*
++	 * Windows NT server returns error response (e.g. STATUS_DELETE_PENDING
++	 * or STATUS_OBJECT_NAME_NOT_FOUND or ERRDOS/ERRbadfile or any other)
++	 * for some TRANS2 requests without the RESPONSE flag set in the header.
++	 */
++	if (smb->Command == SMB_COM_TRANSACTION2 && smb->Status.CifsError != 0)
++		return 0;
++
+ 	cifs_dbg(VFS, "Server sent request, not response. mid=%u\n",
+ 		 get_mid(smb));
+ 	return 1;
+diff --git a/fs/smb/client/sess.c b/fs/smb/client/sess.c
+index 10d82d0dc6a9ee..8be7c4d2d9d623 100644
+--- a/fs/smb/client/sess.c
++++ b/fs/smb/client/sess.c
+@@ -658,6 +658,7 @@ static __u32 cifs_ssetup_hdr(struct cifs_ses *ses,
+ 					USHRT_MAX));
+ 	pSMB->req.MaxMpxCount = cpu_to_le16(server->maxReq);
+ 	pSMB->req.VcNumber = cpu_to_le16(1);
++	pSMB->req.SessionKey = server->session_key_id;
+ 
+ 	/* Now no need to set SMBFLG_CASELESS or obsolete CANONICAL PATH */
+ 
+@@ -1714,22 +1715,22 @@ _sess_auth_rawntlmssp_assemble_req(struct sess_data *sess_data)
+ 	pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base;
+ 
+ 	capabilities = cifs_ssetup_hdr(ses, server, pSMB);
+-	if ((pSMB->req.hdr.Flags2 & SMBFLG2_UNICODE) == 0) {
+-		cifs_dbg(VFS, "NTLMSSP requires Unicode support\n");
+-		return -ENOSYS;
+-	}
+-
+ 	pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC;
+ 	capabilities |= CAP_EXTENDED_SECURITY;
+ 	pSMB->req.Capabilities |= cpu_to_le32(capabilities);
+ 
+ 	bcc_ptr = sess_data->iov[2].iov_base;
+-	/* unicode strings must be word aligned */
+-	if (!IS_ALIGNED(sess_data->iov[0].iov_len + sess_data->iov[1].iov_len, 2)) {
+-		*bcc_ptr = 0;
+-		bcc_ptr++;
++
++	if (pSMB->req.hdr.Flags2 & SMBFLG2_UNICODE) {
++		/* unicode strings must be word aligned */
++		if (!IS_ALIGNED(sess_data->iov[0].iov_len + sess_data->iov[1].iov_len, 2)) {
++			*bcc_ptr = 0;
++			bcc_ptr++;
++		}
++		unicode_oslm_strings(&bcc_ptr, sess_data->nls_cp);
++	} else {
++		ascii_oslm_strings(&bcc_ptr, sess_data->nls_cp);
+ 	}
+-	unicode_oslm_strings(&bcc_ptr, sess_data->nls_cp);
+ 
+ 	sess_data->iov[2].iov_len = (long) bcc_ptr -
+ 					(long) sess_data->iov[2].iov_base;
+diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
+index 74bcc51ccd32f8..e596bc4837b68f 100644
+--- a/fs/smb/client/smb2ops.c
++++ b/fs/smb/client/smb2ops.c
+@@ -504,6 +504,9 @@ smb3_negotiate_wsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
+ 	wsize = min_t(unsigned int, wsize, server->max_write);
+ #ifdef CONFIG_CIFS_SMB_DIRECT
+ 	if (server->rdma) {
++		struct smbdirect_socket_parameters *sp =
++			&server->smbd_conn->socket.parameters;
++
+ 		if (server->sign)
+ 			/*
+ 			 * Account for SMB2 data transfer packet header and
+@@ -511,12 +514,12 @@ smb3_negotiate_wsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
+ 			 */
+ 			wsize = min_t(unsigned int,
+ 				wsize,
+-				server->smbd_conn->max_fragmented_send_size -
++				sp->max_fragmented_send_size -
+ 					SMB2_READWRITE_PDU_HEADER_SIZE -
+ 					sizeof(struct smb2_transform_hdr));
+ 		else
+ 			wsize = min_t(unsigned int,
+-				wsize, server->smbd_conn->max_readwrite_size);
++				wsize, sp->max_read_write_size);
+ 	}
+ #endif
+ 	if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
+@@ -552,6 +555,9 @@ smb3_negotiate_rsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
+ 	rsize = min_t(unsigned int, rsize, server->max_read);
+ #ifdef CONFIG_CIFS_SMB_DIRECT
+ 	if (server->rdma) {
++		struct smbdirect_socket_parameters *sp =
++			&server->smbd_conn->socket.parameters;
++
+ 		if (server->sign)
+ 			/*
+ 			 * Account for SMB2 data transfer packet header and
+@@ -559,12 +565,12 @@ smb3_negotiate_rsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
+ 			 */
+ 			rsize = min_t(unsigned int,
+ 				rsize,
+-				server->smbd_conn->max_fragmented_recv_size -
++				sp->max_fragmented_recv_size -
+ 					SMB2_READWRITE_PDU_HEADER_SIZE -
+ 					sizeof(struct smb2_transform_hdr));
+ 		else
+ 			rsize = min_t(unsigned int,
+-				rsize, server->smbd_conn->max_readwrite_size);
++				rsize, sp->max_read_write_size);
+ 	}
+ #endif
+ 
+diff --git a/fs/smb/client/smbdirect.c b/fs/smb/client/smbdirect.c
+index 9d8be034f103f2..ac06f2617f3468 100644
+--- a/fs/smb/client/smbdirect.c
++++ b/fs/smb/client/smbdirect.c
+@@ -7,6 +7,7 @@
+ #include <linux/module.h>
+ #include <linux/highmem.h>
+ #include <linux/folio_queue.h>
++#include "../common/smbdirect/smbdirect_pdu.h"
+ #include "smbdirect.h"
+ #include "cifs_debug.h"
+ #include "cifsproto.h"
+@@ -50,9 +51,6 @@ struct smb_extract_to_rdma {
+ static ssize_t smb_extract_iter_to_rdma(struct iov_iter *iter, size_t len,
+ 					struct smb_extract_to_rdma *rdma);
+ 
+-/* SMBD version number */
+-#define SMBD_V1	0x0100
+-
+ /* Port numbers for SMBD transport */
+ #define SMB_PORT	445
+ #define SMBD_PORT	5445
+@@ -165,10 +163,11 @@ static void smbd_disconnect_rdma_work(struct work_struct *work)
+ {
+ 	struct smbd_connection *info =
+ 		container_of(work, struct smbd_connection, disconnect_work);
++	struct smbdirect_socket *sc = &info->socket;
+ 
+-	if (info->transport_status == SMBD_CONNECTED) {
+-		info->transport_status = SMBD_DISCONNECTING;
+-		rdma_disconnect(info->id);
++	if (sc->status == SMBDIRECT_SOCKET_CONNECTED) {
++		sc->status = SMBDIRECT_SOCKET_DISCONNECTING;
++		rdma_disconnect(sc->rdma.cm_id);
+ 	}
+ }
+ 
+@@ -182,6 +181,7 @@ static int smbd_conn_upcall(
+ 		struct rdma_cm_id *id, struct rdma_cm_event *event)
+ {
+ 	struct smbd_connection *info = id->context;
++	struct smbdirect_socket *sc = &info->socket;
+ 
+ 	log_rdma_event(INFO, "event=%d status=%d\n",
+ 		event->event, event->status);
+@@ -205,7 +205,7 @@ static int smbd_conn_upcall(
+ 
+ 	case RDMA_CM_EVENT_ESTABLISHED:
+ 		log_rdma_event(INFO, "connected event=%d\n", event->event);
+-		info->transport_status = SMBD_CONNECTED;
++		sc->status = SMBDIRECT_SOCKET_CONNECTED;
+ 		wake_up_interruptible(&info->conn_wait);
+ 		break;
+ 
+@@ -213,20 +213,20 @@ static int smbd_conn_upcall(
+ 	case RDMA_CM_EVENT_UNREACHABLE:
+ 	case RDMA_CM_EVENT_REJECTED:
+ 		log_rdma_event(INFO, "connecting failed event=%d\n", event->event);
+-		info->transport_status = SMBD_DISCONNECTED;
++		sc->status = SMBDIRECT_SOCKET_DISCONNECTED;
+ 		wake_up_interruptible(&info->conn_wait);
+ 		break;
+ 
+ 	case RDMA_CM_EVENT_DEVICE_REMOVAL:
+ 	case RDMA_CM_EVENT_DISCONNECTED:
+ 		/* This happens when we fail the negotiation */
+-		if (info->transport_status == SMBD_NEGOTIATE_FAILED) {
+-			info->transport_status = SMBD_DISCONNECTED;
++		if (sc->status == SMBDIRECT_SOCKET_NEGOTIATE_FAILED) {
++			sc->status = SMBDIRECT_SOCKET_DISCONNECTED;
+ 			wake_up(&info->conn_wait);
+ 			break;
+ 		}
+ 
+-		info->transport_status = SMBD_DISCONNECTED;
++		sc->status = SMBDIRECT_SOCKET_DISCONNECTED;
+ 		wake_up_interruptible(&info->disconn_wait);
+ 		wake_up_interruptible(&info->wait_reassembly_queue);
+ 		wake_up_interruptible_all(&info->wait_send_queue);
+@@ -275,6 +275,8 @@ static void send_done(struct ib_cq *cq, struct ib_wc *wc)
+ 	int i;
+ 	struct smbd_request *request =
+ 		container_of(wc->wr_cqe, struct smbd_request, cqe);
++	struct smbd_connection *info = request->info;
++	struct smbdirect_socket *sc = &info->socket;
+ 
+ 	log_rdma_send(INFO, "smbd_request 0x%p completed wc->status=%d\n",
+ 		request, wc->status);
+@@ -286,7 +288,7 @@ static void send_done(struct ib_cq *cq, struct ib_wc *wc)
+ 	}
+ 
+ 	for (i = 0; i < request->num_sge; i++)
+-		ib_dma_unmap_single(request->info->id->device,
++		ib_dma_unmap_single(sc->ib.dev,
+ 			request->sge[i].addr,
+ 			request->sge[i].length,
+ 			DMA_TO_DEVICE);
+@@ -299,7 +301,7 @@ static void send_done(struct ib_cq *cq, struct ib_wc *wc)
+ 	mempool_free(request, request->info->request_mempool);
+ }
+ 
+-static void dump_smbd_negotiate_resp(struct smbd_negotiate_resp *resp)
++static void dump_smbdirect_negotiate_resp(struct smbdirect_negotiate_resp *resp)
+ {
+ 	log_rdma_event(INFO, "resp message min_version %u max_version %u negotiated_version %u credits_requested %u credits_granted %u status %u max_readwrite_size %u preferred_send_size %u max_receive_size %u max_fragmented_size %u\n",
+ 		       resp->min_version, resp->max_version,
+@@ -318,15 +320,17 @@ static bool process_negotiation_response(
+ 		struct smbd_response *response, int packet_length)
+ {
+ 	struct smbd_connection *info = response->info;
+-	struct smbd_negotiate_resp *packet = smbd_response_payload(response);
++	struct smbdirect_socket *sc = &info->socket;
++	struct smbdirect_socket_parameters *sp = &sc->parameters;
++	struct smbdirect_negotiate_resp *packet = smbd_response_payload(response);
+ 
+-	if (packet_length < sizeof(struct smbd_negotiate_resp)) {
++	if (packet_length < sizeof(struct smbdirect_negotiate_resp)) {
+ 		log_rdma_event(ERR,
+ 			"error: packet_length=%d\n", packet_length);
+ 		return false;
+ 	}
+ 
+-	if (le16_to_cpu(packet->negotiated_version) != SMBD_V1) {
++	if (le16_to_cpu(packet->negotiated_version) != SMBDIRECT_V1) {
+ 		log_rdma_event(ERR, "error: negotiated_version=%x\n",
+ 			le16_to_cpu(packet->negotiated_version));
+ 		return false;
+@@ -347,20 +351,20 @@ static bool process_negotiation_response(
+ 
+ 	atomic_set(&info->receive_credits, 0);
+ 
+-	if (le32_to_cpu(packet->preferred_send_size) > info->max_receive_size) {
++	if (le32_to_cpu(packet->preferred_send_size) > sp->max_recv_size) {
+ 		log_rdma_event(ERR, "error: preferred_send_size=%d\n",
+ 			le32_to_cpu(packet->preferred_send_size));
+ 		return false;
+ 	}
+-	info->max_receive_size = le32_to_cpu(packet->preferred_send_size);
++	sp->max_recv_size = le32_to_cpu(packet->preferred_send_size);
+ 
+ 	if (le32_to_cpu(packet->max_receive_size) < SMBD_MIN_RECEIVE_SIZE) {
+ 		log_rdma_event(ERR, "error: max_receive_size=%d\n",
+ 			le32_to_cpu(packet->max_receive_size));
+ 		return false;
+ 	}
+-	info->max_send_size = min_t(int, info->max_send_size,
+-					le32_to_cpu(packet->max_receive_size));
++	sp->max_send_size = min_t(u32, sp->max_send_size,
++				  le32_to_cpu(packet->max_receive_size));
+ 
+ 	if (le32_to_cpu(packet->max_fragmented_size) <
+ 			SMBD_MIN_FRAGMENTED_SIZE) {
+@@ -368,18 +372,18 @@ static bool process_negotiation_response(
+ 			le32_to_cpu(packet->max_fragmented_size));
+ 		return false;
+ 	}
+-	info->max_fragmented_send_size =
++	sp->max_fragmented_send_size =
+ 		le32_to_cpu(packet->max_fragmented_size);
+ 	info->rdma_readwrite_threshold =
+-		rdma_readwrite_threshold > info->max_fragmented_send_size ?
+-		info->max_fragmented_send_size :
++		rdma_readwrite_threshold > sp->max_fragmented_send_size ?
++		sp->max_fragmented_send_size :
+ 		rdma_readwrite_threshold;
+ 
+ 
+-	info->max_readwrite_size = min_t(u32,
++	sp->max_read_write_size = min_t(u32,
+ 			le32_to_cpu(packet->max_readwrite_size),
+ 			info->max_frmr_depth * PAGE_SIZE);
+-	info->max_frmr_depth = info->max_readwrite_size / PAGE_SIZE;
++	info->max_frmr_depth = sp->max_read_write_size / PAGE_SIZE;
+ 
+ 	return true;
+ }
+@@ -393,8 +397,9 @@ static void smbd_post_send_credits(struct work_struct *work)
+ 	struct smbd_connection *info =
+ 		container_of(work, struct smbd_connection,
+ 			post_send_credits_work);
++	struct smbdirect_socket *sc = &info->socket;
+ 
+-	if (info->transport_status != SMBD_CONNECTED) {
++	if (sc->status != SMBDIRECT_SOCKET_CONNECTED) {
+ 		wake_up(&info->wait_receive_queues);
+ 		return;
+ 	}
+@@ -448,7 +453,7 @@ static void smbd_post_send_credits(struct work_struct *work)
+ /* Called from softirq, when recv is done */
+ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
+ {
+-	struct smbd_data_transfer *data_transfer;
++	struct smbdirect_data_transfer *data_transfer;
+ 	struct smbd_response *response =
+ 		container_of(wc->wr_cqe, struct smbd_response, cqe);
+ 	struct smbd_connection *info = response->info;
+@@ -474,7 +479,7 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
+ 	switch (response->type) {
+ 	/* SMBD negotiation response */
+ 	case SMBD_NEGOTIATE_RESP:
+-		dump_smbd_negotiate_resp(smbd_response_payload(response));
++		dump_smbdirect_negotiate_resp(smbd_response_payload(response));
+ 		info->full_packet_received = true;
+ 		info->negotiate_done =
+ 			process_negotiation_response(response, wc->byte_len);
+@@ -531,7 +536,7 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
+ 		/* Send a KEEP_ALIVE response right away if requested */
+ 		info->keep_alive_requested = KEEP_ALIVE_NONE;
+ 		if (le16_to_cpu(data_transfer->flags) &
+-				SMB_DIRECT_RESPONSE_REQUESTED) {
++				SMBDIRECT_FLAG_RESPONSE_REQUESTED) {
+ 			info->keep_alive_requested = KEEP_ALIVE_PENDING;
+ 		}
+ 
+@@ -635,32 +640,34 @@ static int smbd_ia_open(
+ 		struct smbd_connection *info,
+ 		struct sockaddr *dstaddr, int port)
+ {
++	struct smbdirect_socket *sc = &info->socket;
+ 	int rc;
+ 
+-	info->id = smbd_create_id(info, dstaddr, port);
+-	if (IS_ERR(info->id)) {
+-		rc = PTR_ERR(info->id);
++	sc->rdma.cm_id = smbd_create_id(info, dstaddr, port);
++	if (IS_ERR(sc->rdma.cm_id)) {
++		rc = PTR_ERR(sc->rdma.cm_id);
+ 		goto out1;
+ 	}
++	sc->ib.dev = sc->rdma.cm_id->device;
+ 
+-	if (!frwr_is_supported(&info->id->device->attrs)) {
++	if (!frwr_is_supported(&sc->ib.dev->attrs)) {
+ 		log_rdma_event(ERR, "Fast Registration Work Requests (FRWR) is not supported\n");
+ 		log_rdma_event(ERR, "Device capability flags = %llx max_fast_reg_page_list_len = %u\n",
+-			       info->id->device->attrs.device_cap_flags,
+-			       info->id->device->attrs.max_fast_reg_page_list_len);
++			       sc->ib.dev->attrs.device_cap_flags,
++			       sc->ib.dev->attrs.max_fast_reg_page_list_len);
+ 		rc = -EPROTONOSUPPORT;
+ 		goto out2;
+ 	}
+ 	info->max_frmr_depth = min_t(int,
+ 		smbd_max_frmr_depth,
+-		info->id->device->attrs.max_fast_reg_page_list_len);
++		sc->ib.dev->attrs.max_fast_reg_page_list_len);
+ 	info->mr_type = IB_MR_TYPE_MEM_REG;
+-	if (info->id->device->attrs.kernel_cap_flags & IBK_SG_GAPS_REG)
++	if (sc->ib.dev->attrs.kernel_cap_flags & IBK_SG_GAPS_REG)
+ 		info->mr_type = IB_MR_TYPE_SG_GAPS;
+ 
+-	info->pd = ib_alloc_pd(info->id->device, 0);
+-	if (IS_ERR(info->pd)) {
+-		rc = PTR_ERR(info->pd);
++	sc->ib.pd = ib_alloc_pd(sc->ib.dev, 0);
++	if (IS_ERR(sc->ib.pd)) {
++		rc = PTR_ERR(sc->ib.pd);
+ 		log_rdma_event(ERR, "ib_alloc_pd() returned %d\n", rc);
+ 		goto out2;
+ 	}
+@@ -668,8 +675,8 @@ static int smbd_ia_open(
+ 	return 0;
+ 
+ out2:
+-	rdma_destroy_id(info->id);
+-	info->id = NULL;
++	rdma_destroy_id(sc->rdma.cm_id);
++	sc->rdma.cm_id = NULL;
+ 
+ out1:
+ 	return rc;
+@@ -683,10 +690,12 @@ static int smbd_ia_open(
+  */
+ static int smbd_post_send_negotiate_req(struct smbd_connection *info)
+ {
++	struct smbdirect_socket *sc = &info->socket;
++	struct smbdirect_socket_parameters *sp = &sc->parameters;
+ 	struct ib_send_wr send_wr;
+ 	int rc = -ENOMEM;
+ 	struct smbd_request *request;
+-	struct smbd_negotiate_req *packet;
++	struct smbdirect_negotiate_req *packet;
+ 
+ 	request = mempool_alloc(info->request_mempool, GFP_KERNEL);
+ 	if (!request)
+@@ -695,29 +704,29 @@ static int smbd_post_send_negotiate_req(struct smbd_connection *info)
+ 	request->info = info;
+ 
+ 	packet = smbd_request_payload(request);
+-	packet->min_version = cpu_to_le16(SMBD_V1);
+-	packet->max_version = cpu_to_le16(SMBD_V1);
++	packet->min_version = cpu_to_le16(SMBDIRECT_V1);
++	packet->max_version = cpu_to_le16(SMBDIRECT_V1);
+ 	packet->reserved = 0;
+-	packet->credits_requested = cpu_to_le16(info->send_credit_target);
+-	packet->preferred_send_size = cpu_to_le32(info->max_send_size);
+-	packet->max_receive_size = cpu_to_le32(info->max_receive_size);
++	packet->credits_requested = cpu_to_le16(sp->send_credit_target);
++	packet->preferred_send_size = cpu_to_le32(sp->max_send_size);
++	packet->max_receive_size = cpu_to_le32(sp->max_recv_size);
+ 	packet->max_fragmented_size =
+-		cpu_to_le32(info->max_fragmented_recv_size);
++		cpu_to_le32(sp->max_fragmented_recv_size);
+ 
+ 	request->num_sge = 1;
+ 	request->sge[0].addr = ib_dma_map_single(
+-				info->id->device, (void *)packet,
++				sc->ib.dev, (void *)packet,
+ 				sizeof(*packet), DMA_TO_DEVICE);
+-	if (ib_dma_mapping_error(info->id->device, request->sge[0].addr)) {
++	if (ib_dma_mapping_error(sc->ib.dev, request->sge[0].addr)) {
+ 		rc = -EIO;
+ 		goto dma_mapping_failed;
+ 	}
+ 
+ 	request->sge[0].length = sizeof(*packet);
+-	request->sge[0].lkey = info->pd->local_dma_lkey;
++	request->sge[0].lkey = sc->ib.pd->local_dma_lkey;
+ 
+ 	ib_dma_sync_single_for_device(
+-		info->id->device, request->sge[0].addr,
++		sc->ib.dev, request->sge[0].addr,
+ 		request->sge[0].length, DMA_TO_DEVICE);
+ 
+ 	request->cqe.done = send_done;
+@@ -734,14 +743,14 @@ static int smbd_post_send_negotiate_req(struct smbd_connection *info)
+ 		request->sge[0].length, request->sge[0].lkey);
+ 
+ 	atomic_inc(&info->send_pending);
+-	rc = ib_post_send(info->id->qp, &send_wr, NULL);
++	rc = ib_post_send(sc->ib.qp, &send_wr, NULL);
+ 	if (!rc)
+ 		return 0;
+ 
+ 	/* if we reach here, post send failed */
+ 	log_rdma_send(ERR, "ib_post_send failed rc=%d\n", rc);
+ 	atomic_dec(&info->send_pending);
+-	ib_dma_unmap_single(info->id->device, request->sge[0].addr,
++	ib_dma_unmap_single(sc->ib.dev, request->sge[0].addr,
+ 		request->sge[0].length, DMA_TO_DEVICE);
+ 
+ 	smbd_disconnect_rdma_connection(info);
+@@ -774,10 +783,10 @@ static int manage_credits_prior_sending(struct smbd_connection *info)
+ /*
+  * Check if we need to send a KEEP_ALIVE message
+  * The idle connection timer triggers a KEEP_ALIVE message when expires
+- * SMB_DIRECT_RESPONSE_REQUESTED is set in the message flag to have peer send
++ * SMBDIRECT_FLAG_RESPONSE_REQUESTED is set in the message flag to have peer send
+  * back a response.
+  * return value:
+- * 1 if SMB_DIRECT_RESPONSE_REQUESTED needs to be set
++ * 1 if SMBDIRECT_FLAG_RESPONSE_REQUESTED needs to be set
+  * 0: otherwise
+  */
+ static int manage_keep_alive_before_sending(struct smbd_connection *info)
+@@ -793,6 +802,8 @@ static int manage_keep_alive_before_sending(struct smbd_connection *info)
+ static int smbd_post_send(struct smbd_connection *info,
+ 		struct smbd_request *request)
+ {
++	struct smbdirect_socket *sc = &info->socket;
++	struct smbdirect_socket_parameters *sp = &sc->parameters;
+ 	struct ib_send_wr send_wr;
+ 	int rc, i;
+ 
+@@ -801,7 +812,7 @@ static int smbd_post_send(struct smbd_connection *info,
+ 			"rdma_request sge[%d] addr=0x%llx length=%u\n",
+ 			i, request->sge[i].addr, request->sge[i].length);
+ 		ib_dma_sync_single_for_device(
+-			info->id->device,
++			sc->ib.dev,
+ 			request->sge[i].addr,
+ 			request->sge[i].length,
+ 			DMA_TO_DEVICE);
+@@ -816,7 +827,7 @@ static int smbd_post_send(struct smbd_connection *info,
+ 	send_wr.opcode = IB_WR_SEND;
+ 	send_wr.send_flags = IB_SEND_SIGNALED;
+ 
+-	rc = ib_post_send(info->id->qp, &send_wr, NULL);
++	rc = ib_post_send(sc->ib.qp, &send_wr, NULL);
+ 	if (rc) {
+ 		log_rdma_send(ERR, "ib_post_send failed rc=%d\n", rc);
+ 		smbd_disconnect_rdma_connection(info);
+@@ -824,7 +835,7 @@ static int smbd_post_send(struct smbd_connection *info,
+ 	} else
+ 		/* Reset timer for idle connection after packet is sent */
+ 		mod_delayed_work(info->workqueue, &info->idle_timer_work,
+-			info->keep_alive_interval*HZ);
++			msecs_to_jiffies(sp->keepalive_interval_msec));
+ 
+ 	return rc;
+ }
+@@ -833,22 +844,24 @@ static int smbd_post_send_iter(struct smbd_connection *info,
+ 			       struct iov_iter *iter,
+ 			       int *_remaining_data_length)
+ {
++	struct smbdirect_socket *sc = &info->socket;
++	struct smbdirect_socket_parameters *sp = &sc->parameters;
+ 	int i, rc;
+ 	int header_length;
+ 	int data_length;
+ 	struct smbd_request *request;
+-	struct smbd_data_transfer *packet;
++	struct smbdirect_data_transfer *packet;
+ 	int new_credits = 0;
+ 
+ wait_credit:
+ 	/* Wait for send credits. A SMBD packet needs one credit */
+ 	rc = wait_event_interruptible(info->wait_send_queue,
+ 		atomic_read(&info->send_credits) > 0 ||
+-		info->transport_status != SMBD_CONNECTED);
++		sc->status != SMBDIRECT_SOCKET_CONNECTED);
+ 	if (rc)
+ 		goto err_wait_credit;
+ 
+-	if (info->transport_status != SMBD_CONNECTED) {
++	if (sc->status != SMBDIRECT_SOCKET_CONNECTED) {
+ 		log_outgoing(ERR, "disconnected not sending on wait_credit\n");
+ 		rc = -EAGAIN;
+ 		goto err_wait_credit;
+@@ -860,17 +873,17 @@ static int smbd_post_send_iter(struct smbd_connection *info,
+ 
+ wait_send_queue:
+ 	wait_event(info->wait_post_send,
+-		atomic_read(&info->send_pending) < info->send_credit_target ||
+-		info->transport_status != SMBD_CONNECTED);
++		atomic_read(&info->send_pending) < sp->send_credit_target ||
++		sc->status != SMBDIRECT_SOCKET_CONNECTED);
+ 
+-	if (info->transport_status != SMBD_CONNECTED) {
++	if (sc->status != SMBDIRECT_SOCKET_CONNECTED) {
+ 		log_outgoing(ERR, "disconnected not sending on wait_send_queue\n");
+ 		rc = -EAGAIN;
+ 		goto err_wait_send_queue;
+ 	}
+ 
+ 	if (unlikely(atomic_inc_return(&info->send_pending) >
+-				info->send_credit_target)) {
++				sp->send_credit_target)) {
+ 		atomic_dec(&info->send_pending);
+ 		goto wait_send_queue;
+ 	}
+@@ -890,8 +903,8 @@ static int smbd_post_send_iter(struct smbd_connection *info,
+ 			.nr_sge		= 1,
+ 			.max_sge	= SMBDIRECT_MAX_SEND_SGE,
+ 			.sge		= request->sge,
+-			.device		= info->id->device,
+-			.local_dma_lkey	= info->pd->local_dma_lkey,
++			.device		= sc->ib.dev,
++			.local_dma_lkey	= sc->ib.pd->local_dma_lkey,
+ 			.direction	= DMA_TO_DEVICE,
+ 		};
+ 
+@@ -909,7 +922,7 @@ static int smbd_post_send_iter(struct smbd_connection *info,
+ 
+ 	/* Fill in the packet header */
+ 	packet = smbd_request_payload(request);
+-	packet->credits_requested = cpu_to_le16(info->send_credit_target);
++	packet->credits_requested = cpu_to_le16(sp->send_credit_target);
+ 
+ 	new_credits = manage_credits_prior_sending(info);
+ 	atomic_add(new_credits, &info->receive_credits);
+@@ -919,7 +932,7 @@ static int smbd_post_send_iter(struct smbd_connection *info,
+ 
+ 	packet->flags = 0;
+ 	if (manage_keep_alive_before_sending(info))
+-		packet->flags |= cpu_to_le16(SMB_DIRECT_RESPONSE_REQUESTED);
++		packet->flags |= cpu_to_le16(SMBDIRECT_FLAG_RESPONSE_REQUESTED);
+ 
+ 	packet->reserved = 0;
+ 	if (!data_length)
+@@ -938,23 +951,23 @@ static int smbd_post_send_iter(struct smbd_connection *info,
+ 		     le32_to_cpu(packet->remaining_data_length));
+ 
+ 	/* Map the packet to DMA */
+-	header_length = sizeof(struct smbd_data_transfer);
++	header_length = sizeof(struct smbdirect_data_transfer);
+ 	/* If this is a packet without payload, don't send padding */
+ 	if (!data_length)
+-		header_length = offsetof(struct smbd_data_transfer, padding);
++		header_length = offsetof(struct smbdirect_data_transfer, padding);
+ 
+-	request->sge[0].addr = ib_dma_map_single(info->id->device,
++	request->sge[0].addr = ib_dma_map_single(sc->ib.dev,
+ 						 (void *)packet,
+ 						 header_length,
+ 						 DMA_TO_DEVICE);
+-	if (ib_dma_mapping_error(info->id->device, request->sge[0].addr)) {
++	if (ib_dma_mapping_error(sc->ib.dev, request->sge[0].addr)) {
+ 		rc = -EIO;
+ 		request->sge[0].addr = 0;
+ 		goto err_dma;
+ 	}
+ 
+ 	request->sge[0].length = header_length;
+-	request->sge[0].lkey = info->pd->local_dma_lkey;
++	request->sge[0].lkey = sc->ib.pd->local_dma_lkey;
+ 
+ 	rc = smbd_post_send(info, request);
+ 	if (!rc)
+@@ -963,7 +976,7 @@ static int smbd_post_send_iter(struct smbd_connection *info,
+ err_dma:
+ 	for (i = 0; i < request->num_sge; i++)
+ 		if (request->sge[i].addr)
+-			ib_dma_unmap_single(info->id->device,
++			ib_dma_unmap_single(sc->ib.dev,
+ 					    request->sge[i].addr,
+ 					    request->sge[i].length,
+ 					    DMA_TO_DEVICE);
+@@ -1008,17 +1021,19 @@ static int smbd_post_send_empty(struct smbd_connection *info)
+ static int smbd_post_recv(
+ 		struct smbd_connection *info, struct smbd_response *response)
+ {
++	struct smbdirect_socket *sc = &info->socket;
++	struct smbdirect_socket_parameters *sp = &sc->parameters;
+ 	struct ib_recv_wr recv_wr;
+ 	int rc = -EIO;
+ 
+ 	response->sge.addr = ib_dma_map_single(
+-				info->id->device, response->packet,
+-				info->max_receive_size, DMA_FROM_DEVICE);
+-	if (ib_dma_mapping_error(info->id->device, response->sge.addr))
++				sc->ib.dev, response->packet,
++				sp->max_recv_size, DMA_FROM_DEVICE);
++	if (ib_dma_mapping_error(sc->ib.dev, response->sge.addr))
+ 		return rc;
+ 
+-	response->sge.length = info->max_receive_size;
+-	response->sge.lkey = info->pd->local_dma_lkey;
++	response->sge.length = sp->max_recv_size;
++	response->sge.lkey = sc->ib.pd->local_dma_lkey;
+ 
+ 	response->cqe.done = recv_done;
+ 
+@@ -1027,9 +1042,9 @@ static int smbd_post_recv(
+ 	recv_wr.sg_list = &response->sge;
+ 	recv_wr.num_sge = 1;
+ 
+-	rc = ib_post_recv(info->id->qp, &recv_wr, NULL);
++	rc = ib_post_recv(sc->ib.qp, &recv_wr, NULL);
+ 	if (rc) {
+-		ib_dma_unmap_single(info->id->device, response->sge.addr,
++		ib_dma_unmap_single(sc->ib.dev, response->sge.addr,
+ 				    response->sge.length, DMA_FROM_DEVICE);
+ 		smbd_disconnect_rdma_connection(info);
+ 		log_rdma_recv(ERR, "ib_post_recv failed rc=%d\n", rc);
+@@ -1187,9 +1202,10 @@ static struct smbd_response *get_receive_buffer(struct smbd_connection *info)
+ static void put_receive_buffer(
+ 	struct smbd_connection *info, struct smbd_response *response)
+ {
++	struct smbdirect_socket *sc = &info->socket;
+ 	unsigned long flags;
+ 
+-	ib_dma_unmap_single(info->id->device, response->sge.addr,
++	ib_dma_unmap_single(sc->ib.dev, response->sge.addr,
+ 		response->sge.length, DMA_FROM_DEVICE);
+ 
+ 	spin_lock_irqsave(&info->receive_queue_lock, flags);
+@@ -1264,6 +1280,8 @@ static void idle_connection_timer(struct work_struct *work)
+ 	struct smbd_connection *info = container_of(
+ 					work, struct smbd_connection,
+ 					idle_timer_work.work);
++	struct smbdirect_socket *sc = &info->socket;
++	struct smbdirect_socket_parameters *sp = &sc->parameters;
+ 
+ 	if (info->keep_alive_requested != KEEP_ALIVE_NONE) {
+ 		log_keep_alive(ERR,
+@@ -1278,7 +1296,7 @@ static void idle_connection_timer(struct work_struct *work)
+ 
+ 	/* Setup the next idle timeout work */
+ 	queue_delayed_work(info->workqueue, &info->idle_timer_work,
+-			info->keep_alive_interval*HZ);
++			msecs_to_jiffies(sp->keepalive_interval_msec));
+ }
+ 
+ /*
+@@ -1289,6 +1307,8 @@ static void idle_connection_timer(struct work_struct *work)
+ void smbd_destroy(struct TCP_Server_Info *server)
+ {
+ 	struct smbd_connection *info = server->smbd_conn;
++	struct smbdirect_socket *sc;
++	struct smbdirect_socket_parameters *sp;
+ 	struct smbd_response *response;
+ 	unsigned long flags;
+ 
+@@ -1296,19 +1316,22 @@ void smbd_destroy(struct TCP_Server_Info *server)
+ 		log_rdma_event(INFO, "rdma session already destroyed\n");
+ 		return;
+ 	}
++	sc = &info->socket;
++	sp = &sc->parameters;
+ 
+ 	log_rdma_event(INFO, "destroying rdma session\n");
+-	if (info->transport_status != SMBD_DISCONNECTED) {
+-		rdma_disconnect(server->smbd_conn->id);
++	if (sc->status != SMBDIRECT_SOCKET_DISCONNECTED) {
++		rdma_disconnect(sc->rdma.cm_id);
+ 		log_rdma_event(INFO, "wait for transport being disconnected\n");
+ 		wait_event_interruptible(
+ 			info->disconn_wait,
+-			info->transport_status == SMBD_DISCONNECTED);
++			sc->status == SMBDIRECT_SOCKET_DISCONNECTED);
+ 	}
+ 
+ 	log_rdma_event(INFO, "destroying qp\n");
+-	ib_drain_qp(info->id->qp);
+-	rdma_destroy_qp(info->id);
++	ib_drain_qp(sc->ib.qp);
++	rdma_destroy_qp(sc->rdma.cm_id);
++	sc->ib.qp = NULL;
+ 
+ 	log_rdma_event(INFO, "cancelling idle timer\n");
+ 	cancel_delayed_work_sync(&info->idle_timer_work);
+@@ -1336,7 +1359,7 @@ void smbd_destroy(struct TCP_Server_Info *server)
+ 	log_rdma_event(INFO, "free receive buffers\n");
+ 	wait_event(info->wait_receive_queues,
+ 		info->count_receive_queue + info->count_empty_packet_queue
+-			== info->receive_credit_max);
++			== sp->recv_credit_max);
+ 	destroy_receive_buffers(info);
+ 
+ 	/*
+@@ -1355,10 +1378,10 @@ void smbd_destroy(struct TCP_Server_Info *server)
+ 	}
+ 	destroy_mr_list(info);
+ 
+-	ib_free_cq(info->send_cq);
+-	ib_free_cq(info->recv_cq);
+-	ib_dealloc_pd(info->pd);
+-	rdma_destroy_id(info->id);
++	ib_free_cq(sc->ib.send_cq);
++	ib_free_cq(sc->ib.recv_cq);
++	ib_dealloc_pd(sc->ib.pd);
++	rdma_destroy_id(sc->rdma.cm_id);
+ 
+ 	/* free mempools */
+ 	mempool_destroy(info->request_mempool);
+@@ -1367,7 +1390,7 @@ void smbd_destroy(struct TCP_Server_Info *server)
+ 	mempool_destroy(info->response_mempool);
+ 	kmem_cache_destroy(info->response_cache);
+ 
+-	info->transport_status = SMBD_DESTROYED;
++	sc->status = SMBDIRECT_SOCKET_DESTROYED;
+ 
+ 	destroy_workqueue(info->workqueue);
+ 	log_rdma_event(INFO,  "rdma session destroyed\n");
+@@ -1392,7 +1415,7 @@ int smbd_reconnect(struct TCP_Server_Info *server)
+ 	 * This is possible if transport is disconnected and we haven't received
+ 	 * notification from RDMA, but upper layer has detected timeout
+ 	 */
+-	if (server->smbd_conn->transport_status == SMBD_CONNECTED) {
++	if (server->smbd_conn->socket.status == SMBDIRECT_SOCKET_CONNECTED) {
+ 		log_rdma_event(INFO, "disconnecting transport\n");
+ 		smbd_destroy(server);
+ 	}
+@@ -1424,37 +1447,47 @@ static void destroy_caches_and_workqueue(struct smbd_connection *info)
+ #define MAX_NAME_LEN	80
+ static int allocate_caches_and_workqueue(struct smbd_connection *info)
+ {
++	struct smbdirect_socket *sc = &info->socket;
++	struct smbdirect_socket_parameters *sp = &sc->parameters;
+ 	char name[MAX_NAME_LEN];
+ 	int rc;
+ 
++	if (WARN_ON_ONCE(sp->max_recv_size < sizeof(struct smbdirect_data_transfer)))
++		return -ENOMEM;
++
+ 	scnprintf(name, MAX_NAME_LEN, "smbd_request_%p", info);
+ 	info->request_cache =
+ 		kmem_cache_create(
+ 			name,
+ 			sizeof(struct smbd_request) +
+-				sizeof(struct smbd_data_transfer),
++				sizeof(struct smbdirect_data_transfer),
+ 			0, SLAB_HWCACHE_ALIGN, NULL);
+ 	if (!info->request_cache)
+ 		return -ENOMEM;
+ 
+ 	info->request_mempool =
+-		mempool_create(info->send_credit_target, mempool_alloc_slab,
++		mempool_create(sp->send_credit_target, mempool_alloc_slab,
+ 			mempool_free_slab, info->request_cache);
+ 	if (!info->request_mempool)
+ 		goto out1;
+ 
+ 	scnprintf(name, MAX_NAME_LEN, "smbd_response_%p", info);
++
++	struct kmem_cache_args response_args = {
++		.align		= __alignof__(struct smbd_response),
++		.useroffset	= (offsetof(struct smbd_response, packet) +
++				   sizeof(struct smbdirect_data_transfer)),
++		.usersize	= sp->max_recv_size - sizeof(struct smbdirect_data_transfer),
++	};
+ 	info->response_cache =
+-		kmem_cache_create(
+-			name,
+-			sizeof(struct smbd_response) +
+-				info->max_receive_size,
+-			0, SLAB_HWCACHE_ALIGN, NULL);
++		kmem_cache_create(name,
++				  sizeof(struct smbd_response) + sp->max_recv_size,
++				  &response_args, SLAB_HWCACHE_ALIGN);
+ 	if (!info->response_cache)
+ 		goto out2;
+ 
+ 	info->response_mempool =
+-		mempool_create(info->receive_credit_max, mempool_alloc_slab,
++		mempool_create(sp->recv_credit_max, mempool_alloc_slab,
+ 		       mempool_free_slab, info->response_cache);
+ 	if (!info->response_mempool)
+ 		goto out3;
+@@ -1464,7 +1497,7 @@ static int allocate_caches_and_workqueue(struct smbd_connection *info)
+ 	if (!info->workqueue)
+ 		goto out4;
+ 
+-	rc = allocate_receive_buffers(info, info->receive_credit_max);
++	rc = allocate_receive_buffers(info, sp->recv_credit_max);
+ 	if (rc) {
+ 		log_rdma_event(ERR, "failed to allocate receive buffers\n");
+ 		goto out5;
+@@ -1491,6 +1524,8 @@ static struct smbd_connection *_smbd_get_connection(
+ {
+ 	int rc;
+ 	struct smbd_connection *info;
++	struct smbdirect_socket *sc;
++	struct smbdirect_socket_parameters *sp;
+ 	struct rdma_conn_param conn_param;
+ 	struct ib_qp_init_attr qp_attr;
+ 	struct sockaddr_in *addr_in = (struct sockaddr_in *) dstaddr;
+@@ -1500,101 +1535,102 @@ static struct smbd_connection *_smbd_get_connection(
+ 	info = kzalloc(sizeof(struct smbd_connection), GFP_KERNEL);
+ 	if (!info)
+ 		return NULL;
++	sc = &info->socket;
++	sp = &sc->parameters;
+ 
+-	info->transport_status = SMBD_CONNECTING;
++	sc->status = SMBDIRECT_SOCKET_CONNECTING;
+ 	rc = smbd_ia_open(info, dstaddr, port);
+ 	if (rc) {
+ 		log_rdma_event(INFO, "smbd_ia_open rc=%d\n", rc);
+ 		goto create_id_failed;
+ 	}
+ 
+-	if (smbd_send_credit_target > info->id->device->attrs.max_cqe ||
+-	    smbd_send_credit_target > info->id->device->attrs.max_qp_wr) {
++	if (smbd_send_credit_target > sc->ib.dev->attrs.max_cqe ||
++	    smbd_send_credit_target > sc->ib.dev->attrs.max_qp_wr) {
+ 		log_rdma_event(ERR, "consider lowering send_credit_target = %d. Possible CQE overrun, device reporting max_cqe %d max_qp_wr %d\n",
+ 			       smbd_send_credit_target,
+-			       info->id->device->attrs.max_cqe,
+-			       info->id->device->attrs.max_qp_wr);
++			       sc->ib.dev->attrs.max_cqe,
++			       sc->ib.dev->attrs.max_qp_wr);
+ 		goto config_failed;
+ 	}
+ 
+-	if (smbd_receive_credit_max > info->id->device->attrs.max_cqe ||
+-	    smbd_receive_credit_max > info->id->device->attrs.max_qp_wr) {
++	if (smbd_receive_credit_max > sc->ib.dev->attrs.max_cqe ||
++	    smbd_receive_credit_max > sc->ib.dev->attrs.max_qp_wr) {
+ 		log_rdma_event(ERR, "consider lowering receive_credit_max = %d. Possible CQE overrun, device reporting max_cqe %d max_qp_wr %d\n",
+ 			       smbd_receive_credit_max,
+-			       info->id->device->attrs.max_cqe,
+-			       info->id->device->attrs.max_qp_wr);
++			       sc->ib.dev->attrs.max_cqe,
++			       sc->ib.dev->attrs.max_qp_wr);
+ 		goto config_failed;
+ 	}
+ 
+-	info->receive_credit_max = smbd_receive_credit_max;
+-	info->send_credit_target = smbd_send_credit_target;
+-	info->max_send_size = smbd_max_send_size;
+-	info->max_fragmented_recv_size = smbd_max_fragmented_recv_size;
+-	info->max_receive_size = smbd_max_receive_size;
+-	info->keep_alive_interval = smbd_keep_alive_interval;
++	sp->recv_credit_max = smbd_receive_credit_max;
++	sp->send_credit_target = smbd_send_credit_target;
++	sp->max_send_size = smbd_max_send_size;
++	sp->max_fragmented_recv_size = smbd_max_fragmented_recv_size;
++	sp->max_recv_size = smbd_max_receive_size;
++	sp->keepalive_interval_msec = smbd_keep_alive_interval * 1000;
+ 
+-	if (info->id->device->attrs.max_send_sge < SMBDIRECT_MAX_SEND_SGE ||
+-	    info->id->device->attrs.max_recv_sge < SMBDIRECT_MAX_RECV_SGE) {
++	if (sc->ib.dev->attrs.max_send_sge < SMBDIRECT_MAX_SEND_SGE ||
++	    sc->ib.dev->attrs.max_recv_sge < SMBDIRECT_MAX_RECV_SGE) {
+ 		log_rdma_event(ERR,
+ 			"device %.*s max_send_sge/max_recv_sge = %d/%d too small\n",
+ 			IB_DEVICE_NAME_MAX,
+-			info->id->device->name,
+-			info->id->device->attrs.max_send_sge,
+-			info->id->device->attrs.max_recv_sge);
++			sc->ib.dev->name,
++			sc->ib.dev->attrs.max_send_sge,
++			sc->ib.dev->attrs.max_recv_sge);
+ 		goto config_failed;
+ 	}
+ 
+-	info->send_cq = NULL;
+-	info->recv_cq = NULL;
+-	info->send_cq =
+-		ib_alloc_cq_any(info->id->device, info,
+-				info->send_credit_target, IB_POLL_SOFTIRQ);
+-	if (IS_ERR(info->send_cq)) {
+-		info->send_cq = NULL;
++	sc->ib.send_cq =
++		ib_alloc_cq_any(sc->ib.dev, info,
++				sp->send_credit_target, IB_POLL_SOFTIRQ);
++	if (IS_ERR(sc->ib.send_cq)) {
++		sc->ib.send_cq = NULL;
+ 		goto alloc_cq_failed;
+ 	}
+ 
+-	info->recv_cq =
+-		ib_alloc_cq_any(info->id->device, info,
+-				info->receive_credit_max, IB_POLL_SOFTIRQ);
+-	if (IS_ERR(info->recv_cq)) {
+-		info->recv_cq = NULL;
++	sc->ib.recv_cq =
++		ib_alloc_cq_any(sc->ib.dev, info,
++				sp->recv_credit_max, IB_POLL_SOFTIRQ);
++	if (IS_ERR(sc->ib.recv_cq)) {
++		sc->ib.recv_cq = NULL;
+ 		goto alloc_cq_failed;
+ 	}
+ 
+ 	memset(&qp_attr, 0, sizeof(qp_attr));
+ 	qp_attr.event_handler = smbd_qp_async_error_upcall;
+ 	qp_attr.qp_context = info;
+-	qp_attr.cap.max_send_wr = info->send_credit_target;
+-	qp_attr.cap.max_recv_wr = info->receive_credit_max;
++	qp_attr.cap.max_send_wr = sp->send_credit_target;
++	qp_attr.cap.max_recv_wr = sp->recv_credit_max;
+ 	qp_attr.cap.max_send_sge = SMBDIRECT_MAX_SEND_SGE;
+ 	qp_attr.cap.max_recv_sge = SMBDIRECT_MAX_RECV_SGE;
+ 	qp_attr.cap.max_inline_data = 0;
+ 	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
+ 	qp_attr.qp_type = IB_QPT_RC;
+-	qp_attr.send_cq = info->send_cq;
+-	qp_attr.recv_cq = info->recv_cq;
++	qp_attr.send_cq = sc->ib.send_cq;
++	qp_attr.recv_cq = sc->ib.recv_cq;
+ 	qp_attr.port_num = ~0;
+ 
+-	rc = rdma_create_qp(info->id, info->pd, &qp_attr);
++	rc = rdma_create_qp(sc->rdma.cm_id, sc->ib.pd, &qp_attr);
+ 	if (rc) {
+ 		log_rdma_event(ERR, "rdma_create_qp failed %i\n", rc);
+ 		goto create_qp_failed;
+ 	}
++	sc->ib.qp = sc->rdma.cm_id->qp;
+ 
+ 	memset(&conn_param, 0, sizeof(conn_param));
+ 	conn_param.initiator_depth = 0;
+ 
+ 	conn_param.responder_resources =
+-		min(info->id->device->attrs.max_qp_rd_atom,
++		min(sc->ib.dev->attrs.max_qp_rd_atom,
+ 		    SMBD_CM_RESPONDER_RESOURCES);
+ 	info->responder_resources = conn_param.responder_resources;
+ 	log_rdma_mr(INFO, "responder_resources=%d\n",
+ 		info->responder_resources);
+ 
+ 	/* Need to send IRD/ORD in private data for iWARP */
+-	info->id->device->ops.get_port_immutable(
+-		info->id->device, info->id->port_num, &port_immutable);
++	sc->ib.dev->ops.get_port_immutable(
++		sc->ib.dev, sc->rdma.cm_id->port_num, &port_immutable);
+ 	if (port_immutable.core_cap_flags & RDMA_CORE_PORT_IWARP) {
+ 		ird_ord_hdr[0] = info->responder_resources;
+ 		ird_ord_hdr[1] = 1;
+@@ -1615,16 +1651,16 @@ static struct smbd_connection *_smbd_get_connection(
+ 	init_waitqueue_head(&info->conn_wait);
+ 	init_waitqueue_head(&info->disconn_wait);
+ 	init_waitqueue_head(&info->wait_reassembly_queue);
+-	rc = rdma_connect(info->id, &conn_param);
++	rc = rdma_connect(sc->rdma.cm_id, &conn_param);
+ 	if (rc) {
+ 		log_rdma_event(ERR, "rdma_connect() failed with %i\n", rc);
+ 		goto rdma_connect_failed;
+ 	}
+ 
+ 	wait_event_interruptible(
+-		info->conn_wait, info->transport_status != SMBD_CONNECTING);
++		info->conn_wait, sc->status != SMBDIRECT_SOCKET_CONNECTING);
+ 
+-	if (info->transport_status != SMBD_CONNECTED) {
++	if (sc->status != SMBDIRECT_SOCKET_CONNECTED) {
+ 		log_rdma_event(ERR, "rdma_connect failed port=%d\n", port);
+ 		goto rdma_connect_failed;
+ 	}
+@@ -1640,7 +1676,7 @@ static struct smbd_connection *_smbd_get_connection(
+ 	init_waitqueue_head(&info->wait_send_queue);
+ 	INIT_DELAYED_WORK(&info->idle_timer_work, idle_connection_timer);
+ 	queue_delayed_work(info->workqueue, &info->idle_timer_work,
+-		info->keep_alive_interval*HZ);
++		msecs_to_jiffies(sp->keepalive_interval_msec));
+ 
+ 	init_waitqueue_head(&info->wait_send_pending);
+ 	atomic_set(&info->send_pending, 0);
+@@ -1675,26 +1711,26 @@ static struct smbd_connection *_smbd_get_connection(
+ negotiation_failed:
+ 	cancel_delayed_work_sync(&info->idle_timer_work);
+ 	destroy_caches_and_workqueue(info);
+-	info->transport_status = SMBD_NEGOTIATE_FAILED;
++	sc->status = SMBDIRECT_SOCKET_NEGOTIATE_FAILED;
+ 	init_waitqueue_head(&info->conn_wait);
+-	rdma_disconnect(info->id);
++	rdma_disconnect(sc->rdma.cm_id);
+ 	wait_event(info->conn_wait,
+-		info->transport_status == SMBD_DISCONNECTED);
++		sc->status == SMBDIRECT_SOCKET_DISCONNECTED);
+ 
+ allocate_cache_failed:
+ rdma_connect_failed:
+-	rdma_destroy_qp(info->id);
++	rdma_destroy_qp(sc->rdma.cm_id);
+ 
+ create_qp_failed:
+ alloc_cq_failed:
+-	if (info->send_cq)
+-		ib_free_cq(info->send_cq);
+-	if (info->recv_cq)
+-		ib_free_cq(info->recv_cq);
++	if (sc->ib.send_cq)
++		ib_free_cq(sc->ib.send_cq);
++	if (sc->ib.recv_cq)
++		ib_free_cq(sc->ib.recv_cq);
+ 
+ config_failed:
+-	ib_dealloc_pd(info->pd);
+-	rdma_destroy_id(info->id);
++	ib_dealloc_pd(sc->ib.pd);
++	rdma_destroy_id(sc->rdma.cm_id);
+ 
+ create_id_failed:
+ 	kfree(info);
+@@ -1719,34 +1755,39 @@ struct smbd_connection *smbd_get_connection(
+ }
+ 
+ /*
+- * Receive data from receive reassembly queue
++ * Receive data from the transport's receive reassembly queue
+  * All the incoming data packets are placed in reassembly queue
+- * buf: the buffer to read data into
++ * iter: the buffer to read data into
+  * size: the length of data to read
+  * return value: actual data read
+- * Note: this implementation copies the data from reassebmly queue to receive
++ *
++ * Note: this implementation copies the data from reassembly queue to receive
+  * buffers used by upper layer. This is not the optimal code path. A better way
+  * to do it is to not have upper layer allocate its receive buffers but rather
+  * borrow the buffer from reassembly queue, and return it after data is
+  * consumed. But this will require more changes to upper layer code, and also
+  * need to consider packet boundaries while they still being reassembled.
+  */
+-static int smbd_recv_buf(struct smbd_connection *info, char *buf,
+-		unsigned int size)
++int smbd_recv(struct smbd_connection *info, struct msghdr *msg)
+ {
++	struct smbdirect_socket *sc = &info->socket;
+ 	struct smbd_response *response;
+-	struct smbd_data_transfer *data_transfer;
++	struct smbdirect_data_transfer *data_transfer;
++	size_t size = iov_iter_count(&msg->msg_iter);
+ 	int to_copy, to_read, data_read, offset;
+ 	u32 data_length, remaining_data_length, data_offset;
+ 	int rc;
+ 
++	if (WARN_ON_ONCE(iov_iter_rw(&msg->msg_iter) == WRITE))
++		return -EINVAL; /* It's a bug in upper layer to get there */
++
+ again:
+ 	/*
+ 	 * No need to hold the reassembly queue lock all the time as we are
+ 	 * the only one reading from the front of the queue. The transport
+ 	 * may add more entries to the back of the queue at the same time
+ 	 */
+-	log_read(INFO, "size=%d info->reassembly_data_length=%d\n", size,
++	log_read(INFO, "size=%zd info->reassembly_data_length=%d\n", size,
+ 		info->reassembly_data_length);
+ 	if (info->reassembly_data_length >= size) {
+ 		int queue_length;
+@@ -1784,7 +1825,10 @@ static int smbd_recv_buf(struct smbd_connection *info, char *buf,
+ 			if (response->first_segment && size == 4) {
+ 				unsigned int rfc1002_len =
+ 					data_length + remaining_data_length;
+-				*((__be32 *)buf) = cpu_to_be32(rfc1002_len);
++				__be32 rfc1002_hdr = cpu_to_be32(rfc1002_len);
++				if (copy_to_iter(&rfc1002_hdr, sizeof(rfc1002_hdr),
++						 &msg->msg_iter) != sizeof(rfc1002_hdr))
++					return -EFAULT;
+ 				data_read = 4;
+ 				response->first_segment = false;
+ 				log_read(INFO, "returning rfc1002 length %d\n",
+@@ -1793,10 +1837,9 @@ static int smbd_recv_buf(struct smbd_connection *info, char *buf,
+ 			}
+ 
+ 			to_copy = min_t(int, data_length - offset, to_read);
+-			memcpy(
+-				buf + data_read,
+-				(char *)data_transfer + data_offset + offset,
+-				to_copy);
++			if (copy_to_iter((char *)data_transfer + data_offset + offset,
++					 to_copy, &msg->msg_iter) != to_copy)
++				return -EFAULT;
+ 
+ 			/* move on to the next buffer? */
+ 			if (to_copy == data_length - offset) {
+@@ -1848,12 +1891,12 @@ static int smbd_recv_buf(struct smbd_connection *info, char *buf,
+ 	rc = wait_event_interruptible(
+ 		info->wait_reassembly_queue,
+ 		info->reassembly_data_length >= size ||
+-			info->transport_status != SMBD_CONNECTED);
++			sc->status != SMBDIRECT_SOCKET_CONNECTED);
+ 	/* Don't return any data if interrupted */
+ 	if (rc)
+ 		return rc;
+ 
+-	if (info->transport_status != SMBD_CONNECTED) {
++	if (sc->status != SMBDIRECT_SOCKET_CONNECTED) {
+ 		log_read(ERR, "disconnected\n");
+ 		return -ECONNABORTED;
+ 	}
+@@ -1861,89 +1904,6 @@ static int smbd_recv_buf(struct smbd_connection *info, char *buf,
+ 	goto again;
+ }
+ 
+-/*
+- * Receive a page from receive reassembly queue
+- * page: the page to read data into
+- * to_read: the length of data to read
+- * return value: actual data read
+- */
+-static int smbd_recv_page(struct smbd_connection *info,
+-		struct page *page, unsigned int page_offset,
+-		unsigned int to_read)
+-{
+-	int ret;
+-	char *to_address;
+-	void *page_address;
+-
+-	/* make sure we have the page ready for read */
+-	ret = wait_event_interruptible(
+-		info->wait_reassembly_queue,
+-		info->reassembly_data_length >= to_read ||
+-			info->transport_status != SMBD_CONNECTED);
+-	if (ret)
+-		return ret;
+-
+-	/* now we can read from reassembly queue and not sleep */
+-	page_address = kmap_atomic(page);
+-	to_address = (char *) page_address + page_offset;
+-
+-	log_read(INFO, "reading from page=%p address=%p to_read=%d\n",
+-		page, to_address, to_read);
+-
+-	ret = smbd_recv_buf(info, to_address, to_read);
+-	kunmap_atomic(page_address);
+-
+-	return ret;
+-}
+-
+-/*
+- * Receive data from transport
+- * msg: a msghdr point to the buffer, can be ITER_KVEC or ITER_BVEC
+- * return: total bytes read, or 0. SMB Direct will not do partial read.
+- */
+-int smbd_recv(struct smbd_connection *info, struct msghdr *msg)
+-{
+-	char *buf;
+-	struct page *page;
+-	unsigned int to_read, page_offset;
+-	int rc;
+-
+-	if (iov_iter_rw(&msg->msg_iter) == WRITE) {
+-		/* It's a bug in upper layer to get there */
+-		cifs_dbg(VFS, "Invalid msg iter dir %u\n",
+-			 iov_iter_rw(&msg->msg_iter));
+-		rc = -EINVAL;
+-		goto out;
+-	}
+-
+-	switch (iov_iter_type(&msg->msg_iter)) {
+-	case ITER_KVEC:
+-		buf = msg->msg_iter.kvec->iov_base;
+-		to_read = msg->msg_iter.kvec->iov_len;
+-		rc = smbd_recv_buf(info, buf, to_read);
+-		break;
+-
+-	case ITER_BVEC:
+-		page = msg->msg_iter.bvec->bv_page;
+-		page_offset = msg->msg_iter.bvec->bv_offset;
+-		to_read = msg->msg_iter.bvec->bv_len;
+-		rc = smbd_recv_page(info, page, page_offset, to_read);
+-		break;
+-
+-	default:
+-		/* It's a bug in upper layer to get there */
+-		cifs_dbg(VFS, "Invalid msg type %d\n",
+-			 iov_iter_type(&msg->msg_iter));
+-		rc = -EINVAL;
+-	}
+-
+-out:
+-	/* SMBDirect will read it all or nothing */
+-	if (rc > 0)
+-		msg->msg_iter.count = 0;
+-	return rc;
+-}
+-
+ /*
+  * Send data to transport
+  * Each rqst is transported as a SMBDirect payload
+@@ -1954,12 +1914,14 @@ int smbd_send(struct TCP_Server_Info *server,
+ 	int num_rqst, struct smb_rqst *rqst_array)
+ {
+ 	struct smbd_connection *info = server->smbd_conn;
++	struct smbdirect_socket *sc = &info->socket;
++	struct smbdirect_socket_parameters *sp = &sc->parameters;
+ 	struct smb_rqst *rqst;
+ 	struct iov_iter iter;
+ 	unsigned int remaining_data_length, klen;
+ 	int rc, i, rqst_idx;
+ 
+-	if (info->transport_status != SMBD_CONNECTED)
++	if (sc->status != SMBDIRECT_SOCKET_CONNECTED)
+ 		return -EAGAIN;
+ 
+ 	/*
+@@ -1971,10 +1933,10 @@ int smbd_send(struct TCP_Server_Info *server,
+ 	for (i = 0; i < num_rqst; i++)
+ 		remaining_data_length += smb_rqst_len(server, &rqst_array[i]);
+ 
+-	if (unlikely(remaining_data_length > info->max_fragmented_send_size)) {
++	if (unlikely(remaining_data_length > sp->max_fragmented_send_size)) {
+ 		/* assertion: payload never exceeds negotiated maximum */
+ 		log_write(ERR, "payload size %d > max size %d\n",
+-			remaining_data_length, info->max_fragmented_send_size);
++			remaining_data_length, sp->max_fragmented_send_size);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -2053,6 +2015,7 @@ static void smbd_mr_recovery_work(struct work_struct *work)
+ {
+ 	struct smbd_connection *info =
+ 		container_of(work, struct smbd_connection, mr_recovery_work);
++	struct smbdirect_socket *sc = &info->socket;
+ 	struct smbd_mr *smbdirect_mr;
+ 	int rc;
+ 
+@@ -2070,7 +2033,7 @@ static void smbd_mr_recovery_work(struct work_struct *work)
+ 			}
+ 
+ 			smbdirect_mr->mr = ib_alloc_mr(
+-				info->pd, info->mr_type,
++				sc->ib.pd, info->mr_type,
+ 				info->max_frmr_depth);
+ 			if (IS_ERR(smbdirect_mr->mr)) {
+ 				log_rdma_mr(ERR, "ib_alloc_mr failed mr_type=%x max_frmr_depth=%x\n",
+@@ -2099,12 +2062,13 @@ static void smbd_mr_recovery_work(struct work_struct *work)
+ 
+ static void destroy_mr_list(struct smbd_connection *info)
+ {
++	struct smbdirect_socket *sc = &info->socket;
+ 	struct smbd_mr *mr, *tmp;
+ 
+ 	cancel_work_sync(&info->mr_recovery_work);
+ 	list_for_each_entry_safe(mr, tmp, &info->mr_list, list) {
+ 		if (mr->state == MR_INVALIDATED)
+-			ib_dma_unmap_sg(info->id->device, mr->sgt.sgl,
++			ib_dma_unmap_sg(sc->ib.dev, mr->sgt.sgl,
+ 				mr->sgt.nents, mr->dir);
+ 		ib_dereg_mr(mr->mr);
+ 		kfree(mr->sgt.sgl);
+@@ -2121,6 +2085,7 @@ static void destroy_mr_list(struct smbd_connection *info)
+  */
+ static int allocate_mr_list(struct smbd_connection *info)
+ {
++	struct smbdirect_socket *sc = &info->socket;
+ 	int i;
+ 	struct smbd_mr *smbdirect_mr, *tmp;
+ 
+@@ -2136,7 +2101,7 @@ static int allocate_mr_list(struct smbd_connection *info)
+ 		smbdirect_mr = kzalloc(sizeof(*smbdirect_mr), GFP_KERNEL);
+ 		if (!smbdirect_mr)
+ 			goto cleanup_entries;
+-		smbdirect_mr->mr = ib_alloc_mr(info->pd, info->mr_type,
++		smbdirect_mr->mr = ib_alloc_mr(sc->ib.pd, info->mr_type,
+ 					info->max_frmr_depth);
+ 		if (IS_ERR(smbdirect_mr->mr)) {
+ 			log_rdma_mr(ERR, "ib_alloc_mr failed mr_type=%x max_frmr_depth=%x\n",
+@@ -2181,20 +2146,20 @@ static int allocate_mr_list(struct smbd_connection *info)
+  */
+ static struct smbd_mr *get_mr(struct smbd_connection *info)
+ {
++	struct smbdirect_socket *sc = &info->socket;
+ 	struct smbd_mr *ret;
+ 	int rc;
+ again:
+ 	rc = wait_event_interruptible(info->wait_mr,
+ 		atomic_read(&info->mr_ready_count) ||
+-		info->transport_status != SMBD_CONNECTED);
++		sc->status != SMBDIRECT_SOCKET_CONNECTED);
+ 	if (rc) {
+ 		log_rdma_mr(ERR, "wait_event_interruptible rc=%x\n", rc);
+ 		return NULL;
+ 	}
+ 
+-	if (info->transport_status != SMBD_CONNECTED) {
+-		log_rdma_mr(ERR, "info->transport_status=%x\n",
+-			info->transport_status);
++	if (sc->status != SMBDIRECT_SOCKET_CONNECTED) {
++		log_rdma_mr(ERR, "sc->status=%x\n", sc->status);
+ 		return NULL;
+ 	}
+ 
+@@ -2247,6 +2212,7 @@ struct smbd_mr *smbd_register_mr(struct smbd_connection *info,
+ 				 struct iov_iter *iter,
+ 				 bool writing, bool need_invalidate)
+ {
++	struct smbdirect_socket *sc = &info->socket;
+ 	struct smbd_mr *smbdirect_mr;
+ 	int rc, num_pages;
+ 	enum dma_data_direction dir;
+@@ -2276,7 +2242,7 @@ struct smbd_mr *smbd_register_mr(struct smbd_connection *info,
+ 		    num_pages, iov_iter_count(iter), info->max_frmr_depth);
+ 	smbd_iter_to_mr(info, iter, &smbdirect_mr->sgt, info->max_frmr_depth);
+ 
+-	rc = ib_dma_map_sg(info->id->device, smbdirect_mr->sgt.sgl,
++	rc = ib_dma_map_sg(sc->ib.dev, smbdirect_mr->sgt.sgl,
+ 			   smbdirect_mr->sgt.nents, dir);
+ 	if (!rc) {
+ 		log_rdma_mr(ERR, "ib_dma_map_sg num_pages=%x dir=%x rc=%x\n",
+@@ -2312,7 +2278,7 @@ struct smbd_mr *smbd_register_mr(struct smbd_connection *info,
+ 	 * on IB_WR_REG_MR. Hardware enforces a barrier and order of execution
+ 	 * on the next ib_post_send when we actually send I/O to remote peer
+ 	 */
+-	rc = ib_post_send(info->id->qp, &reg_wr->wr, NULL);
++	rc = ib_post_send(sc->ib.qp, &reg_wr->wr, NULL);
+ 	if (!rc)
+ 		return smbdirect_mr;
+ 
+@@ -2321,7 +2287,7 @@ struct smbd_mr *smbd_register_mr(struct smbd_connection *info,
+ 
+ 	/* If all failed, attempt to recover this MR by setting it MR_ERROR*/
+ map_mr_error:
+-	ib_dma_unmap_sg(info->id->device, smbdirect_mr->sgt.sgl,
++	ib_dma_unmap_sg(sc->ib.dev, smbdirect_mr->sgt.sgl,
+ 			smbdirect_mr->sgt.nents, smbdirect_mr->dir);
+ 
+ dma_map_error:
+@@ -2359,6 +2325,7 @@ int smbd_deregister_mr(struct smbd_mr *smbdirect_mr)
+ {
+ 	struct ib_send_wr *wr;
+ 	struct smbd_connection *info = smbdirect_mr->conn;
++	struct smbdirect_socket *sc = &info->socket;
+ 	int rc = 0;
+ 
+ 	if (smbdirect_mr->need_invalidate) {
+@@ -2372,7 +2339,7 @@ int smbd_deregister_mr(struct smbd_mr *smbdirect_mr)
+ 		wr->send_flags = IB_SEND_SIGNALED;
+ 
+ 		init_completion(&smbdirect_mr->invalidate_done);
+-		rc = ib_post_send(info->id->qp, wr, NULL);
++		rc = ib_post_send(sc->ib.qp, wr, NULL);
+ 		if (rc) {
+ 			log_rdma_mr(ERR, "ib_post_send failed rc=%x\n", rc);
+ 			smbd_disconnect_rdma_connection(info);
+@@ -2389,7 +2356,7 @@ int smbd_deregister_mr(struct smbd_mr *smbdirect_mr)
+ 
+ 	if (smbdirect_mr->state == MR_INVALIDATED) {
+ 		ib_dma_unmap_sg(
+-			info->id->device, smbdirect_mr->sgt.sgl,
++			sc->ib.dev, smbdirect_mr->sgt.sgl,
+ 			smbdirect_mr->sgt.nents,
+ 			smbdirect_mr->dir);
+ 		smbdirect_mr->state = MR_READY;
+diff --git a/fs/smb/client/smbdirect.h b/fs/smb/client/smbdirect.h
+index c08e3665150d74..3d552ab27e0f3d 100644
+--- a/fs/smb/client/smbdirect.h
++++ b/fs/smb/client/smbdirect.h
+@@ -15,6 +15,9 @@
+ #include <rdma/rdma_cm.h>
+ #include <linux/mempool.h>
+ 
++#include "../common/smbdirect/smbdirect.h"
++#include "../common/smbdirect/smbdirect_socket.h"
++
+ extern int rdma_readwrite_threshold;
+ extern int smbd_max_frmr_depth;
+ extern int smbd_keep_alive_interval;
+@@ -50,14 +53,8 @@ enum smbd_connection_status {
+  * 5. mempools for allocating packets
+  */
+ struct smbd_connection {
+-	enum smbd_connection_status transport_status;
+-
+-	/* RDMA related */
+-	struct rdma_cm_id *id;
+-	struct ib_qp_init_attr qp_attr;
+-	struct ib_pd *pd;
+-	struct ib_cq *send_cq, *recv_cq;
+-	struct ib_device_attr dev_attr;
++	struct smbdirect_socket socket;
++
+ 	int ri_rc;
+ 	struct completion ri_done;
+ 	wait_queue_head_t conn_wait;
+@@ -72,15 +69,7 @@ struct smbd_connection {
+ 	spinlock_t lock_new_credits_offered;
+ 	int new_credits_offered;
+ 
+-	/* Connection parameters defined in [MS-SMBD] 3.1.1.1 */
+-	int receive_credit_max;
+-	int send_credit_target;
+-	int max_send_size;
+-	int max_fragmented_recv_size;
+-	int max_fragmented_send_size;
+-	int max_receive_size;
+-	int keep_alive_interval;
+-	int max_readwrite_size;
++	/* dynamic connection parameters defined in [MS-SMBD] 3.1.1.1 */
+ 	enum keep_alive_status keep_alive_requested;
+ 	int protocol;
+ 	atomic_t send_credits;
+@@ -177,47 +166,6 @@ enum smbd_message_type {
+ 	SMBD_TRANSFER_DATA,
+ };
+ 
+-#define SMB_DIRECT_RESPONSE_REQUESTED 0x0001
+-
+-/* SMBD negotiation request packet [MS-SMBD] 2.2.1 */
+-struct smbd_negotiate_req {
+-	__le16 min_version;
+-	__le16 max_version;
+-	__le16 reserved;
+-	__le16 credits_requested;
+-	__le32 preferred_send_size;
+-	__le32 max_receive_size;
+-	__le32 max_fragmented_size;
+-} __packed;
+-
+-/* SMBD negotiation response packet [MS-SMBD] 2.2.2 */
+-struct smbd_negotiate_resp {
+-	__le16 min_version;
+-	__le16 max_version;
+-	__le16 negotiated_version;
+-	__le16 reserved;
+-	__le16 credits_requested;
+-	__le16 credits_granted;
+-	__le32 status;
+-	__le32 max_readwrite_size;
+-	__le32 preferred_send_size;
+-	__le32 max_receive_size;
+-	__le32 max_fragmented_size;
+-} __packed;
+-
+-/* SMBD data transfer packet with payload [MS-SMBD] 2.2.3 */
+-struct smbd_data_transfer {
+-	__le16 credits_requested;
+-	__le16 credits_granted;
+-	__le16 flags;
+-	__le16 reserved;
+-	__le32 remaining_data_length;
+-	__le32 data_offset;
+-	__le32 data_length;
+-	__le32 padding;
+-	__u8 buffer[];
+-} __packed;
+-
+ /* The packet fields for a registered RDMA buffer */
+ struct smbd_buffer_descriptor_v1 {
+ 	__le64 offset;
+diff --git a/fs/smb/client/trace.h b/fs/smb/client/trace.h
+index 12cbd3428a6da5..9c3cc7c3300c2f 100644
+--- a/fs/smb/client/trace.h
++++ b/fs/smb/client/trace.h
+@@ -140,7 +140,7 @@ DECLARE_EVENT_CLASS(smb3_rw_err_class,
+ 		__entry->len = len;
+ 		__entry->rc = rc;
+ 	),
+-	TP_printk("\tR=%08x[%x] xid=%u sid=0x%llx tid=0x%x fid=0x%llx offset=0x%llx len=0x%x rc=%d",
++	TP_printk("R=%08x[%x] xid=%u sid=0x%llx tid=0x%x fid=0x%llx offset=0x%llx len=0x%x rc=%d",
+ 		  __entry->rreq_debug_id, __entry->rreq_debug_index,
+ 		  __entry->xid, __entry->sesid, __entry->tid, __entry->fid,
+ 		  __entry->offset, __entry->len, __entry->rc)
+@@ -190,7 +190,7 @@ DECLARE_EVENT_CLASS(smb3_other_err_class,
+ 		__entry->len = len;
+ 		__entry->rc = rc;
+ 	),
+-	TP_printk("\txid=%u sid=0x%llx tid=0x%x fid=0x%llx offset=0x%llx len=0x%x rc=%d",
++	TP_printk("xid=%u sid=0x%llx tid=0x%x fid=0x%llx offset=0x%llx len=0x%x rc=%d",
+ 		__entry->xid, __entry->sesid, __entry->tid, __entry->fid,
+ 		__entry->offset, __entry->len, __entry->rc)
+ )
+@@ -247,7 +247,7 @@ DECLARE_EVENT_CLASS(smb3_copy_range_err_class,
+ 		__entry->len = len;
+ 		__entry->rc = rc;
+ 	),
+-	TP_printk("\txid=%u sid=0x%llx tid=0x%x source fid=0x%llx source offset=0x%llx target fid=0x%llx target offset=0x%llx len=0x%x rc=%d",
++	TP_printk("xid=%u sid=0x%llx tid=0x%x source fid=0x%llx source offset=0x%llx target fid=0x%llx target offset=0x%llx len=0x%x rc=%d",
+ 		__entry->xid, __entry->sesid, __entry->tid, __entry->target_fid,
+ 		__entry->src_offset, __entry->target_fid, __entry->target_offset, __entry->len, __entry->rc)
+ )
+@@ -298,7 +298,7 @@ DECLARE_EVENT_CLASS(smb3_copy_range_done_class,
+ 		__entry->target_offset = target_offset;
+ 		__entry->len = len;
+ 	),
+-	TP_printk("\txid=%u sid=0x%llx tid=0x%x source fid=0x%llx source offset=0x%llx target fid=0x%llx target offset=0x%llx len=0x%x",
++	TP_printk("xid=%u sid=0x%llx tid=0x%x source fid=0x%llx source offset=0x%llx target fid=0x%llx target offset=0x%llx len=0x%x",
+ 		__entry->xid, __entry->sesid, __entry->tid, __entry->target_fid,
+ 		__entry->src_offset, __entry->target_fid, __entry->target_offset, __entry->len)
+ )
+@@ -482,7 +482,7 @@ DECLARE_EVENT_CLASS(smb3_fd_class,
+ 		__entry->tid = tid;
+ 		__entry->sesid = sesid;
+ 	),
+-	TP_printk("\txid=%u sid=0x%llx tid=0x%x fid=0x%llx",
++	TP_printk("xid=%u sid=0x%llx tid=0x%x fid=0x%llx",
+ 		__entry->xid, __entry->sesid, __entry->tid, __entry->fid)
+ )
+ 
+@@ -521,7 +521,7 @@ DECLARE_EVENT_CLASS(smb3_fd_err_class,
+ 		__entry->sesid = sesid;
+ 		__entry->rc = rc;
+ 	),
+-	TP_printk("\txid=%u sid=0x%llx tid=0x%x fid=0x%llx rc=%d",
++	TP_printk("xid=%u sid=0x%llx tid=0x%x fid=0x%llx rc=%d",
+ 		__entry->xid, __entry->sesid, __entry->tid, __entry->fid,
+ 		__entry->rc)
+ )
+@@ -793,7 +793,7 @@ DECLARE_EVENT_CLASS(smb3_cmd_err_class,
+ 		__entry->status = status;
+ 		__entry->rc = rc;
+ 	),
+-	TP_printk("\tsid=0x%llx tid=0x%x cmd=%u mid=%llu status=0x%x rc=%d",
++	TP_printk("sid=0x%llx tid=0x%x cmd=%u mid=%llu status=0x%x rc=%d",
+ 		__entry->sesid, __entry->tid, __entry->cmd, __entry->mid,
+ 		__entry->status, __entry->rc)
+ )
+@@ -828,7 +828,7 @@ DECLARE_EVENT_CLASS(smb3_cmd_done_class,
+ 		__entry->cmd = cmd;
+ 		__entry->mid = mid;
+ 	),
+-	TP_printk("\tsid=0x%llx tid=0x%x cmd=%u mid=%llu",
++	TP_printk("sid=0x%llx tid=0x%x cmd=%u mid=%llu",
+ 		__entry->sesid, __entry->tid,
+ 		__entry->cmd, __entry->mid)
+ )
+@@ -866,7 +866,7 @@ DECLARE_EVENT_CLASS(smb3_mid_class,
+ 		__entry->when_sent = when_sent;
+ 		__entry->when_received = when_received;
+ 	),
+-	TP_printk("\tcmd=%u mid=%llu pid=%u, when_sent=%lu when_rcv=%lu",
++	TP_printk("cmd=%u mid=%llu pid=%u, when_sent=%lu when_rcv=%lu",
+ 		__entry->cmd, __entry->mid, __entry->pid, __entry->when_sent,
+ 		__entry->when_received)
+ )
+@@ -897,7 +897,7 @@ DECLARE_EVENT_CLASS(smb3_exit_err_class,
+ 		__assign_str(func_name);
+ 		__entry->rc = rc;
+ 	),
+-	TP_printk("\t%s: xid=%u rc=%d",
++	TP_printk("%s: xid=%u rc=%d",
+ 		__get_str(func_name), __entry->xid, __entry->rc)
+ )
+ 
+@@ -923,7 +923,7 @@ DECLARE_EVENT_CLASS(smb3_sync_err_class,
+ 		__entry->ino = ino;
+ 		__entry->rc = rc;
+ 	),
+-	TP_printk("\tino=%lu rc=%d",
++	TP_printk("ino=%lu rc=%d",
+ 		__entry->ino, __entry->rc)
+ )
+ 
+@@ -949,7 +949,7 @@ DECLARE_EVENT_CLASS(smb3_enter_exit_class,
+ 		__entry->xid = xid;
+ 		__assign_str(func_name);
+ 	),
+-	TP_printk("\t%s: xid=%u",
++	TP_printk("%s: xid=%u",
+ 		__get_str(func_name), __entry->xid)
+ )
+ 
+diff --git a/fs/smb/common/smbdirect/smbdirect.h b/fs/smb/common/smbdirect/smbdirect.h
+new file mode 100644
+index 00000000000000..b9a385344ff31c
+--- /dev/null
++++ b/fs/smb/common/smbdirect/smbdirect.h
+@@ -0,0 +1,37 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++/*
++ *   Copyright (C) 2017, Microsoft Corporation.
++ *   Copyright (C) 2018, LG Electronics.
++ */
++
++#ifndef __FS_SMB_COMMON_SMBDIRECT_SMBDIRECT_H__
++#define __FS_SMB_COMMON_SMBDIRECT_SMBDIRECT_H__
++
++/* SMB-DIRECT buffer descriptor V1 structure [MS-SMBD] 2.2.3.1 */
++struct smbdirect_buffer_descriptor_v1 {
++	__le64 offset;
++	__le32 token;
++	__le32 length;
++} __packed;
++
++/*
++ * Connection parameters mostly from [MS-SMBD] 3.1.1.1
++ *
++ * These are setup and negotiated at the beginning of a
++ * connection and remain constant unless explicitly changed.
++ *
++ * Some values are important for the upper layer.
++ */
++struct smbdirect_socket_parameters {
++	__u16 recv_credit_max;
++	__u16 send_credit_target;
++	__u32 max_send_size;
++	__u32 max_fragmented_send_size;
++	__u32 max_recv_size;
++	__u32 max_fragmented_recv_size;
++	__u32 max_read_write_size;
++	__u32 keepalive_interval_msec;
++	__u32 keepalive_timeout_msec;
++} __packed;
++
++#endif /* __FS_SMB_COMMON_SMBDIRECT_SMBDIRECT_H__ */
+diff --git a/fs/smb/common/smbdirect/smbdirect_pdu.h b/fs/smb/common/smbdirect/smbdirect_pdu.h
+new file mode 100644
+index 00000000000000..ae9fdb05ce2314
+--- /dev/null
++++ b/fs/smb/common/smbdirect/smbdirect_pdu.h
+@@ -0,0 +1,55 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++/*
++ *   Copyright (c) 2017 Stefan Metzmacher
++ */
++
++#ifndef __FS_SMB_COMMON_SMBDIRECT_SMBDIRECT_PDU_H__
++#define __FS_SMB_COMMON_SMBDIRECT_SMBDIRECT_PDU_H__
++
++#define SMBDIRECT_V1 0x0100
++
++/* SMBD negotiation request packet [MS-SMBD] 2.2.1 */
++struct smbdirect_negotiate_req {
++	__le16 min_version;
++	__le16 max_version;
++	__le16 reserved;
++	__le16 credits_requested;
++	__le32 preferred_send_size;
++	__le32 max_receive_size;
++	__le32 max_fragmented_size;
++} __packed;
++
++/* SMBD negotiation response packet [MS-SMBD] 2.2.2 */
++struct smbdirect_negotiate_resp {
++	__le16 min_version;
++	__le16 max_version;
++	__le16 negotiated_version;
++	__le16 reserved;
++	__le16 credits_requested;
++	__le16 credits_granted;
++	__le32 status;
++	__le32 max_readwrite_size;
++	__le32 preferred_send_size;
++	__le32 max_receive_size;
++	__le32 max_fragmented_size;
++} __packed;
++
++#define SMBDIRECT_DATA_MIN_HDR_SIZE 0x14
++#define SMBDIRECT_DATA_OFFSET       0x18
++
++#define SMBDIRECT_FLAG_RESPONSE_REQUESTED 0x0001
++
++/* SMBD data transfer packet with payload [MS-SMBD] 2.2.3 */
++struct smbdirect_data_transfer {
++	__le16 credits_requested;
++	__le16 credits_granted;
++	__le16 flags;
++	__le16 reserved;
++	__le32 remaining_data_length;
++	__le32 data_offset;
++	__le32 data_length;
++	__le32 padding;
++	__u8 buffer[];
++} __packed;
++
++#endif /* __FS_SMB_COMMON_SMBDIRECT_SMBDIRECT_PDU_H__ */
+diff --git a/fs/smb/common/smbdirect/smbdirect_socket.h b/fs/smb/common/smbdirect/smbdirect_socket.h
+new file mode 100644
+index 00000000000000..e5b15cc44a7ba5
+--- /dev/null
++++ b/fs/smb/common/smbdirect/smbdirect_socket.h
+@@ -0,0 +1,43 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++/*
++ *   Copyright (c) 2025 Stefan Metzmacher
++ */
++
++#ifndef __FS_SMB_COMMON_SMBDIRECT_SMBDIRECT_SOCKET_H__
++#define __FS_SMB_COMMON_SMBDIRECT_SMBDIRECT_SOCKET_H__
++
++enum smbdirect_socket_status {
++	SMBDIRECT_SOCKET_CREATED,
++	SMBDIRECT_SOCKET_CONNECTING,
++	SMBDIRECT_SOCKET_CONNECTED,
++	SMBDIRECT_SOCKET_NEGOTIATE_FAILED,
++	SMBDIRECT_SOCKET_DISCONNECTING,
++	SMBDIRECT_SOCKET_DISCONNECTED,
++	SMBDIRECT_SOCKET_DESTROYED
++};
++
++struct smbdirect_socket {
++	enum smbdirect_socket_status status;
++
++	/* RDMA related */
++	struct {
++		struct rdma_cm_id *cm_id;
++	} rdma;
++
++	/* IB verbs related */
++	struct {
++		struct ib_pd *pd;
++		struct ib_cq *send_cq;
++		struct ib_cq *recv_cq;
++
++		/*
++		 * shortcuts for rdma.cm_id->{qp,device};
++		 */
++		struct ib_qp *qp;
++		struct ib_device *dev;
++	} ib;
++
++	struct smbdirect_socket_parameters parameters;
++};
++
++#endif /* __FS_SMB_COMMON_SMBDIRECT_SMBDIRECT_SOCKET_H__ */
+diff --git a/fs/smb/server/connection.h b/fs/smb/server/connection.h
+index 572102098c1080..dd3e0e3f7bf046 100644
+--- a/fs/smb/server/connection.h
++++ b/fs/smb/server/connection.h
+@@ -108,6 +108,7 @@ struct ksmbd_conn {
+ 	__le16				signing_algorithm;
+ 	bool				binding;
+ 	atomic_t			refcnt;
++	bool				is_aapl;
+ };
+ 
+ struct ksmbd_conn_ops {
+diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
+index 6537ffd2b9651e..5d2324c09a0704 100644
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -2871,7 +2871,7 @@ int smb2_open(struct ksmbd_work *work)
+ 	int req_op_level = 0, open_flags = 0, may_flags = 0, file_info = 0;
+ 	int rc = 0;
+ 	int contxt_cnt = 0, query_disk_id = 0;
+-	int maximal_access_ctxt = 0, posix_ctxt = 0;
++	bool maximal_access_ctxt = false, posix_ctxt = false;
+ 	int s_type = 0;
+ 	int next_off = 0;
+ 	char *name = NULL;
+@@ -2898,6 +2898,27 @@ int smb2_open(struct ksmbd_work *work)
+ 		return create_smb2_pipe(work);
+ 	}
+ 
++	if (req->CreateContextsOffset && tcon->posix_extensions) {
++		context = smb2_find_context_vals(req, SMB2_CREATE_TAG_POSIX, 16);
++		if (IS_ERR(context)) {
++			rc = PTR_ERR(context);
++			goto err_out2;
++		} else if (context) {
++			struct create_posix *posix = (struct create_posix *)context;
++
++			if (le16_to_cpu(context->DataOffset) +
++				le32_to_cpu(context->DataLength) <
++			    sizeof(struct create_posix) - 4) {
++				rc = -EINVAL;
++				goto err_out2;
++			}
++			ksmbd_debug(SMB, "get posix context\n");
++
++			posix_mode = le32_to_cpu(posix->Mode);
++			posix_ctxt = true;
++		}
++	}
++
+ 	if (req->NameLength) {
+ 		name = smb2_get_name((char *)req + le16_to_cpu(req->NameOffset),
+ 				     le16_to_cpu(req->NameLength),
+@@ -2920,9 +2941,11 @@ int smb2_open(struct ksmbd_work *work)
+ 				goto err_out2;
+ 		}
+ 
+-		rc = ksmbd_validate_filename(name);
+-		if (rc < 0)
+-			goto err_out2;
++		if (posix_ctxt == false) {
++			rc = ksmbd_validate_filename(name);
++			if (rc < 0)
++				goto err_out2;
++		}
+ 
+ 		if (ksmbd_share_veto_filename(share, name)) {
+ 			rc = -ENOENT;
+@@ -3080,28 +3103,6 @@ int smb2_open(struct ksmbd_work *work)
+ 			rc = -EBADF;
+ 			goto err_out2;
+ 		}
+-
+-		if (tcon->posix_extensions) {
+-			context = smb2_find_context_vals(req,
+-							 SMB2_CREATE_TAG_POSIX, 16);
+-			if (IS_ERR(context)) {
+-				rc = PTR_ERR(context);
+-				goto err_out2;
+-			} else if (context) {
+-				struct create_posix *posix =
+-					(struct create_posix *)context;
+-				if (le16_to_cpu(context->DataOffset) +
+-				    le32_to_cpu(context->DataLength) <
+-				    sizeof(struct create_posix) - 4) {
+-					rc = -EINVAL;
+-					goto err_out2;
+-				}
+-				ksmbd_debug(SMB, "get posix context\n");
+-
+-				posix_mode = le32_to_cpu(posix->Mode);
+-				posix_ctxt = 1;
+-			}
+-		}
+ 	}
+ 
+ 	if (ksmbd_override_fsids(work)) {
+@@ -3534,6 +3535,15 @@ int smb2_open(struct ksmbd_work *work)
+ 			ksmbd_debug(SMB, "get query on disk id context\n");
+ 			query_disk_id = 1;
+ 		}
++
++		if (conn->is_aapl == false) {
++			context = smb2_find_context_vals(req, SMB2_CREATE_AAPL, 4);
++			if (IS_ERR(context)) {
++				rc = PTR_ERR(context);
++				goto err_out1;
++			} else if (context)
++				conn->is_aapl = true;
++		}
+ 	}
+ 
+ 	rc = ksmbd_vfs_getattr(&path, &stat);
+@@ -3973,7 +3983,10 @@ static int smb2_populate_readdir_entry(struct ksmbd_conn *conn, int info_level,
+ 		if (dinfo->EaSize)
+ 			dinfo->ExtFileAttributes = FILE_ATTRIBUTE_REPARSE_POINT_LE;
+ 		dinfo->Reserved = 0;
+-		dinfo->UniqueId = cpu_to_le64(ksmbd_kstat->kstat->ino);
++		if (conn->is_aapl)
++			dinfo->UniqueId = 0;
++		else
++			dinfo->UniqueId = cpu_to_le64(ksmbd_kstat->kstat->ino);
+ 		if (d_info->hide_dot_file && d_info->name[0] == '.')
+ 			dinfo->ExtFileAttributes |= FILE_ATTRIBUTE_HIDDEN_LE;
+ 		memcpy(dinfo->FileName, conv_name, conv_len);
+@@ -3990,7 +4003,10 @@ static int smb2_populate_readdir_entry(struct ksmbd_conn *conn, int info_level,
+ 			smb2_get_reparse_tag_special_file(ksmbd_kstat->kstat->mode);
+ 		if (fibdinfo->EaSize)
+ 			fibdinfo->ExtFileAttributes = FILE_ATTRIBUTE_REPARSE_POINT_LE;
+-		fibdinfo->UniqueId = cpu_to_le64(ksmbd_kstat->kstat->ino);
++		if (conn->is_aapl)
++			fibdinfo->UniqueId = 0;
++		else
++			fibdinfo->UniqueId = cpu_to_le64(ksmbd_kstat->kstat->ino);
+ 		fibdinfo->ShortNameLength = 0;
+ 		fibdinfo->Reserved = 0;
+ 		fibdinfo->Reserved2 = cpu_to_le16(0);
+diff --git a/fs/smb/server/smb2pdu.h b/fs/smb/server/smb2pdu.h
+index 17a0b18a8406b3..16ae8a10490beb 100644
+--- a/fs/smb/server/smb2pdu.h
++++ b/fs/smb/server/smb2pdu.h
+@@ -63,6 +63,9 @@ struct preauth_integrity_info {
+ 
+ #define SMB2_SESSION_TIMEOUT		(10 * HZ)
+ 
++/* Apple Defined Contexts */
++#define SMB2_CREATE_AAPL		"AAPL"
++
+ struct create_durable_req_v2 {
+ 	struct create_context_hdr ccontext;
+ 	__u8   Name[8];
+diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
+index e9e3366d059ef1..730aa0245aef93 100644
+--- a/include/net/bluetooth/hci_core.h
++++ b/include/net/bluetooth/hci_core.h
+@@ -29,6 +29,7 @@
+ #include <linux/idr.h>
+ #include <linux/leds.h>
+ #include <linux/rculist.h>
++#include <linux/srcu.h>
+ 
+ #include <net/bluetooth/hci.h>
+ #include <net/bluetooth/hci_sync.h>
+@@ -338,6 +339,7 @@ struct adv_monitor {
+ 
+ struct hci_dev {
+ 	struct list_head list;
++	struct srcu_struct srcu;
+ 	struct mutex	lock;
+ 
+ 	struct ida	unset_handle_ida;
+diff --git a/include/uapi/drm/ivpu_accel.h b/include/uapi/drm/ivpu_accel.h
+index 13001da141c336..4b261eb705bc03 100644
+--- a/include/uapi/drm/ivpu_accel.h
++++ b/include/uapi/drm/ivpu_accel.h
+@@ -261,7 +261,7 @@ struct drm_ivpu_bo_info {
+ 
+ /* drm_ivpu_submit engines */
+ #define DRM_IVPU_ENGINE_COMPUTE 0
+-#define DRM_IVPU_ENGINE_COPY    1
++#define DRM_IVPU_ENGINE_COPY    1 /* Deprecated */
+ 
+ /**
+  * struct drm_ivpu_submit - Submit commands to the VPU
+@@ -292,10 +292,6 @@ struct drm_ivpu_submit {
+ 	 * %DRM_IVPU_ENGINE_COMPUTE:
+ 	 *
+ 	 * Performs Deep Learning Neural Compute Inference Operations
+-	 *
+-	 * %DRM_IVPU_ENGINE_COPY:
+-	 *
+-	 * Performs memory copy operations to/from system memory allocated for VPU
+ 	 */
+ 	__u32 engine;
+ 
+diff --git a/include/uapi/linux/vm_sockets.h b/include/uapi/linux/vm_sockets.h
+index ed07181d4eff91..e05280e4152286 100644
+--- a/include/uapi/linux/vm_sockets.h
++++ b/include/uapi/linux/vm_sockets.h
+@@ -17,6 +17,10 @@
+ #ifndef _UAPI_VM_SOCKETS_H
+ #define _UAPI_VM_SOCKETS_H
+ 
++#ifndef __KERNEL__
++#include <sys/socket.h>        /* for struct sockaddr and sa_family_t */
++#endif
++
+ #include <linux/socket.h>
+ #include <linux/types.h>
+ 
+diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
+index c9289597522f6e..9bd27deeee6fad 100644
+--- a/io_uring/kbuf.c
++++ b/io_uring/kbuf.c
+@@ -263,6 +263,7 @@ static int io_ring_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg,
+ 		if (len > arg->max_len) {
+ 			len = arg->max_len;
+ 			if (!(bl->flags & IOBL_INC)) {
++				arg->partial_map = 1;
+ 				if (iov != arg->iovs)
+ 					break;
+ 				buf->len = len;
+diff --git a/io_uring/kbuf.h b/io_uring/kbuf.h
+index 36aadfe5ac0027..2586a292dfb914 100644
+--- a/io_uring/kbuf.h
++++ b/io_uring/kbuf.h
+@@ -61,6 +61,7 @@ struct buf_sel_arg {
+ 	size_t max_len;
+ 	unsigned short nr_iovs;
+ 	unsigned short mode;
++	unsigned short partial_map;
+ };
+ 
+ void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
+diff --git a/io_uring/net.c b/io_uring/net.c
+index 384915d931b72c..0116cfaec84881 100644
+--- a/io_uring/net.c
++++ b/io_uring/net.c
+@@ -76,12 +76,18 @@ struct io_sr_msg {
+ 	/* initialised and used only by !msg send variants */
+ 	u16				addr_len;
+ 	u16				buf_group;
++	unsigned short			retry_flags;
+ 	void __user			*addr;
+ 	void __user			*msg_control;
+ 	/* used only for send zerocopy */
+ 	struct io_kiocb 		*notif;
+ };
+ 
++enum sr_retry_flags {
++	IO_SR_MSG_RETRY		= 1,
++	IO_SR_MSG_PARTIAL_MAP	= 2,
++};
++
+ /*
+  * Number of times we'll try and do receives if there's more data. If we
+  * exceed this limit, then add us to the back of the queue and retry from
+@@ -203,6 +209,7 @@ static inline void io_mshot_prep_retry(struct io_kiocb *req,
+ 
+ 	req->flags &= ~REQ_F_BL_EMPTY;
+ 	sr->done_io = 0;
++	sr->retry_flags = 0;
+ 	sr->len = 0; /* get from the provided buffer */
+ 	req->buf_index = sr->buf_group;
+ }
+@@ -409,6 +416,7 @@ int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+ 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
+ 
+ 	sr->done_io = 0;
++	sr->retry_flags = 0;
+ 
+ 	if (req->opcode == IORING_OP_SEND) {
+ 		if (READ_ONCE(sqe->__pad3[0]))
+@@ -780,6 +788,7 @@ int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+ 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
+ 
+ 	sr->done_io = 0;
++	sr->retry_flags = 0;
+ 
+ 	if (unlikely(sqe->file_index || sqe->addr2))
+ 		return -EINVAL;
+@@ -828,6 +837,9 @@ int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+ 	return io_recvmsg_prep_setup(req);
+ }
+ 
++/* bits to clear in old and inherit in new cflags on bundle retry */
++#define CQE_F_MASK	(IORING_CQE_F_SOCK_NONEMPTY|IORING_CQE_F_MORE)
++
+ /*
+  * Finishes io_recv and io_recvmsg.
+  *
+@@ -845,11 +857,27 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
+ 		cflags |= IORING_CQE_F_SOCK_NONEMPTY;
+ 
+ 	if (sr->flags & IORING_RECVSEND_BUNDLE) {
+-		cflags |= io_put_kbufs(req, *ret, io_bundle_nbufs(kmsg, *ret),
++		size_t this_ret = *ret - sr->done_io;
++
++		cflags |= io_put_kbufs(req, this_ret, io_bundle_nbufs(kmsg, this_ret),
+ 				      issue_flags);
++		if (sr->retry_flags & IO_SR_MSG_RETRY)
++			cflags = req->cqe.flags | (cflags & CQE_F_MASK);
+ 		/* bundle with no more immediate buffers, we're done */
+ 		if (req->flags & REQ_F_BL_EMPTY)
+ 			goto finish;
++		/*
++		 * If more is available AND it was a full transfer, retry and
++		 * append to this one
++		 */
++		if (!sr->retry_flags && kmsg->msg.msg_inq > 1 && this_ret > 0 &&
++		    !iov_iter_count(&kmsg->msg.msg_iter)) {
++			req->cqe.flags = cflags & ~CQE_F_MASK;
++			sr->len = kmsg->msg.msg_inq;
++			sr->done_io += this_ret;
++			sr->retry_flags |= IO_SR_MSG_RETRY;
++			return false;
++		}
+ 	} else {
+ 		cflags |= io_put_kbuf(req, *ret, issue_flags);
+ 	}
+@@ -1088,13 +1116,21 @@ static int io_recv_buf_select(struct io_kiocb *req, struct io_async_msghdr *kmsg
+ 			arg.mode |= KBUF_MODE_FREE;
+ 		}
+ 
+-		if (kmsg->msg.msg_inq > 0)
++		if (kmsg->msg.msg_inq > 1)
+ 			arg.max_len = min_not_zero(sr->len, kmsg->msg.msg_inq);
+ 
+ 		ret = io_buffers_peek(req, &arg);
+ 		if (unlikely(ret < 0))
+ 			return ret;
+ 
++		if (arg.iovs != &kmsg->fast_iov && arg.iovs != kmsg->free_iov) {
++			kmsg->free_iov_nr = ret;
++			kmsg->free_iov = arg.iovs;
++			req->flags |= REQ_F_NEED_CLEANUP;
++		}
++		if (arg.partial_map)
++			sr->retry_flags |= IO_SR_MSG_PARTIAL_MAP;
++
+ 		/* special case 1 vec, can be a fast path */
+ 		if (ret == 1) {
+ 			sr->buf = arg.iovs[0].iov_base;
+@@ -1103,11 +1139,6 @@ static int io_recv_buf_select(struct io_kiocb *req, struct io_async_msghdr *kmsg
+ 		}
+ 		iov_iter_init(&kmsg->msg.msg_iter, ITER_DEST, arg.iovs, ret,
+ 				arg.out_len);
+-		if (arg.iovs != &kmsg->fast_iov && arg.iovs != kmsg->free_iov) {
+-			kmsg->free_iov_nr = ret;
+-			kmsg->free_iov = arg.iovs;
+-			req->flags |= REQ_F_NEED_CLEANUP;
+-		}
+ 	} else {
+ 		void __user *buf;
+ 
+@@ -1228,6 +1259,7 @@ int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+ 	struct io_kiocb *notif;
+ 
+ 	zc->done_io = 0;
++	zc->retry_flags = 0;
+ 	req->flags |= REQ_F_POLL_NO_LAZY;
+ 
+ 	if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))
+diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
+index a67bae350416b3..1687e35e21c93d 100644
+--- a/io_uring/rsrc.c
++++ b/io_uring/rsrc.c
+@@ -119,8 +119,11 @@ static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_mapped_ubuf **slo
+ 	if (imu != &dummy_ubuf) {
+ 		if (!refcount_dec_and_test(&imu->refs))
+ 			return;
+-		for (i = 0; i < imu->nr_bvecs; i++)
+-			unpin_user_page(imu->bvec[i].bv_page);
++		for (i = 0; i < imu->nr_bvecs; i++) {
++			struct folio *folio = page_folio(imu->bvec[i].bv_page);
++
++			unpin_user_folio(folio, 1);
++		}
+ 		if (imu->acct_pages)
+ 			io_unaccount_mem(ctx, imu->acct_pages);
+ 		kvfree(imu);
+@@ -915,6 +918,7 @@ static bool io_try_coalesce_buffer(struct page ***pages, int *nr_pages,
+ 		return false;
+ 
+ 	data->folio_shift = folio_shift(folio);
++	data->first_folio_page_idx = folio_page_idx(folio, page_array[0]);
+ 	/*
+ 	 * Check if pages are contiguous inside a folio, and all folios have
+ 	 * the same page count except for the head and tail.
+@@ -983,10 +987,8 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
+ 		goto done;
+ 
+ 	ret = io_buffer_account_pin(ctx, pages, nr_pages, imu, last_hpage);
+-	if (ret) {
+-		unpin_user_pages(pages, nr_pages);
++	if (ret)
+ 		goto done;
+-	}
+ 
+ 	size = iov->iov_len;
+ 	/* store original address for later verification */
+@@ -997,7 +999,9 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
+ 	if (coalesced)
+ 		imu->folio_shift = data.folio_shift;
+ 	refcount_set(&imu->refs, 1);
+-	off = (unsigned long) iov->iov_base & ((1UL << imu->folio_shift) - 1);
++	off = (unsigned long)iov->iov_base & ~PAGE_MASK;
++	if (coalesced)
++		off += data.first_folio_page_idx << PAGE_SHIFT;
+ 	*pimu = imu;
+ 	ret = 0;
+ 
+@@ -1010,8 +1014,13 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
+ 		size -= vec_len;
+ 	}
+ done:
+-	if (ret)
++	if (ret) {
+ 		kvfree(imu);
++		if (pages) {
++			for (i = 0; i < nr_pages; i++)
++				unpin_user_folio(page_folio(pages[i]), 1);
++		}
++	}
+ 	kvfree(pages);
+ 	return ret;
+ }
+diff --git a/io_uring/rsrc.h b/io_uring/rsrc.h
+index 8ed58803621027..459cf4c6e85640 100644
+--- a/io_uring/rsrc.h
++++ b/io_uring/rsrc.h
+@@ -56,6 +56,7 @@ struct io_imu_folio_data {
+ 	/* For non-head/tail folios, has to be fully included */
+ 	unsigned int	nr_pages_mid;
+ 	unsigned int	folio_shift;
++	unsigned long	first_folio_page_idx;
+ };
+ 
+ void io_rsrc_node_ref_zero(struct io_rsrc_node *node);
+diff --git a/lib/group_cpus.c b/lib/group_cpus.c
+index ee272c4cefcc13..18d43a406114b9 100644
+--- a/lib/group_cpus.c
++++ b/lib/group_cpus.c
+@@ -352,6 +352,9 @@ struct cpumask *group_cpus_evenly(unsigned int numgrps)
+ 	int ret = -ENOMEM;
+ 	struct cpumask *masks = NULL;
+ 
++	if (numgrps == 0)
++		return NULL;
++
+ 	if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
+ 		return NULL;
+ 
+@@ -426,8 +429,12 @@ struct cpumask *group_cpus_evenly(unsigned int numgrps)
+ #else /* CONFIG_SMP */
+ struct cpumask *group_cpus_evenly(unsigned int numgrps)
+ {
+-	struct cpumask *masks = kcalloc(numgrps, sizeof(*masks), GFP_KERNEL);
++	struct cpumask *masks;
+ 
++	if (numgrps == 0)
++		return NULL;
++
++	masks = kcalloc(numgrps, sizeof(*masks), GFP_KERNEL);
+ 	if (!masks)
+ 		return NULL;
+ 
+diff --git a/lib/maple_tree.c b/lib/maple_tree.c
+index 8d73ccf66f3aa0..44441ec5b0affc 100644
+--- a/lib/maple_tree.c
++++ b/lib/maple_tree.c
+@@ -5542,8 +5542,9 @@ int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp)
+ 	mas_wr_store_type(&wr_mas);
+ 	request = mas_prealloc_calc(mas, entry);
+ 	if (!request)
+-		return ret;
++		goto set_flag;
+ 
++	mas->mas_flags &= ~MA_STATE_PREALLOC;
+ 	mas_node_count_gfp(mas, request, gfp);
+ 	if (mas_is_err(mas)) {
+ 		mas_set_alloc_req(mas, 0);
+@@ -5553,6 +5554,7 @@ int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp)
+ 		return ret;
+ 	}
+ 
++set_flag:
+ 	mas->mas_flags |= MA_STATE_PREALLOC;
+ 	return ret;
+ }
+diff --git a/mm/damon/sysfs-schemes.c b/mm/damon/sysfs-schemes.c
+index b095457380b560..d9e01648db70ec 100644
+--- a/mm/damon/sysfs-schemes.c
++++ b/mm/damon/sysfs-schemes.c
+@@ -423,6 +423,7 @@ static ssize_t memcg_path_store(struct kobject *kobj,
+ 		return -ENOMEM;
+ 
+ 	strscpy(path, buf, count + 1);
++	kfree(filter->memcg_path);
+ 	filter->memcg_path = path;
+ 	return count;
+ }
+diff --git a/mm/gup.c b/mm/gup.c
+index 90866b827b60f4..e323843cc5dd80 100644
+--- a/mm/gup.c
++++ b/mm/gup.c
+@@ -2326,13 +2326,13 @@ static void pofs_unpin(struct pages_or_folios *pofs)
+ /*
+  * Returns the number of collected folios. Return value is always >= 0.
+  */
+-static void collect_longterm_unpinnable_folios(
++static unsigned long collect_longterm_unpinnable_folios(
+ 		struct list_head *movable_folio_list,
+ 		struct pages_or_folios *pofs)
+ {
++	unsigned long i, collected = 0;
+ 	struct folio *prev_folio = NULL;
+ 	bool drain_allow = true;
+-	unsigned long i;
+ 
+ 	for (i = 0; i < pofs->nr_entries; i++) {
+ 		struct folio *folio = pofs_get_folio(pofs, i);
+@@ -2344,6 +2344,8 @@ static void collect_longterm_unpinnable_folios(
+ 		if (folio_is_longterm_pinnable(folio))
+ 			continue;
+ 
++		collected++;
++
+ 		if (folio_is_device_coherent(folio))
+ 			continue;
+ 
+@@ -2365,6 +2367,8 @@ static void collect_longterm_unpinnable_folios(
+ 				    NR_ISOLATED_ANON + folio_is_file_lru(folio),
+ 				    folio_nr_pages(folio));
+ 	}
++
++	return collected;
+ }
+ 
+ /*
+@@ -2441,9 +2445,11 @@ static long
+ check_and_migrate_movable_pages_or_folios(struct pages_or_folios *pofs)
+ {
+ 	LIST_HEAD(movable_folio_list);
++	unsigned long collected;
+ 
+-	collect_longterm_unpinnable_folios(&movable_folio_list, pofs);
+-	if (list_empty(&movable_folio_list))
++	collected = collect_longterm_unpinnable_folios(&movable_folio_list,
++						       pofs);
++	if (!collected)
+ 		return 0;
+ 
+ 	return migrate_longterm_unpinnable_folios(&movable_folio_list, pofs);
+diff --git a/mm/vma.c b/mm/vma.c
+index 1d82ec4ee7bb52..140f7017bb6343 100644
+--- a/mm/vma.c
++++ b/mm/vma.c
+@@ -836,9 +836,6 @@ static struct vm_area_struct *vma_merge_existing_range(struct vma_merge_struct *
+ 		err = dup_anon_vma(next, vma, &anon_dup);
+ 	}
+ 
+-	if (err)
+-		goto abort;
+-
+ 	/*
+ 	 * In nearly all cases, we expand vmg->vma. There is one exception -
+ 	 * merge_right where we partially span the VMA. In this case we shrink
+@@ -846,22 +843,11 @@ static struct vm_area_struct *vma_merge_existing_range(struct vma_merge_struct *
+ 	 */
+ 	expanded = !merge_right || merge_will_delete_vma;
+ 
+-	if (commit_merge(vmg, adjust,
+-			 merge_will_delete_vma ? vma : NULL,
+-			 merge_will_delete_next ? next : NULL,
+-			 adj_start, expanded)) {
+-		if (anon_dup)
+-			unlink_anon_vmas(anon_dup);
+-
+-		/*
+-		 * We've cleaned up any cloned anon_vma's, no VMAs have been
+-		 * modified, no harm no foul if the user requests that we not
+-		 * report this and just give up, leaving the VMAs unmerged.
+-		 */
+-		if (!vmg->give_up_on_oom)
+-			vmg->state = VMA_MERGE_ERROR_NOMEM;
+-		return NULL;
+-	}
++	if (err || commit_merge(vmg, adjust,
++			merge_will_delete_vma ? vma : NULL,
++			merge_will_delete_next ? next : NULL,
++			adj_start, expanded))
++		goto abort;
+ 
+ 	res = merge_left ? prev : next;
+ 	khugepaged_enter_vma(res, vmg->flags);
+@@ -873,6 +859,9 @@ static struct vm_area_struct *vma_merge_existing_range(struct vma_merge_struct *
+ 	vma_iter_set(vmg->vmi, start);
+ 	vma_iter_load(vmg->vmi);
+ 
++	if (anon_dup)
++		unlink_anon_vmas(anon_dup);
++
+ 	/*
+ 	 * This means we have failed to clone anon_vma's correctly, but no
+ 	 * actual changes to VMAs have occurred, so no harm no foul - if the
+diff --git a/net/atm/clip.c b/net/atm/clip.c
+index 42b910cb4e8ee7..0d7744442b25ac 100644
+--- a/net/atm/clip.c
++++ b/net/atm/clip.c
+@@ -193,12 +193,6 @@ static void clip_push(struct atm_vcc *vcc, struct sk_buff *skb)
+ 
+ 	pr_debug("\n");
+ 
+-	if (!clip_devs) {
+-		atm_return(vcc, skb->truesize);
+-		kfree_skb(skb);
+-		return;
+-	}
+-
+ 	if (!skb) {
+ 		pr_debug("removing VCC %p\n", clip_vcc);
+ 		if (clip_vcc->entry)
+@@ -208,6 +202,11 @@ static void clip_push(struct atm_vcc *vcc, struct sk_buff *skb)
+ 		return;
+ 	}
+ 	atm_return(vcc, skb->truesize);
++	if (!clip_devs) {
++		kfree_skb(skb);
++		return;
++	}
++
+ 	skb->dev = clip_vcc->entry ? clip_vcc->entry->neigh->dev : clip_devs;
+ 	/* clip_vcc->entry == NULL if we don't have an IP address yet */
+ 	if (!skb->dev) {
+diff --git a/net/atm/resources.c b/net/atm/resources.c
+index 995d29e7fb138c..b19d851e1f4439 100644
+--- a/net/atm/resources.c
++++ b/net/atm/resources.c
+@@ -146,11 +146,10 @@ void atm_dev_deregister(struct atm_dev *dev)
+ 	 */
+ 	mutex_lock(&atm_dev_mutex);
+ 	list_del(&dev->dev_list);
+-	mutex_unlock(&atm_dev_mutex);
+-
+ 	atm_dev_release_vccs(dev);
+ 	atm_unregister_sysfs(dev);
+ 	atm_proc_dev_deregister(dev);
++	mutex_unlock(&atm_dev_mutex);
+ 
+ 	atm_dev_put(dev);
+ }
+diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
+index 0d3816c807588c..b74ada80923788 100644
+--- a/net/bluetooth/hci_core.c
++++ b/net/bluetooth/hci_core.c
+@@ -64,7 +64,7 @@ static DEFINE_IDA(hci_index_ida);
+ 
+ /* Get HCI device by index.
+  * Device is held on return. */
+-struct hci_dev *hci_dev_get(int index)
++static struct hci_dev *__hci_dev_get(int index, int *srcu_index)
+ {
+ 	struct hci_dev *hdev = NULL, *d;
+ 
+@@ -77,6 +77,8 @@ struct hci_dev *hci_dev_get(int index)
+ 	list_for_each_entry(d, &hci_dev_list, list) {
+ 		if (d->id == index) {
+ 			hdev = hci_dev_hold(d);
++			if (srcu_index)
++				*srcu_index = srcu_read_lock(&d->srcu);
+ 			break;
+ 		}
+ 	}
+@@ -84,6 +86,22 @@ struct hci_dev *hci_dev_get(int index)
+ 	return hdev;
+ }
+ 
++struct hci_dev *hci_dev_get(int index)
++{
++	return __hci_dev_get(index, NULL);
++}
++
++static struct hci_dev *hci_dev_get_srcu(int index, int *srcu_index)
++{
++	return __hci_dev_get(index, srcu_index);
++}
++
++static void hci_dev_put_srcu(struct hci_dev *hdev, int srcu_index)
++{
++	srcu_read_unlock(&hdev->srcu, srcu_index);
++	hci_dev_put(hdev);
++}
++
+ /* ---- Inquiry support ---- */
+ 
+ bool hci_discovery_active(struct hci_dev *hdev)
+@@ -568,9 +586,9 @@ static int hci_dev_do_reset(struct hci_dev *hdev)
+ int hci_dev_reset(__u16 dev)
+ {
+ 	struct hci_dev *hdev;
+-	int err;
++	int err, srcu_index;
+ 
+-	hdev = hci_dev_get(dev);
++	hdev = hci_dev_get_srcu(dev, &srcu_index);
+ 	if (!hdev)
+ 		return -ENODEV;
+ 
+@@ -592,7 +610,7 @@ int hci_dev_reset(__u16 dev)
+ 	err = hci_dev_do_reset(hdev);
+ 
+ done:
+-	hci_dev_put(hdev);
++	hci_dev_put_srcu(hdev, srcu_index);
+ 	return err;
+ }
+ 
+@@ -2439,6 +2457,11 @@ struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
+ 	if (!hdev)
+ 		return NULL;
+ 
++	if (init_srcu_struct(&hdev->srcu)) {
++		kfree(hdev);
++		return NULL;
++	}
++
+ 	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
+ 	hdev->esco_type = (ESCO_HV1);
+ 	hdev->link_mode = (HCI_LM_ACCEPT);
+@@ -2684,6 +2707,9 @@ void hci_unregister_dev(struct hci_dev *hdev)
+ 	list_del(&hdev->list);
+ 	write_unlock(&hci_dev_list_lock);
+ 
++	synchronize_srcu(&hdev->srcu);
++	cleanup_srcu_struct(&hdev->srcu);
++
+ 	disable_work_sync(&hdev->rx_work);
+ 	disable_work_sync(&hdev->cmd_work);
+ 	disable_work_sync(&hdev->tx_work);
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index a40534bf9084d0..0628fedc0e29b6 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -3380,7 +3380,7 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data
+ 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
+ 	struct l2cap_conf_efs efs;
+ 	u8 remote_efs = 0;
+-	u16 mtu = L2CAP_DEFAULT_MTU;
++	u16 mtu = 0;
+ 	u16 result = L2CAP_CONF_SUCCESS;
+ 	u16 size;
+ 
+@@ -3485,6 +3485,13 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data
+ 		/* Configure output options and let the other side know
+ 		 * which ones we don't like. */
+ 
++		/* If MTU is not provided in configure request, use the most recently
++		 * explicitly or implicitly accepted value for the other direction,
++		 * or the default value.
++		 */
++		if (mtu == 0)
++			mtu = chan->imtu ? chan->imtu : L2CAP_DEFAULT_MTU;
++
+ 		if (mtu < L2CAP_DEFAULT_MIN_MTU)
+ 			result = L2CAP_CONF_UNACCEPT;
+ 		else {
+diff --git a/net/core/selftests.c b/net/core/selftests.c
+index 561653f9d71d44..ef27594d6a9968 100644
+--- a/net/core/selftests.c
++++ b/net/core/selftests.c
+@@ -160,8 +160,9 @@ static struct sk_buff *net_test_get_skb(struct net_device *ndev,
+ 	skb->csum = 0;
+ 	skb->ip_summed = CHECKSUM_PARTIAL;
+ 	if (attr->tcp) {
+-		thdr->check = ~tcp_v4_check(skb->len, ihdr->saddr,
+-					    ihdr->daddr, 0);
++		int l4len = skb->len - skb_transport_offset(skb);
++
++		thdr->check = ~tcp_v4_check(l4len, ihdr->saddr, ihdr->daddr, 0);
+ 		skb->csum_start = skb_transport_header(skb) - skb->head;
+ 		skb->csum_offset = offsetof(struct tcphdr, check);
+ 	} else {
+diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
+index cca6d14084d214..282e8c13e2bfc5 100644
+--- a/net/mac80211/chan.c
++++ b/net/mac80211/chan.c
+@@ -2097,6 +2097,9 @@ void ieee80211_link_release_channel(struct ieee80211_link_data *link)
+ {
+ 	struct ieee80211_sub_if_data *sdata = link->sdata;
+ 
++	if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
++		return;
++
+ 	lockdep_assert_wiphy(sdata->local->hw.wiphy);
+ 
+ 	if (rcu_access_pointer(link->conf->chanctx_conf))
+diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
+index bfe0514efca37f..2f017dbbcb975c 100644
+--- a/net/mac80211/ieee80211_i.h
++++ b/net/mac80211/ieee80211_i.h
+@@ -1209,6 +1209,15 @@ struct ieee80211_sub_if_data *vif_to_sdata(struct ieee80211_vif *p)
+ 	if ((_link = wiphy_dereference((local)->hw.wiphy,		\
+ 				       ___sdata->link[___link_id])))
+ 
++#define for_each_link_data(sdata, __link)					\
++	struct ieee80211_sub_if_data *__sdata = sdata;				\
++	for (int __link_id = 0;							\
++	     __link_id < ARRAY_SIZE((__sdata)->link); __link_id++)		\
++		if ((!(__sdata)->vif.valid_links ||				\
++		     (__sdata)->vif.valid_links & BIT(__link_id)) &&		\
++		    ((__link) = sdata_dereference((__sdata)->link[__link_id],	\
++						  (__sdata))))
++
+ static inline int
+ ieee80211_get_mbssid_beacon_len(struct cfg80211_mbssid_elems *elems,
+ 				struct cfg80211_rnr_elems *rnr_elems,
+@@ -2061,6 +2070,9 @@ static inline void ieee80211_vif_clear_links(struct ieee80211_sub_if_data *sdata
+ 	ieee80211_vif_set_links(sdata, 0, 0);
+ }
+ 
++void ieee80211_apvlan_link_setup(struct ieee80211_sub_if_data *sdata);
++void ieee80211_apvlan_link_clear(struct ieee80211_sub_if_data *sdata);
++
+ /* tx handling */
+ void ieee80211_clear_tx_pending(struct ieee80211_local *local);
+ void ieee80211_tx_pending(struct tasklet_struct *t);
+diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
+index 7e1e561ef76c1c..209d6ffa8e4261 100644
+--- a/net/mac80211/iface.c
++++ b/net/mac80211/iface.c
+@@ -494,6 +494,9 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do
+ 			break;
+ 		list_del_rcu(&sdata->u.mntr.list);
+ 		break;
++	case NL80211_IFTYPE_AP_VLAN:
++		ieee80211_apvlan_link_clear(sdata);
++		break;
+ 	default:
+ 		break;
+ 	}
+@@ -1268,6 +1271,8 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
+ 		sdata->crypto_tx_tailroom_needed_cnt +=
+ 			master->crypto_tx_tailroom_needed_cnt;
+ 
++		ieee80211_apvlan_link_setup(sdata);
++
+ 		break;
+ 		}
+ 	case NL80211_IFTYPE_AP:
+@@ -1322,7 +1327,12 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
+ 	case NL80211_IFTYPE_AP_VLAN:
+ 		/* no need to tell driver, but set carrier and chanctx */
+ 		if (sdata->bss->active) {
+-			ieee80211_link_vlan_copy_chanctx(&sdata->deflink);
++			struct ieee80211_link_data *link;
++
++			for_each_link_data(sdata, link) {
++				ieee80211_link_vlan_copy_chanctx(link);
++			}
++
+ 			netif_carrier_on(dev);
+ 			ieee80211_set_vif_encap_ops(sdata);
+ 		} else {
+diff --git a/net/mac80211/link.c b/net/mac80211/link.c
+index 46092fbcde90eb..9484449d6a3476 100644
+--- a/net/mac80211/link.c
++++ b/net/mac80211/link.c
+@@ -12,6 +12,71 @@
+ #include "key.h"
+ #include "debugfs_netdev.h"
+ 
++static void ieee80211_update_apvlan_links(struct ieee80211_sub_if_data *sdata)
++{
++	struct ieee80211_sub_if_data *vlan;
++	struct ieee80211_link_data *link;
++	u16 ap_bss_links = sdata->vif.valid_links;
++	u16 new_links, vlan_links;
++	unsigned long add;
++
++	list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) {
++		int link_id;
++
++		if (!vlan)
++			continue;
++
++		/* No support for 4addr with MLO yet */
++		if (vlan->wdev.use_4addr)
++			return;
++
++		vlan_links = vlan->vif.valid_links;
++
++		new_links = ap_bss_links;
++
++		add = new_links & ~vlan_links;
++		if (!add)
++			continue;
++
++		ieee80211_vif_set_links(vlan, add, 0);
++
++		for_each_set_bit(link_id, &add, IEEE80211_MLD_MAX_NUM_LINKS) {
++			link = sdata_dereference(vlan->link[link_id], vlan);
++			ieee80211_link_vlan_copy_chanctx(link);
++		}
++	}
++}
++
++void ieee80211_apvlan_link_setup(struct ieee80211_sub_if_data *sdata)
++{
++	struct ieee80211_sub_if_data *ap_bss = container_of(sdata->bss,
++					    struct ieee80211_sub_if_data, u.ap);
++	u16 new_links = ap_bss->vif.valid_links;
++	unsigned long add;
++	int link_id;
++
++	if (!ap_bss->vif.valid_links)
++		return;
++
++	add = new_links;
++	for_each_set_bit(link_id, &add, IEEE80211_MLD_MAX_NUM_LINKS) {
++		sdata->wdev.valid_links |= BIT(link_id);
++		ether_addr_copy(sdata->wdev.links[link_id].addr,
++				ap_bss->wdev.links[link_id].addr);
++	}
++
++	ieee80211_vif_set_links(sdata, new_links, 0);
++}
++
++void ieee80211_apvlan_link_clear(struct ieee80211_sub_if_data *sdata)
++{
++	if (!sdata->wdev.valid_links)
++		return;
++
++	sdata->wdev.valid_links = 0;
++	ieee80211_vif_clear_links(sdata);
++}
++
+ void ieee80211_link_setup(struct ieee80211_link_data *link)
+ {
+ 	if (link->sdata->vif.type == NL80211_IFTYPE_STATION)
+@@ -28,8 +93,16 @@ void ieee80211_link_init(struct ieee80211_sub_if_data *sdata,
+ 	if (link_id < 0)
+ 		link_id = 0;
+ 
+-	rcu_assign_pointer(sdata->vif.link_conf[link_id], link_conf);
+-	rcu_assign_pointer(sdata->link[link_id], link);
++	if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
++		struct ieee80211_sub_if_data *ap_bss;
++		struct ieee80211_bss_conf *ap_bss_conf;
++
++		ap_bss = container_of(sdata->bss,
++				      struct ieee80211_sub_if_data, u.ap);
++		ap_bss_conf = sdata_dereference(ap_bss->vif.link_conf[link_id],
++						ap_bss);
++		memcpy(link_conf, ap_bss_conf, sizeof(*link_conf));
++	}
+ 
+ 	link->sdata = sdata;
+ 	link->link_id = link_id;
+@@ -51,6 +124,7 @@ void ieee80211_link_init(struct ieee80211_sub_if_data *sdata,
+ 	if (!deflink) {
+ 		switch (sdata->vif.type) {
+ 		case NL80211_IFTYPE_AP:
++		case NL80211_IFTYPE_AP_VLAN:
+ 			ether_addr_copy(link_conf->addr,
+ 					sdata->wdev.links[link_id].addr);
+ 			link_conf->bssid = link_conf->addr;
+@@ -65,6 +139,9 @@ void ieee80211_link_init(struct ieee80211_sub_if_data *sdata,
+ 
+ 		ieee80211_link_debugfs_add(link);
+ 	}
++
++	rcu_assign_pointer(sdata->vif.link_conf[link_id], link_conf);
++	rcu_assign_pointer(sdata->link[link_id], link);
+ }
+ 
+ void ieee80211_link_stop(struct ieee80211_link_data *link)
+@@ -174,6 +251,7 @@ static void ieee80211_set_vif_links_bitmaps(struct ieee80211_sub_if_data *sdata,
+ 
+ 	switch (sdata->vif.type) {
+ 	case NL80211_IFTYPE_AP:
++	case NL80211_IFTYPE_AP_VLAN:
+ 		/* in an AP all links are always active */
+ 		sdata->vif.active_links = valid_links;
+ 
+@@ -275,12 +353,16 @@ static int ieee80211_vif_update_links(struct ieee80211_sub_if_data *sdata,
+ 		ieee80211_set_vif_links_bitmaps(sdata, new_links, dormant_links);
+ 
+ 		/* tell the driver */
+-		ret = drv_change_vif_links(sdata->local, sdata,
+-					   old_links & old_active,
+-					   new_links & sdata->vif.active_links,
+-					   old);
++		if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN)
++			ret = drv_change_vif_links(sdata->local, sdata,
++						   old_links & old_active,
++						   new_links & sdata->vif.active_links,
++						   old);
+ 		if (!new_links)
+ 			ieee80211_debugfs_recreate_netdev(sdata, false);
++
++		if (sdata->vif.type == NL80211_IFTYPE_AP)
++			ieee80211_update_apvlan_links(sdata);
+ 	}
+ 
+ 	if (ret) {
+diff --git a/net/mac80211/util.c b/net/mac80211/util.c
+index a98ae563613c04..77638e965726c5 100644
+--- a/net/mac80211/util.c
++++ b/net/mac80211/util.c
+@@ -3908,7 +3908,7 @@ void ieee80211_recalc_dtim(struct ieee80211_local *local,
+ {
+ 	u64 tsf = drv_get_tsf(local, sdata);
+ 	u64 dtim_count = 0;
+-	u16 beacon_int = sdata->vif.bss_conf.beacon_int * 1024;
++	u32 beacon_int = sdata->vif.bss_conf.beacon_int * 1024;
+ 	u8 dtim_period = sdata->vif.bss_conf.dtim_period;
+ 	struct ps_data *ps;
+ 	u8 bcns_from_dtim;
+diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
+index 17a4de75bfaf6c..e492655cb2212e 100644
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -2749,8 +2749,13 @@ rpc_decode_header(struct rpc_task *task, struct xdr_stream *xdr)
+ 	case -EPROTONOSUPPORT:
+ 		goto out_err;
+ 	case -EACCES:
+-		/* Re-encode with a fresh cred */
+-		fallthrough;
++		/* possible RPCSEC_GSS out-of-sequence event (RFC2203),
++		 * reset recv state and keep waiting, don't retransmit
++		 */
++		task->tk_rqstp->rq_reply_bytes_recvd = 0;
++		task->tk_status = xprt_request_enqueue_receive(task);
++		task->tk_action = call_transmit_status;
++		return -EBADMSG;
+ 	default:
+ 		goto out_garbage;
+ 	}
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 6b176230044397..45f8e21829ecd0 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -666,6 +666,11 @@ static void unix_sock_destructor(struct sock *sk)
+ #endif
+ }
+ 
++static unsigned int unix_skb_len(const struct sk_buff *skb)
++{
++	return skb->len - UNIXCB(skb).consumed;
++}
++
+ static void unix_release_sock(struct sock *sk, int embrion)
+ {
+ 	struct unix_sock *u = unix_sk(sk);
+@@ -700,10 +705,16 @@ static void unix_release_sock(struct sock *sk, int embrion)
+ 
+ 	if (skpair != NULL) {
+ 		if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
++			struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
++
++#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
++			if (skb && !unix_skb_len(skb))
++				skb = skb_peek_next(skb, &sk->sk_receive_queue);
++#endif
+ 			unix_state_lock(skpair);
+ 			/* No more writes */
+ 			WRITE_ONCE(skpair->sk_shutdown, SHUTDOWN_MASK);
+-			if (!skb_queue_empty_lockless(&sk->sk_receive_queue) || embrion)
++			if (skb || embrion)
+ 				WRITE_ONCE(skpair->sk_err, ECONNRESET);
+ 			unix_state_unlock(skpair);
+ 			skpair->sk_state_change(skpair);
+@@ -2594,11 +2605,6 @@ static long unix_stream_data_wait(struct sock *sk, long timeo,
+ 	return timeo;
+ }
+ 
+-static unsigned int unix_skb_len(const struct sk_buff *skb)
+-{
+-	return skb->len - UNIXCB(skb).consumed;
+-}
+-
+ struct unix_stream_read_state {
+ 	int (*recv_actor)(struct sk_buff *, int, int,
+ 			  struct unix_stream_read_state *);
+@@ -2613,11 +2619,11 @@ struct unix_stream_read_state {
+ #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
+ static int unix_stream_recv_urg(struct unix_stream_read_state *state)
+ {
++	struct sk_buff *oob_skb, *read_skb = NULL;
+ 	struct socket *sock = state->socket;
+ 	struct sock *sk = sock->sk;
+ 	struct unix_sock *u = unix_sk(sk);
+ 	int chunk = 1;
+-	struct sk_buff *oob_skb;
+ 
+ 	mutex_lock(&u->iolock);
+ 	unix_state_lock(sk);
+@@ -2632,9 +2638,16 @@ static int unix_stream_recv_urg(struct unix_stream_read_state *state)
+ 
+ 	oob_skb = u->oob_skb;
+ 
+-	if (!(state->flags & MSG_PEEK))
++	if (!(state->flags & MSG_PEEK)) {
+ 		WRITE_ONCE(u->oob_skb, NULL);
+ 
++		if (oob_skb->prev != (struct sk_buff *)&sk->sk_receive_queue &&
++		    !unix_skb_len(oob_skb->prev)) {
++			read_skb = oob_skb->prev;
++			__skb_unlink(read_skb, &sk->sk_receive_queue);
++		}
++	}
++
+ 	spin_unlock(&sk->sk_receive_queue.lock);
+ 	unix_state_unlock(sk);
+ 
+@@ -2645,6 +2658,8 @@ static int unix_stream_recv_urg(struct unix_stream_read_state *state)
+ 
+ 	mutex_unlock(&u->iolock);
+ 
++	consume_skb(read_skb);
++
+ 	if (chunk < 0)
+ 		return -EFAULT;
+ 
+diff --git a/rust/Makefile b/rust/Makefile
+index 93650b2ee7d575..b8b7f817c48e42 100644
+--- a/rust/Makefile
++++ b/rust/Makefile
+@@ -238,7 +238,7 @@ bindgen_skip_c_flags := -mno-fp-ret-in-387 -mpreferred-stack-boundary=% \
+ 	-fzero-call-used-regs=% -fno-stack-clash-protection \
+ 	-fno-inline-functions-called-once -fsanitize=bounds-strict \
+ 	-fstrict-flex-arrays=% -fmin-function-alignment=% \
+-	-fzero-init-padding-bits=% \
++	-fzero-init-padding-bits=% -mno-fdpic \
+ 	--param=% --param asan-%
+ 
+ # Derived from `scripts/Makefile.clang`.
+diff --git a/rust/macros/module.rs b/rust/macros/module.rs
+index da2a18b276e0bf..a5ea5850e307a0 100644
+--- a/rust/macros/module.rs
++++ b/rust/macros/module.rs
+@@ -260,6 +260,7 @@ mod __module_init {{
+                     #[cfg(MODULE)]
+                     #[doc(hidden)]
+                     #[no_mangle]
++                    #[link_section = \".exit.text\"]
+                     pub extern \"C\" fn cleanup_module() {{
+                         // SAFETY:
+                         // - This function is inaccessible to the outside due to the double
+diff --git a/sound/pci/hda/hda_bind.c b/sound/pci/hda/hda_bind.c
+index 90633970b59f72..f8f1b1f6b1382c 100644
+--- a/sound/pci/hda/hda_bind.c
++++ b/sound/pci/hda/hda_bind.c
+@@ -44,7 +44,7 @@ static void hda_codec_unsol_event(struct hdac_device *dev, unsigned int ev)
+ 	struct hda_codec *codec = container_of(dev, struct hda_codec, core);
+ 
+ 	/* ignore unsol events during shutdown */
+-	if (codec->bus->shutdown)
++	if (codec->card->shutdown || codec->bus->shutdown)
+ 		return;
+ 
+ 	/* ignore unsol events during system suspend/resume */
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 1872c8b7505373..d4e325b785332a 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -2727,6 +2727,9 @@ static const struct pci_device_id azx_ids[] = {
+ 	{ PCI_VDEVICE(ATI, 0xab38),
+ 	  .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS |
+ 	  AZX_DCAPS_PM_RUNTIME },
++	{ PCI_VDEVICE(ATI, 0xab40),
++	  .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS |
++	  AZX_DCAPS_PM_RUNTIME },
+ 	/* GLENFLY */
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_GLENFLY, PCI_ANY_ID),
+ 	  .class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8,
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index cb41cd2ba0ef17..30e9e26c5b2a7d 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -10859,6 +10859,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1043, 0x1204, "ASUS Strix G615JHR_JMR_JPR", ALC287_FIXUP_TAS2781_I2C),
+ 	SND_PCI_QUIRK(0x1043, 0x1214, "ASUS Strix G615LH_LM_LP", ALC287_FIXUP_TAS2781_I2C),
+ 	SND_PCI_QUIRK(0x1043, 0x125e, "ASUS Q524UQK", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1043, 0x1264, "ASUS UM5606KA", ALC294_FIXUP_BASS_SPEAKER_15),
+ 	SND_PCI_QUIRK(0x1043, 0x1271, "ASUS X430UN", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1043, 0x1290, "ASUS X441SA", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1043, 0x1294, "ASUS B3405CVA", ALC245_FIXUP_CS35L41_SPI_2),
+@@ -10933,6 +10934,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1043, 0x1da2, "ASUS UP6502ZA/ZD", ALC245_FIXUP_CS35L41_SPI_2),
+ 	SND_PCI_QUIRK(0x1043, 0x1df3, "ASUS UM5606WA", ALC294_FIXUP_BASS_SPEAKER_15),
+ 	SND_PCI_QUIRK(0x1043, 0x1e02, "ASUS UX3402ZA", ALC245_FIXUP_CS35L41_SPI_2),
++	SND_PCI_QUIRK(0x1043, 0x1e10, "ASUS VivoBook X507UAR", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502),
+ 	SND_PCI_QUIRK(0x1043, 0x1e12, "ASUS UM3402", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x1043, 0x1e1f, "ASUS Vivobook 15 X1504VAP", ALC2XX_FIXUP_HEADSET_MIC),
+diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
+index 3d9da93d22ee84..b27966f82c8b65 100644
+--- a/sound/soc/amd/yc/acp6x-mach.c
++++ b/sound/soc/amd/yc/acp6x-mach.c
+@@ -353,6 +353,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "83J2"),
+ 		}
+ 	},
++	{
++		.driver_data = &acp6x_card,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "83J3"),
++		}
++	},
+ 	{
+ 		.driver_data = &acp6x_card,
+ 		.matches = {
+diff --git a/sound/soc/codecs/rt1320-sdw.c b/sound/soc/codecs/rt1320-sdw.c
+index f2d194e76a947f..8755a63478d796 100644
+--- a/sound/soc/codecs/rt1320-sdw.c
++++ b/sound/soc/codecs/rt1320-sdw.c
+@@ -2085,7 +2085,7 @@ static const struct reg_sequence rt1320_vc_patch_code_write[] = {
+ 	{ 0x3fc2bfc0, 0x03 },
+ 	{ 0x0000d486, 0x43 },
+ 	{ SDW_SDCA_CTL(FUNC_NUM_AMP, RT1320_SDCA_ENT_PDE23, RT1320_SDCA_CTL_REQ_POWER_STATE, 0), 0x00 },
+-	{ 0x1000db00, 0x04 },
++	{ 0x1000db00, 0x07 },
+ 	{ 0x1000db01, 0x00 },
+ 	{ 0x1000db02, 0x11 },
+ 	{ 0x1000db03, 0x00 },
+@@ -2106,6 +2106,21 @@ static const struct reg_sequence rt1320_vc_patch_code_write[] = {
+ 	{ 0x1000db12, 0x00 },
+ 	{ 0x1000db13, 0x00 },
+ 	{ 0x1000db14, 0x45 },
++	{ 0x1000db15, 0x0d },
++	{ 0x1000db16, 0x01 },
++	{ 0x1000db17, 0x00 },
++	{ 0x1000db18, 0x00 },
++	{ 0x1000db19, 0xbf },
++	{ 0x1000db1a, 0x13 },
++	{ 0x1000db1b, 0x09 },
++	{ 0x1000db1c, 0x00 },
++	{ 0x1000db1d, 0x00 },
++	{ 0x1000db1e, 0x00 },
++	{ 0x1000db1f, 0x12 },
++	{ 0x1000db20, 0x09 },
++	{ 0x1000db21, 0x00 },
++	{ 0x1000db22, 0x00 },
++	{ 0x1000db23, 0x00 },
+ 	{ 0x0000d540, 0x01 },
+ 	{ 0x0000c081, 0xfc },
+ 	{ 0x0000f01e, 0x80 },
+diff --git a/sound/soc/codecs/wcd9335.c b/sound/soc/codecs/wcd9335.c
+index 373a31ddccb2d6..1375ac571fbf38 100644
+--- a/sound/soc/codecs/wcd9335.c
++++ b/sound/soc/codecs/wcd9335.c
+@@ -17,7 +17,7 @@
+ #include <sound/soc.h>
+ #include <sound/pcm_params.h>
+ #include <sound/soc-dapm.h>
+-#include <linux/of_gpio.h>
++#include <linux/gpio/consumer.h>
+ #include <linux/of.h>
+ #include <linux/of_irq.h>
+ #include <sound/tlv.h>
+@@ -329,8 +329,7 @@ struct wcd9335_codec {
+ 	int comp_enabled[COMPANDER_MAX];
+ 
+ 	int intr1;
+-	int reset_gpio;
+-	struct regulator_bulk_data supplies[WCD9335_MAX_SUPPLY];
++	struct gpio_desc *reset_gpio;
+ 
+ 	unsigned int rx_port_value[WCD9335_RX_MAX];
+ 	unsigned int tx_port_value[WCD9335_TX_MAX];
+@@ -353,6 +352,10 @@ struct wcd9335_irq {
+ 	char *name;
+ };
+ 
++static const char * const wcd9335_supplies[] = {
++	"vdd-buck", "vdd-buck-sido", "vdd-tx", "vdd-rx", "vdd-io",
++};
++
+ static const struct wcd9335_slim_ch wcd9335_tx_chs[WCD9335_TX_MAX] = {
+ 	WCD9335_SLIM_TX_CH(0),
+ 	WCD9335_SLIM_TX_CH(1),
+@@ -4973,12 +4976,11 @@ static const struct regmap_irq_chip wcd9335_regmap_irq1_chip = {
+ static int wcd9335_parse_dt(struct wcd9335_codec *wcd)
+ {
+ 	struct device *dev = wcd->dev;
+-	struct device_node *np = dev->of_node;
+ 	int ret;
+ 
+-	wcd->reset_gpio = of_get_named_gpio(np,	"reset-gpios", 0);
+-	if (wcd->reset_gpio < 0)
+-		return dev_err_probe(dev, wcd->reset_gpio, "Reset GPIO missing from DT\n");
++	wcd->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
++	if (IS_ERR(wcd->reset_gpio))
++		return dev_err_probe(dev, PTR_ERR(wcd->reset_gpio), "Reset GPIO missing from DT\n");
+ 
+ 	wcd->mclk = devm_clk_get(dev, "mclk");
+ 	if (IS_ERR(wcd->mclk))
+@@ -4988,30 +4990,16 @@ static int wcd9335_parse_dt(struct wcd9335_codec *wcd)
+ 	if (IS_ERR(wcd->native_clk))
+ 		return dev_err_probe(dev, PTR_ERR(wcd->native_clk), "slimbus clock not found\n");
+ 
+-	wcd->supplies[0].supply = "vdd-buck";
+-	wcd->supplies[1].supply = "vdd-buck-sido";
+-	wcd->supplies[2].supply = "vdd-tx";
+-	wcd->supplies[3].supply = "vdd-rx";
+-	wcd->supplies[4].supply = "vdd-io";
+-
+-	ret = regulator_bulk_get(dev, WCD9335_MAX_SUPPLY, wcd->supplies);
++	ret = devm_regulator_bulk_get_enable(dev, ARRAY_SIZE(wcd9335_supplies),
++					     wcd9335_supplies);
+ 	if (ret)
+-		return dev_err_probe(dev, ret, "Failed to get supplies\n");
++		return dev_err_probe(dev, ret, "Failed to get and enable supplies\n");
+ 
+ 	return 0;
+ }
+ 
+ static int wcd9335_power_on_reset(struct wcd9335_codec *wcd)
+ {
+-	struct device *dev = wcd->dev;
+-	int ret;
+-
+-	ret = regulator_bulk_enable(WCD9335_MAX_SUPPLY, wcd->supplies);
+-	if (ret) {
+-		dev_err(dev, "Failed to get supplies: err = %d\n", ret);
+-		return ret;
+-	}
+-
+ 	/*
+ 	 * For WCD9335, it takes about 600us for the Vout_A and
+ 	 * Vout_D to be ready after BUCK_SIDO is powered up.
+@@ -5021,9 +5009,9 @@ static int wcd9335_power_on_reset(struct wcd9335_codec *wcd)
+ 	 */
+ 	usleep_range(600, 650);
+ 
+-	gpio_direction_output(wcd->reset_gpio, 0);
++	gpiod_set_value(wcd->reset_gpio, 1);
+ 	msleep(20);
+-	gpio_set_value(wcd->reset_gpio, 1);
++	gpiod_set_value(wcd->reset_gpio, 0);
+ 	msleep(20);
+ 
+ 	return 0;
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index c7387081577cd3..0da4ee9757c018 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -2282,6 +2282,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ 		   QUIRK_FLAG_DISABLE_AUTOSUSPEND),
+ 	DEVICE_FLG(0x17aa, 0x104d, /* Lenovo ThinkStation P620 Internal Speaker + Front Headset */
+ 		   QUIRK_FLAG_DISABLE_AUTOSUSPEND),
++	DEVICE_FLG(0x17ef, 0x3083, /* Lenovo TBT3 dock */
++		   QUIRK_FLAG_GET_SAMPLE_RATE),
+ 	DEVICE_FLG(0x1852, 0x5062, /* Luxman D-08u */
+ 		   QUIRK_FLAG_ITF_USB_DSD_DAC | QUIRK_FLAG_CTL_MSG_DELAY),
+ 	DEVICE_FLG(0x1852, 0x5065, /* Luxman DA-06 */
+diff --git a/sound/usb/stream.c b/sound/usb/stream.c
+index c1ea8844a46fc4..aa91d63749f2ca 100644
+--- a/sound/usb/stream.c
++++ b/sound/usb/stream.c
+@@ -987,6 +987,8 @@ snd_usb_get_audioformat_uac3(struct snd_usb_audio *chip,
+ 	 * and request Cluster Descriptor
+ 	 */
+ 	wLength = le16_to_cpu(hc_header.wLength);
++	if (wLength < sizeof(cluster))
++		return NULL;
+ 	cluster = kzalloc(wLength, GFP_KERNEL);
+ 	if (!cluster)
+ 		return ERR_PTR(-ENOMEM);
+diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c
+index 46cce18c830864..12306b5de3efbe 100644
+--- a/tools/lib/bpf/btf_dump.c
++++ b/tools/lib/bpf/btf_dump.c
+@@ -225,6 +225,9 @@ static void btf_dump_free_names(struct hashmap *map)
+ 	size_t bkt;
+ 	struct hashmap_entry *cur;
+ 
++	if (!map)
++		return;
++
+ 	hashmap__for_each_entry(map, cur, bkt)
+ 		free((void *)cur->pkey);
+ 
+diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
+index 1290314da67618..36e341b4b77bf2 100644
+--- a/tools/lib/bpf/libbpf.c
++++ b/tools/lib/bpf/libbpf.c
+@@ -596,7 +596,7 @@ struct extern_desc {
+ 	int sym_idx;
+ 	int btf_id;
+ 	int sec_btf_id;
+-	const char *name;
++	char *name;
+ 	char *essent_name;
+ 	bool is_set;
+ 	bool is_weak;
+@@ -4223,7 +4223,9 @@ static int bpf_object__collect_externs(struct bpf_object *obj)
+ 			return ext->btf_id;
+ 		}
+ 		t = btf__type_by_id(obj->btf, ext->btf_id);
+-		ext->name = btf__name_by_offset(obj->btf, t->name_off);
++		ext->name = strdup(btf__name_by_offset(obj->btf, t->name_off));
++		if (!ext->name)
++			return -ENOMEM;
+ 		ext->sym_idx = i;
+ 		ext->is_weak = ELF64_ST_BIND(sym->st_info) == STB_WEAK;
+ 
+@@ -9062,8 +9064,10 @@ void bpf_object__close(struct bpf_object *obj)
+ 	zfree(&obj->btf_custom_path);
+ 	zfree(&obj->kconfig);
+ 
+-	for (i = 0; i < obj->nr_extern; i++)
++	for (i = 0; i < obj->nr_extern; i++) {
++		zfree(&obj->externs[i].name);
+ 		zfree(&obj->externs[i].essent_name);
++	}
+ 
+ 	zfree(&obj->externs);
+ 	obj->nr_extern = 0;
+diff --git a/tools/testing/selftests/bpf/progs/test_global_map_resize.c b/tools/testing/selftests/bpf/progs/test_global_map_resize.c
+index a3f220ba7025bd..ee65bad0436d0c 100644
+--- a/tools/testing/selftests/bpf/progs/test_global_map_resize.c
++++ b/tools/testing/selftests/bpf/progs/test_global_map_resize.c
+@@ -32,6 +32,16 @@ int my_int_last SEC(".data.array_not_last");
+ 
+ int percpu_arr[1] SEC(".data.percpu_arr");
+ 
++/* at least one extern is included, to ensure that a specific
++ * regression is tested whereby resizing resulted in a use-after-free
++ * bug after type information is invalidated by the resize operation.
++ *
++ * There isn't a particularly good API to test for this specific condition,
++ * but by having externs for the resizing tests it will cover this path.
++ */
++extern int LINUX_KERNEL_VERSION __kconfig;
++long version_sink;
++
+ SEC("tp/syscalls/sys_enter_getpid")
+ int bss_array_sum(void *ctx)
+ {
+@@ -44,6 +54,9 @@ int bss_array_sum(void *ctx)
+ 	for (size_t i = 0; i < bss_array_len; ++i)
+ 		sum += array[i];
+ 
++	/* see above; ensure this is not optimized out */
++	version_sink = LINUX_KERNEL_VERSION;
++
+ 	return 0;
+ }
+ 
+@@ -59,6 +72,9 @@ int data_array_sum(void *ctx)
+ 	for (size_t i = 0; i < data_array_len; ++i)
+ 		sum += my_array[i];
+ 
++	/* see above; ensure this is not optimized out */
++	version_sink = LINUX_KERNEL_VERSION;
++
+ 	return 0;
+ }
+ 


* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2025-06-27 11:26 Mike Pagano
  0 siblings, 0 replies; 82+ messages in thread
From: Mike Pagano @ 2025-06-27 11:26 UTC (permalink / raw
  To: gentoo-commits

commit:     8dba58346968ee9f6bfbad6d703085158994e468
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Jun 27 11:26:44 2025 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Jun 27 11:26:44 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=8dba5834

Remove redundant patch

Removed:
1740_x86-insn-decoder-test-allow-longer-symbol-names.patch

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                        |  4 --
 ...sn-decoder-test-allow-longer-symbol-names.patch | 49 ----------------------
 2 files changed, 53 deletions(-)

diff --git a/0000_README b/0000_README
index 61416a87..baad7561 100644
--- a/0000_README
+++ b/0000_README
@@ -199,10 +199,6 @@ Patch:  1730_parisc-Disable-prctl.patch
 From:   https://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux.git
 Desc:   prctl: Temporarily disable prctl(PR_SET_MDWE) on parisc
 
-Patch:  1740_x86-insn-decoder-test-allow-longer-symbol-names.patch
-From:   https://gitlab.com/cki-project/kernel-ark/-/commit/8d4a52c3921d278f27241fc0c6949d8fdc13a7f5
-Desc:   x86/insn_decoder_test: allow longer symbol-names
-
 Patch:  2000_BT-Check-key-sizes-only-if-Secure-Simple-Pairing-enabled.patch
 From:   https://lore.kernel.org/linux-bluetooth/20190522070540.48895-1-marcel@holtmann.org/raw
 Desc:   Bluetooth: Check key sizes only when Secure Simple Pairing is enabled. See bug #686758

diff --git a/1740_x86-insn-decoder-test-allow-longer-symbol-names.patch b/1740_x86-insn-decoder-test-allow-longer-symbol-names.patch
deleted file mode 100644
index 70c706ba..00000000
--- a/1740_x86-insn-decoder-test-allow-longer-symbol-names.patch
+++ /dev/null
@@ -1,49 +0,0 @@
-From 8d4a52c3921d278f27241fc0c6949d8fdc13a7f5 Mon Sep 17 00:00:00 2001
-From: David Rheinsberg <david@readahead.eu>
-Date: Tue, 24 Jan 2023 12:04:59 +0100
-Subject: [PATCH] x86/insn_decoder_test: allow longer symbol-names
-
-Increase the allowed line-length of the insn-decoder-test to 4k to allow
-for symbol-names longer than 256 characters.
-
-The insn-decoder-test takes objdump output as input, which may contain
-symbol-names as instruction arguments. With rust-code entering the
-kernel, those symbol-names will include mangled-symbols which might
-exceed the current line-length-limit of the tool.
-
-By bumping the line-length-limit of the tool to 4k, we get a reasonable
-buffer for all objdump outputs I have seen so far. Unfortunately, ELF
-symbol-names are not restricted in length, so technically this might
-still end up failing if we encounter longer names in the future.
-
-My compile-failure looks like this:
-
-    arch/x86/tools/insn_decoder_test: error: malformed line 1152000:
-    tBb_+0xf2>
-
-..which overflowed by 10 characters reading this line:
-
-    ffffffff81458193:   74 3d                   je     ffffffff814581d2 <_RNvXse_NtNtNtCshGpAVYOtgW1_4core4iter8adapters7flattenINtB5_13FlattenCompatINtNtB7_3map3MapNtNtNtBb_3str4iter5CharsNtB1v_17CharEscapeDefaultENtNtBb_4char13EscapeDefaultENtNtBb_3fmt5Debug3fmtBb_+0xf2>
-
-Signed-off-by: David Rheinsberg <david@readahead.eu>
-Signed-off-by: Scott Weaver <scweaver@redhat.com>
----
- arch/x86/tools/insn_decoder_test.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/arch/x86/tools/insn_decoder_test.c b/arch/x86/tools/insn_decoder_test.c
-index 472540aeabc23..366e07546344b 100644
---- a/arch/x86/tools/insn_decoder_test.c
-+++ b/arch/x86/tools/insn_decoder_test.c
-@@ -106,7 +106,7 @@ static void parse_args(int argc, char **argv)
- 	}
- }
- 
--#define BUFSIZE 256
-+#define BUFSIZE 4096
- 
- int main(int argc, char **argv)
- {
--- 
-GitLab
-


* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2025-06-27 11:17 Mike Pagano
  0 siblings, 0 replies; 82+ messages in thread
From: Mike Pagano @ 2025-06-27 11:17 UTC (permalink / raw
  To: gentoo-commits

commit:     4ad4589f0ce0ba9ffc01969fb506093634267102
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Jun 27 11:17:48 2025 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Jun 27 11:17:48 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=4ad4589f

Linux patch 6.12.35

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |     4 +
 1034_linux-6.12.35.patch | 14477 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 14481 insertions(+)

diff --git a/0000_README b/0000_README
index 7ab46567..61416a87 100644
--- a/0000_README
+++ b/0000_README
@@ -179,6 +179,10 @@ Patch:  1033_linux-6.12.34.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.12.34
 
+Patch:  1034_linux-6.12.35.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.12.35
+
 Patch:  1500_fortify-copy-size-value-range-tracking-fix.patch
 From:   https://git.kernel.org/
 Desc:   fortify: Hide run-time copy size from value range tracking

diff --git a/1034_linux-6.12.35.patch b/1034_linux-6.12.35.patch
new file mode 100644
index 00000000..a3c10dbc
--- /dev/null
+++ b/1034_linux-6.12.35.patch
@@ -0,0 +1,14477 @@
+diff --git a/Documentation/devicetree/bindings/i2c/nvidia,tegra20-i2c.yaml b/Documentation/devicetree/bindings/i2c/nvidia,tegra20-i2c.yaml
+index b57ae6963e6298..6b6f6762d122f9 100644
+--- a/Documentation/devicetree/bindings/i2c/nvidia,tegra20-i2c.yaml
++++ b/Documentation/devicetree/bindings/i2c/nvidia,tegra20-i2c.yaml
+@@ -97,7 +97,10 @@ properties:
+ 
+   resets:
+     items:
+-      - description: module reset
++      - description:
++          Module reset. This property is optional for controllers in Tegra194,
++          Tegra234 etc where an internal software reset is available as an
++          alternative.
+ 
+   reset-names:
+     items:
+@@ -116,6 +119,13 @@ properties:
+       - const: rx
+       - const: tx
+ 
++required:
++  - compatible
++  - reg
++  - interrupts
++  - clocks
++  - clock-names
++
+ allOf:
+   - $ref: /schemas/i2c/i2c-controller.yaml
+   - if:
+@@ -169,6 +179,18 @@ allOf:
+       properties:
+         power-domains: false
+ 
++  - if:
++      not:
++        properties:
++          compatible:
++            contains:
++              enum:
++                - nvidia,tegra194-i2c
++    then:
++      required:
++        - resets
++        - reset-names
++
+ unevaluatedProperties: false
+ 
+ examples:
+diff --git a/Documentation/kbuild/makefiles.rst b/Documentation/kbuild/makefiles.rst
+index 7964e0c245aebe..81607ce407595e 100644
+--- a/Documentation/kbuild/makefiles.rst
++++ b/Documentation/kbuild/makefiles.rst
+@@ -656,6 +656,20 @@ cc-cross-prefix
+             endif
+     endif
+ 
++$(RUSTC) support functions
++--------------------------
++
++rustc-min-version
++  rustc-min-version tests if the value of $(CONFIG_RUSTC_VERSION) is greater
++  than or equal to the provided value and evaluates to y if so.
++
++  Example::
++
++    rustflags-$(call rustc-min-version, 108500) := -Cfoo
++
++  In this example, rustflags-y will be assigned the value -Cfoo if
++  $(CONFIG_RUSTC_VERSION) is >= 1.85.0.
++
+ $(LD) support functions
+ -----------------------
+ 
+diff --git a/Makefile b/Makefile
+index b58a061cb35955..535df76f6f78c8 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 12
+-SUBLEVEL = 34
++SUBLEVEL = 35
+ EXTRAVERSION =
+ NAME = Baby Opossum Posse
+ 
+diff --git a/arch/arm/mach-omap2/clockdomain.h b/arch/arm/mach-omap2/clockdomain.h
+index c36fb27212615a..86a2f9e5d0ef9d 100644
+--- a/arch/arm/mach-omap2/clockdomain.h
++++ b/arch/arm/mach-omap2/clockdomain.h
+@@ -48,6 +48,7 @@
+ #define CLKDM_NO_AUTODEPS			(1 << 4)
+ #define CLKDM_ACTIVE_WITH_MPU			(1 << 5)
+ #define CLKDM_MISSING_IDLE_REPORTING		(1 << 6)
++#define CLKDM_STANDBY_FORCE_WAKEUP		BIT(7)
+ 
+ #define CLKDM_CAN_HWSUP		(CLKDM_CAN_ENABLE_AUTO | CLKDM_CAN_DISABLE_AUTO)
+ #define CLKDM_CAN_SWSUP		(CLKDM_CAN_FORCE_SLEEP | CLKDM_CAN_FORCE_WAKEUP)
+diff --git a/arch/arm/mach-omap2/clockdomains33xx_data.c b/arch/arm/mach-omap2/clockdomains33xx_data.c
+index 87f4e927eb1830..c05a3c07d44863 100644
+--- a/arch/arm/mach-omap2/clockdomains33xx_data.c
++++ b/arch/arm/mach-omap2/clockdomains33xx_data.c
+@@ -19,7 +19,7 @@ static struct clockdomain l4ls_am33xx_clkdm = {
+ 	.pwrdm		= { .name = "per_pwrdm" },
+ 	.cm_inst	= AM33XX_CM_PER_MOD,
+ 	.clkdm_offs	= AM33XX_CM_PER_L4LS_CLKSTCTRL_OFFSET,
+-	.flags		= CLKDM_CAN_SWSUP,
++	.flags		= CLKDM_CAN_SWSUP | CLKDM_STANDBY_FORCE_WAKEUP,
+ };
+ 
+ static struct clockdomain l3s_am33xx_clkdm = {
+diff --git a/arch/arm/mach-omap2/cm33xx.c b/arch/arm/mach-omap2/cm33xx.c
+index acdf72a541c02a..a4dd42abda89b0 100644
+--- a/arch/arm/mach-omap2/cm33xx.c
++++ b/arch/arm/mach-omap2/cm33xx.c
+@@ -20,6 +20,9 @@
+ #include "cm-regbits-34xx.h"
+ #include "cm-regbits-33xx.h"
+ #include "prm33xx.h"
++#if IS_ENABLED(CONFIG_SUSPEND)
++#include <linux/suspend.h>
++#endif
+ 
+ /*
+  * CLKCTRL_IDLEST_*: possible values for the CM_*_CLKCTRL.IDLEST bitfield:
+@@ -328,8 +331,17 @@ static int am33xx_clkdm_clk_disable(struct clockdomain *clkdm)
+ {
+ 	bool hwsup = false;
+ 
++#if IS_ENABLED(CONFIG_SUSPEND)
++	/*
++	 * In case of standby, don't put the l4ls clk domain to sleep.
++	 * Since CM3 PM FW doesn't wake up/enable the l4ls clk domain
++	 * upon wake-up, CM3 PM FW fails to wake up the MPU.
++	 */
++	if (pm_suspend_target_state == PM_SUSPEND_STANDBY &&
++	    (clkdm->flags & CLKDM_STANDBY_FORCE_WAKEUP))
++		return 0;
++#endif
+ 	hwsup = am33xx_cm_is_clkdm_in_hwsup(clkdm->cm_inst, clkdm->clkdm_offs);
+-
+ 	if (!hwsup && (clkdm->flags & CLKDM_CAN_FORCE_SLEEP))
+ 		am33xx_clkdm_sleep(clkdm);
+ 
+diff --git a/arch/arm/mach-omap2/pmic-cpcap.c b/arch/arm/mach-omap2/pmic-cpcap.c
+index 4f31e61c0c90ca..9f9a20274db848 100644
+--- a/arch/arm/mach-omap2/pmic-cpcap.c
++++ b/arch/arm/mach-omap2/pmic-cpcap.c
+@@ -264,7 +264,11 @@ int __init omap4_cpcap_init(void)
+ 
+ static int __init cpcap_late_init(void)
+ {
+-	omap4_vc_set_pmic_signaling(PWRDM_POWER_RET);
++	if (!of_find_compatible_node(NULL, NULL, "motorola,cpcap"))
++		return 0;
++
++	if (soc_is_omap443x() || soc_is_omap446x() || soc_is_omap447x())
++		omap4_vc_set_pmic_signaling(PWRDM_POWER_RET);
+ 
+ 	return 0;
+ }
+diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
+index 89f1c97f3079c1..cbf5a03d2b1896 100644
+--- a/arch/arm/mm/ioremap.c
++++ b/arch/arm/mm/ioremap.c
+@@ -515,7 +515,5 @@ void __init early_ioremap_init(void)
+ bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
+ 				 unsigned long flags)
+ {
+-	unsigned long pfn = PHYS_PFN(offset);
+-
+-	return memblock_is_map_memory(pfn);
++	return memblock_is_map_memory(offset);
+ }
+diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
+index 19a4988621ac9a..88029d38b3c65e 100644
+--- a/arch/arm64/Makefile
++++ b/arch/arm64/Makefile
+@@ -48,7 +48,7 @@ KBUILD_CFLAGS	+= $(CC_FLAGS_NO_FPU) \
+ KBUILD_CFLAGS	+= $(call cc-disable-warning, psabi)
+ KBUILD_AFLAGS	+= $(compat_vdso)
+ 
+-ifeq ($(call test-ge, $(CONFIG_RUSTC_VERSION), 108500),y)
++ifeq ($(call rustc-min-version, 108500),y)
+ KBUILD_RUSTFLAGS += --target=aarch64-unknown-none-softfloat
+ else
+ KBUILD_RUSTFLAGS += --target=aarch64-unknown-none -Ctarget-feature="-neon"
+diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
+index 9edbd871c31bf0..5f12cdc2b9671a 100644
+--- a/arch/arm64/include/asm/tlbflush.h
++++ b/arch/arm64/include/asm/tlbflush.h
+@@ -330,13 +330,14 @@ static inline void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *b
+ }
+ 
+ /*
+- * If mprotect/munmap/etc occurs during TLB batched flushing, we need to
+- * synchronise all the TLBI issued with a DSB to avoid the race mentioned in
+- * flush_tlb_batched_pending().
++ * If mprotect/munmap/etc occurs during TLB batched flushing, we need to ensure
++ * all the previously issued TLBIs targeting mm have completed. But since we
++ * can be executing on a remote CPU, a DSB cannot guarantee this like it can
++ * for arch_tlbbatch_flush(). Our only option is to flush the entire mm.
+  */
+ static inline void arch_flush_tlb_batched_pending(struct mm_struct *mm)
+ {
+-	dsb(ish);
++	flush_tlb_mm(mm);
+ }
+ 
+ /*
+diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
+index 1559a239137f32..1a8f4284cb69a0 100644
+--- a/arch/arm64/kernel/ptrace.c
++++ b/arch/arm64/kernel/ptrace.c
+@@ -140,7 +140,7 @@ unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
+ 
+ 	addr += n;
+ 	if (regs_within_kernel_stack(regs, (unsigned long)addr))
+-		return *addr;
++		return READ_ONCE_NOCHECK(*addr);
+ 	else
+ 		return 0;
+ }
+diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
+index 9bcd51fd67d4e0..aed8d32979d9c7 100644
+--- a/arch/arm64/mm/mmu.c
++++ b/arch/arm64/mm/mmu.c
+@@ -1285,7 +1285,8 @@ int pud_free_pmd_page(pud_t *pudp, unsigned long addr)
+ 	next = addr;
+ 	end = addr + PUD_SIZE;
+ 	do {
+-		pmd_free_pte_page(pmdp, next);
++		if (pmd_present(pmdp_get(pmdp)))
++			pmd_free_pte_page(pmdp, next);
+ 	} while (pmdp++, next += PMD_SIZE, next != end);
+ 
+ 	pud_clear(pudp);
+diff --git a/arch/loongarch/include/asm/irqflags.h b/arch/loongarch/include/asm/irqflags.h
+index 319a8c616f1f5b..003172b8406be7 100644
+--- a/arch/loongarch/include/asm/irqflags.h
++++ b/arch/loongarch/include/asm/irqflags.h
+@@ -14,40 +14,48 @@
+ static inline void arch_local_irq_enable(void)
+ {
+ 	u32 flags = CSR_CRMD_IE;
++	register u32 mask asm("t0") = CSR_CRMD_IE;
++
+ 	__asm__ __volatile__(
+ 		"csrxchg %[val], %[mask], %[reg]\n\t"
+ 		: [val] "+r" (flags)
+-		: [mask] "r" (CSR_CRMD_IE), [reg] "i" (LOONGARCH_CSR_CRMD)
++		: [mask] "r" (mask), [reg] "i" (LOONGARCH_CSR_CRMD)
+ 		: "memory");
+ }
+ 
+ static inline void arch_local_irq_disable(void)
+ {
+ 	u32 flags = 0;
++	register u32 mask asm("t0") = CSR_CRMD_IE;
++
+ 	__asm__ __volatile__(
+ 		"csrxchg %[val], %[mask], %[reg]\n\t"
+ 		: [val] "+r" (flags)
+-		: [mask] "r" (CSR_CRMD_IE), [reg] "i" (LOONGARCH_CSR_CRMD)
++		: [mask] "r" (mask), [reg] "i" (LOONGARCH_CSR_CRMD)
+ 		: "memory");
+ }
+ 
+ static inline unsigned long arch_local_irq_save(void)
+ {
+ 	u32 flags = 0;
++	register u32 mask asm("t0") = CSR_CRMD_IE;
++
+ 	__asm__ __volatile__(
+ 		"csrxchg %[val], %[mask], %[reg]\n\t"
+ 		: [val] "+r" (flags)
+-		: [mask] "r" (CSR_CRMD_IE), [reg] "i" (LOONGARCH_CSR_CRMD)
++		: [mask] "r" (mask), [reg] "i" (LOONGARCH_CSR_CRMD)
+ 		: "memory");
+ 	return flags;
+ }
+ 
+ static inline void arch_local_irq_restore(unsigned long flags)
+ {
++	register u32 mask asm("t0") = CSR_CRMD_IE;
++
+ 	__asm__ __volatile__(
+ 		"csrxchg %[val], %[mask], %[reg]\n\t"
+ 		: [val] "+r" (flags)
+-		: [mask] "r" (CSR_CRMD_IE), [reg] "i" (LOONGARCH_CSR_CRMD)
++		: [mask] "r" (mask), [reg] "i" (LOONGARCH_CSR_CRMD)
+ 		: "memory");
+ }
+ 
+diff --git a/arch/loongarch/include/asm/vdso/getrandom.h b/arch/loongarch/include/asm/vdso/getrandom.h
+index 02f36772541b7d..7e9edc1cb610df 100644
+--- a/arch/loongarch/include/asm/vdso/getrandom.h
++++ b/arch/loongarch/include/asm/vdso/getrandom.h
+@@ -20,7 +20,7 @@ static __always_inline ssize_t getrandom_syscall(void *_buffer, size_t _len, uns
+ 
+ 	asm volatile(
+ 	"      syscall 0\n"
+-	: "+r" (ret)
++	: "=r" (ret)
+ 	: "r" (nr), "r" (buffer), "r" (len), "r" (flags)
+ 	: "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7", "$t8",
+ 	  "memory");
+diff --git a/arch/loongarch/include/asm/vdso/gettimeofday.h b/arch/loongarch/include/asm/vdso/gettimeofday.h
+index 89e6b222c2f2d1..2d1a9c27af2925 100644
+--- a/arch/loongarch/include/asm/vdso/gettimeofday.h
++++ b/arch/loongarch/include/asm/vdso/gettimeofday.h
+@@ -25,7 +25,7 @@ static __always_inline long gettimeofday_fallback(
+ 
+ 	asm volatile(
+ 	"       syscall 0\n"
+-	: "+r" (ret)
++	: "=r" (ret)
+ 	: "r" (nr), "r" (tv), "r" (tz)
+ 	: "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7",
+ 	  "$t8", "memory");
+@@ -44,7 +44,7 @@ static __always_inline long clock_gettime_fallback(
+ 
+ 	asm volatile(
+ 	"       syscall 0\n"
+-	: "+r" (ret)
++	: "=r" (ret)
+ 	: "r" (nr), "r" (clkid), "r" (ts)
+ 	: "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7",
+ 	  "$t8", "memory");
+@@ -63,7 +63,7 @@ static __always_inline int clock_getres_fallback(
+ 
+ 	asm volatile(
+ 	"       syscall 0\n"
+-	: "+r" (ret)
++	: "=r" (ret)
+ 	: "r" (nr), "r" (clkid), "r" (ts)
+ 	: "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7",
+ 	  "$t8", "memory");
+diff --git a/arch/loongarch/mm/hugetlbpage.c b/arch/loongarch/mm/hugetlbpage.c
+index cea84d7f2b91a1..02dad4624fe329 100644
+--- a/arch/loongarch/mm/hugetlbpage.c
++++ b/arch/loongarch/mm/hugetlbpage.c
+@@ -47,7 +47,8 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
+ 				pmd = pmd_offset(pud, addr);
+ 		}
+ 	}
+-	return pmd_none(pmdp_get(pmd)) ? NULL : (pte_t *) pmd;
++
++	return (!pmd || pmd_none(pmdp_get(pmd))) ? NULL : (pte_t *) pmd;
+ }
+ 
+ uint64_t pmd_to_entrylo(unsigned long pmd_val)
+diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile
+index b289b2c1b29460..c729bd68780423 100644
+--- a/arch/mips/vdso/Makefile
++++ b/arch/mips/vdso/Makefile
+@@ -27,6 +27,7 @@ endif
+ # offsets.
+ cflags-vdso := $(ccflags-vdso) \
+ 	$(filter -W%,$(filter-out -Wa$(comma)%,$(KBUILD_CFLAGS))) \
++	$(filter -std=%,$(KBUILD_CFLAGS)) \
+ 	-O3 -g -fPIC -fno-strict-aliasing -fno-common -fno-builtin -G 0 \
+ 	-mrelax-pic-calls $(call cc-option, -mexplicit-relocs) \
+ 	-fno-stack-protector -fno-jump-tables -DDISABLE_BRANCH_PROFILING \
+diff --git a/arch/parisc/boot/compressed/Makefile b/arch/parisc/boot/compressed/Makefile
+index 92227fa813dc34..17c42d718eb336 100644
+--- a/arch/parisc/boot/compressed/Makefile
++++ b/arch/parisc/boot/compressed/Makefile
+@@ -18,6 +18,7 @@ KBUILD_CFLAGS += -fno-PIE -mno-space-regs -mdisable-fpregs -Os
+ ifndef CONFIG_64BIT
+ KBUILD_CFLAGS += -mfast-indirect-calls
+ endif
++KBUILD_CFLAGS += -std=gnu11
+ 
+ LDFLAGS_vmlinux := -X -e startup --as-needed -T
+ $(obj)/vmlinux: $(obj)/vmlinux.lds $(addprefix $(obj)/, $(OBJECTS)) $(LIBGCC) FORCE
+diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c
+index f4626943633adc..00e97204783eda 100644
+--- a/arch/parisc/kernel/unaligned.c
++++ b/arch/parisc/kernel/unaligned.c
+@@ -25,7 +25,7 @@
+ #define DPRINTF(fmt, args...)
+ #endif
+ 
+-#define RFMT "%#08lx"
++#define RFMT "0x%08lx"
+ 
+ /* 1111 1100 0000 0000 0001 0011 1100 0000 */
+ #define OPCODE1(a,b,c)	((a)<<26|(b)<<12|(c)<<6) 
+diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
+index 02897f4b0dbf81..b891910fce8a69 100644
+--- a/arch/powerpc/include/asm/ppc_asm.h
++++ b/arch/powerpc/include/asm/ppc_asm.h
+@@ -183,7 +183,7 @@
+ /*
+  * Used to name C functions called from asm
+  */
+-#ifdef CONFIG_PPC_KERNEL_PCREL
++#if defined(__powerpc64__) && defined(CONFIG_PPC_KERNEL_PCREL)
+ #define CFUNC(name) name@notoc
+ #else
+ #define CFUNC(name) name
+diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
+index 83fe99861eb178..ca7f7bb2b47869 100644
+--- a/arch/powerpc/kernel/eeh.c
++++ b/arch/powerpc/kernel/eeh.c
+@@ -1509,6 +1509,8 @@ int eeh_pe_configure(struct eeh_pe *pe)
+ 	/* Invalid PE ? */
+ 	if (!pe)
+ 		return -ENODEV;
++	else
++		ret = eeh_ops->configure_bridge(pe);
+ 
+ 	return ret;
+ }
+diff --git a/arch/powerpc/kernel/vdso/Makefile b/arch/powerpc/kernel/vdso/Makefile
+index c568cad6a22e6b..6ba68b28ed8704 100644
+--- a/arch/powerpc/kernel/vdso/Makefile
++++ b/arch/powerpc/kernel/vdso/Makefile
+@@ -53,7 +53,7 @@ ldflags-$(CONFIG_LD_ORPHAN_WARN) += -Wl,--orphan-handling=$(CONFIG_LD_ORPHAN_WAR
+ ldflags-y += $(filter-out $(CC_AUTO_VAR_INIT_ZERO_ENABLER) $(CC_FLAGS_FTRACE) -Wa$(comma)%, $(KBUILD_CFLAGS))
+ 
+ CC32FLAGS := -m32
+-CC32FLAGSREMOVE := -mcmodel=medium -mabi=elfv1 -mabi=elfv2 -mcall-aixdesc
++CC32FLAGSREMOVE := -mcmodel=medium -mabi=elfv1 -mabi=elfv2 -mcall-aixdesc -mpcrel
+ ifdef CONFIG_CC_IS_CLANG
+ # This flag is supported by clang for 64-bit but not 32-bit so it will cause
+ # an unused command line flag warning for this file.
+diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c
+index 6dfb55b52d363d..ba98a680a12e67 100644
+--- a/arch/powerpc/platforms/pseries/msi.c
++++ b/arch/powerpc/platforms/pseries/msi.c
+@@ -524,7 +524,12 @@ static struct msi_domain_info pseries_msi_domain_info = {
+ 
+ static void pseries_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
+ {
+-	__pci_read_msi_msg(irq_data_get_msi_desc(data), msg);
++	struct pci_dev *dev = msi_desc_to_pci_dev(irq_data_get_msi_desc(data));
++
++	if (dev->current_state == PCI_D0)
++		__pci_read_msi_msg(irq_data_get_msi_desc(data), msg);
++	else
++		get_cached_msi_msg(data->irq, msg);
+ }
+ 
+ static struct irq_chip pseries_msi_irq_chip = {
+diff --git a/arch/riscv/kvm/vcpu_sbi_replace.c b/arch/riscv/kvm/vcpu_sbi_replace.c
+index 5fbf3f94f1e855..b17fad091babdc 100644
+--- a/arch/riscv/kvm/vcpu_sbi_replace.c
++++ b/arch/riscv/kvm/vcpu_sbi_replace.c
+@@ -103,7 +103,7 @@ static int kvm_sbi_ext_rfence_handler(struct kvm_vcpu *vcpu, struct kvm_run *run
+ 		kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_FENCE_I_SENT);
+ 		break;
+ 	case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA:
+-		if (cp->a2 == 0 && cp->a3 == 0)
++		if ((cp->a2 == 0 && cp->a3 == 0) || cp->a3 == -1UL)
+ 			kvm_riscv_hfence_vvma_all(vcpu->kvm, hbase, hmask);
+ 		else
+ 			kvm_riscv_hfence_vvma_gva(vcpu->kvm, hbase, hmask,
+@@ -111,7 +111,7 @@ static int kvm_sbi_ext_rfence_handler(struct kvm_vcpu *vcpu, struct kvm_run *run
+ 		kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_SENT);
+ 		break;
+ 	case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID:
+-		if (cp->a2 == 0 && cp->a3 == 0)
++		if ((cp->a2 == 0 && cp->a3 == 0) || cp->a3 == -1UL)
+ 			kvm_riscv_hfence_vvma_asid_all(vcpu->kvm,
+ 						       hbase, hmask, cp->a4);
+ 		else
+@@ -127,9 +127,9 @@ static int kvm_sbi_ext_rfence_handler(struct kvm_vcpu *vcpu, struct kvm_run *run
+ 	case SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID:
+ 		/*
+ 		 * Until nested virtualization is implemented, the
+-		 * SBI HFENCE calls should be treated as NOPs
++		 * SBI HFENCE calls should return not supported
++		 * hence fallthrough.
+ 		 */
+-		break;
+ 	default:
+ 		retdata->err_val = SBI_ERR_NOT_SUPPORTED;
+ 	}
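Two fixes share this hunk: the SFENCE.VMA handlers now also accept a size (a3) of -1UL as "the whole address space", which the SBI spec allows as a full-flush request, and the unimplemented HFENCE calls now fall through to SBI_ERR_NOT_SUPPORTED instead of silently succeeding as NOPs. The range test in isolation (illustrative helper, not a kernel API):

#include <stdbool.h>

static bool sbi_range_is_full_flush(unsigned long start, unsigned long size)
{
	/* start=0,size=0 and size=-1 both denote the entire space. */
	return (start == 0 && size == 0) || size == (unsigned long)-1;
}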
+diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
+index a688351f4ab521..7bc97ebd60d5dd 100644
+--- a/arch/s390/kvm/gaccess.c
++++ b/arch/s390/kvm/gaccess.c
+@@ -317,7 +317,7 @@ enum prot_type {
+ 	PROT_TYPE_DAT  = 3,
+ 	PROT_TYPE_IEP  = 4,
+ 	/* Dummy value for passing an initialized value when code != PGM_PROTECTION */
+-	PROT_NONE,
++	PROT_TYPE_DUMMY,
+ };
+ 
+ static int trans_exc_ending(struct kvm_vcpu *vcpu, int code, unsigned long gva, u8 ar,
+@@ -333,7 +333,7 @@ static int trans_exc_ending(struct kvm_vcpu *vcpu, int code, unsigned long gva,
+ 	switch (code) {
+ 	case PGM_PROTECTION:
+ 		switch (prot) {
+-		case PROT_NONE:
++		case PROT_TYPE_DUMMY:
+ 			/* We should never get here, acts like termination */
+ 			WARN_ON_ONCE(1);
+ 			break;
+@@ -803,7 +803,7 @@ static int guest_range_to_gpas(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
+ 			gpa = kvm_s390_real_to_abs(vcpu, ga);
+ 			if (!kvm_is_gpa_in_memslot(vcpu->kvm, gpa)) {
+ 				rc = PGM_ADDRESSING;
+-				prot = PROT_NONE;
++				prot = PROT_TYPE_DUMMY;
+ 			}
+ 		}
+ 		if (rc)
+@@ -961,7 +961,7 @@ int access_guest_with_key(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
+ 		if (rc == PGM_PROTECTION)
+ 			prot = PROT_TYPE_KEYC;
+ 		else
+-			prot = PROT_NONE;
++			prot = PROT_TYPE_DUMMY;
+ 		rc = trans_exc_ending(vcpu, rc, ga, ar, mode, prot, terminate);
+ 	}
+ out_unlock:
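The rename appears to sidestep a name clash: PROT_NONE is also an object-like macro (0x0) in the uapi mman headers, and once that definition is in scope the enumerator declaration itself stops compiling. A small demonstration of the collision:

#include <sys/mman.h>	/* defines PROT_NONE as 0x0 */

enum prot_type_sketch {
	PROT_TYPE_LA = 0,
	/* PROT_NONE,       <- preprocesses to "0x0," and fails to parse */
	PROT_TYPE_DUMMY,    /* collision-free spelling */
};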
+diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
+index 88f72745fa59e1..9e19d6076d3e8e 100644
+--- a/arch/s390/pci/pci.c
++++ b/arch/s390/pci/pci.c
+@@ -44,6 +44,7 @@
+ /* list of all detected zpci devices */
+ static LIST_HEAD(zpci_list);
+ static DEFINE_SPINLOCK(zpci_list_lock);
++static DEFINE_MUTEX(zpci_add_remove_lock);
+ 
+ static DECLARE_BITMAP(zpci_domain, ZPCI_DOMAIN_BITMAP_SIZE);
+ static DEFINE_SPINLOCK(zpci_domain_lock);
+@@ -69,6 +70,15 @@ EXPORT_SYMBOL_GPL(zpci_aipb);
+ struct airq_iv *zpci_aif_sbv;
+ EXPORT_SYMBOL_GPL(zpci_aif_sbv);
+ 
++void zpci_zdev_put(struct zpci_dev *zdev)
++{
++	if (!zdev)
++		return;
++	mutex_lock(&zpci_add_remove_lock);
++	kref_put_lock(&zdev->kref, zpci_release_device, &zpci_list_lock);
++	mutex_unlock(&zpci_add_remove_lock);
++}
++
+ struct zpci_dev *get_zdev_by_fid(u32 fid)
+ {
+ 	struct zpci_dev *tmp, *zdev = NULL;
+@@ -831,6 +841,7 @@ int zpci_add_device(struct zpci_dev *zdev)
+ {
+ 	int rc;
+ 
++	mutex_lock(&zpci_add_remove_lock);
+ 	zpci_dbg(1, "add fid:%x, fh:%x, c:%d\n", zdev->fid, zdev->fh, zdev->state);
+ 	rc = zpci_init_iommu(zdev);
+ 	if (rc)
+@@ -844,12 +855,14 @@ int zpci_add_device(struct zpci_dev *zdev)
+ 	spin_lock(&zpci_list_lock);
+ 	list_add_tail(&zdev->entry, &zpci_list);
+ 	spin_unlock(&zpci_list_lock);
++	mutex_unlock(&zpci_add_remove_lock);
+ 	return 0;
+ 
+ error_destroy_iommu:
+ 	zpci_destroy_iommu(zdev);
+ error:
+ 	zpci_dbg(0, "add fid:%x, rc:%d\n", zdev->fid, rc);
++	mutex_unlock(&zpci_add_remove_lock);
+ 	return rc;
+ }
+ 
+@@ -919,21 +932,20 @@ int zpci_deconfigure_device(struct zpci_dev *zdev)
+  * @zdev: the zpci_dev that was reserved
+  *
+  * Handle the case that a given zPCI function was reserved by another system.
+- * After a call to this function the zpci_dev can not be found via
+- * get_zdev_by_fid() anymore but may still be accessible via existing
+- * references though it will not be functional anymore.
+  */
+ void zpci_device_reserved(struct zpci_dev *zdev)
+ {
+-	/*
+-	 * Remove device from zpci_list as it is going away. This also
+-	 * makes sure we ignore subsequent zPCI events for this device.
+-	 */
+-	spin_lock(&zpci_list_lock);
+-	list_del(&zdev->entry);
+-	spin_unlock(&zpci_list_lock);
++	lockdep_assert_held(&zdev->state_lock);
++	/* We may declare the device reserved multiple times */
++	if (zdev->state == ZPCI_FN_STATE_RESERVED)
++		return;
+ 	zdev->state = ZPCI_FN_STATE_RESERVED;
+ 	zpci_dbg(3, "rsv fid:%x\n", zdev->fid);
++	/*
++	 * The underlying device is gone. Allow the zdev to be freed
++	 * as soon as all other references are gone by accounting for
++	 * the removal as a dropped reference.
++	 */
+ 	zpci_zdev_put(zdev);
+ }
+ 
+@@ -941,13 +953,14 @@ void zpci_release_device(struct kref *kref)
+ {
+ 	struct zpci_dev *zdev = container_of(kref, struct zpci_dev, kref);
+ 
++	lockdep_assert_held(&zpci_add_remove_lock);
+ 	WARN_ON(zdev->state != ZPCI_FN_STATE_RESERVED);
+-
+-	if (zdev->zbus->bus)
+-		zpci_bus_remove_device(zdev, false);
+-
+-	if (zdev_enabled(zdev))
+-		zpci_disable_device(zdev);
++	/*
++	 * We already hold zpci_list_lock thanks to kref_put_lock().
++	 * This makes sure no new reference can be taken from the list.
++	 */
++	list_del(&zdev->entry);
++	spin_unlock(&zpci_list_lock);
+ 
+ 	if (zdev->has_hp_slot)
+ 		zpci_exit_slot(zdev);
+diff --git a/arch/s390/pci/pci_bus.h b/arch/s390/pci/pci_bus.h
+index af9f0ac79a1b1b..3febb3b297c0c4 100644
+--- a/arch/s390/pci/pci_bus.h
++++ b/arch/s390/pci/pci_bus.h
+@@ -17,11 +17,8 @@ int zpci_bus_scan_device(struct zpci_dev *zdev);
+ void zpci_bus_remove_device(struct zpci_dev *zdev, bool set_error);
+ 
+ void zpci_release_device(struct kref *kref);
+-static inline void zpci_zdev_put(struct zpci_dev *zdev)
+-{
+-	if (zdev)
+-		kref_put(&zdev->kref, zpci_release_device);
+-}
++
++void zpci_zdev_put(struct zpci_dev *zdev);
+ 
+ static inline void zpci_zdev_get(struct zpci_dev *zdev)
+ {
+diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c
+index 7f7b732b3f3efa..6f48a1073c6e81 100644
+--- a/arch/s390/pci/pci_event.c
++++ b/arch/s390/pci/pci_event.c
+@@ -322,6 +322,22 @@ static void zpci_event_hard_deconfigured(struct zpci_dev *zdev, u32 fh)
+ 	zdev->state = ZPCI_FN_STATE_STANDBY;
+ }
+ 
++static void zpci_event_reappear(struct zpci_dev *zdev)
++{
++	lockdep_assert_held(&zdev->state_lock);
++	/*
++	 * The zdev is in the reserved state. This means that it was presumed to
++	 * go away but there are still undropped references. Now, the platform
++	 * announced its availability again. Bring back the lingering zdev
++	 * to standby. This is safe because we hold a temporary reference
++	 * now so that it won't go away. Account for the re-appearance of the
++	 * underlying device by incrementing the reference count.
++	 */
++	zdev->state = ZPCI_FN_STATE_STANDBY;
++	zpci_zdev_get(zdev);
++	zpci_dbg(1, "rea fid:%x, fh:%x\n", zdev->fid, zdev->fh);
++}
++
+ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
+ {
+ 	struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid);
+@@ -345,8 +361,10 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
+ 				break;
+ 			}
+ 		} else {
++			if (zdev->state == ZPCI_FN_STATE_RESERVED)
++				zpci_event_reappear(zdev);
+ 			/* the configuration request may be stale */
+-			if (zdev->state != ZPCI_FN_STATE_STANDBY)
++			else if (zdev->state != ZPCI_FN_STATE_STANDBY)
+ 				break;
+ 			zdev->state = ZPCI_FN_STATE_CONFIGURED;
+ 		}
+@@ -362,6 +380,8 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
+ 				break;
+ 			}
+ 		} else {
++			if (zdev->state == ZPCI_FN_STATE_RESERVED)
++				zpci_event_reappear(zdev);
+ 			zpci_update_fh(zdev, ccdf->fh);
+ 		}
+ 		break;
+diff --git a/arch/s390/pci/pci_mmio.c b/arch/s390/pci/pci_mmio.c
+index 4779c3cb6cfab2..0fa34c50129679 100644
+--- a/arch/s390/pci/pci_mmio.c
++++ b/arch/s390/pci/pci_mmio.c
+@@ -228,7 +228,7 @@ static inline int __pcilg_mio_inuser(
+ 		[ioaddr_len] "+&d" (ioaddr_len.pair),
+ 		[cc] "+d" (cc), [val] "=d" (val),
+ 		[dst] "+a" (dst), [cnt] "+d" (cnt), [tmp] "=d" (tmp),
+-		[shift] "+d" (shift)
++		[shift] "+a" (shift)
+ 		:: "cc", "memory");
+ 
+ 	/* did we write everything to the user space buffer? */
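The constraint change for [shift] matters because s390 shift instructions encode the shift amount as a base register, and base %r0 is read as the constant 0: with "d" (any GPR) the compiler could allocate %r0 and silently shift by zero. The "a" constraint means "address register", i.e. any GPR except %r0. A sketch, assuming an s390x toolchain:

static inline unsigned long srlg_sketch(unsigned long val, unsigned long shift)
{
	unsigned long out;

	asm("srlg	%[out],%[val],0(%[shift])"
	    : [out] "=d" (out)
	    : [val] "d" (val), [shift] "a" (shift));	/* "a": never %r0 */
	return out;
}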
+diff --git a/arch/x86/include/asm/tdx.h b/arch/x86/include/asm/tdx.h
+index b5b63329406137..2d13ef1f4b057a 100644
+--- a/arch/x86/include/asm/tdx.h
++++ b/arch/x86/include/asm/tdx.h
+@@ -97,7 +97,7 @@ void tdx_init(void);
+ 
+ typedef u64 (*sc_func_t)(u64 fn, struct tdx_module_args *args);
+ 
+-static inline u64 sc_retry(sc_func_t func, u64 fn,
++static __always_inline u64 sc_retry(sc_func_t func, u64 fn,
+ 			   struct tdx_module_args *args)
+ {
+ 	int retry = RDRAND_RETRY_LOOPS;
+diff --git a/arch/x86/kernel/cpu/sgx/main.c b/arch/x86/kernel/cpu/sgx/main.c
+index 9ace84486499b8..147ea26dfdad64 100644
+--- a/arch/x86/kernel/cpu/sgx/main.c
++++ b/arch/x86/kernel/cpu/sgx/main.c
+@@ -719,6 +719,8 @@ int arch_memory_failure(unsigned long pfn, int flags)
+ 		goto out;
+ 	}
+ 
++	sgx_unmark_page_reclaimable(page);
++
+ 	/*
+ 	 * TBD: Add additional plumbing to enable pre-emptive
+ 	 * action for asynchronous poison notification. Until
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
+index 7cbacd0439211e..1f42a71b15c023 100644
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -1483,7 +1483,7 @@ static void svm_clear_current_vmcb(struct vmcb *vmcb)
+ {
+ 	int i;
+ 
+-	for_each_online_cpu(i)
++	for_each_possible_cpu(i)
+ 		cmpxchg(per_cpu_ptr(&svm_data.current_vmcb, i), vmcb, NULL);
+ }
+ 
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index a3d45b01dbadf3..bcbedddacc4804 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -770,8 +770,11 @@ void vmx_emergency_disable_virtualization_cpu(void)
+ 		return;
+ 
+ 	list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu),
+-			    loaded_vmcss_on_cpu_link)
++			    loaded_vmcss_on_cpu_link) {
+ 		vmcs_clear(v->vmcs);
++		if (v->shadow_vmcs)
++			vmcs_clear(v->shadow_vmcs);
++	}
+ 
+ 	kvm_cpu_vmxoff();
+ }
+diff --git a/arch/x86/tools/insn_decoder_test.c b/arch/x86/tools/insn_decoder_test.c
+index 472540aeabc235..08cd913cbd4e9a 100644
+--- a/arch/x86/tools/insn_decoder_test.c
++++ b/arch/x86/tools/insn_decoder_test.c
+@@ -10,8 +10,7 @@
+ #include <assert.h>
+ #include <unistd.h>
+ #include <stdarg.h>
+-
+-#define unlikely(cond) (cond)
++#include <linux/kallsyms.h>
+ 
+ #include <asm/insn.h>
+ #include <inat.c>
+@@ -106,7 +105,7 @@ static void parse_args(int argc, char **argv)
+ 	}
+ }
+ 
+-#define BUFSIZE 256
++#define BUFSIZE (256 + KSYM_NAME_LEN)
+ 
+ int main(int argc, char **argv)
+ {
+diff --git a/arch/x86/virt/vmx/tdx/tdx.c b/arch/x86/virt/vmx/tdx/tdx.c
+index 4e2b2e2ac9f90d..eb91bc5448de23 100644
+--- a/arch/x86/virt/vmx/tdx/tdx.c
++++ b/arch/x86/virt/vmx/tdx/tdx.c
+@@ -69,8 +69,9 @@ static inline void seamcall_err_ret(u64 fn, u64 err,
+ 			args->r9, args->r10, args->r11);
+ }
+ 
+-static inline int sc_retry_prerr(sc_func_t func, sc_err_func_t err_func,
+-				 u64 fn, struct tdx_module_args *args)
++static __always_inline int sc_retry_prerr(sc_func_t func,
++					  sc_err_func_t err_func,
++					  u64 fn, struct tdx_module_args *args)
+ {
+ 	u64 sret = sc_retry(func, fn, args);
+ 
+diff --git a/block/blk-merge.c b/block/blk-merge.c
+index f575cc1705b3f4..7ddd7dd23dda88 100644
+--- a/block/blk-merge.c
++++ b/block/blk-merge.c
+@@ -1180,20 +1180,20 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
+ 	if (!plug || rq_list_empty(&plug->mq_list))
+ 		return false;
+ 
+-	rq_list_for_each(&plug->mq_list, rq) {
+-		if (rq->q == q) {
+-			if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) ==
+-			    BIO_MERGE_OK)
+-				return true;
+-			break;
+-		}
++	rq = plug->mq_list.tail;
++	if (rq->q == q)
++		return blk_attempt_bio_merge(q, rq, bio, nr_segs, false) ==
++			BIO_MERGE_OK;
++	else if (!plug->multiple_queues)
++		return false;
+ 
+-		/*
+-		 * Only keep iterating plug list for merges if we have multiple
+-		 * queues
+-		 */
+-		if (!plug->multiple_queues)
+-			break;
++	rq_list_for_each(&plug->mq_list, rq) {
++		if (rq->q != q)
++			continue;
++		if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) ==
++		    BIO_MERGE_OK)
++			return true;
++		break;
+ 	}
+ 	return false;
+ }
+diff --git a/block/blk-zoned.c b/block/blk-zoned.c
+index 164ded9eb14446..d84946eb2f21e1 100644
+--- a/block/blk-zoned.c
++++ b/block/blk-zoned.c
+@@ -1240,6 +1240,7 @@ void blk_zone_write_plug_bio_endio(struct bio *bio)
+ 	if (bio_flagged(bio, BIO_EMULATES_ZONE_APPEND)) {
+ 		bio->bi_opf &= ~REQ_OP_MASK;
+ 		bio->bi_opf |= REQ_OP_ZONE_APPEND;
++		bio_clear_flag(bio, BIO_EMULATES_ZONE_APPEND);
+ 	}
+ 
+ 	/*
+diff --git a/drivers/accel/ivpu/ivpu_fw.c b/drivers/accel/ivpu/ivpu_fw.c
+index 83e4995540a6ee..cd40446a22a57c 100644
+--- a/drivers/accel/ivpu/ivpu_fw.c
++++ b/drivers/accel/ivpu/ivpu_fw.c
+@@ -53,18 +53,18 @@ static struct {
+ 	int gen;
+ 	const char *name;
+ } fw_names[] = {
+-	{ IVPU_HW_IP_37XX, "vpu_37xx.bin" },
++	{ IVPU_HW_IP_37XX, "intel/vpu/vpu_37xx_v1.bin" },
+ 	{ IVPU_HW_IP_37XX, "intel/vpu/vpu_37xx_v0.0.bin" },
+-	{ IVPU_HW_IP_40XX, "vpu_40xx.bin" },
++	{ IVPU_HW_IP_40XX, "intel/vpu/vpu_40xx_v1.bin" },
+ 	{ IVPU_HW_IP_40XX, "intel/vpu/vpu_40xx_v0.0.bin" },
+-	{ IVPU_HW_IP_50XX, "vpu_50xx.bin" },
++	{ IVPU_HW_IP_50XX, "intel/vpu/vpu_50xx_v1.bin" },
+ 	{ IVPU_HW_IP_50XX, "intel/vpu/vpu_50xx_v0.0.bin" },
+ };
+ 
+ /* Production fw_names from the table above */
+-MODULE_FIRMWARE("intel/vpu/vpu_37xx_v0.0.bin");
+-MODULE_FIRMWARE("intel/vpu/vpu_40xx_v0.0.bin");
+-MODULE_FIRMWARE("intel/vpu/vpu_50xx_v0.0.bin");
++MODULE_FIRMWARE("intel/vpu/vpu_37xx_v1.bin");
++MODULE_FIRMWARE("intel/vpu/vpu_40xx_v1.bin");
++MODULE_FIRMWARE("intel/vpu/vpu_50xx_v1.bin");
+ 
+ static int ivpu_fw_request(struct ivpu_device *vdev)
+ {
+diff --git a/drivers/accel/ivpu/ivpu_gem.c b/drivers/accel/ivpu/ivpu_gem.c
+index c8daffd90f3001..6b1bda7e130d06 100644
+--- a/drivers/accel/ivpu/ivpu_gem.c
++++ b/drivers/accel/ivpu/ivpu_gem.c
+@@ -26,11 +26,21 @@ static inline void ivpu_dbg_bo(struct ivpu_device *vdev, struct ivpu_bo *bo, con
+ {
+ 	ivpu_dbg(vdev, BO,
+ 		 "%6s: bo %8p vpu_addr %9llx size %8zu ctx %d has_pages %d dma_mapped %d mmu_mapped %d wc %d imported %d\n",
+-		 action, bo, bo->vpu_addr, ivpu_bo_size(bo), bo->ctx ? bo->ctx->id : 0,
++		 action, bo, bo->vpu_addr, ivpu_bo_size(bo), bo->ctx_id,
+ 		 (bool)bo->base.pages, (bool)bo->base.sgt, bo->mmu_mapped, bo->base.map_wc,
+ 		 (bool)bo->base.base.import_attach);
+ }
+ 
++static inline int ivpu_bo_lock(struct ivpu_bo *bo)
++{
++	return dma_resv_lock(bo->base.base.resv, NULL);
++}
++
++static inline void ivpu_bo_unlock(struct ivpu_bo *bo)
++{
++	dma_resv_unlock(bo->base.base.resv);
++}
++
+ /*
+  * ivpu_bo_pin() - pin the backing physical pages and map them to VPU.
+  *
+@@ -41,22 +51,22 @@ static inline void ivpu_dbg_bo(struct ivpu_device *vdev, struct ivpu_bo *bo, con
+ int __must_check ivpu_bo_pin(struct ivpu_bo *bo)
+ {
+ 	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
++	struct sg_table *sgt;
+ 	int ret = 0;
+ 
+-	mutex_lock(&bo->lock);
+-
+ 	ivpu_dbg_bo(vdev, bo, "pin");
+-	drm_WARN_ON(&vdev->drm, !bo->ctx);
+ 
+-	if (!bo->mmu_mapped) {
+-		struct sg_table *sgt = drm_gem_shmem_get_pages_sgt(&bo->base);
++	sgt = drm_gem_shmem_get_pages_sgt(&bo->base);
++	if (IS_ERR(sgt)) {
++		ret = PTR_ERR(sgt);
++		ivpu_err(vdev, "Failed to map BO in IOMMU: %d\n", ret);
++		return ret;
++	}
+ 
+-		if (IS_ERR(sgt)) {
+-			ret = PTR_ERR(sgt);
+-			ivpu_err(vdev, "Failed to map BO in IOMMU: %d\n", ret);
+-			goto unlock;
+-		}
++	ivpu_bo_lock(bo);
+ 
++	if (!bo->mmu_mapped) {
++		drm_WARN_ON(&vdev->drm, !bo->ctx);
+ 		ret = ivpu_mmu_context_map_sgt(vdev, bo->ctx, bo->vpu_addr, sgt,
+ 					       ivpu_bo_is_snooped(bo));
+ 		if (ret) {
+@@ -67,7 +77,7 @@ int __must_check ivpu_bo_pin(struct ivpu_bo *bo)
+ 	}
+ 
+ unlock:
+-	mutex_unlock(&bo->lock);
++	ivpu_bo_unlock(bo);
+ 
+ 	return ret;
+ }
+@@ -82,7 +92,7 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx,
+ 	if (!drm_dev_enter(&vdev->drm, &idx))
+ 		return -ENODEV;
+ 
+-	mutex_lock(&bo->lock);
++	ivpu_bo_lock(bo);
+ 
+ 	ret = ivpu_mmu_context_insert_node(ctx, range, ivpu_bo_size(bo), &bo->mm_node);
+ 	if (!ret) {
+@@ -92,9 +102,7 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx,
+ 		ivpu_err(vdev, "Failed to add BO to context %u: %d\n", ctx->id, ret);
+ 	}
+ 
+-	ivpu_dbg_bo(vdev, bo, "alloc");
+-
+-	mutex_unlock(&bo->lock);
++	ivpu_bo_unlock(bo);
+ 
+ 	drm_dev_exit(idx);
+ 
+@@ -105,7 +113,7 @@ static void ivpu_bo_unbind_locked(struct ivpu_bo *bo)
+ {
+ 	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
+ 
+-	lockdep_assert(lockdep_is_held(&bo->lock) || !kref_read(&bo->base.base.refcount));
++	lockdep_assert(dma_resv_held(bo->base.base.resv) || !kref_read(&bo->base.base.refcount));
+ 
+ 	if (bo->mmu_mapped) {
+ 		drm_WARN_ON(&vdev->drm, !bo->ctx);
+@@ -123,14 +131,12 @@ static void ivpu_bo_unbind_locked(struct ivpu_bo *bo)
+ 	if (bo->base.base.import_attach)
+ 		return;
+ 
+-	dma_resv_lock(bo->base.base.resv, NULL);
+ 	if (bo->base.sgt) {
+ 		dma_unmap_sgtable(vdev->drm.dev, bo->base.sgt, DMA_BIDIRECTIONAL, 0);
+ 		sg_free_table(bo->base.sgt);
+ 		kfree(bo->base.sgt);
+ 		bo->base.sgt = NULL;
+ 	}
+-	dma_resv_unlock(bo->base.base.resv);
+ }
+ 
+ void ivpu_bo_unbind_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
+@@ -142,12 +148,12 @@ void ivpu_bo_unbind_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_m
+ 
+ 	mutex_lock(&vdev->bo_list_lock);
+ 	list_for_each_entry(bo, &vdev->bo_list, bo_list_node) {
+-		mutex_lock(&bo->lock);
++		ivpu_bo_lock(bo);
+ 		if (bo->ctx == ctx) {
+ 			ivpu_dbg_bo(vdev, bo, "unbind");
+ 			ivpu_bo_unbind_locked(bo);
+ 		}
+-		mutex_unlock(&bo->lock);
++		ivpu_bo_unlock(bo);
+ 	}
+ 	mutex_unlock(&vdev->bo_list_lock);
+ }
+@@ -167,12 +173,11 @@ struct drm_gem_object *ivpu_gem_create_object(struct drm_device *dev, size_t siz
+ 	bo->base.pages_mark_dirty_on_put = true; /* VPU can dirty a BO anytime */
+ 
+ 	INIT_LIST_HEAD(&bo->bo_list_node);
+-	mutex_init(&bo->lock);
+ 
+ 	return &bo->base.base;
+ }
+ 
+-static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 flags)
++static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 flags, u32 ctx_id)
+ {
+ 	struct drm_gem_shmem_object *shmem;
+ 	struct ivpu_bo *bo;
+@@ -190,6 +195,7 @@ static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 fla
+ 		return ERR_CAST(shmem);
+ 
+ 	bo = to_ivpu_bo(&shmem->base);
++	bo->ctx_id = ctx_id;
+ 	bo->base.map_wc = flags & DRM_IVPU_BO_WC;
+ 	bo->flags = flags;
+ 
+@@ -197,6 +203,8 @@ static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 fla
+ 	list_add_tail(&bo->bo_list_node, &vdev->bo_list);
+ 	mutex_unlock(&vdev->bo_list_lock);
+ 
++	ivpu_dbg_bo(vdev, bo, "alloc");
++
+ 	return bo;
+ }
+ 
+@@ -234,10 +242,14 @@ static void ivpu_gem_bo_free(struct drm_gem_object *obj)
+ 	list_del(&bo->bo_list_node);
+ 	mutex_unlock(&vdev->bo_list_lock);
+ 
+-	drm_WARN_ON(&vdev->drm, !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ));
++	drm_WARN_ON(&vdev->drm, !drm_gem_is_imported(&bo->base.base) &&
++		    !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ));
++	drm_WARN_ON(&vdev->drm, ivpu_bo_size(bo) == 0);
++	drm_WARN_ON(&vdev->drm, bo->base.vaddr);
+ 
+ 	ivpu_bo_unbind_locked(bo);
+-	mutex_destroy(&bo->lock);
++	drm_WARN_ON(&vdev->drm, bo->mmu_mapped);
++	drm_WARN_ON(&vdev->drm, bo->ctx);
+ 
+ 	drm_WARN_ON(obj->dev, bo->base.pages_use_count > 1);
+ 	drm_gem_shmem_free(&bo->base);
+@@ -271,7 +283,7 @@ int ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
+ 	if (size == 0)
+ 		return -EINVAL;
+ 
+-	bo = ivpu_bo_alloc(vdev, size, args->flags);
++	bo = ivpu_bo_alloc(vdev, size, args->flags, file_priv->ctx.id);
+ 	if (IS_ERR(bo)) {
+ 		ivpu_err(vdev, "Failed to allocate BO: %pe (ctx %u size %llu flags 0x%x)",
+ 			 bo, file_priv->ctx.id, args->size, args->flags);
+@@ -279,7 +291,10 @@ int ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
+ 	}
+ 
+ 	ret = drm_gem_handle_create(file, &bo->base.base, &args->handle);
+-	if (!ret)
++	if (ret)
++		ivpu_err(vdev, "Failed to create handle for BO: %pe (ctx %u size %llu flags 0x%x)",
++			 bo, file_priv->ctx.id, args->size, args->flags);
++	else
+ 		args->vpu_addr = bo->vpu_addr;
+ 
+ 	drm_gem_object_put(&bo->base.base);
+@@ -302,7 +317,7 @@ ivpu_bo_create(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
+ 	drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(range->end));
+ 	drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(size));
+ 
+-	bo = ivpu_bo_alloc(vdev, size, flags);
++	bo = ivpu_bo_alloc(vdev, size, flags, IVPU_GLOBAL_CONTEXT_MMU_SSID);
+ 	if (IS_ERR(bo)) {
+ 		ivpu_err(vdev, "Failed to allocate BO: %pe (vpu_addr 0x%llx size %llu flags 0x%x)",
+ 			 bo, range->start, size, flags);
+@@ -318,9 +333,9 @@ ivpu_bo_create(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
+ 		goto err_put;
+ 
+ 	if (flags & DRM_IVPU_BO_MAPPABLE) {
+-		dma_resv_lock(bo->base.base.resv, NULL);
++		ivpu_bo_lock(bo);
+ 		ret = drm_gem_shmem_vmap(&bo->base, &map);
+-		dma_resv_unlock(bo->base.base.resv);
++		ivpu_bo_unlock(bo);
+ 
+ 		if (ret)
+ 			goto err_put;
+@@ -343,9 +358,9 @@ void ivpu_bo_free(struct ivpu_bo *bo)
+ 	struct iosys_map map = IOSYS_MAP_INIT_VADDR(bo->base.vaddr);
+ 
+ 	if (bo->flags & DRM_IVPU_BO_MAPPABLE) {
+-		dma_resv_lock(bo->base.base.resv, NULL);
++		ivpu_bo_lock(bo);
+ 		drm_gem_shmem_vunmap(&bo->base, &map);
+-		dma_resv_unlock(bo->base.base.resv);
++		ivpu_bo_unlock(bo);
+ 	}
+ 
+ 	drm_gem_object_put(&bo->base.base);
+@@ -364,12 +379,12 @@ int ivpu_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file
+ 
+ 	bo = to_ivpu_bo(obj);
+ 
+-	mutex_lock(&bo->lock);
++	ivpu_bo_lock(bo);
+ 	args->flags = bo->flags;
+ 	args->mmap_offset = drm_vma_node_offset_addr(&obj->vma_node);
+ 	args->vpu_addr = bo->vpu_addr;
+ 	args->size = obj->size;
+-	mutex_unlock(&bo->lock);
++	ivpu_bo_unlock(bo);
+ 
+ 	drm_gem_object_put(obj);
+ 	return ret;
+@@ -403,10 +418,10 @@ int ivpu_bo_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file
+ 
+ static void ivpu_bo_print_info(struct ivpu_bo *bo, struct drm_printer *p)
+ {
+-	mutex_lock(&bo->lock);
++	ivpu_bo_lock(bo);
+ 
+ 	drm_printf(p, "%-9p %-3u 0x%-12llx %-10lu 0x%-8x %-4u",
+-		   bo, bo->ctx ? bo->ctx->id : 0, bo->vpu_addr, bo->base.base.size,
++		   bo, bo->ctx_id, bo->vpu_addr, bo->base.base.size,
+ 		   bo->flags, kref_read(&bo->base.base.refcount));
+ 
+ 	if (bo->base.pages)
+@@ -420,7 +435,7 @@ static void ivpu_bo_print_info(struct ivpu_bo *bo, struct drm_printer *p)
+ 
+ 	drm_printf(p, "\n");
+ 
+-	mutex_unlock(&bo->lock);
++	ivpu_bo_unlock(bo);
+ }
+ 
+ void ivpu_bo_list(struct drm_device *dev, struct drm_printer *p)
+diff --git a/drivers/accel/ivpu/ivpu_gem.h b/drivers/accel/ivpu/ivpu_gem.h
+index d975000abd7859..07bffe98c96355 100644
+--- a/drivers/accel/ivpu/ivpu_gem.h
++++ b/drivers/accel/ivpu/ivpu_gem.h
+@@ -17,10 +17,10 @@ struct ivpu_bo {
+ 	struct list_head bo_list_node;
+ 	struct drm_mm_node mm_node;
+ 
+-	struct mutex lock; /* Protects: ctx, mmu_mapped, vpu_addr */
+ 	u64 vpu_addr;
+ 	u32 flags;
+ 	u32 job_status; /* Valid only for command buffer */
++	u32 ctx_id;
+ 	bool mmu_mapped;
+ };
+ 
+diff --git a/drivers/acpi/acpica/amlresrc.h b/drivers/acpi/acpica/amlresrc.h
+index 4e88f9fc2a2894..b6588b7fa8986a 100644
+--- a/drivers/acpi/acpica/amlresrc.h
++++ b/drivers/acpi/acpica/amlresrc.h
+@@ -504,10 +504,6 @@ struct aml_resource_pin_group_config {
+ 
+ #define AML_RESOURCE_PIN_GROUP_CONFIG_REVISION    1	/* ACPI 6.2 */
+ 
+-/* restore default alignment */
+-
+-#pragma pack()
+-
+ /* Union of all resource descriptors, so we can allocate the worst case */
+ 
+ union aml_resource {
+@@ -562,6 +558,10 @@ union aml_resource {
+ 	u8 byte_item;
+ };
+ 
++/* restore default alignment */
++
++#pragma pack()
++
+ /* Interfaces used by both the disassembler and compiler */
+ 
+ void
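Moving #pragma pack() below union aml_resource keeps the union inside the packed region, so the union type itself gets 1-byte alignment and a pointer to any byte offset can legally be cast to it. That is what lets the later hunks in this patch (rsaddr.c, rscalc.c, rslist.c, utresrc.c) drop their memcpy-based misalignment workarounds. The alignment effect in miniature:

#include <stdio.h>
#include <stdalign.h>

#pragma pack(1)
struct inner {			/* packed: alignof == 1 */
	char tag;
	unsigned int length;
};

union view {			/* still inside pack(1), so the union  */
	struct inner i;		/* is 1-byte aligned and may overlay   */
	unsigned char byte;	/* an arbitrary byte-aligned buffer    */
};
#pragma pack()

int main(void)
{
	printf("alignof(union view) = %zu\n", alignof(union view)); /* 1 */
	return 0;
}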
+diff --git a/drivers/acpi/acpica/dsutils.c b/drivers/acpi/acpica/dsutils.c
+index fb9ed5e1da89dc..2bdae8a25e084d 100644
+--- a/drivers/acpi/acpica/dsutils.c
++++ b/drivers/acpi/acpica/dsutils.c
+@@ -668,6 +668,8 @@ acpi_ds_create_operands(struct acpi_walk_state *walk_state,
+ 	union acpi_parse_object *arguments[ACPI_OBJ_NUM_OPERANDS];
+ 	u32 arg_count = 0;
+ 	u32 index = walk_state->num_operands;
++	u32 prev_num_operands = walk_state->num_operands;
++	u32 new_num_operands;
+ 	u32 i;
+ 
+ 	ACPI_FUNCTION_TRACE_PTR(ds_create_operands, first_arg);
+@@ -696,6 +698,7 @@ acpi_ds_create_operands(struct acpi_walk_state *walk_state,
+ 
+ 	/* Create the interpreter arguments, in reverse order */
+ 
++	new_num_operands = index;
+ 	index--;
+ 	for (i = 0; i < arg_count; i++) {
+ 		arg = arguments[index];
+@@ -720,7 +723,11 @@ acpi_ds_create_operands(struct acpi_walk_state *walk_state,
+ 	 * pop everything off of the operand stack and delete those
+ 	 * objects
+ 	 */
+-	acpi_ds_obj_stack_pop_and_delete(arg_count, walk_state);
++	walk_state->num_operands = i;
++	acpi_ds_obj_stack_pop_and_delete(new_num_operands, walk_state);
++
++	/* Restore operand count */
++	walk_state->num_operands = prev_num_operands;
+ 
+ 	ACPI_EXCEPTION((AE_INFO, status, "While creating Arg %u", index));
+ 	return_ACPI_STATUS(status);
+diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c
+index 54471083ba545e..0bce1baaa62b32 100644
+--- a/drivers/acpi/acpica/psobject.c
++++ b/drivers/acpi/acpica/psobject.c
+@@ -636,7 +636,8 @@ acpi_status
+ acpi_ps_complete_final_op(struct acpi_walk_state *walk_state,
+ 			  union acpi_parse_object *op, acpi_status status)
+ {
+-	acpi_status status2;
++	acpi_status return_status = status;
++	u8 ascending = TRUE;
+ 
+ 	ACPI_FUNCTION_TRACE_PTR(ps_complete_final_op, walk_state);
+ 
+@@ -650,7 +651,7 @@ acpi_ps_complete_final_op(struct acpi_walk_state *walk_state,
+ 			  op));
+ 	do {
+ 		if (op) {
+-			if (walk_state->ascending_callback != NULL) {
++			if (ascending && walk_state->ascending_callback != NULL) {
+ 				walk_state->op = op;
+ 				walk_state->op_info =
+ 				    acpi_ps_get_opcode_info(op->common.
+@@ -672,49 +673,26 @@ acpi_ps_complete_final_op(struct acpi_walk_state *walk_state,
+ 				}
+ 
+ 				if (status == AE_CTRL_TERMINATE) {
+-					status = AE_OK;
+-
+-					/* Clean up */
+-					do {
+-						if (op) {
+-							status2 =
+-							    acpi_ps_complete_this_op
+-							    (walk_state, op);
+-							if (ACPI_FAILURE
+-							    (status2)) {
+-								return_ACPI_STATUS
+-								    (status2);
+-							}
+-						}
+-
+-						acpi_ps_pop_scope(&
+-								  (walk_state->
+-								   parser_state),
+-								  &op,
+-								  &walk_state->
+-								  arg_types,
+-								  &walk_state->
+-								  arg_count);
+-
+-					} while (op);
+-
+-					return_ACPI_STATUS(status);
++					ascending = FALSE;
++					return_status = AE_CTRL_TERMINATE;
+ 				}
+ 
+ 				else if (ACPI_FAILURE(status)) {
+ 
+ 					/* First error is most important */
+ 
+-					(void)
+-					    acpi_ps_complete_this_op(walk_state,
+-								     op);
+-					return_ACPI_STATUS(status);
++					ascending = FALSE;
++					return_status = status;
+ 				}
+ 			}
+ 
+-			status2 = acpi_ps_complete_this_op(walk_state, op);
+-			if (ACPI_FAILURE(status2)) {
+-				return_ACPI_STATUS(status2);
++			status = acpi_ps_complete_this_op(walk_state, op);
++			if (ACPI_FAILURE(status)) {
++				ascending = FALSE;
++				if (ACPI_SUCCESS(return_status) ||
++				    return_status == AE_CTRL_TERMINATE) {
++					return_status = status;
++				}
+ 			}
+ 		}
+ 
+@@ -724,5 +702,5 @@ acpi_ps_complete_final_op(struct acpi_walk_state *walk_state,
+ 
+ 	} while (op);
+ 
+-	return_ACPI_STATUS(status);
++	return_ACPI_STATUS(return_status);
+ }
+diff --git a/drivers/acpi/acpica/rsaddr.c b/drivers/acpi/acpica/rsaddr.c
+index 27384ee245f094..f92010e667cda7 100644
+--- a/drivers/acpi/acpica/rsaddr.c
++++ b/drivers/acpi/acpica/rsaddr.c
+@@ -272,18 +272,13 @@ u8
+ acpi_rs_get_address_common(struct acpi_resource *resource,
+ 			   union aml_resource *aml)
+ {
+-	struct aml_resource_address address;
+-
+ 	ACPI_FUNCTION_ENTRY();
+ 
+-	/* Avoid undefined behavior: member access within misaligned address */
+-
+-	memcpy(&address, aml, sizeof(address));
+-
+ 	/* Validate the Resource Type */
+ 
+-	if ((address.resource_type > 2) &&
+-	    (address.resource_type < 0xC0) && (address.resource_type != 0x0A)) {
++	if ((aml->address.resource_type > 2) &&
++	    (aml->address.resource_type < 0xC0) &&
++	    (aml->address.resource_type != 0x0A)) {
+ 		return (FALSE);
+ 	}
+ 
+@@ -304,7 +299,7 @@ acpi_rs_get_address_common(struct acpi_resource *resource,
+ 		/* Generic resource type, just grab the type_specific byte */
+ 
+ 		resource->data.address.info.type_specific =
+-		    address.specific_flags;
++		    aml->address.specific_flags;
+ 	}
+ 
+ 	return (TRUE);
+diff --git a/drivers/acpi/acpica/rscalc.c b/drivers/acpi/acpica/rscalc.c
+index 6e7a152d645953..242daf45e20eff 100644
+--- a/drivers/acpi/acpica/rscalc.c
++++ b/drivers/acpi/acpica/rscalc.c
+@@ -608,18 +608,12 @@ acpi_rs_get_list_length(u8 *aml_buffer,
+ 
+ 		case ACPI_RESOURCE_NAME_SERIAL_BUS:{
+ 
+-				/* Avoid undefined behavior: member access within misaligned address */
+-
+-				struct aml_resource_common_serialbus
+-				    common_serial_bus;
+-				memcpy(&common_serial_bus, aml_resource,
+-				       sizeof(common_serial_bus));
+-
+ 				minimum_aml_resource_length =
+ 				    acpi_gbl_resource_aml_serial_bus_sizes
+-				    [common_serial_bus.type];
++				    [aml_resource->common_serial_bus.type];
+ 				extra_struct_bytes +=
+-				    common_serial_bus.resource_length -
++				    aml_resource->common_serial_bus.
++				    resource_length -
+ 				    minimum_aml_resource_length;
+ 				break;
+ 			}
+@@ -688,16 +682,10 @@ acpi_rs_get_list_length(u8 *aml_buffer,
+ 		 */
+ 		if (acpi_ut_get_resource_type(aml_buffer) ==
+ 		    ACPI_RESOURCE_NAME_SERIAL_BUS) {
+-
+-			/* Avoid undefined behavior: member access within misaligned address */
+-
+-			struct aml_resource_common_serialbus common_serial_bus;
+-			memcpy(&common_serial_bus, aml_resource,
+-			       sizeof(common_serial_bus));
+-
+ 			buffer_size =
+ 			    acpi_gbl_resource_struct_serial_bus_sizes
+-			    [common_serial_bus.type] + extra_struct_bytes;
++			    [aml_resource->common_serial_bus.type] +
++			    extra_struct_bytes;
+ 		} else {
+ 			buffer_size =
+ 			    acpi_gbl_resource_struct_sizes[resource_index] +
+diff --git a/drivers/acpi/acpica/rslist.c b/drivers/acpi/acpica/rslist.c
+index 164c96e063c6e8..e46efaa889cdd7 100644
+--- a/drivers/acpi/acpica/rslist.c
++++ b/drivers/acpi/acpica/rslist.c
+@@ -55,21 +55,15 @@ acpi_rs_convert_aml_to_resources(u8 * aml,
+ 	aml_resource = ACPI_CAST_PTR(union aml_resource, aml);
+ 
+ 	if (acpi_ut_get_resource_type(aml) == ACPI_RESOURCE_NAME_SERIAL_BUS) {
+-
+-		/* Avoid undefined behavior: member access within misaligned address */
+-
+-		struct aml_resource_common_serialbus common_serial_bus;
+-		memcpy(&common_serial_bus, aml_resource,
+-		       sizeof(common_serial_bus));
+-
+-		if (common_serial_bus.type > AML_RESOURCE_MAX_SERIALBUSTYPE) {
++		if (aml_resource->common_serial_bus.type >
++		    AML_RESOURCE_MAX_SERIALBUSTYPE) {
+ 			conversion_table = NULL;
+ 		} else {
+ 			/* This is an I2C, SPI, UART, or CSI2 serial_bus descriptor */
+ 
+ 			conversion_table =
+ 			    acpi_gbl_convert_resource_serial_bus_dispatch
+-			    [common_serial_bus.type];
++			    [aml_resource->common_serial_bus.type];
+ 		}
+ 	} else {
+ 		conversion_table =
+diff --git a/drivers/acpi/acpica/utprint.c b/drivers/acpi/acpica/utprint.c
+index 42b30b9f93128e..7fad03c5252c35 100644
+--- a/drivers/acpi/acpica/utprint.c
++++ b/drivers/acpi/acpica/utprint.c
+@@ -333,11 +333,8 @@ int vsnprintf(char *string, acpi_size size, const char *format, va_list args)
+ 
+ 	pos = string;
+ 
+-	if (size != ACPI_UINT32_MAX) {
+-		end = string + size;
+-	} else {
+-		end = ACPI_CAST_PTR(char, ACPI_UINT32_MAX);
+-	}
++	size = ACPI_MIN(size, ACPI_PTR_DIFF(ACPI_MAX_PTR, string));
++	end = string + size;
+ 
+ 	for (; *format; ++format) {
+ 		if (*format != '%') {
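Instead of special-casing ACPI_UINT32_MAX, the new code clamps size so that string + size cannot wrap past the top of the address space before computing end. The same guard in portable C (illustrative helper):

#include <stdint.h>
#include <stddef.h>

static size_t clamp_to_address_space(const char *base, size_t len)
{
	/* Bytes remaining between base and the highest address. */
	size_t room = (size_t)(UINTPTR_MAX - (uintptr_t)base);

	return len < room ? len : room;
}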
+diff --git a/drivers/acpi/acpica/utresrc.c b/drivers/acpi/acpica/utresrc.c
+index cff7901f7866ec..e1cc3d3487508c 100644
+--- a/drivers/acpi/acpica/utresrc.c
++++ b/drivers/acpi/acpica/utresrc.c
+@@ -361,20 +361,16 @@ acpi_ut_validate_resource(struct acpi_walk_state *walk_state,
+ 	aml_resource = ACPI_CAST_PTR(union aml_resource, aml);
+ 	if (resource_type == ACPI_RESOURCE_NAME_SERIAL_BUS) {
+ 
+-		/* Avoid undefined behavior: member access within misaligned address */
+-
+-		struct aml_resource_common_serialbus common_serial_bus;
+-		memcpy(&common_serial_bus, aml_resource,
+-		       sizeof(common_serial_bus));
+-
+ 		/* Validate the bus_type field */
+ 
+-		if ((common_serial_bus.type == 0) ||
+-		    (common_serial_bus.type > AML_RESOURCE_MAX_SERIALBUSTYPE)) {
++		if ((aml_resource->common_serial_bus.type == 0) ||
++		    (aml_resource->common_serial_bus.type >
++		     AML_RESOURCE_MAX_SERIALBUSTYPE)) {
+ 			if (walk_state) {
+ 				ACPI_ERROR((AE_INFO,
+ 					    "Invalid/unsupported SerialBus resource descriptor: BusType 0x%2.2X",
+-					    common_serial_bus.type));
++					    aml_resource->common_serial_bus.
++					    type));
+ 			}
+ 			return (AE_AML_INVALID_RESOURCE_TYPE);
+ 		}
+diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
+index 65fa3444367a13..6a7ac34d73bda6 100644
+--- a/drivers/acpi/battery.c
++++ b/drivers/acpi/battery.c
+@@ -243,10 +243,23 @@ static int acpi_battery_get_property(struct power_supply *psy,
+ 		break;
+ 	case POWER_SUPPLY_PROP_CURRENT_NOW:
+ 	case POWER_SUPPLY_PROP_POWER_NOW:
+-		if (battery->rate_now == ACPI_BATTERY_VALUE_UNKNOWN)
++		if (battery->rate_now == ACPI_BATTERY_VALUE_UNKNOWN) {
+ 			ret = -ENODEV;
+-		else
+-			val->intval = battery->rate_now * 1000;
++			break;
++		}
++
++		val->intval = battery->rate_now * 1000;
++		/*
++		 * When discharging, the current should be reported as a
++		 * negative number as per the power supply class interface
++		 * definition.
++		 */
++		if (psp == POWER_SUPPLY_PROP_CURRENT_NOW &&
++		    (battery->state & ACPI_BATTERY_STATE_DISCHARGING) &&
++		    acpi_battery_handle_discharging(battery)
++				== POWER_SUPPLY_STATUS_DISCHARGING)
++			val->intval = -val->intval;
++
+ 		break;
+ 	case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
+ 	case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
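The added branch applies the power-supply class sign convention: CURRENT_NOW is negative while discharging, and ACPI reports the rate as an unsigned magnitude. The sign rule reduced to a toy helper (names illustrative):

static int battery_current_ua(unsigned int rate_ma, int discharging)
{
	int ua = (int)rate_ma * 1000;	/* mA -> uA */

	return discharging ? -ua : ua;
}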
+diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
+index 16917dc3ad604c..6234055d259846 100644
+--- a/drivers/acpi/bus.c
++++ b/drivers/acpi/bus.c
+@@ -1444,8 +1444,10 @@ static int __init acpi_init(void)
+ 	}
+ 
+ 	acpi_kobj = kobject_create_and_add("acpi", firmware_kobj);
+-	if (!acpi_kobj)
+-		pr_debug("%s: kset create error\n", __func__);
++	if (!acpi_kobj) {
++		pr_err("Failed to register kobject\n");
++		return -ENOMEM;
++	}
+ 
+ 	init_prmt();
+ 	acpi_init_pcc();
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index 650122deb480d0..395240cb3666ee 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -1410,8 +1410,15 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
+ 
+ static bool ahci_broken_lpm(struct pci_dev *pdev)
+ {
++	/*
++	 * Platforms with LPM problems.
++	 * If driver_data is NULL, there is no existing BIOS version with
++	 * functioning LPM.
++	 * If driver_data is non-NULL, then driver_data contains the DMI BIOS
++	 * build date of the first BIOS version with functioning LPM (i.e. older
++	 * BIOS versions have broken LPM).
++	 */
+ 	static const struct dmi_system_id sysids[] = {
+-		/* Various Lenovo 50 series have LPM issues with older BIOSen */
+ 		{
+ 			.matches = {
+ 				DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+@@ -1446,6 +1453,29 @@ static bool ahci_broken_lpm(struct pci_dev *pdev)
+ 			 */
+ 			.driver_data = "20180310", /* 2.35 */
+ 		},
++		{
++			.matches = {
++				DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++				DMI_MATCH(DMI_PRODUCT_VERSION, "ASUSPRO D840MB_M840SA"),
++			},
++			/* 320 is broken, there is no known good version. */
++		},
++		{
++			/*
++			 * AMD 500 Series Chipset SATA Controller [1022:43eb]
++			 * on this motherboard timeouts on ports 5 and 6 when
++			 * LPM is enabled, at least with WDC WD20EFAX-68FB5N0
++			 * hard drives. LPM with the same drive works fine on
++			 * all other ports on the same controller.
++			 */
++			.matches = {
++				DMI_MATCH(DMI_BOARD_VENDOR,
++					  "ASUSTeK COMPUTER INC."),
++				DMI_MATCH(DMI_BOARD_NAME,
++					  "ROG STRIX B550-F GAMING (WI-FI)"),
++			},
++			/* 3621 is broken, there is no known good version. */
++		},
+ 		{ }	/* terminate list */
+ 	};
+ 	const struct dmi_system_id *dmi = dmi_first_match(sysids);
+@@ -1455,6 +1485,9 @@ static bool ahci_broken_lpm(struct pci_dev *pdev)
+ 	if (!dmi)
+ 		return false;
+ 
++	if (!dmi->driver_data)
++		return true;
++
+ 	dmi_get_date(DMI_BIOS_DATE, &year, &month, &date);
+ 	snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date);
+ 
+diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
+index 696b99720dcbda..d82728a01832b5 100644
+--- a/drivers/ata/pata_via.c
++++ b/drivers/ata/pata_via.c
+@@ -368,7 +368,8 @@ static unsigned int via_mode_filter(struct ata_device *dev, unsigned int mask)
+ 	}
+ 
+ 	if (dev->class == ATA_DEV_ATAPI &&
+-	    dmi_check_system(no_atapi_dma_dmi_table)) {
++	    (dmi_check_system(no_atapi_dma_dmi_table) ||
++	     config->id == PCI_DEVICE_ID_VIA_6415)) {
+ 		ata_dev_warn(dev, "controller locks up on ATAPI DMA, forcing PIO\n");
+ 		mask &= ATA_MASK_PIO;
+ 	}
+diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
+index d4aa0f353b6c80..eeae160c898d38 100644
+--- a/drivers/atm/atmtcp.c
++++ b/drivers/atm/atmtcp.c
+@@ -288,7 +288,9 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
+ 	struct sk_buff *new_skb;
+ 	int result = 0;
+ 
+-	if (!skb->len) return 0;
++	if (skb->len < sizeof(struct atmtcp_hdr))
++		goto done;
++
+ 	dev = vcc->dev_data;
+ 	hdr = (struct atmtcp_hdr *) skb->data;
+ 	if (hdr->length == ATMTCP_HDR_MAGIC) {
+diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c
+index 0e60dd650b5e0d..70db08f3ac6fae 100644
+--- a/drivers/base/platform-msi.c
++++ b/drivers/base/platform-msi.c
+@@ -95,5 +95,6 @@ EXPORT_SYMBOL_GPL(platform_device_msi_init_and_alloc_irqs);
+ void platform_device_msi_free_irqs_all(struct device *dev)
+ {
+ 	msi_domain_free_irqs_all(dev, MSI_DEFAULT_DOMAIN);
++	msi_remove_device_irq_domain(dev, MSI_DEFAULT_DOMAIN);
+ }
+ EXPORT_SYMBOL_GPL(platform_device_msi_free_irqs_all);
+diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
+index 04113adb092b52..99f25d6b2027ad 100644
+--- a/drivers/base/power/runtime.c
++++ b/drivers/base/power/runtime.c
+@@ -1003,7 +1003,7 @@ static enum hrtimer_restart  pm_suspend_timer_fn(struct hrtimer *timer)
+ 	 * If 'expires' is after the current time, we've been called
+ 	 * too early.
+ 	 */
+-	if (expires > 0 && expires < ktime_get_mono_fast_ns()) {
++	if (expires > 0 && expires <= ktime_get_mono_fast_ns()) {
+ 		dev->power.timer_expires = 0;
+ 		rpm_suspend(dev, dev->power.timer_autosuspends ?
+ 		    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
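The comparison is relaxed from '<' to '<=': an hrtimer callback that runs exactly at its expiry time is on time, but the strict comparison classified that case as "called too early" and skipped the suspend. The boundary condition on its own (illustrative):

#include <stdbool.h>

static bool suspend_timer_due(unsigned long long now_ns,
			      unsigned long long expires_ns)
{
	/* Firing exactly at the deadline must count as due. */
	return expires_ns > 0 && expires_ns <= now_ns;
}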
+diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
+index eb6eb25b343baf..53b3f0061ad12a 100644
+--- a/drivers/base/swnode.c
++++ b/drivers/base/swnode.c
+@@ -529,7 +529,7 @@ software_node_get_reference_args(const struct fwnode_handle *fwnode,
+ 	if (prop->is_inline)
+ 		return -EINVAL;
+ 
+-	if (index * sizeof(*ref) >= prop->length)
++	if ((index + 1) * sizeof(*ref) > prop->length)
+ 		return -ENOENT;
+ 
+ 	ref_array = prop->pointer;
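The old test only verified that element index starts inside the property buffer, so a final element that begins in range but extends past the end slipped through; comparing the element's end offset closes the off-by-one. In isolation (illustrative helper; assumes index + 1 does not overflow):

#include <stdbool.h>
#include <stddef.h>

static bool element_fits(size_t index, size_t elem_size, size_t buf_len)
{
	/* "index * elem_size >= buf_len" only rejects elements that
	 * start out of bounds; this also rejects a truncated tail. */
	return (index + 1) * elem_size <= buf_len;
}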
+diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
+index 3523dd82d7a002..280679bde3a506 100644
+--- a/drivers/block/aoe/aoedev.c
++++ b/drivers/block/aoe/aoedev.c
+@@ -198,6 +198,7 @@ aoedev_downdev(struct aoedev *d)
+ {
+ 	struct aoetgt *t, **tt, **te;
+ 	struct list_head *head, *pos, *nx;
++	struct request *rq, *rqnext;
+ 	int i;
+ 
+ 	d->flags &= ~DEVFL_UP;
+@@ -223,6 +224,13 @@ aoedev_downdev(struct aoedev *d)
+ 	/* clean out the in-process request (if any) */
+ 	aoe_failip(d);
+ 
++	/* clean out any queued block requests */
++	list_for_each_entry_safe(rq, rqnext, &d->rq_list, queuelist) {
++		list_del_init(&rq->queuelist);
++		blk_mq_start_request(rq);
++		blk_mq_end_request(rq, BLK_STS_IOERR);
++	}
++
+ 	/* fast fail all pending I/O */
+ 	if (d->blkq) {
+ 		/* UP is cleared, freeze+quiesce to insure all are errored */
+diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
+index a01a547c562f35..746ef36e58df20 100644
+--- a/drivers/block/ublk_drv.c
++++ b/drivers/block/ublk_drv.c
+@@ -2457,6 +2457,9 @@ static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
+ 	if (copy_from_user(&info, argp, sizeof(info)))
+ 		return -EFAULT;
+ 
++	if (info.queue_depth > UBLK_MAX_QUEUE_DEPTH || info.nr_hw_queues > UBLK_MAX_NR_QUEUES)
++		return -EINVAL;
++
+ 	if (capable(CAP_SYS_ADMIN))
+ 		info.flags &= ~UBLK_F_UNPRIVILEGED_DEV;
+ 	else if (!(info.flags & UBLK_F_UNPRIVILEGED_DEV))
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index af2be0271806f8..aa63852060500c 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -672,6 +672,8 @@ static const struct usb_device_id quirks_table[] = {
+ 						     BTUSB_WIDEBAND_SPEECH },
+ 	{ USB_DEVICE(0x13d3, 0x3568), .driver_info = BTUSB_MEDIATEK |
+ 						     BTUSB_WIDEBAND_SPEECH },
++	{ USB_DEVICE(0x13d3, 0x3584), .driver_info = BTUSB_MEDIATEK |
++						     BTUSB_WIDEBAND_SPEECH },
+ 	{ USB_DEVICE(0x13d3, 0x3605), .driver_info = BTUSB_MEDIATEK |
+ 						     BTUSB_WIDEBAND_SPEECH },
+ 	{ USB_DEVICE(0x13d3, 0x3607), .driver_info = BTUSB_MEDIATEK |
+@@ -712,6 +714,8 @@ static const struct usb_device_id quirks_table[] = {
+ 						     BTUSB_WIDEBAND_SPEECH },
+ 	{ USB_DEVICE(0x13d3, 0x3628), .driver_info = BTUSB_MEDIATEK |
+ 						     BTUSB_WIDEBAND_SPEECH },
++	{ USB_DEVICE(0x13d3, 0x3630), .driver_info = BTUSB_MEDIATEK |
++						     BTUSB_WIDEBAND_SPEECH },
+ 
+ 	/* Additional Realtek 8723AE Bluetooth devices */
+ 	{ USB_DEVICE(0x0930, 0x021d), .driver_info = BTUSB_REALTEK },
+diff --git a/drivers/bus/fsl-mc/fsl-mc-uapi.c b/drivers/bus/fsl-mc/fsl-mc-uapi.c
+index 9c4c1395fcdbf2..a376ec66165348 100644
+--- a/drivers/bus/fsl-mc/fsl-mc-uapi.c
++++ b/drivers/bus/fsl-mc/fsl-mc-uapi.c
+@@ -275,13 +275,13 @@ static struct fsl_mc_cmd_desc fsl_mc_accepted_cmds[] = {
+ 		.size = 8,
+ 	},
+ 	[DPSW_GET_TAILDROP] = {
+-		.cmdid_value = 0x0A80,
++		.cmdid_value = 0x0A90,
+ 		.cmdid_mask = 0xFFF0,
+ 		.token = true,
+ 		.size = 14,
+ 	},
+ 	[DPSW_SET_TAILDROP] = {
+-		.cmdid_value = 0x0A90,
++		.cmdid_value = 0x0A80,
+ 		.cmdid_mask = 0xFFF0,
+ 		.token = true,
+ 		.size = 24,
+diff --git a/drivers/bus/fsl-mc/mc-io.c b/drivers/bus/fsl-mc/mc-io.c
+index 95b10a6cf3073f..8b7a34f4db94bb 100644
+--- a/drivers/bus/fsl-mc/mc-io.c
++++ b/drivers/bus/fsl-mc/mc-io.c
+@@ -214,12 +214,19 @@ int __must_check fsl_mc_portal_allocate(struct fsl_mc_device *mc_dev,
+ 	if (error < 0)
+ 		goto error_cleanup_resource;
+ 
+-	dpmcp_dev->consumer_link = device_link_add(&mc_dev->dev,
+-						   &dpmcp_dev->dev,
+-						   DL_FLAG_AUTOREMOVE_CONSUMER);
+-	if (!dpmcp_dev->consumer_link) {
+-		error = -EINVAL;
+-		goto error_cleanup_mc_io;
++	/* If the DPRC device itself tries to allocate a portal (usually for
++	 * UAPI interaction), don't add a device link between them since the
++	 * DPMCP device is an actual child device of the DPRC and a reverse
++	 * dependency is not allowed.
++	 */
++	if (mc_dev != mc_bus_dev) {
++		dpmcp_dev->consumer_link = device_link_add(&mc_dev->dev,
++							   &dpmcp_dev->dev,
++							   DL_FLAG_AUTOREMOVE_CONSUMER);
++		if (!dpmcp_dev->consumer_link) {
++			error = -EINVAL;
++			goto error_cleanup_mc_io;
++		}
+ 	}
+ 
+ 	*new_mc_io = mc_io;
+diff --git a/drivers/bus/fsl-mc/mc-sys.c b/drivers/bus/fsl-mc/mc-sys.c
+index f2052cd0a05178..b22c59d57c8f0a 100644
+--- a/drivers/bus/fsl-mc/mc-sys.c
++++ b/drivers/bus/fsl-mc/mc-sys.c
+@@ -19,7 +19,7 @@
+ /*
+  * Timeout in milliseconds to wait for the completion of an MC command
+  */
+-#define MC_CMD_COMPLETION_TIMEOUT_MS	500
++#define MC_CMD_COMPLETION_TIMEOUT_MS	15000
+ 
+ /*
+  * usleep_range() min and max values used to throttle down polling
+diff --git a/drivers/bus/mhi/ep/ring.c b/drivers/bus/mhi/ep/ring.c
+index aeb53b2c34a8cd..26357ee68dee98 100644
+--- a/drivers/bus/mhi/ep/ring.c
++++ b/drivers/bus/mhi/ep/ring.c
+@@ -131,19 +131,23 @@ int mhi_ep_ring_add_element(struct mhi_ep_ring *ring, struct mhi_ring_element *e
+ 	}
+ 
+ 	old_offset = ring->rd_offset;
+-	mhi_ep_ring_inc_index(ring);
+ 
+ 	dev_dbg(dev, "Adding an element to ring at offset (%zu)\n", ring->rd_offset);
++	buf_info.host_addr = ring->rbase + (old_offset * sizeof(*el));
++	buf_info.dev_addr = el;
++	buf_info.size = sizeof(*el);
++
++	ret = mhi_cntrl->write_sync(mhi_cntrl, &buf_info);
++	if (ret)
++		return ret;
++
++	mhi_ep_ring_inc_index(ring);
+ 
+ 	/* Update rp in ring context */
+ 	rp = cpu_to_le64(ring->rd_offset * sizeof(*el) + ring->rbase);
+ 	memcpy_toio((void __iomem *) &ring->ring_ctx->generic.rp, &rp, sizeof(u64));
+ 
+-	buf_info.host_addr = ring->rbase + (old_offset * sizeof(*el));
+-	buf_info.dev_addr = el;
+-	buf_info.size = sizeof(*el);
+-
+-	return mhi_cntrl->write_sync(mhi_cntrl, &buf_info);
++	return ret;
+ }
+ 
+ void mhi_ep_ring_init(struct mhi_ep_ring *ring, enum mhi_ep_ring_type type, u32 id)
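The reordering above follows the producer-side publish rule: copy the element into the ring before advancing the read pointer in the shared ring context, so the host never sees a pointer that runs past initialized data, and a failed write no longer leaves rd_offset advanced. The same discipline in a generic single-producer ring, sketched with C11 atomics:

#include <stdatomic.h>

#define RING_SLOTS 64

struct elem { unsigned long words[4]; };

struct ring {
	struct elem slots[RING_SLOTS];
	_Atomic unsigned int tail;	/* index the consumer reads */
};

static void ring_push(struct ring *r, const struct elem *e)
{
	unsigned int t = atomic_load_explicit(&r->tail,
					      memory_order_relaxed);

	r->slots[t % RING_SLOTS] = *e;		/* 1: write the element  */
	atomic_store_explicit(&r->tail, t + 1,	/* 2: then publish index */
			      memory_order_release);
}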
+diff --git a/drivers/bus/mhi/host/pm.c b/drivers/bus/mhi/host/pm.c
+index 11c0e751f22398..0ccbcb717955a3 100644
+--- a/drivers/bus/mhi/host/pm.c
++++ b/drivers/bus/mhi/host/pm.c
+@@ -602,6 +602,7 @@ static void mhi_pm_sys_error_transition(struct mhi_controller *mhi_cntrl)
+ 	struct mhi_cmd *mhi_cmd;
+ 	struct mhi_event_ctxt *er_ctxt;
+ 	struct device *dev = &mhi_cntrl->mhi_dev->dev;
++	bool reset_device = false;
+ 	int ret, i;
+ 
+ 	dev_dbg(dev, "Transitioning from PM state: %s to: %s\n",
+@@ -630,8 +631,23 @@ static void mhi_pm_sys_error_transition(struct mhi_controller *mhi_cntrl)
+ 	/* Wake up threads waiting for state transition */
+ 	wake_up_all(&mhi_cntrl->state_event);
+ 
+-	/* Trigger MHI RESET so that the device will not access host memory */
+ 	if (MHI_REG_ACCESS_VALID(prev_state)) {
++		/*
++		 * If the device is in PBL or SBL, it will only respond to
++		 * RESET if the device is in SYSERR state. SYSERR might
++		 * already be cleared at this point.
++		 */
++		enum mhi_state cur_state = mhi_get_mhi_state(mhi_cntrl);
++		enum mhi_ee_type cur_ee = mhi_get_exec_env(mhi_cntrl);
++
++		if (cur_state == MHI_STATE_SYS_ERR)
++			reset_device = true;
++		else if (cur_ee != MHI_EE_PBL && cur_ee != MHI_EE_SBL)
++			reset_device = true;
++	}
++
++	/* Trigger MHI RESET so that the device will not access host memory */
++	if (reset_device) {
+ 		u32 in_reset = -1;
+ 		unsigned long timeout = msecs_to_jiffies(mhi_cntrl->timeout_ms);
+ 
+diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
+index 270a94a06e05ce..f715c8d281293b 100644
+--- a/drivers/bus/ti-sysc.c
++++ b/drivers/bus/ti-sysc.c
+@@ -677,51 +677,6 @@ static int sysc_parse_and_check_child_range(struct sysc *ddata)
+ 	return 0;
+ }
+ 
+-/* Interconnect instances to probe before l4_per instances */
+-static struct resource early_bus_ranges[] = {
+-	/* am3/4 l4_wkup */
+-	{ .start = 0x44c00000, .end = 0x44c00000 + 0x300000, },
+-	/* omap4/5 and dra7 l4_cfg */
+-	{ .start = 0x4a000000, .end = 0x4a000000 + 0x300000, },
+-	/* omap4 l4_wkup */
+-	{ .start = 0x4a300000, .end = 0x4a300000 + 0x30000,  },
+-	/* omap5 and dra7 l4_wkup without dra7 dcan segment */
+-	{ .start = 0x4ae00000, .end = 0x4ae00000 + 0x30000,  },
+-};
+-
+-static atomic_t sysc_defer = ATOMIC_INIT(10);
+-
+-/**
+- * sysc_defer_non_critical - defer non_critical interconnect probing
+- * @ddata: device driver data
+- *
+- * We want to probe l4_cfg and l4_wkup interconnect instances before any
+- * l4_per instances as l4_per instances depend on resources on l4_cfg and
+- * l4_wkup interconnects.
+- */
+-static int sysc_defer_non_critical(struct sysc *ddata)
+-{
+-	struct resource *res;
+-	int i;
+-
+-	if (!atomic_read(&sysc_defer))
+-		return 0;
+-
+-	for (i = 0; i < ARRAY_SIZE(early_bus_ranges); i++) {
+-		res = &early_bus_ranges[i];
+-		if (ddata->module_pa >= res->start &&
+-		    ddata->module_pa <= res->end) {
+-			atomic_set(&sysc_defer, 0);
+-
+-			return 0;
+-		}
+-	}
+-
+-	atomic_dec_if_positive(&sysc_defer);
+-
+-	return -EPROBE_DEFER;
+-}
+-
+ static struct device_node *stdout_path;
+ 
+ static void sysc_init_stdout_path(struct sysc *ddata)
+@@ -947,10 +902,6 @@ static int sysc_map_and_check_registers(struct sysc *ddata)
+ 	if (error)
+ 		return error;
+ 
+-	error = sysc_defer_non_critical(ddata);
+-	if (error)
+-		return error;
+-
+ 	sysc_check_children(ddata);
+ 
+ 	if (!of_property_present(np, "reg"))
+diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c
+index 4f92b83965d5a9..b72eebd0fa4749 100644
+--- a/drivers/clk/meson/g12a.c
++++ b/drivers/clk/meson/g12a.c
+@@ -4099,6 +4099,7 @@ static const struct clk_parent_data spicc_sclk_parent_data[] = {
+ 	{ .hw = &g12a_clk81.hw },
+ 	{ .hw = &g12a_fclk_div4.hw },
+ 	{ .hw = &g12a_fclk_div3.hw },
++	{ .hw = &g12a_fclk_div2.hw },
+ 	{ .hw = &g12a_fclk_div5.hw },
+ 	{ .hw = &g12a_fclk_div7.hw },
+ };
+diff --git a/drivers/clk/qcom/gcc-x1e80100.c b/drivers/clk/qcom/gcc-x1e80100.c
+index 009f39139b6440..3e44757e25d324 100644
+--- a/drivers/clk/qcom/gcc-x1e80100.c
++++ b/drivers/clk/qcom/gcc-x1e80100.c
+@@ -6753,6 +6753,10 @@ static int gcc_x1e80100_probe(struct platform_device *pdev)
+ 	/* Clear GDSC_SLEEP_ENA_VOTE to stop votes being auto-removed in sleep. */
+ 	regmap_write(regmap, 0x52224, 0x0);
+ 
++	/* FORCE_MEM_CORE_ON for ufs phy ice core and gcc ufs phy axi clocks  */
++	qcom_branch_set_force_mem_core(regmap, gcc_ufs_phy_ice_core_clk, true);
++	qcom_branch_set_force_mem_core(regmap, gcc_ufs_phy_axi_clk, true);
++
+ 	return qcom_cc_really_probe(&pdev->dev, &gcc_x1e80100_desc, regmap);
+ }
+ 
+diff --git a/drivers/clk/rockchip/clk-rk3036.c b/drivers/clk/rockchip/clk-rk3036.c
+index d341ce0708aac3..e4af3a92863794 100644
+--- a/drivers/clk/rockchip/clk-rk3036.c
++++ b/drivers/clk/rockchip/clk-rk3036.c
+@@ -431,6 +431,7 @@ static const char *const rk3036_critical_clocks[] __initconst = {
+ 	"hclk_peri",
+ 	"pclk_peri",
+ 	"pclk_ddrupctl",
++	"ddrphy",
+ };
+ 
+ static void __init rk3036_clk_init(struct device_node *np)
+diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
+index 7a16d193222866..62dbc5701e993a 100644
+--- a/drivers/cpufreq/amd-pstate.c
++++ b/drivers/cpufreq/amd-pstate.c
+@@ -482,6 +482,9 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
+ 	u32 nominal_perf = READ_ONCE(cpudata->nominal_perf);
+ 	u64 value = prev;
+ 
++	if (!policy)
++		return;
++
+ 	min_perf = clamp_t(unsigned long, min_perf, cpudata->min_limit_perf,
+ 			cpudata->max_limit_perf);
+ 	max_perf = clamp_t(unsigned long, max_perf, cpudata->min_limit_perf,
+diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
+index 7e7c1613a67c6d..beb660ca240cc8 100644
+--- a/drivers/cpufreq/scmi-cpufreq.c
++++ b/drivers/cpufreq/scmi-cpufreq.c
+@@ -367,6 +367,40 @@ static struct cpufreq_driver scmi_cpufreq_driver = {
+ 	.register_em	= scmi_cpufreq_register_em,
+ };
+ 
++static bool scmi_dev_used_by_cpus(struct device *scmi_dev)
++{
++	struct device_node *scmi_np = dev_of_node(scmi_dev);
++	struct device_node *cpu_np, *np;
++	struct device *cpu_dev;
++	int cpu, idx;
++
++	if (!scmi_np)
++		return false;
++
++	for_each_possible_cpu(cpu) {
++		cpu_dev = get_cpu_device(cpu);
++		if (!cpu_dev)
++			continue;
++
++		cpu_np = dev_of_node(cpu_dev);
++
++		np = of_parse_phandle(cpu_np, "clocks", 0);
++		of_node_put(np);
++
++		if (np == scmi_np)
++			return true;
++
++		idx = of_property_match_string(cpu_np, "power-domain-names", "perf");
++		np = of_parse_phandle(cpu_np, "power-domains", idx);
++		of_node_put(np);
++
++		if (np == scmi_np)
++			return true;
++	}
++
++	return false;
++}
++
+ static int scmi_cpufreq_probe(struct scmi_device *sdev)
+ {
+ 	int ret;
+@@ -375,7 +409,7 @@ static int scmi_cpufreq_probe(struct scmi_device *sdev)
+ 
+ 	handle = sdev->handle;
+ 
+-	if (!handle)
++	if (!handle || !scmi_dev_used_by_cpus(dev))
+ 		return -ENODEV;
+ 
+ 	perf_ops = handle->devm_protocol_get(sdev, SCMI_PROTOCOL_PERF, &ph);
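
/*
 * [Editor's sketch, not part of the patch] scmi_dev_used_by_cpus() above
 * leans on a common OF idiom: of_parse_phandle() returns the referenced
 * node, and because the result is only compared by pointer identity and
 * never dereferenced, it can be dropped with of_node_put() before the
 * comparison. Reduced to a helper:
 */
#include <linux/of.h>

static bool sketch_node_references(struct device_node *consumer,
				   const char *prop, int index,
				   struct device_node *target)
{
	struct device_node *np = of_parse_phandle(consumer, prop, index);

	of_node_put(np);		/* identity check only below */
	return np == target;
}
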
+diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_drv.c b/drivers/crypto/intel/qat/qat_420xx/adf_drv.c
+index f49818a13013a3..41420e349572a2 100644
+--- a/drivers/crypto/intel/qat/qat_420xx/adf_drv.c
++++ b/drivers/crypto/intel/qat/qat_420xx/adf_drv.c
+@@ -181,11 +181,19 @@ static void adf_remove(struct pci_dev *pdev)
+ 	adf_cleanup_accel(accel_dev);
+ }
+ 
++static void adf_shutdown(struct pci_dev *pdev)
++{
++	struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
++
++	adf_dev_down(accel_dev);
++}
++
+ static struct pci_driver adf_driver = {
+ 	.id_table = adf_pci_tbl,
+ 	.name = ADF_420XX_DEVICE_NAME,
+ 	.probe = adf_probe,
+ 	.remove = adf_remove,
++	.shutdown = adf_shutdown,
+ 	.sriov_configure = adf_sriov_configure,
+ 	.err_handler = &adf_err_handler,
+ };
+diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
+index 659905e4595034..01b34eda83e919 100644
+--- a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
++++ b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
+@@ -183,11 +183,19 @@ static void adf_remove(struct pci_dev *pdev)
+ 	adf_cleanup_accel(accel_dev);
+ }
+ 
++static void adf_shutdown(struct pci_dev *pdev)
++{
++	struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
++
++	adf_dev_down(accel_dev);
++}
++
+ static struct pci_driver adf_driver = {
+ 	.id_table = adf_pci_tbl,
+ 	.name = ADF_4XXX_DEVICE_NAME,
+ 	.probe = adf_probe,
+ 	.remove = adf_remove,
++	.shutdown = adf_shutdown,
+ 	.sriov_configure = adf_sriov_configure,
+ 	.err_handler = &adf_err_handler,
+ };
+diff --git a/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c
+index 4d18057745d444..b776f7ea0dfb57 100644
+--- a/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c
++++ b/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c
+@@ -19,6 +19,13 @@
+ #include <adf_dbgfs.h>
+ #include "adf_c3xxx_hw_data.h"
+ 
++static void adf_shutdown(struct pci_dev *pdev)
++{
++	struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
++
++	adf_dev_down(accel_dev);
++}
++
+ static const struct pci_device_id adf_pci_tbl[] = {
+ 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_C3XXX), },
+ 	{ }
+@@ -33,6 +40,7 @@ static struct pci_driver adf_driver = {
+ 	.name = ADF_C3XXX_DEVICE_NAME,
+ 	.probe = adf_probe,
+ 	.remove = adf_remove,
++	.shutdown = adf_shutdown,
+ 	.sriov_configure = adf_sriov_configure,
+ 	.err_handler = &adf_err_handler,
+ };
+diff --git a/drivers/crypto/intel/qat/qat_c62x/adf_drv.c b/drivers/crypto/intel/qat/qat_c62x/adf_drv.c
+index e6b5de55434ec1..5310149c311e21 100644
+--- a/drivers/crypto/intel/qat/qat_c62x/adf_drv.c
++++ b/drivers/crypto/intel/qat/qat_c62x/adf_drv.c
+@@ -19,6 +19,13 @@
+ #include <adf_dbgfs.h>
+ #include "adf_c62x_hw_data.h"
+ 
++static void adf_shutdown(struct pci_dev *pdev)
++{
++	struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
++
++	adf_dev_down(accel_dev);
++}
++
+ static const struct pci_device_id adf_pci_tbl[] = {
+ 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_C62X), },
+ 	{ }
+@@ -33,6 +40,7 @@ static struct pci_driver adf_driver = {
+ 	.name = ADF_C62X_DEVICE_NAME,
+ 	.probe = adf_probe,
+ 	.remove = adf_remove,
++	.shutdown = adf_shutdown,
+ 	.sriov_configure = adf_sriov_configure,
+ 	.err_handler = &adf_err_handler,
+ };
+diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c
+index 2a50cce415151b..5ddf567ffcad6e 100644
+--- a/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c
++++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c
+@@ -19,6 +19,13 @@
+ #include <adf_dbgfs.h>
+ #include "adf_dh895xcc_hw_data.h"
+ 
++static void adf_shutdown(struct pci_dev *pdev)
++{
++	struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
++
++	adf_dev_down(accel_dev);
++}
++
+ static const struct pci_device_id adf_pci_tbl[] = {
+ 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_DH895XCC), },
+ 	{ }
+@@ -33,6 +40,7 @@ static struct pci_driver adf_driver = {
+ 	.name = ADF_DH895XCC_DEVICE_NAME,
+ 	.probe = adf_probe,
+ 	.remove = adf_remove,
++	.shutdown = adf_shutdown,
+ 	.sriov_configure = adf_sriov_configure,
+ 	.err_handler = &adf_err_handler,
+ };
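
/*
 * [Editor's note] All five QAT drivers in this patch gain the same PCI
 * .shutdown hook, which the PCI core calls on reboot or kexec so the
 * accelerator stops DMA and interrupts before control is handed over.
 * A minimal sketch of the pattern; the example_* names are hypothetical
 * stand-ins for the adf_* symbols used above:
 */
#include <linux/pci.h>

static void example_shutdown(struct pci_dev *pdev)
{
	struct example_dev *edev = pci_get_drvdata(pdev);

	example_dev_down(edev);			/* quiesce, don't free */
}

static struct pci_driver example_driver = {
	.name     = "example",
	.id_table = example_pci_tbl,
	.probe    = example_probe,
	.remove   = example_remove,
	.shutdown = example_shutdown,		/* the new hook */
};
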
+diff --git a/drivers/crypto/marvell/cesa/cesa.c b/drivers/crypto/marvell/cesa/cesa.c
+index 5fd31ba715c22a..24273cb082ba59 100644
+--- a/drivers/crypto/marvell/cesa/cesa.c
++++ b/drivers/crypto/marvell/cesa/cesa.c
+@@ -94,7 +94,7 @@ static int mv_cesa_std_process(struct mv_cesa_engine *engine, u32 status)
+ 
+ static int mv_cesa_int_process(struct mv_cesa_engine *engine, u32 status)
+ {
+-	if (engine->chain.first && engine->chain.last)
++	if (engine->chain_hw.first && engine->chain_hw.last)
+ 		return mv_cesa_tdma_process(engine, status);
+ 
+ 	return mv_cesa_std_process(engine, status);
+diff --git a/drivers/crypto/marvell/cesa/cesa.h b/drivers/crypto/marvell/cesa/cesa.h
+index d215a6bed6bc7b..50ca1039fdaa7a 100644
+--- a/drivers/crypto/marvell/cesa/cesa.h
++++ b/drivers/crypto/marvell/cesa/cesa.h
+@@ -440,8 +440,10 @@ struct mv_cesa_dev {
+  *			SRAM
+  * @queue:		fifo of the pending crypto requests
+  * @load:		engine load counter, useful for load balancing
+- * @chain:		list of the current tdma descriptors being processed
+- *			by this engine.
++ * @chain_hw:		list of the current tdma descriptors being processed
++ *			by the hardware.
++ * @chain_sw:		list of the current tdma descriptors that will be
++ *			submitted to the hardware.
+  * @complete_queue:	fifo of the processed requests by the engine
+  *
+  * Structure storing CESA engine information.
+@@ -463,7 +465,8 @@ struct mv_cesa_engine {
+ 	struct gen_pool *pool;
+ 	struct crypto_queue queue;
+ 	atomic_t load;
+-	struct mv_cesa_tdma_chain chain;
++	struct mv_cesa_tdma_chain chain_hw;
++	struct mv_cesa_tdma_chain chain_sw;
+ 	struct list_head complete_queue;
+ 	int irq;
+ };
+diff --git a/drivers/crypto/marvell/cesa/tdma.c b/drivers/crypto/marvell/cesa/tdma.c
+index 388a06e180d64a..243305354420c1 100644
+--- a/drivers/crypto/marvell/cesa/tdma.c
++++ b/drivers/crypto/marvell/cesa/tdma.c
+@@ -38,6 +38,15 @@ void mv_cesa_dma_step(struct mv_cesa_req *dreq)
+ {
+ 	struct mv_cesa_engine *engine = dreq->engine;
+ 
++	spin_lock_bh(&engine->lock);
++	if (engine->chain_sw.first == dreq->chain.first) {
++		engine->chain_sw.first = NULL;
++		engine->chain_sw.last = NULL;
++	}
++	engine->chain_hw.first = dreq->chain.first;
++	engine->chain_hw.last = dreq->chain.last;
++	spin_unlock_bh(&engine->lock);
++
+ 	writel_relaxed(0, engine->regs + CESA_SA_CFG);
+ 
+ 	mv_cesa_set_int_mask(engine, CESA_SA_INT_ACC0_IDMA_DONE);
+@@ -96,25 +105,27 @@ void mv_cesa_dma_prepare(struct mv_cesa_req *dreq,
+ void mv_cesa_tdma_chain(struct mv_cesa_engine *engine,
+ 			struct mv_cesa_req *dreq)
+ {
+-	if (engine->chain.first == NULL && engine->chain.last == NULL) {
+-		engine->chain.first = dreq->chain.first;
+-		engine->chain.last  = dreq->chain.last;
+-	} else {
+-		struct mv_cesa_tdma_desc *last;
++	struct mv_cesa_tdma_desc *last = engine->chain_sw.last;
+ 
+-		last = engine->chain.last;
++	/*
++	 * Break the DMA chain if the request being queued needs the IV
++	 * regs to be set before lauching the request.
++	 * regs to be set before launching the request.
++	if (!last || dreq->chain.first->flags & CESA_TDMA_SET_STATE)
++		engine->chain_sw.first = dreq->chain.first;
++	else {
+ 		last->next = dreq->chain.first;
+-		engine->chain.last = dreq->chain.last;
+-
+-		/*
+-		 * Break the DMA chain if the CESA_TDMA_BREAK_CHAIN is set on
+-		 * the last element of the current chain, or if the request
+-		 * being queued needs the IV regs to be set before lauching
+-		 * the request.
+-		 */
+-		if (!(last->flags & CESA_TDMA_BREAK_CHAIN) &&
+-		    !(dreq->chain.first->flags & CESA_TDMA_SET_STATE))
+-			last->next_dma = cpu_to_le32(dreq->chain.first->cur_dma);
++		last->next_dma = cpu_to_le32(dreq->chain.first->cur_dma);
++	}
++	last = dreq->chain.last;
++	engine->chain_sw.last = last;
++	/*
++	 * Break the DMA chain if the CESA_TDMA_BREAK_CHAIN is set on
++	 * the last element of the current chain.
++	 */
++	if (last->flags & CESA_TDMA_BREAK_CHAIN) {
++		engine->chain_sw.first = NULL;
++		engine->chain_sw.last = NULL;
+ 	}
+ }
+ 
+@@ -127,7 +138,7 @@ int mv_cesa_tdma_process(struct mv_cesa_engine *engine, u32 status)
+ 
+ 	tdma_cur = readl(engine->regs + CESA_TDMA_CUR);
+ 
+-	for (tdma = engine->chain.first; tdma; tdma = next) {
++	for (tdma = engine->chain_hw.first; tdma; tdma = next) {
+ 		spin_lock_bh(&engine->lock);
+ 		next = tdma->next;
+ 		spin_unlock_bh(&engine->lock);
+@@ -149,12 +160,12 @@ int mv_cesa_tdma_process(struct mv_cesa_engine *engine, u32 status)
+ 								 &backlog);
+ 
+ 			/* Re-chaining to the next request */
+-			engine->chain.first = tdma->next;
++			engine->chain_hw.first = tdma->next;
+ 			tdma->next = NULL;
+ 
+ 			/* If this is the last request, clear the chain */
+-			if (engine->chain.first == NULL)
+-				engine->chain.last  = NULL;
++			if (engine->chain_hw.first == NULL)
++				engine->chain_hw.last  = NULL;
+ 			spin_unlock_bh(&engine->lock);
+ 
+ 			ctx = crypto_tfm_ctx(req->tfm);
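
/*
 * [Editor's sketch, hypothetical sketch_* types] The fix splits the
 * single per-engine descriptor chain into a software list (requests
 * chained but not yet started) and a hardware list (what the DMA engine
 * is actually walking), with ownership transferred under the engine
 * lock when a request is stepped:
 */
#include <linux/spinlock.h>

struct sketch_desc;

struct sketch_chain {
	struct sketch_desc *first, *last;
};

struct sketch_req {
	struct sketch_chain chain;
};

struct sketch_engine {
	spinlock_t lock;
	struct sketch_chain chain_sw;	/* queued, not yet started */
	struct sketch_chain chain_hw;	/* owned by the DMA engine */
};

static void sketch_dma_step(struct sketch_engine *e, struct sketch_req *r)
{
	spin_lock_bh(&e->lock);
	if (e->chain_sw.first == r->chain.first)	/* we consume the sw list */
		e->chain_sw.first = e->chain_sw.last = NULL;
	e->chain_hw = r->chain;		/* the IRQ path may only walk this */
	spin_unlock_bh(&e->lock);
	/* ... program and start the TDMA engine ... */
}
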
+diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
+index 5e836e4e5b449a..959f690b12260d 100644
+--- a/drivers/dma-buf/udmabuf.c
++++ b/drivers/dma-buf/udmabuf.c
+@@ -223,8 +223,7 @@ static int begin_cpu_udmabuf(struct dma_buf *buf,
+ 			ubuf->sg = NULL;
+ 		}
+ 	} else {
+-		dma_sync_sg_for_cpu(dev, ubuf->sg->sgl, ubuf->sg->nents,
+-				    direction);
++		dma_sync_sgtable_for_cpu(dev, ubuf->sg, direction);
+ 	}
+ 
+ 	return ret;
+@@ -239,7 +238,7 @@ static int end_cpu_udmabuf(struct dma_buf *buf,
+ 	if (!ubuf->sg)
+ 		return -EINVAL;
+ 
+-	dma_sync_sg_for_device(dev, ubuf->sg->sgl, ubuf->sg->nents, direction);
++	dma_sync_sgtable_for_device(dev, ubuf->sg, direction);
+ 	return 0;
+ }
+ 
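
/*
 * [Editor's note] dma_sync_sg_for_{cpu,device}() must be passed the same
 * nents that was handed to dma_map_sg(), not the possibly smaller mapped
 * count, which is what sg_table::nents holds after mapping. The sgtable
 * wrappers used above encode that rule; in include/linux/dma-mapping.h
 * the CPU-side one is essentially:
 */
static inline void dma_sync_sgtable_for_cpu(struct device *dev,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents, dir);
}
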
+diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
+index 8420862c90a4d5..a059964b97f8cf 100644
+--- a/drivers/edac/altera_edac.c
++++ b/drivers/edac/altera_edac.c
+@@ -1746,9 +1746,9 @@ altr_edac_a10_device_trig(struct file *file, const char __user *user_buf,
+ 
+ 	local_irq_save(flags);
+ 	if (trig_type == ALTR_UE_TRIGGER_CHAR)
+-		writel(priv->ue_set_mask, set_addr);
++		writew(priv->ue_set_mask, set_addr);
+ 	else
+-		writel(priv->ce_set_mask, set_addr);
++		writew(priv->ce_set_mask, set_addr);
+ 
+ 	/* Ensure the interrupt test bits are set */
+ 	wmb();
+@@ -1778,7 +1778,7 @@ altr_edac_a10_device_trig2(struct file *file, const char __user *user_buf,
+ 
+ 	local_irq_save(flags);
+ 	if (trig_type == ALTR_UE_TRIGGER_CHAR) {
+-		writel(priv->ue_set_mask, set_addr);
++		writew(priv->ue_set_mask, set_addr);
+ 	} else {
+ 		/* Setup read/write of 4 bytes */
+ 		writel(ECC_WORD_WRITE, drvdata->base + ECC_BLK_DBYTECTRL_OFST);
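
/*
 * [Editor's note, unverified hardware detail] writew() performs a 16-bit
 * MMIO store while writel() writes a full 32 bits; presumably the trigger
 * registers touched here are only 16 bits wide, so the wider store could
 * spill into the adjacent half-word. Illustration:
 */
#include <linux/io.h>

static void sketch_trigger(void __iomem *set_addr, u16 mask)
{
	writew(mask, set_addr);		/* touches exactly 16 bits */
}
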
+diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
+index 5d356b7c45897c..c0a8f9c8d4f0bf 100644
+--- a/drivers/edac/amd64_edac.c
++++ b/drivers/edac/amd64_edac.c
+@@ -3882,6 +3882,7 @@ static int per_family_init(struct amd64_pvt *pvt)
+ 			break;
+ 		case 0x70 ... 0x7f:
+ 			pvt->ctl_name			= "F19h_M70h";
++			pvt->max_mcs			= 4;
+ 			pvt->flags.zn_regs_v2		= 1;
+ 			break;
+ 		case 0x90 ... 0x9f:
+diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
+index 993615fa490ebd..f1abe605865ad4 100644
+--- a/drivers/firmware/arm_scmi/driver.c
++++ b/drivers/firmware/arm_scmi/driver.c
+@@ -1708,6 +1708,39 @@ static int scmi_common_get_max_msg_size(const struct scmi_protocol_handle *ph)
+ 	return info->desc->max_msg_size;
+ }
+ 
++/**
++ * scmi_protocol_msg_check  - Check protocol message attributes
++ *
++ * @ph: A reference to the protocol handle.
++ * @message_id: The ID of the message to check.
++ * @attributes: A parameter to optionally return the retrieved message
++ *		attributes, in case of Success.
++ *
++ * A helper to check protocol message attributes for a specific protocol
++ * and message pair.
++ *
++ * Return: 0 on SUCCESS
++ */
++static int scmi_protocol_msg_check(const struct scmi_protocol_handle *ph,
++				   u32 message_id, u32 *attributes)
++{
++	int ret;
++	struct scmi_xfer *t;
++
++	ret = xfer_get_init(ph, PROTOCOL_MESSAGE_ATTRIBUTES,
++			    sizeof(__le32), 0, &t);
++	if (ret)
++		return ret;
++
++	put_unaligned_le32(message_id, t->tx.buf);
++	ret = do_xfer(ph, t);
++	if (!ret && attributes)
++		*attributes = get_unaligned_le32(t->rx.buf);
++	xfer_put(ph, t);
++
++	return ret;
++}
++
+ /**
+  * struct scmi_iterator  - Iterator descriptor
+  * @msg: A reference to the message TX buffer; filled by @prepare_message with
+@@ -1849,6 +1882,7 @@ scmi_common_fastchannel_init(const struct scmi_protocol_handle *ph,
+ 	int ret;
+ 	u32 flags;
+ 	u64 phys_addr;
++	u32 attributes;
+ 	u8 size;
+ 	void __iomem *addr;
+ 	struct scmi_xfer *t;
+@@ -1857,6 +1891,15 @@ scmi_common_fastchannel_init(const struct scmi_protocol_handle *ph,
+ 	struct scmi_msg_resp_desc_fc *resp;
+ 	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
+ 
++	/* Check if the MSG_ID supports fastchannel */
++	ret = scmi_protocol_msg_check(ph, message_id, &attributes);
++	if (ret || !MSG_SUPPORTS_FASTCHANNEL(attributes)) {
++		dev_dbg(ph->dev,
++			"Skip FC init for 0x%02X/%d  domain:%d - ret:%d\n",
++			pi->proto->id, message_id, domain, ret);
++		return;
++	}
++
+ 	if (!p_addr) {
+ 		ret = -EINVAL;
+ 		goto err_out;
+@@ -1984,39 +2027,6 @@ static void scmi_common_fastchannel_db_ring(struct scmi_fc_db_info *db)
+ #endif
+ }
+ 
+-/**
+- * scmi_protocol_msg_check  - Check protocol message attributes
+- *
+- * @ph: A reference to the protocol handle.
+- * @message_id: The ID of the message to check.
+- * @attributes: A parameter to optionally return the retrieved message
+- *		attributes, in case of Success.
+- *
+- * An helper to check protocol message attributes for a specific protocol
+- * and message pair.
+- *
+- * Return: 0 on SUCCESS
+- */
+-static int scmi_protocol_msg_check(const struct scmi_protocol_handle *ph,
+-				   u32 message_id, u32 *attributes)
+-{
+-	int ret;
+-	struct scmi_xfer *t;
+-
+-	ret = xfer_get_init(ph, PROTOCOL_MESSAGE_ATTRIBUTES,
+-			    sizeof(__le32), 0, &t);
+-	if (ret)
+-		return ret;
+-
+-	put_unaligned_le32(message_id, t->tx.buf);
+-	ret = do_xfer(ph, t);
+-	if (!ret && attributes)
+-		*attributes = get_unaligned_le32(t->rx.buf);
+-	xfer_put(ph, t);
+-
+-	return ret;
+-}
+-
+ static const struct scmi_proto_helpers_ops helpers_ops = {
+ 	.extended_name_get = scmi_common_extended_name_get,
+ 	.get_max_msg_size = scmi_common_get_max_msg_size,
+diff --git a/drivers/firmware/arm_scmi/protocols.h b/drivers/firmware/arm_scmi/protocols.h
+index aaee57cdcd5589..d62c4469d1fd9f 100644
+--- a/drivers/firmware/arm_scmi/protocols.h
++++ b/drivers/firmware/arm_scmi/protocols.h
+@@ -31,6 +31,8 @@
+ 
+ #define SCMI_PROTOCOL_VENDOR_BASE	0x80
+ 
++#define MSG_SUPPORTS_FASTCHANNEL(x)	((x) & BIT(0))
++
+ enum scmi_common_cmd {
+ 	PROTOCOL_VERSION = 0x0,
+ 	PROTOCOL_ATTRIBUTES = 0x1,
+diff --git a/drivers/firmware/sysfb.c b/drivers/firmware/sysfb.c
+index a3df782fa687b0..e919940c8bf9ab 100644
+--- a/drivers/firmware/sysfb.c
++++ b/drivers/firmware/sysfb.c
+@@ -124,6 +124,7 @@ static __init int sysfb_init(void)
+ {
+ 	struct screen_info *si = &screen_info;
+ 	struct device *parent;
++	unsigned int type;
+ 	struct simplefb_platform_data mode;
+ 	const char *name;
+ 	bool compatible;
+@@ -151,17 +152,26 @@ static __init int sysfb_init(void)
+ 			goto put_device;
+ 	}
+ 
++	type = screen_info_video_type(si);
++
+ 	/* if the FB is incompatible, create a legacy framebuffer device */
+-	if (si->orig_video_isVGA == VIDEO_TYPE_EFI)
+-		name = "efi-framebuffer";
+-	else if (si->orig_video_isVGA == VIDEO_TYPE_VLFB)
+-		name = "vesa-framebuffer";
+-	else if (si->orig_video_isVGA == VIDEO_TYPE_VGAC)
+-		name = "vga-framebuffer";
+-	else if (si->orig_video_isVGA == VIDEO_TYPE_EGAC)
++	switch (type) {
++	case VIDEO_TYPE_EGAC:
+ 		name = "ega-framebuffer";
+-	else
++		break;
++	case VIDEO_TYPE_VGAC:
++		name = "vga-framebuffer";
++		break;
++	case VIDEO_TYPE_VLFB:
++		name = "vesa-framebuffer";
++		break;
++	case VIDEO_TYPE_EFI:
++		name = "efi-framebuffer";
++		break;
++	default:
+ 		name = "platform-framebuffer";
++		break;
++	}
+ 
+ 	pd = platform_device_alloc(name, 0);
+ 	if (!pd) {
+diff --git a/drivers/gpio/gpio-mlxbf3.c b/drivers/gpio/gpio-mlxbf3.c
+index 10ea71273c8915..9875e34bde72a4 100644
+--- a/drivers/gpio/gpio-mlxbf3.c
++++ b/drivers/gpio/gpio-mlxbf3.c
+@@ -190,7 +190,9 @@ static int mlxbf3_gpio_probe(struct platform_device *pdev)
+ 	struct mlxbf3_gpio_context *gs;
+ 	struct gpio_irq_chip *girq;
+ 	struct gpio_chip *gc;
++	char *colon_ptr;
+ 	int ret, irq;
++	long num;
+ 
+ 	gs = devm_kzalloc(dev, sizeof(*gs), GFP_KERNEL);
+ 	if (!gs)
+@@ -227,25 +229,39 @@ static int mlxbf3_gpio_probe(struct platform_device *pdev)
+ 	gc->owner = THIS_MODULE;
+ 	gc->add_pin_ranges = mlxbf3_gpio_add_pin_ranges;
+ 
+-	irq = platform_get_irq(pdev, 0);
+-	if (irq >= 0) {
+-		girq = &gs->gc.irq;
+-		gpio_irq_chip_set_chip(girq, &gpio_mlxbf3_irqchip);
+-		girq->default_type = IRQ_TYPE_NONE;
+-		/* This will let us handle the parent IRQ in the driver */
+-		girq->num_parents = 0;
+-		girq->parents = NULL;
+-		girq->parent_handler = NULL;
+-		girq->handler = handle_bad_irq;
+-
+-		/*
+-		 * Directly request the irq here instead of passing
+-		 * a flow-handler because the irq is shared.
+-		 */
+-		ret = devm_request_irq(dev, irq, mlxbf3_gpio_irq_handler,
+-				       IRQF_SHARED, dev_name(dev), gs);
+-		if (ret)
+-			return dev_err_probe(dev, ret, "failed to request IRQ");
++	colon_ptr = strchr(dev_name(dev), ':');
++	if (!colon_ptr) {
++		dev_err(dev, "invalid device name format\n");
++		return -EINVAL;
++	}
++
++	ret = kstrtol(++colon_ptr, 16, &num);
++	if (ret) {
++		dev_err(dev, "invalid device instance\n");
++		return ret;
++	}
++
++	if (!num) {
++		irq = platform_get_irq(pdev, 0);
++		if (irq >= 0) {
++			girq = &gs->gc.irq;
++			gpio_irq_chip_set_chip(girq, &gpio_mlxbf3_irqchip);
++			girq->default_type = IRQ_TYPE_NONE;
++			/* This will let us handle the parent IRQ in the driver */
++			girq->num_parents = 0;
++			girq->parents = NULL;
++			girq->parent_handler = NULL;
++			girq->handler = handle_bad_irq;
++
++			/*
++			 * Directly request the irq here instead of passing
++			 * a flow-handler because the irq is shared.
++			 */
++			ret = devm_request_irq(dev, irq, mlxbf3_gpio_irq_handler,
++					       IRQF_SHARED, dev_name(dev), gs);
++			if (ret)
++				return dev_err_probe(dev, ret, "failed to request IRQ");
++		}
+ 	}
+ 
+ 	platform_set_drvdata(pdev, gs);
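
/*
 * [Editor's sketch] The probe now derives the controller instance from
 * the device name; for this ACPI-enumerated device the name is assumed
 * to look like "MLNXBF33:00", "MLNXBF33:01", and so on, and only
 * instance 0 requests the shared IRQ. The parsing reduces to:
 */
#include <linux/device.h>
#include <linux/kstrtox.h>
#include <linux/string.h>

static long sketch_instance(struct device *dev)
{
	const char *colon = strchr(dev_name(dev), ':');
	long num;

	if (!colon || kstrtol(colon + 1, 16, &num))
		return -EINVAL;
	return num;			/* 0 for the first instance */
}
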
+diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
+index ef3aee1cabcfd0..bb7c1bf5f856e3 100644
+--- a/drivers/gpio/gpio-pca953x.c
++++ b/drivers/gpio/gpio-pca953x.c
+@@ -951,7 +951,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip, int irq_base)
+ 					IRQF_ONESHOT | IRQF_SHARED, dev_name(dev),
+ 					chip);
+ 	if (ret)
+-		return dev_err_probe(dev, client->irq, "failed to request irq\n");
++		return dev_err_probe(dev, ret, "failed to request irq\n");
+ 
+ 	return 0;
+ }
+diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
+index 626daedb016987..36f8c7bb79d818 100644
+--- a/drivers/gpio/gpiolib-of.c
++++ b/drivers/gpio/gpiolib-of.c
+@@ -215,6 +215,15 @@ static void of_gpio_try_fixup_polarity(const struct device_node *np,
+ 		 */
+ 		{ "lantiq,pci-xway",	"gpio-reset",	false },
+ #endif
++#if IS_ENABLED(CONFIG_REGULATOR_S5M8767)
++		/*
++		 * According to S5M8767, the DVS and DS pin are
++		 * active-high signals. However, exynos5250-spring.dts use
++		 * active-low setting.
++		 */
++		{ "samsung,s5m8767-pmic", "s5m8767,pmic-buck-dvs-gpios", true },
++		{ "samsung,s5m8767-pmic", "s5m8767,pmic-buck-ds-gpios", true },
++#endif
+ #if IS_ENABLED(CONFIG_TOUCHSCREEN_TSC2005)
+ 		/*
+ 		 * DTS for Nokia N900 incorrectly specified "active high"
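
/*
 * [Editor's sketch, simplified] Each entry in this quirk table names a
 * compatible, the GPIO property whose DT flags are known to be wrong,
 * and the polarity the hardware really uses (true = active-high). The
 * fixup then amounts to (names as used in gpiolib-of.c):
 */
static void sketch_fixup_polarity(const struct device_node *np,
				  const char *compatible, bool active_high,
				  enum of_gpio_flags *flags)
{
	if (!of_device_is_compatible(np, compatible))
		return;

	if (active_high)
		*flags &= ~OF_GPIO_ACTIVE_LOW;	/* override bogus DT flag */
	else
		*flags |= OF_GPIO_ACTIVE_LOW;
}
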
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
+index e0b02bf1c5639e..3d114ea7049f7c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
+@@ -985,6 +985,10 @@ static int vcn_v4_0_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, b
+ 			ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
+ 			VCN_RB1_DB_CTRL__EN_MASK);
+ 
++	/* Keeping one read-back to ensure all register writes are done, otherwise
++	 * it may introduce race conditions */
++	RREG32_SOC15(VCN, inst_idx, regVCN_RB1_DB_CTRL);
++
+ 	return 0;
+ }
+ 
+@@ -1167,6 +1171,10 @@ static int vcn_v4_0_5_start(struct amdgpu_device *adev)
+ 		tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
+ 		WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
+ 		fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
++
++		/* Keeping one read-back to ensure all register writes are done, otherwise
++		 * it may introduce race conditions */
++		RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
+index 21eb0c5b320d5f..c43223916a1b11 100644
+--- a/drivers/gpu/drm/i915/i915_pmu.c
++++ b/drivers/gpu/drm/i915/i915_pmu.c
+@@ -115,7 +115,7 @@ static u32 config_mask(const u64 config)
+ {
+ 	unsigned int bit = config_bit(config);
+ 
+-	if (__builtin_constant_p(config))
++	if (__builtin_constant_p(bit))
+ 		BUILD_BUG_ON(bit >
+ 			     BITS_PER_TYPE(typeof_member(struct i915_pmu,
+ 							 enable)) - 1);
+@@ -124,7 +124,7 @@ static u32 config_mask(const u64 config)
+ 			     BITS_PER_TYPE(typeof_member(struct i915_pmu,
+ 							 enable)) - 1);
+ 
+-	return BIT(config_bit(config));
++	return BIT(bit);
+ }
+ 
+ static bool is_engine_event(struct perf_event *event)
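
/*
 * [Editor's note] BUILD_BUG_ON() only works when its condition is a
 * compile-time constant, so it must be guarded by __builtin_constant_p()
 * on the very expression it asserts about; testing a related variable
 * (config instead of the derived bit) can disagree under compiler
 * instrumentation such as GCOV and break the build. The fixed shape,
 * with hypothetical sketch_* names:
 */
#include <linux/bits.h>
#include <linux/bug.h>
#include <linux/build_bug.h>

#define SKETCH_MAX_BIT	31			/* hypothetical limit */

static inline unsigned int sketch_bit(u64 config)
{
	return (unsigned int)(config & 0x1f);	/* placeholder mapping */
}

static u32 sketch_mask(const u64 config)
{
	unsigned int bit = sketch_bit(config);

	if (__builtin_constant_p(bit))		/* test what we assert */
		BUILD_BUG_ON(bit > SKETCH_MAX_BIT);
	else
		WARN_ON_ONCE(bit > SKETCH_MAX_BIT);

	return BIT(bit);			/* reuse, don't recompute */
}
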
+diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+index d2189441aa38ae..80c78aff964338 100644
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+@@ -123,6 +123,20 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
+ 		OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence)));
+ 		OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
+ 		OUT_RING(ring, submit->seqno - 1);
++
++		OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
++		OUT_RING(ring, CP_SET_THREAD_BOTH);
++
++		/* Reset state used to synchronize BR and BV */
++		OUT_PKT7(ring, CP_RESET_CONTEXT_STATE, 1);
++		OUT_RING(ring,
++			 CP_RESET_CONTEXT_STATE_0_CLEAR_ON_CHIP_TS |
++			 CP_RESET_CONTEXT_STATE_0_CLEAR_RESOURCE_TABLE |
++			 CP_RESET_CONTEXT_STATE_0_CLEAR_BV_BR_COUNTER |
++			 CP_RESET_CONTEXT_STATE_0_RESET_GLOBAL_LOCAL_TS);
++
++		OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
++		OUT_RING(ring, CP_SET_THREAD_BR);
+ 	}
+ 
+ 	if (!sysprof) {
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
+index d8a2edebfe8c3c..b7699ca89dcc53 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
+@@ -94,17 +94,21 @@ static void drm_mode_to_intf_timing_params(
+ 		timing->vsync_polarity = 0;
+ 	}
+ 
+-	/* for DP/EDP, Shift timings to align it to bottom right */
+-	if (phys_enc->hw_intf->cap->type == INTF_DP) {
++	timing->wide_bus_en = dpu_encoder_is_widebus_enabled(phys_enc->parent);
++	timing->compression_en = dpu_encoder_is_dsc_enabled(phys_enc->parent);
++
++	/*
++	 *  For DP/EDP, Shift timings to align it to bottom right.
++	 *  wide_bus_en is set for everything excluding SDM845 &
++	 *  porch changes cause DisplayPort failure and HDMI tearing.
++	 */
++	if (phys_enc->hw_intf->cap->type == INTF_DP && timing->wide_bus_en) {
+ 		timing->h_back_porch += timing->h_front_porch;
+ 		timing->h_front_porch = 0;
+ 		timing->v_back_porch += timing->v_front_porch;
+ 		timing->v_front_porch = 0;
+ 	}
+ 
+-	timing->wide_bus_en = dpu_encoder_is_widebus_enabled(phys_enc->parent);
+-	timing->compression_en = dpu_encoder_is_dsc_enabled(phys_enc->parent);
+-
+ 	/*
+ 	 * for DP, divide the horizontal parameters by 2 when
+ 	 * widebus is enabled
+diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
+index 677c6257181195..28cc550e22a881 100644
+--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
++++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
+@@ -703,6 +703,13 @@ static int dsi_pll_10nm_init(struct msm_dsi_phy *phy)
+ 	/* TODO: Remove this when we have proper display handover support */
+ 	msm_dsi_phy_pll_save_state(phy);
+ 
++	/*
++	 * Also store the proper vco_current_rate, because its value will be used in
++	 * dsi_10nm_pll_restore_state().
++	 */
++	if (!dsi_pll_10nm_vco_recalc_rate(&pll_10nm->clk_hw, VCO_REF_CLK_RATE))
++		pll_10nm->vco_current_rate = pll_10nm->phy->cfg->min_pll_rate;
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml b/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml
+index c6cdc5c003dc07..2ca0ad6efc96e3 100644
+--- a/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml
++++ b/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml
+@@ -2260,7 +2260,8 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
+ 	<reg32 offset="0" name="0">
+ 		<bitfield name="CLEAR_ON_CHIP_TS" pos="0" type="boolean"/>
+ 		<bitfield name="CLEAR_RESOURCE_TABLE" pos="1" type="boolean"/>
+-		<bitfield name="CLEAR_GLOBAL_LOCAL_TS" pos="2" type="boolean"/>
++		<bitfield name="CLEAR_BV_BR_COUNTER" pos="2" type="boolean"/>
++		<bitfield name="RESET_GLOBAL_LOCAL_TS" pos="3" type="boolean"/>
+ 	</reg32>
+ </domain>
+ 
+diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
+index d47442125fa183..9aae26eb7d8fba 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
++++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
+@@ -42,7 +42,7 @@
+ #include "nouveau_acpi.h"
+ 
+ static struct ida bl_ida;
+-#define BL_NAME_SIZE 15 // 12 for name + 2 for digits + 1 for '\0'
++#define BL_NAME_SIZE 24 // 12 for name + 11 for digits + 1 for '\0'
+ 
+ static bool
+ nouveau_get_backlight_name(char backlight_name[BL_NAME_SIZE],
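
/*
 * [Editor's note] The name written into this buffer is "nv_backlight%d"
 * with an ida-allocated int: 12 characters of prefix, up to 11 for the
 * number (a sign plus ten digits covers any int, even though ida values
 * are non-negative), and a NUL, hence 24. Sizing for the worst case also
 * keeps snprintf() format-truncation warnings quiet. Sketch:
 */
static void sketch_backlight_name(char buf[BL_NAME_SIZE], int nb)
{
	/* worst case: "nv_backlight" (12) + "-2147483648" (11) + NUL */
	snprintf(buf, BL_NAME_SIZE, "nv_backlight%d", nb);
}
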
+diff --git a/drivers/gpu/drm/solomon/ssd130x.c b/drivers/gpu/drm/solomon/ssd130x.c
+index 06f5057690bd87..e0fc12d514d766 100644
+--- a/drivers/gpu/drm/solomon/ssd130x.c
++++ b/drivers/gpu/drm/solomon/ssd130x.c
+@@ -974,7 +974,7 @@ static void ssd130x_clear_screen(struct ssd130x_device *ssd130x, u8 *data_array)
+ 
+ static void ssd132x_clear_screen(struct ssd130x_device *ssd130x, u8 *data_array)
+ {
+-	unsigned int columns = DIV_ROUND_UP(ssd130x->height, SSD132X_SEGMENT_WIDTH);
++	unsigned int columns = DIV_ROUND_UP(ssd130x->width, SSD132X_SEGMENT_WIDTH);
+ 	unsigned int height = ssd130x->height;
+ 
+ 	memset(data_array, 0, columns * height);
+diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
+index ad32e584deeec4..c9c88d3ad6698f 100644
+--- a/drivers/gpu/drm/v3d/v3d_sched.c
++++ b/drivers/gpu/drm/v3d/v3d_sched.c
+@@ -191,7 +191,6 @@ v3d_job_update_stats(struct v3d_job *job, enum v3d_queue queue)
+ 	struct v3d_dev *v3d = job->v3d;
+ 	struct v3d_file_priv *file = job->file->driver_priv;
+ 	struct v3d_stats *global_stats = &v3d->queue[queue].stats;
+-	struct v3d_stats *local_stats = &file->stats[queue];
+ 	u64 now = local_clock();
+ 	unsigned long flags;
+ 
+@@ -201,7 +200,12 @@ v3d_job_update_stats(struct v3d_job *job, enum v3d_queue queue)
+ 	else
+ 		preempt_disable();
+ 
+-	v3d_stats_update(local_stats, now);
++	/* Don't update the local stats if the file context has already closed */
++	if (file)
++		v3d_stats_update(&file->stats[queue], now);
++	else
++		drm_dbg(&v3d->drm, "The file descriptor was closed before job completion\n");
++
+ 	v3d_stats_update(global_stats, now);
+ 
+ 	if (IS_ENABLED(CONFIG_LOCKDEP))
+diff --git a/drivers/gpu/drm/xe/display/xe_display.c b/drivers/gpu/drm/xe/display/xe_display.c
+index c6e0c8d77a70f7..a1928cedc7ddf0 100644
+--- a/drivers/gpu/drm/xe/display/xe_display.c
++++ b/drivers/gpu/drm/xe/display/xe_display.c
+@@ -352,6 +352,36 @@ void xe_display_pm_suspend(struct xe_device *xe)
+ 	__xe_display_pm_suspend(xe, false);
+ }
+ 
++void xe_display_pm_shutdown(struct xe_device *xe)
++{
++	struct intel_display *display = &xe->display;
++
++	if (!xe->info.probe_display)
++		return;
++
++	intel_power_domains_disable(xe);
++	intel_fbdev_set_suspend(&xe->drm, FBINFO_STATE_SUSPENDED, true);
++	if (has_display(xe)) {
++		drm_kms_helper_poll_disable(&xe->drm);
++		intel_display_driver_disable_user_access(xe);
++		intel_display_driver_suspend(xe);
++	}
++
++	xe_display_flush_cleanup_work(xe);
++	intel_dp_mst_suspend(xe);
++	intel_hpd_cancel_work(xe);
++
++	if (has_display(xe))
++		intel_display_driver_suspend_access(xe);
++
++	intel_encoder_suspend_all(display);
++	intel_encoder_shutdown_all(display);
++
++	intel_opregion_suspend(display, PCI_D3cold);
++
++	intel_dmc_suspend(xe);
++}
++
+ void xe_display_pm_runtime_suspend(struct xe_device *xe)
+ {
+ 	if (!xe->info.probe_display)
+@@ -376,6 +406,19 @@ void xe_display_pm_suspend_late(struct xe_device *xe)
+ 	intel_display_power_suspend_late(xe);
+ }
+ 
++void xe_display_pm_shutdown_late(struct xe_device *xe)
++{
++	if (!xe->info.probe_display)
++		return;
++
++	/*
++	 * The only requirement is to reboot with display DC states disabled,
++	 * for now leaving all display power wells in the INIT power domain
++	 * enabled.
++	 */
++	intel_power_domains_driver_remove(xe);
++}
++
+ void xe_display_pm_resume_early(struct xe_device *xe)
+ {
+ 	if (!xe->info.probe_display)
+diff --git a/drivers/gpu/drm/xe/display/xe_display.h b/drivers/gpu/drm/xe/display/xe_display.h
+index bed55fd26f3044..17afa537aee508 100644
+--- a/drivers/gpu/drm/xe/display/xe_display.h
++++ b/drivers/gpu/drm/xe/display/xe_display.h
+@@ -35,7 +35,9 @@ void xe_display_irq_reset(struct xe_device *xe);
+ void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt);
+ 
+ void xe_display_pm_suspend(struct xe_device *xe);
++void xe_display_pm_shutdown(struct xe_device *xe);
+ void xe_display_pm_suspend_late(struct xe_device *xe);
++void xe_display_pm_shutdown_late(struct xe_device *xe);
+ void xe_display_pm_resume_early(struct xe_device *xe);
+ void xe_display_pm_resume(struct xe_device *xe);
+ void xe_display_pm_runtime_suspend(struct xe_device *xe);
+@@ -66,7 +68,9 @@ static inline void xe_display_irq_reset(struct xe_device *xe) {}
+ static inline void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt) {}
+ 
+ static inline void xe_display_pm_suspend(struct xe_device *xe) {}
++static inline void xe_display_pm_shutdown(struct xe_device *xe) {}
+ static inline void xe_display_pm_suspend_late(struct xe_device *xe) {}
++static inline void xe_display_pm_shutdown_late(struct xe_device *xe) {}
+ static inline void xe_display_pm_resume_early(struct xe_device *xe) {}
+ static inline void xe_display_pm_resume(struct xe_device *xe) {}
+ static inline void xe_display_pm_runtime_suspend(struct xe_device *xe) {}
+diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
+index 23e02372a49dba..0c3db53b93d8a9 100644
+--- a/drivers/gpu/drm/xe/xe_device.c
++++ b/drivers/gpu/drm/xe/xe_device.c
+@@ -374,6 +374,11 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
+ 	return ERR_PTR(err);
+ }
+ 
++static bool xe_driver_flr_disabled(struct xe_device *xe)
++{
++	return xe_mmio_read32(xe_root_mmio_gt(xe), GU_CNTL_PROTECTED) & DRIVERINT_FLR_DIS;
++}
++
+ /*
+  * The driver-initiated FLR is the highest level of reset that we can trigger
+  * from within the driver. It is different from the PCI FLR in that it doesn't
+@@ -387,17 +392,12 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
+  * if/when a new instance of i915 is bound to the device it will do a full
+  * re-init anyway.
+  */
+-static void xe_driver_flr(struct xe_device *xe)
++static void __xe_driver_flr(struct xe_device *xe)
+ {
+ 	const unsigned int flr_timeout = 3 * MICRO; /* specs recommend a 3s wait */
+ 	struct xe_gt *gt = xe_root_mmio_gt(xe);
+ 	int ret;
+ 
+-	if (xe_mmio_read32(gt, GU_CNTL_PROTECTED) & DRIVERINT_FLR_DIS) {
+-		drm_info_once(&xe->drm, "BIOS Disabled Driver-FLR\n");
+-		return;
+-	}
+-
+ 	drm_dbg(&xe->drm, "Triggering Driver-FLR\n");
+ 
+ 	/*
+@@ -438,6 +438,16 @@ static void xe_driver_flr(struct xe_device *xe)
+ 	xe_mmio_write32(gt, GU_DEBUG, DRIVERFLR_STATUS);
+ }
+ 
++static void xe_driver_flr(struct xe_device *xe)
++{
++	if (xe_driver_flr_disabled(xe)) {
++		drm_info_once(&xe->drm, "BIOS Disabled Driver-FLR\n");
++		return;
++	}
++
++	__xe_driver_flr(xe);
++}
++
+ static void xe_driver_flr_fini(void *arg)
+ {
+ 	struct xe_device *xe = arg;
+@@ -797,6 +807,24 @@ void xe_device_remove(struct xe_device *xe)
+ 
+ void xe_device_shutdown(struct xe_device *xe)
+ {
++	struct xe_gt *gt;
++	u8 id;
++
++	drm_dbg(&xe->drm, "Shutting down device\n");
++
++	if (xe_driver_flr_disabled(xe)) {
++		xe_display_pm_shutdown(xe);
++
++		xe_irq_suspend(xe);
++
++		for_each_gt(gt, xe, id)
++			xe_gt_shutdown(gt);
++
++		xe_display_pm_shutdown_late(xe);
++	} else {
++		/* BOOM! */
++		__xe_driver_flr(xe);
++	}
+ }
+ 
+ /**
+diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
+index 3a7628fb5ad328..231ed53cf907c6 100644
+--- a/drivers/gpu/drm/xe/xe_gt.c
++++ b/drivers/gpu/drm/xe/xe_gt.c
+@@ -98,14 +98,14 @@ void xe_gt_sanitize(struct xe_gt *gt)
+ 
+ static void xe_gt_enable_host_l2_vram(struct xe_gt *gt)
+ {
++	unsigned int fw_ref;
+ 	u32 reg;
+-	int err;
+ 
+ 	if (!XE_WA(gt, 16023588340))
+ 		return;
+ 
+-	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+-	if (WARN_ON(err))
++	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
++	if (!fw_ref)
+ 		return;
+ 
+ 	if (!xe_gt_is_media_type(gt)) {
+@@ -114,14 +114,14 @@ static void xe_gt_enable_host_l2_vram(struct xe_gt *gt)
+ 		xe_gt_mcr_multicast_write(gt, XE2_GAMREQSTRM_CTRL, reg);
+ 	}
+ 
+-	xe_gt_mcr_multicast_write(gt, XEHPC_L3CLOS_MASK(3), 0x3);
+-	xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
++	xe_gt_mcr_multicast_write(gt, XEHPC_L3CLOS_MASK(3), 0xF);
++	xe_force_wake_put(gt_to_fw(gt), fw_ref);
+ }
+ 
+ static void xe_gt_disable_host_l2_vram(struct xe_gt *gt)
+ {
++	unsigned int fw_ref;
+ 	u32 reg;
+-	int err;
+ 
+ 	if (!XE_WA(gt, 16023588340))
+ 		return;
+@@ -129,15 +129,15 @@ static void xe_gt_disable_host_l2_vram(struct xe_gt *gt)
+ 	if (xe_gt_is_media_type(gt))
+ 		return;
+ 
+-	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+-	if (WARN_ON(err))
++	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
++	if (!fw_ref)
+ 		return;
+ 
+ 	reg = xe_gt_mcr_unicast_read_any(gt, XE2_GAMREQSTRM_CTRL);
+ 	reg &= ~CG_DIS_CNTLBUS;
+ 	xe_gt_mcr_multicast_write(gt, XE2_GAMREQSTRM_CTRL, reg);
+ 
+-	xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
++	xe_force_wake_put(gt_to_fw(gt), fw_ref);
+ }
+ 
+ /**
+@@ -405,11 +405,14 @@ static void dump_pat_on_error(struct xe_gt *gt)
+ 
+ static int gt_fw_domain_init(struct xe_gt *gt)
+ {
++	unsigned int fw_ref;
+ 	int err, i;
+ 
+-	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+-	if (err)
++	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
++	if (!fw_ref) {
++		err = -ETIMEDOUT;
+ 		goto err_hw_fence_irq;
++	}
+ 
+ 	if (!xe_gt_is_media_type(gt)) {
+ 		err = xe_ggtt_init(gt_to_tile(gt)->mem.ggtt);
+@@ -444,14 +447,12 @@ static int gt_fw_domain_init(struct xe_gt *gt)
+ 	 */
+ 	gt->info.gmdid = xe_mmio_read32(gt, GMD_ID);
+ 
+-	err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+-	XE_WARN_ON(err);
+-
++	xe_force_wake_put(gt_to_fw(gt), fw_ref);
+ 	return 0;
+ 
+ err_force_wake:
+ 	dump_pat_on_error(gt);
+-	xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
++	xe_force_wake_put(gt_to_fw(gt), fw_ref);
+ err_hw_fence_irq:
+ 	for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i)
+ 		xe_hw_fence_irq_finish(&gt->fence_irq[i]);
+@@ -461,11 +462,14 @@ static int gt_fw_domain_init(struct xe_gt *gt)
+ 
+ static int all_fw_domain_init(struct xe_gt *gt)
+ {
++	unsigned int fw_ref;
+ 	int err, i;
+ 
+-	err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+-	if (err)
+-		goto err_hw_fence_irq;
++	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
++	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
++		err = -ETIMEDOUT;
++		goto err_force_wake;
++	}
+ 
+ 	xe_gt_mcr_set_implicit_defaults(gt);
+ 	xe_wa_process_gt(gt);
+@@ -531,14 +535,12 @@ static int all_fw_domain_init(struct xe_gt *gt)
+ 	if (IS_SRIOV_PF(gt_to_xe(gt)))
+ 		xe_gt_sriov_pf_init_hw(gt);
+ 
+-	err = xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+-	XE_WARN_ON(err);
++	xe_force_wake_put(gt_to_fw(gt), fw_ref);
+ 
+ 	return 0;
+ 
+ err_force_wake:
+-	xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+-err_hw_fence_irq:
++	xe_force_wake_put(gt_to_fw(gt), fw_ref);
+ 	for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i)
+ 		xe_hw_fence_irq_finish(&gt->fence_irq[i]);
+ 
+@@ -551,11 +553,12 @@ static int all_fw_domain_init(struct xe_gt *gt)
+  */
+ int xe_gt_init_hwconfig(struct xe_gt *gt)
+ {
++	unsigned int fw_ref;
+ 	int err;
+ 
+-	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+-	if (err)
+-		goto out;
++	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
++	if (!fw_ref)
++		return -ETIMEDOUT;
+ 
+ 	xe_gt_mcr_init_early(gt);
+ 	xe_pat_init(gt);
+@@ -573,8 +576,7 @@ int xe_gt_init_hwconfig(struct xe_gt *gt)
+ 	xe_gt_enable_host_l2_vram(gt);
+ 
+ out_fw:
+-	xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+-out:
++	xe_force_wake_put(gt_to_fw(gt), fw_ref);
+ 	return err;
+ }
+ 
+@@ -744,6 +746,7 @@ static int do_gt_restart(struct xe_gt *gt)
+ 
+ static int gt_reset(struct xe_gt *gt)
+ {
++	unsigned int fw_ref;
+ 	int err;
+ 
+ 	if (xe_device_wedged(gt_to_xe(gt)))
+@@ -764,9 +767,11 @@ static int gt_reset(struct xe_gt *gt)
+ 
+ 	xe_gt_sanitize(gt);
+ 
+-	err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+-	if (err)
+-		goto err_msg;
++	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
++	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
++		err = -ETIMEDOUT;
++		goto err_out;
++	}
+ 
+ 	xe_uc_gucrc_disable(&gt->uc);
+ 	xe_uc_stop_prepare(&gt->uc);
+@@ -784,8 +789,7 @@ static int gt_reset(struct xe_gt *gt)
+ 	if (err)
+ 		goto err_out;
+ 
+-	err = xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+-	XE_WARN_ON(err);
++	xe_force_wake_put(gt_to_fw(gt), fw_ref);
+ 	xe_pm_runtime_put(gt_to_xe(gt));
+ 
+ 	xe_gt_info(gt, "reset done\n");
+@@ -793,8 +797,7 @@ static int gt_reset(struct xe_gt *gt)
+ 	return 0;
+ 
+ err_out:
+-	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
+-err_msg:
++	xe_force_wake_put(gt_to_fw(gt), fw_ref);
+ 	XE_WARN_ON(xe_uc_start(&gt->uc));
+ err_fail:
+ 	xe_gt_err(gt, "reset failed (%pe)\n", ERR_PTR(err));
+@@ -826,22 +829,25 @@ void xe_gt_reset_async(struct xe_gt *gt)
+ 
+ void xe_gt_suspend_prepare(struct xe_gt *gt)
+ {
+-	XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL));
++	unsigned int fw_ref;
++
++	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ 
+ 	xe_uc_suspend_prepare(&gt->uc);
+ 
+-	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
++	xe_force_wake_put(gt_to_fw(gt), fw_ref);
+ }
+ 
+ int xe_gt_suspend(struct xe_gt *gt)
+ {
++	unsigned int fw_ref;
+ 	int err;
+ 
+ 	xe_gt_dbg(gt, "suspending\n");
+ 	xe_gt_sanitize(gt);
+ 
+-	err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+-	if (err)
++	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
++	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL))
+ 		goto err_msg;
+ 
+ 	err = xe_uc_suspend(&gt->uc);
+@@ -852,19 +858,29 @@ int xe_gt_suspend(struct xe_gt *gt)
+ 
+ 	xe_gt_disable_host_l2_vram(gt);
+ 
+-	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
++	xe_force_wake_put(gt_to_fw(gt), fw_ref);
+ 	xe_gt_dbg(gt, "suspended\n");
+ 
+ 	return 0;
+ 
+-err_force_wake:
+-	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
+ err_msg:
++	err = -ETIMEDOUT;
++err_force_wake:
++	xe_force_wake_put(gt_to_fw(gt), fw_ref);
+ 	xe_gt_err(gt, "suspend failed (%pe)\n", ERR_PTR(err));
+ 
+ 	return err;
+ }
+ 
++void xe_gt_shutdown(struct xe_gt *gt)
++{
++	unsigned int fw_ref;
++
++	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
++	do_gt_reset(gt);
++	xe_force_wake_put(gt_to_fw(gt), fw_ref);
++}
++
+ /**
+  * xe_gt_sanitize_freq() - Restore saved frequencies if necessary.
+  * @gt: the GT object
+@@ -887,11 +903,12 @@ int xe_gt_sanitize_freq(struct xe_gt *gt)
+ 
+ int xe_gt_resume(struct xe_gt *gt)
+ {
++	unsigned int fw_ref;
+ 	int err;
+ 
+ 	xe_gt_dbg(gt, "resuming\n");
+-	err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+-	if (err)
++	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
++	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL))
+ 		goto err_msg;
+ 
+ 	err = do_gt_restart(gt);
+@@ -900,14 +917,15 @@ int xe_gt_resume(struct xe_gt *gt)
+ 
+ 	xe_gt_idle_enable_pg(gt);
+ 
+-	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
++	xe_force_wake_put(gt_to_fw(gt), fw_ref);
+ 	xe_gt_dbg(gt, "resumed\n");
+ 
+ 	return 0;
+ 
+-err_force_wake:
+-	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
+ err_msg:
++	err = -ETIMEDOUT;
++err_force_wake:
++	xe_force_wake_put(gt_to_fw(gt), fw_ref);
+ 	xe_gt_err(gt, "resume failed (%pe)\n", ERR_PTR(err));
+ 
+ 	return err;
+diff --git a/drivers/gpu/drm/xe/xe_gt.h b/drivers/gpu/drm/xe/xe_gt.h
+index ee138e9768a238..881f1cbc2c4919 100644
+--- a/drivers/gpu/drm/xe/xe_gt.h
++++ b/drivers/gpu/drm/xe/xe_gt.h
+@@ -48,6 +48,7 @@ void xe_gt_record_user_engines(struct xe_gt *gt);
+ 
+ void xe_gt_suspend_prepare(struct xe_gt *gt);
+ int xe_gt_suspend(struct xe_gt *gt);
++void xe_gt_shutdown(struct xe_gt *gt);
+ int xe_gt_resume(struct xe_gt *gt);
+ void xe_gt_reset_async(struct xe_gt *gt);
+ void xe_gt_sanitize(struct xe_gt *gt);
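
/*
 * [Editor's note] As the conversions above suggest, xe_force_wake_get()
 * now returns a reference mask of the domains it actually woke (0 when
 * nothing was acquired) rather than an errno, and xe_force_wake_put()
 * releases exactly that mask, so a partial grab is dropped cleanly.
 * The recurring pattern, sketched with names from the patch:
 */
static int sketch_with_forcewake(struct xe_gt *gt)
{
	unsigned int fw_ref;

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
		xe_force_wake_put(gt_to_fw(gt), fw_ref);	/* drop partial grab */
		return -ETIMEDOUT;
	}

	/* ... registers in all domains may be accessed here ... */

	xe_force_wake_put(gt_to_fw(gt), fw_ref);
	return 0;
}
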
+diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
+index bcdd168cdc6d79..c5bdf0f1b32f76 100644
+--- a/drivers/hid/hid-asus.c
++++ b/drivers/hid/hid-asus.c
+@@ -52,6 +52,10 @@ MODULE_DESCRIPTION("Asus HID Keyboard and TouchPad");
+ #define FEATURE_KBD_LED_REPORT_ID1 0x5d
+ #define FEATURE_KBD_LED_REPORT_ID2 0x5e
+ 
++#define ROG_ALLY_REPORT_SIZE 64
++#define ROG_ALLY_X_MIN_MCU 313
++#define ROG_ALLY_MIN_MCU 319
++
+ #define SUPPORT_KBD_BACKLIGHT BIT(0)
+ 
+ #define MAX_TOUCH_MAJOR 8
+@@ -84,6 +88,7 @@ MODULE_DESCRIPTION("Asus HID Keyboard and TouchPad");
+ #define QUIRK_MEDION_E1239T		BIT(10)
+ #define QUIRK_ROG_NKEY_KEYBOARD		BIT(11)
+ #define QUIRK_ROG_CLAYMORE_II_KEYBOARD BIT(12)
++#define QUIRK_ROG_ALLY_XPAD		BIT(13)
+ 
+ #define I2C_KEYBOARD_QUIRKS			(QUIRK_FIX_NOTEBOOK_REPORT | \
+ 						 QUIRK_NO_INIT_REPORTS | \
+@@ -534,9 +539,99 @@ static bool asus_kbd_wmi_led_control_present(struct hid_device *hdev)
+ 	return !!(value & ASUS_WMI_DSTS_PRESENCE_BIT);
+ }
+ 
++/*
++ * We don't care about any other part of the string except the version section.
++ * Example strings: FGA80100.RC72LA.312_T01, FGA80100.RC71LS.318_T01
++ * The bytes "5a 05 03 31 00 1a 13" and possibly more come before the version
++ * string, and there may be additional bytes after the version string such as
++ * "75 00 74 00 65 00" or a postfix such as "_T01"
++ */
++static int mcu_parse_version_string(const u8 *response, size_t response_size)
++{
++	const u8 *end = response + response_size;
++	const u8 *p = response;
++	int dots, err, version;
++	char buf[4];
++
++	dots = 0;
++	while (p < end && dots < 2) {
++		if (*p++ == '.')
++			dots++;
++	}
++
++	if (dots != 2 || p >= end || (p + 3) >= end)
++		return -EINVAL;
++
++	memcpy(buf, p, 3);
++	buf[3] = '\0';
++
++	err = kstrtoint(buf, 10, &version);
++	if (err || version < 0)
++		return -EINVAL;
++
++	return version;
++}
++
++static int mcu_request_version(struct hid_device *hdev)
++{
++	u8 *response __free(kfree) = kzalloc(ROG_ALLY_REPORT_SIZE, GFP_KERNEL);
++	const u8 request[] = { 0x5a, 0x05, 0x03, 0x31, 0x00, 0x20 };
++	int ret;
++
++	if (!response)
++		return -ENOMEM;
++
++	ret = asus_kbd_set_report(hdev, request, sizeof(request));
++	if (ret < 0)
++		return ret;
++
++	ret = hid_hw_raw_request(hdev, FEATURE_REPORT_ID, response,
++				ROG_ALLY_REPORT_SIZE, HID_FEATURE_REPORT,
++				HID_REQ_GET_REPORT);
++	if (ret < 0)
++		return ret;
++
++	ret = mcu_parse_version_string(response, ROG_ALLY_REPORT_SIZE);
++	if (ret < 0) {
++		pr_err("Failed to parse MCU version: %d\n", ret);
++		print_hex_dump(KERN_ERR, "MCU: ", DUMP_PREFIX_NONE,
++			      16, 1, response, ROG_ALLY_REPORT_SIZE, false);
++	}
++
++	return ret;
++}
++
++static void validate_mcu_fw_version(struct hid_device *hdev, int idProduct)
++{
++	int min_version, version;
++
++	version = mcu_request_version(hdev);
++	if (version < 0)
++		return;
++
++	switch (idProduct) {
++	case USB_DEVICE_ID_ASUSTEK_ROG_NKEY_ALLY:
++		min_version = ROG_ALLY_MIN_MCU;
++		break;
++	case USB_DEVICE_ID_ASUSTEK_ROG_NKEY_ALLY_X:
++		min_version = ROG_ALLY_X_MIN_MCU;
++		break;
++	default:
++		min_version = 0;
++	}
++
++	if (version < min_version) {
++		hid_warn(hdev,
++			"The MCU firmware version must be %d or greater to avoid issues with suspend.\n",
++			min_version);
++	}
++}
++
+ static int asus_kbd_register_leds(struct hid_device *hdev)
+ {
+ 	struct asus_drvdata *drvdata = hid_get_drvdata(hdev);
++	struct usb_interface *intf;
++	struct usb_device *udev;
+ 	unsigned char kbd_func;
+ 	int ret;
+ 
+@@ -560,6 +655,14 @@ static int asus_kbd_register_leds(struct hid_device *hdev)
+ 			if (ret < 0)
+ 				return ret;
+ 		}
++
++		if (drvdata->quirks & QUIRK_ROG_ALLY_XPAD) {
++			intf = to_usb_interface(hdev->dev.parent);
++			udev = interface_to_usbdev(intf);
++			validate_mcu_fw_version(hdev,
++				le16_to_cpu(udev->descriptor.idProduct));
++		}
++
+ 	} else {
+ 		/* Initialize keyboard */
+ 		ret = asus_kbd_init(hdev, FEATURE_KBD_REPORT_ID);
+@@ -1280,10 +1383,10 @@ static const struct hid_device_id asus_devices[] = {
+ 	  QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
+ 	    USB_DEVICE_ID_ASUSTEK_ROG_NKEY_ALLY),
+-	  QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD },
++	  QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD | QUIRK_ROG_ALLY_XPAD},
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
+ 	    USB_DEVICE_ID_ASUSTEK_ROG_NKEY_ALLY_X),
+-	  QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD },
++	  QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD | QUIRK_ROG_ALLY_XPAD },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
+ 	    USB_DEVICE_ID_ASUSTEK_ROG_CLAYMORE_II_KEYBOARD),
+ 	  QUIRK_ROG_CLAYMORE_II_KEYBOARD },
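
/*
 * [Editor's note] Feeding the example string documented above through
 * the parser: the loop skips to the second '.', the next three bytes
 * are copied out, and kstrtoint() yields the version.
 */
static int sketch_parse_example(void)
{
	static const u8 resp[] = "FGA80100.RC72LA.312_T01";

	/* returns 312; below ROG_ALLY_X_MIN_MCU (313) warns on an Ally X */
	return mcu_parse_version_string(resp, sizeof(resp) - 1);
}
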
+diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
+index f001ae880e1dbe..27306c17b0c4e4 100644
+--- a/drivers/hv/connection.c
++++ b/drivers/hv/connection.c
+@@ -206,11 +206,20 @@ int vmbus_connect(void)
+ 	INIT_LIST_HEAD(&vmbus_connection.chn_list);
+ 	mutex_init(&vmbus_connection.channel_mutex);
+ 
++	/*
++	 * The following Hyper-V interrupt and monitor pages can be used by
++	 * UIO for mapping to user-space, so they should always be allocated on
++	 * system page boundaries. The system page size must be >= the Hyper-V
++	 * page size.
++	 */
++	BUILD_BUG_ON(PAGE_SIZE < HV_HYP_PAGE_SIZE);
++
+ 	/*
+ 	 * Setup the vmbus event connection for channel interrupt
+ 	 * abstraction stuff
+ 	 */
+-	vmbus_connection.int_page = hv_alloc_hyperv_zeroed_page();
++	vmbus_connection.int_page =
++		(void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+ 	if (vmbus_connection.int_page == NULL) {
+ 		ret = -ENOMEM;
+ 		goto cleanup;
+@@ -225,8 +234,8 @@ int vmbus_connect(void)
+ 	 * Setup the monitor notification facility. The 1st page for
+ 	 * parent->child and the 2nd page for child->parent
+ 	 */
+-	vmbus_connection.monitor_pages[0] = hv_alloc_hyperv_page();
+-	vmbus_connection.monitor_pages[1] = hv_alloc_hyperv_page();
++	vmbus_connection.monitor_pages[0] = (void *)__get_free_page(GFP_KERNEL);
++	vmbus_connection.monitor_pages[1] = (void *)__get_free_page(GFP_KERNEL);
+ 	if ((vmbus_connection.monitor_pages[0] == NULL) ||
+ 	    (vmbus_connection.monitor_pages[1] == NULL)) {
+ 		ret = -ENOMEM;
+@@ -342,21 +351,23 @@ void vmbus_disconnect(void)
+ 		destroy_workqueue(vmbus_connection.work_queue);
+ 
+ 	if (vmbus_connection.int_page) {
+-		hv_free_hyperv_page(vmbus_connection.int_page);
++		free_page((unsigned long)vmbus_connection.int_page);
+ 		vmbus_connection.int_page = NULL;
+ 	}
+ 
+ 	if (vmbus_connection.monitor_pages[0]) {
+ 		if (!set_memory_encrypted(
+ 			(unsigned long)vmbus_connection.monitor_pages[0], 1))
+-			hv_free_hyperv_page(vmbus_connection.monitor_pages[0]);
++			free_page((unsigned long)
++				vmbus_connection.monitor_pages[0]);
+ 		vmbus_connection.monitor_pages[0] = NULL;
+ 	}
+ 
+ 	if (vmbus_connection.monitor_pages[1]) {
+ 		if (!set_memory_encrypted(
+ 			(unsigned long)vmbus_connection.monitor_pages[1], 1))
+-			hv_free_hyperv_page(vmbus_connection.monitor_pages[1]);
++			free_page((unsigned long)
++				vmbus_connection.monitor_pages[1]);
+ 		vmbus_connection.monitor_pages[1] = NULL;
+ 	}
+ }
+diff --git a/drivers/hwmon/ftsteutates.c b/drivers/hwmon/ftsteutates.c
+index a3a07662e49175..8aeec16a7a9054 100644
+--- a/drivers/hwmon/ftsteutates.c
++++ b/drivers/hwmon/ftsteutates.c
+@@ -423,13 +423,16 @@ static int fts_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ 		break;
+ 	case hwmon_pwm:
+ 		switch (attr) {
+-		case hwmon_pwm_auto_channels_temp:
+-			if (data->fan_source[channel] == FTS_FAN_SOURCE_INVALID)
++		case hwmon_pwm_auto_channels_temp: {
++			u8 fan_source = data->fan_source[channel];
++
++			if (fan_source == FTS_FAN_SOURCE_INVALID || fan_source >= BITS_PER_LONG)
+ 				*val = 0;
+ 			else
+-				*val = BIT(data->fan_source[channel]);
++				*val = BIT(fan_source);
+ 
+ 			return 0;
++		}
+ 		default:
+ 			break;
+ 		}
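
/*
 * [Editor's note] BIT(n) expands to (1UL << (n)), and shifting by
 * BITS_PER_LONG or more is undefined behaviour, so a bit index read
 * from hardware must be range-checked first, exactly as above:
 */
#include <linux/bits.h>

static unsigned long sketch_bit_or_zero(u8 n)
{
	return n >= BITS_PER_LONG ? 0 : BIT(n);
}
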
+diff --git a/drivers/hwmon/ltc4282.c b/drivers/hwmon/ltc4282.c
+index 4f608a3790fb72..953dfe2bd166c3 100644
+--- a/drivers/hwmon/ltc4282.c
++++ b/drivers/hwmon/ltc4282.c
+@@ -1511,13 +1511,6 @@ static int ltc4282_setup(struct ltc4282_state *st, struct device *dev)
+ 			return ret;
+ 	}
+ 
+-	if (device_property_read_bool(dev, "adi,fault-log-enable")) {
+-		ret = regmap_set_bits(st->map, LTC4282_ADC_CTRL,
+-				      LTC4282_FAULT_LOG_EN_MASK);
+-		if (ret)
+-			return ret;
+-	}
+-
+ 	if (device_property_read_bool(dev, "adi,fault-log-enable")) {
+ 		ret = regmap_set_bits(st->map, LTC4282_ADC_CTRL, LTC4282_FAULT_LOG_EN_MASK);
+ 		if (ret)
+diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c
+index 9486db249c64fb..b3694a4209b975 100644
+--- a/drivers/hwmon/occ/common.c
++++ b/drivers/hwmon/occ/common.c
+@@ -459,12 +459,10 @@ static ssize_t occ_show_power_1(struct device *dev,
+ 	return sysfs_emit(buf, "%llu\n", val);
+ }
+ 
+-static u64 occ_get_powr_avg(u64 *accum, u32 *samples)
++static u64 occ_get_powr_avg(u64 accum, u32 samples)
+ {
+-	u64 divisor = get_unaligned_be32(samples);
+-
+-	return (divisor == 0) ? 0 :
+-		div64_u64(get_unaligned_be64(accum) * 1000000ULL, divisor);
++	return (samples == 0) ? 0 :
++		mul_u64_u32_div(accum, 1000000UL, samples);
+ }
+ 
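
/*
 * [Editor's note] The old helper multiplied the accumulator by 1000000
 * in plain 64-bit arithmetic before dividing, which can overflow for
 * large accumulators; mul_u64_u32_div() from <linux/math64.h> keeps a
 * wider intermediate product. The fixed helper reduces to:
 */
#include <linux/math64.h>

static u64 sketch_power_avg(u64 accum, u32 samples)
{
	return samples ? mul_u64_u32_div(accum, 1000000UL, samples) : 0;
}
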
+ static ssize_t occ_show_power_2(struct device *dev,
+@@ -489,8 +487,8 @@ static ssize_t occ_show_power_2(struct device *dev,
+ 				  get_unaligned_be32(&power->sensor_id),
+ 				  power->function_id, power->apss_channel);
+ 	case 1:
+-		val = occ_get_powr_avg(&power->accumulator,
+-				       &power->update_tag);
++		val = occ_get_powr_avg(get_unaligned_be64(&power->accumulator),
++				       get_unaligned_be32(&power->update_tag));
+ 		break;
+ 	case 2:
+ 		val = (u64)get_unaligned_be32(&power->update_tag) *
+@@ -527,8 +525,8 @@ static ssize_t occ_show_power_a0(struct device *dev,
+ 		return sysfs_emit(buf, "%u_system\n",
+ 				  get_unaligned_be32(&power->sensor_id));
+ 	case 1:
+-		val = occ_get_powr_avg(&power->system.accumulator,
+-				       &power->system.update_tag);
++		val = occ_get_powr_avg(get_unaligned_be64(&power->system.accumulator),
++				       get_unaligned_be32(&power->system.update_tag));
+ 		break;
+ 	case 2:
+ 		val = (u64)get_unaligned_be32(&power->system.update_tag) *
+@@ -541,8 +539,8 @@ static ssize_t occ_show_power_a0(struct device *dev,
+ 		return sysfs_emit(buf, "%u_proc\n",
+ 				  get_unaligned_be32(&power->sensor_id));
+ 	case 5:
+-		val = occ_get_powr_avg(&power->proc.accumulator,
+-				       &power->proc.update_tag);
++		val = occ_get_powr_avg(get_unaligned_be64(&power->proc.accumulator),
++				       get_unaligned_be32(&power->proc.update_tag));
+ 		break;
+ 	case 6:
+ 		val = (u64)get_unaligned_be32(&power->proc.update_tag) *
+@@ -555,8 +553,8 @@ static ssize_t occ_show_power_a0(struct device *dev,
+ 		return sysfs_emit(buf, "%u_vdd\n",
+ 				  get_unaligned_be32(&power->sensor_id));
+ 	case 9:
+-		val = occ_get_powr_avg(&power->vdd.accumulator,
+-				       &power->vdd.update_tag);
++		val = occ_get_powr_avg(get_unaligned_be64(&power->vdd.accumulator),
++				       get_unaligned_be32(&power->vdd.update_tag));
+ 		break;
+ 	case 10:
+ 		val = (u64)get_unaligned_be32(&power->vdd.update_tag) *
+@@ -569,8 +567,8 @@ static ssize_t occ_show_power_a0(struct device *dev,
+ 		return sysfs_emit(buf, "%u_vdn\n",
+ 				  get_unaligned_be32(&power->sensor_id));
+ 	case 13:
+-		val = occ_get_powr_avg(&power->vdn.accumulator,
+-				       &power->vdn.update_tag);
++		val = occ_get_powr_avg(get_unaligned_be64(&power->vdn.accumulator),
++				       get_unaligned_be32(&power->vdn.update_tag));
+ 		break;
+ 	case 14:
+ 		val = (u64)get_unaligned_be32(&power->vdn.update_tag) *
+@@ -747,29 +745,30 @@ static ssize_t occ_show_extended(struct device *dev,
+ }
+ 
+ /*
+- * Some helper macros to make it easier to define an occ_attribute. Since these
+- * are dynamically allocated, we shouldn't use the existing kernel macros which
++ * A helper to make it easier to define an occ_attribute. Since these
++ * are dynamically allocated, we cannot use the existing kernel macros which
+  * stringify the name argument.
+  */
+-#define ATTR_OCC(_name, _mode, _show, _store) {				\
+-	.attr	= {							\
+-		.name = _name,						\
+-		.mode = VERIFY_OCTAL_PERMISSIONS(_mode),		\
+-	},								\
+-	.show	= _show,						\
+-	.store	= _store,						\
+-}
+-
+-#define SENSOR_ATTR_OCC(_name, _mode, _show, _store, _nr, _index) {	\
+-	.dev_attr	= ATTR_OCC(_name, _mode, _show, _store),	\
+-	.index		= _index,					\
+-	.nr		= _nr,						\
++static void occ_init_attribute(struct occ_attribute *attr, int mode,
++	ssize_t (*show)(struct device *dev, struct device_attribute *attr, char *buf),
++	ssize_t (*store)(struct device *dev, struct device_attribute *attr,
++				   const char *buf, size_t count),
++	int nr, int index, const char *fmt, ...)
++{
++	va_list args;
++
++	va_start(args, fmt);
++	vsnprintf(attr->name, sizeof(attr->name), fmt, args);
++	va_end(args);
++
++	attr->sensor.dev_attr.attr.name = attr->name;
++	attr->sensor.dev_attr.attr.mode = mode;
++	attr->sensor.dev_attr.show = show;
++	attr->sensor.dev_attr.store = store;
++	attr->sensor.index = index;
++	attr->sensor.nr = nr;
+ }
+ 
+-#define OCC_INIT_ATTR(_name, _mode, _show, _store, _nr, _index)		\
+-	((struct sensor_device_attribute_2)				\
+-		SENSOR_ATTR_OCC(_name, _mode, _show, _store, _nr, _index))
+-
+ /*
+  * Allocate and instantiate sensor_device_attribute_2s. It's most efficient to
+  * use our own instead of the built-in hwmon attribute types.
+@@ -855,14 +854,15 @@ static int occ_setup_sensor_attrs(struct occ *occ)
+ 		sensors->extended.num_sensors = 0;
+ 	}
+ 
+-	occ->attrs = devm_kzalloc(dev, sizeof(*occ->attrs) * num_attrs,
++	occ->attrs = devm_kcalloc(dev, num_attrs, sizeof(*occ->attrs),
+ 				  GFP_KERNEL);
+ 	if (!occ->attrs)
+ 		return -ENOMEM;
+ 
+ 	/* null-terminated list */
+-	occ->group.attrs = devm_kzalloc(dev, sizeof(*occ->group.attrs) *
+-					num_attrs + 1, GFP_KERNEL);
++	occ->group.attrs = devm_kcalloc(dev, num_attrs + 1,
++					sizeof(*occ->group.attrs),
++					GFP_KERNEL);
+ 	if (!occ->group.attrs)
+ 		return -ENOMEM;
+ 
+@@ -872,43 +872,33 @@ static int occ_setup_sensor_attrs(struct occ *occ)
+ 		s = i + 1;
+ 		temp = ((struct temp_sensor_2 *)sensors->temp.data) + i;
+ 
+-		snprintf(attr->name, sizeof(attr->name), "temp%d_label", s);
+-		attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_temp, NULL,
+-					     0, i);
++		occ_init_attribute(attr, 0444, show_temp, NULL,
++				   0, i, "temp%d_label", s);
+ 		attr++;
+ 
+ 		if (sensors->temp.version == 2 &&
+ 		    temp->fru_type == OCC_FRU_TYPE_VRM) {
+-			snprintf(attr->name, sizeof(attr->name),
+-				 "temp%d_alarm", s);
++			occ_init_attribute(attr, 0444, show_temp, NULL,
++					   1, i, "temp%d_alarm", s);
+ 		} else {
+-			snprintf(attr->name, sizeof(attr->name),
+-				 "temp%d_input", s);
++			occ_init_attribute(attr, 0444, show_temp, NULL,
++					   1, i, "temp%d_input", s);
+ 		}
+ 
+-		attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_temp, NULL,
+-					     1, i);
+ 		attr++;
+ 
+ 		if (sensors->temp.version > 1) {
+-			snprintf(attr->name, sizeof(attr->name),
+-				 "temp%d_fru_type", s);
+-			attr->sensor = OCC_INIT_ATTR(attr->name, 0444,
+-						     show_temp, NULL, 2, i);
++			occ_init_attribute(attr, 0444, show_temp, NULL,
++					   2, i, "temp%d_fru_type", s);
+ 			attr++;
+ 
+-			snprintf(attr->name, sizeof(attr->name),
+-				 "temp%d_fault", s);
+-			attr->sensor = OCC_INIT_ATTR(attr->name, 0444,
+-						     show_temp, NULL, 3, i);
++			occ_init_attribute(attr, 0444, show_temp, NULL,
++					   3, i, "temp%d_fault", s);
+ 			attr++;
+ 
+ 			if (sensors->temp.version == 0x10) {
+-				snprintf(attr->name, sizeof(attr->name),
+-					 "temp%d_max", s);
+-				attr->sensor = OCC_INIT_ATTR(attr->name, 0444,
+-							     show_temp, NULL,
+-							     4, i);
++				occ_init_attribute(attr, 0444, show_temp, NULL,
++						   4, i, "temp%d_max", s);
+ 				attr++;
+ 			}
+ 		}
+@@ -917,14 +907,12 @@ static int occ_setup_sensor_attrs(struct occ *occ)
+ 	for (i = 0; i < sensors->freq.num_sensors; ++i) {
+ 		s = i + 1;
+ 
+-		snprintf(attr->name, sizeof(attr->name), "freq%d_label", s);
+-		attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_freq, NULL,
+-					     0, i);
++		occ_init_attribute(attr, 0444, show_freq, NULL,
++				   0, i, "freq%d_label", s);
+ 		attr++;
+ 
+-		snprintf(attr->name, sizeof(attr->name), "freq%d_input", s);
+-		attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_freq, NULL,
+-					     1, i);
++		occ_init_attribute(attr, 0444, show_freq, NULL,
++				   1, i, "freq%d_input", s);
+ 		attr++;
+ 	}
+ 
+@@ -940,32 +928,24 @@ static int occ_setup_sensor_attrs(struct occ *occ)
+ 			s = (i * 4) + 1;
+ 
+ 			for (j = 0; j < 4; ++j) {
+-				snprintf(attr->name, sizeof(attr->name),
+-					 "power%d_label", s);
+-				attr->sensor = OCC_INIT_ATTR(attr->name, 0444,
+-							     show_power, NULL,
+-							     nr++, i);
++				occ_init_attribute(attr, 0444, show_power,
++						   NULL, nr++, i,
++						   "power%d_label", s);
+ 				attr++;
+ 
+-				snprintf(attr->name, sizeof(attr->name),
+-					 "power%d_average", s);
+-				attr->sensor = OCC_INIT_ATTR(attr->name, 0444,
+-							     show_power, NULL,
+-							     nr++, i);
++				occ_init_attribute(attr, 0444, show_power,
++						   NULL, nr++, i,
++						   "power%d_average", s);
+ 				attr++;
+ 
+-				snprintf(attr->name, sizeof(attr->name),
+-					 "power%d_average_interval", s);
+-				attr->sensor = OCC_INIT_ATTR(attr->name, 0444,
+-							     show_power, NULL,
+-							     nr++, i);
++				occ_init_attribute(attr, 0444, show_power,
++						   NULL, nr++, i,
++						   "power%d_average_interval", s);
+ 				attr++;
+ 
+-				snprintf(attr->name, sizeof(attr->name),
+-					 "power%d_input", s);
+-				attr->sensor = OCC_INIT_ATTR(attr->name, 0444,
+-							     show_power, NULL,
+-							     nr++, i);
++				occ_init_attribute(attr, 0444, show_power,
++						   NULL, nr++, i,
++						   "power%d_input", s);
+ 				attr++;
+ 
+ 				s++;
+@@ -977,28 +957,20 @@ static int occ_setup_sensor_attrs(struct occ *occ)
+ 		for (i = 0; i < sensors->power.num_sensors; ++i) {
+ 			s = i + 1;
+ 
+-			snprintf(attr->name, sizeof(attr->name),
+-				 "power%d_label", s);
+-			attr->sensor = OCC_INIT_ATTR(attr->name, 0444,
+-						     show_power, NULL, 0, i);
++			occ_init_attribute(attr, 0444, show_power, NULL,
++					   0, i, "power%d_label", s);
+ 			attr++;
+ 
+-			snprintf(attr->name, sizeof(attr->name),
+-				 "power%d_average", s);
+-			attr->sensor = OCC_INIT_ATTR(attr->name, 0444,
+-						     show_power, NULL, 1, i);
++			occ_init_attribute(attr, 0444, show_power, NULL,
++					   1, i, "power%d_average", s);
+ 			attr++;
+ 
+-			snprintf(attr->name, sizeof(attr->name),
+-				 "power%d_average_interval", s);
+-			attr->sensor = OCC_INIT_ATTR(attr->name, 0444,
+-						     show_power, NULL, 2, i);
++			occ_init_attribute(attr, 0444, show_power, NULL,
++					   2, i, "power%d_average_interval", s);
+ 			attr++;
+ 
+-			snprintf(attr->name, sizeof(attr->name),
+-				 "power%d_input", s);
+-			attr->sensor = OCC_INIT_ATTR(attr->name, 0444,
+-						     show_power, NULL, 3, i);
++			occ_init_attribute(attr, 0444, show_power, NULL,
++					   3, i, "power%d_input", s);
+ 			attr++;
+ 		}
+ 
+@@ -1006,56 +978,43 @@ static int occ_setup_sensor_attrs(struct occ *occ)
+ 	}
+ 
+ 	if (sensors->caps.num_sensors >= 1) {
+-		snprintf(attr->name, sizeof(attr->name), "power%d_label", s);
+-		attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_caps, NULL,
+-					     0, 0);
++		occ_init_attribute(attr, 0444, show_caps, NULL,
++				   0, 0, "power%d_label", s);
+ 		attr++;
+ 
+-		snprintf(attr->name, sizeof(attr->name), "power%d_cap", s);
+-		attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_caps, NULL,
+-					     1, 0);
++		occ_init_attribute(attr, 0444, show_caps, NULL,
++				   1, 0, "power%d_cap", s);
+ 		attr++;
+ 
+-		snprintf(attr->name, sizeof(attr->name), "power%d_input", s);
+-		attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_caps, NULL,
+-					     2, 0);
++		occ_init_attribute(attr, 0444, show_caps, NULL,
++				   2, 0, "power%d_input", s);
+ 		attr++;
+ 
+-		snprintf(attr->name, sizeof(attr->name),
+-			 "power%d_cap_not_redundant", s);
+-		attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_caps, NULL,
+-					     3, 0);
++		occ_init_attribute(attr, 0444, show_caps, NULL,
++				   3, 0, "power%d_cap_not_redundant", s);
+ 		attr++;
+ 
+-		snprintf(attr->name, sizeof(attr->name), "power%d_cap_max", s);
+-		attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_caps, NULL,
+-					     4, 0);
++		occ_init_attribute(attr, 0444, show_caps, NULL,
++				   4, 0, "power%d_cap_max", s);
+ 		attr++;
+ 
+-		snprintf(attr->name, sizeof(attr->name), "power%d_cap_min", s);
+-		attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_caps, NULL,
+-					     5, 0);
++		occ_init_attribute(attr, 0444, show_caps, NULL,
++				   5, 0, "power%d_cap_min", s);
+ 		attr++;
+ 
+-		snprintf(attr->name, sizeof(attr->name), "power%d_cap_user",
+-			 s);
+-		attr->sensor = OCC_INIT_ATTR(attr->name, 0644, show_caps,
+-					     occ_store_caps_user, 6, 0);
++		occ_init_attribute(attr, 0644, show_caps, occ_store_caps_user,
++				   6, 0, "power%d_cap_user", s);
+ 		attr++;
+ 
+ 		if (sensors->caps.version > 1) {
+-			snprintf(attr->name, sizeof(attr->name),
+-				 "power%d_cap_user_source", s);
+-			attr->sensor = OCC_INIT_ATTR(attr->name, 0444,
+-						     show_caps, NULL, 7, 0);
++			occ_init_attribute(attr, 0444, show_caps, NULL,
++					   7, 0, "power%d_cap_user_source", s);
+ 			attr++;
+ 
+ 			if (sensors->caps.version > 2) {
+-				snprintf(attr->name, sizeof(attr->name),
+-					 "power%d_cap_min_soft", s);
+-				attr->sensor = OCC_INIT_ATTR(attr->name, 0444,
+-							     show_caps, NULL,
+-							     8, 0);
++				occ_init_attribute(attr, 0444, show_caps, NULL,
++						   8, 0,
++						   "power%d_cap_min_soft", s);
+ 				attr++;
+ 			}
+ 		}
+@@ -1064,19 +1023,16 @@ static int occ_setup_sensor_attrs(struct occ *occ)
+ 	for (i = 0; i < sensors->extended.num_sensors; ++i) {
+ 		s = i + 1;
+ 
+-		snprintf(attr->name, sizeof(attr->name), "extn%d_label", s);
+-		attr->sensor = OCC_INIT_ATTR(attr->name, 0444,
+-					     occ_show_extended, NULL, 0, i);
++		occ_init_attribute(attr, 0444, occ_show_extended, NULL,
++				   0, i, "extn%d_label", s);
+ 		attr++;
+ 
+-		snprintf(attr->name, sizeof(attr->name), "extn%d_flags", s);
+-		attr->sensor = OCC_INIT_ATTR(attr->name, 0444,
+-					     occ_show_extended, NULL, 1, i);
++		occ_init_attribute(attr, 0444, occ_show_extended, NULL,
++				   1, i, "extn%d_flags", s);
+ 		attr++;
+ 
+-		snprintf(attr->name, sizeof(attr->name), "extn%d_input", s);
+-		attr->sensor = OCC_INIT_ATTR(attr->name, 0444,
+-					     occ_show_extended, NULL, 2, i);
++		occ_init_attribute(attr, 0444, occ_show_extended, NULL,
++				   2, i, "extn%d_input", s);
+ 		attr++;
+ 	}
+ 
+diff --git a/drivers/i2c/busses/i2c-designware-slave.c b/drivers/i2c/busses/i2c-designware-slave.c
+index f0f0f1f2131d0a..602e98e61cc015 100644
+--- a/drivers/i2c/busses/i2c-designware-slave.c
++++ b/drivers/i2c/busses/i2c-designware-slave.c
+@@ -94,7 +94,7 @@ static int i2c_dw_unreg_slave(struct i2c_client *slave)
+ 	i2c_dw_disable(dev);
+ 	synchronize_irq(dev->irq);
+ 	dev->slave = NULL;
+-	pm_runtime_put(dev->dev);
++	pm_runtime_put_sync_suspend(dev->dev);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/i2c/busses/i2c-npcm7xx.c b/drivers/i2c/busses/i2c-npcm7xx.c
+index a693ebb64edf41..7b6eb2bfb412e5 100644
+--- a/drivers/i2c/busses/i2c-npcm7xx.c
++++ b/drivers/i2c/busses/i2c-npcm7xx.c
+@@ -1969,10 +1969,14 @@ static int npcm_i2c_init_module(struct npcm_i2c *bus, enum i2c_mode mode,
+ 
+ 	/* Check HW is OK: SDA and SCL should be high at this point. */
+ 	if ((npcm_i2c_get_SDA(&bus->adap) == 0) || (npcm_i2c_get_SCL(&bus->adap) == 0)) {
+-		dev_err(bus->dev, "I2C%d init fail: lines are low\n", bus->num);
+-		dev_err(bus->dev, "SDA=%d SCL=%d\n", npcm_i2c_get_SDA(&bus->adap),
+-			npcm_i2c_get_SCL(&bus->adap));
+-		return -ENXIO;
++		dev_warn(bus->dev, " I2C%d SDA=%d SCL=%d, attempting to recover\n", bus->num,
++				 npcm_i2c_get_SDA(&bus->adap), npcm_i2c_get_SCL(&bus->adap));
++		if (npcm_i2c_recovery_tgclk(&bus->adap)) {
++			dev_err(bus->dev, "I2C%d init fail: SDA=%d SCL=%d\n",
++				bus->num, npcm_i2c_get_SDA(&bus->adap),
++				npcm_i2c_get_SCL(&bus->adap));
++			return -ENXIO;
++		}
+ 	}
+ 
+ 	npcm_i2c_int_enable(bus, true);
+diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
+index 1df5b42041427c..89ce8a62b37c62 100644
+--- a/drivers/i2c/busses/i2c-tegra.c
++++ b/drivers/i2c/busses/i2c-tegra.c
+@@ -1395,6 +1395,11 @@ static int tegra_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
+ 			ret = tegra_i2c_xfer_msg(i2c_dev, &msgs[i], MSG_END_CONTINUE);
+ 			if (ret)
+ 				break;
++
++			/* Validate message length before proceeding */
++			if (msgs[i].buf[0] == 0 || msgs[i].buf[0] > I2C_SMBUS_BLOCK_MAX)
++				break;
++
+ 			/* Set the msg length from first byte */
+ 			msgs[i].len += msgs[i].buf[0];
+ 			dev_dbg(i2c_dev->dev, "reading %d bytes\n", msgs[i].len);
+diff --git a/drivers/iio/accel/fxls8962af-core.c b/drivers/iio/accel/fxls8962af-core.c
+index acadabec4df7ad..5e17c1e6d2c717 100644
+--- a/drivers/iio/accel/fxls8962af-core.c
++++ b/drivers/iio/accel/fxls8962af-core.c
+@@ -22,6 +22,7 @@
+ #include <linux/property.h>
+ #include <linux/regulator/consumer.h>
+ #include <linux/regmap.h>
++#include <linux/units.h>
+ 
+ #include <linux/iio/buffer.h>
+ #include <linux/iio/events.h>
+@@ -436,8 +437,16 @@ static int fxls8962af_read_raw(struct iio_dev *indio_dev,
+ 		*val = FXLS8962AF_TEMP_CENTER_VAL;
+ 		return IIO_VAL_INT;
+ 	case IIO_CHAN_INFO_SCALE:
+-		*val = 0;
+-		return fxls8962af_read_full_scale(data, val2);
++		switch (chan->type) {
++		case IIO_TEMP:
++			*val = MILLIDEGREE_PER_DEGREE;
++			return IIO_VAL_INT;
++		case IIO_ACCEL:
++			*val = 0;
++			return fxls8962af_read_full_scale(data, val2);
++		default:
++			return -EINVAL;
++		}
+ 	case IIO_CHAN_INFO_SAMP_FREQ:
+ 		return fxls8962af_read_samp_freq(data, val, val2);
+ 	default:
+@@ -736,9 +745,11 @@ static const struct iio_event_spec fxls8962af_event[] = {
+ 	.type = IIO_TEMP, \
+ 	.address = FXLS8962AF_TEMP_OUT, \
+ 	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
++			      BIT(IIO_CHAN_INFO_SCALE) | \
+ 			      BIT(IIO_CHAN_INFO_OFFSET),\
+ 	.scan_index = -1, \
+ 	.scan_type = { \
++		.sign = 's', \
+ 		.realbits = 8, \
+ 		.storagebits = 8, \
+ 	}, \
+diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
+index 6c4e74420fd25b..216f3c9ce183e7 100644
+--- a/drivers/iio/adc/Kconfig
++++ b/drivers/iio/adc/Kconfig
+@@ -1452,6 +1452,7 @@ config TI_ADS1298
+ 	tristate "Texas Instruments ADS1298"
+ 	depends on SPI
+ 	select IIO_BUFFER
++	select IIO_KFIFO_BUF
+ 	help
+ 	  If you say yes here you get support for Texas Instruments ADS1298
+ 	  medical ADC chips
+diff --git a/drivers/iio/adc/ad7606_spi.c b/drivers/iio/adc/ad7606_spi.c
+index 32a5448116a135..42112f97fac1a6 100644
+--- a/drivers/iio/adc/ad7606_spi.c
++++ b/drivers/iio/adc/ad7606_spi.c
+@@ -151,7 +151,7 @@ static int ad7606_spi_reg_write(struct ad7606_state *st,
+ 	struct spi_device *spi = to_spi_device(st->dev);
+ 
+ 	st->d16[0] = cpu_to_be16((st->bops->rd_wr_cmd(addr, 1) << 8) |
+-				  (val & 0x1FF));
++				  (val & 0xFF));
+ 
+ 	return spi_write(spi, &st->d16[0], sizeof(st->d16[0]));
+ }
+diff --git a/drivers/iio/adc/ad7944.c b/drivers/iio/adc/ad7944.c
+index 58a25792cec377..1e2cf512c2f5f2 100644
+--- a/drivers/iio/adc/ad7944.c
++++ b/drivers/iio/adc/ad7944.c
+@@ -290,6 +290,8 @@ static int ad7944_single_conversion(struct ad7944_adc *adc,
+ 
+ 	if (chan->scan_type.sign == 's')
+ 		*val = sign_extend32(*val, chan->scan_type.realbits - 1);
++	else
++		*val &= GENMASK(chan->scan_type.realbits - 1, 0);
+ 
+ 	return IIO_VAL_INT;
+ }
+diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_temp.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_temp.c
+index 213cce1c31110e..91f0f381082bda 100644
+--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_temp.c
++++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_temp.c
+@@ -67,16 +67,18 @@ int inv_icm42600_temp_read_raw(struct iio_dev *indio_dev,
+ 		return IIO_VAL_INT;
+ 	/*
+ 	 * T°C = (temp / 132.48) + 25
+-	 * Tm°C = 1000 * ((temp * 100 / 13248) + 25)
++	 * Tm°C = 1000 * ((temp / 132.48) + 25)
++	 * Tm°C = 7.548309 * temp + 25000
++	 * Tm°C = (temp + 3312) * 7.548309
+ 	 * scale: 100000 / 13248 ~= 7.548309
+-	 * offset: 25000
++	 * offset: 3312
+ 	 */
+ 	case IIO_CHAN_INFO_SCALE:
+ 		*val = 7;
+ 		*val2 = 548309;
+ 		return IIO_VAL_INT_PLUS_MICRO;
+ 	case IIO_CHAN_INFO_OFFSET:
+-		*val = 25000;
++		*val = 3312;
+ 		return IIO_VAL_INT;
+ 	default:
+ 		return -EINVAL;
+diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
+index 7e3a55349e1070..96a678250e5532 100644
+--- a/drivers/infiniband/core/iwcm.c
++++ b/drivers/infiniband/core/iwcm.c
+@@ -366,12 +366,9 @@ EXPORT_SYMBOL(iw_cm_disconnect);
+ /*
+  * CM_ID <-- DESTROYING
+  *
+- * Clean up all resources associated with the connection and release
+- * the initial reference taken by iw_create_cm_id.
+- *
+- * Returns true if and only if the last cm_id_priv reference has been dropped.
++ * Clean up all resources associated with the connection.
+  */
+-static bool destroy_cm_id(struct iw_cm_id *cm_id)
++static void destroy_cm_id(struct iw_cm_id *cm_id)
+ {
+ 	struct iwcm_id_private *cm_id_priv;
+ 	struct ib_qp *qp;
+@@ -440,20 +437,22 @@ static bool destroy_cm_id(struct iw_cm_id *cm_id)
+ 		iwpm_remove_mapinfo(&cm_id->local_addr, &cm_id->m_local_addr);
+ 		iwpm_remove_mapping(&cm_id->local_addr, RDMA_NL_IWCM);
+ 	}
+-
+-	return iwcm_deref_id(cm_id_priv);
+ }
+ 
+ /*
+- * This function is only called by the application thread and cannot
+- * be called by the event thread. The function will wait for all
+- * references to be released on the cm_id and then kfree the cm_id
+- * object.
++ * Destroy cm_id. If the cm_id still has other references, wait for all
++ * references to be released on the cm_id and then release the initial
++ * reference taken by iw_create_cm_id.
+  */
+ void iw_destroy_cm_id(struct iw_cm_id *cm_id)
+ {
+-	if (!destroy_cm_id(cm_id))
++	struct iwcm_id_private *cm_id_priv;
++
++	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
++	destroy_cm_id(cm_id);
++	if (refcount_read(&cm_id_priv->refcount) > 1)
+ 		flush_workqueue(iwcm_wq);
++	iwcm_deref_id(cm_id_priv);
+ }
+ EXPORT_SYMBOL(iw_destroy_cm_id);
+ 
+@@ -1033,8 +1032,10 @@ static void cm_work_handler(struct work_struct *_work)
+ 
+ 		if (!test_bit(IWCM_F_DROP_EVENTS, &cm_id_priv->flags)) {
+ 			ret = process_event(cm_id_priv, &levent);
+-			if (ret)
+-				WARN_ON_ONCE(destroy_cm_id(&cm_id_priv->id));
++			if (ret) {
++				destroy_cm_id(&cm_id_priv->id);
++				WARN_ON_ONCE(iwcm_deref_id(cm_id_priv));
++			}
+ 		} else
+ 			pr_debug("dropping event %d\n", levent.event);
+ 		if (iwcm_deref_id(cm_id_priv))
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+index 985b9d7d69f20c..81e44b73812295 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -942,7 +942,7 @@ static void fill_wqe_idx(struct hns_roce_srq *srq, unsigned int wqe_idx)
+ static void update_srq_db(struct hns_roce_srq *srq)
+ {
+ 	struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device);
+-	struct hns_roce_v2_db db;
++	struct hns_roce_v2_db db = {};
+ 
+ 	hr_reg_write(&db, DB_TAG, srq->srqn);
+ 	hr_reg_write(&db, DB_CMD, HNS_ROCE_V2_SRQ_DB);
+diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
+index 380fe8dab3b063..9514f577995faf 100644
+--- a/drivers/input/keyboard/gpio_keys.c
++++ b/drivers/input/keyboard/gpio_keys.c
+@@ -449,6 +449,8 @@ static enum hrtimer_restart gpio_keys_irq_timer(struct hrtimer *t)
+ 						      release_timer);
+ 	struct input_dev *input = bdata->input;
+ 
++	guard(spinlock_irqsave)(&bdata->lock);
++
+ 	if (bdata->key_pressed) {
+ 		input_report_key(input, *bdata->code, 0);
+ 		input_sync(input);
+diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
+index 4215f9b9c2b07a..fc22cbb854a385 100644
+--- a/drivers/input/misc/ims-pcu.c
++++ b/drivers/input/misc/ims-pcu.c
+@@ -844,6 +844,12 @@ static int ims_pcu_flash_firmware(struct ims_pcu *pcu,
+ 		addr = be32_to_cpu(rec->addr) / 2;
+ 		len = be16_to_cpu(rec->len);
+ 
++		if (len > sizeof(pcu->cmd_buf) - 1 - sizeof(*fragment)) {
++			dev_err(pcu->dev,
++				"Invalid record length in firmware: %d\n", len);
++			return -EINVAL;
++		}
++
+ 		fragment = (void *)&pcu->cmd_buf[1];
+ 		put_unaligned_le32(addr, &fragment->addr);
+ 		fragment->len = len;
+diff --git a/drivers/input/misc/sparcspkr.c b/drivers/input/misc/sparcspkr.c
+index 20020cbc0752be..a94699f2bbc67b 100644
+--- a/drivers/input/misc/sparcspkr.c
++++ b/drivers/input/misc/sparcspkr.c
+@@ -75,9 +75,14 @@ static int bbc_spkr_event(struct input_dev *dev, unsigned int type, unsigned int
+ 		return -1;
+ 
+ 	switch (code) {
+-		case SND_BELL: if (value) value = 1000;
+-		case SND_TONE: break;
+-		default: return -1;
++	case SND_BELL:
++		if (value)
++			value = 1000;
++		break;
++	case SND_TONE:
++		break;
++	default:
++		return -1;
+ 	}
+ 
+ 	if (value > 20 && value < 32767)
+@@ -113,9 +118,14 @@ static int grover_spkr_event(struct input_dev *dev, unsigned int type, unsigned
+ 		return -1;
+ 
+ 	switch (code) {
+-		case SND_BELL: if (value) value = 1000;
+-		case SND_TONE: break;
+-		default: return -1;
++	case SND_BELL:
++		if (value)
++			value = 1000;
++		break;
++	case SND_TONE:
++		break;
++	default:
++		return -1;
+ 	}
+ 
+ 	if (value > 20 && value < 32767)
+diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
+index f61e48f2373249..23e78a034da8f3 100644
+--- a/drivers/iommu/amd/iommu.c
++++ b/drivers/iommu/amd/iommu.c
+@@ -107,7 +107,9 @@ static inline int get_acpihid_device_id(struct device *dev,
+ 					struct acpihid_map_entry **entry)
+ {
+ 	struct acpi_device *adev = ACPI_COMPANION(dev);
+-	struct acpihid_map_entry *p;
++	struct acpihid_map_entry *p, *p1 = NULL;
++	int hid_count = 0;
++	bool fw_bug;
+ 
+ 	if (!adev)
+ 		return -ENODEV;
+@@ -115,12 +117,33 @@ static inline int get_acpihid_device_id(struct device *dev,
+ 	list_for_each_entry(p, &acpihid_map, list) {
+ 		if (acpi_dev_hid_uid_match(adev, p->hid,
+ 					   p->uid[0] ? p->uid : NULL)) {
+-			if (entry)
+-				*entry = p;
+-			return p->devid;
++			p1 = p;
++			fw_bug = false;
++			hid_count = 1;
++			break;
++		}
++
++		/*
++		 * Count HID matches w/o UID, raise FW_BUG but allow exactly one match
++		 */
++		if (acpi_dev_hid_match(adev, p->hid)) {
++			p1 = p;
++			hid_count++;
++			fw_bug = true;
+ 		}
+ 	}
+-	return -EINVAL;
++
++	if (!p1)
++		return -EINVAL;
++	if (fw_bug)
++		dev_err_once(dev, FW_BUG "No ACPI device matched UID, but %d device%s matched HID.\n",
++			     hid_count, hid_count > 1 ? "s" : "");
++	if (hid_count > 1)
++		return -EINVAL;
++	if (entry)
++		*entry = p1;
++
++	return p1->devid;
+ }
+ 
+ static inline int get_device_sbdf_id(struct device *dev)
+@@ -838,6 +861,14 @@ int amd_iommu_register_ga_log_notifier(int (*notifier)(u32))
+ {
+ 	iommu_ga_log_notifier = notifier;
+ 
++	/*
++	 * Ensure all in-flight IRQ handlers run to completion before returning
++	 * to the caller, e.g. to ensure module code isn't unloaded while it's
++	 * being executed in the IRQ handler.
++	 */
++	if (!notifier)
++		synchronize_rcu();
++
+ 	return 0;
+ }
+ EXPORT_SYMBOL(amd_iommu_register_ga_log_notifier);
+diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
+index 157542c07aaafa..56e9f125cda9a0 100644
+--- a/drivers/iommu/intel/iommu.c
++++ b/drivers/iommu/intel/iommu.c
+@@ -1970,6 +1970,7 @@ static int dmar_domain_attach_device(struct dmar_domain *domain,
+ 		return ret;
+ 
+ 	info->domain = domain;
++	info->domain_attached = true;
+ 	spin_lock_irqsave(&domain->lock, flags);
+ 	list_add(&info->link, &domain->devices);
+ 	spin_unlock_irqrestore(&domain->lock, flags);
+@@ -3381,6 +3382,10 @@ void device_block_translation(struct device *dev)
+ 	struct intel_iommu *iommu = info->iommu;
+ 	unsigned long flags;
+ 
++	/* Device in DMA blocking state. Nothing to do. */
++	if (!info->domain_attached)
++		return;
++
+ 	if (info->domain)
+ 		cache_tag_unassign_domain(info->domain, dev, IOMMU_NO_PASID);
+ 
+@@ -3393,6 +3398,9 @@ void device_block_translation(struct device *dev)
+ 			domain_context_clear(info);
+ 	}
+ 
++	/* Device now in DMA blocking state. */
++	info->domain_attached = false;
++
+ 	if (!info->domain)
+ 		return;
+ 
+@@ -4406,6 +4414,9 @@ static int device_set_dirty_tracking(struct list_head *devices, bool enable)
+ 			break;
+ 	}
+ 
++	if (!ret)
++		info->domain_attached = true;
++
+ 	return ret;
+ }
+ 
+diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
+index 1497f3112b12cd..6f16eeb2ac6554 100644
+--- a/drivers/iommu/intel/iommu.h
++++ b/drivers/iommu/intel/iommu.h
+@@ -776,6 +776,7 @@ struct device_domain_info {
+ 	u8 ats_supported:1;
+ 	u8 ats_enabled:1;
+ 	u8 dtlb_extra_inval:1;	/* Quirk for devices need extra flush */
++	u8 domain_attached:1;	/* Device has domain attached */
+ 	u8 ats_qdep;
+ 	struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
+ 	struct intel_iommu *iommu; /* IOMMU used by this device */
+diff --git a/drivers/iommu/intel/nested.c b/drivers/iommu/intel/nested.c
+index 433c58944401f9..3b5251034a871b 100644
+--- a/drivers/iommu/intel/nested.c
++++ b/drivers/iommu/intel/nested.c
+@@ -27,8 +27,7 @@ static int intel_nested_attach_dev(struct iommu_domain *domain,
+ 	unsigned long flags;
+ 	int ret = 0;
+ 
+-	if (info->domain)
+-		device_block_translation(dev);
++	device_block_translation(dev);
+ 
+ 	if (iommu->agaw < dmar_domain->s2_domain->agaw) {
+ 		dev_err_ratelimited(dev, "Adjusted guest address width not compatible\n");
+@@ -62,6 +61,7 @@ static int intel_nested_attach_dev(struct iommu_domain *domain,
+ 		goto unassign_tag;
+ 
+ 	info->domain = dmar_domain;
++	info->domain_attached = true;
+ 	spin_lock_irqsave(&dmar_domain->lock, flags);
+ 	list_add(&info->link, &dmar_domain->devices);
+ 	spin_unlock_irqrestore(&dmar_domain->lock, flags);
+diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
+index 9511dae5b556a9..94b6c43dfa5cbd 100644
+--- a/drivers/md/dm-raid1.c
++++ b/drivers/md/dm-raid1.c
+@@ -133,10 +133,9 @@ static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
+ 	spin_lock_irqsave(&ms->lock, flags);
+ 	should_wake = !(bl->head);
+ 	bio_list_add(bl, bio);
+-	spin_unlock_irqrestore(&ms->lock, flags);
+-
+ 	if (should_wake)
+ 		wakeup_mirrord(ms);
++	spin_unlock_irqrestore(&ms->lock, flags);
+ }
+ 
+ static void dispatch_bios(void *context, struct bio_list *bio_list)
+@@ -646,9 +645,9 @@ static void write_callback(unsigned long error, void *context)
+ 	if (!ms->failures.head)
+ 		should_wake = 1;
+ 	bio_list_add(&ms->failures, bio);
+-	spin_unlock_irqrestore(&ms->lock, flags);
+ 	if (should_wake)
+ 		wakeup_mirrord(ms);
++	spin_unlock_irqrestore(&ms->lock, flags);
+ }
+ 
+ static void do_write(struct mirror_set *ms, struct bio *bio)
+diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
+index 883f01e78324f5..e45cffdd419a8d 100644
+--- a/drivers/md/dm-table.c
++++ b/drivers/md/dm-table.c
+@@ -431,6 +431,7 @@ static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
+ 		return 0;
+ 	}
+ 
++	mutex_lock(&q->limits_lock);
+ 	if (blk_stack_limits(limits, &q->limits,
+ 			get_start_sect(bdev) + start) < 0)
+ 		DMWARN("%s: adding target device %pg caused an alignment inconsistency: "
+@@ -448,6 +449,7 @@ static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
+ 	 */
+ 	if (!dm_target_has_integrity(ti->type))
+ 		queue_limits_stack_integrity_bdev(limits, bdev);
++	mutex_unlock(&q->limits_lock);
+ 	return 0;
+ }
+ 
+@@ -1734,8 +1736,12 @@ static int device_not_write_zeroes_capable(struct dm_target *ti, struct dm_dev *
+ 					   sector_t start, sector_t len, void *data)
+ {
+ 	struct request_queue *q = bdev_get_queue(dev->bdev);
++	int b;
+ 
+-	return !q->limits.max_write_zeroes_sectors;
++	mutex_lock(&q->limits_lock);
++	b = !q->limits.max_write_zeroes_sectors;
++	mutex_unlock(&q->limits_lock);
++	return b;
+ }
+ 
+ static bool dm_table_supports_write_zeroes(struct dm_table *t)
+diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
+index 6bd9848518d477..559b8179ac5025 100644
+--- a/drivers/md/dm-verity-fec.c
++++ b/drivers/md/dm-verity-fec.c
+@@ -604,6 +604,10 @@ int verity_fec_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v,
+ 	(*argc)--;
+ 
+ 	if (!strcasecmp(arg_name, DM_VERITY_OPT_FEC_DEV)) {
++		if (v->fec->dev) {
++			ti->error = "FEC device already specified";
++			return -EINVAL;
++		}
+ 		r = dm_get_device(ti, arg_value, BLK_OPEN_READ, &v->fec->dev);
+ 		if (r) {
+ 			ti->error = "FEC device lookup failed";
+diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
+index 53ba0fbdf495c8..ce0462e751a61d 100644
+--- a/drivers/md/dm-verity-target.c
++++ b/drivers/md/dm-verity-target.c
+@@ -1080,6 +1080,9 @@ static int verity_alloc_most_once(struct dm_verity *v)
+ {
+ 	struct dm_target *ti = v->ti;
+ 
++	if (v->validated_blocks)
++		return 0;
++
+ 	/* the bitset can only handle INT_MAX blocks */
+ 	if (v->data_blocks > INT_MAX) {
+ 		ti->error = "device too large to use check_at_most_once";
+@@ -1103,6 +1106,9 @@ static int verity_alloc_zero_digest(struct dm_verity *v)
+ 	struct dm_verity_io *io;
+ 	u8 *zero_data;
+ 
++	if (v->zero_digest)
++		return 0;
++
+ 	v->zero_digest = kmalloc(v->digest_size, GFP_KERNEL);
+ 
+ 	if (!v->zero_digest)
+@@ -1537,7 +1543,7 @@ static int verity_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ 			goto bad;
+ 	}
+ 
+-	/* Root hash signature is  a optional parameter*/
++	/* Root hash signature is an optional parameter */
+ 	r = verity_verify_root_hash(root_hash_digest_to_validate,
+ 				    strlen(root_hash_digest_to_validate),
+ 				    verify_args.sig,
+diff --git a/drivers/md/dm-verity-verify-sig.c b/drivers/md/dm-verity-verify-sig.c
+index a9e2c6c0a33c6d..d5261a0e4232e1 100644
+--- a/drivers/md/dm-verity-verify-sig.c
++++ b/drivers/md/dm-verity-verify-sig.c
+@@ -71,9 +71,14 @@ int verity_verify_sig_parse_opt_args(struct dm_arg_set *as,
+ 				     const char *arg_name)
+ {
+ 	struct dm_target *ti = v->ti;
+-	int ret = 0;
++	int ret;
+ 	const char *sig_key = NULL;
+ 
++	if (v->signature_key_desc) {
++		ti->error = DM_VERITY_VERIFY_ERR("root_hash_sig_key_desc already specified");
++		return -EINVAL;
++	}
++
+ 	if (!*argc) {
+ 		ti->error = DM_VERITY_VERIFY_ERR("Signature key not specified");
+ 		return -EINVAL;
+@@ -83,14 +88,18 @@ int verity_verify_sig_parse_opt_args(struct dm_arg_set *as,
+ 	(*argc)--;
+ 
+ 	ret = verity_verify_get_sig_from_key(sig_key, sig_opts);
+-	if (ret < 0)
++	if (ret < 0) {
+ 		ti->error = DM_VERITY_VERIFY_ERR("Invalid key specified");
++		return ret;
++	}
+ 
+ 	v->signature_key_desc = kstrdup(sig_key, GFP_KERNEL);
+-	if (!v->signature_key_desc)
++	if (!v->signature_key_desc) {
++		ti->error = DM_VERITY_VERIFY_ERR("Could not allocate memory for signature key");
+ 		return -ENOMEM;
++	}
+ 
+-	return ret;
++	return 0;
+ }
+ 
+ /*
+diff --git a/drivers/media/common/videobuf2/videobuf2-dma-sg.c b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
+index 6975a71d740f6d..a5aa6a2a028cb2 100644
+--- a/drivers/media/common/videobuf2/videobuf2-dma-sg.c
++++ b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
+@@ -469,7 +469,7 @@ vb2_dma_sg_dmabuf_ops_begin_cpu_access(struct dma_buf *dbuf,
+ 	struct vb2_dma_sg_buf *buf = dbuf->priv;
+ 	struct sg_table *sgt = buf->dma_sgt;
+ 
+-	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
++	dma_sync_sgtable_for_cpu(buf->dev, sgt, buf->dma_dir);
+ 	return 0;
+ }
+ 
+@@ -480,7 +480,7 @@ vb2_dma_sg_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf,
+ 	struct vb2_dma_sg_buf *buf = dbuf->priv;
+ 	struct sg_table *sgt = buf->dma_sgt;
+ 
+-	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
++	dma_sync_sgtable_for_device(buf->dev, sgt, buf->dma_dir);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/media/i2c/ccs-pll.c b/drivers/media/i2c/ccs-pll.c
+index cf8858cb13d4ce..611c9823be8578 100644
+--- a/drivers/media/i2c/ccs-pll.c
++++ b/drivers/media/i2c/ccs-pll.c
+@@ -312,6 +312,11 @@ __ccs_pll_calculate_vt_tree(struct device *dev,
+ 	dev_dbg(dev, "more_mul2: %u\n", more_mul);
+ 
+ 	pll_fr->pll_multiplier = mul * more_mul;
++	if (pll_fr->pll_multiplier > lim_fr->max_pll_multiplier) {
++		dev_dbg(dev, "pll multiplier %u too high\n",
++			pll_fr->pll_multiplier);
++		return -EINVAL;
++	}
+ 
+ 	if (pll_fr->pll_multiplier * pll_fr->pll_ip_clk_freq_hz >
+ 	    lim_fr->max_pll_op_clk_freq_hz)
+@@ -397,6 +402,8 @@ static int ccs_pll_calculate_vt_tree(struct device *dev,
+ 	min_pre_pll_clk_div = max_t(u16, min_pre_pll_clk_div,
+ 				    pll->ext_clk_freq_hz /
+ 				    lim_fr->max_pll_ip_clk_freq_hz);
++	if (!(pll->flags & CCS_PLL_FLAG_EXT_IP_PLL_DIVIDER))
++		min_pre_pll_clk_div = clk_div_even(min_pre_pll_clk_div);
+ 
+ 	dev_dbg(dev, "vt min/max_pre_pll_clk_div: %u,%u\n",
+ 		min_pre_pll_clk_div, max_pre_pll_clk_div);
+@@ -792,7 +799,7 @@ int ccs_pll_calculate(struct device *dev, const struct ccs_pll_limits *lim,
+ 		op_lim_fr->min_pre_pll_clk_div, op_lim_fr->max_pre_pll_clk_div);
+ 	max_op_pre_pll_clk_div =
+ 		min_t(u16, op_lim_fr->max_pre_pll_clk_div,
+-		      clk_div_even(pll->ext_clk_freq_hz /
++		      DIV_ROUND_UP(pll->ext_clk_freq_hz,
+ 				   op_lim_fr->min_pll_ip_clk_freq_hz));
+ 	min_op_pre_pll_clk_div =
+ 		max_t(u16, op_lim_fr->min_pre_pll_clk_div,
+@@ -815,6 +822,8 @@ int ccs_pll_calculate(struct device *dev, const struct ccs_pll_limits *lim,
+ 			      one_or_more(
+ 				      DIV_ROUND_UP(op_lim_fr->max_pll_op_clk_freq_hz,
+ 						   pll->ext_clk_freq_hz))));
++	if (!(pll->flags & CCS_PLL_FLAG_EXT_IP_PLL_DIVIDER))
++		min_op_pre_pll_clk_div = clk_div_even(min_op_pre_pll_clk_div);
+ 	dev_dbg(dev, "pll_op check: min / max op_pre_pll_clk_div: %u / %u\n",
+ 		min_op_pre_pll_clk_div, max_op_pre_pll_clk_div);
+ 
+diff --git a/drivers/media/i2c/ds90ub913.c b/drivers/media/i2c/ds90ub913.c
+index 7670d6c82d923e..5d754372230e5f 100644
+--- a/drivers/media/i2c/ds90ub913.c
++++ b/drivers/media/i2c/ds90ub913.c
+@@ -450,10 +450,10 @@ static int ub913_set_fmt(struct v4l2_subdev *sd,
+ 	if (!fmt)
+ 		return -EINVAL;
+ 
+-	format->format.code = finfo->outcode;
+-
+ 	*fmt = format->format;
+ 
++	fmt->code = finfo->outcode;
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/media/i2c/imx335.c b/drivers/media/i2c/imx335.c
+index 0beb80b8c45815..9b4db4cd4929ca 100644
+--- a/drivers/media/i2c/imx335.c
++++ b/drivers/media/i2c/imx335.c
+@@ -31,7 +31,7 @@
+ #define IMX335_REG_CPWAIT_TIME		CCI_REG8(0x300d)
+ #define IMX335_REG_WINMODE		CCI_REG8(0x3018)
+ #define IMX335_REG_HTRIMMING_START	CCI_REG16_LE(0x302c)
+-#define IMX335_REG_HNUM			CCI_REG8(0x302e)
++#define IMX335_REG_HNUM			CCI_REG16_LE(0x302e)
+ 
+ /* Lines per frame */
+ #define IMX335_REG_VMAX			CCI_REG24_LE(0x3030)
+@@ -660,7 +660,8 @@ static int imx335_enum_frame_size(struct v4l2_subdev *sd,
+ 	struct imx335 *imx335 = to_imx335(sd);
+ 	u32 code;
+ 
+-	if (fsize->index > ARRAY_SIZE(imx335_mbus_codes))
++	/* Only a single supported_mode available. */
++	if (fsize->index > 0)
+ 		return -EINVAL;
+ 
+ 	code = imx335_get_format_code(imx335, fsize->code);
+diff --git a/drivers/media/i2c/ov2740.c b/drivers/media/i2c/ov2740.c
+index bd0b2f0f0d45b9..3a0835fa576676 100644
+--- a/drivers/media/i2c/ov2740.c
++++ b/drivers/media/i2c/ov2740.c
+@@ -1404,12 +1404,12 @@ static int ov2740_probe(struct i2c_client *client)
+ 	return 0;
+ 
+ probe_error_v4l2_subdev_cleanup:
++	pm_runtime_disable(&client->dev);
++	pm_runtime_set_suspended(&client->dev);
+ 	v4l2_subdev_cleanup(&ov2740->sd);
+ 
+ probe_error_media_entity_cleanup:
+ 	media_entity_cleanup(&ov2740->sd.entity);
+-	pm_runtime_disable(&client->dev);
+-	pm_runtime_set_suspended(&client->dev);
+ 
+ probe_error_v4l2_ctrl_handler_free:
+ 	v4l2_ctrl_handler_free(ov2740->sd.ctrl_handler);
+diff --git a/drivers/media/i2c/ov5675.c b/drivers/media/i2c/ov5675.c
+index 2833b14ee139dc..c0ab3c0ed88e34 100644
+--- a/drivers/media/i2c/ov5675.c
++++ b/drivers/media/i2c/ov5675.c
+@@ -1295,11 +1295,8 @@ static int ov5675_probe(struct i2c_client *client)
+ 		return -ENOMEM;
+ 
+ 	ret = ov5675_get_hwcfg(ov5675, &client->dev);
+-	if (ret) {
+-		dev_err(&client->dev, "failed to get HW configuration: %d",
+-			ret);
++	if (ret)
+ 		return ret;
+-	}
+ 
+ 	v4l2_i2c_subdev_init(&ov5675->sd, client, &ov5675_subdev_ops);
+ 
+diff --git a/drivers/media/i2c/ov8856.c b/drivers/media/i2c/ov8856.c
+index 3b94338f55ed39..23d524de7d60a5 100644
+--- a/drivers/media/i2c/ov8856.c
++++ b/drivers/media/i2c/ov8856.c
+@@ -2276,8 +2276,8 @@ static int ov8856_get_hwcfg(struct ov8856 *ov8856, struct device *dev)
+ 	if (!is_acpi_node(fwnode)) {
+ 		ov8856->xvclk = devm_clk_get(dev, "xvclk");
+ 		if (IS_ERR(ov8856->xvclk)) {
+-			dev_err(dev, "could not get xvclk clock (%pe)\n",
+-				ov8856->xvclk);
++			dev_err_probe(dev, PTR_ERR(ov8856->xvclk),
++				      "could not get xvclk clock\n");
+ 			return PTR_ERR(ov8856->xvclk);
+ 		}
+ 
+@@ -2382,11 +2382,8 @@ static int ov8856_probe(struct i2c_client *client)
+ 		return -ENOMEM;
+ 
+ 	ret = ov8856_get_hwcfg(ov8856, &client->dev);
+-	if (ret) {
+-		dev_err(&client->dev, "failed to get HW configuration: %d",
+-			ret);
++	if (ret)
+ 		return ret;
+-	}
+ 
+ 	v4l2_i2c_subdev_init(&ov8856->sd, client, &ov8856_subdev_ops);
+ 
+diff --git a/drivers/media/pci/intel/ipu6/ipu6-dma.c b/drivers/media/pci/intel/ipu6/ipu6-dma.c
+index b71f66bd8c1fdb..92d513608395c6 100644
+--- a/drivers/media/pci/intel/ipu6/ipu6-dma.c
++++ b/drivers/media/pci/intel/ipu6/ipu6-dma.c
+@@ -172,7 +172,7 @@ void *ipu6_dma_alloc(struct ipu6_bus_device *sys, size_t size,
+ 	count = PHYS_PFN(size);
+ 
+ 	iova = alloc_iova(&mmu->dmap->iovad, count,
+-			  PHYS_PFN(dma_get_mask(dev)), 0);
++			  PHYS_PFN(mmu->dmap->mmu_info->aperture_end), 0);
+ 	if (!iova)
+ 		goto out_kfree;
+ 
+@@ -398,7 +398,7 @@ int ipu6_dma_map_sg(struct ipu6_bus_device *sys, struct scatterlist *sglist,
+ 		nents, npages);
+ 
+ 	iova = alloc_iova(&mmu->dmap->iovad, npages,
+-			  PHYS_PFN(dma_get_mask(dev)), 0);
++			  PHYS_PFN(mmu->dmap->mmu_info->aperture_end), 0);
+ 	if (!iova)
+ 		return 0;
+ 
+diff --git a/drivers/media/pci/intel/ipu6/ipu6.c b/drivers/media/pci/intel/ipu6/ipu6.c
+index 91718eabd74e57..1c5c38a30e6298 100644
+--- a/drivers/media/pci/intel/ipu6/ipu6.c
++++ b/drivers/media/pci/intel/ipu6/ipu6.c
+@@ -463,11 +463,6 @@ static int ipu6_pci_config_setup(struct pci_dev *dev, u8 hw_ver)
+ {
+ 	int ret;
+ 
+-	/* disable IPU6 PCI ATS on mtl ES2 */
+-	if (is_ipu6ep_mtl(hw_ver) && boot_cpu_data.x86_stepping == 0x2 &&
+-	    pci_ats_supported(dev))
+-		pci_disable_ats(dev);
+-
+ 	/* No PCI msi capability for IPU6EP */
+ 	if (is_ipu6ep(hw_ver) || is_ipu6ep_mtl(hw_ver)) {
+ 		/* likely do nothing as msi not enabled by default */
+diff --git a/drivers/media/platform/imagination/e5010-jpeg-enc.c b/drivers/media/platform/imagination/e5010-jpeg-enc.c
+index 187f2d8abfbb5d..cb1f7de1b6321d 100644
+--- a/drivers/media/platform/imagination/e5010-jpeg-enc.c
++++ b/drivers/media/platform/imagination/e5010-jpeg-enc.c
+@@ -1057,8 +1057,11 @@ static int e5010_probe(struct platform_device *pdev)
+ 	e5010->vdev->lock = &e5010->mutex;
+ 
+ 	ret = v4l2_device_register(dev, &e5010->v4l2_dev);
+-	if (ret)
+-		return dev_err_probe(dev, ret, "failed to register v4l2 device\n");
++	if (ret) {
++		dev_err_probe(dev, ret, "failed to register v4l2 device\n");
++		goto fail_after_video_device_alloc;
++	}
++
+ 
+ 	e5010->m2m_dev = v4l2_m2m_init(&e5010_m2m_ops);
+ 	if (IS_ERR(e5010->m2m_dev)) {
+@@ -1118,6 +1121,8 @@ static int e5010_probe(struct platform_device *pdev)
+ 	v4l2_m2m_release(e5010->m2m_dev);
+ fail_after_v4l2_register:
+ 	v4l2_device_unregister(&e5010->v4l2_dev);
++fail_after_video_device_alloc:
++	video_device_release(e5010->vdev);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_hevc_req_multi_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_hevc_req_multi_if.c
+index aa721cc43647c7..2725db882e5b30 100644
+--- a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_hevc_req_multi_if.c
++++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_hevc_req_multi_if.c
+@@ -821,7 +821,7 @@ static int vdec_hevc_slice_setup_core_buffer(struct vdec_hevc_slice_inst *inst,
+ 	inst->vsi_core->fb.y.dma_addr = y_fb_dma;
+ 	inst->vsi_core->fb.y.size = ctx->picinfo.fb_sz[0];
+ 	inst->vsi_core->fb.c.dma_addr = c_fb_dma;
+-	inst->vsi_core->fb.y.size = ctx->picinfo.fb_sz[1];
++	inst->vsi_core->fb.c.size = ctx->picinfo.fb_sz[1];
+ 
+ 	inst->vsi_core->dec.vdec_fb_va = (unsigned long)fb;
+ 
+diff --git a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
+index b8c9bb017fb5f6..73be1013edd0c8 100644
+--- a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
++++ b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
+@@ -752,6 +752,32 @@ static int mxc_get_free_slot(struct mxc_jpeg_slot_data *slot_data)
+ 	return -1;
+ }
+ 
++static void mxc_jpeg_free_slot_data(struct mxc_jpeg_dev *jpeg)
++{
++	/* free descriptor for decoding/encoding phase */
++	dma_free_coherent(jpeg->dev, sizeof(struct mxc_jpeg_desc),
++			  jpeg->slot_data.desc,
++			  jpeg->slot_data.desc_handle);
++	jpeg->slot_data.desc = NULL;
++	jpeg->slot_data.desc_handle = 0;
++
++	/* free descriptor for encoder configuration phase / decoder DHT */
++	dma_free_coherent(jpeg->dev, sizeof(struct mxc_jpeg_desc),
++			  jpeg->slot_data.cfg_desc,
++			  jpeg->slot_data.cfg_desc_handle);
++	jpeg->slot_data.cfg_desc_handle = 0;
++	jpeg->slot_data.cfg_desc = NULL;
++
++	/* free configuration stream */
++	dma_free_coherent(jpeg->dev, MXC_JPEG_MAX_CFG_STREAM,
++			  jpeg->slot_data.cfg_stream_vaddr,
++			  jpeg->slot_data.cfg_stream_handle);
++	jpeg->slot_data.cfg_stream_vaddr = NULL;
++	jpeg->slot_data.cfg_stream_handle = 0;
++
++	jpeg->slot_data.used = false;
++}
++
+ static bool mxc_jpeg_alloc_slot_data(struct mxc_jpeg_dev *jpeg)
+ {
+ 	struct mxc_jpeg_desc *desc;
+@@ -794,30 +820,11 @@ static bool mxc_jpeg_alloc_slot_data(struct mxc_jpeg_dev *jpeg)
+ 	return true;
+ err:
+ 	dev_err(jpeg->dev, "Could not allocate descriptors for slot %d", jpeg->slot_data.slot);
++	mxc_jpeg_free_slot_data(jpeg);
+ 
+ 	return false;
+ }
+ 
+-static void mxc_jpeg_free_slot_data(struct mxc_jpeg_dev *jpeg)
+-{
+-	/* free descriptor for decoding/encoding phase */
+-	dma_free_coherent(jpeg->dev, sizeof(struct mxc_jpeg_desc),
+-			  jpeg->slot_data.desc,
+-			  jpeg->slot_data.desc_handle);
+-
+-	/* free descriptor for encoder configuration phase / decoder DHT */
+-	dma_free_coherent(jpeg->dev, sizeof(struct mxc_jpeg_desc),
+-			  jpeg->slot_data.cfg_desc,
+-			  jpeg->slot_data.cfg_desc_handle);
+-
+-	/* free configuration stream */
+-	dma_free_coherent(jpeg->dev, MXC_JPEG_MAX_CFG_STREAM,
+-			  jpeg->slot_data.cfg_stream_vaddr,
+-			  jpeg->slot_data.cfg_stream_handle);
+-
+-	jpeg->slot_data.used = false;
+-}
+-
+ static void mxc_jpeg_check_and_set_last_buffer(struct mxc_jpeg_ctx *ctx,
+ 					       struct vb2_v4l2_buffer *src_buf,
+ 					       struct vb2_v4l2_buffer *dst_buf)
+@@ -1918,9 +1925,19 @@ static void mxc_jpeg_buf_queue(struct vb2_buffer *vb)
+ 	jpeg_src_buf = vb2_to_mxc_buf(vb);
+ 	jpeg_src_buf->jpeg_parse_error = false;
+ 	ret = mxc_jpeg_parse(ctx, vb);
+-	if (ret)
++	if (ret) {
+ 		jpeg_src_buf->jpeg_parse_error = true;
+ 
++		/*
++		 * If the capture queue is not set up, device_run() won't be
++		 * scheduled; drop the error buffer so that decoding can continue.
++		 */
++		if (!vb2_is_streaming(v4l2_m2m_get_dst_vq(ctx->fh.m2m_ctx))) {
++			v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
++			return;
++		}
++	}
++
+ end:
+ 	v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+ }
+diff --git a/drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c b/drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c
+index 9745d6219a1667..cd6c52e9d158a7 100644
+--- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c
++++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c
+@@ -43,6 +43,7 @@ struct mxc_isi_m2m_ctx_queue_data {
+ 	struct v4l2_pix_format_mplane format;
+ 	const struct mxc_isi_format_info *info;
+ 	u32 sequence;
++	bool streaming;
+ };
+ 
+ struct mxc_isi_m2m_ctx {
+@@ -486,15 +487,18 @@ static int mxc_isi_m2m_streamon(struct file *file, void *fh,
+ 				enum v4l2_buf_type type)
+ {
+ 	struct mxc_isi_m2m_ctx *ctx = to_isi_m2m_ctx(fh);
++	struct mxc_isi_m2m_ctx_queue_data *q = mxc_isi_m2m_ctx_qdata(ctx, type);
+ 	const struct v4l2_pix_format_mplane *out_pix = &ctx->queues.out.format;
+ 	const struct v4l2_pix_format_mplane *cap_pix = &ctx->queues.cap.format;
+ 	const struct mxc_isi_format_info *cap_info = ctx->queues.cap.info;
+ 	const struct mxc_isi_format_info *out_info = ctx->queues.out.info;
+ 	struct mxc_isi_m2m *m2m = ctx->m2m;
+ 	bool bypass;
+-
+ 	int ret;
+ 
++	if (q->streaming)
++		return 0;
++
+ 	mutex_lock(&m2m->lock);
+ 
+ 	if (m2m->usage_count == INT_MAX) {
+@@ -547,6 +551,8 @@ static int mxc_isi_m2m_streamon(struct file *file, void *fh,
+ 		goto unchain;
+ 	}
+ 
++	q->streaming = true;
++
+ 	return 0;
+ 
+ unchain:
+@@ -569,10 +575,14 @@ static int mxc_isi_m2m_streamoff(struct file *file, void *fh,
+ 				 enum v4l2_buf_type type)
+ {
+ 	struct mxc_isi_m2m_ctx *ctx = to_isi_m2m_ctx(fh);
++	struct mxc_isi_m2m_ctx_queue_data *q = mxc_isi_m2m_ctx_qdata(ctx, type);
+ 	struct mxc_isi_m2m *m2m = ctx->m2m;
+ 
+ 	v4l2_m2m_ioctl_streamoff(file, fh, type);
+ 
++	if (!q->streaming)
++		return 0;
++
+ 	mutex_lock(&m2m->lock);
+ 
+ 	/*
+@@ -598,6 +608,8 @@ static int mxc_isi_m2m_streamoff(struct file *file, void *fh,
+ 
+ 	mutex_unlock(&m2m->lock);
+ 
++	q->streaming = false;
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
+index cabcf710c0462a..4d10e94eefe9e8 100644
+--- a/drivers/media/platform/qcom/venus/core.c
++++ b/drivers/media/platform/qcom/venus/core.c
+@@ -354,7 +354,7 @@ static int venus_probe(struct platform_device *pdev)
+ 
+ 	ret = v4l2_device_register(dev, &core->v4l2_dev);
+ 	if (ret)
+-		goto err_core_deinit;
++		goto err_hfi_destroy;
+ 
+ 	platform_set_drvdata(pdev, core);
+ 
+@@ -386,24 +386,24 @@ static int venus_probe(struct platform_device *pdev)
+ 
+ 	ret = venus_enumerate_codecs(core, VIDC_SESSION_TYPE_DEC);
+ 	if (ret)
+-		goto err_venus_shutdown;
++		goto err_core_deinit;
+ 
+ 	ret = venus_enumerate_codecs(core, VIDC_SESSION_TYPE_ENC);
+ 	if (ret)
+-		goto err_venus_shutdown;
++		goto err_core_deinit;
+ 
+ 	ret = pm_runtime_put_sync(dev);
+ 	if (ret) {
+ 		pm_runtime_get_noresume(dev);
+-		goto err_dev_unregister;
++		goto err_core_deinit;
+ 	}
+ 
+ 	venus_dbgfs_init(core);
+ 
+ 	return 0;
+ 
+-err_dev_unregister:
+-	v4l2_device_unregister(&core->v4l2_dev);
++err_core_deinit:
++	hfi_core_deinit(core, false);
+ err_venus_shutdown:
+ 	venus_shutdown(core);
+ err_firmware_deinit:
+@@ -414,9 +414,9 @@ static int venus_probe(struct platform_device *pdev)
+ 	pm_runtime_put_noidle(dev);
+ 	pm_runtime_disable(dev);
+ 	pm_runtime_set_suspended(dev);
++	v4l2_device_unregister(&core->v4l2_dev);
++err_hfi_destroy:
+ 	hfi_destroy(core);
+-err_core_deinit:
+-	hfi_core_deinit(core, false);
+ err_core_put:
+ 	if (core->pm_ops->core_put)
+ 		core->pm_ops->core_put(core);
+diff --git a/drivers/media/platform/ti/davinci/vpif.c b/drivers/media/platform/ti/davinci/vpif.c
+index f4e1fa76bf3724..353e8ad158793a 100644
+--- a/drivers/media/platform/ti/davinci/vpif.c
++++ b/drivers/media/platform/ti/davinci/vpif.c
+@@ -504,7 +504,7 @@ static int vpif_probe(struct platform_device *pdev)
+ 	pdev_display = kzalloc(sizeof(*pdev_display), GFP_KERNEL);
+ 	if (!pdev_display) {
+ 		ret = -ENOMEM;
+-		goto err_put_pdev_capture;
++		goto err_del_pdev_capture;
+ 	}
+ 
+ 	pdev_display->name = "vpif_display";
+@@ -527,6 +527,8 @@ static int vpif_probe(struct platform_device *pdev)
+ 
+ err_put_pdev_display:
+ 	platform_device_put(pdev_display);
++err_del_pdev_capture:
++	platform_device_del(pdev_capture);
+ err_put_pdev_capture:
+ 	platform_device_put(pdev_capture);
+ err_put_rpm:
+diff --git a/drivers/media/platform/ti/omap3isp/ispccdc.c b/drivers/media/platform/ti/omap3isp/ispccdc.c
+index dd375c4e180d1b..7d0c723dcd119a 100644
+--- a/drivers/media/platform/ti/omap3isp/ispccdc.c
++++ b/drivers/media/platform/ti/omap3isp/ispccdc.c
+@@ -446,8 +446,8 @@ static int ccdc_lsc_config(struct isp_ccdc_device *ccdc,
+ 		if (ret < 0)
+ 			goto done;
+ 
+-		dma_sync_sg_for_cpu(isp->dev, req->table.sgt.sgl,
+-				    req->table.sgt.nents, DMA_TO_DEVICE);
++		dma_sync_sgtable_for_cpu(isp->dev, &req->table.sgt,
++					 DMA_TO_DEVICE);
+ 
+ 		if (copy_from_user(req->table.addr, config->lsc,
+ 				   req->config.size)) {
+@@ -455,8 +455,8 @@ static int ccdc_lsc_config(struct isp_ccdc_device *ccdc,
+ 			goto done;
+ 		}
+ 
+-		dma_sync_sg_for_device(isp->dev, req->table.sgt.sgl,
+-				       req->table.sgt.nents, DMA_TO_DEVICE);
++		dma_sync_sgtable_for_device(isp->dev, &req->table.sgt,
++					    DMA_TO_DEVICE);
+ 	}
+ 
+ 	spin_lock_irqsave(&ccdc->lsc.req_lock, flags);
+diff --git a/drivers/media/platform/ti/omap3isp/ispstat.c b/drivers/media/platform/ti/omap3isp/ispstat.c
+index 359a846205b0ff..d3da68408ecb16 100644
+--- a/drivers/media/platform/ti/omap3isp/ispstat.c
++++ b/drivers/media/platform/ti/omap3isp/ispstat.c
+@@ -161,8 +161,7 @@ static void isp_stat_buf_sync_for_device(struct ispstat *stat,
+ 	if (ISP_STAT_USES_DMAENGINE(stat))
+ 		return;
+ 
+-	dma_sync_sg_for_device(stat->isp->dev, buf->sgt.sgl,
+-			       buf->sgt.nents, DMA_FROM_DEVICE);
++	dma_sync_sgtable_for_device(stat->isp->dev, &buf->sgt, DMA_FROM_DEVICE);
+ }
+ 
+ static void isp_stat_buf_sync_for_cpu(struct ispstat *stat,
+@@ -171,8 +170,7 @@ static void isp_stat_buf_sync_for_cpu(struct ispstat *stat,
+ 	if (ISP_STAT_USES_DMAENGINE(stat))
+ 		return;
+ 
+-	dma_sync_sg_for_cpu(stat->isp->dev, buf->sgt.sgl,
+-			    buf->sgt.nents, DMA_FROM_DEVICE);
++	dma_sync_sgtable_for_cpu(stat->isp->dev, &buf->sgt, DMA_FROM_DEVICE);
+ }
+ 
+ static void isp_stat_buf_clear(struct ispstat *stat)
+diff --git a/drivers/media/test-drivers/vidtv/vidtv_channel.c b/drivers/media/test-drivers/vidtv/vidtv_channel.c
+index 7838e62727128f..f3023e91b3ebc8 100644
+--- a/drivers/media/test-drivers/vidtv/vidtv_channel.c
++++ b/drivers/media/test-drivers/vidtv/vidtv_channel.c
+@@ -497,7 +497,7 @@ int vidtv_channel_si_init(struct vidtv_mux *m)
+ 	vidtv_psi_sdt_table_destroy(m->si.sdt);
+ free_pat:
+ 	vidtv_psi_pat_table_destroy(m->si.pat);
+-	return 0;
++	return -EINVAL;
+ }
+ 
+ void vidtv_channel_si_destroy(struct vidtv_mux *m)
+diff --git a/drivers/media/test-drivers/vivid/vivid-vid-cap.c b/drivers/media/test-drivers/vivid/vivid-vid-cap.c
+index f25e011153642e..0d5919e0007562 100644
+--- a/drivers/media/test-drivers/vivid/vivid-vid-cap.c
++++ b/drivers/media/test-drivers/vivid/vivid-vid-cap.c
+@@ -947,8 +947,8 @@ int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection
+ 			if (dev->has_compose_cap) {
+ 				v4l2_rect_set_min_size(compose, &min_rect);
+ 				v4l2_rect_set_max_size(compose, &max_rect);
+-				v4l2_rect_map_inside(compose, &fmt);
+ 			}
++			v4l2_rect_map_inside(compose, &fmt);
+ 			dev->fmt_cap_rect = fmt;
+ 			tpg_s_buf_height(&dev->tpg, fmt.height);
+ 		} else if (dev->has_compose_cap) {
+diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
+index 1d98d3465e28d4..ce52c936cb9310 100644
+--- a/drivers/media/usb/dvb-usb/cxusb.c
++++ b/drivers/media/usb/dvb-usb/cxusb.c
+@@ -119,9 +119,8 @@ static void cxusb_gpio_tuner(struct dvb_usb_device *d, int onoff)
+ 
+ 	o[0] = GPIO_TUNER;
+ 	o[1] = onoff;
+-	cxusb_ctrl_msg(d, CMD_GPIO_WRITE, o, 2, &i, 1);
+ 
+-	if (i != 0x01)
++	if (!cxusb_ctrl_msg(d, CMD_GPIO_WRITE, o, 2, &i, 1) && i != 0x01)
+ 		dev_info(&d->udev->dev, "gpio_write failed.\n");
+ 
+ 	st->gpio_write_state[GPIO_TUNER] = onoff;
+diff --git a/drivers/media/usb/gspca/stv06xx/stv06xx_hdcs.c b/drivers/media/usb/gspca/stv06xx/stv06xx_hdcs.c
+index 5a47dcbf1c8e55..303b055fefea98 100644
+--- a/drivers/media/usb/gspca/stv06xx/stv06xx_hdcs.c
++++ b/drivers/media/usb/gspca/stv06xx/stv06xx_hdcs.c
+@@ -520,12 +520,13 @@ static int hdcs_init(struct sd *sd)
+ static int hdcs_dump(struct sd *sd)
+ {
+ 	u16 reg, val;
++	int err = 0;
+ 
+ 	pr_info("Dumping sensor registers:\n");
+ 
+-	for (reg = HDCS_IDENT; reg <= HDCS_ROWEXPH; reg++) {
+-		stv06xx_read_sensor(sd, reg, &val);
++	for (reg = HDCS_IDENT; reg <= HDCS_ROWEXPH && !err; reg++) {
++		err = stv06xx_read_sensor(sd, reg, &val);
+ 		pr_info("reg 0x%02x = 0x%02x\n", reg, val);
+ 	}
+-	return 0;
++	return (err < 0) ? err : 0;
+ }
+diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
+index 58d1bc80253e87..c70d9c24c6fb34 100644
+--- a/drivers/media/usb/uvc/uvc_ctrl.c
++++ b/drivers/media/usb/uvc/uvc_ctrl.c
+@@ -1689,7 +1689,9 @@ static bool uvc_ctrl_xctrls_has_control(const struct v4l2_ext_control *xctrls,
+ }
+ 
+ static void uvc_ctrl_send_events(struct uvc_fh *handle,
+-	const struct v4l2_ext_control *xctrls, unsigned int xctrls_count)
++				 struct uvc_entity *entity,
++				 const struct v4l2_ext_control *xctrls,
++				 unsigned int xctrls_count)
+ {
+ 	struct uvc_control_mapping *mapping;
+ 	struct uvc_control *ctrl;
+@@ -1700,6 +1702,9 @@ static void uvc_ctrl_send_events(struct uvc_fh *handle,
+ 		u32 changes = V4L2_EVENT_CTRL_CH_VALUE;
+ 
+ 		ctrl = uvc_find_control(handle->chain, xctrls[i].id, &mapping);
++		if (ctrl->entity != entity)
++			continue;
++
+ 		if (ctrl->info.flags & UVC_CTRL_FLAG_ASYNCHRONOUS)
+ 			/* Notification will be sent from an Interrupt event. */
+ 			continue;
+@@ -1830,12 +1835,17 @@ int uvc_ctrl_begin(struct uvc_video_chain *chain)
+ 	return mutex_lock_interruptible(&chain->ctrl_mutex) ? -ERESTARTSYS : 0;
+ }
+ 
++/*
++ * Returns the number of uvc controls that have been correctly set, or a
++ * negative number if there has been an error.
++ */
+ static int uvc_ctrl_commit_entity(struct uvc_device *dev,
+ 				  struct uvc_fh *handle,
+ 				  struct uvc_entity *entity,
+ 				  int rollback,
+ 				  struct uvc_control **err_ctrl)
+ {
++	unsigned int processed_ctrls = 0;
+ 	struct uvc_control *ctrl;
+ 	unsigned int i;
+ 	int ret;
+@@ -1870,6 +1880,9 @@ static int uvc_ctrl_commit_entity(struct uvc_device *dev,
+ 		else
+ 			ret = 0;
+ 
++		if (!ret)
++			processed_ctrls++;
++
+ 		if (rollback || ret < 0)
+ 			memcpy(uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT),
+ 			       uvc_ctrl_data(ctrl, UVC_CTRL_DATA_BACKUP),
+@@ -1888,7 +1901,7 @@ static int uvc_ctrl_commit_entity(struct uvc_device *dev,
+ 			uvc_ctrl_set_handle(handle, ctrl, handle);
+ 	}
+ 
+-	return 0;
++	return processed_ctrls;
+ }
+ 
+ static int uvc_ctrl_find_ctrl_idx(struct uvc_entity *entity,
+@@ -1930,11 +1943,13 @@ int __uvc_ctrl_commit(struct uvc_fh *handle, int rollback,
+ 					uvc_ctrl_find_ctrl_idx(entity, ctrls,
+ 							       err_ctrl);
+ 			goto done;
++		} else if (ret > 0 && !rollback) {
++			uvc_ctrl_send_events(handle, entity,
++					     ctrls->controls, ctrls->count);
+ 		}
+ 	}
+ 
+-	if (!rollback)
+-		uvc_ctrl_send_events(handle, ctrls->controls, ctrls->count);
++	ret = 0;
+ done:
+ 	mutex_unlock(&chain->ctrl_mutex);
+ 	return ret;
+diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
+index a0d683d2664719..241b3f95f32706 100644
+--- a/drivers/media/usb/uvc/uvc_driver.c
++++ b/drivers/media/usb/uvc/uvc_driver.c
+@@ -2217,13 +2217,16 @@ static int uvc_probe(struct usb_interface *intf,
+ #endif
+ 
+ 	/* Parse the Video Class control descriptor. */
+-	if (uvc_parse_control(dev) < 0) {
++	ret = uvc_parse_control(dev);
++	if (ret < 0) {
++		ret = -ENODEV;
+ 		uvc_dbg(dev, PROBE, "Unable to parse UVC descriptors\n");
+ 		goto error;
+ 	}
+ 
+ 	/* Parse the associated GPIOs. */
+-	if (uvc_gpio_parse(dev) < 0) {
++	ret = uvc_gpio_parse(dev);
++	if (ret < 0) {
+ 		uvc_dbg(dev, PROBE, "Unable to parse UVC GPIOs\n");
+ 		goto error;
+ 	}
+@@ -2249,24 +2252,32 @@ static int uvc_probe(struct usb_interface *intf,
+ 	}
+ 
+ 	/* Register the V4L2 device. */
+-	if (v4l2_device_register(&intf->dev, &dev->vdev) < 0)
++	ret = v4l2_device_register(&intf->dev, &dev->vdev);
++	if (ret < 0)
+ 		goto error;
+ 
+ 	/* Scan the device for video chains. */
+-	if (uvc_scan_device(dev) < 0)
++	if (uvc_scan_device(dev) < 0) {
++		ret = -ENODEV;
+ 		goto error;
++	}
+ 
+ 	/* Initialize controls. */
+-	if (uvc_ctrl_init_device(dev) < 0)
++	if (uvc_ctrl_init_device(dev) < 0) {
++		ret = -ENODEV;
+ 		goto error;
++	}
+ 
+ 	/* Register video device nodes. */
+-	if (uvc_register_chains(dev) < 0)
++	if (uvc_register_chains(dev) < 0) {
++		ret = -ENODEV;
+ 		goto error;
++	}
+ 
+ #ifdef CONFIG_MEDIA_CONTROLLER
+ 	/* Register the media device node */
+-	if (media_device_register(&dev->mdev) < 0)
++	ret = media_device_register(&dev->mdev);
++	if (ret < 0)
+ 		goto error;
+ #endif
+ 	/* Save our data pointer in the interface data. */
+@@ -2300,7 +2311,7 @@ static int uvc_probe(struct usb_interface *intf,
+ error:
+ 	uvc_unregister_video(dev);
+ 	kref_put(&dev->ref, uvc_delete);
+-	return -ENODEV;
++	return ret;
+ }
+ 
+ static void uvc_disconnect(struct usb_interface *intf)
+diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
+index 3d7711cc42bc58..56f3ab966d6069 100644
+--- a/drivers/media/v4l2-core/v4l2-dev.c
++++ b/drivers/media/v4l2-core/v4l2-dev.c
+@@ -1052,25 +1052,25 @@ int __video_register_device(struct video_device *vdev,
+ 	vdev->dev.class = &video_class;
+ 	vdev->dev.devt = MKDEV(VIDEO_MAJOR, vdev->minor);
+ 	vdev->dev.parent = vdev->dev_parent;
++	vdev->dev.release = v4l2_device_release;
+ 	dev_set_name(&vdev->dev, "%s%d", name_base, vdev->num);
++
++	/* Increase v4l2_device refcount */
++	v4l2_device_get(vdev->v4l2_dev);
++
+ 	mutex_lock(&videodev_lock);
+ 	ret = device_register(&vdev->dev);
+ 	if (ret < 0) {
+ 		mutex_unlock(&videodev_lock);
+ 		pr_err("%s: device_register failed\n", __func__);
+-		goto cleanup;
++		put_device(&vdev->dev);
++		return ret;
+ 	}
+-	/* Register the release callback that will be called when the last
+-	   reference to the device goes away. */
+-	vdev->dev.release = v4l2_device_release;
+ 
+ 	if (nr != -1 && nr != vdev->num && warn_if_nr_in_use)
+ 		pr_warn("%s: requested %s%d, got %s\n", __func__,
+ 			name_base, nr, video_device_node_name(vdev));
+ 
+-	/* Increase v4l2_device refcount */
+-	v4l2_device_get(vdev->v4l2_dev);
+-
+ 	/* Part 5: Register the entity. */
+ 	ret = video_register_media_controller(vdev);
+ 
+diff --git a/drivers/mmc/core/card.h b/drivers/mmc/core/card.h
+index 3205feb1e8ff6a..9cbdd240c3a7d4 100644
+--- a/drivers/mmc/core/card.h
++++ b/drivers/mmc/core/card.h
+@@ -89,6 +89,7 @@ struct mmc_fixup {
+ #define CID_MANFID_MICRON       0x13
+ #define CID_MANFID_SAMSUNG      0x15
+ #define CID_MANFID_APACER       0x27
++#define CID_MANFID_SWISSBIT     0x5D
+ #define CID_MANFID_KINGSTON     0x70
+ #define CID_MANFID_HYNIX	0x90
+ #define CID_MANFID_KINGSTON_SD	0x9F
+@@ -294,4 +295,9 @@ static inline int mmc_card_broken_sd_poweroff_notify(const struct mmc_card *c)
+ 	return c->quirks & MMC_QUIRK_BROKEN_SD_POWEROFF_NOTIFY;
+ }
+ 
++static inline int mmc_card_no_uhs_ddr50_tuning(const struct mmc_card *c)
++{
++	return c->quirks & MMC_QUIRK_NO_UHS_DDR50_TUNING;
++}
++
+ #endif
+diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h
+index 89b512905be140..7f893bafaa607d 100644
+--- a/drivers/mmc/core/quirks.h
++++ b/drivers/mmc/core/quirks.h
+@@ -34,6 +34,16 @@ static const struct mmc_fixup __maybe_unused mmc_sd_fixups[] = {
+ 		   MMC_QUIRK_BROKEN_SD_CACHE | MMC_QUIRK_BROKEN_SD_POWEROFF_NOTIFY,
+ 		   EXT_CSD_REV_ANY),
+ 
++	/*
++	 * Swissbit S46-u series cards throw I/O errors on tuning requests
++	 * issued after the initial tuning request times out, as expected.
++	 * This has only been observed on cards manufactured in 01/2019,
++	 * used with Bay Trail host controllers.
++	 */
++	_FIXUP_EXT("0016G", CID_MANFID_SWISSBIT, 0x5342, 2019, 1,
++		   0, -1ull, SDIO_ANY_ID, SDIO_ANY_ID, add_quirk_sd,
++		   MMC_QUIRK_NO_UHS_DDR50_TUNING, EXT_CSD_REV_ANY),
++
+ 	END_FIXUP
+ };
+ 
+diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
+index 63915541c0e494..916ae9996e9d72 100644
+--- a/drivers/mmc/core/sd.c
++++ b/drivers/mmc/core/sd.c
+@@ -613,6 +613,29 @@ static int sd_set_current_limit(struct mmc_card *card, u8 *status)
+ 	return 0;
+ }
+ 
++/*
++ * Determine if the card should tune or not.
++ */
++static bool mmc_sd_use_tuning(struct mmc_card *card)
++{
++	/*
++	 * SPI mode doesn't define CMD19 and tuning is only valid for SDR50 and
++	 * SDR104 mode SD-cards. Note that tuning is mandatory for SDR104.
++	 */
++	if (mmc_host_is_spi(card->host))
++		return false;
++
++	switch (card->host->ios.timing) {
++	case MMC_TIMING_UHS_SDR50:
++	case MMC_TIMING_UHS_SDR104:
++		return true;
++	case MMC_TIMING_UHS_DDR50:
++		return !mmc_card_no_uhs_ddr50_tuning(card);
++	}
++
++	return false;
++}
++
+ /*
+  * UHS-I specific initialization procedure
+  */
+@@ -656,14 +679,7 @@ static int mmc_sd_init_uhs_card(struct mmc_card *card)
+ 	if (err)
+ 		goto out;
+ 
+-	/*
+-	 * SPI mode doesn't define CMD19 and tuning is only valid for SDR50 and
+-	 * SDR104 mode SD-cards. Note that tuning is mandatory for SDR104.
+-	 */
+-	if (!mmc_host_is_spi(card->host) &&
+-		(card->host->ios.timing == MMC_TIMING_UHS_SDR50 ||
+-		 card->host->ios.timing == MMC_TIMING_UHS_DDR50 ||
+-		 card->host->ios.timing == MMC_TIMING_UHS_SDR104)) {
++	if (mmc_sd_use_tuning(card)) {
+ 		err = mmc_execute_tuning(card);
+ 
+ 		/*
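mmc_sd_use_tuning() above replaces the old compound conditional with a switch so a per-card quirk can veto DDR50 tuning. A compilable toy version of that decision, with stand-in constants for the kernel's MMC_TIMING_* and MMC_QUIRK_* values:

/* Switch over the bus timing, with DDR50 gated by a per-card quirk
 * bit. Names below are illustrative stand-ins, not the kernel's.
 */
#include <stdbool.h>
#include <stdio.h>

enum timing { SDR50, SDR104, DDR50, OTHER };
#define QUIRK_NO_DDR50_TUNING 0x1

struct card { enum timing timing; unsigned int quirks; };

static bool use_tuning(const struct card *c)
{
	switch (c->timing) {
	case SDR50:
	case SDR104:
		return true;               /* tuning mandatory for SDR104 */
	case DDR50:
		return !(c->quirks & QUIRK_NO_DDR50_TUNING);
	default:
		return false;
	}
}

int main(void)
{
	struct card c = { DDR50, QUIRK_NO_DDR50_TUNING };

	printf("%d\n", use_tuning(&c));    /* 0: quirk suppresses tuning */
	return 0;
}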
+diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
+index b8cff9240b286c..beafca6ba0df4d 100644
+--- a/drivers/mtd/nand/raw/qcom_nandc.c
++++ b/drivers/mtd/nand/raw/qcom_nandc.c
+@@ -2917,7 +2917,7 @@ static int qcom_param_page_type_exec(struct nand_chip *chip,  const struct nand_
+ 		write_reg_dma(nandc, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);
+ 	}
+ 
+-	nandc->buf_count = len;
++	nandc->buf_count = 512;
+ 	memset(nandc->data_buffer, 0xff, nandc->buf_count);
+ 
+ 	config_nand_single_cw_page_read(chip, false, 0);
+diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c
+index c28634e20abf8a..ac887754b98e21 100644
+--- a/drivers/mtd/nand/raw/sunxi_nand.c
++++ b/drivers/mtd/nand/raw/sunxi_nand.c
+@@ -817,6 +817,7 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct nand_chip *nand,
+ 	if (ret)
+ 		return ret;
+ 
++	sunxi_nfc_randomizer_config(nand, page, false);
+ 	sunxi_nfc_randomizer_enable(nand);
+ 	writel(NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ECC_OP,
+ 	       nfc->regs + NFC_REG_CMD);
+@@ -1049,6 +1050,7 @@ static int sunxi_nfc_hw_ecc_write_chunk(struct nand_chip *nand,
+ 	if (ret)
+ 		return ret;
+ 
++	sunxi_nfc_randomizer_config(nand, page, false);
+ 	sunxi_nfc_randomizer_enable(nand);
+ 	sunxi_nfc_hw_ecc_set_prot_oob_bytes(nand, oob, 0, bbm, page);
+ 
+diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c
+index 3cf10078059958..3fa83f05bfcc8e 100644
+--- a/drivers/net/can/kvaser_pciefd.c
++++ b/drivers/net/can/kvaser_pciefd.c
+@@ -966,7 +966,7 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
+ 		u32 status, tx_nr_packets_max;
+ 
+ 		netdev = alloc_candev(sizeof(struct kvaser_pciefd_can),
+-				      KVASER_PCIEFD_CAN_TX_MAX_COUNT);
++				      roundup_pow_of_two(KVASER_PCIEFD_CAN_TX_MAX_COUNT));
+ 		if (!netdev)
+ 			return -ENOMEM;
+ 
+@@ -995,7 +995,6 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
+ 		can->tx_max_count = min(KVASER_PCIEFD_CAN_TX_MAX_COUNT, tx_nr_packets_max - 1);
+ 
+ 		can->can.clock.freq = pcie->freq;
+-		can->can.echo_skb_max = roundup_pow_of_two(can->tx_max_count);
+ 		spin_lock_init(&can->lock);
+ 
+ 		can->can.bittiming_const = &kvaser_pciefd_bittiming_const;
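Rounding the echo-skb count up matters because echo-skb ring indices are commonly masked with (size - 1), which only works when size is a power of two. A small sketch of the rounding step; the kernel's roundup_pow_of_two() is a bit-trick macro, so this loop version is purely for illustration:

/* Round a count up to the next power of two so that index & (size - 1)
 * is a valid ring mask.
 */
#include <stdio.h>

static unsigned int roundup_pow_of_two_u32(unsigned int n)
{
	unsigned int p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned int max = 17;                         /* e.g. TX_MAX_COUNT */
	unsigned int size = roundup_pow_of_two_u32(max);

	printf("size=%u mask=0x%x\n", size, size - 1); /* 32, 0x1f */
	return 0;
}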
+diff --git a/drivers/net/can/m_can/tcan4x5x-core.c b/drivers/net/can/m_can/tcan4x5x-core.c
+index 2f73bf3abad889..b6c5c8bab7390f 100644
+--- a/drivers/net/can/m_can/tcan4x5x-core.c
++++ b/drivers/net/can/m_can/tcan4x5x-core.c
+@@ -385,10 +385,11 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
+ 	priv = cdev_to_priv(mcan_class);
+ 
+ 	priv->power = devm_regulator_get_optional(&spi->dev, "vsup");
+-	if (PTR_ERR(priv->power) == -EPROBE_DEFER) {
+-		ret = -EPROBE_DEFER;
+-		goto out_m_can_class_free_dev;
+-	} else {
++	if (IS_ERR(priv->power)) {
++		if (PTR_ERR(priv->power) == -EPROBE_DEFER) {
++			ret = -EPROBE_DEFER;
++			goto out_m_can_class_free_dev;
++		}
+ 		priv->power = NULL;
+ 	}
+ 
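The fix checks IS_ERR() before inspecting PTR_ERR(), since devm_regulator_get_optional() returns an encoded errno for any failure, not only -EPROBE_DEFER; previously a non-deferred error pointer was left in priv->power. A self-contained sketch of the ERR_PTR convention; EPROBE_DEFER is defined locally because it is a kernel-internal errno:

/* ERR_PTR encodes a negative errno in the top page of pointer space,
 * so callers must test IS_ERR() before dereferencing or NULL-ing.
 */
#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO    4095
#define EPROBE_DEFER 517                  /* kernel-internal errno */
#define ERR_PTR(e)   ((void *)(long)(e))
#define PTR_ERR(p)   ((long)(p))
#define IS_ERR(p)    ((unsigned long)(p) >= (unsigned long)-MAX_ERRNO)

static void *get_optional(void) { return ERR_PTR(-ENODEV); }

int main(void)
{
	void *reg = get_optional();

	if (IS_ERR(reg)) {
		if (PTR_ERR(reg) == -EPROBE_DEFER)
			return 1;         /* kernel: return -EPROBE_DEFER */
		reg = NULL;               /* truly optional: proceed without */
	}
	printf("regulator %s\n", reg ? "present" : "absent");
	return 0;
}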
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+index c1d1673c5749d6..b565189e591398 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+@@ -123,7 +123,6 @@ static netdev_tx_t aq_ndev_start_xmit(struct sk_buff *skb, struct net_device *nd
+ 	}
+ #endif
+ 
+-	skb_tx_timestamp(skb);
+ 	return aq_nic_xmit(aq_nic, skb);
+ }
+ 
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+index 71e50fc65c1478..b0994bd05874a9 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+@@ -898,6 +898,8 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
+ 
+ 	frags = aq_nic_map_skb(self, skb, ring);
+ 
++	skb_tx_timestamp(skb);
++
+ 	if (likely(frags)) {
+ 		err = self->aq_hw_ops->hw_ring_tx_xmit(self->aq_hw,
+ 						       ring, frags);
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 2bb1fce350dbb1..154f73f121ecaa 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -10390,6 +10390,72 @@ void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx,
+ 	bp->num_rss_ctx--;
+ }
+ 
++static bool bnxt_vnic_has_rx_ring(struct bnxt *bp, struct bnxt_vnic_info *vnic,
++				  int rxr_id)
++{
++	u16 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
++	int i, vnic_rx;
++
++	/* An ntuple VNIC always has all the rx rings, so any ring id
++	 * change must be applied to it, since a future filter may use it.
++	 */
++	if (vnic->flags & BNXT_VNIC_NTUPLE_FLAG)
++		return true;
++
++	for (i = 0; i < tbl_size; i++) {
++		if (vnic->flags & BNXT_VNIC_RSSCTX_FLAG)
++			vnic_rx = ethtool_rxfh_context_indir(vnic->rss_ctx)[i];
++		else
++			vnic_rx = bp->rss_indir_tbl[i];
++
++		if (rxr_id == vnic_rx)
++			return true;
++	}
++
++	return false;
++}
++
++static int bnxt_set_vnic_mru_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic,
++				u16 mru, int rxr_id)
++{
++	int rc;
++
++	if (!bnxt_vnic_has_rx_ring(bp, vnic, rxr_id))
++		return 0;
++
++	if (mru) {
++		rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true);
++		if (rc) {
++			netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
++				   vnic->vnic_id, rc);
++			return rc;
++		}
++	}
++	vnic->mru = mru;
++	bnxt_hwrm_vnic_update(bp, vnic,
++			      VNIC_UPDATE_REQ_ENABLES_MRU_VALID);
++
++	return 0;
++}
++
++static int bnxt_set_rss_ctx_vnic_mru(struct bnxt *bp, u16 mru, int rxr_id)
++{
++	struct ethtool_rxfh_context *ctx;
++	unsigned long context;
++	int rc;
++
++	xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) {
++		struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx);
++		struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
++
++		rc = bnxt_set_vnic_mru_p5(bp, vnic, mru, rxr_id);
++		if (rc)
++			return rc;
++	}
++
++	return 0;
++}
++
+ static void bnxt_hwrm_realloc_rss_ctx_vnic(struct bnxt *bp)
+ {
+ 	bool set_tpa = !!(bp->flags & BNXT_FLAG_TPA);
+@@ -15326,6 +15392,7 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
+ 	struct bnxt_cp_ring_info *cpr;
+ 	struct bnxt_vnic_info *vnic;
+ 	int i, rc;
++	u16 mru;
+ 
+ 	rxr = &bp->rx_ring[idx];
+ 	clone = qmem;
+@@ -15356,21 +15423,15 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
+ 	cpr = &rxr->bnapi->cp_ring;
+ 	cpr->sw_stats->rx.rx_resets++;
+ 
+-	for (i = 0; i <= bp->nr_vnics; i++) {
++	mru = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
++	for (i = 0; i < bp->nr_vnics; i++) {
+ 		vnic = &bp->vnic_info[i];
+ 
+-		rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true);
+-		if (rc) {
+-			netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
+-				   vnic->vnic_id, rc);
++		rc = bnxt_set_vnic_mru_p5(bp, vnic, mru, idx);
++		if (rc)
+ 			return rc;
+-		}
+-		vnic->mru = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
+-		bnxt_hwrm_vnic_update(bp, vnic,
+-				      VNIC_UPDATE_REQ_ENABLES_MRU_VALID);
+ 	}
+-
+-	return 0;
++	return bnxt_set_rss_ctx_vnic_mru(bp, mru, idx);
+ 
+ err_free_hwrm_rx_ring:
+ 	bnxt_hwrm_rx_ring_free(bp, rxr, false);
+@@ -15384,12 +15445,12 @@ static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
+ 	struct bnxt_vnic_info *vnic;
+ 	int i;
+ 
+-	for (i = 0; i <= bp->nr_vnics; i++) {
++	for (i = 0; i < bp->nr_vnics; i++) {
+ 		vnic = &bp->vnic_info[i];
+-		vnic->mru = 0;
+-		bnxt_hwrm_vnic_update(bp, vnic,
+-				      VNIC_UPDATE_REQ_ENABLES_MRU_VALID);
++
++		bnxt_set_vnic_mru_p5(bp, vnic, 0, idx);
+ 	}
++	bnxt_set_rss_ctx_vnic_mru(bp, 0, idx);
+ 	/* Make sure NAPI sees that the VNIC is disabled */
+ 	synchronize_net();
+ 	rxr = &bp->rx_ring[idx];
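Besides routing RSS-context VNICs through their own iterator, the queue start/stop loops above drop the off-by-one `i <= bp->nr_vnics`, which walked one slot past vnic_info[]. A trivial illustration of that bug class:

/* With NR entries, `i <= NR` reads one element past the array end;
 * the fixed bound is `i < NR`.
 */
#include <stdio.h>

#define NR 3

int main(void)
{
	int vnic[NR] = { 10, 11, 12 };
	int sum = 0;

	for (int i = 0; i < NR; i++)   /* was i <= NR: out-of-bounds read */
		sum += vnic[i];
	printf("%d\n", sum);
	return 0;
}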
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+index 546d9a3d7efea7..1867552a8bdbe2 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+@@ -148,7 +148,6 @@ void bnxt_unregister_dev(struct bnxt_en_dev *edev)
+ 	struct net_device *dev = edev->net;
+ 	struct bnxt *bp = netdev_priv(dev);
+ 	struct bnxt_ulp *ulp;
+-	int i = 0;
+ 
+ 	ulp = edev->ulp_tbl;
+ 	rtnl_lock();
+@@ -164,10 +163,6 @@ void bnxt_unregister_dev(struct bnxt_en_dev *edev)
+ 	synchronize_rcu();
+ 	ulp->max_async_event_id = 0;
+ 	ulp->async_events_bmap = NULL;
+-	while (atomic_read(&ulp->ref_count) != 0 && i < 10) {
+-		msleep(100);
+-		i++;
+-	}
+ 	mutex_unlock(&edev->en_dev_lock);
+ 	rtnl_unlock();
+ 	return;
+@@ -235,10 +230,9 @@ void bnxt_ulp_stop(struct bnxt *bp)
+ 		return;
+ 
+ 	mutex_lock(&edev->en_dev_lock);
+-	if (!bnxt_ulp_registered(edev)) {
+-		mutex_unlock(&edev->en_dev_lock);
+-		return;
+-	}
++	if (!bnxt_ulp_registered(edev) ||
++	    (edev->flags & BNXT_EN_FLAG_ULP_STOPPED))
++		goto ulp_stop_exit;
+ 
+ 	edev->flags |= BNXT_EN_FLAG_ULP_STOPPED;
+ 	if (aux_priv) {
+@@ -254,6 +248,7 @@ void bnxt_ulp_stop(struct bnxt *bp)
+ 			adrv->suspend(adev, pm);
+ 		}
+ 	}
++ulp_stop_exit:
+ 	mutex_unlock(&edev->en_dev_lock);
+ }
+ 
+@@ -262,19 +257,13 @@ void bnxt_ulp_start(struct bnxt *bp, int err)
+ 	struct bnxt_aux_priv *aux_priv = bp->aux_priv;
+ 	struct bnxt_en_dev *edev = bp->edev;
+ 
+-	if (!edev)
+-		return;
+-
+-	edev->flags &= ~BNXT_EN_FLAG_ULP_STOPPED;
+-
+-	if (err)
++	if (!edev || err)
+ 		return;
+ 
+ 	mutex_lock(&edev->en_dev_lock);
+-	if (!bnxt_ulp_registered(edev)) {
+-		mutex_unlock(&edev->en_dev_lock);
+-		return;
+-	}
++	if (!bnxt_ulp_registered(edev) ||
++	    !(edev->flags & BNXT_EN_FLAG_ULP_STOPPED))
++		goto ulp_start_exit;
+ 
+ 	if (edev->ulp_tbl->msix_requested)
+ 		bnxt_fill_msix_vecs(bp, edev->msix_entries);
+@@ -291,6 +280,8 @@ void bnxt_ulp_start(struct bnxt *bp, int err)
+ 			adrv->resume(adev);
+ 		}
+ 	}
++ulp_start_exit:
++	edev->flags &= ~BNXT_EN_FLAG_ULP_STOPPED;
+ 	mutex_unlock(&edev->en_dev_lock);
+ }
+ 
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
+index 4f4914f5c84c91..b76a231ca7dacd 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
+@@ -48,7 +48,6 @@ struct bnxt_ulp {
+ 	unsigned long	*async_events_bmap;
+ 	u16		max_async_event_id;
+ 	u16		msix_requested;
+-	atomic_t	ref_count;
+ };
+ 
+ struct bnxt_en_dev {
+diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
+index ae100ed8ed6b92..3c2a7919b1289d 100644
+--- a/drivers/net/ethernet/cadence/macb_main.c
++++ b/drivers/net/ethernet/cadence/macb_main.c
+@@ -5117,7 +5117,11 @@ static int macb_probe(struct platform_device *pdev)
+ 
+ #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ 	if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
+-		dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
++		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
++		if (err) {
++			dev_err(&pdev->dev, "failed to set DMA mask\n");
++			goto err_out_free_netdev;
++		}
+ 		bp->hw_dma_cap |= HW_DMA_CAP_64B;
+ 	}
+ #endif
+diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
+index 73e1c71c5092e4..92833eefc04b4e 100644
+--- a/drivers/net/ethernet/cortina/gemini.c
++++ b/drivers/net/ethernet/cortina/gemini.c
+@@ -1143,6 +1143,7 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb,
+ 	struct gmac_txdesc *txd;
+ 	skb_frag_t *skb_frag;
+ 	dma_addr_t mapping;
++	bool tcp = false;
+ 	void *buffer;
+ 	u16 mss;
+ 	int ret;
+@@ -1150,6 +1151,13 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb,
+ 	word1 = skb->len;
+ 	word3 = SOF_BIT;
+ 
++	/* Determine if we are doing TCP */
++	if (skb->protocol == htons(ETH_P_IP))
++		tcp = (ip_hdr(skb)->protocol == IPPROTO_TCP);
++	else
++		/* IPv6 */
++		tcp = (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP);
++
+ 	mss = skb_shinfo(skb)->gso_size;
+ 	if (mss) {
+ 		/* This means we are dealing with TCP and skb->len is the
+@@ -1162,8 +1170,26 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb,
+ 			   mss, skb->len);
+ 		word1 |= TSS_MTU_ENABLE_BIT;
+ 		word3 |= mss;
++	} else if (tcp) {
++		/* Even if we are not using TSO, use the hardware offloader
++		 * for transferring the TCP frame: this hardware has partial
++	 * TCP awareness (called TOE - TCP Offload Engine) and will,
++	 * according to the datasheet, put packets belonging to the
++	 * same TCP connection in the same queue for the TOE/TSO
++		 * engine to process. The engine will deal with chopping
++		 * up frames that exceed ETH_DATA_LEN which the
++		 * checksumming engine cannot handle (see below) into
++		 * manageable chunks. It flawlessly deals with quite big
++		 * frames and frames containing custom DSA EtherTypes.
++		 */
++		mss = netdev->mtu + skb_tcp_all_headers(skb);
++		mss = min(mss, skb->len);
++		netdev_dbg(netdev, "TOE/TSO len %04x mtu %04x mss %04x\n",
++			   skb->len, netdev->mtu, mss);
++		word1 |= TSS_MTU_ENABLE_BIT;
++		word3 |= mss;
+ 	} else if (skb->len >= ETH_FRAME_LEN) {
+-		/* Hardware offloaded checksumming isn't working on frames
++		/* Hardware offloaded checksumming isn't working on non-TCP frames
+ 		 * bigger than 1514 bytes. A hypothesis about this is that the
+ 		 * checksum buffer is only 1518 bytes, so when the frames get
+ 		 * bigger they get truncated, or the last few bytes get
+@@ -1180,21 +1206,16 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb,
+ 	}
+ 
+ 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+-		int tcp = 0;
+-
+ 		/* We do not switch off the checksumming on non TCP/UDP
+ 		 * frames: as is shown from tests, the checksumming engine
+ 		 * is smart enough to see that a frame is not actually TCP
+ 		 * or UDP and then just pass it through without any changes
+ 		 * to the frame.
+ 		 */
+-		if (skb->protocol == htons(ETH_P_IP)) {
++		if (skb->protocol == htons(ETH_P_IP))
+ 			word1 |= TSS_IP_CHKSUM_BIT;
+-			tcp = ip_hdr(skb)->protocol == IPPROTO_TCP;
+-		} else { /* IPv6 */
++		else
+ 			word1 |= TSS_IPV6_ENABLE_BIT;
+-			tcp = ipv6_hdr(skb)->nexthdr == IPPROTO_TCP;
+-		}
+ 
+ 		word1 |= tcp ? TSS_TCP_CHKSUM_BIT : TSS_UDP_CHKSUM_BIT;
+ 	}
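For non-TSO TCP frames the driver now hands the TOE engine an MSS capped at MTU plus all headers, but never larger than the frame itself. A sketch of that computation, with a fixed header length standing in for skb_tcp_all_headers():

/* mss = min(mtu + header_len, frame_len): small frames keep their own
 * length, big frames are chopped by the TOE engine at the MTU boundary.
 */
#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int mtu = 1500, hdrs = 66, frame_len = 300;
	unsigned int mss = min_u(mtu + hdrs, frame_len);

	printf("mss=%u\n", mss);   /* 300: small frame keeps its length */
	return 0;
}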
+diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
+index 6bf8a7aeef9081..787218d60c6b16 100644
+--- a/drivers/net/ethernet/dlink/dl2k.c
++++ b/drivers/net/ethernet/dlink/dl2k.c
+@@ -146,6 +146,8 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	np->ioaddr = ioaddr;
+ 	np->chip_id = chip_idx;
+ 	np->pdev = pdev;
++
++	spin_lock_init(&np->stats_lock);
+ 	spin_lock_init (&np->tx_lock);
+ 	spin_lock_init (&np->rx_lock);
+ 
+@@ -865,7 +867,6 @@ tx_error (struct net_device *dev, int tx_status)
+ 	frame_id = (tx_status & 0xffff0000);
+ 	printk (KERN_ERR "%s: Transmit error, TxStatus %4.4x, FrameId %d.\n",
+ 		dev->name, tx_status, frame_id);
+-	dev->stats.tx_errors++;
+ 	/* Transmit Underrun */
+ 	if (tx_status & 0x10) {
+ 		dev->stats.tx_fifo_errors++;
+@@ -902,9 +903,15 @@ tx_error (struct net_device *dev, int tx_status)
+ 		rio_set_led_mode(dev);
+ 		/* Let TxStartThresh stay default value */
+ 	}
++
++	spin_lock(&np->stats_lock);
+ 	/* Maximum Collisions */
+ 	if (tx_status & 0x08)
+ 		dev->stats.collisions++;
++
++	dev->stats.tx_errors++;
++	spin_unlock(&np->stats_lock);
++
+ 	/* Restart the Tx */
+ 	dw32(MACCtrl, dr16(MACCtrl) | TxEnable);
+ }
+@@ -1073,7 +1080,9 @@ get_stats (struct net_device *dev)
+ 	int i;
+ #endif
+ 	unsigned int stat_reg;
++	unsigned long flags;
+ 
++	spin_lock_irqsave(&np->stats_lock, flags);
+ 	/* All statistics registers need to be acknowledged,
+ 	   else statistics overflow could cause problems */
+ 
+@@ -1123,6 +1132,9 @@ get_stats (struct net_device *dev)
+ 	dr16(TCPCheckSumErrors);
+ 	dr16(UDPCheckSumErrors);
+ 	dr16(IPCheckSumErrors);
++
++	spin_unlock_irqrestore(&np->stats_lock, flags);
++
+ 	return &dev->stats;
+ }
+ 
+diff --git a/drivers/net/ethernet/dlink/dl2k.h b/drivers/net/ethernet/dlink/dl2k.h
+index 0e33e2eaae9606..56aff2f0bdbfa0 100644
+--- a/drivers/net/ethernet/dlink/dl2k.h
++++ b/drivers/net/ethernet/dlink/dl2k.h
+@@ -372,6 +372,8 @@ struct netdev_private {
+ 	struct pci_dev *pdev;
+ 	void __iomem *ioaddr;
+ 	void __iomem *eeprom_addr;
++	/* To ensure synchronization when stats are updated. */
++	spinlock_t stats_lock;
+ 	spinlock_t tx_lock;
+ 	spinlock_t rx_lock;
+ 	unsigned int rx_buf_sz;		/* Based on MTU+slack. */
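get_stats() runs in process context while tx_error() runs from the interrupt path, so both now take stats_lock around dev->stats updates (the irqsave variant on the process side). A pthread model of the same serialization, purely illustrative since userspace has no interrupt context:

/* Two contexts updating/reading the same counters are serialized by
 * one lock; the reader takes a consistent snapshot.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t stats_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long tx_errors;

static void *irq_path(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++) {
		pthread_mutex_lock(&stats_lock);
		tx_errors++;                 /* tx_error()-style increment */
		pthread_mutex_unlock(&stats_lock);
	}
	return NULL;
}

int main(void)
{
	pthread_t t;
	unsigned long snapshot;

	pthread_create(&t, NULL, irq_path, NULL);
	pthread_mutex_lock(&stats_lock);     /* get_stats() critical section */
	snapshot = tx_errors;
	pthread_mutex_unlock(&stats_lock);
	pthread_join(t, NULL);
	printf("snapshot=%lu final=%lu\n", snapshot, tx_errors);
	return 0;
}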
+diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
+index 51b8377edd1d04..a89aa4ac0a064a 100644
+--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
++++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
+@@ -1609,7 +1609,7 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
+ 	/* version 1 of the cmd is not supported only by BE2 */
+ 	if (BE2_chip(adapter))
+ 		hdr->version = 0;
+-	if (BE3_chip(adapter) || lancer_chip(adapter))
++	else if (BE3_chip(adapter) || lancer_chip(adapter))
+ 		hdr->version = 1;
+ 	else
+ 		hdr->version = 2;
+diff --git a/drivers/net/ethernet/faraday/Kconfig b/drivers/net/ethernet/faraday/Kconfig
+index c699bd6bcbb938..474073c7f94d74 100644
+--- a/drivers/net/ethernet/faraday/Kconfig
++++ b/drivers/net/ethernet/faraday/Kconfig
+@@ -31,6 +31,7 @@ config FTGMAC100
+ 	depends on ARM || COMPILE_TEST
+ 	depends on !64BIT || BROKEN
+ 	select PHYLIB
++	select FIXED_PHY
+ 	select MDIO_ASPEED if MACH_ASPEED_G6
+ 	select CRC32
+ 	help
+diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
+index 07e90334635824..5fe54e9b71e25a 100644
+--- a/drivers/net/ethernet/intel/e1000e/netdev.c
++++ b/drivers/net/ethernet/intel/e1000e/netdev.c
+@@ -3540,9 +3540,6 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
+ 	case e1000_pch_cnp:
+ 	case e1000_pch_tgp:
+ 	case e1000_pch_adp:
+-	case e1000_pch_mtp:
+-	case e1000_pch_lnp:
+-	case e1000_pch_ptp:
+ 	case e1000_pch_nvp:
+ 		if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) {
+ 			/* Stable 24MHz frequency */
+@@ -3558,6 +3555,17 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
+ 			adapter->cc.shift = shift;
+ 		}
+ 		break;
++	case e1000_pch_mtp:
++	case e1000_pch_lnp:
++	case e1000_pch_ptp:
++		/* System firmware can misreport this value, so set it to a
++		 * stable 38400KHz frequency.
++		 */
++		incperiod = INCPERIOD_38400KHZ;
++		incvalue = INCVALUE_38400KHZ;
++		shift = INCVALUE_SHIFT_38400KHZ;
++		adapter->cc.shift = shift;
++		break;
+ 	case e1000_82574:
+ 	case e1000_82583:
+ 		/* Stable 25MHz frequency */
+diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
+index 89d57dd911dc89..ea3c3eb2ef2020 100644
+--- a/drivers/net/ethernet/intel/e1000e/ptp.c
++++ b/drivers/net/ethernet/intel/e1000e/ptp.c
+@@ -295,15 +295,17 @@ void e1000e_ptp_init(struct e1000_adapter *adapter)
+ 	case e1000_pch_cnp:
+ 	case e1000_pch_tgp:
+ 	case e1000_pch_adp:
+-	case e1000_pch_mtp:
+-	case e1000_pch_lnp:
+-	case e1000_pch_ptp:
+ 	case e1000_pch_nvp:
+ 		if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)
+ 			adapter->ptp_clock_info.max_adj = MAX_PPB_24MHZ;
+ 		else
+ 			adapter->ptp_clock_info.max_adj = MAX_PPB_38400KHZ;
+ 		break;
++	case e1000_pch_mtp:
++	case e1000_pch_lnp:
++	case e1000_pch_ptp:
++		adapter->ptp_clock_info.max_adj = MAX_PPB_38400KHZ;
++		break;
+ 	case e1000_82574:
+ 	case e1000_82583:
+ 		adapter->ptp_clock_info.max_adj = MAX_PPB_25MHZ;
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
+index e8031f1a9b4fc6..2f5a850148676f 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
+@@ -817,10 +817,11 @@ int i40e_pf_reset(struct i40e_hw *hw)
+ void i40e_clear_hw(struct i40e_hw *hw)
+ {
+ 	u32 num_queues, base_queue;
+-	u32 num_pf_int;
+-	u32 num_vf_int;
++	s32 num_pf_int;
++	s32 num_vf_int;
+ 	u32 num_vfs;
+-	u32 i, j;
++	s32 i;
++	u32 j;
+ 	u32 val;
+ 	u32 eol = 0x7ff;
+ 
+diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.c b/drivers/net/ethernet/intel/ice/ice_arfs.c
+index 405ddd17de1bff..0bb4fb56fbe611 100644
+--- a/drivers/net/ethernet/intel/ice/ice_arfs.c
++++ b/drivers/net/ethernet/intel/ice/ice_arfs.c
+@@ -377,6 +377,50 @@ ice_arfs_is_perfect_flow_set(struct ice_hw *hw, __be16 l3_proto, u8 l4_proto)
+ 	return false;
+ }
+ 
++/**
++ * ice_arfs_cmp - Check if aRFS filter matches this flow.
++ * @fltr_info: filter info of the saved ARFS entry.
++ * @fk: flow dissector keys.
++ * @n_proto:  One of htons(ETH_P_IP) or htons(ETH_P_IPV6).
++ * @ip_proto: One of IPPROTO_TCP or IPPROTO_UDP.
++ *
++ * Since this function assumes limited values for n_proto and ip_proto, it
++ * is meant to be called only from ice_rx_flow_steer().
++ *
++ * Return:
++ * * true	- fltr_info refers to the same flow as fk.
++ * * false	- fltr_info and fk refer to different flows.
++ */
++static bool
++ice_arfs_cmp(const struct ice_fdir_fltr *fltr_info, const struct flow_keys *fk,
++	     __be16 n_proto, u8 ip_proto)
++{
++	/* Determine if the filter is for IPv4 or IPv6 based on flow_type,
++	 * which is one of ICE_FLTR_PTYPE_NONF_IPV{4,6}_{TCP,UDP}.
++	 */
++	bool is_v4 = fltr_info->flow_type == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
++		     fltr_info->flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP;
++
++	/* The following checks put the quickest and most discriminative
++	 * fields first, so mismatches fail early.
++	 */
++	if (is_v4)
++		return n_proto == htons(ETH_P_IP) &&
++			fltr_info->ip.v4.src_port == fk->ports.src &&
++			fltr_info->ip.v4.dst_port == fk->ports.dst &&
++			fltr_info->ip.v4.src_ip == fk->addrs.v4addrs.src &&
++			fltr_info->ip.v4.dst_ip == fk->addrs.v4addrs.dst &&
++			fltr_info->ip.v4.proto == ip_proto;
++
++	return fltr_info->ip.v6.src_port == fk->ports.src &&
++		fltr_info->ip.v6.dst_port == fk->ports.dst &&
++		fltr_info->ip.v6.proto == ip_proto &&
++		!memcmp(&fltr_info->ip.v6.src_ip, &fk->addrs.v6addrs.src,
++			sizeof(struct in6_addr)) &&
++		!memcmp(&fltr_info->ip.v6.dst_ip, &fk->addrs.v6addrs.dst,
++			sizeof(struct in6_addr));
++}
++
+ /**
+  * ice_rx_flow_steer - steer the Rx flow to where application is being run
+  * @netdev: ptr to the netdev being adjusted
+@@ -448,6 +492,10 @@ ice_rx_flow_steer(struct net_device *netdev, const struct sk_buff *skb,
+ 			continue;
+ 
+ 		fltr_info = &arfs_entry->fltr_info;
++
++		if (!ice_arfs_cmp(fltr_info, &fk, n_proto, ip_proto))
++			continue;
++
+ 		ret = fltr_info->fltr_id;
+ 
+ 		if (fltr_info->q_index == rxq_idx ||
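ice_arfs_cmp() deliberately tests ports and protocol before the 128-bit address memcmp so that non-matching flows bail out on the cheap fields. A generic sketch of that short-circuit ordering:

/* Compare the cheapest, most discriminating fields first; && evaluation
 * skips the expensive memcmp on a port mismatch.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct flow {
	unsigned short sport, dport;
	unsigned char saddr[16], daddr[16];
};

static bool flow_match(const struct flow *a, const struct flow *b)
{
	return a->sport == b->sport &&     /* cheap checks first */
	       a->dport == b->dport &&
	       !memcmp(a->saddr, b->saddr, 16) &&
	       !memcmp(a->daddr, b->daddr, 16);
}

int main(void)
{
	struct flow a = { 80, 443, { 1 }, { 2 } }, b = a;

	printf("%d\n", flow_match(&a, &b));  /* 1 */
	return 0;
}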
+diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
+index ed21d7f55ac11b..5b9a7ee278f17b 100644
+--- a/drivers/net/ethernet/intel/ice/ice_eswitch.c
++++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
+@@ -502,10 +502,14 @@ ice_eswitch_attach(struct ice_pf *pf, struct ice_repr *repr, unsigned long *id)
+  */
+ int ice_eswitch_attach_vf(struct ice_pf *pf, struct ice_vf *vf)
+ {
+-	struct ice_repr *repr = ice_repr_create_vf(vf);
+ 	struct devlink *devlink = priv_to_devlink(pf);
++	struct ice_repr *repr;
+ 	int err;
+ 
++	if (!ice_is_eswitch_mode_switchdev(pf))
++		return 0;
++
++	repr = ice_repr_create_vf(vf);
+ 	if (IS_ERR(repr))
+ 		return PTR_ERR(repr);
+ 
+diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
+index 0e740342e2947e..c5430363e70810 100644
+--- a/drivers/net/ethernet/intel/ice/ice_switch.c
++++ b/drivers/net/ethernet/intel/ice/ice_switch.c
+@@ -3146,7 +3146,7 @@ ice_add_update_vsi_list(struct ice_hw *hw,
+ 		u16 vsi_handle_arr[2];
+ 
+ 		/* A rule already exists with the new VSI being added */
+-		if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
++		if (cur_fltr->vsi_handle == new_fltr->vsi_handle)
+ 			return -EEXIST;
+ 
+ 		vsi_handle_arr[0] = cur_fltr->vsi_handle;
+@@ -5977,7 +5977,7 @@ ice_adv_add_update_vsi_list(struct ice_hw *hw,
+ 
+ 		/* A rule already exists with the new VSI being added */
+ 		if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
+-			return 0;
++			return -EEXIST;
+ 
+ 		/* Update the previously created VSI list set with
+ 		 * the new VSI ID passed in
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+index 07eaa3c3f4d369..530e4319a2e89d 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+@@ -167,7 +167,7 @@ int ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr,
+ 					 u16 reg, u16 val, bool lock)
+ {
+ 	u32 swfw_mask = hw->phy.phy_semaphore_mask;
+-	int max_retry = 1;
++	int max_retry = 3;
+ 	int retry = 0;
+ 	u8 reg_high;
+ 	u8 csum;
+@@ -2284,7 +2284,7 @@ static int ixgbe_write_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset,
+ 					    u8 dev_addr, u8 data, bool lock)
+ {
+ 	u32 swfw_mask = hw->phy.phy_semaphore_mask;
+-	u32 max_retry = 1;
++	u32 max_retry = 3;
+ 	u32 retry = 0;
+ 	int status;
+ 
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
+index 7417087b6db597..a2807a1e4f4a62 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
+@@ -352,9 +352,12 @@ int cn10k_free_matchall_ipolicer(struct otx2_nic *pfvf)
+ 	mutex_lock(&pfvf->mbox.lock);
+ 
+ 	/* Remove RQ's policer mapping */
+-	for (qidx = 0; qidx < hw->rx_queues; qidx++)
+-		cn10k_map_unmap_rq_policer(pfvf, qidx,
+-					   hw->matchall_ipolicer, false);
++	for (qidx = 0; qidx < hw->rx_queues; qidx++) {
++		rc = cn10k_map_unmap_rq_policer(pfvf, qidx, hw->matchall_ipolicer, false);
++		if (rc)
++			dev_warn(pfvf->dev, "Failed to unmap RQ %d's policer (error %d).",
++				 qidx, rc);
++	}
+ 
+ 	rc = cn10k_free_leaf_profile(pfvf, hw->matchall_ipolicer);
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+index cd17a3f4faf83e..a68cd3f0304c64 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+@@ -1897,6 +1897,7 @@ static int mlx4_en_get_ts_info(struct net_device *dev,
+ 	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) {
+ 		info->so_timestamping |=
+ 			SOF_TIMESTAMPING_TX_HARDWARE |
++			SOF_TIMESTAMPING_TX_SOFTWARE |
+ 			SOF_TIMESTAMPING_RX_HARDWARE |
+ 			SOF_TIMESTAMPING_RAW_HARDWARE;
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.c
+index 72b19b05c0cf4f..fc9ba534d5d975 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.c
+@@ -508,7 +508,7 @@ static int
+ hws_definer_conv_outer(struct mlx5hws_definer_conv_data *cd,
+ 		       u32 *match_param)
+ {
+-	bool is_s_ipv6, is_d_ipv6, smac_set, dmac_set;
++	bool is_ipv6, smac_set, dmac_set, ip_addr_set, ip_ver_set;
+ 	struct mlx5hws_definer_fc *fc = cd->fc;
+ 	struct mlx5hws_definer_fc *curr_fc;
+ 	u32 *s_ipv6, *d_ipv6;
+@@ -520,6 +520,20 @@ hws_definer_conv_outer(struct mlx5hws_definer_conv_data *cd,
+ 		return -EINVAL;
+ 	}
+ 
++	ip_addr_set = HWS_IS_FLD_SET_SZ(match_param,
++					outer_headers.src_ipv4_src_ipv6,
++					0x80) ||
++		      HWS_IS_FLD_SET_SZ(match_param,
++					outer_headers.dst_ipv4_dst_ipv6, 0x80);
++	ip_ver_set = HWS_IS_FLD_SET(match_param, outer_headers.ip_version) ||
++		     HWS_IS_FLD_SET(match_param, outer_headers.ethertype);
++
++	if (ip_addr_set && !ip_ver_set) {
++		mlx5hws_err(cd->ctx,
++			    "Unsupported match on IP address without version or ethertype\n");
++		return -EINVAL;
++	}
++
+ 	/* L2 Check ethertype */
+ 	HWS_SET_HDR(fc, match_param, ETH_TYPE_O,
+ 		    outer_headers.ethertype,
+@@ -572,10 +586,16 @@ hws_definer_conv_outer(struct mlx5hws_definer_conv_data *cd,
+ 			      outer_headers.dst_ipv4_dst_ipv6.ipv6_layout);
+ 
+ 	/* Assume IPv6 is used if ipv6 bits are set */
+-	is_s_ipv6 = s_ipv6[0] || s_ipv6[1] || s_ipv6[2];
+-	is_d_ipv6 = d_ipv6[0] || d_ipv6[1] || d_ipv6[2];
++	is_ipv6 = s_ipv6[0] || s_ipv6[1] || s_ipv6[2] ||
++		  d_ipv6[0] || d_ipv6[1] || d_ipv6[2];
+ 
+-	if (is_s_ipv6) {
++	/* IHL is an IPv4-specific field. */
++	if (is_ipv6 && HWS_IS_FLD_SET(match_param, outer_headers.ipv4_ihl)) {
++		mlx5hws_err(cd->ctx, "Unsupported match on IPv6 address and IPv4 IHL\n");
++		return -EINVAL;
++	}
++
++	if (is_ipv6) {
+ 		/* Handle IPv6 source address */
+ 		HWS_SET_HDR(fc, match_param, IPV6_SRC_127_96_O,
+ 			    outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_127_96,
+@@ -589,13 +609,6 @@ hws_definer_conv_outer(struct mlx5hws_definer_conv_data *cd,
+ 		HWS_SET_HDR(fc, match_param, IPV6_SRC_31_0_O,
+ 			    outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0,
+ 			    ipv6_src_outer.ipv6_address_31_0);
+-	} else {
+-		/* Handle IPv4 source address */
+-		HWS_SET_HDR(fc, match_param, IPV4_SRC_O,
+-			    outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0,
+-			    ipv4_src_dest_outer.source_address);
+-	}
+-	if (is_d_ipv6) {
+ 		/* Handle IPv6 destination address */
+ 		HWS_SET_HDR(fc, match_param, IPV6_DST_127_96_O,
+ 			    outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_127_96,
+@@ -610,6 +623,10 @@ hws_definer_conv_outer(struct mlx5hws_definer_conv_data *cd,
+ 			    outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0,
+ 			    ipv6_dst_outer.ipv6_address_31_0);
+ 	} else {
++		/* Handle IPv4 source address */
++		HWS_SET_HDR(fc, match_param, IPV4_SRC_O,
++			    outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0,
++			    ipv4_src_dest_outer.source_address);
+ 		/* Handle IPv4 destination address */
+ 		HWS_SET_HDR(fc, match_param, IPV4_DST_O,
+ 			    outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0,
+@@ -667,7 +684,7 @@ static int
+ hws_definer_conv_inner(struct mlx5hws_definer_conv_data *cd,
+ 		       u32 *match_param)
+ {
+-	bool is_s_ipv6, is_d_ipv6, smac_set, dmac_set;
++	bool is_ipv6, smac_set, dmac_set, ip_addr_set, ip_ver_set;
+ 	struct mlx5hws_definer_fc *fc = cd->fc;
+ 	struct mlx5hws_definer_fc *curr_fc;
+ 	u32 *s_ipv6, *d_ipv6;
+@@ -679,6 +696,20 @@ hws_definer_conv_inner(struct mlx5hws_definer_conv_data *cd,
+ 		return -EINVAL;
+ 	}
+ 
++	ip_addr_set = HWS_IS_FLD_SET_SZ(match_param,
++					inner_headers.src_ipv4_src_ipv6,
++					0x80) ||
++		      HWS_IS_FLD_SET_SZ(match_param,
++					inner_headers.dst_ipv4_dst_ipv6, 0x80);
++	ip_ver_set = HWS_IS_FLD_SET(match_param, inner_headers.ip_version) ||
++		     HWS_IS_FLD_SET(match_param, inner_headers.ethertype);
++
++	if (ip_addr_set && !ip_ver_set) {
++		mlx5hws_err(cd->ctx,
++			    "Unsupported match on IP address without version or ethertype\n");
++		return -EINVAL;
++	}
++
+ 	/* L2 Check ethertype */
+ 	HWS_SET_HDR(fc, match_param, ETH_TYPE_I,
+ 		    inner_headers.ethertype,
+@@ -730,10 +761,16 @@ hws_definer_conv_inner(struct mlx5hws_definer_conv_data *cd,
+ 			      inner_headers.dst_ipv4_dst_ipv6.ipv6_layout);
+ 
+ 	/* Assume IPv6 is used if ipv6 bits are set */
+-	is_s_ipv6 = s_ipv6[0] || s_ipv6[1] || s_ipv6[2];
+-	is_d_ipv6 = d_ipv6[0] || d_ipv6[1] || d_ipv6[2];
++	is_ipv6 = s_ipv6[0] || s_ipv6[1] || s_ipv6[2] ||
++		  d_ipv6[0] || d_ipv6[1] || d_ipv6[2];
+ 
+-	if (is_s_ipv6) {
++	/* IHL is an IPv4-specific field. */
++	if (is_ipv6 && HWS_IS_FLD_SET(match_param, inner_headers.ipv4_ihl)) {
++		mlx5hws_err(cd->ctx, "Unsupported match on IPv6 address and IPv4 IHL\n");
++		return -EINVAL;
++	}
++
++	if (is_ipv6) {
+ 		/* Handle IPv6 source address */
+ 		HWS_SET_HDR(fc, match_param, IPV6_SRC_127_96_I,
+ 			    inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_127_96,
+@@ -747,13 +784,6 @@ hws_definer_conv_inner(struct mlx5hws_definer_conv_data *cd,
+ 		HWS_SET_HDR(fc, match_param, IPV6_SRC_31_0_I,
+ 			    inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0,
+ 			    ipv6_src_inner.ipv6_address_31_0);
+-	} else {
+-		/* Handle IPv4 source address */
+-		HWS_SET_HDR(fc, match_param, IPV4_SRC_I,
+-			    inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0,
+-			    ipv4_src_dest_inner.source_address);
+-	}
+-	if (is_d_ipv6) {
+ 		/* Handle IPv6 destination address */
+ 		HWS_SET_HDR(fc, match_param, IPV6_DST_127_96_I,
+ 			    inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_127_96,
+@@ -768,6 +798,10 @@ hws_definer_conv_inner(struct mlx5hws_definer_conv_data *cd,
+ 			    inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0,
+ 			    ipv6_dst_inner.ipv6_address_31_0);
+ 	} else {
++		/* Handle IPv4 source address */
++		HWS_SET_HDR(fc, match_param, IPV4_SRC_I,
++			    inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0,
++			    ipv4_src_dest_inner.source_address);
+ 		/* Handle IPv4 destination address */
+ 		HWS_SET_HDR(fc, match_param, IPV4_DST_I,
+ 			    inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0,
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+index 0d5f750faa4555..b04024d0ae676c 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+@@ -465,19 +465,22 @@ int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
+ {
+ 	u32 *out;
+ 	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
++	int err;
+ 
+ 	out = kvzalloc(outlen, GFP_KERNEL);
+ 	if (!out)
+ 		return -ENOMEM;
+ 
+-	mlx5_query_nic_vport_context(mdev, 0, out);
++	err = mlx5_query_nic_vport_context(mdev, 0, out);
++	if (err)
++		goto out;
+ 
+ 	*node_guid = MLX5_GET64(query_nic_vport_context_out, out,
+ 				nic_vport_context.node_guid);
+-
++out:
+ 	kvfree(out);
+ 
+-	return 0;
++	return err;
+ }
+ EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid);
+ 
+@@ -519,19 +522,22 @@ int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
+ {
+ 	u32 *out;
+ 	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
++	int err;
+ 
+ 	out = kvzalloc(outlen, GFP_KERNEL);
+ 	if (!out)
+ 		return -ENOMEM;
+ 
+-	mlx5_query_nic_vport_context(mdev, 0, out);
++	err = mlx5_query_nic_vport_context(mdev, 0, out);
++	if (err)
++		goto out;
+ 
+ 	*qkey_viol_cntr = MLX5_GET(query_nic_vport_context_out, out,
+ 				   nic_vport_context.qkey_violation_counter);
+-
++out:
+ 	kvfree(out);
+ 
+-	return 0;
++	return err;
+ }
+ EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_qkey_viol_cntr);
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
+index 385a56ac73481a..c82254a8ae6611 100644
+--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
++++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
+@@ -447,8 +447,10 @@ static int mlxbf_gige_probe(struct platform_device *pdev)
+ 	priv->llu_plu_irq = platform_get_irq(pdev, MLXBF_GIGE_LLU_PLU_INTR_IDX);
+ 
+ 	phy_irq = acpi_dev_gpio_irq_get_by(ACPI_COMPANION(&pdev->dev), "phy", 0);
+-	if (phy_irq < 0) {
+-		dev_err(&pdev->dev, "Error getting PHY irq. Use polling instead");
++	if (phy_irq == -EPROBE_DEFER) {
++		err = -EPROBE_DEFER;
++		goto out;
++	} else if (phy_irq < 0) {
+ 		phy_irq = PHY_POLL;
+ 	}
+ 
+diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
+index 7775418316df53..d6cf97ecf32766 100644
+--- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
++++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
+@@ -127,11 +127,8 @@ static int fbnic_mbx_map_msg(struct fbnic_dev *fbd, int mbx_idx,
+ 		return -EBUSY;
+ 
+ 	addr = dma_map_single(fbd->dev, msg, PAGE_SIZE, direction);
+-	if (dma_mapping_error(fbd->dev, addr)) {
+-		free_page((unsigned long)msg);
+-
++	if (dma_mapping_error(fbd->dev, addr))
+ 		return -ENOSPC;
+-	}
+ 
+ 	mbx->buf_info[tail].msg = msg;
+ 	mbx->buf_info[tail].addr = addr;
+diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c
+index 1a1cbd034eda0a..2acd9c3531deaf 100644
+--- a/drivers/net/ethernet/microchip/lan743x_ethtool.c
++++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c
+@@ -18,6 +18,8 @@
+ #define EEPROM_MAC_OFFSET		    (0x01)
+ #define MAX_EEPROM_SIZE			    (512)
+ #define MAX_OTP_SIZE			    (1024)
++#define MAX_HS_OTP_SIZE			    (8 * 1024)
++#define MAX_HS_EEPROM_SIZE		    (64 * 1024)
+ #define OTP_INDICATOR_1			    (0xF3)
+ #define OTP_INDICATOR_2			    (0xF7)
+ 
+@@ -272,6 +274,9 @@ static int lan743x_hs_otp_read(struct lan743x_adapter *adapter, u32 offset,
+ 	int ret;
+ 	int i;
+ 
++	if (offset + length > MAX_HS_OTP_SIZE)
++		return -EINVAL;
++
+ 	ret = lan743x_hs_syslock_acquire(adapter, LOCK_TIMEOUT_MAX_CNT);
+ 	if (ret < 0)
+ 		return ret;
+@@ -320,6 +325,9 @@ static int lan743x_hs_otp_write(struct lan743x_adapter *adapter, u32 offset,
+ 	int ret;
+ 	int i;
+ 
++	if (offset + length > MAX_HS_OTP_SIZE)
++		return -EINVAL;
++
+ 	ret = lan743x_hs_syslock_acquire(adapter, LOCK_TIMEOUT_MAX_CNT);
+ 	if (ret < 0)
+ 		return ret;
+@@ -497,6 +505,9 @@ static int lan743x_hs_eeprom_read(struct lan743x_adapter *adapter,
+ 	u32 val;
+ 	int i;
+ 
++	if (offset + length > MAX_HS_EEPROM_SIZE)
++		return -EINVAL;
++
+ 	retval = lan743x_hs_syslock_acquire(adapter, LOCK_TIMEOUT_MAX_CNT);
+ 	if (retval < 0)
+ 		return retval;
+@@ -539,6 +550,9 @@ static int lan743x_hs_eeprom_write(struct lan743x_adapter *adapter,
+ 	u32 val;
+ 	int i;
+ 
++	if (offset + length > MAX_HS_EEPROM_SIZE)
++		return -EINVAL;
++
+ 	retval = lan743x_hs_syslock_acquire(adapter, LOCK_TIMEOUT_MAX_CNT);
+ 	if (retval < 0)
+ 		return retval;
+@@ -604,9 +618,9 @@ static int lan743x_ethtool_get_eeprom_len(struct net_device *netdev)
+ 	struct lan743x_adapter *adapter = netdev_priv(netdev);
+ 
+ 	if (adapter->flags & LAN743X_ADAPTER_FLAG_OTP)
+-		return MAX_OTP_SIZE;
++		return adapter->is_pci11x1x ? MAX_HS_OTP_SIZE : MAX_OTP_SIZE;
+ 
+-	return MAX_EEPROM_SIZE;
++	return adapter->is_pci11x1x ? MAX_HS_EEPROM_SIZE : MAX_EEPROM_SIZE;
+ }
+ 
+ static int lan743x_ethtool_get_eeprom(struct net_device *netdev,
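The new offset + length checks above bound ethtool EEPROM/OTP accesses to the larger PCI11x1x part sizes. A sketch of such a range check; the wraparound guard below is a defensive extra for illustration, not something this patch adds:

/* Reject ranges whose end would exceed the part or whose arithmetic
 * would wrap; writing the check as `offset > size - length` avoids the
 * unsigned overflow in `offset + length`.
 */
#include <stdint.h>
#include <stdio.h>

#define PART_SIZE (8u * 1024u)

static int check_range(uint32_t offset, uint32_t length)
{
	if (length > PART_SIZE || offset > PART_SIZE - length)
		return -1;        /* -EINVAL in the driver */
	return 0;
}

int main(void)
{
	printf("%d\n", check_range(8000, 400));        /* -1: past the end */
	printf("%d\n", check_range(0xffffff00u, 512)); /* -1: would wrap   */
	return 0;
}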
+diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.h b/drivers/net/ethernet/microchip/lan743x_ptp.h
+index 0d29914cd46063..225e8232474d73 100644
+--- a/drivers/net/ethernet/microchip/lan743x_ptp.h
++++ b/drivers/net/ethernet/microchip/lan743x_ptp.h
+@@ -18,9 +18,9 @@
+  */
+ #define LAN743X_PTP_N_EVENT_CHAN	2
+ #define LAN743X_PTP_N_PEROUT		LAN743X_PTP_N_EVENT_CHAN
+-#define LAN743X_PTP_N_EXTTS		4
+-#define LAN743X_PTP_N_PPS		0
+ #define PCI11X1X_PTP_IO_MAX_CHANNELS	8
++#define LAN743X_PTP_N_EXTTS		PCI11X1X_PTP_IO_MAX_CHANNELS
++#define LAN743X_PTP_N_PPS		0
+ #define PTP_CMD_CTL_TIMEOUT_CNT		50
+ 
+ struct lan743x_adapter;
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
+index 0f817c3f92d820..533df5993048fe 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
++++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
+@@ -515,9 +515,9 @@ static int __ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_seconds,
+ 	unsigned long start_time;
+ 	unsigned long max_wait;
+ 	unsigned long duration;
+-	int done = 0;
+ 	bool fw_up;
+ 	int opcode;
++	bool done;
+ 	int err;
+ 
+ 	/* Wait for dev cmd to complete, retrying if we get EAGAIN,
+@@ -525,6 +525,7 @@ static int __ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_seconds,
+ 	 */
+ 	max_wait = jiffies + (max_seconds * HZ);
+ try_again:
++	done = false;
+ 	opcode = idev->opcode;
+ 	start_time = jiffies;
+ 	for (fw_up = ionic_is_fw_running(idev);
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index f68e3ece919cc8..0250c5cb28ff21 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -4424,8 +4424,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
+ 	if (priv->sarc_type)
+ 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
+ 
+-	skb_tx_timestamp(skb);
+-
+ 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ 		     priv->hwts_tx_en)) {
+ 		/* declare that device is doing timestamping */
+@@ -4460,6 +4458,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
+ 	}
+ 
+ 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
++	skb_tx_timestamp(skb);
+ 
+ 	stmmac_flush_tx_descriptors(priv, queue);
+ 	stmmac_tx_timer_arm(priv, queue);
+@@ -4703,8 +4702,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
+ 	if (priv->sarc_type)
+ 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
+ 
+-	skb_tx_timestamp(skb);
+-
+ 	/* Ready to fill the first descriptor and set the OWN bit w/o any
+ 	 * problems because all the descriptors are actually ready to be
+ 	 * passed to the DMA engine.
+@@ -4751,7 +4748,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
+ 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
+ 
+ 	stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
+-
++	skb_tx_timestamp(skb);
+ 	stmmac_flush_tx_descriptors(priv, queue);
+ 	stmmac_tx_timer_arm(priv, queue);
+ 
+diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+index 61788a43cb8618..393cc5192e90d1 100644
+--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
++++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+@@ -2693,7 +2693,9 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
+ 			goto of_node_put;
+ 
+ 		ret = of_get_mac_address(port_np, port->slave.mac_addr);
+-		if (ret) {
++		if (ret == -EPROBE_DEFER) {
++			goto of_node_put;
++		} else if (ret) {
+ 			am65_cpsw_am654_get_efuse_macid(port_np,
+ 							port->port_id,
+ 							port->slave.mac_addr);
+@@ -3586,6 +3588,16 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
+ 		return ret;
+ 	}
+ 
++	am65_cpsw_nuss_get_ver(common);
++
++	ret = am65_cpsw_nuss_init_host_p(common);
++	if (ret)
++		goto err_pm_clear;
++
++	ret = am65_cpsw_nuss_init_slave_ports(common);
++	if (ret)
++		goto err_pm_clear;
++
+ 	node = of_get_child_by_name(dev->of_node, "mdio");
+ 	if (!node) {
+ 		dev_warn(dev, "MDIO node not found\n");
+@@ -3602,16 +3614,6 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
+ 	}
+ 	of_node_put(node);
+ 
+-	am65_cpsw_nuss_get_ver(common);
+-
+-	ret = am65_cpsw_nuss_init_host_p(common);
+-	if (ret)
+-		goto err_of_clear;
+-
+-	ret = am65_cpsw_nuss_init_slave_ports(common);
+-	if (ret)
+-		goto err_of_clear;
+-
+ 	/* init common data */
+ 	ale_params.dev = dev;
+ 	ale_params.ale_ageout = AM65_CPSW_ALE_AGEOUT_DEFAULT;
+diff --git a/drivers/net/ethernet/vertexcom/mse102x.c b/drivers/net/ethernet/vertexcom/mse102x.c
+index e4d993f3137407..545177e84c0eba 100644
+--- a/drivers/net/ethernet/vertexcom/mse102x.c
++++ b/drivers/net/ethernet/vertexcom/mse102x.c
+@@ -306,7 +306,7 @@ static void mse102x_dump_packet(const char *msg, int len, const char *data)
+ 		       data, len, true);
+ }
+ 
+-static void mse102x_rx_pkt_spi(struct mse102x_net *mse)
++static irqreturn_t mse102x_rx_pkt_spi(struct mse102x_net *mse)
+ {
+ 	struct sk_buff *skb;
+ 	unsigned int rxalign;
+@@ -327,7 +327,7 @@ static void mse102x_rx_pkt_spi(struct mse102x_net *mse)
+ 		mse102x_tx_cmd_spi(mse, CMD_CTR);
+ 		ret = mse102x_rx_cmd_spi(mse, (u8 *)&rx);
+ 		if (ret)
+-			return;
++			return IRQ_NONE;
+ 
+ 		cmd_resp = be16_to_cpu(rx);
+ 		if ((cmd_resp & CMD_MASK) != CMD_RTS) {
+@@ -360,7 +360,7 @@ static void mse102x_rx_pkt_spi(struct mse102x_net *mse)
+ 	rxalign = ALIGN(rxlen + DET_SOF_LEN + DET_DFT_LEN, 4);
+ 	skb = netdev_alloc_skb_ip_align(mse->ndev, rxalign);
+ 	if (!skb)
+-		return;
++		return IRQ_NONE;
+ 
+ 	/* 2 bytes Start of frame (before ethernet header)
+ 	 * 2 bytes Data frame tail (after ethernet frame)
+@@ -370,7 +370,7 @@ static void mse102x_rx_pkt_spi(struct mse102x_net *mse)
+ 	if (mse102x_rx_frame_spi(mse, rxpkt, rxlen, drop)) {
+ 		mse->ndev->stats.rx_errors++;
+ 		dev_kfree_skb(skb);
+-		return;
++		return IRQ_HANDLED;
+ 	}
+ 
+ 	if (netif_msg_pktdata(mse))
+@@ -381,6 +381,8 @@ static void mse102x_rx_pkt_spi(struct mse102x_net *mse)
+ 
+ 	mse->ndev->stats.rx_packets++;
+ 	mse->ndev->stats.rx_bytes += rxlen;
++
++	return IRQ_HANDLED;
+ }
+ 
+ static int mse102x_tx_pkt_spi(struct mse102x_net *mse, struct sk_buff *txb,
+@@ -512,12 +514,13 @@ static irqreturn_t mse102x_irq(int irq, void *_mse)
+ {
+ 	struct mse102x_net *mse = _mse;
+ 	struct mse102x_net_spi *mses = to_mse102x_spi(mse);
++	irqreturn_t ret;
+ 
+ 	mutex_lock(&mses->lock);
+-	mse102x_rx_pkt_spi(mse);
++	ret = mse102x_rx_pkt_spi(mse);
+ 	mutex_unlock(&mses->lock);
+ 
+-	return IRQ_HANDLED;
++	return ret;
+ }
+ 
+ static int mse102x_net_open(struct net_device *ndev)
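Returning IRQ_NONE when no frame was pending lets the kernel's spurious-interrupt accounting work, instead of unconditionally claiming IRQ_HANDLED. A toy model of that contract:

/* Report IRQ_HANDLED only when the device actually had work; IRQ_NONE
 * tells the core the interrupt may not have been ours.
 */
#include <stdbool.h>
#include <stdio.h>

enum irqreturn { IRQ_NONE, IRQ_HANDLED };

static bool device_has_packet(void) { return false; } /* stand-in read */

static enum irqreturn irq_handler(void)
{
	if (!device_has_packet())
		return IRQ_NONE;      /* nothing pending */
	/* ... receive the frame ... */
	return IRQ_HANDLED;
}

int main(void)
{
	printf("%d\n", irq_handler());  /* 0 == IRQ_NONE */
	return 0;
}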
+diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
+index 79b898311819d4..ee2a7b2f6268de 100644
+--- a/drivers/net/netdevsim/netdev.c
++++ b/drivers/net/netdevsim/netdev.c
+@@ -25,6 +25,7 @@
+ #include <net/pkt_cls.h>
+ #include <net/rtnetlink.h>
+ #include <net/udp_tunnel.h>
++#include <net/busy_poll.h>
+ 
+ #include "netdevsim.h"
+ 
+@@ -341,6 +342,7 @@ static int nsim_rcv(struct nsim_rq *rq, int budget)
+ 			break;
+ 
+ 		skb = skb_dequeue(&rq->skb_queue);
++		skb_mark_napi_id(skb, &rq->napi);
+ 		netif_receive_skb(skb);
+ 	}
+ 
+diff --git a/drivers/net/usb/asix.h b/drivers/net/usb/asix.h
+index 74162190bccc10..8531b804021aa4 100644
+--- a/drivers/net/usb/asix.h
++++ b/drivers/net/usb/asix.h
+@@ -224,7 +224,6 @@ int asix_write_rx_ctl(struct usbnet *dev, u16 mode, int in_pm);
+ 
+ u16 asix_read_medium_status(struct usbnet *dev, int in_pm);
+ int asix_write_medium_mode(struct usbnet *dev, u16 mode, int in_pm);
+-void asix_adjust_link(struct net_device *netdev);
+ 
+ int asix_write_gpio(struct usbnet *dev, u16 value, int sleep, int in_pm);
+ 
+diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
+index 72ffc89b477ad8..7fd763917ae2cf 100644
+--- a/drivers/net/usb/asix_common.c
++++ b/drivers/net/usb/asix_common.c
+@@ -414,28 +414,6 @@ int asix_write_medium_mode(struct usbnet *dev, u16 mode, int in_pm)
+ 	return ret;
+ }
+ 
+-/* set MAC link settings according to information from phylib */
+-void asix_adjust_link(struct net_device *netdev)
+-{
+-	struct phy_device *phydev = netdev->phydev;
+-	struct usbnet *dev = netdev_priv(netdev);
+-	u16 mode = 0;
+-
+-	if (phydev->link) {
+-		mode = AX88772_MEDIUM_DEFAULT;
+-
+-		if (phydev->duplex == DUPLEX_HALF)
+-			mode &= ~AX_MEDIUM_FD;
+-
+-		if (phydev->speed != SPEED_100)
+-			mode &= ~AX_MEDIUM_PS;
+-	}
+-
+-	asix_write_medium_mode(dev, mode, 0);
+-	phy_print_status(phydev);
+-	usbnet_link_change(dev, phydev->link, 0);
+-}
+-
+ int asix_write_gpio(struct usbnet *dev, u16 value, int sleep, int in_pm)
+ {
+ 	int ret;
+diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
+index da24941a6e4446..9b0318fb50b55c 100644
+--- a/drivers/net/usb/asix_devices.c
++++ b/drivers/net/usb/asix_devices.c
+@@ -752,7 +752,6 @@ static void ax88772_mac_link_down(struct phylink_config *config,
+ 	struct usbnet *dev = netdev_priv(to_net_dev(config->dev));
+ 
+ 	asix_write_medium_mode(dev, 0, 0);
+-	usbnet_link_change(dev, false, false);
+ }
+ 
+ static void ax88772_mac_link_up(struct phylink_config *config,
+@@ -783,7 +782,6 @@ static void ax88772_mac_link_up(struct phylink_config *config,
+ 		m |= AX_MEDIUM_RFC;
+ 
+ 	asix_write_medium_mode(dev, m, 0);
+-	usbnet_link_change(dev, true, false);
+ }
+ 
+ static const struct phylink_mac_ops ax88772_phylink_mac_ops = {
+@@ -1350,10 +1348,9 @@ static const struct driver_info ax88772_info = {
+ 	.description = "ASIX AX88772 USB 2.0 Ethernet",
+ 	.bind = ax88772_bind,
+ 	.unbind = ax88772_unbind,
+-	.status = asix_status,
+ 	.reset = ax88772_reset,
+ 	.stop = ax88772_stop,
+-	.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR | FLAG_MULTI_PACKET,
++	.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_MULTI_PACKET,
+ 	.rx_fixup = asix_rx_fixup_common,
+ 	.tx_fixup = asix_tx_fixup,
+ };
+@@ -1362,11 +1359,9 @@ static const struct driver_info ax88772b_info = {
+ 	.description = "ASIX AX88772B USB 2.0 Ethernet",
+ 	.bind = ax88772_bind,
+ 	.unbind = ax88772_unbind,
+-	.status = asix_status,
+ 	.reset = ax88772_reset,
+ 	.stop = ax88772_stop,
+-	.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR |
+-	         FLAG_MULTI_PACKET,
++	.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_MULTI_PACKET,
+ 	.rx_fixup = asix_rx_fixup_common,
+ 	.tx_fixup = asix_tx_fixup,
+ 	.data = FLAG_EEPROM_MAC,
+@@ -1376,11 +1371,9 @@ static const struct driver_info lxausb_t1l_info = {
+ 	.description = "Linux Automation GmbH USB 10Base-T1L",
+ 	.bind = ax88772_bind,
+ 	.unbind = ax88772_unbind,
+-	.status = asix_status,
+ 	.reset = ax88772_reset,
+ 	.stop = ax88772_stop,
+-	.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR |
+-		 FLAG_MULTI_PACKET,
++	.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_MULTI_PACKET,
+ 	.rx_fixup = asix_rx_fixup_common,
+ 	.tx_fixup = asix_tx_fixup,
+ 	.data = FLAG_EEPROM_MAC,
+@@ -1412,10 +1405,8 @@ static const struct driver_info hg20f9_info = {
+ 	.description = "HG20F9 USB 2.0 Ethernet",
+ 	.bind = ax88772_bind,
+ 	.unbind = ax88772_unbind,
+-	.status = asix_status,
+ 	.reset = ax88772_reset,
+-	.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR |
+-	         FLAG_MULTI_PACKET,
++	.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_MULTI_PACKET,
+ 	.rx_fixup = asix_rx_fixup_common,
+ 	.tx_fixup = asix_tx_fixup,
+ 	.data = FLAG_EEPROM_MAC,
+diff --git a/drivers/net/usb/ch9200.c b/drivers/net/usb/ch9200.c
+index f69d9b902da04a..a206ffa76f1b93 100644
+--- a/drivers/net/usb/ch9200.c
++++ b/drivers/net/usb/ch9200.c
+@@ -178,6 +178,7 @@ static int ch9200_mdio_read(struct net_device *netdev, int phy_id, int loc)
+ {
+ 	struct usbnet *dev = netdev_priv(netdev);
+ 	unsigned char buff[2];
++	int ret;
+ 
+ 	netdev_dbg(netdev, "%s phy_id:%02x loc:%02x\n",
+ 		   __func__, phy_id, loc);
+@@ -185,8 +186,10 @@ static int ch9200_mdio_read(struct net_device *netdev, int phy_id, int loc)
+ 	if (phy_id != 0)
+ 		return -ENODEV;
+ 
+-	control_read(dev, REQUEST_READ, 0, loc * 2, buff, 0x02,
+-		     CONTROL_TIMEOUT_MS);
++	ret = control_read(dev, REQUEST_READ, 0, loc * 2, buff, 0x02,
++			   CONTROL_TIMEOUT_MS);
++	if (ret < 0)
++		return ret;
+ 
+ 	return (buff[0] | buff[1] << 8);
+ }
+diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
+index 474faccf75fd93..1a707709380017 100644
+--- a/drivers/net/vxlan/vxlan_core.c
++++ b/drivers/net/vxlan/vxlan_core.c
+@@ -605,10 +605,10 @@ static int vxlan_fdb_append(struct vxlan_fdb *f,
+ 	if (rd == NULL)
+ 		return -ENOMEM;
+ 
+-	if (dst_cache_init(&rd->dst_cache, GFP_ATOMIC)) {
+-		kfree(rd);
+-		return -ENOMEM;
+-	}
++	/* The driver can work correctly without a dst cache, so do not treat
++	 * dst cache initialization errors as fatal.
++	 */
++	dst_cache_init(&rd->dst_cache, GFP_ATOMIC | __GFP_NOWARN);
+ 
+ 	rd->remote_ip = *ip;
+ 	rd->remote_port = port;
+diff --git a/drivers/net/wireless/ath/ath11k/ce.c b/drivers/net/wireless/ath/ath11k/ce.c
+index e66e86bdec20ff..9d8efec46508a1 100644
+--- a/drivers/net/wireless/ath/ath11k/ce.c
++++ b/drivers/net/wireless/ath/ath11k/ce.c
+@@ -393,11 +393,10 @@ static int ath11k_ce_completed_recv_next(struct ath11k_ce_pipe *pipe,
+ 		goto err;
+ 	}
+ 
++	/* Make sure descriptor is read after the head pointer. */
++	dma_rmb();
++
+ 	*nbytes = ath11k_hal_ce_dst_status_get_length(desc);
+-	if (*nbytes == 0) {
+-		ret = -EIO;
+-		goto err;
+-	}
+ 
+ 	*skb = pipe->dest_ring->skb[sw_index];
+ 	pipe->dest_ring->skb[sw_index] = NULL;
+@@ -430,8 +429,8 @@ static void ath11k_ce_recv_process_cb(struct ath11k_ce_pipe *pipe)
+ 		dma_unmap_single(ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
+ 				 max_nbytes, DMA_FROM_DEVICE);
+ 
+-		if (unlikely(max_nbytes < nbytes)) {
+-			ath11k_warn(ab, "rxed more than expected (nbytes %d, max %d)",
++		if (unlikely(max_nbytes < nbytes || nbytes == 0)) {
++			ath11k_warn(ab, "unexpected rx length (nbytes %d, max %d)",
+ 				    nbytes, max_nbytes);
+ 			dev_kfree_skb_any(skb);
+ 			continue;
+diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
+index 8002fb32a2cc10..2ec1771262fd97 100644
+--- a/drivers/net/wireless/ath/ath11k/core.c
++++ b/drivers/net/wireless/ath/ath11k/core.c
+@@ -811,6 +811,52 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
+ 	},
+ };
+ 
++static const struct dmi_system_id ath11k_pm_quirk_table[] = {
++	{
++		.driver_data = (void *)ATH11K_PM_WOW,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "21J4"),
++		},
++	},
++	{
++		.driver_data = (void *)ATH11K_PM_WOW,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "21K4"),
++		},
++	},
++	{
++		.driver_data = (void *)ATH11K_PM_WOW,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "21K6"),
++		},
++	},
++	{
++		.driver_data = (void *)ATH11K_PM_WOW,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "21K8"),
++		},
++	},
++	{
++		.driver_data = (void *)ATH11K_PM_WOW,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "21KA"),
++		},
++	},
++	{
++		.driver_data = (void *)ATH11K_PM_WOW,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "21F9"),
++		},
++	},
++	{}
++};
++
+ static inline struct ath11k_pdev *ath11k_core_get_single_pdev(struct ath11k_base *ab)
+ {
+ 	WARN_ON(!ab->hw_params.single_pdev_only);
+@@ -2197,8 +2243,17 @@ EXPORT_SYMBOL(ath11k_core_pre_init);
+ 
+ int ath11k_core_init(struct ath11k_base *ab)
+ {
++	const struct dmi_system_id *dmi_id;
+ 	int ret;
+ 
++	dmi_id = dmi_first_match(ath11k_pm_quirk_table);
++	if (dmi_id)
++		ab->pm_policy = (kernel_ulong_t)dmi_id->driver_data;
++	else
++		ab->pm_policy = ATH11K_PM_DEFAULT;
++
++	ath11k_dbg(ab, ATH11K_DBG_BOOT, "pm policy %u\n", ab->pm_policy);
++
+ 	ret = ath11k_core_soc_create(ab);
+ 	if (ret) {
+ 		ath11k_err(ab, "failed to create soc core: %d\n", ret);
+diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h
+index fcdec14eb3cfa9..09fdb7be0e1971 100644
+--- a/drivers/net/wireless/ath/ath11k/core.h
++++ b/drivers/net/wireless/ath/ath11k/core.h
+@@ -891,6 +891,11 @@ struct ath11k_msi_config {
+ 	u16 hw_rev;
+ };
+ 
++enum ath11k_pm_policy {
++	ATH11K_PM_DEFAULT,
++	ATH11K_PM_WOW,
++};
++
+ /* Master structure to hold the hw data which may be used in core module */
+ struct ath11k_base {
+ 	enum ath11k_hw_rev hw_rev;
+@@ -1053,6 +1058,8 @@ struct ath11k_base {
+ 	} testmode;
+ #endif
+ 
++	enum ath11k_pm_policy pm_policy;
++
+ 	/* must be last */
+ 	u8 drv_priv[] __aligned(sizeof(void *));
+ };
+diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
+index bfb8e7b1a300c6..007d8695904235 100644
+--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
++++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
+@@ -2637,7 +2637,7 @@ int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id,
+ 	struct ath11k *ar;
+ 	struct hal_reo_dest_ring *desc;
+ 	enum hal_reo_dest_ring_push_reason push_reason;
+-	u32 cookie;
++	u32 cookie, info0, rx_msdu_info0, rx_mpdu_info0;
+ 	int i;
+ 
+ 	for (i = 0; i < MAX_RADIOS; i++)
+@@ -2650,11 +2650,14 @@ int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id,
+ try_again:
+ 	ath11k_hal_srng_access_begin(ab, srng);
+ 
++	/* Make sure descriptor is read after the head pointer. */
++	dma_rmb();
++
+ 	while (likely(desc =
+ 	      (struct hal_reo_dest_ring *)ath11k_hal_srng_dst_get_next_entry(ab,
+ 									     srng))) {
+ 		cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
+-				   desc->buf_addr_info.info1);
++				   READ_ONCE(desc->buf_addr_info.info1));
+ 		buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
+ 				   cookie);
+ 		mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, cookie);
+@@ -2683,8 +2686,9 @@ int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id,
+ 
+ 		num_buffs_reaped[mac_id]++;
+ 
++		info0 = READ_ONCE(desc->info0);
+ 		push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON,
+-					desc->info0);
++					info0);
+ 		if (unlikely(push_reason !=
+ 			     HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION)) {
+ 			dev_kfree_skb_any(msdu);
+@@ -2692,18 +2696,21 @@ int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id,
+ 			continue;
+ 		}
+ 
+-		rxcb->is_first_msdu = !!(desc->rx_msdu_info.info0 &
++		rx_msdu_info0 = READ_ONCE(desc->rx_msdu_info.info0);
++		rx_mpdu_info0 = READ_ONCE(desc->rx_mpdu_info.info0);
++
++		rxcb->is_first_msdu = !!(rx_msdu_info0 &
+ 					 RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU);
+-		rxcb->is_last_msdu = !!(desc->rx_msdu_info.info0 &
++		rxcb->is_last_msdu = !!(rx_msdu_info0 &
+ 					RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
+-		rxcb->is_continuation = !!(desc->rx_msdu_info.info0 &
++		rxcb->is_continuation = !!(rx_msdu_info0 &
+ 					   RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
+ 		rxcb->peer_id = FIELD_GET(RX_MPDU_DESC_META_DATA_PEER_ID,
+-					  desc->rx_mpdu_info.meta_data);
++					  READ_ONCE(desc->rx_mpdu_info.meta_data));
+ 		rxcb->seq_no = FIELD_GET(RX_MPDU_DESC_INFO0_SEQ_NUM,
+-					 desc->rx_mpdu_info.info0);
++					 rx_mpdu_info0);
+ 		rxcb->tid = FIELD_GET(HAL_REO_DEST_RING_INFO0_RX_QUEUE_NUM,
+-				      desc->info0);
++				      info0);
+ 
+ 		rxcb->mac_id = mac_id;
+ 		__skb_queue_tail(&msdu_list[mac_id], msdu);
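+
A small idiom runs through the dp_rx.c hunk above: when several FIELD_GET() extractions come from the same descriptor word, the word is loaded once with READ_ONCE() into a local so the compiler cannot re-read DMA memory between extractions. A compressed sketch with hypothetical masks, not the ath11k definitions:

  #include <linux/bitfield.h>
  #include <linux/compiler.h>

  #define DEMO_INFO0_PUSH_REASON  GENMASK(9, 8)
  #define DEMO_INFO0_QUEUE_NUM    GENMASK(24, 22)

  struct demo_desc { u32 info0; };

  static void demo_parse(const struct demo_desc *desc, u8 *reason, u8 *queue)
  {
          /* One load for every field below; a plain desc->info0 per
           * FIELD_GET would let the compiler issue repeated loads of
           * memory the device may still be writing. */
          u32 info0 = READ_ONCE(desc->info0);

          *reason = FIELD_GET(DEMO_INFO0_PUSH_REASON, info0);
          *queue  = FIELD_GET(DEMO_INFO0_QUEUE_NUM, info0);
  }
+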
+diff --git a/drivers/net/wireless/ath/ath11k/hal.c b/drivers/net/wireless/ath/ath11k/hal.c
+index f02599bd1c36b1..c445bf5cd83211 100644
+--- a/drivers/net/wireless/ath/ath11k/hal.c
++++ b/drivers/net/wireless/ath/ath11k/hal.c
+@@ -599,7 +599,7 @@ u32 ath11k_hal_ce_dst_status_get_length(void *buf)
+ 	struct hal_ce_srng_dst_status_desc *desc = buf;
+ 	u32 len;
+ 
+-	len = FIELD_GET(HAL_CE_DST_STATUS_DESC_FLAGS_LEN, desc->flags);
++	len = FIELD_GET(HAL_CE_DST_STATUS_DESC_FLAGS_LEN, READ_ONCE(desc->flags));
+ 	desc->flags &= ~HAL_CE_DST_STATUS_DESC_FLAGS_LEN;
+ 
+ 	return len;
+@@ -829,7 +829,7 @@ void ath11k_hal_srng_access_begin(struct ath11k_base *ab, struct hal_srng *srng)
+ 		srng->u.src_ring.cached_tp =
+ 			*(volatile u32 *)srng->u.src_ring.tp_addr;
+ 	} else {
+-		srng->u.dst_ring.cached_hp = *srng->u.dst_ring.hp_addr;
++		srng->u.dst_ring.cached_hp = READ_ONCE(*srng->u.dst_ring.hp_addr);
+ 
+ 		/* Try to prefetch the next descriptor in the ring */
+ 		if (srng->flags & HAL_SRNG_FLAGS_CACHED)
+diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c
+index 7a22483b35cd98..a5555c959dec96 100644
+--- a/drivers/net/wireless/ath/ath11k/qmi.c
++++ b/drivers/net/wireless/ath/ath11k/qmi.c
+@@ -1989,6 +1989,15 @@ static int ath11k_qmi_alloc_target_mem_chunk(struct ath11k_base *ab)
+ 			    chunk->prev_size == chunk->size)
+ 				continue;
+ 
++			if (ab->qmi.mem_seg_count <= ATH11K_QMI_FW_MEM_REQ_SEGMENT_CNT) {
++				ath11k_dbg(ab, ATH11K_DBG_QMI,
++					   "size/type mismatch (current %d %u) (prev %d %u), try later with small size\n",
++					    chunk->size, chunk->type,
++					    chunk->prev_size, chunk->prev_type);
++				ab->qmi.target_mem_delayed = true;
++				return 0;
++			}
++
+ 			/* cannot reuse the existing chunk */
+ 			dma_free_coherent(ab->dev, chunk->prev_size,
+ 					  chunk->vaddr, chunk->paddr);
+diff --git a/drivers/net/wireless/ath/ath12k/ce.c b/drivers/net/wireless/ath/ath12k/ce.c
+index be0d669d31fcce..740586fe49d1f9 100644
+--- a/drivers/net/wireless/ath/ath12k/ce.c
++++ b/drivers/net/wireless/ath/ath12k/ce.c
+@@ -343,11 +343,10 @@ static int ath12k_ce_completed_recv_next(struct ath12k_ce_pipe *pipe,
+ 		goto err;
+ 	}
+ 
++	/* Make sure descriptor is read after the head pointer. */
++	dma_rmb();
++
+ 	*nbytes = ath12k_hal_ce_dst_status_get_length(desc);
+-	if (*nbytes == 0) {
+-		ret = -EIO;
+-		goto err;
+-	}
+ 
+ 	*skb = pipe->dest_ring->skb[sw_index];
+ 	pipe->dest_ring->skb[sw_index] = NULL;
+@@ -380,8 +379,8 @@ static void ath12k_ce_recv_process_cb(struct ath12k_ce_pipe *pipe)
+ 		dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(skb)->paddr,
+ 				 max_nbytes, DMA_FROM_DEVICE);
+ 
+-		if (unlikely(max_nbytes < nbytes)) {
+-			ath12k_warn(ab, "rxed more than expected (nbytes %d, max %d)",
++		if (unlikely(max_nbytes < nbytes || nbytes == 0)) {
++			ath12k_warn(ab, "unexpected rx length (nbytes %d, max %d)",
+ 				    nbytes, max_nbytes);
+ 			dev_kfree_skb_any(skb);
+ 			continue;
+diff --git a/drivers/net/wireless/ath/ath12k/ce.h b/drivers/net/wireless/ath/ath12k/ce.h
+index 857bc5f9e946a9..f9547a3945e44b 100644
+--- a/drivers/net/wireless/ath/ath12k/ce.h
++++ b/drivers/net/wireless/ath/ath12k/ce.h
+@@ -1,7 +1,7 @@
+ /* SPDX-License-Identifier: BSD-3-Clause-Clear */
+ /*
+  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+- * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2021-2022, 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+  */
+ 
+ #ifndef ATH12K_CE_H
+@@ -39,8 +39,8 @@
+ #define PIPEDIR_INOUT_H2H	4 /* bidirectional, host to host */
+ 
+ /* CE address/mask */
+-#define CE_HOST_IE_ADDRESS	0x00A1803C
+-#define CE_HOST_IE_2_ADDRESS	0x00A18040
++#define CE_HOST_IE_ADDRESS	0x75804C
++#define CE_HOST_IE_2_ADDRESS	0x758050
+ #define CE_HOST_IE_3_ADDRESS	CE_HOST_IE_ADDRESS
+ 
+ #define CE_HOST_IE_3_SHIFT	0xC
+diff --git a/drivers/net/wireless/ath/ath12k/dp_mon.c b/drivers/net/wireless/ath/ath12k/dp_mon.c
+index 6a88745369447f..7bfd323cdf244b 100644
+--- a/drivers/net/wireless/ath/ath12k/dp_mon.c
++++ b/drivers/net/wireless/ath/ath12k/dp_mon.c
+@@ -1080,6 +1080,8 @@ static void ath12k_dp_mon_rx_deliver_msdu(struct ath12k *ar, struct napi_struct
+ 	bool is_mcbc = rxcb->is_mcbc;
+ 	bool is_eapol_tkip = rxcb->is_eapol;
+ 
++	status->link_valid = 0;
++
+ 	if ((status->encoding == RX_ENC_HE) && !(status->flag & RX_FLAG_RADIOTAP_HE) &&
+ 	    !(status->flag & RX_FLAG_SKIP_MONITOR)) {
+ 		he = skb_push(msdu, sizeof(known));
+diff --git a/drivers/net/wireless/ath/ath12k/hal.c b/drivers/net/wireless/ath/ath12k/hal.c
+index bfa404997710e9..3afb11c7bf18eb 100644
+--- a/drivers/net/wireless/ath/ath12k/hal.c
++++ b/drivers/net/wireless/ath/ath12k/hal.c
+@@ -449,8 +449,8 @@ static u8 *ath12k_hw_qcn9274_rx_desc_mpdu_start_addr2(struct hal_rx_desc *desc)
+ 
+ static bool ath12k_hw_qcn9274_rx_desc_is_da_mcbc(struct hal_rx_desc *desc)
+ {
+-	return __le32_to_cpu(desc->u.qcn9274.mpdu_start.info6) &
+-	       RX_MPDU_START_INFO6_MCAST_BCAST;
++	return __le16_to_cpu(desc->u.qcn9274.msdu_end.info5) &
++	       RX_MSDU_END_INFO5_DA_IS_MCBC;
+ }
+ 
+ static void ath12k_hw_qcn9274_rx_desc_get_dot11_hdr(struct hal_rx_desc *desc,
+@@ -902,8 +902,8 @@ static u8 *ath12k_hw_qcn9274_compact_rx_desc_mpdu_start_addr2(struct hal_rx_desc
+ 
+ static bool ath12k_hw_qcn9274_compact_rx_desc_is_da_mcbc(struct hal_rx_desc *desc)
+ {
+-	return __le32_to_cpu(desc->u.qcn9274_compact.mpdu_start.info6) &
+-	       RX_MPDU_START_INFO6_MCAST_BCAST;
++	return __le16_to_cpu(desc->u.qcn9274_compact.msdu_end.info5) &
++	       RX_MSDU_END_INFO5_DA_IS_MCBC;
+ }
+ 
+ static void ath12k_hw_qcn9274_compact_rx_desc_get_dot11_hdr(struct hal_rx_desc *desc,
+@@ -1943,7 +1943,7 @@ u32 ath12k_hal_ce_dst_status_get_length(struct hal_ce_srng_dst_status_desc *desc
+ {
+ 	u32 len;
+ 
+-	len = le32_get_bits(desc->flags, HAL_CE_DST_STATUS_DESC_FLAGS_LEN);
++	len = le32_get_bits(READ_ONCE(desc->flags), HAL_CE_DST_STATUS_DESC_FLAGS_LEN);
+ 	desc->flags &= ~cpu_to_le32(HAL_CE_DST_STATUS_DESC_FLAGS_LEN);
+ 
+ 	return len;
+@@ -2113,7 +2113,7 @@ void ath12k_hal_srng_access_begin(struct ath12k_base *ab, struct hal_srng *srng)
+ 		srng->u.src_ring.cached_tp =
+ 			*(volatile u32 *)srng->u.src_ring.tp_addr;
+ 	else
+-		srng->u.dst_ring.cached_hp = *srng->u.dst_ring.hp_addr;
++		srng->u.dst_ring.cached_hp = READ_ONCE(*srng->u.dst_ring.hp_addr);
+ }
+ 
+ /* Update cached ring head/tail pointers to HW. ath12k_hal_srng_access_begin()
+diff --git a/drivers/net/wireless/ath/ath12k/hal_desc.h b/drivers/net/wireless/ath/ath12k/hal_desc.h
+index c68998e9667c93..8cbe28950d0c0a 100644
+--- a/drivers/net/wireless/ath/ath12k/hal_desc.h
++++ b/drivers/net/wireless/ath/ath12k/hal_desc.h
+@@ -705,7 +705,7 @@ enum hal_rx_msdu_desc_reo_dest_ind {
+ #define RX_MSDU_DESC_INFO0_DECAP_FORMAT		GENMASK(30, 29)
+ 
+ #define HAL_RX_MSDU_PKT_LENGTH_GET(val)		\
+-	(u32_get_bits((val), RX_MSDU_DESC_INFO0_MSDU_LENGTH))
++	(le32_get_bits((val), RX_MSDU_DESC_INFO0_MSDU_LENGTH))
+ 
+ struct rx_msdu_desc {
+ 	__le32 info0;
+diff --git a/drivers/net/wireless/ath/ath12k/pci.c b/drivers/net/wireless/ath/ath12k/pci.c
+index 26f4b440c26d27..0ac92a606cea0c 100644
+--- a/drivers/net/wireless/ath/ath12k/pci.c
++++ b/drivers/net/wireless/ath/ath12k/pci.c
+@@ -1301,6 +1301,9 @@ void ath12k_pci_power_down(struct ath12k_base *ab, bool is_suspend)
+ {
+ 	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
+ 
++	if (!test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags))
++		return;
++
+ 	/* restore aspm in case firmware bootup fails */
+ 	ath12k_pci_aspm_restore(ab_pci);
+ 
+@@ -1503,6 +1506,8 @@ static int ath12k_pci_probe(struct pci_dev *pdev,
+ 	return 0;
+ 
+ err_free_irq:
++	/* __free_irq() expects the caller to have cleared the affinity hint */
++	ath12k_pci_set_irq_affinity_hint(ab_pci, NULL);
+ 	ath12k_pci_free_irq(ab);
+ 
+ err_ce_free:
+diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c
+index 17ac54047f9a70..5c2130f77dac66 100644
+--- a/drivers/net/wireless/ath/ath12k/wmi.c
++++ b/drivers/net/wireless/ath/ath12k/wmi.c
+@@ -980,14 +980,24 @@ int ath12k_wmi_vdev_down(struct ath12k *ar, u8 vdev_id)
+ static void ath12k_wmi_put_wmi_channel(struct ath12k_wmi_channel_params *chan,
+ 				       struct wmi_vdev_start_req_arg *arg)
+ {
++	u32 center_freq1 = arg->band_center_freq1;
++
+ 	memset(chan, 0, sizeof(*chan));
+ 
+ 	chan->mhz = cpu_to_le32(arg->freq);
+-	chan->band_center_freq1 = cpu_to_le32(arg->band_center_freq1);
+-	if (arg->mode == MODE_11AC_VHT80_80)
++	chan->band_center_freq1 = cpu_to_le32(center_freq1);
++	if (arg->mode == MODE_11BE_EHT160) {
++		if (arg->freq > center_freq1)
++			chan->band_center_freq1 = cpu_to_le32(center_freq1 + 40);
++		else
++			chan->band_center_freq1 = cpu_to_le32(center_freq1 - 40);
++
++		chan->band_center_freq2 = cpu_to_le32(center_freq1);
++	} else if (arg->mode == MODE_11BE_EHT80_80) {
+ 		chan->band_center_freq2 = cpu_to_le32(arg->band_center_freq2);
+-	else
++	} else {
+ 		chan->band_center_freq2 = 0;
++	}
+ 
+ 	chan->info |= le32_encode_bits(arg->mode, WMI_CHAN_INFO_MODE);
+ 	if (arg->passive)
+@@ -5784,7 +5794,7 @@ static int ath12k_reg_chan_list_event(struct ath12k_base *ab, struct sk_buff *sk
+ 		goto fallback;
+ 	}
+ 
+-	spin_lock(&ab->base_lock);
++	spin_lock_bh(&ab->base_lock);
+ 	if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) {
+ 		/* Once mac is registered, ar is valid and all CC events from
+ 		 * fw is considered to be received due to user requests
+@@ -5808,7 +5818,7 @@ static int ath12k_reg_chan_list_event(struct ath12k_base *ab, struct sk_buff *sk
+ 		ab->default_regd[pdev_idx] = regd;
+ 	}
+ 	ab->dfs_region = reg_info->dfs_region;
+-	spin_unlock(&ab->base_lock);
++	spin_unlock_bh(&ab->base_lock);
+ 
+ 	goto mem_free;
+ 
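+
The EHT160 branch added to ath12k_wmi_put_wmi_channel() above converts the mac80211 channel definition into what the firmware expects: band_center_freq1 becomes the center of the primary 80 MHz segment, 40 MHz to whichever side of the 160 MHz center the control channel sits on, while band_center_freq2 carries the full-span center. A worked example with 5 GHz numbering (illustrative, not taken from the patch):

  #include <linux/types.h>

  /* Mirrors the EHT160 branch above. Example: control channel 36
   * (5180 MHz) inside a 160 MHz block centered on channel 50
   * (5250 MHz). 5180 < 5250, so the primary 80 MHz segment is the
   * lower one: freq1 = 5250 - 40 = 5210 MHz (channel 42), and
   * freq2 = 5250 MHz, the full-span center. */
  static void demo_eht160_centers(u32 freq, u32 center, u32 *freq1, u32 *freq2)
  {
          *freq1 = (freq > center) ? center + 40 : center - 40;
          *freq2 = center;
  }
+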
+diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c
+index a3e03580cd9ff0..564ca6a619856b 100644
+--- a/drivers/net/wireless/ath/carl9170/usb.c
++++ b/drivers/net/wireless/ath/carl9170/usb.c
+@@ -438,14 +438,21 @@ static void carl9170_usb_rx_complete(struct urb *urb)
+ 
+ 		if (atomic_read(&ar->rx_anch_urbs) == 0) {
+ 			/*
+-			 * The system is too slow to cope with
+-			 * the enormous workload. We have simply
+-			 * run out of active rx urbs and this
+-			 * unfortunately leads to an unpredictable
+-			 * device.
++			 * At this point, either the system is too slow to
++			 * cope with the enormous workload (so we have simply
++			 * run out of active rx urbs and this unfortunately
++			 * leads to an unpredictable device), or the device
++			 * is not fully functional after an unsuccessful
++			 * firmware loading attempt (so it doesn't pass
++			 * ieee80211_register_hw() and there is no internal
++			 * workqueue at all).
+ 			 */
+ 
+-			ieee80211_queue_work(ar->hw, &ar->ping_work);
++			if (ar->registered)
++				ieee80211_queue_work(ar->hw, &ar->ping_work);
++			else
++				pr_warn_once("device %s is not registered\n",
++					     dev_name(&ar->udev->dev));
+ 		}
+ 	} else {
+ 		/*
+diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
+index 2e2fcb3807efb9..10d647fbc971e6 100644
+--- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
++++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
+@@ -44,6 +44,8 @@
+ 	IWL_QU_C_HR_B_FW_PRE "-" __stringify(api) ".ucode"
+ #define IWL_QU_B_JF_B_MODULE_FIRMWARE(api) \
+ 	IWL_QU_B_JF_B_FW_PRE "-" __stringify(api) ".ucode"
++#define IWL_QU_C_JF_B_MODULE_FIRMWARE(api) \
++	IWL_QU_C_JF_B_FW_PRE "-" __stringify(api) ".ucode"
+ #define IWL_CC_A_MODULE_FIRMWARE(api)			\
+ 	IWL_CC_A_FW_PRE "-" __stringify(api) ".ucode"
+ 
+@@ -423,6 +425,7 @@ const struct iwl_cfg iwl_cfg_quz_a0_hr_b0 = {
+ MODULE_FIRMWARE(IWL_QU_B_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+ MODULE_FIRMWARE(IWL_QU_C_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+ MODULE_FIRMWARE(IWL_QU_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
++MODULE_FIRMWARE(IWL_QU_C_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+ MODULE_FIRMWARE(IWL_QUZ_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+ MODULE_FIRMWARE(IWL_QUZ_A_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+ MODULE_FIRMWARE(IWL_CC_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+index e96ddaeeeeff52..d013de30e7ed65 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+ /*
+- * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
++ * Copyright (C) 2012-2014, 2018-2025 Intel Corporation
+  * Copyright (C) 2013-2014 Intel Mobile Communications GmbH
+  * Copyright (C) 2015-2017 Intel Deutschland GmbH
+  */
+@@ -962,7 +962,7 @@ u16 iwl_mvm_mac_ctxt_get_beacon_flags(const struct iwl_fw *fw, u8 rate_idx)
+ 	u16 flags = iwl_mvm_mac80211_idx_to_hwrate(fw, rate_idx);
+ 	bool is_new_rate = iwl_fw_lookup_cmd_ver(fw, BEACON_TEMPLATE_CMD, 0) > 10;
+ 
+-	if (rate_idx <= IWL_FIRST_CCK_RATE)
++	if (rate_idx <= IWL_LAST_CCK_RATE)
+ 		flags |= is_new_rate ? IWL_MAC_BEACON_CCK
+ 			  : IWL_MAC_BEACON_CCK_V1;
+ 
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+index 18d7d59ae58147..462ebe088b3c17 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+@@ -2726,6 +2726,8 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
+ 	for (i = 0; i < trans->num_rx_queues && pos < bufsz; i++) {
+ 		struct iwl_rxq *rxq = &trans_pcie->rxq[i];
+ 
++		spin_lock_bh(&rxq->lock);
++
+ 		pos += scnprintf(buf + pos, bufsz - pos, "queue#: %2d\n",
+ 				 i);
+ 		pos += scnprintf(buf + pos, bufsz - pos, "\tread: %u\n",
+@@ -2746,6 +2748,7 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
+ 			pos += scnprintf(buf + pos, bufsz - pos,
+ 					 "\tclosed_rb_num: Not Allocated\n");
+ 		}
++		spin_unlock_bh(&rxq->lock);
+ 	}
+ 	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ 	kfree(buf);
+@@ -3410,8 +3413,11 @@ iwl_trans_pcie_dump_data(struct iwl_trans *trans, u32 dump_mask,
+ 		/* Dump RBs is supported only for pre-9000 devices (1 queue) */
+ 		struct iwl_rxq *rxq = &trans_pcie->rxq[0];
+ 		/* RBs */
++		spin_lock_bh(&rxq->lock);
+ 		num_rbs = iwl_get_closed_rb_stts(trans, rxq);
+ 		num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK;
++		spin_unlock_bh(&rxq->lock);
++
+ 		len += num_rbs * (sizeof(*data) +
+ 				  sizeof(struct iwl_fw_error_dump_rb) +
+ 				  (PAGE_SIZE << trans_pcie->rx_page_order));
+diff --git a/drivers/net/wireless/intersil/p54/fwio.c b/drivers/net/wireless/intersil/p54/fwio.c
+index 772084a9bd8d7c..3baf8ab01e22b0 100644
+--- a/drivers/net/wireless/intersil/p54/fwio.c
++++ b/drivers/net/wireless/intersil/p54/fwio.c
+@@ -231,6 +231,7 @@ int p54_download_eeprom(struct p54_common *priv, void *buf,
+ 
+ 	mutex_lock(&priv->eeprom_mutex);
+ 	priv->eeprom = buf;
++	priv->eeprom_slice_size = len;
+ 	eeprom_hdr = skb_put(skb, eeprom_hdr_size + len);
+ 
+ 	if (priv->fw_var < 0x509) {
+@@ -253,6 +254,7 @@ int p54_download_eeprom(struct p54_common *priv, void *buf,
+ 		ret = -EBUSY;
+ 	}
+ 	priv->eeprom = NULL;
++	priv->eeprom_slice_size = 0;
+ 	mutex_unlock(&priv->eeprom_mutex);
+ 	return ret;
+ }
+diff --git a/drivers/net/wireless/intersil/p54/p54.h b/drivers/net/wireless/intersil/p54/p54.h
+index 522656de415987..aeb5e40cc5ef3f 100644
+--- a/drivers/net/wireless/intersil/p54/p54.h
++++ b/drivers/net/wireless/intersil/p54/p54.h
+@@ -258,6 +258,7 @@ struct p54_common {
+ 
+ 	/* eeprom handling */
+ 	void *eeprom;
++	size_t eeprom_slice_size;
+ 	struct completion eeprom_comp;
+ 	struct mutex eeprom_mutex;
+ };
+diff --git a/drivers/net/wireless/intersil/p54/txrx.c b/drivers/net/wireless/intersil/p54/txrx.c
+index 8414aa208655f6..2deb1bb54f24bd 100644
+--- a/drivers/net/wireless/intersil/p54/txrx.c
++++ b/drivers/net/wireless/intersil/p54/txrx.c
+@@ -496,14 +496,19 @@ static void p54_rx_eeprom_readback(struct p54_common *priv,
+ 		return ;
+ 
+ 	if (priv->fw_var >= 0x509) {
+-		memcpy(priv->eeprom, eeprom->v2.data,
+-		       le16_to_cpu(eeprom->v2.len));
++		if (le16_to_cpu(eeprom->v2.len) != priv->eeprom_slice_size)
++			return;
++
++		memcpy(priv->eeprom, eeprom->v2.data, priv->eeprom_slice_size);
+ 	} else {
+-		memcpy(priv->eeprom, eeprom->v1.data,
+-		       le16_to_cpu(eeprom->v1.len));
++		if (le16_to_cpu(eeprom->v1.len) != priv->eeprom_slice_size)
++			return;
++
++		memcpy(priv->eeprom, eeprom->v1.data, priv->eeprom_slice_size);
+ 	}
+ 
+ 	priv->eeprom = NULL;
++	priv->eeprom_slice_size = 0;
+ 	tmp = p54_find_and_unlink_skb(priv, hdr->req_id);
+ 	dev_kfree_skb_any(tmp);
+ 	complete(&priv->eeprom_comp);
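+
The p54 hunks apply the usual hardening for a device-controlled length: record how large the destination slice is when the request goes out, then refuse the readback unless the length the hardware reports matches exactly. A generic validate-then-copy sketch with invented names:

  #include <linux/string.h>
  #include <linux/types.h>

  struct demo_priv {
          void *dst;              /* caller-provided buffer */
          size_t dst_size;        /* recorded when the request was sent */
  };

  /* Returns false (and copies nothing) if the device-reported length
   * does not match the slice the caller asked for. */
  static bool demo_readback(struct demo_priv *p, const void *src, u16 len)
  {
          if (len != p->dst_size)
                  return false;

          memcpy(p->dst, src, len);
          return true;
  }
+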
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
+index 84ef80ab4afbfa..96cecc576a9867 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
+@@ -17,6 +17,8 @@ static const struct usb_device_id mt76x2u_device_table[] = {
+ 	{ USB_DEVICE(0x057c, 0x8503) },	/* Avm FRITZ!WLAN AC860 */
+ 	{ USB_DEVICE(0x7392, 0xb711) },	/* Edimax EW 7722 UAC */
+ 	{ USB_DEVICE(0x0e8d, 0x7632) },	/* HC-M7662BU1 */
++	{ USB_DEVICE(0x0471, 0x2126) }, /* LiteOn WN4516R module, nonstandard USB connector */
++	{ USB_DEVICE(0x0471, 0x7600) }, /* LiteOn WN4519R module, nonstandard USB connector */
+ 	{ USB_DEVICE(0x2c4e, 0x0103) },	/* Mercury UD13 */
+ 	{ USB_DEVICE(0x0846, 0x9014) },	/* Netgear WNDA3100v3 */
+ 	{ USB_DEVICE(0x0846, 0x9053) },	/* Netgear A6210 */
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c
+index 33a14365ec9b98..3b556281151158 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c
+@@ -191,6 +191,7 @@ int mt76x2u_register_device(struct mt76x02_dev *dev)
+ {
+ 	struct ieee80211_hw *hw = mt76_hw(dev);
+ 	struct mt76_usb *usb = &dev->mt76.usb;
++	bool vht;
+ 	int err;
+ 
+ 	INIT_DELAYED_WORK(&dev->cal_work, mt76x2u_phy_calibrate);
+@@ -217,7 +218,17 @@ int mt76x2u_register_device(struct mt76x02_dev *dev)
+ 
+ 	/* check hw sg support in order to enable AMSDU */
+ 	hw->max_tx_fragments = dev->mt76.usb.sg_en ? MT_TX_SG_MAX_SIZE : 1;
+-	err = mt76_register_device(&dev->mt76, true, mt76x02_rates,
++	switch (dev->mt76.rev) {
++	case 0x76320044:
++		/* these ASIC revisions do not support VHT */
++		vht = false;
++		break;
++	default:
++		vht = true;
++		break;
++	}
++
++	err = mt76_register_device(&dev->mt76, vht, mt76x02_rates,
+ 				   ARRAY_SIZE(mt76x02_rates));
+ 	if (err)
+ 		goto fail;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+index 6a3629f71caaa7..9c245c23a2d730 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+@@ -83,6 +83,11 @@ mt7921_init_he_caps(struct mt792x_phy *phy, enum nl80211_band band,
+ 			he_cap_elem->phy_cap_info[9] |=
+ 				IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU |
+ 				IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU;
++
++			if (is_mt7922(phy->mt76->dev)) {
++				he_cap_elem->phy_cap_info[0] |=
++					IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G;
++			}
+ 			break;
+ 		case NL80211_IFTYPE_STATION:
+ 			he_cap_elem->mac_cap_info[1] |=
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/init.c b/drivers/net/wireless/mediatek/mt76/mt7925/init.c
+index 039949b344b981..14553dcc61c577 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7925/init.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7925/init.c
+@@ -204,6 +204,12 @@ static void mt7925_init_work(struct work_struct *work)
+ 		return;
+ 	}
+ 
++	ret = mt7925_mcu_set_thermal_protect(dev);
++	if (ret) {
++		dev_err(dev->mt76.dev, "thermal protection enable failed\n");
++		return;
++	}
++
+ 	/* we support chip reset now */
+ 	dev->hw_init_done = true;
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
+index a19c108ad4b5c9..57a1db394dda46 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
+@@ -961,6 +961,23 @@ int mt7925_mcu_set_deep_sleep(struct mt792x_dev *dev, bool enable)
+ }
+ EXPORT_SYMBOL_GPL(mt7925_mcu_set_deep_sleep);
+ 
++int mt7925_mcu_set_thermal_protect(struct mt792x_dev *dev)
++{
++	char cmd[64];
++	int ret = 0;
++
++	snprintf(cmd, sizeof(cmd), "ThermalProtGband %d %d %d %d %d %d %d %d %d %d",
++		 0, 100, 90, 80, 30, 1, 1, 115, 105, 5);
++	ret = mt7925_mcu_chip_config(dev, cmd);
++
++	snprintf(cmd, sizeof(cmd), "ThermalProtAband %d %d %d %d %d %d %d %d %d %d",
++		 1, 100, 90, 80, 30, 1, 1, 115, 105, 5);
++	ret |= mt7925_mcu_chip_config(dev, cmd);
++
++	return ret;
++}
++EXPORT_SYMBOL_GPL(mt7925_mcu_set_thermal_protect);
++
+ int mt7925_run_firmware(struct mt792x_dev *dev)
+ {
+ 	int err;
+@@ -3288,7 +3305,8 @@ int mt7925_mcu_fill_message(struct mt76_dev *mdev, struct sk_buff *skb,
+ 		else
+ 			uni_txd->option = MCU_CMD_UNI_EXT_ACK;
+ 
+-		if (cmd == MCU_UNI_CMD(HIF_CTRL))
++		if (cmd == MCU_UNI_CMD(HIF_CTRL) ||
++		    cmd == MCU_UNI_CMD(CHIP_CONFIG))
+ 			uni_txd->option &= ~MCU_CMD_ACK;
+ 
+ 		goto exit;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h
+index 887427e0760aed..780c5921679aa3 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h
+@@ -635,6 +635,7 @@ int mt7925_mcu_add_bss_info(struct mt792x_phy *phy,
+ int mt7925_mcu_set_timing(struct mt792x_phy *phy,
+ 			  struct ieee80211_bss_conf *link_conf);
+ int mt7925_mcu_set_deep_sleep(struct mt792x_dev *dev, bool enable);
++int mt7925_mcu_set_thermal_protect(struct mt792x_dev *dev);
+ int mt7925_mcu_set_channel_domain(struct mt76_phy *phy);
+ int mt7925_mcu_set_radio_en(struct mt792x_phy *phy, bool enable);
+ int mt7925_mcu_set_chctx(struct mt76_phy *phy, struct mt76_vif *mvif,
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/pci.c b/drivers/net/wireless/mediatek/mt76/mt7925/pci.c
+index 9aec675450f267..5e428f19f9722a 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7925/pci.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7925/pci.c
+@@ -482,9 +482,6 @@ static int mt7925_pci_suspend(struct device *device)
+ 
+ 	/* disable interrupt */
+ 	mt76_wr(dev, dev->irq_map->host_irq_enable, 0);
+-	mt76_wr(dev, MT_WFDMA0_HOST_INT_DIS,
+-		dev->irq_map->tx.all_complete_mask |
+-		MT_INT_RX_DONE_ALL | MT_INT_MCU_CMD);
+ 
+ 	mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/regs.h b/drivers/net/wireless/mediatek/mt76/mt7925/regs.h
+index 985794a40c1a8e..547489092c2947 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7925/regs.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7925/regs.h
+@@ -28,7 +28,7 @@
+ #define MT_MDP_TO_HIF			0
+ #define MT_MDP_TO_WM			1
+ 
+-#define MT_WFDMA0_HOST_INT_ENA		MT_WFDMA0(0x228)
++#define MT_WFDMA0_HOST_INT_ENA		MT_WFDMA0(0x204)
+ #define MT_WFDMA0_HOST_INT_DIS		MT_WFDMA0(0x22c)
+ #define HOST_RX_DONE_INT_ENA4		BIT(12)
+ #define HOST_RX_DONE_INT_ENA5		BIT(13)
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
+index ef2d7eaaaffdd6..0990a3d481f2d9 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
+@@ -623,6 +623,14 @@ mt7996_mac_fill_rx(struct mt7996_dev *dev, enum mt76_rxq_id q,
+ 		status->last_amsdu = amsdu_info == MT_RXD4_LAST_AMSDU_FRAME;
+ 	}
+ 
++	/* IEEE 802.11 fragmentation can only be applied to unicast frames.
++	 * Hence, drop fragments with multicast/broadcast RA.
++	 * This check fixes vulnerabilities, like CVE-2020-26145.
++	 */
++	if ((ieee80211_has_morefrags(fc) || seq_ctrl & IEEE80211_SCTL_FRAG) &&
++	    FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) != MT_RXD3_NORMAL_U2M)
++		return -EINVAL;
++
+ 	hdr_gap = (u8 *)rxd - skb->data + 2 * remove_pad;
+ 	if (hdr_trans && ieee80211_has_morefrags(fc)) {
+ 		if (mt7996_reverse_frag0_hdr_trans(skb, hdr_gap))
+diff --git a/drivers/net/wireless/purelifi/plfxlc/usb.c b/drivers/net/wireless/purelifi/plfxlc/usb.c
+index 56d1139ba8bcce..7e7bfa532ed255 100644
+--- a/drivers/net/wireless/purelifi/plfxlc/usb.c
++++ b/drivers/net/wireless/purelifi/plfxlc/usb.c
+@@ -503,8 +503,10 @@ int plfxlc_usb_wreq_async(struct plfxlc_usb *usb, const u8 *buffer,
+ 			  (void *)buffer, buffer_len, complete_fn, context);
+ 
+ 	r = usb_submit_urb(urb, GFP_ATOMIC);
+-	if (r)
++	if (r) {
++		usb_free_urb(urb);
+ 		dev_err(&udev->dev, "Async write submit failed (%d)\n", r);
++	}
+ 
+ 	return r;
+ }
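+
The plfxlc fix follows from URB ownership rules: on failure usb_submit_urb() leaves the reference with the caller and the completion handler will never run, so the submitter must free the URB itself. In miniature:

  #include <linux/usb.h>

  static int demo_submit(struct urb *urb)
  {
          int ret = usb_submit_urb(urb, GFP_ATOMIC);

          if (ret)
                  usb_free_urb(urb);      /* completion will never fire */

          return ret;
  }
+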
+diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
+index 0eafc4d125f91d..898f597f70a96d 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
++++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
+@@ -155,6 +155,16 @@ static void _rtl_pci_update_default_setting(struct ieee80211_hw *hw)
+ 	    ((u8)init_aspm) == (PCI_EXP_LNKCTL_ASPM_L0S |
+ 				PCI_EXP_LNKCTL_ASPM_L1 | PCI_EXP_LNKCTL_CCC))
+ 		ppsc->support_aspm = false;
++
++	/* RTL8723BE found on some ASUSTek laptops, such as F441U and
++	 * X555UQ with subsystem ID 11ad:1723 are known to output large
++	 * amounts of PCIe AER errors during and after boot up, causing
++	 * heavy lags, poor network throughput, and occasional lock-ups.
++	 */
++	if (rtlpriv->rtlhal.hw_type == HARDWARE_TYPE_RTL8723BE &&
++	    (rtlpci->pdev->subsystem_vendor == 0x11ad &&
++	     rtlpci->pdev->subsystem_device == 0x1723))
++		ppsc->support_aspm = false;
+ }
+ 
+ static bool _rtl_pci_platform_switch_device_pci_aspm(
+diff --git a/drivers/net/wireless/realtek/rtw88/hci.h b/drivers/net/wireless/realtek/rtw88/hci.h
+index 96aeda26014e20..d4bee9c3ecfeab 100644
+--- a/drivers/net/wireless/realtek/rtw88/hci.h
++++ b/drivers/net/wireless/realtek/rtw88/hci.h
+@@ -19,6 +19,8 @@ struct rtw_hci_ops {
+ 	void (*link_ps)(struct rtw_dev *rtwdev, bool enter);
+ 	void (*interface_cfg)(struct rtw_dev *rtwdev);
+ 	void (*dynamic_rx_agg)(struct rtw_dev *rtwdev, bool enable);
++	void (*write_firmware_page)(struct rtw_dev *rtwdev, u32 page,
++				    const u8 *data, u32 size);
+ 
+ 	int (*write_data_rsvd_page)(struct rtw_dev *rtwdev, u8 *buf, u32 size);
+ 	int (*write_data_h2c)(struct rtw_dev *rtwdev, u8 *buf, u32 size);
+@@ -79,6 +81,12 @@ static inline void rtw_hci_dynamic_rx_agg(struct rtw_dev *rtwdev, bool enable)
+ 		rtwdev->hci.ops->dynamic_rx_agg(rtwdev, enable);
+ }
+ 
++static inline void rtw_hci_write_firmware_page(struct rtw_dev *rtwdev, u32 page,
++					       const u8 *data, u32 size)
++{
++	rtwdev->hci.ops->write_firmware_page(rtwdev, page, data, size);
++}
++
+ static inline int
+ rtw_hci_write_data_rsvd_page(struct rtw_dev *rtwdev, u8 *buf, u32 size)
+ {
+diff --git a/drivers/net/wireless/realtek/rtw88/mac.c b/drivers/net/wireless/realtek/rtw88/mac.c
+index d1c4f5cdcb21da..efb1da198e74c9 100644
+--- a/drivers/net/wireless/realtek/rtw88/mac.c
++++ b/drivers/net/wireless/realtek/rtw88/mac.c
+@@ -854,8 +854,8 @@ static void en_download_firmware_legacy(struct rtw_dev *rtwdev, bool en)
+ 	}
+ }
+ 
+-static void
+-write_firmware_page(struct rtw_dev *rtwdev, u32 page, const u8 *data, u32 size)
++void rtw_write_firmware_page(struct rtw_dev *rtwdev, u32 page,
++			     const u8 *data, u32 size)
+ {
+ 	u32 val32;
+ 	u32 block_nr;
+@@ -885,6 +885,7 @@ write_firmware_page(struct rtw_dev *rtwdev, u32 page, const u8 *data, u32 size)
+ 		rtw_write32(rtwdev, write_addr, le32_to_cpu(remain_data));
+ 	}
+ }
++EXPORT_SYMBOL(rtw_write_firmware_page);
+ 
+ static int
+ download_firmware_legacy(struct rtw_dev *rtwdev, const u8 *data, u32 size)
+@@ -902,11 +903,13 @@ download_firmware_legacy(struct rtw_dev *rtwdev, const u8 *data, u32 size)
+ 	rtw_write8_set(rtwdev, REG_MCUFW_CTRL, BIT_FWDL_CHK_RPT);
+ 
+ 	for (page = 0; page < total_page; page++) {
+-		write_firmware_page(rtwdev, page, data, DLFW_PAGE_SIZE_LEGACY);
++		rtw_hci_write_firmware_page(rtwdev, page, data,
++					    DLFW_PAGE_SIZE_LEGACY);
+ 		data += DLFW_PAGE_SIZE_LEGACY;
+ 	}
+ 	if (last_page_size)
+-		write_firmware_page(rtwdev, page, data, last_page_size);
++		rtw_hci_write_firmware_page(rtwdev, page, data,
++					    last_page_size);
+ 
+ 	if (!check_hw_ready(rtwdev, REG_MCUFW_CTRL, BIT_FWDL_CHK_RPT, 1)) {
+ 		rtw_err(rtwdev, "failed to check download firmware report\n");
+diff --git a/drivers/net/wireless/realtek/rtw88/mac.h b/drivers/net/wireless/realtek/rtw88/mac.h
+index 58c3dccc14bb51..737c6d5d8da723 100644
+--- a/drivers/net/wireless/realtek/rtw88/mac.h
++++ b/drivers/net/wireless/realtek/rtw88/mac.h
+@@ -32,6 +32,8 @@ void rtw_set_channel_mac(struct rtw_dev *rtwdev, u8 channel, u8 bw,
+ 			 u8 primary_ch_idx);
+ int rtw_mac_power_on(struct rtw_dev *rtwdev);
+ void rtw_mac_power_off(struct rtw_dev *rtwdev);
++void rtw_write_firmware_page(struct rtw_dev *rtwdev, u32 page,
++			     const u8 *data, u32 size);
+ int rtw_download_firmware(struct rtw_dev *rtwdev, struct rtw_fw_state *fw);
+ int rtw_mac_init(struct rtw_dev *rtwdev);
+ void rtw_mac_flush_queues(struct rtw_dev *rtwdev, u32 queues, bool drop);
+diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c
+index 0b9b8807af2cb0..fab9bb9257dd94 100644
+--- a/drivers/net/wireless/realtek/rtw88/pci.c
++++ b/drivers/net/wireless/realtek/rtw88/pci.c
+@@ -12,6 +12,7 @@
+ #include "fw.h"
+ #include "ps.h"
+ #include "debug.h"
++#include "mac.h"
+ 
+ static bool rtw_disable_msi;
+ static bool rtw_pci_disable_aspm;
+@@ -1602,6 +1603,7 @@ static struct rtw_hci_ops rtw_pci_ops = {
+ 	.link_ps = rtw_pci_link_ps,
+ 	.interface_cfg = rtw_pci_interface_cfg,
+ 	.dynamic_rx_agg = NULL,
++	.write_firmware_page = rtw_write_firmware_page,
+ 
+ 	.read8 = rtw_pci_read8,
+ 	.read16 = rtw_pci_read16,
+diff --git a/drivers/net/wireless/realtek/rtw88/sdio.c b/drivers/net/wireless/realtek/rtw88/sdio.c
+index 5b8e88c9759d12..787fa09fd063a9 100644
+--- a/drivers/net/wireless/realtek/rtw88/sdio.c
++++ b/drivers/net/wireless/realtek/rtw88/sdio.c
+@@ -10,6 +10,7 @@
+ #include <linux/mmc/host.h>
+ #include <linux/mmc/sdio_func.h>
+ #include "main.h"
++#include "mac.h"
+ #include "debug.h"
+ #include "fw.h"
+ #include "ps.h"
+@@ -1155,6 +1156,7 @@ static struct rtw_hci_ops rtw_sdio_ops = {
+ 	.link_ps = rtw_sdio_link_ps,
+ 	.interface_cfg = rtw_sdio_interface_cfg,
+ 	.dynamic_rx_agg = NULL,
++	.write_firmware_page = rtw_write_firmware_page,
+ 
+ 	.read8 = rtw_sdio_read8,
+ 	.read16 = rtw_sdio_read16,
+diff --git a/drivers/net/wireless/realtek/rtw88/usb.c b/drivers/net/wireless/realtek/rtw88/usb.c
+index 07695294767acb..a446be45f26e7e 100644
+--- a/drivers/net/wireless/realtek/rtw88/usb.c
++++ b/drivers/net/wireless/realtek/rtw88/usb.c
+@@ -138,7 +138,7 @@ static void rtw_usb_write(struct rtw_dev *rtwdev, u32 addr, u32 val, int len)
+ 
+ 	ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+ 			      RTW_USB_CMD_REQ, RTW_USB_CMD_WRITE,
+-			      addr, 0, data, len, 30000);
++			      addr, 0, data, len, 500);
+ 	if (ret < 0 && ret != -ENODEV && count++ < 4)
+ 		rtw_err(rtwdev, "write register 0x%x failed with %d\n",
+ 			addr, ret);
+@@ -164,6 +164,60 @@ static void rtw_usb_write32(struct rtw_dev *rtwdev, u32 addr, u32 val)
+ 	rtw_usb_write(rtwdev, addr, val, 4);
+ }
+ 
++static void rtw_usb_write_firmware_page(struct rtw_dev *rtwdev, u32 page,
++					const u8 *data, u32 size)
++{
++	struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
++	struct usb_device *udev = rtwusb->udev;
++	u32 addr = FW_START_ADDR_LEGACY;
++	u8 *data_dup, *buf;
++	u32 n, block_size;
++	int ret;
++
++	switch (rtwdev->chip->id) {
++	case RTW_CHIP_TYPE_8723D:
++		block_size = 254;
++		break;
++	default:
++		block_size = 196;
++		break;
++	}
++
++	data_dup = kmemdup(data, size, GFP_KERNEL);
++	if (!data_dup)
++		return;
++
++	buf = data_dup;
++
++	rtw_write32_mask(rtwdev, REG_MCUFW_CTRL, BIT_ROM_PGE, page);
++
++	while (size > 0) {
++		if (size >= block_size)
++			n = block_size;
++		else if (size >= 8)
++			n = 8;
++		else
++			n = 1;
++
++		ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
++				      RTW_USB_CMD_REQ, RTW_USB_CMD_WRITE,
++				      addr, 0, buf, n, 500);
++		if (ret != n) {
++			if (ret != -ENODEV)
++				rtw_err(rtwdev,
++					"write 0x%x len %d failed: %d\n",
++					addr, n, ret);
++			break;
++		}
++
++		addr += n;
++		buf += n;
++		size -= n;
++	}
++
++	kfree(data_dup);
++}
++
+ static int dma_mapping_to_ep(enum rtw_dma_mapping dma_mapping)
+ {
+ 	switch (dma_mapping) {
+@@ -815,6 +869,7 @@ static struct rtw_hci_ops rtw_usb_ops = {
+ 	.link_ps = rtw_usb_link_ps,
+ 	.interface_cfg = rtw_usb_interface_cfg,
+ 	.dynamic_rx_agg = rtw_usb_dynamic_rx_agg,
++	.write_firmware_page = rtw_usb_write_firmware_page,
+ 
+ 	.write8  = rtw_usb_write8,
+ 	.write16 = rtw_usb_write16,
+diff --git a/drivers/net/wireless/realtek/rtw89/cam.c b/drivers/net/wireless/realtek/rtw89/cam.c
+index 8d140b94cb4403..0c8ea5e629e6a8 100644
+--- a/drivers/net/wireless/realtek/rtw89/cam.c
++++ b/drivers/net/wireless/realtek/rtw89/cam.c
+@@ -6,6 +6,7 @@
+ #include "debug.h"
+ #include "fw.h"
+ #include "mac.h"
++#include "ps.h"
+ 
+ static struct sk_buff *
+ rtw89_cam_get_sec_key_cmd(struct rtw89_dev *rtwdev,
+@@ -447,9 +448,11 @@ int rtw89_cam_sec_key_add(struct rtw89_dev *rtwdev,
+ 
+ 	switch (key->cipher) {
+ 	case WLAN_CIPHER_SUITE_WEP40:
++		rtw89_leave_ips_by_hwflags(rtwdev);
+ 		hw_key_type = RTW89_SEC_KEY_TYPE_WEP40;
+ 		break;
+ 	case WLAN_CIPHER_SUITE_WEP104:
++		rtw89_leave_ips_by_hwflags(rtwdev);
+ 		hw_key_type = RTW89_SEC_KEY_TYPE_WEP104;
+ 		break;
+ 	case WLAN_CIPHER_SUITE_CCMP:
+diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c
+index 9b09d4b7dea597..2188bca899e392 100644
+--- a/drivers/net/wireless/realtek/rtw89/mac.c
++++ b/drivers/net/wireless/realtek/rtw89/mac.c
+@@ -5513,11 +5513,11 @@ void rtw89_mac_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
+ 	case RTW89_MAC_C2H_CLASS_FWDBG:
+ 		return;
+ 	default:
+-		rtw89_info(rtwdev, "c2h class %d not support\n", class);
++		rtw89_info(rtwdev, "MAC c2h class %d not supported\n", class);
+ 		return;
+ 	}
+ 	if (!handler) {
+-		rtw89_info(rtwdev, "c2h class %d func %d not support\n", class,
++		rtw89_info(rtwdev, "MAC c2h class %d func %d not supported\n", class,
+ 			   func);
+ 		return;
+ 	}
+diff --git a/drivers/net/wireless/realtek/rtw89/phy.c b/drivers/net/wireless/realtek/rtw89/phy.c
+index 5c31639b4cade9..355c3f58ab1850 100644
+--- a/drivers/net/wireless/realtek/rtw89/phy.c
++++ b/drivers/net/wireless/realtek/rtw89/phy.c
+@@ -3062,10 +3062,16 @@ rtw89_phy_c2h_rfk_report_state(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u3
+ 		    (int)(len - sizeof(report->hdr)), &report->state);
+ }
+ 
++static void
++rtw89_phy_c2h_rfk_log_tas_pwr(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
++{
++}
++
+ static
+ void (* const rtw89_phy_c2h_rfk_report_handler[])(struct rtw89_dev *rtwdev,
+ 						  struct sk_buff *c2h, u32 len) = {
+ 	[RTW89_PHY_C2H_RFK_REPORT_FUNC_STATE] = rtw89_phy_c2h_rfk_report_state,
++	[RTW89_PHY_C2H_RFK_LOG_TAS_PWR] = rtw89_phy_c2h_rfk_log_tas_pwr,
+ };
+ 
+ bool rtw89_phy_c2h_chk_atomic(struct rtw89_dev *rtwdev, u8 class, u8 func)
+@@ -3119,11 +3125,11 @@ void rtw89_phy_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
+ 			return;
+ 		fallthrough;
+ 	default:
+-		rtw89_info(rtwdev, "c2h class %d not support\n", class);
++		rtw89_info(rtwdev, "PHY c2h class %d not supported\n", class);
+ 		return;
+ 	}
+ 	if (!handler) {
+-		rtw89_info(rtwdev, "c2h class %d func %d not support\n", class,
++		rtw89_info(rtwdev, "PHY c2h class %d func %d not supported\n", class,
+ 			   func);
+ 		return;
+ 	}
+diff --git a/drivers/net/wireless/realtek/rtw89/phy.h b/drivers/net/wireless/realtek/rtw89/phy.h
+index 9bb9c9c8e7a1b0..961a4bacb02a58 100644
+--- a/drivers/net/wireless/realtek/rtw89/phy.h
++++ b/drivers/net/wireless/realtek/rtw89/phy.h
+@@ -151,6 +151,7 @@ enum rtw89_phy_c2h_rfk_log_func {
+ 
+ enum rtw89_phy_c2h_rfk_report_func {
+ 	RTW89_PHY_C2H_RFK_REPORT_FUNC_STATE = 0,
++	RTW89_PHY_C2H_RFK_LOG_TAS_PWR = 6,
+ };
+ 
+ enum rtw89_phy_c2h_dm_func {
+diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c
+index 28907df7407d58..c958d6ab24d324 100644
+--- a/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c
++++ b/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c
+@@ -77,11 +77,6 @@ void rtw8922a_ctl_band_ch_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ 					     RR_CFGCH_BAND0 | RR_CFGCH_CH);
+ 			rf_reg[path][i] |= u32_encode_bits(central_ch, RR_CFGCH_CH);
+ 
+-			if (band == RTW89_BAND_2G)
+-				rtw89_write_rf(rtwdev, path, RR_SMD, RR_VCO2, 0x0);
+-			else
+-				rtw89_write_rf(rtwdev, path, RR_SMD, RR_VCO2, 0x1);
+-
+ 			switch (band) {
+ 			case RTW89_BAND_2G:
+ 			default:
+diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.c b/drivers/net/wireless/virtual/mac80211_hwsim.c
+index 4a2b7c9921bc61..6fcc21f596ea74 100644
+--- a/drivers/net/wireless/virtual/mac80211_hwsim.c
++++ b/drivers/net/wireless/virtual/mac80211_hwsim.c
+@@ -1229,6 +1229,11 @@ static void mac80211_hwsim_set_tsf(struct ieee80211_hw *hw,
+ 	/* MLD not supported here */
+ 	u32 bcn_int = data->link_data[0].beacon_int;
+ 	u64 delta = abs(tsf - now);
++	struct ieee80211_bss_conf *conf;
++
++	conf = link_conf_dereference_protected(vif, data->link_data[0].link_id);
++	if (conf && !conf->enable_beacon)
++		return;
+ 
+ 	/* adjust after beaconing with new timestamp at old TBTT */
+ 	if (tsf > now) {
+diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
+index a1b3c538a4bd2e..64ae8af01d9a47 100644
+--- a/drivers/nvme/host/ioctl.c
++++ b/drivers/nvme/host/ioctl.c
+@@ -442,21 +442,14 @@ static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
+ 	pdu->result = le64_to_cpu(nvme_req(req)->result.u64);
+ 
+ 	/*
+-	 * For iopoll, complete it directly. Note that using the uring_cmd
+-	 * helper for this is safe only because we check blk_rq_is_poll().
+-	 * As that returns false if we're NOT on a polled queue, then it's
+-	 * safe to use the polled completion helper.
+-	 *
+-	 * Otherwise, move the completion to task work.
++	 * IOPOLL could potentially complete this request directly, but
++	 * if multiple rings are polling on the same queue, then it's possible
++	 * for one ring to find completions for another ring. Punting the
++	 * completion via task_work will always direct it to the right
++	 * location, rather than potentially completing requests for ringA
++	 * under iopoll invocations from ringB.
+ 	 */
+-	if (blk_rq_is_poll(req)) {
+-		if (pdu->bio)
+-			blk_rq_unmap_user(pdu->bio);
+-		io_uring_cmd_iopoll_done(ioucmd, pdu->result, pdu->status);
+-	} else {
+-		io_uring_cmd_do_in_task_lazy(ioucmd, nvme_uring_task_cb);
+-	}
+-
++	io_uring_cmd_do_in_task_lazy(ioucmd, nvme_uring_task_cb);
+ 	return RQ_END_IO_FREE;
+ }
+ 
+diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c
+index 0bf4cde34f5171..f700e8c4908223 100644
+--- a/drivers/pci/controller/cadence/pcie-cadence-ep.c
++++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c
+@@ -292,13 +292,14 @@ static int cdns_pcie_ep_set_msix(struct pci_epc *epc, u8 fn, u8 vfn,
+ 	struct cdns_pcie *pcie = &ep->pcie;
+ 	u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
+ 	u32 val, reg;
++	u16 actual_interrupts = interrupts + 1;
+ 
+ 	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
+ 
+ 	reg = cap + PCI_MSIX_FLAGS;
+ 	val = cdns_pcie_ep_fn_readw(pcie, fn, reg);
+ 	val &= ~PCI_MSIX_FLAGS_QSIZE;
+-	val |= interrupts;
++	val |= interrupts; /* 0's based value */
+ 	cdns_pcie_ep_fn_writew(pcie, fn, reg, val);
+ 
+ 	/* Set MSIX BAR and offset */
+@@ -308,7 +309,7 @@ static int cdns_pcie_ep_set_msix(struct pci_epc *epc, u8 fn, u8 vfn,
+ 
+ 	/* Set PBA BAR and offset.  BAR must match MSIX BAR */
+ 	reg = cap + PCI_MSIX_PBA;
+-	val = (offset + (interrupts * PCI_MSIX_ENTRY_SIZE)) | bir;
++	val = (offset + (actual_interrupts * PCI_MSIX_ENTRY_SIZE)) | bir;
+ 	cdns_pcie_ep_fn_writel(pcie, fn, reg, val);
+ 
+ 	return 0;
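+
Both MSI-X hunks (Cadence above, DesignWare just below) turn on the same encoding detail: PCI_MSIX_FLAGS_QSIZE holds a 0's-based table size, so `interrupts` is written to the register unchanged, but anything needing the real vector count — such as placing the PBA after the table — must use interrupts + 1. A sketch of the corrected arithmetic; demo_pba_offset() is an invented helper:

  #include <linux/pci.h>

  /* PCI_MSIX_FLAGS_QSIZE is 0's based: qsize encodes qsize + 1 vectors.
   * E.g. qsize = 31 means 32 entries of PCI_MSIX_ENTRY_SIZE (16) bytes,
   * so a PBA packed directly after the table must start at
   * table_offset + 512, not table_offset + 496. */
  static u32 demo_pba_offset(u32 table_offset, u16 qsize)
  {
          return table_offset + (qsize + 1) * PCI_MSIX_ENTRY_SIZE;
  }
+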
+diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
+index 9e7e94f32b436c..00289948f9c124 100644
+--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
++++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
+@@ -398,6 +398,7 @@ static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ 	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ 	struct dw_pcie_ep_func *ep_func;
+ 	u32 val, reg;
++	u16 actual_interrupts = interrupts + 1;
+ 
+ 	ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
+ 	if (!ep_func || !ep_func->msix_cap)
+@@ -408,7 +409,7 @@ static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ 	reg = ep_func->msix_cap + PCI_MSIX_FLAGS;
+ 	val = dw_pcie_ep_readw_dbi(ep, func_no, reg);
+ 	val &= ~PCI_MSIX_FLAGS_QSIZE;
+-	val |= interrupts;
++	val |= interrupts; /* 0's based value */
+ 	dw_pcie_writew_dbi(pci, reg, val);
+ 
+ 	reg = ep_func->msix_cap + PCI_MSIX_TABLE;
+@@ -416,7 +417,7 @@ static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ 	dw_pcie_ep_writel_dbi(ep, func_no, reg, val);
+ 
+ 	reg = ep_func->msix_cap + PCI_MSIX_PBA;
+-	val = (offset + (interrupts * PCI_MSIX_ENTRY_SIZE)) | bir;
++	val = (offset + (actual_interrupts * PCI_MSIX_ENTRY_SIZE)) | bir;
+ 	dw_pcie_ep_writel_dbi(ep, func_no, reg, val);
+ 
+ 	dw_pcie_dbi_ro_wr_dis(pci);
+diff --git a/drivers/pci/controller/dwc/pcie-dw-rockchip.c b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
+index 1170e1107508bd..6b113a1212a920 100644
+--- a/drivers/pci/controller/dwc/pcie-dw-rockchip.c
++++ b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
+@@ -44,7 +44,6 @@
+ #define PCIE_LINKUP			(PCIE_SMLH_LINKUP | PCIE_RDLH_LINKUP)
+ #define PCIE_RDLH_LINK_UP_CHGED		BIT(1)
+ #define PCIE_LINK_REQ_RST_NOT_INT	BIT(2)
+-#define PCIE_L0S_ENTRY			0x11
+ #define PCIE_CLIENT_GENERAL_CONTROL	0x0
+ #define PCIE_CLIENT_INTR_STATUS_LEGACY	0x8
+ #define PCIE_CLIENT_INTR_MASK_LEGACY	0x1c
+@@ -177,8 +176,7 @@ static int rockchip_pcie_link_up(struct dw_pcie *pci)
+ 	struct rockchip_pcie *rockchip = to_rockchip_pcie(pci);
+ 	u32 val = rockchip_pcie_get_ltssm(rockchip);
+ 
+-	if ((val & PCIE_LINKUP) == PCIE_LINKUP &&
+-	    (val & PCIE_LTSSM_STATUS_MASK) == PCIE_L0S_ENTRY)
++	if ((val & PCIE_LINKUP) == PCIE_LINKUP)
+ 		return 1;
+ 
+ 	return 0;
+@@ -379,8 +377,8 @@ static int rockchip_pcie_phy_init(struct rockchip_pcie *rockchip)
+ 
+ static void rockchip_pcie_phy_deinit(struct rockchip_pcie *rockchip)
+ {
+-	phy_exit(rockchip->phy);
+ 	phy_power_off(rockchip->phy);
++	phy_exit(rockchip->phy);
+ }
+ 
+ static const struct dw_pcie_ops dw_pcie_ops = {
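+
The deinit reorder above restores the rule that teardown mirrors bring-up in reverse: phy_init() then phy_power_on() going up implies phy_power_off() then phy_exit() coming down. Minimal paired helpers (names invented):

  #include <linux/phy/phy.h>

  static int demo_phy_up(struct phy *phy)
  {
          int ret = phy_init(phy);

          if (ret)
                  return ret;

          ret = phy_power_on(phy);
          if (ret)
                  phy_exit(phy);          /* unwind in reverse on error */

          return ret;
  }

  static void demo_phy_down(struct phy *phy)
  {
          phy_power_off(phy);
          phy_exit(phy);
  }
+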
+diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c
+index e9e9aaa91770ae..d9996516f49e67 100644
+--- a/drivers/pci/hotplug/s390_pci_hpc.c
++++ b/drivers/pci/hotplug/s390_pci_hpc.c
+@@ -65,9 +65,9 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
+ 
+ 	rc = zpci_deconfigure_device(zdev);
+ out:
+-	mutex_unlock(&zdev->state_lock);
+ 	if (pdev)
+ 		pci_dev_put(pdev);
++	mutex_unlock(&zdev->state_lock);
+ 	return rc;
+ }
+ 
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 7ca5422feb2d44..51a09e48967f23 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -5643,7 +5643,8 @@ static void pci_slot_unlock(struct pci_slot *slot)
+ 			continue;
+ 		if (dev->subordinate)
+ 			pci_bus_unlock(dev->subordinate);
+-		pci_dev_unlock(dev);
++		else
++			pci_dev_unlock(dev);
+ 	}
+ }
+ 
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index 064067d9c8b529..db609d26811ba2 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -4995,6 +4995,18 @@ static int pci_quirk_brcm_acs(struct pci_dev *dev, u16 acs_flags)
+ 		PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
+ }
+ 
++static int pci_quirk_loongson_acs(struct pci_dev *dev, u16 acs_flags)
++{
++	/*
++	 * Loongson PCIe Root Ports don't advertise an ACS capability, but
++	 * they do not allow peer-to-peer transactions between Root Ports.
++	 * Allow each Root Port to be in a separate IOMMU group by masking
++	 * SV/RR/CR/UF bits.
++	 */
++	return pci_acs_ctrl_enabled(acs_flags,
++		PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
++}
++
+ /*
+  * Wangxun 40G/25G/10G/1G NICs have no ACS capability, but on
+  * multi-function devices, the hardware isolates the functions by
+@@ -5128,6 +5140,17 @@ static const struct pci_dev_acs_enabled {
+ 	{ PCI_VENDOR_ID_BROADCOM, 0x1762, pci_quirk_mf_endpoint_acs },
+ 	{ PCI_VENDOR_ID_BROADCOM, 0x1763, pci_quirk_mf_endpoint_acs },
+ 	{ PCI_VENDOR_ID_BROADCOM, 0xD714, pci_quirk_brcm_acs },
++	/* Loongson PCIe Root Ports */
++	{ PCI_VENDOR_ID_LOONGSON, 0x3C09, pci_quirk_loongson_acs },
++	{ PCI_VENDOR_ID_LOONGSON, 0x3C19, pci_quirk_loongson_acs },
++	{ PCI_VENDOR_ID_LOONGSON, 0x3C29, pci_quirk_loongson_acs },
++	{ PCI_VENDOR_ID_LOONGSON, 0x7A09, pci_quirk_loongson_acs },
++	{ PCI_VENDOR_ID_LOONGSON, 0x7A19, pci_quirk_loongson_acs },
++	{ PCI_VENDOR_ID_LOONGSON, 0x7A29, pci_quirk_loongson_acs },
++	{ PCI_VENDOR_ID_LOONGSON, 0x7A39, pci_quirk_loongson_acs },
++	{ PCI_VENDOR_ID_LOONGSON, 0x7A49, pci_quirk_loongson_acs },
++	{ PCI_VENDOR_ID_LOONGSON, 0x7A59, pci_quirk_loongson_acs },
++	{ PCI_VENDOR_ID_LOONGSON, 0x7A69, pci_quirk_loongson_acs },
+ 	/* Amazon Annapurna Labs */
+ 	{ PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031, pci_quirk_al_acs },
+ 	/* Zhaoxin multi-function devices */
+diff --git a/drivers/phy/freescale/phy-fsl-imx8mq-usb.c b/drivers/phy/freescale/phy-fsl-imx8mq-usb.c
+index adc6394626ce83..f914f016b3d2ce 100644
+--- a/drivers/phy/freescale/phy-fsl-imx8mq-usb.c
++++ b/drivers/phy/freescale/phy-fsl-imx8mq-usb.c
+@@ -95,12 +95,12 @@ static u32 phy_tx_preemp_amp_tune_from_property(u32 microamp)
+ static u32 phy_tx_vboost_level_from_property(u32 microvolt)
+ {
+ 	switch (microvolt) {
+-	case 0 ... 960:
+-		return 0;
+-	case 961 ... 1160:
+-		return 2;
+-	default:
++	case 1156:
++		return 5;
++	case 844:
+ 		return 3;
++	default:
++		return 4;
+ 	}
+ }
+ 
+diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
+index 4ce11c74fec1fc..53b6eb7486593d 100644
+--- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
++++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
+@@ -358,9 +358,7 @@ static int armada_37xx_pmx_set_by_name(struct pinctrl_dev *pctldev,
+ 
+ 	val = grp->val[func];
+ 
+-	regmap_update_bits(info->regmap, reg, mask, val);
+-
+-	return 0;
++	return regmap_update_bits(info->regmap, reg, mask, val);
+ }
+ 
+ static int armada_37xx_pmx_set(struct pinctrl_dev *pctldev,
+@@ -402,10 +400,13 @@ static int armada_37xx_gpio_get_direction(struct gpio_chip *chip,
+ 	struct armada_37xx_pinctrl *info = gpiochip_get_data(chip);
+ 	unsigned int reg = OUTPUT_EN;
+ 	unsigned int val, mask;
++	int ret;
+ 
+ 	armada_37xx_update_reg(&reg, &offset);
+ 	mask = BIT(offset);
+-	regmap_read(info->regmap, reg, &val);
++	ret = regmap_read(info->regmap, reg, &val);
++	if (ret)
++		return ret;
+ 
+ 	if (val & mask)
+ 		return GPIO_LINE_DIRECTION_OUT;
+@@ -442,11 +443,14 @@ static int armada_37xx_gpio_get(struct gpio_chip *chip, unsigned int offset)
+ 	struct armada_37xx_pinctrl *info = gpiochip_get_data(chip);
+ 	unsigned int reg = INPUT_VAL;
+ 	unsigned int val, mask;
++	int ret;
+ 
+ 	armada_37xx_update_reg(&reg, &offset);
+ 	mask = BIT(offset);
+ 
+-	regmap_read(info->regmap, reg, &val);
++	ret = regmap_read(info->regmap, reg, &val);
++	if (ret)
++		return ret;
+ 
+ 	return (val & mask) != 0;
+ }
+@@ -471,16 +475,17 @@ static int armada_37xx_pmx_gpio_set_direction(struct pinctrl_dev *pctldev,
+ {
+ 	struct armada_37xx_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
+ 	struct gpio_chip *chip = range->gc;
++	int ret;
+ 
+ 	dev_dbg(info->dev, "gpio_direction for pin %u as %s-%d to %s\n",
+ 		offset, range->name, offset, input ? "input" : "output");
+ 
+ 	if (input)
+-		armada_37xx_gpio_direction_input(chip, offset);
++		ret = armada_37xx_gpio_direction_input(chip, offset);
+ 	else
+-		armada_37xx_gpio_direction_output(chip, offset, 0);
++		ret = armada_37xx_gpio_direction_output(chip, offset, 0);
+ 
+-	return 0;
++	return ret;
+ }
+ 
+ static int armada_37xx_gpio_request_enable(struct pinctrl_dev *pctldev,
+diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
+index 70d7485ada3643..60fcd53830a7d0 100644
+--- a/drivers/pinctrl/pinctrl-mcp23s08.c
++++ b/drivers/pinctrl/pinctrl-mcp23s08.c
+@@ -636,6 +636,14 @@ int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
+ 
+ 	mcp->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ 
++	/*
++	 * Reset the chip - we don't really know what state it's in, so reset
++	 * all pins to input first to prevent surprises.
++	 */
++	ret = mcp_write(mcp, MCP_IODIR, mcp->chip.ngpio == 16 ? 0xFFFF : 0xFF);
++	if (ret < 0)
++		return ret;
++
+ 	/* verify MCP_IOCON.SEQOP = 0, so sequential reads work,
+ 	 * and MCP_IOCON.HAEN = 1, so we work with all chips.
+ 	 */
+diff --git a/drivers/platform/loongarch/loongson-laptop.c b/drivers/platform/loongarch/loongson-laptop.c
+index 99203584949daa..61b18ac206c9ee 100644
+--- a/drivers/platform/loongarch/loongson-laptop.c
++++ b/drivers/platform/loongarch/loongson-laptop.c
+@@ -56,8 +56,7 @@ static struct input_dev *generic_inputdev;
+ static acpi_handle hotkey_handle;
+ static struct key_entry hotkey_keycode_map[GENERIC_HOTKEY_MAP_MAX];
+ 
+-int loongson_laptop_turn_on_backlight(void);
+-int loongson_laptop_turn_off_backlight(void);
++static bool bl_powered;
+ static int loongson_laptop_backlight_update(struct backlight_device *bd);
+ 
+ /* 2. ACPI Helpers and device model */
+@@ -354,16 +353,42 @@ static int ec_backlight_level(u8 level)
+ 	return level;
+ }
+ 
++static int ec_backlight_set_power(bool state)
++{
++	int status;
++	union acpi_object arg0 = { ACPI_TYPE_INTEGER };
++	struct acpi_object_list args = { 1, &arg0 };
++
++	arg0.integer.value = state;
++	status = acpi_evaluate_object(NULL, "\\BLSW", &args, NULL);
++	if (ACPI_FAILURE(status)) {
++		pr_info("Loongson lvds error: 0x%x\n", status);
++		return -EIO;
++	}
++
++	return 0;
++}
++
+ static int loongson_laptop_backlight_update(struct backlight_device *bd)
+ {
+-	int lvl = ec_backlight_level(bd->props.brightness);
++	bool target_powered = !backlight_is_blank(bd);
++	int ret = 0, lvl = ec_backlight_level(bd->props.brightness);
+ 
+ 	if (lvl < 0)
+ 		return -EIO;
++
+ 	if (ec_set_brightness(lvl))
+ 		return -EIO;
+ 
+-	return 0;
++	if (target_powered != bl_powered) {
++		ret = ec_backlight_set_power(target_powered);
++		if (ret < 0)
++			return ret;
++
++		bl_powered = target_powered;
++	}
++
++	return ret;
+ }
+ 
+ static int loongson_laptop_get_brightness(struct backlight_device *bd)
+@@ -384,7 +409,7 @@ static const struct backlight_ops backlight_laptop_ops = {
+ 
+ static int laptop_backlight_register(void)
+ {
+-	int status = 0;
++	int status = 0, ret;
+ 	struct backlight_properties props;
+ 
+ 	memset(&props, 0, sizeof(props));
+@@ -392,44 +417,20 @@ static int laptop_backlight_register(void)
+ 	if (!acpi_evalf(hotkey_handle, &status, "ECLL", "d"))
+ 		return -EIO;
+ 
+-	props.brightness = 1;
++	ret = ec_backlight_set_power(true);
++	if (ret)
++		return ret;
++
++	bl_powered = true;
++
+ 	props.max_brightness = status;
++	props.brightness = ec_get_brightness();
++	props.power = BACKLIGHT_POWER_ON;
+ 	props.type = BACKLIGHT_PLATFORM;
+ 
+ 	backlight_device_register("loongson_laptop",
+ 				NULL, NULL, &backlight_laptop_ops, &props);
+ 
+-	return 0;
+-}
+-
+-int loongson_laptop_turn_on_backlight(void)
+-{
+-	int status;
+-	union acpi_object arg0 = { ACPI_TYPE_INTEGER };
+-	struct acpi_object_list args = { 1, &arg0 };
+-
+-	arg0.integer.value = 1;
+-	status = acpi_evaluate_object(NULL, "\\BLSW", &args, NULL);
+-	if (ACPI_FAILURE(status)) {
+-		pr_info("Loongson lvds error: 0x%x\n", status);
+-		return -ENODEV;
+-	}
+-
+-	return 0;
+-}
+-
+-int loongson_laptop_turn_off_backlight(void)
+-{
+-	int status;
+-	union acpi_object arg0 = { ACPI_TYPE_INTEGER };
+-	struct acpi_object_list args = { 1, &arg0 };
+-
+-	arg0.integer.value = 0;
+-	status = acpi_evaluate_object(NULL, "\\BLSW", &args, NULL);
+-	if (ACPI_FAILURE(status)) {
+-		pr_info("Loongson lvds error: 0x%x\n", status);
+-		return -ENODEV;
+-	}
+ 
+ 	return 0;
+ }
+@@ -611,11 +612,17 @@ static int __init generic_acpi_laptop_init(void)
+ 
+ static void __exit generic_acpi_laptop_exit(void)
+ {
++	int i;
++
+ 	if (generic_inputdev) {
+-		if (input_device_registered)
+-			input_unregister_device(generic_inputdev);
+-		else
++		if (!input_device_registered) {
+ 			input_free_device(generic_inputdev);
++		} else {
++			input_unregister_device(generic_inputdev);
++
++			for (i = 0; i < ARRAY_SIZE(generic_sub_drivers); i++)
++				generic_subdriver_exit(&generic_sub_drivers[i]);
++		}
+ 	}
+ }
+ 
+diff --git a/drivers/platform/x86/amd/pmc/pmc.c b/drivers/platform/x86/amd/pmc/pmc.c
+index dc071b4257d7bf..357a46fdffedad 100644
+--- a/drivers/platform/x86/amd/pmc/pmc.c
++++ b/drivers/platform/x86/amd/pmc/pmc.c
+@@ -393,6 +393,8 @@ static int amd_pmc_setup_smu_logging(struct amd_pmc_dev *dev)
+ 			return -ENOMEM;
+ 	}
+ 
++	memset_io(dev->smu_virt_addr, 0, sizeof(struct smu_metrics));
++
+ 	/* Start the logging */
+ 	amd_pmc_send_cmd(dev, 0, NULL, SMU_MSG_LOG_RESET, false);
+ 	amd_pmc_send_cmd(dev, 0, NULL, SMU_MSG_LOG_START, false);
+diff --git a/drivers/platform/x86/amd/pmf/tee-if.c b/drivers/platform/x86/amd/pmf/tee-if.c
+index b6bcc1d57f9682..a9b195ec6f33f9 100644
+--- a/drivers/platform/x86/amd/pmf/tee-if.c
++++ b/drivers/platform/x86/amd/pmf/tee-if.c
+@@ -422,12 +422,12 @@ static int amd_pmf_ta_open_session(struct tee_context *ctx, u32 *id, const uuid_
+ 	rc = tee_client_open_session(ctx, &sess_arg, NULL);
+ 	if (rc < 0 || sess_arg.ret != 0) {
+ 		pr_err("Failed to open TEE session err:%#x, rc:%d\n", sess_arg.ret, rc);
+-		return rc;
++		return rc ?: -EINVAL;
+ 	}
+ 
+ 	*id = sess_arg.session;
+ 
+-	return rc;
++	return 0;
+ }
+ 
+ static int amd_pmf_register_input_device(struct amd_pmf_dev *dev)
+@@ -462,7 +462,9 @@ static int amd_pmf_tee_init(struct amd_pmf_dev *dev, const uuid_t *uuid)
+ 	dev->tee_ctx = tee_client_open_context(NULL, amd_pmf_amdtee_ta_match, NULL, NULL);
+ 	if (IS_ERR(dev->tee_ctx)) {
+ 		dev_err(dev->dev, "Failed to open TEE context\n");
+-		return PTR_ERR(dev->tee_ctx);
++		ret = PTR_ERR(dev->tee_ctx);
++		dev->tee_ctx = NULL;
++		return ret;
+ 	}
+ 
+ 	ret = amd_pmf_ta_open_session(dev->tee_ctx, &dev->session_id, uuid);
+@@ -502,9 +504,12 @@ static int amd_pmf_tee_init(struct amd_pmf_dev *dev, const uuid_t *uuid)
+ 
+ static void amd_pmf_tee_deinit(struct amd_pmf_dev *dev)
+ {
++	if (!dev->tee_ctx)
++		return;
+ 	tee_shm_free(dev->fw_shm_pool);
+ 	tee_client_close_session(dev->tee_ctx, dev->session_id);
+ 	tee_client_close_context(dev->tee_ctx);
++	dev->tee_ctx = NULL;
+ }
+ 
+ int amd_pmf_init_smart_pc(struct amd_pmf_dev *dev)
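
The `rc ?: -EINVAL` above matters because tee_client_open_session() can succeed at
the transport level (rc == 0) while the TEE itself rejects the session through
sess_arg.ret. A minimal sketch of the idiom; open_status() and its parameters are
illustrative, not part of the driver:

#include <linux/errno.h>
#include <linux/types.h>

/* Returns rc when rc is non-zero, else -EINVAL, so a TEE-level
 * rejection (rc == 0, tee_ret != 0) still fails the call. */
static int open_status(int rc, u32 tee_ret)
{
	if (rc < 0 || tee_ret != 0)
		return rc ?: -EINVAL;
	return 0;
}
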
+diff --git a/drivers/platform/x86/dell/dell_rbu.c b/drivers/platform/x86/dell/dell_rbu.c
+index 9f51e0fcab04e1..fee20866b41e41 100644
+--- a/drivers/platform/x86/dell/dell_rbu.c
++++ b/drivers/platform/x86/dell/dell_rbu.c
+@@ -292,7 +292,7 @@ static int packet_read_list(char *data, size_t * pread_length)
+ 	remaining_bytes = *pread_length;
+ 	bytes_read = rbu_data.packet_read_count;
+ 
+-	list_for_each_entry(newpacket, (&packet_data_head.list)->next, list) {
++	list_for_each_entry(newpacket, &packet_data_head.list, list) {
+ 		bytes_copied = do_packet_read(pdest, newpacket,
+ 			remaining_bytes, bytes_read, &temp_count);
+ 		remaining_bytes -= bytes_copied;
+@@ -315,14 +315,14 @@ static void packet_empty_list(void)
+ {
+ 	struct packet_data *newpacket, *tmp;
+ 
+-	list_for_each_entry_safe(newpacket, tmp, (&packet_data_head.list)->next, list) {
++	list_for_each_entry_safe(newpacket, tmp, &packet_data_head.list, list) {
+ 		list_del(&newpacket->list);
+ 
+ 		/*
+ 		 * zero out the RBU packet memory before freeing
+ 		 * to make sure there are no stale RBU packets left in memory
+ 		 */
+-		memset(newpacket->data, 0, rbu_data.packetsize);
++		memset(newpacket->data, 0, newpacket->length);
+ 		set_memory_wb((unsigned long)newpacket->data,
+ 			1 << newpacket->ordernum);
+ 		free_pages((unsigned long) newpacket->data,
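
The two hunks above now pass the list head itself instead of
(&packet_data_head.list)->next: list_for_each_entry() starts at head->next
internally and uses the head as its termination sentinel, so handing it
head->next starts one node late and compares against the wrong sentinel. A
hedged sketch of the contract, with demo names that are not driver code:

#include <linux/list.h>

struct demo_packet {
	struct list_head list;
	int len;
};

static LIST_HEAD(demo_head);

static int demo_total_len(void)
{
	struct demo_packet *p;
	int total = 0;

	/* Correct: pass the sentinel head; the macro dereferences
	 * head->next itself and stops when it cycles back to head. */
	list_for_each_entry(p, &demo_head, list)
		total += p->len;

	return total;
}
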
+diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
+index bdb4cbee42058a..93aa72bff3f00d 100644
+--- a/drivers/platform/x86/ideapad-laptop.c
++++ b/drivers/platform/x86/ideapad-laptop.c
+@@ -15,6 +15,7 @@
+ #include <linux/bug.h>
+ #include <linux/cleanup.h>
+ #include <linux/debugfs.h>
++#include <linux/delay.h>
+ #include <linux/device.h>
+ #include <linux/dmi.h>
+ #include <linux/i8042.h>
+@@ -267,6 +268,20 @@ static void ideapad_shared_exit(struct ideapad_private *priv)
+  */
+ #define IDEAPAD_EC_TIMEOUT 200 /* in ms */
+ 
++/*
++ * Some models (e.g., ThinkBook since 2024) have a low tolerance for being
++ * polled too frequently. Doing so may break the state machine in the EC,
++ * resulting in a hard shutdown.
++ *
++ * It is also observed that frequent polls may disturb the ongoing operation
++ * and notably delay the availability of EC response.
++ *
++ * These values are used as the delay before the first poll and the interval
++ * between subsequent polls to solve the above issues.
++ */
++#define IDEAPAD_EC_POLL_MIN_US 150
++#define IDEAPAD_EC_POLL_MAX_US 300
++
+ static int eval_int(acpi_handle handle, const char *name, unsigned long *res)
+ {
+ 	unsigned long long result;
+@@ -383,7 +398,7 @@ static int read_ec_data(acpi_handle handle, unsigned long cmd, unsigned long *da
+ 	end_jiffies = jiffies + msecs_to_jiffies(IDEAPAD_EC_TIMEOUT) + 1;
+ 
+ 	while (time_before(jiffies, end_jiffies)) {
+-		schedule();
++		usleep_range(IDEAPAD_EC_POLL_MIN_US, IDEAPAD_EC_POLL_MAX_US);
+ 
+ 		err = eval_vpcr(handle, 1, &val);
+ 		if (err)
+@@ -414,7 +429,7 @@ static int write_ec_cmd(acpi_handle handle, unsigned long cmd, unsigned long dat
+ 	end_jiffies = jiffies + msecs_to_jiffies(IDEAPAD_EC_TIMEOUT) + 1;
+ 
+ 	while (time_before(jiffies, end_jiffies)) {
+-		schedule();
++		usleep_range(IDEAPAD_EC_POLL_MIN_US, IDEAPAD_EC_POLL_MAX_US);
+ 
+ 		err = eval_vpcr(handle, 1, &val);
+ 		if (err)
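
A sketch of the bounded polling pattern these hunks adopt (demo_poll() and
done() are assumptions for illustration only): usleep_range() actually sleeps
between polls, whereas a bare schedule() on an otherwise idle CPU can return
almost immediately and hammer the EC.

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/types.h>

static int demo_poll(bool (*done)(void))
{
	unsigned long end = jiffies + msecs_to_jiffies(200) + 1;

	while (time_before(jiffies, end)) {
		/* back off 150-300 us between polls */
		usleep_range(150, 300);
		if (done())
			return 0;
	}

	return -ETIMEDOUT;
}
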
+diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c
+index 0591053813a283..5ab45b75166628 100644
+--- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c
++++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c
+@@ -467,10 +467,13 @@ static int uncore_probe(struct auxiliary_device *auxdev, const struct auxiliary_
+ 
+ 	/* Get the package ID from the TPMI core */
+ 	plat_info = tpmi_get_platform_data(auxdev);
+-	if (plat_info)
+-		pkg = plat_info->package_id;
+-	else
++	if (unlikely(!plat_info)) {
+ 		dev_info(&auxdev->dev, "Platform information is NULL\n");
++		ret = -ENODEV;
++		goto err_rem_common;
++	}
++
++	pkg = plat_info->package_id;
+ 
+ 	for (i = 0; i < num_resources; ++i) {
+ 		struct tpmi_uncore_power_domain_info *pd_info;
+diff --git a/drivers/pmdomain/core.c b/drivers/pmdomain/core.c
+index 8b1f894f5e790c..2643525a572bbf 100644
+--- a/drivers/pmdomain/core.c
++++ b/drivers/pmdomain/core.c
+@@ -2228,8 +2228,10 @@ static int genpd_alloc_data(struct generic_pm_domain *genpd)
+ 	return 0;
+ put:
+ 	put_device(&genpd->dev);
+-	if (genpd->free_states == genpd_free_default_power_state)
++	if (genpd->free_states == genpd_free_default_power_state) {
+ 		kfree(genpd->states);
++		genpd->states = NULL;
++	}
+ free:
+ 	if (genpd_is_cpu_domain(genpd))
+ 		free_cpumask_var(genpd->cpus);
+diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c
+index 1a20c775489c72..871f03d160c53a 100644
+--- a/drivers/power/supply/bq27xxx_battery.c
++++ b/drivers/power/supply/bq27xxx_battery.c
+@@ -2062,7 +2062,7 @@ static int bq27xxx_battery_get_property(struct power_supply *psy,
+ 	mutex_unlock(&di->lock);
+ 
+ 	if (psp != POWER_SUPPLY_PROP_PRESENT && di->cache.flags < 0)
+-		return -ENODEV;
++		return di->cache.flags;
+ 
+ 	switch (psp) {
+ 	case POWER_SUPPLY_PROP_STATUS:
+diff --git a/drivers/power/supply/bq27xxx_battery_i2c.c b/drivers/power/supply/bq27xxx_battery_i2c.c
+index ba0d22d9042950..868e95f0887e11 100644
+--- a/drivers/power/supply/bq27xxx_battery_i2c.c
++++ b/drivers/power/supply/bq27xxx_battery_i2c.c
+@@ -6,6 +6,7 @@
+  *	Andrew F. Davis <afd@ti.com>
+  */
+ 
++#include <linux/delay.h>
+ #include <linux/i2c.h>
+ #include <linux/interrupt.h>
+ #include <linux/module.h>
+@@ -31,6 +32,7 @@ static int bq27xxx_battery_i2c_read(struct bq27xxx_device_info *di, u8 reg,
+ 	struct i2c_msg msg[2];
+ 	u8 data[2];
+ 	int ret;
++	int retry = 0;
+ 
+ 	if (!client->adapter)
+ 		return -ENODEV;
+@@ -47,7 +49,16 @@ static int bq27xxx_battery_i2c_read(struct bq27xxx_device_info *di, u8 reg,
+ 	else
+ 		msg[1].len = 2;
+ 
+-	ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
++	do {
++		ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
++		if (ret == -EBUSY && ++retry < 3) {
++			/* sleep 10 milliseconds when busy */
++			usleep_range(10000, 11000);
++			continue;
++		}
++		break;
++	} while (1);
++
+ 	if (ret < 0)
+ 		return ret;
+ 
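
The retry loop above is equivalent to the counted form below, where demo_xfer()
stands in for the i2c_transfer() call and is not driver code. Bounding it at
three attempts keeps a wedged bus from stalling the property read indefinitely:

#include <linux/delay.h>
#include <linux/errno.h>

static int demo_read_with_retry(int (*demo_xfer)(void))
{
	int ret, retry;

	for (retry = 0; ; retry++) {
		ret = demo_xfer();
		if (ret != -EBUSY || retry >= 2)
			break;
		/* fuel gauge is busy: back off ~10 ms and retry */
		usleep_range(10000, 11000);
	}

	return ret;
}
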
+diff --git a/drivers/power/supply/collie_battery.c b/drivers/power/supply/collie_battery.c
+index 68390bd1004f04..3daf7befc0bf64 100644
+--- a/drivers/power/supply/collie_battery.c
++++ b/drivers/power/supply/collie_battery.c
+@@ -440,6 +440,7 @@ static int collie_bat_probe(struct ucb1x00_dev *dev)
+ 
+ static void collie_bat_remove(struct ucb1x00_dev *dev)
+ {
++	device_init_wakeup(&ucb->dev, 0);
+ 	free_irq(gpiod_to_irq(collie_bat_main.gpio_full), &collie_bat_main);
+ 	power_supply_unregister(collie_bat_bu.psy);
+ 	power_supply_unregister(collie_bat_main.psy);
+diff --git a/drivers/power/supply/max17040_battery.c b/drivers/power/supply/max17040_battery.c
+index 51310f6e4803b9..c1640bc6accd27 100644
+--- a/drivers/power/supply/max17040_battery.c
++++ b/drivers/power/supply/max17040_battery.c
+@@ -410,8 +410,9 @@ static int max17040_get_property(struct power_supply *psy,
+ 		if (!chip->channel_temp)
+ 			return -ENODATA;
+ 
+-		iio_read_channel_processed_scale(chip->channel_temp,
+-						 &val->intval, 10);
++		iio_read_channel_processed(chip->channel_temp, &val->intval);
++		val->intval /= 100; /* Convert from milli- to deci-degree */
++
+ 		break;
+ 	default:
+ 		return -EINVAL;
+diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
+index 1a1edd87122d3d..b892a7323084dc 100644
+--- a/drivers/ptp/ptp_clock.c
++++ b/drivers/ptp/ptp_clock.c
+@@ -121,7 +121,8 @@ static int ptp_clock_adjtime(struct posix_clock *pc, struct __kernel_timex *tx)
+ 	struct ptp_clock_info *ops;
+ 	int err = -EOPNOTSUPP;
+ 
+-	if (ptp_clock_freerun(ptp)) {
++	if (tx->modes & (ADJ_SETOFFSET | ADJ_FREQUENCY | ADJ_OFFSET) &&
++	    ptp_clock_freerun(ptp)) {
+ 		pr_err("ptp: physical clock is free running\n");
+ 		return -EBUSY;
+ 	}
+diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
+index 528d86a33f37de..a6aad743c282f4 100644
+--- a/drivers/ptp/ptp_private.h
++++ b/drivers/ptp/ptp_private.h
+@@ -98,7 +98,27 @@ static inline int queue_cnt(const struct timestamp_event_queue *q)
+ /* Check if ptp virtual clock is in use */
+ static inline bool ptp_vclock_in_use(struct ptp_clock *ptp)
+ {
+-	return !ptp->is_virtual_clock;
++	bool in_use = false;
++
++	/* Virtual clocks can't be stacked on top of virtual clocks.
++	 * Avoid acquiring the n_vclocks_mux on virtual clocks, to allow this
++	 * function to be called from code paths where the n_vclocks_mux of the
++	 * parent physical clock is already held. Functionally that's not an
++	 * issue, but lockdep would complain, because they have the same lock
++	 * class.
++	 */
++	if (ptp->is_virtual_clock)
++		return false;
++
++	if (mutex_lock_interruptible(&ptp->n_vclocks_mux))
++		return true;
++
++	if (ptp->n_vclocks)
++		in_use = true;
++
++	mutex_unlock(&ptp->n_vclocks_mux);
++
++	return in_use;
+ }
+ 
+ /* Check if ptp clock shall be free running */
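
Two details in the rewritten ptp_vclock_in_use() deserve emphasis: the early
return for virtual clocks avoids taking an n_vclocks_mux whose lock class
matches the parent's (which would trip lockdep), and a failed interruptible
lock attempt reports the conservative answer. A sketch of the latter pattern,
with assumed names:

#include <linux/mutex.h>
#include <linux/types.h>

static bool demo_busy(struct mutex *lock, unsigned int *count)
{
	bool busy;

	if (mutex_lock_interruptible(lock))
		return true;	/* interrupted: err on the "busy" side */

	busy = *count != 0;
	mutex_unlock(lock);

	return busy;
}
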
+diff --git a/drivers/pwm/pwm-axi-pwmgen.c b/drivers/pwm/pwm-axi-pwmgen.c
+index b5477659ba186c..73c68f494e7f3e 100644
+--- a/drivers/pwm/pwm-axi-pwmgen.c
++++ b/drivers/pwm/pwm-axi-pwmgen.c
+@@ -174,7 +174,7 @@ static int axi_pwmgen_probe(struct platform_device *pdev)
+ 	struct regmap *regmap;
+ 	struct pwm_chip *chip;
+ 	struct axi_pwmgen_ddata *ddata;
+-	struct clk *clk;
++	struct clk *axi_clk, *clk;
+ 	void __iomem *io_base;
+ 	int ret;
+ 
+@@ -197,9 +197,26 @@ static int axi_pwmgen_probe(struct platform_device *pdev)
+ 	ddata = pwmchip_get_drvdata(chip);
+ 	ddata->regmap = regmap;
+ 
+-	clk = devm_clk_get_enabled(dev, NULL);
++	/*
++	 * Using NULL here instead of "axi" for backwards compatibility. There
++	 * are some dtbs that don't give clock-names and have the "ext" clock
++	 * as the one and only clock (due to mistake in the original bindings).
++	 */
++	axi_clk = devm_clk_get_enabled(dev, NULL);
++	if (IS_ERR(axi_clk))
++		return dev_err_probe(dev, PTR_ERR(axi_clk), "failed to get axi clock\n");
++
++	clk = devm_clk_get_optional_enabled(dev, "ext");
+ 	if (IS_ERR(clk))
+-		return dev_err_probe(dev, PTR_ERR(clk), "failed to get clock\n");
++		return dev_err_probe(dev, PTR_ERR(clk), "failed to get ext clock\n");
++
++	/*
++	 * If there is no "ext" clock, it means the HDL was compiled with
++	 * ASYNC_CLK_EN=0. In this case, the AXI clock is also used for the
++	 * PWM output clock.
++	 */
++	if (!clk)
++		clk = axi_clk;
+ 
+ 	ret = devm_clk_rate_exclusive_get(dev, clk);
+ 	if (ret)
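
devm_clk_get_optional_enabled() is what makes the fallback above safe: unlike
devm_clk_get_enabled(), it returns NULL rather than an error when the named
clock simply is not described in the device tree. A minimal sketch;
demo_pick_clk() is illustrative, not driver code:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static struct clk *demo_pick_clk(struct device *dev, struct clk *axi_clk)
{
	struct clk *ext = devm_clk_get_optional_enabled(dev, "ext");

	if (IS_ERR(ext))
		return ext;		/* real lookup or enable failure */

	/* absent "ext" clock: PWM output runs from the AXI clock */
	return ext ?: axi_clk;
}
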
+diff --git a/drivers/rapidio/rio_cm.c b/drivers/rapidio/rio_cm.c
+index 9135227301c8d6..e548edf64eca04 100644
+--- a/drivers/rapidio/rio_cm.c
++++ b/drivers/rapidio/rio_cm.c
+@@ -789,6 +789,9 @@ static int riocm_ch_send(u16 ch_id, void *buf, int len)
+ 	if (buf == NULL || ch_id == 0 || len == 0 || len > RIO_MAX_MSG_SIZE)
+ 		return -EINVAL;
+ 
++	if (len < sizeof(struct rio_ch_chan_hdr))
++		return -EINVAL;		/* insufficient data from user */
++
+ 	ch = riocm_get_channel(ch_id);
+ 	if (!ch) {
+ 		riocm_error("%s(%d) ch_%d not found", current->comm,
+diff --git a/drivers/regulator/max14577-regulator.c b/drivers/regulator/max14577-regulator.c
+index 5e7171b9065ae7..41fd15adfd1fdd 100644
+--- a/drivers/regulator/max14577-regulator.c
++++ b/drivers/regulator/max14577-regulator.c
+@@ -40,11 +40,14 @@ static int max14577_reg_get_current_limit(struct regulator_dev *rdev)
+ 	struct max14577 *max14577 = rdev_get_drvdata(rdev);
+ 	const struct maxim_charger_current *limits =
+ 		&maxim_charger_currents[max14577->dev_type];
++	int ret;
+ 
+ 	if (rdev_get_id(rdev) != MAX14577_CHARGER)
+ 		return -EINVAL;
+ 
+-	max14577_read_reg(rmap, MAX14577_CHG_REG_CHG_CTRL4, &reg_data);
++	ret = max14577_read_reg(rmap, MAX14577_CHG_REG_CHG_CTRL4, &reg_data);
++	if (ret < 0)
++		return ret;
+ 
+ 	if ((reg_data & CHGCTRL4_MBCICHWRCL_MASK) == 0)
+ 		return limits->min;
+diff --git a/drivers/regulator/max20086-regulator.c b/drivers/regulator/max20086-regulator.c
+index 3d333b61fb18c8..fcdd2d0317a573 100644
+--- a/drivers/regulator/max20086-regulator.c
++++ b/drivers/regulator/max20086-regulator.c
+@@ -29,7 +29,7 @@
+ #define	MAX20086_REG_ADC4		0x09
+ 
+ /* DEVICE IDs */
+-#define MAX20086_DEVICE_ID_MAX20086	0x40
++#define MAX20086_DEVICE_ID_MAX20086	0x30
+ #define MAX20086_DEVICE_ID_MAX20087	0x20
+ #define MAX20086_DEVICE_ID_MAX20088	0x10
+ #define MAX20086_DEVICE_ID_MAX20089	0x00
+@@ -264,7 +264,7 @@ static int max20086_i2c_probe(struct i2c_client *i2c)
+ 	 * shutdown.
+ 	 */
+ 	flags = boot_on ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
+-	chip->ena_gpiod = devm_gpiod_get(chip->dev, "enable", flags);
++	chip->ena_gpiod = devm_gpiod_get_optional(chip->dev, "enable", flags);
+ 	if (IS_ERR(chip->ena_gpiod)) {
+ 		ret = PTR_ERR(chip->ena_gpiod);
+ 		dev_err(chip->dev, "Failed to get enable GPIO: %d\n", ret);
+diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
+index d2308c2f97eb94..b7011eb384a5dd 100644
+--- a/drivers/remoteproc/remoteproc_core.c
++++ b/drivers/remoteproc/remoteproc_core.c
+@@ -1617,7 +1617,7 @@ static int rproc_attach(struct rproc *rproc)
+ 	ret = rproc_set_rsc_table(rproc);
+ 	if (ret) {
+ 		dev_err(dev, "can't load resource table: %d\n", ret);
+-		goto unprepare_device;
++		goto clean_up_resources;
+ 	}
+ 
+ 	/* reset max_notifyid */
+@@ -1634,7 +1634,7 @@ static int rproc_attach(struct rproc *rproc)
+ 	ret = rproc_handle_resources(rproc, rproc_loading_handlers);
+ 	if (ret) {
+ 		dev_err(dev, "Failed to process resources: %d\n", ret);
+-		goto unprepare_device;
++		goto clean_up_resources;
+ 	}
+ 
+ 	/* Allocate carveout resources associated to rproc */
+@@ -1653,9 +1653,9 @@ static int rproc_attach(struct rproc *rproc)
+ 
+ clean_up_resources:
+ 	rproc_resource_cleanup(rproc);
+-unprepare_device:
+ 	/* release HW resources if needed */
+ 	rproc_unprepare_device(rproc);
++	kfree(rproc->clean_table);
+ disable_iommu:
+ 	rproc_disable_iommu(rproc);
+ 	return ret;
+diff --git a/drivers/remoteproc/ti_k3_m4_remoteproc.c b/drivers/remoteproc/ti_k3_m4_remoteproc.c
+index 09f0484a90e103..fba6e393635e36 100644
+--- a/drivers/remoteproc/ti_k3_m4_remoteproc.c
++++ b/drivers/remoteproc/ti_k3_m4_remoteproc.c
+@@ -228,7 +228,7 @@ static int k3_m4_rproc_unprepare(struct rproc *rproc)
+ 	int ret;
+ 
+ 	/* If the core is going to be detached do not assert the module reset */
+-	if (rproc->state == RPROC_ATTACHED)
++	if (rproc->state == RPROC_DETACHED)
+ 		return 0;
+ 
+ 	ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
+diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
+index cb67fa80fb12c8..a95da6768f6656 100644
+--- a/drivers/s390/scsi/zfcp_sysfs.c
++++ b/drivers/s390/scsi/zfcp_sysfs.c
+@@ -450,6 +450,8 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
+ 	if (kstrtoull(buf, 0, (unsigned long long *) &fcp_lun))
+ 		return -EINVAL;
+ 
++	flush_work(&port->rport_work);
++
+ 	retval = zfcp_unit_add(port, fcp_lun);
+ 	if (retval)
+ 		return retval;
+diff --git a/drivers/scsi/elx/efct/efct_hw.c b/drivers/scsi/elx/efct/efct_hw.c
+index 5a5525054d71c8..5b079b8b7a082b 100644
+--- a/drivers/scsi/elx/efct/efct_hw.c
++++ b/drivers/scsi/elx/efct/efct_hw.c
+@@ -1120,7 +1120,7 @@ int
+ efct_hw_parse_filter(struct efct_hw *hw, void *value)
+ {
+ 	int rc = 0;
+-	char *p = NULL;
++	char *p = NULL, *pp = NULL;
+ 	char *token;
+ 	u32 idx = 0;
+ 
+@@ -1132,6 +1132,7 @@ efct_hw_parse_filter(struct efct_hw *hw, void *value)
+ 		efc_log_err(hw->os, "p is NULL\n");
+ 		return -ENOMEM;
+ 	}
++	pp = p;
+ 
+ 	idx = 0;
+ 	while ((token = strsep(&p, ",")) && *token) {
+@@ -1144,7 +1145,7 @@ efct_hw_parse_filter(struct efct_hw *hw, void *value)
+ 		if (idx == ARRAY_SIZE(hw->config.filter_def))
+ 			break;
+ 	}
+-	kfree(p);
++	kfree(pp);
+ 
+ 	return rc;
+ }
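
strsep() advances the pointer it is handed, so by the time the loop above
exits, p is either NULL (all tokens consumed) or points into the middle of the
buffer (early break on a full filter table); the saved copy pp is the only
valid argument to kfree(). A stand-alone userspace sketch of the same pitfall:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char *p = strdup("a,b,c"), *pp = p;
	char *token;

	while ((token = strsep(&p, ",")) && *token)
		printf("%s\n", token);

	/* p is now NULL; an early break would leave it mid-buffer */
	free(pp);
	return 0;
}
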
+diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
+index f2e4237ff3d994..34f77b250387c0 100644
+--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
++++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
+@@ -5082,7 +5082,7 @@ lpfc_check_sli_ndlp(struct lpfc_hba *phba,
+ 		case CMD_GEN_REQUEST64_CR:
+ 			if (iocb->ndlp == ndlp)
+ 				return 1;
+-			fallthrough;
++			break;
+ 		case CMD_ELS_REQUEST64_CR:
+ 			if (remote_id == ndlp->nlp_DID)
+ 				return 1;
+diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
+index 6748fba48a07ed..4dccbaeb632835 100644
+--- a/drivers/scsi/lpfc/lpfc_sli.c
++++ b/drivers/scsi/lpfc/lpfc_sli.c
+@@ -6020,9 +6020,9 @@ lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba)
+ 	phba->sli4_hba.flash_id = bf_get(lpfc_cntl_attr_flash_id, cntl_attr);
+ 	phba->sli4_hba.asic_rev = bf_get(lpfc_cntl_attr_asic_rev, cntl_attr);
+ 
+-	memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion));
+-	strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str,
++	memcpy(phba->BIOSVersion, cntl_attr->bios_ver_str,
+ 		sizeof(phba->BIOSVersion));
++	phba->BIOSVersion[sizeof(phba->BIOSVersion) - 1] = '\0';
+ 
+ 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ 			"3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s, "
+diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
+index 8cc9f924a8ae60..c5a21e369e1675 100644
+--- a/drivers/scsi/smartpqi/smartpqi_init.c
++++ b/drivers/scsi/smartpqi/smartpqi_init.c
+@@ -9708,6 +9708,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
+ 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 			       0x1bd4, 0x0089)
+ 	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++				0x1bd4, 0x00a3)
++	},
+ 	{
+ 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 			       0x1ff9, 0x00a1)
+@@ -10044,6 +10048,30 @@ static const struct pci_device_id pqi_pci_id_table[] = {
+ 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 			       PCI_VENDOR_ID_ADAPTEC2, 0x14f0)
+ 	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       0x207d, 0x4044)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       0x207d, 0x4054)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       0x207d, 0x4084)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       0x207d, 0x4094)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       0x207d, 0x4140)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       0x207d, 0x4240)
++	},
+ 	{
+ 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 			       PCI_VENDOR_ID_ADVANTECH, 0x8312)
+@@ -10260,6 +10288,14 @@ static const struct pci_device_id pqi_pci_id_table[] = {
+ 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 			       0x1cc4, 0x0201)
+ 	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       0x1018, 0x8238)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       0x1f3f, 0x0610)
++	},
+ 	{
+ 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 			       PCI_VENDOR_ID_LENOVO, 0x0220)
+@@ -10268,10 +10304,30 @@ static const struct pci_device_id pqi_pci_id_table[] = {
+ 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 			       PCI_VENDOR_ID_LENOVO, 0x0221)
+ 	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_LENOVO, 0x0222)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_LENOVO, 0x0223)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_LENOVO, 0x0224)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_LENOVO, 0x0225)
++	},
+ 	{
+ 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 			       PCI_VENDOR_ID_LENOVO, 0x0520)
+ 	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_LENOVO, 0x0521)
++	},
+ 	{
+ 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 			       PCI_VENDOR_ID_LENOVO, 0x0522)
+@@ -10292,6 +10348,26 @@ static const struct pci_device_id pqi_pci_id_table[] = {
+ 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 			       PCI_VENDOR_ID_LENOVO, 0x0623)
+ 	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_LENOVO, 0x0624)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_LENOVO, 0x0625)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_LENOVO, 0x0626)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_LENOVO, 0x0627)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_LENOVO, 0x0628)
++	},
+ 	{
+ 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 				0x1014, 0x0718)
+@@ -10320,6 +10396,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
+ 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 			       0x1137, 0x0300)
+ 	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++				0x1ded, 0x3301)
++	},
+ 	{
+ 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 			       0x1ff9, 0x0045)
+@@ -10468,6 +10548,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
+ 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 				0x1f51, 0x100a)
+ 	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++				0x1f51, 0x100b)
++	},
+ 	{
+ 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 			       0x1f51, 0x100e)
+diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
+index 48b0ca92b44fb3..954a1cc50ba746 100644
+--- a/drivers/scsi/storvsc_drv.c
++++ b/drivers/scsi/storvsc_drv.c
+@@ -362,7 +362,7 @@ MODULE_PARM_DESC(ring_avail_percent_lowater,
+ /*
+  * Timeout in seconds for all devices managed by this driver.
+  */
+-static int storvsc_timeout = 180;
++static const int storvsc_timeout = 180;
+ 
+ #if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
+ static struct scsi_transport_template *fc_transport_template;
+@@ -768,7 +768,7 @@ static void  handle_multichannel_storage(struct hv_device *device, int max_chns)
+ 		return;
+ 	}
+ 
+-	t = wait_for_completion_timeout(&request->wait_event, 10*HZ);
++	t = wait_for_completion_timeout(&request->wait_event, storvsc_timeout * HZ);
+ 	if (t == 0) {
+ 		dev_err(dev, "Failed to create sub-channel: timed out\n");
+ 		return;
+@@ -833,7 +833,7 @@ static int storvsc_execute_vstor_op(struct hv_device *device,
+ 	if (ret != 0)
+ 		return ret;
+ 
+-	t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
++	t = wait_for_completion_timeout(&request->wait_event, storvsc_timeout * HZ);
+ 	if (t == 0)
+ 		return -ETIMEDOUT;
+ 
+@@ -1351,6 +1351,8 @@ static int storvsc_connect_to_vsp(struct hv_device *device, u32 ring_size,
+ 		return ret;
+ 
+ 	ret = storvsc_channel_init(device, is_fc);
++	if (ret)
++		vmbus_close(device->channel);
+ 
+ 	return ret;
+ }
+@@ -1668,7 +1670,7 @@ static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
+ 	if (ret != 0)
+ 		return FAILED;
+ 
+-	t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
++	t = wait_for_completion_timeout(&request->wait_event, storvsc_timeout * HZ);
+ 	if (t == 0)
+ 		return TIMEOUT_ERROR;
+ 
+diff --git a/drivers/soc/qcom/pmic_glink_altmode.c b/drivers/soc/qcom/pmic_glink_altmode.c
+index 463b1c5288318d..db25f406878b04 100644
+--- a/drivers/soc/qcom/pmic_glink_altmode.c
++++ b/drivers/soc/qcom/pmic_glink_altmode.c
+@@ -219,21 +219,29 @@ static void pmic_glink_altmode_worker(struct work_struct *work)
+ {
+ 	struct pmic_glink_altmode_port *alt_port = work_to_altmode_port(work);
+ 	struct pmic_glink_altmode *altmode = alt_port->altmode;
++	enum drm_connector_status conn_status;
+ 
+ 	typec_switch_set(alt_port->typec_switch, alt_port->orientation);
+ 
+-	if (alt_port->svid == USB_TYPEC_DP_SID && alt_port->mode == 0xff)
+-		pmic_glink_altmode_safe(altmode, alt_port);
+-	else if (alt_port->svid == USB_TYPEC_DP_SID)
+-		pmic_glink_altmode_enable_dp(altmode, alt_port, alt_port->mode,
+-					     alt_port->hpd_state, alt_port->hpd_irq);
+-	else
+-		pmic_glink_altmode_enable_usb(altmode, alt_port);
++	if (alt_port->svid == USB_TYPEC_DP_SID) {
++		if (alt_port->mode == 0xff) {
++			pmic_glink_altmode_safe(altmode, alt_port);
++		} else {
++			pmic_glink_altmode_enable_dp(altmode, alt_port,
++						     alt_port->mode,
++						     alt_port->hpd_state,
++						     alt_port->hpd_irq);
++		}
+ 
+-	drm_aux_hpd_bridge_notify(&alt_port->bridge->dev,
+-				  alt_port->hpd_state ?
+-				  connector_status_connected :
+-				  connector_status_disconnected);
++		if (alt_port->hpd_state)
++			conn_status = connector_status_connected;
++		else
++			conn_status = connector_status_disconnected;
++
++		drm_aux_hpd_bridge_notify(&alt_port->bridge->dev, conn_status);
++	} else {
++		pmic_glink_altmode_enable_usb(altmode, alt_port);
++	}
+ 
+ 	pmic_glink_altmode_request(altmode, ALTMODE_PAN_ACK, alt_port->index);
+ }
+diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
+index 4ae1a7039418b1..1f806ee966c373 100644
+--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
++++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
+@@ -411,7 +411,7 @@ static ssize_t ad5933_store(struct device *dev,
+ 		ret = ad5933_cmd(st, 0);
+ 		break;
+ 	case AD5933_OUT_SETTLING_CYCLES:
+-		val = clamp(val, (u16)0, (u16)0x7FF);
++		val = clamp(val, (u16)0, (u16)0x7FC);
+ 		st->settling_cycles = val;
+ 
+ 		/* 2x, 4x handling, see datasheet */
+diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c
+index d113679b1e2d7a..acc7998758ad84 100644
+--- a/drivers/tee/tee_core.c
++++ b/drivers/tee/tee_core.c
+@@ -10,6 +10,7 @@
+ #include <linux/fs.h>
+ #include <linux/idr.h>
+ #include <linux/module.h>
++#include <linux/overflow.h>
+ #include <linux/slab.h>
+ #include <linux/tee_core.h>
+ #include <linux/uaccess.h>
+@@ -19,7 +20,7 @@
+ 
+ #define TEE_NUM_DEVICES	32
+ 
+-#define TEE_IOCTL_PARAM_SIZE(x) (sizeof(struct tee_param) * (x))
++#define TEE_IOCTL_PARAM_SIZE(x) (size_mul(sizeof(struct tee_param), (x)))
+ 
+ #define TEE_UUID_NS_NAME_SIZE	128
+ 
+@@ -487,7 +488,7 @@ static int tee_ioctl_open_session(struct tee_context *ctx,
+ 	if (copy_from_user(&arg, uarg, sizeof(arg)))
+ 		return -EFAULT;
+ 
+-	if (sizeof(arg) + TEE_IOCTL_PARAM_SIZE(arg.num_params) != buf.buf_len)
++	if (size_add(sizeof(arg), TEE_IOCTL_PARAM_SIZE(arg.num_params)) != buf.buf_len)
+ 		return -EINVAL;
+ 
+ 	if (arg.num_params) {
+@@ -565,7 +566,7 @@ static int tee_ioctl_invoke(struct tee_context *ctx,
+ 	if (copy_from_user(&arg, uarg, sizeof(arg)))
+ 		return -EFAULT;
+ 
+-	if (sizeof(arg) + TEE_IOCTL_PARAM_SIZE(arg.num_params) != buf.buf_len)
++	if (size_add(sizeof(arg), TEE_IOCTL_PARAM_SIZE(arg.num_params)) != buf.buf_len)
+ 		return -EINVAL;
+ 
+ 	if (arg.num_params) {
+@@ -699,7 +700,7 @@ static int tee_ioctl_supp_recv(struct tee_context *ctx,
+ 	if (get_user(num_params, &uarg->num_params))
+ 		return -EFAULT;
+ 
+-	if (sizeof(*uarg) + TEE_IOCTL_PARAM_SIZE(num_params) != buf.buf_len)
++	if (size_add(sizeof(*uarg), TEE_IOCTL_PARAM_SIZE(num_params)) != buf.buf_len)
+ 		return -EINVAL;
+ 
+ 	params = kcalloc(num_params, sizeof(struct tee_param), GFP_KERNEL);
+@@ -798,7 +799,7 @@ static int tee_ioctl_supp_send(struct tee_context *ctx,
+ 	    get_user(num_params, &uarg->num_params))
+ 		return -EFAULT;
+ 
+-	if (sizeof(*uarg) + TEE_IOCTL_PARAM_SIZE(num_params) > buf.buf_len)
++	if (size_add(sizeof(*uarg), TEE_IOCTL_PARAM_SIZE(num_params)) > buf.buf_len)
+ 		return -EINVAL;
+ 
+ 	params = kcalloc(num_params, sizeof(struct tee_param), GFP_KERNEL);
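
size_add() and size_mul() from <linux/overflow.h> saturate to SIZE_MAX on
overflow, so a comparison such as size_add(sizeof(arg),
TEE_IOCTL_PARAM_SIZE(arg.num_params)) != buf.buf_len fails safely instead of
letting a huge num_params wrap around to a small value. A userspace mimic of
their semantics (demo names, not the kernel implementation):

#include <stddef.h>
#include <stdint.h>

static size_t demo_size_add(size_t a, size_t b)
{
	size_t r;

	return __builtin_add_overflow(a, b, &r) ? SIZE_MAX : r;
}

static size_t demo_size_mul(size_t a, size_t b)
{
	size_t r;

	return __builtin_mul_overflow(a, b, &r) ? SIZE_MAX : r;
}
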
+diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
+index aacbed76c7c54b..53236e3e4fa475 100644
+--- a/drivers/tty/serial/sh-sci.c
++++ b/drivers/tty/serial/sh-sci.c
+@@ -183,6 +183,7 @@ static struct sci_port sci_ports[SCI_NPORTS];
+ static unsigned long sci_ports_in_use;
+ static struct uart_driver sci_uart_driver;
+ static bool sci_uart_earlycon;
++static bool sci_uart_earlycon_dev_probing;
+ 
+ static inline struct sci_port *
+ to_sci_port(struct uart_port *uart)
+@@ -3404,7 +3405,8 @@ static struct plat_sci_port *sci_parse_dt(struct platform_device *pdev,
+ static int sci_probe_single(struct platform_device *dev,
+ 				      unsigned int index,
+ 				      struct plat_sci_port *p,
+-				      struct sci_port *sciport)
++				      struct sci_port *sciport,
++				      struct resource *sci_res)
+ {
+ 	int ret;
+ 
+@@ -3451,6 +3453,30 @@ static int sci_probe_single(struct platform_device *dev,
+ 		sciport->port.flags |= UPF_HARD_FLOW;
+ 	}
+ 
++	if (sci_uart_earlycon && sci_ports[0].port.mapbase == sci_res->start) {
++		/*
++		 * In case:
++		 * - this is the earlycon port (mapped on index 0 in sci_ports[]) and
++		 * - it now maps to an alias other than zero and
++		 * - the earlycon is still alive (e.g., "earlycon keep_bootcon" is
++		 *   available in bootargs)
++		 *
++		 * we need to avoid disabling clocks and PM domains through the runtime
++		 * PM APIs called in __device_attach(). For this, increment the runtime
++		 * PM reference counter (the clocks and PM domains were already enabled
++		 * by the bootloader). Otherwise the earlycon may access the HW when it
++		 * has no clocks enabled leading to failures (infinite loop in
++		 * has no clocks enabled, leading to failures (infinite loop in
++		 */
++		pm_runtime_get_noresume(&dev->dev);
++
++		/*
++		 * Skip cleanup the sci_port[0] in early_console_exit(), this
++		 * Skip cleaning up sci_ports[0] in early_console_exit(); this
++		 */
++		sci_uart_earlycon_dev_probing = true;
++	}
++
+ 	return uart_add_one_port(&sci_uart_driver, &sciport->port);
+ }
+ 
+@@ -3509,7 +3535,7 @@ static int sci_probe(struct platform_device *dev)
+ 
+ 	platform_set_drvdata(dev, sp);
+ 
+-	ret = sci_probe_single(dev, dev_id, p, sp);
++	ret = sci_probe_single(dev, dev_id, p, sp, res);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -3666,6 +3692,22 @@ sh_early_platform_init_buffer("earlyprintk", &sci_driver,
+ #ifdef CONFIG_SERIAL_SH_SCI_EARLYCON
+ static struct plat_sci_port port_cfg;
+ 
++static int early_console_exit(struct console *co)
++{
++	struct sci_port *sci_port = &sci_ports[0];
++
++	/*
++	 * Clean the slot used by earlycon. A new SCI device might
++	 * map to this slot.
++	 */
++	if (!sci_uart_earlycon_dev_probing) {
++		memset(sci_port, 0, sizeof(*sci_port));
++		sci_uart_earlycon = false;
++	}
++
++	return 0;
++}
++
+ static int __init early_console_setup(struct earlycon_device *device,
+ 				      int type)
+ {
+@@ -3683,6 +3725,8 @@ static int __init early_console_setup(struct earlycon_device *device,
+ 		       SCSCR_RE | SCSCR_TE | port_cfg.scscr);
+ 
+ 	device->con->write = serial_console_write;
++	device->con->exit = early_console_exit;
++
+ 	return 0;
+ }
+ static int __init sci_early_console_setup(struct earlycon_device *device,
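
pm_runtime_get_noresume() only bumps the usage counter; it neither resumes the
device nor touches clocks, which is exactly what is wanted here since the
bootloader left them running. A sketch of the pairing with demo names, the
matching put shown for completeness:

#include <linux/pm_runtime.h>

static void demo_pin_power(struct device *dev)
{
	/* counter up, no resume: hardware is already powered */
	pm_runtime_get_noresume(dev);
}

static void demo_unpin_power(struct device *dev)
{
	/* counter down, no idle callback */
	pm_runtime_put_noidle(dev);
}
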
+diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c
+index c2759bbeed8491..a6551a795f7440 100644
+--- a/drivers/uio/uio_hv_generic.c
++++ b/drivers/uio/uio_hv_generic.c
+@@ -243,6 +243,9 @@ hv_uio_probe(struct hv_device *dev,
+ 	if (!ring_size)
+ 		ring_size = SZ_2M;
+ 
++	/* Adjust ring size if necessary to have it page aligned */
++	ring_size = VMBUS_RING_SIZE(ring_size);
++
+ 	pdata = devm_kzalloc(&dev->device, sizeof(*pdata), GFP_KERNEL);
+ 	if (!pdata)
+ 		return -ENOMEM;
+@@ -274,13 +277,13 @@ hv_uio_probe(struct hv_device *dev,
+ 	pdata->info.mem[INT_PAGE_MAP].name = "int_page";
+ 	pdata->info.mem[INT_PAGE_MAP].addr
+ 		= (uintptr_t)vmbus_connection.int_page;
+-	pdata->info.mem[INT_PAGE_MAP].size = PAGE_SIZE;
++	pdata->info.mem[INT_PAGE_MAP].size = HV_HYP_PAGE_SIZE;
+ 	pdata->info.mem[INT_PAGE_MAP].memtype = UIO_MEM_LOGICAL;
+ 
+ 	pdata->info.mem[MON_PAGE_MAP].name = "monitor_page";
+ 	pdata->info.mem[MON_PAGE_MAP].addr
+ 		= (uintptr_t)vmbus_connection.monitor_pages[1];
+-	pdata->info.mem[MON_PAGE_MAP].size = PAGE_SIZE;
++	pdata->info.mem[MON_PAGE_MAP].size = HV_HYP_PAGE_SIZE;
+ 	pdata->info.mem[MON_PAGE_MAP].memtype = UIO_MEM_LOGICAL;
+ 
+ 	pdata->recv_buf = vzalloc(RECV_BUFFER_SIZE);
+diff --git a/drivers/video/console/dummycon.c b/drivers/video/console/dummycon.c
+index 139049368fdcf8..7d02470f19b932 100644
+--- a/drivers/video/console/dummycon.c
++++ b/drivers/video/console/dummycon.c
+@@ -85,6 +85,15 @@ static bool dummycon_blank(struct vc_data *vc, enum vesa_blank_mode blank,
+ 	/* Redraw, so that we get putc(s) for output done while blanked */
+ 	return true;
+ }
++
++static bool dummycon_switch(struct vc_data *vc)
++{
++	/*
++	 * Redraw, so that we get putc(s) for output done while switched
++	 * away. Informs deferred consoles to take over the display.
++	 */
++	return true;
++}
+ #else
+ static void dummycon_putc(struct vc_data *vc, u16 c, unsigned int y,
+ 			  unsigned int x) { }
+@@ -95,6 +104,10 @@ static bool dummycon_blank(struct vc_data *vc, enum vesa_blank_mode blank,
+ {
+ 	return false;
+ }
++static bool dummycon_switch(struct vc_data *vc)
++{
++	return false;
++}
+ #endif
+ 
+ static const char *dummycon_startup(void)
+@@ -124,11 +137,6 @@ static bool dummycon_scroll(struct vc_data *vc, unsigned int top,
+ 	return false;
+ }
+ 
+-static bool dummycon_switch(struct vc_data *vc)
+-{
+-	return false;
+-}
+-
+ /*
+  *  The console `switch' structure for the dummy console
+  *
+diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
+index 37bd18730fe0df..f9cdbf8c53e34b 100644
+--- a/drivers/video/console/vgacon.c
++++ b/drivers/video/console/vgacon.c
+@@ -1168,7 +1168,7 @@ static bool vgacon_scroll(struct vc_data *c, unsigned int t, unsigned int b,
+ 				     c->vc_screenbuf_size - delta);
+ 			c->vc_origin = vga_vram_end - c->vc_screenbuf_size;
+ 			vga_rolled_over = 0;
+-		} else
++		} else if (oldo - delta >= (unsigned long)c->vc_screenbuf)
+ 			c->vc_origin -= delta;
+ 		c->vc_scr_end = c->vc_origin + c->vc_screenbuf_size;
+ 		scr_memsetw((u16 *) (c->vc_origin), c->vc_video_erase_char,
+diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
+index 07d127110ca4c9..c98786996c6471 100644
+--- a/drivers/video/fbdev/core/fbcon.c
++++ b/drivers/video/fbdev/core/fbcon.c
+@@ -117,9 +117,14 @@ static signed char con2fb_map_boot[MAX_NR_CONSOLES];
+ 
+ static struct fb_info *fbcon_info_from_console(int console)
+ {
++	signed char fb;
+ 	WARN_CONSOLE_UNLOCKED();
+ 
+-	return fbcon_registered_fb[con2fb_map[console]];
++	fb = con2fb_map[console];
++	if (fb < 0 || fb >= ARRAY_SIZE(fbcon_registered_fb))
++		return NULL;
++
++	return fbcon_registered_fb[fb];
+ }
+ 
+ static int logo_lines;
+diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
+index 3c568cff2913e4..eca2498f243685 100644
+--- a/drivers/video/fbdev/core/fbmem.c
++++ b/drivers/video/fbdev/core/fbmem.c
+@@ -328,8 +328,10 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
+ 	    !list_empty(&info->modelist))
+ 		ret = fb_add_videomode(&mode, &info->modelist);
+ 
+-	if (ret)
++	if (ret) {
++		info->var = old_var;
+ 		return ret;
++	}
+ 
+ 	event.info = info;
+ 	event.data = &mode;
+@@ -388,7 +390,7 @@ static int fb_check_foreignness(struct fb_info *fi)
+ 
+ static int do_register_framebuffer(struct fb_info *fb_info)
+ {
+-	int i;
++	int i, err = 0;
+ 	struct fb_videomode mode;
+ 
+ 	if (fb_check_foreignness(fb_info))
+@@ -397,10 +399,18 @@ static int do_register_framebuffer(struct fb_info *fb_info)
+ 	if (num_registered_fb == FB_MAX)
+ 		return -ENXIO;
+ 
+-	num_registered_fb++;
+ 	for (i = 0 ; i < FB_MAX; i++)
+ 		if (!registered_fb[i])
+ 			break;
++
++	if (!fb_info->modelist.prev || !fb_info->modelist.next)
++		INIT_LIST_HEAD(&fb_info->modelist);
++
++	fb_var_to_videomode(&mode, &fb_info->var);
++	err = fb_add_videomode(&mode, &fb_info->modelist);
++	if (err < 0)
++		return err;
++
+ 	fb_info->node = i;
+ 	refcount_set(&fb_info->count, 1);
+ 	mutex_init(&fb_info->lock);
+@@ -426,16 +436,12 @@ static int do_register_framebuffer(struct fb_info *fb_info)
+ 	if (bitmap_empty(fb_info->pixmap.blit_y, FB_MAX_BLIT_HEIGHT))
+ 		bitmap_fill(fb_info->pixmap.blit_y, FB_MAX_BLIT_HEIGHT);
+ 
+-	if (!fb_info->modelist.prev || !fb_info->modelist.next)
+-		INIT_LIST_HEAD(&fb_info->modelist);
+-
+ 	if (fb_info->skip_vt_switch)
+ 		pm_vt_switch_required(fb_info->device, false);
+ 	else
+ 		pm_vt_switch_required(fb_info->device, true);
+ 
+-	fb_var_to_videomode(&mode, &fb_info->var);
+-	fb_add_videomode(&mode, &fb_info->modelist);
++	num_registered_fb++;
+ 	registered_fb[i] = fb_info;
+ 
+ #ifdef CONFIG_GUMSTIX_AM200EPD
+diff --git a/drivers/video/screen_info_pci.c b/drivers/video/screen_info_pci.c
+index 6c583351714100..66bfc1d0a6dc82 100644
+--- a/drivers/video/screen_info_pci.c
++++ b/drivers/video/screen_info_pci.c
+@@ -7,8 +7,8 @@
+ 
+ static struct pci_dev *screen_info_lfb_pdev;
+ static size_t screen_info_lfb_bar;
+-static resource_size_t screen_info_lfb_offset;
+-static struct resource screen_info_lfb_res = DEFINE_RES_MEM(0, 0);
++static resource_size_t screen_info_lfb_res_start; // original start of resource
++static resource_size_t screen_info_lfb_offset; // framebuffer offset within resource
+ 
+ static bool __screen_info_relocation_is_valid(const struct screen_info *si, struct resource *pr)
+ {
+@@ -31,7 +31,7 @@ void screen_info_apply_fixups(void)
+ 	if (screen_info_lfb_pdev) {
+ 		struct resource *pr = &screen_info_lfb_pdev->resource[screen_info_lfb_bar];
+ 
+-		if (pr->start != screen_info_lfb_res.start) {
++		if (pr->start != screen_info_lfb_res_start) {
+ 			if (__screen_info_relocation_is_valid(si, pr)) {
+ 				/*
+ 				 * Only update base if we have an actual
+@@ -47,46 +47,67 @@ void screen_info_apply_fixups(void)
+ 	}
+ }
+ 
++static int __screen_info_lfb_pci_bus_region(const struct screen_info *si, unsigned int type,
++					    struct pci_bus_region *r)
++{
++	u64 base, size;
++
++	base = __screen_info_lfb_base(si);
++	if (!base)
++		return -EINVAL;
++
++	size = __screen_info_lfb_size(si, type);
++	if (!size)
++		return -EINVAL;
++
++	r->start = base;
++	r->end = base + size - 1;
++
++	return 0;
++}
++
+ static void screen_info_fixup_lfb(struct pci_dev *pdev)
+ {
+ 	unsigned int type;
+-	struct resource res[SCREEN_INFO_MAX_RESOURCES];
+-	size_t i, numres;
++	struct pci_bus_region bus_region;
+ 	int ret;
++	struct resource r = {
++		.flags = IORESOURCE_MEM,
++	};
++	const struct resource *pr;
+ 	const struct screen_info *si = &screen_info;
+ 
+ 	if (screen_info_lfb_pdev)
+ 		return; // already found
+ 
+ 	type = screen_info_video_type(si);
+-	if (type != VIDEO_TYPE_EFI)
+-		return; // only applies to EFI
++	if (!__screen_info_has_lfb(type))
++		return; // only applies to EFI; maybe VESA
+ 
+-	ret = screen_info_resources(si, res, ARRAY_SIZE(res));
++	ret = __screen_info_lfb_pci_bus_region(si, type, &bus_region);
+ 	if (ret < 0)
+ 		return;
+-	numres = ret;
+ 
+-	for (i = 0; i < numres; ++i) {
+-		struct resource *r = &res[i];
+-		const struct resource *pr;
+-
+-		if (!(r->flags & IORESOURCE_MEM))
+-			continue;
+-		pr = pci_find_resource(pdev, r);
+-		if (!pr)
+-			continue;
+-
+-		/*
+-		 * We've found a PCI device with the framebuffer
+-		 * resource. Store away the parameters to track
+-		 * relocation of the framebuffer aperture.
+-		 */
+-		screen_info_lfb_pdev = pdev;
+-		screen_info_lfb_bar = pr - pdev->resource;
+-		screen_info_lfb_offset = r->start - pr->start;
+-		memcpy(&screen_info_lfb_res, r, sizeof(screen_info_lfb_res));
+-	}
++	/*
++	 * Translate the PCI bus address to a resource. Account
++	 * for an offset if the framebuffer is behind a PCI host
++	 * bridge.
++	 */
++	pcibios_bus_to_resource(pdev->bus, &r, &bus_region);
++
++	pr = pci_find_resource(pdev, &r);
++	if (!pr)
++		return;
++
++	/*
++	 * We've found a PCI device with the framebuffer
++	 * resource. Store away the parameters to track
++	 * relocation of the framebuffer aperture.
++	 */
++	screen_info_lfb_pdev = pdev;
++	screen_info_lfb_bar = pr - pdev->resource;
++	screen_info_lfb_offset = r.start - pr->start;
++	screen_info_lfb_res_start = bus_region.start;
+ }
+ DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_ANY_ID, PCI_ANY_ID, PCI_BASE_CLASS_DISPLAY, 16,
+ 			       screen_info_fixup_lfb);
+diff --git a/drivers/virt/coco/tsm.c b/drivers/virt/coco/tsm.c
+index 9432d4e303f16b..8a638bc34d4a9e 100644
+--- a/drivers/virt/coco/tsm.c
++++ b/drivers/virt/coco/tsm.c
+@@ -15,6 +15,7 @@
+ static struct tsm_provider {
+ 	const struct tsm_ops *ops;
+ 	void *data;
++	atomic_t count;
+ } provider;
+ static DECLARE_RWSEM(tsm_rwsem);
+ 
+@@ -92,6 +93,10 @@ static ssize_t tsm_report_privlevel_store(struct config_item *cfg,
+ 	if (rc)
+ 		return rc;
+ 
++	guard(rwsem_write)(&tsm_rwsem);
++	if (!provider.ops)
++		return -ENXIO;
++
+ 	/*
+ 	 * The valid privilege levels that a TSM might accept, if it accepts a
+ 	 * privilege level setting at all, are a max of TSM_PRIVLEVEL_MAX (see
+@@ -101,7 +106,6 @@ static ssize_t tsm_report_privlevel_store(struct config_item *cfg,
+ 	if (provider.ops->privlevel_floor > val || val > TSM_PRIVLEVEL_MAX)
+ 		return -EINVAL;
+ 
+-	guard(rwsem_write)(&tsm_rwsem);
+ 	rc = try_advance_write_generation(report);
+ 	if (rc)
+ 		return rc;
+@@ -115,6 +119,10 @@ static ssize_t tsm_report_privlevel_floor_show(struct config_item *cfg,
+ 					       char *buf)
+ {
+ 	guard(rwsem_read)(&tsm_rwsem);
++
++	if (!provider.ops)
++		return -ENXIO;
++
+ 	return sysfs_emit(buf, "%u\n", provider.ops->privlevel_floor);
+ }
+ CONFIGFS_ATTR_RO(tsm_report_, privlevel_floor);
+@@ -217,6 +225,9 @@ CONFIGFS_ATTR_RO(tsm_report_, generation);
+ static ssize_t tsm_report_provider_show(struct config_item *cfg, char *buf)
+ {
+ 	guard(rwsem_read)(&tsm_rwsem);
++	if (!provider.ops)
++		return -ENXIO;
++
+ 	return sysfs_emit(buf, "%s\n", provider.ops->name);
+ }
+ CONFIGFS_ATTR_RO(tsm_report_, provider);
+@@ -284,7 +295,7 @@ static ssize_t tsm_report_read(struct tsm_report *report, void *buf,
+ 	guard(rwsem_write)(&tsm_rwsem);
+ 	ops = provider.ops;
+ 	if (!ops)
+-		return -ENOTTY;
++		return -ENXIO;
+ 	if (!report->desc.inblob_len)
+ 		return -EINVAL;
+ 
+@@ -421,12 +432,20 @@ static struct config_item *tsm_report_make_item(struct config_group *group,
+ 	if (!state)
+ 		return ERR_PTR(-ENOMEM);
+ 
++	atomic_inc(&provider.count);
+ 	config_item_init_type_name(&state->cfg, name, &tsm_report_type);
+ 	return &state->cfg;
+ }
+ 
++static void tsm_report_drop_item(struct config_group *group, struct config_item *item)
++{
++	config_item_put(item);
++	atomic_dec(&provider.count);
++}
++
+ static struct configfs_group_operations tsm_report_group_ops = {
+ 	.make_item = tsm_report_make_item,
++	.drop_item = tsm_report_drop_item,
+ };
+ 
+ static const struct config_item_type tsm_reports_type = {
+@@ -459,6 +478,11 @@ int tsm_register(const struct tsm_ops *ops, void *priv)
+ 		return -EBUSY;
+ 	}
+ 
++	if (atomic_read(&provider.count)) {
++		pr_err("configfs/tsm/report not empty\n");
++		return -EBUSY;
++	}
++
+ 	provider.ops = ops;
+ 	provider.data = priv;
+ 	return 0;
+@@ -470,6 +494,9 @@ int tsm_unregister(const struct tsm_ops *ops)
+ 	guard(rwsem_write)(&tsm_rwsem);
+ 	if (ops != provider.ops)
+ 		return -EBUSY;
++	if (atomic_read(&provider.count))
++		pr_warn("\"%s\" unregistered with items present in configfs/tsm/report\n",
++			provider.ops->name);
+ 	provider.ops = NULL;
+ 	provider.data = NULL;
+ 	return 0;
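
The atomic count added to struct tsm_provider enforces a simple lifetime rule:
ops may only be swapped while no configfs report items exist, and every
show/store path re-checks provider.ops under tsm_rwsem before dereferencing it.
A sketch of the register-side check, with assumed names:

#include <linux/atomic.h>
#include <linux/errno.h>

static atomic_t demo_item_count = ATOMIC_INIT(0);

static int demo_register(void)
{
	/* refuse to (un)register while configfs items are live */
	if (atomic_read(&demo_item_count))
		return -EBUSY;

	return 0;
}
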
+diff --git a/drivers/watchdog/da9052_wdt.c b/drivers/watchdog/da9052_wdt.c
+index d708c091bf1b1e..180526220d8c42 100644
+--- a/drivers/watchdog/da9052_wdt.c
++++ b/drivers/watchdog/da9052_wdt.c
+@@ -164,6 +164,7 @@ static int da9052_wdt_probe(struct platform_device *pdev)
+ 	da9052_wdt = &driver_data->wdt;
+ 
+ 	da9052_wdt->timeout = DA9052_DEF_TIMEOUT;
++	da9052_wdt->min_hw_heartbeat_ms = DA9052_TWDMIN;
+ 	da9052_wdt->info = &da9052_wdt_info;
+ 	da9052_wdt->ops = &da9052_wdt_ops;
+ 	da9052_wdt->parent = dev;
+diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
+index c2a9e2cc03de93..6828a2cff02c8a 100644
+--- a/fs/ceph/addr.c
++++ b/fs/ceph/addr.c
+@@ -396,6 +396,15 @@ static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
+ 		struct page **pages;
+ 		size_t page_off;
+ 
++		/*
++		 * FIXME: io_iter.count needs to be corrected to the aligned
++		 * length. Otherwise, iov_iter_get_pages_alloc2() operates
++		 * with the initial unaligned length value. As a result,
++		 * ceph_msg_data_cursor_init() triggers a BUG_ON() when
++		 * msg->sparse_read_total > msg->data_length.
++		 */
++		subreq->io_iter.count = len;
++
+ 		err = iov_iter_get_pages_alloc2(&subreq->io_iter, &pages, len, &page_off);
+ 		if (err < 0) {
+ 			doutc(cl, "%llx.%llx failed to allocate pages, %d\n",
+diff --git a/fs/ceph/super.c b/fs/ceph/super.c
+index c235f9a60394c2..b61074b377ac58 100644
+--- a/fs/ceph/super.c
++++ b/fs/ceph/super.c
+@@ -1227,6 +1227,7 @@ static int ceph_set_super(struct super_block *s, struct fs_context *fc)
+ 	s->s_time_min = 0;
+ 	s->s_time_max = U32_MAX;
+ 	s->s_flags |= SB_NODIRATIME | SB_NOATIME;
++	s->s_magic = CEPH_SUPER_MAGIC;
+ 
+ 	ceph_fscrypt_set_ops(s);
+ 
+diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
+index 43d6bde1adcc04..e5b6a427f31cd9 100644
+--- a/fs/configfs/dir.c
++++ b/fs/configfs/dir.c
+@@ -600,7 +600,7 @@ static int populate_attrs(struct config_item *item)
+ 				break;
+ 		}
+ 	}
+-	if (t->ct_bin_attrs) {
++	if (!error && t->ct_bin_attrs) {
+ 		for (i = 0; (bin_attr = t->ct_bin_attrs[i]) != NULL; i++) {
+ 			if (ops && ops->is_bin_visible && !ops->is_bin_visible(item, bin_attr, i))
+ 				continue;
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index c2e6989a568c2a..e94df69ee2e0d2 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -3369,6 +3369,13 @@ static inline unsigned int ext4_flex_bg_size(struct ext4_sb_info *sbi)
+ 	return 1 << sbi->s_log_groups_per_flex;
+ }
+ 
++static inline loff_t ext4_get_maxbytes(struct inode *inode)
++{
++	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
++		return inode->i_sb->s_maxbytes;
++	return EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
++}
++
+ #define ext4_std_error(sb, errno)				\
+ do {								\
+ 	if ((errno))						\
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index ba3419958a8320..b16d72275e1054 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -2396,18 +2396,19 @@ int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks,
+ int ext4_ext_index_trans_blocks(struct inode *inode, int extents)
+ {
+ 	int index;
+-	int depth;
+ 
+ 	/* If we are converting the inline data, only one is needed here. */
+ 	if (ext4_has_inline_data(inode))
+ 		return 1;
+ 
+-	depth = ext_depth(inode);
+-
++	/*
++	 * Extent tree can change between the time we estimate credits and
++	 * the time we actually modify the tree. Assume the worst case.
++	 */
+ 	if (extents <= 1)
+-		index = depth * 2;
++		index = EXT4_MAX_EXTENT_DEPTH * 2;
+ 	else
+-		index = depth * 3;
++		index = EXT4_MAX_EXTENT_DEPTH * 3;
+ 
+ 	return index;
+ }
+@@ -4976,12 +4977,7 @@ static const struct iomap_ops ext4_iomap_xattr_ops = {
+ 
+ static int ext4_fiemap_check_ranges(struct inode *inode, u64 start, u64 *len)
+ {
+-	u64 maxbytes;
+-
+-	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
+-		maxbytes = inode->i_sb->s_maxbytes;
+-	else
+-		maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
++	u64 maxbytes = ext4_get_maxbytes(inode);
+ 
+ 	if (*len == 0)
+ 		return -EINVAL;
+diff --git a/fs/ext4/file.c b/fs/ext4/file.c
+index f14aed14b9cf3d..6c692151b0d6c5 100644
+--- a/fs/ext4/file.c
++++ b/fs/ext4/file.c
+@@ -896,12 +896,7 @@ static int ext4_file_open(struct inode *inode, struct file *filp)
+ loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
+ {
+ 	struct inode *inode = file->f_mapping->host;
+-	loff_t maxbytes;
+-
+-	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
+-		maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
+-	else
+-		maxbytes = inode->i_sb->s_maxbytes;
++	loff_t maxbytes = ext4_get_maxbytes(inode);
+ 
+ 	switch (whence) {
+ 	default:
+diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
+index 3536ca7e4fccab..05b148d6fc7114 100644
+--- a/fs/ext4/inline.c
++++ b/fs/ext4/inline.c
+@@ -392,7 +392,7 @@ static int ext4_update_inline_data(handle_t *handle, struct inode *inode,
+ }
+ 
+ static int ext4_prepare_inline_data(handle_t *handle, struct inode *inode,
+-				    unsigned int len)
++				    loff_t len)
+ {
+ 	int ret, size, no_expand;
+ 	struct ext4_inode_info *ei = EXT4_I(inode);
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 38fe9a213d09b7..f769f5cb6deb78 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -1000,7 +1000,12 @@ int ext4_walk_page_buffers(handle_t *handle, struct inode *inode,
+  */
+ static int ext4_dirty_journalled_data(handle_t *handle, struct buffer_head *bh)
+ {
+-	folio_mark_dirty(bh->b_folio);
++	struct folio *folio = bh->b_folio;
++	struct inode *inode = folio->mapping->host;
++
++	/* only regular files have a_ops */
++	if (S_ISREG(inode->i_mode))
++		folio_mark_dirty(folio);
+ 	return ext4_handle_dirty_metadata(handle, NULL, bh);
+ }
+ 
+@@ -4928,7 +4933,8 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
+ 		ei->i_file_acl |=
+ 			((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
+ 	inode->i_size = ext4_isize(sb, raw_inode);
+-	if ((size = i_size_read(inode)) < 0) {
++	size = i_size_read(inode);
++	if (size < 0 || size > ext4_get_maxbytes(inode)) {
+ 		ext4_error_inode(inode, function, line, 0,
+ 				 "iget: bad i_size value: %lld", size);
+ 		ret = -EFSCORRUPTED;
+diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
+index 7f26440e8595a1..b05bb7bfa14c5d 100644
+--- a/fs/f2fs/compress.c
++++ b/fs/f2fs/compress.c
+@@ -178,8 +178,7 @@ void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct folio *folio)
+ #ifdef CONFIG_F2FS_FS_LZO
+ static int lzo_init_compress_ctx(struct compress_ctx *cc)
+ {
+-	cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
+-				LZO1X_MEM_COMPRESS, GFP_NOFS);
++	cc->private = f2fs_vmalloc(LZO1X_MEM_COMPRESS);
+ 	if (!cc->private)
+ 		return -ENOMEM;
+ 
+@@ -189,7 +188,7 @@ static int lzo_init_compress_ctx(struct compress_ctx *cc)
+ 
+ static void lzo_destroy_compress_ctx(struct compress_ctx *cc)
+ {
+-	kvfree(cc->private);
++	vfree(cc->private);
+ 	cc->private = NULL;
+ }
+ 
+@@ -246,7 +245,7 @@ static int lz4_init_compress_ctx(struct compress_ctx *cc)
+ 		size = LZ4HC_MEM_COMPRESS;
+ #endif
+ 
+-	cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode), size, GFP_NOFS);
++	cc->private = f2fs_vmalloc(size);
+ 	if (!cc->private)
+ 		return -ENOMEM;
+ 
+@@ -261,7 +260,7 @@ static int lz4_init_compress_ctx(struct compress_ctx *cc)
+ 
+ static void lz4_destroy_compress_ctx(struct compress_ctx *cc)
+ {
+-	kvfree(cc->private);
++	vfree(cc->private);
+ 	cc->private = NULL;
+ }
+ 
+@@ -342,8 +341,7 @@ static int zstd_init_compress_ctx(struct compress_ctx *cc)
+ 	params = zstd_get_params(level, cc->rlen);
+ 	workspace_size = zstd_cstream_workspace_bound(&params.cParams);
+ 
+-	workspace = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
+-					workspace_size, GFP_NOFS);
++	workspace = f2fs_vmalloc(workspace_size);
+ 	if (!workspace)
+ 		return -ENOMEM;
+ 
+@@ -351,7 +349,7 @@ static int zstd_init_compress_ctx(struct compress_ctx *cc)
+ 	if (!stream) {
+ 		f2fs_err_ratelimited(F2FS_I_SB(cc->inode),
+ 				"%s zstd_init_cstream failed", __func__);
+-		kvfree(workspace);
++		vfree(workspace);
+ 		return -EIO;
+ 	}
+ 
+@@ -364,7 +362,7 @@ static int zstd_init_compress_ctx(struct compress_ctx *cc)
+ 
+ static void zstd_destroy_compress_ctx(struct compress_ctx *cc)
+ {
+-	kvfree(cc->private);
++	vfree(cc->private);
+ 	cc->private = NULL;
+ 	cc->private2 = NULL;
+ }
+@@ -423,8 +421,7 @@ static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic)
+ 
+ 	workspace_size = zstd_dstream_workspace_bound(max_window_size);
+ 
+-	workspace = f2fs_kvmalloc(F2FS_I_SB(dic->inode),
+-					workspace_size, GFP_NOFS);
++	workspace = f2fs_vmalloc(workspace_size);
+ 	if (!workspace)
+ 		return -ENOMEM;
+ 
+@@ -432,7 +429,7 @@ static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic)
+ 	if (!stream) {
+ 		f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
+ 				"%s zstd_init_dstream failed", __func__);
+-		kvfree(workspace);
++		vfree(workspace);
+ 		return -EIO;
+ 	}
+ 
+@@ -444,7 +441,7 @@ static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic)
+ 
+ static void zstd_destroy_decompress_ctx(struct decompress_io_ctx *dic)
+ {
+-	kvfree(dic->private);
++	vfree(dic->private);
+ 	dic->private = NULL;
+ 	dic->private2 = NULL;
+ }
+diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
+index 1219e37fa7ad3c..61b715cc2e231b 100644
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -3494,6 +3494,11 @@ static inline void *f2fs_kvzalloc(struct f2fs_sb_info *sbi,
+ 	return f2fs_kvmalloc(sbi, size, flags | __GFP_ZERO);
+ }
+ 
++static inline void *f2fs_vmalloc(size_t size)
++{
++	return vmalloc(size);
++}
++
+ static inline int get_extra_isize(struct inode *inode)
+ {
+ 	return F2FS_I(inode)->i_extra_isize / sizeof(__le32);
+diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
+index 1061991434b119..06688b9957c81f 100644
+--- a/fs/f2fs/inode.c
++++ b/fs/f2fs/inode.c
+@@ -34,7 +34,9 @@ void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync)
+ 	if (f2fs_inode_dirtied(inode, sync))
+ 		return;
+ 
+-	if (f2fs_is_atomic_file(inode))
++	/* only atomic file w/ FI_ATOMIC_COMMITTED can be set vfs dirty */
++	if (f2fs_is_atomic_file(inode) &&
++			!is_inode_flag_set(inode, FI_ATOMIC_COMMITTED))
+ 		return;
+ 
+ 	mark_inode_dirty_sync(inode);
+@@ -286,6 +288,12 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
+ 		return false;
+ 	}
+ 
++	if (ino_of_node(node_page) == fi->i_xattr_nid) {
++		f2fs_warn(sbi, "%s: corrupted inode i_ino=%lx, xnid=%x, run fsck to fix.",
++			  __func__, inode->i_ino, fi->i_xattr_nid);
++		return false;
++	}
++
+ 	if (f2fs_has_extra_attr(inode)) {
+ 		if (!f2fs_sb_has_extra_attr(sbi)) {
+ 			f2fs_warn(sbi, "%s: inode (ino=%lx) is with extra_attr, but extra_attr feature is off",
+diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
+index 6f70f377f12115..781b872fac8c2e 100644
+--- a/fs/f2fs/namei.c
++++ b/fs/f2fs/namei.c
+@@ -560,6 +560,15 @@ static int f2fs_unlink(struct inode *dir, struct dentry *dentry)
+ 		goto fail;
+ 	}
+ 
++	if (unlikely(inode->i_nlink == 0)) {
++		f2fs_warn(F2FS_I_SB(inode), "%s: inode (ino=%lx) has zero i_nlink",
++			  __func__, inode->i_ino);
++		err = -EFSCORRUPTED;
++		set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
++		f2fs_put_page(page, 0);
++		goto fail;
++	}
++
+ 	f2fs_balance_fs(sbi, true);
+ 
+ 	f2fs_lock_op(sbi);
+diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
+index b9ffb2ee9548ae..449c0acbfabc03 100644
+--- a/fs/f2fs/segment.c
++++ b/fs/f2fs/segment.c
+@@ -370,7 +370,13 @@ static int __f2fs_commit_atomic_write(struct inode *inode)
+ 	} else {
+ 		sbi->committed_atomic_block += fi->atomic_write_cnt;
+ 		set_inode_flag(inode, FI_ATOMIC_COMMITTED);
++
++		/*
++		 * inode may have no FI_ATOMIC_DIRTIED flag due to no write
++		 * before commit.
++		 */
+ 		if (is_inode_flag_set(inode, FI_ATOMIC_DIRTIED)) {
++			/* clear atomic dirty status and set vfs dirty status */
+ 			clear_inode_flag(inode, FI_ATOMIC_DIRTIED);
+ 			f2fs_mark_inode_dirty_sync(inode, true);
+ 		}
+@@ -2772,7 +2778,11 @@ static int get_new_segment(struct f2fs_sb_info *sbi,
+ 	}
+ got_it:
+ 	/* set it as dirty segment in free segmap */
+-	f2fs_bug_on(sbi, test_bit(segno, free_i->free_segmap));
++	if (test_bit(segno, free_i->free_segmap)) {
++		ret = -EFSCORRUPTED;
++		f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_CORRUPTED_FREE_BITMAP);
++		goto out_unlock;
++	}
+ 
+ 	/* no free section in conventional zone */
+ 	if (new_sec && pinning &&
+diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
+index faa76531246ebb..330f89ddb5c8f3 100644
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -1516,7 +1516,9 @@ int f2fs_inode_dirtied(struct inode *inode, bool sync)
+ 	}
+ 	spin_unlock(&sbi->inode_lock[DIRTY_META]);
+ 
+-	if (!ret && f2fs_is_atomic_file(inode))
++	/* if atomic write is not committed, set inode w/ atomic dirty */
++	if (!ret && f2fs_is_atomic_file(inode) &&
++			!is_inode_flag_set(inode, FI_ATOMIC_COMMITTED))
+ 		set_inode_flag(inode, FI_ATOMIC_DIRTIED);
+ 
+ 	return ret;
+@@ -3659,6 +3661,7 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
+ 	block_t user_block_count, valid_user_blocks;
+ 	block_t avail_node_count, valid_node_count;
+ 	unsigned int nat_blocks, nat_bits_bytes, nat_bits_blocks;
++	unsigned int sit_blk_cnt;
+ 	int i, j;
+ 
+ 	total = le32_to_cpu(raw_super->segment_count);
+@@ -3770,6 +3773,13 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
+ 		return 1;
+ 	}
+ 
++	sit_blk_cnt = DIV_ROUND_UP(main_segs, SIT_ENTRY_PER_BLOCK);
++	if (sit_bitmap_size * 8 < sit_blk_cnt) {
++		f2fs_err(sbi, "Wrong bitmap size: sit: %u, sit_blk_cnt:%u",
++			 sit_bitmap_size, sit_blk_cnt);
++		return 1;
++	}
++
+ 	cp_pack_start_sum = __start_sum_addr(sbi);
+ 	cp_payload = __cp_payload(sbi);
+ 	if (cp_pack_start_sum < cp_payload + 1 ||
+diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
+index fa5134df985f75..9e27dd8bef88d8 100644
+--- a/fs/gfs2/lock_dlm.c
++++ b/fs/gfs2/lock_dlm.c
+@@ -975,14 +975,15 @@ static int control_mount(struct gfs2_sbd *sdp)
+ 		if (sdp->sd_args.ar_spectator) {
+ 			fs_info(sdp, "Recovery is required. Waiting for a "
+ 				"non-spectator to mount.\n");
++			spin_unlock(&ls->ls_recover_spin);
+ 			msleep_interruptible(1000);
+ 		} else {
+ 			fs_info(sdp, "control_mount wait1 block %u start %u "
+ 				"mount %u lvb %u flags %lx\n", block_gen,
+ 				start_gen, mount_gen, lvb_gen,
+ 				ls->ls_recover_flags);
++			spin_unlock(&ls->ls_recover_spin);
+ 		}
+-		spin_unlock(&ls->ls_recover_spin);
+ 		goto restart;
+ 	}
+ 
+diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
+index 47038e6608123c..d5da9817df9b36 100644
+--- a/fs/isofs/inode.c
++++ b/fs/isofs/inode.c
+@@ -1275,6 +1275,7 @@ static int isofs_read_inode(struct inode *inode, int relocated)
+ 	unsigned long offset;
+ 	struct iso_inode_info *ei = ISOFS_I(inode);
+ 	int ret = -EIO;
++	struct timespec64 ts;
+ 
+ 	block = ei->i_iget5_block;
+ 	bh = sb_bread(inode->i_sb, block);
+@@ -1387,8 +1388,10 @@ static int isofs_read_inode(struct inode *inode, int relocated)
+ 			inode->i_ino, de->flags[-high_sierra]);
+ 	}
+ #endif
+-	inode_set_mtime_to_ts(inode,
+-			      inode_set_atime_to_ts(inode, inode_set_ctime(inode, iso_date(de->date, high_sierra), 0)));
++	ts = iso_date(de->date, high_sierra ? ISO_DATE_HIGH_SIERRA : 0);
++	inode_set_ctime_to_ts(inode, ts);
++	inode_set_atime_to_ts(inode, ts);
++	inode_set_mtime_to_ts(inode, ts);
+ 
+ 	ei->i_first_extent = (isonum_733(de->extent) +
+ 			isonum_711(de->ext_attr_length));
+diff --git a/fs/isofs/isofs.h b/fs/isofs/isofs.h
+index 2d55207c9a9902..50655583753334 100644
+--- a/fs/isofs/isofs.h
++++ b/fs/isofs/isofs.h
+@@ -106,7 +106,9 @@ static inline unsigned int isonum_733(u8 *p)
+ 	/* Ignore bigendian datum due to broken mastering programs */
+ 	return get_unaligned_le32(p);
+ }
+-extern int iso_date(u8 *, int);
++#define ISO_DATE_HIGH_SIERRA (1 << 0)
++#define ISO_DATE_LONG_FORM (1 << 1)
++struct timespec64 iso_date(u8 *p, int flags);
+ 
+ struct inode;		/* To make gcc happy */
+ 
+diff --git a/fs/isofs/rock.c b/fs/isofs/rock.c
+index dbf911126e610e..576498245b9d7c 100644
+--- a/fs/isofs/rock.c
++++ b/fs/isofs/rock.c
+@@ -412,7 +412,12 @@ parse_rock_ridge_inode_internal(struct iso_directory_record *de,
+ 				}
+ 			}
+ 			break;
+-		case SIG('T', 'F'):
++		case SIG('T', 'F'): {
++			int flags, size, slen;
++
++			flags = rr->u.TF.flags & TF_LONG_FORM ? ISO_DATE_LONG_FORM : 0;
++			size = rr->u.TF.flags & TF_LONG_FORM ? 17 : 7;
++			slen = rr->len - 5;
+ 			/*
+ 			 * Some RRIP writers incorrectly place ctime in the
+ 			 * TF_CREATE field. Try to handle this correctly for
+@@ -420,27 +425,28 @@ parse_rock_ridge_inode_internal(struct iso_directory_record *de,
+ 			 */
+ 			/* Rock ridge never appears on a High Sierra disk */
+ 			cnt = 0;
+-			if (rr->u.TF.flags & TF_CREATE) {
+-				inode_set_ctime(inode,
+-						iso_date(rr->u.TF.times[cnt++].time, 0),
+-						0);
++			if ((rr->u.TF.flags & TF_CREATE) && size <= slen) {
++				inode_set_ctime_to_ts(inode,
++						iso_date(rr->u.TF.data + size * cnt++, flags));
++				slen -= size;
+ 			}
+-			if (rr->u.TF.flags & TF_MODIFY) {
+-				inode_set_mtime(inode,
+-						iso_date(rr->u.TF.times[cnt++].time, 0),
+-						0);
++			if ((rr->u.TF.flags & TF_MODIFY) && size <= slen) {
++				inode_set_mtime_to_ts(inode,
++						iso_date(rr->u.TF.data + size * cnt++, flags));
++				slen -= size;
+ 			}
+-			if (rr->u.TF.flags & TF_ACCESS) {
+-				inode_set_atime(inode,
+-						iso_date(rr->u.TF.times[cnt++].time, 0),
+-						0);
++			if ((rr->u.TF.flags & TF_ACCESS) && size <= slen) {
++				inode_set_atime_to_ts(inode,
++						iso_date(rr->u.TF.data + size * cnt++, flags));
++				slen -= size;
+ 			}
+-			if (rr->u.TF.flags & TF_ATTRIBUTES) {
+-				inode_set_ctime(inode,
+-						iso_date(rr->u.TF.times[cnt++].time, 0),
+-						0);
++			if ((rr->u.TF.flags & TF_ATTRIBUTES) && size <= slen) {
++				inode_set_ctime_to_ts(inode,
++						iso_date(rr->u.TF.data + size * cnt++, flags));
++				slen -= size;
+ 			}
+ 			break;
++		}
+ 		case SIG('S', 'L'):
+ 			{
+ 				int slen;
+diff --git a/fs/isofs/rock.h b/fs/isofs/rock.h
+index 7755e587f77850..c0856fa9bb6a4e 100644
+--- a/fs/isofs/rock.h
++++ b/fs/isofs/rock.h
+@@ -65,13 +65,9 @@ struct RR_PL_s {
+ 	__u8 location[8];
+ };
+ 
+-struct stamp {
+-	__u8 time[7];		/* actually 6 unsigned, 1 signed */
+-} __attribute__ ((packed));
+-
+ struct RR_TF_s {
+ 	__u8 flags;
+-	struct stamp times[];	/* Variable number of these beasts */
++	__u8 data[];
+ } __attribute__ ((packed));
+ 
+ /* Linux-specific extension for transparent decompression */
+diff --git a/fs/isofs/util.c b/fs/isofs/util.c
+index e88dba72166187..42f479da0b282c 100644
+--- a/fs/isofs/util.c
++++ b/fs/isofs/util.c
+@@ -16,29 +16,44 @@
+  * to GMT.  Thus  we should always be correct.
+  */
+ 
+-int iso_date(u8 *p, int flag)
++struct timespec64 iso_date(u8 *p, int flags)
+ {
+ 	int year, month, day, hour, minute, second, tz;
+-	int crtime;
++	struct timespec64 ts;
++
++	if (flags & ISO_DATE_LONG_FORM) {
++		year = (p[0] - '0') * 1000 +
++		       (p[1] - '0') * 100 +
++		       (p[2] - '0') * 10 +
++		       (p[3] - '0') - 1900;
++		month = ((p[4] - '0') * 10 + (p[5] - '0'));
++		day = ((p[6] - '0') * 10 + (p[7] - '0'));
++		hour = ((p[8] - '0') * 10 + (p[9] - '0'));
++		minute = ((p[10] - '0') * 10 + (p[11] - '0'));
++		second = ((p[12] - '0') * 10 + (p[13] - '0'));
++		ts.tv_nsec = ((p[14] - '0') * 10 + (p[15] - '0')) * 10000000;
++		tz = p[16];
++	} else {
++		year = p[0];
++		month = p[1];
++		day = p[2];
++		hour = p[3];
++		minute = p[4];
++		second = p[5];
++		ts.tv_nsec = 0;
++		/* High sierra has no time zone */
++		tz = flags & ISO_DATE_HIGH_SIERRA ? 0 : p[6];
++	}
+ 
+-	year = p[0];
+-	month = p[1];
+-	day = p[2];
+-	hour = p[3];
+-	minute = p[4];
+-	second = p[5];
+-	if (flag == 0) tz = p[6]; /* High sierra has no time zone */
+-	else tz = 0;
+-	
+ 	if (year < 0) {
+-		crtime = 0;
++		ts.tv_sec = 0;
+ 	} else {
+-		crtime = mktime64(year+1900, month, day, hour, minute, second);
++		ts.tv_sec = mktime64(year+1900, month, day, hour, minute, second);
+ 
+ 		/* sign extend */
+ 		if (tz & 0x80)
+ 			tz |= (-1 << 8);
+-		
++
+ 		/* 
+ 		 * The timezone offset is unreliable on some disks,
+ 		 * so we make a sanity check.  In no case is it ever
+@@ -65,7 +80,7 @@ int iso_date(u8 *p, int flag)
+ 		 * for pointing out the sign error.
+ 		 */
+ 		if (-52 <= tz && tz <= 52)
+-			crtime -= tz * 15 * 60;
++			ts.tv_sec -= tz * 15 * 60;
+ 	}
+-	return crtime;
+-}		
++	return ts;
++}
+diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
+index 66513c18ca294b..f440110df93a9b 100644
+--- a/fs/jbd2/transaction.c
++++ b/fs/jbd2/transaction.c
+@@ -1513,7 +1513,7 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
+ 				jh->b_next_transaction == transaction);
+ 		spin_unlock(&jh->b_state_lock);
+ 	}
+-	if (jh->b_modified == 1) {
++	if (data_race(jh->b_modified == 1)) {
+ 		/* If it's in our transaction it must be in BJ_Metadata list. */
+ 		if (data_race(jh->b_transaction == transaction &&
+ 		    jh->b_jlist != BJ_Metadata)) {
+@@ -1532,7 +1532,6 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
+ 		goto out;
+ 	}
+ 
+-	journal = transaction->t_journal;
+ 	spin_lock(&jh->b_state_lock);
+ 
+ 	if (is_handle_aborted(handle)) {
+@@ -1547,6 +1546,8 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
+ 		goto out_unlock_bh;
+ 	}
+ 
++	journal = transaction->t_journal;
++
+ 	if (jh->b_modified == 0) {
+ 		/*
+ 		 * This buffer's got modified and becoming part
+diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
+index ef3a1e1b6cb065..fda9f4d6093f94 100644
+--- a/fs/jffs2/erase.c
++++ b/fs/jffs2/erase.c
+@@ -425,7 +425,9 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
+ 			.totlen =	cpu_to_je32(c->cleanmarker_size)
+ 		};
+ 
+-		jffs2_prealloc_raw_node_refs(c, jeb, 1);
++		ret = jffs2_prealloc_raw_node_refs(c, jeb, 1);
++		if (ret)
++			goto filebad;
+ 
+ 		marker.hdr_crc = cpu_to_je32(crc32(0, &marker, sizeof(struct jffs2_unknown_node)-4));
+ 
+diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c
+index 29671e33a1714c..62879c218d4b11 100644
+--- a/fs/jffs2/scan.c
++++ b/fs/jffs2/scan.c
+@@ -256,7 +256,9 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
+ 
+ 		jffs2_dbg(1, "%s(): Skipping %d bytes in nextblock to ensure page alignment\n",
+ 			  __func__, skip);
+-		jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
++		ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
++		if (ret)
++			goto out;
+ 		jffs2_scan_dirty_space(c, c->nextblock, skip);
+ 	}
+ #endif
+diff --git a/fs/jffs2/summary.c b/fs/jffs2/summary.c
+index 4fe64519870f1a..d83372d3e1a07b 100644
+--- a/fs/jffs2/summary.c
++++ b/fs/jffs2/summary.c
+@@ -858,7 +858,10 @@ int jffs2_sum_write_sumnode(struct jffs2_sb_info *c)
+ 	spin_unlock(&c->erase_completion_lock);
+ 
+ 	jeb = c->nextblock;
+-	jffs2_prealloc_raw_node_refs(c, jeb, 1);
++	ret = jffs2_prealloc_raw_node_refs(c, jeb, 1);
++
++	if (ret)
++		goto out;
+ 
+ 	if (!c->summary->sum_num || !c->summary->sum_list_head) {
+ 		JFFS2_WARNING("Empty summary info!!!\n");
+@@ -872,6 +875,8 @@ int jffs2_sum_write_sumnode(struct jffs2_sb_info *c)
+ 	datasize += padsize;
+ 
+ 	ret = jffs2_sum_write_data(c, jeb, infosize, datasize, padsize);
++
++out:
+ 	spin_lock(&c->erase_completion_lock);
+ 	return ret;
+ }
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 11f2b5cb3b06b2..57d49e874f51fa 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -3957,8 +3957,9 @@ static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *f
+ 		     FATTR4_WORD0_CASE_INSENSITIVE |
+ 		     FATTR4_WORD0_CASE_PRESERVING;
+ 	if (minorversion)
+-		bitmask[2] = FATTR4_WORD2_SUPPATTR_EXCLCREAT |
+-			     FATTR4_WORD2_OPEN_ARGUMENTS;
++		bitmask[2] = FATTR4_WORD2_SUPPATTR_EXCLCREAT;
++	if (minorversion > 1)
++		bitmask[2] |= FATTR4_WORD2_OPEN_ARGUMENTS;
+ 
+ 	status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
+ 	if (status == 0) {
+diff --git a/fs/nfs/read.c b/fs/nfs/read.c
+index 81bd1b9aba176f..3c1fa320b3f1bd 100644
+--- a/fs/nfs/read.c
++++ b/fs/nfs/read.c
+@@ -56,7 +56,8 @@ static int nfs_return_empty_folio(struct folio *folio)
+ {
+ 	folio_zero_segment(folio, 0, folio_size(folio));
+ 	folio_mark_uptodate(folio);
+-	folio_unlock(folio);
++	if (nfs_netfs_folio_unlock(folio))
++		folio_unlock(folio);
+ 	return 0;
+ }
+ 
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index 7a1fdafa42ea17..02c9f3b312a0e8 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -3658,7 +3658,8 @@ bool nfsd4_spo_must_allow(struct svc_rqst *rqstp)
+ 	struct nfs4_op_map *allow = &cstate->clp->cl_spo_must_allow;
+ 	u32 opiter;
+ 
+-	if (!cstate->minorversion)
++	if (rqstp->rq_procinfo != &nfsd_version4.vs_proc[NFSPROC4_COMPOUND] ||
++	    cstate->minorversion == 0)
+ 		return false;
+ 
+ 	if (cstate->spo_must_allowed)
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index 2fc1919dd3c09f..6edeb3bdf81b50 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -3382,6 +3382,23 @@ static __be32 nfsd4_encode_fattr4_suppattr_exclcreat(struct xdr_stream *xdr,
+ 	return nfsd4_encode_bitmap4(xdr, supp[0], supp[1], supp[2]);
+ }
+ 
++/*
++ * Copied from generic_remap_checks/generic_remap_file_range_prep.
++ *
++ * These generic functions use the file system's s_blocksize, but
++ * individual file systems aren't required to use
++ * generic_remap_file_range_prep. Until there is a mechanism for
++ * determining a particular file system's (or file's) clone block
++ * size, this is the best NFSD can do.
++ */
++static __be32 nfsd4_encode_fattr4_clone_blksize(struct xdr_stream *xdr,
++						const struct nfsd4_fattr_args *args)
++{
++	struct inode *inode = d_inode(args->dentry);
++
++	return nfsd4_encode_uint32_t(xdr, inode->i_sb->s_blocksize);
++}
++
+ #ifdef CONFIG_NFSD_V4_SECURITY_LABEL
+ static __be32 nfsd4_encode_fattr4_sec_label(struct xdr_stream *xdr,
+ 					    const struct nfsd4_fattr_args *args)
+@@ -3487,7 +3504,7 @@ static const nfsd4_enc_attr nfsd4_enc_fattr4_encode_ops[] = {
+ 	[FATTR4_MODE_SET_MASKED]	= nfsd4_encode_fattr4__noop,
+ 	[FATTR4_SUPPATTR_EXCLCREAT]	= nfsd4_encode_fattr4_suppattr_exclcreat,
+ 	[FATTR4_FS_CHARSET_CAP]		= nfsd4_encode_fattr4__noop,
+-	[FATTR4_CLONE_BLKSIZE]		= nfsd4_encode_fattr4__noop,
++	[FATTR4_CLONE_BLKSIZE]		= nfsd4_encode_fattr4_clone_blksize,
+ 	[FATTR4_SPACE_FREED]		= nfsd4_encode_fattr4__noop,
+ 	[FATTR4_CHANGE_ATTR_TYPE]	= nfsd4_encode_fattr4__noop,
+ 
+diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
+index 2e835e7c107ee0..dcaa31706394cf 100644
+--- a/fs/nfsd/nfsctl.c
++++ b/fs/nfsd/nfsctl.c
+@@ -1653,7 +1653,7 @@ int nfsd_nl_rpc_status_get_dumpit(struct sk_buff *skb,
+  */
+ int nfsd_nl_threads_set_doit(struct sk_buff *skb, struct genl_info *info)
+ {
+-	int *nthreads, count = 0, nrpools, i, ret = -EOPNOTSUPP, rem;
++	int *nthreads, nrpools = 0, i, ret = -EOPNOTSUPP, rem;
+ 	struct net *net = genl_info_net(info);
+ 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ 	const struct nlattr *attr;
+@@ -1665,12 +1665,11 @@ int nfsd_nl_threads_set_doit(struct sk_buff *skb, struct genl_info *info)
+ 	/* count number of SERVER_THREADS values */
+ 	nlmsg_for_each_attr(attr, info->nlhdr, GENL_HDRLEN, rem) {
+ 		if (nla_type(attr) == NFSD_A_SERVER_THREADS)
+-			count++;
++			nrpools++;
+ 	}
+ 
+ 	mutex_lock(&nfsd_mutex);
+ 
+-	nrpools = max(count, nfsd_nrpools(net));
+ 	nthreads = kcalloc(nrpools, sizeof(int), GFP_KERNEL);
+ 	if (!nthreads) {
+ 		ret = -ENOMEM;
+@@ -2331,12 +2330,9 @@ static int __init init_nfsd(void)
+ 	if (retval)
+ 		goto out_free_pnfs;
+ 	nfsd_lockd_init();	/* lockd->nfsd callbacks */
+-	retval = create_proc_exports_entry();
+-	if (retval)
+-		goto out_free_lockd;
+ 	retval = register_pernet_subsys(&nfsd_net_ops);
+ 	if (retval < 0)
+-		goto out_free_exports;
++		goto out_free_lockd;
+ 	retval = register_cld_notifier();
+ 	if (retval)
+ 		goto out_free_subsys;
+@@ -2345,22 +2341,26 @@ static int __init init_nfsd(void)
+ 		goto out_free_cld;
+ 	retval = register_filesystem(&nfsd_fs_type);
+ 	if (retval)
+-		goto out_free_all;
++		goto out_free_nfsd4;
+ 	retval = genl_register_family(&nfsd_nl_family);
++	if (retval)
++		goto out_free_filesystem;
++	retval = create_proc_exports_entry();
+ 	if (retval)
+ 		goto out_free_all;
+ 	nfsd_localio_ops_init();
+ 
+ 	return 0;
+ out_free_all:
++	genl_unregister_family(&nfsd_nl_family);
++out_free_filesystem:
++	unregister_filesystem(&nfsd_fs_type);
++out_free_nfsd4:
+ 	nfsd4_destroy_laundry_wq();
+ out_free_cld:
+ 	unregister_cld_notifier();
+ out_free_subsys:
+ 	unregister_pernet_subsys(&nfsd_net_ops);
+-out_free_exports:
+-	remove_proc_entry("fs/nfs/exports", NULL);
+-	remove_proc_entry("fs/nfs", NULL);
+ out_free_lockd:
+ 	nfsd_lockd_shutdown();
+ 	nfsd_drc_slab_free();
+@@ -2373,14 +2373,14 @@ static int __init init_nfsd(void)
+ 
+ static void __exit exit_nfsd(void)
+ {
++	remove_proc_entry("fs/nfs/exports", NULL);
++	remove_proc_entry("fs/nfs", NULL);
+ 	genl_unregister_family(&nfsd_nl_family);
+ 	unregister_filesystem(&nfsd_fs_type);
+ 	nfsd4_destroy_laundry_wq();
+ 	unregister_cld_notifier();
+ 	unregister_pernet_subsys(&nfsd_net_ops);
+ 	nfsd_drc_slab_free();
+-	remove_proc_entry("fs/nfs/exports", NULL);
+-	remove_proc_entry("fs/nfs", NULL);
+ 	nfsd_lockd_shutdown();
+ 	nfsd4_free_slabs();
+ 	nfsd4_exit_pnfs();
+diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
+index 49e2f32102ab59..45f1bb2c6f136d 100644
+--- a/fs/nfsd/nfssvc.c
++++ b/fs/nfsd/nfssvc.c
+@@ -406,13 +406,13 @@ static int nfsd_startup_net(struct net *net, const struct cred *cred)
+ 	if (ret)
+ 		goto out_filecache;
+ 
++#ifdef CONFIG_NFSD_V4_2_INTER_SSC
++	nfsd4_ssc_init_umount_work(nn);
++#endif
+ 	ret = nfs4_state_start_net(net);
+ 	if (ret)
+ 		goto out_reply_cache;
+ 
+-#ifdef CONFIG_NFSD_V4_2_INTER_SSC
+-	nfsd4_ssc_init_umount_work(nn);
+-#endif
+ 	nn->nfsd_net_up = true;
+ 	return 0;
+ 
+diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c
+index 4444c78e2e0c34..94095058da34ec 100644
+--- a/fs/overlayfs/file.c
++++ b/fs/overlayfs/file.c
+@@ -48,8 +48,8 @@ static struct file *ovl_open_realfile(const struct file *file,
+ 		if (!inode_owner_or_capable(real_idmap, realinode))
+ 			flags &= ~O_NOATIME;
+ 
+-		realfile = backing_file_open(&file->f_path, flags, realpath,
+-					     current_cred());
++		realfile = backing_file_open(file_user_path((struct file *) file),
++					     flags, realpath, current_cred());
+ 	}
+ 	revert_creds(old_cred);
+ 
+diff --git a/fs/smb/client/cached_dir.c b/fs/smb/client/cached_dir.c
+index 74979466729535..d64742ba371aa5 100644
+--- a/fs/smb/client/cached_dir.c
++++ b/fs/smb/client/cached_dir.c
+@@ -484,8 +484,17 @@ void close_all_cached_dirs(struct cifs_sb_info *cifs_sb)
+ 		spin_lock(&cfids->cfid_list_lock);
+ 		list_for_each_entry(cfid, &cfids->entries, entry) {
+ 			tmp_list = kmalloc(sizeof(*tmp_list), GFP_ATOMIC);
+-			if (tmp_list == NULL)
+-				break;
++			if (tmp_list == NULL) {
++				/*
++				 * If the malloc() fails, we won't drop all
++				 * dentries, and unmounting is likely to trigger
++				 * a 'Dentry still in use' error.
++				 */
++				cifs_tcon_dbg(VFS, "Out of memory while dropping dentries\n");
++				spin_unlock(&cfids->cfid_list_lock);
++				spin_unlock(&cifs_sb->tlink_tree_lock);
++				goto done;
++			}
+ 			spin_lock(&cfid->fid_lock);
+ 			tmp_list->dentry = cfid->dentry;
+ 			cfid->dentry = NULL;
+@@ -497,6 +506,7 @@ void close_all_cached_dirs(struct cifs_sb_info *cifs_sb)
+ 	}
+ 	spin_unlock(&cifs_sb->tlink_tree_lock);
+ 
++done:
+ 	list_for_each_entry_safe(tmp_list, q, &entry, entry) {
+ 		list_del(&tmp_list->entry);
+ 		dput(tmp_list->dentry);
+diff --git a/fs/smb/client/cached_dir.h b/fs/smb/client/cached_dir.h
+index 1dfe79d947a62f..bc8a812ff95f8b 100644
+--- a/fs/smb/client/cached_dir.h
++++ b/fs/smb/client/cached_dir.h
+@@ -21,10 +21,10 @@ struct cached_dirent {
+ struct cached_dirents {
+ 	bool is_valid:1;
+ 	bool is_failed:1;
+-	struct dir_context *ctx; /*
+-				  * Only used to make sure we only take entries
+-				  * from a single context. Never dereferenced.
+-				  */
++	struct file *file; /*
++			    * Used to associate the cache with a single
++			    * open file instance.
++			    */
+ 	struct mutex de_mutex;
+ 	int pos;		 /* Expected ctx->pos */
+ 	struct list_head entries;
+diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
+index a38b40d68b14f1..e0faee22be07e8 100644
+--- a/fs/smb/client/cifsglob.h
++++ b/fs/smb/client/cifsglob.h
+@@ -1058,6 +1058,7 @@ struct cifs_chan {
+ };
+ 
+ #define CIFS_SES_FLAG_SCALE_CHANNELS (0x1)
++#define CIFS_SES_FLAGS_PENDING_QUERY_INTERFACES (0x2)
+ 
+ /*
+  * Session structure.  One of these for each uid session with a particular host
+diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
+index 8260d0e07a6283..91f5fd818cbf4a 100644
+--- a/fs/smb/client/connect.c
++++ b/fs/smb/client/connect.c
+@@ -132,13 +132,9 @@ static void smb2_query_server_interfaces(struct work_struct *work)
+ 	rc = server->ops->query_server_interfaces(xid, tcon, false);
+ 	free_xid(xid);
+ 
+-	if (rc) {
+-		if (rc == -EOPNOTSUPP)
+-			return;
+-
++	if (rc)
+ 		cifs_dbg(FYI, "%s: failed to query server interfaces: %d\n",
+ 				__func__, rc);
+-	}
+ 
+ 	queue_delayed_work(cifsiod_wq, &tcon->query_interfaces,
+ 			   (SMB_INTERFACE_POLL_INTERVAL * HZ));
+@@ -393,6 +389,13 @@ static int __cifs_reconnect(struct TCP_Server_Info *server,
+ 	if (!cifs_tcp_ses_needs_reconnect(server, 1))
+ 		return 0;
+ 
++	/*
++	 * if smb session has been marked for reconnect, also reconnect all
++	 * connections. This way, the other connections do not end up in a bad state.
++	 */
++	if (mark_smb_session)
++		cifs_signal_cifsd_for_reconnect(server, mark_smb_session);
++
+ 	cifs_mark_tcp_ses_conns_for_reconnect(server, mark_smb_session);
+ 
+ 	cifs_abort_connection(server);
+@@ -401,7 +404,8 @@ static int __cifs_reconnect(struct TCP_Server_Info *server,
+ 		try_to_freeze();
+ 		cifs_server_lock(server);
+ 
+-		if (!cifs_swn_set_server_dstaddr(server)) {
++		if (!cifs_swn_set_server_dstaddr(server) &&
++		    !SERVER_IS_CHAN(server)) {
+ 			/* resolve the hostname again to make sure that IP address is up-to-date */
+ 			rc = reconn_set_ipaddr_from_hostname(server);
+ 			cifs_dbg(FYI, "%s: reconn_set_ipaddr_from_hostname: rc=%d\n", __func__, rc);
+@@ -3989,6 +3993,7 @@ cifs_negotiate_protocol(const unsigned int xid, struct cifs_ses *ses,
+ 		return 0;
+ 	}
+ 
++	server->lstrp = jiffies;
+ 	server->tcpStatus = CifsInNegotiate;
+ 	spin_unlock(&server->srv_lock);
+ 
+diff --git a/fs/smb/client/namespace.c b/fs/smb/client/namespace.c
+index e3f9213131c467..a6655807c0865a 100644
+--- a/fs/smb/client/namespace.c
++++ b/fs/smb/client/namespace.c
+@@ -146,6 +146,9 @@ static char *automount_fullpath(struct dentry *dentry, void *page)
+ 	}
+ 	spin_unlock(&tcon->tc_lock);
+ 
++	if (unlikely(!page))
++		return ERR_PTR(-ENOMEM);
++
+ 	s = dentry_path_raw(dentry, page, PATH_MAX);
+ 	if (IS_ERR(s))
+ 		return s;
+diff --git a/fs/smb/client/readdir.c b/fs/smb/client/readdir.c
+index 787d6bcb5d1dc4..c3feb26fcfd03a 100644
+--- a/fs/smb/client/readdir.c
++++ b/fs/smb/client/readdir.c
+@@ -850,9 +850,9 @@ static bool emit_cached_dirents(struct cached_dirents *cde,
+ }
+ 
+ static void update_cached_dirents_count(struct cached_dirents *cde,
+-					struct dir_context *ctx)
++					struct file *file)
+ {
+-	if (cde->ctx != ctx)
++	if (cde->file != file)
+ 		return;
+ 	if (cde->is_valid || cde->is_failed)
+ 		return;
+@@ -861,9 +861,9 @@ static void update_cached_dirents_count(struct cached_dirents *cde,
+ }
+ 
+ static void finished_cached_dirents_count(struct cached_dirents *cde,
+-					struct dir_context *ctx)
++					struct dir_context *ctx, struct file *file)
+ {
+-	if (cde->ctx != ctx)
++	if (cde->file != file)
+ 		return;
+ 	if (cde->is_valid || cde->is_failed)
+ 		return;
+@@ -876,11 +876,12 @@ static void finished_cached_dirents_count(struct cached_dirents *cde,
+ static void add_cached_dirent(struct cached_dirents *cde,
+ 			      struct dir_context *ctx,
+ 			      const char *name, int namelen,
+-			      struct cifs_fattr *fattr)
++			      struct cifs_fattr *fattr,
++				  struct file *file)
+ {
+ 	struct cached_dirent *de;
+ 
+-	if (cde->ctx != ctx)
++	if (cde->file != file)
+ 		return;
+ 	if (cde->is_valid || cde->is_failed)
+ 		return;
+@@ -910,7 +911,8 @@ static void add_cached_dirent(struct cached_dirents *cde,
+ static bool cifs_dir_emit(struct dir_context *ctx,
+ 			  const char *name, int namelen,
+ 			  struct cifs_fattr *fattr,
+-			  struct cached_fid *cfid)
++			  struct cached_fid *cfid,
++			  struct file *file)
+ {
+ 	bool rc;
+ 	ino_t ino = cifs_uniqueid_to_ino_t(fattr->cf_uniqueid);
+@@ -922,7 +924,7 @@ static bool cifs_dir_emit(struct dir_context *ctx,
+ 	if (cfid) {
+ 		mutex_lock(&cfid->dirents.de_mutex);
+ 		add_cached_dirent(&cfid->dirents, ctx, name, namelen,
+-				  fattr);
++				  fattr, file);
+ 		mutex_unlock(&cfid->dirents.de_mutex);
+ 	}
+ 
+@@ -1022,7 +1024,7 @@ static int cifs_filldir(char *find_entry, struct file *file,
+ 	cifs_prime_dcache(file_dentry(file), &name, &fattr);
+ 
+ 	return !cifs_dir_emit(ctx, name.name, name.len,
+-			      &fattr, cfid);
++			      &fattr, cfid, file);
+ }
+ 
+ 
+@@ -1073,8 +1075,8 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
+ 	 * we need to initialize scanning and storing the
+ 	 * directory content.
+ 	 */
+-	if (ctx->pos == 0 && cfid->dirents.ctx == NULL) {
+-		cfid->dirents.ctx = ctx;
++	if (ctx->pos == 0 && cfid->dirents.file == NULL) {
++		cfid->dirents.file = file;
+ 		cfid->dirents.pos = 2;
+ 	}
+ 	/*
+@@ -1142,7 +1144,7 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
+ 	} else {
+ 		if (cfid) {
+ 			mutex_lock(&cfid->dirents.de_mutex);
+-			finished_cached_dirents_count(&cfid->dirents, ctx);
++			finished_cached_dirents_count(&cfid->dirents, ctx, file);
+ 			mutex_unlock(&cfid->dirents.de_mutex);
+ 		}
+ 		cifs_dbg(FYI, "Could not find entry\n");
+@@ -1183,7 +1185,7 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
+ 		ctx->pos++;
+ 		if (cfid) {
+ 			mutex_lock(&cfid->dirents.de_mutex);
+-			update_cached_dirents_count(&cfid->dirents, ctx);
++			update_cached_dirents_count(&cfid->dirents, file);
+ 			mutex_unlock(&cfid->dirents.de_mutex);
+ 		}
+ 
+diff --git a/fs/smb/client/reparse.c b/fs/smb/client/reparse.c
+index b6556fe3dfa11a..4d45c31336df17 100644
+--- a/fs/smb/client/reparse.c
++++ b/fs/smb/client/reparse.c
+@@ -738,7 +738,6 @@ static bool wsl_to_fattr(struct cifs_open_info_data *data,
+ 	if (!have_xattr_dev && (tag == IO_REPARSE_TAG_LX_CHR || tag == IO_REPARSE_TAG_LX_BLK))
+ 		return false;
+ 
+-	fattr->cf_dtype = S_DT(fattr->cf_mode);
+ 	return true;
+ }
+ 
+diff --git a/fs/smb/client/sess.c b/fs/smb/client/sess.c
+index 9b32f7821b718f..10d82d0dc6a9ee 100644
+--- a/fs/smb/client/sess.c
++++ b/fs/smb/client/sess.c
+@@ -473,6 +473,10 @@ cifs_chan_update_iface(struct cifs_ses *ses, struct TCP_Server_Info *server)
+ 
+ 	ses->chans[chan_index].iface = iface;
+ 	spin_unlock(&ses->chan_lock);
++
++	spin_lock(&server->srv_lock);
++	memcpy(&server->dstaddr, &iface->sockaddr, sizeof(server->dstaddr));
++	spin_unlock(&server->srv_lock);
+ }
+ 
+ static int
+@@ -522,8 +526,7 @@ cifs_ses_add_channel(struct cifs_ses *ses,
+ 	ctx->domainauto = ses->domainAuto;
+ 	ctx->domainname = ses->domainName;
+ 
+-	/* no hostname for extra channels */
+-	ctx->server_hostname = "";
++	ctx->server_hostname = ses->server->hostname;
+ 
+ 	ctx->username = ses->user_name;
+ 	ctx->password = ses->password;
+diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
+index 176be478cd138f..c6ae395a46925c 100644
+--- a/fs/smb/client/smb2pdu.c
++++ b/fs/smb/client/smb2pdu.c
+@@ -428,14 +428,23 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
+ 	if (!rc &&
+ 	    (server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL) &&
+ 	    server->ops->query_server_interfaces) {
+-		mutex_unlock(&ses->session_mutex);
+-
+ 		/*
+-		 * query server network interfaces, in case they change
++		 * query server network interfaces, in case they change.
++		 * Also mark the session as pending this update while the query
++		 * is in progress. This will be used to avoid calling
++		 * smb2_reconnect recursively.
+ 		 */
++		ses->flags |= CIFS_SES_FLAGS_PENDING_QUERY_INTERFACES;
+ 		xid = get_xid();
+ 		rc = server->ops->query_server_interfaces(xid, tcon, false);
+ 		free_xid(xid);
++		ses->flags &= ~CIFS_SES_FLAGS_PENDING_QUERY_INTERFACES;
++
++		/* regardless of rc value, setup polling */
++		queue_delayed_work(cifsiod_wq, &tcon->query_interfaces,
++				   (SMB_INTERFACE_POLL_INTERVAL * HZ));
++
++		mutex_unlock(&ses->session_mutex);
+ 
+ 		if (rc == -EOPNOTSUPP && ses->chan_count > 1) {
+ 			/*
+@@ -455,11 +464,8 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
+ 		if (ses->chan_max > ses->chan_count &&
+ 		    ses->iface_count &&
+ 		    !SERVER_IS_CHAN(server)) {
+-			if (ses->chan_count == 1) {
++			if (ses->chan_count == 1)
+ 				cifs_server_dbg(VFS, "supports multichannel now\n");
+-				queue_delayed_work(cifsiod_wq, &tcon->query_interfaces,
+-						 (SMB_INTERFACE_POLL_INTERVAL * HZ));
+-			}
+ 
+ 			cifs_try_adding_channels(ses);
+ 		}
+@@ -577,11 +583,18 @@ static int smb2_ioctl_req_init(u32 opcode, struct cifs_tcon *tcon,
+ 			       struct TCP_Server_Info *server,
+ 			       void **request_buf, unsigned int *total_len)
+ {
+-	/* Skip reconnect only for FSCTL_VALIDATE_NEGOTIATE_INFO IOCTLs */
+-	if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO) {
++	/*
++	 * Skip reconnect in one of the following cases:
++	 * 1. For FSCTL_VALIDATE_NEGOTIATE_INFO IOCTLs
++	 * 2. For FSCTL_QUERY_NETWORK_INTERFACE_INFO IOCTL when called from
++	 * smb2_reconnect (indicated by CIFS_SES_FLAGS_PENDING_QUERY_INTERFACES ses flag)
++	 */
++	if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO ||
++	    (opcode == FSCTL_QUERY_NETWORK_INTERFACE_INFO &&
++	     (tcon->ses->flags & CIFS_SES_FLAGS_PENDING_QUERY_INTERFACES)))
+ 		return __smb2_plain_req_init(SMB2_IOCTL, tcon, server,
+ 					     request_buf, total_len);
+-	}
++
+ 	return smb2_plain_req_init(SMB2_IOCTL, tcon, server,
+ 				   request_buf, total_len);
+ }
+diff --git a/fs/smb/client/smbdirect.c b/fs/smb/client/smbdirect.c
+index b0b7254661e926..9d8be034f103f2 100644
+--- a/fs/smb/client/smbdirect.c
++++ b/fs/smb/client/smbdirect.c
+@@ -2552,13 +2552,14 @@ static ssize_t smb_extract_folioq_to_rdma(struct iov_iter *iter,
+ 		size_t fsize = folioq_folio_size(folioq, slot);
+ 
+ 		if (offset < fsize) {
+-			size_t part = umin(maxsize - ret, fsize - offset);
++			size_t part = umin(maxsize, fsize - offset);
+ 
+ 			if (!smb_set_sge(rdma, folio_page(folio, 0), offset, part))
+ 				return -EIO;
+ 
+ 			offset += part;
+ 			ret += part;
++			maxsize -= part;
+ 		}
+ 
+ 		if (offset >= fsize) {
+@@ -2573,7 +2574,7 @@ static ssize_t smb_extract_folioq_to_rdma(struct iov_iter *iter,
+ 				slot = 0;
+ 			}
+ 		}
+-	} while (rdma->nr_sge < rdma->max_sge || maxsize > 0);
++	} while (rdma->nr_sge < rdma->max_sge && maxsize > 0);
+ 
+ 	iter->folioq = folioq;
+ 	iter->folioq_slot = slot;
+diff --git a/fs/smb/client/transport.c b/fs/smb/client/transport.c
+index 9f13a705f7f676..35d18711879316 100644
+--- a/fs/smb/client/transport.c
++++ b/fs/smb/client/transport.c
+@@ -1029,14 +1029,16 @@ struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
+ 	uint index = 0;
+ 	unsigned int min_in_flight = UINT_MAX, max_in_flight = 0;
+ 	struct TCP_Server_Info *server = NULL;
+-	int i;
++	int i, start, cur;
+ 
+ 	if (!ses)
+ 		return NULL;
+ 
+ 	spin_lock(&ses->chan_lock);
++	start = atomic_inc_return(&ses->chan_seq);
+ 	for (i = 0; i < ses->chan_count; i++) {
+-		server = ses->chans[i].server;
++		cur = (start + i) % ses->chan_count;
++		server = ses->chans[cur].server;
+ 		if (!server || server->terminate)
+ 			continue;
+ 
+@@ -1053,17 +1055,15 @@ struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
+ 		 */
+ 		if (server->in_flight < min_in_flight) {
+ 			min_in_flight = server->in_flight;
+-			index = i;
++			index = cur;
+ 		}
+ 		if (server->in_flight > max_in_flight)
+ 			max_in_flight = server->in_flight;
+ 	}
+ 
+ 	/* if all channels are equally loaded, fall back to round-robin */
+-	if (min_in_flight == max_in_flight) {
+-		index = (uint)atomic_inc_return(&ses->chan_seq);
+-		index %= ses->chan_count;
+-	}
++	if (min_in_flight == max_in_flight)
++		index = (uint)start % ses->chan_count;
+ 
+ 	server = ses->chans[index].server;
+ 	spin_unlock(&ses->chan_lock);
+diff --git a/fs/smb/server/connection.c b/fs/smb/server/connection.c
+index 7aaea71a4f2061..9eb3e6010aa68a 100644
+--- a/fs/smb/server/connection.c
++++ b/fs/smb/server/connection.c
+@@ -40,7 +40,7 @@ void ksmbd_conn_free(struct ksmbd_conn *conn)
+ 	kvfree(conn->request_buf);
+ 	kfree(conn->preauth_info);
+ 	if (atomic_dec_and_test(&conn->refcnt)) {
+-		ksmbd_free_transport(conn->transport);
++		conn->transport->ops->free_transport(conn->transport);
+ 		kfree(conn);
+ 	}
+ }
+diff --git a/fs/smb/server/connection.h b/fs/smb/server/connection.h
+index 14620e147dda57..572102098c1080 100644
+--- a/fs/smb/server/connection.h
++++ b/fs/smb/server/connection.h
+@@ -132,6 +132,7 @@ struct ksmbd_transport_ops {
+ 			  void *buf, unsigned int len,
+ 			  struct smb2_buffer_desc_v1 *desc,
+ 			  unsigned int desc_len);
++	void (*free_transport)(struct ksmbd_transport *kt);
+ };
+ 
+ struct ksmbd_transport {
+diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
+index 08d9a7cfba8cdc..6537ffd2b9651e 100644
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -1605,17 +1605,18 @@ static int krb5_authenticate(struct ksmbd_work *work,
+ 	out_len = work->response_sz -
+ 		(le16_to_cpu(rsp->SecurityBufferOffset) + 4);
+ 
+-	/* Check previous session */
+-	prev_sess_id = le64_to_cpu(req->PreviousSessionId);
+-	if (prev_sess_id && prev_sess_id != sess->id)
+-		destroy_previous_session(conn, sess->user, prev_sess_id);
+-
+ 	retval = ksmbd_krb5_authenticate(sess, in_blob, in_len,
+ 					 out_blob, &out_len);
+ 	if (retval) {
+ 		ksmbd_debug(SMB, "krb5 authentication failed\n");
+ 		return -EINVAL;
+ 	}
++
++	/* Check previous session */
++	prev_sess_id = le64_to_cpu(req->PreviousSessionId);
++	if (prev_sess_id && prev_sess_id != sess->id)
++		destroy_previous_session(conn, sess->user, prev_sess_id);
++
+ 	rsp->SecurityBufferLength = cpu_to_le16(out_len);
+ 
+ 	if ((conn->sign || server_conf.enforced_signing) ||
+diff --git a/fs/smb/server/transport_rdma.c b/fs/smb/server/transport_rdma.c
+index 7c5a0d712873d2..6921d62934bcb0 100644
+--- a/fs/smb/server/transport_rdma.c
++++ b/fs/smb/server/transport_rdma.c
+@@ -158,7 +158,8 @@ struct smb_direct_transport {
+ };
+ 
+ #define KSMBD_TRANS(t) ((struct ksmbd_transport *)&((t)->transport))
+-
++#define SMBD_TRANS(t)	((struct smb_direct_transport *)container_of(t, \
++				struct smb_direct_transport, transport))
+ enum {
+ 	SMB_DIRECT_MSG_NEGOTIATE_REQ = 0,
+ 	SMB_DIRECT_MSG_DATA_TRANSFER
+@@ -409,6 +410,11 @@ static struct smb_direct_transport *alloc_transport(struct rdma_cm_id *cm_id)
+ 	return NULL;
+ }
+ 
++static void smb_direct_free_transport(struct ksmbd_transport *kt)
++{
++	kfree(SMBD_TRANS(kt));
++}
++
+ static void free_transport(struct smb_direct_transport *t)
+ {
+ 	struct smb_direct_recvmsg *recvmsg;
+@@ -454,7 +460,6 @@ static void free_transport(struct smb_direct_transport *t)
+ 
+ 	smb_direct_destroy_pools(t);
+ 	ksmbd_conn_free(KSMBD_TRANS(t)->conn);
+-	kfree(t);
+ }
+ 
+ static struct smb_direct_sendmsg
+@@ -2300,4 +2305,5 @@ static const struct ksmbd_transport_ops ksmbd_smb_direct_transport_ops = {
+ 	.read		= smb_direct_read,
+ 	.rdma_read	= smb_direct_rdma_read,
+ 	.rdma_write	= smb_direct_rdma_write,
++	.free_transport = smb_direct_free_transport,
+ };
+diff --git a/fs/smb/server/transport_tcp.c b/fs/smb/server/transport_tcp.c
+index abedf510899a74..4e9f98db9ff409 100644
+--- a/fs/smb/server/transport_tcp.c
++++ b/fs/smb/server/transport_tcp.c
+@@ -93,7 +93,7 @@ static struct tcp_transport *alloc_transport(struct socket *client_sk)
+ 	return t;
+ }
+ 
+-void ksmbd_free_transport(struct ksmbd_transport *kt)
++static void ksmbd_tcp_free_transport(struct ksmbd_transport *kt)
+ {
+ 	struct tcp_transport *t = TCP_TRANS(kt);
+ 
+@@ -656,4 +656,5 @@ static const struct ksmbd_transport_ops ksmbd_tcp_transport_ops = {
+ 	.read		= ksmbd_tcp_read,
+ 	.writev		= ksmbd_tcp_writev,
+ 	.disconnect	= ksmbd_tcp_disconnect,
++	.free_transport = ksmbd_tcp_free_transport,
+ };
+diff --git a/fs/xattr.c b/fs/xattr.c
+index 4f5a45338a83ac..0191ac2590e094 100644
+--- a/fs/xattr.c
++++ b/fs/xattr.c
+@@ -1341,6 +1341,7 @@ ssize_t simple_xattr_list(struct inode *inode, struct simple_xattrs *xattrs,
+ 		buffer += err;
+ 	}
+ 	remaining_size -= err;
++	err = 0;
+ 
+ 	read_lock(&xattrs->lock);
+ 	for (rbp = rb_first(&xattrs->rb_root); rbp; rbp = rb_next(rbp)) {
+diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
+index 80767e8bf3ad43..d323dfffa4bfc6 100644
+--- a/include/acpi/actypes.h
++++ b/include/acpi/actypes.h
+@@ -527,7 +527,7 @@ typedef u64 acpi_integer;
+ 
+ /* Support for the special RSDP signature (8 characters) */
+ 
+-#define ACPI_VALIDATE_RSDP_SIG(a)       (!strncmp (ACPI_CAST_PTR (char, (a)), ACPI_SIG_RSDP, 8))
++#define ACPI_VALIDATE_RSDP_SIG(a)       (!strncmp (ACPI_CAST_PTR (char, (a)), ACPI_SIG_RSDP, (sizeof(a) < 8) ? ACPI_NAMESEG_SIZE : 8))
+ #define ACPI_MAKE_RSDP_SIG(dest)        (memcpy (ACPI_CAST_PTR (char, (dest)), ACPI_SIG_RSDP, 8))
+ 
+ /* Support for OEMx signature (x can be any character) */
+diff --git a/include/linux/acpi.h b/include/linux/acpi.h
+index 4d5ee84c468ba6..f826bb59556afe 100644
+--- a/include/linux/acpi.h
++++ b/include/linux/acpi.h
+@@ -1110,13 +1110,13 @@ void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state,
+ 
+ acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state,
+ 					   u32 val_a, u32 val_b);
+-#if defined(CONFIG_SUSPEND) && defined(CONFIG_X86)
+ struct acpi_s2idle_dev_ops {
+ 	struct list_head list_node;
+ 	void (*prepare)(void);
+ 	void (*check)(void);
+ 	void (*restore)(void);
+ };
++#if defined(CONFIG_SUSPEND) && defined(CONFIG_X86)
+ int acpi_register_lps0_dev(struct acpi_s2idle_dev_ops *arg);
+ void acpi_unregister_lps0_dev(struct acpi_s2idle_dev_ops *arg);
+ int acpi_get_lps0_constraint(struct acpi_device *adev);
+@@ -1125,6 +1125,13 @@ static inline int acpi_get_lps0_constraint(struct device *dev)
+ {
+ 	return ACPI_STATE_UNKNOWN;
+ }
++static inline int acpi_register_lps0_dev(struct acpi_s2idle_dev_ops *arg)
++{
++	return -ENODEV;
++}
++static inline void acpi_unregister_lps0_dev(struct acpi_s2idle_dev_ops *arg)
++{
++}
+ #endif /* CONFIG_SUSPEND && CONFIG_X86 */
+ void arch_reserve_mem_area(acpi_physical_address addr, size_t size);
+ #else
+diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
+index 9b02961d65ee66..45f2f278b50a8a 100644
+--- a/include/linux/atmdev.h
++++ b/include/linux/atmdev.h
+@@ -249,6 +249,12 @@ static inline void atm_account_tx(struct atm_vcc *vcc, struct sk_buff *skb)
+ 	ATM_SKB(skb)->atm_options = vcc->atm_options;
+ }
+ 
++static inline void atm_return_tx(struct atm_vcc *vcc, struct sk_buff *skb)
++{
++	WARN_ON_ONCE(refcount_sub_and_test(ATM_SKB(skb)->acct_truesize,
++					   &sk_atm(vcc)->sk_wmem_alloc));
++}
++
+ static inline void atm_force_charge(struct atm_vcc *vcc,int truesize)
+ {
+ 	atomic_add(truesize, &sk_atm(vcc)->sk_rmem_alloc);
+diff --git a/include/linux/bus/stm32_firewall_device.h b/include/linux/bus/stm32_firewall_device.h
+index 5178b72bc92098..eaa7a3f5445072 100644
+--- a/include/linux/bus/stm32_firewall_device.h
++++ b/include/linux/bus/stm32_firewall_device.h
+@@ -114,27 +114,30 @@ void stm32_firewall_release_access_by_id(struct stm32_firewall *firewall, u32 su
+ 
+ #else /* CONFIG_STM32_FIREWALL */
+ 
+-int stm32_firewall_get_firewall(struct device_node *np, struct stm32_firewall *firewall,
+-				unsigned int nb_firewall)
++static inline int stm32_firewall_get_firewall(struct device_node *np,
++					      struct stm32_firewall *firewall,
++					      unsigned int nb_firewall)
+ {
+ 	return -ENODEV;
+ }
+ 
+-int stm32_firewall_grant_access(struct stm32_firewall *firewall)
++static inline int stm32_firewall_grant_access(struct stm32_firewall *firewall)
+ {
+ 	return -ENODEV;
+ }
+ 
+-void stm32_firewall_release_access(struct stm32_firewall *firewall)
++static inline void stm32_firewall_release_access(struct stm32_firewall *firewall)
+ {
+ }
+ 
+-int stm32_firewall_grant_access_by_id(struct stm32_firewall *firewall, u32 subsystem_id)
++static inline int stm32_firewall_grant_access_by_id(struct stm32_firewall *firewall,
++						    u32 subsystem_id)
+ {
+ 	return -ENODEV;
+ }
+ 
+-void stm32_firewall_release_access_by_id(struct stm32_firewall *firewall, u32 subsystem_id)
++static inline void stm32_firewall_release_access_by_id(struct stm32_firewall *firewall,
++						       u32 subsystem_id)
+ {
+ }
+ 
+diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
+index c24f8bc01045df..5206d63b33860b 100644
+--- a/include/linux/f2fs_fs.h
++++ b/include/linux/f2fs_fs.h
+@@ -78,6 +78,7 @@ enum stop_cp_reason {
+ 	STOP_CP_REASON_UPDATE_INODE,
+ 	STOP_CP_REASON_FLUSH_FAIL,
+ 	STOP_CP_REASON_NO_SEGMENT,
++	STOP_CP_REASON_CORRUPTED_FREE_BITMAP,
+ 	STOP_CP_REASON_MAX,
+ };
+ 
+diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
+index 12f7a7b9c06e9b..3897f4492e1f49 100644
+--- a/include/linux/hugetlb.h
++++ b/include/linux/hugetlb.h
+@@ -272,6 +272,7 @@ long hugetlb_change_protection(struct vm_area_struct *vma,
+ bool is_hugetlb_entry_migration(pte_t pte);
+ bool is_hugetlb_entry_hwpoisoned(pte_t pte);
+ void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);
++void hugetlb_split(struct vm_area_struct *vma, unsigned long addr);
+ 
+ #else /* !CONFIG_HUGETLB_PAGE */
+ 
+@@ -465,6 +466,8 @@ static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
+ 
+ static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { }
+ 
++static inline void hugetlb_split(struct vm_area_struct *vma, unsigned long addr) {}
++
+ #endif /* !CONFIG_HUGETLB_PAGE */
+ 
+ #ifndef pgd_write
+diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
+index eb67d3d5ff5b22..2e455b20c37c23 100644
+--- a/include/linux/mmc/card.h
++++ b/include/linux/mmc/card.h
+@@ -295,6 +295,7 @@ struct mmc_card {
+ #define MMC_QUIRK_BROKEN_SD_CACHE	(1<<15)	/* Disable broken SD cache support */
+ #define MMC_QUIRK_BROKEN_CACHE_FLUSH	(1<<16)	/* Don't flush cache until the write has occurred */
+ #define MMC_QUIRK_BROKEN_SD_POWEROFF_NOTIFY	(1<<17) /* Disable broken SD poweroff notify support */
++#define MMC_QUIRK_NO_UHS_DDR50_TUNING	(1<<18) /* Disable DDR50 tuning */
+ 
+ 	bool			written_flag;	/* Indicates eMMC has been written since power on */
+ 	bool			reenable_cmdq;	/* Re-enable Command Queue */
+diff --git a/include/linux/tcp.h b/include/linux/tcp.h
+index 6a5e08b937b315..5f56fa87801314 100644
+--- a/include/linux/tcp.h
++++ b/include/linux/tcp.h
+@@ -336,7 +336,7 @@ struct tcp_sock {
+ 	} rcv_rtt_est;
+ /* Receiver queue space */
+ 	struct {
+-		u32	space;
++		int	space;
+ 		u32	seq;
+ 		u64	time;
+ 	} rcvq_space;
+diff --git a/include/net/checksum.h b/include/net/checksum.h
+index 1338cb92c8e72a..28b101f26636e8 100644
+--- a/include/net/checksum.h
++++ b/include/net/checksum.h
+@@ -158,7 +158,7 @@ void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
+ 			       const __be32 *from, const __be32 *to,
+ 			       bool pseudohdr);
+ void inet_proto_csum_replace_by_diff(__sum16 *sum, struct sk_buff *skb,
+-				     __wsum diff, bool pseudohdr);
++				     __wsum diff, bool pseudohdr, bool ipv6);
+ 
+ static __always_inline
+ void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb,
+diff --git a/include/net/ipv6.h b/include/net/ipv6.h
+index 248bfb26e2af9c..6d52b5584d2fb3 100644
+--- a/include/net/ipv6.h
++++ b/include/net/ipv6.h
+@@ -363,15 +363,6 @@ struct ipcm6_cookie {
+ 	struct ipv6_txoptions *opt;
+ };
+ 
+-static inline void ipcm6_init(struct ipcm6_cookie *ipc6)
+-{
+-	*ipc6 = (struct ipcm6_cookie) {
+-		.hlimit = -1,
+-		.tclass = -1,
+-		.dontfrag = -1,
+-	};
+-}
+-
+ static inline void ipcm6_init_sk(struct ipcm6_cookie *ipc6,
+ 				 const struct sock *sk)
+ {
+diff --git a/include/net/mac80211.h b/include/net/mac80211.h
+index fee854892bec58..8e70941602064e 100644
+--- a/include/net/mac80211.h
++++ b/include/net/mac80211.h
+@@ -5311,22 +5311,6 @@ void ieee80211_get_tx_rates(struct ieee80211_vif *vif,
+ 			    struct ieee80211_tx_rate *dest,
+ 			    int max_rates);
+ 
+-/**
+- * ieee80211_sta_set_expected_throughput - set the expected tpt for a station
+- *
+- * Call this function to notify mac80211 about a change in expected throughput
+- * to a station. A driver for a device that does rate control in firmware can
+- * call this function when the expected throughput estimate towards a station
+- * changes. The information is used to tune the CoDel AQM applied to traffic
+- * going towards that station (which can otherwise be too aggressive and cause
+- * slow stations to starve).
+- *
+- * @pubsta: the station to set throughput for.
+- * @thr: the current expected throughput in kbps.
+- */
+-void ieee80211_sta_set_expected_throughput(struct ieee80211_sta *pubsta,
+-					   u32 thr);
+-
+ /**
+  * ieee80211_tx_rate_update - transmit rate update callback
+  *
+diff --git a/include/trace/events/erofs.h b/include/trace/events/erofs.h
+index 57df3843e650c9..ad79f1ca4fb5a3 100644
+--- a/include/trace/events/erofs.h
++++ b/include/trace/events/erofs.h
+@@ -211,24 +211,6 @@ TRACE_EVENT(erofs_map_blocks_exit,
+ 		  show_mflags(__entry->mflags), __entry->ret)
+ );
+ 
+-TRACE_EVENT(erofs_destroy_inode,
+-	TP_PROTO(struct inode *inode),
+-
+-	TP_ARGS(inode),
+-
+-	TP_STRUCT__entry(
+-		__field(	dev_t,		dev		)
+-		__field(	erofs_nid_t,	nid		)
+-	),
+-
+-	TP_fast_assign(
+-		__entry->dev	= inode->i_sb->s_dev;
+-		__entry->nid	= EROFS_I(inode)->nid;
+-	),
+-
+-	TP_printk("dev = (%d,%d), nid = %llu", show_dev_nid(__entry))
+-);
+-
+ #endif /* _TRACE_EROFS_H */
+ 
+  /* This part must be outside protection */
+diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
+index 552fd633f8200d..5a5cdb4539358a 100644
+--- a/include/uapi/linux/bpf.h
++++ b/include/uapi/linux/bpf.h
+@@ -2035,6 +2035,7 @@ union bpf_attr {
+  * 		for updates resulting in a null checksum the value is set to
+  * 		**CSUM_MANGLED_0** instead. Flag **BPF_F_PSEUDO_HDR** indicates
+  * 		the checksum is to be computed against a pseudo-header.
++ * 		Flag **BPF_F_IPV6** should be set for IPv6 packets.
+  *
+  * 		This helper works in combination with **bpf_csum_diff**\ (),
+  * 		which does not update the checksum in-place, but offers more
+@@ -6049,6 +6050,7 @@ enum {
+ 	BPF_F_PSEUDO_HDR		= (1ULL << 4),
+ 	BPF_F_MARK_MANGLED_0		= (1ULL << 5),
+ 	BPF_F_MARK_ENFORCE		= (1ULL << 6),
++	BPF_F_IPV6			= (1ULL << 7),
+ };
+ 
+ /* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */
+diff --git a/io_uring/io-wq.c b/io_uring/io-wq.c
+index a2d577b099308e..8f555c1d7185c3 100644
+--- a/io_uring/io-wq.c
++++ b/io_uring/io-wq.c
+@@ -1204,8 +1204,10 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
+ 	atomic_set(&wq->worker_refs, 1);
+ 	init_completion(&wq->worker_done);
+ 	ret = cpuhp_state_add_instance_nocalls(io_wq_online, &wq->cpuhp_node);
+-	if (ret)
++	if (ret) {
++		put_task_struct(wq->task);
+ 		goto err;
++	}
+ 
+ 	return wq;
+ err:
+diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
+index 64870f51b67883..52ada466bf98f3 100644
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -1681,7 +1681,7 @@ static __cold void io_drain_req(struct io_kiocb *req)
+ 	spin_unlock(&ctx->completion_lock);
+ 
+ 	io_prep_async_link(req);
+-	de = kmalloc(sizeof(*de), GFP_KERNEL);
++	de = kmalloc(sizeof(*de), GFP_KERNEL_ACCOUNT);
+ 	if (!de) {
+ 		ret = -ENOMEM;
+ 		io_req_defer_failed(req, ret);
+diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
+index 7a8c3a004800ed..c9289597522f6e 100644
+--- a/io_uring/kbuf.c
++++ b/io_uring/kbuf.c
+@@ -262,8 +262,11 @@ static int io_ring_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg,
+ 		/* truncate end piece, if needed, for non partial buffers */
+ 		if (len > arg->max_len) {
+ 			len = arg->max_len;
+-			if (!(bl->flags & IOBL_INC))
++			if (!(bl->flags & IOBL_INC)) {
++				if (iov != arg->iovs)
++					break;
+ 				buf->len = len;
++			}
+ 		}
+ 
+ 		iov->iov_base = u64_to_user_ptr(buf->addr);
+@@ -728,7 +731,7 @@ int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
+ 		io_destroy_bl(ctx, bl);
+ 	}
+ 
+-	free_bl = bl = kzalloc(sizeof(*bl), GFP_KERNEL);
++	free_bl = bl = kzalloc(sizeof(*bl), GFP_KERNEL_ACCOUNT);
+ 	if (!bl)
+ 		return -ENOMEM;
+ 
+diff --git a/io_uring/sqpoll.c b/io_uring/sqpoll.c
+index 9a630689489571..2faa3058b2d0e7 100644
+--- a/io_uring/sqpoll.c
++++ b/io_uring/sqpoll.c
+@@ -426,7 +426,6 @@ void io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
+ __cold int io_sq_offload_create(struct io_ring_ctx *ctx,
+ 				struct io_uring_params *p)
+ {
+-	struct task_struct *task_to_put = NULL;
+ 	int ret;
+ 
+ 	/* Retain compatibility with failing for an invalid attach attempt */
+@@ -510,7 +509,7 @@ __cold int io_sq_offload_create(struct io_ring_ctx *ctx,
+ 		rcu_assign_pointer(sqd->thread, tsk);
+ 		mutex_unlock(&sqd->lock);
+ 
+-		task_to_put = get_task_struct(tsk);
++		get_task_struct(tsk);
+ 		ret = io_uring_alloc_task_context(tsk, ctx);
+ 		wake_up_new_task(tsk);
+ 		if (ret)
+@@ -525,8 +524,6 @@ __cold int io_sq_offload_create(struct io_ring_ctx *ctx,
+ 	complete(&ctx->sq_data->exited);
+ err:
+ 	io_sq_thread_finish(ctx);
+-	if (task_to_put)
+-		put_task_struct(task_to_put);
+ 	return ret;
+ }
+ 
+diff --git a/ipc/shm.c b/ipc/shm.c
+index 99564c87008408..492fcc6999857a 100644
+--- a/ipc/shm.c
++++ b/ipc/shm.c
+@@ -431,8 +431,11 @@ static int shm_try_destroy_orphaned(int id, void *p, void *data)
+ void shm_destroy_orphaned(struct ipc_namespace *ns)
+ {
+ 	down_write(&shm_ids(ns).rwsem);
+-	if (shm_ids(ns).in_use)
++	if (shm_ids(ns).in_use) {
++		rcu_read_lock();
+ 		idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
++		rcu_read_unlock();
++	}
+ 	up_write(&shm_ids(ns).rwsem);
+ }
+ 
+diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c
+index 477947456371a2..2285b27ce68c74 100644
+--- a/kernel/bpf/bpf_struct_ops.c
++++ b/kernel/bpf/bpf_struct_ops.c
+@@ -577,7 +577,7 @@ int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks,
+ 	if (model->ret_size > 0)
+ 		flags |= BPF_TRAMP_F_RET_FENTRY_RET;
+ 
+-	size = arch_bpf_trampoline_size(model, flags, tlinks, NULL);
++	size = arch_bpf_trampoline_size(model, flags, tlinks, stub_func);
+ 	if (size <= 0)
+ 		return size ? : -EFAULT;
+ 
+diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
+index 2c54c148a94f30..f83bd019db1417 100644
+--- a/kernel/bpf/btf.c
++++ b/kernel/bpf/btf.c
+@@ -6684,10 +6684,10 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type,
+ 			/* Is this a func with potential NULL args? */
+ 			if (strcmp(tname, raw_tp_null_args[i].func))
+ 				continue;
+-			if (raw_tp_null_args[i].mask & (0x1 << (arg * 4)))
++			if (raw_tp_null_args[i].mask & (0x1ULL << (arg * 4)))
+ 				info->reg_type |= PTR_MAYBE_NULL;
+ 			/* Is the current arg IS_ERR? */
+-			if (raw_tp_null_args[i].mask & (0x2 << (arg * 4)))
++			if (raw_tp_null_args[i].mask & (0x2ULL << (arg * 4)))
+ 				ptr_err_raw_tp = true;
+ 			break;
+ 		}
+diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
+index a05aeb34589641..9173d107758d45 100644
+--- a/kernel/bpf/helpers.c
++++ b/kernel/bpf/helpers.c
+@@ -129,7 +129,8 @@ const struct bpf_func_proto bpf_map_peek_elem_proto = {
+ 
+ BPF_CALL_3(bpf_map_lookup_percpu_elem, struct bpf_map *, map, void *, key, u32, cpu)
+ {
+-	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
++	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
++		     !rcu_read_lock_bh_held());
+ 	return (unsigned long) map->ops->map_lookup_percpu_elem(map, key, cpu);
+ }
+ 
+diff --git a/kernel/cgroup/legacy_freezer.c b/kernel/cgroup/legacy_freezer.c
+index 074653f964c1d0..01c02d116e8e16 100644
+--- a/kernel/cgroup/legacy_freezer.c
++++ b/kernel/cgroup/legacy_freezer.c
+@@ -188,13 +188,12 @@ static void freezer_attach(struct cgroup_taskset *tset)
+ 		if (!(freezer->state & CGROUP_FREEZING)) {
+ 			__thaw_task(task);
+ 		} else {
+-			freeze_task(task);
+-
+ 			/* clear FROZEN and propagate upwards */
+ 			while (freezer && (freezer->state & CGROUP_FROZEN)) {
+ 				freezer->state &= ~CGROUP_FROZEN;
+ 				freezer = parent_freezer(freezer);
+ 			}
++			freeze_task(task);
+ 		}
+ 	}
+ 
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 9ce82904f761d2..7210104b3345ca 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -206,6 +206,19 @@ static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
+ 	__perf_ctx_unlock(&cpuctx->ctx);
+ }
+ 
++typedef struct {
++	struct perf_cpu_context *cpuctx;
++	struct perf_event_context *ctx;
++} class_perf_ctx_lock_t;
++
++static inline void class_perf_ctx_lock_destructor(class_perf_ctx_lock_t *_T)
++{ perf_ctx_unlock(_T->cpuctx, _T->ctx); }
++
++static inline class_perf_ctx_lock_t
++class_perf_ctx_lock_constructor(struct perf_cpu_context *cpuctx,
++				struct perf_event_context *ctx)
++{ perf_ctx_lock(cpuctx, ctx); return (class_perf_ctx_lock_t){ cpuctx, ctx }; }
++
+ #define TASK_TOMBSTONE ((void *)-1L)
+ 
+ static bool is_kernel_event(struct perf_event *event)
+@@ -898,7 +911,13 @@ static void perf_cgroup_switch(struct task_struct *task)
+ 	if (READ_ONCE(cpuctx->cgrp) == cgrp)
+ 		return;
+ 
+-	perf_ctx_lock(cpuctx, cpuctx->task_ctx);
++	guard(perf_ctx_lock)(cpuctx, cpuctx->task_ctx);
++	/*
++	 * Re-check, could've raced vs perf_remove_from_context().
++	 */
++	if (READ_ONCE(cpuctx->cgrp) == NULL)
++		return;
++
+ 	perf_ctx_disable(&cpuctx->ctx, true);
+ 
+ 	ctx_sched_out(&cpuctx->ctx, NULL, EVENT_ALL|EVENT_CGROUP);
+@@ -916,7 +935,6 @@ static void perf_cgroup_switch(struct task_struct *task)
+ 	ctx_sched_in(&cpuctx->ctx, NULL, EVENT_ALL|EVENT_CGROUP);
+ 
+ 	perf_ctx_enable(&cpuctx->ctx, true);
+-	perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
+ }
+ 
+ static int perf_cgroup_ensure_storage(struct perf_event *event,
+@@ -2111,8 +2129,9 @@ perf_aux_output_match(struct perf_event *event, struct perf_event *aux_event)
+ }
+ 
+ static void put_event(struct perf_event *event);
+-static void event_sched_out(struct perf_event *event,
+-			    struct perf_event_context *ctx);
++static void __event_disable(struct perf_event *event,
++			    struct perf_event_context *ctx,
++			    enum perf_event_state state);
+ 
+ static void perf_put_aux_event(struct perf_event *event)
+ {
+@@ -2145,8 +2164,7 @@ static void perf_put_aux_event(struct perf_event *event)
+ 		 * state so that we don't try to schedule it again. Note
+ 		 * that perf_event_enable() will clear the ERROR status.
+ 		 */
+-		event_sched_out(iter, ctx);
+-		perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
++		__event_disable(iter, ctx, PERF_EVENT_STATE_ERROR);
+ 	}
+ }
+ 
+@@ -2204,18 +2222,6 @@ static inline struct list_head *get_event_list(struct perf_event *event)
+ 				    &event->pmu_ctx->flexible_active;
+ }
+ 
+-/*
+- * Events that have PERF_EV_CAP_SIBLING require being part of a group and
+- * cannot exist on their own, schedule them out and move them into the ERROR
+- * state. Also see _perf_event_enable(), it will not be able to recover
+- * this ERROR state.
+- */
+-static inline void perf_remove_sibling_event(struct perf_event *event)
+-{
+-	event_sched_out(event, event->ctx);
+-	perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
+-}
+-
+ static void perf_group_detach(struct perf_event *event)
+ {
+ 	struct perf_event *leader = event->group_leader;
+@@ -2251,8 +2257,15 @@ static void perf_group_detach(struct perf_event *event)
+ 	 */
+ 	list_for_each_entry_safe(sibling, tmp, &event->sibling_list, sibling_list) {
+ 
++		/*
++		 * Events that have PERF_EV_CAP_SIBLING require being part of
++		 * a group and cannot exist on their own, schedule them out
++		 * and move them into the ERROR state. Also see
++		 * _perf_event_enable(), it will not be able to recover this
++		 * ERROR state.
++		 */
+ 		if (sibling->event_caps & PERF_EV_CAP_SIBLING)
+-			perf_remove_sibling_event(sibling);
++			__event_disable(sibling, ctx, PERF_EVENT_STATE_ERROR);
+ 
+ 		sibling->group_leader = sibling;
+ 		list_del_init(&sibling->sibling_list);
+@@ -2512,6 +2525,15 @@ static void perf_remove_from_context(struct perf_event *event, unsigned long fla
+ 	event_function_call(event, __perf_remove_from_context, (void *)flags);
+ }
+ 
++static void __event_disable(struct perf_event *event,
++			    struct perf_event_context *ctx,
++			    enum perf_event_state state)
++{
++	event_sched_out(event, ctx);
++	perf_cgroup_event_disable(event, ctx);
++	perf_event_set_state(event, state);
++}
++
+ /*
+  * Cross CPU call to disable a performance event
+  */
+@@ -2526,13 +2548,18 @@ static void __perf_event_disable(struct perf_event *event,
+ 	perf_pmu_disable(event->pmu_ctx->pmu);
+ 	ctx_time_update_event(ctx, event);
+ 
++	/*
++	 * When disabling a group leader, the whole group becomes ineligible
++	 * to run, so schedule out the full group.
++	 */
+ 	if (event == event->group_leader)
+ 		group_sched_out(event, ctx);
+-	else
+-		event_sched_out(event, ctx);
+ 
+-	perf_event_set_state(event, PERF_EVENT_STATE_OFF);
+-	perf_cgroup_event_disable(event, ctx);
++	/*
++	 * But only mark the leader OFF; the siblings will remain
++	 * INACTIVE.
++	 */
++	__event_disable(event, ctx, PERF_EVENT_STATE_OFF);
+ 
+ 	perf_pmu_enable(event->pmu_ctx->pmu);
+ }
+@@ -7097,6 +7124,10 @@ perf_sample_ustack_size(u16 stack_size, u16 header_size,
+ 	if (!regs)
+ 		return 0;
+ 
++	/* No mm, no stack, no dump. */
++	if (!current->mm)
++		return 0;
++
+ 	/*
+ 	 * Check if we fit in with the requested stack size into the:
+ 	 * - TASK_SIZE
+@@ -7808,6 +7839,9 @@ perf_callchain(struct perf_event *event, struct pt_regs *regs)
+ 	const u32 max_stack = event->attr.sample_max_stack;
+ 	struct perf_callchain_entry *callchain;
+ 
++	if (!current->mm)
++		user = false;
++
+ 	if (!kernel && !user)
+ 		return &__empty_callchain;
+ 
+diff --git a/kernel/exit.c b/kernel/exit.c
+index 56b8bd9487b4b8..d465b36bcc8696 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -923,6 +923,15 @@ void __noreturn do_exit(long code)
+ 	tsk->exit_code = code;
+ 	taskstats_exit(tsk, group_dead);
+ 
++	/*
++	 * Since sampling can touch ->mm, make sure to stop everything before we
++	 * tear it down.
++	 *
++	 * Also flushes inherited counters to the parent - before the parent
++	 * gets woken up by child-exit notifications.
++	 */
++	perf_event_exit_task(tsk);
++
+ 	exit_mm();
+ 
+ 	if (group_dead)
+@@ -939,14 +948,6 @@ void __noreturn do_exit(long code)
+ 	exit_task_work(tsk);
+ 	exit_thread(tsk);
+ 
+-	/*
+-	 * Flush inherited counters to the parent - before the parent
+-	 * gets woken up by child-exit notifications.
+-	 *
+-	 * because of cgroup mode, must be called before cgroup_exit()
+-	 */
+-	perf_event_exit_task(tsk);
+-
+ 	sched_autogroup_exit_task(tsk);
+ 	cgroup_exit(tsk);
+ 
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 51f36de5990a3b..d4948a8629929c 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -8423,7 +8423,7 @@ void __init sched_init(void)
+ 		init_cfs_bandwidth(&root_task_group.cfs_bandwidth, NULL);
+ #endif /* CONFIG_FAIR_GROUP_SCHED */
+ #ifdef CONFIG_EXT_GROUP_SCHED
+-		root_task_group.scx_weight = CGROUP_WEIGHT_DFL;
++		scx_tg_init(&root_task_group);
+ #endif /* CONFIG_EXT_GROUP_SCHED */
+ #ifdef CONFIG_RT_GROUP_SCHED
+ 		root_task_group.rt_se = (struct sched_rt_entity **)ptr;
+@@ -8863,7 +8863,7 @@ struct task_group *sched_create_group(struct task_group *parent)
+ 	if (!alloc_rt_sched_group(tg, parent))
+ 		goto err;
+ 
+-	scx_group_set_weight(tg, CGROUP_WEIGHT_DFL);
++	scx_tg_init(tg);
+ 	alloc_uclamp_sched_group(tg, parent);
+ 
+ 	return tg;
+diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
+index ace5262642f9ef..ddd4fa785264eb 100644
+--- a/kernel/sched/ext.c
++++ b/kernel/sched/ext.c
+@@ -3918,6 +3918,11 @@ static void scx_cgroup_warn_missing_idle(struct task_group *tg)
+ 	cgroup_warned_missing_idle = true;
+ }
+ 
++void scx_tg_init(struct task_group *tg)
++{
++	tg->scx_weight = CGROUP_WEIGHT_DFL;
++}
++
+ int scx_tg_online(struct task_group *tg)
+ {
+ 	int ret = 0;
+diff --git a/kernel/sched/ext.h b/kernel/sched/ext.h
+index 1079b56b0f7aea..67032c30c754c3 100644
+--- a/kernel/sched/ext.h
++++ b/kernel/sched/ext.h
+@@ -70,6 +70,7 @@ static inline void scx_update_idle(struct rq *rq, bool idle, bool do_notify) {}
+ 
+ #ifdef CONFIG_CGROUP_SCHED
+ #ifdef CONFIG_EXT_GROUP_SCHED
++void scx_tg_init(struct task_group *tg);
+ int scx_tg_online(struct task_group *tg);
+ void scx_tg_offline(struct task_group *tg);
+ int scx_cgroup_can_attach(struct cgroup_taskset *tset);
+@@ -79,6 +80,7 @@ void scx_cgroup_cancel_attach(struct cgroup_taskset *tset);
+ void scx_group_set_weight(struct task_group *tg, unsigned long cgrp_weight);
+ void scx_group_set_idle(struct task_group *tg, bool idle);
+ #else	/* CONFIG_EXT_GROUP_SCHED */
++static inline void scx_tg_init(struct task_group *tg) {}
+ static inline int scx_tg_online(struct task_group *tg) { return 0; }
+ static inline void scx_tg_offline(struct task_group *tg) {}
+ static inline int scx_cgroup_can_attach(struct cgroup_taskset *tset) { return 0; }
+diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
+index 58fb7280cabbe6..ae862ad9642cb0 100644
+--- a/kernel/time/clocksource.c
++++ b/kernel/time/clocksource.c
+@@ -302,7 +302,7 @@ static void clocksource_verify_choose_cpus(void)
+ {
+ 	int cpu, i, n = verify_n_cpus;
+ 
+-	if (n < 0) {
++	if (n < 0 || n >= num_online_cpus()) {
+ 		/* Check all of the CPUs. */
+ 		cpumask_copy(&cpus_chosen, cpu_online_mask);
+ 		cpumask_clear_cpu(smp_processor_id(), &cpus_chosen);
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index e67d67f7b90650..ad7db84b04090a 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -7295,9 +7295,10 @@ void ftrace_release_mod(struct module *mod)
+ 
+ 	mutex_lock(&ftrace_lock);
+ 
+-	if (ftrace_disabled)
+-		goto out_unlock;
+-
++	/*
++	 * To avoid the UAF problem after the module is unloaded, the
++	 * 'mod_map' resource needs to be released unconditionally.
++	 */
+ 	list_for_each_entry_safe(mod_map, n, &ftrace_mod_maps, list) {
+ 		if (mod_map->mod == mod) {
+ 			list_del_rcu(&mod_map->list);
+@@ -7306,6 +7307,9 @@ void ftrace_release_mod(struct module *mod)
+ 		}
+ 	}
+ 
++	if (ftrace_disabled)
++		goto out_unlock;
++
+ 	/*
+ 	 * Each module has its own ftrace_pages, remove
+ 	 * them from the list.
+diff --git a/kernel/watchdog.c b/kernel/watchdog.c
+index 4dc72540c3b0fb..8fbb4385e8149d 100644
+--- a/kernel/watchdog.c
++++ b/kernel/watchdog.c
+@@ -47,6 +47,7 @@ int __read_mostly watchdog_user_enabled = 1;
+ static int __read_mostly watchdog_hardlockup_user_enabled = WATCHDOG_HARDLOCKUP_DEFAULT;
+ static int __read_mostly watchdog_softlockup_user_enabled = 1;
+ int __read_mostly watchdog_thresh = 10;
++static int __read_mostly watchdog_thresh_next;
+ static int __read_mostly watchdog_hardlockup_available;
+ 
+ struct cpumask watchdog_cpumask __read_mostly;
+@@ -863,12 +864,20 @@ int lockup_detector_offline_cpu(unsigned int cpu)
+ 	return 0;
+ }
+ 
+-static void __lockup_detector_reconfigure(void)
++static void __lockup_detector_reconfigure(bool thresh_changed)
+ {
+ 	cpus_read_lock();
+ 	watchdog_hardlockup_stop();
+ 
+ 	softlockup_stop_all();
++	/*
++	 * To prevent watchdog_timer_fn from using the old interval and
++	 * the new watchdog_thresh at the same time, which could lead to
++	 * false softlockup reports, it is necessary to update
++	 * watchdog_thresh only after softlockup_stop_all() has completed.
++	 */
++	if (thresh_changed)
++		watchdog_thresh = READ_ONCE(watchdog_thresh_next);
+ 	set_sample_period();
+ 	lockup_detector_update_enable();
+ 	if (watchdog_enabled && watchdog_thresh)
+@@ -881,7 +890,7 @@ static void __lockup_detector_reconfigure(void)
+ void lockup_detector_reconfigure(void)
+ {
+ 	mutex_lock(&watchdog_mutex);
+-	__lockup_detector_reconfigure();
++	__lockup_detector_reconfigure(false);
+ 	mutex_unlock(&watchdog_mutex);
+ }
+ 
+@@ -901,27 +910,29 @@ static __init void lockup_detector_setup(void)
+ 		return;
+ 
+ 	mutex_lock(&watchdog_mutex);
+-	__lockup_detector_reconfigure();
++	__lockup_detector_reconfigure(false);
+ 	softlockup_initialized = true;
+ 	mutex_unlock(&watchdog_mutex);
+ }
+ 
+ #else /* CONFIG_SOFTLOCKUP_DETECTOR */
+-static void __lockup_detector_reconfigure(void)
++static void __lockup_detector_reconfigure(bool thresh_changed)
+ {
+ 	cpus_read_lock();
+ 	watchdog_hardlockup_stop();
++	if (thresh_changed)
++		watchdog_thresh = READ_ONCE(watchdog_thresh_next);
+ 	lockup_detector_update_enable();
+ 	watchdog_hardlockup_start();
+ 	cpus_read_unlock();
+ }
+ void lockup_detector_reconfigure(void)
+ {
+-	__lockup_detector_reconfigure();
++	__lockup_detector_reconfigure(false);
+ }
+ static inline void lockup_detector_setup(void)
+ {
+-	__lockup_detector_reconfigure();
++	__lockup_detector_reconfigure(false);
+ }
+ #endif /* !CONFIG_SOFTLOCKUP_DETECTOR */
+ 
+@@ -939,11 +950,11 @@ void lockup_detector_soft_poweroff(void)
+ #ifdef CONFIG_SYSCTL
+ 
+ /* Propagate any changes to the watchdog infrastructure */
+-static void proc_watchdog_update(void)
++static void proc_watchdog_update(bool thresh_changed)
+ {
+ 	/* Remove impossible cpus to keep sysctl output clean. */
+ 	cpumask_and(&watchdog_cpumask, &watchdog_cpumask, cpu_possible_mask);
+-	__lockup_detector_reconfigure();
++	__lockup_detector_reconfigure(thresh_changed);
+ }
+ 
+ /*
+@@ -976,7 +987,7 @@ static int proc_watchdog_common(int which, const struct ctl_table *table, int wr
+ 		old = READ_ONCE(*param);
+ 		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+ 		if (!err && old != READ_ONCE(*param))
+-			proc_watchdog_update();
++			proc_watchdog_update(false);
+ 	}
+ 	mutex_unlock(&watchdog_mutex);
+ 	return err;
+@@ -1027,11 +1038,13 @@ static int proc_watchdog_thresh(const struct ctl_table *table, int write,
+ 
+ 	mutex_lock(&watchdog_mutex);
+ 
+-	old = READ_ONCE(watchdog_thresh);
++	watchdog_thresh_next = READ_ONCE(watchdog_thresh);
++
++	old = watchdog_thresh_next;
+ 	err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+ 
+-	if (!err && write && old != READ_ONCE(watchdog_thresh))
+-		proc_watchdog_update();
++	if (!err && write && old != READ_ONCE(watchdog_thresh_next))
++		proc_watchdog_update(true);
+ 
+ 	mutex_unlock(&watchdog_mutex);
+ 	return err;
+@@ -1052,7 +1065,7 @@ static int proc_watchdog_cpumask(const struct ctl_table *table, int write,
+ 
+ 	err = proc_do_large_bitmap(table, write, buffer, lenp, ppos);
+ 	if (!err && write)
+-		proc_watchdog_update();
++		proc_watchdog_update(false);
+ 
+ 	mutex_unlock(&watchdog_mutex);
+ 	return err;
+@@ -1072,7 +1085,7 @@ static struct ctl_table watchdog_sysctls[] = {
+ 	},
+ 	{
+ 		.procname	= "watchdog_thresh",
+-		.data		= &watchdog_thresh,
++		.data		= &watchdog_thresh_next,
+ 		.maxlen		= sizeof(int),
+ 		.mode		= 0644,
+ 		.proc_handler	= proc_watchdog_thresh,
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index a9d64e08dffc7c..3c87eb98609c08 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -7731,7 +7731,8 @@ void __init workqueue_init_early(void)
+ 		restrict_unbound_cpumask("workqueue.unbound_cpus", &wq_cmdline_cpumask);
+ 
+ 	cpumask_copy(wq_requested_unbound_cpumask, wq_unbound_cpumask);
+-
++	cpumask_andnot(wq_isolated_cpumask, cpu_possible_mask,
++						housekeeping_cpumask(HK_TYPE_DOMAIN));
+ 	pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
+ 
+ 	unbound_wq_update_pwq_attrs_buf = alloc_workqueue_attrs();
+diff --git a/lib/Kconfig b/lib/Kconfig
+index b38849af6f1302..b893c9288c1408 100644
+--- a/lib/Kconfig
++++ b/lib/Kconfig
+@@ -767,6 +767,7 @@ config GENERIC_LIB_DEVMEM_IS_ALLOWED
+ 
+ config PLDMFW
+ 	bool
++	select CRC32
+ 	default n
+ 
+ config ASN1_ENCODER
+diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
+index e48375fe5a50ce..b1d7c427bbe3d6 100644
+--- a/lib/Kconfig.debug
++++ b/lib/Kconfig.debug
+@@ -2807,6 +2807,15 @@ config FORTIFY_KUNIT_TEST
+ 	  by the str*() and mem*() family of functions. For testing runtime
+ 	  traps of FORTIFY_SOURCE, see LKDTM's "FORTIFY_*" tests.
+ 
++config LONGEST_SYM_KUNIT_TEST
++	tristate "Test the longest symbol possible" if !KUNIT_ALL_TESTS
++	depends on KUNIT && KPROBES
++	default KUNIT_ALL_TESTS
++	help
++	  Tests the longest symbol possible.
++
++	  If unsure, say N.
++
+ config HW_BREAKPOINT_KUNIT_TEST
+ 	bool "Test hw_breakpoint constraints accounting" if !KUNIT_ALL_TESTS
+ 	depends on HAVE_HW_BREAKPOINT
+diff --git a/lib/Makefile b/lib/Makefile
+index 773adf88af4166..fc878e716825d9 100644
+--- a/lib/Makefile
++++ b/lib/Makefile
+@@ -389,6 +389,8 @@ CFLAGS_fortify_kunit.o += $(DISABLE_STRUCTLEAK_PLUGIN)
+ obj-$(CONFIG_FORTIFY_KUNIT_TEST) += fortify_kunit.o
+ obj-$(CONFIG_SIPHASH_KUNIT_TEST) += siphash_kunit.o
+ obj-$(CONFIG_USERCOPY_KUNIT_TEST) += usercopy_kunit.o
++obj-$(CONFIG_LONGEST_SYM_KUNIT_TEST) += longest_symbol_kunit.o
++CFLAGS_longest_symbol_kunit.o += $(call cc-disable-warning, missing-prototypes)
+ 
+ obj-$(CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED) += devmem_is_allowed.o
+ 
+diff --git a/lib/longest_symbol_kunit.c b/lib/longest_symbol_kunit.c
+new file mode 100644
+index 00000000000000..e3c28ff1807f0d
+--- /dev/null
++++ b/lib/longest_symbol_kunit.c
+@@ -0,0 +1,82 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * Test the longest symbol length. Execute with:
++ *  ./tools/testing/kunit/kunit.py run longest-symbol
++ *  --arch=x86_64 --kconfig_add CONFIG_KPROBES=y --kconfig_add CONFIG_MODULES=y
++ *  --kconfig_add CONFIG_RETPOLINE=n --kconfig_add CONFIG_CFI_CLANG=n
++ *  --kconfig_add CONFIG_MITIGATION_RETPOLINE=n
++ */
++
++#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
++
++#include <kunit/test.h>
++#include <linux/stringify.h>
++#include <linux/kprobes.h>
++#include <linux/kallsyms.h>
++
++#define DI(name) s##name##name
++#define DDI(name) DI(n##name##name)
++#define DDDI(name) DDI(n##name##name)
++#define DDDDI(name) DDDI(n##name##name)
++#define DDDDDI(name) DDDDI(n##name##name)
++
++/* Generate a symbol whose name length is 511 */
++#define LONGEST_SYM_NAME  DDDDDI(g1h2i3j4k5l6m7n)
++
++#define RETURN_LONGEST_SYM 0xAAAAA
++
++noinline int LONGEST_SYM_NAME(void);
++noinline int LONGEST_SYM_NAME(void)
++{
++	return RETURN_LONGEST_SYM;
++}
++
++_Static_assert(sizeof(__stringify(LONGEST_SYM_NAME)) == KSYM_NAME_LEN,
++"Incorrect symbol length found. Expected KSYM_NAME_LEN: "
++__stringify(KSYM_NAME_LEN) ", but found: "
++__stringify(sizeof(LONGEST_SYM_NAME)));
++
++static void test_longest_symbol(struct kunit *test)
++{
++	KUNIT_EXPECT_EQ(test, RETURN_LONGEST_SYM, LONGEST_SYM_NAME());
++};
++
++static void test_longest_symbol_kallsyms(struct kunit *test)
++{
++	unsigned long (*kallsyms_lookup_name)(const char *name);
++	static int (*longest_sym)(void);
++
++	struct kprobe kp = {
++		.symbol_name = "kallsyms_lookup_name",
++	};
++
++	if (register_kprobe(&kp) < 0) {
++		pr_info("%s: kprobe not registered", __func__);
++		KUNIT_FAIL(test, "test_longest_symbol kallsyms: kprobe not registered\n");
++		return;
++	}
++
++	kunit_warn(test, "test_longest_symbol kallsyms: kprobe registered\n");
++	kallsyms_lookup_name = (unsigned long (*)(const char *name))kp.addr;
++	unregister_kprobe(&kp);
++
++	longest_sym =
++		(void *) kallsyms_lookup_name(__stringify(LONGEST_SYM_NAME));
++	KUNIT_EXPECT_EQ(test, RETURN_LONGEST_SYM, longest_sym());
++};
++
++static struct kunit_case longest_symbol_test_cases[] = {
++	KUNIT_CASE(test_longest_symbol),
++	KUNIT_CASE(test_longest_symbol_kallsyms),
++	{}
++};
++
++static struct kunit_suite longest_symbol_test_suite = {
++	.name = "longest-symbol",
++	.test_cases = longest_symbol_test_cases,
++};
++kunit_test_suite(longest_symbol_test_suite);
++
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("Test the longest symbol length");
++MODULE_AUTHOR("Sergio González Collado");
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index ad646fe6688a49..9c6a4e855481af 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -87,7 +87,7 @@ static void hugetlb_vma_lock_free(struct vm_area_struct *vma);
+ static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma);
+ static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma);
+ static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
+-		unsigned long start, unsigned long end);
++		unsigned long start, unsigned long end, bool take_locks);
+ static struct resv_map *vma_resv_map(struct vm_area_struct *vma);
+ 
+ static void hugetlb_free_folio(struct folio *folio)
+@@ -5071,26 +5071,40 @@ static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
+ {
+ 	if (addr & ~(huge_page_mask(hstate_vma(vma))))
+ 		return -EINVAL;
++	return 0;
++}
+ 
++void hugetlb_split(struct vm_area_struct *vma, unsigned long addr)
++{
+ 	/*
+ 	 * PMD sharing is only possible for PUD_SIZE-aligned address ranges
+ 	 * in HugeTLB VMAs. If we will lose PUD_SIZE alignment due to this
+ 	 * split, unshare PMDs in the PUD_SIZE interval surrounding addr now.
++	 * This function is called in the middle of a VMA split operation, with
++	 * MM, VMA and rmap all write-locked to prevent concurrent page table
++	 * walks (except hardware and gup_fast()).
+ 	 */
++	vma_assert_write_locked(vma);
++	i_mmap_assert_write_locked(vma->vm_file->f_mapping);
++
+ 	if (addr & ~PUD_MASK) {
+-		/*
+-		 * hugetlb_vm_op_split is called right before we attempt to
+-		 * split the VMA. We will need to unshare PMDs in the old and
+-		 * new VMAs, so let's unshare before we split.
+-		 */
+ 		unsigned long floor = addr & PUD_MASK;
+ 		unsigned long ceil = floor + PUD_SIZE;
+ 
+-		if (floor >= vma->vm_start && ceil <= vma->vm_end)
+-			hugetlb_unshare_pmds(vma, floor, ceil);
++		if (floor >= vma->vm_start && ceil <= vma->vm_end) {
++			/*
++			 * Locking:
++			 * Use take_locks=false here.
++			 * The file rmap lock is already held.
++			 * The hugetlb VMA lock can't be taken when we already
++			 * hold the file rmap lock, and we don't need it because
++			 * its purpose is to synchronize against concurrent page
++			 * table walks, which are not possible thanks to the
++			 * locks held by our caller.
++			 */
++			hugetlb_unshare_pmds(vma, floor, ceil, /* take_locks = */ false);
++		}
+ 	}
+-
+-	return 0;
+ }
+ 
+ static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
+@@ -7252,6 +7266,13 @@ int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
+ 		return 0;
+ 
+ 	pud_clear(pud);
++	/*
++	 * Once our caller drops the rmap lock, some other process might be
++	 * using this page table as a normal, non-hugetlb page table.
++	 * Wait for pending gup_fast() in other threads to finish before letting
++	 * that happen.
++	 */
++	tlb_remove_table_sync_one();
+ 	ptdesc_pmd_pts_dec(virt_to_ptdesc(ptep));
+ 	mm_dec_nr_pmds(mm);
+ 	return 1;
+@@ -7484,9 +7505,16 @@ void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int re
+ 	}
+ }
+ 
++/*
++ * If @take_locks is false, the caller must ensure that no concurrent page table
++ * access can happen (except for gup_fast() and hardware page walks).
++ * If @take_locks is true, we take the hugetlb VMA lock (to lock out things like
++ * concurrent page fault handling) and the file rmap lock.
++ */
+ static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
+ 				   unsigned long start,
+-				   unsigned long end)
++				   unsigned long end,
++				   bool take_locks)
+ {
+ 	struct hstate *h = hstate_vma(vma);
+ 	unsigned long sz = huge_page_size(h);
+@@ -7510,8 +7538,12 @@ static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
+ 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
+ 				start, end);
+ 	mmu_notifier_invalidate_range_start(&range);
+-	hugetlb_vma_lock_write(vma);
+-	i_mmap_lock_write(vma->vm_file->f_mapping);
++	if (take_locks) {
++		hugetlb_vma_lock_write(vma);
++		i_mmap_lock_write(vma->vm_file->f_mapping);
++	} else {
++		i_mmap_assert_write_locked(vma->vm_file->f_mapping);
++	}
+ 	for (address = start; address < end; address += PUD_SIZE) {
+ 		ptep = hugetlb_walk(vma, address, sz);
+ 		if (!ptep)
+@@ -7521,8 +7553,10 @@ static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
+ 		spin_unlock(ptl);
+ 	}
+ 	flush_hugetlb_tlb_range(vma, start, end);
+-	i_mmap_unlock_write(vma->vm_file->f_mapping);
+-	hugetlb_vma_unlock_write(vma);
++	if (take_locks) {
++		i_mmap_unlock_write(vma->vm_file->f_mapping);
++		hugetlb_vma_unlock_write(vma);
++	}
+ 	/*
+ 	 * No need to call mmu_notifier_arch_invalidate_secondary_tlbs(), see
+ 	 * Documentation/mm/mmu_notifier.rst.
+@@ -7537,7 +7571,8 @@ static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
+ void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
+ {
+ 	hugetlb_unshare_pmds(vma, ALIGN(vma->vm_start, PUD_SIZE),
+-			ALIGN_DOWN(vma->vm_end, PUD_SIZE));
++			ALIGN_DOWN(vma->vm_end, PUD_SIZE),
++			/* take_locks = */ true);
+ }
+ 
+ #ifdef CONFIG_CMA
+diff --git a/mm/madvise.c b/mm/madvise.c
+index c211e8fa4e49bb..2e66a08fd4f4c6 100644
+--- a/mm/madvise.c
++++ b/mm/madvise.c
+@@ -495,6 +495,7 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
+ 					pte_offset_map_lock(mm, pmd, addr, &ptl);
+ 				if (!start_pte)
+ 					break;
++				flush_tlb_batched_pending(mm);
+ 				arch_enter_lazy_mmu_mode();
+ 				if (!err)
+ 					nr = 0;
+@@ -728,6 +729,7 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
+ 				start_pte = pte;
+ 				if (!start_pte)
+ 					break;
++				flush_tlb_batched_pending(mm);
+ 				arch_enter_lazy_mmu_mode();
+ 				if (!err)
+ 					nr = 0;
+diff --git a/mm/page-writeback.c b/mm/page-writeback.c
+index fcd4c1439cb9c3..bfb3f903bb6d5e 100644
+--- a/mm/page-writeback.c
++++ b/mm/page-writeback.c
+@@ -543,8 +543,8 @@ static int dirty_ratio_handler(const struct ctl_table *table, int write, void *b
+ 
+ 	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+ 	if (ret == 0 && write && vm_dirty_ratio != old_ratio) {
+-		writeback_set_ratelimit();
+ 		vm_dirty_bytes = 0;
++		writeback_set_ratelimit();
+ 	}
+ 	return ret;
+ }
+diff --git a/mm/vma.c b/mm/vma.c
+index 9b4517944901dd..1d82ec4ee7bb52 100644
+--- a/mm/vma.c
++++ b/mm/vma.c
+@@ -416,7 +416,14 @@ static int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
+ 	init_vma_prep(&vp, vma);
+ 	vp.insert = new;
+ 	vma_prepare(&vp);
++
++	/*
++	 * Get rid of huge pages and shared page tables straddling the split
++	 * boundary.
++	 */
+ 	vma_adjust_trans_huge(vma, vma->vm_start, addr, 0);
++	if (is_vm_hugetlb_page(vma))
++		hugetlb_split(vma, addr);
+ 
+ 	if (new_below) {
+ 		vma->vm_start = addr;
+diff --git a/mm/vma_internal.h b/mm/vma_internal.h
+index b930ab12a5878e..1dd119f266e64e 100644
+--- a/mm/vma_internal.h
++++ b/mm/vma_internal.h
+@@ -17,6 +17,7 @@
+ #include <linux/file.h>
+ #include <linux/fs.h>
+ #include <linux/huge_mm.h>
++#include <linux/hugetlb.h>
+ #include <linux/hugetlb_inline.h>
+ #include <linux/kernel.h>
+ #include <linux/khugepaged.h>
+diff --git a/net/atm/common.c b/net/atm/common.c
+index 9b75699992ff92..d7f7976ea13ac6 100644
+--- a/net/atm/common.c
++++ b/net/atm/common.c
+@@ -635,6 +635,7 @@ int vcc_sendmsg(struct socket *sock, struct msghdr *m, size_t size)
+ 
+ 	skb->dev = NULL; /* for paths shared with net_device interfaces */
+ 	if (!copy_from_iter_full(skb_put(skb, size), size, &m->msg_iter)) {
++		atm_return_tx(vcc, skb);
+ 		kfree_skb(skb);
+ 		error = -EFAULT;
+ 		goto out;
+diff --git a/net/atm/lec.c b/net/atm/lec.c
+index a948dd47c3f347..42e8047c651052 100644
+--- a/net/atm/lec.c
++++ b/net/atm/lec.c
+@@ -124,6 +124,7 @@ static unsigned char bus_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+ 
+ /* Device structures */
+ static struct net_device *dev_lec[MAX_LEC_ITF];
++static DEFINE_MUTEX(lec_mutex);
+ 
+ #if IS_ENABLED(CONFIG_BRIDGE)
+ static void lec_handle_bridge(struct sk_buff *skb, struct net_device *dev)
+@@ -685,6 +686,7 @@ static int lec_vcc_attach(struct atm_vcc *vcc, void __user *arg)
+ 	int bytes_left;
+ 	struct atmlec_ioc ioc_data;
+ 
++	lockdep_assert_held(&lec_mutex);
+ 	/* Lecd must be up in this case */
+ 	bytes_left = copy_from_user(&ioc_data, arg, sizeof(struct atmlec_ioc));
+ 	if (bytes_left != 0)
+@@ -710,6 +712,7 @@ static int lec_vcc_attach(struct atm_vcc *vcc, void __user *arg)
+ 
+ static int lec_mcast_attach(struct atm_vcc *vcc, int arg)
+ {
++	lockdep_assert_held(&lec_mutex);
+ 	if (arg < 0 || arg >= MAX_LEC_ITF)
+ 		return -EINVAL;
+ 	arg = array_index_nospec(arg, MAX_LEC_ITF);
+@@ -725,6 +728,7 @@ static int lecd_attach(struct atm_vcc *vcc, int arg)
+ 	int i;
+ 	struct lec_priv *priv;
+ 
++	lockdep_assert_held(&lec_mutex);
+ 	if (arg < 0)
+ 		arg = 0;
+ 	if (arg >= MAX_LEC_ITF)
+@@ -742,6 +746,7 @@ static int lecd_attach(struct atm_vcc *vcc, int arg)
+ 		snprintf(dev_lec[i]->name, IFNAMSIZ, "lec%d", i);
+ 		if (register_netdev(dev_lec[i])) {
+ 			free_netdev(dev_lec[i]);
++			dev_lec[i] = NULL;
+ 			return -EINVAL;
+ 		}
+ 
+@@ -904,7 +909,6 @@ static void *lec_itf_walk(struct lec_state *state, loff_t *l)
+ 	v = (dev && netdev_priv(dev)) ?
+ 		lec_priv_walk(state, l, netdev_priv(dev)) : NULL;
+ 	if (!v && dev) {
+-		dev_put(dev);
+ 		/* Partial state reset for the next time we get called */
+ 		dev = NULL;
+ 	}
+@@ -928,6 +932,7 @@ static void *lec_seq_start(struct seq_file *seq, loff_t *pos)
+ {
+ 	struct lec_state *state = seq->private;
+ 
++	mutex_lock(&lec_mutex);
+ 	state->itf = 0;
+ 	state->dev = NULL;
+ 	state->locked = NULL;
+@@ -945,8 +950,9 @@ static void lec_seq_stop(struct seq_file *seq, void *v)
+ 	if (state->dev) {
+ 		spin_unlock_irqrestore(&state->locked->lec_arp_lock,
+ 				       state->flags);
+-		dev_put(state->dev);
++		state->dev = NULL;
+ 	}
++	mutex_unlock(&lec_mutex);
+ }
+ 
+ static void *lec_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+@@ -1003,6 +1009,7 @@ static int lane_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+ 		return -ENOIOCTLCMD;
+ 	}
+ 
++	mutex_lock(&lec_mutex);
+ 	switch (cmd) {
+ 	case ATMLEC_CTRL:
+ 		err = lecd_attach(vcc, (int)arg);
+@@ -1017,6 +1024,7 @@ static int lane_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+ 		break;
+ 	}
+ 
++	mutex_unlock(&lec_mutex);
+ 	return err;
+ }
+ 
+diff --git a/net/atm/raw.c b/net/atm/raw.c
+index 2b5f78a7ec3e4a..1e6511ec842cbc 100644
+--- a/net/atm/raw.c
++++ b/net/atm/raw.c
+@@ -36,7 +36,7 @@ static void atm_pop_raw(struct atm_vcc *vcc, struct sk_buff *skb)
+ 
+ 	pr_debug("(%d) %d -= %d\n",
+ 		 vcc->vci, sk_wmem_alloc_get(sk), ATM_SKB(skb)->acct_truesize);
+-	WARN_ON(refcount_sub_and_test(ATM_SKB(skb)->acct_truesize, &sk->sk_wmem_alloc));
++	atm_return_tx(vcc, skb);
+ 	dev_kfree_skb_any(skb);
+ 	sk->sk_write_space(sk);
+ }
+diff --git a/net/bridge/br_mst.c b/net/bridge/br_mst.c
+index 1820f09ff59ceb..3f24b4ee49c274 100644
+--- a/net/bridge/br_mst.c
++++ b/net/bridge/br_mst.c
+@@ -80,10 +80,10 @@ static void br_mst_vlan_set_state(struct net_bridge_vlan_group *vg,
+ 	if (br_vlan_get_state(v) == state)
+ 		return;
+ 
+-	br_vlan_set_state(v, state);
+-
+ 	if (v->vid == vg->pvid)
+ 		br_vlan_set_pvid_state(vg, state);
++
++	br_vlan_set_state(v, state);
+ }
+ 
+ int br_mst_set_state(struct net_bridge_port *p, u16 msti, u8 state,
+diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
+index b2ae0d2434d2eb..733ff6b758f691 100644
+--- a/net/bridge/br_multicast.c
++++ b/net/bridge/br_multicast.c
+@@ -2105,12 +2105,17 @@ static void __br_multicast_enable_port_ctx(struct net_bridge_mcast_port *pmctx)
+ 	}
+ }
+ 
+-void br_multicast_enable_port(struct net_bridge_port *port)
++static void br_multicast_enable_port_ctx(struct net_bridge_mcast_port *pmctx)
+ {
+-	struct net_bridge *br = port->br;
++	struct net_bridge *br = pmctx->port->br;
+ 
+ 	spin_lock_bh(&br->multicast_lock);
+-	__br_multicast_enable_port_ctx(&port->multicast_ctx);
++	if (br_multicast_port_ctx_is_vlan(pmctx) &&
++	    !(pmctx->vlan->priv_flags & BR_VLFLAG_MCAST_ENABLED)) {
++		spin_unlock_bh(&br->multicast_lock);
++		return;
++	}
++	__br_multicast_enable_port_ctx(pmctx);
+ 	spin_unlock_bh(&br->multicast_lock);
+ }
+ 
+@@ -2137,11 +2142,67 @@ static void __br_multicast_disable_port_ctx(struct net_bridge_mcast_port *pmctx)
+ 	br_multicast_rport_del_notify(pmctx, del);
+ }
+ 
++static void br_multicast_disable_port_ctx(struct net_bridge_mcast_port *pmctx)
++{
++	struct net_bridge *br = pmctx->port->br;
++
++	spin_lock_bh(&br->multicast_lock);
++	if (br_multicast_port_ctx_is_vlan(pmctx) &&
++	    !(pmctx->vlan->priv_flags & BR_VLFLAG_MCAST_ENABLED)) {
++		spin_unlock_bh(&br->multicast_lock);
++		return;
++	}
++
++	__br_multicast_disable_port_ctx(pmctx);
++	spin_unlock_bh(&br->multicast_lock);
++}
++
++static void br_multicast_toggle_port(struct net_bridge_port *port, bool on)
++{
++#if IS_ENABLED(CONFIG_BRIDGE_VLAN_FILTERING)
++	if (br_opt_get(port->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) {
++		struct net_bridge_vlan_group *vg;
++		struct net_bridge_vlan *vlan;
++
++		rcu_read_lock();
++		vg = nbp_vlan_group_rcu(port);
++		if (!vg) {
++			rcu_read_unlock();
++			return;
++		}
++
++		/* iterate each vlan, toggle vlan multicast context */
++		list_for_each_entry_rcu(vlan, &vg->vlan_list, vlist) {
++			struct net_bridge_mcast_port *pmctx =
++						&vlan->port_mcast_ctx;
++			u8 state = br_vlan_get_state(vlan);
++			/* enable vlan multicast context when state is
++			 * LEARNING or FORWARDING
++			 */
++			if (on && br_vlan_state_allowed(state, true))
++				br_multicast_enable_port_ctx(pmctx);
++			else
++				br_multicast_disable_port_ctx(pmctx);
++		}
++		rcu_read_unlock();
++		return;
++	}
++#endif
++	/* toggle port multicast context when vlan snooping is disabled */
++	if (on)
++		br_multicast_enable_port_ctx(&port->multicast_ctx);
++	else
++		br_multicast_disable_port_ctx(&port->multicast_ctx);
++}
++
++void br_multicast_enable_port(struct net_bridge_port *port)
++{
++	br_multicast_toggle_port(port, true);
++}
++
+ void br_multicast_disable_port(struct net_bridge_port *port)
+ {
+-	spin_lock_bh(&port->br->multicast_lock);
+-	__br_multicast_disable_port_ctx(&port->multicast_ctx);
+-	spin_unlock_bh(&port->br->multicast_lock);
++	br_multicast_toggle_port(port, false);
+ }
+ 
+ static int __grp_src_delete_marked(struct net_bridge_port_group *pg)
+@@ -4211,6 +4272,32 @@ static void __br_multicast_stop(struct net_bridge_mcast *brmctx)
+ #endif
+ }
+ 
++void br_multicast_update_vlan_mcast_ctx(struct net_bridge_vlan *v, u8 state)
++{
++#if IS_ENABLED(CONFIG_BRIDGE_VLAN_FILTERING)
++	struct net_bridge *br;
++
++	if (!br_vlan_should_use(v))
++		return;
++
++	if (br_vlan_is_master(v))
++		return;
++
++	br = v->port->br;
++
++	if (!br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED))
++		return;
++
++	if (br_vlan_state_allowed(state, true))
++		br_multicast_enable_port_ctx(&v->port_mcast_ctx);
++
++	/* Multicast is not disabled for the vlan when it goes into
++	 * blocking state because the timers will expire and stop by
++	 * themselves without sending more queries.
++	 */
++#endif
++}
++
+ void br_multicast_toggle_one_vlan(struct net_bridge_vlan *vlan, bool on)
+ {
+ 	struct net_bridge *br;
+@@ -4304,9 +4391,9 @@ int br_multicast_toggle_vlan_snooping(struct net_bridge *br, bool on,
+ 		__br_multicast_open(&br->multicast_ctx);
+ 	list_for_each_entry(p, &br->port_list, list) {
+ 		if (on)
+-			br_multicast_disable_port(p);
++			br_multicast_disable_port_ctx(&p->multicast_ctx);
+ 		else
+-			br_multicast_enable_port(p);
++			br_multicast_enable_port_ctx(&p->multicast_ctx);
+ 	}
+ 
+ 	list_for_each_entry(vlan, &vg->vlan_list, vlist)
+diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
+index df502cc1191c3f..6a1bce8959afa2 100644
+--- a/net/bridge/br_private.h
++++ b/net/bridge/br_private.h
+@@ -1053,6 +1053,7 @@ void br_multicast_port_ctx_init(struct net_bridge_port *port,
+ 				struct net_bridge_vlan *vlan,
+ 				struct net_bridge_mcast_port *pmctx);
+ void br_multicast_port_ctx_deinit(struct net_bridge_mcast_port *pmctx);
++void br_multicast_update_vlan_mcast_ctx(struct net_bridge_vlan *v, u8 state);
+ void br_multicast_toggle_one_vlan(struct net_bridge_vlan *vlan, bool on);
+ int br_multicast_toggle_vlan_snooping(struct net_bridge *br, bool on,
+ 				      struct netlink_ext_ack *extack);
+@@ -1503,6 +1504,11 @@ static inline void br_multicast_port_ctx_deinit(struct net_bridge_mcast_port *pm
+ {
+ }
+ 
++static inline void br_multicast_update_vlan_mcast_ctx(struct net_bridge_vlan *v,
++						      u8 state)
++{
++}
++
+ static inline void br_multicast_toggle_one_vlan(struct net_bridge_vlan *vlan,
+ 						bool on)
+ {
+@@ -1854,7 +1860,9 @@ bool br_vlan_global_opts_can_enter_range(const struct net_bridge_vlan *v_curr,
+ bool br_vlan_global_opts_fill(struct sk_buff *skb, u16 vid, u16 vid_range,
+ 			      const struct net_bridge_vlan *v_opts);
+ 
+-/* vlan state manipulation helpers using *_ONCE to annotate lock-free access */
++/* vlan state manipulation helpers using *_ONCE to annotate lock-free access,
++ * while br_vlan_set_state() may access data protected by multicast_lock.
++ */
+ static inline u8 br_vlan_get_state(const struct net_bridge_vlan *v)
+ {
+ 	return READ_ONCE(v->state);
+@@ -1863,6 +1871,7 @@ static inline u8 br_vlan_get_state(const struct net_bridge_vlan *v)
+ static inline void br_vlan_set_state(struct net_bridge_vlan *v, u8 state)
+ {
+ 	WRITE_ONCE(v->state, state);
++	br_multicast_update_vlan_mcast_ctx(v, state);
+ }
+ 
+ static inline u8 br_vlan_get_pvid_state(const struct net_bridge_vlan_group *vg)
+diff --git a/net/core/filter.c b/net/core/filter.c
+index 99b23fd2f509c9..1c0cf6f2fff52b 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -1980,10 +1980,11 @@ BPF_CALL_5(bpf_l4_csum_replace, struct sk_buff *, skb, u32, offset,
+ 	bool is_pseudo = flags & BPF_F_PSEUDO_HDR;
+ 	bool is_mmzero = flags & BPF_F_MARK_MANGLED_0;
+ 	bool do_mforce = flags & BPF_F_MARK_ENFORCE;
++	bool is_ipv6   = flags & BPF_F_IPV6;
+ 	__sum16 *ptr;
+ 
+ 	if (unlikely(flags & ~(BPF_F_MARK_MANGLED_0 | BPF_F_MARK_ENFORCE |
+-			       BPF_F_PSEUDO_HDR | BPF_F_HDR_FIELD_MASK)))
++			       BPF_F_PSEUDO_HDR | BPF_F_HDR_FIELD_MASK | BPF_F_IPV6)))
+ 		return -EINVAL;
+ 	if (unlikely(offset > 0xffff || offset & 1))
+ 		return -EFAULT;
+@@ -1999,7 +2000,7 @@ BPF_CALL_5(bpf_l4_csum_replace, struct sk_buff *, skb, u32, offset,
+ 		if (unlikely(from != 0))
+ 			return -EINVAL;
+ 
+-		inet_proto_csum_replace_by_diff(ptr, skb, to, is_pseudo);
++		inet_proto_csum_replace_by_diff(ptr, skb, to, is_pseudo, is_ipv6);
+ 		break;
+ 	case 2:
+ 		inet_proto_csum_replace2(ptr, skb, from, to, is_pseudo);
+@@ -3249,6 +3250,13 @@ static const struct bpf_func_proto bpf_skb_vlan_pop_proto = {
+ 	.arg1_type      = ARG_PTR_TO_CTX,
+ };
+ 
++static void bpf_skb_change_protocol(struct sk_buff *skb, u16 proto)
++{
++	skb->protocol = htons(proto);
++	if (skb_valid_dst(skb))
++		skb_dst_drop(skb);
++}
++
+ static int bpf_skb_generic_push(struct sk_buff *skb, u32 off, u32 len)
+ {
+ 	/* Caller already did skb_cow() with len as headroom,
+@@ -3345,7 +3353,7 @@ static int bpf_skb_proto_4_to_6(struct sk_buff *skb)
+ 		}
+ 	}
+ 
+-	skb->protocol = htons(ETH_P_IPV6);
++	bpf_skb_change_protocol(skb, ETH_P_IPV6);
+ 	skb_clear_hash(skb);
+ 
+ 	return 0;
+@@ -3375,7 +3383,7 @@ static int bpf_skb_proto_6_to_4(struct sk_buff *skb)
+ 		}
+ 	}
+ 
+-	skb->protocol = htons(ETH_P_IP);
++	bpf_skb_change_protocol(skb, ETH_P_IP);
+ 	skb_clear_hash(skb);
+ 
+ 	return 0;
+@@ -3566,10 +3574,10 @@ static int bpf_skb_net_grow(struct sk_buff *skb, u32 off, u32 len_diff,
+ 		/* Match skb->protocol to new outer l3 protocol */
+ 		if (skb->protocol == htons(ETH_P_IP) &&
+ 		    flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6)
+-			skb->protocol = htons(ETH_P_IPV6);
++			bpf_skb_change_protocol(skb, ETH_P_IPV6);
+ 		else if (skb->protocol == htons(ETH_P_IPV6) &&
+ 			 flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV4)
+-			skb->protocol = htons(ETH_P_IP);
++			bpf_skb_change_protocol(skb, ETH_P_IP);
+ 	}
+ 
+ 	if (skb_is_gso(skb)) {
+@@ -3622,10 +3630,10 @@ static int bpf_skb_net_shrink(struct sk_buff *skb, u32 off, u32 len_diff,
+ 	/* Match skb->protocol to new outer l3 protocol */
+ 	if (skb->protocol == htons(ETH_P_IP) &&
+ 	    flags & BPF_F_ADJ_ROOM_DECAP_L3_IPV6)
+-		skb->protocol = htons(ETH_P_IPV6);
++		bpf_skb_change_protocol(skb, ETH_P_IPV6);
+ 	else if (skb->protocol == htons(ETH_P_IPV6) &&
+ 		 flags & BPF_F_ADJ_ROOM_DECAP_L3_IPV4)
+-		skb->protocol = htons(ETH_P_IP);
++		bpf_skb_change_protocol(skb, ETH_P_IP);
+ 
+ 	if (skb_is_gso(skb)) {
+ 		struct skb_shared_info *shinfo = skb_shinfo(skb);
+diff --git a/net/core/page_pool.c b/net/core/page_pool.c
+index 0f23b3126bdaf4..b1c3e0ad6dbf48 100644
+--- a/net/core/page_pool.c
++++ b/net/core/page_pool.c
+@@ -829,6 +829,10 @@ static bool page_pool_napi_local(const struct page_pool *pool)
+ 	const struct napi_struct *napi;
+ 	u32 cpuid;
+ 
++	/* On PREEMPT_RT the softirq can be preempted by the consumer */
++	if (IS_ENABLED(CONFIG_PREEMPT_RT))
++		return false;
++
+ 	if (unlikely(!in_softirq()))
+ 		return false;
+ 
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index fdb36165c58f5b..cf54593149ccee 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -6197,9 +6197,6 @@ int skb_ensure_writable(struct sk_buff *skb, unsigned int write_len)
+ 	if (!pskb_may_pull(skb, write_len))
+ 		return -ENOMEM;
+ 
+-	if (!skb_frags_readable(skb))
+-		return -EFAULT;
+-
+ 	if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
+ 		return 0;
+ 
+diff --git a/net/core/skmsg.c b/net/core/skmsg.c
+index a8d238dd982af0..97f52394d1eb16 100644
+--- a/net/core/skmsg.c
++++ b/net/core/skmsg.c
+@@ -689,7 +689,8 @@ static void sk_psock_backlog(struct work_struct *work)
+ 			if (ret <= 0) {
+ 				if (ret == -EAGAIN) {
+ 					sk_psock_skb_state(psock, state, len, off);
+-
++					/* Restore redir info we cleared before */
++					skb_bpf_set_redir(skb, psock->sk, ingress);
+ 					/* Delay slightly to prioritize any
+ 					 * other work that might be here.
+ 					 */
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 3c5386c76d6fe4..9c63da2829f6ee 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -3930,7 +3930,7 @@ static int assign_proto_idx(struct proto *prot)
+ {
+ 	prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);
+ 
+-	if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
++	if (unlikely(prot->inuse_idx == PROTO_INUSE_NR)) {
+ 		pr_err("PROTO_INUSE_NR exhausted\n");
+ 		return -ENOSPC;
+ 	}
+@@ -3941,7 +3941,7 @@ static int assign_proto_idx(struct proto *prot)
+ 
+ static void release_proto_idx(struct proto *prot)
+ {
+-	if (prot->inuse_idx != PROTO_INUSE_NR - 1)
++	if (prot->inuse_idx != PROTO_INUSE_NR)
+ 		clear_bit(prot->inuse_idx, proto_inuse_idx);
+ }
+ #else
+diff --git a/net/core/utils.c b/net/core/utils.c
+index 27f4cffaae05d9..b8c21a859e27b1 100644
+--- a/net/core/utils.c
++++ b/net/core/utils.c
+@@ -473,11 +473,11 @@ void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
+ EXPORT_SYMBOL(inet_proto_csum_replace16);
+ 
+ void inet_proto_csum_replace_by_diff(__sum16 *sum, struct sk_buff *skb,
+-				     __wsum diff, bool pseudohdr)
++				     __wsum diff, bool pseudohdr, bool ipv6)
+ {
+ 	if (skb->ip_summed != CHECKSUM_PARTIAL) {
+ 		csum_replace_by_diff(sum, diff);
+-		if (skb->ip_summed == CHECKSUM_COMPLETE && pseudohdr)
++		if (skb->ip_summed == CHECKSUM_COMPLETE && pseudohdr && !ipv6)
+ 			skb->csum = ~csum_sub(diff, skb->csum);
+ 	} else if (pseudohdr) {
+ 		*sum = ~csum_fold(csum_add(diff, csum_unfold(*sum)));
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 41b320f0c20ebf..88d7c96bfac06f 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -189,7 +189,11 @@ const __u8 ip_tos2prio[16] = {
+ EXPORT_SYMBOL(ip_tos2prio);
+ 
+ static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
++#ifndef CONFIG_PREEMPT_RT
+ #define RT_CACHE_STAT_INC(field) raw_cpu_inc(rt_cache_stat.field)
++#else
++#define RT_CACHE_STAT_INC(field) this_cpu_inc(rt_cache_stat.field)
++#endif
+ 
+ #ifdef CONFIG_PROC_FS
+ static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
+diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
+index 32b28fc21b63c0..408985eb74eefa 100644
+--- a/net/ipv4/tcp_fastopen.c
++++ b/net/ipv4/tcp_fastopen.c
+@@ -3,6 +3,7 @@
+ #include <linux/tcp.h>
+ #include <linux/rcupdate.h>
+ #include <net/tcp.h>
++#include <net/busy_poll.h>
+ 
+ void tcp_fastopen_init_key_once(struct net *net)
+ {
+@@ -279,6 +280,8 @@ static struct sock *tcp_fastopen_create_child(struct sock *sk,
+ 
+ 	refcount_set(&req->rsk_refcnt, 2);
+ 
++	sk_mark_napi_id_set(child, skb);
++
+ 	/* Now finish processing the fastopen child socket. */
+ 	tcp_init_transfer(child, BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB, skb);
+ 
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index d29219e067b7fd..d176e7888a203c 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -665,10 +665,12 @@ EXPORT_SYMBOL(tcp_initialize_rcv_mss);
+  */
+ static void tcp_rcv_rtt_update(struct tcp_sock *tp, u32 sample, int win_dep)
+ {
+-	u32 new_sample = tp->rcv_rtt_est.rtt_us;
+-	long m = sample;
++	u32 new_sample, old_sample = tp->rcv_rtt_est.rtt_us;
++	long m = sample << 3;
+ 
+-	if (new_sample != 0) {
++	if (old_sample == 0 || m < old_sample) {
++		new_sample = m;
++	} else {
+ 		/* If we sample in larger samples in the non-timestamp
+ 		 * case, we could grossly overestimate the RTT especially
+ 		 * with chatty applications or bulk transfer apps which
+@@ -679,17 +681,9 @@ static void tcp_rcv_rtt_update(struct tcp_sock *tp, u32 sample, int win_dep)
+ 		 * else with timestamps disabled convergence takes too
+ 		 * long.
+ 		 */
+-		if (!win_dep) {
+-			m -= (new_sample >> 3);
+-			new_sample += m;
+-		} else {
+-			m <<= 3;
+-			if (m < new_sample)
+-				new_sample = m;
+-		}
+-	} else {
+-		/* No previous measure. */
+-		new_sample = m << 3;
++		if (win_dep)
++			return;
++		new_sample = old_sample - (old_sample >> 3) + sample;
+ 	}
+ 
+ 	tp->rcv_rtt_est.rtt_us = new_sample;
+@@ -713,7 +707,7 @@ static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp)
+ 	tp->rcv_rtt_est.time = tp->tcp_mstamp;
+ }
+ 
+-static s32 tcp_rtt_tsopt_us(const struct tcp_sock *tp)
++static s32 tcp_rtt_tsopt_us(const struct tcp_sock *tp, u32 min_delta)
+ {
+ 	u32 delta, delta_us;
+ 
+@@ -723,7 +717,7 @@ static s32 tcp_rtt_tsopt_us(const struct tcp_sock *tp)
+ 
+ 	if (likely(delta < INT_MAX / (USEC_PER_SEC / TCP_TS_HZ))) {
+ 		if (!delta)
+-			delta = 1;
++			delta = min_delta;
+ 		delta_us = delta * (USEC_PER_SEC / TCP_TS_HZ);
+ 		return delta_us;
+ 	}
+@@ -741,9 +735,9 @@ static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,
+ 
+ 	if (TCP_SKB_CB(skb)->end_seq -
+ 	    TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss) {
+-		s32 delta = tcp_rtt_tsopt_us(tp);
++		s32 delta = tcp_rtt_tsopt_us(tp, 0);
+ 
+-		if (delta >= 0)
++		if (delta > 0)
+ 			tcp_rcv_rtt_update(tp, delta, 0);
+ 	}
+ }
+@@ -755,8 +749,7 @@ static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,
+ void tcp_rcv_space_adjust(struct sock *sk)
+ {
+ 	struct tcp_sock *tp = tcp_sk(sk);
+-	u32 copied;
+-	int time;
++	int time, inq, copied;
+ 
+ 	trace_tcp_rcv_space_adjust(sk);
+ 
+@@ -767,6 +760,9 @@ void tcp_rcv_space_adjust(struct sock *sk)
+ 
+ 	/* Number of bytes copied to user in last RTT */
+ 	copied = tp->copied_seq - tp->rcvq_space.seq;
++	/* Number of bytes in receive queue. */
++	inq = tp->rcv_nxt - tp->copied_seq;
++	copied -= inq;
+ 	if (copied <= tp->rcvq_space.space)
+ 		goto new_measure;
+ 
+@@ -2486,20 +2482,33 @@ static inline bool tcp_packet_delayed(const struct tcp_sock *tp)
+ {
+ 	const struct sock *sk = (const struct sock *)tp;
+ 
+-	if (tp->retrans_stamp &&
+-	    tcp_tsopt_ecr_before(tp, tp->retrans_stamp))
+-		return true;  /* got echoed TS before first retransmission */
++	/* Received an echoed timestamp before the first retransmission? */
++	if (tp->retrans_stamp)
++		return tcp_tsopt_ecr_before(tp, tp->retrans_stamp);
++
++	/* We set tp->retrans_stamp upon the first retransmission of a loss
++	 * recovery episode, so normally if tp->retrans_stamp is 0 then no
++	 * retransmission has happened yet (likely due to TSQ, which can cause
++	 * fast retransmits to be delayed). So if snd_una advanced while
++	 * tp->retrans_stamp is 0 then apparently a packet was merely delayed,
++	 * not lost. But there are exceptions where we retransmit but then
++	 * clear tp->retrans_stamp, so we check for those exceptions.
++	 */
+ 
+-	/* Check if nothing was retransmitted (retrans_stamp==0), which may
+-	 * happen in fast recovery due to TSQ. But we ignore zero retrans_stamp
+-	 * in TCP_SYN_SENT, since when we set FLAG_SYN_ACKED we also clear
+-	 * retrans_stamp even if we had retransmitted the SYN.
++	/* (1) For non-SACK connections, tcp_is_non_sack_preventing_reopen()
++	 * clears tp->retrans_stamp when snd_una == high_seq.
+ 	 */
+-	if (!tp->retrans_stamp &&	   /* no record of a retransmit/SYN? */
+-	    sk->sk_state != TCP_SYN_SENT)  /* not the FLAG_SYN_ACKED case? */
+-		return true;  /* nothing was retransmitted */
++	if (!tcp_is_sack(tp) && !before(tp->snd_una, tp->high_seq))
++		return false;
+ 
+-	return false;
++	/* (2) In TCP_SYN_SENT tcp_clean_rtx_queue() clears tp->retrans_stamp
++	 * when FLAG_SYN_ACKED is set, even if the SYN was
++	 * retransmitted.
++	 */
++	if (sk->sk_state == TCP_SYN_SENT)
++		return false;
++
++	return true;	/* tp->retrans_stamp is zero; no retransmit yet */
+ }
+ 
+ /* Undo procedures. */
+@@ -3226,7 +3235,7 @@ static bool tcp_ack_update_rtt(struct sock *sk, const int flag,
+ 	 */
+ 	if (seq_rtt_us < 0 && tp->rx_opt.saw_tstamp &&
+ 	    tp->rx_opt.rcv_tsecr && flag & FLAG_ACKED)
+-		seq_rtt_us = ca_rtt_us = tcp_rtt_tsopt_us(tp);
++		seq_rtt_us = ca_rtt_us = tcp_rtt_tsopt_us(tp, 1);
+ 
+ 	rs->rtt_us = ca_rtt_us; /* RTT of last (S)ACKed packet (or -1) */
+ 	if (seq_rtt_us < 0)
+@@ -6841,6 +6850,9 @@ tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
+ 		if (!tp->srtt_us)
+ 			tcp_synack_rtt_meas(sk, req);
+ 
++		if (tp->rx_opt.tstamp_ok)
++			tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
++
+ 		if (req) {
+ 			tcp_rcv_synrecv_state_fastopen(sk);
+ 		} else {
+@@ -6866,9 +6878,6 @@ tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
+ 		tp->snd_wnd = ntohs(th->window) << tp->rx_opt.snd_wscale;
+ 		tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
+ 
+-		if (tp->rx_opt.tstamp_ok)
+-			tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
+-
+ 		if (!inet_csk(sk)->icsk_ca_ops->cong_control)
+ 			tcp_update_pacing_rate(sk);
+ 
+diff --git a/net/ipv6/calipso.c b/net/ipv6/calipso.c
+index 62618a058b8fad..a247bb93908bf4 100644
+--- a/net/ipv6/calipso.c
++++ b/net/ipv6/calipso.c
+@@ -1207,6 +1207,10 @@ static int calipso_req_setattr(struct request_sock *req,
+ 	struct ipv6_opt_hdr *old, *new;
+ 	struct sock *sk = sk_to_full_sk(req_to_sk(req));
+ 
++	/* sk is NULL for SYN+ACK w/ SYN Cookie */
++	if (!sk)
++		return -ENOMEM;
++
+ 	if (req_inet->ipv6_opt && req_inet->ipv6_opt->hopopt)
+ 		old = req_inet->ipv6_opt->hopopt;
+ 	else
+@@ -1247,6 +1251,10 @@ static void calipso_req_delattr(struct request_sock *req)
+ 	struct ipv6_txoptions *txopts;
+ 	struct sock *sk = sk_to_full_sk(req_to_sk(req));
+ 
++	/* sk is NULL for SYN+ACK w/ SYN Cookie */
++	if (!sk)
++		return;
++
+ 	if (!req_inet->ipv6_opt || !req_inet->ipv6_opt->hopopt)
+ 		return;
+ 
+diff --git a/net/ipv6/ila/ila_common.c b/net/ipv6/ila/ila_common.c
+index 95e9146918cc6f..b8d43ed4689db9 100644
+--- a/net/ipv6/ila/ila_common.c
++++ b/net/ipv6/ila/ila_common.c
+@@ -86,7 +86,7 @@ static void ila_csum_adjust_transport(struct sk_buff *skb,
+ 
+ 			diff = get_csum_diff(ip6h, p);
+ 			inet_proto_csum_replace_by_diff(&th->check, skb,
+-							diff, true);
++							diff, true, true);
+ 		}
+ 		break;
+ 	case NEXTHDR_UDP:
+@@ -97,7 +97,7 @@ static void ila_csum_adjust_transport(struct sk_buff *skb,
+ 			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
+ 				diff = get_csum_diff(ip6h, p);
+ 				inet_proto_csum_replace_by_diff(&uh->check, skb,
+-								diff, true);
++								diff, true, true);
+ 				if (!uh->check)
+ 					uh->check = CSUM_MANGLED_0;
+ 			}
+@@ -111,7 +111,7 @@ static void ila_csum_adjust_transport(struct sk_buff *skb,
+ 
+ 			diff = get_csum_diff(ip6h, p);
+ 			inet_proto_csum_replace_by_diff(&ih->icmp6_cksum, skb,
+-							diff, true);
++							diff, true, true);
+ 		}
+ 		break;
+ 	}
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 89a61e040e6a18..f0e5431c2d46f5 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -2043,8 +2043,6 @@ struct sk_buff *ip6_make_skb(struct sock *sk,
+ 		ip6_cork_release(cork, &v6_cork);
+ 		return ERR_PTR(err);
+ 	}
+-	if (ipc6->dontfrag < 0)
+-		ipc6->dontfrag = inet6_test_bit(DONTFRAG, sk);
+ 
+ 	err = __ip6_append_data(sk, &queue, cork, &v6_cork,
+ 				&current->task_frag, getfrag, from,
+diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
+index 608fa9d05b55be..328419e05c8153 100644
+--- a/net/ipv6/raw.c
++++ b/net/ipv6/raw.c
+@@ -777,7 +777,7 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 	fl6.flowi6_mark = READ_ONCE(sk->sk_mark);
+ 	fl6.flowi6_uid = sk->sk_uid;
+ 
+-	ipcm6_init(&ipc6);
++	ipcm6_init_sk(&ipc6, sk);
+ 	ipc6.sockc.tsflags = READ_ONCE(sk->sk_tsflags);
+ 	ipc6.sockc.mark = fl6.flowi6_mark;
+ 
+@@ -890,9 +890,6 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 	if (hdrincl)
+ 		fl6.flowi6_flags |= FLOWI_FLAG_KNOWN_NH;
+ 
+-	if (ipc6.tclass < 0)
+-		ipc6.tclass = np->tclass;
+-
+ 	fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel);
+ 
+ 	dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
+@@ -903,9 +900,6 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 	if (ipc6.hlimit < 0)
+ 		ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
+ 
+-	if (ipc6.dontfrag < 0)
+-		ipc6.dontfrag = inet6_test_bit(DONTFRAG, sk);
+-
+ 	if (msg->msg_flags&MSG_CONFIRM)
+ 		goto do_confirm;
+ 
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index 197d0ac47592ad..57e38e5e4be926 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -1399,7 +1399,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 	int is_udplite = IS_UDPLITE(sk);
+ 	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
+ 
+-	ipcm6_init(&ipc6);
++	ipcm6_init_sk(&ipc6, sk);
+ 	ipc6.gso_size = READ_ONCE(up->gso_size);
+ 	ipc6.sockc.tsflags = READ_ONCE(sk->sk_tsflags);
+ 	ipc6.sockc.mark = READ_ONCE(sk->sk_mark);
+@@ -1608,9 +1608,6 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 
+ 	security_sk_classify_flow(sk, flowi6_to_flowi_common(fl6));
+ 
+-	if (ipc6.tclass < 0)
+-		ipc6.tclass = np->tclass;
+-
+ 	fl6->flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6->flowlabel);
+ 
+ 	dst = ip6_sk_dst_lookup_flow(sk, fl6, final_p, connected);
+@@ -1656,8 +1653,6 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 	WRITE_ONCE(up->pending, AF_INET6);
+ 
+ do_append_data:
+-	if (ipc6.dontfrag < 0)
+-		ipc6.dontfrag = inet6_test_bit(DONTFRAG, sk);
+ 	up->len += ulen;
+ 	err = ip6_append_data(sk, getfrag, msg, ulen, sizeof(struct udphdr),
+ 			      &ipc6, fl6, dst_rt6_info(dst),
+diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
+index f4c1da0708269a..b98d13584c81f0 100644
+--- a/net/l2tp/l2tp_ip6.c
++++ b/net/l2tp/l2tp_ip6.c
+@@ -547,7 +547,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 	fl6.flowi6_mark = READ_ONCE(sk->sk_mark);
+ 	fl6.flowi6_uid = sk->sk_uid;
+ 
+-	ipcm6_init(&ipc6);
++	ipcm6_init_sk(&ipc6, sk);
+ 
+ 	if (lsa) {
+ 		if (addr_len < SIN6_LEN_RFC2133)
+@@ -634,9 +634,6 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 
+ 	security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6));
+ 
+-	if (ipc6.tclass < 0)
+-		ipc6.tclass = np->tclass;
+-
+ 	fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel);
+ 
+ 	dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
+@@ -648,9 +645,6 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 	if (ipc6.hlimit < 0)
+ 		ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
+ 
+-	if (ipc6.dontfrag < 0)
+-		ipc6.dontfrag = inet6_test_bit(DONTFRAG, sk);
+-
+ 	if (msg->msg_flags & MSG_CONFIRM)
+ 		goto do_confirm;
+ 
+diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
+index f11fd360b422dd..cf2b8a05c3389d 100644
+--- a/net/mac80211/cfg.c
++++ b/net/mac80211/cfg.c
+@@ -2876,7 +2876,7 @@ static int ieee80211_scan(struct wiphy *wiphy,
+ 		 * the frames sent while scanning on other channel will be
+ 		 * lost)
+ 		 */
+-		if (sdata->deflink.u.ap.beacon &&
++		if (ieee80211_num_beaconing_links(sdata) &&
+ 		    (!(wiphy->features & NL80211_FEATURE_AP_SCAN) ||
+ 		     !(req->flags & NL80211_SCAN_FLAG_AP)))
+ 			return -EOPNOTSUPP;
+diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
+index 1e9389c49a57d0..e6f937cfedcf6b 100644
+--- a/net/mac80211/debugfs_sta.c
++++ b/net/mac80211/debugfs_sta.c
+@@ -152,12 +152,6 @@ static ssize_t sta_aqm_read(struct file *file, char __user *userbuf,
+ 	spin_lock_bh(&local->fq.lock);
+ 	rcu_read_lock();
+ 
+-	p += scnprintf(p,
+-		       bufsz + buf - p,
+-		       "target %uus interval %uus ecn %s\n",
+-		       codel_time_to_us(sta->cparams.target),
+-		       codel_time_to_us(sta->cparams.interval),
+-		       sta->cparams.ecn ? "yes" : "no");
+ 	p += scnprintf(p,
+ 		       bufsz + buf - p,
+ 		       "tid ac backlog-bytes backlog-packets new-flows drops marks overlimit collisions tx-bytes tx-packets flags\n");
+diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
+index 2922a9fec950dd..ba8aeb47bffd7d 100644
+--- a/net/mac80211/mesh_hwmp.c
++++ b/net/mac80211/mesh_hwmp.c
+@@ -636,7 +636,7 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
+ 				mesh_path_add_gate(mpath);
+ 		}
+ 		rcu_read_unlock();
+-	} else {
++	} else if (ifmsh->mshcfg.dot11MeshForwarding) {
+ 		rcu_read_lock();
+ 		mpath = mesh_path_lookup(sdata, target_addr);
+ 		if (mpath) {
+@@ -654,6 +654,8 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
+ 			}
+ 		}
+ 		rcu_read_unlock();
++	} else {
++		forward = false;
+ 	}
+ 
+ 	if (reply) {
+@@ -671,7 +673,7 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
+ 		}
+ 	}
+ 
+-	if (forward && ifmsh->mshcfg.dot11MeshForwarding) {
++	if (forward) {
+ 		u32 preq_id;
+ 		u8 hopcount;
+ 
+diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
+index 3dc9752188d58f..1b045b62961f57 100644
+--- a/net/mac80211/rate.c
++++ b/net/mac80211/rate.c
+@@ -971,8 +971,6 @@ int rate_control_set_rates(struct ieee80211_hw *hw,
+ 	if (sta->uploaded)
+ 		drv_sta_rate_tbl_update(hw_to_local(hw), sta->sdata, pubsta);
+ 
+-	ieee80211_sta_set_expected_throughput(pubsta, sta_get_expected_throughput(sta));
+-
+ 	return 0;
+ }
+ EXPORT_SYMBOL(rate_control_set_rates);
+diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
+index 49095f19a0f221..4eb45e08b97e7c 100644
+--- a/net/mac80211/sta_info.c
++++ b/net/mac80211/sta_info.c
+@@ -18,7 +18,6 @@
+ #include <linux/timer.h>
+ #include <linux/rtnetlink.h>
+ 
+-#include <net/codel.h>
+ #include <net/mac80211.h>
+ #include "ieee80211_i.h"
+ #include "driver-ops.h"
+@@ -683,12 +682,6 @@ __sta_info_alloc(struct ieee80211_sub_if_data *sdata,
+ 		}
+ 	}
+ 
+-	sta->cparams.ce_threshold = CODEL_DISABLED_THRESHOLD;
+-	sta->cparams.target = MS2TIME(20);
+-	sta->cparams.interval = MS2TIME(100);
+-	sta->cparams.ecn = true;
+-	sta->cparams.ce_threshold_selector = 0;
+-	sta->cparams.ce_threshold_mask = 0;
+ 
+ 	sta_dbg(sdata, "Allocated STA %pM\n", sta->sta.addr);
+ 
+@@ -2878,27 +2871,6 @@ unsigned long ieee80211_sta_last_active(struct sta_info *sta)
+ 	return sta->deflink.status_stats.last_ack;
+ }
+ 
+-static void sta_update_codel_params(struct sta_info *sta, u32 thr)
+-{
+-	if (thr && thr < STA_SLOW_THRESHOLD * sta->local->num_sta) {
+-		sta->cparams.target = MS2TIME(50);
+-		sta->cparams.interval = MS2TIME(300);
+-		sta->cparams.ecn = false;
+-	} else {
+-		sta->cparams.target = MS2TIME(20);
+-		sta->cparams.interval = MS2TIME(100);
+-		sta->cparams.ecn = true;
+-	}
+-}
+-
+-void ieee80211_sta_set_expected_throughput(struct ieee80211_sta *pubsta,
+-					   u32 thr)
+-{
+-	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
+-
+-	sta_update_codel_params(sta, thr);
+-}
+-
+ int ieee80211_sta_allocate_link(struct sta_info *sta, unsigned int link_id)
+ {
+ 	struct ieee80211_sub_if_data *sdata = sta->sdata;
+diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
+index 9195d5a2de0a86..a9cfeeb13e53f5 100644
+--- a/net/mac80211/sta_info.h
++++ b/net/mac80211/sta_info.h
+@@ -466,14 +466,6 @@ struct ieee80211_fragment_cache {
+ 	unsigned int next;
+ };
+ 
+-/*
+- * The bandwidth threshold below which the per-station CoDel parameters will be
+- * scaled to be more lenient (to prevent starvation of slow stations). This
+- * value will be scaled by the number of active stations when it is being
+- * applied.
+- */
+-#define STA_SLOW_THRESHOLD 6000 /* 6 Mbps */
+-
+ /**
+  * struct link_sta_info - Link STA information
+  * All link specific sta info are stored here for reference. This can be
+@@ -619,7 +611,6 @@ struct link_sta_info {
+  * @sta: station information we share with the driver
+  * @sta_state: duplicates information about station state (for debug)
+  * @rcu_head: RCU head used for freeing this station struct
+- * @cparams: CoDel parameters for this station.
+  * @reserved_tid: reserved TID (if any, otherwise IEEE80211_TID_UNRESERVED)
+  * @amsdu_mesh_control: track the mesh A-MSDU format used by the peer:
+  *
+@@ -710,8 +701,6 @@ struct sta_info {
+ 	struct dentry *debugfs_dir;
+ #endif
+ 
+-	struct codel_params cparams;
+-
+ 	u8 reserved_tid;
+ 	s8 amsdu_mesh_control;
+ 
+diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
+index 0ff8b56f580708..00c309e7768e17 100644
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -1401,16 +1401,9 @@ static struct sk_buff *fq_tin_dequeue_func(struct fq *fq,
+ 
+ 	local = container_of(fq, struct ieee80211_local, fq);
+ 	txqi = container_of(tin, struct txq_info, tin);
++	cparams = &local->cparams;
+ 	cstats = &txqi->cstats;
+ 
+-	if (txqi->txq.sta) {
+-		struct sta_info *sta = container_of(txqi->txq.sta,
+-						    struct sta_info, sta);
+-		cparams = &sta->cparams;
+-	} else {
+-		cparams = &local->cparams;
+-	}
+-
+ 	if (flow == &tin->default_flow)
+ 		cvars = &txqi->def_cvars;
+ 	else
+@@ -4523,8 +4516,10 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
+ 						     IEEE80211_TX_CTRL_MLO_LINK_UNSPEC,
+ 						     NULL);
+ 	} else if (ieee80211_vif_is_mld(&sdata->vif) &&
+-		   sdata->vif.type == NL80211_IFTYPE_AP &&
+-		   !ieee80211_hw_check(&sdata->local->hw, MLO_MCAST_MULTI_LINK_TX)) {
++		   ((sdata->vif.type == NL80211_IFTYPE_AP &&
++		     !ieee80211_hw_check(&sdata->local->hw, MLO_MCAST_MULTI_LINK_TX)) ||
++		    (sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
++		     !sdata->wdev.use_4addr))) {
+ 		ieee80211_mlo_multicast_tx(dev, skb);
+ 	} else {
+ normal:
+diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
+index df62638b649843..3373b6b34dc7df 100644
+--- a/net/mpls/af_mpls.c
++++ b/net/mpls/af_mpls.c
+@@ -81,8 +81,8 @@ static struct mpls_route *mpls_route_input_rcu(struct net *net, unsigned index)
+ 
+ 	if (index < net->mpls.platform_labels) {
+ 		struct mpls_route __rcu **platform_label =
+-			rcu_dereference(net->mpls.platform_label);
+-		rt = rcu_dereference(platform_label[index]);
++			rcu_dereference_rtnl(net->mpls.platform_label);
++		rt = rcu_dereference_rtnl(platform_label[index]);
+ 	}
+ 	return rt;
+ }
+diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
+index 0529e4ef752070..c5855069bdaba0 100644
+--- a/net/netfilter/nft_set_pipapo.c
++++ b/net/netfilter/nft_set_pipapo.c
+@@ -663,6 +663,9 @@ static int pipapo_realloc_mt(struct nft_pipapo_field *f,
+ 	    check_add_overflow(rules, extra, &rules_alloc))
+ 		return -EOVERFLOW;
+ 
++	if (rules_alloc > (INT_MAX / sizeof(*new_mt)))
++		return -ENOMEM;
++
+ 	new_mt = kvmalloc_array(rules_alloc, sizeof(*new_mt), GFP_KERNEL_ACCOUNT);
+ 	if (!new_mt)
+ 		return -ENOMEM;
+@@ -1499,6 +1502,9 @@ static struct nft_pipapo_match *pipapo_clone(struct nft_pipapo_match *old)
+ 		       src->groups * NFT_PIPAPO_BUCKETS(src->bb));
+ 
+ 		if (src->rules > 0) {
++			if (src->rules_alloc > (INT_MAX / sizeof(*src->mt)))
++				goto out_mt;
++
+ 			dst->mt = kvmalloc_array(src->rules_alloc,
+ 						 sizeof(*src->mt),
+ 						 GFP_KERNEL_ACCOUNT);
+diff --git a/net/nfc/nci/uart.c b/net/nfc/nci/uart.c
+index ed1508a9e093ed..aab107727f186e 100644
+--- a/net/nfc/nci/uart.c
++++ b/net/nfc/nci/uart.c
+@@ -119,22 +119,22 @@ static int nci_uart_set_driver(struct tty_struct *tty, unsigned int driver)
+ 
+ 	memcpy(nu, nci_uart_drivers[driver], sizeof(struct nci_uart));
+ 	nu->tty = tty;
+-	tty->disc_data = nu;
+ 	skb_queue_head_init(&nu->tx_q);
+ 	INIT_WORK(&nu->write_work, nci_uart_write_work);
+ 	spin_lock_init(&nu->rx_lock);
+ 
+ 	ret = nu->ops.open(nu);
+ 	if (ret) {
+-		tty->disc_data = NULL;
+ 		kfree(nu);
++		return ret;
+ 	} else if (!try_module_get(nu->owner)) {
+ 		nu->ops.close(nu);
+-		tty->disc_data = NULL;
+ 		kfree(nu);
+ 		return -ENOENT;
+ 	}
+-	return ret;
++	tty->disc_data = nu;
++
++	return 0;
+ }
+ 
+ /* ------ LDISC part ------ */
+diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
+index a903b3c4680500..11a7d5a25d6b16 100644
+--- a/net/sched/sch_sfq.c
++++ b/net/sched/sch_sfq.c
+@@ -656,6 +656,14 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt,
+ 		NL_SET_ERR_MSG_MOD(extack, "invalid quantum");
+ 		return -EINVAL;
+ 	}
++
++	if (ctl->perturb_period < 0 ||
++	    ctl->perturb_period > INT_MAX / HZ) {
++		NL_SET_ERR_MSG_MOD(extack, "invalid perturb period");
++		return -EINVAL;
++	}
++	perturb_period = ctl->perturb_period * HZ;
++
+ 	if (ctl_v1 && !red_check_params(ctl_v1->qth_min, ctl_v1->qth_max,
+ 					ctl_v1->Wlog, ctl_v1->Scell_log, NULL))
+ 		return -EINVAL;
+@@ -672,14 +680,12 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt,
+ 	headdrop = q->headdrop;
+ 	maxdepth = q->maxdepth;
+ 	maxflows = q->maxflows;
+-	perturb_period = q->perturb_period;
+ 	quantum = q->quantum;
+ 	flags = q->flags;
+ 
+ 	/* update and validate configuration */
+ 	if (ctl->quantum)
+ 		quantum = ctl->quantum;
+-	perturb_period = ctl->perturb_period * HZ;
+ 	if (ctl->flows)
+ 		maxflows = min_t(u32, ctl->flows, SFQ_MAX_FLOWS);
+ 	if (ctl->divisor) {
+diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
+index 8623dc0bafc09b..3142715d7e41eb 100644
+--- a/net/sched/sch_taprio.c
++++ b/net/sched/sch_taprio.c
+@@ -1328,13 +1328,15 @@ static int taprio_dev_notifier(struct notifier_block *nb, unsigned long event,
+ 
+ 		stab = rtnl_dereference(q->root->stab);
+ 
+-		oper = rtnl_dereference(q->oper_sched);
++		rcu_read_lock();
++		oper = rcu_dereference(q->oper_sched);
+ 		if (oper)
+ 			taprio_update_queue_max_sdu(q, oper, stab);
+ 
+-		admin = rtnl_dereference(q->admin_sched);
++		admin = rcu_dereference(q->admin_sched);
+ 		if (admin)
+ 			taprio_update_queue_max_sdu(q, admin, stab);
++		rcu_read_unlock();
+ 
+ 		break;
+ 	}
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index 53725ee7ba06d7..b301d64d9d80f3 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -9100,7 +9100,8 @@ static void __sctp_write_space(struct sctp_association *asoc)
+ 		wq = rcu_dereference(sk->sk_wq);
+ 		if (wq) {
+ 			if (waitqueue_active(&wq->wait))
+-				wake_up_interruptible(&wq->wait);
++				wake_up_interruptible_poll(&wq->wait, EPOLLOUT |
++						EPOLLWRNORM | EPOLLWRBAND);
+ 
+ 			/* Note that we try to include the Async I/O support
+ 			 * here by modeling from the current TCP/UDP code.
+diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
+index 79879b7d39cb4f..46a95877d2deba 100644
+--- a/net/sunrpc/svc.c
++++ b/net/sunrpc/svc.c
+@@ -1369,7 +1369,8 @@ svc_process_common(struct svc_rqst *rqstp)
+ 	case SVC_OK:
+ 		break;
+ 	case SVC_GARBAGE:
+-		goto err_garbage_args;
++		rqstp->rq_auth_stat = rpc_autherr_badcred;
++		goto err_bad_auth;
+ 	case SVC_SYSERR:
+ 		goto err_system_err;
+ 	case SVC_DENIED:
+@@ -1510,14 +1511,6 @@ svc_process_common(struct svc_rqst *rqstp)
+ 	*rqstp->rq_accept_statp = rpc_proc_unavail;
+ 	goto sendit;
+ 
+-err_garbage_args:
+-	svc_printk(rqstp, "failed to decode RPC header\n");
+-
+-	if (serv->sv_stats)
+-		serv->sv_stats->rpcbadfmt++;
+-	*rqstp->rq_accept_statp = rpc_garbage_args;
+-	goto sendit;
+-
+ err_system_err:
+ 	if (serv->sv_stats)
+ 		serv->sv_stats->rpcbadfmt++;
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
+index ca6172822b68ae..3d7f1413df0233 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
+@@ -577,6 +577,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
+ 	if (newxprt->sc_qp && !IS_ERR(newxprt->sc_qp))
+ 		ib_destroy_qp(newxprt->sc_qp);
+ 	rdma_destroy_id(newxprt->sc_cm_id);
++	rpcrdma_rn_unregister(dev, &newxprt->sc_rn);
+ 	/* This call to put will destroy the transport */
+ 	svc_xprt_put(&newxprt->sc_xprt);
+ 	return NULL;
+diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
+index 171ad4e2523f13..67d099c7c66259 100644
+--- a/net/sunrpc/xprtsock.c
++++ b/net/sunrpc/xprtsock.c
+@@ -2743,6 +2743,11 @@ static void xs_tcp_tls_setup_socket(struct work_struct *work)
+ 	}
+ 	rpc_shutdown_client(lower_clnt);
+ 
++	/* Check for ingress data that arrived before the socket's
++	 * ->data_ready callback was set up.
++	 */
++	xs_poll_check_readable(upper_transport);
++
+ out_unlock:
+ 	current_restore_flags(pflags, PF_MEMALLOC);
+ 	upper_transport->clnt = NULL;
+diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c
+index 79f91b6ca8c847..ea5bb131ebd060 100644
+--- a/net/tipc/crypto.c
++++ b/net/tipc/crypto.c
+@@ -425,7 +425,7 @@ static void tipc_aead_free(struct rcu_head *rp)
+ 	}
+ 	free_percpu(aead->tfm_entry);
+ 	kfree_sensitive(aead->key);
+-	kfree(aead);
++	kfree_sensitive(aead);
+ }
+ 
+ static int tipc_aead_users(struct tipc_aead __rcu *aead)
+diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
+index 108a4cc2e00107..258d6aa4f21ae4 100644
+--- a/net/tipc/udp_media.c
++++ b/net/tipc/udp_media.c
+@@ -489,7 +489,7 @@ int tipc_udp_nl_dump_remoteip(struct sk_buff *skb, struct netlink_callback *cb)
+ 
+ 		rtnl_lock();
+ 		b = tipc_bearer_find(net, bname);
+-		if (!b) {
++		if (!b || b->bcast_addr.media_id != TIPC_MEDIA_TYPE_UDP) {
+ 			rtnl_unlock();
+ 			return -EINVAL;
+ 		}
+@@ -500,7 +500,7 @@ int tipc_udp_nl_dump_remoteip(struct sk_buff *skb, struct netlink_callback *cb)
+ 
+ 		rtnl_lock();
+ 		b = rtnl_dereference(tn->bearer_list[bid]);
+-		if (!b) {
++		if (!b || b->bcast_addr.media_id != TIPC_MEDIA_TYPE_UDP) {
+ 			rtnl_unlock();
+ 			return -EINVAL;
+ 		}
+diff --git a/net/wireless/core.c b/net/wireless/core.c
+index 1ce8fff2a28a4e..586e50678ed80e 100644
+--- a/net/wireless/core.c
++++ b/net/wireless/core.c
+@@ -553,6 +553,9 @@ struct wiphy *wiphy_new_nm(const struct cfg80211_ops *ops, int sizeof_priv,
+ 	INIT_WORK(&rdev->mgmt_registrations_update_wk,
+ 		  cfg80211_mgmt_registrations_update_wk);
+ 	spin_lock_init(&rdev->mgmt_registrations_lock);
++	INIT_WORK(&rdev->wiphy_work, cfg80211_wiphy_work);
++	INIT_LIST_HEAD(&rdev->wiphy_work_list);
++	spin_lock_init(&rdev->wiphy_work_lock);
+ 
+ #ifdef CONFIG_CFG80211_DEFAULT_PS
+ 	rdev->wiphy.flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
+@@ -570,9 +573,6 @@ struct wiphy *wiphy_new_nm(const struct cfg80211_ops *ops, int sizeof_priv,
+ 		return NULL;
+ 	}
+ 
+-	INIT_WORK(&rdev->wiphy_work, cfg80211_wiphy_work);
+-	INIT_LIST_HEAD(&rdev->wiphy_work_list);
+-	spin_lock_init(&rdev->wiphy_work_lock);
+ 	INIT_WORK(&rdev->rfkill_block, cfg80211_rfkill_block_work);
+ 	INIT_WORK(&rdev->conn_work, cfg80211_conn_work);
+ 	INIT_WORK(&rdev->event_work, cfg80211_event_work);
+diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
+index da2a1c00ca8a63..d41e5642625e3b 100644
+--- a/net/xfrm/xfrm_user.c
++++ b/net/xfrm/xfrm_user.c
+@@ -178,11 +178,27 @@ static inline int verify_replay(struct xfrm_usersa_info *p,
+ 				       "Replay seq and seq_hi should be 0 for output SA");
+ 			return -EINVAL;
+ 		}
+-		if (rs->oseq_hi && !(p->flags & XFRM_STATE_ESN)) {
+-			NL_SET_ERR_MSG(
+-				extack,
+-				"Replay oseq_hi should be 0 in non-ESN mode for output SA");
+-			return -EINVAL;
++
++		if (!(p->flags & XFRM_STATE_ESN)) {
++			if (rs->oseq_hi) {
++				NL_SET_ERR_MSG(
++					extack,
++					"Replay oseq_hi should be 0 in non-ESN mode for output SA");
++				return -EINVAL;
++			}
++			if (rs->oseq == U32_MAX) {
++				NL_SET_ERR_MSG(
++					extack,
++					"Replay oseq should be less than 0xFFFFFFFF in non-ESN mode for output SA");
++				return -EINVAL;
++			}
++		} else {
++			if (rs->oseq == U32_MAX && rs->oseq_hi == U32_MAX) {
++				NL_SET_ERR_MSG(
++					extack,
++					"Replay oseq and oseq_hi should be less than 0xFFFFFFFF for output SA");
++				return -EINVAL;
++			}
+ 		}
+ 		if (rs->bmp_len) {
+ 			NL_SET_ERR_MSG(extack, "Replay bmp_len should 0 for output SA");
+@@ -196,11 +212,27 @@ static inline int verify_replay(struct xfrm_usersa_info *p,
+ 				       "Replay oseq and oseq_hi should be 0 for input SA");
+ 			return -EINVAL;
+ 		}
+-		if (rs->seq_hi && !(p->flags & XFRM_STATE_ESN)) {
+-			NL_SET_ERR_MSG(
+-				extack,
+-				"Replay seq_hi should be 0 in non-ESN mode for input SA");
+-			return -EINVAL;
++		if (!(p->flags & XFRM_STATE_ESN)) {
++			if (rs->seq_hi) {
++				NL_SET_ERR_MSG(
++					extack,
++					"Replay seq_hi should be 0 in non-ESN mode for input SA");
++				return -EINVAL;
++			}
++
++			if (rs->seq == U32_MAX) {
++				NL_SET_ERR_MSG(
++					extack,
++					"Replay seq should be less than 0xFFFFFFFF in non-ESN mode for input SA");
++				return -EINVAL;
++			}
++		} else {
++			if (rs->seq == U32_MAX && rs->seq_hi == U32_MAX) {
++				NL_SET_ERR_MSG(
++					extack,
++					"Replay seq and seq_hi should be less than 0xFFFFFFFF for input SA");
++				return -EINVAL;
++			}
+ 		}
+ 	}
+ 
+diff --git a/rust/Makefile b/rust/Makefile
+index 1b00e16951eeb8..93650b2ee7d575 100644
+--- a/rust/Makefile
++++ b/rust/Makefile
+@@ -53,6 +53,8 @@ endif
+ core-cfgs = \
+     --cfg no_fp_fmt_parse
+ 
++core-edition := $(if $(call rustc-min-version,108700),2024,2021)
++
+ quiet_cmd_rustdoc = RUSTDOC $(if $(rustdoc_host),H, ) $<
+       cmd_rustdoc = \
+ 	OBJTREE=$(abspath $(objtree)) \
+@@ -95,8 +97,8 @@ rustdoc-macros: $(src)/macros/lib.rs FORCE
+ 
+ # Starting with Rust 1.82.0, skipping `-Wrustdoc::unescaped_backticks` should
+ # not be needed -- see https://github.com/rust-lang/rust/pull/128307.
+-rustdoc-core: private skip_flags = -Wrustdoc::unescaped_backticks
+-rustdoc-core: private rustc_target_flags = $(core-cfgs)
++rustdoc-core: private skip_flags = --edition=2021 -Wrustdoc::unescaped_backticks
++rustdoc-core: private rustc_target_flags = --edition=$(core-edition) $(core-cfgs)
+ rustdoc-core: $(RUST_LIB_SRC)/core/src/lib.rs FORCE
+ 	+$(call if_changed,rustdoc)
+ 
+@@ -372,7 +374,7 @@ quiet_cmd_rustc_library = $(if $(skip_clippy),RUSTC,$(RUSTC_OR_CLIPPY_QUIET)) L
+       cmd_rustc_library = \
+ 	OBJTREE=$(abspath $(objtree)) \
+ 	$(if $(skip_clippy),$(RUSTC),$(RUSTC_OR_CLIPPY)) \
+-		$(filter-out $(skip_flags),$(rust_flags) $(rustc_target_flags)) \
++		$(filter-out $(skip_flags),$(rust_flags)) $(rustc_target_flags) \
+ 		--emit=dep-info=$(depfile) --emit=obj=$@ \
+ 		--emit=metadata=$(dir $@)$(patsubst %.o,lib%.rmeta,$(notdir $@)) \
+ 		--crate-type rlib -L$(objtree)/$(obj) \
+@@ -383,7 +385,7 @@ quiet_cmd_rustc_library = $(if $(skip_clippy),RUSTC,$(RUSTC_OR_CLIPPY_QUIET)) L
+ 
+ rust-analyzer:
+ 	$(Q)$(srctree)/scripts/generate_rust_analyzer.py \
+-		--cfgs='core=$(core-cfgs)' \
++		--cfgs='core=$(core-cfgs)' $(core-edition) \
+ 		$(realpath $(srctree)) $(realpath $(objtree)) \
+ 		$(rustc_sysroot) $(RUST_LIB_SRC) $(KBUILD_EXTMOD) > \
+ 		$(if $(KBUILD_EXTMOD),$(extmod_prefix),$(objtree))/rust-project.json
+@@ -407,9 +409,9 @@ define rule_rustc_library
+ endef
+ 
+ $(obj)/core.o: private skip_clippy = 1
+-$(obj)/core.o: private skip_flags = -Wunreachable_pub
++$(obj)/core.o: private skip_flags = --edition=2021 -Wunreachable_pub
+ $(obj)/core.o: private rustc_objcopy = $(foreach sym,$(redirect-intrinsics),--redefine-sym $(sym)=__rust$(sym))
+-$(obj)/core.o: private rustc_target_flags = $(core-cfgs)
++$(obj)/core.o: private rustc_target_flags = --edition=$(core-edition) $(core-cfgs)
+ $(obj)/core.o: $(RUST_LIB_SRC)/core/src/lib.rs \
+     $(wildcard $(objtree)/include/config/RUSTC_VERSION_TEXT) FORCE
+ 	+$(call if_changed_rule,rustc_library)
+diff --git a/scripts/Makefile.compiler b/scripts/Makefile.compiler
+index c6cd729b65cbfb..638e1e729986dc 100644
+--- a/scripts/Makefile.compiler
++++ b/scripts/Makefile.compiler
+@@ -43,7 +43,7 @@ as-instr = $(call try-run,\
+ # __cc-option
+ # Usage: MY_CFLAGS += $(call __cc-option,$(CC),$(MY_CFLAGS),-march=winchip-c6,-march=i586)
+ __cc-option = $(call try-run,\
+-	$(1) -Werror $(2) $(3) -c -x c /dev/null -o "$$TMP",$(3),$(4))
++	$(1) -Werror $(2) $(3:-Wno-%=-W%) -c -x c /dev/null -o "$$TMP",$(3),$(4))
+ 
+ # cc-option
+ # Usage: cflags-y += $(call cc-option,-march=winchip-c6,-march=i586)
+@@ -57,7 +57,7 @@ cc-option-yn = $(if $(call cc-option,$1),y,n)
+ 
+ # cc-disable-warning
+ # Usage: cflags-y += $(call cc-disable-warning,unused-but-set-variable)
+-cc-disable-warning = $(if $(call cc-option,-W$(strip $1)),-Wno-$(strip $1))
++cc-disable-warning = $(call cc-option,-Wno-$(strip $1))
+ 
+ # gcc-min-version
+ # Usage: cflags-$(call gcc-min-version, 70100) += -foo
+@@ -67,6 +67,10 @@ gcc-min-version = $(call test-ge, $(CONFIG_GCC_VERSION), $1)
+ # Usage: cflags-$(call clang-min-version, 110000) += -foo
+ clang-min-version = $(call test-ge, $(CONFIG_CLANG_VERSION), $1)
+ 
++# rustc-min-version
++# Usage: rustc-$(call rustc-min-version, 108500) += -Cfoo
++rustc-min-version = $(call test-ge, $(CONFIG_RUSTC_VERSION), $1)
++
+ # ld-option
+ # Usage: KBUILD_LDFLAGS += $(call ld-option, -X, -Y)
+ ld-option = $(call try-run, $(LD) $(KBUILD_LDFLAGS) $(1) -v,$(1),$(2),$(3))
+diff --git a/scripts/generate_rust_analyzer.py b/scripts/generate_rust_analyzer.py
+index 690f9830f06482..f9c9a2117632cc 100755
+--- a/scripts/generate_rust_analyzer.py
++++ b/scripts/generate_rust_analyzer.py
+@@ -18,7 +18,7 @@ def args_crates_cfgs(cfgs):
+ 
+     return crates_cfgs
+ 
+-def generate_crates(srctree, objtree, sysroot_src, external_src, cfgs):
++def generate_crates(srctree, objtree, sysroot_src, external_src, cfgs, core_edition):
+     # Generate the configuration list.
+     cfg = []
+     with open(objtree / "include" / "generated" / "rustc_cfg") as fd:
+@@ -34,7 +34,7 @@ def generate_crates(srctree, objtree, sysroot_src, external_src, cfgs):
+     crates_indexes = {}
+     crates_cfgs = args_crates_cfgs(cfgs)
+ 
+-    def append_crate(display_name, root_module, deps, cfg=[], is_workspace_member=True, is_proc_macro=False):
++    def append_crate(display_name, root_module, deps, cfg=[], is_workspace_member=True, is_proc_macro=False, edition="2021"):
+         crates_indexes[display_name] = len(crates)
+         crates.append({
+             "display_name": display_name,
+@@ -43,7 +43,7 @@ def generate_crates(srctree, objtree, sysroot_src, external_src, cfgs):
+             "is_proc_macro": is_proc_macro,
+             "deps": [{"crate": crates_indexes[dep], "name": dep} for dep in deps],
+             "cfg": cfg,
+-            "edition": "2021",
++            "edition": edition,
+             "env": {
+                 "RUST_MODFILE": "This is only for rust-analyzer"
+             }
+@@ -53,6 +53,7 @@ def generate_crates(srctree, objtree, sysroot_src, external_src, cfgs):
+         display_name,
+         deps,
+         cfg=[],
++        edition="2021",
+     ):
+         append_crate(
+             display_name,
+@@ -60,12 +61,13 @@ def generate_crates(srctree, objtree, sysroot_src, external_src, cfgs):
+             deps,
+             cfg,
+             is_workspace_member=False,
++            edition=edition,
+         )
+ 
+     # NB: sysroot crates reexport items from one another so setting up our transitive dependencies
+     # here is important for ensuring that rust-analyzer can resolve symbols. The sources of truth
+     # for this dependency graph are `(sysroot_src / crate / "Cargo.toml" for crate in crates)`.
+-    append_sysroot_crate("core", [], cfg=crates_cfgs.get("core", []))
++    append_sysroot_crate("core", [], cfg=crates_cfgs.get("core", []), edition=core_edition)
+     append_sysroot_crate("alloc", ["core"])
+     append_sysroot_crate("std", ["alloc", "core"])
+     append_sysroot_crate("proc_macro", ["core", "std"])
+@@ -155,6 +157,7 @@ def main():
+     parser = argparse.ArgumentParser()
+     parser.add_argument('--verbose', '-v', action='store_true')
+     parser.add_argument('--cfgs', action='append', default=[])
++    parser.add_argument("core_edition")
+     parser.add_argument("srctree", type=pathlib.Path)
+     parser.add_argument("objtree", type=pathlib.Path)
+     parser.add_argument("sysroot", type=pathlib.Path)
+@@ -171,7 +174,7 @@ def main():
+     assert args.sysroot in args.sysroot_src.parents
+ 
+     rust_project = {
+-        "crates": generate_crates(args.srctree, args.objtree, args.sysroot_src, args.exttree, args.cfgs),
++        "crates": generate_crates(args.srctree, args.objtree, args.sysroot_src, args.exttree, args.cfgs, args.core_edition),
+         "sysroot": str(args.sysroot),
+     }
+ 
+diff --git a/security/selinux/xfrm.c b/security/selinux/xfrm.c
+index 90ec4ef1b082f9..61d56b0c2be138 100644
+--- a/security/selinux/xfrm.c
++++ b/security/selinux/xfrm.c
+@@ -94,7 +94,7 @@ static int selinux_xfrm_alloc_user(struct xfrm_sec_ctx **ctxp,
+ 
+ 	ctx->ctx_doi = XFRM_SC_DOI_LSM;
+ 	ctx->ctx_alg = XFRM_SC_ALG_SELINUX;
+-	ctx->ctx_len = str_len;
++	ctx->ctx_len = str_len + 1;
+ 	memcpy(ctx->ctx_str, &uctx[1], str_len);
+ 	ctx->ctx_str[str_len] = '\0';
+ 	rc = security_context_to_sid(ctx->ctx_str, str_len,
+diff --git a/sound/pci/hda/cs35l41_hda_property.c b/sound/pci/hda/cs35l41_hda_property.c
+index 61d2314834e7b1..d8249d997c2a0b 100644
+--- a/sound/pci/hda/cs35l41_hda_property.c
++++ b/sound/pci/hda/cs35l41_hda_property.c
+@@ -31,6 +31,9 @@ struct cs35l41_config {
+ };
+ 
+ static const struct cs35l41_config cs35l41_config_table[] = {
++	{ "10251826", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, -1, -1, 0, 0, 0 },
++	{ "1025182C", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, -1, -1, 0, 0, 0 },
++	{ "10251844", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, -1, -1, 0, 0, 0 },
+ 	{ "10280B27", 2, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, 2, 0, 1000, 4500, 24 },
+ 	{ "10280B28", 2, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, 2, 0, 1000, 4500, 24 },
+ 	{ "10280BEB", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, -1, 0, 0, 0, 0 },
+@@ -452,6 +455,9 @@ struct cs35l41_prop_model {
+ static const struct cs35l41_prop_model cs35l41_prop_model_table[] = {
+ 	{ "CLSA0100", NULL, lenovo_legion_no_acpi },
+ 	{ "CLSA0101", NULL, lenovo_legion_no_acpi },
++	{ "CSC3551", "10251826", generic_dsd_config },
++	{ "CSC3551", "1025182C", generic_dsd_config },
++	{ "CSC3551", "10251844", generic_dsd_config },
+ 	{ "CSC3551", "10280B27", generic_dsd_config },
+ 	{ "CSC3551", "10280B28", generic_dsd_config },
+ 	{ "CSC3551", "10280BEB", generic_dsd_config },
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 25b1984898ab21..1872c8b7505373 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -2286,6 +2286,8 @@ static const struct snd_pci_quirk power_save_denylist[] = {
+ 	SND_PCI_QUIRK(0x1734, 0x1232, "KONTRON SinglePC", 0),
+ 	/* Dell ALC3271 */
+ 	SND_PCI_QUIRK(0x1028, 0x0962, "Dell ALC3271", 0),
++	/* https://bugzilla.kernel.org/show_bug.cgi?id=220210 */
++	SND_PCI_QUIRK(0x17aa, 0x5079, "Lenovo Thinkpad E15", 0),
+ 	{}
+ };
+ 
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index e714e91c271217..cb41cd2ba0ef17 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -10449,6 +10449,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1028, 0x0871, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1028, 0x0872, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1028, 0x0873, "Dell Precision 3930", ALC255_FIXUP_DUMMY_LINEOUT_VERB),
++	SND_PCI_QUIRK(0x1028, 0x0879, "Dell Latitude 5420 Rugged", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x08ad, "Dell WYSE AIO", ALC225_FIXUP_DELL_WYSE_AIO_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x08ae, "Dell WYSE NB", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x0935, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
+@@ -10727,6 +10728,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8b97, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ 	SND_PCI_QUIRK(0x103c, 0x8bb3, "HP Slim OMEN", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x103c, 0x8bb4, "HP Slim OMEN", ALC287_FIXUP_CS35L41_I2C_2),
++	SND_PCI_QUIRK(0x103c, 0x8bc8, "HP Victus 15-fa1xxx", ALC245_FIXUP_HP_MUTE_LED_COEFBIT),
+ 	SND_PCI_QUIRK(0x103c, 0x8bcd, "HP Omen 16-xd0xxx", ALC245_FIXUP_HP_MUTE_LED_V1_COEFBIT),
+ 	SND_PCI_QUIRK(0x103c, 0x8bdd, "HP Envy 17", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x103c, 0x8bde, "HP Envy 17", ALC287_FIXUP_CS35L41_I2C_2),
+@@ -10780,6 +10782,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8c91, "HP EliteBook 660", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8c96, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ 	SND_PCI_QUIRK(0x103c, 0x8c97, "HP ZBook", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
++	SND_PCI_QUIRK(0x103c, 0x8c9c, "HP Victus 16-s1xxx (MB 8C9C)", ALC245_FIXUP_HP_MUTE_LED_COEFBIT),
+ 	SND_PCI_QUIRK(0x103c, 0x8ca1, "HP ZBook Power", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8ca2, "HP ZBook Power", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8ca4, "HP ZBook Fury", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+@@ -10837,6 +10840,8 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8e60, "HP Trekker ", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x103c, 0x8e61, "HP Trekker ", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x103c, 0x8e62, "HP Trekker ", ALC287_FIXUP_CS35L41_I2C_2),
++	SND_PCI_QUIRK(0x1043, 0x1032, "ASUS VivoBook X513EA", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1043, 0x1034, "ASUS GU605C", ALC285_FIXUP_ASUS_GU605_SPI_SPEAKER2_TO_DAC1),
+ 	SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
+ 	SND_PCI_QUIRK(0x1043, 0x1054, "ASUS G614FH/FM/FP", ALC287_FIXUP_CS35L41_I2C_2),
+diff --git a/sound/soc/amd/acp/acp-sdw-sof-mach.c b/sound/soc/amd/acp/acp-sdw-sof-mach.c
+index 3be401c7227040..99a244f495bd30 100644
+--- a/sound/soc/amd/acp/acp-sdw-sof-mach.c
++++ b/sound/soc/amd/acp/acp-sdw-sof-mach.c
+@@ -267,7 +267,7 @@ static int create_sdw_dailinks(struct snd_soc_card *card,
+ 
+ 	/* generate DAI links by each sdw link */
+ 	while (sof_dais->initialised) {
+-		int current_be_id;
++		int current_be_id = 0;
+ 
+ 		ret = create_sdw_dailink(card, sof_dais, dai_links,
+ 					 &current_be_id, codec_conf);
+diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
+index e632f16c910250..3d9da93d22ee84 100644
+--- a/sound/soc/amd/yc/acp6x-mach.c
++++ b/sound/soc/amd/yc/acp6x-mach.c
+@@ -311,6 +311,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "83AS"),
+ 		}
+ 	},
++	{
++		.driver_data = &acp6x_card,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "83HN"),
++		}
++	},
+ 	{
+ 		.driver_data = &acp6x_card,
+ 		.matches = {
+@@ -360,7 +367,7 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "M5402RA"),
+ 		}
+ 	},
+-        {
++	{
+ 		.driver_data = &acp6x_card,
+ 		.matches = {
+ 			DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC."),
+diff --git a/sound/soc/codecs/tas2770.c b/sound/soc/codecs/tas2770.c
+index 863c3f672ba98d..0931b6109755fb 100644
+--- a/sound/soc/codecs/tas2770.c
++++ b/sound/soc/codecs/tas2770.c
+@@ -156,11 +156,37 @@ static const struct snd_kcontrol_new isense_switch =
+ static const struct snd_kcontrol_new vsense_switch =
+ 	SOC_DAPM_SINGLE("Switch", TAS2770_PWR_CTRL, 2, 1, 1);
+ 
++static int sense_event(struct snd_soc_dapm_widget *w,
++			struct snd_kcontrol *kcontrol, int event)
++{
++	struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
++	struct tas2770_priv *tas2770 = snd_soc_component_get_drvdata(component);
++
++	/*
++	 * Powering up ISENSE/VSENSE requires a trip through the shutdown state.
++	 * Do that here to ensure that our changes are applied properly, otherwise
++	 * we might end up with non-functional IVSENSE if playback started earlier,
++	 * which would break software speaker protection.
++	 */
++	switch (event) {
++	case SND_SOC_DAPM_PRE_REG:
++		return snd_soc_component_update_bits(component, TAS2770_PWR_CTRL,
++						    TAS2770_PWR_CTRL_MASK,
++						    TAS2770_PWR_CTRL_SHUTDOWN);
++	case SND_SOC_DAPM_POST_REG:
++		return tas2770_update_pwr_ctrl(tas2770);
++	default:
++		return 0;
++	}
++}
++
+ static const struct snd_soc_dapm_widget tas2770_dapm_widgets[] = {
+ 	SND_SOC_DAPM_AIF_IN("ASI1", "ASI1 Playback", 0, SND_SOC_NOPM, 0, 0),
+ 	SND_SOC_DAPM_MUX("ASI1 Sel", SND_SOC_NOPM, 0, 0, &tas2770_asi1_mux),
+-	SND_SOC_DAPM_SWITCH("ISENSE", TAS2770_PWR_CTRL, 3, 1, &isense_switch),
+-	SND_SOC_DAPM_SWITCH("VSENSE", TAS2770_PWR_CTRL, 2, 1, &vsense_switch),
++	SND_SOC_DAPM_SWITCH_E("ISENSE", TAS2770_PWR_CTRL, 3, 1, &isense_switch,
++		sense_event, SND_SOC_DAPM_PRE_REG | SND_SOC_DAPM_POST_REG),
++	SND_SOC_DAPM_SWITCH_E("VSENSE", TAS2770_PWR_CTRL, 2, 1, &vsense_switch,
++		sense_event, SND_SOC_DAPM_PRE_REG | SND_SOC_DAPM_POST_REG),
+ 	SND_SOC_DAPM_DAC_E("DAC", NULL, SND_SOC_NOPM, 0, 0, tas2770_dac_event,
+ 			   SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+ 	SND_SOC_DAPM_OUTPUT("OUT"),
+diff --git a/sound/soc/codecs/wcd937x.c b/sound/soc/codecs/wcd937x.c
+index 9c1997a42334d6..1df827a084cace 100644
+--- a/sound/soc/codecs/wcd937x.c
++++ b/sound/soc/codecs/wcd937x.c
+@@ -92,7 +92,6 @@ struct wcd937x_priv {
+ 	struct regmap_irq_chip *wcd_regmap_irq_chip;
+ 	struct regmap_irq_chip_data *irq_chip;
+ 	struct regulator_bulk_data supplies[WCD937X_MAX_BULK_SUPPLY];
+-	struct regulator *buck_supply;
+ 	struct snd_soc_jack *jack;
+ 	unsigned long status_mask;
+ 	s32 micb_ref[WCD937X_MAX_MICBIAS];
+@@ -2897,10 +2896,8 @@ static int wcd937x_probe(struct platform_device *pdev)
+ 		return dev_err_probe(dev, ret, "Failed to get supplies\n");
+ 
+ 	ret = regulator_bulk_enable(WCD937X_MAX_BULK_SUPPLY, wcd937x->supplies);
+-	if (ret) {
+-		regulator_bulk_free(WCD937X_MAX_BULK_SUPPLY, wcd937x->supplies);
++	if (ret)
+ 		return dev_err_probe(dev, ret, "Failed to enable supplies\n");
+-	}
+ 
+ 	wcd937x_dt_parse_micbias_info(dev, wcd937x);
+ 
+@@ -2936,7 +2933,6 @@ static int wcd937x_probe(struct platform_device *pdev)
+ 
+ err_disable_regulators:
+ 	regulator_bulk_disable(WCD937X_MAX_BULK_SUPPLY, wcd937x->supplies);
+-	regulator_bulk_free(WCD937X_MAX_BULK_SUPPLY, wcd937x->supplies);
+ 
+ 	return ret;
+ }
+@@ -2953,7 +2949,6 @@ static void wcd937x_remove(struct platform_device *pdev)
+ 	pm_runtime_dont_use_autosuspend(dev);
+ 
+ 	regulator_bulk_disable(WCD937X_MAX_BULK_SUPPLY, wcd937x->supplies);
+-	regulator_bulk_free(WCD937X_MAX_BULK_SUPPLY, wcd937x->supplies);
+ }
+ 
+ #if defined(CONFIG_OF)
+diff --git a/sound/soc/meson/meson-card-utils.c b/sound/soc/meson/meson-card-utils.c
+index 1a4ef124e4e259..ad38c74166a463 100644
+--- a/sound/soc/meson/meson-card-utils.c
++++ b/sound/soc/meson/meson-card-utils.c
+@@ -231,7 +231,7 @@ static int meson_card_parse_of_optional(struct snd_soc_card *card,
+ 						    const char *p))
+ {
+ 	/* If property is not provided, don't fail ... */
+-	if (!of_property_read_bool(card->dev->of_node, propname))
++	if (!of_property_present(card->dev->of_node, propname))
+ 		return 0;
+ 
+ 	/* ... but do fail if it is provided and the parsing fails */
+diff --git a/sound/soc/qcom/sdm845.c b/sound/soc/qcom/sdm845.c
+index a479d7e5b7fbdc..314ff68506d9fd 100644
+--- a/sound/soc/qcom/sdm845.c
++++ b/sound/soc/qcom/sdm845.c
+@@ -91,6 +91,10 @@ static int sdm845_slim_snd_hw_params(struct snd_pcm_substream *substream,
+ 		else
+ 			ret = snd_soc_dai_set_channel_map(cpu_dai, tx_ch_cnt,
+ 							  tx_ch, 0, NULL);
++		if (ret != 0 && ret != -ENOTSUPP) {
++			dev_err(rtd->dev, "failed to set cpu chan map, err:%d\n", ret);
++			return ret;
++		}
+ 	}
+ 
+ 	return 0;
+diff --git a/sound/soc/sdw_utils/soc_sdw_rt_amp.c b/sound/soc/sdw_utils/soc_sdw_rt_amp.c
+index 6951dfb5652632..b3d6ca24997347 100644
+--- a/sound/soc/sdw_utils/soc_sdw_rt_amp.c
++++ b/sound/soc/sdw_utils/soc_sdw_rt_amp.c
+@@ -190,7 +190,7 @@ int asoc_sdw_rt_amp_spk_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc
+ 	const struct snd_soc_dapm_route *rt_amp_map;
+ 	char codec_name[CODEC_NAME_SIZE];
+ 	struct snd_soc_dai *codec_dai;
+-	int ret;
++	int ret = -EINVAL;
+ 	int i;
+ 
+ 	rt_amp_map = get_codec_name_and_route(dai, codec_name);
+diff --git a/sound/soc/tegra/tegra210_ahub.c b/sound/soc/tegra/tegra210_ahub.c
+index 1920b996e9aad3..51043e556b3e98 100644
+--- a/sound/soc/tegra/tegra210_ahub.c
++++ b/sound/soc/tegra/tegra210_ahub.c
+@@ -1359,6 +1359,8 @@ static int tegra_ahub_probe(struct platform_device *pdev)
+ 		return -ENOMEM;
+ 
+ 	ahub->soc_data = of_device_get_match_data(&pdev->dev);
++	if (!ahub->soc_data)
++		return -ENODEV;
+ 
+ 	platform_set_drvdata(pdev, ahub);
+ 
+diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
+index 0e9b5431a47f20..faac7df1fbcf02 100644
+--- a/sound/usb/mixer_maps.c
++++ b/sound/usb/mixer_maps.c
+@@ -383,6 +383,13 @@ static const struct usbmix_name_map ms_usb_link_map[] = {
+ 	{ 0 }   /* terminator */
+ };
+ 
++/* KTMicro USB */
++static struct usbmix_name_map s31b2_0022_map[] = {
++	{ 23, "Speaker Playback" },
++	{ 18, "Headphone Playback" },
++	{ 0 }
++};
++
+ /* ASUS ROG Zenith II with Realtek ALC1220-VB */
+ static const struct usbmix_name_map asus_zenith_ii_map[] = {
+ 	{ 19, NULL, 12 }, /* FU, Input Gain Pad - broken response, disabled */
+@@ -692,6 +699,11 @@ static const struct usbmix_ctl_map usbmix_ctl_maps[] = {
+ 		.id = USB_ID(0x045e, 0x083c),
+ 		.map = ms_usb_link_map,
+ 	},
++	{
++		/* KTMicro USB */
++		.id = USB_ID(0X31b2, 0x0022),
++		.map = s31b2_0022_map,
++	},
+ 	{ 0 } /* terminator */
+ };
+ 
+diff --git a/tools/bpf/bpftool/cgroup.c b/tools/bpf/bpftool/cgroup.c
+index afab728468bf64..4189c9d74fb062 100644
+--- a/tools/bpf/bpftool/cgroup.c
++++ b/tools/bpf/bpftool/cgroup.c
+@@ -318,11 +318,11 @@ static int show_bpf_progs(int cgroup_fd, enum bpf_attach_type type,
+ 
+ static int do_show(int argc, char **argv)
+ {
+-	enum bpf_attach_type type;
+ 	int has_attached_progs;
+ 	const char *path;
+ 	int cgroup_fd;
+ 	int ret = -1;
++	unsigned int i;
+ 
+ 	query_flags = 0;
+ 
+@@ -370,14 +370,14 @@ static int do_show(int argc, char **argv)
+ 		       "AttachFlags", "Name");
+ 
+ 	btf_vmlinux = libbpf_find_kernel_btf();
+-	for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++) {
++	for (i = 0; i < ARRAY_SIZE(cgroup_attach_types); i++) {
+ 		/*
+ 		 * Not all attach types may be supported, so it's expected,
+ 		 * that some requests will fail.
+ 		 * If we were able to get the show for at least one
+ 		 * attach type, let's return 0.
+ 		 */
+-		if (show_bpf_progs(cgroup_fd, type, 0) == 0)
++		if (show_bpf_progs(cgroup_fd, cgroup_attach_types[i], 0) == 0)
+ 			ret = 0;
+ 	}
+ 
+@@ -400,9 +400,9 @@ static int do_show(int argc, char **argv)
+ static int do_show_tree_fn(const char *fpath, const struct stat *sb,
+ 			   int typeflag, struct FTW *ftw)
+ {
+-	enum bpf_attach_type type;
+ 	int has_attached_progs;
+ 	int cgroup_fd;
++	unsigned int i;
+ 
+ 	if (typeflag != FTW_D)
+ 		return 0;
+@@ -434,8 +434,8 @@ static int do_show_tree_fn(const char *fpath, const struct stat *sb,
+ 	}
+ 
+ 	btf_vmlinux = libbpf_find_kernel_btf();
+-	for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++)
+-		show_bpf_progs(cgroup_fd, type, ftw->level);
++	for (i = 0; i < ARRAY_SIZE(cgroup_attach_types); i++)
++		show_bpf_progs(cgroup_fd, cgroup_attach_types[i], ftw->level);
+ 
+ 	if (errno == EINVAL)
+ 		/* Last attach type does not support query.
+diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
+index 552fd633f8200d..5a5cdb4539358a 100644
+--- a/tools/include/uapi/linux/bpf.h
++++ b/tools/include/uapi/linux/bpf.h
+@@ -2035,6 +2035,7 @@ union bpf_attr {
+  * 		for updates resulting in a null checksum the value is set to
+  * 		**CSUM_MANGLED_0** instead. Flag **BPF_F_PSEUDO_HDR** indicates
+  * 		the checksum is to be computed against a pseudo-header.
++ * 		Flag **BPF_F_IPV6** should be set for IPv6 packets.
+  *
+  * 		This helper works in combination with **bpf_csum_diff**\ (),
+  * 		which does not update the checksum in-place, but offers more
+@@ -6049,6 +6050,7 @@ enum {
+ 	BPF_F_PSEUDO_HDR		= (1ULL << 4),
+ 	BPF_F_MARK_MANGLED_0		= (1ULL << 5),
+ 	BPF_F_MARK_ENFORCE		= (1ULL << 6),
++	BPF_F_IPV6			= (1ULL << 7),
+ };
+ 
+ /* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */
+diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
+index 27e7bfae953bd3..b770702dab372e 100644
+--- a/tools/lib/bpf/btf.c
++++ b/tools/lib/bpf/btf.c
+@@ -995,7 +995,7 @@ static struct btf *btf_new_empty(struct btf *base_btf)
+ 	if (base_btf) {
+ 		btf->base_btf = base_btf;
+ 		btf->start_id = btf__type_cnt(base_btf);
+-		btf->start_str_off = base_btf->hdr->str_len;
++		btf->start_str_off = base_btf->hdr->str_len + base_btf->start_str_off;
+ 		btf->swapped_endian = base_btf->swapped_endian;
+ 	}
+ 
+@@ -4176,6 +4176,19 @@ static bool btf_dedup_identical_structs(struct btf_dedup *d, __u32 id1, __u32 id
+ 	return true;
+ }
+ 
++static bool btf_dedup_identical_ptrs(struct btf_dedup *d, __u32 id1, __u32 id2)
++{
++	struct btf_type *t1, *t2;
++
++	t1 = btf_type_by_id(d->btf, id1);
++	t2 = btf_type_by_id(d->btf, id2);
++
++	if (!btf_is_ptr(t1) || !btf_is_ptr(t2))
++		return false;
++
++	return t1->type == t2->type;
++}
++
+ /*
+  * Check equivalence of BTF type graph formed by candidate struct/union (we'll
+  * call it "candidate graph" in this description for brevity) to a type graph
+@@ -4308,6 +4321,9 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
+ 		 */
+ 		if (btf_dedup_identical_structs(d, hypot_type_id, cand_id))
+ 			return 1;
++		/* A similar case is again observed for PTRs. */
++		if (btf_dedup_identical_ptrs(d, hypot_type_id, cand_id))
++			return 1;
+ 		return 0;
+ 	}
+ 
+diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
+index bb24f6bac20737..1290314da67618 100644
+--- a/tools/lib/bpf/libbpf.c
++++ b/tools/lib/bpf/libbpf.c
+@@ -13971,6 +13971,12 @@ int bpf_object__attach_skeleton(struct bpf_object_skeleton *s)
+ 		}
+ 
+ 		link = map_skel->link;
++		if (!link) {
++			pr_warn("map '%s': BPF map skeleton link is uninitialized\n",
++				bpf_map__name(map));
++			continue;
++		}
++
+ 		if (*link)
+ 			continue;
+ 
+diff --git a/tools/perf/tests/tests-scripts.c b/tools/perf/tests/tests-scripts.c
+index ed114b04429365..b6986d50dde6c9 100644
+--- a/tools/perf/tests/tests-scripts.c
++++ b/tools/perf/tests/tests-scripts.c
+@@ -255,6 +255,7 @@ static void append_scripts_in_dir(int dir_fd,
+ 			continue; /* Skip scripts that have a separate driver. */
+ 		fd = openat(dir_fd, ent->d_name, O_PATH);
+ 		append_scripts_in_dir(fd, result, result_sz);
++		close(fd);
+ 	}
+ 	for (i = 0; i < n_dirs; i++) /* Clean up */
+ 		zfree(&entlist[i]);
+diff --git a/tools/perf/util/print-events.c b/tools/perf/util/print-events.c
+index 81e0135cddf017..a1c71d9793bd88 100644
+--- a/tools/perf/util/print-events.c
++++ b/tools/perf/util/print-events.c
+@@ -282,6 +282,7 @@ bool is_event_supported(u8 type, u64 config)
+ 			ret = evsel__open(evsel, NULL, tmap) >= 0;
+ 		}
+ 
++		evsel__close(evsel);
+ 		evsel__delete(evsel);
+ 	}
+ 
+diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile
+index d51249f14e2fe8..5656e58a538031 100644
+--- a/tools/testing/selftests/x86/Makefile
++++ b/tools/testing/selftests/x86/Makefile
+@@ -12,7 +12,7 @@ CAN_BUILD_WITH_NOPIE := $(shell ./check_cc.sh "$(CC)" trivial_program.c -no-pie)
+ 
+ TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs syscall_nt test_mremap_vdso \
+ 			check_initial_reg_state sigreturn iopl ioperm \
+-			test_vsyscall mov_ss_trap \
++			test_vsyscall mov_ss_trap sigtrap_loop \
+ 			syscall_arg_fault fsgsbase_restore sigaltstack
+ TARGETS_C_BOTHBITS += nx_stack
+ TARGETS_C_32BIT_ONLY := entry_from_vm86 test_syscall_vdso unwind_vdso \
+diff --git a/tools/testing/selftests/x86/sigtrap_loop.c b/tools/testing/selftests/x86/sigtrap_loop.c
+new file mode 100644
+index 00000000000000..9d065479e89f94
+--- /dev/null
++++ b/tools/testing/selftests/x86/sigtrap_loop.c
+@@ -0,0 +1,101 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/*
++ * Copyright (C) 2025 Intel Corporation
++ */
++#define _GNU_SOURCE
++
++#include <err.h>
++#include <signal.h>
++#include <stdio.h>
++#include <stdlib.h>
++#include <string.h>
++#include <sys/ucontext.h>
++
++#ifdef __x86_64__
++# define REG_IP REG_RIP
++#else
++# define REG_IP REG_EIP
++#endif
++
++static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *), int flags)
++{
++	struct sigaction sa;
++
++	memset(&sa, 0, sizeof(sa));
++	sa.sa_sigaction = handler;
++	sa.sa_flags = SA_SIGINFO | flags;
++	sigemptyset(&sa.sa_mask);
++
++	if (sigaction(sig, &sa, 0))
++		err(1, "sigaction");
++
++	return;
++}
++
++static void sigtrap(int sig, siginfo_t *info, void *ctx_void)
++{
++	ucontext_t *ctx = (ucontext_t *)ctx_void;
++	static unsigned int loop_count_on_same_ip;
++	static unsigned long last_trap_ip;
++
++	if (last_trap_ip == ctx->uc_mcontext.gregs[REG_IP]) {
++		printf("\tTrapped at %016lx\n", last_trap_ip);
++
++		/*
++		 * If the same IP is hit more than 10 times in a row, it is
++		 * _considered_ an infinite loop.
++		 */
++		if (++loop_count_on_same_ip > 10) {
++			printf("[FAIL]\tDetected SIGTRAP infinite loop\n");
++			exit(1);
++		}
++
++		return;
++	}
++
++	loop_count_on_same_ip = 0;
++	last_trap_ip = ctx->uc_mcontext.gregs[REG_IP];
++	printf("\tTrapped at %016lx\n", last_trap_ip);
++}
++
++int main(int argc, char *argv[])
++{
++	sethandler(SIGTRAP, sigtrap, 0);
++
++	/*
++	 * Set the Trap Flag (TF) to single-step the test code, therefore to
++	 * trigger a SIGTRAP signal after each instruction until the TF is
++	 * cleared.
++	 *
++	 * Because the arithmetic flags are not significant here, the TF is
++	 * set by pushing 0x302 onto the stack and then popping it into the
++	 * flags register.
++	 *
++	 * Four instructions in the following asm code are executed with the
++	 * TF set, thus the SIGTRAP handler is expected to run four times.
++	 */
++	printf("[RUN]\tSIGTRAP infinite loop detection\n");
++	asm volatile(
++#ifdef __x86_64__
++		/*
++		 * Avoid clobbering the redzone
++		 *
++		 * Equivalent to "sub $128, %rsp", however -128 can be encoded
++		 * in a single byte immediate while 128 uses 4 bytes.
++		 */
++		"add $-128, %rsp\n\t"
++#endif
++		"push $0x302\n\t"
++		"popf\n\t"
++		"nop\n\t"
++		"nop\n\t"
++		"push $0x202\n\t"
++		"popf\n\t"
++#ifdef __x86_64__
++		"sub $-128, %rsp\n\t"
++#endif
++	);
++
++	printf("[OK]\tNo SIGTRAP infinite loop detected\n");
++	return 0;
++}
+diff --git a/tools/testing/vma/vma_internal.h b/tools/testing/vma/vma_internal.h
+index c5b9da034511c4..1d5bbc8464f183 100644
+--- a/tools/testing/vma/vma_internal.h
++++ b/tools/testing/vma/vma_internal.h
+@@ -735,6 +735,8 @@ static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
+ 	(void)adjust_next;
+ }
+ 
++static inline void hugetlb_split(struct vm_area_struct *, unsigned long) {}
++
+ static inline void vma_iter_free(struct vma_iterator *vmi)
+ {
+ 	mas_destroy(&vmi->mas);


* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2025-06-19 14:22 Mike Pagano
  0 siblings, 0 replies; 82+ messages in thread
From: Mike Pagano @ 2025-06-19 14:22 UTC (permalink / raw
  To: gentoo-commits

commit:     fd972dad5a8b4fe87b20f7864fb519c79b8b272b
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Jun 19 14:22:15 2025 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Jun 19 14:22:15 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=fd972dad

Linux patch 6.12.34

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |     4 +
 1033_linux-6.12.34.patch | 21339 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 21343 insertions(+)

diff --git a/0000_README b/0000_README
index 1f792eae..7ab46567 100644
--- a/0000_README
+++ b/0000_README
@@ -175,6 +175,10 @@ Patch:  1032_linux-6.12.33.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.12.33
 
+Patch:  1033_linux-6.12.34.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.12.34
+
 Patch:  1500_fortify-copy-size-value-range-tracking-fix.patch
 From:   https://git.kernel.org/
 Desc:   fortify: Hide run-time copy size from value range tracking

diff --git a/1033_linux-6.12.34.patch b/1033_linux-6.12.34.patch
new file mode 100644
index 00000000..e97adf9a
--- /dev/null
+++ b/1033_linux-6.12.34.patch
@@ -0,0 +1,21339 @@
+diff --git a/Documentation/devicetree/bindings/pwm/adi,axi-pwmgen.yaml b/Documentation/devicetree/bindings/pwm/adi,axi-pwmgen.yaml
+index ec6115d3796bab..5575c58357d6e7 100644
+--- a/Documentation/devicetree/bindings/pwm/adi,axi-pwmgen.yaml
++++ b/Documentation/devicetree/bindings/pwm/adi,axi-pwmgen.yaml
+@@ -27,22 +27,31 @@ properties:
+     maxItems: 1
+ 
+   "#pwm-cells":
+-    const: 2
++    const: 3
+ 
+   clocks:
+-    maxItems: 1
++    minItems: 1
++    maxItems: 2
++
++  clock-names:
++    minItems: 1
++    items:
++      - const: axi
++      - const: ext
+ 
+ required:
+   - reg
+   - clocks
++  - clock-names
+ 
+ unevaluatedProperties: false
+ 
+ examples:
+   - |
+     pwm@44b00000 {
+-       compatible = "adi,axi-pwmgen-2.00.a";
+-       reg = <0x44b00000 0x1000>;
+-       clocks = <&spi_clk>;
+-       #pwm-cells = <2>;
++        compatible = "adi,axi-pwmgen-2.00.a";
++        reg = <0x44b00000 0x1000>;
++        clocks = <&fpga_clk>, <&spi_clk>;
++        clock-names = "axi", "ext";
++        #pwm-cells = <3>;
+     };
+diff --git a/Documentation/devicetree/bindings/pwm/brcm,bcm7038-pwm.yaml b/Documentation/devicetree/bindings/pwm/brcm,bcm7038-pwm.yaml
+index 119de3d7f9dd71..44548a9da15807 100644
+--- a/Documentation/devicetree/bindings/pwm/brcm,bcm7038-pwm.yaml
++++ b/Documentation/devicetree/bindings/pwm/brcm,bcm7038-pwm.yaml
+@@ -35,8 +35,8 @@ additionalProperties: false
+ examples:
+   - |
+     pwm: pwm@f0408000 {
+-       compatible = "brcm,bcm7038-pwm";
+-       reg = <0xf0408000 0x28>;
+-       #pwm-cells = <2>;
+-       clocks = <&upg_fixed>;
++        compatible = "brcm,bcm7038-pwm";
++        reg = <0xf0408000 0x28>;
++        #pwm-cells = <2>;
++        clocks = <&upg_fixed>;
+     };
+diff --git a/Documentation/devicetree/bindings/pwm/brcm,kona-pwm.yaml b/Documentation/devicetree/bindings/pwm/brcm,kona-pwm.yaml
+index e86c8053b366a2..fd785da5d3d73c 100644
+--- a/Documentation/devicetree/bindings/pwm/brcm,kona-pwm.yaml
++++ b/Documentation/devicetree/bindings/pwm/brcm,kona-pwm.yaml
+@@ -43,9 +43,9 @@ examples:
+     #include <dt-bindings/clock/bcm281xx.h>
+ 
+     pwm@3e01a000 {
+-       compatible = "brcm,bcm11351-pwm", "brcm,kona-pwm";
+-       reg = <0x3e01a000 0xcc>;
+-       clocks = <&slave_ccu BCM281XX_SLAVE_CCU_PWM>;
+-       #pwm-cells = <3>;
++        compatible = "brcm,bcm11351-pwm", "brcm,kona-pwm";
++        reg = <0x3e01a000 0xcc>;
++        clocks = <&slave_ccu BCM281XX_SLAVE_CCU_PWM>;
++        #pwm-cells = <3>;
+     };
+ ...
+diff --git a/Documentation/devicetree/bindings/regulator/mediatek,mt6357-regulator.yaml b/Documentation/devicetree/bindings/regulator/mediatek,mt6357-regulator.yaml
+index 6327bb2f6ee080..698266c09e2535 100644
+--- a/Documentation/devicetree/bindings/regulator/mediatek,mt6357-regulator.yaml
++++ b/Documentation/devicetree/bindings/regulator/mediatek,mt6357-regulator.yaml
+@@ -33,7 +33,7 @@ patternProperties:
+ 
+   "^ldo-v(camio18|aud28|aux18|io18|io28|rf12|rf18|cn18|cn28|fe28)$":
+     type: object
+-    $ref: fixed-regulator.yaml#
++    $ref: regulator.yaml#
+     unevaluatedProperties: false
+     description:
+       Properties for single fixed LDO regulator.
+@@ -112,7 +112,6 @@ examples:
+           regulator-enable-ramp-delay = <220>;
+         };
+         mt6357_vfe28_reg: ldo-vfe28 {
+-          compatible = "regulator-fixed";
+           regulator-name = "vfe28";
+           regulator-min-microvolt = <2800000>;
+           regulator-max-microvolt = <2800000>;
+@@ -125,14 +124,12 @@ examples:
+           regulator-enable-ramp-delay = <110>;
+         };
+         mt6357_vrf18_reg: ldo-vrf18 {
+-          compatible = "regulator-fixed";
+           regulator-name = "vrf18";
+           regulator-min-microvolt = <1800000>;
+           regulator-max-microvolt = <1800000>;
+           regulator-enable-ramp-delay = <110>;
+         };
+         mt6357_vrf12_reg: ldo-vrf12 {
+-          compatible = "regulator-fixed";
+           regulator-name = "vrf12";
+           regulator-min-microvolt = <1200000>;
+           regulator-max-microvolt = <1200000>;
+@@ -157,14 +154,12 @@ examples:
+           regulator-enable-ramp-delay = <264>;
+         };
+         mt6357_vcn28_reg: ldo-vcn28 {
+-          compatible = "regulator-fixed";
+           regulator-name = "vcn28";
+           regulator-min-microvolt = <2800000>;
+           regulator-max-microvolt = <2800000>;
+           regulator-enable-ramp-delay = <264>;
+         };
+         mt6357_vcn18_reg: ldo-vcn18 {
+-          compatible = "regulator-fixed";
+           regulator-name = "vcn18";
+           regulator-min-microvolt = <1800000>;
+           regulator-max-microvolt = <1800000>;
+@@ -183,7 +178,6 @@ examples:
+           regulator-enable-ramp-delay = <264>;
+         };
+         mt6357_vcamio_reg: ldo-vcamio18 {
+-          compatible = "regulator-fixed";
+           regulator-name = "vcamio";
+           regulator-min-microvolt = <1800000>;
+           regulator-max-microvolt = <1800000>;
+@@ -212,28 +206,24 @@ examples:
+           regulator-always-on;
+         };
+         mt6357_vaux18_reg: ldo-vaux18 {
+-          compatible = "regulator-fixed";
+           regulator-name = "vaux18";
+           regulator-min-microvolt = <1800000>;
+           regulator-max-microvolt = <1800000>;
+           regulator-enable-ramp-delay = <264>;
+         };
+         mt6357_vaud28_reg: ldo-vaud28 {
+-          compatible = "regulator-fixed";
+           regulator-name = "vaud28";
+           regulator-min-microvolt = <2800000>;
+           regulator-max-microvolt = <2800000>;
+           regulator-enable-ramp-delay = <264>;
+         };
+         mt6357_vio28_reg: ldo-vio28 {
+-          compatible = "regulator-fixed";
+           regulator-name = "vio28";
+           regulator-min-microvolt = <2800000>;
+           regulator-max-microvolt = <2800000>;
+           regulator-enable-ramp-delay = <264>;
+         };
+         mt6357_vio18_reg: ldo-vio18 {
+-          compatible = "regulator-fixed";
+           regulator-name = "vio18";
+           regulator-min-microvolt = <1800000>;
+           regulator-max-microvolt = <1800000>;
+diff --git a/Documentation/devicetree/bindings/soc/fsl/fsl,qman-fqd.yaml b/Documentation/devicetree/bindings/soc/fsl/fsl,qman-fqd.yaml
+index de0b4ae740ff23..a975bce599750e 100644
+--- a/Documentation/devicetree/bindings/soc/fsl/fsl,qman-fqd.yaml
++++ b/Documentation/devicetree/bindings/soc/fsl/fsl,qman-fqd.yaml
+@@ -50,7 +50,7 @@ required:
+   - compatible
+ 
+ allOf:
+-  - $ref: reserved-memory.yaml
++  - $ref: /schemas/reserved-memory/reserved-memory.yaml
+ 
+ unevaluatedProperties: false
+ 
+@@ -61,7 +61,7 @@ examples:
+         #size-cells = <2>;
+ 
+         qman-fqd {
+-            compatible = "shared-dma-pool";
++            compatible = "fsl,qman-fqd";
+             size = <0 0x400000>;
+             alignment = <0 0x400000>;
+             no-map;
+diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml
+index 71a1a399e1e1fe..af9a8d43b2479c 100644
+--- a/Documentation/devicetree/bindings/vendor-prefixes.yaml
++++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml
+@@ -846,6 +846,8 @@ patternProperties:
+     description: Linux-specific binding
+   "^linx,.*":
+     description: Linx Technologies
++  "^liontron,.*":
++    description: Shenzhen Liontron Technology Co., Ltd
+   "^liteon,.*":
+     description: LITE-ON Technology Corp.
+   "^litex,.*":
+diff --git a/Documentation/gpu/xe/index.rst b/Documentation/gpu/xe/index.rst
+index 3f07aa3b54325d..89bbdcccf8eb70 100644
+--- a/Documentation/gpu/xe/index.rst
++++ b/Documentation/gpu/xe/index.rst
+@@ -16,6 +16,7 @@ DG2, etc is provided to prototype the driver.
+    xe_migrate
+    xe_cs
+    xe_pm
++   xe_gt_freq
+    xe_pcode
+    xe_gt_mcr
+    xe_wa
+diff --git a/Documentation/gpu/xe/xe_gt_freq.rst b/Documentation/gpu/xe/xe_gt_freq.rst
+new file mode 100644
+index 00000000000000..c0811200e32755
+--- /dev/null
++++ b/Documentation/gpu/xe/xe_gt_freq.rst
+@@ -0,0 +1,14 @@
++.. SPDX-License-Identifier: (GPL-2.0+ OR MIT)
++
++==========================
++Xe GT Frequency Management
++==========================
++
++.. kernel-doc:: drivers/gpu/drm/xe/xe_gt_freq.c
++   :doc: Xe GT Frequency Management
++
++Internal API
++============
++
++.. kernel-doc:: drivers/gpu/drm/xe/xe_gt_freq.c
++   :internal:
+diff --git a/Makefile b/Makefile
+index c53dd3520193a9..b58a061cb35955 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 12
+-SUBLEVEL = 33
++SUBLEVEL = 34
+ EXTRAVERSION =
+ NAME = Baby Opossum Posse
+ 
+diff --git a/arch/arm/boot/dts/microchip/at91sam9263ek.dts b/arch/arm/boot/dts/microchip/at91sam9263ek.dts
+index ce8baff6a9f4e0..e42e1a75a715db 100644
+--- a/arch/arm/boot/dts/microchip/at91sam9263ek.dts
++++ b/arch/arm/boot/dts/microchip/at91sam9263ek.dts
+@@ -152,7 +152,7 @@ nand_controller: nand-controller {
+ 				nand@3 {
+ 					reg = <0x3 0x0 0x800000>;
+ 					rb-gpios = <&pioA 22 GPIO_ACTIVE_HIGH>;
+-					cs-gpios = <&pioA 15 GPIO_ACTIVE_HIGH>;
++					cs-gpios = <&pioD 15 GPIO_ACTIVE_HIGH>;
+ 					nand-bus-width = <8>;
+ 					nand-ecc-mode = "soft";
+ 					nand-on-flash-bbt;
+diff --git a/arch/arm/boot/dts/microchip/tny_a9263.dts b/arch/arm/boot/dts/microchip/tny_a9263.dts
+index 62b7d9f9a926c5..c8b6318aaa838c 100644
+--- a/arch/arm/boot/dts/microchip/tny_a9263.dts
++++ b/arch/arm/boot/dts/microchip/tny_a9263.dts
+@@ -64,7 +64,7 @@ nand_controller: nand-controller {
+ 				nand@3 {
+ 					reg = <0x3 0x0 0x800000>;
+ 					rb-gpios = <&pioA 22 GPIO_ACTIVE_HIGH>;
+-					cs-gpios = <&pioA 15 GPIO_ACTIVE_HIGH>;
++					cs-gpios = <&pioD 15 GPIO_ACTIVE_HIGH>;
+ 					nand-bus-width = <8>;
+ 					nand-ecc-mode = "soft";
+ 					nand-on-flash-bbt;
+diff --git a/arch/arm/boot/dts/microchip/usb_a9263.dts b/arch/arm/boot/dts/microchip/usb_a9263.dts
+index 45745915b2e160..454176ce6d3fff 100644
+--- a/arch/arm/boot/dts/microchip/usb_a9263.dts
++++ b/arch/arm/boot/dts/microchip/usb_a9263.dts
+@@ -58,7 +58,7 @@ usb1: gadget@fff78000 {
+ 			};
+ 
+ 			spi0: spi@fffa4000 {
+-				cs-gpios = <&pioB 15 GPIO_ACTIVE_HIGH>;
++				cs-gpios = <&pioA 5 GPIO_ACTIVE_LOW>;
+ 				status = "okay";
+ 				flash@0 {
+ 					compatible = "atmel,at45", "atmel,dataflash";
+@@ -84,7 +84,7 @@ nand_controller: nand-controller {
+ 				nand@3 {
+ 					reg = <0x3 0x0 0x800000>;
+ 					rb-gpios = <&pioA 22 GPIO_ACTIVE_HIGH>;
+-					cs-gpios = <&pioA 15 GPIO_ACTIVE_HIGH>;
++					cs-gpios = <&pioD 15 GPIO_ACTIVE_HIGH>;
+ 					nand-bus-width = <8>;
+ 					nand-ecc-mode = "soft";
+ 					nand-on-flash-bbt;
+diff --git a/arch/arm/boot/dts/qcom/qcom-apq8064.dtsi b/arch/arm/boot/dts/qcom/qcom-apq8064.dtsi
+index ac7494ed633e1b..be87c396f05f1f 100644
+--- a/arch/arm/boot/dts/qcom/qcom-apq8064.dtsi
++++ b/arch/arm/boot/dts/qcom/qcom-apq8064.dtsi
+@@ -213,12 +213,6 @@ sleep_clk: sleep_clk {
+ 		};
+ 	};
+ 
+-	sfpb_mutex: hwmutex {
+-		compatible = "qcom,sfpb-mutex";
+-		syscon = <&sfpb_wrapper_mutex 0x604 0x4>;
+-		#hwlock-cells = <1>;
+-	};
+-
+ 	smem {
+ 		compatible = "qcom,smem";
+ 		memory-region = <&smem_region>;
+@@ -284,6 +278,40 @@ scm {
+ 		};
+ 	};
+ 
++	replicator {
++		compatible = "arm,coresight-static-replicator";
++
++		clocks = <&rpmcc RPM_QDSS_CLK>;
++		clock-names = "apb_pclk";
++
++		in-ports {
++			port {
++				replicator_in: endpoint {
++					remote-endpoint = <&funnel_out>;
++				};
++			};
++		};
++
++		out-ports {
++			#address-cells = <1>;
++			#size-cells = <0>;
++
++			port@0 {
++				reg = <0>;
++				replicator_out0: endpoint {
++					remote-endpoint = <&etb_in>;
++				};
++			};
++
++			port@1 {
++				reg = <1>;
++				replicator_out1: endpoint {
++					remote-endpoint = <&tpiu_in>;
++				};
++			};
++		};
++	};
++
+ 	soc: soc {
+ 		#address-cells = <1>;
+ 		#size-cells = <1>;
+@@ -305,9 +333,10 @@ tlmm_pinmux: pinctrl@800000 {
+ 			pinctrl-0 = <&ps_hold_default_state>;
+ 		};
+ 
+-		sfpb_wrapper_mutex: syscon@1200000 {
+-			compatible = "syscon";
+-			reg = <0x01200000 0x8000>;
++		sfpb_mutex: hwmutex@1200600 {
++			compatible = "qcom,sfpb-mutex";
++			reg = <0x01200600 0x100>;
++			#hwlock-cells = <1>;
+ 		};
+ 
+ 		intc: interrupt-controller@2000000 {
+@@ -326,6 +355,8 @@ timer@200a000 {
+ 				     <GIC_PPI 3 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_EDGE_RISING)>;
+ 			reg = <0x0200a000 0x100>;
+ 			clock-frequency = <27000000>;
++			clocks = <&sleep_clk>;
++			clock-names = "sleep";
+ 			cpu-offset = <0x80000>;
+ 		};
+ 
+@@ -1532,39 +1563,6 @@ tpiu_in: endpoint {
+ 			};
+ 		};
+ 
+-		replicator {
+-			compatible = "arm,coresight-static-replicator";
+-
+-			clocks = <&rpmcc RPM_QDSS_CLK>;
+-			clock-names = "apb_pclk";
+-
+-			out-ports {
+-				#address-cells = <1>;
+-				#size-cells = <0>;
+-
+-				port@0 {
+-					reg = <0>;
+-					replicator_out0: endpoint {
+-						remote-endpoint = <&etb_in>;
+-					};
+-				};
+-				port@1 {
+-					reg = <1>;
+-					replicator_out1: endpoint {
+-						remote-endpoint = <&tpiu_in>;
+-					};
+-				};
+-			};
+-
+-			in-ports {
+-				port {
+-					replicator_in: endpoint {
+-						remote-endpoint = <&funnel_out>;
+-					};
+-				};
+-			};
+-		};
+-
+ 		funnel@1a04000 {
+ 			compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+ 			reg = <0x1a04000 0x1000>;
+diff --git a/arch/arm/mach-aspeed/Kconfig b/arch/arm/mach-aspeed/Kconfig
+index 080019aa6fcd89..fcf287edd0e5e6 100644
+--- a/arch/arm/mach-aspeed/Kconfig
++++ b/arch/arm/mach-aspeed/Kconfig
+@@ -2,7 +2,6 @@
+ menuconfig ARCH_ASPEED
+ 	bool "Aspeed BMC architectures"
+ 	depends on (CPU_LITTLE_ENDIAN && ARCH_MULTI_V5) || ARCH_MULTI_V6 || ARCH_MULTI_V7
+-	select SRAM
+ 	select WATCHDOG
+ 	select ASPEED_WATCHDOG
+ 	select MFD_SYSCON
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index a11a7a42edbfb5..7887d18cce3e45 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -322,9 +322,9 @@ config ARCH_MMAP_RND_BITS_MAX
+ 	default 24 if ARM64_VA_BITS=39
+ 	default 27 if ARM64_VA_BITS=42
+ 	default 30 if ARM64_VA_BITS=47
+-	default 29 if ARM64_VA_BITS=48 && ARM64_64K_PAGES
+-	default 31 if ARM64_VA_BITS=48 && ARM64_16K_PAGES
+-	default 33 if ARM64_VA_BITS=48
++	default 29 if (ARM64_VA_BITS=48 || ARM64_VA_BITS=52) && ARM64_64K_PAGES
++	default 31 if (ARM64_VA_BITS=48 || ARM64_VA_BITS=52) && ARM64_16K_PAGES
++	default 33 if (ARM64_VA_BITS=48 || ARM64_VA_BITS=52)
+ 	default 14 if ARM64_64K_PAGES
+ 	default 16 if ARM64_16K_PAGES
+ 	default 18
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm-beacon-kit.dts b/arch/arm64/boot/dts/freescale/imx8mm-beacon-kit.dts
+index 97ff1ddd631888..734a75198f06e0 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm-beacon-kit.dts
++++ b/arch/arm64/boot/dts/freescale/imx8mm-beacon-kit.dts
+@@ -124,6 +124,7 @@ &sai5 {
+ 	assigned-clock-parents = <&clk IMX8MM_AUDIO_PLL1_OUT>;
+ 	assigned-clock-rates = <24576000>;
+ 	#sound-dai-cells = <0>;
++	fsl,sai-mclk-direction-output;
+ 	status = "okay";
+ };
+ 
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm-beacon-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-beacon-som.dtsi
+index 62ed64663f4952..9ba0cb89fa24e0 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm-beacon-som.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mm-beacon-som.dtsi
+@@ -233,6 +233,7 @@ eeprom@50 {
+ 	rtc: rtc@51 {
+ 		compatible = "nxp,pcf85263";
+ 		reg = <0x51>;
++		quartz-load-femtofarads = <12500>;
+ 	};
+ };
+ 
+diff --git a/arch/arm64/boot/dts/freescale/imx8mn-beacon-kit.dts b/arch/arm64/boot/dts/freescale/imx8mn-beacon-kit.dts
+index 1df5ceb1138793..37fc5ed98d7f61 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mn-beacon-kit.dts
++++ b/arch/arm64/boot/dts/freescale/imx8mn-beacon-kit.dts
+@@ -124,6 +124,7 @@ &sai5 {
+ 	assigned-clock-parents = <&clk IMX8MN_AUDIO_PLL1_OUT>;
+ 	assigned-clock-rates = <24576000>;
+ 	#sound-dai-cells = <0>;
++	fsl,sai-mclk-direction-output;
+ 	status = "okay";
+ };
+ 
+diff --git a/arch/arm64/boot/dts/freescale/imx8mn-beacon-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mn-beacon-som.dtsi
+index 2a64115eebf1c6..bb11590473a4c7 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mn-beacon-som.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mn-beacon-som.dtsi
+@@ -242,6 +242,7 @@ eeprom@50 {
+ 	rtc: rtc@51 {
+ 		compatible = "nxp,pcf85263";
+ 		reg = <0x51>;
++		quartz-load-femtofarads = <12500>;
+ 	};
+ };
+ 
+diff --git a/arch/arm64/boot/dts/freescale/imx8mp-beacon-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-beacon-som.dtsi
+index 15f7ab58db36cc..88561df70d03ac 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mp-beacon-som.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mp-beacon-som.dtsi
+@@ -257,6 +257,7 @@ eeprom@50 {
+ 	rtc: rtc@51 {
+ 		compatible = "nxp,pcf85263";
+ 		reg = <0x51>;
++		quartz-load-femtofarads = <12500>;
+ 	};
+ };
+ 
+diff --git a/arch/arm64/boot/dts/mediatek/mt6357.dtsi b/arch/arm64/boot/dts/mediatek/mt6357.dtsi
+index 5fafa842d312f3..dca4e5c3d8e210 100644
+--- a/arch/arm64/boot/dts/mediatek/mt6357.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt6357.dtsi
+@@ -60,7 +60,6 @@ mt6357_vpa_reg: buck-vpa {
+ 			};
+ 
+ 			mt6357_vfe28_reg: ldo-vfe28 {
+-				compatible = "regulator-fixed";
+ 				regulator-name = "vfe28";
+ 				regulator-min-microvolt = <2800000>;
+ 				regulator-max-microvolt = <2800000>;
+@@ -75,7 +74,6 @@ mt6357_vxo22_reg: ldo-vxo22 {
+ 			};
+ 
+ 			mt6357_vrf18_reg: ldo-vrf18 {
+-				compatible = "regulator-fixed";
+ 				regulator-name = "vrf18";
+ 				regulator-min-microvolt = <1800000>;
+ 				regulator-max-microvolt = <1800000>;
+@@ -83,7 +81,6 @@ mt6357_vrf18_reg: ldo-vrf18 {
+ 			};
+ 
+ 			mt6357_vrf12_reg: ldo-vrf12 {
+-				compatible = "regulator-fixed";
+ 				regulator-name = "vrf12";
+ 				regulator-min-microvolt = <1200000>;
+ 				regulator-max-microvolt = <1200000>;
+@@ -112,7 +109,6 @@ mt6357_vcn33_wifi_reg: ldo-vcn33-wifi {
+ 			};
+ 
+ 			mt6357_vcn28_reg: ldo-vcn28 {
+-				compatible = "regulator-fixed";
+ 				regulator-name = "vcn28";
+ 				regulator-min-microvolt = <2800000>;
+ 				regulator-max-microvolt = <2800000>;
+@@ -120,7 +116,6 @@ mt6357_vcn28_reg: ldo-vcn28 {
+ 			};
+ 
+ 			mt6357_vcn18_reg: ldo-vcn18 {
+-				compatible = "regulator-fixed";
+ 				regulator-name = "vcn18";
+ 				regulator-min-microvolt = <1800000>;
+ 				regulator-max-microvolt = <1800000>;
+@@ -142,7 +137,6 @@ mt6357_vcamd_reg: ldo-vcamd {
+ 			};
+ 
+ 			mt6357_vcamio_reg: ldo-vcamio18 {
+-				compatible = "regulator-fixed";
+ 				regulator-name = "vcamio";
+ 				regulator-min-microvolt = <1800000>;
+ 				regulator-max-microvolt = <1800000>;
+@@ -175,7 +169,6 @@ mt6357_vsram_proc_reg: ldo-vsram-proc {
+ 			};
+ 
+ 			mt6357_vaux18_reg: ldo-vaux18 {
+-				compatible = "regulator-fixed";
+ 				regulator-name = "vaux18";
+ 				regulator-min-microvolt = <1800000>;
+ 				regulator-max-microvolt = <1800000>;
+@@ -183,7 +176,6 @@ mt6357_vaux18_reg: ldo-vaux18 {
+ 			};
+ 
+ 			mt6357_vaud28_reg: ldo-vaud28 {
+-				compatible = "regulator-fixed";
+ 				regulator-name = "vaud28";
+ 				regulator-min-microvolt = <2800000>;
+ 				regulator-max-microvolt = <2800000>;
+@@ -191,7 +183,6 @@ mt6357_vaud28_reg: ldo-vaud28 {
+ 			};
+ 
+ 			mt6357_vio28_reg: ldo-vio28 {
+-				compatible = "regulator-fixed";
+ 				regulator-name = "vio28";
+ 				regulator-min-microvolt = <2800000>;
+ 				regulator-max-microvolt = <2800000>;
+@@ -199,7 +190,6 @@ mt6357_vio28_reg: ldo-vio28 {
+ 			};
+ 
+ 			mt6357_vio18_reg: ldo-vio18 {
+-				compatible = "regulator-fixed";
+ 				regulator-name = "vio18";
+ 				regulator-min-microvolt = <1800000>;
+ 				regulator-max-microvolt = <1800000>;
+diff --git a/arch/arm64/boot/dts/mediatek/mt6359.dtsi b/arch/arm64/boot/dts/mediatek/mt6359.dtsi
+index 8e1b8c85c6ede9..779d6dfb55c003 100644
+--- a/arch/arm64/boot/dts/mediatek/mt6359.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt6359.dtsi
+@@ -18,6 +18,8 @@ mt6359codec: mt6359codec {
+ 		};
+ 
+ 		regulators {
++			compatible = "mediatek,mt6359-regulator";
++
+ 			mt6359_vs1_buck_reg: buck_vs1 {
+ 				regulator-name = "vs1";
+ 				regulator-min-microvolt = <800000>;
+@@ -296,7 +298,7 @@ mt6359_vsram_others_sshub_ldo: ldo_vsram_others_sshub {
+ 			};
+ 		};
+ 
+-		mt6359rtc: mt6359rtc {
++		mt6359rtc: rtc {
+ 			compatible = "mediatek,mt6358-rtc";
+ 		};
+ 	};
+diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
+index 22924f61ec9ed2..c4fafd51b12256 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
+@@ -280,14 +280,10 @@ panel_in: endpoint {
+ 			};
+ 		};
+ 	};
++};
+ 
+-	ports {
+-		port {
+-			dsi_out: endpoint {
+-				remote-endpoint = <&panel_in>;
+-			};
+-		};
+-	};
++&dsi_out {
++	remote-endpoint = <&panel_in>;
+ };
+ 
+ &gic {
+diff --git a/arch/arm64/boot/dts/mediatek/mt8183.dtsi b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
+index 92c41463d10e37..65be2c2c26d405 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8183.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
+@@ -1836,6 +1836,10 @@ dsi0: dsi@14014000 {
+ 			phys = <&mipi_tx0>;
+ 			phy-names = "dphy";
+ 			status = "disabled";
++
++			port {
++				dsi_out: endpoint { };
++			};
+ 		};
+ 
+ 		dpi0: dpi@14015000 {
+diff --git a/arch/arm64/boot/dts/mediatek/mt8195.dtsi b/arch/arm64/boot/dts/mediatek/mt8195.dtsi
+index f013dbad9dc4ea..2e138b54f55639 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8195.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8195.dtsi
+@@ -617,22 +617,6 @@ power-domain@MT8195_POWER_DOMAIN_VPPSYS0 {
+ 					#size-cells = <0>;
+ 					#power-domain-cells = <1>;
+ 
+-					power-domain@MT8195_POWER_DOMAIN_VDEC1 {
+-						reg = <MT8195_POWER_DOMAIN_VDEC1>;
+-						clocks = <&vdecsys CLK_VDEC_LARB1>;
+-						clock-names = "vdec1-0";
+-						mediatek,infracfg = <&infracfg_ao>;
+-						#power-domain-cells = <0>;
+-					};
+-
+-					power-domain@MT8195_POWER_DOMAIN_VENC_CORE1 {
+-						reg = <MT8195_POWER_DOMAIN_VENC_CORE1>;
+-						clocks = <&vencsys_core1 CLK_VENC_CORE1_LARB>;
+-						clock-names = "venc1-larb";
+-						mediatek,infracfg = <&infracfg_ao>;
+-						#power-domain-cells = <0>;
+-					};
+-
+ 					power-domain@MT8195_POWER_DOMAIN_VDOSYS0 {
+ 						reg = <MT8195_POWER_DOMAIN_VDOSYS0>;
+ 						clocks = <&topckgen CLK_TOP_CFG_VDO0>,
+@@ -678,15 +662,25 @@ power-domain@MT8195_POWER_DOMAIN_VDEC0 {
+ 							clocks = <&vdecsys_soc CLK_VDEC_SOC_LARB1>;
+ 							clock-names = "vdec0-0";
+ 							mediatek,infracfg = <&infracfg_ao>;
++							#address-cells = <1>;
++							#size-cells = <0>;
+ 							#power-domain-cells = <0>;
+-						};
+ 
+-						power-domain@MT8195_POWER_DOMAIN_VDEC2 {
+-							reg = <MT8195_POWER_DOMAIN_VDEC2>;
+-							clocks = <&vdecsys_core1 CLK_VDEC_CORE1_LARB1>;
+-							clock-names = "vdec2-0";
+-							mediatek,infracfg = <&infracfg_ao>;
+-							#power-domain-cells = <0>;
++							power-domain@MT8195_POWER_DOMAIN_VDEC1 {
++								reg = <MT8195_POWER_DOMAIN_VDEC1>;
++								clocks = <&vdecsys CLK_VDEC_LARB1>;
++								clock-names = "vdec1-0";
++								mediatek,infracfg = <&infracfg_ao>;
++								#power-domain-cells = <0>;
++							};
++
++							power-domain@MT8195_POWER_DOMAIN_VDEC2 {
++								reg = <MT8195_POWER_DOMAIN_VDEC2>;
++								clocks = <&vdecsys_core1 CLK_VDEC_CORE1_LARB1>;
++								clock-names = "vdec2-0";
++								mediatek,infracfg = <&infracfg_ao>;
++								#power-domain-cells = <0>;
++							};
+ 						};
+ 
+ 						power-domain@MT8195_POWER_DOMAIN_VENC {
+@@ -694,7 +688,17 @@ power-domain@MT8195_POWER_DOMAIN_VENC {
+ 							clocks = <&vencsys CLK_VENC_LARB>;
+ 							clock-names = "venc0-larb";
+ 							mediatek,infracfg = <&infracfg_ao>;
++							#address-cells = <1>;
++							#size-cells = <0>;
+ 							#power-domain-cells = <0>;
++
++							power-domain@MT8195_POWER_DOMAIN_VENC_CORE1 {
++								reg = <MT8195_POWER_DOMAIN_VENC_CORE1>;
++								clocks = <&vencsys_core1 CLK_VENC_CORE1_LARB>;
++								clock-names = "venc1-larb";
++								mediatek,infracfg = <&infracfg_ao>;
++								#power-domain-cells = <0>;
++							};
+ 						};
+ 
+ 						power-domain@MT8195_POWER_DOMAIN_VDOSYS1 {
+diff --git a/arch/arm64/boot/dts/nvidia/tegra186.dtsi b/arch/arm64/boot/dts/nvidia/tegra186.dtsi
+index 2b3bb5d0af17bd..f0b7949df92c05 100644
+--- a/arch/arm64/boot/dts/nvidia/tegra186.dtsi
++++ b/arch/arm64/boot/dts/nvidia/tegra186.dtsi
+@@ -621,9 +621,7 @@ uartb: serial@3110000 {
+ 		reg-shift = <2>;
+ 		interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
+ 		clocks = <&bpmp TEGRA186_CLK_UARTB>;
+-		clock-names = "serial";
+ 		resets = <&bpmp TEGRA186_RESET_UARTB>;
+-		reset-names = "serial";
+ 		status = "disabled";
+ 	};
+ 
+@@ -633,9 +631,7 @@ uartd: serial@3130000 {
+ 		reg-shift = <2>;
+ 		interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>;
+ 		clocks = <&bpmp TEGRA186_CLK_UARTD>;
+-		clock-names = "serial";
+ 		resets = <&bpmp TEGRA186_RESET_UARTD>;
+-		reset-names = "serial";
+ 		status = "disabled";
+ 	};
+ 
+@@ -645,9 +641,7 @@ uarte: serial@3140000 {
+ 		reg-shift = <2>;
+ 		interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>;
+ 		clocks = <&bpmp TEGRA186_CLK_UARTE>;
+-		clock-names = "serial";
+ 		resets = <&bpmp TEGRA186_RESET_UARTE>;
+-		reset-names = "serial";
+ 		status = "disabled";
+ 	};
+ 
+@@ -657,9 +651,7 @@ uartf: serial@3150000 {
+ 		reg-shift = <2>;
+ 		interrupts = <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>;
+ 		clocks = <&bpmp TEGRA186_CLK_UARTF>;
+-		clock-names = "serial";
+ 		resets = <&bpmp TEGRA186_RESET_UARTF>;
+-		reset-names = "serial";
+ 		status = "disabled";
+ 	};
+ 
+@@ -1236,9 +1228,7 @@ uartc: serial@c280000 {
+ 		reg-shift = <2>;
+ 		interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
+ 		clocks = <&bpmp TEGRA186_CLK_UARTC>;
+-		clock-names = "serial";
+ 		resets = <&bpmp TEGRA186_RESET_UARTC>;
+-		reset-names = "serial";
+ 		status = "disabled";
+ 	};
+ 
+@@ -1248,9 +1238,7 @@ uartg: serial@c290000 {
+ 		reg-shift = <2>;
+ 		interrupts = <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>;
+ 		clocks = <&bpmp TEGRA186_CLK_UARTG>;
+-		clock-names = "serial";
+ 		resets = <&bpmp TEGRA186_RESET_UARTG>;
+-		reset-names = "serial";
+ 		status = "disabled";
+ 	};
+ 
+diff --git a/arch/arm64/boot/dts/nvidia/tegra194.dtsi b/arch/arm64/boot/dts/nvidia/tegra194.dtsi
+index 33f92b77cd9d9e..c3695077478514 100644
+--- a/arch/arm64/boot/dts/nvidia/tegra194.dtsi
++++ b/arch/arm64/boot/dts/nvidia/tegra194.dtsi
+@@ -766,9 +766,7 @@ uartd: serial@3130000 {
+ 			reg-shift = <2>;
+ 			interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>;
+ 			clocks = <&bpmp TEGRA194_CLK_UARTD>;
+-			clock-names = "serial";
+ 			resets = <&bpmp TEGRA194_RESET_UARTD>;
+-			reset-names = "serial";
+ 			status = "disabled";
+ 		};
+ 
+@@ -778,9 +776,7 @@ uarte: serial@3140000 {
+ 			reg-shift = <2>;
+ 			interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>;
+ 			clocks = <&bpmp TEGRA194_CLK_UARTE>;
+-			clock-names = "serial";
+ 			resets = <&bpmp TEGRA194_RESET_UARTE>;
+-			reset-names = "serial";
+ 			status = "disabled";
+ 		};
+ 
+@@ -790,9 +786,7 @@ uartf: serial@3150000 {
+ 			reg-shift = <2>;
+ 			interrupts = <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>;
+ 			clocks = <&bpmp TEGRA194_CLK_UARTF>;
+-			clock-names = "serial";
+ 			resets = <&bpmp TEGRA194_RESET_UARTF>;
+-			reset-names = "serial";
+ 			status = "disabled";
+ 		};
+ 
+@@ -817,9 +811,7 @@ uarth: serial@3170000 {
+ 			reg-shift = <2>;
+ 			interrupts = <GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH>;
+ 			clocks = <&bpmp TEGRA194_CLK_UARTH>;
+-			clock-names = "serial";
+ 			resets = <&bpmp TEGRA194_RESET_UARTH>;
+-			reset-names = "serial";
+ 			status = "disabled";
+ 		};
+ 
+@@ -1616,9 +1608,7 @@ uartc: serial@c280000 {
+ 			reg-shift = <2>;
+ 			interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
+ 			clocks = <&bpmp TEGRA194_CLK_UARTC>;
+-			clock-names = "serial";
+ 			resets = <&bpmp TEGRA194_RESET_UARTC>;
+-			reset-names = "serial";
+ 			status = "disabled";
+ 		};
+ 
+@@ -1628,9 +1618,7 @@ uartg: serial@c290000 {
+ 			reg-shift = <2>;
+ 			interrupts = <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>;
+ 			clocks = <&bpmp TEGRA194_CLK_UARTG>;
+-			clock-names = "serial";
+ 			resets = <&bpmp TEGRA194_RESET_UARTG>;
+-			reset-names = "serial";
+ 			status = "disabled";
+ 		};
+ 
+diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi b/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi
+index 1c53ccc5e3cbf3..9c1b2e7d3997fa 100644
+--- a/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi
++++ b/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi
+@@ -11,6 +11,7 @@ aliases {
+ 		rtc0 = "/i2c@7000d000/pmic@3c";
+ 		rtc1 = "/rtc@7000e000";
+ 		serial0 = &uarta;
++		serial3 = &uartd;
+ 	};
+ 
+ 	chosen {
+diff --git a/arch/arm64/boot/dts/qcom/ipq9574-rdp-common.dtsi b/arch/arm64/boot/dts/qcom/ipq9574-rdp-common.dtsi
+index 91e104b0f86534..a5294a42c287ae 100644
+--- a/arch/arm64/boot/dts/qcom/ipq9574-rdp-common.dtsi
++++ b/arch/arm64/boot/dts/qcom/ipq9574-rdp-common.dtsi
+@@ -111,6 +111,13 @@ mp5496_l2: l2 {
+ 			regulator-always-on;
+ 			regulator-boot-on;
+ 		};
++
++		mp5496_l5: l5 {
++			regulator-min-microvolt = <1800000>;
++			regulator-max-microvolt = <1800000>;
++			regulator-always-on;
++			regulator-boot-on;
++		};
+ 	};
+ };
+ 
+@@ -146,7 +153,7 @@ &usb_0_dwc3 {
+ };
+ 
+ &usb_0_qmpphy {
+-	vdda-pll-supply = <&mp5496_l2>;
++	vdda-pll-supply = <&mp5496_l5>;
+ 	vdda-phy-supply = <&regulator_fixed_0p925>;
+ 
+ 	status = "okay";
+@@ -154,7 +161,7 @@ &usb_0_qmpphy {
+ 
+ &usb_0_qusbphy {
+ 	vdd-supply = <&regulator_fixed_0p925>;
+-	vdda-pll-supply = <&mp5496_l2>;
++	vdda-pll-supply = <&mp5496_l5>;
+ 	vdda-phy-dpdm-supply = <&regulator_fixed_3p3>;
+ 
+ 	status = "okay";
+diff --git a/arch/arm64/boot/dts/qcom/qcm2290.dtsi b/arch/arm64/boot/dts/qcom/qcm2290.dtsi
+index 79bc42ffb6a1ff..2cfdf5bd5fd9be 100644
+--- a/arch/arm64/boot/dts/qcom/qcm2290.dtsi
++++ b/arch/arm64/boot/dts/qcom/qcm2290.dtsi
+@@ -1073,7 +1073,7 @@ spi0: spi@4a80000 {
+ 				interconnects = <&qup_virt MASTER_QUP_CORE_0 RPM_ALWAYS_TAG
+ 						 &qup_virt SLAVE_QUP_CORE_0 RPM_ALWAYS_TAG>,
+ 						<&bimc MASTER_APPSS_PROC RPM_ALWAYS_TAG
+-						 &config_noc MASTER_APPSS_PROC RPM_ALWAYS_TAG>;
++						 &config_noc SLAVE_QUP_0 RPM_ALWAYS_TAG>;
+ 				interconnect-names = "qup-core",
+ 						     "qup-config";
+ 				#address-cells = <1>;
+@@ -1092,7 +1092,7 @@ uart0: serial@4a80000 {
+ 				interconnects = <&qup_virt MASTER_QUP_CORE_0 RPM_ALWAYS_TAG
+ 						 &qup_virt SLAVE_QUP_CORE_0 RPM_ALWAYS_TAG>,
+ 						<&bimc MASTER_APPSS_PROC RPM_ALWAYS_TAG
+-						 &config_noc MASTER_APPSS_PROC RPM_ALWAYS_TAG>;
++						 &config_noc SLAVE_QUP_0 RPM_ALWAYS_TAG>;
+ 				interconnect-names = "qup-core",
+ 						     "qup-config";
+ 				status = "disabled";
+@@ -1137,7 +1137,7 @@ spi1: spi@4a84000 {
+ 				interconnects = <&qup_virt MASTER_QUP_CORE_0 RPM_ALWAYS_TAG
+ 						 &qup_virt SLAVE_QUP_CORE_0 RPM_ALWAYS_TAG>,
+ 						<&bimc MASTER_APPSS_PROC RPM_ALWAYS_TAG
+-						 &config_noc MASTER_APPSS_PROC RPM_ALWAYS_TAG>;
++						 &config_noc SLAVE_QUP_0 RPM_ALWAYS_TAG>;
+ 				interconnect-names = "qup-core",
+ 						     "qup-config";
+ 				#address-cells = <1>;
+@@ -1184,7 +1184,7 @@ spi2: spi@4a88000 {
+ 				interconnects = <&qup_virt MASTER_QUP_CORE_0 RPM_ALWAYS_TAG
+ 						 &qup_virt SLAVE_QUP_CORE_0 RPM_ALWAYS_TAG>,
+ 						<&bimc MASTER_APPSS_PROC RPM_ALWAYS_TAG
+-						 &config_noc MASTER_APPSS_PROC RPM_ALWAYS_TAG>;
++						 &config_noc SLAVE_QUP_0 RPM_ALWAYS_TAG>;
+ 				interconnect-names = "qup-core",
+ 						     "qup-config";
+ 				#address-cells = <1>;
+@@ -1231,7 +1231,7 @@ spi3: spi@4a8c000 {
+ 				interconnects = <&qup_virt MASTER_QUP_CORE_0 RPM_ALWAYS_TAG
+ 						 &qup_virt SLAVE_QUP_CORE_0 RPM_ALWAYS_TAG>,
+ 						<&bimc MASTER_APPSS_PROC RPM_ALWAYS_TAG
+-						 &config_noc MASTER_APPSS_PROC RPM_ALWAYS_TAG>;
++						 &config_noc SLAVE_QUP_0 RPM_ALWAYS_TAG>;
+ 				interconnect-names = "qup-core",
+ 						     "qup-config";
+ 				#address-cells = <1>;
+@@ -1278,7 +1278,7 @@ spi4: spi@4a90000 {
+ 				interconnects = <&qup_virt MASTER_QUP_CORE_0 RPM_ALWAYS_TAG
+ 						 &qup_virt SLAVE_QUP_CORE_0 RPM_ALWAYS_TAG>,
+ 						<&bimc MASTER_APPSS_PROC RPM_ALWAYS_TAG
+-						 &config_noc MASTER_APPSS_PROC RPM_ALWAYS_TAG>;
++						 &config_noc SLAVE_QUP_0 RPM_ALWAYS_TAG>;
+ 				interconnect-names = "qup-core",
+ 						     "qup-config";
+ 				#address-cells = <1>;
+@@ -1297,7 +1297,7 @@ uart4: serial@4a90000 {
+ 				interconnects = <&qup_virt MASTER_QUP_CORE_0 RPM_ALWAYS_TAG
+ 						 &qup_virt SLAVE_QUP_CORE_0 RPM_ALWAYS_TAG>,
+ 						<&bimc MASTER_APPSS_PROC RPM_ALWAYS_TAG
+-						 &config_noc MASTER_APPSS_PROC RPM_ALWAYS_TAG>;
++						 &config_noc SLAVE_QUP_0 RPM_ALWAYS_TAG>;
+ 				interconnect-names = "qup-core",
+ 						     "qup-config";
+ 				status = "disabled";
+@@ -1342,7 +1342,7 @@ spi5: spi@4a94000 {
+ 				interconnects = <&qup_virt MASTER_QUP_CORE_0 RPM_ALWAYS_TAG
+ 						 &qup_virt SLAVE_QUP_CORE_0 RPM_ALWAYS_TAG>,
+ 						<&bimc MASTER_APPSS_PROC RPM_ALWAYS_TAG
+-						 &config_noc MASTER_APPSS_PROC RPM_ALWAYS_TAG>;
++						 &config_noc SLAVE_QUP_0 RPM_ALWAYS_TAG>;
+ 				interconnect-names = "qup-core",
+ 						     "qup-config";
+ 				#address-cells = <1>;
+diff --git a/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts b/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts
+index 6a28cab971891d..8e5951da5920db 100644
+--- a/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts
++++ b/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts
+@@ -1131,9 +1131,6 @@ &sound {
+ 		"VA DMIC0", "MIC BIAS1",
+ 		"VA DMIC1", "MIC BIAS1",
+ 		"VA DMIC2", "MIC BIAS3",
+-		"VA DMIC0", "VA MIC BIAS1",
+-		"VA DMIC1", "VA MIC BIAS1",
+-		"VA DMIC2", "VA MIC BIAS3",
+ 		"TX SWR_ADC1", "ADC2_OUTPUT";
+ 
+ 	wcd-playback-dai-link {
+diff --git a/arch/arm64/boot/dts/qcom/sda660-inforce-ifc6560.dts b/arch/arm64/boot/dts/qcom/sda660-inforce-ifc6560.dts
+index 962c8aa4004401..dc604be4afc632 100644
+--- a/arch/arm64/boot/dts/qcom/sda660-inforce-ifc6560.dts
++++ b/arch/arm64/boot/dts/qcom/sda660-inforce-ifc6560.dts
+@@ -167,6 +167,7 @@ &blsp1_dma {
+ 	 * BAM DMA interconnects support is in place.
+ 	 */
+ 	/delete-property/ clocks;
++	/delete-property/ clock-names;
+ };
+ 
+ &blsp1_uart2 {
+@@ -179,6 +180,7 @@ &blsp2_dma {
+ 	 * BAM DMA interconnects support is in place.
+ 	 */
+ 	/delete-property/ clocks;
++	/delete-property/ clock-names;
+ };
+ 
+ &blsp2_uart1 {
+diff --git a/arch/arm64/boot/dts/qcom/sdm660-xiaomi-lavender.dts b/arch/arm64/boot/dts/qcom/sdm660-xiaomi-lavender.dts
+index 7167f75bced3fd..a9926ad6c6f9f5 100644
+--- a/arch/arm64/boot/dts/qcom/sdm660-xiaomi-lavender.dts
++++ b/arch/arm64/boot/dts/qcom/sdm660-xiaomi-lavender.dts
+@@ -107,6 +107,7 @@ &qusb2phy0 {
+ 	status = "okay";
+ 
+ 	vdd-supply = <&vreg_l1b_0p925>;
++	vdda-pll-supply = <&vreg_l10a_1p8>;
+ 	vdda-phy-dpdm-supply = <&vreg_l7b_3p125>;
+ };
+ 
+@@ -404,6 +405,8 @@ &sdhc_1 {
+ &sdhc_2 {
+ 	status = "okay";
+ 
++	cd-gpios = <&tlmm 54 GPIO_ACTIVE_HIGH>;
++
+ 	vmmc-supply = <&vreg_l5b_2p95>;
+ 	vqmmc-supply = <&vreg_l2b_2p95>;
+ };
+diff --git a/arch/arm64/boot/dts/qcom/sdm845-samsung-starqltechn.dts b/arch/arm64/boot/dts/qcom/sdm845-samsung-starqltechn.dts
+index d37a433130b98f..5948b401165ce9 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845-samsung-starqltechn.dts
++++ b/arch/arm64/boot/dts/qcom/sdm845-samsung-starqltechn.dts
+@@ -135,8 +135,6 @@ vdda_pll_cc_ebi23:
+ 		vdda_sp_sensor:
+ 		vdda_ufs1_core:
+ 		vdda_ufs2_core:
+-		vdda_usb1_ss_core:
+-		vdda_usb2_ss_core:
+ 		vreg_l1a_0p875: ldo1 {
+ 			regulator-min-microvolt = <880000>;
+ 			regulator-max-microvolt = <880000>;
+@@ -157,6 +155,7 @@ vreg_l3a_1p0: ldo3 {
+ 			regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ 		};
+ 
++		vdda_usb1_ss_core:
+ 		vdd_wcss_cx:
+ 		vdd_wcss_mx:
+ 		vdda_wcss_pll:
+@@ -383,8 +382,8 @@ &ufs_mem_phy {
+ };
+ 
+ &sdhc_2 {
+-	pinctrl-names = "default";
+ 	pinctrl-0 = <&sdc2_clk_state &sdc2_cmd_state &sdc2_data_state &sd_card_det_n_state>;
++	pinctrl-names = "default";
+ 	cd-gpios = <&tlmm 126 GPIO_ACTIVE_LOW>;
+ 	vmmc-supply = <&vreg_l21a_2p95>;
+ 	vqmmc-supply = <&vddpx_2>;
+@@ -418,16 +417,9 @@ &usb_1_qmpphy {
+ 	status = "okay";
+ };
+ 
+-&wifi {
+-	vdd-0.8-cx-mx-supply = <&vreg_l5a_0p8>;
+-	vdd-1.8-xo-supply = <&vreg_l7a_1p8>;
+-	vdd-1.3-rfa-supply = <&vreg_l17a_1p3>;
+-	vdd-3.3-ch0-supply = <&vreg_l25a_3p3>;
+-	status = "okay";
+-};
+-
+ &tlmm {
+-	gpio-reserved-ranges = <0 4>, <27 4>, <81 4>, <85 4>;
++	gpio-reserved-ranges = <27 4>, /* SPI (eSE - embedded Secure Element) */
++			       <85 4>; /* SPI (fingerprint reader) */
+ 
+ 	sdc2_clk_state: sdc2-clk-state {
+ 		pins = "sdc2_clk";
+diff --git a/arch/arm64/boot/dts/qcom/sm8250.dtsi b/arch/arm64/boot/dts/qcom/sm8250.dtsi
+index faa36d17b9f2c9..e17937f76806c6 100644
+--- a/arch/arm64/boot/dts/qcom/sm8250.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8250.dtsi
+@@ -606,7 +606,7 @@ cpu7_opp8: opp-1632000000 {
+ 		};
+ 
+ 		cpu7_opp9: opp-1747200000 {
+-			opp-hz = /bits/ 64 <1708800000>;
++			opp-hz = /bits/ 64 <1747200000>;
+ 			opp-peak-kBps = <5412000 42393600>;
+ 		};
+ 
+diff --git a/arch/arm64/boot/dts/qcom/sm8350.dtsi b/arch/arm64/boot/dts/qcom/sm8350.dtsi
+index 404473fa491ae0..0be8f2befec7c5 100644
+--- a/arch/arm64/boot/dts/qcom/sm8350.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8350.dtsi
+@@ -1806,11 +1806,11 @@ cryptobam: dma-controller@1dc4000 {
+ 			interrupts = <GIC_SPI 272 IRQ_TYPE_LEVEL_HIGH>;
+ 			#dma-cells = <1>;
+ 			qcom,ee = <0>;
++			qcom,num-ees = <4>;
++			num-channels = <16>;
+ 			qcom,controlled-remotely;
+ 			iommus = <&apps_smmu 0x594 0x0011>,
+ 				 <&apps_smmu 0x596 0x0011>;
+-			/* FIXME: Probing BAM DMA causes some abort and system hang */
+-			status = "fail";
+ 		};
+ 
+ 		crypto: crypto@1dfa000 {
+@@ -1822,8 +1822,6 @@ crypto: crypto@1dfa000 {
+ 				 <&apps_smmu 0x596 0x0011>;
+ 			interconnects = <&aggre2_noc MASTER_CRYPTO 0 &mc_virt SLAVE_EBI1 0>;
+ 			interconnect-names = "memory";
+-			/* FIXME: dependency BAM DMA is disabled */
+-			status = "disabled";
+ 		};
+ 
+ 		ipa: ipa@1e40000 {
+diff --git a/arch/arm64/boot/dts/qcom/sm8650.dtsi b/arch/arm64/boot/dts/qcom/sm8650.dtsi
+index fddf979de38d1b..edde21972f5ac1 100644
+--- a/arch/arm64/boot/dts/qcom/sm8650.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8650.dtsi
+@@ -3605,8 +3605,11 @@ mdss: display-subsystem@ae00000 {
+ 			resets = <&dispcc DISP_CC_MDSS_CORE_BCR>;
+ 
+ 			interconnects = <&mmss_noc MASTER_MDP QCOM_ICC_TAG_ALWAYS
+-					 &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+-			interconnect-names = "mdp0-mem";
++					 &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
++					<&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
++					 &config_noc SLAVE_DISPLAY_CFG QCOM_ICC_TAG_ACTIVE_ONLY>;
++			interconnect-names = "mdp0-mem",
++					     "cpu-cfg";
+ 
+ 			power-domains = <&dispcc MDSS_GDSC>;
+ 
+@@ -6354,20 +6357,20 @@ map0 {
+ 
+ 			trips {
+ 				gpu0_alert0: trip-point0 {
+-					temperature = <85000>;
++					temperature = <95000>;
+ 					hysteresis = <1000>;
+ 					type = "passive";
+ 				};
+ 
+ 				trip-point1 {
+-					temperature = <90000>;
++					temperature = <110000>;
+ 					hysteresis = <1000>;
+ 					type = "hot";
+ 				};
+ 
+ 				trip-point2 {
+-					temperature = <110000>;
+-					hysteresis = <1000>;
++					temperature = <115000>;
++					hysteresis = <0>;
+ 					type = "critical";
+ 				};
+ 			};
+@@ -6387,20 +6390,20 @@ map0 {
+ 
+ 			trips {
+ 				gpu1_alert0: trip-point0 {
+-					temperature = <85000>;
++					temperature = <95000>;
+ 					hysteresis = <1000>;
+ 					type = "passive";
+ 				};
+ 
+ 				trip-point1 {
+-					temperature = <90000>;
++					temperature = <110000>;
+ 					hysteresis = <1000>;
+ 					type = "hot";
+ 				};
+ 
+ 				trip-point2 {
+-					temperature = <110000>;
+-					hysteresis = <1000>;
++					temperature = <115000>;
++					hysteresis = <0>;
+ 					type = "critical";
+ 				};
+ 			};
+@@ -6420,20 +6423,20 @@ map0 {
+ 
+ 			trips {
+ 				gpu2_alert0: trip-point0 {
+-					temperature = <85000>;
++					temperature = <95000>;
+ 					hysteresis = <1000>;
+ 					type = "passive";
+ 				};
+ 
+ 				trip-point1 {
+-					temperature = <90000>;
++					temperature = <110000>;
+ 					hysteresis = <1000>;
+ 					type = "hot";
+ 				};
+ 
+ 				trip-point2 {
+-					temperature = <110000>;
+-					hysteresis = <1000>;
++					temperature = <115000>;
++					hysteresis = <0>;
+ 					type = "critical";
+ 				};
+ 			};
+@@ -6453,20 +6456,20 @@ map0 {
+ 
+ 			trips {
+ 				gpu3_alert0: trip-point0 {
+-					temperature = <85000>;
++					temperature = <95000>;
+ 					hysteresis = <1000>;
+ 					type = "passive";
+ 				};
+ 
+ 				trip-point1 {
+-					temperature = <90000>;
++					temperature = <110000>;
+ 					hysteresis = <1000>;
+ 					type = "hot";
+ 				};
+ 
+ 				trip-point2 {
+-					temperature = <110000>;
+-					hysteresis = <1000>;
++					temperature = <115000>;
++					hysteresis = <0>;
+ 					type = "critical";
+ 				};
+ 			};
+@@ -6486,20 +6489,20 @@ map0 {
+ 
+ 			trips {
+ 				gpu4_alert0: trip-point0 {
+-					temperature = <85000>;
++					temperature = <95000>;
+ 					hysteresis = <1000>;
+ 					type = "passive";
+ 				};
+ 
+ 				trip-point1 {
+-					temperature = <90000>;
++					temperature = <110000>;
+ 					hysteresis = <1000>;
+ 					type = "hot";
+ 				};
+ 
+ 				trip-point2 {
+-					temperature = <110000>;
+-					hysteresis = <1000>;
++					temperature = <115000>;
++					hysteresis = <0>;
+ 					type = "critical";
+ 				};
+ 			};
+@@ -6519,20 +6522,20 @@ map0 {
+ 
+ 			trips {
+ 				gpu5_alert0: trip-point0 {
+-					temperature = <85000>;
++					temperature = <95000>;
+ 					hysteresis = <1000>;
+ 					type = "passive";
+ 				};
+ 
+ 				trip-point1 {
+-					temperature = <90000>;
++					temperature = <110000>;
+ 					hysteresis = <1000>;
+ 					type = "hot";
+ 				};
+ 
+ 				trip-point2 {
+-					temperature = <110000>;
+-					hysteresis = <1000>;
++					temperature = <115000>;
++					hysteresis = <0>;
+ 					type = "critical";
+ 				};
+ 			};
+@@ -6552,20 +6555,20 @@ map0 {
+ 
+ 			trips {
+ 				gpu6_alert0: trip-point0 {
+-					temperature = <85000>;
++					temperature = <95000>;
+ 					hysteresis = <1000>;
+ 					type = "passive";
+ 				};
+ 
+ 				trip-point1 {
+-					temperature = <90000>;
++					temperature = <110000>;
+ 					hysteresis = <1000>;
+ 					type = "hot";
+ 				};
+ 
+ 				trip-point2 {
+-					temperature = <110000>;
+-					hysteresis = <1000>;
++					temperature = <115000>;
++					hysteresis = <0>;
+ 					type = "critical";
+ 				};
+ 			};
+@@ -6585,20 +6588,20 @@ map0 {
+ 
+ 			trips {
+ 				gpu7_alert0: trip-point0 {
+-					temperature = <85000>;
++					temperature = <95000>;
+ 					hysteresis = <1000>;
+ 					type = "passive";
+ 				};
+ 
+ 				trip-point1 {
+-					temperature = <90000>;
++					temperature = <110000>;
+ 					hysteresis = <1000>;
+ 					type = "hot";
+ 				};
+ 
+ 				trip-point2 {
+-					temperature = <110000>;
+-					hysteresis = <1000>;
++					temperature = <115000>;
++					hysteresis = <0>;
+ 					type = "critical";
+ 				};
+ 			};
+diff --git a/arch/arm64/boot/dts/qcom/x1e80100-microsoft-romulus.dtsi b/arch/arm64/boot/dts/qcom/x1e80100-microsoft-romulus.dtsi
+index 19da90704b7cb9..001a9dc0a4baa3 100644
+--- a/arch/arm64/boot/dts/qcom/x1e80100-microsoft-romulus.dtsi
++++ b/arch/arm64/boot/dts/qcom/x1e80100-microsoft-romulus.dtsi
+@@ -267,6 +267,7 @@ vreg_l12b: ldo12 {
+ 			regulator-min-microvolt = <1200000>;
+ 			regulator-max-microvolt = <1200000>;
+ 			regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
++			regulator-always-on;
+ 		};
+ 
+ 		vreg_l13b: ldo13 {
+@@ -288,6 +289,7 @@ vreg_l15b: ldo15 {
+ 			regulator-min-microvolt = <1800000>;
+ 			regulator-max-microvolt = <1800000>;
+ 			regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
++			regulator-always-on;
+ 		};
+ 
+ 		vreg_l16b: ldo16 {
+diff --git a/arch/arm64/boot/dts/qcom/x1e80100.dtsi b/arch/arm64/boot/dts/qcom/x1e80100.dtsi
+index 5a5abd5fa65850..5082ecb32089bc 100644
+--- a/arch/arm64/boot/dts/qcom/x1e80100.dtsi
++++ b/arch/arm64/boot/dts/qcom/x1e80100.dtsi
+@@ -20,6 +20,7 @@
+ #include <dt-bindings/soc/qcom,gpr.h>
+ #include <dt-bindings/soc/qcom,rpmh-rsc.h>
+ #include <dt-bindings/sound/qcom,q6dsp-lpass-ports.h>
++#include <dt-bindings/thermal/thermal.h>
+ 
+ / {
+ 	interrupt-parent = <&intc>;
+@@ -4284,6 +4285,8 @@ usb_2_dwc3: usb@a200000 {
+ 				phy-names = "usb2-phy";
+ 				maximum-speed = "high-speed";
+ 
++				dma-coherent;
++
+ 				ports {
+ 					#address-cells = <1>;
+ 					#size-cells = <0>;
+@@ -6412,8 +6415,8 @@ trip-point0 {
+ 				};
+ 
+ 				aoss0-critical {
+-					temperature = <125000>;
+-					hysteresis = <0>;
++					temperature = <115000>;
++					hysteresis = <1000>;
+ 					type = "critical";
+ 				};
+ 			};
+@@ -6438,7 +6441,7 @@ trip-point1 {
+ 				};
+ 
+ 				cpu-critical {
+-					temperature = <110000>;
++					temperature = <115000>;
+ 					hysteresis = <1000>;
+ 					type = "critical";
+ 				};
+@@ -6464,7 +6467,7 @@ trip-point1 {
+ 				};
+ 
+ 				cpu-critical {
+-					temperature = <110000>;
++					temperature = <115000>;
+ 					hysteresis = <1000>;
+ 					type = "critical";
+ 				};
+@@ -6490,7 +6493,7 @@ trip-point1 {
+ 				};
+ 
+ 				cpu-critical {
+-					temperature = <110000>;
++					temperature = <115000>;
+ 					hysteresis = <1000>;
+ 					type = "critical";
+ 				};
+@@ -6516,7 +6519,7 @@ trip-point1 {
+ 				};
+ 
+ 				cpu-critical {
+-					temperature = <110000>;
++					temperature = <115000>;
+ 					hysteresis = <1000>;
+ 					type = "critical";
+ 				};
+@@ -6542,7 +6545,7 @@ trip-point1 {
+ 				};
+ 
+ 				cpu-critical {
+-					temperature = <110000>;
++					temperature = <115000>;
+ 					hysteresis = <1000>;
+ 					type = "critical";
+ 				};
+@@ -6568,7 +6571,7 @@ trip-point1 {
+ 				};
+ 
+ 				cpu-critical {
+-					temperature = <110000>;
++					temperature = <115000>;
+ 					hysteresis = <1000>;
+ 					type = "critical";
+ 				};
+@@ -6594,7 +6597,7 @@ trip-point1 {
+ 				};
+ 
+ 				cpu-critical {
+-					temperature = <110000>;
++					temperature = <115000>;
+ 					hysteresis = <1000>;
+ 					type = "critical";
+ 				};
+@@ -6620,7 +6623,7 @@ trip-point1 {
+ 				};
+ 
+ 				cpu-critical {
+-					temperature = <110000>;
++					temperature = <115000>;
+ 					hysteresis = <1000>;
+ 					type = "critical";
+ 				};
+@@ -6638,8 +6641,8 @@ trip-point0 {
+ 				};
+ 
+ 				cpuss2-critical {
+-					temperature = <125000>;
+-					hysteresis = <0>;
++					temperature = <115000>;
++					hysteresis = <1000>;
+ 					type = "critical";
+ 				};
+ 			};
+@@ -6656,8 +6659,8 @@ trip-point0 {
+ 				};
+ 
+ 				cpuss2-critical {
+-					temperature = <125000>;
+-					hysteresis = <0>;
++					temperature = <115000>;
++					hysteresis = <1000>;
+ 					type = "critical";
+ 				};
+ 			};
+@@ -6674,7 +6677,7 @@ trip-point0 {
+ 				};
+ 
+ 				mem-critical {
+-					temperature = <125000>;
++					temperature = <115000>;
+ 					hysteresis = <0>;
+ 					type = "critical";
+ 				};
+@@ -6692,7 +6695,7 @@ trip-point0 {
+ 				};
+ 
+ 				video-critical {
+-					temperature = <125000>;
++					temperature = <115000>;
+ 					hysteresis = <1000>;
+ 					type = "critical";
+ 				};
+@@ -6710,8 +6713,8 @@ trip-point0 {
+ 				};
+ 
+ 				aoss0-critical {
+-					temperature = <125000>;
+-					hysteresis = <0>;
++					temperature = <115000>;
++					hysteresis = <1000>;
+ 					type = "critical";
+ 				};
+ 			};
+@@ -6736,7 +6739,7 @@ trip-point1 {
+ 				};
+ 
+ 				cpu-critical {
+-					temperature = <110000>;
++					temperature = <115000>;
+ 					hysteresis = <1000>;
+ 					type = "critical";
+ 				};
+@@ -6762,7 +6765,7 @@ trip-point1 {
+ 				};
+ 
+ 				cpu-critical {
+-					temperature = <110000>;
++					temperature = <115000>;
+ 					hysteresis = <1000>;
+ 					type = "critical";
+ 				};
+@@ -6788,7 +6791,7 @@ trip-point1 {
+ 				};
+ 
+ 				cpu-critical {
+-					temperature = <110000>;
++					temperature = <115000>;
+ 					hysteresis = <1000>;
+ 					type = "critical";
+ 				};
+@@ -6814,7 +6817,7 @@ trip-point1 {
+ 				};
+ 
+ 				cpu-critical {
+-					temperature = <110000>;
++					temperature = <115000>;
+ 					hysteresis = <1000>;
+ 					type = "critical";
+ 				};
+@@ -6840,7 +6843,7 @@ trip-point1 {
+ 				};
+ 
+ 				cpu-critical {
+-					temperature = <110000>;
++					temperature = <115000>;
+ 					hysteresis = <1000>;
+ 					type = "critical";
+ 				};
+@@ -6866,7 +6869,7 @@ trip-point1 {
+ 				};
+ 
+ 				cpu-critical {
+-					temperature = <110000>;
++					temperature = <115000>;
+ 					hysteresis = <1000>;
+ 					type = "critical";
+ 				};
+@@ -6892,7 +6895,7 @@ trip-point1 {
+ 				};
+ 
+ 				cpu-critical {
+-					temperature = <110000>;
++					temperature = <115000>;
+ 					hysteresis = <1000>;
+ 					type = "critical";
+ 				};
+@@ -6918,7 +6921,7 @@ trip-point1 {
+ 				};
+ 
+ 				cpu-critical {
+-					temperature = <110000>;
++					temperature = <115000>;
+ 					hysteresis = <1000>;
+ 					type = "critical";
+ 				};
+@@ -6936,8 +6939,8 @@ trip-point0 {
+ 				};
+ 
+ 				cpuss2-critical {
+-					temperature = <125000>;
+-					hysteresis = <0>;
++					temperature = <115000>;
++					hysteresis = <1000>;
+ 					type = "critical";
+ 				};
+ 			};
+@@ -6954,8 +6957,8 @@ trip-point0 {
+ 				};
+ 
+ 				cpuss2-critical {
+-					temperature = <125000>;
+-					hysteresis = <0>;
++					temperature = <115000>;
++					hysteresis = <1000>;
+ 					type = "critical";
+ 				};
+ 			};
+@@ -6972,8 +6975,8 @@ trip-point0 {
+ 				};
+ 
+ 				aoss0-critical {
+-					temperature = <125000>;
+-					hysteresis = <0>;
++					temperature = <115000>;
++					hysteresis = <1000>;
+ 					type = "critical";
+ 				};
+ 			};
+@@ -6998,7 +7001,7 @@ trip-point1 {
+ 				};
+ 
+ 				cpu-critical {
+-					temperature = <110000>;
++					temperature = <115000>;
+ 					hysteresis = <1000>;
+ 					type = "critical";
+ 				};
+@@ -7024,7 +7027,7 @@ trip-point1 {
+ 				};
+ 
+ 				cpu-critical {
+-					temperature = <110000>;
++					temperature = <115000>;
+ 					hysteresis = <1000>;
+ 					type = "critical";
+ 				};
+@@ -7050,7 +7053,7 @@ trip-point1 {
+ 				};
+ 
+ 				cpu-critical {
+-					temperature = <110000>;
++					temperature = <115000>;
+ 					hysteresis = <1000>;
+ 					type = "critical";
+ 				};
+@@ -7076,7 +7079,7 @@ trip-point1 {
+ 				};
+ 
+ 				cpu-critical {
+-					temperature = <110000>;
++					temperature = <115000>;
+ 					hysteresis = <1000>;
+ 					type = "critical";
+ 				};
+@@ -7102,7 +7105,7 @@ trip-point1 {
+ 				};
+ 
+ 				cpu-critical {
+-					temperature = <110000>;
++					temperature = <115000>;
+ 					hysteresis = <1000>;
+ 					type = "critical";
+ 				};
+@@ -7128,7 +7131,7 @@ trip-point1 {
+ 				};
+ 
+ 				cpu-critical {
+-					temperature = <110000>;
++					temperature = <115000>;
+ 					hysteresis = <1000>;
+ 					type = "critical";
+ 				};
+@@ -7154,7 +7157,7 @@ trip-point1 {
+ 				};
+ 
+ 				cpu-critical {
+-					temperature = <110000>;
++					temperature = <115000>;
+ 					hysteresis = <1000>;
+ 					type = "critical";
+ 				};
+@@ -7180,7 +7183,7 @@ trip-point1 {
+ 				};
+ 
+ 				cpu-critical {
+-					temperature = <110000>;
++					temperature = <115000>;
+ 					hysteresis = <1000>;
+ 					type = "critical";
+ 				};
+@@ -7198,8 +7201,8 @@ trip-point0 {
+ 				};
+ 
+ 				cpuss2-critical {
+-					temperature = <125000>;
+-					hysteresis = <0>;
++					temperature = <115000>;
++					hysteresis = <1000>;
+ 					type = "critical";
+ 				};
+ 			};
+@@ -7216,8 +7219,8 @@ trip-point0 {
+ 				};
+ 
+ 				cpuss2-critical {
+-					temperature = <125000>;
+-					hysteresis = <0>;
++					temperature = <115000>;
++					hysteresis = <1000>;
+ 					type = "critical";
+ 				};
+ 			};
+@@ -7234,8 +7237,8 @@ trip-point0 {
+ 				};
+ 
+ 				aoss0-critical {
+-					temperature = <125000>;
+-					hysteresis = <0>;
++					temperature = <115000>;
++					hysteresis = <1000>;
+ 					type = "critical";
+ 				};
+ 			};
+@@ -7252,8 +7255,8 @@ trip-point0 {
+ 				};
+ 
+ 				nsp0-critical {
+-					temperature = <125000>;
+-					hysteresis = <0>;
++					temperature = <115000>;
++					hysteresis = <1000>;
+ 					type = "critical";
+ 				};
+ 			};
+@@ -7270,8 +7273,8 @@ trip-point0 {
+ 				};
+ 
+ 				nsp1-critical {
+-					temperature = <125000>;
+-					hysteresis = <0>;
++					temperature = <115000>;
++					hysteresis = <1000>;
+ 					type = "critical";
+ 				};
+ 			};
+@@ -7288,8 +7291,8 @@ trip-point0 {
+ 				};
+ 
+ 				nsp2-critical {
+-					temperature = <125000>;
+-					hysteresis = <0>;
++					temperature = <115000>;
++					hysteresis = <1000>;
+ 					type = "critical";
+ 				};
+ 			};
+@@ -7306,33 +7309,34 @@ trip-point0 {
+ 				};
+ 
+ 				nsp3-critical {
+-					temperature = <125000>;
+-					hysteresis = <0>;
++					temperature = <115000>;
++					hysteresis = <1000>;
+ 					type = "critical";
+ 				};
+ 			};
+ 		};
+ 
+ 		gpuss-0-thermal {
+-			polling-delay-passive = <10>;
++			polling-delay-passive = <200>;
+ 
+ 			thermal-sensors = <&tsens3 5>;
+ 
+-			trips {
+-				trip-point0 {
+-					temperature = <85000>;
+-					hysteresis = <1000>;
+-					type = "passive";
++			cooling-maps {
++				map0 {
++					trip = <&gpuss0_alert0>;
++					cooling-device = <&gpu THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ 				};
++			};
+ 
+-				trip-point1 {
+-					temperature = <90000>;
++			trips {
++				gpuss0_alert0: trip-point0 {
++					temperature = <95000>;
+ 					hysteresis = <1000>;
+-					type = "hot";
++					type = "passive";
+ 				};
+ 
+-				trip-point2 {
+-					temperature = <125000>;
++				gpu-critical {
++					temperature = <115000>;
+ 					hysteresis = <1000>;
+ 					type = "critical";
+ 				};
+@@ -7340,25 +7344,26 @@ trip-point2 {
+ 		};
+ 
+ 		gpuss-1-thermal {
+-			polling-delay-passive = <10>;
++			polling-delay-passive = <200>;
+ 
+ 			thermal-sensors = <&tsens3 6>;
+ 
+-			trips {
+-				trip-point0 {
+-					temperature = <85000>;
+-					hysteresis = <1000>;
+-					type = "passive";
++			cooling-maps {
++				map0 {
++					trip = <&gpuss1_alert0>;
++					cooling-device = <&gpu THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ 				};
++			};
+ 
+-				trip-point1 {
+-					temperature = <90000>;
++			trips {
++				gpuss1_alert0: trip-point0 {
++					temperature = <95000>;
+ 					hysteresis = <1000>;
+-					type = "hot";
++					type = "passive";
+ 				};
+ 
+-				trip-point2 {
+-					temperature = <125000>;
++				gpu-critical {
++					temperature = <115000>;
+ 					hysteresis = <1000>;
+ 					type = "critical";
+ 				};
+@@ -7366,25 +7371,26 @@ trip-point2 {
+ 		};
+ 
+ 		gpuss-2-thermal {
+-			polling-delay-passive = <10>;
++			polling-delay-passive = <200>;
+ 
+ 			thermal-sensors = <&tsens3 7>;
+ 
+-			trips {
+-				trip-point0 {
+-					temperature = <85000>;
+-					hysteresis = <1000>;
+-					type = "passive";
++			cooling-maps {
++				map0 {
++					trip = <&gpuss2_alert0>;
++					cooling-device = <&gpu THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ 				};
++			};
+ 
+-				trip-point1 {
+-					temperature = <90000>;
++			trips {
++				gpuss2_alert0: trip-point0 {
++					temperature = <95000>;
+ 					hysteresis = <1000>;
+-					type = "hot";
++					type = "passive";
+ 				};
+ 
+-				trip-point2 {
+-					temperature = <125000>;
++				gpu-critical {
++					temperature = <115000>;
+ 					hysteresis = <1000>;
+ 					type = "critical";
+ 				};
+@@ -7392,25 +7398,26 @@ trip-point2 {
+ 		};
+ 
+ 		gpuss-3-thermal {
+-			polling-delay-passive = <10>;
++			polling-delay-passive = <200>;
+ 
+ 			thermal-sensors = <&tsens3 8>;
+ 
+-			trips {
+-				trip-point0 {
+-					temperature = <85000>;
+-					hysteresis = <1000>;
+-					type = "passive";
++			cooling-maps {
++				map0 {
++					trip = <&gpuss3_alert0>;
++					cooling-device = <&gpu THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ 				};
++			};
+ 
+-				trip-point1 {
+-					temperature = <90000>;
++			trips {
++				gpuss3_alert0: trip-point0 {
++					temperature = <95000>;
+ 					hysteresis = <1000>;
+-					type = "hot";
++					type = "passive";
+ 				};
+ 
+-				trip-point2 {
+-					temperature = <125000>;
++				gpu-critical {
++					temperature = <115000>;
+ 					hysteresis = <1000>;
+ 					type = "critical";
+ 				};
+@@ -7418,25 +7425,26 @@ trip-point2 {
+ 		};
+ 
+ 		gpuss-4-thermal {
+-			polling-delay-passive = <10>;
++			polling-delay-passive = <200>;
+ 
+ 			thermal-sensors = <&tsens3 9>;
+ 
+-			trips {
+-				trip-point0 {
+-					temperature = <85000>;
+-					hysteresis = <1000>;
+-					type = "passive";
++			cooling-maps {
++				map0 {
++					trip = <&gpuss4_alert0>;
++					cooling-device = <&gpu THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ 				};
++			};
+ 
+-				trip-point1 {
+-					temperature = <90000>;
++			trips {
++				gpuss4_alert0: trip-point0 {
++					temperature = <95000>;
+ 					hysteresis = <1000>;
+-					type = "hot";
++					type = "passive";
+ 				};
+ 
+-				trip-point2 {
+-					temperature = <125000>;
++				gpu-critical {
++					temperature = <115000>;
+ 					hysteresis = <1000>;
+ 					type = "critical";
+ 				};
+@@ -7444,25 +7452,26 @@ trip-point2 {
+ 		};
+ 
+ 		gpuss-5-thermal {
+-			polling-delay-passive = <10>;
++			polling-delay-passive = <200>;
+ 
+ 			thermal-sensors = <&tsens3 10>;
+ 
+-			trips {
+-				trip-point0 {
+-					temperature = <85000>;
+-					hysteresis = <1000>;
+-					type = "passive";
++			cooling-maps {
++				map0 {
++					trip = <&gpuss5_alert0>;
++					cooling-device = <&gpu THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ 				};
++			};
+ 
+-				trip-point1 {
+-					temperature = <90000>;
++			trips {
++				gpuss5_alert0: trip-point0 {
++					temperature = <95000>;
+ 					hysteresis = <1000>;
+-					type = "hot";
++					type = "passive";
+ 				};
+ 
+-				trip-point2 {
+-					temperature = <125000>;
++				gpu-critical {
++					temperature = <115000>;
+ 					hysteresis = <1000>;
+ 					type = "critical";
+ 				};
+@@ -7470,25 +7479,26 @@ trip-point2 {
+ 		};
+ 
+ 		gpuss-6-thermal {
+-			polling-delay-passive = <10>;
++			polling-delay-passive = <200>;
+ 
+ 			thermal-sensors = <&tsens3 11>;
+ 
+-			trips {
+-				trip-point0 {
+-					temperature = <85000>;
+-					hysteresis = <1000>;
+-					type = "passive";
++			cooling-maps {
++				map0 {
++					trip = <&gpuss6_alert0>;
++					cooling-device = <&gpu THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ 				};
++			};
+ 
+-				trip-point1 {
+-					temperature = <90000>;
++			trips {
++				gpuss6_alert0: trip-point0 {
++					temperature = <95000>;
+ 					hysteresis = <1000>;
+-					type = "hot";
++					type = "passive";
+ 				};
+ 
+-				trip-point2 {
+-					temperature = <125000>;
++				gpu-critical {
++					temperature = <115000>;
+ 					hysteresis = <1000>;
+ 					type = "critical";
+ 				};
+@@ -7496,25 +7506,26 @@ trip-point2 {
+ 		};
+ 
+ 		gpuss-7-thermal {
+-			polling-delay-passive = <10>;
++			polling-delay-passive = <200>;
+ 
+ 			thermal-sensors = <&tsens3 12>;
+ 
+-			trips {
+-				trip-point0 {
+-					temperature = <85000>;
+-					hysteresis = <1000>;
+-					type = "passive";
++			cooling-maps {
++				map0 {
++					trip = <&gpuss7_alert0>;
++					cooling-device = <&gpu THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ 				};
++			};
+ 
+-				trip-point1 {
+-					temperature = <90000>;
++			trips {
++				gpuss7_alert0: trip-point0 {
++					temperature = <95000>;
+ 					hysteresis = <1000>;
+-					type = "hot";
++					type = "passive";
+ 				};
+ 
+-				trip-point2 {
+-					temperature = <125000>;
++				gpu-critical {
++					temperature = <115000>;
+ 					hysteresis = <1000>;
+ 					type = "critical";
+ 				};
+@@ -7533,7 +7544,7 @@ trip-point0 {
+ 
+ 				camera0-critical {
+ 					temperature = <115000>;
+-					hysteresis = <0>;
++					hysteresis = <1000>;
+ 					type = "critical";
+ 				};
+ 			};
+@@ -7551,7 +7562,7 @@ trip-point0 {
+ 
+ 				camera0-critical {
+ 					temperature = <115000>;
+-					hysteresis = <0>;
++					hysteresis = <1000>;
+ 					type = "critical";
+ 				};
+ 			};
+diff --git a/arch/arm64/boot/dts/renesas/r8a779g0-white-hawk-ard-audio-da7212.dtso b/arch/arm64/boot/dts/renesas/r8a779g0-white-hawk-ard-audio-da7212.dtso
+index e6cf304c77ee92..5d820bd32ff674 100644
+--- a/arch/arm64/boot/dts/renesas/r8a779g0-white-hawk-ard-audio-da7212.dtso
++++ b/arch/arm64/boot/dts/renesas/r8a779g0-white-hawk-ard-audio-da7212.dtso
+@@ -108,7 +108,7 @@ sound_clk_pins: sound-clk {
+ 	};
+ 
+ 	tpu0_pins: tpu0 {
+-		groups = "tpu_to0_a";
++		groups = "tpu_to0_b";
+ 		function = "tpu";
+ 	};
+ };
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts b/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts
+index f6f15946579ebf..57466fbfd3f9af 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts
+@@ -284,14 +284,6 @@ &uart2 {
+ 	status = "okay";
+ };
+ 
+-&usb_host0_ehci {
+-	status = "okay";
+-};
+-
+-&usb_host0_ohci {
+-	status = "okay";
+-};
+-
+ &vopb {
+ 	status = "okay";
+ };
+diff --git a/arch/arm64/boot/dts/rockchip/rk3566-rock-3c.dts b/arch/arm64/boot/dts/rockchip/rk3566-rock-3c.dts
+index f2cc086e5001a6..887c9be1b41008 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3566-rock-3c.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3566-rock-3c.dts
+@@ -636,6 +636,7 @@ flash@0 {
+ 		spi-max-frequency = <104000000>;
+ 		spi-rx-bus-width = <4>;
+ 		spi-tx-bus-width = <1>;
++		vcc-supply = <&vcc_1v8>;
+ 	};
+ };
+ 
+diff --git a/arch/arm64/boot/dts/rockchip/rk3568-nanopi-r5s.dtsi b/arch/arm64/boot/dts/rockchip/rk3568-nanopi-r5s.dtsi
+index 93189f83064006..c30354268c8f5e 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3568-nanopi-r5s.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3568-nanopi-r5s.dtsi
+@@ -486,9 +486,12 @@ &saradc {
+ &sdhci {
+ 	bus-width = <8>;
+ 	max-frequency = <200000000>;
++	mmc-hs200-1_8v;
+ 	non-removable;
+ 	pinctrl-names = "default";
+-	pinctrl-0 = <&emmc_bus8 &emmc_clk &emmc_cmd>;
++	pinctrl-0 = <&emmc_bus8 &emmc_clk &emmc_cmd &emmc_datastrobe>;
++	vmmc-supply = <&vcc_3v3>;
++	vqmmc-supply = <&vcc_1v8>;
+ 	status = "okay";
+ };
+ 
+diff --git a/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi
+index 83e7e0fbe7839e..ad4331bc07806c 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi
+@@ -428,16 +428,15 @@ xin32k: clock-2 {
+ 		#clock-cells = <0>;
+ 	};
+ 
+-	pmu_sram: sram@10f000 {
+-		compatible = "mmio-sram";
+-		reg = <0x0 0x0010f000 0x0 0x100>;
+-		ranges = <0 0x0 0x0010f000 0x100>;
+-		#address-cells = <1>;
+-		#size-cells = <1>;
++	reserved-memory {
++		#address-cells = <2>;
++		#size-cells = <2>;
++		ranges;
+ 
+-		scmi_shmem: sram@0 {
++		scmi_shmem: shmem@10f000 {
+ 			compatible = "arm,scmi-shmem";
+-			reg = <0x0 0x100>;
++			reg = <0x0 0x0010f000 0x0 0x100>;
++			no-map;
+ 		};
+ 	};
+ 
+diff --git a/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts b/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts
+index 8230d53cd69609..f7a557e6af5477 100644
+--- a/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts
++++ b/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts
+@@ -557,6 +557,7 @@ &usb1 {
+ &ospi1 {
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&mcu_fss0_ospi1_pins_default>;
++	status = "okay";
+ 
+ 	flash@0 {
+ 		compatible = "jedec,spi-nor";
+diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
+index 8fe7dbae33bf90..f988dd79add899 100644
+--- a/arch/arm64/configs/defconfig
++++ b/arch/arm64/configs/defconfig
+@@ -1536,6 +1536,9 @@ CONFIG_PHY_HISTB_COMBPHY=y
+ CONFIG_PHY_HISI_INNO_USB2=y
+ CONFIG_PHY_MVEBU_CP110_COMPHY=y
+ CONFIG_PHY_MTK_TPHY=y
++CONFIG_PHY_MTK_HDMI=m
++CONFIG_PHY_MTK_MIPI_DSI=m
++CONFIG_PHY_MTK_DP=m
+ CONFIG_PHY_QCOM_EDP=m
+ CONFIG_PHY_QCOM_PCIE2=m
+ CONFIG_PHY_QCOM_QMP=m
+diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
+index da6d2c1c0b030c..5f4dc6364dbb9d 100644
+--- a/arch/arm64/include/asm/esr.h
++++ b/arch/arm64/include/asm/esr.h
+@@ -370,12 +370,14 @@
+ /*
+  * ISS values for SME traps
+  */
+-
+-#define ESR_ELx_SME_ISS_SME_DISABLED	0
+-#define ESR_ELx_SME_ISS_ILL		1
+-#define ESR_ELx_SME_ISS_SM_DISABLED	2
+-#define ESR_ELx_SME_ISS_ZA_DISABLED	3
+-#define ESR_ELx_SME_ISS_ZT_DISABLED	4
++#define ESR_ELx_SME_ISS_SMTC_MASK		GENMASK(2, 0)
++#define ESR_ELx_SME_ISS_SMTC(esr)		((esr) & ESR_ELx_SME_ISS_SMTC_MASK)
++
++#define ESR_ELx_SME_ISS_SMTC_SME_DISABLED	0
++#define ESR_ELx_SME_ISS_SMTC_ILL		1
++#define ESR_ELx_SME_ISS_SMTC_SM_DISABLED	2
++#define ESR_ELx_SME_ISS_SMTC_ZA_DISABLED	3
++#define ESR_ELx_SME_ISS_SMTC_ZT_DISABLED	4
+ 
+ /* ISS field definitions for MOPS exceptions */
+ #define ESR_ELx_MOPS_ISS_MEM_INST	(UL(1) << 24)
+diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
+index f2a84efc361858..c8dcb67b81a72c 100644
+--- a/arch/arm64/include/asm/fpsimd.h
++++ b/arch/arm64/include/asm/fpsimd.h
+@@ -6,6 +6,7 @@
+ #define __ASM_FP_H
+ 
+ #include <asm/errno.h>
++#include <asm/percpu.h>
+ #include <asm/ptrace.h>
+ #include <asm/processor.h>
+ #include <asm/sigcontext.h>
+@@ -94,6 +95,8 @@ struct cpu_fp_state {
+ 	enum fp_type to_save;
+ };
+ 
++DECLARE_PER_CPU(struct cpu_fp_state, fpsimd_last_state);
++
+ extern void fpsimd_bind_state_to_cpu(struct cpu_fp_state *fp_state);
+ 
+ extern void fpsimd_flush_task_state(struct task_struct *target);
+diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
+index 3fcd9d080bf2a9..d23315ef7b679b 100644
+--- a/arch/arm64/kernel/entry-common.c
++++ b/arch/arm64/kernel/entry-common.c
+@@ -393,20 +393,16 @@ static bool cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
+  * As per the ABI exit SME streaming mode and clear the SVE state not
+  * shared with FPSIMD on syscall entry.
+  */
+-static inline void fp_user_discard(void)
++static inline void fpsimd_syscall_enter(void)
+ {
+-	/*
+-	 * If SME is active then exit streaming mode.  If ZA is active
+-	 * then flush the SVE registers but leave userspace access to
+-	 * both SVE and SME enabled, otherwise disable SME for the
+-	 * task and fall through to disabling SVE too.  This means
+-	 * that after a syscall we never have any streaming mode
+-	 * register state to track, if this changes the KVM code will
+-	 * need updating.
+-	 */
++	/* Ensure PSTATE.SM is clear, but leave PSTATE.ZA as-is. */
+ 	if (system_supports_sme())
+ 		sme_smstop_sm();
+ 
++	/*
++	 * The CPU is not in streaming mode. If non-streaming SVE is not
++	 * supported, there is no SVE state that needs to be discarded.
++	 */
+ 	if (!system_supports_sve())
+ 		return;
+ 
+@@ -416,6 +412,33 @@ static inline void fp_user_discard(void)
+ 		sve_vq_minus_one = sve_vq_from_vl(task_get_sve_vl(current)) - 1;
+ 		sve_flush_live(true, sve_vq_minus_one);
+ 	}
++
++	/*
++	 * Any live non-FPSIMD SVE state has been zeroed. Allow
++	 * fpsimd_save_user_state() to lazily discard SVE state until either
++	 * the live state is unbound or fpsimd_syscall_exit() is called.
++	 */
++	__this_cpu_write(fpsimd_last_state.to_save, FP_STATE_FPSIMD);
++}
++
++static __always_inline void fpsimd_syscall_exit(void)
++{
++	if (!system_supports_sve())
++		return;
++
++	/*
++	 * The current task's user FPSIMD/SVE/SME state is now bound to this
++	 * CPU. The fpsimd_last_state.to_save value is either:
++	 *
++	 * - FP_STATE_FPSIMD, if the state has not been reloaded on this CPU
++	 *   since fpsimd_syscall_enter().
++	 *
++	 * - FP_STATE_CURRENT, if the state has been reloaded on this CPU at
++	 *   any point.
++	 *
++	 * Reset this to FP_STATE_CURRENT to stop lazy discarding.
++	 */
++	__this_cpu_write(fpsimd_last_state.to_save, FP_STATE_CURRENT);
+ }
+ 
+ UNHANDLED(el1t, 64, sync)
+@@ -707,10 +730,11 @@ static void noinstr el0_svc(struct pt_regs *regs)
+ {
+ 	enter_from_user_mode(regs);
+ 	cortex_a76_erratum_1463225_svc_handler();
+-	fp_user_discard();
++	fpsimd_syscall_enter();
+ 	local_daif_restore(DAIF_PROCCTX);
+ 	do_el0_svc(regs);
+ 	exit_to_user_mode(regs);
++	fpsimd_syscall_exit();
+ }
+ 
+ static void noinstr el0_fpac(struct pt_regs *regs, unsigned long esr)
+diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
+index f38d22dac140f1..8854bce5cfe205 100644
+--- a/arch/arm64/kernel/fpsimd.c
++++ b/arch/arm64/kernel/fpsimd.c
+@@ -119,7 +119,7 @@
+  *   whatever is in the FPSIMD registers is not saved to memory, but discarded.
+  */
+ 
+-static DEFINE_PER_CPU(struct cpu_fp_state, fpsimd_last_state);
++DEFINE_PER_CPU(struct cpu_fp_state, fpsimd_last_state);
+ 
+ __ro_after_init struct vl_info vl_info[ARM64_VEC_MAX] = {
+ #ifdef CONFIG_ARM64_SVE
+@@ -359,9 +359,6 @@ static void task_fpsimd_load(void)
+ 	WARN_ON(preemptible());
+ 	WARN_ON(test_thread_flag(TIF_KERNEL_FPSTATE));
+ 
+-	if (system_supports_fpmr())
+-		write_sysreg_s(current->thread.uw.fpmr, SYS_FPMR);
+-
+ 	if (system_supports_sve() || system_supports_sme()) {
+ 		switch (current->thread.fp_type) {
+ 		case FP_STATE_FPSIMD:
+@@ -413,6 +410,9 @@ static void task_fpsimd_load(void)
+ 			restore_ffr = system_supports_fa64();
+ 	}
+ 
++	if (system_supports_fpmr())
++		write_sysreg_s(current->thread.uw.fpmr, SYS_FPMR);
++
+ 	if (restore_sve_regs) {
+ 		WARN_ON_ONCE(current->thread.fp_type != FP_STATE_SVE);
+ 		sve_load_state(sve_pffr(&current->thread),
+@@ -453,12 +453,15 @@ static void fpsimd_save_user_state(void)
+ 		*(last->fpmr) = read_sysreg_s(SYS_FPMR);
+ 
+ 	/*
+-	 * If a task is in a syscall the ABI allows us to only
+-	 * preserve the state shared with FPSIMD so don't bother
+-	 * saving the full SVE state in that case.
++	 * Save SVE state if it is live.
++	 *
++	 * The syscall ABI discards live SVE state at syscall entry. When
++	 * entering a syscall, fpsimd_syscall_enter() sets to_save to
++	 * FP_STATE_FPSIMD to allow the SVE state to be lazily discarded until
++	 * either new SVE state is loaded+bound or fpsimd_syscall_exit() is
++	 * called prior to a return to userspace.
+ 	 */
+-	if ((last->to_save == FP_STATE_CURRENT && test_thread_flag(TIF_SVE) &&
+-	     !in_syscall(current_pt_regs())) ||
++	if ((last->to_save == FP_STATE_CURRENT && test_thread_flag(TIF_SVE)) ||
+ 	    last->to_save == FP_STATE_SVE) {
+ 		save_sve_regs = true;
+ 		save_ffr = true;
+@@ -651,7 +654,7 @@ static void __fpsimd_to_sve(void *sst, struct user_fpsimd_state const *fst,
+  * task->thread.uw.fpsimd_state must be up to date before calling this
+  * function.
+  */
+-static void fpsimd_to_sve(struct task_struct *task)
++static inline void fpsimd_to_sve(struct task_struct *task)
+ {
+ 	unsigned int vq;
+ 	void *sst = task->thread.sve_state;
+@@ -675,7 +678,7 @@ static void fpsimd_to_sve(struct task_struct *task)
+  * bytes of allocated kernel memory.
+  * task->thread.sve_state must be up to date before calling this function.
+  */
+-static void sve_to_fpsimd(struct task_struct *task)
++static inline void sve_to_fpsimd(struct task_struct *task)
+ {
+ 	unsigned int vq, vl;
+ 	void const *sst = task->thread.sve_state;
+@@ -1436,7 +1439,7 @@ void do_sme_acc(unsigned long esr, struct pt_regs *regs)
+ 	 * If this is not a trap due to SME being disabled, then something
+ 	 * is being used in the wrong mode; report it as SIGILL.
+ 	 */
+-	if (ESR_ELx_ISS(esr) != ESR_ELx_SME_ISS_SME_DISABLED) {
++	if (ESR_ELx_SME_ISS_SMTC(esr) != ESR_ELx_SME_ISS_SMTC_SME_DISABLED) {
+ 		force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
+ 		return;
+ 	}
+@@ -1460,6 +1463,8 @@ void do_sme_acc(unsigned long esr, struct pt_regs *regs)
+ 		sme_set_vq(vq_minus_one);
+ 
+ 		fpsimd_bind_task_to_cpu();
++	} else {
++		fpsimd_flush_task_state(current);
+ 	}
+ 
+ 	put_cpu_fpsimd_context();
+@@ -1573,8 +1578,8 @@ void fpsimd_thread_switch(struct task_struct *next)
+ 		fpsimd_save_user_state();
+ 
+ 	if (test_tsk_thread_flag(next, TIF_KERNEL_FPSTATE)) {
+-		fpsimd_load_kernel_state(next);
+ 		fpsimd_flush_cpu_state();
++		fpsimd_load_kernel_state(next);
+ 	} else {
+ 		/*
+ 		 * Fix up TIF_FOREIGN_FPSTATE to correctly describe next's
+@@ -1661,6 +1666,9 @@ void fpsimd_flush_thread(void)
+ 		current->thread.svcr = 0;
+ 	}
+ 
++	if (system_supports_fpmr())
++		current->thread.uw.fpmr = 0;
++
+ 	current->thread.fp_type = FP_STATE_FPSIMD;
+ 
+ 	put_cpu_fpsimd_context();
+@@ -1801,7 +1809,7 @@ void fpsimd_update_current_state(struct user_fpsimd_state const *state)
+ 	get_cpu_fpsimd_context();
+ 
+ 	current->thread.uw.fpsimd_state = *state;
+-	if (test_thread_flag(TIF_SVE))
++	if (current->thread.fp_type == FP_STATE_SVE)
+ 		fpsimd_to_sve(current);
+ 
+ 	task_fpsimd_load();
+diff --git a/arch/arm64/xen/hypercall.S b/arch/arm64/xen/hypercall.S
+index 9d01361696a145..ae551b8571374f 100644
+--- a/arch/arm64/xen/hypercall.S
++++ b/arch/arm64/xen/hypercall.S
+@@ -83,7 +83,26 @@ HYPERCALL3(vcpu_op);
+ HYPERCALL1(platform_op_raw);
+ HYPERCALL2(multicall);
+ HYPERCALL2(vm_assist);
+-HYPERCALL3(dm_op);
++
++SYM_FUNC_START(HYPERVISOR_dm_op)
++	mov x16, #__HYPERVISOR_dm_op;	\
++	/*
++	 * dm_op hypercalls are issued by userspace. The kernel needs to
++	 * enable access to TTBR0_EL1 as the hypervisor would issue stage 1
++	 * translations to user memory via AT instructions. Since AT
++	 * instructions are not affected by the PAN bit (ARMv8.1), we only
++	 * need the explicit uaccess_enable/disable if the TTBR0 PAN emulation
++	 * is enabled (which implies that hardware UAO and PAN are disabled).
++	 */
++	uaccess_ttbr0_enable x6, x7, x8
++	hvc XEN_IMM
++
++	/*
++	 * Disable userspace access from the kernel once the hypercall completes.
++	 */
++	uaccess_ttbr0_disable x6, x7
++	ret
++SYM_FUNC_END(HYPERVISOR_dm_op);
+ 
+ SYM_FUNC_START(privcmd_call)
+ 	mov x16, x0
+diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c
+index e324410ef239c0..d26c7f4f8c360a 100644
+--- a/arch/m68k/mac/config.c
++++ b/arch/m68k/mac/config.c
+@@ -793,7 +793,7 @@ static void __init mac_identify(void)
+ 	}
+ 
+ 	macintosh_config = mac_data_table;
+-	for (m = macintosh_config; m->ident != -1; m++) {
++	for (m = &mac_data_table[1]; m->ident != -1; m++) {
+ 		if (m->ident == model) {
+ 			macintosh_config = m;
+ 			break;
+diff --git a/arch/mips/boot/dts/loongson/loongson64c_4core_ls7a.dts b/arch/mips/boot/dts/loongson/loongson64c_4core_ls7a.dts
+index c7ea4f1c0bb21f..6c277ab83d4b94 100644
+--- a/arch/mips/boot/dts/loongson/loongson64c_4core_ls7a.dts
++++ b/arch/mips/boot/dts/loongson/loongson64c_4core_ls7a.dts
+@@ -29,6 +29,7 @@ msi: msi-controller@2ff00000 {
+ 		compatible = "loongson,pch-msi-1.0";
+ 		reg = <0 0x2ff00000 0 0x8>;
+ 		interrupt-controller;
++		#interrupt-cells = <1>;
+ 		msi-controller;
+ 		loongson,msi-base-vec = <64>;
+ 		loongson,msi-num-vecs = <64>;
+diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
+index f43c1198768c64..b4006a4a112161 100644
+--- a/arch/powerpc/kernel/Makefile
++++ b/arch/powerpc/kernel/Makefile
+@@ -162,7 +162,7 @@ endif
+ 
+ obj64-$(CONFIG_PPC_TRANSACTIONAL_MEM)	+= tm.o
+ 
+-ifneq ($(CONFIG_XMON)$(CONFIG_KEXEC_CORE)(CONFIG_PPC_BOOK3S),)
++ifneq ($(CONFIG_XMON)$(CONFIG_KEXEC_CORE)$(CONFIG_PPC_BOOK3S),)
+ obj-y				+= ppc_save_regs.o
+ endif
+ 
+diff --git a/arch/powerpc/kexec/crash.c b/arch/powerpc/kexec/crash.c
+index 9ac3266e496522..a325c1c02f96dc 100644
+--- a/arch/powerpc/kexec/crash.c
++++ b/arch/powerpc/kexec/crash.c
+@@ -359,7 +359,10 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
+ 	if (TRAP(regs) == INTERRUPT_SYSTEM_RESET)
+ 		is_via_system_reset = 1;
+ 
+-	crash_smp_send_stop();
++	if (IS_ENABLED(CONFIG_SMP))
++		crash_smp_send_stop();
++	else
++		crash_kexec_prepare();
+ 
+ 	crash_save_cpu(regs, crashing_cpu);
+ 
+diff --git a/arch/powerpc/platforms/book3s/vas-api.c b/arch/powerpc/platforms/book3s/vas-api.c
+index 0b6365d85d1171..dc6f75d3ac6ef7 100644
+--- a/arch/powerpc/platforms/book3s/vas-api.c
++++ b/arch/powerpc/platforms/book3s/vas-api.c
+@@ -521,6 +521,15 @@ static int coproc_mmap(struct file *fp, struct vm_area_struct *vma)
+ 		return -EINVAL;
+ 	}
+ 
++	/*
++	 * The complete page is mapped to the paste address, so user
++	 * space should pass 0ULL as the offset parameter.
++	 */
++	if (vma->vm_pgoff) {
++		pr_debug("Page offset unsupported to map paste address\n");
++		return -EINVAL;
++	}
++
+ 	/* Ensure instance has an open send window */
+ 	if (!txwin) {
+ 		pr_err("No send window open?\n");
+diff --git a/arch/powerpc/platforms/powernv/memtrace.c b/arch/powerpc/platforms/powernv/memtrace.c
+index 877720c645151f..35471b679638a8 100644
+--- a/arch/powerpc/platforms/powernv/memtrace.c
++++ b/arch/powerpc/platforms/powernv/memtrace.c
+@@ -48,11 +48,15 @@ static ssize_t memtrace_read(struct file *filp, char __user *ubuf,
+ static int memtrace_mmap(struct file *filp, struct vm_area_struct *vma)
+ {
+ 	struct memtrace_entry *ent = filp->private_data;
++	unsigned long ent_nrpages = ent->size >> PAGE_SHIFT;
++	unsigned long vma_nrpages = vma_pages(vma);
+ 
+-	if (ent->size < vma->vm_end - vma->vm_start)
++	/* The requested page offset should be within object's page count */
++	if (vma->vm_pgoff >= ent_nrpages)
+ 		return -EINVAL;
+ 
+-	if (vma->vm_pgoff << PAGE_SHIFT >= ent->size)
++	/* The requested mapping range should remain within the bounds */
++	if (vma_nrpages > ent_nrpages - vma->vm_pgoff)
+ 		return -EINVAL;
+ 
+ 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
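
Both memtrace hunks and the vas-api hunk above replace byte-based comparisons with page-count checks that cannot overflow. The same validation, extracted as a hedged helper (hypothetical, not a kernel API):

/* Validate an mmap request against an object that is obj_pages pages
 * long. Working in page counts, and subtracting only after pgoff is
 * known to be in range, avoids the wrap that a byte-based
 * "pgoff << PAGE_SHIFT >= size" check can hit for huge offsets.
 */
static int mmap_range_ok(unsigned long obj_pages, unsigned long pgoff,
			 unsigned long nr_pages)
{
	if (pgoff >= obj_pages)			/* offset inside the object? */
		return 0;
	if (nr_pages > obj_pages - pgoff)	/* range stays inside it? */
		return 0;
	return 1;
}
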
+diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
+index d6ebc19fb99c51..eec333dd2e598c 100644
+--- a/arch/powerpc/platforms/pseries/iommu.c
++++ b/arch/powerpc/platforms/pseries/iommu.c
+@@ -197,7 +197,7 @@ static void tce_iommu_userspace_view_free(struct iommu_table *tbl)
+ 
+ static void tce_free_pSeries(struct iommu_table *tbl)
+ {
+-	if (!tbl->it_userspace)
++	if (tbl->it_userspace)
+ 		tce_iommu_userspace_view_free(tbl);
+ }
+ 
+diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c
+index d14bfc23e315b0..36ac96eac9c9e4 100644
+--- a/arch/riscv/kernel/traps_misaligned.c
++++ b/arch/riscv/kernel/traps_misaligned.c
+@@ -429,7 +429,7 @@ int handle_misaligned_load(struct pt_regs *regs)
+ 
+ 	val.data_u64 = 0;
+ 	if (user_mode(regs)) {
+-		if (copy_from_user(&val, (u8 __user *)addr, len))
++		if (copy_from_user_nofault(&val, (u8 __user *)addr, len))
+ 			return -1;
+ 	} else {
+ 		memcpy(&val, (u8 *)addr, len);
+@@ -530,7 +530,7 @@ int handle_misaligned_store(struct pt_regs *regs)
+ 		return -EOPNOTSUPP;
+ 
+ 	if (user_mode(regs)) {
+-		if (copy_to_user((u8 __user *)addr, &val, len))
++		if (copy_to_user_nofault((u8 __user *)addr, &val, len))
+ 			return -1;
+ 	} else {
+ 		memcpy((u8 *)addr, &val, len);
+diff --git a/arch/riscv/kvm/vcpu_sbi.c b/arch/riscv/kvm/vcpu_sbi.c
+index 6e704ed86a83a9..635c67ed36653d 100644
+--- a/arch/riscv/kvm/vcpu_sbi.c
++++ b/arch/riscv/kvm/vcpu_sbi.c
+@@ -139,9 +139,9 @@ void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
+ 	struct kvm_vcpu *tmp;
+ 
+ 	kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
+-		spin_lock(&vcpu->arch.mp_state_lock);
++		spin_lock(&tmp->arch.mp_state_lock);
+ 		WRITE_ONCE(tmp->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED);
+-		spin_unlock(&vcpu->arch.mp_state_lock);
++		spin_unlock(&tmp->arch.mp_state_lock);
+ 	}
+ 	kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);
+ 
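
The fix above is easy to miss: the loop previously locked the calling vCPU on every iteration instead of the vCPU actually being updated. A user-space analogue of the corrected iteration, with hypothetical types and pthreads standing in for the kernel spinlock:

#include <pthread.h>

struct vcpu_sketch {
	pthread_mutex_t mp_state_lock;
	int mp_state;
};

/* Stop every vCPU, taking each element's own lock rather than the lock
 * of the vCPU that requested the reset, which is what the buggy
 * version did.
 */
static void reset_all(struct vcpu_sketch *vcpus, int n)
{
	for (int i = 0; i < n; i++) {
		pthread_mutex_lock(&vcpus[i].mp_state_lock);
		vcpus[i].mp_state = 0;	/* KVM_MP_STATE_STOPPED analogue */
		pthread_mutex_unlock(&vcpus[i].mp_state_lock);
	}
}
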
+diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
+index 9d440a0b729eb7..64bb8b71013ae4 100644
+--- a/arch/s390/net/bpf_jit_comp.c
++++ b/arch/s390/net/bpf_jit_comp.c
+@@ -605,17 +605,15 @@ static void bpf_jit_prologue(struct bpf_jit *jit, struct bpf_prog *fp,
+ 	}
+ 	/* Setup stack and backchain */
+ 	if (is_first_pass(jit) || (jit->seen & SEEN_STACK)) {
+-		if (is_first_pass(jit) || (jit->seen & SEEN_FUNC))
+-			/* lgr %w1,%r15 (backchain) */
+-			EMIT4(0xb9040000, REG_W1, REG_15);
++		/* lgr %w1,%r15 (backchain) */
++		EMIT4(0xb9040000, REG_W1, REG_15);
+ 		/* la %bfp,STK_160_UNUSED(%r15) (BPF frame pointer) */
+ 		EMIT4_DISP(0x41000000, BPF_REG_FP, REG_15, STK_160_UNUSED);
+ 		/* aghi %r15,-STK_OFF */
+ 		EMIT4_IMM(0xa70b0000, REG_15, -(STK_OFF + stack_depth));
+-		if (is_first_pass(jit) || (jit->seen & SEEN_FUNC))
+-			/* stg %w1,152(%r15) (backchain) */
+-			EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0,
+-				      REG_15, 152);
++		/* stg %w1,152(%r15) (backchain) */
++		EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0,
++			      REG_15, 152);
+ 	}
+ }
+ 
+diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c
+index 0bfde2ea5cb8ce..cdf7bf02983626 100644
+--- a/arch/x86/events/amd/uncore.c
++++ b/arch/x86/events/amd/uncore.c
+@@ -38,7 +38,6 @@ struct amd_uncore_ctx {
+ 	int refcnt;
+ 	int cpu;
+ 	struct perf_event **events;
+-	struct hlist_node node;
+ };
+ 
+ struct amd_uncore_pmu {
+@@ -890,6 +889,39 @@ static void amd_uncore_umc_start(struct perf_event *event, int flags)
+ 	perf_event_update_userpage(event);
+ }
+ 
++static void amd_uncore_umc_read(struct perf_event *event)
++{
++	struct hw_perf_event *hwc = &event->hw;
++	u64 prev, new, shift;
++	s64 delta;
++
++	shift = COUNTER_SHIFT + 1;
++	prev = local64_read(&hwc->prev_count);
++
++	/*
++	 * UMC counters do not have RDPMC assignments. Read counts directly
++	 * from the corresponding PERF_CTR.
++	 */
++	rdmsrl(hwc->event_base, new);
++
++	/*
++	 * Unlike the other uncore counters, UMC counters saturate and set the
++	 * Overflow bit (bit 48) on overflow. Since they do not roll over,
++	 * proactively reset the corresponding PERF_CTR when bit 47 is set so
++	 * that the counter never gets a chance to saturate.
++	 */
++	if (new & BIT_ULL(63 - COUNTER_SHIFT)) {
++		wrmsrl(hwc->event_base, 0);
++		local64_set(&hwc->prev_count, 0);
++	} else {
++		local64_set(&hwc->prev_count, new);
++	}
++
++	delta = (new << shift) - (prev << shift);
++	delta >>= shift;
++	local64_add(delta, &event->count);
++}
++
+ static
+ void amd_uncore_umc_ctx_scan(struct amd_uncore *uncore, unsigned int cpu)
+ {
+@@ -967,7 +999,7 @@ int amd_uncore_umc_ctx_init(struct amd_uncore *uncore, unsigned int cpu)
+ 				.del		= amd_uncore_del,
+ 				.start		= amd_uncore_umc_start,
+ 				.stop		= amd_uncore_stop,
+-				.read		= amd_uncore_read,
++				.read		= amd_uncore_umc_read,
+ 				.capabilities	= PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT,
+ 				.module		= THIS_MODULE,
+ 			};
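
The shift arithmetic in amd_uncore_umc_read() is the standard trick for taking deltas of counters narrower than 64 bits; shift = COUNTER_SHIFT + 1 above corresponds to a 47-bit effective width. A standalone sketch of the same computation:

#include <stdint.h>

/* Delta of a hardware counter that is only `width` bits wide: shifting
 * both samples up by (64 - width) discards the unused high bits, and
 * the arithmetic shift back down sign-extends, so a counter wrap still
 * yields the correct positive delta.
 */
static int64_t counter_delta(uint64_t prev, uint64_t now, unsigned int width)
{
	unsigned int shift = 64 - width;
	int64_t delta = (int64_t)((now << shift) - (prev << shift));

	return delta >> shift;
}
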
+diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h
+index 920426d691ce71..3e4e85f71a6ad0 100644
+--- a/arch/x86/include/asm/mwait.h
++++ b/arch/x86/include/asm/mwait.h
+@@ -117,13 +117,10 @@ static __always_inline void __sti_mwait(unsigned long eax, unsigned long ecx)
+ static __always_inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
+ {
+ 	if (static_cpu_has_bug(X86_BUG_MONITOR) || !current_set_polling_and_test()) {
+-		if (static_cpu_has_bug(X86_BUG_CLFLUSH_MONITOR)) {
+-			mb();
+-			clflush((void *)&current_thread_info()->flags);
+-			mb();
+-		}
++		const void *addr = &current_thread_info()->flags;
+ 
+-		__monitor((void *)&current_thread_info()->flags, 0, 0);
++		alternative_input("", "clflush (%[addr])", X86_BUG_CLFLUSH_MONITOR, [addr] "a" (addr));
++		__monitor(addr, 0, 0);
+ 
+ 		if (!need_resched()) {
+ 			if (ecx & 1) {
+diff --git a/arch/x86/include/asm/sighandling.h b/arch/x86/include/asm/sighandling.h
+index e770c4fc47f4c5..8727c7e21dd1e6 100644
+--- a/arch/x86/include/asm/sighandling.h
++++ b/arch/x86/include/asm/sighandling.h
+@@ -24,4 +24,26 @@ int ia32_setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs);
+ int x64_setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs);
+ int x32_setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs);
+ 
++/*
++ * To prevent an immediate repeat of the single-step trap on return from the
++ * SIGTRAP handler when the trap flag (TF) is set without an external debugger
++ * attached, clear the software event flag in the augmented SS, ensuring no
++ * single-step trap is pending upon ERETU completion.
++ *
++ * Note, this function should be called in sigreturn() before the original
++ * state is restored to make sure the TF is read from the entry frame.
++ */
++static __always_inline void prevent_single_step_upon_eretu(struct pt_regs *regs)
++{
++	/*
++	 * If the trap flag (TF) is set, i.e., the sigreturn() SYSCALL instruction
++	 * is being single-stepped, do not clear the software event flag in the
++	 * augmented SS, so that a debugger won't skip over the following instruction.
++	 */
++#ifdef CONFIG_X86_FRED
++	if (!(regs->flags & X86_EFLAGS_TF))
++		regs->fred_ss.swevent = 0;
++#endif
++}
++
+ #endif /* _ASM_X86_SIGHANDLING_H */
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index 39e9ec3dea985d..b4877544552369 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -1007,17 +1007,18 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
+ 		c->x86_capability[CPUID_D_1_EAX] = eax;
+ 	}
+ 
+-	/* AMD-defined flags: level 0x80000001 */
++	/*
++	 * Check if extended CPUID leaves are implemented: Max extended
++	 * CPUID leaf must be in the 0x80000001-0x8000ffff range.
++	 */
+ 	eax = cpuid_eax(0x80000000);
+-	c->extended_cpuid_level = eax;
++	c->extended_cpuid_level = ((eax & 0xffff0000) == 0x80000000) ? eax : 0;
+ 
+-	if ((eax & 0xffff0000) == 0x80000000) {
+-		if (eax >= 0x80000001) {
+-			cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
++	if (c->extended_cpuid_level >= 0x80000001) {
++		cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
+ 
+-			c->x86_capability[CPUID_8000_0001_ECX] = ecx;
+-			c->x86_capability[CPUID_8000_0001_EDX] = edx;
+-		}
++		c->x86_capability[CPUID_8000_0001_ECX] = ecx;
++		c->x86_capability[CPUID_8000_0001_EDX] = edx;
+ 	}
+ 
+ 	if (c->extended_cpuid_level >= 0x80000007) {
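
The rewritten check folds leaf validation into extended_cpuid_level itself. A self-contained user-space sketch of the same guard, using GCC's cpuid intrinsic (the helper name is hypothetical):

#include <stdint.h>
#include <cpuid.h>

/* Return the maximum extended CPUID leaf, or 0 if the CPU does not
 * implement extended leaves: a valid maximum must fall in the
 * 0x80000001-0x8000ffff range; anything else is garbage echoed back.
 */
static uint32_t extended_cpuid_level(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(0x80000000, &eax, &ebx, &ecx, &edx))
		return 0;
	return ((eax & 0xffff0000) == 0x80000000) ? eax : 0;
}
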
+diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
+index 079f046ee26d19..e8021d3e58824a 100644
+--- a/arch/x86/kernel/cpu/microcode/core.c
++++ b/arch/x86/kernel/cpu/microcode/core.c
+@@ -696,6 +696,8 @@ static int load_late_locked(void)
+ 		return load_late_stop_cpus(true);
+ 	case UCODE_NFOUND:
+ 		return -ENOENT;
++	case UCODE_OK:
++		return 0;
+ 	default:
+ 		return -EBADFD;
+ 	}
+diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
+index 7b29ebda024f4e..1ececfce7a46a6 100644
+--- a/arch/x86/kernel/cpu/mtrr/generic.c
++++ b/arch/x86/kernel/cpu/mtrr/generic.c
+@@ -591,7 +591,7 @@ static void get_fixed_ranges(mtrr_type *frs)
+ 
+ void mtrr_save_fixed_ranges(void *info)
+ {
+-	if (boot_cpu_has(X86_FEATURE_MTRR))
++	if (mtrr_state.have_fixed)
+ 		get_fixed_ranges(mtrr_state.fixed_ranges);
+ }
+ 
+diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
+index e2fab3ceb09fb7..9a101150376db7 100644
+--- a/arch/x86/kernel/ioport.c
++++ b/arch/x86/kernel/ioport.c
+@@ -33,8 +33,9 @@ void io_bitmap_share(struct task_struct *tsk)
+ 	set_tsk_thread_flag(tsk, TIF_IO_BITMAP);
+ }
+ 
+-static void task_update_io_bitmap(struct task_struct *tsk)
++static void task_update_io_bitmap(void)
+ {
++	struct task_struct *tsk = current;
+ 	struct thread_struct *t = &tsk->thread;
+ 
+ 	if (t->iopl_emul == 3 || t->io_bitmap) {
+@@ -54,7 +55,12 @@ void io_bitmap_exit(struct task_struct *tsk)
+ 	struct io_bitmap *iobm = tsk->thread.io_bitmap;
+ 
+ 	tsk->thread.io_bitmap = NULL;
+-	task_update_io_bitmap(tsk);
++	/*
++	 * Don't touch the TSS when invoked on a failed fork(). TSS
++	 * reflects the state of @current and not the state of @tsk.
++	 */
++	if (tsk == current)
++		task_update_io_bitmap();
+ 	if (iobm && refcount_dec_and_test(&iobm->refcnt))
+ 		kfree(iobm);
+ }
+@@ -192,8 +198,7 @@ SYSCALL_DEFINE1(iopl, unsigned int, level)
+ 	}
+ 
+ 	t->iopl_emul = level;
+-	task_update_io_bitmap(current);
+-
++	task_update_io_bitmap();
+ 	return 0;
+ }
+ 
+diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
+index feca4f20b06aaa..85fa2db38dc42b 100644
+--- a/arch/x86/kernel/irq.c
++++ b/arch/x86/kernel/irq.c
+@@ -414,7 +414,7 @@ static __always_inline bool handle_pending_pir(u64 *pir, struct pt_regs *regs)
+ 	bool handled = false;
+ 
+ 	for (i = 0; i < 4; i++)
+-		pir_copy[i] = pir[i];
++		pir_copy[i] = READ_ONCE(pir[i]);
+ 
+ 	for (i = 0; i < 4; i++) {
+ 		if (!pir_copy[i])
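
READ_ONCE() here keeps the compiler from re-reading the live PIR words after the snapshot is taken. A user-space analogue of the guarantee, with a volatile cast standing in for the kernel macro:

#include <stdint.h>

/* Snapshot four concurrently-updated words exactly once each. Without
 * the volatile access the compiler may fold the copy away and reload
 * pir[i] later, observing bits posted after the snapshot was taken.
 */
#define READ_ONCE_SKETCH(x) (*(const volatile __typeof__(x) *)&(x))

static uint64_t snapshot_pir(const uint64_t pir[4], uint64_t copy[4])
{
	uint64_t any = 0;

	for (int i = 0; i < 4; i++) {
		copy[i] = READ_ONCE_SKETCH(pir[i]);
		any |= copy[i];
	}
	return any;
}
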
+diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
+index c7ce3655b70780..1dbd7a34645c29 100644
+--- a/arch/x86/kernel/process.c
++++ b/arch/x86/kernel/process.c
+@@ -180,6 +180,7 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
+ 	frame->ret_addr = (unsigned long) ret_from_fork_asm;
+ 	p->thread.sp = (unsigned long) fork_frame;
+ 	p->thread.io_bitmap = NULL;
++	clear_tsk_thread_flag(p, TIF_IO_BITMAP);
+ 	p->thread.iopl_warn = 0;
+ 	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
+ 
+@@ -468,6 +469,11 @@ void native_tss_update_io_bitmap(void)
+ 	} else {
+ 		struct io_bitmap *iobm = t->io_bitmap;
+ 
++		if (WARN_ON_ONCE(!iobm)) {
++			clear_thread_flag(TIF_IO_BITMAP);
++			native_tss_invalidate_io_bitmap();
++		}
++
+ 		/*
+ 		 * Only copy bitmap data when the sequence number differs. The
+ 		 * update time is accounted to the incoming task.
+@@ -906,13 +912,10 @@ static __init bool prefer_mwait_c1_over_halt(void)
+ static __cpuidle void mwait_idle(void)
+ {
+ 	if (!current_set_polling_and_test()) {
+-		if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
+-			mb(); /* quirk */
+-			clflush((void *)&current_thread_info()->flags);
+-			mb(); /* quirk */
+-		}
++		const void *addr = &current_thread_info()->flags;
+ 
+-		__monitor((void *)&current_thread_info()->flags, 0, 0);
++		alternative_input("", "clflush (%[addr])", X86_BUG_CLFLUSH_MONITOR, [addr] "a" (addr));
++		__monitor(addr, 0, 0);
+ 		if (!need_resched()) {
+ 			__sti_mwait(0, 0);
+ 			raw_local_irq_disable();
+diff --git a/arch/x86/kernel/signal_32.c b/arch/x86/kernel/signal_32.c
+index 98123ff10506c6..42bbc42bd3503c 100644
+--- a/arch/x86/kernel/signal_32.c
++++ b/arch/x86/kernel/signal_32.c
+@@ -152,6 +152,8 @@ SYSCALL32_DEFINE0(sigreturn)
+ 	struct sigframe_ia32 __user *frame = (struct sigframe_ia32 __user *)(regs->sp-8);
+ 	sigset_t set;
+ 
++	prevent_single_step_upon_eretu(regs);
++
+ 	if (!access_ok(frame, sizeof(*frame)))
+ 		goto badframe;
+ 	if (__get_user(set.sig[0], &frame->sc.oldmask)
+@@ -175,6 +177,8 @@ SYSCALL32_DEFINE0(rt_sigreturn)
+ 	struct rt_sigframe_ia32 __user *frame;
+ 	sigset_t set;
+ 
++	prevent_single_step_upon_eretu(regs);
++
+ 	frame = (struct rt_sigframe_ia32 __user *)(regs->sp - 4);
+ 
+ 	if (!access_ok(frame, sizeof(*frame)))
+diff --git a/arch/x86/kernel/signal_64.c b/arch/x86/kernel/signal_64.c
+index ee9453891901b7..d483b585c6c604 100644
+--- a/arch/x86/kernel/signal_64.c
++++ b/arch/x86/kernel/signal_64.c
+@@ -250,6 +250,8 @@ SYSCALL_DEFINE0(rt_sigreturn)
+ 	sigset_t set;
+ 	unsigned long uc_flags;
+ 
++	prevent_single_step_upon_eretu(regs);
++
+ 	frame = (struct rt_sigframe __user *)(regs->sp - sizeof(long));
+ 	if (!access_ok(frame, sizeof(*frame)))
+ 		goto badframe;
+@@ -366,6 +368,8 @@ COMPAT_SYSCALL_DEFINE0(x32_rt_sigreturn)
+ 	sigset_t set;
+ 	unsigned long uc_flags;
+ 
++	prevent_single_step_upon_eretu(regs);
++
+ 	frame = (struct rt_sigframe_x32 __user *)(regs->sp - 8);
+ 
+ 	if (!access_ok(frame, sizeof(*frame)))
+diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt
+index f5dd84eb55dcda..cd3fd5155f6ece 100644
+--- a/arch/x86/lib/x86-opcode-map.txt
++++ b/arch/x86/lib/x86-opcode-map.txt
+@@ -35,7 +35,7 @@
+ #  - (!F3) : the last prefix is not 0xF3 (including non-last prefix case)
+ #  - (66&F2): Both 0x66 and 0xF2 prefixes are specified.
+ #
+-# REX2 Prefix
++# REX2 Prefix Superscripts
+ #  - (!REX2): REX2 is not allowed
+ #  - (REX2): REX2 variant e.g. JMPABS
+ 
+@@ -286,10 +286,10 @@ df: ESC
+ # Note: "forced64" is Intel CPU behavior: they ignore 0x66 prefix
+ # in 64-bit mode. AMD CPUs accept 0x66 prefix, it causes RIP truncation
+ # to 16 bits. In 32-bit mode, 0x66 is accepted by both Intel and AMD.
+-e0: LOOPNE/LOOPNZ Jb (f64) (!REX2)
+-e1: LOOPE/LOOPZ Jb (f64) (!REX2)
+-e2: LOOP Jb (f64) (!REX2)
+-e3: JrCXZ Jb (f64) (!REX2)
++e0: LOOPNE/LOOPNZ Jb (f64),(!REX2)
++e1: LOOPE/LOOPZ Jb (f64),(!REX2)
++e2: LOOP Jb (f64),(!REX2)
++e3: JrCXZ Jb (f64),(!REX2)
+ e4: IN AL,Ib (!REX2)
+ e5: IN eAX,Ib (!REX2)
+ e6: OUT Ib,AL (!REX2)
+@@ -298,10 +298,10 @@ e7: OUT Ib,eAX (!REX2)
+ # in "near" jumps and calls is 16-bit. For CALL,
+ # push of return address is 16-bit wide, RSP is decremented by 2
+ # but is not truncated to 16 bits, unlike RIP.
+-e8: CALL Jz (f64) (!REX2)
+-e9: JMP-near Jz (f64) (!REX2)
+-ea: JMP-far Ap (i64) (!REX2)
+-eb: JMP-short Jb (f64) (!REX2)
++e8: CALL Jz (f64),(!REX2)
++e9: JMP-near Jz (f64),(!REX2)
++ea: JMP-far Ap (i64),(!REX2)
++eb: JMP-short Jb (f64),(!REX2)
+ ec: IN AL,DX (!REX2)
+ ed: IN eAX,DX (!REX2)
+ ee: OUT DX,AL (!REX2)
+@@ -478,22 +478,22 @@ AVXcode: 1
+ 7f: movq Qq,Pq | vmovdqa Wx,Vx (66) | vmovdqa32/64 Wx,Vx (66),(evo) | vmovdqu Wx,Vx (F3) | vmovdqu32/64 Wx,Vx (F3),(evo) | vmovdqu8/16 Wx,Vx (F2),(ev)
+ # 0x0f 0x80-0x8f
+ # Note: "forced64" is Intel CPU behavior (see comment about CALL insn).
+-80: JO Jz (f64) (!REX2)
+-81: JNO Jz (f64) (!REX2)
+-82: JB/JC/JNAE Jz (f64) (!REX2)
+-83: JAE/JNB/JNC Jz (f64) (!REX2)
+-84: JE/JZ Jz (f64) (!REX2)
+-85: JNE/JNZ Jz (f64) (!REX2)
+-86: JBE/JNA Jz (f64) (!REX2)
+-87: JA/JNBE Jz (f64) (!REX2)
+-88: JS Jz (f64) (!REX2)
+-89: JNS Jz (f64) (!REX2)
+-8a: JP/JPE Jz (f64) (!REX2)
+-8b: JNP/JPO Jz (f64) (!REX2)
+-8c: JL/JNGE Jz (f64) (!REX2)
+-8d: JNL/JGE Jz (f64) (!REX2)
+-8e: JLE/JNG Jz (f64) (!REX2)
+-8f: JNLE/JG Jz (f64) (!REX2)
++80: JO Jz (f64),(!REX2)
++81: JNO Jz (f64),(!REX2)
++82: JB/JC/JNAE Jz (f64),(!REX2)
++83: JAE/JNB/JNC Jz (f64),(!REX2)
++84: JE/JZ Jz (f64),(!REX2)
++85: JNE/JNZ Jz (f64),(!REX2)
++86: JBE/JNA Jz (f64),(!REX2)
++87: JA/JNBE Jz (f64),(!REX2)
++88: JS Jz (f64),(!REX2)
++89: JNS Jz (f64),(!REX2)
++8a: JP/JPE Jz (f64),(!REX2)
++8b: JNP/JPO Jz (f64),(!REX2)
++8c: JL/JNGE Jz (f64),(!REX2)
++8d: JNL/JGE Jz (f64),(!REX2)
++8e: JLE/JNG Jz (f64),(!REX2)
++8f: JNLE/JG Jz (f64),(!REX2)
+ # 0x0f 0x90-0x9f
+ 90: SETO Eb | kmovw/q Vk,Wk | kmovb/d Vk,Wk (66)
+ 91: SETNO Eb | kmovw/q Mv,Vk | kmovb/d Mv,Vk (66)
+diff --git a/block/blk-zoned.c b/block/blk-zoned.c
+index 414118435240ab..164ded9eb14446 100644
+--- a/block/blk-zoned.c
++++ b/block/blk-zoned.c
+@@ -1321,7 +1321,6 @@ static void blk_zone_wplug_bio_work(struct work_struct *work)
+ 	spin_unlock_irqrestore(&zwplug->lock, flags);
+ 
+ 	bdev = bio->bi_bdev;
+-	submit_bio_noacct_nocheck(bio);
+ 
+ 	/*
+ 	 * blk-mq devices will reuse the extra reference on the request queue
+@@ -1329,8 +1328,12 @@ static void blk_zone_wplug_bio_work(struct work_struct *work)
+ 	 * path for BIO-based devices will not do that. So drop this extra
+ 	 * reference here.
+ 	 */
+-	if (bdev_test_flag(bdev, BD_HAS_SUBMIT_BIO))
++	if (bdev_test_flag(bdev, BD_HAS_SUBMIT_BIO)) {
++		bdev->bd_disk->fops->submit_bio(bio);
+ 		blk_queue_exit(bdev->bd_disk->queue);
++	} else {
++		blk_mq_submit_bio(bio);
++	}
+ 
+ put_zwplug:
+ 	/* Drop the reference we took in disk_zone_wplug_schedule_bio_work(). */
+diff --git a/block/elevator.c b/block/elevator.c
+index 43ba4ab1ada7fd..1f76e9efd77170 100644
+--- a/block/elevator.c
++++ b/block/elevator.c
+@@ -752,7 +752,6 @@ ssize_t elv_iosched_store(struct gendisk *disk, const char *buf,
+ ssize_t elv_iosched_show(struct gendisk *disk, char *name)
+ {
+ 	struct request_queue *q = disk->queue;
+-	struct elevator_queue *eq = q->elevator;
+ 	struct elevator_type *cur = NULL, *e;
+ 	int len = 0;
+ 
+@@ -763,7 +762,7 @@ ssize_t elv_iosched_show(struct gendisk *disk, char *name)
+ 		len += sprintf(name+len, "[none] ");
+ 	} else {
+ 		len += sprintf(name+len, "none ");
+-		cur = eq->type;
++		cur = q->elevator->type;
+ 	}
+ 
+ 	spin_lock(&elv_list_lock);
+diff --git a/crypto/api.c b/crypto/api.c
+index c2c4eb14ef955f..5ce54328fef11f 100644
+--- a/crypto/api.c
++++ b/crypto/api.c
+@@ -220,10 +220,19 @@ static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg,
+ 		if (crypto_is_test_larval(larval))
+ 			crypto_larval_kill(larval);
+ 		alg = ERR_PTR(-ETIMEDOUT);
+-	} else if (!alg) {
++	} else if (!alg || PTR_ERR(alg) == -EEXIST) {
++		int err = alg ? -EEXIST : -EAGAIN;
++
++		/*
++		 * EEXIST is expected because two probes can be scheduled
++		 * at the same time with one using alg_name and the other
++		 * using driver_name.  Do a re-lookup but do not retry in
++		 * case we hit a quirk like gcm_base(ctr(aes),...) which
++		 * will never match.
++		 */
+ 		alg = &larval->alg;
+ 		alg = crypto_alg_lookup(alg->cra_name, type, mask) ?:
+-		      ERR_PTR(-EAGAIN);
++		      ERR_PTR(err);
+ 	} else if (IS_ERR(alg))
+ 		;
+ 	else if (crypto_is_test_larval(larval) &&
+diff --git a/crypto/lrw.c b/crypto/lrw.c
+index e216fbf2b78667..4bede0031c63c7 100644
+--- a/crypto/lrw.c
++++ b/crypto/lrw.c
+@@ -322,7 +322,7 @@ static int lrw_create(struct crypto_template *tmpl, struct rtattr **tb)
+ 
+ 	err = crypto_grab_skcipher(spawn, skcipher_crypto_instance(inst),
+ 				   cipher_name, 0, mask);
+-	if (err == -ENOENT) {
++	if (err == -ENOENT && memcmp(cipher_name, "ecb(", 4)) {
+ 		err = -ENAMETOOLONG;
+ 		if (snprintf(ecb_name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
+ 			     cipher_name) >= CRYPTO_MAX_ALG_NAME)
+@@ -356,7 +356,7 @@ static int lrw_create(struct crypto_template *tmpl, struct rtattr **tb)
+ 	/* Alas we screwed up the naming so we have to mangle the
+ 	 * cipher name.
+ 	 */
+-	if (!strncmp(cipher_name, "ecb(", 4)) {
++	if (!memcmp(cipher_name, "ecb(", 4)) {
+ 		int len;
+ 
+ 		len = strscpy(ecb_name, cipher_name + 4, sizeof(ecb_name));
+diff --git a/crypto/xts.c b/crypto/xts.c
+index 672e1a3f0b0c93..91e391a6ba270d 100644
+--- a/crypto/xts.c
++++ b/crypto/xts.c
+@@ -363,7 +363,7 @@ static int xts_create(struct crypto_template *tmpl, struct rtattr **tb)
+ 
+ 	err = crypto_grab_skcipher(&ctx->spawn, skcipher_crypto_instance(inst),
+ 				   cipher_name, 0, mask);
+-	if (err == -ENOENT) {
++	if (err == -ENOENT && memcmp(cipher_name, "ecb(", 4)) {
+ 		err = -ENAMETOOLONG;
+ 		if (snprintf(name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
+ 			     cipher_name) >= CRYPTO_MAX_ALG_NAME)
+@@ -397,7 +397,7 @@ static int xts_create(struct crypto_template *tmpl, struct rtattr **tb)
+ 	/* Alas we screwed up the naming so we have to mangle the
+ 	 * cipher name.
+ 	 */
+-	if (!strncmp(cipher_name, "ecb(", 4)) {
++	if (!memcmp(cipher_name, "ecb(", 4)) {
+ 		int len;
+ 
+ 		len = strscpy(name, cipher_name + 4, sizeof(name));
+diff --git a/drivers/acpi/acpica/exserial.c b/drivers/acpi/acpica/exserial.c
+index 5241f4c01c7655..89a4ac447a2bea 100644
+--- a/drivers/acpi/acpica/exserial.c
++++ b/drivers/acpi/acpica/exserial.c
+@@ -201,6 +201,12 @@ acpi_ex_read_serial_bus(union acpi_operand_object *obj_desc,
+ 		function = ACPI_READ;
+ 		break;
+ 
++	case ACPI_ADR_SPACE_FIXED_HARDWARE:
++
++		buffer_length = ACPI_FFH_INPUT_BUFFER_SIZE;
++		function = ACPI_READ;
++		break;
++
+ 	default:
+ 		return_ACPI_STATUS(AE_AML_INVALID_SPACE_ID);
+ 	}
+diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig
+index 3cfe7e7475f2fd..070c07d68dfb2f 100644
+--- a/drivers/acpi/apei/Kconfig
++++ b/drivers/acpi/apei/Kconfig
+@@ -23,6 +23,7 @@ config ACPI_APEI_GHES
+ 	select ACPI_HED
+ 	select IRQ_WORK
+ 	select GENERIC_ALLOCATOR
++	select ARM_SDE_INTERFACE if ARM64
+ 	help
+ 	  Generic Hardware Error Source provides a way to report
+ 	  platform hardware errors (such as those from the chipset). It
+diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
+index cff6685fa6cc6b..6cf40e8ac321e0 100644
+--- a/drivers/acpi/apei/ghes.c
++++ b/drivers/acpi/apei/ghes.c
+@@ -1612,7 +1612,7 @@ void __init acpi_ghes_init(void)
+ {
+ 	int rc;
+ 
+-	sdei_init();
++	acpi_sdei_init();
+ 
+ 	if (acpi_disabled)
+ 		return;
+diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
+index e78e3754d99e1d..dab941dc984a9d 100644
+--- a/drivers/acpi/cppc_acpi.c
++++ b/drivers/acpi/cppc_acpi.c
+@@ -463,7 +463,7 @@ bool cppc_allow_fast_switch(void)
+ 	struct cpc_desc *cpc_ptr;
+ 	int cpu;
+ 
+-	for_each_possible_cpu(cpu) {
++	for_each_present_cpu(cpu) {
+ 		cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
+ 		desired_reg = &cpc_ptr->cpc_regs[DESIRED_PERF];
+ 		if (!CPC_IN_SYSTEM_MEMORY(desired_reg) &&
+diff --git a/drivers/acpi/osi.c b/drivers/acpi/osi.c
+index df9328c850bd33..f2c943b934be0a 100644
+--- a/drivers/acpi/osi.c
++++ b/drivers/acpi/osi.c
+@@ -42,7 +42,6 @@ static struct acpi_osi_entry
+ osi_setup_entries[OSI_STRING_ENTRIES_MAX] __initdata = {
+ 	{"Module Device", true},
+ 	{"Processor Device", true},
+-	{"3.0 _SCP Extensions", true},
+ 	{"Processor Aggregator Device", true},
+ };
+ 
+diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
+index 14c7bac4100b46..7d59c6c9185fc1 100644
+--- a/drivers/acpi/resource.c
++++ b/drivers/acpi/resource.c
+@@ -534,7 +534,7 @@ static const struct dmi_system_id irq1_level_low_skip_override[] = {
+  */
+ static const struct dmi_system_id irq1_edge_low_force_override[] = {
+ 	{
+-		/* MECHREV Jiaolong17KS Series GM7XG0M */
++		/* MECHREVO Jiaolong17KS Series GM7XG0M */
+ 		.matches = {
+ 			DMI_MATCH(DMI_BOARD_NAME, "GM7XG0M"),
+ 		},
+diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
+index 1abe61f11525d9..faf4cdec23f04c 100644
+--- a/drivers/base/power/main.c
++++ b/drivers/base/power/main.c
+@@ -916,6 +916,8 @@ static void device_resume(struct device *dev, pm_message_t state, bool async)
+ 	if (!dev->power.is_suspended)
+ 		goto Complete;
+ 
++	dev->power.is_suspended = false;
++
+ 	if (dev->power.direct_complete) {
+ 		/* Match the pm_runtime_disable() in __device_suspend(). */
+ 		pm_runtime_enable(dev);
+@@ -971,7 +973,6 @@ static void device_resume(struct device *dev, pm_message_t state, bool async)
+ 
+  End:
+ 	error = dpm_run_callback(callback, dev, state, info);
+-	dev->power.is_suspended = false;
+ 
+ 	device_unlock(dev);
+ 	dpm_watchdog_clear(&wd);
+diff --git a/drivers/block/brd.c b/drivers/block/brd.c
+index 292f127cae0abe..02fa8106ef549f 100644
+--- a/drivers/block/brd.c
++++ b/drivers/block/brd.c
+@@ -224,19 +224,22 @@ static int brd_do_bvec(struct brd_device *brd, struct page *page,
+ 
+ static void brd_do_discard(struct brd_device *brd, sector_t sector, u32 size)
+ {
+-	sector_t aligned_sector = (sector + PAGE_SECTORS) & ~PAGE_SECTORS;
++	sector_t aligned_sector = round_up(sector, PAGE_SECTORS);
++	sector_t aligned_end = round_down(
++			sector + (size >> SECTOR_SHIFT), PAGE_SECTORS);
+ 	struct page *page;
+ 
+-	size -= (aligned_sector - sector) * SECTOR_SIZE;
++	if (aligned_end <= aligned_sector)
++		return;
++
+ 	xa_lock(&brd->brd_pages);
+-	while (size >= PAGE_SIZE && aligned_sector < rd_size * 2) {
++	while (aligned_sector < aligned_end && aligned_sector < rd_size * 2) {
+ 		page = __xa_erase(&brd->brd_pages, aligned_sector >> PAGE_SECTORS_SHIFT);
+ 		if (page) {
+ 			__free_page(page);
+ 			brd->brd_nr_pages--;
+ 		}
+ 		aligned_sector += PAGE_SECTORS;
+-		size -= PAGE_SIZE;
+ 	}
+ 	xa_unlock(&brd->brd_pages);
+ }
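
The discard fix rounds inward on both ends so only pages wholly covered by the request are freed. A sketch of the corrected arithmetic with the kernel helpers spelled out as power-of-two macros; the constants assume 4 KiB pages and 512-byte sectors, as in the driver:

#define PAGE_SECTORS		8UL	/* 4 KiB page / 512-byte sector */
#define PAGE_SECTORS_SHIFT	3

#define round_up_pow2(x, a)	(((x) + (a) - 1) & ~((a) - 1))
#define round_down_pow2(x, a)	((x) & ~((a) - 1))

void free_backing_page(unsigned long pgidx);	/* hypothetical */

/* Free only pages lying entirely inside [sector, sector + nr_sects).
 * An unaligned request smaller than a page ends up with start >= end
 * and correctly frees nothing.
 */
static void discard_sketch(unsigned long sector, unsigned long nr_sects)
{
	unsigned long start = round_up_pow2(sector, PAGE_SECTORS);
	unsigned long end = round_down_pow2(sector + nr_sects, PAGE_SECTORS);

	for (unsigned long s = start; s < end; s += PAGE_SECTORS)
		free_backing_page(s >> PAGE_SECTORS_SHIFT);
}
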
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index 0843d229b0f765..e9a197474b9d8b 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -323,11 +323,14 @@ static void lo_complete_rq(struct request *rq)
+ static void lo_rw_aio_do_completion(struct loop_cmd *cmd)
+ {
+ 	struct request *rq = blk_mq_rq_from_pdu(cmd);
++	struct loop_device *lo = rq->q->queuedata;
+ 
+ 	if (!atomic_dec_and_test(&cmd->ref))
+ 		return;
+ 	kfree(cmd->bvec);
+ 	cmd->bvec = NULL;
++	if (req_op(rq) == REQ_OP_WRITE)
++		file_end_write(lo->lo_backing_file);
+ 	if (likely(!blk_should_fake_timeout(rq->q)))
+ 		blk_mq_complete_request(rq);
+ }
+@@ -402,9 +405,10 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
+ 		cmd->iocb.ki_flags = 0;
+ 	}
+ 
+-	if (rw == ITER_SOURCE)
++	if (rw == ITER_SOURCE) {
++		file_start_write(lo->lo_backing_file);
+ 		ret = file->f_op->write_iter(&cmd->iocb, &iter);
+-	else
++	} else
+ 		ret = file->f_op->read_iter(&cmd->iocb, &iter);
+ 
+ 	lo_rw_aio_do_completion(cmd);
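
The loop change pairs write accounting across an asynchronous completion: file_start_write() at submission is balanced by file_end_write() only when the last reference to the command drops, which may happen in the completion path rather than in the submitter. A reduced sketch of that ownership rule, with hypothetical names and C11 atomics for the refcount:

#include <stdatomic.h>

struct cmd_sketch {
	atomic_int ref;		/* submitter + in-flight completion */
};

void start_write_accounting(void);	/* file_start_write() stand-in */
void end_write_accounting(void);	/* file_end_write() stand-in */
void complete_request(struct cmd_sketch *cmd);

/* Called by both the submitter and the async completion path; whoever
 * drops the final reference closes the accounting opened at submit.
 */
static void cmd_put(struct cmd_sketch *cmd)
{
	if (atomic_fetch_sub(&cmd->ref, 1) != 1)
		return;
	end_write_accounting();
	complete_request(cmd);
}
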
+diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
+index 645047fb92fd26..51d6d91ed4041b 100644
+--- a/drivers/bluetooth/btintel.c
++++ b/drivers/bluetooth/btintel.c
+@@ -2705,7 +2705,7 @@ static int btintel_uefi_get_dsbr(u32 *dsbr_var)
+ 	} __packed data;
+ 
+ 	efi_status_t status;
+-	unsigned long data_size = 0;
++	unsigned long data_size = sizeof(data);
+ 	efi_guid_t guid = EFI_GUID(0xe65d8884, 0xd4af, 0x4b20, 0x8d, 0x03,
+ 				   0x77, 0x2e, 0xcc, 0x3d, 0xa5, 0x31);
+ 
+@@ -2715,16 +2715,10 @@ static int btintel_uefi_get_dsbr(u32 *dsbr_var)
+ 	if (!efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE))
+ 		return -EOPNOTSUPP;
+ 
+-	status = efi.get_variable(BTINTEL_EFI_DSBR, &guid, NULL, &data_size,
+-				  NULL);
+-
+-	if (status != EFI_BUFFER_TOO_SMALL || !data_size)
+-		return -EIO;
+-
+ 	status = efi.get_variable(BTINTEL_EFI_DSBR, &guid, NULL, &data_size,
+ 				  &data);
+ 
+-	if (status != EFI_SUCCESS)
++	if (status != EFI_SUCCESS || data_size != sizeof(data))
+ 		return -ENXIO;
+ 
+ 	*dsbr_var = data.dsbr;
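
The simplification drops the size-discovery call: since the variable's layout is fixed, the driver passes a buffer of the expected size up front and rejects any other length. A hedged kernel-style sketch of the pattern against the EFI runtime interface (the wrapper itself is hypothetical):

/* Read an EFI variable whose size is known in advance. A single
 * get_variable() call with a correctly sized buffer replaces the
 * probe-then-read dance; a mismatched length is treated as an error.
 */
static int read_fixed_size_var(efi_char16_t *name, efi_guid_t *guid,
			       void *buf, unsigned long expected)
{
	unsigned long size = expected;
	efi_status_t status;

	status = efi.get_variable(name, guid, NULL, &size, buf);
	if (status != EFI_SUCCESS || size != expected)
		return -ENXIO;
	return 0;
}
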
+diff --git a/drivers/bluetooth/btintel_pcie.c b/drivers/bluetooth/btintel_pcie.c
+index d225f0a37f985d..34812bf7587d66 100644
+--- a/drivers/bluetooth/btintel_pcie.c
++++ b/drivers/bluetooth/btintel_pcie.c
+@@ -231,8 +231,13 @@ static int btintel_pcie_submit_rx(struct btintel_pcie_data *data)
+ static int btintel_pcie_start_rx(struct btintel_pcie_data *data)
+ {
+ 	int i, ret;
++	struct rxq *rxq = &data->rxq;
++
++	/* Post (BTINTEL_PCIE_RX_DESCS_COUNT - 3) buffers to work around a
++	 * hardware issue that leads to a race condition in the firmware.
++	 */
+ 
+-	for (i = 0; i < BTINTEL_PCIE_RX_MAX_QUEUE; i++) {
++	for (i = 0; i < rxq->count - 3; i++) {
+ 		ret = btintel_pcie_submit_rx(data);
+ 		if (ret)
+ 			return ret;
+@@ -1147,8 +1152,8 @@ static int btintel_pcie_alloc(struct btintel_pcie_data *data)
+ 	 *  + size of index * Number of queues(2) * type of index array(4)
+ 	 *  + size of context information
+ 	 */
+-	total = (sizeof(struct tfd) + sizeof(struct urbd0) + sizeof(struct frbd)
+-		+ sizeof(struct urbd1)) * BTINTEL_DESCS_COUNT;
++	total = (sizeof(struct tfd) + sizeof(struct urbd0)) * BTINTEL_PCIE_TX_DESCS_COUNT;
++	total += (sizeof(struct frbd) + sizeof(struct urbd1)) * BTINTEL_PCIE_RX_DESCS_COUNT;
+ 
+ 	/* Add the sum of size of index array and size of ci struct */
+ 	total += (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 4) + sizeof(struct ctx_info);
+@@ -1173,36 +1178,36 @@ static int btintel_pcie_alloc(struct btintel_pcie_data *data)
+ 	data->dma_v_addr = v_addr;
+ 
+ 	/* Setup descriptor count */
+-	data->txq.count = BTINTEL_DESCS_COUNT;
+-	data->rxq.count = BTINTEL_DESCS_COUNT;
++	data->txq.count = BTINTEL_PCIE_TX_DESCS_COUNT;
++	data->rxq.count = BTINTEL_PCIE_RX_DESCS_COUNT;
+ 
+ 	/* Setup tfds */
+ 	data->txq.tfds_p_addr = p_addr;
+ 	data->txq.tfds = v_addr;
+ 
+-	p_addr += (sizeof(struct tfd) * BTINTEL_DESCS_COUNT);
+-	v_addr += (sizeof(struct tfd) * BTINTEL_DESCS_COUNT);
++	p_addr += (sizeof(struct tfd) * BTINTEL_PCIE_TX_DESCS_COUNT);
++	v_addr += (sizeof(struct tfd) * BTINTEL_PCIE_TX_DESCS_COUNT);
+ 
+ 	/* Setup urbd0 */
+ 	data->txq.urbd0s_p_addr = p_addr;
+ 	data->txq.urbd0s = v_addr;
+ 
+-	p_addr += (sizeof(struct urbd0) * BTINTEL_DESCS_COUNT);
+-	v_addr += (sizeof(struct urbd0) * BTINTEL_DESCS_COUNT);
++	p_addr += (sizeof(struct urbd0) * BTINTEL_PCIE_TX_DESCS_COUNT);
++	v_addr += (sizeof(struct urbd0) * BTINTEL_PCIE_TX_DESCS_COUNT);
+ 
+ 	/* Setup FRBD*/
+ 	data->rxq.frbds_p_addr = p_addr;
+ 	data->rxq.frbds = v_addr;
+ 
+-	p_addr += (sizeof(struct frbd) * BTINTEL_DESCS_COUNT);
+-	v_addr += (sizeof(struct frbd) * BTINTEL_DESCS_COUNT);
++	p_addr += (sizeof(struct frbd) * BTINTEL_PCIE_RX_DESCS_COUNT);
++	v_addr += (sizeof(struct frbd) * BTINTEL_PCIE_RX_DESCS_COUNT);
+ 
+ 	/* Setup urbd1 */
+ 	data->rxq.urbd1s_p_addr = p_addr;
+ 	data->rxq.urbd1s = v_addr;
+ 
+-	p_addr += (sizeof(struct urbd1) * BTINTEL_DESCS_COUNT);
+-	v_addr += (sizeof(struct urbd1) * BTINTEL_DESCS_COUNT);
++	p_addr += (sizeof(struct urbd1) * BTINTEL_PCIE_RX_DESCS_COUNT);
++	v_addr += (sizeof(struct urbd1) * BTINTEL_PCIE_RX_DESCS_COUNT);
+ 
+ 	/* Setup data buffers for txq */
+ 	err = btintel_pcie_setup_txq_bufs(data, &data->txq);
+diff --git a/drivers/bluetooth/btintel_pcie.h b/drivers/bluetooth/btintel_pcie.h
+index 8b7824ad005a2a..ee0eec0237afda 100644
+--- a/drivers/bluetooth/btintel_pcie.h
++++ b/drivers/bluetooth/btintel_pcie.h
+@@ -81,8 +81,11 @@ enum {
+ /* Default interrupt timeout in msec */
+ #define BTINTEL_DEFAULT_INTR_TIMEOUT_MS	3000
+ 
+-/* The number of descriptors in TX/RX queues */
+-#define BTINTEL_DESCS_COUNT	16
++/* The number of descriptors in TX queues */
++#define BTINTEL_PCIE_TX_DESCS_COUNT	32
++
++/* The number of descriptors in RX queues */
++#define BTINTEL_PCIE_RX_DESCS_COUNT	64
+ 
+ /* Number of Queue for TX and RX
+  * It indicates the index of the IA(Index Array)
+@@ -104,9 +107,6 @@ enum {
+ /* Doorbell vector for TFD */
+ #define BTINTEL_PCIE_TX_DB_VEC	0
+ 
+-/* Number of pending RX requests for downlink */
+-#define BTINTEL_PCIE_RX_MAX_QUEUE	6
+-
+ /* Doorbell vector for FRBD */
+ #define BTINTEL_PCIE_RX_DB_VEC	513
+ 
+diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
+index 930d8a3ba722b3..58d16ff166c2db 100644
+--- a/drivers/bus/fsl-mc/fsl-mc-bus.c
++++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
+@@ -905,8 +905,10 @@ int fsl_mc_device_add(struct fsl_mc_obj_desc *obj_desc,
+ 
+ error_cleanup_dev:
+ 	kfree(mc_dev->regions);
+-	kfree(mc_bus);
+-	kfree(mc_dev);
++	if (mc_bus)
++		kfree(mc_bus);
++	else
++		kfree(mc_dev);
+ 
+ 	return error;
+ }
+diff --git a/drivers/clk/bcm/clk-raspberrypi.c b/drivers/clk/bcm/clk-raspberrypi.c
+index a18a8768feb405..6cb26b6e7347d4 100644
+--- a/drivers/clk/bcm/clk-raspberrypi.c
++++ b/drivers/clk/bcm/clk-raspberrypi.c
+@@ -271,6 +271,8 @@ static struct clk_hw *raspberrypi_clk_register(struct raspberrypi_clk *rpi,
+ 	init.name = devm_kasprintf(rpi->dev, GFP_KERNEL,
+ 				   "fw-clk-%s",
+ 				   rpi_firmware_clk_names[id]);
++	if (!init.name)
++		return ERR_PTR(-ENOMEM);
+ 	init.ops = &raspberrypi_firmware_clk_ops;
+ 	init.flags = CLK_GET_RATE_NOCACHE;
+ 
+diff --git a/drivers/clk/qcom/camcc-sm6350.c b/drivers/clk/qcom/camcc-sm6350.c
+index f6634cc8663ef6..418668184ec355 100644
+--- a/drivers/clk/qcom/camcc-sm6350.c
++++ b/drivers/clk/qcom/camcc-sm6350.c
+@@ -1694,6 +1694,9 @@ static struct clk_branch camcc_sys_tmr_clk = {
+ 
+ static struct gdsc bps_gdsc = {
+ 	.gdscr = 0x6004,
++	.en_rest_wait_val = 0x2,
++	.en_few_wait_val = 0x2,
++	.clk_dis_wait_val = 0xf,
+ 	.pd = {
+ 		.name = "bps_gdsc",
+ 	},
+@@ -1703,6 +1706,9 @@ static struct gdsc bps_gdsc = {
+ 
+ static struct gdsc ipe_0_gdsc = {
+ 	.gdscr = 0x7004,
++	.en_rest_wait_val = 0x2,
++	.en_few_wait_val = 0x2,
++	.clk_dis_wait_val = 0xf,
+ 	.pd = {
+ 		.name = "ipe_0_gdsc",
+ 	},
+@@ -1712,6 +1718,9 @@ static struct gdsc ipe_0_gdsc = {
+ 
+ static struct gdsc ife_0_gdsc = {
+ 	.gdscr = 0x9004,
++	.en_rest_wait_val = 0x2,
++	.en_few_wait_val = 0x2,
++	.clk_dis_wait_val = 0xf,
+ 	.pd = {
+ 		.name = "ife_0_gdsc",
+ 	},
+@@ -1720,6 +1729,9 @@ static struct gdsc ife_0_gdsc = {
+ 
+ static struct gdsc ife_1_gdsc = {
+ 	.gdscr = 0xa004,
++	.en_rest_wait_val = 0x2,
++	.en_few_wait_val = 0x2,
++	.clk_dis_wait_val = 0xf,
+ 	.pd = {
+ 		.name = "ife_1_gdsc",
+ 	},
+@@ -1728,6 +1740,9 @@ static struct gdsc ife_1_gdsc = {
+ 
+ static struct gdsc ife_2_gdsc = {
+ 	.gdscr = 0xb004,
++	.en_rest_wait_val = 0x2,
++	.en_few_wait_val = 0x2,
++	.clk_dis_wait_val = 0xf,
+ 	.pd = {
+ 		.name = "ife_2_gdsc",
+ 	},
+@@ -1736,6 +1751,9 @@ static struct gdsc ife_2_gdsc = {
+ 
+ static struct gdsc titan_top_gdsc = {
+ 	.gdscr = 0x14004,
++	.en_rest_wait_val = 0x2,
++	.en_few_wait_val = 0x2,
++	.clk_dis_wait_val = 0xf,
+ 	.pd = {
+ 		.name = "titan_top_gdsc",
+ 	},
+diff --git a/drivers/clk/qcom/dispcc-sm6350.c b/drivers/clk/qcom/dispcc-sm6350.c
+index 2bc6b5f99f5725..d52fd4b49a02f2 100644
+--- a/drivers/clk/qcom/dispcc-sm6350.c
++++ b/drivers/clk/qcom/dispcc-sm6350.c
+@@ -680,6 +680,9 @@ static struct clk_branch disp_cc_xo_clk = {
+ 
+ static struct gdsc mdss_gdsc = {
+ 	.gdscr = 0x1004,
++	.en_rest_wait_val = 0x2,
++	.en_few_wait_val = 0x2,
++	.clk_dis_wait_val = 0xf,
+ 	.pd = {
+ 		.name = "mdss_gdsc",
+ 	},
+diff --git a/drivers/clk/qcom/gcc-msm8939.c b/drivers/clk/qcom/gcc-msm8939.c
+index 7431c9a65044f8..45193b3d714bab 100644
+--- a/drivers/clk/qcom/gcc-msm8939.c
++++ b/drivers/clk/qcom/gcc-msm8939.c
+@@ -432,7 +432,7 @@ static const struct parent_map gcc_xo_gpll0_gpll1a_gpll6_sleep_map[] = {
+ 	{ P_XO, 0 },
+ 	{ P_GPLL0, 1 },
+ 	{ P_GPLL1_AUX, 2 },
+-	{ P_GPLL6, 2 },
++	{ P_GPLL6, 3 },
+ 	{ P_SLEEP_CLK, 6 },
+ };
+ 
+@@ -1113,7 +1113,7 @@ static struct clk_rcg2 jpeg0_clk_src = {
+ };
+ 
+ static const struct freq_tbl ftbl_gcc_camss_mclk0_1_clk[] = {
+-	F(24000000, P_GPLL0, 1, 1, 45),
++	F(24000000, P_GPLL6, 1, 1, 45),
+ 	F(66670000, P_GPLL0, 12, 0, 0),
+ 	{ }
+ };
+diff --git a/drivers/clk/qcom/gcc-sm6350.c b/drivers/clk/qcom/gcc-sm6350.c
+index 74346dc026068a..a4d6dff9d0f7f1 100644
+--- a/drivers/clk/qcom/gcc-sm6350.c
++++ b/drivers/clk/qcom/gcc-sm6350.c
+@@ -2320,6 +2320,9 @@ static struct clk_branch gcc_video_xo_clk = {
+ 
+ static struct gdsc usb30_prim_gdsc = {
+ 	.gdscr = 0x1a004,
++	.en_rest_wait_val = 0x2,
++	.en_few_wait_val = 0x2,
++	.clk_dis_wait_val = 0xf,
+ 	.pd = {
+ 		.name = "usb30_prim_gdsc",
+ 	},
+@@ -2328,6 +2331,9 @@ static struct gdsc usb30_prim_gdsc = {
+ 
+ static struct gdsc ufs_phy_gdsc = {
+ 	.gdscr = 0x3a004,
++	.en_rest_wait_val = 0x2,
++	.en_few_wait_val = 0x2,
++	.clk_dis_wait_val = 0xf,
+ 	.pd = {
+ 		.name = "ufs_phy_gdsc",
+ 	},
+diff --git a/drivers/clk/qcom/gpucc-sm6350.c b/drivers/clk/qcom/gpucc-sm6350.c
+index 1e12ad8948dbd8..644bdc41892c66 100644
+--- a/drivers/clk/qcom/gpucc-sm6350.c
++++ b/drivers/clk/qcom/gpucc-sm6350.c
+@@ -412,6 +412,9 @@ static struct clk_branch gpu_cc_gx_vsense_clk = {
+ static struct gdsc gpu_cx_gdsc = {
+ 	.gdscr = 0x106c,
+ 	.gds_hw_ctrl = 0x1540,
++	.en_rest_wait_val = 0x2,
++	.en_few_wait_val = 0x2,
++	.clk_dis_wait_val = 0x8,
+ 	.pd = {
+ 		.name = "gpu_cx_gdsc",
+ 	},
+@@ -422,6 +425,9 @@ static struct gdsc gpu_cx_gdsc = {
+ static struct gdsc gpu_gx_gdsc = {
+ 	.gdscr = 0x100c,
+ 	.clamp_io_ctrl = 0x1508,
++	.en_rest_wait_val = 0x2,
++	.en_few_wait_val = 0x2,
++	.clk_dis_wait_val = 0x2,
+ 	.pd = {
+ 		.name = "gpu_gx_gdsc",
+ 		.power_on = gdsc_gx_do_nothing_enable,
+diff --git a/drivers/counter/interrupt-cnt.c b/drivers/counter/interrupt-cnt.c
+index 229473855c5b38..bc762ba87a19b6 100644
+--- a/drivers/counter/interrupt-cnt.c
++++ b/drivers/counter/interrupt-cnt.c
+@@ -3,12 +3,14 @@
+  * Copyright (c) 2021 Pengutronix, Oleksij Rempel <kernel@pengutronix.de>
+  */
+ 
++#include <linux/cleanup.h>
+ #include <linux/counter.h>
+ #include <linux/gpio/consumer.h>
+ #include <linux/interrupt.h>
+ #include <linux/irq.h>
+ #include <linux/mod_devicetable.h>
+ #include <linux/module.h>
++#include <linux/mutex.h>
+ #include <linux/platform_device.h>
+ #include <linux/types.h>
+ 
+@@ -19,6 +21,7 @@ struct interrupt_cnt_priv {
+ 	struct gpio_desc *gpio;
+ 	int irq;
+ 	bool enabled;
++	struct mutex lock;
+ 	struct counter_signal signals;
+ 	struct counter_synapse synapses;
+ 	struct counter_count cnts;
+@@ -41,6 +44,8 @@ static int interrupt_cnt_enable_read(struct counter_device *counter,
+ {
+ 	struct interrupt_cnt_priv *priv = counter_priv(counter);
+ 
++	guard(mutex)(&priv->lock);
++
+ 	*enable = priv->enabled;
+ 
+ 	return 0;
+@@ -51,6 +56,8 @@ static int interrupt_cnt_enable_write(struct counter_device *counter,
+ {
+ 	struct interrupt_cnt_priv *priv = counter_priv(counter);
+ 
++	guard(mutex)(&priv->lock);
++
+ 	if (priv->enabled == enable)
+ 		return 0;
+ 
+@@ -227,6 +234,8 @@ static int interrupt_cnt_probe(struct platform_device *pdev)
+ 	if (ret)
+ 		return ret;
+ 
++	mutex_init(&priv->lock);
++
+ 	ret = devm_counter_add(dev, counter);
+ 	if (ret < 0)
+ 		return dev_err_probe(dev, ret, "Failed to add counter\n");
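
guard(mutex) from <linux/cleanup.h> gives these accessors scope-based unlocking, so every return path is covered without an explicit mutex_unlock(). A sketch of how the write path reads with the guard in place, mirroring the driver's types:

/* Scope-based locking: the guard releases priv->lock automatically
 * when the function returns, on the early-out path as well.
 */
static int enable_write_sketch(struct interrupt_cnt_priv *priv, u8 enable)
{
	guard(mutex)(&priv->lock);

	if (priv->enabled == enable)
		return 0;		/* unlocked implicitly here */

	priv->enabled = enable;
	/* ... enable or disable the IRQ while still holding the lock ... */
	return 0;			/* and here */
}
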
+diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
+index 19b7fb4a93e86c..05f67661553c9a 100644
+--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
++++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
+@@ -275,13 +275,16 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req
+ 	} else {
+ 		if (nr_sgs > 0)
+ 			dma_unmap_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);
+-		dma_unmap_sg(ce->dev, areq->dst, nd, DMA_FROM_DEVICE);
++
++		if (nr_sgd > 0)
++			dma_unmap_sg(ce->dev, areq->dst, nd, DMA_FROM_DEVICE);
+ 	}
+ 
+ theend_iv:
+ 	if (areq->iv && ivsize > 0) {
+-		if (rctx->addr_iv)
++		if (!dma_mapping_error(ce->dev, rctx->addr_iv))
+ 			dma_unmap_single(ce->dev, rctx->addr_iv, rctx->ivlen, DMA_TO_DEVICE);
++
+ 		offset = areq->cryptlen - ivsize;
+ 		if (rctx->op_dir & CE_DECRYPTION) {
+ 			memcpy(areq->iv, chan->backup_iv, ivsize);
+diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
+index e55e58e164db32..fcc6832a065cbb 100644
+--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
++++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
+@@ -832,13 +832,12 @@ static int sun8i_ce_pm_init(struct sun8i_ce_dev *ce)
+ 	err = pm_runtime_set_suspended(ce->dev);
+ 	if (err)
+ 		return err;
+-	pm_runtime_enable(ce->dev);
+-	return err;
+-}
+ 
+-static void sun8i_ce_pm_exit(struct sun8i_ce_dev *ce)
+-{
+-	pm_runtime_disable(ce->dev);
++	err = devm_pm_runtime_enable(ce->dev);
++	if (err)
++		return err;
++
++	return 0;
+ }
+ 
+ static int sun8i_ce_get_clks(struct sun8i_ce_dev *ce)
+@@ -1041,7 +1040,7 @@ static int sun8i_ce_probe(struct platform_device *pdev)
+ 			       "sun8i-ce-ns", ce);
+ 	if (err) {
+ 		dev_err(ce->dev, "Cannot request CryptoEngine Non-secure IRQ (err=%d)\n", err);
+-		goto error_irq;
++		goto error_pm;
+ 	}
+ 
+ 	err = sun8i_ce_register_algs(ce);
+@@ -1082,8 +1081,6 @@ static int sun8i_ce_probe(struct platform_device *pdev)
+ 	return 0;
+ error_alg:
+ 	sun8i_ce_unregister_algs(ce);
+-error_irq:
+-	sun8i_ce_pm_exit(ce);
+ error_pm:
+ 	sun8i_ce_free_chanlist(ce, MAXFLOW - 1);
+ 	return err;
+@@ -1104,8 +1101,6 @@ static void sun8i_ce_remove(struct platform_device *pdev)
+ #endif
+ 
+ 	sun8i_ce_free_chanlist(ce, MAXFLOW - 1);
+-
+-	sun8i_ce_pm_exit(ce);
+ }
+ 
+ static const struct of_device_id sun8i_ce_crypto_of_match_table[] = {
+diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
+index 6072dd9f390b40..3f9d79ea01aaa6 100644
+--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
++++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
+@@ -343,9 +343,8 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
+ 	u32 common;
+ 	u64 byte_count;
+ 	__le32 *bf;
+-	void *buf = NULL;
++	void *buf, *result;
+ 	int j, i, todo;
+-	void *result = NULL;
+ 	u64 bs;
+ 	int digestsize;
+ 	dma_addr_t addr_res, addr_pad;
+@@ -365,14 +364,14 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
+ 	buf = kcalloc(2, bs, GFP_KERNEL | GFP_DMA);
+ 	if (!buf) {
+ 		err = -ENOMEM;
+-		goto theend;
++		goto err_out;
+ 	}
+ 	bf = (__le32 *)buf;
+ 
+ 	result = kzalloc(digestsize, GFP_KERNEL | GFP_DMA);
+ 	if (!result) {
+ 		err = -ENOMEM;
+-		goto theend;
++		goto err_free_buf;
+ 	}
+ 
+ 	flow = rctx->flow;
+@@ -398,7 +397,7 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
+ 	if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
+ 		dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
+ 		err = -EINVAL;
+-		goto theend;
++		goto err_free_result;
+ 	}
+ 
+ 	len = areq->nbytes;
+@@ -411,7 +410,7 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
+ 	if (len > 0) {
+ 		dev_err(ce->dev, "remaining len %d\n", len);
+ 		err = -EINVAL;
+-		goto theend;
++		goto err_unmap_src;
+ 	}
+ 	addr_res = dma_map_single(ce->dev, result, digestsize, DMA_FROM_DEVICE);
+ 	cet->t_dst[0].addr = desc_addr_val_le32(ce, addr_res);
+@@ -419,7 +418,7 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
+ 	if (dma_mapping_error(ce->dev, addr_res)) {
+ 		dev_err(ce->dev, "DMA map dest\n");
+ 		err = -EINVAL;
+-		goto theend;
++		goto err_unmap_src;
+ 	}
+ 
+ 	byte_count = areq->nbytes;
+@@ -441,7 +440,7 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
+ 	}
+ 	if (!j) {
+ 		err = -EINVAL;
+-		goto theend;
++		goto err_unmap_result;
+ 	}
+ 
+ 	addr_pad = dma_map_single(ce->dev, buf, j * 4, DMA_TO_DEVICE);
+@@ -450,7 +449,7 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
+ 	if (dma_mapping_error(ce->dev, addr_pad)) {
+ 		dev_err(ce->dev, "DMA error on padding SG\n");
+ 		err = -EINVAL;
+-		goto theend;
++		goto err_unmap_result;
+ 	}
+ 
+ 	if (ce->variant->hash_t_dlen_in_bits)
+@@ -463,16 +462,25 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
+ 	err = sun8i_ce_run_task(ce, flow, crypto_ahash_alg_name(tfm));
+ 
+ 	dma_unmap_single(ce->dev, addr_pad, j * 4, DMA_TO_DEVICE);
+-	dma_unmap_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);
++
++err_unmap_result:
+ 	dma_unmap_single(ce->dev, addr_res, digestsize, DMA_FROM_DEVICE);
++	if (!err)
++		memcpy(areq->result, result, algt->alg.hash.base.halg.digestsize);
+ 
++err_unmap_src:
++	dma_unmap_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);
+ 
+-	memcpy(areq->result, result, algt->alg.hash.base.halg.digestsize);
+-theend:
+-	kfree(buf);
++err_free_result:
+ 	kfree(result);
++
++err_free_buf:
++	kfree(buf);
++
++err_out:
+ 	local_bh_disable();
+ 	crypto_finalize_hash_request(engine, breq, err);
+ 	local_bh_enable();
++
+ 	return 0;
+ }
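
The hash-path rework replaces a single catch-all label with an unwind ladder: each label releases exactly one resource, and failures jump to the label that frees everything acquired so far, in reverse order of acquisition. The skeleton of the pattern, with hypothetical helpers:

#include <errno.h>

void *alloc_buf(void);
void *alloc_result(void);
int map_src(void);
void unmap_src(void);
int do_work(void);
void release(void *p);

static int run_sketch(void)
{
	void *buf, *result;
	int err;

	buf = alloc_buf();
	if (!buf)
		return -ENOMEM;

	result = alloc_result();
	if (!result) {
		err = -ENOMEM;
		goto err_free_buf;
	}

	if (map_src() < 0) {
		err = -EINVAL;
		goto err_free_result;
	}

	err = do_work();
	unmap_src();		/* success path falls through the ladder */
err_free_result:
	release(result);
err_free_buf:
	release(buf);
	return err;
}
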
+diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
+index 3b5c2af013d0da..83df4d71905318 100644
+--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
++++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
+@@ -308,8 +308,8 @@ struct sun8i_ce_hash_tfm_ctx {
+  * @flow:	the flow to use for this request
+  */
+ struct sun8i_ce_hash_reqctx {
+-	struct ahash_request fallback_req;
+ 	int flow;
++	struct ahash_request fallback_req; // keep at the end
+ };
+ 
+ /*
+diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
+index 9b9605ce8ee629..8831bcb230c2d4 100644
+--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
++++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
+@@ -141,7 +141,7 @@ static int sun8i_ss_setup_ivs(struct skcipher_request *areq)
+ 
+ 	/* we need to copy all IVs from source in case DMA is bi-directional */
+ 	while (sg && len) {
+-		if (sg_dma_len(sg) == 0) {
++		if (sg->length == 0) {
+ 			sg = sg_next(sg);
+ 			continue;
+ 		}
+diff --git a/drivers/crypto/marvell/cesa/cipher.c b/drivers/crypto/marvell/cesa/cipher.c
+index 0f37dfd42d8509..3876e3ce822f44 100644
+--- a/drivers/crypto/marvell/cesa/cipher.c
++++ b/drivers/crypto/marvell/cesa/cipher.c
+@@ -459,6 +459,9 @@ static int mv_cesa_skcipher_queue_req(struct skcipher_request *req,
+ 	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
+ 	struct mv_cesa_engine *engine;
+ 
++	if (!req->cryptlen)
++		return 0;
++
+ 	ret = mv_cesa_skcipher_req_init(req, tmpl);
+ 	if (ret)
+ 		return ret;
+diff --git a/drivers/crypto/marvell/cesa/hash.c b/drivers/crypto/marvell/cesa/hash.c
+index f150861ceaf695..6815eddc906812 100644
+--- a/drivers/crypto/marvell/cesa/hash.c
++++ b/drivers/crypto/marvell/cesa/hash.c
+@@ -663,7 +663,7 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
+ 	if (ret)
+ 		goto err_free_tdma;
+ 
+-	if (iter.src.sg) {
++	if (iter.base.len > iter.src.op_offset) {
+ 		/*
+ 		 * Add all the new data, inserting an operation block and
+ 		 * launch command between each full SRAM block-worth of
+diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
+index 7d89385c3c450c..38b54719587cf5 100644
+--- a/drivers/dma/ti/k3-udma.c
++++ b/drivers/dma/ti/k3-udma.c
+@@ -5572,7 +5572,8 @@ static int udma_probe(struct platform_device *pdev)
+ 		uc->config.dir = DMA_MEM_TO_MEM;
+ 		uc->name = devm_kasprintf(dev, GFP_KERNEL, "%s chan%d",
+ 					  dev_name(dev), i);
+-
++		if (!uc->name)
++			return -ENOMEM;
+ 		vchan_init(&uc->vc, &ud->ddev);
+ 		/* Use custom vchan completion handling */
+ 		tasklet_setup(&uc->vc.task, udma_vchan_complete);
+diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c
+index fbdf005bed3a49..ac4b3d95531c5d 100644
+--- a/drivers/edac/i10nm_base.c
++++ b/drivers/edac/i10nm_base.c
+@@ -95,7 +95,7 @@ static u32 offsets_demand2_spr[] = {0x22c70, 0x22d80, 0x22f18, 0x22d58, 0x22c64,
+ static u32 offsets_demand_spr_hbm0[] = {0x2a54, 0x2a60, 0x2b10, 0x2a58, 0x2a5c, 0x0ee0};
+ static u32 offsets_demand_spr_hbm1[] = {0x2e54, 0x2e60, 0x2f10, 0x2e58, 0x2e5c, 0x0fb0};
+ 
+-static void __enable_retry_rd_err_log(struct skx_imc *imc, int chan, bool enable,
++static void __enable_retry_rd_err_log(struct skx_imc *imc, int chan, bool enable, u32 *rrl_ctl,
+ 				      u32 *offsets_scrub, u32 *offsets_demand,
+ 				      u32 *offsets_demand2)
+ {
+@@ -108,10 +108,10 @@ static void __enable_retry_rd_err_log(struct skx_imc *imc, int chan, bool enable
+ 
+ 	if (enable) {
+ 		/* Save default configurations */
+-		imc->chan[chan].retry_rd_err_log_s = s;
+-		imc->chan[chan].retry_rd_err_log_d = d;
++		rrl_ctl[0] = s;
++		rrl_ctl[1] = d;
+ 		if (offsets_demand2)
+-			imc->chan[chan].retry_rd_err_log_d2 = d2;
++			rrl_ctl[2] = d2;
+ 
+ 		s &= ~RETRY_RD_ERR_LOG_NOOVER_UC;
+ 		s |=  RETRY_RD_ERR_LOG_EN;
+@@ -125,25 +125,25 @@ static void __enable_retry_rd_err_log(struct skx_imc *imc, int chan, bool enable
+ 		}
+ 	} else {
+ 		/* Restore default configurations */
+-		if (imc->chan[chan].retry_rd_err_log_s & RETRY_RD_ERR_LOG_UC)
++		if (rrl_ctl[0] & RETRY_RD_ERR_LOG_UC)
+ 			s |=  RETRY_RD_ERR_LOG_UC;
+-		if (imc->chan[chan].retry_rd_err_log_s & RETRY_RD_ERR_LOG_NOOVER)
++		if (rrl_ctl[0] & RETRY_RD_ERR_LOG_NOOVER)
+ 			s |=  RETRY_RD_ERR_LOG_NOOVER;
+-		if (!(imc->chan[chan].retry_rd_err_log_s & RETRY_RD_ERR_LOG_EN))
++		if (!(rrl_ctl[0] & RETRY_RD_ERR_LOG_EN))
+ 			s &= ~RETRY_RD_ERR_LOG_EN;
+-		if (imc->chan[chan].retry_rd_err_log_d & RETRY_RD_ERR_LOG_UC)
++		if (rrl_ctl[1] & RETRY_RD_ERR_LOG_UC)
+ 			d |=  RETRY_RD_ERR_LOG_UC;
+-		if (imc->chan[chan].retry_rd_err_log_d & RETRY_RD_ERR_LOG_NOOVER)
++		if (rrl_ctl[1] & RETRY_RD_ERR_LOG_NOOVER)
+ 			d |=  RETRY_RD_ERR_LOG_NOOVER;
+-		if (!(imc->chan[chan].retry_rd_err_log_d & RETRY_RD_ERR_LOG_EN))
++		if (!(rrl_ctl[1] & RETRY_RD_ERR_LOG_EN))
+ 			d &= ~RETRY_RD_ERR_LOG_EN;
+ 
+ 		if (offsets_demand2) {
+-			if (imc->chan[chan].retry_rd_err_log_d2 & RETRY_RD_ERR_LOG_UC)
++			if (rrl_ctl[2] & RETRY_RD_ERR_LOG_UC)
+ 				d2 |=  RETRY_RD_ERR_LOG_UC;
+-			if (!(imc->chan[chan].retry_rd_err_log_d2 & RETRY_RD_ERR_LOG_NOOVER))
++			if (!(rrl_ctl[2] & RETRY_RD_ERR_LOG_NOOVER))
+ 				d2 &=  ~RETRY_RD_ERR_LOG_NOOVER;
+-			if (!(imc->chan[chan].retry_rd_err_log_d2 & RETRY_RD_ERR_LOG_EN))
++			if (!(rrl_ctl[2] & RETRY_RD_ERR_LOG_EN))
+ 				d2 &= ~RETRY_RD_ERR_LOG_EN;
+ 		}
+ 	}
+@@ -157,6 +157,7 @@ static void __enable_retry_rd_err_log(struct skx_imc *imc, int chan, bool enable
+ static void enable_retry_rd_err_log(bool enable)
+ {
+ 	int i, j, imc_num, chan_num;
++	struct skx_channel *chan;
+ 	struct skx_imc *imc;
+ 	struct skx_dev *d;
+ 
+@@ -171,8 +172,9 @@ static void enable_retry_rd_err_log(bool enable)
+ 			if (!imc->mbase)
+ 				continue;
+ 
++			chan = d->imc[i].chan;
+ 			for (j = 0; j < chan_num; j++)
+-				__enable_retry_rd_err_log(imc, j, enable,
++				__enable_retry_rd_err_log(imc, j, enable, chan[j].rrl_ctl[0],
+ 							  res_cfg->offsets_scrub,
+ 							  res_cfg->offsets_demand,
+ 							  res_cfg->offsets_demand2);
+@@ -186,12 +188,13 @@ static void enable_retry_rd_err_log(bool enable)
+ 			if (!imc->mbase || !imc->hbm_mc)
+ 				continue;
+ 
++			chan = d->imc[i].chan;
+ 			for (j = 0; j < chan_num; j++) {
+-				__enable_retry_rd_err_log(imc, j, enable,
++				__enable_retry_rd_err_log(imc, j, enable, chan[j].rrl_ctl[0],
+ 							  res_cfg->offsets_scrub_hbm0,
+ 							  res_cfg->offsets_demand_hbm0,
+ 							  NULL);
+-				__enable_retry_rd_err_log(imc, j, enable,
++				__enable_retry_rd_err_log(imc, j, enable, chan[j].rrl_ctl[1],
+ 							  res_cfg->offsets_scrub_hbm1,
+ 							  res_cfg->offsets_demand_hbm1,
+ 							  NULL);
+diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c
+index 85ec3196664d30..88f5ff249f2e0e 100644
+--- a/drivers/edac/skx_common.c
++++ b/drivers/edac/skx_common.c
+@@ -115,6 +115,7 @@ EXPORT_SYMBOL_GPL(skx_adxl_get);
+ 
+ void skx_adxl_put(void)
+ {
++	adxl_component_count = 0;
+ 	kfree(adxl_values);
+ 	kfree(adxl_msg);
+ }
+diff --git a/drivers/edac/skx_common.h b/drivers/edac/skx_common.h
+index 849198fd14da69..f40eb6e4f63199 100644
+--- a/drivers/edac/skx_common.h
++++ b/drivers/edac/skx_common.h
+@@ -79,6 +79,9 @@
+  */
+ #define MCACOD_EXT_MEM_ERR	0x280
+ 
++/* Max RRL register sets per {,sub-,pseudo-}channel. */
++#define NUM_RRL_SET		3
++
+ /*
+  * Each cpu socket contains some pci devices that provide global
+  * information, and also some that are local to each of the two
+@@ -117,9 +120,11 @@ struct skx_dev {
+ 		struct skx_channel {
+ 			struct pci_dev	*cdev;
+ 			struct pci_dev	*edev;
+-			u32 retry_rd_err_log_s;
+-			u32 retry_rd_err_log_d;
+-			u32 retry_rd_err_log_d2;
++			/*
++			 * Two groups of RRL control registers per channel to save default RRL
++			 * settings of two {sub-,pseudo-}channels in Linux RRL control mode.
++			 */
++			u32 rrl_ctl[2][NUM_RRL_SET];
+ 			struct skx_dimm {
+ 				u8 close_pg;
+ 				u8 bank_xor_enable;
+diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
+index 9f35f69e0f9e2b..f7044bf53d1fcb 100644
+--- a/drivers/firmware/Kconfig
++++ b/drivers/firmware/Kconfig
+@@ -31,7 +31,6 @@ config ARM_SCPI_PROTOCOL
+ config ARM_SDE_INTERFACE
+ 	bool "ARM Software Delegated Exception Interface (SDEI)"
+ 	depends on ARM64
+-	depends on ACPI_APEI_GHES
+ 	help
+ 	  The Software Delegated Exception Interface (SDEI) is an ARM
+ 	  standard for registering callbacks from the platform firmware
+diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c
+index 3e8051fe829657..71e2a9a89f6ada 100644
+--- a/drivers/firmware/arm_sdei.c
++++ b/drivers/firmware/arm_sdei.c
+@@ -1062,13 +1062,12 @@ static bool __init sdei_present_acpi(void)
+ 	return true;
+ }
+ 
+-void __init sdei_init(void)
++void __init acpi_sdei_init(void)
+ {
+ 	struct platform_device *pdev;
+ 	int ret;
+ 
+-	ret = platform_driver_register(&sdei_driver);
+-	if (ret || !sdei_present_acpi())
++	if (!sdei_present_acpi())
+ 		return;
+ 
+ 	pdev = platform_device_register_simple(sdei_driver.driver.name,
+@@ -1081,6 +1080,12 @@ void __init sdei_init(void)
+ 	}
+ }
+ 
++static int __init sdei_init(void)
++{
++	return platform_driver_register(&sdei_driver);
++}
++arch_initcall(sdei_init);
++
+ int sdei_event_handler(struct pt_regs *regs,
+ 		       struct sdei_registered_event *arg)
+ {
+diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
+index de659f6a815fd4..1ad414da9920a0 100644
+--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
++++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
+@@ -603,6 +603,7 @@ efi_status_t efi_load_initrd_cmdline(efi_loaded_image_t *image,
+  * @image:	EFI loaded image protocol
+  * @soft_limit:	preferred address for loading the initrd
+  * @hard_limit:	upper limit address for loading the initrd
++ * @out:	pointer to store the address of the initrd table
+  *
+  * Return:	status code
+  */
+diff --git a/drivers/firmware/psci/psci.c b/drivers/firmware/psci/psci.c
+index 2328ca58bba61f..d6701d81cf6807 100644
+--- a/drivers/firmware/psci/psci.c
++++ b/drivers/firmware/psci/psci.c
+@@ -759,8 +759,10 @@ int __init psci_dt_init(void)
+ 
+ 	np = of_find_matching_node_and_match(NULL, psci_of_match, &matched_np);
+ 
+-	if (!np || !of_device_is_available(np))
++	if (!np || !of_device_is_available(np)) {
++		of_node_put(np);
+ 		return -ENODEV;
++	}
+ 
+ 	init_fn = (psci_initcall_t)matched_np->data;
+ 	ret = init_fn(np);
+diff --git a/drivers/fpga/tests/fpga-mgr-test.c b/drivers/fpga/tests/fpga-mgr-test.c
+index 9cb37aefbac4b2..1902ebf5a298f8 100644
+--- a/drivers/fpga/tests/fpga-mgr-test.c
++++ b/drivers/fpga/tests/fpga-mgr-test.c
+@@ -263,6 +263,7 @@ static void fpga_mgr_test_img_load_sgt(struct kunit *test)
+ 	img_buf = init_test_buffer(test, IMAGE_SIZE);
+ 
+ 	sgt = kunit_kzalloc(test, sizeof(*sgt), GFP_KERNEL);
++	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sgt);
+ 	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+ 	KUNIT_ASSERT_EQ(test, ret, 0);
+ 	sg_init_one(sgt->sgl, img_buf, IMAGE_SIZE);
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
+index 5c54c9fd446196..a76fc15a55f5bb 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
+@@ -144,6 +144,10 @@ int atomctrl_initialize_mc_reg_table(
+ 	vram_info = (ATOM_VRAM_INFO_HEADER_V2_1 *)
+ 		smu_atom_get_data_table(hwmgr->adev,
+ 				GetIndexIntoMasterTable(DATA, VRAM_Info), &size, &frev, &crev);
++	if (!vram_info) {
++		pr_err("Could not retrieve the VramInfo table!");
++		return -EINVAL;
++	}
+ 
+ 	if (module_index >= vram_info->ucNumOfVRAMModule) {
+ 		pr_err("Invalid VramInfo table.");
+@@ -181,6 +185,10 @@ int atomctrl_initialize_mc_reg_table_v2_2(
+ 	vram_info = (ATOM_VRAM_INFO_HEADER_V2_2 *)
+ 		smu_atom_get_data_table(hwmgr->adev,
+ 				GetIndexIntoMasterTable(DATA, VRAM_Info), &size, &frev, &crev);
++	if (!vram_info) {
++		pr_err("Could not retrieve the VramInfo table!");
++		return -EINVAL;
++	}
+ 
+ 	if (module_index >= vram_info->ucNumOfVRAMModule) {
+ 		pr_err("Invalid VramInfo table.");
+diff --git a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
+index 4d1d40e1f1b4d1..748bed8acd2d95 100644
+--- a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
++++ b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
+@@ -879,7 +879,11 @@ static int lt9611uxc_probe(struct i2c_client *client)
+ 		}
+ 	}
+ 
+-	return lt9611uxc_audio_init(dev, lt9611uxc);
++	ret = lt9611uxc_audio_init(dev, lt9611uxc);
++	if (ret)
++		goto err_remove_bridge;
++
++	return 0;
+ 
+ err_remove_bridge:
+ 	free_irq(client->irq, lt9611uxc);
+diff --git a/drivers/gpu/drm/i915/display/intel_psr_regs.h b/drivers/gpu/drm/i915/display/intel_psr_regs.h
+index 642bb15fb5475a..25c0424e34db29 100644
+--- a/drivers/gpu/drm/i915/display/intel_psr_regs.h
++++ b/drivers/gpu/drm/i915/display/intel_psr_regs.h
+@@ -314,8 +314,8 @@
+ #define  PORT_ALPM_LFPS_CTL_LFPS_HALF_CYCLE_DURATION_MASK	REG_GENMASK(20, 16)
+ #define  PORT_ALPM_LFPS_CTL_LFPS_HALF_CYCLE_DURATION(val)	REG_FIELD_PREP(PORT_ALPM_LFPS_CTL_LFPS_HALF_CYCLE_DURATION_MASK, val)
+ #define  PORT_ALPM_LFPS_CTL_FIRST_LFPS_HALF_CYCLE_DURATION_MASK	REG_GENMASK(12, 8)
+-#define  PORT_ALPM_LFPS_CTL_FIRST_LFPS_HALF_CYCLE_DURATION(val)	REG_FIELD_PREP(PORT_ALPM_LFPS_CTL_LFPS_HALF_CYCLE_DURATION_MASK, val)
++#define  PORT_ALPM_LFPS_CTL_FIRST_LFPS_HALF_CYCLE_DURATION(val)	REG_FIELD_PREP(PORT_ALPM_LFPS_CTL_FIRST_LFPS_HALF_CYCLE_DURATION_MASK, val)
+ #define  PORT_ALPM_LFPS_CTL_LAST_LFPS_HALF_CYCLE_DURATION_MASK	REG_GENMASK(4, 0)
+-#define  PORT_ALPM_LFPS_CTL_LAST_LFPS_HALF_CYCLE_DURATION(val)	REG_FIELD_PREP(PORT_ALPM_LFPS_CTL_LFPS_HALF_CYCLE_DURATION_MASK, val)
++#define  PORT_ALPM_LFPS_CTL_LAST_LFPS_HALF_CYCLE_DURATION(val)	REG_FIELD_PREP(PORT_ALPM_LFPS_CTL_LAST_LFPS_HALF_CYCLE_DURATION_MASK, val)
+ 
+ #endif /* __INTEL_PSR_REGS_H__ */
+diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+index 8aaadbb702df6d..b48373b1667793 100644
+--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
++++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+@@ -633,7 +633,7 @@ static int guc_submission_send_busy_loop(struct intel_guc *guc,
+ 		atomic_inc(&guc->outstanding_submission_g2h);
+ 
+ 	ret = intel_guc_send_busy_loop(guc, action, len, g2h_len_dw, loop);
+-	if (ret)
++	if (ret && g2h_len_dw)
+ 		atomic_dec(&guc->outstanding_submission_g2h);
+ 
+ 	return ret;
+@@ -3422,18 +3422,29 @@ static inline int guc_lrc_desc_unpin(struct intel_context *ce)
+ 	 * GuC is active, lets destroy this context, but at this point we can still be racing
+ 	 * with suspend, so we undo everything if the H2G fails in deregister_context so
+ 	 * that GuC reset will find this context during clean up.
++	 *
++	 * There is a race condition where the reset code could have altered
++	 * this context's state and done a wakeref put before we try to
++	 * deregister it here. So check if the context is still set to be
++	 * destroyed before undoing earlier changes, to avoid two wakeref puts
++	 * on the same context.
+ 	 */
+ 	ret = deregister_context(ce, ce->guc_id.id);
+ 	if (ret) {
++		bool pending_destroyed;
+ 		spin_lock_irqsave(&ce->guc_state.lock, flags);
+-		set_context_registered(ce);
+-		clr_context_destroyed(ce);
++		pending_destroyed = context_destroyed(ce);
++		if (pending_destroyed) {
++			set_context_registered(ce);
++			clr_context_destroyed(ce);
++		}
+ 		spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+ 		/*
+ 		 * As gt-pm is awake at function entry, intel_wakeref_put_async merely decrements
+ 		 * the wakeref immediately but per function spec usage call this after unlock.
+ 		 */
+-		intel_wakeref_put_async(&gt->wakeref);
++		if (pending_destroyed)
++			intel_wakeref_put_async(&gt->wakeref);
+ 	}
+ 
+ 	return ret;
+diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+index 4e93fd075e03cc..42e62b0409612e 100644
+--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
++++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+@@ -463,7 +463,7 @@ static int mtk_drm_kms_init(struct drm_device *drm)
+ 
+ 	ret = drmm_mode_config_init(drm);
+ 	if (ret)
+-		goto put_mutex_dev;
++		return ret;
+ 
+ 	drm->mode_config.min_width = 64;
+ 	drm->mode_config.min_height = 64;
+@@ -481,8 +481,11 @@ static int mtk_drm_kms_init(struct drm_device *drm)
+ 	for (i = 0; i < private->data->mmsys_dev_num; i++) {
+ 		drm->dev_private = private->all_drm_private[i];
+ 		ret = component_bind_all(private->all_drm_private[i]->dev, drm);
+-		if (ret)
+-			goto put_mutex_dev;
++		if (ret) {
++			while (--i >= 0)
++				component_unbind_all(private->all_drm_private[i]->dev, drm);
++			return ret;
++		}
+ 	}
+ 
+ 	/*
+@@ -575,9 +578,6 @@ static int mtk_drm_kms_init(struct drm_device *drm)
+ err_component_unbind:
+ 	for (i = 0; i < private->data->mmsys_dev_num; i++)
+ 		component_unbind_all(private->all_drm_private[i]->dev, drm);
+-put_mutex_dev:
+-	for (i = 0; i < private->data->mmsys_dev_num; i++)
+-		put_device(private->all_drm_private[i]->mutex_dev);
+ 
+ 	return ret;
+ }
+@@ -648,8 +648,10 @@ static int mtk_drm_bind(struct device *dev)
+ 		return 0;
+ 
+ 	drm = drm_dev_alloc(&mtk_drm_driver, dev);
+-	if (IS_ERR(drm))
+-		return PTR_ERR(drm);
++	if (IS_ERR(drm)) {
++		ret = PTR_ERR(drm);
++		goto err_put_dev;
++	}
+ 
+ 	private->drm_master = true;
+ 	drm->dev_private = private;
+@@ -675,18 +677,31 @@ static int mtk_drm_bind(struct device *dev)
+ 	drm_dev_put(drm);
+ 	for (i = 0; i < private->data->mmsys_dev_num; i++)
+ 		private->all_drm_private[i]->drm = NULL;
++err_put_dev:
++	for (i = 0; i < private->data->mmsys_dev_num; i++) {
++		/* For device_find_child in mtk_drm_get_all_priv() */
++		put_device(private->all_drm_private[i]->dev);
++	}
++	put_device(private->mutex_dev);
+ 	return ret;
+ }
+ 
+ static void mtk_drm_unbind(struct device *dev)
+ {
+ 	struct mtk_drm_private *private = dev_get_drvdata(dev);
++	int i;
+ 
+ 	/* for multi mmsys dev, unregister drm dev in mmsys master */
+ 	if (private->drm_master) {
+ 		drm_dev_unregister(private->drm);
+ 		mtk_drm_kms_deinit(private->drm);
+ 		drm_dev_put(private->drm);
++
++		for (i = 0; i < private->data->mmsys_dev_num; i++) {
++			/* For device_find_child in mtk_drm_get_all_priv() */
++			put_device(private->all_drm_private[i]->dev);
++		}
++		put_device(private->mutex_dev);
+ 	}
+ 	private->mtk_drm_bound = false;
+ 	private->drm_master = false;
+diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
+index 4bd0baa2a4f555..f59452e8fa6fbf 100644
+--- a/drivers/gpu/drm/meson/meson_drv.c
++++ b/drivers/gpu/drm/meson/meson_drv.c
+@@ -168,7 +168,7 @@ static const struct meson_drm_soc_attr meson_drm_soc_attrs[] = {
+ 	/* S805X/S805Y HDMI PLL won't lock for HDMI PHY freq > 1,65GHz */
+ 	{
+ 		.limits = {
+-			.max_hdmi_phy_freq = 1650000,
++			.max_hdmi_phy_freq = 1650000000,
+ 		},
+ 		.attrs = (const struct soc_device_attribute []) {
+ 			{ .soc_id = "GXL (S805*)", },
+diff --git a/drivers/gpu/drm/meson/meson_drv.h b/drivers/gpu/drm/meson/meson_drv.h
+index 3f9345c14f31c1..be4b0e4df6e13e 100644
+--- a/drivers/gpu/drm/meson/meson_drv.h
++++ b/drivers/gpu/drm/meson/meson_drv.h
+@@ -37,7 +37,7 @@ struct meson_drm_match_data {
+ };
+ 
+ struct meson_drm_soc_limits {
+-	unsigned int max_hdmi_phy_freq;
++	unsigned long long max_hdmi_phy_freq;
+ };
+ 
+ struct meson_drm {
+diff --git a/drivers/gpu/drm/meson/meson_encoder_hdmi.c b/drivers/gpu/drm/meson/meson_encoder_hdmi.c
+index 0593a1cde906ff..2ad8383fcaed5d 100644
+--- a/drivers/gpu/drm/meson/meson_encoder_hdmi.c
++++ b/drivers/gpu/drm/meson/meson_encoder_hdmi.c
+@@ -70,12 +70,12 @@ static void meson_encoder_hdmi_set_vclk(struct meson_encoder_hdmi *encoder_hdmi,
+ {
+ 	struct meson_drm *priv = encoder_hdmi->priv;
+ 	int vic = drm_match_cea_mode(mode);
+-	unsigned int phy_freq;
+-	unsigned int vclk_freq;
+-	unsigned int venc_freq;
+-	unsigned int hdmi_freq;
++	unsigned long long phy_freq;
++	unsigned long long vclk_freq;
++	unsigned long long venc_freq;
++	unsigned long long hdmi_freq;
+ 
+-	vclk_freq = mode->clock;
++	vclk_freq = mode->clock * 1000ULL;
+ 
+ 	/* For 420, pixel clock is half unlike venc clock */
+ 	if (encoder_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24)
+@@ -107,7 +107,8 @@ static void meson_encoder_hdmi_set_vclk(struct meson_encoder_hdmi *encoder_hdmi,
+ 	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+ 		venc_freq /= 2;
+ 
+-	dev_dbg(priv->dev, "vclk:%d phy=%d venc=%d hdmi=%d enci=%d\n",
++	dev_dbg(priv->dev,
++		"phy:%lluHz vclk=%lluHz venc=%lluHz hdmi=%lluHz enci=%d\n",
+ 		phy_freq, vclk_freq, venc_freq, hdmi_freq,
+ 		priv->venc.hdmi_use_enci);
+ 
+@@ -122,10 +123,11 @@ static enum drm_mode_status meson_encoder_hdmi_mode_valid(struct drm_bridge *bri
+ 	struct meson_encoder_hdmi *encoder_hdmi = bridge_to_meson_encoder_hdmi(bridge);
+ 	struct meson_drm *priv = encoder_hdmi->priv;
+ 	bool is_hdmi2_sink = display_info->hdmi.scdc.supported;
+-	unsigned int phy_freq;
+-	unsigned int vclk_freq;
+-	unsigned int venc_freq;
+-	unsigned int hdmi_freq;
++	unsigned long long clock = mode->clock * 1000ULL;
++	unsigned long long phy_freq;
++	unsigned long long vclk_freq;
++	unsigned long long venc_freq;
++	unsigned long long hdmi_freq;
+ 	int vic = drm_match_cea_mode(mode);
+ 	enum drm_mode_status status;
+ 
+@@ -144,12 +146,12 @@ static enum drm_mode_status meson_encoder_hdmi_mode_valid(struct drm_bridge *bri
+ 		if (status != MODE_OK)
+ 			return status;
+ 
+-		return meson_vclk_dmt_supported_freq(priv, mode->clock);
++		return meson_vclk_dmt_supported_freq(priv, clock);
+ 	/* Check against supported VIC modes */
+ 	} else if (!meson_venc_hdmi_supported_vic(vic))
+ 		return MODE_BAD;
+ 
+-	vclk_freq = mode->clock;
++	vclk_freq = clock;
+ 
+ 	/* For 420, pixel clock is half unlike venc clock */
+ 	if (drm_mode_is_420_only(display_info, mode) ||
+@@ -179,7 +181,8 @@ static enum drm_mode_status meson_encoder_hdmi_mode_valid(struct drm_bridge *bri
+ 	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+ 		venc_freq /= 2;
+ 
+-	dev_dbg(priv->dev, "%s: vclk:%d phy=%d venc=%d hdmi=%d\n",
++	dev_dbg(priv->dev,
++		"%s: vclk:%lluHz phy=%lluHz venc=%lluHz hdmi=%lluHz\n",
+ 		__func__, phy_freq, vclk_freq, venc_freq, hdmi_freq);
+ 
+ 	return meson_vclk_vic_supported_freq(priv, phy_freq, vclk_freq);
+diff --git a/drivers/gpu/drm/meson/meson_vclk.c b/drivers/gpu/drm/meson/meson_vclk.c
+index 2a82119eb58ed8..dfe0c28a0f054c 100644
+--- a/drivers/gpu/drm/meson/meson_vclk.c
++++ b/drivers/gpu/drm/meson/meson_vclk.c
+@@ -110,7 +110,7 @@
+ #define HDMI_PLL_LOCK		BIT(31)
+ #define HDMI_PLL_LOCK_G12A	(3 << 30)
+ 
+-#define FREQ_1000_1001(_freq)	DIV_ROUND_CLOSEST(_freq * 1000, 1001)
++#define FREQ_1000_1001(_freq)	DIV_ROUND_CLOSEST_ULL((_freq) * 1000ULL, 1001ULL)
+ 
+ /* VID PLL Dividers */
+ enum {
+@@ -360,11 +360,11 @@ enum {
+ };
+ 
+ struct meson_vclk_params {
+-	unsigned int pll_freq;
+-	unsigned int phy_freq;
+-	unsigned int vclk_freq;
+-	unsigned int venc_freq;
+-	unsigned int pixel_freq;
++	unsigned long long pll_freq;
++	unsigned long long phy_freq;
++	unsigned long long vclk_freq;
++	unsigned long long venc_freq;
++	unsigned long long pixel_freq;
+ 	unsigned int pll_od1;
+ 	unsigned int pll_od2;
+ 	unsigned int pll_od3;
+@@ -372,11 +372,11 @@ struct meson_vclk_params {
+ 	unsigned int vclk_div;
+ } params[] = {
+ 	[MESON_VCLK_HDMI_ENCI_54000] = {
+-		.pll_freq = 4320000,
+-		.phy_freq = 270000,
+-		.vclk_freq = 54000,
+-		.venc_freq = 54000,
+-		.pixel_freq = 54000,
++		.pll_freq = 4320000000,
++		.phy_freq = 270000000,
++		.vclk_freq = 54000000,
++		.venc_freq = 54000000,
++		.pixel_freq = 54000000,
+ 		.pll_od1 = 4,
+ 		.pll_od2 = 4,
+ 		.pll_od3 = 1,
+@@ -384,11 +384,11 @@ struct meson_vclk_params {
+ 		.vclk_div = 1,
+ 	},
+ 	[MESON_VCLK_HDMI_DDR_54000] = {
+-		.pll_freq = 4320000,
+-		.phy_freq = 270000,
+-		.vclk_freq = 54000,
+-		.venc_freq = 54000,
+-		.pixel_freq = 27000,
++		.pll_freq = 4320000000,
++		.phy_freq = 270000000,
++		.vclk_freq = 54000000,
++		.venc_freq = 54000000,
++		.pixel_freq = 27000000,
+ 		.pll_od1 = 4,
+ 		.pll_od2 = 4,
+ 		.pll_od3 = 1,
+@@ -396,11 +396,11 @@ struct meson_vclk_params {
+ 		.vclk_div = 1,
+ 	},
+ 	[MESON_VCLK_HDMI_DDR_148500] = {
+-		.pll_freq = 2970000,
+-		.phy_freq = 742500,
+-		.vclk_freq = 148500,
+-		.venc_freq = 148500,
+-		.pixel_freq = 74250,
++		.pll_freq = 2970000000,
++		.phy_freq = 742500000,
++		.vclk_freq = 148500000,
++		.venc_freq = 148500000,
++		.pixel_freq = 74250000,
+ 		.pll_od1 = 4,
+ 		.pll_od2 = 1,
+ 		.pll_od3 = 1,
+@@ -408,11 +408,11 @@ struct meson_vclk_params {
+ 		.vclk_div = 1,
+ 	},
+ 	[MESON_VCLK_HDMI_74250] = {
+-		.pll_freq = 2970000,
+-		.phy_freq = 742500,
+-		.vclk_freq = 74250,
+-		.venc_freq = 74250,
+-		.pixel_freq = 74250,
++		.pll_freq = 2970000000,
++		.phy_freq = 742500000,
++		.vclk_freq = 74250000,
++		.venc_freq = 74250000,
++		.pixel_freq = 74250000,
+ 		.pll_od1 = 2,
+ 		.pll_od2 = 2,
+ 		.pll_od3 = 2,
+@@ -420,11 +420,11 @@ struct meson_vclk_params {
+ 		.vclk_div = 1,
+ 	},
+ 	[MESON_VCLK_HDMI_148500] = {
+-		.pll_freq = 2970000,
+-		.phy_freq = 1485000,
+-		.vclk_freq = 148500,
+-		.venc_freq = 148500,
+-		.pixel_freq = 148500,
++		.pll_freq = 2970000000,
++		.phy_freq = 1485000000,
++		.vclk_freq = 148500000,
++		.venc_freq = 148500000,
++		.pixel_freq = 148500000,
+ 		.pll_od1 = 1,
+ 		.pll_od2 = 2,
+ 		.pll_od3 = 2,
+@@ -432,11 +432,11 @@ struct meson_vclk_params {
+ 		.vclk_div = 1,
+ 	},
+ 	[MESON_VCLK_HDMI_297000] = {
+-		.pll_freq = 5940000,
+-		.phy_freq = 2970000,
+-		.venc_freq = 297000,
+-		.vclk_freq = 297000,
+-		.pixel_freq = 297000,
++		.pll_freq = 5940000000,
++		.phy_freq = 2970000000,
++		.venc_freq = 297000000,
++		.vclk_freq = 297000000,
++		.pixel_freq = 297000000,
+ 		.pll_od1 = 2,
+ 		.pll_od2 = 1,
+ 		.pll_od3 = 1,
+@@ -444,11 +444,11 @@ struct meson_vclk_params {
+ 		.vclk_div = 2,
+ 	},
+ 	[MESON_VCLK_HDMI_594000] = {
+-		.pll_freq = 5940000,
+-		.phy_freq = 5940000,
+-		.venc_freq = 594000,
+-		.vclk_freq = 594000,
+-		.pixel_freq = 594000,
++		.pll_freq = 5940000000,
++		.phy_freq = 5940000000,
++		.venc_freq = 594000000,
++		.vclk_freq = 594000000,
++		.pixel_freq = 594000000,
+ 		.pll_od1 = 1,
+ 		.pll_od2 = 1,
+ 		.pll_od3 = 2,
+@@ -456,11 +456,11 @@ struct meson_vclk_params {
+ 		.vclk_div = 1,
+ 	},
+ 	[MESON_VCLK_HDMI_594000_YUV420] = {
+-		.pll_freq = 5940000,
+-		.phy_freq = 2970000,
+-		.venc_freq = 594000,
+-		.vclk_freq = 594000,
+-		.pixel_freq = 297000,
++		.pll_freq = 5940000000,
++		.phy_freq = 2970000000,
++		.venc_freq = 594000000,
++		.vclk_freq = 594000000,
++		.pixel_freq = 297000000,
+ 		.pll_od1 = 2,
+ 		.pll_od2 = 1,
+ 		.pll_od3 = 1,
+@@ -617,16 +617,16 @@ static void meson_hdmi_pll_set_params(struct meson_drm *priv, unsigned int m,
+ 				3 << 20, pll_od_to_reg(od3) << 20);
+ }
+ 
+-#define XTAL_FREQ 24000
++#define XTAL_FREQ (24 * 1000 * 1000)
+ 
+ static unsigned int meson_hdmi_pll_get_m(struct meson_drm *priv,
+-					 unsigned int pll_freq)
++					 unsigned long long pll_freq)
+ {
+ 	/* The GXBB PLL has a /2 pre-multiplier */
+ 	if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXBB))
+-		pll_freq /= 2;
++		pll_freq = DIV_ROUND_DOWN_ULL(pll_freq, 2);
+ 
+-	return pll_freq / XTAL_FREQ;
++	return DIV_ROUND_DOWN_ULL(pll_freq, XTAL_FREQ);
+ }
+ 
+ #define HDMI_FRAC_MAX_GXBB	4096
+@@ -635,12 +635,13 @@ static unsigned int meson_hdmi_pll_get_m(struct meson_drm *priv,
+ 
+ static unsigned int meson_hdmi_pll_get_frac(struct meson_drm *priv,
+ 					    unsigned int m,
+-					    unsigned int pll_freq)
++					    unsigned long long pll_freq)
+ {
+-	unsigned int parent_freq = XTAL_FREQ;
++	unsigned long long parent_freq = XTAL_FREQ;
+ 	unsigned int frac_max = HDMI_FRAC_MAX_GXL;
+ 	unsigned int frac_m;
+ 	unsigned int frac;
++	u32 remainder;
+ 
+ 	/* The GXBB PLL has a /2 pre-multiplier and a larger FRAC width */
+ 	if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXBB)) {
+@@ -652,11 +653,11 @@ static unsigned int meson_hdmi_pll_get_frac(struct meson_drm *priv,
+ 		frac_max = HDMI_FRAC_MAX_G12A;
+ 
+ 	/* We can have a perfect match !*/
+-	if (pll_freq / m == parent_freq &&
+-	    pll_freq % m == 0)
++	if (div_u64_rem(pll_freq, m, &remainder) == parent_freq &&
++	    remainder == 0)
+ 		return 0;
+ 
+-	frac = div_u64((u64)pll_freq * (u64)frac_max, parent_freq);
++	frac = mul_u64_u64_div_u64(pll_freq, frac_max, parent_freq);
+ 	frac_m = m * frac_max;
+ 	if (frac_m > frac)
+ 		return frac_max;
+@@ -666,7 +667,7 @@ static unsigned int meson_hdmi_pll_get_frac(struct meson_drm *priv,
+ }
+ 
+ static bool meson_hdmi_pll_validate_params(struct meson_drm *priv,
+-					   unsigned int m,
++					   unsigned long long m,
+ 					   unsigned int frac)
+ {
+ 	if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXBB)) {
+@@ -694,7 +695,7 @@ static bool meson_hdmi_pll_validate_params(struct meson_drm *priv,
+ }
+ 
+ static bool meson_hdmi_pll_find_params(struct meson_drm *priv,
+-				       unsigned int freq,
++				       unsigned long long freq,
+ 				       unsigned int *m,
+ 				       unsigned int *frac,
+ 				       unsigned int *od)
+@@ -706,7 +707,7 @@ static bool meson_hdmi_pll_find_params(struct meson_drm *priv,
+ 			continue;
+ 		*frac = meson_hdmi_pll_get_frac(priv, *m, freq * *od);
+ 
+-		DRM_DEBUG_DRIVER("PLL params for %dkHz: m=%x frac=%x od=%d\n",
++		DRM_DEBUG_DRIVER("PLL params for %lluHz: m=%x frac=%x od=%d\n",
+ 				 freq, *m, *frac, *od);
+ 
+ 		if (meson_hdmi_pll_validate_params(priv, *m, *frac))
+@@ -718,7 +719,7 @@ static bool meson_hdmi_pll_find_params(struct meson_drm *priv,
+ 
+ /* pll_freq is the frequency after the OD dividers */
+ enum drm_mode_status
+-meson_vclk_dmt_supported_freq(struct meson_drm *priv, unsigned int freq)
++meson_vclk_dmt_supported_freq(struct meson_drm *priv, unsigned long long freq)
+ {
+ 	unsigned int od, m, frac;
+ 
+@@ -741,7 +742,7 @@ EXPORT_SYMBOL_GPL(meson_vclk_dmt_supported_freq);
+ 
+ /* pll_freq is the frequency after the OD dividers */
+ static void meson_hdmi_pll_generic_set(struct meson_drm *priv,
+-				       unsigned int pll_freq)
++				       unsigned long long pll_freq)
+ {
+ 	unsigned int od, m, frac, od1, od2, od3;
+ 
+@@ -756,7 +757,7 @@ static void meson_hdmi_pll_generic_set(struct meson_drm *priv,
+ 			od1 = od / od2;
+ 		}
+ 
+-		DRM_DEBUG_DRIVER("PLL params for %dkHz: m=%x frac=%x od=%d/%d/%d\n",
++		DRM_DEBUG_DRIVER("PLL params for %lluHz: m=%x frac=%x od=%d/%d/%d\n",
+ 				 pll_freq, m, frac, od1, od2, od3);
+ 
+ 		meson_hdmi_pll_set_params(priv, m, frac, od1, od2, od3);
+@@ -764,17 +765,48 @@ static void meson_hdmi_pll_generic_set(struct meson_drm *priv,
+ 		return;
+ 	}
+ 
+-	DRM_ERROR("Fatal, unable to find parameters for PLL freq %d\n",
++	DRM_ERROR("Fatal, unable to find parameters for PLL freq %lluHz\n",
+ 		  pll_freq);
+ }
+ 
++static bool meson_vclk_freqs_are_matching_param(unsigned int idx,
++						unsigned long long phy_freq,
++						unsigned long long vclk_freq)
++{
++	DRM_DEBUG_DRIVER("i = %d vclk_freq = %lluHz alt = %lluHz\n",
++			 idx, params[idx].vclk_freq,
++			 FREQ_1000_1001(params[idx].vclk_freq));
++	DRM_DEBUG_DRIVER("i = %d phy_freq = %lluHz alt = %lluHz\n",
++			 idx, params[idx].phy_freq,
++			 FREQ_1000_1001(params[idx].phy_freq));
++
++	/* Match strict frequency */
++	if (phy_freq == params[idx].phy_freq &&
++	    vclk_freq == params[idx].vclk_freq)
++		return true;
++
++	/* Match 1000/1001 variant: vclk deviation has to be less than 1kHz
++	 * (drm EDID is defined in 1kHz steps, so everything smaller must be
++	 * rounding error) and the PHY freq deviation has to be less than
++	 * 10kHz (as the TMDS clock is 10 times the pixel clock, so anything
++	 * smaller must be rounding error as well).
++	 */
++	if (abs(vclk_freq - FREQ_1000_1001(params[idx].vclk_freq)) < 1000 &&
++	    abs(phy_freq - FREQ_1000_1001(params[idx].phy_freq)) < 10000)
++		return true;
++
++	/* no match */
++	return false;
++}
++
+ enum drm_mode_status
+-meson_vclk_vic_supported_freq(struct meson_drm *priv, unsigned int phy_freq,
+-			      unsigned int vclk_freq)
++meson_vclk_vic_supported_freq(struct meson_drm *priv,
++			      unsigned long long phy_freq,
++			      unsigned long long vclk_freq)
+ {
+ 	int i;
+ 
+-	DRM_DEBUG_DRIVER("phy_freq = %d vclk_freq = %d\n",
++	DRM_DEBUG_DRIVER("phy_freq = %lluHz vclk_freq = %lluHz\n",
+ 			 phy_freq, vclk_freq);
+ 
+ 	/* Check against soc revision/package limits */
+@@ -785,19 +817,7 @@ meson_vclk_vic_supported_freq(struct meson_drm *priv, unsigned int phy_freq,
+ 	}
+ 
+ 	for (i = 0 ; params[i].pixel_freq ; ++i) {
+-		DRM_DEBUG_DRIVER("i = %d pixel_freq = %d alt = %d\n",
+-				 i, params[i].pixel_freq,
+-				 FREQ_1000_1001(params[i].pixel_freq));
+-		DRM_DEBUG_DRIVER("i = %d phy_freq = %d alt = %d\n",
+-				 i, params[i].phy_freq,
+-				 FREQ_1000_1001(params[i].phy_freq/10)*10);
+-		/* Match strict frequency */
+-		if (phy_freq == params[i].phy_freq &&
+-		    vclk_freq == params[i].vclk_freq)
+-			return MODE_OK;
+-		/* Match 1000/1001 variant */
+-		if (phy_freq == (FREQ_1000_1001(params[i].phy_freq/10)*10) &&
+-		    vclk_freq == FREQ_1000_1001(params[i].vclk_freq))
++		if (meson_vclk_freqs_are_matching_param(i, phy_freq, vclk_freq))
+ 			return MODE_OK;
+ 	}
+ 
+@@ -805,8 +825,9 @@ meson_vclk_vic_supported_freq(struct meson_drm *priv, unsigned int phy_freq,
+ }
+ EXPORT_SYMBOL_GPL(meson_vclk_vic_supported_freq);
+ 
+-static void meson_vclk_set(struct meson_drm *priv, unsigned int pll_base_freq,
+-			   unsigned int od1, unsigned int od2, unsigned int od3,
++static void meson_vclk_set(struct meson_drm *priv,
++			   unsigned long long pll_base_freq, unsigned int od1,
++			   unsigned int od2, unsigned int od3,
+ 			   unsigned int vid_pll_div, unsigned int vclk_div,
+ 			   unsigned int hdmi_tx_div, unsigned int venc_div,
+ 			   bool hdmi_use_enci, bool vic_alternate_clock)
+@@ -826,15 +847,15 @@ static void meson_vclk_set(struct meson_drm *priv, unsigned int pll_base_freq,
+ 		meson_hdmi_pll_generic_set(priv, pll_base_freq);
+ 	} else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXBB)) {
+ 		switch (pll_base_freq) {
+-		case 2970000:
++		case 2970000000:
+ 			m = 0x3d;
+ 			frac = vic_alternate_clock ? 0xd02 : 0xe00;
+ 			break;
+-		case 4320000:
++		case 4320000000:
+ 			m = vic_alternate_clock ? 0x59 : 0x5a;
+ 			frac = vic_alternate_clock ? 0xe8f : 0;
+ 			break;
+-		case 5940000:
++		case 5940000000:
+ 			m = 0x7b;
+ 			frac = vic_alternate_clock ? 0xa05 : 0xc00;
+ 			break;
+@@ -844,15 +865,15 @@ static void meson_vclk_set(struct meson_drm *priv, unsigned int pll_base_freq,
+ 	} else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) ||
+ 		   meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL)) {
+ 		switch (pll_base_freq) {
+-		case 2970000:
++		case 2970000000:
+ 			m = 0x7b;
+ 			frac = vic_alternate_clock ? 0x281 : 0x300;
+ 			break;
+-		case 4320000:
++		case 4320000000:
+ 			m = vic_alternate_clock ? 0xb3 : 0xb4;
+ 			frac = vic_alternate_clock ? 0x347 : 0;
+ 			break;
+-		case 5940000:
++		case 5940000000:
+ 			m = 0xf7;
+ 			frac = vic_alternate_clock ? 0x102 : 0x200;
+ 			break;
+@@ -861,15 +882,15 @@ static void meson_vclk_set(struct meson_drm *priv, unsigned int pll_base_freq,
+ 		meson_hdmi_pll_set_params(priv, m, frac, od1, od2, od3);
+ 	} else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
+ 		switch (pll_base_freq) {
+-		case 2970000:
++		case 2970000000:
+ 			m = 0x7b;
+ 			frac = vic_alternate_clock ? 0x140b4 : 0x18000;
+ 			break;
+-		case 4320000:
++		case 4320000000:
+ 			m = vic_alternate_clock ? 0xb3 : 0xb4;
+ 			frac = vic_alternate_clock ? 0x1a3ee : 0;
+ 			break;
+-		case 5940000:
++		case 5940000000:
+ 			m = 0xf7;
+ 			frac = vic_alternate_clock ? 0x8148 : 0x10000;
+ 			break;
+@@ -1025,14 +1046,14 @@ static void meson_vclk_set(struct meson_drm *priv, unsigned int pll_base_freq,
+ }
+ 
+ void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
+-		      unsigned int phy_freq, unsigned int vclk_freq,
+-		      unsigned int venc_freq, unsigned int dac_freq,
++		      unsigned long long phy_freq, unsigned long long vclk_freq,
++		      unsigned long long venc_freq, unsigned long long dac_freq,
+ 		      bool hdmi_use_enci)
+ {
+ 	bool vic_alternate_clock = false;
+-	unsigned int freq;
+-	unsigned int hdmi_tx_div;
+-	unsigned int venc_div;
++	unsigned long long freq;
++	unsigned long long hdmi_tx_div;
++	unsigned long long venc_div;
+ 
+ 	if (target == MESON_VCLK_TARGET_CVBS) {
+ 		meson_venci_cvbs_clock_config(priv);
+@@ -1052,27 +1073,25 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
+ 		return;
+ 	}
+ 
+-	hdmi_tx_div = vclk_freq / dac_freq;
++	hdmi_tx_div = DIV_ROUND_DOWN_ULL(vclk_freq, dac_freq);
+ 
+ 	if (hdmi_tx_div == 0) {
+-		pr_err("Fatal Error, invalid HDMI-TX freq %d\n",
++		pr_err("Fatal Error, invalid HDMI-TX freq %lluHz\n",
+ 		       dac_freq);
+ 		return;
+ 	}
+ 
+-	venc_div = vclk_freq / venc_freq;
++	venc_div = DIV_ROUND_DOWN_ULL(vclk_freq, venc_freq);
+ 
+ 	if (venc_div == 0) {
+-		pr_err("Fatal Error, invalid HDMI venc freq %d\n",
++		pr_err("Fatal Error, invalid HDMI venc freq %lluHz\n",
+ 		       venc_freq);
+ 		return;
+ 	}
+ 
+ 	for (freq = 0 ; params[freq].pixel_freq ; ++freq) {
+-		if ((phy_freq == params[freq].phy_freq ||
+-		     phy_freq == FREQ_1000_1001(params[freq].phy_freq/10)*10) &&
+-		    (vclk_freq == params[freq].vclk_freq ||
+-		     vclk_freq == FREQ_1000_1001(params[freq].vclk_freq))) {
++		if (meson_vclk_freqs_are_matching_param(freq, phy_freq,
++							vclk_freq)) {
+ 			if (vclk_freq != params[freq].vclk_freq)
+ 				vic_alternate_clock = true;
+ 			else
+@@ -1098,7 +1117,8 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
+ 	}
+ 
+ 	if (!params[freq].pixel_freq) {
+-		pr_err("Fatal Error, invalid HDMI vclk freq %d\n", vclk_freq);
++		pr_err("Fatal Error, invalid HDMI vclk freq %lluHz\n",
++		       vclk_freq);
+ 		return;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/meson/meson_vclk.h b/drivers/gpu/drm/meson/meson_vclk.h
+index 60617aaf18dd1c..7ac55744e57494 100644
+--- a/drivers/gpu/drm/meson/meson_vclk.h
++++ b/drivers/gpu/drm/meson/meson_vclk.h
+@@ -20,17 +20,18 @@ enum {
+ };
+ 
+ /* 27MHz is the CVBS Pixel Clock */
+-#define MESON_VCLK_CVBS			27000
++#define MESON_VCLK_CVBS			(27 * 1000 * 1000)
+ 
+ enum drm_mode_status
+-meson_vclk_dmt_supported_freq(struct meson_drm *priv, unsigned int freq);
++meson_vclk_dmt_supported_freq(struct meson_drm *priv, unsigned long long freq);
+ enum drm_mode_status
+-meson_vclk_vic_supported_freq(struct meson_drm *priv, unsigned int phy_freq,
+-			      unsigned int vclk_freq);
++meson_vclk_vic_supported_freq(struct meson_drm *priv,
++			      unsigned long long phy_freq,
++			      unsigned long long vclk_freq);
+ 
+ void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
+-		      unsigned int phy_freq, unsigned int vclk_freq,
+-		      unsigned int venc_freq, unsigned int dac_freq,
++		      unsigned long long phy_freq, unsigned long long vclk_freq,
++		      unsigned long long venc_freq, unsigned long long dac_freq,
+ 		      bool hdmi_use_enci);
+ 
+ #endif /* __MESON_VCLK_H */
+diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+index d903ad9c0b5fb8..d2189441aa38ae 100644
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+@@ -554,7 +554,6 @@ static void a6xx_calc_ubwc_config(struct adreno_gpu *gpu)
+ 	if (adreno_is_7c3(gpu)) {
+ 		gpu->ubwc_config.highest_bank_bit = 14;
+ 		gpu->ubwc_config.amsbc = 1;
+-		gpu->ubwc_config.rgb565_predicator = 1;
+ 		gpu->ubwc_config.uavflagprd_inv = 2;
+ 		gpu->ubwc_config.macrotile_mode = 1;
+ 	}
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
+index 36cc9dbc00b5c1..d8d5a91c00ec8d 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
+@@ -76,7 +76,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
+ 	{
+ 		.name = "sspp_0", .id = SSPP_VIG0,
+ 		.base = 0x4000, .len = 0x1f0,
+-		.features = VIG_SDM845_MASK,
++		.features = VIG_SDM845_MASK_SDMA,
+ 		.sblk = &dpu_vig_sblk_qseed3_1_4,
+ 		.xin_id = 0,
+ 		.type = SSPP_TYPE_VIG,
+@@ -84,7 +84,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
+ 	}, {
+ 		.name = "sspp_1", .id = SSPP_VIG1,
+ 		.base = 0x6000, .len = 0x1f0,
+-		.features = VIG_SDM845_MASK,
++		.features = VIG_SDM845_MASK_SDMA,
+ 		.sblk = &dpu_vig_sblk_qseed3_1_4,
+ 		.xin_id = 4,
+ 		.type = SSPP_TYPE_VIG,
+@@ -92,7 +92,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
+ 	}, {
+ 		.name = "sspp_2", .id = SSPP_VIG2,
+ 		.base = 0x8000, .len = 0x1f0,
+-		.features = VIG_SDM845_MASK,
++		.features = VIG_SDM845_MASK_SDMA,
+ 		.sblk = &dpu_vig_sblk_qseed3_1_4,
+ 		.xin_id = 8,
+ 		.type = SSPP_TYPE_VIG,
+@@ -100,7 +100,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
+ 	}, {
+ 		.name = "sspp_3", .id = SSPP_VIG3,
+ 		.base = 0xa000, .len = 0x1f0,
+-		.features = VIG_SDM845_MASK,
++		.features = VIG_SDM845_MASK_SDMA,
+ 		.sblk = &dpu_vig_sblk_qseed3_1_4,
+ 		.xin_id = 12,
+ 		.type = SSPP_TYPE_VIG,
+@@ -108,7 +108,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
+ 	}, {
+ 		.name = "sspp_8", .id = SSPP_DMA0,
+ 		.base = 0x24000, .len = 0x1f0,
+-		.features = DMA_SDM845_MASK,
++		.features = DMA_SDM845_MASK_SDMA,
+ 		.sblk = &dpu_dma_sblk,
+ 		.xin_id = 1,
+ 		.type = SSPP_TYPE_DMA,
+@@ -116,7 +116,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
+ 	}, {
+ 		.name = "sspp_9", .id = SSPP_DMA1,
+ 		.base = 0x26000, .len = 0x1f0,
+-		.features = DMA_SDM845_MASK,
++		.features = DMA_SDM845_MASK_SDMA,
+ 		.sblk = &dpu_dma_sblk,
+ 		.xin_id = 5,
+ 		.type = SSPP_TYPE_DMA,
+@@ -124,7 +124,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
+ 	}, {
+ 		.name = "sspp_10", .id = SSPP_DMA2,
+ 		.base = 0x28000, .len = 0x1f0,
+-		.features = DMA_CURSOR_SDM845_MASK,
++		.features = DMA_CURSOR_SDM845_MASK_SDMA,
+ 		.sblk = &dpu_dma_sblk,
+ 		.xin_id = 9,
+ 		.type = SSPP_TYPE_DMA,
+@@ -132,7 +132,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
+ 	}, {
+ 		.name = "sspp_11", .id = SSPP_DMA3,
+ 		.base = 0x2a000, .len = 0x1f0,
+-		.features = DMA_CURSOR_SDM845_MASK,
++		.features = DMA_CURSOR_SDM845_MASK_SDMA,
+ 		.sblk = &dpu_dma_sblk,
+ 		.xin_id = 13,
+ 		.type = SSPP_TYPE_DMA,
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
+index e8eacdb47967a2..485c3041c80188 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
+@@ -75,7 +75,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
+ 	{
+ 		.name = "sspp_0", .id = SSPP_VIG0,
+ 		.base = 0x4000, .len = 0x1f0,
+-		.features = VIG_SDM845_MASK,
++		.features = VIG_SDM845_MASK_SDMA,
+ 		.sblk = &dpu_vig_sblk_qseed3_1_4,
+ 		.xin_id = 0,
+ 		.type = SSPP_TYPE_VIG,
+@@ -83,7 +83,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
+ 	}, {
+ 		.name = "sspp_1", .id = SSPP_VIG1,
+ 		.base = 0x6000, .len = 0x1f0,
+-		.features = VIG_SDM845_MASK,
++		.features = VIG_SDM845_MASK_SDMA,
+ 		.sblk = &dpu_vig_sblk_qseed3_1_4,
+ 		.xin_id = 4,
+ 		.type = SSPP_TYPE_VIG,
+@@ -91,7 +91,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
+ 	}, {
+ 		.name = "sspp_2", .id = SSPP_VIG2,
+ 		.base = 0x8000, .len = 0x1f0,
+-		.features = VIG_SDM845_MASK,
++		.features = VIG_SDM845_MASK_SDMA,
+ 		.sblk = &dpu_vig_sblk_qseed3_1_4,
+ 		.xin_id = 8,
+ 		.type = SSPP_TYPE_VIG,
+@@ -99,7 +99,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
+ 	}, {
+ 		.name = "sspp_3", .id = SSPP_VIG3,
+ 		.base = 0xa000, .len = 0x1f0,
+-		.features = VIG_SDM845_MASK,
++		.features = VIG_SDM845_MASK_SDMA,
+ 		.sblk = &dpu_vig_sblk_qseed3_1_4,
+ 		.xin_id = 12,
+ 		.type = SSPP_TYPE_VIG,
+@@ -107,7 +107,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
+ 	}, {
+ 		.name = "sspp_8", .id = SSPP_DMA0,
+ 		.base = 0x24000, .len = 0x1f0,
+-		.features = DMA_SDM845_MASK,
++		.features = DMA_SDM845_MASK_SDMA,
+ 		.sblk = &dpu_dma_sblk,
+ 		.xin_id = 1,
+ 		.type = SSPP_TYPE_DMA,
+@@ -115,7 +115,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
+ 	}, {
+ 		.name = "sspp_9", .id = SSPP_DMA1,
+ 		.base = 0x26000, .len = 0x1f0,
+-		.features = DMA_SDM845_MASK,
++		.features = DMA_SDM845_MASK_SDMA,
+ 		.sblk = &dpu_dma_sblk,
+ 		.xin_id = 5,
+ 		.type = SSPP_TYPE_DMA,
+@@ -123,7 +123,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
+ 	}, {
+ 		.name = "sspp_10", .id = SSPP_DMA2,
+ 		.base = 0x28000, .len = 0x1f0,
+-		.features = DMA_CURSOR_SDM845_MASK,
++		.features = DMA_CURSOR_SDM845_MASK_SDMA,
+ 		.sblk = &dpu_dma_sblk,
+ 		.xin_id = 9,
+ 		.type = SSPP_TYPE_DMA,
+@@ -131,7 +131,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
+ 	}, {
+ 		.name = "sspp_11", .id = SSPP_DMA3,
+ 		.base = 0x2a000, .len = 0x1f0,
+-		.features = DMA_CURSOR_SDM845_MASK,
++		.features = DMA_CURSOR_SDM845_MASK_SDMA,
+ 		.sblk = &dpu_dma_sblk,
+ 		.xin_id = 13,
+ 		.type = SSPP_TYPE_DMA,
+diff --git a/drivers/gpu/drm/panel/panel-samsung-sofef00.c b/drivers/gpu/drm/panel/panel-samsung-sofef00.c
+index 04ce925b3d9dbd..49cfa84b34f0ca 100644
+--- a/drivers/gpu/drm/panel/panel-samsung-sofef00.c
++++ b/drivers/gpu/drm/panel/panel-samsung-sofef00.c
+@@ -22,7 +22,6 @@ struct sofef00_panel {
+ 	struct mipi_dsi_device *dsi;
+ 	struct regulator *supply;
+ 	struct gpio_desc *reset_gpio;
+-	const struct drm_display_mode *mode;
+ };
+ 
+ static inline
+@@ -159,26 +158,11 @@ static const struct drm_display_mode enchilada_panel_mode = {
+ 	.height_mm = 145,
+ };
+ 
+-static const struct drm_display_mode fajita_panel_mode = {
+-	.clock = (1080 + 72 + 16 + 36) * (2340 + 32 + 4 + 18) * 60 / 1000,
+-	.hdisplay = 1080,
+-	.hsync_start = 1080 + 72,
+-	.hsync_end = 1080 + 72 + 16,
+-	.htotal = 1080 + 72 + 16 + 36,
+-	.vdisplay = 2340,
+-	.vsync_start = 2340 + 32,
+-	.vsync_end = 2340 + 32 + 4,
+-	.vtotal = 2340 + 32 + 4 + 18,
+-	.width_mm = 68,
+-	.height_mm = 145,
+-};
+-
+ static int sofef00_panel_get_modes(struct drm_panel *panel, struct drm_connector *connector)
+ {
+ 	struct drm_display_mode *mode;
+-	struct sofef00_panel *ctx = to_sofef00_panel(panel);
+ 
+-	mode = drm_mode_duplicate(connector->dev, ctx->mode);
++	mode = drm_mode_duplicate(connector->dev, &enchilada_panel_mode);
+ 	if (!mode)
+ 		return -ENOMEM;
+ 
+@@ -239,13 +223,6 @@ static int sofef00_panel_probe(struct mipi_dsi_device *dsi)
+ 	if (!ctx)
+ 		return -ENOMEM;
+ 
+-	ctx->mode = of_device_get_match_data(dev);
+-
+-	if (!ctx->mode) {
+-		dev_err(dev, "Missing device mode\n");
+-		return -ENODEV;
+-	}
+-
+ 	ctx->supply = devm_regulator_get(dev, "vddio");
+ 	if (IS_ERR(ctx->supply))
+ 		return dev_err_probe(dev, PTR_ERR(ctx->supply),
+@@ -295,14 +272,7 @@ static void sofef00_panel_remove(struct mipi_dsi_device *dsi)
+ }
+ 
+ static const struct of_device_id sofef00_panel_of_match[] = {
+-	{ // OnePlus 6 / enchilada
+-		.compatible = "samsung,sofef00",
+-		.data = &enchilada_panel_mode,
+-	},
+-	{ // OnePlus 6T / fajita
+-		.compatible = "samsung,s6e3fc2x01",
+-		.data = &fajita_panel_mode,
+-	},
++	{ .compatible = "samsung,sofef00" },
+ 	{ /* sentinel */ }
+ };
+ MODULE_DEVICE_TABLE(of, sofef00_panel_of_match);
+diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
+index d041ff542a4eed..82db3daf4f81ab 100644
+--- a/drivers/gpu/drm/panel/panel-simple.c
++++ b/drivers/gpu/drm/panel/panel-simple.c
+@@ -2141,13 +2141,14 @@ static const struct display_timing evervision_vgg644804_timing = {
+ static const struct panel_desc evervision_vgg644804 = {
+ 	.timings = &evervision_vgg644804_timing,
+ 	.num_timings = 1,
+-	.bpc = 8,
++	.bpc = 6,
+ 	.size = {
+ 		.width = 115,
+ 		.height = 86,
+ 	},
+ 	.bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
+-	.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE,
++	.bus_flags = DRM_BUS_FLAG_DE_HIGH,
++	.connector_type = DRM_MODE_CONNECTOR_LVDS,
+ };
+ 
+ static const struct display_timing evervision_vgg804821_timing = {
+diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c
+index 0e6f94df690dd3..b57824abeb9eed 100644
+--- a/drivers/gpu/drm/panthor/panthor_mmu.c
++++ b/drivers/gpu/drm/panthor/panthor_mmu.c
+@@ -780,6 +780,7 @@ int panthor_vm_active(struct panthor_vm *vm)
+ 	if (ptdev->mmu->as.faulty_mask & panthor_mmu_as_fault_mask(ptdev, as)) {
+ 		gpu_write(ptdev, MMU_INT_CLEAR, panthor_mmu_as_fault_mask(ptdev, as));
+ 		ptdev->mmu->as.faulty_mask &= ~panthor_mmu_as_fault_mask(ptdev, as);
++		ptdev->mmu->irq.mask |= panthor_mmu_as_fault_mask(ptdev, as);
+ 		gpu_write(ptdev, MMU_INT_MASK, ~ptdev->mmu->as.faulty_mask);
+ 	}
+ 
+diff --git a/drivers/gpu/drm/panthor/panthor_regs.h b/drivers/gpu/drm/panthor/panthor_regs.h
+index b7b3b3add16627..a7a323dc5cf92a 100644
+--- a/drivers/gpu/drm/panthor/panthor_regs.h
++++ b/drivers/gpu/drm/panthor/panthor_regs.h
+@@ -133,8 +133,8 @@
+ #define GPU_COHERENCY_PROT_BIT(name)			BIT(GPU_COHERENCY_  ## name)
+ 
+ #define GPU_COHERENCY_PROTOCOL				0x304
+-#define   GPU_COHERENCY_ACE				0
+-#define   GPU_COHERENCY_ACE_LITE			1
++#define   GPU_COHERENCY_ACE_LITE			0
++#define   GPU_COHERENCY_ACE				1
+ #define   GPU_COHERENCY_NONE				31
+ 
+ #define MCU_CONTROL					0x700
+diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c
+index 70d8ad065bfa1d..4c8fe83dd6101b 100644
+--- a/drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c
++++ b/drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c
+@@ -705,7 +705,7 @@ static int rcar_du_vsps_init(struct rcar_du_device *rcdu)
+ 		ret = of_parse_phandle_with_fixed_args(np, vsps_prop_name,
+ 						       cells, i, &args);
+ 		if (ret < 0)
+-			goto error;
++			goto done;
+ 
+ 		/*
+ 		 * Add the VSP to the list or update the corresponding existing
+@@ -743,13 +743,11 @@ static int rcar_du_vsps_init(struct rcar_du_device *rcdu)
+ 		vsp->dev = rcdu;
+ 
+ 		ret = rcar_du_vsp_init(vsp, vsps[i].np, vsps[i].crtcs_mask);
+-		if (ret < 0)
+-			goto error;
++		if (ret)
++			goto done;
+ 	}
+ 
+-	return 0;
+-
+-error:
++done:
+ 	for (i = 0; i < ARRAY_SIZE(vsps); ++i)
+ 		of_node_put(vsps[i].np);
+ 
+diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c
+index 1e8ec50b759e46..ff5a749710db3a 100644
+--- a/drivers/gpu/drm/tegra/rgb.c
++++ b/drivers/gpu/drm/tegra/rgb.c
+@@ -200,6 +200,11 @@ static const struct drm_encoder_helper_funcs tegra_rgb_encoder_helper_funcs = {
+ 	.atomic_check = tegra_rgb_encoder_atomic_check,
+ };
+ 
++static void tegra_dc_of_node_put(void *data)
++{
++	of_node_put(data);
++}
++
+ int tegra_dc_rgb_probe(struct tegra_dc *dc)
+ {
+ 	struct device_node *np;
+@@ -207,7 +212,14 @@ int tegra_dc_rgb_probe(struct tegra_dc *dc)
+ 	int err;
+ 
+ 	np = of_get_child_by_name(dc->dev->of_node, "rgb");
+-	if (!np || !of_device_is_available(np))
++	if (!np)
++		return -ENODEV;
++
++	err = devm_add_action_or_reset(dc->dev, tegra_dc_of_node_put, np);
++	if (err < 0)
++		return err;
++
++	if (!of_device_is_available(np))
+ 		return -ENODEV;
+ 
+ 	rgb = devm_kzalloc(dc->dev, sizeof(*rgb), GFP_KERNEL);
+diff --git a/drivers/gpu/drm/vc4/tests/vc4_mock_output.c b/drivers/gpu/drm/vc4/tests/vc4_mock_output.c
+index e70d7c3076acf1..f0ddc223c1f839 100644
+--- a/drivers/gpu/drm/vc4/tests/vc4_mock_output.c
++++ b/drivers/gpu/drm/vc4/tests/vc4_mock_output.c
+@@ -75,24 +75,30 @@ int vc4_mock_atomic_add_output(struct kunit *test,
+ 	int ret;
+ 
+ 	encoder = vc4_find_encoder_by_type(drm, type);
+-	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder);
++	if (!encoder)
++		return -ENODEV;
+ 
+ 	crtc = vc4_find_crtc_for_encoder(test, drm, encoder);
+-	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc);
++	if (!crtc)
++		return -ENODEV;
+ 
+ 	output = encoder_to_vc4_dummy_output(encoder);
+ 	conn = &output->connector;
+ 	conn_state = drm_atomic_get_connector_state(state, conn);
+-	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);
++	if (IS_ERR(conn_state))
++		return PTR_ERR(conn_state);
+ 
+ 	ret = drm_atomic_set_crtc_for_connector(conn_state, crtc);
+-	KUNIT_EXPECT_EQ(test, ret, 0);
++	if (ret)
++		return ret;
+ 
+ 	crtc_state = drm_atomic_get_crtc_state(state, crtc);
+-	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_state);
++	if (IS_ERR(crtc_state))
++		return PTR_ERR(crtc_state);
+ 
+ 	ret = drm_atomic_set_mode_for_crtc(crtc_state, &default_mode);
+-	KUNIT_EXPECT_EQ(test, ret, 0);
++	if (ret)
++		return ret;
+ 
+ 	crtc_state->active = true;
+ 
+@@ -113,26 +119,32 @@ int vc4_mock_atomic_del_output(struct kunit *test,
+ 	int ret;
+ 
+ 	encoder = vc4_find_encoder_by_type(drm, type);
+-	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder);
++	if (!encoder)
++		return -ENODEV;
+ 
+ 	crtc = vc4_find_crtc_for_encoder(test, drm, encoder);
+-	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc);
++	if (!crtc)
++		return -ENODEV;
+ 
+ 	crtc_state = drm_atomic_get_crtc_state(state, crtc);
+-	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_state);
++	if (IS_ERR(crtc_state))
++		return PTR_ERR(crtc_state);
+ 
+ 	crtc_state->active = false;
+ 
+ 	ret = drm_atomic_set_mode_for_crtc(crtc_state, NULL);
+-	KUNIT_ASSERT_EQ(test, ret, 0);
++	if (ret)
++		return ret;
+ 
+ 	output = encoder_to_vc4_dummy_output(encoder);
+ 	conn = &output->connector;
+ 	conn_state = drm_atomic_get_connector_state(state, conn);
+-	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);
++	if (IS_ERR(conn_state))
++		return PTR_ERR(conn_state);
+ 
+ 	ret = drm_atomic_set_crtc_for_connector(conn_state, NULL);
+-	KUNIT_ASSERT_EQ(test, ret, 0);
++	if (ret)
++		return ret;
+ 
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
+index 40b4d084e3ceef..91b589a497d025 100644
+--- a/drivers/gpu/drm/vkms/vkms_crtc.c
++++ b/drivers/gpu/drm/vkms/vkms_crtc.c
+@@ -198,7 +198,7 @@ static int vkms_crtc_atomic_check(struct drm_crtc *crtc,
+ 		i++;
+ 	}
+ 
+-	vkms_state->active_planes = kcalloc(i, sizeof(plane), GFP_KERNEL);
++	vkms_state->active_planes = kcalloc(i, sizeof(*vkms_state->active_planes), GFP_KERNEL);
+ 	if (!vkms_state->active_planes)
+ 		return -ENOMEM;
+ 	vkms_state->num_active_planes = i;
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+index 183cda50094cb7..e8e49f13cfa2ce 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+@@ -51,11 +51,13 @@ static void vmw_bo_release(struct vmw_bo *vbo)
+ 			mutex_lock(&res->dev_priv->cmdbuf_mutex);
+ 			(void)vmw_resource_reserve(res, false, true);
+ 			vmw_resource_mob_detach(res);
++			if (res->dirty)
++				res->func->dirty_free(res);
+ 			if (res->coherent)
+ 				vmw_bo_dirty_release(res->guest_memory_bo);
+ 			res->guest_memory_bo = NULL;
+ 			res->guest_memory_offset = 0;
+-			vmw_resource_unreserve(res, false, false, false, NULL,
++			vmw_resource_unreserve(res, true, false, false, NULL,
+ 					       0);
+ 			mutex_unlock(&res->dev_priv->cmdbuf_mutex);
+ 		}
+@@ -73,9 +75,9 @@ static void vmw_bo_free(struct ttm_buffer_object *bo)
+ {
+ 	struct vmw_bo *vbo = to_vmw_bo(&bo->base);
+ 
+-	WARN_ON(vbo->dirty);
+ 	WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree));
+ 	vmw_bo_release(vbo);
++	WARN_ON(vbo->dirty);
+ 	kfree(vbo);
+ }
+ 
+@@ -849,9 +851,9 @@ void vmw_bo_placement_set_default_accelerated(struct vmw_bo *bo)
+ 	vmw_bo_placement_set(bo, domain, domain);
+ }
+ 
+-void vmw_bo_add_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res)
++int vmw_bo_add_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res)
+ {
+-	xa_store(&vbo->detached_resources, (unsigned long)res, res, GFP_KERNEL);
++	return xa_err(xa_store(&vbo->detached_resources, (unsigned long)res, res, GFP_KERNEL));
+ }
+ 
+ void vmw_bo_del_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res)
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
+index c21ba7ff773682..940c0a0b9c4510 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
+@@ -142,7 +142,7 @@ void vmw_bo_move_notify(struct ttm_buffer_object *bo,
+ 			struct ttm_resource *mem);
+ void vmw_bo_swap_notify(struct ttm_buffer_object *bo);
+ 
+-void vmw_bo_add_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res);
++int vmw_bo_add_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res);
+ void vmw_bo_del_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res);
+ struct vmw_surface *vmw_bo_surface(struct vmw_bo *vbo);
+ 
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+index 2e52d73eba4840..ea741bc4ac3fc7 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+@@ -4086,6 +4086,23 @@ static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
+ 	return 0;
+ }
+ 
++/*
++ * DMA fence callback to remove a seqno_waiter
++ */
++struct seqno_waiter_rm_context {
++	struct dma_fence_cb base;
++	struct vmw_private *dev_priv;
++};
++
++static void seqno_waiter_rm_cb(struct dma_fence *f, struct dma_fence_cb *cb)
++{
++	struct seqno_waiter_rm_context *ctx =
++		container_of(cb, struct seqno_waiter_rm_context, base);
++
++	vmw_seqno_waiter_remove(ctx->dev_priv);
++	kfree(ctx);
++}
++
+ int vmw_execbuf_process(struct drm_file *file_priv,
+ 			struct vmw_private *dev_priv,
+ 			void __user *user_commands, void *kernel_commands,
+@@ -4266,6 +4283,15 @@ int vmw_execbuf_process(struct drm_file *file_priv,
+ 		} else {
+ 			/* Link the fence with the FD created earlier */
+ 			fd_install(out_fence_fd, sync_file->file);
++			struct seqno_waiter_rm_context *ctx =
++				kmalloc(sizeof(*ctx), GFP_KERNEL);
++			ctx->dev_priv = dev_priv;
++			vmw_seqno_waiter_add(dev_priv);
++			if (dma_fence_add_callback(&fence->base, &ctx->base,
++						   seqno_waiter_rm_cb) < 0) {
++				vmw_seqno_waiter_remove(dev_priv);
++				kfree(ctx);
++			}
+ 		}
+ 	}
+ 
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+index a73af8a355fbf5..c4d5fe5f330f98 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+@@ -273,7 +273,7 @@ int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
+ 		goto out_bad_resource;
+ 
+ 	res = converter->base_obj_to_res(base);
+-	kref_get(&res->kref);
++	vmw_resource_reference(res);
+ 
+ 	*p_res = res;
+ 	ret = 0;
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+index 5721c74da3e0b9..d7a8070330ba54 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+@@ -658,7 +658,7 @@ static void vmw_user_surface_free(struct vmw_resource *res)
+ 	struct vmw_user_surface *user_srf =
+ 	    container_of(srf, struct vmw_user_surface, srf);
+ 
+-	WARN_ON_ONCE(res->dirty);
++	WARN_ON(res->dirty);
+ 	if (user_srf->master)
+ 		drm_master_put(&user_srf->master);
+ 	kfree(srf->offsets);
+@@ -689,8 +689,7 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
+ 	 * Dumb buffers own the resource and they'll unref the
+ 	 * resource themselves
+ 	 */
+-	if (res && res->guest_memory_bo && res->guest_memory_bo->is_dumb)
+-		return;
++	WARN_ON(res && res->guest_memory_bo && res->guest_memory_bo->is_dumb);
+ 
+ 	vmw_resource_unreference(&res);
+ }
+@@ -871,7 +870,12 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
+ 			vmw_resource_unreference(&res);
+ 			goto out_unlock;
+ 		}
+-		vmw_bo_add_detached_resource(res->guest_memory_bo, res);
++
++		ret = vmw_bo_add_detached_resource(res->guest_memory_bo, res);
++		if (unlikely(ret != 0)) {
++			vmw_resource_unreference(&res);
++			goto out_unlock;
++		}
+ 	}
+ 
+ 	tmp = vmw_resource_reference(&srf->res);
+@@ -1670,6 +1674,14 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
+ 
+ 	}
+ 
++	if (res->guest_memory_bo) {
++		ret = vmw_bo_add_detached_resource(res->guest_memory_bo, res);
++		if (unlikely(ret != 0)) {
++			vmw_resource_unreference(&res);
++			goto out_unlock;
++		}
++	}
++
+ 	tmp = vmw_resource_reference(res);
+ 	ret = ttm_prime_object_init(tfile, res->guest_memory_size, &user_srf->prime,
+ 				    VMW_RES_SURFACE,
+@@ -1684,7 +1696,6 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
+ 	rep->handle      = user_srf->prime.base.handle;
+ 	rep->backup_size = res->guest_memory_size;
+ 	if (res->guest_memory_bo) {
+-		vmw_bo_add_detached_resource(res->guest_memory_bo, res);
+ 		rep->buffer_map_handle =
+ 			drm_vma_node_offset_addr(&res->guest_memory_bo->tbo.base.vma_node);
+ 		rep->buffer_size = res->guest_memory_bo->tbo.base.size;
+@@ -2358,12 +2369,19 @@ int vmw_dumb_create(struct drm_file *file_priv,
+ 	vbo = res->guest_memory_bo;
+ 	vbo->is_dumb = true;
+ 	vbo->dumb_surface = vmw_res_to_srf(res);
+-
++	drm_gem_object_put(&vbo->tbo.base);
++	/*
++	 * Unset the user surface dtor since this in not actually exposed
++	 * to userspace. The suface is owned via the dumb_buffer's GEM handle
++	 */
++	struct vmw_user_surface *usurf = container_of(vbo->dumb_surface,
++						struct vmw_user_surface, srf);
++	usurf->prime.base.refcount_release = NULL;
+ err:
+ 	if (res)
+ 		vmw_resource_unreference(&res);
+-	if (ret)
+-		ttm_ref_object_base_unref(tfile, arg.rep.handle);
++
++	ttm_ref_object_base_unref(tfile, arg.rep.handle);
+ 
+ 	return ret;
+ }
+diff --git a/drivers/gpu/drm/xe/xe_gt_freq.c b/drivers/gpu/drm/xe/xe_gt_freq.c
+index ab76973f3e1e6f..a05fde2c7b122c 100644
+--- a/drivers/gpu/drm/xe/xe_gt_freq.c
++++ b/drivers/gpu/drm/xe/xe_gt_freq.c
+@@ -32,6 +32,7 @@
+  * Xe's Freq provides a sysfs API for frequency management:
+  *
+  * device/tile#/gt#/freq0/<item>_freq *read-only* files:
++ *
+  * - act_freq: The actual resolved frequency decided by PCODE.
+  * - cur_freq: The current one requested by GuC PC to the PCODE.
+  * - rpn_freq: The Render Performance (RP) N level, which is the minimal one.
+@@ -39,6 +40,7 @@
+  * - rp0_freq: The Render Performance (RP) 0 level, which is the maximum one.
+  *
+  * device/tile#/gt#/freq0/<item>_freq *read-write* files:
++ *
+  * - min_freq: Min frequency request.
+  * - max_freq: Max frequency request.
+  *             If max <= min, then freq_min becomes a fixed frequency request.
+diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c
+index 025d649434673d..23028afbbe1d1f 100644
+--- a/drivers/gpu/drm/xe/xe_pci.c
++++ b/drivers/gpu/drm/xe/xe_pci.c
+@@ -910,6 +910,7 @@ static int xe_pci_suspend(struct device *dev)
+ 
+ 	pci_save_state(pdev);
+ 	pci_disable_device(pdev);
++	pci_set_power_state(pdev, PCI_D3cold);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c
+index 0fb210e40a4127..9eafff0b6ea4c3 100644
+--- a/drivers/hid/hid-hyperv.c
++++ b/drivers/hid/hid-hyperv.c
+@@ -192,7 +192,7 @@ static void mousevsc_on_receive_device_info(struct mousevsc_dev *input_device,
+ 		goto cleanup;
+ 
+ 	input_device->report_desc_size = le16_to_cpu(
+-					desc->desc[0].wDescriptorLength);
++					desc->rpt_desc.wDescriptorLength);
+ 	if (input_device->report_desc_size == 0) {
+ 		input_device->dev_info_status = -EINVAL;
+ 		goto cleanup;
+@@ -210,7 +210,7 @@ static void mousevsc_on_receive_device_info(struct mousevsc_dev *input_device,
+ 
+ 	memcpy(input_device->report_desc,
+ 	       ((unsigned char *)desc) + desc->bLength,
+-	       le16_to_cpu(desc->desc[0].wDescriptorLength));
++	       le16_to_cpu(desc->rpt_desc.wDescriptorLength));
+ 
+ 	/* Send the ack */
+ 	memset(&ack, 0, sizeof(struct mousevsc_prt_msg));
+diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
+index bf0f51ef0149ff..01625dbb28e8d2 100644
+--- a/drivers/hid/usbhid/hid-core.c
++++ b/drivers/hid/usbhid/hid-core.c
+@@ -984,12 +984,11 @@ static int usbhid_parse(struct hid_device *hid)
+ 	struct usb_host_interface *interface = intf->cur_altsetting;
+ 	struct usb_device *dev = interface_to_usbdev (intf);
+ 	struct hid_descriptor *hdesc;
++	struct hid_class_descriptor *hcdesc;
+ 	u32 quirks = 0;
+ 	unsigned int rsize = 0;
+ 	char *rdesc;
+-	int ret, n;
+-	int num_descriptors;
+-	size_t offset = offsetof(struct hid_descriptor, desc);
++	int ret;
+ 
+ 	quirks = hid_lookup_quirk(hid);
+ 
+@@ -1011,20 +1010,19 @@ static int usbhid_parse(struct hid_device *hid)
+ 		return -ENODEV;
+ 	}
+ 
+-	if (hdesc->bLength < sizeof(struct hid_descriptor)) {
+-		dbg_hid("hid descriptor is too short\n");
++	if (!hdesc->bNumDescriptors ||
++	    hdesc->bLength != sizeof(*hdesc) +
++			      (hdesc->bNumDescriptors - 1) * sizeof(*hcdesc)) {
++		dbg_hid("hid descriptor invalid, bLen=%hhu bNum=%hhu\n",
++			hdesc->bLength, hdesc->bNumDescriptors);
+ 		return -EINVAL;
+ 	}
+ 
+ 	hid->version = le16_to_cpu(hdesc->bcdHID);
+ 	hid->country = hdesc->bCountryCode;
+ 
+-	num_descriptors = min_t(int, hdesc->bNumDescriptors,
+-	       (hdesc->bLength - offset) / sizeof(struct hid_class_descriptor));
+-
+-	for (n = 0; n < num_descriptors; n++)
+-		if (hdesc->desc[n].bDescriptorType == HID_DT_REPORT)
+-			rsize = le16_to_cpu(hdesc->desc[n].wDescriptorLength);
++	if (hdesc->rpt_desc.bDescriptorType == HID_DT_REPORT)
++		rsize = le16_to_cpu(hdesc->rpt_desc.wDescriptorLength);
+ 
+ 	if (!rsize || rsize > HID_MAX_DESCRIPTOR_SIZE) {
+ 		dbg_hid("weird size of report descriptor (%u)\n", rsize);
+@@ -1052,6 +1050,11 @@ static int usbhid_parse(struct hid_device *hid)
+ 		goto err;
+ 	}
+ 
++	if (hdesc->bNumDescriptors > 1)
++		hid_warn(intf,
++			"%u unsupported optional hid class descriptors\n",
++			(int)(hdesc->bNumDescriptors - 1));
++
+ 	hid->quirks |= quirks;
+ 
+ 	return 0;
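
The usbhid change above replaces the old "at least header-sized" check with an exact-length rule: bLength must cover the fixed header (which already embeds the mandatory report-descriptor entry) plus one class-descriptor entry per additional optional descriptor. A standalone sketch of that validation, with illustrative struct names patterned on the USB HID layout:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative layout only; field sizes mirror the USB HID spec. */
struct class_desc {
	uint8_t  bDescriptorType;
	uint16_t wDescriptorLength;
} __attribute__((packed));

struct hid_desc {
	uint8_t  bLength;
	uint8_t  bDescriptorType;
	uint16_t bcdHID;
	uint8_t  bCountryCode;
	uint8_t  bNumDescriptors;
	struct class_desc rpt_desc;	/* mandatory report descriptor */
	struct class_desc opt_descs[];	/* optional extras */
} __attribute__((packed));

static bool hid_desc_valid(const struct hid_desc *d)
{
	/* bLength must cover the header, the mandatory entry and all extras. */
	return d->bNumDescriptors &&
	       d->bLength == sizeof(*d) +
			     (d->bNumDescriptors - 1) * sizeof(struct class_desc);
}

int main(void)
{
	struct hid_desc d = { .bLength = sizeof(d), .bNumDescriptors = 1 };

	printf("valid: %d\n", hid_desc_valid(&d));	/* 1 */
	return 0;
}

With bNumDescriptors = 1 and bLength = sizeof(struct hid_desc) the descriptor passes; any mismatch is rejected outright instead of being silently clamped as the old min_t() loop did.
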
+diff --git a/drivers/hwmon/asus-ec-sensors.c b/drivers/hwmon/asus-ec-sensors.c
+index 9555366aeaf0d3..fdc157c7394d98 100644
+--- a/drivers/hwmon/asus-ec-sensors.c
++++ b/drivers/hwmon/asus-ec-sensors.c
+@@ -910,6 +910,10 @@ static int asus_ec_hwmon_read_string(struct device *dev,
+ {
+ 	struct ec_sensors_data *state = dev_get_drvdata(dev);
+ 	int sensor_index = find_ec_sensor_index(state, type, channel);
++
++	if (sensor_index < 0)
++		return sensor_index;
++
+ 	*str = get_sensor_info(state, sensor_index)->label;
+ 
+ 	return 0;
+diff --git a/drivers/hwtracing/coresight/coresight-catu.c b/drivers/hwtracing/coresight/coresight-catu.c
+index d8ad64ea81f119..25fd02955c38d6 100644
+--- a/drivers/hwtracing/coresight/coresight-catu.c
++++ b/drivers/hwtracing/coresight/coresight-catu.c
+@@ -458,12 +458,17 @@ static int catu_enable_hw(struct catu_drvdata *drvdata, enum cs_mode cs_mode,
+ static int catu_enable(struct coresight_device *csdev, enum cs_mode mode,
+ 		       void *data)
+ {
+-	int rc;
++	int rc = 0;
+ 	struct catu_drvdata *catu_drvdata = csdev_to_catu_drvdata(csdev);
+ 
+-	CS_UNLOCK(catu_drvdata->base);
+-	rc = catu_enable_hw(catu_drvdata, mode, data);
+-	CS_LOCK(catu_drvdata->base);
++	guard(raw_spinlock_irqsave)(&catu_drvdata->spinlock);
++	if (csdev->refcnt == 0) {
++		CS_UNLOCK(catu_drvdata->base);
++		rc = catu_enable_hw(catu_drvdata, mode, data);
++		CS_LOCK(catu_drvdata->base);
++	}
++	if (!rc)
++		csdev->refcnt++;
+ 	return rc;
+ }
+ 
+@@ -486,12 +491,15 @@ static int catu_disable_hw(struct catu_drvdata *drvdata)
+ 
+ static int catu_disable(struct coresight_device *csdev, void *__unused)
+ {
+-	int rc;
++	int rc = 0;
+ 	struct catu_drvdata *catu_drvdata = csdev_to_catu_drvdata(csdev);
+ 
+-	CS_UNLOCK(catu_drvdata->base);
+-	rc = catu_disable_hw(catu_drvdata);
+-	CS_LOCK(catu_drvdata->base);
++	guard(raw_spinlock_irqsave)(&catu_drvdata->spinlock);
++	if (--csdev->refcnt == 0) {
++		CS_UNLOCK(catu_drvdata->base);
++		rc = catu_disable_hw(catu_drvdata);
++		CS_LOCK(catu_drvdata->base);
++	}
+ 	return rc;
+ }
+ 
+@@ -550,6 +558,7 @@ static int __catu_probe(struct device *dev, struct resource *res)
+ 	dev->platform_data = pdata;
+ 
+ 	drvdata->base = base;
++	raw_spin_lock_init(&drvdata->spinlock);
+ 	catu_desc.access = CSDEV_ACCESS_IOMEM(base);
+ 	catu_desc.pdata = pdata;
+ 	catu_desc.dev = dev;
+@@ -702,7 +711,7 @@ static int __init catu_init(void)
+ {
+ 	int ret;
+ 
+-	ret = coresight_init_driver("catu", &catu_driver, &catu_platform_driver);
++	ret = coresight_init_driver("catu", &catu_driver, &catu_platform_driver, THIS_MODULE);
+ 	tmc_etr_set_catu_ops(&etr_catu_buf_ops);
+ 	return ret;
+ }
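
The CATU hunks above serialize enable/disable behind a spinlock and a per-device refcount, so the hardware is programmed only on the first enable and torn down only on the last disable. A minimal userspace analogue of the transition-counting pattern (a pthread mutex standing in for the raw spinlock):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int refcnt;

static int hw_enable(void)  { puts("hw enabled");  return 0; }
static int hw_disable(void) { puts("hw disabled"); return 0; }

static int dev_enable(void)
{
	int rc = 0;

	pthread_mutex_lock(&lock);
	if (refcnt == 0)		/* program hardware only on 0 -> 1 */
		rc = hw_enable();
	if (!rc)
		refcnt++;
	pthread_mutex_unlock(&lock);
	return rc;
}

static int dev_disable(void)
{
	int rc = 0;

	pthread_mutex_lock(&lock);
	if (--refcnt == 0)		/* tear down only on 1 -> 0 */
		rc = hw_disable();
	pthread_mutex_unlock(&lock);
	return rc;
}

int main(void)
{
	dev_enable();
	dev_enable();	/* nested user: no second hardware write */
	dev_disable();
	dev_disable();	/* last user: hardware disabled */
	return 0;
}
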
+diff --git a/drivers/hwtracing/coresight/coresight-catu.h b/drivers/hwtracing/coresight/coresight-catu.h
+index 141feac1c14b08..755776cd19c5bb 100644
+--- a/drivers/hwtracing/coresight/coresight-catu.h
++++ b/drivers/hwtracing/coresight/coresight-catu.h
+@@ -65,6 +65,7 @@ struct catu_drvdata {
+ 	void __iomem *base;
+ 	struct coresight_device *csdev;
+ 	int irq;
++	raw_spinlock_t spinlock;
+ };
+ 
+ #define CATU_REG32(name, offset)					\
+diff --git a/drivers/hwtracing/coresight/coresight-config.h b/drivers/hwtracing/coresight/coresight-config.h
+index 6ba01397574182..84cdde6f0e4db2 100644
+--- a/drivers/hwtracing/coresight/coresight-config.h
++++ b/drivers/hwtracing/coresight/coresight-config.h
+@@ -228,7 +228,7 @@ struct cscfg_feature_csdev {
+  * @feats_csdev:references to the device features to enable.
+  */
+ struct cscfg_config_csdev {
+-	const struct cscfg_config_desc *config_desc;
++	struct cscfg_config_desc *config_desc;
+ 	struct coresight_device *csdev;
+ 	bool enabled;
+ 	struct list_head node;
+diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c
+index c42aa9fddab9b7..c7e35a431ab002 100644
+--- a/drivers/hwtracing/coresight/coresight-core.c
++++ b/drivers/hwtracing/coresight/coresight-core.c
+@@ -1422,17 +1422,17 @@ module_init(coresight_init);
+ module_exit(coresight_exit);
+ 
+ int coresight_init_driver(const char *drv, struct amba_driver *amba_drv,
+-			  struct platform_driver *pdev_drv)
++			  struct platform_driver *pdev_drv, struct module *owner)
+ {
+ 	int ret;
+ 
+-	ret = amba_driver_register(amba_drv);
++	ret = __amba_driver_register(amba_drv, owner);
+ 	if (ret) {
+ 		pr_err("%s: error registering AMBA driver\n", drv);
+ 		return ret;
+ 	}
+ 
+-	ret = platform_driver_register(pdev_drv);
++	ret = __platform_driver_register(pdev_drv, owner);
+ 	if (!ret)
+ 		return 0;
+ 
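
coresight_init_driver() now threads the caller's THIS_MODULE down to __amba_driver_register() and __platform_driver_register(), so drivers registered through this shared helper are owned by the calling module rather than the core. Independent of the owner plumbing, the helper follows the common register-two, unwind-one shape; a runnable sketch with stand-in names (register_a/register_b are not kernel APIs):

#include <stdio.h>

static int register_a(void)    { puts("A registered");   return 0; }
static void unregister_a(void) { puts("A unregistered"); }
static int register_b(void)    { return -1; /* simulate failure */ }

/* Register both backends; on partial failure, unwind the first. */
static int init_driver(void)
{
	int ret = register_a();

	if (ret)
		return ret;

	ret = register_b();
	if (!ret)
		return 0;

	unregister_a();		/* don't leave a half-registered driver */
	return ret;
}

int main(void)
{
	printf("init_driver: %d\n", init_driver());
	return 0;
}
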
+diff --git a/drivers/hwtracing/coresight/coresight-cpu-debug.c b/drivers/hwtracing/coresight/coresight-cpu-debug.c
+index 75962dae9aa185..cc599c5ef4b224 100644
+--- a/drivers/hwtracing/coresight/coresight-cpu-debug.c
++++ b/drivers/hwtracing/coresight/coresight-cpu-debug.c
+@@ -774,7 +774,8 @@ static struct platform_driver debug_platform_driver = {
+ 
+ static int __init debug_init(void)
+ {
+-	return coresight_init_driver("debug", &debug_driver, &debug_platform_driver);
++	return coresight_init_driver("debug", &debug_driver, &debug_platform_driver,
++				     THIS_MODULE);
+ }
+ 
+ static void __exit debug_exit(void)
+diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c
+index 5a819c8970fbf5..8f451b051ddc33 100644
+--- a/drivers/hwtracing/coresight/coresight-funnel.c
++++ b/drivers/hwtracing/coresight/coresight-funnel.c
+@@ -433,7 +433,8 @@ static struct amba_driver dynamic_funnel_driver = {
+ 
+ static int __init funnel_init(void)
+ {
+-	return coresight_init_driver("funnel", &dynamic_funnel_driver, &funnel_driver);
++	return coresight_init_driver("funnel", &dynamic_funnel_driver, &funnel_driver,
++				     THIS_MODULE);
+ }
+ 
+ static void __exit funnel_exit(void)
+diff --git a/drivers/hwtracing/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c
+index 3e55be9c841861..f7607c72857c5b 100644
+--- a/drivers/hwtracing/coresight/coresight-replicator.c
++++ b/drivers/hwtracing/coresight/coresight-replicator.c
+@@ -438,7 +438,8 @@ static struct amba_driver dynamic_replicator_driver = {
+ 
+ static int __init replicator_init(void)
+ {
+-	return coresight_init_driver("replicator", &dynamic_replicator_driver, &replicator_driver);
++	return coresight_init_driver("replicator", &dynamic_replicator_driver, &replicator_driver,
++				     THIS_MODULE);
+ }
+ 
+ static void __exit replicator_exit(void)
+diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
+index cb3e04755c9929..65bc50a6d3e9ad 100644
+--- a/drivers/hwtracing/coresight/coresight-stm.c
++++ b/drivers/hwtracing/coresight/coresight-stm.c
+@@ -1047,7 +1047,7 @@ static struct platform_driver stm_platform_driver = {
+ 
+ static int __init stm_init(void)
+ {
+-	return coresight_init_driver("stm", &stm_driver, &stm_platform_driver);
++	return coresight_init_driver("stm", &stm_driver, &stm_platform_driver, THIS_MODULE);
+ }
+ 
+ static void __exit stm_exit(void)
+diff --git a/drivers/hwtracing/coresight/coresight-syscfg.c b/drivers/hwtracing/coresight/coresight-syscfg.c
+index 11138a9762b015..30a561d874819b 100644
+--- a/drivers/hwtracing/coresight/coresight-syscfg.c
++++ b/drivers/hwtracing/coresight/coresight-syscfg.c
+@@ -867,6 +867,25 @@ void cscfg_csdev_reset_feats(struct coresight_device *csdev)
+ }
+ EXPORT_SYMBOL_GPL(cscfg_csdev_reset_feats);
+ 
++static bool cscfg_config_desc_get(struct cscfg_config_desc *config_desc)
++{
++	if (!atomic_fetch_inc(&config_desc->active_cnt)) {
++		/* must ensure that config cannot be unloaded in use */
++		if (unlikely(cscfg_owner_get(config_desc->load_owner))) {
++			atomic_dec(&config_desc->active_cnt);
++			return false;
++		}
++	}
++
++	return true;
++}
++
++static void cscfg_config_desc_put(struct cscfg_config_desc *config_desc)
++{
++	if (!atomic_dec_return(&config_desc->active_cnt))
++		cscfg_owner_put(config_desc->load_owner);
++}
++
+ /*
+  * This activates a configuration for either perf or sysfs. Perf can have
+  * multiple active configs, selected per event; sysfs is limited to one.
+@@ -890,22 +909,17 @@ static int _cscfg_activate_config(unsigned long cfg_hash)
+ 			if (config_desc->available == false)
+ 				return -EBUSY;
+ 
+-			/* must ensure that config cannot be unloaded in use */
+-			err = cscfg_owner_get(config_desc->load_owner);
+-			if (err)
++			if (!cscfg_config_desc_get(config_desc)) {
++				err = -EINVAL;
+ 				break;
++			}
++
+ 			/*
+ 			 * increment the global active count - control changes to
+ 			 * active configurations
+ 			 */
+ 			atomic_inc(&cscfg_mgr->sys_active_cnt);
+ 
+-			/*
+-			 * mark the descriptor as active so enable config on a
+-			 * device instance will use it
+-			 */
+-			atomic_inc(&config_desc->active_cnt);
+-
+ 			err = 0;
+ 			dev_dbg(cscfg_device(), "Activate config %s.\n", config_desc->name);
+ 			break;
+@@ -920,9 +934,8 @@ static void _cscfg_deactivate_config(unsigned long cfg_hash)
+ 
+ 	list_for_each_entry(config_desc, &cscfg_mgr->config_desc_list, item) {
+ 		if ((unsigned long)config_desc->event_ea->var == cfg_hash) {
+-			atomic_dec(&config_desc->active_cnt);
+ 			atomic_dec(&cscfg_mgr->sys_active_cnt);
+-			cscfg_owner_put(config_desc->load_owner);
++			cscfg_config_desc_put(config_desc);
+ 			dev_dbg(cscfg_device(), "Deactivate config %s.\n", config_desc->name);
+ 			break;
+ 		}
+@@ -1047,7 +1060,7 @@ int cscfg_csdev_enable_active_config(struct coresight_device *csdev,
+ 				     unsigned long cfg_hash, int preset)
+ {
+ 	struct cscfg_config_csdev *config_csdev_active = NULL, *config_csdev_item;
+-	const struct cscfg_config_desc *config_desc;
++	struct cscfg_config_desc *config_desc;
+ 	unsigned long flags;
+ 	int err = 0;
+ 
+@@ -1062,8 +1075,8 @@ int cscfg_csdev_enable_active_config(struct coresight_device *csdev,
+ 	spin_lock_irqsave(&csdev->cscfg_csdev_lock, flags);
+ 	list_for_each_entry(config_csdev_item, &csdev->config_csdev_list, node) {
+ 		config_desc = config_csdev_item->config_desc;
+-		if ((atomic_read(&config_desc->active_cnt)) &&
+-		    ((unsigned long)config_desc->event_ea->var == cfg_hash)) {
++		if (((unsigned long)config_desc->event_ea->var == cfg_hash) &&
++				cscfg_config_desc_get(config_desc)) {
+ 			config_csdev_active = config_csdev_item;
+ 			csdev->active_cscfg_ctxt = (void *)config_csdev_active;
+ 			break;
+@@ -1097,7 +1110,11 @@ int cscfg_csdev_enable_active_config(struct coresight_device *csdev,
+ 				err = -EBUSY;
+ 			spin_unlock_irqrestore(&csdev->cscfg_csdev_lock, flags);
+ 		}
++
++		if (err)
++			cscfg_config_desc_put(config_desc);
+ 	}
++
+ 	return err;
+ }
+ EXPORT_SYMBOL_GPL(cscfg_csdev_enable_active_config);
+@@ -1136,8 +1153,10 @@ void cscfg_csdev_disable_active_config(struct coresight_device *csdev)
+ 	spin_unlock_irqrestore(&csdev->cscfg_csdev_lock, flags);
+ 
+ 	/* true if there was an enabled active config */
+-	if (config_csdev)
++	if (config_csdev) {
+ 		cscfg_csdev_disable_config(config_csdev);
++		cscfg_config_desc_put(config_csdev->config_desc);
++	}
+ }
+ EXPORT_SYMBOL_GPL(cscfg_csdev_disable_active_config);
+ 
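
The new cscfg_config_desc_get()/cscfg_config_desc_put() pair above ties the module pin to the 0 -> 1 and 1 -> 0 transitions of active_cnt, so a configuration in use can never have its owner unloaded. A userspace sketch of the same transition-gated get/put (owner_get/owner_put are stand-ins, with a success-returning convention rather than the kernel's 0-on-success):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int active_cnt;

static bool owner_get(void) { puts("module pinned");   return true; }
static void owner_put(void) { puts("module unpinned"); }

/* First activation pins the owning module; later ones only count. */
static bool config_get(void)
{
	if (!atomic_fetch_add(&active_cnt, 1)) {
		if (!owner_get()) {
			atomic_fetch_sub(&active_cnt, 1);
			return false;
		}
	}
	return true;
}

/* Last deactivation releases the module pin. */
static void config_put(void)
{
	if (atomic_fetch_sub(&active_cnt, 1) == 1)
		owner_put();
}

int main(void)
{
	config_get();
	config_get();
	config_put();
	config_put();	/* count hits zero: owner released */
	return 0;
}
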
+diff --git a/drivers/hwtracing/coresight/coresight-tmc-core.c b/drivers/hwtracing/coresight/coresight-tmc-core.c
+index 3a482fd2cb225b..475fa4bb6813b9 100644
+--- a/drivers/hwtracing/coresight/coresight-tmc-core.c
++++ b/drivers/hwtracing/coresight/coresight-tmc-core.c
+@@ -741,7 +741,7 @@ static struct platform_driver tmc_platform_driver = {
+ 
+ static int __init tmc_init(void)
+ {
+-	return coresight_init_driver("tmc", &tmc_driver, &tmc_platform_driver);
++	return coresight_init_driver("tmc", &tmc_driver, &tmc_platform_driver, THIS_MODULE);
+ }
+ 
+ static void __exit tmc_exit(void)
+diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c
+index b048e146fbb108..f9ecd05cbe5c5e 100644
+--- a/drivers/hwtracing/coresight/coresight-tpiu.c
++++ b/drivers/hwtracing/coresight/coresight-tpiu.c
+@@ -318,7 +318,7 @@ static struct platform_driver tpiu_platform_driver = {
+ 
+ static int __init tpiu_init(void)
+ {
+-	return coresight_init_driver("tpiu", &tpiu_driver, &tpiu_platform_driver);
++	return coresight_init_driver("tpiu", &tpiu_driver, &tpiu_platform_driver, THIS_MODULE);
+ }
+ 
+ static void __exit tpiu_exit(void)
+diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c
+index 30a7392c4f8b95..9c9e0c950b4272 100644
+--- a/drivers/iio/adc/ad7124.c
++++ b/drivers/iio/adc/ad7124.c
+@@ -300,9 +300,9 @@ static int ad7124_get_3db_filter_freq(struct ad7124_state *st,
+ 
+ 	switch (st->channels[channel].cfg.filter_type) {
+ 	case AD7124_SINC3_FILTER:
+-		return DIV_ROUND_CLOSEST(fadc * 230, 1000);
++		return DIV_ROUND_CLOSEST(fadc * 272, 1000);
+ 	case AD7124_SINC4_FILTER:
+-		return DIV_ROUND_CLOSEST(fadc * 262, 1000);
++		return DIV_ROUND_CLOSEST(fadc * 230, 1000);
+ 	default:
+ 		return -EINVAL;
+ 	}
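
The ad7124 hunk corrects the -3 dB cutoff scaling so sinc3 uses roughly 0.272 x f_ADC and sinc4 roughly 0.230 x f_ADC (the old code applied the sinc4 factor to sinc3 and a stale 0.262 to sinc4). A quick standalone check of the arithmetic, using the kernel's round-to-nearest division and an arbitrary example rate:

#include <stdio.h>

/* Round-to-nearest integer division, like the kernel's DIV_ROUND_CLOSEST. */
#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	unsigned int fadc = 50;	/* output data rate in Hz, example value */

	/* sinc3: f_3dB ~= 0.272 * f_ADC; sinc4: f_3dB ~= 0.230 * f_ADC */
	printf("sinc3: %u Hz\n", DIV_ROUND_CLOSEST(fadc * 272, 1000)); /* 14 */
	printf("sinc4: %u Hz\n", DIV_ROUND_CLOSEST(fadc * 230, 1000)); /* 12 */
	return 0;
}
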
+diff --git a/drivers/iio/adc/mcp3911.c b/drivers/iio/adc/mcp3911.c
+index b097f04172c80b..4bd6b5aac4fe80 100644
+--- a/drivers/iio/adc/mcp3911.c
++++ b/drivers/iio/adc/mcp3911.c
+@@ -6,7 +6,7 @@
+  * Copyright (C) 2018 Kent Gustavsson <kent@minoris.se>
+  */
+ #include <linux/bitfield.h>
+-#include <linux/bits.h>
++#include <linux/bitops.h>
+ #include <linux/cleanup.h>
+ #include <linux/clk.h>
+ #include <linux/delay.h>
+@@ -79,6 +79,8 @@
+ #define MCP3910_CONFIG1_CLKEXT		BIT(6)
+ #define MCP3910_CONFIG1_VREFEXT		BIT(7)
+ 
++#define MCP3910_CHANNEL(ch)		(MCP3911_REG_CHANNEL0 + (ch))
++
+ #define MCP3910_REG_OFFCAL_CH0		0x0f
+ #define MCP3910_OFFCAL(ch)		(MCP3910_REG_OFFCAL_CH0 + (ch) * 6)
+ 
+@@ -110,6 +112,7 @@ struct mcp3911_chip_info {
+ 	int (*get_offset)(struct mcp3911 *adc, int channel, int *val);
+ 	int (*set_offset)(struct mcp3911 *adc, int channel, int val);
+ 	int (*set_scale)(struct mcp3911 *adc, int channel, u32 val);
++	int (*get_raw)(struct mcp3911 *adc, int channel, int *val);
+ };
+ 
+ struct mcp3911 {
+@@ -170,6 +173,18 @@ static int mcp3911_update(struct mcp3911 *adc, u8 reg, u32 mask, u32 val, u8 len
+ 	return mcp3911_write(adc, reg, val, len);
+ }
+ 
++static int mcp3911_read_s24(struct mcp3911 *const adc, u8 const reg, s32 *const val)
++{
++	u32 uval;
++	int const ret = mcp3911_read(adc, reg, &uval, 3);
++
++	if (ret)
++		return ret;
++
++	*val = sign_extend32(uval, 23);
++	return ret;
++}
++
+ static int mcp3910_enable_offset(struct mcp3911 *adc, bool enable)
+ {
+ 	unsigned int mask = MCP3910_CONFIG0_EN_OFFCAL;
+@@ -194,6 +209,11 @@ static int mcp3910_set_offset(struct mcp3911 *adc, int channel, int val)
+ 	return adc->chip->enable_offset(adc, 1);
+ }
+ 
++static int mcp3910_get_raw(struct mcp3911 *adc, int channel, s32 *val)
++{
++	return mcp3911_read_s24(adc, MCP3910_CHANNEL(channel), val);
++}
++
+ static int mcp3911_enable_offset(struct mcp3911 *adc, bool enable)
+ {
+ 	unsigned int mask = MCP3911_STATUSCOM_EN_OFFCAL;
+@@ -218,6 +238,11 @@ static int mcp3911_set_offset(struct mcp3911 *adc, int channel, int val)
+ 	return adc->chip->enable_offset(adc, 1);
+ }
+ 
++static int mcp3911_get_raw(struct mcp3911 *adc, int channel, s32 *val)
++{
++	return mcp3911_read_s24(adc, MCP3911_CHANNEL(channel), val);
++}
++
+ static int mcp3910_get_osr(struct mcp3911 *adc, u32 *val)
+ {
+ 	int ret;
+@@ -321,12 +346,9 @@ static int mcp3911_read_raw(struct iio_dev *indio_dev,
+ 	guard(mutex)(&adc->lock);
+ 	switch (mask) {
+ 	case IIO_CHAN_INFO_RAW:
+-		ret = mcp3911_read(adc,
+-				   MCP3911_CHANNEL(channel->channel), val, 3);
++		ret = adc->chip->get_raw(adc, channel->channel, val);
+ 		if (ret)
+ 			return ret;
+-
+-		*val = sign_extend32(*val, 23);
+ 		return IIO_VAL_INT;
+ 	case IIO_CHAN_INFO_OFFSET:
+ 		ret = adc->chip->get_offset(adc, channel->channel, val);
+@@ -799,6 +821,7 @@ static const struct mcp3911_chip_info mcp3911_chip_info[] = {
+ 		.get_offset = mcp3910_get_offset,
+ 		.set_offset = mcp3910_set_offset,
+ 		.set_scale = mcp3910_set_scale,
++		.get_raw = mcp3910_get_raw,
+ 	},
+ 	[MCP3911] = {
+ 		.channels = mcp3911_channels,
+@@ -810,6 +833,7 @@ static const struct mcp3911_chip_info mcp3911_chip_info[] = {
+ 		.get_offset = mcp3911_get_offset,
+ 		.set_offset = mcp3911_set_offset,
+ 		.set_scale = mcp3911_set_scale,
++		.get_raw = mcp3911_get_raw,
+ 	},
+ 	[MCP3912] = {
+ 		.channels = mcp3912_channels,
+@@ -821,6 +845,7 @@ static const struct mcp3911_chip_info mcp3911_chip_info[] = {
+ 		.get_offset = mcp3910_get_offset,
+ 		.set_offset = mcp3910_set_offset,
+ 		.set_scale = mcp3910_set_scale,
++		.get_raw = mcp3910_get_raw,
+ 	},
+ 	[MCP3913] = {
+ 		.channels = mcp3913_channels,
+@@ -832,6 +857,7 @@ static const struct mcp3911_chip_info mcp3911_chip_info[] = {
+ 		.get_offset = mcp3910_get_offset,
+ 		.set_offset = mcp3910_set_offset,
+ 		.set_scale = mcp3910_set_scale,
++		.get_raw = mcp3910_get_raw,
+ 	},
+ 	[MCP3914] = {
+ 		.channels = mcp3914_channels,
+@@ -843,6 +869,7 @@ static const struct mcp3911_chip_info mcp3911_chip_info[] = {
+ 		.get_offset = mcp3910_get_offset,
+ 		.set_offset = mcp3910_set_offset,
+ 		.set_scale = mcp3910_set_scale,
++		.get_raw = mcp3910_get_raw,
+ 	},
+ 	[MCP3918] = {
+ 		.channels = mcp3918_channels,
+@@ -854,6 +881,7 @@ static const struct mcp3911_chip_info mcp3911_chip_info[] = {
+ 		.get_offset = mcp3910_get_offset,
+ 		.set_offset = mcp3910_set_offset,
+ 		.set_scale = mcp3910_set_scale,
++		.get_raw = mcp3910_get_raw,
+ 	},
+ 	[MCP3919] = {
+ 		.channels = mcp3919_channels,
+@@ -865,6 +893,7 @@ static const struct mcp3911_chip_info mcp3911_chip_info[] = {
+ 		.get_offset = mcp3910_get_offset,
+ 		.set_offset = mcp3910_set_offset,
+ 		.set_scale = mcp3910_set_scale,
++		.get_raw = mcp3910_get_raw,
+ 	},
+ };
+ static const struct of_device_id mcp3911_dt_ids[] = {
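
The mcp3911 refactor above funnels raw channel reads through mcp3911_read_s24(), which widens the 24-bit two's-complement sample with sign_extend32(). The bit trick, reproduced standalone:

#include <stdint.h>
#include <stdio.h>

/* Same trick as the kernel's sign_extend32(): shift the sign bit up to
 * bit 31, then arithmetic-shift back down. */
static int32_t sign_extend32(uint32_t value, int index)
{
	uint8_t shift = 31 - index;

	return (int32_t)(value << shift) >> shift;
}

int main(void)
{
	/* A 24-bit two's-complement sample as read from the ADC registers. */
	uint32_t raw = 0xFFFFFE;			/* 24-bit -2 */

	printf("%d\n", sign_extend32(raw, 23));		/* prints -2 */
	return 0;
}
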
+diff --git a/drivers/iio/adc/pac1934.c b/drivers/iio/adc/pac1934.c
+index 7ef249d8328661..c3f9fa307b84c2 100644
+--- a/drivers/iio/adc/pac1934.c
++++ b/drivers/iio/adc/pac1934.c
+@@ -1081,7 +1081,7 @@ static int pac1934_chip_identify(struct pac1934_chip_info *info)
+ 
+ /*
+  * documentation related to the ACPI device definition
+- * https://ww1.microchip.com/downloads/aemDocuments/documents/OTH/ApplicationNotes/ApplicationNotes/PAC1934-Integration-Notes-for-Microsoft-Windows-10-and-Windows-11-Driver-Support-DS00002534.pdf
++ * https://ww1.microchip.com/downloads/aemDocuments/documents/OTH/ApplicationNotes/ApplicationNotes/PAC193X-Integration-Notes-for-Microsoft-Windows-10-and-Windows-11-Driver-Support-DS00002534.pdf
+  */
+ static int pac1934_acpi_parse_channel_config(struct i2c_client *client,
+ 					     struct pac1934_chip_info *info)
+diff --git a/drivers/iio/filter/admv8818.c b/drivers/iio/filter/admv8818.c
+index d85b7d3de86604..cc8ce0fe74e7c6 100644
+--- a/drivers/iio/filter/admv8818.c
++++ b/drivers/iio/filter/admv8818.c
+@@ -14,6 +14,7 @@
+ #include <linux/mod_devicetable.h>
+ #include <linux/mutex.h>
+ #include <linux/notifier.h>
++#include <linux/property.h>
+ #include <linux/regmap.h>
+ #include <linux/spi/spi.h>
+ #include <linux/units.h>
+@@ -70,6 +71,16 @@
+ #define ADMV8818_HPF_WR0_MSK			GENMASK(7, 4)
+ #define ADMV8818_LPF_WR0_MSK			GENMASK(3, 0)
+ 
++#define ADMV8818_BAND_BYPASS       0
++#define ADMV8818_BAND_MIN          1
++#define ADMV8818_BAND_MAX          4
++#define ADMV8818_BAND_CORNER_LOW   0
++#define ADMV8818_BAND_CORNER_HIGH  1
++
++#define ADMV8818_STATE_MIN   0
++#define ADMV8818_STATE_MAX   15
++#define ADMV8818_NUM_STATES  16
++
+ enum {
+ 	ADMV8818_BW_FREQ,
+ 	ADMV8818_CENTER_FREQ
+@@ -90,20 +101,24 @@ struct admv8818_state {
+ 	struct mutex		lock;
+ 	unsigned int		filter_mode;
+ 	u64			cf_hz;
++	u64			lpf_margin_hz;
++	u64			hpf_margin_hz;
+ };
+ 
+-static const unsigned long long freq_range_hpf[4][2] = {
++static const unsigned long long freq_range_hpf[5][2] = {
++	{0ULL, 0ULL}, /* bypass */
+ 	{1750000000ULL, 3550000000ULL},
+ 	{3400000000ULL, 7250000000ULL},
+ 	{6600000000, 12000000000},
+ 	{12500000000, 19900000000}
+ };
+ 
+-static const unsigned long long freq_range_lpf[4][2] = {
++static const unsigned long long freq_range_lpf[5][2] = {
++	{U64_MAX, U64_MAX}, /* bypass */
+ 	{2050000000ULL, 3850000000ULL},
+ 	{3350000000ULL, 7250000000ULL},
+ 	{7000000000, 13000000000},
+-	{12550000000, 18500000000}
++	{12550000000, 18850000000}
+ };
+ 
+ static const struct regmap_config admv8818_regmap_config = {
+@@ -121,44 +136,59 @@ static const char * const admv8818_modes[] = {
+ 
+ static int __admv8818_hpf_select(struct admv8818_state *st, u64 freq)
+ {
+-	unsigned int hpf_step = 0, hpf_band = 0, i, j;
+-	u64 freq_step;
+-	int ret;
++	int band, state, ret;
++	unsigned int hpf_state = ADMV8818_STATE_MIN, hpf_band = ADMV8818_BAND_BYPASS;
++	u64 freq_error, min_freq_error, freq_corner, freq_step;
+ 
+-	if (freq < freq_range_hpf[0][0])
++	if (freq < freq_range_hpf[ADMV8818_BAND_MIN][ADMV8818_BAND_CORNER_LOW])
+ 		goto hpf_write;
+ 
+-	if (freq > freq_range_hpf[3][1]) {
+-		hpf_step = 15;
+-		hpf_band = 4;
+-
++	if (freq >= freq_range_hpf[ADMV8818_BAND_MAX][ADMV8818_BAND_CORNER_HIGH]) {
++		hpf_state = ADMV8818_STATE_MAX;
++		hpf_band = ADMV8818_BAND_MAX;
+ 		goto hpf_write;
+ 	}
+ 
+-	for (i = 0; i < 4; i++) {
+-		freq_step = div_u64((freq_range_hpf[i][1] -
+-			freq_range_hpf[i][0]), 15);
++	/* Close HPF frequency gap between 12 and 12.5 GHz */
++	if (freq >= 12000ULL * HZ_PER_MHZ && freq < 12500ULL * HZ_PER_MHZ) {
++		hpf_state = ADMV8818_STATE_MAX;
++		hpf_band = 3;
++		goto hpf_write;
++	}
+ 
+-		if (freq > freq_range_hpf[i][0] &&
+-		    (freq < freq_range_hpf[i][1] + freq_step)) {
+-			hpf_band = i + 1;
++	min_freq_error = U64_MAX;
++	for (band = ADMV8818_BAND_MIN; band <= ADMV8818_BAND_MAX; band++) {
++		/*
++		 * This range (and therefore all later ranges) has a corner
++		 * frequency higher than the target frequency.
++		 */
++		if (freq_range_hpf[band][ADMV8818_BAND_CORNER_LOW] > freq)
++			break;
+ 
+-			for (j = 1; j <= 16; j++) {
+-				if (freq < (freq_range_hpf[i][0] + (freq_step * j))) {
+-					hpf_step = j - 1;
+-					break;
+-				}
++		freq_step = freq_range_hpf[band][ADMV8818_BAND_CORNER_HIGH] -
++			    freq_range_hpf[band][ADMV8818_BAND_CORNER_LOW];
++		freq_step = div_u64(freq_step, ADMV8818_NUM_STATES - 1);
++
++		for (state = ADMV8818_STATE_MIN; state <= ADMV8818_STATE_MAX; state++) {
++			freq_corner = freq_range_hpf[band][ADMV8818_BAND_CORNER_LOW] +
++				      freq_step * state;
++
++			/*
++			 * This state (and therefore all later states) has a corner
++			 * frequency higher than the target frequency.
++			 */
++			if (freq_corner > freq)
++				break;
++
++			freq_error = freq - freq_corner;
++			if (freq_error < min_freq_error) {
++				min_freq_error = freq_error;
++				hpf_state = state;
++				hpf_band = band;
+ 			}
+-			break;
+ 		}
+ 	}
+ 
+-	/* Close HPF frequency gap between 12 and 12.5 GHz */
+-	if (freq >= 12000 * HZ_PER_MHZ && freq <= 12500 * HZ_PER_MHZ) {
+-		hpf_band = 3;
+-		hpf_step = 15;
+-	}
+-
+ hpf_write:
+ 	ret = regmap_update_bits(st->regmap, ADMV8818_REG_WR0_SW,
+ 				 ADMV8818_SW_IN_SET_WR0_MSK |
+@@ -170,7 +200,7 @@ static int __admv8818_hpf_select(struct admv8818_state *st, u64 freq)
+ 
+ 	return regmap_update_bits(st->regmap, ADMV8818_REG_WR0_FILTER,
+ 				  ADMV8818_HPF_WR0_MSK,
+-				  FIELD_PREP(ADMV8818_HPF_WR0_MSK, hpf_step));
++				  FIELD_PREP(ADMV8818_HPF_WR0_MSK, hpf_state));
+ }
+ 
+ static int admv8818_hpf_select(struct admv8818_state *st, u64 freq)
+@@ -186,31 +216,52 @@ static int admv8818_hpf_select(struct admv8818_state *st, u64 freq)
+ 
+ static int __admv8818_lpf_select(struct admv8818_state *st, u64 freq)
+ {
+-	unsigned int lpf_step = 0, lpf_band = 0, i, j;
+-	u64 freq_step;
+-	int ret;
++	int band, state, ret;
++	unsigned int lpf_state = ADMV8818_STATE_MIN, lpf_band = ADMV8818_BAND_BYPASS;
++	u64 freq_error, min_freq_error, freq_corner, freq_step;
+ 
+-	if (freq > freq_range_lpf[3][1])
++	if (freq > freq_range_lpf[ADMV8818_BAND_MAX][ADMV8818_BAND_CORNER_HIGH])
+ 		goto lpf_write;
+ 
+-	if (freq < freq_range_lpf[0][0]) {
+-		lpf_band = 1;
+-
++	if (freq < freq_range_lpf[ADMV8818_BAND_MIN][ADMV8818_BAND_CORNER_LOW]) {
++		lpf_state = ADMV8818_STATE_MIN;
++		lpf_band = ADMV8818_BAND_MIN;
+ 		goto lpf_write;
+ 	}
+ 
+-	for (i = 0; i < 4; i++) {
+-		if (freq > freq_range_lpf[i][0] && freq < freq_range_lpf[i][1]) {
+-			lpf_band = i + 1;
+-			freq_step = div_u64((freq_range_lpf[i][1] - freq_range_lpf[i][0]), 15);
++	min_freq_error = U64_MAX;
++	for (band = ADMV8818_BAND_MAX; band >= ADMV8818_BAND_MIN; --band) {
++		/*
++		 * At this point the highest corner frequency of
++		 * all remaining ranges is below the target.
++		 * LPF corner should be >= the target.
++		 */
++		if (freq > freq_range_lpf[band][ADMV8818_BAND_CORNER_HIGH])
++			break;
++
++		freq_step = freq_range_lpf[band][ADMV8818_BAND_CORNER_HIGH] -
++			    freq_range_lpf[band][ADMV8818_BAND_CORNER_LOW];
++		freq_step = div_u64(freq_step, ADMV8818_NUM_STATES - 1);
++
++		for (state = ADMV8818_STATE_MAX; state >= ADMV8818_STATE_MIN; --state) {
++			freq_corner = freq_range_lpf[band][ADMV8818_BAND_CORNER_LOW] +
++				      state * freq_step;
+ 
+-			for (j = 0; j <= 15; j++) {
+-				if (freq < (freq_range_lpf[i][0] + (freq_step * j))) {
+-					lpf_step = j;
+-					break;
+-				}
++			/*
++			 * At this point all other states in range will
++			 * place the corner frequency below the target.
++			 * The LPF corner should be >= the target.
++			 */
++			if (freq > freq_corner)
++				break;
++
++			freq_error = freq_corner - freq;
++			if (freq_error < min_freq_error) {
++				min_freq_error = freq_error;
++				lpf_state = state;
++				lpf_band = band;
+ 			}
+-			break;
+ 		}
+ 	}
+ 
+@@ -225,7 +276,7 @@ static int __admv8818_lpf_select(struct admv8818_state *st, u64 freq)
+ 
+ 	return regmap_update_bits(st->regmap, ADMV8818_REG_WR0_FILTER,
+ 				  ADMV8818_LPF_WR0_MSK,
+-				  FIELD_PREP(ADMV8818_LPF_WR0_MSK, lpf_step));
++				  FIELD_PREP(ADMV8818_LPF_WR0_MSK, lpf_state));
+ }
+ 
+ static int admv8818_lpf_select(struct admv8818_state *st, u64 freq)
+@@ -242,16 +293,28 @@ static int admv8818_lpf_select(struct admv8818_state *st, u64 freq)
+ static int admv8818_rfin_band_select(struct admv8818_state *st)
+ {
+ 	int ret;
++	u64 hpf_corner_target, lpf_corner_target;
+ 
+ 	st->cf_hz = clk_get_rate(st->clkin);
+ 
++	/* Check for underflow */
++	if (st->cf_hz > st->hpf_margin_hz)
++		hpf_corner_target = st->cf_hz - st->hpf_margin_hz;
++	else
++		hpf_corner_target = 0;
++
++	/* Check for overflow */
++	lpf_corner_target = st->cf_hz + st->lpf_margin_hz;
++	if (lpf_corner_target < st->cf_hz)
++		lpf_corner_target = U64_MAX;
++
+ 	mutex_lock(&st->lock);
+ 
+-	ret = __admv8818_hpf_select(st, st->cf_hz);
++	ret = __admv8818_hpf_select(st, hpf_corner_target);
+ 	if (ret)
+ 		goto exit;
+ 
+-	ret = __admv8818_lpf_select(st, st->cf_hz);
++	ret = __admv8818_lpf_select(st, lpf_corner_target);
+ exit:
+ 	mutex_unlock(&st->lock);
+ 	return ret;
+@@ -278,8 +341,11 @@ static int __admv8818_read_hpf_freq(struct admv8818_state *st, u64 *hpf_freq)
+ 
+ 	hpf_state = FIELD_GET(ADMV8818_HPF_WR0_MSK, data);
+ 
+-	*hpf_freq = div_u64(freq_range_hpf[hpf_band - 1][1] - freq_range_hpf[hpf_band - 1][0], 15);
+-	*hpf_freq = freq_range_hpf[hpf_band - 1][0] + (*hpf_freq * hpf_state);
++	*hpf_freq = freq_range_hpf[hpf_band][ADMV8818_BAND_CORNER_HIGH] -
++		    freq_range_hpf[hpf_band][ADMV8818_BAND_CORNER_LOW];
++	*hpf_freq = div_u64(*hpf_freq, ADMV8818_NUM_STATES - 1);
++	*hpf_freq = freq_range_hpf[hpf_band][ADMV8818_BAND_CORNER_LOW] +
++		    (*hpf_freq * hpf_state);
+ 
+ 	return ret;
+ }
+@@ -316,8 +382,11 @@ static int __admv8818_read_lpf_freq(struct admv8818_state *st, u64 *lpf_freq)
+ 
+ 	lpf_state = FIELD_GET(ADMV8818_LPF_WR0_MSK, data);
+ 
+-	*lpf_freq = div_u64(freq_range_lpf[lpf_band - 1][1] - freq_range_lpf[lpf_band - 1][0], 15);
+-	*lpf_freq = freq_range_lpf[lpf_band - 1][0] + (*lpf_freq * lpf_state);
++	*lpf_freq = freq_range_lpf[lpf_band][ADMV8818_BAND_CORNER_HIGH] -
++		    freq_range_lpf[lpf_band][ADMV8818_BAND_CORNER_LOW];
++	*lpf_freq = div_u64(*lpf_freq, ADMV8818_NUM_STATES - 1);
++	*lpf_freq = freq_range_lpf[lpf_band][ADMV8818_BAND_CORNER_LOW] +
++		    (*lpf_freq * lpf_state);
+ 
+ 	return ret;
+ }
+@@ -333,6 +402,19 @@ static int admv8818_read_lpf_freq(struct admv8818_state *st, u64 *lpf_freq)
+ 	return ret;
+ }
+ 
++static int admv8818_write_raw_get_fmt(struct iio_dev *indio_dev,
++				      struct iio_chan_spec const *chan,
++				      long mask)
++{
++	switch (mask) {
++	case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
++	case IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY:
++		return IIO_VAL_INT_64;
++	default:
++		return -EINVAL;
++	}
++}
++
+ static int admv8818_write_raw(struct iio_dev *indio_dev,
+ 			      struct iio_chan_spec const *chan,
+ 			      int val, int val2, long info)
+@@ -341,6 +423,9 @@ static int admv8818_write_raw(struct iio_dev *indio_dev,
+ 
+ 	u64 freq = ((u64)val2 << 32 | (u32)val);
+ 
++	if ((s64)freq < 0)
++		return -EINVAL;
++
+ 	switch (info) {
+ 	case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ 		return admv8818_lpf_select(st, freq);
+@@ -502,6 +587,7 @@ static int admv8818_set_mode(struct iio_dev *indio_dev,
+ 
+ static const struct iio_info admv8818_info = {
+ 	.write_raw = admv8818_write_raw,
++	.write_raw_get_fmt = admv8818_write_raw_get_fmt,
+ 	.read_raw = admv8818_read_raw,
+ 	.debugfs_reg_access = &admv8818_reg_access,
+ };
+@@ -641,6 +727,32 @@ static int admv8818_clk_setup(struct admv8818_state *st)
+ 	return devm_add_action_or_reset(&spi->dev, admv8818_clk_notifier_unreg, st);
+ }
+ 
++static int admv8818_read_properties(struct admv8818_state *st)
++{
++	struct spi_device *spi = st->spi;
++	u32 mhz;
++	int ret;
++
++	ret = device_property_read_u32(&spi->dev, "adi,lpf-margin-mhz", &mhz);
++	if (ret == 0)
++		st->lpf_margin_hz = (u64)mhz * HZ_PER_MHZ;
++	else if (ret == -EINVAL)
++		st->lpf_margin_hz = 0;
++	else
++		return ret;
++
++	ret = device_property_read_u32(&spi->dev, "adi,hpf-margin-mhz", &mhz);
++	if (ret == 0)
++		st->hpf_margin_hz = (u64)mhz * HZ_PER_MHZ;
++	else if (ret == -EINVAL)
++		st->hpf_margin_hz = 0;
++	else if (ret < 0)
++		return ret;
++
++	return 0;
++}
++
+ static int admv8818_probe(struct spi_device *spi)
+ {
+ 	struct iio_dev *indio_dev;
+@@ -672,6 +784,10 @@ static int admv8818_probe(struct spi_device *spi)
+ 
+ 	mutex_init(&st->lock);
+ 
++	ret = admv8818_read_properties(st);
++	if (ret)
++		return ret;
++
+ 	ret = admv8818_init(st);
+ 	if (ret)
+ 		return ret;
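
With the admv8818 rework above, admv8818_rfin_band_select() biases the filter corner targets by optional devicetree margins, clamping rather than wrapping when the u64 math would underflow or overflow. A standalone sketch of that clamping (values illustrative):

#include <stdint.h>
#include <stdio.h>

/* Derive HPF/LPF corner targets from a centre frequency and margins,
 * saturating instead of wrapping on u64 under/overflow. */
static void corner_targets(uint64_t cf_hz, uint64_t hpf_margin_hz,
			   uint64_t lpf_margin_hz,
			   uint64_t *hpf_target, uint64_t *lpf_target)
{
	*hpf_target = cf_hz > hpf_margin_hz ? cf_hz - hpf_margin_hz : 0;

	*lpf_target = cf_hz + lpf_margin_hz;
	if (*lpf_target < cf_hz)		/* wrapped: saturate */
		*lpf_target = UINT64_MAX;
}

int main(void)
{
	uint64_t hpf, lpf;

	corner_targets(10000000000ULL, 500000000ULL, 500000000ULL, &hpf, &lpf);
	printf("hpf=%llu lpf=%llu\n",
	       (unsigned long long)hpf, (unsigned long long)lpf);
	return 0;
}
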
+diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
+index 07fb8d3c037f00..d45e3909dafe1d 100644
+--- a/drivers/infiniband/core/cm.c
++++ b/drivers/infiniband/core/cm.c
+@@ -166,7 +166,7 @@ struct cm_port {
+ struct cm_device {
+ 	struct kref kref;
+ 	struct list_head list;
+-	spinlock_t mad_agent_lock;
++	rwlock_t mad_agent_lock;
+ 	struct ib_device *ib_device;
+ 	u8 ack_delay;
+ 	int going_down;
+@@ -284,7 +284,7 @@ static struct ib_mad_send_buf *cm_alloc_msg(struct cm_id_private *cm_id_priv)
+ 	if (!cm_id_priv->av.port)
+ 		return ERR_PTR(-EINVAL);
+ 
+-	spin_lock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
++	read_lock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
+ 	mad_agent = cm_id_priv->av.port->mad_agent;
+ 	if (!mad_agent) {
+ 		m = ERR_PTR(-EINVAL);
+@@ -315,7 +315,7 @@ static struct ib_mad_send_buf *cm_alloc_msg(struct cm_id_private *cm_id_priv)
+ 	m->context[0] = cm_id_priv;
+ 
+ out:
+-	spin_unlock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
++	read_unlock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
+ 	return m;
+ }
+ 
+@@ -1294,10 +1294,10 @@ static __be64 cm_form_tid(struct cm_id_private *cm_id_priv)
+ 	if (!cm_id_priv->av.port)
+ 		return cpu_to_be64(low_tid);
+ 
+-	spin_lock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
++	read_lock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
+ 	if (cm_id_priv->av.port->mad_agent)
+ 		hi_tid = ((u64)cm_id_priv->av.port->mad_agent->hi_tid) << 32;
+-	spin_unlock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
++	read_unlock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
+ 	return cpu_to_be64(hi_tid | low_tid);
+ }
+ 
+@@ -4374,7 +4374,7 @@ static int cm_add_one(struct ib_device *ib_device)
+ 		return -ENOMEM;
+ 
+ 	kref_init(&cm_dev->kref);
+-	spin_lock_init(&cm_dev->mad_agent_lock);
++	rwlock_init(&cm_dev->mad_agent_lock);
+ 	cm_dev->ib_device = ib_device;
+ 	cm_dev->ack_delay = ib_device->attrs.local_ca_ack_delay;
+ 	cm_dev->going_down = 0;
+@@ -4490,9 +4490,9 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data)
+ 		 * The above ensures no call paths from the work are running,
+ 		 * the remaining paths all take the mad_agent_lock.
+ 		 */
+-		spin_lock(&cm_dev->mad_agent_lock);
++		write_lock(&cm_dev->mad_agent_lock);
+ 		port->mad_agent = NULL;
+-		spin_unlock(&cm_dev->mad_agent_lock);
++		write_unlock(&cm_dev->mad_agent_lock);
+ 		ib_unregister_mad_agent(mad_agent);
+ 		ib_port_unregister_client_groups(ib_device, i,
+ 						 cm_counter_groups);
+diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
+index 176d0b3e448870..81bc24a346d370 100644
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -5231,7 +5231,8 @@ static int cma_netevent_callback(struct notifier_block *self,
+ 			   neigh->ha, ETH_ALEN))
+ 			continue;
+ 		cma_id_get(current_id);
+-		queue_work(cma_wq, &current_id->id.net_work);
++		if (!queue_work(cma_wq, &current_id->id.net_work))
++			cma_id_put(current_id);
+ 	}
+ out:
+ 	spin_unlock_irqrestore(&id_table_lock, flags);
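
The cma fix above accounts for queue_work() returning false when the work item is already pending: the reference taken for the queued work is not going to be consumed by a second execution, so it must be dropped immediately or it leaks. A toy model of the pattern (the mock queue just tracks a pending flag):

#include <stdbool.h>
#include <stdio.h>

struct id_entry {
	int  refs;
	bool work_pending;
};

static void id_get(struct id_entry *id) { id->refs++; }
static void id_put(struct id_entry *id) { id->refs--; }

/* Mirrors queue_work(): returns false if the work is already queued. */
static bool queue_net_work(struct id_entry *id)
{
	if (id->work_pending)
		return false;
	id->work_pending = true;
	return true;
}

int main(void)
{
	struct id_entry id = { .refs = 1 };

	id_get(&id);			/* ref for the queued work */
	queue_net_work(&id);		/* first queueing succeeds */

	id_get(&id);
	if (!queue_net_work(&id))	/* already queued: nothing will */
		id_put(&id);		/* consume this ref, so drop it */

	printf("refs=%d\n", id.refs);	/* 2: one base + one for the work */
	return 0;
}
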
+diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c
+index 4fc5b9d5fea87e..307c35888b3003 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_ah.c
++++ b/drivers/infiniband/hw/hns/hns_roce_ah.c
+@@ -33,7 +33,6 @@
+ #include <linux/pci.h>
+ #include <rdma/ib_addr.h>
+ #include <rdma/ib_cache.h>
+-#include "hnae3.h"
+ #include "hns_roce_device.h"
+ #include "hns_roce_hw_v2.h"
+ 
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+index f5c3e560df58d7..985b9d7d69f20c 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -43,7 +43,6 @@
+ #include <rdma/ib_umem.h>
+ #include <rdma/uverbs_ioctl.h>
+ 
+-#include "hnae3.h"
+ #include "hns_roce_common.h"
+ #include "hns_roce_device.h"
+ #include "hns_roce_cmd.h"
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+index 91a5665465ffba..bc7466830eaf9d 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+@@ -34,6 +34,7 @@
+ #define _HNS_ROCE_HW_V2_H
+ 
+ #include <linux/bitops.h>
++#include "hnae3.h"
+ 
+ #define HNS_ROCE_V2_MAX_RC_INL_INN_SZ		32
+ #define HNS_ROCE_V2_MTT_ENTRY_SZ		64
+diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
+index 8d0b63d4b50a6c..e7a497cc125cc3 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_main.c
++++ b/drivers/infiniband/hw/hns/hns_roce_main.c
+@@ -37,7 +37,6 @@
+ #include <rdma/ib_smi.h>
+ #include <rdma/ib_user_verbs.h>
+ #include <rdma/ib_cache.h>
+-#include "hnae3.h"
+ #include "hns_roce_common.h"
+ #include "hns_roce_device.h"
+ #include "hns_roce_hem.h"
+diff --git a/drivers/infiniband/hw/hns/hns_roce_restrack.c b/drivers/infiniband/hw/hns/hns_roce_restrack.c
+index 356d9881694973..f637b73b946e44 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_restrack.c
++++ b/drivers/infiniband/hw/hns/hns_roce_restrack.c
+@@ -4,7 +4,6 @@
+ #include <rdma/rdma_cm.h>
+ #include <rdma/restrack.h>
+ #include <uapi/rdma/rdma_netlink.h>
+-#include "hnae3.h"
+ #include "hns_roce_common.h"
+ #include "hns_roce_device.h"
+ #include "hns_roce_hw_v2.h"
+diff --git a/drivers/infiniband/hw/mlx5/qpc.c b/drivers/infiniband/hw/mlx5/qpc.c
+index d3dcc272200afa..146d03ae40bd9f 100644
+--- a/drivers/infiniband/hw/mlx5/qpc.c
++++ b/drivers/infiniband/hw/mlx5/qpc.c
+@@ -21,8 +21,10 @@ mlx5_get_rsc(struct mlx5_qp_table *table, u32 rsn)
+ 	spin_lock_irqsave(&table->lock, flags);
+ 
+ 	common = radix_tree_lookup(&table->tree, rsn);
+-	if (common)
++	if (common && !common->invalid)
+ 		refcount_inc(&common->refcount);
++	else
++		common = NULL;
+ 
+ 	spin_unlock_irqrestore(&table->lock, flags);
+ 
+@@ -178,6 +180,18 @@ static int create_resource_common(struct mlx5_ib_dev *dev,
+ 	return 0;
+ }
+ 
++static void modify_resource_common_state(struct mlx5_ib_dev *dev,
++					 struct mlx5_core_qp *qp,
++					 bool invalid)
++{
++	struct mlx5_qp_table *table = &dev->qp_table;
++	unsigned long flags;
++
++	spin_lock_irqsave(&table->lock, flags);
++	qp->common.invalid = invalid;
++	spin_unlock_irqrestore(&table->lock, flags);
++}
++
+ static void destroy_resource_common(struct mlx5_ib_dev *dev,
+ 				    struct mlx5_core_qp *qp)
+ {
+@@ -609,8 +623,20 @@ int mlx5_core_create_rq_tracked(struct mlx5_ib_dev *dev, u32 *in, int inlen,
+ int mlx5_core_destroy_rq_tracked(struct mlx5_ib_dev *dev,
+ 				 struct mlx5_core_qp *rq)
+ {
++	int ret;
++
++	/* The rq destruction can be called again if it fails, hence we
++	 * mark the common resource as invalid and only destroy the
++	 * resources once FW destruction has completed successfully.
++	 */
++	modify_resource_common_state(dev, rq, true);
++	ret = destroy_rq_tracked(dev, rq->qpn, rq->uid);
++	if (ret) {
++		modify_resource_common_state(dev, rq, false);
++		return ret;
++	}
+ 	destroy_resource_common(dev, rq);
+-	return destroy_rq_tracked(dev, rq->qpn, rq->uid);
++	return 0;
+ }
+ 
+ static void destroy_sq_tracked(struct mlx5_ib_dev *dev, u32 sqn, u16 uid)
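
The mlx5 change turns RQ teardown into a two-phase destroy: mark the tracked resource invalid so lookups stop handing out references, attempt the firmware destroy, and roll the flag back if firmware fails so the destroy can be retried. A simplified single-threaded sketch (locking elided):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct rsc {
	int  refs;
	bool invalid;
};

/* Lookup refuses entries that are mid-destruction. */
static struct rsc *rsc_get(struct rsc *r)
{
	if (!r || r->invalid)
		return NULL;
	r->refs++;
	return r;
}

static int fw_destroy(bool fail) { return fail ? -1 : 0; }

/* Two-phase destroy: hide the entry first, roll back if FW says no. */
static int rsc_destroy(struct rsc *r, bool fw_fails)
{
	int ret;

	r->invalid = true;
	ret = fw_destroy(fw_fails);
	if (ret) {
		r->invalid = false;	/* destroy may be retried later */
		return ret;
	}
	/* ...free the tracking structures here... */
	return 0;
}

int main(void)
{
	struct rsc r = { .refs = 1 };

	rsc_destroy(&r, true);		/* FW failed: entry still visible */
	printf("after failed destroy: %s\n", rsc_get(&r) ? "found" : "gone");
	rsc_destroy(&r, false);		/* retry succeeds: entry hidden */
	printf("after destroy: %s\n", rsc_get(&r) ? "found" : "gone");
	return 0;
}
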
+diff --git a/drivers/input/rmi4/rmi_f34.c b/drivers/input/rmi4/rmi_f34.c
+index e2468bc04a5cb3..c2516c7549582d 100644
+--- a/drivers/input/rmi4/rmi_f34.c
++++ b/drivers/input/rmi4/rmi_f34.c
+@@ -4,6 +4,7 @@
+  * Copyright (C) 2016 Zodiac Inflight Innovations
+  */
+ 
++#include <linux/device.h>
+ #include <linux/kernel.h>
+ #include <linux/rmi.h>
+ #include <linux/firmware.h>
+@@ -298,39 +299,30 @@ static int rmi_f34_update_firmware(struct f34_data *f34,
+ 	return ret;
+ }
+ 
+-static int rmi_f34_status(struct rmi_function *fn)
+-{
+-	struct f34_data *f34 = dev_get_drvdata(&fn->dev);
+-
+-	/*
+-	 * The status is the percentage complete, or once complete,
+-	 * zero for success or a negative return code.
+-	 */
+-	return f34->update_status;
+-}
+-
+ static ssize_t rmi_driver_bootloader_id_show(struct device *dev,
+ 					     struct device_attribute *dattr,
+ 					     char *buf)
+ {
+ 	struct rmi_driver_data *data = dev_get_drvdata(dev);
+-	struct rmi_function *fn = data->f34_container;
++	struct rmi_function *fn;
+ 	struct f34_data *f34;
+ 
+-	if (fn) {
+-		f34 = dev_get_drvdata(&fn->dev);
+-
+-		if (f34->bl_version == 5)
+-			return sysfs_emit(buf, "%c%c\n",
+-					  f34->bootloader_id[0],
+-					  f34->bootloader_id[1]);
+-		else
+-			return sysfs_emit(buf, "V%d.%d\n",
+-					  f34->bootloader_id[1],
+-					  f34->bootloader_id[0]);
+-	}
++	fn = data->f34_container;
++	if (!fn)
++		return -ENODEV;
+ 
+-	return 0;
++	f34 = dev_get_drvdata(&fn->dev);
++	if (!f34)
++		return -ENODEV;
++
++	if (f34->bl_version == 5)
++		return sysfs_emit(buf, "%c%c\n",
++				  f34->bootloader_id[0],
++				  f34->bootloader_id[1]);
++	else
++		return sysfs_emit(buf, "V%d.%d\n",
++				  f34->bootloader_id[1],
++				  f34->bootloader_id[0]);
+ }
+ 
+ static DEVICE_ATTR(bootloader_id, 0444, rmi_driver_bootloader_id_show, NULL);
+@@ -343,13 +335,16 @@ static ssize_t rmi_driver_configuration_id_show(struct device *dev,
+ 	struct rmi_function *fn = data->f34_container;
+ 	struct f34_data *f34;
+ 
+-	if (fn) {
+-		f34 = dev_get_drvdata(&fn->dev);
++	if (!fn)
++		return -ENODEV;
+ 
+-		return sysfs_emit(buf, "%s\n", f34->configuration_id);
+-	}
++	f34 = dev_get_drvdata(&fn->dev);
++	if (!f34)
++		return -ENODEV;
+ 
+-	return 0;
++	return sysfs_emit(buf, "%s\n", f34->configuration_id);
+ }
+ 
+ static DEVICE_ATTR(configuration_id, 0444,
+@@ -365,10 +360,14 @@ static int rmi_firmware_update(struct rmi_driver_data *data,
+ 
+ 	if (!data->f34_container) {
+ 		dev_warn(dev, "%s: No F34 present!\n", __func__);
+-		return -EINVAL;
++		return -ENODEV;
+ 	}
+ 
+ 	f34 = dev_get_drvdata(&data->f34_container->dev);
++	if (!f34) {
++		dev_warn(dev, "%s: No valid F34 present!\n", __func__);
++		return -ENODEV;
++	}
+ 
+ 	if (f34->bl_version >= 7) {
+ 		if (data->pdt_props & HAS_BSR) {
+@@ -494,10 +493,18 @@ static ssize_t rmi_driver_update_fw_status_show(struct device *dev,
+ 						char *buf)
+ {
+ 	struct rmi_driver_data *data = dev_get_drvdata(dev);
+-	int update_status = 0;
++	struct f34_data *f34;
++	int update_status = -ENODEV;
+ 
+-	if (data->f34_container)
+-		update_status = rmi_f34_status(data->f34_container);
++	/*
++	 * The status is the percentage complete, or once complete,
++	 * zero for success or a negative return code.
++	 */
++	if (data->f34_container) {
++		f34 = dev_get_drvdata(&data->f34_container->dev);
++		if (f34)
++			update_status = f34->update_status;
++	}
+ 
+ 	return sysfs_emit(buf, "%d\n", update_status);
+ }
+@@ -517,33 +524,21 @@ static const struct attribute_group rmi_firmware_attr_group = {
+ 	.attrs = rmi_firmware_attrs,
+ };
+ 
+-static int rmi_f34_probe(struct rmi_function *fn)
++static int rmi_f34v5_probe(struct f34_data *f34)
+ {
+-	struct f34_data *f34;
+-	unsigned char f34_queries[9];
++	struct rmi_function *fn = f34->fn;
++	u8 f34_queries[9];
+ 	bool has_config_id;
+-	u8 version = fn->fd.function_version;
+-	int ret;
+-
+-	f34 = devm_kzalloc(&fn->dev, sizeof(struct f34_data), GFP_KERNEL);
+-	if (!f34)
+-		return -ENOMEM;
+-
+-	f34->fn = fn;
+-	dev_set_drvdata(&fn->dev, f34);
+-
+-	/* v5 code only supported version 0, try V7 probe */
+-	if (version > 0)
+-		return rmi_f34v7_probe(f34);
++	int error;
+ 
+ 	f34->bl_version = 5;
+ 
+-	ret = rmi_read_block(fn->rmi_dev, fn->fd.query_base_addr,
+-			     f34_queries, sizeof(f34_queries));
+-	if (ret) {
++	error = rmi_read_block(fn->rmi_dev, fn->fd.query_base_addr,
++			       f34_queries, sizeof(f34_queries));
++	if (error) {
+ 		dev_err(&fn->dev, "%s: Failed to query properties\n",
+ 			__func__);
+-		return ret;
++		return error;
+ 	}
+ 
+ 	snprintf(f34->bootloader_id, sizeof(f34->bootloader_id),
+@@ -569,11 +564,11 @@ static int rmi_f34_probe(struct rmi_function *fn)
+ 		f34->v5.config_blocks);
+ 
+ 	if (has_config_id) {
+-		ret = rmi_read_block(fn->rmi_dev, fn->fd.control_base_addr,
+-				     f34_queries, sizeof(f34_queries));
+-		if (ret) {
++		error = rmi_read_block(fn->rmi_dev, fn->fd.control_base_addr,
++				       f34_queries, sizeof(f34_queries));
++		if (error) {
+ 			dev_err(&fn->dev, "Failed to read F34 config ID\n");
+-			return ret;
++			return error;
+ 		}
+ 
+ 		snprintf(f34->configuration_id, sizeof(f34->configuration_id),
+@@ -582,12 +577,34 @@ static int rmi_f34_probe(struct rmi_function *fn)
+ 			 f34_queries[2], f34_queries[3]);
+ 
+ 		rmi_dbg(RMI_DEBUG_FN, &fn->dev, "Configuration ID: %s\n",
+-			 f34->configuration_id);
++			f34->configuration_id);
+ 	}
+ 
+ 	return 0;
+ }
+ 
++static int rmi_f34_probe(struct rmi_function *fn)
++{
++	struct f34_data *f34;
++	u8 version = fn->fd.function_version;
++	int error;
++
++	f34 = devm_kzalloc(&fn->dev, sizeof(struct f34_data), GFP_KERNEL);
++	if (!f34)
++		return -ENOMEM;
++
++	f34->fn = fn;
++
++	/* v5 code only supported version 0 */
++	error = version == 0 ? rmi_f34v5_probe(f34) : rmi_f34v7_probe(f34);
++	if (error)
++		return error;
++
++	dev_set_drvdata(&fn->dev, f34);
++
++	return 0;
++}
++
+ int rmi_f34_create_sysfs(struct rmi_device *rmi_dev)
+ {
+ 	return sysfs_create_group(&rmi_dev->dev.kobj, &rmi_firmware_attr_group);
+diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
+index b3aa1f5d53218b..1469ad0794f285 100644
+--- a/drivers/iommu/Kconfig
++++ b/drivers/iommu/Kconfig
+@@ -199,7 +199,6 @@ source "drivers/iommu/iommufd/Kconfig"
+ config IRQ_REMAP
+ 	bool "Support for Interrupt Remapping"
+ 	depends on X86_64 && X86_IO_APIC && PCI_MSI && ACPI
+-	select DMAR_TABLE if INTEL_IOMMU
+ 	help
+ 	  Supports Interrupt remapping for IO-APIC and MSI devices.
+ 	  To use x2apic mode in the CPU's which support x2APIC enhancements or
+diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
+index 879009adef407b..0ad55649e2d007 100644
+--- a/drivers/iommu/iommu.c
++++ b/drivers/iommu/iommu.c
+@@ -2394,6 +2394,7 @@ static size_t iommu_pgsize(struct iommu_domain *domain, unsigned long iova,
+ 	unsigned int pgsize_idx, pgsize_idx_next;
+ 	unsigned long pgsizes;
+ 	size_t offset, pgsize, pgsize_next;
++	size_t offset_end;
+ 	unsigned long addr_merge = paddr | iova;
+ 
+ 	/* Page sizes supported by the hardware and small enough for @size */
+@@ -2434,7 +2435,8 @@ static size_t iommu_pgsize(struct iommu_domain *domain, unsigned long iova,
+ 	 * If size is big enough to accommodate the larger page, reduce
+ 	 * the number of smaller pages.
+ 	 */
+-	if (offset + pgsize_next <= size)
++	if (!check_add_overflow(offset, pgsize_next, &offset_end) &&
++	    offset_end <= size)
+ 		size = offset;
+ 
+ out_set_count:
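
iommu_pgsize() now guards the offset + pgsize_next comparison with check_add_overflow(), so a wraparound cannot make an oversized page look like it fits. The same guard in plain C, via the compiler builtin that backs the kernel macro:

#include <stddef.h>
#include <stdio.h>

int main(void)
{
	size_t offset = (size_t)-4096;	/* contrived near-wraparound value */
	size_t pgsize_next = 8192;
	size_t size = 4096, offset_end;

	/* __builtin_add_overflow backs the kernel's check_add_overflow(). */
	if (!__builtin_add_overflow(offset, pgsize_next, &offset_end) &&
	    offset_end <= size)
		size = offset;		/* safe to use the larger page */
	else
		puts("overflow (or too big): keep the smaller page size");
	return 0;
}
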
+diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c
+index f815dab3be50cd..0657bd3d8f97b2 100644
+--- a/drivers/mailbox/imx-mailbox.c
++++ b/drivers/mailbox/imx-mailbox.c
+@@ -226,7 +226,7 @@ static int imx_mu_generic_tx(struct imx_mu_priv *priv,
+ {
+ 	u32 *arg = data;
+ 	u32 val;
+-	int ret;
++	int ret, count;
+ 
+ 	switch (cp->type) {
+ 	case IMX_MU_TYPE_TX:
+@@ -240,11 +240,20 @@ static int imx_mu_generic_tx(struct imx_mu_priv *priv,
+ 	case IMX_MU_TYPE_TXDB_V2:
+ 		imx_mu_write(priv, IMX_MU_xCR_GIRn(priv->dcfg->type, cp->idx),
+ 			     priv->dcfg->xCR[IMX_MU_GCR]);
+-		ret = readl_poll_timeout(priv->base + priv->dcfg->xCR[IMX_MU_GCR], val,
+-					 !(val & IMX_MU_xCR_GIRn(priv->dcfg->type, cp->idx)),
+-					 0, 1000);
+-		if (ret)
+-			dev_warn_ratelimited(priv->dev, "channel type: %d failure\n", cp->type);
++		ret = -ETIMEDOUT;
++		count = 0;
++		while (ret && (count < 10)) {
++			ret =
++			readl_poll_timeout(priv->base + priv->dcfg->xCR[IMX_MU_GCR], val,
++					   !(val & IMX_MU_xCR_GIRn(priv->dcfg->type, cp->idx)),
++					   0, 10000);
++
++			if (ret) {
++				dev_warn_ratelimited(priv->dev,
++						     "channel type: %d timed out %d times, retrying\n",
++						     cp->type, ++count);
++			}
++		}
+ 		break;
+ 	default:
+ 		dev_warn_ratelimited(priv->dev, "Send data on wrong channel type: %d\n", cp->type);
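
The imx-mailbox change above wraps the GIR poll in a bounded retry loop, warning on each timeout instead of giving up after a single window. The control flow, sketched with a fake poll (-110 standing in for -ETIMEDOUT):

#include <stdio.h>

/* Stand-in for readl_poll_timeout(): pretend the bit clears on try 3. */
static int poll_bit_clear(int attempt)
{
	return attempt < 3 ? -110 /* -ETIMEDOUT */ : 0;
}

int main(void)
{
	int ret = -110, count = 0;

	/* Bounded retry: up to 10 polls instead of failing after one. */
	while (ret && count < 10) {
		ret = poll_bit_clear(count);
		if (ret)
			fprintf(stderr, "timed out %d times, retrying\n", ++count);
	}
	printf("ret=%d after %d retries\n", ret, count);
	return 0;
}
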
+diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c
+index 9c43ed9bdd37b5..d24f71819c3d65 100644
+--- a/drivers/mailbox/mtk-cmdq-mailbox.c
++++ b/drivers/mailbox/mtk-cmdq-mailbox.c
+@@ -92,18 +92,6 @@ struct gce_plat {
+ 	u32 gce_num;
+ };
+ 
+-static void cmdq_sw_ddr_enable(struct cmdq *cmdq, bool enable)
+-{
+-	WARN_ON(clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks));
+-
+-	if (enable)
+-		writel(GCE_DDR_EN | GCE_CTRL_BY_SW, cmdq->base + GCE_GCTL_VALUE);
+-	else
+-		writel(GCE_CTRL_BY_SW, cmdq->base + GCE_GCTL_VALUE);
+-
+-	clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks);
+-}
+-
+ u8 cmdq_get_shift_pa(struct mbox_chan *chan)
+ {
+ 	struct cmdq *cmdq = container_of(chan->mbox, struct cmdq, mbox);
+@@ -112,6 +100,19 @@ u8 cmdq_get_shift_pa(struct mbox_chan *chan)
+ }
+ EXPORT_SYMBOL(cmdq_get_shift_pa);
+ 
++static void cmdq_gctl_value_toggle(struct cmdq *cmdq, bool ddr_enable)
++{
++	u32 val = cmdq->pdata->control_by_sw ? GCE_CTRL_BY_SW : 0;
++
++	if (!cmdq->pdata->control_by_sw && !cmdq->pdata->sw_ddr_en)
++		return;
++
++	if (cmdq->pdata->sw_ddr_en && ddr_enable)
++		val |= GCE_DDR_EN;
++
++	writel(val, cmdq->base + GCE_GCTL_VALUE);
++}
++
+ static int cmdq_thread_suspend(struct cmdq *cmdq, struct cmdq_thread *thread)
+ {
+ 	u32 status;
+@@ -140,16 +141,10 @@ static void cmdq_thread_resume(struct cmdq_thread *thread)
+ static void cmdq_init(struct cmdq *cmdq)
+ {
+ 	int i;
+-	u32 gctl_regval = 0;
+ 
+ 	WARN_ON(clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks));
+-	if (cmdq->pdata->control_by_sw)
+-		gctl_regval = GCE_CTRL_BY_SW;
+-	if (cmdq->pdata->sw_ddr_en)
+-		gctl_regval |= GCE_DDR_EN;
+ 
+-	if (gctl_regval)
+-		writel(gctl_regval, cmdq->base + GCE_GCTL_VALUE);
++	cmdq_gctl_value_toggle(cmdq, true);
+ 
+ 	writel(CMDQ_THR_ACTIVE_SLOT_CYCLES, cmdq->base + CMDQ_THR_SLOT_CYCLES);
+ 	for (i = 0; i <= CMDQ_MAX_EVENT; i++)
+@@ -315,14 +310,21 @@ static irqreturn_t cmdq_irq_handler(int irq, void *dev)
+ static int cmdq_runtime_resume(struct device *dev)
+ {
+ 	struct cmdq *cmdq = dev_get_drvdata(dev);
++	int ret;
+ 
+-	return clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks);
++	ret = clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks);
++	if (ret)
++		return ret;
++
++	cmdq_gctl_value_toggle(cmdq, true);
++	return 0;
+ }
+ 
+ static int cmdq_runtime_suspend(struct device *dev)
+ {
+ 	struct cmdq *cmdq = dev_get_drvdata(dev);
+ 
++	cmdq_gctl_value_toggle(cmdq, false);
+ 	clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks);
+ 	return 0;
+ }
+@@ -347,9 +349,6 @@ static int cmdq_suspend(struct device *dev)
+ 	if (task_running)
+ 		dev_warn(dev, "exist running task(s) in suspend\n");
+ 
+-	if (cmdq->pdata->sw_ddr_en)
+-		cmdq_sw_ddr_enable(cmdq, false);
+-
+ 	return pm_runtime_force_suspend(dev);
+ }
+ 
+@@ -360,9 +359,6 @@ static int cmdq_resume(struct device *dev)
+ 	WARN_ON(pm_runtime_force_resume(dev));
+ 	cmdq->suspended = false;
+ 
+-	if (cmdq->pdata->sw_ddr_en)
+-		cmdq_sw_ddr_enable(cmdq, true);
+-
+ 	return 0;
+ }
+ 
+@@ -370,9 +366,6 @@ static void cmdq_remove(struct platform_device *pdev)
+ {
+ 	struct cmdq *cmdq = platform_get_drvdata(pdev);
+ 
+-	if (cmdq->pdata->sw_ddr_en)
+-		cmdq_sw_ddr_enable(cmdq, false);
+-
+ 	if (!IS_ENABLED(CONFIG_PM))
+ 		cmdq_runtime_suspend(&pdev->dev);
+ 
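
With the mtk-cmdq rework, the GCTL toggle lives in the runtime-PM callbacks, so system suspend/resume and remove inherit the correct ordering (the register is only written while the clocks are on) instead of each duplicating it. The ordering, as a sketch with stand-in functions:

#include <stdio.h>

static void clk_enable(void)   { puts("clk on");  }
static void clk_disable(void)  { puts("clk off"); }
static void gctl_toggle(int on)
{
	printf("GCTL %s\n", on ? "ddr-en" : "sw-only");
}

/* Runtime-PM callbacks own the register toggle; suspend, resume and
 * remove paths reuse them rather than carrying their own copies. */
static int runtime_resume(void)
{
	clk_enable();
	gctl_toggle(1);
	return 0;
}

static int runtime_suspend(void)
{
	gctl_toggle(0);		/* write while the clock is still on */
	clk_disable();
	return 0;
}

int main(void)
{
	runtime_resume();
	runtime_suspend();
	return 0;
}
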
+diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
+index 3637761f35853c..f3a3f2ef632261 100644
+--- a/drivers/md/dm-core.h
++++ b/drivers/md/dm-core.h
+@@ -141,6 +141,7 @@ struct mapped_device {
+ #ifdef CONFIG_BLK_DEV_ZONED
+ 	unsigned int nr_zones;
+ 	void *zone_revalidate_map;
++	struct task_struct *revalidate_map_task;
+ #endif
+ 
+ #ifdef CONFIG_IMA
+diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
+index b690905ab89ffb..347881f323d5bc 100644
+--- a/drivers/md/dm-flakey.c
++++ b/drivers/md/dm-flakey.c
+@@ -47,14 +47,15 @@ enum feature_flag_bits {
+ };
+ 
+ struct per_bio_data {
+-	bool bio_submitted;
++	bool bio_can_corrupt;
++	struct bvec_iter saved_iter;
+ };
+ 
+ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
+ 			  struct dm_target *ti)
+ {
+-	int r;
+-	unsigned int argc;
++	int r = 0;
++	unsigned int argc = 0;
+ 	const char *arg_name;
+ 
+ 	static const struct dm_arg _args[] = {
+@@ -65,14 +66,13 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
+ 		{0, PROBABILITY_BASE, "Invalid random corrupt argument"},
+ 	};
+ 
+-	/* No feature arguments supplied. */
+-	if (!as->argc)
+-		return 0;
+-
+-	r = dm_read_arg_group(_args, as, &argc, &ti->error);
+-	if (r)
++	if (as->argc && (r = dm_read_arg_group(_args, as, &argc, &ti->error)))
+ 		return r;
+ 
++	/* No feature arguments supplied. */
++	if (!argc)
++		goto error_all_io;
++
+ 	while (argc) {
+ 		arg_name = dm_shift_arg(as);
+ 		argc--;
+@@ -217,6 +217,7 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
+ 	if (!fc->corrupt_bio_byte && !test_bit(ERROR_READS, &fc->flags) &&
+ 	    !test_bit(DROP_WRITES, &fc->flags) && !test_bit(ERROR_WRITES, &fc->flags) &&
+ 	    !fc->random_read_corrupt && !fc->random_write_corrupt) {
++error_all_io:
+ 		set_bit(ERROR_WRITES, &fc->flags);
+ 		set_bit(ERROR_READS, &fc->flags);
+ 	}
+@@ -339,7 +340,8 @@ static void flakey_map_bio(struct dm_target *ti, struct bio *bio)
+ }
+ 
+ static void corrupt_bio_common(struct bio *bio, unsigned int corrupt_bio_byte,
+-			       unsigned char corrupt_bio_value)
++			       unsigned char corrupt_bio_value,
++			       struct bvec_iter start)
+ {
+ 	struct bvec_iter iter;
+ 	struct bio_vec bvec;
+@@ -348,7 +350,7 @@ static void corrupt_bio_common(struct bio *bio, unsigned int corrupt_bio_byte,
+ 	 * Overwrite the Nth byte of the bio's data, on whichever page
+ 	 * it falls.
+ 	 */
+-	bio_for_each_segment(bvec, bio, iter) {
++	__bio_for_each_segment(bvec, bio, iter, start) {
+ 		if (bio_iter_len(bio, iter) > corrupt_bio_byte) {
+ 			unsigned char *segment = bvec_kmap_local(&bvec);
+ 			segment[corrupt_bio_byte] = corrupt_bio_value;
+@@ -357,36 +359,31 @@ static void corrupt_bio_common(struct bio *bio, unsigned int corrupt_bio_byte,
+ 				"(rw=%c bi_opf=%u bi_sector=%llu size=%u)\n",
+ 				bio, corrupt_bio_value, corrupt_bio_byte,
+ 				(bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_opf,
+-				(unsigned long long)bio->bi_iter.bi_sector,
+-				bio->bi_iter.bi_size);
++				(unsigned long long)start.bi_sector,
++				start.bi_size);
+ 			break;
+ 		}
+ 		corrupt_bio_byte -= bio_iter_len(bio, iter);
+ 	}
+ }
+ 
+-static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
++static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc,
++			     struct bvec_iter start)
+ {
+ 	unsigned int corrupt_bio_byte = fc->corrupt_bio_byte - 1;
+ 
+-	if (!bio_has_data(bio))
+-		return;
+-
+-	corrupt_bio_common(bio, corrupt_bio_byte, fc->corrupt_bio_value);
++	corrupt_bio_common(bio, corrupt_bio_byte, fc->corrupt_bio_value, start);
+ }
+ 
+-static void corrupt_bio_random(struct bio *bio)
++static void corrupt_bio_random(struct bio *bio, struct bvec_iter start)
+ {
+ 	unsigned int corrupt_byte;
+ 	unsigned char corrupt_value;
+ 
+-	if (!bio_has_data(bio))
+-		return;
+-
+-	corrupt_byte = get_random_u32() % bio->bi_iter.bi_size;
++	corrupt_byte = get_random_u32() % start.bi_size;
+ 	corrupt_value = get_random_u8();
+ 
+-	corrupt_bio_common(bio, corrupt_byte, corrupt_value);
++	corrupt_bio_common(bio, corrupt_byte, corrupt_value, start);
+ }
+ 
+ static void clone_free(struct bio *clone)
+@@ -481,7 +478,7 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
+ 	unsigned int elapsed;
+ 	struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
+ 
+-	pb->bio_submitted = false;
++	pb->bio_can_corrupt = false;
+ 
+ 	if (op_is_zone_mgmt(bio_op(bio)))
+ 		goto map_bio;
+@@ -490,10 +487,11 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
+ 	elapsed = (jiffies - fc->start_time) / HZ;
+ 	if (elapsed % (fc->up_interval + fc->down_interval) >= fc->up_interval) {
+ 		bool corrupt_fixed, corrupt_random;
+-		/*
+-		 * Flag this bio as submitted while down.
+-		 */
+-		pb->bio_submitted = true;
++
++		if (bio_has_data(bio)) {
++			pb->bio_can_corrupt = true;
++			pb->saved_iter = bio->bi_iter;
++		}
+ 
+ 		/*
+ 		 * Error reads if neither corrupt_bio_byte or drop_writes or error_writes are set.
+@@ -516,6 +514,8 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
+ 			return DM_MAPIO_SUBMITTED;
+ 		}
+ 
++		if (!pb->bio_can_corrupt)
++			goto map_bio;
+ 		/*
+ 		 * Corrupt matching writes.
+ 		 */
+@@ -535,9 +535,11 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
+ 			struct bio *clone = clone_bio(ti, fc, bio);
+ 			if (clone) {
+ 				if (corrupt_fixed)
+-					corrupt_bio_data(clone, fc);
++					corrupt_bio_data(clone, fc,
++							 clone->bi_iter);
+ 				if (corrupt_random)
+-					corrupt_bio_random(clone);
++					corrupt_bio_random(clone,
++							   clone->bi_iter);
+ 				submit_bio(clone);
+ 				return DM_MAPIO_SUBMITTED;
+ 			}
+@@ -559,21 +561,21 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio,
+ 	if (op_is_zone_mgmt(bio_op(bio)))
+ 		return DM_ENDIO_DONE;
+ 
+-	if (!*error && pb->bio_submitted && (bio_data_dir(bio) == READ)) {
++	if (!*error && pb->bio_can_corrupt && (bio_data_dir(bio) == READ)) {
+ 		if (fc->corrupt_bio_byte) {
+ 			if ((fc->corrupt_bio_rw == READ) &&
+ 			    all_corrupt_bio_flags_match(bio, fc)) {
+ 				/*
+ 				 * Corrupt successful matching READs while in down state.
+ 				 */
+-				corrupt_bio_data(bio, fc);
++				corrupt_bio_data(bio, fc, pb->saved_iter);
+ 			}
+ 		}
+ 		if (fc->random_read_corrupt) {
+ 			u64 rnd = get_random_u64();
+ 			u32 rem = do_div(rnd, PROBABILITY_BASE);
+ 			if (rem < fc->random_read_corrupt)
+-				corrupt_bio_random(bio);
++				corrupt_bio_random(bio, pb->saved_iter);
+ 		}
+ 		if (test_bit(ERROR_READS, &fc->flags)) {
+ 			/*
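
The dm-flakey hunks above thread an explicit start iterator through the corruption helpers because bio->bi_iter is advanced while the bio is processed; by the time the end_io hook runs it no longer describes the data that was submitted, so the iterator is snapshotted into the per-bio data at map time. A minimal sketch of the idiom, assuming the usual <linux/bio.h> helpers (poke_nth_byte is a made-up name, not the dm-flakey function):

/* Sketch: walk a bio from a snapshot of its iterator rather than from
 * bio->bi_iter, which the block layer advances during processing.
 * Hypothetical helper, not the dm-flakey code itself.
 */
static void poke_nth_byte(struct bio *bio, struct bvec_iter start,
			  unsigned int nth, unsigned char value)
{
	struct bvec_iter iter;
	struct bio_vec bvec;

	__bio_for_each_segment(bvec, bio, iter, start) {
		if (bio_iter_len(bio, iter) > nth) {
			unsigned char *p = bvec_kmap_local(&bvec);

			p[nth] = value;		/* the Nth byte of the I/O */
			kunmap_local(p);
			break;
		}
		nth -= bio_iter_len(bio, iter);
	}
}

At map time the driver saves bio->bi_iter into its per-bio data (pb->saved_iter above) and passes that snapshot as the start iterator when corrupting the completed read.
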
+diff --git a/drivers/md/dm-zone.c b/drivers/md/dm-zone.c
+index c0d41c36e06ebf..04cc36a9d5ca46 100644
+--- a/drivers/md/dm-zone.c
++++ b/drivers/md/dm-zone.c
+@@ -56,24 +56,31 @@ int dm_blk_report_zones(struct gendisk *disk, sector_t sector,
+ {
+ 	struct mapped_device *md = disk->private_data;
+ 	struct dm_table *map;
+-	int srcu_idx, ret;
++	struct dm_table *zone_revalidate_map = md->zone_revalidate_map;
++	int srcu_idx, ret = -EIO;
++	bool put_table = false;
+ 
+-	if (!md->zone_revalidate_map) {
+-		/* Regular user context */
++	if (!zone_revalidate_map || md->revalidate_map_task != current) {
++		/*
++		 * Regular user context, or zone revalidation during
++		 * __bind() is in progress but this call comes from a
++		 * different process.
++		 */
+ 		if (dm_suspended_md(md))
+ 			return -EAGAIN;
+ 
+ 		map = dm_get_live_table(md, &srcu_idx);
+-		if (!map)
+-			return -EIO;
++		put_table = true;
+ 	} else {
+ 		/* Zone revalidation during __bind() */
+-		map = md->zone_revalidate_map;
++		map = zone_revalidate_map;
+ 	}
+ 
+-	ret = dm_blk_do_report_zones(md, map, sector, nr_zones, cb, data);
++	if (map)
++		ret = dm_blk_do_report_zones(md, map, sector, nr_zones, cb,
++					     data);
+ 
+-	if (!md->zone_revalidate_map)
++	if (put_table)
+ 		dm_put_live_table(md, srcu_idx);
+ 
+ 	return ret;
+@@ -175,7 +182,9 @@ int dm_revalidate_zones(struct dm_table *t, struct request_queue *q)
+ 	 * our table for dm_blk_report_zones() to use directly.
+ 	 */
+ 	md->zone_revalidate_map = t;
++	md->revalidate_map_task = current;
+ 	ret = blk_revalidate_disk_zones(disk);
++	md->revalidate_map_task = NULL;
+ 	md->zone_revalidate_map = NULL;
+ 
+ 	if (ret) {
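
The revalidate_map_task field added above is a re-entrancy marker: blk_revalidate_disk_zones() calls back into dm_blk_report_zones() from the same task, so comparing the stored task_struct against current tells that callback apart from a report-zones request arriving concurrently from another process. A stripped-down sketch of the pattern (foo_*, do_report*, and trigger_nested_report() are hypothetical names):

/* Sketch of detecting a callback that re-enters from our own task. */
struct foo {
	void *nested_ctx;		/* valid only around the nested call */
	struct task_struct *owner;	/* task that installed nested_ctx */
};

static int foo_report(struct foo *f)
{
	if (f->nested_ctx && f->owner == current)
		return do_report(f->nested_ctx);	/* our own callback */

	return do_report_locked(f);	/* normal path: take references */
}

static int foo_revalidate(struct foo *f, void *ctx)
{
	int ret;

	f->nested_ctx = ctx;
	f->owner = current;
	ret = trigger_nested_report(f);		/* ends up in foo_report() */
	f->owner = NULL;
	f->nested_ctx = NULL;
	return ret;
}
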
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index d29125ee9e72af..92e5a233f51607 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -2410,21 +2410,29 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
+ 			       struct queue_limits *limits)
+ {
+ 	struct dm_table *old_map;
+-	sector_t size;
++	sector_t size, old_size;
+ 	int ret;
+ 
+ 	lockdep_assert_held(&md->suspend_lock);
+ 
+ 	size = dm_table_get_size(t);
+ 
++	old_size = dm_get_size(md);
++	set_capacity(md->disk, size);
++
++	ret = dm_table_set_restrictions(t, md->queue, limits);
++	if (ret) {
++		set_capacity(md->disk, old_size);
++		old_map = ERR_PTR(ret);
++		goto out;
++	}
++
+ 	/*
+ 	 * Wipe any geometry if the size of the table changed.
+ 	 */
+-	if (size != dm_get_size(md))
++	if (size != old_size)
+ 		memset(&md->geometry, 0, sizeof(md->geometry));
+ 
+-	set_capacity(md->disk, size);
+-
+ 	dm_table_event_callback(t, event_callback, md);
+ 
+ 	if (dm_table_request_based(t)) {
+@@ -2442,10 +2450,10 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
+ 		 * requests in the queue may refer to bio from the old bioset,
+ 		 * so you must walk through the queue to unprep.
+ 		 */
+-		if (!md->mempools) {
++		if (!md->mempools)
+ 			md->mempools = t->mempools;
+-			t->mempools = NULL;
+-		}
++		else
++			dm_free_md_mempools(t->mempools);
+ 	} else {
+ 		/*
+ 		 * The md may already have mempools that need changing.
+@@ -2454,14 +2462,8 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
+ 		 */
+ 		dm_free_md_mempools(md->mempools);
+ 		md->mempools = t->mempools;
+-		t->mempools = NULL;
+-	}
+-
+-	ret = dm_table_set_restrictions(t, md->queue, limits);
+-	if (ret) {
+-		old_map = ERR_PTR(ret);
+-		goto out;
+ 	}
++	t->mempools = NULL;
+ 
+ 	old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
+ 	rcu_assign_pointer(md->map, (void *)t);
+diff --git a/drivers/media/platform/verisilicon/hantro_postproc.c b/drivers/media/platform/verisilicon/hantro_postproc.c
+index 232c93eea7eea6..18cad5ac92d8d2 100644
+--- a/drivers/media/platform/verisilicon/hantro_postproc.c
++++ b/drivers/media/platform/verisilicon/hantro_postproc.c
+@@ -260,8 +260,10 @@ int hantro_postproc_init(struct hantro_ctx *ctx)
+ 
+ 	for (i = 0; i < num_buffers; i++) {
+ 		ret = hantro_postproc_alloc(ctx, i);
+-		if (ret)
++		if (ret) {
++			hantro_postproc_free(ctx);
+ 			return ret;
++		}
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/mfd/exynos-lpass.c b/drivers/mfd/exynos-lpass.c
+index e58990c85ed878..e36805f07282ef 100644
+--- a/drivers/mfd/exynos-lpass.c
++++ b/drivers/mfd/exynos-lpass.c
+@@ -122,8 +122,8 @@ static int exynos_lpass_probe(struct platform_device *pdev)
+ 	if (IS_ERR(lpass->sfr0_clk))
+ 		return PTR_ERR(lpass->sfr0_clk);
+ 
+-	lpass->top = regmap_init_mmio(dev, base_top,
+-					&exynos_lpass_reg_conf);
++	lpass->top = devm_regmap_init_mmio(dev, base_top,
++					   &exynos_lpass_reg_conf);
+ 	if (IS_ERR(lpass->top)) {
+ 		dev_err(dev, "LPASS top regmap initialization failed\n");
+ 		return PTR_ERR(lpass->top);
+@@ -141,11 +141,9 @@ static void exynos_lpass_remove(struct platform_device *pdev)
+ {
+ 	struct exynos_lpass *lpass = platform_get_drvdata(pdev);
+ 
+-	exynos_lpass_disable(lpass);
+ 	pm_runtime_disable(&pdev->dev);
+ 	if (!pm_runtime_status_suspended(&pdev->dev))
+ 		exynos_lpass_disable(lpass);
+-	regmap_exit(lpass->top);
+ }
+ 
+ static int __maybe_unused exynos_lpass_suspend(struct device *dev)
+diff --git a/drivers/mfd/stmpe-spi.c b/drivers/mfd/stmpe-spi.c
+index 792236f56399af..b9cc85ea2c4019 100644
+--- a/drivers/mfd/stmpe-spi.c
++++ b/drivers/mfd/stmpe-spi.c
+@@ -129,7 +129,7 @@ static const struct spi_device_id stmpe_spi_id[] = {
+ 	{ "stmpe2403", STMPE2403 },
+ 	{ }
+ };
+-MODULE_DEVICE_TABLE(spi, stmpe_id);
++MODULE_DEVICE_TABLE(spi, stmpe_spi_id);
+ 
+ static struct spi_driver stmpe_spi_driver = {
+ 	.driver = {
+diff --git a/drivers/misc/mei/vsc-tp.c b/drivers/misc/mei/vsc-tp.c
+index ad7c7f1573191f..5e44b518f36c74 100644
+--- a/drivers/misc/mei/vsc-tp.c
++++ b/drivers/misc/mei/vsc-tp.c
+@@ -324,7 +324,7 @@ int vsc_tp_rom_xfer(struct vsc_tp *tp, const void *obuf, void *ibuf, size_t len)
+ 	guard(mutex)(&tp->mutex);
+ 
+ 	/* rom xfer is big endian */
+-	cpu_to_be32_array((u32 *)tp->tx_buf, obuf, words);
++	cpu_to_be32_array((__be32 *)tp->tx_buf, obuf, words);
+ 
+ 	ret = read_poll_timeout(gpiod_get_value_cansleep, ret,
+ 				!ret, VSC_TP_ROM_XFER_POLL_DELAY_US,
+@@ -340,7 +340,7 @@ int vsc_tp_rom_xfer(struct vsc_tp *tp, const void *obuf, void *ibuf, size_t len)
+ 		return ret;
+ 
+ 	if (ibuf)
+-		be32_to_cpu_array(ibuf, (u32 *)tp->rx_buf, words);
++		be32_to_cpu_array(ibuf, (__be32 *)tp->rx_buf, words);
+ 
+ 	return ret;
+ }
+diff --git a/drivers/misc/vmw_vmci/vmci_host.c b/drivers/misc/vmw_vmci/vmci_host.c
+index abe79f6fd2a79b..b64944367ac533 100644
+--- a/drivers/misc/vmw_vmci/vmci_host.c
++++ b/drivers/misc/vmw_vmci/vmci_host.c
+@@ -227,6 +227,7 @@ static int drv_cp_harray_to_user(void __user *user_buf_uva,
+ static int vmci_host_setup_notify(struct vmci_ctx *context,
+ 				  unsigned long uva)
+ {
++	struct page *page;
+ 	int retval;
+ 
+ 	if (context->notify_page) {
+@@ -243,13 +244,11 @@ static int vmci_host_setup_notify(struct vmci_ctx *context,
+ 	/*
+ 	 * Lock physical page backing a given user VA.
+ 	 */
+-	retval = get_user_pages_fast(uva, 1, FOLL_WRITE, &context->notify_page);
+-	if (retval != 1) {
+-		context->notify_page = NULL;
++	retval = get_user_pages_fast(uva, 1, FOLL_WRITE, &page);
++	if (retval != 1)
+ 		return VMCI_ERROR_GENERIC;
+-	}
+-	if (context->notify_page == NULL)
+-		return VMCI_ERROR_UNAVAILABLE;
++
++	context->notify_page = page;
+ 
+ 	/*
+ 	 * Map the locked page and set up notify pointer.
+diff --git a/drivers/mmc/host/sdhci-of-dwcmshc.c b/drivers/mmc/host/sdhci-of-dwcmshc.c
+index 8fd80dac11bfdf..bf29aad082a19f 100644
+--- a/drivers/mmc/host/sdhci-of-dwcmshc.c
++++ b/drivers/mmc/host/sdhci-of-dwcmshc.c
+@@ -17,6 +17,7 @@
+ #include <linux/module.h>
+ #include <linux/of.h>
+ #include <linux/platform_device.h>
++#include <linux/pm_domain.h>
+ #include <linux/pm_runtime.h>
+ #include <linux/reset.h>
+ #include <linux/sizes.h>
+@@ -787,6 +788,29 @@ static void dwcmshc_rk35xx_postinit(struct sdhci_host *host, struct dwcmshc_priv
+ 	}
+ }
+ 
++static void dwcmshc_rk3576_postinit(struct sdhci_host *host, struct dwcmshc_priv *dwc_priv)
++{
++	struct device *dev = mmc_dev(host->mmc);
++	int ret;
++
++	/*
++	 * This works around the design of the RK3576's power domains: the
++	 * PD_NVM power domain, which the sdhci controller on the RK3576
++	 * sits in, never comes back the same way after it has been
++	 * run-time suspended once. That can happen during early kernel
++	 * boot if, for a short moment, no driver is using either PD_NVM or
++	 * its child power domain PD_SDGMAC, causing it to be turned off to
++	 * save power. By keeping it on, suspending sdhci won't make PD_NVM
++	 * a candidate for getting turned off.
++	 */
++	ret = dev_pm_genpd_rpm_always_on(dev, true);
++	if (ret && ret != -EOPNOTSUPP)
++		dev_warn(dev, "failed to set PD rpm always on, SoC may hang later: %pe\n",
++			 ERR_PTR(ret));
++
++	dwcmshc_rk35xx_postinit(host, dwc_priv);
++}
++
+ static int th1520_execute_tuning(struct sdhci_host *host, u32 opcode)
+ {
+ 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+@@ -1218,6 +1242,18 @@ static const struct dwcmshc_pltfm_data sdhci_dwcmshc_rk35xx_pdata = {
+ 	.postinit = dwcmshc_rk35xx_postinit,
+ };
+ 
++static const struct dwcmshc_pltfm_data sdhci_dwcmshc_rk3576_pdata = {
++	.pdata = {
++		.ops = &sdhci_dwcmshc_rk35xx_ops,
++		.quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN |
++			  SDHCI_QUIRK_BROKEN_TIMEOUT_VAL,
++		.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
++			   SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN,
++	},
++	.init = dwcmshc_rk35xx_init,
++	.postinit = dwcmshc_rk3576_postinit,
++};
++
+ static const struct dwcmshc_pltfm_data sdhci_dwcmshc_th1520_pdata = {
+ 	.pdata = {
+ 		.ops = &sdhci_dwcmshc_th1520_ops,
+@@ -1316,6 +1352,10 @@ static const struct of_device_id sdhci_dwcmshc_dt_ids[] = {
+ 		.compatible = "rockchip,rk3588-dwcmshc",
+ 		.data = &sdhci_dwcmshc_rk35xx_pdata,
+ 	},
++	{
++		.compatible = "rockchip,rk3576-dwcmshc",
++		.data = &sdhci_dwcmshc_rk3576_pdata,
++	},
+ 	{
+ 		.compatible = "rockchip,rk3568-dwcmshc",
+ 		.data = &sdhci_dwcmshc_rk35xx_pdata,
+diff --git a/drivers/mtd/nand/ecc-mxic.c b/drivers/mtd/nand/ecc-mxic.c
+index 47e10945b8d271..63cb206269dd9d 100644
+--- a/drivers/mtd/nand/ecc-mxic.c
++++ b/drivers/mtd/nand/ecc-mxic.c
+@@ -614,7 +614,7 @@ static int mxic_ecc_finish_io_req_external(struct nand_device *nand,
+ {
+ 	struct mxic_ecc_engine *mxic = nand_to_mxic(nand);
+ 	struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);
+-	int nents, step, ret;
++	int nents, step, ret = 0;
+ 
+ 	if (req->mode == MTD_OPS_RAW)
+ 		return 0;
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 4d2e30f4ee2507..2a513dbbd97566 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -2113,15 +2113,26 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
+ 		 * set the master's mac address to that of the first slave
+ 		 */
+ 		memcpy(ss.__data, bond_dev->dev_addr, bond_dev->addr_len);
+-		ss.ss_family = slave_dev->type;
+-		res = dev_set_mac_address(slave_dev, (struct sockaddr *)&ss,
+-					  extack);
+-		if (res) {
+-			slave_err(bond_dev, slave_dev, "Error %d calling set_mac_address\n", res);
+-			goto err_restore_mtu;
+-		}
++	} else if (bond->params.fail_over_mac == BOND_FOM_FOLLOW &&
++		   BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP &&
++		   memcmp(slave_dev->dev_addr, bond_dev->dev_addr, bond_dev->addr_len) == 0) {
++		/* Set slave to random address to avoid duplicate mac
++		 * address in later fail over.
++		 */
++		eth_random_addr(ss.__data);
++	} else {
++		goto skip_mac_set;
+ 	}
+ 
++	ss.ss_family = slave_dev->type;
++	res = dev_set_mac_address(slave_dev, (struct sockaddr *)&ss, extack);
++	if (res) {
++		slave_err(bond_dev, slave_dev, "Error %d calling set_mac_address\n", res);
++		goto err_restore_mtu;
++	}
++
++skip_mac_set:
++
+ 	/* set no_addrconf flag before open to prevent IPv6 addrconf */
+ 	slave_dev->priv_flags |= IFF_NO_ADDRCONF;
+ 
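
The new branch above gives the enslaved device a random address where fail_over_mac=follow would otherwise leave two ports sharing the bond's MAC after a later failover. eth_random_addr() produces a valid unicast, locally administered address; here is a userspace sketch of those two invariants (the kernel uses get_random_bytes() rather than rand()):

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define ETH_ALEN 6

/* Userspace sketch of eth_random_addr(): random bytes, then clear the
 * multicast bit and set the locally-administered bit in the first octet.
 */
static void sketch_random_addr(unsigned char addr[ETH_ALEN])
{
	for (int i = 0; i < ETH_ALEN; i++)
		addr[i] = rand() & 0xff;	/* kernel: get_random_bytes() */
	addr[0] &= 0xfe;	/* unicast: clear the multicast bit */
	addr[0] |= 0x02;	/* locally administered, not an OUI */
}

int main(void)
{
	unsigned char a[ETH_ALEN];

	srand(time(NULL));
	sketch_random_addr(a);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       a[0], a[1], a[2], a[3], a[4], a[5]);
	return 0;
}
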
+diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
+index 0168ad495e6c90..71c30a81c36dbd 100644
+--- a/drivers/net/dsa/b53/b53_common.c
++++ b/drivers/net/dsa/b53/b53_common.c
+@@ -1326,24 +1326,7 @@ static void b53_adjust_63xx_rgmii(struct dsa_switch *ds, int port,
+ 		off = B53_RGMII_CTRL_P(port);
+ 
+ 	b53_read8(dev, B53_CTRL_PAGE, off, &rgmii_ctrl);
+-
+-	switch (interface) {
+-	case PHY_INTERFACE_MODE_RGMII_ID:
+-		rgmii_ctrl |= (RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC);
+-		break;
+-	case PHY_INTERFACE_MODE_RGMII_RXID:
+-		rgmii_ctrl &= ~(RGMII_CTRL_DLL_TXC);
+-		rgmii_ctrl |= RGMII_CTRL_DLL_RXC;
+-		break;
+-	case PHY_INTERFACE_MODE_RGMII_TXID:
+-		rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC);
+-		rgmii_ctrl |= RGMII_CTRL_DLL_TXC;
+-		break;
+-	case PHY_INTERFACE_MODE_RGMII:
+-	default:
+-		rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC);
+-		break;
+-	}
++	rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC);
+ 
+ 	if (port != dev->imp_port) {
+ 		if (is63268(dev))
+@@ -1373,8 +1356,7 @@ static void b53_adjust_531x5_rgmii(struct dsa_switch *ds, int port,
+ 	 * tx_clk aligned timing (restoring to reset defaults)
+ 	 */
+ 	b53_read8(dev, B53_CTRL_PAGE, off, &rgmii_ctrl);
+-	rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC |
+-			RGMII_CTRL_TIMING_SEL);
++	rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC);
+ 
+ 	/* PHY_INTERFACE_MODE_RGMII_TXID means TX internal delay, make
+ 	 * sure that we enable the port TX clock internal delay to
+@@ -1394,7 +1376,10 @@ static void b53_adjust_531x5_rgmii(struct dsa_switch *ds, int port,
+ 		rgmii_ctrl |= RGMII_CTRL_DLL_TXC;
+ 	if (interface == PHY_INTERFACE_MODE_RGMII)
+ 		rgmii_ctrl |= RGMII_CTRL_DLL_TXC | RGMII_CTRL_DLL_RXC;
+-	rgmii_ctrl |= RGMII_CTRL_TIMING_SEL;
++
++	if (dev->chip_id != BCM53115_DEVICE_ID)
++		rgmii_ctrl |= RGMII_CTRL_TIMING_SEL;
++
+ 	b53_write8(dev, B53_CTRL_PAGE, off, rgmii_ctrl);
+ 
+ 	dev_info(ds->dev, "Configured port %d for %s\n", port,
+@@ -1458,6 +1443,10 @@ static void b53_phylink_get_caps(struct dsa_switch *ds, int port,
+ 	__set_bit(PHY_INTERFACE_MODE_MII, config->supported_interfaces);
+ 	__set_bit(PHY_INTERFACE_MODE_REVMII, config->supported_interfaces);
+ 
++	/* BCM63xx RGMII ports support RGMII */
++	if (is63xx(dev) && in_range(port, B53_63XX_RGMII0, 4))
++		phy_interface_set_rgmii(config->supported_interfaces);
++
+ 	config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ 		MAC_10 | MAC_100;
+ 
+@@ -2047,9 +2036,6 @@ int b53_br_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge,
+ 
+ 		b53_get_vlan_entry(dev, pvid, vl);
+ 		vl->members &= ~BIT(port);
+-		if (vl->members == BIT(cpu_port))
+-			vl->members &= ~BIT(cpu_port);
+-		vl->untag = vl->members;
+ 		b53_set_vlan_entry(dev, pvid, vl);
+ 	}
+ 
+@@ -2128,8 +2114,7 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge)
+ 		}
+ 
+ 		b53_get_vlan_entry(dev, pvid, vl);
+-		vl->members |= BIT(port) | BIT(cpu_port);
+-		vl->untag |= BIT(port) | BIT(cpu_port);
++		vl->members |= BIT(port);
+ 		b53_set_vlan_entry(dev, pvid, vl);
+ 	}
+ }
+diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
+index 862c4575701fec..14f39d1f59d361 100644
+--- a/drivers/net/ethernet/google/gve/gve_main.c
++++ b/drivers/net/ethernet/google/gve/gve_main.c
+@@ -2207,7 +2207,7 @@ void gve_handle_report_stats(struct gve_priv *priv)
+ 			};
+ 			stats[stats_idx++] = (struct stats) {
+ 				.stat_name = cpu_to_be32(RX_BUFFERS_POSTED),
+-				.value = cpu_to_be64(priv->rx[0].fill_cnt),
++				.value = cpu_to_be64(priv->rx[idx].fill_cnt),
+ 				.queue_id = cpu_to_be32(idx),
+ 			};
+ 		}
+diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
+index f879426cb5523a..26053cc85d1c52 100644
+--- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c
++++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
+@@ -770,6 +770,9 @@ static int gve_tx_add_skb_dqo(struct gve_tx_ring *tx,
+ 	s16 completion_tag;
+ 
+ 	pkt = gve_alloc_pending_packet(tx);
++	if (!pkt)
++		return -ENOMEM;
++
+ 	pkt->skb = skb;
+ 	completion_tag = pkt - tx->dqo.pending_packets;
+ 
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+index dfa785e39458db..625fa93fc18bb1 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+@@ -1546,8 +1546,8 @@ static void i40e_cleanup_reset_vf(struct i40e_vf *vf)
+  * @vf: pointer to the VF structure
+  * @flr: VFLR was issued or not
+  *
+- * Returns true if the VF is in reset, resets successfully, or resets
+- * are disabled and false otherwise.
++ * Return: True if reset was performed successfully or if resets are disabled.
++ * False if reset is already in progress.
+  **/
+ bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
+ {
+@@ -1566,7 +1566,7 @@ bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
+ 
+ 	/* If VF is being reset already we don't need to continue. */
+ 	if (test_and_set_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
+-		return true;
++		return false;
+ 
+ 	i40e_trigger_vf_reset(vf, flr);
+ 
+@@ -4328,7 +4328,10 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf)
+ 		reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
+ 		if (reg & BIT(bit_idx))
+ 			/* i40e_reset_vf will clear the bit in GLGEN_VFLRSTAT */
+-			i40e_reset_vf(vf, true);
++			if (!i40e_reset_vf(vf, true)) {
++				/* At least one VF did not finish resetting, retry next time */
++				set_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
++			}
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index 63d2105fce9332..d1abd21cfc647c 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -2761,6 +2761,27 @@ void ice_map_xdp_rings(struct ice_vsi *vsi)
+ 	}
+ }
+ 
++/**
++ * ice_unmap_xdp_rings - Unmap XDP rings from interrupt vectors
++ * @vsi: the VSI with XDP rings being unmapped
++ */
++static void ice_unmap_xdp_rings(struct ice_vsi *vsi)
++{
++	int v_idx;
++
++	ice_for_each_q_vector(vsi, v_idx) {
++		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
++		struct ice_tx_ring *ring;
++
++		ice_for_each_tx_ring(ring, q_vector->tx)
++			if (!ring->tx_buf || !ice_ring_is_xdp(ring))
++				break;
++
++		/* restore the value of last node prior to XDP setup */
++		q_vector->tx.tx_ring = ring;
++	}
++}
++
+ /**
+  * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
+  * @vsi: VSI to bring up Tx rings used by XDP
+@@ -2824,7 +2845,7 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
+ 	if (status) {
+ 		dev_err(dev, "Failed VSI LAN queue config for XDP, error: %d\n",
+ 			status);
+-		goto clear_xdp_rings;
++		goto unmap_xdp_rings;
+ 	}
+ 
+ 	/* assign the prog only when it's not already present on VSI;
+@@ -2840,6 +2861,8 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
+ 		ice_vsi_assign_bpf_prog(vsi, prog);
+ 
+ 	return 0;
++unmap_xdp_rings:
++	ice_unmap_xdp_rings(vsi);
+ clear_xdp_rings:
+ 	ice_for_each_xdp_txq(vsi, i)
+ 		if (vsi->xdp_rings[i]) {
+@@ -2856,6 +2879,8 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
+ 	mutex_unlock(&pf->avail_q_mutex);
+ 
+ 	devm_kfree(dev, vsi->xdp_rings);
++	vsi->xdp_rings = NULL;
++
+ 	return -ENOMEM;
+ }
+ 
+@@ -2871,7 +2896,7 @@ int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type)
+ {
+ 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
+ 	struct ice_pf *pf = vsi->back;
+-	int i, v_idx;
++	int i;
+ 
+ 	/* q_vectors are freed in reset path so there's no point in detaching
+ 	 * rings
+@@ -2879,17 +2904,7 @@ int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type)
+ 	if (cfg_type == ICE_XDP_CFG_PART)
+ 		goto free_qmap;
+ 
+-	ice_for_each_q_vector(vsi, v_idx) {
+-		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
+-		struct ice_tx_ring *ring;
+-
+-		ice_for_each_tx_ring(ring, q_vector->tx)
+-			if (!ring->tx_buf || !ice_ring_is_xdp(ring))
+-				break;
+-
+-		/* restore the value of last node prior to XDP setup */
+-		q_vector->tx.tx_ring = ring;
+-	}
++	ice_unmap_xdp_rings(vsi);
+ 
+ free_qmap:
+ 	mutex_lock(&pf->avail_q_mutex);
+@@ -3034,11 +3049,14 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
+ 		xdp_ring_err = ice_vsi_determine_xdp_res(vsi);
+ 		if (xdp_ring_err) {
+ 			NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP");
++			goto resume_if;
+ 		} else {
+ 			xdp_ring_err = ice_prepare_xdp_rings(vsi, prog,
+ 							     ICE_XDP_CFG_FULL);
+-			if (xdp_ring_err)
++			if (xdp_ring_err) {
+ 				NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
++				goto resume_if;
++			}
+ 		}
+ 		xdp_features_set_redirect_target(vsi->netdev, true);
+ 		/* reallocate Rx queues that are used for zero-copy */
+@@ -3056,6 +3074,7 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
+ 			NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Rx resources failed");
+ 	}
+ 
++resume_if:
+ 	if (if_running)
+ 		ret = ice_up(vsi);
+ 
+diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
+index 6ca13c5dcb14e7..d9d09296d1d481 100644
+--- a/drivers/net/ethernet/intel/ice/ice_sched.c
++++ b/drivers/net/ethernet/intel/ice/ice_sched.c
+@@ -84,6 +84,27 @@ ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid)
+ 	return NULL;
+ }
+ 
++/**
++ * ice_sched_find_next_vsi_node - find the next node for a given VSI
++ * @vsi_node: VSI support node to start search with
++ *
++ * Return: Next VSI support node, or NULL.
++ *
++ * The function returns a pointer to the next node from the VSI layer
++ * assigned to the given VSI, or NULL if there is no such node.
++ */
++static struct ice_sched_node *
++ice_sched_find_next_vsi_node(struct ice_sched_node *vsi_node)
++{
++	unsigned int vsi_handle = vsi_node->vsi_handle;
++
++	while ((vsi_node = vsi_node->sibling) != NULL)
++		if (vsi_node->vsi_handle == vsi_handle)
++			break;
++
++	return vsi_node;
++}
++
+ /**
+  * ice_aqc_send_sched_elem_cmd - send scheduling elements cmd
+  * @hw: pointer to the HW struct
+@@ -1084,8 +1105,10 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
+ 		if (parent->num_children < max_child_nodes) {
+ 			new_num_nodes = max_child_nodes - parent->num_children;
+ 		} else {
+-			/* This parent is full, try the next sibling */
+-			parent = parent->sibling;
++			/* This parent is full,
++			 * try the next available sibling.
++			 */
++			parent = ice_sched_find_next_vsi_node(parent);
+ 			/* Don't modify the first node TEID memory if the
+ 			 * first node was added already in the above call.
+ 			 * Instead send some temp memory for all other
+@@ -1528,12 +1551,23 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
+ 	/* get the first queue group node from VSI sub-tree */
+ 	qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer);
+ 	while (qgrp_node) {
++		struct ice_sched_node *next_vsi_node;
++
+ 		/* make sure the qgroup node is part of the VSI subtree */
+ 		if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node))
+ 			if (qgrp_node->num_children < max_children &&
+ 			    qgrp_node->owner == owner)
+ 				break;
+ 		qgrp_node = qgrp_node->sibling;
++		if (qgrp_node)
++			continue;
++
++		next_vsi_node = ice_sched_find_next_vsi_node(vsi_node);
++		if (!next_vsi_node)
++			break;
++
++		vsi_node = next_vsi_node;
++		qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer);
+ 	}
+ 
+ 	/* Select the best queue group */
+@@ -1604,16 +1638,16 @@ ice_sched_get_agg_node(struct ice_port_info *pi, struct ice_sched_node *tc_node,
+ /**
+  * ice_sched_calc_vsi_child_nodes - calculate number of VSI child nodes
+  * @hw: pointer to the HW struct
+- * @num_qs: number of queues
++ * @num_new_qs: number of new queues that will be added to the tree
+  * @num_nodes: num nodes array
+  *
+  * This function calculates the number of VSI child nodes based on the
+  * number of queues.
+  */
+ static void
+-ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes)
++ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_new_qs, u16 *num_nodes)
+ {
+-	u16 num = num_qs;
++	u16 num = num_new_qs;
+ 	u8 i, qgl, vsil;
+ 
+ 	qgl = ice_sched_get_qgrp_layer(hw);
+@@ -1779,7 +1813,11 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
+ 		if (!parent)
+ 			return -EIO;
+ 
+-		if (i == vsil)
++		/* Do not modify the VSI handle for already existing VSI nodes,
++		 * (if no new VSI node was added to the tree).
++		 * Assign the VSI handle only to newly added VSI nodes.
++		 */
++		if (i == vsil && num_added)
+ 			parent->vsi_handle = vsi_handle;
+ 	}
+ 
+@@ -1812,6 +1850,41 @@ ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc)
+ 					       num_nodes);
+ }
+ 
++/**
++ * ice_sched_recalc_vsi_support_nodes - recalculate VSI support nodes count
++ * @hw: pointer to the HW struct
++ * @vsi_node: pointer to the leftmost VSI node that needs to be extended
++ * @new_numqs: new number of queues that has to be handled by the VSI
++ * @new_num_nodes: pointer to nodes count table to modify the VSI layer entry
++ *
++ * This function recalculates the number of supported nodes that need to
++ * be added after adding more Tx queues for a given VSI.
++ * The number of new VSI support nodes that shall be added will be saved
++ * to the @new_num_nodes table for the VSI layer.
++ */
++static void
++ice_sched_recalc_vsi_support_nodes(struct ice_hw *hw,
++				   struct ice_sched_node *vsi_node,
++				   unsigned int new_numqs, u16 *new_num_nodes)
++{
++	u32 vsi_nodes_cnt = 1;
++	u32 max_queue_cnt = 1;
++	u32 qgl, vsil;
++
++	qgl = ice_sched_get_qgrp_layer(hw);
++	vsil = ice_sched_get_vsi_layer(hw);
++
++	for (u32 i = vsil; i <= qgl; i++)
++		max_queue_cnt *= hw->max_children[i];
++
++	while ((vsi_node = ice_sched_find_next_vsi_node(vsi_node)) != NULL)
++		vsi_nodes_cnt++;
++
++	if (new_numqs > (max_queue_cnt * vsi_nodes_cnt))
++		new_num_nodes[vsil] = DIV_ROUND_UP(new_numqs, max_queue_cnt) -
++				      vsi_nodes_cnt;
++}
++
+ /**
+  * ice_sched_update_vsi_child_nodes - update VSI child nodes
+  * @pi: port information structure
+@@ -1863,15 +1936,25 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
+ 			return status;
+ 	}
+ 
+-	if (new_numqs)
+-		ice_sched_calc_vsi_child_nodes(hw, new_numqs, new_num_nodes);
+-	/* Keep the max number of queue configuration all the time. Update the
+-	 * tree only if number of queues > previous number of queues. This may
++	ice_sched_recalc_vsi_support_nodes(hw, vsi_node,
++					   new_numqs, new_num_nodes);
++	ice_sched_calc_vsi_child_nodes(hw, new_numqs - prev_numqs,
++				       new_num_nodes);
++
++	/* Never decrease the number of queues in the tree. Update the tree
++	 * only if number of queues > previous number of queues. This may
+ 	 * leave some extra nodes in the tree if number of queues < previous
+ 	 * number but that wouldn't harm anything. Removing those extra nodes
+ 	 * may complicate the code if those nodes are part of SRL or
+ 	 * individually rate limited.
++	 * Also, add the required VSI support nodes if the existing ones cannot
++	 * handle the requested new number of queues.
+ 	 */
++	status = ice_sched_add_vsi_support_nodes(pi, vsi_handle, tc_node,
++						 new_num_nodes);
++	if (status)
++		return status;
++
+ 	status = ice_sched_add_vsi_child_nodes(pi, vsi_handle, tc_node,
+ 					       new_num_nodes, owner);
+ 	if (status)
+@@ -2012,6 +2095,58 @@ static bool ice_sched_is_leaf_node_present(struct ice_sched_node *node)
+ 	return (node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF);
+ }
+ 
++/**
++ * ice_sched_rm_vsi_subtree - remove all nodes assigned to a given VSI
++ * @pi: port information structure
++ * @vsi_node: pointer to the leftmost node of the VSI to be removed
++ * @owner: LAN or RDMA
++ * @tc: TC number
++ *
++ * Return: Zero in case of success, or -EBUSY if the VSI has leaf nodes in TC.
++ *
++ * This function removes all the VSI support nodes associated with a given VSI
++ * and its LAN or RDMA children nodes from the scheduler tree.
++ */
++static int
++ice_sched_rm_vsi_subtree(struct ice_port_info *pi,
++			 struct ice_sched_node *vsi_node, u8 owner, u8 tc)
++{
++	u16 vsi_handle = vsi_node->vsi_handle;
++	bool all_vsi_nodes_removed = true;
++	int j = 0;
++
++	while (vsi_node) {
++		struct ice_sched_node *next_vsi_node;
++
++		if (ice_sched_is_leaf_node_present(vsi_node)) {
++			ice_debug(pi->hw, ICE_DBG_SCHED, "VSI has leaf nodes in TC %d\n", tc);
++			return -EBUSY;
++		}
++		while (j < vsi_node->num_children) {
++			if (vsi_node->children[j]->owner == owner)
++				ice_free_sched_node(pi, vsi_node->children[j]);
++			else
++				j++;
++		}
++
++		next_vsi_node = ice_sched_find_next_vsi_node(vsi_node);
++
++		/* remove the VSI if it has no children */
++		if (!vsi_node->num_children)
++			ice_free_sched_node(pi, vsi_node);
++		else
++			all_vsi_nodes_removed = false;
++
++		vsi_node = next_vsi_node;
++	}
++
++	/* clean up aggregator related VSI info if any */
++	if (all_vsi_nodes_removed)
++		ice_sched_rm_agg_vsi_info(pi, vsi_handle);
++
++	return 0;
++}
++
+ /**
+  * ice_sched_rm_vsi_cfg - remove the VSI and its children nodes
+  * @pi: port information structure
+@@ -2038,7 +2173,6 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
+ 
+ 	ice_for_each_traffic_class(i) {
+ 		struct ice_sched_node *vsi_node, *tc_node;
+-		u8 j = 0;
+ 
+ 		tc_node = ice_sched_get_tc_node(pi, i);
+ 		if (!tc_node)
+@@ -2048,31 +2182,12 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
+ 		if (!vsi_node)
+ 			continue;
+ 
+-		if (ice_sched_is_leaf_node_present(vsi_node)) {
+-			ice_debug(pi->hw, ICE_DBG_SCHED, "VSI has leaf nodes in TC %d\n", i);
+-			status = -EBUSY;
++		status = ice_sched_rm_vsi_subtree(pi, vsi_node, owner, i);
++		if (status)
+ 			goto exit_sched_rm_vsi_cfg;
+-		}
+-		while (j < vsi_node->num_children) {
+-			if (vsi_node->children[j]->owner == owner) {
+-				ice_free_sched_node(pi, vsi_node->children[j]);
+ 
+-				/* reset the counter again since the num
+-				 * children will be updated after node removal
+-				 */
+-				j = 0;
+-			} else {
+-				j++;
+-			}
+-		}
+-		/* remove the VSI if it has no children */
+-		if (!vsi_node->num_children) {
+-			ice_free_sched_node(pi, vsi_node);
+-			vsi_ctx->sched.vsi_node[i] = NULL;
++		vsi_ctx->sched.vsi_node[i] = NULL;
+ 
+-			/* clean up aggregator related VSI info if any */
+-			ice_sched_rm_agg_vsi_info(pi, vsi_handle);
+-		}
+ 		if (owner == ICE_SCHED_NODE_OWNER_LAN)
+ 			vsi_ctx->sched.max_lanq[i] = 0;
+ 		else
+diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
+index 615e74d038457c..ba645ab22d394a 100644
+--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
++++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
+@@ -1802,11 +1802,19 @@ void idpf_vc_event_task(struct work_struct *work)
+ 	if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags))
+ 		return;
+ 
+-	if (test_bit(IDPF_HR_FUNC_RESET, adapter->flags) ||
+-	    test_bit(IDPF_HR_DRV_LOAD, adapter->flags)) {
+-		set_bit(IDPF_HR_RESET_IN_PROG, adapter->flags);
+-		idpf_init_hard_reset(adapter);
+-	}
++	if (test_bit(IDPF_HR_FUNC_RESET, adapter->flags))
++		goto func_reset;
++
++	if (test_bit(IDPF_HR_DRV_LOAD, adapter->flags))
++		goto drv_load;
++
++	return;
++
++func_reset:
++	idpf_vc_xn_shutdown(adapter->vcxn_mngr);
++drv_load:
++	set_bit(IDPF_HR_RESET_IN_PROG, adapter->flags);
++	idpf_init_hard_reset(adapter);
+ }
+ 
+ /**
+diff --git a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
+index dfd7cf1d9aa0ad..a986dd57255592 100644
+--- a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
++++ b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
+@@ -362,17 +362,18 @@ netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
+ {
+ 	struct idpf_tx_offload_params offload = { };
+ 	struct idpf_tx_buf *first;
++	int csum, tso, needed;
+ 	unsigned int count;
+ 	__be16 protocol;
+-	int csum, tso;
+ 
+ 	count = idpf_tx_desc_count_required(tx_q, skb);
+ 	if (unlikely(!count))
+ 		return idpf_tx_drop_skb(tx_q, skb);
+ 
+-	if (idpf_tx_maybe_stop_common(tx_q,
+-				      count + IDPF_TX_DESCS_PER_CACHE_LINE +
+-				      IDPF_TX_DESCS_FOR_CTX)) {
++	needed = count + IDPF_TX_DESCS_PER_CACHE_LINE + IDPF_TX_DESCS_FOR_CTX;
++	if (!netif_subqueue_maybe_stop(tx_q->netdev, tx_q->idx,
++				       IDPF_DESC_UNUSED(tx_q),
++				       needed, needed)) {
+ 		idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
+ 
+ 		u64_stats_update_begin(&tx_q->stats_sync);
+diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+index 623bf17f87f9c0..c6c36de58b9d12 100644
+--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
++++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+@@ -2132,6 +2132,19 @@ void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc,
+ 	desc->flow.qw1.compl_tag = cpu_to_le16(params->compl_tag);
+ }
+ 
++/* Global conditions to tell whether the txq (and related resources)
++ * has room to allow the use of "size" descriptors.
++ */
++static int idpf_txq_has_room(struct idpf_tx_queue *tx_q, u32 size)
++{
++	if (IDPF_DESC_UNUSED(tx_q) < size ||
++	    IDPF_TX_COMPLQ_PENDING(tx_q->txq_grp) >
++		IDPF_TX_COMPLQ_OVERFLOW_THRESH(tx_q->txq_grp->complq) ||
++	    IDPF_TX_BUF_RSV_LOW(tx_q))
++		return 0;
++	return 1;
++}
++
+ /**
+  * idpf_tx_maybe_stop_splitq - 1st level check for Tx splitq stop conditions
+  * @tx_q: the queue to be checked
+@@ -2142,29 +2155,11 @@ void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc,
+ static int idpf_tx_maybe_stop_splitq(struct idpf_tx_queue *tx_q,
+ 				     unsigned int descs_needed)
+ {
+-	if (idpf_tx_maybe_stop_common(tx_q, descs_needed))
+-		goto out;
+-
+-	/* If there are too many outstanding completions expected on the
+-	 * completion queue, stop the TX queue to give the device some time to
+-	 * catch up
+-	 */
+-	if (unlikely(IDPF_TX_COMPLQ_PENDING(tx_q->txq_grp) >
+-		     IDPF_TX_COMPLQ_OVERFLOW_THRESH(tx_q->txq_grp->complq)))
+-		goto splitq_stop;
+-
+-	/* Also check for available book keeping buffers; if we are low, stop
+-	 * the queue to wait for more completions
+-	 */
+-	if (unlikely(IDPF_TX_BUF_RSV_LOW(tx_q)))
+-		goto splitq_stop;
+-
+-	return 0;
+-
+-splitq_stop:
+-	netif_stop_subqueue(tx_q->netdev, tx_q->idx);
++	if (netif_subqueue_maybe_stop(tx_q->netdev, tx_q->idx,
++				      idpf_txq_has_room(tx_q, descs_needed),
++				      1, 1))
++		return 0;
+ 
+-out:
+ 	u64_stats_update_begin(&tx_q->stats_sync);
+ 	u64_stats_inc(&tx_q->q_stats.q_busy);
+ 	u64_stats_update_end(&tx_q->stats_sync);
+@@ -2190,12 +2185,6 @@ void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val,
+ 	nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
+ 	tx_q->next_to_use = val;
+ 
+-	if (idpf_tx_maybe_stop_common(tx_q, IDPF_TX_DESC_NEEDED)) {
+-		u64_stats_update_begin(&tx_q->stats_sync);
+-		u64_stats_inc(&tx_q->q_stats.q_busy);
+-		u64_stats_update_end(&tx_q->stats_sync);
+-	}
+-
+ 	/* Force memory writes to complete before letting h/w
+ 	 * know there are new descriptors to fetch.  (Only
+ 	 * applicable for weak-ordered memory model archs,
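
The idpf hunks above drop the driver's open-coded stop logic in favour of the generic netif_subqueue_maybe_stop() helper, which stops the subqueue when the free-descriptor expression falls below the stop threshold and samples it once more after stopping, so a racing completion can restart the queue instead of leaving it stalled. A hedged sketch of typical xmit-path usage (foo_* names and thresholds are illustrative):

static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct foo_txq *txq = foo_skb_txq(dev, skb);
	unsigned int needed = foo_desc_count(skb) + FOO_CTX_DESCS;

	/* Returns 0 when the queue had to be stopped and stayed
	 * stopped; back off and let the completion path wake us.
	 */
	if (!netif_subqueue_maybe_stop(dev, txq->idx,
				       foo_desc_unused(txq),
				       needed, needed))
		return NETDEV_TX_BUSY;

	/* ... fill descriptors and ring the doorbell ... */
	return NETDEV_TX_OK;
}

The splitq variant above folds its three conditions into idpf_txq_has_room() and feeds that boolean in as the descriptor expression with thresholds of 1, exercising the same stop-then-recheck machinery.
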
+diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+index 9c1fe84108ed2e..ffeeaede6cf8f4 100644
+--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
++++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+@@ -1052,12 +1052,4 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rxq,
+ 				      u16 cleaned_count);
+ int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off);
+ 
+-static inline bool idpf_tx_maybe_stop_common(struct idpf_tx_queue *tx_q,
+-					     u32 needed)
+-{
+-	return !netif_subqueue_maybe_stop(tx_q->netdev, tx_q->idx,
+-					  IDPF_DESC_UNUSED(tx_q),
+-					  needed, needed);
+-}
+-
+ #endif /* !_IDPF_TXRX_H_ */
+diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
+index 99bdb95bf22661..151beea20d3435 100644
+--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
++++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
+@@ -376,7 +376,7 @@ static void idpf_vc_xn_init(struct idpf_vc_xn_manager *vcxn_mngr)
+  * All waiting threads will be woken-up and their transaction aborted. Further
+  * operations on that object will fail.
+  */
+-static void idpf_vc_xn_shutdown(struct idpf_vc_xn_manager *vcxn_mngr)
++void idpf_vc_xn_shutdown(struct idpf_vc_xn_manager *vcxn_mngr)
+ {
+ 	int i;
+ 
+diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h
+index 83da5d8da56bf2..23271cf0a21605 100644
+--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h
++++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h
+@@ -66,5 +66,6 @@ int idpf_send_get_stats_msg(struct idpf_vport *vport);
+ int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs);
+ int idpf_send_get_set_rss_key_msg(struct idpf_vport *vport, bool get);
+ int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get);
++void idpf_vc_xn_shutdown(struct idpf_vc_xn_manager *vcxn_mngr);
+ 
+ #endif /* _IDPF_VIRTCHNL_H_ */
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
+index 35acc07bd96489..5765bac119f0e7 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
+@@ -1638,6 +1638,7 @@ static int otx2_qos_leaf_del_last(struct otx2_nic *pfvf, u16 classid, bool force
+ 	if (!node->is_static)
+ 		dwrr_del_node = true;
+ 
++	WRITE_ONCE(node->qid, OTX2_QOS_QID_INNER);
+ 	/* destroy the leaf node */
+ 	otx2_qos_disable_sq(pfvf, qid);
+ 	otx2_qos_destroy_node(pfvf, node);
+@@ -1682,9 +1683,6 @@ static int otx2_qos_leaf_del_last(struct otx2_nic *pfvf, u16 classid, bool force
+ 	}
+ 	kfree(new_cfg);
+ 
+-	/* update tx_real_queues */
+-	otx2_qos_update_tx_netdev_queues(pfvf);
+-
+ 	return 0;
+ }
+ 
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c b/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c
+index 9d887bfc31089c..ac9345644068ec 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c
+@@ -256,6 +256,26 @@ int otx2_qos_enable_sq(struct otx2_nic *pfvf, int qidx)
+ 	return err;
+ }
+ 
++static int otx2_qos_nix_npa_ndc_sync(struct otx2_nic *pfvf)
++{
++	struct ndc_sync_op *req;
++	int rc;
++
++	mutex_lock(&pfvf->mbox.lock);
++
++	req = otx2_mbox_alloc_msg_ndc_sync_op(&pfvf->mbox);
++	if (!req) {
++		mutex_unlock(&pfvf->mbox.lock);
++		return -ENOMEM;
++	}
++
++	req->nix_lf_tx_sync = true;
++	req->npa_lf_sync = true;
++	rc = otx2_sync_mbox_msg(&pfvf->mbox);
++	mutex_unlock(&pfvf->mbox.lock);
++	return rc;
++}
++
+ void otx2_qos_disable_sq(struct otx2_nic *pfvf, int qidx)
+ {
+ 	struct otx2_qset *qset = &pfvf->qset;
+@@ -285,6 +305,8 @@ void otx2_qos_disable_sq(struct otx2_nic *pfvf, int qidx)
+ 
+ 	otx2_qos_sqb_flush(pfvf, sq_idx);
+ 	otx2_smq_flush(pfvf, otx2_get_smq_idx(pfvf, sq_idx));
++	/* NIX/NPA NDC sync */
++	otx2_qos_nix_npa_ndc_sync(pfvf);
+ 	otx2_cleanup_tx_cqes(pfvf, cq);
+ 
+ 	mutex_lock(&pfvf->mbox.lock);
+diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c
+index c2ab87828d8589..5eb7a97e7eb177 100644
+--- a/drivers/net/ethernet/mediatek/mtk_star_emac.c
++++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c
+@@ -1468,6 +1468,8 @@ static __maybe_unused int mtk_star_suspend(struct device *dev)
+ 	if (netif_running(ndev))
+ 		mtk_star_disable(ndev);
+ 
++	netif_device_detach(ndev);
++
+ 	clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks);
+ 
+ 	return 0;
+@@ -1492,6 +1494,8 @@ static __maybe_unused int mtk_star_resume(struct device *dev)
+ 			clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks);
+ 	}
+ 
++	netif_device_attach(ndev);
++
+ 	return ret;
+ }
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+index cd754cd76bde1b..d73a2044dc2662 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+@@ -249,7 +249,7 @@ static const struct ptp_clock_info mlx4_en_ptp_clock_info = {
+ static u32 freq_to_shift(u16 freq)
+ {
+ 	u32 freq_khz = freq * 1000;
+-	u64 max_val_cycles = freq_khz * 1000 * MLX4_EN_WRAP_AROUND_SEC;
++	u64 max_val_cycles = freq_khz * 1000ULL * MLX4_EN_WRAP_AROUND_SEC;
+ 	u64 max_val_cycles_rounded = 1ULL << fls64(max_val_cycles - 1);
+ 	/* calculate max possible multiplier in order to fit in 64bit */
+ 	u64 max_mul = div64_u64(ULLONG_MAX, max_val_cycles_rounded);
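
The one-character mlx4 fix above addresses a silent 32-bit overflow: freq_khz is a u32, so freq_khz * 1000 is evaluated in 32-bit arithmetic and only afterwards widened to u64; writing 1000ULL promotes the multiplication to 64 bits. A standalone demo with a deliberately large illustrative frequency so the truncation is visible:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t freq_khz = 5000000;		/* 5 GHz in kHz, illustrative */

	uint64_t buggy = freq_khz * 1000;	/* 32-bit multiply, then widened */
	uint64_t fixed = freq_khz * 1000ULL;	/* promoted to 64-bit first */

	printf("buggy: %llu\n", (unsigned long long)buggy);	/* 705032704 */
	printf("fixed: %llu\n", (unsigned long long)fixed);	/* 5000000000 */
	return 0;
}
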
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+index 08ab0999f7b316..14192da4b8ed0d 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+@@ -706,8 +706,8 @@ static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq,
+ 				xdpi = mlx5e_xdpi_fifo_pop(xdpi_fifo);
+ 				page = xdpi.page.page;
+ 
+-				/* No need to check ((page->pp_magic & ~0x3UL) == PP_SIGNATURE)
+-				 * as we know this is a page_pool page.
++				/* No need to check page_pool_page_is_pp() as we
++				 * know this is a page_pool page.
+ 				 */
+ 				page_pool_recycle_direct(page->pp, page);
+ 			} while (++n < num);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+index 1baf8933a07cb0..39dcbf863421ad 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+@@ -266,8 +266,7 @@ static void mlx5e_ipsec_init_macs(struct mlx5e_ipsec_sa_entry *sa_entry,
+ 				  struct mlx5_accel_esp_xfrm_attrs *attrs)
+ {
+ 	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
+-	struct xfrm_state *x = sa_entry->x;
+-	struct net_device *netdev;
++	struct net_device *netdev = sa_entry->dev;
+ 	struct neighbour *n;
+ 	u8 addr[ETH_ALEN];
+ 	const void *pkey;
+@@ -277,8 +276,6 @@ static void mlx5e_ipsec_init_macs(struct mlx5e_ipsec_sa_entry *sa_entry,
+ 	    attrs->type != XFRM_DEV_OFFLOAD_PACKET)
+ 		return;
+ 
+-	netdev = x->xso.real_dev;
+-
+ 	mlx5_query_mac_address(mdev, addr);
+ 	switch (attrs->dir) {
+ 	case XFRM_DEV_OFFLOAD_IN:
+@@ -707,6 +704,7 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x,
+ 		return -ENOMEM;
+ 
+ 	sa_entry->x = x;
++	sa_entry->dev = netdev;
+ 	sa_entry->ipsec = ipsec;
+ 	/* Check if this SA is originated from acquire flow temporary SA */
+ 	if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ)
+@@ -849,8 +847,6 @@ static int mlx5e_ipsec_netevent_event(struct notifier_block *nb,
+ 	struct mlx5e_ipsec_sa_entry *sa_entry;
+ 	struct mlx5e_ipsec *ipsec;
+ 	struct neighbour *n = ptr;
+-	struct net_device *netdev;
+-	struct xfrm_state *x;
+ 	unsigned long idx;
+ 
+ 	if (event != NETEVENT_NEIGH_UPDATE || !(n->nud_state & NUD_VALID))
+@@ -870,11 +866,9 @@ static int mlx5e_ipsec_netevent_event(struct notifier_block *nb,
+ 				continue;
+ 		}
+ 
+-		x = sa_entry->x;
+-		netdev = x->xso.real_dev;
+ 		data = sa_entry->work->data;
+ 
+-		neigh_ha_snapshot(data->addr, n, netdev);
++		neigh_ha_snapshot(data->addr, n, sa_entry->dev);
+ 		queue_work(ipsec->wq, &sa_entry->work->work);
+ 	}
+ 
+@@ -1005,8 +999,8 @@ static void mlx5e_xfrm_update_stats(struct xfrm_state *x)
+ 	size_t headers;
+ 
+ 	lockdep_assert(lockdep_is_held(&x->lock) ||
+-		       lockdep_is_held(&dev_net(x->xso.real_dev)->xfrm.xfrm_cfg_mutex) ||
+-		       lockdep_is_held(&dev_net(x->xso.real_dev)->xfrm.xfrm_state_lock));
++		       lockdep_is_held(&net->xfrm.xfrm_cfg_mutex) ||
++		       lockdep_is_held(&net->xfrm.xfrm_state_lock));
+ 
+ 	if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ)
+ 		return;
+@@ -1141,7 +1135,7 @@ mlx5e_ipsec_build_accel_pol_attrs(struct mlx5e_ipsec_pol_entry *pol_entry,
+ static int mlx5e_xfrm_add_policy(struct xfrm_policy *x,
+ 				 struct netlink_ext_ack *extack)
+ {
+-	struct net_device *netdev = x->xdo.real_dev;
++	struct net_device *netdev = x->xdo.dev;
+ 	struct mlx5e_ipsec_pol_entry *pol_entry;
+ 	struct mlx5e_priv *priv;
+ 	int err;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+index 7d943e93cf6dc0..9aff779c77c898 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+@@ -260,6 +260,7 @@ struct mlx5e_ipsec_limits {
+ struct mlx5e_ipsec_sa_entry {
+ 	struct mlx5e_ipsec_esn_state esn_state;
+ 	struct xfrm_state *x;
++	struct net_device *dev;
+ 	struct mlx5e_ipsec *ipsec;
+ 	struct mlx5_accel_esp_xfrm_attrs attrs;
+ 	void (*set_iv_op)(struct sk_buff *skb, struct xfrm_state *x,
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+index 218d5402cd1a65..4d766eea32a377 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+@@ -2028,9 +2028,8 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
+ 	return err;
+ }
+ 
+-static bool mlx5_flow_has_geneve_opt(struct mlx5e_tc_flow *flow)
++static bool mlx5_flow_has_geneve_opt(struct mlx5_flow_spec *spec)
+ {
+-	struct mlx5_flow_spec *spec = &flow->attr->parse_attr->spec;
+ 	void *headers_v = MLX5_ADDR_OF(fte_match_param,
+ 				       spec->match_value,
+ 				       misc_parameters_3);
+@@ -2069,7 +2068,7 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
+ 	}
+ 	complete_all(&flow->del_hw_done);
+ 
+-	if (mlx5_flow_has_geneve_opt(flow))
++	if (mlx5_flow_has_geneve_opt(&attr->parse_attr->spec))
+ 		mlx5_geneve_tlv_option_del(priv->mdev->geneve);
+ 
+ 	if (flow->decap_route)
+@@ -2574,12 +2573,13 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
+ 
+ 		err = mlx5e_tc_tun_parse(filter_dev, priv, tmp_spec, f, match_level);
+ 		if (err) {
+-			kvfree(tmp_spec);
+ 			NL_SET_ERR_MSG_MOD(extack, "Failed to parse tunnel attributes");
+ 			netdev_warn(priv->netdev, "Failed to parse tunnel attributes");
+-			return err;
++		} else {
++			err = mlx5e_tc_set_attr_rx_tun(flow, tmp_spec);
+ 		}
+-		err = mlx5e_tc_set_attr_rx_tun(flow, tmp_spec);
++		if (mlx5_flow_has_geneve_opt(tmp_spec))
++			mlx5_geneve_tlv_option_del(priv->mdev->geneve);
+ 		kvfree(tmp_spec);
+ 		if (err)
+ 			return err;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+index 7aef30dbd82d6c..6544546a1153f9 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+@@ -1295,12 +1295,15 @@ mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
+ 		ret = mlx5_eswitch_load_pf_vf_vport(esw, MLX5_VPORT_ECPF, enabled_events);
+ 		if (ret)
+ 			goto ecpf_err;
+-		if (mlx5_core_ec_sriov_enabled(esw->dev)) {
+-			ret = mlx5_eswitch_load_ec_vf_vports(esw, esw->esw_funcs.num_ec_vfs,
+-							     enabled_events);
+-			if (ret)
+-				goto ec_vf_err;
+-		}
++	}
++
++	/* Enable ECVF vports */
++	if (mlx5_core_ec_sriov_enabled(esw->dev)) {
++		ret = mlx5_eswitch_load_ec_vf_vports(esw,
++						     esw->esw_funcs.num_ec_vfs,
++						     enabled_events);
++		if (ret)
++			goto ec_vf_err;
+ 	}
+ 
+ 	/* Enable VF vports */
+@@ -1331,9 +1334,11 @@ void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw)
+ {
+ 	mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
+ 
++	if (mlx5_core_ec_sriov_enabled(esw->dev))
++		mlx5_eswitch_unload_ec_vf_vports(esw,
++						 esw->esw_funcs.num_ec_vfs);
++
+ 	if (mlx5_ecpf_vport_exists(esw->dev)) {
+-		if (mlx5_core_ec_sriov_enabled(esw->dev))
+-			mlx5_eswitch_unload_ec_vf_vports(esw, esw->esw_funcs.num_vfs);
+ 		mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_ECPF);
+ 	}
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+index 0ce999706d412a..1bc88743d2dfa9 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+@@ -2200,6 +2200,7 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft,
+ 	struct mlx5_flow_handle *rule;
+ 	struct match_list *iter;
+ 	bool take_write = false;
++	bool try_again = false;
+ 	struct fs_fte *fte;
+ 	u64  version = 0;
+ 	int err;
+@@ -2264,6 +2265,7 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft,
+ 		nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
+ 
+ 		if (!g->node.active) {
++			try_again = true;
+ 			up_write_ref_node(&g->node, false);
+ 			continue;
+ 		}
+@@ -2285,7 +2287,8 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft,
+ 			tree_put_node(&fte->node, false);
+ 		return rule;
+ 	}
+-	rule = ERR_PTR(-ENOENT);
++	err = try_again ? -EAGAIN : -ENOENT;
++	rule = ERR_PTR(err);
+ out:
+ 	kmem_cache_free(steering->ftes_cache, fte);
+ 	return rule;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+index 972e8e9df585ba..9bc9bd83c2324c 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+@@ -291,7 +291,7 @@ static void free_4k(struct mlx5_core_dev *dev, u64 addr, u32 function)
+ static int alloc_system_page(struct mlx5_core_dev *dev, u32 function)
+ {
+ 	struct device *device = mlx5_core_dma_dev(dev);
+-	int nid = dev_to_node(device);
++	int nid = dev->priv.numa_node;
+ 	struct page *page;
+ 	u64 zero_addr = 1;
+ 	u64 addr;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.c
+index ab5f8f07f1f7e5..72b19b05c0cf4f 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.c
+@@ -558,6 +558,9 @@ hws_definer_conv_outer(struct mlx5hws_definer_conv_data *cd,
+ 	HWS_SET_HDR(fc, match_param, IP_PROTOCOL_O,
+ 		    outer_headers.ip_protocol,
+ 		    eth_l3_outer.protocol_next_header);
++	HWS_SET_HDR(fc, match_param, IP_VERSION_O,
++		    outer_headers.ip_version,
++		    eth_l3_outer.ip_version);
+ 	HWS_SET_HDR(fc, match_param, IP_TTL_O,
+ 		    outer_headers.ttl_hoplimit,
+ 		    eth_l3_outer.time_to_live_hop_limit);
+diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
+index 812ad9d61676a3..9836fbbea0cc2f 100644
+--- a/drivers/net/ethernet/microchip/lan743x_main.c
++++ b/drivers/net/ethernet/microchip/lan743x_main.c
+@@ -1330,7 +1330,7 @@ static int lan743x_mac_set_mtu(struct lan743x_adapter *adapter, int new_mtu)
+ }
+ 
+ /* PHY */
+-static int lan743x_phy_reset(struct lan743x_adapter *adapter)
++static int lan743x_hw_reset_phy(struct lan743x_adapter *adapter)
+ {
+ 	u32 data;
+ 
+@@ -1346,11 +1346,6 @@ static int lan743x_phy_reset(struct lan743x_adapter *adapter)
+ 				  50000, 1000000);
+ }
+ 
+-static int lan743x_phy_init(struct lan743x_adapter *adapter)
+-{
+-	return lan743x_phy_reset(adapter);
+-}
+-
+ static void lan743x_phy_interface_select(struct lan743x_adapter *adapter)
+ {
+ 	u32 id_rev;
+@@ -3505,10 +3500,6 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter,
+ 	if (ret)
+ 		return ret;
+ 
+-	ret = lan743x_phy_init(adapter);
+-	if (ret)
+-		return ret;
+-
+ 	ret = lan743x_ptp_init(adapter);
+ 	if (ret)
+ 		return ret;
+@@ -3642,6 +3633,10 @@ static int lan743x_pcidev_probe(struct pci_dev *pdev,
+ 	if (ret)
+ 		goto cleanup_pci;
+ 
++	ret = lan743x_hw_reset_phy(adapter);
++	if (ret)
++		goto cleanup_pci;
++
+ 	ret = lan743x_hardware_init(adapter, pdev);
+ 	if (ret)
+ 		goto cleanup_pci;
+diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+index 534d4716d5f7d4..b34e015eedf9b9 100644
+--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
++++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+@@ -353,6 +353,11 @@ static void lan966x_ifh_set_rew_op(void *ifh, u64 rew_op)
+ 	lan966x_ifh_set(ifh, rew_op, IFH_POS_REW_CMD, IFH_WID_REW_CMD);
+ }
+ 
++static void lan966x_ifh_set_oam_type(void *ifh, u64 oam_type)
++{
++	lan966x_ifh_set(ifh, oam_type, IFH_POS_PDU_TYPE, IFH_WID_PDU_TYPE);
++}
++
+ static void lan966x_ifh_set_timestamp(void *ifh, u64 timestamp)
+ {
+ 	lan966x_ifh_set(ifh, timestamp, IFH_POS_TIMESTAMP, IFH_WID_TIMESTAMP);
+@@ -380,6 +385,7 @@ static netdev_tx_t lan966x_port_xmit(struct sk_buff *skb,
+ 			return err;
+ 
+ 		lan966x_ifh_set_rew_op(ifh, LAN966X_SKB_CB(skb)->rew_op);
++		lan966x_ifh_set_oam_type(ifh, LAN966X_SKB_CB(skb)->pdu_type);
+ 		lan966x_ifh_set_timestamp(ifh, LAN966X_SKB_CB(skb)->ts_id);
+ 	}
+ 
+@@ -874,6 +880,7 @@ static int lan966x_probe_port(struct lan966x *lan966x, u32 p,
+ 	lan966x_vlan_port_set_vlan_aware(port, 0);
+ 	lan966x_vlan_port_set_vid(port, HOST_PVID, false, false);
+ 	lan966x_vlan_port_apply(port);
++	lan966x_vlan_port_rew_host(port);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
+index 25cb2f61986f69..8aa39497818fed 100644
+--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
++++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
+@@ -75,6 +75,10 @@
+ #define IFH_REW_OP_ONE_STEP_PTP		0x3
+ #define IFH_REW_OP_TWO_STEP_PTP		0x4
+ 
++#define IFH_PDU_TYPE_NONE		0
++#define IFH_PDU_TYPE_IPV4		7
++#define IFH_PDU_TYPE_IPV6		8
++
+ #define FDMA_RX_DCB_MAX_DBS		1
+ #define FDMA_TX_DCB_MAX_DBS		1
+ 
+@@ -254,6 +258,7 @@ struct lan966x_phc {
+ 
+ struct lan966x_skb_cb {
+ 	u8 rew_op;
++	u8 pdu_type;
+ 	u16 ts_id;
+ 	unsigned long jiffies;
+ };
+@@ -492,6 +497,7 @@ void lan966x_vlan_port_apply(struct lan966x_port *port);
+ bool lan966x_vlan_cpu_member_cpu_vlan_mask(struct lan966x *lan966x, u16 vid);
+ void lan966x_vlan_port_set_vlan_aware(struct lan966x_port *port,
+ 				      bool vlan_aware);
++void lan966x_vlan_port_rew_host(struct lan966x_port *port);
+ int lan966x_vlan_port_set_vid(struct lan966x_port *port,
+ 			      u16 vid,
+ 			      bool pvid,
+diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
+index 63905bb5a63a83..87e5e81d40dc68 100644
+--- a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
++++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
+@@ -322,34 +322,55 @@ void lan966x_ptp_hwtstamp_get(struct lan966x_port *port,
+ 	*cfg = phc->hwtstamp_config;
+ }
+ 
+-static int lan966x_ptp_classify(struct lan966x_port *port, struct sk_buff *skb)
++static void lan966x_ptp_classify(struct lan966x_port *port, struct sk_buff *skb,
++				 u8 *rew_op, u8 *pdu_type)
+ {
+ 	struct ptp_header *header;
+ 	u8 msgtype;
+ 	int type;
+ 
+-	if (port->ptp_tx_cmd == IFH_REW_OP_NOOP)
+-		return IFH_REW_OP_NOOP;
++	if (port->ptp_tx_cmd == IFH_REW_OP_NOOP) {
++		*rew_op = IFH_REW_OP_NOOP;
++		*pdu_type = IFH_PDU_TYPE_NONE;
++		return;
++	}
+ 
+ 	type = ptp_classify_raw(skb);
+-	if (type == PTP_CLASS_NONE)
+-		return IFH_REW_OP_NOOP;
++	if (type == PTP_CLASS_NONE) {
++		*rew_op = IFH_REW_OP_NOOP;
++		*pdu_type = IFH_PDU_TYPE_NONE;
++		return;
++	}
+ 
+ 	header = ptp_parse_header(skb, type);
+-	if (!header)
+-		return IFH_REW_OP_NOOP;
++	if (!header) {
++		*rew_op = IFH_REW_OP_NOOP;
++		*pdu_type = IFH_PDU_TYPE_NONE;
++		return;
++	}
+ 
+-	if (port->ptp_tx_cmd == IFH_REW_OP_TWO_STEP_PTP)
+-		return IFH_REW_OP_TWO_STEP_PTP;
++	if (type & PTP_CLASS_L2)
++		*pdu_type = IFH_PDU_TYPE_NONE;
++	if (type & PTP_CLASS_IPV4)
++		*pdu_type = IFH_PDU_TYPE_IPV4;
++	if (type & PTP_CLASS_IPV6)
++		*pdu_type = IFH_PDU_TYPE_IPV6;
++
++	if (port->ptp_tx_cmd == IFH_REW_OP_TWO_STEP_PTP) {
++		*rew_op = IFH_REW_OP_TWO_STEP_PTP;
++		return;
++	}
+ 
+ 	/* If it is a sync frame and one-step is enabled, set the one-step
+ 	 * operation, otherwise run as two-step
+ 	 */
+ 	msgtype = ptp_get_msgtype(header, type);
+-	if ((msgtype & 0xf) == 0)
+-		return IFH_REW_OP_ONE_STEP_PTP;
++	if ((msgtype & 0xf) == 0) {
++		*rew_op = IFH_REW_OP_ONE_STEP_PTP;
++		return;
++	}
+ 
+-	return IFH_REW_OP_TWO_STEP_PTP;
++	*rew_op = IFH_REW_OP_TWO_STEP_PTP;
+ }
+ 
+ static void lan966x_ptp_txtstamp_old_release(struct lan966x_port *port)
+@@ -374,10 +395,12 @@ int lan966x_ptp_txtstamp_request(struct lan966x_port *port,
+ {
+ 	struct lan966x *lan966x = port->lan966x;
+ 	unsigned long flags;
++	u8 pdu_type;
+ 	u8 rew_op;
+ 
+-	rew_op = lan966x_ptp_classify(port, skb);
++	lan966x_ptp_classify(port, skb, &rew_op, &pdu_type);
+ 	LAN966X_SKB_CB(skb)->rew_op = rew_op;
++	LAN966X_SKB_CB(skb)->pdu_type = pdu_type;
+ 
+ 	if (rew_op != IFH_REW_OP_TWO_STEP_PTP)
+ 		return 0;
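
The classifier now returns both the rewriter op and a PDU type derived from the
PTP class bits, so the IFH can tell the hardware where the timestamp and UDP
checksum live. A minimal sketch of the class-to-PDU-type mapping; the class
flag values below are illustrative stand-ins, not the kernel's definitions:

#include <stdio.h>

#define PTP_CLASS_L2   0x1	/* illustrative class flags */
#define PTP_CLASS_IPV4 0x2
#define PTP_CLASS_IPV6 0x4

#define PDU_TYPE_NONE 0		/* same values as the IFH_PDU_TYPE_* above */
#define PDU_TYPE_IPV4 7
#define PDU_TYPE_IPV6 8

/* Map the transport class of a PTP frame to the IFH PDU type field. */
static int class_to_pdu_type(int type)
{
	if (type & PTP_CLASS_IPV4)
		return PDU_TYPE_IPV4;
	if (type & PTP_CLASS_IPV6)
		return PDU_TYPE_IPV6;
	return PDU_TYPE_NONE;	/* L2 and everything else */
}

int main(void)
{
	printf("%d %d %d\n",
	       class_to_pdu_type(PTP_CLASS_L2),
	       class_to_pdu_type(PTP_CLASS_IPV4),
	       class_to_pdu_type(PTP_CLASS_IPV6));	/* 0 7 8 */
	return 0;
}
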
+diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c b/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c
+index 1c88120eb291a2..bcb4db76b75cd5 100644
+--- a/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c
++++ b/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c
+@@ -297,6 +297,7 @@ static void lan966x_port_bridge_leave(struct lan966x_port *port,
+ 	lan966x_vlan_port_set_vlan_aware(port, false);
+ 	lan966x_vlan_port_set_vid(port, HOST_PVID, false, false);
+ 	lan966x_vlan_port_apply(port);
++	lan966x_vlan_port_rew_host(port);
+ }
+ 
+ int lan966x_port_changeupper(struct net_device *dev,
+diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c b/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c
+index fa34a739c748e1..7da22520724ce2 100644
+--- a/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c
++++ b/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c
+@@ -149,6 +149,27 @@ void lan966x_vlan_port_set_vlan_aware(struct lan966x_port *port,
+ 	port->vlan_aware = vlan_aware;
+ }
+ 
++/* When the interface is in host mode, it should not be VLAN aware, but it
++ * should still insert any tag it gets from the network stack. The tag is not
++ * in the frame data but in the skb, and the IFH is already configured to
++ * fetch it from there. So update the rewriter to insert the VLAN tag for all
++ * frames whose VID is different from 0.
++ */
++void lan966x_vlan_port_rew_host(struct lan966x_port *port)
++{
++	struct lan966x *lan966x = port->lan966x;
++	u32 val;
++
++	/* Tag all frames except when VID=0 */
++	val = REW_TAG_CFG_TAG_CFG_SET(2);
++
++	/* Update only some bits in the register */
++	lan_rmw(val,
++		REW_TAG_CFG_TAG_CFG,
++		lan966x, REW_TAG_CFG(port->chip_port));
++}
++
+ void lan966x_vlan_port_apply(struct lan966x_port *port)
+ {
+ 	struct lan966x *lan966x = port->lan966x;
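
The new helper is a masked read-modify-write on REW_TAG_CFG with TAG_CFG=2,
i.e. tag everything except VID 0. A userspace sketch of the rmw pattern; the
field layout here is illustrative, not the real register map:

#include <stdio.h>
#include <stdint.h>

/* Illustrative layout: TAG_CFG as a 2-bit field at bit 0, value 2 meaning
 * "tag all frames except VID 0".
 */
#define TAG_CFG_MASK   0x3u
#define TAG_CFG_SET(x) ((x) & TAG_CFG_MASK)

/* Read-modify-write: touch only the masked bits, keep the rest. */
static uint32_t rmw(uint32_t reg, uint32_t val, uint32_t mask)
{
	return (reg & ~mask) | (val & mask);
}

int main(void)
{
	uint32_t reg = 0xabcd0001;

	reg = rmw(reg, TAG_CFG_SET(2), TAG_CFG_MASK);
	printf("0x%08x\n", reg);	/* 0xabcd0002: only TAG_CFG changed */
	return 0;
}
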
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c
+index c9693f77e1f61f..ac6f2e3a3fcd2f 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c
+@@ -32,6 +32,11 @@ static int est_configure(struct stmmac_priv *priv, struct stmmac_est *cfg,
+ 	int i, ret = 0;
+ 	u32 ctrl;
+ 
++	if (!ptp_rate) {
++		netdev_warn(priv->dev, "Invalid PTP rate");
++		return -EINVAL;
++	}
++
+ 	ret |= est_write(est_addr, EST_BTR_LOW, cfg->btr[0], false);
+ 	ret |= est_write(est_addr, EST_BTR_HIGH, cfg->btr[1], false);
+ 	ret |= est_write(est_addr, EST_TER, cfg->ter, false);
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 918d7f2e8ba992..f68e3ece919cc8 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -835,6 +835,11 @@ int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
+ 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
+ 		return -EOPNOTSUPP;
+ 
++	if (!priv->plat->clk_ptp_rate) {
++		netdev_err(priv->dev, "Invalid PTP clock rate");
++		return -EINVAL;
++	}
++
+ 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
+ 	priv->systime_flags = systime_flags;
+ 
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+index aaf008bdbbcd46..8fd868b671a261 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+@@ -419,6 +419,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
+ 	struct device_node *np = pdev->dev.of_node;
+ 	struct plat_stmmacenet_data *plat;
+ 	struct stmmac_dma_cfg *dma_cfg;
++	static int bus_id = -ENODEV;
+ 	int phy_mode;
+ 	void *ret;
+ 	int rc;
+@@ -454,8 +455,14 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
+ 	of_property_read_u32(np, "max-speed", &plat->max_speed);
+ 
+ 	plat->bus_id = of_alias_get_id(np, "ethernet");
+-	if (plat->bus_id < 0)
+-		plat->bus_id = 0;
++	if (plat->bus_id < 0) {
++		if (bus_id < 0)
++			bus_id = of_alias_get_highest_id("ethernet");
++		/* No ethernet alias found, init at -1 so first bus_id is 0 */
++		if (bus_id < 0)
++			bus_id = -1;
++		plat->bus_id = ++bus_id;
++	}
+ 
+ 	/* Default to phy auto-detection */
+ 	plat->phy_addr = -1;
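
The fallback above hands out sequential bus ids seeded from the highest
existing "ethernet" alias, so aliased and non-aliased instances never collide.
A toy model of the numbering; the -2 initializer stands in for -ENODEV:

#include <stdio.h>

/* Seed a static counter from the highest "ethernet" alias id (or -1 when
 * there is none) and hand out the next id to every instance without an
 * alias of its own.
 */
static int next_bus_id(int alias_id, int highest_alias)
{
	static int bus_id = -2;		/* stand-in for -ENODEV */

	if (alias_id >= 0)
		return alias_id;	/* alias always wins */
	if (bus_id < 0)
		bus_id = highest_alias >= 0 ? highest_alias : -1;
	return ++bus_id;
}

int main(void)
{
	/* two aliased ports (0, 1), then two without aliases */
	printf("%d %d %d %d\n",
	       next_bus_id(0, 1), next_bus_id(1, 1),
	       next_bus_id(-1, 1), next_bus_id(-1, 1));	/* 0 1 2 3 */
	return 0;
}
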
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+index a6b1de9a251dd4..5c85040a1b937e 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+@@ -303,7 +303,7 @@ void stmmac_ptp_register(struct stmmac_priv *priv)
+ 
+ 	/* Calculate the clock domain crossing (CDC) error if necessary */
+ 	priv->plat->cdc_error_adj = 0;
+-	if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate)
++	if (priv->plat->has_gmac4)
+ 		priv->plat->cdc_error_adj = (2 * NSEC_PER_SEC) / priv->plat->clk_ptp_rate;
+ 
+ 	stmmac_ptp_clock_ops.n_per_out = priv->dma_cap.pps_out_num;
+diff --git a/drivers/net/ethernet/ti/icssg/icssg_stats.c b/drivers/net/ethernet/ti/icssg/icssg_stats.c
+index 6f0edae38ea242..172ae38381b453 100644
+--- a/drivers/net/ethernet/ti/icssg/icssg_stats.c
++++ b/drivers/net/ethernet/ti/icssg/icssg_stats.c
+@@ -29,6 +29,14 @@ void emac_update_hardware_stats(struct prueth_emac *emac)
+ 	spin_lock(&prueth->stats_lock);
+ 
+ 	for (i = 0; i < ARRAY_SIZE(icssg_all_miig_stats); i++) {
++		/* In MII mode the TX lines are swapped inside the ICSSG, so
++		 * read TX stats from slice 1 for port 0 and from slice 0 for
++		 * port 1 to get accurate TX stats for a given port
++		 */
++		if (emac->phy_if == PHY_INTERFACE_MODE_MII &&
++		    icssg_all_miig_stats[i].offset >= ICSSG_TX_PACKET_OFFSET &&
++		    icssg_all_miig_stats[i].offset <= ICSSG_TX_BYTE_OFFSET)
++			base = stats_base[slice ^ 1];
+ 		regmap_read(prueth->miig_rt,
+ 			    base + icssg_all_miig_stats[i].offset,
+ 			    &val);
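
For context on the hunk above: in MII mode the TX statistics of a port live in
the other slice's register block, and "slice ^ 1" selects that peer slice. A
minimal userspace sketch of the selection, with stand-in bases and offsets
rather than the real ICSSG register map:

#include <stdio.h>

#define TX_PACKET_OFFSET 0x30	/* illustrative, not the real ICSSG offsets */
#define TX_BYTE_OFFSET   0x4c

static unsigned int stats_base[2] = { 0x0000, 0x1000 }; /* slice0, slice1 */

/* Pick the register base for one stat: TX stats in MII mode come from the
 * peer slice, everything else from the port's own slice.
 */
static unsigned int stat_base(int slice, int mii, unsigned int offset)
{
	if (mii && offset >= TX_PACKET_OFFSET && offset <= TX_BYTE_OFFSET)
		return stats_base[slice ^ 1];	/* swapped TX lines */
	return stats_base[slice];
}

int main(void)
{
	printf("port0 rx base: 0x%x\n", stat_base(0, 1, 0x10)); /* own slice */
	printf("port0 tx base: 0x%x\n", stat_base(0, 1, 0x34)); /* peer slice */
	return 0;
}
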
+diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+index fe3438abcd253d..2d47b35443af00 100644
+--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
++++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+@@ -843,7 +843,7 @@ static void axienet_dma_tx_cb(void *data, const struct dmaengine_result *result)
+ 	dev_consume_skb_any(skbuf_dma->skb);
+ 	netif_txq_completed_wake(txq, 1, len,
+ 				 CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
+-				 2 * MAX_SKB_FRAGS);
++				 2);
+ }
+ 
+ /**
+@@ -877,7 +877,7 @@ axienet_start_xmit_dmaengine(struct sk_buff *skb, struct net_device *ndev)
+ 
+ 	dma_dev = lp->tx_chan->device;
+ 	sg_len = skb_shinfo(skb)->nr_frags + 1;
+-	if (CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX) <= sg_len) {
++	if (CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX) <= 1) {
+ 		netif_stop_queue(ndev);
+ 		if (net_ratelimit())
+ 			netdev_warn(ndev, "TX ring unexpectedly full\n");
+@@ -927,7 +927,7 @@ axienet_start_xmit_dmaengine(struct sk_buff *skb, struct net_device *ndev)
+ 	txq = skb_get_tx_queue(lp->ndev, skb);
+ 	netdev_tx_sent_queue(txq, skb->len);
+ 	netif_txq_maybe_stop(txq, CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
+-			     MAX_SKB_FRAGS + 1, 2 * MAX_SKB_FRAGS);
++			     1, 2);
+ 
+ 	dmaengine_submit(dma_tx_desc);
+ 	dma_async_issue_pending(lp->tx_chan);
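
With the dmaengine path each skb consumes exactly one ring slot, since all
fragments are mapped into a single scatter-gather descriptor, so the queue can
stop at one free slot and wake at two. A small demo of the CIRC_SPACE
arithmetic behind those thresholds, assuming the usual power-of-two ring size:

#include <stdio.h>

#define TX_BD_NUM_MAX 64	/* assumed power-of-two ring size */

/* Same definition as include/linux/circ_buf.h */
#define CIRC_SPACE(head, tail, size) \
	(((tail) - ((head) + 1)) & ((size) - 1))

int main(void)
{
	int head = 10, tail = 12;

	/* head is one behind tail: a single free slot -> stop the queue */
	printf("space=%d stop=%d\n",
	       CIRC_SPACE(head, tail, TX_BD_NUM_MAX),
	       CIRC_SPACE(head, tail, TX_BD_NUM_MAX) <= 1);	/* 1 1 */
	return 0;
}
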
+diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
+index ee215928257387..090a56a5e456ac 100644
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -246,15 +246,39 @@ static sci_t make_sci(const u8 *addr, __be16 port)
+ 	return sci;
+ }
+ 
+-static sci_t macsec_frame_sci(struct macsec_eth_header *hdr, bool sci_present)
++static sci_t macsec_active_sci(struct macsec_secy *secy)
+ {
+-	sci_t sci;
++	struct macsec_rx_sc *rx_sc = rcu_dereference_bh(secy->rx_sc);
++
++	/* Case single RX SC */
++	if (rx_sc && !rcu_dereference_bh(rx_sc->next))
++		return (rx_sc->active) ? rx_sc->sci : 0;
++	/* Case no RX SC or multiple */
++	else
++		return 0;
++}
++
++static sci_t macsec_frame_sci(struct macsec_eth_header *hdr, bool sci_present,
++			      struct macsec_rxh_data *rxd)
++{
++	struct macsec_dev *macsec;
++	sci_t sci = 0;
+ 
+-	if (sci_present)
++	/* SC = 1 */
++	if (sci_present) {
+ 		memcpy(&sci, hdr->secure_channel_id,
+ 		       sizeof(hdr->secure_channel_id));
+-	else
++	/* SC = 0; ES = 0 */
++	} else if ((!(hdr->tci_an & (MACSEC_TCI_ES | MACSEC_TCI_SC))) &&
++		   (list_is_singular(&rxd->secys))) {
++		/* Only one SECY should exist on this scenario */
++		macsec = list_first_or_null_rcu(&rxd->secys, struct macsec_dev,
++						secys);
++		if (macsec)
++			return macsec_active_sci(&macsec->secy);
++	} else {
+ 		sci = make_sci(hdr->eth.h_source, MACSEC_PORT_ES);
++	}
+ 
+ 	return sci;
+ }
+@@ -1108,7 +1132,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
+ 	struct macsec_rxh_data *rxd;
+ 	struct macsec_dev *macsec;
+ 	unsigned int len;
+-	sci_t sci;
++	sci_t sci = 0;
+ 	u32 hdr_pn;
+ 	bool cbit;
+ 	struct pcpu_rx_sc_stats *rxsc_stats;
+@@ -1155,11 +1179,14 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
+ 
+ 	macsec_skb_cb(skb)->has_sci = !!(hdr->tci_an & MACSEC_TCI_SC);
+ 	macsec_skb_cb(skb)->assoc_num = hdr->tci_an & MACSEC_AN_MASK;
+-	sci = macsec_frame_sci(hdr, macsec_skb_cb(skb)->has_sci);
+ 
+ 	rcu_read_lock();
+ 	rxd = macsec_data_rcu(skb->dev);
+ 
++	sci = macsec_frame_sci(hdr, macsec_skb_cb(skb)->has_sci, rxd);
++	if (!sci)
++		goto drop_nosc;
++
+ 	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
+ 		struct macsec_rx_sc *sc = find_rx_sc(&macsec->secy, sci);
+ 
+@@ -1282,6 +1309,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
+ 	macsec_rxsa_put(rx_sa);
+ drop_nosa:
+ 	macsec_rxsc_put(rx_sc);
++drop_nosc:
+ 	rcu_read_unlock();
+ drop_direct:
+ 	kfree_skb(skb);
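
The receive path above derives the SCI three ways: verbatim from the SecTAG
when SC=1, from the lone active RX SC when SC=0 and ES=0 with a single SecY
present, and otherwise from the source MAC plus the end-station port. A hedged
userspace sketch of that decision tree; the TCI bit positions and the sci
representation here are simplified stand-ins, not the kernel's definitions:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define TCI_ES 0x40	/* end-station bit, illustrative position */
#define TCI_SC 0x20	/* SCI-present bit, illustrative position */

/* Simplified stand-in for the kernel's sci_t (64 bits: MAC | port). */
static uint64_t make_sci(const uint8_t *mac, uint16_t port)
{
	uint64_t sci = 0;

	memcpy(&sci, mac, 6);
	memcpy((uint8_t *)&sci + 6, &port, 2);
	return sci;
}

static uint64_t frame_sci(uint8_t tci_an, const uint64_t *explicit_sci,
			  const uint8_t *src_mac, int single_secy,
			  uint64_t active_sci)
{
	if (tci_an & TCI_SC)			/* SC = 1: SCI on the wire */
		return *explicit_sci;
	if (!(tci_an & (TCI_ES | TCI_SC)) && single_secy)
		return active_sci;		/* lone active RX SC, or 0 */
	return make_sci(src_mac, 0x0001);	/* ES port */
}

int main(void)
{
	uint8_t mac[6] = { 0x02, 0, 0, 0, 0, 1 };
	uint64_t on_wire = 0x1234;

	printf("%llx\n", (unsigned long long)
	       frame_sci(TCI_SC, &on_wire, mac, 0, 0));		/* 1234 */
	printf("%llx\n", (unsigned long long)
	       frame_sci(0, &on_wire, mac, 1, 0x99));		/* 99 */
	return 0;
}
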
+diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
+index 1b29d1d794a201..79b898311819d4 100644
+--- a/drivers/net/netdevsim/netdev.c
++++ b/drivers/net/netdevsim/netdev.c
+@@ -353,7 +353,8 @@ static int nsim_poll(struct napi_struct *napi, int budget)
+ 	int done;
+ 
+ 	done = nsim_rcv(rq, budget);
+-	napi_complete(napi);
++	if (done < budget)
++		napi_complete_done(napi, done);
+ 
+ 	return done;
+ }
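
The fix above follows the NAPI contract: a poll may only complete when it did
less work than its budget, otherwise it must stay scheduled and be polled
again. A toy model of that rule:

#include <stdio.h>

/* Complete only when the poll did less work than its budget. */
static int poll_once(int pending, int budget, int *completed)
{
	int done = pending < budget ? pending : budget;

	*completed = (done < budget);	/* napi_complete_done(napi, done) */
	return done;
}

int main(void)
{
	int completed;

	poll_once(64, 64, &completed);	/* budget exhausted */
	printf("full budget -> completed=%d\n", completed);	/* 0 */
	poll_once(3, 64, &completed);
	printf("partial     -> completed=%d\n", completed);	/* 1 */
	return 0;
}
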
+diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
+index 7e2f10182c0cf3..591e8fd33d8ea6 100644
+--- a/drivers/net/phy/mdio_bus.c
++++ b/drivers/net/phy/mdio_bus.c
+@@ -889,6 +889,9 @@ int __mdiobus_read(struct mii_bus *bus, int addr, u32 regnum)
+ 
+ 	lockdep_assert_held_once(&bus->mdio_lock);
+ 
++	if (addr >= PHY_MAX_ADDR)
++		return -ENXIO;
++
+ 	if (bus->read)
+ 		retval = bus->read(bus, addr, regnum);
+ 	else
+@@ -918,6 +921,9 @@ int __mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val)
+ 
+ 	lockdep_assert_held_once(&bus->mdio_lock);
+ 
++	if (addr >= PHY_MAX_ADDR)
++		return -ENXIO;
++
+ 	if (bus->write)
+ 		err = bus->write(bus, addr, regnum, val);
+ 	else
+@@ -979,6 +985,9 @@ int __mdiobus_c45_read(struct mii_bus *bus, int addr, int devad, u32 regnum)
+ 
+ 	lockdep_assert_held_once(&bus->mdio_lock);
+ 
++	if (addr >= PHY_MAX_ADDR)
++		return -ENXIO;
++
+ 	if (bus->read_c45)
+ 		retval = bus->read_c45(bus, addr, devad, regnum);
+ 	else
+@@ -1010,6 +1019,9 @@ int __mdiobus_c45_write(struct mii_bus *bus, int addr, int devad, u32 regnum,
+ 
+ 	lockdep_assert_held_once(&bus->mdio_lock);
+ 
++	if (addr >= PHY_MAX_ADDR)
++		return -ENXIO;
++
+ 	if (bus->write_c45)
+ 		err = bus->write_c45(bus, addr, devad, regnum, val);
+ 	else
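
All four accessors now reject out-of-range addresses before touching the bus;
PHY_MAX_ADDR is 32 in the kernel headers. A minimal sketch of the guard (the
extra negative check is only for the standalone demo):

#include <stdio.h>
#include <errno.h>

#define PHY_MAX_ADDR 32		/* as in include/linux/phy.h */

static int mdiobus_check_addr(int addr)
{
	if (addr < 0 || addr >= PHY_MAX_ADDR)
		return -ENXIO;
	return 0;
}

int main(void)
{
	printf("%d %d\n", mdiobus_check_addr(31), mdiobus_check_addr(32));
	return 0;	/* prints "0 -6" on Linux */
}
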
+diff --git a/drivers/net/phy/mscc/mscc_ptp.c b/drivers/net/phy/mscc/mscc_ptp.c
+index 738a8822fcf014..ce49f3ac6939b6 100644
+--- a/drivers/net/phy/mscc/mscc_ptp.c
++++ b/drivers/net/phy/mscc/mscc_ptp.c
+@@ -943,7 +943,9 @@ static int vsc85xx_ip1_conf(struct phy_device *phydev, enum ts_blk blk,
+ 	/* UDP checksum offset in IPv4 packet
+ 	 * according to: https://tools.ietf.org/html/rfc768
+ 	 */
+-	val |= IP1_NXT_PROT_UDP_CHKSUM_OFF(26) | IP1_NXT_PROT_UDP_CHKSUM_CLEAR;
++	val |= IP1_NXT_PROT_UDP_CHKSUM_OFF(26);
++	if (enable)
++		val |= IP1_NXT_PROT_UDP_CHKSUM_CLEAR;
+ 	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_UDP_CHKSUM,
+ 			     val);
+ 
+@@ -1163,18 +1165,24 @@ static void vsc85xx_txtstamp(struct mii_timestamper *mii_ts,
+ 		container_of(mii_ts, struct vsc8531_private, mii_ts);
+ 
+ 	if (!vsc8531->ptp->configured)
+-		return;
++		goto out;
+ 
+-	if (vsc8531->ptp->tx_type == HWTSTAMP_TX_OFF) {
+-		kfree_skb(skb);
+-		return;
+-	}
++	if (vsc8531->ptp->tx_type == HWTSTAMP_TX_OFF)
++		goto out;
++
++	if (vsc8531->ptp->tx_type == HWTSTAMP_TX_ONESTEP_SYNC)
++		if (ptp_msg_is_sync(skb, type))
++			goto out;
+ 
+ 	skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ 
+ 	mutex_lock(&vsc8531->ts_lock);
+ 	__skb_queue_tail(&vsc8531->ptp->tx_queue, skb);
+ 	mutex_unlock(&vsc8531->ts_lock);
++	return;
++
++out:
++	kfree_skb(skb);
+ }
+ 
+ static bool vsc85xx_rxtstamp(struct mii_timestamper *mii_ts,
+diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
+index 8af44224480f15..13dea33d86ffa5 100644
+--- a/drivers/net/phy/phy_device.c
++++ b/drivers/net/phy/phy_device.c
+@@ -2010,8 +2010,10 @@ void phy_detach(struct phy_device *phydev)
+ 	struct module *ndev_owner = NULL;
+ 	struct mii_bus *bus;
+ 
+-	if (phydev->devlink)
++	if (phydev->devlink) {
+ 		device_link_del(phydev->devlink);
++		phydev->devlink = NULL;
++	}
+ 
+ 	if (phydev->sysfs_links) {
+ 		if (dev)
+diff --git a/drivers/net/usb/aqc111.c b/drivers/net/usb/aqc111.c
+index ff5be2cbf17b90..9201ee10a13f78 100644
+--- a/drivers/net/usb/aqc111.c
++++ b/drivers/net/usb/aqc111.c
+@@ -30,11 +30,14 @@ static int aqc111_read_cmd_nopm(struct usbnet *dev, u8 cmd, u16 value,
+ 	ret = usbnet_read_cmd_nopm(dev, cmd, USB_DIR_IN | USB_TYPE_VENDOR |
+ 				   USB_RECIP_DEVICE, value, index, data, size);
+ 
+-	if (unlikely(ret < 0))
++	if (unlikely(ret < size)) {
+ 		netdev_warn(dev->net,
+ 			    "Failed to read(0x%x) reg index 0x%04x: %d\n",
+ 			    cmd, index, ret);
+ 
++		ret = ret < 0 ? ret : -ENODATA;
++	}
++
+ 	return ret;
+ }
+ 
+@@ -46,11 +49,14 @@ static int aqc111_read_cmd(struct usbnet *dev, u8 cmd, u16 value,
+ 	ret = usbnet_read_cmd(dev, cmd, USB_DIR_IN | USB_TYPE_VENDOR |
+ 			      USB_RECIP_DEVICE, value, index, data, size);
+ 
+-	if (unlikely(ret < 0))
++	if (unlikely(ret < size)) {
+ 		netdev_warn(dev->net,
+ 			    "Failed to read(0x%x) reg index 0x%04x: %d\n",
+ 			    cmd, index, ret);
+ 
++		ret = ret < 0 ? ret : -ENODATA;
++	}
++
+ 	return ret;
+ }
+ 
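
The reads above now treat a short transfer as an error: negative returns pass
through unchanged, while a positive count below the requested size becomes
-ENODATA. A small standalone version of that mapping:

#include <stdio.h>
#include <errno.h>

/* Normalize a register-read result: pass real errors through and turn a
 * short read into -ENODATA so callers never consume partial data.
 */
static int check_read(int ret, int size)
{
	if (ret < size)
		return ret < 0 ? ret : -ENODATA;
	return ret;
}

int main(void)
{
	printf("%d %d %d\n",
	       check_read(4, 4),	/* full read: 4 */
	       check_read(2, 4),	/* short read: -ENODATA */
	       check_read(-EIO, 4));	/* error passed through: -EIO */
	return 0;
}
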
+diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
+index 151d7cdfc48023..c48c2de6f961f7 100644
+--- a/drivers/net/vmxnet3/vmxnet3_drv.c
++++ b/drivers/net/vmxnet3/vmxnet3_drv.c
+@@ -1560,6 +1560,30 @@ vmxnet3_get_hdr_len(struct vmxnet3_adapter *adapter, struct sk_buff *skb,
+ 	return (hlen + (hdr.tcp->doff << 2));
+ }
+ 
++static void
++vmxnet3_lro_tunnel(struct sk_buff *skb, __be16 ip_proto)
++{
++	struct udphdr *uh = NULL;
++
++	if (ip_proto == htons(ETH_P_IP)) {
++		struct iphdr *iph = (struct iphdr *)skb->data;
++
++		if (iph->protocol == IPPROTO_UDP)
++			uh = (struct udphdr *)(iph + 1);
++	} else {
++		struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
++
++		if (iph->nexthdr == IPPROTO_UDP)
++			uh = (struct udphdr *)(iph + 1);
++	}
++	if (uh) {
++		if (uh->check)
++			skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
++		else
++			skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
++	}
++}
++
+ static int
+ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
+ 		       struct vmxnet3_adapter *adapter, int quota)
+@@ -1873,6 +1897,8 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
+ 			if (segCnt != 0 && mss != 0) {
+ 				skb_shinfo(skb)->gso_type = rcd->v4 ?
+ 					SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
++				if (encap_lro)
++					vmxnet3_lro_tunnel(skb, skb->protocol);
+ 				skb_shinfo(skb)->gso_size = mss;
+ 				skb_shinfo(skb)->gso_segs = segCnt;
+ 			} else if ((segCnt != 0 || skb->len > mtu) && !encap_lro) {
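
For coalesced encapsulated frames the outer UDP checksum decides the GSO type:
non-zero means SKB_GSO_UDP_TUNNEL_CSUM, zero means SKB_GSO_UDP_TUNNEL. A
sketch of the selection with illustrative flag values:

#include <stdio.h>
#include <stdint.h>

#define GSO_UDP_TUNNEL      0x1	/* illustrative flag values */
#define GSO_UDP_TUNNEL_CSUM 0x2

/* Pick the tunnel GSO flag from the outer UDP checksum field: a zero
 * checksum means the tunnel does not checksum the outer header.
 */
static unsigned int tunnel_gso_flag(uint16_t udp_check)
{
	return udp_check ? GSO_UDP_TUNNEL_CSUM : GSO_UDP_TUNNEL;
}

int main(void)
{
	printf("%u %u\n", tunnel_gso_flag(0), tunnel_gso_flag(0xbeef));
	return 0;	/* 1 2 */
}
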
+diff --git a/drivers/net/wireguard/device.c b/drivers/net/wireguard/device.c
+index 45e9b908dbfb05..acb9ce7a626afd 100644
+--- a/drivers/net/wireguard/device.c
++++ b/drivers/net/wireguard/device.c
+@@ -364,6 +364,7 @@ static int wg_newlink(struct net *src_net, struct net_device *dev,
+ 	if (ret < 0)
+ 		goto err_free_handshake_queue;
+ 
++	dev_set_threaded(dev, true);
+ 	ret = register_netdevice(dev);
+ 	if (ret < 0)
+ 		goto err_uninit_ratelimiter;
+diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
+index 0fe47d51013c7c..59f7ccb33fde3e 100644
+--- a/drivers/net/wireless/ath/ath10k/snoc.c
++++ b/drivers/net/wireless/ath/ath10k/snoc.c
+@@ -937,7 +937,9 @@ static int ath10k_snoc_hif_start(struct ath10k *ar)
+ 
+ 	dev_set_threaded(ar->napi_dev, true);
+ 	ath10k_core_napi_enable(ar);
+-	ath10k_snoc_irq_enable(ar);
++	/* IRQs are left enabled when we restart due to a firmware crash */
++	if (!test_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags))
++		ath10k_snoc_irq_enable(ar);
+ 	ath10k_snoc_rx_post(ar);
+ 
+ 	clear_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags);
+diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
+index 7eba6ee054ffef..8002fb32a2cc10 100644
+--- a/drivers/net/wireless/ath/ath11k/core.c
++++ b/drivers/net/wireless/ath/ath11k/core.c
+@@ -855,6 +855,7 @@ void ath11k_fw_stats_init(struct ath11k *ar)
+ 	INIT_LIST_HEAD(&ar->fw_stats.bcn);
+ 
+ 	init_completion(&ar->fw_stats_complete);
++	init_completion(&ar->fw_stats_done);
+ }
+ 
+ void ath11k_fw_stats_free(struct ath11k_fw_stats *stats)
+@@ -1811,6 +1812,20 @@ int ath11k_core_qmi_firmware_ready(struct ath11k_base *ab)
+ {
+ 	int ret;
+ 
++	switch (ath11k_crypto_mode) {
++	case ATH11K_CRYPT_MODE_SW:
++		set_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags);
++		set_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags);
++		break;
++	case ATH11K_CRYPT_MODE_HW:
++		clear_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags);
++		clear_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags);
++		break;
++	default:
++		ath11k_info(ab, "invalid crypto_mode: %d\n", ath11k_crypto_mode);
++		return -EINVAL;
++	}
++
+ 	ret = ath11k_core_start_firmware(ab, ab->fw_mode);
+ 	if (ret) {
+ 		ath11k_err(ab, "failed to start firmware: %d\n", ret);
+@@ -1829,20 +1844,6 @@ int ath11k_core_qmi_firmware_ready(struct ath11k_base *ab)
+ 		goto err_firmware_stop;
+ 	}
+ 
+-	switch (ath11k_crypto_mode) {
+-	case ATH11K_CRYPT_MODE_SW:
+-		set_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags);
+-		set_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags);
+-		break;
+-	case ATH11K_CRYPT_MODE_HW:
+-		clear_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags);
+-		clear_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags);
+-		break;
+-	default:
+-		ath11k_info(ab, "invalid crypto_mode: %d\n", ath11k_crypto_mode);
+-		return -EINVAL;
+-	}
+-
+ 	if (ath11k_frame_mode == ATH11K_HW_TXRX_RAW)
+ 		set_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags);
+ 
+@@ -1915,6 +1916,7 @@ static int ath11k_core_reconfigure_on_crash(struct ath11k_base *ab)
+ void ath11k_core_halt(struct ath11k *ar)
+ {
+ 	struct ath11k_base *ab = ar->ab;
++	struct list_head *pos, *n;
+ 
+ 	lockdep_assert_held(&ar->conf_mutex);
+ 
+@@ -1929,7 +1931,12 @@ void ath11k_core_halt(struct ath11k *ar)
+ 
+ 	rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx], NULL);
+ 	synchronize_rcu();
+-	INIT_LIST_HEAD(&ar->arvifs);
++
++	spin_lock_bh(&ar->data_lock);
++	list_for_each_safe(pos, n, &ar->arvifs)
++		list_del_init(pos);
++	spin_unlock_bh(&ar->data_lock);
++
+ 	idr_init(&ar->txmgmt_idr);
+ }
+ 
+diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h
+index 09c37e19a16802..fcdec14eb3cfa9 100644
+--- a/drivers/net/wireless/ath/ath11k/core.h
++++ b/drivers/net/wireless/ath/ath11k/core.h
+@@ -599,6 +599,8 @@ struct ath11k_fw_stats {
+ 	struct list_head pdevs;
+ 	struct list_head vdevs;
+ 	struct list_head bcn;
++	u32 num_vdev_recvd;
++	u32 num_bcn_recvd;
+ };
+ 
+ struct ath11k_dbg_htt_stats {
+@@ -780,7 +782,7 @@ struct ath11k {
+ 	u8 alpha2[REG_ALPHA2_LEN + 1];
+ 	struct ath11k_fw_stats fw_stats;
+ 	struct completion fw_stats_complete;
+-	bool fw_stats_done;
++	struct completion fw_stats_done;
+ 
+ 	/* protected by conf_mutex */
+ 	bool ps_state_enable;
+diff --git a/drivers/net/wireless/ath/ath11k/debugfs.c b/drivers/net/wireless/ath/ath11k/debugfs.c
+index 57281a135dd7fa..5d46f8e4c231fb 100644
+--- a/drivers/net/wireless/ath/ath11k/debugfs.c
++++ b/drivers/net/wireless/ath/ath11k/debugfs.c
+@@ -1,7 +1,7 @@
+ // SPDX-License-Identifier: BSD-3-Clause-Clear
+ /*
+  * Copyright (c) 2018-2020 The Linux Foundation. All rights reserved.
+- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+  */
+ 
+ #include <linux/vmalloc.h>
+@@ -93,57 +93,14 @@ void ath11k_debugfs_add_dbring_entry(struct ath11k *ar,
+ 	spin_unlock_bh(&dbr_data->lock);
+ }
+ 
+-static void ath11k_debugfs_fw_stats_reset(struct ath11k *ar)
+-{
+-	spin_lock_bh(&ar->data_lock);
+-	ar->fw_stats_done = false;
+-	ath11k_fw_stats_pdevs_free(&ar->fw_stats.pdevs);
+-	ath11k_fw_stats_vdevs_free(&ar->fw_stats.vdevs);
+-	spin_unlock_bh(&ar->data_lock);
+-}
+-
+ void ath11k_debugfs_fw_stats_process(struct ath11k *ar, struct ath11k_fw_stats *stats)
+ {
+ 	struct ath11k_base *ab = ar->ab;
+-	struct ath11k_pdev *pdev;
+-	bool is_end;
+-	static unsigned int num_vdev, num_bcn;
+-	size_t total_vdevs_started = 0;
+-	int i;
+-
+-	/* WMI_REQUEST_PDEV_STAT request has been already processed */
+-
+-	if (stats->stats_id == WMI_REQUEST_RSSI_PER_CHAIN_STAT) {
+-		ar->fw_stats_done = true;
+-		return;
+-	}
+-
+-	if (stats->stats_id == WMI_REQUEST_VDEV_STAT) {
+-		if (list_empty(&stats->vdevs)) {
+-			ath11k_warn(ab, "empty vdev stats");
+-			return;
+-		}
+-		/* FW sends all the active VDEV stats irrespective of PDEV,
+-		 * hence limit until the count of all VDEVs started
+-		 */
+-		for (i = 0; i < ab->num_radios; i++) {
+-			pdev = rcu_dereference(ab->pdevs_active[i]);
+-			if (pdev && pdev->ar)
+-				total_vdevs_started += ar->num_started_vdevs;
+-		}
+-
+-		is_end = ((++num_vdev) == total_vdevs_started);
+-
+-		list_splice_tail_init(&stats->vdevs,
+-				      &ar->fw_stats.vdevs);
+-
+-		if (is_end) {
+-			ar->fw_stats_done = true;
+-			num_vdev = 0;
+-		}
+-		return;
+-	}
++	bool is_end = true;
+ 
++	/* WMI_REQUEST_PDEV_STAT, WMI_REQUEST_RSSI_PER_CHAIN_STAT and
++	 * WMI_REQUEST_VDEV_STAT requests have already been processed.
++	 */
+ 	if (stats->stats_id == WMI_REQUEST_BCN_STAT) {
+ 		if (list_empty(&stats->bcn)) {
+ 			ath11k_warn(ab, "empty bcn stats");
+@@ -152,97 +109,18 @@ void ath11k_debugfs_fw_stats_process(struct ath11k *ar, struct ath11k_fw_stats *
+ 		/* Mark end until we reached the count of all started VDEVs
+ 		 * within the PDEV
+ 		 */
+-		is_end = ((++num_bcn) == ar->num_started_vdevs);
++		if (ar->num_started_vdevs)
++			is_end = ((++ar->fw_stats.num_bcn_recvd) ==
++				  ar->num_started_vdevs);
+ 
+ 		list_splice_tail_init(&stats->bcn,
+ 				      &ar->fw_stats.bcn);
+ 
+-		if (is_end) {
+-			ar->fw_stats_done = true;
+-			num_bcn = 0;
+-		}
++		if (is_end)
++			complete(&ar->fw_stats_done);
+ 	}
+ }
+ 
+-static int ath11k_debugfs_fw_stats_request(struct ath11k *ar,
+-					   struct stats_request_params *req_param)
+-{
+-	struct ath11k_base *ab = ar->ab;
+-	unsigned long timeout, time_left;
+-	int ret;
+-
+-	lockdep_assert_held(&ar->conf_mutex);
+-
+-	/* FW stats can get split when exceeding the stats data buffer limit.
+-	 * In that case, since there is no end marking for the back-to-back
+-	 * received 'update stats' event, we keep a 3 seconds timeout in case,
+-	 * fw_stats_done is not marked yet
+-	 */
+-	timeout = jiffies + msecs_to_jiffies(3 * 1000);
+-
+-	ath11k_debugfs_fw_stats_reset(ar);
+-
+-	reinit_completion(&ar->fw_stats_complete);
+-
+-	ret = ath11k_wmi_send_stats_request_cmd(ar, req_param);
+-
+-	if (ret) {
+-		ath11k_warn(ab, "could not request fw stats (%d)\n",
+-			    ret);
+-		return ret;
+-	}
+-
+-	time_left = wait_for_completion_timeout(&ar->fw_stats_complete, 1 * HZ);
+-
+-	if (!time_left)
+-		return -ETIMEDOUT;
+-
+-	for (;;) {
+-		if (time_after(jiffies, timeout))
+-			break;
+-
+-		spin_lock_bh(&ar->data_lock);
+-		if (ar->fw_stats_done) {
+-			spin_unlock_bh(&ar->data_lock);
+-			break;
+-		}
+-		spin_unlock_bh(&ar->data_lock);
+-	}
+-	return 0;
+-}
+-
+-int ath11k_debugfs_get_fw_stats(struct ath11k *ar, u32 pdev_id,
+-				u32 vdev_id, u32 stats_id)
+-{
+-	struct ath11k_base *ab = ar->ab;
+-	struct stats_request_params req_param;
+-	int ret;
+-
+-	mutex_lock(&ar->conf_mutex);
+-
+-	if (ar->state != ATH11K_STATE_ON) {
+-		ret = -ENETDOWN;
+-		goto err_unlock;
+-	}
+-
+-	req_param.pdev_id = pdev_id;
+-	req_param.vdev_id = vdev_id;
+-	req_param.stats_id = stats_id;
+-
+-	ret = ath11k_debugfs_fw_stats_request(ar, &req_param);
+-	if (ret)
+-		ath11k_warn(ab, "failed to request fw stats: %d\n", ret);
+-
+-	ath11k_dbg(ab, ATH11K_DBG_WMI,
+-		   "debug get fw stat pdev id %d vdev id %d stats id 0x%x\n",
+-		   pdev_id, vdev_id, stats_id);
+-
+-err_unlock:
+-	mutex_unlock(&ar->conf_mutex);
+-
+-	return ret;
+-}
+-
+ static int ath11k_open_pdev_stats(struct inode *inode, struct file *file)
+ {
+ 	struct ath11k *ar = inode->i_private;
+@@ -268,7 +146,7 @@ static int ath11k_open_pdev_stats(struct inode *inode, struct file *file)
+ 	req_param.vdev_id = 0;
+ 	req_param.stats_id = WMI_REQUEST_PDEV_STAT;
+ 
+-	ret = ath11k_debugfs_fw_stats_request(ar, &req_param);
++	ret = ath11k_mac_fw_stats_request(ar, &req_param);
+ 	if (ret) {
+ 		ath11k_warn(ab, "failed to request fw pdev stats: %d\n", ret);
+ 		goto err_free;
+@@ -339,7 +217,7 @@ static int ath11k_open_vdev_stats(struct inode *inode, struct file *file)
+ 	req_param.vdev_id = 0;
+ 	req_param.stats_id = WMI_REQUEST_VDEV_STAT;
+ 
+-	ret = ath11k_debugfs_fw_stats_request(ar, &req_param);
++	ret = ath11k_mac_fw_stats_request(ar, &req_param);
+ 	if (ret) {
+ 		ath11k_warn(ar->ab, "failed to request fw vdev stats: %d\n", ret);
+ 		goto err_free;
+@@ -415,7 +293,7 @@ static int ath11k_open_bcn_stats(struct inode *inode, struct file *file)
+ 			continue;
+ 
+ 		req_param.vdev_id = arvif->vdev_id;
+-		ret = ath11k_debugfs_fw_stats_request(ar, &req_param);
++		ret = ath11k_mac_fw_stats_request(ar, &req_param);
+ 		if (ret) {
+ 			ath11k_warn(ar->ab, "failed to request fw bcn stats: %d\n", ret);
+ 			goto err_free;
+diff --git a/drivers/net/wireless/ath/ath11k/debugfs.h b/drivers/net/wireless/ath/ath11k/debugfs.h
+index a39e458637b013..ed7fec177588f6 100644
+--- a/drivers/net/wireless/ath/ath11k/debugfs.h
++++ b/drivers/net/wireless/ath/ath11k/debugfs.h
+@@ -1,7 +1,7 @@
+ /* SPDX-License-Identifier: BSD-3-Clause-Clear */
+ /*
+  * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2021-2022, 2025 Qualcomm Innovation Center, Inc. All rights reserved.
+  */
+ 
+ #ifndef _ATH11K_DEBUGFS_H_
+@@ -273,8 +273,6 @@ void ath11k_debugfs_unregister(struct ath11k *ar);
+ void ath11k_debugfs_fw_stats_process(struct ath11k *ar, struct ath11k_fw_stats *stats);
+ 
+ void ath11k_debugfs_fw_stats_init(struct ath11k *ar);
+-int ath11k_debugfs_get_fw_stats(struct ath11k *ar, u32 pdev_id,
+-				u32 vdev_id, u32 stats_id);
+ 
+ static inline bool ath11k_debugfs_is_pktlog_lite_mode_enabled(struct ath11k *ar)
+ {
+@@ -381,12 +379,6 @@ static inline int ath11k_debugfs_rx_filter(struct ath11k *ar)
+ 	return 0;
+ }
+ 
+-static inline int ath11k_debugfs_get_fw_stats(struct ath11k *ar,
+-					      u32 pdev_id, u32 vdev_id, u32 stats_id)
+-{
+-	return 0;
+-}
+-
+ static inline void
+ ath11k_debugfs_add_dbring_entry(struct ath11k *ar,
+ 				enum wmi_direct_buffer_module id,
+diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
+index f8068d2e848c33..7ead581f5bfd1d 100644
+--- a/drivers/net/wireless/ath/ath11k/mac.c
++++ b/drivers/net/wireless/ath/ath11k/mac.c
+@@ -8938,6 +8938,86 @@ static void ath11k_mac_put_chain_rssi(struct station_info *sinfo,
+ 	}
+ }
+ 
++static void ath11k_mac_fw_stats_reset(struct ath11k *ar)
++{
++	spin_lock_bh(&ar->data_lock);
++	ath11k_fw_stats_pdevs_free(&ar->fw_stats.pdevs);
++	ath11k_fw_stats_vdevs_free(&ar->fw_stats.vdevs);
++	ar->fw_stats.num_vdev_recvd = 0;
++	ar->fw_stats.num_bcn_recvd = 0;
++	spin_unlock_bh(&ar->data_lock);
++}
++
++int ath11k_mac_fw_stats_request(struct ath11k *ar,
++				struct stats_request_params *req_param)
++{
++	struct ath11k_base *ab = ar->ab;
++	unsigned long time_left;
++	int ret;
++
++	lockdep_assert_held(&ar->conf_mutex);
++
++	ath11k_mac_fw_stats_reset(ar);
++
++	reinit_completion(&ar->fw_stats_complete);
++	reinit_completion(&ar->fw_stats_done);
++
++	ret = ath11k_wmi_send_stats_request_cmd(ar, req_param);
++
++	if (ret) {
++		ath11k_warn(ab, "could not request fw stats (%d)\n",
++			    ret);
++		return ret;
++	}
++
++	time_left = wait_for_completion_timeout(&ar->fw_stats_complete, 1 * HZ);
++	if (!time_left)
++		return -ETIMEDOUT;
++
++	/* FW stats can get split when exceeding the stats data buffer limit.
++	 * In that case, since there is no end marking for the back-to-back
++	 * received 'update stats' events, we keep a 3 second timeout in case
++	 * fw_stats_done is not marked yet
++	 */
++	time_left = wait_for_completion_timeout(&ar->fw_stats_done, 3 * HZ);
++	if (!time_left)
++		return -ETIMEDOUT;
++
++	return 0;
++}
++
++static int ath11k_mac_get_fw_stats(struct ath11k *ar, u32 pdev_id,
++				   u32 vdev_id, u32 stats_id)
++{
++	struct ath11k_base *ab = ar->ab;
++	struct stats_request_params req_param;
++	int ret;
++
++	mutex_lock(&ar->conf_mutex);
++
++	if (ar->state != ATH11K_STATE_ON) {
++		ret = -ENETDOWN;
++		goto err_unlock;
++	}
++
++	req_param.pdev_id = pdev_id;
++	req_param.vdev_id = vdev_id;
++	req_param.stats_id = stats_id;
++
++	ret = ath11k_mac_fw_stats_request(ar, &req_param);
++	if (ret)
++		ath11k_warn(ab, "failed to request fw stats: %d\n", ret);
++
++	ath11k_dbg(ab, ATH11K_DBG_WMI,
++		   "debug get fw stat pdev id %d vdev id %d stats id 0x%x\n",
++		   pdev_id, vdev_id, stats_id);
++
++err_unlock:
++	mutex_unlock(&ar->conf_mutex);
++
++	return ret;
++}
++
+ static void ath11k_mac_op_sta_statistics(struct ieee80211_hw *hw,
+ 					 struct ieee80211_vif *vif,
+ 					 struct ieee80211_sta *sta,
+@@ -8975,8 +9055,8 @@ static void ath11k_mac_op_sta_statistics(struct ieee80211_hw *hw,
+ 	if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL)) &&
+ 	    arsta->arvif->vdev_type == WMI_VDEV_TYPE_STA &&
+ 	    ar->ab->hw_params.supports_rssi_stats &&
+-	    !ath11k_debugfs_get_fw_stats(ar, ar->pdev->pdev_id, 0,
+-					 WMI_REQUEST_RSSI_PER_CHAIN_STAT)) {
++	    !ath11k_mac_get_fw_stats(ar, ar->pdev->pdev_id, 0,
++				     WMI_REQUEST_RSSI_PER_CHAIN_STAT)) {
+ 		ath11k_mac_put_chain_rssi(sinfo, arsta, "fw stats", true);
+ 	}
+ 
+@@ -8984,8 +9064,8 @@ static void ath11k_mac_op_sta_statistics(struct ieee80211_hw *hw,
+ 	if (!signal &&
+ 	    arsta->arvif->vdev_type == WMI_VDEV_TYPE_STA &&
+ 	    ar->ab->hw_params.supports_rssi_stats &&
+-	    !(ath11k_debugfs_get_fw_stats(ar, ar->pdev->pdev_id, 0,
+-					WMI_REQUEST_VDEV_STAT)))
++	    !(ath11k_mac_get_fw_stats(ar, ar->pdev->pdev_id, 0,
++				      WMI_REQUEST_VDEV_STAT)))
+ 		signal = arsta->rssi_beacon;
+ 
+ 	ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+@@ -9331,11 +9411,13 @@ static int ath11k_fw_stats_request(struct ath11k *ar,
+ 	lockdep_assert_held(&ar->conf_mutex);
+ 
+ 	spin_lock_bh(&ar->data_lock);
+-	ar->fw_stats_done = false;
+ 	ath11k_fw_stats_pdevs_free(&ar->fw_stats.pdevs);
++	ar->fw_stats.num_vdev_recvd = 0;
++	ar->fw_stats.num_bcn_recvd = 0;
+ 	spin_unlock_bh(&ar->data_lock);
+ 
+ 	reinit_completion(&ar->fw_stats_complete);
++	reinit_completion(&ar->fw_stats_done);
+ 
+ 	ret = ath11k_wmi_send_stats_request_cmd(ar, req_param);
+ 	if (ret) {
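
The request path now waits on two completions: one for the first event and one
for the end of a possibly split series, where the end is detected by counting
received VDEV chunks against the number of started VDEVs. A toy model of that
end detection:

#include <stdio.h>

/* Each event carries part of the VDEV stats; mark "done" once the count of
 * received chunks reaches the number of started VDEVs.
 */
struct fw_stats { unsigned int num_vdev_recvd; int done; };

static void on_vdev_stats_event(struct fw_stats *s, unsigned int started)
{
	if (started && ++s->num_vdev_recvd == started)
		s->done = 1;	/* complete(&ar->fw_stats_done) */
}

int main(void)
{
	struct fw_stats s = { 0, 0 };
	unsigned int started = 3;

	for (int i = 0; i < 3; i++)
		on_vdev_stats_event(&s, started);
	printf("done=%d after %u events\n", s.done, s.num_vdev_recvd);
	return 0;
}
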
+diff --git a/drivers/net/wireless/ath/ath11k/mac.h b/drivers/net/wireless/ath/ath11k/mac.h
+index f5800fbecff89e..5e61eea1bb0378 100644
+--- a/drivers/net/wireless/ath/ath11k/mac.h
++++ b/drivers/net/wireless/ath/ath11k/mac.h
+@@ -1,7 +1,7 @@
+ /* SPDX-License-Identifier: BSD-3-Clause-Clear */
+ /*
+  * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2021-2023, 2025 Qualcomm Innovation Center, Inc. All rights reserved.
+  */
+ 
+ #ifndef ATH11K_MAC_H
+@@ -179,4 +179,6 @@ int ath11k_mac_vif_set_keepalive(struct ath11k_vif *arvif,
+ void ath11k_mac_fill_reg_tpc_info(struct ath11k *ar,
+ 				  struct ieee80211_vif *vif,
+ 				  struct ieee80211_chanctx_conf *ctx);
++int ath11k_mac_fw_stats_request(struct ath11k *ar,
++				struct stats_request_params *req_param);
+ #endif
+diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
+index 87abfa54752953..5f7edf622de7a8 100644
+--- a/drivers/net/wireless/ath/ath11k/wmi.c
++++ b/drivers/net/wireless/ath/ath11k/wmi.c
+@@ -8157,6 +8157,11 @@ static void ath11k_peer_assoc_conf_event(struct ath11k_base *ab, struct sk_buff
+ static void ath11k_update_stats_event(struct ath11k_base *ab, struct sk_buff *skb)
+ {
+ 	struct ath11k_fw_stats stats = {};
++	size_t total_vdevs_started = 0;
++	struct ath11k_pdev *pdev;
++	bool is_end = true;
++	int i;
++
+ 	struct ath11k *ar;
+ 	int ret;
+ 
+@@ -8183,18 +8188,50 @@ static void ath11k_update_stats_event(struct ath11k_base *ab, struct sk_buff *sk
+ 
+ 	spin_lock_bh(&ar->data_lock);
+ 
+-	/* WMI_REQUEST_PDEV_STAT can be requested via .get_txpower mac ops or via
++	/* WMI_REQUEST_PDEV_STAT, WMI_REQUEST_VDEV_STAT and
++	 * WMI_REQUEST_RSSI_PER_CHAIN_STAT can be requested via mac ops or via
+ 	 * debugfs fw stats. Therefore, processing it separately.
+ 	 */
+ 	if (stats.stats_id == WMI_REQUEST_PDEV_STAT) {
+ 		list_splice_tail_init(&stats.pdevs, &ar->fw_stats.pdevs);
+-		ar->fw_stats_done = true;
++		complete(&ar->fw_stats_done);
++		goto complete;
++	}
++
++	if (stats.stats_id == WMI_REQUEST_RSSI_PER_CHAIN_STAT) {
++		complete(&ar->fw_stats_done);
++		goto complete;
++	}
++
++	if (stats.stats_id == WMI_REQUEST_VDEV_STAT) {
++		if (list_empty(&stats.vdevs)) {
++			ath11k_warn(ab, "empty vdev stats");
++			goto complete;
++		}
++		/* FW sends all the active VDEV stats irrespective of PDEV,
++		 * hence limit until the count of all VDEVs started
++		 */
++		for (i = 0; i < ab->num_radios; i++) {
++			pdev = rcu_dereference(ab->pdevs_active[i]);
++			if (pdev && pdev->ar)
++				total_vdevs_started += ar->num_started_vdevs;
++		}
++
++		if (total_vdevs_started)
++			is_end = ((++ar->fw_stats.num_vdev_recvd) ==
++				  total_vdevs_started);
++
++		list_splice_tail_init(&stats.vdevs,
++				      &ar->fw_stats.vdevs);
++
++		if (is_end)
++			complete(&ar->fw_stats_done);
++
+ 		goto complete;
+ 	}
+ 
+-	/* WMI_REQUEST_VDEV_STAT, WMI_REQUEST_BCN_STAT and WMI_REQUEST_RSSI_PER_CHAIN_STAT
+-	 * are currently requested only via debugfs fw stats. Hence, processing these
+-	 * in debugfs context
++	/* WMI_REQUEST_BCN_STAT is currently requested only via debugfs fw stats.
++	 * Hence, processing it in debugfs context
+ 	 */
+ 	ath11k_debugfs_fw_stats_process(ar, &stats);
+ 
+diff --git a/drivers/net/wireless/ath/ath12k/core.c b/drivers/net/wireless/ath/ath12k/core.c
+index 8bb8ee98188bfc..c3c76e26806291 100644
+--- a/drivers/net/wireless/ath/ath12k/core.c
++++ b/drivers/net/wireless/ath/ath12k/core.c
+@@ -1004,6 +1004,7 @@ static void ath12k_rfkill_work(struct work_struct *work)
+ 
+ void ath12k_core_halt(struct ath12k *ar)
+ {
++	struct list_head *pos, *n;
+ 	struct ath12k_base *ab = ar->ab;
+ 
+ 	lockdep_assert_held(&ar->conf_mutex);
+@@ -1019,7 +1020,12 @@ void ath12k_core_halt(struct ath12k *ar)
+ 
+ 	rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx], NULL);
+ 	synchronize_rcu();
+-	INIT_LIST_HEAD(&ar->arvifs);
++
++	spin_lock_bh(&ar->data_lock);
++	list_for_each_safe(pos, n, &ar->arvifs)
++		list_del_init(pos);
++	spin_unlock_bh(&ar->data_lock);
++
+ 	idr_init(&ar->txmgmt_idr);
+ }
+ 
+diff --git a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c
+index f1b7e74aefe426..6f2e7ecc66af71 100644
+--- a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c
++++ b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c
+@@ -1646,6 +1646,9 @@ static ssize_t ath12k_write_htt_stats_type(struct file *file,
+ 	const int size = 32;
+ 	int num_args;
+ 
++	if (count > size)
++		return -EINVAL;
++
+ 	char *buf __free(kfree) = kzalloc(size, GFP_KERNEL);
+ 	if (!buf)
+ 		return -ENOMEM;
+diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.c b/drivers/net/wireless/ath/ath12k/dp_rx.c
+index 4cbba96121a114..1623298ba2c47e 100644
+--- a/drivers/net/wireless/ath/ath12k/dp_rx.c
++++ b/drivers/net/wireless/ath/ath12k/dp_rx.c
+@@ -228,12 +228,6 @@ static void ath12k_dp_rx_desc_get_crypto_header(struct ath12k_base *ab,
+ 	ab->hal_rx_ops->rx_desc_get_crypto_header(desc, crypto_hdr, enctype);
+ }
+ 
+-static u16 ath12k_dp_rxdesc_get_mpdu_frame_ctrl(struct ath12k_base *ab,
+-						struct hal_rx_desc *desc)
+-{
+-	return ab->hal_rx_ops->rx_desc_get_mpdu_frame_ctl(desc);
+-}
+-
+ static inline u8 ath12k_dp_rx_get_msdu_src_link(struct ath12k_base *ab,
+ 						struct hal_rx_desc *desc)
+ {
+@@ -1768,6 +1762,7 @@ static int ath12k_dp_rx_msdu_coalesce(struct ath12k *ar,
+ 	struct hal_rx_desc *ldesc;
+ 	int space_extra, rem_len, buf_len;
+ 	u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
++	bool is_continuation;
+ 
+ 	/* As the msdu is spread across multiple rx buffers,
+ 	 * find the offset to the start of msdu for computing
+@@ -1816,7 +1811,8 @@ static int ath12k_dp_rx_msdu_coalesce(struct ath12k *ar,
+ 	rem_len = msdu_len - buf_first_len;
+ 	while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) {
+ 		rxcb = ATH12K_SKB_RXCB(skb);
+-		if (rxcb->is_continuation)
++		is_continuation = rxcb->is_continuation;
++		if (is_continuation)
+ 			buf_len = DP_RX_BUFFER_SIZE - hal_rx_desc_sz;
+ 		else
+ 			buf_len = rem_len;
+@@ -1834,7 +1830,7 @@ static int ath12k_dp_rx_msdu_coalesce(struct ath12k *ar,
+ 		dev_kfree_skb_any(skb);
+ 
+ 		rem_len -= buf_len;
+-		if (!rxcb->is_continuation)
++		if (!is_continuation)
+ 			break;
+ 	}
+ 
+@@ -2067,10 +2063,13 @@ static void ath12k_get_dot11_hdr_from_rx_desc(struct ath12k *ar,
+ 	struct hal_rx_desc *rx_desc = rxcb->rx_desc;
+ 	struct ath12k_base *ab = ar->ab;
+ 	size_t hdr_len, crypto_len;
+-	struct ieee80211_hdr *hdr;
+-	u16 qos_ctl;
+-	__le16 fc;
+-	u8 *crypto_hdr;
++	struct ieee80211_hdr hdr;
++	__le16 qos_ctl;
++	u8 *crypto_hdr, mesh_ctrl;
++
++	ath12k_dp_rx_desc_get_dot11_hdr(ab, rx_desc, &hdr);
++	hdr_len = ieee80211_hdrlen(hdr.frame_control);
++	mesh_ctrl = ath12k_dp_rx_h_mesh_ctl_present(ab, rx_desc);
+ 
+ 	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
+ 		crypto_len = ath12k_dp_rx_crypto_param_len(ar, enctype);
+@@ -2078,27 +2077,21 @@ static void ath12k_get_dot11_hdr_from_rx_desc(struct ath12k *ar,
+ 		ath12k_dp_rx_desc_get_crypto_header(ab, rx_desc, crypto_hdr, enctype);
+ 	}
+ 
+-	fc = cpu_to_le16(ath12k_dp_rxdesc_get_mpdu_frame_ctrl(ab, rx_desc));
+-	hdr_len = ieee80211_hdrlen(fc);
+ 	skb_push(msdu, hdr_len);
+-	hdr = (struct ieee80211_hdr *)msdu->data;
+-	hdr->frame_control = fc;
+-
+-	/* Get wifi header from rx_desc */
+-	ath12k_dp_rx_desc_get_dot11_hdr(ab, rx_desc, hdr);
++	memcpy(msdu->data, &hdr, min(hdr_len, sizeof(hdr)));
+ 
+ 	if (rxcb->is_mcbc)
+ 		status->flag &= ~RX_FLAG_PN_VALIDATED;
+ 
+ 	/* Add QOS header */
+-	if (ieee80211_is_data_qos(hdr->frame_control)) {
+-		qos_ctl = rxcb->tid;
+-		if (ath12k_dp_rx_h_mesh_ctl_present(ab, rx_desc))
+-			qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT;
++	if (ieee80211_is_data_qos(hdr.frame_control)) {
++		struct ieee80211_hdr *qos_ptr = (struct ieee80211_hdr *)msdu->data;
+ 
+-		/* TODO: Add other QoS ctl fields when required */
+-		memcpy(msdu->data + (hdr_len - IEEE80211_QOS_CTL_LEN),
+-		       &qos_ctl, IEEE80211_QOS_CTL_LEN);
++		qos_ctl = cpu_to_le16(rxcb->tid & IEEE80211_QOS_CTL_TID_MASK);
++		if (mesh_ctrl)
++			qos_ctl |= cpu_to_le16(IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT);
++
++		memcpy(ieee80211_get_qos_ctl(qos_ptr), &qos_ctl, IEEE80211_QOS_CTL_LEN);
+ 	}
+ }
+ 
+@@ -3693,6 +3686,15 @@ static bool ath12k_dp_rx_h_tkip_mic_err(struct ath12k *ar, struct sk_buff *msdu,
+ 
+ 	l3pad_bytes = ath12k_dp_rx_h_l3pad(ab, desc);
+ 	msdu_len = ath12k_dp_rx_h_msdu_len(ab, desc);
++
++	if ((hal_rx_desc_sz + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE) {
++		ath12k_dbg(ab, ATH12K_DBG_DATA,
++			   "invalid msdu len in tkip mic err %u\n", msdu_len);
++		ath12k_dbg_dump(ab, ATH12K_DBG_DATA, NULL, "", desc,
++				sizeof(*desc));
++		return true;
++	}
++
+ 	skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
+ 	skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
+ 
+diff --git a/drivers/net/wireless/ath/ath12k/dp_tx.c b/drivers/net/wireless/ath/ath12k/dp_tx.c
+index 201ffdb8c44ae9..734e3da4cbf191 100644
+--- a/drivers/net/wireless/ath/ath12k/dp_tx.c
++++ b/drivers/net/wireless/ath/ath12k/dp_tx.c
+@@ -566,6 +566,7 @@ ath12k_dp_tx_process_htt_tx_complete(struct ath12k_base *ab,
+ 	case HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL:
+ 	case HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ:
+ 	case HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT:
++	case HAL_WBM_REL_HTT_TX_COMP_STATUS_VDEVID_MISMATCH:
+ 		ath12k_dp_tx_free_txbuf(ab, msdu, mac_id, tx_ring);
+ 		break;
+ 	case HAL_WBM_REL_HTT_TX_COMP_STATUS_MEC_NOTIFY:
+diff --git a/drivers/net/wireless/ath/ath12k/hal.c b/drivers/net/wireless/ath/ath12k/hal.c
+index ca04bfae8bdccc..bfa404997710e9 100644
+--- a/drivers/net/wireless/ath/ath12k/hal.c
++++ b/drivers/net/wireless/ath/ath12k/hal.c
+@@ -1,7 +1,7 @@
+ // SPDX-License-Identifier: BSD-3-Clause-Clear
+ /*
+  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+  */
+ #include <linux/dma-mapping.h>
+ #include "hal_tx.h"
+@@ -511,11 +511,6 @@ static void ath12k_hw_qcn9274_rx_desc_get_crypto_hdr(struct hal_rx_desc *desc,
+ 	crypto_hdr[7] = HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.qcn9274.mpdu_start.pn[1]);
+ }
+ 
+-static u16 ath12k_hw_qcn9274_rx_desc_get_mpdu_frame_ctl(struct hal_rx_desc *desc)
+-{
+-	return __le16_to_cpu(desc->u.qcn9274.mpdu_start.frame_ctrl);
+-}
+-
+ static int ath12k_hal_srng_create_config_qcn9274(struct ath12k_base *ab)
+ {
+ 	struct ath12k_hal *hal = &ab->hal;
+@@ -552,9 +547,9 @@ static int ath12k_hal_srng_create_config_qcn9274(struct ath12k_base *ab)
+ 	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_HP;
+ 
+ 	s = &hal->srng_config[HAL_TCL_DATA];
+-	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_BASE_LSB;
++	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_BASE_LSB(ab);
+ 	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_HP;
+-	s->reg_size[0] = HAL_TCL2_RING_BASE_LSB - HAL_TCL1_RING_BASE_LSB;
++	s->reg_size[0] = HAL_TCL2_RING_BASE_LSB(ab) - HAL_TCL1_RING_BASE_LSB(ab);
+ 	s->reg_size[1] = HAL_TCL2_RING_HP - HAL_TCL1_RING_HP;
+ 
+ 	s = &hal->srng_config[HAL_TCL_CMD];
+@@ -566,29 +561,29 @@ static int ath12k_hal_srng_create_config_qcn9274(struct ath12k_base *ab)
+ 	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_HP;
+ 
+ 	s = &hal->srng_config[HAL_CE_SRC];
+-	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG + HAL_CE_DST_RING_BASE_LSB;
+-	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG + HAL_CE_DST_RING_HP;
+-	s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG -
+-		HAL_SEQ_WCSS_UMAC_CE0_SRC_REG;
+-	s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG -
+-		HAL_SEQ_WCSS_UMAC_CE0_SRC_REG;
++	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) + HAL_CE_DST_RING_BASE_LSB;
++	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) + HAL_CE_DST_RING_HP;
++	s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) -
++		HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab);
++	s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) -
++		HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab);
+ 
+ 	s = &hal->srng_config[HAL_CE_DST];
+-	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG + HAL_CE_DST_RING_BASE_LSB;
+-	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG + HAL_CE_DST_RING_HP;
+-	s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
+-		HAL_SEQ_WCSS_UMAC_CE0_DST_REG;
+-	s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
+-		HAL_SEQ_WCSS_UMAC_CE0_DST_REG;
++	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_RING_BASE_LSB;
++	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_RING_HP;
++	s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
++		HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
++	s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
++		HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
+ 
+ 	s = &hal->srng_config[HAL_CE_DST_STATUS];
+-	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG +
++	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) +
+ 		HAL_CE_DST_STATUS_RING_BASE_LSB;
+-	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG + HAL_CE_DST_STATUS_RING_HP;
+-	s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
+-		HAL_SEQ_WCSS_UMAC_CE0_DST_REG;
+-	s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
+-		HAL_SEQ_WCSS_UMAC_CE0_DST_REG;
++	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_STATUS_RING_HP;
++	s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
++		HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
++	s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
++		HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
+ 
+ 	s = &hal->srng_config[HAL_WBM_IDLE_LINK];
+ 	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_BASE_LSB(ab);
+@@ -736,7 +731,6 @@ const struct hal_rx_ops hal_rx_qcn9274_ops = {
+ 	.rx_desc_is_da_mcbc = ath12k_hw_qcn9274_rx_desc_is_da_mcbc,
+ 	.rx_desc_get_dot11_hdr = ath12k_hw_qcn9274_rx_desc_get_dot11_hdr,
+ 	.rx_desc_get_crypto_header = ath12k_hw_qcn9274_rx_desc_get_crypto_hdr,
+-	.rx_desc_get_mpdu_frame_ctl = ath12k_hw_qcn9274_rx_desc_get_mpdu_frame_ctl,
+ 	.dp_rx_h_msdu_done = ath12k_hw_qcn9274_dp_rx_h_msdu_done,
+ 	.dp_rx_h_l4_cksum_fail = ath12k_hw_qcn9274_dp_rx_h_l4_cksum_fail,
+ 	.dp_rx_h_ip_cksum_fail = ath12k_hw_qcn9274_dp_rx_h_ip_cksum_fail,
+@@ -975,11 +969,6 @@ ath12k_hw_qcn9274_compact_rx_desc_get_crypto_hdr(struct hal_rx_desc *desc,
+ 		HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.qcn9274_compact.mpdu_start.pn[1]);
+ }
+ 
+-static u16 ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_frame_ctl(struct hal_rx_desc *desc)
+-{
+-	return __le16_to_cpu(desc->u.qcn9274_compact.mpdu_start.frame_ctrl);
+-}
+-
+ static bool ath12k_hw_qcn9274_compact_dp_rx_h_msdu_done(struct hal_rx_desc *desc)
+ {
+ 	return !!le32_get_bits(desc->u.qcn9274_compact.msdu_end.info14,
+@@ -1080,8 +1069,6 @@ const struct hal_rx_ops hal_rx_qcn9274_compact_ops = {
+ 	.rx_desc_is_da_mcbc = ath12k_hw_qcn9274_compact_rx_desc_is_da_mcbc,
+ 	.rx_desc_get_dot11_hdr = ath12k_hw_qcn9274_compact_rx_desc_get_dot11_hdr,
+ 	.rx_desc_get_crypto_header = ath12k_hw_qcn9274_compact_rx_desc_get_crypto_hdr,
+-	.rx_desc_get_mpdu_frame_ctl =
+-		ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_frame_ctl,
+ 	.dp_rx_h_msdu_done = ath12k_hw_qcn9274_compact_dp_rx_h_msdu_done,
+ 	.dp_rx_h_l4_cksum_fail = ath12k_hw_qcn9274_compact_dp_rx_h_l4_cksum_fail,
+ 	.dp_rx_h_ip_cksum_fail = ath12k_hw_qcn9274_compact_dp_rx_h_ip_cksum_fail,
+@@ -1330,11 +1317,6 @@ static void ath12k_hw_wcn7850_rx_desc_get_crypto_hdr(struct hal_rx_desc *desc,
+ 	crypto_hdr[7] = HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.wcn7850.mpdu_start.pn[1]);
+ }
+ 
+-static u16 ath12k_hw_wcn7850_rx_desc_get_mpdu_frame_ctl(struct hal_rx_desc *desc)
+-{
+-	return __le16_to_cpu(desc->u.wcn7850.mpdu_start.frame_ctrl);
+-}
+-
+ static int ath12k_hal_srng_create_config_wcn7850(struct ath12k_base *ab)
+ {
+ 	struct ath12k_hal *hal = &ab->hal;
+@@ -1371,9 +1353,9 @@ static int ath12k_hal_srng_create_config_wcn7850(struct ath12k_base *ab)
+ 
+ 	s = &hal->srng_config[HAL_TCL_DATA];
+ 	s->max_rings = 5;
+-	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_BASE_LSB;
++	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_BASE_LSB(ab);
+ 	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_HP;
+-	s->reg_size[0] = HAL_TCL2_RING_BASE_LSB - HAL_TCL1_RING_BASE_LSB;
++	s->reg_size[0] = HAL_TCL2_RING_BASE_LSB(ab) - HAL_TCL1_RING_BASE_LSB(ab);
+ 	s->reg_size[1] = HAL_TCL2_RING_HP - HAL_TCL1_RING_HP;
+ 
+ 	s = &hal->srng_config[HAL_TCL_CMD];
+@@ -1386,31 +1368,31 @@ static int ath12k_hal_srng_create_config_wcn7850(struct ath12k_base *ab)
+ 
+ 	s = &hal->srng_config[HAL_CE_SRC];
+ 	s->max_rings = 12;
+-	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG + HAL_CE_DST_RING_BASE_LSB;
+-	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG + HAL_CE_DST_RING_HP;
+-	s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG -
+-		HAL_SEQ_WCSS_UMAC_CE0_SRC_REG;
+-	s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG -
+-		HAL_SEQ_WCSS_UMAC_CE0_SRC_REG;
++	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) + HAL_CE_DST_RING_BASE_LSB;
++	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) + HAL_CE_DST_RING_HP;
++	s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) -
++		HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab);
++	s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) -
++		HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab);
+ 
+ 	s = &hal->srng_config[HAL_CE_DST];
+ 	s->max_rings = 12;
+-	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG + HAL_CE_DST_RING_BASE_LSB;
+-	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG + HAL_CE_DST_RING_HP;
+-	s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
+-		HAL_SEQ_WCSS_UMAC_CE0_DST_REG;
+-	s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
+-		HAL_SEQ_WCSS_UMAC_CE0_DST_REG;
++	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_RING_BASE_LSB;
++	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_RING_HP;
++	s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
++		HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
++	s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
++		HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
+ 
+ 	s = &hal->srng_config[HAL_CE_DST_STATUS];
+ 	s->max_rings = 12;
+-	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG +
++	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) +
+ 		HAL_CE_DST_STATUS_RING_BASE_LSB;
+-	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG + HAL_CE_DST_STATUS_RING_HP;
+-	s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
+-		HAL_SEQ_WCSS_UMAC_CE0_DST_REG;
+-	s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
+-		HAL_SEQ_WCSS_UMAC_CE0_DST_REG;
++	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_STATUS_RING_HP;
++	s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
++		HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
++	s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
++		HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
+ 
+ 	s = &hal->srng_config[HAL_WBM_IDLE_LINK];
+ 	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_BASE_LSB(ab);
+@@ -1555,7 +1537,6 @@ const struct hal_rx_ops hal_rx_wcn7850_ops = {
+ 	.rx_desc_is_da_mcbc = ath12k_hw_wcn7850_rx_desc_is_da_mcbc,
+ 	.rx_desc_get_dot11_hdr = ath12k_hw_wcn7850_rx_desc_get_dot11_hdr,
+ 	.rx_desc_get_crypto_header = ath12k_hw_wcn7850_rx_desc_get_crypto_hdr,
+-	.rx_desc_get_mpdu_frame_ctl = ath12k_hw_wcn7850_rx_desc_get_mpdu_frame_ctl,
+ 	.dp_rx_h_msdu_done = ath12k_hw_wcn7850_dp_rx_h_msdu_done,
+ 	.dp_rx_h_l4_cksum_fail = ath12k_hw_wcn7850_dp_rx_h_l4_cksum_fail,
+ 	.dp_rx_h_ip_cksum_fail = ath12k_hw_wcn7850_dp_rx_h_ip_cksum_fail,
+@@ -1756,7 +1737,7 @@ static void ath12k_hal_srng_src_hw_init(struct ath12k_base *ab,
+ 			      HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB) |
+ 	      u32_encode_bits((srng->entry_size * srng->num_entries),
+ 			      HAL_TCL1_RING_BASE_MSB_RING_SIZE);
+-	ath12k_hif_write32(ab, reg_base + HAL_TCL1_RING_BASE_MSB_OFFSET, val);
++	ath12k_hif_write32(ab, reg_base + HAL_TCL1_RING_BASE_MSB_OFFSET(ab), val);
+ 
+ 	val = u32_encode_bits(srng->entry_size, HAL_REO1_RING_ID_ENTRY_SIZE);
+ 	ath12k_hif_write32(ab, reg_base + HAL_TCL1_RING_ID_OFFSET(ab), val);
+diff --git a/drivers/net/wireless/ath/ath12k/hal.h b/drivers/net/wireless/ath/ath12k/hal.h
+index 8a78bb9a10bc15..fb7ec6fce07d3d 100644
+--- a/drivers/net/wireless/ath/ath12k/hal.h
++++ b/drivers/net/wireless/ath/ath12k/hal.h
+@@ -1,7 +1,7 @@
+ /* SPDX-License-Identifier: BSD-3-Clause-Clear */
+ /*
+  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+  */
+ 
+ #ifndef ATH12K_HAL_H
+@@ -44,10 +44,14 @@ struct ath12k_base;
+ #define HAL_SEQ_WCSS_UMAC_OFFSET		0x00a00000
+ #define HAL_SEQ_WCSS_UMAC_REO_REG		0x00a38000
+ #define HAL_SEQ_WCSS_UMAC_TCL_REG		0x00a44000
+-#define HAL_SEQ_WCSS_UMAC_CE0_SRC_REG		0x01b80000
+-#define HAL_SEQ_WCSS_UMAC_CE0_DST_REG		0x01b81000
+-#define HAL_SEQ_WCSS_UMAC_CE1_SRC_REG		0x01b82000
+-#define HAL_SEQ_WCSS_UMAC_CE1_DST_REG		0x01b83000
++#define HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) \
++	((ab)->hw_params->regs->hal_umac_ce0_src_reg_base)
++#define HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) \
++	((ab)->hw_params->regs->hal_umac_ce0_dest_reg_base)
++#define HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) \
++	((ab)->hw_params->regs->hal_umac_ce1_src_reg_base)
++#define HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) \
++	((ab)->hw_params->regs->hal_umac_ce1_dest_reg_base)
+ #define HAL_SEQ_WCSS_UMAC_WBM_REG		0x00a34000
+ 
+ #define HAL_CE_WFSS_CE_REG_BASE			0x01b80000
+@@ -57,8 +61,10 @@ struct ath12k_base;
+ /* SW2TCL(x) R0 ring configuration address */
+ #define HAL_TCL1_RING_CMN_CTRL_REG		0x00000020
+ #define HAL_TCL1_RING_DSCP_TID_MAP		0x00000240
+-#define HAL_TCL1_RING_BASE_LSB			0x00000900
+-#define HAL_TCL1_RING_BASE_MSB			0x00000904
++#define HAL_TCL1_RING_BASE_LSB(ab) \
++	((ab)->hw_params->regs->hal_tcl1_ring_base_lsb)
++#define HAL_TCL1_RING_BASE_MSB(ab) \
++	((ab)->hw_params->regs->hal_tcl1_ring_base_msb)
+ #define HAL_TCL1_RING_ID(ab)			((ab)->hw_params->regs->hal_tcl1_ring_id)
+ #define HAL_TCL1_RING_MISC(ab) \
+ 	((ab)->hw_params->regs->hal_tcl1_ring_misc)
+@@ -76,30 +82,31 @@ struct ath12k_base;
+ 	((ab)->hw_params->regs->hal_tcl1_ring_msi1_base_msb)
+ #define HAL_TCL1_RING_MSI1_DATA(ab) \
+ 	((ab)->hw_params->regs->hal_tcl1_ring_msi1_data)
+-#define HAL_TCL2_RING_BASE_LSB			0x00000978
++#define HAL_TCL2_RING_BASE_LSB(ab) \
++	((ab)->hw_params->regs->hal_tcl2_ring_base_lsb)
+ #define HAL_TCL_RING_BASE_LSB(ab) \
+ 	((ab)->hw_params->regs->hal_tcl_ring_base_lsb)
+ 
+-#define HAL_TCL1_RING_MSI1_BASE_LSB_OFFSET(ab)				\
+-	(HAL_TCL1_RING_MSI1_BASE_LSB(ab) - HAL_TCL1_RING_BASE_LSB)
+-#define HAL_TCL1_RING_MSI1_BASE_MSB_OFFSET(ab)				\
+-	(HAL_TCL1_RING_MSI1_BASE_MSB(ab) - HAL_TCL1_RING_BASE_LSB)
+-#define HAL_TCL1_RING_MSI1_DATA_OFFSET(ab)				\
+-	(HAL_TCL1_RING_MSI1_DATA(ab) - HAL_TCL1_RING_BASE_LSB)
+-#define HAL_TCL1_RING_BASE_MSB_OFFSET				\
+-	(HAL_TCL1_RING_BASE_MSB - HAL_TCL1_RING_BASE_LSB)
+-#define HAL_TCL1_RING_ID_OFFSET(ab)				\
+-	(HAL_TCL1_RING_ID(ab) - HAL_TCL1_RING_BASE_LSB)
+-#define HAL_TCL1_RING_CONSR_INT_SETUP_IX0_OFFSET(ab)			\
+-	(HAL_TCL1_RING_CONSUMER_INT_SETUP_IX0(ab) - HAL_TCL1_RING_BASE_LSB)
+-#define HAL_TCL1_RING_CONSR_INT_SETUP_IX1_OFFSET(ab) \
+-		(HAL_TCL1_RING_CONSUMER_INT_SETUP_IX1(ab) - HAL_TCL1_RING_BASE_LSB)
+-#define HAL_TCL1_RING_TP_ADDR_LSB_OFFSET(ab) \
+-		(HAL_TCL1_RING_TP_ADDR_LSB(ab) - HAL_TCL1_RING_BASE_LSB)
+-#define HAL_TCL1_RING_TP_ADDR_MSB_OFFSET(ab) \
+-		(HAL_TCL1_RING_TP_ADDR_MSB(ab) - HAL_TCL1_RING_BASE_LSB)
+-#define HAL_TCL1_RING_MISC_OFFSET(ab) \
+-		(HAL_TCL1_RING_MISC(ab) - HAL_TCL1_RING_BASE_LSB)
++#define HAL_TCL1_RING_MSI1_BASE_LSB_OFFSET(ab) ({ typeof(ab) _ab = (ab); \
++	(HAL_TCL1_RING_MSI1_BASE_LSB(_ab) - HAL_TCL1_RING_BASE_LSB(_ab)); })
++#define HAL_TCL1_RING_MSI1_BASE_MSB_OFFSET(ab) ({ typeof(ab) _ab = (ab); \
++	(HAL_TCL1_RING_MSI1_BASE_MSB(_ab) - HAL_TCL1_RING_BASE_LSB(_ab)); })
++#define HAL_TCL1_RING_MSI1_DATA_OFFSET(ab) ({ typeof(ab) _ab = (ab); \
++	(HAL_TCL1_RING_MSI1_DATA(_ab) - HAL_TCL1_RING_BASE_LSB(_ab)); })
++#define HAL_TCL1_RING_BASE_MSB_OFFSET(ab) ({ typeof(ab) _ab = (ab); \
++	(HAL_TCL1_RING_BASE_MSB(_ab) - HAL_TCL1_RING_BASE_LSB(_ab)); })
++#define HAL_TCL1_RING_ID_OFFSET(ab) ({ typeof(ab) _ab = (ab); \
++	(HAL_TCL1_RING_ID(_ab) - HAL_TCL1_RING_BASE_LSB(_ab)); })
++#define HAL_TCL1_RING_CONSR_INT_SETUP_IX0_OFFSET(ab) ({ typeof(ab) _ab = (ab); \
++	(HAL_TCL1_RING_CONSUMER_INT_SETUP_IX0(_ab) - HAL_TCL1_RING_BASE_LSB(_ab)); })
++#define HAL_TCL1_RING_CONSR_INT_SETUP_IX1_OFFSET(ab) ({ typeof(ab) _ab = (ab); \
++	(HAL_TCL1_RING_CONSUMER_INT_SETUP_IX1(_ab) - HAL_TCL1_RING_BASE_LSB(_ab)); })
++#define HAL_TCL1_RING_TP_ADDR_LSB_OFFSET(ab) ({ typeof(ab) _ab = (ab); \
++	(HAL_TCL1_RING_TP_ADDR_LSB(_ab) - HAL_TCL1_RING_BASE_LSB(_ab)); })
++#define HAL_TCL1_RING_TP_ADDR_MSB_OFFSET(ab) ({ typeof(ab) _ab = (ab); \
++	(HAL_TCL1_RING_TP_ADDR_MSB(_ab) - HAL_TCL1_RING_BASE_LSB(_ab)); })
++#define HAL_TCL1_RING_MISC_OFFSET(ab) ({ typeof(ab) _ab = (ab); \
++	(HAL_TCL1_RING_MISC(_ab) - HAL_TCL1_RING_BASE_LSB(_ab)); })
+ 
+ /* SW2TCL(x) R2 ring pointers (head/tail) address */
+ #define HAL_TCL1_RING_HP			0x00002000
+@@ -1068,7 +1075,6 @@ struct hal_rx_ops {
+ 	bool (*rx_desc_is_da_mcbc)(struct hal_rx_desc *desc);
+ 	void (*rx_desc_get_dot11_hdr)(struct hal_rx_desc *desc,
+ 				      struct ieee80211_hdr *hdr);
+-	u16 (*rx_desc_get_mpdu_frame_ctl)(struct hal_rx_desc *desc);
+ 	void (*rx_desc_get_crypto_header)(struct hal_rx_desc *desc,
+ 					  u8 *crypto_hdr,
+ 					  enum hal_encrypt_type enctype);
+diff --git a/drivers/net/wireless/ath/ath12k/hal_desc.h b/drivers/net/wireless/ath/ath12k/hal_desc.h
+index 4f745cfd7d8e7e..c68998e9667c93 100644
+--- a/drivers/net/wireless/ath/ath12k/hal_desc.h
++++ b/drivers/net/wireless/ath/ath12k/hal_desc.h
+@@ -1,7 +1,7 @@
+ /* SPDX-License-Identifier: BSD-3-Clause-Clear */
+ /*
+  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+- * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2021-2022, 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+  */
+ #include "core.h"
+ 
+@@ -1296,6 +1296,7 @@ enum hal_wbm_htt_tx_comp_status {
+ 	HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ,
+ 	HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT,
+ 	HAL_WBM_REL_HTT_TX_COMP_STATUS_MEC_NOTIFY,
++	HAL_WBM_REL_HTT_TX_COMP_STATUS_VDEVID_MISMATCH,
+ 	HAL_WBM_REL_HTT_TX_COMP_STATUS_MAX,
+ };
+ 
+diff --git a/drivers/net/wireless/ath/ath12k/hw.c b/drivers/net/wireless/ath/ath12k/hw.c
+index ec1bda95e555dd..e3eb22bb9e1cb9 100644
+--- a/drivers/net/wireless/ath/ath12k/hw.c
++++ b/drivers/net/wireless/ath/ath12k/hw.c
+@@ -1,7 +1,7 @@
+ // SPDX-License-Identifier: BSD-3-Clause-Clear
+ /*
+  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+  */
+ 
+ #include <linux/types.h>
+@@ -615,6 +615,9 @@ static const struct ath12k_hw_regs qcn9274_v1_regs = {
+ 	.hal_tcl1_ring_msi1_base_msb = 0x0000094c,
+ 	.hal_tcl1_ring_msi1_data = 0x00000950,
+ 	.hal_tcl_ring_base_lsb = 0x00000b58,
++	.hal_tcl1_ring_base_lsb = 0x00000900,
++	.hal_tcl1_ring_base_msb = 0x00000904,
++	.hal_tcl2_ring_base_lsb = 0x00000978,
+ 
+ 	/* TCL STATUS ring address */
+ 	.hal_tcl_status_ring_base_lsb = 0x00000d38,
+@@ -677,6 +680,14 @@ static const struct ath12k_hw_regs qcn9274_v1_regs = {
+ 
+ 	/* REO status ring address */
+ 	.hal_reo_status_ring_base = 0x00000a84,
++
++	/* CE base address */
++	.hal_umac_ce0_src_reg_base = 0x01b80000,
++	.hal_umac_ce0_dest_reg_base = 0x01b81000,
++	.hal_umac_ce1_src_reg_base = 0x01b82000,
++	.hal_umac_ce1_dest_reg_base = 0x01b83000,
++
++	.gcc_gcc_pcie_hot_rst = 0x1e38338,
+ };
+ 
+ static const struct ath12k_hw_regs qcn9274_v2_regs = {
+@@ -691,6 +702,9 @@ static const struct ath12k_hw_regs qcn9274_v2_regs = {
+ 	.hal_tcl1_ring_msi1_base_msb = 0x0000094c,
+ 	.hal_tcl1_ring_msi1_data = 0x00000950,
+ 	.hal_tcl_ring_base_lsb = 0x00000b58,
++	.hal_tcl1_ring_base_lsb = 0x00000900,
++	.hal_tcl1_ring_base_msb = 0x00000904,
++	.hal_tcl2_ring_base_lsb = 0x00000978,
+ 
+ 	/* TCL STATUS ring address */
+ 	.hal_tcl_status_ring_base_lsb = 0x00000d38,
+@@ -757,6 +771,14 @@ static const struct ath12k_hw_regs qcn9274_v2_regs = {
+ 
+ 	/* REO status ring address */
+ 	.hal_reo_status_ring_base = 0x00000aa0,
++
++	/* CE base address */
++	.hal_umac_ce0_src_reg_base = 0x01b80000,
++	.hal_umac_ce0_dest_reg_base = 0x01b81000,
++	.hal_umac_ce1_src_reg_base = 0x01b82000,
++	.hal_umac_ce1_dest_reg_base = 0x01b83000,
++
++	.gcc_gcc_pcie_hot_rst = 0x1e38338,
+ };
+ 
+ static const struct ath12k_hw_regs wcn7850_regs = {
+@@ -771,6 +793,9 @@ static const struct ath12k_hw_regs wcn7850_regs = {
+ 	.hal_tcl1_ring_msi1_base_msb = 0x0000094c,
+ 	.hal_tcl1_ring_msi1_data = 0x00000950,
+ 	.hal_tcl_ring_base_lsb = 0x00000b58,
++	.hal_tcl1_ring_base_lsb = 0x00000900,
++	.hal_tcl1_ring_base_msb = 0x00000904,
++	.hal_tcl2_ring_base_lsb = 0x00000978,
+ 
+ 	/* TCL STATUS ring address */
+ 	.hal_tcl_status_ring_base_lsb = 0x00000d38,
+@@ -833,6 +858,14 @@ static const struct ath12k_hw_regs wcn7850_regs = {
+ 
+ 	/* REO status ring address */
+ 	.hal_reo_status_ring_base = 0x00000a84,
++
++	/* CE base address */
++	.hal_umac_ce0_src_reg_base = 0x01b80000,
++	.hal_umac_ce0_dest_reg_base = 0x01b81000,
++	.hal_umac_ce1_src_reg_base = 0x01b82000,
++	.hal_umac_ce1_dest_reg_base = 0x01b83000,
++
++	.gcc_gcc_pcie_hot_rst = 0x1e40304,
+ };
+ 
+ static const struct ath12k_hw_hal_params ath12k_hw_hal_params_qcn9274 = {
+diff --git a/drivers/net/wireless/ath/ath12k/hw.h b/drivers/net/wireless/ath/ath12k/hw.h
+index 8d52182e28aef4..862b11325a9021 100644
+--- a/drivers/net/wireless/ath/ath12k/hw.h
++++ b/drivers/net/wireless/ath/ath12k/hw.h
+@@ -1,7 +1,7 @@
+ /* SPDX-License-Identifier: BSD-3-Clause-Clear */
+ /*
+  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+  */
+ 
+ #ifndef ATH12K_HW_H
+@@ -293,6 +293,9 @@ struct ath12k_hw_regs {
+ 	u32 hal_tcl1_ring_msi1_base_msb;
+ 	u32 hal_tcl1_ring_msi1_data;
+ 	u32 hal_tcl_ring_base_lsb;
++	u32 hal_tcl1_ring_base_lsb;
++	u32 hal_tcl1_ring_base_msb;
++	u32 hal_tcl2_ring_base_lsb;
+ 
+ 	u32 hal_tcl_status_ring_base_lsb;
+ 
+@@ -316,6 +319,11 @@ struct ath12k_hw_regs {
+ 	u32 pcie_qserdes_sysclk_en_sel;
+ 	u32 pcie_pcs_osc_dtct_config_base;
+ 
++	u32 hal_umac_ce0_src_reg_base;
++	u32 hal_umac_ce0_dest_reg_base;
++	u32 hal_umac_ce1_src_reg_base;
++	u32 hal_umac_ce1_dest_reg_base;
++
+ 	u32 hal_ppe_rel_ring_base;
+ 
+ 	u32 hal_reo2_ring_base;
+@@ -347,6 +355,8 @@ struct ath12k_hw_regs {
+ 	u32 hal_reo_cmd_ring_base;
+ 
+ 	u32 hal_reo_status_ring_base;
++
++	u32 gcc_gcc_pcie_hot_rst;
+ };
+ 
+ static inline const char *ath12k_bd_ie_type_str(enum ath12k_bd_ie_type type)
+diff --git a/drivers/net/wireless/ath/ath12k/pci.c b/drivers/net/wireless/ath/ath12k/pci.c
+index 45d537066345a2..26f4b440c26d27 100644
+--- a/drivers/net/wireless/ath/ath12k/pci.c
++++ b/drivers/net/wireless/ath/ath12k/pci.c
+@@ -290,10 +290,10 @@ static void ath12k_pci_enable_ltssm(struct ath12k_base *ab)
+ 
+ 	ath12k_dbg(ab, ATH12K_DBG_PCI, "pci ltssm 0x%x\n", val);
+ 
+-	val = ath12k_pci_read32(ab, GCC_GCC_PCIE_HOT_RST);
++	val = ath12k_pci_read32(ab, GCC_GCC_PCIE_HOT_RST(ab));
+ 	val |= GCC_GCC_PCIE_HOT_RST_VAL;
+-	ath12k_pci_write32(ab, GCC_GCC_PCIE_HOT_RST, val);
+-	val = ath12k_pci_read32(ab, GCC_GCC_PCIE_HOT_RST);
++	ath12k_pci_write32(ab, GCC_GCC_PCIE_HOT_RST(ab), val);
++	val = ath12k_pci_read32(ab, GCC_GCC_PCIE_HOT_RST(ab));
+ 
+ 	ath12k_dbg(ab, ATH12K_DBG_PCI, "pci pcie_hot_rst 0x%x\n", val);
+ 
+@@ -1514,12 +1514,12 @@ static int ath12k_pci_probe(struct pci_dev *pdev,
+ err_mhi_unregister:
+ 	ath12k_mhi_unregister(ab_pci);
+ 
+-err_pci_msi_free:
+-	ath12k_pci_msi_free(ab_pci);
+-
+ err_irq_affinity_cleanup:
+ 	ath12k_pci_set_irq_affinity_hint(ab_pci, NULL);
+ 
++err_pci_msi_free:
++	ath12k_pci_msi_free(ab_pci);
++
+ err_pci_free_region:
+ 	ath12k_pci_free_region(ab_pci);
+ 
+diff --git a/drivers/net/wireless/ath/ath12k/pci.h b/drivers/net/wireless/ath/ath12k/pci.h
+index 31584a7ad80eb9..9321674eef8b8f 100644
+--- a/drivers/net/wireless/ath/ath12k/pci.h
++++ b/drivers/net/wireless/ath/ath12k/pci.h
+@@ -28,7 +28,9 @@
+ #define PCIE_PCIE_PARF_LTSSM			0x1e081b0
+ #define PARM_LTSSM_VALUE			0x111
+ 
+-#define GCC_GCC_PCIE_HOT_RST			0x1e38338
++#define GCC_GCC_PCIE_HOT_RST(ab) \
++	((ab)->hw_params->regs->gcc_gcc_pcie_hot_rst)
++
+ #define GCC_GCC_PCIE_HOT_RST_VAL		0x10
+ 
+ #define PCIE_PCIE_INT_ALL_CLEAR			0x1e08228
+diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c
+index 30836a09d5506c..17ac54047f9a70 100644
+--- a/drivers/net/wireless/ath/ath12k/wmi.c
++++ b/drivers/net/wireless/ath/ath12k/wmi.c
+@@ -2157,7 +2157,7 @@ int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar,
+ 
+ 	for (i = 0; i < arg->peer_eht_mcs_count; i++) {
+ 		eht_mcs = ptr;
+-		eht_mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HE_RATE_SET,
++		eht_mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_EHT_RATE_SET,
+ 							     sizeof(*eht_mcs));
+ 
+ 		eht_mcs->rx_mcs_set = cpu_to_le32(arg->peer_eht_rx_mcs_set[i]);
+@@ -4372,6 +4372,7 @@ static int ath12k_service_ready_ext_event(struct ath12k_base *ab,
+ 	return 0;
+ 
+ err:
++	kfree(svc_rdy_ext.mac_phy_caps);
+ 	ath12k_wmi_free_dbring_caps(ab);
+ 	return ret;
+ }
+diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
+index 547634f82183d6..81fa7cbad89213 100644
+--- a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
++++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
+@@ -290,6 +290,9 @@ void ath9k_htc_swba(struct ath9k_htc_priv *priv,
+ 	struct ath_common *common = ath9k_hw_common(priv->ah);
+ 	int slot;
+ 
++	if (!priv->cur_beacon_conf.enable_beacon)
++		return;
++
+ 	if (swba->beacon_pending != 0) {
+ 		priv->beacon.bmisscnt++;
+ 		if (priv->beacon.bmisscnt > BSTUCK_THRESHOLD) {
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+index a8c4e354e2ce75..5f8f2458044439 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+@@ -2,6 +2,7 @@
+ /******************************************************************************
+  *
+  * Copyright(c) 2005 - 2014, 2018 - 2023 Intel Corporation. All rights reserved.
++ * Copyright(c) 2025 Intel Corporation
+  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+  *****************************************************************************/
+@@ -2709,6 +2710,7 @@ static void rs_drv_get_rate(void *mvm_r, struct ieee80211_sta *sta,
+ 							  optimal_rate);
+ 		iwl_mvm_hwrate_to_tx_rate_v1(last_ucode_rate, info->band,
+ 					     &txrc->reported_rate);
++		txrc->reported_rate.count = 1;
+ 	}
+ 	spin_unlock_bh(&lq_sta->pers.lock);
+ }
+diff --git a/drivers/net/wireless/marvell/mwifiex/11n.c b/drivers/net/wireless/marvell/mwifiex/11n.c
+index 738bafc3749b0a..66f0f5377ac181 100644
+--- a/drivers/net/wireless/marvell/mwifiex/11n.c
++++ b/drivers/net/wireless/marvell/mwifiex/11n.c
+@@ -403,14 +403,12 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
+ 
+ 		if (sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 &&
+ 		    bss_desc->bcn_ht_oper->ht_param &
+-		    IEEE80211_HT_PARAM_CHAN_WIDTH_ANY) {
+-			chan_list->chan_scan_param[0].radio_type |=
+-				CHAN_BW_40MHZ << 2;
++		    IEEE80211_HT_PARAM_CHAN_WIDTH_ANY)
+ 			SET_SECONDARYCHAN(chan_list->chan_scan_param[0].
+ 					  radio_type,
+ 					  (bss_desc->bcn_ht_oper->ht_param &
+ 					  IEEE80211_HT_PARAM_CHA_SEC_OFFSET));
+-		}
++
+ 		*buffer += struct_size(chan_list, chan_scan_param, 1);
+ 		ret_len += struct_size(chan_list, chan_scan_param, 1);
+ 	}
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
+index 2e7604eed27b02..a6245c3ccef48c 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
+@@ -649,6 +649,9 @@ int mt7915_mmio_wed_init(struct mt7915_dev *dev, void *pdev_ptr,
+ 		wed->wlan.base = devm_ioremap(dev->mt76.dev,
+ 					      pci_resource_start(pci_dev, 0),
+ 					      pci_resource_len(pci_dev, 0));
++		if (!wed->wlan.base)
++			return -ENOMEM;
++
+ 		wed->wlan.phy_base = pci_resource_start(pci_dev, 0);
+ 		wed->wlan.wpdma_int = pci_resource_start(pci_dev, 0) +
+ 				      MT_INT_WED_SOURCE_CSR;
+@@ -676,6 +679,9 @@ int mt7915_mmio_wed_init(struct mt7915_dev *dev, void *pdev_ptr,
+ 		wed->wlan.bus_type = MTK_WED_BUS_AXI;
+ 		wed->wlan.base = devm_ioremap(dev->mt76.dev, res->start,
+ 					      resource_size(res));
++		if (!wed->wlan.base)
++			return -ENOMEM;
++
+ 		wed->wlan.phy_base = res->start;
+ 		wed->wlan.wpdma_int = res->start + MT_INT_SOURCE_CSR;
+ 		wed->wlan.wpdma_mask = res->start + MT_INT_MASK_CSR;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
+index 2396e1795fe171..a19c108ad4b5c9 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
+@@ -769,7 +769,7 @@ int mt7925_mcu_fw_log_2_host(struct mt792x_dev *dev, u8 ctrl)
+ 	int ret;
+ 
+ 	ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_UNI_CMD(WSYS_CONFIG),
+-					&req, sizeof(req), false, NULL);
++					&req, sizeof(req), true, NULL);
+ 	return ret;
+ }
+ 
+@@ -1411,7 +1411,7 @@ int mt7925_mcu_set_eeprom(struct mt792x_dev *dev)
+ 	};
+ 
+ 	return mt76_mcu_send_and_get_msg(&dev->mt76, MCU_UNI_CMD(EFUSE_CTRL),
+-					 &req, sizeof(req), false, NULL);
++					 &req, sizeof(req), true, NULL);
+ }
+ EXPORT_SYMBOL_GPL(mt7925_mcu_set_eeprom);
+ 
+@@ -2087,8 +2087,6 @@ int mt7925_mcu_set_sniffer(struct mt792x_dev *dev, struct ieee80211_vif *vif,
+ 		},
+ 	};
+ 
+-	mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(SNIFFER), &req, sizeof(req), true);
+-
+ 	return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(SNIFFER), &req, sizeof(req),
+ 				 true);
+ }
+@@ -2743,7 +2741,7 @@ int mt7925_mcu_set_dbdc(struct mt76_phy *phy, bool enable)
+ 	conf->band = 0; /* unused */
+ 
+ 	err = mt76_mcu_skb_send_msg(mdev, skb, MCU_UNI_CMD(SET_DBDC_PARMS),
+-				    false);
++				    true);
+ 
+ 	return err;
+ }
+@@ -2771,6 +2769,9 @@ int mt7925_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
+ 	struct tlv *tlv;
+ 	int max_len;
+ 
++	if (test_bit(MT76_HW_SCANNING, &phy->state))
++		return -EBUSY;
++
+ 	max_len = sizeof(*hdr) + sizeof(*req) + sizeof(*ssid) +
+ 				sizeof(*bssid) + sizeof(*chan_info) +
+ 				sizeof(*misc) + sizeof(*ie);
+@@ -2858,7 +2859,7 @@ int mt7925_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
+ 	}
+ 
+ 	err = mt76_mcu_skb_send_msg(mdev, skb, MCU_UNI_CMD(SCAN_REQ),
+-				    false);
++				    true);
+ 	if (err < 0)
+ 		clear_bit(MT76_HW_SCANNING, &phy->state);
+ 
+@@ -2964,7 +2965,7 @@ int mt7925_mcu_sched_scan_req(struct mt76_phy *phy,
+ 	}
+ 
+ 	return mt76_mcu_skb_send_msg(mdev, skb, MCU_UNI_CMD(SCAN_REQ),
+-				     false);
++				     true);
+ }
+ EXPORT_SYMBOL_GPL(mt7925_mcu_sched_scan_req);
+ 
+@@ -3000,7 +3001,7 @@ mt7925_mcu_sched_scan_enable(struct mt76_phy *phy,
+ 		clear_bit(MT76_HW_SCHED_SCANNING, &phy->state);
+ 
+ 	return mt76_mcu_skb_send_msg(mdev, skb, MCU_UNI_CMD(SCAN_REQ),
+-				     false);
++				     true);
+ }
+ 
+ int mt7925_mcu_cancel_hw_scan(struct mt76_phy *phy,
+@@ -3039,7 +3040,7 @@ int mt7925_mcu_cancel_hw_scan(struct mt76_phy *phy,
+ 	}
+ 
+ 	return mt76_mcu_send_msg(phy->dev, MCU_UNI_CMD(SCAN_REQ),
+-				 &req, sizeof(req), false);
++				 &req, sizeof(req), true);
+ }
+ EXPORT_SYMBOL_GPL(mt7925_mcu_cancel_hw_scan);
+ 
+@@ -3144,7 +3145,7 @@ int mt7925_mcu_set_channel_domain(struct mt76_phy *phy)
+ 	memcpy(__skb_push(skb, sizeof(req)), &req, sizeof(req));
+ 
+ 	return mt76_mcu_skb_send_msg(dev, skb, MCU_UNI_CMD(SET_DOMAIN_INFO),
+-				     false);
++				     true);
+ }
+ EXPORT_SYMBOL_GPL(mt7925_mcu_set_channel_domain);
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/dma.c b/drivers/net/wireless/mediatek/mt76/mt7996/dma.c
+index 69a7d9b2e38bd7..4b68d2fc5e0949 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/dma.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/dma.c
+@@ -493,7 +493,7 @@ int mt7996_dma_init(struct mt7996_dev *dev)
+ 	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU],
+ 			       MT_RXQ_ID(MT_RXQ_MCU),
+ 			       MT7996_RX_MCU_RING_SIZE,
+-			       MT_RX_BUF_SIZE,
++			       MT7996_RX_MCU_BUF_SIZE,
+ 			       MT_RXQ_RING_BASE(MT_RXQ_MCU));
+ 	if (ret)
+ 		return ret;
+@@ -502,7 +502,7 @@ int mt7996_dma_init(struct mt7996_dev *dev)
+ 	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU_WA],
+ 			       MT_RXQ_ID(MT_RXQ_MCU_WA),
+ 			       MT7996_RX_MCU_RING_SIZE_WA,
+-			       MT_RX_BUF_SIZE,
++			       MT7996_RX_MCU_BUF_SIZE,
+ 			       MT_RXQ_RING_BASE(MT_RXQ_MCU_WA));
+ 	if (ret)
+ 		return ret;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/init.c b/drivers/net/wireless/mediatek/mt76/mt7996/init.c
+index d8a013812d1e37..c5503855411436 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/init.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/init.c
+@@ -1193,6 +1193,9 @@ mt7996_init_eht_caps(struct mt7996_phy *phy, enum nl80211_band band,
+ 		u8_encode_bits(IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_11454,
+ 			       IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_MASK);
+ 
++	eht_cap_elem->mac_cap_info[1] |=
++		IEEE80211_EHT_MAC_CAP1_MAX_AMPDU_LEN_MASK;
++
+ 	eht_cap_elem->phy_cap_info[0] =
+ 		IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI |
+ 		IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMER |
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
+index b6209ed1cfe014..bffee73b780cba 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
+@@ -323,6 +323,9 @@ int mt7996_mmio_wed_init(struct mt7996_dev *dev, void *pdev_ptr,
+ 	wed->wlan.base = devm_ioremap(dev->mt76.dev,
+ 				      pci_resource_start(pci_dev, 0),
+ 				      pci_resource_len(pci_dev, 0));
++	if (!wed->wlan.base)
++		return -ENOMEM;
++
+ 	wed->wlan.phy_base = pci_resource_start(pci_dev, 0);
+ 
+ 	if (hif2) {
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
+index ab8c9070630b0f..425fd030bee001 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
+@@ -28,6 +28,9 @@
+ #define MT7996_RX_RING_SIZE		1536
+ #define MT7996_RX_MCU_RING_SIZE		512
+ #define MT7996_RX_MCU_RING_SIZE_WA	1024
++/* scatter-gather of mcu event is not supported in connac3 */
++#define MT7996_RX_MCU_BUF_SIZE		(2048 + \
++					 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+ 
+ #define MT7996_FIRMWARE_WA		"mediatek/mt7996/mt7996_wa.bin"
+ #define MT7996_FIRMWARE_WM		"mediatek/mt7996/mt7996_wm.bin"
+diff --git a/drivers/net/wireless/realtek/rtw88/coex.c b/drivers/net/wireless/realtek/rtw88/coex.c
+index a99776af56c27f..c476e65c4d71e7 100644
+--- a/drivers/net/wireless/realtek/rtw88/coex.c
++++ b/drivers/net/wireless/realtek/rtw88/coex.c
+@@ -309,7 +309,7 @@ static void rtw_coex_tdma_timer_base(struct rtw_dev *rtwdev, u8 type)
+ {
+ 	struct rtw_coex *coex = &rtwdev->coex;
+ 	struct rtw_coex_stat *coex_stat = &coex->stat;
+-	u8 para[2] = {0};
++	u8 para[6] = {};
+ 	u8 times;
+ 	u16 tbtt_interval = coex_stat->wl_beacon_interval;
+ 
+diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
+index 1dbe1cdbc3fd45..3157cd834233dc 100644
+--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c
++++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
+@@ -3993,7 +3993,8 @@ static void rtw8822c_dpk_cal_coef1(struct rtw_dev *rtwdev)
+ 	rtw_write32(rtwdev, REG_NCTL0, 0x00001148);
+ 	rtw_write32(rtwdev, REG_NCTL0, 0x00001149);
+ 
+-	check_hw_ready(rtwdev, 0x2d9c, MASKBYTE0, 0x55);
++	if (!check_hw_ready(rtwdev, 0x2d9c, MASKBYTE0, 0x55))
++		rtw_warn(rtwdev, "DPK stuck, performance may be suboptimal");
+ 
+ 	rtw_write8(rtwdev, 0x1b10, 0x0);
+ 	rtw_write32_mask(rtwdev, REG_NCTL0, BIT_SUBPAGE, 0x0000000c);
+diff --git a/drivers/net/wireless/realtek/rtw88/sdio.c b/drivers/net/wireless/realtek/rtw88/sdio.c
+index 1d62b38526c486..5b8e88c9759d12 100644
+--- a/drivers/net/wireless/realtek/rtw88/sdio.c
++++ b/drivers/net/wireless/realtek/rtw88/sdio.c
+@@ -718,10 +718,7 @@ static u8 rtw_sdio_get_tx_qsel(struct rtw_dev *rtwdev, struct sk_buff *skb,
+ 	case RTW_TX_QUEUE_H2C:
+ 		return TX_DESC_QSEL_H2C;
+ 	case RTW_TX_QUEUE_MGMT:
+-		if (rtw_chip_wcpu_11n(rtwdev))
+-			return TX_DESC_QSEL_HIGH;
+-		else
+-			return TX_DESC_QSEL_MGMT;
++		return TX_DESC_QSEL_MGMT;
+ 	case RTW_TX_QUEUE_HI0:
+ 		return TX_DESC_QSEL_HIGH;
+ 	default:
+@@ -1228,10 +1225,7 @@ static void rtw_sdio_process_tx_queue(struct rtw_dev *rtwdev,
+ 		return;
+ 	}
+ 
+-	if (queue <= RTW_TX_QUEUE_VO)
+-		rtw_sdio_indicate_tx_status(rtwdev, skb);
+-	else
+-		dev_kfree_skb_any(skb);
++	rtw_sdio_indicate_tx_status(rtwdev, skb);
+ }
+ 
+ static void rtw_sdio_tx_handler(struct work_struct *work)
+diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c
+index e5c90050e71158..7dbce3b10a7de4 100644
+--- a/drivers/net/wireless/realtek/rtw89/fw.c
++++ b/drivers/net/wireless/realtek/rtw89/fw.c
+@@ -5016,7 +5016,7 @@ int rtw89_fw_h2c_scan_list_offload_be(struct rtw89_dev *rtwdev, int ch_num,
+ 	return 0;
+ }
+ 
+-#define RTW89_SCAN_DELAY_TSF_UNIT 104800
++#define RTW89_SCAN_DELAY_TSF_UNIT 1000000
+ int rtw89_fw_h2c_scan_offload_ax(struct rtw89_dev *rtwdev,
+ 				 struct rtw89_scan_option *option,
+ 				 struct rtw89_vif_link *rtwvif_link,
+diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c
+index 0ac84f968994b4..e203d3b2a82749 100644
+--- a/drivers/net/wireless/realtek/rtw89/pci.c
++++ b/drivers/net/wireless/realtek/rtw89/pci.c
+@@ -228,7 +228,7 @@ int rtw89_pci_sync_skb_for_device_and_validate_rx_info(struct rtw89_dev *rtwdev,
+ 						       struct sk_buff *skb)
+ {
+ 	struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb);
+-	int rx_tag_retry = 100;
++	int rx_tag_retry = 1000;
+ 	int ret;
+ 
+ 	do {
+diff --git a/drivers/net/wwan/mhi_wwan_mbim.c b/drivers/net/wwan/mhi_wwan_mbim.c
+index 8755c5e6a65b30..c814fbd756a1e7 100644
+--- a/drivers/net/wwan/mhi_wwan_mbim.c
++++ b/drivers/net/wwan/mhi_wwan_mbim.c
+@@ -550,8 +550,8 @@ static int mhi_mbim_newlink(void *ctxt, struct net_device *ndev, u32 if_id,
+ 	struct mhi_mbim_link *link = wwan_netdev_drvpriv(ndev);
+ 	struct mhi_mbim_context *mbim = ctxt;
+ 
+-	link->session = if_id;
+ 	link->mbim = mbim;
++	link->session = mhi_mbim_get_link_mux_id(link->mbim->mdev->mhi_cntrl) + if_id;
+ 	link->ndev = ndev;
+ 	u64_stats_init(&link->rx_syncp);
+ 	u64_stats_init(&link->tx_syncp);
+@@ -607,7 +607,7 @@ static int mhi_mbim_probe(struct mhi_device *mhi_dev, const struct mhi_device_id
+ {
+ 	struct mhi_controller *cntrl = mhi_dev->mhi_cntrl;
+ 	struct mhi_mbim_context *mbim;
+-	int err, link_id;
++	int err;
+ 
+ 	mbim = devm_kzalloc(&mhi_dev->dev, sizeof(*mbim), GFP_KERNEL);
+ 	if (!mbim)
+@@ -628,11 +628,8 @@ static int mhi_mbim_probe(struct mhi_device *mhi_dev, const struct mhi_device_id
+ 	/* Number of transfer descriptors determines size of the queue */
+ 	mbim->rx_queue_sz = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);
+ 
+-	/* Get the corresponding mux_id from mhi */
+-	link_id = mhi_mbim_get_link_mux_id(cntrl);
+-
+ 	/* Register wwan link ops with MHI controller representing WWAN instance */
+-	return wwan_register_ops(&cntrl->mhi_dev->dev, &mhi_mbim_wwan_ops, mbim, link_id);
++	return wwan_register_ops(&cntrl->mhi_dev->dev, &mhi_mbim_wwan_ops, mbim, 0);
+ }
+ 
+ static void mhi_mbim_remove(struct mhi_device *mhi_dev)
+diff --git a/drivers/net/wwan/t7xx/t7xx_netdev.c b/drivers/net/wwan/t7xx/t7xx_netdev.c
+index 91fa082e9cab80..fc0a7cb181df2c 100644
+--- a/drivers/net/wwan/t7xx/t7xx_netdev.c
++++ b/drivers/net/wwan/t7xx/t7xx_netdev.c
+@@ -302,7 +302,7 @@ static int t7xx_ccmni_wwan_newlink(void *ctxt, struct net_device *dev, u32 if_id
+ 	ccmni->ctlb = ctlb;
+ 	ccmni->dev = dev;
+ 	atomic_set(&ccmni->usage, 0);
+-	ctlb->ccmni_inst[if_id] = ccmni;
++	WRITE_ONCE(ctlb->ccmni_inst[if_id], ccmni);
+ 
+ 	ret = register_netdevice(dev);
+ 	if (ret)
+@@ -324,6 +324,7 @@ static void t7xx_ccmni_wwan_dellink(void *ctxt, struct net_device *dev, struct l
+ 	if (WARN_ON(ctlb->ccmni_inst[if_id] != ccmni))
+ 		return;
+ 
++	WRITE_ONCE(ctlb->ccmni_inst[if_id], NULL);
+ 	unregister_netdevice(dev);
+ }
+ 
+@@ -419,7 +420,7 @@ static void t7xx_ccmni_recv_skb(struct t7xx_ccmni_ctrl *ccmni_ctlb, struct sk_bu
+ 
+ 	skb_cb = T7XX_SKB_CB(skb);
+ 	netif_id = skb_cb->netif_idx;
+-	ccmni = ccmni_ctlb->ccmni_inst[netif_id];
++	ccmni = READ_ONCE(ccmni_ctlb->ccmni_inst[netif_id]);
+ 	if (!ccmni) {
+ 		dev_kfree_skb(skb);
+ 		return;
+@@ -441,7 +442,7 @@ static void t7xx_ccmni_recv_skb(struct t7xx_ccmni_ctrl *ccmni_ctlb, struct sk_bu
+ 
+ static void t7xx_ccmni_queue_tx_irq_notify(struct t7xx_ccmni_ctrl *ctlb, int qno)
+ {
+-	struct t7xx_ccmni *ccmni = ctlb->ccmni_inst[0];
++	struct t7xx_ccmni *ccmni = READ_ONCE(ctlb->ccmni_inst[0]);
+ 	struct netdev_queue *net_queue;
+ 
+ 	if (netif_running(ccmni->dev) && atomic_read(&ccmni->usage) > 0) {
+@@ -453,7 +454,7 @@ static void t7xx_ccmni_queue_tx_irq_notify(struct t7xx_ccmni_ctrl *ctlb, int qno
+ 
+ static void t7xx_ccmni_queue_tx_full_notify(struct t7xx_ccmni_ctrl *ctlb, int qno)
+ {
+-	struct t7xx_ccmni *ccmni = ctlb->ccmni_inst[0];
++	struct t7xx_ccmni *ccmni = READ_ONCE(ctlb->ccmni_inst[0]);
+ 	struct netdev_queue *net_queue;
+ 
+ 	if (atomic_read(&ccmni->usage) > 0) {
+@@ -471,7 +472,7 @@ static void t7xx_ccmni_queue_state_notify(struct t7xx_pci_dev *t7xx_dev,
+ 	if (ctlb->md_sta != MD_STATE_READY)
+ 		return;
+ 
+-	if (!ctlb->ccmni_inst[0]) {
++	if (!READ_ONCE(ctlb->ccmni_inst[0])) {
+ 		dev_warn(&t7xx_dev->pdev->dev, "No netdev registered yet\n");
+ 		return;
+ 	}
+diff --git a/drivers/nvme/host/constants.c b/drivers/nvme/host/constants.c
+index 2b9e6cfaf2a80a..1a0058be582104 100644
+--- a/drivers/nvme/host/constants.c
++++ b/drivers/nvme/host/constants.c
+@@ -145,7 +145,7 @@ static const char * const nvme_statuses[] = {
+ 	[NVME_SC_BAD_ATTRIBUTES] = "Conflicting Attributes",
+ 	[NVME_SC_INVALID_PI] = "Invalid Protection Information",
+ 	[NVME_SC_READ_ONLY] = "Attempted Write to Read Only Range",
+-	[NVME_SC_ONCS_NOT_SUPPORTED] = "ONCS Not Supported",
++	[NVME_SC_CMD_SIZE_LIM_EXCEEDED] = "Command Size Limits Exceeded",
+ 	[NVME_SC_ZONE_BOUNDARY_ERROR] = "Zoned Boundary Error",
+ 	[NVME_SC_ZONE_FULL] = "Zone Is Full",
+ 	[NVME_SC_ZONE_READ_ONLY] = "Zone Is Read Only",
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 98dad1bdff440a..eca764fede48f2 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -284,7 +284,6 @@ static blk_status_t nvme_error_status(u16 status)
+ 	case NVME_SC_NS_NOT_READY:
+ 		return BLK_STS_TARGET;
+ 	case NVME_SC_BAD_ATTRIBUTES:
+-	case NVME_SC_ONCS_NOT_SUPPORTED:
+ 	case NVME_SC_INVALID_OPCODE:
+ 	case NVME_SC_INVALID_FIELD:
+ 	case NVME_SC_INVALID_NS:
+diff --git a/drivers/nvme/host/pr.c b/drivers/nvme/host/pr.c
+index dc7922f226004f..80dd09aa01a3b9 100644
+--- a/drivers/nvme/host/pr.c
++++ b/drivers/nvme/host/pr.c
+@@ -82,8 +82,6 @@ static int nvme_status_to_pr_err(int status)
+ 		return PR_STS_SUCCESS;
+ 	case NVME_SC_RESERVATION_CONFLICT:
+ 		return PR_STS_RESERVATION_CONFLICT;
+-	case NVME_SC_ONCS_NOT_SUPPORTED:
+-		return -EOPNOTSUPP;
+ 	case NVME_SC_BAD_ATTRIBUTES:
+ 	case NVME_SC_INVALID_OPCODE:
+ 	case NVME_SC_INVALID_FIELD:
+diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
+index ed2424f8a396e0..4606c881366691 100644
+--- a/drivers/nvme/target/core.c
++++ b/drivers/nvme/target/core.c
+@@ -62,14 +62,7 @@ inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno)
+ 		return  NVME_SC_LBA_RANGE | NVME_STATUS_DNR;
+ 	case -EOPNOTSUPP:
+ 		req->error_loc = offsetof(struct nvme_common_command, opcode);
+-		switch (req->cmd->common.opcode) {
+-		case nvme_cmd_dsm:
+-		case nvme_cmd_write_zeroes:
+-			return NVME_SC_ONCS_NOT_SUPPORTED | NVME_STATUS_DNR;
+-		default:
+-			return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
+-		}
+-		break;
++		return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
+ 	case -ENODATA:
+ 		req->error_loc = offsetof(struct nvme_rw_command, nsid);
+ 		return NVME_SC_ACCESS_DENIED;
+diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
+index da195d61a9664c..f1b5ffc00ce88b 100644
+--- a/drivers/nvme/target/fcloop.c
++++ b/drivers/nvme/target/fcloop.c
+@@ -623,12 +623,13 @@ fcloop_fcp_recv_work(struct work_struct *work)
+ {
+ 	struct fcloop_fcpreq *tfcp_req =
+ 		container_of(work, struct fcloop_fcpreq, fcp_rcv_work);
+-	struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
++	struct nvmefc_fcp_req *fcpreq;
+ 	unsigned long flags;
+ 	int ret = 0;
+ 	bool aborted = false;
+ 
+ 	spin_lock_irqsave(&tfcp_req->reqlock, flags);
++	fcpreq = tfcp_req->fcpreq;
+ 	switch (tfcp_req->inistate) {
+ 	case INI_IO_START:
+ 		tfcp_req->inistate = INI_IO_ACTIVE;
+@@ -643,16 +644,19 @@ fcloop_fcp_recv_work(struct work_struct *work)
+ 	}
+ 	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
+ 
+-	if (unlikely(aborted))
+-		ret = -ECANCELED;
+-	else {
+-		if (likely(!check_for_drop(tfcp_req)))
+-			ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport,
+-				&tfcp_req->tgt_fcp_req,
+-				fcpreq->cmdaddr, fcpreq->cmdlen);
+-		else
+-			pr_info("%s: dropped command ********\n", __func__);
++	if (unlikely(aborted)) {
++		/* the abort handler will call fcloop_call_host_done */
++		return;
++	}
++
++	if (unlikely(check_for_drop(tfcp_req))) {
++		pr_info("%s: dropped command ********\n", __func__);
++		return;
+ 	}
++
++	ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport,
++				   &tfcp_req->tgt_fcp_req,
++				   fcpreq->cmdaddr, fcpreq->cmdlen);
+ 	if (ret)
+ 		fcloop_call_host_done(fcpreq, tfcp_req, ret);
+ }
+@@ -667,9 +671,10 @@ fcloop_fcp_abort_recv_work(struct work_struct *work)
+ 	unsigned long flags;
+ 
+ 	spin_lock_irqsave(&tfcp_req->reqlock, flags);
+-	fcpreq = tfcp_req->fcpreq;
+ 	switch (tfcp_req->inistate) {
+ 	case INI_IO_ABORTED:
++		fcpreq = tfcp_req->fcpreq;
++		tfcp_req->fcpreq = NULL;
+ 		break;
+ 	case INI_IO_COMPLETED:
+ 		completed = true;
+@@ -691,10 +696,6 @@ fcloop_fcp_abort_recv_work(struct work_struct *work)
+ 		nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport,
+ 					&tfcp_req->tgt_fcp_req);
+ 
+-	spin_lock_irqsave(&tfcp_req->reqlock, flags);
+-	tfcp_req->fcpreq = NULL;
+-	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
+-
+ 	fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
+ 	/* call_host_done releases reference for abort downcall */
+ }
+diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
+index eaf31c823cbe88..73ecbc13c5b23d 100644
+--- a/drivers/nvme/target/io-cmd-bdev.c
++++ b/drivers/nvme/target/io-cmd-bdev.c
+@@ -145,15 +145,8 @@ u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts)
+ 		req->error_loc = offsetof(struct nvme_rw_command, slba);
+ 		break;
+ 	case BLK_STS_NOTSUPP:
++		status = NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
+ 		req->error_loc = offsetof(struct nvme_common_command, opcode);
+-		switch (req->cmd->common.opcode) {
+-		case nvme_cmd_dsm:
+-		case nvme_cmd_write_zeroes:
+-			status = NVME_SC_ONCS_NOT_SUPPORTED | NVME_STATUS_DNR;
+-			break;
+-		default:
+-			status = NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
+-		}
+ 		break;
+ 	case BLK_STS_MEDIUM:
+ 		status = NVME_SC_ACCESS_DENIED;
+diff --git a/drivers/nvmem/zynqmp_nvmem.c b/drivers/nvmem/zynqmp_nvmem.c
+index 8682adaacd692d..7da717d6c7faf3 100644
+--- a/drivers/nvmem/zynqmp_nvmem.c
++++ b/drivers/nvmem/zynqmp_nvmem.c
+@@ -213,6 +213,7 @@ static int zynqmp_nvmem_probe(struct platform_device *pdev)
+ 	econfig.word_size = 1;
+ 	econfig.size = ZYNQMP_NVMEM_SIZE;
+ 	econfig.dev = dev;
++	econfig.priv = dev;
+ 	econfig.add_legacy_fixed_of_cells = true;
+ 	econfig.reg_read = zynqmp_nvmem_read;
+ 	econfig.reg_write = zynqmp_nvmem_write;
+diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
+index 576e9beefc7c8f..9a72f75e5c2d8d 100644
+--- a/drivers/of/unittest.c
++++ b/drivers/of/unittest.c
+@@ -1916,15 +1916,16 @@ static int __init unittest_data_add(void)
+ 	rc = of_resolve_phandles(unittest_data_node);
+ 	if (rc) {
+ 		pr_err("%s: Failed to resolve phandles (rc=%i)\n", __func__, rc);
+-		of_overlay_mutex_unlock();
+-		return -EINVAL;
++		rc = -EINVAL;
++		goto unlock;
+ 	}
+ 
+ 	/* attach the sub-tree to live tree */
+ 	if (!of_root) {
+ 		pr_warn("%s: no live tree to attach sub-tree\n", __func__);
+ 		kfree(unittest_data);
+-		return -ENODEV;
++		rc = -ENODEV;
++		goto unlock;
+ 	}
+ 
+ 	EXPECT_BEGIN(KERN_INFO,
+@@ -1943,9 +1944,10 @@ static int __init unittest_data_add(void)
+ 	EXPECT_END(KERN_INFO,
+ 		   "Duplicate name in testcase-data, renamed to \"duplicate-name#1\"");
+ 
++unlock:
+ 	of_overlay_mutex_unlock();
+ 
+-	return 0;
++	return rc;
+ }
+ 
+ #ifdef CONFIG_OF_OVERLAY
+diff --git a/drivers/pci/controller/cadence/pcie-cadence-host.c b/drivers/pci/controller/cadence/pcie-cadence-host.c
+index 8af95e9da7cec6..741e10a575ec75 100644
+--- a/drivers/pci/controller/cadence/pcie-cadence-host.c
++++ b/drivers/pci/controller/cadence/pcie-cadence-host.c
+@@ -570,14 +570,5 @@ int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
+ 	if (!bridge->ops)
+ 		bridge->ops = &cdns_pcie_host_ops;
+ 
+-	ret = pci_host_probe(bridge);
+-	if (ret < 0)
+-		goto err_init;
+-
+-	return 0;
+-
+- err_init:
+-	pm_runtime_put_sync(dev);
+-
+-	return ret;
++	return pci_host_probe(bridge);
+ }
+diff --git a/drivers/pci/controller/dwc/pcie-rcar-gen4.c b/drivers/pci/controller/dwc/pcie-rcar-gen4.c
+index 3a5511c3f7d970..5d77a01648606c 100644
+--- a/drivers/pci/controller/dwc/pcie-rcar-gen4.c
++++ b/drivers/pci/controller/dwc/pcie-rcar-gen4.c
+@@ -403,6 +403,7 @@ static const struct pci_epc_features rcar_gen4_pcie_epc_features = {
+ 	.msix_capable = false,
+ 	.bar[BAR_1] = { .type = BAR_RESERVED, },
+ 	.bar[BAR_3] = { .type = BAR_RESERVED, },
++	.bar[BAR_4] = { .type = BAR_FIXED, .fixed_size = 256 },
+ 	.bar[BAR_5] = { .type = BAR_RESERVED, },
+ 	.align = SZ_1M,
+ };
+diff --git a/drivers/pci/controller/pcie-apple.c b/drivers/pci/controller/pcie-apple.c
+index fefab2758a0646..ddc65368e77d19 100644
+--- a/drivers/pci/controller/pcie-apple.c
++++ b/drivers/pci/controller/pcie-apple.c
+@@ -541,7 +541,7 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie,
+ 	rmw_set(PORT_APPCLK_EN, port->base + PORT_APPCLK);
+ 
+ 	/* Assert PERST# before setting up the clock */
+-	gpiod_set_value(reset, 1);
++	gpiod_set_value_cansleep(reset, 1);
+ 
+ 	ret = apple_pcie_setup_refclk(pcie, port);
+ 	if (ret < 0)
+@@ -552,7 +552,7 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie,
+ 
+ 	/* Deassert PERST# */
+ 	rmw_set(PORT_PERST_OFF, port->base + PORT_PERST);
+-	gpiod_set_value(reset, 0);
++	gpiod_set_value_cansleep(reset, 0);
+ 
+ 	/* Wait for 100ms after PERST# deassertion (PCIe r5.0, 6.6.1) */
+ 	msleep(100);
+diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c
+index 50bc2892a36c54..963d2f3aa5d476 100644
+--- a/drivers/pci/endpoint/pci-epf-core.c
++++ b/drivers/pci/endpoint/pci-epf-core.c
+@@ -236,12 +236,13 @@ void pci_epf_free_space(struct pci_epf *epf, void *addr, enum pci_barno bar,
+ 	}
+ 
+ 	dev = epc->dev.parent;
+-	dma_free_coherent(dev, epf_bar[bar].size, addr,
++	dma_free_coherent(dev, epf_bar[bar].aligned_size, addr,
+ 			  epf_bar[bar].phys_addr);
+ 
+ 	epf_bar[bar].phys_addr = 0;
+ 	epf_bar[bar].addr = NULL;
+ 	epf_bar[bar].size = 0;
++	epf_bar[bar].aligned_size = 0;
+ 	epf_bar[bar].barno = 0;
+ 	epf_bar[bar].flags = 0;
+ }
+@@ -264,7 +265,7 @@ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar,
+ 			  enum pci_epc_interface_type type)
+ {
+ 	u64 bar_fixed_size = epc_features->bar[bar].fixed_size;
+-	size_t align = epc_features->align;
++	size_t aligned_size, align = epc_features->align;
+ 	struct pci_epf_bar *epf_bar;
+ 	dma_addr_t phys_addr;
+ 	struct pci_epc *epc;
+@@ -281,12 +282,18 @@ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar,
+ 			return NULL;
+ 		}
+ 		size = bar_fixed_size;
++	} else {
++		/* BAR size must be power of two */
++		size = roundup_pow_of_two(size);
+ 	}
+ 
+-	if (align)
+-		size = ALIGN(size, align);
+-	else
+-		size = roundup_pow_of_two(size);
++	/*
++	 * Allocate enough memory to accommodate the iATU alignment
++	 * requirement.  In most cases, this will be the same as .size but
++	 * it might be different if, for example, the fixed size of a BAR
++	 * is smaller than align.
++	 */
++	aligned_size = align ? ALIGN(size, align) : size;
+ 
+ 	if (type == PRIMARY_INTERFACE) {
+ 		epc = epf->epc;
+@@ -297,7 +304,7 @@ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar,
+ 	}
+ 
+ 	dev = epc->dev.parent;
+-	space = dma_alloc_coherent(dev, size, &phys_addr, GFP_KERNEL);
++	space = dma_alloc_coherent(dev, aligned_size, &phys_addr, GFP_KERNEL);
+ 	if (!space) {
+ 		dev_err(dev, "failed to allocate mem space\n");
+ 		return NULL;
+@@ -306,6 +313,7 @@ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar,
+ 	epf_bar[bar].phys_addr = phys_addr;
+ 	epf_bar[bar].addr = space;
+ 	epf_bar[bar].size = size;
++	epf_bar[bar].aligned_size = aligned_size;
+ 	epf_bar[bar].barno = bar;
+ 	if (upper_32_bits(size) || epc_features->bar[bar].only_64bit)
+ 		epf_bar[bar].flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
+diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
+index af370628e58393..b78e0e41732445 100644
+--- a/drivers/pci/pci-acpi.c
++++ b/drivers/pci/pci-acpi.c
+@@ -1676,24 +1676,19 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
+ 		return NULL;
+ 
+ 	root_ops = kzalloc(sizeof(*root_ops), GFP_KERNEL);
+-	if (!root_ops) {
+-		kfree(ri);
+-		return NULL;
+-	}
++	if (!root_ops)
++		goto free_ri;
+ 
+ 	ri->cfg = pci_acpi_setup_ecam_mapping(root);
+-	if (!ri->cfg) {
+-		kfree(ri);
+-		kfree(root_ops);
+-		return NULL;
+-	}
++	if (!ri->cfg)
++		goto free_root_ops;
+ 
+ 	root_ops->release_info = pci_acpi_generic_release_info;
+ 	root_ops->prepare_resources = pci_acpi_root_prepare_resources;
+ 	root_ops->pci_ops = (struct pci_ops *)&ri->cfg->ops->pci_ops;
+ 	bus = acpi_pci_root_create(root, root_ops, &ri->common, ri->cfg);
+ 	if (!bus)
+-		return NULL;
++		goto free_cfg;
+ 
+ 	/* If we must preserve the resource configuration, claim now */
+ 	host = pci_find_host_bridge(bus);
+@@ -1710,6 +1705,14 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
+ 		pcie_bus_configure_settings(child);
+ 
+ 	return bus;
++
++free_cfg:
++	pci_ecam_free(ri->cfg);
++free_root_ops:
++	kfree(root_ops);
++free_ri:
++	kfree(ri);
++	return NULL;
+ }
+ 
+ void pcibios_add_bus(struct pci_bus *bus)
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 25c07af1686b9b..7ca5422feb2d44 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -4945,7 +4945,7 @@ int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type)
+ 		delay);
+ 	if (!pcie_wait_for_link_delay(dev, true, delay)) {
+ 		/* Did not train, no need to wait any further */
+-		pci_info(dev, "Data Link Layer Link Active not set in 1000 msec\n");
++		pci_info(dev, "Data Link Layer Link Active not set in %d msec\n", delay);
+ 		return -ENOTTY;
+ 	}
+ 
+diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c
+index 2b6ef7efa3c113..cdc54315d879f4 100644
+--- a/drivers/pci/pcie/dpc.c
++++ b/drivers/pci/pcie/dpc.c
+@@ -260,40 +260,48 @@ static int dpc_get_aer_uncorrect_severity(struct pci_dev *dev,
+ void dpc_process_error(struct pci_dev *pdev)
+ {
+ 	u16 cap = pdev->dpc_cap, status, source, reason, ext_reason;
+-	struct aer_err_info info;
++	struct aer_err_info info = {};
+ 
+ 	pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status);
+-	pci_read_config_word(pdev, cap + PCI_EXP_DPC_SOURCE_ID, &source);
+-
+-	pci_info(pdev, "containment event, status:%#06x source:%#06x\n",
+-		 status, source);
+ 
+ 	reason = status & PCI_EXP_DPC_STATUS_TRIGGER_RSN;
+-	ext_reason = status & PCI_EXP_DPC_STATUS_TRIGGER_RSN_EXT;
+-	pci_warn(pdev, "%s detected\n",
+-		 (reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_UNCOR) ?
+-		 "unmasked uncorrectable error" :
+-		 (reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_NFE) ?
+-		 "ERR_NONFATAL" :
+-		 (reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_FE) ?
+-		 "ERR_FATAL" :
+-		 (ext_reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_RP_PIO) ?
+-		 "RP PIO error" :
+-		 (ext_reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_SW_TRIGGER) ?
+-		 "software trigger" :
+-		 "reserved error");
+-
+-	/* show RP PIO error detail information */
+-	if (pdev->dpc_rp_extensions &&
+-	    reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_IN_EXT &&
+-	    ext_reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_RP_PIO)
+-		dpc_process_rp_pio_error(pdev);
+-	else if (reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_UNCOR &&
+-		 dpc_get_aer_uncorrect_severity(pdev, &info) &&
+-		 aer_get_device_error_info(pdev, &info)) {
+-		aer_print_error(pdev, &info);
+-		pci_aer_clear_nonfatal_status(pdev);
+-		pci_aer_clear_fatal_status(pdev);
++
++	switch (reason) {
++	case PCI_EXP_DPC_STATUS_TRIGGER_RSN_UNCOR:
++		pci_warn(pdev, "containment event, status:%#06x: unmasked uncorrectable error detected\n",
++			 status);
++		if (dpc_get_aer_uncorrect_severity(pdev, &info) &&
++		    aer_get_device_error_info(pdev, &info)) {
++			aer_print_error(pdev, &info);
++			pci_aer_clear_nonfatal_status(pdev);
++			pci_aer_clear_fatal_status(pdev);
++		}
++		break;
++	case PCI_EXP_DPC_STATUS_TRIGGER_RSN_NFE:
++	case PCI_EXP_DPC_STATUS_TRIGGER_RSN_FE:
++		pci_read_config_word(pdev, cap + PCI_EXP_DPC_SOURCE_ID,
++				     &source);
++		pci_warn(pdev, "containment event, status:%#06x, %s received from %04x:%02x:%02x.%d\n",
++			 status,
++			 (reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_FE) ?
++				"ERR_FATAL" : "ERR_NONFATAL",
++			 pci_domain_nr(pdev->bus), PCI_BUS_NUM(source),
++			 PCI_SLOT(source), PCI_FUNC(source));
++		break;
++	case PCI_EXP_DPC_STATUS_TRIGGER_RSN_IN_EXT:
++		ext_reason = status & PCI_EXP_DPC_STATUS_TRIGGER_RSN_EXT;
++		pci_warn(pdev, "containment event, status:%#06x: %s detected\n",
++			 status,
++			 (ext_reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_RP_PIO) ?
++			 "RP PIO error" :
++			 (ext_reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_SW_TRIGGER) ?
++			 "software trigger" :
++			 "reserved error");
++		/* show RP PIO error detail information */
++		if (ext_reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_RP_PIO &&
++		    pdev->dpc_rp_extensions)
++			dpc_process_rp_pio_error(pdev);
++		break;
+ 	}
+ }
+ 
+diff --git a/drivers/perf/amlogic/meson_ddr_pmu_core.c b/drivers/perf/amlogic/meson_ddr_pmu_core.c
+index 07446d784a1a64..c1e755c356a333 100644
+--- a/drivers/perf/amlogic/meson_ddr_pmu_core.c
++++ b/drivers/perf/amlogic/meson_ddr_pmu_core.c
+@@ -511,7 +511,7 @@ int meson_ddr_pmu_create(struct platform_device *pdev)
+ 
+ 	fmt_attr_fill(pmu->info.hw_info->fmt_attr);
+ 
+-	pmu->cpu = smp_processor_id();
++	pmu->cpu = raw_smp_processor_id();
+ 
+ 	name = devm_kasprintf(&pdev->dev, GFP_KERNEL, DDR_PERF_DEV_NAME);
+ 	if (!name)
+diff --git a/drivers/perf/arm-ni.c b/drivers/perf/arm-ni.c
+index 90fcfe693439ef..b87d3a9ba7d545 100644
+--- a/drivers/perf/arm-ni.c
++++ b/drivers/perf/arm-ni.c
+@@ -576,6 +576,23 @@ static int arm_ni_init_cd(struct arm_ni *ni, struct arm_ni_node *node, u64 res_s
+ 	return err;
+ }
+ 
++static void arm_ni_remove(struct platform_device *pdev)
++{
++	struct arm_ni *ni = platform_get_drvdata(pdev);
++
++	for (int i = 0; i < ni->num_cds; i++) {
++		struct arm_ni_cd *cd = ni->cds + i;
++
++		if (!cd->pmu_base)
++			continue;
++
++		writel_relaxed(0, cd->pmu_base + NI_PMCR);
++		writel_relaxed(U32_MAX, cd->pmu_base + NI_PMINTENCLR);
++		perf_pmu_unregister(&cd->pmu);
++		cpuhp_state_remove_instance_nocalls(arm_ni_hp_state, &cd->cpuhp_node);
++	}
++}
++
+ static void arm_ni_probe_domain(void __iomem *base, struct arm_ni_node *node)
+ {
+ 	u32 reg = readl_relaxed(base + NI_NODE_TYPE);
+@@ -644,6 +661,7 @@ static int arm_ni_probe(struct platform_device *pdev)
+ 	ni->num_cds = num_cds;
+ 	ni->part = part;
+ 	ni->id = atomic_fetch_inc(&id);
++	platform_set_drvdata(pdev, ni);
+ 
+ 	for (int v = 0; v < cfg.num_components; v++) {
+ 		reg = readl_relaxed(cfg.base + NI_CHILD_PTR(v));
+@@ -657,8 +675,11 @@ static int arm_ni_probe(struct platform_device *pdev)
+ 				reg = readl_relaxed(pd.base + NI_CHILD_PTR(c));
+ 				arm_ni_probe_domain(base + reg, &cd);
+ 				ret = arm_ni_init_cd(ni, &cd, res->start);
+-				if (ret)
++				if (ret) {
++					ni->cds[cd.id].pmu_base = NULL;
++					arm_ni_remove(pdev);
+ 					return ret;
++				}
+ 			}
+ 		}
+ 	}
+@@ -666,23 +687,6 @@ static int arm_ni_probe(struct platform_device *pdev)
+ 	return 0;
+ }
+ 
+-static void arm_ni_remove(struct platform_device *pdev)
+-{
+-	struct arm_ni *ni = platform_get_drvdata(pdev);
+-
+-	for (int i = 0; i < ni->num_cds; i++) {
+-		struct arm_ni_cd *cd = ni->cds + i;
+-
+-		if (!cd->pmu_base)
+-			continue;
+-
+-		writel_relaxed(0, cd->pmu_base + NI_PMCR);
+-		writel_relaxed(U32_MAX, cd->pmu_base + NI_PMINTENCLR);
+-		perf_pmu_unregister(&cd->pmu);
+-		cpuhp_state_remove_instance_nocalls(arm_ni_hp_state, &cd->cpuhp_node);
+-	}
+-}
+-
+ #ifdef CONFIG_OF
+ static const struct of_device_id arm_ni_of_match[] = {
+ 	{ .compatible = "arm,ni-700" },
+diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
+index 8e2cd2c178d6b2..c12efd127a6125 100644
+--- a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
++++ b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
+@@ -2044,12 +2044,16 @@ static void __iomem *qmp_usb_iomap(struct device *dev, struct device_node *np,
+ 					int index, bool exclusive)
+ {
+ 	struct resource res;
++	void __iomem *mem;
+ 
+ 	if (!exclusive) {
+ 		if (of_address_to_resource(np, index, &res))
+ 			return IOMEM_ERR_PTR(-EINVAL);
+ 
+-		return devm_ioremap(dev, res.start, resource_size(&res));
++		mem = devm_ioremap(dev, res.start, resource_size(&res));
++		if (!mem)
++			return IOMEM_ERR_PTR(-ENOMEM);
++		return mem;
+ 	}
+ 
+ 	return devm_of_iomap(dev, np, index, NULL);
+diff --git a/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c b/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c
+index 9b99fdd43f5f5c..5547f8df8e7178 100644
+--- a/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c
++++ b/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c
+@@ -192,6 +192,7 @@
+ #define LN3_TX_SER_RATE_SEL_HBR2	BIT(3)
+ #define LN3_TX_SER_RATE_SEL_HBR3	BIT(2)
+ 
++#define HDMI14_MAX_RATE			340000000
+ #define HDMI20_MAX_RATE			600000000
+ 
+ struct lcpll_config {
+@@ -780,9 +781,7 @@ static int rk_hdptx_ropll_tmds_cmn_config(struct rk_hdptx_phy *hdptx,
+ {
+ 	const struct ropll_config *cfg = NULL;
+ 	struct ropll_config rc = {0};
+-	int i;
+-
+-	hdptx->rate = rate * 100;
++	int ret, i;
+ 
+ 	for (i = 0; i < ARRAY_SIZE(ropll_tmds_cfg); i++)
+ 		if (rate == ropll_tmds_cfg[i].bit_rate) {
+@@ -841,7 +840,11 @@ static int rk_hdptx_ropll_tmds_cmn_config(struct rk_hdptx_phy *hdptx,
+ 	regmap_update_bits(hdptx->regmap, CMN_REG(0086), PLL_PCG_CLK_EN,
+ 			   PLL_PCG_CLK_EN);
+ 
+-	return rk_hdptx_post_enable_pll(hdptx);
++	ret = rk_hdptx_post_enable_pll(hdptx);
++	if (!ret)
++		hdptx->rate = rate * 100;
++
++	return ret;
+ }
+ 
+ static int rk_hdptx_ropll_tmds_mode_config(struct rk_hdptx_phy *hdptx,
+@@ -851,7 +854,7 @@ static int rk_hdptx_ropll_tmds_mode_config(struct rk_hdptx_phy *hdptx,
+ 
+ 	regmap_write(hdptx->regmap, LNTOP_REG(0200), 0x06);
+ 
+-	if (rate >= 3400000) {
++	if (rate > HDMI14_MAX_RATE / 100) {
+ 		/* For 1/40 bitrate clk */
+ 		rk_hdptx_multi_reg_write(hdptx, rk_hdtpx_tmds_lntop_highbr_seq);
+ 	} else {
+diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
+index 93ab277d9943cf..fbe74e4ef320c1 100644
+--- a/drivers/pinctrl/pinctrl-at91.c
++++ b/drivers/pinctrl/pinctrl-at91.c
+@@ -1819,12 +1819,16 @@ static int at91_gpio_probe(struct platform_device *pdev)
+ 	struct at91_gpio_chip *at91_chip = NULL;
+ 	struct gpio_chip *chip;
+ 	struct pinctrl_gpio_range *range;
++	int alias_idx;
+ 	int ret = 0;
+ 	int irq, i;
+-	int alias_idx = of_alias_get_id(np, "gpio");
+ 	uint32_t ngpio;
+ 	char **names;
+ 
++	alias_idx = of_alias_get_id(np, "gpio");
++	if (alias_idx < 0)
++		return alias_idx;
++
+ 	BUG_ON(alias_idx >= ARRAY_SIZE(gpio_chips));
+ 	if (gpio_chips[alias_idx])
+ 		return dev_err_probe(dev, -EBUSY, "%d slot is occupied.\n", alias_idx);
+diff --git a/drivers/pinctrl/qcom/pinctrl-qcm2290.c b/drivers/pinctrl/qcom/pinctrl-qcm2290.c
+index f5c1c427b44e91..61b7c22e963c2c 100644
+--- a/drivers/pinctrl/qcom/pinctrl-qcm2290.c
++++ b/drivers/pinctrl/qcom/pinctrl-qcm2290.c
+@@ -165,6 +165,10 @@ static const struct pinctrl_pin_desc qcm2290_pins[] = {
+ 	PINCTRL_PIN(62, "GPIO_62"),
+ 	PINCTRL_PIN(63, "GPIO_63"),
+ 	PINCTRL_PIN(64, "GPIO_64"),
++	PINCTRL_PIN(65, "GPIO_65"),
++	PINCTRL_PIN(66, "GPIO_66"),
++	PINCTRL_PIN(67, "GPIO_67"),
++	PINCTRL_PIN(68, "GPIO_68"),
+ 	PINCTRL_PIN(69, "GPIO_69"),
+ 	PINCTRL_PIN(70, "GPIO_70"),
+ 	PINCTRL_PIN(71, "GPIO_71"),
+@@ -179,12 +183,17 @@ static const struct pinctrl_pin_desc qcm2290_pins[] = {
+ 	PINCTRL_PIN(80, "GPIO_80"),
+ 	PINCTRL_PIN(81, "GPIO_81"),
+ 	PINCTRL_PIN(82, "GPIO_82"),
++	PINCTRL_PIN(83, "GPIO_83"),
++	PINCTRL_PIN(84, "GPIO_84"),
++	PINCTRL_PIN(85, "GPIO_85"),
+ 	PINCTRL_PIN(86, "GPIO_86"),
+ 	PINCTRL_PIN(87, "GPIO_87"),
+ 	PINCTRL_PIN(88, "GPIO_88"),
+ 	PINCTRL_PIN(89, "GPIO_89"),
+ 	PINCTRL_PIN(90, "GPIO_90"),
+ 	PINCTRL_PIN(91, "GPIO_91"),
++	PINCTRL_PIN(92, "GPIO_92"),
++	PINCTRL_PIN(93, "GPIO_93"),
+ 	PINCTRL_PIN(94, "GPIO_94"),
+ 	PINCTRL_PIN(95, "GPIO_95"),
+ 	PINCTRL_PIN(96, "GPIO_96"),
+diff --git a/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c b/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c
+index 23b4bc1e5da81c..a2ac1702d0dfae 100644
+--- a/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c
++++ b/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c
+@@ -809,8 +809,8 @@ static const struct samsung_pin_ctrl exynosautov920_pin_ctrl[] = {
+ 		.pin_banks	= exynosautov920_pin_banks0,
+ 		.nr_banks	= ARRAY_SIZE(exynosautov920_pin_banks0),
+ 		.eint_wkup_init	= exynos_eint_wkup_init,
+-		.suspend	= exynos_pinctrl_suspend,
+-		.resume		= exynos_pinctrl_resume,
++		.suspend	= exynosautov920_pinctrl_suspend,
++		.resume		= exynosautov920_pinctrl_resume,
+ 		.retention_data	= &exynosautov920_retention_data,
+ 	}, {
+ 		/* pin-controller instance 1 AUD data */
+@@ -821,43 +821,43 @@ static const struct samsung_pin_ctrl exynosautov920_pin_ctrl[] = {
+ 		.pin_banks	= exynosautov920_pin_banks2,
+ 		.nr_banks	= ARRAY_SIZE(exynosautov920_pin_banks2),
+ 		.eint_gpio_init	= exynos_eint_gpio_init,
+-		.suspend	= exynos_pinctrl_suspend,
+-		.resume		= exynos_pinctrl_resume,
++		.suspend	= exynosautov920_pinctrl_suspend,
++		.resume		= exynosautov920_pinctrl_resume,
+ 	}, {
+ 		/* pin-controller instance 3 HSI1 data */
+ 		.pin_banks	= exynosautov920_pin_banks3,
+ 		.nr_banks	= ARRAY_SIZE(exynosautov920_pin_banks3),
+ 		.eint_gpio_init	= exynos_eint_gpio_init,
+-		.suspend	= exynos_pinctrl_suspend,
+-		.resume		= exynos_pinctrl_resume,
++		.suspend	= exynosautov920_pinctrl_suspend,
++		.resume		= exynosautov920_pinctrl_resume,
+ 	}, {
+ 		/* pin-controller instance 4 HSI2 data */
+ 		.pin_banks	= exynosautov920_pin_banks4,
+ 		.nr_banks	= ARRAY_SIZE(exynosautov920_pin_banks4),
+ 		.eint_gpio_init	= exynos_eint_gpio_init,
+-		.suspend	= exynos_pinctrl_suspend,
+-		.resume		= exynos_pinctrl_resume,
++		.suspend	= exynosautov920_pinctrl_suspend,
++		.resume		= exynosautov920_pinctrl_resume,
+ 	}, {
+ 		/* pin-controller instance 5 HSI2UFS data */
+ 		.pin_banks	= exynosautov920_pin_banks5,
+ 		.nr_banks	= ARRAY_SIZE(exynosautov920_pin_banks5),
+ 		.eint_gpio_init	= exynos_eint_gpio_init,
+-		.suspend	= exynos_pinctrl_suspend,
+-		.resume		= exynos_pinctrl_resume,
++		.suspend	= exynosautov920_pinctrl_suspend,
++		.resume		= exynosautov920_pinctrl_resume,
+ 	}, {
+ 		/* pin-controller instance 6 PERIC0 data */
+ 		.pin_banks	= exynosautov920_pin_banks6,
+ 		.nr_banks	= ARRAY_SIZE(exynosautov920_pin_banks6),
+ 		.eint_gpio_init	= exynos_eint_gpio_init,
+-		.suspend	= exynos_pinctrl_suspend,
+-		.resume		= exynos_pinctrl_resume,
++		.suspend	= exynosautov920_pinctrl_suspend,
++		.resume		= exynosautov920_pinctrl_resume,
+ 	}, {
+ 		/* pin-controller instance 7 PERIC1 data */
+ 		.pin_banks	= exynosautov920_pin_banks7,
+ 		.nr_banks	= ARRAY_SIZE(exynosautov920_pin_banks7),
+ 		.eint_gpio_init	= exynos_eint_gpio_init,
+-		.suspend	= exynos_pinctrl_suspend,
+-		.resume		= exynos_pinctrl_resume,
++		.suspend	= exynosautov920_pinctrl_suspend,
++		.resume		= exynosautov920_pinctrl_resume,
+ 	},
+ };
+ 
+@@ -1024,15 +1024,15 @@ static const struct samsung_pin_ctrl gs101_pin_ctrl[] __initconst = {
+ 		.pin_banks	= gs101_pin_alive,
+ 		.nr_banks	= ARRAY_SIZE(gs101_pin_alive),
+ 		.eint_wkup_init = exynos_eint_wkup_init,
+-		.suspend	= exynos_pinctrl_suspend,
+-		.resume		= exynos_pinctrl_resume,
++		.suspend	= gs101_pinctrl_suspend,
++		.resume		= gs101_pinctrl_resume,
+ 	}, {
+ 		/* pin banks of gs101 pin-controller (FAR_ALIVE) */
+ 		.pin_banks	= gs101_pin_far_alive,
+ 		.nr_banks	= ARRAY_SIZE(gs101_pin_far_alive),
+ 		.eint_wkup_init = exynos_eint_wkup_init,
+-		.suspend	= exynos_pinctrl_suspend,
+-		.resume		= exynos_pinctrl_resume,
++		.suspend	= gs101_pinctrl_suspend,
++		.resume		= gs101_pinctrl_resume,
+ 	}, {
+ 		/* pin banks of gs101 pin-controller (GSACORE) */
+ 		.pin_banks	= gs101_pin_gsacore,
+@@ -1046,29 +1046,29 @@ static const struct samsung_pin_ctrl gs101_pin_ctrl[] __initconst = {
+ 		.pin_banks	= gs101_pin_peric0,
+ 		.nr_banks	= ARRAY_SIZE(gs101_pin_peric0),
+ 		.eint_gpio_init = exynos_eint_gpio_init,
+-		.suspend	= exynos_pinctrl_suspend,
+-		.resume		= exynos_pinctrl_resume,
++		.suspend	= gs101_pinctrl_suspend,
++		.resume		= gs101_pinctrl_resume,
+ 	}, {
+ 		/* pin banks of gs101 pin-controller (PERIC1) */
+ 		.pin_banks	= gs101_pin_peric1,
+ 		.nr_banks	= ARRAY_SIZE(gs101_pin_peric1),
+ 		.eint_gpio_init = exynos_eint_gpio_init,
+-		.suspend	= exynos_pinctrl_suspend,
+-		.resume	= exynos_pinctrl_resume,
++		.suspend	= gs101_pinctrl_suspend,
++		.resume		= gs101_pinctrl_resume,
+ 	}, {
+ 		/* pin banks of gs101 pin-controller (HSI1) */
+ 		.pin_banks	= gs101_pin_hsi1,
+ 		.nr_banks	= ARRAY_SIZE(gs101_pin_hsi1),
+ 		.eint_gpio_init = exynos_eint_gpio_init,
+-		.suspend	= exynos_pinctrl_suspend,
+-		.resume		= exynos_pinctrl_resume,
++		.suspend	= gs101_pinctrl_suspend,
++		.resume		= gs101_pinctrl_resume,
+ 	}, {
+ 		/* pin banks of gs101 pin-controller (HSI2) */
+ 		.pin_banks	= gs101_pin_hsi2,
+ 		.nr_banks	= ARRAY_SIZE(gs101_pin_hsi2),
+ 		.eint_gpio_init = exynos_eint_gpio_init,
+-		.suspend	= exynos_pinctrl_suspend,
+-		.resume		= exynos_pinctrl_resume,
++		.suspend	= gs101_pinctrl_suspend,
++		.resume		= gs101_pinctrl_resume,
+ 	},
+ };
+ 
+diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c
+index ac6dc22b37c98e..7887fd41665111 100644
+--- a/drivers/pinctrl/samsung/pinctrl-exynos.c
++++ b/drivers/pinctrl/samsung/pinctrl-exynos.c
+@@ -761,153 +761,187 @@ __init int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d)
+ 	return 0;
+ }
+ 
+-static void exynos_pinctrl_suspend_bank(
+-				struct samsung_pinctrl_drv_data *drvdata,
+-				struct samsung_pin_bank *bank)
++static void exynos_set_wakeup(struct samsung_pin_bank *bank)
+ {
+-	struct exynos_eint_gpio_save *save = bank->soc_priv;
+-	const void __iomem *regs = bank->eint_base;
++	struct exynos_irq_chip *irq_chip;
+ 
+-	if (clk_enable(bank->drvdata->pclk)) {
+-		dev_err(bank->gpio_chip.parent,
+-			"unable to enable clock for saving state\n");
+-		return;
++	if (bank->irq_chip) {
++		irq_chip = bank->irq_chip;
++		irq_chip->set_eint_wakeup_mask(bank->drvdata, irq_chip);
+ 	}
+-
+-	save->eint_con = readl(regs + EXYNOS_GPIO_ECON_OFFSET
+-						+ bank->eint_offset);
+-	save->eint_fltcon0 = readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET
+-						+ 2 * bank->eint_offset);
+-	save->eint_fltcon1 = readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET
+-						+ 2 * bank->eint_offset + 4);
+-	save->eint_mask = readl(regs + bank->irq_chip->eint_mask
+-						+ bank->eint_offset);
+-
+-	clk_disable(bank->drvdata->pclk);
+-
+-	pr_debug("%s: save     con %#010x\n", bank->name, save->eint_con);
+-	pr_debug("%s: save fltcon0 %#010x\n", bank->name, save->eint_fltcon0);
+-	pr_debug("%s: save fltcon1 %#010x\n", bank->name, save->eint_fltcon1);
+-	pr_debug("%s: save    mask %#010x\n", bank->name, save->eint_mask);
+ }
+ 
+-static void exynosauto_pinctrl_suspend_bank(struct samsung_pinctrl_drv_data *drvdata,
+-					    struct samsung_pin_bank *bank)
++void exynos_pinctrl_suspend(struct samsung_pin_bank *bank)
+ {
+ 	struct exynos_eint_gpio_save *save = bank->soc_priv;
+ 	const void __iomem *regs = bank->eint_base;
+ 
+-	if (clk_enable(bank->drvdata->pclk)) {
+-		dev_err(bank->gpio_chip.parent,
+-			"unable to enable clock for saving state\n");
+-		return;
++	if (bank->eint_type == EINT_TYPE_GPIO) {
++		save->eint_con = readl(regs + EXYNOS_GPIO_ECON_OFFSET
++				       + bank->eint_offset);
++		save->eint_fltcon0 = readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET
++					   + 2 * bank->eint_offset);
++		save->eint_fltcon1 = readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET
++					   + 2 * bank->eint_offset + 4);
++		save->eint_mask = readl(regs + bank->irq_chip->eint_mask
++					+ bank->eint_offset);
++
++		pr_debug("%s: save     con %#010x\n",
++			 bank->name, save->eint_con);
++		pr_debug("%s: save fltcon0 %#010x\n",
++			 bank->name, save->eint_fltcon0);
++		pr_debug("%s: save fltcon1 %#010x\n",
++			 bank->name, save->eint_fltcon1);
++		pr_debug("%s: save    mask %#010x\n",
++			 bank->name, save->eint_mask);
++	} else if (bank->eint_type == EINT_TYPE_WKUP) {
++		exynos_set_wakeup(bank);
+ 	}
+-
+-	save->eint_con = readl(regs + bank->pctl_offset + bank->eint_con_offset);
+-	save->eint_mask = readl(regs + bank->pctl_offset + bank->eint_mask_offset);
+-
+-	clk_disable(bank->drvdata->pclk);
+-
+-	pr_debug("%s: save     con %#010x\n", bank->name, save->eint_con);
+-	pr_debug("%s: save    mask %#010x\n", bank->name, save->eint_mask);
+ }
+ 
+-void exynos_pinctrl_suspend(struct samsung_pinctrl_drv_data *drvdata)
++void gs101_pinctrl_suspend(struct samsung_pin_bank *bank)
+ {
+-	struct samsung_pin_bank *bank = drvdata->pin_banks;
+-	struct exynos_irq_chip *irq_chip = NULL;
+-	int i;
++	struct exynos_eint_gpio_save *save = bank->soc_priv;
++	const void __iomem *regs = bank->eint_base;
+ 
+-	for (i = 0; i < drvdata->nr_banks; ++i, ++bank) {
+-		if (bank->eint_type == EINT_TYPE_GPIO) {
+-			if (bank->eint_con_offset)
+-				exynosauto_pinctrl_suspend_bank(drvdata, bank);
+-			else
+-				exynos_pinctrl_suspend_bank(drvdata, bank);
+-		}
+-		else if (bank->eint_type == EINT_TYPE_WKUP) {
+-			if (!irq_chip) {
+-				irq_chip = bank->irq_chip;
+-				irq_chip->set_eint_wakeup_mask(drvdata,
+-							       irq_chip);
+-			}
+-		}
++	if (bank->eint_type == EINT_TYPE_GPIO) {
++		save->eint_con = readl(regs + EXYNOS_GPIO_ECON_OFFSET
++				       + bank->eint_offset);
++
++		save->eint_fltcon0 = readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET
++					   + bank->eint_fltcon_offset);
++
++		/* fltcon1 register only exists for pins 4-7 */
++		if (bank->nr_pins > 4)
++			save->eint_fltcon1 = readl(regs +
++						EXYNOS_GPIO_EFLTCON_OFFSET
++						+ bank->eint_fltcon_offset + 4);
++
++		save->eint_mask = readl(regs + bank->irq_chip->eint_mask
++					+ bank->eint_offset);
++
++		pr_debug("%s: save     con %#010x\n",
++			 bank->name, save->eint_con);
++		pr_debug("%s: save fltcon0 %#010x\n",
++			 bank->name, save->eint_fltcon0);
++		if (bank->nr_pins > 4)
++			pr_debug("%s: save fltcon1 %#010x\n",
++				 bank->name, save->eint_fltcon1);
++		pr_debug("%s: save    mask %#010x\n",
++			 bank->name, save->eint_mask);
++	} else if (bank->eint_type == EINT_TYPE_WKUP) {
++		exynos_set_wakeup(bank);
+ 	}
+ }
+ 
+-static void exynos_pinctrl_resume_bank(
+-				struct samsung_pinctrl_drv_data *drvdata,
+-				struct samsung_pin_bank *bank)
++void exynosautov920_pinctrl_suspend(struct samsung_pin_bank *bank)
+ {
+ 	struct exynos_eint_gpio_save *save = bank->soc_priv;
+-	void __iomem *regs = bank->eint_base;
++	const void __iomem *regs = bank->eint_base;
+ 
+-	if (clk_enable(bank->drvdata->pclk)) {
+-		dev_err(bank->gpio_chip.parent,
+-			"unable to enable clock for restoring state\n");
+-		return;
++	if (bank->eint_type == EINT_TYPE_GPIO) {
++		save->eint_con = readl(regs + bank->pctl_offset +
++				       bank->eint_con_offset);
++		save->eint_mask = readl(regs + bank->pctl_offset +
++					bank->eint_mask_offset);
++		pr_debug("%s: save     con %#010x\n",
++			 bank->name, save->eint_con);
++		pr_debug("%s: save    mask %#010x\n",
++			 bank->name, save->eint_mask);
++	} else if (bank->eint_type == EINT_TYPE_WKUP) {
++		exynos_set_wakeup(bank);
+ 	}
++}
+ 
+-	pr_debug("%s:     con %#010x => %#010x\n", bank->name,
+-			readl(regs + EXYNOS_GPIO_ECON_OFFSET
+-			+ bank->eint_offset), save->eint_con);
+-	pr_debug("%s: fltcon0 %#010x => %#010x\n", bank->name,
+-			readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET
+-			+ 2 * bank->eint_offset), save->eint_fltcon0);
+-	pr_debug("%s: fltcon1 %#010x => %#010x\n", bank->name,
+-			readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET
+-			+ 2 * bank->eint_offset + 4), save->eint_fltcon1);
+-	pr_debug("%s:    mask %#010x => %#010x\n", bank->name,
+-			readl(regs + bank->irq_chip->eint_mask
+-			+ bank->eint_offset), save->eint_mask);
+-
+-	writel(save->eint_con, regs + EXYNOS_GPIO_ECON_OFFSET
+-						+ bank->eint_offset);
+-	writel(save->eint_fltcon0, regs + EXYNOS_GPIO_EFLTCON_OFFSET
+-						+ 2 * bank->eint_offset);
+-	writel(save->eint_fltcon1, regs + EXYNOS_GPIO_EFLTCON_OFFSET
+-						+ 2 * bank->eint_offset + 4);
+-	writel(save->eint_mask, regs + bank->irq_chip->eint_mask
+-						+ bank->eint_offset);
++void gs101_pinctrl_resume(struct samsung_pin_bank *bank)
++{
++	struct exynos_eint_gpio_save *save = bank->soc_priv;
+ 
+-	clk_disable(bank->drvdata->pclk);
++	void __iomem *regs = bank->eint_base;
++	void __iomem *eint_fltcfg0 = regs + EXYNOS_GPIO_EFLTCON_OFFSET
++		     + bank->eint_fltcon_offset;
++
++	if (bank->eint_type == EINT_TYPE_GPIO) {
++		pr_debug("%s:     con %#010x => %#010x\n", bank->name,
++			 readl(regs + EXYNOS_GPIO_ECON_OFFSET
++			       + bank->eint_offset), save->eint_con);
++
++		pr_debug("%s: fltcon0 %#010x => %#010x\n", bank->name,
++			 readl(eint_fltcfg0), save->eint_fltcon0);
++
++		/* fltcon1 register only exists for pins 4-7 */
++		if (bank->nr_pins > 4)
++			pr_debug("%s: fltcon1 %#010x => %#010x\n", bank->name,
++				 readl(eint_fltcfg0 + 4), save->eint_fltcon1);
++
++		pr_debug("%s:    mask %#010x => %#010x\n", bank->name,
++			 readl(regs + bank->irq_chip->eint_mask
++			       + bank->eint_offset), save->eint_mask);
++
++		writel(save->eint_con, regs + EXYNOS_GPIO_ECON_OFFSET
++		       + bank->eint_offset);
++		writel(save->eint_fltcon0, eint_fltcfg0);
++
++		if (bank->nr_pins > 4)
++			writel(save->eint_fltcon1, eint_fltcfg0 + 4);
++		writel(save->eint_mask, regs + bank->irq_chip->eint_mask
++		       + bank->eint_offset);
++	}
+ }
+ 
+-static void exynosauto_pinctrl_resume_bank(struct samsung_pinctrl_drv_data *drvdata,
+-					   struct samsung_pin_bank *bank)
++void exynos_pinctrl_resume(struct samsung_pin_bank *bank)
+ {
+ 	struct exynos_eint_gpio_save *save = bank->soc_priv;
+ 	void __iomem *regs = bank->eint_base;
+ 
+-	if (clk_enable(bank->drvdata->pclk)) {
+-		dev_err(bank->gpio_chip.parent,
+-			"unable to enable clock for restoring state\n");
+-		return;
++	if (bank->eint_type == EINT_TYPE_GPIO) {
++		pr_debug("%s:     con %#010x => %#010x\n", bank->name,
++			 readl(regs + EXYNOS_GPIO_ECON_OFFSET
++			       + bank->eint_offset), save->eint_con);
++		pr_debug("%s: fltcon0 %#010x => %#010x\n", bank->name,
++			 readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET
++			       + 2 * bank->eint_offset), save->eint_fltcon0);
++		pr_debug("%s: fltcon1 %#010x => %#010x\n", bank->name,
++			 readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET
++			       + 2 * bank->eint_offset + 4),
++			 save->eint_fltcon1);
++		pr_debug("%s:    mask %#010x => %#010x\n", bank->name,
++			 readl(regs + bank->irq_chip->eint_mask
++			       + bank->eint_offset), save->eint_mask);
++
++		writel(save->eint_con, regs + EXYNOS_GPIO_ECON_OFFSET
++		       + bank->eint_offset);
++		writel(save->eint_fltcon0, regs + EXYNOS_GPIO_EFLTCON_OFFSET
++		       + 2 * bank->eint_offset);
++		writel(save->eint_fltcon1, regs + EXYNOS_GPIO_EFLTCON_OFFSET
++		       + 2 * bank->eint_offset + 4);
++		writel(save->eint_mask, regs + bank->irq_chip->eint_mask
++		       + bank->eint_offset);
+ 	}
+-
+-	pr_debug("%s:     con %#010x => %#010x\n", bank->name,
+-		 readl(regs + bank->pctl_offset + bank->eint_con_offset), save->eint_con);
+-	pr_debug("%s:    mask %#010x => %#010x\n", bank->name,
+-		 readl(regs + bank->pctl_offset + bank->eint_mask_offset), save->eint_mask);
+-
+-	writel(save->eint_con, regs + bank->pctl_offset + bank->eint_con_offset);
+-	writel(save->eint_mask, regs + bank->pctl_offset + bank->eint_mask_offset);
+-
+-	clk_disable(bank->drvdata->pclk);
+ }
+ 
+-void exynos_pinctrl_resume(struct samsung_pinctrl_drv_data *drvdata)
++void exynosautov920_pinctrl_resume(struct samsung_pin_bank *bank)
+ {
+-	struct samsung_pin_bank *bank = drvdata->pin_banks;
+-	int i;
++	struct exynos_eint_gpio_save *save = bank->soc_priv;
++	void __iomem *regs = bank->eint_base;
+ 
+-	for (i = 0; i < drvdata->nr_banks; ++i, ++bank)
+-		if (bank->eint_type == EINT_TYPE_GPIO) {
+-			if (bank->eint_con_offset)
+-				exynosauto_pinctrl_resume_bank(drvdata, bank);
+-			else
+-				exynos_pinctrl_resume_bank(drvdata, bank);
+-		}
++	if (bank->eint_type == EINT_TYPE_GPIO) {
++		/* exynosautov920 has eint_con_offset for all but one bank */
++		if (!bank->eint_con_offset)
++			exynos_pinctrl_resume(bank);
++
++		pr_debug("%s:     con %#010x => %#010x\n", bank->name,
++			 readl(regs + bank->pctl_offset + bank->eint_con_offset),
++			 save->eint_con);
++		pr_debug("%s:    mask %#010x => %#010x\n", bank->name,
++			 readl(regs + bank->pctl_offset +
++			       bank->eint_mask_offset), save->eint_mask);
++
++		writel(save->eint_con,
++		       regs + bank->pctl_offset + bank->eint_con_offset);
++		writel(save->eint_mask,
++		       regs + bank->pctl_offset + bank->eint_mask_offset);
++	}
+ }
+ 
+ static void exynos_retention_enable(struct samsung_pinctrl_drv_data *drvdata)
+diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.h b/drivers/pinctrl/samsung/pinctrl-exynos.h
+index 97a43fa4dfc567..c70b8ead56b4bc 100644
+--- a/drivers/pinctrl/samsung/pinctrl-exynos.h
++++ b/drivers/pinctrl/samsung/pinctrl-exynos.h
+@@ -211,8 +211,12 @@ struct exynos_muxed_weint_data {
+ 
+ int exynos_eint_gpio_init(struct samsung_pinctrl_drv_data *d);
+ int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d);
+-void exynos_pinctrl_suspend(struct samsung_pinctrl_drv_data *drvdata);
+-void exynos_pinctrl_resume(struct samsung_pinctrl_drv_data *drvdata);
++void exynosautov920_pinctrl_resume(struct samsung_pin_bank *bank);
++void exynosautov920_pinctrl_suspend(struct samsung_pin_bank *bank);
++void exynos_pinctrl_suspend(struct samsung_pin_bank *bank);
++void exynos_pinctrl_resume(struct samsung_pin_bank *bank);
++void gs101_pinctrl_suspend(struct samsung_pin_bank *bank);
++void gs101_pinctrl_resume(struct samsung_pin_bank *bank);
+ struct samsung_retention_ctrl *
+ exynos_retention_init(struct samsung_pinctrl_drv_data *drvdata,
+ 		      const struct samsung_retention_data *data);
+diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
+index 63ac89a802d301..210534586c0c0b 100644
+--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
++++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
+@@ -1333,6 +1333,7 @@ static int samsung_pinctrl_probe(struct platform_device *pdev)
+ static int __maybe_unused samsung_pinctrl_suspend(struct device *dev)
+ {
+ 	struct samsung_pinctrl_drv_data *drvdata = dev_get_drvdata(dev);
++	struct samsung_pin_bank *bank;
+ 	int i;
+ 
+ 	i = clk_enable(drvdata->pclk);
+@@ -1343,7 +1344,7 @@ static int __maybe_unused samsung_pinctrl_suspend(struct device *dev)
+ 	}
+ 
+ 	for (i = 0; i < drvdata->nr_banks; i++) {
+-		struct samsung_pin_bank *bank = &drvdata->pin_banks[i];
++		bank = &drvdata->pin_banks[i];
+ 		const void __iomem *reg = bank->pctl_base + bank->pctl_offset;
+ 		const u8 *offs = bank->type->reg_offset;
+ 		const u8 *widths = bank->type->fld_width;
+@@ -1371,10 +1372,14 @@ static int __maybe_unused samsung_pinctrl_suspend(struct device *dev)
+ 		}
+ 	}
+ 
++	for (i = 0; i < drvdata->nr_banks; i++) {
++		bank = &drvdata->pin_banks[i];
++		if (drvdata->suspend)
++			drvdata->suspend(bank);
++	}
++
+ 	clk_disable(drvdata->pclk);
+ 
+-	if (drvdata->suspend)
+-		drvdata->suspend(drvdata);
+ 	if (drvdata->retention_ctrl && drvdata->retention_ctrl->enable)
+ 		drvdata->retention_ctrl->enable(drvdata);
+ 
+@@ -1392,6 +1397,7 @@ static int __maybe_unused samsung_pinctrl_suspend(struct device *dev)
+ static int __maybe_unused samsung_pinctrl_resume(struct device *dev)
+ {
+ 	struct samsung_pinctrl_drv_data *drvdata = dev_get_drvdata(dev);
++	struct samsung_pin_bank *bank;
+ 	int ret;
+ 	int i;
+ 
+@@ -1406,11 +1412,14 @@ static int __maybe_unused samsung_pinctrl_resume(struct device *dev)
+ 		return ret;
+ 	}
+ 
+-	if (drvdata->resume)
+-		drvdata->resume(drvdata);
++	for (i = 0; i < drvdata->nr_banks; i++) {
++		bank = &drvdata->pin_banks[i];
++		if (drvdata->resume)
++			drvdata->resume(bank);
++	}
+ 
+ 	for (i = 0; i < drvdata->nr_banks; i++) {
+-		struct samsung_pin_bank *bank = &drvdata->pin_banks[i];
++		bank = &drvdata->pin_banks[i];
+ 		void __iomem *reg = bank->pctl_base + bank->pctl_offset;
+ 		const u8 *offs = bank->type->reg_offset;
+ 		const u8 *widths = bank->type->fld_width;
+diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.h b/drivers/pinctrl/samsung/pinctrl-samsung.h
+index 14c3b6b965851e..7ffd2e193e4256 100644
+--- a/drivers/pinctrl/samsung/pinctrl-samsung.h
++++ b/drivers/pinctrl/samsung/pinctrl-samsung.h
+@@ -285,8 +285,8 @@ struct samsung_pin_ctrl {
+ 	int		(*eint_gpio_init)(struct samsung_pinctrl_drv_data *);
+ 	int		(*eint_wkup_init)(struct samsung_pinctrl_drv_data *);
+ 	void		(*pud_value_init)(struct samsung_pinctrl_drv_data *drvdata);
+-	void		(*suspend)(struct samsung_pinctrl_drv_data *);
+-	void		(*resume)(struct samsung_pinctrl_drv_data *);
++	void		(*suspend)(struct samsung_pin_bank *bank);
++	void		(*resume)(struct samsung_pin_bank *bank);
+ };
+ 
+ /**
+@@ -335,8 +335,8 @@ struct samsung_pinctrl_drv_data {
+ 
+ 	struct samsung_retention_ctrl	*retention_ctrl;
+ 
+-	void (*suspend)(struct samsung_pinctrl_drv_data *);
+-	void (*resume)(struct samsung_pinctrl_drv_data *);
++	void (*suspend)(struct samsung_pin_bank *bank);
++	void (*resume)(struct samsung_pin_bank *bank);
+ };
+ 
+ /**
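
The pinctrl-samsung hunks above all follow from one signature change: the
suspend/resume hooks now take a single struct samsung_pin_bank rather than
the whole driver data, and the core iterates over the banks itself. A
minimal sketch of the resulting dispatch (names from the diff, bodies
elided):

	/* SoC-specific per-bank hook, e.g. gs101_pinctrl_suspend() */
	void soc_pinctrl_suspend(struct samsung_pin_bank *bank);

	/* the core now calls the hook once per bank */
	for (i = 0; i < drvdata->nr_banks; i++) {
		bank = &drvdata->pin_banks[i];
		if (drvdata->suspend)
			drvdata->suspend(bank);
	}

This lets exynosautov920 and gs101 keep their different EINT register
layouts in dedicated callbacks instead of branching on eint_con_offset
inside one shared helper.
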
+diff --git a/drivers/pmdomain/core.c b/drivers/pmdomain/core.c
+index 05913e9fe08211..8b1f894f5e790c 100644
+--- a/drivers/pmdomain/core.c
++++ b/drivers/pmdomain/core.c
+@@ -697,6 +697,37 @@ bool dev_pm_genpd_get_hwmode(struct device *dev)
+ }
+ EXPORT_SYMBOL_GPL(dev_pm_genpd_get_hwmode);
+ 
++/**
++ * dev_pm_genpd_rpm_always_on() - Control if the PM domain can be powered off.
++ *
++ * @dev: Device for which the PM domain may need to stay on for.
++ * @on: Value to set or unset for the condition.
++ *
++ * For some usecases a consumer driver requires its device to remain power-on
++ * from the PM domain perspective during runtime. This function allows the
++ * behaviour to be dynamically controlled for a device attached to a genpd.
++ *
++ * It is assumed that the users guarantee that the genpd wouldn't be detached
++ * while this routine is getting called.
++ *
++ * Return: Returns 0 on success and negative error values on failures.
++ */
++int dev_pm_genpd_rpm_always_on(struct device *dev, bool on)
++{
++	struct generic_pm_domain *genpd;
++
++	genpd = dev_to_genpd_safe(dev);
++	if (!genpd)
++		return -ENODEV;
++
++	genpd_lock(genpd);
++	dev_gpd_data(dev)->rpm_always_on = on;
++	genpd_unlock(genpd);
++
++	return 0;
++}
++EXPORT_SYMBOL_GPL(dev_pm_genpd_rpm_always_on);
++
+ static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
+ {
+ 	unsigned int state_idx = genpd->state_idx;
+@@ -868,6 +899,10 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
+ 		if (!pm_runtime_suspended(pdd->dev) ||
+ 			irq_safe_dev_in_sleep_domain(pdd->dev, genpd))
+ 			not_suspended++;
++
++		/* The device may need its PM domain to stay powered on. */
++		if (to_gpd_data(pdd)->rpm_always_on)
++			return -EBUSY;
+ 	}
+ 
+ 	if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on))
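
For context, dev_pm_genpd_rpm_always_on() lets a consumer driver pin its
PM domain on at runtime: while the flag is set, genpd_power_off() returns
-EBUSY as shown above. A hypothetical caller (the firmware-load scenario
is illustrative, not taken from this patch):

	static int example_fw_load(struct device *dev)
	{
		int ret;

		/* keep the domain powered while programming the device */
		ret = dev_pm_genpd_rpm_always_on(dev, true);
		if (ret)
			return ret;

		/* ... program the device ... */

		/* allow the domain to power off again */
		return dev_pm_genpd_rpm_always_on(dev, false);
	}
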
+diff --git a/drivers/power/reset/at91-reset.c b/drivers/power/reset/at91-reset.c
+index 16512654295f5c..f1e0a0857a90c7 100644
+--- a/drivers/power/reset/at91-reset.c
++++ b/drivers/power/reset/at91-reset.c
+@@ -129,12 +129,11 @@ static int at91_reset(struct notifier_block *this, unsigned long mode,
+ 		"	str	%4, [%0, %6]\n\t"
+ 		/* Disable SDRAM1 accesses */
+ 		"1:	tst	%1, #0\n\t"
+-		"	beq	2f\n\t"
+ 		"	strne	%3, [%1, #" __stringify(AT91_DDRSDRC_RTR) "]\n\t"
+ 		/* Power down SDRAM1 */
+ 		"	strne	%4, [%1, %6]\n\t"
+ 		/* Reset CPU */
+-		"2:	str	%5, [%2, #" __stringify(AT91_RSTC_CR) "]\n\t"
++		"	str	%5, [%2, #" __stringify(AT91_RSTC_CR) "]\n\t"
+ 
+ 		"	b	.\n\t"
+ 		:
+@@ -145,7 +144,7 @@ static int at91_reset(struct notifier_block *this, unsigned long mode,
+ 		  "r" cpu_to_le32(AT91_DDRSDRC_LPCB_POWER_DOWN),
+ 		  "r" (reset->data->reset_args),
+ 		  "r" (reset->ramc_lpr)
+-		: "r4");
++	);
+ 
+ 	return NOTIFY_DONE;
+ }
+diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
+index 18934e28469ee6..528d86a33f37de 100644
+--- a/drivers/ptp/ptp_private.h
++++ b/drivers/ptp/ptp_private.h
+@@ -98,17 +98,7 @@ static inline int queue_cnt(const struct timestamp_event_queue *q)
+ /* Check if ptp virtual clock is in use */
+ static inline bool ptp_vclock_in_use(struct ptp_clock *ptp)
+ {
+-	bool in_use = false;
+-
+-	if (mutex_lock_interruptible(&ptp->n_vclocks_mux))
+-		return true;
+-
+-	if (!ptp->is_virtual_clock && ptp->n_vclocks)
+-		in_use = true;
+-
+-	mutex_unlock(&ptp->n_vclocks_mux);
+-
+-	return in_use;
++	return !ptp->is_virtual_clock;
+ }
+ 
+ /* Check if ptp clock shall be free running */
+diff --git a/drivers/regulator/max20086-regulator.c b/drivers/regulator/max20086-regulator.c
+index 198d45f8e88493..3d333b61fb18c8 100644
+--- a/drivers/regulator/max20086-regulator.c
++++ b/drivers/regulator/max20086-regulator.c
+@@ -5,6 +5,7 @@
+ // Copyright (C) 2022 Laurent Pinchart <laurent.pinchart@idesonboard.com>
+ // Copyright (C) 2018 Avnet, Inc.
+ 
++#include <linux/cleanup.h>
+ #include <linux/err.h>
+ #include <linux/gpio/consumer.h>
+ #include <linux/i2c.h>
+@@ -133,11 +134,11 @@ static int max20086_regulators_register(struct max20086 *chip)
+ static int max20086_parse_regulators_dt(struct max20086 *chip, bool *boot_on)
+ {
+ 	struct of_regulator_match *matches;
+-	struct device_node *node;
+ 	unsigned int i;
+ 	int ret;
+ 
+-	node = of_get_child_by_name(chip->dev->of_node, "regulators");
++	struct device_node *node __free(device_node) =
++		of_get_child_by_name(chip->dev->of_node, "regulators");
+ 	if (!node) {
+ 		dev_err(chip->dev, "regulators node not found\n");
+ 		return -ENODEV;
+@@ -153,7 +154,6 @@ static int max20086_parse_regulators_dt(struct max20086 *chip, bool *boot_on)
+ 
+ 	ret = of_regulator_match(chip->dev, node, matches,
+ 				 chip->info->num_outputs);
+-	of_node_put(node);
+ 	if (ret < 0) {
+ 		dev_err(chip->dev, "Failed to match regulators\n");
+ 		return -EINVAL;
+diff --git a/drivers/remoteproc/qcom_wcnss_iris.c b/drivers/remoteproc/qcom_wcnss_iris.c
+index dd36fd077911af..1e197f7734742b 100644
+--- a/drivers/remoteproc/qcom_wcnss_iris.c
++++ b/drivers/remoteproc/qcom_wcnss_iris.c
+@@ -197,6 +197,7 @@ struct qcom_iris *qcom_iris_probe(struct device *parent, bool *use_48mhz_xo)
+ 
+ err_device_del:
+ 	device_del(&iris->dev);
++	put_device(&iris->dev);
+ 
+ 	return ERR_PTR(ret);
+ }
+@@ -204,4 +205,5 @@ struct qcom_iris *qcom_iris_probe(struct device *parent, bool *use_48mhz_xo)
+ void qcom_iris_remove(struct qcom_iris *iris)
+ {
+ 	device_del(&iris->dev);
++	put_device(&iris->dev);
+ }
+diff --git a/drivers/remoteproc/ti_k3_dsp_remoteproc.c b/drivers/remoteproc/ti_k3_dsp_remoteproc.c
+index 8be3f631c19206..2ae0655ddf1d22 100644
+--- a/drivers/remoteproc/ti_k3_dsp_remoteproc.c
++++ b/drivers/remoteproc/ti_k3_dsp_remoteproc.c
+@@ -115,10 +115,6 @@ static void k3_dsp_rproc_mbox_callback(struct mbox_client *client, void *data)
+ 	const char *name = kproc->rproc->name;
+ 	u32 msg = omap_mbox_message(data);
+ 
+-	/* Do not forward messages from a detached core */
+-	if (kproc->rproc->state == RPROC_DETACHED)
+-		return;
+-
+ 	dev_dbg(dev, "mbox msg: 0x%x\n", msg);
+ 
+ 	switch (msg) {
+@@ -159,10 +155,6 @@ static void k3_dsp_rproc_kick(struct rproc *rproc, int vqid)
+ 	mbox_msg_t msg = (mbox_msg_t)vqid;
+ 	int ret;
+ 
+-	/* Do not forward messages to a detached core */
+-	if (kproc->rproc->state == RPROC_DETACHED)
+-		return;
+-
+ 	/* send the index of the triggered virtqueue in the mailbox payload */
+ 	ret = mbox_send_message(kproc->mbox, (void *)msg);
+ 	if (ret < 0)
+diff --git a/drivers/remoteproc/ti_k3_r5_remoteproc.c b/drivers/remoteproc/ti_k3_r5_remoteproc.c
+index 747ee467da88c9..4894461aa65f3b 100644
+--- a/drivers/remoteproc/ti_k3_r5_remoteproc.c
++++ b/drivers/remoteproc/ti_k3_r5_remoteproc.c
+@@ -194,10 +194,6 @@ static void k3_r5_rproc_mbox_callback(struct mbox_client *client, void *data)
+ 	const char *name = kproc->rproc->name;
+ 	u32 msg = omap_mbox_message(data);
+ 
+-	/* Do not forward message from a detached core */
+-	if (kproc->rproc->state == RPROC_DETACHED)
+-		return;
+-
+ 	dev_dbg(dev, "mbox msg: 0x%x\n", msg);
+ 
+ 	switch (msg) {
+@@ -233,10 +229,6 @@ static void k3_r5_rproc_kick(struct rproc *rproc, int vqid)
+ 	mbox_msg_t msg = (mbox_msg_t)vqid;
+ 	int ret;
+ 
+-	/* Do not forward message to a detached core */
+-	if (kproc->rproc->state == RPROC_DETACHED)
+-		return;
+-
+ 	/* send the index of the triggered virtqueue in the mailbox payload */
+ 	ret = mbox_send_message(kproc->mbox, (void *)msg);
+ 	if (ret < 0)
+diff --git a/drivers/rpmsg/qcom_smd.c b/drivers/rpmsg/qcom_smd.c
+index 43f601c84b4fcb..79d35ab43729ea 100644
+--- a/drivers/rpmsg/qcom_smd.c
++++ b/drivers/rpmsg/qcom_smd.c
+@@ -746,7 +746,7 @@ static int __qcom_smd_send(struct qcom_smd_channel *channel, const void *data,
+ 	__le32 hdr[5] = { cpu_to_le32(len), };
+ 	int tlen = sizeof(hdr) + len;
+ 	unsigned long flags;
+-	int ret;
++	int ret = 0;
+ 
+ 	/* Word aligned channels only accept word size aligned data */
+ 	if (channel->info_word && len % 4)
+diff --git a/drivers/rtc/rtc-loongson.c b/drivers/rtc/rtc-loongson.c
+index 90e9d97a86b487..c9d5b91a6544d1 100644
+--- a/drivers/rtc/rtc-loongson.c
++++ b/drivers/rtc/rtc-loongson.c
+@@ -129,6 +129,14 @@ static u32 loongson_rtc_handler(void *id)
+ {
+ 	struct loongson_rtc_priv *priv = (struct loongson_rtc_priv *)id;
+ 
++	rtc_update_irq(priv->rtcdev, 1, RTC_AF | RTC_IRQF);
++
++	/*
++	 * The TOY_MATCH0_REG should be cleared 0 here,
++	 * otherwise the interrupt cannot be cleared.
++	 */
++	regmap_write(priv->regmap, TOY_MATCH0_REG, 0);
++
+ 	spin_lock(&priv->lock);
+ 	/* Disable RTC alarm wakeup and interrupt */
+ 	writel(readl(priv->pm_base + PM1_EN_REG) & ~RTC_EN,
+diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c
+index 27a191fa3704c6..e66c9c6fd37246 100644
+--- a/drivers/rtc/rtc-sh.c
++++ b/drivers/rtc/rtc-sh.c
+@@ -485,9 +485,15 @@ static int __init sh_rtc_probe(struct platform_device *pdev)
+ 		return -ENOENT;
+ 	}
+ 
+-	rtc->periodic_irq = ret;
+-	rtc->carry_irq = platform_get_irq(pdev, 1);
+-	rtc->alarm_irq = platform_get_irq(pdev, 2);
++	if (!pdev->dev.of_node) {
++		rtc->periodic_irq = ret;
++		rtc->carry_irq = platform_get_irq(pdev, 1);
++		rtc->alarm_irq = platform_get_irq(pdev, 2);
++	} else {
++		rtc->alarm_irq = ret;
++		rtc->periodic_irq = platform_get_irq(pdev, 1);
++		rtc->carry_irq = platform_get_irq(pdev, 2);
++	}
+ 
+ 	res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ 	if (!res)
+diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
+index e98e6b2b9f5700..d9500b7306905f 100644
+--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
++++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
+@@ -1850,33 +1850,14 @@ static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
+ 	}
+ 	hisi_sas_dereg_device(hisi_hba, device);
+ 
+-	rc = hisi_sas_debug_I_T_nexus_reset(device);
+-	if (rc == TMF_RESP_FUNC_COMPLETE && dev_is_sata(device)) {
+-		struct sas_phy *local_phy;
+-
++	if (dev_is_sata(device)) {
+ 		rc = hisi_sas_softreset_ata_disk(device);
+-		switch (rc) {
+-		case -ECOMM:
+-			rc = -ENODEV;
+-			break;
+-		case TMF_RESP_FUNC_FAILED:
+-		case -EMSGSIZE:
+-		case -EIO:
+-			local_phy = sas_get_local_phy(device);
+-			rc = sas_phy_enable(local_phy, 0);
+-			if (!rc) {
+-				local_phy->enabled = 0;
+-				dev_err(dev, "Disabled local phy of ATA disk %016llx due to softreset fail (%d)\n",
+-					SAS_ADDR(device->sas_addr), rc);
+-				rc = -ENODEV;
+-			}
+-			sas_put_local_phy(local_phy);
+-			break;
+-		default:
+-			break;
+-		}
++		if (rc == TMF_RESP_FUNC_FAILED)
++			dev_err(dev, "ata disk %016llx reset (%d)\n",
++				SAS_ADDR(device->sas_addr), rc);
+ 	}
+ 
++	rc = hisi_sas_debug_I_T_nexus_reset(device);
+ 	if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV))
+ 		hisi_sas_release_task(hisi_hba, device);
+ 
+diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
+index e979ec1478c184..e895bd25098fd6 100644
+--- a/drivers/scsi/qedf/qedf_main.c
++++ b/drivers/scsi/qedf/qedf_main.c
+@@ -699,7 +699,7 @@ static u32 qedf_get_login_failures(void *cookie)
+ }
+ 
+ static struct qed_fcoe_cb_ops qedf_cb_ops = {
+-	{
++	.common = {
+ 		.link_update = qedf_link_update,
+ 		.bw_update = qedf_bw_update,
+ 		.schedule_recovery_handler = qedf_schedule_recovery_handler,
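
The qedf change is an initializer-style fix: naming the nested member
keeps the table correct even if fields are ever added before `common` in
struct qed_fcoe_cb_ops. A self-contained illustration with placeholder
types (common_ops, ops, and cb are not names from this driver):

	struct common_ops { void (*link_update)(void *dev); };
	struct ops { struct common_ops common; };

	static void cb(void *dev) { }

	/* positional: silently relies on 'common' being the first member */
	static struct ops o1 = { { .link_update = cb } };
	/* designated: robust to layout changes */
	static struct ops o2 = { .common = { .link_update = cb } };
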
+diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
+index 8274fe0ec7146f..7a5bebf5b096cd 100644
+--- a/drivers/scsi/scsi_transport_iscsi.c
++++ b/drivers/scsi/scsi_transport_iscsi.c
+@@ -3526,7 +3526,7 @@ static int iscsi_new_flashnode(struct iscsi_transport *transport,
+ 		pr_err("%s could not find host no %u\n",
+ 		       __func__, ev->u.new_flashnode.host_no);
+ 		err = -ENODEV;
+-		goto put_host;
++		goto exit_new_fnode;
+ 	}
+ 
+ 	index = transport->new_flashnode(shost, data, len);
+@@ -3536,7 +3536,6 @@ static int iscsi_new_flashnode(struct iscsi_transport *transport,
+ 	else
+ 		err = -EIO;
+ 
+-put_host:
+ 	scsi_host_put(shost);
+ 
+ exit_new_fnode:
+@@ -3561,7 +3560,7 @@ static int iscsi_del_flashnode(struct iscsi_transport *transport,
+ 		pr_err("%s could not find host no %u\n",
+ 		       __func__, ev->u.del_flashnode.host_no);
+ 		err = -ENODEV;
+-		goto put_host;
++		goto exit_del_fnode;
+ 	}
+ 
+ 	idx = ev->u.del_flashnode.flashnode_idx;
+@@ -3603,7 +3602,7 @@ static int iscsi_login_flashnode(struct iscsi_transport *transport,
+ 		pr_err("%s could not find host no %u\n",
+ 		       __func__, ev->u.login_flashnode.host_no);
+ 		err = -ENODEV;
+-		goto put_host;
++		goto exit_login_fnode;
+ 	}
+ 
+ 	idx = ev->u.login_flashnode.flashnode_idx;
+@@ -3655,7 +3654,7 @@ static int iscsi_logout_flashnode(struct iscsi_transport *transport,
+ 		pr_err("%s could not find host no %u\n",
+ 		       __func__, ev->u.logout_flashnode.host_no);
+ 		err = -ENODEV;
+-		goto put_host;
++		goto exit_logout_fnode;
+ 	}
+ 
+ 	idx = ev->u.logout_flashnode.flashnode_idx;
+@@ -3705,7 +3704,7 @@ static int iscsi_logout_flashnode_sid(struct iscsi_transport *transport,
+ 		pr_err("%s could not find host no %u\n",
+ 		       __func__, ev->u.logout_flashnode.host_no);
+ 		err = -ENODEV;
+-		goto put_host;
++		goto exit_logout_sid;
+ 	}
+ 
+ 	session = iscsi_session_lookup(ev->u.logout_flashnode_sid.sid);
+diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
+index d919a74746a056..8cc9f924a8ae60 100644
+--- a/drivers/scsi/smartpqi/smartpqi_init.c
++++ b/drivers/scsi/smartpqi/smartpqi_init.c
+@@ -5990,7 +5990,7 @@ static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info,
+ 			pqi_stream_data->next_lba = rmd.first_block +
+ 				rmd.block_cnt;
+ 			pqi_stream_data->last_accessed = jiffies;
+-			per_cpu_ptr(device->raid_io_stats, smp_processor_id())->write_stream_cnt++;
++				per_cpu_ptr(device->raid_io_stats, raw_smp_processor_id())->write_stream_cnt++;
+ 			return true;
+ 		}
+ 
+@@ -6069,7 +6069,7 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scm
+ 			rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
+ 			if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) {
+ 				raid_bypassed = true;
+-				per_cpu_ptr(device->raid_io_stats, smp_processor_id())->raid_bypass_cnt++;
++				per_cpu_ptr(device->raid_io_stats, raw_smp_processor_id())->raid_bypass_cnt++;
+ 			}
+ 		}
+ 		if (!raid_bypassed)
+diff --git a/drivers/soc/aspeed/aspeed-lpc-snoop.c b/drivers/soc/aspeed/aspeed-lpc-snoop.c
+index 888b5840c01500..d2e63277f0aa9a 100644
+--- a/drivers/soc/aspeed/aspeed-lpc-snoop.c
++++ b/drivers/soc/aspeed/aspeed-lpc-snoop.c
+@@ -166,7 +166,7 @@ static int aspeed_lpc_snoop_config_irq(struct aspeed_lpc_snoop *lpc_snoop,
+ 	int rc;
+ 
+ 	lpc_snoop->irq = platform_get_irq(pdev, 0);
+-	if (!lpc_snoop->irq)
++	if (lpc_snoop->irq < 0)
+ 		return -ENODEV;
+ 
+ 	rc = devm_request_irq(dev, lpc_snoop->irq,
+@@ -200,11 +200,15 @@ static int aspeed_lpc_enable_snoop(struct aspeed_lpc_snoop *lpc_snoop,
+ 	lpc_snoop->chan[channel].miscdev.minor = MISC_DYNAMIC_MINOR;
+ 	lpc_snoop->chan[channel].miscdev.name =
+ 		devm_kasprintf(dev, GFP_KERNEL, "%s%d", DEVICE_NAME, channel);
++	if (!lpc_snoop->chan[channel].miscdev.name) {
++		rc = -ENOMEM;
++		goto err_free_fifo;
++	}
+ 	lpc_snoop->chan[channel].miscdev.fops = &snoop_fops;
+ 	lpc_snoop->chan[channel].miscdev.parent = dev;
+ 	rc = misc_register(&lpc_snoop->chan[channel].miscdev);
+ 	if (rc)
+-		return rc;
++		goto err_free_fifo;
+ 
+ 	/* Enable LPC snoop channel at requested port */
+ 	switch (channel) {
+@@ -221,7 +225,8 @@ static int aspeed_lpc_enable_snoop(struct aspeed_lpc_snoop *lpc_snoop,
+ 		hicrb_en = HICRB_ENSNP1D;
+ 		break;
+ 	default:
+-		return -EINVAL;
++		rc = -EINVAL;
++		goto err_misc_deregister;
+ 	}
+ 
+ 	regmap_update_bits(lpc_snoop->regmap, HICR5, hicr5_en, hicr5_en);
+@@ -231,6 +236,12 @@ static int aspeed_lpc_enable_snoop(struct aspeed_lpc_snoop *lpc_snoop,
+ 		regmap_update_bits(lpc_snoop->regmap, HICRB,
+ 				hicrb_en, hicrb_en);
+ 
++	return 0;
++
++err_misc_deregister:
++	misc_deregister(&lpc_snoop->chan[channel].miscdev);
++err_free_fifo:
++	kfifo_free(&lpc_snoop->chan[channel].fifo);
+ 	return rc;
+ }
+ 
+diff --git a/drivers/soc/qcom/smp2p.c b/drivers/soc/qcom/smp2p.c
+index cefcbd61c62815..95d8a8f728db54 100644
+--- a/drivers/soc/qcom/smp2p.c
++++ b/drivers/soc/qcom/smp2p.c
+@@ -578,7 +578,7 @@ static int qcom_smp2p_probe(struct platform_device *pdev)
+ 	smp2p->mbox_client.knows_txdone = true;
+ 	smp2p->mbox_chan = mbox_request_channel(&smp2p->mbox_client, 0);
+ 	if (IS_ERR(smp2p->mbox_chan)) {
+-		if (PTR_ERR(smp2p->mbox_chan) != -ENODEV)
++		if (PTR_ERR(smp2p->mbox_chan) != -ENOENT)
+ 			return PTR_ERR(smp2p->mbox_chan);
+ 
+ 		smp2p->mbox_chan = NULL;
+diff --git a/drivers/spi/spi-bcm63xx-hsspi.c b/drivers/spi/spi-bcm63xx-hsspi.c
+index 1ca857c2a4aa31..8df12efeea21c7 100644
+--- a/drivers/spi/spi-bcm63xx-hsspi.c
++++ b/drivers/spi/spi-bcm63xx-hsspi.c
+@@ -745,7 +745,7 @@ static int bcm63xx_hsspi_probe(struct platform_device *pdev)
+ 	if (IS_ERR(clk))
+ 		return PTR_ERR(clk);
+ 
+-	reset = devm_reset_control_get_optional_exclusive(dev, NULL);
++	reset = devm_reset_control_get_optional_shared(dev, NULL);
+ 	if (IS_ERR(reset))
+ 		return PTR_ERR(reset);
+ 
+diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c
+index ef3a7226db125c..a95badb7b71146 100644
+--- a/drivers/spi/spi-bcm63xx.c
++++ b/drivers/spi/spi-bcm63xx.c
+@@ -523,7 +523,7 @@ static int bcm63xx_spi_probe(struct platform_device *pdev)
+ 		return PTR_ERR(clk);
+ 	}
+ 
+-	reset = devm_reset_control_get_optional_exclusive(dev, NULL);
++	reset = devm_reset_control_get_optional_shared(dev, NULL);
+ 	if (IS_ERR(reset))
+ 		return PTR_ERR(reset);
+ 
+diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
+index 532b2e9c31d0d3..4c5f12b76de6a5 100644
+--- a/drivers/spi/spi-omap2-mcspi.c
++++ b/drivers/spi/spi-omap2-mcspi.c
+@@ -134,6 +134,7 @@ struct omap2_mcspi {
+ 	size_t			max_xfer_len;
+ 	u32			ref_clk_hz;
+ 	bool			use_multi_mode;
++	bool			last_msg_kept_cs;
+ };
+ 
+ struct omap2_mcspi_cs {
+@@ -1269,6 +1270,10 @@ static int omap2_mcspi_prepare_message(struct spi_controller *ctlr,
+ 	 * multi-mode is applicable.
+ 	 */
+ 	mcspi->use_multi_mode = true;
++
++	if (mcspi->last_msg_kept_cs)
++		mcspi->use_multi_mode = false;
++
+ 	list_for_each_entry(tr, &msg->transfers, transfer_list) {
+ 		if (!tr->bits_per_word)
+ 			bits_per_word = msg->spi->bits_per_word;
+@@ -1287,18 +1292,19 @@ static int omap2_mcspi_prepare_message(struct spi_controller *ctlr,
+ 			mcspi->use_multi_mode = false;
+ 		}
+ 
+-		/* Check if transfer asks to change the CS status after the transfer */
+-		if (!tr->cs_change)
+-			mcspi->use_multi_mode = false;
+-
+-		/*
+-		 * If at least one message is not compatible, switch back to single mode
+-		 *
+-		 * The bits_per_word of certain transfer can be different, but it will have no
+-		 * impact on the signal itself.
+-		 */
+-		if (!mcspi->use_multi_mode)
+-			break;
++		if (list_is_last(&tr->transfer_list, &msg->transfers)) {
++			/* Check if transfer asks to keep the CS status after the whole message */
++			if (tr->cs_change) {
++				mcspi->use_multi_mode = false;
++				mcspi->last_msg_kept_cs = true;
++			} else {
++				mcspi->last_msg_kept_cs = false;
++			}
++		} else {
++			/* Check if transfer asks to change the CS status after the transfer */
++			if (!tr->cs_change)
++				mcspi->use_multi_mode = false;
++		}
+ 	}
+ 
+ 	omap2_mcspi_set_mode(ctlr);
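
The rewritten loop singles out the last transfer of a message: in the SPI
core, cs_change on a final transfer means "keep chip select asserted after
the message ends", so the controller must fall back to single mode both for
this message and for the next one, which is what the new last_msg_kept_cs
flag records. A hypothetical transfer that takes the new path:

	/* last transfer of the message keeps CS active afterwards,
	 * so multi-mode is disabled for this and the next message */
	struct spi_transfer t = {
		.tx_buf		= buf,
		.len		= sizeof(buf),
		.cs_change	= 1,
	};
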
+diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
+index 3519656515ea12..1870f8c8521315 100644
+--- a/drivers/spi/spi-sh-msiof.c
++++ b/drivers/spi/spi-sh-msiof.c
+@@ -918,6 +918,7 @@ static int sh_msiof_transfer_one(struct spi_controller *ctlr,
+ 	void *rx_buf = t->rx_buf;
+ 	unsigned int len = t->len;
+ 	unsigned int bits = t->bits_per_word;
++	unsigned int max_wdlen = 256;
+ 	unsigned int bytes_per_word;
+ 	unsigned int words;
+ 	int n;
+@@ -931,17 +932,17 @@ static int sh_msiof_transfer_one(struct spi_controller *ctlr,
+ 	if (!spi_controller_is_target(p->ctlr))
+ 		sh_msiof_spi_set_clk_regs(p, t);
+ 
++	if (tx_buf)
++		max_wdlen = min(max_wdlen, p->tx_fifo_size);
++	if (rx_buf)
++		max_wdlen = min(max_wdlen, p->rx_fifo_size);
++
+ 	while (ctlr->dma_tx && len > 15) {
+ 		/*
+ 		 *  DMA supports 32-bit words only, hence pack 8-bit and 16-bit
+ 		 *  words, with byte resp. word swapping.
+ 		 */
+-		unsigned int l = 0;
+-
+-		if (tx_buf)
+-			l = min(round_down(len, 4), p->tx_fifo_size * 4);
+-		if (rx_buf)
+-			l = min(round_down(len, 4), p->rx_fifo_size * 4);
++		unsigned int l = min(round_down(len, 4), max_wdlen * 4);
+ 
+ 		if (bits <= 8) {
+ 			copy32 = copy_bswap32;
+diff --git a/drivers/spi/spi-tegra210-quad.c b/drivers/spi/spi-tegra210-quad.c
+index 2d48ad844fb80b..92348ebc60c786 100644
+--- a/drivers/spi/spi-tegra210-quad.c
++++ b/drivers/spi/spi-tegra210-quad.c
+@@ -134,7 +134,7 @@
+ #define QSPI_COMMAND_VALUE_SET(X)		(((x) & 0xFF) << 0)
+ 
+ #define QSPI_CMB_SEQ_CMD_CFG			0x1a0
+-#define QSPI_COMMAND_X1_X2_X4(x)		(((x) & 0x3) << 13)
++#define QSPI_COMMAND_X1_X2_X4(x)		((((x) >> 1) & 0x3) << 13)
+ #define QSPI_COMMAND_X1_X2_X4_MASK		(0x03 << 13)
+ #define QSPI_COMMAND_SDR_DDR			BIT(12)
+ #define QSPI_COMMAND_SIZE_SET(x)		(((x) & 0xFF) << 0)
+@@ -147,7 +147,7 @@
+ #define QSPI_ADDRESS_VALUE_SET(X)		(((x) & 0xFFFF) << 0)
+ 
+ #define QSPI_CMB_SEQ_ADDR_CFG			0x1ac
+-#define QSPI_ADDRESS_X1_X2_X4(x)		(((x) & 0x3) << 13)
++#define QSPI_ADDRESS_X1_X2_X4(x)		((((x) >> 1) & 0x3) << 13)
+ #define QSPI_ADDRESS_X1_X2_X4_MASK		(0x03 << 13)
+ #define QSPI_ADDRESS_SDR_DDR			BIT(12)
+ #define QSPI_ADDRESS_SIZE_SET(x)		(((x) & 0xFF) << 0)
+@@ -1036,10 +1036,6 @@ static u32 tegra_qspi_addr_config(bool is_ddr, u8 bus_width, u8 len)
+ {
+ 	u32 addr_config = 0;
+ 
+-	/* Extract Address configuration and value */
+-	is_ddr = 0; //Only SDR mode supported
+-	bus_width = 0; //X1 mode
+-
+ 	if (is_ddr)
+ 		addr_config |= QSPI_ADDRESS_SDR_DDR;
+ 	else
+@@ -1079,13 +1075,13 @@ static int tegra_qspi_combined_seq_xfer(struct tegra_qspi *tqspi,
+ 		switch (transfer_phase) {
+ 		case CMD_TRANSFER:
+ 			/* X1 SDR mode */
+-			cmd_config = tegra_qspi_cmd_config(false, 0,
++			cmd_config = tegra_qspi_cmd_config(false, xfer->tx_nbits,
+ 							   xfer->len);
+ 			cmd_value = *((const u8 *)(xfer->tx_buf));
+ 			break;
+ 		case ADDR_TRANSFER:
+ 			/* X1 SDR mode */
+-			addr_config = tegra_qspi_addr_config(false, 0,
++			addr_config = tegra_qspi_addr_config(false, xfer->tx_nbits,
+ 							     xfer->len);
+ 			address_value = *((const u32 *)(xfer->tx_buf));
+ 			break;
+@@ -1163,26 +1159,22 @@ static int tegra_qspi_combined_seq_xfer(struct tegra_qspi *tqspi,
+ 				ret = -EIO;
+ 				goto exit;
+ 			}
+-			if (!xfer->cs_change) {
+-				tegra_qspi_transfer_end(spi);
+-				spi_transfer_delay_exec(xfer);
+-			}
+ 			break;
+ 		default:
+ 			ret = -EINVAL;
+ 			goto exit;
+ 		}
+ 		msg->actual_length += xfer->len;
++		if (!xfer->cs_change && transfer_phase == DATA_TRANSFER) {
++			tegra_qspi_transfer_end(spi);
++			spi_transfer_delay_exec(xfer);
++		}
+ 		transfer_phase++;
+ 	}
+ 	ret = 0;
+ 
+ exit:
+ 	msg->status = ret;
+-	if (ret < 0) {
+-		tegra_qspi_transfer_end(spi);
+-		spi_transfer_delay_exec(xfer);
+-	}
+ 
+ 	return ret;
+ }
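
The macro fix above is a re-encoding of the SPI bus width: spi_transfer
carries tx_nbits as 1/2/4, while the hardware field evidently expects
0/1/2, and `((x) >> 1) & 0x3` maps one onto the other:

	#define QSPI_COMMAND_X1_X2_X4(x)	((((x) >> 1) & 0x3) << 13)
	/* (1 >> 1) & 0x3 == 0  -> x1 */
	/* (2 >> 1) & 0x3 == 1  -> x2 */
	/* (4 >> 1) & 0x3 == 2  -> x4 */

With the old `((x) & 0x3)` form, feeding tx_nbits straight in would have
truncated x4 (value 4, masked to 0) down to x1, which is why the callers
previously hardcoded 0 and forced X1 mode.
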
+diff --git a/drivers/staging/media/rkvdec/rkvdec.c b/drivers/staging/media/rkvdec/rkvdec.c
+index ac398b5a973604..a1d941b0be00b7 100644
+--- a/drivers/staging/media/rkvdec/rkvdec.c
++++ b/drivers/staging/media/rkvdec/rkvdec.c
+@@ -213,8 +213,14 @@ static int rkvdec_enum_framesizes(struct file *file, void *priv,
+ 	if (!fmt)
+ 		return -EINVAL;
+ 
+-	fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
+-	fsize->stepwise = fmt->frmsize;
++	fsize->type = V4L2_FRMSIZE_TYPE_CONTINUOUS;
++	fsize->stepwise.min_width = 1;
++	fsize->stepwise.max_width = fmt->frmsize.max_width;
++	fsize->stepwise.step_width = 1;
++	fsize->stepwise.min_height = 1;
++	fsize->stepwise.max_height = fmt->frmsize.max_height;
++	fsize->stepwise.step_height = 1;
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/thermal/mediatek/lvts_thermal.c b/drivers/thermal/mediatek/lvts_thermal.c
+index 3295b27ab70d2b..ae063d1bc95f86 100644
+--- a/drivers/thermal/mediatek/lvts_thermal.c
++++ b/drivers/thermal/mediatek/lvts_thermal.c
+@@ -209,6 +209,13 @@ static const struct debugfs_reg32 lvts_regs[] = {
+ 	LVTS_DEBUG_FS_REGS(LVTS_CLKEN),
+ };
+ 
++static void lvts_debugfs_exit(void *data)
++{
++	struct lvts_domain *lvts_td = data;
++
++	debugfs_remove_recursive(lvts_td->dom_dentry);
++}
++
+ static int lvts_debugfs_init(struct device *dev, struct lvts_domain *lvts_td)
+ {
+ 	struct debugfs_regset32 *regset;
+@@ -241,12 +248,7 @@ static int lvts_debugfs_init(struct device *dev, struct lvts_domain *lvts_td)
+ 		debugfs_create_regset32("registers", 0400, dentry, regset);
+ 	}
+ 
+-	return 0;
+-}
+-
+-static void lvts_debugfs_exit(struct lvts_domain *lvts_td)
+-{
+-	debugfs_remove_recursive(lvts_td->dom_dentry);
++	return devm_add_action_or_reset(dev, lvts_debugfs_exit, lvts_td);
+ }
+ 
+ #else
+@@ -257,8 +259,6 @@ static inline int lvts_debugfs_init(struct device *dev,
+ 	return 0;
+ }
+ 
+-static void lvts_debugfs_exit(struct lvts_domain *lvts_td) { }
+-
+ #endif
+ 
+ static int lvts_raw_to_temp(u32 raw_temp, int temp_factor)
+@@ -1352,8 +1352,6 @@ static void lvts_remove(struct platform_device *pdev)
+ 
+ 	for (i = 0; i < lvts_td->num_lvts_ctrl; i++)
+ 		lvts_ctrl_set_enable(&lvts_td->lvts_ctrl[i], false);
+-
+-	lvts_debugfs_exit(lvts_td);
+ }
+ 
+ static const struct lvts_ctrl_data mt7988_lvts_ap_data_ctrl[] = {
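
The lvts change trades an explicit debugfs teardown in .remove() for a
devres action registered at init time. The general pattern (my_cleanup and
state are placeholders, not names from this driver):

	static void my_cleanup(void *data)
	{
		/* undo what init did, e.g. debugfs_remove_recursive() */
	}

	/* runs my_cleanup(state) automatically on unbind, and runs it
	 * immediately if the registration itself fails */
	ret = devm_add_action_or_reset(dev, my_cleanup, state);
	if (ret)
		return ret;
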
+diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c
+index 402fdf8b1cdeca..57821b6f4e4682 100644
+--- a/drivers/thunderbolt/usb4.c
++++ b/drivers/thunderbolt/usb4.c
+@@ -440,10 +440,10 @@ int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags)
+ 			bool configured = val & PORT_CS_19_PC;
+ 			usb4 = port->usb4;
+ 
+-			if (((flags & TB_WAKE_ON_CONNECT) |
++			if (((flags & TB_WAKE_ON_CONNECT) &&
+ 			      device_may_wakeup(&usb4->dev)) && !configured)
+ 				val |= PORT_CS_19_WOC;
+-			if (((flags & TB_WAKE_ON_DISCONNECT) |
++			if (((flags & TB_WAKE_ON_DISCONNECT) &&
+ 			      device_may_wakeup(&usb4->dev)) && configured)
+ 				val |= PORT_CS_19_WOD;
+ 			if ((flags & TB_WAKE_ON_USB4) && configured)
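
The usb4 fix swaps a bitwise OR for a logical AND. With `|`, the
wake-on-connect/disconnect bits were set whenever device_may_wakeup()
returned true, even if the corresponding TB_WAKE_* flag was never
requested (condensed from the hunk above):

	/* old: (0 | 1) is nonzero, so the test passed without the flag */
	if (((flags & TB_WAKE_ON_CONNECT) | device_may_wakeup(dev)) && !configured)
	/* new: the flag must be requested and wakeup must be allowed */
	if (((flags & TB_WAKE_ON_CONNECT) && device_may_wakeup(dev)) && !configured)
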
+diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
+index 0dd68bdbfbcf7c..4f57991944dc45 100644
+--- a/drivers/tty/serial/8250/8250_omap.c
++++ b/drivers/tty/serial/8250/8250_omap.c
+@@ -1168,16 +1168,6 @@ static int omap_8250_tx_dma(struct uart_8250_port *p)
+ 		return 0;
+ 	}
+ 
+-	sg_init_table(&sg, 1);
+-	ret = kfifo_dma_out_prepare_mapped(&tport->xmit_fifo, &sg, 1,
+-					   UART_XMIT_SIZE, dma->tx_addr);
+-	if (ret != 1) {
+-		serial8250_clear_THRI(p);
+-		return 0;
+-	}
+-
+-	dma->tx_size = sg_dma_len(&sg);
+-
+ 	if (priv->habit & OMAP_DMA_TX_KICK) {
+ 		unsigned char c;
+ 		u8 tx_lvl;
+@@ -1202,18 +1192,22 @@ static int omap_8250_tx_dma(struct uart_8250_port *p)
+ 			ret = -EBUSY;
+ 			goto err;
+ 		}
+-		if (dma->tx_size < 4) {
++		if (kfifo_len(&tport->xmit_fifo) < 4) {
+ 			ret = -EINVAL;
+ 			goto err;
+ 		}
+-		if (!kfifo_get(&tport->xmit_fifo, &c)) {
++		if (!uart_fifo_out(&p->port, &c, 1)) {
+ 			ret = -EINVAL;
+ 			goto err;
+ 		}
+ 		skip_byte = c;
+-		/* now we need to recompute due to kfifo_get */
+-		kfifo_dma_out_prepare_mapped(&tport->xmit_fifo, &sg, 1,
+-				UART_XMIT_SIZE, dma->tx_addr);
++	}
++
++	sg_init_table(&sg, 1);
++	ret = kfifo_dma_out_prepare_mapped(&tport->xmit_fifo, &sg, 1, UART_XMIT_SIZE, dma->tx_addr);
++	if (ret != 1) {
++		ret = -EINVAL;
++		goto err;
+ 	}
+ 
+ 	desc = dmaengine_prep_slave_sg(dma->txchan, &sg, 1, DMA_MEM_TO_DEV,
+@@ -1223,6 +1217,7 @@ static int omap_8250_tx_dma(struct uart_8250_port *p)
+ 		goto err;
+ 	}
+ 
++	dma->tx_size = sg_dma_len(&sg);
+ 	dma->tx_running = 1;
+ 
+ 	desc->callback = omap_8250_dma_tx_complete;
+diff --git a/drivers/tty/serial/milbeaut_usio.c b/drivers/tty/serial/milbeaut_usio.c
+index fb082ee73d5b25..9b54f017f2e8aa 100644
+--- a/drivers/tty/serial/milbeaut_usio.c
++++ b/drivers/tty/serial/milbeaut_usio.c
+@@ -523,7 +523,10 @@ static int mlb_usio_probe(struct platform_device *pdev)
+ 	}
+ 	port->membase = devm_ioremap(&pdev->dev, res->start,
+ 				resource_size(res));
+-
++	if (!port->membase) {
++		ret = -ENOMEM;
++		goto failed;
++	}
+ 	ret = platform_get_irq_byname(pdev, "rx");
+ 	mlb_usio_irq[index][RX] = ret;
+ 
+diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
+index 76cf177b040ebe..aacbed76c7c54b 100644
+--- a/drivers/tty/serial/sh-sci.c
++++ b/drivers/tty/serial/sh-sci.c
+@@ -3074,10 +3074,6 @@ static int sci_init_single(struct platform_device *dev,
+ 		ret = sci_init_clocks(sci_port, &dev->dev);
+ 		if (ret < 0)
+ 			return ret;
+-
+-		port->dev = &dev->dev;
+-
+-		pm_runtime_enable(&dev->dev);
+ 	}
+ 
+ 	port->type		= p->type;
+@@ -3104,11 +3100,6 @@ static int sci_init_single(struct platform_device *dev,
+ 	return 0;
+ }
+ 
+-static void sci_cleanup_single(struct sci_port *port)
+-{
+-	pm_runtime_disable(port->port.dev);
+-}
+-
+ #if defined(CONFIG_SERIAL_SH_SCI_CONSOLE) || \
+     defined(CONFIG_SERIAL_SH_SCI_EARLYCON)
+ static void serial_console_putchar(struct uart_port *port, unsigned char ch)
+@@ -3278,8 +3269,6 @@ static void sci_remove(struct platform_device *dev)
+ 	sci_ports_in_use &= ~BIT(port->port.line);
+ 	uart_remove_one_port(&sci_uart_driver, &port->port);
+ 
+-	sci_cleanup_single(port);
+-
+ 	if (port->port.fifosize > 1)
+ 		device_remove_file(&dev->dev, &dev_attr_rx_fifo_trigger);
+ 	if (type == PORT_SCIFA || type == PORT_SCIFB || type == PORT_HSCIF)
+@@ -3444,6 +3433,11 @@ static int sci_probe_single(struct platform_device *dev,
+ 	if (ret)
+ 		return ret;
+ 
++	sciport->port.dev = &dev->dev;
++	ret = devm_pm_runtime_enable(&dev->dev);
++	if (ret)
++		return ret;
++
+ 	sciport->gpios = mctrl_gpio_init(&sciport->port, 0);
+ 	if (IS_ERR(sciport->gpios))
+ 		return PTR_ERR(sciport->gpios);
+@@ -3457,13 +3451,7 @@ static int sci_probe_single(struct platform_device *dev,
+ 		sciport->port.flags |= UPF_HARD_FLOW;
+ 	}
+ 
+-	ret = uart_add_one_port(&sci_uart_driver, &sciport->port);
+-	if (ret) {
+-		sci_cleanup_single(sciport);
+-		return ret;
+-	}
+-
+-	return 0;
++	return uart_add_one_port(&sci_uart_driver, &sciport->port);
+ }
+ 
+ static int sci_probe(struct platform_device *dev)
+diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
+index 4b91072f3a4e91..1f2bdd2e1cc593 100644
+--- a/drivers/tty/vt/vt_ioctl.c
++++ b/drivers/tty/vt/vt_ioctl.c
+@@ -1103,8 +1103,6 @@ long vt_compat_ioctl(struct tty_struct *tty,
+ 	case VT_WAITACTIVE:
+ 	case VT_RELDISP:
+ 	case VT_DISALLOCATE:
+-	case VT_RESIZE:
+-	case VT_RESIZEX:
+ 		return vt_ioctl(tty, cmd, arg);
+ 
+ 	/*
+diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
+index 45b04f3c377643..420e943bb73a7a 100644
+--- a/drivers/ufs/core/ufs-mcq.c
++++ b/drivers/ufs/core/ufs-mcq.c
+@@ -670,7 +670,6 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
+ 	int tag = scsi_cmd_to_rq(cmd)->tag;
+ 	struct ufshcd_lrb *lrbp = &hba->lrb[tag];
+ 	struct ufs_hw_queue *hwq;
+-	unsigned long flags;
+ 	int err;
+ 
+ 	/* Skip task abort in case previous aborts failed and report failure */
+@@ -709,10 +708,5 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
+ 		return FAILED;
+ 	}
+ 
+-	spin_lock_irqsave(&hwq->cq_lock, flags);
+-	if (ufshcd_cmd_inflight(lrbp->cmd))
+-		ufshcd_release_scsi_cmd(hba, lrbp);
+-	spin_unlock_irqrestore(&hwq->cq_lock, flags);
+-
+ 	return SUCCESS;
+ }
+diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
+index 247e425428c88b..374f505fec3d13 100644
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -6577,9 +6577,14 @@ static void ufshcd_err_handler(struct work_struct *work)
+ 		up(&hba->host_sem);
+ 		return;
+ 	}
+-	ufshcd_set_eh_in_progress(hba);
+ 	spin_unlock_irqrestore(hba->host->host_lock, flags);
++
+ 	ufshcd_err_handling_prepare(hba);
++
++	spin_lock_irqsave(hba->host->host_lock, flags);
++	ufshcd_set_eh_in_progress(hba);
++	spin_unlock_irqrestore(hba->host->host_lock, flags);
++
+ 	/* Complete requests that have door-bell cleared by h/w */
+ 	ufshcd_complete_requests(hba, false);
+ 	spin_lock_irqsave(hba->host->host_lock, flags);
+diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
+index 4557b1bcd6356b..a715f377d0a806 100644
+--- a/drivers/ufs/host/ufs-qcom.c
++++ b/drivers/ufs/host/ufs-qcom.c
+@@ -366,10 +366,9 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
+ 	if (ret)
+ 		return ret;
+ 
+-	if (phy->power_count) {
++	if (phy->power_count)
+ 		phy_power_off(phy);
+-		phy_exit(phy);
+-	}
++
+ 
+ 	/* phy initialization - calibrate the phy */
+ 	ret = phy_init(phy);
+diff --git a/drivers/usb/cdns3/cdnsp-gadget.c b/drivers/usb/cdns3/cdnsp-gadget.c
+index 79d06958d61936..38e693cd3efc05 100644
+--- a/drivers/usb/cdns3/cdnsp-gadget.c
++++ b/drivers/usb/cdns3/cdnsp-gadget.c
+@@ -28,7 +28,8 @@
+ unsigned int cdnsp_port_speed(unsigned int port_status)
+ {
+ 	/*Detect gadget speed based on PORTSC register*/
+-	if (DEV_SUPERSPEEDPLUS(port_status))
++	if (DEV_SUPERSPEEDPLUS(port_status) ||
++	    DEV_SSP_GEN1x2(port_status) || DEV_SSP_GEN2x2(port_status))
+ 		return USB_SPEED_SUPER_PLUS;
+ 	else if (DEV_SUPERSPEED(port_status))
+ 		return USB_SPEED_SUPER;
+@@ -546,6 +547,7 @@ int cdnsp_wait_for_cmd_compl(struct cdnsp_device *pdev)
+ 	dma_addr_t cmd_deq_dma;
+ 	union cdnsp_trb *event;
+ 	u32 cycle_state;
++	u32 retry = 10;
+ 	int ret, val;
+ 	u64 cmd_dma;
+ 	u32  flags;
+@@ -577,8 +579,23 @@ int cdnsp_wait_for_cmd_compl(struct cdnsp_device *pdev)
+ 		flags = le32_to_cpu(event->event_cmd.flags);
+ 
+ 		/* Check the owner of the TRB. */
+-		if ((flags & TRB_CYCLE) != cycle_state)
++		if ((flags & TRB_CYCLE) != cycle_state) {
++			/*
++			 * Give some extra time to get chance controller
++			 * to finish command before returning error code.
++			 * Checking CMD_RING_BUSY is not sufficient because
++			 * this bit is cleared to '0' when the Command
++			 * Descriptor has been executed by controller
++			 * and not when command completion event has
++			 * be added to event ring.
++			 */
++			if (retry--) {
++				udelay(20);
++				continue;
++			}
++
+ 			return -EINVAL;
++		}
+ 
+ 		cmd_dma = le64_to_cpu(event->event_cmd.cmd_trb);
+ 
+diff --git a/drivers/usb/cdns3/cdnsp-gadget.h b/drivers/usb/cdns3/cdnsp-gadget.h
+index 12534be52f39df..2afa3e558f85ca 100644
+--- a/drivers/usb/cdns3/cdnsp-gadget.h
++++ b/drivers/usb/cdns3/cdnsp-gadget.h
+@@ -285,11 +285,15 @@ struct cdnsp_port_regs {
+ #define XDEV_HS			(0x3 << 10)
+ #define XDEV_SS			(0x4 << 10)
+ #define XDEV_SSP		(0x5 << 10)
++#define XDEV_SSP1x2		(0x6 << 10)
++#define XDEV_SSP2x2		(0x7 << 10)
+ #define DEV_UNDEFSPEED(p)	(((p) & DEV_SPEED_MASK) == (0x0 << 10))
+ #define DEV_FULLSPEED(p)	(((p) & DEV_SPEED_MASK) == XDEV_FS)
+ #define DEV_HIGHSPEED(p)	(((p) & DEV_SPEED_MASK) == XDEV_HS)
+ #define DEV_SUPERSPEED(p)	(((p) & DEV_SPEED_MASK) == XDEV_SS)
+ #define DEV_SUPERSPEEDPLUS(p)	(((p) & DEV_SPEED_MASK) == XDEV_SSP)
++#define DEV_SSP_GEN1x2(p)	(((p) & DEV_SPEED_MASK) == XDEV_SSP1x2)
++#define DEV_SSP_GEN2x2(p)	(((p) & DEV_SPEED_MASK) == XDEV_SSP2x2)
+ #define DEV_SUPERSPEED_ANY(p)	(((p) & DEV_SPEED_MASK) >= XDEV_SS)
+ #define DEV_PORT_SPEED(p)	(((p) >> 10) & 0x0f)
+ /* Port Link State Write Strobe - set this when changing link state */
+diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
+index 66f3d9324ba2f3..75de29725a450c 100644
+--- a/drivers/usb/class/usbtmc.c
++++ b/drivers/usb/class/usbtmc.c
+@@ -565,14 +565,15 @@ static int usbtmc488_ioctl_read_stb(struct usbtmc_file_data *file_data,
+ 
+ 	rv = usbtmc_get_stb(file_data, &stb);
+ 
+-	if (rv > 0) {
+-		srq_asserted = atomic_xchg(&file_data->srq_asserted,
+-					srq_asserted);
+-		if (srq_asserted)
+-			stb |= 0x40; /* Set RQS bit */
++	if (rv < 0)
++		return rv;
++
++	srq_asserted = atomic_xchg(&file_data->srq_asserted, srq_asserted);
++	if (srq_asserted)
++		stb |= 0x40; /* Set RQS bit */
++
++	rv = put_user(stb, (__u8 __user *)arg);
+ 
+-		rv = put_user(stb, (__u8 __user *)arg);
+-	}
+ 	return rv;
+ 
+ }
+@@ -2201,7 +2202,7 @@ static long usbtmc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ 
+ 	case USBTMC_IOCTL_GET_STB:
+ 		retval = usbtmc_get_stb(file_data, &tmp_byte);
+-		if (retval > 0)
++		if (!retval)
+ 			retval = put_user(tmp_byte, (__u8 __user *)arg);
+ 		break;
+ 
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 145787c424e0c8..da3d0e525b64e9 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -6135,6 +6135,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
+ 	struct usb_hub			*parent_hub;
+ 	struct usb_hcd			*hcd = bus_to_hcd(udev->bus);
+ 	struct usb_device_descriptor	descriptor;
++	struct usb_interface		*intf;
+ 	struct usb_host_bos		*bos;
+ 	int				i, j, ret = 0;
+ 	int				port1 = udev->portnum;
+@@ -6192,6 +6193,18 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
+ 	if (!udev->actconfig)
+ 		goto done;
+ 
++	/*
++	 * Some devices can't handle setting default altsetting 0 with a
++	 * Set-Interface request. Disable host-side endpoints of those
++	 * interfaces here. Enable and reset them back after host has set
++	 * its internal endpoint structures during usb_hcd_alloc_bandwith()
++	 */
++	for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) {
++		intf = udev->actconfig->interface[i];
++		if (intf->cur_altsetting->desc.bAlternateSetting == 0)
++			usb_disable_interface(udev, intf, true);
++	}
++
+ 	mutex_lock(hcd->bandwidth_mutex);
+ 	ret = usb_hcd_alloc_bandwidth(udev, udev->actconfig, NULL, NULL);
+ 	if (ret < 0) {
+@@ -6223,12 +6236,11 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
+ 	 */
+ 	for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) {
+ 		struct usb_host_config *config = udev->actconfig;
+-		struct usb_interface *intf = config->interface[i];
+ 		struct usb_interface_descriptor *desc;
+ 
++		intf = config->interface[i];
+ 		desc = &intf->cur_altsetting->desc;
+ 		if (desc->bAlternateSetting == 0) {
+-			usb_disable_interface(udev, intf, true);
+ 			usb_enable_interface(udev, intf, true);
+ 			ret = 0;
+ 		} else {
+diff --git a/drivers/usb/core/usb-acpi.c b/drivers/usb/core/usb-acpi.c
+index 03c22114214b5a..494e21a11cd265 100644
+--- a/drivers/usb/core/usb-acpi.c
++++ b/drivers/usb/core/usb-acpi.c
+@@ -165,6 +165,8 @@ static int usb_acpi_add_usb4_devlink(struct usb_device *udev)
+ 		return 0;
+ 
+ 	hub = usb_hub_to_struct_hub(udev->parent);
++	if (!hub)
++		return 0;
+ 	port_dev = hub->ports[udev->portnum - 1];
+ 
+ 	struct fwnode_handle *nhi_fwnode __free(fwnode_handle) =
+diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
+index 740311c4fa2496..c7a05f842745bc 100644
+--- a/drivers/usb/gadget/function/f_hid.c
++++ b/drivers/usb/gadget/function/f_hid.c
+@@ -144,8 +144,8 @@ static struct hid_descriptor hidg_desc = {
+ 	.bcdHID				= cpu_to_le16(0x0101),
+ 	.bCountryCode			= 0x00,
+ 	.bNumDescriptors		= 0x1,
+-	/*.desc[0].bDescriptorType	= DYNAMIC */
+-	/*.desc[0].wDescriptorLenght	= DYNAMIC */
++	/*.rpt_desc.bDescriptorType	= DYNAMIC */
++	/*.rpt_desc.wDescriptorLength	= DYNAMIC */
+ };
+ 
+ /* Super-Speed Support */
+@@ -939,8 +939,8 @@ static int hidg_setup(struct usb_function *f,
+ 			struct hid_descriptor hidg_desc_copy = hidg_desc;
+ 
+ 			VDBG(cdev, "USB_REQ_GET_DESCRIPTOR: HID\n");
+-			hidg_desc_copy.desc[0].bDescriptorType = HID_DT_REPORT;
+-			hidg_desc_copy.desc[0].wDescriptorLength =
++			hidg_desc_copy.rpt_desc.bDescriptorType = HID_DT_REPORT;
++			hidg_desc_copy.rpt_desc.wDescriptorLength =
+ 				cpu_to_le16(hidg->report_desc_length);
+ 
+ 			length = min_t(unsigned short, length,
+@@ -1210,8 +1210,8 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f)
+ 	 * We can use the hidg_desc struct here but we should not rely on
+ 	 * its content staying unchanged after returning from this function.
+ 	 */
+-	hidg_desc.desc[0].bDescriptorType = HID_DT_REPORT;
+-	hidg_desc.desc[0].wDescriptorLength =
++	hidg_desc.rpt_desc.bDescriptorType = HID_DT_REPORT;
++	hidg_desc.rpt_desc.wDescriptorLength =
+ 		cpu_to_le16(hidg->report_desc_length);
+ 
+ 	hidg_hs_in_ep_desc.bEndpointAddress =
+diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
+index 4b3d5075621aa0..d709e24c1fd422 100644
+--- a/drivers/usb/gadget/udc/core.c
++++ b/drivers/usb/gadget/udc/core.c
+@@ -1570,7 +1570,7 @@ static int gadget_match_driver(struct device *dev, const struct device_driver *d
+ {
+ 	struct usb_gadget *gadget = dev_to_usb_gadget(dev);
+ 	struct usb_udc *udc = gadget->udc;
+-	struct usb_gadget_driver *driver = container_of(drv,
++	const struct usb_gadget_driver *driver = container_of(drv,
+ 			struct usb_gadget_driver, driver);
+ 
+ 	/* If the driver specifies a udc_name, it must match the UDC's name */
+diff --git a/drivers/usb/misc/onboard_usb_dev.c b/drivers/usb/misc/onboard_usb_dev.c
+index b4d5408a4371bf..cf716ae870b80c 100644
+--- a/drivers/usb/misc/onboard_usb_dev.c
++++ b/drivers/usb/misc/onboard_usb_dev.c
+@@ -36,9 +36,10 @@
+ #define USB5744_CMD_CREG_ACCESS			0x99
+ #define USB5744_CMD_CREG_ACCESS_LSB		0x37
+ #define USB5744_CREG_MEM_ADDR			0x00
++#define USB5744_CREG_MEM_RD_ADDR		0x04
+ #define USB5744_CREG_WRITE			0x00
+-#define USB5744_CREG_RUNTIMEFLAGS2		0x41
+-#define USB5744_CREG_RUNTIMEFLAGS2_LSB		0x1D
++#define USB5744_CREG_READ			0x01
++#define USB5744_CREG_RUNTIMEFLAGS2		0x411D
+ #define USB5744_CREG_BYPASS_UDC_SUSPEND		BIT(3)
+ 
+ static void onboard_dev_attach_usb_driver(struct work_struct *work);
+@@ -309,11 +310,88 @@ static void onboard_dev_attach_usb_driver(struct work_struct *work)
+ 		pr_err("Failed to attach USB driver: %pe\n", ERR_PTR(err));
+ }
+ 
++#if IS_ENABLED(CONFIG_USB_ONBOARD_DEV_USB5744)
++static int onboard_dev_5744_i2c_read_byte(struct i2c_client *client, u16 addr, u8 *data)
++{
++	struct i2c_msg msg[2];
++	u8 rd_buf[3];
++	int ret;
++
++	u8 wr_buf[7] = {0, USB5744_CREG_MEM_ADDR, 4,
++			USB5744_CREG_READ, 1,
++			addr >> 8 & 0xff,
++			addr & 0xff};
++	msg[0].addr = client->addr;
++	msg[0].flags = 0;
++	msg[0].len = sizeof(wr_buf);
++	msg[0].buf = wr_buf;
++
++	ret = i2c_transfer(client->adapter, msg, 1);
++	if (ret < 0)
++		return ret;
++
++	wr_buf[0] = USB5744_CMD_CREG_ACCESS;
++	wr_buf[1] = USB5744_CMD_CREG_ACCESS_LSB;
++	wr_buf[2] = 0;
++	msg[0].len = 3;
++
++	ret = i2c_transfer(client->adapter, msg, 1);
++	if (ret < 0)
++		return ret;
++
++	wr_buf[0] = 0;
++	wr_buf[1] = USB5744_CREG_MEM_RD_ADDR;
++	msg[0].len = 2;
++
++	msg[1].addr = client->addr;
++	msg[1].flags = I2C_M_RD;
++	msg[1].len = 2;
++	msg[1].buf = rd_buf;
++
++	ret = i2c_transfer(client->adapter, msg, 2);
++	if (ret < 0)
++		return ret;
++	*data = rd_buf[1];
++
++	return 0;
++}
++
++static int onboard_dev_5744_i2c_write_byte(struct i2c_client *client, u16 addr, u8 data)
++{
++	struct i2c_msg msg[2];
++	int ret;
++
++	u8 wr_buf[8] = {0, USB5744_CREG_MEM_ADDR, 5,
++			USB5744_CREG_WRITE, 1,
++			addr >> 8 & 0xff,
++			addr & 0xff,
++			data};
++	msg[0].addr = client->addr;
++	msg[0].flags = 0;
++	msg[0].len = sizeof(wr_buf);
++	msg[0].buf = wr_buf;
++
++	ret = i2c_transfer(client->adapter, msg, 1);
++	if (ret < 0)
++		return ret;
++
++	msg[0].len = 3;
++	wr_buf[0] = USB5744_CMD_CREG_ACCESS;
++	wr_buf[1] = USB5744_CMD_CREG_ACCESS_LSB;
++	wr_buf[2] = 0;
++
++	ret = i2c_transfer(client->adapter, msg, 1);
++	if (ret < 0)
++		return ret;
++
++	return 0;
++}
++
+ static int onboard_dev_5744_i2c_init(struct i2c_client *client)
+ {
+-#if IS_ENABLED(CONFIG_USB_ONBOARD_DEV_USB5744)
+ 	struct device *dev = &client->dev;
+ 	int ret;
++	u8 reg;
+ 
+ 	/*
+ 	 * Set BYPASS_UDC_SUSPEND bit to ensure MCU is always enabled
+@@ -321,20 +399,16 @@ static int onboard_dev_5744_i2c_init(struct i2c_client *client)
+ 	 * The command writes 5 bytes to memory and a single data byte into
+ 	 * the configuration register.
+ 	 */
+-	char wr_buf[7] = {USB5744_CREG_MEM_ADDR, 5,
+-			  USB5744_CREG_WRITE, 1,
+-			  USB5744_CREG_RUNTIMEFLAGS2,
+-			  USB5744_CREG_RUNTIMEFLAGS2_LSB,
+-			  USB5744_CREG_BYPASS_UDC_SUSPEND};
+-
+-	ret = i2c_smbus_write_block_data(client, 0, sizeof(wr_buf), wr_buf);
++	ret = onboard_dev_5744_i2c_read_byte(client,
++					     USB5744_CREG_RUNTIMEFLAGS2, &reg);
+ 	if (ret)
+-		return dev_err_probe(dev, ret, "BYPASS_UDC_SUSPEND bit configuration failed\n");
++		return dev_err_probe(dev, ret, "CREG_RUNTIMEFLAGS2 read failed\n");
+ 
+-	ret = i2c_smbus_write_word_data(client, USB5744_CMD_CREG_ACCESS,
+-					USB5744_CMD_CREG_ACCESS_LSB);
++	reg |= USB5744_CREG_BYPASS_UDC_SUSPEND;
++	ret = onboard_dev_5744_i2c_write_byte(client,
++					      USB5744_CREG_RUNTIMEFLAGS2, reg);
+ 	if (ret)
+-		return dev_err_probe(dev, ret, "Configuration Register Access Command failed\n");
++		return dev_err_probe(dev, ret, "BYPASS_UDC_SUSPEND bit configuration failed\n");
+ 
+ 	/* Send SMBus command to boot hub. */
+ 	ret = i2c_smbus_write_word_data(client, USB5744_CMD_ATTACH,
+@@ -343,10 +417,13 @@ static int onboard_dev_5744_i2c_init(struct i2c_client *client)
+ 		return dev_err_probe(dev, ret, "USB Attach with SMBus command failed\n");
+ 
+ 	return ret;
++}
+ #else
++static int onboard_dev_5744_i2c_init(struct i2c_client *client)
++{
+ 	return -ENODEV;
+-#endif
+ }
++#endif
+ 
+ static int onboard_dev_probe(struct platform_device *pdev)
+ {
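
The onboard_usb_dev change replaces a blind SMBus block write of the
USB5744's RUNTIMEFLAGS2 register with a read-modify-write through the
hub's indirect configuration-register protocol, so setting
BYPASS_UDC_SUSPEND no longer clobbers the other bits in that register.
With the two helpers added above, further register updates collapse to
the familiar RMW idiom; a wrapper might look like this
(usb5744_set_bits() is a hypothetical name, not part of the patch):

	/* Set bits in a USB5744 config register, preserving the rest. */
	static int usb5744_set_bits(struct i2c_client *client, u16 addr, u8 mask)
	{
		u8 val;
		int ret;

		ret = onboard_dev_5744_i2c_read_byte(client, addr, &val);
		if (ret)
			return ret;
		return onboard_dev_5744_i2c_write_byte(client, addr, val | mask);
	}

With that, the init path reduces to usb5744_set_bits(client,
USB5744_CREG_RUNTIMEFLAGS2, USB5744_CREG_BYPASS_UDC_SUSPEND).
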
+diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
+index 7324de52d9505e..161786e9b7e47d 100644
+--- a/drivers/usb/renesas_usbhs/common.c
++++ b/drivers/usb/renesas_usbhs/common.c
+@@ -685,10 +685,29 @@ static int usbhs_probe(struct platform_device *pdev)
+ 	INIT_DELAYED_WORK(&priv->notify_hotplug_work, usbhsc_notify_hotplug);
+ 	spin_lock_init(usbhs_priv_to_lock(priv));
+ 
++	/*
++	 * Acquire clocks and enable power management (PM) early in the
++	 * probe process, as the driver accesses registers during
++	 * initialization. Ensure the device is active before proceeding.
++	 */
++	pm_runtime_enable(dev);
++
++	ret = usbhsc_clk_get(dev, priv);
++	if (ret)
++		goto probe_pm_disable;
++
++	ret = pm_runtime_resume_and_get(dev);
++	if (ret)
++		goto probe_clk_put;
++
++	ret = usbhsc_clk_prepare_enable(priv);
++	if (ret)
++		goto probe_pm_put;
++
+ 	/* call pipe and module init */
+ 	ret = usbhs_pipe_probe(priv);
+ 	if (ret < 0)
+-		return ret;
++		goto probe_clk_dis_unprepare;
+ 
+ 	ret = usbhs_fifo_probe(priv);
+ 	if (ret < 0)
+@@ -705,10 +724,6 @@ static int usbhs_probe(struct platform_device *pdev)
+ 	if (ret)
+ 		goto probe_fail_rst;
+ 
+-	ret = usbhsc_clk_get(dev, priv);
+-	if (ret)
+-		goto probe_fail_clks;
+-
+ 	/*
+ 	 * device reset here because
+ 	 * USB device might be used in boot loader.
+@@ -721,7 +736,7 @@ static int usbhs_probe(struct platform_device *pdev)
+ 		if (ret) {
+ 			dev_warn(dev, "USB function not selected (GPIO)\n");
+ 			ret = -ENOTSUPP;
+-			goto probe_end_mod_exit;
++			goto probe_assert_rest;
+ 		}
+ 	}
+ 
+@@ -735,14 +750,19 @@ static int usbhs_probe(struct platform_device *pdev)
+ 	ret = usbhs_platform_call(priv, hardware_init, pdev);
+ 	if (ret < 0) {
+ 		dev_err(dev, "platform init failed.\n");
+-		goto probe_end_mod_exit;
++		goto probe_assert_rest;
+ 	}
+ 
+ 	/* reset phy for connection */
+ 	usbhs_platform_call(priv, phy_reset, pdev);
+ 
+-	/* power control */
+-	pm_runtime_enable(dev);
++	/*
++	 * Disable the clocks that were enabled earlier in the probe path,
++	 * and let the driver handle the clocks beyond this point.
++	 */
++	usbhsc_clk_disable_unprepare(priv);
++	pm_runtime_put(dev);
++
+ 	if (!usbhs_get_dparam(priv, runtime_pwctrl)) {
+ 		usbhsc_power_ctrl(priv, 1);
+ 		usbhs_mod_autonomy_mode(priv);
+@@ -759,9 +779,7 @@ static int usbhs_probe(struct platform_device *pdev)
+ 
+ 	return ret;
+ 
+-probe_end_mod_exit:
+-	usbhsc_clk_put(priv);
+-probe_fail_clks:
++probe_assert_rest:
+ 	reset_control_assert(priv->rsts);
+ probe_fail_rst:
+ 	usbhs_mod_remove(priv);
+@@ -769,6 +787,14 @@ static int usbhs_probe(struct platform_device *pdev)
+ 	usbhs_fifo_remove(priv);
+ probe_end_pipe_exit:
+ 	usbhs_pipe_remove(priv);
++probe_clk_dis_unprepare:
++	usbhsc_clk_disable_unprepare(priv);
++probe_pm_put:
++	pm_runtime_put(dev);
++probe_clk_put:
++	usbhsc_clk_put(priv);
++probe_pm_disable:
++	pm_runtime_disable(dev);
+ 
+ 	dev_info(dev, "probe failed (%d)\n", ret);
+ 
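
The renesas_usbhs rework moves clock and runtime-PM setup to the top of
probe so the device is guaranteed to be powered before any register
access, and gives every acquisition a matching unwind label. The shape
of that error ladder, reduced to its essentials (do_hw_init() is a
hypothetical placeholder for the register-touching steps):

	#include <linux/platform_device.h>
	#include <linux/pm_runtime.h>

	static int example_probe(struct platform_device *pdev)
	{
		struct device *dev = &pdev->dev;
		int ret;

		pm_runtime_enable(dev);

		ret = pm_runtime_resume_and_get(dev);
		if (ret)
			goto err_pm_disable;

		ret = do_hw_init(dev);	/* hypothetical: needs the device active */
		if (ret)
			goto err_pm_put;

		return 0;

	err_pm_put:
		pm_runtime_put(dev);
	err_pm_disable:
		pm_runtime_disable(dev);
		return ret;
	}

Each label undoes exactly the steps that succeeded before the failure,
in reverse order, which is what the new probe_clk_*/probe_pm_* labels
do in the driver.
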
+diff --git a/drivers/usb/typec/bus.c b/drivers/usb/typec/bus.c
+index aa879253d3b810..13044ee5be10d6 100644
+--- a/drivers/usb/typec/bus.c
++++ b/drivers/usb/typec/bus.c
+@@ -449,7 +449,7 @@ ATTRIBUTE_GROUPS(typec);
+ 
+ static int typec_match(struct device *dev, const struct device_driver *driver)
+ {
+-	struct typec_altmode_driver *drv = to_altmode_driver(driver);
++	const struct typec_altmode_driver *drv = to_altmode_driver(driver);
+ 	struct typec_altmode *altmode = to_typec_altmode(dev);
+ 	const struct typec_device_id *id;
+ 
+diff --git a/drivers/usb/typec/tcpm/tcpci_maxim_core.c b/drivers/usb/typec/tcpm/tcpci_maxim_core.c
+index fd1b8059336764..648311f5e3cf13 100644
+--- a/drivers/usb/typec/tcpm/tcpci_maxim_core.c
++++ b/drivers/usb/typec/tcpm/tcpci_maxim_core.c
+@@ -166,7 +166,8 @@ static void process_rx(struct max_tcpci_chip *chip, u16 status)
+ 		return;
+ 	}
+ 
+-	if (count > sizeof(struct pd_message) || count + 1 > TCPC_RECEIVE_BUFFER_LEN) {
++	if (count > sizeof(struct pd_message) + 1 ||
++	    count + 1 > TCPC_RECEIVE_BUFFER_LEN) {
+ 		dev_err(chip->dev, "Invalid TCPC_RX_BYTE_CNT %d\n", count);
+ 		return;
+ 	}
+diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
+index bbd7f53f7d5982..1d8e760df483cc 100644
+--- a/drivers/usb/typec/tcpm/tcpm.c
++++ b/drivers/usb/typec/tcpm/tcpm.c
+@@ -568,6 +568,15 @@ struct pd_rx_event {
+ 	enum tcpm_transmit_type rx_sop_type;
+ };
+ 
++struct altmode_vdm_event {
++	struct kthread_work work;
++	struct tcpm_port *port;
++	u32 header;
++	u32 *data;
++	int cnt;
++	enum tcpm_transmit_type tx_sop_type;
++};
++
+ static const char * const pd_rev[] = {
+ 	[PD_REV10]		= "rev1",
+ 	[PD_REV20]		= "rev2",
+@@ -1562,18 +1571,68 @@ static void tcpm_queue_vdm(struct tcpm_port *port, const u32 header,
+ 	mod_vdm_delayed_work(port, 0);
+ }
+ 
+-static void tcpm_queue_vdm_unlocked(struct tcpm_port *port, const u32 header,
+-				    const u32 *data, int cnt, enum tcpm_transmit_type tx_sop_type)
++static void tcpm_queue_vdm_work(struct kthread_work *work)
+ {
+-	if (port->state != SRC_READY && port->state != SNK_READY &&
+-	    port->state != SRC_VDM_IDENTITY_REQUEST)
+-		return;
++	struct altmode_vdm_event *event = container_of(work,
++						       struct altmode_vdm_event,
++						       work);
++	struct tcpm_port *port = event->port;
+ 
+ 	mutex_lock(&port->lock);
+-	tcpm_queue_vdm(port, header, data, cnt, tx_sop_type);
++	if (port->state != SRC_READY && port->state != SNK_READY &&
++	    port->state != SRC_VDM_IDENTITY_REQUEST) {
++		tcpm_log_force(port, "dropping altmode_vdm_event");
++		goto port_unlock;
++	}
++
++	tcpm_queue_vdm(port, event->header, event->data, event->cnt, event->tx_sop_type);
++
++port_unlock:
++	kfree(event->data);
++	kfree(event);
+ 	mutex_unlock(&port->lock);
+ }
+ 
++static int tcpm_queue_vdm_unlocked(struct tcpm_port *port, const u32 header,
++				   const u32 *data, int cnt, enum tcpm_transmit_type tx_sop_type)
++{
++	struct altmode_vdm_event *event;
++	u32 *data_cpy;
++	int ret = -ENOMEM;
++
++	event = kzalloc(sizeof(*event), GFP_KERNEL);
++	if (!event)
++		goto err_event;
++
++	data_cpy = kcalloc(cnt, sizeof(u32), GFP_KERNEL);
++	if (!data_cpy)
++		goto err_data;
++
++	kthread_init_work(&event->work, tcpm_queue_vdm_work);
++	event->port = port;
++	event->header = header;
++	memcpy(data_cpy, data, sizeof(u32) * cnt);
++	event->data = data_cpy;
++	event->cnt = cnt;
++	event->tx_sop_type = tx_sop_type;
++
++	ret = kthread_queue_work(port->wq, &event->work);
++	if (!ret) {
++		ret = -EBUSY;
++		goto err_queue;
++	}
++
++	return 0;
++
++err_queue:
++	kfree(data_cpy);
++err_data:
++	kfree(event);
++err_event:
++	tcpm_log_force(port, "failed to queue altmode vdm, err:%d", ret);
++	return ret;
++}
++
+ static void svdm_consume_identity(struct tcpm_port *port, const u32 *p, int cnt)
+ {
+ 	u32 vdo = p[VDO_INDEX_IDH];
+@@ -2784,8 +2843,7 @@ static int tcpm_altmode_enter(struct typec_altmode *altmode, u32 *vdo)
+ 	header = VDO(altmode->svid, vdo ? 2 : 1, svdm_version, CMD_ENTER_MODE);
+ 	header |= VDO_OPOS(altmode->mode);
+ 
+-	tcpm_queue_vdm_unlocked(port, header, vdo, vdo ? 1 : 0, TCPC_TX_SOP);
+-	return 0;
++	return tcpm_queue_vdm_unlocked(port, header, vdo, vdo ? 1 : 0, TCPC_TX_SOP);
+ }
+ 
+ static int tcpm_altmode_exit(struct typec_altmode *altmode)
+@@ -2801,8 +2859,7 @@ static int tcpm_altmode_exit(struct typec_altmode *altmode)
+ 	header = VDO(altmode->svid, 1, svdm_version, CMD_EXIT_MODE);
+ 	header |= VDO_OPOS(altmode->mode);
+ 
+-	tcpm_queue_vdm_unlocked(port, header, NULL, 0, TCPC_TX_SOP);
+-	return 0;
++	return tcpm_queue_vdm_unlocked(port, header, NULL, 0, TCPC_TX_SOP);
+ }
+ 
+ static int tcpm_altmode_vdm(struct typec_altmode *altmode,
+@@ -2810,9 +2867,7 @@ static int tcpm_altmode_vdm(struct typec_altmode *altmode,
+ {
+ 	struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
+ 
+-	tcpm_queue_vdm_unlocked(port, header, data, count - 1, TCPC_TX_SOP);
+-
+-	return 0;
++	return tcpm_queue_vdm_unlocked(port, header, data, count - 1, TCPC_TX_SOP);
+ }
+ 
+ static const struct typec_altmode_ops tcpm_altmode_ops = {
+@@ -2836,8 +2891,7 @@ static int tcpm_cable_altmode_enter(struct typec_altmode *altmode, enum typec_pl
+ 	header = VDO(altmode->svid, vdo ? 2 : 1, svdm_version, CMD_ENTER_MODE);
+ 	header |= VDO_OPOS(altmode->mode);
+ 
+-	tcpm_queue_vdm_unlocked(port, header, vdo, vdo ? 1 : 0, TCPC_TX_SOP_PRIME);
+-	return 0;
++	return tcpm_queue_vdm_unlocked(port, header, vdo, vdo ? 1 : 0, TCPC_TX_SOP_PRIME);
+ }
+ 
+ static int tcpm_cable_altmode_exit(struct typec_altmode *altmode, enum typec_plug_index sop)
+@@ -2853,8 +2907,7 @@ static int tcpm_cable_altmode_exit(struct typec_altmode *altmode, enum typec_plu
+ 	header = VDO(altmode->svid, 1, svdm_version, CMD_EXIT_MODE);
+ 	header |= VDO_OPOS(altmode->mode);
+ 
+-	tcpm_queue_vdm_unlocked(port, header, NULL, 0, TCPC_TX_SOP_PRIME);
+-	return 0;
++	return tcpm_queue_vdm_unlocked(port, header, NULL, 0, TCPC_TX_SOP_PRIME);
+ }
+ 
+ static int tcpm_cable_altmode_vdm(struct typec_altmode *altmode, enum typec_plug_index sop,
+@@ -2862,9 +2915,7 @@ static int tcpm_cable_altmode_vdm(struct typec_altmode *altmode, enum typec_plug
+ {
+ 	struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
+ 
+-	tcpm_queue_vdm_unlocked(port, header, data, count - 1, TCPC_TX_SOP_PRIME);
+-
+-	return 0;
++	return tcpm_queue_vdm_unlocked(port, header, data, count - 1, TCPC_TX_SOP_PRIME);
+ }
+ 
+ static const struct typec_cable_ops tcpm_cable_ops = {
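
Instead of taking port->lock directly from altmode-driver context,
tcpm_queue_vdm_unlocked() now packages the VDM into a heap-allocated
event, copies the caller's payload, and defers the whole thing to the
port's kthread worker, which re-checks the port state under the lock
before queueing; the altmode ops can then propagate a real error code
instead of always returning 0. The core of that deferral pattern, with
illustrative names (struct my_event and friends are not the driver's
types):

	struct my_event {
		struct kthread_work work;
		u32 header;
		u32 *data;
		int cnt;
	};

	static void my_event_fn(struct kthread_work *work)
	{
		struct my_event *ev = container_of(work, struct my_event, work);

		/* take the lock, re-validate state, then consume the event */
		kfree(ev->data);
		kfree(ev);
	}

	static int queue_my_event(struct kthread_worker *worker, u32 header,
				  const u32 *data, int cnt)
	{
		struct my_event *ev = kzalloc(sizeof(*ev), GFP_KERNEL);

		if (!ev)
			return -ENOMEM;
		ev->data = kmemdup(data, cnt * sizeof(u32), GFP_KERNEL);
		if (!ev->data) {
			kfree(ev);
			return -ENOMEM;
		}
		ev->header = header;
		ev->cnt = cnt;
		kthread_init_work(&ev->work, my_event_fn);
		if (!kthread_queue_work(worker, &ev->work)) {
			/* false means the work was already queued */
			kfree(ev->data);
			kfree(ev);
			return -EBUSY;
		}
		return 0;
	}
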
+diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
+index 0d632ba5d2a3c9..68300fcd3c41b5 100644
+--- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
++++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
+@@ -350,6 +350,32 @@ static int vf_qm_func_stop(struct hisi_qm *qm)
+ 	return hisi_qm_mb(qm, QM_MB_CMD_PAUSE_QM, 0, 0, 0);
+ }
+ 
++static int vf_qm_version_check(struct acc_vf_data *vf_data, struct device *dev)
++{
++	switch (vf_data->acc_magic) {
++	case ACC_DEV_MAGIC_V2:
++		if (vf_data->major_ver != ACC_DRV_MAJOR_VER) {
++			dev_info(dev, "migration driver version <%u.%u> does not match!\n",
++				 vf_data->major_ver, vf_data->minor_ver);
++			return -EINVAL;
++		}
++		break;
++	case ACC_DEV_MAGIC_V1:
++		/* Correct dma address */
++		vf_data->eqe_dma = vf_data->qm_eqc_dw[QM_XQC_ADDR_HIGH];
++		vf_data->eqe_dma <<= QM_XQC_ADDR_OFFSET;
++		vf_data->eqe_dma |= vf_data->qm_eqc_dw[QM_XQC_ADDR_LOW];
++		vf_data->aeqe_dma = vf_data->qm_aeqc_dw[QM_XQC_ADDR_HIGH];
++		vf_data->aeqe_dma <<= QM_XQC_ADDR_OFFSET;
++		vf_data->aeqe_dma |= vf_data->qm_aeqc_dw[QM_XQC_ADDR_LOW];
++		break;
++	default:
++		return -EINVAL;
++	}
++
++	return 0;
++}
++
+ static int vf_qm_check_match(struct hisi_acc_vf_core_device *hisi_acc_vdev,
+ 			     struct hisi_acc_vf_migration_file *migf)
+ {
+@@ -363,7 +389,8 @@ static int vf_qm_check_match(struct hisi_acc_vf_core_device *hisi_acc_vdev,
+ 	if (migf->total_length < QM_MATCH_SIZE || hisi_acc_vdev->match_done)
+ 		return 0;
+ 
+-	if (vf_data->acc_magic != ACC_DEV_MAGIC) {
++	ret = vf_qm_version_check(vf_data, dev);
++	if (ret) {
+ 		dev_err(dev, "failed to match ACC_DEV_MAGIC\n");
+ 		return -EINVAL;
+ 	}
+@@ -399,13 +426,6 @@ static int vf_qm_check_match(struct hisi_acc_vf_core_device *hisi_acc_vdev,
+ 		return -EINVAL;
+ 	}
+ 
+-	ret = qm_write_regs(vf_qm, QM_VF_STATE, &vf_data->vf_qm_state, 1);
+-	if (ret) {
+-		dev_err(dev, "failed to write QM_VF_STATE\n");
+-		return ret;
+-	}
+-
+-	hisi_acc_vdev->vf_qm_state = vf_data->vf_qm_state;
+ 	hisi_acc_vdev->match_done = true;
+ 	return 0;
+ }
+@@ -418,7 +438,9 @@ static int vf_qm_get_match_data(struct hisi_acc_vf_core_device *hisi_acc_vdev,
+ 	int vf_id = hisi_acc_vdev->vf_id;
+ 	int ret;
+ 
+-	vf_data->acc_magic = ACC_DEV_MAGIC;
++	vf_data->acc_magic = ACC_DEV_MAGIC_V2;
++	vf_data->major_ver = ACC_DRV_MAJOR_VER;
++	vf_data->minor_ver = ACC_DRV_MINOR_VER;
+ 	/* Save device id */
+ 	vf_data->dev_id = hisi_acc_vdev->vf_dev->device;
+ 
+@@ -441,6 +463,19 @@ static int vf_qm_get_match_data(struct hisi_acc_vf_core_device *hisi_acc_vdev,
+ 	return 0;
+ }
+ 
++static void vf_qm_xeqc_save(struct hisi_qm *qm,
++			    struct hisi_acc_vf_migration_file *migf)
++{
++	struct acc_vf_data *vf_data = &migf->vf_data;
++	u16 eq_head, aeq_head;
++
++	eq_head = vf_data->qm_eqc_dw[0] & 0xFFFF;
++	qm_db(qm, 0, QM_DOORBELL_CMD_EQ, eq_head, 0);
++
++	aeq_head = vf_data->qm_aeqc_dw[0] & 0xFFFF;
++	qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, aeq_head, 0);
++}
++
+ static int vf_qm_load_data(struct hisi_acc_vf_core_device *hisi_acc_vdev,
+ 			   struct hisi_acc_vf_migration_file *migf)
+ {
+@@ -456,6 +491,20 @@ static int vf_qm_load_data(struct hisi_acc_vf_core_device *hisi_acc_vdev,
+ 	if (migf->total_length < sizeof(struct acc_vf_data))
+ 		return -EINVAL;
+ 
++	if (!vf_data->eqe_dma || !vf_data->aeqe_dma ||
++	    !vf_data->sqc_dma || !vf_data->cqc_dma) {
++		dev_info(dev, "resume dma addr is NULL!\n");
++		hisi_acc_vdev->vf_qm_state = QM_NOT_READY;
++		return 0;
++	}
++
++	ret = qm_write_regs(qm, QM_VF_STATE, &vf_data->vf_qm_state, 1);
++	if (ret) {
++		dev_err(dev, "failed to write QM_VF_STATE\n");
++		return -EINVAL;
++	}
++	hisi_acc_vdev->vf_qm_state = vf_data->vf_qm_state;
++
+ 	qm->eqe_dma = vf_data->eqe_dma;
+ 	qm->aeqe_dma = vf_data->aeqe_dma;
+ 	qm->sqc_dma = vf_data->sqc_dma;
+@@ -516,12 +565,12 @@ static int vf_qm_state_save(struct hisi_acc_vf_core_device *hisi_acc_vdev,
+ 		return -EINVAL;
+ 
+ 	/* Every reg is 32 bit, the dma address is 64 bit. */
+-	vf_data->eqe_dma = vf_data->qm_eqc_dw[1];
++	vf_data->eqe_dma = vf_data->qm_eqc_dw[QM_XQC_ADDR_HIGH];
+ 	vf_data->eqe_dma <<= QM_XQC_ADDR_OFFSET;
+-	vf_data->eqe_dma |= vf_data->qm_eqc_dw[0];
+-	vf_data->aeqe_dma = vf_data->qm_aeqc_dw[1];
++	vf_data->eqe_dma |= vf_data->qm_eqc_dw[QM_XQC_ADDR_LOW];
++	vf_data->aeqe_dma = vf_data->qm_aeqc_dw[QM_XQC_ADDR_HIGH];
+ 	vf_data->aeqe_dma <<= QM_XQC_ADDR_OFFSET;
+-	vf_data->aeqe_dma |= vf_data->qm_aeqc_dw[0];
++	vf_data->aeqe_dma |= vf_data->qm_aeqc_dw[QM_XQC_ADDR_LOW];
+ 
+ 	/* Through SQC_BT/CQC_BT to get sqc and cqc address */
+ 	ret = qm_get_sqc(vf_qm, &vf_data->sqc_dma);
+@@ -537,6 +586,9 @@ static int vf_qm_state_save(struct hisi_acc_vf_core_device *hisi_acc_vdev,
+ 	}
+ 
+ 	migf->total_length = sizeof(struct acc_vf_data);
++	/* Save eqc and aeqc interrupt information */
++	vf_qm_xeqc_save(vf_qm, migf);
++
+ 	return 0;
+ }
+ 
+@@ -1326,6 +1378,7 @@ static int hisi_acc_vfio_pci_migrn_init_dev(struct vfio_device *core_vdev)
+ 	hisi_acc_vdev->vf_id = pci_iov_vf_id(pdev) + 1;
+ 	hisi_acc_vdev->pf_qm = pf_qm;
+ 	hisi_acc_vdev->vf_dev = pdev;
++	hisi_acc_vdev->vf_qm_state = QM_NOT_READY;
+ 	mutex_init(&hisi_acc_vdev->state_mutex);
+ 
+ 	core_vdev->migration_flags = VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_PRE_COPY;
+diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h
+index 5bab46602fad26..465284168906b6 100644
+--- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h
++++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h
+@@ -38,6 +38,9 @@
+ #define QM_REG_ADDR_OFFSET	0x0004
+ 
+ #define QM_XQC_ADDR_OFFSET	32U
++#define QM_XQC_ADDR_LOW	0x1
++#define QM_XQC_ADDR_HIGH	0x2
++
+ #define QM_VF_AEQ_INT_MASK	0x0004
+ #define QM_VF_EQ_INT_MASK	0x000c
+ #define QM_IFC_INT_SOURCE_V	0x0020
+@@ -49,10 +52,15 @@
+ #define QM_EQC_DW0		0X8000
+ #define QM_AEQC_DW0		0X8020
+ 
++#define ACC_DRV_MAJOR_VER 1
++#define ACC_DRV_MINOR_VER 0
++
++#define ACC_DEV_MAGIC_V1	0XCDCDCDCDFEEDAACC
++#define ACC_DEV_MAGIC_V2	0xAACCFEEDDECADEDE
++
+ struct acc_vf_data {
+ #define QM_MATCH_SIZE offsetofend(struct acc_vf_data, qm_rsv_state)
+ 	/* QM match information */
+-#define ACC_DEV_MAGIC	0XCDCDCDCDFEEDAACC
+ 	u64 acc_magic;
+ 	u32 qp_num;
+ 	u32 dev_id;
+@@ -60,7 +68,9 @@ struct acc_vf_data {
+ 	u32 qp_base;
+ 	u32 vf_qm_state;
+ 	/* QM reserved match information */
+-	u32 qm_rsv_state[3];
++	u16 major_ver;
++	u16 minor_ver;
++	u32 qm_rsv_state[2];
+ 
+ 	/* QM RW regs */
+ 	u32 aeq_int_mask;
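
The header change versions the migration blob: ACC_DEV_MAGIC_V2 plus a
major/minor pair carved out of the first reserved word, so every later
field keeps its offset and a V2 driver can still accept V1 data (fixing
up the DMA addresses) while rejecting an incompatible major version.
That layout invariant can be pinned down at compile time; a sketch with
stand-in structures (blob_v1/blob_v2 are illustrative, not the driver's
types):

	struct blob_v1 {
		u64 magic;
		u32 rsv[3];
	};

	struct blob_v2 {
		u64 magic;
		u16 major_ver;
		u16 minor_ver;
		u32 rsv[2];
	};

	/* carving version fields out of reserved space must not move
	 * anything that follows */
	static_assert(sizeof(struct blob_v1) == sizeof(struct blob_v2));
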
+diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
+index bf391b40e576fc..8338cfd61fe14a 100644
+--- a/drivers/vfio/vfio_iommu_type1.c
++++ b/drivers/vfio/vfio_iommu_type1.c
+@@ -294,7 +294,7 @@ static int vfio_dma_bitmap_alloc_all(struct vfio_iommu *iommu, size_t pgsize)
+ 			struct rb_node *p;
+ 
+ 			for (p = rb_prev(n); p; p = rb_prev(p)) {
+-				struct vfio_dma *dma = rb_entry(n,
++				struct vfio_dma *dma = rb_entry(p,
+ 							struct vfio_dma, node);
+ 
+ 				vfio_dma_bitmap_free(dma);
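
The vfio_iommu_type1 fix is a one-character iterator bug: the rb_prev()
loop advanced p, but rb_entry() kept converting the fixed anchor n, so
every iteration freed the same element's dirty bitmap instead of
walking the tree backwards. The correct shape of a reverse
red-black-tree walk is:

	struct rb_node *p;

	for (p = rb_prev(n); p; p = rb_prev(p)) {
		/* convert the loop iterator, never the anchor node */
		struct vfio_dma *dma = rb_entry(p, struct vfio_dma, node);

		vfio_dma_bitmap_free(dma);
	}
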
+diff --git a/drivers/video/backlight/qcom-wled.c b/drivers/video/backlight/qcom-wled.c
+index 10129095a4c178..b19e5f73de8bb4 100644
+--- a/drivers/video/backlight/qcom-wled.c
++++ b/drivers/video/backlight/qcom-wled.c
+@@ -1406,9 +1406,11 @@ static int wled_configure(struct wled *wled)
+ 	wled->ctrl_addr = be32_to_cpu(*prop_addr);
+ 
+ 	rc = of_property_read_string(dev->of_node, "label", &wled->name);
+-	if (rc)
++	if (rc) {
+ 		wled->name = devm_kasprintf(dev, GFP_KERNEL, "%pOFn", dev->of_node);
+-
++		if (!wled->name)
++			return -ENOMEM;
++	}
+ 	switch (wled->version) {
+ 	case 3:
+ 		u32_opts = wled3_opts;
+diff --git a/drivers/video/fbdev/core/fbcvt.c b/drivers/video/fbdev/core/fbcvt.c
+index 64843464c66135..cd3821bd82e566 100644
+--- a/drivers/video/fbdev/core/fbcvt.c
++++ b/drivers/video/fbdev/core/fbcvt.c
+@@ -312,7 +312,7 @@ int fb_find_mode_cvt(struct fb_videomode *mode, int margins, int rb)
+ 	cvt.f_refresh = cvt.refresh;
+ 	cvt.interlace = 1;
+ 
+-	if (!cvt.xres || !cvt.yres || !cvt.refresh) {
++	if (!cvt.xres || !cvt.yres || !cvt.refresh || cvt.f_refresh > INT_MAX) {
+ 		printk(KERN_INFO "fbcvt: Invalid input parameters\n");
+ 		return 1;
+ 	}
+diff --git a/drivers/watchdog/exar_wdt.c b/drivers/watchdog/exar_wdt.c
+index 7c61ff34327116..c2e3bb08df899a 100644
+--- a/drivers/watchdog/exar_wdt.c
++++ b/drivers/watchdog/exar_wdt.c
+@@ -221,7 +221,7 @@ static const struct watchdog_info exar_wdt_info = {
+ 	.options	= WDIOF_KEEPALIVEPING |
+ 			  WDIOF_SETTIMEOUT |
+ 			  WDIOF_MAGICCLOSE,
+-	.identity	= "Exar/MaxLinear XR28V38x Watchdog",
++	.identity	= "Exar XR28V38x Watchdog",
+ };
+ 
+ static const struct watchdog_ops exar_wdt_ops = {
+diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
+index 4bd31242bd773c..e47bb157aa0903 100644
+--- a/drivers/xen/balloon.c
++++ b/drivers/xen/balloon.c
+@@ -700,15 +700,18 @@ static int __init balloon_add_regions(void)
+ 
+ 		/*
+ 		 * Extra regions are accounted for in the physmap, but need
+-		 * decreasing from current_pages to balloon down the initial
+-		 * allocation, because they are already accounted for in
+-		 * total_pages.
++		 * decreasing from current_pages and target_pages to balloon
++		 * down the initial allocation, because they are already
++		 * accounted for in total_pages.
+ 		 */
+-		if (extra_pfn_end - start_pfn >= balloon_stats.current_pages) {
++		pages = extra_pfn_end - start_pfn;
++		if (pages >= balloon_stats.current_pages ||
++		    pages >= balloon_stats.target_pages) {
+ 			WARN(1, "Extra pages underflow current target");
+ 			return -ERANGE;
+ 		}
+-		balloon_stats.current_pages -= extra_pfn_end - start_pfn;
++		balloon_stats.current_pages -= pages;
++		balloon_stats.target_pages -= pages;
+ 	}
+ 
+ 	return 0;
+diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
+index 819c752332355e..db78c06ba0cc6f 100644
+--- a/fs/9p/vfs_addr.c
++++ b/fs/9p/vfs_addr.c
+@@ -160,4 +160,5 @@ const struct address_space_operations v9fs_addr_operations = {
+ 	.invalidate_folio	= netfs_invalidate_folio,
+ 	.direct_IO		= noop_direct_IO,
+ 	.writepages		= netfs_writepages,
++	.migrate_folio		= filemap_migrate_folio,
+ };
+diff --git a/fs/btrfs/extent-io-tree.c b/fs/btrfs/extent-io-tree.c
+index 6d08c100b01de4..5f9a43734812e7 100644
+--- a/fs/btrfs/extent-io-tree.c
++++ b/fs/btrfs/extent-io-tree.c
+@@ -1252,8 +1252,11 @@ static int __set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
+ 		if (!prealloc)
+ 			goto search_again;
+ 		ret = split_state(tree, state, prealloc, end + 1);
+-		if (ret)
++		if (ret) {
+ 			extent_io_tree_panic(tree, state, "split", ret);
++			prealloc = NULL;
++			goto out;
++		}
+ 
+ 		set_state_bits(tree, prealloc, bits, changeset);
+ 		cache_state(prealloc, cached_state);
+@@ -1456,6 +1459,7 @@ int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
+ 		if (IS_ERR(inserted_state)) {
+ 			ret = PTR_ERR(inserted_state);
+ 			extent_io_tree_panic(tree, prealloc, "insert", ret);
++			goto out;
+ 		}
+ 		cache_state(inserted_state, cached_state);
+ 		if (inserted_state == prealloc)
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 9ce1270addb04b..1ab5b0c1b9b76a 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -4857,8 +4857,11 @@ int btrfs_truncate_block(struct btrfs_inode *inode, loff_t from, loff_t len,
+ 	folio = __filemap_get_folio(mapping, index,
+ 				    FGP_LOCK | FGP_ACCESSED | FGP_CREAT, mask);
+ 	if (IS_ERR(folio)) {
+-		btrfs_delalloc_release_space(inode, data_reserved, block_start,
+-					     blocksize, true);
++		if (only_release_metadata)
++			btrfs_delalloc_release_metadata(inode, blocksize, true);
++		else
++			btrfs_delalloc_release_space(inode, data_reserved,
++						     block_start, blocksize, true);
+ 		btrfs_delalloc_release_extents(inode, blocksize);
+ 		ret = -ENOMEM;
+ 		goto out;
+diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
+index d8fcc3eb85c88a..3fcc7c092c5eca 100644
+--- a/fs/btrfs/scrub.c
++++ b/fs/btrfs/scrub.c
+@@ -153,12 +153,14 @@ struct scrub_stripe {
+ 	unsigned int init_nr_io_errors;
+ 	unsigned int init_nr_csum_errors;
+ 	unsigned int init_nr_meta_errors;
++	unsigned int init_nr_meta_gen_errors;
+ 
+ 	/*
+ 	 * The following error bitmaps are all for the current status.
+ 	 * Every time we submit a new read, these bitmaps may be updated.
+ 	 *
+-	 * error_bitmap = io_error_bitmap | csum_error_bitmap | meta_error_bitmap;
++	 * error_bitmap = io_error_bitmap | csum_error_bitmap |
++	 *		  meta_error_bitmap | meta_generation_bitmap;
+ 	 *
+ 	 * IO and csum errors can happen for both metadata and data.
+ 	 */
+@@ -166,6 +168,7 @@ struct scrub_stripe {
+ 	unsigned long io_error_bitmap;
+ 	unsigned long csum_error_bitmap;
+ 	unsigned long meta_error_bitmap;
++	unsigned long meta_gen_error_bitmap;
+ 
+ 	/* For writeback (repair or replace) error reporting. */
+ 	unsigned long write_error_bitmap;
+@@ -616,7 +619,7 @@ static void scrub_verify_one_metadata(struct scrub_stripe *stripe, int sector_nr
+ 	memcpy(on_disk_csum, header->csum, fs_info->csum_size);
+ 
+ 	if (logical != btrfs_stack_header_bytenr(header)) {
+-		bitmap_set(&stripe->csum_error_bitmap, sector_nr, sectors_per_tree);
++		bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
+ 		bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
+ 		btrfs_warn_rl(fs_info,
+ 		"tree block %llu mirror %u has bad bytenr, has %llu want %llu",
+@@ -672,7 +675,7 @@ static void scrub_verify_one_metadata(struct scrub_stripe *stripe, int sector_nr
+ 	}
+ 	if (stripe->sectors[sector_nr].generation !=
+ 	    btrfs_stack_header_generation(header)) {
+-		bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
++		bitmap_set(&stripe->meta_gen_error_bitmap, sector_nr, sectors_per_tree);
+ 		bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
+ 		btrfs_warn_rl(fs_info,
+ 		"tree block %llu mirror %u has bad generation, has %llu want %llu",
+@@ -684,6 +687,7 @@ static void scrub_verify_one_metadata(struct scrub_stripe *stripe, int sector_nr
+ 	bitmap_clear(&stripe->error_bitmap, sector_nr, sectors_per_tree);
+ 	bitmap_clear(&stripe->csum_error_bitmap, sector_nr, sectors_per_tree);
+ 	bitmap_clear(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
++	bitmap_clear(&stripe->meta_gen_error_bitmap, sector_nr, sectors_per_tree);
+ }
+ 
+ static void scrub_verify_one_sector(struct scrub_stripe *stripe, int sector_nr)
+@@ -972,8 +976,22 @@ static void scrub_stripe_report_errors(struct scrub_ctx *sctx,
+ 			if (__ratelimit(&rs) && dev)
+ 				scrub_print_common_warning("header error", dev, false,
+ 						     stripe->logical, physical);
++		if (test_bit(sector_nr, &stripe->meta_gen_error_bitmap))
++			if (__ratelimit(&rs) && dev)
++				scrub_print_common_warning("generation error", dev, false,
++						     stripe->logical, physical);
+ 	}
+ 
++	/* Update the device stats. */
++	for (int i = 0; i < stripe->init_nr_io_errors; i++)
++		btrfs_dev_stat_inc_and_print(stripe->dev, BTRFS_DEV_STAT_READ_ERRS);
++	for (int i = 0; i < stripe->init_nr_csum_errors; i++)
++		btrfs_dev_stat_inc_and_print(stripe->dev, BTRFS_DEV_STAT_CORRUPTION_ERRS);
++	/* Generation mismatch errors are counted per metadata tree block, not per sector. */
++	for (int i = 0; i < stripe->init_nr_meta_gen_errors;
++	     i += (fs_info->nodesize >> fs_info->sectorsize_bits))
++		btrfs_dev_stat_inc_and_print(stripe->dev, BTRFS_DEV_STAT_GENERATION_ERRS);
++
+ 	spin_lock(&sctx->stat_lock);
+ 	sctx->stat.data_extents_scrubbed += stripe->nr_data_extents;
+ 	sctx->stat.tree_extents_scrubbed += stripe->nr_meta_extents;
+@@ -982,7 +1000,8 @@ static void scrub_stripe_report_errors(struct scrub_ctx *sctx,
+ 	sctx->stat.no_csum += nr_nodatacsum_sectors;
+ 	sctx->stat.read_errors += stripe->init_nr_io_errors;
+ 	sctx->stat.csum_errors += stripe->init_nr_csum_errors;
+-	sctx->stat.verify_errors += stripe->init_nr_meta_errors;
++	sctx->stat.verify_errors += stripe->init_nr_meta_errors +
++				    stripe->init_nr_meta_gen_errors;
+ 	sctx->stat.uncorrectable_errors +=
+ 		bitmap_weight(&stripe->error_bitmap, stripe->nr_sectors);
+ 	sctx->stat.corrected_errors += nr_repaired_sectors;
+@@ -1028,6 +1047,8 @@ static void scrub_stripe_read_repair_worker(struct work_struct *work)
+ 						    stripe->nr_sectors);
+ 	stripe->init_nr_meta_errors = bitmap_weight(&stripe->meta_error_bitmap,
+ 						    stripe->nr_sectors);
++	stripe->init_nr_meta_gen_errors = bitmap_weight(&stripe->meta_gen_error_bitmap,
++							stripe->nr_sectors);
+ 
+ 	if (bitmap_empty(&stripe->init_error_bitmap, stripe->nr_sectors))
+ 		goto out;
+@@ -1142,6 +1163,9 @@ static void scrub_write_endio(struct btrfs_bio *bbio)
+ 		bitmap_set(&stripe->write_error_bitmap, sector_nr,
+ 			   bio_size >> fs_info->sectorsize_bits);
+ 		spin_unlock_irqrestore(&stripe->write_error_lock, flags);
++		for (int i = 0; i < (bio_size >> fs_info->sectorsize_bits); i++)
++			btrfs_dev_stat_inc_and_print(stripe->dev,
++						     BTRFS_DEV_STAT_WRITE_ERRS);
+ 	}
+ 	bio_put(&bbio->bio);
+ 
+@@ -1508,10 +1532,12 @@ static void scrub_stripe_reset_bitmaps(struct scrub_stripe *stripe)
+ 	stripe->init_nr_io_errors = 0;
+ 	stripe->init_nr_csum_errors = 0;
+ 	stripe->init_nr_meta_errors = 0;
++	stripe->init_nr_meta_gen_errors = 0;
+ 	stripe->error_bitmap = 0;
+ 	stripe->io_error_bitmap = 0;
+ 	stripe->csum_error_bitmap = 0;
+ 	stripe->meta_error_bitmap = 0;
++	stripe->meta_gen_error_bitmap = 0;
+ }
+ 
+ /*
+diff --git a/fs/erofs/super.c b/fs/erofs/super.c
+index 3421448fef0e3e..5fcdab61451769 100644
+--- a/fs/erofs/super.c
++++ b/fs/erofs/super.c
+@@ -188,8 +188,11 @@ static int erofs_init_device(struct erofs_buf *buf, struct super_block *sb,
+ 				filp_open(dif->path, O_RDONLY | O_LARGEFILE, 0) :
+ 				bdev_file_open_by_path(dif->path,
+ 						BLK_OPEN_READ, sb->s_type, NULL);
+-		if (IS_ERR(file))
++		if (IS_ERR(file)) {
++			if (file == ERR_PTR(-ENOTBLK))
++				return -EINVAL;
+ 			return PTR_ERR(file);
++		}
+ 
+ 		if (!erofs_is_fileio_mode(sbi)) {
+ 			dif->dax_dev = fs_dax_get_by_bdev(file_bdev(file),
+@@ -537,24 +540,52 @@ static int erofs_fc_parse_param(struct fs_context *fc,
+ 	return 0;
+ }
+ 
+-static struct inode *erofs_nfs_get_inode(struct super_block *sb,
+-					 u64 ino, u32 generation)
++static int erofs_encode_fh(struct inode *inode, u32 *fh, int *max_len,
++			   struct inode *parent)
+ {
+-	return erofs_iget(sb, ino);
++	erofs_nid_t nid = EROFS_I(inode)->nid;
++	int len = parent ? 6 : 3;
++
++	if (*max_len < len) {
++		*max_len = len;
++		return FILEID_INVALID;
++	}
++
++	fh[0] = (u32)(nid >> 32);
++	fh[1] = (u32)(nid & 0xffffffff);
++	fh[2] = inode->i_generation;
++
++	if (parent) {
++		nid = EROFS_I(parent)->nid;
++
++		fh[3] = (u32)(nid >> 32);
++		fh[4] = (u32)(nid & 0xffffffff);
++		fh[5] = parent->i_generation;
++	}
++
++	*max_len = len;
++	return parent ? FILEID_INO64_GEN_PARENT : FILEID_INO64_GEN;
+ }
+ 
+ static struct dentry *erofs_fh_to_dentry(struct super_block *sb,
+ 		struct fid *fid, int fh_len, int fh_type)
+ {
+-	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
+-				    erofs_nfs_get_inode);
++	if ((fh_type != FILEID_INO64_GEN &&
++	     fh_type != FILEID_INO64_GEN_PARENT) || fh_len < 3)
++		return NULL;
++
++	return d_obtain_alias(erofs_iget(sb,
++		((u64)fid->raw[0] << 32) | fid->raw[1]));
+ }
+ 
+ static struct dentry *erofs_fh_to_parent(struct super_block *sb,
+ 		struct fid *fid, int fh_len, int fh_type)
+ {
+-	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
+-				    erofs_nfs_get_inode);
++	if (fh_type != FILEID_INO64_GEN_PARENT || fh_len < 6)
++		return NULL;
++
++	return d_obtain_alias(erofs_iget(sb,
++		((u64)fid->raw[3] << 32) | fid->raw[4]));
+ }
+ 
+ static struct dentry *erofs_get_parent(struct dentry *child)
+@@ -570,7 +601,7 @@ static struct dentry *erofs_get_parent(struct dentry *child)
+ }
+ 
+ static const struct export_operations erofs_export_ops = {
+-	.encode_fh = generic_encode_ino32_fh,
++	.encode_fh = erofs_encode_fh,
+ 	.fh_to_dentry = erofs_fh_to_dentry,
+ 	.fh_to_parent = erofs_fh_to_parent,
+ 	.get_parent = erofs_get_parent,
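
Rather than the generic 32-bit inode handles, erofs now encodes its
64-bit nid directly: three u32 words per inode (nid split high/low,
plus the generation), and a second triple for the parent when one is
requested. The packing and unpacking are symmetric; a sketch with
hypothetical helper names:

	/* Split a 64-bit nid across two u32 file-handle words. */
	static inline void fh_pack(u32 *fh, u64 nid, u32 gen)
	{
		fh[0] = (u32)(nid >> 32);
		fh[1] = (u32)(nid & 0xffffffff);
		fh[2] = gen;
	}

	static inline u64 fh_unpack(const u32 *fh)
	{
		return ((u64)fh[0] << 32) | fh[1];
	}

A parent handle simply repeats the triple at fh[3..5], which is why
erofs_fh_to_parent() insists on FILEID_INO64_GEN_PARENT and
fh_len >= 6.
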
+diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
+index 1b0050b8421d88..62c7fd1168a15a 100644
+--- a/fs/f2fs/data.c
++++ b/fs/f2fs/data.c
+@@ -53,8 +53,8 @@ bool f2fs_is_cp_guaranteed(struct page *page)
+ 	struct inode *inode;
+ 	struct f2fs_sb_info *sbi;
+ 
+-	if (!mapping)
+-		return false;
++	if (fscrypt_is_bounce_page(page))
++		return page_private_gcing(fscrypt_pagecache_page(page));
+ 
+ 	inode = mapping->host;
+ 	sbi = F2FS_I_SB(inode);
+diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
+index 1c783c2e4902ae..1219e37fa7ad3c 100644
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -2508,8 +2508,14 @@ static inline void dec_valid_block_count(struct f2fs_sb_info *sbi,
+ 	blkcnt_t sectors = count << F2FS_LOG_SECTORS_PER_BLOCK;
+ 
+ 	spin_lock(&sbi->stat_lock);
+-	f2fs_bug_on(sbi, sbi->total_valid_block_count < (block_t) count);
+-	sbi->total_valid_block_count -= (block_t)count;
++	if (unlikely(sbi->total_valid_block_count < count)) {
++		f2fs_warn(sbi, "Inconsistent total_valid_block_count:%u, ino:%lu, count:%u",
++			  sbi->total_valid_block_count, inode->i_ino, count);
++		sbi->total_valid_block_count = 0;
++		set_sbi_flag(sbi, SBI_NEED_FSCK);
++	} else {
++		sbi->total_valid_block_count -= count;
++	}
+ 	if (sbi->reserved_blocks &&
+ 		sbi->current_reserved_blocks < sbi->reserved_blocks)
+ 		sbi->current_reserved_blocks = min(sbi->reserved_blocks,
+diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
+index e0469316c7cd4e..cd56c0e66657be 100644
+--- a/fs/f2fs/gc.c
++++ b/fs/f2fs/gc.c
+@@ -2072,6 +2072,9 @@ int f2fs_gc_range(struct f2fs_sb_info *sbi,
+ 			.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
+ 		};
+ 
++		if (IS_CURSEC(sbi, GET_SEC_FROM_SEG(sbi, segno)))
++			continue;
++
+ 		do_garbage_collect(sbi, segno, &gc_list, FG_GC, true, false);
+ 		put_gc_inode(&gc_list);
+ 
+diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
+index 57d46e1439dedf..6f70f377f12115 100644
+--- a/fs/f2fs/namei.c
++++ b/fs/f2fs/namei.c
+@@ -413,7 +413,7 @@ static int f2fs_link(struct dentry *old_dentry, struct inode *dir,
+ 
+ 	if (is_inode_flag_set(dir, FI_PROJ_INHERIT) &&
+ 			(!projid_eq(F2FS_I(dir)->i_projid,
+-			F2FS_I(old_dentry->d_inode)->i_projid)))
++			F2FS_I(inode)->i_projid)))
+ 		return -EXDEV;
+ 
+ 	err = f2fs_dquot_initialize(dir);
+@@ -905,7 +905,7 @@ static int f2fs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
+ 
+ 	if (is_inode_flag_set(new_dir, FI_PROJ_INHERIT) &&
+ 			(!projid_eq(F2FS_I(new_dir)->i_projid,
+-			F2FS_I(old_dentry->d_inode)->i_projid)))
++			F2FS_I(old_inode)->i_projid)))
+ 		return -EXDEV;
+ 
+ 	/*
+@@ -1098,10 +1098,10 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
+ 
+ 	if ((is_inode_flag_set(new_dir, FI_PROJ_INHERIT) &&
+ 			!projid_eq(F2FS_I(new_dir)->i_projid,
+-			F2FS_I(old_dentry->d_inode)->i_projid)) ||
+-	    (is_inode_flag_set(new_dir, FI_PROJ_INHERIT) &&
++			F2FS_I(old_inode)->i_projid)) ||
++	    (is_inode_flag_set(old_dir, FI_PROJ_INHERIT) &&
+ 			!projid_eq(F2FS_I(old_dir)->i_projid,
+-			F2FS_I(new_dentry->d_inode)->i_projid)))
++			F2FS_I(new_inode)->i_projid)))
+ 		return -EXDEV;
+ 
+ 	err = f2fs_dquot_initialize(old_dir);
+diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
+index 0c004dd5595b91..05a342933f98f6 100644
+--- a/fs/f2fs/segment.h
++++ b/fs/f2fs/segment.h
+@@ -431,7 +431,6 @@ static inline void __set_free(struct f2fs_sb_info *sbi, unsigned int segno)
+ 	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
+ 	unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno);
+ 	unsigned int next;
+-	unsigned int usable_segs = f2fs_usable_segs_in_sec(sbi);
+ 
+ 	spin_lock(&free_i->segmap_lock);
+ 	clear_bit(segno, free_i->free_segmap);
+@@ -439,7 +438,7 @@ static inline void __set_free(struct f2fs_sb_info *sbi, unsigned int segno)
+ 
+ 	next = find_next_bit(free_i->free_segmap,
+ 			start_segno + SEGS_PER_SEC(sbi), start_segno);
+-	if (next >= start_segno + usable_segs) {
++	if (next >= start_segno + f2fs_usable_segs_in_sec(sbi)) {
+ 		clear_bit(secno, free_i->free_secmap);
+ 		free_i->free_sections++;
+ 	}
+@@ -465,22 +464,36 @@ static inline void __set_test_and_free(struct f2fs_sb_info *sbi,
+ 	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
+ 	unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno);
+ 	unsigned int next;
+-	unsigned int usable_segs = f2fs_usable_segs_in_sec(sbi);
++	bool ret;
+ 
+ 	spin_lock(&free_i->segmap_lock);
+-	if (test_and_clear_bit(segno, free_i->free_segmap)) {
+-		free_i->free_segments++;
+-
+-		if (!inmem && IS_CURSEC(sbi, secno))
+-			goto skip_free;
+-		next = find_next_bit(free_i->free_segmap,
+-				start_segno + SEGS_PER_SEC(sbi), start_segno);
+-		if (next >= start_segno + usable_segs) {
+-			if (test_and_clear_bit(secno, free_i->free_secmap))
+-				free_i->free_sections++;
+-		}
+-	}
+-skip_free:
++	ret = test_and_clear_bit(segno, free_i->free_segmap);
++	if (!ret)
++		goto unlock_out;
++
++	free_i->free_segments++;
++
++	if (!inmem && IS_CURSEC(sbi, secno))
++		goto unlock_out;
++
++	/* check large section */
++	next = find_next_bit(free_i->free_segmap,
++			     start_segno + SEGS_PER_SEC(sbi), start_segno);
++	if (next < start_segno + f2fs_usable_segs_in_sec(sbi))
++		goto unlock_out;
++
++	ret = test_and_clear_bit(secno, free_i->free_secmap);
++	if (!ret)
++		goto unlock_out;
++
++	free_i->free_sections++;
++
++	if (GET_SEC_FROM_SEG(sbi, sbi->next_victim_seg[BG_GC]) == secno)
++		sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
++	if (GET_SEC_FROM_SEG(sbi, sbi->next_victim_seg[FG_GC]) == secno)
++		sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
++
++unlock_out:
+ 	spin_unlock(&free_i->segmap_lock);
+ }
+ 
+diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
+index 573cc4725e2e88..faa76531246ebb 100644
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -1862,9 +1862,9 @@ static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
+ 	buf->f_fsid    = u64_to_fsid(id);
+ 
+ #ifdef CONFIG_QUOTA
+-	if (is_inode_flag_set(dentry->d_inode, FI_PROJ_INHERIT) &&
++	if (is_inode_flag_set(d_inode(dentry), FI_PROJ_INHERIT) &&
+ 			sb_has_quota_limits_enabled(sb, PRJQUOTA)) {
+-		f2fs_statfs_project(sb, F2FS_I(dentry->d_inode)->i_projid, buf);
++		f2fs_statfs_project(sb, F2FS_I(d_inode(dentry))->i_projid, buf);
+ 	}
+ #endif
+ 	return 0;
+diff --git a/fs/filesystems.c b/fs/filesystems.c
+index 58b9067b2391ce..95e5256821a534 100644
+--- a/fs/filesystems.c
++++ b/fs/filesystems.c
+@@ -156,15 +156,19 @@ static int fs_index(const char __user * __name)
+ static int fs_name(unsigned int index, char __user * buf)
+ {
+ 	struct file_system_type * tmp;
+-	int len, res;
++	int len, res = -EINVAL;
+ 
+ 	read_lock(&file_systems_lock);
+-	for (tmp = file_systems; tmp; tmp = tmp->next, index--)
+-		if (index <= 0 && try_module_get(tmp->owner))
++	for (tmp = file_systems; tmp; tmp = tmp->next, index--) {
++		if (index == 0) {
++			if (try_module_get(tmp->owner))
++				res = 0;
+ 			break;
++		}
++	}
+ 	read_unlock(&file_systems_lock);
+-	if (!tmp)
+-		return -EINVAL;
++	if (res)
++		return res;
+ 
+ 	/* OK, we got the reference, so we can safely block */
+ 	len = strlen(tmp->name) + 1;
+diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
+index 4f1eca99786b61..aecce4bb5e1a9c 100644
+--- a/fs/gfs2/glock.c
++++ b/fs/gfs2/glock.c
+@@ -1183,7 +1183,6 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
+ 		   const struct gfs2_glock_operations *glops, int create,
+ 		   struct gfs2_glock **glp)
+ {
+-	struct super_block *s = sdp->sd_vfs;
+ 	struct lm_lockname name = { .ln_number = number,
+ 				    .ln_type = glops->go_type,
+ 				    .ln_sbd = sdp };
+@@ -1246,7 +1245,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
+ 	mapping = gfs2_glock2aspace(gl);
+ 	if (mapping) {
+                 mapping->a_ops = &gfs2_meta_aops;
+-		mapping->host = s->s_bdev->bd_mapping->host;
++		mapping->host = sdp->sd_inode;
+ 		mapping->flags = 0;
+ 		mapping_set_gfp_mask(mapping, GFP_NOFS);
+ 		mapping->i_private_data = NULL;
+diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
+index 95d8081681dcc1..72a0601ce65e2c 100644
+--- a/fs/gfs2/glops.c
++++ b/fs/gfs2/glops.c
+@@ -168,7 +168,7 @@ void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
+ static int gfs2_rgrp_metasync(struct gfs2_glock *gl)
+ {
+ 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+-	struct address_space *metamapping = &sdp->sd_aspace;
++	struct address_space *metamapping = gfs2_aspace(sdp);
+ 	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
+ 	const unsigned bsize = sdp->sd_sb.sb_bsize;
+ 	loff_t start = (rgd->rd_addr * bsize) & PAGE_MASK;
+@@ -225,7 +225,7 @@ static int rgrp_go_sync(struct gfs2_glock *gl)
+ static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
+ {
+ 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+-	struct address_space *mapping = &sdp->sd_aspace;
++	struct address_space *mapping = gfs2_aspace(sdp);
+ 	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
+ 	const unsigned bsize = sdp->sd_sb.sb_bsize;
+ 	loff_t start, end;
+diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
+index bd1348bff90ebe..e5535d7b465925 100644
+--- a/fs/gfs2/incore.h
++++ b/fs/gfs2/incore.h
+@@ -793,7 +793,7 @@ struct gfs2_sbd {
+ 
+ 	/* Log stuff */
+ 
+-	struct address_space sd_aspace;
++	struct inode *sd_inode;
+ 
+ 	spinlock_t sd_log_lock;
+ 
+@@ -849,6 +849,13 @@ struct gfs2_sbd {
+ 	unsigned long sd_glock_dqs_held;
+ };
+ 
++#define GFS2_BAD_INO 1
++
++static inline struct address_space *gfs2_aspace(struct gfs2_sbd *sdp)
++{
++	return sdp->sd_inode->i_mapping;
++}
++
+ static inline void gfs2_glstats_inc(struct gfs2_glock *gl, int which)
+ {
+ 	gl->gl_stats.stats[which]++;
+diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
+index 1b95db2c3aac3c..3be24285ab01da 100644
+--- a/fs/gfs2/inode.c
++++ b/fs/gfs2/inode.c
+@@ -659,7 +659,8 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
+ 	if (!IS_ERR(inode)) {
+ 		if (S_ISDIR(inode->i_mode)) {
+ 			iput(inode);
+-			inode = ERR_PTR(-EISDIR);
++			inode = NULL;
++			error = -EISDIR;
+ 			goto fail_gunlock;
+ 		}
+ 		d_instantiate(dentry, inode);
+diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
+index fea3efcc2f9309..960d6afcdfad81 100644
+--- a/fs/gfs2/meta_io.c
++++ b/fs/gfs2/meta_io.c
+@@ -132,7 +132,7 @@ struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create)
+ 	unsigned int bufnum;
+ 
+ 	if (mapping == NULL)
+-		mapping = &sdp->sd_aspace;
++		mapping = gfs2_aspace(sdp);
+ 
+ 	shift = PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift;
+ 	index = blkno >> shift;             /* convert block to page */
+diff --git a/fs/gfs2/meta_io.h b/fs/gfs2/meta_io.h
+index 831d988c2ceb74..b7c8a6684d0249 100644
+--- a/fs/gfs2/meta_io.h
++++ b/fs/gfs2/meta_io.h
+@@ -44,9 +44,7 @@ static inline struct gfs2_sbd *gfs2_mapping2sbd(struct address_space *mapping)
+ 		struct gfs2_glock_aspace *gla =
+ 			container_of(mapping, struct gfs2_glock_aspace, mapping);
+ 		return gla->glock.gl_name.ln_sbd;
+-	} else if (mapping->a_ops == &gfs2_rgrp_aops)
+-		return container_of(mapping, struct gfs2_sbd, sd_aspace);
+-	else
++	} else
+ 		return inode->i_sb->s_fs_info;
+ }
+ 
+diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
+index e83d293c361423..4a0f7de41b2b2f 100644
+--- a/fs/gfs2/ops_fstype.c
++++ b/fs/gfs2/ops_fstype.c
+@@ -64,15 +64,17 @@ static void gfs2_tune_init(struct gfs2_tune *gt)
+ 
+ void free_sbd(struct gfs2_sbd *sdp)
+ {
++	struct super_block *sb = sdp->sd_vfs;
++
+ 	if (sdp->sd_lkstats)
+ 		free_percpu(sdp->sd_lkstats);
++	sb->s_fs_info = NULL;
+ 	kfree(sdp);
+ }
+ 
+ static struct gfs2_sbd *init_sbd(struct super_block *sb)
+ {
+ 	struct gfs2_sbd *sdp;
+-	struct address_space *mapping;
+ 
+ 	sdp = kzalloc(sizeof(struct gfs2_sbd), GFP_KERNEL);
+ 	if (!sdp)
+@@ -109,16 +111,6 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
+ 
+ 	INIT_LIST_HEAD(&sdp->sd_sc_inodes_list);
+ 
+-	mapping = &sdp->sd_aspace;
+-
+-	address_space_init_once(mapping);
+-	mapping->a_ops = &gfs2_rgrp_aops;
+-	mapping->host = sb->s_bdev->bd_mapping->host;
+-	mapping->flags = 0;
+-	mapping_set_gfp_mask(mapping, GFP_NOFS);
+-	mapping->i_private_data = NULL;
+-	mapping->writeback_index = 0;
+-
+ 	spin_lock_init(&sdp->sd_log_lock);
+ 	atomic_set(&sdp->sd_log_pinned, 0);
+ 	INIT_LIST_HEAD(&sdp->sd_log_revokes);
+@@ -1135,6 +1127,7 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
+ 	int silent = fc->sb_flags & SB_SILENT;
+ 	struct gfs2_sbd *sdp;
+ 	struct gfs2_holder mount_gh;
++	struct address_space *mapping;
+ 	int error;
+ 
+ 	sdp = init_sbd(sb);
+@@ -1156,6 +1149,7 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
+ 	sb->s_flags |= SB_NOSEC;
+ 	sb->s_magic = GFS2_MAGIC;
+ 	sb->s_op = &gfs2_super_ops;
++
+ 	sb->s_d_op = &gfs2_dops;
+ 	sb->s_export_op = &gfs2_export_ops;
+ 	sb->s_qcop = &gfs2_quotactl_ops;
+@@ -1181,9 +1175,21 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
+ 		sdp->sd_tune.gt_statfs_quantum = 30;
+ 	}
+ 
++	/* Set up an address space for metadata writes */
++	sdp->sd_inode = new_inode(sb);
++	error = -ENOMEM;
++	if (!sdp->sd_inode)
++		goto fail_free;
++	sdp->sd_inode->i_ino = GFS2_BAD_INO;
++	sdp->sd_inode->i_size = OFFSET_MAX;
++
++	mapping = gfs2_aspace(sdp);
++	mapping->a_ops = &gfs2_rgrp_aops;
++	mapping_set_gfp_mask(mapping, GFP_NOFS);
++
+ 	error = init_names(sdp, silent);
+ 	if (error)
+-		goto fail_free;
++		goto fail_iput;
+ 
+ 	snprintf(sdp->sd_fsname, sizeof(sdp->sd_fsname), "%s", sdp->sd_table_name);
+ 
+@@ -1192,7 +1198,7 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
+ 			WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_FREEZABLE, 0,
+ 			sdp->sd_fsname);
+ 	if (!sdp->sd_glock_wq)
+-		goto fail_free;
++		goto fail_iput;
+ 
+ 	sdp->sd_delete_wq = alloc_workqueue("gfs2-delete/%s",
+ 			WQ_MEM_RECLAIM | WQ_FREEZABLE, 0, sdp->sd_fsname);
+@@ -1309,9 +1315,10 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
+ fail_glock_wq:
+ 	if (sdp->sd_glock_wq)
+ 		destroy_workqueue(sdp->sd_glock_wq);
++fail_iput:
++	iput(sdp->sd_inode);
+ fail_free:
+ 	free_sbd(sdp);
+-	sb->s_fs_info = NULL;
+ 	return error;
+ }
+ 
+diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
+index b9cef63c78717f..5ecb857cf74e30 100644
+--- a/fs/gfs2/super.c
++++ b/fs/gfs2/super.c
+@@ -648,7 +648,7 @@ static void gfs2_put_super(struct super_block *sb)
+ 	gfs2_jindex_free(sdp);
+ 	/*  Take apart glock structures and buffer lists  */
+ 	gfs2_gl_hash_clear(sdp);
+-	truncate_inode_pages_final(&sdp->sd_aspace);
++	iput(sdp->sd_inode);
+ 	gfs2_delete_debugfs_file(sdp);
+ 
+ 	gfs2_sys_fs_del(sdp);
+@@ -674,7 +674,7 @@ static int gfs2_sync_fs(struct super_block *sb, int wait)
+ 	return sdp->sd_log_error;
+ }
+ 
+-static int gfs2_do_thaw(struct gfs2_sbd *sdp)
++static int gfs2_do_thaw(struct gfs2_sbd *sdp, enum freeze_holder who)
+ {
+ 	struct super_block *sb = sdp->sd_vfs;
+ 	int error;
+@@ -682,7 +682,7 @@ static int gfs2_do_thaw(struct gfs2_sbd *sdp)
+ 	error = gfs2_freeze_lock_shared(sdp);
+ 	if (error)
+ 		goto fail;
+-	error = thaw_super(sb, FREEZE_HOLDER_USERSPACE);
++	error = thaw_super(sb, who);
+ 	if (!error)
+ 		return 0;
+ 
+@@ -710,7 +710,7 @@ void gfs2_freeze_func(struct work_struct *work)
+ 	gfs2_freeze_unlock(sdp);
+ 	set_bit(SDF_FROZEN, &sdp->sd_flags);
+ 
+-	error = gfs2_do_thaw(sdp);
++	error = gfs2_do_thaw(sdp, FREEZE_HOLDER_USERSPACE);
+ 	if (error)
+ 		goto out;
+ 
+@@ -728,6 +728,7 @@ void gfs2_freeze_func(struct work_struct *work)
+ /**
+  * gfs2_freeze_super - prevent further writes to the filesystem
+  * @sb: the VFS structure for the filesystem
++ * @who: freeze flags
+  *
+  */
+ 
+@@ -744,7 +745,7 @@ static int gfs2_freeze_super(struct super_block *sb, enum freeze_holder who)
+ 	}
+ 
+ 	for (;;) {
+-		error = freeze_super(sb, FREEZE_HOLDER_USERSPACE);
++		error = freeze_super(sb, who);
+ 		if (error) {
+ 			fs_info(sdp, "GFS2: couldn't freeze filesystem: %d\n",
+ 				error);
+@@ -758,7 +759,7 @@ static int gfs2_freeze_super(struct super_block *sb, enum freeze_holder who)
+ 			break;
+ 		}
+ 
+-		error = gfs2_do_thaw(sdp);
++		error = gfs2_do_thaw(sdp, who);
+ 		if (error)
+ 			goto out;
+ 
+@@ -796,6 +797,7 @@ static int gfs2_freeze_fs(struct super_block *sb)
+ /**
+  * gfs2_thaw_super - reallow writes to the filesystem
+  * @sb: the VFS structure for the filesystem
++ * @who: freeze flags
+  *
+  */
+ 
+@@ -814,7 +816,7 @@ static int gfs2_thaw_super(struct super_block *sb, enum freeze_holder who)
+ 	atomic_inc(&sb->s_active);
+ 	gfs2_freeze_unlock(sdp);
+ 
+-	error = gfs2_do_thaw(sdp);
++	error = gfs2_do_thaw(sdp, who);
+ 
+ 	if (!error) {
+ 		clear_bit(SDF_FREEZE_INITIATOR, &sdp->sd_flags);
+diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
+index ecc699f8d9fcaa..6286183021022a 100644
+--- a/fs/gfs2/sys.c
++++ b/fs/gfs2/sys.c
+@@ -764,7 +764,6 @@ int gfs2_sys_fs_add(struct gfs2_sbd *sdp)
+ 	fs_err(sdp, "error %d adding sysfs files\n", error);
+ 	kobject_put(&sdp->sd_kobj);
+ 	wait_for_completion(&sdp->sd_kobj_unregister);
+-	sb->s_fs_info = NULL;
+ 	return error;
+ }
+ 
+diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
+index 458519e416fe75..5dc90a498e75d1 100644
+--- a/fs/kernfs/dir.c
++++ b/fs/kernfs/dir.c
+@@ -1560,8 +1560,9 @@ void kernfs_break_active_protection(struct kernfs_node *kn)
+  * invoked before finishing the kernfs operation.  Note that while this
+  * function restores the active reference, it doesn't and can't actually
+  * restore the active protection - @kn may already or be in the process of
+- * being removed.  Once kernfs_break_active_protection() is invoked, that
+- * protection is irreversibly gone for the kernfs operation instance.
++ * being drained and removed.  Once kernfs_break_active_protection() is
++ * invoked, that protection is irreversibly gone for the kernfs operation
++ * instance.
+  *
+  * While this function may be called at any point after
+  * kernfs_break_active_protection() is invoked, its most useful location
+diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
+index 8502ef68459b98..1943c8bd479bf6 100644
+--- a/fs/kernfs/file.c
++++ b/fs/kernfs/file.c
+@@ -778,8 +778,9 @@ bool kernfs_should_drain_open_files(struct kernfs_node *kn)
+ 	/*
+ 	 * @kn being deactivated guarantees that @kn->attr.open can't change
+ 	 * beneath us making the lockless test below safe.
++	 * Callers that have passed kernfs_unbreak_active_protection() may be
++	 * counted in kn->active by now; do not WARN_ON() because of them.
+ 	 */
+-	WARN_ON_ONCE(atomic_read(&kn->active) != KN_DEACTIVATED_BIAS);
+ 
+ 	rcu_read_lock();
+ 	on = rcu_dereference(kn->attr.open);
+diff --git a/fs/namespace.c b/fs/namespace.c
+index c1ac585e41e369..843bc6191f30b4 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -2714,6 +2714,10 @@ static int do_change_type(struct path *path, int ms_flags)
+ 		return -EINVAL;
+ 
+ 	namespace_lock();
++	if (!check_mnt(mnt)) {
++		err = -EINVAL;
++		goto out_unlock;
++	}
+ 	if (type == MS_SHARED) {
+ 		err = invent_group_ids(mnt, recurse);
+ 		if (err)
+@@ -3151,7 +3155,7 @@ static int do_set_group(struct path *from_path, struct path *to_path)
+ 	if (IS_MNT_SLAVE(from)) {
+ 		struct mount *m = from->mnt_master;
+ 
+-		list_add(&to->mnt_slave, &m->mnt_slave_list);
++		list_add(&to->mnt_slave, &from->mnt_slave);
+ 		to->mnt_master = m;
+ 	}
+ 
+@@ -3176,18 +3180,25 @@ static int do_set_group(struct path *from_path, struct path *to_path)
+  * Check if path is overmounted, i.e., if there's a mount on top of
+  * @path->mnt with @path->dentry as mountpoint.
+  *
+- * Context: This function expects namespace_lock() to be held.
++ * Context: namespace_sem must be held at least shared.
++ * MUST NOT be called under lock_mount_hash() (there one should just
++ * call __lookup_mnt() and check if it returns NULL).
+  * Return: If path is overmounted true is returned, false if not.
+  */
+ static inline bool path_overmounted(const struct path *path)
+ {
++	unsigned seq = read_seqbegin(&mount_lock);
++	bool no_child;
++
+ 	rcu_read_lock();
+-	if (unlikely(__lookup_mnt(path->mnt, path->dentry))) {
+-		rcu_read_unlock();
+-		return true;
+-	}
++	no_child = !__lookup_mnt(path->mnt, path->dentry);
+ 	rcu_read_unlock();
+-	return false;
++	if (need_seqretry(&mount_lock, seq)) {
++		read_seqlock_excl(&mount_lock);
++		no_child = !__lookup_mnt(path->mnt, path->dentry);
++		read_sequnlock_excl(&mount_lock);
++	}
++	return unlikely(!no_child);
+ }
+ 
+ /**
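
path_overmounted() now follows the optimistic seqlock protocol: sample
the sequence counter, do the lookup locklessly under RCU, and only if
the counter moved underneath (need_seqretry()) repeat the lookup once
under the exclusive side of mount_lock rather than spinning. The
general shape of that "lockless first, lock on contention" read
(some_seqlock and lockless_lookup() are hypothetical):

	unsigned seq = read_seqbegin(&some_seqlock);
	bool value;

	value = lockless_lookup();
	if (need_seqretry(&some_seqlock, seq)) {
		read_seqlock_excl(&some_seqlock);
		value = lockless_lookup();	/* now stable */
		read_sequnlock_excl(&some_seqlock);
	}
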
+diff --git a/fs/nfs/super.c b/fs/nfs/super.c
+index ae5c5e39afa03c..da5286514d8c7b 100644
+--- a/fs/nfs/super.c
++++ b/fs/nfs/super.c
+@@ -1046,6 +1046,16 @@ int nfs_reconfigure(struct fs_context *fc)
+ 
+ 	sync_filesystem(sb);
+ 
++	/*
++	 * The SB_RDONLY flag has been removed from the superblock during
++	 * mounts to prevent interference between different filesystems.
++	 * Similarly, it is also necessary to ignore the SB_RDONLY flag
++	 * during reconfiguration; otherwise, it may also result in the
++	 * creation of redundant superblocks when mounting a directory with
++	 * different rw and ro flags multiple times.
++	 */
++	fc->sb_flags_mask &= ~SB_RDONLY;
++
+ 	/*
+ 	 * Userspace mount programs that send binary options generally send
+ 	 * them populated with default values. We have no way to know which
+@@ -1303,8 +1313,17 @@ int nfs_get_tree_common(struct fs_context *fc)
+ 	if (IS_ERR(server))
+ 		return PTR_ERR(server);
+ 
++	/*
++	 * When NFS_MOUNT_UNSHARED is not set, NFS forces the sharing of a
++	 * superblock among each filesystem that mounts sub-directories
++	 * belonging to a single exported root path.
++	 * To prevent interference between different filesystems, the
++	 * SB_RDONLY flag should be removed from the superblock.
++	 */
+ 	if (server->flags & NFS_MOUNT_UNSHARED)
+ 		compare_super = NULL;
++	else
++		fc->sb_flags &= ~SB_RDONLY;
+ 
+ 	/* -o noac implies -o sync */
+ 	if (server->flags & NFS_MOUNT_NOAC)
+diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
+index ef5061bb56da1e..9c51a4ac2627f5 100644
+--- a/fs/nilfs2/btree.c
++++ b/fs/nilfs2/btree.c
+@@ -2103,11 +2103,13 @@ static int nilfs_btree_propagate(struct nilfs_bmap *btree,
+ 
+ 	ret = nilfs_btree_do_lookup(btree, path, key, NULL, level + 1, 0);
+ 	if (ret < 0) {
+-		if (unlikely(ret == -ENOENT))
++		if (unlikely(ret == -ENOENT)) {
+ 			nilfs_crit(btree->b_inode->i_sb,
+ 				   "writing node/leaf block does not appear in b-tree (ino=%lu) at key=%llu, level=%d",
+ 				   btree->b_inode->i_ino,
+ 				   (unsigned long long)key, level);
++			ret = -EINVAL;
++		}
+ 		goto out;
+ 	}
+ 
+diff --git a/fs/nilfs2/direct.c b/fs/nilfs2/direct.c
+index 893ab36824cc2b..2d8dc6b35b5477 100644
+--- a/fs/nilfs2/direct.c
++++ b/fs/nilfs2/direct.c
+@@ -273,6 +273,9 @@ static int nilfs_direct_propagate(struct nilfs_bmap *bmap,
+ 	dat = nilfs_bmap_get_dat(bmap);
+ 	key = nilfs_bmap_data_get_key(bmap, bh);
+ 	ptr = nilfs_direct_get_ptr(bmap, key);
++	if (ptr == NILFS_BMAP_INVALID_PTR)
++		return -EINVAL;
++
+ 	if (!buffer_nilfs_volatile(bh)) {
+ 		oldreq.pr_entry_nr = ptr;
+ 		newreq.pr_entry_nr = ptr;
+diff --git a/fs/ntfs3/index.c b/fs/ntfs3/index.c
+index 78d20e4baa2c9a..1bf2a6593dec66 100644
+--- a/fs/ntfs3/index.c
++++ b/fs/ntfs3/index.c
+@@ -2182,6 +2182,10 @@ static int indx_get_entry_to_replace(struct ntfs_index *indx,
+ 
+ 		e = hdr_first_de(&n->index->ihdr);
+ 		fnd_push(fnd, n, e);
++		if (!e) {
++			err = -EINVAL;
++			goto out;
++		}
+ 
+ 		if (!de_is_last(e)) {
+ 			/*
+@@ -2203,6 +2207,10 @@ static int indx_get_entry_to_replace(struct ntfs_index *indx,
+ 
+ 	n = fnd->nodes[level];
+ 	te = hdr_first_de(&n->index->ihdr);
++	if (!te) {
++		err = -EINVAL;
++		goto out;
++	}
+ 	/* Copy the candidate entry into the replacement entry buffer. */
+ 	re = kmalloc(le16_to_cpu(te->size) + sizeof(u64), GFP_NOFS);
+ 	if (!re) {
+diff --git a/fs/ntfs3/inode.c b/fs/ntfs3/inode.c
+index a1e11228dafd02..5c05cccd2d40b1 100644
+--- a/fs/ntfs3/inode.c
++++ b/fs/ntfs3/inode.c
+@@ -805,6 +805,10 @@ static ssize_t ntfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
+ 		ret = 0;
+ 		goto out;
+ 	}
++	if (is_compressed(ni)) {
++		ret = 0;
++		goto out;
++	}
+ 
+ 	ret = blockdev_direct_IO(iocb, inode, iter,
+ 				 wr ? ntfs_get_block_direct_IO_W :
+@@ -2108,5 +2112,6 @@ const struct address_space_operations ntfs_aops_cmpr = {
+ 	.read_folio	= ntfs_read_folio,
+ 	.readahead	= ntfs_readahead,
+ 	.dirty_folio	= block_dirty_folio,
++	.direct_IO	= ntfs_direct_IO,
+ };
+ // clang-format on
+diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c
+index e272429da3db34..de7f12858729ac 100644
+--- a/fs/ocfs2/quota_local.c
++++ b/fs/ocfs2/quota_local.c
+@@ -674,7 +674,7 @@ int ocfs2_finish_quota_recovery(struct ocfs2_super *osb,
+ 			break;
+ 	}
+ out:
+-	kfree(rec);
++	ocfs2_free_quota_recovery(rec);
+ 	return status;
+ }
+ 
+diff --git a/fs/smb/client/cifssmb.c b/fs/smb/client/cifssmb.c
+index 8667f403a0ab61..cf8d9de2298fcc 100644
+--- a/fs/smb/client/cifssmb.c
++++ b/fs/smb/client/cifssmb.c
+@@ -2780,10 +2780,10 @@ int cifs_query_reparse_point(const unsigned int xid,
+ 
+ 	io_req->TotalParameterCount = 0;
+ 	io_req->TotalDataCount = 0;
+-	io_req->MaxParameterCount = cpu_to_le32(2);
++	io_req->MaxParameterCount = cpu_to_le32(0);
+ 	/* BB find exact data count max from sess structure BB */
+ 	io_req->MaxDataCount = cpu_to_le32(CIFSMaxBufSize & 0xFFFFFF00);
+-	io_req->MaxSetupCount = 4;
++	io_req->MaxSetupCount = 1;
+ 	io_req->Reserved = 0;
+ 	io_req->ParameterOffset = 0;
+ 	io_req->DataCount = 0;
+@@ -2810,6 +2810,22 @@ int cifs_query_reparse_point(const unsigned int xid,
+ 		goto error;
+ 	}
+ 
++	/* SetupCount must be 1, otherwise the offset to ByteCount is incorrect. */
++	if (io_rsp->SetupCount != 1) {
++		rc = -EIO;
++		goto error;
++	}
++
++	/*
++	 * ReturnedDataLen is the output length of the executed IOCTL.
++	 * DataCount is the output length transferred over the network.
++	 * Check that we have the full FSCTL_GET_REPARSE_POINT buffer.
++	 */
++	if (data_count != le16_to_cpu(io_rsp->ReturnedDataLen)) {
++		rc = -EIO;
++		goto error;
++	}
++
+ 	end = 2 + get_bcc(&io_rsp->hdr) + (__u8 *)&io_rsp->ByteCount;
+ 	start = (__u8 *)&io_rsp->hdr.Protocol + data_offset;
+ 	if (start >= end) {
+diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c
+index 22e812808e5cf9..3a27d4268b3c4a 100644
+--- a/fs/squashfs/super.c
++++ b/fs/squashfs/super.c
+@@ -202,6 +202,11 @@ static int squashfs_fill_super(struct super_block *sb, struct fs_context *fc)
+ 	msblk->panic_on_errors = (opts->errors == Opt_errors_panic);
+ 
+ 	msblk->devblksize = sb_min_blocksize(sb, SQUASHFS_DEVBLK_SIZE);
++	if (!msblk->devblksize) {
++		errorf(fc, "squashfs: unable to set blocksize\n");
++		return -EINVAL;
++	}
++
+ 	msblk->devblksize_log2 = ffz(~msblk->devblksize);
+ 
+ 	mutex_init(&msblk->meta_index_mutex);
+diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c
+index d8c4a5dcca7aea..0b343776da8c31 100644
+--- a/fs/xfs/xfs_discard.c
++++ b/fs/xfs/xfs_discard.c
+@@ -146,6 +146,14 @@ xfs_discard_extents(
+ 	return error;
+ }
+ 
++/*
++ * Care must be taken setting up the trim cursor as the perags may not have been
++ * initialised when the cursor is initialised, e.g. on a clean mount which
++ * hasn't read in AGFs and where the first operation run on the mounted fs is
++ * a trim. This can result in perag fields that aren't initialised until
++ * xfs_trim_gather_extents() calls xfs_alloc_read_agf() to lock down the AG for
++ * the free space search.
++ */
+ struct xfs_trim_cur {
+ 	xfs_agblock_t	start;
+ 	xfs_extlen_t	count;
+@@ -183,6 +191,14 @@ xfs_trim_gather_extents(
+ 	if (error)
+ 		goto out_trans_cancel;
+ 
++	/*
++	 * First time through, tcur->count will not have been initialised, as
++	 * pag->pagf_longest is not guaranteed to be valid before we read
++	 * the AGF buffer above.
++	 */
++	if (!tcur->count)
++		tcur->count = pag->pagf_longest;
++
+ 	if (tcur->by_bno) {
+ 		/* sub-AG discard request always starts at tcur->start */
+ 		cur = xfs_bnobt_init_cursor(mp, tp, agbp, pag);
+@@ -329,7 +345,6 @@ xfs_trim_perag_extents(
+ {
+ 	struct xfs_trim_cur	tcur = {
+ 		.start		= start,
+-		.count		= pag->pagf_longest,
+ 		.end		= end,
+ 		.minlen		= minlen,
+ 	};
+diff --git a/include/linux/arm_sdei.h b/include/linux/arm_sdei.h
+index 255701e1251b4a..f652a5028b5907 100644
+--- a/include/linux/arm_sdei.h
++++ b/include/linux/arm_sdei.h
+@@ -46,12 +46,12 @@ int sdei_unregister_ghes(struct ghes *ghes);
+ /* For use by arch code when CPU hotplug notifiers are not appropriate. */
+ int sdei_mask_local_cpu(void);
+ int sdei_unmask_local_cpu(void);
+-void __init sdei_init(void);
++void __init acpi_sdei_init(void);
+ void sdei_handler_abort(void);
+ #else
+ static inline int sdei_mask_local_cpu(void) { return 0; }
+ static inline int sdei_unmask_local_cpu(void) { return 0; }
+-static inline void sdei_init(void) { }
++static inline void acpi_sdei_init(void) { }
+ static inline void sdei_handler_abort(void) { }
+ #endif /* CONFIG_ARM_SDE_INTERFACE */
+ 
+diff --git a/include/linux/bio.h b/include/linux/bio.h
+index 9e98fb87e7ef71..1289b8e4878011 100644
+--- a/include/linux/bio.h
++++ b/include/linux/bio.h
+@@ -294,7 +294,7 @@ static inline void bio_first_folio(struct folio_iter *fi, struct bio *bio,
+ 
+ 	fi->folio = page_folio(bvec->bv_page);
+ 	fi->offset = bvec->bv_offset +
+-			PAGE_SIZE * (bvec->bv_page - &fi->folio->page);
++			PAGE_SIZE * folio_page_idx(fi->folio, bvec->bv_page);
+ 	fi->_seg_count = bvec->bv_len;
+ 	fi->length = min(folio_size(fi->folio) - fi->offset, fi->_seg_count);
+ 	fi->_next = folio_next(fi->folio);
+diff --git a/include/linux/bvec.h b/include/linux/bvec.h
+index f41c7f0ef91ed5..a8333b82e766d4 100644
+--- a/include/linux/bvec.h
++++ b/include/linux/bvec.h
+@@ -57,9 +57,12 @@ static inline void bvec_set_page(struct bio_vec *bv, struct page *page,
+  * @offset:	offset into the folio
+  */
+ static inline void bvec_set_folio(struct bio_vec *bv, struct folio *folio,
+-		unsigned int len, unsigned int offset)
++		size_t len, size_t offset)
+ {
+-	bvec_set_page(bv, &folio->page, len, offset);
++	unsigned long nr = offset / PAGE_SIZE;
++
++	WARN_ON_ONCE(len > UINT_MAX);
++	bvec_set_page(bv, folio_page(folio, nr), len, offset % PAGE_SIZE);
+ }
+ 
+ /**
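The widened bvec_set_folio() above maps a byte offset inside a possibly multi-page folio to a concrete page plus an in-page offset, which is plain div/mod arithmetic. A self-contained illustration of that split (userspace C; PAGE_SIZE assumed to be 4096 for the example):

#include <assert.h>
#include <stddef.h>

#define PAGE_SIZE 4096UL	/* assumption for this example */

/* Split a byte offset within a large folio into a page index and an
 * offset inside that page, as bvec_set_folio() now does. */
static void folio_offset_to_page(size_t offset, size_t *page_idx,
				 size_t *page_off)
{
	*page_idx = offset / PAGE_SIZE;
	*page_off = offset % PAGE_SIZE;
}

int main(void)
{
	size_t idx, off;

	folio_offset_to_page(9000, &idx, &off);
	assert(idx == 2 && off == 808);	/* 9000 == 2 * 4096 + 808 */
	return 0;
}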
+diff --git a/include/linux/coresight.h b/include/linux/coresight.h
+index f106b102511189..59f99b7da43f5b 100644
+--- a/include/linux/coresight.h
++++ b/include/linux/coresight.h
+@@ -683,7 +683,7 @@ coresight_find_output_type(struct coresight_platform_data *pdata,
+ 			   union coresight_dev_subtype subtype);
+ 
+ int coresight_init_driver(const char *drv, struct amba_driver *amba_drv,
+-			  struct platform_driver *pdev_drv);
++			  struct platform_driver *pdev_drv, struct module *owner);
+ 
+ void coresight_remove_driver(struct amba_driver *amba_drv,
+ 			     struct platform_driver *pdev_drv);
+diff --git a/include/linux/hid.h b/include/linux/hid.h
+index 018de72505b073..017d31f1d27b8f 100644
+--- a/include/linux/hid.h
++++ b/include/linux/hid.h
+@@ -736,8 +736,9 @@ struct hid_descriptor {
+ 	__le16 bcdHID;
+ 	__u8  bCountryCode;
+ 	__u8  bNumDescriptors;
++	struct hid_class_descriptor rpt_desc;
+ 
+-	struct hid_class_descriptor desc[1];
++	struct hid_class_descriptor opt_descs[];
+ } __attribute__ ((packed));
+ 
+ #define HID_DEVICE(b, g, ven, prod)					\
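With the one-element array replaced by a named mandatory member plus a real flexible array, sizeof(struct hid_descriptor) still covers the report descriptor while the remaining bNumDescriptors - 1 class descriptors can be indexed without tripping bounds checks. A sketch of iterating them (hypothetical helper; assumes the descriptor length was already validated against bNumDescriptors):

#include <linux/hid.h>
#include <linux/printk.h>

/* Hypothetical walk over the optional class descriptors that follow
 * the mandatory report descriptor. */
static void walk_optional(const struct hid_descriptor *hdesc)
{
	int i;

	for (i = 0; i < hdesc->bNumDescriptors - 1; i++) {
		const struct hid_class_descriptor *cd = &hdesc->opt_descs[i];

		pr_debug("type %#x len %u\n", cd->bDescriptorType,
			 le16_to_cpu(cd->wDescriptorLength));
	}
}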
+diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
+index 777f6aa8efa7b2..d07c1f0ad3de33 100644
+--- a/include/linux/ieee80211.h
++++ b/include/linux/ieee80211.h
+@@ -111,6 +111,8 @@
+ 
+ /* bits unique to S1G beacon */
+ #define IEEE80211_S1G_BCN_NEXT_TBTT	0x100
++#define IEEE80211_S1G_BCN_CSSID		0x200
++#define IEEE80211_S1G_BCN_ANO		0x400
+ 
+ /* see 802.11ah-2016 9.9 NDP CMAC frames */
+ #define IEEE80211_S1G_1MHZ_NDP_BITS	25
+@@ -153,9 +155,6 @@
+ 
+ #define IEEE80211_ANO_NETTYPE_WILD              15
+ 
+-/* bits unique to S1G beacon */
+-#define IEEE80211_S1G_BCN_NEXT_TBTT    0x100
+-
+ /* control extension - for IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CTL_EXT */
+ #define IEEE80211_CTL_EXT_POLL		0x2000
+ #define IEEE80211_CTL_EXT_SPR		0x3000
+@@ -627,6 +626,42 @@ static inline bool ieee80211_is_s1g_beacon(__le16 fc)
+ 	       cpu_to_le16(IEEE80211_FTYPE_EXT | IEEE80211_STYPE_S1G_BEACON);
+ }
+ 
++/**
++ * ieee80211_s1g_has_next_tbtt - check if IEEE80211_S1G_BCN_NEXT_TBTT is set
++ * @fc: frame control bytes in little-endian byteorder
++ * Return: whether or not the frame contains the variable-length
++ *	next TBTT field
++ */
++static inline bool ieee80211_s1g_has_next_tbtt(__le16 fc)
++{
++	return ieee80211_is_s1g_beacon(fc) &&
++		(fc & cpu_to_le16(IEEE80211_S1G_BCN_NEXT_TBTT));
++}
++
++/**
++ * ieee80211_s1g_has_ano - check if IEEE80211_S1G_BCN_ANO is set
++ * @fc: frame control bytes in little-endian byteorder
++ * Return: whether or not the frame contains the variable-length
++ *	ANO field
++ */
++static inline bool ieee80211_s1g_has_ano(__le16 fc)
++{
++	return ieee80211_is_s1g_beacon(fc) &&
++		(fc & cpu_to_le16(IEEE80211_S1G_BCN_ANO));
++}
++
++/**
++ * ieee80211_s1g_has_cssid - check if IEEE80211_S1G_BCN_CSSID is set
++ * @fc: frame control bytes in little-endian byteorder
++ * Return: whether or not the frame contains the variable-length
++ *	compressed SSID field
++ */
++static inline bool ieee80211_s1g_has_cssid(__le16 fc)
++{
++	return ieee80211_is_s1g_beacon(fc) &&
++		(fc & cpu_to_le16(IEEE80211_S1G_BCN_CSSID));
++}
++
+ /**
+  * ieee80211_is_s1g_short_beacon - check if frame is an S1G short beacon
+  * @fc: frame control bytes in little-endian byteorder
+@@ -1245,16 +1280,40 @@ struct ieee80211_ext {
+ 			u8 change_seq;
+ 			u8 variable[0];
+ 		} __packed s1g_beacon;
+-		struct {
+-			u8 sa[ETH_ALEN];
+-			__le32 timestamp;
+-			u8 change_seq;
+-			u8 next_tbtt[3];
+-			u8 variable[0];
+-		} __packed s1g_short_beacon;
+ 	} u;
+ } __packed __aligned(2);
+ 
++/**
++ * ieee80211_s1g_optional_len - determine length of optional S1G beacon fields
++ * @fc: frame control bytes in little-endian byteorder
++ * Return: total length in bytes of the optional fixed-length fields
++ *
++ * S1G beacons may contain up to three optional fixed-length fields that
++ * precede the variable-length elements. Whether these fields are present
++ * is indicated by flags in the frame control field.
++ *
++ * From IEEE 802.11-2024 section 9.3.4.3:
++ *  - Next TBTT field may be 0 or 3 bytes
++ *  - Short SSID field may be 0 or 4 bytes
++ *  - Access Network Options (ANO) field may be 0 or 1 byte
++ */
++static inline size_t
++ieee80211_s1g_optional_len(__le16 fc)
++{
++	size_t len = 0;
++
++	if (ieee80211_s1g_has_next_tbtt(fc))
++		len += 3;
++
++	if (ieee80211_s1g_has_cssid(fc))
++		len += 4;
++
++	if (ieee80211_s1g_has_ano(fc))
++		len += 1;
++
++	return len;
++}
++
+ #define IEEE80211_TWT_CONTROL_NDP			BIT(0)
+ #define IEEE80211_TWT_CONTROL_RESP_MODE			BIT(1)
+ #define IEEE80211_TWT_CONTROL_NEG_TYPE_BROADCAST	BIT(3)
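With the duplicated s1g_short_beacon layout gone, a parser finds the variable-length elements by skipping however many optional fields the frame control advertises. A sketch of that usage (hypothetical helper; assumes kernel headers with the additions above):

#include <linux/ieee80211.h>

/* Hypothetical helper: return the start of an S1G beacon's
 * variable-length elements, past the optional next-TBTT, compressed
 * SSID and ANO fields announced in the frame control. */
static const u8 *s1g_beacon_elements(const struct ieee80211_ext *ext)
{
	return ext->u.s1g_beacon.variable +
	       ieee80211_s1g_optional_len(ext->frame_control);
}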
+diff --git a/include/linux/mdio.h b/include/linux/mdio.h
+index efeca5bd7600b0..84b08059183726 100644
+--- a/include/linux/mdio.h
++++ b/include/linux/mdio.h
+@@ -45,10 +45,7 @@ struct mdio_device {
+ 	unsigned int reset_deassert_delay;
+ };
+ 
+-static inline struct mdio_device *to_mdio_device(const struct device *dev)
+-{
+-	return container_of(dev, struct mdio_device, dev);
+-}
++#define to_mdio_device(__dev)	container_of_const(__dev, struct mdio_device, dev)
+ 
+ /* struct mdio_driver_common: Common to all MDIO drivers */
+ struct mdio_driver_common {
+diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
+index d4b2c09cd5fec4..da9749739abde9 100644
+--- a/include/linux/mlx5/driver.h
++++ b/include/linux/mlx5/driver.h
+@@ -395,6 +395,7 @@ struct mlx5_core_rsc_common {
+ 	enum mlx5_res_type	res;
+ 	refcount_t		refcount;
+ 	struct completion	free;
++	bool			invalid;
+ };
+ 
+ struct mlx5_uars_page {
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index 8617adc6becd1f..059ca4767e148f 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -4243,4 +4243,62 @@ static inline void pgalloc_tag_copy(struct folio *new, struct folio *old)
+ }
+ #endif /* CONFIG_MEM_ALLOC_PROFILING */
+ 
++/*
++ * DMA mapping IDs for page_pool
++ *
++ * When DMA-mapping a page, page_pool allocates an ID (from an xarray) and
++ * stashes it in the upper bits of page->pp_magic. We always want to be able to
++ * unambiguously identify page pool pages (using page_pool_page_is_pp()). Non-PP
++ * pages can have arbitrary kernel pointers stored in the same field as pp_magic
++ * (since it overlaps with page->lru.next), so we must ensure that we cannot
++ * mistake a valid kernel pointer with any of the values we write into this
++ * field.
++ *
++ * On architectures that set POISON_POINTER_DELTA, this is already ensured,
++ * since this value becomes part of PP_SIGNATURE; meaning we can just use the
++ * space between the PP_SIGNATURE value (without POISON_POINTER_DELTA), and the
++ * lowest bits of POISON_POINTER_DELTA. On arches where POISON_POINTER_DELTA is
++ * 0, we make sure that we leave the two topmost bits empty, as that guarantees
++ * we won't mistake a valid kernel pointer for a value we set, regardless of the
++ * VMSPLIT setting.
++ *
++ * Altogether, this means that the number of bits available is constrained by
++ * the size of an unsigned long (at the upper end, subtracting two bits per the
++ * above), and the definition of PP_SIGNATURE (with or without
++ * POISON_POINTER_DELTA).
++ */
++#define PP_DMA_INDEX_SHIFT (1 + __fls(PP_SIGNATURE - POISON_POINTER_DELTA))
++#if POISON_POINTER_DELTA > 0
++/* PP_SIGNATURE includes POISON_POINTER_DELTA, so limit the size of the DMA
++ * index to not overlap with that if set
++ */
++#define PP_DMA_INDEX_BITS MIN(32, __ffs(POISON_POINTER_DELTA) - PP_DMA_INDEX_SHIFT)
++#else
++/* Always leave out the topmost two; see above. */
++#define PP_DMA_INDEX_BITS MIN(32, BITS_PER_LONG - PP_DMA_INDEX_SHIFT - 2)
++#endif
++
++#define PP_DMA_INDEX_MASK GENMASK(PP_DMA_INDEX_BITS + PP_DMA_INDEX_SHIFT - 1, \
++				  PP_DMA_INDEX_SHIFT)
++
++/* Mask used for checking in page_pool_page_is_pp() below. page->pp_magic is
++ * OR'ed with PP_SIGNATURE after the allocation in order to preserve bit 0 for
++ * the head page of compound page and bit 1 for pfmemalloc page, as well as the
++ * bits used for the DMA index. page_is_pfmemalloc() is checked in
++ * __page_pool_put_page() to avoid recycling the pfmemalloc page.
++ */
++#define PP_MAGIC_MASK ~(PP_DMA_INDEX_MASK | 0x3UL)
++
++#ifdef CONFIG_PAGE_POOL
++static inline bool page_pool_page_is_pp(struct page *page)
++{
++	return (page->pp_magic & PP_MAGIC_MASK) == PP_SIGNATURE;
++}
++#else
++static inline bool page_pool_page_is_pp(struct page *page)
++{
++	return false;
++}
++#endif
++
+ #endif /* _LINUX_MM_H */
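To make the masking above concrete, here is the arithmetic worked through for one common configuration (a 64-bit build with POISON_POINTER_DELTA == 0; other configs land on different values):

/*
 * PP_SIGNATURE       = 0x40                   -> __fls(0x40) == 6
 * PP_DMA_INDEX_SHIFT = 1 + 6                  == 7
 * PP_DMA_INDEX_BITS  = MIN(32, 64 - 7 - 2)    == 32
 * PP_DMA_INDEX_MASK  = GENMASK(38, 7)
 *
 * pp_magic therefore keeps bit 0 (compound head) and bit 1
 * (pfmemalloc), carries the DMA index in bits 7..38, and leaves the
 * top two bits clear so the value can never look like a valid
 * kernel pointer.
 */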
+diff --git a/include/linux/nvme.h b/include/linux/nvme.h
+index 1c101f6fad2f31..84d4f0657b7a8e 100644
+--- a/include/linux/nvme.h
++++ b/include/linux/nvme.h
+@@ -1954,7 +1954,7 @@ enum {
+ 	NVME_SC_BAD_ATTRIBUTES		= 0x180,
+ 	NVME_SC_INVALID_PI		= 0x181,
+ 	NVME_SC_READ_ONLY		= 0x182,
+-	NVME_SC_ONCS_NOT_SUPPORTED	= 0x183,
++	NVME_SC_CMD_SIZE_LIM_EXCEEDED	= 0x183,
+ 
+ 	/*
+ 	 * I/O Command Set Specific - Fabrics commands:
+diff --git a/include/linux/overflow.h b/include/linux/overflow.h
+index 0c7e3dcfe8670c..89e9d604988351 100644
+--- a/include/linux/overflow.h
++++ b/include/linux/overflow.h
+@@ -389,24 +389,37 @@ static inline size_t __must_check size_sub(size_t minuend, size_t subtrahend)
+ 	struct_size((type *)NULL, member, count)
+ 
+ /**
+- * _DEFINE_FLEX() - helper macro for DEFINE_FLEX() family.
+- * Enables caller macro to pass (different) initializer.
++ * __DEFINE_FLEX() - helper macro for DEFINE_FLEX() family.
++ * Enables caller macro to pass arbitrary trailing expressions.
+  *
+  * @type: structure type name, including "struct" keyword.
+  * @name: Name for a variable to define.
+  * @member: Name of the array member.
+  * @count: Number of elements in the array; must be compile-time const.
+- * @initializer: initializer expression (could be empty for no init).
++ * @trailer: Trailing expressions for attributes and/or initializers.
+  */
+-#define _DEFINE_FLEX(type, name, member, count, initializer...)			\
++#define __DEFINE_FLEX(type, name, member, count, trailer...)			\
+ 	_Static_assert(__builtin_constant_p(count),				\
+ 		       "onstack flex array members require compile-time const count"); \
+ 	union {									\
+ 		u8 bytes[struct_size_t(type, member, count)];			\
+ 		type obj;							\
+-	} name##_u initializer;							\
++	} name##_u trailer;							\
+ 	type *name = (type *)&name##_u
+ 
++/**
++ * _DEFINE_FLEX() - helper macro for DEFINE_FLEX() family.
++ * Enables caller macro to pass (different) initializer.
++ *
++ * @type: structure type name, including "struct" keyword.
++ * @name: Name for a variable to define.
++ * @member: Name of the array member.
++ * @count: Number of elements in the array; must be compile-time const.
++ * @initializer: Initializer expression (e.g., pass `= { }` at minimum).
++ */
++#define _DEFINE_FLEX(type, name, member, count, initializer...)			\
++	__DEFINE_FLEX(type, name, member, count, = { .obj initializer })
++
+ /**
+  * DEFINE_RAW_FLEX() - Define an on-stack instance of structure with a trailing
+  * flexible array member, when it does not have a __counted_by annotation.
+@@ -421,7 +434,7 @@ static inline size_t __must_check size_sub(size_t minuend, size_t subtrahend)
+  * Use __struct_size(@name) to get compile-time size of it afterwards.
+  */
+ #define DEFINE_RAW_FLEX(type, name, member, count)	\
+-	_DEFINE_FLEX(type, name, member, count, = {})
++	__DEFINE_FLEX(type, name, member, count, = { })
+ 
+ /**
+  * DEFINE_FLEX() - Define an on-stack instance of structure with a trailing
+@@ -438,6 +451,6 @@ static inline size_t __must_check size_sub(size_t minuend, size_t subtrahend)
+  * Use __struct_size(@NAME) to get compile-time size of it afterwards.
+  */
+ #define DEFINE_FLEX(TYPE, NAME, MEMBER, COUNTER, COUNT)	\
+-	_DEFINE_FLEX(TYPE, NAME, MEMBER, COUNT, = { .obj.COUNTER = COUNT, })
++	_DEFINE_FLEX(TYPE, NAME, MEMBER, COUNT, = { .COUNTER = COUNT, })
+ 
+ #endif /* __LINUX_OVERFLOW_H */
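For context, the reworked macros are consumed like this (a minimal sketch; the structure is made up for illustration):

#include <linux/overflow.h>
#include <linux/types.h>

struct report {			/* illustrative only */
	u8 flags;
	u8 nr;			/* counts items[] entries */
	u8 items[] __counted_by(nr);
};

static void demo(void)
{
	/* On-stack instance with room for 8 items. With the change
	 * above this expands to the union initializer
	 * = { .obj = { .nr = 8, } }, so the counter is set and the
	 * rest is zeroed. */
	DEFINE_FLEX(struct report, r, items, nr, 8);

	r->items[0] = 0x01;	/* within the 8 reserved entries */
}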
+diff --git a/include/linux/pci-epf.h b/include/linux/pci-epf.h
+index 18a3aeb62ae4e6..cd6f8f4bc45400 100644
+--- a/include/linux/pci-epf.h
++++ b/include/linux/pci-epf.h
+@@ -114,6 +114,8 @@ struct pci_epf_driver {
+  * @phys_addr: physical address that should be mapped to the BAR
+  * @addr: virtual address corresponding to the @phys_addr
+  * @size: the size of the address space present in BAR
++ * @aligned_size: the size actually allocated to accommodate the iATU alignment
++ *                requirement
+  * @barno: BAR number
+  * @flags: flags that are set for the BAR
+  */
+@@ -121,6 +123,7 @@ struct pci_epf_bar {
+ 	dma_addr_t	phys_addr;
+ 	void		*addr;
+ 	size_t		size;
++	size_t		aligned_size;
+ 	enum pci_barno	barno;
+ 	int		flags;
+ };
+diff --git a/include/linux/phy.h b/include/linux/phy.h
+index 945264f457d8aa..dfc7b97f9648d8 100644
+--- a/include/linux/phy.h
++++ b/include/linux/phy.h
+@@ -792,10 +792,7 @@ struct phy_device {
+ #define PHY_F_NO_IRQ		0x80000000
+ #define PHY_F_RXC_ALWAYS_ON	0x40000000
+ 
+-static inline struct phy_device *to_phy_device(const struct device *dev)
+-{
+-	return container_of(to_mdio_device(dev), struct phy_device, mdio);
+-}
++#define to_phy_device(__dev)	container_of_const(to_mdio_device(__dev), struct phy_device, mdio)
+ 
+ /**
+  * struct phy_tdr_config - Configuration of a TDR raw test
+diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
+index cf4b11be370974..c6716f474ba45e 100644
+--- a/include/linux/pm_domain.h
++++ b/include/linux/pm_domain.h
+@@ -251,6 +251,7 @@ struct generic_pm_domain_data {
+ 	unsigned int default_pstate;
+ 	unsigned int rpm_pstate;
+ 	bool hw_mode;
++	bool rpm_always_on;
+ 	void *data;
+ };
+ 
+@@ -283,6 +284,7 @@ ktime_t dev_pm_genpd_get_next_hrtimer(struct device *dev);
+ void dev_pm_genpd_synced_poweroff(struct device *dev);
+ int dev_pm_genpd_set_hwmode(struct device *dev, bool enable);
+ bool dev_pm_genpd_get_hwmode(struct device *dev);
++int dev_pm_genpd_rpm_always_on(struct device *dev, bool on);
+ 
+ extern struct dev_power_governor simple_qos_governor;
+ extern struct dev_power_governor pm_domain_always_on_gov;
+@@ -366,6 +368,11 @@ static inline bool dev_pm_genpd_get_hwmode(struct device *dev)
+ 	return false;
+ }
+ 
++static inline int dev_pm_genpd_rpm_always_on(struct device *dev, bool on)
++{
++	return -EOPNOTSUPP;
++}
++
+ #define simple_qos_governor		(*(struct dev_power_governor *)(NULL))
+ #define pm_domain_always_on_gov		(*(struct dev_power_governor *)(NULL))
+ #endif
+diff --git a/include/linux/poison.h b/include/linux/poison.h
+index 331a9a996fa874..8ca2235f78d5d9 100644
+--- a/include/linux/poison.h
++++ b/include/linux/poison.h
+@@ -70,6 +70,10 @@
+ #define KEY_DESTROY		0xbd
+ 
+ /********** net/core/page_pool.c **********/
++/*
++ * page_pool uses additional free bits within this value to store data, see the
++ * definition of PP_DMA_INDEX_MASK in mm.h
++ */
+ #define PP_SIGNATURE		(0x40 + POISON_POINTER_DELTA)
+ 
+ /********** net/core/skbuff.c **********/
+diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h
+index 0387d64e2c66c6..36fb3edfa403d9 100644
+--- a/include/linux/virtio_vsock.h
++++ b/include/linux/virtio_vsock.h
+@@ -140,6 +140,7 @@ struct virtio_vsock_sock {
+ 	u32 last_fwd_cnt;
+ 	u32 rx_bytes;
+ 	u32 buf_alloc;
++	u32 buf_used;
+ 	struct sk_buff_head rx_queue;
+ 	u32 msg_count;
+ };
+diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
+index 4f3b537476e106..e9e3366d059ef1 100644
+--- a/include/net/bluetooth/hci_core.h
++++ b/include/net/bluetooth/hci_core.h
+@@ -538,6 +538,7 @@ struct hci_dev {
+ 	struct hci_conn_hash	conn_hash;
+ 
+ 	struct list_head	mesh_pending;
++	struct mutex		mgmt_pending_lock;
+ 	struct list_head	mgmt_pending;
+ 	struct list_head	reject_list;
+ 	struct list_head	accept_list;
+@@ -2379,7 +2380,6 @@ void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev,
+ 			    u8 instance);
+ void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
+ 			      u8 instance);
+-void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle);
+ int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip);
+ void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
+ 				  bdaddr_t *bdaddr, u8 addr_type);
+diff --git a/include/net/netfilter/nft_fib.h b/include/net/netfilter/nft_fib.h
+index 6e202ed5e63f3c..7370fba844efcf 100644
+--- a/include/net/netfilter/nft_fib.h
++++ b/include/net/netfilter/nft_fib.h
+@@ -2,6 +2,7 @@
+ #ifndef _NFT_FIB_H_
+ #define _NFT_FIB_H_
+ 
++#include <net/l3mdev.h>
+ #include <net/netfilter/nf_tables.h>
+ 
+ struct nft_fib {
+@@ -39,6 +40,14 @@ static inline bool nft_fib_can_skip(const struct nft_pktinfo *pkt)
+ 	return nft_fib_is_loopback(pkt->skb, indev);
+ }
+ 
++static inline int nft_fib_l3mdev_master_ifindex_rcu(const struct nft_pktinfo *pkt,
++						    const struct net_device *iif)
++{
++	const struct net_device *dev = iif ? iif : pkt->skb->dev;
++
++	return l3mdev_master_ifindex_rcu(dev);
++}
++
+ int nft_fib_dump(struct sk_buff *skb, const struct nft_expr *expr, bool reset);
+ int nft_fib_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
+ 		 const struct nlattr * const tb[]);
+diff --git a/include/net/page_pool/types.h b/include/net/page_pool/types.h
+index c022c410abe39d..f53e2c90b68665 100644
+--- a/include/net/page_pool/types.h
++++ b/include/net/page_pool/types.h
+@@ -6,6 +6,7 @@
+ #include <linux/dma-direction.h>
+ #include <linux/ptr_ring.h>
+ #include <linux/types.h>
++#include <linux/xarray.h>
+ #include <net/netmem.h>
+ 
+ #define PP_FLAG_DMA_MAP		BIT(0) /* Should page_pool do the DMA
+@@ -33,6 +34,9 @@
+ #define PP_FLAG_ALL		(PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV | \
+ 				 PP_FLAG_SYSTEM_POOL | PP_FLAG_ALLOW_UNREADABLE_NETMEM)
+ 
++/* Index limit to stay within PP_DMA_INDEX_BITS for DMA indices */
++#define PP_DMA_INDEX_LIMIT XA_LIMIT(1, BIT(PP_DMA_INDEX_BITS) - 1)
++
+ /*
+  * Fast allocation side cache array/stack
+  *
+@@ -216,6 +220,8 @@ struct page_pool {
+ 
+ 	void *mp_priv;
+ 
++	struct xarray dma_mapped;
++
+ #ifdef CONFIG_PAGE_POOL_STATS
+ 	/* recycle stats are per-cpu to avoid locking */
+ 	struct page_pool_recycle_stats __percpu *recycle_stats;
+diff --git a/include/net/sock.h b/include/net/sock.h
+index fa9b9dadbe1709..b7270b6b9e9cc1 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -2942,8 +2942,11 @@ int sock_ioctl_inout(struct sock *sk, unsigned int cmd,
+ int sk_ioctl(struct sock *sk, unsigned int cmd, void __user *arg);
+ static inline bool sk_is_readable(struct sock *sk)
+ {
+-	if (sk->sk_prot->sock_is_readable)
+-		return sk->sk_prot->sock_is_readable(sk);
++	const struct proto *prot = READ_ONCE(sk->sk_prot);
++
++	if (prot->sock_is_readable)
++		return prot->sock_is_readable(sk);
++
+ 	return false;
+ }
+ #endif	/* _SOCK_H */
+diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
+index b098ceadbe74bf..9a70048adbc069 100644
+--- a/include/sound/hdaudio.h
++++ b/include/sound/hdaudio.h
+@@ -223,7 +223,7 @@ struct hdac_driver {
+ 	struct device_driver driver;
+ 	int type;
+ 	const struct hda_device_id *id_table;
+-	int (*match)(struct hdac_device *dev, struct hdac_driver *drv);
++	int (*match)(struct hdac_device *dev, const struct hdac_driver *drv);
+ 	void (*unsol_event)(struct hdac_device *dev, unsigned int event);
+ 
+ 	/* fields used by ext bus APIs */
+@@ -235,7 +235,7 @@ struct hdac_driver {
+ #define drv_to_hdac_driver(_drv) container_of(_drv, struct hdac_driver, driver)
+ 
+ const struct hda_device_id *
+-hdac_get_device_id(struct hdac_device *hdev, struct hdac_driver *drv);
++hdac_get_device_id(struct hdac_device *hdev, const struct hdac_driver *drv);
+ 
+ /*
+  * Bus verb operators
+diff --git a/io_uring/fdinfo.c b/io_uring/fdinfo.c
+index ecdbe473a49f7a..c6c624eb9866d7 100644
+--- a/io_uring/fdinfo.c
++++ b/io_uring/fdinfo.c
+@@ -146,18 +146,26 @@ __cold void io_uring_show_fdinfo(struct seq_file *m, struct file *file)
+ 
+ 	if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL)) {
+ 		struct io_sq_data *sq = ctx->sq_data;
++		struct task_struct *tsk;
+ 
++		rcu_read_lock();
++		tsk = rcu_dereference(sq->thread);
+ 		/*
+ 		 * sq->thread might be NULL if we raced with the sqpoll
+ 		 * thread termination.
+ 		 */
+-		if (sq->thread) {
++		if (tsk) {
++			get_task_struct(tsk);
++			rcu_read_unlock();
++			getrusage(tsk, RUSAGE_SELF, &sq_usage);
++			put_task_struct(tsk);
+ 			sq_pid = sq->task_pid;
+ 			sq_cpu = sq->sq_cpu;
+-			getrusage(sq->thread, RUSAGE_SELF, &sq_usage);
+ 			sq_total_time = (sq_usage.ru_stime.tv_sec * 1000000
+ 					 + sq_usage.ru_stime.tv_usec);
+ 			sq_work_time = sq->work_time;
++		} else {
++			rcu_read_unlock();
+ 		}
+ 	}
+ 
+diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
+index bd3b3f7a6f6cab..64870f51b67883 100644
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -2916,7 +2916,7 @@ static __cold void io_ring_exit_work(struct work_struct *work)
+ 			struct task_struct *tsk;
+ 
+ 			io_sq_thread_park(sqd);
+-			tsk = sqd->thread;
++			tsk = sqpoll_task_locked(sqd);
+ 			if (tsk && tsk->io_uring && tsk->io_uring->io_wq)
+ 				io_wq_cancel_cb(tsk->io_uring->io_wq,
+ 						io_cancel_ctx_cb, ctx, true);
+@@ -3153,7 +3153,7 @@ __cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd)
+ 	s64 inflight;
+ 	DEFINE_WAIT(wait);
+ 
+-	WARN_ON_ONCE(sqd && sqd->thread != current);
++	WARN_ON_ONCE(sqd && sqpoll_task_locked(sqd) != current);
+ 
+ 	if (!current->io_uring)
+ 		return;
+diff --git a/io_uring/register.c b/io_uring/register.c
+index eca26d4884d9a9..a325b493ae121c 100644
+--- a/io_uring/register.c
++++ b/io_uring/register.c
+@@ -268,6 +268,8 @@ static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
+ 	if (ctx->flags & IORING_SETUP_SQPOLL) {
+ 		sqd = ctx->sq_data;
+ 		if (sqd) {
++			struct task_struct *tsk;
++
+ 			/*
+ 			 * Observe the correct sqd->lock -> ctx->uring_lock
+ 			 * ordering. Fine to drop uring_lock here, we hold
+@@ -277,8 +279,9 @@ static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
+ 			mutex_unlock(&ctx->uring_lock);
+ 			mutex_lock(&sqd->lock);
+ 			mutex_lock(&ctx->uring_lock);
+-			if (sqd->thread)
+-				tctx = sqd->thread->io_uring;
++			tsk = sqpoll_task_locked(sqd);
++			if (tsk)
++				tctx = tsk->io_uring;
+ 		}
+ 	} else {
+ 		tctx = current->io_uring;
+diff --git a/io_uring/sqpoll.c b/io_uring/sqpoll.c
+index 430922c541681e..9a630689489571 100644
+--- a/io_uring/sqpoll.c
++++ b/io_uring/sqpoll.c
+@@ -30,7 +30,7 @@ enum {
+ void io_sq_thread_unpark(struct io_sq_data *sqd)
+ 	__releases(&sqd->lock)
+ {
+-	WARN_ON_ONCE(sqd->thread == current);
++	WARN_ON_ONCE(sqpoll_task_locked(sqd) == current);
+ 
+ 	/*
+ 	 * Do the dance but not conditional clear_bit() because it'd race with
+@@ -45,24 +45,32 @@ void io_sq_thread_unpark(struct io_sq_data *sqd)
+ void io_sq_thread_park(struct io_sq_data *sqd)
+ 	__acquires(&sqd->lock)
+ {
+-	WARN_ON_ONCE(data_race(sqd->thread) == current);
++	struct task_struct *tsk;
+ 
+ 	atomic_inc(&sqd->park_pending);
+ 	set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
+ 	mutex_lock(&sqd->lock);
+-	if (sqd->thread)
+-		wake_up_process(sqd->thread);
++
++	tsk = sqpoll_task_locked(sqd);
++	if (tsk) {
++		WARN_ON_ONCE(tsk == current);
++		wake_up_process(tsk);
++	}
+ }
+ 
+ void io_sq_thread_stop(struct io_sq_data *sqd)
+ {
+-	WARN_ON_ONCE(sqd->thread == current);
++	struct task_struct *tsk;
++
+ 	WARN_ON_ONCE(test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state));
+ 
+ 	set_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
+ 	mutex_lock(&sqd->lock);
+-	if (sqd->thread)
+-		wake_up_process(sqd->thread);
++	tsk = sqpoll_task_locked(sqd);
++	if (tsk) {
++		WARN_ON_ONCE(tsk == current);
++		wake_up_process(tsk);
++	}
+ 	mutex_unlock(&sqd->lock);
+ 	wait_for_completion(&sqd->exited);
+ }
+@@ -277,7 +285,8 @@ static int io_sq_thread(void *data)
+ 	/* offload context creation failed, just exit */
+ 	if (!current->io_uring) {
+ 		mutex_lock(&sqd->lock);
+-		sqd->thread = NULL;
++		rcu_assign_pointer(sqd->thread, NULL);
++		put_task_struct(current);
+ 		mutex_unlock(&sqd->lock);
+ 		goto err_out;
+ 	}
+@@ -386,7 +395,8 @@ static int io_sq_thread(void *data)
+ 		io_sq_tw(&retry_list, UINT_MAX);
+ 
+ 	io_uring_cancel_generic(true, sqd);
+-	sqd->thread = NULL;
++	rcu_assign_pointer(sqd->thread, NULL);
++	put_task_struct(current);
+ 	list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
+ 		atomic_or(IORING_SQ_NEED_WAKEUP, &ctx->rings->sq_flags);
+ 	io_run_task_work();
+@@ -496,7 +506,10 @@ __cold int io_sq_offload_create(struct io_ring_ctx *ctx,
+ 			goto err_sqpoll;
+ 		}
+ 
+-		sqd->thread = tsk;
++		mutex_lock(&sqd->lock);
++		rcu_assign_pointer(sqd->thread, tsk);
++		mutex_unlock(&sqd->lock);
++
+ 		task_to_put = get_task_struct(tsk);
+ 		ret = io_uring_alloc_task_context(tsk, ctx);
+ 		wake_up_new_task(tsk);
+@@ -507,9 +520,6 @@ __cold int io_sq_offload_create(struct io_ring_ctx *ctx,
+ 		ret = -EINVAL;
+ 		goto err;
+ 	}
+-
+-	if (task_to_put)
+-		put_task_struct(task_to_put);
+ 	return 0;
+ err_sqpoll:
+ 	complete(&ctx->sq_data->exited);
+@@ -527,10 +537,13 @@ __cold int io_sqpoll_wq_cpu_affinity(struct io_ring_ctx *ctx,
+ 	int ret = -EINVAL;
+ 
+ 	if (sqd) {
++		struct task_struct *tsk;
++
+ 		io_sq_thread_park(sqd);
+ 		/* Don't set affinity for a dying thread */
+-		if (sqd->thread)
+-			ret = io_wq_cpu_affinity(sqd->thread->io_uring, mask);
++		tsk = sqpoll_task_locked(sqd);
++		if (tsk)
++			ret = io_wq_cpu_affinity(tsk->io_uring, mask);
+ 		io_sq_thread_unpark(sqd);
+ 	}
+ 
+diff --git a/io_uring/sqpoll.h b/io_uring/sqpoll.h
+index 4171666b1cf4cc..b83dcdec9765fd 100644
+--- a/io_uring/sqpoll.h
++++ b/io_uring/sqpoll.h
+@@ -8,7 +8,7 @@ struct io_sq_data {
+ 	/* ctx's that are using this sqd */
+ 	struct list_head	ctx_list;
+ 
+-	struct task_struct	*thread;
++	struct task_struct __rcu *thread;
+ 	struct wait_queue_head	wait;
+ 
+ 	unsigned		sq_thread_idle;
+@@ -29,3 +29,9 @@ void io_sq_thread_unpark(struct io_sq_data *sqd);
+ void io_put_sq_data(struct io_sq_data *sqd);
+ void io_sqpoll_wait_sq(struct io_ring_ctx *ctx);
+ int io_sqpoll_wq_cpu_affinity(struct io_ring_ctx *ctx, cpumask_var_t mask);
++
++static inline struct task_struct *sqpoll_task_locked(struct io_sq_data *sqd)
++{
++	return rcu_dereference_protected(sqd->thread,
++					 lockdep_is_held(&sqd->lock));
++}
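Readers that cannot take sqd->lock now follow the usual RCU pin-then-reference dance, as the fdinfo hunk above does. The shape of it, condensed into a hypothetical helper:

#include <linux/rcupdate.h>
#include <linux/sched/task.h>
#include "sqpoll.h"	/* io_uring-internal header for struct io_sq_data */

/* Hypothetical: pin the SQPOLL task without holding sqd->lock. */
static struct task_struct *sqpoll_task_get(struct io_sq_data *sqd)
{
	struct task_struct *tsk;

	rcu_read_lock();
	tsk = rcu_dereference(sqd->thread);
	if (tsk)
		get_task_struct(tsk);	/* reference stays valid after unlock */
	rcu_read_unlock();
	return tsk;			/* caller must put_task_struct() */
}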
+diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
+index a60a6a2ce0d7f4..68a327158989b9 100644
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -2303,8 +2303,8 @@ static unsigned int __bpf_prog_ret0_warn(const void *ctx,
+ 	return 0;
+ }
+ 
+-bool bpf_prog_map_compatible(struct bpf_map *map,
+-			     const struct bpf_prog *fp)
++static bool __bpf_prog_map_compatible(struct bpf_map *map,
++				      const struct bpf_prog *fp)
+ {
+ 	enum bpf_prog_type prog_type = resolve_prog_type(fp);
+ 	bool ret;
+@@ -2313,14 +2313,6 @@ bool bpf_prog_map_compatible(struct bpf_map *map,
+ 	if (fp->kprobe_override)
+ 		return false;
+ 
+-	/* XDP programs inserted into maps are not guaranteed to run on
+-	 * a particular netdev (and can run outside driver context entirely
+-	 * in the case of devmap and cpumap). Until device checks
+-	 * are implemented, prohibit adding dev-bound programs to program maps.
+-	 */
+-	if (bpf_prog_is_dev_bound(aux))
+-		return false;
+-
+ 	spin_lock(&map->owner.lock);
+ 	if (!map->owner.type) {
+ 		/* There's no owner yet where we could check for
+@@ -2354,6 +2346,19 @@ bool bpf_prog_map_compatible(struct bpf_map *map,
+ 	return ret;
+ }
+ 
++bool bpf_prog_map_compatible(struct bpf_map *map, const struct bpf_prog *fp)
++{
++	/* XDP programs inserted into maps are not guaranteed to run on
++	 * a particular netdev (and can run outside driver context entirely
++	 * in the case of devmap and cpumap). Until device checks
++	 * are implemented, prohibit adding dev-bound programs to program maps.
++	 */
++	if (bpf_prog_is_dev_bound(fp->aux))
++		return false;
++
++	return __bpf_prog_map_compatible(map, fp);
++}
++
+ static int bpf_check_tail_call(const struct bpf_prog *fp)
+ {
+ 	struct bpf_prog_aux *aux = fp->aux;
+@@ -2366,7 +2371,7 @@ static int bpf_check_tail_call(const struct bpf_prog *fp)
+ 		if (!map_type_contains_progs(map))
+ 			continue;
+ 
+-		if (!bpf_prog_map_compatible(map, fp)) {
++		if (!__bpf_prog_map_compatible(map, fp)) {
+ 			ret = -EINVAL;
+ 			goto out;
+ 		}
+@@ -2414,7 +2419,7 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
+ 	/* In case of BPF to BPF calls, verifier did all the prep
+ 	 * work with regards to JITing, etc.
+ 	 */
+-	bool jit_needed = false;
++	bool jit_needed = fp->jit_requested;
+ 
+ 	if (fp->bpf_func)
+ 		goto finalize;
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 285a4548450bd2..9ce82904f761d2 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -6031,6 +6031,9 @@ static int perf_event_set_output(struct perf_event *event,
+ static int perf_event_set_filter(struct perf_event *event, void __user *arg);
+ static int perf_copy_attr(struct perf_event_attr __user *uattr,
+ 			  struct perf_event_attr *attr);
++static int __perf_event_set_bpf_prog(struct perf_event *event,
++				     struct bpf_prog *prog,
++				     u64 bpf_cookie);
+ 
+ static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg)
+ {
+@@ -6099,7 +6102,7 @@ static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned lon
+ 		if (IS_ERR(prog))
+ 			return PTR_ERR(prog);
+ 
+-		err = perf_event_set_bpf_prog(event, prog, 0);
++		err = __perf_event_set_bpf_prog(event, prog, 0);
+ 		if (err) {
+ 			bpf_prog_put(prog);
+ 			return err;
+@@ -9715,14 +9718,14 @@ __perf_event_account_interrupt(struct perf_event *event, int throttle)
+ 		hwc->interrupts = 1;
+ 	} else {
+ 		hwc->interrupts++;
+-		if (unlikely(throttle &&
+-			     hwc->interrupts > max_samples_per_tick)) {
+-			__this_cpu_inc(perf_throttled_count);
+-			tick_dep_set_cpu(smp_processor_id(), TICK_DEP_BIT_PERF_EVENTS);
+-			hwc->interrupts = MAX_INTERRUPTS;
+-			perf_log_throttle(event, 0);
+-			ret = 1;
+-		}
++	}
++
++	if (unlikely(throttle && hwc->interrupts >= max_samples_per_tick)) {
++		__this_cpu_inc(perf_throttled_count);
++		tick_dep_set_cpu(smp_processor_id(), TICK_DEP_BIT_PERF_EVENTS);
++		hwc->interrupts = MAX_INTERRUPTS;
++		perf_log_throttle(event, 0);
++		ret = 1;
+ 	}
+ 
+ 	if (event->attr.freq) {
+@@ -10756,8 +10759,9 @@ static inline bool perf_event_is_tracing(struct perf_event *event)
+ 	return false;
+ }
+ 
+-int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog,
+-			    u64 bpf_cookie)
++static int __perf_event_set_bpf_prog(struct perf_event *event,
++				     struct bpf_prog *prog,
++				     u64 bpf_cookie)
+ {
+ 	bool is_kprobe, is_uprobe, is_tracepoint, is_syscall_tp;
+ 
+@@ -10795,6 +10799,20 @@ int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog,
+ 	return perf_event_attach_bpf_prog(event, prog, bpf_cookie);
+ }
+ 
++int perf_event_set_bpf_prog(struct perf_event *event,
++			    struct bpf_prog *prog,
++			    u64 bpf_cookie)
++{
++	struct perf_event_context *ctx;
++	int ret;
++
++	ctx = perf_event_ctx_lock(event);
++	ret = __perf_event_set_bpf_prog(event, prog, bpf_cookie);
++	perf_event_ctx_unlock(event, ctx);
++
++	return ret;
++}
++
+ void perf_event_free_bpf_prog(struct perf_event *event)
+ {
+ 	if (!perf_event_is_tracing(event)) {
+@@ -10814,7 +10832,15 @@ static void perf_event_free_filter(struct perf_event *event)
+ {
+ }
+ 
+-int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog,
++static int __perf_event_set_bpf_prog(struct perf_event *event,
++				     struct bpf_prog *prog,
++				     u64 bpf_cookie)
++{
++	return -ENOENT;
++}
++
++int perf_event_set_bpf_prog(struct perf_event *event,
++			    struct bpf_prog *prog,
+ 			    u64 bpf_cookie)
+ {
+ 	return -ENOENT;
+diff --git a/kernel/power/energy_model.c b/kernel/power/energy_model.c
+index 4e1778071d704e..1c9fe741fe6d53 100644
+--- a/kernel/power/energy_model.c
++++ b/kernel/power/energy_model.c
+@@ -233,6 +233,10 @@ static int em_compute_costs(struct device *dev, struct em_perf_state *table,
+ 	unsigned long prev_cost = ULONG_MAX;
+ 	int i, ret;
+ 
++	/* This is needed only for CPUs and EAS; skip other devices */
++	if (!_is_cpu_device(dev))
++		return 0;
++
+ 	/* Compute the cost of each performance state. */
+ 	for (i = nr_states - 1; i >= 0; i--) {
+ 		unsigned long power_res, cost;
+diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
+index d8bad1eeedd3e5..85008ead2ac91f 100644
+--- a/kernel/power/hibernate.c
++++ b/kernel/power/hibernate.c
+@@ -89,6 +89,11 @@ void hibernate_release(void)
+ 	atomic_inc(&hibernate_atomic);
+ }
+ 
++bool hibernation_in_progress(void)
++{
++	return !atomic_read(&hibernate_atomic);
++}
++
+ bool hibernation_available(void)
+ {
+ 	return nohibernate == 0 &&
+diff --git a/kernel/power/main.c b/kernel/power/main.c
+index 6254814d481714..0622e7dacf1720 100644
+--- a/kernel/power/main.c
++++ b/kernel/power/main.c
+@@ -613,7 +613,8 @@ bool pm_debug_messages_on __read_mostly;
+ 
+ bool pm_debug_messages_should_print(void)
+ {
+-	return pm_debug_messages_on && pm_suspend_target_state != PM_SUSPEND_ON;
++	return pm_debug_messages_on && (hibernation_in_progress() ||
++		pm_suspend_target_state != PM_SUSPEND_ON);
+ }
+ EXPORT_SYMBOL_GPL(pm_debug_messages_should_print);
+ 
+diff --git a/kernel/power/power.h b/kernel/power/power.h
+index de0e6b1077f231..6d1ec7b23e844f 100644
+--- a/kernel/power/power.h
++++ b/kernel/power/power.h
+@@ -71,10 +71,14 @@ extern void enable_restore_image_protection(void);
+ static inline void enable_restore_image_protection(void) {}
+ #endif /* CONFIG_STRICT_KERNEL_RWX */
+ 
++extern bool hibernation_in_progress(void);
++
+ #else /* !CONFIG_HIBERNATION */
+ 
+ static inline void hibernate_reserved_size_init(void) {}
+ static inline void hibernate_image_size_init(void) {}
++
++static inline bool hibernation_in_progress(void) { return false; }
+ #endif /* !CONFIG_HIBERNATION */
+ 
+ #define power_attr(_name) \
+diff --git a/kernel/power/wakelock.c b/kernel/power/wakelock.c
+index 52571dcad768b9..4e941999a53ba6 100644
+--- a/kernel/power/wakelock.c
++++ b/kernel/power/wakelock.c
+@@ -49,6 +49,9 @@ ssize_t pm_show_wakelocks(char *buf, bool show_active)
+ 			len += sysfs_emit_at(buf, len, "%s ", wl->name);
+ 	}
+ 
++	if (len > 0)
++		--len;
++
+ 	len += sysfs_emit_at(buf, len, "\n");
+ 
+ 	mutex_unlock(&wakelocks_lock);
+diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
+index 4ed86321952171..cefa831c8cb322 100644
+--- a/kernel/rcu/tree.c
++++ b/kernel/rcu/tree.c
+@@ -802,6 +802,10 @@ static int rcu_watching_snap_save(struct rcu_data *rdp)
+ 	return 0;
+ }
+ 
++#ifndef arch_irq_stat_cpu
++#define arch_irq_stat_cpu(cpu) 0
++#endif
++
+ /*
+  * Returns positive if the specified CPU has passed through a quiescent state
+  * by virtue of being in or having passed through an dynticks idle state since
+@@ -937,9 +941,9 @@ static int rcu_watching_snap_recheck(struct rcu_data *rdp)
+ 			rsrp->cputime_irq     = kcpustat_field(kcsp, CPUTIME_IRQ, cpu);
+ 			rsrp->cputime_softirq = kcpustat_field(kcsp, CPUTIME_SOFTIRQ, cpu);
+ 			rsrp->cputime_system  = kcpustat_field(kcsp, CPUTIME_SYSTEM, cpu);
+-			rsrp->nr_hardirqs = kstat_cpu_irqs_sum(rdp->cpu);
+-			rsrp->nr_softirqs = kstat_cpu_softirqs_sum(rdp->cpu);
+-			rsrp->nr_csw = nr_context_switches_cpu(rdp->cpu);
++			rsrp->nr_hardirqs = kstat_cpu_irqs_sum(cpu) + arch_irq_stat_cpu(cpu);
++			rsrp->nr_softirqs = kstat_cpu_softirqs_sum(cpu);
++			rsrp->nr_csw = nr_context_switches_cpu(cpu);
+ 			rsrp->jiffies = jiffies;
+ 			rsrp->gp_seq = rdp->gp_seq;
+ 		}
+diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
+index a9a811d9d7a372..1bba2225e7448b 100644
+--- a/kernel/rcu/tree.h
++++ b/kernel/rcu/tree.h
+@@ -168,7 +168,7 @@ struct rcu_snap_record {
+ 	u64		cputime_irq;	/* Accumulated cputime of hard irqs */
+ 	u64		cputime_softirq;/* Accumulated cputime of soft irqs */
+ 	u64		cputime_system; /* Accumulated cputime of kernel tasks */
+-	unsigned long	nr_hardirqs;	/* Accumulated number of hard irqs */
++	u64		nr_hardirqs;	/* Accumulated number of hard irqs */
+ 	unsigned int	nr_softirqs;	/* Accumulated number of soft irqs */
+ 	unsigned long long nr_csw;	/* Accumulated number of task switches */
+ 	unsigned long   jiffies;	/* Track jiffies value */
+diff --git a/kernel/rcu/tree_stall.h b/kernel/rcu/tree_stall.h
+index 4432db6d0b99b3..4d524a2212a8d6 100644
+--- a/kernel/rcu/tree_stall.h
++++ b/kernel/rcu/tree_stall.h
+@@ -457,8 +457,8 @@ static void print_cpu_stat_info(int cpu)
+ 	rsr.cputime_system  = kcpustat_field(kcsp, CPUTIME_SYSTEM, cpu);
+ 
+ 	pr_err("\t         hardirqs   softirqs   csw/system\n");
+-	pr_err("\t number: %8ld %10d %12lld\n",
+-		kstat_cpu_irqs_sum(cpu) - rsrp->nr_hardirqs,
++	pr_err("\t number: %8lld %10d %12lld\n",
++		kstat_cpu_irqs_sum(cpu) + arch_irq_stat_cpu(cpu) - rsrp->nr_hardirqs,
+ 		kstat_cpu_softirqs_sum(cpu) - rsrp->nr_softirqs,
+ 		nr_context_switches_cpu(cpu) - rsrp->nr_csw);
+ 	pr_err("\tcputime: %8lld %10lld %12lld   ==> %d(ms)\n",
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index e9bb1b4c58421f..51f36de5990a3b 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -2229,6 +2229,12 @@ unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state
+ 		 * just go back and repeat.
+ 		 */
+ 		rq = task_rq_lock(p, &rf);
++		/*
++		 * If the task is sched_delayed, force-dequeue it to avoid
++		 * always hitting the tick timeout in the queued case.
++		 */
++		if (p->se.sched_delayed)
++			dequeue_task(rq, p, DEQUEUE_SLEEP | DEQUEUE_DELAYED);
+ 		trace_sched_wait_task(p);
+ 		running = task_on_cpu(rq, p);
+ 		queued = task_on_rq_queued(p);
+@@ -6517,12 +6523,14 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
+  * Otherwise marks the task's __state as RUNNING
+  */
+ static bool try_to_block_task(struct rq *rq, struct task_struct *p,
+-			      unsigned long task_state)
++			      unsigned long *task_state_p)
+ {
++	unsigned long task_state = *task_state_p;
+ 	int flags = DEQUEUE_NOCLOCK;
+ 
+ 	if (signal_pending_state(task_state, p)) {
+ 		WRITE_ONCE(p->__state, TASK_RUNNING);
++		*task_state_p = TASK_RUNNING;
+ 		return false;
+ 	}
+ 
+@@ -6656,7 +6664,7 @@ static void __sched notrace __schedule(int sched_mode)
+ 			goto picked;
+ 		}
+ 	} else if (!preempt && prev_state) {
+-		try_to_block_task(rq, prev, prev_state);
++		try_to_block_task(rq, prev, &prev_state);
+ 		switch_count = &prev->nvcsw;
+ 	}
+ 
+diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
+index 6bcee470405926..d44641108ba81f 100644
+--- a/kernel/time/posix-cpu-timers.c
++++ b/kernel/time/posix-cpu-timers.c
+@@ -1400,6 +1400,15 @@ void run_posix_cpu_timers(void)
+ 
+ 	lockdep_assert_irqs_disabled();
+ 
++	/*
++	 * Ensure that release_task(tsk) can't happen while
++	 * handle_posix_cpu_timers() is running. Otherwise, a concurrent
++	 * posix_cpu_timer_del() may fail to lock_task_sighand(tsk) and
++	 * miss timer->it.cpu.firing != 0.
++	 */
++	if (tsk->exit_state)
++		return;
++
+ 	/*
+ 	 * If the actual expiry is deferred to task work context and the
+ 	 * work is already scheduled there is no point to do anything here.
+diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
+index e5c063fc8ef97a..3ec7df7dbeec4e 100644
+--- a/kernel/trace/bpf_trace.c
++++ b/kernel/trace/bpf_trace.c
+@@ -1828,7 +1828,7 @@ static struct pt_regs *get_bpf_raw_tp_regs(void)
+ 	struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs);
+ 	int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level);
+ 
+-	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) {
++	if (nest_level > ARRAY_SIZE(tp_regs->regs)) {
+ 		this_cpu_dec(bpf_raw_tp_nest_level);
+ 		return ERR_PTR(-EBUSY);
+ 	}
+@@ -2932,6 +2932,9 @@ int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
+ 	if (sizeof(u64) != sizeof(void *))
+ 		return -EOPNOTSUPP;
+ 
++	if (attr->link_create.flags)
++		return -EINVAL;
++
+ 	if (!is_kprobe_multi(prog))
+ 		return -EINVAL;
+ 
+@@ -3346,7 +3349,9 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
+ 	}
+ 
+ 	if (pid) {
++		rcu_read_lock();
+ 		task = get_pid_task(find_vpid(pid), PIDTYPE_TGID);
++		rcu_read_unlock();
+ 		if (!task) {
+ 			err = -ESRCH;
+ 			goto error_path_put;
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index baa5547e977a02..6ab740d3185bc3 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -2796,6 +2796,12 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
+ 	if (nr_pages < 2)
+ 		nr_pages = 2;
+ 
++	/*
++	 * Keep CPUs from coming online while resizing to synchronize
++	 * with new per CPU buffers being created.
++	 */
++	guard(cpus_read_lock)();
++
+ 	/* prevent another thread from changing buffer sizes */
+ 	mutex_lock(&buffer->mutex);
+ 	atomic_inc(&buffer->resizing);
+@@ -2840,7 +2846,6 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
+ 			cond_resched();
+ 		}
+ 
+-		cpus_read_lock();
+ 		/*
+ 		 * Fire off all the required work handlers
+ 		 * We can't schedule on offline CPUs, but it's not necessary
+@@ -2880,7 +2885,6 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
+ 			cpu_buffer->nr_pages_to_update = 0;
+ 		}
+ 
+-		cpus_read_unlock();
+ 	} else {
+ 		cpu_buffer = buffer->buffers[cpu_id];
+ 
+@@ -2908,8 +2912,6 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
+ 			goto out_err;
+ 		}
+ 
+-		cpus_read_lock();
+-
+ 		/* Can't run something on an offline CPU. */
+ 		if (!cpu_online(cpu_id))
+ 			rb_update_pages(cpu_buffer);
+@@ -2928,7 +2930,6 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
+ 		}
+ 
+ 		cpu_buffer->nr_pages_to_update = 0;
+-		cpus_read_unlock();
+ 	}
+ 
+  out:
+@@ -6754,7 +6755,7 @@ int ring_buffer_subbuf_order_set(struct trace_buffer *buffer, int order)
+ 	old_size = buffer->subbuf_size;
+ 
+ 	/* prevent another thread from changing buffer sizes */
+-	mutex_lock(&buffer->mutex);
++	guard(mutex)(&buffer->mutex);
+ 	atomic_inc(&buffer->record_disabled);
+ 
+ 	/* Make sure all commits have finished */
+@@ -6859,7 +6860,6 @@ int ring_buffer_subbuf_order_set(struct trace_buffer *buffer, int order)
+ 	}
+ 
+ 	atomic_dec(&buffer->record_disabled);
+-	mutex_unlock(&buffer->mutex);
+ 
+ 	return 0;
+ 
+@@ -6868,7 +6868,6 @@ int ring_buffer_subbuf_order_set(struct trace_buffer *buffer, int order)
+ 	buffer->subbuf_size = old_size;
+ 
+ 	atomic_dec(&buffer->record_disabled);
+-	mutex_unlock(&buffer->mutex);
+ 
+ 	for_each_buffer_cpu(buffer, cpu) {
+ 		cpu_buffer = buffer->buffers[cpu];
+@@ -7274,8 +7273,8 @@ int ring_buffer_map_get_reader(struct trace_buffer *buffer, int cpu)
+ 	/* Check if any events were dropped */
+ 	missed_events = cpu_buffer->lost_events;
+ 
+-	if (cpu_buffer->reader_page != cpu_buffer->commit_page) {
+-		if (missed_events) {
++	if (missed_events) {
++		if (cpu_buffer->reader_page != cpu_buffer->commit_page) {
+ 			struct buffer_data_page *bpage = reader->page;
+ 			unsigned int commit;
+ 			/*
+@@ -7296,13 +7295,23 @@ int ring_buffer_map_get_reader(struct trace_buffer *buffer, int cpu)
+ 				local_add(RB_MISSED_STORED, &bpage->commit);
+ 			}
+ 			local_add(RB_MISSED_EVENTS, &bpage->commit);
++		} else if (!WARN_ONCE(cpu_buffer->reader_page == cpu_buffer->tail_page,
++				      "Reader on commit with %ld missed events",
++				      missed_events)) {
++			/*
++			 * There shouldn't be any missed events if the tail_page
++			 * is on the reader page. But if the tail page is not on the
++			 * reader page and the commit_page is, that would mean that
++			 * there's a commit_overrun (an interrupt preempted an
++			 * addition of an event and then filled the buffer
++			 * with new events). In this case it's not an
++			 * error, but it should still be reported.
++			 *
++			 * TODO: Add missed events to the page for user space to know.
++			 */
++			pr_info("Ring buffer [%d] commit overrun lost %ld events at timestamp:%lld\n",
++				cpu, missed_events, cpu_buffer->reader_page->page->time_stamp);
+ 		}
+-	} else {
+-		/*
+-		 * There really shouldn't be any missed events if the commit
+-		 * is on the reader page.
+-		 */
+-		WARN_ON_ONCE(missed_events);
+ 	}
+ 
+ 	cpu_buffer->lost_events = 0;
+diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
+index 82da3ac140242c..57e1af1d3e6d45 100644
+--- a/kernel/trace/trace.h
++++ b/kernel/trace/trace.h
+@@ -1731,6 +1731,9 @@ extern int event_enable_register_trigger(char *glob,
+ extern void event_enable_unregister_trigger(char *glob,
+ 					    struct event_trigger_data *test,
+ 					    struct trace_event_file *file);
++extern struct event_trigger_data *
++trigger_data_alloc(struct event_command *cmd_ops, char *cmd, char *param,
++		   void *private_data);
+ extern void trigger_data_free(struct event_trigger_data *data);
+ extern int event_trigger_init(struct event_trigger_data *data);
+ extern int trace_event_trigger_enable_disable(struct trace_event_file *file,
+@@ -1757,11 +1760,6 @@ extern bool event_trigger_check_remove(const char *glob);
+ extern bool event_trigger_empty_param(const char *param);
+ extern int event_trigger_separate_filter(char *param_and_filter, char **param,
+ 					 char **filter, bool param_required);
+-extern struct event_trigger_data *
+-event_trigger_alloc(struct event_command *cmd_ops,
+-		    char *cmd,
+-		    char *param,
+-		    void *private_data);
+ extern int event_trigger_parse_num(char *trigger,
+ 				   struct event_trigger_data *trigger_data);
+ extern int event_trigger_set_filter(struct event_command *cmd_ops,
+diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
+index 4ebafc655223a8..3379e14d38e9b4 100644
+--- a/kernel/trace/trace_events_hist.c
++++ b/kernel/trace/trace_events_hist.c
+@@ -5249,17 +5249,94 @@ hist_trigger_actions(struct hist_trigger_data *hist_data,
+ 	}
+ }
+ 
++/*
++ * The hist_pad structure is used to save information to create
++ * a histogram from the histogram trigger. It's too big to store
++ * on the stack, so when the histogram trigger is initialized
++ * a percpu array of 4 hist_pad structures is allocated.
++ * This will cover every context from normal, softirq, irq and NMI
++ * in the very unlikely event that a trigger happens in each of
++ * these contexts and interrupts a currently active trigger.
++ */
++struct hist_pad {
++	unsigned long		entries[HIST_STACKTRACE_DEPTH];
++	u64			var_ref_vals[TRACING_MAP_VARS_MAX];
++	char			compound_key[HIST_KEY_SIZE_MAX];
++};
++
++static struct hist_pad __percpu *hist_pads;
++static DEFINE_PER_CPU(int, hist_pad_cnt);
++static refcount_t hist_pad_ref;
++
++/* One hist_pad for every context (normal, softirq, irq, NMI) */
++#define MAX_HIST_CNT 4
++
++static int alloc_hist_pad(void)
++{
++	lockdep_assert_held(&event_mutex);
++
++	if (refcount_read(&hist_pad_ref)) {
++		refcount_inc(&hist_pad_ref);
++		return 0;
++	}
++
++	hist_pads = __alloc_percpu(sizeof(struct hist_pad) * MAX_HIST_CNT,
++				   __alignof__(struct hist_pad));
++	if (!hist_pads)
++		return -ENOMEM;
++
++	refcount_set(&hist_pad_ref, 1);
++	return 0;
++}
++
++static void free_hist_pad(void)
++{
++	lockdep_assert_held(&event_mutex);
++
++	if (!refcount_dec_and_test(&hist_pad_ref))
++		return;
++
++	free_percpu(hist_pads);
++	hist_pads = NULL;
++}
++
++static struct hist_pad *get_hist_pad(void)
++{
++	struct hist_pad *hist_pad;
++	int cnt;
++
++	if (WARN_ON_ONCE(!hist_pads))
++		return NULL;
++
++	preempt_disable();
++
++	hist_pad = per_cpu_ptr(hist_pads, smp_processor_id());
++
++	if (this_cpu_read(hist_pad_cnt) == MAX_HIST_CNT) {
++		preempt_enable();
++		return NULL;
++	}
++
++	cnt = this_cpu_inc_return(hist_pad_cnt) - 1;
++
++	return &hist_pad[cnt];
++}
++
++static void put_hist_pad(void)
++{
++	this_cpu_dec(hist_pad_cnt);
++	preempt_enable();
++}
++
+ static void event_hist_trigger(struct event_trigger_data *data,
+ 			       struct trace_buffer *buffer, void *rec,
+ 			       struct ring_buffer_event *rbe)
+ {
+ 	struct hist_trigger_data *hist_data = data->private_data;
+ 	bool use_compound_key = (hist_data->n_keys > 1);
+-	unsigned long entries[HIST_STACKTRACE_DEPTH];
+-	u64 var_ref_vals[TRACING_MAP_VARS_MAX];
+-	char compound_key[HIST_KEY_SIZE_MAX];
+ 	struct tracing_map_elt *elt = NULL;
+ 	struct hist_field *key_field;
++	struct hist_pad *hist_pad;
+ 	u64 field_contents;
+ 	void *key = NULL;
+ 	unsigned int i;
+@@ -5267,12 +5344,18 @@ static void event_hist_trigger(struct event_trigger_data *data,
+ 	if (unlikely(!rbe))
+ 		return;
+ 
+-	memset(compound_key, 0, hist_data->key_size);
++	hist_pad = get_hist_pad();
++	if (!hist_pad)
++		return;
++
++	memset(hist_pad->compound_key, 0, hist_data->key_size);
+ 
+ 	for_each_hist_key_field(i, hist_data) {
+ 		key_field = hist_data->fields[i];
+ 
+ 		if (key_field->flags & HIST_FIELD_FL_STACKTRACE) {
++			unsigned long *entries = hist_pad->entries;
++
+ 			memset(entries, 0, HIST_STACKTRACE_SIZE);
+ 			if (key_field->field) {
+ 				unsigned long *stack, n_entries;
+@@ -5296,26 +5379,31 @@ static void event_hist_trigger(struct event_trigger_data *data,
+ 		}
+ 
+ 		if (use_compound_key)
+-			add_to_key(compound_key, key, key_field, rec);
++			add_to_key(hist_pad->compound_key, key, key_field, rec);
+ 	}
+ 
+ 	if (use_compound_key)
+-		key = compound_key;
++		key = hist_pad->compound_key;
+ 
+ 	if (hist_data->n_var_refs &&
+-	    !resolve_var_refs(hist_data, key, var_ref_vals, false))
+-		return;
++	    !resolve_var_refs(hist_data, key, hist_pad->var_ref_vals, false))
++		goto out;
+ 
+ 	elt = tracing_map_insert(hist_data->map, key);
+ 	if (!elt)
+-		return;
++		goto out;
+ 
+-	hist_trigger_elt_update(hist_data, elt, buffer, rec, rbe, var_ref_vals);
++	hist_trigger_elt_update(hist_data, elt, buffer, rec, rbe, hist_pad->var_ref_vals);
+ 
+-	if (resolve_var_refs(hist_data, key, var_ref_vals, true))
+-		hist_trigger_actions(hist_data, elt, buffer, rec, rbe, key, var_ref_vals);
++	if (resolve_var_refs(hist_data, key, hist_pad->var_ref_vals, true)) {
++		hist_trigger_actions(hist_data, elt, buffer, rec, rbe,
++				     key, hist_pad->var_ref_vals);
++	}
+ 
+ 	hist_poll_wakeup();
++
++ out:
++	put_hist_pad();
+ }
+ 
+ static void hist_trigger_stacktrace_print(struct seq_file *m,
+@@ -6160,6 +6248,9 @@ static int event_hist_trigger_init(struct event_trigger_data *data)
+ {
+ 	struct hist_trigger_data *hist_data = data->private_data;
+ 
++	if (alloc_hist_pad() < 0)
++		return -ENOMEM;
++
+ 	if (!data->ref && hist_data->attrs->name)
+ 		save_named_trigger(hist_data->attrs->name, data);
+ 
+@@ -6204,6 +6295,7 @@ static void event_hist_trigger_free(struct event_trigger_data *data)
+ 
+ 		destroy_hist_data(hist_data);
+ 	}
++	free_hist_pad();
+ }
+ 
+ static struct event_trigger_ops event_hist_trigger_ops = {
+@@ -6219,9 +6311,7 @@ static int event_hist_trigger_named_init(struct event_trigger_data *data)
+ 
+ 	save_named_trigger(data->named_data->name, data);
+ 
+-	event_hist_trigger_init(data->named_data);
+-
+-	return 0;
++	return event_hist_trigger_init(data->named_data);
+ }
+ 
+ static void event_hist_trigger_named_free(struct event_trigger_data *data)
+@@ -6708,7 +6798,7 @@ static int event_hist_trigger_parse(struct event_command *cmd_ops,
+ 		return PTR_ERR(hist_data);
+ 	}
+ 
+-	trigger_data = event_trigger_alloc(cmd_ops, cmd, param, hist_data);
++	trigger_data = trigger_data_alloc(cmd_ops, cmd, param, hist_data);
+ 	if (!trigger_data) {
+ 		ret = -ENOMEM;
+ 		goto out_free;
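
The hist_pad hunk above replaces three large on-stack buffers in event_hist_trigger() with a per-CPU scratch pad, one per nesting level (normal, softirq, irq, NMI), claimed and released around each invocation. A minimal user-space model of the get/put nesting pattern, assuming C11 thread-local storage in place of the kernel's percpu API and a single thread standing in for one CPU; preemption handling is elided:

	#include <stdio.h>

	/* One pad per nesting level: normal, softirq, irq, NMI. */
	#define MAX_PAD_CNT 4

	struct pad { char scratch[256]; };

	static _Thread_local struct pad pads[MAX_PAD_CNT]; /* models the per-CPU array */
	static _Thread_local int pad_cnt;                  /* models hist_pad_cnt */

	static struct pad *get_pad(void)
	{
		if (pad_cnt == MAX_PAD_CNT)
			return NULL;             /* every nesting level is busy */
		return &pads[pad_cnt++];         /* claim the next level */
	}

	static void put_pad(void)
	{
		pad_cnt--;
	}

	int main(void)
	{
		struct pad *outer = get_pad();   /* "normal" context */
		struct pad *inner = get_pad();   /* e.g. an interrupting trigger */

		printf("outer=%p inner=%p depth=%d\n",
		       (void *)outer, (void *)inner, pad_cnt);
		put_pad();
		put_pad();
		return 0;
	}
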
+diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
+index 27e21488d57417..d5dbda9b0e4b05 100644
+--- a/kernel/trace/trace_events_trigger.c
++++ b/kernel/trace/trace_events_trigger.c
+@@ -825,7 +825,7 @@ int event_trigger_separate_filter(char *param_and_filter, char **param,
+ }
+ 
+ /**
+- * event_trigger_alloc - allocate and init event_trigger_data for a trigger
++ * trigger_data_alloc - allocate and init event_trigger_data for a trigger
+  * @cmd_ops: The event_command operations for the trigger
+  * @cmd: The cmd string
+  * @param: The param string
+@@ -836,14 +836,14 @@ int event_trigger_separate_filter(char *param_and_filter, char **param,
+  * trigger_ops to assign to the event_trigger_data.  @private_data can
+  * also be passed in and associated with the event_trigger_data.
+  *
+- * Use event_trigger_free() to free an event_trigger_data object.
++ * Use trigger_data_free() to free an event_trigger_data object.
+  *
+  * Return: The trigger_data object on success, NULL otherwise
+  */
+-struct event_trigger_data *event_trigger_alloc(struct event_command *cmd_ops,
+-					       char *cmd,
+-					       char *param,
+-					       void *private_data)
++struct event_trigger_data *trigger_data_alloc(struct event_command *cmd_ops,
++					      char *cmd,
++					      char *param,
++					      void *private_data)
+ {
+ 	struct event_trigger_data *trigger_data;
+ 	struct event_trigger_ops *trigger_ops;
+@@ -1010,13 +1010,13 @@ event_trigger_parse(struct event_command *cmd_ops,
+ 		return ret;
+ 
+ 	ret = -ENOMEM;
+-	trigger_data = event_trigger_alloc(cmd_ops, cmd, param, file);
++	trigger_data = trigger_data_alloc(cmd_ops, cmd, param, file);
+ 	if (!trigger_data)
+ 		goto out;
+ 
+ 	if (remove) {
+ 		event_trigger_unregister(cmd_ops, file, glob+1, trigger_data);
+-		kfree(trigger_data);
++		trigger_data_free(trigger_data);
+ 		ret = 0;
+ 		goto out;
+ 	}
+@@ -1043,7 +1043,7 @@ event_trigger_parse(struct event_command *cmd_ops,
+ 
+  out_free:
+ 	event_trigger_reset_filter(cmd_ops, trigger_data);
+-	kfree(trigger_data);
++	trigger_data_free(trigger_data);
+ 	goto out;
+ }
+ 
+@@ -1814,7 +1814,7 @@ int event_enable_trigger_parse(struct event_command *cmd_ops,
+ 	enable_data->enable = enable;
+ 	enable_data->file = event_enable_file;
+ 
+-	trigger_data = event_trigger_alloc(cmd_ops, cmd, param, enable_data);
++	trigger_data = trigger_data_alloc(cmd_ops, cmd, param, enable_data);
+ 	if (!trigger_data) {
+ 		kfree(enable_data);
+ 		goto out;
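
Besides the rename, the remove and error paths above now call trigger_data_free() instead of a bare kfree(), keeping teardown centralized in one destructor. A sketch of why that pairing matters, with illustrative fields; the kernel's destructor additionally performs trigger-specific teardown (e.g. waiting for in-flight users) before freeing:

	#include <stdlib.h>
	#include <string.h>

	/* Illustrative layout: the constructor may attach resources that a
	 * raw free() of the outer object would leak. Assumes POSIX strdup(). */
	struct trigger_data {
		char *cmd;               /* owned copy, released by the destructor */
		void *private_data;
	};

	static struct trigger_data *trigger_data_alloc(const char *cmd, void *priv)
	{
		struct trigger_data *d = calloc(1, sizeof(*d));

		if (!d)
			return NULL;
		d->cmd = strdup(cmd);    /* extra allocation a bare free() misses */
		if (!d->cmd) {
			free(d);
			return NULL;
		}
		d->private_data = priv;
		return d;
	}

	static void trigger_data_free(struct trigger_data *d)
	{
		if (!d)
			return;
		free(d->cmd);            /* released only here */
		free(d);
	}

	int main(void)
	{
		struct trigger_data *d = trigger_data_alloc("hist", NULL);

		trigger_data_free(d);    /* free(d) alone would leak d->cmd */
		return 0;
	}
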
+diff --git a/lib/iov_iter.c b/lib/iov_iter.c
+index bdb37d572e97ca..8ede6be556a960 100644
+--- a/lib/iov_iter.c
++++ b/lib/iov_iter.c
+@@ -820,7 +820,7 @@ static bool iov_iter_aligned_bvec(const struct iov_iter *i, unsigned addr_mask,
+ 	size_t size = i->count;
+ 
+ 	do {
+-		size_t len = bvec->bv_len;
++		size_t len = bvec->bv_len - skip;
+ 
+ 		if (len > size)
+ 			len = size;
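
The one-line iov_iter fix above accounts for an iterator that starts partway into the first bvec: the bytes remaining in that segment are bv_len - skip, not bv_len. A tiny model with an illustrative segment type:

	#include <stdio.h>
	#include <stddef.h>

	/* Model of iterating a bio_vec-style segment list where the iterator
	 * may start 'skip' bytes into the first segment. */
	struct seg { size_t len; };

	static size_t first_chunk(const struct seg *s, size_t skip, size_t count)
	{
		size_t len = s->len - skip;   /* the fix: subtract the offset */

		return len > count ? count : len;
	}

	int main(void)
	{
		struct seg s = { .len = 4096 };

		/* Starting 100 bytes in, only 3996 bytes remain in this segment. */
		printf("%zu\n", first_chunk(&s, 100, 8192)); /* prints 3996 */
		return 0;
	}
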
+diff --git a/lib/kunit/static_stub.c b/lib/kunit/static_stub.c
+index 92b2cccd5e7633..484fd85251b415 100644
+--- a/lib/kunit/static_stub.c
++++ b/lib/kunit/static_stub.c
+@@ -96,7 +96,7 @@ void __kunit_activate_static_stub(struct kunit *test,
+ 
+ 	/* If the replacement address is NULL, deactivate the stub. */
+ 	if (!replacement_addr) {
+-		kunit_deactivate_static_stub(test, replacement_addr);
++		kunit_deactivate_static_stub(test, real_fn_addr);
+ 		return;
+ 	}
+ 
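
The static-stub fix above deactivates by real_fn_addr, the key the stub table is indexed by, instead of the replacement address, which is NULL on this path. A user-space sketch of the registry lookup, using plain objects as stand-ins for function addresses:

	#include <stdio.h>
	#include <stddef.h>

	#define MAX_STUBS 8

	static struct { void *real, *repl; } stubs[MAX_STUBS];

	static void deactivate_stub(void *real_fn_addr)
	{
		for (int i = 0; i < MAX_STUBS; i++)
			if (stubs[i].real == real_fn_addr)
				stubs[i].real = stubs[i].repl = NULL;
	}

	static void activate_stub(void *real_fn_addr, void *replacement_addr)
	{
		if (!replacement_addr) {
			/* the fixed call site: key on real_fn_addr, not the NULL */
			deactivate_stub(real_fn_addr);
			return;
		}
		for (int i = 0; i < MAX_STUBS; i++)
			if (!stubs[i].real) {
				stubs[i].real = real_fn_addr;
				stubs[i].repl = replacement_addr;
				return;
			}
	}

	int main(void)
	{
		int real_fn, fake_fn;    /* stand-ins for function addresses */

		activate_stub(&real_fn, &fake_fn);
		activate_stub(&real_fn, NULL);   /* deactivates via the real key */
		printf("%s\n", stubs[0].real ? "active" : "inactive"); /* inactive */
		return 0;
	}
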
+diff --git a/lib/usercopy_kunit.c b/lib/usercopy_kunit.c
+index 77fa00a13df775..80f8abe10968c1 100644
+--- a/lib/usercopy_kunit.c
++++ b/lib/usercopy_kunit.c
+@@ -27,6 +27,7 @@
+ 			    !defined(CONFIG_MICROBLAZE) &&	\
+ 			    !defined(CONFIG_NIOS2) &&		\
+ 			    !defined(CONFIG_PPC32) &&		\
++			    !defined(CONFIG_SPARC32) &&		\
+ 			    !defined(CONFIG_SUPERH))
+ # define TEST_U64
+ #endif
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 882903f42300b8..752576749db9d0 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -872,9 +872,7 @@ static inline bool page_expected_state(struct page *page,
+ #ifdef CONFIG_MEMCG
+ 			page->memcg_data |
+ #endif
+-#ifdef CONFIG_PAGE_POOL
+-			((page->pp_magic & ~0x3UL) == PP_SIGNATURE) |
+-#endif
++			page_pool_page_is_pp(page) |
+ 			(page->flags & check_flags)))
+ 		return false;
+ 
+@@ -901,10 +899,8 @@ static const char *page_bad_reason(struct page *page, unsigned long flags)
+ 	if (unlikely(page->memcg_data))
+ 		bad_reason = "page still charged to cgroup";
+ #endif
+-#ifdef CONFIG_PAGE_POOL
+-	if (unlikely((page->pp_magic & ~0x3UL) == PP_SIGNATURE))
++	if (unlikely(page_pool_page_is_pp(page)))
+ 		bad_reason = "page_pool leak";
+-#endif
+ 	return bad_reason;
+ }
+ 
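
The page_alloc hunks above fold the open-coded pp_magic test into page_pool_page_is_pp(), dropping the #ifdef clutter. The test masks off the low bits of pp_magic, which are reused for unrelated flags, before comparing against the pool signature. A sketch with illustrative constant values; the kernel defines PP_SIGNATURE and PP_MAGIC_MASK elsewhere:

	#include <stdbool.h>
	#include <stdio.h>

	#define PP_SIGNATURE   0x40UL   /* assumed example signature */
	#define PP_MAGIC_MASK  (~0x3UL) /* low two bits carry unrelated flags */

	static bool page_is_pp(unsigned long pp_magic)
	{
		return (pp_magic & PP_MAGIC_MASK) == PP_SIGNATURE;
	}

	int main(void)
	{
		printf("%d %d\n",
		       page_is_pp(PP_SIGNATURE | 0x1),  /* flag bit set -> still 1 */
		       page_is_pp(0x80UL));             /* different owner -> 0 */
		return 0;
	}
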
+diff --git a/net/bluetooth/eir.c b/net/bluetooth/eir.c
+index 1bc51e2b05a347..3f72111ba651f9 100644
+--- a/net/bluetooth/eir.c
++++ b/net/bluetooth/eir.c
+@@ -242,7 +242,7 @@ u8 eir_create_per_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
+ 	return ad_len;
+ }
+ 
+-u8 eir_create_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
++u8 eir_create_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr, u8 size)
+ {
+ 	struct adv_info *adv = NULL;
+ 	u8 ad_len = 0, flags = 0;
+@@ -286,7 +286,7 @@ u8 eir_create_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
+ 		/* If flags would still be empty, then there is no need to
+ 		 * include the "Flags" AD field".
+ 		 */
+-		if (flags) {
++		if (flags && (ad_len + eir_precalc_len(1) <= size)) {
+ 			ptr[0] = 0x02;
+ 			ptr[1] = EIR_FLAGS;
+ 			ptr[2] = flags;
+@@ -316,7 +316,8 @@ u8 eir_create_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
+ 		}
+ 
+ 		/* Provide Tx Power only if we can provide a valid value for it */
+-		if (adv_tx_power != HCI_TX_POWER_INVALID) {
++		if (adv_tx_power != HCI_TX_POWER_INVALID &&
++		    (ad_len + eir_precalc_len(1) <= size)) {
+ 			ptr[0] = 0x02;
+ 			ptr[1] = EIR_TX_POWER;
+ 			ptr[2] = (u8)adv_tx_power;
+@@ -366,17 +367,19 @@ u8 eir_create_scan_rsp(struct hci_dev *hdev, u8 instance, u8 *ptr)
+ 
+ void *eir_get_service_data(u8 *eir, size_t eir_len, u16 uuid, size_t *len)
+ {
+-	while ((eir = eir_get_data(eir, eir_len, EIR_SERVICE_DATA, len))) {
++	size_t dlen;
++
++	while ((eir = eir_get_data(eir, eir_len, EIR_SERVICE_DATA, &dlen))) {
+ 		u16 value = get_unaligned_le16(eir);
+ 
+ 		if (uuid == value) {
+ 			if (len)
+-				*len -= 2;
++				*len = dlen - 2;
+ 			return &eir[2];
+ 		}
+ 
+-		eir += *len;
+-		eir_len -= *len;
++		eir += dlen;
++		eir_len -= dlen;
+ 	}
+ 
+ 	return NULL;
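
The eir_get_service_data() fix above keeps the record length in a local dlen, so the loop advances by each record's own length and only writes the caller's out-parameter on a match; previously the out-parameter did double duty as the loop cursor. A self-contained model of walking length-prefixed records; the framing here (a length byte covering type plus payload) is illustrative, not the exact eir_get_data() contract:

	#include <stdint.h>
	#include <stdio.h>

	static const uint8_t *find_service_data(const uint8_t *eir, size_t eir_len,
						uint16_t uuid, size_t *out_len)
	{
		while (eir_len >= 2) {
			size_t dlen = eir[0];          /* record length (type + data) */
			uint8_t type = eir[1];

			if (dlen < 1 || dlen + 1 > eir_len)
				break;                 /* malformed record */

			if (type == 0x16 /* service data */ && dlen >= 3) {
				uint16_t value = eir[2] | (eir[3] << 8);

				if (value == uuid) {
					if (out_len)
						*out_len = dlen - 3; /* strip type + UUID */
					return &eir[4];
				}
			}
			eir += dlen + 1;               /* advance by the local dlen, */
			eir_len -= dlen + 1;           /* not by *out_len */
		}
		return NULL;
	}

	int main(void)
	{
		/* One record: len=5, type=0x16, UUID 0x1852 (LE), data {0xAA, 0xBB} */
		const uint8_t eir[] = { 5, 0x16, 0x52, 0x18, 0xAA, 0xBB };
		size_t len;
		const uint8_t *data = find_service_data(eir, sizeof(eir), 0x1852, &len);

		if (data)
			printf("found %zu bytes, first 0x%02X\n", len, data[0]); /* 2, 0xAA */
		return 0;
	}
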
+diff --git a/net/bluetooth/eir.h b/net/bluetooth/eir.h
+index 5c89a05e8b2905..9372db83f912fa 100644
+--- a/net/bluetooth/eir.h
++++ b/net/bluetooth/eir.h
+@@ -9,7 +9,7 @@
+ 
+ void eir_create(struct hci_dev *hdev, u8 *data);
+ 
+-u8 eir_create_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr);
++u8 eir_create_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr, u8 size);
+ u8 eir_create_scan_rsp(struct hci_dev *hdev, u8 instance, u8 *ptr);
+ u8 eir_create_per_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr);
+ 
+diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
+index ae66fa0a5fb584..c6c1232db4e28f 100644
+--- a/net/bluetooth/hci_conn.c
++++ b/net/bluetooth/hci_conn.c
+@@ -2067,6 +2067,8 @@ struct hci_conn *hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst,
+ {
+ 	struct hci_conn *conn;
+ 
++	bt_dev_dbg(hdev, "dst %pMR type %d sid %d", dst, dst_type, sid);
++
+ 	conn = hci_conn_add_unset(hdev, ISO_LINK, dst, HCI_ROLE_SLAVE);
+ 	if (IS_ERR(conn))
+ 		return conn;
+diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
+index 72439764186ed2..0d3816c807588c 100644
+--- a/net/bluetooth/hci_core.c
++++ b/net/bluetooth/hci_core.c
+@@ -1877,10 +1877,8 @@ void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
+ 	if (monitor->handle)
+ 		idr_remove(&hdev->adv_monitors_idr, monitor->handle);
+ 
+-	if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
++	if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED)
+ 		hdev->adv_monitors_cnt--;
+-		mgmt_adv_monitor_removed(hdev, monitor->handle);
+-	}
+ 
+ 	kfree(monitor);
+ }
+@@ -2507,6 +2505,7 @@ struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
+ 
+ 	mutex_init(&hdev->lock);
+ 	mutex_init(&hdev->req_lock);
++	mutex_init(&hdev->mgmt_pending_lock);
+ 
+ 	ida_init(&hdev->unset_handle_ida);
+ 
+@@ -3416,23 +3415,18 @@ static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
+ 
+ 	bt_dev_err(hdev, "link tx timeout");
+ 
+-	rcu_read_lock();
++	hci_dev_lock(hdev);
+ 
+ 	/* Kill stalled connections */
+-	list_for_each_entry_rcu(c, &h->list, list) {
++	list_for_each_entry(c, &h->list, list) {
+ 		if (c->type == type && c->sent) {
+ 			bt_dev_err(hdev, "killing stalled connection %pMR",
+ 				   &c->dst);
+-			/* hci_disconnect might sleep, so, we have to release
+-			 * the RCU read lock before calling it.
+-			 */
+-			rcu_read_unlock();
+ 			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
+-			rcu_read_lock();
+ 		}
+ 	}
+ 
+-	rcu_read_unlock();
++	hci_dev_unlock(hdev);
+ }
+ 
+ static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
+@@ -4071,10 +4065,13 @@ static void hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb)
+ 		return;
+ 	}
+ 
+-	err = hci_send_frame(hdev, skb);
+-	if (err < 0) {
+-		hci_cmd_sync_cancel_sync(hdev, -err);
+-		return;
++	if (hci_skb_opcode(skb) != HCI_OP_NOP) {
++		err = hci_send_frame(hdev, skb);
++		if (err < 0) {
++			hci_cmd_sync_cancel_sync(hdev, -err);
++			return;
++		}
++		atomic_dec(&hdev->cmd_cnt);
+ 	}
+ 
+ 	if (hdev->req_status == HCI_REQ_PEND &&
+@@ -4082,8 +4079,6 @@ static void hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb)
+ 		kfree_skb(hdev->req_skb);
+ 		hdev->req_skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
+ 	}
+-
+-	atomic_dec(&hdev->cmd_cnt);
+ }
+ 
+ static void hci_cmd_work(struct work_struct *work)
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index 88946334035193..5c4c3d04d8b934 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -6333,6 +6333,17 @@ static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, void *data,
+ 			info->secondary_phy &= 0x1f;
+ 		}
+ 
++		/* Check if PA Sync is pending and, if the hci_conn SID has not
++		 * been set, update it.
++		 */
++		if (hci_dev_test_flag(hdev, HCI_PA_SYNC)) {
++			struct hci_conn *conn;
++
++			conn = hci_conn_hash_lookup_create_pa_sync(hdev);
++			if (conn && conn->sid == HCI_SID_INVALID)
++				conn->sid = info->sid;
++		}
++
+ 		if (legacy_evt_type != LE_ADV_INVALID) {
+ 			process_adv_report(hdev, legacy_evt_type, &info->bdaddr,
+ 					   info->bdaddr_type, NULL, 0,
+@@ -7136,7 +7147,8 @@ static void hci_le_meta_evt(struct hci_dev *hdev, void *data,
+ 
+ 	/* Only match event if command OGF is for LE */
+ 	if (hdev->req_skb &&
+-	    hci_opcode_ogf(hci_skb_opcode(hdev->req_skb)) == 0x08 &&
++	   (hci_opcode_ogf(hci_skb_opcode(hdev->req_skb)) == 0x08 ||
++	    hci_skb_opcode(hdev->req_skb) == HCI_OP_NOP) &&
+ 	    hci_skb_event(hdev->req_skb) == ev->subevent) {
+ 		*opcode = hci_skb_opcode(hdev->req_skb);
+ 		hci_req_cmd_complete(hdev, *opcode, 0x00, req_complete,
+@@ -7492,8 +7504,10 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
+ 		goto done;
+ 	}
+ 
++	hci_dev_lock(hdev);
+ 	kfree_skb(hdev->recv_event);
+ 	hdev->recv_event = skb_clone(skb, GFP_KERNEL);
++	hci_dev_unlock(hdev);
+ 
+ 	event = hdr->evt;
+ 	if (!event) {
+diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
+index 6597936fbd51b9..a00316d79dbf56 100644
+--- a/net/bluetooth/hci_sync.c
++++ b/net/bluetooth/hci_sync.c
+@@ -1559,7 +1559,8 @@ static int hci_enable_per_advertising_sync(struct hci_dev *hdev, u8 instance)
+ static int hci_adv_bcast_annoucement(struct hci_dev *hdev, struct adv_info *adv)
+ {
+ 	u8 bid[3];
+-	u8 ad[4 + 3];
++	u8 ad[HCI_MAX_EXT_AD_LENGTH];
++	u8 len;
+ 
+ 	/* Skip if NULL adv as instance 0x00 is used for general purpose
+ 	 * advertising so it cannot used for the likes of Broadcast Announcement
+@@ -1585,8 +1586,10 @@ static int hci_adv_bcast_annoucement(struct hci_dev *hdev, struct adv_info *adv)
+ 
+ 	/* Generate Broadcast ID */
+ 	get_random_bytes(bid, sizeof(bid));
+-	eir_append_service_data(ad, 0, 0x1852, bid, sizeof(bid));
+-	hci_set_adv_instance_data(hdev, adv->instance, sizeof(ad), ad, 0, NULL);
++	len = eir_append_service_data(ad, 0, 0x1852, bid, sizeof(bid));
++	memcpy(ad + len, adv->adv_data, adv->adv_data_len);
++	hci_set_adv_instance_data(hdev, adv->instance, len + adv->adv_data_len,
++				  ad, 0, NULL);
+ 
+ 	return hci_update_adv_data_sync(hdev, adv->instance);
+ }
+@@ -1603,8 +1606,15 @@ int hci_start_per_adv_sync(struct hci_dev *hdev, u8 instance, u8 data_len,
+ 
+ 	if (instance) {
+ 		adv = hci_find_adv_instance(hdev, instance);
+-		/* Create an instance if that could not be found */
+-		if (!adv) {
++		if (adv) {
++			/* Turn it into periodic advertising */
++			adv->periodic = true;
++			adv->per_adv_data_len = data_len;
++			if (data)
++				memcpy(adv->per_adv_data, data, data_len);
++			adv->flags = flags;
++		} else if (!adv) {
++			/* Create an instance if that could not be found */
+ 			adv = hci_add_per_instance(hdev, instance, flags,
+ 						   data_len, data,
+ 						   sync_interval,
+@@ -1836,7 +1846,8 @@ static int hci_set_ext_adv_data_sync(struct hci_dev *hdev, u8 instance)
+ 			return 0;
+ 	}
+ 
+-	len = eir_create_adv_data(hdev, instance, pdu->data);
++	len = eir_create_adv_data(hdev, instance, pdu->data,
++				  HCI_MAX_EXT_AD_LENGTH);
+ 
+ 	pdu->length = len;
+ 	pdu->handle = adv ? adv->handle : instance;
+@@ -1867,7 +1878,7 @@ static int hci_set_adv_data_sync(struct hci_dev *hdev, u8 instance)
+ 
+ 	memset(&cp, 0, sizeof(cp));
+ 
+-	len = eir_create_adv_data(hdev, instance, cp.data);
++	len = eir_create_adv_data(hdev, instance, cp.data, sizeof(cp.data));
+ 
+ 	/* There's nothing to do if the data hasn't changed */
+ 	if (hdev->adv_data_len == len &&
+@@ -6890,20 +6901,37 @@ int hci_le_conn_update_sync(struct hci_dev *hdev, struct hci_conn *conn,
+ 
+ static void create_pa_complete(struct hci_dev *hdev, void *data, int err)
+ {
++	struct hci_conn *conn = data;
++	struct hci_conn *pa_sync;
++
+ 	bt_dev_dbg(hdev, "err %d", err);
+ 
+-	if (!err)
++	if (err == -ECANCELED)
+ 		return;
+ 
++	hci_dev_lock(hdev);
++
+ 	hci_dev_clear_flag(hdev, HCI_PA_SYNC);
+ 
+-	if (err == -ECANCELED)
+-		return;
++	if (!hci_conn_valid(hdev, conn))
++		clear_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags);
+ 
+-	hci_dev_lock(hdev);
++	if (!err)
++		goto unlock;
+ 
+-	hci_update_passive_scan_sync(hdev);
++	/* Add connection to indicate PA sync error */
++	pa_sync = hci_conn_add_unset(hdev, ISO_LINK, BDADDR_ANY,
++				     HCI_ROLE_SLAVE);
+ 
++	if (IS_ERR(pa_sync))
++		goto unlock;
++
++	set_bit(HCI_CONN_PA_SYNC_FAILED, &pa_sync->flags);
++
++	/* Notify iso layer */
++	hci_connect_cfm(pa_sync, bt_status(err));
++
++unlock:
+ 	hci_dev_unlock(hdev);
+ }
+ 
+@@ -6917,9 +6945,23 @@ static int hci_le_pa_create_sync(struct hci_dev *hdev, void *data)
+ 	if (!hci_conn_valid(hdev, conn))
+ 		return -ECANCELED;
+ 
++	if (conn->sync_handle != HCI_SYNC_HANDLE_INVALID)
++		return -EINVAL;
++
+ 	if (hci_dev_test_and_set_flag(hdev, HCI_PA_SYNC))
+ 		return -EBUSY;
+ 
++	/* Stop scanning if the SID has not been set and active scanning is
++	 * enabled, so that passive scanning is used with the allow list
++	 * programmed to contain only the connection address.
++	 */
++	if (conn->sid == HCI_SID_INVALID &&
++	    hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
++		hci_scan_disable_sync(hdev);
++		hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED);
++		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
++	}
++
+ 	/* Mark HCI_CONN_CREATE_PA_SYNC so hci_update_passive_scan_sync can
+ 	 * program the address in the allow list so PA advertisements can be
+ 	 * received.
+@@ -6928,6 +6970,14 @@ static int hci_le_pa_create_sync(struct hci_dev *hdev, void *data)
+ 
+ 	hci_update_passive_scan_sync(hdev);
+ 
++	/* If the SID has not been set, listen for HCI_EV_LE_EXT_ADV_REPORT
++	 * to update it.
++	 */
++	if (conn->sid == HCI_SID_INVALID)
++		__hci_cmd_sync_status_sk(hdev, HCI_OP_NOP, 0, NULL,
++					 HCI_EV_LE_EXT_ADV_REPORT,
++					 conn->conn_timeout, NULL);
++
+ 	memset(&cp, 0, sizeof(cp));
+ 	cp.options = qos->bcast.options;
+ 	cp.sid = conn->sid;
+diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c
+index 72bf9b1db22471..a08a0f3d5003cc 100644
+--- a/net/bluetooth/iso.c
++++ b/net/bluetooth/iso.c
+@@ -938,7 +938,7 @@ static int iso_sock_bind_bc(struct socket *sock, struct sockaddr *addr,
+ 
+ 	iso_pi(sk)->dst_type = sa->iso_bc->bc_bdaddr_type;
+ 
+-	if (sa->iso_bc->bc_sid > 0x0f)
++	if (sa->iso_bc->bc_sid > 0x0f && sa->iso_bc->bc_sid != HCI_SID_INVALID)
+ 		return -EINVAL;
+ 
+ 	iso_pi(sk)->bc_sid = sa->iso_bc->bc_sid;
+@@ -1963,6 +1963,9 @@ static bool iso_match_sid(struct sock *sk, void *data)
+ {
+ 	struct hci_ev_le_pa_sync_established *ev = data;
+ 
++	if (iso_pi(sk)->bc_sid == HCI_SID_INVALID)
++		return true;
++
+ 	return ev->sid == iso_pi(sk)->bc_sid;
+ }
+ 
+@@ -2009,8 +2012,10 @@ int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags)
+ 	if (ev1) {
+ 		sk = iso_get_sock(&hdev->bdaddr, bdaddr, BT_LISTEN,
+ 				  iso_match_sid, ev1);
+-		if (sk && !ev1->status)
++		if (sk && !ev1->status) {
+ 			iso_pi(sk)->sync_handle = le16_to_cpu(ev1->handle);
++			iso_pi(sk)->bc_sid = ev1->sid;
++		}
+ 
+ 		goto done;
+ 	}
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index 66fa5d6fea6cad..a40534bf9084d0 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -4835,7 +4835,8 @@ static int l2cap_le_connect_req(struct l2cap_conn *conn,
+ 
+ 	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
+ 				     SMP_ALLOW_STK)) {
+-		result = L2CAP_CR_LE_AUTHENTICATION;
++		result = pchan->sec_level == BT_SECURITY_MEDIUM ?
++			L2CAP_CR_LE_ENCRYPTION : L2CAP_CR_LE_AUTHENTICATION;
+ 		chan = NULL;
+ 		goto response_unlock;
+ 	}
+diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
+index d4700f940e8a12..7664e7ba372cee 100644
+--- a/net/bluetooth/mgmt.c
++++ b/net/bluetooth/mgmt.c
+@@ -1440,22 +1440,17 @@ static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
+ 
+ 	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
+ 
+-	list_del(&cmd->list);
+-
+ 	if (match->sk == NULL) {
+ 		match->sk = cmd->sk;
+ 		sock_hold(match->sk);
+ 	}
+-
+-	mgmt_pending_free(cmd);
+ }
+ 
+ static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
+ {
+ 	u8 *status = data;
+ 
+-	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
+-	mgmt_pending_remove(cmd);
++	mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, *status);
+ }
+ 
+ static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
+@@ -1469,8 +1464,6 @@ static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
+ 
+ 	if (cmd->cmd_complete) {
+ 		cmd->cmd_complete(cmd, match->mgmt_status);
+-		mgmt_pending_remove(cmd);
+-
+ 		return;
+ 	}
+ 
+@@ -1479,13 +1472,13 @@ static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
+ 
+ static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
+ {
+-	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
++	return mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, status,
+ 				 cmd->param, cmd->param_len);
+ }
+ 
+ static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
+ {
+-	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
++	return mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, status,
+ 				 cmd->param, sizeof(struct mgmt_addr_info));
+ }
+ 
+@@ -1525,7 +1518,7 @@ static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
+ 
+ 	if (err) {
+ 		u8 mgmt_err = mgmt_status(err);
+-		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
++		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
+ 		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
+ 		goto done;
+ 	}
+@@ -1700,7 +1693,7 @@ static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
+ 
+ 	if (err) {
+ 		u8 mgmt_err = mgmt_status(err);
+-		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
++		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
+ 		goto done;
+ 	}
+ 
+@@ -1936,8 +1929,8 @@ static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
+ 			new_settings(hdev, NULL);
+ 		}
+ 
+-		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
+-				     &mgmt_err);
++		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, true,
++				     cmd_status_rsp, &mgmt_err);
+ 		return;
+ 	}
+ 
+@@ -1947,7 +1940,7 @@ static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
+ 		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
+ 	}
+ 
+-	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
++	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, true, settings_rsp, &match);
+ 
+ 	if (changed)
+ 		new_settings(hdev, match.sk);
+@@ -2067,12 +2060,12 @@ static void set_le_complete(struct hci_dev *hdev, void *data, int err)
+ 	bt_dev_dbg(hdev, "err %d", err);
+ 
+ 	if (status) {
+-		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
+-							&status);
++		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, true, cmd_status_rsp,
++				     &status);
+ 		return;
+ 	}
+ 
+-	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
++	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, true, settings_rsp, &match);
+ 
+ 	new_settings(hdev, match.sk);
+ 
+@@ -2131,7 +2124,7 @@ static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
+ 	struct sock *sk = cmd->sk;
+ 
+ 	if (status) {
+-		mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev,
++		mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev, true,
+ 				     cmd_status_rsp, &status);
+ 		return;
+ 	}
+@@ -2572,7 +2565,7 @@ static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
+ 
+ 	bt_dev_dbg(hdev, "err %d", err);
+ 
+-	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
++	mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
+ 			  mgmt_status(err), hdev->dev_class, 3);
+ 
+ 	mgmt_pending_free(cmd);
+@@ -3360,7 +3353,7 @@ static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
+ 	bacpy(&rp.addr.bdaddr, &conn->dst);
+ 	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
+ 
+-	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
++	err = mgmt_cmd_complete(cmd->sk, cmd->hdev->id, MGMT_OP_PAIR_DEVICE,
+ 				status, &rp, sizeof(rp));
+ 
+ 	/* So we don't get further callbacks for this connection */
+@@ -5172,24 +5165,14 @@ static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
+ 	mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
+ }
+ 
+-void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
++static void mgmt_adv_monitor_removed(struct sock *sk, struct hci_dev *hdev,
++				     __le16 handle)
+ {
+ 	struct mgmt_ev_adv_monitor_removed ev;
+-	struct mgmt_pending_cmd *cmd;
+-	struct sock *sk_skip = NULL;
+-	struct mgmt_cp_remove_adv_monitor *cp;
+-
+-	cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
+-	if (cmd) {
+-		cp = cmd->param;
+-
+-		if (cp->monitor_handle)
+-			sk_skip = cmd->sk;
+-	}
+ 
+-	ev.monitor_handle = cpu_to_le16(handle);
++	ev.monitor_handle = handle;
+ 
+-	mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
++	mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk);
+ }
+ 
+ static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
+@@ -5260,7 +5243,7 @@ static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
+ 		hci_update_passive_scan(hdev);
+ 	}
+ 
+-	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
++	mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
+ 			  mgmt_status(status), &rp, sizeof(rp));
+ 	mgmt_pending_remove(cmd);
+ 
+@@ -5291,8 +5274,7 @@ static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
+ 
+ 	if (pending_find(MGMT_OP_SET_LE, hdev) ||
+ 	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
+-	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
+-	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
++	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
+ 		status = MGMT_STATUS_BUSY;
+ 		goto unlock;
+ 	}
+@@ -5462,8 +5444,7 @@ static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
+ 	struct mgmt_pending_cmd *cmd = data;
+ 	struct mgmt_cp_remove_adv_monitor *cp;
+ 
+-	if (status == -ECANCELED ||
+-	    cmd != pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev))
++	if (status == -ECANCELED)
+ 		return;
+ 
+ 	hci_dev_lock(hdev);
+@@ -5472,12 +5453,14 @@ static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
+ 
+ 	rp.monitor_handle = cp->monitor_handle;
+ 
+-	if (!status)
++	if (!status) {
++		mgmt_adv_monitor_removed(cmd->sk, hdev, cp->monitor_handle);
+ 		hci_update_passive_scan(hdev);
++	}
+ 
+-	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
++	mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
+ 			  mgmt_status(status), &rp, sizeof(rp));
+-	mgmt_pending_remove(cmd);
++	mgmt_pending_free(cmd);
+ 
+ 	hci_dev_unlock(hdev);
+ 	bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
+@@ -5487,10 +5470,6 @@ static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
+ static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
+ {
+ 	struct mgmt_pending_cmd *cmd = data;
+-
+-	if (cmd != pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev))
+-		return -ECANCELED;
+-
+ 	struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
+ 	u16 handle = __le16_to_cpu(cp->monitor_handle);
+ 
+@@ -5509,14 +5488,13 @@ static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
+ 	hci_dev_lock(hdev);
+ 
+ 	if (pending_find(MGMT_OP_SET_LE, hdev) ||
+-	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
+ 	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
+ 	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
+ 		status = MGMT_STATUS_BUSY;
+ 		goto unlock;
+ 	}
+ 
+-	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
++	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
+ 	if (!cmd) {
+ 		status = MGMT_STATUS_NO_RESOURCES;
+ 		goto unlock;
+@@ -5526,7 +5504,7 @@ static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
+ 				  mgmt_remove_adv_monitor_complete);
+ 
+ 	if (err) {
+-		mgmt_pending_remove(cmd);
++		mgmt_pending_free(cmd);
+ 
+ 		if (err == -ENOMEM)
+ 			status = MGMT_STATUS_NO_RESOURCES;
+@@ -5879,7 +5857,7 @@ static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
+ 	    cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
+ 		return;
+ 
+-	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
++	mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_status(err),
+ 			  cmd->param, 1);
+ 	mgmt_pending_remove(cmd);
+ 
+@@ -6117,7 +6095,7 @@ static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
+ 
+ 	bt_dev_dbg(hdev, "err %d", err);
+ 
+-	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
++	mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_status(err),
+ 			  cmd->param, 1);
+ 	mgmt_pending_remove(cmd);
+ 
+@@ -6342,7 +6320,7 @@ static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
+ 	u8 status = mgmt_status(err);
+ 
+ 	if (status) {
+-		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
++		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, true,
+ 				     cmd_status_rsp, &status);
+ 		return;
+ 	}
+@@ -6352,7 +6330,7 @@ static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
+ 	else
+ 		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
+ 
+-	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
++	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, true, settings_rsp,
+ 			     &match);
+ 
+ 	new_settings(hdev, match.sk);
+@@ -6696,7 +6674,7 @@ static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
+ 		 */
+ 		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
+ 
+-		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
++		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
+ 	} else {
+ 		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
+ 		new_settings(hdev, cmd->sk);
+@@ -6833,7 +6811,7 @@ static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
+ 	if (err) {
+ 		u8 mgmt_err = mgmt_status(err);
+ 
+-		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
++		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
+ 		goto done;
+ 	}
+ 
+@@ -7280,7 +7258,7 @@ static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
+ 		rp.max_tx_power = HCI_TX_POWER_INVALID;
+ 	}
+ 
+-	mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
++	mgmt_cmd_complete(cmd->sk, cmd->hdev->id, MGMT_OP_GET_CONN_INFO, status,
+ 			  &rp, sizeof(rp));
+ 
+ 	mgmt_pending_free(cmd);
+@@ -7440,7 +7418,7 @@ static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
+ 	}
+ 
+ complete:
+-	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
++	mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, status, &rp,
+ 			  sizeof(rp));
+ 
+ 	mgmt_pending_free(cmd);
+@@ -8690,10 +8668,10 @@ static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
+ 	rp.instance = cp->instance;
+ 
+ 	if (err)
+-		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
++		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode,
+ 				mgmt_status(err));
+ 	else
+-		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
++		mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
+ 				  mgmt_status(err), &rp, sizeof(rp));
+ 
+ 	add_adv_complete(hdev, cmd->sk, cp->instance, err);
+@@ -8881,10 +8859,10 @@ static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
+ 
+ 		hci_remove_adv_instance(hdev, cp->instance);
+ 
+-		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
++		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode,
+ 				mgmt_status(err));
+ 	} else {
+-		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
++		mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
+ 				  mgmt_status(err), &rp, sizeof(rp));
+ 	}
+ 
+@@ -9031,10 +9009,10 @@ static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
+ 	rp.instance = cp->instance;
+ 
+ 	if (err)
+-		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
++		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode,
+ 				mgmt_status(err));
+ 	else
+-		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
++		mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
+ 				  mgmt_status(err), &rp, sizeof(rp));
+ 
+ 	mgmt_pending_free(cmd);
+@@ -9193,10 +9171,10 @@ static void remove_advertising_complete(struct hci_dev *hdev, void *data,
+ 	rp.instance = cp->instance;
+ 
+ 	if (err)
+-		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
++		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode,
+ 				mgmt_status(err));
+ 	else
+-		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
++		mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
+ 				  MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
+ 
+ 	mgmt_pending_free(cmd);
+@@ -9467,7 +9445,7 @@ void mgmt_index_removed(struct hci_dev *hdev)
+ 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
+ 		return;
+ 
+-	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &match);
++	mgmt_pending_foreach(0, hdev, true, cmd_complete_rsp, &match);
+ 
+ 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
+ 		mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0,
+@@ -9505,7 +9483,8 @@ void mgmt_power_on(struct hci_dev *hdev, int err)
+ 		hci_update_passive_scan(hdev);
+ 	}
+ 
+-	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
++	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, true, settings_rsp,
++			     &match);
+ 
+ 	new_settings(hdev, match.sk);
+ 
+@@ -9520,7 +9499,8 @@ void __mgmt_power_off(struct hci_dev *hdev)
+ 	struct cmd_lookup match = { NULL, hdev };
+ 	u8 zero_cod[] = { 0, 0, 0 };
+ 
+-	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
++	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, true, settings_rsp,
++			     &match);
+ 
+ 	/* If the power off is because of hdev unregistration let
+ 	 * use the appropriate INVALID_INDEX status. Otherwise use
+@@ -9534,7 +9514,7 @@ void __mgmt_power_off(struct hci_dev *hdev)
+ 	else
+ 		match.mgmt_status = MGMT_STATUS_NOT_POWERED;
+ 
+-	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &match);
++	mgmt_pending_foreach(0, hdev, true, cmd_complete_rsp, &match);
+ 
+ 	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
+ 		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
+@@ -9775,7 +9755,6 @@ static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
+ 	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
+ 
+ 	cmd->cmd_complete(cmd, 0);
+-	mgmt_pending_remove(cmd);
+ }
+ 
+ bool mgmt_powering_down(struct hci_dev *hdev)
+@@ -9831,8 +9810,8 @@ void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ 	struct mgmt_cp_disconnect *cp;
+ 	struct mgmt_pending_cmd *cmd;
+ 
+-	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
+-			     hdev);
++	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, true,
++			     unpair_device_rsp, hdev);
+ 
+ 	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
+ 	if (!cmd)
+@@ -10025,7 +10004,7 @@ void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
+ 
+ 	if (status) {
+ 		u8 mgmt_err = mgmt_status(status);
+-		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
++		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, true,
+ 				     cmd_status_rsp, &mgmt_err);
+ 		return;
+ 	}
+@@ -10035,8 +10014,8 @@ void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
+ 	else
+ 		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
+ 
+-	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
+-			     &match);
++	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, true,
++			     settings_rsp, &match);
+ 
+ 	if (changed)
+ 		new_settings(hdev, match.sk);
+@@ -10060,9 +10039,12 @@ void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
+ {
+ 	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
+ 
+-	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
+-	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
+-	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
++	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, false, sk_lookup,
++			     &match);
++	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, false, sk_lookup,
++			     &match);
++	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, false, sk_lookup,
++			     &match);
+ 
+ 	if (!status) {
+ 		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
+diff --git a/net/bluetooth/mgmt_util.c b/net/bluetooth/mgmt_util.c
+index 17ab909a7c07f0..a88a07da394734 100644
+--- a/net/bluetooth/mgmt_util.c
++++ b/net/bluetooth/mgmt_util.c
+@@ -217,47 +217,47 @@ int mgmt_cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
+ struct mgmt_pending_cmd *mgmt_pending_find(unsigned short channel, u16 opcode,
+ 					   struct hci_dev *hdev)
+ {
+-	struct mgmt_pending_cmd *cmd;
++	struct mgmt_pending_cmd *cmd, *tmp;
++
++	mutex_lock(&hdev->mgmt_pending_lock);
+ 
+-	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
++	list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
+ 		if (hci_sock_get_channel(cmd->sk) != channel)
+ 			continue;
+-		if (cmd->opcode == opcode)
+-			return cmd;
+-	}
+ 
+-	return NULL;
+-}
+-
+-struct mgmt_pending_cmd *mgmt_pending_find_data(unsigned short channel,
+-						u16 opcode,
+-						struct hci_dev *hdev,
+-						const void *data)
+-{
+-	struct mgmt_pending_cmd *cmd;
+-
+-	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
+-		if (cmd->user_data != data)
+-			continue;
+-		if (cmd->opcode == opcode)
++		if (cmd->opcode == opcode) {
++			mutex_unlock(&hdev->mgmt_pending_lock);
+ 			return cmd;
++		}
+ 	}
+ 
++	mutex_unlock(&hdev->mgmt_pending_lock);
++
+ 	return NULL;
+ }
+ 
+-void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
++void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev, bool remove,
+ 			  void (*cb)(struct mgmt_pending_cmd *cmd, void *data),
+ 			  void *data)
+ {
+ 	struct mgmt_pending_cmd *cmd, *tmp;
+ 
++	mutex_lock(&hdev->mgmt_pending_lock);
++
+ 	list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
+ 		if (opcode > 0 && cmd->opcode != opcode)
+ 			continue;
+ 
++		if (remove)
++			list_del(&cmd->list);
++
+ 		cb(cmd, data);
++
++		if (remove)
++			mgmt_pending_free(cmd);
+ 	}
++
++	mutex_unlock(&hdev->mgmt_pending_lock);
+ }
+ 
+ struct mgmt_pending_cmd *mgmt_pending_new(struct sock *sk, u16 opcode,
+@@ -271,7 +271,7 @@ struct mgmt_pending_cmd *mgmt_pending_new(struct sock *sk, u16 opcode,
+ 		return NULL;
+ 
+ 	cmd->opcode = opcode;
+-	cmd->index = hdev->id;
++	cmd->hdev = hdev;
+ 
+ 	cmd->param = kmemdup(data, len, GFP_KERNEL);
+ 	if (!cmd->param) {
+@@ -297,7 +297,9 @@ struct mgmt_pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
+ 	if (!cmd)
+ 		return NULL;
+ 
++	mutex_lock(&hdev->mgmt_pending_lock);
+ 	list_add_tail(&cmd->list, &hdev->mgmt_pending);
++	mutex_unlock(&hdev->mgmt_pending_lock);
+ 
+ 	return cmd;
+ }
+@@ -311,7 +313,10 @@ void mgmt_pending_free(struct mgmt_pending_cmd *cmd)
+ 
+ void mgmt_pending_remove(struct mgmt_pending_cmd *cmd)
+ {
++	mutex_lock(&cmd->hdev->mgmt_pending_lock);
+ 	list_del(&cmd->list);
++	mutex_unlock(&cmd->hdev->mgmt_pending_lock);
++
+ 	mgmt_pending_free(cmd);
+ }
+ 
+@@ -321,7 +326,7 @@ void mgmt_mesh_foreach(struct hci_dev *hdev,
+ {
+ 	struct mgmt_mesh_tx *mesh_tx, *tmp;
+ 
+-	list_for_each_entry_safe(mesh_tx, tmp, &hdev->mgmt_pending, list) {
++	list_for_each_entry_safe(mesh_tx, tmp, &hdev->mesh_pending, list) {
+ 		if (!sk || mesh_tx->sk == sk)
+ 			cb(mesh_tx, data);
+ 	}
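
mgmt_pending_foreach() above gains a remove flag and runs the whole walk under mgmt_pending_lock, so callbacks no longer unlink entries themselves. A user-space model of the foreach-with-optional-remove pattern on a singly linked list; locking is elided:

	#include <stdio.h>
	#include <stdlib.h>

	struct cmd {
		int opcode;
		struct cmd *next;
	};

	static struct cmd *pending;

	static void foreach_cmd(int opcode, int remove,
				void (*cb)(struct cmd *, void *), void *data)
	{
		struct cmd **link = &pending;
		struct cmd *cur, *next;

		for (cur = pending; cur; cur = next) {
			next = cur->next;       /* saved before cb may free cur */
			if (opcode && cur->opcode != opcode) {
				link = &cur->next;
				continue;
			}
			if (remove)
				*link = next;   /* unlink first, like list_del() */
			else
				link = &cur->next;
			cb(cur, data);
			if (remove)
				free(cur);
		}
	}

	static void count_cb(struct cmd *c, void *data)
	{
		(void)c;
		(*(int *)data)++;
	}

	int main(void)
	{
		for (int op = 1; op <= 3; op++) {
			struct cmd *c = calloc(1, sizeof(*c));
			c->opcode = op;
			c->next = pending;
			pending = c;
		}

		int n = 0;
		foreach_cmd(2, 1, count_cb, &n);   /* visits and frees opcode 2 only */
		printf("visited=%d remaining=%d\n", n, !!pending); /* 1 1 */
		return 0;
	}
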
+diff --git a/net/bluetooth/mgmt_util.h b/net/bluetooth/mgmt_util.h
+index bdf978605d5a85..024e51dd693756 100644
+--- a/net/bluetooth/mgmt_util.h
++++ b/net/bluetooth/mgmt_util.h
+@@ -33,7 +33,7 @@ struct mgmt_mesh_tx {
+ struct mgmt_pending_cmd {
+ 	struct list_head list;
+ 	u16 opcode;
+-	int index;
++	struct hci_dev *hdev;
+ 	void *param;
+ 	size_t param_len;
+ 	struct sock *sk;
+@@ -54,11 +54,7 @@ int mgmt_cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
+ 
+ struct mgmt_pending_cmd *mgmt_pending_find(unsigned short channel, u16 opcode,
+ 					   struct hci_dev *hdev);
+-struct mgmt_pending_cmd *mgmt_pending_find_data(unsigned short channel,
+-						u16 opcode,
+-						struct hci_dev *hdev,
+-						const void *data);
+-void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
++void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev, bool remove,
+ 			  void (*cb)(struct mgmt_pending_cmd *cmd, void *data),
+ 			  void *data);
+ struct mgmt_pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
+diff --git a/net/bridge/netfilter/nf_conntrack_bridge.c b/net/bridge/netfilter/nf_conntrack_bridge.c
+index 816bb0fde718ed..6482de4d875092 100644
+--- a/net/bridge/netfilter/nf_conntrack_bridge.c
++++ b/net/bridge/netfilter/nf_conntrack_bridge.c
+@@ -60,19 +60,19 @@ static int nf_br_ip_fragment(struct net *net, struct sock *sk,
+ 		struct ip_fraglist_iter iter;
+ 		struct sk_buff *frag;
+ 
+-		if (first_len - hlen > mtu ||
+-		    skb_headroom(skb) < ll_rs)
++		if (first_len - hlen > mtu)
+ 			goto blackhole;
+ 
+-		if (skb_cloned(skb))
++		if (skb_cloned(skb) ||
++		    skb_headroom(skb) < ll_rs)
+ 			goto slow_path;
+ 
+ 		skb_walk_frags(skb, frag) {
+-			if (frag->len > mtu ||
+-			    skb_headroom(frag) < hlen + ll_rs)
++			if (frag->len > mtu)
+ 				goto blackhole;
+ 
+-			if (skb_shared(frag))
++			if (skb_shared(frag) ||
++			    skb_headroom(frag) < hlen + ll_rs)
+ 				goto slow_path;
+ 		}
+ 
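
The bridge conntrack hunk above splits the pre-fragmentation checks by severity: an over-MTU segment can only be dropped, while a cloned or shared buffer and insufficient headroom are recoverable by the copying slow path. A sketch of the classification with illustrative field names:

	#include <stdio.h>
	#include <stdbool.h>

	enum frag_path { FAST, SLOW, DROP };

	struct buf {
		unsigned len, headroom;
		bool shared;
	};

	static enum frag_path classify(const struct buf *b, unsigned mtu,
				       unsigned needed_headroom)
	{
		if (b->len > mtu)
			return DROP;            /* can't be fixed by copying */
		if (b->shared || b->headroom < needed_headroom)
			return SLOW;            /* recoverable: copy and retry */
		return FAST;
	}

	int main(void)
	{
		struct buf short_room = { .len = 1000, .headroom = 2, .shared = false };
		struct buf oversize   = { .len = 2000, .headroom = 64, .shared = false };

		printf("%d %d\n", classify(&short_room, 1500, 16),  /* SLOW (1) */
		       classify(&oversize, 1500, 16));              /* DROP (2) */
		return 0;
	}
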
+diff --git a/net/core/netmem_priv.h b/net/core/netmem_priv.h
+index 7eadb8393e002f..cd95394399b40c 100644
+--- a/net/core/netmem_priv.h
++++ b/net/core/netmem_priv.h
+@@ -5,7 +5,7 @@
+ 
+ static inline unsigned long netmem_get_pp_magic(netmem_ref netmem)
+ {
+-	return __netmem_clear_lsb(netmem)->pp_magic;
++	return __netmem_clear_lsb(netmem)->pp_magic & ~PP_DMA_INDEX_MASK;
+ }
+ 
+ static inline void netmem_or_pp_magic(netmem_ref netmem, unsigned long pp_magic)
+@@ -15,9 +15,16 @@ static inline void netmem_or_pp_magic(netmem_ref netmem, unsigned long pp_magic)
+ 
+ static inline void netmem_clear_pp_magic(netmem_ref netmem)
+ {
++	WARN_ON_ONCE(__netmem_clear_lsb(netmem)->pp_magic & PP_DMA_INDEX_MASK);
++
+ 	__netmem_clear_lsb(netmem)->pp_magic = 0;
+ }
+ 
++static inline bool netmem_is_pp(netmem_ref netmem)
++{
++	return (netmem_get_pp_magic(netmem) & PP_MAGIC_MASK) == PP_SIGNATURE;
++}
++
+ static inline void netmem_set_pp(netmem_ref netmem, struct page_pool *pool)
+ {
+ 	__netmem_clear_lsb(netmem)->pp = pool;
+@@ -28,4 +35,28 @@ static inline void netmem_set_dma_addr(netmem_ref netmem,
+ {
+ 	__netmem_clear_lsb(netmem)->dma_addr = dma_addr;
+ }
++
++static inline unsigned long netmem_get_dma_index(netmem_ref netmem)
++{
++	unsigned long magic;
++
++	if (WARN_ON_ONCE(netmem_is_net_iov(netmem)))
++		return 0;
++
++	magic = __netmem_clear_lsb(netmem)->pp_magic;
++
++	return (magic & PP_DMA_INDEX_MASK) >> PP_DMA_INDEX_SHIFT;
++}
++
++static inline void netmem_set_dma_index(netmem_ref netmem,
++					unsigned long id)
++{
++	unsigned long magic;
++
++	if (WARN_ON_ONCE(netmem_is_net_iov(netmem)))
++		return;
++
++	magic = netmem_get_pp_magic(netmem) | (id << PP_DMA_INDEX_SHIFT);
++	__netmem_clear_lsb(netmem)->pp_magic = magic;
++}
+ #endif
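
netmem_priv.h above starts packing a DMA-mapping ID into the upper bits of pp_magic while the low signature/flag bits stay intact, which is why netmem_get_pp_magic() now masks PP_DMA_INDEX_MASK off. A minimal model of the pack/unpack arithmetic; the shift and mask here are illustrative, not the kernel's layout:

	#include <stdio.h>

	#define DMA_INDEX_SHIFT 8
	#define DMA_INDEX_MASK  (~0UL << DMA_INDEX_SHIFT)

	static unsigned long set_index(unsigned long magic, unsigned long id)
	{
		return (magic & ~DMA_INDEX_MASK) | (id << DMA_INDEX_SHIFT);
	}

	static unsigned long get_index(unsigned long magic)
	{
		return (magic & DMA_INDEX_MASK) >> DMA_INDEX_SHIFT;
	}

	int main(void)
	{
		unsigned long magic = 0x42;              /* signature + flag bits */

		magic = set_index(magic, 1234);
		printf("id=%lu low=0x%lx\n", get_index(magic),
		       magic & ~DMA_INDEX_MASK);         /* id=1234 low=0x42 */
		return 0;
	}
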
+diff --git a/net/core/page_pool.c b/net/core/page_pool.c
+index c8ce069605c421..0f23b3126bdaf4 100644
+--- a/net/core/page_pool.c
++++ b/net/core/page_pool.c
+@@ -151,9 +151,9 @@ u64 *page_pool_ethtool_stats_get(u64 *data, const void *stats)
+ EXPORT_SYMBOL(page_pool_ethtool_stats_get);
+ 
+ #else
+-#define alloc_stat_inc(pool, __stat)
+-#define recycle_stat_inc(pool, __stat)
+-#define recycle_stat_add(pool, __stat, val)
++#define alloc_stat_inc(...)	do { } while (0)
++#define recycle_stat_inc(...)	do { } while (0)
++#define recycle_stat_add(...)	do { } while (0)
+ #endif
+ 
+ static bool page_pool_producer_lock(struct page_pool *pool)
+@@ -273,8 +273,7 @@ static int page_pool_init(struct page_pool *pool,
+ 	/* Driver calling page_pool_create() also call page_pool_destroy() */
+ 	refcount_set(&pool->user_cnt, 1);
+ 
+-	if (pool->dma_map)
+-		get_device(pool->p.dev);
++	xa_init_flags(&pool->dma_mapped, XA_FLAGS_ALLOC1);
+ 
+ 	if (pool->slow.flags & PP_FLAG_ALLOW_UNREADABLE_NETMEM) {
+ 		/* We rely on rtnl_lock()ing to make sure netdev_rx_queue
+@@ -312,9 +311,7 @@ static int page_pool_init(struct page_pool *pool,
+ static void page_pool_uninit(struct page_pool *pool)
+ {
+ 	ptr_ring_cleanup(&pool->ring, NULL);
+-
+-	if (pool->dma_map)
+-		put_device(pool->p.dev);
++	xa_destroy(&pool->dma_mapped);
+ 
+ #ifdef CONFIG_PAGE_POOL_STATS
+ 	if (!pool->system)
+@@ -455,13 +452,21 @@ page_pool_dma_sync_for_device(const struct page_pool *pool,
+ 			      netmem_ref netmem,
+ 			      u32 dma_sync_size)
+ {
+-	if (pool->dma_sync && dma_dev_need_sync(pool->p.dev))
+-		__page_pool_dma_sync_for_device(pool, netmem, dma_sync_size);
++	if (pool->dma_sync && dma_dev_need_sync(pool->p.dev)) {
++		rcu_read_lock();
++		/* re-check under rcu_read_lock() to sync with page_pool_scrub() */
++		if (pool->dma_sync)
++			__page_pool_dma_sync_for_device(pool, netmem,
++							dma_sync_size);
++		rcu_read_unlock();
++	}
+ }
+ 
+-static bool page_pool_dma_map(struct page_pool *pool, netmem_ref netmem)
++static bool page_pool_dma_map(struct page_pool *pool, netmem_ref netmem, gfp_t gfp)
+ {
+ 	dma_addr_t dma;
++	int err;
++	u32 id;
+ 
+ 	/* Setup DMA mapping: use 'struct page' area for storing DMA-addr
+ 	 * since dma_addr_t can be either 32 or 64 bits and does not always fit
+@@ -475,15 +480,30 @@ static bool page_pool_dma_map(struct page_pool *pool, netmem_ref netmem)
+ 	if (dma_mapping_error(pool->p.dev, dma))
+ 		return false;
+ 
+-	if (page_pool_set_dma_addr_netmem(netmem, dma))
++	if (page_pool_set_dma_addr_netmem(netmem, dma)) {
++		WARN_ONCE(1, "unexpected DMA address, please report to netdev@");
+ 		goto unmap_failed;
++	}
++
++	if (in_softirq())
++		err = xa_alloc(&pool->dma_mapped, &id, netmem_to_page(netmem),
++			       PP_DMA_INDEX_LIMIT, gfp);
++	else
++		err = xa_alloc_bh(&pool->dma_mapped, &id, netmem_to_page(netmem),
++				  PP_DMA_INDEX_LIMIT, gfp);
++	if (err) {
++		WARN_ONCE(err != -ENOMEM, "couldn't track DMA mapping, please report to netdev@");
++		goto unset_failed;
++	}
+ 
++	netmem_set_dma_index(netmem, id);
+ 	page_pool_dma_sync_for_device(pool, netmem, pool->p.max_len);
+ 
+ 	return true;
+ 
++unset_failed:
++	page_pool_set_dma_addr_netmem(netmem, 0);
+ unmap_failed:
+-	WARN_ONCE(1, "unexpected DMA address, please report to netdev@");
+ 	dma_unmap_page_attrs(pool->p.dev, dma,
+ 			     PAGE_SIZE << pool->p.order, pool->p.dma_dir,
+ 			     DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
+@@ -500,7 +520,7 @@ static struct page *__page_pool_alloc_page_order(struct page_pool *pool,
+ 	if (unlikely(!page))
+ 		return NULL;
+ 
+-	if (pool->dma_map && unlikely(!page_pool_dma_map(pool, page_to_netmem(page)))) {
++	if (pool->dma_map && unlikely(!page_pool_dma_map(pool, page_to_netmem(page), gfp))) {
+ 		put_page(page);
+ 		return NULL;
+ 	}
+@@ -547,7 +567,7 @@ static noinline netmem_ref __page_pool_alloc_pages_slow(struct page_pool *pool,
+ 	 */
+ 	for (i = 0; i < nr_pages; i++) {
+ 		netmem = pool->alloc.cache[i];
+-		if (dma_map && unlikely(!page_pool_dma_map(pool, netmem))) {
++		if (dma_map && unlikely(!page_pool_dma_map(pool, netmem, gfp))) {
+ 			put_page(netmem_to_page(netmem));
+ 			continue;
+ 		}
+@@ -649,6 +669,8 @@ void page_pool_clear_pp_info(netmem_ref netmem)
+ static __always_inline void __page_pool_release_page_dma(struct page_pool *pool,
+ 							 netmem_ref netmem)
+ {
++	struct page *old, *page = netmem_to_page(netmem);
++	unsigned long id;
+ 	dma_addr_t dma;
+ 
+ 	if (!pool->dma_map)
+@@ -657,6 +679,17 @@ static __always_inline void __page_pool_release_page_dma(struct page_pool *pool,
+ 		 */
+ 		return;
+ 
++	id = netmem_get_dma_index(netmem);
++	if (!id)
++		return;
++
++	if (in_softirq())
++		old = xa_cmpxchg(&pool->dma_mapped, id, page, NULL, 0);
++	else
++		old = xa_cmpxchg_bh(&pool->dma_mapped, id, page, NULL, 0);
++	if (old != page)
++		return;
++
+ 	dma = page_pool_get_dma_addr_netmem(netmem);
+ 
+ 	/* When page is unmapped, it cannot be returned to our pool */
+@@ -664,6 +697,7 @@ static __always_inline void __page_pool_release_page_dma(struct page_pool *pool,
+ 			     PAGE_SIZE << pool->p.order, pool->p.dma_dir,
+ 			     DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
+ 	page_pool_set_dma_addr_netmem(netmem, 0);
++	netmem_set_dma_index(netmem, 0);
+ }
+ 
+ /* Disconnects a page (from a page_pool).  API users can have a need
+@@ -700,19 +734,16 @@ void page_pool_return_page(struct page_pool *pool, netmem_ref netmem)
+ 
+ static bool page_pool_recycle_in_ring(struct page_pool *pool, netmem_ref netmem)
+ {
+-	int ret;
+-	/* BH protection not needed if current is softirq */
+-	if (in_softirq())
+-		ret = ptr_ring_produce(&pool->ring, (__force void *)netmem);
+-	else
+-		ret = ptr_ring_produce_bh(&pool->ring, (__force void *)netmem);
++	bool in_softirq, ret;
+ 
+-	if (!ret) {
++	/* BH protection not needed if current is softirq */
++	in_softirq = page_pool_producer_lock(pool);
++	ret = !__ptr_ring_produce(&pool->ring, (__force void *)netmem);
++	if (ret)
+ 		recycle_stat_inc(pool, ring);
+-		return true;
+-	}
++	page_pool_producer_unlock(pool, in_softirq);
+ 
+-	return false;
++	return ret;
+ }
+ 
+ /* Only allow direct recycling in special circumstances, into the
+@@ -1038,8 +1069,29 @@ static void page_pool_empty_alloc_cache_once(struct page_pool *pool)
+ 
+ static void page_pool_scrub(struct page_pool *pool)
+ {
++	unsigned long id;
++	void *ptr;
++
+ 	page_pool_empty_alloc_cache_once(pool);
+-	pool->destroy_cnt++;
++	if (!pool->destroy_cnt++ && pool->dma_map) {
++		if (pool->dma_sync) {
++			/* Disable page_pool_dma_sync_for_device() */
++			pool->dma_sync = false;
++
++			/* Make sure all concurrent returns that may see the old
++			 * value of dma_sync (and thus perform a sync) have
++			 * finished before doing the unmapping below. Skip the
++			 * wait if the device doesn't actually need syncing, or
++			 * if there are no outstanding mapped pages.
++			 */
++			if (dma_dev_need_sync(pool->p.dev) &&
++			    !xa_empty(&pool->dma_mapped))
++				synchronize_net();
++		}
++
++		xa_for_each(&pool->dma_mapped, id, ptr)
++			__page_pool_release_page_dma(pool, page_to_netmem(ptr));
++	}
+ 
+ 	/* No more consumers should exist, but producers could still
+ 	 * be in-flight.
+@@ -1049,10 +1101,14 @@ static void page_pool_scrub(struct page_pool *pool)
+ 
+ static int page_pool_release(struct page_pool *pool)
+ {
++	bool in_softirq;
+ 	int inflight;
+ 
+ 	page_pool_scrub(pool);
+ 	inflight = page_pool_inflight(pool, true);
++	/* Acquire producer lock to make sure producers have exited. */
++	in_softirq = page_pool_producer_lock(pool);
++	page_pool_producer_unlock(pool, in_softirq);
+ 	if (!inflight)
+ 		__page_pool_destroy(pool);
+ 
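
The page_pool changes above record every DMA-mapped page in pool->dma_mapped (an XArray) so that page_pool_scrub() can unmap pages still in flight at destroy time, and release via a compare-exchange so each mapping is torn down exactly once. A single-threaded user-space model with a plain array standing in for the XArray; the kernel's locking and the real dma_unmap_page_attrs() call are elided:

	#include <stdio.h>
	#include <stddef.h>

	#define MAX_MAPPED 64

	static void *mapped[MAX_MAPPED];        /* stands in for pool->dma_mapped */

	static int map_page(void *page)
	{
		for (int id = 1; id < MAX_MAPPED; id++) {  /* id 0 means "unmapped" */
			if (!mapped[id]) {
				mapped[id] = page;
				return id;
			}
		}
		return -1;
	}

	static int release_page(int id, void *page)
	{
		if (id <= 0 || mapped[id] != page)
			return 0;               /* someone else already released it */
		mapped[id] = NULL;
		/* the DMA unmap would happen here in the kernel */
		return 1;
	}

	static void scrub_all(void)
	{
		for (int id = 1; id < MAX_MAPPED; id++)
			if (mapped[id])
				release_page(id, mapped[id]);
	}

	int main(void)
	{
		char a, b;
		int ia = map_page(&a), ib = map_page(&b);

		release_page(ia, &a);
		scrub_all();                    /* unmaps &b, left in flight */
		printf("%p %p\n", mapped[ia], mapped[ib]); /* both NULL */
		return 0;
	}
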
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index f220306731dac8..fdb36165c58f5b 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -925,11 +925,6 @@ static void skb_clone_fraglist(struct sk_buff *skb)
+ 		skb_get(list);
+ }
+ 
+-static bool is_pp_netmem(netmem_ref netmem)
+-{
+-	return (netmem_get_pp_magic(netmem) & ~0x3UL) == PP_SIGNATURE;
+-}
+-
+ int skb_pp_cow_data(struct page_pool *pool, struct sk_buff **pskb,
+ 		    unsigned int headroom)
+ {
+@@ -1027,14 +1022,7 @@ bool napi_pp_put_page(netmem_ref netmem)
+ {
+ 	netmem = netmem_compound_head(netmem);
+ 
+-	/* page->pp_magic is OR'ed with PP_SIGNATURE after the allocation
+-	 * in order to preserve any existing bits, such as bit 0 for the
+-	 * head page of compound page and bit 1 for pfmemalloc page, so
+-	 * mask those bits for freeing side when doing below checking,
+-	 * and page_is_pfmemalloc() is checked in __page_pool_put_page()
+-	 * to avoid recycling the pfmemalloc page.
+-	 */
+-	if (unlikely(!is_pp_netmem(netmem)))
++	if (unlikely(!netmem_is_pp(netmem)))
+ 		return false;
+ 
+ 	page_pool_put_full_netmem(netmem_get_pp(netmem), netmem, false);
+@@ -1074,7 +1062,7 @@ static int skb_pp_frag_ref(struct sk_buff *skb)
+ 
+ 	for (i = 0; i < shinfo->nr_frags; i++) {
+ 		head_netmem = netmem_compound_head(shinfo->frags[i].netmem);
+-		if (likely(is_pp_netmem(head_netmem)))
++		if (likely(netmem_is_pp(head_netmem)))
+ 			page_pool_ref_netmem(head_netmem);
+ 		else
+ 			page_ref_inc(netmem_to_page(head_netmem));
+diff --git a/net/core/skmsg.c b/net/core/skmsg.c
+index f76cbf49c68c8d..a8d238dd982af0 100644
+--- a/net/core/skmsg.c
++++ b/net/core/skmsg.c
+@@ -529,16 +529,22 @@ static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb,
+ 					u32 off, u32 len,
+ 					struct sk_psock *psock,
+ 					struct sock *sk,
+-					struct sk_msg *msg)
++					struct sk_msg *msg,
++					bool take_ref)
+ {
+ 	int num_sge, copied;
+ 
++	/* skb_to_sgvec will fail when the total number of fragments in
++	 * frag_list and frags exceeds MAX_MSG_FRAGS. For example, the
++	 * caller may aggregate multiple skbs.
++	 */
+ 	num_sge = skb_to_sgvec(skb, msg->sg.data, off, len);
+ 	if (num_sge < 0) {
+ 		/* skb linearize may fail with ENOMEM, but lets simply try again
+ 		 * later if this happens. Under memory pressure we don't want to
+ 		 * drop the skb. We need to linearize the skb so that the mapping
+ 		 * in skb_to_sgvec can not error.
++		 * Note that skb_linearize requires the skb not to be shared.
+ 		 */
+ 		if (skb_linearize(skb))
+ 			return -EAGAIN;
+@@ -555,7 +561,7 @@ static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb,
+ 	msg->sg.start = 0;
+ 	msg->sg.size = copied;
+ 	msg->sg.end = num_sge;
+-	msg->skb = skb;
++	msg->skb = take_ref ? skb_get(skb) : skb;
+ 
+ 	sk_psock_queue_msg(psock, msg);
+ 	sk_psock_data_ready(sk, psock);
+@@ -563,7 +569,7 @@ static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb,
+ }
+ 
+ static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb,
+-				     u32 off, u32 len);
++				     u32 off, u32 len, bool take_ref);
+ 
+ static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb,
+ 				u32 off, u32 len)
+@@ -577,7 +583,7 @@ static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb,
+ 	 * correctly.
+ 	 */
+ 	if (unlikely(skb->sk == sk))
+-		return sk_psock_skb_ingress_self(psock, skb, off, len);
++		return sk_psock_skb_ingress_self(psock, skb, off, len, true);
+ 	msg = sk_psock_create_ingress_msg(sk, skb);
+ 	if (!msg)
+ 		return -EAGAIN;
+@@ -589,7 +595,7 @@ static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb,
+ 	 * into user buffers.
+ 	 */
+ 	skb_set_owner_r(skb, sk);
+-	err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg);
++	err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg, true);
+ 	if (err < 0)
+ 		kfree(msg);
+ 	return err;
+@@ -600,7 +606,7 @@ static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb,
+  * because the skb is already accounted for here.
+  */
+ static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb,
+-				     u32 off, u32 len)
++				     u32 off, u32 len, bool take_ref)
+ {
+ 	struct sk_msg *msg = alloc_sk_msg(GFP_ATOMIC);
+ 	struct sock *sk = psock->sk;
+@@ -609,7 +615,7 @@ static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb
+ 	if (unlikely(!msg))
+ 		return -EAGAIN;
+ 	skb_set_owner_r(skb, sk);
+-	err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg);
++	err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg, take_ref);
+ 	if (err < 0)
+ 		kfree(msg);
+ 	return err;
+@@ -618,18 +624,13 @@ static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb
+ static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
+ 			       u32 off, u32 len, bool ingress)
+ {
+-	int err = 0;
+-
+ 	if (!ingress) {
+ 		if (!sock_writeable(psock->sk))
+ 			return -EAGAIN;
+ 		return skb_send_sock(psock->sk, skb, off, len);
+ 	}
+-	skb_get(skb);
+-	err = sk_psock_skb_ingress(psock, skb, off, len);
+-	if (err < 0)
+-		kfree_skb(skb);
+-	return err;
++
++	return sk_psock_skb_ingress(psock, skb, off, len);
+ }
+ 
+ static void sk_psock_skb_state(struct sk_psock *psock,
+@@ -654,12 +655,14 @@ static void sk_psock_backlog(struct work_struct *work)
+ 	bool ingress;
+ 	int ret;
+ 
++	/* Increment the psock refcnt to synchronize with the close(fd) path
++	 * in sock_map_close(), ensuring we wait for backlog thread completion
++	 * before the sk_socket is freed. If the refcnt increment fails,
++	 * sock_map_close() has completed and sk_socket may already be freed.
++	 */
++	if (!sk_psock_get(psock->sk))
++		return;
+ 	mutex_lock(&psock->work_mutex);
+-	if (unlikely(state->len)) {
+-		len = state->len;
+-		off = state->off;
+-	}
+-
+ 	while ((skb = skb_peek(&psock->ingress_skb))) {
+ 		len = skb->len;
+ 		off = 0;
+@@ -669,6 +672,13 @@ static void sk_psock_backlog(struct work_struct *work)
+ 			off = stm->offset;
+ 			len = stm->full_len;
+ 		}
++
++		/* Resume processing from previous partial state */
++		if (unlikely(state->len)) {
++			len = state->len;
++			off = state->off;
++		}
++
+ 		ingress = skb_bpf_ingress(skb);
+ 		skb_bpf_redirect_clear(skb);
+ 		do {
+@@ -696,11 +706,14 @@ static void sk_psock_backlog(struct work_struct *work)
+ 			len -= ret;
+ 		} while (len);
+ 
++		/* The entire skb was sent, clear the saved state */
++		sk_psock_skb_state(psock, state, 0, 0);
+ 		skb = skb_dequeue(&psock->ingress_skb);
+ 		kfree_skb(skb);
+ 	}
+ end:
+ 	mutex_unlock(&psock->work_mutex);
++	sk_psock_put(psock->sk, psock);
+ }
+ 
+ struct sk_psock *sk_psock_init(struct sock *sk, int node)
+@@ -1013,7 +1026,7 @@ static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb,
+ 				off = stm->offset;
+ 				len = stm->full_len;
+ 			}
+-			err = sk_psock_skb_ingress_self(psock, skb, off, len);
++			err = sk_psock_skb_ingress_self(psock, skb, off, len, false);
+ 		}
+ 		if (err < 0) {
+ 			spin_lock_bh(&psock->ingress_lock);
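The rewritten backlog path above combines two ideas: pin the psock with a refcount for the worker's lifetime, and keep per-skb (off, len) progress in saved state that is consulted after each skb is peeked and cleared once the skb is fully sent. A minimal userspace sketch of the save/resume half follows; the names are illustrative stand-ins, not the kernel API.

/* Worker that consumes one item, may stop mid-item, saves (off, len)
 * progress, and resumes from that state on the next invocation.
 */
#include <stdio.h>
#include <string.h>

struct state { size_t off, len; };

/* Pretend "send" that moves at most 3 bytes per call. */
static size_t send_some(const char *buf, size_t off, size_t len)
{
	size_t n = len < 3 ? len : 3;

	printf("sent: %.*s\n", (int)n, buf + off);
	return n;
}

static void worker(const char *item, struct state *st)
{
	size_t off = 0, len = strlen(item);

	/* Resume from previously saved partial state, if any. */
	if (st->len) {
		off = st->off;
		len = st->len;
	}
	while (len) {
		size_t n = send_some(item, off, len);

		off += n;
		len -= n;
		/* On a real -EAGAIN the worker would save and return here;
		 * the next run re-enters through the resume path above.
		 */
		st->off = off;
		st->len = len;
	}
	/* The entire item was sent: clear the saved state. */
	st->off = st->len = 0;
}

int main(void)
{
	struct state st = { 0, 0 };

	worker("hello world", &st);
	return 0;
}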
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 0842dc9189bf80..3c5386c76d6fe4 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -3157,16 +3157,16 @@ int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind)
+ {
+ 	struct mem_cgroup *memcg = mem_cgroup_sockets_enabled ? sk->sk_memcg : NULL;
+ 	struct proto *prot = sk->sk_prot;
+-	bool charged = false;
++	bool charged = true;
+ 	long allocated;
+ 
+ 	sk_memory_allocated_add(sk, amt);
+ 	allocated = sk_memory_allocated(sk);
+ 
+ 	if (memcg) {
+-		if (!mem_cgroup_charge_skmem(memcg, amt, gfp_memcg_charge()))
++		charged = mem_cgroup_charge_skmem(memcg, amt, gfp_memcg_charge());
++		if (!charged)
+ 			goto suppress_allocation;
+-		charged = true;
+ 	}
+ 
+ 	/* Under limit. */
+@@ -3251,7 +3251,7 @@ int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind)
+ 
+ 	sk_memory_allocated_sub(sk, amt);
+ 
+-	if (charged)
++	if (memcg && charged)
+ 		mem_cgroup_uncharge_skmem(memcg, amt);
+ 
+ 	return 0;
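In the sock.c hunk, `charged` now defaults to true so the suppress path only uncharges when a memcg exists and the charge actually succeeded; the gate becomes `memcg && charged`. A small sketch of that invariant, with stubbed stand-ins for the kernel helpers:

#include <stdbool.h>
#include <stdio.h>

static bool charge(int *cg, long amt)
{
	if (*cg < amt)
		return false;
	*cg -= amt;
	return true;
}

static void uncharge(int *cg, long amt)
{
	*cg += amt;
}

/* Returns 1 on success, 0 on suppressed allocation. */
static int raise_allocated(int *memcg, long amt, bool over_limit)
{
	bool charged = true;	/* default true: no memcg, nothing to undo */

	if (memcg) {
		charged = charge(memcg, amt);
		if (!charged)
			goto suppress;
	}
	if (!over_limit)
		return 1;

suppress:
	/* Only undo a charge that really happened. */
	if (memcg && charged)
		uncharge(memcg, amt);
	return 0;
}

int main(void)
{
	int budget = 10;

	printf("%d", raise_allocated(&budget, 4, false));	/* 1 */
	printf(" %d", raise_allocated(&budget, 4, true));	/* 0, refunded */
	printf(" %d\n", raise_allocated(NULL, 4, true));	/* 0, no memcg */
	return 0;
}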
+diff --git a/net/core/xdp.c b/net/core/xdp.c
+index bcc5551c6424bd..23e7d736718b01 100644
+--- a/net/core/xdp.c
++++ b/net/core/xdp.c
+@@ -381,8 +381,8 @@ void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
+ 		page = virt_to_head_page(data);
+ 		if (napi_direct && xdp_return_frame_no_direct())
+ 			napi_direct = false;
+-		/* No need to check ((page->pp_magic & ~0x3UL) == PP_SIGNATURE)
+-		 * as mem->type knows this a page_pool page
++		/* No need to check netmem_is_pp() as mem->type knows this is a
++		 * page_pool page
+ 		 */
+ 		page_pool_put_full_page(page->pp, page, napi_direct);
+ 		break;
+diff --git a/net/dsa/tag_brcm.c b/net/dsa/tag_brcm.c
+index 8c3c068728e51c..fe75821623a4fc 100644
+--- a/net/dsa/tag_brcm.c
++++ b/net/dsa/tag_brcm.c
+@@ -257,7 +257,7 @@ static struct sk_buff *brcm_leg_tag_rcv(struct sk_buff *skb,
+ 	int source_port;
+ 	u8 *brcm_tag;
+ 
+-	if (unlikely(!pskb_may_pull(skb, BRCM_LEG_PORT_ID)))
++	if (unlikely(!pskb_may_pull(skb, BRCM_LEG_TAG_LEN + VLAN_HLEN)))
+ 		return NULL;
+ 
+ 	brcm_tag = dsa_etype_header_pos_rx(skb);
+diff --git a/net/ipv4/netfilter/nft_fib_ipv4.c b/net/ipv4/netfilter/nft_fib_ipv4.c
+index d25d717c121f0c..f514eb52b8d4b2 100644
+--- a/net/ipv4/netfilter/nft_fib_ipv4.c
++++ b/net/ipv4/netfilter/nft_fib_ipv4.c
+@@ -49,7 +49,12 @@ void nft_fib4_eval_type(const struct nft_expr *expr, struct nft_regs *regs,
+ 	else
+ 		addr = iph->saddr;
+ 
+-	*dst = inet_dev_addr_type(nft_net(pkt), dev, addr);
++	if (priv->flags & (NFTA_FIB_F_IIF | NFTA_FIB_F_OIF)) {
++		*dst = inet_dev_addr_type(nft_net(pkt), dev, addr);
++		return;
++	}
++
++	*dst = inet_addr_type_dev_table(nft_net(pkt), pkt->skb->dev, addr);
+ }
+ EXPORT_SYMBOL_GPL(nft_fib4_eval_type);
+ 
+@@ -64,8 +69,8 @@ void nft_fib4_eval(const struct nft_expr *expr, struct nft_regs *regs,
+ 	struct flowi4 fl4 = {
+ 		.flowi4_scope = RT_SCOPE_UNIVERSE,
+ 		.flowi4_iif = LOOPBACK_IFINDEX,
++		.flowi4_proto = pkt->tprot,
+ 		.flowi4_uid = sock_net_uid(nft_net(pkt), NULL),
+-		.flowi4_l3mdev = l3mdev_master_ifindex_rcu(nft_in(pkt)),
+ 	};
+ 	const struct net_device *oif;
+ 	const struct net_device *found;
+@@ -89,6 +94,8 @@ void nft_fib4_eval(const struct nft_expr *expr, struct nft_regs *regs,
+ 	else
+ 		oif = NULL;
+ 
++	fl4.flowi4_l3mdev = nft_fib_l3mdev_master_ifindex_rcu(pkt, oif);
++
+ 	iph = skb_header_pointer(pkt->skb, noff, sizeof(_iph), &_iph);
+ 	if (!iph) {
+ 		regs->verdict.code = NFT_BREAK;
+diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
+index da5d4aea1b5915..845730184c5d31 100644
+--- a/net/ipv4/udp_offload.c
++++ b/net/ipv4/udp_offload.c
+@@ -332,6 +332,7 @@ struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
+ 	bool copy_dtor;
+ 	__sum16 check;
+ 	__be16 newlen;
++	int ret = 0;
+ 
+ 	mss = skb_shinfo(gso_skb)->gso_size;
+ 	if (gso_skb->len <= sizeof(*uh) + mss)
+@@ -360,6 +361,10 @@ struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
+ 		if (skb_pagelen(gso_skb) - sizeof(*uh) == skb_shinfo(gso_skb)->gso_size)
+ 			return __udp_gso_segment_list(gso_skb, features, is_ipv6);
+ 
++		ret = __skb_linearize(gso_skb);
++		if (ret)
++			return ERR_PTR(ret);
++
+ 		 /* Setup csum, as fraglist skips this in udp4_gro_receive. */
+ 		gso_skb->csum_start = skb_transport_header(gso_skb) - gso_skb->head;
+ 		gso_skb->csum_offset = offsetof(struct udphdr, check);
+diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
+index 581ce055bf520f..4541836ee3da20 100644
+--- a/net/ipv6/netfilter.c
++++ b/net/ipv6/netfilter.c
+@@ -164,20 +164,20 @@ int br_ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
+ 		struct ip6_fraglist_iter iter;
+ 		struct sk_buff *frag2;
+ 
+-		if (first_len - hlen > mtu ||
+-		    skb_headroom(skb) < (hroom + sizeof(struct frag_hdr)))
++		if (first_len - hlen > mtu)
+ 			goto blackhole;
+ 
+-		if (skb_cloned(skb))
++		if (skb_cloned(skb) ||
++		    skb_headroom(skb) < (hroom + sizeof(struct frag_hdr)))
+ 			goto slow_path;
+ 
+ 		skb_walk_frags(skb, frag2) {
+-			if (frag2->len > mtu ||
+-			    skb_headroom(frag2) < (hlen + hroom + sizeof(struct frag_hdr)))
++			if (frag2->len > mtu)
+ 				goto blackhole;
+ 
+ 			/* Partially cloned skb? */
+-			if (skb_shared(frag2))
++			if (skb_shared(frag2) ||
++			    skb_headroom(frag2) < (hlen + hroom + sizeof(struct frag_hdr)))
+ 				goto slow_path;
+ 		}
+ 
+diff --git a/net/ipv6/netfilter/nft_fib_ipv6.c b/net/ipv6/netfilter/nft_fib_ipv6.c
+index 7fd9d7b21cd42d..421036a3605b46 100644
+--- a/net/ipv6/netfilter/nft_fib_ipv6.c
++++ b/net/ipv6/netfilter/nft_fib_ipv6.c
+@@ -50,6 +50,7 @@ static int nft_fib6_flowi_init(struct flowi6 *fl6, const struct nft_fib *priv,
+ 		fl6->flowi6_mark = pkt->skb->mark;
+ 
+ 	fl6->flowlabel = (*(__be32 *)iph) & IPV6_FLOWINFO_MASK;
++	fl6->flowi6_l3mdev = nft_fib_l3mdev_master_ifindex_rcu(pkt, dev);
+ 
+ 	return lookup_flags;
+ }
+@@ -73,8 +74,6 @@ static u32 __nft_fib6_eval_type(const struct nft_fib *priv,
+ 	else if (priv->flags & NFTA_FIB_F_OIF)
+ 		dev = nft_out(pkt);
+ 
+-	fl6.flowi6_l3mdev = l3mdev_master_ifindex_rcu(dev);
+-
+ 	nft_fib6_flowi_init(&fl6, priv, pkt, dev, iph);
+ 
+ 	if (dev && nf_ipv6_chk_addr(nft_net(pkt), &fl6.daddr, dev, true))
+@@ -158,6 +157,7 @@ void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs,
+ {
+ 	const struct nft_fib *priv = nft_expr_priv(expr);
+ 	int noff = skb_network_offset(pkt->skb);
++	const struct net_device *found = NULL;
+ 	const struct net_device *oif = NULL;
+ 	u32 *dest = &regs->data[priv->dreg];
+ 	struct ipv6hdr *iph, _iph;
+@@ -165,7 +165,6 @@ void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs,
+ 		.flowi6_iif = LOOPBACK_IFINDEX,
+ 		.flowi6_proto = pkt->tprot,
+ 		.flowi6_uid = sock_net_uid(nft_net(pkt), NULL),
+-		.flowi6_l3mdev = l3mdev_master_ifindex_rcu(nft_in(pkt)),
+ 	};
+ 	struct rt6_info *rt;
+ 	int lookup_flags;
+@@ -203,11 +202,15 @@ void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs,
+ 	if (rt->rt6i_flags & (RTF_REJECT | RTF_ANYCAST | RTF_LOCAL))
+ 		goto put_rt_err;
+ 
+-	if (oif && oif != rt->rt6i_idev->dev &&
+-	    l3mdev_master_ifindex_rcu(rt->rt6i_idev->dev) != oif->ifindex)
+-		goto put_rt_err;
++	if (!oif) {
++		found = rt->rt6i_idev->dev;
++	} else {
++		if (oif == rt->rt6i_idev->dev ||
++		    l3mdev_master_ifindex_rcu(rt->rt6i_idev->dev) == oif->ifindex)
++			found = oif;
++	}
+ 
+-	nft_fib_store_result(dest, priv, rt->rt6i_idev->dev);
++	nft_fib_store_result(dest, priv, found);
+  put_rt_err:
+ 	ip6_rt_put(rt);
+ }
+diff --git a/net/ipv6/seg6_local.c b/net/ipv6/seg6_local.c
+index c74705ead9849f..e445a0a45568d1 100644
+--- a/net/ipv6/seg6_local.c
++++ b/net/ipv6/seg6_local.c
+@@ -1644,10 +1644,8 @@ static const struct nla_policy seg6_local_policy[SEG6_LOCAL_MAX + 1] = {
+ 	[SEG6_LOCAL_SRH]	= { .type = NLA_BINARY },
+ 	[SEG6_LOCAL_TABLE]	= { .type = NLA_U32 },
+ 	[SEG6_LOCAL_VRFTABLE]	= { .type = NLA_U32 },
+-	[SEG6_LOCAL_NH4]	= { .type = NLA_BINARY,
+-				    .len = sizeof(struct in_addr) },
+-	[SEG6_LOCAL_NH6]	= { .type = NLA_BINARY,
+-				    .len = sizeof(struct in6_addr) },
++	[SEG6_LOCAL_NH4]	= NLA_POLICY_EXACT_LEN(sizeof(struct in_addr)),
++	[SEG6_LOCAL_NH6]	= NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
+ 	[SEG6_LOCAL_IIF]	= { .type = NLA_U32 },
+ 	[SEG6_LOCAL_OIF]	= { .type = NLA_U32 },
+ 	[SEG6_LOCAL_BPF]	= { .type = NLA_NESTED },
+diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
+index 8fa9b9dd461184..16bb3db67eaac0 100644
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -6728,11 +6728,8 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link,
+ 	bssid = ieee80211_get_bssid(hdr, len, sdata->vif.type);
+ 	if (ieee80211_is_s1g_beacon(mgmt->frame_control)) {
+ 		struct ieee80211_ext *ext = (void *) mgmt;
+-
+-		if (ieee80211_is_s1g_short_beacon(ext->frame_control))
+-			variable = ext->u.s1g_short_beacon.variable;
+-		else
+-			variable = ext->u.s1g_beacon.variable;
++		variable = ext->u.s1g_beacon.variable +
++			   ieee80211_s1g_optional_len(ext->frame_control);
+ 	}
+ 
+ 	baselen = (u8 *) variable - (u8 *) mgmt;
+diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
+index adb88c06b59822..ce6d5857214eba 100644
+--- a/net/mac80211/scan.c
++++ b/net/mac80211/scan.c
+@@ -260,6 +260,7 @@ void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb)
+ 	struct ieee80211_mgmt *mgmt = (void *)skb->data;
+ 	struct ieee80211_bss *bss;
+ 	struct ieee80211_channel *channel;
++	struct ieee80211_ext *ext;
+ 	size_t min_hdr_len = offsetof(struct ieee80211_mgmt,
+ 				      u.probe_resp.variable);
+ 
+@@ -269,12 +270,10 @@ void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb)
+ 		return;
+ 
+ 	if (ieee80211_is_s1g_beacon(mgmt->frame_control)) {
+-		if (ieee80211_is_s1g_short_beacon(mgmt->frame_control))
+-			min_hdr_len = offsetof(struct ieee80211_ext,
+-					       u.s1g_short_beacon.variable);
+-		else
+-			min_hdr_len = offsetof(struct ieee80211_ext,
+-					       u.s1g_beacon);
++		ext = (struct ieee80211_ext *)mgmt;
++		min_hdr_len =
++			offsetof(struct ieee80211_ext, u.s1g_beacon.variable) +
++			ieee80211_s1g_optional_len(ext->frame_control);
+ 	}
+ 
+ 	if (skb->len < min_hdr_len)
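Both mac80211 hunks replace the short/long-beacon branch with ieee80211_s1g_optional_len(), which derives the start of the variable elements from presence bits in the frame control field. The sketch below shows the general presence-bit technique; the flag positions and field sizes are invented for illustration and do not reflect the real S1G layout.

#include <stdint.h>
#include <stdio.h>

#define FC_HAS_TIMESTAMP  (1u << 0)	/* hypothetical: 8-byte field */
#define FC_HAS_TBTT       (1u << 1)	/* hypothetical: 3-byte field */
#define FC_HAS_SSID       (1u << 2)	/* hypothetical: 4-byte field */

static unsigned int optional_len(uint16_t fc)
{
	unsigned int len = 0;

	if (fc & FC_HAS_TIMESTAMP)
		len += 8;
	if (fc & FC_HAS_TBTT)
		len += 3;
	if (fc & FC_HAS_SSID)
		len += 4;
	return len;
}

int main(void)
{
	/* Fixed header end plus the optional length gives the start of
	 * the variable elements, regardless of beacon subtype.
	 */
	unsigned int fixed_end = 16;	/* hypothetical fixed-part size */
	uint16_t fc = FC_HAS_TIMESTAMP | FC_HAS_SSID;

	printf("variable part starts at %u\n", fixed_end + optional_len(fc));
	return 0;
}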
+diff --git a/net/ncsi/internal.h b/net/ncsi/internal.h
+index 4e0842df5234ea..2c260f33b55cc5 100644
+--- a/net/ncsi/internal.h
++++ b/net/ncsi/internal.h
+@@ -143,16 +143,15 @@ struct ncsi_channel_vlan_filter {
+ };
+ 
+ struct ncsi_channel_stats {
+-	u32 hnc_cnt_hi;		/* Counter cleared            */
+-	u32 hnc_cnt_lo;		/* Counter cleared            */
+-	u32 hnc_rx_bytes;	/* Rx bytes                   */
+-	u32 hnc_tx_bytes;	/* Tx bytes                   */
+-	u32 hnc_rx_uc_pkts;	/* Rx UC packets              */
+-	u32 hnc_rx_mc_pkts;     /* Rx MC packets              */
+-	u32 hnc_rx_bc_pkts;	/* Rx BC packets              */
+-	u32 hnc_tx_uc_pkts;	/* Tx UC packets              */
+-	u32 hnc_tx_mc_pkts;	/* Tx MC packets              */
+-	u32 hnc_tx_bc_pkts;	/* Tx BC packets              */
++	u64 hnc_cnt;		/* Counter cleared            */
++	u64 hnc_rx_bytes;	/* Rx bytes                   */
++	u64 hnc_tx_bytes;	/* Tx bytes                   */
++	u64 hnc_rx_uc_pkts;	/* Rx UC packets              */
++	u64 hnc_rx_mc_pkts;     /* Rx MC packets              */
++	u64 hnc_rx_bc_pkts;	/* Rx BC packets              */
++	u64 hnc_tx_uc_pkts;	/* Tx UC packets              */
++	u64 hnc_tx_mc_pkts;	/* Tx MC packets              */
++	u64 hnc_tx_bc_pkts;	/* Tx BC packets              */
+ 	u32 hnc_fcs_err;	/* FCS errors                 */
+ 	u32 hnc_align_err;	/* Alignment errors           */
+ 	u32 hnc_false_carrier;	/* False carrier detection    */
+@@ -181,7 +180,7 @@ struct ncsi_channel_stats {
+ 	u32 hnc_tx_1023_frames;	/* Tx 512-1023 bytes frames   */
+ 	u32 hnc_tx_1522_frames;	/* Tx 1024-1522 bytes frames  */
+ 	u32 hnc_tx_9022_frames;	/* Tx 1523-9022 bytes frames  */
+-	u32 hnc_rx_valid_bytes;	/* Rx valid bytes             */
++	u64 hnc_rx_valid_bytes;	/* Rx valid bytes             */
+ 	u32 hnc_rx_runt_pkts;	/* Rx error runt packets      */
+ 	u32 hnc_rx_jabber_pkts;	/* Rx error jabber packets    */
+ 	u32 ncsi_rx_cmds;	/* Rx NCSI commands           */
+diff --git a/net/ncsi/ncsi-pkt.h b/net/ncsi/ncsi-pkt.h
+index f2f3b5c1b94126..24edb273797240 100644
+--- a/net/ncsi/ncsi-pkt.h
++++ b/net/ncsi/ncsi-pkt.h
+@@ -252,16 +252,15 @@ struct ncsi_rsp_gp_pkt {
+ /* Get Controller Packet Statistics */
+ struct ncsi_rsp_gcps_pkt {
+ 	struct ncsi_rsp_pkt_hdr rsp;            /* Response header            */
+-	__be32                  cnt_hi;         /* Counter cleared            */
+-	__be32                  cnt_lo;         /* Counter cleared            */
+-	__be32                  rx_bytes;       /* Rx bytes                   */
+-	__be32                  tx_bytes;       /* Tx bytes                   */
+-	__be32                  rx_uc_pkts;     /* Rx UC packets              */
+-	__be32                  rx_mc_pkts;     /* Rx MC packets              */
+-	__be32                  rx_bc_pkts;     /* Rx BC packets              */
+-	__be32                  tx_uc_pkts;     /* Tx UC packets              */
+-	__be32                  tx_mc_pkts;     /* Tx MC packets              */
+-	__be32                  tx_bc_pkts;     /* Tx BC packets              */
++	__be64                  cnt;            /* Counter cleared            */
++	__be64                  rx_bytes;       /* Rx bytes                   */
++	__be64                  tx_bytes;       /* Tx bytes                   */
++	__be64                  rx_uc_pkts;     /* Rx UC packets              */
++	__be64                  rx_mc_pkts;     /* Rx MC packets              */
++	__be64                  rx_bc_pkts;     /* Rx BC packets              */
++	__be64                  tx_uc_pkts;     /* Tx UC packets              */
++	__be64                  tx_mc_pkts;     /* Tx MC packets              */
++	__be64                  tx_bc_pkts;     /* Tx BC packets              */
+ 	__be32                  fcs_err;        /* FCS errors                 */
+ 	__be32                  align_err;      /* Alignment errors           */
+ 	__be32                  false_carrier;  /* False carrier detection    */
+@@ -290,11 +289,11 @@ struct ncsi_rsp_gcps_pkt {
+ 	__be32                  tx_1023_frames; /* Tx 512-1023 bytes frames   */
+ 	__be32                  tx_1522_frames; /* Tx 1024-1522 bytes frames  */
+ 	__be32                  tx_9022_frames; /* Tx 1523-9022 bytes frames  */
+-	__be32                  rx_valid_bytes; /* Rx valid bytes             */
++	__be64                  rx_valid_bytes; /* Rx valid bytes             */
+ 	__be32                  rx_runt_pkts;   /* Rx error runt packets      */
+ 	__be32                  rx_jabber_pkts; /* Rx error jabber packets    */
+ 	__be32                  checksum;       /* Checksum                   */
+-};
++}  __packed __aligned(4);
+ 
+ /* Get NCSI Statistics */
+ struct ncsi_rsp_gns_pkt {
+diff --git a/net/ncsi/ncsi-rsp.c b/net/ncsi/ncsi-rsp.c
+index 4a8ce2949faeac..8668888c5a2f99 100644
+--- a/net/ncsi/ncsi-rsp.c
++++ b/net/ncsi/ncsi-rsp.c
+@@ -926,16 +926,15 @@ static int ncsi_rsp_handler_gcps(struct ncsi_request *nr)
+ 
+ 	/* Update HNC's statistics */
+ 	ncs = &nc->stats;
+-	ncs->hnc_cnt_hi         = ntohl(rsp->cnt_hi);
+-	ncs->hnc_cnt_lo         = ntohl(rsp->cnt_lo);
+-	ncs->hnc_rx_bytes       = ntohl(rsp->rx_bytes);
+-	ncs->hnc_tx_bytes       = ntohl(rsp->tx_bytes);
+-	ncs->hnc_rx_uc_pkts     = ntohl(rsp->rx_uc_pkts);
+-	ncs->hnc_rx_mc_pkts     = ntohl(rsp->rx_mc_pkts);
+-	ncs->hnc_rx_bc_pkts     = ntohl(rsp->rx_bc_pkts);
+-	ncs->hnc_tx_uc_pkts     = ntohl(rsp->tx_uc_pkts);
+-	ncs->hnc_tx_mc_pkts     = ntohl(rsp->tx_mc_pkts);
+-	ncs->hnc_tx_bc_pkts     = ntohl(rsp->tx_bc_pkts);
++	ncs->hnc_cnt            = be64_to_cpu(rsp->cnt);
++	ncs->hnc_rx_bytes       = be64_to_cpu(rsp->rx_bytes);
++	ncs->hnc_tx_bytes       = be64_to_cpu(rsp->tx_bytes);
++	ncs->hnc_rx_uc_pkts     = be64_to_cpu(rsp->rx_uc_pkts);
++	ncs->hnc_rx_mc_pkts     = be64_to_cpu(rsp->rx_mc_pkts);
++	ncs->hnc_rx_bc_pkts     = be64_to_cpu(rsp->rx_bc_pkts);
++	ncs->hnc_tx_uc_pkts     = be64_to_cpu(rsp->tx_uc_pkts);
++	ncs->hnc_tx_mc_pkts     = be64_to_cpu(rsp->tx_mc_pkts);
++	ncs->hnc_tx_bc_pkts     = be64_to_cpu(rsp->tx_bc_pkts);
+ 	ncs->hnc_fcs_err        = ntohl(rsp->fcs_err);
+ 	ncs->hnc_align_err      = ntohl(rsp->align_err);
+ 	ncs->hnc_false_carrier  = ntohl(rsp->false_carrier);
+@@ -964,7 +963,7 @@ static int ncsi_rsp_handler_gcps(struct ncsi_request *nr)
+ 	ncs->hnc_tx_1023_frames = ntohl(rsp->tx_1023_frames);
+ 	ncs->hnc_tx_1522_frames = ntohl(rsp->tx_1522_frames);
+ 	ncs->hnc_tx_9022_frames = ntohl(rsp->tx_9022_frames);
+-	ncs->hnc_rx_valid_bytes = ntohl(rsp->rx_valid_bytes);
++	ncs->hnc_rx_valid_bytes = be64_to_cpu(rsp->rx_valid_bytes);
+ 	ncs->hnc_rx_runt_pkts   = ntohl(rsp->rx_runt_pkts);
+ 	ncs->hnc_rx_jabber_pkts = ntohl(rsp->rx_jabber_pkts);
+ 
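The NCSI changes widen the controller counters to single big-endian 64-bit fields and mark the response struct __packed __aligned(4) so the 8-byte members keep their wire offsets. A userspace analogue, assuming glibc's <endian.h> and abbreviated field names:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

struct gcps_rsp {
	uint32_t hdr;		/* stand-in for the response header */
	uint64_t cnt;		/* big-endian on the wire */
	uint64_t rx_bytes;	/* big-endian on the wire */
} __attribute__((packed, aligned(4)));

int main(void)
{
	struct gcps_rsp rsp = {
		.cnt      = htobe64(0x0000000100000002ULL),
		.rx_bytes = htobe64(42),
	};

	/* Without packed/aligned(4), padding after hdr would shift the
	 * 64-bit fields away from their wire offsets.
	 */
	printf("sizeof=%zu cnt=%llu rx=%llu\n", sizeof(rsp),
	       (unsigned long long)be64toh(rsp.cnt),
	       (unsigned long long)be64toh(rsp.rx_bytes));
	return 0;
}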
+diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
+index 4085c436e30628..02f10a46fab7c8 100644
+--- a/net/netfilter/nf_nat_core.c
++++ b/net/netfilter/nf_nat_core.c
+@@ -248,7 +248,7 @@ static noinline bool
+ nf_nat_used_tuple_new(const struct nf_conntrack_tuple *tuple,
+ 		      const struct nf_conn *ignored_ct)
+ {
+-	static const unsigned long uses_nat = IPS_NAT_MASK | IPS_SEQ_ADJUST_BIT;
++	static const unsigned long uses_nat = IPS_NAT_MASK | IPS_SEQ_ADJUST;
+ 	const struct nf_conntrack_tuple_hash *thash;
+ 	const struct nf_conntrack_zone *zone;
+ 	struct nf_conn *ct;
+@@ -287,8 +287,14 @@ nf_nat_used_tuple_new(const struct nf_conntrack_tuple *tuple,
+ 	zone = nf_ct_zone(ignored_ct);
+ 
+ 	thash = nf_conntrack_find_get(net, zone, tuple);
+-	if (unlikely(!thash)) /* clashing entry went away */
+-		return false;
++	if (unlikely(!thash)) {
++		struct nf_conntrack_tuple reply;
++
++		nf_ct_invert_tuple(&reply, tuple);
++		thash = nf_conntrack_find_get(net, zone, &reply);
++		if (!thash) /* clashing entry went away */
++			return false;
++	}
+ 
+ 	ct = nf_ct_tuplehash_to_ctrack(thash);
+ 
+diff --git a/net/netfilter/nft_quota.c b/net/netfilter/nft_quota.c
+index 9b2d7463d3d326..df0798da2329b9 100644
+--- a/net/netfilter/nft_quota.c
++++ b/net/netfilter/nft_quota.c
+@@ -19,10 +19,16 @@ struct nft_quota {
+ };
+ 
+ static inline bool nft_overquota(struct nft_quota *priv,
+-				 const struct sk_buff *skb)
++				 const struct sk_buff *skb,
++				 bool *report)
+ {
+-	return atomic64_add_return(skb->len, priv->consumed) >=
+-	       atomic64_read(&priv->quota);
++	u64 consumed = atomic64_add_return(skb->len, priv->consumed);
++	u64 quota = atomic64_read(&priv->quota);
++
++	if (report)
++		*report = consumed >= quota;
++
++	return consumed > quota;
+ }
+ 
+ static inline bool nft_quota_invert(struct nft_quota *priv)
+@@ -34,7 +40,7 @@ static inline void nft_quota_do_eval(struct nft_quota *priv,
+ 				     struct nft_regs *regs,
+ 				     const struct nft_pktinfo *pkt)
+ {
+-	if (nft_overquota(priv, pkt->skb) ^ nft_quota_invert(priv))
++	if (nft_overquota(priv, pkt->skb, NULL) ^ nft_quota_invert(priv))
+ 		regs->verdict.code = NFT_BREAK;
+ }
+ 
+@@ -51,13 +57,13 @@ static void nft_quota_obj_eval(struct nft_object *obj,
+ 			       const struct nft_pktinfo *pkt)
+ {
+ 	struct nft_quota *priv = nft_obj_data(obj);
+-	bool overquota;
++	bool overquota, report;
+ 
+-	overquota = nft_overquota(priv, pkt->skb);
++	overquota = nft_overquota(priv, pkt->skb, &report);
+ 	if (overquota ^ nft_quota_invert(priv))
+ 		regs->verdict.code = NFT_BREAK;
+ 
+-	if (overquota &&
++	if (report &&
+ 	    !test_and_set_bit(NFT_QUOTA_DEPLETED_BIT, &priv->flags))
+ 		nft_obj_notify(nft_net(pkt), obj->key.table, obj, 0, 0,
+ 			       NFT_MSG_NEWOBJ, 0, nft_pf(pkt), 0, GFP_ATOMIC);
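The quota rework separates two thresholds: the depletion notification fires once consumption reaches the quota (>=), while the verdict only breaks once it is exceeded (>), so a packet landing exactly on the limit is still accepted. A compact sketch with C11 atomics; names are illustrative.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool over_quota(atomic_ullong *consumed, unsigned long long quota,
		       unsigned long long pkt_len, bool *report)
{
	unsigned long long now = atomic_fetch_add(consumed, pkt_len) + pkt_len;

	if (report)
		*report = now >= quota;	/* notify once the limit is reached */
	return now > quota;		/* but only break past the limit */
}

int main(void)
{
	atomic_ullong consumed = 90;
	bool report;

	/* 10 bytes lands exactly on a 100-byte quota: report, don't drop */
	printf("drop=%d report=%d\n",
	       over_quota(&consumed, 100, 10, &report), report);
	return 0;
}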
+diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
+index 7be342b495f5f7..0529e4ef752070 100644
+--- a/net/netfilter/nft_set_pipapo.c
++++ b/net/netfilter/nft_set_pipapo.c
+@@ -683,6 +683,30 @@ static int pipapo_realloc_mt(struct nft_pipapo_field *f,
+ 	return 0;
+ }
+ 
++
++/**
++ * lt_calculate_size() - Get storage size for lookup table with overflow check
++ * @groups:	Number of bit groups
++ * @bb:		Number of bits grouped together in lookup table buckets
++ * @bsize:	Size of each bucket in lookup table, in longs
++ *
++ * Return: allocation size including alignment overhead, negative on overflow
++ */
++static ssize_t lt_calculate_size(unsigned int groups, unsigned int bb,
++				 unsigned int bsize)
++{
++	ssize_t ret = groups * NFT_PIPAPO_BUCKETS(bb) * sizeof(long);
++
++	if (check_mul_overflow(ret, bsize, &ret))
++		return -1;
++	if (check_add_overflow(ret, NFT_PIPAPO_ALIGN_HEADROOM, &ret))
++		return -1;
++	if (ret > INT_MAX)
++		return -1;
++
++	return ret;
++}
++
+ /**
+  * pipapo_resize() - Resize lookup or mapping table, or both
+  * @f:		Field containing lookup and mapping tables
+@@ -701,6 +725,7 @@ static int pipapo_resize(struct nft_pipapo_field *f,
+ 	long *new_lt = NULL, *new_p, *old_lt = f->lt, *old_p;
+ 	unsigned int new_bucket_size, copy;
+ 	int group, bucket, err;
++	ssize_t lt_size;
+ 
+ 	if (rules >= NFT_PIPAPO_RULE0_MAX)
+ 		return -ENOSPC;
+@@ -719,10 +744,11 @@ static int pipapo_resize(struct nft_pipapo_field *f,
+ 	else
+ 		copy = new_bucket_size;
+ 
+-	new_lt = kvzalloc(f->groups * NFT_PIPAPO_BUCKETS(f->bb) *
+-			  new_bucket_size * sizeof(*new_lt) +
+-			  NFT_PIPAPO_ALIGN_HEADROOM,
+-			  GFP_KERNEL);
++	lt_size = lt_calculate_size(f->groups, f->bb, new_bucket_size);
++	if (lt_size < 0)
++		return -ENOMEM;
++
++	new_lt = kvzalloc(lt_size, GFP_KERNEL_ACCOUNT);
+ 	if (!new_lt)
+ 		return -ENOMEM;
+ 
+@@ -907,7 +933,7 @@ static void pipapo_lt_bits_adjust(struct nft_pipapo_field *f)
+ {
+ 	unsigned int groups, bb;
+ 	unsigned long *new_lt;
+-	size_t lt_size;
++	ssize_t lt_size;
+ 
+ 	lt_size = f->groups * NFT_PIPAPO_BUCKETS(f->bb) * f->bsize *
+ 		  sizeof(*f->lt);
+@@ -917,15 +943,17 @@ static void pipapo_lt_bits_adjust(struct nft_pipapo_field *f)
+ 		groups = f->groups * 2;
+ 		bb = NFT_PIPAPO_GROUP_BITS_LARGE_SET;
+ 
+-		lt_size = groups * NFT_PIPAPO_BUCKETS(bb) * f->bsize *
+-			  sizeof(*f->lt);
++		lt_size = lt_calculate_size(groups, bb, f->bsize);
++		if (lt_size < 0)
++			return;
+ 	} else if (f->bb == NFT_PIPAPO_GROUP_BITS_LARGE_SET &&
+ 		   lt_size < NFT_PIPAPO_LT_SIZE_LOW) {
+ 		groups = f->groups / 2;
+ 		bb = NFT_PIPAPO_GROUP_BITS_SMALL_SET;
+ 
+-		lt_size = groups * NFT_PIPAPO_BUCKETS(bb) * f->bsize *
+-			  sizeof(*f->lt);
++		lt_size = lt_calculate_size(groups, bb, f->bsize);
++		if (lt_size < 0)
++			return;
+ 
+ 		/* Don't increase group width if the resulting lookup table size
+ 		 * would exceed the upper size threshold for a "small" set.
+@@ -936,7 +964,7 @@ static void pipapo_lt_bits_adjust(struct nft_pipapo_field *f)
+ 		return;
+ 	}
+ 
+-	new_lt = kvzalloc(lt_size + NFT_PIPAPO_ALIGN_HEADROOM, GFP_KERNEL_ACCOUNT);
++	new_lt = kvzalloc(lt_size, GFP_KERNEL_ACCOUNT);
+ 	if (!new_lt)
+ 		return;
+ 
+@@ -1451,13 +1479,15 @@ static struct nft_pipapo_match *pipapo_clone(struct nft_pipapo_match *old)
+ 
+ 	for (i = 0; i < old->field_count; i++) {
+ 		unsigned long *new_lt;
++		ssize_t lt_size;
+ 
+ 		memcpy(dst, src, offsetof(struct nft_pipapo_field, lt));
+ 
+-		new_lt = kvzalloc(src->groups * NFT_PIPAPO_BUCKETS(src->bb) *
+-				  src->bsize * sizeof(*dst->lt) +
+-				  NFT_PIPAPO_ALIGN_HEADROOM,
+-				  GFP_KERNEL_ACCOUNT);
++		lt_size = lt_calculate_size(src->groups, src->bb, src->bsize);
++		if (lt_size < 0)
++			goto out_lt;
++
++		new_lt = kvzalloc(lt_size, GFP_KERNEL_ACCOUNT);
+ 		if (!new_lt)
+ 			goto out_lt;
+ 
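lt_calculate_size() replaces the open-coded size arithmetic with explicit overflow checks and an INT_MAX cap. The kernel's check_mul_overflow()/check_add_overflow() wrap the compiler builtins used directly in this userspace sketch; the headroom constant is a stand-in.

#include <limits.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/types.h>

#define ALIGN_HEADROOM 24	/* stand-in for NFT_PIPAPO_ALIGN_HEADROOM */

static ssize_t calc_size(size_t groups, size_t buckets, size_t bsize)
{
	size_t ret = sizeof(long);

	/* The kernel variant leaves the first multiply unchecked because
	 * groups and buckets are small; checking every step costs nothing.
	 */
	if (__builtin_mul_overflow(ret, groups, &ret) ||
	    __builtin_mul_overflow(ret, buckets, &ret) ||
	    __builtin_mul_overflow(ret, bsize, &ret) ||
	    __builtin_add_overflow(ret, ALIGN_HEADROOM, &ret) ||
	    ret > INT_MAX)
		return -1;
	return ret;
}

int main(void)
{
	printf("ok:       %zd\n", calc_size(16, 256, 4));
	printf("overflow: %zd\n", calc_size(SIZE_MAX / 2, 2, 2));
	return 0;
}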
+diff --git a/net/netfilter/nft_set_pipapo_avx2.c b/net/netfilter/nft_set_pipapo_avx2.c
+index c15db28c5ebc43..be7c16c79f711e 100644
+--- a/net/netfilter/nft_set_pipapo_avx2.c
++++ b/net/netfilter/nft_set_pipapo_avx2.c
+@@ -1113,6 +1113,25 @@ bool nft_pipapo_avx2_estimate(const struct nft_set_desc *desc, u32 features,
+ 	return true;
+ }
+ 
++/**
++ * pipapo_resmap_init_avx2() - Initialise result map before first use
++ * @m:		Matching data, including mapping table
++ * @res_map:	Result map
++ *
++ * Like pipapo_resmap_init(), but does not set the start map bits covered by the first field.
++ */
++static inline void pipapo_resmap_init_avx2(const struct nft_pipapo_match *m, unsigned long *res_map)
++{
++	const struct nft_pipapo_field *f = m->f;
++	int i;
++
++	/* Starting map doesn't need to be set to all-ones for this implementation,
++	 * but we do need to zero the remaining bits, if any.
++	 */
++	for (i = f->bsize; i < m->bsize_max; i++)
++		res_map[i] = 0ul;
++}
++
+ /**
+  * nft_pipapo_avx2_lookup() - Lookup function for AVX2 implementation
+  * @net:	Network namespace
+@@ -1171,7 +1190,7 @@ bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
+ 	res  = scratch->map + (map_index ? m->bsize_max : 0);
+ 	fill = scratch->map + (map_index ? 0 : m->bsize_max);
+ 
+-	/* Starting map doesn't need to be set for this implementation */
++	pipapo_resmap_init_avx2(m, res);
+ 
+ 	nft_pipapo_avx2_prepare();
+ 
+diff --git a/net/netfilter/nft_tunnel.c b/net/netfilter/nft_tunnel.c
+index 0d99786c322e88..e18d322290fb09 100644
+--- a/net/netfilter/nft_tunnel.c
++++ b/net/netfilter/nft_tunnel.c
+@@ -624,10 +624,10 @@ static int nft_tunnel_opts_dump(struct sk_buff *skb,
+ 		struct geneve_opt *opt;
+ 		int offset = 0;
+ 
+-		inner = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS_GENEVE);
+-		if (!inner)
+-			goto failure;
+ 		while (opts->len > offset) {
++			inner = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS_GENEVE);
++			if (!inner)
++				goto failure;
+ 			opt = (struct geneve_opt *)(opts->u.data + offset);
+ 			if (nla_put_be16(skb, NFTA_TUNNEL_KEY_GENEVE_CLASS,
+ 					 opt->opt_class) ||
+@@ -637,8 +637,8 @@ static int nft_tunnel_opts_dump(struct sk_buff *skb,
+ 				    opt->length * 4, opt->opt_data))
+ 				goto inner_failure;
+ 			offset += sizeof(*opt) + opt->length * 4;
++			nla_nest_end(skb, inner);
+ 		}
+-		nla_nest_end(skb, inner);
+ 	}
+ 	nla_nest_end(skb, nest);
+ 	return 0;
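The nft_tunnel dump fix moves the nest open/close inside the loop so every geneve option is wrapped in its own NFTA_TUNNEL_KEY_OPTS_GENEVE container. A toy TLV writer showing the same per-item nesting, with the length byte patched on close; the one-byte TLV format here is invented for brevity and carries no bounds checking.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint8_t buf[256];
static size_t pos;

static size_t nest_start(uint8_t type)
{
	size_t hdr = pos;

	buf[pos++] = type;
	buf[pos++] = 0;		/* length patched in nest_end() */
	return hdr;
}

static void nest_end(size_t hdr)
{
	buf[hdr + 1] = (uint8_t)(pos - hdr - 2);
}

static void put_bytes(const void *data, size_t len)
{
	memcpy(buf + pos, data, len);
	pos += len;
}

int main(void)
{
	const char *opts[] = { "opt-a", "opt-bb" };

	for (int i = 0; i < 2; i++) {
		size_t nest = nest_start(1);	/* one nest per option */

		put_bytes(opts[i], strlen(opts[i]));
		nest_end(nest);
	}
	printf("encoded %zu bytes\n", pos);
	return 0;
}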
+diff --git a/net/netfilter/xt_TCPOPTSTRIP.c b/net/netfilter/xt_TCPOPTSTRIP.c
+index 30e99464171b7b..93f064306901c0 100644
+--- a/net/netfilter/xt_TCPOPTSTRIP.c
++++ b/net/netfilter/xt_TCPOPTSTRIP.c
+@@ -91,7 +91,7 @@ tcpoptstrip_tg4(struct sk_buff *skb, const struct xt_action_param *par)
+ 	return tcpoptstrip_mangle_packet(skb, par, ip_hdrlen(skb));
+ }
+ 
+-#if IS_ENABLED(CONFIG_IP6_NF_MANGLE)
++#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
+ static unsigned int
+ tcpoptstrip_tg6(struct sk_buff *skb, const struct xt_action_param *par)
+ {
+@@ -119,7 +119,7 @@ static struct xt_target tcpoptstrip_tg_reg[] __read_mostly = {
+ 		.targetsize = sizeof(struct xt_tcpoptstrip_target_info),
+ 		.me         = THIS_MODULE,
+ 	},
+-#if IS_ENABLED(CONFIG_IP6_NF_MANGLE)
++#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
+ 	{
+ 		.name       = "TCPOPTSTRIP",
+ 		.family     = NFPROTO_IPV6,
+diff --git a/net/netfilter/xt_mark.c b/net/netfilter/xt_mark.c
+index 65b965ca40ea7e..59b9d04400cac2 100644
+--- a/net/netfilter/xt_mark.c
++++ b/net/netfilter/xt_mark.c
+@@ -48,7 +48,7 @@ static struct xt_target mark_tg_reg[] __read_mostly = {
+ 		.targetsize     = sizeof(struct xt_mark_tginfo2),
+ 		.me             = THIS_MODULE,
+ 	},
+-#if IS_ENABLED(CONFIG_IP_NF_ARPTABLES)
++#if IS_ENABLED(CONFIG_IP_NF_ARPTABLES) || IS_ENABLED(CONFIG_NFT_COMPAT_ARP)
+ 	{
+ 		.name           = "MARK",
+ 		.revision       = 2,
+diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c
+index cd9160bbc91974..33b77084a4e5f3 100644
+--- a/net/netlabel/netlabel_kapi.c
++++ b/net/netlabel/netlabel_kapi.c
+@@ -1165,6 +1165,11 @@ int netlbl_conn_setattr(struct sock *sk,
+ 		break;
+ #if IS_ENABLED(CONFIG_IPV6)
+ 	case AF_INET6:
++		if (sk->sk_family != AF_INET6) {
++			ret_val = -EAFNOSUPPORT;
++			goto conn_setattr_return;
++		}
++
+ 		addr6 = (struct sockaddr_in6 *)addr;
+ 		entry = netlbl_domhsh_getentry_af6(secattr->domain,
+ 						   &addr6->sin6_addr);
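The netlabel fix refuses to reinterpret the sockaddr as sockaddr_in6 unless the socket's own family is AF_INET6, returning -EAFNOSUPPORT instead of casting blindly. A userspace sketch of the same guard:

#include <errno.h>
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>

static int conn_setattr(int sk_family, const struct sockaddr *addr)
{
	const struct sockaddr_in6 *a6;

	switch (addr->sa_family) {
	case AF_INET6:
		if (sk_family != AF_INET6)
			return -EAFNOSUPPORT;	/* v6 addr, non-v6 socket */
		a6 = (const struct sockaddr_in6 *)addr;
		return IN6_IS_ADDR_LOOPBACK(&a6->sin6_addr) ? 1 : 0;
	default:
		return -EINVAL;
	}
}

int main(void)
{
	struct sockaddr_in6 sa = { .sin6_family = AF_INET6,
				   .sin6_addr = IN6ADDR_LOOPBACK_INIT };

	printf("%d %d\n",
	       conn_setattr(AF_INET6, (struct sockaddr *)&sa),
	       conn_setattr(AF_INET, (struct sockaddr *)&sa));
	return 0;
}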
+diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
+index 8a848ce72e2910..b80bd3a9077397 100644
+--- a/net/openvswitch/flow.c
++++ b/net/openvswitch/flow.c
+@@ -788,7 +788,7 @@ static int key_extract_l3l4(struct sk_buff *skb, struct sw_flow_key *key)
+ 			memset(&key->ipv4, 0, sizeof(key->ipv4));
+ 		}
+ 	} else if (eth_p_mpls(key->eth.type)) {
+-		u8 label_count = 1;
++		size_t label_count = 1;
+ 
+ 		memset(&key->mpls, 0, sizeof(key->mpls));
+ 		skb_set_inner_network_header(skb, skb->mac_len);
+diff --git a/net/sched/sch_ets.c b/net/sched/sch_ets.c
+index 2c069f0181c62b..037f764822b965 100644
+--- a/net/sched/sch_ets.c
++++ b/net/sched/sch_ets.c
+@@ -661,7 +661,7 @@ static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt,
+ 	for (i = q->nbands; i < oldbands; i++) {
+ 		if (i >= q->nstrict && q->classes[i].qdisc->q.qlen)
+ 			list_del_init(&q->classes[i].alist);
+-		qdisc_tree_flush_backlog(q->classes[i].qdisc);
++		qdisc_purge_queue(q->classes[i].qdisc);
+ 	}
+ 	WRITE_ONCE(q->nstrict, nstrict);
+ 	memcpy(q->prio2band, priomap, sizeof(priomap));
+diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
+index cc30f7a32f1a78..9e2b9a490db23d 100644
+--- a/net/sched/sch_prio.c
++++ b/net/sched/sch_prio.c
+@@ -211,7 +211,7 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt,
+ 	memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);
+ 
+ 	for (i = q->bands; i < oldbands; i++)
+-		qdisc_tree_flush_backlog(q->queues[i]);
++		qdisc_purge_queue(q->queues[i]);
+ 
+ 	for (i = oldbands; i < q->bands; i++) {
+ 		q->queues[i] = queues[i];
+diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
+index b5f096588fae6e..0f0701ed397e90 100644
+--- a/net/sched/sch_red.c
++++ b/net/sched/sch_red.c
+@@ -283,7 +283,7 @@ static int __red_change(struct Qdisc *sch, struct nlattr **tb,
+ 	q->userbits = userbits;
+ 	q->limit = ctl->limit;
+ 	if (child) {
+-		qdisc_tree_flush_backlog(q->qdisc);
++		qdisc_purge_queue(q->qdisc);
+ 		old_child = q->qdisc;
+ 		q->qdisc = child;
+ 	}
+diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
+index 58b42dcf8f2013..a903b3c4680500 100644
+--- a/net/sched/sch_sfq.c
++++ b/net/sched/sch_sfq.c
+@@ -310,7 +310,10 @@ static unsigned int sfq_drop(struct Qdisc *sch, struct sk_buff **to_free)
+ 		/* It is difficult to believe, but ALL THE SLOTS HAVE LENGTH 1. */
+ 		x = q->tail->next;
+ 		slot = &q->slots[x];
+-		q->tail->next = slot->next;
++		if (slot->next == x)
++			q->tail = NULL; /* no more active slots */
++		else
++			q->tail->next = slot->next;
+ 		q->ht[slot->hash] = SFQ_EMPTY_SLOT;
+ 		goto drop;
+ 	}
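The sch_sfq fix handles the corner case where the slot being dropped is the only element of the circular list: tail must become "no active slots" rather than keep pointing through the removed slot. A sketch using array indices in place of the kernel's slot layout:

#include <stdio.h>

#define EMPTY -1

struct slot { int next; };

static int drop_head(struct slot *slots, int *tail)
{
	int x = slots[*tail].next;	/* head of the circular list */

	if (slots[x].next == x)
		*tail = EMPTY;		/* it was the only active slot */
	else
		slots[*tail].next = slots[x].next;
	return x;
}

int main(void)
{
	struct slot slots[2] = { { .next = 0 } };	/* slot 0 -> itself */
	int tail = 0;

	printf("dropped %d, tail=%d\n", drop_head(slots, &tail), tail);
	return 0;
}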
+diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
+index dc26b22d53c734..4c977f049670a6 100644
+--- a/net/sched/sch_tbf.c
++++ b/net/sched/sch_tbf.c
+@@ -452,7 +452,7 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt,
+ 
+ 	sch_tree_lock(sch);
+ 	if (child) {
+-		qdisc_tree_flush_backlog(q->qdisc);
++		qdisc_purge_queue(q->qdisc);
+ 		old = q->qdisc;
+ 		q->qdisc = child;
+ 	}
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
+index aca8bdf65d729f..ca6172822b68ae 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
+@@ -406,12 +406,12 @@ static void svc_rdma_xprt_done(struct rpcrdma_notification *rn)
+  */
+ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
+ {
++	unsigned int ctxts, rq_depth, maxpayload;
+ 	struct svcxprt_rdma *listen_rdma;
+ 	struct svcxprt_rdma *newxprt = NULL;
+ 	struct rdma_conn_param conn_param;
+ 	struct rpcrdma_connect_private pmsg;
+ 	struct ib_qp_init_attr qp_attr;
+-	unsigned int ctxts, rq_depth;
+ 	struct ib_device *dev;
+ 	int ret = 0;
+ 	RPC_IFDEBUG(struct sockaddr *sap);
+@@ -462,12 +462,14 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
+ 		newxprt->sc_max_bc_requests = 2;
+ 	}
+ 
+-	/* Arbitrarily estimate the number of rw_ctxs needed for
+-	 * this transport. This is enough rw_ctxs to make forward
+-	 * progress even if the client is using one rkey per page
+-	 * in each Read chunk.
++	/* Arbitrary estimate of the needed number of rdma_rw contexts.
+ 	 */
+-	ctxts = 3 * RPCSVC_MAXPAGES;
++	maxpayload = min(xprt->xpt_server->sv_max_payload,
++			 RPCSVC_MAXPAYLOAD_RDMA);
++	ctxts = newxprt->sc_max_requests * 3 *
++		rdma_rw_mr_factor(dev, newxprt->sc_port_num,
++				  maxpayload >> PAGE_SHIFT);
++
+ 	newxprt->sc_sq_depth = rq_depth + ctxts;
+ 	if (newxprt->sc_sq_depth > dev->attrs.max_qp_wr)
+ 		newxprt->sc_sq_depth = dev->attrs.max_qp_wr;
+diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c
+index 8584893b478510..79f91b6ca8c847 100644
+--- a/net/tipc/crypto.c
++++ b/net/tipc/crypto.c
+@@ -818,7 +818,11 @@ static int tipc_aead_encrypt(struct tipc_aead *aead, struct sk_buff *skb,
+ 	}
+ 
+ 	/* Get net to avoid freed tipc_crypto when delete namespace */
+-	get_net(aead->crypto->net);
++	if (!maybe_get_net(aead->crypto->net)) {
++		tipc_bearer_put(b);
++		rc = -ENODEV;
++		goto exit;
++	}
+ 
+ 	/* Now, do encrypt */
+ 	rc = crypto_aead_encrypt(req);
+diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
+index 7bcc9b4408a2c7..8fb5925f2389e9 100644
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -908,6 +908,13 @@ static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
+ 					    &msg_redir, send, flags);
+ 		lock_sock(sk);
+ 		if (err < 0) {
++			/* Regardless of whether the data represented by
++			 * msg_redir is sent successfully, we have already
++			 * uncharged it via sk_msg_return_zero(). However,
++			 * msg->sg.size still holds the remaining unprocessed
++			 * data, which must be uncharged here.
++			 */
++			sk_mem_uncharge(sk, msg->sg.size);
+ 			*copied -= sk_msg_free_nocharge(sk, &msg_redir);
+ 			msg->sg.size = 0;
+ 		}
+@@ -1120,9 +1127,13 @@ static int tls_sw_sendmsg_locked(struct sock *sk, struct msghdr *msg,
+ 					num_async++;
+ 				else if (ret == -ENOMEM)
+ 					goto wait_for_memory;
+-				else if (ctx->open_rec && ret == -ENOSPC)
++				else if (ctx->open_rec && ret == -ENOSPC) {
++					if (msg_pl->cork_bytes) {
++						ret = 0;
++						goto send_end;
++					}
+ 					goto rollback_iter;
+-				else if (ret != -EAGAIN)
++				} else if (ret != -EAGAIN)
+ 					goto send_end;
+ 			}
+ 			continue;
+diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
+index 7f7de6d8809655..2c9b1011cdcc80 100644
+--- a/net/vmw_vsock/virtio_transport_common.c
++++ b/net/vmw_vsock/virtio_transport_common.c
+@@ -441,18 +441,20 @@ static int virtio_transport_send_pkt_info(struct vsock_sock *vsk,
+ static bool virtio_transport_inc_rx_pkt(struct virtio_vsock_sock *vvs,
+ 					u32 len)
+ {
+-	if (vvs->rx_bytes + len > vvs->buf_alloc)
++	if (vvs->buf_used + len > vvs->buf_alloc)
+ 		return false;
+ 
+ 	vvs->rx_bytes += len;
++	vvs->buf_used += len;
+ 	return true;
+ }
+ 
+ static void virtio_transport_dec_rx_pkt(struct virtio_vsock_sock *vvs,
+-					u32 len)
++					u32 bytes_read, u32 bytes_dequeued)
+ {
+-	vvs->rx_bytes -= len;
+-	vvs->fwd_cnt += len;
++	vvs->rx_bytes -= bytes_read;
++	vvs->buf_used -= bytes_dequeued;
++	vvs->fwd_cnt += bytes_dequeued;
+ }
+ 
+ void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct sk_buff *skb)
+@@ -581,11 +583,11 @@ virtio_transport_stream_do_dequeue(struct vsock_sock *vsk,
+ 				   size_t len)
+ {
+ 	struct virtio_vsock_sock *vvs = vsk->trans;
+-	size_t bytes, total = 0;
+ 	struct sk_buff *skb;
+ 	u32 fwd_cnt_delta;
+ 	bool low_rx_bytes;
+ 	int err = -EFAULT;
++	size_t total = 0;
+ 	u32 free_space;
+ 
+ 	spin_lock_bh(&vvs->rx_lock);
+@@ -597,6 +599,8 @@ virtio_transport_stream_do_dequeue(struct vsock_sock *vsk,
+ 	}
+ 
+ 	while (total < len && !skb_queue_empty(&vvs->rx_queue)) {
++		size_t bytes, dequeued = 0;
++
+ 		skb = skb_peek(&vvs->rx_queue);
+ 
+ 		bytes = min_t(size_t, len - total,
+@@ -620,12 +624,12 @@ virtio_transport_stream_do_dequeue(struct vsock_sock *vsk,
+ 		VIRTIO_VSOCK_SKB_CB(skb)->offset += bytes;
+ 
+ 		if (skb->len == VIRTIO_VSOCK_SKB_CB(skb)->offset) {
+-			u32 pkt_len = le32_to_cpu(virtio_vsock_hdr(skb)->len);
+-
+-			virtio_transport_dec_rx_pkt(vvs, pkt_len);
++			dequeued = le32_to_cpu(virtio_vsock_hdr(skb)->len);
+ 			__skb_unlink(skb, &vvs->rx_queue);
+ 			consume_skb(skb);
+ 		}
++
++		virtio_transport_dec_rx_pkt(vvs, bytes, dequeued);
+ 	}
+ 
+ 	fwd_cnt_delta = vvs->fwd_cnt - vvs->last_fwd_cnt;
+@@ -781,7 +785,7 @@ static int virtio_transport_seqpacket_do_dequeue(struct vsock_sock *vsk,
+ 				msg->msg_flags |= MSG_EOR;
+ 		}
+ 
+-		virtio_transport_dec_rx_pkt(vvs, pkt_len);
++		virtio_transport_dec_rx_pkt(vvs, pkt_len, pkt_len);
+ 		kfree_skb(skb);
+ 	}
+ 
+@@ -1735,6 +1739,7 @@ int virtio_transport_read_skb(struct vsock_sock *vsk, skb_read_actor_t recv_acto
+ 	struct sock *sk = sk_vsock(vsk);
+ 	struct virtio_vsock_hdr *hdr;
+ 	struct sk_buff *skb;
++	u32 pkt_len;
+ 	int off = 0;
+ 	int err;
+ 
+@@ -1752,7 +1757,8 @@ int virtio_transport_read_skb(struct vsock_sock *vsk, skb_read_actor_t recv_acto
+ 	if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOM)
+ 		vvs->msg_count--;
+ 
+-	virtio_transport_dec_rx_pkt(vvs, le32_to_cpu(hdr->len));
++	pkt_len = le32_to_cpu(hdr->len);
++	virtio_transport_dec_rx_pkt(vvs, pkt_len, pkt_len);
+ 	spin_unlock_bh(&vvs->rx_lock);
+ 
+ 	virtio_transport_send_credit_update(vsk);
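The vsock accounting now tracks readable bytes (rx_bytes) and occupied buffer space (buf_used) separately: reads decrement the former immediately, but space is only reclaimed once a packet is fully dequeued and freed, and admission of new packets checks buf_used. A sketch of the two-counter scheme:

#include <stdbool.h>
#include <stdio.h>

struct acct { unsigned rx_bytes, buf_used, buf_alloc; };

static bool inc_rx(struct acct *a, unsigned len)
{
	if (a->buf_used + len > a->buf_alloc)
		return false;		/* buffer genuinely full */
	a->rx_bytes += len;
	a->buf_used += len;
	return true;
}

static void dec_rx(struct acct *a, unsigned read, unsigned dequeued)
{
	a->rx_bytes -= read;		/* bytes handed to the reader  */
	a->buf_used -= dequeued;	/* space freed only on dequeue */
}

int main(void)
{
	struct acct a = { .buf_alloc = 8 };

	inc_rx(&a, 8);			/* one 8-byte packet buffered   */
	dec_rx(&a, 4, 0);		/* partial read: still occupied */
	printf("admit=%d\n", inc_rx(&a, 1));	/* 0: no space yet      */
	dec_rx(&a, 4, 8);		/* rest read, packet freed      */
	printf("admit=%d\n", inc_rx(&a, 1));	/* 1: space reclaimed   */
	return 0;
}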
+diff --git a/net/wireless/scan.c b/net/wireless/scan.c
+index f0dd1f448d4d42..d80ab1725f28dd 100644
+--- a/net/wireless/scan.c
++++ b/net/wireless/scan.c
+@@ -3213,6 +3213,7 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
+ 	const u8 *ie;
+ 	size_t ielen;
+ 	u64 tsf;
++	size_t s1g_optional_len;
+ 
+ 	if (WARN_ON(!mgmt))
+ 		return NULL;
+@@ -3227,12 +3228,11 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
+ 
+ 	if (ieee80211_is_s1g_beacon(mgmt->frame_control)) {
+ 		ext = (void *) mgmt;
+-		if (ieee80211_is_s1g_short_beacon(mgmt->frame_control))
+-			min_hdr_len = offsetof(struct ieee80211_ext,
+-					       u.s1g_short_beacon.variable);
+-		else
+-			min_hdr_len = offsetof(struct ieee80211_ext,
+-					       u.s1g_beacon.variable);
++		s1g_optional_len =
++			ieee80211_s1g_optional_len(ext->frame_control);
++		min_hdr_len =
++			offsetof(struct ieee80211_ext, u.s1g_beacon.variable) +
++			s1g_optional_len;
+ 	} else {
+ 		/* same for beacons */
+ 		min_hdr_len = offsetof(struct ieee80211_mgmt,
+@@ -3248,11 +3248,7 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
+ 		const struct ieee80211_s1g_bcn_compat_ie *compat;
+ 		const struct element *elem;
+ 
+-		if (ieee80211_is_s1g_short_beacon(mgmt->frame_control))
+-			ie = ext->u.s1g_short_beacon.variable;
+-		else
+-			ie = ext->u.s1g_beacon.variable;
+-
++		ie = ext->u.s1g_beacon.variable + s1g_optional_len;
+ 		elem = cfg80211_find_elem(WLAN_EID_S1G_BCN_COMPAT, ie, ielen);
+ 		if (!elem)
+ 			return NULL;
+diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c
+index b33c4591e09a4f..32ad8f3fc81e83 100644
+--- a/net/xfrm/xfrm_device.c
++++ b/net/xfrm/xfrm_device.c
+@@ -373,7 +373,6 @@ int xfrm_dev_policy_add(struct net *net, struct xfrm_policy *xp,
+ 
+ 	xdo->dev = dev;
+ 	netdev_tracker_alloc(dev, &xdo->dev_tracker, GFP_ATOMIC);
+-	xdo->real_dev = dev;
+ 	xdo->type = XFRM_DEV_OFFLOAD_PACKET;
+ 	switch (dir) {
+ 	case XFRM_POLICY_IN:
+@@ -395,7 +394,6 @@ int xfrm_dev_policy_add(struct net *net, struct xfrm_policy *xp,
+ 	err = dev->xfrmdev_ops->xdo_dev_policy_add(xp, extack);
+ 	if (err) {
+ 		xdo->dev = NULL;
+-		xdo->real_dev = NULL;
+ 		xdo->type = XFRM_DEV_OFFLOAD_UNSPECIFIED;
+ 		xdo->dir = 0;
+ 		netdev_put(dev, &xdo->dev_tracker);
+diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
+index abd725386cb604..7a298058fc16cf 100644
+--- a/net/xfrm/xfrm_state.c
++++ b/net/xfrm/xfrm_state.c
+@@ -1487,7 +1487,6 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
+ 			xso->type = XFRM_DEV_OFFLOAD_PACKET;
+ 			xso->dir = xdo->dir;
+ 			xso->dev = xdo->dev;
+-			xso->real_dev = xdo->real_dev;
+ 			xso->flags = XFRM_DEV_OFFLOAD_FLAG_ACQ;
+ 			netdev_hold(xso->dev, &xso->dev_tracker, GFP_ATOMIC);
+ 			error = xso->dev->xfrmdev_ops->xdo_dev_state_add(x, NULL);
+@@ -1495,7 +1494,6 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
+ 				xso->dir = 0;
+ 				netdev_put(xso->dev, &xso->dev_tracker);
+ 				xso->dev = NULL;
+-				xso->real_dev = NULL;
+ 				xso->type = XFRM_DEV_OFFLOAD_UNSPECIFIED;
+ 				x->km.state = XFRM_STATE_DEAD;
+ 				to_put = x;
+diff --git a/rust/kernel/alloc/kvec.rs b/rust/kernel/alloc/kvec.rs
+index 87a71fd40c3cad..f62204fe563f58 100644
+--- a/rust/kernel/alloc/kvec.rs
++++ b/rust/kernel/alloc/kvec.rs
+@@ -196,6 +196,9 @@ pub fn len(&self) -> usize {
+     #[inline]
+     pub unsafe fn set_len(&mut self, new_len: usize) {
+         debug_assert!(new_len <= self.capacity());
++
++        // INVARIANT: By the safety requirements of this method `new_len` represents the exact
++        // number of elements stored within `self`.
+         self.len = new_len;
+     }
+ 
+diff --git a/scripts/gcc-plugins/gcc-common.h b/scripts/gcc-plugins/gcc-common.h
+index 3222c1070444fa..ef12c8f929eda3 100644
+--- a/scripts/gcc-plugins/gcc-common.h
++++ b/scripts/gcc-plugins/gcc-common.h
+@@ -123,6 +123,38 @@ static inline tree build_const_char_string(int len, const char *str)
+ 	return cstr;
+ }
+ 
++static inline void __add_type_attr(tree type, const char *attr, tree args)
++{
++	tree oldattr;
++
++	if (type == NULL_TREE)
++		return;
++	oldattr = lookup_attribute(attr, TYPE_ATTRIBUTES(type));
++	if (oldattr != NULL_TREE) {
++		gcc_assert(TREE_VALUE(oldattr) == args || TREE_VALUE(TREE_VALUE(oldattr)) == TREE_VALUE(args));
++		return;
++	}
++
++	TYPE_ATTRIBUTES(type) = copy_list(TYPE_ATTRIBUTES(type));
++	TYPE_ATTRIBUTES(type) = tree_cons(get_identifier(attr), args, TYPE_ATTRIBUTES(type));
++}
++
++static inline void add_type_attr(tree type, const char *attr, tree args)
++{
++	tree main_variant = TYPE_MAIN_VARIANT(type);
++
++	__add_type_attr(TYPE_CANONICAL(type), attr, args);
++	__add_type_attr(TYPE_CANONICAL(main_variant), attr, args);
++	__add_type_attr(main_variant, attr, args);
++
++	for (type = TYPE_NEXT_VARIANT(main_variant); type; type = TYPE_NEXT_VARIANT(type)) {
++		if (!lookup_attribute(attr, TYPE_ATTRIBUTES(type)))
++			TYPE_ATTRIBUTES(type) = TYPE_ATTRIBUTES(main_variant);
++
++		__add_type_attr(TYPE_CANONICAL(type), attr, args);
++	}
++}
++
+ #define PASS_INFO(NAME, REF, ID, POS)		\
+ struct register_pass_info NAME##_pass_info = {	\
+ 	.pass = make_##NAME##_pass(),		\
+diff --git a/scripts/gcc-plugins/randomize_layout_plugin.c b/scripts/gcc-plugins/randomize_layout_plugin.c
+index 5694df3da2e95b..ff65a4f87f240a 100644
+--- a/scripts/gcc-plugins/randomize_layout_plugin.c
++++ b/scripts/gcc-plugins/randomize_layout_plugin.c
+@@ -73,6 +73,9 @@ static tree handle_randomize_layout_attr(tree *node, tree name, tree args, int f
+ 
+ 	if (TYPE_P(*node)) {
+ 		type = *node;
++	} else if (TREE_CODE(*node) == FIELD_DECL) {
++		*no_add_attrs = false;
++		return NULL_TREE;
+ 	} else {
+ 		gcc_assert(TREE_CODE(*node) == TYPE_DECL);
+ 		type = TREE_TYPE(*node);
+@@ -344,35 +347,18 @@ static int relayout_struct(tree type)
+ 
+ 	shuffle(type, (tree *)newtree, shuffle_length);
+ 
+-	/*
+-	 * set up a bogus anonymous struct field designed to error out on unnamed struct initializers
+-	 * as gcc provides no other way to detect such code
+-	 */
+-	list = make_node(FIELD_DECL);
+-	TREE_CHAIN(list) = newtree[0];
+-	TREE_TYPE(list) = void_type_node;
+-	DECL_SIZE(list) = bitsize_zero_node;
+-	DECL_NONADDRESSABLE_P(list) = 1;
+-	DECL_FIELD_BIT_OFFSET(list) = bitsize_zero_node;
+-	DECL_SIZE_UNIT(list) = size_zero_node;
+-	DECL_FIELD_OFFSET(list) = size_zero_node;
+-	DECL_CONTEXT(list) = type;
+-	// to satisfy the constify plugin
+-	TREE_READONLY(list) = 1;
+-
+ 	for (i = 0; i < num_fields - 1; i++)
+ 		TREE_CHAIN(newtree[i]) = newtree[i+1];
+ 	TREE_CHAIN(newtree[num_fields - 1]) = NULL_TREE;
+ 
++	add_type_attr(type, "randomize_performed", NULL_TREE);
++	add_type_attr(type, "designated_init", NULL_TREE);
++	if (has_flexarray)
++		add_type_attr(type, "has_flexarray", NULL_TREE);
++
+ 	main_variant = TYPE_MAIN_VARIANT(type);
+-	for (variant = main_variant; variant; variant = TYPE_NEXT_VARIANT(variant)) {
+-		TYPE_FIELDS(variant) = list;
+-		TYPE_ATTRIBUTES(variant) = copy_list(TYPE_ATTRIBUTES(variant));
+-		TYPE_ATTRIBUTES(variant) = tree_cons(get_identifier("randomize_performed"), NULL_TREE, TYPE_ATTRIBUTES(variant));
+-		TYPE_ATTRIBUTES(variant) = tree_cons(get_identifier("designated_init"), NULL_TREE, TYPE_ATTRIBUTES(variant));
+-		if (has_flexarray)
+-			TYPE_ATTRIBUTES(type) = tree_cons(get_identifier("has_flexarray"), NULL_TREE, TYPE_ATTRIBUTES(type));
+-	}
++	for (variant = main_variant; variant; variant = TYPE_NEXT_VARIANT(variant))
++		TYPE_FIELDS(variant) = newtree[0];
+ 
+ 	/*
+ 	 * force a re-layout of the main variant
+@@ -440,10 +426,8 @@ static void randomize_type(tree type)
+ 	if (lookup_attribute("randomize_layout", TYPE_ATTRIBUTES(TYPE_MAIN_VARIANT(type))) || is_pure_ops_struct(type))
+ 		relayout_struct(type);
+ 
+-	for (variant = TYPE_MAIN_VARIANT(type); variant; variant = TYPE_NEXT_VARIANT(variant)) {
+-		TYPE_ATTRIBUTES(type) = copy_list(TYPE_ATTRIBUTES(type));
+-		TYPE_ATTRIBUTES(type) = tree_cons(get_identifier("randomize_considered"), NULL_TREE, TYPE_ATTRIBUTES(type));
+-	}
++	add_type_attr(type, "randomize_considered", NULL_TREE);
++
+ #ifdef __DEBUG_PLUGIN
+ 	fprintf(stderr, "Marking randomize_considered on struct %s\n", ORIG_TYPE_NAME(type));
+ #ifdef __DEBUG_VERBOSE
+diff --git a/sound/core/seq_device.c b/sound/core/seq_device.c
+index 4492be5d2317c7..bac9f860373425 100644
+--- a/sound/core/seq_device.c
++++ b/sound/core/seq_device.c
+@@ -43,7 +43,7 @@ MODULE_LICENSE("GPL");
+ static int snd_seq_bus_match(struct device *dev, const struct device_driver *drv)
+ {
+ 	struct snd_seq_device *sdev = to_seq_dev(dev);
+-	struct snd_seq_driver *sdrv = to_seq_drv(drv);
++	const struct snd_seq_driver *sdrv = to_seq_drv(drv);
+ 
+ 	return strcmp(sdrv->id, sdev->id) == 0 &&
+ 		sdrv->argsize == sdev->argsize;
+diff --git a/sound/hda/hda_bus_type.c b/sound/hda/hda_bus_type.c
+index 7545ace7b0ee4b..eb72a7af2e56e8 100644
+--- a/sound/hda/hda_bus_type.c
++++ b/sound/hda/hda_bus_type.c
+@@ -21,7 +21,7 @@ MODULE_LICENSE("GPL");
+  * driver id_table and returns the matching device id entry.
+  */
+ const struct hda_device_id *
+-hdac_get_device_id(struct hdac_device *hdev, struct hdac_driver *drv)
++hdac_get_device_id(struct hdac_device *hdev, const struct hdac_driver *drv)
+ {
+ 	if (drv->id_table) {
+ 		const struct hda_device_id *id  = drv->id_table;
+@@ -38,7 +38,7 @@ hdac_get_device_id(struct hdac_device *hdev, struct hdac_driver *drv)
+ }
+ EXPORT_SYMBOL_GPL(hdac_get_device_id);
+ 
+-static int hdac_codec_match(struct hdac_device *dev, struct hdac_driver *drv)
++static int hdac_codec_match(struct hdac_device *dev, const struct hdac_driver *drv)
+ {
+ 	if (hdac_get_device_id(dev, drv))
+ 		return 1;
+@@ -49,7 +49,7 @@ static int hdac_codec_match(struct hdac_device *dev, struct hdac_driver *drv)
+ static int hda_bus_match(struct device *dev, const struct device_driver *drv)
+ {
+ 	struct hdac_device *hdev = dev_to_hdac_dev(dev);
+-	struct hdac_driver *hdrv = drv_to_hdac_driver(drv);
++	const struct hdac_driver *hdrv = drv_to_hdac_driver(drv);
+ 
+ 	if (hdev->type != hdrv->type)
+ 		return 0;
+diff --git a/sound/pci/hda/hda_bind.c b/sound/pci/hda/hda_bind.c
+index b7ca2a83fbb086..90633970b59f72 100644
+--- a/sound/pci/hda/hda_bind.c
++++ b/sound/pci/hda/hda_bind.c
+@@ -18,10 +18,10 @@
+ /*
+  * find a matching codec id
+  */
+-static int hda_codec_match(struct hdac_device *dev, struct hdac_driver *drv)
++static int hda_codec_match(struct hdac_device *dev, const struct hdac_driver *drv)
+ {
+ 	struct hda_codec *codec = container_of(dev, struct hda_codec, core);
+-	struct hda_codec_driver *driver =
++	const struct hda_codec_driver *driver =
+ 		container_of(drv, struct hda_codec_driver, core);
+ 	const struct hda_device_id *list;
+ 	/* check probe_id instead of vendor_id if set */
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index dce56809120068..e714e91c271217 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -7616,6 +7616,24 @@ static void alc245_fixup_hp_spectre_x360_16_aa0xxx(struct hda_codec *codec,
+ 	alc245_fixup_hp_gpio_led(codec, fix, action);
+ }
+ 
++static void alc245_fixup_hp_zbook_firefly_g12a(struct hda_codec *codec,
++					  const struct hda_fixup *fix, int action)
++{
++	struct alc_spec *spec = codec->spec;
++	static const hda_nid_t conn[] = { 0x02 };
++
++	switch (action) {
++	case HDA_FIXUP_ACT_PRE_PROBE:
++		spec->gen.auto_mute_via_amp = 1;
++		snd_hda_override_conn_list(codec, 0x17, ARRAY_SIZE(conn), conn);
++		break;
++	}
++
++	cs35l41_fixup_i2c_two(codec, fix, action);
++	alc245_fixup_hp_mute_led_coefbit(codec, fix, action);
++	alc285_fixup_hp_coef_micmute_led(codec, fix, action);
++}
++
+ /*
+  * ALC287 PCM hooks
+  */
+@@ -7963,6 +7981,7 @@ enum {
+ 	ALC256_FIXUP_HEADPHONE_AMP_VOL,
+ 	ALC245_FIXUP_HP_SPECTRE_X360_EU0XXX,
+ 	ALC245_FIXUP_HP_SPECTRE_X360_16_AA0XXX,
++	ALC245_FIXUP_HP_ZBOOK_FIREFLY_G12A,
+ 	ALC285_FIXUP_ASUS_GA403U,
+ 	ALC285_FIXUP_ASUS_GA403U_HEADSET_MIC,
+ 	ALC285_FIXUP_ASUS_GA403U_I2C_SPEAKER2_TO_DAC1,
+@@ -10251,6 +10270,10 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = alc245_fixup_hp_spectre_x360_16_aa0xxx,
+ 	},
++	[ALC245_FIXUP_HP_ZBOOK_FIREFLY_G12A] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc245_fixup_hp_zbook_firefly_g12a,
++	},
+ 	[ALC285_FIXUP_ASUS_GA403U] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = alc285_fixup_asus_ga403u,
+@@ -10770,11 +10793,50 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8cf5, "HP ZBook Studio 16", ALC245_FIXUP_CS35L41_SPI_4_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8d01, "HP ZBook Power 14 G12", ALC285_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8d84, "HP EliteBook X G1i", ALC285_FIXUP_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8d85, "HP EliteBook 14 G12", ALC285_FIXUP_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8d86, "HP Elite X360 14 G12", ALC285_FIXUP_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8d8c, "HP EliteBook 13 G12", ALC285_FIXUP_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8d8d, "HP Elite X360 13 G12", ALC285_FIXUP_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8d8e, "HP EliteBook 14 G12", ALC285_FIXUP_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8d8f, "HP EliteBook 14 G12", ALC285_FIXUP_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8d90, "HP EliteBook 16 G12", ALC285_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8d91, "HP ZBook Firefly 14 G12", ALC285_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8d92, "HP ZBook Firefly 16 G12", ALC285_FIXUP_HP_GPIO_LED),
+-	SND_PCI_QUIRK(0x103c, 0x8e18, "HP ZBook Firefly 14 G12A", ALC285_FIXUP_HP_GPIO_LED),
+-	SND_PCI_QUIRK(0x103c, 0x8e19, "HP ZBook Firefly 14 G12A", ALC285_FIXUP_HP_GPIO_LED),
+-	SND_PCI_QUIRK(0x103c, 0x8e1a, "HP ZBook Firefly 14 G12A", ALC285_FIXUP_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8d9b, "HP 17 Turbine OmniBook 7 UMA", ALC287_FIXUP_CS35L41_I2C_2),
++	SND_PCI_QUIRK(0x103c, 0x8d9c, "HP 17 Turbine OmniBook 7 DIS", ALC287_FIXUP_CS35L41_I2C_2),
++	SND_PCI_QUIRK(0x103c, 0x8d9d, "HP 17 Turbine OmniBook X UMA", ALC287_FIXUP_CS35L41_I2C_2),
++	SND_PCI_QUIRK(0x103c, 0x8d9e, "HP 17 Turbine OmniBook X DIS", ALC287_FIXUP_CS35L41_I2C_2),
++	SND_PCI_QUIRK(0x103c, 0x8d9f, "HP 14 Cadet (x360)", ALC287_FIXUP_CS35L41_I2C_2),
++	SND_PCI_QUIRK(0x103c, 0x8da0, "HP 16 Clipper OmniBook 7(X360)", ALC287_FIXUP_CS35L41_I2C_2),
++	SND_PCI_QUIRK(0x103c, 0x8da1, "HP 16 Clipper OmniBook X", ALC287_FIXUP_CS35L41_I2C_2),
++	SND_PCI_QUIRK(0x103c, 0x8da7, "HP 14 Enstrom OmniBook X", ALC287_FIXUP_CS35L41_I2C_2),
++	SND_PCI_QUIRK(0x103c, 0x8da8, "HP 16 Piston OmniBook X", ALC287_FIXUP_CS35L41_I2C_2),
++	SND_PCI_QUIRK(0x103c, 0x8dec, "HP EliteBook 640 G12", ALC236_FIXUP_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8dee, "HP EliteBook 660 G12", ALC236_FIXUP_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8df0, "HP EliteBook 630 G12", ALC236_FIXUP_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8dfc, "HP EliteBook 645 G12", ALC236_FIXUP_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8dfe, "HP EliteBook 665 G12", ALC236_FIXUP_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8e11, "HP Trekker", ALC287_FIXUP_CS35L41_I2C_2),
++	SND_PCI_QUIRK(0x103c, 0x8e12, "HP Trekker", ALC287_FIXUP_CS35L41_I2C_2),
++	SND_PCI_QUIRK(0x103c, 0x8e13, "HP Trekker", ALC287_FIXUP_CS35L41_I2C_2),
++	SND_PCI_QUIRK(0x103c, 0x8e14, "HP ZBook Firefly 14 G12", ALC245_FIXUP_HP_ZBOOK_FIREFLY_G12A),
++	SND_PCI_QUIRK(0x103c, 0x8e15, "HP ZBook Firefly 14 G12", ALC245_FIXUP_HP_ZBOOK_FIREFLY_G12A),
++	SND_PCI_QUIRK(0x103c, 0x8e16, "HP ZBook Firefly 14 G12", ALC245_FIXUP_HP_ZBOOK_FIREFLY_G12A),
++	SND_PCI_QUIRK(0x103c, 0x8e17, "HP ZBook Firefly 14 G12", ALC245_FIXUP_HP_ZBOOK_FIREFLY_G12A),
++	SND_PCI_QUIRK(0x103c, 0x8e18, "HP ZBook Firefly 14 G12A", ALC245_FIXUP_HP_ZBOOK_FIREFLY_G12A),
++	SND_PCI_QUIRK(0x103c, 0x8e19, "HP ZBook Firefly 14 G12A", ALC245_FIXUP_HP_ZBOOK_FIREFLY_G12A),
++	SND_PCI_QUIRK(0x103c, 0x8e1a, "HP ZBook Firefly 14 G12A", ALC245_FIXUP_HP_ZBOOK_FIREFLY_G12A),
++	SND_PCI_QUIRK(0x103c, 0x8e1b, "HP EliteBook G12", ALC245_FIXUP_HP_ZBOOK_FIREFLY_G12A),
++	SND_PCI_QUIRK(0x103c, 0x8e1c, "HP EliteBook G12", ALC245_FIXUP_HP_ZBOOK_FIREFLY_G12A),
++	SND_PCI_QUIRK(0x103c, 0x8e1d, "HP ZBook X Gli 16 G12", ALC236_FIXUP_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8e2c, "HP EliteBook 16 G12", ALC285_FIXUP_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8e36, "HP 14 Enstrom OmniBook X", ALC287_FIXUP_CS35L41_I2C_2),
++	SND_PCI_QUIRK(0x103c, 0x8e37, "HP 16 Piston OmniBook X", ALC287_FIXUP_CS35L41_I2C_2),
++	SND_PCI_QUIRK(0x103c, 0x8e3a, "HP Agusta", ALC287_FIXUP_CS35L41_I2C_2),
++	SND_PCI_QUIRK(0x103c, 0x8e3b, "HP Agusta", ALC287_FIXUP_CS35L41_I2C_2),
++	SND_PCI_QUIRK(0x103c, 0x8e60, "HP Trekker ", ALC287_FIXUP_CS35L41_I2C_2),
++	SND_PCI_QUIRK(0x103c, 0x8e61, "HP Trekker ", ALC287_FIXUP_CS35L41_I2C_2),
++	SND_PCI_QUIRK(0x103c, 0x8e62, "HP Trekker ", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
+ 	SND_PCI_QUIRK(0x1043, 0x1054, "ASUS G614FH/FM/FP", ALC287_FIXUP_CS35L41_I2C_2),
+diff --git a/sound/soc/apple/mca.c b/sound/soc/apple/mca.c
+index c9e7d40c47cc1c..4a4ec1c09e1329 100644
+--- a/sound/soc/apple/mca.c
++++ b/sound/soc/apple/mca.c
+@@ -464,6 +464,28 @@ static int mca_configure_serdes(struct mca_cluster *cl, int serdes_unit,
+ 	return -EINVAL;
+ }
+ 
++static int mca_fe_startup(struct snd_pcm_substream *substream,
++			  struct snd_soc_dai *dai)
++{
++	struct mca_cluster *cl = mca_dai_to_cluster(dai);
++	unsigned int mask, nchannels;
++
++	if (cl->tdm_slots) {
++		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
++			mask = cl->tdm_tx_mask;
++		else
++			mask = cl->tdm_rx_mask;
++
++		nchannels = hweight32(mask);
++	} else {
++		nchannels = 2;
++	}
++
++	return snd_pcm_hw_constraint_minmax(substream->runtime,
++					    SNDRV_PCM_HW_PARAM_CHANNELS,
++					    1, nchannels);
++}
++
+ static int mca_fe_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
+ 			       unsigned int rx_mask, int slots, int slot_width)
+ {
+@@ -680,6 +702,7 @@ static int mca_fe_hw_params(struct snd_pcm_substream *substream,
+ }
+ 
+ static const struct snd_soc_dai_ops mca_fe_ops = {
++	.startup = mca_fe_startup,
+ 	.set_fmt = mca_fe_set_fmt,
+ 	.set_bclk_ratio = mca_set_bclk_ratio,
+ 	.set_tdm_slot = mca_fe_set_tdm_slot,
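
The new startup hook above constrains the stream's channel count to the number of
bits set in the active TDM mask, falling back to stereo when no TDM slots were
configured. A user-space sketch of that mapping (illustrative only, not kernel
code; hweight32() is approximated with a compiler builtin):

  #include <stdio.h>

  /* Illustrative stand-in for the kernel's hweight32() */
  static unsigned int hweight32(unsigned int w)
  {
      return (unsigned int)__builtin_popcount(w);
  }

  static unsigned int mca_max_channels(unsigned int tdm_mask, int have_tdm)
  {
      /* no TDM configuration falls back to plain stereo */
      return have_tdm ? hweight32(tdm_mask) : 2;
  }

  int main(void)
  {
      printf("mask 0x0f -> %u channels\n", mca_max_channels(0x0f, 1)); /* 4 */
      printf("no tdm    -> %u channels\n", mca_max_channels(0, 0));    /* 2 */
      return 0;
  }
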
+diff --git a/sound/soc/codecs/hda.c b/sound/soc/codecs/hda.c
+index ddc00927313cfe..dc7794c9ac44ce 100644
+--- a/sound/soc/codecs/hda.c
++++ b/sound/soc/codecs/hda.c
+@@ -152,7 +152,7 @@ int hda_codec_probe_complete(struct hda_codec *codec)
+ 	ret = snd_hda_codec_build_controls(codec);
+ 	if (ret < 0) {
+ 		dev_err(&hdev->dev, "unable to create controls %d\n", ret);
+-		goto out;
++		return ret;
+ 	}
+ 
+ 	/* Bus suspended codecs as it does not manage their pm */
+@@ -160,7 +160,7 @@ int hda_codec_probe_complete(struct hda_codec *codec)
+ 	/* rpm was forbidden in snd_hda_codec_device_new() */
+ 	snd_hda_codec_set_power_save(codec, 2000);
+ 	snd_hda_codec_register(codec);
+-out:
++
+ 	/* Complement pm_runtime_get_sync(bus) in probe */
+ 	pm_runtime_mark_last_busy(bus->dev);
+ 	pm_runtime_put_autosuspend(bus->dev);
+diff --git a/sound/soc/codecs/tas2764.c b/sound/soc/codecs/tas2764.c
+index 39a7d39536fe6f..4326555aac032d 100644
+--- a/sound/soc/codecs/tas2764.c
++++ b/sound/soc/codecs/tas2764.c
+@@ -540,7 +540,7 @@ static int tas2764_codec_probe(struct snd_soc_component *component)
+ 	tas2764_reset(tas2764);
+ 
+ 	if (tas2764->irq) {
+-		ret = snd_soc_component_write(tas2764->component, TAS2764_INT_MASK0, 0xff);
++		ret = snd_soc_component_write(tas2764->component, TAS2764_INT_MASK0, 0x00);
+ 		if (ret < 0)
+ 			return ret;
+ 
+diff --git a/sound/soc/intel/avs/debugfs.c b/sound/soc/intel/avs/debugfs.c
+index 1767ded4d98307..c9978fb9c74e2b 100644
+--- a/sound/soc/intel/avs/debugfs.c
++++ b/sound/soc/intel/avs/debugfs.c
+@@ -372,7 +372,10 @@ static ssize_t trace_control_write(struct file *file, const char __user *from, s
+ 		return ret;
+ 
+ 	num_elems = *array;
+-	resource_mask = array[1];
++	if (!num_elems) {
++		ret = -EINVAL;
++		goto free_array;
++	}
+ 
+ 	/*
+ 	 * Disable if just resource mask is provided - no log priority flags.
+@@ -380,6 +383,7 @@ static ssize_t trace_control_write(struct file *file, const char __user *from, s
+ 	 * Enable input format:   mask, prio1, .., prioN
+ 	 * Where 'N' equals number of bits set in the 'mask'.
+ 	 */
++	resource_mask = array[1];
+ 	if (num_elems == 1) {
+ 		ret = disable_logs(adev, resource_mask);
+ 	} else {
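
The reordering above matters because array[0] is an element count supplied by
user space: reading array[1] before validating the count dereferences data that
may not be there. A hedged user-space sketch of the expected payload layout
(names are illustrative, not the driver's):

  #include <errno.h>
  #include <stdint.h>
  #include <stdio.h>

  /* array[0] = element count, array[1] = resource mask,
   * array[2..] = one log priority per bit set in the mask. */
  static int parse_trace_control(const uint32_t *array)
  {
      uint32_t num_elems = array[0];

      if (!num_elems)
          return -EINVAL;    /* empty payload: array[1] must not be read */

      uint32_t resource_mask = array[1];    /* safe only after the check */

      if (num_elems == 1)
          printf("disable logs, mask 0x%x\n", resource_mask);
      else
          printf("enable logs, mask 0x%x, %u priorities\n",
                 resource_mask, num_elems - 1);
      return 0;
  }

  int main(void)
  {
      const uint32_t disable_req[] = { 1, 0x3 };        /* mask only */
      const uint32_t enable_req[]  = { 3, 0x3, 4, 4 };  /* mask + 2 priorities */

      parse_trace_control(disable_req);
      parse_trace_control(enable_req);
      return 0;
  }
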
+diff --git a/sound/soc/intel/avs/ipc.c b/sound/soc/intel/avs/ipc.c
+index 4fba46e77c470e..eff1d46040da69 100644
+--- a/sound/soc/intel/avs/ipc.c
++++ b/sound/soc/intel/avs/ipc.c
+@@ -169,7 +169,9 @@ static void avs_dsp_exception_caught(struct avs_dev *adev, union avs_notify_msg
+ 
+ 	dev_crit(adev->dev, "communication severed, rebooting dsp..\n");
+ 
+-	cancel_delayed_work_sync(&ipc->d0ix_work);
++	/* Avoid deadlock as the exception may be the response to SET_D0IX. */
++	if (current_work() != &ipc->d0ix_work.work)
++		cancel_delayed_work_sync(&ipc->d0ix_work);
+ 	ipc->in_d0ix = false;
+ 	/* Re-enabled on recovery completion. */
+ 	pm_runtime_disable(adev->dev);
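
cancel_delayed_work_sync() blocks until the work handler finishes, so invoking
it from that same handler (here, when the exception notification arrives as the
response to a SET_D0IX request issued by d0ix_work) never returns. The guard
pattern in isolation, as a kernel-style sketch assuming a driver context:

  #include <linux/workqueue.h>

  struct my_ctx {
      struct delayed_work d0ix_work;
  };

  /* Teardown path that may also run from within d0ix_work itself:
   * cancel_delayed_work_sync() would wait forever for the currently
   * running handler, so skip the cancel when we *are* that handler. */
  static void my_teardown(struct my_ctx *ctx)
  {
      if (current_work() != &ctx->d0ix_work.work)
          cancel_delayed_work_sync(&ctx->d0ix_work);
  }
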
+diff --git a/sound/soc/mediatek/mt8195/mt8195-mt6359.c b/sound/soc/mediatek/mt8195/mt8195-mt6359.c
+index 8ebf6c7502aa3d..400cec09c3a3c8 100644
+--- a/sound/soc/mediatek/mt8195/mt8195-mt6359.c
++++ b/sound/soc/mediatek/mt8195/mt8195-mt6359.c
+@@ -822,12 +822,12 @@ SND_SOC_DAILINK_DEFS(ETDM1_IN_BE,
+ 
+ SND_SOC_DAILINK_DEFS(ETDM2_IN_BE,
+ 		     DAILINK_COMP_ARRAY(COMP_CPU("ETDM2_IN")),
+-		     DAILINK_COMP_ARRAY(COMP_EMPTY()),
++		     DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ 		     DAILINK_COMP_ARRAY(COMP_EMPTY()));
+ 
+ SND_SOC_DAILINK_DEFS(ETDM1_OUT_BE,
+ 		     DAILINK_COMP_ARRAY(COMP_CPU("ETDM1_OUT")),
+-		     DAILINK_COMP_ARRAY(COMP_EMPTY()),
++		     DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ 		     DAILINK_COMP_ARRAY(COMP_EMPTY()));
+ 
+ SND_SOC_DAILINK_DEFS(ETDM2_OUT_BE,
+diff --git a/sound/soc/sof/amd/pci-acp70.c b/sound/soc/sof/amd/pci-acp70.c
+index a5d8b6a95a2226..fe2ad0395f5d36 100644
+--- a/sound/soc/sof/amd/pci-acp70.c
++++ b/sound/soc/sof/amd/pci-acp70.c
+@@ -34,6 +34,7 @@ static const struct sof_amd_acp_desc acp70_chip_info = {
+ 	.ext_intr_cntl = ACP70_EXTERNAL_INTR_CNTL,
+ 	.ext_intr_stat	= ACP70_EXT_INTR_STAT,
+ 	.ext_intr_stat1	= ACP70_EXT_INTR_STAT1,
++	.acp_error_stat = ACP70_ERROR_STATUS,
+ 	.dsp_intr_base	= ACP70_DSP_SW_INTR_BASE,
+ 	.acp_sw0_i2s_err_reason = ACP7X_SW0_I2S_ERROR_REASON,
+ 	.sram_pte_offset = ACP70_SRAM_PTE_OFFSET,
+diff --git a/sound/soc/sof/ipc4-pcm.c b/sound/soc/sof/ipc4-pcm.c
+index 2fe4969cdc3b47..9db2cdb3212822 100644
+--- a/sound/soc/sof/ipc4-pcm.c
++++ b/sound/soc/sof/ipc4-pcm.c
+@@ -780,7 +780,8 @@ static int sof_ipc4_pcm_setup(struct snd_sof_dev *sdev, struct snd_sof_pcm *spcm
+ 
+ 		/* allocate memory for max number of pipeline IDs */
+ 		pipeline_list->pipelines = kcalloc(ipc4_data->max_num_pipelines,
+-						   sizeof(struct snd_sof_widget *), GFP_KERNEL);
++						   sizeof(*pipeline_list->pipelines),
++						   GFP_KERNEL);
+ 		if (!pipeline_list->pipelines) {
+ 			sof_ipc4_pcm_free(sdev, spcm);
+ 			return -ENOMEM;
+diff --git a/sound/soc/ti/omap-hdmi.c b/sound/soc/ti/omap-hdmi.c
+index cf43ac19c4a6d0..55e7cb96858fca 100644
+--- a/sound/soc/ti/omap-hdmi.c
++++ b/sound/soc/ti/omap-hdmi.c
+@@ -361,17 +361,20 @@ static int omap_hdmi_audio_probe(struct platform_device *pdev)
+ 	if (!card->dai_link)
+ 		return -ENOMEM;
+ 
+-	compnent = devm_kzalloc(dev, sizeof(*compnent), GFP_KERNEL);
++	compnent = devm_kzalloc(dev, 2 * sizeof(*compnent), GFP_KERNEL);
+ 	if (!compnent)
+ 		return -ENOMEM;
+-	card->dai_link->cpus		= compnent;
++	card->dai_link->cpus		= &compnent[0];
+ 	card->dai_link->num_cpus	= 1;
+ 	card->dai_link->codecs		= &snd_soc_dummy_dlc;
+ 	card->dai_link->num_codecs	= 1;
++	card->dai_link->platforms	= &compnent[1];
++	card->dai_link->num_platforms	= 1;
+ 
+ 	card->dai_link->name = card->name;
+ 	card->dai_link->stream_name = card->name;
+ 	card->dai_link->cpus->dai_name = dev_name(ad->dssdev);
++	card->dai_link->platforms->name = dev_name(ad->dssdev);
+ 	card->num_links = 1;
+ 	card->dev = dev;
+ 
+diff --git a/sound/usb/implicit.c b/sound/usb/implicit.c
+index 4727043fd74580..77f06da93151e8 100644
+--- a/sound/usb/implicit.c
++++ b/sound/usb/implicit.c
+@@ -57,6 +57,7 @@ static const struct snd_usb_implicit_fb_match playback_implicit_fb_quirks[] = {
+ 	IMPLICIT_FB_FIXED_DEV(0x31e9, 0x0002, 0x81, 2), /* Solid State Logic SSL2+ */
+ 	IMPLICIT_FB_FIXED_DEV(0x0499, 0x172f, 0x81, 2), /* Steinberg UR22C */
+ 	IMPLICIT_FB_FIXED_DEV(0x0d9a, 0x00df, 0x81, 2), /* RTX6001 */
++	IMPLICIT_FB_FIXED_DEV(0x19f7, 0x000a, 0x84, 3), /* RODE AI-1 */
+ 	IMPLICIT_FB_FIXED_DEV(0x22f0, 0x0006, 0x81, 3), /* Allen&Heath Qu-16 */
+ 	IMPLICIT_FB_FIXED_DEV(0x1686, 0xf029, 0x82, 2), /* Zoom UAC-2 */
+ 	IMPLICIT_FB_FIXED_DEV(0x2466, 0x8003, 0x86, 2), /* Fractal Audio Axe-Fx II */
+diff --git a/tools/arch/x86/kcpuid/kcpuid.c b/tools/arch/x86/kcpuid/kcpuid.c
+index 1b25c0a95d3f9a..40a9e59c2fd568 100644
+--- a/tools/arch/x86/kcpuid/kcpuid.c
++++ b/tools/arch/x86/kcpuid/kcpuid.c
+@@ -1,11 +1,12 @@
+ // SPDX-License-Identifier: GPL-2.0
+ #define _GNU_SOURCE
+ 
+-#include <stdio.h>
++#include <err.h>
++#include <getopt.h>
+ #include <stdbool.h>
++#include <stdio.h>
+ #include <stdlib.h>
+ #include <string.h>
+-#include <getopt.h>
+ 
+ #define ARRAY_SIZE(x)	(sizeof(x) / sizeof((x)[0]))
+ #define min(a, b)	(((a) < (b)) ? (a) : (b))
+@@ -145,14 +146,14 @@ static bool cpuid_store(struct cpuid_range *range, u32 f, int subleaf,
+ 	if (!func->leafs) {
+ 		func->leafs = malloc(sizeof(struct subleaf));
+ 		if (!func->leafs)
+-			perror("malloc func leaf");
++			err(EXIT_FAILURE, NULL);
+ 
+ 		func->nr = 1;
+ 	} else {
+ 		s = func->nr;
+ 		func->leafs = realloc(func->leafs, (s + 1) * sizeof(*leaf));
+ 		if (!func->leafs)
+-			perror("realloc f->leafs");
++			err(EXIT_FAILURE, NULL);
+ 
+ 		func->nr++;
+ 	}
+@@ -211,7 +212,7 @@ struct cpuid_range *setup_cpuid_range(u32 input_eax)
+ 
+ 	range = malloc(sizeof(struct cpuid_range));
+ 	if (!range)
+-		perror("malloc range");
++		err(EXIT_FAILURE, NULL);
+ 
+ 	if (input_eax & 0x80000000)
+ 		range->is_ext = true;
+@@ -220,7 +221,7 @@ struct cpuid_range *setup_cpuid_range(u32 input_eax)
+ 
+ 	range->funcs = malloc(sizeof(struct cpuid_func) * idx_func);
+ 	if (!range->funcs)
+-		perror("malloc range->funcs");
++		err(EXIT_FAILURE, NULL);
+ 
+ 	range->nr = idx_func;
+ 	memset(range->funcs, 0, sizeof(struct cpuid_func) * idx_func);
+@@ -395,8 +396,8 @@ static int parse_line(char *line)
+ 	return 0;
+ 
+ err_exit:
+-	printf("Warning: wrong line format:\n");
+-	printf("\tline[%d]: %s\n", flines, line);
++	warnx("Wrong line format:\n"
++	      "\tline[%d]: %s", flines, line);
+ 	return -1;
+ }
+ 
+@@ -418,10 +419,8 @@ static void parse_text(void)
+ 		file = fopen("./cpuid.csv", "r");
+ 	}
+ 
+-	if (!file) {
+-		printf("Fail to open '%s'\n", filename);
+-		return;
+-	}
++	if (!file)
++		err(EXIT_FAILURE, "%s", filename);
+ 
+ 	while (1) {
+ 		ret = getline(&line, &len, file);
+@@ -530,7 +529,7 @@ static inline struct cpuid_func *index_to_func(u32 index)
+ 	func_idx = index & 0xffff;
+ 
+ 	if ((func_idx + 1) > (u32)range->nr) {
+-		printf("ERR: invalid input index (0x%x)\n", index);
++		warnx("Invalid input index (0x%x)", index);
+ 		return NULL;
+ 	}
+ 	return &range->funcs[func_idx];
+@@ -562,7 +561,7 @@ static void show_info(void)
+ 				return;
+ 			}
+ 
+-			printf("ERR: invalid input subleaf (0x%x)\n", user_sub);
++			warnx("Invalid input subleaf (0x%x)", user_sub);
+ 		}
+ 
+ 		show_func(func);
+@@ -593,15 +592,15 @@ static void setup_platform_cpuid(void)
+ 
+ static void usage(void)
+ {
+-	printf("kcpuid [-abdfhr] [-l leaf] [-s subleaf]\n"
+-		"\t-a|--all             Show both bit flags and complex bit fields info\n"
+-		"\t-b|--bitflags        Show boolean flags only\n"
+-		"\t-d|--detail          Show details of the flag/fields (default)\n"
+-		"\t-f|--flags           Specify the cpuid csv file\n"
+-		"\t-h|--help            Show usage info\n"
+-		"\t-l|--leaf=index      Specify the leaf you want to check\n"
+-		"\t-r|--raw             Show raw cpuid data\n"
+-		"\t-s|--subleaf=sub     Specify the subleaf you want to check\n"
++	warnx("kcpuid [-abdfhr] [-l leaf] [-s subleaf]\n"
++	      "\t-a|--all             Show both bit flags and complex bit fields info\n"
++	      "\t-b|--bitflags        Show boolean flags only\n"
++	      "\t-d|--detail          Show details of the flag/fields (default)\n"
++	      "\t-f|--flags           Specify the CPUID CSV file\n"
++	      "\t-h|--help            Show usage info\n"
++	      "\t-l|--leaf=index      Specify the leaf you want to check\n"
++	      "\t-r|--raw             Show raw CPUID data\n"
++	      "\t-s|--subleaf=sub     Specify the subleaf you want to check"
+ 	);
+ }
+ 
+@@ -652,7 +651,7 @@ static int parse_options(int argc, char *argv[])
+ 			user_sub = strtoul(optarg, NULL, 0);
+ 			break;
+ 		default:
+-			printf("%s: Invalid option '%c'\n", argv[0], optopt);
++			warnx("Invalid option '%c'", optopt);
+ 			return -1;
+ 	}
+ 
+diff --git a/tools/arch/x86/lib/x86-opcode-map.txt b/tools/arch/x86/lib/x86-opcode-map.txt
+index f5dd84eb55dcda..cd3fd5155f6ece 100644
+--- a/tools/arch/x86/lib/x86-opcode-map.txt
++++ b/tools/arch/x86/lib/x86-opcode-map.txt
+@@ -35,7 +35,7 @@
+ #  - (!F3) : the last prefix is not 0xF3 (including non-last prefix case)
+ #  - (66&F2): Both 0x66 and 0xF2 prefixes are specified.
+ #
+-# REX2 Prefix
++# REX2 Prefix Superscripts
+ #  - (!REX2): REX2 is not allowed
+ #  - (REX2): REX2 variant e.g. JMPABS
+ 
+@@ -286,10 +286,10 @@ df: ESC
+ # Note: "forced64" is Intel CPU behavior: they ignore 0x66 prefix
+ # in 64-bit mode. AMD CPUs accept 0x66 prefix, it causes RIP truncation
+ # to 16 bits. In 32-bit mode, 0x66 is accepted by both Intel and AMD.
+-e0: LOOPNE/LOOPNZ Jb (f64) (!REX2)
+-e1: LOOPE/LOOPZ Jb (f64) (!REX2)
+-e2: LOOP Jb (f64) (!REX2)
+-e3: JrCXZ Jb (f64) (!REX2)
++e0: LOOPNE/LOOPNZ Jb (f64),(!REX2)
++e1: LOOPE/LOOPZ Jb (f64),(!REX2)
++e2: LOOP Jb (f64),(!REX2)
++e3: JrCXZ Jb (f64),(!REX2)
+ e4: IN AL,Ib (!REX2)
+ e5: IN eAX,Ib (!REX2)
+ e6: OUT Ib,AL (!REX2)
+@@ -298,10 +298,10 @@ e7: OUT Ib,eAX (!REX2)
+ # in "near" jumps and calls is 16-bit. For CALL,
+ # push of return address is 16-bit wide, RSP is decremented by 2
+ # but is not truncated to 16 bits, unlike RIP.
+-e8: CALL Jz (f64) (!REX2)
+-e9: JMP-near Jz (f64) (!REX2)
+-ea: JMP-far Ap (i64) (!REX2)
+-eb: JMP-short Jb (f64) (!REX2)
++e8: CALL Jz (f64),(!REX2)
++e9: JMP-near Jz (f64),(!REX2)
++ea: JMP-far Ap (i64),(!REX2)
++eb: JMP-short Jb (f64),(!REX2)
+ ec: IN AL,DX (!REX2)
+ ed: IN eAX,DX (!REX2)
+ ee: OUT DX,AL (!REX2)
+@@ -478,22 +478,22 @@ AVXcode: 1
+ 7f: movq Qq,Pq | vmovdqa Wx,Vx (66) | vmovdqa32/64 Wx,Vx (66),(evo) | vmovdqu Wx,Vx (F3) | vmovdqu32/64 Wx,Vx (F3),(evo) | vmovdqu8/16 Wx,Vx (F2),(ev)
+ # 0x0f 0x80-0x8f
+ # Note: "forced64" is Intel CPU behavior (see comment about CALL insn).
+-80: JO Jz (f64) (!REX2)
+-81: JNO Jz (f64) (!REX2)
+-82: JB/JC/JNAE Jz (f64) (!REX2)
+-83: JAE/JNB/JNC Jz (f64) (!REX2)
+-84: JE/JZ Jz (f64) (!REX2)
+-85: JNE/JNZ Jz (f64) (!REX2)
+-86: JBE/JNA Jz (f64) (!REX2)
+-87: JA/JNBE Jz (f64) (!REX2)
+-88: JS Jz (f64) (!REX2)
+-89: JNS Jz (f64) (!REX2)
+-8a: JP/JPE Jz (f64) (!REX2)
+-8b: JNP/JPO Jz (f64) (!REX2)
+-8c: JL/JNGE Jz (f64) (!REX2)
+-8d: JNL/JGE Jz (f64) (!REX2)
+-8e: JLE/JNG Jz (f64) (!REX2)
+-8f: JNLE/JG Jz (f64) (!REX2)
++80: JO Jz (f64),(!REX2)
++81: JNO Jz (f64),(!REX2)
++82: JB/JC/JNAE Jz (f64),(!REX2)
++83: JAE/JNB/JNC Jz (f64),(!REX2)
++84: JE/JZ Jz (f64),(!REX2)
++85: JNE/JNZ Jz (f64),(!REX2)
++86: JBE/JNA Jz (f64),(!REX2)
++87: JA/JNBE Jz (f64),(!REX2)
++88: JS Jz (f64),(!REX2)
++89: JNS Jz (f64),(!REX2)
++8a: JP/JPE Jz (f64),(!REX2)
++8b: JNP/JPO Jz (f64),(!REX2)
++8c: JL/JNGE Jz (f64),(!REX2)
++8d: JNL/JGE Jz (f64),(!REX2)
++8e: JLE/JNG Jz (f64),(!REX2)
++8f: JNLE/JG Jz (f64),(!REX2)
+ # 0x0f 0x90-0x9f
+ 90: SETO Eb | kmovw/q Vk,Wk | kmovb/d Vk,Wk (66)
+ 91: SETNO Eb | kmovw/q Mv,Vk | kmovb/d Mv,Vk (66)
+diff --git a/tools/bpf/bpftool/cgroup.c b/tools/bpf/bpftool/cgroup.c
+index 9af426d4329931..afab728468bf64 100644
+--- a/tools/bpf/bpftool/cgroup.c
++++ b/tools/bpf/bpftool/cgroup.c
+@@ -221,7 +221,7 @@ static int cgroup_has_attached_progs(int cgroup_fd)
+ 	for (i = 0; i < ARRAY_SIZE(cgroup_attach_types); i++) {
+ 		int count = count_attached_bpf_progs(cgroup_fd, cgroup_attach_types[i]);
+ 
+-		if (count < 0)
++		if (count < 0 && errno != EINVAL)
+ 			return -1;
+ 
+ 		if (count > 0) {
+diff --git a/tools/bpf/resolve_btfids/Makefile b/tools/bpf/resolve_btfids/Makefile
+index 4b8079f294f65b..b0072e64b01023 100644
+--- a/tools/bpf/resolve_btfids/Makefile
++++ b/tools/bpf/resolve_btfids/Makefile
+@@ -19,7 +19,7 @@ endif
+ 
+ # Overrides for the prepare step libraries.
+ HOST_OVERRIDES := AR="$(HOSTAR)" CC="$(HOSTCC)" LD="$(HOSTLD)" ARCH="$(HOSTARCH)" \
+-		  CROSS_COMPILE="" EXTRA_CFLAGS="$(HOSTCFLAGS)"
++		  CROSS_COMPILE="" CLANG_CROSS_FLAGS="" EXTRA_CFLAGS="$(HOSTCFLAGS)"
+ 
+ RM      ?= rm
+ HOSTCC  ?= gcc
+diff --git a/tools/lib/bpf/bpf_core_read.h b/tools/lib/bpf/bpf_core_read.h
+index c0e13cdf966077..b997c68bd94536 100644
+--- a/tools/lib/bpf/bpf_core_read.h
++++ b/tools/lib/bpf/bpf_core_read.h
+@@ -388,7 +388,13 @@ extern void *bpf_rdonly_cast(const void *obj, __u32 btf_id) __ksym __weak;
+ #define ___arrow10(a, b, c, d, e, f, g, h, i, j) a->b->c->d->e->f->g->h->i->j
+ #define ___arrow(...) ___apply(___arrow, ___narg(__VA_ARGS__))(__VA_ARGS__)
+ 
++#if defined(__clang__) && (__clang_major__ >= 19)
++#define ___type(...) __typeof_unqual__(___arrow(__VA_ARGS__))
++#elif defined(__GNUC__) && (__GNUC__ >= 14)
++#define ___type(...) __typeof_unqual__(___arrow(__VA_ARGS__))
++#else
+ #define ___type(...) typeof(___arrow(__VA_ARGS__))
++#endif
+ 
+ #define ___read(read_fn, dst, src_type, src, accessor)			    \
+ 	read_fn((void *)(dst), sizeof(*(dst)), &((src_type)(src))->accessor)
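
typeof() propagates qualifiers, so a CO-RE read through a pointer chain ending
in a const member used to yield a const destination that cannot be assigned to.
__typeof_unqual__ (Clang 19+ / GCC 14+) drops the qualifiers. A minimal
illustration, assuming one of those compilers:

  #include <stdio.h>

  int main(void)
  {
      const int x = 1;

      __typeof_unqual__(x) b = x;    /* plain int: qualifiers dropped */
      b = 2;                         /* assignable */

      typeof(x) a = x;               /* const int: qualifier kept */
      /* a = 2;  would not compile: assignment of read-only variable */

      printf("%d %d\n", a, b);
      return 0;
  }
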
+diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
+index 6e4d417604fa0f..bb24f6bac20737 100644
+--- a/tools/lib/bpf/libbpf.c
++++ b/tools/lib/bpf/libbpf.c
+@@ -60,6 +60,8 @@
+ #define BPF_FS_MAGIC		0xcafe4a11
+ #endif
+ 
++#define MAX_EVENT_NAME_LEN	64
++
+ #define BPF_FS_DEFAULT_PATH "/sys/fs/bpf"
+ 
+ #define BPF_INSN_SZ (sizeof(struct bpf_insn))
+@@ -283,7 +285,7 @@ void libbpf_print(enum libbpf_print_level level, const char *format, ...)
+ 	old_errno = errno;
+ 
+ 	va_start(args, format);
+-	__libbpf_pr(level, format, args);
++	print_fn(level, format, args);
+ 	va_end(args);
+ 
+ 	errno = old_errno;
+@@ -887,7 +889,7 @@ bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data,
+ 			return -LIBBPF_ERRNO__FORMAT;
+ 		}
+ 
+-		if (sec_off + prog_sz > sec_sz) {
++		if (sec_off + prog_sz > sec_sz || sec_off + prog_sz < sec_off) {
+ 			pr_warn("sec '%s': program at offset %zu crosses section boundary\n",
+ 				sec_name, sec_off);
+ 			return -LIBBPF_ERRNO__FORMAT;
+@@ -11039,16 +11041,16 @@ static const char *tracefs_available_filter_functions_addrs(void)
+ 			     : TRACEFS"/available_filter_functions_addrs";
+ }
+ 
+-static void gen_kprobe_legacy_event_name(char *buf, size_t buf_sz,
+-					 const char *kfunc_name, size_t offset)
++static void gen_probe_legacy_event_name(char *buf, size_t buf_sz,
++					const char *name, size_t offset)
+ {
+ 	static int index = 0;
+ 	int i;
+ 
+-	snprintf(buf, buf_sz, "libbpf_%u_%s_0x%zx_%d", getpid(), kfunc_name, offset,
+-		 __sync_fetch_and_add(&index, 1));
++	snprintf(buf, buf_sz, "libbpf_%u_%d_%s_0x%zx", getpid(),
++		 __sync_fetch_and_add(&index, 1), name, offset);
+ 
+-	/* sanitize binary_path in the probe name */
++	/* sanitize name in the probe name */
+ 	for (i = 0; buf[i]; i++) {
+ 		if (!isalnum(buf[i]))
+ 			buf[i] = '_';
+@@ -11174,9 +11176,9 @@ int probe_kern_syscall_wrapper(int token_fd)
+ 
+ 		return pfd >= 0 ? 1 : 0;
+ 	} else { /* legacy mode */
+-		char probe_name[128];
++		char probe_name[MAX_EVENT_NAME_LEN];
+ 
+-		gen_kprobe_legacy_event_name(probe_name, sizeof(probe_name), syscall_name, 0);
++		gen_probe_legacy_event_name(probe_name, sizeof(probe_name), syscall_name, 0);
+ 		if (add_kprobe_event_legacy(probe_name, false, syscall_name, 0) < 0)
+ 			return 0;
+ 
+@@ -11233,10 +11235,10 @@ bpf_program__attach_kprobe_opts(const struct bpf_program *prog,
+ 					    func_name, offset,
+ 					    -1 /* pid */, 0 /* ref_ctr_off */);
+ 	} else {
+-		char probe_name[256];
++		char probe_name[MAX_EVENT_NAME_LEN];
+ 
+-		gen_kprobe_legacy_event_name(probe_name, sizeof(probe_name),
+-					     func_name, offset);
++		gen_probe_legacy_event_name(probe_name, sizeof(probe_name),
++					    func_name, offset);
+ 
+ 		legacy_probe = strdup(probe_name);
+ 		if (!legacy_probe)
+@@ -11744,20 +11746,6 @@ static int attach_uprobe_multi(const struct bpf_program *prog, long cookie, stru
+ 	return ret;
+ }
+ 
+-static void gen_uprobe_legacy_event_name(char *buf, size_t buf_sz,
+-					 const char *binary_path, uint64_t offset)
+-{
+-	int i;
+-
+-	snprintf(buf, buf_sz, "libbpf_%u_%s_0x%zx", getpid(), binary_path, (size_t)offset);
+-
+-	/* sanitize binary_path in the probe name */
+-	for (i = 0; buf[i]; i++) {
+-		if (!isalnum(buf[i]))
+-			buf[i] = '_';
+-	}
+-}
+-
+ static inline int add_uprobe_event_legacy(const char *probe_name, bool retprobe,
+ 					  const char *binary_path, size_t offset)
+ {
+@@ -12173,13 +12161,14 @@ bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid,
+ 		pfd = perf_event_open_probe(true /* uprobe */, retprobe, binary_path,
+ 					    func_offset, pid, ref_ctr_off);
+ 	} else {
+-		char probe_name[PATH_MAX + 64];
++		char probe_name[MAX_EVENT_NAME_LEN];
+ 
+ 		if (ref_ctr_off)
+ 			return libbpf_err_ptr(-EINVAL);
+ 
+-		gen_uprobe_legacy_event_name(probe_name, sizeof(probe_name),
+-					     binary_path, func_offset);
++		gen_probe_legacy_event_name(probe_name, sizeof(probe_name),
++					    strrchr(binary_path, '/') ? : binary_path,
++					    func_offset);
+ 
+ 		legacy_probe = strdup(probe_name);
+ 		if (!legacy_probe)
+@@ -13256,7 +13245,6 @@ struct perf_buffer *perf_buffer__new(int map_fd, size_t page_cnt,
+ 	attr.config = PERF_COUNT_SW_BPF_OUTPUT;
+ 	attr.type = PERF_TYPE_SOFTWARE;
+ 	attr.sample_type = PERF_SAMPLE_RAW;
+-	attr.sample_period = sample_period;
+ 	attr.wakeup_events = sample_period;
+ 
+ 	p.attr = &attr;
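
The unified helper builds names of the form libbpf_<pid>_<index>_<name>_0x<offset>,
replaces anything outside [A-Za-z0-9] with '_', and with the 64-byte cap plus the
basename of the uprobe path stays within the tracefs event-name limit. A
standalone approximation (user-space; mirrors the scheme, not libbpf itself):

  #include <ctype.h>
  #include <stdio.h>
  #include <string.h>
  #include <unistd.h>

  #define MAX_EVENT_NAME_LEN 64

  static void gen_legacy_event_name(char *buf, size_t sz,
                                    const char *name, size_t off, int idx)
  {
      snprintf(buf, sz, "libbpf_%u_%d_%s_0x%zx",
               (unsigned int)getpid(), idx, name, off);
      /* sanitize to characters tracefs accepts in event names */
      for (int i = 0; buf[i]; i++)
          if (!isalnum((unsigned char)buf[i]))
              buf[i] = '_';
  }

  int main(void)
  {
      char buf[MAX_EVENT_NAME_LEN];
      const char *path = "/usr/lib/libc.so.6";
      const char *base = strrchr(path, '/') ? strrchr(path, '/') : path;

      gen_legacy_event_name(buf, sizeof(buf), base, 0x9a0, 0);
      puts(buf);    /* e.g. libbpf_1234_0__libc_so_6_0x9a0 */
      return 0;
  }
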
+diff --git a/tools/lib/bpf/linker.c b/tools/lib/bpf/linker.c
+index 179f6b31cbd6fa..d4ab9315afe717 100644
+--- a/tools/lib/bpf/linker.c
++++ b/tools/lib/bpf/linker.c
+@@ -1220,7 +1220,7 @@ static int linker_append_sec_data(struct bpf_linker *linker, struct src_obj *obj
+ 		} else {
+ 			if (!secs_match(dst_sec, src_sec)) {
+ 				pr_warn("ELF sections %s are incompatible\n", src_sec->sec_name);
+-				return -1;
++				return -EINVAL;
+ 			}
+ 
+ 			/* "license" and "version" sections are deduped */
+@@ -2067,7 +2067,7 @@ static int linker_append_elf_relos(struct bpf_linker *linker, struct src_obj *ob
+ 			}
+ 		} else if (!secs_match(dst_sec, src_sec)) {
+ 			pr_warn("sections %s are not compatible\n", src_sec->sec_name);
+-			return -1;
++			return -EINVAL;
+ 		}
+ 
+ 		/* shdr->sh_link points to SYMTAB */
+diff --git a/tools/lib/bpf/nlattr.c b/tools/lib/bpf/nlattr.c
+index 975e265eab3bfe..06663f9ea581f9 100644
+--- a/tools/lib/bpf/nlattr.c
++++ b/tools/lib/bpf/nlattr.c
+@@ -63,16 +63,16 @@ static int validate_nla(struct nlattr *nla, int maxtype,
+ 		minlen = nla_attr_minlen[pt->type];
+ 
+ 	if (libbpf_nla_len(nla) < minlen)
+-		return -1;
++		return -EINVAL;
+ 
+ 	if (pt->maxlen && libbpf_nla_len(nla) > pt->maxlen)
+-		return -1;
++		return -EINVAL;
+ 
+ 	if (pt->type == LIBBPF_NLA_STRING) {
+ 		char *data = libbpf_nla_data(nla);
+ 
+ 		if (data[libbpf_nla_len(nla) - 1] != '\0')
+-			return -1;
++			return -EINVAL;
+ 	}
+ 
+ 	return 0;
+@@ -118,19 +118,18 @@ int libbpf_nla_parse(struct nlattr *tb[], int maxtype, struct nlattr *head,
+ 		if (policy) {
+ 			err = validate_nla(nla, maxtype, policy);
+ 			if (err < 0)
+-				goto errout;
++				return err;
+ 		}
+ 
+-		if (tb[type])
++		if (tb[type]) {
+ 			pr_warn("Attribute of type %#x found multiple times in message, "
+ 				"previous attribute is being ignored.\n", type);
++		}
+ 
+ 		tb[type] = nla;
+ 	}
+ 
+-	err = 0;
+-errout:
+-	return err;
++	return 0;
+ }
+ 
+ /**
+diff --git a/tools/objtool/check.c b/tools/objtool/check.c
+index 4fce0074076f3a..a737286de75926 100644
+--- a/tools/objtool/check.c
++++ b/tools/objtool/check.c
+@@ -222,7 +222,8 @@ static bool is_rust_noreturn(const struct symbol *func)
+ 	       str_ends_with(func->name, "_7___rustc17rust_begin_unwind")				||
+ 	       strstr(func->name, "_4core9panicking13assert_failed")					||
+ 	       strstr(func->name, "_4core9panicking11panic_const24panic_const_")			||
+-	       (strstr(func->name, "_4core5slice5index24slice_") &&
++	       (strstr(func->name, "_4core5slice5index") &&
++		strstr(func->name, "slice_") &&
+ 		str_ends_with(func->name, "_fail"));
+ }
+ 
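
Rust mangling length-prefixes each path segment, so the old literal
"5index24slice_" only matched the 24-character helpers such as
slice_end_index_len_fail; splitting the match also admits 22- and 26-character
siblings like slice_index_order_fail and slice_start_index_len_fail. The
loosened predicate in isolation (the sample symbol is a made-up mangled name,
shown only for shape):

  #include <stdbool.h>
  #include <stdio.h>
  #include <string.h>

  static bool str_ends_with(const char *s, const char *suf)
  {
      size_t sl = strlen(s), fl = strlen(suf);
      return sl >= fl && !strcmp(s + sl - fl, suf);
  }

  /* any core::slice::index helper named slice_*_fail, regardless of the
   * mangled length prefix sitting between "index" and "slice_" */
  static bool is_slice_index_fail(const char *name)
  {
      return strstr(name, "_4core5slice5index") &&
             strstr(name, "slice_") &&
             str_ends_with(name, "_fail");
  }

  int main(void)
  {
      const char *sym = "_RNvNtNtCs123_4core5slice5index26slice_start_index_len_fail";

      printf("%d\n", is_slice_index_fail(sym));    /* 1 */
      return 0;
  }
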
+diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
+index b102a4c525e4b0..a2034fa1832543 100644
+--- a/tools/perf/Makefile.config
++++ b/tools/perf/Makefile.config
+@@ -569,6 +569,8 @@ ifndef NO_LIBELF
+     ifeq ($(feature-libdebuginfod), 1)
+       CFLAGS += -DHAVE_DEBUGINFOD_SUPPORT
+       EXTLIBS += -ldebuginfod
++    else
++      $(warning No elfutils/debuginfod.h found, no debuginfo server support, please install libdebuginfod-dev/elfutils-debuginfod-client-devel or equivalent)
+     endif
+   endif
+ 
+diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
+index 8ee59ecb14110f..b61c355fbdeed7 100644
+--- a/tools/perf/Makefile.perf
++++ b/tools/perf/Makefile.perf
+@@ -1143,7 +1143,8 @@ install-tests: all install-gtk
+ 		$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/base_probe'; \
+ 		$(INSTALL) tests/shell/base_probe/*.sh '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/base_probe'; \
+ 		$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/base_report'; \
+-		$(INSTALL) tests/shell/base_probe/*.sh '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/base_report'; \
++		$(INSTALL) tests/shell/base_report/*.sh '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/base_report'; \
++		$(INSTALL) tests/shell/base_report/*.txt '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/base_report'; \
+ 		$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/coresight' ; \
+ 		$(INSTALL) tests/shell/coresight/*.sh '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/coresight'
+ 	$(Q)$(MAKE) -C tests/shell/coresight install-tests
+diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
+index adbaf80b398c1f..ab9035573a15ed 100644
+--- a/tools/perf/builtin-record.c
++++ b/tools/perf/builtin-record.c
+@@ -3471,7 +3471,7 @@ static struct option __record_options[] = {
+ 		    "sample selected machine registers on interrupt,"
+ 		    " use '-I?' to list register names", parse_intr_regs),
+ 	OPT_CALLBACK_OPTARG(0, "user-regs", &record.opts.sample_user_regs, NULL, "any register",
+-		    "sample selected machine registers on interrupt,"
++		    "sample selected machine registers in user space,"
+ 		    " use '--user-regs=?' to list register names", parse_user_regs),
+ 	OPT_BOOLEAN(0, "running-time", &record.opts.running_time,
+ 		    "Record running/enabled time of read (:S) events"),
+diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
+index ecd26e058baf67..f77e4f4b6f03e5 100644
+--- a/tools/perf/builtin-trace.c
++++ b/tools/perf/builtin-trace.c
+@@ -1327,7 +1327,7 @@ static const struct syscall_fmt syscall_fmts[] = {
+ 	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* olddirfd */ },
+ 		   [2] = { .scnprintf = SCA_FDAT, /* newdirfd */ },
+ 		   [4] = { .scnprintf = SCA_RENAMEAT2_FLAGS, /* flags */ }, }, },
+-	{ .name	    = "rseq",	    .errpid = true,
++	{ .name	    = "rseq",
+ 	  .arg = { [0] = { .from_user = true /* rseq */, }, }, },
+ 	{ .name	    = "rt_sigaction",
+ 	  .arg = { [0] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
+@@ -1351,7 +1351,7 @@ static const struct syscall_fmt syscall_fmts[] = {
+ 	{ .name	    = "sendto",
+ 	  .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ },
+ 		   [4] = SCA_SOCKADDR_FROM_USER(addr), }, },
+-	{ .name	    = "set_robust_list",	    .errpid = true,
++	{ .name	    = "set_robust_list",
+ 	  .arg = { [0] = { .from_user = true /* head */, }, }, },
+ 	{ .name	    = "set_tid_address", .errpid = true, },
+ 	{ .name	    = "setitimer",
+@@ -2873,8 +2873,8 @@ errno_print: {
+ 	else if (sc->fmt->errpid) {
+ 		struct thread *child = machine__find_thread(trace->host, ret, ret);
+ 
++		fprintf(trace->output, "%ld", ret);
+ 		if (child != NULL) {
+-			fprintf(trace->output, "%ld", ret);
+ 			if (thread__comm_set(child))
+ 				fprintf(trace->output, " (%s)", thread__comm_str(child));
+ 			thread__put(child);
+@@ -3986,10 +3986,13 @@ static int trace__set_filter_loop_pids(struct trace *trace)
+ 		if (!strcmp(thread__comm_str(parent), "sshd") ||
+ 		    strstarts(thread__comm_str(parent), "gnome-terminal")) {
+ 			pids[nr++] = thread__tid(parent);
++			thread__put(parent);
+ 			break;
+ 		}
++		thread__put(thread);
+ 		thread = parent;
+ 	}
++	thread__put(thread);
+ 
+ 	err = evlist__append_tp_filter_pids(trace->evlist, nr, pids);
+ 	if (!err && trace->filter_pids.map)
+diff --git a/tools/perf/scripts/python/exported-sql-viewer.py b/tools/perf/scripts/python/exported-sql-viewer.py
+index 121cf61ba1b345..e0b2e7268ef68c 100755
+--- a/tools/perf/scripts/python/exported-sql-viewer.py
++++ b/tools/perf/scripts/python/exported-sql-viewer.py
+@@ -680,7 +680,10 @@ class CallGraphModelBase(TreeModel):
+ 				s = value.replace("%", "\\%")
+ 				s = s.replace("_", "\\_")
+ 				# Translate * and ? into SQL LIKE pattern characters % and _
+-				trans = string.maketrans("*?", "%_")
++				if sys.version_info[0] == 3:
++					trans = str.maketrans("*?", "%_")
++				else:
++					trans = string.maketrans("*?", "%_")
+ 				match = " LIKE '" + str(s).translate(trans) + "'"
+ 			else:
+ 				match = " GLOB '" + str(value) + "'"
+diff --git a/tools/perf/tests/switch-tracking.c b/tools/perf/tests/switch-tracking.c
+index 5cab17a1942e67..ee43d8fa2ed672 100644
+--- a/tools/perf/tests/switch-tracking.c
++++ b/tools/perf/tests/switch-tracking.c
+@@ -258,7 +258,7 @@ static int compar(const void *a, const void *b)
+ 	const struct event_node *nodeb = b;
+ 	s64 cmp = nodea->event_time - nodeb->event_time;
+ 
+-	return cmp;
++	return cmp < 0 ? -1 : (cmp > 0 ? 1 : 0);
+ }
+ 
+ static int process_events(struct evlist *evlist,
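
The comparator change fixes a classic qsort() pitfall: the s64 difference is
truncated to the int return type, so two event times exactly 2^32 apart compare
as equal and larger gaps can even flip sign. A minimal reproduction:

  #include <stdint.h>
  #include <stdio.h>

  int main(void)
  {
      int64_t a = 0x200000000LL, b = 0x100000000LL;    /* a > b */
      int64_t cmp = a - b;                             /* 0x100000000 */

      printf("truncated: %d\n", (int)cmp);             /* 0: "equal" (wrong) */
      printf("sign:      %d\n", cmp < 0 ? -1 : (cmp > 0 ? 1 : 0)); /* 1 */
      return 0;
  }
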
+diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
+index 49ba82bf33918a..3283b6313bab82 100644
+--- a/tools/perf/ui/browsers/hists.c
++++ b/tools/perf/ui/browsers/hists.c
+@@ -3267,10 +3267,10 @@ static int evsel__hists_browse(struct evsel *evsel, int nr_events, const char *h
+ 				/*
+ 				 * No need to set actions->dso here since
+ 				 * it's just to remove the current filter.
+-				 * Ditto for thread below.
+ 				 */
+ 				do_zoom_dso(browser, actions);
+ 			} else if (top == &browser->hists->thread_filter) {
++				actions->thread = thread;
+ 				do_zoom_thread(browser, actions);
+ 			} else if (top == &browser->hists->socket_filter) {
+ 				do_zoom_socket(browser, actions);
+diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
+index fd2597613f3dcb..61f10578e12121 100644
+--- a/tools/perf/util/intel-pt.c
++++ b/tools/perf/util/intel-pt.c
+@@ -127,6 +127,7 @@ struct intel_pt {
+ 
+ 	bool single_pebs;
+ 	bool sample_pebs;
++	int pebs_data_src_fmt;
+ 	struct evsel *pebs_evsel;
+ 
+ 	u64 evt_sample_type;
+@@ -175,6 +176,7 @@ enum switch_state {
+ struct intel_pt_pebs_event {
+ 	struct evsel *evsel;
+ 	u64 id;
++	int data_src_fmt;
+ };
+ 
+ struct intel_pt_queue {
+@@ -2232,7 +2234,146 @@ static void intel_pt_add_lbrs(struct branch_stack *br_stack,
+ 	}
+ }
+ 
+-static int intel_pt_do_synth_pebs_sample(struct intel_pt_queue *ptq, struct evsel *evsel, u64 id)
++#define P(a, b) PERF_MEM_S(a, b)
++#define OP_LH (P(OP, LOAD) | P(LVL, HIT))
++#define LEVEL(x) P(LVLNUM, x)
++#define REM P(REMOTE, REMOTE)
++#define SNOOP_NONE_MISS (P(SNOOP, NONE) | P(SNOOP, MISS))
++
++#define PERF_PEBS_DATA_SOURCE_GRT_MAX	0x10
++#define PERF_PEBS_DATA_SOURCE_GRT_MASK	(PERF_PEBS_DATA_SOURCE_GRT_MAX - 1)
++
++/* Based on kernel __intel_pmu_pebs_data_source_grt() and pebs_data_source */
++static const u64 pebs_data_source_grt[PERF_PEBS_DATA_SOURCE_GRT_MAX] = {
++	P(OP, LOAD) | P(LVL, MISS) | LEVEL(L3) | P(SNOOP, NA),         /* L3 miss|SNP N/A */
++	OP_LH | P(LVL, L1)  | LEVEL(L1)  | P(SNOOP, NONE),             /* L1 hit|SNP None */
++	OP_LH | P(LVL, LFB) | LEVEL(LFB) | P(SNOOP, NONE),             /* LFB/MAB hit|SNP None */
++	OP_LH | P(LVL, L2)  | LEVEL(L2)  | P(SNOOP, NONE),             /* L2 hit|SNP None */
++	OP_LH | P(LVL, L3)  | LEVEL(L3)  | P(SNOOP, NONE),             /* L3 hit|SNP None */
++	OP_LH | P(LVL, L3)  | LEVEL(L3)  | P(SNOOP, HIT),              /* L3 hit|SNP Hit */
++	OP_LH | P(LVL, L3)  | LEVEL(L3)  | P(SNOOP, HITM),             /* L3 hit|SNP HitM */
++	OP_LH | P(LVL, L3)  | LEVEL(L3)  | P(SNOOP, HITM),             /* L3 hit|SNP HitM */
++	OP_LH | P(LVL, L3)  | LEVEL(L3)  | P(SNOOPX, FWD),             /* L3 hit|SNP Fwd */
++	OP_LH | P(LVL, REM_CCE1) | REM | LEVEL(L3) | P(SNOOP, HITM),   /* Remote L3 hit|SNP HitM */
++	OP_LH | P(LVL, LOC_RAM)  | LEVEL(RAM) | P(SNOOP, HIT),         /* RAM hit|SNP Hit */
++	OP_LH | P(LVL, REM_RAM1) | REM | LEVEL(L3) | P(SNOOP, HIT),    /* Remote L3 hit|SNP Hit */
++	OP_LH | P(LVL, LOC_RAM)  | LEVEL(RAM) | SNOOP_NONE_MISS,       /* RAM hit|SNP None or Miss */
++	OP_LH | P(LVL, REM_RAM1) | LEVEL(RAM) | REM | SNOOP_NONE_MISS, /* Remote RAM hit|SNP None or Miss */
++	OP_LH | P(LVL, IO)  | LEVEL(NA) | P(SNOOP, NONE),              /* I/O hit|SNP None */
++	OP_LH | P(LVL, UNC) | LEVEL(NA) | P(SNOOP, NONE),              /* Uncached hit|SNP None */
++};
++
++/* Based on kernel __intel_pmu_pebs_data_source_cmt() and pebs_data_source */
++static const u64 pebs_data_source_cmt[PERF_PEBS_DATA_SOURCE_GRT_MAX] = {
++	P(OP, LOAD) | P(LVL, MISS) | LEVEL(L3) | P(SNOOP, NA),       /* L3 miss|SNP N/A */
++	OP_LH | P(LVL, L1)  | LEVEL(L1)  | P(SNOOP, NONE),           /* L1 hit|SNP None */
++	OP_LH | P(LVL, LFB) | LEVEL(LFB) | P(SNOOP, NONE),           /* LFB/MAB hit|SNP None */
++	OP_LH | P(LVL, L2)  | LEVEL(L2)  | P(SNOOP, NONE),           /* L2 hit|SNP None */
++	OP_LH | P(LVL, L3)  | LEVEL(L3)  | P(SNOOP, NONE),           /* L3 hit|SNP None */
++	OP_LH | P(LVL, L3)  | LEVEL(L3)  | P(SNOOP, MISS),           /* L3 hit|SNP Hit */
++	OP_LH | P(LVL, L3)  | LEVEL(L3)  | P(SNOOP, HIT),            /* L3 hit|SNP HitM */
++	OP_LH | P(LVL, L3)  | LEVEL(L3)  | P(SNOOPX, FWD),           /* L3 hit|SNP HitM */
++	OP_LH | P(LVL, L3)  | LEVEL(L3)  | P(SNOOP, HITM),           /* L3 hit|SNP Fwd */
++	OP_LH | P(LVL, REM_CCE1) | REM | LEVEL(L3) | P(SNOOP, HITM), /* Remote L3 hit|SNP HitM */
++	OP_LH | P(LVL, LOC_RAM)  | LEVEL(RAM) | P(SNOOP, NONE),      /* RAM hit|SNP Hit */
++	OP_LH | LEVEL(RAM) | REM | P(SNOOP, NONE),                   /* Remote L3 hit|SNP Hit */
++	OP_LH | LEVEL(RAM) | REM | P(SNOOPX, FWD),                   /* RAM hit|SNP None or Miss */
++	OP_LH | LEVEL(RAM) | REM | P(SNOOP, HITM),                   /* Remote RAM hit|SNP None or Miss */
++	OP_LH | P(LVL, IO)  | LEVEL(NA) | P(SNOOP, NONE),            /* I/O hit|SNP None */
++	OP_LH | P(LVL, UNC) | LEVEL(NA) | P(SNOOP, NONE),            /* Uncached hit|SNP None */
++};
++
++/* Based on kernel pebs_set_tlb_lock() */
++static inline void pebs_set_tlb_lock(u64 *val, bool tlb, bool lock)
++{
++	/*
++	 * TLB access
++	 * 0 = did not miss 2nd level TLB
++	 * 1 = missed 2nd level TLB
++	 */
++	if (tlb)
++		*val |= P(TLB, MISS) | P(TLB, L2);
++	else
++		*val |= P(TLB, HIT) | P(TLB, L1) | P(TLB, L2);
++
++	/* locked prefix */
++	if (lock)
++		*val |= P(LOCK, LOCKED);
++}
++
++/* Based on kernel __grt_latency_data() */
++static u64 intel_pt_grt_latency_data(u8 dse, bool tlb, bool lock, bool blk,
++				     const u64 *pebs_data_source)
++{
++	u64 val;
++
++	dse &= PERF_PEBS_DATA_SOURCE_GRT_MASK;
++	val = pebs_data_source[dse];
++
++	pebs_set_tlb_lock(&val, tlb, lock);
++
++	if (blk)
++		val |= P(BLK, DATA);
++	else
++		val |= P(BLK, NA);
++
++	return val;
++}
++
++/* Default value for data source */
++#define PERF_MEM_NA (PERF_MEM_S(OP, NA)    |\
++		     PERF_MEM_S(LVL, NA)   |\
++		     PERF_MEM_S(SNOOP, NA) |\
++		     PERF_MEM_S(LOCK, NA)  |\
++		     PERF_MEM_S(TLB, NA)   |\
++		     PERF_MEM_S(LVLNUM, NA))
++
++enum DATA_SRC_FORMAT {
++	DATA_SRC_FORMAT_ERR  = -1,
++	DATA_SRC_FORMAT_NA   =  0,
++	DATA_SRC_FORMAT_GRT  =  1,
++	DATA_SRC_FORMAT_CMT  =  2,
++};
++
++/* Based on kernel grt_latency_data() and cmt_latency_data */
++static u64 intel_pt_get_data_src(u64 mem_aux_info, int data_src_fmt)
++{
++	switch (data_src_fmt) {
++	case DATA_SRC_FORMAT_GRT: {
++		union {
++			u64 val;
++			struct {
++				unsigned int dse:4;
++				unsigned int locked:1;
++				unsigned int stlb_miss:1;
++				unsigned int fwd_blk:1;
++				unsigned int reserved:25;
++			};
++		} x = {.val = mem_aux_info};
++		return intel_pt_grt_latency_data(x.dse, x.stlb_miss, x.locked, x.fwd_blk,
++						 pebs_data_source_grt);
++	}
++	case DATA_SRC_FORMAT_CMT: {
++		union {
++			u64 val;
++			struct {
++				unsigned int dse:5;
++				unsigned int locked:1;
++				unsigned int stlb_miss:1;
++				unsigned int fwd_blk:1;
++				unsigned int reserved:24;
++			};
++		} x = {.val = mem_aux_info};
++		return intel_pt_grt_latency_data(x.dse, x.stlb_miss, x.locked, x.fwd_blk,
++						 pebs_data_source_cmt);
++	}
++	default:
++		return PERF_MEM_NA;
++	}
++}
++
++static int intel_pt_do_synth_pebs_sample(struct intel_pt_queue *ptq, struct evsel *evsel,
++					 u64 id, int data_src_fmt)
+ {
+ 	const struct intel_pt_blk_items *items = &ptq->state->items;
+ 	struct perf_sample sample = { .ip = 0, };
+@@ -2350,6 +2491,18 @@ static int intel_pt_do_synth_pebs_sample(struct intel_pt_queue *ptq, struct evse
+ 		}
+ 	}
+ 
++	if (sample_type & PERF_SAMPLE_DATA_SRC) {
++		if (items->has_mem_aux_info && data_src_fmt) {
++			if (data_src_fmt < 0) {
++				pr_err("Intel PT missing data_src info\n");
++				return -1;
++			}
++			sample.data_src = intel_pt_get_data_src(items->mem_aux_info, data_src_fmt);
++		} else {
++			sample.data_src = PERF_MEM_NA;
++		}
++	}
++
+ 	if (sample_type & PERF_SAMPLE_TRANSACTION && items->has_tsx_aux_info) {
+ 		u64 ax = items->has_rax ? items->rax : 0;
+ 		/* Refer kernel's intel_hsw_transaction() */
+@@ -2368,9 +2521,10 @@ static int intel_pt_synth_single_pebs_sample(struct intel_pt_queue *ptq)
+ {
+ 	struct intel_pt *pt = ptq->pt;
+ 	struct evsel *evsel = pt->pebs_evsel;
++	int data_src_fmt = pt->pebs_data_src_fmt;
+ 	u64 id = evsel->core.id[0];
+ 
+-	return intel_pt_do_synth_pebs_sample(ptq, evsel, id);
++	return intel_pt_do_synth_pebs_sample(ptq, evsel, id, data_src_fmt);
+ }
+ 
+ static int intel_pt_synth_pebs_sample(struct intel_pt_queue *ptq)
+@@ -2395,7 +2549,7 @@ static int intel_pt_synth_pebs_sample(struct intel_pt_queue *ptq)
+ 				       hw_id);
+ 			return intel_pt_synth_single_pebs_sample(ptq);
+ 		}
+-		err = intel_pt_do_synth_pebs_sample(ptq, pe->evsel, pe->id);
++		err = intel_pt_do_synth_pebs_sample(ptq, pe->evsel, pe->id, pe->data_src_fmt);
+ 		if (err)
+ 			return err;
+ 	}
+@@ -3355,6 +3509,49 @@ static int intel_pt_process_itrace_start(struct intel_pt *pt,
+ 					event->itrace_start.tid);
+ }
+ 
++/*
++ * Events with data_src are identified by L1_Hit_Indication
++ * refer https://github.com/intel/perfmon
++ */
++static int intel_pt_data_src_fmt(struct intel_pt *pt, struct evsel *evsel)
++{
++	struct perf_env *env = pt->machine->env;
++	int fmt = DATA_SRC_FORMAT_NA;
++
++	if (!env->cpuid)
++		return DATA_SRC_FORMAT_ERR;
++
++	/*
++	 * PEBS-via-PT is only supported on E-core non-hybrid. Of those only
++	 * Gracemont and Crestmont have data_src. Check for:
++	 *	Alderlake N   (Gracemont)
++	 *	Sierra Forest (Crestmont)
++	 *	Grand Ridge   (Crestmont)
++	 */
++
++	if (!strncmp(env->cpuid, "GenuineIntel,6,190,", 19))
++		fmt = DATA_SRC_FORMAT_GRT;
++
++	if (!strncmp(env->cpuid, "GenuineIntel,6,175,", 19) ||
++	    !strncmp(env->cpuid, "GenuineIntel,6,182,", 19))
++		fmt = DATA_SRC_FORMAT_CMT;
++
++	if (fmt == DATA_SRC_FORMAT_NA)
++		return fmt;
++
++	/*
++	 * Only data_src events are:
++	 *	mem-loads	event=0xd0,umask=0x5
++	 *	mem-stores	event=0xd0,umask=0x6
++	 */
++	if (evsel->core.attr.type == PERF_TYPE_RAW &&
++	    ((evsel->core.attr.config & 0xffff) == 0x5d0 ||
++	     (evsel->core.attr.config & 0xffff) == 0x6d0))
++		return fmt;
++
++	return DATA_SRC_FORMAT_NA;
++}
++
+ static int intel_pt_process_aux_output_hw_id(struct intel_pt *pt,
+ 					     union perf_event *event,
+ 					     struct perf_sample *sample)
+@@ -3375,6 +3572,7 @@ static int intel_pt_process_aux_output_hw_id(struct intel_pt *pt,
+ 
+ 	ptq->pebs[hw_id].evsel = evsel;
+ 	ptq->pebs[hw_id].id = sample->id;
++	ptq->pebs[hw_id].data_src_fmt = intel_pt_data_src_fmt(pt, evsel);
+ 
+ 	return 0;
+ }
+@@ -3924,6 +4122,7 @@ static void intel_pt_setup_pebs_events(struct intel_pt *pt)
+ 			}
+ 			pt->single_pebs = true;
+ 			pt->sample_pebs = true;
++			pt->pebs_data_src_fmt = intel_pt_data_src_fmt(pt, evsel);
+ 			pt->pebs_evsel = evsel;
+ 		}
+ 	}
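
For the PEBS decode above, mem_aux_info packs the data-source encoding plus
TLB/lock/forward-block bits; GRT uses a 4-bit dse field and CMT a 5-bit one,
which then indexes the per-core tables added earlier. A standalone decode of
the GRT layout (mirrors the union in the patch; illustrative only):

  #include <stdint.h>
  #include <stdio.h>

  union grt_mem_aux {
      uint64_t val;
      struct {
          unsigned int dse:4;          /* index into pebs_data_source_grt */
          unsigned int locked:1;       /* locked prefix */
          unsigned int stlb_miss:1;    /* missed 2nd-level TLB */
          unsigned int fwd_blk:1;      /* forward-block */
          unsigned int reserved:25;
      };
  };

  int main(void)
  {
      union grt_mem_aux x = { .val = 0x24 };    /* dse=4, stlb_miss=1 */

      printf("dse=%u locked=%u stlb_miss=%u fwd_blk=%u\n",
             x.dse, x.locked, x.stlb_miss, x.fwd_blk);
      return 0;
  }
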
+diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
+index 9be2f4479f5257..20fd742984e3c4 100644
+--- a/tools/perf/util/machine.c
++++ b/tools/perf/util/machine.c
+@@ -1974,7 +1974,7 @@ static void ip__resolve_ams(struct thread *thread,
+ 	 * Thus, we have to try consecutively until we find a match
+ 	 * or else, the symbol is unknown
+ 	 */
+-	thread__find_cpumode_addr_location(thread, ip, &al);
++	thread__find_cpumode_addr_location(thread, ip, /*symbols=*/true, &al);
+ 
+ 	ams->addr = ip;
+ 	ams->al_addr = al.addr;
+@@ -2076,7 +2076,7 @@ static int add_callchain_ip(struct thread *thread,
+ 	al.sym = NULL;
+ 	al.srcline = NULL;
+ 	if (!cpumode) {
+-		thread__find_cpumode_addr_location(thread, ip, &al);
++		thread__find_cpumode_addr_location(thread, ip, symbols, &al);
+ 	} else {
+ 		if (ip >= PERF_CONTEXT_MAX) {
+ 			switch (ip) {
+@@ -2104,6 +2104,8 @@ static int add_callchain_ip(struct thread *thread,
+ 		}
+ 		if (symbols)
+ 			thread__find_symbol(thread, *cpumode, ip, &al);
++		else
++			thread__find_map(thread, *cpumode, ip, &al);
+ 	}
+ 
+ 	if (al.sym != NULL) {
+diff --git a/tools/perf/util/symbol-minimal.c b/tools/perf/util/symbol-minimal.c
+index c6f369b5d893f3..36c1d3090689fc 100644
+--- a/tools/perf/util/symbol-minimal.c
++++ b/tools/perf/util/symbol-minimal.c
+@@ -90,11 +90,23 @@ int filename__read_build_id(const char *filename, struct build_id *bid)
+ {
+ 	FILE *fp;
+ 	int ret = -1;
+-	bool need_swap = false;
++	bool need_swap = false, elf32;
+ 	u8 e_ident[EI_NIDENT];
+-	size_t buf_size;
+-	void *buf;
+ 	int i;
++	union {
++		struct {
++			Elf32_Ehdr ehdr32;
++			Elf32_Phdr *phdr32;
++		};
++		struct {
++			Elf64_Ehdr ehdr64;
++			Elf64_Phdr *phdr64;
++		};
++	} hdrs;
++	void *phdr;
++	size_t phdr_size;
++	void *buf = NULL;
++	size_t buf_size = 0;
+ 
+ 	fp = fopen(filename, "r");
+ 	if (fp == NULL)
+@@ -108,117 +120,79 @@ int filename__read_build_id(const char *filename, struct build_id *bid)
+ 		goto out;
+ 
+ 	need_swap = check_need_swap(e_ident[EI_DATA]);
++	elf32 = e_ident[EI_CLASS] == ELFCLASS32;
+ 
+-	/* for simplicity */
+-	fseek(fp, 0, SEEK_SET);
+-
+-	if (e_ident[EI_CLASS] == ELFCLASS32) {
+-		Elf32_Ehdr ehdr;
+-		Elf32_Phdr *phdr;
+-
+-		if (fread(&ehdr, sizeof(ehdr), 1, fp) != 1)
+-			goto out;
++	if (fread(elf32 ? (void *)&hdrs.ehdr32 : (void *)&hdrs.ehdr64,
++		  elf32 ? sizeof(hdrs.ehdr32) : sizeof(hdrs.ehdr64),
++		  1, fp) != 1)
++		goto out;
+ 
+-		if (need_swap) {
+-			ehdr.e_phoff = bswap_32(ehdr.e_phoff);
+-			ehdr.e_phentsize = bswap_16(ehdr.e_phentsize);
+-			ehdr.e_phnum = bswap_16(ehdr.e_phnum);
++	if (need_swap) {
++		if (elf32) {
++			hdrs.ehdr32.e_phoff = bswap_32(hdrs.ehdr32.e_phoff);
++			hdrs.ehdr32.e_phentsize = bswap_16(hdrs.ehdr32.e_phentsize);
++			hdrs.ehdr32.e_phnum = bswap_16(hdrs.ehdr32.e_phnum);
++		} else {
++			hdrs.ehdr64.e_phoff = bswap_64(hdrs.ehdr64.e_phoff);
++			hdrs.ehdr64.e_phentsize = bswap_16(hdrs.ehdr64.e_phentsize);
++			hdrs.ehdr64.e_phnum = bswap_16(hdrs.ehdr64.e_phnum);
+ 		}
++	}
++	phdr_size = elf32 ? hdrs.ehdr32.e_phentsize * hdrs.ehdr32.e_phnum
++			  : hdrs.ehdr64.e_phentsize * hdrs.ehdr64.e_phnum;
++	phdr = malloc(phdr_size);
++	if (phdr == NULL)
++		goto out;
+ 
+-		buf_size = ehdr.e_phentsize * ehdr.e_phnum;
+-		buf = malloc(buf_size);
+-		if (buf == NULL)
+-			goto out;
+-
+-		fseek(fp, ehdr.e_phoff, SEEK_SET);
+-		if (fread(buf, buf_size, 1, fp) != 1)
+-			goto out_free;
+-
+-		for (i = 0, phdr = buf; i < ehdr.e_phnum; i++, phdr++) {
+-			void *tmp;
+-			long offset;
+-
+-			if (need_swap) {
+-				phdr->p_type = bswap_32(phdr->p_type);
+-				phdr->p_offset = bswap_32(phdr->p_offset);
+-				phdr->p_filesz = bswap_32(phdr->p_filesz);
+-			}
+-
+-			if (phdr->p_type != PT_NOTE)
+-				continue;
+-
+-			buf_size = phdr->p_filesz;
+-			offset = phdr->p_offset;
+-			tmp = realloc(buf, buf_size);
+-			if (tmp == NULL)
+-				goto out_free;
+-
+-			buf = tmp;
+-			fseek(fp, offset, SEEK_SET);
+-			if (fread(buf, buf_size, 1, fp) != 1)
+-				goto out_free;
++	fseek(fp, elf32 ? hdrs.ehdr32.e_phoff : hdrs.ehdr64.e_phoff, SEEK_SET);
++	if (fread(phdr, phdr_size, 1, fp) != 1)
++		goto out_free;
+ 
+-			ret = read_build_id(buf, buf_size, bid, need_swap);
+-			if (ret == 0) {
+-				ret = bid->size;
+-				break;
+-			}
+-		}
+-	} else {
+-		Elf64_Ehdr ehdr;
+-		Elf64_Phdr *phdr;
++	if (elf32)
++		hdrs.phdr32 = phdr;
++	else
++		hdrs.phdr64 = phdr;
+ 
+-		if (fread(&ehdr, sizeof(ehdr), 1, fp) != 1)
+-			goto out;
++	for (i = 0; i < (elf32 ? hdrs.ehdr32.e_phnum : hdrs.ehdr64.e_phnum); i++) {
++		size_t p_filesz;
+ 
+ 		if (need_swap) {
+-			ehdr.e_phoff = bswap_64(ehdr.e_phoff);
+-			ehdr.e_phentsize = bswap_16(ehdr.e_phentsize);
+-			ehdr.e_phnum = bswap_16(ehdr.e_phnum);
++			if (elf32) {
++				hdrs.phdr32[i].p_type = bswap_32(hdrs.phdr32[i].p_type);
++				hdrs.phdr32[i].p_offset = bswap_32(hdrs.phdr32[i].p_offset);
++				hdrs.phdr32[i].p_filesz = bswap_32(hdrs.phdr32[i].p_filesz);
++			} else {
++				hdrs.phdr64[i].p_type = bswap_32(hdrs.phdr64[i].p_type);
++				hdrs.phdr64[i].p_offset = bswap_64(hdrs.phdr64[i].p_offset);
++				hdrs.phdr64[i].p_filesz = bswap_64(hdrs.phdr64[i].p_filesz);
++			}
+ 		}
++		if ((elf32 ? hdrs.phdr32[i].p_type : hdrs.phdr64[i].p_type) != PT_NOTE)
++			continue;
+ 
+-		buf_size = ehdr.e_phentsize * ehdr.e_phnum;
+-		buf = malloc(buf_size);
+-		if (buf == NULL)
+-			goto out;
+-
+-		fseek(fp, ehdr.e_phoff, SEEK_SET);
+-		if (fread(buf, buf_size, 1, fp) != 1)
+-			goto out_free;
+-
+-		for (i = 0, phdr = buf; i < ehdr.e_phnum; i++, phdr++) {
++		p_filesz = elf32 ? hdrs.phdr32[i].p_filesz : hdrs.phdr64[i].p_filesz;
++		if (p_filesz > buf_size) {
+ 			void *tmp;
+-			long offset;
+-
+-			if (need_swap) {
+-				phdr->p_type = bswap_32(phdr->p_type);
+-				phdr->p_offset = bswap_64(phdr->p_offset);
+-				phdr->p_filesz = bswap_64(phdr->p_filesz);
+-			}
+ 
+-			if (phdr->p_type != PT_NOTE)
+-				continue;
+-
+-			buf_size = phdr->p_filesz;
+-			offset = phdr->p_offset;
++			buf_size = p_filesz;
+ 			tmp = realloc(buf, buf_size);
+ 			if (tmp == NULL)
+ 				goto out_free;
+-
+ 			buf = tmp;
+-			fseek(fp, offset, SEEK_SET);
+-			if (fread(buf, buf_size, 1, fp) != 1)
+-				goto out_free;
++		}
++		fseek(fp, elf32 ? hdrs.phdr32[i].p_offset : hdrs.phdr64[i].p_offset, SEEK_SET);
++		if (fread(buf, p_filesz, 1, fp) != 1)
++			goto out_free;
+ 
+-			ret = read_build_id(buf, buf_size, bid, need_swap);
+-			if (ret == 0) {
+-				ret = bid->size;
+-				break;
+-			}
++		ret = read_build_id(buf, p_filesz, bid, need_swap);
++		if (ret == 0) {
++			ret = bid->size;
++			break;
+ 		}
+ 	}
+ out_free:
+ 	free(buf);
++	free(phdr);
+ out:
+ 	fclose(fp);
+ 	return ret;
+diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
+index 0ffdd52d86d707..309d573eac9a94 100644
+--- a/tools/perf/util/thread.c
++++ b/tools/perf/util/thread.c
+@@ -406,7 +406,7 @@ int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp, bo
+ }
+ 
+ void thread__find_cpumode_addr_location(struct thread *thread, u64 addr,
+-					struct addr_location *al)
++					bool symbols, struct addr_location *al)
+ {
+ 	size_t i;
+ 	const u8 cpumodes[] = {
+@@ -417,7 +417,11 @@ void thread__find_cpumode_addr_location(struct thread *thread, u64 addr,
+ 	};
+ 
+ 	for (i = 0; i < ARRAY_SIZE(cpumodes); i++) {
+-		thread__find_symbol(thread, cpumodes[i], addr, al);
++		if (symbols)
++			thread__find_symbol(thread, cpumodes[i], addr, al);
++		else
++			thread__find_map(thread, cpumodes[i], addr, al);
++
+ 		if (al->map)
+ 			break;
+ 	}
+diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
+index 6cbf6eb2812e05..1fb32e7d62a4dc 100644
+--- a/tools/perf/util/thread.h
++++ b/tools/perf/util/thread.h
+@@ -122,7 +122,7 @@ struct symbol *thread__find_symbol_fb(struct thread *thread, u8 cpumode,
+ 				      u64 addr, struct addr_location *al);
+ 
+ void thread__find_cpumode_addr_location(struct thread *thread, u64 addr,
+-					struct addr_location *al);
++					bool symbols, struct addr_location *al);
+ 
+ int thread__memcpy(struct thread *thread, struct machine *machine,
+ 		   void *buf, u64 ip, int len, bool *is64bit);
+diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
+index 12424bf08551d0..4c322586730d44 100644
+--- a/tools/power/x86/turbostat/turbostat.c
++++ b/tools/power/x86/turbostat/turbostat.c
+@@ -4491,6 +4491,38 @@ unsigned long pmt_read_counter(struct pmt_counter *ppmt, unsigned int domain_id)
+ 	return (value & value_mask) >> value_shift;
+ }
+ 
++
++/* Rapl domain enumeration helpers */
++static inline int get_rapl_num_domains(void)
++{
++	int num_packages = topo.max_package_id + 1;
++	int num_cores_per_package;
++	int num_cores;
++
++	if (!platform->has_per_core_rapl)
++		return num_packages;
++
++	num_cores_per_package = topo.max_core_id + 1;
++	num_cores = num_cores_per_package * num_packages;
++
++	return num_cores;
++}
++
++static inline int get_rapl_domain_id(int cpu)
++{
++	int nr_cores_per_package = topo.max_core_id + 1;
++	int rapl_core_id;
++
++	if (!platform->has_per_core_rapl)
++		return cpus[cpu].physical_package_id;
++
++	/* Compute the system-wide unique core-id for @cpu */
++	rapl_core_id = cpus[cpu].physical_core_id;
++	rapl_core_id += cpus[cpu].physical_package_id * nr_cores_per_package;
++
++	return rapl_core_id;
++}
++
+ /*
+  * get_counters(...)
+  * migrate to cpu
+@@ -4544,7 +4576,7 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
+ 		goto done;
+ 
+ 	if (platform->has_per_core_rapl) {
+-		status = get_rapl_counters(cpu, c->core_id, c, p);
++		status = get_rapl_counters(cpu, get_rapl_domain_id(cpu), c, p);
+ 		if (status != 0)
+ 			return status;
+ 	}
+@@ -4610,7 +4642,7 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
+ 		p->sys_lpi = cpuidle_cur_sys_lpi_us;
+ 
+ 	if (!platform->has_per_core_rapl) {
+-		status = get_rapl_counters(cpu, p->package_id, c, p);
++		status = get_rapl_counters(cpu, get_rapl_domain_id(cpu), c, p);
+ 		if (status != 0)
+ 			return status;
+ 	}
+@@ -7570,7 +7602,7 @@ void linux_perf_init(void)
+ 
+ void rapl_perf_init(void)
+ {
+-	const unsigned int num_domains = (platform->has_per_core_rapl ? topo.max_core_id : topo.max_package_id) + 1;
++	const unsigned int num_domains = get_rapl_num_domains();
+ 	bool *domain_visited = calloc(num_domains, sizeof(bool));
+ 
+ 	rapl_counter_info_perdomain = calloc(num_domains, sizeof(*rapl_counter_info_perdomain));
+@@ -7611,8 +7643,7 @@ void rapl_perf_init(void)
+ 				continue;
+ 
+ 			/* Skip already seen and handled RAPL domains */
+-			next_domain =
+-			    platform->has_per_core_rapl ? cpus[cpu].physical_core_id : cpus[cpu].physical_package_id;
++			next_domain = get_rapl_domain_id(cpu);
+ 
+ 			assert(next_domain < num_domains);
+ 
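
With per-core RAPL the domain id must be unique across the whole system, hence
the package id is folded into the core id. Worked example with an assumed
topology of 2 packages and max_core_id 7 (8 cores per package): the CPU on
package 1, core 3 maps to domain 3 + 1 * 8 = 11, and the machine exposes 16
domains in total:

  #include <stdio.h>

  /* assumed topology helper, mirroring get_rapl_domain_id() */
  static int rapl_domain_id(int core_id, int package_id, int max_core_id)
  {
      int cores_per_pkg = max_core_id + 1;

      return core_id + package_id * cores_per_pkg;
  }

  int main(void)
  {
      printf("%d\n", rapl_domain_id(3, 1, 7));    /* 11 */
      printf("%d\n", rapl_domain_id(0, 0, 7));    /* 0  */
      return 0;
  }
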
+diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
+index 9cf769d415687d..85c5f39131d341 100644
+--- a/tools/testing/selftests/Makefile
++++ b/tools/testing/selftests/Makefile
+@@ -196,7 +196,7 @@ export KHDR_INCLUDES
+ 
+ all:
+ 	@ret=1;							\
+-	for TARGET in $(TARGETS); do				\
++	for TARGET in $(TARGETS) $(INSTALL_DEP_TARGETS); do	\
+ 		BUILD_TARGET=$$BUILD/$$TARGET;			\
+ 		mkdir $$BUILD_TARGET  -p;			\
+ 		$(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET	\
+diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_nf.c b/tools/testing/selftests/bpf/prog_tests/bpf_nf.c
+index a4a1f93878d40d..fad98f01e2c06f 100644
+--- a/tools/testing/selftests/bpf/prog_tests/bpf_nf.c
++++ b/tools/testing/selftests/bpf/prog_tests/bpf_nf.c
+@@ -63,6 +63,12 @@ static void test_bpf_nf_ct(int mode)
+ 		.repeat = 1,
+ 	);
+ 
++	if (SYS_NOFAIL("iptables-legacy --version")) {
++		fprintf(stdout, "Missing required iptables-legacy tool\n");
++		test__skip();
++		return;
++	}
++
+ 	skel = test_bpf_nf__open_and_load();
+ 	if (!ASSERT_OK_PTR(skel, "test_bpf_nf__open_and_load"))
+ 		return;
+diff --git a/tools/testing/selftests/bpf/test_loader.c b/tools/testing/selftests/bpf/test_loader.c
+index 3e9b009580d4e4..7f69d7b5bd4d42 100644
+--- a/tools/testing/selftests/bpf/test_loader.c
++++ b/tools/testing/selftests/bpf/test_loader.c
+@@ -970,6 +970,14 @@ void run_subtest(struct test_loader *tester,
+ 	emit_verifier_log(tester->log_buf, false /*force*/);
+ 	validate_msgs(tester->log_buf, &subspec->expect_msgs, emit_verifier_log);
+ 
++	/* Restore capabilities because the kernel will silently ignore requests
++	 * for program info (such as xlated program text) if we are not
++	 * bpf-capable. Also, for some reason test_verifier executes programs
++	 * with all capabilities restored. Do the same here.
++	 */
++	if (restore_capabilities(&caps))
++		goto tobj_cleanup;
++
+ 	if (subspec->expect_xlated.cnt) {
+ 		err = get_xlated_program_text(bpf_program__fd(tprog),
+ 					      tester->log_buf, tester->log_buf_sz);
+@@ -995,12 +1003,6 @@ void run_subtest(struct test_loader *tester,
+ 	}
+ 
+ 	if (should_do_test_run(spec, subspec)) {
+-		/* For some reason test_verifier executes programs
+-		 * with all capabilities restored. Do the same here.
+-		 */
+-		if (restore_capabilities(&caps))
+-			goto tobj_cleanup;
+-
+ 		/* Do bpf_map__attach_struct_ops() for each struct_ops map.
+ 		 * This should trigger bpf_struct_ops->reg callback on kernel side.
+ 		 */
+diff --git a/tools/testing/selftests/cpufreq/cpufreq.sh b/tools/testing/selftests/cpufreq/cpufreq.sh
+index e350c521b46750..3aad9db921b533 100755
+--- a/tools/testing/selftests/cpufreq/cpufreq.sh
++++ b/tools/testing/selftests/cpufreq/cpufreq.sh
+@@ -244,9 +244,10 @@ do_suspend()
+ 					printf "Failed to suspend using RTC wake alarm\n"
+ 					return 1
+ 				fi
++			else
++				echo $filename > $SYSFS/power/state
+ 			fi
+ 
+-			echo $filename > $SYSFS/power/state
+ 			printf "Came out of $1\n"
+ 
+ 			printf "Do basic tests after finishing $1 to verify cpufreq state\n\n"
+diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
+index 8c3a73461475ba..60c84d935a2b0a 100644
+--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
++++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
+@@ -1618,14 +1618,8 @@ void teardown_trace_fixture(struct __test_metadata *_metadata,
+ {
+ 	if (tracer) {
+ 		int status;
+-		/*
+-		 * Extract the exit code from the other process and
+-		 * adopt it for ourselves in case its asserts failed.
+-		 */
+ 		ASSERT_EQ(0, kill(tracer, SIGUSR1));
+ 		ASSERT_EQ(tracer, waitpid(tracer, &status, 0));
+-		if (WEXITSTATUS(status))
+-			_metadata->exit_code = KSFT_FAIL;
+ 	}
+ }
+ 
+@@ -3155,12 +3149,15 @@ TEST(syscall_restart)
+ 	ret = get_syscall(_metadata, child_pid);
+ #if defined(__arm__)
+ 	/*
+-	 * FIXME:
+ 	 * - native ARM registers do NOT expose true syscall.
+ 	 * - compat ARM registers on ARM64 DO expose true syscall.
++	 * - values of utsbuf.machine include 'armv8l' or 'armv8b'
++	 *   for ARM64 running in compat mode.
+ 	 */
+ 	ASSERT_EQ(0, uname(&utsbuf));
+-	if (strncmp(utsbuf.machine, "arm", 3) == 0) {
++	if ((strncmp(utsbuf.machine, "arm", 3) == 0) &&
++	    (strncmp(utsbuf.machine, "armv8l", 6) != 0) &&
++	    (strncmp(utsbuf.machine, "armv8b", 6) != 0)) {
+ 		EXPECT_EQ(__NR_nanosleep, ret);
+ 	} else
+ #endif


* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2025-06-10 12:15 Mike Pagano
  0 siblings, 0 replies; 82+ messages in thread
From: Mike Pagano @ 2025-06-10 12:15 UTC (permalink / raw
  To: gentoo-commits

commit:     16a8b4c8eb0cf791c8b7feee1cd82c642d64ff42
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Jun 10 12:15:07 2025 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Jun 10 12:15:07 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=16a8b4c8

Linux patch 6.12.33

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |   4 +
 1032_linux-6.12.33.patch | 880 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 884 insertions(+)

diff --git a/0000_README b/0000_README
index 96a76d7e..1f792eae 100644
--- a/0000_README
+++ b/0000_README
@@ -171,6 +171,10 @@ Patch:  1031_linux-6.12.32.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.12.32
 
+Patch:  1032_linux-6.12.33.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.12.33
+
 Patch:  1500_fortify-copy-size-value-range-tracking-fix.patch
 From:   https://git.kernel.org/
 Desc:   fortify: Hide run-time copy size from value range tracking

diff --git a/1032_linux-6.12.33.patch b/1032_linux-6.12.33.patch
new file mode 100644
index 00000000..7c9e2d76
--- /dev/null
+++ b/1032_linux-6.12.33.patch
@@ -0,0 +1,880 @@
+diff --git a/Documentation/devicetree/bindings/phy/fsl,imx8mq-usb-phy.yaml b/Documentation/devicetree/bindings/phy/fsl,imx8mq-usb-phy.yaml
+index dc3a3f709feaa6..bac4d0b51d8a1b 100644
+--- a/Documentation/devicetree/bindings/phy/fsl,imx8mq-usb-phy.yaml
++++ b/Documentation/devicetree/bindings/phy/fsl,imx8mq-usb-phy.yaml
+@@ -58,8 +58,7 @@ properties:
+   fsl,phy-tx-vboost-level-microvolt:
+     description:
+       Adjust the boosted transmit launch pk-pk differential amplitude
+-    minimum: 880
+-    maximum: 1120
++    enum: [844, 1008, 1156]
+ 
+   fsl,phy-comp-dis-tune-percent:
+     description:
+diff --git a/Documentation/devicetree/bindings/usb/cypress,hx3.yaml b/Documentation/devicetree/bindings/usb/cypress,hx3.yaml
+index e44e88d993d0bf..e802e9ac975b81 100644
+--- a/Documentation/devicetree/bindings/usb/cypress,hx3.yaml
++++ b/Documentation/devicetree/bindings/usb/cypress,hx3.yaml
+@@ -14,9 +14,22 @@ allOf:
+ 
+ properties:
+   compatible:
+-    enum:
+-      - usb4b4,6504
+-      - usb4b4,6506
++    oneOf:
++      - enum:
++          - usb4b4,6504
++          - usb4b4,6506
++      - items:
++          - enum:
++              - usb4b4,6500
++              - usb4b4,6508
++          - const: usb4b4,6504
++      - items:
++          - enum:
++              - usb4b4,6502
++              - usb4b4,6503
++              - usb4b4,6507
++              - usb4b4,650a
++          - const: usb4b4,6506
+ 
+   reg: true
+ 
+diff --git a/Documentation/firmware-guide/acpi/dsd/data-node-references.rst b/Documentation/firmware-guide/acpi/dsd/data-node-references.rst
+index 8d8b53e96bcfee..ccb4b153e6f2dd 100644
+--- a/Documentation/firmware-guide/acpi/dsd/data-node-references.rst
++++ b/Documentation/firmware-guide/acpi/dsd/data-node-references.rst
+@@ -12,11 +12,14 @@ ACPI in general allows referring to device objects in the tree only.
+ Hierarchical data extension nodes may not be referred to directly, hence this
+ document defines a scheme to implement such references.
+ 
+-A reference consist of the device object name followed by one or more
+-hierarchical data extension [dsd-guide] keys. Specifically, the hierarchical
+-data extension node which is referred to by the key shall lie directly under
+-the parent object i.e. either the device object or another hierarchical data
+-extension node.
++A reference to a _DSD hierarchical data node is a string consisting of a
++device object reference followed by a dot (".") and a relative path to a data
++node object. Do not use non-string references as this will produce a copy of
++the hierarchical data node, not a reference!
++
++The hierarchical data extension node which is referred to shall be located
++directly under its parent object i.e. either the device object or another
++hierarchical data extension node [dsd-guide].
+ 
+ The keys in the hierarchical data nodes shall consist of the name of the node,
+ "@" character and the number of the node in hexadecimal notation (without pre-
+@@ -33,11 +36,9 @@ extension key.
+ Example
+ =======
+ 
+-In the ASL snippet below, the "reference" _DSD property contains a
+-device object reference to DEV0 and under that device object, a
+-hierarchical data extension key "node@1" referring to the NOD1 object
+-and lastly, a hierarchical data extension key "anothernode" referring to
+-the ANOD object which is also the final target node of the reference.
++In the ASL snippet below, the "reference" _DSD property contains a string
++reference to a hierarchical data extension node ANOD under DEV0 under the parent
++of DEV1. ANOD is also the final target node of the reference.
+ ::
+ 
+ 	Device (DEV0)
+@@ -76,10 +77,7 @@ the ANOD object which is also the final target node of the reference.
+ 	    Name (_DSD, Package () {
+ 		ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
+ 		Package () {
+-		    Package () {
+-			"reference", Package () {
+-			    ^DEV0, "node@1", "anothernode"
+-			}
++		    Package () { "reference", "^DEV0.ANOD" }
+ 		    },
+ 		}
+ 	    })
+diff --git a/Documentation/firmware-guide/acpi/dsd/graph.rst b/Documentation/firmware-guide/acpi/dsd/graph.rst
+index b9dbfc73ed25b6..d6ae5ffa748ca4 100644
+--- a/Documentation/firmware-guide/acpi/dsd/graph.rst
++++ b/Documentation/firmware-guide/acpi/dsd/graph.rst
+@@ -66,12 +66,9 @@ of that port shall be zero. Similarly, if a port may only have a single
+ endpoint, the number of that endpoint shall be zero.
+ 
+ The endpoint reference uses property extension with "remote-endpoint" property
+-name followed by a reference in the same package. Such references consist of
+-the remote device reference, the first package entry of the port data extension
+-reference under the device and finally the first package entry of the endpoint
+-data extension reference under the port. Individual references thus appear as::
++name followed by a string reference in the same package. [data-node-ref]::
+ 
+-    Package() { device, "port@X", "endpoint@Y" }
++    "device.datanode"
+ 
+ In the above example, "X" is the number of the port and "Y" is the number of
+ the endpoint.
+ A simple example of this is shown below::
+ 		ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
+ 		Package () {
+ 		    Package () { "reg", 0 },
+-		    Package () { "remote-endpoint", Package() { \_SB.PCI0.ISP, "port@4", "endpoint@0" } },
++		    Package () { "remote-endpoint", "\\_SB.PCI0.ISP.EP40" },
+ 		}
+ 	    })
+ 	}
+ A simple example of this is shown below::
+ 		ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
+ 		Package () {
+ 		    Package () { "reg", 0 },
+-		    Package () { "remote-endpoint", Package () { \_SB.PCI0.I2C2.CAM0, "port@0", "endpoint@0" } },
++		    Package () { "remote-endpoint", "\\_SB.PCI0.I2C2.CAM0.EP00" },
+ 		}
+ 	    })
+ 	}
+diff --git a/Documentation/firmware-guide/acpi/dsd/leds.rst b/Documentation/firmware-guide/acpi/dsd/leds.rst
+index 93db592c93c712..a97cd07d49be38 100644
+--- a/Documentation/firmware-guide/acpi/dsd/leds.rst
++++ b/Documentation/firmware-guide/acpi/dsd/leds.rst
+@@ -15,11 +15,6 @@ Referring to LEDs in Device tree is documented in [video-interfaces], in
+ "flash-leds" property documentation. In short, LEDs are directly referred to by
+ using phandles.
+ 
+-While Device tree allows referring to any node in the tree [devicetree], in
+-ACPI references are limited to device nodes only [acpi]. For this reason using
+-the same mechanism on ACPI is not possible. A mechanism to refer to non-device
+-ACPI nodes is documented in [data-node-ref].
+-
+ ACPI allows (as does DT) using integer arguments after the reference. A
+ combination of the LED driver device reference and an integer argument,
+ referring to the "reg" property of the relevant LED, is used to identify
+@@ -74,7 +69,7 @@ omitted. ::
+ 			Package () {
+ 				Package () {
+ 					"flash-leds",
+-					Package () { ^LED, "led@0", ^LED, "led@1" },
++					Package () { "^LED.LED0", "^LED.LED1" },
+ 				}
+ 			}
+ 		})
+diff --git a/Makefile b/Makefile
+index 1e6a6c66403f1b..c53dd3520193a9 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 12
+-SUBLEVEL = 32
++SUBLEVEL = 33
+ EXTRAVERSION =
+ NAME = Baby Opossum Posse
+ 
+diff --git a/block/bio.c b/block/bio.c
+index 20c74696bf23b1..094a5adf79d23b 100644
+--- a/block/bio.c
++++ b/block/bio.c
+@@ -1156,9 +1156,10 @@ EXPORT_SYMBOL(bio_add_page);
+ void bio_add_folio_nofail(struct bio *bio, struct folio *folio, size_t len,
+ 			  size_t off)
+ {
++	unsigned long nr = off / PAGE_SIZE;
++
+ 	WARN_ON_ONCE(len > UINT_MAX);
+-	WARN_ON_ONCE(off > UINT_MAX);
+-	__bio_add_page(bio, &folio->page, len, off);
++	__bio_add_page(bio, folio_page(folio, nr), len, off % PAGE_SIZE);
+ }
+ EXPORT_SYMBOL_GPL(bio_add_folio_nofail);
+ 
+@@ -1179,9 +1180,11 @@ EXPORT_SYMBOL_GPL(bio_add_folio_nofail);
+ bool bio_add_folio(struct bio *bio, struct folio *folio, size_t len,
+ 		   size_t off)
+ {
+-	if (len > UINT_MAX || off > UINT_MAX)
++	unsigned long nr = off / PAGE_SIZE;
++
++	if (len > UINT_MAX)
+ 		return false;
+-	return bio_add_page(bio, &folio->page, len, off) > 0;
++	return bio_add_page(bio, folio_page(folio, nr), len, off % PAGE_SIZE) > 0;
+ }
+ EXPORT_SYMBOL(bio_add_folio);
+ 
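
Worked example of the new offset math: with 4 KiB pages, a folio offset of 9000 bytes selects page index 9000 / 4096 = 2 within the folio and an in-page offset of 9000 % 4096 = 808, so the call becomes __bio_add_page(bio, folio_page(folio, 2), len, 808). Always passing &folio->page, as before, was only correct while the offset stayed inside the folio's first page.
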
+diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c
+index 168d03d5aa1d07..67d56a944d5495 100644
+--- a/drivers/accel/ivpu/ivpu_drv.c
++++ b/drivers/accel/ivpu/ivpu_drv.c
+@@ -709,6 +709,7 @@ static struct pci_device_id ivpu_pci_ids[] = {
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_MTL) },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_ARL) },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_LNL) },
++	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PTL_P) },
+ 	{ }
+ };
+ MODULE_DEVICE_TABLE(pci, ivpu_pci_ids);
+diff --git a/drivers/accel/ivpu/ivpu_drv.h b/drivers/accel/ivpu/ivpu_drv.h
+index a5707a85e72556..1fe6a3bd4e36b7 100644
+--- a/drivers/accel/ivpu/ivpu_drv.h
++++ b/drivers/accel/ivpu/ivpu_drv.h
+@@ -23,9 +23,10 @@
+ #define DRIVER_DESC "Driver for Intel NPU (Neural Processing Unit)"
+ #define DRIVER_DATE "20230117"
+ 
+-#define PCI_DEVICE_ID_MTL   0x7d1d
+-#define PCI_DEVICE_ID_ARL   0xad1d
+-#define PCI_DEVICE_ID_LNL   0x643e
++#define PCI_DEVICE_ID_MTL	0x7d1d
++#define PCI_DEVICE_ID_ARL	0xad1d
++#define PCI_DEVICE_ID_LNL	0x643e
++#define PCI_DEVICE_ID_PTL_P	0xb03e
+ 
+ #define IVPU_HW_IP_37XX 37
+ #define IVPU_HW_IP_40XX 40
+@@ -227,6 +228,8 @@ static inline int ivpu_hw_ip_gen(struct ivpu_device *vdev)
+ 		return IVPU_HW_IP_37XX;
+ 	case PCI_DEVICE_ID_LNL:
+ 		return IVPU_HW_IP_40XX;
++	case PCI_DEVICE_ID_PTL_P:
++		return IVPU_HW_IP_50XX;
+ 	default:
+ 		dump_stack();
+ 		ivpu_err(vdev, "Unknown NPU IP generation\n");
+@@ -241,6 +244,7 @@ static inline int ivpu_hw_btrs_gen(struct ivpu_device *vdev)
+ 	case PCI_DEVICE_ID_ARL:
+ 		return IVPU_HW_BTRS_MTL;
+ 	case PCI_DEVICE_ID_LNL:
++	case PCI_DEVICE_ID_PTL_P:
+ 		return IVPU_HW_BTRS_LNL;
+ 	default:
+ 		dump_stack();
+diff --git a/drivers/accel/ivpu/ivpu_fw.c b/drivers/accel/ivpu/ivpu_fw.c
+index d12188730ac7fa..83e4995540a6ee 100644
+--- a/drivers/accel/ivpu/ivpu_fw.c
++++ b/drivers/accel/ivpu/ivpu_fw.c
+@@ -57,11 +57,14 @@ static struct {
+ 	{ IVPU_HW_IP_37XX, "intel/vpu/vpu_37xx_v0.0.bin" },
+ 	{ IVPU_HW_IP_40XX, "vpu_40xx.bin" },
+ 	{ IVPU_HW_IP_40XX, "intel/vpu/vpu_40xx_v0.0.bin" },
++	{ IVPU_HW_IP_50XX, "vpu_50xx.bin" },
++	{ IVPU_HW_IP_50XX, "intel/vpu/vpu_50xx_v0.0.bin" },
+ };
+ 
+ /* Production fw_names from the table above */
+ MODULE_FIRMWARE("intel/vpu/vpu_37xx_v0.0.bin");
+ MODULE_FIRMWARE("intel/vpu/vpu_40xx_v0.0.bin");
++MODULE_FIRMWARE("intel/vpu/vpu_50xx_v0.0.bin");
+ 
+ static int ivpu_fw_request(struct ivpu_device *vdev)
+ {
+diff --git a/drivers/accel/ivpu/ivpu_hw_40xx_reg.h b/drivers/accel/ivpu/ivpu_hw_40xx_reg.h
+index d0b795b344c7f7..fc0ee8d637f968 100644
+--- a/drivers/accel/ivpu/ivpu_hw_40xx_reg.h
++++ b/drivers/accel/ivpu/ivpu_hw_40xx_reg.h
+@@ -115,6 +115,8 @@
+ 
+ #define VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY			0x00030068u
+ #define VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY_POST_DLY_MASK	GENMASK(7, 0)
++#define VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY_POST1_DLY_MASK	GENMASK(15, 8)
++#define VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY_POST2_DLY_MASK	GENMASK(23, 16)
+ 
+ #define VPU_50XX_HOST_SS_AON_PWR_ISLAND_STATUS_DLY			0x0003006cu
+ #define VPU_50XX_HOST_SS_AON_PWR_ISLAND_STATUS_DLY_STATUS_DLY_MASK	GENMASK(7, 0)
+diff --git a/drivers/accel/ivpu/ivpu_hw_ip.c b/drivers/accel/ivpu/ivpu_hw_ip.c
+index 60b33fc59d96e3..bd2582a8c80f38 100644
+--- a/drivers/accel/ivpu/ivpu_hw_ip.c
++++ b/drivers/accel/ivpu/ivpu_hw_ip.c
+@@ -8,15 +8,12 @@
+ #include "ivpu_hw.h"
+ #include "ivpu_hw_37xx_reg.h"
+ #include "ivpu_hw_40xx_reg.h"
++#include "ivpu_hw_btrs.h"
+ #include "ivpu_hw_ip.h"
+ #include "ivpu_hw_reg_io.h"
+ #include "ivpu_mmu.h"
+ #include "ivpu_pm.h"
+ 
+-#define PWR_ISLAND_EN_POST_DLY_FREQ_DEFAULT 0
+-#define PWR_ISLAND_EN_POST_DLY_FREQ_HIGH    18
+-#define PWR_ISLAND_STATUS_DLY_FREQ_DEFAULT  3
+-#define PWR_ISLAND_STATUS_DLY_FREQ_HIGH	    46
+ #define PWR_ISLAND_STATUS_TIMEOUT_US        (5 * USEC_PER_MSEC)
+ 
+ #define TIM_SAFE_ENABLE		            0xf1d0dead
+@@ -268,20 +265,15 @@ void ivpu_hw_ip_idle_gen_disable(struct ivpu_device *vdev)
+ 		idle_gen_drive_40xx(vdev, false);
+ }
+ 
+-static void pwr_island_delay_set_50xx(struct ivpu_device *vdev)
++static void
++pwr_island_delay_set_50xx(struct ivpu_device *vdev, u32 post, u32 post1, u32 post2, u32 status)
+ {
+-	u32 val, post, status;
+-
+-	if (vdev->hw->pll.profiling_freq == PLL_PROFILING_FREQ_DEFAULT) {
+-		post = PWR_ISLAND_EN_POST_DLY_FREQ_DEFAULT;
+-		status = PWR_ISLAND_STATUS_DLY_FREQ_DEFAULT;
+-	} else {
+-		post = PWR_ISLAND_EN_POST_DLY_FREQ_HIGH;
+-		status = PWR_ISLAND_STATUS_DLY_FREQ_HIGH;
+-	}
++	u32 val;
+ 
+ 	val = REGV_RD32(VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY);
+ 	val = REG_SET_FLD_NUM(VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY, POST_DLY, post, val);
++	val = REG_SET_FLD_NUM(VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY, POST1_DLY, post1, val);
++	val = REG_SET_FLD_NUM(VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY, POST2_DLY, post2, val);
+ 	REGV_WR32(VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY, val);
+ 
+ 	val = REGV_RD32(VPU_50XX_HOST_SS_AON_PWR_ISLAND_STATUS_DLY);
+@@ -686,13 +678,36 @@ static void dpu_active_drive_37xx(struct ivpu_device *vdev, bool enable)
+ 	REGV_WR32(VPU_37XX_HOST_SS_AON_DPU_ACTIVE, val);
+ }
+ 
++static void pwr_island_delay_set(struct ivpu_device *vdev)
++{
++	bool high = vdev->hw->pll.profiling_freq == PLL_PROFILING_FREQ_HIGH;
++	u32 post, post1, post2, status;
++
++	if (ivpu_hw_ip_gen(vdev) < IVPU_HW_IP_50XX)
++		return;
++
++	switch (ivpu_device_id(vdev)) {
++	case PCI_DEVICE_ID_PTL_P:
++		post = high ? 18 : 0;
++		post1 = 0;
++		post2 = 0;
++		status = high ? 46 : 3;
++		break;
++
++	default:
++		dump_stack();
++		ivpu_err(vdev, "Unknown device ID\n");
++		return;
++	}
++
++	pwr_island_delay_set_50xx(vdev, post, post1, post2, status);
++}
++
+ int ivpu_hw_ip_pwr_domain_enable(struct ivpu_device *vdev)
+ {
+ 	int ret;
+ 
+-	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_50XX)
+-		pwr_island_delay_set_50xx(vdev);
+-
++	pwr_island_delay_set(vdev);
+ 	pwr_island_enable(vdev);
+ 
+ 	ret = wait_for_pwr_island_status(vdev, 0x1);
+diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
+index 1837622ea625a8..025b9a07c08751 100644
+--- a/drivers/bluetooth/hci_qca.c
++++ b/drivers/bluetooth/hci_qca.c
+@@ -2385,14 +2385,14 @@ static int qca_serdev_probe(struct serdev_device *serdev)
+ 
+ 		qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable",
+ 					       GPIOD_OUT_LOW);
+-		if (IS_ERR(qcadev->bt_en) &&
+-		    (data->soc_type == QCA_WCN6750 ||
+-		     data->soc_type == QCA_WCN6855)) {
+-			dev_err(&serdev->dev, "failed to acquire BT_EN gpio\n");
+-			return PTR_ERR(qcadev->bt_en);
+-		}
++		if (IS_ERR(qcadev->bt_en))
++			return dev_err_probe(&serdev->dev,
++					     PTR_ERR(qcadev->bt_en),
++					     "failed to acquire BT_EN gpio\n");
+ 
+-		if (!qcadev->bt_en)
++		if (!qcadev->bt_en &&
++		    (data->soc_type == QCA_WCN6750 ||
++		     data->soc_type == QCA_WCN6855))
+ 			power_ctrl_enabled = false;
+ 
+ 		qcadev->sw_ctrl = devm_gpiod_get_optional(&serdev->dev, "swctrl",
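
The dev_err_probe() conversion above follows the standard probe-error idiom; a condensed sketch (the GPIO name mirrors the hunk, the rest is illustrative):

#include <linux/device.h>
#include <linux/gpio/consumer.h>
#include <linux/serdev.h>

/* dev_err_probe() logs the message and returns the error in one step;
 * for -EPROBE_DEFER it stays silent and records the reason in debugfs
 * (devices_deferred) instead of spamming the log on each retry.
 */
static int example_probe(struct serdev_device *serdev)
{
	struct gpio_desc *bt_en;

	bt_en = devm_gpiod_get_optional(&serdev->dev, "enable",
					GPIOD_OUT_LOW);
	if (IS_ERR(bt_en))
		return dev_err_probe(&serdev->dev, PTR_ERR(bt_en),
				     "failed to acquire BT_EN gpio\n");

	return 0;
}
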
+diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
+index 47e910c22a80bd..0f1679817682f0 100644
+--- a/drivers/cpufreq/acpi-cpufreq.c
++++ b/drivers/cpufreq/acpi-cpufreq.c
+@@ -663,7 +663,7 @@ static u64 get_max_boost_ratio(unsigned int cpu, u64 *nominal_freq)
+ 	nominal_perf = perf_caps.nominal_perf;
+ 
+ 	if (nominal_freq)
+-		*nominal_freq = perf_caps.nominal_freq;
++		*nominal_freq = perf_caps.nominal_freq * 1000;
+ 
+ 	if (!highest_perf || !nominal_perf) {
+ 		pr_debug("CPU%d: highest or nominal performance missing\n", cpu);
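
The added factor fixes a unit mismatch: the _CPC nominal frequency comes back from CPPC in MHz while cpufreq bookkeeping is in kHz, so a nominal 2400 MHz must be stored as 2400 * 1000 = 2400000 kHz; without the multiplication, calculations consuming *nominal_freq were off by a factor of 1000.
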
+diff --git a/drivers/cpufreq/tegra186-cpufreq.c b/drivers/cpufreq/tegra186-cpufreq.c
+index 4e5b6f9a56d1b2..7b8fcfa55038bc 100644
+--- a/drivers/cpufreq/tegra186-cpufreq.c
++++ b/drivers/cpufreq/tegra186-cpufreq.c
+@@ -73,18 +73,11 @@ static int tegra186_cpufreq_init(struct cpufreq_policy *policy)
+ {
+ 	struct tegra186_cpufreq_data *data = cpufreq_get_driver_data();
+ 	unsigned int cluster = data->cpus[policy->cpu].bpmp_cluster_id;
+-	u32 cpu;
+ 
+ 	policy->freq_table = data->clusters[cluster].table;
+ 	policy->cpuinfo.transition_latency = 300 * 1000;
+ 	policy->driver_data = NULL;
+ 
+-	/* set same policy for all cpus in a cluster */
+-	for (cpu = 0; cpu < ARRAY_SIZE(tegra186_cpus); cpu++) {
+-		if (data->cpus[cpu].bpmp_cluster_id == cluster)
+-			cpumask_set_cpu(cpu, policy->cpus);
+-	}
+-
+ 	return 0;
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 5f9452b22596a3..084d9ed325af63 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -668,21 +668,15 @@ static void dm_crtc_high_irq(void *interrupt_params)
+ 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
+ 
+ 	if (acrtc->dm_irq_params.stream &&
+-		acrtc->dm_irq_params.vrr_params.supported) {
+-		bool replay_en = acrtc->dm_irq_params.stream->link->replay_settings.replay_feature_enabled;
+-		bool psr_en = acrtc->dm_irq_params.stream->link->psr_settings.psr_feature_enabled;
+-		bool fs_active_var_en = acrtc->dm_irq_params.freesync_config.state == VRR_STATE_ACTIVE_VARIABLE;
+-
++	    acrtc->dm_irq_params.vrr_params.supported &&
++	    acrtc->dm_irq_params.freesync_config.state ==
++		    VRR_STATE_ACTIVE_VARIABLE) {
+ 		mod_freesync_handle_v_update(adev->dm.freesync_module,
+ 					     acrtc->dm_irq_params.stream,
+ 					     &acrtc->dm_irq_params.vrr_params);
+ 
+-		/* update vmin_vmax only if freesync is enabled, or only if PSR and REPLAY are disabled */
+-		if (fs_active_var_en || (!fs_active_var_en && !replay_en && !psr_en)) {
+-			dc_stream_adjust_vmin_vmax(adev->dm.dc,
+-					acrtc->dm_irq_params.stream,
+-					&acrtc->dm_irq_params.vrr_params.adjust);
+-		}
++		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
++					   &acrtc->dm_irq_params.vrr_params.adjust);
+ 	}
+ 
+ 	/*
+diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
+index 62650a2f00ccc1..a392e060ca2f42 100644
+--- a/drivers/pci/pcie/aspm.c
++++ b/drivers/pci/pcie/aspm.c
+@@ -805,6 +805,15 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
+ 	pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &parent_lnkctl);
+ 	pcie_capability_read_word(child, PCI_EXP_LNKCTL, &child_lnkctl);
+ 
++	/* Disable L0s/L1 before updating L1SS config */
++	if (FIELD_GET(PCI_EXP_LNKCTL_ASPMC, child_lnkctl) ||
++	    FIELD_GET(PCI_EXP_LNKCTL_ASPMC, parent_lnkctl)) {
++		pcie_capability_write_word(child, PCI_EXP_LNKCTL,
++					   child_lnkctl & ~PCI_EXP_LNKCTL_ASPMC);
++		pcie_capability_write_word(parent, PCI_EXP_LNKCTL,
++					   parent_lnkctl & ~PCI_EXP_LNKCTL_ASPMC);
++	}
++
+ 	/*
+ 	 * Setup L0s state
+ 	 *
+@@ -829,6 +838,13 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
+ 
+ 	aspm_l1ss_init(link);
+ 
++	/* Restore L0s/L1 if they were enabled */
++	if (FIELD_GET(PCI_EXP_LNKCTL_ASPMC, child_lnkctl) ||
++	    FIELD_GET(PCI_EXP_LNKCTL_ASPMC, parent_lnkctl)) {
++		pcie_capability_write_word(parent, PCI_EXP_LNKCTL, parent_lnkctl);
++		pcie_capability_write_word(child, PCI_EXP_LNKCTL, child_lnkctl);
++	}
++
+ 	/* Save default state */
+ 	link->aspm_default = link->aspm_enabled;
+ 
+@@ -845,25 +861,28 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
+ 	}
+ }
+ 
+-/* Configure the ASPM L1 substates */
++/* Configure the ASPM L1 substates. Caller must disable L1 first. */
+ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state)
+ {
+-	u32 val, enable_req;
++	u32 val;
+ 	struct pci_dev *child = link->downstream, *parent = link->pdev;
+ 
+-	enable_req = (link->aspm_enabled ^ state) & state;
++	val = 0;
++	if (state & PCIE_LINK_STATE_L1_1)
++		val |= PCI_L1SS_CTL1_ASPM_L1_1;
++	if (state & PCIE_LINK_STATE_L1_2)
++		val |= PCI_L1SS_CTL1_ASPM_L1_2;
++	if (state & PCIE_LINK_STATE_L1_1_PCIPM)
++		val |= PCI_L1SS_CTL1_PCIPM_L1_1;
++	if (state & PCIE_LINK_STATE_L1_2_PCIPM)
++		val |= PCI_L1SS_CTL1_PCIPM_L1_2;
+ 
+ 	/*
+-	 * Here are the rules specified in the PCIe spec for enabling L1SS:
+-	 * - When enabling L1.x, enable bit at parent first, then at child
+-	 * - When disabling L1.x, disable bit at child first, then at parent
+-	 * - When enabling ASPM L1.x, need to disable L1
+-	 *   (at child followed by parent).
+-	 * - The ASPM/PCIPM L1.2 must be disabled while programming timing
++	 * PCIe r6.2, sec 5.5.4, rules for enabling L1 PM Substates:
++	 * - Clear L1.x enable bits at child first, then at parent
++	 * - Set L1.x enable bits at parent first, then at child
++	 * - ASPM/PCIPM L1.2 must be disabled while programming timing
+ 	 *   parameters
+-	 *
+-	 * To keep it simple, disable all L1SS bits first, and later enable
+-	 * what is needed.
+ 	 */
+ 
+ 	/* Disable all L1 substates */
+@@ -871,26 +890,6 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state)
+ 				       PCI_L1SS_CTL1_L1SS_MASK, 0);
+ 	pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
+ 				       PCI_L1SS_CTL1_L1SS_MASK, 0);
+-	/*
+-	 * If needed, disable L1, and it gets enabled later
+-	 * in pcie_config_aspm_link().
+-	 */
+-	if (enable_req & (PCIE_LINK_STATE_L1_1 | PCIE_LINK_STATE_L1_2)) {
+-		pcie_capability_clear_word(child, PCI_EXP_LNKCTL,
+-					   PCI_EXP_LNKCTL_ASPM_L1);
+-		pcie_capability_clear_word(parent, PCI_EXP_LNKCTL,
+-					   PCI_EXP_LNKCTL_ASPM_L1);
+-	}
+-
+-	val = 0;
+-	if (state & PCIE_LINK_STATE_L1_1)
+-		val |= PCI_L1SS_CTL1_ASPM_L1_1;
+-	if (state & PCIE_LINK_STATE_L1_2)
+-		val |= PCI_L1SS_CTL1_ASPM_L1_2;
+-	if (state & PCIE_LINK_STATE_L1_1_PCIPM)
+-		val |= PCI_L1SS_CTL1_PCIPM_L1_1;
+-	if (state & PCIE_LINK_STATE_L1_2_PCIPM)
+-		val |= PCI_L1SS_CTL1_PCIPM_L1_2;
+ 
+ 	/* Enable what we need to enable */
+ 	pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
+@@ -937,21 +936,30 @@ static void pcie_config_aspm_link(struct pcie_link_state *link, u32 state)
+ 		dwstream |= PCI_EXP_LNKCTL_ASPM_L1;
+ 	}
+ 
++	/*
++	 * Per PCIe r6.2, sec 5.5.4, setting either or both of the enable
++	 * bits for ASPM L1 PM Substates must be done while ASPM L1 is
++	 * disabled. Disable L1 here and apply new configuration after L1SS
++	 * configuration has been completed.
++	 *
++	 * Per sec 7.5.3.7, when disabling ASPM L1, software must disable
++	 * it in the Downstream component prior to disabling it in the
++	 * Upstream component, and ASPM L1 must be enabled in the Upstream
++	 * component prior to enabling it in the Downstream component.
++	 *
++	 * Sec 7.5.3.7 also recommends programming the same ASPM Control
++	 * value for all functions of a multi-function device.
++	 */
++	list_for_each_entry(child, &linkbus->devices, bus_list)
++		pcie_config_aspm_dev(child, 0);
++	pcie_config_aspm_dev(parent, 0);
++
+ 	if (link->aspm_capable & PCIE_LINK_STATE_L1SS)
+ 		pcie_config_aspm_l1ss(link, state);
+ 
+-	/*
+-	 * Spec 2.0 suggests all functions should be configured the
+-	 * same setting for ASPM. Enabling ASPM L1 should be done in
+-	 * upstream component first and then downstream, and vice
+-	 * versa for disabling ASPM L1. Spec doesn't mention L0S.
+-	 */
+-	if (state & PCIE_LINK_STATE_L1)
+-		pcie_config_aspm_dev(parent, upstream);
++	pcie_config_aspm_dev(parent, upstream);
+ 	list_for_each_entry(child, &linkbus->devices, bus_list)
+ 		pcie_config_aspm_dev(child, dwstream);
+-	if (!(state & PCIE_LINK_STATE_L1))
+-		pcie_config_aspm_dev(parent, upstream);
+ 
+ 	link->aspm_enabled = state;
+ 
+diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
+index 4c4ada06423d72..4ce11c74fec1fc 100644
+--- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
++++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
+@@ -417,20 +417,22 @@ static int armada_37xx_gpio_direction_output(struct gpio_chip *chip,
+ 					     unsigned int offset, int value)
+ {
+ 	struct armada_37xx_pinctrl *info = gpiochip_get_data(chip);
+-	unsigned int reg = OUTPUT_EN;
++	unsigned int en_offset = offset;
++	unsigned int reg = OUTPUT_VAL;
+ 	unsigned int mask, val, ret;
+ 
+ 	armada_37xx_update_reg(&reg, &offset);
+ 	mask = BIT(offset);
++	val = value ? mask : 0;
+ 
+-	ret = regmap_update_bits(info->regmap, reg, mask, mask);
+-
++	ret = regmap_update_bits(info->regmap, reg, mask, val);
+ 	if (ret)
+ 		return ret;
+ 
+-	reg = OUTPUT_VAL;
+-	val = value ? mask : 0;
+-	regmap_update_bits(info->regmap, reg, mask, val);
++	reg = OUTPUT_EN;
++	armada_37xx_update_reg(&reg, &en_offset);
++
++	regmap_update_bits(info->regmap, reg, mask, mask);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
+index e31fa0ad127e95..a0afdeaac270f0 100644
+--- a/drivers/rtc/class.c
++++ b/drivers/rtc/class.c
+@@ -327,7 +327,7 @@ static void rtc_device_get_offset(struct rtc_device *rtc)
+ 	 *
+ 	 * Otherwise the offset seconds should be 0.
+ 	 */
+-	if (rtc->start_secs > rtc->range_max ||
++	if ((rtc->start_secs >= 0 && rtc->start_secs > rtc->range_max) ||
+ 	    rtc->start_secs + range_secs - 1 < rtc->range_min)
+ 		rtc->offset_secs = rtc->start_secs - rtc->range_min;
+ 	else if (rtc->start_secs > rtc->range_min)
+diff --git a/drivers/rtc/lib.c b/drivers/rtc/lib.c
+index fe361652727a3f..13b5b1f2046510 100644
+--- a/drivers/rtc/lib.c
++++ b/drivers/rtc/lib.c
+@@ -46,24 +46,38 @@ EXPORT_SYMBOL(rtc_year_days);
+  * rtc_time64_to_tm - converts time64_t to rtc_time.
+  *
+  * @time:	The number of seconds since 01-01-1970 00:00:00.
+- *		(Must be positive.)
++ *		Works for values since at least 1900
+  * @tm:		Pointer to the struct rtc_time.
+  */
+ void rtc_time64_to_tm(time64_t time, struct rtc_time *tm)
+ {
+-	unsigned int secs;
+-	int days;
++	int days, secs;
+ 
+ 	u64 u64tmp;
+ 	u32 u32tmp, udays, century, day_of_century, year_of_century, year,
+ 		day_of_year, month, day;
+ 	bool is_Jan_or_Feb, is_leap_year;
+ 
+-	/* time must be positive */
++	/*
++	 * Get days and seconds while preserving the sign to
++	 * handle negative time values (dates before 1970-01-01)
++	 */
+ 	days = div_s64_rem(time, 86400, &secs);
+ 
++	/*
++	 * We need 0 <= secs < 86400 which isn't given for negative
++	 * values of time. Fixup accordingly.
++	 */
++	if (secs < 0) {
++		days -= 1;
++		secs += 86400;
++	}
++
+ 	/* day of the week, 1970-01-01 was a Thursday */
+ 	tm->tm_wday = (days + 4) % 7;
++	/* Ensure tm_wday is always positive */
++	if (tm->tm_wday < 0)
++		tm->tm_wday += 7;
+ 
+ 	/*
+ 	 * The following algorithm is, basically, Proposition 6.3 of Neri
+@@ -93,7 +107,7 @@ void rtc_time64_to_tm(time64_t time, struct rtc_time *tm)
+ 	 * thus, is slightly different from [1].
+ 	 */
+ 
+-	udays		= ((u32) days) + 719468;
++	udays		= days + 719468;
+ 
+ 	u32tmp		= 4 * udays + 3;
+ 	century		= u32tmp / 146097;
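
Worked example for a pre-epoch value: time = -1 (1969-12-31 23:59:59 UTC) makes div_s64_rem() return days = 0 with secs = -1; the fixup turns that into days = -1 and secs = 86399 (23:59:59), and tm_wday = (-1 + 4) % 7 = 3, a Wednesday, which matches the calendar. The subsequent udays = -1 + 719468 = 719467 then stays well inside unsigned range.
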
+diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c
+index 4bdb2d45e0bffc..58ab3d86bc25ed 100644
+--- a/drivers/thunderbolt/ctl.c
++++ b/drivers/thunderbolt/ctl.c
+@@ -148,6 +148,11 @@ static void tb_cfg_request_dequeue(struct tb_cfg_request *req)
+ 	struct tb_ctl *ctl = req->ctl;
+ 
+ 	mutex_lock(&ctl->request_queue_lock);
++	if (!test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags)) {
++		mutex_unlock(&ctl->request_queue_lock);
++		return;
++	}
++
+ 	list_del(&req->list);
+ 	clear_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
+ 	if (test_bit(TB_CFG_REQUEST_CANCELED, &req->flags))
+diff --git a/drivers/tty/serial/jsm/jsm_tty.c b/drivers/tty/serial/jsm/jsm_tty.c
+index ce0fef7e2c665c..be2f130696b3a0 100644
+--- a/drivers/tty/serial/jsm/jsm_tty.c
++++ b/drivers/tty/serial/jsm/jsm_tty.c
+@@ -451,6 +451,7 @@ int jsm_uart_port_init(struct jsm_board *brd)
+ 		if (!brd->channels[i])
+ 			continue;
+ 
++		brd->channels[i]->uart_port.dev = &brd->pci_dev->dev;
+ 		brd->channels[i]->uart_port.irq = brd->irq;
+ 		brd->channels[i]->uart_port.uartclk = 14745600;
+ 		brd->channels[i]->uart_port.type = PORT_JSM;
+diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
+index 740d2d2b19fbe0..66f3d9324ba2f3 100644
+--- a/drivers/usb/class/usbtmc.c
++++ b/drivers/usb/class/usbtmc.c
+@@ -483,6 +483,7 @@ static int usbtmc_get_stb(struct usbtmc_file_data *file_data, __u8 *stb)
+ 	u8 tag;
+ 	int rv;
+ 	long wait_rv;
++	unsigned long expire;
+ 
+ 	dev_dbg(dev, "Enter ioctl_read_stb iin_ep_present: %d\n",
+ 		data->iin_ep_present);
+@@ -512,10 +513,11 @@ static int usbtmc_get_stb(struct usbtmc_file_data *file_data, __u8 *stb)
+ 	}
+ 
+ 	if (data->iin_ep_present) {
++		expire = msecs_to_jiffies(file_data->timeout);
+ 		wait_rv = wait_event_interruptible_timeout(
+ 			data->waitq,
+ 			atomic_read(&data->iin_data_valid) != 0,
+-			file_data->timeout);
++			expire);
+ 		if (wait_rv < 0) {
+ 			dev_dbg(dev, "wait interrupted %ld\n", wait_rv);
+ 			rv = wait_rv;
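
The conversion matters because wait_event_interruptible_timeout() takes jiffies, not milliseconds: passing file_data->timeout directly made the real wait scale with CONFIG_HZ, e.g. a 5000 ms timeout waited 50 s on an HZ=100 kernel but 5 s on HZ=1000. With msecs_to_jiffies() it is 5 s regardless of HZ.
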
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 4903c733d37ae7..c979ecd0169a2d 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -372,6 +372,9 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	/* SanDisk Corp. SanDisk 3.2Gen1 */
+ 	{ USB_DEVICE(0x0781, 0x55a3), .driver_info = USB_QUIRK_DELAY_INIT },
+ 
++	/* SanDisk Extreme 55AE */
++	{ USB_DEVICE(0x0781, 0x55ae), .driver_info = USB_QUIRK_NO_LPM },
++
+ 	/* Realforce 87U Keyboard */
+ 	{ USB_DEVICE(0x0853, 0x011b), .driver_info = USB_QUIRK_NO_LPM },
+ 
+diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
+index ad41363e3cea5a..9708c3d40f078b 100644
+--- a/drivers/usb/serial/pl2303.c
++++ b/drivers/usb/serial/pl2303.c
+@@ -457,6 +457,8 @@ static int pl2303_detect_type(struct usb_serial *serial)
+ 		case 0x605:
+ 		case 0x700:	/* GR */
+ 		case 0x705:
++		case 0x905:	/* GT-2AB */
++		case 0x1005:	/* GC-Q20 */
+ 			return TYPE_HXN;
+ 		}
+ 		break;
+diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
+index d460d71b425783..1477e31d776327 100644
+--- a/drivers/usb/storage/unusual_uas.h
++++ b/drivers/usb/storage/unusual_uas.h
+@@ -52,6 +52,13 @@ UNUSUAL_DEV(0x059f, 0x1061, 0x0000, 0x9999,
+ 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ 		US_FL_NO_REPORT_OPCODES | US_FL_NO_SAME),
+ 
++/* Reported-by: Zhihong Zhou <zhouzhihong@greatwall.com.cn> */
++UNUSUAL_DEV(0x0781, 0x55e8, 0x0000, 0x9999,
++		"SanDisk",
++		"",
++		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++		US_FL_IGNORE_UAS),
++
+ /* Reported-by: Hongling Zeng <zenghongling@kylinos.cn> */
+ UNUSUAL_DEV(0x090c, 0x2000, 0x0000, 0x9999,
+ 		"Hiksemi",
+diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
+index 5863a20b6c5dd3..0568e643e8447a 100644
+--- a/drivers/usb/typec/ucsi/ucsi.h
++++ b/drivers/usb/typec/ucsi/ucsi.h
+@@ -367,7 +367,7 @@ struct ucsi_debugfs_entry {
+ 		u64 low;
+ 		u64 high;
+ 	} response;
+-	u32 status;
++	int status;
+ 	struct dentry *dentry;
+ };
+ 
+diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
+index a60db5e795a4c4..1061991434b119 100644
+--- a/fs/f2fs/inode.c
++++ b/fs/f2fs/inode.c
+@@ -777,6 +777,13 @@ int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
+ 		!is_inode_flag_set(inode, FI_DIRTY_INODE))
+ 		return 0;
+ 
++	/*
++	 * no need to update inode page, ultimately f2fs_evict_inode() will
++	 * clear dirty status of inode.
++	 */
++	if (f2fs_cp_error(sbi))
++		return -EIO;
++
+ 	if (!f2fs_is_checkpoint_ready(sbi)) {
+ 		f2fs_mark_inode_dirty_sync(inode, true);
+ 		return -ENOSPC;
+diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
+index 51b2b8c5c749c5..0c004dd5595b91 100644
+--- a/fs/f2fs/segment.h
++++ b/fs/f2fs/segment.h
+@@ -562,13 +562,16 @@ static inline bool has_curseg_enough_space(struct f2fs_sb_info *sbi,
+ 			unsigned int node_blocks, unsigned int data_blocks,
+ 			unsigned int dent_blocks)
+ {
+-
+ 	unsigned int segno, left_blocks, blocks;
+ 	int i;
+ 
+ 	/* check current data/node sections in the worst case. */
+ 	for (i = CURSEG_HOT_DATA; i < NR_PERSISTENT_LOG; i++) {
+ 		segno = CURSEG_I(sbi, i)->segno;
++
++		if (unlikely(segno == NULL_SEGNO))
++			return false;
++
+ 		left_blocks = CAP_BLKS_PER_SEC(sbi) -
+ 				get_ckpt_valid_blocks(sbi, segno, true);
+ 
+@@ -579,6 +582,10 @@ static inline bool has_curseg_enough_space(struct f2fs_sb_info *sbi,
+ 
+ 	/* check current data section for dentry blocks. */
+ 	segno = CURSEG_I(sbi, CURSEG_HOT_DATA)->segno;
++
++	if (unlikely(segno == NULL_SEGNO))
++		return false;
++
+ 	left_blocks = CAP_BLKS_PER_SEC(sbi) -
+ 			get_ckpt_valid_blocks(sbi, segno, true);
+ 	if (dent_blocks > left_blocks)
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index e773b0adcfc0a2..2dc5cfecb016b1 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -6699,7 +6699,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
+ 		ret = trace_seq_to_buffer(&iter->seq,
+ 					  page_address(spd.pages[i]),
+ 					  min((size_t)trace_seq_used(&iter->seq),
+-						  PAGE_SIZE));
++						  (size_t)PAGE_SIZE));
+ 		if (ret < 0) {
+ 			__free_page(spd.pages[i]);
+ 			break;
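
The added (size_t) cast satisfies the strict type check in the kernel's min() macro: PAGE_SIZE expands to an unsigned long expression while the other operand was already cast to size_t. An equivalent spelling, assuming the same surrounding code, is min_t(), which casts both sides to the named type:

ret = trace_seq_to_buffer(&iter->seq, page_address(spd.pages[i]),
			  min_t(size_t, trace_seq_used(&iter->seq),
				PAGE_SIZE));
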


* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2025-06-04 18:10 Mike Pagano
  0 siblings, 0 replies; 82+ messages in thread
From: Mike Pagano @ 2025-06-04 18:10 UTC (permalink / raw
  To: gentoo-commits

commit:     9d46239a49e1eda39318e3d2aa9c40de3b156ae3
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Jun  4 18:10:06 2025 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Jun  4 18:10:06 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=9d46239a

Linux patch 6.12.32

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1031_linux-6.12.32.patch | 2035 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2039 insertions(+)

diff --git a/0000_README b/0000_README
index b4bbe192..96a76d7e 100644
--- a/0000_README
+++ b/0000_README
@@ -167,6 +167,10 @@ Patch:  1030_linux-6.12.31.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.12.31
 
+Patch:  1031_linux-6.12.32.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.12.32
+
 Patch:  1500_fortify-copy-size-value-range-tracking-fix.patch
 From:   https://git.kernel.org/
 Desc:   fortify: Hide run-time copy size from value range tracking

diff --git a/1031_linux-6.12.32.patch b/1031_linux-6.12.32.patch
new file mode 100644
index 00000000..fd993377
--- /dev/null
+++ b/1031_linux-6.12.32.patch
@@ -0,0 +1,2035 @@
+diff --git a/Makefile b/Makefile
+index 18c2a7cf9e9134..1e6a6c66403f1b 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 12
+-SUBLEVEL = 31
++SUBLEVEL = 32
+ EXTRAVERSION =
+ NAME = Baby Opossum Posse
+ 
+diff --git a/arch/arm64/boot/dts/qcom/ipq9574.dtsi b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
+index 08a82a5cf66758..81ccd0600c5ab2 100644
+--- a/arch/arm64/boot/dts/qcom/ipq9574.dtsi
++++ b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
+@@ -261,6 +261,8 @@ cryptobam: dma-controller@704000 {
+ 			interrupts = <GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH>;
+ 			#dma-cells = <1>;
+ 			qcom,ee = <1>;
++			qcom,num-ees = <4>;
++			num-channels = <16>;
+ 			qcom,controlled-remotely;
+ 		};
+ 
+diff --git a/arch/arm64/boot/dts/qcom/sa8775p.dtsi b/arch/arm64/boot/dts/qcom/sa8775p.dtsi
+index 8a21448c0fa845..b28fa598cebb3d 100644
+--- a/arch/arm64/boot/dts/qcom/sa8775p.dtsi
++++ b/arch/arm64/boot/dts/qcom/sa8775p.dtsi
+@@ -4012,15 +4012,7 @@ compute-cb@1 {
+ 						compatible = "qcom,fastrpc-compute-cb";
+ 						reg = <1>;
+ 						iommus = <&apps_smmu 0x2141 0x04a0>,
+-							 <&apps_smmu 0x2161 0x04a0>,
+-							 <&apps_smmu 0x2181 0x0400>,
+-							 <&apps_smmu 0x21c1 0x04a0>,
+-							 <&apps_smmu 0x21e1 0x04a0>,
+-							 <&apps_smmu 0x2541 0x04a0>,
+-							 <&apps_smmu 0x2561 0x04a0>,
+-							 <&apps_smmu 0x2581 0x0400>,
+-							 <&apps_smmu 0x25c1 0x04a0>,
+-							 <&apps_smmu 0x25e1 0x04a0>;
++							 <&apps_smmu 0x2181 0x0400>;
+ 						dma-coherent;
+ 					};
+ 
+@@ -4028,15 +4020,7 @@ compute-cb@2 {
+ 						compatible = "qcom,fastrpc-compute-cb";
+ 						reg = <2>;
+ 						iommus = <&apps_smmu 0x2142 0x04a0>,
+-							 <&apps_smmu 0x2162 0x04a0>,
+-							 <&apps_smmu 0x2182 0x0400>,
+-							 <&apps_smmu 0x21c2 0x04a0>,
+-							 <&apps_smmu 0x21e2 0x04a0>,
+-							 <&apps_smmu 0x2542 0x04a0>,
+-							 <&apps_smmu 0x2562 0x04a0>,
+-							 <&apps_smmu 0x2582 0x0400>,
+-							 <&apps_smmu 0x25c2 0x04a0>,
+-							 <&apps_smmu 0x25e2 0x04a0>;
++							 <&apps_smmu 0x2182 0x0400>;
+ 						dma-coherent;
+ 					};
+ 
+@@ -4044,15 +4028,7 @@ compute-cb@3 {
+ 						compatible = "qcom,fastrpc-compute-cb";
+ 						reg = <3>;
+ 						iommus = <&apps_smmu 0x2143 0x04a0>,
+-							 <&apps_smmu 0x2163 0x04a0>,
+-							 <&apps_smmu 0x2183 0x0400>,
+-							 <&apps_smmu 0x21c3 0x04a0>,
+-							 <&apps_smmu 0x21e3 0x04a0>,
+-							 <&apps_smmu 0x2543 0x04a0>,
+-							 <&apps_smmu 0x2563 0x04a0>,
+-							 <&apps_smmu 0x2583 0x0400>,
+-							 <&apps_smmu 0x25c3 0x04a0>,
+-							 <&apps_smmu 0x25e3 0x04a0>;
++							 <&apps_smmu 0x2183 0x0400>;
+ 						dma-coherent;
+ 					};
+ 
+@@ -4060,15 +4036,7 @@ compute-cb@4 {
+ 						compatible = "qcom,fastrpc-compute-cb";
+ 						reg = <4>;
+ 						iommus = <&apps_smmu 0x2144 0x04a0>,
+-							 <&apps_smmu 0x2164 0x04a0>,
+-							 <&apps_smmu 0x2184 0x0400>,
+-							 <&apps_smmu 0x21c4 0x04a0>,
+-							 <&apps_smmu 0x21e4 0x04a0>,
+-							 <&apps_smmu 0x2544 0x04a0>,
+-							 <&apps_smmu 0x2564 0x04a0>,
+-							 <&apps_smmu 0x2584 0x0400>,
+-							 <&apps_smmu 0x25c4 0x04a0>,
+-							 <&apps_smmu 0x25e4 0x04a0>;
++							 <&apps_smmu 0x2184 0x0400>;
+ 						dma-coherent;
+ 					};
+ 
+@@ -4076,15 +4044,7 @@ compute-cb@5 {
+ 						compatible = "qcom,fastrpc-compute-cb";
+ 						reg = <5>;
+ 						iommus = <&apps_smmu 0x2145 0x04a0>,
+-							 <&apps_smmu 0x2165 0x04a0>,
+-							 <&apps_smmu 0x2185 0x0400>,
+-							 <&apps_smmu 0x21c5 0x04a0>,
+-							 <&apps_smmu 0x21e5 0x04a0>,
+-							 <&apps_smmu 0x2545 0x04a0>,
+-							 <&apps_smmu 0x2565 0x04a0>,
+-							 <&apps_smmu 0x2585 0x0400>,
+-							 <&apps_smmu 0x25c5 0x04a0>,
+-							 <&apps_smmu 0x25e5 0x04a0>;
++							 <&apps_smmu 0x2185 0x0400>;
+ 						dma-coherent;
+ 					};
+ 
+@@ -4092,15 +4052,7 @@ compute-cb@6 {
+ 						compatible = "qcom,fastrpc-compute-cb";
+ 						reg = <6>;
+ 						iommus = <&apps_smmu 0x2146 0x04a0>,
+-							 <&apps_smmu 0x2166 0x04a0>,
+-							 <&apps_smmu 0x2186 0x0400>,
+-							 <&apps_smmu 0x21c6 0x04a0>,
+-							 <&apps_smmu 0x21e6 0x04a0>,
+-							 <&apps_smmu 0x2546 0x04a0>,
+-							 <&apps_smmu 0x2566 0x04a0>,
+-							 <&apps_smmu 0x2586 0x0400>,
+-							 <&apps_smmu 0x25c6 0x04a0>,
+-							 <&apps_smmu 0x25e6 0x04a0>;
++							 <&apps_smmu 0x2186 0x0400>;
+ 						dma-coherent;
+ 					};
+ 
+@@ -4108,15 +4060,7 @@ compute-cb@7 {
+ 						compatible = "qcom,fastrpc-compute-cb";
+ 						reg = <7>;
+ 						iommus = <&apps_smmu 0x2147 0x04a0>,
+-							 <&apps_smmu 0x2167 0x04a0>,
+-							 <&apps_smmu 0x2187 0x0400>,
+-							 <&apps_smmu 0x21c7 0x04a0>,
+-							 <&apps_smmu 0x21e7 0x04a0>,
+-							 <&apps_smmu 0x2547 0x04a0>,
+-							 <&apps_smmu 0x2567 0x04a0>,
+-							 <&apps_smmu 0x2587 0x0400>,
+-							 <&apps_smmu 0x25c7 0x04a0>,
+-							 <&apps_smmu 0x25e7 0x04a0>;
++							 <&apps_smmu 0x2187 0x0400>;
+ 						dma-coherent;
+ 					};
+ 
+@@ -4124,15 +4068,7 @@ compute-cb@8 {
+ 						compatible = "qcom,fastrpc-compute-cb";
+ 						reg = <8>;
+ 						iommus = <&apps_smmu 0x2148 0x04a0>,
+-							 <&apps_smmu 0x2168 0x04a0>,
+-							 <&apps_smmu 0x2188 0x0400>,
+-							 <&apps_smmu 0x21c8 0x04a0>,
+-							 <&apps_smmu 0x21e8 0x04a0>,
+-							 <&apps_smmu 0x2548 0x04a0>,
+-							 <&apps_smmu 0x2568 0x04a0>,
+-							 <&apps_smmu 0x2588 0x0400>,
+-							 <&apps_smmu 0x25c8 0x04a0>,
+-							 <&apps_smmu 0x25e8 0x04a0>;
++							 <&apps_smmu 0x2188 0x0400>;
+ 						dma-coherent;
+ 					};
+ 
+@@ -4140,31 +4076,7 @@ compute-cb@9 {
+ 						compatible = "qcom,fastrpc-compute-cb";
+ 						reg = <9>;
+ 						iommus = <&apps_smmu 0x2149 0x04a0>,
+-							 <&apps_smmu 0x2169 0x04a0>,
+-							 <&apps_smmu 0x2189 0x0400>,
+-							 <&apps_smmu 0x21c9 0x04a0>,
+-							 <&apps_smmu 0x21e9 0x04a0>,
+-							 <&apps_smmu 0x2549 0x04a0>,
+-							 <&apps_smmu 0x2569 0x04a0>,
+-							 <&apps_smmu 0x2589 0x0400>,
+-							 <&apps_smmu 0x25c9 0x04a0>,
+-							 <&apps_smmu 0x25e9 0x04a0>;
+-						dma-coherent;
+-					};
+-
+-					compute-cb@10 {
+-						compatible = "qcom,fastrpc-compute-cb";
+-						reg = <10>;
+-						iommus = <&apps_smmu 0x214a 0x04a0>,
+-							 <&apps_smmu 0x216a 0x04a0>,
+-							 <&apps_smmu 0x218a 0x0400>,
+-							 <&apps_smmu 0x21ca 0x04a0>,
+-							 <&apps_smmu 0x21ea 0x04a0>,
+-							 <&apps_smmu 0x254a 0x04a0>,
+-							 <&apps_smmu 0x256a 0x04a0>,
+-							 <&apps_smmu 0x258a 0x0400>,
+-							 <&apps_smmu 0x25ca 0x04a0>,
+-							 <&apps_smmu 0x25ea 0x04a0>;
++							 <&apps_smmu 0x2189 0x0400>;
+ 						dma-coherent;
+ 					};
+ 
+@@ -4172,15 +4084,7 @@ compute-cb@11 {
+ 						compatible = "qcom,fastrpc-compute-cb";
+ 						reg = <11>;
+ 						iommus = <&apps_smmu 0x214b 0x04a0>,
+-							 <&apps_smmu 0x216b 0x04a0>,
+-							 <&apps_smmu 0x218b 0x0400>,
+-							 <&apps_smmu 0x21cb 0x04a0>,
+-							 <&apps_smmu 0x21eb 0x04a0>,
+-							 <&apps_smmu 0x254b 0x04a0>,
+-							 <&apps_smmu 0x256b 0x04a0>,
+-							 <&apps_smmu 0x258b 0x0400>,
+-							 <&apps_smmu 0x25cb 0x04a0>,
+-							 <&apps_smmu 0x25eb 0x04a0>;
++							 <&apps_smmu 0x218b 0x0400>;
+ 						dma-coherent;
+ 					};
+ 				};
+@@ -4240,15 +4144,7 @@ compute-cb@1 {
+ 						compatible = "qcom,fastrpc-compute-cb";
+ 						reg = <1>;
+ 						iommus = <&apps_smmu 0x2941 0x04a0>,
+-							 <&apps_smmu 0x2961 0x04a0>,
+-							 <&apps_smmu 0x2981 0x0400>,
+-							 <&apps_smmu 0x29c1 0x04a0>,
+-							 <&apps_smmu 0x29e1 0x04a0>,
+-							 <&apps_smmu 0x2d41 0x04a0>,
+-							 <&apps_smmu 0x2d61 0x04a0>,
+-							 <&apps_smmu 0x2d81 0x0400>,
+-							 <&apps_smmu 0x2dc1 0x04a0>,
+-							 <&apps_smmu 0x2de1 0x04a0>;
++							 <&apps_smmu 0x2981 0x0400>;
+ 						dma-coherent;
+ 					};
+ 
+@@ -4256,15 +4152,7 @@ compute-cb@2 {
+ 						compatible = "qcom,fastrpc-compute-cb";
+ 						reg = <2>;
+ 						iommus = <&apps_smmu 0x2942 0x04a0>,
+-							 <&apps_smmu 0x2962 0x04a0>,
+-							 <&apps_smmu 0x2982 0x0400>,
+-							 <&apps_smmu 0x29c2 0x04a0>,
+-							 <&apps_smmu 0x29e2 0x04a0>,
+-							 <&apps_smmu 0x2d42 0x04a0>,
+-							 <&apps_smmu 0x2d62 0x04a0>,
+-							 <&apps_smmu 0x2d82 0x0400>,
+-							 <&apps_smmu 0x2dc2 0x04a0>,
+-							 <&apps_smmu 0x2de2 0x04a0>;
++							 <&apps_smmu 0x2982 0x0400>;
+ 						dma-coherent;
+ 					};
+ 
+@@ -4272,15 +4160,7 @@ compute-cb@3 {
+ 						compatible = "qcom,fastrpc-compute-cb";
+ 						reg = <3>;
+ 						iommus = <&apps_smmu 0x2943 0x04a0>,
+-							 <&apps_smmu 0x2963 0x04a0>,
+-							 <&apps_smmu 0x2983 0x0400>,
+-							 <&apps_smmu 0x29c3 0x04a0>,
+-							 <&apps_smmu 0x29e3 0x04a0>,
+-							 <&apps_smmu 0x2d43 0x04a0>,
+-							 <&apps_smmu 0x2d63 0x04a0>,
+-							 <&apps_smmu 0x2d83 0x0400>,
+-							 <&apps_smmu 0x2dc3 0x04a0>,
+-							 <&apps_smmu 0x2de3 0x04a0>;
++							 <&apps_smmu 0x2983 0x0400>;
+ 						dma-coherent;
+ 					};
+ 
+@@ -4288,15 +4168,7 @@ compute-cb@4 {
+ 						compatible = "qcom,fastrpc-compute-cb";
+ 						reg = <4>;
+ 						iommus = <&apps_smmu 0x2944 0x04a0>,
+-							 <&apps_smmu 0x2964 0x04a0>,
+-							 <&apps_smmu 0x2984 0x0400>,
+-							 <&apps_smmu 0x29c4 0x04a0>,
+-							 <&apps_smmu 0x29e4 0x04a0>,
+-							 <&apps_smmu 0x2d44 0x04a0>,
+-							 <&apps_smmu 0x2d64 0x04a0>,
+-							 <&apps_smmu 0x2d84 0x0400>,
+-							 <&apps_smmu 0x2dc4 0x04a0>,
+-							 <&apps_smmu 0x2de4 0x04a0>;
++							 <&apps_smmu 0x2984 0x0400>;
+ 						dma-coherent;
+ 					};
+ 
+@@ -4304,15 +4176,7 @@ compute-cb@5 {
+ 						compatible = "qcom,fastrpc-compute-cb";
+ 						reg = <5>;
+ 						iommus = <&apps_smmu 0x2945 0x04a0>,
+-							 <&apps_smmu 0x2965 0x04a0>,
+-							 <&apps_smmu 0x2985 0x0400>,
+-							 <&apps_smmu 0x29c5 0x04a0>,
+-							 <&apps_smmu 0x29e5 0x04a0>,
+-							 <&apps_smmu 0x2d45 0x04a0>,
+-							 <&apps_smmu 0x2d65 0x04a0>,
+-							 <&apps_smmu 0x2d85 0x0400>,
+-							 <&apps_smmu 0x2dc5 0x04a0>,
+-							 <&apps_smmu 0x2de5 0x04a0>;
++							 <&apps_smmu 0x2985 0x0400>;
+ 						dma-coherent;
+ 					};
+ 
+@@ -4320,15 +4184,7 @@ compute-cb@6 {
+ 						compatible = "qcom,fastrpc-compute-cb";
+ 						reg = <6>;
+ 						iommus = <&apps_smmu 0x2946 0x04a0>,
+-							 <&apps_smmu 0x2966 0x04a0>,
+-							 <&apps_smmu 0x2986 0x0400>,
+-							 <&apps_smmu 0x29c6 0x04a0>,
+-							 <&apps_smmu 0x29e6 0x04a0>,
+-							 <&apps_smmu 0x2d46 0x04a0>,
+-							 <&apps_smmu 0x2d66 0x04a0>,
+-							 <&apps_smmu 0x2d86 0x0400>,
+-							 <&apps_smmu 0x2dc6 0x04a0>,
+-							 <&apps_smmu 0x2de6 0x04a0>;
++							 <&apps_smmu 0x2986 0x0400>;
+ 						dma-coherent;
+ 					};
+ 
+@@ -4336,15 +4192,7 @@ compute-cb@7 {
+ 						compatible = "qcom,fastrpc-compute-cb";
+ 						reg = <7>;
+ 						iommus = <&apps_smmu 0x2947 0x04a0>,
+-							 <&apps_smmu 0x2967 0x04a0>,
+-							 <&apps_smmu 0x2987 0x0400>,
+-							 <&apps_smmu 0x29c7 0x04a0>,
+-							 <&apps_smmu 0x29e7 0x04a0>,
+-							 <&apps_smmu 0x2d47 0x04a0>,
+-							 <&apps_smmu 0x2d67 0x04a0>,
+-							 <&apps_smmu 0x2d87 0x0400>,
+-							 <&apps_smmu 0x2dc7 0x04a0>,
+-							 <&apps_smmu 0x2de7 0x04a0>;
++							 <&apps_smmu 0x2987 0x0400>;
+ 						dma-coherent;
+ 					};
+ 
+@@ -4352,15 +4200,7 @@ compute-cb@8 {
+ 						compatible = "qcom,fastrpc-compute-cb";
+ 						reg = <8>;
+ 						iommus = <&apps_smmu 0x2948 0x04a0>,
+-							 <&apps_smmu 0x2968 0x04a0>,
+-							 <&apps_smmu 0x2988 0x0400>,
+-							 <&apps_smmu 0x29c8 0x04a0>,
+-							 <&apps_smmu 0x29e8 0x04a0>,
+-							 <&apps_smmu 0x2d48 0x04a0>,
+-							 <&apps_smmu 0x2d68 0x04a0>,
+-							 <&apps_smmu 0x2d88 0x0400>,
+-							 <&apps_smmu 0x2dc8 0x04a0>,
+-							 <&apps_smmu 0x2de8 0x04a0>;
++							 <&apps_smmu 0x2988 0x0400>;
+ 						dma-coherent;
+ 					};
+ 
+@@ -4368,15 +4208,7 @@ compute-cb@9 {
+ 						compatible = "qcom,fastrpc-compute-cb";
+ 						reg = <9>;
+ 						iommus = <&apps_smmu 0x2949 0x04a0>,
+-							 <&apps_smmu 0x2969 0x04a0>,
+-							 <&apps_smmu 0x2989 0x0400>,
+-							 <&apps_smmu 0x29c9 0x04a0>,
+-							 <&apps_smmu 0x29e9 0x04a0>,
+-							 <&apps_smmu 0x2d49 0x04a0>,
+-							 <&apps_smmu 0x2d69 0x04a0>,
+-							 <&apps_smmu 0x2d89 0x0400>,
+-							 <&apps_smmu 0x2dc9 0x04a0>,
+-							 <&apps_smmu 0x2de9 0x04a0>;
++							 <&apps_smmu 0x2989 0x0400>;
+ 						dma-coherent;
+ 					};
+ 
+@@ -4384,15 +4216,7 @@ compute-cb@10 {
+ 						compatible = "qcom,fastrpc-compute-cb";
+ 						reg = <10>;
+ 						iommus = <&apps_smmu 0x294a 0x04a0>,
+-							 <&apps_smmu 0x296a 0x04a0>,
+-							 <&apps_smmu 0x298a 0x0400>,
+-							 <&apps_smmu 0x29ca 0x04a0>,
+-							 <&apps_smmu 0x29ea 0x04a0>,
+-							 <&apps_smmu 0x2d4a 0x04a0>,
+-							 <&apps_smmu 0x2d6a 0x04a0>,
+-							 <&apps_smmu 0x2d8a 0x0400>,
+-							 <&apps_smmu 0x2dca 0x04a0>,
+-							 <&apps_smmu 0x2dea 0x04a0>;
++							 <&apps_smmu 0x298a 0x0400>;
+ 						dma-coherent;
+ 					};
+ 
+@@ -4400,15 +4224,7 @@ compute-cb@11 {
+ 						compatible = "qcom,fastrpc-compute-cb";
+ 						reg = <11>;
+ 						iommus = <&apps_smmu 0x294b 0x04a0>,
+-							 <&apps_smmu 0x296b 0x04a0>,
+-							 <&apps_smmu 0x298b 0x0400>,
+-							 <&apps_smmu 0x29cb 0x04a0>,
+-							 <&apps_smmu 0x29eb 0x04a0>,
+-							 <&apps_smmu 0x2d4b 0x04a0>,
+-							 <&apps_smmu 0x2d6b 0x04a0>,
+-							 <&apps_smmu 0x2d8b 0x0400>,
+-							 <&apps_smmu 0x2dcb 0x04a0>,
+-							 <&apps_smmu 0x2deb 0x04a0>;
++							 <&apps_smmu 0x298b 0x0400>;
+ 						dma-coherent;
+ 					};
+ 
+@@ -4416,15 +4232,7 @@ compute-cb@12 {
+ 						compatible = "qcom,fastrpc-compute-cb";
+ 						reg = <12>;
+ 						iommus = <&apps_smmu 0x294c 0x04a0>,
+-							 <&apps_smmu 0x296c 0x04a0>,
+-							 <&apps_smmu 0x298c 0x0400>,
+-							 <&apps_smmu 0x29cc 0x04a0>,
+-							 <&apps_smmu 0x29ec 0x04a0>,
+-							 <&apps_smmu 0x2d4c 0x04a0>,
+-							 <&apps_smmu 0x2d6c 0x04a0>,
+-							 <&apps_smmu 0x2d8c 0x0400>,
+-							 <&apps_smmu 0x2dcc 0x04a0>,
+-							 <&apps_smmu 0x2dec 0x04a0>;
++							 <&apps_smmu 0x298c 0x0400>;
+ 						dma-coherent;
+ 					};
+ 
+@@ -4432,15 +4240,7 @@ compute-cb@13 {
+ 						compatible = "qcom,fastrpc-compute-cb";
+ 						reg = <13>;
+ 						iommus = <&apps_smmu 0x294d 0x04a0>,
+-							 <&apps_smmu 0x296d 0x04a0>,
+-							 <&apps_smmu 0x298d 0x0400>,
+-							 <&apps_smmu 0x29Cd 0x04a0>,
+-							 <&apps_smmu 0x29ed 0x04a0>,
+-							 <&apps_smmu 0x2d4d 0x04a0>,
+-							 <&apps_smmu 0x2d6d 0x04a0>,
+-							 <&apps_smmu 0x2d8d 0x0400>,
+-							 <&apps_smmu 0x2dcd 0x04a0>,
+-							 <&apps_smmu 0x2ded 0x04a0>;
++							 <&apps_smmu 0x298d 0x0400>;
+ 						dma-coherent;
+ 					};
+ 				};
+diff --git a/arch/arm64/boot/dts/qcom/sm8350.dtsi b/arch/arm64/boot/dts/qcom/sm8350.dtsi
+index 46adf10e5fe4d6..404473fa491ae0 100644
+--- a/arch/arm64/boot/dts/qcom/sm8350.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8350.dtsi
+@@ -455,7 +455,7 @@ cdsp_secure_heap: memory@80c00000 {
+ 			no-map;
+ 		};
+ 
+-		pil_camera_mem: mmeory@85200000 {
++		pil_camera_mem: memory@85200000 {
+ 			reg = <0x0 0x85200000 0x0 0x500000>;
+ 			no-map;
+ 		};
+diff --git a/arch/arm64/boot/dts/qcom/sm8450.dtsi b/arch/arm64/boot/dts/qcom/sm8450.dtsi
+index d664a88a018efb..58ed68f534e50e 100644
+--- a/arch/arm64/boot/dts/qcom/sm8450.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8450.dtsi
+@@ -4553,6 +4553,8 @@ cryptobam: dma-controller@1dc4000 {
+ 			interrupts = <GIC_SPI 272 IRQ_TYPE_LEVEL_HIGH>;
+ 			#dma-cells = <1>;
+ 			qcom,ee = <0>;
++			qcom,num-ees = <4>;
++			num-channels = <16>;
+ 			qcom,controlled-remotely;
+ 			iommus = <&apps_smmu 0x584 0x11>,
+ 				 <&apps_smmu 0x588 0x0>,
+diff --git a/arch/arm64/boot/dts/qcom/sm8550.dtsi b/arch/arm64/boot/dts/qcom/sm8550.dtsi
+index 9ecf4a7fc3287a..cfdd30009015f6 100644
+--- a/arch/arm64/boot/dts/qcom/sm8550.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8550.dtsi
+@@ -1952,6 +1952,8 @@ cryptobam: dma-controller@1dc4000 {
+ 			interrupts = <GIC_SPI 272 IRQ_TYPE_LEVEL_HIGH>;
+ 			#dma-cells = <1>;
+ 			qcom,ee = <0>;
++			qcom,num-ees = <4>;
++			num-channels = <20>;
+ 			qcom,controlled-remotely;
+ 			iommus = <&apps_smmu 0x480 0x0>,
+ 				 <&apps_smmu 0x481 0x0>;
+diff --git a/arch/arm64/boot/dts/qcom/sm8650.dtsi b/arch/arm64/boot/dts/qcom/sm8650.dtsi
+index 416cfb71878a5f..fddf979de38d1b 100644
+--- a/arch/arm64/boot/dts/qcom/sm8650.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8650.dtsi
+@@ -2495,6 +2495,8 @@ cryptobam: dma-controller@1dc4000 {
+ 				 <&apps_smmu 0x481 0>;
+ 
+ 			qcom,ee = <0>;
++			qcom,num-ees = <4>;
++			num-channels = <20>;
+ 			qcom,controlled-remotely;
+ 		};
+ 
+diff --git a/arch/arm64/boot/dts/qcom/x1e80100-asus-vivobook-s15.dts b/arch/arm64/boot/dts/qcom/x1e80100-asus-vivobook-s15.dts
+index b2cf080cab5622..ce3fa29de7b4a3 100644
+--- a/arch/arm64/boot/dts/qcom/x1e80100-asus-vivobook-s15.dts
++++ b/arch/arm64/boot/dts/qcom/x1e80100-asus-vivobook-s15.dts
+@@ -314,8 +314,8 @@ vreg_l1j_0p8: ldo1 {
+ 
+ 		vreg_l2j_1p2: ldo2 {
+ 			regulator-name = "vreg_l2j_1p2";
+-			regulator-min-microvolt = <1200000>;
+-			regulator-max-microvolt = <1200000>;
++			regulator-min-microvolt = <1256000>;
++			regulator-max-microvolt = <1256000>;
+ 			regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ 		};
+ 
+diff --git a/arch/arm64/boot/dts/qcom/x1e80100-lenovo-yoga-slim7x.dts b/arch/arm64/boot/dts/qcom/x1e80100-lenovo-yoga-slim7x.dts
+index e9ed723f90381a..07c2fdfe7ce136 100644
+--- a/arch/arm64/boot/dts/qcom/x1e80100-lenovo-yoga-slim7x.dts
++++ b/arch/arm64/boot/dts/qcom/x1e80100-lenovo-yoga-slim7x.dts
+@@ -266,6 +266,7 @@ vreg_l12b_1p2: ldo12 {
+ 			regulator-min-microvolt = <1200000>;
+ 			regulator-max-microvolt = <1200000>;
+ 			regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
++			regulator-always-on;
+ 		};
+ 
+ 		vreg_l14b_3p0: ldo14 {
+@@ -280,8 +281,8 @@ vreg_l15b_1p8: ldo15 {
+ 			regulator-min-microvolt = <1800000>;
+ 			regulator-max-microvolt = <1800000>;
+ 			regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
++			regulator-always-on;
+ 		};
+-
+ 	};
+ 
+ 	regulators-1 {
+@@ -484,8 +485,8 @@ vreg_l1j_0p8: ldo1 {
+ 
+ 		vreg_l2j_1p2: ldo2 {
+ 			regulator-name = "vreg_l2j_1p2";
+-			regulator-min-microvolt = <1200000>;
+-			regulator-max-microvolt = <1200000>;
++			regulator-min-microvolt = <1256000>;
++			regulator-max-microvolt = <1256000>;
+ 			regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ 		};
+ 
+diff --git a/arch/arm64/boot/dts/qcom/x1e80100-qcp.dts b/arch/arm64/boot/dts/qcom/x1e80100-qcp.dts
+index af76aa034d0e17..9062eb6766f2cf 100644
+--- a/arch/arm64/boot/dts/qcom/x1e80100-qcp.dts
++++ b/arch/arm64/boot/dts/qcom/x1e80100-qcp.dts
+@@ -356,6 +356,7 @@ vreg_l12b_1p2: ldo12 {
+ 			regulator-min-microvolt = <1200000>;
+ 			regulator-max-microvolt = <1200000>;
+ 			regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
++			regulator-always-on;
+ 		};
+ 
+ 		vreg_l13b_3p0: ldo13 {
+@@ -377,6 +378,7 @@ vreg_l15b_1p8: ldo15 {
+ 			regulator-min-microvolt = <1800000>;
+ 			regulator-max-microvolt = <1800000>;
+ 			regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
++			regulator-always-on;
+ 		};
+ 
+ 		vreg_l16b_2p9: ldo16 {
+@@ -594,8 +596,8 @@ vreg_l1j_0p8: ldo1 {
+ 
+ 		vreg_l2j_1p2: ldo2 {
+ 			regulator-name = "vreg_l2j_1p2";
+-			regulator-min-microvolt = <1200000>;
+-			regulator-max-microvolt = <1200000>;
++			regulator-min-microvolt = <1256000>;
++			regulator-max-microvolt = <1256000>;
+ 			regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ 		};
+ 
+diff --git a/arch/arm64/boot/dts/qcom/x1e80100.dtsi b/arch/arm64/boot/dts/qcom/x1e80100.dtsi
+index 91e4fbca19f99c..5a5abd5fa65850 100644
+--- a/arch/arm64/boot/dts/qcom/x1e80100.dtsi
++++ b/arch/arm64/boot/dts/qcom/x1e80100.dtsi
+@@ -6682,15 +6682,19 @@ mem-critical {
+ 		};
+ 
+ 		video-thermal {
+-			polling-delay-passive = <250>;
+-
+ 			thermal-sensors = <&tsens0 12>;
+ 
+ 			trips {
+ 				trip-point0 {
++					temperature = <90000>;
++					hysteresis = <2000>;
++					type = "hot";
++				};
++
++				video-critical {
+ 					temperature = <125000>;
+ 					hysteresis = <1000>;
+-					type = "passive";
++					type = "critical";
+ 				};
+ 			};
+ 		};
+diff --git a/arch/arm64/boot/dts/ti/k3-am62-main.dtsi b/arch/arm64/boot/dts/ti/k3-am62-main.dtsi
+index 60c6814206a1f9..3f3a31eced9707 100644
+--- a/arch/arm64/boot/dts/ti/k3-am62-main.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-am62-main.dtsi
+@@ -552,8 +552,6 @@ sdhci0: mmc@fa10000 {
+ 		power-domains = <&k3_pds 57 TI_SCI_PD_EXCLUSIVE>;
+ 		clocks = <&k3_clks 57 5>, <&k3_clks 57 6>;
+ 		clock-names = "clk_ahb", "clk_xin";
+-		assigned-clocks = <&k3_clks 57 6>;
+-		assigned-clock-parents = <&k3_clks 57 8>;
+ 		bus-width = <8>;
+ 		mmc-ddr-1_8v;
+ 		mmc-hs200-1_8v;
+diff --git a/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi b/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi
+index 56945d29e0150b..45d68a0d1b5931 100644
+--- a/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi
+@@ -575,8 +575,6 @@ sdhci0: mmc@fa10000 {
+ 		power-domains = <&k3_pds 57 TI_SCI_PD_EXCLUSIVE>;
+ 		clocks = <&k3_clks 57 5>, <&k3_clks 57 6>;
+ 		clock-names = "clk_ahb", "clk_xin";
+-		assigned-clocks = <&k3_clks 57 6>;
+-		assigned-clock-parents = <&k3_clks 57 8>;
+ 		bus-width = <8>;
+ 		mmc-hs200-1_8v;
+ 		ti,clkbuf-sel = <0x7>;
+diff --git a/arch/arm64/boot/dts/ti/k3-am62p-j722s-common-main.dtsi b/arch/arm64/boot/dts/ti/k3-am62p-j722s-common-main.dtsi
+index 9b6f5137910837..77fe2b27cb58d0 100644
+--- a/arch/arm64/boot/dts/ti/k3-am62p-j722s-common-main.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-am62p-j722s-common-main.dtsi
+@@ -564,8 +564,6 @@ sdhci0: mmc@fa10000 {
+ 		power-domains = <&k3_pds 57 TI_SCI_PD_EXCLUSIVE>;
+ 		clocks = <&k3_clks 57 1>, <&k3_clks 57 2>;
+ 		clock-names = "clk_ahb", "clk_xin";
+-		assigned-clocks = <&k3_clks 57 2>;
+-		assigned-clock-parents = <&k3_clks 57 4>;
+ 		bus-width = <8>;
+ 		mmc-ddr-1_8v;
+ 		mmc-hs200-1_8v;
+diff --git a/arch/arm64/boot/dts/ti/k3-am62x-sk-csi2-imx219.dtso b/arch/arm64/boot/dts/ti/k3-am62x-sk-csi2-imx219.dtso
+index 76ca02127f95ff..dd090813a32d61 100644
+--- a/arch/arm64/boot/dts/ti/k3-am62x-sk-csi2-imx219.dtso
++++ b/arch/arm64/boot/dts/ti/k3-am62x-sk-csi2-imx219.dtso
+@@ -22,7 +22,7 @@ &main_i2c2 {
+ 	#size-cells = <0>;
+ 	status = "okay";
+ 
+-	i2c-switch@71 {
++	i2c-mux@71 {
+ 		compatible = "nxp,pca9543";
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+@@ -39,7 +39,6 @@ ov5640: camera@10 {
+ 				reg = <0x10>;
+ 
+ 				clocks = <&clk_imx219_fixed>;
+-				clock-names = "xclk";
+ 
+ 				reset-gpios = <&exp1 13 GPIO_ACTIVE_HIGH>;
+ 
+diff --git a/arch/arm64/boot/dts/ti/k3-am62x-sk-csi2-ov5640.dtso b/arch/arm64/boot/dts/ti/k3-am62x-sk-csi2-ov5640.dtso
+index ccc7f5e43184fa..7fc7c95f5cd578 100644
+--- a/arch/arm64/boot/dts/ti/k3-am62x-sk-csi2-ov5640.dtso
++++ b/arch/arm64/boot/dts/ti/k3-am62x-sk-csi2-ov5640.dtso
+@@ -22,7 +22,7 @@ &main_i2c2 {
+ 	#size-cells = <0>;
+ 	status = "okay";
+ 
+-	i2c-switch@71 {
++	i2c-mux@71 {
+ 		compatible = "nxp,pca9543";
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+diff --git a/arch/arm64/boot/dts/ti/k3-am62x-sk-csi2-tevi-ov5640.dtso b/arch/arm64/boot/dts/ti/k3-am62x-sk-csi2-tevi-ov5640.dtso
+index 4eaf9d757dd0ad..b6bfdfbbdd984a 100644
+--- a/arch/arm64/boot/dts/ti/k3-am62x-sk-csi2-tevi-ov5640.dtso
++++ b/arch/arm64/boot/dts/ti/k3-am62x-sk-csi2-tevi-ov5640.dtso
+@@ -22,7 +22,7 @@ &main_i2c2 {
+ 	#size-cells = <0>;
+ 	status = "okay";
+ 
+-	i2c-switch@71 {
++	i2c-mux@71 {
+ 		compatible = "nxp,pca9543";
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+diff --git a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
+index 1f1af7ea233053..0534b53483473c 100644
+--- a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
+@@ -449,6 +449,8 @@ sdhci0: mmc@4f80000 {
+ 		ti,otap-del-sel-mmc-hs = <0x0>;
+ 		ti,otap-del-sel-ddr52 = <0x5>;
+ 		ti,otap-del-sel-hs200 = <0x5>;
++		ti,itap-del-sel-legacy = <0xa>;
++		ti,itap-del-sel-mmc-hs = <0x1>;
+ 		ti,itap-del-sel-ddr52 = <0x0>;
+ 		dma-coherent;
+ 		status = "disabled";
+diff --git a/arch/arm64/boot/dts/ti/k3-am68-sk-base-board.dts b/arch/arm64/boot/dts/ti/k3-am68-sk-base-board.dts
+index d5ceab79536ca4..b40496097f82d1 100644
+--- a/arch/arm64/boot/dts/ti/k3-am68-sk-base-board.dts
++++ b/arch/arm64/boot/dts/ti/k3-am68-sk-base-board.dts
+@@ -44,6 +44,17 @@ vusb_main: regulator-vusb-main5v0 {
+ 		regulator-boot-on;
+ 	};
+ 
++	vsys_5v0: regulator-vsys5v0 {
++		/* Output of LM61460 */
++		compatible = "regulator-fixed";
++		regulator-name = "vsys_5v0";
++		regulator-min-microvolt = <5000000>;
++		regulator-max-microvolt = <5000000>;
++		vin-supply = <&vusb_main>;
++		regulator-always-on;
++		regulator-boot-on;
++	};
++
+ 	vsys_3v3: regulator-vsys3v3 {
+ 		/* Output of LM5141 */
+ 		compatible = "regulator-fixed";
+@@ -76,7 +87,7 @@ vdd_sd_dv: regulator-tlv71033 {
+ 		regulator-min-microvolt = <1800000>;
+ 		regulator-max-microvolt = <3300000>;
+ 		regulator-boot-on;
+-		vin-supply = <&vsys_3v3>;
++		vin-supply = <&vsys_5v0>;
+ 		gpios = <&main_gpio0 49 GPIO_ACTIVE_HIGH>;
+ 		states = <1800000 0x0>,
+ 			 <3300000 0x1>;
+diff --git a/arch/arm64/boot/dts/ti/k3-j721e-sk-csi2-dual-imx219.dtso b/arch/arm64/boot/dts/ti/k3-j721e-sk-csi2-dual-imx219.dtso
+index 47bb5480b5b006..4eb3cffab0321d 100644
+--- a/arch/arm64/boot/dts/ti/k3-j721e-sk-csi2-dual-imx219.dtso
++++ b/arch/arm64/boot/dts/ti/k3-j721e-sk-csi2-dual-imx219.dtso
+@@ -19,6 +19,33 @@ clk_imx219_fixed: imx219-xclk {
+ 		#clock-cells = <0>;
+ 		clock-frequency = <24000000>;
+ 	};
++
++	reg_2p8v: regulator-2p8v {
++		compatible = "regulator-fixed";
++		regulator-name = "2P8V";
++		regulator-min-microvolt = <2800000>;
++		regulator-max-microvolt = <2800000>;
++		vin-supply = <&vdd_sd_dv>;
++		regulator-always-on;
++	};
++
++	reg_1p8v: regulator-1p8v {
++		compatible = "regulator-fixed";
++		regulator-name = "1P8V";
++		regulator-min-microvolt = <1800000>;
++		regulator-max-microvolt = <1800000>;
++		vin-supply = <&vdd_sd_dv>;
++		regulator-always-on;
++	};
++
++	reg_1p2v: regulator-1p2v {
++		compatible = "regulator-fixed";
++		regulator-name = "1P2V";
++		regulator-min-microvolt = <1200000>;
++		regulator-max-microvolt = <1200000>;
++		vin-supply = <&vdd_sd_dv>;
++		regulator-always-on;
++	};
+ };
+ 
+ &csi_mux {
+@@ -34,7 +61,9 @@ imx219_0: imx219-0@10 {
+ 		reg = <0x10>;
+ 
+ 		clocks = <&clk_imx219_fixed>;
+-		clock-names = "xclk";
++		VANA-supply = <&reg_2p8v>;
++		VDIG-supply = <&reg_1p8v>;
++		VDDL-supply = <&reg_1p2v>;
+ 
+ 		port {
+ 			csi2_cam0: endpoint {
+@@ -56,7 +85,9 @@ imx219_1: imx219-1@10 {
+ 		reg = <0x10>;
+ 
+ 		clocks = <&clk_imx219_fixed>;
+-		clock-names = "xclk";
++		VANA-supply = <&reg_2p8v>;
++		VDIG-supply = <&reg_1p8v>;
++		VDDL-supply = <&reg_1p2v>;
+ 
+ 		port {
+ 			csi2_cam1: endpoint {
+diff --git a/arch/arm64/boot/dts/ti/k3-j721e-sk.dts b/arch/arm64/boot/dts/ti/k3-j721e-sk.dts
+index 6285e8d94ddeb7..c8d7eb1814f060 100644
+--- a/arch/arm64/boot/dts/ti/k3-j721e-sk.dts
++++ b/arch/arm64/boot/dts/ti/k3-j721e-sk.dts
+@@ -184,6 +184,17 @@ vsys_3v3: fixedregulator-vsys3v3 {
+ 		regulator-boot-on;
+ 	};
+ 
++	vsys_5v0: fixedregulator-vsys5v0 {
++		/* Output of LM61460 */
++		compatible = "regulator-fixed";
++		regulator-name = "vsys_5v0";
++		regulator-min-microvolt = <5000000>;
++		regulator-max-microvolt = <5000000>;
++		vin-supply = <&vusb_main>;
++		regulator-always-on;
++		regulator-boot-on;
++	};
++
+ 	vdd_mmc1: fixedregulator-sd {
+ 		compatible = "regulator-fixed";
+ 		pinctrl-names = "default";
+@@ -211,6 +222,20 @@ vdd_sd_dv_alt: gpio-regulator-tps659411 {
+ 			 <3300000 0x1>;
+ 	};
+ 
++	vdd_sd_dv: gpio-regulator-TLV71033 {
++		compatible = "regulator-gpio";
++		pinctrl-names = "default";
++		pinctrl-0 = <&vdd_sd_dv_pins_default>;
++		regulator-name = "tlv71033";
++		regulator-min-microvolt = <1800000>;
++		regulator-max-microvolt = <3300000>;
++		regulator-boot-on;
++		vin-supply = <&vsys_5v0>;
++		gpios = <&main_gpio0 118 GPIO_ACTIVE_HIGH>;
++		states = <1800000 0x0>,
++			 <3300000 0x1>;
++	};
++
+ 	transceiver1: can-phy1 {
+ 		compatible = "ti,tcan1042";
+ 		#phy-cells = <0>;
+@@ -608,6 +633,12 @@ J721E_WKUP_IOPAD(0xd4, PIN_OUTPUT, 7) /* (G26) WKUP_GPIO0_9 */
+ 		>;
+ 	};
+ 
++	vdd_sd_dv_pins_default: vdd-sd-dv-default-pins {
++		pinctrl-single,pins = <
++			J721E_IOPAD(0x1dc, PIN_OUTPUT, 7) /* (Y1) SPI1_CLK.GPIO0_118 */
++		>;
++	};
++
+ 	wkup_uart0_pins_default: wkup-uart0-default-pins {
+ 		pinctrl-single,pins = <
+ 			J721E_WKUP_IOPAD(0xa0, PIN_INPUT, 0) /* (J29) WKUP_UART0_RXD */
+diff --git a/arch/arm64/boot/dts/ti/k3-j722s-evm.dts b/arch/arm64/boot/dts/ti/k3-j722s-evm.dts
+index a00f4a7d20d98f..710f80a14b6472 100644
+--- a/arch/arm64/boot/dts/ti/k3-j722s-evm.dts
++++ b/arch/arm64/boot/dts/ti/k3-j722s-evm.dts
+@@ -720,6 +720,10 @@ &serdes_ln_ctrl {
+ 		      <J722S_SERDES1_LANE0_PCIE0_LANE0>;
+ };
+ 
++&serdes_wiz0 {
++	status = "okay";
++};
++
+ &serdes0 {
+ 	status = "okay";
+ 	serdes0_usb_link: phy@0 {
+@@ -731,6 +735,10 @@ serdes0_usb_link: phy@0 {
+ 	};
+ };
+ 
++&serdes_wiz1 {
++	status = "okay";
++};
++
+ &serdes1 {
+ 	status = "okay";
+ 	serdes1_pcie_link: phy@0 {
+diff --git a/arch/arm64/boot/dts/ti/k3-j722s-main.dtsi b/arch/arm64/boot/dts/ti/k3-j722s-main.dtsi
+index ed6f4ba08afca1..ec8fcf9d16d6ab 100644
+--- a/arch/arm64/boot/dts/ti/k3-j722s-main.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-j722s-main.dtsi
+@@ -32,6 +32,8 @@ serdes_wiz0: phy@f000000 {
+ 		assigned-clocks = <&k3_clks 279 1>;
+ 		assigned-clock-parents = <&k3_clks 279 5>;
+ 
++		status = "disabled";
++
+ 		serdes0: serdes@f000000 {
+ 			compatible = "ti,j721e-serdes-10g";
+ 			reg = <0x0f000000 0x00010000>;
+@@ -70,6 +72,8 @@ serdes_wiz1: phy@f010000 {
+ 		assigned-clocks = <&k3_clks 280 1>;
+ 		assigned-clock-parents = <&k3_clks 280 5>;
+ 
++		status = "disabled";
++
+ 		serdes1: serdes@f010000 {
+ 			compatible = "ti,j721e-serdes-10g";
+ 			reg = <0x0f010000 0x00010000>;
+diff --git a/arch/arm64/boot/dts/ti/k3-j784s4-j742s2-main-common.dtsi b/arch/arm64/boot/dts/ti/k3-j784s4-j742s2-main-common.dtsi
+index 2bf4547485e1b3..013c0d25d34814 100644
+--- a/arch/arm64/boot/dts/ti/k3-j784s4-j742s2-main-common.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-j784s4-j742s2-main-common.dtsi
+@@ -77,7 +77,7 @@ pcie1_ctrl: pcie1-ctrl@4074 {
+ 
+ 		serdes_ln_ctrl: mux-controller@4080 {
+ 			compatible = "reg-mux";
+-			reg = <0x00004080 0x30>;
++			reg = <0x00004080 0x50>;
+ 			#mux-control-cells = <1>;
+ 			mux-reg-masks = <0x0 0x3>, <0x4 0x3>, /* SERDES0 lane0/1 select */
+ 					<0x8 0x3>, <0xc 0x3>, /* SERDES0 lane2/3 select */
+diff --git a/arch/um/Makefile b/arch/um/Makefile
+index 00b63bac5effb5..3317d87e209201 100644
+--- a/arch/um/Makefile
++++ b/arch/um/Makefile
+@@ -151,5 +151,6 @@ MRPROPER_FILES += $(HOST_DIR)/include/generated
+ archclean:
+ 	@find . \( -name '*.bb' -o -name '*.bbg' -o -name '*.da' \
+ 		-o -name '*.gcov' \) -type f -print | xargs rm -f
++	$(Q)$(MAKE) -f $(srctree)/Makefile ARCH=$(HEADER_ARCH) clean
+ 
+ export HEADER_ARCH SUBARCH USER_CFLAGS CFLAGS_NO_HARDENING DEV_NULL_PATH
+diff --git a/drivers/char/tpm/tpm-buf.c b/drivers/char/tpm/tpm-buf.c
+index e49a19fea3bdf6..dc882fc9fa9efc 100644
+--- a/drivers/char/tpm/tpm-buf.c
++++ b/drivers/char/tpm/tpm-buf.c
+@@ -201,7 +201,7 @@ static void tpm_buf_read(struct tpm_buf *buf, off_t *offset, size_t count, void
+  */
+ u8 tpm_buf_read_u8(struct tpm_buf *buf, off_t *offset)
+ {
+-	u8 value;
++	u8 value = 0;
+ 
+ 	tpm_buf_read(buf, offset, sizeof(value), &value);
+ 
+@@ -218,7 +218,7 @@ EXPORT_SYMBOL_GPL(tpm_buf_read_u8);
+  */
+ u16 tpm_buf_read_u16(struct tpm_buf *buf, off_t *offset)
+ {
+-	u16 value;
++	u16 value = 0;
+ 
+ 	tpm_buf_read(buf, offset, sizeof(value), &value);
+ 
+@@ -235,7 +235,7 @@ EXPORT_SYMBOL_GPL(tpm_buf_read_u16);
+  */
+ u32 tpm_buf_read_u32(struct tpm_buf *buf, off_t *offset)
+ {
+-	u32 value;
++	u32 value = 0;
+ 
+ 	tpm_buf_read(buf, offset, sizeof(value), &value);
+ 
+diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
+index 22aa2bab3693c4..19a58c4ecef3f8 100644
+--- a/drivers/dma/idxd/cdev.c
++++ b/drivers/dma/idxd/cdev.c
+@@ -225,7 +225,7 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp)
+ 	struct idxd_wq *wq;
+ 	struct device *dev, *fdev;
+ 	int rc = 0;
+-	struct iommu_sva *sva;
++	struct iommu_sva *sva = NULL;
+ 	unsigned int pasid;
+ 	struct idxd_cdev *idxd_cdev;
+ 
+@@ -322,7 +322,7 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp)
+ 	if (device_user_pasid_enabled(idxd))
+ 		idxd_xa_pasid_remove(ctx);
+ failed_get_pasid:
+-	if (device_user_pasid_enabled(idxd))
++	if (device_user_pasid_enabled(idxd) && !IS_ERR_OR_NULL(sva))
+ 		iommu_sva_unbind_device(sva);
+ failed:
+ 	mutex_unlock(&wq->wq_lock);
+diff --git a/drivers/gpio/gpio-virtuser.c b/drivers/gpio/gpio-virtuser.c
+index e89f299f214009..dcecb7a2591176 100644
+--- a/drivers/gpio/gpio-virtuser.c
++++ b/drivers/gpio/gpio-virtuser.c
+@@ -400,10 +400,15 @@ static ssize_t gpio_virtuser_direction_do_write(struct file *file,
+ 	char buf[32], *trimmed;
+ 	int ret, dir, val = 0;
+ 
+-	ret = simple_write_to_buffer(buf, sizeof(buf), ppos, user_buf, count);
++	if (count >= sizeof(buf))
++		return -EINVAL;
++
++	ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
+ 	if (ret < 0)
+ 		return ret;
+ 
++	buf[ret] = '\0';
++
+ 	trimmed = strim(buf);
+ 
+ 	if (strcmp(trimmed, "input") == 0) {
+@@ -622,12 +627,15 @@ static ssize_t gpio_virtuser_consumer_write(struct file *file,
+ 	char buf[GPIO_VIRTUSER_NAME_BUF_LEN + 2];
+ 	int ret;
+ 
++	if (count >= sizeof(buf))
++		return -EINVAL;
++
+ 	ret = simple_write_to_buffer(buf, GPIO_VIRTUSER_NAME_BUF_LEN, ppos,
+ 				     user_buf, count);
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	buf[strlen(buf) - 1] = '\0';
++	buf[ret] = '\0';
+ 
+ 	ret = gpiod_set_consumer_name(data->ad.desc, buf);
+ 	if (ret)
+diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
+index 55014c15211674..7e3d506bb79b9f 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
++++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
+@@ -887,7 +887,7 @@ static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dm
+ }
+ 
+ //TODO : Could be possibly moved to a common helper layer.
+-static bool dml21_wrapper_get_plane_id(const struct dc_state *context, const struct dc_plane_state *plane, unsigned int *plane_id)
++static bool dml21_wrapper_get_plane_id(const struct dc_state *context, unsigned int stream_id, const struct dc_plane_state *plane, unsigned int *plane_id)
+ {
+ 	int i, j;
+ 
+@@ -895,10 +895,12 @@ static bool dml21_wrapper_get_plane_id(const struct dc_state *context, const str
+ 		return false;
+ 
+ 	for (i = 0; i < context->stream_count; i++) {
+-		for (j = 0; j < context->stream_status[i].plane_count; j++) {
+-			if (context->stream_status[i].plane_states[j] == plane) {
+-				*plane_id = (i << 16) | j;
+-				return true;
++		if (context->streams[i]->stream_id == stream_id) {
++			for (j = 0; j < context->stream_status[i].plane_count; j++) {
++				if (context->stream_status[i].plane_states[j] == plane) {
++					*plane_id = (i << 16) | j;
++					return true;
++				}
+ 			}
+ 		}
+ 	}
+@@ -921,14 +923,14 @@ static unsigned int map_stream_to_dml21_display_cfg(const struct dml2_context *d
+ 	return location;
+ }
+ 
+-static unsigned int map_plane_to_dml21_display_cfg(const struct dml2_context *dml_ctx,
++static unsigned int map_plane_to_dml21_display_cfg(const struct dml2_context *dml_ctx, unsigned int stream_id,
+ 		const struct dc_plane_state *plane, const struct dc_state *context)
+ {
+ 	unsigned int plane_id;
+ 	int i = 0;
+ 	int location = -1;
+ 
+-	if (!dml21_wrapper_get_plane_id(context, plane, &plane_id)) {
++	if (!dml21_wrapper_get_plane_id(context, stream_id, plane, &plane_id)) {
+ 		ASSERT(false);
+ 		return -1;
+ 	}
+@@ -1013,7 +1015,7 @@ bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_s
+ 			dml_dispcfg->plane_descriptors[disp_cfg_plane_location].stream_index = disp_cfg_stream_location;
+ 		} else {
+ 			for (plane_index = 0; plane_index < context->stream_status[stream_index].plane_count; plane_index++) {
+-				disp_cfg_plane_location = map_plane_to_dml21_display_cfg(dml_ctx, context->stream_status[stream_index].plane_states[plane_index], context);
++				disp_cfg_plane_location = map_plane_to_dml21_display_cfg(dml_ctx, context->streams[stream_index]->stream_id, context->stream_status[stream_index].plane_states[plane_index], context);
+ 
+ 				if (disp_cfg_plane_location < 0)
+ 					disp_cfg_plane_location = dml_dispcfg->num_planes++;
+@@ -1024,7 +1026,7 @@ bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_s
+ 				populate_dml21_plane_config_from_plane_state(dml_ctx, &dml_dispcfg->plane_descriptors[disp_cfg_plane_location], context->stream_status[stream_index].plane_states[plane_index], context, stream_index);
+ 				dml_dispcfg->plane_descriptors[disp_cfg_plane_location].stream_index = disp_cfg_stream_location;
+ 
+-				if (dml21_wrapper_get_plane_id(context, context->stream_status[stream_index].plane_states[plane_index], &dml_ctx->v21.dml_to_dc_pipe_mapping.disp_cfg_to_plane_id[disp_cfg_plane_location]))
++				if (dml21_wrapper_get_plane_id(context, context->streams[stream_index]->stream_id, context->stream_status[stream_index].plane_states[plane_index], &dml_ctx->v21.dml_to_dc_pipe_mapping.disp_cfg_to_plane_id[disp_cfg_plane_location]))
+ 					dml_ctx->v21.dml_to_dc_pipe_mapping.disp_cfg_to_plane_id_valid[disp_cfg_plane_location] = true;
+ 
+ 				/* apply forced pstate policy */
+diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
+index c4e03482ba9ae4..aa28001297675a 100644
+--- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
++++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
+@@ -148,6 +148,7 @@ void link_blank_dp_stream(struct dc_link *link, bool hw_init)
+ void link_set_all_streams_dpms_off_for_link(struct dc_link *link)
+ {
+ 	struct pipe_ctx *pipes[MAX_PIPES];
++	struct dc_stream_state *streams[MAX_PIPES];
+ 	struct dc_state *state = link->dc->current_state;
+ 	uint8_t count;
+ 	int i;
+@@ -160,10 +161,18 @@ void link_set_all_streams_dpms_off_for_link(struct dc_link *link)
+ 
+ 	link_get_master_pipes_with_dpms_on(link, state, &count, pipes);
+ 
++	/* The subsequent call to dc_commit_updates_for_stream for a full update
++	 * will release the current state and swap to a new state. Releasing the
++	 * current state results in the stream pointers in the pipe_ctx structs
++	 * being zeroed. Hence, cache all streams prior to dc_commit_updates_for_stream.
++	 */
++	for (i = 0; i < count; i++)
++		streams[i] = pipes[i]->stream;
++
+ 	for (i = 0; i < count; i++) {
+-		stream_update.stream = pipes[i]->stream;
++		stream_update.stream = streams[i];
+ 		dc_commit_updates_for_stream(link->ctx->dc, NULL, 0,
+-				pipes[i]->stream, &stream_update,
++				streams[i], &stream_update,
+ 				state);
+ 	}
+ 
+diff --git a/drivers/gpu/drm/xe/regs/xe_gt_regs.h b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
+index 5404de2aea5457..c160b015d178ad 100644
+--- a/drivers/gpu/drm/xe/regs/xe_gt_regs.h
++++ b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
+@@ -157,6 +157,7 @@
+ #define XEHPG_SC_INSTDONE_EXTRA2		XE_REG_MCR(0x7108)
+ 
+ #define COMMON_SLICE_CHICKEN4			XE_REG(0x7300, XE_REG_OPTION_MASKED)
++#define   SBE_PUSH_CONSTANT_BEHIND_FIX_ENABLE	REG_BIT(12)
+ #define   DISABLE_TDC_LOAD_BALANCING_CALC	REG_BIT(6)
+ 
+ #define COMMON_SLICE_CHICKEN3				XE_REG(0x7304, XE_REG_OPTION_MASKED)
+diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c
+index 2d4e38b3bab19e..ce6d2167b94ada 100644
+--- a/drivers/gpu/drm/xe/xe_lrc.c
++++ b/drivers/gpu/drm/xe/xe_lrc.c
+@@ -874,7 +874,7 @@ static void *empty_lrc_data(struct xe_hw_engine *hwe)
+ 
+ static void xe_lrc_set_ppgtt(struct xe_lrc *lrc, struct xe_vm *vm)
+ {
+-	u64 desc = xe_vm_pdp4_descriptor(vm, lrc->tile);
++	u64 desc = xe_vm_pdp4_descriptor(vm, gt_to_tile(lrc->gt));
+ 
+ 	xe_lrc_write_ctx_reg(lrc, CTX_PDP0_UDW, upper_32_bits(desc));
+ 	xe_lrc_write_ctx_reg(lrc, CTX_PDP0_LDW, lower_32_bits(desc));
+@@ -905,6 +905,7 @@ static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
+ 	int err;
+ 
+ 	kref_init(&lrc->refcount);
++	lrc->gt = gt;
+ 	lrc->flags = 0;
+ 	lrc_size = ring_size + xe_gt_lrc_size(gt, hwe->class);
+ 	if (xe_gt_has_indirect_ring_state(gt))
+@@ -923,7 +924,6 @@ static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
+ 		return PTR_ERR(lrc->bo);
+ 
+ 	lrc->size = lrc_size;
+-	lrc->tile = gt_to_tile(hwe->gt);
+ 	lrc->ring.size = ring_size;
+ 	lrc->ring.tail = 0;
+ 	lrc->ctx_timestamp = 0;
+diff --git a/drivers/gpu/drm/xe/xe_lrc_types.h b/drivers/gpu/drm/xe/xe_lrc_types.h
+index 71ecb453f811a4..cd38586ae98932 100644
+--- a/drivers/gpu/drm/xe/xe_lrc_types.h
++++ b/drivers/gpu/drm/xe/xe_lrc_types.h
+@@ -25,8 +25,8 @@ struct xe_lrc {
+ 	/** @size: size of lrc including any indirect ring state page */
+ 	u32 size;
+ 
+-	/** @tile: tile which this LRC belongs to */
+-	struct xe_tile *tile;
++	/** @gt: gt which this LRC belongs to */
++	struct xe_gt *gt;
+ 
+ 	/** @flags: LRC flags */
+ #define XE_LRC_FLAG_INDIRECT_RING_STATE		0x1
+diff --git a/drivers/gpu/drm/xe/xe_wa.c b/drivers/gpu/drm/xe/xe_wa.c
+index 0a1905f8d380a8..aea6034a810793 100644
+--- a/drivers/gpu/drm/xe/xe_wa.c
++++ b/drivers/gpu/drm/xe/xe_wa.c
+@@ -783,6 +783,10 @@ static const struct xe_rtp_entry_sr lrc_was[] = {
+ 	  XE_RTP_RULES(GRAPHICS_VERSION(2001), ENGINE_CLASS(RENDER)),
+ 	  XE_RTP_ACTIONS(SET(CHICKEN_RASTER_1, DIS_CLIP_NEGATIVE_BOUNDING_BOX))
+ 	},
++	{ XE_RTP_NAME("22021007897"),
++	  XE_RTP_RULES(GRAPHICS_VERSION(2001), ENGINE_CLASS(RENDER)),
++	  XE_RTP_ACTIONS(SET(COMMON_SLICE_CHICKEN4, SBE_PUSH_CONSTANT_BEHIND_FIX_ENABLE))
++	},
+ 
+ 	/* Xe3_LPG */
+ 	{ XE_RTP_NAME("14021490052"),
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 92baa34f42f28a..c6424f6259487e 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -41,6 +41,10 @@
+ #define USB_VENDOR_ID_ACTIONSTAR	0x2101
+ #define USB_DEVICE_ID_ACTIONSTAR_1011	0x1011
+ 
++#define USB_VENDOR_ID_ADATA_XPG 0x125f
++#define USB_VENDOR_ID_ADATA_XPG_WL_GAMING_MOUSE 0x7505
++#define USB_VENDOR_ID_ADATA_XPG_WL_GAMING_MOUSE_DONGLE 0x7506
++
+ #define USB_VENDOR_ID_ADS_TECH		0x06e1
+ #define USB_DEVICE_ID_ADS_TECH_RADIO_SI470X	0xa155
+ 
+diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
+index 5d7a418ccdbecf..73979643315bfd 100644
+--- a/drivers/hid/hid-quirks.c
++++ b/drivers/hid/hid-quirks.c
+@@ -27,6 +27,8 @@
+ static const struct hid_device_id hid_quirks[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_GAMEPAD), HID_QUIRK_BADPAD },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_PREDATOR), HID_QUIRK_BADPAD },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_ADATA_XPG, USB_VENDOR_ID_ADATA_XPG_WL_GAMING_MOUSE), HID_QUIRK_ALWAYS_POLL },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_ADATA_XPG, USB_VENDOR_ID_ADATA_XPG_WL_GAMING_MOUSE_DONGLE), HID_QUIRK_ALWAYS_POLL },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_AFATECH, USB_DEVICE_ID_AFATECH_AF9016), HID_QUIRK_FULLSPEED_INTERVAL },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_AIREN, USB_DEVICE_ID_AIREN_SLIMPLUS), HID_QUIRK_NOGET },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_AKAI_09E8, USB_DEVICE_ID_AKAI_09E8_MIDIMIX), HID_QUIRK_NO_INIT_REPORTS },
+diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c
+index 020e5897812fed..3cf10078059958 100644
+--- a/drivers/net/can/kvaser_pciefd.c
++++ b/drivers/net/can/kvaser_pciefd.c
+@@ -1670,24 +1670,28 @@ static int kvaser_pciefd_read_buffer(struct kvaser_pciefd *pcie, int dma_buf)
+ 	return res;
+ }
+ 
+-static u32 kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie)
++static void kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie)
+ {
++	void __iomem *srb_cmd_reg = KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG;
+ 	u32 irq = ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);
+ 
+-	if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0)
++	iowrite32(irq, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);
++
++	if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0) {
+ 		kvaser_pciefd_read_buffer(pcie, 0);
++		iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0, srb_cmd_reg); /* Rearm buffer */
++	}
+ 
+-	if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1)
++	if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1) {
+ 		kvaser_pciefd_read_buffer(pcie, 1);
++		iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1, srb_cmd_reg); /* Rearm buffer */
++	}
+ 
+ 	if (unlikely(irq & KVASER_PCIEFD_SRB_IRQ_DOF0 ||
+ 		     irq & KVASER_PCIEFD_SRB_IRQ_DOF1 ||
+ 		     irq & KVASER_PCIEFD_SRB_IRQ_DUF0 ||
+ 		     irq & KVASER_PCIEFD_SRB_IRQ_DUF1))
+ 		dev_err(&pcie->pci->dev, "DMA IRQ error 0x%08X\n", irq);
+-
+-	iowrite32(irq, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);
+-	return irq;
+ }
+ 
+ static void kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can)
+@@ -1715,29 +1719,22 @@ static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev)
+ 	struct kvaser_pciefd *pcie = (struct kvaser_pciefd *)dev;
+ 	const struct kvaser_pciefd_irq_mask *irq_mask = pcie->driver_data->irq_mask;
+ 	u32 pci_irq = ioread32(KVASER_PCIEFD_PCI_IRQ_ADDR(pcie));
+-	u32 srb_irq = 0;
+-	u32 srb_release = 0;
+ 	int i;
+ 
+ 	if (!(pci_irq & irq_mask->all))
+ 		return IRQ_NONE;
+ 
++	iowrite32(0, KVASER_PCIEFD_PCI_IEN_ADDR(pcie));
++
+ 	if (pci_irq & irq_mask->kcan_rx0)
+-		srb_irq = kvaser_pciefd_receive_irq(pcie);
++		kvaser_pciefd_receive_irq(pcie);
+ 
+ 	for (i = 0; i < pcie->nr_channels; i++) {
+ 		if (pci_irq & irq_mask->kcan_tx[i])
+ 			kvaser_pciefd_transmit_irq(pcie->can[i]);
+ 	}
+ 
+-	if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD0)
+-		srb_release |= KVASER_PCIEFD_SRB_CMD_RDB0;
+-
+-	if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD1)
+-		srb_release |= KVASER_PCIEFD_SRB_CMD_RDB1;
+-
+-	if (srb_release)
+-		iowrite32(srb_release, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
++	iowrite32(irq_mask->all, KVASER_PCIEFD_PCI_IEN_ADDR(pcie));
+ 
+ 	return IRQ_HANDLED;
+ }
+@@ -1757,13 +1754,22 @@ static void kvaser_pciefd_teardown_can_ctrls(struct kvaser_pciefd *pcie)
+ 	}
+ }
+ 
++static void kvaser_pciefd_disable_irq_srcs(struct kvaser_pciefd *pcie)
++{
++	unsigned int i;
++
++	/* Masking PCI_IRQ is insufficient as a running ISR will unmask it */
++	iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IEN_REG);
++	for (i = 0; i < pcie->nr_channels; ++i)
++		iowrite32(0, pcie->can[i]->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
++}
++
+ static int kvaser_pciefd_probe(struct pci_dev *pdev,
+ 			       const struct pci_device_id *id)
+ {
+ 	int ret;
+ 	struct kvaser_pciefd *pcie;
+ 	const struct kvaser_pciefd_irq_mask *irq_mask;
+-	void __iomem *irq_en_base;
+ 
+ 	pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
+ 	if (!pcie)
+@@ -1829,8 +1835,7 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev,
+ 		  KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IEN_REG);
+ 
+ 	/* Enable PCI interrupts */
+-	irq_en_base = KVASER_PCIEFD_PCI_IEN_ADDR(pcie);
+-	iowrite32(irq_mask->all, irq_en_base);
++	iowrite32(irq_mask->all, KVASER_PCIEFD_PCI_IEN_ADDR(pcie));
+ 	/* Ready the DMA buffers */
+ 	iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0,
+ 		  KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
+@@ -1844,8 +1849,7 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev,
+ 	return 0;
+ 
+ err_free_irq:
+-	/* Disable PCI interrupts */
+-	iowrite32(0, irq_en_base);
++	kvaser_pciefd_disable_irq_srcs(pcie);
+ 	free_irq(pcie->pci->irq, pcie);
+ 
+ err_pci_free_irq_vectors:
+@@ -1868,35 +1872,26 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev,
+ 	return ret;
+ }
+ 
+-static void kvaser_pciefd_remove_all_ctrls(struct kvaser_pciefd *pcie)
+-{
+-	int i;
+-
+-	for (i = 0; i < pcie->nr_channels; i++) {
+-		struct kvaser_pciefd_can *can = pcie->can[i];
+-
+-		if (can) {
+-			iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
+-			unregister_candev(can->can.dev);
+-			del_timer(&can->bec_poll_timer);
+-			kvaser_pciefd_pwm_stop(can);
+-			free_candev(can->can.dev);
+-		}
+-	}
+-}
+-
+ static void kvaser_pciefd_remove(struct pci_dev *pdev)
+ {
+ 	struct kvaser_pciefd *pcie = pci_get_drvdata(pdev);
++	unsigned int i;
+ 
+-	kvaser_pciefd_remove_all_ctrls(pcie);
++	for (i = 0; i < pcie->nr_channels; ++i) {
++		struct kvaser_pciefd_can *can = pcie->can[i];
+ 
+-	/* Disable interrupts */
+-	iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG);
+-	iowrite32(0, KVASER_PCIEFD_PCI_IEN_ADDR(pcie));
++		unregister_candev(can->can.dev);
++		del_timer(&can->bec_poll_timer);
++		kvaser_pciefd_pwm_stop(can);
++	}
+ 
++	kvaser_pciefd_disable_irq_srcs(pcie);
+ 	free_irq(pcie->pci->irq, pcie);
+ 	pci_free_irq_vectors(pcie->pci);
++
++	for (i = 0; i < pcie->nr_channels; ++i)
++		free_candev(pcie->can[i]->can.dev);
++
+ 	pci_iounmap(pdev, pcie->reg_base);
+ 	pci_release_regions(pdev);
+ 	pci_disable_device(pdev);
+diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+index a21e7c0afbfdc8..61788a43cb8618 100644
+--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
++++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+@@ -2699,7 +2699,7 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
+ 							port->slave.mac_addr);
+ 			if (!is_valid_ether_addr(port->slave.mac_addr)) {
+ 				eth_random_addr(port->slave.mac_addr);
+-				dev_err(dev, "Use random MAC address\n");
++				dev_info(dev, "Use random MAC address\n");
+ 			}
+ 		}
+ 
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index cd8a10f6accff9..37fd1a8ace127e 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -3701,6 +3701,8 @@ static const struct pci_device_id nvme_id_table[] = {
+ 		.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
+ 	{ PCI_DEVICE(0x1e49, 0x0041),   /* ZHITAI TiPro7000 NVMe SSD */
+ 		.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
++	{ PCI_DEVICE(0x025e, 0xf1ac),   /* SOLIDIGM  P44 pro SSDPFKKW020X7  */
++		.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
+ 	{ PCI_DEVICE(0xc0a9, 0x540a),   /* Crucial P2 */
+ 		.driver_data = NVME_QUIRK_BOGUS_NID, },
+ 	{ PCI_DEVICE(0x1d97, 0x2263), /* Lexar NM610 */
+diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
+index 30506c43776f15..ff17e0f95fbb84 100644
+--- a/drivers/perf/arm-cmn.c
++++ b/drivers/perf/arm-cmn.c
+@@ -727,8 +727,8 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj,
+ 
+ 		if ((chan == 5 && cmn->rsp_vc_num < 2) ||
+ 		    (chan == 6 && cmn->dat_vc_num < 2) ||
+-		    (chan == 7 && cmn->snp_vc_num < 2) ||
+-		    (chan == 8 && cmn->req_vc_num < 2))
++		    (chan == 7 && cmn->req_vc_num < 2) ||
++		    (chan == 8 && cmn->snp_vc_num < 2))
+ 			return 0;
+ 	}
+ 
+@@ -884,8 +884,8 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj,
+ 	_CMN_EVENT_XP(pub_##_name, (_event) | (4 << 5)),	\
+ 	_CMN_EVENT_XP(rsp2_##_name, (_event) | (5 << 5)),	\
+ 	_CMN_EVENT_XP(dat2_##_name, (_event) | (6 << 5)),	\
+-	_CMN_EVENT_XP(snp2_##_name, (_event) | (7 << 5)),	\
+-	_CMN_EVENT_XP(req2_##_name, (_event) | (8 << 5))
++	_CMN_EVENT_XP(req2_##_name, (_event) | (7 << 5)),	\
++	_CMN_EVENT_XP(snp2_##_name, (_event) | (8 << 5))
+ 
+ #define CMN_EVENT_XP_DAT(_name, _event)				\
+ 	_CMN_EVENT_XP_PORT(dat_##_name, (_event) | (3 << 5)),	\
+@@ -2557,6 +2557,7 @@ static int arm_cmn_probe(struct platform_device *pdev)
+ 
+ 	cmn->dev = &pdev->dev;
+ 	cmn->part = (unsigned long)device_get_match_data(cmn->dev);
++	cmn->cpu = cpumask_local_spread(0, dev_to_node(cmn->dev));
+ 	platform_set_drvdata(pdev, cmn);
+ 
+ 	if (cmn->part == PART_CMN600 && has_acpi_companion(cmn->dev)) {
+@@ -2584,7 +2585,6 @@ static int arm_cmn_probe(struct platform_device *pdev)
+ 	if (err)
+ 		return err;
+ 
+-	cmn->cpu = cpumask_local_spread(0, dev_to_node(cmn->dev));
+ 	cmn->pmu = (struct pmu) {
+ 		.module = THIS_MODULE,
+ 		.parent = cmn->dev,
+@@ -2650,6 +2650,7 @@ static const struct acpi_device_id arm_cmn_acpi_match[] = {
+ 	{ "ARMHC600", PART_CMN600 },
+ 	{ "ARMHC650" },
+ 	{ "ARMHC700" },
++	{ "ARMHC003" },
+ 	{}
+ };
+ MODULE_DEVICE_TABLE(acpi, arm_cmn_acpi_match);
+diff --git a/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c b/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c
+index dc6e01dff5c743..9b99fdd43f5f5c 100644
+--- a/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c
++++ b/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c
+@@ -328,6 +328,8 @@ static const struct ropll_config ropll_tmds_cfg[] = {
+ 	  1, 1, 0, 0x20, 0x0c, 1, 0x0e, 0, 0, },
+ 	{ 650000, 162, 162, 1, 1, 11, 1, 1, 1, 1, 1, 1, 1, 54, 0, 16, 4, 1,
+ 	  1, 1, 0, 0x20, 0x0c, 1, 0x0e, 0, 0, },
++	{ 502500, 84, 84, 1, 1, 7, 1, 1, 1, 1, 1, 1, 1, 11, 1, 4, 5,
++	  4, 11, 1, 0, 0x20, 0x0c, 1, 0x0e, 0, 0, },
+ 	{ 337500, 0x70, 0x70, 1, 1, 0xf, 1, 1, 1, 1, 1, 1, 1, 0x2, 0, 0x01, 5,
+ 	  1, 1, 1, 0, 0x20, 0x0c, 1, 0x0e, 0, 0, },
+ 	{ 400000, 100, 100, 1, 1, 11, 1, 1, 0, 1, 0, 1, 1, 0x9, 0, 0x05, 0,
+diff --git a/drivers/phy/starfive/phy-jh7110-usb.c b/drivers/phy/starfive/phy-jh7110-usb.c
+index cb5454fbe2c8fa..b505d89860b439 100644
+--- a/drivers/phy/starfive/phy-jh7110-usb.c
++++ b/drivers/phy/starfive/phy-jh7110-usb.c
+@@ -18,6 +18,8 @@
+ #include <linux/usb/of.h>
+ 
+ #define USB_125M_CLK_RATE		125000000
++#define USB_CLK_MODE_OFF		0x0
++#define USB_CLK_MODE_RX_NORMAL_PWR	BIT(1)
+ #define USB_LS_KEEPALIVE_OFF		0x4
+ #define USB_LS_KEEPALIVE_ENABLE		BIT(4)
+ 
+@@ -78,6 +80,7 @@ static int jh7110_usb2_phy_init(struct phy *_phy)
+ {
+ 	struct jh7110_usb2_phy *phy = phy_get_drvdata(_phy);
+ 	int ret;
++	unsigned int val;
+ 
+ 	ret = clk_set_rate(phy->usb_125m_clk, USB_125M_CLK_RATE);
+ 	if (ret)
+@@ -87,6 +90,10 @@ static int jh7110_usb2_phy_init(struct phy *_phy)
+ 	if (ret)
+ 		return ret;
+ 
++	val = readl(phy->regs + USB_CLK_MODE_OFF);
++	val |= USB_CLK_MODE_RX_NORMAL_PWR;
++	writel(val, phy->regs + USB_CLK_MODE_OFF);
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
+index ae992ac1ab4ac0..6d5300c54a4216 100644
+--- a/drivers/platform/x86/fujitsu-laptop.c
++++ b/drivers/platform/x86/fujitsu-laptop.c
+@@ -17,13 +17,13 @@
+ /*
+  * fujitsu-laptop.c - Fujitsu laptop support, providing access to additional
+  * features made available on a range of Fujitsu laptops including the
+- * P2xxx/P5xxx/S6xxx/S7xxx series.
++ * P2xxx/P5xxx/S2xxx/S6xxx/S7xxx series.
+  *
+  * This driver implements a vendor-specific backlight control interface for
+  * Fujitsu laptops and provides support for hotkeys present on certain Fujitsu
+  * laptops.
+  *
+- * This driver has been tested on a Fujitsu Lifebook S6410, S7020 and
++ * This driver has been tested on a Fujitsu Lifebook S2110, S6410, S7020 and
+  * P8010.  It should work on most P-series and S-series Lifebooks, but
+  * YMMV.
+  *
+@@ -107,7 +107,11 @@
+ #define KEY2_CODE			0x411
+ #define KEY3_CODE			0x412
+ #define KEY4_CODE			0x413
+-#define KEY5_CODE			0x420
++#define KEY5_CODE			0x414
++#define KEY6_CODE			0x415
++#define KEY7_CODE			0x416
++#define KEY8_CODE			0x417
++#define KEY9_CODE			0x420
+ 
+ /* Hotkey ringbuffer limits */
+ #define MAX_HOTKEY_RINGBUFFER_SIZE	100
+@@ -560,7 +564,7 @@ static const struct key_entry keymap_default[] = {
+ 	{ KE_KEY, KEY2_CODE,            { KEY_PROG2 } },
+ 	{ KE_KEY, KEY3_CODE,            { KEY_PROG3 } },
+ 	{ KE_KEY, KEY4_CODE,            { KEY_PROG4 } },
+-	{ KE_KEY, KEY5_CODE,            { KEY_RFKILL } },
++	{ KE_KEY, KEY9_CODE,            { KEY_RFKILL } },
+ 	/* Soft keys read from status flags */
+ 	{ KE_KEY, FLAG_RFKILL,          { KEY_RFKILL } },
+ 	{ KE_KEY, FLAG_TOUCHPAD_TOGGLE, { KEY_TOUCHPAD_TOGGLE } },
+@@ -584,6 +588,18 @@ static const struct key_entry keymap_p8010[] = {
+ 	{ KE_END, 0 }
+ };
+ 
++static const struct key_entry keymap_s2110[] = {
++	{ KE_KEY, KEY1_CODE, { KEY_PROG1 } }, /* "A" */
++	{ KE_KEY, KEY2_CODE, { KEY_PROG2 } }, /* "B" */
++	{ KE_KEY, KEY3_CODE, { KEY_WWW } },   /* "Internet" */
++	{ KE_KEY, KEY4_CODE, { KEY_EMAIL } }, /* "E-mail" */
++	{ KE_KEY, KEY5_CODE, { KEY_STOPCD } },
++	{ KE_KEY, KEY6_CODE, { KEY_PLAYPAUSE } },
++	{ KE_KEY, KEY7_CODE, { KEY_PREVIOUSSONG } },
++	{ KE_KEY, KEY8_CODE, { KEY_NEXTSONG } },
++	{ KE_END, 0 }
++};
++
+ static const struct key_entry *keymap = keymap_default;
+ 
+ static int fujitsu_laptop_dmi_keymap_override(const struct dmi_system_id *id)
+@@ -621,6 +637,15 @@ static const struct dmi_system_id fujitsu_laptop_dmi_table[] = {
+ 		},
+ 		.driver_data = (void *)keymap_p8010
+ 	},
++	{
++		.callback = fujitsu_laptop_dmi_keymap_override,
++		.ident = "Fujitsu LifeBook S2110",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK S2110"),
++		},
++		.driver_data = (void *)keymap_s2110
++	},
+ 	{}
+ };
+ 
+diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
+index dea40da867552f..0528af4ed8d694 100644
+--- a/drivers/platform/x86/thinkpad_acpi.c
++++ b/drivers/platform/x86/thinkpad_acpi.c
+@@ -232,6 +232,7 @@ enum tpacpi_hkey_event_t {
+ 	/* Thermal events */
+ 	TP_HKEY_EV_ALARM_BAT_HOT	= 0x6011, /* battery too hot */
+ 	TP_HKEY_EV_ALARM_BAT_XHOT	= 0x6012, /* battery critically hot */
++	TP_HKEY_EV_ALARM_BAT_LIM_CHANGE	= 0x6013, /* battery charge limit changed */
+ 	TP_HKEY_EV_ALARM_SENSOR_HOT	= 0x6021, /* sensor too hot */
+ 	TP_HKEY_EV_ALARM_SENSOR_XHOT	= 0x6022, /* sensor critically hot */
+ 	TP_HKEY_EV_THM_TABLE_CHANGED	= 0x6030, /* windows; thermal table changed */
+@@ -3778,6 +3779,10 @@ static bool hotkey_notify_6xxx(const u32 hkey, bool *send_acpi_ev)
+ 		pr_alert("THERMAL EMERGENCY: battery is extremely hot!\n");
+ 		/* recommended action: immediate sleep/hibernate */
+ 		break;
++	case TP_HKEY_EV_ALARM_BAT_LIM_CHANGE:
++		pr_debug("Battery Info: battery charge threshold changed\n");
++		/* User changed charging threshold. No action needed */
++		return true;
+ 	case TP_HKEY_EV_ALARM_SENSOR_HOT:
+ 		pr_crit("THERMAL ALARM: a sensor reports something is too hot!\n");
+ 		/* recommended action: warn user through gui, that */
+@@ -11472,6 +11477,8 @@ static int __must_check __init get_thinkpad_model_data(
+ 		tp->vendor = PCI_VENDOR_ID_IBM;
+ 	else if (dmi_name_in_vendors("LENOVO"))
+ 		tp->vendor = PCI_VENDOR_ID_LENOVO;
++	else if (dmi_name_in_vendors("NEC"))
++		tp->vendor = PCI_VENDOR_ID_LENOVO;
+ 	else
+ 		return 0;
+ 
+diff --git a/drivers/spi/spi-sun4i.c b/drivers/spi/spi-sun4i.c
+index 2ee6755b43f549..3019f57e65841e 100644
+--- a/drivers/spi/spi-sun4i.c
++++ b/drivers/spi/spi-sun4i.c
+@@ -264,6 +264,9 @@ static int sun4i_spi_transfer_one(struct spi_controller *host,
+ 	else
+ 		reg |= SUN4I_CTL_DHB;
+ 
++	/* Now that the settings are correct, enable the interface */
++	reg |= SUN4I_CTL_ENABLE;
++
+ 	sun4i_spi_write(sspi, SUN4I_CTL_REG, reg);
+ 
+ 	/* Ensure that we have a parent clock fast enough */
+@@ -404,7 +407,7 @@ static int sun4i_spi_runtime_resume(struct device *dev)
+ 	}
+ 
+ 	sun4i_spi_write(sspi, SUN4I_CTL_REG,
+-			SUN4I_CTL_ENABLE | SUN4I_CTL_MASTER | SUN4I_CTL_TP);
++			SUN4I_CTL_MASTER | SUN4I_CTL_TP);
+ 
+ 	return 0;
+ 
+diff --git a/fs/coredump.c b/fs/coredump.c
+index 2b8c36c9660c5c..64894ba6efca40 100644
+--- a/fs/coredump.c
++++ b/fs/coredump.c
+@@ -43,6 +43,8 @@
+ #include <linux/timekeeping.h>
+ #include <linux/sysctl.h>
+ #include <linux/elf.h>
++#include <linux/pidfs.h>
++#include <uapi/linux/pidfd.h>
+ 
+ #include <linux/uaccess.h>
+ #include <asm/mmu_context.h>
+@@ -60,6 +62,12 @@ static void free_vma_snapshot(struct coredump_params *cprm);
+ #define CORE_FILE_NOTE_SIZE_DEFAULT (4*1024*1024)
+ /* Define a reasonable max cap */
+ #define CORE_FILE_NOTE_SIZE_MAX (16*1024*1024)
++/*
++ * File descriptor number for the pidfd for the thread-group leader of
++ * the coredumping task installed into the usermode helper's file
++ * descriptor table.
++ */
++#define COREDUMP_PIDFD_NUMBER 3
+ 
+ static int core_uses_pid;
+ static unsigned int core_pipe_limit;
+@@ -339,6 +347,27 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm,
+ 			case 'C':
+ 				err = cn_printf(cn, "%d", cprm->cpu);
+ 				break;
++			/* pidfd number */
++			case 'F': {
++				/*
++				 * Installing a pidfd only makes sense if
++				 * we actually spawn a usermode helper.
++				 */
++				if (!ispipe)
++					break;
++
++				/*
++				 * Note that we'll install a pidfd for the
++				 * thread-group leader. We know that task
++				 * linkage hasn't been removed yet and even if
++				 * this @current isn't the actual thread-group
++				 * leader we know that the thread-group leader
++				 * cannot be reaped until @current has exited.
++				 */
++				cprm->pid = task_tgid(current);
++				err = cn_printf(cn, "%d", COREDUMP_PIDFD_NUMBER);
++				break;
++			}
+ 			default:
+ 				break;
+ 			}
+@@ -493,7 +522,7 @@ static void wait_for_dump_helpers(struct file *file)
+ }
+ 
+ /*
+- * umh_pipe_setup
++ * umh_coredump_setup
+  * helper function to customize the process used
+  * to collect the core in userspace.  Specifically
+  * it sets up a pipe and installs it as fd 0 (stdin)
+@@ -503,11 +532,32 @@ static void wait_for_dump_helpers(struct file *file)
+  * is a special value that we use to trap recursive
+  * core dumps
+  */
+-static int umh_pipe_setup(struct subprocess_info *info, struct cred *new)
++static int umh_coredump_setup(struct subprocess_info *info, struct cred *new)
+ {
+ 	struct file *files[2];
+ 	struct coredump_params *cp = (struct coredump_params *)info->data;
+-	int err = create_pipe_files(files, 0);
++	int err;
++
++	if (cp->pid) {
++		struct file *pidfs_file __free(fput) = NULL;
++
++		pidfs_file = pidfs_alloc_file(cp->pid, O_RDWR);
++		if (IS_ERR(pidfs_file))
++			return PTR_ERR(pidfs_file);
++
++		/*
++		 * Usermode helpers are children of either
++		 * system_unbound_wq or of kthreadd. So we know that
++		 * we're starting off with a clean file descriptor
++		 * table. So we should always be able to use
++		 * COREDUMP_PIDFD_NUMBER as our file descriptor value.
++		 */
++		err = replace_fd(COREDUMP_PIDFD_NUMBER, pidfs_file, 0);
++		if (err < 0)
++			return err;
++	}
++
++	err = create_pipe_files(files, 0);
+ 	if (err)
+ 		return err;
+ 
+@@ -515,10 +565,13 @@ static int umh_pipe_setup(struct subprocess_info *info, struct cred *new)
+ 
+ 	err = replace_fd(0, files[0], 0);
+ 	fput(files[0]);
++	if (err < 0)
++		return err;
++
+ 	/* and disallow core files too */
+ 	current->signal->rlim[RLIMIT_CORE] = (struct rlimit){1, 1};
+ 
+-	return err;
++	return 0;
+ }
+ 
+ void do_coredump(const kernel_siginfo_t *siginfo)
+@@ -593,7 +646,7 @@ void do_coredump(const kernel_siginfo_t *siginfo)
+ 		}
+ 
+ 		if (cprm.limit == 1) {
+-			/* See umh_pipe_setup() which sets RLIMIT_CORE = 1.
++			/* See umh_coredump_setup() which sets RLIMIT_CORE = 1.
+ 			 *
+ 			 * Normally core limits are irrelevant to pipes, since
+ 			 * we're not writing to the file system, but we use
+@@ -632,7 +685,7 @@ void do_coredump(const kernel_siginfo_t *siginfo)
+ 		retval = -ENOMEM;
+ 		sub_info = call_usermodehelper_setup(helper_argv[0],
+ 						helper_argv, NULL, GFP_KERNEL,
+-						umh_pipe_setup, NULL, &cprm);
++						umh_coredump_setup, NULL, &cprm);
+ 		if (sub_info)
+ 			retval = call_usermodehelper_exec(sub_info,
+ 							  UMH_WAIT_EXEC);
+diff --git a/fs/nfs/client.c b/fs/nfs/client.c
+index 03ecc77656151e..4503758e9594bf 100644
+--- a/fs/nfs/client.c
++++ b/fs/nfs/client.c
+@@ -1096,6 +1096,8 @@ struct nfs_server *nfs_create_server(struct fs_context *fc)
+ 		if (server->namelen == 0 || server->namelen > NFS2_MAXNAMLEN)
+ 			server->namelen = NFS2_MAXNAMLEN;
+ 	}
++	/* Linux 'subtree_check' borkenness mandates this setting */
++	server->fh_expire_type = NFS_FH_VOL_RENAME;
+ 
+ 	if (!(fattr->valid & NFS_ATTR_FATTR)) {
+ 		error = ctx->nfs_mod->rpc_ops->getattr(server, ctx->mntfh,
+diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
+index 492cffd9d3d845..f9f4a92f63e929 100644
+--- a/fs/nfs/dir.c
++++ b/fs/nfs/dir.c
+@@ -2690,6 +2690,18 @@ nfs_unblock_rename(struct rpc_task *task, struct nfs_renamedata *data)
+ 	unblock_revalidate(new_dentry);
+ }
+ 
++static bool nfs_rename_is_unsafe_cross_dir(struct dentry *old_dentry,
++					   struct dentry *new_dentry)
++{
++	struct nfs_server *server = NFS_SB(old_dentry->d_sb);
++
++	if (old_dentry->d_parent != new_dentry->d_parent)
++		return false;
++	if (server->fh_expire_type & NFS_FH_RENAME_UNSAFE)
++		return !(server->fh_expire_type & NFS_FH_NOEXPIRE_WITH_OPEN);
++	return true;
++}
++
+ /*
+  * RENAME
+  * FIXME: Some nfsds, like the Linux user space nfsd, may generate a
+@@ -2777,7 +2789,8 @@ int nfs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
+ 
+ 	}
+ 
+-	if (S_ISREG(old_inode->i_mode))
++	if (S_ISREG(old_inode->i_mode) &&
++	    nfs_rename_is_unsafe_cross_dir(old_dentry, new_dentry))
+ 		nfs_sync_inode(old_inode);
+ 	task = nfs_async_rename(old_dir, new_dir, old_dentry, new_dentry,
+ 				must_unblock ? nfs_unblock_rename : NULL);
+diff --git a/fs/nfs/filelayout/filelayoutdev.c b/fs/nfs/filelayout/filelayoutdev.c
+index 4fa304fa5bc4b2..29d9234d5c085f 100644
+--- a/fs/nfs/filelayout/filelayoutdev.c
++++ b/fs/nfs/filelayout/filelayoutdev.c
+@@ -76,6 +76,7 @@ nfs4_fl_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev,
+ 	struct page *scratch;
+ 	struct list_head dsaddrs;
+ 	struct nfs4_pnfs_ds_addr *da;
++	struct net *net = server->nfs_client->cl_net;
+ 
+ 	/* set up xdr stream */
+ 	scratch = alloc_page(gfp_flags);
+@@ -159,8 +160,7 @@ nfs4_fl_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev,
+ 
+ 		mp_count = be32_to_cpup(p); /* multipath count */
+ 		for (j = 0; j < mp_count; j++) {
+-			da = nfs4_decode_mp_ds_addr(server->nfs_client->cl_net,
+-						    &stream, gfp_flags);
++			da = nfs4_decode_mp_ds_addr(net, &stream, gfp_flags);
+ 			if (da)
+ 				list_add_tail(&da->da_node, &dsaddrs);
+ 		}
+@@ -170,7 +170,7 @@ nfs4_fl_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev,
+ 			goto out_err_free_deviceid;
+ 		}
+ 
+-		dsaddr->ds_list[i] = nfs4_pnfs_ds_add(&dsaddrs, gfp_flags);
++		dsaddr->ds_list[i] = nfs4_pnfs_ds_add(net, &dsaddrs, gfp_flags);
+ 		if (!dsaddr->ds_list[i])
+ 			goto out_err_drain_dsaddrs;
+ 		trace_fl_getdevinfo(server, &pdev->dev_id, dsaddr->ds_list[i]->ds_remotestr);
+diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
+index e58bedfb1dcc14..4a304cf17c4b07 100644
+--- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c
++++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
+@@ -49,6 +49,7 @@ nfs4_ff_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev,
+ 	struct nfs4_pnfs_ds_addr *da;
+ 	struct nfs4_ff_layout_ds *new_ds = NULL;
+ 	struct nfs4_ff_ds_version *ds_versions = NULL;
++	struct net *net = server->nfs_client->cl_net;
+ 	u32 mp_count;
+ 	u32 version_count;
+ 	__be32 *p;
+@@ -80,8 +81,7 @@ nfs4_ff_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev,
+ 
+ 	for (i = 0; i < mp_count; i++) {
+ 		/* multipath ds */
+-		da = nfs4_decode_mp_ds_addr(server->nfs_client->cl_net,
+-					    &stream, gfp_flags);
++		da = nfs4_decode_mp_ds_addr(net, &stream, gfp_flags);
+ 		if (da)
+ 			list_add_tail(&da->da_node, &dsaddrs);
+ 	}
+@@ -149,7 +149,7 @@ nfs4_ff_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev,
+ 	new_ds->ds_versions = ds_versions;
+ 	new_ds->ds_versions_cnt = version_count;
+ 
+-	new_ds->ds = nfs4_pnfs_ds_add(&dsaddrs, gfp_flags);
++	new_ds->ds = nfs4_pnfs_ds_add(net, &dsaddrs, gfp_flags);
+ 	if (!new_ds->ds)
+ 		goto out_err_drain_dsaddrs;
+ 
+diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
+index 30d2613e912b88..91ff877185c8af 100644
+--- a/fs/nfs/pnfs.h
++++ b/fs/nfs/pnfs.h
+@@ -60,6 +60,7 @@ struct nfs4_pnfs_ds {
+ 	struct list_head	ds_node;  /* nfs4_pnfs_dev_hlist dev_dslist */
+ 	char			*ds_remotestr;	/* comma sep list of addrs */
+ 	struct list_head	ds_addrs;
++	const struct net	*ds_net;
+ 	struct nfs_client	*ds_clp;
+ 	refcount_t		ds_count;
+ 	unsigned long		ds_state;
+@@ -415,7 +416,8 @@ int pnfs_generic_commit_pagelist(struct inode *inode,
+ int pnfs_generic_scan_commit_lists(struct nfs_commit_info *cinfo, int max);
+ void pnfs_generic_write_commit_done(struct rpc_task *task, void *data);
+ void nfs4_pnfs_ds_put(struct nfs4_pnfs_ds *ds);
+-struct nfs4_pnfs_ds *nfs4_pnfs_ds_add(struct list_head *dsaddrs,
++struct nfs4_pnfs_ds *nfs4_pnfs_ds_add(const struct net *net,
++				      struct list_head *dsaddrs,
+ 				      gfp_t gfp_flags);
+ void nfs4_pnfs_v3_ds_connect_unload(void);
+ int nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds,
+diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c
+index dbef837e871ad4..2ee20a0f0b36d3 100644
+--- a/fs/nfs/pnfs_nfs.c
++++ b/fs/nfs/pnfs_nfs.c
+@@ -604,12 +604,12 @@ _same_data_server_addrs_locked(const struct list_head *dsaddrs1,
+  * Lookup DS by addresses.  nfs4_ds_cache_lock is held
+  */
+ static struct nfs4_pnfs_ds *
+-_data_server_lookup_locked(const struct list_head *dsaddrs)
++_data_server_lookup_locked(const struct net *net, const struct list_head *dsaddrs)
+ {
+ 	struct nfs4_pnfs_ds *ds;
+ 
+ 	list_for_each_entry(ds, &nfs4_data_server_cache, ds_node)
+-		if (_same_data_server_addrs_locked(&ds->ds_addrs, dsaddrs))
++		if (ds->ds_net == net && _same_data_server_addrs_locked(&ds->ds_addrs, dsaddrs))
+ 			return ds;
+ 	return NULL;
+ }
+@@ -716,7 +716,7 @@ nfs4_pnfs_remotestr(struct list_head *dsaddrs, gfp_t gfp_flags)
+  * uncached and return cached struct nfs4_pnfs_ds.
+  */
+ struct nfs4_pnfs_ds *
+-nfs4_pnfs_ds_add(struct list_head *dsaddrs, gfp_t gfp_flags)
++nfs4_pnfs_ds_add(const struct net *net, struct list_head *dsaddrs, gfp_t gfp_flags)
+ {
+ 	struct nfs4_pnfs_ds *tmp_ds, *ds = NULL;
+ 	char *remotestr;
+@@ -734,13 +734,14 @@ nfs4_pnfs_ds_add(struct list_head *dsaddrs, gfp_t gfp_flags)
+ 	remotestr = nfs4_pnfs_remotestr(dsaddrs, gfp_flags);
+ 
+ 	spin_lock(&nfs4_ds_cache_lock);
+-	tmp_ds = _data_server_lookup_locked(dsaddrs);
++	tmp_ds = _data_server_lookup_locked(net, dsaddrs);
+ 	if (tmp_ds == NULL) {
+ 		INIT_LIST_HEAD(&ds->ds_addrs);
+ 		list_splice_init(dsaddrs, &ds->ds_addrs);
+ 		ds->ds_remotestr = remotestr;
+ 		refcount_set(&ds->ds_count, 1);
+ 		INIT_LIST_HEAD(&ds->ds_node);
++		ds->ds_net = net;
+ 		ds->ds_clp = NULL;
+ 		list_add(&ds->ds_node, &nfs4_data_server_cache);
+ 		dprintk("%s add new data server %s\n", __func__,
+diff --git a/fs/smb/server/oplock.c b/fs/smb/server/oplock.c
+index 03f606afad93a0..d7a8a580d01362 100644
+--- a/fs/smb/server/oplock.c
++++ b/fs/smb/server/oplock.c
+@@ -146,12 +146,9 @@ static struct oplock_info *opinfo_get_list(struct ksmbd_inode *ci)
+ {
+ 	struct oplock_info *opinfo;
+ 
+-	if (list_empty(&ci->m_op_list))
+-		return NULL;
+-
+ 	down_read(&ci->m_lock);
+-	opinfo = list_first_entry(&ci->m_op_list, struct oplock_info,
+-					op_entry);
++	opinfo = list_first_entry_or_null(&ci->m_op_list, struct oplock_info,
++					  op_entry);
+ 	if (opinfo) {
+ 		if (opinfo->conn == NULL ||
+ 		    !atomic_inc_not_zero(&opinfo->refcount))
+diff --git a/include/linux/coredump.h b/include/linux/coredump.h
+index 77e6e195d1d687..76e41805b92de9 100644
+--- a/include/linux/coredump.h
++++ b/include/linux/coredump.h
+@@ -28,6 +28,7 @@ struct coredump_params {
+ 	int vma_count;
+ 	size_t vma_data_size;
+ 	struct core_vma_metadata *vma_meta;
++	struct pid *pid;
+ };
+ 
+ extern unsigned int core_file_note_size_limit;
+diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
+index 81ab18658d72dc..2cff5cafbaa785 100644
+--- a/include/linux/nfs_fs_sb.h
++++ b/include/linux/nfs_fs_sb.h
+@@ -211,6 +211,15 @@ struct nfs_server {
+ 	char			*fscache_uniq;	/* Uniquifier (or NULL) */
+ #endif
+ 
++	/* The following #defines numerically match the NFSv4 equivalents */
++#define NFS_FH_NOEXPIRE_WITH_OPEN (0x1)
++#define NFS_FH_VOLATILE_ANY (0x2)
++#define NFS_FH_VOL_MIGRATION (0x4)
++#define NFS_FH_VOL_RENAME (0x8)
++#define NFS_FH_RENAME_UNSAFE (NFS_FH_VOLATILE_ANY | NFS_FH_VOL_RENAME)
++	u32			fh_expire_type;	/* V4 bitmask representing file
++						   handle volatility type for
++						   this filesystem */
+ 	u32			pnfs_blksize;	/* layout_blksize attr */
+ #if IS_ENABLED(CONFIG_NFS_V4)
+ 	u32			attr_bitmask[3];/* V4 bitmask representing the set
+@@ -234,9 +243,6 @@ struct nfs_server {
+ 	u32			acl_bitmask;	/* V4 bitmask representing the ACEs
+ 						   that are supported on this
+ 						   filesystem */
+-	u32			fh_expire_type;	/* V4 bitmask representing file
+-						   handle volatility type for
+-						   this filesystem */
+ 	struct pnfs_layoutdriver_type  *pnfs_curr_ld; /* Active layout driver */
+ 	struct rpc_wait_queue	roc_rpcwaitq;
+ 	void			*pnfs_ld_data;	/* per mount point data */
+diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
+index 7986145a527cbe..5a7745170e84b1 100644
+--- a/net/sched/sch_hfsc.c
++++ b/net/sched/sch_hfsc.c
+@@ -175,6 +175,11 @@ struct hfsc_sched {
+ 
+ #define	HT_INFINITY	0xffffffffffffffffULL	/* infinite time value */
+ 
++static bool cl_in_el_or_vttree(struct hfsc_class *cl)
++{
++	return ((cl->cl_flags & HFSC_FSC) && cl->cl_nactive) ||
++		((cl->cl_flags & HFSC_RSC) && !RB_EMPTY_NODE(&cl->el_node));
++}
+ 
+ /*
+  * eligible tree holds backlogged classes being sorted by their eligible times.
+@@ -1040,6 +1045,8 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
+ 	if (cl == NULL)
+ 		return -ENOBUFS;
+ 
++	RB_CLEAR_NODE(&cl->el_node);
++
+ 	err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
+ 	if (err) {
+ 		kfree(cl);
+@@ -1572,7 +1579,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
+ 	sch->qstats.backlog += len;
+ 	sch->q.qlen++;
+ 
+-	if (first && !cl->cl_nactive) {
++	if (first && !cl_in_el_or_vttree(cl)) {
+ 		if (cl->cl_flags & HFSC_RSC)
+ 			init_ed(cl, len);
+ 		if (cl->cl_flags & HFSC_FSC)
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 13ffc9a6555f65..dce56809120068 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -6813,7 +6813,10 @@ static void alc256_fixup_chromebook(struct hda_codec *codec,
+ 
+ 	switch (action) {
+ 	case HDA_FIXUP_ACT_PRE_PROBE:
+-		spec->gen.suppress_auto_mute = 1;
++		if (codec->core.subsystem_id == 0x10280d76)
++			spec->gen.suppress_auto_mute = 0;
++		else
++			spec->gen.suppress_auto_mute = 1;
+ 		spec->gen.suppress_auto_mic = 1;
+ 		spec->en_3kpull_low = false;
+ 		break;


^ permalink raw reply related	[flat|nested] 82+ messages in thread
* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2025-05-29 16:35 Mike Pagano
  0 siblings, 0 replies; 82+ messages in thread
From: Mike Pagano @ 2025-05-29 16:35 UTC (permalink / raw
  To: gentoo-commits

commit:     74a83fd1c4e0979b7dcfc9be66b92aba913259e9
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu May 29 16:35:13 2025 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu May 29 16:35:13 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=74a83fd1

Linux patch 6.12.31

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |     4 +
 1030_linux-6.12.31.patch | 29582 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 29586 insertions(+)

diff --git a/0000_README b/0000_README
index 0b4156fb..b4bbe192 100644
--- a/0000_README
+++ b/0000_README
@@ -163,6 +163,10 @@ Patch:  1029_linux-6.12.30.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.12.30
 
+Patch:  1030_linux-6.12.31.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.12.31
+
 Patch:  1500_fortify-copy-size-value-range-tracking-fix.patch
 From:   https://git.kernel.org/
 Desc:   fortify: Hide run-time copy size from value range tracking

diff --git a/1030_linux-6.12.31.patch b/1030_linux-6.12.31.patch
new file mode 100644
index 00000000..4797ca15
--- /dev/null
+++ b/1030_linux-6.12.31.patch
@@ -0,0 +1,29582 @@
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index e691f75c97e7b6..b5cb3614855413 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -6261,6 +6261,8 @@
+ 
+ 			Selecting 'on' will also enable the mitigation
+ 			against user space to user space task attacks.
++			Selecting a specific mitigation does not force enable
++			user mitigations.
+ 
+ 			Selecting 'off' will disable both the kernel and
+ 			the user space protections.
+diff --git a/Documentation/driver-api/serial/driver.rst b/Documentation/driver-api/serial/driver.rst
+index 84b43061c11be2..60434f2b028637 100644
+--- a/Documentation/driver-api/serial/driver.rst
++++ b/Documentation/driver-api/serial/driver.rst
+@@ -103,4 +103,4 @@ Some helpers are provided in order to set/get modem control lines via GPIO.
+ .. kernel-doc:: drivers/tty/serial/serial_mctrl_gpio.c
+    :identifiers: mctrl_gpio_init mctrl_gpio_free mctrl_gpio_to_gpiod
+            mctrl_gpio_set mctrl_gpio_get mctrl_gpio_enable_ms
+-           mctrl_gpio_disable_ms
++           mctrl_gpio_disable_ms_sync mctrl_gpio_disable_ms_no_sync
+diff --git a/Documentation/hwmon/dell-smm-hwmon.rst b/Documentation/hwmon/dell-smm-hwmon.rst
+index 74905675d71f99..5a4edb6565cf95 100644
+--- a/Documentation/hwmon/dell-smm-hwmon.rst
++++ b/Documentation/hwmon/dell-smm-hwmon.rst
+@@ -32,12 +32,12 @@ Temperature sensors and fans can be queried and set via the standard
+ =============================== ======= =======================================
+ Name				Perm	Description
+ =============================== ======= =======================================
+-fan[1-3]_input                  RO      Fan speed in RPM.
+-fan[1-3]_label                  RO      Fan label.
+-fan[1-3]_min                    RO      Minimal Fan speed in RPM
+-fan[1-3]_max                    RO      Maximal Fan speed in RPM
+-fan[1-3]_target                 RO      Expected Fan speed in RPM
+-pwm[1-3]                        RW      Control the fan PWM duty-cycle.
++fan[1-4]_input                  RO      Fan speed in RPM.
++fan[1-4]_label                  RO      Fan label.
++fan[1-4]_min                    RO      Minimal Fan speed in RPM
++fan[1-4]_max                    RO      Maximal Fan speed in RPM
++fan[1-4]_target                 RO      Expected Fan speed in RPM
++pwm[1-4]                        RW      Control the fan PWM duty-cycle.
+ pwm1_enable                     WO      Enable or disable automatic BIOS fan
+                                         control (not supported on all laptops,
+                                         see below for details).
+@@ -93,7 +93,7 @@ Again, when you find new codes, we'd be happy to have your patches!
+ ---------------------------
+ 
+ The driver also exports the fans as thermal cooling devices with
+-``type`` set to ``dell-smm-fan[1-3]``. This allows for easy fan control
++``type`` set to ``dell-smm-fan[1-4]``. This allows for easy fan control
+ using one of the thermal governors.
+ 
+ Module parameters
+diff --git a/Makefile b/Makefile
+index 6e8afa78bbef66..18c2a7cf9e9134 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 12
+-SUBLEVEL = 30
++SUBLEVEL = 31
+ EXTRAVERSION =
+ NAME = Baby Opossum Posse
+ 
+@@ -997,10 +997,6 @@ NOSTDINC_FLAGS += -nostdinc
+ # perform bounds checking.
+ KBUILD_CFLAGS += $(call cc-option, -fstrict-flex-arrays=3)
+ 
+-#Currently, disable -Wstringop-overflow for GCC 11, globally.
+-KBUILD_CFLAGS-$(CONFIG_CC_NO_STRINGOP_OVERFLOW) += $(call cc-option, -Wno-stringop-overflow)
+-KBUILD_CFLAGS-$(CONFIG_CC_STRINGOP_OVERFLOW) += $(call cc-option, -Wstringop-overflow)
+-
+ # disable invalid "can't wrap" optimizations for signed / pointers
+ KBUILD_CFLAGS	+= -fno-strict-overflow
+ 
+diff --git a/arch/arm/boot/dts/nvidia/tegra114.dtsi b/arch/arm/boot/dts/nvidia/tegra114.dtsi
+index 86f14e2fd29f3a..6c057b50695140 100644
+--- a/arch/arm/boot/dts/nvidia/tegra114.dtsi
++++ b/arch/arm/boot/dts/nvidia/tegra114.dtsi
+@@ -139,7 +139,7 @@ dsib: dsi@54400000 {
+ 			reg = <0x54400000 0x00040000>;
+ 			clocks = <&tegra_car TEGRA114_CLK_DSIB>,
+ 				 <&tegra_car TEGRA114_CLK_DSIBLP>,
+-				 <&tegra_car TEGRA114_CLK_PLL_D2_OUT0>;
++				 <&tegra_car TEGRA114_CLK_PLL_D_OUT0>;
+ 			clock-names = "dsi", "lp", "parent";
+ 			resets = <&tegra_car 82>;
+ 			reset-names = "dsi";
+diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
+index 05a1547642b60f..6c3e6aa22606f5 100644
+--- a/arch/arm/mach-at91/pm.c
++++ b/arch/arm/mach-at91/pm.c
+@@ -545,11 +545,12 @@ extern u32 at91_pm_suspend_in_sram_sz;
+ 
+ static int at91_suspend_finish(unsigned long val)
+ {
+-	unsigned char modified_gray_code[] = {
+-		0x00, 0x01, 0x02, 0x03, 0x06, 0x07, 0x04, 0x05, 0x0c, 0x0d,
+-		0x0e, 0x0f, 0x0a, 0x0b, 0x08, 0x09, 0x18, 0x19, 0x1a, 0x1b,
+-		0x1e, 0x1f, 0x1c, 0x1d, 0x14, 0x15, 0x16, 0x17, 0x12, 0x13,
+-		0x10, 0x11,
++	/* Synopsys workaround to fix a bug in the calibration logic */
++	unsigned char modified_fix_code[] = {
++		0x00, 0x01, 0x01, 0x06, 0x07, 0x0c, 0x06, 0x07, 0x0b, 0x18,
++		0x0a, 0x0b, 0x0c, 0x0d, 0x0d, 0x0a, 0x13, 0x13, 0x12, 0x13,
++		0x14, 0x15, 0x15, 0x12, 0x18, 0x19, 0x19, 0x1e, 0x1f, 0x14,
++		0x1e, 0x1f,
+ 	};
+ 	unsigned int tmp, index;
+ 	int i;
+@@ -560,25 +561,25 @@ static int at91_suspend_finish(unsigned long val)
+ 		 * restore the ZQ0SR0 with the value saved here. But the
+ 		 * calibration is buggy and restoring some values from ZQ0SR0
+ 		 * is forbidden and risky thus we need to provide processed
+-		 * values for these (modified gray code values).
++		 * values for these.
+ 		 */
+ 		tmp = readl(soc_pm.data.ramc_phy + DDR3PHY_ZQ0SR0);
+ 
+ 		/* Store pull-down output impedance select. */
+ 		index = (tmp >> DDR3PHY_ZQ0SR0_PDO_OFF) & 0x1f;
+-		soc_pm.bu->ddr_phy_calibration[0] = modified_gray_code[index];
++		soc_pm.bu->ddr_phy_calibration[0] = modified_fix_code[index] << DDR3PHY_ZQ0SR0_PDO_OFF;
+ 
+ 		/* Store pull-up output impedance select. */
+ 		index = (tmp >> DDR3PHY_ZQ0SR0_PUO_OFF) & 0x1f;
+-		soc_pm.bu->ddr_phy_calibration[0] |= modified_gray_code[index];
++		soc_pm.bu->ddr_phy_calibration[0] |= modified_fix_code[index] << DDR3PHY_ZQ0SR0_PUO_OFF;
+ 
+ 		/* Store pull-down on-die termination impedance select. */
+ 		index = (tmp >> DDR3PHY_ZQ0SR0_PDODT_OFF) & 0x1f;
+-		soc_pm.bu->ddr_phy_calibration[0] |= modified_gray_code[index];
++		soc_pm.bu->ddr_phy_calibration[0] |= modified_fix_code[index] << DDR3PHY_ZQ0SR0_PDODT_OFF;
+ 
+ 		/* Store pull-up on-die termination impedance select. */
+ 		index = (tmp >> DDR3PHY_ZQ0SRO_PUODT_OFF) & 0x1f;
+-		soc_pm.bu->ddr_phy_calibration[0] |= modified_gray_code[index];
++		soc_pm.bu->ddr_phy_calibration[0] |= modified_fix_code[index] << DDR3PHY_ZQ0SRO_PUODT_OFF;
+ 
+ 		/*
+ 		 * The 1st 8 words of memory might get corrupted in the process
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts
+index 3be1e8c2fdb9cf..1012103df25f7b 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts
++++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts
+@@ -151,28 +151,12 @@ &pio {
+ 	vcc-pg-supply = <&reg_aldo1>;
+ };
+ 
+-&r_ir {
+-	linux,rc-map-name = "rc-beelink-gs1";
+-	status = "okay";
+-};
+-
+-&r_pio {
+-	/*
+-	 * FIXME: We can't add that supply for now since it would
+-	 * create a circular dependency between pinctrl, the regulator
+-	 * and the RSB Bus.
+-	 *
+-	 * vcc-pl-supply = <&reg_aldo1>;
+-	 */
+-	vcc-pm-supply = <&reg_aldo1>;
+-};
+-
+-&r_rsb {
++&r_i2c {
+ 	status = "okay";
+ 
+-	axp805: pmic@745 {
++	axp805: pmic@36 {
+ 		compatible = "x-powers,axp805", "x-powers,axp806";
+-		reg = <0x745>;
++		reg = <0x36>;
+ 		interrupt-parent = <&r_intc>;
+ 		interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_LOW>;
+ 		interrupt-controller;
+@@ -290,6 +274,22 @@ sw {
+ 	};
+ };
+ 
++&r_ir {
++	linux,rc-map-name = "rc-beelink-gs1";
++	status = "okay";
++};
++
++&r_pio {
++	/*
++	 * PL0 and PL1 are used for PMIC I2C;
++	 * don't enable the pl-supply, else
++	 * it will fail at boot.
++	 *
++	 * vcc-pl-supply = <&reg_aldo1>;
++	 */
++	vcc-pm-supply = <&reg_aldo1>;
++};
++
+ &spdif {
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&spdif_tx_pin>;
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-3.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-3.dts
+index 6c3bfe3d09d9a3..14cc99b216224e 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-3.dts
++++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-3.dts
+@@ -175,16 +175,12 @@ &pio {
+ 	vcc-pg-supply = <&reg_vcc_wifi_io>;
+ };
+ 
+-&r_ir {
+-	status = "okay";
+-};
+-
+-&r_rsb {
++&r_i2c {
+ 	status = "okay";
+ 
+-	axp805: pmic@745 {
++	axp805: pmic@36 {
+ 		compatible = "x-powers,axp805", "x-powers,axp806";
+-		reg = <0x745>;
++		reg = <0x36>;
+ 		interrupt-parent = <&r_intc>;
+ 		interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_LOW>;
+ 		interrupt-controller;
+@@ -295,6 +291,10 @@ sw {
+ 	};
+ };
+ 
++&r_ir {
++	status = "okay";
++};
++
+ &rtc {
+ 	clocks = <&ext_osc32k>;
+ };
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi.dtsi
+index 13b07141c3344b..bab0f63dcc5be8 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi.dtsi
++++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi.dtsi
+@@ -112,20 +112,12 @@ &pio {
+ 	vcc-pg-supply = <&reg_aldo1>;
+ };
+ 
+-&r_ir {
+-	status = "okay";
+-};
+-
+-&r_pio {
+-	vcc-pm-supply = <&reg_bldo3>;
+-};
+-
+-&r_rsb {
++&r_i2c {
+ 	status = "okay";
+ 
+-	axp805: pmic@745 {
++	axp805: pmic@36 {
+ 		compatible = "x-powers,axp805", "x-powers,axp806";
+-		reg = <0x745>;
++		reg = <0x36>;
+ 		interrupt-parent = <&r_intc>;
+ 		interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_LOW>;
+ 		interrupt-controller;
+@@ -240,6 +232,14 @@ sw {
+ 	};
+ };
+ 
++&r_ir {
++	status = "okay";
++};
++
++&r_pio {
++	vcc-pm-supply = <&reg_bldo3>;
++};
++
+ &rtc {
+ 	clocks = <&ext_osc32k>;
+ };
+diff --git a/arch/arm64/boot/dts/marvell/armada-3720-uDPU.dtsi b/arch/arm64/boot/dts/marvell/armada-3720-uDPU.dtsi
+index 3a9b6907185d03..24282084570787 100644
+--- a/arch/arm64/boot/dts/marvell/armada-3720-uDPU.dtsi
++++ b/arch/arm64/boot/dts/marvell/armada-3720-uDPU.dtsi
+@@ -26,6 +26,8 @@ memory@0 {
+ 
+ 	leds {
+ 		compatible = "gpio-leds";
++		pinctrl-names = "default";
++		pinctrl-0 = <&spi_quad_pins>;
+ 
+ 		led-power1 {
+ 			label = "udpu:green:power";
+@@ -82,8 +84,6 @@ &sdhci0 {
+ 
+ &spi0 {
+ 	status = "okay";
+-	pinctrl-names = "default";
+-	pinctrl-0 = <&spi_quad_pins>;
+ 
+ 	flash@0 {
+ 		compatible = "jedec,spi-nor";
+@@ -108,6 +108,10 @@ partition@180000 {
+ 	};
+ };
+ 
++&spi_quad_pins {
++	function = "gpio";
++};
++
+ &pinctrl_nb {
+ 	i2c2_recovery_pins: i2c2-recovery-pins {
+ 		groups = "i2c2";
+diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi b/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi
+index 63b94a04308e86..38d49d612c0c19 100644
+--- a/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi
++++ b/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi
+@@ -1686,7 +1686,7 @@ vdd_1v8_dis: regulator-vdd-1v8-dis {
+ 		regulator-min-microvolt = <1800000>;
+ 		regulator-max-microvolt = <1800000>;
+ 		regulator-always-on;
+-		gpio = <&exp1 14 GPIO_ACTIVE_HIGH>;
++		gpio = <&exp1 9 GPIO_ACTIVE_HIGH>;
+ 		enable-active-high;
+ 		vin-supply = <&vdd_1v8>;
+ 	};
+diff --git a/arch/arm64/boot/dts/nvidia/tegra234-p3740-0002+p3701-0008.dts b/arch/arm64/boot/dts/nvidia/tegra234-p3740-0002+p3701-0008.dts
+index 36e88805374606..9ce55b4d2de892 100644
+--- a/arch/arm64/boot/dts/nvidia/tegra234-p3740-0002+p3701-0008.dts
++++ b/arch/arm64/boot/dts/nvidia/tegra234-p3740-0002+p3701-0008.dts
+@@ -302,6 +302,16 @@ pcie@14160000 {
+ 		};
+ 
+ 		pcie@141a0000 {
++			reg = <0x00 0x141a0000 0x0 0x00020000   /* appl registers (128K)      */
++			       0x00 0x3a000000 0x0 0x00040000   /* configuration space (256K) */
++			       0x00 0x3a040000 0x0 0x00040000   /* iATU_DMA reg space (256K)  */
++			       0x00 0x3a080000 0x0 0x00040000   /* DBI reg space (256K)       */
++			       0x2e 0x20000000 0x0 0x10000000>; /* ECAM (256MB)               */
++
++			ranges = <0x81000000 0x00 0x3a100000 0x00 0x3a100000 0x0 0x00100000      /* downstream I/O (1MB) */
++				  0x82000000 0x00 0x40000000 0x2e 0x30000000 0x0 0x08000000      /* non-prefetchable memory (128MB) */
++				  0xc3000000 0x28 0x00000000 0x28 0x00000000 0x6 0x20000000>;    /* prefetchable memory (25088MB) */
++
+ 			status = "okay";
+ 			vddio-pex-ctl-supply = <&vdd_1v8_ls>;
+ 			phys = <&p2u_nvhs_0>, <&p2u_nvhs_1>, <&p2u_nvhs_2>,
+diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-clk-ccf.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp-clk-ccf.dtsi
+index 60d1b1acf9a030..385fed8a852afd 100644
+--- a/arch/arm64/boot/dts/xilinx/zynqmp-clk-ccf.dtsi
++++ b/arch/arm64/boot/dts/xilinx/zynqmp-clk-ccf.dtsi
+@@ -10,39 +10,44 @@
+ 
+ #include <dt-bindings/clock/xlnx-zynqmp-clk.h>
+ / {
+-	pss_ref_clk: pss_ref_clk {
++	pss_ref_clk: pss-ref-clk {
+ 		bootph-all;
+ 		compatible = "fixed-clock";
+ 		#clock-cells = <0>;
+ 		clock-frequency = <33333333>;
++		clock-output-names = "pss_ref_clk";
+ 	};
+ 
+-	video_clk: video_clk {
++	video_clk: video-clk {
+ 		bootph-all;
+ 		compatible = "fixed-clock";
+ 		#clock-cells = <0>;
+ 		clock-frequency = <27000000>;
++		clock-output-names = "video_clk";
+ 	};
+ 
+-	pss_alt_ref_clk: pss_alt_ref_clk {
++	pss_alt_ref_clk: pss-alt-ref-clk {
+ 		bootph-all;
+ 		compatible = "fixed-clock";
+ 		#clock-cells = <0>;
+ 		clock-frequency = <0>;
++		clock-output-names = "pss_alt_ref_clk";
+ 	};
+ 
+-	gt_crx_ref_clk: gt_crx_ref_clk {
++	gt_crx_ref_clk: gt-crx-ref-clk {
+ 		bootph-all;
+ 		compatible = "fixed-clock";
+ 		#clock-cells = <0>;
+ 		clock-frequency = <108000000>;
++		clock-output-names = "gt_crx_ref_clk";
+ 	};
+ 
+-	aux_ref_clk: aux_ref_clk {
++	aux_ref_clk: aux-ref-clk {
+ 		bootph-all;
+ 		compatible = "fixed-clock";
+ 		#clock-cells = <0>;
+ 		clock-frequency = <27000000>;
++		clock-output-names = "aux_ref_clk";
+ 	};
+ };
+ 
+diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
+index 8a6b7feca3e428..d92a0203e5a93d 100644
+--- a/arch/arm64/include/asm/cputype.h
++++ b/arch/arm64/include/asm/cputype.h
+@@ -132,6 +132,7 @@
+ #define FUJITSU_CPU_PART_A64FX		0x001
+ 
+ #define HISI_CPU_PART_TSV110		0xD01
++#define HISI_CPU_PART_HIP09			0xD02
+ 
+ #define APPLE_CPU_PART_M1_ICESTORM	0x022
+ #define APPLE_CPU_PART_M1_FIRESTORM	0x023
+@@ -208,6 +209,7 @@
+ #define MIDR_NVIDIA_CARMEL MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_CARMEL)
+ #define MIDR_FUJITSU_A64FX MIDR_CPU_MODEL(ARM_CPU_IMP_FUJITSU, FUJITSU_CPU_PART_A64FX)
+ #define MIDR_HISI_TSV110 MIDR_CPU_MODEL(ARM_CPU_IMP_HISI, HISI_CPU_PART_TSV110)
++#define MIDR_HISI_HIP09 MIDR_CPU_MODEL(ARM_CPU_IMP_HISI, HISI_CPU_PART_HIP09)
+ #define MIDR_APPLE_M1_ICESTORM MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_ICESTORM)
+ #define MIDR_APPLE_M1_FIRESTORM MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM)
+ #define MIDR_APPLE_M1_ICESTORM_PRO MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_ICESTORM_PRO)
+diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
+index c329ea061dc988..5ba8376735cb03 100644
+--- a/arch/arm64/include/asm/pgtable.h
++++ b/arch/arm64/include/asm/pgtable.h
+@@ -554,18 +554,6 @@ static inline int pmd_protnone(pmd_t pmd)
+ #endif
+ 
+ #define pmd_present(pmd)	pte_present(pmd_pte(pmd))
+-
+-/*
+- * THP definitions.
+- */
+-
+-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+-static inline int pmd_trans_huge(pmd_t pmd)
+-{
+-	return pmd_val(pmd) && pmd_present(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
+-}
+-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+-
+ #define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
+ #define pmd_young(pmd)		pte_young(pmd_pte(pmd))
+ #define pmd_valid(pmd)		pte_valid(pmd_pte(pmd))
+@@ -725,6 +713,18 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+ #define pmd_leaf_size(pmd)	(pmd_cont(pmd) ? CONT_PMD_SIZE : PMD_SIZE)
+ #define pte_leaf_size(pte)	(pte_cont(pte) ? CONT_PTE_SIZE : PAGE_SIZE)
+ 
++#ifdef CONFIG_TRANSPARENT_HUGEPAGE
++static inline int pmd_trans_huge(pmd_t pmd)
++{
++	/*
++	 * If pmd is present-invalid, pmd_table() won't detect it
++	 * as a table, so force the valid bit for the comparison.
++	 */
++	return pmd_val(pmd) && pmd_present(pmd) &&
++	       !pmd_table(__pmd(pmd_val(pmd) | PTE_VALID));
++}
++#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
++
+ #if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3
+ static inline bool pud_sect(pud_t pud) { return false; }
+ static inline bool pud_table(pud_t pud) { return true; }
+@@ -806,7 +806,8 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
+ 	pr_err("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e))
+ 
+ #define pud_none(pud)		(!pud_val(pud))
+-#define pud_bad(pud)		(!pud_table(pud))
++#define pud_bad(pud)		((pud_val(pud) & PUD_TYPE_MASK) != \
++				 PUD_TYPE_TABLE)
+ #define pud_present(pud)	pte_present(pud_pte(pud))
+ #ifndef __PAGETABLE_PMD_FOLDED
+ #define pud_leaf(pud)		(pud_present(pud) && !pud_table(pud))
+diff --git a/arch/arm64/kernel/proton-pack.c b/arch/arm64/kernel/proton-pack.c
+index 8ef3335ecff722..31eaf15d2079a4 100644
+--- a/arch/arm64/kernel/proton-pack.c
++++ b/arch/arm64/kernel/proton-pack.c
+@@ -904,6 +904,7 @@ static u8 spectre_bhb_loop_affected(void)
+ 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
+ 		MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
+ 		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_GOLD),
++		MIDR_ALL_VERSIONS(MIDR_HISI_HIP09),
+ 		{},
+ 	};
+ 	static const struct midr_range spectre_bhb_k11_list[] = {
+diff --git a/arch/loongarch/kernel/Makefile b/arch/loongarch/kernel/Makefile
+index c9bfeda89e4076..66132683f1ed50 100644
+--- a/arch/loongarch/kernel/Makefile
++++ b/arch/loongarch/kernel/Makefile
+@@ -21,10 +21,10 @@ obj-$(CONFIG_CPU_HAS_LBT)	+= lbt.o
+ 
+ obj-$(CONFIG_ARCH_STRICT_ALIGN)	+= unaligned.o
+ 
+-CFLAGS_module.o		+= $(call cc-option,-Wno-override-init,)
+-CFLAGS_syscall.o	+= $(call cc-option,-Wno-override-init,)
+-CFLAGS_traps.o		+= $(call cc-option,-Wno-override-init,)
+-CFLAGS_perf_event.o	+= $(call cc-option,-Wno-override-init,)
++CFLAGS_module.o		+= $(call cc-disable-warning, override-init)
++CFLAGS_syscall.o	+= $(call cc-disable-warning, override-init)
++CFLAGS_traps.o		+= $(call cc-disable-warning, override-init)
++CFLAGS_perf_event.o	+= $(call cc-disable-warning, override-init)
+ 
+ ifdef CONFIG_FUNCTION_TRACER
+   ifndef CONFIG_DYNAMIC_FTRACE
+diff --git a/arch/loongarch/kvm/Makefile b/arch/loongarch/kvm/Makefile
+index b2f4cbe01ae804..2e188e8f14687d 100644
+--- a/arch/loongarch/kvm/Makefile
++++ b/arch/loongarch/kvm/Makefile
+@@ -19,4 +19,4 @@ kvm-y += tlb.o
+ kvm-y += vcpu.o
+ kvm-y += vm.o
+ 
+-CFLAGS_exit.o	+= $(call cc-option,-Wno-override-init,)
++CFLAGS_exit.o	+= $(call cc-disable-warning, override-init)
+diff --git a/arch/mips/include/asm/ftrace.h b/arch/mips/include/asm/ftrace.h
+index dc025888f6d289..b41fc104466888 100644
+--- a/arch/mips/include/asm/ftrace.h
++++ b/arch/mips/include/asm/ftrace.h
+@@ -91,4 +91,20 @@ void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
+ 
+ #endif /* __ASSEMBLY__ */
+ #endif /* CONFIG_FUNCTION_TRACER */
++
++#ifdef CONFIG_FTRACE_SYSCALLS
++#ifndef __ASSEMBLY__
++/*
++ * Some syscall entry functions on mips start with "__sys_" (fork and clone,
++ * for instance). We should also match the sys_ variant with those.
++ */
++#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
++static inline bool arch_syscall_match_sym_name(const char *sym,
++					       const char *name)
++{
++	return !strcmp(sym, name) ||
++		(!strncmp(sym, "__sys_", 6) && !strcmp(sym + 6, name + 4));
++}
++#endif /* __ASSEMBLY__ */
++#endif /* CONFIG_FTRACE_SYSCALLS */
+ #endif /* _ASM_MIPS_FTRACE_H */
+diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
+index d09ca77e624d76..9369a8dc385e26 100644
+--- a/arch/mips/kernel/pm-cps.c
++++ b/arch/mips/kernel/pm-cps.c
+@@ -57,10 +57,7 @@ static DEFINE_PER_CPU_ALIGNED(u32*, ready_count);
+ /* Indicates online CPUs coupled with the current CPU */
+ static DEFINE_PER_CPU_ALIGNED(cpumask_t, online_coupled);
+ 
+-/*
+- * Used to synchronize entry to deep idle states. Actually per-core rather
+- * than per-CPU.
+- */
++/* Used to synchronize entry to deep idle states */
+ static DEFINE_PER_CPU_ALIGNED(atomic_t, pm_barrier);
+ 
+ /* Saved CPU state across the CPS_PM_POWER_GATED state */
+@@ -112,9 +109,10 @@ int cps_pm_enter_state(enum cps_pm_state state)
+ 	cps_nc_entry_fn entry;
+ 	struct core_boot_config *core_cfg;
+ 	struct vpe_boot_config *vpe_cfg;
++	atomic_t *barrier;
+ 
+ 	/* Check that there is an entry function for this state */
+-	entry = per_cpu(nc_asm_enter, core)[state];
++	entry = per_cpu(nc_asm_enter, cpu)[state];
+ 	if (!entry)
+ 		return -EINVAL;
+ 
+@@ -150,7 +148,7 @@ int cps_pm_enter_state(enum cps_pm_state state)
+ 	smp_mb__after_atomic();
+ 
+ 	/* Create a non-coherent mapping of the core ready_count */
+-	core_ready_count = per_cpu(ready_count, core);
++	core_ready_count = per_cpu(ready_count, cpu);
+ 	nc_addr = kmap_noncoherent(virt_to_page(core_ready_count),
+ 				   (unsigned long)core_ready_count);
+ 	nc_addr += ((unsigned long)core_ready_count & ~PAGE_MASK);
+@@ -158,7 +156,8 @@ int cps_pm_enter_state(enum cps_pm_state state)
+ 
+ 	/* Ensure ready_count is zero-initialised before the assembly runs */
+ 	WRITE_ONCE(*nc_core_ready_count, 0);
+-	coupled_barrier(&per_cpu(pm_barrier, core), online);
++	barrier = &per_cpu(pm_barrier, cpumask_first(&cpu_sibling_map[cpu]));
++	coupled_barrier(barrier, online);
+ 
+ 	/* Run the generated entry code */
+ 	left = entry(online, nc_core_ready_count);
+@@ -629,12 +628,14 @@ static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
+ 
+ static int cps_pm_online_cpu(unsigned int cpu)
+ {
+-	enum cps_pm_state state;
+-	unsigned core = cpu_core(&cpu_data[cpu]);
++	unsigned int sibling, core;
+ 	void *entry_fn, *core_rc;
++	enum cps_pm_state state;
++
++	core = cpu_core(&cpu_data[cpu]);
+ 
+ 	for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) {
+-		if (per_cpu(nc_asm_enter, core)[state])
++		if (per_cpu(nc_asm_enter, cpu)[state])
+ 			continue;
+ 		if (!test_bit(state, state_support))
+ 			continue;
+@@ -646,16 +647,19 @@ static int cps_pm_online_cpu(unsigned int cpu)
+ 			clear_bit(state, state_support);
+ 		}
+ 
+-		per_cpu(nc_asm_enter, core)[state] = entry_fn;
++		for_each_cpu(sibling, &cpu_sibling_map[cpu])
++			per_cpu(nc_asm_enter, sibling)[state] = entry_fn;
+ 	}
+ 
+-	if (!per_cpu(ready_count, core)) {
++	if (!per_cpu(ready_count, cpu)) {
+ 		core_rc = kmalloc(sizeof(u32), GFP_KERNEL);
+ 		if (!core_rc) {
+ 			pr_err("Failed allocate core %u ready_count\n", core);
+ 			return -ENOMEM;
+ 		}
+-		per_cpu(ready_count, core) = core_rc;
++
++		for_each_cpu(sibling, &cpu_sibling_map[cpu])
++			per_cpu(ready_count, sibling) = core_rc;
+ 	}
+ 
+ 	return 0;
+diff --git a/arch/powerpc/include/asm/mmzone.h b/arch/powerpc/include/asm/mmzone.h
+index d99863cd6cde48..049152f8d597a6 100644
+--- a/arch/powerpc/include/asm/mmzone.h
++++ b/arch/powerpc/include/asm/mmzone.h
+@@ -29,6 +29,7 @@ extern cpumask_var_t node_to_cpumask_map[];
+ #ifdef CONFIG_MEMORY_HOTPLUG
+ extern unsigned long max_pfn;
+ u64 memory_hotplug_max(void);
++u64 hot_add_drconf_memory_max(void);
+ #else
+ #define memory_hotplug_max() memblock_end_of_DRAM()
+ #endif
+diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
+index 935568d68196d0..b1dc4cb9f78e67 100644
+--- a/arch/powerpc/kernel/prom_init.c
++++ b/arch/powerpc/kernel/prom_init.c
+@@ -2982,11 +2982,11 @@ static void __init fixup_device_tree_pmac(void)
+ 	char type[8];
+ 	phandle node;
+ 
+-	// Some pmacs are missing #size-cells on escc nodes
++	// Some pmacs are missing #size-cells on escc or i2s nodes
+ 	for (node = 0; prom_next_node(&node); ) {
+ 		type[0] = '\0';
+ 		prom_getprop(node, "device_type", type, sizeof(type));
+-		if (prom_strcmp(type, "escc"))
++		if (prom_strcmp(type, "escc") && prom_strcmp(type, "i2s"))
+ 			continue;
+ 
+ 		if (prom_getproplen(node, "#size-cells") != PROM_ERROR)
+diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
+index 04189689c127e3..0d807bf2328d86 100644
+--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
++++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
+@@ -988,7 +988,7 @@ int __meminit radix__vmemmap_create_mapping(unsigned long start,
+ 	return 0;
+ }
+ 
+-
++#ifdef CONFIG_ARCH_WANT_OPTIMIZE_DAX_VMEMMAP
+ bool vmemmap_can_optimize(struct vmem_altmap *altmap, struct dev_pagemap *pgmap)
+ {
+ 	if (radix_enabled())
+@@ -996,6 +996,7 @@ bool vmemmap_can_optimize(struct vmem_altmap *altmap, struct dev_pagemap *pgmap)
+ 
+ 	return false;
+ }
++#endif
+ 
+ int __meminit vmemmap_check_pmd(pmd_t *pmdp, int node,
+ 				unsigned long addr, unsigned long next)
+diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
+index 3c1da08304d032..603a0f652ba61c 100644
+--- a/arch/powerpc/mm/numa.c
++++ b/arch/powerpc/mm/numa.c
+@@ -1336,7 +1336,7 @@ int hot_add_scn_to_nid(unsigned long scn_addr)
+ 	return nid;
+ }
+ 
+-static u64 hot_add_drconf_memory_max(void)
++u64 hot_add_drconf_memory_max(void)
+ {
+ 	struct device_node *memory = NULL;
+ 	struct device_node *dn = NULL;
+diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
+index 42867469752d73..33d726bb99e3d9 100644
+--- a/arch/powerpc/perf/core-book3s.c
++++ b/arch/powerpc/perf/core-book3s.c
+@@ -2222,6 +2222,10 @@ static struct pmu power_pmu = {
+ #define PERF_SAMPLE_ADDR_TYPE  (PERF_SAMPLE_ADDR |		\
+ 				PERF_SAMPLE_PHYS_ADDR |		\
+ 				PERF_SAMPLE_DATA_PAGE_SIZE)
++
++#define SIER_TYPE_SHIFT	15
++#define SIER_TYPE_MASK	(0x7ull << SIER_TYPE_SHIFT)
++
+ /*
+  * A counter has overflowed; update its count and record
+  * things if requested.  Note that interrupts are hard-disabled
+@@ -2290,6 +2294,22 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
+ 	    is_kernel_addr(mfspr(SPRN_SIAR)))
+ 		record = 0;
+ 
++	/*
++	 * SIER[46-48] presents the instruction type of the sampled instruction.
++	 * In ISA v3.0 and before, values "0" and "7" are considered reserved.
++	 * In ISA v3.1, value "7" has been used to indicate "larx/stcx".
++	 * Drop the sample if "type" has a reserved value for this field, with an
++	 * ISA version check.
++	 */
++	if (event->attr.sample_type & PERF_SAMPLE_DATA_SRC &&
++			ppmu->get_mem_data_src) {
++		val = (regs->dar & SIER_TYPE_MASK) >> SIER_TYPE_SHIFT;
++		if (val == 0 || (val == 7 && !cpu_has_feature(CPU_FTR_ARCH_31))) {
++			record = 0;
++			atomic64_inc(&event->lost_samples);
++		}
++	}
++
+ 	/*
+ 	 * Finally record data if requested.
+ 	 */
+diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c
+index 56301b2bc8ae87..031a2b63c171dc 100644
+--- a/arch/powerpc/perf/isa207-common.c
++++ b/arch/powerpc/perf/isa207-common.c
+@@ -321,8 +321,10 @@ void isa207_get_mem_data_src(union perf_mem_data_src *dsrc, u32 flags,
+ 
+ 	sier = mfspr(SPRN_SIER);
+ 	val = (sier & ISA207_SIER_TYPE_MASK) >> ISA207_SIER_TYPE_SHIFT;
+-	if (val != 1 && val != 2 && !(val == 7 && cpu_has_feature(CPU_FTR_ARCH_31)))
++	if (val != 1 && val != 2 && !(val == 7 && cpu_has_feature(CPU_FTR_ARCH_31))) {
++		dsrc->val = 0;
+ 		return;
++	}
+ 
+ 	idx = (sier & ISA207_SIER_LDST_MASK) >> ISA207_SIER_LDST_SHIFT;
+ 	sub_idx = (sier & ISA207_SIER_DATA_SRC_MASK) >> ISA207_SIER_DATA_SRC_SHIFT;
+diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
+index ae6f7a235d8b24..d6ebc19fb99c51 100644
+--- a/arch/powerpc/platforms/pseries/iommu.c
++++ b/arch/powerpc/platforms/pseries/iommu.c
+@@ -52,7 +52,8 @@ enum {
+ enum {
+ 	DDW_EXT_SIZE = 0,
+ 	DDW_EXT_RESET_DMA_WIN = 1,
+-	DDW_EXT_QUERY_OUT_SIZE = 2
++	DDW_EXT_QUERY_OUT_SIZE = 2,
++	DDW_EXT_LIMITED_ADDR_MODE = 3
+ };
+ 
+ static struct iommu_table *iommu_pseries_alloc_table(int node)
+@@ -1284,17 +1285,13 @@ static LIST_HEAD(failed_ddw_pdn_list);
+ 
+ static phys_addr_t ddw_memory_hotplug_max(void)
+ {
+-	resource_size_t max_addr = memory_hotplug_max();
+-	struct device_node *memory;
++	resource_size_t max_addr;
+ 
+-	for_each_node_by_type(memory, "memory") {
+-		struct resource res;
+-
+-		if (of_address_to_resource(memory, 0, &res))
+-			continue;
+-
+-		max_addr = max_t(resource_size_t, max_addr, res.end + 1);
+-	}
++#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
++	max_addr = hot_add_drconf_memory_max();
++#else
++	max_addr = memblock_end_of_DRAM();
++#endif
+ 
+ 	return max_addr;
+ }
+@@ -1331,6 +1328,54 @@ static void reset_dma_window(struct pci_dev *dev, struct device_node *par_dn)
+ 			 ret);
+ }
+ 
++/*
++ * Platforms support placing the PHB in limited address mode starting with
++ * LoPAR level 2.13. In this mode, the DMA address returned by DDW is over
++ * 4GB but less than 64 bits. This benefits IO adapters that don't support
++ * 64-bit DMA addresses.
++ */
++static int limited_dma_window(struct pci_dev *dev, struct device_node *par_dn)
++{
++	int ret;
++	u32 cfg_addr, reset_dma_win, las_supported;
++	u64 buid;
++	struct device_node *dn;
++	struct pci_dn *pdn;
++
++	ret = ddw_read_ext(par_dn, DDW_EXT_RESET_DMA_WIN, &reset_dma_win);
++	if (ret)
++		goto out;
++
++	ret = ddw_read_ext(par_dn, DDW_EXT_LIMITED_ADDR_MODE, &las_supported);
++
++	/* The Limited Address Space extension is available on the platform,
++	 * but DDW in limited addressing mode is not supported.
++	 */
++	if (!ret && !las_supported)
++		ret = -EPROTO;
++
++	if (ret) {
++		dev_info(&dev->dev, "Limited Address Space for DDW not Supported, err: %d", ret);
++		goto out;
++	}
++
++	dn = pci_device_to_OF_node(dev);
++	pdn = PCI_DN(dn);
++	buid = pdn->phb->buid;
++	cfg_addr = (pdn->busno << 16) | (pdn->devfn << 8);
++
++	ret = rtas_call(reset_dma_win, 4, 1, NULL, cfg_addr, BUID_HI(buid),
++			BUID_LO(buid), 1);
++	if (ret)
++		dev_info(&dev->dev,
++			 "ibm,reset-pe-dma-windows(%x) for Limited Addr Support: %x %x %x returned %d ",
++			 reset_dma_win, cfg_addr, BUID_HI(buid), BUID_LO(buid),
++			 ret);
++
++out:
++	return ret;
++}
++
+ /* Return largest page shift based on "IO Page Sizes" output of ibm,query-pe-dma-window. */
+ static int iommu_get_page_shift(u32 query_page_size)
+ {
+@@ -1398,7 +1443,7 @@ static struct property *ddw_property_create(const char *propname, u32 liobn, u64
+  *
+  * returns true if can map all pages (direct mapping), false otherwise..
+  */
+-static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
++static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn, u64 dma_mask)
+ {
+ 	int len = 0, ret;
+ 	int max_ram_len = order_base_2(ddw_memory_hotplug_max());
+@@ -1417,6 +1462,9 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
+ 	bool pmem_present;
+ 	struct pci_dn *pci = PCI_DN(pdn);
+ 	struct property *default_win = NULL;
++	bool limited_addr_req = false, limited_addr_enabled = false;
++	int dev_max_ddw;
++	int ddw_sz;
+ 
+ 	dn = of_find_node_by_type(NULL, "ibm,pmemory");
+ 	pmem_present = dn != NULL;
+@@ -1443,7 +1491,6 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
+ 	 * the ibm,ddw-applicable property holds the tokens for:
+ 	 * ibm,query-pe-dma-window
+ 	 * ibm,create-pe-dma-window
+-	 * ibm,remove-pe-dma-window
+ 	 * for the given node in that order.
+ 	 * the property is actually in the parent, not the PE
+ 	 */
+@@ -1463,6 +1510,20 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
+ 	if (ret != 0)
+ 		goto out_failed;
+ 
++	/* Is DMA Limited Addressing required? This is the case when the driver
++	 * has requested DDW but supports a mask which is less than 64 bits.
++	 */
++	limited_addr_req = (dma_mask != DMA_BIT_MASK(64));
++
++	/* place the PHB in Limited Addressing mode */
++	if (limited_addr_req) {
++		if (limited_dma_window(dev, pdn))
++			goto out_failed;
++
++		/* PHB is in Limited address mode */
++		limited_addr_enabled = true;
++	}
++
+ 	/*
+ 	 * If there is no window available, remove the default DMA window,
+ 	 * if it's present. This will make all the resources available to the
+@@ -1509,6 +1570,15 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
+ 		goto out_failed;
+ 	}
+ 
++	/* Maximum DMA window size that the device can address (in log2) */
++	dev_max_ddw = fls64(dma_mask);
++
++	/* If the device DMA mask is less than 64-bits, make sure the DMA window
++	 * size is not bigger than what the device can access
++	 */
++	ddw_sz = min(order_base_2(query.largest_available_block << page_shift),
++			dev_max_ddw);
++
+ 	/*
+ 	 * The "ibm,pmemory" can appear anywhere in the address space.
+ 	 * Assuming it is still backed by page structs, try MAX_PHYSMEM_BITS
+@@ -1517,23 +1587,21 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
+ 	 */
+ 	len = max_ram_len;
+ 	if (pmem_present) {
+-		if (query.largest_available_block >=
+-		    (1ULL << (MAX_PHYSMEM_BITS - page_shift)))
++		if (ddw_sz >= MAX_PHYSMEM_BITS)
+ 			len = MAX_PHYSMEM_BITS;
+ 		else
+ 			dev_info(&dev->dev, "Skipping ibm,pmemory");
+ 	}
+ 
+ 	/* check if the available block * number of ptes will map everything */
+-	if (query.largest_available_block < (1ULL << (len - page_shift))) {
++	if (ddw_sz < len) {
+ 		dev_dbg(&dev->dev,
+ 			"can't map partition max 0x%llx with %llu %llu-sized pages\n",
+ 			1ULL << len,
+ 			query.largest_available_block,
+ 			1ULL << page_shift);
+ 
+-		len = order_base_2(query.largest_available_block << page_shift);
+-
++		len = ddw_sz;
+ 		dynamic_mapping = true;
+ 	} else {
+ 		direct_mapping = !default_win_removed ||
+@@ -1547,8 +1615,9 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
+ 		 */
+ 		if (default_win_removed && pmem_present && !direct_mapping) {
+ 			/* DDW is big enough to be split */
+-			if ((query.largest_available_block << page_shift) >=
+-			     MIN_DDW_VPMEM_DMA_WINDOW + (1ULL << max_ram_len)) {
++			if ((1ULL << ddw_sz) >=
++			    MIN_DDW_VPMEM_DMA_WINDOW + (1ULL << max_ram_len)) {
++
+ 				direct_mapping = true;
+ 
+ 				/* offset of the Dynamic part of DDW */
+@@ -1559,8 +1628,7 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
+ 			dynamic_mapping = true;
+ 
+ 			/* create max size DDW possible */
+-			len = order_base_2(query.largest_available_block
+-							<< page_shift);
++			len = ddw_sz;
+ 		}
+ 	}
+ 
+@@ -1600,7 +1668,7 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
+ 
+ 	if (direct_mapping) {
+ 		/* DDW maps the whole partition, so enable direct DMA mapping */
+-		ret = walk_system_ram_range(0, memblock_end_of_DRAM() >> PAGE_SHIFT,
++		ret = walk_system_ram_range(0, ddw_memory_hotplug_max() >> PAGE_SHIFT,
+ 					    win64->value, tce_setrange_multi_pSeriesLP_walk);
+ 		if (ret) {
+ 			dev_info(&dev->dev, "failed to map DMA window for %pOF: %d\n",
+@@ -1689,7 +1757,7 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
+ 	__remove_dma_window(pdn, ddw_avail, create.liobn);
+ 
+ out_failed:
+-	if (default_win_removed)
++	if (default_win_removed || limited_addr_enabled)
+ 		reset_dma_window(dev, pdn);
+ 
+ 	fpdn = kzalloc(sizeof(*fpdn), GFP_KERNEL);
+@@ -1708,6 +1776,9 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
+ 		dev->dev.bus_dma_limit = dev->dev.archdata.dma_offset +
+ 						(1ULL << max_ram_len);
+ 
++	dev_info(&dev->dev, "lsa_required: %x, lsa_enabled: %x, direct mapping: %x\n",
++			limited_addr_req, limited_addr_enabled, direct_mapping);
++
+ 	return direct_mapping;
+ }
+ 
+@@ -1833,8 +1904,11 @@ static bool iommu_bypass_supported_pSeriesLP(struct pci_dev *pdev, u64 dma_mask)
+ {
+ 	struct device_node *dn = pci_device_to_OF_node(pdev), *pdn;
+ 
+-	/* only attempt to use a new window if 64-bit DMA is requested */
+-	if (dma_mask < DMA_BIT_MASK(64))
++	/* For DDW, the DMA mask should be more than 32 bits. For a mask more than
++	 * 32 bits but less than 64 bits, DMA addressing is supported in
++	 * Limited Addressing mode.
++	 */
++	if (dma_mask <= DMA_BIT_MASK(32))
+ 		return false;
+ 
+ 	dev_dbg(&pdev->dev, "node is %pOF\n", dn);
+@@ -1847,7 +1921,7 @@ static bool iommu_bypass_supported_pSeriesLP(struct pci_dev *pdev, u64 dma_mask)
+ 	 */
+ 	pdn = pci_dma_find(dn, NULL);
+ 	if (pdn && PCI_DN(pdn))
+-		return enable_ddw(pdev, pdn);
++		return enable_ddw(pdev, pdn, dma_mask);
+ 
+ 	return false;
+ }
+@@ -2349,11 +2423,17 @@ static int iommu_mem_notifier(struct notifier_block *nb, unsigned long action,
+ 	struct memory_notify *arg = data;
+ 	int ret = 0;
+ 
++	/* This notifier can get called when onlining persistent memory as well.
++	 * TCEs are not pre-mapped for persistent memory. Persistent memory will
++	 * always be above ddw_memory_hotplug_max()
++	 */
++
+ 	switch (action) {
+ 	case MEM_GOING_ONLINE:
+ 		spin_lock(&dma_win_list_lock);
+ 		list_for_each_entry(window, &dma_win_list, list) {
+-			if (window->direct) {
++			if (window->direct && (arg->start_pfn << PAGE_SHIFT) <
++				ddw_memory_hotplug_max()) {
+ 				ret |= tce_setrange_multi_pSeriesLP(arg->start_pfn,
+ 						arg->nr_pages, window->prop);
+ 			}
+@@ -2365,7 +2445,8 @@ static int iommu_mem_notifier(struct notifier_block *nb, unsigned long action,
+ 	case MEM_OFFLINE:
+ 		spin_lock(&dma_win_list_lock);
+ 		list_for_each_entry(window, &dma_win_list, list) {
+-			if (window->direct) {
++			if (window->direct && (arg->start_pfn << PAGE_SHIFT) <
++				ddw_memory_hotplug_max()) {
+ 				ret |= tce_clearrange_multi_pSeriesLP(arg->start_pfn,
+ 						arg->nr_pages, window->prop);
+ 			}
+diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
+index febf820d505837..e8beadc2bffda3 100644
+--- a/arch/riscv/include/asm/page.h
++++ b/arch/riscv/include/asm/page.h
+@@ -26,12 +26,9 @@
+  * When not using MMU this corresponds to the first free page in
+  * physical memory (aligned on a page boundary).
+  */
+-#ifdef CONFIG_64BIT
+ #ifdef CONFIG_MMU
++#ifdef CONFIG_64BIT
+ #define PAGE_OFFSET		kernel_map.page_offset
+-#else
+-#define PAGE_OFFSET		_AC(CONFIG_PAGE_OFFSET, UL)
+-#endif
+ /*
+  * By default, CONFIG_PAGE_OFFSET value corresponds to SV57 address space so
+  * define the PAGE_OFFSET value for SV48 and SV39.
+@@ -41,6 +38,9 @@
+ #else
+ #define PAGE_OFFSET		_AC(CONFIG_PAGE_OFFSET, UL)
+ #endif /* CONFIG_64BIT */
++#else
++#define PAGE_OFFSET		((unsigned long)phys_ram_base)
++#endif /* CONFIG_MMU */
+ 
+ #ifndef __ASSEMBLY__
+ 
+@@ -97,11 +97,7 @@ typedef struct page *pgtable_t;
+ #define MIN_MEMBLOCK_ADDR      0
+ #endif
+ 
+-#ifdef CONFIG_MMU
+ #define ARCH_PFN_OFFSET		(PFN_DOWN((unsigned long)phys_ram_base))
+-#else
+-#define ARCH_PFN_OFFSET		(PAGE_OFFSET >> PAGE_SHIFT)
+-#endif /* CONFIG_MMU */
+ 
+ struct kernel_mapping {
+ 	unsigned long page_offset;
+diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
+index c0866ada5bbc49..479550cdb440f9 100644
+--- a/arch/riscv/include/asm/pgtable.h
++++ b/arch/riscv/include/asm/pgtable.h
+@@ -12,7 +12,7 @@
+ #include <asm/pgtable-bits.h>
+ 
+ #ifndef CONFIG_MMU
+-#define KERNEL_LINK_ADDR	PAGE_OFFSET
++#define KERNEL_LINK_ADDR	_AC(CONFIG_PAGE_OFFSET, UL)
+ #define KERN_VIRT_SIZE		(UL(-1))
+ #else
+ 
+diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
+index 69dc8aaab3fb3a..6b3b5255c88925 100644
+--- a/arch/riscv/kernel/Makefile
++++ b/arch/riscv/kernel/Makefile
+@@ -9,8 +9,8 @@ CFLAGS_REMOVE_patch.o	= $(CC_FLAGS_FTRACE)
+ CFLAGS_REMOVE_sbi.o	= $(CC_FLAGS_FTRACE)
+ CFLAGS_REMOVE_return_address.o	= $(CC_FLAGS_FTRACE)
+ endif
+-CFLAGS_syscall_table.o	+= $(call cc-option,-Wno-override-init,)
+-CFLAGS_compat_syscall_table.o += $(call cc-option,-Wno-override-init,)
++CFLAGS_syscall_table.o	+= $(call cc-disable-warning, override-init)
++CFLAGS_compat_syscall_table.o += $(call cc-disable-warning, override-init)
+ 
+ ifdef CONFIG_KEXEC_CORE
+ AFLAGS_kexec_relocate.o := -mcmodel=medany $(call cc-option,-mno-relax)
+diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
+index 9b6e86ce386744..bb77607c87aa2d 100644
+--- a/arch/riscv/mm/tlbflush.c
++++ b/arch/riscv/mm/tlbflush.c
+@@ -4,6 +4,7 @@
+ #include <linux/smp.h>
+ #include <linux/sched.h>
+ #include <linux/hugetlb.h>
++#include <linux/mmu_notifier.h>
+ #include <asm/sbi.h>
+ #include <asm/mmu_context.h>
+ 
+@@ -78,10 +79,17 @@ static void __ipi_flush_tlb_range_asid(void *info)
+ 	local_flush_tlb_range_asid(d->start, d->size, d->stride, d->asid);
+ }
+ 
+-static void __flush_tlb_range(const struct cpumask *cmask, unsigned long asid,
++static inline unsigned long get_mm_asid(struct mm_struct *mm)
++{
++	return mm ? cntx2asid(atomic_long_read(&mm->context.id)) : FLUSH_TLB_NO_ASID;
++}
++
++static void __flush_tlb_range(struct mm_struct *mm,
++			      const struct cpumask *cmask,
+ 			      unsigned long start, unsigned long size,
+ 			      unsigned long stride)
+ {
++	unsigned long asid = get_mm_asid(mm);
+ 	unsigned int cpu;
+ 
+ 	if (cpumask_empty(cmask))
+@@ -105,30 +113,26 @@ static void __flush_tlb_range(const struct cpumask *cmask, unsigned long asid,
+ 	}
+ 
+ 	put_cpu();
+-}
+ 
+-static inline unsigned long get_mm_asid(struct mm_struct *mm)
+-{
+-	return cntx2asid(atomic_long_read(&mm->context.id));
++	if (mm)
++		mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, start + size);
+ }
+ 
+ void flush_tlb_mm(struct mm_struct *mm)
+ {
+-	__flush_tlb_range(mm_cpumask(mm), get_mm_asid(mm),
+-			  0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
++	__flush_tlb_range(mm, mm_cpumask(mm), 0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
+ }
+ 
+ void flush_tlb_mm_range(struct mm_struct *mm,
+ 			unsigned long start, unsigned long end,
+ 			unsigned int page_size)
+ {
+-	__flush_tlb_range(mm_cpumask(mm), get_mm_asid(mm),
+-			  start, end - start, page_size);
++	__flush_tlb_range(mm, mm_cpumask(mm), start, end - start, page_size);
+ }
+ 
+ void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
+ {
+-	__flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma->vm_mm),
++	__flush_tlb_range(vma->vm_mm, mm_cpumask(vma->vm_mm),
+ 			  addr, PAGE_SIZE, PAGE_SIZE);
+ }
+ 
+@@ -161,13 +165,13 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ 		}
+ 	}
+ 
+-	__flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma->vm_mm),
++	__flush_tlb_range(vma->vm_mm, mm_cpumask(vma->vm_mm),
+ 			  start, end - start, stride_size);
+ }
+ 
+ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+ {
+-	__flush_tlb_range(cpu_online_mask, FLUSH_TLB_NO_ASID,
++	__flush_tlb_range(NULL, cpu_online_mask,
+ 			  start, end - start, PAGE_SIZE);
+ }
+ 
+@@ -175,7 +179,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+ void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ 			unsigned long end)
+ {
+-	__flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma->vm_mm),
++	__flush_tlb_range(vma->vm_mm, mm_cpumask(vma->vm_mm),
+ 			  start, end - start, PMD_SIZE);
+ }
+ #endif
+@@ -189,7 +193,10 @@ void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
+ 			       struct mm_struct *mm,
+ 			       unsigned long uaddr)
+ {
++	unsigned long start = uaddr & PAGE_MASK;
++
+ 	cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
++	mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, start + PAGE_SIZE);
+ }
+ 
+ void arch_flush_tlb_batched_pending(struct mm_struct *mm)
+@@ -199,7 +206,7 @@ void arch_flush_tlb_batched_pending(struct mm_struct *mm)
+ 
+ void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
+ {
+-	__flush_tlb_range(&batch->cpumask, FLUSH_TLB_NO_ASID, 0,
+-			  FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
++	__flush_tlb_range(NULL, &batch->cpumask,
++			  0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
+ 	cpumask_clear(&batch->cpumask);
+ }
+diff --git a/arch/s390/hypfs/hypfs_diag_fs.c b/arch/s390/hypfs/hypfs_diag_fs.c
+index 00a6d370a28032..280266a74f378d 100644
+--- a/arch/s390/hypfs/hypfs_diag_fs.c
++++ b/arch/s390/hypfs/hypfs_diag_fs.c
+@@ -208,6 +208,8 @@ static int hypfs_create_cpu_files(struct dentry *cpus_dir, void *cpu_info)
+ 	snprintf(buffer, TMP_SIZE, "%d", cpu_info__cpu_addr(diag204_get_info_type(),
+ 							    cpu_info));
+ 	cpu_dir = hypfs_mkdir(cpus_dir, buffer);
++	if (IS_ERR(cpu_dir))
++		return PTR_ERR(cpu_dir);
+ 	rc = hypfs_create_u64(cpu_dir, "mgmtime",
+ 			      cpu_info__acc_time(diag204_get_info_type(), cpu_info) -
+ 			      cpu_info__lp_time(diag204_get_info_type(), cpu_info));
+diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
+index e95b2c8081eb8e..793afe236df077 100644
+--- a/arch/s390/include/asm/tlb.h
++++ b/arch/s390/include/asm/tlb.h
+@@ -85,7 +85,7 @@ static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
+ 	tlb->mm->context.flush_mm = 1;
+ 	tlb->freed_tables = 1;
+ 	tlb->cleared_pmds = 1;
+-	if (mm_alloc_pgste(tlb->mm))
++	if (mm_has_pgste(tlb->mm))
+ 		gmap_unlink(tlb->mm, (unsigned long *)pte, address);
+ 	tlb_remove_ptdesc(tlb, pte);
+ }
+diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
+index a5b4fe2ad93153..6ca9ea4a230bca 100644
+--- a/arch/um/kernel/mem.c
++++ b/arch/um/kernel/mem.c
+@@ -70,6 +70,7 @@ void __init mem_init(void)
+ 	map_memory(brk_end, __pa(brk_end), uml_reserved - brk_end, 1, 1, 0);
+ 	memblock_free((void *)brk_end, uml_reserved - brk_end);
+ 	uml_reserved = brk_end;
++	min_low_pfn = PFN_UP(__pa(uml_reserved));
+ 
+ 	/* this will put all low memory onto the freelists */
+ 	memblock_free_all();
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index 7b3622ba4c3c82..15425c9bdc2bcf 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -2420,6 +2420,7 @@ config STRICT_SIGALTSTACK_SIZE
+ config CFI_AUTO_DEFAULT
+ 	bool "Attempt to use FineIBT by default at boot time"
+ 	depends on FINEIBT
++	depends on !RUST || RUSTC_VERSION >= 108800
+ 	default y
+ 	help
+ 	  Attempt to use FineIBT by default at boot time. If enabled,
+diff --git a/arch/x86/boot/genimage.sh b/arch/x86/boot/genimage.sh
+index c9299aeb7333e6..3882ead513f742 100644
+--- a/arch/x86/boot/genimage.sh
++++ b/arch/x86/boot/genimage.sh
+@@ -22,6 +22,7 @@
+ # This script requires:
+ #   bash
+ #   syslinux
++#   genisoimage
+ #   mtools (for fdimage* and hdimage)
+ #   edk2/OVMF (for hdimage)
+ #
+@@ -251,7 +252,9 @@ geniso() {
+ 	cp "$isolinux" "$ldlinux" "$tmp_dir"
+ 	cp "$FBZIMAGE" "$tmp_dir"/linux
+ 	echo default linux "$KCMDLINE" > "$tmp_dir"/isolinux.cfg
+-	cp "${FDINITRDS[@]}" "$tmp_dir"/
++	if [ ${#FDINITRDS[@]} -gt 0 ]; then
++		cp "${FDINITRDS[@]}" "$tmp_dir"/
++	fi
+ 	genisoimage -J -r -appid 'LINUX_BOOT' -input-charset=utf-8 \
+ 		    -quiet -o "$FIMAGE" -b isolinux.bin \
+ 		    -c boot.cat -no-emul-boot -boot-load-size 4 \
+diff --git a/arch/x86/entry/entry.S b/arch/x86/entry/entry.S
+index 58e3124ee2b420..5b96249734ada1 100644
+--- a/arch/x86/entry/entry.S
++++ b/arch/x86/entry/entry.S
+@@ -63,7 +63,7 @@ THUNK warn_thunk_thunk, __warn_thunk
+  * entirely in the C code, and use an alias emitted by the linker script
+  * instead.
+  */
+-#ifdef CONFIG_STACKPROTECTOR
++#if defined(CONFIG_STACKPROTECTOR) && defined(CONFIG_SMP)
+ EXPORT_SYMBOL(__ref_stack_chk_guard);
+ #endif
+ #endif
+diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
+index c3a2f6f57770ab..d34ee6f04f18f3 100644
+--- a/arch/x86/events/amd/ibs.c
++++ b/arch/x86/events/amd/ibs.c
+@@ -272,7 +272,7 @@ static int perf_ibs_init(struct perf_event *event)
+ {
+ 	struct hw_perf_event *hwc = &event->hw;
+ 	struct perf_ibs *perf_ibs;
+-	u64 max_cnt, config;
++	u64 config;
+ 	int ret;
+ 
+ 	perf_ibs = get_ibs_pmu(event->attr.type);
+@@ -309,10 +309,19 @@ static int perf_ibs_init(struct perf_event *event)
+ 		if (!hwc->sample_period)
+ 			hwc->sample_period = 0x10;
+ 	} else {
+-		max_cnt = config & perf_ibs->cnt_mask;
++		u64 period = 0;
++
++		if (perf_ibs == &perf_ibs_op) {
++			period = (config & IBS_OP_MAX_CNT) << 4;
++			if (ibs_caps & IBS_CAPS_OPCNTEXT)
++				period |= config & IBS_OP_MAX_CNT_EXT_MASK;
++		} else {
++			period = (config & IBS_FETCH_MAX_CNT) << 4;
++		}
++
+ 		config &= ~perf_ibs->cnt_mask;
+-		event->attr.sample_period = max_cnt << 4;
+-		hwc->sample_period = event->attr.sample_period;
++		event->attr.sample_period = period;
++		hwc->sample_period = period;
+ 	}
+ 
+ 	if (!hwc->sample_period)
+@@ -1222,7 +1231,8 @@ static __init int perf_ibs_op_init(void)
+ 	if (ibs_caps & IBS_CAPS_OPCNTEXT) {
+ 		perf_ibs_op.max_period  |= IBS_OP_MAX_CNT_EXT_MASK;
+ 		perf_ibs_op.config_mask	|= IBS_OP_MAX_CNT_EXT_MASK;
+-		perf_ibs_op.cnt_mask    |= IBS_OP_MAX_CNT_EXT_MASK;
++		perf_ibs_op.cnt_mask    |= (IBS_OP_MAX_CNT_EXT_MASK |
++					    IBS_OP_CUR_CNT_EXT_MASK);
+ 	}
+ 
+ 	if (ibs_caps & IBS_CAPS_ZEN4)
+diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
+index 1b82bcc6fa5564..54007174c15b56 100644
+--- a/arch/x86/events/intel/ds.c
++++ b/arch/x86/events/intel/ds.c
+@@ -2240,8 +2240,9 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs, struct perf_sample_
+ 			       setup_pebs_fixed_sample_data);
+ }
+ 
+-static void intel_pmu_pebs_event_update_no_drain(struct cpu_hw_events *cpuc, int size)
++static void intel_pmu_pebs_event_update_no_drain(struct cpu_hw_events *cpuc, u64 mask)
+ {
++	u64 pebs_enabled = cpuc->pebs_enabled & mask;
+ 	struct perf_event *event;
+ 	int bit;
+ 
+@@ -2252,7 +2253,7 @@ static void intel_pmu_pebs_event_update_no_drain(struct cpu_hw_events *cpuc, int
+ 	 * It needs to call intel_pmu_save_and_restart_reload() to
+ 	 * update the event->count for this case.
+ 	 */
+-	for_each_set_bit(bit, (unsigned long *)&cpuc->pebs_enabled, size) {
++	for_each_set_bit(bit, (unsigned long *)&pebs_enabled, X86_PMC_IDX_MAX) {
+ 		event = cpuc->events[bit];
+ 		if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
+ 			intel_pmu_save_and_restart_reload(event, 0);
+@@ -2287,7 +2288,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs, struct perf_sample_d
+ 	}
+ 
+ 	if (unlikely(base >= top)) {
+-		intel_pmu_pebs_event_update_no_drain(cpuc, size);
++		intel_pmu_pebs_event_update_no_drain(cpuc, mask);
+ 		return;
+ 	}
+ 
+@@ -2397,7 +2398,7 @@ static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs, struct perf_sample_d
+ 	       (hybrid(cpuc->pmu, fixed_cntr_mask64) << INTEL_PMC_IDX_FIXED);
+ 
+ 	if (unlikely(base >= top)) {
+-		intel_pmu_pebs_event_update_no_drain(cpuc, X86_PMC_IDX_MAX);
++		intel_pmu_pebs_event_update_no_drain(cpuc, mask);
+ 		return;
+ 	}
+ 
+diff --git a/arch/x86/include/asm/bug.h b/arch/x86/include/asm/bug.h
+index 806649c7f23dc6..9a0f29be1a9ea6 100644
+--- a/arch/x86/include/asm/bug.h
++++ b/arch/x86/include/asm/bug.h
+@@ -22,8 +22,9 @@
+ #define SECOND_BYTE_OPCODE_UD2	0x0b
+ 
+ #define BUG_NONE		0xffff
+-#define BUG_UD1			0xfffe
+-#define BUG_UD2			0xfffd
++#define BUG_UD2			0xfffe
++#define BUG_UD1			0xfffd
++#define BUG_UD1_UBSAN		0xfffc
+ 
+ #ifdef CONFIG_GENERIC_BUG
+ 
+diff --git a/arch/x86/include/asm/cfi.h b/arch/x86/include/asm/cfi.h
+index 31d19c815f992c..7dd5ab239c87bd 100644
+--- a/arch/x86/include/asm/cfi.h
++++ b/arch/x86/include/asm/cfi.h
+@@ -126,6 +126,17 @@ static inline int cfi_get_offset(void)
+ 
+ extern u32 cfi_get_func_hash(void *func);
+ 
++#ifdef CONFIG_FINEIBT
++extern bool decode_fineibt_insn(struct pt_regs *regs, unsigned long *target, u32 *type);
++#else
++static inline bool
++decode_fineibt_insn(struct pt_regs *regs, unsigned long *target, u32 *type)
++{
++	return false;
++}
++
++#endif
++
+ #else
+ static inline enum bug_trap_type handle_cfi_failure(struct pt_regs *regs)
+ {
+diff --git a/arch/x86/include/asm/ibt.h b/arch/x86/include/asm/ibt.h
+index 1e59581d500ca9..b778ae6e67ee8c 100644
+--- a/arch/x86/include/asm/ibt.h
++++ b/arch/x86/include/asm/ibt.h
+@@ -41,7 +41,7 @@
+ 	_ASM_PTR fname "\n\t"				\
+ 	".popsection\n\t"
+ 
+-static inline __attribute_const__ u32 gen_endbr(void)
++static __always_inline __attribute_const__ u32 gen_endbr(void)
+ {
+ 	u32 endbr;
+ 
+@@ -56,7 +56,7 @@ static inline __attribute_const__ u32 gen_endbr(void)
+ 	return endbr;
+ }
+ 
+-static inline __attribute_const__ u32 gen_endbr_poison(void)
++static __always_inline __attribute_const__ u32 gen_endbr_poison(void)
+ {
+ 	/*
+ 	 * 4 byte NOP that isn't NOP4 (in fact it is OSP NOP3), such that it
+diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
+index 62d8b9448dc5c5..c6198fbcc1d77d 100644
+--- a/arch/x86/include/asm/intel-family.h
++++ b/arch/x86/include/asm/intel-family.h
+@@ -46,6 +46,7 @@
+ #define INTEL_ANY			IFM(X86_FAMILY_ANY, X86_MODEL_ANY)
+ 
+ #define INTEL_PENTIUM_PRO		IFM(6, 0x01)
++#define INTEL_PENTIUM_III_DESCHUTES	IFM(6, 0x05)
+ 
+ #define INTEL_CORE_YONAH		IFM(6, 0x0E)
+ 
+diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
+index 41a0ebb699ec64..f677382093f360 100644
+--- a/arch/x86/include/asm/nmi.h
++++ b/arch/x86/include/asm/nmi.h
+@@ -56,6 +56,8 @@ int __register_nmi_handler(unsigned int, struct nmiaction *);
+ 
+ void unregister_nmi_handler(unsigned int, const char *);
+ 
++void set_emergency_nmi_handler(unsigned int type, nmi_handler_t handler);
++
+ void stop_nmi(void);
+ void restart_nmi(void);
+ void local_touch_nmi(void);
+diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
+index c55a79d5feaeb6..2d9c250b3c8d80 100644
+--- a/arch/x86/include/asm/percpu.h
++++ b/arch/x86/include/asm/percpu.h
+@@ -349,9 +349,9 @@ do {									\
+ 									\
+ 	asm qual (ALTERNATIVE("call this_cpu_cmpxchg8b_emu",		\
+ 			      "cmpxchg8b " __percpu_arg([var]), X86_FEATURE_CX8) \
+-		  : [var] "+m" (__my_cpu_var(_var)),			\
+-		    "+a" (old__.low),					\
+-		    "+d" (old__.high)					\
++		  : ALT_OUTPUT_SP([var] "+m" (__my_cpu_var(_var)),	\
++				  "+a" (old__.low),			\
++				  "+d" (old__.high))			\
+ 		  : "b" (new__.low),					\
+ 		    "c" (new__.high),					\
+ 		    "S" (&(_var))					\
+@@ -380,10 +380,10 @@ do {									\
+ 	asm qual (ALTERNATIVE("call this_cpu_cmpxchg8b_emu",		\
+ 			      "cmpxchg8b " __percpu_arg([var]), X86_FEATURE_CX8) \
+ 		  CC_SET(z)						\
+-		  : CC_OUT(z) (success),				\
+-		    [var] "+m" (__my_cpu_var(_var)),			\
+-		    "+a" (old__.low),					\
+-		    "+d" (old__.high)					\
++		  : ALT_OUTPUT_SP(CC_OUT(z) (success),			\
++				  [var] "+m" (__my_cpu_var(_var)),	\
++				  "+a" (old__.low),			\
++				  "+d" (old__.high))			\
+ 		  : "b" (new__.low),					\
+ 		    "c" (new__.high),					\
+ 		    "S" (&(_var))					\
+@@ -420,9 +420,9 @@ do {									\
+ 									\
+ 	asm qual (ALTERNATIVE("call this_cpu_cmpxchg16b_emu",		\
+ 			      "cmpxchg16b " __percpu_arg([var]), X86_FEATURE_CX16) \
+-		  : [var] "+m" (__my_cpu_var(_var)),			\
+-		    "+a" (old__.low),					\
+-		    "+d" (old__.high)					\
++		  : ALT_OUTPUT_SP([var] "+m" (__my_cpu_var(_var)),	\
++				  "+a" (old__.low),			\
++				  "+d" (old__.high))			\
+ 		  : "b" (new__.low),					\
+ 		    "c" (new__.high),					\
+ 		    "S" (&(_var))					\
+@@ -451,10 +451,10 @@ do {									\
+ 	asm qual (ALTERNATIVE("call this_cpu_cmpxchg16b_emu",		\
+ 			      "cmpxchg16b " __percpu_arg([var]), X86_FEATURE_CX16) \
+ 		  CC_SET(z)						\
+-		  : CC_OUT(z) (success),				\
+-		    [var] "+m" (__my_cpu_var(_var)),			\
+-		    "+a" (old__.low),					\
+-		    "+d" (old__.high)					\
++		  : ALT_OUTPUT_SP(CC_OUT(z) (success),			\
++				  [var] "+m" (__my_cpu_var(_var)),	\
++				  "+a" (old__.low),			\
++				  "+d" (old__.high))			\
+ 		  : "b" (new__.low),					\
+ 		    "c" (new__.high),					\
+ 		    "S" (&(_var))					\
+diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
+index 7505bb5d260ab4..aa351c4a20eee0 100644
+--- a/arch/x86/include/asm/perf_event.h
++++ b/arch/x86/include/asm/perf_event.h
+@@ -520,6 +520,7 @@ struct pebs_xmm {
+  */
+ #define IBS_OP_CUR_CNT		(0xFFF80ULL<<32)
+ #define IBS_OP_CUR_CNT_RAND	(0x0007FULL<<32)
++#define IBS_OP_CUR_CNT_EXT_MASK	(0x7FULL<<52)
+ #define IBS_OP_CNT_CTL		(1ULL<<19)
+ #define IBS_OP_VAL		(1ULL<<18)
+ #define IBS_OP_ENABLE		(1ULL<<17)
+diff --git a/arch/x86/include/asm/sev-common.h b/arch/x86/include/asm/sev-common.h
+index 98726c2b04f852..ddaf3c62efb523 100644
+--- a/arch/x86/include/asm/sev-common.h
++++ b/arch/x86/include/asm/sev-common.h
+@@ -116,7 +116,7 @@ enum psc_op {
+ #define GHCB_MSR_VMPL_REQ		0x016
+ #define GHCB_MSR_VMPL_REQ_LEVEL(v)			\
+ 	/* GHCBData[39:32] */				\
+-	(((u64)(v) & GENMASK_ULL(7, 0) << 32) |		\
++	((((u64)(v) & GENMASK_ULL(7, 0)) << 32) |	\
+ 	/* GHCBDdata[11:0] */				\
+ 	GHCB_MSR_VMPL_REQ)
+ 
+diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h
+index 9b82eebd7add55..dafbf581c515d0 100644
+--- a/arch/x86/include/uapi/asm/bootparam.h
++++ b/arch/x86/include/uapi/asm/bootparam.h
+@@ -26,7 +26,7 @@
+ #define XLF_5LEVEL_ENABLED		(1<<6)
+ #define XLF_MEM_ENCRYPTION		(1<<7)
+ 
+-#ifndef __ASSEMBLY__
++#ifndef __ASSEMBLER__
+ 
+ #include <linux/types.h>
+ #include <linux/screen_info.h>
+@@ -210,6 +210,6 @@ enum x86_hardware_subarch {
+ 	X86_NR_SUBARCHS,
+ };
+ 
+-#endif /* __ASSEMBLY__ */
++#endif /* __ASSEMBLER__ */
+ 
+ #endif /* _ASM_X86_BOOTPARAM_H */
+diff --git a/arch/x86/include/uapi/asm/e820.h b/arch/x86/include/uapi/asm/e820.h
+index 2f491efe3a1263..55bc6686715603 100644
+--- a/arch/x86/include/uapi/asm/e820.h
++++ b/arch/x86/include/uapi/asm/e820.h
+@@ -54,7 +54,7 @@
+  */
+ #define E820_RESERVED_KERN        128
+ 
+-#ifndef __ASSEMBLY__
++#ifndef __ASSEMBLER__
+ #include <linux/types.h>
+ struct e820entry {
+ 	__u64 addr;	/* start of memory segment */
+@@ -76,7 +76,7 @@ struct e820map {
+ #define BIOS_ROM_BASE		0xffe00000
+ #define BIOS_ROM_END		0xffffffff
+ 
+-#endif /* __ASSEMBLY__ */
++#endif /* __ASSEMBLER__ */
+ 
+ 
+ #endif /* _UAPI_ASM_X86_E820_H */
+diff --git a/arch/x86/include/uapi/asm/ldt.h b/arch/x86/include/uapi/asm/ldt.h
+index d62ac5db093b49..a82c039d8e6a7e 100644
+--- a/arch/x86/include/uapi/asm/ldt.h
++++ b/arch/x86/include/uapi/asm/ldt.h
+@@ -12,7 +12,7 @@
+ /* The size of each LDT entry. */
+ #define LDT_ENTRY_SIZE	8
+ 
+-#ifndef __ASSEMBLY__
++#ifndef __ASSEMBLER__
+ /*
+  * Note on 64bit base and limit is ignored and you cannot set DS/ES/CS
+  * not to the default values if you still want to do syscalls. This
+@@ -44,5 +44,5 @@ struct user_desc {
+ #define MODIFY_LDT_CONTENTS_STACK	1
+ #define MODIFY_LDT_CONTENTS_CODE	2
+ 
+-#endif /* !__ASSEMBLY__ */
++#endif /* !__ASSEMBLER__ */
+ #endif /* _ASM_X86_LDT_H */
+diff --git a/arch/x86/include/uapi/asm/msr.h b/arch/x86/include/uapi/asm/msr.h
+index e7516b402a00f1..4b8917ca28fe76 100644
+--- a/arch/x86/include/uapi/asm/msr.h
++++ b/arch/x86/include/uapi/asm/msr.h
+@@ -2,7 +2,7 @@
+ #ifndef _UAPI_ASM_X86_MSR_H
+ #define _UAPI_ASM_X86_MSR_H
+ 
+-#ifndef __ASSEMBLY__
++#ifndef __ASSEMBLER__
+ 
+ #include <linux/types.h>
+ #include <linux/ioctl.h>
+@@ -10,5 +10,5 @@
+ #define X86_IOC_RDMSR_REGS	_IOWR('c', 0xA0, __u32[8])
+ #define X86_IOC_WRMSR_REGS	_IOWR('c', 0xA1, __u32[8])
+ 
+-#endif /* __ASSEMBLY__ */
++#endif /* __ASSEMBLER__ */
+ #endif /* _UAPI_ASM_X86_MSR_H */
+diff --git a/arch/x86/include/uapi/asm/ptrace-abi.h b/arch/x86/include/uapi/asm/ptrace-abi.h
+index 16074b9c93bb51..5823584dea132b 100644
+--- a/arch/x86/include/uapi/asm/ptrace-abi.h
++++ b/arch/x86/include/uapi/asm/ptrace-abi.h
+@@ -25,7 +25,7 @@
+ 
+ #else /* __i386__ */
+ 
+-#if defined(__ASSEMBLY__) || defined(__FRAME_OFFSETS)
++#if defined(__ASSEMBLER__) || defined(__FRAME_OFFSETS)
+ /*
+  * C ABI says these regs are callee-preserved. They aren't saved on kernel entry
+  * unless syscall needs a complete, fully filled "struct pt_regs".
+@@ -57,7 +57,7 @@
+ #define EFLAGS 144
+ #define RSP 152
+ #define SS 160
+-#endif /* __ASSEMBLY__ */
++#endif /* __ASSEMBLER__ */
+ 
+ /* top of stack page */
+ #define FRAME_SIZE 168
+@@ -87,7 +87,7 @@
+ 
+ #define PTRACE_SINGLEBLOCK	33	/* resume execution until next branch */
+ 
+-#ifndef __ASSEMBLY__
++#ifndef __ASSEMBLER__
+ #include <linux/types.h>
+ #endif
+ 
+diff --git a/arch/x86/include/uapi/asm/ptrace.h b/arch/x86/include/uapi/asm/ptrace.h
+index 85165c0edafc86..e0b5b4f6226b18 100644
+--- a/arch/x86/include/uapi/asm/ptrace.h
++++ b/arch/x86/include/uapi/asm/ptrace.h
+@@ -7,7 +7,7 @@
+ #include <asm/processor-flags.h>
+ 
+ 
+-#ifndef __ASSEMBLY__
++#ifndef __ASSEMBLER__
+ 
+ #ifdef __i386__
+ /* this struct defines the way the registers are stored on the
+@@ -81,6 +81,6 @@ struct pt_regs {
+ 
+ 
+ 
+-#endif /* !__ASSEMBLY__ */
++#endif /* !__ASSEMBLER__ */
+ 
+ #endif /* _UAPI_ASM_X86_PTRACE_H */
+diff --git a/arch/x86/include/uapi/asm/setup_data.h b/arch/x86/include/uapi/asm/setup_data.h
+index b111b0c1854491..50c45ead4e7c97 100644
+--- a/arch/x86/include/uapi/asm/setup_data.h
++++ b/arch/x86/include/uapi/asm/setup_data.h
+@@ -18,7 +18,7 @@
+ #define SETUP_INDIRECT			(1<<31)
+ #define SETUP_TYPE_MAX			(SETUP_ENUM_MAX | SETUP_INDIRECT)
+ 
+-#ifndef __ASSEMBLY__
++#ifndef __ASSEMBLER__
+ 
+ #include <linux/types.h>
+ 
+@@ -78,6 +78,6 @@ struct ima_setup_data {
+ 	__u64 size;
+ } __attribute__((packed));
+ 
+-#endif /* __ASSEMBLY__ */
++#endif /* __ASSEMBLER__ */
+ 
+ #endif /* _UAPI_ASM_X86_SETUP_DATA_H */
+diff --git a/arch/x86/include/uapi/asm/signal.h b/arch/x86/include/uapi/asm/signal.h
+index f777346450ec3d..1067efabf18b5b 100644
+--- a/arch/x86/include/uapi/asm/signal.h
++++ b/arch/x86/include/uapi/asm/signal.h
+@@ -2,7 +2,7 @@
+ #ifndef _UAPI_ASM_X86_SIGNAL_H
+ #define _UAPI_ASM_X86_SIGNAL_H
+ 
+-#ifndef __ASSEMBLY__
++#ifndef __ASSEMBLER__
+ #include <linux/types.h>
+ #include <linux/compiler.h>
+ 
+@@ -16,7 +16,7 @@ struct siginfo;
+ typedef unsigned long sigset_t;
+ 
+ #endif /* __KERNEL__ */
+-#endif /* __ASSEMBLY__ */
++#endif /* __ASSEMBLER__ */
+ 
+ 
+ #define SIGHUP		 1
+@@ -68,7 +68,7 @@ typedef unsigned long sigset_t;
+ 
+ #include <asm-generic/signal-defs.h>
+ 
+-#ifndef __ASSEMBLY__
++#ifndef __ASSEMBLER__
+ 
+ 
+ # ifndef __KERNEL__
+@@ -106,6 +106,6 @@ typedef struct sigaltstack {
+ 	__kernel_size_t ss_size;
+ } stack_t;
+ 
+-#endif /* __ASSEMBLY__ */
++#endif /* __ASSEMBLER__ */
+ 
+ #endif /* _UAPI_ASM_X86_SIGNAL_H */
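The eight UAPI header hunks above are one mechanical change: the guard becomes `__ASSEMBLER__`, which GCC and Clang predefine whenever they preprocess assembly, instead of `__ASSEMBLY__`, which every assembly includer had to define by hand. A minimal illustration of the convention (example header, not from the patch):

  /* example.h: constants shared with .S files; the C declarations are
   * hidden automatically because the toolchain predefines __ASSEMBLER__. */
  #define EXAMPLE_FLAG (1 << 3)        /* usable from both C and assembly */

  #ifndef __ASSEMBLER__
  #include <linux/types.h>
  struct example_regs {
  	__u64 ip;
  	__u64 flags;
  };
  #endif /* __ASSEMBLER__ */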
+diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
+index 66e77bd7d51161..6ab96bc764cfaa 100644
+--- a/arch/x86/kernel/alternative.c
++++ b/arch/x86/kernel/alternative.c
+@@ -1254,6 +1254,7 @@ asm(	".pushsection .rodata			\n"
+ 	"	endbr64				\n"
+ 	"	subl	$0x12345678, %r10d	\n"
+ 	"	je	fineibt_preamble_end	\n"
++	"fineibt_preamble_ud2:			\n"
+ 	"	ud2				\n"
+ 	"	nop				\n"
+ 	"fineibt_preamble_end:			\n"
+@@ -1261,9 +1262,11 @@ asm(	".pushsection .rodata			\n"
+ );
+ 
+ extern u8 fineibt_preamble_start[];
++extern u8 fineibt_preamble_ud2[];
+ extern u8 fineibt_preamble_end[];
+ 
+ #define fineibt_preamble_size (fineibt_preamble_end - fineibt_preamble_start)
++#define fineibt_preamble_ud2  (fineibt_preamble_ud2 - fineibt_preamble_start)
+ #define fineibt_preamble_hash 7
+ 
+ asm(	".pushsection .rodata			\n"
+@@ -1568,6 +1571,33 @@ static void poison_cfi(void *addr)
+ 	}
+ }
+ 
++/*
++ * regs->ip points to a UD2 instruction, return true and fill out target and
++ * type when this UD2 is from a FineIBT preamble.
++ *
++ * We check the preamble by checking for the ENDBR instruction relative to the
++ * UD2 instruction.
++ */
++bool decode_fineibt_insn(struct pt_regs *regs, unsigned long *target, u32 *type)
++{
++	unsigned long addr = regs->ip - fineibt_preamble_ud2;
++	u32 endbr, hash;
++
++	__get_kernel_nofault(&endbr, addr, u32, Efault);
++	if (endbr != gen_endbr())
++		return false;
++
++	*target = addr + fineibt_preamble_size;
++
++	__get_kernel_nofault(&hash, addr + fineibt_preamble_hash, u32, Efault);
++	*type = (u32)regs->r10 + hash;
++
++	return true;
++
++Efault:
++	return false;
++}
++
+ #else
+ 
+ static void __apply_fineibt(s32 *start_retpoline, s32 *end_retpoline,
+diff --git a/arch/x86/kernel/cfi.c b/arch/x86/kernel/cfi.c
+index e6bf78fac14622..f6905bef0af844 100644
+--- a/arch/x86/kernel/cfi.c
++++ b/arch/x86/kernel/cfi.c
+@@ -70,11 +70,25 @@ enum bug_trap_type handle_cfi_failure(struct pt_regs *regs)
+ 	unsigned long target;
+ 	u32 type;
+ 
+-	if (!is_cfi_trap(regs->ip))
+-		return BUG_TRAP_TYPE_NONE;
++	switch (cfi_mode) {
++	case CFI_KCFI:
++		if (!is_cfi_trap(regs->ip))
++			return BUG_TRAP_TYPE_NONE;
++
++		if (!decode_cfi_insn(regs, &target, &type))
++			return report_cfi_failure_noaddr(regs, regs->ip);
++
++		break;
+ 
+-	if (!decode_cfi_insn(regs, &target, &type))
+-		return report_cfi_failure_noaddr(regs, regs->ip);
++	case CFI_FINEIBT:
++		if (!decode_fineibt_insn(regs, &target, &type))
++			return BUG_TRAP_TYPE_NONE;
++
++		break;
++
++	default:
++		return BUG_TRAP_TYPE_NONE;
++	}
+ 
+ 	return report_cfi_failure(regs, regs->ip, &target, type);
+ }
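The alternative.c and cfi.c hunks above teach the CFI failure path about FineIBT: the new `fineibt_preamble_ud2` label records where the #UD lands inside the preamble, `decode_fineibt_insn()` walks back from `regs->ip` to validate the ENDBR and recover the type hash from the `subl` immediate, and `handle_cfi_failure()` now dispatches on `cfi_mode` instead of assuming kCFI traps. A rough userspace model of the decode arithmetic (offsets and the ENDBR encoding are inferred from the preamble shown above, not authoritative):

  #include <stdbool.h>
  #include <stddef.h>
  #include <stdint.h>
  #include <string.h>

  #define PREAMBLE_UD2   13           /* endbr64(4) + subl imm32(7) + je rel8(2) */
  #define PREAMBLE_SIZE  16           /* padded preamble length */
  #define PREAMBLE_HASH   7           /* offset of the subl's 32-bit immediate */
  #define ENDBR64        0xfa1e0ff3u  /* f3 0f 1e fa read as a little-endian u32 */

  /* 'text' stands in for kernel .text; the kernel reads it with
   * __get_kernel_nofault() so a bad address cannot fault again. */
  static bool decode_fineibt(const uint8_t *text, size_t ip, uint64_t r10,
  			   size_t *target, uint32_t *type)
  {
  	size_t start = ip - PREAMBLE_UD2;        /* back up from the UD2 */
  	uint32_t insn, hash;

  	memcpy(&insn, text + start, 4);
  	if (insn != ENDBR64)
  		return false;                    /* not a FineIBT preamble */

  	*target = start + PREAMBLE_SIZE;         /* body follows the preamble */
  	memcpy(&hash, text + start + PREAMBLE_HASH, 4);
  	*type = (uint32_t)r10 + hash;            /* undo the subl to get the type */
  	return true;
  }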
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index c683abd640fdea..0e9ab0b9a4942c 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -1442,9 +1442,13 @@ static __ro_after_init enum spectre_v2_mitigation_cmd spectre_v2_cmd;
+ static enum spectre_v2_user_cmd __init
+ spectre_v2_parse_user_cmdline(void)
+ {
++	enum spectre_v2_user_cmd mode;
+ 	char arg[20];
+ 	int ret, i;
+ 
++	mode = IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V2) ?
++		SPECTRE_V2_USER_CMD_AUTO : SPECTRE_V2_USER_CMD_NONE;
++
+ 	switch (spectre_v2_cmd) {
+ 	case SPECTRE_V2_CMD_NONE:
+ 		return SPECTRE_V2_USER_CMD_NONE;
+@@ -1457,7 +1461,7 @@ spectre_v2_parse_user_cmdline(void)
+ 	ret = cmdline_find_option(boot_command_line, "spectre_v2_user",
+ 				  arg, sizeof(arg));
+ 	if (ret < 0)
+-		return SPECTRE_V2_USER_CMD_AUTO;
++		return mode;
+ 
+ 	for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) {
+ 		if (match_option(arg, ret, v2_user_options[i].option)) {
+@@ -1467,8 +1471,8 @@ spectre_v2_parse_user_cmdline(void)
+ 		}
+ 	}
+ 
+-	pr_err("Unknown user space protection option (%s). Switching to AUTO select\n", arg);
+-	return SPECTRE_V2_USER_CMD_AUTO;
++	pr_err("Unknown user space protection option (%s). Switching to default\n", arg);
++	return mode;
+ }
+ 
+ static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode)
+diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
+index df5650eb3f0881..362cc71bbc8662 100644
+--- a/arch/x86/kernel/cpu/microcode/intel.c
++++ b/arch/x86/kernel/cpu/microcode/intel.c
+@@ -74,7 +74,7 @@ void intel_collect_cpu_info(struct cpu_signature *sig)
+ 	sig->pf = 0;
+ 	sig->rev = intel_get_microcode_revision();
+ 
+-	if (x86_model(sig->sig) >= 5 || x86_family(sig->sig) > 6) {
++	if (IFM(x86_family(sig->sig), x86_model(sig->sig)) >= INTEL_PENTIUM_III_DESCHUTES) {
+ 		unsigned int val[2];
+ 
+ 		/* get processor flags from MSR 0x17 */
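The microcode hunk above folds the old two-clause family/model test into a single ordered comparison: `IFM()` packs vendor, family and model into one integer, so everything at or past `INTEL_PENTIUM_III_DESCHUTES` (family 6, model 5, matching the old test) qualifies with one `>=`. As a side effect it also stops matching pre-family-6 parts that happened to have a model >= 5, which the old `||` accepted. Sketch of the packing idea (bit positions are an assumption for illustration):

  /* Family sits above model, so later parts compare numerically greater. */
  #define VFM_SKETCH(vendor, family, model) \
  	(((unsigned)(vendor) << 16) | ((unsigned)(family) << 8) | (unsigned)(model))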
+diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
+index ed163c8c8604e3..9a95d00f142330 100644
+--- a/arch/x86/kernel/nmi.c
++++ b/arch/x86/kernel/nmi.c
+@@ -40,8 +40,12 @@
+ #define CREATE_TRACE_POINTS
+ #include <trace/events/nmi.h>
+ 
++/*
++ * An emergency handler can be set in any context including NMI
++ */
+ struct nmi_desc {
+ 	raw_spinlock_t lock;
++	nmi_handler_t emerg_handler;
+ 	struct list_head head;
+ };
+ 
+@@ -132,9 +136,22 @@ static void nmi_check_duration(struct nmiaction *action, u64 duration)
+ static int nmi_handle(unsigned int type, struct pt_regs *regs)
+ {
+ 	struct nmi_desc *desc = nmi_to_desc(type);
++	nmi_handler_t ehandler;
+ 	struct nmiaction *a;
+ 	int handled=0;
+ 
++	/*
++	 * Call the emergency handler, if set
++	 *
++	 * In the case of crash_nmi_callback() emergency handler, it will
++	 * return in the case of the crashing CPU to enable it to complete
++	 * other necessary crashing actions ASAP. Other handlers in the
++	 * linked list won't need to be run.
++	 */
++	ehandler = desc->emerg_handler;
++	if (ehandler)
++		return ehandler(type, regs);
++
+ 	rcu_read_lock();
+ 
+ 	/*
+@@ -224,6 +241,31 @@ void unregister_nmi_handler(unsigned int type, const char *name)
+ }
+ EXPORT_SYMBOL_GPL(unregister_nmi_handler);
+ 
++/**
++ * set_emergency_nmi_handler - Set emergency handler
++ * @type:    NMI type
++ * @handler: the emergency handler to be stored
++ *
++ * Set an emergency NMI handler which, if set, will preempt all the other
++ * handlers in the linked list. If a NULL handler is passed in, it will clear
++ * it. It is expected that concurrent calls to this function will not happen
++ * or the system is screwed beyond repair.
++ */
++void set_emergency_nmi_handler(unsigned int type, nmi_handler_t handler)
++{
++	struct nmi_desc *desc = nmi_to_desc(type);
++
++	if (WARN_ON_ONCE(desc->emerg_handler == handler))
++		return;
++	desc->emerg_handler = handler;
++
++	/*
++	 * Ensure the emergency handler is visible to other CPUs before
++	 * function return
++	 */
++	smp_wmb();
++}
++
+ static void
+ pci_serr_error(unsigned char reason, struct pt_regs *regs)
+ {
+diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
+index dc1dd3f3e67fcd..9aaac1f9f45b57 100644
+--- a/arch/x86/kernel/reboot.c
++++ b/arch/x86/kernel/reboot.c
+@@ -926,15 +926,11 @@ void nmi_shootdown_cpus(nmi_shootdown_cb callback)
+ 	shootdown_callback = callback;
+ 
+ 	atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
+-	/* Would it be better to replace the trap vector here? */
+-	if (register_nmi_handler(NMI_LOCAL, crash_nmi_callback,
+-				 NMI_FLAG_FIRST, "crash"))
+-		return;		/* Return what? */
++
+ 	/*
+-	 * Ensure the new callback function is set before sending
+-	 * out the NMI
++	 * Set emergency handler to preempt other handlers.
+ 	 */
+-	wmb();
++	set_emergency_nmi_handler(NMI_LOCAL, crash_nmi_callback);
+ 
+ 	apic_send_IPI_allbutself(NMI_VECTOR);
+ 
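Taken together, the nmi.c and reboot.c hunks replace the crash path's register-a-first-handler dance with a dedicated slot: `nmi_handle()` consults `desc->emerg_handler` before walking the RCU-protected list, and `nmi_shootdown_cpus()` publishes the callback with `set_emergency_nmi_handler()` before raising the IPI. The ordering requirement is the usual publish-then-signal pattern (distilled sketch):

  /* Crashing CPU: make the handler visible, then trigger the NMIs. */
  desc->emerg_handler = crash_nmi_callback;
  smp_wmb();                               /* store ordered before the IPI */
  apic_send_IPI_allbutself(NMI_VECTOR);

  /* Every NMI'd CPU: the first load in nmi_handle() observes it. */
  ehandler = desc->emerg_handler;
  if (ehandler)
  	return ehandler(type, regs);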
+diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
+index f1fac08fdef28c..2c451de702c872 100644
+--- a/arch/x86/kernel/smpboot.c
++++ b/arch/x86/kernel/smpboot.c
+@@ -681,9 +681,9 @@ static void __init smp_quirk_init_udelay(void)
+ 		return;
+ 
+ 	/* if modern processor, use no delay */
+-	if (((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 == 6)) ||
+-	    ((boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) && (boot_cpu_data.x86 >= 0x18)) ||
+-	    ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && (boot_cpu_data.x86 >= 0xF))) {
++	if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && boot_cpu_data.x86_vfm >= INTEL_PENTIUM_PRO) ||
++	    (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON && boot_cpu_data.x86 >= 0x18) ||
++	    (boot_cpu_data.x86_vendor == X86_VENDOR_AMD   && boot_cpu_data.x86 >= 0xF)) {
+ 		init_udelay = 0;
+ 		return;
+ 	}
+diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
+index 5e3e036e6e537f..b18fc7539b8d7b 100644
+--- a/arch/x86/kernel/traps.c
++++ b/arch/x86/kernel/traps.c
+@@ -94,10 +94,17 @@ __always_inline int is_valid_bugaddr(unsigned long addr)
+ 
+ /*
+  * Check for UD1 or UD2, accounting for Address Size Override Prefixes.
+- * If it's a UD1, get the ModRM byte to pass along to UBSan.
++ * If it's a UD1, further decode to determine its use:
++ *
++ * UBSan{0}:     67 0f b9 00             ud1    (%eax),%eax
++ * UBSan{10}:    67 0f b9 40 10          ud1    0x10(%eax),%eax
++ * static_call:  0f b9 cc                ud1    %esp,%ecx
++ *
++ * Notably UBSAN uses EAX, static_call uses ECX.
+  */
+-__always_inline int decode_bug(unsigned long addr, u32 *imm)
++__always_inline int decode_bug(unsigned long addr, s32 *imm, int *len)
+ {
++	unsigned long start = addr;
+ 	u8 v;
+ 
+ 	if (addr < TASK_SIZE_MAX)
+@@ -110,24 +117,42 @@ __always_inline int decode_bug(unsigned long addr, u32 *imm)
+ 		return BUG_NONE;
+ 
+ 	v = *(u8 *)(addr++);
+-	if (v == SECOND_BYTE_OPCODE_UD2)
++	if (v == SECOND_BYTE_OPCODE_UD2) {
++		*len = addr - start;
+ 		return BUG_UD2;
++	}
+ 
+-	if (!IS_ENABLED(CONFIG_UBSAN_TRAP) || v != SECOND_BYTE_OPCODE_UD1)
++	if (v != SECOND_BYTE_OPCODE_UD1)
+ 		return BUG_NONE;
+ 
+-	/* Retrieve the immediate (type value) for the UBSAN UD1 */
+-	v = *(u8 *)(addr++);
+-	if (X86_MODRM_RM(v) == 4)
+-		addr++;
+-
+ 	*imm = 0;
+-	if (X86_MODRM_MOD(v) == 1)
+-		*imm = *(u8 *)addr;
+-	else if (X86_MODRM_MOD(v) == 2)
+-		*imm = *(u32 *)addr;
+-	else
+-		WARN_ONCE(1, "Unexpected MODRM_MOD: %u\n", X86_MODRM_MOD(v));
++	v = *(u8 *)(addr++);		/* ModRM */
++
++	if (X86_MODRM_MOD(v) != 3 && X86_MODRM_RM(v) == 4)
++		addr++;			/* SIB */
++
++	/* Decode immediate, if present */
++	switch (X86_MODRM_MOD(v)) {
++	case 0: if (X86_MODRM_RM(v) == 5)
++			addr += 4; /* RIP + disp32 */
++		break;
++
++	case 1: *imm = *(s8 *)addr;
++		addr += 1;
++		break;
++
++	case 2: *imm = *(s32 *)addr;
++		addr += 4;
++		break;
++
++	case 3: break;
++	}
++
++	/* record instruction length */
++	*len = addr - start;
++
++	if (X86_MODRM_REG(v) == 0)	/* EAX */
++		return BUG_UD1_UBSAN;
+ 
+ 	return BUG_UD1;
+ }
+@@ -258,10 +283,10 @@ static inline void handle_invalid_op(struct pt_regs *regs)
+ static noinstr bool handle_bug(struct pt_regs *regs)
+ {
+ 	bool handled = false;
+-	int ud_type;
+-	u32 imm;
++	int ud_type, ud_len;
++	s32 ud_imm;
+ 
+-	ud_type = decode_bug(regs->ip, &imm);
++	ud_type = decode_bug(regs->ip, &ud_imm, &ud_len);
+ 	if (ud_type == BUG_NONE)
+ 		return handled;
+ 
+@@ -281,15 +306,28 @@ static noinstr bool handle_bug(struct pt_regs *regs)
+ 	 */
+ 	if (regs->flags & X86_EFLAGS_IF)
+ 		raw_local_irq_enable();
+-	if (ud_type == BUG_UD2) {
++
++	switch (ud_type) {
++	case BUG_UD2:
+ 		if (report_bug(regs->ip, regs) == BUG_TRAP_TYPE_WARN ||
+ 		    handle_cfi_failure(regs) == BUG_TRAP_TYPE_WARN) {
+-			regs->ip += LEN_UD2;
++			regs->ip += ud_len;
+ 			handled = true;
+ 		}
+-	} else if (IS_ENABLED(CONFIG_UBSAN_TRAP)) {
+-		pr_crit("%s at %pS\n", report_ubsan_failure(regs, imm), (void *)regs->ip);
++		break;
++
++	case BUG_UD1_UBSAN:
++		if (IS_ENABLED(CONFIG_UBSAN_TRAP)) {
++			pr_crit("%s at %pS\n",
++				report_ubsan_failure(regs, ud_imm),
++				(void *)regs->ip);
++		}
++		break;
++
++	default:
++		break;
+ 	}
++
+ 	if (regs->flags & X86_EFLAGS_IF)
+ 		raw_local_irq_disable();
+ 	instrumentation_end();
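The rewritten `decode_bug()` above performs a small but complete ModRM walk, which lets it distinguish UBSAN's EAX-based `ud1` forms from static_call's `ud1 %esp,%ecx`, sign-extend the immediate, and report the exact instruction length used to advance `regs->ip`. The field extraction and displacement sizes follow standard x86 encoding (reference sketch):

  #include <stdint.h>

  #define MODRM_MOD(b) (((b) >> 6) & 0x3)  /* addressing mode */
  #define MODRM_REG(b) (((b) >> 3) & 0x7)  /* register field; 0 selects EAX */
  #define MODRM_RM(b)  ((b) & 0x7)         /* 4 (when mod != 3): SIB byte follows */

  /* Displacement bytes that follow ModRM (and any SIB byte): */
  static int disp_len(uint8_t modrm)
  {
  	switch (MODRM_MOD(modrm)) {
  	case 0:  return MODRM_RM(modrm) == 5 ? 4 : 0; /* RIP-rel/abs disp32 */
  	case 1:  return 1;                            /* disp8  */
  	case 2:  return 4;                            /* disp32 */
  	default: return 0;                            /* mod 3: register direct */
  	}
  }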
+diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
+index 101725c149c429..9cbc1e6057d3c4 100644
+--- a/arch/x86/mm/init.c
++++ b/arch/x86/mm/init.c
+@@ -645,8 +645,13 @@ static void __init memory_map_top_down(unsigned long map_start,
+ 	 */
+ 	addr = memblock_phys_alloc_range(PMD_SIZE, PMD_SIZE, map_start,
+ 					 map_end);
+-	memblock_phys_free(addr, PMD_SIZE);
+-	real_end = addr + PMD_SIZE;
++	if (!addr) {
++		pr_warn("Failed to release memory for alloc_low_pages()");
++		real_end = max(map_start, ALIGN_DOWN(map_end, PMD_SIZE));
++	} else {
++		memblock_phys_free(addr, PMD_SIZE);
++		real_end = addr + PMD_SIZE;
++	}
+ 
+ 	/* step_size need to be small so pgt_buf from BRK could cover it */
+ 	step_size = PMD_SIZE;
+diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
+index ff253648706fa9..d8853afd314b24 100644
+--- a/arch/x86/mm/init_64.c
++++ b/arch/x86/mm/init_64.c
+@@ -967,9 +967,18 @@ int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
+ 	ret = __add_pages(nid, start_pfn, nr_pages, params);
+ 	WARN_ON_ONCE(ret);
+ 
+-	/* update max_pfn, max_low_pfn and high_memory */
+-	update_end_of_memory_vars(start_pfn << PAGE_SHIFT,
+-				  nr_pages << PAGE_SHIFT);
++	/*
++	 * Special case: add_pages() is called by memremap_pages() for adding device
++	 * private pages. Do not bump up max_pfn in the device private path,
++	 * because max_pfn changes affect dma_addressing_limited().
++	 *
++	 * dma_addressing_limited() returning true when max_pfn is the device's
++	 * addressable memory can force device drivers to use bounce buffers
++	 * and impact their performance negatively:
++	 */
++	if (!params->pgmap)
++		/* update max_pfn, max_low_pfn and high_memory */
++		update_end_of_memory_vars(start_pfn << PAGE_SHIFT, nr_pages << PAGE_SHIFT);
+ 
+ 	return ret;
+ }
+diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c
+index 230f1dee4f0954..e0b0ec0f824574 100644
+--- a/arch/x86/mm/kaslr.c
++++ b/arch/x86/mm/kaslr.c
+@@ -109,8 +109,14 @@ void __init kernel_randomize_memory(void)
+ 	memory_tb = DIV_ROUND_UP(max_pfn << PAGE_SHIFT, 1UL << TB_SHIFT) +
+ 		CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING;
+ 
+-	/* Adapt physical memory region size based on available memory */
+-	if (memory_tb < kaslr_regions[0].size_tb)
++	/*
++	 * Adapt physical memory region size based on available memory,
++	 * except when CONFIG_PCI_P2PDMA is enabled. P2PDMA exposes the
++	 * device BAR space assuming the direct map space is large enough
++	 * for creating a ZONE_DEVICE mapping in the direct map corresponding
++	 * to the physical BAR address.
++	 */
++	if (!IS_ENABLED(CONFIG_PCI_P2PDMA) && (memory_tb < kaslr_regions[0].size_tb))
+ 		kaslr_regions[0].size_tb = memory_tb;
+ 
+ 	/*
+diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
+index 63230ff8cf4f06..08e76a5ca1553d 100644
+--- a/arch/x86/power/cpu.c
++++ b/arch/x86/power/cpu.c
+@@ -27,6 +27,7 @@
+ #include <asm/mmu_context.h>
+ #include <asm/cpu_device_id.h>
+ #include <asm/microcode.h>
++#include <asm/fred.h>
+ 
+ #ifdef CONFIG_X86_32
+ __visible unsigned long saved_context_ebx;
+@@ -231,6 +232,19 @@ static void notrace __restore_processor_state(struct saved_context *ctxt)
+ 	 */
+ #ifdef CONFIG_X86_64
+ 	wrmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);
++
++	/*
++	 * Reinitialize FRED to ensure the FRED MSRs contain the same values
++	 * as before hibernation.
++	 *
++	 * Note, the setup of FRED RSPs requires access to percpu data
++	 * structures.  Therefore, FRED reinitialization can only occur after
++	 * the percpu access pointer (i.e., MSR_GS_BASE) is restored.
++	 */
++	if (ctxt->cr4 & X86_CR4_FRED) {
++		cpu_init_fred_exceptions();
++		cpu_init_fred_rsps();
++	}
+ #else
+ 	loadsegment(fs, __KERNEL_PERCPU);
+ #endif
+diff --git a/arch/x86/um/os-Linux/mcontext.c b/arch/x86/um/os-Linux/mcontext.c
+index e80ab7d281177b..1b0d95328b2c72 100644
+--- a/arch/x86/um/os-Linux/mcontext.c
++++ b/arch/x86/um/os-Linux/mcontext.c
+@@ -27,7 +27,6 @@ void get_regs_from_mc(struct uml_pt_regs *regs, mcontext_t *mc)
+ 	COPY(RIP);
+ 	COPY2(EFLAGS, EFL);
+ 	COPY2(CS, CSGSFS);
+-	regs->gp[CS / sizeof(unsigned long)] &= 0xffff;
+-	regs->gp[CS / sizeof(unsigned long)] |= 3;
++	regs->gp[SS / sizeof(unsigned long)] = mc->gregs[REG_CSGSFS] >> 48;
+ #endif
+ }
+diff --git a/block/badblocks.c b/block/badblocks.c
+index db4ec8b9b2a8c2..a9709771a1015c 100644
+--- a/block/badblocks.c
++++ b/block/badblocks.c
+@@ -1349,14 +1349,15 @@ static int _badblocks_check(struct badblocks *bb, sector_t s, int sectors,
+ 	len = sectors;
+ 
+ update_sectors:
++	/* This situation should never happen */
++	WARN_ON(sectors < len);
++
+ 	s += len;
+ 	sectors -= len;
+ 
+ 	if (sectors > 0)
+ 		goto re_check;
+ 
+-	WARN_ON(sectors < 0);
+-
+ 	if (unacked_badblocks > 0)
+ 		rv = -1;
+ 	else if (acked_badblocks > 0)
+diff --git a/block/bdev.c b/block/bdev.c
+index 738e3c8457e7f4..e7daca6565ea68 100644
+--- a/block/bdev.c
++++ b/block/bdev.c
+@@ -168,9 +168,26 @@ int set_blocksize(struct file *file, int size)
+ 
+ 	/* Don't change the size if it is same as current */
+ 	if (inode->i_blkbits != blksize_bits(size)) {
++		/*
++		 * Flush and truncate the pagecache before we reconfigure the
++		 * mapping geometry because folio sizes are variable now.  If a
++		 * reader has already allocated a folio whose size is smaller
++		 * than the new min_order but invokes readahead after the new
++		 * min_order becomes visible, readahead will think there are
++		 * "zero" blocks per folio and crash.  Take the inode and
++		 * invalidation locks to avoid racing with
++		 * read/write/fallocate.
++		 */
++		inode_lock(inode);
++		filemap_invalidate_lock(inode->i_mapping);
++
+ 		sync_blockdev(bdev);
++		kill_bdev(bdev);
++
+ 		inode->i_blkbits = blksize_bits(size);
+ 		kill_bdev(bdev);
++		filemap_invalidate_unlock(inode->i_mapping);
++		inode_unlock(inode);
+ 	}
+ 	return 0;
+ }
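The expanded `set_blocksize()` now brackets the flush/truncate/`i_blkbits` update with `i_rwsem` held exclusive plus the invalidate lock; the matching shared-side acquisitions land in the block/fops.c hunk further down, so reads and buffered writes can no longer race a folio-order change. The pairing is ordinary reader/writer discipline, sketched with a plain rwlock (illustrative stand-ins, not the kernel primitives):

  #include <pthread.h>

  static pthread_rwlock_t i_rwsem = PTHREAD_RWLOCK_INITIALIZER;

  static void change_block_size(void)
  {
  	pthread_rwlock_wrlock(&i_rwsem);  /* like inode_lock() */
  	/* flush + truncate the pagecache, then update i_blkbits */
  	pthread_rwlock_unlock(&i_rwsem);
  }

  static void buffered_io(void)
  {
  	pthread_rwlock_rdlock(&i_rwsem);  /* like inode_lock_shared() */
  	/* filemap_read() and buffered writes see a stable folio order */
  	pthread_rwlock_unlock(&i_rwsem);
  }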
+diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
+index f1cf7f2909f3a7..643d6bf66522e6 100644
+--- a/block/blk-cgroup.c
++++ b/block/blk-cgroup.c
+@@ -1725,27 +1725,27 @@ int blkcg_policy_register(struct blkcg_policy *pol)
+ 	struct blkcg *blkcg;
+ 	int i, ret;
+ 
++	/*
++	 * Make sure cpd/pd_alloc_fn and cpd/pd_free_fn in pairs, and policy
++	 * without pd_alloc_fn/pd_free_fn can't be activated.
++	 */
++	if ((!pol->cpd_alloc_fn ^ !pol->cpd_free_fn) ||
++	    (!pol->pd_alloc_fn ^ !pol->pd_free_fn))
++		return -EINVAL;
++
+ 	mutex_lock(&blkcg_pol_register_mutex);
+ 	mutex_lock(&blkcg_pol_mutex);
+ 
+ 	/* find an empty slot */
+-	ret = -ENOSPC;
+ 	for (i = 0; i < BLKCG_MAX_POLS; i++)
+ 		if (!blkcg_policy[i])
+ 			break;
+ 	if (i >= BLKCG_MAX_POLS) {
+ 		pr_warn("blkcg_policy_register: BLKCG_MAX_POLS too small\n");
++		ret = -ENOSPC;
+ 		goto err_unlock;
+ 	}
+ 
+-	/*
+-	 * Make sure cpd/pd_alloc_fn and cpd/pd_free_fn in pairs, and policy
+-	 * without pd_alloc_fn/pd_free_fn can't be activated.
+-	 */
+-	if ((!pol->cpd_alloc_fn ^ !pol->cpd_free_fn) ||
+-	    (!pol->pd_alloc_fn ^ !pol->pd_free_fn))
+-		goto err_unlock;
+-
+ 	/* register @pol */
+ 	pol->plid = i;
+ 	blkcg_policy[pol->plid] = pol;
+@@ -1756,8 +1756,10 @@ int blkcg_policy_register(struct blkcg_policy *pol)
+ 			struct blkcg_policy_data *cpd;
+ 
+ 			cpd = pol->cpd_alloc_fn(GFP_KERNEL);
+-			if (!cpd)
++			if (!cpd) {
++				ret = -ENOMEM;
+ 				goto err_free_cpds;
++			}
+ 
+ 			blkcg->cpd[pol->plid] = cpd;
+ 			cpd->blkcg = blkcg;
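The blkcg hunk above hoists the alloc/free-pair validation ahead of the mutexes (the check depends only on the policy struct, so there is no reason to lock before failing) and gives each error path an explicit code instead of a pre-seeded `-ENOSPC`. The pair test is a compact XOR of NULL-ness (sketch):

  #include <stdbool.h>
  #include <stddef.h>

  /* True exactly when one of a paired alloc/free hook is missing. */
  static bool pair_mismatched(const void *alloc_fn, const void *free_fn)
  {
  	return (!alloc_fn) ^ (!free_fn);
  }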
+diff --git a/block/blk-settings.c b/block/blk-settings.c
+index 1e63e3dd544020..7858c92b44834f 100644
+--- a/block/blk-settings.c
++++ b/block/blk-settings.c
+@@ -124,6 +124,11 @@ static int blk_validate_integrity_limits(struct queue_limits *lim)
+ 		return 0;
+ 	}
+ 
++	if (lim->features & BLK_FEAT_BOUNCE_HIGH) {
++		pr_warn("no bounce buffer support for integrity metadata\n");
++		return -EINVAL;
++	}
++
+ 	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)) {
+ 		pr_warn("integrity support disabled.\n");
+ 		return -EINVAL;
+diff --git a/block/blk-throttle.c b/block/blk-throttle.c
+index 2c4192e12efab6..6b82fcbd7e7741 100644
+--- a/block/blk-throttle.c
++++ b/block/blk-throttle.c
+@@ -1593,13 +1593,6 @@ static bool tg_within_limit(struct throtl_grp *tg, struct bio *bio, bool rw)
+ 	return tg_may_dispatch(tg, bio, NULL);
+ }
+ 
+-static void tg_dispatch_in_debt(struct throtl_grp *tg, struct bio *bio, bool rw)
+-{
+-	if (!bio_flagged(bio, BIO_BPS_THROTTLED))
+-		tg->carryover_bytes[rw] -= throtl_bio_data_size(bio);
+-	tg->carryover_ios[rw]--;
+-}
+-
+ bool __blk_throtl_bio(struct bio *bio)
+ {
+ 	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
+@@ -1636,10 +1629,12 @@ bool __blk_throtl_bio(struct bio *bio)
+ 			/*
+ 			 * IOs which may cause priority inversions are
+ 			 * dispatched directly, even if they're over limit.
+-			 * Debts are handled by carryover_bytes/ios while
+-			 * calculating wait time.
++			 *
++			 * Charge and dispatch directly, and our throttle
++			 * control algorithm is adaptive, and extra IO bytes
++			 * will be throttled for paying the debt
+ 			 */
+-			tg_dispatch_in_debt(tg, bio, rw);
++			throtl_charge_bio(tg, bio);
+ 		} else {
+ 			/* if above limits, break to queue */
+ 			break;
+diff --git a/block/blk-zoned.c b/block/blk-zoned.c
+index c11db5be253248..414118435240ab 100644
+--- a/block/blk-zoned.c
++++ b/block/blk-zoned.c
+@@ -347,6 +347,7 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode,
+ 		op = REQ_OP_ZONE_RESET;
+ 
+ 		/* Invalidate the page cache, including dirty pages. */
++		inode_lock(bdev->bd_mapping->host);
+ 		filemap_invalidate_lock(bdev->bd_mapping);
+ 		ret = blkdev_truncate_zone_range(bdev, mode, &zrange);
+ 		if (ret)
+@@ -368,8 +369,10 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode,
+ 	ret = blkdev_zone_mgmt(bdev, op, zrange.sector, zrange.nr_sectors);
+ 
+ fail:
+-	if (cmd == BLKRESETZONE)
++	if (cmd == BLKRESETZONE) {
+ 		filemap_invalidate_unlock(bdev->bd_mapping);
++		inode_unlock(bdev->bd_mapping->host);
++	}
+ 
+ 	return ret;
+ }
+diff --git a/block/blk.h b/block/blk.h
+index 1426f9c281973e..e91012247ff296 100644
+--- a/block/blk.h
++++ b/block/blk.h
+@@ -482,7 +482,8 @@ static inline void blk_zone_update_request_bio(struct request *rq,
+ 	 * the original BIO sector so that blk_zone_write_plug_bio_endio() can
+ 	 * lookup the zone write plug.
+ 	 */
+-	if (req_op(rq) == REQ_OP_ZONE_APPEND || bio_zone_write_plugging(bio))
++	if (req_op(rq) == REQ_OP_ZONE_APPEND ||
++	    bio_flagged(bio, BIO_EMULATES_ZONE_APPEND))
+ 		bio->bi_iter.bi_sector = rq->__sector;
+ }
+ void blk_zone_write_plug_bio_endio(struct bio *bio);
+diff --git a/block/bounce.c b/block/bounce.c
+index 0d898cd5ec497f..09a9616cf20944 100644
+--- a/block/bounce.c
++++ b/block/bounce.c
+@@ -41,8 +41,6 @@ static void init_bounce_bioset(void)
+ 
+ 	ret = bioset_init(&bounce_bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
+ 	BUG_ON(ret);
+-	if (bioset_integrity_create(&bounce_bio_set, BIO_POOL_SIZE))
+-		BUG_ON(1);
+ 
+ 	ret = bioset_init(&bounce_bio_split, BIO_POOL_SIZE, 0, 0);
+ 	BUG_ON(ret);
+diff --git a/block/fops.c b/block/fops.c
+index 43983be5a2b3b1..d4b1d942f2700e 100644
+--- a/block/fops.c
++++ b/block/fops.c
+@@ -721,7 +721,14 @@ static ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
+ 			ret = direct_write_fallback(iocb, from, ret,
+ 					blkdev_buffered_write(iocb, from));
+ 	} else {
++		/*
++		 * Take i_rwsem and invalidate_lock to avoid racing with
++		 * set_blocksize changing i_blkbits/folio order and punching
++		 * out the pagecache.
++		 */
++		inode_lock_shared(bd_inode);
+ 		ret = blkdev_buffered_write(iocb, from);
++		inode_unlock_shared(bd_inode);
+ 	}
+ 
+ 	if (ret > 0)
+@@ -732,6 +739,7 @@ static ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
+ 
+ static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
+ {
++	struct inode *bd_inode = bdev_file_inode(iocb->ki_filp);
+ 	struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
+ 	loff_t size = bdev_nr_bytes(bdev);
+ 	loff_t pos = iocb->ki_pos;
+@@ -768,7 +776,13 @@ static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
+ 			goto reexpand;
+ 	}
+ 
++	/*
++	 * Take i_rwsem and invalidate_lock to avoid racing with set_blocksize
++	 * changing i_blkbits/folio order and punching out the pagecache.
++	 */
++	inode_lock_shared(bd_inode);
+ 	ret = filemap_read(iocb, to, ret);
++	inode_unlock_shared(bd_inode);
+ 
+ reexpand:
+ 	if (unlikely(shorted))
+@@ -811,6 +825,7 @@ static long blkdev_fallocate(struct file *file, int mode, loff_t start,
+ 	if ((start | len) & (bdev_logical_block_size(bdev) - 1))
+ 		return -EINVAL;
+ 
++	inode_lock(inode);
+ 	filemap_invalidate_lock(inode->i_mapping);
+ 
+ 	/*
+@@ -843,6 +858,7 @@ static long blkdev_fallocate(struct file *file, int mode, loff_t start,
+ 
+  fail:
+ 	filemap_invalidate_unlock(inode->i_mapping);
++	inode_unlock(inode);
+ 	return error;
+ }
+ 
+diff --git a/block/ioctl.c b/block/ioctl.c
+index 6554b728bae6aa..919066b4bb49c8 100644
+--- a/block/ioctl.c
++++ b/block/ioctl.c
+@@ -141,6 +141,7 @@ static int blk_ioctl_discard(struct block_device *bdev, blk_mode_t mode,
+ 	if (err)
+ 		return err;
+ 
++	inode_lock(bdev->bd_mapping->host);
+ 	filemap_invalidate_lock(bdev->bd_mapping);
+ 	err = truncate_bdev_range(bdev, mode, start, start + len - 1);
+ 	if (err)
+@@ -173,6 +174,7 @@ static int blk_ioctl_discard(struct block_device *bdev, blk_mode_t mode,
+ 	blk_finish_plug(&plug);
+ fail:
+ 	filemap_invalidate_unlock(bdev->bd_mapping);
++	inode_unlock(bdev->bd_mapping->host);
+ 	return err;
+ }
+ 
+@@ -198,12 +200,14 @@ static int blk_ioctl_secure_erase(struct block_device *bdev, blk_mode_t mode,
+ 	    end > bdev_nr_bytes(bdev))
+ 		return -EINVAL;
+ 
++	inode_lock(bdev->bd_mapping->host);
+ 	filemap_invalidate_lock(bdev->bd_mapping);
+ 	err = truncate_bdev_range(bdev, mode, start, end - 1);
+ 	if (!err)
+ 		err = blkdev_issue_secure_erase(bdev, start >> 9, len >> 9,
+ 						GFP_KERNEL);
+ 	filemap_invalidate_unlock(bdev->bd_mapping);
++	inode_unlock(bdev->bd_mapping->host);
+ 	return err;
+ }
+ 
+@@ -235,6 +239,7 @@ static int blk_ioctl_zeroout(struct block_device *bdev, blk_mode_t mode,
+ 		return -EINVAL;
+ 
+ 	/* Invalidate the page cache, including dirty pages */
++	inode_lock(bdev->bd_mapping->host);
+ 	filemap_invalidate_lock(bdev->bd_mapping);
+ 	err = truncate_bdev_range(bdev, mode, start, end);
+ 	if (err)
+@@ -245,6 +250,7 @@ static int blk_ioctl_zeroout(struct block_device *bdev, blk_mode_t mode,
+ 
+ fail:
+ 	filemap_invalidate_unlock(bdev->bd_mapping);
++	inode_unlock(bdev->bd_mapping->host);
+ 	return err;
+ }
+ 
+diff --git a/crypto/ahash.c b/crypto/ahash.c
+index bcd9de009a91b6..fe19bf7f15eb9d 100644
+--- a/crypto/ahash.c
++++ b/crypto/ahash.c
+@@ -473,6 +473,7 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
+ 	struct ahash_alg *alg = crypto_ahash_alg(hash);
+ 
+ 	crypto_ahash_set_statesize(hash, alg->halg.statesize);
++	crypto_ahash_set_reqsize(hash, alg->reqsize);
+ 
+ 	if (tfm->__crt_alg->cra_type == &crypto_shash_type)
+ 		return crypto_init_ahash_using_shash(tfm);
+@@ -638,6 +639,9 @@ static int ahash_prepare_alg(struct ahash_alg *alg)
+ 	if (alg->halg.statesize == 0)
+ 		return -EINVAL;
+ 
++	if (alg->reqsize && alg->reqsize < alg->halg.statesize)
++		return -EINVAL;
++
+ 	err = hash_prepare_alg(&alg->halg);
+ 	if (err)
+ 		return err;
+diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
+index 5498a87249d3e7..e3f1a4852737b0 100644
+--- a/crypto/algif_hash.c
++++ b/crypto/algif_hash.c
+@@ -265,10 +265,6 @@ static int hash_accept(struct socket *sock, struct socket *newsock,
+ 		goto out_free_state;
+ 
+ 	err = crypto_ahash_import(&ctx2->req, state);
+-	if (err) {
+-		sock_orphan(sk2);
+-		sock_put(sk2);
+-	}
+ 
+ out_free_state:
+ 	kfree_sensitive(state);
+diff --git a/crypto/lzo-rle.c b/crypto/lzo-rle.c
+index 0631d975bfac11..0abc2d87f04200 100644
+--- a/crypto/lzo-rle.c
++++ b/crypto/lzo-rle.c
+@@ -55,7 +55,7 @@ static int __lzorle_compress(const u8 *src, unsigned int slen,
+ 	size_t tmp_len = *dlen; /* size_t(ulong) <-> uint on 64 bit */
+ 	int err;
+ 
+-	err = lzorle1x_1_compress(src, slen, dst, &tmp_len, ctx);
++	err = lzorle1x_1_compress_safe(src, slen, dst, &tmp_len, ctx);
+ 
+ 	if (err != LZO_E_OK)
+ 		return -EINVAL;
+diff --git a/crypto/lzo.c b/crypto/lzo.c
+index ebda132dd22bf5..8338851c7406a3 100644
+--- a/crypto/lzo.c
++++ b/crypto/lzo.c
+@@ -55,7 +55,7 @@ static int __lzo_compress(const u8 *src, unsigned int slen,
+ 	size_t tmp_len = *dlen; /* size_t(ulong) <-> uint on 64 bit */
+ 	int err;
+ 
+-	err = lzo1x_1_compress(src, slen, dst, &tmp_len, ctx);
++	err = lzo1x_1_compress_safe(src, slen, dst, &tmp_len, ctx);
+ 
+ 	if (err != LZO_E_OK)
+ 		return -EINVAL;
+diff --git a/crypto/skcipher.c b/crypto/skcipher.c
+index ceed7f33a67ba5..fd3273b519dc0b 100644
+--- a/crypto/skcipher.c
++++ b/crypto/skcipher.c
+@@ -844,6 +844,7 @@ struct crypto_sync_skcipher *crypto_alloc_sync_skcipher(
+ 
+ 	/* Only sync algorithms allowed. */
+ 	mask |= CRYPTO_ALG_ASYNC | CRYPTO_ALG_SKCIPHER_REQSIZE_LARGE;
++	type &= ~(CRYPTO_ALG_ASYNC | CRYPTO_ALG_SKCIPHER_REQSIZE_LARGE);
+ 
+ 	tfm = crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask);
+ 
+diff --git a/drivers/accel/qaic/qaic_drv.c b/drivers/accel/qaic/qaic_drv.c
+index f139c564eadf9f..10e711c96a6706 100644
+--- a/drivers/accel/qaic/qaic_drv.c
++++ b/drivers/accel/qaic/qaic_drv.c
+@@ -432,7 +432,7 @@ static int init_pci(struct qaic_device *qdev, struct pci_dev *pdev)
+ 	int bars;
+ 	int ret;
+ 
+-	bars = pci_select_bars(pdev, IORESOURCE_MEM);
++	bars = pci_select_bars(pdev, IORESOURCE_MEM) & 0x3f;
+ 
+ 	/* make sure the device has the expected BARs */
+ 	if (bars != (BIT(0) | BIT(2) | BIT(4))) {
+diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
+index d67f63d93b2abd..89fa569d637021 100644
+--- a/drivers/acpi/Kconfig
++++ b/drivers/acpi/Kconfig
+@@ -443,7 +443,7 @@ config ACPI_SBS
+ 	  the modules will be called sbs and sbshc.
+ 
+ config ACPI_HED
+-	tristate "Hardware Error Device"
++	bool "Hardware Error Device"
+ 	help
+ 	  This driver supports the Hardware Error Device (PNP0C33),
+ 	  which is used to report some hardware errors notified via
+diff --git a/drivers/acpi/acpi_pnp.c b/drivers/acpi/acpi_pnp.c
+index 01abf26764b00c..3f5a1840f57330 100644
+--- a/drivers/acpi/acpi_pnp.c
++++ b/drivers/acpi/acpi_pnp.c
+@@ -355,8 +355,10 @@ static bool acpi_pnp_match(const char *idstr, const struct acpi_device_id **matc
+  * device represented by it.
+  */
+ static const struct acpi_device_id acpi_nonpnp_device_ids[] = {
++	{"INT3F0D"},
+ 	{"INTC1080"},
+ 	{"INTC1081"},
++	{"INTC1099"},
+ 	{""},
+ };
+ 
+diff --git a/drivers/acpi/hed.c b/drivers/acpi/hed.c
+index 7652515a6be1e3..3499f86c411e3b 100644
+--- a/drivers/acpi/hed.c
++++ b/drivers/acpi/hed.c
+@@ -80,7 +80,12 @@ static struct acpi_driver acpi_hed_driver = {
+ 		.remove = acpi_hed_remove,
+ 	},
+ };
+-module_acpi_driver(acpi_hed_driver);
++
++static int __init acpi_hed_driver_init(void)
++{
++	return acpi_bus_register_driver(&acpi_hed_driver);
++}
++subsys_initcall(acpi_hed_driver_init);
+ 
+ MODULE_AUTHOR("Huang Ying");
+ MODULE_DESCRIPTION("ACPI Hardware Error Device Driver");
+diff --git a/drivers/auxdisplay/charlcd.c b/drivers/auxdisplay/charlcd.c
+index 19b619376d48b9..09020bb8ad15fa 100644
+--- a/drivers/auxdisplay/charlcd.c
++++ b/drivers/auxdisplay/charlcd.c
+@@ -595,18 +595,19 @@ static int charlcd_init(struct charlcd *lcd)
+ 	return 0;
+ }
+ 
+-struct charlcd *charlcd_alloc(void)
++struct charlcd *charlcd_alloc(unsigned int drvdata_size)
+ {
+ 	struct charlcd_priv *priv;
+ 	struct charlcd *lcd;
+ 
+-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
++	priv = kzalloc(sizeof(*priv) + drvdata_size, GFP_KERNEL);
+ 	if (!priv)
+ 		return NULL;
+ 
+ 	priv->esc_seq.len = -1;
+ 
+ 	lcd = &priv->lcd;
++	lcd->drvdata = priv->drvdata;
+ 
+ 	return lcd;
+ }
+diff --git a/drivers/auxdisplay/charlcd.h b/drivers/auxdisplay/charlcd.h
+index 4d4287209d04c4..d10b89740bcae7 100644
+--- a/drivers/auxdisplay/charlcd.h
++++ b/drivers/auxdisplay/charlcd.h
+@@ -51,7 +51,7 @@ struct charlcd {
+ 		unsigned long y;
+ 	} addr;
+ 
+-	void *drvdata;
++	void *drvdata;			/* Set by charlcd_alloc() */
+ };
+ 
+ /**
+@@ -95,7 +95,8 @@ struct charlcd_ops {
+ };
+ 
+ void charlcd_backlight(struct charlcd *lcd, enum charlcd_onoff on);
+-struct charlcd *charlcd_alloc(void);
++
++struct charlcd *charlcd_alloc(unsigned int drvdata_size);
+ void charlcd_free(struct charlcd *lcd);
+ 
+ int charlcd_register(struct charlcd *lcd);
+diff --git a/drivers/auxdisplay/hd44780.c b/drivers/auxdisplay/hd44780.c
+index 41807ce363399d..9428f951c9bf22 100644
+--- a/drivers/auxdisplay/hd44780.c
++++ b/drivers/auxdisplay/hd44780.c
+@@ -226,7 +226,7 @@ static int hd44780_probe(struct platform_device *pdev)
+ 	if (!hdc)
+ 		return -ENOMEM;
+ 
+-	lcd = charlcd_alloc();
++	lcd = charlcd_alloc(0);
+ 	if (!lcd)
+ 		goto fail1;
+ 
+diff --git a/drivers/auxdisplay/lcd2s.c b/drivers/auxdisplay/lcd2s.c
+index 6422be0dfe20e6..0ecf6a9469f24c 100644
+--- a/drivers/auxdisplay/lcd2s.c
++++ b/drivers/auxdisplay/lcd2s.c
+@@ -307,7 +307,7 @@ static int lcd2s_i2c_probe(struct i2c_client *i2c)
+ 	if (err < 0)
+ 		return err;
+ 
+-	lcd = charlcd_alloc();
++	lcd = charlcd_alloc(0);
+ 	if (!lcd)
+ 		return -ENOMEM;
+ 
+diff --git a/drivers/auxdisplay/panel.c b/drivers/auxdisplay/panel.c
+index 6dc8798d01f98c..4da142692d55f8 100644
+--- a/drivers/auxdisplay/panel.c
++++ b/drivers/auxdisplay/panel.c
+@@ -835,7 +835,7 @@ static void lcd_init(void)
+ 	if (!hdc)
+ 		return;
+ 
+-	charlcd = charlcd_alloc();
++	charlcd = charlcd_alloc(0);
+ 	if (!charlcd) {
+ 		kfree(hdc);
+ 		return;
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index 6bd44ec2c9b1aa..0843d229b0f765 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -916,7 +916,7 @@ static unsigned int loop_default_blocksize(struct loop_device *lo,
+ 		struct block_device *backing_bdev)
+ {
+ 	/* In case of direct I/O, match underlying block size */
+-	if ((lo->lo_backing_file->f_flags & O_DIRECT) && backing_bdev)
++	if ((lo->lo_flags & LO_FLAGS_DIRECT_IO) && backing_bdev)
+ 		return bdev_logical_block_size(backing_bdev);
+ 	return SECTOR_SIZE;
+ }
+@@ -969,9 +969,6 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
+ 	if (!file)
+ 		return -EBADF;
+ 
+-	if ((mode & BLK_OPEN_WRITE) && !file->f_op->write_iter)
+-		return -EINVAL;
+-
+ 	error = loop_check_backing_file(file);
+ 	if (error)
+ 		return error;
+diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
+index 38b9e485e520d5..a01a547c562f35 100644
+--- a/drivers/block/ublk_drv.c
++++ b/drivers/block/ublk_drv.c
+@@ -484,15 +484,17 @@ static wait_queue_head_t ublk_idr_wq;	/* wait until one idr is freed */
+ 
+ static DEFINE_MUTEX(ublk_ctl_mutex);
+ 
++
++#define UBLK_MAX_UBLKS UBLK_MINORS
++
+ /*
+- * Max ublk devices allowed to add
++ * Max unprivileged ublk devices allowed to add
+  *
+  * It can be extended to one per-user limit in future or even controlled
+  * by cgroup.
+  */
+-#define UBLK_MAX_UBLKS UBLK_MINORS
+-static unsigned int ublks_max = 64;
+-static unsigned int ublks_added;	/* protected by ublk_ctl_mutex */
++static unsigned int unprivileged_ublks_max = 64;
++static unsigned int unprivileged_ublks_added; /* protected by ublk_ctl_mutex */
+ 
+ static struct miscdevice ublk_misc;
+ 
+@@ -1879,10 +1881,9 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
+ 	return -EIOCBQUEUED;
+ 
+  out:
+-	io_uring_cmd_done(cmd, ret, 0, issue_flags);
+ 	pr_devel("%s: complete: cmd op %d, tag %d ret %x io_flags %x\n",
+ 			__func__, cmd_op, tag, ret, io->flags);
+-	return -EIOCBQUEUED;
++	return ret;
+ }
+ 
+ static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub,
+@@ -1938,7 +1939,10 @@ static inline int ublk_ch_uring_cmd_local(struct io_uring_cmd *cmd,
+ static void ublk_ch_uring_cmd_cb(struct io_uring_cmd *cmd,
+ 		unsigned int issue_flags)
+ {
+-	ublk_ch_uring_cmd_local(cmd, issue_flags);
++	int ret = ublk_ch_uring_cmd_local(cmd, issue_flags);
++
++	if (ret != -EIOCBQUEUED)
++		io_uring_cmd_done(cmd, ret, 0, issue_flags);
+ }
+ 
+ static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
+@@ -2203,7 +2207,8 @@ static int ublk_add_chdev(struct ublk_device *ub)
+ 	if (ret)
+ 		goto fail;
+ 
+-	ublks_added++;
++	if (ub->dev_info.flags & UBLK_F_UNPRIVILEGED_DEV)
++		unprivileged_ublks_added++;
+ 	return 0;
+  fail:
+ 	put_device(dev);
+@@ -2241,12 +2246,17 @@ static int ublk_add_tag_set(struct ublk_device *ub)
+ 
+ static void ublk_remove(struct ublk_device *ub)
+ {
++	bool unprivileged;
++
+ 	ublk_stop_dev(ub);
+ 	cancel_work_sync(&ub->stop_work);
+ 	cancel_work_sync(&ub->quiesce_work);
+ 	cdev_device_del(&ub->cdev, &ub->cdev_dev);
++	unprivileged = ub->dev_info.flags & UBLK_F_UNPRIVILEGED_DEV;
+ 	ublk_put_device(ub);
+-	ublks_added--;
++
++	if (unprivileged)
++		unprivileged_ublks_added--;
+ }
+ 
+ static struct ublk_device *ublk_get_device_from_id(int idx)
+@@ -2495,7 +2505,8 @@ static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
+ 		return ret;
+ 
+ 	ret = -EACCES;
+-	if (ublks_added >= ublks_max)
++	if ((info.flags & UBLK_F_UNPRIVILEGED_DEV) &&
++	    unprivileged_ublks_added >= unprivileged_ublks_max)
+ 		goto out_unlock;
+ 
+ 	ret = -ENOMEM;
+@@ -3056,10 +3067,9 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
+ 	if (ub)
+ 		ublk_put_device(ub);
+  out:
+-	io_uring_cmd_done(cmd, ret, 0, issue_flags);
+ 	pr_devel("%s: cmd done ret %d cmd_op %x, dev id %d qid %d\n",
+ 			__func__, ret, cmd->cmd_op, header->dev_id, header->queue_id);
+-	return -EIOCBQUEUED;
++	return ret;
+ }
+ 
+ static const struct file_operations ublk_ctl_fops = {
+@@ -3123,23 +3133,26 @@ static void __exit ublk_exit(void)
+ module_init(ublk_init);
+ module_exit(ublk_exit);
+ 
+-static int ublk_set_max_ublks(const char *buf, const struct kernel_param *kp)
++static int ublk_set_max_unprivileged_ublks(const char *buf,
++					   const struct kernel_param *kp)
+ {
+ 	return param_set_uint_minmax(buf, kp, 0, UBLK_MAX_UBLKS);
+ }
+ 
+-static int ublk_get_max_ublks(char *buf, const struct kernel_param *kp)
++static int ublk_get_max_unprivileged_ublks(char *buf,
++					   const struct kernel_param *kp)
+ {
+-	return sysfs_emit(buf, "%u\n", ublks_max);
++	return sysfs_emit(buf, "%u\n", unprivileged_ublks_max);
+ }
+ 
+-static const struct kernel_param_ops ublk_max_ublks_ops = {
+-	.set = ublk_set_max_ublks,
+-	.get = ublk_get_max_ublks,
++static const struct kernel_param_ops ublk_max_unprivileged_ublks_ops = {
++	.set = ublk_set_max_unprivileged_ublks,
++	.get = ublk_get_max_unprivileged_ublks,
+ };
+ 
+-module_param_cb(ublks_max, &ublk_max_ublks_ops, &ublks_max, 0644);
+-MODULE_PARM_DESC(ublks_max, "max number of ublk devices allowed to add(default: 64)");
++module_param_cb(ublks_max, &ublk_max_unprivileged_ublks_ops,
++		&unprivileged_ublks_max, 0644);
++MODULE_PARM_DESC(ublks_max, "max number of unprivileged ublk devices allowed to add(default: 64)");
+ 
+ MODULE_AUTHOR("Ming Lei <ming.lei@redhat.com>");
+ MODULE_DESCRIPTION("Userspace block device");
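Beyond limiting only unprivileged devices against `ublks_max`, the ublk hunks above also rework command completion: `__ublk_ch_uring_cmd()` and `ublk_ctrl_uring_cmd()` now return their status, and only the callback calls `io_uring_cmd_done()`. That matches the io_uring convention that `-EIOCBQUEUED` means the completion will be posted asynchronously later, so `io_uring_cmd_done()` runs exactly once per command (pattern as distilled from the diff):

  ret = ublk_ch_uring_cmd_local(cmd, issue_flags);
  if (ret != -EIOCBQUEUED)                 /* synchronous result: complete now */
  	io_uring_cmd_done(cmd, ret, 0, issue_flags);
  /* else: the I/O path posts the completion later */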
+diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c
+index 11d33cd7b08fc0..13dcc0077732b0 100644
+--- a/drivers/bluetooth/btmtksdio.c
++++ b/drivers/bluetooth/btmtksdio.c
+@@ -610,7 +610,8 @@ static void btmtksdio_txrx_work(struct work_struct *work)
+ 	} while (int_status || time_is_before_jiffies(txrx_timeout));
+ 
+ 	/* Enable interrupt */
+-	sdio_writel(bdev->func, C_INT_EN_SET, MTK_REG_CHLPCR, NULL);
++	if (bdev->func->irq_handler)
++		sdio_writel(bdev->func, C_INT_EN_SET, MTK_REG_CHLPCR, NULL);
+ 
+ 	sdio_release_host(bdev->func);
+ 
+@@ -722,6 +723,10 @@ static int btmtksdio_close(struct hci_dev *hdev)
+ {
+ 	struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
+ 
++	/* Skip btmtksdio_close if BTMTKSDIO_FUNC_ENABLED isn't set */
++	if (!test_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state))
++		return 0;
++
+ 	sdio_claim_host(bdev->func);
+ 
+ 	/* Disable interrupt */
+@@ -1429,11 +1434,15 @@ static void btmtksdio_remove(struct sdio_func *func)
+ 	if (!bdev)
+ 		return;
+ 
++	hdev = bdev->hdev;
++
++	/* Make sure to call btmtksdio_close before removing sdio card */
++	if (test_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state))
++		btmtksdio_close(hdev);
++
+ 	/* Be consistent the state in btmtksdio_probe */
+ 	pm_runtime_get_noresume(bdev->dev);
+ 
+-	hdev = bdev->hdev;
+-
+ 	sdio_set_drvdata(func, NULL);
+ 	hci_unregister_dev(hdev);
+ 	hci_free_dev(hdev);
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 7e1f03231b4c90..af2be0271806f8 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -2979,9 +2979,8 @@ static void btusb_coredump_qca(struct hci_dev *hdev)
+ static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb)
+ {
+ 	int ret = 0;
++	unsigned int skip = 0;
+ 	u8 pkt_type;
+-	u8 *sk_ptr;
+-	unsigned int sk_len;
+ 	u16 seqno;
+ 	u32 dump_size;
+ 
+@@ -2990,18 +2989,13 @@ static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb)
+ 	struct usb_device *udev = btdata->udev;
+ 
+ 	pkt_type = hci_skb_pkt_type(skb);
+-	sk_ptr = skb->data;
+-	sk_len = skb->len;
++	skip = sizeof(struct hci_event_hdr);
++	if (pkt_type == HCI_ACLDATA_PKT)
++		skip += sizeof(struct hci_acl_hdr);
+ 
+-	if (pkt_type == HCI_ACLDATA_PKT) {
+-		sk_ptr += HCI_ACL_HDR_SIZE;
+-		sk_len -= HCI_ACL_HDR_SIZE;
+-	}
+-
+-	sk_ptr += HCI_EVENT_HDR_SIZE;
+-	sk_len -= HCI_EVENT_HDR_SIZE;
++	skb_pull(skb, skip);
++	dump_hdr = (struct qca_dump_hdr *)skb->data;
+ 
+-	dump_hdr = (struct qca_dump_hdr *)sk_ptr;
+ 	seqno = le16_to_cpu(dump_hdr->seqno);
+ 	if (seqno == 0) {
+ 		set_bit(BTUSB_HW_SSR_ACTIVE, &btdata->flags);
+@@ -3021,16 +3015,15 @@ static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb)
+ 
+ 		btdata->qca_dump.ram_dump_size = dump_size;
+ 		btdata->qca_dump.ram_dump_seqno = 0;
+-		sk_ptr += offsetof(struct qca_dump_hdr, data0);
+-		sk_len -= offsetof(struct qca_dump_hdr, data0);
++
++		skb_pull(skb, offsetof(struct qca_dump_hdr, data0));
+ 
+ 		usb_disable_autosuspend(udev);
+ 		bt_dev_info(hdev, "%s memdump size(%u)\n",
+ 			    (pkt_type == HCI_ACLDATA_PKT) ? "ACL" : "event",
+ 			    dump_size);
+ 	} else {
+-		sk_ptr += offsetof(struct qca_dump_hdr, data);
+-		sk_len -= offsetof(struct qca_dump_hdr, data);
++		skb_pull(skb, offsetof(struct qca_dump_hdr, data));
+ 	}
+ 
+ 	if (!btdata->qca_dump.ram_dump_size) {
+@@ -3050,7 +3043,6 @@ static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb)
+ 		return ret;
+ 	}
+ 
+-	skb_pull(skb, skb->len - sk_len);
+ 	hci_devcd_append(hdev, skb);
+ 	btdata->qca_dump.ram_dump_seqno++;
+ 	if (seqno == QCA_LAST_SEQUENCE_NUM) {
+@@ -3078,68 +3070,58 @@ static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb)
+ /* Return: true if the ACL packet is a dump packet, false otherwise. */
+ static bool acl_pkt_is_dump_qca(struct hci_dev *hdev, struct sk_buff *skb)
+ {
+-	u8 *sk_ptr;
+-	unsigned int sk_len;
+-
+ 	struct hci_event_hdr *event_hdr;
+ 	struct hci_acl_hdr *acl_hdr;
+ 	struct qca_dump_hdr *dump_hdr;
++	struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
++	bool is_dump = false;
+ 
+-	sk_ptr = skb->data;
+-	sk_len = skb->len;
+-
+-	acl_hdr = hci_acl_hdr(skb);
+-	if (le16_to_cpu(acl_hdr->handle) != QCA_MEMDUMP_ACL_HANDLE)
++	if (!clone)
+ 		return false;
+ 
+-	sk_ptr += HCI_ACL_HDR_SIZE;
+-	sk_len -= HCI_ACL_HDR_SIZE;
+-	event_hdr = (struct hci_event_hdr *)sk_ptr;
+-
+-	if ((event_hdr->evt != HCI_VENDOR_PKT) ||
+-	    (event_hdr->plen != (sk_len - HCI_EVENT_HDR_SIZE)))
+-		return false;
++	acl_hdr = skb_pull_data(clone, sizeof(*acl_hdr));
++	if (!acl_hdr || (le16_to_cpu(acl_hdr->handle) != QCA_MEMDUMP_ACL_HANDLE))
++		goto out;
+ 
+-	sk_ptr += HCI_EVENT_HDR_SIZE;
+-	sk_len -= HCI_EVENT_HDR_SIZE;
++	event_hdr = skb_pull_data(clone, sizeof(*event_hdr));
++	if (!event_hdr || (event_hdr->evt != HCI_VENDOR_PKT))
++		goto out;
+ 
+-	dump_hdr = (struct qca_dump_hdr *)sk_ptr;
+-	if ((sk_len < offsetof(struct qca_dump_hdr, data)) ||
+-	    (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) ||
+-	    (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE))
+-		return false;
++	dump_hdr = skb_pull_data(clone, sizeof(*dump_hdr));
++	if (!dump_hdr || (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) ||
++	   (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE))
++		goto out;
+ 
+-	return true;
++	is_dump = true;
++out:
++	consume_skb(clone);
++	return is_dump;
+ }
+ 
+ /* Return: true if the event packet is a dump packet, false otherwise. */
+ static bool evt_pkt_is_dump_qca(struct hci_dev *hdev, struct sk_buff *skb)
+ {
+-	u8 *sk_ptr;
+-	unsigned int sk_len;
+-
+ 	struct hci_event_hdr *event_hdr;
+ 	struct qca_dump_hdr *dump_hdr;
++	struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
++	bool is_dump = false;
+ 
+-	sk_ptr = skb->data;
+-	sk_len = skb->len;
+-
+-	event_hdr = hci_event_hdr(skb);
+-
+-	if ((event_hdr->evt != HCI_VENDOR_PKT)
+-	    || (event_hdr->plen != (sk_len - HCI_EVENT_HDR_SIZE)))
++	if (!clone)
+ 		return false;
+ 
+-	sk_ptr += HCI_EVENT_HDR_SIZE;
+-	sk_len -= HCI_EVENT_HDR_SIZE;
++	event_hdr = skb_pull_data(clone, sizeof(*event_hdr));
++	if (!event_hdr || (event_hdr->evt != HCI_VENDOR_PKT))
++		goto out;
+ 
+-	dump_hdr = (struct qca_dump_hdr *)sk_ptr;
+-	if ((sk_len < offsetof(struct qca_dump_hdr, data)) ||
+-	    (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) ||
+-	    (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE))
+-		return false;
++	dump_hdr = skb_pull_data(clone, sizeof(*dump_hdr));
++	if (!dump_hdr || (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) ||
++	   (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE))
++		goto out;
+ 
+-	return true;
++	is_dump = true;
++out:
++	consume_skb(clone);
++	return is_dump;
+ }
+ 
+ static int btusb_recv_acl_qca(struct hci_dev *hdev, struct sk_buff *skb)
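The btusb QCA dump hunks drop the hand-rolled `sk_ptr`/`sk_len` arithmetic in favour of `skb_pull_data()`, which returns the header pointer or NULL when the buffer is too short. The predicates clone the skb first because a clone shares the payload but carries its own data/len cursor, so probing the headers never disturbs the original packet. Skeleton of the pattern (sketch, not the driver code):

  /* Clone = private cursor over shared payload; pulls cannot corrupt skb. */
  static bool looks_like_dump(struct sk_buff *skb)
  {
  	struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
  	struct hci_event_hdr *hdr;
  	bool ok = false;

  	if (!clone)
  		return false;

  	hdr = skb_pull_data(clone, sizeof(*hdr));  /* NULL when too short */
  	if (hdr && hdr->evt == HCI_VENDOR_PKT)
  		ok = true;

  	consume_skb(clone);                        /* original left untouched */
  	return ok;
  }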
+diff --git a/drivers/char/tpm/tpm2-sessions.c b/drivers/char/tpm/tpm2-sessions.c
+index ecea089157301f..cf0b8315404479 100644
+--- a/drivers/char/tpm/tpm2-sessions.c
++++ b/drivers/char/tpm/tpm2-sessions.c
+@@ -974,7 +974,7 @@ int tpm2_start_auth_session(struct tpm_chip *chip)
+ 	int rc;
+ 
+ 	if (chip->auth) {
+-		dev_warn_once(&chip->dev, "auth session is active\n");
++		dev_dbg_once(&chip->dev, "auth session is active\n");
+ 		return 0;
+ 	}
+ 
+diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c
+index 014db638662407..8ddf3a9a53dfd5 100644
+--- a/drivers/clk/clk-s2mps11.c
++++ b/drivers/clk/clk-s2mps11.c
+@@ -137,6 +137,8 @@ static int s2mps11_clk_probe(struct platform_device *pdev)
+ 	if (!clk_data)
+ 		return -ENOMEM;
+ 
++	clk_data->num = S2MPS11_CLKS_NUM;
++
+ 	switch (hwid) {
+ 	case S2MPS11X:
+ 		s2mps11_reg = S2MPS11_REG_RTC_CTRL;
+@@ -186,7 +188,6 @@ static int s2mps11_clk_probe(struct platform_device *pdev)
+ 		clk_data->hws[i] = &s2mps11_clks[i].hw;
+ 	}
+ 
+-	clk_data->num = S2MPS11_CLKS_NUM;
+ 	of_clk_add_hw_provider(s2mps11_clks->clk_np, of_clk_hw_onecell_get,
+ 			       clk_data);
+ 
+diff --git a/drivers/clk/imx/clk-imx8mp.c b/drivers/clk/imx/clk-imx8mp.c
+index fb18f507f12135..fe6dac70f1a15b 100644
+--- a/drivers/clk/imx/clk-imx8mp.c
++++ b/drivers/clk/imx/clk-imx8mp.c
+@@ -8,6 +8,7 @@
+ #include <linux/err.h>
+ #include <linux/io.h>
+ #include <linux/module.h>
++#include <linux/units.h>
+ #include <linux/of_address.h>
+ #include <linux/platform_device.h>
+ #include <linux/slab.h>
+@@ -406,11 +407,151 @@ static const char * const imx8mp_clkout_sels[] = {"audio_pll1_out", "audio_pll2_
+ static struct clk_hw **hws;
+ static struct clk_hw_onecell_data *clk_hw_data;
+ 
++struct imx8mp_clock_constraints {
++	unsigned int clkid;
++	u32 maxrate;
++};
++
++/*
++ * Below tables are taken from IMX8MPCEC Rev. 2.1, 07/2023
++ * Table 13. Maximum frequency of modules.
++ * Probable typos fixed are marked with a comment.
++ */
++static const struct imx8mp_clock_constraints imx8mp_clock_common_constraints[] = {
++	{ IMX8MP_CLK_A53_DIV,             1000 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_ENET_AXI,             266666667 }, /* Datasheet claims 266MHz */
++	{ IMX8MP_CLK_NAND_USDHC_BUS,       266666667 }, /* Datasheet claims 266MHz */
++	{ IMX8MP_CLK_MEDIA_APB,            200 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_HDMI_APB,             133333333 }, /* Datasheet claims 133MHz */
++	{ IMX8MP_CLK_ML_AXI,               800 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_AHB,                  133333333 },
++	{ IMX8MP_CLK_IPG_ROOT,              66666667 },
++	{ IMX8MP_CLK_AUDIO_AHB,            400 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_MEDIA_DISP2_PIX,      170 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_DRAM_ALT,             666666667 },
++	{ IMX8MP_CLK_DRAM_APB,             200 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_CAN1,                  80 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_CAN2,                  80 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_PCIE_AUX,              10 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_I2C5,                  66666667 }, /* Datasheet claims 66MHz */
++	{ IMX8MP_CLK_I2C6,                  66666667 }, /* Datasheet claims 66MHz */
++	{ IMX8MP_CLK_SAI1,                  66666667 }, /* Datasheet claims 66MHz */
++	{ IMX8MP_CLK_SAI2,                  66666667 }, /* Datasheet claims 66MHz */
++	{ IMX8MP_CLK_SAI3,                  66666667 }, /* Datasheet claims 66MHz */
++	{ IMX8MP_CLK_SAI5,                  66666667 }, /* Datasheet claims 66MHz */
++	{ IMX8MP_CLK_SAI6,                  66666667 }, /* Datasheet claims 66MHz */
++	{ IMX8MP_CLK_ENET_QOS,             125 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_ENET_QOS_TIMER,       200 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_ENET_REF,             125 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_ENET_TIMER,           125 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_ENET_PHY_REF,         125 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_NAND,                 500 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_QSPI,                 400 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_USDHC1,               400 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_USDHC2,               400 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_I2C1,                  66666667 }, /* Datasheet claims 66MHz */
++	{ IMX8MP_CLK_I2C2,                  66666667 }, /* Datasheet claims 66MHz */
++	{ IMX8MP_CLK_I2C3,                  66666667 }, /* Datasheet claims 66MHz */
++	{ IMX8MP_CLK_I2C4,                  66666667 }, /* Datasheet claims 66MHz */
++	{ IMX8MP_CLK_UART1,                 80 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_UART2,                 80 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_UART3,                 80 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_UART4,                 80 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_ECSPI1,                80 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_ECSPI2,                80 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_PWM1,                  66666667 }, /* Datasheet claims 66MHz */
++	{ IMX8MP_CLK_PWM2,                  66666667 }, /* Datasheet claims 66MHz */
++	{ IMX8MP_CLK_PWM3,                  66666667 }, /* Datasheet claims 66MHz */
++	{ IMX8MP_CLK_PWM4,                  66666667 }, /* Datasheet claims 66MHz */
++	{ IMX8MP_CLK_GPT1,                 100 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_GPT2,                 100 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_GPT3,                 100 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_GPT4,                 100 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_GPT5,                 100 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_GPT6,                 100 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_WDOG,                  66666667 }, /* Datasheet claims 66MHz */
++	{ IMX8MP_CLK_IPP_DO_CLKO1,         200 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_IPP_DO_CLKO2,         200 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_HDMI_REF_266M,        266 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_USDHC3,               400 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_MEDIA_MIPI_PHY1_REF,  300 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_MEDIA_DISP1_PIX,      250 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_MEDIA_CAM2_PIX,       277 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_MEDIA_LDB,            595 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_MEDIA_MIPI_TEST_BYTE, 200 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_ECSPI3,                80 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_PDM,                  200 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_SAI7,                  66666667 }, /* Datasheet claims 66MHz */
++	{ IMX8MP_CLK_MAIN_AXI,             400 * HZ_PER_MHZ },
++	{ /* Sentinel */ }
++};
++
++static const struct imx8mp_clock_constraints imx8mp_clock_nominal_constraints[] = {
++	{ IMX8MP_CLK_M7_CORE,           600 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_ML_CORE,           800 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_GPU3D_CORE,        800 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_GPU3D_SHADER_CORE, 800 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_GPU2D_CORE,        800 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_AUDIO_AXI_SRC,     600 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_HSIO_AXI,          400 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_MEDIA_ISP,         400 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_VPU_BUS,           600 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_MEDIA_AXI,         400 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_HDMI_AXI,          400 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_GPU_AXI,           600 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_GPU_AHB,           300 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_NOC,               800 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_NOC_IO,            600 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_ML_AHB,            300 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_VPU_G1,            600 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_VPU_G2,            500 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_MEDIA_CAM1_PIX,    400 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_VPU_VC8000E,       400 * HZ_PER_MHZ }, /* Datasheet claims 500MHz */
++	{ IMX8MP_CLK_DRAM_CORE,         800 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_GIC,               400 * HZ_PER_MHZ },
++	{ /* Sentinel */ }
++};
++
++static const struct imx8mp_clock_constraints imx8mp_clock_overdrive_constraints[] = {
++	{ IMX8MP_CLK_M7_CORE,            800 * HZ_PER_MHZ},
++	{ IMX8MP_CLK_ML_CORE,           1000 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_GPU3D_CORE,        1000 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_GPU3D_SHADER_CORE, 1000 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_GPU2D_CORE,        1000 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_AUDIO_AXI_SRC,      800 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_HSIO_AXI,           500 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_MEDIA_ISP,          500 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_VPU_BUS,            800 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_MEDIA_AXI,          500 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_HDMI_AXI,           500 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_GPU_AXI,            800 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_GPU_AHB,            400 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_NOC,               1000 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_NOC_IO,             800 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_ML_AHB,             400 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_VPU_G1,             800 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_VPU_G2,             700 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_MEDIA_CAM1_PIX,     500 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_VPU_VC8000E,        500 * HZ_PER_MHZ }, /* Datasheet claims 400MHz */
++	{ IMX8MP_CLK_DRAM_CORE,         1000 * HZ_PER_MHZ },
++	{ IMX8MP_CLK_GIC,                500 * HZ_PER_MHZ },
++	{ /* Sentinel */ }
++};
++
++static void imx8mp_clocks_apply_constraints(const struct imx8mp_clock_constraints constraints[])
++{
++	const struct imx8mp_clock_constraints *constr;
++
++	for (constr = constraints; constr->clkid; constr++)
++		clk_hw_set_rate_range(hws[constr->clkid], 0, constr->maxrate);
++}
++
+ static int imx8mp_clocks_probe(struct platform_device *pdev)
+ {
+ 	struct device *dev = &pdev->dev;
+ 	struct device_node *np;
+ 	void __iomem *anatop_base, *ccm_base;
++	const char *opmode;
+ 	int err;
+ 
+ 	np = of_find_compatible_node(NULL, NULL, "fsl,imx8mp-anatop");
+@@ -715,6 +856,16 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
+ 
+ 	imx_check_clk_hws(hws, IMX8MP_CLK_END);
+ 
++	imx8mp_clocks_apply_constraints(imx8mp_clock_common_constraints);
++
++	err = of_property_read_string(np, "fsl,operating-mode", &opmode);
++	if (!err) {
++		if (!strcmp(opmode, "nominal"))
++			imx8mp_clocks_apply_constraints(imx8mp_clock_nominal_constraints);
++		else if (!strcmp(opmode, "overdrive"))
++			imx8mp_clocks_apply_constraints(imx8mp_clock_overdrive_constraints);
++	}
++
+ 	err = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data);
+ 	if (err < 0) {
+ 		dev_err(dev, "failed to register hws for i.MX8MP\n");
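
The constraint tables above follow a compact CCF idiom: one { clock id,
maximum rate } entry per clock, terminated by a zero-filled sentinel
(clock id 0 is never a constrained clock), plus a single loop that feeds
each cap to clk_hw_set_rate_range(). A minimal sketch of the same idiom,
with hypothetical names (my_constraint, my_limits, MY_CLK_*) standing in
for the driver's own:

    struct my_constraint {
            unsigned int clkid;     /* index into the hws[] array */
            unsigned long maxrate;  /* Hz */
    };

    static const struct my_constraint my_limits[] = {
            { MY_CLK_FAST_BUS, 400 * HZ_PER_MHZ },
            { MY_CLK_SLOW_BUS, 100 * HZ_PER_MHZ },
            { /* sentinel */ }
    };

    static void my_apply_limits(struct clk_hw **hws)
    {
            const struct my_constraint *c;

            /* a minimum of 0 leaves only the upper bound enforced */
            for (c = my_limits; c->clkid; c++)
                    clk_hw_set_rate_range(hws[c->clkid], 0, c->maxrate);
    }
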
+diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
+index 16145f74bbc853..fd605bccf48dc0 100644
+--- a/drivers/clk/qcom/Kconfig
++++ b/drivers/clk/qcom/Kconfig
+@@ -199,7 +199,7 @@ config IPQ_GCC_4019
+ 
+ config IPQ_GCC_5018
+ 	tristate "IPQ5018 Global Clock Controller"
+-	depends on ARM64 || COMPILE_TEST
++	depends on ARM || ARM64 || COMPILE_TEST
+ 	help
+ 	  Support for global clock controller on ipq5018 devices.
+ 	  Say Y if you want to use peripheral devices such as UART, SPI,
+diff --git a/drivers/clk/qcom/camcc-sm8250.c b/drivers/clk/qcom/camcc-sm8250.c
+index 34d2f17520dcca..450ddbebd35f27 100644
+--- a/drivers/clk/qcom/camcc-sm8250.c
++++ b/drivers/clk/qcom/camcc-sm8250.c
+@@ -411,7 +411,7 @@ static struct clk_rcg2 cam_cc_bps_clk_src = {
+ 		.parent_data = cam_cc_parent_data_0,
+ 		.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -433,7 +433,7 @@ static struct clk_rcg2 cam_cc_camnoc_axi_clk_src = {
+ 		.parent_data = cam_cc_parent_data_0,
+ 		.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -454,7 +454,7 @@ static struct clk_rcg2 cam_cc_cci_0_clk_src = {
+ 		.parent_data = cam_cc_parent_data_0,
+ 		.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -469,7 +469,7 @@ static struct clk_rcg2 cam_cc_cci_1_clk_src = {
+ 		.parent_data = cam_cc_parent_data_0,
+ 		.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -490,7 +490,7 @@ static struct clk_rcg2 cam_cc_cphy_rx_clk_src = {
+ 		.parent_data = cam_cc_parent_data_0,
+ 		.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -511,7 +511,7 @@ static struct clk_rcg2 cam_cc_csi0phytimer_clk_src = {
+ 		.parent_data = cam_cc_parent_data_0,
+ 		.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -526,7 +526,7 @@ static struct clk_rcg2 cam_cc_csi1phytimer_clk_src = {
+ 		.parent_data = cam_cc_parent_data_0,
+ 		.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -556,7 +556,7 @@ static struct clk_rcg2 cam_cc_csi3phytimer_clk_src = {
+ 		.parent_data = cam_cc_parent_data_0,
+ 		.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -571,7 +571,7 @@ static struct clk_rcg2 cam_cc_csi4phytimer_clk_src = {
+ 		.parent_data = cam_cc_parent_data_0,
+ 		.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -586,7 +586,7 @@ static struct clk_rcg2 cam_cc_csi5phytimer_clk_src = {
+ 		.parent_data = cam_cc_parent_data_0,
+ 		.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -611,7 +611,7 @@ static struct clk_rcg2 cam_cc_fast_ahb_clk_src = {
+ 		.parent_data = cam_cc_parent_data_0,
+ 		.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -634,7 +634,7 @@ static struct clk_rcg2 cam_cc_fd_core_clk_src = {
+ 		.parent_data = cam_cc_parent_data_0,
+ 		.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -649,7 +649,7 @@ static struct clk_rcg2 cam_cc_icp_clk_src = {
+ 		.parent_data = cam_cc_parent_data_0,
+ 		.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -673,7 +673,7 @@ static struct clk_rcg2 cam_cc_ife_0_clk_src = {
+ 		.parent_data = cam_cc_parent_data_2,
+ 		.num_parents = ARRAY_SIZE(cam_cc_parent_data_2),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -710,7 +710,7 @@ static struct clk_rcg2 cam_cc_ife_0_csid_clk_src = {
+ 		.parent_data = cam_cc_parent_data_0,
+ 		.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -734,7 +734,7 @@ static struct clk_rcg2 cam_cc_ife_1_clk_src = {
+ 		.parent_data = cam_cc_parent_data_3,
+ 		.num_parents = ARRAY_SIZE(cam_cc_parent_data_3),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -749,7 +749,7 @@ static struct clk_rcg2 cam_cc_ife_1_csid_clk_src = {
+ 		.parent_data = cam_cc_parent_data_0,
+ 		.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -771,7 +771,7 @@ static struct clk_rcg2 cam_cc_ife_lite_clk_src = {
+ 		.parent_data = cam_cc_parent_data_0,
+ 		.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -786,7 +786,7 @@ static struct clk_rcg2 cam_cc_ife_lite_csid_clk_src = {
+ 		.parent_data = cam_cc_parent_data_0,
+ 		.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -810,7 +810,7 @@ static struct clk_rcg2 cam_cc_ipe_0_clk_src = {
+ 		.parent_data = cam_cc_parent_data_4,
+ 		.num_parents = ARRAY_SIZE(cam_cc_parent_data_4),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -825,7 +825,7 @@ static struct clk_rcg2 cam_cc_jpeg_clk_src = {
+ 		.parent_data = cam_cc_parent_data_0,
+ 		.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -847,7 +847,7 @@ static struct clk_rcg2 cam_cc_mclk0_clk_src = {
+ 		.parent_data = cam_cc_parent_data_1,
+ 		.num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -862,7 +862,7 @@ static struct clk_rcg2 cam_cc_mclk1_clk_src = {
+ 		.parent_data = cam_cc_parent_data_1,
+ 		.num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -877,7 +877,7 @@ static struct clk_rcg2 cam_cc_mclk2_clk_src = {
+ 		.parent_data = cam_cc_parent_data_1,
+ 		.num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -892,7 +892,7 @@ static struct clk_rcg2 cam_cc_mclk3_clk_src = {
+ 		.parent_data = cam_cc_parent_data_1,
+ 		.num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -907,7 +907,7 @@ static struct clk_rcg2 cam_cc_mclk4_clk_src = {
+ 		.parent_data = cam_cc_parent_data_1,
+ 		.num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -922,7 +922,7 @@ static struct clk_rcg2 cam_cc_mclk5_clk_src = {
+ 		.parent_data = cam_cc_parent_data_1,
+ 		.num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -993,7 +993,7 @@ static struct clk_rcg2 cam_cc_slow_ahb_clk_src = {
+ 		.parent_data = cam_cc_parent_data_0,
+ 		.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
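
Every RCG in this block switches from clk_rcg2_ops to
clk_rcg2_shared_ops. The shared variant parks the RCG on a safe parent
while the clock is disabled instead of leaving the last configuration
live, which is the safer behaviour for clock controllers whose root
generators may also be reached by entities other than Linux. Only the
.ops pointer in the init data changes; an illustrative (not
driver-verbatim) declaration:

    static struct clk_rcg2 example_clk_src = {
            .cmd_rcgr = 0x1000,             /* hypothetical offset */
            .hid_width = 5,
            .parent_map = example_parent_map,
            .clkr.hw.init = &(struct clk_init_data){
                    .name = "example_clk_src",
                    .parent_data = example_parent_data,
                    .num_parents = ARRAY_SIZE(example_parent_data),
                    .flags = CLK_SET_RATE_PARENT,
                    /* parked on a safe parent while disabled */
                    .ops = &clk_rcg2_shared_ops,
            },
    };
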
+diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
+index 10e276dabff93d..e76ecc4663511f 100644
+--- a/drivers/clk/qcom/clk-alpha-pll.c
++++ b/drivers/clk/qcom/clk-alpha-pll.c
+@@ -670,14 +670,19 @@ clk_alpha_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+ 	struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ 	u32 alpha_width = pll_alpha_width(pll);
+ 
+-	regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l);
++	if (regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l))
++		return 0;
++
++	if (regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl))
++		return 0;
+ 
+-	regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl);
+ 	if (ctl & PLL_ALPHA_EN) {
+-		regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL(pll), &low);
++		if (regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL(pll), &low))
++			return 0;
+ 		if (alpha_width > 32) {
+-			regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL_U(pll),
+-				    &high);
++			if (regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL_U(pll),
++					&high))
++				return 0;
+ 			a = (u64)high << 32 | low;
+ 		} else {
+ 			a = low & GENMASK(alpha_width - 1, 0);
+@@ -903,8 +908,11 @@ alpha_pll_huayra_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+ 	struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ 	u32 l, alpha = 0, ctl, alpha_m, alpha_n;
+ 
+-	regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l);
+-	regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl);
++	if (regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l))
++		return 0;
++
++	if (regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl))
++		return 0;
+ 
+ 	if (ctl & PLL_ALPHA_EN) {
+ 		regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL(pll), &alpha);
+@@ -1098,8 +1106,11 @@ clk_trion_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+ 	struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ 	u32 l, frac, alpha_width = pll_alpha_width(pll);
+ 
+-	regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l);
+-	regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL(pll), &frac);
++	if (regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l))
++		return 0;
++
++	if (regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL(pll), &frac))
++		return 0;
+ 
+ 	return alpha_pll_calc_rate(parent_rate, l, frac, alpha_width);
+ }
+@@ -1157,7 +1168,8 @@ clk_alpha_pll_postdiv_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+ 	struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
+ 	u32 ctl;
+ 
+-	regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl);
++	if (regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl))
++		return 0;
+ 
+ 	ctl >>= PLL_POST_DIV_SHIFT;
+ 	ctl &= PLL_POST_DIV_MASK(pll);
+@@ -1373,8 +1385,11 @@ static unsigned long alpha_pll_fabia_recalc_rate(struct clk_hw *hw,
+ 	struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ 	u32 l, frac, alpha_width = pll_alpha_width(pll);
+ 
+-	regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l);
+-	regmap_read(pll->clkr.regmap, PLL_FRAC(pll), &frac);
++	if (regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l))
++		return 0;
++
++	if (regmap_read(pll->clkr.regmap, PLL_FRAC(pll), &frac))
++		return 0;
+ 
+ 	return alpha_pll_calc_rate(parent_rate, l, frac, alpha_width);
+ }
+@@ -1524,7 +1539,8 @@ clk_trion_pll_postdiv_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+ 	struct regmap *regmap = pll->clkr.regmap;
+ 	u32 i, div = 1, val;
+ 
+-	regmap_read(regmap, PLL_USER_CTL(pll), &val);
++	if (regmap_read(regmap, PLL_USER_CTL(pll), &val))
++		return 0;
+ 
+ 	val >>= pll->post_div_shift;
+ 	val &= PLL_POST_DIV_MASK(pll);
+@@ -2451,9 +2467,12 @@ static unsigned long alpha_pll_lucid_evo_recalc_rate(struct clk_hw *hw,
+ 	struct regmap *regmap = pll->clkr.regmap;
+ 	u32 l, frac;
+ 
+-	regmap_read(regmap, PLL_L_VAL(pll), &l);
++	if (regmap_read(regmap, PLL_L_VAL(pll), &l))
++		return 0;
+ 	l &= LUCID_EVO_PLL_L_VAL_MASK;
+-	regmap_read(regmap, PLL_ALPHA_VAL(pll), &frac);
++
++	if (regmap_read(regmap, PLL_ALPHA_VAL(pll), &frac))
++		return 0;
+ 
+ 	return alpha_pll_calc_rate(parent_rate, l, frac, pll_alpha_width(pll));
+ }
+@@ -2528,7 +2547,8 @@ static unsigned long clk_rivian_evo_pll_recalc_rate(struct clk_hw *hw,
+ 	struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ 	u32 l;
+ 
+-	regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l);
++	if (regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l))
++		return 0;
+ 
+ 	return parent_rate * l;
+ }
+diff --git a/drivers/clk/qcom/lpassaudiocc-sc7280.c b/drivers/clk/qcom/lpassaudiocc-sc7280.c
+index 45e7264770866f..22169da08a51a0 100644
+--- a/drivers/clk/qcom/lpassaudiocc-sc7280.c
++++ b/drivers/clk/qcom/lpassaudiocc-sc7280.c
+@@ -1,6 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0-only
+ /*
+  * Copyright (c) 2021, The Linux Foundation. All rights reserved.
++ * Copyright (c) 2025, Qualcomm Innovation Center, Inc. All rights reserved.
+  */
+ 
+ #include <linux/clk-provider.h>
+@@ -713,14 +714,24 @@ static const struct qcom_reset_map lpass_audio_cc_sc7280_resets[] = {
+ 	[LPASS_AUDIO_SWR_WSA_CGCR] = { 0xb0, 1 },
+ };
+ 
++static const struct regmap_config lpass_audio_cc_sc7280_reset_regmap_config = {
++	.name = "lpassaudio_cc_reset",
++	.reg_bits = 32,
++	.reg_stride = 4,
++	.val_bits = 32,
++	.fast_io = true,
++	.max_register = 0xc8,
++};
++
+ static const struct qcom_cc_desc lpass_audio_cc_reset_sc7280_desc = {
+-	.config = &lpass_audio_cc_sc7280_regmap_config,
++	.config = &lpass_audio_cc_sc7280_reset_regmap_config,
+ 	.resets = lpass_audio_cc_sc7280_resets,
+ 	.num_resets = ARRAY_SIZE(lpass_audio_cc_sc7280_resets),
+ };
+ 
+ static const struct of_device_id lpass_audio_cc_sc7280_match_table[] = {
+-	{ .compatible = "qcom,sc7280-lpassaudiocc" },
++	{ .compatible = "qcom,qcm6490-lpassaudiocc", .data = &lpass_audio_cc_reset_sc7280_desc },
++	{ .compatible = "qcom,sc7280-lpassaudiocc", .data = &lpass_audio_cc_sc7280_desc },
+ 	{ }
+ };
+ MODULE_DEVICE_TABLE(of, lpass_audio_cc_sc7280_match_table);
+@@ -752,13 +763,17 @@ static int lpass_audio_cc_sc7280_probe(struct platform_device *pdev)
+ 	struct regmap *regmap;
+ 	int ret;
+ 
++	desc = device_get_match_data(&pdev->dev);
++
++	if (of_device_is_compatible(pdev->dev.of_node, "qcom,qcm6490-lpassaudiocc"))
++		return qcom_cc_probe_by_index(pdev, 1, desc);
++
+ 	ret = lpass_audio_setup_runtime_pm(pdev);
+ 	if (ret)
+ 		return ret;
+ 
+ 	lpass_audio_cc_sc7280_regmap_config.name = "lpassaudio_cc";
+ 	lpass_audio_cc_sc7280_regmap_config.max_register = 0x2f000;
+-	desc = &lpass_audio_cc_sc7280_desc;
+ 
+ 	regmap = qcom_cc_map(pdev, desc);
+ 	if (IS_ERR(regmap)) {
+@@ -772,7 +787,7 @@ static int lpass_audio_cc_sc7280_probe(struct platform_device *pdev)
+ 	regmap_write(regmap, 0x4, 0x3b);
+ 	regmap_write(regmap, 0x8, 0xff05);
+ 
+-	ret = qcom_cc_really_probe(&pdev->dev, &lpass_audio_cc_sc7280_desc, regmap);
++	ret = qcom_cc_really_probe(&pdev->dev, desc, regmap);
+ 	if (ret) {
+ 		dev_err(&pdev->dev, "Failed to register LPASS AUDIO CC clocks\n");
+ 		goto exit;
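
The probe rework is the standard OF match-data idiom: each compatible
entry carries its own descriptor in .data, and device_get_match_data()
hands back the one for the device that actually matched, so a single
probe function can serve several register layouts. The general shape,
names hypothetical:

    static const struct of_device_id my_cc_match_table[] = {
            { .compatible = "vendor,soc-a-cc", .data = &soc_a_desc },
            { .compatible = "vendor,soc-b-cc", .data = &soc_b_desc },
            { }
    };

    static int my_cc_probe(struct platform_device *pdev)
    {
            const struct my_cc_desc *desc;

            desc = device_get_match_data(&pdev->dev);
            if (!desc)
                    return -EINVAL;

            return my_cc_register(&pdev->dev, desc);
    }
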
+diff --git a/drivers/clk/renesas/rzg2l-cpg.c b/drivers/clk/renesas/rzg2l-cpg.c
+index 229f4540b219e3..97d42328fa81ac 100644
+--- a/drivers/clk/renesas/rzg2l-cpg.c
++++ b/drivers/clk/renesas/rzg2l-cpg.c
+@@ -1549,28 +1549,6 @@ static int rzg2l_cpg_reset_controller_register(struct rzg2l_cpg_priv *priv)
+ 	return devm_reset_controller_register(priv->dev, &priv->rcdev);
+ }
+ 
+-static bool rzg2l_cpg_is_pm_clk(struct rzg2l_cpg_priv *priv,
+-				const struct of_phandle_args *clkspec)
+-{
+-	const struct rzg2l_cpg_info *info = priv->info;
+-	unsigned int id;
+-	unsigned int i;
+-
+-	if (clkspec->args_count != 2)
+-		return false;
+-
+-	if (clkspec->args[0] != CPG_MOD)
+-		return false;
+-
+-	id = clkspec->args[1] + info->num_total_core_clks;
+-	for (i = 0; i < info->num_no_pm_mod_clks; i++) {
+-		if (info->no_pm_mod_clks[i] == id)
+-			return false;
+-	}
+-
+-	return true;
+-}
+-
+ /**
+  * struct rzg2l_cpg_pm_domains - RZ/G2L PM domains data structure
+  * @onecell_data: cell data
+@@ -1595,45 +1573,73 @@ struct rzg2l_cpg_pd {
+ 	u16 id;
+ };
+ 
++static bool rzg2l_cpg_is_pm_clk(struct rzg2l_cpg_pd *pd,
++				const struct of_phandle_args *clkspec)
++{
++	if (clkspec->np != pd->genpd.dev.of_node || clkspec->args_count != 2)
++		return false;
++
++	switch (clkspec->args[0]) {
++	case CPG_MOD: {
++		struct rzg2l_cpg_priv *priv = pd->priv;
++		const struct rzg2l_cpg_info *info = priv->info;
++		unsigned int id = clkspec->args[1];
++
++		if (id >= priv->num_mod_clks)
++			return false;
++
++		id += info->num_total_core_clks;
++
++		for (unsigned int i = 0; i < info->num_no_pm_mod_clks; i++) {
++			if (info->no_pm_mod_clks[i] == id)
++				return false;
++		}
++
++		return true;
++	}
++
++	case CPG_CORE:
++	default:
++		return false;
++	}
++}
++
+ static int rzg2l_cpg_attach_dev(struct generic_pm_domain *domain, struct device *dev)
+ {
+ 	struct rzg2l_cpg_pd *pd = container_of(domain, struct rzg2l_cpg_pd, genpd);
+-	struct rzg2l_cpg_priv *priv = pd->priv;
+ 	struct device_node *np = dev->of_node;
+ 	struct of_phandle_args clkspec;
+ 	bool once = true;
+ 	struct clk *clk;
++	unsigned int i;
+ 	int error;
+-	int i = 0;
+-
+-	while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i,
+-					   &clkspec)) {
+-		if (rzg2l_cpg_is_pm_clk(priv, &clkspec)) {
+-			if (once) {
+-				once = false;
+-				error = pm_clk_create(dev);
+-				if (error) {
+-					of_node_put(clkspec.np);
+-					goto err;
+-				}
+-			}
+-			clk = of_clk_get_from_provider(&clkspec);
++
++	for (i = 0; !of_parse_phandle_with_args(np, "clocks", "#clock-cells", i, &clkspec); i++) {
++		if (!rzg2l_cpg_is_pm_clk(pd, &clkspec)) {
+ 			of_node_put(clkspec.np);
+-			if (IS_ERR(clk)) {
+-				error = PTR_ERR(clk);
+-				goto fail_destroy;
+-			}
++			continue;
++		}
+ 
+-			error = pm_clk_add_clk(dev, clk);
++		if (once) {
++			once = false;
++			error = pm_clk_create(dev);
+ 			if (error) {
+-				dev_err(dev, "pm_clk_add_clk failed %d\n",
+-					error);
+-				goto fail_put;
++				of_node_put(clkspec.np);
++				goto err;
+ 			}
+-		} else {
+-			of_node_put(clkspec.np);
+ 		}
+-		i++;
++		clk = of_clk_get_from_provider(&clkspec);
++		of_node_put(clkspec.np);
++		if (IS_ERR(clk)) {
++			error = PTR_ERR(clk);
++			goto fail_destroy;
++		}
++
++		error = pm_clk_add_clk(dev, clk);
++		if (error) {
++			dev_err(dev, "pm_clk_add_clk failed %d\n", error);
++			goto fail_put;
++		}
+ 	}
+ 
+ 	return 0;
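
Beyond scoping the PM-clock check to the right provider node, the rework
flattens the phandle walk into a plain for-loop. The iteration contract
worth remembering: of_parse_phandle_with_args() returns 0 on success and
takes a reference on clkspec.np that the caller must drop on every path.
A trimmed sketch:

    struct of_phandle_args clkspec;
    unsigned int i;

    for (i = 0;
         !of_parse_phandle_with_args(np, "clocks", "#clock-cells",
                                     i, &clkspec);
         i++) {
            /* ... inspect clkspec.args[] / resolve the clock ... */

            of_node_put(clkspec.np);    /* drop the iterator's ref */
    }
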
+diff --git a/drivers/clk/sunxi-ng/ccu-sun20i-d1.c b/drivers/clk/sunxi-ng/ccu-sun20i-d1.c
+index 3f095515f54f91..54d2c7f0ed632f 100644
+--- a/drivers/clk/sunxi-ng/ccu-sun20i-d1.c
++++ b/drivers/clk/sunxi-ng/ccu-sun20i-d1.c
+@@ -412,19 +412,23 @@ static const struct clk_parent_data mmc0_mmc1_parents[] = {
+ 	{ .hw = &pll_periph0_2x_clk.common.hw },
+ 	{ .hw = &pll_audio1_div2_clk.common.hw },
+ };
+-static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(mmc0_clk, "mmc0", mmc0_mmc1_parents, 0x830,
+-				       0, 4,	/* M */
+-				       8, 2,	/* P */
+-				       24, 3,	/* mux */
+-				       BIT(31),	/* gate */
+-				       0);
+-
+-static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(mmc1_clk, "mmc1", mmc0_mmc1_parents, 0x834,
+-				       0, 4,	/* M */
+-				       8, 2,	/* P */
+-				       24, 3,	/* mux */
+-				       BIT(31),	/* gate */
+-				       0);
++static SUNXI_CCU_MP_DATA_WITH_MUX_GATE_POSTDIV(mmc0_clk, "mmc0",
++					       mmc0_mmc1_parents, 0x830,
++					       0, 4,		/* M */
++					       8, 2,		/* P */
++					       24, 3,		/* mux */
++					       BIT(31),		/* gate */
++					       2,		/* post-div */
++					       0);
++
++static SUNXI_CCU_MP_DATA_WITH_MUX_GATE_POSTDIV(mmc1_clk, "mmc1",
++					       mmc0_mmc1_parents, 0x834,
++					       0, 4,		/* M */
++					       8, 2,		/* P */
++					       24, 3,		/* mux */
++					       BIT(31),		/* gate */
++					       2,		/* post-div */
++					       0);
+ 
+ static const struct clk_parent_data mmc2_parents[] = {
+ 	{ .fw_name = "hosc" },
+@@ -433,12 +437,14 @@ static const struct clk_parent_data mmc2_parents[] = {
+ 	{ .hw = &pll_periph0_800M_clk.common.hw },
+ 	{ .hw = &pll_audio1_div2_clk.common.hw },
+ };
+-static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(mmc2_clk, "mmc2", mmc2_parents, 0x838,
+-				       0, 4,	/* M */
+-				       8, 2,	/* P */
+-				       24, 3,	/* mux */
+-				       BIT(31),	/* gate */
+-				       0);
++static SUNXI_CCU_MP_DATA_WITH_MUX_GATE_POSTDIV(mmc2_clk, "mmc2", mmc2_parents,
++					       0x838,
++					       0, 4,		/* M */
++					       8, 2,		/* P */
++					       24, 3,		/* mux */
++					       BIT(31),		/* gate */
++					       2,		/* post-div */
++					       0);
+ 
+ static SUNXI_CCU_GATE_HWS(bus_mmc0_clk, "bus-mmc0", psi_ahb_hws,
+ 			  0x84c, BIT(0), 0);
+diff --git a/drivers/clk/sunxi-ng/ccu_mp.h b/drivers/clk/sunxi-ng/ccu_mp.h
+index 6e50f3728fb5f1..7d836a9fb3db34 100644
+--- a/drivers/clk/sunxi-ng/ccu_mp.h
++++ b/drivers/clk/sunxi-ng/ccu_mp.h
+@@ -52,6 +52,28 @@ struct ccu_mp {
+ 		}							\
+ 	}
+ 
++#define SUNXI_CCU_MP_DATA_WITH_MUX_GATE_POSTDIV(_struct, _name, _parents, \
++						_reg,			\
++						_mshift, _mwidth,	\
++						_pshift, _pwidth,	\
++						_muxshift, _muxwidth,	\
++						_gate, _postdiv, _flags)\
++	struct ccu_mp _struct = {					\
++		.enable	= _gate,					\
++		.m	= _SUNXI_CCU_DIV(_mshift, _mwidth),		\
++		.p	= _SUNXI_CCU_DIV(_pshift, _pwidth),		\
++		.mux	= _SUNXI_CCU_MUX(_muxshift, _muxwidth),		\
++		.fixed_post_div	= _postdiv,				\
++		.common	= {						\
++			.reg		= _reg,				\
++			.features	= CCU_FEATURE_FIXED_POSTDIV,	\
++			.hw.init	= CLK_HW_INIT_PARENTS_DATA(_name, \
++							_parents,	\
++							&ccu_mp_ops,	\
++							_flags),	\
++		}							\
++	}
++
+ #define SUNXI_CCU_MP_WITH_MUX_GATE(_struct, _name, _parents, _reg,	\
+ 				   _mshift, _mwidth,			\
+ 				   _pshift, _pwidth,			\
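
CCU_FEATURE_FIXED_POSTDIV makes the sunxi-ng core divide the computed
rate by one more constant factor after the programmable M and P fields.
For the D1 MMC clocks above this models the MMC block's internal
divide-by-two between the CCU output and the card clock, so the rate
reported to consumers matches what the card actually sees. Roughly, per
sunxi-ng conventions (m = M-field + 1, p = 1 << P-field):

    rate = parent_rate / m / p / fixed_post_div;   /* post-div = 2 here */
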
+diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c
+index 110347707ff980..8592910710d179 100644
+--- a/drivers/clocksource/mips-gic-timer.c
++++ b/drivers/clocksource/mips-gic-timer.c
+@@ -115,6 +115,9 @@ static void gic_update_frequency(void *data)
+ 
+ static int gic_starting_cpu(unsigned int cpu)
+ {
++	/* Ensure the GIC counter is running */
++	clear_gic_config(GIC_CONFIG_COUNTSTOP);
++
+ 	gic_clockevent_cpu_init(cpu, this_cpu_ptr(&gic_clockevent_device));
+ 	return 0;
+ }
+@@ -252,9 +255,6 @@ static int __init gic_clocksource_of_init(struct device_node *node)
+ 			pr_warn("Unable to register clock notifier\n");
+ 	}
+ 
+-	/* And finally start the counter */
+-	clear_gic_config(GIC_CONFIG_COUNTSTOP);
+-
+ 	/*
+ 	 * It's safe to use the MIPS GIC timer as a sched clock source only if
+ 	 * its ticks are stable, which is true on either the platforms with
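
Clearing GIC_CONFIG_COUNTSTOP from the hotplug "starting" callback
instead of the one-shot clocksource init means the counter is re-enabled
on every CPU online path, so it is running before each CPU's clockevent
is set up even when firmware or a previous kernel (kexec) left it
stopped. Schematically, the registration that makes this run per-CPU
(simplified sketch of the driver's setup):

    cpuhp_setup_state(CPUHP_AP_MIPS_GIC_TIMER_STARTING,
                      "clockevents/mips/gic/timer:starting",
                      gic_starting_cpu, NULL);
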
+diff --git a/drivers/clocksource/timer-riscv.c b/drivers/clocksource/timer-riscv.c
+index 48ce50c5f5e68e..4d7cf338824a3b 100644
+--- a/drivers/clocksource/timer-riscv.c
++++ b/drivers/clocksource/timer-riscv.c
+@@ -126,7 +126,13 @@ static int riscv_timer_starting_cpu(unsigned int cpu)
+ 
+ static int riscv_timer_dying_cpu(unsigned int cpu)
+ {
++	/*
++	 * Stop the timer when the cpu is going to be offline otherwise
++	 * the timer interrupt may be pending while performing power-down.
++	 */
++	riscv_clock_event_stop();
+ 	disable_percpu_irq(riscv_clock_event_irq);
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
+index 9db5354fdb0271..7a16d193222866 100644
+--- a/drivers/cpufreq/amd-pstate.c
++++ b/drivers/cpufreq/amd-pstate.c
+@@ -696,7 +696,6 @@ static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state)
+ 		pr_err("Boost mode is not supported by this processor or SBIOS\n");
+ 		return -EOPNOTSUPP;
+ 	}
+-	guard(mutex)(&amd_pstate_driver_lock);
+ 
+ 	ret = amd_pstate_cpu_boost_update(policy, state);
+ 	WRITE_ONCE(cpudata->boost_state, !ret ? state : false);
+diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
+index 78ad3221fe077e..67bac12d4d55b8 100644
+--- a/drivers/cpufreq/cpufreq-dt-platdev.c
++++ b/drivers/cpufreq/cpufreq-dt-platdev.c
+@@ -172,6 +172,7 @@ static const struct of_device_id blocklist[] __initconst = {
+ 	{ .compatible = "qcom,sm8350", },
+ 	{ .compatible = "qcom,sm8450", },
+ 	{ .compatible = "qcom,sm8550", },
++	{ .compatible = "qcom,sm8650", },
+ 
+ 	{ .compatible = "st,stih407", },
+ 	{ .compatible = "st,stih410", },
+diff --git a/drivers/cpufreq/tegra186-cpufreq.c b/drivers/cpufreq/tegra186-cpufreq.c
+index 7b8fcfa55038bc..4e5b6f9a56d1b2 100644
+--- a/drivers/cpufreq/tegra186-cpufreq.c
++++ b/drivers/cpufreq/tegra186-cpufreq.c
+@@ -73,11 +73,18 @@ static int tegra186_cpufreq_init(struct cpufreq_policy *policy)
+ {
+ 	struct tegra186_cpufreq_data *data = cpufreq_get_driver_data();
+ 	unsigned int cluster = data->cpus[policy->cpu].bpmp_cluster_id;
++	u32 cpu;
+ 
+ 	policy->freq_table = data->clusters[cluster].table;
+ 	policy->cpuinfo.transition_latency = 300 * 1000;
+ 	policy->driver_data = NULL;
+ 
++	/* set same policy for all cpus in a cluster */
++	for (cpu = 0; cpu < ARRAY_SIZE(tegra186_cpus); cpu++) {
++		if (data->cpus[cpu].bpmp_cluster_id == cluster)
++			cpumask_set_cpu(cpu, policy->cpus);
++	}
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
+index f3c9d49f0f2a52..97ffadc7e57a64 100644
+--- a/drivers/cpuidle/governors/menu.c
++++ b/drivers/cpuidle/governors/menu.c
+@@ -239,8 +239,19 @@ static unsigned int get_typical_interval(struct menu_device *data)
+ 	 * This can deal with workloads that have long pauses interspersed
+ 	 * with sporadic activity with a bunch of short pauses.
+ 	 */
+-	if ((divisor * 4) <= INTERVALS * 3)
++	if (divisor * 4 <= INTERVALS * 3) {
++		/*
++		 * If there are sufficiently many data points still under
++		 * consideration after the outliers have been eliminated,
++		 * returning without a prediction would be a mistake because it
++		 * is likely that the next interval will not exceed the current
++		 * maximum, so return the latter in that case.
++		 */
++		if (divisor >= INTERVALS / 2)
++			return max;
++
+ 		return UINT_MAX;
++	}
+ 
+ 	thresh = max - 1;
+ 	goto again;
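
A worked example of the new branch, with INTERVALS = 8 (the menu
governor's sample-buffer size): the enclosing test fires once
divisor * 4 <= 24, i.e. once two or more of the eight samples have been
discarded as outliers. If divisor = 5, then 5 >= 8 / 2 = 4, so a
majority of the samples survived and their maximum is returned as the
prediction; if divisor = 3, fewer than half remain, and UINT_MAX still
signals "no usable prediction".
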
+diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c
+index 5387c68f3c9df1..42624410703729 100644
+--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c
++++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c
+@@ -264,9 +264,10 @@ static int cpt_process_ccode(struct otx2_cptlfs_info *lfs,
+ 				break;
+ 			}
+ 
+-			dev_err(&pdev->dev,
+-				"Request failed with software error code 0x%x\n",
+-				cpt_status->s.uc_compcode);
++			pr_debug("Request failed with software error code 0x%x: algo = %s driver = %s\n",
++				 cpt_status->s.uc_compcode,
++				 info->req->areq->tfm->__crt_alg->cra_name,
++				 info->req->areq->tfm->__crt_alg->cra_driver_name);
+ 			otx2_cpt_dump_sg_list(pdev, info->req);
+ 			break;
+ 		}
+diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
+index 77a6301f37f0af..29c0c69d5905dc 100644
+--- a/drivers/crypto/mxs-dcp.c
++++ b/drivers/crypto/mxs-dcp.c
+@@ -265,12 +265,12 @@ static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
+ 		    MXS_DCP_CONTROL0_INTERRUPT |
+ 		    MXS_DCP_CONTROL0_ENABLE_CIPHER;
+ 
+-	if (key_referenced)
+-		/* Set OTP key bit to select the key via KEY_SELECT. */
+-		desc->control0 |= MXS_DCP_CONTROL0_OTP_KEY;
+-	else
++	if (!key_referenced)
+ 		/* Payload contains the key. */
+ 		desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY;
++	else if (actx->key[0] == DCP_PAES_KEY_OTP)
++		/* Set OTP key bit to select the key via KEY_SELECT. */
++		desc->control0 |= MXS_DCP_CONTROL0_OTP_KEY;
+ 
+ 	if (rctx->enc)
+ 		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_ENCRYPT;
+diff --git a/drivers/dma/fsl-edma-main.c b/drivers/dma/fsl-edma-main.c
+index 27645606f900b8..4794d58dab5567 100644
+--- a/drivers/dma/fsl-edma-main.c
++++ b/drivers/dma/fsl-edma-main.c
+@@ -56,7 +56,7 @@ static irqreturn_t fsl_edma3_tx_handler(int irq, void *dev_id)
+ 
+ 	intr = edma_readl_chreg(fsl_chan, ch_int);
+ 	if (!intr)
+-		return IRQ_HANDLED;
++		return IRQ_NONE;
+ 
+ 	edma_writel_chreg(fsl_chan, 1, ch_int);
+ 
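
Returning IRQ_NONE when the channel's status register is clear is more
than cosmetic: it lets the core offer a shared line to other handlers
and keeps the spurious-interrupt detector working, instead of silently
claiming interrupts that were never this channel's. Minimal handler
shape, names hypothetical:

    static irqreturn_t my_tx_handler(int irq, void *dev_id)
    {
            struct my_chan *chan = dev_id;
            u32 intr = my_read_int_status(chan);

            if (!intr)
                    return IRQ_NONE;        /* not our interrupt */

            my_ack_int(chan, intr);
            /* ... complete finished descriptors ... */
            return IRQ_HANDLED;
    }
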
+diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
+index 57f1bf2ab20be0..22aa2bab3693c4 100644
+--- a/drivers/dma/idxd/cdev.c
++++ b/drivers/dma/idxd/cdev.c
+@@ -412,6 +412,9 @@ static int idxd_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
+ 	if (!idxd->user_submission_safe && !capable(CAP_SYS_RAWIO))
+ 		return -EPERM;
+ 
++	if (current->mm != ctx->mm)
++		return -EPERM;
++
+ 	rc = check_vma(wq, vma, __func__);
+ 	if (rc < 0)
+ 		return rc;
+@@ -478,6 +481,9 @@ static ssize_t idxd_cdev_write(struct file *filp, const char __user *buf, size_t
+ 	ssize_t written = 0;
+ 	int i;
+ 
++	if (current->mm != ctx->mm)
++		return -EPERM;
++
+ 	for (i = 0; i < len/sizeof(struct dsa_hw_desc); i++) {
+ 		int rc = idxd_submit_user_descriptor(ctx, udesc + i);
+ 
+@@ -498,6 +504,9 @@ static __poll_t idxd_cdev_poll(struct file *filp,
+ 	struct idxd_device *idxd = wq->idxd;
+ 	__poll_t out = 0;
+ 
++	if (current->mm != ctx->mm)
++		return POLLNVAL;
++
+ 	poll_wait(filp, &wq->err_queue, wait);
+ 	spin_lock(&idxd->dev_lock);
+ 	if (idxd->sw_err.valid)
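
All three file operations now check that the caller's mm is the one the
context was opened with: an idxd user context is tied to the opener's
address space (its PASID binding), so an fd leaked to another process
via fork() or fd passing must not be usable there. The guard in a
generic, hypothetical form:

    static int my_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
    {
            struct my_ctx *ctx = filp->private_data;

            /* reject callers running in a different address space */
            if (current->mm != ctx->mm)
                    return -EPERM;

            /* ... map the device portal ... */
            return 0;
    }
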
+diff --git a/drivers/dpll/dpll_core.c b/drivers/dpll/dpll_core.c
+index 1877201d1aa9fe..20bdc52f63a503 100644
+--- a/drivers/dpll/dpll_core.c
++++ b/drivers/dpll/dpll_core.c
+@@ -443,8 +443,11 @@ static void dpll_pin_prop_free(struct dpll_pin_properties *prop)
+ static int dpll_pin_prop_dup(const struct dpll_pin_properties *src,
+ 			     struct dpll_pin_properties *dst)
+ {
++	if (WARN_ON(src->freq_supported && !src->freq_supported_num))
++		return -EINVAL;
++
+ 	memcpy(dst, src, sizeof(*dst));
+-	if (src->freq_supported && src->freq_supported_num) {
++	if (src->freq_supported) {
+ 		size_t freq_size = src->freq_supported_num *
+ 				   sizeof(*src->freq_supported);
+ 		dst->freq_supported = kmemdup(src->freq_supported,
+diff --git a/drivers/edac/ie31200_edac.c b/drivers/edac/ie31200_edac.c
+index 56be8ef40f376b..e3635fba63b493 100644
+--- a/drivers/edac/ie31200_edac.c
++++ b/drivers/edac/ie31200_edac.c
+@@ -405,10 +405,9 @@ static int ie31200_probe1(struct pci_dev *pdev, int dev_idx)
+ 	int i, j, ret;
+ 	struct mem_ctl_info *mci = NULL;
+ 	struct edac_mc_layer layers[2];
+-	struct dimm_data dimm_info[IE31200_CHANNELS][IE31200_DIMMS_PER_CHANNEL];
+ 	void __iomem *window;
+ 	struct ie31200_priv *priv;
+-	u32 addr_decode, mad_offset;
++	u32 addr_decode[IE31200_CHANNELS], mad_offset;
+ 
+ 	/*
+ 	 * Kaby Lake, Coffee Lake seem to work like Skylake. Please re-visit
+@@ -466,19 +465,10 @@ static int ie31200_probe1(struct pci_dev *pdev, int dev_idx)
+ 		mad_offset = IE31200_MAD_DIMM_0_OFFSET;
+ 	}
+ 
+-	/* populate DIMM info */
+ 	for (i = 0; i < IE31200_CHANNELS; i++) {
+-		addr_decode = readl(window + mad_offset +
++		addr_decode[i] = readl(window + mad_offset +
+ 					(i * 4));
+-		edac_dbg(0, "addr_decode: 0x%x\n", addr_decode);
+-		for (j = 0; j < IE31200_DIMMS_PER_CHANNEL; j++) {
+-			populate_dimm_info(&dimm_info[i][j], addr_decode, j,
+-					   skl);
+-			edac_dbg(0, "size: 0x%x, rank: %d, width: %d\n",
+-				 dimm_info[i][j].size,
+-				 dimm_info[i][j].dual_rank,
+-				 dimm_info[i][j].x16_width);
+-		}
++		edac_dbg(0, "addr_decode: 0x%x\n", addr_decode[i]);
+ 	}
+ 
+ 	/*
+@@ -489,14 +479,22 @@ static int ie31200_probe1(struct pci_dev *pdev, int dev_idx)
+ 	 */
+ 	for (i = 0; i < IE31200_DIMMS_PER_CHANNEL; i++) {
+ 		for (j = 0; j < IE31200_CHANNELS; j++) {
++			struct dimm_data dimm_info;
+ 			struct dimm_info *dimm;
+ 			unsigned long nr_pages;
+ 
+-			nr_pages = IE31200_PAGES(dimm_info[j][i].size, skl);
++			populate_dimm_info(&dimm_info, addr_decode[j], i,
++					   skl);
++			edac_dbg(0, "size: 0x%x, rank: %d, width: %d\n",
++				 dimm_info.size,
++				 dimm_info.dual_rank,
++				 dimm_info.x16_width);
++
++			nr_pages = IE31200_PAGES(dimm_info.size, skl);
+ 			if (nr_pages == 0)
+ 				continue;
+ 
+-			if (dimm_info[j][i].dual_rank) {
++			if (dimm_info.dual_rank) {
+ 				nr_pages = nr_pages / 2;
+ 				dimm = edac_get_dimm(mci, (i * 2) + 1, j, 0);
+ 				dimm->nr_pages = nr_pages;
+diff --git a/drivers/firmware/arm_ffa/bus.c b/drivers/firmware/arm_ffa/bus.c
+index dfda5ffc14db72..dea3eb741d95db 100644
+--- a/drivers/firmware/arm_ffa/bus.c
++++ b/drivers/firmware/arm_ffa/bus.c
+@@ -212,6 +212,7 @@ ffa_device_register(const struct ffa_partition_info *part_info,
+ 	dev = &ffa_dev->dev;
+ 	dev->bus = &ffa_bus_type;
+ 	dev->release = ffa_release_device;
++	dev->dma_mask = &dev->coherent_dma_mask;
+ 	dev_set_name(&ffa_dev->dev, "arm-ffa-%d", id);
+ 
+ 	ffa_dev->id = id;
+diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c
+index dce448687e28e7..47751b2c057ae6 100644
+--- a/drivers/firmware/arm_ffa/driver.c
++++ b/drivers/firmware/arm_ffa/driver.c
+@@ -150,6 +150,14 @@ static int ffa_version_check(u32 *version)
+ 		return -EOPNOTSUPP;
+ 	}
+ 
++	if (FFA_MAJOR_VERSION(ver.a0) > FFA_MAJOR_VERSION(FFA_DRIVER_VERSION)) {
++		pr_err("Incompatible v%d.%d! Latest supported v%d.%d\n",
++		       FFA_MAJOR_VERSION(ver.a0), FFA_MINOR_VERSION(ver.a0),
++		       FFA_MAJOR_VERSION(FFA_DRIVER_VERSION),
++		       FFA_MINOR_VERSION(FFA_DRIVER_VERSION));
++		return -EINVAL;
++	}
++
+ 	if (ver.a0 < FFA_MIN_VERSION) {
+ 		pr_err("Incompatible v%d.%d! Earliest supported v%d.%d\n",
+ 		       FFA_MAJOR_VERSION(ver.a0), FFA_MINOR_VERSION(ver.a0),
+@@ -1441,6 +1449,10 @@ static int ffa_setup_partitions(void)
+ 
+ 	kfree(pbuf);
+ 
++	/* Check if the host is already added as part of partition info */
++	if (xa_load(&drv_info->partition_info, drv_info->vm_id))
++		return 0;
++
+ 	/* Allocate for the host */
+ 	info = kzalloc(sizeof(*info), GFP_KERNEL);
+ 	if (!info) {
+diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c
+index 782c9bec8361c5..73a6ab4a224d73 100644
+--- a/drivers/firmware/arm_scmi/bus.c
++++ b/drivers/firmware/arm_scmi/bus.c
+@@ -42,7 +42,7 @@ static atomic_t scmi_syspower_registered = ATOMIC_INIT(0);
+  * This helper let an SCMI driver request specific devices identified by the
+  * @id_table to be created for each active SCMI instance.
+  *
+- * The requested device name MUST NOT be already existent for any protocol;
++ * The requested device name MUST NOT be already existent for this protocol;
+  * at first the freshly requested @id_table is annotated in the IDR table
+  * @scmi_requested_devices and then the requested device is advertised to any
+  * registered party via the @scmi_requested_devices_nh notification chain.
+@@ -52,7 +52,6 @@ static atomic_t scmi_syspower_registered = ATOMIC_INIT(0);
+ static int scmi_protocol_device_request(const struct scmi_device_id *id_table)
+ {
+ 	int ret = 0;
+-	unsigned int id = 0;
+ 	struct list_head *head, *phead = NULL;
+ 	struct scmi_requested_dev *rdev;
+ 
+@@ -67,19 +66,13 @@ static int scmi_protocol_device_request(const struct scmi_device_id *id_table)
+ 	}
+ 
+ 	/*
+-	 * Search for the matching protocol rdev list and then search
+-	 * of any existent equally named device...fails if any duplicate found.
++	 * Find the matching protocol rdev list and then search of any
++	 * existent equally named device...fails if any duplicate found.
+ 	 */
+ 	mutex_lock(&scmi_requested_devices_mtx);
+-	idr_for_each_entry(&scmi_requested_devices, head, id) {
+-		if (!phead) {
+-			/* A list found registered in the IDR is never empty */
+-			rdev = list_first_entry(head, struct scmi_requested_dev,
+-						node);
+-			if (rdev->id_table->protocol_id ==
+-			    id_table->protocol_id)
+-				phead = head;
+-		}
++	phead = idr_find(&scmi_requested_devices, id_table->protocol_id);
++	if (phead) {
++		head = phead;
+ 		list_for_each_entry(rdev, head, node) {
+ 			if (!strcmp(rdev->id_table->name, id_table->name)) {
+ 				pr_err("Ignoring duplicate request [%d] %s\n",
+diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
+index add8acf66a9c7c..5578158f13750c 100644
+--- a/drivers/firmware/xilinx/zynqmp.c
++++ b/drivers/firmware/xilinx/zynqmp.c
+@@ -1012,17 +1012,13 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_fpga_get_status);
+ int zynqmp_pm_fpga_get_config_status(u32 *value)
+ {
+ 	u32 ret_payload[PAYLOAD_ARG_CNT];
+-	u32 buf, lower_addr, upper_addr;
+ 	int ret;
+ 
+ 	if (!value)
+ 		return -EINVAL;
+ 
+-	lower_addr = lower_32_bits((u64)&buf);
+-	upper_addr = upper_32_bits((u64)&buf);
+-
+ 	ret = zynqmp_pm_invoke_fn(PM_FPGA_READ, ret_payload, 4,
+-				  XILINX_ZYNQMP_PM_FPGA_CONFIG_STAT_OFFSET, lower_addr, upper_addr,
++				  XILINX_ZYNQMP_PM_FPGA_CONFIG_STAT_OFFSET, 0, 0,
+ 				  XILINX_ZYNQMP_PM_FPGA_READ_CONFIG_REG);
+ 
+ 	*value = ret_payload[1];
+diff --git a/drivers/fpga/altera-cvp.c b/drivers/fpga/altera-cvp.c
+index 6b091443244530..5af0bd33890c0b 100644
+--- a/drivers/fpga/altera-cvp.c
++++ b/drivers/fpga/altera-cvp.c
+@@ -52,7 +52,7 @@
+ /* V2 Defines */
+ #define VSE_CVP_TX_CREDITS		0x49	/* 8bit */
+ 
+-#define V2_CREDIT_TIMEOUT_US		20000
++#define V2_CREDIT_TIMEOUT_US		40000
+ #define V2_CHECK_CREDIT_US		10
+ #define V2_POLL_TIMEOUT_US		1000000
+ #define V2_USER_TIMEOUT_US		500000
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+index f9d1194484423a..581fe1a48f376c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+@@ -192,7 +192,7 @@ int kfd_debugfs_kfd_mem_limits(struct seq_file *m, void *data);
+ #if IS_ENABLED(CONFIG_HSA_AMD)
+ bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm);
+ struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f);
+-int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo);
++void amdgpu_amdkfd_remove_all_eviction_fences(struct amdgpu_bo *bo);
+ int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni,
+ 				unsigned long cur_seq, struct kgd_mem *mem);
+ int amdgpu_amdkfd_bo_validate_and_fence(struct amdgpu_bo *bo,
+@@ -212,9 +212,8 @@ struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f)
+ }
+ 
+ static inline
+-int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
++void amdgpu_amdkfd_remove_all_eviction_fences(struct amdgpu_bo *bo)
+ {
+-	return 0;
+ }
+ 
+ static inline
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+index fa572ba7f9fc1c..1465b3adacb0af 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+@@ -370,40 +370,32 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
+ 	return 0;
+ }
+ 
+-int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
++/**
++ * amdgpu_amdkfd_remove_all_eviction_fences - Remove all eviction fences
++ * @bo: the BO where to remove the evictions fences from.
++ *
++ * This functions should only be used on release when all references to the BO
++ * are already dropped. We remove the eviction fence from the private copy of
++ * the dma_resv object here since that is what is used during release to
++ * determine of the BO is idle or not.
++ */
++void amdgpu_amdkfd_remove_all_eviction_fences(struct amdgpu_bo *bo)
+ {
+-	struct amdgpu_bo *root = bo;
+-	struct amdgpu_vm_bo_base *vm_bo;
+-	struct amdgpu_vm *vm;
+-	struct amdkfd_process_info *info;
+-	struct amdgpu_amdkfd_fence *ef;
+-	int ret;
+-
+-	/* we can always get vm_bo from root PD bo.*/
+-	while (root->parent)
+-		root = root->parent;
++	struct dma_resv *resv = &bo->tbo.base._resv;
++	struct dma_fence *fence, *stub;
++	struct dma_resv_iter cursor;
+ 
+-	vm_bo = root->vm_bo;
+-	if (!vm_bo)
+-		return 0;
++	dma_resv_assert_held(resv);
+ 
+-	vm = vm_bo->vm;
+-	if (!vm)
+-		return 0;
+-
+-	info = vm->process_info;
+-	if (!info || !info->eviction_fence)
+-		return 0;
+-
+-	ef = container_of(dma_fence_get(&info->eviction_fence->base),
+-			struct amdgpu_amdkfd_fence, base);
+-
+-	BUG_ON(!dma_resv_trylock(bo->tbo.base.resv));
+-	ret = amdgpu_amdkfd_remove_eviction_fence(bo, ef);
+-	dma_resv_unlock(bo->tbo.base.resv);
++	stub = dma_fence_get_stub();
++	dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, fence) {
++		if (!to_amdgpu_amdkfd_fence(fence))
++			continue;
+ 
+-	dma_fence_put(&ef->base);
+-	return ret;
++		dma_resv_replace_fences(resv, fence->context, stub,
++					DMA_RESV_USAGE_BOOKKEEP);
++	}
++	dma_fence_put(stub);
+ }
+ 
+ static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index cb102ee71d04c6..ca0411c9500e7c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -170,6 +170,24 @@ static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
+ static DEVICE_ATTR(pcie_replay_count, 0444,
+ 		amdgpu_device_get_pcie_replay_count, NULL);
+ 
++static int amdgpu_device_attr_sysfs_init(struct amdgpu_device *adev)
++{
++	int ret = 0;
++
++	if (!amdgpu_sriov_vf(adev))
++		ret = sysfs_create_file(&adev->dev->kobj,
++					&dev_attr_pcie_replay_count.attr);
++
++	return ret;
++}
++
++static void amdgpu_device_attr_sysfs_fini(struct amdgpu_device *adev)
++{
++	if (!amdgpu_sriov_vf(adev))
++		sysfs_remove_file(&adev->dev->kobj,
++				  &dev_attr_pcie_replay_count.attr);
++}
++
+ static ssize_t amdgpu_sysfs_reg_state_get(struct file *f, struct kobject *kobj,
+ 					  struct bin_attribute *attr, char *buf,
+ 					  loff_t ppos, size_t count)
+@@ -4030,11 +4048,6 @@ static bool amdgpu_device_check_iommu_remap(struct amdgpu_device *adev)
+ }
+ #endif
+ 
+-static const struct attribute *amdgpu_dev_attributes[] = {
+-	&dev_attr_pcie_replay_count.attr,
+-	NULL
+-};
+-
+ static void amdgpu_device_set_mcbp(struct amdgpu_device *adev)
+ {
+ 	if (amdgpu_mcbp == 1)
+@@ -4477,7 +4490,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
+ 	} else
+ 		adev->ucode_sysfs_en = true;
+ 
+-	r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
++	r = amdgpu_device_attr_sysfs_init(adev);
+ 	if (r)
+ 		dev_err(adev->dev, "Could not create amdgpu device attr\n");
+ 
+@@ -4614,7 +4627,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
+ 		amdgpu_pm_sysfs_fini(adev);
+ 	if (adev->ucode_sysfs_en)
+ 		amdgpu_ucode_sysfs_fini(adev);
+-	sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
++	amdgpu_device_attr_sysfs_fini(adev);
+ 	amdgpu_fru_sysfs_fini(adev);
+ 
+ 	amdgpu_reg_state_sysfs_fini(adev);
+@@ -4664,6 +4677,9 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
+ 	kfree(adev->fru_info);
+ 	adev->fru_info = NULL;
+ 
++	kfree(adev->xcp_mgr);
++	adev->xcp_mgr = NULL;
++
+ 	px = amdgpu_device_supports_px(adev_to_drm(adev));
+ 
+ 	if (px || (!dev_is_removable(&adev->pdev->dev) &&
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+index ca8091fd3a24f4..018240a2ab96a4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+@@ -111,8 +111,7 @@
+ #include "amdgpu_isp.h"
+ #endif
+ 
+-#define FIRMWARE_IP_DISCOVERY "amdgpu/ip_discovery.bin"
+-MODULE_FIRMWARE(FIRMWARE_IP_DISCOVERY);
++MODULE_FIRMWARE("amdgpu/ip_discovery.bin");
+ 
+ #define mmIP_DISCOVERY_VERSION  0x16A00
+ #define mmRCC_CONFIG_MEMSIZE	0xde3
+@@ -295,21 +294,13 @@ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
+ 	return ret;
+ }
+ 
+-static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev, uint8_t *binary)
++static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev,
++							uint8_t *binary,
++							const char *fw_name)
+ {
+ 	const struct firmware *fw;
+-	const char *fw_name;
+ 	int r;
+ 
+-	switch (amdgpu_discovery) {
+-	case 2:
+-		fw_name = FIRMWARE_IP_DISCOVERY;
+-		break;
+-	default:
+-		dev_warn(adev->dev, "amdgpu_discovery is not set properly\n");
+-		return -EINVAL;
+-	}
+-
+ 	r = request_firmware(&fw, fw_name, adev->dev);
+ 	if (r) {
+ 		dev_err(adev->dev, "can't load firmware \"%s\"\n",
+@@ -402,10 +393,19 @@ static int amdgpu_discovery_verify_npsinfo(struct amdgpu_device *adev,
+ 	return 0;
+ }
+ 
++static const char *amdgpu_discovery_get_fw_name(struct amdgpu_device *adev)
++{
++	if (amdgpu_discovery == 2)
++		return "amdgpu/ip_discovery.bin";
++
++	return NULL;
++}
++
+ static int amdgpu_discovery_init(struct amdgpu_device *adev)
+ {
+ 	struct table_info *info;
+ 	struct binary_header *bhdr;
++	const char *fw_name;
+ 	uint16_t offset;
+ 	uint16_t size;
+ 	uint16_t checksum;
+@@ -417,9 +417,10 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
+ 		return -ENOMEM;
+ 
+ 	/* Read from file if it is the preferred option */
+-	if (amdgpu_discovery == 2) {
++	fw_name = amdgpu_discovery_get_fw_name(adev);
++	if (fw_name != NULL) {
+ 		dev_info(adev->dev, "use ip discovery information from file");
+-		r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin);
++		r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin, fw_name);
+ 
+ 		if (r) {
+ 			dev_err(adev->dev, "failed to read ip discovery binary from file\n");
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+index 2f90fff1b9ddc0..e63a32c2144757 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+@@ -42,6 +42,29 @@
+ #include <linux/dma-fence-array.h>
+ #include <linux/pci-p2pdma.h>
+ 
++static const struct dma_buf_attach_ops amdgpu_dma_buf_attach_ops;
++
++/**
++ * dma_buf_attach_adev - Helper to get adev of an attachment
++ *
++ * @attach: attachment
++ *
++ * Returns:
++ * A struct amdgpu_device * if the attaching device is an amdgpu device or
++ * partition, NULL otherwise.
++ */
++static struct amdgpu_device *dma_buf_attach_adev(struct dma_buf_attachment *attach)
++{
++	if (attach->importer_ops == &amdgpu_dma_buf_attach_ops) {
++		struct drm_gem_object *obj = attach->importer_priv;
++		struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
++
++		return amdgpu_ttm_adev(bo->tbo.bdev);
++	}
++
++	return NULL;
++}
++
+ /**
+  * amdgpu_dma_buf_attach - &dma_buf_ops.attach implementation
+  *
+@@ -53,11 +76,13 @@
+ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
+ 				 struct dma_buf_attachment *attach)
+ {
++	struct amdgpu_device *attach_adev = dma_buf_attach_adev(attach);
+ 	struct drm_gem_object *obj = dmabuf->priv;
+ 	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+ 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ 
+-	if (pci_p2pdma_distance(adev->pdev, attach->dev, false) < 0)
++	if (!amdgpu_dmabuf_is_xgmi_accessible(attach_adev, bo) &&
++	    pci_p2pdma_distance(adev->pdev, attach->dev, false) < 0)
+ 		attach->peer2peer = false;
+ 
+ 	return 0;
+@@ -456,6 +481,9 @@ bool amdgpu_dmabuf_is_xgmi_accessible(struct amdgpu_device *adev,
+ 	struct drm_gem_object *obj = &bo->tbo.base;
+ 	struct drm_gem_object *gobj;
+ 
++	if (!adev)
++		return false;
++
+ 	if (obj->import_attach) {
+ 		struct dma_buf *dma_buf = obj->import_attach->dmabuf;
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index 1b479bd8513548..93c3de2d27d3ac 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -172,6 +172,7 @@ uint amdgpu_sdma_phase_quantum = 32;
+ char *amdgpu_disable_cu;
+ char *amdgpu_virtual_display;
+ bool enforce_isolation;
++int amdgpu_modeset = -1;
+ 
+ /* Specifies the default granularity for SVM, used in buffer
+  * migration and restoration of backing memory when handling
+@@ -1037,6 +1038,13 @@ module_param_named(user_partt_mode, amdgpu_user_partt_mode, uint, 0444);
+ module_param(enforce_isolation, bool, 0444);
+ MODULE_PARM_DESC(enforce_isolation, "enforce process isolation between graphics and compute . enforce_isolation = on");
+ 
++/**
++ * DOC: modeset (int)
++ * Override nomodeset (1 = override, -1 = auto). The default is -1 (auto).
++ */
++MODULE_PARM_DESC(modeset, "Override nomodeset (1 = enable, -1 = auto)");
++module_param_named(modeset, amdgpu_modeset, int, 0444);
++
+ /**
+  * DOC: seamless (int)
+  * Seamless boot will keep the image on the screen during the boot process.
+@@ -2248,6 +2256,12 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
+ 	int ret, retry = 0, i;
+ 	bool supports_atomic = false;
+ 
++	if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA ||
++	    (pdev->class >> 8) == PCI_CLASS_DISPLAY_OTHER) {
++		if (drm_firmware_drivers_only() && amdgpu_modeset == -1)
++			return -EINVAL;
++	}
++
+ 	/* skip devices which are owned by radeon */
+ 	for (i = 0; i < ARRAY_SIZE(amdgpu_unsupported_pciidlist); i++) {
+ 		if (amdgpu_unsupported_pciidlist[i] == pdev->device)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+index 508f02eb0cf8f9..7de10208e8dde3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+@@ -78,6 +78,9 @@ struct amdgpu_ih_ring {
+ #define amdgpu_ih_ts_after(t1, t2) \
+ 		(((int64_t)((t2) << 16) - (int64_t)((t1) << 16)) > 0LL)
+ 
++#define amdgpu_ih_ts_after_or_equal(t1, t2) \
++		(((int64_t)((t2) << 16) - (int64_t)((t1) << 16)) >= 0LL)
++
+ /* provided by the ih block */
+ struct amdgpu_ih_funcs {
+ 	/* ring read/write ptr handling, called from interrupt context */
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+index 4c4bdc4f51b294..fc588ef598c094 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -1246,28 +1246,36 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
+ 	if (abo->kfd_bo)
+ 		amdgpu_amdkfd_release_notify(abo);
+ 
+-	/* We only remove the fence if the resv has individualized. */
+-	WARN_ON_ONCE(bo->type == ttm_bo_type_kernel
+-			&& bo->base.resv != &bo->base._resv);
+-	if (bo->base.resv == &bo->base._resv)
+-		amdgpu_amdkfd_remove_fence_on_pt_pd_bos(abo);
++	/*
++	 * We lock the private dma_resv object here and since the BO is about to
++	 * be released nobody else should have a pointer to it.
++	 * So when this locking here fails something is wrong with the reference
++	 * counting.
++	 */
++	if (WARN_ON_ONCE(!dma_resv_trylock(&bo->base._resv)))
++		return;
++
++	amdgpu_amdkfd_remove_all_eviction_fences(abo);
+ 
+ 	if (!bo->resource || bo->resource->mem_type != TTM_PL_VRAM ||
+ 	    !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE) ||
+ 	    adev->in_suspend || drm_dev_is_unplugged(adev_to_drm(adev)))
+-		return;
++		goto out;
+ 
+-	if (WARN_ON_ONCE(!dma_resv_trylock(bo->base.resv)))
+-		return;
++	r = dma_resv_reserve_fences(&bo->base._resv, 1);
++	if (r)
++		goto out;
+ 
+-	r = amdgpu_fill_buffer(abo, 0, bo->base.resv, &fence, true);
+-	if (!WARN_ON(r)) {
+-		amdgpu_vram_mgr_set_cleared(bo->resource);
+-		amdgpu_bo_fence(abo, fence, false);
+-		dma_fence_put(fence);
+-	}
++	r = amdgpu_fill_buffer(abo, 0, &bo->base._resv, &fence, true);
++	if (WARN_ON(r))
++		goto out;
++
++	amdgpu_vram_mgr_set_cleared(bo->resource);
++	dma_resv_add_fence(&bo->base._resv, fence, DMA_RESV_USAGE_KERNEL);
++	dma_fence_put(fence);
+ 
+-	dma_resv_unlock(bo->base.resv);
++out:
++	dma_resv_unlock(&bo->base._resv);
+ }
+ 
+ /**
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+index d70855d7c61c1d..48e30e5f833891 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+@@ -44,7 +44,7 @@
+ #include "amdgpu_securedisplay.h"
+ #include "amdgpu_atomfirmware.h"
+ 
+-#define AMD_VBIOS_FILE_MAX_SIZE_B      (1024*1024*3)
++#define AMD_VBIOS_FILE_MAX_SIZE_B      (1024*1024*16)
+ 
+ static int psp_load_smu_fw(struct psp_context *psp);
+ static int psp_rap_terminate(struct psp_context *psp);
+@@ -531,7 +531,6 @@ static int psp_sw_fini(void *handle)
+ {
+ 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ 	struct psp_context *psp = &adev->psp;
+-	struct psp_gfx_cmd_resp *cmd = psp->cmd;
+ 
+ 	psp_memory_training_fini(psp);
+ 
+@@ -541,8 +540,8 @@ static int psp_sw_fini(void *handle)
+ 	amdgpu_ucode_release(&psp->cap_fw);
+ 	amdgpu_ucode_release(&psp->toc_fw);
+ 
+-	kfree(cmd);
+-	cmd = NULL;
++	kfree(psp->cmd);
++	psp->cmd = NULL;
+ 
+ 	psp_free_shared_bufs(psp);
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
+index bb7b9b2eaac1a5..8da0bddab3d234 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
+@@ -383,6 +383,45 @@ int amdgpu_umc_fill_error_record(struct ras_err_data *err_data,
+ 	return 0;
+ }
+ 
++static int amdgpu_umc_loop_all_aid(struct amdgpu_device *adev, umc_func func,
++				   void *data)
++{
++	uint32_t umc_node_inst;
++	uint32_t node_inst;
++	uint32_t umc_inst;
++	uint32_t ch_inst;
++	int ret;
++
++	/*
++	 * This loop is done based on the following -
++	 * umc.active mask = mask of active umc instances across all nodes
++	 * umc.umc_inst_num = maximum number of umc instancess per node
++	 * umc.node_inst_num = maximum number of node instances
++	 * Channel instances are not assumed to be harvested.
++	 */
++	dev_dbg(adev->dev, "active umcs :%lx umc_inst per node: %d",
++		adev->umc.active_mask, adev->umc.umc_inst_num);
++	for_each_set_bit(umc_node_inst, &(adev->umc.active_mask),
++			 adev->umc.node_inst_num * adev->umc.umc_inst_num) {
++		node_inst = umc_node_inst / adev->umc.umc_inst_num;
++		umc_inst = umc_node_inst % adev->umc.umc_inst_num;
++		LOOP_UMC_CH_INST(ch_inst) {
++			dev_dbg(adev->dev,
++				"node_inst :%d umc_inst: %d ch_inst: %d",
++				node_inst, umc_inst, ch_inst);
++			ret = func(adev, node_inst, umc_inst, ch_inst, data);
++			if (ret) {
++				dev_err(adev->dev,
++					"Node %d umc %d ch %d func returns %d\n",
++					node_inst, umc_inst, ch_inst, ret);
++				return ret;
++			}
++		}
++	}
++
++	return 0;
++}
++
+ int amdgpu_umc_loop_channels(struct amdgpu_device *adev,
+ 			umc_func func, void *data)
+ {
+@@ -391,6 +430,9 @@ int amdgpu_umc_loop_channels(struct amdgpu_device *adev,
+ 	uint32_t ch_inst         = 0;
+ 	int ret = 0;
+ 
++	if (adev->aid_mask)
++		return amdgpu_umc_loop_all_aid(adev, func, data);
++
+ 	if (adev->umc.node_inst_num) {
+ 		LOOP_UMC_EACH_NODE_INST_AND_CH(node_inst, umc_inst, ch_inst) {
+ 			ret = func(adev, node_inst, umc_inst, ch_inst, data);
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+index 0357fea8ae1dff..1f06b22dbe7c63 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+@@ -63,6 +63,23 @@
+ #define regPC_CONFIG_CNTL_1		0x194d
+ #define regPC_CONFIG_CNTL_1_BASE_IDX	1
+ 
++#define regCP_GFX_MQD_CONTROL_DEFAULT                                             0x00000100
++#define regCP_GFX_HQD_VMID_DEFAULT                                                0x00000000
++#define regCP_GFX_HQD_QUEUE_PRIORITY_DEFAULT                                      0x00000000
++#define regCP_GFX_HQD_QUANTUM_DEFAULT                                             0x00000a01
++#define regCP_GFX_HQD_CNTL_DEFAULT                                                0x00a00000
++#define regCP_RB_DOORBELL_CONTROL_DEFAULT                                         0x00000000
++#define regCP_GFX_HQD_RPTR_DEFAULT                                                0x00000000
++
++#define regCP_HQD_EOP_CONTROL_DEFAULT                                             0x00000006
++#define regCP_HQD_PQ_DOORBELL_CONTROL_DEFAULT                                     0x00000000
++#define regCP_MQD_CONTROL_DEFAULT                                                 0x00000100
++#define regCP_HQD_PQ_CONTROL_DEFAULT                                              0x00308509
++#define regCP_HQD_PQ_DOORBELL_CONTROL_DEFAULT                                     0x00000000
++#define regCP_HQD_PQ_RPTR_DEFAULT                                                 0x00000000
++#define regCP_HQD_PERSISTENT_STATE_DEFAULT                                        0x0be05501
++#define regCP_HQD_IB_CONTROL_DEFAULT                                              0x00300000
++
+ MODULE_FIRMWARE("amdgpu/gc_11_0_0_pfp.bin");
+ MODULE_FIRMWARE("amdgpu/gc_11_0_0_me.bin");
+ MODULE_FIRMWARE("amdgpu/gc_11_0_0_mec.bin");
+@@ -3896,7 +3913,7 @@ static void gfx_v11_0_gfx_mqd_set_priority(struct amdgpu_device *adev,
+ 	if (prop->hqd_pipe_priority == AMDGPU_GFX_PIPE_PRIO_HIGH)
+ 		priority = 1;
+ 
+-	tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_QUEUE_PRIORITY);
++	tmp = regCP_GFX_HQD_QUEUE_PRIORITY_DEFAULT;
+ 	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUEUE_PRIORITY, PRIORITY_LEVEL, priority);
+ 	mqd->cp_gfx_hqd_queue_priority = tmp;
+ }
+@@ -3918,14 +3935,14 @@ static int gfx_v11_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
+ 	mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr);
+ 
+ 	/* set up mqd control */
+-	tmp = RREG32_SOC15(GC, 0, regCP_GFX_MQD_CONTROL);
++	tmp = regCP_GFX_MQD_CONTROL_DEFAULT;
+ 	tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, VMID, 0);
+ 	tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, PRIV_STATE, 1);
+ 	tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, CACHE_POLICY, 0);
+ 	mqd->cp_gfx_mqd_control = tmp;
+ 
+ 	/* set up gfx_hqd_vimd with 0x0 to indicate the ring buffer's vmid */
+-	tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_VMID);
++	tmp = regCP_GFX_HQD_VMID_DEFAULT;
+ 	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_VMID, VMID, 0);
+ 	mqd->cp_gfx_hqd_vmid = 0;
+ 
+@@ -3933,7 +3950,7 @@ static int gfx_v11_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
+ 	gfx_v11_0_gfx_mqd_set_priority(adev, mqd, prop);
+ 
+ 	/* set up time quantum */
+-	tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_QUANTUM);
++	tmp = regCP_GFX_HQD_QUANTUM_DEFAULT;
+ 	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUANTUM, QUANTUM_EN, 1);
+ 	mqd->cp_gfx_hqd_quantum = tmp;
+ 
+@@ -3955,7 +3972,7 @@ static int gfx_v11_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
+ 
+ 	/* set up the gfx_hqd_control, similar as CP_RB0_CNTL */
+ 	rb_bufsz = order_base_2(prop->queue_size / 4) - 1;
+-	tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_CNTL);
++	tmp = regCP_GFX_HQD_CNTL_DEFAULT;
+ 	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BUFSZ, rb_bufsz);
+ 	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BLKSZ, rb_bufsz - 2);
+ #ifdef __BIG_ENDIAN
+@@ -3964,7 +3981,7 @@ static int gfx_v11_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
+ 	mqd->cp_gfx_hqd_cntl = tmp;
+ 
+ 	/* set up cp_doorbell_control */
+-	tmp = RREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL);
++	tmp = regCP_RB_DOORBELL_CONTROL_DEFAULT;
+ 	if (prop->use_doorbell) {
+ 		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
+ 				    DOORBELL_OFFSET, prop->doorbell_index);
+@@ -3976,7 +3993,7 @@ static int gfx_v11_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
+ 	mqd->cp_rb_doorbell_control = tmp;
+ 
+ 	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
+-	mqd->cp_gfx_hqd_rptr = RREG32_SOC15(GC, 0, regCP_GFX_HQD_RPTR);
++	mqd->cp_gfx_hqd_rptr = regCP_GFX_HQD_RPTR_DEFAULT;
+ 
+ 	/* active the queue */
+ 	mqd->cp_gfx_hqd_active = 1;
+@@ -4062,14 +4079,14 @@ static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
+ 	mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
+ 
+ 	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
+-	tmp = RREG32_SOC15(GC, 0, regCP_HQD_EOP_CONTROL);
++	tmp = regCP_HQD_EOP_CONTROL_DEFAULT;
+ 	tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
+ 			(order_base_2(GFX11_MEC_HPD_SIZE / 4) - 1));
+ 
+ 	mqd->cp_hqd_eop_control = tmp;
+ 
+ 	/* enable doorbell? */
+-	tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL);
++	tmp = regCP_HQD_PQ_DOORBELL_CONTROL_DEFAULT;
+ 
+ 	if (prop->use_doorbell) {
+ 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
+@@ -4098,7 +4115,7 @@ static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
+ 	mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr);
+ 
+ 	/* set MQD vmid to 0 */
+-	tmp = RREG32_SOC15(GC, 0, regCP_MQD_CONTROL);
++	tmp = regCP_MQD_CONTROL_DEFAULT;
+ 	tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
+ 	mqd->cp_mqd_control = tmp;
+ 
+@@ -4108,7 +4125,7 @@ static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
+ 	mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
+ 
+ 	/* set up the HQD, this is similar to CP_RB0_CNTL */
+-	tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_CONTROL);
++	tmp = regCP_HQD_PQ_CONTROL_DEFAULT;
+ 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
+ 			    (order_base_2(prop->queue_size / 4) - 1));
+ 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
+@@ -4134,7 +4151,7 @@ static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
+ 	tmp = 0;
+ 	/* enable the doorbell if requested */
+ 	if (prop->use_doorbell) {
+-		tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL);
++		tmp = regCP_HQD_PQ_DOORBELL_CONTROL_DEFAULT;
+ 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
+ 				DOORBELL_OFFSET, prop->doorbell_index);
+ 
+@@ -4149,17 +4166,17 @@ static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
+ 	mqd->cp_hqd_pq_doorbell_control = tmp;
+ 
+ 	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
+-	mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR);
++	mqd->cp_hqd_pq_rptr = regCP_HQD_PQ_RPTR_DEFAULT;
+ 
+ 	/* set the vmid for the queue */
+ 	mqd->cp_hqd_vmid = 0;
+ 
+-	tmp = RREG32_SOC15(GC, 0, regCP_HQD_PERSISTENT_STATE);
++	tmp = regCP_HQD_PERSISTENT_STATE_DEFAULT;
+ 	tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x55);
+ 	mqd->cp_hqd_persistent_state = tmp;
+ 
+ 	/* set MIN_IB_AVAIL_SIZE */
+-	tmp = RREG32_SOC15(GC, 0, regCP_HQD_IB_CONTROL);
++	tmp = regCP_HQD_IB_CONTROL_DEFAULT;
+ 	tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
+ 	mqd->cp_hqd_ib_control = tmp;
+ 
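The gfx_v11 hunks seed each MQD field from a *_DEFAULT constant instead of an MMIO read, then patch individual bitfields. The update itself is the usual mask-and-shift idiom; a simplified standalone version (the real REG_SET_FIELD derives mask and shift from generated register headers, so the values below are only illustrative):

#include <stdint.h>
#include <stdio.h>

/* Illustrative field occupying bits [5:1] of a 32-bit register. */
#define FIELD_MASK	0x0000003eu
#define FIELD_SHIFT	1

static uint32_t reg_set_field(uint32_t reg, uint32_t val)
{
	return (reg & ~FIELD_MASK) | ((val << FIELD_SHIFT) & FIELD_MASK);
}

int main(void)
{
	uint32_t tmp = 0x00000100u;	/* a *_DEFAULT value */

	tmp = reg_set_field(tmp, 3);
	printf("0x%08x\n", tmp);	/* 0x00000106 */
	return 0;
}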
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
+index 241619ee10e4be..adcfcf594286fe 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
+@@ -52,6 +52,24 @@
+ 
+ #define RLCG_UCODE_LOADING_START_ADDRESS	0x00002000L
+ 
++#define regCP_GFX_MQD_CONTROL_DEFAULT                                             0x00000100
++#define regCP_GFX_HQD_VMID_DEFAULT                                                0x00000000
++#define regCP_GFX_HQD_QUEUE_PRIORITY_DEFAULT                                      0x00000000
++#define regCP_GFX_HQD_QUANTUM_DEFAULT                                             0x00000a01
++#define regCP_GFX_HQD_CNTL_DEFAULT                                                0x00f00000
++#define regCP_RB_DOORBELL_CONTROL_DEFAULT                                         0x00000000
++#define regCP_GFX_HQD_RPTR_DEFAULT                                                0x00000000
++
++#define regCP_HQD_EOP_CONTROL_DEFAULT                                             0x00000006
++#define regCP_HQD_PQ_DOORBELL_CONTROL_DEFAULT                                     0x00000000
++#define regCP_MQD_CONTROL_DEFAULT                                                 0x00000100
++#define regCP_HQD_PQ_CONTROL_DEFAULT                                              0x00308509
++#define regCP_HQD_PQ_DOORBELL_CONTROL_DEFAULT                                     0x00000000
++#define regCP_HQD_PQ_RPTR_DEFAULT                                                 0x00000000
++#define regCP_HQD_PERSISTENT_STATE_DEFAULT                                        0x0be05501
++#define regCP_HQD_IB_CONTROL_DEFAULT                                              0x00300000
++
++
+ MODULE_FIRMWARE("amdgpu/gc_12_0_0_pfp.bin");
+ MODULE_FIRMWARE("amdgpu/gc_12_0_0_me.bin");
+ MODULE_FIRMWARE("amdgpu/gc_12_0_0_mec.bin");
+@@ -2851,25 +2869,25 @@ static int gfx_v12_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
+ 	mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr);
+ 
+ 	/* set up mqd control */
+-	tmp = RREG32_SOC15(GC, 0, regCP_GFX_MQD_CONTROL);
++	tmp = regCP_GFX_MQD_CONTROL_DEFAULT;
+ 	tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, VMID, 0);
+ 	tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, PRIV_STATE, 1);
+ 	tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, CACHE_POLICY, 0);
+ 	mqd->cp_gfx_mqd_control = tmp;
+ 
+ 	/* set up gfx_hqd_vimd with 0x0 to indicate the ring buffer's vmid */
+-	tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_VMID);
++	tmp = regCP_GFX_HQD_VMID_DEFAULT;
+ 	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_VMID, VMID, 0);
+ 	mqd->cp_gfx_hqd_vmid = 0;
+ 
+ 	/* set up default queue priority level
+ 	 * 0x0 = low priority, 0x1 = high priority */
+-	tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_QUEUE_PRIORITY);
++	tmp = regCP_GFX_HQD_QUEUE_PRIORITY_DEFAULT;
+ 	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUEUE_PRIORITY, PRIORITY_LEVEL, 0);
+ 	mqd->cp_gfx_hqd_queue_priority = tmp;
+ 
+ 	/* set up time quantum */
+-	tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_QUANTUM);
++	tmp = regCP_GFX_HQD_QUANTUM_DEFAULT;
+ 	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUANTUM, QUANTUM_EN, 1);
+ 	mqd->cp_gfx_hqd_quantum = tmp;
+ 
+@@ -2891,7 +2909,7 @@ static int gfx_v12_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
+ 
+ 	/* set up the gfx_hqd_control, similar as CP_RB0_CNTL */
+ 	rb_bufsz = order_base_2(prop->queue_size / 4) - 1;
+-	tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_CNTL);
++	tmp = regCP_GFX_HQD_CNTL_DEFAULT;
+ 	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BUFSZ, rb_bufsz);
+ 	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BLKSZ, rb_bufsz - 2);
+ #ifdef __BIG_ENDIAN
+@@ -2900,7 +2918,7 @@ static int gfx_v12_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
+ 	mqd->cp_gfx_hqd_cntl = tmp;
+ 
+ 	/* set up cp_doorbell_control */
+-	tmp = RREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL);
++	tmp = regCP_RB_DOORBELL_CONTROL_DEFAULT;
+ 	if (prop->use_doorbell) {
+ 		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
+ 				    DOORBELL_OFFSET, prop->doorbell_index);
+@@ -2912,7 +2930,7 @@ static int gfx_v12_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
+ 	mqd->cp_rb_doorbell_control = tmp;
+ 
+ 	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
+-	mqd->cp_gfx_hqd_rptr = RREG32_SOC15(GC, 0, regCP_GFX_HQD_RPTR);
++	mqd->cp_gfx_hqd_rptr = regCP_GFX_HQD_RPTR_DEFAULT;
+ 
+ 	/* active the queue */
+ 	mqd->cp_gfx_hqd_active = 1;
+@@ -3007,14 +3025,14 @@ static int gfx_v12_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
+ 	mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
+ 
+ 	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
+-	tmp = RREG32_SOC15(GC, 0, regCP_HQD_EOP_CONTROL);
++	tmp = regCP_HQD_EOP_CONTROL_DEFAULT;
+ 	tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
+ 			(order_base_2(GFX12_MEC_HPD_SIZE / 4) - 1));
+ 
+ 	mqd->cp_hqd_eop_control = tmp;
+ 
+ 	/* enable doorbell? */
+-	tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL);
++	tmp = regCP_HQD_PQ_DOORBELL_CONTROL_DEFAULT;
+ 
+ 	if (prop->use_doorbell) {
+ 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
+@@ -3043,7 +3061,7 @@ static int gfx_v12_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
+ 	mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr);
+ 
+ 	/* set MQD vmid to 0 */
+-	tmp = RREG32_SOC15(GC, 0, regCP_MQD_CONTROL);
++	tmp = regCP_MQD_CONTROL_DEFAULT;
+ 	tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
+ 	mqd->cp_mqd_control = tmp;
+ 
+@@ -3053,7 +3071,7 @@ static int gfx_v12_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
+ 	mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
+ 
+ 	/* set up the HQD, this is similar to CP_RB0_CNTL */
+-	tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_CONTROL);
++	tmp = regCP_HQD_PQ_CONTROL_DEFAULT;
+ 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
+ 			    (order_base_2(prop->queue_size / 4) - 1));
+ 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
+@@ -3078,7 +3096,7 @@ static int gfx_v12_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
+ 	tmp = 0;
+ 	/* enable the doorbell if requested */
+ 	if (prop->use_doorbell) {
+-		tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL);
++		tmp = regCP_HQD_PQ_DOORBELL_CONTROL_DEFAULT;
+ 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
+ 				DOORBELL_OFFSET, prop->doorbell_index);
+ 
+@@ -3093,17 +3111,17 @@ static int gfx_v12_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
+ 	mqd->cp_hqd_pq_doorbell_control = tmp;
+ 
+ 	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
+-	mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR);
++	mqd->cp_hqd_pq_rptr = regCP_HQD_PQ_RPTR_DEFAULT;
+ 
+ 	/* set the vmid for the queue */
+ 	mqd->cp_hqd_vmid = 0;
+ 
+-	tmp = RREG32_SOC15(GC, 0, regCP_HQD_PERSISTENT_STATE);
++	tmp = regCP_HQD_PERSISTENT_STATE_DEFAULT;
+ 	tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x55);
+ 	mqd->cp_hqd_persistent_state = tmp;
+ 
+ 	/* set MIN_IB_AVAIL_SIZE */
+-	tmp = RREG32_SOC15(GC, 0, regCP_HQD_IB_CONTROL);
++	tmp = regCP_HQD_IB_CONTROL_DEFAULT;
+ 	tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
+ 	mqd->cp_hqd_ib_control = tmp;
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+index 0e3ddea7b8e0f8..a7bfc9f41d0e39 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+@@ -92,12 +92,12 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
+ {
+ 	uint64_t value;
+ 
+-	/* Program the AGP BAR */
+-	WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_BASE, 0);
+-	WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
+-	WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);
+-
+ 	if (!amdgpu_sriov_vf(adev) || adev->asic_type <= CHIP_VEGA10) {
++		/* Program the AGP BAR */
++		WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_BASE, 0);
++		WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
++		WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);
++
+ 		/* Program the system aperture low logical page number. */
+ 		WREG32_SOC15_RLC(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
+ 			min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+index 9a212413c6d3a6..78c527b56f7c52 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+@@ -1461,7 +1461,6 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
+ 		adev->umc.umc_inst_num = UMC_V12_0_UMC_INSTANCE_NUM;
+ 		adev->umc.node_inst_num /= UMC_V12_0_UMC_INSTANCE_NUM;
+ 		adev->umc.channel_offs = UMC_V12_0_PER_CHANNEL_OFFSET;
+-		adev->umc.active_mask = adev->aid_mask;
+ 		adev->umc.retire_unit = UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL;
+ 		if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu)
+ 			adev->umc.ras = &umc_v12_0_ras;
+diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+index 7a773fcd7752c2..49113df8baefd9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+@@ -690,7 +690,7 @@ static int mes_v11_0_set_hw_resources(struct amdgpu_mes *mes)
+ 
+ static int mes_v11_0_set_hw_resources_1(struct amdgpu_mes *mes)
+ {
+-	int size = 128 * PAGE_SIZE;
++	int size = 128 * AMDGPU_GPU_PAGE_SIZE;
+ 	int ret = 0;
+ 	struct amdgpu_device *adev = mes->adev;
+ 	union MESAPI_SET_HW_RESOURCES_1 mes_set_hw_res_pkt;
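The MES fix matters on kernels where PAGE_SIZE is larger than 4 KiB: AMDGPU_GPU_PAGE_SIZE is a fixed 4096 in the driver, while PAGE_SIZE follows the build configuration (64 KiB on some arm64/ppc64 kernels), so the old expression could allocate sixteen times the intended size. Spelled out under that assumption:

#include <stdio.h>

#define AMDGPU_GPU_PAGE_SIZE 4096	/* fixed GPU page, as in the driver */
#define PAGE_SIZE (64 * 1024)		/* e.g. a 64 KiB-page kernel build */

int main(void)
{
	printf("CPU pages: %d bytes\n", 128 * PAGE_SIZE);		/* 8 MiB */
	printf("GPU pages: %d bytes\n", 128 * AMDGPU_GPU_PAGE_SIZE);	/* 512 KiB */
	return 0;
}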
+diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c
+index 9689e2b5d4e518..2adee2b94c37d6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c
++++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c
+@@ -172,6 +172,30 @@ static void mmhub_v1_7_init_tlb_regs(struct amdgpu_device *adev)
+ 	WREG32_SOC15(MMHUB, 0, regMC_VM_MX_L1_TLB_CNTL, tmp);
+ }
+ 
++/* Set snoop bit for SDMA so that SDMA writes probe-invalidates RW lines */
++static void mmhub_v1_7_init_snoop_override_regs(struct amdgpu_device *adev)
++{
++	uint32_t tmp;
++	int i;
++	uint32_t distance = regDAGB1_WRCLI_GPU_SNOOP_OVERRIDE -
++			    regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE;
++
++	for (i = 0; i < 5; i++) { /* DAGB instances */
++		tmp = RREG32_SOC15_OFFSET(MMHUB, 0,
++			regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE, i * distance);
++		tmp |= (1 << 15); /* SDMA client is BIT15 */
++		WREG32_SOC15_OFFSET(MMHUB, 0,
++			regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE, i * distance, tmp);
++
++		tmp = RREG32_SOC15_OFFSET(MMHUB, 0,
++			regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE, i * distance);
++		tmp |= (1 << 15);
++		WREG32_SOC15_OFFSET(MMHUB, 0,
++			regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE, i * distance, tmp);
++	}
++
++}
++
+ static void mmhub_v1_7_init_cache_regs(struct amdgpu_device *adev)
+ {
+ 	uint32_t tmp;
+@@ -337,6 +361,7 @@ static int mmhub_v1_7_gart_enable(struct amdgpu_device *adev)
+ 	mmhub_v1_7_init_system_aperture_regs(adev);
+ 	mmhub_v1_7_init_tlb_regs(adev);
+ 	mmhub_v1_7_init_cache_regs(adev);
++	mmhub_v1_7_init_snoop_override_regs(adev);
+ 
+ 	mmhub_v1_7_enable_system_domain(adev);
+ 	mmhub_v1_7_disable_identity_aperture(adev);
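The snoop-override helpers derive the per-instance register stride from the distance between the DAGB1 and DAGB0 offsets, then read-modify-write bit 15 (the SDMA client) at each instance. A standalone sketch against a simulated register file; the accessors, base, and stride are stand-ins:

#include <stdint.h>
#include <stdio.h>

#define SDMA_SNOOP_BIT (1u << 15)

/* Simulated register file standing in for MMIO. */
static uint32_t regs[32];

static void set_snoop_override(uint32_t base, uint32_t stride, int ninst)
{
	for (int i = 0; i < ninst; i++)
		regs[base + i * stride] |= SDMA_SNOOP_BIT; /* read-modify-write */
}

int main(void)
{
	set_snoop_override(0, 4, 5);	/* 5 DAGB instances, illustrative stride */
	printf("reg0 = 0x%x\n", regs[0]);
	return 0;
}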
+diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
+index b01bb759d0f4f4..2276c644a69741 100644
+--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
++++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
+@@ -214,6 +214,32 @@ static void mmhub_v1_8_init_tlb_regs(struct amdgpu_device *adev)
+ 	}
+ }
+ 
++/* Set snoop bit for SDMA so that SDMA writes probe-invalidates RW lines */
++static void mmhub_v1_8_init_snoop_override_regs(struct amdgpu_device *adev)
++{
++	uint32_t tmp, inst_mask;
++	int i, j;
++	uint32_t distance = regDAGB1_WRCLI_GPU_SNOOP_OVERRIDE -
++			    regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE;
++
++	inst_mask = adev->aid_mask;
++	for_each_inst(i, inst_mask) {
++		for (j = 0; j < 5; j++) { /* DAGB instances */
++			tmp = RREG32_SOC15_OFFSET(MMHUB, i,
++				regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE, j * distance);
++			tmp |= (1 << 15); /* SDMA client is BIT15 */
++			WREG32_SOC15_OFFSET(MMHUB, i,
++				regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE, j * distance, tmp);
++
++			tmp = RREG32_SOC15_OFFSET(MMHUB, i,
++				regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE, j * distance);
++			tmp |= (1 << 15);
++			WREG32_SOC15_OFFSET(MMHUB, i,
++				regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE, j * distance, tmp);
++		}
++	}
++}
++
+ static void mmhub_v1_8_init_cache_regs(struct amdgpu_device *adev)
+ {
+ 	uint32_t tmp, inst_mask;
+@@ -419,6 +445,7 @@ static int mmhub_v1_8_gart_enable(struct amdgpu_device *adev)
+ 	mmhub_v1_8_init_system_aperture_regs(adev);
+ 	mmhub_v1_8_init_tlb_regs(adev);
+ 	mmhub_v1_8_init_cache_regs(adev);
++	mmhub_v1_8_init_snoop_override_regs(adev);
+ 
+ 	mmhub_v1_8_enable_system_domain(adev);
+ 	mmhub_v1_8_disable_identity_aperture(adev);
+diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
+index ff1b58e446892a..fe0710b55c3ac7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
++++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
+@@ -198,6 +198,36 @@ static void mmhub_v9_4_init_tlb_regs(struct amdgpu_device *adev, int hubid)
+ 			    hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);
+ }
+ 
++/* Set snoop bit for SDMA so that SDMA writes probe-invalidates RW lines */
++static void mmhub_v9_4_init_snoop_override_regs(struct amdgpu_device *adev, int hubid)
++{
++	uint32_t tmp;
++	int i;
++	uint32_t distance = mmDAGB1_WRCLI_GPU_SNOOP_OVERRIDE -
++			    mmDAGB0_WRCLI_GPU_SNOOP_OVERRIDE;
++	uint32_t huboffset = hubid * MMHUB_INSTANCE_REGISTER_OFFSET;
++
++	for (i = 0; i < 5 - (2 * hubid); i++) {
++		/* DAGB instances 0 to 4 are in hub0 and 5 to 7 are in hub1 */
++		tmp = RREG32_SOC15_OFFSET(MMHUB, 0,
++			mmDAGB0_WRCLI_GPU_SNOOP_OVERRIDE,
++			huboffset + i * distance);
++		tmp |= (1 << 15); /* SDMA client is BIT15 */
++		WREG32_SOC15_OFFSET(MMHUB, 0,
++			mmDAGB0_WRCLI_GPU_SNOOP_OVERRIDE,
++			huboffset + i * distance, tmp);
++
++		tmp = RREG32_SOC15_OFFSET(MMHUB, 0,
++			mmDAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE,
++			huboffset + i * distance);
++		tmp |= (1 << 15);
++		WREG32_SOC15_OFFSET(MMHUB, 0,
++			mmDAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE,
++			huboffset + i * distance, tmp);
++	}
++
++}
++
+ static void mmhub_v9_4_init_cache_regs(struct amdgpu_device *adev, int hubid)
+ {
+ 	uint32_t tmp;
+@@ -392,6 +422,7 @@ static int mmhub_v9_4_gart_enable(struct amdgpu_device *adev)
+ 		if (!amdgpu_sriov_vf(adev))
+ 			mmhub_v9_4_init_cache_regs(adev, i);
+ 
++		mmhub_v9_4_init_snoop_override_regs(adev, i);
+ 		mmhub_v9_4_enable_system_domain(adev, i);
+ 		if (!amdgpu_sriov_vf(adev))
+ 			mmhub_v9_4_disable_identity_aperture(adev, i);
+diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
+index 4f94a119d62754..ab0eecbab41257 100644
+--- a/drivers/gpu/drm/amd/amdgpu/nv.c
++++ b/drivers/gpu/drm/amd/amdgpu/nv.c
+@@ -141,23 +141,23 @@ static struct amdgpu_video_codec_info sriov_sc_video_codecs_encode_array[] = {
+ };
+ 
+ static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array_vcn0[] = {
+-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
+-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 1920, 1088, 3)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 1920, 1088, 5)},
+ 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
+-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 1920, 1088, 4)},
+ 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
+-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)},
+ 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
+ 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
+ };
+ 
+ static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array_vcn1[] = {
+-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
+-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 1920, 1088, 3)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 1920, 1088, 5)},
+ 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
+-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 1920, 1088, 4)},
+ 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
+-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)},
+ 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
+ };
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
+index bba35880badb9f..04a1b2a46368f3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
+@@ -117,23 +117,17 @@ static struct amdgpu_video_codecs sriov_vcn_4_0_0_video_codecs_encode_vcn1 = {
+ };
+ 
+ static struct amdgpu_video_codec_info sriov_vcn_4_0_0_video_codecs_decode_array_vcn0[] = {
+-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
+-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+ 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
+-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
+-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)},
+ 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
+ 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
+ };
+ 
+ static struct amdgpu_video_codec_info sriov_vcn_4_0_0_video_codecs_decode_array_vcn1[] = {
+-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
+-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+ 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
+-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
+-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)},
+ 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
+ };
+ 
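Both caps tables are rows of codec_info_build(codec index, max width, max height, max level), so the JPEG entries above now advertise 16384x16384. An approximation of the entry layout; the struct fields, macro shape, and codec index are assumptions inferred from the call sites, not copied from the driver headers:

#include <stdint.h>

struct video_codec_info {	/* assumed field layout */
	uint32_t codec_type;
	uint32_t max_width;
	uint32_t max_height;
	uint32_t max_level;
};

#define codec_info_build(type, width, height, level) \
	.codec_type = (type), .max_width = (width), \
	.max_height = (height), .max_level = (level)

static const struct video_codec_info jpeg_cap = {
	codec_info_build(7 /* hypothetical JPEG index */, 16384, 16384, 0)
};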
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+index 951b87e7e3f68e..6a58dd8d2130c9 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+@@ -2453,14 +2453,6 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
+ 	return retval;
+ }
+ 
+-/*
+- * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to
+- * stay in user mode.
+- */
+-#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL
+-/* APE1 limit is inclusive and 64K aligned. */
+-#define APE1_LIMIT_ALIGNMENT 0xFFFF
+-
+ static bool set_cache_memory_policy(struct device_queue_manager *dqm,
+ 				   struct qcm_process_device *qpd,
+ 				   enum cache_policy default_policy,
+@@ -2475,34 +2467,6 @@ static bool set_cache_memory_policy(struct device_queue_manager *dqm,
+ 
+ 	dqm_lock(dqm);
+ 
+-	if (alternate_aperture_size == 0) {
+-		/* base > limit disables APE1 */
+-		qpd->sh_mem_ape1_base = 1;
+-		qpd->sh_mem_ape1_limit = 0;
+-	} else {
+-		/*
+-		 * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]},
+-		 *			SH_MEM_APE1_BASE[31:0], 0x0000 }
+-		 * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]},
+-		 *			SH_MEM_APE1_LIMIT[31:0], 0xFFFF }
+-		 * Verify that the base and size parameters can be
+-		 * represented in this format and convert them.
+-		 * Additionally restrict APE1 to user-mode addresses.
+-		 */
+-
+-		uint64_t base = (uintptr_t)alternate_aperture_base;
+-		uint64_t limit = base + alternate_aperture_size - 1;
+-
+-		if (limit <= base || (base & APE1_FIXED_BITS_MASK) != 0 ||
+-		   (limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT) {
+-			retval = false;
+-			goto out;
+-		}
+-
+-		qpd->sh_mem_ape1_base = base >> 16;
+-		qpd->sh_mem_ape1_limit = limit >> 16;
+-	}
+-
+ 	retval = dqm->asic_ops.set_cache_memory_policy(
+ 			dqm,
+ 			qpd,
+@@ -2511,6 +2475,9 @@ static bool set_cache_memory_policy(struct device_queue_manager *dqm,
+ 			alternate_aperture_base,
+ 			alternate_aperture_size);
+ 
++	if (retval)
++		goto out;
++
+ 	if ((dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0))
+ 		program_sh_mem_settings(dqm, qpd);
+ 
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
+index d4d95c7f2e5d40..32bedef912b3b2 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
+@@ -27,6 +27,14 @@
+ #include "oss/oss_2_4_sh_mask.h"
+ #include "gca/gfx_7_2_sh_mask.h"
+ 
++/*
++ * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to
++ * stay in user mode.
++ */
++#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL
++/* APE1 limit is inclusive and 64K aligned. */
++#define APE1_LIMIT_ALIGNMENT 0xFFFF
++
+ static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm,
+ 				   struct qcm_process_device *qpd,
+ 				   enum cache_policy default_policy,
+@@ -84,6 +92,36 @@ static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm,
+ {
+ 	uint32_t default_mtype;
+ 	uint32_t ape1_mtype;
++	unsigned int temp;
++	bool retval = true;
++
++	if (alternate_aperture_size == 0) {
++		/* base > limit disables APE1 */
++		qpd->sh_mem_ape1_base = 1;
++		qpd->sh_mem_ape1_limit = 0;
++	} else {
++		/*
++		 * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]},
++		 *			SH_MEM_APE1_BASE[31:0], 0x0000 }
++		 * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]},
++		 *			SH_MEM_APE1_LIMIT[31:0], 0xFFFF }
++		 * Verify that the base and size parameters can be
++		 * represented in this format and convert them.
++		 * Additionally restrict APE1 to user-mode addresses.
++		 */
++
++		uint64_t base = (uintptr_t)alternate_aperture_base;
++		uint64_t limit = base + alternate_aperture_size - 1;
++
++		if (limit <= base || (base & APE1_FIXED_BITS_MASK) != 0 ||
++		   (limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT) {
++			retval = false;
++			goto out;
++		}
++
++		qpd->sh_mem_ape1_base = base >> 16;
++		qpd->sh_mem_ape1_limit = limit >> 16;
++	}
+ 
+ 	default_mtype = (default_policy == cache_policy_coherent) ?
+ 			MTYPE_NONCACHED :
+@@ -97,37 +135,22 @@ static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm,
+ 			| ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED)
+ 			| DEFAULT_MTYPE(default_mtype)
+ 			| APE1_MTYPE(ape1_mtype);
+-
+-	return true;
+-}
+-
+-static int update_qpd_cik(struct device_queue_manager *dqm,
+-			  struct qcm_process_device *qpd)
+-{
+-	struct kfd_process_device *pdd;
+-	unsigned int temp;
+-
+-	pdd = qpd_to_pdd(qpd);
+-
+-	/* check if sh_mem_config register already configured */
+-	if (qpd->sh_mem_config == 0) {
+-		qpd->sh_mem_config =
+-			ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED) |
+-			DEFAULT_MTYPE(MTYPE_NONCACHED) |
+-			APE1_MTYPE(MTYPE_NONCACHED);
+-		qpd->sh_mem_ape1_limit = 0;
+-		qpd->sh_mem_ape1_base = 0;
+-	}
+-
+ 	/* On dGPU we're always in GPUVM64 addressing mode with 64-bit
+ 	 * aperture addresses.
+ 	 */
+-	temp = get_sh_mem_bases_nybble_64(pdd);
++	temp = get_sh_mem_bases_nybble_64(qpd_to_pdd(qpd));
+ 	qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp);
+ 
+ 	pr_debug("is32bit process: %d sh_mem_bases nybble: 0x%X and register 0x%X\n",
+ 		qpd->pqm->process->is_32bit_user_mode, temp, qpd->sh_mem_bases);
+ 
++out:
++	return retval;
++}
++
++static int update_qpd_cik(struct device_queue_manager *dqm,
++			  struct qcm_process_device *qpd)
++{
+ 	return 0;
+ }
+ 
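The APE1 scheme described in the comment block packs a 64-bit inclusive [base, limit] range into two 32-bit registers by dropping the low 16 bits, after the fixed-bit checks. A worked check with an illustrative 64 KiB aperture placed at the 4 GiB mark:

#include <stdint.h>
#include <stdio.h>

#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL
#define APE1_LIMIT_ALIGNMENT 0xFFFFULL

int main(void)
{
	uint64_t base  = 0x100000000ULL;	/* 4 GiB, user-mode range */
	uint64_t limit = base + 0x10000 - 1;	/* inclusive, ends in ...FFFF */

	if (limit <= base ||
	    (base & APE1_FIXED_BITS_MASK) != 0 ||
	    (limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT) {
		puts("rejected");
		return 1;
	}
	printf("sh_mem_ape1_base  = 0x%llx\n",
	       (unsigned long long)(base >> 16));	/* 0x10000 */
	printf("sh_mem_ape1_limit = 0x%llx\n",
	       (unsigned long long)(limit >> 16));	/* 0x10000 */
	return 0;
}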
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c
+index 245a90dfc2f6b3..b5f5f141353b5f 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c
+@@ -31,10 +31,17 @@ static int update_qpd_v10(struct device_queue_manager *dqm,
+ 			 struct qcm_process_device *qpd);
+ static void init_sdma_vm_v10(struct device_queue_manager *dqm, struct queue *q,
+ 			    struct qcm_process_device *qpd);
++static bool set_cache_memory_policy_v10(struct device_queue_manager *dqm,
++				   struct qcm_process_device *qpd,
++				   enum cache_policy default_policy,
++				   enum cache_policy alternate_policy,
++				   void __user *alternate_aperture_base,
++				   uint64_t alternate_aperture_size);
+ 
+ void device_queue_manager_init_v10(
+ 	struct device_queue_manager_asic_ops *asic_ops)
+ {
++	asic_ops->set_cache_memory_policy = set_cache_memory_policy_v10;
+ 	asic_ops->update_qpd = update_qpd_v10;
+ 	asic_ops->init_sdma_vm = init_sdma_vm_v10;
+ 	asic_ops->mqd_manager_init = mqd_manager_init_v10;
+@@ -49,27 +56,27 @@ static uint32_t compute_sh_mem_bases_64bit(struct kfd_process_device *pdd)
+ 		private_base;
+ }
+ 
+-static int update_qpd_v10(struct device_queue_manager *dqm,
+-			 struct qcm_process_device *qpd)
++static bool set_cache_memory_policy_v10(struct device_queue_manager *dqm,
++				   struct qcm_process_device *qpd,
++				   enum cache_policy default_policy,
++				   enum cache_policy alternate_policy,
++				   void __user *alternate_aperture_base,
++				   uint64_t alternate_aperture_size)
+ {
+-	struct kfd_process_device *pdd;
+-
+-	pdd = qpd_to_pdd(qpd);
+-
+-	/* check if sh_mem_config register already configured */
+-	if (qpd->sh_mem_config == 0) {
+-		qpd->sh_mem_config =
+-			(SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
+-				SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) |
+-			(3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT);
+-		qpd->sh_mem_ape1_limit = 0;
+-		qpd->sh_mem_ape1_base = 0;
+-	}
+-
+-	qpd->sh_mem_bases = compute_sh_mem_bases_64bit(pdd);
++	qpd->sh_mem_config = (SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
++			      SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) |
++			      (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT);
++	qpd->sh_mem_ape1_limit = 0;
++	qpd->sh_mem_ape1_base = 0;
++	qpd->sh_mem_bases = compute_sh_mem_bases_64bit(qpd_to_pdd(qpd));
+ 
+ 	pr_debug("sh_mem_bases 0x%X\n", qpd->sh_mem_bases);
++	return true;
++}
+ 
++static int update_qpd_v10(struct device_queue_manager *dqm,
++			 struct qcm_process_device *qpd)
++{
+ 	return 0;
+ }
+ 
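Each per-ASIC init function now also wires up set_cache_memory_policy, so the generic queue-manager code stays free of ASIC conditionals. A minimal sketch of that ops-table dispatch pattern; the types and names are simplified stand-ins, not the KFD structs:

#include <stdbool.h>
#include <stdio.h>

struct qpd;	/* opaque per-process state */

struct asic_ops {
	bool (*set_cache_memory_policy)(struct qpd *qpd);
};

static bool set_policy_vN(struct qpd *qpd)
{
	(void)qpd;
	return true;	/* per-ASIC behaviour goes here */
}

static void init_ops_vN(struct asic_ops *ops)
{
	ops->set_cache_memory_policy = set_policy_vN;
}

int main(void)
{
	struct asic_ops ops;

	init_ops_vN(&ops);	/* wired once at init time */
	printf("%d\n", ops.set_cache_memory_policy(NULL));
	return 0;
}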
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v11.c
+index 2e129da7acb43a..f436878d0d6218 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v11.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v11.c
+@@ -30,10 +30,17 @@ static int update_qpd_v11(struct device_queue_manager *dqm,
+ 			 struct qcm_process_device *qpd);
+ static void init_sdma_vm_v11(struct device_queue_manager *dqm, struct queue *q,
+ 			    struct qcm_process_device *qpd);
++static bool set_cache_memory_policy_v11(struct device_queue_manager *dqm,
++				   struct qcm_process_device *qpd,
++				   enum cache_policy default_policy,
++				   enum cache_policy alternate_policy,
++				   void __user *alternate_aperture_base,
++				   uint64_t alternate_aperture_size);
+ 
+ void device_queue_manager_init_v11(
+ 	struct device_queue_manager_asic_ops *asic_ops)
+ {
++	asic_ops->set_cache_memory_policy = set_cache_memory_policy_v11;
+ 	asic_ops->update_qpd = update_qpd_v11;
+ 	asic_ops->init_sdma_vm = init_sdma_vm_v11;
+ 	asic_ops->mqd_manager_init = mqd_manager_init_v11;
+@@ -48,28 +55,28 @@ static uint32_t compute_sh_mem_bases_64bit(struct kfd_process_device *pdd)
+ 		private_base;
+ }
+ 
+-static int update_qpd_v11(struct device_queue_manager *dqm,
+-			 struct qcm_process_device *qpd)
++static bool set_cache_memory_policy_v11(struct device_queue_manager *dqm,
++				   struct qcm_process_device *qpd,
++				   enum cache_policy default_policy,
++				   enum cache_policy alternate_policy,
++				   void __user *alternate_aperture_base,
++				   uint64_t alternate_aperture_size)
+ {
+-	struct kfd_process_device *pdd;
+-
+-	pdd = qpd_to_pdd(qpd);
+-
+-	/* check if sh_mem_config register already configured */
+-	if (qpd->sh_mem_config == 0) {
+-		qpd->sh_mem_config =
+-			(SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
+-				SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) |
+-			(3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT);
+-
+-		qpd->sh_mem_ape1_limit = 0;
+-		qpd->sh_mem_ape1_base = 0;
+-	}
++	qpd->sh_mem_config = (SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
++			      SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) |
++			      (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT);
+ 
+-	qpd->sh_mem_bases = compute_sh_mem_bases_64bit(pdd);
++	qpd->sh_mem_ape1_limit = 0;
++	qpd->sh_mem_ape1_base = 0;
++	qpd->sh_mem_bases = compute_sh_mem_bases_64bit(qpd_to_pdd(qpd));
+ 
+ 	pr_debug("sh_mem_bases 0x%X\n", qpd->sh_mem_bases);
++	return true;
++}
+ 
++static int update_qpd_v11(struct device_queue_manager *dqm,
++			 struct qcm_process_device *qpd)
++{
+ 	return 0;
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v12.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v12.c
+index 4f3295b29dfb1b..62ca1c8fcbaf9a 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v12.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v12.c
+@@ -30,10 +30,17 @@ static int update_qpd_v12(struct device_queue_manager *dqm,
+ 			 struct qcm_process_device *qpd);
+ static void init_sdma_vm_v12(struct device_queue_manager *dqm, struct queue *q,
+ 			    struct qcm_process_device *qpd);
++static bool set_cache_memory_policy_v12(struct device_queue_manager *dqm,
++				   struct qcm_process_device *qpd,
++				   enum cache_policy default_policy,
++				   enum cache_policy alternate_policy,
++				   void __user *alternate_aperture_base,
++				   uint64_t alternate_aperture_size);
+ 
+ void device_queue_manager_init_v12(
+ 	struct device_queue_manager_asic_ops *asic_ops)
+ {
++	asic_ops->set_cache_memory_policy = set_cache_memory_policy_v12;
+ 	asic_ops->update_qpd = update_qpd_v12;
+ 	asic_ops->init_sdma_vm = init_sdma_vm_v12;
+ 	asic_ops->mqd_manager_init = mqd_manager_init_v12;
+@@ -48,28 +55,28 @@ static uint32_t compute_sh_mem_bases_64bit(struct kfd_process_device *pdd)
+ 		private_base;
+ }
+ 
+-static int update_qpd_v12(struct device_queue_manager *dqm,
+-			 struct qcm_process_device *qpd)
++static bool set_cache_memory_policy_v12(struct device_queue_manager *dqm,
++				   struct qcm_process_device *qpd,
++				   enum cache_policy default_policy,
++				   enum cache_policy alternate_policy,
++				   void __user *alternate_aperture_base,
++				   uint64_t alternate_aperture_size)
+ {
+-	struct kfd_process_device *pdd;
+-
+-	pdd = qpd_to_pdd(qpd);
+-
+-	/* check if sh_mem_config register already configured */
+-	if (qpd->sh_mem_config == 0) {
+-		qpd->sh_mem_config =
+-			(SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
+-				SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) |
+-			(3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT);
+-
+-		qpd->sh_mem_ape1_limit = 0;
+-		qpd->sh_mem_ape1_base = 0;
+-	}
++	qpd->sh_mem_config = (SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
++			      SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) |
++			      (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT);
+ 
+-	qpd->sh_mem_bases = compute_sh_mem_bases_64bit(pdd);
++	qpd->sh_mem_ape1_limit = 0;
++	qpd->sh_mem_ape1_base = 0;
++	qpd->sh_mem_bases = compute_sh_mem_bases_64bit(qpd_to_pdd(qpd));
+ 
+ 	pr_debug("sh_mem_bases 0x%X\n", qpd->sh_mem_bases);
++	return true;
++}
+ 
++static int update_qpd_v12(struct device_queue_manager *dqm,
++			 struct qcm_process_device *qpd)
++{
+ 	return 0;
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
+index 210bcc048f4c51..d85eadaa1e11bd 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
+@@ -30,10 +30,17 @@ static int update_qpd_v9(struct device_queue_manager *dqm,
+ 			 struct qcm_process_device *qpd);
+ static void init_sdma_vm_v9(struct device_queue_manager *dqm, struct queue *q,
+ 			    struct qcm_process_device *qpd);
++static bool set_cache_memory_policy_v9(struct device_queue_manager *dqm,
++				   struct qcm_process_device *qpd,
++				   enum cache_policy default_policy,
++				   enum cache_policy alternate_policy,
++				   void __user *alternate_aperture_base,
++				   uint64_t alternate_aperture_size);
+ 
+ void device_queue_manager_init_v9(
+ 	struct device_queue_manager_asic_ops *asic_ops)
+ {
++	asic_ops->set_cache_memory_policy = set_cache_memory_policy_v9;
+ 	asic_ops->update_qpd = update_qpd_v9;
+ 	asic_ops->init_sdma_vm = init_sdma_vm_v9;
+ 	asic_ops->mqd_manager_init = mqd_manager_init_v9;
+@@ -48,10 +55,36 @@ static uint32_t compute_sh_mem_bases_64bit(struct kfd_process_device *pdd)
+ 		private_base;
+ }
+ 
++static bool set_cache_memory_policy_v9(struct device_queue_manager *dqm,
++				   struct qcm_process_device *qpd,
++				   enum cache_policy default_policy,
++				   enum cache_policy alternate_policy,
++				   void __user *alternate_aperture_base,
++				   uint64_t alternate_aperture_size)
++{
++	qpd->sh_mem_config = SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
++				SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
++
++	if (dqm->dev->kfd->noretry)
++		qpd->sh_mem_config |= 1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT;
++
++	if (KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 4, 3) ||
++		KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 4, 4))
++		qpd->sh_mem_config |= (1 << SH_MEM_CONFIG__F8_MODE__SHIFT);
++
++	qpd->sh_mem_ape1_limit = 0;
++	qpd->sh_mem_ape1_base = 0;
++	qpd->sh_mem_bases = compute_sh_mem_bases_64bit(qpd_to_pdd(qpd));
++
++	pr_debug("sh_mem_bases 0x%X sh_mem_config 0x%X\n", qpd->sh_mem_bases,
++		 qpd->sh_mem_config);
++	return true;
++}
++
+ static int update_qpd_v9(struct device_queue_manager *dqm,
+ 			 struct qcm_process_device *qpd)
+ {
+-	struct kfd_process_device *pdd;
++	struct kfd_process_device *pdd = qpd_to_pdd(qpd);
+ 
+ 	pdd = qpd_to_pdd(qpd);
+ 
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
+index b291ee0fab9439..320518f418903d 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
+@@ -27,6 +27,14 @@
+ #include "gca/gfx_8_0_sh_mask.h"
+ #include "oss/oss_3_0_sh_mask.h"
+ 
++/*
++ * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to
++ * stay in user mode.
++ */
++#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL
++/* APE1 limit is inclusive and 64K aligned. */
++#define APE1_LIMIT_ALIGNMENT 0xFFFF
++
+ static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
+ 				       struct qcm_process_device *qpd,
+ 				       enum cache_policy default_policy,
+@@ -85,6 +93,36 @@ static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
+ {
+ 	uint32_t default_mtype;
+ 	uint32_t ape1_mtype;
++	unsigned int temp;
++	bool retval = true;
++
++	if (alternate_aperture_size == 0) {
++		/* base > limit disables APE1 */
++		qpd->sh_mem_ape1_base = 1;
++		qpd->sh_mem_ape1_limit = 0;
++	} else {
++		/*
++		 * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]},
++		 *			SH_MEM_APE1_BASE[31:0], 0x0000 }
++		 * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]},
++		 *			SH_MEM_APE1_LIMIT[31:0], 0xFFFF }
++		 * Verify that the base and size parameters can be
++		 * represented in this format and convert them.
++		 * Additionally restrict APE1 to user-mode addresses.
++		 */
++
++		uint64_t base = (uintptr_t)alternate_aperture_base;
++		uint64_t limit = base + alternate_aperture_size - 1;
++
++		if (limit <= base || (base & APE1_FIXED_BITS_MASK) != 0 ||
++		   (limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT) {
++			retval = false;
++			goto out;
++		}
++
++		qpd->sh_mem_ape1_base = base >> 16;
++		qpd->sh_mem_ape1_limit = limit >> 16;
++	}
+ 
+ 	default_mtype = (default_policy == cache_policy_coherent) ?
+ 			MTYPE_UC :
+@@ -100,40 +138,21 @@ static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
+ 			default_mtype << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT |
+ 			ape1_mtype << SH_MEM_CONFIG__APE1_MTYPE__SHIFT;
+ 
+-	return true;
+-}
+-
+-static int update_qpd_vi(struct device_queue_manager *dqm,
+-			 struct qcm_process_device *qpd)
+-{
+-	struct kfd_process_device *pdd;
+-	unsigned int temp;
+-
+-	pdd = qpd_to_pdd(qpd);
+-
+-	/* check if sh_mem_config register already configured */
+-	if (qpd->sh_mem_config == 0) {
+-		qpd->sh_mem_config =
+-				SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
+-					SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT |
+-				MTYPE_UC <<
+-					SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT |
+-				MTYPE_UC <<
+-					SH_MEM_CONFIG__APE1_MTYPE__SHIFT;
+-
+-		qpd->sh_mem_ape1_limit = 0;
+-		qpd->sh_mem_ape1_base = 0;
+-	}
+-
+ 	/* On dGPU we're always in GPUVM64 addressing mode with 64-bit
+ 	 * aperture addresses.
+ 	 */
+-	temp = get_sh_mem_bases_nybble_64(pdd);
++	temp = get_sh_mem_bases_nybble_64(qpd_to_pdd(qpd));
+ 	qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp);
+ 
+ 	pr_debug("sh_mem_bases nybble: 0x%X and register 0x%X\n",
+ 		temp, qpd->sh_mem_bases);
++out:
++	return retval;
++}
+ 
++static int update_qpd_vi(struct device_queue_manager *dqm,
++			 struct qcm_process_device *qpd)
++{
+ 	return 0;
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+index 0ec8b457494bd7..45923da7709fd3 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+@@ -842,6 +842,14 @@ struct kfd_process *kfd_create_process(struct task_struct *thread)
+ 		return ERR_PTR(-EINVAL);
+ 	}
+ 
++	/* If the process just called exec(3), it is possible that the
++	 * cleanup of the kfd_process (following the release of the mm
++	 * of the old process image) is still in the cleanup work queue.
++	 * Make sure to drain any job before trying to recreate any
++	 * resource for this process.
++	 */
++	flush_workqueue(kfd_process_wq);
++
+ 	/*
+ 	 * take kfd processes mutex before starting of process creation
+ 	 * so there won't be a case where two threads of the same process
+@@ -860,14 +868,6 @@ struct kfd_process *kfd_create_process(struct task_struct *thread)
+ 	if (process) {
+ 		pr_debug("Process already found\n");
+ 	} else {
+-		/* If the process just called exec(3), it is possible that the
+-		 * cleanup of the kfd_process (following the release of the mm
+-		 * of the old process image) is still in the cleanup work queue.
+-		 * Make sure to drain any job before trying to recreate any
+-		 * resource for this process.
+-		 */
+-		flush_workqueue(kfd_process_wq);
+-
+ 		process = create_process(thread);
+ 		if (IS_ERR(process))
+ 			goto out;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+index b50283864dcd26..f00d41be7fca24 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+@@ -3014,7 +3014,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
+ 
+ 	/* check if this page fault time stamp is before svms->checkpoint_ts */
+ 	if (svms->checkpoint_ts[gpuidx] != 0) {
+-		if (amdgpu_ih_ts_after(ts,  svms->checkpoint_ts[gpuidx])) {
++		if (amdgpu_ih_ts_after_or_equal(ts,  svms->checkpoint_ts[gpuidx])) {
+ 			pr_debug("draining retry fault, drop fault 0x%llx\n", addr);
+ 			r = -EAGAIN;
+ 			goto out_unlock_svms;
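The svm change relaxes the drain check from strictly-after to after-or-equal, so a fault stamped exactly at the checkpoint is also dropped. Wrap-tolerant ordering of hardware timestamps is commonly done with a signed difference; a generic sketch of that idiom (the driver's amdgpu_ih helpers handle narrower IH timestamps, so this is the general pattern, not the exact kernel code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* True if t2 is after t1, tolerant of counter wraparound. */
static bool ts_after(uint64_t t1, uint64_t t2)
{
	return (int64_t)(t2 - t1) > 0;
}

static bool ts_after_or_equal(uint64_t t1, uint64_t t2)
{
	return (int64_t)(t2 - t1) >= 0;
}

int main(void)
{
	printf("%d %d\n", ts_after(5, 5), ts_after_or_equal(5, 5));	/* 0 1 */
	return 0;
}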
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+index 3871591c9aec98..82da568604b6eb 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+@@ -1683,17 +1683,32 @@ static int fill_in_l2_l3_pcache(struct kfd_cache_properties **props_ext,
+ 				int cache_type, unsigned int cu_processor_id,
+ 				struct kfd_node *knode)
+ {
+-	unsigned int cu_sibling_map_mask;
++	unsigned int cu_sibling_map_mask = 0;
+ 	int first_active_cu;
+ 	int i, j, k, xcc, start, end;
+ 	int num_xcc = NUM_XCC(knode->xcc_mask);
+ 	struct kfd_cache_properties *pcache = NULL;
+ 	enum amdgpu_memory_partition mode;
+ 	struct amdgpu_device *adev = knode->adev;
++	bool found = false;
+ 
+ 	start = ffs(knode->xcc_mask) - 1;
+ 	end = start + num_xcc;
+-	cu_sibling_map_mask = cu_info->bitmap[start][0][0];
++
++	/* To find the bitmap of the first active cu in the first
++	 * xcc, we assume that every xcc must
++	 * have at least one active cu.
++	 */
++	for (i = 0; i < gfx_info->max_shader_engines && !found; i++) {
++		for (j = 0; j < gfx_info->max_sh_per_se && !found; j++) {
++			if (cu_info->bitmap[start][i % 4][j % 4]) {
++				cu_sibling_map_mask =
++					cu_info->bitmap[start][i % 4][j % 4];
++				found = true;
++			}
++		}
++	}
++
+ 	cu_sibling_map_mask &=
+ 		((1 << pcache_info[cache_type].num_cu_shared) - 1);
+ 	first_active_cu = ffs(cu_sibling_map_mask);
+@@ -2002,10 +2017,6 @@ static void kfd_topology_set_capabilities(struct kfd_topology_device *dev)
+ 		dev->node_props.debug_prop |= HSA_DBG_WATCH_ADDR_MASK_LO_BIT_GFX10 |
+ 					HSA_DBG_WATCH_ADDR_MASK_HI_BIT;
+ 
+-		if (KFD_GC_VERSION(dev->gpu) >= IP_VERSION(11, 0, 0))
+-			dev->node_props.capability |=
+-				HSA_CAP_TRAP_DEBUG_PRECISE_MEMORY_OPERATIONS_SUPPORTED;
+-
+ 		if (KFD_GC_VERSION(dev->gpu) >= IP_VERSION(12, 0, 0))
+ 			dev->node_props.capability |=
+ 				HSA_CAP_TRAP_DEBUG_PRECISE_ALU_OPERATIONS_SUPPORTED;
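The topology hunk seeds cu_sibling_map_mask from the first non-zero bitmap word instead of assuming [start][0][0] is populated; the ffs() that follows returns the 1-based position of the lowest set bit, or 0 when no bit is set. A short demonstration:

#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	printf("%d\n", ffs(0x0));	/* 0: no active cu in the word */
	printf("%d\n", ffs(0x8));	/* 4: lowest set bit is bit 3 */
	return 0;
}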
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index ff33760aa4fae0..5f9452b22596a3 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -365,6 +365,8 @@ get_crtc_by_otg_inst(struct amdgpu_device *adev,
+ static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
+ 					      struct dm_crtc_state *new_state)
+ {
++	if (new_state->stream->adjust.timing_adjust_pending)
++		return true;
+ 	if (new_state->freesync_config.state ==  VRR_STATE_ACTIVE_FIXED)
+ 		return true;
+ 	else if (amdgpu_dm_crtc_vrr_active(old_state) != amdgpu_dm_crtc_vrr_active(new_state))
+@@ -3313,11 +3315,6 @@ static int dm_resume(void *handle)
+ 
+ 		return 0;
+ 	}
+-
+-	/* leave display off for S4 sequence */
+-	if (adev->in_s4)
+-		return 0;
+-
+ 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
+ 	dc_state_release(dm_state->context);
+ 	dm_state->context = dc_state_create(dm->dc, NULL);
+@@ -5550,9 +5547,9 @@ fill_plane_color_attributes(const struct drm_plane_state *plane_state,
+ 
+ 	case DRM_COLOR_YCBCR_BT2020:
+ 		if (full_range)
+-			*color_space = COLOR_SPACE_2020_YCBCR;
++			*color_space = COLOR_SPACE_2020_YCBCR_FULL;
+ 		else
+-			return -EINVAL;
++			*color_space = COLOR_SPACE_2020_YCBCR_LIMITED;
+ 		break;
+ 
+ 	default:
+@@ -6048,7 +6045,7 @@ get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing,
+ 		if (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB)
+ 			color_space = COLOR_SPACE_2020_RGB_FULLRANGE;
+ 		else
+-			color_space = COLOR_SPACE_2020_YCBCR;
++			color_space = COLOR_SPACE_2020_YCBCR_LIMITED;
+ 		break;
+ 	case DRM_MODE_COLORIMETRY_DEFAULT: // ITU601
+ 	default:
+@@ -7397,12 +7394,12 @@ static enum dc_status dm_validate_stream_and_context(struct dc *dc,
+ }
+ 
+ struct dc_stream_state *
+-create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
++create_validate_stream_for_sink(struct drm_connector *connector,
+ 				const struct drm_display_mode *drm_mode,
+ 				const struct dm_connector_state *dm_state,
+ 				const struct dc_stream_state *old_stream)
+ {
+-	struct drm_connector *connector = &aconnector->base;
++	struct amdgpu_dm_connector *aconnector = NULL;
+ 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
+ 	struct dc_stream_state *stream;
+ 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
+@@ -7413,8 +7410,12 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
+ 	if (!dm_state)
+ 		return NULL;
+ 
+-	if (aconnector->dc_link->connector_signal == SIGNAL_TYPE_HDMI_TYPE_A ||
+-	    aconnector->dc_link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
++	if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
++		aconnector = to_amdgpu_dm_connector(connector);
++
++	if (aconnector &&
++	    (aconnector->dc_link->connector_signal == SIGNAL_TYPE_HDMI_TYPE_A ||
++	     aconnector->dc_link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER))
+ 		bpc_limit = 8;
+ 
+ 	do {
+@@ -7426,10 +7427,11 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
+ 			break;
+ 		}
+ 
+-		if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
++		dc_result = dc_validate_stream(adev->dm.dc, stream);
++
++		if (!aconnector) /* writeback connector */
+ 			return stream;
+ 
+-		dc_result = dc_validate_stream(adev->dm.dc, stream);
+ 		if (dc_result == DC_OK && stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
+ 			dc_result = dm_dp_mst_is_port_support_mode(aconnector, stream);
+ 
+@@ -7459,7 +7461,7 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
+ 				     __func__, __LINE__);
+ 
+ 		aconnector->force_yuv420_output = true;
+-		stream = create_validate_stream_for_sink(aconnector, drm_mode,
++		stream = create_validate_stream_for_sink(connector, drm_mode,
+ 						dm_state, old_stream);
+ 		aconnector->force_yuv420_output = false;
+ 	}
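Taken together, the validation hunks retry at the requested settings and, when everything fails, once more with YUV420 forced. A compressed sketch of that fallback ladder; the helper and the validation rule are stand-ins, not the DC API:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in rule: pretend only 8 bpc with YUV420 validates. */
static bool validate(int bpc, bool yuv420) { return bpc == 8 && yuv420; }

static bool pick_mode(int max_bpc)
{
	for (bool yuv420 = false; ; yuv420 = true) {
		for (int bpc = max_bpc; bpc >= 8; bpc -= 2)	/* 12 -> 10 -> 8 */
			if (validate(bpc, yuv420)) {
				printf("ok: %d bpc%s\n", bpc,
				       yuv420 ? " yuv420" : "");
				return true;
			}
		if (yuv420)
			return false;	/* both ladders exhausted */
	}
}

int main(void) { return pick_mode(12) ? 0 : 1; }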
+@@ -7474,6 +7476,9 @@ enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connec
+ 	struct dc_sink *dc_sink;
+ 	/* TODO: Unhardcode stream count */
+ 	struct dc_stream_state *stream;
++	/* we always have an amdgpu_dm_connector here since we got
++	 * here via the amdgpu_dm_connector_helper_funcs
++	 */
+ 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ 
+ 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
+@@ -7498,7 +7503,7 @@ enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connec
+ 
+ 	drm_mode_set_crtcinfo(mode, 0);
+ 
+-	stream = create_validate_stream_for_sink(aconnector, mode,
++	stream = create_validate_stream_for_sink(connector, mode,
+ 						 to_dm_connector_state(connector->state),
+ 						 NULL);
+ 	if (stream) {
+@@ -8278,7 +8283,7 @@ static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
+ 	int i;
+ 	int result = -EIO;
+ 
+-	if (!ddc_service->ddc_pin || !ddc_service->ddc_pin->hw_info.hw_supported)
++	if (!ddc_service->ddc_pin)
+ 		return result;
+ 
+ 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
+@@ -10518,7 +10523,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
+ 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
+ 			goto skip_modeset;
+ 
+-		new_stream = create_validate_stream_for_sink(aconnector,
++		new_stream = create_validate_stream_for_sink(connector,
+ 							     &new_crtc_state->mode,
+ 							     dm_new_conn_state,
+ 							     dm_old_crtc_state->stream);
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+index 20ad72d1b0d9b3..9603352ee09491 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+@@ -987,7 +987,7 @@ int amdgpu_dm_process_dmub_set_config_sync(struct dc_context *ctx, unsigned int
+ 					struct set_config_cmd_payload *payload, enum set_config_status *operation_result);
+ 
+ struct dc_stream_state *
+-	create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
++	create_validate_stream_for_sink(struct drm_connector *connector,
+ 					const struct drm_display_mode *drm_mode,
+ 					const struct dm_connector_state *dm_state,
+ 					const struct dc_stream_state *old_stream);
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+index 98e88903d07d52..15d94d2a0e2fb3 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+@@ -1145,7 +1145,7 @@ static int amdgpu_current_colorspace_show(struct seq_file *m, void *data)
+ 	case COLOR_SPACE_2020_RGB_FULLRANGE:
+ 		seq_puts(m, "BT2020_RGB");
+ 		break;
+-	case COLOR_SPACE_2020_YCBCR:
++	case COLOR_SPACE_2020_YCBCR_LIMITED:
+ 		seq_puts(m, "BT2020_YCC");
+ 		break;
+ 	default:
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+index fca0c31e14d8fc..92158009cfa739 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+@@ -1646,7 +1646,6 @@ int pre_validate_dsc(struct drm_atomic_state *state,
+ 
+ 		if (ind >= 0) {
+ 			struct drm_connector *connector;
+-			struct amdgpu_dm_connector *aconnector;
+ 			struct drm_connector_state *drm_new_conn_state;
+ 			struct dm_connector_state *dm_new_conn_state;
+ 			struct dm_crtc_state *dm_old_crtc_state;
+@@ -1654,15 +1653,14 @@ int pre_validate_dsc(struct drm_atomic_state *state,
+ 			connector =
+ 				amdgpu_dm_find_first_crtc_matching_connector(state,
+ 									     state->crtcs[ind].ptr);
+-			aconnector = to_amdgpu_dm_connector(connector);
+ 			drm_new_conn_state =
+ 				drm_atomic_get_new_connector_state(state,
+-								   &aconnector->base);
++								   connector);
+ 			dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
+ 			dm_old_crtc_state = to_dm_crtc_state(state->crtcs[ind].old_state);
+ 
+ 			local_dc_state->streams[i] =
+-				create_validate_stream_for_sink(aconnector,
++				create_validate_stream_for_sink(connector,
+ 								&state->crtcs[ind].new_state->mode,
+ 								dm_new_conn_state,
+ 								dm_old_crtc_state->stream);
+diff --git a/drivers/gpu/drm/amd/display/dc/basics/dc_common.c b/drivers/gpu/drm/amd/display/dc/basics/dc_common.c
+index b2fc4f8e648250..a51c2701da247f 100644
+--- a/drivers/gpu/drm/amd/display/dc/basics/dc_common.c
++++ b/drivers/gpu/drm/amd/display/dc/basics/dc_common.c
+@@ -40,7 +40,8 @@ bool is_rgb_cspace(enum dc_color_space output_color_space)
+ 	case COLOR_SPACE_YCBCR709:
+ 	case COLOR_SPACE_YCBCR601_LIMITED:
+ 	case COLOR_SPACE_YCBCR709_LIMITED:
+-	case COLOR_SPACE_2020_YCBCR:
++	case COLOR_SPACE_2020_YCBCR_LIMITED:
++	case COLOR_SPACE_2020_YCBCR_FULL:
+ 		return false;
+ 	default:
+ 		/* Add a case to switch */
+diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
+index 7d18f372ce7ab4..6bc59b7ef007b9 100644
+--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
++++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
+@@ -101,7 +101,6 @@ static void init_dig_encoder_control(struct bios_parser *bp)
+ 		bp->cmd_tbl.dig_encoder_control = encoder_control_digx_v1_5;
+ 		break;
+ 	default:
+-		dm_output_to_console("Don't have dig_encoder_control for v%d\n", version);
+ 		bp->cmd_tbl.dig_encoder_control = encoder_control_fallback;
+ 		break;
+ 	}
+@@ -238,7 +237,6 @@ static void init_transmitter_control(struct bios_parser *bp)
+ 		bp->cmd_tbl.transmitter_control = transmitter_control_v1_7;
+ 		break;
+ 	default:
+-		dm_output_to_console("Don't have transmitter_control for v%d\n", crev);
+ 		bp->cmd_tbl.transmitter_control = transmitter_control_fallback;
+ 		break;
+ 	}
+@@ -408,8 +406,6 @@ static void init_set_pixel_clock(struct bios_parser *bp)
+ 		bp->cmd_tbl.set_pixel_clock = set_pixel_clock_v7;
+ 		break;
+ 	default:
+-		dm_output_to_console("Don't have set_pixel_clock for v%d\n",
+-			 BIOS_CMD_TABLE_PARA_REVISION(setpixelclock));
+ 		bp->cmd_tbl.set_pixel_clock = set_pixel_clock_fallback;
+ 		break;
+ 	}
+@@ -554,7 +550,6 @@ static void init_set_crtc_timing(struct bios_parser *bp)
+ 			set_crtc_using_dtd_timing_v3;
+ 		break;
+ 	default:
+-		dm_output_to_console("Don't have set_crtc_timing for v%d\n", dtd_version);
+ 		bp->cmd_tbl.set_crtc_timing = NULL;
+ 		break;
+ 	}
+@@ -671,8 +666,6 @@ static void init_enable_crtc(struct bios_parser *bp)
+ 		bp->cmd_tbl.enable_crtc = enable_crtc_v1;
+ 		break;
+ 	default:
+-		dm_output_to_console("Don't have enable_crtc for v%d\n",
+-			 BIOS_CMD_TABLE_PARA_REVISION(enablecrtc));
+ 		bp->cmd_tbl.enable_crtc = NULL;
+ 		break;
+ 	}
+@@ -864,8 +857,6 @@ static void init_set_dce_clock(struct bios_parser *bp)
+ 		bp->cmd_tbl.set_dce_clock = set_dce_clock_v2_1;
+ 		break;
+ 	default:
+-		dm_output_to_console("Don't have set_dce_clock for v%d\n",
+-			 BIOS_CMD_TABLE_PARA_REVISION(setdceclock));
+ 		bp->cmd_tbl.set_dce_clock = NULL;
+ 		break;
+ 	}
+diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
+index 73458e2951034a..df8139bda142bf 100644
+--- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
++++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
+@@ -87,8 +87,7 @@ bool dal_bios_parser_init_cmd_tbl_helper2(
+ 		return true;
+ 
+ 	default:
+-		/* Unsupported DCE */
+-		BREAK_TO_DEBUGGER();
++		*h = dal_cmd_tbl_helper_dce112_get_table2();
+ 		return false;
+ 	}
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
+index a0fb4481d2f1b1..e4d22f74f98691 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
+@@ -130,7 +130,7 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
+ 	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ 	struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
+ 	struct dc *dc = clk_mgr_base->ctx->dc;
+-	int display_count;
++	int display_count = 0;
+ 	bool update_dppclk = false;
+ 	bool update_dispclk = false;
+ 	bool dpp_clock_lowered = false;
+@@ -194,8 +194,6 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
+ 	// workaround: Limit dppclk to 100Mhz to avoid lower eDP panel switch to plus 4K monitor underflow.
+ 	if (new_clocks->dppclk_khz < MIN_DPP_DISP_CLK)
+ 		new_clocks->dppclk_khz = MIN_DPP_DISP_CLK;
+-	if (new_clocks->dispclk_khz < MIN_DPP_DISP_CLK)
+-		new_clocks->dispclk_khz = MIN_DPP_DISP_CLK;
+ 
+ 	if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) {
+ 		if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz)
+@@ -204,15 +202,19 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
+ 		update_dppclk = true;
+ 	}
+ 
+-	if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
+-		/* No need to apply the w/a if we haven't taken over from bios yet */
+-		if (clk_mgr_base->clks.dispclk_khz)
+-			dcn315_disable_otg_wa(clk_mgr_base, context, true);
++	if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz) &&
++	    (new_clocks->dispclk_khz > 0 || (safe_to_lower && display_count == 0))) {
++		int requested_dispclk_khz = new_clocks->dispclk_khz;
+ 
++		dcn315_disable_otg_wa(clk_mgr_base, context, true);
++
++		/* Clamp the requested clock to the minimum limit before handing it to PMFW. */
++		if (dc->debug.min_disp_clk_khz > 0 && requested_dispclk_khz < dc->debug.min_disp_clk_khz)
++			requested_dispclk_khz = dc->debug.min_disp_clk_khz;
++
++		dcn315_smu_set_dispclk(clk_mgr, requested_dispclk_khz);
+ 		clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
+-		dcn315_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
+-		if (clk_mgr_base->clks.dispclk_khz)
+-			dcn315_disable_otg_wa(clk_mgr_base, context, false);
++		dcn315_disable_otg_wa(clk_mgr_base, context, false);
+ 
+ 		update_dispclk = true;
+ 	}
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
+index c3e50c3aaa609e..49efea0c8fcffa 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
+@@ -140,7 +140,7 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base,
+ 	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ 	struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
+ 	struct dc *dc = clk_mgr_base->ctx->dc;
+-	int display_count;
++	int display_count = 0;
+ 	bool update_dppclk = false;
+ 	bool update_dispclk = false;
+ 	bool dpp_clock_lowered = false;
+@@ -201,8 +201,6 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base,
+ 	// workaround: Limit dppclk to 100Mhz to avoid lower eDP panel switch to plus 4K monitor underflow.
+ 	if (new_clocks->dppclk_khz < 100000)
+ 		new_clocks->dppclk_khz = 100000;
+-	if (new_clocks->dispclk_khz < 100000)
+-		new_clocks->dispclk_khz = 100000;
+ 
+ 	if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) {
+ 		if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz)
+@@ -211,11 +209,18 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base,
+ 		update_dppclk = true;
+ 	}
+ 
+-	if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
++	if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz) &&
++	    (new_clocks->dispclk_khz > 0 || (safe_to_lower && display_count == 0))) {
++		int requested_dispclk_khz = new_clocks->dispclk_khz;
++
+ 		dcn316_disable_otg_wa(clk_mgr_base, context, safe_to_lower, true);
+ 
++		/* Clamp the requested clock to the minimum limit before handing it to PMFW. */
++		if (dc->debug.min_disp_clk_khz > 0 && requested_dispclk_khz < dc->debug.min_disp_clk_khz)
++			requested_dispclk_khz = dc->debug.min_disp_clk_khz;
++
++		dcn316_smu_set_dispclk(clk_mgr, requested_dispclk_khz);
+ 		clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
+-		dcn316_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
+ 		dcn316_disable_otg_wa(clk_mgr_base, context, safe_to_lower, false);
+ 
+ 		update_dispclk = true;
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
+index 7d0d8852ce8d27..a4ac601a30c350 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
+@@ -452,14 +452,19 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
+ 		update_dppclk = true;
+ 	}
+ 
+-	if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
++	if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz) &&
++	    (new_clocks->dispclk_khz > 0 || (safe_to_lower && display_count == 0))) {
++		int requested_dispclk_khz = new_clocks->dispclk_khz;
++
+ 		dcn35_disable_otg_wa(clk_mgr_base, context, safe_to_lower, true);
+ 
+-		if (dc->debug.min_disp_clk_khz > 0 && new_clocks->dispclk_khz < dc->debug.min_disp_clk_khz)
+-			new_clocks->dispclk_khz = dc->debug.min_disp_clk_khz;
++		/* Clamp the requested clock to the minimum limit before handing it to PMFW. */
++		if (dc->debug.min_disp_clk_khz > 0 && requested_dispclk_khz < dc->debug.min_disp_clk_khz)
++			requested_dispclk_khz = dc->debug.min_disp_clk_khz;
+ 
++		dcn35_smu_set_dispclk(clk_mgr, requested_dispclk_khz);
+ 		clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
+-		dcn35_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
++
+ 		dcn35_disable_otg_wa(clk_mgr_base, context, safe_to_lower, false);
+ 
+ 		update_dispclk = true;
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
+index 8cfc5f4359374d..313e52997596a0 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
+@@ -24,6 +24,8 @@
+ 
+ #include "dml/dcn401/dcn401_fpu.h"
+ 
++#define DCN_BASE__INST0_SEG1                       0x000000C0
++
+ #define mmCLK01_CLK0_CLK_PLL_REQ                        0x16E37
+ #define mmCLK01_CLK0_CLK0_DFS_CNTL                      0x16E69
+ #define mmCLK01_CLK0_CLK1_DFS_CNTL                      0x16E6C
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index 216b525bd75e79..a99d3e2256f196 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -276,6 +276,7 @@ static bool create_links(
+ 		link->link_id.type = OBJECT_TYPE_CONNECTOR;
+ 		link->link_id.id = CONNECTOR_ID_VIRTUAL;
+ 		link->link_id.enum_id = ENUM_ID_1;
++		link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
+ 		link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);
+ 
+ 		if (!link->link_enc) {
+@@ -438,9 +439,12 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
+ 	 * Don't adjust DRR while there's bandwidth optimizations pending to
+ 	 * avoid conflicting with firmware updates.
+ 	 */
+-	if (dc->ctx->dce_version > DCE_VERSION_MAX)
+-		if (dc->optimized_required || dc->wm_optimized_required)
++	if (dc->ctx->dce_version > DCE_VERSION_MAX) {
++		if (dc->optimized_required || dc->wm_optimized_required) {
++			stream->adjust.timing_adjust_pending = true;
+ 			return false;
++		}
++	}
+ 
+ 	dc_exit_ips_for_hw_access(dc);
+ 
+@@ -452,6 +456,7 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
+ 
+ 	if (dc->caps.max_v_total != 0 &&
+ 		(adjust->v_total_max > dc->caps.max_v_total || adjust->v_total_min > dc->caps.max_v_total)) {
++		stream->adjust.timing_adjust_pending = false;
+ 		if (adjust->allow_otg_v_count_halt)
+ 			return set_long_vtotal(dc, stream, adjust);
+ 		else
+@@ -465,7 +470,7 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
+ 			dc->hwss.set_drr(&pipe,
+ 					1,
+ 					*adjust);
+-
++			stream->adjust.timing_adjust_pending = false;
+ 			return true;
+ 		}
+ 	}
+@@ -2975,8 +2980,14 @@ static void copy_stream_update_to_stream(struct dc *dc,
+ 	if (update->vrr_active_fixed)
+ 		stream->vrr_active_fixed = *update->vrr_active_fixed;
+ 
+-	if (update->crtc_timing_adjust)
++	if (update->crtc_timing_adjust) {
++		if (stream->adjust.v_total_min != update->crtc_timing_adjust->v_total_min ||
++			stream->adjust.v_total_max != update->crtc_timing_adjust->v_total_max ||
++			stream->adjust.timing_adjust_pending)
++			update->crtc_timing_adjust->timing_adjust_pending = true;
+ 		stream->adjust = *update->crtc_timing_adjust;
++		update->crtc_timing_adjust->timing_adjust_pending = false;
++	}
+ 
+ 	if (update->dpms_off)
+ 		stream->dpms_off = *update->dpms_off;
+@@ -4734,7 +4745,8 @@ static bool full_update_required(struct dc *dc,
+ 			stream_update->lut3d_func ||
+ 			stream_update->pending_test_pattern ||
+ 			stream_update->crtc_timing_adjust ||
+-			stream_update->scaler_sharpener_update))
++			stream_update->scaler_sharpener_update ||
++			stream_update->hw_cursor_req))
+ 		return true;
+ 
+ 	if (stream) {
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
+index bb766c2a74176a..d62b00314682fe 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
+@@ -176,7 +176,7 @@ static bool is_ycbcr2020_type(
+ {
+ 	bool ret = false;
+ 
+-	if (color_space == COLOR_SPACE_2020_YCBCR)
++	if (color_space == COLOR_SPACE_2020_YCBCR_LIMITED || color_space == COLOR_SPACE_2020_YCBCR_FULL)
+ 		ret = true;
+ 	return ret;
+ }
+@@ -247,7 +247,8 @@ void color_space_to_black_color(
+ 	case COLOR_SPACE_YCBCR709_BLACK:
+ 	case COLOR_SPACE_YCBCR601_LIMITED:
+ 	case COLOR_SPACE_YCBCR709_LIMITED:
+-	case COLOR_SPACE_2020_YCBCR:
++	case COLOR_SPACE_2020_YCBCR_LIMITED:
++	case COLOR_SPACE_2020_YCBCR_FULL:
+ 		*black_color = black_color_format[BLACK_COLOR_FORMAT_YUV_CV];
+ 		break;
+ 
+@@ -508,6 +509,7 @@ void set_p_state_switch_method(
+ 	if (!dc->ctx || !dc->ctx->dmub_srv || !pipe_ctx || !vba)
+ 		return;
+ 
++	pipe_ctx->p_state_type = P_STATE_UNKNOWN;
+ 	if (vba->DRAMClockChangeSupport[vba->VoltageLevel][vba->maxMpcComb] !=
+ 			dm_dram_clock_change_unsupported) {
+ 		/* MCLK switching is supported */
+@@ -554,6 +556,21 @@ void set_p_state_switch_method(
+ 	}
+ }
+ 
++void set_drr_and_clear_adjust_pending(
++		struct pipe_ctx *pipe_ctx,
++		struct dc_stream_state *stream,
++		struct drr_params *params)
++{
++	/* params can be NULL. */
++	if (pipe_ctx && pipe_ctx->stream_res.tg &&
++			pipe_ctx->stream_res.tg->funcs->set_drr)
++		pipe_ctx->stream_res.tg->funcs->set_drr(
++				pipe_ctx->stream_res.tg, params);
++
++	if (stream)
++		stream->adjust.timing_adjust_pending = false;
++}
++
+ void get_fams2_visual_confirm_color(
+ 		struct dc *dc,
+ 		struct dc_state *context,
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+index bfcbbea377298f..6dbf139c51f727 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+@@ -4215,7 +4215,7 @@ static void set_avi_info_frame(
+ 		break;
+ 	case COLOR_SPACE_2020_RGB_FULLRANGE:
+ 	case COLOR_SPACE_2020_RGB_LIMITEDRANGE:
+-	case COLOR_SPACE_2020_YCBCR:
++	case COLOR_SPACE_2020_YCBCR_LIMITED:
+ 		hdmi_info.bits.EC0_EC2 = COLORIMETRYEX_BT2020RGBYCBCR;
+ 		hdmi_info.bits.C0_C1   = COLORIMETRY_EXTENDED;
+ 		break;
+@@ -4229,7 +4229,7 @@ static void set_avi_info_frame(
+ 		break;
+ 	}
+ 
+-	if (pixel_encoding && color_space == COLOR_SPACE_2020_YCBCR &&
++	if (pixel_encoding && color_space == COLOR_SPACE_2020_YCBCR_LIMITED &&
+ 			stream->out_transfer_func.tf == TRANSFER_FUNCTION_GAMMA22) {
+ 		hdmi_info.bits.EC0_EC2 = 0;
+ 		hdmi_info.bits.C0_C1 = COLORIMETRY_ITU709;
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+index 41bd95e9177a41..223c3d55544b28 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+@@ -959,6 +959,14 @@ union dp_128b_132b_supported_lttpr_link_rates {
+ 	uint8_t raw;
+ };
+ 
++union dp_alpm_lttpr_cap {
++	struct {
++		uint8_t AUX_LESS_ALPM_SUPPORTED	:1;
++		uint8_t RESERVED				:7;
++	} bits;
++	uint8_t raw;
++};
++
+ union dp_sink_video_fallback_formats {
+ 	struct {
+ 		uint8_t dp_1024x768_60Hz_24bpp_support	:1;
+@@ -1103,6 +1111,7 @@ struct dc_lttpr_caps {
+ 	uint8_t max_ext_timeout;
+ 	union dp_main_link_channel_coding_lttpr_cap main_link_channel_coding;
+ 	union dp_128b_132b_supported_lttpr_link_rates supported_128b_132b_rates;
++	union dp_alpm_lttpr_cap alpm;
+ 	uint8_t aux_rd_interval[MAX_REPEATER_CNT - 1];
+ };
+ 
+@@ -1352,6 +1361,9 @@ struct dp_trace {
+ #ifndef DPCD_MAX_UNCOMPRESSED_PIXEL_RATE_CAP
+ #define DPCD_MAX_UNCOMPRESSED_PIXEL_RATE_CAP    0x221c
+ #endif
++#ifndef DP_LTTPR_ALPM_CAPABILITIES
++#define DP_LTTPR_ALPM_CAPABILITIES              0xF0009
++#endif
+ #ifndef DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE
+ #define DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE	0x50
+ #endif
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+index c10567ec1c8199..6fd94c5f6da52a 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+@@ -641,7 +641,8 @@ enum dc_color_space {
+ 	COLOR_SPACE_YCBCR709_LIMITED,
+ 	COLOR_SPACE_2020_RGB_FULLRANGE,
+ 	COLOR_SPACE_2020_RGB_LIMITEDRANGE,
+-	COLOR_SPACE_2020_YCBCR,
++	COLOR_SPACE_2020_YCBCR_LIMITED,
++	COLOR_SPACE_2020_YCBCR_FULL,
+ 	COLOR_SPACE_ADOBERGB,
+ 	COLOR_SPACE_DCIP3,
+ 	COLOR_SPACE_DISPLAYNATIVE,
+@@ -649,6 +650,7 @@ enum dc_color_space {
+ 	COLOR_SPACE_APPCTRL,
+ 	COLOR_SPACE_CUSTOMPOINTS,
+ 	COLOR_SPACE_YCBCR709_BLACK,
++	COLOR_SPACE_2020_YCBCR = COLOR_SPACE_2020_YCBCR_LIMITED,
+ };
+ 
+ enum dc_dither_option {
+@@ -1000,6 +1002,7 @@ struct dc_crtc_timing_adjust {
+ 	uint32_t v_total_mid;
+ 	uint32_t v_total_mid_frame_num;
+ 	uint32_t allow_otg_v_count_halt;
++	uint8_t timing_adjust_pending;
+ };
+ 
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
+index c8bdbbba44ef9d..1aca9e96c474fd 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
+@@ -1009,6 +1009,13 @@ struct psr_settings {
+ 	unsigned int psr_sdp_transmit_line_num_deadline;
+ 	uint8_t force_ffu_mode;
+ 	unsigned int psr_power_opt;
++
++	/**
++	 * Some panels cannot handle the idle pattern during PSR entry,
++	 * so power down the phy before disabling the stream to avoid
++	 * sending the idle pattern.
++	 */
++	uint8_t power_down_phy_before_disable_stream;
+ };
+ 
+ enum replay_coasting_vtotal_type {
+diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
+index 0b889004509ad0..62402c7be0a5ea 100644
+--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
++++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
+@@ -580,9 +580,6 @@ static void dccg401_set_dpstreamclk(
+ 		int otg_inst,
+ 		int dp_hpo_inst)
+ {
+-	/* set the dtbclk_p source */
+-	dccg401_set_dtbclk_p_src(dccg, src, otg_inst);
+-
+ 	/* enabled to select one of the DTBCLKs for pipe */
+ 	if (src == REFCLK)
+ 		dccg401_disable_dpstreamclk(dccg, dp_hpo_inst);
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+index 5c2825bc9a8766..654b919465f082 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+@@ -420,7 +420,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
+ 			dynamic_range_rgb = 1; /*limited range*/
+ 			break;
+ 		case COLOR_SPACE_2020_RGB_FULLRANGE:
+-		case COLOR_SPACE_2020_YCBCR:
++		case COLOR_SPACE_2020_YCBCR_LIMITED:
+ 		case COLOR_SPACE_XR_RGB:
+ 		case COLOR_SPACE_MSREF_SCRGB:
+ 		case COLOR_SPACE_ADOBERGB:
+@@ -432,6 +432,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
+ 		case COLOR_SPACE_APPCTRL:
+ 		case COLOR_SPACE_CUSTOMPOINTS:
+ 		case COLOR_SPACE_UNKNOWN:
++		default:
+ 			/* do nothing */
+ 			break;
+ 		}
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+index cae18f8c1c9a08..8821153d0ac3ba 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+@@ -419,6 +419,10 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
+ 	copy_settings_data->relock_delay_frame_cnt = 0;
+ 	if (link->dpcd_caps.sink_dev_id == DP_BRANCH_DEVICE_ID_001CF8)
+ 		copy_settings_data->relock_delay_frame_cnt = 2;
++
++	copy_settings_data->power_down_phy_before_disable_stream =
++		link->psr_settings.power_down_phy_before_disable_stream;
++
+ 	copy_settings_data->dsc_slice_height = psr_context->dsc_slice_height;
+ 
+ 	dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c
+index f496e952ceecb8..f8f1e98f646e6b 100644
+--- a/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c
++++ b/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c
+@@ -393,7 +393,7 @@ void enc1_stream_encoder_dp_set_stream_attribute(
+ 		break;
+ 	case COLOR_SPACE_2020_RGB_LIMITEDRANGE:
+ 	case COLOR_SPACE_2020_RGB_FULLRANGE:
+-	case COLOR_SPACE_2020_YCBCR:
++	case COLOR_SPACE_2020_YCBCR_LIMITED:
+ 	case COLOR_SPACE_XR_RGB:
+ 	case COLOR_SPACE_MSREF_SCRGB:
+ 	case COLOR_SPACE_ADOBERGB:
+@@ -406,6 +406,7 @@ void enc1_stream_encoder_dp_set_stream_attribute(
+ 	case COLOR_SPACE_CUSTOMPOINTS:
+ 	case COLOR_SPACE_UNKNOWN:
+ 	case COLOR_SPACE_YCBCR709_BLACK:
++	default:
+ 		/* do nothing */
+ 		break;
+ 	}
+diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c
+index 0a27e0942a1234..0008816cf15536 100644
+--- a/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c
++++ b/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c
+@@ -634,7 +634,7 @@ void enc401_stream_encoder_dp_set_stream_attribute(
+ 		break;
+ 	case COLOR_SPACE_2020_RGB_LIMITEDRANGE:
+ 	case COLOR_SPACE_2020_RGB_FULLRANGE:
+-	case COLOR_SPACE_2020_YCBCR:
++	case COLOR_SPACE_2020_YCBCR_LIMITED:
+ 	case COLOR_SPACE_XR_RGB:
+ 	case COLOR_SPACE_MSREF_SCRGB:
+ 	case COLOR_SPACE_ADOBERGB:
+@@ -647,6 +647,7 @@ void enc401_stream_encoder_dp_set_stream_attribute(
+ 	case COLOR_SPACE_CUSTOMPOINTS:
+ 	case COLOR_SPACE_UNKNOWN:
+ 	case COLOR_SPACE_YCBCR709_BLACK:
++	default:
+ 		/* do nothing */
+ 		break;
+ 	}
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
+index 47d785204f29cb..c90dee4e9116ae 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
+@@ -195,9 +195,9 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
+ 	.dcn_downspread_percent = 0.5,
+ 	.gpuvm_min_page_size_bytes = 4096,
+ 	.hostvm_min_page_size_bytes = 4096,
+-	.do_urgent_latency_adjustment = 0,
++	.do_urgent_latency_adjustment = 1,
+ 	.urgent_latency_adjustment_fabric_clock_component_us = 0,
+-	.urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
++	.urgent_latency_adjustment_fabric_clock_reference_mhz = 3000,
+ };
+ 
+ void dcn35_build_wm_range_table_fpu(struct clk_mgr *clk_mgr)
+@@ -367,6 +367,8 @@ void dcn35_update_bw_bounding_box_fpu(struct dc *dc,
+ 				clock_limits[i].socclk_mhz;
+ 			dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].memclk_mhz =
+ 				clk_table->entries[i].memclk_mhz * clk_table->entries[i].wck_ratio;
++
++			dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dram_speed_mts = clock_limits[i].dram_speed_mts;
+ 			dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dtbclk_mhz =
+ 				clock_limits[i].dtbclk_mhz;
+ 			dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dcfclk_levels =
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
+index a201dbb743d791..79d921adc21531 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
+@@ -401,6 +401,7 @@ void dcn351_update_bw_bounding_box_fpu(struct dc *dc,
+ 				clock_limits[i].socclk_mhz;
+ 			dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].memclk_mhz =
+ 				clk_table->entries[i].memclk_mhz * clk_table->entries[i].wck_ratio;
++			dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dram_speed_mts = clock_limits[i].dram_speed_mts;
+ 			dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dtbclk_mhz =
+ 				clock_limits[i].dtbclk_mhz;
+ 			dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dcfclk_levels =
+diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
+index e3e4f40bd41238..dcbe327209d5d7 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
++++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
+@@ -221,7 +221,9 @@ static bool dml21_mode_check_and_programming(const struct dc *in_dc, struct dc_s
+ 	if (!result)
+ 		return false;
+ 
++	DC_FP_START();
+ 	result = dml2_build_mode_programming(mode_programming);
++	DC_FP_END();
+ 	if (!result)
+ 		return false;
+ 
+@@ -271,7 +273,9 @@ static bool dml21_check_mode_support(const struct dc *in_dc, struct dc_state *co
+ 	mode_support->dml2_instance = dml_init->dml2_instance;
+ 	dml21_map_dc_state_into_dml_display_cfg(in_dc, context, dml_ctx);
+ 	dml_ctx->v21.mode_programming.dml2_instance->scratch.build_mode_programming_locals.mode_programming_params.programming = dml_ctx->v21.mode_programming.programming;
++	DC_FP_START();
+ 	is_supported = dml2_check_mode_supported(mode_support);
++	DC_FP_END();
+ 	if (!is_supported)
+ 		return false;
+ 
+@@ -282,16 +286,12 @@ bool dml21_validate(const struct dc *in_dc, struct dc_state *context, struct dml
+ {
+ 	bool out = false;
+ 
+-	DC_FP_START();
+-
+ 	/* Use dml_validate_only for fast_validate path */
+ 	if (fast_validate)
+ 		out = dml21_check_mode_support(in_dc, context, dml_ctx);
+ 	else
+ 		out = dml21_mode_check_and_programming(in_dc, context, dml_ctx);
+ 
+-	DC_FP_END();
+-
+ 	return out;
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
+index e2a3764d9d181a..0090b7bc232bfd 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
++++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
+@@ -3630,13 +3630,12 @@ static unsigned int CalculateMaxVStartup(
+ 	double line_time_us = (double)timing->h_total / ((double)timing->pixel_clock_khz / 1000);
+ 	unsigned int vblank_actual = timing->v_total - timing->v_active;
+ 	unsigned int vblank_nom_default_in_line = (unsigned int)math_floor2((double)vblank_nom_default_us / line_time_us, 1.0);
+-	unsigned int vblank_nom_input = (unsigned int)math_min2(timing->vblank_nom, vblank_nom_default_in_line);
+-	unsigned int vblank_avail = (vblank_nom_input == 0) ? vblank_nom_default_in_line : vblank_nom_input;
++	unsigned int vblank_avail = (timing->vblank_nom == 0) ? vblank_nom_default_in_line : (unsigned int)timing->vblank_nom;
+ 
+ 	vblank_size = (unsigned int)math_min2(vblank_actual, vblank_avail);
+ 
+ 	if (timing->interlaced && !ptoi_supported)
+-		max_vstartup_lines = (unsigned int)(math_floor2(vblank_size / 2.0, 1.0));
++		max_vstartup_lines = (unsigned int)(math_floor2((vblank_size - 1) / 2.0, 1.0));
+ 	else
+ 		max_vstartup_lines = vblank_size - (unsigned int)math_max2(1.0, math_ceil2(write_back_delay_us / line_time_us, 1.0));
+ #ifdef __DML_VBA_DEBUG__
+diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
+index 0f944fcfd5a5bb..785226945699dd 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
++++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
+@@ -159,6 +159,7 @@ struct dml2_clks_table_entry {
+ 	unsigned int dtbclk_mhz;
+ 	unsigned int dispclk_mhz;
+ 	unsigned int dppclk_mhz;
++	unsigned int dram_speed_mts; /* which is based on wck_ratio */
+ };
+ 
+ struct dml2_clks_num_entries {
+diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
+index 40acebd13e46dc..abf439e743f233 100644
+--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
++++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
+@@ -425,11 +425,6 @@ bool dpp3_get_optimal_number_of_taps(
+ 	int min_taps_y, min_taps_c;
+ 	enum lb_memory_config lb_config;
+ 
+-	if (scl_data->viewport.width > scl_data->h_active &&
+-		dpp->ctx->dc->debug.max_downscale_src_width != 0 &&
+-		scl_data->viewport.width > dpp->ctx->dc->debug.max_downscale_src_width)
+-		return false;
+-
+ 	/*
+ 	 * Set default taps if none are provided
+ 	 * From programming guide: taps = min{ ceil(2*H_RATIO,1), 8} for downscaling
+@@ -467,6 +462,12 @@ bool dpp3_get_optimal_number_of_taps(
+ 	else
+ 		scl_data->taps.h_taps_c = in_taps->h_taps_c;
+ 
++	// Avoid null data in the scl data with this early return; proceed with the non-adaptive calculation first
++	if (scl_data->viewport.width > scl_data->h_active &&
++		dpp->ctx->dc->debug.max_downscale_src_width != 0 &&
++		scl_data->viewport.width > dpp->ctx->dc->debug.max_downscale_src_width)
++		return false;
++
+ 	/*Ensure we can support the requested number of vtaps*/
+ 	min_taps_y = dc_fixpt_ceil(scl_data->ratios.vert);
+ 	min_taps_c = dc_fixpt_ceil(scl_data->ratios.vert_c);
+diff --git a/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_stream_encoder.c
+index 678db949cfe3ce..759b453385c46b 100644
+--- a/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_stream_encoder.c
++++ b/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_stream_encoder.c
+@@ -323,7 +323,7 @@ static void dcn31_hpo_dp_stream_enc_set_stream_attribute(
+ 		break;
+ 	case COLOR_SPACE_2020_RGB_LIMITEDRANGE:
+ 	case COLOR_SPACE_2020_RGB_FULLRANGE:
+-	case COLOR_SPACE_2020_YCBCR:
++	case COLOR_SPACE_2020_YCBCR_LIMITED:
+ 	case COLOR_SPACE_XR_RGB:
+ 	case COLOR_SPACE_MSREF_SCRGB:
+ 	case COLOR_SPACE_ADOBERGB:
+@@ -336,6 +336,7 @@ static void dcn31_hpo_dp_stream_enc_set_stream_attribute(
+ 	case COLOR_SPACE_CUSTOMPOINTS:
+ 	case COLOR_SPACE_UNKNOWN:
+ 	case COLOR_SPACE_YCBCR709_BLACK:
++	default:
+ 		/* do nothing */
+ 		break;
+ 	}
+diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
+index 4fbed0298adfa7..59457ca24e1dc3 100644
+--- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
+@@ -1064,7 +1064,8 @@ void dce110_edp_backlight_control(
+ 			DC_LOG_DC("edp_receiver_ready_T9 skipped\n");
+ 	}
+ 
+-	if (!enable && link->dpcd_sink_ext_caps.bits.oled) {
++	if (!enable) {
++		/* follow OEM panel config's requirement */
+ 		pre_T11_delay += link->panel_config.pps.extra_pre_t11_ms;
+ 		msleep(pre_T11_delay);
+ 	}
+@@ -1653,9 +1654,7 @@ enum dc_status dce110_apply_single_controller_ctx_to_hw(
+ 
+ 	params.vertical_total_min = stream->adjust.v_total_min;
+ 	params.vertical_total_max = stream->adjust.v_total_max;
+-	if (pipe_ctx->stream_res.tg->funcs->set_drr)
+-		pipe_ctx->stream_res.tg->funcs->set_drr(
+-			pipe_ctx->stream_res.tg, &params);
++	set_drr_and_clear_adjust_pending(pipe_ctx, stream, &params);
+ 
+ 	// DRR should set trigger event to monitor surface update event
+ 	if (stream->adjust.v_total_min != 0 && stream->adjust.v_total_max != 0)
+@@ -2103,8 +2102,7 @@ static void set_drr(struct pipe_ctx **pipe_ctx,
+ 		struct timing_generator *tg = pipe_ctx[i]->stream_res.tg;
+ 
+ 		if ((tg != NULL) && tg->funcs) {
+-			if (tg->funcs->set_drr)
+-				tg->funcs->set_drr(tg, &params);
++			set_drr_and_clear_adjust_pending(pipe_ctx[i], pipe_ctx[i]->stream, &params);
+ 			if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
+ 				if (tg->funcs->set_static_screen_control)
+ 					tg->funcs->set_static_screen_control(
+diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
+index d725af14af371a..00be0b26689d3a 100644
+--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
+@@ -1112,9 +1112,7 @@ static void dcn10_reset_back_end_for_pipe(
+ 		pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
+ 
+ 		pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
+-		if (pipe_ctx->stream_res.tg->funcs->set_drr)
+-			pipe_ctx->stream_res.tg->funcs->set_drr(
+-					pipe_ctx->stream_res.tg, NULL);
++		set_drr_and_clear_adjust_pending(pipe_ctx, pipe_ctx->stream, NULL);
+ 		if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))
+ 			pipe_ctx->stream->link->phy_state.symclk_ref_cnts.otg = 0;
+ 	}
+@@ -3217,8 +3215,7 @@ void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
+ 		struct timing_generator *tg = pipe_ctx[i]->stream_res.tg;
+ 
+ 		if ((tg != NULL) && tg->funcs) {
+-			if (tg->funcs->set_drr)
+-				tg->funcs->set_drr(tg, &params);
++			set_drr_and_clear_adjust_pending(pipe_ctx[i], pipe_ctx[i]->stream, &params);
+ 			if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
+ 				if (tg->funcs->set_static_screen_control)
+ 					tg->funcs->set_static_screen_control(
+diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
+index f5f1ccd8303cf3..9c5cdb3b80b5de 100644
+--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
+@@ -952,9 +952,7 @@ enum dc_status dcn20_enable_stream_timing(
+ 	params.vertical_total_max = stream->adjust.v_total_max;
+ 	params.vertical_total_mid = stream->adjust.v_total_mid;
+ 	params.vertical_total_mid_frame_num = stream->adjust.v_total_mid_frame_num;
+-	if (pipe_ctx->stream_res.tg->funcs->set_drr)
+-		pipe_ctx->stream_res.tg->funcs->set_drr(
+-			pipe_ctx->stream_res.tg, &params);
++	set_drr_and_clear_adjust_pending(pipe_ctx, stream, &params);
+ 
+ 	// DRR should set trigger event to monitor surface update event
+ 	if (stream->adjust.v_total_min != 0 && stream->adjust.v_total_max != 0)
+@@ -2822,9 +2820,7 @@ void dcn20_reset_back_end_for_pipe(
+ 			pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
+ 					pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
+ 
+-		if (pipe_ctx->stream_res.tg->funcs->set_drr)
+-			pipe_ctx->stream_res.tg->funcs->set_drr(
+-					pipe_ctx->stream_res.tg, NULL);
++		set_drr_and_clear_adjust_pending(pipe_ctx, pipe_ctx->stream, NULL);
+ 		/* TODO - convert symclk_ref_cnts for otg to a bit map to solve
+ 		 * the case where the same symclk is shared across multiple otg
+ 		 * instances
+diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
+index 3d4b31bd994691..9aa925a0b3b434 100644
+--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
+@@ -528,9 +528,7 @@ static void dcn31_reset_back_end_for_pipe(
+ 	if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))
+ 		pipe_ctx->stream->link->phy_state.symclk_ref_cnts.otg = 0;
+ 
+-	if (pipe_ctx->stream_res.tg->funcs->set_drr)
+-		pipe_ctx->stream_res.tg->funcs->set_drr(
+-				pipe_ctx->stream_res.tg, NULL);
++	set_drr_and_clear_adjust_pending(pipe_ctx, pipe_ctx->stream, NULL);
+ 
+ 	link = pipe_ctx->stream->link;
+ 	/* DPMS may already disable or */
+diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
+index 38755ca771401b..ca446e08f6a270 100644
+--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
+@@ -1452,8 +1452,7 @@ void dcn35_set_drr(struct pipe_ctx **pipe_ctx,
+ 					num_frames = 2 * (frame_rate % 60);
+ 				}
+ 			}
+-			if (tg->funcs->set_drr)
+-				tg->funcs->set_drr(tg, &params);
++			set_drr_and_clear_adjust_pending(pipe_ctx[i], pipe_ctx[i]->stream, &params);
+ 			if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
+ 				if (tg->funcs->set_static_screen_control)
+ 					tg->funcs->set_static_screen_control(
+diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
+index 62f1e597787e69..3279f347660cb6 100644
+--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
+@@ -844,6 +844,13 @@ enum dc_status dcn401_enable_stream_timing(
+ 				odm_slice_width, last_odm_slice_width);
+ 	}
+ 
++	/* set DTBCLK_P */
++	if (dc->res_pool->dccg->funcs->set_dtbclk_p_src) {
++		if (dc_is_dp_signal(stream->signal) || dc_is_virtual_signal(stream->signal)) {
++			dc->res_pool->dccg->funcs->set_dtbclk_p_src(dc->res_pool->dccg, DPREFCLK, pipe_ctx->stream_res.tg->inst);
++		}
++	}
++
+ 	/* HW program guide assume display already disable
+ 	 * by unplug sequence. OTG assume stop.
+ 	 */
+@@ -895,10 +902,7 @@ enum dc_status dcn401_enable_stream_timing(
+ 	}
+ 
+ 	hws->funcs.wait_for_blank_complete(pipe_ctx->stream_res.opp);
+-
+-	if (pipe_ctx->stream_res.tg->funcs->set_drr)
+-		pipe_ctx->stream_res.tg->funcs->set_drr(
+-			pipe_ctx->stream_res.tg, &params);
++	set_drr_and_clear_adjust_pending(pipe_ctx, stream, &params);
+ 
+ 	/* Event triggers and num frames initialized for DRR, but can be
+ 	 * later updated for PSR use. Note DRR trigger events are generated
+@@ -1007,8 +1011,6 @@ void dcn401_enable_stream(struct pipe_ctx *pipe_ctx)
+ 				dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk);
+ 			}
+ 		} else {
+-			/* need to set DTBCLK_P source to DPREFCLK for DP8B10B */
+-			dccg->funcs->set_dtbclk_p_src(dccg, DPREFCLK, tg->inst);
+ 			dccg->funcs->enable_symclk_se(dccg, stream_enc->stream_enc_inst,
+ 					link_enc->transmitter - TRANSMITTER_UNIPHY_A);
+ 		}
+@@ -1773,3 +1775,128 @@ void dcn401_program_outstanding_updates(struct dc *dc,
+ 	if (hubbub->funcs->program_compbuf_segments)
+ 		hubbub->funcs->program_compbuf_segments(hubbub, context->bw_ctx.bw.dcn.arb_regs.compbuf_size, true);
+ }
++
++void dcn401_reset_back_end_for_pipe(
++		struct dc *dc,
++		struct pipe_ctx *pipe_ctx,
++		struct dc_state *context)
++{
++	int i;
++	struct dc_link *link = pipe_ctx->stream->link;
++	const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
++
++	DC_LOGGER_INIT(dc->ctx->logger);
++	if (pipe_ctx->stream_res.stream_enc == NULL) {
++		pipe_ctx->stream = NULL;
++		return;
++	}
++
++	/* DPMS may already be disabled, or the dpms_off status may be
++	 * incorrect due to the fastboot feature. When the system resumes
++	 * from S4 with second screen only, dpms_off would be true but
++	 * VBIOS lit up eDP, so check the link status too.
++	 */
++	if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
++		dc->link_srv->set_dpms_off(pipe_ctx);
++	else if (pipe_ctx->stream_res.audio)
++		dc->hwss.disable_audio_stream(pipe_ctx);
++
++	/* free acquired resources */
++	if (pipe_ctx->stream_res.audio) {
++		/* disable az_endpoint */
++		pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
++
++		/* free audio */
++		if (dc->caps.dynamic_audio == true) {
++			/* we have to dynamically arbitrate the audio endpoints */
++			/* we free the resource, so is_audio_acquired needs to be reset */
++			update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
++					pipe_ctx->stream_res.audio, false);
++			pipe_ctx->stream_res.audio = NULL;
++		}
++	}
++
++	/* By the upper caller loop, the parent pipe (pipe0) will be reset last.
++	 * The back end is shared by all pipes and will be disabled only when
++	 * the parent pipe is disabled.
++	 */
++	if (pipe_ctx->top_pipe == NULL) {
++
++		dc->hwss.set_abm_immediate_disable(pipe_ctx);
++
++		pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
++
++		pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
++		if (pipe_ctx->stream_res.tg->funcs->set_odm_bypass)
++			pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
++					pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
++
++		set_drr_and_clear_adjust_pending(pipe_ctx, pipe_ctx->stream, NULL);
++
++		/* TODO - convert symclk_ref_cnts for otg to a bit map to solve
++		 * the case where the same symclk is shared across multiple otg
++		 * instances
++		 */
++		if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))
++			link->phy_state.symclk_ref_cnts.otg = 0;
++		if (link->phy_state.symclk_state == SYMCLK_ON_TX_OFF) {
++			link_hwss->disable_link_output(link,
++					&pipe_ctx->link_res, pipe_ctx->stream->signal);
++			link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF;
++		}
++
++		/* reset DTBCLK_P */
++		if (dc->res_pool->dccg->funcs->set_dtbclk_p_src)
++			dc->res_pool->dccg->funcs->set_dtbclk_p_src(dc->res_pool->dccg, REFCLK, pipe_ctx->stream_res.tg->inst);
++	}
++
++	for (i = 0; i < dc->res_pool->pipe_count; i++)
++		if (&dc->current_state->res_ctx.pipe_ctx[i] == pipe_ctx)
++			break;
++
++	if (i == dc->res_pool->pipe_count)
++		return;
++
++	/*
++	 * In case of a dangling plane, setting this to NULL unconditionally
++	 * causes failures during reset hw ctx where, if stream is NULL,
++	 * it is expected that the pipe_ctx pointers to pipes and plane are NULL.
++	 */
++	pipe_ctx->stream = NULL;
++	DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
++					pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
++}
++
++void dcn401_reset_hw_ctx_wrap(
++		struct dc *dc,
++		struct dc_state *context)
++{
++	int i;
++	struct dce_hwseq *hws = dc->hwseq;
++
++	/* Reset Back End*/
++	for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
++		struct pipe_ctx *pipe_ctx_old =
++			&dc->current_state->res_ctx.pipe_ctx[i];
++		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
++
++		if (!pipe_ctx_old->stream)
++			continue;
++
++		if (pipe_ctx_old->top_pipe || pipe_ctx_old->prev_odm_pipe)
++			continue;
++
++		if (!pipe_ctx->stream ||
++				pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
++			struct clock_source *old_clk = pipe_ctx_old->clock_source;
++
++			if (hws->funcs.reset_back_end_for_pipe)
++				hws->funcs.reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
++			if (hws->funcs.enable_stream_gating)
++				hws->funcs.enable_stream_gating(dc, pipe_ctx_old);
++			if (old_clk)
++				old_clk->funcs->cs_power_down(old_clk);
++		}
++	}
++}
+diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
+index a27e62081685d2..6256429c8a4f69 100644
+--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
++++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
+@@ -84,4 +84,11 @@ void adjust_hotspot_between_slices_for_2x_magnify(uint32_t cursor_width, struct
+ void dcn401_wait_for_det_buffer_update(struct dc *dc, struct dc_state *context, struct pipe_ctx *otg_master);
+ void dcn401_interdependent_update_lock(struct dc *dc, struct dc_state *context, bool lock);
+ void dcn401_program_outstanding_updates(struct dc *dc, struct dc_state *context);
++void dcn401_reset_back_end_for_pipe(
++		struct dc *dc,
++		struct pipe_ctx *pipe_ctx,
++		struct dc_state *context);
++void dcn401_reset_hw_ctx_wrap(
++		struct dc *dc,
++		struct dc_state *context);
+ #endif /* __DC_HWSS_DCN401_H__ */
+diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c
+index a2ca07235c83d9..d6f36b8e1a2610 100644
+--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c
++++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c
+@@ -111,7 +111,7 @@ static const struct hwseq_private_funcs dcn401_private_funcs = {
+ 	.power_down = dce110_power_down,
+ 	.enable_display_power_gating = dcn10_dummy_display_power_gating,
+ 	.blank_pixel_data = dcn20_blank_pixel_data,
+-	.reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap,
++	.reset_hw_ctx_wrap = dcn401_reset_hw_ctx_wrap,
+ 	.enable_stream_timing = dcn401_enable_stream_timing,
+ 	.edp_backlight_control = dce110_edp_backlight_control,
+ 	.setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt,
+@@ -136,7 +136,7 @@ static const struct hwseq_private_funcs dcn401_private_funcs = {
+ 	.update_mall_sel = dcn32_update_mall_sel,
+ 	.calculate_dccg_k1_k2_values = NULL,
+ 	.apply_single_controller_ctx_to_hw = dce110_apply_single_controller_ctx_to_hw,
+-	.reset_back_end_for_pipe = dcn20_reset_back_end_for_pipe,
++	.reset_back_end_for_pipe = dcn401_reset_back_end_for_pipe,
+ 	.populate_mcm_luts = NULL,
+ };
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
+index ac920562562336..9ae6259f2db175 100644
+--- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
++++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
+@@ -46,6 +46,7 @@ struct dce_hwseq;
+ struct link_resource;
+ struct dc_dmub_cmd;
+ struct pg_block_update;
++struct drr_params;
+ 
+ struct subvp_pipe_control_lock_fast_params {
+ 	struct dc *dc;
+@@ -509,6 +510,11 @@ void set_p_state_switch_method(
+ 		struct dc_state *context,
+ 		struct pipe_ctx *pipe_ctx);
+ 
++void set_drr_and_clear_adjust_pending(
++		struct pipe_ctx *pipe_ctx,
++		struct dc_stream_state *stream,
++		struct drr_params *params);
++
+ void hwss_execute_sequence(struct dc *dc,
+ 		struct block_sequence block_sequence[],
+ 		int num_steps);
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+index e1e3142cdc00ac..62fb2009b3028d 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+@@ -621,7 +621,7 @@ struct dc_state {
+ 	 */
+ 	struct bw_context bw_ctx;
+ 
+-	struct block_sequence block_sequence[50];
++	struct block_sequence block_sequence[100];
+ 	unsigned int block_sequence_steps;
+ 	struct dc_dmub_cmd dc_dmub_cmd[10];
+ 	unsigned int dmub_cmd_count;
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
+index 7a1ca1e98059b0..221645c023b502 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
+@@ -221,6 +221,7 @@ enum dentist_divider_range {
+ 	CLK_SF(CLK0_CLK_PLL_REQ, FbMult_frac, mask_sh)
+ 
+ #define CLK_REG_LIST_DCN401()	  \
++	SR(DENTIST_DISPCLK_CNTL), \
+ 	CLK_SR_DCN401(CLK0_CLK_PLL_REQ,   CLK01, 0), \
+ 	CLK_SR_DCN401(CLK0_CLK0_DFS_CNTL, CLK01, 0), \
+ 	CLK_SR_DCN401(CLK0_CLK1_DFS_CNTL,  CLK01, 0), \
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+index 0150f2581ee4c5..0c5675d1c59368 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+@@ -119,10 +119,14 @@ static const struct dpp_input_csc_matrix __maybe_unused dpp_input_csc_matrix[] =
+ 		{ 0x39a6, 0x2568, 0,      0xe0d6,
+ 		  0xeedd, 0x2568, 0xf925, 0x9a8,
+ 		  0,      0x2568, 0x43ee, 0xdbb2 } },
+-	{ COLOR_SPACE_2020_YCBCR,
++	{ COLOR_SPACE_2020_YCBCR_FULL,
+ 		{ 0x2F30, 0x2000, 0,      0xE869,
+ 		  0xEDB7, 0x2000, 0xFABC, 0xBC6,
+ 		  0,      0x2000, 0x3C34, 0xE1E6 } },
++	{ COLOR_SPACE_2020_YCBCR_LIMITED,
++		{ 0x35B9, 0x2543, 0,      0xE2B2,
++		  0xEB2F, 0x2543, 0xFA01, 0x0B1F,
++		  0,      0x2543, 0x4489, 0xDB42 } },
+ 	{ COLOR_SPACE_2020_RGB_LIMITEDRANGE,
+ 		{ 0x35E0, 0x255F, 0,      0xE2B3,
+ 		  0xEB20, 0x255F, 0xF9FD, 0xB1E,
+diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
+index 885e749cdc6e96..842636c7922b40 100644
+--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
++++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
+@@ -250,21 +250,21 @@ static uint32_t intersect_frl_link_bw_support(
+ {
+ 	uint32_t supported_bw_in_kbps = max_supported_frl_bw_in_kbps;
+ 
+-	// HDMI_ENCODED_LINK_BW bits are only valid if HDMI Link Configuration bit is 1 (FRL mode)
+-	if (hdmi_encoded_link_bw.bits.FRL_MODE) {
+-		if (hdmi_encoded_link_bw.bits.BW_48Gbps)
+-			supported_bw_in_kbps = 48000000;
+-		else if (hdmi_encoded_link_bw.bits.BW_40Gbps)
+-			supported_bw_in_kbps = 40000000;
+-		else if (hdmi_encoded_link_bw.bits.BW_32Gbps)
+-			supported_bw_in_kbps = 32000000;
+-		else if (hdmi_encoded_link_bw.bits.BW_24Gbps)
+-			supported_bw_in_kbps = 24000000;
+-		else if (hdmi_encoded_link_bw.bits.BW_18Gbps)
+-			supported_bw_in_kbps = 18000000;
+-		else if (hdmi_encoded_link_bw.bits.BW_9Gbps)
+-			supported_bw_in_kbps = 9000000;
+-	}
++	/* Skip checking the FRL_MODE bit, as certain PCONs clear
++	 * it despite supporting the link BW indicated in the other bits.
++	 */
++	if (hdmi_encoded_link_bw.bits.BW_48Gbps)
++		supported_bw_in_kbps = 48000000;
++	else if (hdmi_encoded_link_bw.bits.BW_40Gbps)
++		supported_bw_in_kbps = 40000000;
++	else if (hdmi_encoded_link_bw.bits.BW_32Gbps)
++		supported_bw_in_kbps = 32000000;
++	else if (hdmi_encoded_link_bw.bits.BW_24Gbps)
++		supported_bw_in_kbps = 24000000;
++	else if (hdmi_encoded_link_bw.bits.BW_18Gbps)
++		supported_bw_in_kbps = 18000000;
++	else if (hdmi_encoded_link_bw.bits.BW_9Gbps)
++		supported_bw_in_kbps = 9000000;
+ 
+ 	return supported_bw_in_kbps;
+ }
+@@ -945,6 +945,9 @@ bool link_decide_link_settings(struct dc_stream_state *stream,
+ 		 * TODO: add MST specific link training routine
+ 		 */
+ 		decide_mst_link_settings(link, link_setting);
++	} else if (stream->signal == SIGNAL_TYPE_VIRTUAL) {
++		link_setting->lane_count = LANE_COUNT_FOUR;
++		link_setting->link_rate = LINK_RATE_HIGH3;
+ 	} else if (link->connector_signal == SIGNAL_TYPE_EDP) {
+ 		/* enable edp link optimization for DSC eDP case */
+ 		if (stream->timing.flags.DSC) {
+@@ -967,9 +970,6 @@ bool link_decide_link_settings(struct dc_stream_state *stream,
+ 		} else {
+ 			edp_decide_link_settings(link, link_setting, req_bw);
+ 		}
+-	} else if (stream->signal == SIGNAL_TYPE_VIRTUAL) {
+-		link_setting->lane_count = LANE_COUNT_FOUR;
+-		link_setting->link_rate = LINK_RATE_HIGH3;
+ 	} else {
+ 		decide_dp_link_settings(link, link_setting, req_bw);
+ 	}
+@@ -1495,7 +1495,7 @@ static bool dpcd_read_sink_ext_caps(struct dc_link *link)
+ 
+ enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link)
+ {
+-	uint8_t lttpr_dpcd_data[8] = {0};
++	uint8_t lttpr_dpcd_data[10] = {0};
+ 	enum dc_status status;
+ 	bool is_lttpr_present;
+ 
+@@ -1545,6 +1545,10 @@ enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link)
+ 			lttpr_dpcd_data[DP_PHY_REPEATER_128B132B_RATES -
+ 							DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+ 
++	link->dpcd_caps.lttpr_caps.alpm.raw =
++			lttpr_dpcd_data[DP_LTTPR_ALPM_CAPABILITIES -
++							DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
++
+ 	/* If this chip cap is set, at least one retimer must exist in the chain
+ 	 * Override count to 1 if we receive a known bad count (0 or an invalid value) */
+ 	if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
+diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c
+index bafa52a0165a08..17c57cf98ec5c2 100644
+--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c
++++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c
+@@ -75,7 +75,8 @@ void dp_disable_link_phy(struct dc_link *link,
+ 	struct dc  *dc = link->ctx->dc;
+ 
+ 	if (!link->wa_flags.dp_keep_receiver_powered &&
+-		!link->skip_implict_edp_power_control)
++			!link->skip_implict_edp_power_control &&
++			link->type != dc_connection_none)
+ 		dpcd_write_rx_power_ctrl(link, false);
+ 
+ 	dc->hwss.disable_link_output(link, link_res, signal);
+@@ -163,8 +164,9 @@ enum dc_status dp_set_fec_ready(struct dc_link *link, const struct link_resource
+ 	} else {
+ 		if (link->fec_state == dc_link_fec_ready) {
+ 			fec_config = 0;
+-			core_link_write_dpcd(link, DP_FEC_CONFIGURATION,
+-				&fec_config, sizeof(fec_config));
++			if (link->type != dc_connection_none)
++				core_link_write_dpcd(link, DP_FEC_CONFIGURATION,
++					&fec_config, sizeof(fec_config));
+ 
+ 			link_enc->funcs->fec_set_ready(link_enc, false);
+ 			link->fec_state = dc_link_fec_not_ready;
+diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
+index 27b881f947e8b8..9385a32a471b80 100644
+--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
++++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
+@@ -1769,13 +1769,10 @@ bool perform_link_training_with_retries(
+ 			is_link_bw_min = ((cur_link_settings.link_rate <= LINK_RATE_LOW) &&
+ 				(cur_link_settings.lane_count <= LANE_COUNT_ONE));
+ 
+-			if (is_link_bw_low) {
++			if (is_link_bw_low)
+ 				DC_LOG_WARNING(
+ 					"%s: Link(%d) bandwidth too low after fallback req_bw(%d) > link_bw(%d)\n",
+ 					__func__, link->link_index, req_bw, link_bw);
+-
+-				return false;
+-			}
+ 		}
+ 
+ 		msleep(delay_between_attempts);
+diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c
+index 3bdce32a85e3c7..ae95ec48e57219 100644
+--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c
++++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c
+@@ -36,7 +36,8 @@
+ 	link->ctx->logger
+ 
+ static int32_t get_cr_training_aux_rd_interval(struct dc_link *link,
+-		const struct dc_link_settings *link_settings)
++		const struct dc_link_settings *link_settings,
++		enum lttpr_mode lttpr_mode)
+ {
+ 	union training_aux_rd_interval training_rd_interval;
+ 	uint32_t wait_in_micro_secs = 100;
+@@ -49,6 +50,8 @@ static int32_t get_cr_training_aux_rd_interval(struct dc_link *link,
+ 				DP_TRAINING_AUX_RD_INTERVAL,
+ 				(uint8_t *)&training_rd_interval,
+ 				sizeof(training_rd_interval));
++		if (lttpr_mode != LTTPR_MODE_NON_TRANSPARENT)
++			wait_in_micro_secs = 400;
+ 		if (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL)
+ 			wait_in_micro_secs = training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000;
+ 	}
+@@ -110,7 +113,6 @@ void decide_8b_10b_training_settings(
+ 	 */
+ 	lt_settings->link_settings.link_spread = link->dp_ss_off ?
+ 			LINK_SPREAD_DISABLED : LINK_SPREAD_05_DOWNSPREAD_30KHZ;
+-	lt_settings->cr_pattern_time = get_cr_training_aux_rd_interval(link, link_setting);
+ 	lt_settings->eq_pattern_time = get_eq_training_aux_rd_interval(link, link_setting);
+ 	lt_settings->pattern_for_cr = decide_cr_training_pattern(link_setting);
+ 	lt_settings->pattern_for_eq = decide_eq_training_pattern(link, link_setting);
+@@ -119,6 +121,7 @@ void decide_8b_10b_training_settings(
+ 	lt_settings->disallow_per_lane_settings = true;
+ 	lt_settings->always_match_dpcd_with_hw_lane_settings = true;
+ 	lt_settings->lttpr_mode = dp_decide_8b_10b_lttpr_mode(link);
++	lt_settings->cr_pattern_time = get_cr_training_aux_rd_interval(link, link_setting, lt_settings->lttpr_mode);
+ 	dp_hw_to_dpcd_lane_settings(lt_settings, lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
+ }
+ 
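[Editor's note] The hunk above defers computing the CR pattern time until after the LTTPR mode is decided, and raises the default wait to 400 us whenever the link is not in non-transparent LTTPR mode; a non-zero DPCD-provided interval (in units of 4 ms) still overrides it. A minimal standalone sketch of that selection, with illustrative names (cr_wait_us and dpcd_interval are not the driver's identifiers):

#include <stdint.h>
#include <stdio.h>

/* Illustrative sketch, not the driver's code: pick the CR training wait.
 * dpcd_interval stands in for the raw aux-rd-interval field, in 4 ms units. */
static uint32_t cr_wait_us(int non_transparent_lttpr, uint8_t dpcd_interval)
{
	uint32_t wait_us = 100;			/* DP spec default */

	if (!non_transparent_lttpr)
		wait_us = 400;			/* new default from the hunk */
	if (dpcd_interval)
		wait_us = dpcd_interval * 4000;	/* sink/LTTPR override */
	return wait_us;
}

int main(void)
{
	printf("%u\n", cr_wait_us(0, 0));	/* 400 */
	printf("%u\n", cr_wait_us(0, 2));	/* 8000 */
	printf("%u\n", cr_wait_us(1, 0));	/* 100 */
	return 0;
}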
+diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
+index 3aa05a2be6c09f..fa642f4b88c2dd 100644
+--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
++++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
+@@ -674,6 +674,18 @@ bool edp_setup_psr(struct dc_link *link,
+ 	if (!link)
+ 		return false;
+ 
++	//Clear PSR cfg
++	memset(&psr_configuration, 0, sizeof(psr_configuration));
++	dm_helpers_dp_write_dpcd(
++		link->ctx,
++		link,
++		DP_PSR_EN_CFG,
++		&psr_configuration.raw,
++		sizeof(psr_configuration.raw));
++
++	if (link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED)
++		return false;
++
+ 	dc = link->ctx->dc;
+ 	dmcu = dc->res_pool->dmcu;
+ 	psr = dc->res_pool->psr;
+@@ -684,9 +696,6 @@ bool edp_setup_psr(struct dc_link *link,
+ 	if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
+ 		return false;
+ 
+-
+-	memset(&psr_configuration, 0, sizeof(psr_configuration));
+-
+ 	psr_configuration.bits.ENABLE                    = 1;
+ 	psr_configuration.bits.CRC_VERIFICATION          = 1;
+ 	psr_configuration.bits.FRAME_CAPTURE_INDICATION  =
+@@ -950,6 +959,16 @@ bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream
+ 	if (!link)
+ 		return false;
+ 
++	//Clear Replay config
++	dm_helpers_dp_write_dpcd(link->ctx, link,
++		DP_SINK_PR_ENABLE_AND_CONFIGURATION,
++		(uint8_t *)&(replay_config.raw), sizeof(uint8_t));
++
++	if (!(link->replay_settings.config.replay_supported))
++		return false;
++
++	link->replay_settings.config.replay_error_status.raw = 0;
++
+ 	dc = link->ctx->dc;
+ 
+ 	replay = dc->res_pool->replay;
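[Editor's note] Both edp_setup_psr() and edp_setup_replay() above now clear the sink-side DPCD configuration before checking whether the feature is supported, so a sink left enabled by a previous session cannot keep stale state. A hedged userspace sketch of that ordering; dpcd_write() and the bit values are stand-ins, not the driver's API:

#include <stdint.h>
#include <stdio.h>

static uint8_t sink_cfg = 0xff;		/* pretend stale enable bits */

static void dpcd_write(uint8_t v)	/* stand-in for the DPCD helper */
{
	sink_cfg = v;
}

/* Clear first, then bail if unsupported: the sink ends up disabled
 * either way, never stuck with a previous session's configuration. */
static int setup_feature(int supported)
{
	dpcd_write(0);
	if (!supported)
		return 0;

	dpcd_write(0x1 /* ENABLE */ | 0x4 /* illustrative extra bit */);
	return 1;
}

int main(void)
{
	int ok = setup_feature(0);

	printf("ok=%d cfg=0x%02x\n", ok, sink_cfg);	/* ok=0 cfg=0x00 */
	return 0;
}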
+diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
+index f2ce687c0e03ca..9cb72805b8d1ac 100644
+--- a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
+@@ -1699,7 +1699,7 @@ static int dcn315_populate_dml_pipes_from_context(
+ 		pipes[pipe_cnt].dout.dsc_input_bpc = 0;
+ 		DC_FP_START();
+ 		dcn31_zero_pipe_dcc_fraction(pipes, pipe_cnt);
+-		if (pixel_rate_crb && !pipe->top_pipe && !pipe->prev_odm_pipe) {
++		if (pixel_rate_crb) {
+ 			int bpp = source_format_to_bpp(pipes[pipe_cnt].pipe.src.source_format);
+ 			/* Ceil to crb segment size */
+ 			int approx_det_segs_required_for_pstate = dcn_get_approx_det_segs_required_for_pstate(
+@@ -1756,28 +1756,26 @@ static int dcn315_populate_dml_pipes_from_context(
+ 				continue;
+ 			}
+ 
+-			if (!pipe->top_pipe && !pipe->prev_odm_pipe) {
+-				bool split_required = pipe->stream->timing.pix_clk_100hz >= dcn_get_max_non_odm_pix_rate_100hz(&dc->dml.soc)
+-						|| (pipe->plane_state && pipe->plane_state->src_rect.width > 5120);
+-
+-				if (remaining_det_segs > MIN_RESERVED_DET_SEGS && crb_pipes != 0)
+-					pipes[pipe_cnt].pipe.src.det_size_override += (remaining_det_segs - MIN_RESERVED_DET_SEGS) / crb_pipes +
+-							(crb_idx < (remaining_det_segs - MIN_RESERVED_DET_SEGS) % crb_pipes ? 1 : 0);
+-				if (pipes[pipe_cnt].pipe.src.det_size_override > 2 * DCN3_15_MAX_DET_SEGS) {
+-					/* Clamp to 2 pipe split max det segments */
+-					remaining_det_segs += pipes[pipe_cnt].pipe.src.det_size_override - 2 * (DCN3_15_MAX_DET_SEGS);
+-					pipes[pipe_cnt].pipe.src.det_size_override = 2 * DCN3_15_MAX_DET_SEGS;
+-				}
+-				if (pipes[pipe_cnt].pipe.src.det_size_override > DCN3_15_MAX_DET_SEGS || split_required) {
+-					/* If we are splitting we must have an even number of segments */
+-					remaining_det_segs += pipes[pipe_cnt].pipe.src.det_size_override % 2;
+-					pipes[pipe_cnt].pipe.src.det_size_override -= pipes[pipe_cnt].pipe.src.det_size_override % 2;
+-				}
+-				/* Convert segments into size for DML use */
+-				pipes[pipe_cnt].pipe.src.det_size_override *= DCN3_15_CRB_SEGMENT_SIZE_KB;
+-
+-				crb_idx++;
++			bool split_required = pipe->stream->timing.pix_clk_100hz >= dcn_get_max_non_odm_pix_rate_100hz(&dc->dml.soc)
++					|| (pipe->plane_state && pipe->plane_state->src_rect.width > 5120);
++
++			if (remaining_det_segs > MIN_RESERVED_DET_SEGS && crb_pipes != 0)
++				pipes[pipe_cnt].pipe.src.det_size_override += (remaining_det_segs - MIN_RESERVED_DET_SEGS) / crb_pipes +
++						(crb_idx < (remaining_det_segs - MIN_RESERVED_DET_SEGS) % crb_pipes ? 1 : 0);
++			if (pipes[pipe_cnt].pipe.src.det_size_override > 2 * DCN3_15_MAX_DET_SEGS) {
++				/* Clamp to 2 pipe split max det segments */
++				remaining_det_segs += pipes[pipe_cnt].pipe.src.det_size_override - 2 * (DCN3_15_MAX_DET_SEGS);
++				pipes[pipe_cnt].pipe.src.det_size_override = 2 * DCN3_15_MAX_DET_SEGS;
++			}
++			if (pipes[pipe_cnt].pipe.src.det_size_override > DCN3_15_MAX_DET_SEGS || split_required) {
++				/* If we are splitting we must have an even number of segments */
++				remaining_det_segs += pipes[pipe_cnt].pipe.src.det_size_override % 2;
++				pipes[pipe_cnt].pipe.src.det_size_override -= pipes[pipe_cnt].pipe.src.det_size_override % 2;
+ 			}
++			/* Convert segments into size for DML use */
++			pipes[pipe_cnt].pipe.src.det_size_override *= DCN3_15_CRB_SEGMENT_SIZE_KB;
++
++			crb_idx++;
+ 			pipe_cnt++;
+ 		}
+ 	}
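[Editor's note] The de-indented block above now runs for every CRB pipe: remaining DET segments are shared evenly (with the remainder spread across the first pipes), clamped to the two-pipe-split maximum, and forced even whenever the pipe must split. A simplified sketch of that arithmetic, with an illustrative MAX_DET_SEGS and without the reserved-segment bookkeeping and final size conversion the driver performs:

#include <stdio.h>

#define MAX_DET_SEGS 9	/* illustrative stand-in, not the DCN3.15 value */

static int det_override(int base, int remaining, int pipes, int idx,
			int split_required)
{
	int override = base;

	if (remaining > 0 && pipes)
		override += remaining / pipes +
			    (idx < remaining % pipes ? 1 : 0);
	if (override > 2 * MAX_DET_SEGS)
		override = 2 * MAX_DET_SEGS;	/* 2-pipe-split maximum */
	if (override > MAX_DET_SEGS || split_required)
		override -= override % 2;	/* splitting needs an even count */
	return override;
}

int main(void)
{
	printf("%d\n", det_override(0, 20, 3, 0, 0));	/* 7 */
	printf("%d\n", det_override(0, 20, 3, 0, 1));	/* 6, forced even */
	return 0;
}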
+diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c b/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c
+index 014e8a296f0c78..54c7d6aecf51c4 100644
+--- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c
++++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c
+@@ -875,8 +875,8 @@ static bool spl_get_optimal_number_of_taps(
+ 	  bool *enable_isharp)
+ {
+ 	int num_part_y, num_part_c;
+-	int max_taps_y, max_taps_c;
+-	int min_taps_y, min_taps_c;
++	unsigned int max_taps_y, max_taps_c;
++	unsigned int min_taps_y, min_taps_c;
+ 	enum lb_memory_config lb_config;
+ 	bool skip_easf = false;
+ 
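[Editor's note] The tap-count variables above switch from int to unsigned int, matching the unsigned values they are compared against. Mixing signedness in a comparison silently converts the signed operand, which is the class of bug such a change heads off; a two-line demonstration:

#include <stdio.h>

int main(void)
{
	int n = -1;
	unsigned int max = 8;

	/* n is converted to unsigned here, so -1 becomes UINT_MAX */
	if (n > max)
		printf("-1 > 8 under unsigned comparison\n");
	return 0;
}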
+diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h
+index 2a74ff5fdfdbc6..a2c28949ec47fd 100644
+--- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h
++++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h
+@@ -479,7 +479,7 @@ struct spl_sharpness_range {
+ };
+ struct adaptive_sharpness {
+ 	bool enable;
+-	int sharpness_level;
++	unsigned int sharpness_level;
+ 	struct spl_sharpness_range sharpness_range;
+ };
+ enum linear_light_scaling	{	// convert it in translation logic
+diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+index 7835100b37c41e..d7433683036824 100644
+--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
++++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+@@ -2869,6 +2869,12 @@ struct dmub_cmd_psr_copy_settings_data {
+ 	 * Some panels request main link off before xth vertical line
+ 	 */
+ 	uint16_t poweroff_before_vertical_line;
++	/**
++	 * Some panels cannot handle the idle pattern during PSR entry,
++	 * so power down the PHY before disabling the stream to avoid
++	 * sending it.
++	 */
++	uint8_t power_down_phy_before_disable_stream;
+ };
+ 
+ /**
+diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
+index d9f31b191c693d..1a68b5782cac6c 100644
+--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
++++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
+@@ -83,8 +83,8 @@ static inline void dmub_dcn31_translate_addr(const union dmub_addr *addr_in,
+ void dmub_dcn31_reset(struct dmub_srv *dmub)
+ {
+ 	union dmub_gpint_data_register cmd;
+-	const uint32_t timeout = 100;
+-	uint32_t in_reset, scratch, i, pwait_mode;
++	const uint32_t timeout = 100000;
++	uint32_t in_reset, is_enabled, scratch, i, pwait_mode;
+ 
+ 	REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &in_reset);
+ 
+@@ -108,7 +108,7 @@ void dmub_dcn31_reset(struct dmub_srv *dmub)
+ 		}
+ 
+ 		for (i = 0; i < timeout; ++i) {
+-			scratch = dmub->hw_funcs.get_gpint_response(dmub);
++			scratch = REG_READ(DMCUB_SCRATCH7);
+ 			if (scratch == DMUB_GPINT__STOP_FW_RESPONSE)
+ 				break;
+ 
+@@ -125,9 +125,14 @@ void dmub_dcn31_reset(struct dmub_srv *dmub)
+ 		/* Force reset in case we timed out, DMCUB is likely hung. */
+ 	}
+ 
+-	REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 1);
+-	REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
+-	REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1);
++	REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_enabled);
++
++	if (is_enabled) {
++		REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 1);
++		REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1);
++		REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
++	}
++
+ 	REG_WRITE(DMCUB_INBOX1_RPTR, 0);
+ 	REG_WRITE(DMCUB_INBOX1_WPTR, 0);
+ 	REG_WRITE(DMCUB_OUTBOX1_RPTR, 0);
+diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
+index 2ccad79053c586..4581eb47945180 100644
+--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
++++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
+@@ -88,7 +88,7 @@ static inline void dmub_dcn35_translate_addr(const union dmub_addr *addr_in,
+ void dmub_dcn35_reset(struct dmub_srv *dmub)
+ {
+ 	union dmub_gpint_data_register cmd;
+-	const uint32_t timeout = 100;
++	const uint32_t timeout = 100000;
+ 	uint32_t in_reset, is_enabled, scratch, i, pwait_mode;
+ 
+ 	REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &in_reset);
+@@ -113,7 +113,7 @@ void dmub_dcn35_reset(struct dmub_srv *dmub)
+ 		}
+ 
+ 		for (i = 0; i < timeout; ++i) {
+-			scratch = dmub->hw_funcs.get_gpint_response(dmub);
++			scratch = REG_READ(DMCUB_SCRATCH7);
+ 			if (scratch == DMUB_GPINT__STOP_FW_RESPONSE)
+ 				break;
+ 
+diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c
+index 39a8cb6d7523c3..e1c4fe1c6e3ee2 100644
+--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c
++++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c
+@@ -63,8 +63,10 @@ static inline void dmub_dcn401_translate_addr(const union dmub_addr *addr_in,
+ void dmub_dcn401_reset(struct dmub_srv *dmub)
+ {
+ 	union dmub_gpint_data_register cmd;
+-	const uint32_t timeout = 30;
+-	uint32_t in_reset, scratch, i;
++	const uint32_t timeout_us = 1 * 1000 * 1000; //1s
++	const uint32_t poll_delay_us = 1; //1us
++	uint32_t i = 0;
++	uint32_t in_reset, scratch, pwait_mode;
+ 
+ 	REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &in_reset);
+ 
+@@ -75,32 +77,35 @@ void dmub_dcn401_reset(struct dmub_srv *dmub)
+ 
+ 		dmub->hw_funcs.set_gpint(dmub, cmd);
+ 
+-		/**
+-		 * Timeout covers both the ACK and the wait
+-		 * for remaining work to finish.
+-		 *
+-		 * This is mostly bound by the PHY disable sequence.
+-		 * Each register check will be greater than 1us, so
+-		 * don't bother using udelay.
+-		 */
+-
+-		for (i = 0; i < timeout; ++i) {
++		for (i = 0; i < timeout_us; i++) {
+ 			if (dmub->hw_funcs.is_gpint_acked(dmub, cmd))
+ 				break;
++
++			udelay(poll_delay_us);
+ 		}
+ 
+-		for (i = 0; i < timeout; ++i) {
++		for (; i < timeout_us; i++) {
+ 			scratch = dmub->hw_funcs.get_gpint_response(dmub);
+ 			if (scratch == DMUB_GPINT__STOP_FW_RESPONSE)
+ 				break;
++
++			udelay(poll_delay_us);
+ 		}
+ 
+-		/* Force reset in case we timed out, DMCUB is likely hung. */
++		for (; i < timeout_us; i++) {
++			REG_GET(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS, &pwait_mode);
++			if (pwait_mode & (1 << 0))
++				break;
++
++			udelay(poll_delay_us);
++		}
++	}
++
++	if (i >= timeout_us) {
++		/* timeout should never occur */
++		BREAK_TO_DEBUGGER();
+ 	}
+ 
+-	REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 1);
+-	REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
+-	REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1);
+ 	REG_WRITE(DMCUB_INBOX1_RPTR, 0);
+ 	REG_WRITE(DMCUB_INBOX1_WPTR, 0);
+ 	REG_WRITE(DMCUB_OUTBOX1_RPTR, 0);
+@@ -131,7 +136,10 @@ void dmub_dcn401_backdoor_load(struct dmub_srv *dmub,
+ 
+ 	dmub_dcn401_get_fb_base_offset(dmub, &fb_base, &fb_offset);
+ 
++	/* reset and disable DMCUB and MMHUBBUB DMUIF */
+ 	REG_UPDATE(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 1);
++	REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1);
++	REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
+ 
+ 	dmub_dcn401_translate_addr(&cw0->offset, fb_base, fb_offset, &offset);
+ 
+@@ -151,6 +159,7 @@ void dmub_dcn401_backdoor_load(struct dmub_srv *dmub,
+ 			DMCUB_REGION3_CW1_TOP_ADDRESS, cw1->region.top,
+ 			DMCUB_REGION3_CW1_ENABLE, 1);
+ 
++	/* release DMCUB reset only to prevent premature execution */
+ 	REG_UPDATE_2(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 0, DMCUB_MEM_UNIT_ID,
+ 			0x20);
+ }
+@@ -161,7 +170,10 @@ void dmub_dcn401_backdoor_load_zfb_mode(struct dmub_srv *dmub,
+ {
+ 	union dmub_addr offset;
+ 
++	/* reset and disable DMCUB and MMHUBBUB DMUIF */
+ 	REG_UPDATE(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 1);
++	REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1);
++	REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
+ 
+ 	offset = cw0->offset;
+ 
+@@ -181,6 +193,7 @@ void dmub_dcn401_backdoor_load_zfb_mode(struct dmub_srv *dmub,
+ 			DMCUB_REGION3_CW1_TOP_ADDRESS, cw1->region.top,
+ 			DMCUB_REGION3_CW1_ENABLE, 1);
+ 
++	/* release DMCUB reset only to prevent premature execution */
+ 	REG_UPDATE_2(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 0, DMCUB_MEM_UNIT_ID,
+ 			0x20);
+ }
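[Editor's note] The dcn401 reset above replaces three independently bounded loops with one microsecond budget: the counter i is deliberately not reset between the ACK, response, and pwait phases, so all phases share a single 1 s deadline. A runnable sketch of that pattern, where phase_done() and sleep_us() are stubs standing in for the register polls and udelay():

#include <stdio.h>

#define TIMEOUT_US 1000000

static int phase_done(int phase, unsigned int elapsed)
{
	return elapsed >= (unsigned int)(phase * 100);	/* fake readiness */
}

static void sleep_us(unsigned int us)
{
	(void)us;	/* stub for udelay() */
}

int main(void)
{
	unsigned int i = 0;
	int phase;

	for (phase = 1; phase <= 3; phase++) {
		for (; i < TIMEOUT_US; i++) {	/* note: i is NOT reset */
			if (phase_done(phase, i))
				break;
			sleep_us(1);
		}
	}

	if (i >= TIMEOUT_US)
		printf("timeout\n");
	else
		printf("all phases done after %u us\n", i);
	return 0;
}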
+diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.h
+index 4c8843b796950b..31f95b27e227d6 100644
+--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.h
++++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.h
+@@ -169,7 +169,8 @@ struct dmub_srv;
+ 	DMUB_SF(HOST_INTERRUPT_CSR, HOST_REG_INBOX0_RSP_INT_EN) \
+ 	DMUB_SF(HOST_INTERRUPT_CSR, HOST_REG_OUTBOX0_RDY_INT_ACK) \
+ 	DMUB_SF(HOST_INTERRUPT_CSR, HOST_REG_OUTBOX0_RDY_INT_STAT) \
+-	DMUB_SF(HOST_INTERRUPT_CSR, HOST_REG_OUTBOX0_RDY_INT_EN)
++	DMUB_SF(HOST_INTERRUPT_CSR, HOST_REG_OUTBOX0_RDY_INT_EN) \
++	DMUB_SF(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS)
+ 
+ struct dmub_srv_dcn401_reg_offset {
+ #define DMUB_SR(reg) uint32_t reg;
+diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
+index a344e2e49b0eab..b3d55cac35694b 100644
+--- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
++++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
+@@ -383,10 +383,10 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
+ 				colorimetryFormat = ColorimetryYCC_DP_ITU709;
+ 			else if (cs == COLOR_SPACE_ADOBERGB)
+ 				colorimetryFormat = ColorimetryYCC_DP_AdobeYCC;
+-			else if (cs == COLOR_SPACE_2020_YCBCR)
++			else if (cs == COLOR_SPACE_2020_YCBCR_LIMITED)
+ 				colorimetryFormat = ColorimetryYCC_DP_ITU2020YCbCr;
+ 
+-			if (cs == COLOR_SPACE_2020_YCBCR && tf == TRANSFER_FUNC_GAMMA_22)
++			if (cs == COLOR_SPACE_2020_YCBCR_LIMITED && tf == TRANSFER_FUNC_GAMMA_22)
+ 				colorimetryFormat = ColorimetryYCC_DP_ITU709;
+ 			break;
+ 
+diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_offset.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_offset.h
+index c488d4a50cf46a..b2252deabc17a4 100644
+--- a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_offset.h
++++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_offset.h
+@@ -203,6 +203,10 @@
+ #define mmDAGB0_WR_DATA_CREDIT_BASE_IDX                                                                1
+ #define mmDAGB0_WR_MISC_CREDIT                                                                         0x0058
+ #define mmDAGB0_WR_MISC_CREDIT_BASE_IDX                                                                1
++#define mmDAGB0_WRCLI_GPU_SNOOP_OVERRIDE                                                               0x005b
++#define mmDAGB0_WRCLI_GPU_SNOOP_OVERRIDE_BASE_IDX                                                      1
++#define mmDAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE                                                         0x005c
++#define mmDAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE_BASE_IDX                                                1
+ #define mmDAGB0_WRCLI_ASK_PENDING                                                                      0x005d
+ #define mmDAGB0_WRCLI_ASK_PENDING_BASE_IDX                                                             1
+ #define mmDAGB0_WRCLI_GO_PENDING                                                                       0x005e
+@@ -455,6 +459,10 @@
+ #define mmDAGB1_WR_DATA_CREDIT_BASE_IDX                                                                1
+ #define mmDAGB1_WR_MISC_CREDIT                                                                         0x00d8
+ #define mmDAGB1_WR_MISC_CREDIT_BASE_IDX                                                                1
++#define mmDAGB1_WRCLI_GPU_SNOOP_OVERRIDE                                                               0x00db
++#define mmDAGB1_WRCLI_GPU_SNOOP_OVERRIDE_BASE_IDX                                                      1
++#define mmDAGB1_WRCLI_GPU_SNOOP_OVERRIDE_VALUE                                                         0x00dc
++#define mmDAGB1_WRCLI_GPU_SNOOP_OVERRIDE_VALUE_BASE_IDX                                                1
+ #define mmDAGB1_WRCLI_ASK_PENDING                                                                      0x00dd
+ #define mmDAGB1_WRCLI_ASK_PENDING_BASE_IDX                                                             1
+ #define mmDAGB1_WRCLI_GO_PENDING                                                                       0x00de
+@@ -707,6 +715,10 @@
+ #define mmDAGB2_WR_DATA_CREDIT_BASE_IDX                                                                1
+ #define mmDAGB2_WR_MISC_CREDIT                                                                         0x0158
+ #define mmDAGB2_WR_MISC_CREDIT_BASE_IDX                                                                1
++#define mmDAGB2_WRCLI_GPU_SNOOP_OVERRIDE                                                               0x015b
++#define mmDAGB2_WRCLI_GPU_SNOOP_OVERRIDE_BASE_IDX                                                      1
++#define mmDAGB2_WRCLI_GPU_SNOOP_OVERRIDE_VALUE                                                         0x015c
++#define mmDAGB2_WRCLI_GPU_SNOOP_OVERRIDE_VALUE_BASE_IDX                                                1
+ #define mmDAGB2_WRCLI_ASK_PENDING                                                                      0x015d
+ #define mmDAGB2_WRCLI_ASK_PENDING_BASE_IDX                                                             1
+ #define mmDAGB2_WRCLI_GO_PENDING                                                                       0x015e
+@@ -959,6 +971,10 @@
+ #define mmDAGB3_WR_DATA_CREDIT_BASE_IDX                                                                1
+ #define mmDAGB3_WR_MISC_CREDIT                                                                         0x01d8
+ #define mmDAGB3_WR_MISC_CREDIT_BASE_IDX                                                                1
++#define mmDAGB3_WRCLI_GPU_SNOOP_OVERRIDE                                                               0x01db
++#define mmDAGB3_WRCLI_GPU_SNOOP_OVERRIDE_BASE_IDX                                                      1
++#define mmDAGB3_WRCLI_GPU_SNOOP_OVERRIDE_VALUE                                                         0x01dc
++#define mmDAGB3_WRCLI_GPU_SNOOP_OVERRIDE_VALUE_BASE_IDX                                                1
+ #define mmDAGB3_WRCLI_ASK_PENDING                                                                      0x01dd
+ #define mmDAGB3_WRCLI_ASK_PENDING_BASE_IDX                                                             1
+ #define mmDAGB3_WRCLI_GO_PENDING                                                                       0x01de
+@@ -1211,6 +1227,10 @@
+ #define mmDAGB4_WR_DATA_CREDIT_BASE_IDX                                                                1
+ #define mmDAGB4_WR_MISC_CREDIT                                                                         0x0258
+ #define mmDAGB4_WR_MISC_CREDIT_BASE_IDX                                                                1
++#define mmDAGB4_WRCLI_GPU_SNOOP_OVERRIDE                                                               0x025b
++#define mmDAGB4_WRCLI_GPU_SNOOP_OVERRIDE_BASE_IDX                                                      1
++#define mmDAGB4_WRCLI_GPU_SNOOP_OVERRIDE_VALUE                                                         0x025c
++#define mmDAGB4_WRCLI_GPU_SNOOP_OVERRIDE_VALUE_BASE_IDX                                                1
+ #define mmDAGB4_WRCLI_ASK_PENDING                                                                      0x025d
+ #define mmDAGB4_WRCLI_ASK_PENDING_BASE_IDX                                                             1
+ #define mmDAGB4_WRCLI_GO_PENDING                                                                       0x025e
+@@ -4793,6 +4813,10 @@
+ #define mmDAGB5_WR_DATA_CREDIT_BASE_IDX                                                                1
+ #define mmDAGB5_WR_MISC_CREDIT                                                                         0x3058
+ #define mmDAGB5_WR_MISC_CREDIT_BASE_IDX                                                                1
++#define mmDAGB5_WRCLI_GPU_SNOOP_OVERRIDE                                                               0x305b
++#define mmDAGB5_WRCLI_GPU_SNOOP_OVERRIDE_BASE_IDX                                                      1
++#define mmDAGB5_WRCLI_GPU_SNOOP_OVERRIDE_VALUE                                                         0x305c
++#define mmDAGB5_WRCLI_GPU_SNOOP_OVERRIDE_VALUE_BASE_IDX                                                1
+ #define mmDAGB5_WRCLI_ASK_PENDING                                                                      0x305d
+ #define mmDAGB5_WRCLI_ASK_PENDING_BASE_IDX                                                             1
+ #define mmDAGB5_WRCLI_GO_PENDING                                                                       0x305e
+@@ -5045,6 +5069,10 @@
+ #define mmDAGB6_WR_DATA_CREDIT_BASE_IDX                                                                1
+ #define mmDAGB6_WR_MISC_CREDIT                                                                         0x30d8
+ #define mmDAGB6_WR_MISC_CREDIT_BASE_IDX                                                                1
++#define mmDAGB6_WRCLI_GPU_SNOOP_OVERRIDE                                                               0x30db
++#define mmDAGB6_WRCLI_GPU_SNOOP_OVERRIDE_BASE_IDX                                                      1
++#define mmDAGB6_WRCLI_GPU_SNOOP_OVERRIDE_VALUE                                                         0x30dc
++#define mmDAGB6_WRCLI_GPU_SNOOP_OVERRIDE_VALUE_BASE_IDX                                                1
+ #define mmDAGB6_WRCLI_ASK_PENDING                                                                      0x30dd
+ #define mmDAGB6_WRCLI_ASK_PENDING_BASE_IDX                                                             1
+ #define mmDAGB6_WRCLI_GO_PENDING                                                                       0x30de
+@@ -5297,6 +5325,10 @@
+ #define mmDAGB7_WR_DATA_CREDIT_BASE_IDX                                                                1
+ #define mmDAGB7_WR_MISC_CREDIT                                                                         0x3158
+ #define mmDAGB7_WR_MISC_CREDIT_BASE_IDX                                                                1
++#define mmDAGB7_WRCLI_GPU_SNOOP_OVERRIDE                                                               0x315b
++#define mmDAGB7_WRCLI_GPU_SNOOP_OVERRIDE_BASE_IDX                                                      1
++#define mmDAGB7_WRCLI_GPU_SNOOP_OVERRIDE_VALUE                                                         0x315c
++#define mmDAGB7_WRCLI_GPU_SNOOP_OVERRIDE_VALUE_BASE_IDX                                                1
+ #define mmDAGB7_WRCLI_ASK_PENDING                                                                      0x315d
+ #define mmDAGB7_WRCLI_ASK_PENDING_BASE_IDX                                                             1
+ #define mmDAGB7_WRCLI_GO_PENDING                                                                       0x315e
+diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h
+index 2969fbf282b7d0..5069d2fd467f2b 100644
+--- a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h
++++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h
+@@ -1532,6 +1532,12 @@
+ //DAGB0_WRCLI_DBUS_GO_PENDING
+ #define DAGB0_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT                                                              0x0
+ #define DAGB0_WRCLI_DBUS_GO_PENDING__BUSY_MASK                                                                0xFFFFFFFFL
++//DAGB0_WRCLI_GPU_SNOOP_OVERRIDE
++#define DAGB0_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE__SHIFT                                                         0x0
++#define DAGB0_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE_MASK                                                           0xFFFFFFFFL
++//DAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE
++#define DAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE__SHIFT                                                   0x0
++#define DAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE_MASK                                                     0xFFFFFFFFL
+ //DAGB0_DAGB_DLY
+ #define DAGB0_DAGB_DLY__DLY__SHIFT                                                                            0x0
+ #define DAGB0_DAGB_DLY__CLI__SHIFT                                                                            0x8
+@@ -3207,6 +3213,12 @@
+ //DAGB1_WRCLI_DBUS_GO_PENDING
+ #define DAGB1_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT                                                              0x0
+ #define DAGB1_WRCLI_DBUS_GO_PENDING__BUSY_MASK                                                                0xFFFFFFFFL
++//DAGB1_WRCLI_GPU_SNOOP_OVERRIDE
++#define DAGB1_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE__SHIFT                                                         0x0
++#define DAGB1_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE_MASK                                                           0xFFFFFFFFL
++//DAGB1_WRCLI_GPU_SNOOP_OVERRIDE_VALUE
++#define DAGB1_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE__SHIFT                                                   0x0
++#define DAGB1_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE_MASK                                                     0xFFFFFFFFL
+ //DAGB1_DAGB_DLY
+ #define DAGB1_DAGB_DLY__DLY__SHIFT                                                                            0x0
+ #define DAGB1_DAGB_DLY__CLI__SHIFT                                                                            0x8
+@@ -4882,6 +4894,12 @@
+ //DAGB2_WRCLI_DBUS_GO_PENDING
+ #define DAGB2_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT                                                              0x0
+ #define DAGB2_WRCLI_DBUS_GO_PENDING__BUSY_MASK                                                                0xFFFFFFFFL
++//DAGB2_WRCLI_GPU_SNOOP_OVERRIDE
++#define DAGB2_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE__SHIFT                                                         0x0
++#define DAGB2_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE_MASK                                                           0xFFFFFFFFL
++//DAGB2_WRCLI_GPU_SNOOP_OVERRIDE_VALUE
++#define DAGB2_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE__SHIFT                                                   0x0
++#define DAGB2_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE_MASK                                                     0xFFFFFFFFL
+ //DAGB2_DAGB_DLY
+ #define DAGB2_DAGB_DLY__DLY__SHIFT                                                                            0x0
+ #define DAGB2_DAGB_DLY__CLI__SHIFT                                                                            0x8
+@@ -6557,6 +6575,12 @@
+ //DAGB3_WRCLI_DBUS_GO_PENDING
+ #define DAGB3_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT                                                              0x0
+ #define DAGB3_WRCLI_DBUS_GO_PENDING__BUSY_MASK                                                                0xFFFFFFFFL
++//DAGB3_WRCLI_GPU_SNOOP_OVERRIDE
++#define DAGB3_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE__SHIFT                                                         0x0
++#define DAGB3_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE_MASK                                                           0xFFFFFFFFL
++//DAGB3_WRCLI_GPU_SNOOP_OVERRIDE_VALUE
++#define DAGB3_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE__SHIFT                                                   0x0
++#define DAGB3_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE_MASK                                                     0xFFFFFFFFL
+ //DAGB3_DAGB_DLY
+ #define DAGB3_DAGB_DLY__DLY__SHIFT                                                                            0x0
+ #define DAGB3_DAGB_DLY__CLI__SHIFT                                                                            0x8
+@@ -8232,6 +8256,12 @@
+ //DAGB4_WRCLI_DBUS_GO_PENDING
+ #define DAGB4_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT                                                              0x0
+ #define DAGB4_WRCLI_DBUS_GO_PENDING__BUSY_MASK                                                                0xFFFFFFFFL
++//DAGB4_WRCLI_GPU_SNOOP_OVERRIDE
++#define DAGB4_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE__SHIFT                                                         0x0
++#define DAGB4_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE_MASK                                                           0xFFFFFFFFL
++//DAGB4_WRCLI_GPU_SNOOP_OVERRIDE_VALUE
++#define DAGB4_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE__SHIFT                                                   0x0
++#define DAGB4_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE_MASK                                                     0xFFFFFFFFL
+ //DAGB4_DAGB_DLY
+ #define DAGB4_DAGB_DLY__DLY__SHIFT                                                                            0x0
+ #define DAGB4_DAGB_DLY__CLI__SHIFT                                                                            0x8
+@@ -28737,6 +28767,12 @@
+ //DAGB5_WRCLI_DBUS_GO_PENDING
+ #define DAGB5_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT                                                              0x0
+ #define DAGB5_WRCLI_DBUS_GO_PENDING__BUSY_MASK                                                                0xFFFFFFFFL
++//DAGB5_WRCLI_GPU_SNOOP_OVERRIDE
++#define DAGB5_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE__SHIFT                                                         0x0
++#define DAGB5_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE_MASK                                                           0xFFFFFFFFL
++//DAGB5_WRCLI_GPU_SNOOP_OVERRIDE_VALUE
++#define DAGB5_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE__SHIFT                                                   0x0
++#define DAGB5_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE_MASK                                                     0xFFFFFFFFL
+ //DAGB5_DAGB_DLY
+ #define DAGB5_DAGB_DLY__DLY__SHIFT                                                                            0x0
+ #define DAGB5_DAGB_DLY__CLI__SHIFT                                                                            0x8
+@@ -30412,6 +30448,12 @@
+ //DAGB6_WRCLI_DBUS_GO_PENDING
+ #define DAGB6_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT                                                              0x0
+ #define DAGB6_WRCLI_DBUS_GO_PENDING__BUSY_MASK                                                                0xFFFFFFFFL
++//DAGB6_WRCLI_GPU_SNOOP_OVERRIDE
++#define DAGB6_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE__SHIFT                                                         0x0
++#define DAGB6_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE_MASK                                                           0xFFFFFFFFL
++//DAGB6_WRCLI_GPU_SNOOP_OVERRIDE_VALUE
++#define DAGB6_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE__SHIFT                                                   0x0
++#define DAGB6_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE_MASK                                                     0xFFFFFFFFL
+ //DAGB6_DAGB_DLY
+ #define DAGB6_DAGB_DLY__DLY__SHIFT                                                                            0x0
+ #define DAGB6_DAGB_DLY__CLI__SHIFT                                                                            0x8
+@@ -32087,6 +32129,12 @@
+ //DAGB7_WRCLI_DBUS_GO_PENDING
+ #define DAGB7_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT                                                              0x0
+ #define DAGB7_WRCLI_DBUS_GO_PENDING__BUSY_MASK                                                                0xFFFFFFFFL
++//DAGB7_WRCLI_GPU_SNOOP_OVERRIDE
++#define DAGB7_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE__SHIFT                                                         0x0
++#define DAGB7_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE_MASK                                                           0xFFFFFFFFL
++//DAGB7_WRCLI_GPU_SNOOP_OVERRIDE_VALUE
++#define DAGB7_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE__SHIFT                                                   0x0
++#define DAGB7_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE_MASK                                                     0xFFFFFFFFL
+ //DAGB7_DAGB_DLY
+ #define DAGB7_DAGB_DLY__DLY__SHIFT                                                                            0x0
+ #define DAGB7_DAGB_DLY__CLI__SHIFT                                                                            0x8
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+index 99d2d3092ea540..3fd8da5dc761ef 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+@@ -2772,6 +2772,7 @@ int smu_get_power_limit(void *handle,
+ 			switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
+ 			case IP_VERSION(13, 0, 2):
+ 			case IP_VERSION(13, 0, 6):
++			case IP_VERSION(13, 0, 12):
+ 			case IP_VERSION(13, 0, 14):
+ 			case IP_VERSION(11, 0, 7):
+ 			case IP_VERSION(11, 0, 11):
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+index 55ed6247eb61f7..9ac694c4f1f7a7 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+@@ -275,8 +275,9 @@ static int smu_v13_0_6_init_microcode(struct smu_context *smu)
+ 	int var = (adev->pdev->device & 0xF);
+ 	char ucode_prefix[15];
+ 
+-	/* No need to load P2S tables in IOV mode */
+-	if (amdgpu_sriov_vf(adev))
++	/* No need to load P2S tables in IOV mode or for smu v13.0.12 */
++	if (amdgpu_sriov_vf(adev) ||
++	    (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12)))
+ 		return 0;
+ 
+ 	if (!(adev->flags & AMD_IS_APU)) {
+diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
+index ed496fb32bf349..24ed1cd3caf17a 100644
+--- a/drivers/gpu/drm/ast/ast_mode.c
++++ b/drivers/gpu/drm/ast/ast_mode.c
+@@ -131,7 +131,7 @@ static bool ast_get_vbios_mode_info(const struct drm_format_info *format,
+ 		return false;
+ 	}
+ 
+-	switch (mode->crtc_hdisplay) {
++	switch (mode->hdisplay) {
+ 	case 640:
+ 		vbios_mode->enh_table = &res_640x480[refresh_rate_index];
+ 		break;
+@@ -145,7 +145,7 @@ static bool ast_get_vbios_mode_info(const struct drm_format_info *format,
+ 		vbios_mode->enh_table = &res_1152x864[refresh_rate_index];
+ 		break;
+ 	case 1280:
+-		if (mode->crtc_vdisplay == 800)
++		if (mode->vdisplay == 800)
+ 			vbios_mode->enh_table = &res_1280x800[refresh_rate_index];
+ 		else
+ 			vbios_mode->enh_table = &res_1280x1024[refresh_rate_index];
+@@ -157,7 +157,7 @@ static bool ast_get_vbios_mode_info(const struct drm_format_info *format,
+ 		vbios_mode->enh_table = &res_1440x900[refresh_rate_index];
+ 		break;
+ 	case 1600:
+-		if (mode->crtc_vdisplay == 900)
++		if (mode->vdisplay == 900)
+ 			vbios_mode->enh_table = &res_1600x900[refresh_rate_index];
+ 		else
+ 			vbios_mode->enh_table = &res_1600x1200[refresh_rate_index];
+@@ -166,7 +166,7 @@ static bool ast_get_vbios_mode_info(const struct drm_format_info *format,
+ 		vbios_mode->enh_table = &res_1680x1050[refresh_rate_index];
+ 		break;
+ 	case 1920:
+-		if (mode->crtc_vdisplay == 1080)
++		if (mode->vdisplay == 1080)
+ 			vbios_mode->enh_table = &res_1920x1080[refresh_rate_index];
+ 		else
+ 			vbios_mode->enh_table = &res_1920x1200[refresh_rate_index];
+@@ -210,6 +210,7 @@ static bool ast_get_vbios_mode_info(const struct drm_format_info *format,
+ 	hborder = (vbios_mode->enh_table->flags & HBorder) ? 8 : 0;
+ 	vborder = (vbios_mode->enh_table->flags & VBorder) ? 8 : 0;
+ 
++	adjusted_mode->crtc_hdisplay = vbios_mode->enh_table->hde;
+ 	adjusted_mode->crtc_htotal = vbios_mode->enh_table->ht;
+ 	adjusted_mode->crtc_hblank_start = vbios_mode->enh_table->hde + hborder;
+ 	adjusted_mode->crtc_hblank_end = vbios_mode->enh_table->ht - hborder;
+@@ -219,6 +220,7 @@ static bool ast_get_vbios_mode_info(const struct drm_format_info *format,
+ 					 vbios_mode->enh_table->hfp +
+ 					 vbios_mode->enh_table->hsync);
+ 
++	adjusted_mode->crtc_vdisplay = vbios_mode->enh_table->vde;
+ 	adjusted_mode->crtc_vtotal = vbios_mode->enh_table->vt;
+ 	adjusted_mode->crtc_vblank_start = vbios_mode->enh_table->vde + vborder;
+ 	adjusted_mode->crtc_vblank_end = vbios_mode->enh_table->vt - vborder;
+diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
+index 8f786592143b6c..24e1e11acf6978 100644
+--- a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
++++ b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
+@@ -244,7 +244,9 @@ static const struct hdmi_codec_pdata codec_data = {
+ 	.ops = &adv7511_codec_ops,
+ 	.max_i2s_channels = 2,
+ 	.i2s = 1,
++	.no_i2s_capture = 1,
+ 	.spdif = 1,
++	.no_spdif_capture = 1,
+ };
+ 
+ int adv7511_audio_init(struct device *dev, struct adv7511 *adv7511)
+diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
+index 32902f77f00dd8..40e4e1b6c91106 100644
+--- a/drivers/gpu/drm/drm_atomic_helper.c
++++ b/drivers/gpu/drm/drm_atomic_helper.c
+@@ -574,6 +574,30 @@ mode_valid(struct drm_atomic_state *state)
+ 	return 0;
+ }
+ 
++static int drm_atomic_check_valid_clones(struct drm_atomic_state *state,
++					 struct drm_crtc *crtc)
++{
++	struct drm_encoder *drm_enc;
++	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
++									  crtc);
++
++	drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc_state->encoder_mask) {
++		if (!drm_enc->possible_clones) {
++			DRM_DEBUG("enc%d possible_clones is 0\n", drm_enc->base.id);
++			continue;
++		}
++
++		if ((crtc_state->encoder_mask & drm_enc->possible_clones) !=
++		    crtc_state->encoder_mask) {
++			DRM_DEBUG("crtc%d failed valid clone check for mask 0x%x\n",
++				  crtc->base.id, crtc_state->encoder_mask);
++			return -EINVAL;
++		}
++	}
++
++	return 0;
++}
++
+ /**
+  * drm_atomic_helper_check_modeset - validate state object for modeset changes
+  * @dev: DRM device
+@@ -745,6 +769,10 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
+ 		ret = drm_atomic_add_affected_planes(state, crtc);
+ 		if (ret != 0)
+ 			return ret;
++
++		ret = drm_atomic_check_valid_clones(state, crtc);
++		if (ret != 0)
++			return ret;
+ 	}
+ 
+ 	/*
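[Editor's note] drm_atomic_check_valid_clones() above enforces that every encoder on a CRTC lists all of its co-assigned encoders in possible_clones, i.e. the CRTC's encoder_mask must be a subset of each participating encoder's clone mask. A small standalone version of the subset check (names illustrative):

#include <stdint.h>
#include <stdio.h>

static int clones_valid(uint32_t encoder_mask,
			const uint32_t *possible_clones, int n_encoders)
{
	for (int i = 0; i < n_encoders; i++) {
		if (!(encoder_mask & (1u << i)))
			continue;		/* encoder not on this CRTC */
		if (!possible_clones[i])
			continue;		/* driver opted out of the check */
		if ((encoder_mask & possible_clones[i]) != encoder_mask)
			return 0;		/* some co-encoder not allowed */
	}
	return 1;
}

int main(void)
{
	uint32_t clones[2] = { 0x3, 0x3 };	/* each may clone the other */

	printf("%d\n", clones_valid(0x3, clones, 2));	/* 1 */
	clones[1] = 0x2;			/* encoder 1 refuses encoder 0 */
	printf("%d\n", clones_valid(0x3, clones, 2));	/* 0 */
	return 0;
}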
+diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c
+index 103c185bb1c8a7..ca42e6081d27c4 100644
+--- a/drivers/gpu/drm/drm_buddy.c
++++ b/drivers/gpu/drm/drm_buddy.c
+@@ -324,7 +324,7 @@ EXPORT_SYMBOL(drm_buddy_init);
+  */
+ void drm_buddy_fini(struct drm_buddy *mm)
+ {
+-	u64 root_size, size;
++	u64 root_size, size, start;
+ 	unsigned int order;
+ 	int i;
+ 
+@@ -332,7 +332,8 @@ void drm_buddy_fini(struct drm_buddy *mm)
+ 
+ 	for (i = 0; i < mm->n_roots; ++i) {
+ 		order = ilog2(size) - ilog2(mm->chunk_size);
+-		__force_merge(mm, 0, size, order);
++		start = drm_buddy_block_offset(mm->roots[i]);
++		__force_merge(mm, start, start + size, order);
+ 
+ 		WARN_ON(!drm_buddy_block_is_free(mm->roots[i]));
+ 		drm_block_free(mm, mm->roots[i]);
+diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
+index 13bc4c290b17d5..9edb3247c767b8 100644
+--- a/drivers/gpu/drm/drm_edid.c
++++ b/drivers/gpu/drm/drm_edid.c
+@@ -6596,6 +6596,7 @@ static void drm_reset_display_info(struct drm_connector *connector)
+ 	info->has_hdmi_infoframe = false;
+ 	info->rgb_quant_range_selectable = false;
+ 	memset(&info->hdmi, 0, sizeof(info->hdmi));
++	memset(&connector->hdr_sink_metadata, 0, sizeof(connector->hdr_sink_metadata));
+ 
+ 	info->edid_hdmi_rgb444_dc_modes = 0;
+ 	info->edid_hdmi_ycbcr444_dc_modes = 0;
+diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
+index 149b8e25da5bbf..426d0867882dfb 100644
+--- a/drivers/gpu/drm/drm_gem.c
++++ b/drivers/gpu/drm/drm_gem.c
+@@ -322,7 +322,7 @@ int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
+ 		return -ENOENT;
+ 
+ 	/* Don't allow imported objects to be mapped */
+-	if (obj->import_attach) {
++	if (drm_gem_is_imported(obj)) {
+ 		ret = -EINVAL;
+ 		goto out;
+ 	}
+@@ -1152,7 +1152,7 @@ void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
+ 			  drm_vma_node_start(&obj->vma_node));
+ 	drm_printf_indent(p, indent, "size=%zu\n", obj->size);
+ 	drm_printf_indent(p, indent, "imported=%s\n",
+-			  str_yes_no(obj->import_attach));
++			  str_yes_no(drm_gem_is_imported(obj)));
+ 
+ 	if (obj->funcs->print_info)
+ 		obj->funcs->print_info(p, indent, obj);
+diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
+index 9c11d3158324c1..20a50180d4d495 100644
+--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
++++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
+@@ -410,12 +410,13 @@ static void mtk_dpi_config_swap_input(struct mtk_dpi *dpi, bool enable)
+ 
+ static void mtk_dpi_config_2n_h_fre(struct mtk_dpi *dpi)
+ {
+-	mtk_dpi_mask(dpi, dpi->conf->reg_h_fre_con, H_FRE_2N, H_FRE_2N);
++	if (dpi->conf->reg_h_fre_con)
++		mtk_dpi_mask(dpi, dpi->conf->reg_h_fre_con, H_FRE_2N, H_FRE_2N);
+ }
+ 
+ static void mtk_dpi_config_disable_edge(struct mtk_dpi *dpi)
+ {
+-	if (dpi->conf->edge_sel_en)
++	if (dpi->conf->edge_sel_en && dpi->conf->reg_h_fre_con)
+ 		mtk_dpi_mask(dpi, dpi->conf->reg_h_fre_con, 0, EDGE_SEL_EN);
+ }
+ 
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
+index 9c83bab0a53091..fc84ca214f247d 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
+@@ -58,7 +58,7 @@
+ #include <linux/parser.h>
+ 
+ #define GSP_MSG_MIN_SIZE GSP_PAGE_SIZE
+-#define GSP_MSG_MAX_SIZE GSP_PAGE_MIN_SIZE * 16
++#define GSP_MSG_MAX_SIZE (GSP_MSG_MIN_SIZE * 16)
+ 
+ struct r535_gsp_msg {
+ 	u8 auth_tag_buffer[16];
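[Editor's note] The macro fix above both corrects the identifier it multiplies and applies the standard parenthesize-your-expansion rule: without the parentheses, GSP_MSG_MAX_SIZE expands into a bare product whose neighbours can rebind its operators. A tiny demonstration of how the unparenthesized form goes wrong under division (macro names here are illustrative):

#include <stdio.h>

#define MIN_SIZE 4096
#define MAX_BAD  MIN_SIZE * 16		/* unparenthesized, like the old macro */
#define MAX_GOOD (MIN_SIZE * 16)	/* parenthesized, like the fix */

int main(void)
{
	unsigned long total = 1 << 20;	/* say, 1 MiB of message space */

	/* total / MAX_BAD expands to total / 4096 * 16 = (total/4096)*16 */
	printf("bad:  %lu\n", total / MAX_BAD);		/* 4096 -- wrong */
	printf("good: %lu\n", total / MAX_GOOD);	/* 16 -- intended */
	return 0;
}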
+diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
+index 767e47a2b0c149..663af985d1b389 100644
+--- a/drivers/gpu/drm/panel/panel-edp.c
++++ b/drivers/gpu/drm/panel/panel-edp.c
+@@ -1983,6 +1983,7 @@ static const struct edp_panel_entry edp_panels[] = {
+ 	EDP_PANEL_ENTRY('S', 'H', 'P', 0x153a, &delay_200_500_e50, "LQ140T1JH01"),
+ 	EDP_PANEL_ENTRY('S', 'H', 'P', 0x154c, &delay_200_500_p2e100, "LQ116M1JW10"),
+ 
++	EDP_PANEL_ENTRY('S', 'T', 'A', 0x0004, &delay_200_500_e200, "116KHD024006"),
+ 	EDP_PANEL_ENTRY('S', 'T', 'A', 0x0100, &delay_100_500_e200, "2081116HHD028001-51D"),
+ 
+ 	{ /* sentinel */ }
+diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+index 5880d87fe6b3aa..5d7df4c3b08c47 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
++++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+@@ -157,6 +157,7 @@ struct vop2_video_port {
+ 	struct drm_crtc crtc;
+ 	struct vop2 *vop2;
+ 	struct clk *dclk;
++	struct clk *dclk_src;
+ 	unsigned int id;
+ 	const struct vop2_video_port_data *data;
+ 
+@@ -211,6 +212,7 @@ struct vop2 {
+ 	struct clk *hclk;
+ 	struct clk *aclk;
+ 	struct clk *pclk;
++	struct clk *pll_hdmiphy0;
+ 
+ 	/* optional internal rgb encoder */
+ 	struct rockchip_rgb *rgb;
+@@ -219,6 +221,8 @@ struct vop2 {
+ 	struct vop2_win win[];
+ };
+ 
++#define VOP2_MAX_DCLK_RATE		600000000
++
+ #define vop2_output_if_is_hdmi(x)	((x) == ROCKCHIP_VOP2_EP_HDMI0 || \
+ 					 (x) == ROCKCHIP_VOP2_EP_HDMI1)
+ 
+@@ -1051,6 +1055,9 @@ static void vop2_crtc_atomic_disable(struct drm_crtc *crtc,
+ 
+ 	vop2_crtc_disable_irq(vp, VP_INT_DSP_HOLD_VALID);
+ 
++	if (vp->dclk_src)
++		clk_set_parent(vp->dclk, vp->dclk_src);
++
+ 	clk_disable_unprepare(vp->dclk);
+ 
+ 	vop2->enable_count--;
+@@ -1432,10 +1439,8 @@ static void vop2_plane_atomic_update(struct drm_plane *plane,
+ 
+ 	rb_swap = vop2_win_rb_swap(fb->format->format);
+ 	vop2_win_write(win, VOP2_WIN_RB_SWAP, rb_swap);
+-	if (!vop2_cluster_window(win)) {
+-		uv_swap = vop2_win_uv_swap(fb->format->format);
+-		vop2_win_write(win, VOP2_WIN_UV_SWAP, uv_swap);
+-	}
++	uv_swap = vop2_win_uv_swap(fb->format->format);
++	vop2_win_write(win, VOP2_WIN_UV_SWAP, uv_swap);
+ 
+ 	if (fb->format->is_yuv) {
+ 		vop2_win_write(win, VOP2_WIN_UV_VIR, DIV_ROUND_UP(fb->pitches[1], 4));
+@@ -2073,6 +2078,27 @@ static void vop2_crtc_atomic_enable(struct drm_crtc *crtc,
+ 
+ 	vop2_vp_write(vp, RK3568_VP_MIPI_CTRL, 0);
+ 
++	/*
++	 * Switch to HDMI PHY PLL as DCLK source for display modes up
++	 * to 4K@60Hz, if available, otherwise keep using the system CRU.
++	 */
++	if (vop2->pll_hdmiphy0 && clock <= VOP2_MAX_DCLK_RATE) {
++		drm_for_each_encoder_mask(encoder, crtc->dev, crtc_state->encoder_mask) {
++			struct rockchip_encoder *rkencoder = to_rockchip_encoder(encoder);
++
++			if (rkencoder->crtc_endpoint_id == ROCKCHIP_VOP2_EP_HDMI0) {
++				if (!vp->dclk_src)
++					vp->dclk_src = clk_get_parent(vp->dclk);
++
++				ret = clk_set_parent(vp->dclk, vop2->pll_hdmiphy0);
++				if (ret < 0)
++					drm_warn(vop2->drm,
++						 "Could not switch to HDMI0 PHY PLL: %d\n", ret);
++				break;
++			}
++		}
++	}
++
+ 	clk_set_rate(vp->dclk, clock);
+ 
+ 	vop2_post_config(crtc);
+@@ -3244,6 +3270,12 @@ static int vop2_bind(struct device *dev, struct device *master, void *data)
+ 		return PTR_ERR(vop2->pclk);
+ 	}
+ 
++	vop2->pll_hdmiphy0 = devm_clk_get_optional(vop2->dev, "pll_hdmiphy0");
++	if (IS_ERR(vop2->pll_hdmiphy0)) {
++		drm_err(vop2->drm, "failed to get pll_hdmiphy0\n");
++		return PTR_ERR(vop2->pll_hdmiphy0);
++	}
++
+ 	vop2->irq = platform_get_irq(pdev, 0);
+ 	if (vop2->irq < 0) {
+ 		drm_err(vop2->drm, "cannot find irq for vop2\n");
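[Editor's note] The VOP2 change above reparents the video-port dclk to the HDMI PHY PLL for modes up to 600 MHz, saving the original CRU parent the first time it reparents and restoring it when the CRTC is disabled. A toy userspace model of that save/restore; struct clk here is a stand-in, not the kernel clk API:

#include <stdio.h>
#include <stddef.h>

struct clk { const char *name; struct clk *parent; };

static struct clk cru_pll  = { "cru-dclk-pll", NULL };
static struct clk hdmi_pll = { "pll_hdmiphy0", NULL };
static struct clk dclk     = { "dclk_vp0", &cru_pll };
static struct clk *dclk_src;			/* saved original parent */

static void vp_enable(unsigned long rate_hz)
{
	if (rate_hz <= 600000000UL) {		/* VOP2_MAX_DCLK_RATE */
		if (!dclk_src)
			dclk_src = dclk.parent;	/* save CRU parent once */
		dclk.parent = &hdmi_pll;	/* clk_set_parent() */
	}
	printf("enable: dclk parent = %s\n", dclk.parent->name);
}

static void vp_disable(void)
{
	if (dclk_src)
		dclk.parent = dclk_src;		/* restore on disable */
	printf("disable: dclk parent = %s\n", dclk.parent->name);
}

int main(void)
{
	vp_enable(594000000UL);			/* 4K@60-class pixel clock */
	vp_disable();
	return 0;
}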
+diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
+index d7ff1f5fa481f7..7c17108da7d2da 100644
+--- a/drivers/gpu/drm/v3d/v3d_drv.c
++++ b/drivers/gpu/drm/v3d/v3d_drv.c
+@@ -286,11 +286,21 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
+ 	if (ret)
+ 		return ret;
+ 
++	v3d->clk = devm_clk_get_optional(dev, NULL);
++	if (IS_ERR(v3d->clk))
++		return dev_err_probe(dev, PTR_ERR(v3d->clk), "Failed to get V3D clock\n");
++
++	ret = clk_prepare_enable(v3d->clk);
++	if (ret) {
++		dev_err(&pdev->dev, "Couldn't enable the V3D clock\n");
++		return ret;
++	}
++
+ 	mmu_debug = V3D_READ(V3D_MMU_DEBUG_INFO);
+ 	mask = DMA_BIT_MASK(30 + V3D_GET_FIELD(mmu_debug, V3D_MMU_PA_WIDTH));
+ 	ret = dma_set_mask_and_coherent(dev, mask);
+ 	if (ret)
+-		return ret;
++		goto clk_disable;
+ 
+ 	v3d->va_width = 30 + V3D_GET_FIELD(mmu_debug, V3D_MMU_VA_WIDTH);
+ 
+@@ -310,28 +320,29 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
+ 		ret = PTR_ERR(v3d->reset);
+ 
+ 		if (ret == -EPROBE_DEFER)
+-			return ret;
++			goto clk_disable;
+ 
+ 		v3d->reset = NULL;
+ 		ret = map_regs(v3d, &v3d->bridge_regs, "bridge");
+ 		if (ret) {
+ 			dev_err(dev,
+ 				"Failed to get reset control or bridge regs\n");
+-			return ret;
++			goto clk_disable;
+ 		}
+ 	}
+ 
+ 	if (v3d->ver < 41) {
+ 		ret = map_regs(v3d, &v3d->gca_regs, "gca");
+ 		if (ret)
+-			return ret;
++			goto clk_disable;
+ 	}
+ 
+ 	v3d->mmu_scratch = dma_alloc_wc(dev, 4096, &v3d->mmu_scratch_paddr,
+ 					GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
+ 	if (!v3d->mmu_scratch) {
+ 		dev_err(dev, "Failed to allocate MMU scratch page\n");
+-		return -ENOMEM;
++		ret = -ENOMEM;
++		goto clk_disable;
+ 	}
+ 
+ 	ret = v3d_gem_init(drm);
+@@ -360,6 +371,8 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
+ 	v3d_gem_destroy(drm);
+ dma_free:
+ 	dma_free_wc(dev, 4096, v3d->mmu_scratch, v3d->mmu_scratch_paddr);
++clk_disable:
++	clk_disable_unprepare(v3d->clk);
+ 	return ret;
+ }
+ 
+@@ -377,6 +390,8 @@ static void v3d_platform_drm_remove(struct platform_device *pdev)
+ 
+ 	dma_free_wc(v3d->drm.dev, 4096, v3d->mmu_scratch,
+ 		    v3d->mmu_scratch_paddr);
++
++	clk_disable_unprepare(v3d->clk);
+ }
+ 
+ static struct platform_driver v3d_platform_driver = {
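[Editor's note] The v3d probe above acquires and enables an optional clock early, which forces every later failure path to unwind through the new clk_disable label instead of returning directly. A compact sketch of that goto-unwind shape with stubbed driver calls:

#include <stdio.h>

static int clk_on(void)
{
	puts("clk on");
	return 0;
}

static void clk_off(void)
{
	puts("clk off");
}

static int map_regs_stub(void)
{
	return -1;	/* simulate a failure after the clock is live */
}

static int probe(void)
{
	int ret = clk_on();

	if (ret)
		return ret;

	ret = map_regs_stub();
	if (ret)
		goto err_clk;	/* unwind instead of returning directly */

	return 0;

err_clk:
	clk_off();
	return ret;
}

int main(void)
{
	return probe() ? 1 : 0;
}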
+diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
+index 84e327b569252f..8acc4640f0a285 100644
+--- a/drivers/gpu/drm/xe/xe_bo.c
++++ b/drivers/gpu/drm/xe/xe_bo.c
+@@ -702,6 +702,21 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
+ 		goto out;
+ 	}
+ 
++	/* Reject BO eviction if BO is bound to current VM. */
++	if (evict && ctx->resv) {
++		struct drm_gpuvm_bo *vm_bo;
++
++		drm_gem_for_each_gpuvm_bo(vm_bo, &bo->ttm.base) {
++			struct xe_vm *vm = gpuvm_to_vm(vm_bo->vm);
++
++			if (xe_vm_resv(vm) == ctx->resv &&
++			    xe_vm_in_preempt_fence_mode(vm)) {
++				ret = -EBUSY;
++				goto out;
++			}
++		}
++	}
++
+ 	/*
+ 	 * Failed multi-hop where the old_mem is still marked as
+ 	 * TTM_PL_FLAG_TEMPORARY, should just be a dummy move.
+@@ -1975,6 +1990,7 @@ int xe_gem_create_ioctl(struct drm_device *dev, void *data,
+ 	struct xe_file *xef = to_xe_file(file);
+ 	struct drm_xe_gem_create *args = data;
+ 	struct xe_vm *vm = NULL;
++	ktime_t end = 0;
+ 	struct xe_bo *bo;
+ 	unsigned int bo_flags;
+ 	u32 handle;
+@@ -2047,6 +2063,10 @@ int xe_gem_create_ioctl(struct drm_device *dev, void *data,
+ 		vm = xe_vm_lookup(xef, args->vm_id);
+ 		if (XE_IOCTL_DBG(xe, !vm))
+ 			return -ENOENT;
++	}
++
++retry:
++	if (vm) {
+ 		err = xe_vm_lock(vm, true);
+ 		if (err)
+ 			goto out_vm;
+@@ -2060,6 +2080,8 @@ int xe_gem_create_ioctl(struct drm_device *dev, void *data,
+ 
+ 	if (IS_ERR(bo)) {
+ 		err = PTR_ERR(bo);
++		if (xe_vm_validate_should_retry(NULL, err, &end))
++			goto retry;
+ 		goto out_vm;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/xe/xe_debugfs.c b/drivers/gpu/drm/xe/xe_debugfs.c
+index fe4319eb13fdfb..278ce1c37b395a 100644
+--- a/drivers/gpu/drm/xe/xe_debugfs.c
++++ b/drivers/gpu/drm/xe/xe_debugfs.c
+@@ -147,7 +147,7 @@ static ssize_t wedged_mode_set(struct file *f, const char __user *ubuf,
+ 		return -EINVAL;
+ 
+ 	if (xe->wedged.mode == wedged_mode)
+-		return 0;
++		return size;
+ 
+ 	xe->wedged.mode = wedged_mode;
+ 
+@@ -156,6 +156,7 @@ static ssize_t wedged_mode_set(struct file *f, const char __user *ubuf,
+ 		ret = xe_guc_ads_scheduler_policy_toggle_reset(&gt->uc.guc.ads);
+ 		if (ret) {
+ 			xe_gt_err(gt, "Failed to update GuC ADS scheduler policy. GuC may still cause engine reset even with wedged_mode=2\n");
++			xe_pm_runtime_put(xe);
+ 			return -EIO;
+ 		}
+ 	}
+diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
+index bb85208cf1a94c..23e02372a49dba 100644
+--- a/drivers/gpu/drm/xe/xe_device.c
++++ b/drivers/gpu/drm/xe/xe_device.c
+@@ -694,7 +694,9 @@ int xe_device_probe(struct xe_device *xe)
+ 	}
+ 
+ 	/* Allocate and map stolen after potential VRAM resize */
+-	xe_ttm_stolen_mgr_init(xe);
++	err = xe_ttm_stolen_mgr_init(xe);
++	if (err)
++		return err;
+ 
+ 	/*
+ 	 * Now that GT is initialized (TTM in particular),
+@@ -706,6 +708,12 @@ int xe_device_probe(struct xe_device *xe)
+ 	if (err)
+ 		goto err;
+ 
++	for_each_tile(tile, xe, id) {
++		err = xe_tile_init(tile);
++		if (err)
++			goto err;
++	}
++
+ 	for_each_gt(gt, xe, id) {
+ 		last_gt = id;
+ 
+diff --git a/drivers/gpu/drm/xe/xe_gen_wa_oob.c b/drivers/gpu/drm/xe/xe_gen_wa_oob.c
+index 904cf47925aa1d..ed9183599e31cc 100644
+--- a/drivers/gpu/drm/xe/xe_gen_wa_oob.c
++++ b/drivers/gpu/drm/xe/xe_gen_wa_oob.c
+@@ -28,10 +28,10 @@
+ 	"\n" \
+ 	"#endif\n"
+ 
+-static void print_usage(FILE *f)
++static void print_usage(FILE *f, const char *progname)
+ {
+ 	fprintf(f, "usage: %s <input-rule-file> <generated-c-source-file> <generated-c-header-file>\n",
+-		program_invocation_short_name);
++		progname);
+ }
+ 
+ static void print_parse_error(const char *err_msg, const char *line,
+@@ -144,7 +144,7 @@ int main(int argc, const char *argv[])
+ 
+ 	if (argc < 3) {
+ 		fprintf(stderr, "ERROR: wrong arguments\n");
+-		print_usage(stderr);
++		print_usage(stderr, argv[0]);
+ 		return 1;
+ 	}
+ 
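[Editor's note] The change above drops the GNU-only program_invocation_short_name in favour of passing argv[0] through to print_usage(), which is portable C. A sketch of the same idea; the basename-stripping helper is an optional refinement the patch itself does not add:

#include <stdio.h>
#include <string.h>

static const char *progname(const char *argv0)
{
	const char *slash = strrchr(argv0, '/');

	return slash ? slash + 1 : argv0;
}

int main(int argc, char *argv[])
{
	(void)argc;
	fprintf(stderr, "usage: %s <input> <output.c> <output.h>\n",
		progname(argv[0]));
	return 0;
}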
+diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
+index c9ed996b9cb0c3..786f0dba41437d 100644
+--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
++++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
+@@ -323,6 +323,26 @@ static int pf_push_full_vf_config(struct xe_gt *gt, unsigned int vfid)
+ 	return err;
+ }
+ 
++static int pf_push_vf_cfg(struct xe_gt *gt, unsigned int vfid, bool reset)
++{
++	int err = 0;
++
++	xe_gt_assert(gt, vfid);
++	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
++
++	if (reset)
++		err = pf_send_vf_cfg_reset(gt, vfid);
++	if (!err)
++		err = pf_push_full_vf_config(gt, vfid);
++
++	return err;
++}
++
++static int pf_refresh_vf_cfg(struct xe_gt *gt, unsigned int vfid)
++{
++	return pf_push_vf_cfg(gt, vfid, true);
++}
++
+ static u64 pf_get_ggtt_alignment(struct xe_gt *gt)
+ {
+ 	struct xe_device *xe = gt_to_xe(gt);
+@@ -419,6 +439,10 @@ static int pf_provision_vf_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size)
+ 			return err;
+ 
+ 		pf_release_vf_config_ggtt(gt, config);
++
++		err = pf_refresh_vf_cfg(gt, vfid);
++		if (unlikely(err))
++			return err;
+ 	}
+ 	xe_gt_assert(gt, !xe_ggtt_node_allocated(config->ggtt_region));
+ 
+@@ -744,6 +768,10 @@ static int pf_provision_vf_ctxs(struct xe_gt *gt, unsigned int vfid, u32 num_ctx
+ 			return ret;
+ 
+ 		pf_release_config_ctxs(gt, config);
++
++		ret = pf_refresh_vf_cfg(gt, vfid);
++		if (unlikely(ret))
++			return ret;
+ 	}
+ 
+ 	if (!num_ctxs)
+@@ -1041,6 +1069,10 @@ static int pf_provision_vf_dbs(struct xe_gt *gt, unsigned int vfid, u32 num_dbs)
+ 			return ret;
+ 
+ 		pf_release_config_dbs(gt, config);
++
++		ret = pf_refresh_vf_cfg(gt, vfid);
++		if (unlikely(ret))
++			return ret;
+ 	}
+ 
+ 	if (!num_dbs)
+@@ -2003,10 +2035,7 @@ int xe_gt_sriov_pf_config_push(struct xe_gt *gt, unsigned int vfid, bool refresh
+ 	xe_gt_assert(gt, vfid);
+ 
+ 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+-	if (refresh)
+-		err = pf_send_vf_cfg_reset(gt, vfid);
+-	if (!err)
+-		err = pf_push_full_vf_config(gt, vfid);
++	err = pf_push_vf_cfg(gt, vfid, refresh);
+ 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+ 
+ 	if (unlikely(err)) {
+diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
+index f982d6f9f218d8..29badbd829ab60 100644
+--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
++++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
+@@ -46,12 +46,19 @@ static int guc_action_vf_reset(struct xe_guc *guc)
+ 	return ret > 0 ? -EPROTO : ret;
+ }
+ 
++#define GUC_RESET_VF_STATE_RETRY_MAX	10
+ static int vf_reset_guc_state(struct xe_gt *gt)
+ {
++	unsigned int retry = GUC_RESET_VF_STATE_RETRY_MAX;
+ 	struct xe_guc *guc = &gt->uc.guc;
+ 	int err;
+ 
+-	err = guc_action_vf_reset(guc);
++	do {
++		err = guc_action_vf_reset(guc);
++		if (!err || err != -ETIMEDOUT)
++			break;
++	} while (--retry);
++
+ 	if (unlikely(err))
+ 		xe_gt_sriov_err(gt, "Failed to reset GuC state (%pe)\n", ERR_PTR(err));
+ 	return err;
+@@ -228,6 +235,9 @@ int xe_gt_sriov_vf_bootstrap(struct xe_gt *gt)
+ {
+ 	int err;
+ 
++	if (!xe_device_uc_enabled(gt_to_xe(gt)))
++		return -ENODEV;
++
+ 	err = vf_reset_guc_state(gt);
+ 	if (unlikely(err))
+ 		return err;
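[Editor's note] vf_reset_guc_state() above now retries the GuC reset action up to ten times, but only when the failure is -ETIMEDOUT; success or any other error breaks out immediately. A runnable model of that loop, where try_reset() fakes a GuC that answers on the third attempt:

#include <stdio.h>
#include <errno.h>

#define RETRY_MAX 10

static int attempts;

static int try_reset(void)
{
	return ++attempts < 3 ? -ETIMEDOUT : 0;	/* succeed on 3rd try */
}

int main(void)
{
	unsigned int retry = RETRY_MAX;
	int err;

	do {
		err = try_reset();
		if (!err || err != -ETIMEDOUT)
			break;			/* done, or a real error */
	} while (--retry);

	printf("err=%d after %d attempt(s)\n", err, attempts);
	return 0;
}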
+diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
+index 98a450271f5cee..3155825fa46ad3 100644
+--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
++++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
+@@ -406,6 +406,28 @@ int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
+ 	return send_tlb_invalidation(&gt->uc.guc, fence, action, len);
+ }
+ 
++/**
++ * xe_gt_tlb_invalidation_vm - Issue a TLB invalidation on this GT for a VM
++ * @gt: graphics tile
++ * @vm: VM to invalidate
++ *
++ * Invalidate entire VM's address space
++ */
++void xe_gt_tlb_invalidation_vm(struct xe_gt *gt, struct xe_vm *vm)
++{
++	struct xe_gt_tlb_invalidation_fence fence;
++	u64 range = 1ull << vm->xe->info.va_bits;
++	int ret;
++
++	xe_gt_tlb_invalidation_fence_init(gt, &fence, true);
++
++	ret = xe_gt_tlb_invalidation_range(gt, &fence, 0, range, vm->usm.asid);
++	if (ret < 0)
++		return;
++
++	xe_gt_tlb_invalidation_fence_wait(&fence);
++}
++
+ /**
+  * xe_gt_tlb_invalidation_vma - Issue a TLB invalidation on this GT for a VMA
+  * @gt: graphics tile
+diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
+index 672acfcdf0d70d..abe9b03d543e6e 100644
+--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
++++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
+@@ -12,6 +12,7 @@
+ 
+ struct xe_gt;
+ struct xe_guc;
++struct xe_vm;
+ struct xe_vma;
+ 
+ int xe_gt_tlb_invalidation_init_early(struct xe_gt *gt);
+@@ -21,6 +22,7 @@ int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt);
+ int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
+ 			       struct xe_gt_tlb_invalidation_fence *fence,
+ 			       struct xe_vma *vma);
++void xe_gt_tlb_invalidation_vm(struct xe_gt *gt, struct xe_vm *vm);
+ int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
+ 				 struct xe_gt_tlb_invalidation_fence *fence,
+ 				 u64 start, u64 end, u32 asid);
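The new xe_gt_tlb_invalidation_vm() helper is synchronous: it initializes a stack fence, issues one ranged invalidation covering the VM's entire virtual address space (1ull << va_bits, starting at 0) for the VM's ASID, and waits for completion. A hedged sketch of a caller, mirroring how the xe_vm.c hunk later in this patch uses it (the wrapper function is hypothetical; for_each_gt() and the xe types are the driver's own):

static void flush_vm_on_all_gts(struct xe_device *xe, struct xe_vm *vm)
{
	struct xe_gt *gt;
	u8 id;

	for_each_gt(gt, xe, id)
		xe_gt_tlb_invalidation_vm(gt, vm);
}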
+diff --git a/drivers/gpu/drm/xe/xe_guc_relay.c b/drivers/gpu/drm/xe/xe_guc_relay.c
+index ade6162dc25987..0e5b43e1518ea5 100644
+--- a/drivers/gpu/drm/xe/xe_guc_relay.c
++++ b/drivers/gpu/drm/xe/xe_guc_relay.c
+@@ -224,7 +224,7 @@ __relay_get_transaction(struct xe_guc_relay *relay, bool incoming, u32 remote, u
+ 	 * with CTB lock held which is marked as used in the reclaim path.
+ 	 * Btw, that's one of the reason why we use mempool here!
+ 	 */
+-	txn = mempool_alloc(&relay->pool, incoming ? GFP_ATOMIC : GFP_KERNEL);
++	txn = mempool_alloc(&relay->pool, incoming ? GFP_ATOMIC : GFP_NOWAIT);
+ 	if (!txn)
+ 		return ERR_PTR(-ENOMEM);
+ 
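The GFP_KERNEL to GFP_NOWAIT switch follows from the comment above the call: the transaction pool can be reached while holding the CTB lock, which is marked as used in the reclaim path, so a sleeping allocation could recurse into reclaim and deadlock. GFP_NOWAIT fails fast instead, and the mempool's reserve absorbs transient failures. As a kernel-context illustration only (not driver code):

/* Illustration: never sleep in an allocation made while holding a
 * lock that the reclaim path may also take. */
static inline gfp_t relay_txn_gfp(bool incoming)
{
	return incoming ? GFP_ATOMIC : GFP_NOWAIT;
}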
+diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
+index 448766033690c7..d306ed0a04434d 100644
+--- a/drivers/gpu/drm/xe/xe_oa.c
++++ b/drivers/gpu/drm/xe/xe_oa.c
+@@ -535,6 +535,7 @@ static ssize_t xe_oa_read(struct file *file, char __user *buf,
+ 			mutex_unlock(&stream->stream_lock);
+ 		} while (!offset && !ret);
+ 	} else {
++		xe_oa_buffer_check_unlocked(stream);
+ 		mutex_lock(&stream->stream_lock);
+ 		ret = __xe_oa_read(stream, buf, count, &offset);
+ 		mutex_unlock(&stream->stream_lock);
+diff --git a/drivers/gpu/drm/xe/xe_pci_sriov.c b/drivers/gpu/drm/xe/xe_pci_sriov.c
+index aaceee748287ef..09ee8a06fe2ed3 100644
+--- a/drivers/gpu/drm/xe/xe_pci_sriov.c
++++ b/drivers/gpu/drm/xe/xe_pci_sriov.c
+@@ -62,6 +62,55 @@ static void pf_reset_vfs(struct xe_device *xe, unsigned int num_vfs)
+ 			xe_gt_sriov_pf_control_trigger_flr(gt, n);
+ }
+ 
++static struct pci_dev *xe_pci_pf_get_vf_dev(struct xe_device *xe, unsigned int vf_id)
++{
++	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
++
++	xe_assert(xe, IS_SRIOV_PF(xe));
++
++	/* caller must use pci_dev_put() */
++	return pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
++			pdev->bus->number,
++			pci_iov_virtfn_devfn(pdev, vf_id));
++}
++
++static void pf_link_vfs(struct xe_device *xe, int num_vfs)
++{
++	struct pci_dev *pdev_pf = to_pci_dev(xe->drm.dev);
++	struct device_link *link;
++	struct pci_dev *pdev_vf;
++	unsigned int n;
++
++	/*
++	 * When both PF and VF devices are enabled on the host, during system
++	 * resume they are resuming in parallel.
++	 *
++	 * But PF has to complete the provision of VF first to allow any VFs to
++	 * successfully resume.
++	 *
++	 * Create a parent-child device link between PF and VF devices that will
++	 * enforce correct resume order.
++	 */
++	for (n = 1; n <= num_vfs; n++) {
++		pdev_vf = xe_pci_pf_get_vf_dev(xe, n - 1);
++
++		/* unlikely, something weird is happening, abort */
++		if (!pdev_vf) {
++			xe_sriov_err(xe, "Cannot find VF%u device, aborting link%s creation!\n",
++				     n, str_plural(num_vfs));
++			break;
++		}
++
++		link = device_link_add(&pdev_vf->dev, &pdev_pf->dev,
++				       DL_FLAG_AUTOREMOVE_CONSUMER);
++		/* unlikely and harmless, continue with other VFs */
++		if (!link)
++			xe_sriov_notice(xe, "Failed linking VF%u\n", n);
++
++		pci_dev_put(pdev_vf);
++	}
++}
++
+ static int pf_enable_vfs(struct xe_device *xe, int num_vfs)
+ {
+ 	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+@@ -92,6 +141,8 @@ static int pf_enable_vfs(struct xe_device *xe, int num_vfs)
+ 	if (err < 0)
+ 		goto failed;
+ 
++	pf_link_vfs(xe, num_vfs);
++
+ 	xe_sriov_info(xe, "Enabled %u of %u VF%s\n",
+ 		      num_vfs, total_vfs, str_plural(total_vfs));
+ 	return num_vfs;
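pf_link_vfs() leans on the driver core's device links: making each VF PCI device a PM consumer of the PF guarantees the PF resumes (and re-provisions the VFs) first, and DL_FLAG_AUTOREMOVE_CONSUMER drops the link automatically when the VF device goes away. The core idiom, reduced to its essentials (the order_resume() wrapper is hypothetical):

#include <linux/device.h>
#include <linux/errno.h>

/* Sketch: make "consumer" resume only after "supplier" has resumed. */
static int order_resume(struct device *consumer, struct device *supplier)
{
	struct device_link *link;

	link = device_link_add(consumer, supplier,
			       DL_FLAG_AUTOREMOVE_CONSUMER);
	return link ? 0 : -ENODEV;
}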
+diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
+index 230cf47fb9c5ee..fb94ff55c73616 100644
+--- a/drivers/gpu/drm/xe/xe_pt.c
++++ b/drivers/gpu/drm/xe/xe_pt.c
+@@ -217,6 +217,20 @@ void xe_pt_destroy(struct xe_pt *pt, u32 flags, struct llist_head *deferred)
+ 	xe_pt_free(pt);
+ }
+ 
++/**
++ * xe_pt_clear() - Clear a page-table.
++ * @xe: xe device.
++ * @pt: The page-table.
++ *
++ * Clears page-table by setting to zero.
++ */
++void xe_pt_clear(struct xe_device *xe, struct xe_pt *pt)
++{
++	struct iosys_map *map = &pt->bo->vmap;
++
++	xe_map_memset(xe, map, 0, 0, SZ_4K);
++}
++
+ /**
+  * DOC: Pagetable building
+  *
+diff --git a/drivers/gpu/drm/xe/xe_pt.h b/drivers/gpu/drm/xe/xe_pt.h
+index 9ab386431caddb..8e43912ae8e94c 100644
+--- a/drivers/gpu/drm/xe/xe_pt.h
++++ b/drivers/gpu/drm/xe/xe_pt.h
+@@ -13,6 +13,7 @@ struct dma_fence;
+ struct xe_bo;
+ struct xe_device;
+ struct xe_exec_queue;
++struct xe_svm_range;
+ struct xe_sync_entry;
+ struct xe_tile;
+ struct xe_vm;
+@@ -35,6 +36,8 @@ void xe_pt_populate_empty(struct xe_tile *tile, struct xe_vm *vm,
+ 
+ void xe_pt_destroy(struct xe_pt *pt, u32 flags, struct llist_head *deferred);
+ 
++void xe_pt_clear(struct xe_device *xe, struct xe_pt *pt);
++
+ int xe_pt_update_ops_prepare(struct xe_tile *tile, struct xe_vma_ops *vops);
+ struct dma_fence *xe_pt_update_ops_run(struct xe_tile *tile,
+ 				       struct xe_vma_ops *vops);
+diff --git a/drivers/gpu/drm/xe/xe_sa.c b/drivers/gpu/drm/xe/xe_sa.c
+index fe2cb2a96f7885..1d425475900677 100644
+--- a/drivers/gpu/drm/xe/xe_sa.c
++++ b/drivers/gpu/drm/xe/xe_sa.c
+@@ -57,8 +57,6 @@ struct xe_sa_manager *xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32
+ 	}
+ 	sa_manager->bo = bo;
+ 	sa_manager->is_iomem = bo->vmap.is_iomem;
+-
+-	drm_suballoc_manager_init(&sa_manager->base, managed_size, align);
+ 	sa_manager->gpu_addr = xe_bo_ggtt_addr(bo);
+ 
+ 	if (bo->vmap.is_iomem) {
+@@ -72,6 +70,7 @@ struct xe_sa_manager *xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32
+ 		memset(sa_manager->cpu_ptr, 0, bo->ttm.base.size);
+ 	}
+ 
++	drm_suballoc_manager_init(&sa_manager->base, managed_size, align);
+ 	ret = drmm_add_action_or_reset(&xe->drm, xe_sa_bo_manager_fini,
+ 				       sa_manager);
+ 	if (ret)
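The xe_sa hunk is purely an ordering fix: drm_suballoc_manager_init() used to run before the backing BO's CPU pointer was set up and zeroed, so the suballocator was briefly exposed over uninitialized memory. The corrected shape, as a generic sketch (the helper names are hypothetical):

/* Ordering sketch: back the allocator, zero its memory, and only
 * then make it allocatable. */
cpu_ptr = map_backing_bo(bo);
memset(cpu_ptr, 0, size);
drm_suballoc_manager_init(&sa_manager->base, managed_size, align);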
+diff --git a/drivers/gpu/drm/xe/xe_tile.c b/drivers/gpu/drm/xe/xe_tile.c
+index dda5268507d8e1..36c87d7c72fbcb 100644
+--- a/drivers/gpu/drm/xe/xe_tile.c
++++ b/drivers/gpu/drm/xe/xe_tile.c
+@@ -167,17 +167,19 @@ int xe_tile_init_noalloc(struct xe_tile *tile)
+ 	if (err)
+ 		return err;
+ 
++	xe_wa_apply_tile_workarounds(tile);
++
++	return xe_tile_sysfs_init(tile);
++}
++
++int xe_tile_init(struct xe_tile *tile)
++{
+ 	tile->mem.kernel_bb_pool = xe_sa_bo_manager_init(tile, SZ_1M, 16);
+ 	if (IS_ERR(tile->mem.kernel_bb_pool))
+ 		return PTR_ERR(tile->mem.kernel_bb_pool);
+ 
+-	xe_wa_apply_tile_workarounds(tile);
+-
+-	err = xe_tile_sysfs_init(tile);
+-
+ 	return 0;
+ }
+-
+ void xe_tile_migrate_wait(struct xe_tile *tile)
+ {
+ 	xe_migrate_wait(tile->migrate);
+diff --git a/drivers/gpu/drm/xe/xe_tile.h b/drivers/gpu/drm/xe/xe_tile.h
+index 1c9e42ade6b05d..eb939316d55b05 100644
+--- a/drivers/gpu/drm/xe/xe_tile.h
++++ b/drivers/gpu/drm/xe/xe_tile.h
+@@ -12,6 +12,7 @@ struct xe_tile;
+ 
+ int xe_tile_init_early(struct xe_tile *tile, struct xe_device *xe, u8 id);
+ int xe_tile_init_noalloc(struct xe_tile *tile);
++int xe_tile_init(struct xe_tile *tile);
+ 
+ void xe_tile_migrate_wait(struct xe_tile *tile);
+ 
+diff --git a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
+index f7113cf6109d59..ef84fa757b26f1 100644
+--- a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
++++ b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
+@@ -201,17 +201,16 @@ static u64 detect_stolen(struct xe_device *xe, struct xe_ttm_stolen_mgr *mgr)
+ #endif
+ }
+ 
+-void xe_ttm_stolen_mgr_init(struct xe_device *xe)
++int xe_ttm_stolen_mgr_init(struct xe_device *xe)
+ {
+-	struct xe_ttm_stolen_mgr *mgr = drmm_kzalloc(&xe->drm, sizeof(*mgr), GFP_KERNEL);
+ 	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
++	struct xe_ttm_stolen_mgr *mgr;
+ 	u64 stolen_size, io_size;
+ 	int err;
+ 
+-	if (!mgr) {
+-		drm_dbg_kms(&xe->drm, "Stolen mgr init failed\n");
+-		return;
+-	}
++	mgr = drmm_kzalloc(&xe->drm, sizeof(*mgr), GFP_KERNEL);
++	if (!mgr)
++		return -ENOMEM;
+ 
+ 	if (IS_SRIOV_VF(xe))
+ 		stolen_size = 0;
+@@ -224,7 +223,7 @@ void xe_ttm_stolen_mgr_init(struct xe_device *xe)
+ 
+ 	if (!stolen_size) {
+ 		drm_dbg_kms(&xe->drm, "No stolen memory support\n");
+-		return;
++		return 0;
+ 	}
+ 
+ 	/*
+@@ -240,7 +239,7 @@ void xe_ttm_stolen_mgr_init(struct xe_device *xe)
+ 				     io_size, PAGE_SIZE);
+ 	if (err) {
+ 		drm_dbg_kms(&xe->drm, "Stolen mgr init failed: %i\n", err);
+-		return;
++		return err;
+ 	}
+ 
+ 	drm_dbg_kms(&xe->drm, "Initialized stolen memory support with %llu bytes\n",
+@@ -248,6 +247,8 @@ void xe_ttm_stolen_mgr_init(struct xe_device *xe)
+ 
+ 	if (io_size)
+ 		mgr->mapping = devm_ioremap_wc(&pdev->dev, mgr->io_base, io_size);
++
++	return 0;
+ }
+ 
+ u64 xe_ttm_stolen_io_offset(struct xe_bo *bo, u32 offset)
+diff --git a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.h b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.h
+index 1777245ff81011..8e877d1e839bd5 100644
+--- a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.h
++++ b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.h
+@@ -12,7 +12,7 @@ struct ttm_resource;
+ struct xe_bo;
+ struct xe_device;
+ 
+-void xe_ttm_stolen_mgr_init(struct xe_device *xe);
++int xe_ttm_stolen_mgr_init(struct xe_device *xe);
+ int xe_ttm_stolen_io_mem_reserve(struct xe_device *xe, struct ttm_resource *mem);
+ bool xe_ttm_stolen_cpu_access_needs_ggtt(struct xe_device *xe);
+ u64 xe_ttm_stolen_io_offset(struct xe_bo *bo, u32 offset);
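Converting xe_ttm_stolen_mgr_init() from void to int turns silent degradation into fail-fast probing: "device has no stolen memory" still returns 0, since that is a supported configuration, while allocation and manager-init failures now propagate. A hypothetical probe-path caller (not part of this hunk):

err = xe_ttm_stolen_mgr_init(xe);
if (err)
	return err;	/* fail the probe instead of limping on */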
+diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
+index 872de052d670f5..de257a032225ff 100644
+--- a/drivers/gpu/drm/xe/xe_vm.c
++++ b/drivers/gpu/drm/xe/xe_vm.c
+@@ -8,6 +8,7 @@
+ #include <linux/dma-fence-array.h>
+ #include <linux/nospec.h>
+ 
++#include <drm/drm_drv.h>
+ #include <drm/drm_exec.h>
+ #include <drm/drm_print.h>
+ #include <drm/ttm/ttm_execbuf_util.h>
+@@ -1581,9 +1582,40 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
+ 
+ static void xe_vm_close(struct xe_vm *vm)
+ {
++	struct xe_device *xe = vm->xe;
++	bool bound;
++	int idx;
++
++	bound = drm_dev_enter(&xe->drm, &idx);
++
+ 	down_write(&vm->lock);
++
+ 	vm->size = 0;
++
++	if (!((vm->flags & XE_VM_FLAG_MIGRATION))) {
++		struct xe_tile *tile;
++		struct xe_gt *gt;
++		u8 id;
++
++		/* Wait for pending binds */
++		dma_resv_wait_timeout(xe_vm_resv(vm),
++				      DMA_RESV_USAGE_BOOKKEEP,
++				      false, MAX_SCHEDULE_TIMEOUT);
++
++		if (bound) {
++			for_each_tile(tile, xe, id)
++				if (vm->pt_root[id])
++					xe_pt_clear(xe, vm->pt_root[id]);
++
++			for_each_gt(gt, xe, id)
++				xe_gt_tlb_invalidation_vm(gt, vm);
++		}
++	}
++
+ 	up_write(&vm->lock);
++
++	if (bound)
++		drm_dev_exit(idx);
+ }
+ 
+ void xe_vm_close_and_put(struct xe_vm *vm)
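xe_vm_close() now waits out pending binds, zeroes the VM's root page tables, and flushes the TLBs on every GT, but it wraps the hardware-touching part in drm_dev_enter()/drm_dev_exit() so a hot-unplugged device only gets the software teardown. The guard idiom in isolation (sketch; drm is the struct drm_device):

int idx;

if (drm_dev_enter(drm, &idx)) {
	/* device still present: safe to touch hardware */
	drm_dev_exit(idx);
}
/* unplugged: skip MMIO, software-only teardown continues */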
+diff --git a/drivers/hid/usbhid/usbkbd.c b/drivers/hid/usbhid/usbkbd.c
+index c439ed2f16dbca..af6bc76dbf6493 100644
+--- a/drivers/hid/usbhid/usbkbd.c
++++ b/drivers/hid/usbhid/usbkbd.c
+@@ -160,7 +160,7 @@ static int usb_kbd_event(struct input_dev *dev, unsigned int type,
+ 		return -1;
+ 
+ 	spin_lock_irqsave(&kbd->leds_lock, flags);
+-	kbd->newleds = (!!test_bit(LED_KANA,    dev->led) << 3) | (!!test_bit(LED_COMPOSE, dev->led) << 3) |
++	kbd->newleds = (!!test_bit(LED_KANA,    dev->led) << 4) | (!!test_bit(LED_COMPOSE, dev->led) << 3) |
+ 		       (!!test_bit(LED_SCROLLL, dev->led) << 2) | (!!test_bit(LED_CAPSL,   dev->led) << 1) |
+ 		       (!!test_bit(LED_NUML,    dev->led));
+ 
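The usbkbd change fixes a copy-paste bug: LED_KANA and LED_COMPOSE were both shifted by 3, so the Kana state clobbered Compose in the boot-protocol LED output report. In that report's layout (bit 0 NumLock through bit 4 Kana), Kana belongs at bit 4. A runnable check of the corrected packing:

#include <stdio.h>

/* Boot-protocol keyboard LED report: bit0=NumLock, bit1=CapsLock,
 * bit2=ScrollLock, bit3=Compose, bit4=Kana. */
static unsigned char pack_leds(int num, int caps, int scroll,
			       int compose, int kana)
{
	return (!!kana << 4) | (!!compose << 3) | (!!scroll << 2) |
	       (!!caps << 1) | !!num;
}

int main(void)
{
	/* Kana alone must yield 0x10, not collide with Compose's 0x08. */
	printf("0x%02x\n", pack_leds(0, 0, 0, 0, 1));	/* prints 0x10 */
	return 0;
}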
+diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
+index f5bdf842040e6c..b043fbd15c9da8 100644
+--- a/drivers/hwmon/dell-smm-hwmon.c
++++ b/drivers/hwmon/dell-smm-hwmon.c
+@@ -73,7 +73,7 @@
+ #define DELL_SMM_LEGACY_EXECUTE	0x1
+ 
+ #define DELL_SMM_NO_TEMP	10
+-#define DELL_SMM_NO_FANS	3
++#define DELL_SMM_NO_FANS	4
+ 
+ struct smm_regs {
+ 	unsigned int eax;
+@@ -1074,11 +1074,14 @@ static const struct hwmon_channel_info * const dell_smm_info[] = {
+ 			   HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MIN | HWMON_F_MAX |
+ 			   HWMON_F_TARGET,
+ 			   HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MIN | HWMON_F_MAX |
++			   HWMON_F_TARGET,
++			   HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MIN | HWMON_F_MAX |
+ 			   HWMON_F_TARGET
+ 			   ),
+ 	HWMON_CHANNEL_INFO(pwm,
+ 			   HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
+ 			   HWMON_PWM_INPUT,
++			   HWMON_PWM_INPUT,
+ 			   HWMON_PWM_INPUT
+ 			   ),
+ 	NULL
+diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
+index d92c536be9af78..b779240328d59f 100644
+--- a/drivers/hwmon/gpio-fan.c
++++ b/drivers/hwmon/gpio-fan.c
+@@ -393,7 +393,12 @@ static int gpio_fan_set_cur_state(struct thermal_cooling_device *cdev,
+ 	if (state >= fan_data->num_speed)
+ 		return -EINVAL;
+ 
++	mutex_lock(&fan_data->lock);
++
+ 	set_fan_speed(fan_data, state);
++
++	mutex_unlock(&fan_data->lock);
++
+ 	return 0;
+ }
+ 
+@@ -489,7 +494,11 @@ MODULE_DEVICE_TABLE(of, of_gpio_fan_match);
+ 
+ static void gpio_fan_stop(void *data)
+ {
++	struct gpio_fan_data *fan_data = data;
++
++	mutex_lock(&fan_data->lock);
+ 	set_fan_speed(data, 0);
++	mutex_unlock(&fan_data->lock);
+ }
+ 
+ static int gpio_fan_probe(struct platform_device *pdev)
+@@ -562,7 +571,9 @@ static int gpio_fan_suspend(struct device *dev)
+ 
+ 	if (fan_data->gpios) {
+ 		fan_data->resume_speed = fan_data->speed_index;
++		mutex_lock(&fan_data->lock);
+ 		set_fan_speed(fan_data, 0);
++		mutex_unlock(&fan_data->lock);
+ 	}
+ 
+ 	return 0;
+@@ -572,8 +583,11 @@ static int gpio_fan_resume(struct device *dev)
+ {
+ 	struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
+ 
+-	if (fan_data->gpios)
++	if (fan_data->gpios) {
++		mutex_lock(&fan_data->lock);
+ 		set_fan_speed(fan_data, fan_data->resume_speed);
++		mutex_unlock(&fan_data->lock);
++	}
+ 
+ 	return 0;
+ }
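The gpio-fan hunks make every set_fan_speed() caller (cooling device, teardown, suspend, resume) take fan_data->lock, closing races over the shared speed index and GPIO state. If more call sites accumulate, a locked wrapper would keep the invariant in one place; a hypothetical helper, not in the patch:

static void set_fan_speed_locked(struct gpio_fan_data *fan_data, int speed)
{
	mutex_lock(&fan_data->lock);
	set_fan_speed(fan_data, speed);
	mutex_unlock(&fan_data->lock);
}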
+diff --git a/drivers/hwmon/xgene-hwmon.c b/drivers/hwmon/xgene-hwmon.c
+index 92d82faf237fcf..4e05077e4256d4 100644
+--- a/drivers/hwmon/xgene-hwmon.c
++++ b/drivers/hwmon/xgene-hwmon.c
+@@ -105,7 +105,7 @@ struct xgene_hwmon_dev {
+ 
+ 	phys_addr_t		comm_base_addr;
+ 	void			*pcc_comm_addr;
+-	u64			usecs_lat;
++	unsigned int		usecs_lat;
+ };
+ 
+ /*
+diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c
+index aea9ac9c4bd069..7948597d483d2b 100644
+--- a/drivers/hwtracing/coresight/coresight-etb10.c
++++ b/drivers/hwtracing/coresight/coresight-etb10.c
+@@ -84,7 +84,7 @@ struct etb_drvdata {
+ 	struct clk		*atclk;
+ 	struct coresight_device	*csdev;
+ 	struct miscdevice	miscdev;
+-	spinlock_t		spinlock;
++	raw_spinlock_t		spinlock;
+ 	local_t			reading;
+ 	pid_t			pid;
+ 	u8			*buf;
+@@ -145,7 +145,7 @@ static int etb_enable_sysfs(struct coresight_device *csdev)
+ 	unsigned long flags;
+ 	struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ 
+-	spin_lock_irqsave(&drvdata->spinlock, flags);
++	raw_spin_lock_irqsave(&drvdata->spinlock, flags);
+ 
+ 	/* Don't messup with perf sessions. */
+ 	if (coresight_get_mode(csdev) == CS_MODE_PERF) {
+@@ -163,7 +163,7 @@ static int etb_enable_sysfs(struct coresight_device *csdev)
+ 
+ 	csdev->refcnt++;
+ out:
+-	spin_unlock_irqrestore(&drvdata->spinlock, flags);
++	raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ 	return ret;
+ }
+ 
+@@ -176,7 +176,7 @@ static int etb_enable_perf(struct coresight_device *csdev, void *data)
+ 	struct perf_output_handle *handle = data;
+ 	struct cs_buffers *buf = etm_perf_sink_config(handle);
+ 
+-	spin_lock_irqsave(&drvdata->spinlock, flags);
++	raw_spin_lock_irqsave(&drvdata->spinlock, flags);
+ 
+ 	/* No need to continue if the component is already in used by sysFS. */
+ 	if (coresight_get_mode(drvdata->csdev) == CS_MODE_SYSFS) {
+@@ -219,7 +219,7 @@ static int etb_enable_perf(struct coresight_device *csdev, void *data)
+ 	}
+ 
+ out:
+-	spin_unlock_irqrestore(&drvdata->spinlock, flags);
++	raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ 	return ret;
+ }
+ 
+@@ -352,11 +352,11 @@ static int etb_disable(struct coresight_device *csdev)
+ 	struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ 	unsigned long flags;
+ 
+-	spin_lock_irqsave(&drvdata->spinlock, flags);
++	raw_spin_lock_irqsave(&drvdata->spinlock, flags);
+ 
+ 	csdev->refcnt--;
+ 	if (csdev->refcnt) {
+-		spin_unlock_irqrestore(&drvdata->spinlock, flags);
++		raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ 		return -EBUSY;
+ 	}
+ 
+@@ -366,7 +366,7 @@ static int etb_disable(struct coresight_device *csdev)
+ 	/* Dissociate from monitored process. */
+ 	drvdata->pid = -1;
+ 	coresight_set_mode(csdev, CS_MODE_DISABLED);
+-	spin_unlock_irqrestore(&drvdata->spinlock, flags);
++	raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ 
+ 	dev_dbg(&csdev->dev, "ETB disabled\n");
+ 	return 0;
+@@ -443,7 +443,7 @@ static unsigned long etb_update_buffer(struct coresight_device *csdev,
+ 
+ 	capacity = drvdata->buffer_depth * ETB_FRAME_SIZE_WORDS;
+ 
+-	spin_lock_irqsave(&drvdata->spinlock, flags);
++	raw_spin_lock_irqsave(&drvdata->spinlock, flags);
+ 
+ 	/* Don't do anything if another tracer is using this sink */
+ 	if (csdev->refcnt != 1)
+@@ -566,7 +566,7 @@ static unsigned long etb_update_buffer(struct coresight_device *csdev,
+ 	__etb_enable_hw(drvdata);
+ 	CS_LOCK(drvdata->base);
+ out:
+-	spin_unlock_irqrestore(&drvdata->spinlock, flags);
++	raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ 
+ 	return to_read;
+ }
+@@ -587,13 +587,13 @@ static void etb_dump(struct etb_drvdata *drvdata)
+ {
+ 	unsigned long flags;
+ 
+-	spin_lock_irqsave(&drvdata->spinlock, flags);
++	raw_spin_lock_irqsave(&drvdata->spinlock, flags);
+ 	if (coresight_get_mode(drvdata->csdev) == CS_MODE_SYSFS) {
+ 		__etb_disable_hw(drvdata);
+ 		etb_dump_hw(drvdata);
+ 		__etb_enable_hw(drvdata);
+ 	}
+-	spin_unlock_irqrestore(&drvdata->spinlock, flags);
++	raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ 
+ 	dev_dbg(&drvdata->csdev->dev, "ETB dumped\n");
+ }
+@@ -746,7 +746,7 @@ static int etb_probe(struct amba_device *adev, const struct amba_id *id)
+ 	drvdata->base = base;
+ 	desc.access = CSDEV_ACCESS_IOMEM(base);
+ 
+-	spin_lock_init(&drvdata->spinlock);
++	raw_spin_lock_init(&drvdata->spinlock);
+ 
+ 	drvdata->buffer_depth = etb_get_buffer_depth(drvdata);
+ 
+diff --git a/drivers/hwtracing/intel_th/Kconfig b/drivers/hwtracing/intel_th/Kconfig
+index 4b6359326ede99..4f7d2b6d79e294 100644
+--- a/drivers/hwtracing/intel_th/Kconfig
++++ b/drivers/hwtracing/intel_th/Kconfig
+@@ -60,6 +60,7 @@ config INTEL_TH_STH
+ 
+ config INTEL_TH_MSU
+ 	tristate "Intel(R) Trace Hub Memory Storage Unit"
++	depends on MMU
+ 	help
+ 	  Memory Storage Unit (MSU) trace output device enables
+ 	  storing STP traces to system memory. It supports single
+diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
+index 66123d684ac9e7..93b65a9731d721 100644
+--- a/drivers/hwtracing/intel_th/msu.c
++++ b/drivers/hwtracing/intel_th/msu.c
+@@ -19,6 +19,7 @@
+ #include <linux/io.h>
+ #include <linux/workqueue.h>
+ #include <linux/dma-mapping.h>
++#include <linux/pfn_t.h>
+ 
+ #ifdef CONFIG_X86
+ #include <asm/set_memory.h>
+@@ -967,7 +968,6 @@ static void msc_buffer_contig_free(struct msc *msc)
+ 	for (off = 0; off < msc->nr_pages << PAGE_SHIFT; off += PAGE_SIZE) {
+ 		struct page *page = virt_to_page(msc->base + off);
+ 
+-		page->mapping = NULL;
+ 		__free_page(page);
+ 	}
+ 
+@@ -1149,9 +1149,6 @@ static void __msc_buffer_win_free(struct msc *msc, struct msc_window *win)
+ 	int i;
+ 
+ 	for_each_sg(win->sgt->sgl, sg, win->nr_segs, i) {
+-		struct page *page = msc_sg_page(sg);
+-
+-		page->mapping = NULL;
+ 		dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
+ 				  sg_virt(sg), sg_dma_address(sg));
+ 	}
+@@ -1592,22 +1589,10 @@ static void msc_mmap_close(struct vm_area_struct *vma)
+ {
+ 	struct msc_iter *iter = vma->vm_file->private_data;
+ 	struct msc *msc = iter->msc;
+-	unsigned long pg;
+ 
+ 	if (!atomic_dec_and_mutex_lock(&msc->mmap_count, &msc->buf_mutex))
+ 		return;
+ 
+-	/* drop page _refcounts */
+-	for (pg = 0; pg < msc->nr_pages; pg++) {
+-		struct page *page = msc_buffer_get_page(msc, pg);
+-
+-		if (WARN_ON_ONCE(!page))
+-			continue;
+-
+-		if (page->mapping)
+-			page->mapping = NULL;
+-	}
+-
+ 	/* last mapping -- drop user_count */
+ 	atomic_dec(&msc->user_count);
+ 	mutex_unlock(&msc->buf_mutex);
+@@ -1617,16 +1602,14 @@ static vm_fault_t msc_mmap_fault(struct vm_fault *vmf)
+ {
+ 	struct msc_iter *iter = vmf->vma->vm_file->private_data;
+ 	struct msc *msc = iter->msc;
++	struct page *page;
+ 
+-	vmf->page = msc_buffer_get_page(msc, vmf->pgoff);
+-	if (!vmf->page)
++	page = msc_buffer_get_page(msc, vmf->pgoff);
++	if (!page)
+ 		return VM_FAULT_SIGBUS;
+ 
+-	get_page(vmf->page);
+-	vmf->page->mapping = vmf->vma->vm_file->f_mapping;
+-	vmf->page->index = vmf->pgoff;
+-
+-	return 0;
++	get_page(page);
++	return vmf_insert_mixed(vmf->vma, vmf->address, page_to_pfn_t(page));
+ }
+ 
+ static const struct vm_operations_struct msc_mmap_ops = {
+@@ -1667,7 +1650,7 @@ static int intel_th_msc_mmap(struct file *file, struct vm_area_struct *vma)
+ 		atomic_dec(&msc->user_count);
+ 
+ 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+-	vm_flags_set(vma, VM_DONTEXPAND | VM_DONTCOPY);
++	vm_flags_set(vma, VM_DONTEXPAND | VM_DONTCOPY | VM_MIXEDMAP);
+ 	vma->vm_ops = &msc_mmap_ops;
+ 	return ret;
+ }
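The intel_th msu rework stops borrowing struct page fields (page->mapping, page->index) for driver-owned buffers, which core mm code may interpret. Instead the VMA is marked VM_MIXEDMAP and the fault handler inserts the PFN directly, as the hunk above does. The resulting handler shape, reduced to a skeleton (look_up_buffer_page() is a hypothetical stand-in for msc_buffer_get_page()):

static vm_fault_t buf_fault(struct vm_fault *vmf)
{
	struct page *page = look_up_buffer_page(vmf->pgoff);

	if (!page)
		return VM_FAULT_SIGBUS;
	get_page(page);
	return vmf_insert_mixed(vmf->vma, vmf->address, page_to_pfn_t(page));
}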
+diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c
+index 7b2c5d71a7fcec..5ea6d40373e7e9 100644
+--- a/drivers/i2c/busses/i2c-designware-pcidrv.c
++++ b/drivers/i2c/busses/i2c-designware-pcidrv.c
+@@ -207,6 +207,7 @@ static const struct software_node dgpu_node = {
+ static int i2c_dw_pci_probe(struct pci_dev *pdev,
+ 			    const struct pci_device_id *id)
+ {
++	struct device *device = &pdev->dev;
+ 	struct dw_i2c_dev *dev;
+ 	struct i2c_adapter *adap;
+ 	int r;
+@@ -214,25 +215,22 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev,
+ 	struct dw_scl_sda_cfg *cfg;
+ 
+ 	if (id->driver_data >= ARRAY_SIZE(dw_pci_controllers))
+-		return dev_err_probe(&pdev->dev, -EINVAL,
+-				     "Invalid driver data %ld\n",
++		return dev_err_probe(device, -EINVAL, "Invalid driver data %ld\n",
+ 				     id->driver_data);
+ 
+ 	controller = &dw_pci_controllers[id->driver_data];
+ 
+ 	r = pcim_enable_device(pdev);
+ 	if (r)
+-		return dev_err_probe(&pdev->dev, r,
+-				     "Failed to enable I2C PCI device\n");
++		return dev_err_probe(device, r, "Failed to enable I2C PCI device\n");
+ 
+ 	pci_set_master(pdev);
+ 
+ 	r = pcim_iomap_regions(pdev, 1 << 0, pci_name(pdev));
+ 	if (r)
+-		return dev_err_probe(&pdev->dev, r,
+-				     "I/O memory remapping failed\n");
++		return dev_err_probe(device, r, "I/O memory remapping failed\n");
+ 
+-	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
++	dev = devm_kzalloc(device, sizeof(*dev), GFP_KERNEL);
+ 	if (!dev)
+ 		return -ENOMEM;
+ 
+@@ -242,7 +240,7 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev,
+ 
+ 	dev->get_clk_rate_khz = controller->get_clk_rate_khz;
+ 	dev->base = pcim_iomap_table(pdev)[0];
+-	dev->dev = &pdev->dev;
++	dev->dev = device;
+ 	dev->irq = pci_irq_vector(pdev, 0);
+ 	dev->flags |= controller->flags;
+ 
+@@ -280,15 +278,17 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev,
+ 
+ 	if ((dev->flags & MODEL_MASK) == MODEL_AMD_NAVI_GPU) {
+ 		dev->slave = i2c_new_ccgx_ucsi(&dev->adapter, dev->irq, &dgpu_node);
+-		if (IS_ERR(dev->slave))
+-			return dev_err_probe(dev->dev, PTR_ERR(dev->slave),
++		if (IS_ERR(dev->slave)) {
++			i2c_del_adapter(&dev->adapter);
++			return dev_err_probe(device, PTR_ERR(dev->slave),
+ 					     "register UCSI failed\n");
++		}
+ 	}
+ 
+-	pm_runtime_set_autosuspend_delay(&pdev->dev, 1000);
+-	pm_runtime_use_autosuspend(&pdev->dev);
+-	pm_runtime_put_autosuspend(&pdev->dev);
+-	pm_runtime_allow(&pdev->dev);
++	pm_runtime_set_autosuspend_delay(device, 1000);
++	pm_runtime_use_autosuspend(device);
++	pm_runtime_put_autosuspend(device);
++	pm_runtime_allow(device);
+ 
+ 	return 0;
+ }
+@@ -296,11 +296,12 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev,
+ static void i2c_dw_pci_remove(struct pci_dev *pdev)
+ {
+ 	struct dw_i2c_dev *dev = pci_get_drvdata(pdev);
++	struct device *device = &pdev->dev;
+ 
+ 	i2c_dw_disable(dev);
+ 
+-	pm_runtime_forbid(&pdev->dev);
+-	pm_runtime_get_noresume(&pdev->dev);
++	pm_runtime_forbid(device);
++	pm_runtime_get_noresume(device);
+ 
+ 	i2c_del_adapter(&dev->adapter);
+ }
+diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
+index 2d0c7348e4917c..a3e86930bf4186 100644
+--- a/drivers/i2c/busses/i2c-designware-platdrv.c
++++ b/drivers/i2c/busses/i2c-designware-platdrv.c
+@@ -205,6 +205,7 @@ static void i2c_dw_remove_lock_support(struct dw_i2c_dev *dev)
+ 
+ static int dw_i2c_plat_probe(struct platform_device *pdev)
+ {
++	struct device *device = &pdev->dev;
+ 	struct i2c_adapter *adap;
+ 	struct dw_i2c_dev *dev;
+ 	int irq, ret;
+@@ -213,15 +214,15 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
+ 	if (irq < 0)
+ 		return irq;
+ 
+-	dev = devm_kzalloc(&pdev->dev, sizeof(struct dw_i2c_dev), GFP_KERNEL);
++	dev = devm_kzalloc(device, sizeof(*dev), GFP_KERNEL);
+ 	if (!dev)
+ 		return -ENOMEM;
+ 
+-	dev->flags = (uintptr_t)device_get_match_data(&pdev->dev);
+-	if (device_property_present(&pdev->dev, "wx,i2c-snps-model"))
++	dev->flags = (uintptr_t)device_get_match_data(device);
++	if (device_property_present(device, "wx,i2c-snps-model"))
+ 		dev->flags = MODEL_WANGXUN_SP | ACCESS_POLLING;
+ 
+-	dev->dev = &pdev->dev;
++	dev->dev = device;
+ 	dev->irq = irq;
+ 	platform_set_drvdata(pdev, dev);
+ 
+@@ -229,7 +230,7 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
+ 	if (ret)
+ 		return ret;
+ 
+-	dev->rst = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL);
++	dev->rst = devm_reset_control_get_optional_exclusive(device, NULL);
+ 	if (IS_ERR(dev->rst))
+ 		return PTR_ERR(dev->rst);
+ 
+@@ -246,13 +247,13 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
+ 	i2c_dw_configure(dev);
+ 
+ 	/* Optional interface clock */
+-	dev->pclk = devm_clk_get_optional(&pdev->dev, "pclk");
++	dev->pclk = devm_clk_get_optional(device, "pclk");
+ 	if (IS_ERR(dev->pclk)) {
+ 		ret = PTR_ERR(dev->pclk);
+ 		goto exit_reset;
+ 	}
+ 
+-	dev->clk = devm_clk_get_optional(&pdev->dev, NULL);
++	dev->clk = devm_clk_get_optional(device, NULL);
+ 	if (IS_ERR(dev->clk)) {
+ 		ret = PTR_ERR(dev->clk);
+ 		goto exit_reset;
+@@ -280,28 +281,24 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
+ 					I2C_CLASS_HWMON : I2C_CLASS_DEPRECATED;
+ 	adap->nr = -1;
+ 
+-	if (dev->flags & ACCESS_NO_IRQ_SUSPEND) {
+-		dev_pm_set_driver_flags(&pdev->dev,
+-					DPM_FLAG_SMART_PREPARE);
+-	} else {
+-		dev_pm_set_driver_flags(&pdev->dev,
+-					DPM_FLAG_SMART_PREPARE |
+-					DPM_FLAG_SMART_SUSPEND);
+-	}
++	if (dev->flags & ACCESS_NO_IRQ_SUSPEND)
++		dev_pm_set_driver_flags(device, DPM_FLAG_SMART_PREPARE);
++	else
++		dev_pm_set_driver_flags(device, DPM_FLAG_SMART_PREPARE | DPM_FLAG_SMART_SUSPEND);
+ 
+-	device_enable_async_suspend(&pdev->dev);
++	device_enable_async_suspend(device);
+ 
+ 	/* The code below assumes runtime PM to be disabled. */
+-	WARN_ON(pm_runtime_enabled(&pdev->dev));
++	WARN_ON(pm_runtime_enabled(device));
+ 
+-	pm_runtime_set_autosuspend_delay(&pdev->dev, 1000);
+-	pm_runtime_use_autosuspend(&pdev->dev);
+-	pm_runtime_set_active(&pdev->dev);
++	pm_runtime_set_autosuspend_delay(device, 1000);
++	pm_runtime_use_autosuspend(device);
++	pm_runtime_set_active(device);
+ 
+ 	if (dev->shared_with_punit)
+-		pm_runtime_get_noresume(&pdev->dev);
++		pm_runtime_get_noresume(device);
+ 
+-	pm_runtime_enable(&pdev->dev);
++	pm_runtime_enable(device);
+ 
+ 	ret = i2c_dw_probe(dev);
+ 	if (ret)
+@@ -319,15 +316,16 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
+ static void dw_i2c_plat_remove(struct platform_device *pdev)
+ {
+ 	struct dw_i2c_dev *dev = platform_get_drvdata(pdev);
++	struct device *device = &pdev->dev;
+ 
+-	pm_runtime_get_sync(&pdev->dev);
++	pm_runtime_get_sync(device);
+ 
+ 	i2c_del_adapter(&dev->adapter);
+ 
+ 	i2c_dw_disable(dev);
+ 
+-	pm_runtime_dont_use_autosuspend(&pdev->dev);
+-	pm_runtime_put_sync(&pdev->dev);
++	pm_runtime_dont_use_autosuspend(device);
++	pm_runtime_put_sync(device);
+ 	dw_i2c_plat_pm_cleanup(dev);
+ 
+ 	i2c_dw_remove_lock_support(dev);
+diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
+index 4d76e71cdd4be1..afc1a8171f59e1 100644
+--- a/drivers/i2c/busses/i2c-pxa.c
++++ b/drivers/i2c/busses/i2c-pxa.c
+@@ -1503,7 +1503,10 @@ static int i2c_pxa_probe(struct platform_device *dev)
+ 				i2c->adap.name);
+ 	}
+ 
+-	clk_prepare_enable(i2c->clk);
++	ret = clk_prepare_enable(i2c->clk);
++	if (ret)
++		return dev_err_probe(&dev->dev, ret,
++				     "failed to enable clock\n");
+ 
+ 	if (i2c->use_pio) {
+ 		i2c->adap.algo = &i2c_pxa_pio_algorithm;
+diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c
+index d480162a4d3941..eb97abcb4cd330 100644
+--- a/drivers/i2c/busses/i2c-qup.c
++++ b/drivers/i2c/busses/i2c-qup.c
+@@ -14,6 +14,7 @@
+ #include <linux/dma-mapping.h>
+ #include <linux/err.h>
+ #include <linux/i2c.h>
++#include <linux/interconnect.h>
+ #include <linux/interrupt.h>
+ #include <linux/io.h>
+ #include <linux/module.h>
+@@ -150,6 +151,8 @@
+ /* TAG length for DATA READ in RX FIFO  */
+ #define READ_RX_TAGS_LEN		2
+ 
++#define QUP_BUS_WIDTH			8
++
+ static unsigned int scl_freq;
+ module_param_named(scl_freq, scl_freq, uint, 0444);
+ MODULE_PARM_DESC(scl_freq, "SCL frequency override");
+@@ -227,6 +230,7 @@ struct qup_i2c_dev {
+ 	int			irq;
+ 	struct clk		*clk;
+ 	struct clk		*pclk;
++	struct icc_path		*icc_path;
+ 	struct i2c_adapter	adap;
+ 
+ 	int			clk_ctl;
+@@ -255,6 +259,10 @@ struct qup_i2c_dev {
+ 	/* To configure when bus is in run state */
+ 	u32			config_run;
+ 
++	/* bandwidth votes */
++	u32			src_clk_freq;
++	u32			cur_bw_clk_freq;
++
+ 	/* dma parameters */
+ 	bool			is_dma;
+ 	/* To check if the current transfer is using DMA */
+@@ -453,6 +461,23 @@ static int qup_i2c_bus_active(struct qup_i2c_dev *qup, int len)
+ 	return ret;
+ }
+ 
++static int qup_i2c_vote_bw(struct qup_i2c_dev *qup, u32 clk_freq)
++{
++	u32 needed_peak_bw;
++	int ret;
++
++	if (qup->cur_bw_clk_freq == clk_freq)
++		return 0;
++
++	needed_peak_bw = Bps_to_icc(clk_freq * QUP_BUS_WIDTH);
++	ret = icc_set_bw(qup->icc_path, 0, needed_peak_bw);
++	if (ret)
++		return ret;
++
++	qup->cur_bw_clk_freq = clk_freq;
++	return 0;
++}
++
+ static void qup_i2c_write_tx_fifo_v1(struct qup_i2c_dev *qup)
+ {
+ 	struct qup_i2c_block *blk = &qup->blk;
+@@ -838,6 +863,10 @@ static int qup_i2c_bam_xfer(struct i2c_adapter *adap, struct i2c_msg *msg,
+ 	int ret = 0;
+ 	int idx = 0;
+ 
++	ret = qup_i2c_vote_bw(qup, qup->src_clk_freq);
++	if (ret)
++		return ret;
++
+ 	enable_irq(qup->irq);
+ 	ret = qup_i2c_req_dma(qup);
+ 
+@@ -1643,6 +1672,7 @@ static void qup_i2c_disable_clocks(struct qup_i2c_dev *qup)
+ 	config = readl(qup->base + QUP_CONFIG);
+ 	config |= QUP_CLOCK_AUTO_GATE;
+ 	writel(config, qup->base + QUP_CONFIG);
++	qup_i2c_vote_bw(qup, 0);
+ 	clk_disable_unprepare(qup->pclk);
+ }
+ 
+@@ -1743,6 +1773,11 @@ static int qup_i2c_probe(struct platform_device *pdev)
+ 			goto fail_dma;
+ 		}
+ 		qup->is_dma = true;
++
++		qup->icc_path = devm_of_icc_get(&pdev->dev, NULL);
++		if (IS_ERR(qup->icc_path))
++			return dev_err_probe(&pdev->dev, PTR_ERR(qup->icc_path),
++					     "failed to get interconnect path\n");
+ 	}
+ 
+ nodma:
+@@ -1791,6 +1826,7 @@ static int qup_i2c_probe(struct platform_device *pdev)
+ 		qup_i2c_enable_clocks(qup);
+ 		src_clk_freq = clk_get_rate(qup->clk);
+ 	}
++	qup->src_clk_freq = src_clk_freq;
+ 
+ 	/*
+ 	 * Bootloaders might leave a pending interrupt on certain QUP's,
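The i2c-qup interconnect support votes peak bandwidth proportional to the source clock before each DMA transfer and drops the vote to 0 when clocks are gated, with cur_bw_clk_freq caching the last vote to avoid redundant icc_set_bw() calls. A worked example of the arithmetic at a 19.2 MHz clock (runnable; Bps_to_icc() converts to the framework's kBps units):

#include <stdio.h>

#define QUP_BUS_WIDTH	8
#define Bps_to_icc(x)	((x) / 1000)	/* icc bandwidth units are kBps */

int main(void)
{
	unsigned long long clk_freq = 19200000;	/* example: 19.2 MHz */
	unsigned long long peak = Bps_to_icc(clk_freq * QUP_BUS_WIDTH);

	printf("peak vote: %llu kBps\n", peak);	/* prints 153600 */
	return 0;
}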
+diff --git a/drivers/i3c/master/svc-i3c-master.c b/drivers/i3c/master/svc-i3c-master.c
+index 7911814ad82ac6..474a96ebda2265 100644
+--- a/drivers/i3c/master/svc-i3c-master.c
++++ b/drivers/i3c/master/svc-i3c-master.c
+@@ -511,6 +511,8 @@ static void svc_i3c_master_ibi_work(struct work_struct *work)
+ 			queue_work(master->base.wq, &master->hj_work);
+ 		break;
+ 	case SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST:
++		svc_i3c_master_emit_stop(master);
++		break;
+ 	default:
+ 		break;
+ 	}
+@@ -859,6 +861,8 @@ static int svc_i3c_master_do_daa_locked(struct svc_i3c_master *master,
+ 	u32 reg;
+ 	int ret, i;
+ 
++	svc_i3c_master_flush_fifo(master);
++
+ 	while (true) {
+ 		/* SVC_I3C_MCTRL_REQUEST_PROC_DAA have two mode, ENTER DAA or PROCESS DAA.
+ 		 *
+diff --git a/drivers/iio/adc/ad7944.c b/drivers/iio/adc/ad7944.c
+index 0f36138a714456..58a25792cec377 100644
+--- a/drivers/iio/adc/ad7944.c
++++ b/drivers/iio/adc/ad7944.c
+@@ -98,6 +98,9 @@ struct ad7944_chip_info {
+ 	const struct iio_chan_spec channels[2];
+ };
+ 
++/* get number of bytes for SPI xfer */
++#define AD7944_SPI_BYTES(scan_type) ((scan_type).realbits > 16 ? 4 : 2)
++
+ /*
+  * AD7944_DEFINE_CHIP_INFO - Define a chip info structure for a specific chip
+  * @_name: The name of the chip
+@@ -164,7 +167,7 @@ static int ad7944_3wire_cs_mode_init_msg(struct device *dev, struct ad7944_adc *
+ 
+ 	/* Then we can read the data during the acquisition phase */
+ 	xfers[2].rx_buf = &adc->sample.raw;
+-	xfers[2].len = BITS_TO_BYTES(chan->scan_type.storagebits);
++	xfers[2].len = AD7944_SPI_BYTES(chan->scan_type);
+ 	xfers[2].bits_per_word = chan->scan_type.realbits;
+ 
+ 	spi_message_init_with_transfers(&adc->msg, xfers, 3);
+@@ -193,7 +196,7 @@ static int ad7944_4wire_mode_init_msg(struct device *dev, struct ad7944_adc *adc
+ 	xfers[0].delay.unit = SPI_DELAY_UNIT_NSECS;
+ 
+ 	xfers[1].rx_buf = &adc->sample.raw;
+-	xfers[1].len = BITS_TO_BYTES(chan->scan_type.storagebits);
++	xfers[1].len = AD7944_SPI_BYTES(chan->scan_type);
+ 	xfers[1].bits_per_word = chan->scan_type.realbits;
+ 
+ 	spi_message_init_with_transfers(&adc->msg, xfers, 2);
+@@ -228,7 +231,7 @@ static int ad7944_chain_mode_init_msg(struct device *dev, struct ad7944_adc *adc
+ 	xfers[0].delay.unit = SPI_DELAY_UNIT_NSECS;
+ 
+ 	xfers[1].rx_buf = adc->chain_mode_buf;
+-	xfers[1].len = BITS_TO_BYTES(chan->scan_type.storagebits) * n_chain_dev;
++	xfers[1].len = AD7944_SPI_BYTES(chan->scan_type) * n_chain_dev;
+ 	xfers[1].bits_per_word = chan->scan_type.realbits;
+ 
+ 	spi_message_init_with_transfers(&adc->msg, xfers, 2);
+@@ -274,12 +277,12 @@ static int ad7944_single_conversion(struct ad7944_adc *adc,
+ 		return ret;
+ 
+ 	if (adc->spi_mode == AD7944_SPI_MODE_CHAIN) {
+-		if (chan->scan_type.storagebits > 16)
++		if (chan->scan_type.realbits > 16)
+ 			*val = ((u32 *)adc->chain_mode_buf)[chan->scan_index];
+ 		else
+ 			*val = ((u16 *)adc->chain_mode_buf)[chan->scan_index];
+ 	} else {
+-		if (chan->scan_type.storagebits > 16)
++		if (chan->scan_type.realbits > 16)
+ 			*val = adc->sample.raw.u32;
+ 		else
+ 			*val = adc->sample.raw.u16;
+@@ -409,8 +412,7 @@ static int ad7944_chain_mode_alloc(struct device *dev,
+ 	/* 1 word for each voltage channel + aligned u64 for timestamp */
+ 
+ 	chain_mode_buf_size = ALIGN(n_chain_dev *
+-		BITS_TO_BYTES(chan[0].scan_type.storagebits), sizeof(u64))
+-		+ sizeof(u64);
++		AD7944_SPI_BYTES(chan[0].scan_type), sizeof(u64)) + sizeof(u64);
+ 	buf = devm_kzalloc(dev, chain_mode_buf_size, GFP_KERNEL);
+ 	if (!buf)
+ 		return -ENOMEM;
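The ad7944 fix sizes SPI transfers from realbits (the converter's resolution, which determines the wire word) rather than storagebits (the in-buffer layout, which may be padded wider than the wire word). A runnable check of the new macro:

#include <stdio.h>

struct scan_type { int realbits, storagebits; };

/* Wire size follows resolution, not the padded storage size. */
#define AD7944_SPI_BYTES(st) ((st).realbits > 16 ? 4 : 2)

int main(void)
{
	struct scan_type t14 = { .realbits = 14, .storagebits = 16 };
	struct scan_type t18 = { .realbits = 18, .storagebits = 32 };

	printf("%d %d\n", AD7944_SPI_BYTES(t14), AD7944_SPI_BYTES(t18));
	/* prints "2 4" */
	return 0;
}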
+diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
+index 07c571c7b69992..c5b68639476058 100644
+--- a/drivers/infiniband/core/umem.c
++++ b/drivers/infiniband/core/umem.c
+@@ -80,9 +80,12 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
+ 				     unsigned long pgsz_bitmap,
+ 				     unsigned long virt)
+ {
+-	struct scatterlist *sg;
++	unsigned long curr_len = 0;
++	dma_addr_t curr_base = ~0;
+ 	unsigned long va, pgoff;
++	struct scatterlist *sg;
+ 	dma_addr_t mask;
++	dma_addr_t end;
+ 	int i;
+ 
+ 	umem->iova = va = virt;
+@@ -107,17 +110,30 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
+ 	pgoff = umem->address & ~PAGE_MASK;
+ 
+ 	for_each_sgtable_dma_sg(&umem->sgt_append.sgt, sg, i) {
+-		/* Walk SGL and reduce max page size if VA/PA bits differ
+-		 * for any address.
++		/* If the current entry is physically contiguous with the previous
++		 * one, no need to take its start addresses into consideration.
+ 		 */
+-		mask |= (sg_dma_address(sg) + pgoff) ^ va;
++		if (check_add_overflow(curr_base, curr_len, &end) ||
++		    end != sg_dma_address(sg)) {
++
++			curr_base = sg_dma_address(sg);
++			curr_len = 0;
++
++			/* Reduce max page size if VA/PA bits differ */
++			mask |= (curr_base + pgoff) ^ va;
++
++			/* The alignment of any VA matching a discontinuity point
++			* in the physical memory sets the maximum possible page
++			* size as this must be a starting point of a new page that
++			* needs to be aligned.
++			*/
++			if (i != 0)
++				mask |= va;
++		}
++
++		curr_len += sg_dma_len(sg);
+ 		va += sg_dma_len(sg) - pgoff;
+-		/* Except for the last entry, the ending iova alignment sets
+-		 * the maximum possible page size as the low bits of the iova
+-		 * must be zero when starting the next chunk.
+-		 */
+-		if (i != (umem->sgt_append.sgt.nents - 1))
+-			mask |= va;
++
+ 		pgoff = 0;
+ 	}
+ 
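The reworked loop only folds a scatterlist entry's start address into the alignment mask at physical discontinuities, so physically contiguous entries merge and no longer artificially cap the page size. The mask collects every bit position where the VA and PA walks disagree, and its lowest set bit bounds the usable page size. A runnable model of that computation (simplified: page-aligned start, no pgoff, no overflow check, and no pgsz_bitmap filtering):

#include <stdio.h>
#include <stdint.h>

struct seg { uint64_t dma, len; };

static uint64_t max_pgsz(uint64_t va, const struct seg *sg, int n)
{
	uint64_t mask = 0, curr_base = ~0ULL, curr_len = 0;

	for (int i = 0; i < n; i++) {
		if (curr_base + curr_len != sg[i].dma) {	/* discontinuity */
			curr_base = sg[i].dma;
			curr_len = 0;
			mask |= curr_base ^ va;	/* VA/PA bits that differ */
			if (i != 0)
				mask |= va;	/* new chunk must start page-aligned */
		}
		curr_len += sg[i].len;
		va += sg[i].len;
	}
	/* mask == 0: alignment does not constrain the page size at all. */
	return mask ? 1ULL << __builtin_ctzll(mask) : UINT64_MAX;
}

int main(void)
{
	/* Two contiguous 4 KiB entries merge; the jump to 0x300000 caps
	 * the page size at 8 KiB (the discontinuity lands at VA 0x202000). */
	struct seg sg[] = {
		{ 0x200000, 0x1000 }, { 0x201000, 0x1000 }, { 0x300000, 0x1000 },
	};

	printf("0x%llx\n", (unsigned long long)max_pgsz(0x200000, sg, 3));
	/* prints 0x2000 */
	return 0;
}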
+diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
+index edef79daed3fa8..535bb99ed9f5fc 100644
+--- a/drivers/infiniband/core/uverbs_cmd.c
++++ b/drivers/infiniband/core/uverbs_cmd.c
+@@ -718,8 +718,8 @@ static int ib_uverbs_reg_mr(struct uverbs_attr_bundle *attrs)
+ 		goto err_free;
+ 
+ 	pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
+-	if (!pd) {
+-		ret = -EINVAL;
++	if (IS_ERR(pd)) {
++		ret = PTR_ERR(pd);
+ 		goto err_free;
+ 	}
+ 
+@@ -809,8 +809,8 @@ static int ib_uverbs_rereg_mr(struct uverbs_attr_bundle *attrs)
+ 	if (cmd.flags & IB_MR_REREG_PD) {
+ 		new_pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle,
+ 					   attrs);
+-		if (!new_pd) {
+-			ret = -EINVAL;
++		if (IS_ERR(new_pd)) {
++			ret = PTR_ERR(new_pd);
+ 			goto put_uobjs;
+ 		}
+ 	} else {
+@@ -919,8 +919,8 @@ static int ib_uverbs_alloc_mw(struct uverbs_attr_bundle *attrs)
+ 		return PTR_ERR(uobj);
+ 
+ 	pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
+-	if (!pd) {
+-		ret = -EINVAL;
++	if (IS_ERR(pd)) {
++		ret = PTR_ERR(pd);
+ 		goto err_free;
+ 	}
+ 
+@@ -1127,8 +1127,8 @@ static int ib_uverbs_resize_cq(struct uverbs_attr_bundle *attrs)
+ 		return ret;
+ 
+ 	cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
+-	if (!cq)
+-		return -EINVAL;
++	if (IS_ERR(cq))
++		return PTR_ERR(cq);
+ 
+ 	ret = cq->device->ops.resize_cq(cq, cmd.cqe, &attrs->driver_udata);
+ 	if (ret)
+@@ -1189,8 +1189,8 @@ static int ib_uverbs_poll_cq(struct uverbs_attr_bundle *attrs)
+ 		return ret;
+ 
+ 	cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
+-	if (!cq)
+-		return -EINVAL;
++	if (IS_ERR(cq))
++		return PTR_ERR(cq);
+ 
+ 	/* we copy a struct ib_uverbs_poll_cq_resp to user space */
+ 	header_ptr = attrs->ucore.outbuf;
+@@ -1238,8 +1238,8 @@ static int ib_uverbs_req_notify_cq(struct uverbs_attr_bundle *attrs)
+ 		return ret;
+ 
+ 	cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
+-	if (!cq)
+-		return -EINVAL;
++	if (IS_ERR(cq))
++		return PTR_ERR(cq);
+ 
+ 	ib_req_notify_cq(cq, cmd.solicited_only ?
+ 			 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);
+@@ -1321,8 +1321,8 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
+ 		ind_tbl = uobj_get_obj_read(rwq_ind_table,
+ 					    UVERBS_OBJECT_RWQ_IND_TBL,
+ 					    cmd->rwq_ind_tbl_handle, attrs);
+-		if (!ind_tbl) {
+-			ret = -EINVAL;
++		if (IS_ERR(ind_tbl)) {
++			ret = PTR_ERR(ind_tbl);
+ 			goto err_put;
+ 		}
+ 
+@@ -1360,8 +1360,10 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
+ 			if (cmd->is_srq) {
+ 				srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ,
+ 							cmd->srq_handle, attrs);
+-				if (!srq || srq->srq_type == IB_SRQT_XRC) {
+-					ret = -EINVAL;
++				if (IS_ERR(srq) ||
++				    srq->srq_type == IB_SRQT_XRC) {
++					ret = IS_ERR(srq) ? PTR_ERR(srq) :
++								  -EINVAL;
+ 					goto err_put;
+ 				}
+ 			}
+@@ -1371,23 +1373,29 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
+ 					rcq = uobj_get_obj_read(
+ 						cq, UVERBS_OBJECT_CQ,
+ 						cmd->recv_cq_handle, attrs);
+-					if (!rcq) {
+-						ret = -EINVAL;
++					if (IS_ERR(rcq)) {
++						ret = PTR_ERR(rcq);
+ 						goto err_put;
+ 					}
+ 				}
+ 			}
+ 		}
+ 
+-		if (has_sq)
++		if (has_sq) {
+ 			scq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ,
+ 						cmd->send_cq_handle, attrs);
++			if (IS_ERR(scq)) {
++				ret = PTR_ERR(scq);
++				goto err_put;
++			}
++		}
++
+ 		if (!ind_tbl && cmd->qp_type != IB_QPT_XRC_INI)
+ 			rcq = rcq ?: scq;
+ 		pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd->pd_handle,
+ 				       attrs);
+-		if (!pd || (!scq && has_sq)) {
+-			ret = -EINVAL;
++		if (IS_ERR(pd)) {
++			ret = PTR_ERR(pd);
+ 			goto err_put;
+ 		}
+ 
+@@ -1482,18 +1490,18 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
+ err_put:
+ 	if (!IS_ERR(xrcd_uobj))
+ 		uobj_put_read(xrcd_uobj);
+-	if (pd)
++	if (!IS_ERR_OR_NULL(pd))
+ 		uobj_put_obj_read(pd);
+-	if (scq)
++	if (!IS_ERR_OR_NULL(scq))
+ 		rdma_lookup_put_uobject(&scq->uobject->uevent.uobject,
+ 					UVERBS_LOOKUP_READ);
+-	if (rcq && rcq != scq)
++	if (!IS_ERR_OR_NULL(rcq) && rcq != scq)
+ 		rdma_lookup_put_uobject(&rcq->uobject->uevent.uobject,
+ 					UVERBS_LOOKUP_READ);
+-	if (srq)
++	if (!IS_ERR_OR_NULL(srq))
+ 		rdma_lookup_put_uobject(&srq->uobject->uevent.uobject,
+ 					UVERBS_LOOKUP_READ);
+-	if (ind_tbl)
++	if (!IS_ERR_OR_NULL(ind_tbl))
+ 		uobj_put_obj_read(ind_tbl);
+ 
+ 	uobj_alloc_abort(&obj->uevent.uobject, attrs);
+@@ -1655,8 +1663,8 @@ static int ib_uverbs_query_qp(struct uverbs_attr_bundle *attrs)
+ 	}
+ 
+ 	qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
+-	if (!qp) {
+-		ret = -EINVAL;
++	if (IS_ERR(qp)) {
++		ret = PTR_ERR(qp);
+ 		goto out;
+ 	}
+ 
+@@ -1761,8 +1769,8 @@ static int modify_qp(struct uverbs_attr_bundle *attrs,
+ 
+ 	qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd->base.qp_handle,
+ 			       attrs);
+-	if (!qp) {
+-		ret = -EINVAL;
++	if (IS_ERR(qp)) {
++		ret = PTR_ERR(qp);
+ 		goto out;
+ 	}
+ 
+@@ -2028,8 +2036,8 @@ static int ib_uverbs_post_send(struct uverbs_attr_bundle *attrs)
+ 		return -ENOMEM;
+ 
+ 	qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
+-	if (!qp) {
+-		ret = -EINVAL;
++	if (IS_ERR(qp)) {
++		ret = PTR_ERR(qp);
+ 		goto out;
+ 	}
+ 
+@@ -2066,9 +2074,9 @@ static int ib_uverbs_post_send(struct uverbs_attr_bundle *attrs)
+ 
+ 			ud->ah = uobj_get_obj_read(ah, UVERBS_OBJECT_AH,
+ 						   user_wr->wr.ud.ah, attrs);
+-			if (!ud->ah) {
++			if (IS_ERR(ud->ah)) {
++				ret = PTR_ERR(ud->ah);
+ 				kfree(ud);
+-				ret = -EINVAL;
+ 				goto out_put;
+ 			}
+ 			ud->remote_qpn = user_wr->wr.ud.remote_qpn;
+@@ -2305,8 +2313,8 @@ static int ib_uverbs_post_recv(struct uverbs_attr_bundle *attrs)
+ 		return PTR_ERR(wr);
+ 
+ 	qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
+-	if (!qp) {
+-		ret = -EINVAL;
++	if (IS_ERR(qp)) {
++		ret = PTR_ERR(qp);
+ 		goto out;
+ 	}
+ 
+@@ -2356,8 +2364,8 @@ static int ib_uverbs_post_srq_recv(struct uverbs_attr_bundle *attrs)
+ 		return PTR_ERR(wr);
+ 
+ 	srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs);
+-	if (!srq) {
+-		ret = -EINVAL;
++	if (IS_ERR(srq)) {
++		ret = PTR_ERR(srq);
+ 		goto out;
+ 	}
+ 
+@@ -2413,8 +2421,8 @@ static int ib_uverbs_create_ah(struct uverbs_attr_bundle *attrs)
+ 	}
+ 
+ 	pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
+-	if (!pd) {
+-		ret = -EINVAL;
++	if (IS_ERR(pd)) {
++		ret = PTR_ERR(pd);
+ 		goto err;
+ 	}
+ 
+@@ -2483,8 +2491,8 @@ static int ib_uverbs_attach_mcast(struct uverbs_attr_bundle *attrs)
+ 		return ret;
+ 
+ 	qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
+-	if (!qp)
+-		return -EINVAL;
++	if (IS_ERR(qp))
++		return PTR_ERR(qp);
+ 
+ 	obj = qp->uobject;
+ 
+@@ -2533,8 +2541,8 @@ static int ib_uverbs_detach_mcast(struct uverbs_attr_bundle *attrs)
+ 		return ret;
+ 
+ 	qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
+-	if (!qp)
+-		return -EINVAL;
++	if (IS_ERR(qp))
++		return PTR_ERR(qp);
+ 
+ 	obj = qp->uobject;
+ 	mutex_lock(&obj->mcast_lock);
+@@ -2668,8 +2676,8 @@ static int kern_spec_to_ib_spec_action(struct uverbs_attr_bundle *attrs,
+ 							UVERBS_OBJECT_FLOW_ACTION,
+ 							kern_spec->action.handle,
+ 							attrs);
+-		if (!ib_spec->action.act)
+-			return -EINVAL;
++		if (IS_ERR(ib_spec->action.act))
++			return PTR_ERR(ib_spec->action.act);
+ 		ib_spec->action.size =
+ 			sizeof(struct ib_flow_spec_action_handle);
+ 		flow_resources_add(uflow_res,
+@@ -2686,8 +2694,8 @@ static int kern_spec_to_ib_spec_action(struct uverbs_attr_bundle *attrs,
+ 					  UVERBS_OBJECT_COUNTERS,
+ 					  kern_spec->flow_count.handle,
+ 					  attrs);
+-		if (!ib_spec->flow_count.counters)
+-			return -EINVAL;
++		if (IS_ERR(ib_spec->flow_count.counters))
++			return PTR_ERR(ib_spec->flow_count.counters);
+ 		ib_spec->flow_count.size =
+ 				sizeof(struct ib_flow_spec_action_count);
+ 		flow_resources_add(uflow_res,
+@@ -2905,14 +2913,14 @@ static int ib_uverbs_ex_create_wq(struct uverbs_attr_bundle *attrs)
+ 		return PTR_ERR(obj);
+ 
+ 	pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
+-	if (!pd) {
+-		err = -EINVAL;
++	if (IS_ERR(pd)) {
++		err = PTR_ERR(pd);
+ 		goto err_uobj;
+ 	}
+ 
+ 	cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
+-	if (!cq) {
+-		err = -EINVAL;
++	if (IS_ERR(cq)) {
++		err = PTR_ERR(cq);
+ 		goto err_put_pd;
+ 	}
+ 
+@@ -3013,8 +3021,8 @@ static int ib_uverbs_ex_modify_wq(struct uverbs_attr_bundle *attrs)
+ 		return -EINVAL;
+ 
+ 	wq = uobj_get_obj_read(wq, UVERBS_OBJECT_WQ, cmd.wq_handle, attrs);
+-	if (!wq)
+-		return -EINVAL;
++	if (IS_ERR(wq))
++		return PTR_ERR(wq);
+ 
+ 	if (cmd.attr_mask & IB_WQ_FLAGS) {
+ 		wq_attr.flags = cmd.flags;
+@@ -3097,8 +3105,8 @@ static int ib_uverbs_ex_create_rwq_ind_table(struct uverbs_attr_bundle *attrs)
+ 			num_read_wqs++) {
+ 		wq = uobj_get_obj_read(wq, UVERBS_OBJECT_WQ,
+ 				       wqs_handles[num_read_wqs], attrs);
+-		if (!wq) {
+-			err = -EINVAL;
++		if (IS_ERR(wq)) {
++			err = PTR_ERR(wq);
+ 			goto put_wqs;
+ 		}
+ 
+@@ -3253,8 +3261,8 @@ static int ib_uverbs_ex_create_flow(struct uverbs_attr_bundle *attrs)
+ 	}
+ 
+ 	qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
+-	if (!qp) {
+-		err = -EINVAL;
++	if (IS_ERR(qp)) {
++		err = PTR_ERR(qp);
+ 		goto err_uobj;
+ 	}
+ 
+@@ -3400,15 +3408,15 @@ static int __uverbs_create_xsrq(struct uverbs_attr_bundle *attrs,
+ 	if (ib_srq_has_cq(cmd->srq_type)) {
+ 		attr.ext.cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ,
+ 						cmd->cq_handle, attrs);
+-		if (!attr.ext.cq) {
+-			ret = -EINVAL;
++		if (IS_ERR(attr.ext.cq)) {
++			ret = PTR_ERR(attr.ext.cq);
+ 			goto err_put_xrcd;
+ 		}
+ 	}
+ 
+ 	pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd->pd_handle, attrs);
+-	if (!pd) {
+-		ret = -EINVAL;
++	if (IS_ERR(pd)) {
++		ret = PTR_ERR(pd);
+ 		goto err_put_cq;
+ 	}
+ 
+@@ -3515,8 +3523,8 @@ static int ib_uverbs_modify_srq(struct uverbs_attr_bundle *attrs)
+ 		return ret;
+ 
+ 	srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs);
+-	if (!srq)
+-		return -EINVAL;
++	if (IS_ERR(srq))
++		return PTR_ERR(srq);
+ 
+ 	attr.max_wr    = cmd.max_wr;
+ 	attr.srq_limit = cmd.srq_limit;
+@@ -3543,8 +3551,8 @@ static int ib_uverbs_query_srq(struct uverbs_attr_bundle *attrs)
+ 		return ret;
+ 
+ 	srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs);
+-	if (!srq)
+-		return -EINVAL;
++	if (IS_ERR(srq))
++		return PTR_ERR(srq);
+ 
+ 	ret = ib_query_srq(srq, &attr);
+ 
+@@ -3669,8 +3677,8 @@ static int ib_uverbs_ex_modify_cq(struct uverbs_attr_bundle *attrs)
+ 		return -EOPNOTSUPP;
+ 
+ 	cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
+-	if (!cq)
+-		return -EINVAL;
++	if (IS_ERR(cq))
++		return PTR_ERR(cq);
+ 
+ 	ret = rdma_set_cq_moderation(cq, cmd.attr.cq_count, cmd.attr.cq_period);
+ 
+diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
+index 473ee0831307c1..dc40001072a5ec 100644
+--- a/drivers/infiniband/core/verbs.c
++++ b/drivers/infiniband/core/verbs.c
+@@ -3109,22 +3109,23 @@ EXPORT_SYMBOL(__rdma_block_iter_start);
+ bool __rdma_block_iter_next(struct ib_block_iter *biter)
+ {
+ 	unsigned int block_offset;
+-	unsigned int sg_delta;
++	unsigned int delta;
+ 
+ 	if (!biter->__sg_nents || !biter->__sg)
+ 		return false;
+ 
+ 	biter->__dma_addr = sg_dma_address(biter->__sg) + biter->__sg_advance;
+ 	block_offset = biter->__dma_addr & (BIT_ULL(biter->__pg_bit) - 1);
+-	sg_delta = BIT_ULL(biter->__pg_bit) - block_offset;
++	delta = BIT_ULL(biter->__pg_bit) - block_offset;
+ 
+-	if (sg_dma_len(biter->__sg) - biter->__sg_advance > sg_delta) {
+-		biter->__sg_advance += sg_delta;
+-	} else {
++	while (biter->__sg_nents && biter->__sg &&
++	       sg_dma_len(biter->__sg) - biter->__sg_advance <= delta) {
++		delta -= sg_dma_len(biter->__sg) - biter->__sg_advance;
+ 		biter->__sg_advance = 0;
+ 		biter->__sg = sg_next(biter->__sg);
+ 		biter->__sg_nents--;
+ 	}
++	biter->__sg_advance += delta;
+ 
+ 	return true;
+ }
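The old __rdma_block_iter_next() consumed at most one scatterlist entry per step, which mis-handled entries smaller than the remaining block span; the while loop now skips any number of entries that fit entirely within delta before advancing inside the final one. A runnable model of the new advance rule over a plain array:

#include <stdio.h>

int main(void)
{
	unsigned int len[] = { 512, 512, 4096 };	/* dma segment lengths */
	unsigned int i = 0, advance = 0, delta = 1536;	/* bytes left in block */

	/* Consume whole entries that fit in delta, then step inside. */
	while (i < 3 && len[i] - advance <= delta) {
		delta -= len[i] - advance;
		advance = 0;
		i++;
	}
	advance += delta;
	printf("entry %u, offset %u\n", i, advance);	/* entry 2, offset 512 */
	return 0;
}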
+diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
+index e14d8f316ad8f2..da5b14602a761d 100644
+--- a/drivers/input/joystick/xpad.c
++++ b/drivers/input/joystick/xpad.c
+@@ -289,6 +289,8 @@ static const struct xpad_device {
+ 	{ 0x1038, 0x1430, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
+ 	{ 0x1038, 0x1431, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
+ 	{ 0x10f5, 0x7005, "Turtle Beach Recon Controller", 0, XTYPE_XBOXONE },
++	{ 0x10f5, 0x7008, "Turtle Beach Recon Controller", MAP_SHARE_BUTTON, XTYPE_XBOXONE },
++	{ 0x10f5, 0x7073, "Turtle Beach Stealth Ultra Controller", MAP_SHARE_BUTTON, XTYPE_XBOXONE },
+ 	{ 0x11c9, 0x55f0, "Nacon GC-100XF", 0, XTYPE_XBOX360 },
+ 	{ 0x11ff, 0x0511, "PXN V900", 0, XTYPE_XBOX360 },
+ 	{ 0x1209, 0x2882, "Ardwiino Controller", 0, XTYPE_XBOX360 },
+@@ -353,6 +355,7 @@ static const struct xpad_device {
+ 	{ 0x1ee9, 0x1590, "ZOTAC Gaming Zone", 0, XTYPE_XBOX360 },
+ 	{ 0x20d6, 0x2001, "BDA Xbox Series X Wired Controller", 0, XTYPE_XBOXONE },
+ 	{ 0x20d6, 0x2009, "PowerA Enhanced Wired Controller for Xbox Series X|S", 0, XTYPE_XBOXONE },
++	{ 0x20d6, 0x2064, "PowerA Wired Controller for Xbox", MAP_SHARE_BUTTON, XTYPE_XBOXONE },
+ 	{ 0x20d6, 0x281f, "PowerA Wired Controller For Xbox 360", 0, XTYPE_XBOX360 },
+ 	{ 0x2345, 0xe00b, "Machenike G5 Pro Controller", 0, XTYPE_XBOX360 },
+ 	{ 0x24c6, 0x5000, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+diff --git a/drivers/iommu/amd/io_pgtable_v2.c b/drivers/iommu/amd/io_pgtable_v2.c
+index c616de2c5926ec..a56a2739630591 100644
+--- a/drivers/iommu/amd/io_pgtable_v2.c
++++ b/drivers/iommu/amd/io_pgtable_v2.c
+@@ -254,7 +254,7 @@ static int iommu_v2_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
+ 		pte = v2_alloc_pte(cfg->amd.nid, pgtable->pgd,
+ 				   iova, map_size, gfp, &updated);
+ 		if (!pte) {
+-			ret = -EINVAL;
++			ret = -ENOMEM;
+ 			goto out;
+ 		}
+ 
+diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
+index 2a9fa0c8cc00fe..0f0caf59023c79 100644
+--- a/drivers/iommu/dma-iommu.c
++++ b/drivers/iommu/dma-iommu.c
+@@ -1815,7 +1815,7 @@ int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
+ 	static DEFINE_MUTEX(msi_prepare_lock); /* see below */
+ 
+ 	if (!domain || !domain->iova_cookie) {
+-		desc->iommu_cookie = NULL;
++		msi_desc_set_iommu_msi_iova(desc, 0, 0);
+ 		return 0;
+ 	}
+ 
+@@ -1827,11 +1827,12 @@ int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
+ 	mutex_lock(&msi_prepare_lock);
+ 	msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
+ 	mutex_unlock(&msi_prepare_lock);
+-
+-	msi_desc_set_iommu_cookie(desc, msi_page);
+-
+ 	if (!msi_page)
+ 		return -ENOMEM;
++
++	msi_desc_set_iommu_msi_iova(
++		desc, msi_page->iova,
++		ilog2(cookie_msi_granule(domain->iova_cookie)));
+ 	return 0;
+ }
+ 
+@@ -1842,18 +1843,15 @@ int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
+  */
+ void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
+ {
+-	struct device *dev = msi_desc_to_dev(desc);
+-	const struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
+-	const struct iommu_dma_msi_page *msi_page;
++#ifdef CONFIG_IRQ_MSI_IOMMU
++	if (desc->iommu_msi_shift) {
++		u64 msi_iova = desc->iommu_msi_iova << desc->iommu_msi_shift;
+ 
+-	msi_page = msi_desc_get_iommu_cookie(desc);
+-
+-	if (!domain || !domain->iova_cookie || WARN_ON(!msi_page))
+-		return;
+-
+-	msg->address_hi = upper_32_bits(msi_page->iova);
+-	msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1;
+-	msg->address_lo += lower_32_bits(msi_page->iova);
++		msg->address_hi = upper_32_bits(msi_iova);
++		msg->address_lo = lower_32_bits(msi_iova) |
++				  (msg->address_lo & ((1 << desc->iommu_msi_shift) - 1));
++	}
++#endif
+ }
+ 
+ static int iommu_dma_init(void)
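Instead of stashing a page pointer in an opaque cookie, the MSI descriptor now records the doorbell IOVA in granule units plus the granule's log2 size; compose rebuilds the 64-bit address by shifting the stored value back up and keeping only the low shift bits (the offset within the granule) of the original address_lo. A runnable demonstration of that recomposition (the doorbell value is hypothetical):

#include <stdio.h>
#include <stdint.h>

static void compose(uint32_t *hi, uint32_t *lo, uint64_t iova_units,
		    unsigned int shift)
{
	uint64_t msi_iova = iova_units << shift;	/* granule-aligned */

	*hi = (uint32_t)(msi_iova >> 32);
	*lo = (uint32_t)msi_iova | (*lo & ((1u << shift) - 1));
}

int main(void)
{
	uint32_t hi = 0, lo = 0xfee00014;	/* hypothetical doorbell */

	compose(&hi, &lo, 0x8765432, 12);	/* 4 KiB granule */
	printf("%08x:%08x\n", hi, lo);		/* prints 00000087:65432014 */
	return 0;
}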
+diff --git a/drivers/iommu/iommu-priv.h b/drivers/iommu/iommu-priv.h
+index de5b54eaa8bf1a..a5913c0b02a0a7 100644
+--- a/drivers/iommu/iommu-priv.h
++++ b/drivers/iommu/iommu-priv.h
+@@ -17,6 +17,8 @@ static inline const struct iommu_ops *dev_iommu_ops(struct device *dev)
+ 	return dev->iommu->iommu_dev->ops;
+ }
+ 
++void dev_iommu_free(struct device *dev);
++
+ const struct iommu_ops *iommu_ops_from_fwnode(const struct fwnode_handle *fwnode);
+ 
+ static inline const struct iommu_ops *iommu_fwspec_ops(struct iommu_fwspec *fwspec)
+diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
+index cac3dce111689c..879009adef407b 100644
+--- a/drivers/iommu/iommu.c
++++ b/drivers/iommu/iommu.c
+@@ -347,7 +347,7 @@ static struct dev_iommu *dev_iommu_get(struct device *dev)
+ 	return param;
+ }
+ 
+-static void dev_iommu_free(struct device *dev)
++void dev_iommu_free(struct device *dev)
+ {
+ 	struct dev_iommu *param = dev->iommu;
+ 
+diff --git a/drivers/iommu/iommufd/device.c b/drivers/iommu/iommufd/device.c
+index 3fd8920e79ffb9..74480ae6bfc0bd 100644
+--- a/drivers/iommu/iommufd/device.c
++++ b/drivers/iommu/iommufd/device.c
+@@ -3,6 +3,7 @@
+  */
+ #include <linux/iommu.h>
+ #include <linux/iommufd.h>
++#include <linux/pci-ats.h>
+ #include <linux/slab.h>
+ #include <uapi/linux/iommufd.h>
+ 
+@@ -1304,7 +1305,8 @@ int iommufd_get_hw_info(struct iommufd_ucmd *ucmd)
+ 	void *data;
+ 	int rc;
+ 
+-	if (cmd->flags || cmd->__reserved)
++	if (cmd->flags || cmd->__reserved[0] || cmd->__reserved[1] ||
++	    cmd->__reserved[2])
+ 		return -EOPNOTSUPP;
+ 
+ 	idev = iommufd_get_device(ucmd, cmd->dev_id);
+@@ -1361,6 +1363,36 @@ int iommufd_get_hw_info(struct iommufd_ucmd *ucmd)
+ 	if (device_iommu_capable(idev->dev, IOMMU_CAP_DIRTY_TRACKING))
+ 		cmd->out_capabilities |= IOMMU_HW_CAP_DIRTY_TRACKING;
+ 
++	cmd->out_max_pasid_log2 = 0;
++	/*
++	 * Currently, all iommu drivers enable PASID in the probe_device()
++	 * op if iommu and device supports it. So the max_pasids stored in
++	 * dev->iommu indicates both PASID support and enable status. A
++	 * non-zero dev->iommu->max_pasids means PASID is supported and
++	 * enabled. The iommufd only reports PASID capability to userspace
++	 * if it's enabled.
++	 */
++	if (idev->dev->iommu->max_pasids) {
++		cmd->out_max_pasid_log2 = ilog2(idev->dev->iommu->max_pasids);
++
++		if (dev_is_pci(idev->dev)) {
++			struct pci_dev *pdev = to_pci_dev(idev->dev);
++			int ctrl;
++
++			ctrl = pci_pasid_status(pdev);
++
++			WARN_ON_ONCE(ctrl < 0 ||
++				     !(ctrl & PCI_PASID_CTRL_ENABLE));
++
++			if (ctrl & PCI_PASID_CTRL_EXEC)
++				cmd->out_capabilities |=
++						IOMMU_HW_CAP_PCI_PASID_EXEC;
++			if (ctrl & PCI_PASID_CTRL_PRIV)
++				cmd->out_capabilities |=
++						IOMMU_HW_CAP_PCI_PASID_PRIV;
++		}
++	}
++
+ 	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
+ out_free:
+ 	kfree(data);
+diff --git a/drivers/iommu/iommufd/hw_pagetable.c b/drivers/iommu/iommufd/hw_pagetable.c
+index d06bf6e6c19fd2..2454627a8b61ba 100644
+--- a/drivers/iommu/iommufd/hw_pagetable.c
++++ b/drivers/iommu/iommufd/hw_pagetable.c
+@@ -122,6 +122,9 @@ iommufd_hwpt_paging_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
+ 	if ((flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING) &&
+ 	    !device_iommu_capable(idev->dev, IOMMU_CAP_DIRTY_TRACKING))
+ 		return ERR_PTR(-EOPNOTSUPP);
++	if ((flags & IOMMU_HWPT_FAULT_ID_VALID) &&
++	    (flags & IOMMU_HWPT_ALLOC_NEST_PARENT))
++		return ERR_PTR(-EOPNOTSUPP);
+ 
+ 	hwpt_paging = __iommufd_object_alloc(
+ 		ictx, hwpt_paging, IOMMUFD_OBJ_HWPT_PAGING, common.obj);
+diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
+index e7a6a1611d193b..e3fcab925a547c 100644
+--- a/drivers/iommu/of_iommu.c
++++ b/drivers/iommu/of_iommu.c
+@@ -118,6 +118,7 @@ static void of_pci_check_device_ats(struct device *dev, struct device_node *np)
+ int of_iommu_configure(struct device *dev, struct device_node *master_np,
+ 		       const u32 *id)
+ {
++	bool dev_iommu_present;
+ 	int err;
+ 
+ 	if (!master_np)
+@@ -129,6 +130,7 @@ int of_iommu_configure(struct device *dev, struct device_node *master_np,
+ 		mutex_unlock(&iommu_probe_device_lock);
+ 		return 0;
+ 	}
++	dev_iommu_present = dev->iommu;
+ 
+ 	/*
+ 	 * We don't currently walk up the tree looking for a parent IOMMU.
+@@ -149,8 +151,10 @@ int of_iommu_configure(struct device *dev, struct device_node *master_np,
+ 		err = of_iommu_configure_device(master_np, dev, id);
+ 	}
+ 
+-	if (err)
++	if (err && dev_iommu_present)
+ 		iommu_fwspec_free(dev);
++	else if (err && dev->iommu)
++		dev_iommu_free(dev);
+ 	mutex_unlock(&iommu_probe_device_lock);
+ 
+ 	if (!err && dev->bus)
+diff --git a/drivers/irqchip/irq-riscv-aplic-direct.c b/drivers/irqchip/irq-riscv-aplic-direct.c
+index 7cd6b646774b9a..205ad61d15e49f 100644
+--- a/drivers/irqchip/irq-riscv-aplic-direct.c
++++ b/drivers/irqchip/irq-riscv-aplic-direct.c
+@@ -31,7 +31,7 @@ struct aplic_direct {
+ };
+ 
+ struct aplic_idc {
+-	unsigned int		hart_index;
++	u32			hart_index;
+ 	void __iomem		*regs;
+ 	struct aplic_direct	*direct;
+ };
+@@ -219,6 +219,20 @@ static int aplic_direct_parse_parent_hwirq(struct device *dev, u32 index,
+ 	return 0;
+ }
+ 
++static int aplic_direct_get_hart_index(struct device *dev, u32 logical_index,
++				       u32 *hart_index)
++{
++	const char *prop_hart_index = "riscv,hart-indexes";
++	struct device_node *np = to_of_node(dev->fwnode);
++
++	if (!np || !of_property_present(np, prop_hart_index)) {
++		*hart_index = logical_index;
++		return 0;
++	}
++
++	return of_property_read_u32_index(np, prop_hart_index, logical_index, hart_index);
++}
++
+ int aplic_direct_setup(struct device *dev, void __iomem *regs)
+ {
+ 	int i, j, rc, cpu, current_cpu, setup_count = 0;
+@@ -265,8 +279,12 @@ int aplic_direct_setup(struct device *dev, void __iomem *regs)
+ 		cpumask_set_cpu(cpu, &direct->lmask);
+ 
+ 		idc = per_cpu_ptr(&aplic_idcs, cpu);
+-		idc->hart_index = i;
+-		idc->regs = priv->regs + APLIC_IDC_BASE + i * APLIC_IDC_SIZE;
++		rc = aplic_direct_get_hart_index(dev, i, &idc->hart_index);
++		if (rc) {
++			dev_warn(dev, "hart index not found for IDC%d\n", i);
++			continue;
++		}
++		idc->regs = priv->regs + APLIC_IDC_BASE + idc->hart_index * APLIC_IDC_SIZE;
+ 		idc->direct = direct;
+ 
+ 		aplic_idc_set_delivery(idc, true);
+diff --git a/drivers/irqchip/irq-riscv-imsic-early.c b/drivers/irqchip/irq-riscv-imsic-early.c
+index c5c2e6929a2f5a..b5def6268936e0 100644
+--- a/drivers/irqchip/irq-riscv-imsic-early.c
++++ b/drivers/irqchip/irq-riscv-imsic-early.c
+@@ -77,6 +77,12 @@ static void imsic_handle_irq(struct irq_desc *desc)
+ 	struct imsic_vector *vec;
+ 	unsigned long local_id;
+ 
++	/*
++	 * Process pending local synchronization instead of waiting
++	 * for the per-CPU local timer to expire.
++	 */
++	imsic_local_sync_all(false);
++
+ 	chained_irq_enter(chip, desc);
+ 
+ 	while ((local_id = csr_swap(CSR_TOPEI, 0))) {
+@@ -120,7 +126,7 @@ static int imsic_starting_cpu(unsigned int cpu)
+ 	 * Interrupts identities might have been enabled/disabled while
+ 	 * this CPU was not running so sync-up local enable/disable state.
+ 	 */
+-	imsic_local_sync_all();
++	imsic_local_sync_all(true);
+ 
+ 	/* Enable local interrupt delivery */
+ 	imsic_local_delivery(true);
+diff --git a/drivers/irqchip/irq-riscv-imsic-platform.c b/drivers/irqchip/irq-riscv-imsic-platform.c
+index c708780e8760f3..5d7c30ad8855b1 100644
+--- a/drivers/irqchip/irq-riscv-imsic-platform.c
++++ b/drivers/irqchip/irq-riscv-imsic-platform.c
+@@ -96,9 +96,8 @@ static int imsic_irq_set_affinity(struct irq_data *d, const struct cpumask *mask
+ 				  bool force)
+ {
+ 	struct imsic_vector *old_vec, *new_vec;
+-	struct irq_data *pd = d->parent_data;
+ 
+-	old_vec = irq_data_get_irq_chip_data(pd);
++	old_vec = irq_data_get_irq_chip_data(d);
+ 	if (WARN_ON(!old_vec))
+ 		return -ENOENT;
+ 
+@@ -116,13 +115,13 @@ static int imsic_irq_set_affinity(struct irq_data *d, const struct cpumask *mask
+ 		return -ENOSPC;
+ 
+ 	/* Point device to the new vector */
+-	imsic_msi_update_msg(d, new_vec);
++	imsic_msi_update_msg(irq_get_irq_data(d->irq), new_vec);
+ 
+ 	/* Update irq descriptors with the new vector */
+-	pd->chip_data = new_vec;
++	d->chip_data = new_vec;
+ 
+-	/* Update effective affinity of parent irq data */
+-	irq_data_update_effective_affinity(pd, cpumask_of(new_vec->cpu));
++	/* Update effective affinity */
++	irq_data_update_effective_affinity(d, cpumask_of(new_vec->cpu));
+ 
+ 	/* Move state of the old vector to the new vector */
+ 	imsic_vector_move(old_vec, new_vec);
+@@ -135,6 +134,9 @@ static struct irq_chip imsic_irq_base_chip = {
+ 	.name			= "IMSIC",
+ 	.irq_mask		= imsic_irq_mask,
+ 	.irq_unmask		= imsic_irq_unmask,
++#ifdef CONFIG_SMP
++	.irq_set_affinity	= imsic_irq_set_affinity,
++#endif
+ 	.irq_retrigger		= imsic_irq_retrigger,
+ 	.irq_compose_msi_msg	= imsic_irq_compose_msg,
+ 	.flags			= IRQCHIP_SKIP_SET_WAKE |
+@@ -245,7 +247,7 @@ static bool imsic_init_dev_msi_info(struct device *dev,
+ 		if (WARN_ON_ONCE(domain != real_parent))
+ 			return false;
+ #ifdef CONFIG_SMP
+-		info->chip->irq_set_affinity = imsic_irq_set_affinity;
++		info->chip->irq_set_affinity = irq_chip_set_affinity_parent;
+ #endif
+ 		break;
+ 	default:
+diff --git a/drivers/irqchip/irq-riscv-imsic-state.c b/drivers/irqchip/irq-riscv-imsic-state.c
+index b97e6cd89ed742..06ff0e17c0c337 100644
+--- a/drivers/irqchip/irq-riscv-imsic-state.c
++++ b/drivers/irqchip/irq-riscv-imsic-state.c
+@@ -124,10 +124,11 @@ void __imsic_eix_update(unsigned long base_id, unsigned long num_id, bool pend,
+ 	}
+ }
+ 
+-static void __imsic_local_sync(struct imsic_local_priv *lpriv)
++static bool __imsic_local_sync(struct imsic_local_priv *lpriv)
+ {
+ 	struct imsic_local_config *mlocal;
+ 	struct imsic_vector *vec, *mvec;
++	bool ret = true;
+ 	int i;
+ 
+ 	lockdep_assert_held(&lpriv->lock);
+@@ -143,35 +144,75 @@ static void __imsic_local_sync(struct imsic_local_priv *lpriv)
+ 			__imsic_id_clear_enable(i);
+ 
+ 		/*
+-		 * If the ID was being moved to a new ID on some other CPU
+-		 * then we can get a MSI during the movement so check the
+-		 * ID pending bit and re-trigger the new ID on other CPU
+-		 * using MMIO write.
++		 * Clear the previous vector pointer of the new vector only
++		 * after the movement is complete on the old CPU.
+ 		 */
+-		mvec = READ_ONCE(vec->move);
+-		WRITE_ONCE(vec->move, NULL);
+-		if (mvec && mvec != vec) {
++		mvec = READ_ONCE(vec->move_prev);
++		if (mvec) {
++			/*
++			 * If the old vector has not been updated then
++			 * try again in the next sync-up call.
++			 */
++			if (READ_ONCE(mvec->move_next)) {
++				ret = false;
++				continue;
++			}
++
++			WRITE_ONCE(vec->move_prev, NULL);
++		}
++
++		/*
++		 * If a vector was being moved to a new vector on some other
++		 * CPU then we can get an MSI during the movement, so check the
++		 * ID pending bit and re-trigger the new ID on the other CPU using
++		 * MMIO write.
++		 */
++		mvec = READ_ONCE(vec->move_next);
++		if (mvec) {
+ 			if (__imsic_id_read_clear_pending(i)) {
+ 				mlocal = per_cpu_ptr(imsic->global.local, mvec->cpu);
+ 				writel_relaxed(mvec->local_id, mlocal->msi_va);
+ 			}
+ 
++			WRITE_ONCE(vec->move_next, NULL);
+ 			imsic_vector_free(&lpriv->vectors[i]);
+ 		}
+ 
+ skip:
+ 		bitmap_clear(lpriv->dirty_bitmap, i, 1);
+ 	}
++
++	return ret;
+ }
+ 
+-void imsic_local_sync_all(void)
++#ifdef CONFIG_SMP
++static void __imsic_local_timer_start(struct imsic_local_priv *lpriv, unsigned int cpu)
++{
++	lockdep_assert_held(&lpriv->lock);
++
++	if (!timer_pending(&lpriv->timer)) {
++		lpriv->timer.expires = jiffies + 1;
++		add_timer_on(&lpriv->timer, cpu);
++	}
++}
++#else
++static inline void __imsic_local_timer_start(struct imsic_local_priv *lpriv, unsigned int cpu)
++{
++}
++#endif
++
++void imsic_local_sync_all(bool force_all)
+ {
+ 	struct imsic_local_priv *lpriv = this_cpu_ptr(imsic->lpriv);
+ 	unsigned long flags;
+ 
+ 	raw_spin_lock_irqsave(&lpriv->lock, flags);
+-	bitmap_fill(lpriv->dirty_bitmap, imsic->global.nr_ids + 1);
+-	__imsic_local_sync(lpriv);
++
++	if (force_all)
++		bitmap_fill(lpriv->dirty_bitmap, imsic->global.nr_ids + 1);
++	if (!__imsic_local_sync(lpriv))
++		__imsic_local_timer_start(lpriv, smp_processor_id());
++
+ 	raw_spin_unlock_irqrestore(&lpriv->lock, flags);
+ }
+ 
+@@ -190,12 +231,7 @@ void imsic_local_delivery(bool enable)
+ #ifdef CONFIG_SMP
+ static void imsic_local_timer_callback(struct timer_list *timer)
+ {
+-	struct imsic_local_priv *lpriv = this_cpu_ptr(imsic->lpriv);
+-	unsigned long flags;
+-
+-	raw_spin_lock_irqsave(&lpriv->lock, flags);
+-	__imsic_local_sync(lpriv);
+-	raw_spin_unlock_irqrestore(&lpriv->lock, flags);
++	imsic_local_sync_all(false);
+ }
+ 
+ static void __imsic_remote_sync(struct imsic_local_priv *lpriv, unsigned int cpu)
+@@ -216,14 +252,11 @@ static void __imsic_remote_sync(struct imsic_local_priv *lpriv, unsigned int cpu
+ 	 */
+ 	if (cpu_online(cpu)) {
+ 		if (cpu == smp_processor_id()) {
+-			__imsic_local_sync(lpriv);
+-			return;
++			if (__imsic_local_sync(lpriv))
++				return;
+ 		}
+ 
+-		if (!timer_pending(&lpriv->timer)) {
+-			lpriv->timer.expires = jiffies + 1;
+-			add_timer_on(&lpriv->timer, cpu);
+-		}
++		__imsic_local_timer_start(lpriv, cpu);
+ 	}
+ }
+ #else
+@@ -278,8 +311,9 @@ void imsic_vector_unmask(struct imsic_vector *vec)
+ 	raw_spin_unlock(&lpriv->lock);
+ }
+ 
+-static bool imsic_vector_move_update(struct imsic_local_priv *lpriv, struct imsic_vector *vec,
+-				     bool new_enable, struct imsic_vector *new_move)
++static bool imsic_vector_move_update(struct imsic_local_priv *lpriv,
++				     struct imsic_vector *vec, bool is_old_vec,
++				     bool new_enable, struct imsic_vector *move_vec)
+ {
+ 	unsigned long flags;
+ 	bool enabled;
+@@ -289,7 +323,10 @@ static bool imsic_vector_move_update(struct imsic_local_priv *lpriv, struct imsi
+ 	/* Update enable and move details */
+ 	enabled = READ_ONCE(vec->enable);
+ 	WRITE_ONCE(vec->enable, new_enable);
+-	WRITE_ONCE(vec->move, new_move);
++	if (is_old_vec)
++		WRITE_ONCE(vec->move_next, move_vec);
++	else
++		WRITE_ONCE(vec->move_prev, move_vec);
+ 
+ 	/* Mark the vector as dirty and synchronize */
+ 	bitmap_set(lpriv->dirty_bitmap, vec->local_id, 1);
+@@ -322,8 +359,8 @@ void imsic_vector_move(struct imsic_vector *old_vec, struct imsic_vector *new_ve
+ 	 * interrupt on the old vector while device was being moved
+ 	 * to the new vector.
+ 	 */
+-	enabled = imsic_vector_move_update(old_lpriv, old_vec, false, new_vec);
+-	imsic_vector_move_update(new_lpriv, new_vec, enabled, new_vec);
++	enabled = imsic_vector_move_update(old_lpriv, old_vec, true, false, new_vec);
++	imsic_vector_move_update(new_lpriv, new_vec, false, enabled, old_vec);
+ }
+ 
+ #ifdef CONFIG_GENERIC_IRQ_DEBUGFS
+@@ -386,7 +423,8 @@ struct imsic_vector *imsic_vector_alloc(unsigned int hwirq, const struct cpumask
+ 	vec = &lpriv->vectors[local_id];
+ 	vec->hwirq = hwirq;
+ 	vec->enable = false;
+-	vec->move = NULL;
++	vec->move_next = NULL;
++	vec->move_prev = NULL;
+ 
+ 	return vec;
+ }
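
Splitting the single move pointer into move_next (on the old vector) and move_prev (on the new one) encodes an ordering rule: the receiving CPU may only forget where the vector came from after the old CPU has finished handing it off; until then it re-arms its timer and retries. A single-threaded sketch of just that rule (the kernel runs it under per-CPU locks with READ_ONCE/WRITE_ONCE):

#include <stdbool.h>
#include <stdio.h>

struct vec {
	struct vec *move_next;	/* on the old vector: its destination */
	struct vec *move_prev;	/* on the new vector: its origin */
};

/* One sync pass over a vector; false means "retry from the timer". */
static bool sync_vec(struct vec *v)
{
	if (v->move_prev) {
		if (v->move_prev->move_next)
			return false;	/* old CPU not done yet */
		v->move_prev = NULL;
	}
	if (v->move_next) {
		/* old side: would re-trigger a pending MSI here */
		v->move_next = NULL;
	}
	return true;
}

int main(void)
{
	struct vec oldv = { 0 }, newv = { 0 };

	oldv.move_next = &newv;
	newv.move_prev = &oldv;
	printf("new: %s\n", sync_vec(&newv) ? "done" : "retry");	/* retry */
	printf("old: %s\n", sync_vec(&oldv) ? "done" : "retry");	/* done */
	printf("new: %s\n", sync_vec(&newv) ? "done" : "retry");	/* done */
	return 0;
}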
+diff --git a/drivers/irqchip/irq-riscv-imsic-state.h b/drivers/irqchip/irq-riscv-imsic-state.h
+index 391e4428082757..f02842b84ed582 100644
+--- a/drivers/irqchip/irq-riscv-imsic-state.h
++++ b/drivers/irqchip/irq-riscv-imsic-state.h
+@@ -23,7 +23,8 @@ struct imsic_vector {
+ 	unsigned int				hwirq;
+ 	/* Details accessed using local lock held */
+ 	bool					enable;
+-	struct imsic_vector			*move;
++	struct imsic_vector			*move_next;
++	struct imsic_vector			*move_prev;
+ };
+ 
+ struct imsic_local_priv {
+@@ -74,7 +75,7 @@ static inline void __imsic_id_clear_enable(unsigned long id)
+ 	__imsic_eix_update(id, 1, false, false);
+ }
+ 
+-void imsic_local_sync_all(void);
++void imsic_local_sync_all(bool force_all);
+ void imsic_local_delivery(bool enable);
+ 
+ void imsic_vector_mask(struct imsic_vector *vec);
+@@ -87,7 +88,7 @@ static inline bool imsic_vector_isenabled(struct imsic_vector *vec)
+ 
+ static inline struct imsic_vector *imsic_vector_get_move(struct imsic_vector *vec)
+ {
+-	return READ_ONCE(vec->move);
++	return READ_ONCE(vec->move_prev);
+ }
+ 
+ void imsic_vector_move(struct imsic_vector *old_vec, struct imsic_vector *new_vec);
+diff --git a/drivers/leds/rgb/leds-pwm-multicolor.c b/drivers/leds/rgb/leds-pwm-multicolor.c
+index e1a81e0109e8a5..c0aa34b1d0e2dc 100644
+--- a/drivers/leds/rgb/leds-pwm-multicolor.c
++++ b/drivers/leds/rgb/leds-pwm-multicolor.c
+@@ -135,8 +135,11 @@ static int led_pwm_mc_probe(struct platform_device *pdev)
+ 
+ 	/* init the multicolor's LED class device */
+ 	cdev = &priv->mc_cdev.led_cdev;
+-	fwnode_property_read_u32(mcnode, "max-brightness",
++	ret = fwnode_property_read_u32(mcnode, "max-brightness",
+ 				 &cdev->max_brightness);
++	if (ret)
++		goto release_mcnode;
++
+ 	cdev->flags = LED_CORE_SUSPENDRESUME;
+ 	cdev->brightness_set_blocking = led_pwm_mc_set;
+ 
+diff --git a/drivers/leds/trigger/ledtrig-netdev.c b/drivers/leds/trigger/ledtrig-netdev.c
+index 4b0863db901a9e..356a55ced2c289 100644
+--- a/drivers/leds/trigger/ledtrig-netdev.c
++++ b/drivers/leds/trigger/ledtrig-netdev.c
+@@ -68,6 +68,7 @@ struct led_netdev_data {
+ 	unsigned int last_activity;
+ 
+ 	unsigned long mode;
++	unsigned long blink_delay;
+ 	int link_speed;
+ 	__ETHTOOL_DECLARE_LINK_MODE_MASK(supported_link_modes);
+ 	u8 duplex;
+@@ -86,6 +87,10 @@ static void set_baseline_state(struct led_netdev_data *trigger_data)
+ 	/* Already validated, hw control is possible with the requested mode */
+ 	if (trigger_data->hw_control) {
+ 		led_cdev->hw_control_set(led_cdev, trigger_data->mode);
++		if (led_cdev->blink_set) {
++			led_cdev->blink_set(led_cdev, &trigger_data->blink_delay,
++					    &trigger_data->blink_delay);
++		}
+ 
+ 		return;
+ 	}
+@@ -454,10 +459,11 @@ static ssize_t interval_store(struct device *dev,
+ 			      size_t size)
+ {
+ 	struct led_netdev_data *trigger_data = led_trigger_get_drvdata(dev);
++	struct led_classdev *led_cdev = trigger_data->led_cdev;
+ 	unsigned long value;
+ 	int ret;
+ 
+-	if (trigger_data->hw_control)
++	if (trigger_data->hw_control && !led_cdev->blink_set)
+ 		return -EINVAL;
+ 
+ 	ret = kstrtoul(buf, 0, &value);
+@@ -466,9 +472,13 @@ static ssize_t interval_store(struct device *dev,
+ 
+ 	/* impose some basic bounds on the timer interval */
+ 	if (value >= 5 && value <= 10000) {
+-		cancel_delayed_work_sync(&trigger_data->work);
++		if (trigger_data->hw_control) {
++			trigger_data->blink_delay = value;
++		} else {
++			cancel_delayed_work_sync(&trigger_data->work);
+ 
+-		atomic_set(&trigger_data->interval, msecs_to_jiffies(value));
++			atomic_set(&trigger_data->interval, msecs_to_jiffies(value));
++		}
+ 		set_baseline_state(trigger_data);	/* resets timer */
+ 	}
+ 
+diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
+index d3d26a2c98956c..cb174e788a96c2 100644
+--- a/drivers/mailbox/mailbox.c
++++ b/drivers/mailbox/mailbox.c
+@@ -415,11 +415,12 @@ struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index)
+ 
+ 	mutex_lock(&con_mutex);
+ 
+-	if (of_parse_phandle_with_args(dev->of_node, "mboxes",
+-				       "#mbox-cells", index, &spec)) {
++	ret = of_parse_phandle_with_args(dev->of_node, "mboxes", "#mbox-cells",
++					 index, &spec);
++	if (ret) {
+ 		dev_dbg(dev, "%s: can't parse \"mboxes\" property\n", __func__);
+ 		mutex_unlock(&con_mutex);
+-		return ERR_PTR(-ENODEV);
++		return ERR_PTR(ret);
+ 	}
+ 
+ 	chan = ERR_PTR(-EPROBE_DEFER);
+diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
+index f8215a8f656a46..49254d99a8ad68 100644
+--- a/drivers/mailbox/pcc.c
++++ b/drivers/mailbox/pcc.c
+@@ -419,8 +419,12 @@ int pcc_mbox_ioremap(struct mbox_chan *chan)
+ 		return -1;
+ 	pchan_info = chan->con_priv;
+ 	pcc_mbox_chan = &pchan_info->chan;
+-	pcc_mbox_chan->shmem = ioremap(pcc_mbox_chan->shmem_base_addr,
+-				       pcc_mbox_chan->shmem_size);
++
++	pcc_mbox_chan->shmem = acpi_os_ioremap(pcc_mbox_chan->shmem_base_addr,
++					       pcc_mbox_chan->shmem_size);
++	if (!pcc_mbox_chan->shmem)
++		return -ENXIO;
++
+ 	return 0;
+ }
+ EXPORT_SYMBOL_GPL(pcc_mbox_ioremap);
+diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
+index 849eb6333e980f..6aa4095dc58762 100644
+--- a/drivers/md/dm-cache-target.c
++++ b/drivers/md/dm-cache-target.c
+@@ -2899,6 +2899,27 @@ static dm_cblock_t get_cache_dev_size(struct cache *cache)
+ 	return to_cblock(size);
+ }
+ 
++static bool can_resume(struct cache *cache)
++{
++	/*
++	 * Disallow retrying the resume operation for devices that failed the
++	 * first resume attempt, as the failure leaves the policy object partially
++	 * initialized. Retrying could trigger BUG_ON when loading cache mappings
++	 * into the incomplete policy object.
++	 */
++	if (cache->sized && !cache->loaded_mappings) {
++		if (get_cache_mode(cache) != CM_WRITE)
++			DMERR("%s: unable to resume a failed-loaded cache, please check metadata.",
++			      cache_device_name(cache));
++		else
++			DMERR("%s: unable to resume cache due to missing proper cache table reload",
++			      cache_device_name(cache));
++		return false;
++	}
++
++	return true;
++}
++
+ static bool can_resize(struct cache *cache, dm_cblock_t new_size)
+ {
+ 	if (from_cblock(new_size) > from_cblock(cache->cache_size)) {
+@@ -2947,6 +2968,9 @@ static int cache_preresume(struct dm_target *ti)
+ 	struct cache *cache = ti->private;
+ 	dm_cblock_t csize = get_cache_dev_size(cache);
+ 
++	if (!can_resume(cache))
++		return -EINVAL;
++
+ 	/*
+ 	 * Check to see if the cache has resized.
+ 	 */
+diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
+index 3dc5bc3d29d64a..883f01e78324f5 100644
+--- a/drivers/md/dm-table.c
++++ b/drivers/md/dm-table.c
+@@ -698,6 +698,10 @@ int dm_table_add_target(struct dm_table *t, const char *type,
+ 		DMERR("%s: zero-length target", dm_device_name(t->md));
+ 		return -EINVAL;
+ 	}
++	if (start + len < start || start + len > LLONG_MAX >> SECTOR_SHIFT) {
++		DMERR("%s: too large device", dm_device_name(t->md));
++		return -EINVAL;
++	}
+ 
+ 	ti->type = dm_get_target_type(type);
+ 	if (!ti->type) {
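
The new dm-table check rejects targets whose end either wraps the unsigned sector arithmetic or, once shifted into bytes, no longer fits a signed 64-bit offset. The two conditions in isolation:

#include <limits.h>
#include <stdio.h>

#define SECTOR_SHIFT 9

static int range_ok(unsigned long long start, unsigned long long len)
{
	if (start + len < start)	/* unsigned wraparound */
		return 0;
	/* end in sectors must stay byte-addressable via a signed loff_t */
	if (start + len > (unsigned long long)(LLONG_MAX >> SECTOR_SHIFT))
		return 0;
	return 1;
}

int main(void)
{
	printf("%d\n", range_ok(0, 1ULL << 40));	/* 1: fine */
	printf("%d\n", range_ok(~0ULL - 10, 100));	/* 0: wraps */
	printf("%d\n", range_ok(0, ~0ULL >> 1));	/* 0: too large */
	return 0;
}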
+diff --git a/drivers/md/dm-vdo/indexer/index-layout.c b/drivers/md/dm-vdo/indexer/index-layout.c
+index 627adc24af3b76..053b7845d1f34d 100644
+--- a/drivers/md/dm-vdo/indexer/index-layout.c
++++ b/drivers/md/dm-vdo/indexer/index-layout.c
+@@ -54,7 +54,6 @@
+  * Each save also has a unique nonce.
+  */
+ 
+-#define MAGIC_SIZE 32
+ #define NONCE_INFO_SIZE 32
+ #define MAX_SAVES 2
+ 
+@@ -98,9 +97,11 @@ enum region_type {
+ #define SUPER_VERSION_CURRENT 3
+ #define SUPER_VERSION_MAXIMUM 7
+ 
+-static const u8 LAYOUT_MAGIC[MAGIC_SIZE] = "*ALBIREO*SINGLE*FILE*LAYOUT*001*";
++static const u8 LAYOUT_MAGIC[] = "*ALBIREO*SINGLE*FILE*LAYOUT*001*";
+ static const u64 REGION_MAGIC = 0x416c6252676e3031; /* 'AlbRgn01' */
+ 
++#define MAGIC_SIZE (sizeof(LAYOUT_MAGIC) - 1)
++
+ struct region_header {
+ 	u64 magic;
+ 	u64 region_blocks;
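
Deriving MAGIC_SIZE from the literal itself, as the hunk above now does, stops the length constant and the string from drifting apart; the sizeof-minus-one accounts for the implicit NUL terminator:

#include <stdio.h>

static const char LAYOUT_MAGIC[] = "*ALBIREO*SINGLE*FILE*LAYOUT*001*";
#define MAGIC_SIZE (sizeof(LAYOUT_MAGIC) - 1)

int main(void)
{
	/* sizeof counts the trailing '\0', so 33 - 1 = 32 on-disk bytes */
	printf("sizeof=%zu MAGIC_SIZE=%zu\n",
	       sizeof(LAYOUT_MAGIC), MAGIC_SIZE);
	return 0;
}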
+diff --git a/drivers/md/dm-vdo/vdo.c b/drivers/md/dm-vdo/vdo.c
+index fff847767755a3..b897f88250d2a7 100644
+--- a/drivers/md/dm-vdo/vdo.c
++++ b/drivers/md/dm-vdo/vdo.c
+@@ -31,9 +31,7 @@
+ 
+ #include <linux/completion.h>
+ #include <linux/device-mapper.h>
+-#include <linux/kernel.h>
+ #include <linux/lz4.h>
+-#include <linux/module.h>
+ #include <linux/mutex.h>
+ #include <linux/spinlock.h>
+ #include <linux/types.h>
+@@ -142,12 +140,6 @@ static void finish_vdo_request_queue(void *ptr)
+ 	vdo_unregister_allocating_thread();
+ }
+ 
+-#ifdef MODULE
+-#define MODULE_NAME THIS_MODULE->name
+-#else
+-#define MODULE_NAME "dm-vdo"
+-#endif  /* MODULE */
+-
+ static const struct vdo_work_queue_type default_queue_type = {
+ 	.start = start_vdo_request_queue,
+ 	.finish = finish_vdo_request_queue,
+@@ -559,8 +551,7 @@ int vdo_make(unsigned int instance, struct device_config *config, char **reason,
+ 	*vdo_ptr = vdo;
+ 
+ 	snprintf(vdo->thread_name_prefix, sizeof(vdo->thread_name_prefix),
+-		 "%s%u", MODULE_NAME, instance);
+-	BUG_ON(vdo->thread_name_prefix[0] == '\0');
++		 "vdo%u", instance);
+ 	result = vdo_allocate(vdo->thread_config.thread_count,
+ 			      struct vdo_thread, __func__, &vdo->threads);
+ 	if (result != VDO_SUCCESS) {
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index 19230404d8c2bd..d29125ee9e72af 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -1541,14 +1541,18 @@ static void __send_empty_flush(struct clone_info *ci)
+ {
+ 	struct dm_table *t = ci->map;
+ 	struct bio flush_bio;
++	blk_opf_t opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
++
++	if ((ci->io->orig_bio->bi_opf & (REQ_IDLE | REQ_SYNC)) ==
++	    (REQ_IDLE | REQ_SYNC))
++		opf |= REQ_IDLE;
+ 
+ 	/*
+ 	 * Use an on-stack bio for this, it's safe since we don't
+ 	 * need to reference it after submit. It's just used as
+ 	 * the basis for the clone(s).
+ 	 */
+-	bio_init(&flush_bio, ci->io->md->disk->part0, NULL, 0,
+-		 REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC);
++	bio_init(&flush_bio, ci->io->md->disk->part0, NULL, 0, opf);
+ 
+ 	ci->bio = &flush_bio;
+ 	ci->sector_count = 0;
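
The flush hunk only forwards REQ_IDLE when the original bio carried REQ_IDLE and REQ_SYNC together, since REQ_IDLE is meaningless without REQ_SYNC. The mask test, with stand-in flag values rather than the real blk_opf_t bits:

#include <stdio.h>

#define REQ_SYNC (1u << 0)	/* stand-in bit values */
#define REQ_IDLE (1u << 1)

static unsigned int flush_extra_flags(unsigned int orig_opf)
{
	unsigned int opf = 0;

	if ((orig_opf & (REQ_IDLE | REQ_SYNC)) == (REQ_IDLE | REQ_SYNC))
		opf |= REQ_IDLE;
	return opf;
}

int main(void)
{
	printf("%u\n", flush_extra_flags(REQ_SYNC));		/* 0 */
	printf("%u\n", flush_extra_flags(REQ_IDLE));		/* 0 */
	printf("%u\n", flush_extra_flags(REQ_IDLE | REQ_SYNC));	/* 2 */
	return 0;
}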
+diff --git a/drivers/media/i2c/adv7180.c b/drivers/media/i2c/adv7180.c
+index 819ff9f7c90fea..2a20a4fad796ca 100644
+--- a/drivers/media/i2c/adv7180.c
++++ b/drivers/media/i2c/adv7180.c
+@@ -195,6 +195,7 @@ struct adv7180_state;
+ #define ADV7180_FLAG_V2			BIT(1)
+ #define ADV7180_FLAG_MIPI_CSI2		BIT(2)
+ #define ADV7180_FLAG_I2P		BIT(3)
++#define ADV7180_FLAG_TEST_PATTERN	BIT(4)
+ 
+ struct adv7180_chip_info {
+ 	unsigned int flags;
+@@ -682,11 +683,15 @@ static int adv7180_init_controls(struct adv7180_state *state)
+ 			  ADV7180_HUE_MAX, 1, ADV7180_HUE_DEF);
+ 	v4l2_ctrl_new_custom(&state->ctrl_hdl, &adv7180_ctrl_fast_switch, NULL);
+ 
+-	v4l2_ctrl_new_std_menu_items(&state->ctrl_hdl, &adv7180_ctrl_ops,
+-				      V4L2_CID_TEST_PATTERN,
+-				      ARRAY_SIZE(test_pattern_menu) - 1,
+-				      0, ARRAY_SIZE(test_pattern_menu) - 1,
+-				      test_pattern_menu);
++	if (state->chip_info->flags & ADV7180_FLAG_TEST_PATTERN) {
++		v4l2_ctrl_new_std_menu_items(&state->ctrl_hdl,
++					     &adv7180_ctrl_ops,
++					     V4L2_CID_TEST_PATTERN,
++					     ARRAY_SIZE(test_pattern_menu) - 1,
++					     0,
++					     ARRAY_SIZE(test_pattern_menu) - 1,
++					     test_pattern_menu);
++	}
+ 
+ 	state->sd.ctrl_handler = &state->ctrl_hdl;
+ 	if (state->ctrl_hdl.error) {
+@@ -1221,7 +1226,7 @@ static const struct adv7180_chip_info adv7182_info = {
+ };
+ 
+ static const struct adv7180_chip_info adv7280_info = {
+-	.flags = ADV7180_FLAG_V2 | ADV7180_FLAG_I2P,
++	.flags = ADV7180_FLAG_V2 | ADV7180_FLAG_I2P | ADV7180_FLAG_TEST_PATTERN,
+ 	.valid_input_mask = BIT(ADV7182_INPUT_CVBS_AIN1) |
+ 		BIT(ADV7182_INPUT_CVBS_AIN2) |
+ 		BIT(ADV7182_INPUT_CVBS_AIN3) |
+@@ -1235,7 +1240,8 @@ static const struct adv7180_chip_info adv7280_info = {
+ };
+ 
+ static const struct adv7180_chip_info adv7280_m_info = {
+-	.flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2 | ADV7180_FLAG_I2P,
++	.flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2 | ADV7180_FLAG_I2P |
++		ADV7180_FLAG_TEST_PATTERN,
+ 	.valid_input_mask = BIT(ADV7182_INPUT_CVBS_AIN1) |
+ 		BIT(ADV7182_INPUT_CVBS_AIN2) |
+ 		BIT(ADV7182_INPUT_CVBS_AIN3) |
+@@ -1256,7 +1262,8 @@ static const struct adv7180_chip_info adv7280_m_info = {
+ };
+ 
+ static const struct adv7180_chip_info adv7281_info = {
+-	.flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2,
++	.flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2 |
++		ADV7180_FLAG_TEST_PATTERN,
+ 	.valid_input_mask = BIT(ADV7182_INPUT_CVBS_AIN1) |
+ 		BIT(ADV7182_INPUT_CVBS_AIN2) |
+ 		BIT(ADV7182_INPUT_CVBS_AIN7) |
+@@ -1271,7 +1278,8 @@ static const struct adv7180_chip_info adv7281_info = {
+ };
+ 
+ static const struct adv7180_chip_info adv7281_m_info = {
+-	.flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2,
++	.flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2 |
++		ADV7180_FLAG_TEST_PATTERN,
+ 	.valid_input_mask = BIT(ADV7182_INPUT_CVBS_AIN1) |
+ 		BIT(ADV7182_INPUT_CVBS_AIN2) |
+ 		BIT(ADV7182_INPUT_CVBS_AIN3) |
+@@ -1291,7 +1299,8 @@ static const struct adv7180_chip_info adv7281_m_info = {
+ };
+ 
+ static const struct adv7180_chip_info adv7281_ma_info = {
+-	.flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2,
++	.flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2 |
++		ADV7180_FLAG_TEST_PATTERN,
+ 	.valid_input_mask = BIT(ADV7182_INPUT_CVBS_AIN1) |
+ 		BIT(ADV7182_INPUT_CVBS_AIN2) |
+ 		BIT(ADV7182_INPUT_CVBS_AIN3) |
+@@ -1316,7 +1325,7 @@ static const struct adv7180_chip_info adv7281_ma_info = {
+ };
+ 
+ static const struct adv7180_chip_info adv7282_info = {
+-	.flags = ADV7180_FLAG_V2 | ADV7180_FLAG_I2P,
++	.flags = ADV7180_FLAG_V2 | ADV7180_FLAG_I2P | ADV7180_FLAG_TEST_PATTERN,
+ 	.valid_input_mask = BIT(ADV7182_INPUT_CVBS_AIN1) |
+ 		BIT(ADV7182_INPUT_CVBS_AIN2) |
+ 		BIT(ADV7182_INPUT_CVBS_AIN7) |
+@@ -1331,7 +1340,8 @@ static const struct adv7180_chip_info adv7282_info = {
+ };
+ 
+ static const struct adv7180_chip_info adv7282_m_info = {
+-	.flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2 | ADV7180_FLAG_I2P,
++	.flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2 | ADV7180_FLAG_I2P |
++		ADV7180_FLAG_TEST_PATTERN,
+ 	.valid_input_mask = BIT(ADV7182_INPUT_CVBS_AIN1) |
+ 		BIT(ADV7182_INPUT_CVBS_AIN2) |
+ 		BIT(ADV7182_INPUT_CVBS_AIN3) |
+diff --git a/drivers/media/i2c/imx219.c b/drivers/media/i2c/imx219.c
+index 906aa314b7f84c..eaa1496c71bb2e 100644
+--- a/drivers/media/i2c/imx219.c
++++ b/drivers/media/i2c/imx219.c
+@@ -74,7 +74,7 @@
+ #define IMX219_REG_VTS			CCI_REG16(0x0160)
+ #define IMX219_VTS_MAX			0xffff
+ 
+-#define IMX219_VBLANK_MIN		4
++#define IMX219_VBLANK_MIN		32
+ 
+ /* HBLANK control - read only */
+ #define IMX219_PPL_DEFAULT		3448
+diff --git a/drivers/media/i2c/imx335.c b/drivers/media/i2c/imx335.c
+index fcfd1d851bd4aa..0beb80b8c45815 100644
+--- a/drivers/media/i2c/imx335.c
++++ b/drivers/media/i2c/imx335.c
+@@ -559,12 +559,14 @@ static int imx335_set_ctrl(struct v4l2_ctrl *ctrl)
+ 			imx335->vblank,
+ 			imx335->vblank + imx335->cur_mode->height);
+ 
+-		return __v4l2_ctrl_modify_range(imx335->exp_ctrl,
+-						IMX335_EXPOSURE_MIN,
+-						imx335->vblank +
+-						imx335->cur_mode->height -
+-						IMX335_EXPOSURE_OFFSET,
+-						1, IMX335_EXPOSURE_DEFAULT);
++		ret = __v4l2_ctrl_modify_range(imx335->exp_ctrl,
++					       IMX335_EXPOSURE_MIN,
++					       imx335->vblank +
++					       imx335->cur_mode->height -
++					       IMX335_EXPOSURE_OFFSET,
++					       1, IMX335_EXPOSURE_DEFAULT);
++		if (ret)
++			return ret;
+ 	}
+ 
+ 	/*
+@@ -575,6 +577,13 @@ static int imx335_set_ctrl(struct v4l2_ctrl *ctrl)
+ 		return 0;
+ 
+ 	switch (ctrl->id) {
++	case V4L2_CID_VBLANK:
++		exposure = imx335->exp_ctrl->val;
++		analog_gain = imx335->again_ctrl->val;
++
++		ret = imx335_update_exp_gain(imx335, exposure, analog_gain);
++
++		break;
+ 	case V4L2_CID_EXPOSURE:
+ 		exposure = ctrl->val;
+ 		analog_gain = imx335->again_ctrl->val;
+diff --git a/drivers/media/i2c/tc358746.c b/drivers/media/i2c/tc358746.c
+index 389582420ba782..048a1a381b3331 100644
+--- a/drivers/media/i2c/tc358746.c
++++ b/drivers/media/i2c/tc358746.c
+@@ -460,24 +460,20 @@ static int tc358746_apply_misc_config(struct tc358746 *tc358746)
+ 	return err;
+ }
+ 
+-/* Use MHz as base so the div needs no u64 */
+-static u32 tc358746_cfg_to_cnt(unsigned int cfg_val,
+-			       unsigned int clk_mhz,
+-			       unsigned int time_base)
++static u32 tc358746_cfg_to_cnt(unsigned long cfg_val, unsigned long clk_hz,
++			       unsigned long long time_base)
+ {
+-	return DIV_ROUND_UP(cfg_val * clk_mhz, time_base);
++	return div64_u64((u64)cfg_val * clk_hz + time_base - 1, time_base);
+ }
+ 
+-static u32 tc358746_ps_to_cnt(unsigned int cfg_val,
+-			      unsigned int clk_mhz)
++static u32 tc358746_ps_to_cnt(unsigned long cfg_val, unsigned long clk_hz)
+ {
+-	return tc358746_cfg_to_cnt(cfg_val, clk_mhz, USEC_PER_SEC);
++	return tc358746_cfg_to_cnt(cfg_val, clk_hz, PSEC_PER_SEC);
+ }
+ 
+-static u32 tc358746_us_to_cnt(unsigned int cfg_val,
+-			      unsigned int clk_mhz)
++static u32 tc358746_us_to_cnt(unsigned long cfg_val, unsigned long clk_hz)
+ {
+-	return tc358746_cfg_to_cnt(cfg_val, clk_mhz, 1);
++	return tc358746_cfg_to_cnt(cfg_val, clk_hz, USEC_PER_SEC);
+ }
+ 
+ static int tc358746_apply_dphy_config(struct tc358746 *tc358746)
+@@ -492,7 +488,6 @@ static int tc358746_apply_dphy_config(struct tc358746 *tc358746)
+ 
+ 	/* The hs_byte_clk is also called SYSCLK in the excel sheet */
+ 	hs_byte_clk = cfg->hs_clk_rate / 8;
+-	hs_byte_clk /= HZ_PER_MHZ;
+ 	hf_clk = hs_byte_clk / 2;
+ 
+ 	val = tc358746_us_to_cnt(cfg->init, hf_clk) - 1;
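
The converters now work in Hz with a 64-bit round-up division; the old helpers truncated the clock to whole MHz first, which under-counts whenever the byte clock is fractional. A standalone model with example numbers (not taken from any real board):

#include <stdint.h>
#include <stdio.h>

#define PSEC_PER_SEC 1000000000000ULL

/* counts = ceil(cfg_val * clk_hz / time_base), all in 64 bits */
static uint32_t cfg_to_cnt(uint64_t cfg_val, uint64_t clk_hz, uint64_t base)
{
	return (uint32_t)((cfg_val * clk_hz + base - 1) / base);
}

int main(void)
{
	/* 997 MHz HS clock -> 124.625 MHz byte clock; truncating to
	 * 124 MHz before dividing loses the fractional part entirely */
	uint64_t hs_byte_clk = 997000000ULL / 8;

	printf("%u\n", (unsigned int)cfg_to_cnt(60000, hs_byte_clk,
						PSEC_PER_SEC));	/* 8 */
	printf("%u\n", (unsigned int)cfg_to_cnt(100, hs_byte_clk,
						1000000ULL));	/* 12463 */
	return 0;
}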
+diff --git a/drivers/media/platform/qcom/camss/camss-csid.c b/drivers/media/platform/qcom/camss/camss-csid.c
+index 858db5d4ca75c3..e51f2ed3f0315a 100644
+--- a/drivers/media/platform/qcom/camss/camss-csid.c
++++ b/drivers/media/platform/qcom/camss/camss-csid.c
+@@ -683,11 +683,13 @@ static int csid_set_stream(struct v4l2_subdev *sd, int enable)
+ 	int ret;
+ 
+ 	if (enable) {
+-		ret = v4l2_ctrl_handler_setup(&csid->ctrls);
+-		if (ret < 0) {
+-			dev_err(csid->camss->dev,
+-				"could not sync v4l2 controls: %d\n", ret);
+-			return ret;
++		if (csid->testgen.nmodes != CSID_PAYLOAD_MODE_DISABLED) {
++			ret = v4l2_ctrl_handler_setup(&csid->ctrls);
++			if (ret < 0) {
++				dev_err(csid->camss->dev,
++					"could not sync v4l2 controls: %d\n", ret);
++				return ret;
++			}
+ 		}
+ 
+ 		if (!csid->testgen.enabled &&
+@@ -761,7 +763,8 @@ static void csid_try_format(struct csid_device *csid,
+ 		break;
+ 
+ 	case MSM_CSID_PAD_SRC:
+-		if (csid->testgen_mode->cur.val == 0) {
++		if (csid->testgen.nmodes == CSID_PAYLOAD_MODE_DISABLED ||
++		    csid->testgen_mode->cur.val == 0) {
+ 			/* Test generator is disabled, */
+ 			/* keep pad formats in sync */
+ 			u32 code = fmt->code;
+@@ -811,7 +814,8 @@ static int csid_enum_mbus_code(struct v4l2_subdev *sd,
+ 
+ 		code->code = csid->res->formats->formats[code->index].code;
+ 	} else {
+-		if (csid->testgen_mode->cur.val == 0) {
++		if (csid->testgen.nmodes == CSID_PAYLOAD_MODE_DISABLED ||
++		    csid->testgen_mode->cur.val == 0) {
+ 			struct v4l2_mbus_framefmt *sink_fmt;
+ 
+ 			sink_fmt = __csid_get_format(csid, sd_state,
+@@ -1190,7 +1194,8 @@ static int csid_link_setup(struct media_entity *entity,
+ 
+ 		/* If test generator is enabled */
+ 		/* do not allow a link from CSIPHY to CSID */
+-		if (csid->testgen_mode->cur.val != 0)
++		if (csid->testgen.nmodes != CSID_PAYLOAD_MODE_DISABLED &&
++		    csid->testgen_mode->cur.val != 0)
+ 			return -EBUSY;
+ 
+ 		sd = media_entity_to_v4l2_subdev(remote->entity);
+@@ -1283,24 +1288,27 @@ int msm_csid_register_entity(struct csid_device *csid,
+ 		 MSM_CSID_NAME, csid->id);
+ 	v4l2_set_subdevdata(sd, csid);
+ 
+-	ret = v4l2_ctrl_handler_init(&csid->ctrls, 1);
+-	if (ret < 0) {
+-		dev_err(dev, "Failed to init ctrl handler: %d\n", ret);
+-		return ret;
+-	}
++	if (csid->testgen.nmodes != CSID_PAYLOAD_MODE_DISABLED) {
++		ret = v4l2_ctrl_handler_init(&csid->ctrls, 1);
++		if (ret < 0) {
++			dev_err(dev, "Failed to init ctrl handler: %d\n", ret);
++			return ret;
++		}
+ 
+-	csid->testgen_mode = v4l2_ctrl_new_std_menu_items(&csid->ctrls,
+-				&csid_ctrl_ops, V4L2_CID_TEST_PATTERN,
+-				csid->testgen.nmodes, 0, 0,
+-				csid->testgen.modes);
++		csid->testgen_mode =
++			v4l2_ctrl_new_std_menu_items(&csid->ctrls,
++						     &csid_ctrl_ops, V4L2_CID_TEST_PATTERN,
++						     csid->testgen.nmodes, 0, 0,
++						     csid->testgen.modes);
+ 
+-	if (csid->ctrls.error) {
+-		dev_err(dev, "Failed to init ctrl: %d\n", csid->ctrls.error);
+-		ret = csid->ctrls.error;
+-		goto free_ctrl;
+-	}
++		if (csid->ctrls.error) {
++			dev_err(dev, "Failed to init ctrl: %d\n", csid->ctrls.error);
++			ret = csid->ctrls.error;
++			goto free_ctrl;
++		}
+ 
+-	csid->subdev.ctrl_handler = &csid->ctrls;
++		csid->subdev.ctrl_handler = &csid->ctrls;
++	}
+ 
+ 	ret = csid_init_formats(sd, NULL);
+ 	if (ret < 0) {
+@@ -1331,7 +1339,8 @@ int msm_csid_register_entity(struct csid_device *csid,
+ media_cleanup:
+ 	media_entity_cleanup(&sd->entity);
+ free_ctrl:
+-	v4l2_ctrl_handler_free(&csid->ctrls);
++	if (csid->testgen.nmodes != CSID_PAYLOAD_MODE_DISABLED)
++		v4l2_ctrl_handler_free(&csid->ctrls);
+ 
+ 	return ret;
+ }
+@@ -1344,7 +1353,8 @@ void msm_csid_unregister_entity(struct csid_device *csid)
+ {
+ 	v4l2_device_unregister_subdev(&csid->subdev);
+ 	media_entity_cleanup(&csid->subdev.entity);
+-	v4l2_ctrl_handler_free(&csid->ctrls);
++	if (csid->testgen.nmodes != CSID_PAYLOAD_MODE_DISABLED)
++		v4l2_ctrl_handler_free(&csid->ctrls);
+ }
+ 
+ inline bool csid_is_lite(struct csid_device *csid)
+diff --git a/drivers/media/platform/qcom/camss/camss-vfe.c b/drivers/media/platform/qcom/camss/camss-vfe.c
+index 83c5a36d071fcc..8f6b0eccefb48f 100644
+--- a/drivers/media/platform/qcom/camss/camss-vfe.c
++++ b/drivers/media/platform/qcom/camss/camss-vfe.c
+@@ -398,6 +398,10 @@ static u32 vfe_src_pad_code(struct vfe_line *line, u32 sink_code,
+ 			return sink_code;
+ 		}
+ 		break;
++	default:
++		WARN(1, "Unsupported HW version: %x\n",
++		     vfe->camss->res->version);
++		break;
+ 	}
+ 	return 0;
+ }
+diff --git a/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c b/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c
+index 67d3d6e50d2e2b..ed3a107965cc97 100644
+--- a/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c
++++ b/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c
+@@ -797,13 +797,12 @@ static int c8sectpfe_probe(struct platform_device *pdev)
+ 		}
+ 		tsin->i2c_adapter =
+ 			of_find_i2c_adapter_by_node(i2c_bus);
++		of_node_put(i2c_bus);
+ 		if (!tsin->i2c_adapter) {
+ 			dev_err(&pdev->dev, "No i2c adapter found\n");
+-			of_node_put(i2c_bus);
+ 			ret = -ENODEV;
+ 			goto err_node_put;
+ 		}
+-		of_node_put(i2c_bus);
+ 
+ 		/* Acquire reset GPIO and activate it */
+ 		tsin->rst_gpio = devm_fwnode_gpiod_get(dev,
+diff --git a/drivers/media/test-drivers/vivid/vivid-kthread-cap.c b/drivers/media/test-drivers/vivid/vivid-kthread-cap.c
+index 669bd96da4c795..273e8ed8c2a908 100644
+--- a/drivers/media/test-drivers/vivid/vivid-kthread-cap.c
++++ b/drivers/media/test-drivers/vivid/vivid-kthread-cap.c
+@@ -789,9 +789,14 @@ static int vivid_thread_vid_cap(void *data)
+ 			next_jiffies_since_start = jiffies_since_start;
+ 
+ 		wait_jiffies = next_jiffies_since_start - jiffies_since_start;
+-		while (time_is_after_jiffies(cur_jiffies + wait_jiffies) &&
+-		       !kthread_should_stop())
+-			schedule();
++		if (!time_is_after_jiffies(cur_jiffies + wait_jiffies))
++			continue;
++
++		wait_queue_head_t wait;
++
++		init_waitqueue_head(&wait);
++		wait_event_interruptible_timeout(wait, kthread_should_stop(),
++					cur_jiffies + wait_jiffies - jiffies);
+ 	}
+ 	dprintk(dev, 1, "Video Capture Thread End\n");
+ 	return 0;
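
The loop now sleeps on a throwaway waitqueue for the remaining interval instead of calling schedule() in a tight loop. The remaining-time expression, cur_jiffies + wait_jiffies - jiffies, depends on the usual wrap-safe jiffies arithmetic, sketched here in userspace:

#include <stdio.h>

typedef unsigned long jiffies_t;

/* kernel-style time_after(a, b): true if a is later; wrap-safe as long
 * as the two stamps are less than half the counter range apart */
static int time_after_j(jiffies_t a, jiffies_t b)
{
	return (long)(b - a) < 0;
}

int main(void)
{
	jiffies_t now = ~0UL - 5;	/* six ticks before wraparound */
	jiffies_t deadline = now + 10;	/* wraps past zero */

	printf("after=%d remaining=%lu\n",
	       time_after_j(deadline, now),
	       (unsigned long)(deadline - now));	/* after=1 remaining=10 */
	return 0;
}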
+diff --git a/drivers/media/test-drivers/vivid/vivid-kthread-out.c b/drivers/media/test-drivers/vivid/vivid-kthread-out.c
+index fac6208b51da84..015a7b166a1e61 100644
+--- a/drivers/media/test-drivers/vivid/vivid-kthread-out.c
++++ b/drivers/media/test-drivers/vivid/vivid-kthread-out.c
+@@ -235,9 +235,14 @@ static int vivid_thread_vid_out(void *data)
+ 			next_jiffies_since_start = jiffies_since_start;
+ 
+ 		wait_jiffies = next_jiffies_since_start - jiffies_since_start;
+-		while (time_is_after_jiffies(cur_jiffies + wait_jiffies) &&
+-		       !kthread_should_stop())
+-			schedule();
++		if (!time_is_after_jiffies(cur_jiffies + wait_jiffies))
++			continue;
++
++		wait_queue_head_t wait;
++
++		init_waitqueue_head(&wait);
++		wait_event_interruptible_timeout(wait, kthread_should_stop(),
++					cur_jiffies + wait_jiffies - jiffies);
+ 	}
+ 	dprintk(dev, 1, "Video Output Thread End\n");
+ 	return 0;
+diff --git a/drivers/media/test-drivers/vivid/vivid-kthread-touch.c b/drivers/media/test-drivers/vivid/vivid-kthread-touch.c
+index fa711ee36a3fbc..c862689786b69c 100644
+--- a/drivers/media/test-drivers/vivid/vivid-kthread-touch.c
++++ b/drivers/media/test-drivers/vivid/vivid-kthread-touch.c
+@@ -135,9 +135,14 @@ static int vivid_thread_touch_cap(void *data)
+ 			next_jiffies_since_start = jiffies_since_start;
+ 
+ 		wait_jiffies = next_jiffies_since_start - jiffies_since_start;
+-		while (time_is_after_jiffies(cur_jiffies + wait_jiffies) &&
+-		       !kthread_should_stop())
+-			schedule();
++		if (!time_is_after_jiffies(cur_jiffies + wait_jiffies))
++			continue;
++
++		wait_queue_head_t wait;
++
++		init_waitqueue_head(&wait);
++		wait_event_interruptible_timeout(wait, kthread_should_stop(),
++					cur_jiffies + wait_jiffies - jiffies);
+ 	}
+ 	dprintk(dev, 1, "Touch Capture Thread End\n");
+ 	return 0;
+diff --git a/drivers/media/test-drivers/vivid/vivid-sdr-cap.c b/drivers/media/test-drivers/vivid/vivid-sdr-cap.c
+index 38cda33dffb2ab..97cfc58b70571e 100644
+--- a/drivers/media/test-drivers/vivid/vivid-sdr-cap.c
++++ b/drivers/media/test-drivers/vivid/vivid-sdr-cap.c
+@@ -206,9 +206,14 @@ static int vivid_thread_sdr_cap(void *data)
+ 			next_jiffies_since_start = jiffies_since_start;
+ 
+ 		wait_jiffies = next_jiffies_since_start - jiffies_since_start;
+-		while (time_is_after_jiffies(cur_jiffies + wait_jiffies) &&
+-		       !kthread_should_stop())
+-			schedule();
++		if (!time_is_after_jiffies(cur_jiffies + wait_jiffies))
++			continue;
++
++		wait_queue_head_t wait;
++
++		init_waitqueue_head(&wait);
++		wait_event_interruptible_timeout(wait, kthread_should_stop(),
++					cur_jiffies + wait_jiffies - jiffies);
+ 	}
+ 	dprintk(dev, 1, "SDR Capture Thread End\n");
+ 	return 0;
+diff --git a/drivers/media/usb/cx231xx/cx231xx-417.c b/drivers/media/usb/cx231xx/cx231xx-417.c
+index abb967c8bd352c..6cb130eb32d582 100644
+--- a/drivers/media/usb/cx231xx/cx231xx-417.c
++++ b/drivers/media/usb/cx231xx/cx231xx-417.c
+@@ -1722,6 +1722,8 @@ static void cx231xx_video_dev_init(
+ 	vfd->lock = &dev->lock;
+ 	vfd->release = video_device_release_empty;
+ 	vfd->ctrl_handler = &dev->mpeg_ctrl_handler.hdl;
++	vfd->device_caps = V4L2_CAP_READWRITE | V4L2_CAP_STREAMING |
++			   V4L2_CAP_VIDEO_CAPTURE;
+ 	video_set_drvdata(vfd, dev);
+ 	if (dev->tuner_type == TUNER_ABSENT) {
+ 		v4l2_disable_ioctl(vfd, VIDIOC_G_FREQUENCY);
+diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
+index 4837d8df9c0386..58d1bc80253e87 100644
+--- a/drivers/media/usb/uvc/uvc_ctrl.c
++++ b/drivers/media/usb/uvc/uvc_ctrl.c
+@@ -862,6 +862,25 @@ static inline void uvc_clear_bit(u8 *data, int bit)
+ 	data[bit >> 3] &= ~(1 << (bit & 7));
+ }
+ 
++static s32 uvc_menu_to_v4l2_menu(struct uvc_control_mapping *mapping, s32 val)
++{
++	unsigned int i;
++
++	for (i = 0; BIT(i) <= mapping->menu_mask; ++i) {
++		u32 menu_value;
++
++		if (!test_bit(i, &mapping->menu_mask))
++			continue;
++
++		menu_value = uvc_mapping_get_menu_value(mapping, i);
++
++		if (menu_value == val)
++			return i;
++	}
++
++	return val;
++}
++
+ /*
+  * Extract the bit string specified by mapping->offset and mapping->size
+  * from the little-endian data stored at 'data' and return the result as
+@@ -896,6 +915,16 @@ static s32 uvc_get_le_value(struct uvc_control_mapping *mapping,
+ 	if (mapping->data_type == UVC_CTRL_DATA_TYPE_SIGNED)
+ 		value |= -(value & (1 << (mapping->size - 1)));
+ 
++	/* If it is a menu, convert from uvc to v4l2. */
++	if (mapping->v4l2_type != V4L2_CTRL_TYPE_MENU)
++		return value;
++
++	switch (query) {
++	case UVC_GET_CUR:
++	case UVC_GET_DEF:
++		return uvc_menu_to_v4l2_menu(mapping, value);
++	}
++
+ 	return value;
+ }
+ 
+@@ -1060,32 +1089,6 @@ static int uvc_ctrl_populate_cache(struct uvc_video_chain *chain,
+ 	return 0;
+ }
+ 
+-static s32 __uvc_ctrl_get_value(struct uvc_control_mapping *mapping,
+-				const u8 *data)
+-{
+-	s32 value = mapping->get(mapping, UVC_GET_CUR, data);
+-
+-	if (mapping->v4l2_type == V4L2_CTRL_TYPE_MENU) {
+-		unsigned int i;
+-
+-		for (i = 0; BIT(i) <= mapping->menu_mask; ++i) {
+-			u32 menu_value;
+-
+-			if (!test_bit(i, &mapping->menu_mask))
+-				continue;
+-
+-			menu_value = uvc_mapping_get_menu_value(mapping, i);
+-
+-			if (menu_value == value) {
+-				value = i;
+-				break;
+-			}
+-		}
+-	}
+-
+-	return value;
+-}
+-
+ static int __uvc_ctrl_load_cur(struct uvc_video_chain *chain,
+ 			       struct uvc_control *ctrl)
+ {
+@@ -1136,8 +1139,8 @@ static int __uvc_ctrl_get(struct uvc_video_chain *chain,
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	*value = __uvc_ctrl_get_value(mapping,
+-				uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT));
++	*value = mapping->get(mapping, UVC_GET_CUR,
++			      uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT));
+ 
+ 	return 0;
+ }
+@@ -1287,7 +1290,6 @@ static int __uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
+ {
+ 	struct uvc_control_mapping *master_map = NULL;
+ 	struct uvc_control *master_ctrl = NULL;
+-	unsigned int i;
+ 
+ 	memset(v4l2_ctrl, 0, sizeof(*v4l2_ctrl));
+ 	v4l2_ctrl->id = mapping->id;
+@@ -1330,21 +1332,6 @@ static int __uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
+ 		v4l2_ctrl->minimum = ffs(mapping->menu_mask) - 1;
+ 		v4l2_ctrl->maximum = fls(mapping->menu_mask) - 1;
+ 		v4l2_ctrl->step = 1;
+-
+-		for (i = 0; BIT(i) <= mapping->menu_mask; ++i) {
+-			u32 menu_value;
+-
+-			if (!test_bit(i, &mapping->menu_mask))
+-				continue;
+-
+-			menu_value = uvc_mapping_get_menu_value(mapping, i);
+-
+-			if (menu_value == v4l2_ctrl->default_value) {
+-				v4l2_ctrl->default_value = i;
+-				break;
+-			}
+-		}
+-
+ 		return 0;
+ 
+ 	case V4L2_CTRL_TYPE_BOOLEAN:
+@@ -1627,7 +1614,7 @@ void uvc_ctrl_status_event(struct uvc_video_chain *chain,
+ 		uvc_ctrl_set_handle(handle, ctrl, NULL);
+ 
+ 	list_for_each_entry(mapping, &ctrl->info.mappings, list) {
+-		s32 value = __uvc_ctrl_get_value(mapping, data);
++		s32 value = mapping->get(mapping, UVC_GET_CUR, data);
+ 
+ 		/*
+ 		 * handle may be NULL here if the device sends auto-update
+diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
+index 7bcd706281daf3..cb7d9fb589fca9 100644
+--- a/drivers/media/usb/uvc/uvc_v4l2.c
++++ b/drivers/media/usb/uvc/uvc_v4l2.c
+@@ -106,6 +106,12 @@ static int uvc_ioctl_xu_ctrl_map(struct uvc_video_chain *chain,
+ 	struct uvc_control_mapping *map;
+ 	int ret;
+ 
++	if (xmap->data_type > UVC_CTRL_DATA_TYPE_BITMASK) {
++		uvc_dbg(chain->dev, CONTROL,
++			"Unsupported UVC data type %u\n", xmap->data_type);
++		return -EINVAL;
++	}
++
+ 	map = kzalloc(sizeof(*map), GFP_KERNEL);
+ 	if (map == NULL)
+ 		return -ENOMEM;
+diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
+index 3a4ba08810d249..1193852bad599b 100644
+--- a/drivers/media/v4l2-core/v4l2-subdev.c
++++ b/drivers/media/v4l2-core/v4l2-subdev.c
+@@ -439,6 +439,8 @@ static int call_enum_dv_timings(struct v4l2_subdev *sd,
+ static int call_get_mbus_config(struct v4l2_subdev *sd, unsigned int pad,
+ 				struct v4l2_mbus_config *config)
+ {
++	memset(config, 0, sizeof(*config));
++
+ 	return check_pad(sd, pad) ? :
+ 	       sd->ops->pad->get_mbus_config(sd, pad, config);
+ }
+diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
+index 4051551757f2dc..3780929039710c 100644
+--- a/drivers/mfd/axp20x.c
++++ b/drivers/mfd/axp20x.c
+@@ -214,6 +214,7 @@ static const struct regmap_range axp717_writeable_ranges[] = {
+ 	regmap_reg_range(AXP717_VSYS_V_POWEROFF, AXP717_VSYS_V_POWEROFF),
+ 	regmap_reg_range(AXP717_IRQ0_EN, AXP717_IRQ4_EN),
+ 	regmap_reg_range(AXP717_IRQ0_STATE, AXP717_IRQ4_STATE),
++	regmap_reg_range(AXP717_TS_PIN_CFG, AXP717_TS_PIN_CFG),
+ 	regmap_reg_range(AXP717_ICC_CHG_SET, AXP717_CV_CHG_SET),
+ 	regmap_reg_range(AXP717_DCDC_OUTPUT_CONTROL, AXP717_CPUSLDO_CONTROL),
+ 	regmap_reg_range(AXP717_ADC_CH_EN_CONTROL, AXP717_ADC_CH_EN_CONTROL),
+diff --git a/drivers/mfd/tps65219.c b/drivers/mfd/tps65219.c
+index 57ff5cb294a664..d3b77abec786e3 100644
+--- a/drivers/mfd/tps65219.c
++++ b/drivers/mfd/tps65219.c
+@@ -228,7 +228,6 @@ static const struct regmap_irq_chip tps65219_irq_chip = {
+ static int tps65219_probe(struct i2c_client *client)
+ {
+ 	struct tps65219 *tps;
+-	unsigned int chipid;
+ 	bool pwr_button;
+ 	int ret;
+ 
+@@ -253,12 +252,6 @@ static int tps65219_probe(struct i2c_client *client)
+ 	if (ret)
+ 		return ret;
+ 
+-	ret = regmap_read(tps->regmap, TPS65219_REG_TI_DEV_ID, &chipid);
+-	if (ret) {
+-		dev_err(tps->dev, "Failed to read device ID: %d\n", ret);
+-		return ret;
+-	}
+-
+ 	ret = devm_mfd_add_devices(tps->dev, PLATFORM_DEVID_AUTO,
+ 				   tps65219_cells, ARRAY_SIZE(tps65219_cells),
+ 				   NULL, 0, regmap_irq_get_domain(tps->irq_data));
+diff --git a/drivers/misc/eeprom/ee1004.c b/drivers/misc/eeprom/ee1004.c
+index 89224d4af4a201..e13f9fdd9d7b1c 100644
+--- a/drivers/misc/eeprom/ee1004.c
++++ b/drivers/misc/eeprom/ee1004.c
+@@ -304,6 +304,10 @@ static int ee1004_probe(struct i2c_client *client)
+ 				     I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_READ_BYTE_DATA))
+ 		return -EPFNOSUPPORT;
+ 
++	err = i2c_smbus_read_byte(client);
++	if (err < 0)
++		return -ENODEV;
++
+ 	mutex_lock(&ee1004_bus_lock);
+ 
+ 	err = ee1004_init_bus_data(client);
+diff --git a/drivers/misc/mei/vsc-tp.c b/drivers/misc/mei/vsc-tp.c
+index eb51fbe8d92fb4..ad7c7f1573191f 100644
+--- a/drivers/misc/mei/vsc-tp.c
++++ b/drivers/misc/mei/vsc-tp.c
+@@ -71,8 +71,8 @@ struct vsc_tp {
+ 	u32 seq;
+ 
+ 	/* command buffer */
+-	void *tx_buf;
+-	void *rx_buf;
++	struct vsc_tp_packet *tx_buf;
++	struct vsc_tp_packet *rx_buf;
+ 
+ 	atomic_t assert_cnt;
+ 	wait_queue_head_t xfer_wait;
+@@ -164,7 +164,7 @@ static int vsc_tp_xfer_helper(struct vsc_tp *tp, struct vsc_tp_packet *pkt,
+ {
+ 	int ret, offset = 0, cpy_len, src_len, dst_len = sizeof(struct vsc_tp_packet_hdr);
+ 	int next_xfer_len = VSC_TP_PACKET_SIZE(pkt) + VSC_TP_XFER_TIMEOUT_BYTES;
+-	u8 *src, *crc_src, *rx_buf = tp->rx_buf;
++	u8 *src, *crc_src, *rx_buf = (u8 *)tp->rx_buf;
+ 	int count_down = VSC_TP_MAX_XFER_COUNT;
+ 	u32 recv_crc = 0, crc = ~0;
+ 	struct vsc_tp_packet_hdr ack;
+@@ -324,7 +324,7 @@ int vsc_tp_rom_xfer(struct vsc_tp *tp, const void *obuf, void *ibuf, size_t len)
+ 	guard(mutex)(&tp->mutex);
+ 
+ 	/* rom xfer is big endian */
+-	cpu_to_be32_array(tp->tx_buf, obuf, words);
++	cpu_to_be32_array((u32 *)tp->tx_buf, obuf, words);
+ 
+ 	ret = read_poll_timeout(gpiod_get_value_cansleep, ret,
+ 				!ret, VSC_TP_ROM_XFER_POLL_DELAY_US,
+@@ -340,7 +340,7 @@ int vsc_tp_rom_xfer(struct vsc_tp *tp, const void *obuf, void *ibuf, size_t len)
+ 		return ret;
+ 
+ 	if (ibuf)
+-		be32_to_cpu_array(ibuf, tp->rx_buf, words);
++		be32_to_cpu_array(ibuf, (u32 *)tp->rx_buf, words);
+ 
+ 	return ret;
+ }
+@@ -496,11 +496,11 @@ static int vsc_tp_probe(struct spi_device *spi)
+ 	if (!tp)
+ 		return -ENOMEM;
+ 
+-	tp->tx_buf = devm_kzalloc(dev, VSC_TP_MAX_XFER_SIZE, GFP_KERNEL);
++	tp->tx_buf = devm_kzalloc(dev, sizeof(*tp->tx_buf), GFP_KERNEL);
+ 	if (!tp->tx_buf)
+ 		return -ENOMEM;
+ 
+-	tp->rx_buf = devm_kzalloc(dev, VSC_TP_MAX_XFER_SIZE, GFP_KERNEL);
++	tp->rx_buf = devm_kzalloc(dev, sizeof(*tp->rx_buf), GFP_KERNEL);
+ 	if (!tp->rx_buf)
+ 		return -ENOMEM;
+ 
+diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c
+index e22afb420d099e..f05256b7c2084c 100644
+--- a/drivers/misc/pci_endpoint_test.c
++++ b/drivers/misc/pci_endpoint_test.c
+@@ -287,11 +287,13 @@ static bool pci_endpoint_test_bar(struct pci_endpoint_test *test,
+ 	void *read_buf __free(kfree) = NULL;
+ 	struct pci_dev *pdev = test->pdev;
+ 
++	bar_size = pci_resource_len(pdev, barno);
++	if (!bar_size)
++		return -ENODATA;
++
+ 	if (!test->bar[barno])
+ 		return false;
+ 
+-	bar_size = pci_resource_len(pdev, barno);
+-
+ 	if (barno == test->test_reg_bar)
+ 		bar_size = 0x4;
+ 
+diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c
+index 6dc057718d2cb8..89682f10e69f3a 100644
+--- a/drivers/mmc/host/dw_mmc-exynos.c
++++ b/drivers/mmc/host/dw_mmc-exynos.c
+@@ -27,6 +27,8 @@ enum dw_mci_exynos_type {
+ 	DW_MCI_TYPE_EXYNOS5420_SMU,
+ 	DW_MCI_TYPE_EXYNOS7,
+ 	DW_MCI_TYPE_EXYNOS7_SMU,
++	DW_MCI_TYPE_EXYNOS7870,
++	DW_MCI_TYPE_EXYNOS7870_SMU,
+ 	DW_MCI_TYPE_ARTPEC8,
+ };
+ 
+@@ -69,6 +71,12 @@ static struct dw_mci_exynos_compatible {
+ 	}, {
+ 		.compatible	= "samsung,exynos7-dw-mshc-smu",
+ 		.ctrl_type	= DW_MCI_TYPE_EXYNOS7_SMU,
++	}, {
++		.compatible	= "samsung,exynos7870-dw-mshc",
++		.ctrl_type	= DW_MCI_TYPE_EXYNOS7870,
++	}, {
++		.compatible	= "samsung,exynos7870-dw-mshc-smu",
++		.ctrl_type	= DW_MCI_TYPE_EXYNOS7870_SMU,
+ 	}, {
+ 		.compatible	= "axis,artpec8-dw-mshc",
+ 		.ctrl_type	= DW_MCI_TYPE_ARTPEC8,
+@@ -85,6 +93,8 @@ static inline u8 dw_mci_exynos_get_ciu_div(struct dw_mci *host)
+ 		return EXYNOS4210_FIXED_CIU_CLK_DIV;
+ 	else if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
+ 			priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
++			priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 ||
++			priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU ||
+ 			priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)
+ 		return SDMMC_CLKSEL_GET_DIV(mci_readl(host, CLKSEL64)) + 1;
+ 	else
+@@ -100,7 +110,8 @@ static void dw_mci_exynos_config_smu(struct dw_mci *host)
+ 	 * set for non-ecryption mode at this time.
+ 	 */
+ 	if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS5420_SMU ||
+-		priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU) {
++		priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
++		priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU) {
+ 		mci_writel(host, MPSBEGIN0, 0);
+ 		mci_writel(host, MPSEND0, SDMMC_ENDING_SEC_NR_MAX);
+ 		mci_writel(host, MPSCTRL0, SDMMC_MPSCTRL_SECURE_WRITE_BIT |
+@@ -126,6 +137,12 @@ static int dw_mci_exynos_priv_init(struct dw_mci *host)
+ 				DQS_CTRL_GET_RD_DELAY(priv->saved_strobe_ctrl);
+ 	}
+ 
++	if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 ||
++		priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU) {
++		/* Quirk needed for Exynos7870 SoCs */
++		host->quirks |= DW_MMC_QUIRK_FIFO64_32;
++	}
++
+ 	if (priv->ctrl_type == DW_MCI_TYPE_ARTPEC8) {
+ 		/* Quirk needed for the ARTPEC-8 SoC */
+ 		host->quirks |= DW_MMC_QUIRK_EXTENDED_TMOUT;
+@@ -143,6 +160,8 @@ static void dw_mci_exynos_set_clksel_timing(struct dw_mci *host, u32 timing)
+ 
+ 	if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
+ 		priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
++		priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 ||
++		priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU ||
+ 		priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)
+ 		clksel = mci_readl(host, CLKSEL64);
+ 	else
+@@ -152,6 +171,8 @@ static void dw_mci_exynos_set_clksel_timing(struct dw_mci *host, u32 timing)
+ 
+ 	if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
+ 		priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
++		priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 ||
++		priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU ||
+ 		priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)
+ 		mci_writel(host, CLKSEL64, clksel);
+ 	else
+@@ -222,6 +243,8 @@ static int dw_mci_exynos_resume_noirq(struct device *dev)
+ 
+ 	if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
+ 		priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
++		priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 ||
++		priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU ||
+ 		priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)
+ 		clksel = mci_readl(host, CLKSEL64);
+ 	else
+@@ -230,6 +253,8 @@ static int dw_mci_exynos_resume_noirq(struct device *dev)
+ 	if (clksel & SDMMC_CLKSEL_WAKEUP_INT) {
+ 		if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
+ 			priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
++			priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 ||
++			priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU ||
+ 			priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)
+ 			mci_writel(host, CLKSEL64, clksel);
+ 		else
+@@ -409,6 +434,8 @@ static inline u8 dw_mci_exynos_get_clksmpl(struct dw_mci *host)
+ 
+ 	if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
+ 		priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
++		priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 ||
++		priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU ||
+ 		priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)
+ 		return SDMMC_CLKSEL_CCLK_SAMPLE(mci_readl(host, CLKSEL64));
+ 	else
+@@ -422,6 +449,8 @@ static inline void dw_mci_exynos_set_clksmpl(struct dw_mci *host, u8 sample)
+ 
+ 	if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
+ 		priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
++		priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 ||
++		priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU ||
+ 		priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)
+ 		clksel = mci_readl(host, CLKSEL64);
+ 	else
+@@ -429,6 +458,8 @@ static inline void dw_mci_exynos_set_clksmpl(struct dw_mci *host, u8 sample)
+ 	clksel = SDMMC_CLKSEL_UP_SAMPLE(clksel, sample);
+ 	if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
+ 		priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
++		priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 ||
++		priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU ||
+ 		priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)
+ 		mci_writel(host, CLKSEL64, clksel);
+ 	else
+@@ -443,6 +474,8 @@ static inline u8 dw_mci_exynos_move_next_clksmpl(struct dw_mci *host)
+ 
+ 	if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
+ 		priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
++		priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 ||
++		priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU ||
+ 		priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)
+ 		clksel = mci_readl(host, CLKSEL64);
+ 	else
+@@ -453,6 +486,8 @@ static inline u8 dw_mci_exynos_move_next_clksmpl(struct dw_mci *host)
+ 
+ 	if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
+ 		priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
++		priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 ||
++		priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU ||
+ 		priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)
+ 		mci_writel(host, CLKSEL64, clksel);
+ 	else
+@@ -632,6 +667,10 @@ static const struct of_device_id dw_mci_exynos_match[] = {
+ 			.data = &exynos_drv_data, },
+ 	{ .compatible = "samsung,exynos7-dw-mshc-smu",
+ 			.data = &exynos_drv_data, },
++	{ .compatible = "samsung,exynos7870-dw-mshc",
++			.data = &exynos_drv_data, },
++	{ .compatible = "samsung,exynos7870-dw-mshc-smu",
++			.data = &exynos_drv_data, },
+ 	{ .compatible = "axis,artpec8-dw-mshc",
+ 			.data = &artpec_drv_data, },
+ 	{},
+diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
+index 2e2e15e2d8fb8b..b0b1d403f35276 100644
+--- a/drivers/mmc/host/sdhci-pci-core.c
++++ b/drivers/mmc/host/sdhci-pci-core.c
+@@ -609,8 +609,12 @@ static void sdhci_intel_set_power(struct sdhci_host *host, unsigned char mode,
+ 
+ 	sdhci_set_power(host, mode, vdd);
+ 
+-	if (mode == MMC_POWER_OFF)
++	if (mode == MMC_POWER_OFF) {
++		if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD ||
++		    slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BYT_SD)
++			usleep_range(15000, 17500);
+ 		return;
++	}
+ 
+ 	/*
+ 	 * Bus power might not enable after D3 -> D0 transition due to the
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index 4b91c9e9663575..8ae76300d157d0 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -2035,10 +2035,15 @@ void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
+ 
+ 	host->mmc->actual_clock = 0;
+ 
+-	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
++	clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
++	if (clk & SDHCI_CLOCK_CARD_EN)
++		sdhci_writew(host, clk & ~SDHCI_CLOCK_CARD_EN,
++			SDHCI_CLOCK_CONTROL);
+ 
+-	if (clock == 0)
++	if (clock == 0) {
++		sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
+ 		return;
++	}
+ 
+ 	clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
+ 	sdhci_enable_clk(host, clk);
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 4d73abae503d1e..4d2e30f4ee2507 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -2542,7 +2542,7 @@ static int __bond_release_one(struct net_device *bond_dev,
+ 
+ 	RCU_INIT_POINTER(bond->current_arp_slave, NULL);
+ 
+-	if (!all && (!bond->params.fail_over_mac ||
++	if (!all && (bond->params.fail_over_mac != BOND_FOM_ACTIVE ||
+ 		     BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP)) {
+ 		if (ether_addr_equal_64bits(bond_dev->dev_addr, slave->perm_hwaddr) &&
+ 		    bond_has_slaves(bond))
+diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
+index 399844809bbeaa..bb6071a758f365 100644
+--- a/drivers/net/can/c_can/c_can_platform.c
++++ b/drivers/net/can/c_can/c_can_platform.c
+@@ -324,7 +324,7 @@ static int c_can_plat_probe(struct platform_device *pdev)
+ 		/* Check if we need custom RAMINIT via syscon. Mostly for TI
+ 		 * platforms. Only supported with DT boot.
+ 		 */
+-		if (np && of_property_read_bool(np, "syscon-raminit")) {
++		if (np && of_property_present(np, "syscon-raminit")) {
+ 			u32 id;
+ 			struct c_can_raminit *raminit = &priv->raminit_sys;
+ 
+diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c
+index fee012b57f3371..020e5897812fed 100644
+--- a/drivers/net/can/kvaser_pciefd.c
++++ b/drivers/net/can/kvaser_pciefd.c
+@@ -16,6 +16,7 @@
+ #include <linux/netdevice.h>
+ #include <linux/pci.h>
+ #include <linux/timer.h>
++#include <net/netdev_queues.h>
+ 
+ MODULE_LICENSE("Dual BSD/GPL");
+ MODULE_AUTHOR("Kvaser AB <support@kvaser.com>");
+@@ -410,10 +411,13 @@ struct kvaser_pciefd_can {
+ 	void __iomem *reg_base;
+ 	struct can_berr_counter bec;
+ 	u8 cmd_seq;
++	u8 tx_max_count;
++	u8 tx_idx;
++	u8 ack_idx;
+ 	int err_rep_cnt;
+-	int echo_idx;
++	unsigned int completed_tx_pkts;
++	unsigned int completed_tx_bytes;
+ 	spinlock_t lock; /* Locks sensitive registers (e.g. MODE) */
+-	spinlock_t echo_lock; /* Locks the message echo buffer */
+ 	struct timer_list bec_poll_timer;
+ 	struct completion start_comp, flush_comp;
+ };
+@@ -714,6 +718,9 @@ static int kvaser_pciefd_open(struct net_device *netdev)
+ 	int ret;
+ 	struct kvaser_pciefd_can *can = netdev_priv(netdev);
+ 
++	can->tx_idx = 0;
++	can->ack_idx = 0;
++
+ 	ret = open_candev(netdev);
+ 	if (ret)
+ 		return ret;
+@@ -745,21 +752,26 @@ static int kvaser_pciefd_stop(struct net_device *netdev)
+ 		del_timer(&can->bec_poll_timer);
+ 	}
+ 	can->can.state = CAN_STATE_STOPPED;
++	netdev_reset_queue(netdev);
+ 	close_candev(netdev);
+ 
+ 	return ret;
+ }
+ 
++static unsigned int kvaser_pciefd_tx_avail(const struct kvaser_pciefd_can *can)
++{
++	return can->tx_max_count - (READ_ONCE(can->tx_idx) - READ_ONCE(can->ack_idx));
++}
++
+ static int kvaser_pciefd_prepare_tx_packet(struct kvaser_pciefd_tx_packet *p,
+-					   struct kvaser_pciefd_can *can,
++					   struct can_priv *can, u8 seq,
+ 					   struct sk_buff *skb)
+ {
+ 	struct canfd_frame *cf = (struct canfd_frame *)skb->data;
+ 	int packet_size;
+-	int seq = can->echo_idx;
+ 
+ 	memset(p, 0, sizeof(*p));
+-	if (can->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
++	if (can->ctrlmode & CAN_CTRLMODE_ONE_SHOT)
+ 		p->header[1] |= KVASER_PCIEFD_TPACKET_SMS;
+ 
+ 	if (cf->can_id & CAN_RTR_FLAG)
+@@ -782,7 +794,7 @@ static int kvaser_pciefd_prepare_tx_packet(struct kvaser_pciefd_tx_packet *p,
+ 	} else {
+ 		p->header[1] |=
+ 			FIELD_PREP(KVASER_PCIEFD_RPACKET_DLC_MASK,
+-				   can_get_cc_dlc((struct can_frame *)cf, can->can.ctrlmode));
++				   can_get_cc_dlc((struct can_frame *)cf, can->ctrlmode));
+ 	}
+ 
+ 	p->header[1] |= FIELD_PREP(KVASER_PCIEFD_PACKET_SEQ_MASK, seq);
+@@ -797,22 +809,24 @@ static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb,
+ 					    struct net_device *netdev)
+ {
+ 	struct kvaser_pciefd_can *can = netdev_priv(netdev);
+-	unsigned long irq_flags;
+ 	struct kvaser_pciefd_tx_packet packet;
++	unsigned int seq = can->tx_idx & (can->can.echo_skb_max - 1);
++	unsigned int frame_len;
+ 	int nr_words;
+-	u8 count;
+ 
+ 	if (can_dev_dropped_skb(netdev, skb))
+ 		return NETDEV_TX_OK;
++	if (!netif_subqueue_maybe_stop(netdev, 0, kvaser_pciefd_tx_avail(can), 1, 1))
++		return NETDEV_TX_BUSY;
+ 
+-	nr_words = kvaser_pciefd_prepare_tx_packet(&packet, can, skb);
++	nr_words = kvaser_pciefd_prepare_tx_packet(&packet, &can->can, seq, skb);
+ 
+-	spin_lock_irqsave(&can->echo_lock, irq_flags);
+ 	/* Prepare and save echo skb in internal slot */
+-	can_put_echo_skb(skb, netdev, can->echo_idx, 0);
+-
+-	/* Move echo index to the next slot */
+-	can->echo_idx = (can->echo_idx + 1) % can->can.echo_skb_max;
++	WRITE_ONCE(can->can.echo_skb[seq], NULL);
++	frame_len = can_skb_get_frame_len(skb);
++	can_put_echo_skb(skb, netdev, seq, frame_len);
++	netdev_sent_queue(netdev, frame_len);
++	WRITE_ONCE(can->tx_idx, can->tx_idx + 1);
+ 
+ 	/* Write header to fifo */
+ 	iowrite32(packet.header[0],
+@@ -836,14 +850,7 @@ static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb,
+ 			     KVASER_PCIEFD_KCAN_FIFO_LAST_REG);
+ 	}
+ 
+-	count = FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK,
+-			  ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG));
+-	/* No room for a new message, stop the queue until at least one
+-	 * successful transmit
+-	 */
+-	if (count >= can->can.echo_skb_max || can->can.echo_skb[can->echo_idx])
+-		netif_stop_queue(netdev);
+-	spin_unlock_irqrestore(&can->echo_lock, irq_flags);
++	netif_subqueue_maybe_stop(netdev, 0, kvaser_pciefd_tx_avail(can), 1, 1);
+ 
+ 	return NETDEV_TX_OK;
+ }
+@@ -970,6 +977,8 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
+ 		can->kv_pcie = pcie;
+ 		can->cmd_seq = 0;
+ 		can->err_rep_cnt = 0;
++		can->completed_tx_pkts = 0;
++		can->completed_tx_bytes = 0;
+ 		can->bec.txerr = 0;
+ 		can->bec.rxerr = 0;
+ 
+@@ -983,11 +992,10 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
+ 		tx_nr_packets_max =
+ 			FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_MAX_MASK,
+ 				  ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG));
++		can->tx_max_count = min(KVASER_PCIEFD_CAN_TX_MAX_COUNT, tx_nr_packets_max - 1);
+ 
+ 		can->can.clock.freq = pcie->freq;
+-		can->can.echo_skb_max = min(KVASER_PCIEFD_CAN_TX_MAX_COUNT, tx_nr_packets_max - 1);
+-		can->echo_idx = 0;
+-		spin_lock_init(&can->echo_lock);
++		can->can.echo_skb_max = roundup_pow_of_two(can->tx_max_count);
+ 		spin_lock_init(&can->lock);
+ 
+ 		can->can.bittiming_const = &kvaser_pciefd_bittiming_const;
+@@ -1200,7 +1208,7 @@ static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie,
+ 		skb = alloc_canfd_skb(priv->dev, &cf);
+ 		if (!skb) {
+ 			priv->dev->stats.rx_dropped++;
+-			return -ENOMEM;
++			return 0;
+ 		}
+ 
+ 		cf->len = can_fd_dlc2len(dlc);
+@@ -1212,7 +1220,7 @@ static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie,
+ 		skb = alloc_can_skb(priv->dev, (struct can_frame **)&cf);
+ 		if (!skb) {
+ 			priv->dev->stats.rx_dropped++;
+-			return -ENOMEM;
++			return 0;
+ 		}
+ 		can_frame_set_cc_len((struct can_frame *)cf, dlc, priv->ctrlmode);
+ 	}
+@@ -1230,7 +1238,9 @@ static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie,
+ 	priv->dev->stats.rx_packets++;
+ 	kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp);
+ 
+-	return netif_rx(skb);
++	netif_rx(skb);
++
++	return 0;
+ }
+ 
+ static void kvaser_pciefd_change_state(struct kvaser_pciefd_can *can,
+@@ -1507,19 +1517,21 @@ static int kvaser_pciefd_handle_ack_packet(struct kvaser_pciefd *pcie,
+ 		netdev_dbg(can->can.dev, "Packet was flushed\n");
+ 	} else {
+ 		int echo_idx = FIELD_GET(KVASER_PCIEFD_PACKET_SEQ_MASK, p->header[0]);
+-		int len;
+-		u8 count;
++		unsigned int len, frame_len = 0;
+ 		struct sk_buff *skb;
+ 
++		if (echo_idx != (can->ack_idx & (can->can.echo_skb_max - 1)))
++			return 0;
+ 		skb = can->can.echo_skb[echo_idx];
+-		if (skb)
+-			kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp);
+-		len = can_get_echo_skb(can->can.dev, echo_idx, NULL);
+-		count = FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK,
+-				  ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG));
++		if (!skb)
++			return 0;
++		kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp);
++		len = can_get_echo_skb(can->can.dev, echo_idx, &frame_len);
+ 
+-		if (count < can->can.echo_skb_max && netif_queue_stopped(can->can.dev))
+-			netif_wake_queue(can->can.dev);
++		/* Pairs with barrier in kvaser_pciefd_start_xmit() */
++		smp_store_release(&can->ack_idx, can->ack_idx + 1);
++		can->completed_tx_pkts++;
++		can->completed_tx_bytes += frame_len;
+ 
+ 		if (!one_shot_fail) {
+ 			can->can.dev->stats.tx_bytes += len;
+@@ -1635,11 +1647,26 @@ static int kvaser_pciefd_read_buffer(struct kvaser_pciefd *pcie, int dma_buf)
+ {
+ 	int pos = 0;
+ 	int res = 0;
++	unsigned int i;
+ 
+ 	do {
+ 		res = kvaser_pciefd_read_packet(pcie, &pos, dma_buf);
+ 	} while (!res && pos > 0 && pos < KVASER_PCIEFD_DMA_SIZE);
+ 
++	/* Report ACKs in this buffer to BQL en masse for correct periods */
++	for (i = 0; i < pcie->nr_channels; ++i) {
++		struct kvaser_pciefd_can *can = pcie->can[i];
++
++		if (!can->completed_tx_pkts)
++			continue;
++		netif_subqueue_completed_wake(can->can.dev, 0,
++					      can->completed_tx_pkts,
++					      can->completed_tx_bytes,
++					      kvaser_pciefd_tx_avail(can), 1);
++		can->completed_tx_pkts = 0;
++		can->completed_tx_bytes = 0;
++	}
++
+ 	return res;
+ }
+ 
+diff --git a/drivers/net/can/slcan/slcan-core.c b/drivers/net/can/slcan/slcan-core.c
+index 24c6622d36bd85..58ff2ec1d9757e 100644
+--- a/drivers/net/can/slcan/slcan-core.c
++++ b/drivers/net/can/slcan/slcan-core.c
+@@ -71,12 +71,21 @@ MODULE_AUTHOR("Dario Binacchi <dario.binacchi@amarulasolutions.com>");
+ #define SLCAN_CMD_LEN 1
+ #define SLCAN_SFF_ID_LEN 3
+ #define SLCAN_EFF_ID_LEN 8
++#define SLCAN_DATA_LENGTH_LEN 1
++#define SLCAN_ERROR_LEN 1
+ #define SLCAN_STATE_LEN 1
+ #define SLCAN_STATE_BE_RXCNT_LEN 3
+ #define SLCAN_STATE_BE_TXCNT_LEN 3
+-#define SLCAN_STATE_FRAME_LEN       (1 + SLCAN_CMD_LEN + \
+-				     SLCAN_STATE_BE_RXCNT_LEN + \
+-				     SLCAN_STATE_BE_TXCNT_LEN)
++#define SLCAN_STATE_MSG_LEN     (SLCAN_CMD_LEN +		\
++                                 SLCAN_STATE_LEN +		\
++                                 SLCAN_STATE_BE_RXCNT_LEN +	\
++                                 SLCAN_STATE_BE_TXCNT_LEN)
++#define SLCAN_ERROR_MSG_LEN_MIN (SLCAN_CMD_LEN +	\
++                                 SLCAN_ERROR_LEN +	\
++                                 SLCAN_DATA_LENGTH_LEN)
++#define SLCAN_FRAME_MSG_LEN_MIN (SLCAN_CMD_LEN +	\
++                                 SLCAN_SFF_ID_LEN +	\
++                                 SLCAN_DATA_LENGTH_LEN)
+ struct slcan {
+ 	struct can_priv         can;
+ 
+@@ -176,6 +185,9 @@ static void slcan_bump_frame(struct slcan *sl)
+ 	u32 tmpid;
+ 	char *cmd = sl->rbuff;
+ 
++	if (sl->rcount < SLCAN_FRAME_MSG_LEN_MIN)
++		return;
++
+ 	skb = alloc_can_skb(sl->dev, &cf);
+ 	if (unlikely(!skb)) {
+ 		sl->dev->stats.rx_dropped++;
+@@ -281,7 +293,7 @@ static void slcan_bump_state(struct slcan *sl)
+ 		return;
+ 	}
+ 
+-	if (state == sl->can.state || sl->rcount < SLCAN_STATE_FRAME_LEN)
++	if (state == sl->can.state || sl->rcount != SLCAN_STATE_MSG_LEN)
+ 		return;
+ 
+ 	cmd += SLCAN_STATE_BE_RXCNT_LEN + SLCAN_CMD_LEN + 1;
+@@ -328,6 +340,9 @@ static void slcan_bump_err(struct slcan *sl)
+ 	bool rx_errors = false, tx_errors = false, rx_over_errors = false;
+ 	int i, len;
+ 
++	if (sl->rcount < SLCAN_ERROR_MSG_LEN_MIN)
++		return;
++
+ 	/* get len from sanitized ASCII value */
+ 	len = cmd[1];
+ 	if (len >= '0' && len < '9')
+@@ -456,8 +471,7 @@ static void slcan_bump(struct slcan *sl)
+ static void slcan_unesc(struct slcan *sl, unsigned char s)
+ {
+ 	if ((s == '\r') || (s == '\a')) { /* CR or BEL ends the pdu */
+-		if (!test_and_clear_bit(SLF_ERROR, &sl->flags) &&
+-		    sl->rcount > 4)
++		if (!test_and_clear_bit(SLF_ERROR, &sl->flags))
+ 			slcan_bump(sl);
+ 
+ 		sl->rcount = 0;
+diff --git a/drivers/net/ethernet/apm/xgene-v2/main.c b/drivers/net/ethernet/apm/xgene-v2/main.c
+index 9e90c238149104..68335935cea778 100644
+--- a/drivers/net/ethernet/apm/xgene-v2/main.c
++++ b/drivers/net/ethernet/apm/xgene-v2/main.c
+@@ -9,8 +9,6 @@
+ 
+ #include "main.h"
+ 
+-static const struct acpi_device_id xge_acpi_match[];
+-
+ static int xge_get_resources(struct xge_pdata *pdata)
+ {
+ 	struct platform_device *pdev;
+@@ -731,7 +729,7 @@ MODULE_DEVICE_TABLE(acpi, xge_acpi_match);
+ static struct platform_driver xge_driver = {
+ 	.driver = {
+ 		   .name = "xgene-enet-v2",
+-		   .acpi_match_table = ACPI_PTR(xge_acpi_match),
++		   .acpi_match_table = xge_acpi_match,
+ 	},
+ 	.probe = xge_probe,
+ 	.remove_new = xge_remove,
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 12b61a6fcda428..2bb1fce350dbb1 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -11783,6 +11783,7 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
+ 	struct hwrm_func_drv_if_change_input *req;
+ 	bool fw_reset = !bp->irq_tbl;
+ 	bool resc_reinit = false;
++	bool caps_change = false;
+ 	int rc, retry = 0;
+ 	u32 flags = 0;
+ 
+@@ -11838,8 +11839,11 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
+ 		set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
+ 		return -ENODEV;
+ 	}
+-	if (resc_reinit || fw_reset) {
+-		if (fw_reset) {
++	if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_CAPS_CHANGE)
++		caps_change = true;
++
++	if (resc_reinit || fw_reset || caps_change) {
++		if (fw_reset || caps_change) {
+ 			set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
+ 			if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
+ 				bnxt_ulp_irq_stop(bp);
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
+index f662a5d54986cf..d8272b7a55fcb0 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc.c
++++ b/drivers/net/ethernet/freescale/enetc/enetc.c
+@@ -1572,6 +1572,16 @@ static void enetc_xdp_drop(struct enetc_bdr *rx_ring, int rx_ring_first,
+ 	}
+ }
+ 
++static void enetc_bulk_flip_buff(struct enetc_bdr *rx_ring, int rx_ring_first,
++				 int rx_ring_last)
++{
++	while (rx_ring_first != rx_ring_last) {
++		enetc_flip_rx_buff(rx_ring,
++				   &rx_ring->rx_swbd[rx_ring_first]);
++		enetc_bdr_idx_inc(rx_ring, &rx_ring_first);
++	}
++}
++
+ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
+ 				   struct napi_struct *napi, int work_limit,
+ 				   struct bpf_prog *prog)
+@@ -1687,11 +1697,7 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
+ 				enetc_xdp_drop(rx_ring, orig_i, i);
+ 				rx_ring->stats.xdp_redirect_failures++;
+ 			} else {
+-				while (orig_i != i) {
+-					enetc_flip_rx_buff(rx_ring,
+-							   &rx_ring->rx_swbd[orig_i]);
+-					enetc_bdr_idx_inc(rx_ring, &orig_i);
+-				}
++				enetc_bulk_flip_buff(rx_ring, orig_i, i);
+ 				xdp_redirect_frm_cnt++;
+ 				rx_ring->stats.xdp_redirect++;
+ 			}
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index 04906897615d87..479ced24096b80 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -1098,6 +1098,29 @@ static void fec_enet_enable_ring(struct net_device *ndev)
+ 	}
+ }
+ 
++/* Whack a reset.  We should wait for this.
++ * For i.MX6SX SOC, enet use AXI bus, we use disable MAC
++ * instead of reset MAC itself.
++ */
++static void fec_ctrl_reset(struct fec_enet_private *fep, bool allow_wol)
++{
++	u32 val;
++
++	if (!allow_wol || !(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
++		if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES ||
++		    ((fep->quirks & FEC_QUIRK_NO_HARD_RESET) && fep->link)) {
++			writel(0, fep->hwp + FEC_ECNTRL);
++		} else {
++			writel(FEC_ECR_RESET, fep->hwp + FEC_ECNTRL);
++			udelay(10);
++		}
++	} else {
++		val = readl(fep->hwp + FEC_ECNTRL);
++		val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
++		writel(val, fep->hwp + FEC_ECNTRL);
++	}
++}
++
+ /*
+  * This function is called to start or restart the FEC during a link
+  * change, transmit timeout, or to reconfigure the FEC.  The network
+@@ -1114,17 +1137,7 @@ fec_restart(struct net_device *ndev)
+ 	if (fep->bufdesc_ex)
+ 		fec_ptp_save_state(fep);
+ 
+-	/* Whack a reset.  We should wait for this.
+-	 * For i.MX6SX SOC, enet use AXI bus, we use disable MAC
+-	 * instead of reset MAC itself.
+-	 */
+-	if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES ||
+-	    ((fep->quirks & FEC_QUIRK_NO_HARD_RESET) && fep->link)) {
+-		writel(0, fep->hwp + FEC_ECNTRL);
+-	} else {
+-		writel(1, fep->hwp + FEC_ECNTRL);
+-		udelay(10);
+-	}
++	fec_ctrl_reset(fep, false);
+ 
+ 	/*
+ 	 * enet-mac reset will reset mac address registers too,
+@@ -1378,22 +1391,7 @@ fec_stop(struct net_device *ndev)
+ 	if (fep->bufdesc_ex)
+ 		fec_ptp_save_state(fep);
+ 
+-	/* Whack a reset.  We should wait for this.
+-	 * For i.MX6SX SOC, enet use AXI bus, we use disable MAC
+-	 * instead of reset MAC itself.
+-	 */
+-	if (!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
+-		if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) {
+-			writel(0, fep->hwp + FEC_ECNTRL);
+-		} else {
+-			writel(FEC_ECR_RESET, fep->hwp + FEC_ECNTRL);
+-			udelay(10);
+-		}
+-	} else {
+-		val = readl(fep->hwp + FEC_ECNTRL);
+-		val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
+-		writel(val, fep->hwp + FEC_ECNTRL);
+-	}
++	fec_ctrl_reset(fep, true);
+ 	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
+ 	writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
+ 
+diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
+index 7d1feeb317be34..2a2acbeb572214 100644
+--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
++++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
+@@ -3817,8 +3817,7 @@ static u32 ice_get_combined_cnt(struct ice_vsi *vsi)
+ 	ice_for_each_q_vector(vsi, q_idx) {
+ 		struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
+ 
+-		if (q_vector->rx.rx_ring && q_vector->tx.tx_ring)
+-			combined++;
++		combined += min(q_vector->num_ring_tx, q_vector->num_ring_rx);
+ 	}
+ 
+ 	return combined;
+diff --git a/drivers/net/ethernet/intel/ice/ice_irq.c b/drivers/net/ethernet/intel/ice/ice_irq.c
+index ad82ff7d199570..09f9c7ba52795b 100644
+--- a/drivers/net/ethernet/intel/ice/ice_irq.c
++++ b/drivers/net/ethernet/intel/ice/ice_irq.c
+@@ -45,7 +45,7 @@ static void ice_free_irq_res(struct ice_pf *pf, u16 index)
+ /**
+  * ice_get_irq_res - get an interrupt resource
+  * @pf: board private structure
+- * @dyn_only: force entry to be dynamically allocated
++ * @dyn_allowed: allow entry to be dynamically allocated
+  *
+  * Allocate new irq entry in the free slot of the tracker. Since xarray
+  * is used, always allocate new entry at the lowest possible index. Set
+@@ -53,11 +53,12 @@ static void ice_free_irq_res(struct ice_pf *pf, u16 index)
+  *
+  * Returns allocated irq entry or NULL on failure.
+  */
+-static struct ice_irq_entry *ice_get_irq_res(struct ice_pf *pf, bool dyn_only)
++static struct ice_irq_entry *ice_get_irq_res(struct ice_pf *pf,
++					     bool dyn_allowed)
+ {
+-	struct xa_limit limit = { .max = pf->irq_tracker.num_entries,
++	struct xa_limit limit = { .max = pf->irq_tracker.num_entries - 1,
+ 				  .min = 0 };
+-	unsigned int num_static = pf->irq_tracker.num_static;
++	unsigned int num_static = pf->irq_tracker.num_static - 1;
+ 	struct ice_irq_entry *entry;
+ 	unsigned int index;
+ 	int ret;
+@@ -66,9 +67,9 @@ static struct ice_irq_entry *ice_get_irq_res(struct ice_pf *pf, bool dyn_only)
+ 	if (!entry)
+ 		return NULL;
+ 
+-	/* skip preallocated entries if the caller says so */
+-	if (dyn_only)
+-		limit.min = num_static;
++	/* only already allocated if the caller says so */
++	if (!dyn_allowed)
++		limit.max = num_static;
+ 
+ 	ret = xa_alloc(&pf->irq_tracker.entries, &index, entry, limit,
+ 		       GFP_KERNEL);
+@@ -78,7 +79,7 @@ static struct ice_irq_entry *ice_get_irq_res(struct ice_pf *pf, bool dyn_only)
+ 		entry = NULL;
+ 	} else {
+ 		entry->index = index;
+-		entry->dynamic = index >= num_static;
++		entry->dynamic = index > num_static;
+ 	}
+ 
+ 	return entry;
+@@ -272,7 +273,7 @@ int ice_init_interrupt_scheme(struct ice_pf *pf)
+ /**
+  * ice_alloc_irq - Allocate new interrupt vector
+  * @pf: board private structure
+- * @dyn_only: force dynamic allocation of the interrupt
++ * @dyn_allowed: allow dynamic allocation of the interrupt
+  *
+  * Allocate new interrupt vector for a given owner id.
+  * return struct msi_map with interrupt details and track
+@@ -285,20 +286,20 @@ int ice_init_interrupt_scheme(struct ice_pf *pf)
+  * interrupt will be allocated with pci_msix_alloc_irq_at.
+  *
+  * Some callers may only support dynamically allocated interrupts.
+- * This is indicated with dyn_only flag.
++ * This is indicated with dyn_allowed flag.
+  *
+  * On failure, return map with negative .index. The caller
+  * is expected to check returned map index.
+  *
+  */
+-struct msi_map ice_alloc_irq(struct ice_pf *pf, bool dyn_only)
++struct msi_map ice_alloc_irq(struct ice_pf *pf, bool dyn_allowed)
+ {
+ 	int sriov_base_vector = pf->sriov_base_vector;
+ 	struct msi_map map = { .index = -ENOENT };
+ 	struct device *dev = ice_pf_to_dev(pf);
+ 	struct ice_irq_entry *entry;
+ 
+-	entry = ice_get_irq_res(pf, dyn_only);
++	entry = ice_get_irq_res(pf, dyn_allowed);
+ 	if (!entry)
+ 		return map;
+ 
+diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c
+index 22371011c24928..2410aee59fb2d5 100644
+--- a/drivers/net/ethernet/intel/ice/ice_lag.c
++++ b/drivers/net/ethernet/intel/ice/ice_lag.c
+@@ -1321,12 +1321,18 @@ static void ice_lag_changeupper_event(struct ice_lag *lag, void *ptr)
+ 		 */
+ 		if (!primary_lag) {
+ 			lag->primary = true;
++			if (!ice_is_switchdev_running(lag->pf))
++				return;
++
+ 			/* Configure primary's SWID to be shared */
+ 			ice_lag_primary_swid(lag, true);
+ 			primary_lag = lag;
+ 		} else {
+ 			u16 swid;
+ 
++			if (!ice_is_switchdev_running(primary_lag->pf))
++				return;
++
+ 			swid = primary_lag->pf->hw.port_info->sw_id;
+ 			ice_lag_set_swid(swid, lag, true);
+ 			ice_lag_add_prune_list(primary_lag, lag->pf);
+diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
+index 121a5ad5c8e10b..8961eebe67aa23 100644
+--- a/drivers/net/ethernet/intel/ice/ice_lib.c
++++ b/drivers/net/ethernet/intel/ice/ice_lib.c
+@@ -567,6 +567,8 @@ ice_vsi_alloc_def(struct ice_vsi *vsi, struct ice_channel *ch)
+ 			return -ENOMEM;
+ 	}
+ 
++	vsi->irq_dyn_alloc = pci_msix_can_alloc_dyn(vsi->back->pdev);
++
+ 	switch (vsi->type) {
+ 	case ICE_VSI_PF:
+ 	case ICE_VSI_SF:
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index ca707dfcb286ef..63d2105fce9332 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -5175,11 +5175,12 @@ int ice_load(struct ice_pf *pf)
+ 
+ 	ice_napi_add(vsi);
+ 
++	ice_init_features(pf);
++
+ 	err = ice_init_rdma(pf);
+ 	if (err)
+ 		goto err_init_rdma;
+ 
+-	ice_init_features(pf);
+ 	ice_service_task_restart(pf);
+ 
+ 	clear_bit(ICE_DOWN, pf->state);
+@@ -5187,6 +5188,7 @@ int ice_load(struct ice_pf *pf)
+ 	return 0;
+ 
+ err_init_rdma:
++	ice_deinit_features(pf);
+ 	ice_tc_indir_block_unregister(vsi);
+ err_tc_indir_block_register:
+ 	ice_unregister_netdev(vsi);
+@@ -5210,8 +5212,8 @@ void ice_unload(struct ice_pf *pf)
+ 
+ 	devl_assert_locked(priv_to_devlink(pf));
+ 
+-	ice_deinit_features(pf);
+ 	ice_deinit_rdma(pf);
++	ice_deinit_features(pf);
+ 	ice_tc_indir_block_unregister(vsi);
+ 	ice_unregister_netdev(vsi);
+ 	ice_devlink_destroy_pf_port(pf);
+diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+index c8c1d48ff793d7..87ffd25b268a2e 100644
+--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
++++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+@@ -3877,7 +3877,6 @@ static int ice_vc_repr_add_mac(struct ice_vf *vf, u8 *msg)
+ 		}
+ 
+ 		ice_vfhw_mac_add(vf, &al->list[i]);
+-		vf->num_mac++;
+ 		break;
+ 	}
+ 
+diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h
+index aef0e9775a3305..70dbf80f3bb75b 100644
+--- a/drivers/net/ethernet/intel/idpf/idpf.h
++++ b/drivers/net/ethernet/intel/idpf/idpf.h
+@@ -143,6 +143,7 @@ enum idpf_vport_state {
+  * @vport_id: Vport identifier
+  * @link_speed_mbps: Link speed in mbps
+  * @vport_idx: Relative vport index
++ * @max_tx_hdr_size: Max header length hardware can support
+  * @state: See enum idpf_vport_state
+  * @netstats: Packet and byte stats
+  * @stats_lock: Lock to protect stats update
+@@ -153,6 +154,7 @@ struct idpf_netdev_priv {
+ 	u32 vport_id;
+ 	u32 link_speed_mbps;
+ 	u16 vport_idx;
++	u16 max_tx_hdr_size;
+ 	enum idpf_vport_state state;
+ 	struct rtnl_link_stats64 netstats;
+ 	spinlock_t stats_lock;
+diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
+index 5ce663d04de00b..615e74d038457c 100644
+--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
++++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
+@@ -723,6 +723,7 @@ static int idpf_cfg_netdev(struct idpf_vport *vport)
+ 		np->vport = vport;
+ 		np->vport_idx = vport->idx;
+ 		np->vport_id = vport->vport_id;
++		np->max_tx_hdr_size = idpf_get_max_tx_hdr_size(adapter);
+ 		vport->netdev = netdev;
+ 
+ 		return idpf_init_mac_addr(vport, netdev);
+@@ -740,6 +741,7 @@ static int idpf_cfg_netdev(struct idpf_vport *vport)
+ 	np->adapter = adapter;
+ 	np->vport_idx = vport->idx;
+ 	np->vport_id = vport->vport_id;
++	np->max_tx_hdr_size = idpf_get_max_tx_hdr_size(adapter);
+ 
+ 	spin_lock_init(&np->stats_lock);
+ 
+@@ -2189,8 +2191,8 @@ static netdev_features_t idpf_features_check(struct sk_buff *skb,
+ 					     struct net_device *netdev,
+ 					     netdev_features_t features)
+ {
+-	struct idpf_vport *vport = idpf_netdev_to_vport(netdev);
+-	struct idpf_adapter *adapter = vport->adapter;
++	struct idpf_netdev_priv *np = netdev_priv(netdev);
++	u16 max_tx_hdr_size = np->max_tx_hdr_size;
+ 	size_t len;
+ 
+ 	/* No point in doing any of this if neither checksum nor GSO are
+@@ -2213,7 +2215,7 @@ static netdev_features_t idpf_features_check(struct sk_buff *skb,
+ 		goto unsupported;
+ 
+ 	len = skb_network_header_len(skb);
+-	if (unlikely(len > idpf_get_max_tx_hdr_size(adapter)))
++	if (unlikely(len > max_tx_hdr_size))
+ 		goto unsupported;
+ 
+ 	if (!skb->encapsulation)
+@@ -2226,7 +2228,7 @@ static netdev_features_t idpf_features_check(struct sk_buff *skb,
+ 
+ 	/* IPLEN can support at most 127 dwords */
+ 	len = skb_inner_network_header_len(skb);
+-	if (unlikely(len > idpf_get_max_tx_hdr_size(adapter)))
++	if (unlikely(len > max_tx_hdr_size))
+ 		goto unsupported;
+ 
+ 	/* No need to validate L4LEN as TCP is the only protocol with a
+diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+index afc902ae4763e0..623bf17f87f9c0 100644
+--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
++++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+@@ -4022,6 +4022,14 @@ static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget)
+ 		return budget;
+ 	}
+ 
++	/* Switch to poll mode in the tear-down path after sending disable
++	 * queues virtchnl message, as the interrupts will be disabled after
++	 * that.
++	 */
++	if (unlikely(q_vector->num_txq && idpf_queue_has(POLL_MODE,
++							 q_vector->tx[0])))
++		return budget;
++
+ 	work_done = min_t(int, work_done, budget - 1);
+ 
+ 	/* Exit the polling mode, but don't re-enable interrupts if stack might
+@@ -4032,15 +4040,7 @@ static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget)
+ 	else
+ 		idpf_vport_intr_set_wb_on_itr(q_vector);
+ 
+-	/* Switch to poll mode in the tear-down path after sending disable
+-	 * queues virtchnl message, as the interrupts will be disabled after
+-	 * that
+-	 */
+-	if (unlikely(q_vector->num_txq && idpf_queue_has(POLL_MODE,
+-							 q_vector->tx[0])))
+-		return budget;
+-	else
+-		return work_done;
++	return work_done;
+ }
+ 
+ /**
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+index e43c4608d3ba33..971993586fb49d 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+@@ -66,8 +66,18 @@ static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool en);
+ /* Supported devices */
+ static const struct pci_device_id cgx_id_table[] = {
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_CGX) },
+-	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM) },
+-	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KB_RPM) },
++	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM,
++	  PCI_ANY_ID, PCI_SUBSYS_DEVID_CN10K_A) },
++	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM,
++	  PCI_ANY_ID, PCI_SUBSYS_DEVID_CNF10K_A) },
++	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM,
++	  PCI_ANY_ID, PCI_SUBSYS_DEVID_CNF10K_B) },
++	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KB_RPM,
++	  PCI_ANY_ID, PCI_SUBSYS_DEVID_CN10K_B) },
++	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KB_RPM,
++	  PCI_ANY_ID, PCI_SUBSYS_DEVID_CN20KA) },
++	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KB_RPM,
++	  PCI_ANY_ID, PCI_SUBSYS_DEVID_CNF20KA) },
+ 	{ 0, }  /* end of table */
+ };
+ 
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+index 8555edbb1c8f9a..f94bf04788e986 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+@@ -30,6 +30,8 @@
+ #define PCI_SUBSYS_DEVID_CNF10K_A	       0xBA00
+ #define PCI_SUBSYS_DEVID_CNF10K_B              0xBC00
+ #define PCI_SUBSYS_DEVID_CN10K_B               0xBD00
++#define PCI_SUBSYS_DEVID_CN20KA                0xC220
++#define PCI_SUBSYS_DEVID_CNF20KA               0xC320
+ 
+ /* PCI BAR nos */
+ #define	PCI_AF_REG_BAR_NUM			0
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
+index 7fa98aeb3663c0..4a3370a40dd887 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
+@@ -13,19 +13,26 @@
+ /* RVU LMTST */
+ #define LMT_TBL_OP_READ		0
+ #define LMT_TBL_OP_WRITE	1
+-#define LMT_MAP_TABLE_SIZE	(128 * 1024)
+ #define LMT_MAPTBL_ENTRY_SIZE	16
++#define LMT_MAX_VFS		256
++
++#define LMT_MAP_ENTRY_ENA      BIT_ULL(20)
++#define LMT_MAP_ENTRY_LINES    GENMASK_ULL(18, 16)
+ 
+ /* Function to perform operations (read/write) on lmtst map table */
+ static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val,
+ 			       int lmt_tbl_op)
+ {
+ 	void __iomem *lmt_map_base;
+-	u64 tbl_base;
++	u64 tbl_base, cfg;
++	int pfs, vfs;
+ 
+ 	tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);
++	cfg  = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CFG);
++	vfs = 1 << (cfg & 0xF);
++	pfs = 1 << ((cfg >> 4) & 0x7);
+ 
+-	lmt_map_base = ioremap_wc(tbl_base, LMT_MAP_TABLE_SIZE);
++	lmt_map_base = ioremap_wc(tbl_base, pfs * vfs * LMT_MAPTBL_ENTRY_SIZE);
+ 	if (!lmt_map_base) {
+ 		dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n");
+ 		return -ENOMEM;
+@@ -35,6 +42,13 @@ static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val,
+ 		*val = readq(lmt_map_base + index);
+ 	} else {
+ 		writeq((*val), (lmt_map_base + index));
++
++		cfg = FIELD_PREP(LMT_MAP_ENTRY_ENA, 0x1);
++		/* 2048 LMTLINES */
++		cfg |= FIELD_PREP(LMT_MAP_ENTRY_LINES, 0x6);
++
++		writeq(cfg, (lmt_map_base + (index + 8)));
++
+ 		/* Flushing the AP interceptor cache to make APR_LMT_MAP_ENTRY_S
+ 		 * changes effective. Write 1 for flush and read is being used as a
+ 		 * barrier and sets up a data dependency. Write to 0 after a write
+@@ -52,7 +66,7 @@ static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val,
+ #define LMT_MAP_TBL_W1_OFF  8
+ static u32 rvu_get_lmtst_tbl_index(struct rvu *rvu, u16 pcifunc)
+ {
+-	return ((rvu_get_pf(pcifunc) * rvu->hw->total_vfs) +
++	return ((rvu_get_pf(pcifunc) * LMT_MAX_VFS) +
+ 		(pcifunc & RVU_PFVF_FUNC_MASK)) * LMT_MAPTBL_ENTRY_SIZE;
+ }
+ 
+@@ -69,7 +83,7 @@ static int rvu_get_lmtaddr(struct rvu *rvu, u16 pcifunc,
+ 
+ 	mutex_lock(&rvu->rsrc_lock);
+ 	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_REQ, iova);
+-	pf = rvu_get_pf(pcifunc) & 0x1F;
++	pf = rvu_get_pf(pcifunc) & RVU_PFVF_PF_MASK;
+ 	val = BIT_ULL(63) | BIT_ULL(14) | BIT_ULL(13) | pf << 8 |
+ 	      ((pcifunc & RVU_PFVF_FUNC_MASK) & 0xFF);
+ 	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TXN_REQ, val);
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+index 87ba77e5026a02..e24accfecb3fb4 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+@@ -580,6 +580,7 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
+ 	u64 lmt_addr, val, tbl_base;
+ 	int pf, vf, num_vfs, hw_vfs;
+ 	void __iomem *lmt_map_base;
++	int apr_pfs, apr_vfs;
+ 	int buf_size = 10240;
+ 	size_t off = 0;
+ 	int index = 0;
+@@ -595,8 +596,12 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
+ 		return -ENOMEM;
+ 
+ 	tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);
++	val  = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CFG);
++	apr_vfs = 1 << (val & 0xF);
++	apr_pfs = 1 << ((val >> 4) & 0x7);
+ 
+-	lmt_map_base = ioremap_wc(tbl_base, 128 * 1024);
++	lmt_map_base = ioremap_wc(tbl_base, apr_pfs * apr_vfs *
++				  LMT_MAPTBL_ENTRY_SIZE);
+ 	if (!lmt_map_base) {
+ 		dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n");
+ 		kfree(buf);
+@@ -618,7 +623,7 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
+ 		off += scnprintf(&buf[off], buf_size - 1 - off, "PF%d  \t\t\t",
+ 				    pf);
+ 
+-		index = pf * rvu->hw->total_vfs * LMT_MAPTBL_ENTRY_SIZE;
++		index = pf * apr_vfs * LMT_MAPTBL_ENTRY_SIZE;
+ 		off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%llx\t\t",
+ 				 (tbl_base + index));
+ 		lmt_addr = readq(lmt_map_base + index);
+@@ -631,7 +636,7 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
+ 		/* Reading num of VFs per PF */
+ 		rvu_get_pf_numvfs(rvu, pf, &num_vfs, &hw_vfs);
+ 		for (vf = 0; vf < num_vfs; vf++) {
+-			index = (pf * rvu->hw->total_vfs * 16) +
++			index = (pf * apr_vfs * LMT_MAPTBL_ENTRY_SIZE) +
+ 				((vf + 1)  * LMT_MAPTBL_ENTRY_SIZE);
+ 			off += scnprintf(&buf[off], buf_size - 1 - off,
+ 					    "PF%d:VF%d  \t\t", pf, vf);
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+index 7510a918d942c0..f75afcf5f5aeff 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+@@ -988,6 +988,7 @@ static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
+ 	int err, pool_id, non_xdp_queues;
+ 	struct nix_aq_enq_req *aq;
+ 	struct otx2_cq_queue *cq;
++	struct otx2_pool *pool;
+ 
+ 	cq = &qset->cq[qidx];
+ 	cq->cq_idx = qidx;
+@@ -996,8 +997,13 @@ static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
+ 		cq->cq_type = CQ_RX;
+ 		cq->cint_idx = qidx;
+ 		cq->cqe_cnt = qset->rqe_cnt;
+-		if (pfvf->xdp_prog)
++		if (pfvf->xdp_prog) {
++			pool = &qset->pool[qidx];
+ 			xdp_rxq_info_reg(&cq->xdp_rxq, pfvf->netdev, qidx, 0);
++			xdp_rxq_info_reg_mem_model(&cq->xdp_rxq,
++						   MEM_TYPE_PAGE_POOL,
++						   pool->page_pool);
++		}
+ 	} else if (qidx < non_xdp_queues) {
+ 		cq->cq_type = CQ_TX;
+ 		cq->cint_idx = qidx - pfvf->hw.rx_queues;
+diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+index f20bb390df3add..c855fb799ce145 100644
+--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
++++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+@@ -34,8 +34,10 @@ struct mtk_flow_data {
+ 	u16 vlan_in;
+ 
+ 	struct {
+-		u16 id;
+-		__be16 proto;
++		struct {
++			u16 id;
++			__be16 proto;
++		} vlans[2];
+ 		u8 num;
+ 	} vlan;
+ 	struct {
+@@ -349,18 +351,19 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f,
+ 		case FLOW_ACTION_CSUM:
+ 			break;
+ 		case FLOW_ACTION_VLAN_PUSH:
+-			if (data.vlan.num == 1 ||
++			if (data.vlan.num + data.pppoe.num == 2 ||
+ 			    act->vlan.proto != htons(ETH_P_8021Q))
+ 				return -EOPNOTSUPP;
+ 
+-			data.vlan.id = act->vlan.vid;
+-			data.vlan.proto = act->vlan.proto;
++			data.vlan.vlans[data.vlan.num].id = act->vlan.vid;
++			data.vlan.vlans[data.vlan.num].proto = act->vlan.proto;
+ 			data.vlan.num++;
+ 			break;
+ 		case FLOW_ACTION_VLAN_POP:
+ 			break;
+ 		case FLOW_ACTION_PPPOE_PUSH:
+-			if (data.pppoe.num == 1)
++			if (data.pppoe.num == 1 ||
++			    data.vlan.num == 2)
+ 				return -EOPNOTSUPP;
+ 
+ 			data.pppoe.sid = act->pppoe.sid;
+@@ -450,12 +453,9 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f,
+ 	if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
+ 		foe.bridge.vlan = data.vlan_in;
+ 
+-	if (data.vlan.num == 1) {
+-		if (data.vlan.proto != htons(ETH_P_8021Q))
+-			return -EOPNOTSUPP;
++	for (i = 0; i < data.vlan.num; i++)
++		mtk_foe_entry_set_vlan(eth, &foe, data.vlan.vlans[i].id);
+ 
+-		mtk_foe_entry_set_vlan(eth, &foe, data.vlan.id);
+-	}
+ 	if (data.pppoe.num == 1)
+ 		mtk_foe_entry_set_pppoe(eth, &foe, data.pppoe.sid);
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c
+index b330020dc0d674..f2bded847e61d1 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/alloc.c
++++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c
+@@ -682,9 +682,9 @@ static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device)
+ }
+ 
+ static int mlx4_alloc_db_from_pgdir(struct mlx4_db_pgdir *pgdir,
+-				    struct mlx4_db *db, int order)
++				    struct mlx4_db *db, unsigned int order)
+ {
+-	int o;
++	unsigned int o;
+ 	int i;
+ 
+ 	for (o = order; o <= 1; ++o) {
+@@ -712,7 +712,7 @@ static int mlx4_alloc_db_from_pgdir(struct mlx4_db_pgdir *pgdir,
+ 	return 0;
+ }
+ 
+-int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order)
++int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, unsigned int order)
+ {
+ 	struct mlx4_priv *priv = mlx4_priv(dev);
+ 	struct mlx4_db_pgdir *pgdir;
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+index 1ddb11cb25f916..6e077d202827a2 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+@@ -450,6 +450,8 @@ int mlx4_en_process_tx_cq(struct net_device *dev,
+ 
+ 	if (unlikely(!priv->port_up))
+ 		return 0;
++	if (unlikely(!napi_budget) && cq->type == TX_XDP)
++		return 0;
+ 
+ 	netdev_txq_bql_complete_prefetchw(ring->tx_queue);
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
+index 57b7298a0e793c..e048a667e0758b 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
+@@ -94,8 +94,6 @@ struct page_pool;
+ #define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev) \
+ 	MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, order_base_2(MLX5E_RX_MAX_HEAD))
+ 
+-#define MLX5_MPWRQ_MAX_LOG_WQE_SZ 18
+-
+ /* Keep in sync with mlx5e_mpwrq_log_wqe_sz.
+  * These are theoretical maximums, which can be further restricted by
+  * capabilities. These values are used for static resource allocations and
+@@ -385,7 +383,6 @@ enum {
+ 	MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE,
+ 	MLX5E_SQ_STATE_PENDING_XSK_TX,
+ 	MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC,
+-	MLX5E_SQ_STATE_XDP_MULTIBUF,
+ 	MLX5E_NUM_SQ_STATES, /* Must be kept last */
+ };
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+index 31eb99f09c63c1..58ec5e44aa7ada 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+@@ -10,6 +10,9 @@
+ #include <net/page_pool/types.h>
+ #include <net/xdp_sock_drv.h>
+ 
++#define MLX5_MPWRQ_MAX_LOG_WQE_SZ 18
++#define MLX5_REP_MPWRQ_MAX_LOG_WQE_SZ 17
++
+ static u8 mlx5e_mpwrq_min_page_shift(struct mlx5_core_dev *mdev)
+ {
+ 	u8 min_page_shift = MLX5_CAP_GEN_2(mdev, log_min_mkey_entity_size);
+@@ -103,18 +106,22 @@ u8 mlx5e_mpwrq_log_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift,
+ 			  enum mlx5e_mpwrq_umr_mode umr_mode)
+ {
+ 	u8 umr_entry_size = mlx5e_mpwrq_umr_entry_size(umr_mode);
+-	u8 max_pages_per_wqe, max_log_mpwqe_size;
++	u8 max_pages_per_wqe, max_log_wqe_size_calc;
++	u8 max_log_wqe_size_cap;
+ 	u16 max_wqe_size;
+ 
+ 	/* Keep in sync with MLX5_MPWRQ_MAX_PAGES_PER_WQE. */
+ 	max_wqe_size = mlx5e_get_max_sq_aligned_wqebbs(mdev) * MLX5_SEND_WQE_BB;
+ 	max_pages_per_wqe = ALIGN_DOWN(max_wqe_size - sizeof(struct mlx5e_umr_wqe),
+ 				       MLX5_UMR_FLEX_ALIGNMENT) / umr_entry_size;
+-	max_log_mpwqe_size = ilog2(max_pages_per_wqe) + page_shift;
++	max_log_wqe_size_calc = ilog2(max_pages_per_wqe) + page_shift;
++
++	WARN_ON_ONCE(max_log_wqe_size_calc < MLX5E_ORDER2_MAX_PACKET_MTU);
+ 
+-	WARN_ON_ONCE(max_log_mpwqe_size < MLX5E_ORDER2_MAX_PACKET_MTU);
++	max_log_wqe_size_cap = mlx5_core_is_ecpf(mdev) ?
++			   MLX5_REP_MPWRQ_MAX_LOG_WQE_SZ : MLX5_MPWRQ_MAX_LOG_WQE_SZ;
+ 
+-	return min_t(u8, max_log_mpwqe_size, MLX5_MPWRQ_MAX_LOG_WQE_SZ);
++	return min_t(u8, max_log_wqe_size_calc, max_log_wqe_size_cap);
+ }
+ 
+ u8 mlx5e_mpwrq_pages_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift,
+@@ -1242,7 +1249,6 @@ void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
+ 	mlx5e_build_sq_param_common(mdev, param);
+ 	MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
+ 	param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE);
+-	param->is_xdp_mb = !mlx5e_rx_is_linear_skb(mdev, params, xsk);
+ 	mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
+ }
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
+index 3f8986f9d86291..bd5877acc5b1eb 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
+@@ -33,7 +33,6 @@ struct mlx5e_sq_param {
+ 	struct mlx5_wq_param       wq;
+ 	bool                       is_mpw;
+ 	bool                       is_tls;
+-	bool                       is_xdp_mb;
+ 	u16                        stop_room;
+ };
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+index c8adf309ecad04..dbd9482359e1ec 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+@@ -16,7 +16,6 @@ static const char * const sq_sw_state_type_name[] = {
+ 	[MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE] = "vlan_need_l2_inline",
+ 	[MLX5E_SQ_STATE_PENDING_XSK_TX] = "pending_xsk_tx",
+ 	[MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC] = "pending_tls_rx_resync",
+-	[MLX5E_SQ_STATE_XDP_MULTIBUF] = "xdp_multibuf",
+ };
+ 
+ static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq)
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+index 4610621a340e50..08ab0999f7b316 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+@@ -546,6 +546,7 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
+ 	bool inline_ok;
+ 	bool linear;
+ 	u16 pi;
++	int i;
+ 
+ 	struct mlx5e_xdpsq_stats *stats = sq->stats;
+ 
+@@ -612,41 +613,33 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
+ 
+ 	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND);
+ 
+-	if (test_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state)) {
+-		int i;
+-
+-		memset(&cseg->trailer, 0, sizeof(cseg->trailer));
+-		memset(eseg, 0, sizeof(*eseg) - sizeof(eseg->trailer));
+-
+-		eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);
++	memset(&cseg->trailer, 0, sizeof(cseg->trailer));
++	memset(eseg, 0, sizeof(*eseg) - sizeof(eseg->trailer));
+ 
+-		for (i = 0; i < num_frags; i++) {
+-			skb_frag_t *frag = &xdptxdf->sinfo->frags[i];
+-			dma_addr_t addr;
++	eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);
+ 
+-			addr = xdptxdf->dma_arr ? xdptxdf->dma_arr[i] :
+-				page_pool_get_dma_addr(skb_frag_page(frag)) +
+-				skb_frag_off(frag);
++	for (i = 0; i < num_frags; i++) {
++		skb_frag_t *frag = &xdptxdf->sinfo->frags[i];
++		dma_addr_t addr;
+ 
+-			dseg->addr = cpu_to_be64(addr);
+-			dseg->byte_count = cpu_to_be32(skb_frag_size(frag));
+-			dseg->lkey = sq->mkey_be;
+-			dseg++;
+-		}
++		addr = xdptxdf->dma_arr ? xdptxdf->dma_arr[i] :
++			page_pool_get_dma_addr(skb_frag_page(frag)) +
++			skb_frag_off(frag);
+ 
+-		cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
++		dseg->addr = cpu_to_be64(addr);
++		dseg->byte_count = cpu_to_be32(skb_frag_size(frag));
++		dseg->lkey = sq->mkey_be;
++		dseg++;
++	}
+ 
+-		sq->db.wqe_info[pi] = (struct mlx5e_xdp_wqe_info) {
+-			.num_wqebbs = num_wqebbs,
+-			.num_pkts = 1,
+-		};
++	cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
+ 
+-		sq->pc += num_wqebbs;
+-	} else {
+-		cseg->fm_ce_se = 0;
++	sq->db.wqe_info[pi] = (struct mlx5e_xdp_wqe_info) {
++		.num_wqebbs = num_wqebbs,
++		.num_pkts = 1,
++	};
+ 
+-		sq->pc++;
+-	}
++	sq->pc += num_wqebbs;
+ 
+ 	xsk_tx_metadata_request(meta, &mlx5e_xsk_tx_metadata_ops, eseg);
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+index 57861d34d46f85..59b9653f573c8f 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+@@ -165,6 +165,25 @@ static void ipsec_rx_status_pass_destroy(struct mlx5e_ipsec *ipsec,
+ #endif
+ }
+ 
++static void ipsec_rx_rule_add_match_obj(struct mlx5e_ipsec_sa_entry *sa_entry,
++					struct mlx5e_ipsec_rx *rx,
++					struct mlx5_flow_spec *spec)
++{
++	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
++
++	if (rx == ipsec->rx_esw) {
++		mlx5_esw_ipsec_rx_rule_add_match_obj(sa_entry, spec);
++	} else {
++		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
++				 misc_parameters_2.metadata_reg_c_2);
++		MLX5_SET(fte_match_param, spec->match_value,
++			 misc_parameters_2.metadata_reg_c_2,
++			 sa_entry->ipsec_obj_id | BIT(31));
++
++		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
++	}
++}
++
+ static int rx_add_rule_drop_auth_trailer(struct mlx5e_ipsec_sa_entry *sa_entry,
+ 					 struct mlx5e_ipsec_rx *rx)
+ {
+@@ -200,11 +219,8 @@ static int rx_add_rule_drop_auth_trailer(struct mlx5e_ipsec_sa_entry *sa_entry,
+ 
+ 	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.ipsec_syndrome);
+ 	MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.ipsec_syndrome, 1);
+-	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_2);
+-	MLX5_SET(fte_match_param, spec->match_value,
+-		 misc_parameters_2.metadata_reg_c_2,
+-		 sa_entry->ipsec_obj_id | BIT(31));
+ 	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
++	ipsec_rx_rule_add_match_obj(sa_entry, rx, spec);
+ 	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
+ 	if (IS_ERR(rule)) {
+ 		err = PTR_ERR(rule);
+@@ -281,10 +297,8 @@ static int rx_add_rule_drop_replay(struct mlx5e_ipsec_sa_entry *sa_entry, struct
+ 
+ 	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_4);
+ 	MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_4, 1);
+-	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_2);
+-	MLX5_SET(fte_match_param, spec->match_value,  misc_parameters_2.metadata_reg_c_2,
+-		 sa_entry->ipsec_obj_id | BIT(31));
+ 	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
++	ipsec_rx_rule_add_match_obj(sa_entry, rx, spec);
+ 	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
+ 	if (IS_ERR(rule)) {
+ 		err = PTR_ERR(rule);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 3e9ad3cb8121df..4a2f58a9d70660 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -2030,41 +2030,12 @@ int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
+ 	csp.min_inline_mode = sq->min_inline_mode;
+ 	set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
+ 
+-	if (param->is_xdp_mb)
+-		set_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state);
+-
+ 	err = mlx5e_create_sq_rdy(c->mdev, param, &csp, 0, &sq->sqn);
+ 	if (err)
+ 		goto err_free_xdpsq;
+ 
+ 	mlx5e_set_xmit_fp(sq, param->is_mpw);
+ 
+-	if (!param->is_mpw && !test_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state)) {
+-		unsigned int ds_cnt = MLX5E_TX_WQE_EMPTY_DS_COUNT + 1;
+-		unsigned int inline_hdr_sz = 0;
+-		int i;
+-
+-		if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
+-			inline_hdr_sz = MLX5E_XDP_MIN_INLINE;
+-			ds_cnt++;
+-		}
+-
+-		/* Pre initialize fixed WQE fields */
+-		for (i = 0; i < mlx5_wq_cyc_get_size(&sq->wq); i++) {
+-			struct mlx5e_tx_wqe      *wqe  = mlx5_wq_cyc_get_wqe(&sq->wq, i);
+-			struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
+-			struct mlx5_wqe_eth_seg  *eseg = &wqe->eth;
+-
+-			sq->db.wqe_info[i] = (struct mlx5e_xdp_wqe_info) {
+-				.num_wqebbs = 1,
+-				.num_pkts   = 1,
+-			};
+-
+-			cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
+-			eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);
+-		}
+-	}
+-
+ 	return 0;
+ 
+ err_free_xdpsq:
+@@ -3788,8 +3759,11 @@ static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
+ 	/* MQPRIO is another toplevel qdisc that can't be attached
+ 	 * simultaneously with the offloaded HTB.
+ 	 */
+-	if (WARN_ON(mlx5e_selq_is_htb_enabled(&priv->selq)))
+-		return -EINVAL;
++	if (mlx5e_selq_is_htb_enabled(&priv->selq)) {
++		NL_SET_ERR_MSG_MOD(mqprio->extack,
++				   "MQPRIO cannot be configured when HTB offload is enabled.");
++		return -EOPNOTSUPP;
++	}
+ 
+ 	switch (mqprio->mode) {
+ 	case TC_MQPRIO_MODE_DCB:
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+index 0657d107653577..18ec392d17404c 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+@@ -65,6 +65,7 @@
+ #define MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE \
+ 	max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
+ #define MLX5E_REP_PARAMS_DEF_NUM_CHANNELS 1
++#define MLX5E_REP_PARAMS_DEF_LOG_RQ_SIZE 0x8
+ 
+ static const char mlx5e_rep_driver_name[] = "mlx5e_rep";
+ 
+@@ -854,6 +855,8 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
+ 
+ 	/* RQ */
+ 	mlx5e_build_rq_params(mdev, params);
++	if (!mlx5e_is_uplink_rep(priv) && mlx5_core_is_ecpf(mdev))
++		params->log_rq_mtu_frames = MLX5E_REP_PARAMS_DEF_LOG_RQ_SIZE;
+ 
+ 	/* If netdev is already registered (e.g. move from nic profile to uplink,
+ 	 * RTNL lock must be held before triggering netdev notifiers.
+@@ -885,6 +888,8 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev,
+ 	netdev->ethtool_ops = &mlx5e_rep_ethtool_ops;
+ 
+ 	netdev->watchdog_timeo    = 15 * HZ;
++	if (mlx5_core_is_ecpf(mdev))
++		netdev->tx_queue_len = 1 << MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE;
+ 
+ #if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
+ 	netdev->hw_features    |= NETIF_F_HW_TC;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
+index 1d60465cc2ca4f..2f7a543feca623 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
+@@ -166,6 +166,9 @@ mlx5e_test_loopback_validate(struct sk_buff *skb,
+ 	struct udphdr *udph;
+ 	struct iphdr *iph;
+ 
++	if (skb_linearize(skb))
++		goto out;
++
+ 	/* We are only going to peek, no need to clone the SKB */
+ 	if (MLX5E_TEST_PKT_SIZE - ETH_HLEN > skb_headlen(skb))
+ 		goto out;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
+index ed977ae75fab89..4bba2884c1c058 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
+@@ -85,6 +85,19 @@ int mlx5_esw_ipsec_rx_setup_modify_header(struct mlx5e_ipsec_sa_entry *sa_entry,
+ 	return err;
+ }
+ 
++void mlx5_esw_ipsec_rx_rule_add_match_obj(struct mlx5e_ipsec_sa_entry *sa_entry,
++					  struct mlx5_flow_spec *spec)
++{
++	MLX5_SET(fte_match_param, spec->match_criteria,
++		 misc_parameters_2.metadata_reg_c_1,
++		 ESW_IPSEC_RX_MAPPED_ID_MATCH_MASK);
++	MLX5_SET(fte_match_param, spec->match_value,
++		 misc_parameters_2.metadata_reg_c_1,
++		 sa_entry->rx_mapped_id << ESW_ZONE_ID_BITS);
++
++	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
++}
++
+ void mlx5_esw_ipsec_rx_id_mapping_remove(struct mlx5e_ipsec_sa_entry *sa_entry)
+ {
+ 	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h
+index ac9c65b89166e6..514c15258b1d13 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h
+@@ -20,6 +20,8 @@ int mlx5_esw_ipsec_rx_ipsec_obj_id_search(struct mlx5e_priv *priv, u32 id,
+ void mlx5_esw_ipsec_tx_create_attr_set(struct mlx5e_ipsec *ipsec,
+ 				       struct mlx5e_ipsec_tx_create_attr *attr);
+ void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev);
++void mlx5_esw_ipsec_rx_rule_add_match_obj(struct mlx5e_ipsec_sa_entry *sa_entry,
++					  struct mlx5_flow_spec *spec);
+ #else
+ static inline void mlx5_esw_ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
+ 						     struct mlx5e_ipsec_rx_create_attr *attr) {}
+@@ -48,5 +50,8 @@ static inline void mlx5_esw_ipsec_tx_create_attr_set(struct mlx5e_ipsec *ipsec,
+ 						     struct mlx5e_ipsec_tx_create_attr *attr) {}
+ 
+ static inline void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev) {}
++static inline void
++mlx5_esw_ipsec_rx_rule_add_match_obj(struct mlx5e_ipsec_sa_entry *sa_entry,
++				     struct mlx5_flow_spec *spec) {}
+ #endif /* CONFIG_MLX5_ESWITCH */
+ #endif /* __MLX5_ESW_IPSEC_FS_H__ */
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
+index 8587cd572da536..bdb825aa872688 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
+@@ -96,7 +96,7 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw)
+ 	if (!flow_group_in)
+ 		return -ENOMEM;
+ 
+-	ft_attr.max_fte = POOL_NEXT_SIZE;
++	ft_attr.max_fte = MLX5_FS_MAX_POOL_SIZE;
+ 	ft_attr.prio = LEGACY_FDB_PRIO;
+ 	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
+ 	if (IS_ERR(fdb)) {
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/events.c b/drivers/net/ethernet/mellanox/mlx5/core/events.c
+index d91ea53eb394d1..fc6e56305cbbc8 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/events.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/events.c
+@@ -163,11 +163,16 @@ static int temp_warn(struct notifier_block *nb, unsigned long type, void *data)
+ 	u64 value_msb;
+ 
+ 	value_lsb = be64_to_cpu(eqe->data.temp_warning.sensor_warning_lsb);
++	/* bit 1-63 are not supported for NICs,
++	 * hence read only bit 0 (asic) from lsb.
++	 */
++	value_lsb &= 0x1;
+ 	value_msb = be64_to_cpu(eqe->data.temp_warning.sensor_warning_msb);
+ 
+-	mlx5_core_warn(events->dev,
+-		       "High temperature on sensors with bit set %llx %llx",
+-		       value_msb, value_lsb);
++	if (net_ratelimit())
++		mlx5_core_warn(events->dev,
++			       "High temperature on sensors with bit set %llx %llx",
++			       value_msb, value_lsb);
+ 
+ 	return NOTIFY_OK;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.c
+index c14590acc77260..f6abfd00d7e68c 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.c
+@@ -50,10 +50,12 @@ mlx5_ft_pool_get_avail_sz(struct mlx5_core_dev *dev, enum fs_flow_table_type tab
+ 	int i, found_i = -1;
+ 
+ 	for (i = ARRAY_SIZE(FT_POOLS) - 1; i >= 0; i--) {
+-		if (dev->priv.ft_pool->ft_left[i] && FT_POOLS[i] >= desired_size &&
++		if (dev->priv.ft_pool->ft_left[i] &&
++		    (FT_POOLS[i] >= desired_size ||
++		     desired_size == MLX5_FS_MAX_POOL_SIZE) &&
+ 		    FT_POOLS[i] <= max_ft_size) {
+ 			found_i = i;
+-			if (desired_size != POOL_NEXT_SIZE)
++			if (desired_size != MLX5_FS_MAX_POOL_SIZE)
+ 				break;
+ 		}
+ 	}
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.h
+index 25f4274b372b56..173e312db7204f 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.h
+@@ -7,8 +7,6 @@
+ #include <linux/mlx5/driver.h>
+ #include "fs_core.h"
+ 
+-#define POOL_NEXT_SIZE 0
+-
+ int mlx5_ft_pool_init(struct mlx5_core_dev *dev);
+ void mlx5_ft_pool_destroy(struct mlx5_core_dev *dev);
+ 
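
With POOL_NEXT_SIZE removed, the "largest available" request is now spelled MLX5_FS_MAX_POOL_SIZE (defined in fs_core.h, outside this hunk), and the loop in fs_ft_pool.c needs the explicit OR because the sentinel no longer compares as smaller than every bucket. A standalone sketch of the selection rule, assuming pools[] is sorted largest-first and the sentinel is bigger than any real bucket:

    #define FS_MAX_POOL_SIZE (1 << 30)   /* assumed sentinel value */

    static int pick_bucket(const unsigned int *pools, const int *left, int n,
                           unsigned int desired, unsigned int max_ft)
    {
        int i, found = -1;

        for (i = n - 1; i >= 0; i--) {   /* smallest bucket first */
            if (left[i] &&
                (pools[i] >= desired || desired == FS_MAX_POOL_SIZE) &&
                pools[i] <= max_ft) {
                found = i;
                if (desired != FS_MAX_POOL_SIZE)
                    break;               /* smallest bucket that fits */
            }
        }
        return found;  /* for the sentinel: largest non-empty bucket <= max_ft */
    }
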
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
+index a6329ca2d9bffb..52c8035547be5c 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
+@@ -799,6 +799,7 @@ static void poll_health(struct timer_list *t)
+ 	health->prev = count;
+ 	if (health->miss_counter == MAX_MISSES) {
+ 		mlx5_core_err(dev, "device's health compromised - reached miss count\n");
++		health->synd = ioread8(&h->synd);
+ 		print_health_info(dev);
+ 		queue_work(health->wq, &health->report_work);
+ 	}
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
+index 711d14dea2485f..d313cb7f0ed88c 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
+@@ -161,7 +161,8 @@ mlx5_chains_create_table(struct mlx5_fs_chains *chains,
+ 		ft_attr.flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
+ 				  MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
+ 
+-	sz = (chain == mlx5_chains_get_nf_ft_chain(chains)) ? FT_TBL_SZ : POOL_NEXT_SIZE;
++	sz = (chain == mlx5_chains_get_nf_ft_chain(chains)) ?
++		FT_TBL_SZ : MLX5_FS_MAX_POOL_SIZE;
+ 	ft_attr.max_fte = sz;
+ 
+ 	/* We use chains_default_ft(chains) as the table's next_ft till
+diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
+index a400616a24d416..79e94632533c80 100644
+--- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
++++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
+@@ -544,6 +544,8 @@ struct net_device *fbnic_netdev_alloc(struct fbnic_dev *fbd)
+ 	fbnic_rss_key_fill(fbn->rss_key);
+ 	fbnic_rss_init_en_mask(fbn);
+ 
++	netdev->priv_flags |= IFF_UNICAST_FLT;
++
+ 	netdev->features |=
+ 		NETIF_F_RXHASH |
+ 		NETIF_F_SG |
+diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
+index 547255ca1c4efa..812ad9d61676a3 100644
+--- a/drivers/net/ethernet/microchip/lan743x_main.c
++++ b/drivers/net/ethernet/microchip/lan743x_main.c
+@@ -3466,6 +3466,7 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter,
+ 				 struct pci_dev *pdev)
+ {
+ 	struct lan743x_tx *tx;
++	u32 sgmii_ctl;
+ 	int index;
+ 	int ret;
+ 
+@@ -3478,6 +3479,15 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter,
+ 		spin_lock_init(&adapter->eth_syslock_spinlock);
+ 		mutex_init(&adapter->sgmii_rw_lock);
+ 		pci11x1x_set_rfe_rd_fifo_threshold(adapter);
++		sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL);
++		if (adapter->is_sgmii_en) {
++			sgmii_ctl |= SGMII_CTL_SGMII_ENABLE_;
++			sgmii_ctl &= ~SGMII_CTL_SGMII_POWER_DN_;
++		} else {
++			sgmii_ctl &= ~SGMII_CTL_SGMII_ENABLE_;
++			sgmii_ctl |= SGMII_CTL_SGMII_POWER_DN_;
++		}
++		lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
+ 	} else {
+ 		adapter->max_tx_channels = LAN743X_MAX_TX_CHANNELS;
+ 		adapter->used_tx_channels = LAN743X_USED_TX_CHANNELS;
+@@ -3526,7 +3536,6 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter,
+ 
+ static int lan743x_mdiobus_init(struct lan743x_adapter *adapter)
+ {
+-	u32 sgmii_ctl;
+ 	int ret;
+ 
+ 	adapter->mdiobus = devm_mdiobus_alloc(&adapter->pdev->dev);
+@@ -3538,10 +3547,6 @@ static int lan743x_mdiobus_init(struct lan743x_adapter *adapter)
+ 	adapter->mdiobus->priv = (void *)adapter;
+ 	if (adapter->is_pci11x1x) {
+ 		if (adapter->is_sgmii_en) {
+-			sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL);
+-			sgmii_ctl |= SGMII_CTL_SGMII_ENABLE_;
+-			sgmii_ctl &= ~SGMII_CTL_SGMII_POWER_DN_;
+-			lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
+ 			netif_dbg(adapter, drv, adapter->netdev,
+ 				  "SGMII operation\n");
+ 			adapter->mdiobus->read = lan743x_mdiobus_read_c22;
+@@ -3552,10 +3557,6 @@ static int lan743x_mdiobus_init(struct lan743x_adapter *adapter)
+ 			netif_dbg(adapter, drv, adapter->netdev,
+ 				  "lan743x-mdiobus-c45\n");
+ 		} else {
+-			sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL);
+-			sgmii_ctl &= ~SGMII_CTL_SGMII_ENABLE_;
+-			sgmii_ctl |= SGMII_CTL_SGMII_POWER_DN_;
+-			lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
+ 			netif_dbg(adapter, drv, adapter->netdev,
+ 				  "RGMII operation\n");
+ 			// Only C22 support when RGMII I/F
+diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
+index 36802e0a8b570f..9bac4083d8a091 100644
+--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
++++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
+@@ -1044,7 +1044,7 @@ static u32 mana_gd_write_client_oob(const struct gdma_wqe_request *wqe_req,
+ 	header->inline_oob_size_div4 = client_oob_size / sizeof(u32);
+ 
+ 	if (oob_in_sgl) {
+-		WARN_ON_ONCE(!pad_data || wqe_req->num_sge < 2);
++		WARN_ON_ONCE(wqe_req->num_sge < 2);
+ 
+ 		header->client_oob_in_sgl = 1;
+ 
+diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
+index 5ed2818bac257c..85bb5121cd245d 100644
+--- a/drivers/net/ethernet/realtek/r8169_main.c
++++ b/drivers/net/ethernet/realtek/r8169_main.c
+@@ -2850,6 +2850,32 @@ static u32 rtl_csi_read(struct rtl8169_private *tp, int addr)
+ 		RTL_R32(tp, CSIDR) : ~0;
+ }
+ 
++static void rtl_disable_zrxdc_timeout(struct rtl8169_private *tp)
++{
++	struct pci_dev *pdev = tp->pci_dev;
++	u32 csi;
++	int rc;
++	u8 val;
++
++#define RTL_GEN3_RELATED_OFF	0x0890
++#define RTL_GEN3_ZRXDC_NONCOMPL	0x1
++	if (pdev->cfg_size > RTL_GEN3_RELATED_OFF) {
++		rc = pci_read_config_byte(pdev, RTL_GEN3_RELATED_OFF, &val);
++		if (rc == PCIBIOS_SUCCESSFUL) {
++			val &= ~RTL_GEN3_ZRXDC_NONCOMPL;
++			rc = pci_write_config_byte(pdev, RTL_GEN3_RELATED_OFF,
++						   val);
++			if (rc == PCIBIOS_SUCCESSFUL)
++				return;
++		}
++	}
++
++	netdev_notice_once(tp->dev,
++		"No native access to PCI extended config space, falling back to CSI\n");
++	csi = rtl_csi_read(tp, RTL_GEN3_RELATED_OFF);
++	rtl_csi_write(tp, RTL_GEN3_RELATED_OFF, csi & ~RTL_GEN3_ZRXDC_NONCOMPL);
++}
++
+ static void rtl_set_aspm_entry_latency(struct rtl8169_private *tp, u8 val)
+ {
+ 	struct pci_dev *pdev = tp->pci_dev;
+@@ -3816,6 +3842,7 @@ static void rtl_hw_start_8125b(struct rtl8169_private *tp)
+ 
+ static void rtl_hw_start_8126a(struct rtl8169_private *tp)
+ {
++	rtl_disable_zrxdc_timeout(tp);
+ 	rtl_set_def_aspm_entry_latency(tp);
+ 	rtl_hw_start_8125_common(tp);
+ }
+@@ -5231,6 +5258,7 @@ static int r8169_mdio_register(struct rtl8169_private *tp)
+ 	new_bus->priv = tp;
+ 	new_bus->parent = &pdev->dev;
+ 	new_bus->irq[0] = PHY_MAC_INTERRUPT;
++	new_bus->phy_mask = GENMASK(31, 1);
+ 	snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x-%x",
+ 		 pci_domain_nr(pdev->bus), pci_dev_id(pdev));
+ 
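
rtl_disable_zrxdc_timeout() above tries native config space first and only falls back to CSI when the extended offset is unreachable or the accessors fail. The same try-native-then-report pattern, reduced to a standalone helper (names hypothetical):

    #include <linux/pci.h>

    #define GEN3_RELATED_OFF    0x0890
    #define GEN3_ZRXDC_NONCOMPL 0x1

    /* Returns true if the bit was cleared through native config access;
     * on false the caller falls back to an indirect mechanism (CSI). */
    static bool clear_zrxdc_native(struct pci_dev *pdev)
    {
        u8 val;

        if (pdev->cfg_size <= GEN3_RELATED_OFF)
            return false;

        if (pci_read_config_byte(pdev, GEN3_RELATED_OFF, &val) !=
            PCIBIOS_SUCCESSFUL)
            return false;

        val &= ~GEN3_ZRXDC_NONCOMPL;
        return pci_write_config_byte(pdev, GEN3_RELATED_OFF, val) ==
               PCIBIOS_SUCCESSFUL;
    }
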
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
+index ab7c2750c10425..702ea5a00b56d3 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
+@@ -590,6 +590,9 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
+ 	if (ret)
+ 		goto err_disable_device;
+ 
++	plat->tx_fifo_size = SZ_16K * plat->tx_queues_to_use;
++	plat->rx_fifo_size = SZ_16K * plat->rx_queues_to_use;
++
+ 	if (dev_of_node(&pdev->dev))
+ 		ret = loongson_dwmac_dt_config(pdev, plat, &res);
+ 	else
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+index 50073bdade46e4..8f90eae9377411 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+@@ -33,6 +33,7 @@ struct rk_gmac_ops {
+ 	void (*set_clock_selection)(struct rk_priv_data *bsp_priv, bool input,
+ 				    bool enable);
+ 	void (*integrated_phy_powerup)(struct rk_priv_data *bsp_priv);
++	bool php_grf_required;
+ 	bool regs_valid;
+ 	u32 regs[];
+ };
+@@ -1263,6 +1264,7 @@ static const struct rk_gmac_ops rk3576_ops = {
+ 	.set_rgmii_speed = rk3576_set_gmac_speed,
+ 	.set_rmii_speed = rk3576_set_gmac_speed,
+ 	.set_clock_selection = rk3576_set_clock_selection,
++	.php_grf_required = true,
+ 	.regs_valid = true,
+ 	.regs = {
+ 		0x2a220000, /* gmac0 */
+@@ -1410,6 +1412,7 @@ static const struct rk_gmac_ops rk3588_ops = {
+ 	.set_rgmii_speed = rk3588_set_gmac_speed,
+ 	.set_rmii_speed = rk3588_set_gmac_speed,
+ 	.set_clock_selection = rk3588_set_clock_selection,
++	.php_grf_required = true,
+ 	.regs_valid = true,
+ 	.regs = {
+ 		0xfe1b0000, /* gmac0 */
+@@ -1830,8 +1833,22 @@ static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
+ 
+ 	bsp_priv->grf = syscon_regmap_lookup_by_phandle(dev->of_node,
+ 							"rockchip,grf");
+-	bsp_priv->php_grf = syscon_regmap_lookup_by_phandle(dev->of_node,
+-							    "rockchip,php-grf");
++	if (IS_ERR(bsp_priv->grf)) {
++		dev_err_probe(dev, PTR_ERR(bsp_priv->grf),
++			      "failed to lookup rockchip,grf\n");
++		return ERR_CAST(bsp_priv->grf);
++	}
++
++	if (ops->php_grf_required) {
++		bsp_priv->php_grf =
++			syscon_regmap_lookup_by_phandle(dev->of_node,
++							"rockchip,php-grf");
++		if (IS_ERR(bsp_priv->php_grf)) {
++			dev_err_probe(dev, PTR_ERR(bsp_priv->php_grf),
++				      "failed to lookup rockchip,php-grf\n");
++			return ERR_CAST(bsp_priv->php_grf);
++		}
++	}
+ 
+ 	if (plat->phy_node) {
+ 		bsp_priv->integrated_phy = of_property_read_bool(plat->phy_node,
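
The lookup changes above turn silently-carried ERR_PTR values into hard probe failures, and the new php_grf_required flag keeps the stricter check from breaking SoCs whose ops never touch php-grf. A reduced sketch of the conditional-lookup shape:

    #include <linux/mfd/syscon.h>
    #include <linux/regmap.h>

    /* Sketch: mandatory lookup only when the per-SoC ops ask for it;
     * dev_err_probe() keeps -EPROBE_DEFER quiet in the log. */
    static struct regmap *lookup_php_grf(struct device *dev, bool required)
    {
        struct regmap *grf;

        if (!required)
            return NULL;

        grf = syscon_regmap_lookup_by_phandle(dev->of_node,
                                              "rockchip,php-grf");
        if (IS_ERR(grf))
            dev_err_probe(dev, PTR_ERR(grf),
                          "failed to lookup rockchip,php-grf\n");
        return grf;
    }
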
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+index 4a0ae92b3055c2..ce8367b63823a2 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+@@ -964,7 +964,7 @@ static int sun8i_dwmac_set_syscon(struct device *dev,
+ 		/* of_mdio_parse_addr returns a valid (0 ~ 31) PHY
+ 		 * address. No need to mask it again.
+ 		 */
+-		reg |= 1 << H3_EPHY_ADDR_SHIFT;
++		reg |= ret << H3_EPHY_ADDR_SHIFT;
+ 	} else {
+ 		/* For SoCs without internal PHY the PHY selection bit should be
+ 		 * set to 0 (external PHY).
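
The one-line fix above stops hard-coding EPHY address 1 and instead shifts in the address that of_mdio_parse_addr() returned earlier in the function (ret). A sketch of that derivation, with the shift value assumed rather than taken from the driver header:

    #include <linux/of_mdio.h>

    #define EPHY_ADDR_SHIFT 20   /* assumed; the driver's H3_EPHY_ADDR_SHIFT */

    static int ephy_addr_bits(struct device *dev, struct device_node *phy_np)
    {
        int addr = of_mdio_parse_addr(dev, phy_np); /* 0..31 or -errno */

        return addr < 0 ? addr : addr << EPHY_ADDR_SHIFT;
    }
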
+diff --git a/drivers/net/ethernet/tehuti/tn40.c b/drivers/net/ethernet/tehuti/tn40.c
+index 259bdac24cf211..558b791a97eddd 100644
+--- a/drivers/net/ethernet/tehuti/tn40.c
++++ b/drivers/net/ethernet/tehuti/tn40.c
+@@ -1778,7 +1778,7 @@ static int tn40_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	ret = tn40_phy_register(priv);
+ 	if (ret) {
+ 		dev_err(&pdev->dev, "failed to set up PHY.\n");
+-		goto err_free_irq;
++		goto err_cleanup_swnodes;
+ 	}
+ 
+ 	ret = tn40_priv_init(priv);
+@@ -1795,6 +1795,8 @@ static int tn40_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	return 0;
+ err_unregister_phydev:
+ 	tn40_phy_unregister(priv);
++err_cleanup_swnodes:
++	tn40_swnodes_cleanup(priv);
+ err_free_irq:
+ 	pci_free_irq_vectors(pdev);
+ err_unset_drvdata:
+@@ -1816,6 +1818,7 @@ static void tn40_remove(struct pci_dev *pdev)
+ 	unregister_netdev(ndev);
+ 
+ 	tn40_phy_unregister(priv);
++	tn40_swnodes_cleanup(priv);
+ 	pci_free_irq_vectors(priv->pdev);
+ 	pci_set_drvdata(pdev, NULL);
+ 	iounmap(priv->regs);
+@@ -1832,6 +1835,10 @@ static const struct pci_device_id tn40_id_table[] = {
+ 			 PCI_VENDOR_ID_ASUSTEK, 0x8709) },
+ 	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_TEHUTI, 0x4022,
+ 			 PCI_VENDOR_ID_EDIMAX, 0x8103) },
++	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_TEHUTI, PCI_DEVICE_ID_TEHUTI_TN9510,
++			 PCI_VENDOR_ID_TEHUTI, 0x3015) },
++	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_TEHUTI, PCI_DEVICE_ID_TEHUTI_TN9510,
++			 PCI_VENDOR_ID_EDIMAX, 0x8102) },
+ 	{ }
+ };
+ 
+diff --git a/drivers/net/ethernet/tehuti/tn40.h b/drivers/net/ethernet/tehuti/tn40.h
+index 490781fe512053..25da8686d4691d 100644
+--- a/drivers/net/ethernet/tehuti/tn40.h
++++ b/drivers/net/ethernet/tehuti/tn40.h
+@@ -4,10 +4,13 @@
+ #ifndef _TN40_H_
+ #define _TN40_H_
+ 
++#include <linux/property.h>
+ #include "tn40_regs.h"
+ 
+ #define TN40_DRV_NAME "tn40xx"
+ 
++#define PCI_DEVICE_ID_TEHUTI_TN9510	0x4025
++
+ #define TN40_MDIO_SPEED_1MHZ (1)
+ #define TN40_MDIO_SPEED_6MHZ (6)
+ 
+@@ -102,10 +105,39 @@ struct tn40_txdb {
+ 	int size; /* Number of elements in the db */
+ };
+ 
++#define NODE_PROP(_NAME, _PROP)	(		\
++	(const struct software_node) {		\
++		.name = _NAME,			\
++		.properties = _PROP,		\
++	})
++
++#define NODE_PAR_PROP(_NAME, _PAR, _PROP)	(	\
++	(const struct software_node) {		\
++		.name = _NAME,			\
++		.parent = _PAR,			\
++		.properties = _PROP,		\
++	})
++
++enum tn40_swnodes {
++	SWNODE_MDIO,
++	SWNODE_PHY,
++	SWNODE_MAX
++};
++
++struct tn40_nodes {
++	char phy_name[32];
++	char mdio_name[32];
++	struct property_entry phy_props[3];
++	struct software_node swnodes[SWNODE_MAX];
++	const struct software_node *group[SWNODE_MAX + 1];
++};
++
+ struct tn40_priv {
+ 	struct net_device *ndev;
+ 	struct pci_dev *pdev;
+ 
++	struct tn40_nodes nodes;
++
+ 	struct napi_struct napi;
+ 	/* RX FIFOs: 1 for data (full) descs, and 2 for free descs */
+ 	struct tn40_rxd_fifo rxd_fifo0;
+@@ -225,6 +257,7 @@ static inline void tn40_write_reg(struct tn40_priv *priv, u32 reg, u32 val)
+ 
+ int tn40_set_link_speed(struct tn40_priv *priv, u32 speed);
+ 
++void tn40_swnodes_cleanup(struct tn40_priv *priv);
+ int tn40_mdiobus_init(struct tn40_priv *priv);
+ 
+ int tn40_phy_register(struct tn40_priv *priv);
+diff --git a/drivers/net/ethernet/tehuti/tn40_mdio.c b/drivers/net/ethernet/tehuti/tn40_mdio.c
+index af18615d64a8a2..5bb0cbc87d064e 100644
+--- a/drivers/net/ethernet/tehuti/tn40_mdio.c
++++ b/drivers/net/ethernet/tehuti/tn40_mdio.c
+@@ -14,6 +14,8 @@
+ 	 (FIELD_PREP(TN40_MDIO_PRTAD_MASK, (port))))
+ #define TN40_MDIO_CMD_READ BIT(15)
+ 
++#define AQR105_FIRMWARE "tehuti/aqr105-tn40xx.cld"
++
+ static void tn40_mdio_set_speed(struct tn40_priv *priv, u32 speed)
+ {
+ 	void __iomem *regs = priv->regs;
+@@ -111,6 +113,56 @@ static int tn40_mdio_write_c45(struct mii_bus *mii_bus, int addr, int devnum,
+ 	return  tn40_mdio_write(mii_bus->priv, addr, devnum, regnum, val);
+ }
+ 
++/* registers an mdio node and an aqr105 PHY at address 1
++ * tn40_mdio-%id {
++ *	ethernet-phy@1 {
++ *		compatible = "ethernet-phy-id03a1.b4a3";
++ *		reg = <1>;
++ *		firmware-name = AQR105_FIRMWARE;
++ *	};
++ * };
++ */
++static int tn40_swnodes_register(struct tn40_priv *priv)
++{
++	struct tn40_nodes *nodes = &priv->nodes;
++	struct pci_dev *pdev = priv->pdev;
++	struct software_node *swnodes;
++	u32 id;
++
++	id = pci_dev_id(pdev);
++
++	snprintf(nodes->phy_name, sizeof(nodes->phy_name), "ethernet-phy@1");
++	snprintf(nodes->mdio_name, sizeof(nodes->mdio_name), "tn40_mdio-%x",
++		 id);
++
++	swnodes = nodes->swnodes;
++
++	swnodes[SWNODE_MDIO] = NODE_PROP(nodes->mdio_name, NULL);
++
++	nodes->phy_props[0] = PROPERTY_ENTRY_STRING("compatible",
++						    "ethernet-phy-id03a1.b4a3");
++	nodes->phy_props[1] = PROPERTY_ENTRY_U32("reg", 1);
++	nodes->phy_props[2] = PROPERTY_ENTRY_STRING("firmware-name",
++						    AQR105_FIRMWARE);
++	swnodes[SWNODE_PHY] = NODE_PAR_PROP(nodes->phy_name,
++					    &swnodes[SWNODE_MDIO],
++					    nodes->phy_props);
++
++	nodes->group[SWNODE_PHY] = &swnodes[SWNODE_PHY];
++	nodes->group[SWNODE_MDIO] = &swnodes[SWNODE_MDIO];
++	return software_node_register_node_group(nodes->group);
++}
++
++void tn40_swnodes_cleanup(struct tn40_priv *priv)
++{
++	/* cleanup of swnodes is only needed for AQR105-based cards */
++	if (priv->pdev->device == PCI_DEVICE_ID_TEHUTI_TN9510) {
++		fwnode_handle_put(dev_fwnode(&priv->mdio->dev));
++		device_remove_software_node(&priv->mdio->dev);
++		software_node_unregister_node_group(priv->nodes.group);
++	}
++}
++
+ int tn40_mdiobus_init(struct tn40_priv *priv)
+ {
+ 	struct pci_dev *pdev = priv->pdev;
+@@ -129,14 +181,40 @@ int tn40_mdiobus_init(struct tn40_priv *priv)
+ 
+ 	bus->read_c45 = tn40_mdio_read_c45;
+ 	bus->write_c45 = tn40_mdio_write_c45;
++	priv->mdio = bus;
++
++	/* provide swnodes for AQR105-based cards only */
++	if (pdev->device == PCI_DEVICE_ID_TEHUTI_TN9510) {
++		ret = tn40_swnodes_register(priv);
++		if (ret) {
++			pr_err("swnodes failed\n");
++			return ret;
++		}
++
++		ret = device_add_software_node(&bus->dev,
++					       priv->nodes.group[SWNODE_MDIO]);
++		if (ret) {
++			dev_err(&pdev->dev,
++				"device_add_software_node failed: %d\n", ret);
++			goto err_swnodes_unregister;
++		}
++	}
+ 
+ 	ret = devm_mdiobus_register(&pdev->dev, bus);
+ 	if (ret) {
+ 		dev_err(&pdev->dev, "failed to register mdiobus %d %u %u\n",
+ 			ret, bus->state, MDIOBUS_UNREGISTERED);
+-		return ret;
++		goto err_swnodes_cleanup;
+ 	}
+ 	tn40_mdio_set_speed(priv, TN40_MDIO_SPEED_6MHZ);
+-	priv->mdio = bus;
+ 	return 0;
++
++err_swnodes_unregister:
++	software_node_unregister_node_group(priv->nodes.group);
++	return ret;
++err_swnodes_cleanup:
++	tn40_swnodes_cleanup(priv);
++	return ret;
+ }
++
++MODULE_FIRMWARE(AQR105_FIRMWARE);
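
For boards with no firmware description of the PHY, the driver above fabricates one: a software-node group with an MDIO parent and a PHY child carrying compatible/reg/firmware-name properties, attached to the bus device before mdiobus registration. A reduced standalone sketch of that shape (static names in place of the per-device strings):

    #include <linux/property.h>

    static const struct property_entry demo_phy_props[] = {
        PROPERTY_ENTRY_STRING("compatible", "ethernet-phy-id03a1.b4a3"),
        PROPERTY_ENTRY_U32("reg", 1),
        { }
    };

    static const struct software_node demo_mdio_node = {
        .name = "demo-mdio",
    };

    static const struct software_node demo_phy_node = {
        .name = "ethernet-phy@1",
        .parent = &demo_mdio_node,
        .properties = demo_phy_props,
    };

    static const struct software_node *demo_group[] = {
        &demo_mdio_node, &demo_phy_node, NULL
    };

    /* Usage: register the group once, then bind the parent node to the
     * MDIO bus device so the PHY child is found during the bus scan:
     *
     *     software_node_register_node_group(demo_group);
     *     device_add_software_node(&bus->dev, &demo_mdio_node);
     */
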
+diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
+index 557cc71b9dd22c..0eee1a0527b5c7 100644
+--- a/drivers/net/ethernet/ti/cpsw_new.c
++++ b/drivers/net/ethernet/ti/cpsw_new.c
+@@ -1417,6 +1417,7 @@ static int cpsw_create_ports(struct cpsw_common *cpsw)
+ 		ndev->netdev_ops = &cpsw_netdev_ops;
+ 		ndev->ethtool_ops = &cpsw_ethtool_ops;
+ 		SET_NETDEV_DEV(ndev, dev);
++		ndev->dev.of_node = slave_data->slave_node;
+ 
+ 		if (!napi_ndev) {
+ 			/* CPSW Host port CPDMA interface is shared between
+diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
+index 753215ebc67c70..a036910f60828c 100644
+--- a/drivers/net/ieee802154/ca8210.c
++++ b/drivers/net/ieee802154/ca8210.c
+@@ -1446,8 +1446,7 @@ static u8 mcps_data_request(
+ 	command.pdata.data_req.src_addr_mode = src_addr_mode;
+ 	command.pdata.data_req.dst.mode = dst_address_mode;
+ 	if (dst_address_mode != MAC_MODE_NO_ADDR) {
+-		command.pdata.data_req.dst.pan_id[0] = LS_BYTE(dst_pan_id);
+-		command.pdata.data_req.dst.pan_id[1] = MS_BYTE(dst_pan_id);
++		put_unaligned_le16(dst_pan_id, command.pdata.data_req.dst.pan_id);
+ 		if (dst_address_mode == MAC_MODE_SHORT_ADDR) {
+ 			command.pdata.data_req.dst.address[0] = LS_BYTE(
+ 				dst_addr->short_address
+@@ -1795,12 +1794,12 @@ static int ca8210_skb_rx(
+ 	}
+ 	hdr.source.mode = data_ind[0];
+ 	dev_dbg(&priv->spi->dev, "srcAddrMode: %#03x\n", hdr.source.mode);
+-	hdr.source.pan_id = *(u16 *)&data_ind[1];
++	hdr.source.pan_id = cpu_to_le16(get_unaligned_le16(&data_ind[1]));
+ 	dev_dbg(&priv->spi->dev, "srcPanId: %#06x\n", hdr.source.pan_id);
+ 	memcpy(&hdr.source.extended_addr, &data_ind[3], 8);
+ 	hdr.dest.mode = data_ind[11];
+ 	dev_dbg(&priv->spi->dev, "dstAddrMode: %#03x\n", hdr.dest.mode);
+-	hdr.dest.pan_id = *(u16 *)&data_ind[12];
++	hdr.dest.pan_id = cpu_to_le16(get_unaligned_le16(&data_ind[12]));
+ 	dev_dbg(&priv->spi->dev, "dstPanId: %#06x\n", hdr.dest.pan_id);
+ 	memcpy(&hdr.dest.extended_addr, &data_ind[14], 8);
+ 
+@@ -1927,7 +1926,7 @@ static int ca8210_skb_tx(
+ 	status =  mcps_data_request(
+ 		header.source.mode,
+ 		header.dest.mode,
+-		header.dest.pan_id,
++		le16_to_cpu(header.dest.pan_id),
+ 		(union macaddr *)&header.dest.extended_addr,
+ 		skb->len - mac_len,
+ 		&skb->data[mac_len],
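
All three ca8210 hunks above replace per-byte stores and a *(u16 *) cast — the latter both endian- and alignment-unsafe on the SPI data buffer — with the unaligned little-endian accessors. A minimal sketch of the round trip:

    #include <linux/types.h>
    #include <linux/unaligned.h>   /* asm/unaligned.h on older kernels */

    /* host-order PAN ID into a wire buffer, and back into a __le16 field */
    static __le16 pan_id_roundtrip(u8 *wire, u16 pan_id)
    {
        put_unaligned_le16(pan_id, wire);             /* host -> frame bytes */
        return cpu_to_le16(get_unaligned_le16(wire)); /* frame bytes -> __le16 */
    }
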
+diff --git a/drivers/net/mctp/mctp-i2c.c b/drivers/net/mctp/mctp-i2c.c
+index 6622de48fc9e76..503a9174321c66 100644
+--- a/drivers/net/mctp/mctp-i2c.c
++++ b/drivers/net/mctp/mctp-i2c.c
+@@ -538,7 +538,7 @@ static void mctp_i2c_xmit(struct mctp_i2c_dev *midev, struct sk_buff *skb)
+ 		rc = __i2c_transfer(midev->adapter, &msg, 1);
+ 
+ 		/* on tx errors, the flow can no longer be considered valid */
+-		if (rc)
++		if (rc < 0)
+ 			mctp_i2c_invalidate_tx_flow(midev, skb);
+ 
+ 		break;
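
The check changes because __i2c_transfer() returns the number of messages transferred on success (here 1) or a negative errno, so the old `if (rc)` treated every successful send as an error and invalidated the flow:

    rc = __i2c_transfer(midev->adapter, &msg, 1); /* 1 on success, -errno on error */
    if (rc < 0)
        mctp_i2c_invalidate_tx_flow(midev, skb);  /* tx failed: drop the flow */
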
+diff --git a/drivers/net/phy/nxp-c45-tja11xx.c b/drivers/net/phy/nxp-c45-tja11xx.c
+index 9788b820c6be72..99a5eee77bec10 100644
+--- a/drivers/net/phy/nxp-c45-tja11xx.c
++++ b/drivers/net/phy/nxp-c45-tja11xx.c
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0
+ /* NXP C45 PHY driver
+- * Copyright 2021-2023 NXP
++ * Copyright 2021-2025 NXP
+  * Author: Radu Pirea <radu-nicolae.pirea@oss.nxp.com>
+  */
+ 
+@@ -18,6 +18,8 @@
+ 
+ #include "nxp-c45-tja11xx.h"
+ 
++#define PHY_ID_MASK			GENMASK(31, 4)
++/* Same id: TJA1103, TJA1104 */
+ #define PHY_ID_TJA_1103			0x001BB010
+ #define PHY_ID_TJA_1120			0x001BB031
+ 
+@@ -1930,6 +1932,30 @@ static void tja1120_nmi_handler(struct phy_device *phydev,
+ 	}
+ }
+ 
++static int nxp_c45_macsec_ability(struct phy_device *phydev)
++{
++	bool macsec_ability;
++	int phy_abilities;
++
++	phy_abilities = phy_read_mmd(phydev, MDIO_MMD_VEND1,
++				     VEND1_PORT_ABILITIES);
++	macsec_ability = !!(phy_abilities & MACSEC_ABILITY);
++
++	return macsec_ability;
++}
++
++static int tja1103_match_phy_device(struct phy_device *phydev)
++{
++	return phy_id_compare(phydev->phy_id, PHY_ID_TJA_1103, PHY_ID_MASK) &&
++	       !nxp_c45_macsec_ability(phydev);
++}
++
++static int tja1104_match_phy_device(struct phy_device *phydev)
++{
++	return phy_id_compare(phydev->phy_id, PHY_ID_TJA_1103, PHY_ID_MASK) &&
++	       nxp_c45_macsec_ability(phydev);
++}
++
+ static const struct nxp_c45_regmap tja1120_regmap = {
+ 	.vend1_ptp_clk_period	= 0x1020,
+ 	.vend1_event_msg_filt	= 0x9010,
+@@ -2000,7 +2026,6 @@ static const struct nxp_c45_phy_data tja1120_phy_data = {
+ 
+ static struct phy_driver nxp_c45_driver[] = {
+ 	{
+-		PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103),
+ 		.name			= "NXP C45 TJA1103",
+ 		.get_features		= nxp_c45_get_features,
+ 		.driver_data		= &tja1103_phy_data,
+@@ -2022,6 +2047,31 @@ static struct phy_driver nxp_c45_driver[] = {
+ 		.get_sqi		= nxp_c45_get_sqi,
+ 		.get_sqi_max		= nxp_c45_get_sqi_max,
+ 		.remove			= nxp_c45_remove,
++		.match_phy_device	= tja1103_match_phy_device,
++	},
++	{
++		.name			= "NXP C45 TJA1104",
++		.get_features		= nxp_c45_get_features,
++		.driver_data		= &tja1103_phy_data,
++		.probe			= nxp_c45_probe,
++		.soft_reset		= nxp_c45_soft_reset,
++		.config_aneg		= genphy_c45_config_aneg,
++		.config_init		= nxp_c45_config_init,
++		.config_intr		= tja1103_config_intr,
++		.handle_interrupt	= nxp_c45_handle_interrupt,
++		.read_status		= genphy_c45_read_status,
++		.suspend		= genphy_c45_pma_suspend,
++		.resume			= genphy_c45_pma_resume,
++		.get_sset_count		= nxp_c45_get_sset_count,
++		.get_strings		= nxp_c45_get_strings,
++		.get_stats		= nxp_c45_get_stats,
++		.cable_test_start	= nxp_c45_cable_test_start,
++		.cable_test_get_status	= nxp_c45_cable_test_get_status,
++		.set_loopback		= genphy_c45_loopback,
++		.get_sqi		= nxp_c45_get_sqi,
++		.get_sqi_max		= nxp_c45_get_sqi_max,
++		.remove			= nxp_c45_remove,
++		.match_phy_device	= tja1104_match_phy_device,
+ 	},
+ 	{
+ 		PHY_ID_MATCH_MODEL(PHY_ID_TJA_1120),
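
Because TJA1103 and TJA1104 share one PHY ID, the ID alone cannot pick the driver entry; the new .match_phy_device callbacks read the port-abilities register and split on the MACsec bit. A condensed sketch of that probe-time disambiguation (register and mask names are the patch's, not generic):

    static bool tja_match(struct phy_device *phydev, bool want_macsec)
    {
        int abilities = phy_read_mmd(phydev, MDIO_MMD_VEND1,
                                     VEND1_PORT_ABILITIES);

        return phy_id_compare(phydev->phy_id, PHY_ID_TJA_1103, PHY_ID_MASK) &&
               !!(abilities & MACSEC_ABILITY) == want_macsec;
    }
    /* tja1103_match_phy_device(dev) == tja_match(dev, false)
     * tja1104_match_phy_device(dev) == tja_match(dev, true)  */
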
+diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
+index 3e9957b6aa1489..b78dfcbec936c2 100644
+--- a/drivers/net/phy/phylink.c
++++ b/drivers/net/phy/phylink.c
+@@ -1811,7 +1811,7 @@ bool phylink_expects_phy(struct phylink *pl)
+ {
+ 	if (pl->cfg_link_an_mode == MLO_AN_FIXED ||
+ 	    (pl->cfg_link_an_mode == MLO_AN_INBAND &&
+-	     phy_interface_mode_is_8023z(pl->link_config.interface)))
++	     phy_interface_mode_is_8023z(pl->link_interface)))
+ 		return false;
+ 	return true;
+ }
+diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
+index 96fa3857d8e257..2cab046749a922 100644
+--- a/drivers/net/usb/r8152.c
++++ b/drivers/net/usb/r8152.c
+@@ -10085,6 +10085,7 @@ static const struct usb_device_id rtl8152_table[] = {
+ 	{ USB_DEVICE(VENDOR_ID_NVIDIA,  0x09ff) },
+ 	{ USB_DEVICE(VENDOR_ID_TPLINK,  0x0601) },
+ 	{ USB_DEVICE(VENDOR_ID_DLINK,   0xb301) },
++	{ USB_DEVICE(VENDOR_ID_DELL,    0xb097) },
+ 	{ USB_DEVICE(VENDOR_ID_ASUS,    0x1976) },
+ 	{}
+ };
+diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
+index b70654c7ad34ea..151d7cdfc48023 100644
+--- a/drivers/net/vmxnet3/vmxnet3_drv.c
++++ b/drivers/net/vmxnet3/vmxnet3_drv.c
+@@ -3599,8 +3599,6 @@ vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
+ 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ 	int err = 0;
+ 
+-	WRITE_ONCE(netdev->mtu, new_mtu);
+-
+ 	/*
+ 	 * Reset_work may be in the middle of resetting the device, wait for its
+ 	 * completion.
+@@ -3614,6 +3612,7 @@ vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
+ 
+ 		/* we need to re-create the rx queue based on the new mtu */
+ 		vmxnet3_rq_destroy_all(adapter);
++		WRITE_ONCE(netdev->mtu, new_mtu);
+ 		vmxnet3_adjust_rx_ring_size(adapter);
+ 		err = vmxnet3_rq_create_all(adapter);
+ 		if (err) {
+@@ -3630,6 +3629,8 @@ vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
+ 				   "Closing it\n", err);
+ 			goto out;
+ 		}
++	} else {
++		WRITE_ONCE(netdev->mtu, new_mtu);
+ 	}
+ 
+ out:
+diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
+index 5e7cdd1b806fbd..474faccf75fd93 100644
+--- a/drivers/net/vxlan/vxlan_core.c
++++ b/drivers/net/vxlan/vxlan_core.c
+@@ -227,9 +227,9 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
+ 			be32_to_cpu(fdb->vni)))
+ 		goto nla_put_failure;
+ 
+-	ci.ndm_used	 = jiffies_to_clock_t(now - fdb->used);
++	ci.ndm_used	 = jiffies_to_clock_t(now - READ_ONCE(fdb->used));
+ 	ci.ndm_confirmed = 0;
+-	ci.ndm_updated	 = jiffies_to_clock_t(now - fdb->updated);
++	ci.ndm_updated	 = jiffies_to_clock_t(now - READ_ONCE(fdb->updated));
+ 	ci.ndm_refcnt	 = 0;
+ 
+ 	if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
+@@ -434,8 +434,8 @@ static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
+ 	struct vxlan_fdb *f;
+ 
+ 	f = __vxlan_find_mac(vxlan, mac, vni);
+-	if (f && f->used != jiffies)
+-		f->used = jiffies;
++	if (f && READ_ONCE(f->used) != jiffies)
++		WRITE_ONCE(f->used, jiffies);
+ 
+ 	return f;
+ }
+@@ -1009,12 +1009,12 @@ static int vxlan_fdb_update_existing(struct vxlan_dev *vxlan,
+ 	    !(f->flags & NTF_VXLAN_ADDED_BY_USER)) {
+ 		if (f->state != state) {
+ 			f->state = state;
+-			f->updated = jiffies;
++			WRITE_ONCE(f->updated, jiffies);
+ 			notify = 1;
+ 		}
+ 		if (f->flags != fdb_flags) {
+ 			f->flags = fdb_flags;
+-			f->updated = jiffies;
++			WRITE_ONCE(f->updated, jiffies);
+ 			notify = 1;
+ 		}
+ 	}
+@@ -1048,7 +1048,7 @@ static int vxlan_fdb_update_existing(struct vxlan_dev *vxlan,
+ 	}
+ 
+ 	if (ndm_flags & NTF_USE)
+-		f->used = jiffies;
++		WRITE_ONCE(f->used, jiffies);
+ 
+ 	if (notify) {
+ 		if (rd == NULL)
+@@ -1477,7 +1477,7 @@ static bool vxlan_snoop(struct net_device *dev,
+ 				    src_mac, &rdst->remote_ip.sa, &src_ip->sa);
+ 
+ 		rdst->remote_ip = *src_ip;
+-		f->updated = jiffies;
++		WRITE_ONCE(f->updated, jiffies);
+ 		vxlan_fdb_notify(vxlan, f, rdst, RTM_NEWNEIGH, true, NULL);
+ 	} else {
+ 		u32 hash_index = fdb_head_index(vxlan, src_mac, vni);
+@@ -2825,7 +2825,7 @@ static void vxlan_cleanup(struct timer_list *t)
+ 			if (f->flags & NTF_EXT_LEARNED)
+ 				continue;
+ 
+-			timeout = f->used + vxlan->cfg.age_interval * HZ;
++			timeout = READ_ONCE(f->used) + vxlan->cfg.age_interval * HZ;
+ 			if (time_before_eq(timeout, jiffies)) {
+ 				netdev_dbg(vxlan->dev,
+ 					   "garbage collect %pM\n",
+@@ -4340,6 +4340,7 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
+ 			    struct netlink_ext_ack *extack)
+ {
+ 	struct vxlan_dev *vxlan = netdev_priv(dev);
++	bool rem_ip_changed, change_igmp;
+ 	struct net_device *lowerdev;
+ 	struct vxlan_config conf;
+ 	struct vxlan_rdst *dst;
+@@ -4363,8 +4364,13 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
+ 	if (err)
+ 		return err;
+ 
++	rem_ip_changed = !vxlan_addr_equal(&conf.remote_ip, &dst->remote_ip);
++	change_igmp = vxlan->dev->flags & IFF_UP &&
++		      (rem_ip_changed ||
++		       dst->remote_ifindex != conf.remote_ifindex);
++
+ 	/* handle default dst entry */
+-	if (!vxlan_addr_equal(&conf.remote_ip, &dst->remote_ip)) {
++	if (rem_ip_changed) {
+ 		u32 hash_index = fdb_head_index(vxlan, all_zeros_mac, conf.vni);
+ 
+ 		spin_lock_bh(&vxlan->hash_lock[hash_index]);
+@@ -4408,6 +4414,9 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
+ 		}
+ 	}
+ 
++	if (change_igmp && vxlan_addr_multicast(&dst->remote_ip))
++		err = vxlan_multicast_leave(vxlan);
++
+ 	if (conf.age_interval != vxlan->cfg.age_interval)
+ 		mod_timer(&vxlan->age_timer, jiffies);
+ 
+@@ -4415,7 +4424,12 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
+ 	if (lowerdev && lowerdev != dst->remote_dev)
+ 		dst->remote_dev = lowerdev;
+ 	vxlan_config_apply(dev, &conf, lowerdev, vxlan->net, true);
+-	return 0;
++
++	if (!err && change_igmp &&
++	    vxlan_addr_multicast(&dst->remote_ip))
++		err = vxlan_multicast_join(vxlan);
++
++	return err;
+ }
+ 
+ static void vxlan_dellink(struct net_device *dev, struct list_head *head)
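
fdb->used and fdb->updated are written from the datapath and read from the netlink dump and the GC timer without a common lock, so every access above gains READ_ONCE()/WRITE_ONCE() to rule out load/store tearing. The core pattern in isolation:

    #include <linux/compiler.h>
    #include <linux/jiffies.h>

    static void fdb_touch(unsigned long *used)
    {
        /* avoid dirtying the cacheline when the stamp is already current */
        if (READ_ONCE(*used) != jiffies)
            WRITE_ONCE(*used, jiffies);
    }

    static bool fdb_expired(const unsigned long *used, unsigned long age_hz)
    {
        return time_before_eq(READ_ONCE(*used) + age_hz, jiffies);
    }
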
+diff --git a/drivers/net/wireless/ath/ath11k/dp.h b/drivers/net/wireless/ath/ath11k/dp.h
+index 65d2bc0687c884..eaf902c25e192b 100644
+--- a/drivers/net/wireless/ath/ath11k/dp.h
++++ b/drivers/net/wireless/ath/ath11k/dp.h
+@@ -1,7 +1,7 @@
+ /* SPDX-License-Identifier: BSD-3-Clause-Clear */
+ /*
+  * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2021-2023, 2025 Qualcomm Innovation Center, Inc. All rights reserved.
+  */
+ 
+ #ifndef ATH11K_DP_H
+@@ -20,7 +20,6 @@ struct ath11k_ext_irq_grp;
+ 
+ struct dp_rx_tid {
+ 	u8 tid;
+-	u32 *vaddr;
+ 	dma_addr_t paddr;
+ 	u32 size;
+ 	u32 ba_win_sz;
+@@ -37,6 +36,9 @@ struct dp_rx_tid {
+ 	/* Timer info related to fragments */
+ 	struct timer_list frag_timer;
+ 	struct ath11k_base *ab;
++	u32 *vaddr_unaligned;
++	dma_addr_t paddr_unaligned;
++	u32 unaligned_size;
+ };
+ 
+ #define DP_REO_DESC_FREE_THRESHOLD  64
+diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
+index 40b52d12b43235..bfb8e7b1a300c6 100644
+--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
++++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
+@@ -1,7 +1,7 @@
+ // SPDX-License-Identifier: BSD-3-Clause-Clear
+ /*
+  * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+  */
+ 
+ #include <linux/ieee80211.h>
+@@ -675,11 +675,11 @@ void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab)
+ 	list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
+ 		list_del(&cmd->list);
+ 		rx_tid = &cmd->data;
+-		if (rx_tid->vaddr) {
+-			dma_unmap_single(ab->dev, rx_tid->paddr,
+-					 rx_tid->size, DMA_BIDIRECTIONAL);
+-			kfree(rx_tid->vaddr);
+-			rx_tid->vaddr = NULL;
++		if (rx_tid->vaddr_unaligned) {
++			dma_free_noncoherent(ab->dev, rx_tid->unaligned_size,
++					     rx_tid->vaddr_unaligned,
++					     rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
++			rx_tid->vaddr_unaligned = NULL;
+ 		}
+ 		kfree(cmd);
+ 	}
+@@ -689,11 +689,11 @@ void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab)
+ 		list_del(&cmd_cache->list);
+ 		dp->reo_cmd_cache_flush_count--;
+ 		rx_tid = &cmd_cache->data;
+-		if (rx_tid->vaddr) {
+-			dma_unmap_single(ab->dev, rx_tid->paddr,
+-					 rx_tid->size, DMA_BIDIRECTIONAL);
+-			kfree(rx_tid->vaddr);
+-			rx_tid->vaddr = NULL;
++		if (rx_tid->vaddr_unaligned) {
++			dma_free_noncoherent(ab->dev, rx_tid->unaligned_size,
++					     rx_tid->vaddr_unaligned,
++					     rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
++			rx_tid->vaddr_unaligned = NULL;
+ 		}
+ 		kfree(cmd_cache);
+ 	}
+@@ -708,11 +708,11 @@ static void ath11k_dp_reo_cmd_free(struct ath11k_dp *dp, void *ctx,
+ 	if (status != HAL_REO_CMD_SUCCESS)
+ 		ath11k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n",
+ 			    rx_tid->tid, status);
+-	if (rx_tid->vaddr) {
+-		dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size,
+-				 DMA_BIDIRECTIONAL);
+-		kfree(rx_tid->vaddr);
+-		rx_tid->vaddr = NULL;
++	if (rx_tid->vaddr_unaligned) {
++		dma_free_noncoherent(dp->ab->dev, rx_tid->unaligned_size,
++				     rx_tid->vaddr_unaligned,
++				     rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
++		rx_tid->vaddr_unaligned = NULL;
+ 	}
+ }
+ 
+@@ -749,10 +749,10 @@ static void ath11k_dp_reo_cache_flush(struct ath11k_base *ab,
+ 	if (ret) {
+ 		ath11k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n",
+ 			   rx_tid->tid, ret);
+-		dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
+-				 DMA_BIDIRECTIONAL);
+-		kfree(rx_tid->vaddr);
+-		rx_tid->vaddr = NULL;
++		dma_free_noncoherent(ab->dev, rx_tid->unaligned_size,
++				     rx_tid->vaddr_unaligned,
++				     rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
++		rx_tid->vaddr_unaligned = NULL;
+ 	}
+ }
+ 
+@@ -802,10 +802,10 @@ static void ath11k_dp_rx_tid_del_func(struct ath11k_dp *dp, void *ctx,
+ 
+ 	return;
+ free_desc:
+-	dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
+-			 DMA_BIDIRECTIONAL);
+-	kfree(rx_tid->vaddr);
+-	rx_tid->vaddr = NULL;
++	dma_free_noncoherent(ab->dev, rx_tid->unaligned_size,
++			     rx_tid->vaddr_unaligned,
++			     rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
++	rx_tid->vaddr_unaligned = NULL;
+ }
+ 
+ void ath11k_peer_rx_tid_delete(struct ath11k *ar,
+@@ -831,14 +831,16 @@ void ath11k_peer_rx_tid_delete(struct ath11k *ar,
+ 		if (ret != -ESHUTDOWN)
+ 			ath11k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
+ 				   tid, ret);
+-		dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size,
+-				 DMA_BIDIRECTIONAL);
+-		kfree(rx_tid->vaddr);
+-		rx_tid->vaddr = NULL;
++		dma_free_noncoherent(ar->ab->dev, rx_tid->unaligned_size,
++				     rx_tid->vaddr_unaligned,
++				     rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
++		rx_tid->vaddr_unaligned = NULL;
+ 	}
+ 
+ 	rx_tid->paddr = 0;
++	rx_tid->paddr_unaligned = 0;
+ 	rx_tid->size = 0;
++	rx_tid->unaligned_size = 0;
+ }
+ 
+ static int ath11k_dp_rx_link_desc_return(struct ath11k_base *ab,
+@@ -982,10 +984,9 @@ static void ath11k_dp_rx_tid_mem_free(struct ath11k_base *ab,
+ 	if (!rx_tid->active)
+ 		goto unlock_exit;
+ 
+-	dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
+-			 DMA_BIDIRECTIONAL);
+-	kfree(rx_tid->vaddr);
+-	rx_tid->vaddr = NULL;
++	dma_free_noncoherent(ab->dev, rx_tid->unaligned_size, rx_tid->vaddr_unaligned,
++			     rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
++	rx_tid->vaddr_unaligned = NULL;
+ 
+ 	rx_tid->active = false;
+ 
+@@ -1000,9 +1001,8 @@ int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
+ 	struct ath11k_base *ab = ar->ab;
+ 	struct ath11k_peer *peer;
+ 	struct dp_rx_tid *rx_tid;
+-	u32 hw_desc_sz;
+-	u32 *addr_aligned;
+-	void *vaddr;
++	u32 hw_desc_sz, *vaddr;
++	void *vaddr_unaligned;
+ 	dma_addr_t paddr;
+ 	int ret;
+ 
+@@ -1050,49 +1050,40 @@ int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
+ 	else
+ 		hw_desc_sz = ath11k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);
+ 
+-	vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC);
+-	if (!vaddr) {
++	rx_tid->unaligned_size = hw_desc_sz + HAL_LINK_DESC_ALIGN - 1;
++	vaddr_unaligned = dma_alloc_noncoherent(ab->dev, rx_tid->unaligned_size, &paddr,
++						DMA_BIDIRECTIONAL, GFP_ATOMIC);
++	if (!vaddr_unaligned) {
+ 		spin_unlock_bh(&ab->base_lock);
+ 		return -ENOMEM;
+ 	}
+ 
+-	addr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);
+-
+-	ath11k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz,
+-				   ssn, pn_type);
+-
+-	paddr = dma_map_single(ab->dev, addr_aligned, hw_desc_sz,
+-			       DMA_BIDIRECTIONAL);
+-
+-	ret = dma_mapping_error(ab->dev, paddr);
+-	if (ret) {
+-		spin_unlock_bh(&ab->base_lock);
+-		ath11k_warn(ab, "failed to setup dma map for peer %pM rx tid %d: %d\n",
+-			    peer_mac, tid, ret);
+-		goto err_mem_free;
+-	}
+-
+-	rx_tid->vaddr = vaddr;
+-	rx_tid->paddr = paddr;
++	rx_tid->vaddr_unaligned = vaddr_unaligned;
++	vaddr = PTR_ALIGN(vaddr_unaligned, HAL_LINK_DESC_ALIGN);
++	rx_tid->paddr_unaligned = paddr;
++	rx_tid->paddr = rx_tid->paddr_unaligned + ((unsigned long)vaddr -
++			(unsigned long)rx_tid->vaddr_unaligned);
++	ath11k_hal_reo_qdesc_setup(vaddr, tid, ba_win_sz, ssn, pn_type);
+ 	rx_tid->size = hw_desc_sz;
+ 	rx_tid->active = true;
+ 
++	/* After dma_alloc_noncoherent(), vaddr is modified by the CPU for the
++	 * REO qdesc setup. Since those writes are not yet visible to the
++	 * device, the driver must call dma_sync_single_for_device() explicitly.
++	 */
++	dma_sync_single_for_device(ab->dev, rx_tid->paddr,
++				   rx_tid->size,
++				   DMA_TO_DEVICE);
+ 	spin_unlock_bh(&ab->base_lock);
+ 
+-	ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac,
+-						     paddr, tid, 1, ba_win_sz);
++	ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac, rx_tid->paddr,
++						     tid, 1, ba_win_sz);
+ 	if (ret) {
+ 		ath11k_warn(ar->ab, "failed to setup rx reorder queue for peer %pM tid %d: %d\n",
+ 			    peer_mac, tid, ret);
+ 		ath11k_dp_rx_tid_mem_free(ab, peer_mac, vdev_id, tid);
+ 	}
+ 
+-	return ret;
+-
+-err_mem_free:
+-	kfree(rx_tid->vaddr);
+-	rx_tid->vaddr = NULL;
+-
+ 	return ret;
+ }
+ 
+diff --git a/drivers/net/wireless/ath/ath12k/core.c b/drivers/net/wireless/ath/ath12k/core.c
+index 51252e8bc1ae99..8bb8ee98188bfc 100644
+--- a/drivers/net/wireless/ath/ath12k/core.c
++++ b/drivers/net/wireless/ath/ath12k/core.c
+@@ -161,7 +161,7 @@ EXPORT_SYMBOL(ath12k_core_resume);
+ 
+ static int __ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
+ 					   size_t name_len, bool with_variant,
+-					   bool bus_type_mode)
++					   bool bus_type_mode, bool with_default)
+ {
+ 	/* strlen(',variant=') + strlen(ab->qmi.target.bdf_ext) */
+ 	char variant[9 + ATH12K_QMI_BDF_EXT_STR_LENGTH] = { 0 };
+@@ -192,7 +192,9 @@ static int __ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
+ 			  "bus=%s,qmi-chip-id=%d,qmi-board-id=%d%s",
+ 			  ath12k_bus_str(ab->hif.bus),
+ 			  ab->qmi.target.chip_id,
+-			  ab->qmi.target.board_id, variant);
++			  with_default ?
++			  ATH12K_BOARD_ID_DEFAULT : ab->qmi.target.board_id,
++			  variant);
+ 		break;
+ 	}
+ 
+@@ -204,19 +206,19 @@ static int __ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
+ static int ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
+ 					 size_t name_len)
+ {
+-	return __ath12k_core_create_board_name(ab, name, name_len, true, false);
++	return __ath12k_core_create_board_name(ab, name, name_len, true, false, false);
+ }
+ 
+ static int ath12k_core_create_fallback_board_name(struct ath12k_base *ab, char *name,
+ 						  size_t name_len)
+ {
+-	return __ath12k_core_create_board_name(ab, name, name_len, false, false);
++	return __ath12k_core_create_board_name(ab, name, name_len, false, false, true);
+ }
+ 
+ static int ath12k_core_create_bus_type_board_name(struct ath12k_base *ab, char *name,
+ 						  size_t name_len)
+ {
+-	return __ath12k_core_create_board_name(ab, name, name_len, false, true);
++	return __ath12k_core_create_board_name(ab, name, name_len, false, true, true);
+ }
+ 
+ const struct firmware *ath12k_core_firmware_request(struct ath12k_base *ab,
+diff --git a/drivers/net/wireless/ath/ath12k/core.h b/drivers/net/wireless/ath/ath12k/core.h
+index 7f2e9a9b40977d..3faf3430effb99 100644
+--- a/drivers/net/wireless/ath/ath12k/core.h
++++ b/drivers/net/wireless/ath/ath12k/core.h
+@@ -148,6 +148,7 @@ struct ath12k_ext_irq_grp {
+ 	u32 num_irq;
+ 	u32 grp_id;
+ 	u64 timestamp;
++	bool napi_enabled;
+ 	struct napi_struct napi;
+ 	struct net_device *napi_ndev;
+ };
+diff --git a/drivers/net/wireless/ath/ath12k/dp_mon.c b/drivers/net/wireless/ath/ath12k/dp_mon.c
+index 4c98b9de1e5840..6a88745369447f 100644
+--- a/drivers/net/wireless/ath/ath12k/dp_mon.c
++++ b/drivers/net/wireless/ath/ath12k/dp_mon.c
+@@ -1,7 +1,7 @@
+ // SPDX-License-Identifier: BSD-3-Clause-Clear
+ /*
+  * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
+- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+  */
+ 
+ #include "dp_mon.h"
+@@ -666,6 +666,11 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab,
+ 		if (userid < HAL_MAX_UL_MU_USERS) {
+ 			struct hal_rx_user_status *rxuser_stats =
+ 				&ppdu_info->userstats[userid];
++
++			if (ppdu_info->num_mpdu_fcs_ok > 1 ||
++			    ppdu_info->num_mpdu_fcs_err > 1)
++				ppdu_info->userstats[userid].ampdu_present = true;
++
+ 			ppdu_info->num_users += 1;
+ 
+ 			ath12k_dp_mon_rx_handle_ofdma_info(tlv_data, rxuser_stats);
+@@ -783,8 +788,8 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab,
+ 		if (userid < HAL_MAX_UL_MU_USERS) {
+ 			info[0] = __le32_to_cpu(mpdu_start->info0);
+ 			ppdu_info->userid = userid;
+-			ppdu_info->ampdu_id[userid] =
+-				u32_get_bits(info[0], HAL_RX_MPDU_START_INFO1_PEERID);
++			ppdu_info->userstats[userid].ampdu_id =
++				u32_get_bits(info[0], HAL_RX_MPDU_START_INFO0_PPDU_ID);
+ 		}
+ 
+ 		mon_mpdu = kzalloc(sizeof(*mon_mpdu), GFP_ATOMIC);
+@@ -1020,15 +1025,14 @@ static void ath12k_dp_mon_update_radiotap(struct ath12k *ar,
+ {
+ 	struct ieee80211_supported_band *sband;
+ 	u8 *ptr = NULL;
+-	u16 ampdu_id = ppduinfo->ampdu_id[ppduinfo->userid];
+ 
+ 	rxs->flag |= RX_FLAG_MACTIME_START;
+ 	rxs->signal = ppduinfo->rssi_comb + ATH12K_DEFAULT_NOISE_FLOOR;
+ 	rxs->nss = ppduinfo->nss + 1;
+ 
+-	if (ampdu_id) {
++	if (ppduinfo->userstats[ppduinfo->userid].ampdu_present) {
+ 		rxs->flag |= RX_FLAG_AMPDU_DETAILS;
+-		rxs->ampdu_reference = ampdu_id;
++		rxs->ampdu_reference = ppduinfo->userstats[ppduinfo->userid].ampdu_id;
+ 	}
+ 
+ 	if (ppduinfo->he_mu_flags) {
+diff --git a/drivers/net/wireless/ath/ath12k/dp_tx.c b/drivers/net/wireless/ath/ath12k/dp_tx.c
+index 44406e0b4a342f..201ffdb8c44ae9 100644
+--- a/drivers/net/wireless/ath/ath12k/dp_tx.c
++++ b/drivers/net/wireless/ath/ath12k/dp_tx.c
+@@ -117,7 +117,7 @@ static void ath12k_hal_tx_cmd_ext_desc_setup(struct ath12k_base *ab,
+ 			       le32_encode_bits(ti->data_len,
+ 						HAL_TX_MSDU_EXT_INFO1_BUF_LEN);
+ 
+-	tcl_ext_cmd->info1 = le32_encode_bits(1, HAL_TX_MSDU_EXT_INFO1_EXTN_OVERRIDE) |
++	tcl_ext_cmd->info1 |= le32_encode_bits(1, HAL_TX_MSDU_EXT_INFO1_EXTN_OVERRIDE) |
+ 				le32_encode_bits(ti->encap_type,
+ 						 HAL_TX_MSDU_EXT_INFO1_ENCAP_TYPE) |
+ 				le32_encode_bits(ti->encrypt_type,
+@@ -557,13 +557,13 @@ ath12k_dp_tx_process_htt_tx_complete(struct ath12k_base *ab,
+ 
+ 	switch (wbm_status) {
+ 	case HAL_WBM_REL_HTT_TX_COMP_STATUS_OK:
+-	case HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP:
+-	case HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL:
+ 		ts.acked = (wbm_status == HAL_WBM_REL_HTT_TX_COMP_STATUS_OK);
+ 		ts.ack_rssi = le32_get_bits(status_desc->info2,
+ 					    HTT_TX_WBM_COMP_INFO2_ACK_RSSI);
+ 		ath12k_dp_tx_htt_tx_complete_buf(ab, msdu, tx_ring, &ts);
+ 		break;
++	case HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP:
++	case HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL:
+ 	case HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ:
+ 	case HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT:
+ 		ath12k_dp_tx_free_txbuf(ab, msdu, mac_id, tx_ring);
+diff --git a/drivers/net/wireless/ath/ath12k/hal_desc.h b/drivers/net/wireless/ath/ath12k/hal_desc.h
+index 739f73370015e2..4f745cfd7d8e7e 100644
+--- a/drivers/net/wireless/ath/ath12k/hal_desc.h
++++ b/drivers/net/wireless/ath/ath12k/hal_desc.h
+@@ -2966,7 +2966,7 @@ struct hal_mon_buf_ring {
+ 
+ #define HAL_MON_DEST_COOKIE_BUF_ID      GENMASK(17, 0)
+ 
+-#define HAL_MON_DEST_INFO0_END_OFFSET		GENMASK(15, 0)
++#define HAL_MON_DEST_INFO0_END_OFFSET		GENMASK(11, 0)
+ #define HAL_MON_DEST_INFO0_FLUSH_DETECTED	BIT(16)
+ #define HAL_MON_DEST_INFO0_END_OF_PPDU		BIT(17)
+ #define HAL_MON_DEST_INFO0_INITIATOR		BIT(18)
+diff --git a/drivers/net/wireless/ath/ath12k/hal_rx.h b/drivers/net/wireless/ath/ath12k/hal_rx.h
+index 095216eabc01dc..8c37cbc01b1c5b 100644
+--- a/drivers/net/wireless/ath/ath12k/hal_rx.h
++++ b/drivers/net/wireless/ath/ath12k/hal_rx.h
+@@ -143,6 +143,8 @@ struct hal_rx_user_status {
+ 	u32 mpdu_fcs_ok_bitmap[HAL_RX_NUM_WORDS_PER_PPDU_BITMAP];
+ 	u32 mpdu_ok_byte_count;
+ 	u32 mpdu_err_byte_count;
++	bool ampdu_present;
++	u16 ampdu_id;
+ };
+ 
+ #define HAL_MAX_UL_MU_USERS	37
+@@ -226,7 +228,6 @@ struct hal_rx_mon_ppdu_info {
+ 	u8 addr4[ETH_ALEN];
+ 	struct hal_rx_user_status userstats[HAL_MAX_UL_MU_USERS];
+ 	u8 userid;
+-	u16 ampdu_id[HAL_MAX_UL_MU_USERS];
+ 	bool first_msdu_in_mpdu;
+ 	bool is_ampdu;
+ 	u8 medium_prot_type;
+diff --git a/drivers/net/wireless/ath/ath12k/pci.c b/drivers/net/wireless/ath/ath12k/pci.c
+index 2ff866e1d7d5bb..45d537066345a2 100644
+--- a/drivers/net/wireless/ath/ath12k/pci.c
++++ b/drivers/net/wireless/ath/ath12k/pci.c
+@@ -481,8 +481,11 @@ static void __ath12k_pci_ext_irq_disable(struct ath12k_base *ab)
+ 
+ 		ath12k_pci_ext_grp_disable(irq_grp);
+ 
+-		napi_synchronize(&irq_grp->napi);
+-		napi_disable(&irq_grp->napi);
++		if (irq_grp->napi_enabled) {
++			napi_synchronize(&irq_grp->napi);
++			napi_disable(&irq_grp->napi);
++			irq_grp->napi_enabled = false;
++		}
+ 	}
+ }
+ 
+@@ -1112,7 +1115,11 @@ void ath12k_pci_ext_irq_enable(struct ath12k_base *ab)
+ 	for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ 		struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+ 
+-		napi_enable(&irq_grp->napi);
++		if (!irq_grp->napi_enabled) {
++			napi_enable(&irq_grp->napi);
++			irq_grp->napi_enabled = true;
++		}
++
+ 		ath12k_pci_ext_grp_enable(irq_grp);
+ 	}
+ 
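
Tracking napi_enabled makes enable/disable idempotent, which matters because the disable path can now run twice (e.g. suspend followed by teardown) and napi_disable() on an already-disabled instance blocks forever. The guarded pair, extracted from the hunks above:

    static void ext_grp_napi_enable(struct ath12k_ext_irq_grp *irq_grp)
    {
        if (!irq_grp->napi_enabled) {
            napi_enable(&irq_grp->napi);
            irq_grp->napi_enabled = true;
        }
    }

    static void ext_grp_napi_disable(struct ath12k_ext_irq_grp *irq_grp)
    {
        if (irq_grp->napi_enabled) {
            napi_synchronize(&irq_grp->napi);
            napi_disable(&irq_grp->napi);
            irq_grp->napi_enabled = false;
        }
    }
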
+diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c
+index a6ba97949440e4..30836a09d5506c 100644
+--- a/drivers/net/wireless/ath/ath12k/wmi.c
++++ b/drivers/net/wireless/ath/ath12k/wmi.c
+@@ -2206,8 +2206,8 @@ void ath12k_wmi_start_scan_init(struct ath12k *ar,
+ 	arg->dwell_time_active = 50;
+ 	arg->dwell_time_active_2g = 0;
+ 	arg->dwell_time_passive = 150;
+-	arg->dwell_time_active_6g = 40;
+-	arg->dwell_time_passive_6g = 30;
++	arg->dwell_time_active_6g = 70;
++	arg->dwell_time_passive_6g = 70;
+ 	arg->min_rest_time = 50;
+ 	arg->max_rest_time = 500;
+ 	arg->repeat_probe_time = 0;
+diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
+index 7fad7e75af6a37..619bebd389bd28 100644
+--- a/drivers/net/wireless/ath/ath9k/init.c
++++ b/drivers/net/wireless/ath/ath9k/init.c
+@@ -691,7 +691,9 @@ static int ath9k_of_init(struct ath_softc *sc)
+ 		ah->ah_flags |= AH_NO_EEP_SWAP;
+ 	}
+ 
+-	of_get_mac_address(np, common->macaddr);
++	ret = of_get_mac_address(np, common->macaddr);
++	if (ret == -EPROBE_DEFER)
++		return ret;
+ 
+ 	return 0;
+ }
+diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/dr.c b/drivers/net/wireless/intel/iwlwifi/cfg/dr.c
+index ab7c0f8d54f425..d3542af0f625ee 100644
+--- a/drivers/net/wireless/intel/iwlwifi/cfg/dr.c
++++ b/drivers/net/wireless/intel/iwlwifi/cfg/dr.c
+@@ -148,11 +148,9 @@ const struct iwl_cfg_trans_params iwl_br_trans_cfg = {
+ 	.mq_rx_supported = true,
+ 	.rf_id = true,
+ 	.gen2 = true,
+-	.integrated = true,
+ 	.umac_prph_offset = 0x300000,
+ 	.xtal_latency = 12000,
+ 	.low_latency_xtal = true,
+-	.ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US,
+ };
+ 
+ const char iwl_br_name[] = "Intel(R) TBD Br device";
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+index 6594216f873c47..cd284767ff4bad 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
++++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+ /*
+- * Copyright (C) 2005-2014, 2018-2024 Intel Corporation
++ * Copyright (C) 2005-2014, 2018-2025 Intel Corporation
+  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
+  * Copyright (C) 2015-2017 Intel Deutschland GmbH
+  */
+@@ -2691,7 +2691,7 @@ static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt,
+ 	}
+ 	/* collect DRAM_IMR region in the last */
+ 	if (imr_reg_data.reg_tlv)
+-		size += iwl_dump_ini_mem(fwrt, list, &reg_data,
++		size += iwl_dump_ini_mem(fwrt, list, &imr_reg_data,
+ 					 &iwl_dump_ini_region_ops[IWL_FW_INI_REGION_DRAM_IMR]);
+ 
+ 	if (size) {
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
+index 834f7c9bb9e92d..86d6286a153785 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
++++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+ /*
+- * Copyright(c) 2021-2024 Intel Corporation
++ * Copyright(c) 2021-2025 Intel Corporation
+  */
+ 
+ #include "iwl-drv.h"
+@@ -673,8 +673,10 @@ int iwl_uefi_get_eckv(struct iwl_fw_runtime *fwrt, u32 *extl_clk)
+ 	struct uefi_cnv_var_eckv *data;
+ 	int ret = 0;
+ 
+-	data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_ECKV_NAME,
+-					      "ECKV", sizeof(*data), NULL);
++	data = iwl_uefi_get_verified_variable_guid(fwrt->trans,
++						   &IWL_EFI_WIFI_BT_GUID,
++						   IWL_UEFI_ECKV_NAME,
++						   "ECKV", sizeof(*data), NULL);
+ 	if (IS_ERR(data))
+ 		return -EINVAL;
+ 
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.h b/drivers/net/wireless/intel/iwlwifi/fw/uefi.h
+index e525d449e656e7..c931f5cedb0b9a 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.h
++++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.h
+@@ -1,6 +1,6 @@
+ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+ /*
+- * Copyright(c) 2021-2024 Intel Corporation
++ * Copyright(c) 2021-2025 Intel Corporation
+  */
+ #ifndef __iwl_fw_uefi__
+ #define __iwl_fw_uefi__
+@@ -19,7 +19,7 @@
+ #define IWL_UEFI_WTAS_NAME		L"UefiCnvWlanWTAS"
+ #define IWL_UEFI_SPLC_NAME		L"UefiCnvWlanSPLC"
+ #define IWL_UEFI_WRDD_NAME		L"UefiCnvWlanWRDD"
+-#define IWL_UEFI_ECKV_NAME		L"UefiCnvWlanECKV"
++#define IWL_UEFI_ECKV_NAME		L"UefiCnvCommonECKV"
+ #define IWL_UEFI_DSM_NAME		L"UefiCnvWlanGeneralCfg"
+ #define IWL_UEFI_WBEM_NAME		L"UefiCnvWlanWBEM"
+ #define IWL_UEFI_PUNCTURING_NAME	L"UefiCnvWlanPuncturing"
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+index 08d990ba8a7949..ce787326aa69d0 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+ /*
+- * Copyright (C) 2018-2024 Intel Corporation
++ * Copyright (C) 2018-2025 Intel Corporation
+  */
+ #include <linux/firmware.h>
+ #include "iwl-drv.h"
+@@ -1372,15 +1372,15 @@ void _iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt,
+ 	switch (tp_id) {
+ 	case IWL_FW_INI_TIME_POINT_EARLY:
+ 		iwl_dbg_tlv_init_cfg(fwrt);
+-		iwl_dbg_tlv_apply_config(fwrt, conf_list);
+ 		iwl_dbg_tlv_update_drams(fwrt);
+ 		iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, NULL);
++		iwl_dbg_tlv_apply_config(fwrt, conf_list);
+ 		break;
+ 	case IWL_FW_INI_TIME_POINT_AFTER_ALIVE:
+ 		iwl_dbg_tlv_apply_buffers(fwrt);
+ 		iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
+-		iwl_dbg_tlv_apply_config(fwrt, conf_list);
+ 		iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, NULL);
++		iwl_dbg_tlv_apply_config(fwrt, conf_list);
+ 		break;
+ 	case IWL_FW_INI_TIME_POINT_PERIODIC:
+ 		iwl_dbg_tlv_set_periodic_trigs(fwrt);
+@@ -1390,14 +1390,14 @@ void _iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt,
+ 	case IWL_FW_INI_TIME_POINT_MISSED_BEACONS:
+ 	case IWL_FW_INI_TIME_POINT_FW_DHC_NOTIFICATION:
+ 		iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
+-		iwl_dbg_tlv_apply_config(fwrt, conf_list);
+ 		iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data,
+ 				       iwl_dbg_tlv_check_fw_pkt);
++		iwl_dbg_tlv_apply_config(fwrt, conf_list);
+ 		break;
+ 	default:
+ 		iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
+-		iwl_dbg_tlv_apply_config(fwrt, conf_list);
+ 		iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, NULL);
++		iwl_dbg_tlv_apply_config(fwrt, conf_list);
+ 		break;
+ 	}
+ }
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
+index 3b3dcaf33c9d9e..510e04b721da63 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
+@@ -2,7 +2,7 @@
+ /*
+  * Copyright (C) 2015 Intel Mobile Communications GmbH
+  * Copyright (C) 2016-2017 Intel Deutschland GmbH
+- * Copyright (C) 2019-2021, 2023-2024 Intel Corporation
++ * Copyright (C) 2019-2021, 2023-2025 Intel Corporation
+  */
+ #include <linux/kernel.h>
+ #include <linux/bsearch.h>
+@@ -419,6 +419,9 @@ IWL_EXPORT_SYMBOL(iwl_trans_tx);
+ void iwl_trans_reclaim(struct iwl_trans *trans, int queue, int ssn,
+ 		       struct sk_buff_head *skbs, bool is_flush)
+ {
++	if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
++		return;
++
+ 	if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
+ 		      "bad state = %d\n", trans->state))
+ 		return;
+@@ -451,6 +454,9 @@ IWL_EXPORT_SYMBOL(iwl_trans_txq_enable_cfg);
+ 
+ int iwl_trans_wait_txq_empty(struct iwl_trans *trans, int queue)
+ {
++	if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
++		return -EIO;
++
+ 	if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
+ 		      "bad state = %d\n", trans->state))
+ 		return -EIO;
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
+index 55245f913286b9..2ed7a0d77ef836 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
+@@ -773,7 +773,11 @@ iwl_mvm_ftm_set_secured_ranging(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ 
+ 			target.bssid = bssid;
+ 			target.cipher = cipher;
++			target.tk = NULL;
+ 			ieee80211_iter_keys(mvm->hw, vif, iter, &target);
++
++			if (!WARN_ON(!target.tk))
++				memcpy(tk, target.tk, TK_11AZ_LEN);
+ 		} else {
+ 			memcpy(tk, entry->tk, sizeof(entry->tk));
+ 		}
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+index d37d83d246354e..3fd257f770baa3 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+@@ -4096,6 +4096,20 @@ iwl_mvm_sta_state_authorized_to_assoc(struct iwl_mvm *mvm,
+ 	return 0;
+ }
+ 
++void iwl_mvm_smps_workaround(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
++			     bool update)
++{
++	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
++
++	if (!iwl_mvm_has_rlc_offload(mvm))
++		return;
++
++	mvmvif->ps_disabled = !vif->cfg.ps;
++
++	if (update)
++		iwl_mvm_power_update_mac(mvm);
++}
++
+ /* Common part for MLD and non-MLD modes */
+ int iwl_mvm_mac_sta_state_common(struct ieee80211_hw *hw,
+ 				 struct ieee80211_vif *vif,
+@@ -4188,6 +4202,7 @@ int iwl_mvm_mac_sta_state_common(struct ieee80211_hw *hw,
+ 		   new_state == IEEE80211_STA_AUTHORIZED) {
+ 		ret = iwl_mvm_sta_state_assoc_to_authorized(mvm, vif, sta,
+ 							    callbacks);
++		iwl_mvm_smps_workaround(mvm, vif, true);
+ 	} else if (old_state == IEEE80211_STA_AUTHORIZED &&
+ 		   new_state == IEEE80211_STA_ASSOC) {
+ 		ret = iwl_mvm_sta_state_authorized_to_assoc(mvm, vif, sta,
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
+index e252f0dcea2057..04da02bdd9536d 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+ /*
+- * Copyright (C) 2022-2024 Intel Corporation
++ * Copyright (C) 2022-2025 Intel Corporation
+  */
+ #include "mvm.h"
+ 
+@@ -961,6 +961,7 @@ static void iwl_mvm_mld_vif_cfg_changed_station(struct iwl_mvm *mvm,
+ 	}
+ 
+ 	if (changes & BSS_CHANGED_PS) {
++		iwl_mvm_smps_workaround(mvm, vif, false);
+ 		ret = iwl_mvm_power_update_mac(mvm);
+ 		if (ret)
+ 			IWL_ERR(mvm, "failed to update power mode\n");
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+index ef07cff203b0d4..7d86d273092acd 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+@@ -3042,4 +3042,7 @@ iwl_mvm_send_ap_tx_power_constraint_cmd(struct iwl_mvm *mvm,
+ 					struct ieee80211_vif *vif,
+ 					struct ieee80211_bss_conf *bss_conf,
+ 					bool is_ap);
++
++void iwl_mvm_smps_workaround(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
++			     bool update);
+ #endif /* __IWL_MVM_H__ */
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+index 9141ea57abfce1..68989d183e82a4 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+@@ -587,6 +587,8 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = {
+ 	IWL_DEV_INFO(0x7A70, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name),
+ 	IWL_DEV_INFO(0x7AF0, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name),
+ 	IWL_DEV_INFO(0x7AF0, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name),
++	IWL_DEV_INFO(0x7F70, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name),
++	IWL_DEV_INFO(0x7F70, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name),
+ 
+ 	IWL_DEV_INFO(0x271C, 0x0214, iwl9260_2ac_cfg, iwl9260_1_name),
+ 	IWL_DEV_INFO(0x7E40, 0x1691, iwl_cfg_ma, iwl_ax411_killer_1690s_name),
+diff --git a/drivers/net/wireless/marvell/mwifiex/11n.c b/drivers/net/wireless/marvell/mwifiex/11n.c
+index 66f0f5377ac181..738bafc3749b0a 100644
+--- a/drivers/net/wireless/marvell/mwifiex/11n.c
++++ b/drivers/net/wireless/marvell/mwifiex/11n.c
+@@ -403,12 +403,14 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
+ 
+ 		if (sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 &&
+ 		    bss_desc->bcn_ht_oper->ht_param &
+-		    IEEE80211_HT_PARAM_CHAN_WIDTH_ANY)
++		    IEEE80211_HT_PARAM_CHAN_WIDTH_ANY) {
++			chan_list->chan_scan_param[0].radio_type |=
++				CHAN_BW_40MHZ << 2;
+ 			SET_SECONDARYCHAN(chan_list->chan_scan_param[0].
+ 					  radio_type,
+ 					  (bss_desc->bcn_ht_oper->ht_param &
+ 					  IEEE80211_HT_PARAM_CHA_SEC_OFFSET));
+-
++		}
+ 		*buffer += struct_size(chan_list, chan_scan_param, 1);
+ 		ret_len += struct_size(chan_list, chan_scan_param, 1);
+ 	}
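
The mwifiex hunk adds the missing 40 MHz bandwidth bits to radio_type before the secondary-channel offset is encoded; previously the firmware learned about a secondary channel but not the wider bandwidth. A toy sketch of the packing (the exact field layout, bandwidth in bits 3:2 and offset in bits 1:0, is an assumption for illustration):

    #include <stdint.h>
    #include <stdio.h>

    #define CHAN_BW_40MHZ   1
    #define SEC_CHAN_ABOVE  1

    int main(void)
    {
            uint8_t radio_type = 0;

            radio_type |= CHAN_BW_40MHZ << 2;   /* advertise 40 MHz */
            radio_type |= SEC_CHAN_ABOVE;       /* secondary channel position */
            printf("radio_type = 0x%02x\n", radio_type);
            return 0;
    }
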
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
+index e2e9b5ece74e21..a6ac8e5512eba4 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76.h
++++ b/drivers/net/wireless/mediatek/mt76/mt76.h
+@@ -489,6 +489,7 @@ struct mt76_hw_cap {
+ #define MT_DRV_RX_DMA_HDR		BIT(3)
+ #define MT_DRV_HW_MGMT_TXQ		BIT(4)
+ #define MT_DRV_AMSDU_OFFLOAD		BIT(5)
++#define MT_DRV_IGNORE_TXS_FAILED	BIT(6)
+ 
+ struct mt76_driver_ops {
+ 	u32 drv_flags;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h
+index db0c29e65185ca..487ad716f872ad 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h
++++ b/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h
+@@ -314,6 +314,9 @@ enum tx_frag_idx {
+ #define MT_TXFREE_INFO_COUNT		GENMASK(27, 24)
+ #define MT_TXFREE_INFO_STAT		GENMASK(29, 28)
+ 
++#define MT_TXS_HDR_SIZE			4 /* Unit: DW */
++#define MT_TXS_SIZE			12 /* Unit: DW */
++
+ #define MT_TXS0_BW			GENMASK(31, 29)
+ #define MT_TXS0_TID			GENMASK(28, 26)
+ #define MT_TXS0_AMPDU			BIT(25)
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
+index 1eb955f3ca130b..911e162a459802 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
+@@ -156,7 +156,8 @@ mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	static const struct mt76_driver_ops drv_ops = {
+ 		.txwi_size = sizeof(struct mt76x02_txwi),
+ 		.drv_flags = MT_DRV_TX_ALIGNED4_SKBS |
+-			     MT_DRV_SW_RX_AIRTIME,
++			     MT_DRV_SW_RX_AIRTIME |
++			     MT_DRV_IGNORE_TXS_FAILED,
+ 		.survey_flags = SURVEY_INFO_TIME_TX,
+ 		.update_survey = mt76x02_update_channel,
+ 		.set_channel = mt76x0_set_channel,
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
+index b031c500b74156..90e5666c0857dc 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
+@@ -214,7 +214,8 @@ static int mt76x0u_probe(struct usb_interface *usb_intf,
+ 			 const struct usb_device_id *id)
+ {
+ 	static const struct mt76_driver_ops drv_ops = {
+-		.drv_flags = MT_DRV_SW_RX_AIRTIME,
++		.drv_flags = MT_DRV_SW_RX_AIRTIME |
++			     MT_DRV_IGNORE_TXS_FAILED,
+ 		.survey_flags = SURVEY_INFO_TIME_TX,
+ 		.update_survey = mt76x02_update_channel,
+ 		.set_channel = mt76x0_set_channel,
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
+index 67c9d1caa0bd63..55f076231bdc56 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
+@@ -22,7 +22,8 @@ mt76x2e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	static const struct mt76_driver_ops drv_ops = {
+ 		.txwi_size = sizeof(struct mt76x02_txwi),
+ 		.drv_flags = MT_DRV_TX_ALIGNED4_SKBS |
+-			     MT_DRV_SW_RX_AIRTIME,
++			     MT_DRV_SW_RX_AIRTIME |
++			     MT_DRV_IGNORE_TXS_FAILED,
+ 		.survey_flags = SURVEY_INFO_TIME_TX,
+ 		.update_survey = mt76x02_update_channel,
+ 		.set_channel = mt76x2e_set_channel,
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
+index a4f4d12f904e7c..84ef80ab4afbfa 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
+@@ -30,7 +30,8 @@ static int mt76x2u_probe(struct usb_interface *intf,
+ 			 const struct usb_device_id *id)
+ {
+ 	static const struct mt76_driver_ops drv_ops = {
+-		.drv_flags = MT_DRV_SW_RX_AIRTIME,
++		.drv_flags = MT_DRV_SW_RX_AIRTIME |
++			     MT_DRV_IGNORE_TXS_FAILED,
+ 		.survey_flags = SURVEY_INFO_TIME_TX,
+ 		.update_survey = mt76x02_update_channel,
+ 		.set_channel = mt76x2u_set_channel,
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
+index 8476f9caa98dbf..2396e1795fe171 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
+@@ -616,6 +616,54 @@ int mt7925_mcu_uni_rx_ba(struct mt792x_dev *dev,
+ 	return ret;
+ }
+ 
++static int mt7925_mcu_read_eeprom(struct mt792x_dev *dev, u32 offset, u8 *val)
++{
++	struct {
++		u8 rsv[4];
++
++		__le16 tag;
++		__le16 len;
++
++		__le32 addr;
++		__le32 valid;
++		u8 data[MT7925_EEPROM_BLOCK_SIZE];
++	} __packed req = {
++		.tag = cpu_to_le16(1),
++		.len = cpu_to_le16(sizeof(req) - 4),
++		.addr = cpu_to_le32(round_down(offset,
++				    MT7925_EEPROM_BLOCK_SIZE)),
++	};
++	struct evt {
++		u8 rsv[4];
++
++		__le16 tag;
++		__le16 len;
++
++		__le32 ver;
++		__le32 addr;
++		__le32 valid;
++		__le32 size;
++		__le32 magic_num;
++		__le32 type;
++		__le32 rsv1[4];
++		u8 data[32];
++	} __packed *res;
++	struct sk_buff *skb;
++	int ret;
++
++	ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_WM_UNI_CMD_QUERY(EFUSE_CTRL),
++					&req, sizeof(req), true, &skb);
++	if (ret)
++		return ret;
++
++	res = (struct evt *)skb->data;
++	*val = res->data[offset % MT7925_EEPROM_BLOCK_SIZE];
++
++	dev_kfree_skb(skb);
++
++	return 0;
++}
++
+ static int mt7925_load_clc(struct mt792x_dev *dev, const char *fw_name)
+ {
+ 	const struct mt76_connac2_fw_trailer *hdr;
+@@ -624,13 +672,20 @@ static int mt7925_load_clc(struct mt792x_dev *dev, const char *fw_name)
+ 	struct mt76_dev *mdev = &dev->mt76;
+ 	struct mt792x_phy *phy = &dev->phy;
+ 	const struct firmware *fw;
++	u8 *clc_base = NULL, hw_encap = 0;
+ 	int ret, i, len, offset = 0;
+-	u8 *clc_base = NULL;
+ 
+ 	if (mt7925_disable_clc ||
+ 	    mt76_is_usb(&dev->mt76))
+ 		return 0;
+ 
++	if (mt76_is_mmio(&dev->mt76)) {
++		ret = mt7925_mcu_read_eeprom(dev, MT_EE_HW_TYPE, &hw_encap);
++		if (ret)
++			return ret;
++		hw_encap = u8_get_bits(hw_encap, MT_EE_HW_TYPE_ENCAP);
++	}
++
+ 	ret = request_firmware(&fw, fw_name, mdev->dev);
+ 	if (ret)
+ 		return ret;
+@@ -675,6 +730,10 @@ static int mt7925_load_clc(struct mt792x_dev *dev, const char *fw_name)
+ 		if (phy->clc[clc->idx])
+ 			continue;
+ 
++		/* header content sanity */
++		if (u8_get_bits(clc->type, MT_EE_HW_TYPE_ENCAP) != hw_encap)
++			continue;
++
+ 		phy->clc[clc->idx] = devm_kmemdup(mdev->dev, clc,
+ 						  le32_to_cpu(clc->len),
+ 						  GFP_KERNEL);
+@@ -3228,6 +3287,9 @@ int mt7925_mcu_fill_message(struct mt76_dev *mdev, struct sk_buff *skb,
+ 		else
+ 			uni_txd->option = MCU_CMD_UNI_EXT_ACK;
+ 
++		if (cmd == MCU_UNI_CMD(HIF_CTRL))
++			uni_txd->option &= ~MCU_CMD_ACK;
++
+ 		goto exit;
+ 	}
+ 
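
The new mt7925_mcu_read_eeprom() asks the firmware for the whole block containing the requested offset, then indexes the wanted byte within it. The alignment arithmetic in isolation (the block size here is arbitrary; the real value is MT7925_EEPROM_BLOCK_SIZE):

    #include <stdint.h>
    #include <stdio.h>

    #define BLOCK_SIZE 16
    #define round_down(x, y) ((x) / (y) * (y))

    static uint8_t read_byte(const uint8_t *eeprom, uint32_t offset)
    {
            const uint8_t *block = eeprom + round_down(offset, BLOCK_SIZE);

            /* A real implementation would fetch 'block' from firmware. */
            return block[offset % BLOCK_SIZE];
    }

    int main(void)
    {
            uint8_t eeprom[64];

            for (unsigned int i = 0; i < sizeof(eeprom); i++)
                    eeprom[i] = (uint8_t)i;
            printf("0x%02x\n", read_byte(eeprom, 0x2a));    /* expect 0x2a */
            return 0;
    }
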
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h b/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
+index df3c705d1cb3fa..4ad779329b8f08 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
+@@ -147,9 +147,12 @@ enum mt7925_eeprom_field {
+ 	MT_EE_CHIP_ID =		0x000,
+ 	MT_EE_VERSION =		0x002,
+ 	MT_EE_MAC_ADDR =	0x004,
++	MT_EE_HW_TYPE =		0xa71,
+ 	__MT_EE_MAX =		0x9ff
+ };
+ 
++#define MT_EE_HW_TYPE_ENCAP     GENMASK(1, 0)
++
+ enum {
+ 	TXPWR_USER,
+ 	TXPWR_EEPROM,
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
+index f590902fdeea37..ef2d7eaaaffdd6 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
+@@ -1399,7 +1399,7 @@ bool mt7996_rx_check(struct mt76_dev *mdev, void *data, int len)
+ 		mt7996_mac_tx_free(dev, data, len);
+ 		return false;
+ 	case PKT_TYPE_TXS:
+-		for (rxd += 4; rxd + 8 <= end; rxd += 8)
++		for (rxd += MT_TXS_HDR_SIZE; rxd + MT_TXS_SIZE <= end; rxd += MT_TXS_SIZE)
+ 			mt7996_mac_add_txs(dev, rxd);
+ 		return false;
+ 	case PKT_TYPE_RX_FW_MONITOR:
+@@ -1442,7 +1442,7 @@ void mt7996_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+ 		mt7996_mcu_rx_event(dev, skb);
+ 		break;
+ 	case PKT_TYPE_TXS:
+-		for (rxd += 4; rxd + 8 <= end; rxd += 8)
++		for (rxd += MT_TXS_HDR_SIZE; rxd + MT_TXS_SIZE <= end; rxd += MT_TXS_SIZE)
+ 			mt7996_mac_add_txs(dev, rxd);
+ 		dev_kfree_skb(skb);
+ 		break;
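
The two TXS loops above replace bare 4 and 8 with named constants and, more importantly, correct the record stride from 8 to 12 DWs; rxd is a 32-bit word pointer, so both values count DWs, as the new macros document. The corrected walk in isolation:

    #include <stdint.h>
    #include <stdio.h>

    #define TXS_HDR_SIZE    4       /* unit: DW */
    #define TXS_SIZE        12      /* unit: DW */

    static void walk_txs(const uint32_t *rxd, const uint32_t *end)
    {
            for (rxd += TXS_HDR_SIZE; rxd + TXS_SIZE <= end; rxd += TXS_SIZE)
                    printf("txs record at %p\n", (const void *)rxd);
    }

    int main(void)
    {
            uint32_t buf[4 + 2 * 12] = { 0 };

            walk_txs(buf, buf + sizeof(buf) / sizeof(buf[0])); /* two records */
            return 0;
    }
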
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h
+index 43468bcaffc6dd..a75e1c9435bb01 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h
+@@ -908,7 +908,8 @@ enum {
+ 	UNI_CMD_SER_SET_RECOVER_L3_TX_DISABLE,
+ 	UNI_CMD_SER_SET_RECOVER_L3_BF,
+ 	UNI_CMD_SER_SET_RECOVER_L4_MDP,
+-	UNI_CMD_SER_SET_RECOVER_FULL,
++	UNI_CMD_SER_SET_RECOVER_FROM_ETH,
++	UNI_CMD_SER_SET_RECOVER_FULL = 8,
+ 	UNI_CMD_SER_SET_SYSTEM_ASSERT,
+ 	/* action */
+ 	UNI_CMD_SER_ENABLE = 1,
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
+index 442f72450352b0..b6209ed1cfe014 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
+@@ -281,7 +281,7 @@ static int mt7996_mmio_wed_reset(struct mtk_wed_device *wed)
+ 	if (test_and_set_bit(MT76_STATE_WED_RESET, &mphy->state))
+ 		return -EBUSY;
+ 
+-	ret = mt7996_mcu_set_ser(dev, UNI_CMD_SER_TRIGGER, UNI_CMD_SER_SET_RECOVER_L1,
++	ret = mt7996_mcu_set_ser(dev, UNI_CMD_SER_TRIGGER, UNI_CMD_SER_SET_RECOVER_FROM_ETH,
+ 				 mphy->band_idx);
+ 	if (ret)
+ 		goto out;
+diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
+index ce193e625666be..065a1e4537457a 100644
+--- a/drivers/net/wireless/mediatek/mt76/tx.c
++++ b/drivers/net/wireless/mediatek/mt76/tx.c
+@@ -100,7 +100,8 @@ __mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb, u8 flags,
+ 		return;
+ 
+ 	/* Tx status can be unreliable. if it fails, mark the frame as ACKed */
+-	if (flags & MT_TX_CB_TXS_FAILED) {
++	if (flags & MT_TX_CB_TXS_FAILED &&
++	    (dev->drv->drv_flags & MT_DRV_IGNORE_TXS_FAILED)) {
+ 		info->status.rates[0].count = 0;
+ 		info->status.rates[0].idx = -1;
+ 		info->flags |= IEEE80211_TX_STAT_ACK;
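
With the new MT_DRV_IGNORE_TXS_FAILED flag, the "treat a failed TX status as an ACK" fallback becomes opt-in: only the mt76x0/mt76x2 probe paths set it, so other chips now report real failures to mac80211. A standalone sketch of the gating (names illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define DRV_IGNORE_TXS_FAILED   (1u << 6)

    struct driver_ops { uint32_t drv_flags; };

    static void report_tx_status(const struct driver_ops *ops, int txs_failed)
    {
            if (txs_failed && (ops->drv_flags & DRV_IGNORE_TXS_FAILED)) {
                    /* TXS is unreliable here: assume the frame was ACKed. */
                    puts("assume acked");
                    return;
            }
            puts("report real status");
    }

    int main(void)
    {
            struct driver_ops legacy = { .drv_flags = DRV_IGNORE_TXS_FAILED };
            struct driver_ops modern = { .drv_flags = 0 };

            report_tx_status(&legacy, 1);
            report_tx_status(&modern, 1);
            return 0;
    }
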
+diff --git a/drivers/net/wireless/realtek/rtl8xxxu/core.c b/drivers/net/wireless/realtek/rtl8xxxu/core.c
+index 4ce0c05c512910..569856ca677f62 100644
+--- a/drivers/net/wireless/realtek/rtl8xxxu/core.c
++++ b/drivers/net/wireless/realtek/rtl8xxxu/core.c
+@@ -860,9 +860,10 @@ rtl8xxxu_writeN(struct rtl8xxxu_priv *priv, u16 addr, u8 *buf, u16 len)
+ 	return len;
+ 
+ write_error:
+-	dev_info(&udev->dev,
+-		 "%s: Failed to write block at addr: %04x size: %04x\n",
+-		 __func__, addr, blocksize);
++	if (rtl8xxxu_debug & RTL8XXXU_DEBUG_REG_WRITE)
++		dev_info(&udev->dev,
++			 "%s: Failed to write block at addr: %04x size: %04x\n",
++			 __func__, addr, blocksize);
+ 	return -EAGAIN;
+ }
+ 
+@@ -4064,8 +4065,14 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
+ 	 */
+ 	rtl8xxxu_write16(priv, REG_TRXFF_BNDY + 2, fops->trxff_boundary);
+ 
+-	ret = rtl8xxxu_download_firmware(priv);
+-	dev_dbg(dev, "%s: download_firmware %i\n", __func__, ret);
++	for (int retry = 5; retry >= 0 ; retry--) {
++		ret = rtl8xxxu_download_firmware(priv);
++		dev_dbg(dev, "%s: download_firmware %i\n", __func__, ret);
++		if (ret != -EAGAIN)
++			break;
++		if (retry)
++			dev_dbg(dev, "%s: retry firmware download\n", __func__);
++	}
+ 	if (ret)
+ 		goto exit;
+ 	ret = rtl8xxxu_start_firmware(priv);
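
Together, the two rtl8xxxu hunks make block-write failures quieter (the message now sits behind the REG_WRITE debug flag) and recoverable: an -EAGAIN from the firmware download is retried up to five times instead of failing device init outright. The retry shape in isolation:

    /* Retry only on the transient -EAGAIN; stop on success or any
     * permanent error. */
    #include <errno.h>
    #include <stdio.h>

    static int attempts;

    static int download_firmware(void)
    {
            return ++attempts < 3 ? -EAGAIN : 0; /* fail twice, then succeed */
    }

    int main(void)
    {
            int ret;

            for (int retry = 5; retry >= 0; retry--) {
                    ret = download_firmware();
                    if (ret != -EAGAIN)
                            break;
            }
            printf("ret=%d after %d attempts\n", ret, attempts);
            return 0;
    }
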
+diff --git a/drivers/net/wireless/realtek/rtw88/mac.c b/drivers/net/wireless/realtek/rtw88/mac.c
+index 564f5988ee82a7..d1c4f5cdcb21da 100644
+--- a/drivers/net/wireless/realtek/rtw88/mac.c
++++ b/drivers/net/wireless/realtek/rtw88/mac.c
+@@ -783,7 +783,8 @@ static int __rtw_download_firmware(struct rtw_dev *rtwdev,
+ 	if (!check_firmware_size(data, size))
+ 		return -EINVAL;
+ 
+-	if (!ltecoex_read_reg(rtwdev, 0x38, &ltecoex_bckp))
++	if (rtwdev->chip->ltecoex_addr &&
++	    !ltecoex_read_reg(rtwdev, 0x38, &ltecoex_bckp))
+ 		return -EBUSY;
+ 
+ 	wlan_cpu_enable(rtwdev, false);
+@@ -801,7 +802,8 @@ static int __rtw_download_firmware(struct rtw_dev *rtwdev,
+ 
+ 	wlan_cpu_enable(rtwdev, true);
+ 
+-	if (!ltecoex_reg_write(rtwdev, 0x38, ltecoex_bckp)) {
++	if (rtwdev->chip->ltecoex_addr &&
++	    !ltecoex_reg_write(rtwdev, 0x38, ltecoex_bckp)) {
+ 		ret = -EBUSY;
+ 		goto dlfw_fail;
+ 	}
+diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
+index bbdef38c7e341c..a808af2f085ec8 100644
+--- a/drivers/net/wireless/realtek/rtw88/main.c
++++ b/drivers/net/wireless/realtek/rtw88/main.c
+@@ -1543,6 +1543,7 @@ static void rtw_init_ht_cap(struct rtw_dev *rtwdev,
+ {
+ 	const struct rtw_chip_info *chip = rtwdev->chip;
+ 	struct rtw_efuse *efuse = &rtwdev->efuse;
++	int i;
+ 
+ 	ht_cap->ht_supported = true;
+ 	ht_cap->cap = 0;
+@@ -1562,25 +1563,20 @@ static void rtw_init_ht_cap(struct rtw_dev *rtwdev,
+ 	ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+ 	ht_cap->ampdu_density = chip->ampdu_density;
+ 	ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+-	if (efuse->hw_cap.nss > 1) {
+-		ht_cap->mcs.rx_mask[0] = 0xFF;
+-		ht_cap->mcs.rx_mask[1] = 0xFF;
+-		ht_cap->mcs.rx_mask[4] = 0x01;
+-		ht_cap->mcs.rx_highest = cpu_to_le16(300);
+-	} else {
+-		ht_cap->mcs.rx_mask[0] = 0xFF;
+-		ht_cap->mcs.rx_mask[1] = 0x00;
+-		ht_cap->mcs.rx_mask[4] = 0x01;
+-		ht_cap->mcs.rx_highest = cpu_to_le16(150);
+-	}
++
++	for (i = 0; i < efuse->hw_cap.nss; i++)
++		ht_cap->mcs.rx_mask[i] = 0xFF;
++	ht_cap->mcs.rx_mask[4] = 0x01;
++	ht_cap->mcs.rx_highest = cpu_to_le16(150 * efuse->hw_cap.nss);
+ }
+ 
+ static void rtw_init_vht_cap(struct rtw_dev *rtwdev,
+ 			     struct ieee80211_sta_vht_cap *vht_cap)
+ {
+ 	struct rtw_efuse *efuse = &rtwdev->efuse;
+-	u16 mcs_map;
++	u16 mcs_map = 0;
+ 	__le16 highest;
++	int i;
+ 
+ 	if (efuse->hw_cap.ptcl != EFUSE_HW_CAP_IGNORE &&
+ 	    efuse->hw_cap.ptcl != EFUSE_HW_CAP_PTCL_VHT)
+@@ -1603,21 +1599,15 @@ static void rtw_init_vht_cap(struct rtw_dev *rtwdev,
+ 	if (rtw_chip_has_rx_ldpc(rtwdev))
+ 		vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC;
+ 
+-	mcs_map = IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 |
+-		  IEEE80211_VHT_MCS_NOT_SUPPORTED << 4 |
+-		  IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 |
+-		  IEEE80211_VHT_MCS_NOT_SUPPORTED << 8 |
+-		  IEEE80211_VHT_MCS_NOT_SUPPORTED << 10 |
+-		  IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 |
+-		  IEEE80211_VHT_MCS_NOT_SUPPORTED << 14;
+-	if (efuse->hw_cap.nss > 1) {
+-		highest = cpu_to_le16(780);
+-		mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << 2;
+-	} else {
+-		highest = cpu_to_le16(390);
+-		mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << 2;
++	for (i = 0; i < 8; i++) {
++		if (i < efuse->hw_cap.nss)
++			mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2);
++		else
++			mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2);
+ 	}
+ 
++	highest = cpu_to_le16(390 * efuse->hw_cap.nss);
++
+ 	vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
+ 	vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
+ 	vht_cap->vht_mcs.rx_highest = highest;
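
Both capability helpers now scale with efuse->hw_cap.nss instead of hard-coding the one- and two-stream cases: HT gets one 0xFF rx_mask byte and 150 Mb/s of rx_highest per stream, and the VHT map is built two bits per stream. A quick check that the loop reproduces the old constants:

    #include <stdint.h>
    #include <stdio.h>

    #define VHT_MCS_SUPPORT_0_9     0
    #define VHT_MCS_NOT_SUPPORTED   3

    static uint16_t build_mcs_map(int nss)
    {
            uint16_t map = 0;

            for (int i = 0; i < 8; i++)
                    map |= (i < nss ? VHT_MCS_SUPPORT_0_9
                                    : VHT_MCS_NOT_SUPPORTED) << (i * 2);
            return map;
    }

    int main(void)
    {
            /* 1SS -> 0xfffc, 2SS -> 0xfff0, matching the removed values. */
            printf("0x%04x 0x%04x\n", build_mcs_map(1), build_mcs_map(2));
            return 0;
    }
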
+diff --git a/drivers/net/wireless/realtek/rtw88/reg.h b/drivers/net/wireless/realtek/rtw88/reg.h
+index 4d9b8668e8b047..2708ee2f12a471 100644
+--- a/drivers/net/wireless/realtek/rtw88/reg.h
++++ b/drivers/net/wireless/realtek/rtw88/reg.h
+@@ -109,6 +109,7 @@
+ #define BIT_SHIFT_ROM_PGE	16
+ #define BIT_FW_INIT_RDY		BIT(15)
+ #define BIT_FW_DW_RDY		BIT(14)
++#define BIT_CPU_CLK_SEL		(BIT(12) | BIT(13))
+ #define BIT_RPWM_TOGGLE		BIT(7)
+ #define BIT_RAM_DL_SEL		BIT(7)	/* legacy only */
+ #define BIT_DMEM_CHKSUM_OK	BIT(6)
+@@ -126,7 +127,7 @@
+ 				 BIT_CHECK_SUM_OK)
+ #define FW_READY_LEGACY		(BIT_MCUFWDL_RDY | BIT_FWDL_CHK_RPT |	       \
+ 				 BIT_WINTINI_RDY | BIT_RAM_DL_SEL)
+-#define FW_READY_MASK		0xffff
++#define FW_READY_MASK		(0xffff & ~BIT_CPU_CLK_SEL)
+ 
+ #define REG_MCU_TST_CFG		0x84
+ #define VAL_FW_TRIGGER		0x1
+diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.c b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
+index 6edb17aea90e0e..4a6c0a9266a099 100644
+--- a/drivers/net/wireless/realtek/rtw88/rtw8822b.c
++++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
+@@ -976,11 +976,11 @@ static void rtw8822b_query_rx_desc(struct rtw_dev *rtwdev, u8 *rx_desc,
+ }
+ 
+ static void
+-rtw8822b_set_tx_power_index_by_rate(struct rtw_dev *rtwdev, u8 path, u8 rs)
++rtw8822b_set_tx_power_index_by_rate(struct rtw_dev *rtwdev, u8 path,
++				    u8 rs, u32 *phy_pwr_idx)
+ {
+ 	struct rtw_hal *hal = &rtwdev->hal;
+ 	static const u32 offset_txagc[2] = {0x1d00, 0x1d80};
+-	static u32 phy_pwr_idx;
+ 	u8 rate, rate_idx, pwr_index, shift;
+ 	int j;
+ 
+@@ -988,12 +988,12 @@ rtw8822b_set_tx_power_index_by_rate(struct rtw_dev *rtwdev, u8 path, u8 rs)
+ 		rate = rtw_rate_section[rs][j];
+ 		pwr_index = hal->tx_pwr_tbl[path][rate];
+ 		shift = rate & 0x3;
+-		phy_pwr_idx |= ((u32)pwr_index << (shift * 8));
++		*phy_pwr_idx |= ((u32)pwr_index << (shift * 8));
+ 		if (shift == 0x3) {
+ 			rate_idx = rate & 0xfc;
+ 			rtw_write32(rtwdev, offset_txagc[path] + rate_idx,
+-				    phy_pwr_idx);
+-			phy_pwr_idx = 0;
++				    *phy_pwr_idx);
++			*phy_pwr_idx = 0;
+ 		}
+ 	}
+ }
+@@ -1001,11 +1001,13 @@ rtw8822b_set_tx_power_index_by_rate(struct rtw_dev *rtwdev, u8 path, u8 rs)
+ static void rtw8822b_set_tx_power_index(struct rtw_dev *rtwdev)
+ {
+ 	struct rtw_hal *hal = &rtwdev->hal;
++	u32 phy_pwr_idx = 0;
+ 	int rs, path;
+ 
+ 	for (path = 0; path < hal->rf_path_num; path++) {
+ 		for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++)
+-			rtw8822b_set_tx_power_index_by_rate(rtwdev, path, rs);
++			rtw8822b_set_tx_power_index_by_rate(rtwdev, path, rs,
++							    &phy_pwr_idx);
+ 	}
+ }
+ 
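
The rtw8822b change fixes a reentrancy bug: the 32-bit TX-power accumulator was function-local static, so partially packed state could leak between calls (and between devices); it now lives on the caller's stack and is passed down. A sketch of the pack-four-then-flush pattern with a caller-owned accumulator:

    #include <stdint.h>
    #include <stdio.h>

    static void accumulate(uint32_t *acc, uint8_t rate, uint8_t pwr)
    {
            *acc |= (uint32_t)pwr << ((rate & 0x3) * 8);
            if ((rate & 0x3) == 0x3) {      /* four rates packed: flush */
                    printf("write 0x%08x\n", *acc);
                    *acc = 0;
            }
    }

    int main(void)
    {
            uint32_t acc = 0;               /* caller-owned, not static */

            for (uint8_t rate = 0; rate < 8; rate++)
                    accumulate(&acc, rate, rate + 0x10);
            return 0;
    }
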
+diff --git a/drivers/net/wireless/realtek/rtw88/util.c b/drivers/net/wireless/realtek/rtw88/util.c
+index e222d3c01a77ec..66819f69440550 100644
+--- a/drivers/net/wireless/realtek/rtw88/util.c
++++ b/drivers/net/wireless/realtek/rtw88/util.c
+@@ -101,7 +101,8 @@ void rtw_desc_to_mcsrate(u16 rate, u8 *mcs, u8 *nss)
+ 		*nss = 4;
+ 		*mcs = rate - DESC_RATEVHT4SS_MCS0;
+ 	} else if (rate >= DESC_RATEMCS0 &&
+-		   rate <= DESC_RATEMCS15) {
++		   rate <= DESC_RATEMCS31) {
++		*nss = 0;
+ 		*mcs = rate - DESC_RATEMCS0;
+ 	}
+ }
+diff --git a/drivers/net/wireless/realtek/rtw89/coex.c b/drivers/net/wireless/realtek/rtw89/coex.c
+index 8d54d71fcf539e..6cdbf02f405ae1 100644
+--- a/drivers/net/wireless/realtek/rtw89/coex.c
++++ b/drivers/net/wireless/realtek/rtw89/coex.c
+@@ -89,10 +89,10 @@ static const struct rtw89_btc_fbtc_slot s_def[] = {
+ 	[CXST_B4]	= __DEF_FBTC_SLOT(50,  0xe5555555, SLOT_MIX),
+ 	[CXST_LK]	= __DEF_FBTC_SLOT(20,  0xea5a5a5a, SLOT_ISO),
+ 	[CXST_BLK]	= __DEF_FBTC_SLOT(500, 0x55555555, SLOT_MIX),
+-	[CXST_E2G]	= __DEF_FBTC_SLOT(0,   0xea5a5a5a, SLOT_MIX),
+-	[CXST_E5G]	= __DEF_FBTC_SLOT(0,   0xffffffff, SLOT_ISO),
++	[CXST_E2G]	= __DEF_FBTC_SLOT(5,   0xea5a5a5a, SLOT_MIX),
++	[CXST_E5G]	= __DEF_FBTC_SLOT(5,   0xffffffff, SLOT_ISO),
+ 	[CXST_EBT]	= __DEF_FBTC_SLOT(5,   0xe5555555, SLOT_MIX),
+-	[CXST_ENULL]	= __DEF_FBTC_SLOT(0,   0xaaaaaaaa, SLOT_ISO),
++	[CXST_ENULL]	= __DEF_FBTC_SLOT(5,   0xaaaaaaaa, SLOT_ISO),
+ 	[CXST_WLK]	= __DEF_FBTC_SLOT(250, 0xea5a5a5a, SLOT_MIX),
+ 	[CXST_W1FDD]	= __DEF_FBTC_SLOT(50,  0xffffffff, SLOT_ISO),
+ 	[CXST_B1FDD]	= __DEF_FBTC_SLOT(50,  0xffffdfff, SLOT_ISO),
+@@ -5356,7 +5356,8 @@ static void _action_wl_scan(struct rtw89_dev *rtwdev)
+ 	struct rtw89_btc_wl_info *wl = &btc->cx.wl;
+ 	struct rtw89_btc_wl_dbcc_info *wl_dinfo = &wl->dbcc_info;
+ 
+-	if (RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD, &rtwdev->fw)) {
++	if (btc->cx.state_map != BTC_WLINKING &&
++	    RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD, &rtwdev->fw)) {
+ 		_action_wl_25g_mcc(rtwdev);
+ 		rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], Scan offload!\n");
+ 	} else if (rtwdev->dbcc_en) {
+@@ -7178,6 +7179,8 @@ void rtw89_btc_ntfy_scan_finish(struct rtw89_dev *rtwdev, u8 phy_idx)
+ 		_fw_set_drv_info(rtwdev, CXDRVINFO_DBCC);
+ 	}
+ 
++	btc->dm.tdma_instant_excute = 1;
++
+ 	_run_coex(rtwdev, BTC_RSN_NTFY_SCAN_FINISH);
+ }
+ 
+@@ -7630,7 +7633,8 @@ void rtw89_btc_ntfy_role_info(struct rtw89_dev *rtwdev,
+ 	else
+ 		wl->status.map.connecting = 0;
+ 
+-	if (state == BTC_ROLE_MSTS_STA_DIS_CONN)
++	if (state == BTC_ROLE_MSTS_STA_DIS_CONN ||
++	    state == BTC_ROLE_MSTS_STA_CONN_END)
+ 		wl->status.map._4way = false;
+ 
+ 	_run_coex(rtwdev, BTC_RSN_NTFY_ROLE_INFO);
+diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c
+index f82a26be6fa82b..83b22bd0ce81a3 100644
+--- a/drivers/net/wireless/realtek/rtw89/core.c
++++ b/drivers/net/wireless/realtek/rtw89/core.c
+@@ -4862,8 +4862,6 @@ static int rtw89_chip_efuse_info_setup(struct rtw89_dev *rtwdev)
+ 
+ 	rtw89_hci_mac_pre_deinit(rtwdev);
+ 
+-	rtw89_mac_pwr_off(rtwdev);
+-
+ 	return 0;
+ }
+ 
+@@ -4944,36 +4942,45 @@ int rtw89_chip_info_setup(struct rtw89_dev *rtwdev)
+ 
+ 	rtw89_read_chip_ver(rtwdev);
+ 
++	ret = rtw89_mac_pwr_on(rtwdev);
++	if (ret) {
++		rtw89_err(rtwdev, "failed to power on\n");
++		return ret;
++	}
++
+ 	ret = rtw89_wait_firmware_completion(rtwdev);
+ 	if (ret) {
+ 		rtw89_err(rtwdev, "failed to wait firmware completion\n");
+-		return ret;
++		goto out;
+ 	}
+ 
+ 	ret = rtw89_fw_recognize(rtwdev);
+ 	if (ret) {
+ 		rtw89_err(rtwdev, "failed to recognize firmware\n");
+-		return ret;
++		goto out;
+ 	}
+ 
+ 	ret = rtw89_chip_efuse_info_setup(rtwdev);
+ 	if (ret)
+-		return ret;
++		goto out;
+ 
+ 	ret = rtw89_fw_recognize_elements(rtwdev);
+ 	if (ret) {
+ 		rtw89_err(rtwdev, "failed to recognize firmware elements\n");
+-		return ret;
++		goto out;
+ 	}
+ 
+ 	ret = rtw89_chip_board_info_setup(rtwdev);
+ 	if (ret)
+-		return ret;
++		goto out;
+ 
+ 	rtw89_core_setup_rfe_parms(rtwdev);
+ 	rtwdev->ps_mode = rtw89_update_ps_mode(rtwdev);
+ 
+-	return 0;
++out:
++	rtw89_mac_pwr_off(rtwdev);
++
++	return ret;
+ }
+ EXPORT_SYMBOL(rtw89_chip_info_setup);
+ 
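
rtw89_chip_info_setup() now brackets the whole probe-time sequence with an explicit power-on and a single out: label that powers the MAC back off on every path, instead of rtw89_chip_efuse_info_setup() powering off as a side effect. The control-flow shape in isolation (step() is a stand-in for the individual setup calls):

    #include <stdio.h>

    static int power_on(void)   { puts("power on");  return 0; }
    static void power_off(void) { puts("power off"); }
    static int step(int i)      { return i == 42 ? -1 : 0; }   /* illustrative */

    static int chip_setup(void)
    {
            int ret = power_on();

            if (ret)
                    return ret;

            ret = step(1);
            if (ret)
                    goto out;

            ret = step(2);
            if (ret)
                    goto out;

    out:
            power_off();    /* runs on success and on every failure */
            return ret;
    }

    int main(void) { return chip_setup(); }
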
+diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h
+index ff3048d2489f12..4f64ea392e6c9e 100644
+--- a/drivers/net/wireless/realtek/rtw89/core.h
++++ b/drivers/net/wireless/realtek/rtw89/core.h
+@@ -17,6 +17,7 @@ struct rtw89_dev;
+ struct rtw89_pci_info;
+ struct rtw89_mac_gen_def;
+ struct rtw89_phy_gen_def;
++struct rtw89_fw_blacklist;
+ struct rtw89_efuse_block_cfg;
+ struct rtw89_h2c_rf_tssi;
+ struct rtw89_fw_txpwr_track_cfg;
+@@ -4232,6 +4233,7 @@ struct rtw89_chip_info {
+ 	bool try_ce_fw;
+ 	u8 bbmcu_nr;
+ 	u32 needed_fw_elms;
++	const struct rtw89_fw_blacklist *fw_blacklist;
+ 	u32 fifo_size;
+ 	bool small_fifo_size;
+ 	u32 dle_scc_rsvd_size;
+diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c
+index 620e076d1b597d..e5c90050e71158 100644
+--- a/drivers/net/wireless/realtek/rtw89/fw.c
++++ b/drivers/net/wireless/realtek/rtw89/fw.c
+@@ -38,6 +38,16 @@ struct rtw89_arp_rsp {
+ 
+ static const u8 mss_signature[] = {0x4D, 0x53, 0x53, 0x4B, 0x50, 0x4F, 0x4F, 0x4C};
+ 
++const struct rtw89_fw_blacklist rtw89_fw_blacklist_default = {
++	.ver = 0x00,
++	.list = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
++		 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
++		 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
++		 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
++	},
++};
++EXPORT_SYMBOL(rtw89_fw_blacklist_default);
++
+ union rtw89_fw_element_arg {
+ 	size_t offset;
+ 	enum rtw89_rf_path rf_path;
+@@ -285,7 +295,7 @@ static int __parse_formatted_mssc(struct rtw89_dev *rtwdev,
+ 	if (!sec->secure_boot)
+ 		goto out;
+ 
+-	sb_sel_ver = le32_to_cpu(section_content->sb_sel_ver.v);
++	sb_sel_ver = get_unaligned_le32(&section_content->sb_sel_ver.v);
+ 	if (sb_sel_ver && sb_sel_ver != sec->sb_sel_mgn)
+ 		goto ignore;
+ 
+@@ -315,6 +325,46 @@ static int __parse_formatted_mssc(struct rtw89_dev *rtwdev,
+ 	return 0;
+ }
+ 
++static int __check_secure_blacklist(struct rtw89_dev *rtwdev,
++				    struct rtw89_fw_bin_info *info,
++				    struct rtw89_fw_hdr_section_info *section_info,
++				    const void *content)
++{
++	const struct rtw89_fw_blacklist *chip_blacklist = rtwdev->chip->fw_blacklist;
++	const union rtw89_fw_section_mssc_content *section_content = content;
++	struct rtw89_fw_secure *sec = &rtwdev->fw.sec;
++	u8 byte_idx;
++	u8 bit_mask;
++
++	if (!sec->secure_boot)
++		return 0;
++
++	if (!info->secure_section_exist || section_info->ignore)
++		return 0;
++
++	if (!chip_blacklist) {
++		rtw89_err(rtwdev, "chip no blacklist for secure firmware\n");
++		return -ENOENT;
++	}
++
++	byte_idx = section_content->blacklist.bit_in_chip_list >> 3;
++	bit_mask = BIT(section_content->blacklist.bit_in_chip_list & 0x7);
++
++	if (section_content->blacklist.ver > chip_blacklist->ver) {
++		rtw89_err(rtwdev, "chip blacklist out of date (%u, %u)\n",
++			  section_content->blacklist.ver, chip_blacklist->ver);
++		return -EINVAL;
++	}
++
++	if (chip_blacklist->list[byte_idx] & bit_mask) {
++		rtw89_err(rtwdev, "firmware %u in chip blacklist\n",
++			  section_content->blacklist.ver);
++		return -EPERM;
++	}
++
++	return 0;
++}
++
+ static int __parse_security_section(struct rtw89_dev *rtwdev,
+ 				    struct rtw89_fw_bin_info *info,
+ 				    struct rtw89_fw_hdr_section_info *section_info,
+@@ -340,7 +390,7 @@ static int __parse_security_section(struct rtw89_dev *rtwdev,
+ 		info->secure_section_exist = true;
+ 	}
+ 
+-	return 0;
++	return __check_secure_blacklist(rtwdev, info, section_info, content);
+ }
+ 
+ static int rtw89_fw_hdr_parser_v1(struct rtw89_dev *rtwdev, const u8 *fw, u32 len,
+@@ -451,6 +501,30 @@ static int rtw89_fw_hdr_parser(struct rtw89_dev *rtwdev,
+ 	}
+ }
+ 
++static int rtw89_mfw_validate_hdr(struct rtw89_dev *rtwdev,
++				  const struct firmware *firmware,
++				  const struct rtw89_mfw_hdr *mfw_hdr)
++{
++	const void *mfw = firmware->data;
++	u32 mfw_len = firmware->size;
++	u8 fw_nr = mfw_hdr->fw_nr;
++	const void *ptr;
++
++	if (fw_nr == 0) {
++		rtw89_err(rtwdev, "mfw header has no fw entry\n");
++		return -ENOENT;
++	}
++
++	ptr = &mfw_hdr->info[fw_nr];
++
++	if (ptr > mfw + mfw_len) {
++		rtw89_err(rtwdev, "mfw header out of address\n");
++		return -EFAULT;
++	}
++
++	return 0;
++}
++
+ static
+ int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
+ 			struct rtw89_fw_suit *fw_suit, bool nowarn)
+@@ -461,6 +535,7 @@ int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
+ 	u32 mfw_len = firmware->size;
+ 	const struct rtw89_mfw_hdr *mfw_hdr = (const struct rtw89_mfw_hdr *)mfw;
+ 	const struct rtw89_mfw_info *mfw_info = NULL, *tmp;
++	int ret;
+ 	int i;
+ 
+ 	if (mfw_hdr->sig != RTW89_MFW_SIG) {
+@@ -473,6 +548,10 @@ int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
+ 		return 0;
+ 	}
+ 
++	ret = rtw89_mfw_validate_hdr(rtwdev, firmware, mfw_hdr);
++	if (ret)
++		return ret;
++
+ 	for (i = 0; i < mfw_hdr->fw_nr; i++) {
+ 		tmp = &mfw_hdr->info[i];
+ 		if (tmp->type != type)
+@@ -502,6 +581,12 @@ int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
+ found:
+ 	fw_suit->data = mfw + le32_to_cpu(mfw_info->shift);
+ 	fw_suit->size = le32_to_cpu(mfw_info->size);
++
++	if (fw_suit->data + fw_suit->size > mfw + mfw_len) {
++		rtw89_err(rtwdev, "fw_suit %d out of address\n", type);
++		return -EFAULT;
++	}
++
+ 	return 0;
+ }
+ 
+@@ -513,12 +598,17 @@ static u32 rtw89_mfw_get_size(struct rtw89_dev *rtwdev)
+ 		(const struct rtw89_mfw_hdr *)firmware->data;
+ 	const struct rtw89_mfw_info *mfw_info;
+ 	u32 size;
++	int ret;
+ 
+ 	if (mfw_hdr->sig != RTW89_MFW_SIG) {
+ 		rtw89_warn(rtwdev, "not mfw format\n");
+ 		return 0;
+ 	}
+ 
++	ret = rtw89_mfw_validate_hdr(rtwdev, firmware, mfw_hdr);
++	if (ret)
++		return ret;
++
+ 	mfw_info = &mfw_hdr->info[mfw_hdr->fw_nr - 1];
+ 	size = le32_to_cpu(mfw_info->shift) + le32_to_cpu(mfw_info->size);
+ 
+@@ -1234,7 +1324,6 @@ static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev,
+ 	ret = rtw89_h2c_tx(rtwdev, skb, false);
+ 	if (ret) {
+ 		rtw89_err(rtwdev, "failed to send h2c\n");
+-		ret = -1;
+ 		goto fail;
+ 	}
+ 
+@@ -1311,7 +1400,6 @@ static int __rtw89_fw_download_main(struct rtw89_dev *rtwdev,
+ 		ret = rtw89_h2c_tx(rtwdev, skb, true);
+ 		if (ret) {
+ 			rtw89_err(rtwdev, "failed to send h2c\n");
+-			ret = -1;
+ 			goto fail;
+ 		}
+ 
+@@ -3080,9 +3168,10 @@ int rtw89_fw_h2c_assoc_cmac_tbl_g7(struct rtw89_dev *rtwdev,
+ 			      CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3 |
+ 			      CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4);
+ 
+-	h2c->w6 = le32_encode_bits(vif->type == NL80211_IFTYPE_STATION ? 1 : 0,
++	h2c->w6 = le32_encode_bits(vif->cfg.aid, CCTLINFO_G7_W6_AID12_PAID) |
++		  le32_encode_bits(vif->type == NL80211_IFTYPE_STATION ? 1 : 0,
+ 				   CCTLINFO_G7_W6_ULDL);
+-	h2c->m6 = cpu_to_le32(CCTLINFO_G7_W6_ULDL);
++	h2c->m6 = cpu_to_le32(CCTLINFO_G7_W6_AID12_PAID | CCTLINFO_G7_W6_ULDL);
+ 
+ 	if (rtwsta_link) {
+ 		h2c->w8 = le32_encode_bits(link_sta->he_cap.has_he,
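
The secure-firmware blacklist added above is a 32-byte bitmap: the firmware's bit_in_chip_list selects a byte with >> 3 and a bit within it with & 0x7, and loading is refused if that bit is set (or if the firmware's blacklist version is newer than the driver's copy). The membership test in isolation:

    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n) (1u << (n))

    static int is_blacklisted(const uint8_t list[32], uint8_t bit_in_list)
    {
            uint8_t byte_idx = bit_in_list >> 3;
            uint8_t bit_mask = BIT(bit_in_list & 0x7);

            return !!(list[byte_idx] & bit_mask);
    }

    int main(void)
    {
            uint8_t list[32] = { 0 };

            list[1] = BIT(2);       /* mark bit index 10 */
            printf("%d %d\n", is_blacklisted(list, 10),
                   is_blacklisted(list, 11));
            return 0;
    }
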
+diff --git a/drivers/net/wireless/realtek/rtw89/fw.h b/drivers/net/wireless/realtek/rtw89/fw.h
+index ccbbc43f33feed..502ece540b9dca 100644
+--- a/drivers/net/wireless/realtek/rtw89/fw.h
++++ b/drivers/net/wireless/realtek/rtw89/fw.h
+@@ -639,6 +639,11 @@ struct rtw89_fw_mss_pool_hdr {
+ } __packed;
+ 
+ union rtw89_fw_section_mssc_content {
++	struct {
++		u8 pad[0x20];
++		u8 bit_in_chip_list;
++		u8 ver;
++	} __packed blacklist;
+ 	struct {
+ 		u8 pad[58];
+ 		__le32 v;
+@@ -649,6 +654,13 @@ union rtw89_fw_section_mssc_content {
+ 	} __packed key_sign_len;
+ } __packed;
+ 
++struct rtw89_fw_blacklist {
++	u8 ver;
++	u8 list[32];
++};
++
++extern const struct rtw89_fw_blacklist rtw89_fw_blacklist_default;
++
+ static inline void SET_CTRL_INFO_MACID(void *table, u32 val)
+ {
+ 	le32p_replace_bits((__le32 *)(table) + 0, val, GENMASK(6, 0));
+diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c
+index 4574aa62839b02..9b09d4b7dea597 100644
+--- a/drivers/net/wireless/realtek/rtw89/mac.c
++++ b/drivers/net/wireless/realtek/rtw89/mac.c
+@@ -1491,6 +1491,21 @@ static int rtw89_mac_power_switch(struct rtw89_dev *rtwdev, bool on)
+ #undef PWR_ACT
+ }
+ 
++int rtw89_mac_pwr_on(struct rtw89_dev *rtwdev)
++{
++	int ret;
++
++	ret = rtw89_mac_power_switch(rtwdev, true);
++	if (ret) {
++		rtw89_mac_power_switch(rtwdev, false);
++		ret = rtw89_mac_power_switch(rtwdev, true);
++		if (ret)
++			return ret;
++	}
++
++	return 0;
++}
++
+ void rtw89_mac_pwr_off(struct rtw89_dev *rtwdev)
+ {
+ 	rtw89_mac_power_switch(rtwdev, false);
+@@ -3918,14 +3933,6 @@ int rtw89_mac_partial_init(struct rtw89_dev *rtwdev, bool include_bb)
+ {
+ 	int ret;
+ 
+-	ret = rtw89_mac_power_switch(rtwdev, true);
+-	if (ret) {
+-		rtw89_mac_power_switch(rtwdev, false);
+-		ret = rtw89_mac_power_switch(rtwdev, true);
+-		if (ret)
+-			return ret;
+-	}
+-
+ 	rtw89_mac_ctrl_hci_dma_trx(rtwdev, true);
+ 
+ 	if (include_bb) {
+@@ -3958,6 +3965,10 @@ int rtw89_mac_init(struct rtw89_dev *rtwdev)
+ 	bool include_bb = !!chip->bbmcu_nr;
+ 	int ret;
+ 
++	ret = rtw89_mac_pwr_on(rtwdev);
++	if (ret)
++		return ret;
++
+ 	ret = rtw89_mac_partial_init(rtwdev, include_bb);
+ 	if (ret)
+ 		goto fail;
+@@ -3989,7 +4000,7 @@ int rtw89_mac_init(struct rtw89_dev *rtwdev)
+ 
+ 	return ret;
+ fail:
+-	rtw89_mac_power_switch(rtwdev, false);
++	rtw89_mac_pwr_off(rtwdev);
+ 
+ 	return ret;
+ }
+@@ -4745,6 +4756,32 @@ void rtw89_mac_set_he_obss_narrow_bw_ru(struct rtw89_dev *rtwdev,
+ 		rtw89_write32_set(rtwdev, reg, mac->narrow_bw_ru_dis.mask);
+ }
+ 
++void rtw89_mac_set_he_tb(struct rtw89_dev *rtwdev,
++			 struct rtw89_vif_link *rtwvif_link)
++{
++	struct ieee80211_bss_conf *bss_conf;
++	bool set;
++	u32 reg;
++
++	if (rtwdev->chip->chip_gen != RTW89_CHIP_BE)
++		return;
++
++	rcu_read_lock();
++
++	bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
++	set = bss_conf->he_support && !bss_conf->eht_support;
++
++	rcu_read_unlock();
++
++	reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_CLIENT_OM_CTRL,
++				   rtwvif_link->mac_idx);
++
++	if (set)
++		rtw89_write32_set(rtwdev, reg, B_BE_TRIG_DIS_EHTTB);
++	else
++		rtw89_write32_clr(rtwdev, reg, B_BE_TRIG_DIS_EHTTB);
++}
++
+ void rtw89_mac_stop_ap(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link)
+ {
+ 	rtw89_mac_port_cfg_func_sw(rtwdev, rtwvif_link);
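
rtw89_mac_set_he_tb() follows the usual RCU discipline: the link's bss_conf is dereferenced and the decision computed inside the read-side critical section, and only the precomputed boolean drives the register write after the unlock. A userspace approximation of that lock-scope shape, with a pthread rwlock standing in for RCU:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
    static struct { int he_support, eht_support; } bss_conf = { 1, 0 };

    static void set_he_tb(void)
    {
            int set;

            pthread_rwlock_rdlock(&lock);
            set = bss_conf.he_support && !bss_conf.eht_support;
            pthread_rwlock_unlock(&lock);

            /* the register write happens outside the lock */
            puts(set ? "set TRIG_DIS_EHTTB" : "clear TRIG_DIS_EHTTB");
    }

    int main(void) { set_he_tb(); return 0; }
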
+diff --git a/drivers/net/wireless/realtek/rtw89/mac.h b/drivers/net/wireless/realtek/rtw89/mac.h
+index 0c269961a57311..7974849f41e257 100644
+--- a/drivers/net/wireless/realtek/rtw89/mac.h
++++ b/drivers/net/wireless/realtek/rtw89/mac.h
+@@ -1120,6 +1120,7 @@ rtw89_write32_port_set(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_l
+ 	rtw89_write32_set(rtwdev, reg, bit);
+ }
+ 
++int rtw89_mac_pwr_on(struct rtw89_dev *rtwdev);
+ void rtw89_mac_pwr_off(struct rtw89_dev *rtwdev);
+ int rtw89_mac_partial_init(struct rtw89_dev *rtwdev, bool include_bb);
+ int rtw89_mac_init(struct rtw89_dev *rtwdev);
+@@ -1160,6 +1161,8 @@ void rtw89_mac_port_cfg_rx_sync(struct rtw89_dev *rtwdev,
+ 				struct rtw89_vif_link *rtwvif_link, bool en);
+ void rtw89_mac_set_he_obss_narrow_bw_ru(struct rtw89_dev *rtwdev,
+ 					struct rtw89_vif_link *rtwvif_link);
++void rtw89_mac_set_he_tb(struct rtw89_dev *rtwdev,
++			 struct rtw89_vif_link *rtwvif_link);
+ void rtw89_mac_stop_ap(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link);
+ void rtw89_mac_enable_beacon_for_ap_vifs(struct rtw89_dev *rtwdev, bool en);
+ int rtw89_mac_remove_vif(struct rtw89_dev *rtwdev, struct rtw89_vif_link *vif);
+diff --git a/drivers/net/wireless/realtek/rtw89/mac80211.c b/drivers/net/wireless/realtek/rtw89/mac80211.c
+index 8351a70d325d4a..3a1a2b243adf0e 100644
+--- a/drivers/net/wireless/realtek/rtw89/mac80211.c
++++ b/drivers/net/wireless/realtek/rtw89/mac80211.c
+@@ -669,6 +669,7 @@ static void __rtw89_ops_bss_link_assoc(struct rtw89_dev *rtwdev,
+ 	rtw89_chip_cfg_txpwr_ul_tb_offset(rtwdev, rtwvif_link);
+ 	rtw89_mac_port_update(rtwdev, rtwvif_link);
+ 	rtw89_mac_set_he_obss_narrow_bw_ru(rtwdev, rtwvif_link);
++	rtw89_mac_set_he_tb(rtwdev, rtwvif_link);
+ }
+ 
+ static void __rtw89_ops_bss_assoc(struct rtw89_dev *rtwdev,
+diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h
+index 69678eab230939..9fbcc7fee290fa 100644
+--- a/drivers/net/wireless/realtek/rtw89/reg.h
++++ b/drivers/net/wireless/realtek/rtw89/reg.h
+@@ -7093,6 +7093,10 @@
+ #define B_BE_MACLBK_RDY_NUM_MASK GENMASK(7, 3)
+ #define B_BE_MACLBK_EN BIT(0)
+ 
++#define R_BE_CLIENT_OM_CTRL 0x11040
++#define R_BE_CLIENT_OM_CTRL_C1 0x15040
++#define B_BE_TRIG_DIS_EHTTB BIT(24)
++
+ #define R_BE_WMAC_NAV_CTL 0x11080
+ #define R_BE_WMAC_NAV_CTL_C1 0x15080
+ #define B_BE_WMAC_NAV_UPPER_EN BIT(26)
+diff --git a/drivers/net/wireless/realtek/rtw89/regd.c b/drivers/net/wireless/realtek/rtw89/regd.c
+index bb064a086970bb..e8df68818da014 100644
+--- a/drivers/net/wireless/realtek/rtw89/regd.c
++++ b/drivers/net/wireless/realtek/rtw89/regd.c
+@@ -695,6 +695,7 @@ void rtw89_regd_notifier(struct wiphy *wiphy, struct regulatory_request *request
+ 	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+ 	struct rtw89_dev *rtwdev = hw->priv;
+ 
++	wiphy_lock(wiphy);
+ 	mutex_lock(&rtwdev->mutex);
+ 	rtw89_leave_ps_mode(rtwdev);
+ 
+@@ -712,6 +713,7 @@ void rtw89_regd_notifier(struct wiphy *wiphy, struct regulatory_request *request
+ 
+ exit:
+ 	mutex_unlock(&rtwdev->mutex);
++	wiphy_unlock(wiphy);
+ }
+ 
+ /* Maximum Transmit Power field (@raw) can be EIRP or PSD.
+diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851b.c b/drivers/net/wireless/realtek/rtw89/rtw8851b.c
+index f9766bf30e71df..0d2a1e712b3455 100644
+--- a/drivers/net/wireless/realtek/rtw89/rtw8851b.c
++++ b/drivers/net/wireless/realtek/rtw89/rtw8851b.c
+@@ -2443,6 +2443,7 @@ const struct rtw89_chip_info rtw8851b_chip_info = {
+ 	.try_ce_fw		= true,
+ 	.bbmcu_nr		= 0,
+ 	.needed_fw_elms		= 0,
++	.fw_blacklist		= NULL,
+ 	.fifo_size		= 196608,
+ 	.small_fifo_size	= true,
+ 	.dle_scc_rsvd_size	= 98304,
+diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.c b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
+index 42d369d2e916a6..5f08207936c689 100644
+--- a/drivers/net/wireless/realtek/rtw89/rtw8852a.c
++++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
+@@ -2159,6 +2159,7 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
+ 	.try_ce_fw		= false,
+ 	.bbmcu_nr		= 0,
+ 	.needed_fw_elms		= 0,
++	.fw_blacklist		= NULL,
+ 	.fifo_size		= 458752,
+ 	.small_fifo_size	= false,
+ 	.dle_scc_rsvd_size	= 0,
+diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b.c b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
+index 364aa21cbd446f..0e03d97ba1cf6a 100644
+--- a/drivers/net/wireless/realtek/rtw89/rtw8852b.c
++++ b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
+@@ -797,6 +797,7 @@ const struct rtw89_chip_info rtw8852b_chip_info = {
+ 	.try_ce_fw		= true,
+ 	.bbmcu_nr		= 0,
+ 	.needed_fw_elms		= 0,
++	.fw_blacklist		= &rtw89_fw_blacklist_default,
+ 	.fifo_size		= 196608,
+ 	.small_fifo_size	= true,
+ 	.dle_scc_rsvd_size	= 98304,
+diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bt.c b/drivers/net/wireless/realtek/rtw89/rtw8852bt.c
+index dab7e71ec6a140..1dd3e51bab9f3d 100644
+--- a/drivers/net/wireless/realtek/rtw89/rtw8852bt.c
++++ b/drivers/net/wireless/realtek/rtw89/rtw8852bt.c
+@@ -731,6 +731,7 @@ const struct rtw89_chip_info rtw8852bt_chip_info = {
+ 	.try_ce_fw		= true,
+ 	.bbmcu_nr		= 0,
+ 	.needed_fw_elms		= RTW89_AX_GEN_DEF_NEEDED_FW_ELEMENTS_NO_6GHZ,
++	.fw_blacklist		= &rtw89_fw_blacklist_default,
+ 	.fifo_size		= 458752,
+ 	.small_fifo_size	= true,
+ 	.dle_scc_rsvd_size	= 98304,
+diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
+index dbe77abb2c488f..5e2592cf1a9faa 100644
+--- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c
++++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
+@@ -2936,6 +2936,7 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
+ 	.try_ce_fw		= false,
+ 	.bbmcu_nr		= 0,
+ 	.needed_fw_elms		= 0,
++	.fw_blacklist		= &rtw89_fw_blacklist_default,
+ 	.fifo_size		= 458752,
+ 	.small_fifo_size	= false,
+ 	.dle_scc_rsvd_size	= 0,
+diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922a.c b/drivers/net/wireless/realtek/rtw89/rtw8922a.c
+index ef7747adbcc2b8..64a41f24b2adb2 100644
+--- a/drivers/net/wireless/realtek/rtw89/rtw8922a.c
++++ b/drivers/net/wireless/realtek/rtw89/rtw8922a.c
+@@ -2632,6 +2632,7 @@ const struct rtw89_chip_info rtw8922a_chip_info = {
+ 	.try_ce_fw		= false,
+ 	.bbmcu_nr		= 1,
+ 	.needed_fw_elms		= RTW89_BE_GEN_DEF_NEEDED_FW_ELEMENTS,
++	.fw_blacklist		= &rtw89_fw_blacklist_default,
+ 	.fifo_size		= 589824,
+ 	.small_fifo_size	= false,
+ 	.dle_scc_rsvd_size	= 0,
+diff --git a/drivers/net/wireless/realtek/rtw89/ser.c b/drivers/net/wireless/realtek/rtw89/ser.c
+index 7b203bb7f151a7..02c2ac12f197a5 100644
+--- a/drivers/net/wireless/realtek/rtw89/ser.c
++++ b/drivers/net/wireless/realtek/rtw89/ser.c
+@@ -156,9 +156,11 @@ static void ser_state_run(struct rtw89_ser *ser, u8 evt)
+ 	rtw89_debug(rtwdev, RTW89_DBG_SER, "ser: %s receive %s\n",
+ 		    ser_st_name(ser), ser_ev_name(ser, evt));
+ 
++	wiphy_lock(rtwdev->hw->wiphy);
+ 	mutex_lock(&rtwdev->mutex);
+ 	rtw89_leave_lps(rtwdev);
+ 	mutex_unlock(&rtwdev->mutex);
++	wiphy_unlock(rtwdev->hw->wiphy);
+ 
+ 	ser->st_tbl[ser->state].st_func(ser, evt);
+ }
+@@ -707,9 +709,11 @@ static void ser_l2_reset_st_hdl(struct rtw89_ser *ser, u8 evt)
+ 
+ 	switch (evt) {
+ 	case SER_EV_STATE_IN:
++		wiphy_lock(rtwdev->hw->wiphy);
+ 		mutex_lock(&rtwdev->mutex);
+ 		ser_l2_reset_st_pre_hdl(ser);
+ 		mutex_unlock(&rtwdev->mutex);
++		wiphy_unlock(rtwdev->hw->wiphy);
+ 
+ 		ieee80211_restart_hw(rtwdev->hw);
+ 		ser_set_alarm(ser, SER_RECFG_TIMEOUT, SER_EV_L2_RECFG_TIMEOUT);
+diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.c b/drivers/net/wireless/virtual/mac80211_hwsim.c
+index 3f424f14de4ec2..4a2b7c9921bc61 100644
+--- a/drivers/net/wireless/virtual/mac80211_hwsim.c
++++ b/drivers/net/wireless/virtual/mac80211_hwsim.c
+@@ -4,7 +4,7 @@
+  * Copyright (c) 2008, Jouni Malinen <j@w1.fi>
+  * Copyright (c) 2011, Javier Lopez <jlopex@gmail.com>
+  * Copyright (c) 2016 - 2017 Intel Deutschland GmbH
+- * Copyright (C) 2018 - 2024 Intel Corporation
++ * Copyright (C) 2018 - 2025 Intel Corporation
+  */
+ 
+ /*
+@@ -1983,11 +1983,13 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
+ 			return;
+ 		}
+ 
+-		if (sta && sta->mlo) {
+-			if (WARN_ON(!link_sta)) {
+-				ieee80211_free_txskb(hw, skb);
+-				return;
+-			}
++		/* Do address translations only between shared links. It is
++		 * possible that while a non-AP MLD station and an AP MLD
++		 * station have shared links, the frame is intended to be sent
++		 * on a link which is not shared (for example when sending a
++		 * probe response).
++		 */
++		if (sta && sta->mlo && link_sta) {
+ 			/* address translation to link addresses on TX */
+ 			ether_addr_copy(hdr->addr1, link_sta->addr);
+ 			ether_addr_copy(hdr->addr2, bss_conf->addr);
+diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c
+index 082253a3a95607..04f4a049599a1a 100644
+--- a/drivers/nvdimm/label.c
++++ b/drivers/nvdimm/label.c
+@@ -442,7 +442,8 @@ int nd_label_data_init(struct nvdimm_drvdata *ndd)
+ 	if (ndd->data)
+ 		return 0;
+ 
+-	if (ndd->nsarea.status || ndd->nsarea.max_xfer == 0) {
++	if (ndd->nsarea.status || ndd->nsarea.max_xfer == 0 ||
++	    ndd->nsarea.config_size == 0) {
+ 		dev_dbg(ndd->dev, "failed to init config data area: (%u:%u)\n",
+ 			ndd->nsarea.max_xfer, ndd->nsarea.config_size);
+ 		return -ENXIO;
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index 265b3608ae26ef..cd8a10f6accff9 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -3587,6 +3587,9 @@ static const struct pci_device_id nvme_id_table[] = {
+ 		.driver_data = NVME_QUIRK_BOGUS_NID, },
+ 	{ PCI_DEVICE(0x1217, 0x8760), /* O2 Micro 64GB Steam Deck */
+ 		.driver_data = NVME_QUIRK_DMAPOOL_ALIGN_512, },
++	{ PCI_DEVICE(0x126f, 0x1001),	/* Silicon Motion generic */
++		.driver_data = NVME_QUIRK_NO_DEEPEST_PS |
++				NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+ 	{ PCI_DEVICE(0x126f, 0x2262),	/* Silicon Motion generic */
+ 		.driver_data = NVME_QUIRK_NO_DEEPEST_PS |
+ 				NVME_QUIRK_BOGUS_NID, },
+@@ -3610,6 +3613,9 @@ static const struct pci_device_id nvme_id_table[] = {
+ 				NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+ 	{ PCI_DEVICE(0x15b7, 0x5008),   /* Sandisk SN530 */
+ 		.driver_data = NVME_QUIRK_BROKEN_MSI },
++	{ PCI_DEVICE(0x15b7, 0x5009),   /* Sandisk SN550 */
++		.driver_data = NVME_QUIRK_BROKEN_MSI |
++				NVME_QUIRK_NO_DEEPEST_PS },
+ 	{ PCI_DEVICE(0x1987, 0x5012),	/* Phison E12 */
+ 		.driver_data = NVME_QUIRK_BOGUS_NID, },
+ 	{ PCI_DEVICE(0x1987, 0x5016),	/* Phison E16 */
+diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
+index 4f9cac8a5abe07..259ad77c03c50f 100644
+--- a/drivers/nvme/target/tcp.c
++++ b/drivers/nvme/target/tcp.c
+@@ -1560,6 +1560,9 @@ static void nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue *queue)
+ {
+ 	struct socket *sock = queue->sock;
+ 
++	if (!queue->state_change)
++		return;
++
+ 	write_lock_bh(&sock->sk->sk_callback_lock);
+ 	sock->sk->sk_data_ready =  queue->data_ready;
+ 	sock->sk->sk_state_change = queue->state_change;
+diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
+index d00a3b015635c2..d1869e6de3844a 100644
+--- a/drivers/nvmem/core.c
++++ b/drivers/nvmem/core.c
+@@ -581,9 +581,11 @@ static int nvmem_cell_info_to_nvmem_cell_entry_nodup(struct nvmem_device *nvmem,
+ 	cell->nbits = info->nbits;
+ 	cell->np = info->np;
+ 
+-	if (cell->nbits)
++	if (cell->nbits) {
+ 		cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
+ 					   BITS_PER_BYTE);
++		cell->raw_len = ALIGN(cell->bytes, nvmem->word_size);
++	}
+ 
+ 	if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
+ 		dev_err(&nvmem->dev,
+@@ -592,6 +594,18 @@ static int nvmem_cell_info_to_nvmem_cell_entry_nodup(struct nvmem_device *nvmem,
+ 		return -EINVAL;
+ 	}
+ 
++	if (!IS_ALIGNED(cell->raw_len, nvmem->word_size)) {
++		dev_err(&nvmem->dev,
++			"cell %s raw len %zd unaligned to nvmem word size %d\n",
++			cell->name ?: "<unknown>", cell->raw_len,
++			nvmem->word_size);
++
++		if (info->raw_len)
++			return -EINVAL;
++
++		cell->raw_len = ALIGN(cell->raw_len, nvmem->word_size);
++	}
++
+ 	return 0;
+ }
+ 
+@@ -824,7 +838,9 @@ static int nvmem_add_cells_from_dt(struct nvmem_device *nvmem, struct device_nod
+ 		if (addr && len == (2 * sizeof(u32))) {
+ 			info.bit_offset = be32_to_cpup(addr++);
+ 			info.nbits = be32_to_cpup(addr);
+-			if (info.bit_offset >= BITS_PER_BYTE || info.nbits < 1) {
++			if (info.bit_offset >= BITS_PER_BYTE * info.bytes ||
++			    info.nbits < 1 ||
++			    info.bit_offset + info.nbits > BITS_PER_BYTE * info.bytes) {
+ 				dev_err(dev, "nvmem: invalid bits on %pOF\n", child);
+ 				of_node_put(child);
+ 				return -EINVAL;
+@@ -1617,21 +1633,29 @@ EXPORT_SYMBOL_GPL(nvmem_cell_put);
+ static void nvmem_shift_read_buffer_in_place(struct nvmem_cell_entry *cell, void *buf)
+ {
+ 	u8 *p, *b;
+-	int i, extra, bit_offset = cell->bit_offset;
++	int i, extra, bytes_offset;
++	int bit_offset = cell->bit_offset;
+ 
+ 	p = b = buf;
+-	if (bit_offset) {
++
++	bytes_offset = bit_offset / BITS_PER_BYTE;
++	b += bytes_offset;
++	bit_offset %= BITS_PER_BYTE;
++
++	if (bit_offset % BITS_PER_BYTE) {
+ 		/* First shift */
+-		*b++ >>= bit_offset;
++		*p = *b++ >> bit_offset;
+ 
+ 		/* setup rest of the bytes if any */
+ 		for (i = 1; i < cell->bytes; i++) {
+ 			/* Get bits from next byte and shift them towards msb */
+-			*p |= *b << (BITS_PER_BYTE - bit_offset);
++			*p++ |= *b << (BITS_PER_BYTE - bit_offset);
+ 
+-			p = b;
+-			*b++ >>= bit_offset;
++			*p = *b++ >> bit_offset;
+ 		}
++	} else if (p != b) {
++		memmove(p, b, cell->bytes - bytes_offset);
++		p += cell->bytes - 1;
+ 	} else {
+ 		/* point to the msb */
+ 		p += cell->bytes - 1;
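
The rewritten helper generalizes the old single-byte logic: a cell may now begin whole bytes (plus a residual bit offset) into the raw buffer, so the byte part is skipped first and only the remaining 0-7 bits are shifted across the cell; with no residual bits a plain memmove suffices. A simplified standalone version of the shift:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static void shift_in_place(uint8_t *buf, int cell_bytes, int bit_offset)
    {
            uint8_t *p = buf, *b = buf + bit_offset / 8;
            int bits = bit_offset % 8;

            if (bits) {
                    *p = *b++ >> bits;
                    for (int i = 1; i < cell_bytes; i++) {
                            /* bits from the next byte move toward the msb */
                            *p++ |= (uint8_t)(*b << (8 - bits));
                            *p = *b++ >> bits;
                    }
            } else if (p != b) {
                    memmove(p, b, cell_bytes);
            }
    }

    int main(void)
    {
            uint8_t buf[4] = { 0x00, 0x34, 0x12, 0x00 };

            shift_in_place(buf, 2, 12);     /* cell starts 12 bits in */
            printf("0x%02x 0x%02x\n", buf[0], buf[1]);  /* 0x23, 0x01 */
            return 0;
    }
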
+diff --git a/drivers/nvmem/qfprom.c b/drivers/nvmem/qfprom.c
+index 116a39e804c70b..a872c640b8c5a5 100644
+--- a/drivers/nvmem/qfprom.c
++++ b/drivers/nvmem/qfprom.c
+@@ -321,19 +321,32 @@ static int qfprom_reg_read(void *context,
+ 			unsigned int reg, void *_val, size_t bytes)
+ {
+ 	struct qfprom_priv *priv = context;
+-	u8 *val = _val;
+-	int i = 0, words = bytes;
++	u32 *val = _val;
+ 	void __iomem *base = priv->qfpcorrected;
++	int words = DIV_ROUND_UP(bytes, sizeof(u32));
++	int i;
+ 
+ 	if (read_raw_data && priv->qfpraw)
+ 		base = priv->qfpraw;
+ 
+-	while (words--)
+-		*val++ = readb(base + reg + i++);
++	for (i = 0; i < words; i++)
++		*val++ = readl(base + reg + i * sizeof(u32));
+ 
+ 	return 0;
+ }
+ 
++/* Align reads to word boundary */
++static void qfprom_fixup_dt_cell_info(struct nvmem_device *nvmem,
++				      struct nvmem_cell_info *cell)
++{
++	unsigned int byte_offset = cell->offset % sizeof(u32);
++
++	cell->bit_offset += byte_offset * BITS_PER_BYTE;
++	cell->offset -= byte_offset;
++	if (byte_offset && !cell->nbits)
++		cell->nbits = cell->bytes * BITS_PER_BYTE;
++}
++
+ static void qfprom_runtime_disable(void *data)
+ {
+ 	pm_runtime_disable(data);
+@@ -358,10 +371,11 @@ static int qfprom_probe(struct platform_device *pdev)
+ 	struct nvmem_config econfig = {
+ 		.name = "qfprom",
+ 		.add_legacy_fixed_of_cells = true,
+-		.stride = 1,
+-		.word_size = 1,
++		.stride = 4,
++		.word_size = 4,
+ 		.id = NVMEM_DEVID_AUTO,
+ 		.reg_read = qfprom_reg_read,
++		.fixup_dt_cell_info = qfprom_fixup_dt_cell_info,
+ 	};
+ 	struct device *dev = &pdev->dev;
+ 	struct resource *res;
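
Because the fuse block is only safely readable 32 bits at a time, qfprom moves to word_size/stride 4 and readl(), and the new fixup re-expresses byte-granular DT cells as bit ranges within the containing aligned word. The fixup arithmetic in isolation:

    #include <stdio.h>

    struct cell { unsigned int offset, bytes, bit_offset, nbits; };

    static void fixup(struct cell *c)
    {
            unsigned int byte_off = c->offset % 4;

            c->bit_offset += byte_off * 8;  /* fold bytes into bit offset */
            c->offset -= byte_off;          /* rewind to word boundary */
            if (byte_off && !c->nbits)
                    c->nbits = c->bytes * 8;
    }

    int main(void)
    {
            struct cell c = { .offset = 6, .bytes = 1 };

            fixup(&c);      /* byte 6 -> word at 4, bits 16..23 */
            printf("offset=%u bit_offset=%u nbits=%u\n",
                   c.offset, c.bit_offset, c.nbits);
            return 0;
    }
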
+diff --git a/drivers/nvmem/rockchip-otp.c b/drivers/nvmem/rockchip-otp.c
+index ebc3f0b24166bc..d88f12c5324264 100644
+--- a/drivers/nvmem/rockchip-otp.c
++++ b/drivers/nvmem/rockchip-otp.c
+@@ -59,7 +59,6 @@
+ #define RK3588_OTPC_AUTO_EN		0x08
+ #define RK3588_OTPC_INT_ST		0x84
+ #define RK3588_OTPC_DOUT0		0x20
+-#define RK3588_NO_SECURE_OFFSET		0x300
+ #define RK3588_NBYTES			4
+ #define RK3588_BURST_NUM		1
+ #define RK3588_BURST_SHIFT		8
+@@ -69,6 +68,7 @@
+ 
+ struct rockchip_data {
+ 	int size;
++	int read_offset;
+ 	const char * const *clks;
+ 	int num_clks;
+ 	nvmem_reg_read_t reg_read;
+@@ -196,7 +196,7 @@ static int rk3588_otp_read(void *context, unsigned int offset,
+ 	addr_start = round_down(offset, RK3588_NBYTES) / RK3588_NBYTES;
+ 	addr_end = round_up(offset + bytes, RK3588_NBYTES) / RK3588_NBYTES;
+ 	addr_len = addr_end - addr_start;
+-	addr_start += RK3588_NO_SECURE_OFFSET;
++	addr_start += otp->data->read_offset / RK3588_NBYTES;
+ 
+ 	buf = kzalloc(array_size(addr_len, RK3588_NBYTES), GFP_KERNEL);
+ 	if (!buf)
+@@ -274,12 +274,21 @@ static const struct rockchip_data px30_data = {
+ 	.reg_read = px30_otp_read,
+ };
+ 
++static const struct rockchip_data rk3576_data = {
++	.size = 0x100,
++	.read_offset = 0x700,
++	.clks = px30_otp_clocks,
++	.num_clks = ARRAY_SIZE(px30_otp_clocks),
++	.reg_read = rk3588_otp_read,
++};
++
+ static const char * const rk3588_otp_clocks[] = {
+ 	"otp", "apb_pclk", "phy", "arb",
+ };
+ 
+ static const struct rockchip_data rk3588_data = {
+ 	.size = 0x400,
++	.read_offset = 0xc00,
+ 	.clks = rk3588_otp_clocks,
+ 	.num_clks = ARRAY_SIZE(rk3588_otp_clocks),
+ 	.reg_read = rk3588_otp_read,
+@@ -294,6 +303,10 @@ static const struct of_device_id rockchip_otp_match[] = {
+ 		.compatible = "rockchip,rk3308-otp",
+ 		.data = &px30_data,
+ 	},
++	{
++		.compatible = "rockchip,rk3576-otp",
++		.data = &rk3576_data,
++	},
+ 	{
+ 		.compatible = "rockchip,rk3588-otp",
+ 		.data = &rk3588_data,
+diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
+index 0d94e4a967d81d..7cef00d9d7ab6a 100644
+--- a/drivers/pci/Kconfig
++++ b/drivers/pci/Kconfig
+@@ -194,6 +194,12 @@ config PCI_P2PDMA
+ 	  P2P DMA transactions must be between devices behind the same root
+ 	  port.
+ 
++	  Enabling this option will reduce the entropy of x86 KASLR memory
++	  regions. For example - on a 46 bit system, the entropy goes down
++	  from 16 bits to 15 bits. The actual reduction in entropy depends
++	  on the physical address bits, on processor features, kernel config
++	  (5 level page table) and physical memory present on the system.
++
+ 	  If unsure, say N.
+ 
+ config PCI_LABEL
+diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c
+index 6afff1f1b14301..c331b108e71de9 100644
+--- a/drivers/pci/ats.c
++++ b/drivers/pci/ats.c
+@@ -538,4 +538,37 @@ int pci_max_pasids(struct pci_dev *pdev)
+ 	return (1 << FIELD_GET(PCI_PASID_CAP_WIDTH, supported));
+ }
+ EXPORT_SYMBOL_GPL(pci_max_pasids);
++
++/**
++ * pci_pasid_status - Check the PASID status
++ * @pdev: PCI device structure
++ *
++ * Returns a negative value when no PASID capability is present.
++ * Otherwise the value of the control register is returned.
++ * The status bits reported are:
++ *
++ * PCI_PASID_CTRL_ENABLE - PASID enabled
++ * PCI_PASID_CTRL_EXEC - Execute permission enabled
++ * PCI_PASID_CTRL_PRIV - Privileged mode enabled
++ */
++int pci_pasid_status(struct pci_dev *pdev)
++{
++	int pasid;
++	u16 ctrl;
++
++	if (pdev->is_virtfn)
++		pdev = pci_physfn(pdev);
++
++	pasid = pdev->pasid_cap;
++	if (!pasid)
++		return -EINVAL;
++
++	pci_read_config_word(pdev, pasid + PCI_PASID_CTRL, &ctrl);
++
++	ctrl &= PCI_PASID_CTRL_ENABLE | PCI_PASID_CTRL_EXEC |
++		PCI_PASID_CTRL_PRIV;
++
++	return ctrl;
++}
++EXPORT_SYMBOL_GPL(pci_pasid_status);
+ #endif /* CONFIG_PCI_PASID */
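
A hypothetical caller, to show the intended calling convention for the new helper: a negative return means no PASID capability, otherwise the masked control bits can be tested directly (the stub below stands in for the real pci_pasid_status()):

    #include <stdio.h>

    #define PCI_PASID_CTRL_ENABLE   0x01

    /* stand-in for pci_pasid_status(pdev) */
    static int pci_pasid_status_stub(void) { return PCI_PASID_CTRL_ENABLE; }

    int main(void)
    {
            int status = pci_pasid_status_stub();

            if (status < 0)
                    puts("no PASID capability");
            else if (status & PCI_PASID_CTRL_ENABLE)
                    puts("PASID enabled");
            return 0;
    }
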
+diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
+index dea19250598a66..9e7e94f32b436c 100644
+--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
++++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
+@@ -280,7 +280,7 @@ static int dw_pcie_find_index(struct dw_pcie_ep *ep, phys_addr_t addr,
+ 	u32 index;
+ 	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ 
+-	for (index = 0; index < pci->num_ob_windows; index++) {
++	for_each_set_bit(index, ep->ob_window_map, pci->num_ob_windows) {
+ 		if (ep->outbound_addr[index] != addr)
+ 			continue;
+ 		*atu_index = index;
+diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
+index 120e2aca5164ab..d428457d9c4320 100644
+--- a/drivers/pci/controller/dwc/pcie-designware-host.c
++++ b/drivers/pci/controller/dwc/pcie-designware-host.c
+@@ -902,7 +902,7 @@ static int dw_pcie_pme_turn_off(struct dw_pcie *pci)
+ 	if (ret)
+ 		return ret;
+ 
+-	mem = ioremap(atu.cpu_addr, pci->region_align);
++	mem = ioremap(pci->pp.msg_res->start, pci->region_align);
+ 	if (!mem)
+ 		return -ENOMEM;
+ 
+diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c
+index 792d24cea5747b..81f085cebf6272 100644
+--- a/drivers/pci/controller/pcie-brcmstb.c
++++ b/drivers/pci/controller/pcie-brcmstb.c
+@@ -309,8 +309,8 @@ static int brcm_pcie_encode_ibar_size(u64 size)
+ 	if (log2_in >= 12 && log2_in <= 15)
+ 		/* Covers 4KB to 32KB (inclusive) */
+ 		return (log2_in - 12) + 0x1c;
+-	else if (log2_in >= 16 && log2_in <= 35)
+-		/* Covers 64KB to 32GB, (inclusive) */
++	else if (log2_in >= 16 && log2_in <= 36)
++		/* Covers 64KB to 64GB, (inclusive) */
+ 		return log2_in - 15;
+ 	/* Something is awry so disable */
+ 	return 0;
+@@ -1947,3 +1947,4 @@ module_platform_driver(brcm_pcie_driver);
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("Broadcom STB PCIe RC driver");
+ MODULE_AUTHOR("Broadcom");
++MODULE_SOFTDEP("pre: irq_bcm2712_mip");
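
The encoder change widens the largest inbound window from 32 GB (log2 = 35) to 64 GB (log2 = 36). A standalone copy of the encoding, with a GCC builtin standing in for the kernel's ilog2():

    #include <stdint.h>
    #include <stdio.h>

    static int encode_ibar_size(uint64_t size)
    {
            int log2_in = 63 - __builtin_clzll(size);   /* size must be > 0 */

            if (log2_in >= 12 && log2_in <= 15)
                    return (log2_in - 12) + 0x1c;       /* 4KB..32KB */
            else if (log2_in >= 16 && log2_in <= 36)
                    return log2_in - 15;                /* 64KB..64GB */
            return 0;                                   /* out of range */
    }

    int main(void)
    {
            printf("%d %d\n", encode_ibar_size(1ULL << 32), /* 4GB  -> 17 */
                   encode_ibar_size(1ULL << 36));           /* 64GB -> 21 */
            return 0;
    }
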
+diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
+index 94ceec50a2b94c..8df064b62a2ff3 100644
+--- a/drivers/pci/controller/vmd.c
++++ b/drivers/pci/controller/vmd.c
+@@ -17,6 +17,8 @@
+ #include <linux/rculist.h>
+ #include <linux/rcupdate.h>
+ 
++#include <xen/xen.h>
++
+ #include <asm/irqdomain.h>
+ 
+ #define VMD_CFGBAR	0
+@@ -970,6 +972,24 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ 	struct vmd_dev *vmd;
+ 	int err;
+ 
++	if (xen_domain()) {
++		/*
++		 * Xen doesn't have knowledge about devices in the VMD bus
++		 * because the config space of devices behind the VMD bridge is
++		 * not known to Xen, and hence Xen cannot discover or configure
++		 * them in any way.
++		 *
++		 * Bypass of MSI remapping won't work in that case as direct
++		 * write by Linux to the MSI entries won't result in functional
++		 * interrupts, as Xen is the entity that manages the host
++		 * interrupt controller and must configure interrupts.  However
++		 * multiplexing of interrupts by the VMD bridge will work under
++		 * Xen, so force the usage of that mode which must always be
++		 * supported by VMD bridges.
++		 */
++		features &= ~VMD_FEAT_CAN_BYPASS_MSI_REMAP;
++	}
++
+ 	if (resource_size(&dev->resource[VMD_CFGBAR]) < (1 << 20))
+ 		return -ENOMEM;
+ 
+diff --git a/drivers/pci/endpoint/functions/pci-epf-mhi.c b/drivers/pci/endpoint/functions/pci-epf-mhi.c
+index 54286a40bdfbf7..6643a88c7a0ce3 100644
+--- a/drivers/pci/endpoint/functions/pci-epf-mhi.c
++++ b/drivers/pci/endpoint/functions/pci-epf-mhi.c
+@@ -125,7 +125,7 @@ static const struct pci_epf_mhi_ep_info sm8450_info = {
+ 
+ static struct pci_epf_header sa8775p_header = {
+ 	.vendorid = PCI_VENDOR_ID_QCOM,
+-	.deviceid = 0x0306,               /* FIXME: Update deviceid for sa8775p EP */
++	.deviceid = 0x0116,
+ 	.baseclass_code = PCI_CLASS_OTHERS,
+ 	.interrupt_pin = PCI_INTERRUPT_INTA,
+ };
+diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
+index 14b4c68ab4e1a2..21aa3709e25776 100644
+--- a/drivers/pci/endpoint/functions/pci-epf-test.c
++++ b/drivers/pci/endpoint/functions/pci-epf-test.c
+@@ -703,6 +703,7 @@ static int pci_epf_test_set_bar(struct pci_epf *epf)
+ 		if (ret) {
+ 			pci_epf_free_space(epf, epf_test->reg[bar], bar,
+ 					   PRIMARY_INTERFACE);
++			epf_test->reg[bar] = NULL;
+ 			dev_err(dev, "Failed to set BAR%d\n", bar);
+ 			if (bar == test_reg_bar)
+ 				return ret;
+@@ -878,6 +879,7 @@ static void pci_epf_test_free_space(struct pci_epf *epf)
+ 
+ 		pci_epf_free_space(epf, epf_test->reg[bar], bar,
+ 				   PRIMARY_INTERFACE);
++		epf_test->reg[bar] = NULL;
+ 	}
+ }
+ 
+diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
+index f16c7ce3bf3fc8..1eceabef9e84d7 100644
+--- a/drivers/pci/setup-bus.c
++++ b/drivers/pci/setup-bus.c
+@@ -814,11 +814,9 @@ static resource_size_t calculate_iosize(resource_size_t size,
+ 	size = (size & 0xff) + ((size & ~0xffUL) << 2);
+ #endif
+ 	size = size + size1;
+-	if (size < old_size)
+-		size = old_size;
+ 
+-	size = ALIGN(max(size, add_size) + children_add_size, align);
+-	return size;
++	size = max(size, add_size) + children_add_size;
++	return ALIGN(max(size, old_size), align);
+ }
+ 
+ static resource_size_t calculate_memsize(resource_size_t size,
+diff --git a/drivers/perf/arm_pmuv3.c b/drivers/perf/arm_pmuv3.c
+index 0afe02f879b45a..d9b56f0f909422 100644
+--- a/drivers/perf/arm_pmuv3.c
++++ b/drivers/perf/arm_pmuv3.c
+@@ -816,10 +816,10 @@ static void armv8pmu_start(struct arm_pmu *cpu_pmu)
+ 	else
+ 		armv8pmu_disable_user_access();
+ 
++	kvm_vcpu_pmu_resync_el0();
++
+ 	/* Enable all counters */
+ 	armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);
+-
+-	kvm_vcpu_pmu_resync_el0();
+ }
+ 
+ static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
+diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
+index 413f76e2d1744d..e0a6a272f5714e 100644
+--- a/drivers/phy/phy-core.c
++++ b/drivers/phy/phy-core.c
+@@ -405,13 +405,14 @@ EXPORT_SYMBOL_GPL(phy_power_off);
+ 
+ int phy_set_mode_ext(struct phy *phy, enum phy_mode mode, int submode)
+ {
+-	int ret;
++	int ret = 0;
+ 
+-	if (!phy || !phy->ops->set_mode)
++	if (!phy)
+ 		return 0;
+ 
+ 	mutex_lock(&phy->mutex);
+-	ret = phy->ops->set_mode(phy, mode, submode);
++	if (phy->ops->set_mode)
++		ret = phy->ops->set_mode(phy, mode, submode);
+ 	if (!ret)
+ 		phy->attrs.mode = mode;
+ 	mutex_unlock(&phy->mutex);
+diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
+index 513fd35dcaa959..b45aee8f596442 100644
+--- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c
++++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
+@@ -9,6 +9,7 @@
+  * Copyright (C) 2014 Cogent Embedded, Inc.
+  */
+ 
++#include <linux/cleanup.h>
+ #include <linux/extcon-provider.h>
+ #include <linux/interrupt.h>
+ #include <linux/io.h>
+@@ -118,9 +119,8 @@ struct rcar_gen3_chan {
+ 	struct regulator *vbus;
+ 	struct reset_control *rstc;
+ 	struct work_struct work;
+-	struct mutex lock;	/* protects rphys[...].powered */
++	spinlock_t lock;	/* protects access to hardware and driver data structure. */
+ 	enum usb_dr_mode dr_mode;
+-	int irq;
+ 	u32 obint_enable_bits;
+ 	bool extcon_host;
+ 	bool is_otg_channel;
+@@ -349,6 +349,8 @@ static ssize_t role_store(struct device *dev, struct device_attribute *attr,
+ 	bool is_b_device;
+ 	enum phy_mode cur_mode, new_mode;
+ 
++	guard(spinlock_irqsave)(&ch->lock);
++
+ 	if (!ch->is_otg_channel || !rcar_gen3_is_any_otg_rphy_initialized(ch))
+ 		return -EIO;
+ 
+@@ -416,7 +418,7 @@ static void rcar_gen3_init_otg(struct rcar_gen3_chan *ch)
+ 		val = readl(usb2_base + USB2_ADPCTRL);
+ 		writel(val | USB2_ADPCTRL_IDPULLUP, usb2_base + USB2_ADPCTRL);
+ 	}
+-	msleep(20);
++	mdelay(20);
+ 
+ 	writel(0xffffffff, usb2_base + USB2_OBINTSTA);
+ 	writel(ch->obint_enable_bits, usb2_base + USB2_OBINTEN);
+@@ -428,16 +430,27 @@ static irqreturn_t rcar_gen3_phy_usb2_irq(int irq, void *_ch)
+ {
+ 	struct rcar_gen3_chan *ch = _ch;
+ 	void __iomem *usb2_base = ch->base;
+-	u32 status = readl(usb2_base + USB2_OBINTSTA);
++	struct device *dev = ch->dev;
+ 	irqreturn_t ret = IRQ_NONE;
++	u32 status;
++
++	pm_runtime_get_noresume(dev);
+ 
+-	if (status & ch->obint_enable_bits) {
+-		dev_vdbg(ch->dev, "%s: %08x\n", __func__, status);
+-		writel(ch->obint_enable_bits, usb2_base + USB2_OBINTSTA);
+-		rcar_gen3_device_recognition(ch);
+-		ret = IRQ_HANDLED;
++	if (pm_runtime_suspended(dev))
++		goto rpm_put;
++
++	scoped_guard(spinlock, &ch->lock) {
++		status = readl(usb2_base + USB2_OBINTSTA);
++		if (status & ch->obint_enable_bits) {
++			dev_vdbg(dev, "%s: %08x\n", __func__, status);
++			writel(ch->obint_enable_bits, usb2_base + USB2_OBINTSTA);
++			rcar_gen3_device_recognition(ch);
++			ret = IRQ_HANDLED;
++		}
+ 	}
+ 
++rpm_put:
++	pm_runtime_put_noidle(dev);
+ 	return ret;
+ }
+ 
+@@ -447,17 +460,8 @@ static int rcar_gen3_phy_usb2_init(struct phy *p)
+ 	struct rcar_gen3_chan *channel = rphy->ch;
+ 	void __iomem *usb2_base = channel->base;
+ 	u32 val;
+-	int ret;
+ 
+-	if (!rcar_gen3_is_any_rphy_initialized(channel) && channel->irq >= 0) {
+-		INIT_WORK(&channel->work, rcar_gen3_phy_usb2_work);
+-		ret = request_irq(channel->irq, rcar_gen3_phy_usb2_irq,
+-				  IRQF_SHARED, dev_name(channel->dev), channel);
+-		if (ret < 0) {
+-			dev_err(channel->dev, "No irq handler (%d)\n", channel->irq);
+-			return ret;
+-		}
+-	}
++	guard(spinlock_irqsave)(&channel->lock);
+ 
+ 	/* Initialize USB2 part */
+ 	val = readl(usb2_base + USB2_INT_ENABLE);
+@@ -485,6 +489,8 @@ static int rcar_gen3_phy_usb2_exit(struct phy *p)
+ 	void __iomem *usb2_base = channel->base;
+ 	u32 val;
+ 
++	guard(spinlock_irqsave)(&channel->lock);
++
+ 	rphy->initialized = false;
+ 
+ 	val = readl(usb2_base + USB2_INT_ENABLE);
+@@ -493,9 +499,6 @@ static int rcar_gen3_phy_usb2_exit(struct phy *p)
+ 		val &= ~USB2_INT_ENABLE_UCOM_INTEN;
+ 	writel(val, usb2_base + USB2_INT_ENABLE);
+ 
+-	if (channel->irq >= 0 && !rcar_gen3_is_any_rphy_initialized(channel))
+-		free_irq(channel->irq, channel);
+-
+ 	return 0;
+ }
+ 
+@@ -507,16 +510,17 @@ static int rcar_gen3_phy_usb2_power_on(struct phy *p)
+ 	u32 val;
+ 	int ret = 0;
+ 
+-	mutex_lock(&channel->lock);
+-	if (!rcar_gen3_are_all_rphys_power_off(channel))
+-		goto out;
+-
+ 	if (channel->vbus) {
+ 		ret = regulator_enable(channel->vbus);
+ 		if (ret)
+-			goto out;
++			return ret;
+ 	}
+ 
++	guard(spinlock_irqsave)(&channel->lock);
++
++	if (!rcar_gen3_are_all_rphys_power_off(channel))
++		goto out;
++
+ 	val = readl(usb2_base + USB2_USBCTR);
+ 	val |= USB2_USBCTR_PLL_RST;
+ 	writel(val, usb2_base + USB2_USBCTR);
+@@ -526,7 +530,6 @@ static int rcar_gen3_phy_usb2_power_on(struct phy *p)
+ out:
+ 	/* The powered flag should be set for any other phys anyway */
+ 	rphy->powered = true;
+-	mutex_unlock(&channel->lock);
+ 
+ 	return 0;
+ }
+@@ -537,18 +540,20 @@ static int rcar_gen3_phy_usb2_power_off(struct phy *p)
+ 	struct rcar_gen3_chan *channel = rphy->ch;
+ 	int ret = 0;
+ 
+-	mutex_lock(&channel->lock);
+-	rphy->powered = false;
++	scoped_guard(spinlock_irqsave, &channel->lock) {
++		rphy->powered = false;
+ 
+-	if (!rcar_gen3_are_all_rphys_power_off(channel))
+-		goto out;
++		if (rcar_gen3_are_all_rphys_power_off(channel)) {
++			u32 val = readl(channel->base + USB2_USBCTR);
++
++			val |= USB2_USBCTR_PLL_RST;
++			writel(val, channel->base + USB2_USBCTR);
++		}
++	}
+ 
+ 	if (channel->vbus)
+ 		ret = regulator_disable(channel->vbus);
+ 
+-out:
+-	mutex_unlock(&channel->lock);
+-
+ 	return ret;
+ }
+ 
+@@ -701,7 +706,7 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
+ 	struct device *dev = &pdev->dev;
+ 	struct rcar_gen3_chan *channel;
+ 	struct phy_provider *provider;
+-	int ret = 0, i;
++	int ret = 0, i, irq;
+ 
+ 	if (!dev->of_node) {
+ 		dev_err(dev, "This driver needs device tree\n");
+@@ -717,8 +722,6 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
+ 		return PTR_ERR(channel->base);
+ 
+ 	channel->obint_enable_bits = USB2_OBINT_BITS;
+-	/* get irq number here and request_irq for OTG in phy_init */
+-	channel->irq = platform_get_irq_optional(pdev, 0);
+ 	channel->dr_mode = rcar_gen3_get_dr_mode(dev->of_node);
+ 	if (channel->dr_mode != USB_DR_MODE_UNKNOWN) {
+ 		channel->is_otg_channel = true;
+@@ -761,7 +764,7 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
+ 	if (phy_data->no_adp_ctrl)
+ 		channel->obint_enable_bits = USB2_OBINT_IDCHG_EN;
+ 
+-	mutex_init(&channel->lock);
++	spin_lock_init(&channel->lock);
+ 	for (i = 0; i < NUM_OF_PHYS; i++) {
+ 		channel->rphys[i].phy = devm_phy_create(dev, NULL,
+ 							phy_data->phy_usb2_ops);
+@@ -787,6 +790,20 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
+ 		channel->vbus = NULL;
+ 	}
+ 
++	irq = platform_get_irq_optional(pdev, 0);
++	if (irq < 0 && irq != -ENXIO) {
++		ret = irq;
++		goto error;
++	} else if (irq > 0) {
++		INIT_WORK(&channel->work, rcar_gen3_phy_usb2_work);
++		ret = devm_request_irq(dev, irq, rcar_gen3_phy_usb2_irq,
++				       IRQF_SHARED, dev_name(dev), channel);
++		if (ret < 0) {
++			dev_err(dev, "Failed to request irq (%d)\n", irq);
++			goto error;
++		}
++	}
++
+ 	provider = devm_of_phy_provider_register(dev, rcar_gen3_phy_usb2_xlate);
+ 	if (IS_ERR(provider)) {
+ 		dev_err(dev, "Failed to register PHY provider\n");
+diff --git a/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c b/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c
+index be6f1ca9095aaa..dc6e01dff5c743 100644
+--- a/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c
++++ b/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c
+@@ -94,8 +94,8 @@
+ #define LCPLL_ALONE_MODE		BIT(1)
+ /* CMN_REG(0097) */
+ #define DIG_CLK_SEL			BIT(1)
+-#define ROPLL_REF			BIT(1)
+-#define LCPLL_REF			0
++#define LCPLL_REF			BIT(1)
++#define ROPLL_REF			0
+ /* CMN_REG(0099) */
+ #define CMN_ROPLL_ALONE_MODE		BIT(2)
+ #define ROPLL_ALONE_MODE		BIT(2)
+diff --git a/drivers/phy/rockchip/phy-rockchip-usbdp.c b/drivers/phy/rockchip/phy-rockchip-usbdp.c
+index f5c6d264d89ed9..d2021f7941e3ed 100644
+--- a/drivers/phy/rockchip/phy-rockchip-usbdp.c
++++ b/drivers/phy/rockchip/phy-rockchip-usbdp.c
+@@ -187,6 +187,8 @@ struct rk_udphy {
+ 	u32 dp_aux_din_sel;
+ 	bool dp_sink_hpd_sel;
+ 	bool dp_sink_hpd_cfg;
++	unsigned int link_rate;
++	unsigned int lanes;
+ 	u8 bw;
+ 	int id;
+ 
+@@ -1102,15 +1104,19 @@ static int rk_udphy_dp_phy_power_off(struct phy *phy)
+ 	return 0;
+ }
+ 
+-static int rk_udphy_dp_phy_verify_link_rate(unsigned int link_rate)
++/*
++ * Verify link rate
++ */
++static int rk_udphy_dp_phy_verify_link_rate(struct rk_udphy *udphy,
++					    struct phy_configure_opts_dp *dp)
+ {
+-	switch (link_rate) {
++	switch (dp->link_rate) {
+ 	case 1620:
+ 	case 2700:
+ 	case 5400:
+ 	case 8100:
++		udphy->link_rate = dp->link_rate;
+ 		break;
+-
+ 	default:
+ 		return -EINVAL;
+ 	}
+@@ -1118,45 +1124,44 @@ static int rk_udphy_dp_phy_verify_link_rate(unsigned int link_rate)
+ 	return 0;
+ }
+ 
+-static int rk_udphy_dp_phy_verify_config(struct rk_udphy *udphy,
+-					 struct phy_configure_opts_dp *dp)
++static int rk_udphy_dp_phy_verify_lanes(struct rk_udphy *udphy,
++					struct phy_configure_opts_dp *dp)
+ {
+-	int i, ret;
+-
+-	/* If changing link rate was required, verify it's supported. */
+-	ret = rk_udphy_dp_phy_verify_link_rate(dp->link_rate);
+-	if (ret)
+-		return ret;
+-
+-	/* Verify lane count. */
+ 	switch (dp->lanes) {
+ 	case 1:
+ 	case 2:
+ 	case 4:
+ 		/* valid lane count. */
++		udphy->lanes = dp->lanes;
+ 		break;
+ 
+ 	default:
+ 		return -EINVAL;
+ 	}
+ 
+-	/*
+-	 * If changing voltages is required, check swing and pre-emphasis
+-	 * levels, per-lane.
+-	 */
+-	if (dp->set_voltages) {
+-		/* Lane count verified previously. */
+-		for (i = 0; i < dp->lanes; i++) {
+-			if (dp->voltage[i] > 3 || dp->pre[i] > 3)
+-				return -EINVAL;
++	return 0;
++}
+ 
+-			/*
+-			 * Sum of voltage swing and pre-emphasis levels cannot
+-			 * exceed 3.
+-			 */
+-			if (dp->voltage[i] + dp->pre[i] > 3)
+-				return -EINVAL;
+-		}
++/*
++ * If changing voltages is required, check swing and pre-emphasis
++ * levels, per-lane.
++ */
++static int rk_udphy_dp_phy_verify_voltages(struct rk_udphy *udphy,
++					   struct phy_configure_opts_dp *dp)
++{
++	int i;
++
++	/* Lane count verified previously. */
++	for (i = 0; i < udphy->lanes; i++) {
++		if (dp->voltage[i] > 3 || dp->pre[i] > 3)
++			return -EINVAL;
++
++		/*
++		 * Sum of voltage swing and pre-emphasis levels cannot
++		 * exceed 3.
++		 */
++		if (dp->voltage[i] + dp->pre[i] > 3)
++			return -EINVAL;
+ 	}
+ 
+ 	return 0;
+@@ -1196,9 +1201,23 @@ static int rk_udphy_dp_phy_configure(struct phy *phy,
+ 	u32 i, val, lane;
+ 	int ret;
+ 
+-	ret = rk_udphy_dp_phy_verify_config(udphy, dp);
+-	if (ret)
+-		return ret;
++	if (dp->set_rate) {
++		ret = rk_udphy_dp_phy_verify_link_rate(udphy, dp);
++		if (ret)
++			return ret;
++	}
++
++	if (dp->set_lanes) {
++		ret = rk_udphy_dp_phy_verify_lanes(udphy, dp);
++		if (ret)
++			return ret;
++	}
++
++	if (dp->set_voltages) {
++		ret = rk_udphy_dp_phy_verify_voltages(udphy, dp);
++		if (ret)
++			return ret;
++	}
+ 
+ 	if (dp->set_rate) {
+ 		regmap_update_bits(udphy->pma_regmap, CMN_DP_RSTN_OFFSET,
+@@ -1243,9 +1262,9 @@ static int rk_udphy_dp_phy_configure(struct phy *phy,
+ 	}
+ 
+ 	if (dp->set_voltages) {
+-		for (i = 0; i < dp->lanes; i++) {
++		for (i = 0; i < udphy->lanes; i++) {
+ 			lane = udphy->dp_lane_sel[i];
+-			switch (dp->link_rate) {
++			switch (udphy->link_rate) {
+ 			case 1620:
+ 			case 2700:
+ 				regmap_update_bits(udphy->pma_regmap,
+diff --git a/drivers/phy/samsung/phy-exynos5-usbdrd.c b/drivers/phy/samsung/phy-exynos5-usbdrd.c
+index 46b8f6987c62c3..28d02ae60cc140 100644
+--- a/drivers/phy/samsung/phy-exynos5-usbdrd.c
++++ b/drivers/phy/samsung/phy-exynos5-usbdrd.c
+@@ -1513,8 +1513,11 @@ static const struct exynos5_usbdrd_phy_tuning gs101_tunes_pipe3_preinit[] = {
+ 	PHY_TUNING_ENTRY_PMA(0x09e0, -1, 0x00),
+ 	PHY_TUNING_ENTRY_PMA(0x09e4, -1, 0x36),
+ 	PHY_TUNING_ENTRY_PMA(0x1e7c, -1, 0x06),
+-	PHY_TUNING_ENTRY_PMA(0x1e90, -1, 0x00),
+-	PHY_TUNING_ENTRY_PMA(0x1e94, -1, 0x36),
++	PHY_TUNING_ENTRY_PMA(0x19e0, -1, 0x00),
++	PHY_TUNING_ENTRY_PMA(0x19e4, -1, 0x36),
++	/* fix bootloader bug */
++	PHY_TUNING_ENTRY_PMA(0x1e90, -1, 0x02),
++	PHY_TUNING_ENTRY_PMA(0x1e94, -1, 0x0b),
+ 	/* improve LVCC */
+ 	PHY_TUNING_ENTRY_PMA(0x08f0, -1, 0x30),
+ 	PHY_TUNING_ENTRY_PMA(0x18f0, -1, 0x30),
+diff --git a/drivers/pinctrl/bcm/pinctrl-bcm281xx.c b/drivers/pinctrl/bcm/pinctrl-bcm281xx.c
+index cf6efa9c0364a1..a039b490cdb8e6 100644
+--- a/drivers/pinctrl/bcm/pinctrl-bcm281xx.c
++++ b/drivers/pinctrl/bcm/pinctrl-bcm281xx.c
+@@ -72,7 +72,7 @@ static enum bcm281xx_pin_type hdmi_pin = BCM281XX_PIN_TYPE_HDMI;
+ struct bcm281xx_pin_function {
+ 	const char *name;
+ 	const char * const *groups;
+-	const unsigned ngroups;
++	const unsigned int ngroups;
+ };
+ 
+ /*
+@@ -84,10 +84,10 @@ struct bcm281xx_pinctrl_data {
+ 
+ 	/* List of all pins */
+ 	const struct pinctrl_pin_desc *pins;
+-	const unsigned npins;
++	const unsigned int npins;
+ 
+ 	const struct bcm281xx_pin_function *functions;
+-	const unsigned nfunctions;
++	const unsigned int nfunctions;
+ 
+ 	struct regmap *regmap;
+ };
+@@ -941,7 +941,7 @@ static struct bcm281xx_pinctrl_data bcm281xx_pinctrl = {
+ };
+ 
+ static inline enum bcm281xx_pin_type pin_type_get(struct pinctrl_dev *pctldev,
+-						  unsigned pin)
++						  unsigned int pin)
+ {
+ 	struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
+ 
+@@ -985,7 +985,7 @@ static int bcm281xx_pinctrl_get_groups_count(struct pinctrl_dev *pctldev)
+ }
+ 
+ static const char *bcm281xx_pinctrl_get_group_name(struct pinctrl_dev *pctldev,
+-						   unsigned group)
++						   unsigned int group)
+ {
+ 	struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
+ 
+@@ -993,9 +993,9 @@ static const char *bcm281xx_pinctrl_get_group_name(struct pinctrl_dev *pctldev,
+ }
+ 
+ static int bcm281xx_pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
+-					   unsigned group,
++					   unsigned int group,
+ 					   const unsigned **pins,
+-					   unsigned *num_pins)
++					   unsigned int *num_pins)
+ {
+ 	struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
+ 
+@@ -1007,7 +1007,7 @@ static int bcm281xx_pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
+ 
+ static void bcm281xx_pinctrl_pin_dbg_show(struct pinctrl_dev *pctldev,
+ 					  struct seq_file *s,
+-					  unsigned offset)
++					  unsigned int offset)
+ {
+ 	seq_printf(s, " %s", dev_name(pctldev->dev));
+ }
+@@ -1029,7 +1029,7 @@ static int bcm281xx_pinctrl_get_fcns_count(struct pinctrl_dev *pctldev)
+ }
+ 
+ static const char *bcm281xx_pinctrl_get_fcn_name(struct pinctrl_dev *pctldev,
+-						 unsigned function)
++						 unsigned int function)
+ {
+ 	struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
+ 
+@@ -1037,9 +1037,9 @@ static const char *bcm281xx_pinctrl_get_fcn_name(struct pinctrl_dev *pctldev,
+ }
+ 
+ static int bcm281xx_pinctrl_get_fcn_groups(struct pinctrl_dev *pctldev,
+-					   unsigned function,
++					   unsigned int function,
+ 					   const char * const **groups,
+-					   unsigned * const num_groups)
++					   unsigned int * const num_groups)
+ {
+ 	struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
+ 
+@@ -1050,8 +1050,8 @@ static int bcm281xx_pinctrl_get_fcn_groups(struct pinctrl_dev *pctldev,
+ }
+ 
+ static int bcm281xx_pinmux_set(struct pinctrl_dev *pctldev,
+-			       unsigned function,
+-			       unsigned group)
++			       unsigned int function,
++			       unsigned int group)
+ {
+ 	struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
+ 	const struct bcm281xx_pin_function *f = &pdata->functions[function];
+@@ -1082,7 +1082,7 @@ static const struct pinmux_ops bcm281xx_pinctrl_pinmux_ops = {
+ };
+ 
+ static int bcm281xx_pinctrl_pin_config_get(struct pinctrl_dev *pctldev,
+-					   unsigned pin,
++					   unsigned int pin,
+ 					   unsigned long *config)
+ {
+ 	return -ENOTSUPP;
+@@ -1091,9 +1091,9 @@ static int bcm281xx_pinctrl_pin_config_get(struct pinctrl_dev *pctldev,
+ 
+ /* Goes through the configs and update register val/mask */
+ static int bcm281xx_std_pin_update(struct pinctrl_dev *pctldev,
+-				   unsigned pin,
++				   unsigned int pin,
+ 				   unsigned long *configs,
+-				   unsigned num_configs,
++				   unsigned int num_configs,
+ 				   u32 *val,
+ 				   u32 *mask)
+ {
+@@ -1207,9 +1207,9 @@ static const u16 bcm281xx_pullup_map[] = {
+ 
+ /* Goes through the configs and update register val/mask */
+ static int bcm281xx_i2c_pin_update(struct pinctrl_dev *pctldev,
+-				   unsigned pin,
++				   unsigned int pin,
+ 				   unsigned long *configs,
+-				   unsigned num_configs,
++				   unsigned int num_configs,
+ 				   u32 *val,
+ 				   u32 *mask)
+ {
+@@ -1277,9 +1277,9 @@ static int bcm281xx_i2c_pin_update(struct pinctrl_dev *pctldev,
+ 
+ /* Goes through the configs and update register val/mask */
+ static int bcm281xx_hdmi_pin_update(struct pinctrl_dev *pctldev,
+-				    unsigned pin,
++				    unsigned int pin,
+ 				    unsigned long *configs,
+-				    unsigned num_configs,
++				    unsigned int num_configs,
+ 				    u32 *val,
+ 				    u32 *mask)
+ {
+@@ -1321,9 +1321,9 @@ static int bcm281xx_hdmi_pin_update(struct pinctrl_dev *pctldev,
+ }
+ 
+ static int bcm281xx_pinctrl_pin_config_set(struct pinctrl_dev *pctldev,
+-					   unsigned pin,
++					   unsigned int pin,
+ 					   unsigned long *configs,
+-					   unsigned num_configs)
++					   unsigned int num_configs)
+ {
+ 	struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
+ 	enum bcm281xx_pin_type pin_type;
+diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c
+index 6a94ecd6a8deae..0b7f74beb6a6a8 100644
+--- a/drivers/pinctrl/devicetree.c
++++ b/drivers/pinctrl/devicetree.c
+@@ -143,10 +143,14 @@ static int dt_to_map_one_config(struct pinctrl *p,
+ 		pctldev = get_pinctrl_dev_from_of_node(np_pctldev);
+ 		if (pctldev)
+ 			break;
+-		/* Do not defer probing of hogs (circular loop) */
++		/*
++		 * Do not defer probing of hogs (circular loop)
++		 *
++		 * Return 1 to let the caller catch the case.
++		 */
+ 		if (np_pctldev == p->dev->of_node) {
+ 			of_node_put(np_pctldev);
+-			return -ENODEV;
++			return 1;
+ 		}
+ 	}
+ 	of_node_put(np_pctldev);
+@@ -265,6 +269,8 @@ int pinctrl_dt_to_map(struct pinctrl *p, struct pinctrl_dev *pctldev)
+ 			ret = dt_to_map_one_config(p, pctldev, statename,
+ 						   np_config);
+ 			of_node_put(np_config);
++			if (ret == 1)
++				continue;
+ 			if (ret < 0)
+ 				goto err;
+ 		}
+diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c
+index 253a0cc57e396d..e5a32a0532eeec 100644
+--- a/drivers/pinctrl/meson/pinctrl-meson.c
++++ b/drivers/pinctrl/meson/pinctrl-meson.c
+@@ -487,7 +487,7 @@ static int meson_pinconf_get(struct pinctrl_dev *pcdev, unsigned int pin,
+ 	case PIN_CONFIG_BIAS_PULL_DOWN:
+ 	case PIN_CONFIG_BIAS_PULL_UP:
+ 		if (meson_pinconf_get_pull(pc, pin) == param)
+-			arg = 1;
++			arg = 60000;
+ 		else
+ 			return -EINVAL;
+ 		break;
+diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
+index a6bdff7a0bb254..018e96d921c050 100644
+--- a/drivers/pinctrl/qcom/pinctrl-msm.c
++++ b/drivers/pinctrl/qcom/pinctrl-msm.c
+@@ -43,7 +43,6 @@
+  * @pctrl:          pinctrl handle.
+  * @chip:           gpiochip handle.
+  * @desc:           pin controller descriptor
+- * @restart_nb:     restart notifier block.
+  * @irq:            parent irq for the TLMM irq_chip.
+  * @intr_target_use_scm: route irq to application cpu using scm calls
+  * @lock:           Spinlock to protect register resources as well
+@@ -63,7 +62,6 @@ struct msm_pinctrl {
+ 	struct pinctrl_dev *pctrl;
+ 	struct gpio_chip chip;
+ 	struct pinctrl_desc desc;
+-	struct notifier_block restart_nb;
+ 
+ 	int irq;
+ 
+@@ -1470,10 +1468,9 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
+ 	return 0;
+ }
+ 
+-static int msm_ps_hold_restart(struct notifier_block *nb, unsigned long action,
+-			       void *data)
++static int msm_ps_hold_restart(struct sys_off_data *data)
+ {
+-	struct msm_pinctrl *pctrl = container_of(nb, struct msm_pinctrl, restart_nb);
++	struct msm_pinctrl *pctrl = data->cb_data;
+ 
+ 	writel(0, pctrl->regs[0] + PS_HOLD_OFFSET);
+ 	mdelay(1000);
+@@ -1484,7 +1481,11 @@ static struct msm_pinctrl *poweroff_pctrl;
+ 
+ static void msm_ps_hold_poweroff(void)
+ {
+-	msm_ps_hold_restart(&poweroff_pctrl->restart_nb, 0, NULL);
++	struct sys_off_data data = {
++		.cb_data = poweroff_pctrl,
++	};
++
++	msm_ps_hold_restart(&data);
+ }
+ 
+ static void msm_pinctrl_setup_pm_reset(struct msm_pinctrl *pctrl)
+@@ -1494,9 +1495,11 @@ static void msm_pinctrl_setup_pm_reset(struct msm_pinctrl *pctrl)
+ 
+ 	for (i = 0; i < pctrl->soc->nfunctions; i++)
+ 		if (!strcmp(func[i].name, "ps_hold")) {
+-			pctrl->restart_nb.notifier_call = msm_ps_hold_restart;
+-			pctrl->restart_nb.priority = 128;
+-			if (register_restart_handler(&pctrl->restart_nb))
++			if (devm_register_sys_off_handler(pctrl->dev,
++							  SYS_OFF_MODE_RESTART,
++							  128,
++							  msm_ps_hold_restart,
++							  pctrl))
+ 				dev_err(pctrl->dev,
+ 					"failed to setup restart handler.\n");
+ 			poweroff_pctrl = pctrl;
+@@ -1598,8 +1601,6 @@ void msm_pinctrl_remove(struct platform_device *pdev)
+ 	struct msm_pinctrl *pctrl = platform_get_drvdata(pdev);
+ 
+ 	gpiochip_remove(&pctrl->chip);
+-
+-	unregister_restart_handler(&pctrl->restart_nb);
+ }
+ EXPORT_SYMBOL(msm_pinctrl_remove);
+ 
+diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+index d90685cfe2e1a4..bde58f5a743cb9 100644
+--- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c
++++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+@@ -311,6 +311,7 @@ struct rzg2l_pinctrl_pin_settings {
+  * @pmc: PMC registers cache
+  * @pfc: PFC registers cache
+  * @iolh: IOLH registers cache
++ * @pupd: PUPD registers cache
+  * @ien: IEN registers cache
+  * @sd_ch: SD_CH registers cache
+  * @eth_poc: ET_POC registers cache
+@@ -324,6 +325,7 @@ struct rzg2l_pinctrl_reg_cache {
+ 	u32	*pfc;
+ 	u32	*iolh[2];
+ 	u32	*ien[2];
++	u32	*pupd[2];
+ 	u8	sd_ch[2];
+ 	u8	eth_poc[2];
+ 	u8	eth_mode;
+@@ -2539,6 +2541,11 @@ static int rzg2l_pinctrl_reg_cache_alloc(struct rzg2l_pinctrl *pctrl)
+ 		if (!cache->ien[i])
+ 			return -ENOMEM;
+ 
++		cache->pupd[i] = devm_kcalloc(pctrl->dev, nports, sizeof(*cache->pupd[i]),
++					      GFP_KERNEL);
++		if (!cache->pupd[i])
++			return -ENOMEM;
++
+ 		/* Allocate dedicated cache. */
+ 		dedicated_cache->iolh[i] = devm_kcalloc(pctrl->dev, n_dedicated_pins,
+ 							sizeof(*dedicated_cache->iolh[i]),
+@@ -2779,7 +2786,7 @@ static void rzg2l_pinctrl_pm_setup_regs(struct rzg2l_pinctrl *pctrl, bool suspen
+ 	struct rzg2l_pinctrl_reg_cache *cache = pctrl->cache;
+ 
+ 	for (u32 port = 0; port < nports; port++) {
+-		bool has_iolh, has_ien;
++		bool has_iolh, has_ien, has_pupd;
+ 		u32 off, caps;
+ 		u8 pincnt;
+ 		u64 cfg;
+@@ -2791,6 +2798,7 @@ static void rzg2l_pinctrl_pm_setup_regs(struct rzg2l_pinctrl *pctrl, bool suspen
+ 		caps = FIELD_GET(PIN_CFG_MASK, cfg);
+ 		has_iolh = !!(caps & (PIN_CFG_IOLH_A | PIN_CFG_IOLH_B | PIN_CFG_IOLH_C));
+ 		has_ien = !!(caps & PIN_CFG_IEN);
++		has_pupd = !!(caps & PIN_CFG_PUPD);
+ 
+ 		if (suspend)
+ 			RZG2L_PCTRL_REG_ACCESS32(suspend, pctrl->base + PFC(off), cache->pfc[port]);
+@@ -2809,6 +2817,15 @@ static void rzg2l_pinctrl_pm_setup_regs(struct rzg2l_pinctrl *pctrl, bool suspen
+ 			}
+ 		}
+ 
++		if (has_pupd) {
++			RZG2L_PCTRL_REG_ACCESS32(suspend, pctrl->base + PUPD(off),
++						 cache->pupd[0][port]);
++			if (pincnt >= 4) {
++				RZG2L_PCTRL_REG_ACCESS32(suspend, pctrl->base + PUPD(off),
++							 cache->pupd[1][port]);
++			}
++		}
++
+ 		RZG2L_PCTRL_REG_ACCESS16(suspend, pctrl->base + PM(off), cache->pm[port]);
+ 		RZG2L_PCTRL_REG_ACCESS8(suspend, pctrl->base + P(off), cache->p[port]);
+ 
+diff --git a/drivers/pinctrl/sophgo/pinctrl-cv18xx.c b/drivers/pinctrl/sophgo/pinctrl-cv18xx.c
+index 57f2674e75d688..84b4850771ce2a 100644
+--- a/drivers/pinctrl/sophgo/pinctrl-cv18xx.c
++++ b/drivers/pinctrl/sophgo/pinctrl-cv18xx.c
+@@ -574,10 +574,10 @@ static int cv1800_pinconf_compute_config(struct cv1800_pinctrl *pctrl,
+ 					 struct cv1800_pin *pin,
+ 					 unsigned long *configs,
+ 					 unsigned int num_configs,
+-					 u32 *value)
++					 u32 *value, u32 *mask)
+ {
+ 	int i;
+-	u32 v = 0;
++	u32 v = 0, m = 0;
+ 	enum cv1800_pin_io_type type;
+ 	int ret;
+ 
+@@ -596,10 +596,12 @@ static int cv1800_pinconf_compute_config(struct cv1800_pinctrl *pctrl,
+ 		case PIN_CONFIG_BIAS_PULL_DOWN:
+ 			v &= ~PIN_IO_PULLDOWN;
+ 			v |= FIELD_PREP(PIN_IO_PULLDOWN, arg);
++			m |= PIN_IO_PULLDOWN;
+ 			break;
+ 		case PIN_CONFIG_BIAS_PULL_UP:
+ 			v &= ~PIN_IO_PULLUP;
+ 			v |= FIELD_PREP(PIN_IO_PULLUP, arg);
++			m |= PIN_IO_PULLUP;
+ 			break;
+ 		case PIN_CONFIG_DRIVE_STRENGTH_UA:
+ 			ret = cv1800_pinctrl_oc2reg(pctrl, pin, arg);
+@@ -607,6 +609,7 @@ static int cv1800_pinconf_compute_config(struct cv1800_pinctrl *pctrl,
+ 				return ret;
+ 			v &= ~PIN_IO_DRIVE;
+ 			v |= FIELD_PREP(PIN_IO_DRIVE, ret);
++			m |= PIN_IO_DRIVE;
+ 			break;
+ 		case PIN_CONFIG_INPUT_SCHMITT_UV:
+ 			ret = cv1800_pinctrl_schmitt2reg(pctrl, pin, arg);
+@@ -614,6 +617,7 @@ static int cv1800_pinconf_compute_config(struct cv1800_pinctrl *pctrl,
+ 				return ret;
+ 			v &= ~PIN_IO_SCHMITT;
+ 			v |= FIELD_PREP(PIN_IO_SCHMITT, ret);
++			m |= PIN_IO_SCHMITT;
+ 			break;
+ 		case PIN_CONFIG_POWER_SOURCE:
+ 			/* Ignore power source as it is always fixed */
+@@ -621,10 +625,12 @@ static int cv1800_pinconf_compute_config(struct cv1800_pinctrl *pctrl,
+ 		case PIN_CONFIG_SLEW_RATE:
+ 			v &= ~PIN_IO_OUT_FAST_SLEW;
+ 			v |= FIELD_PREP(PIN_IO_OUT_FAST_SLEW, arg);
++			m |= PIN_IO_OUT_FAST_SLEW;
+ 			break;
+ 		case PIN_CONFIG_BIAS_BUS_HOLD:
+ 			v &= ~PIN_IO_BUS_HOLD;
+ 			v |= FIELD_PREP(PIN_IO_BUS_HOLD, arg);
++			m |= PIN_IO_BUS_HOLD;
+ 			break;
+ 		default:
+ 			return -ENOTSUPP;
+@@ -632,17 +638,19 @@ static int cv1800_pinconf_compute_config(struct cv1800_pinctrl *pctrl,
+ 	}
+ 
+ 	*value = v;
++	*mask = m;
+ 
+ 	return 0;
+ }
+ 
+ static int cv1800_pin_set_config(struct cv1800_pinctrl *pctrl,
+ 				 unsigned int pin_id,
+-				 u32 value)
++				 u32 value, u32 mask)
+ {
+ 	struct cv1800_pin *pin = cv1800_get_pin(pctrl, pin_id);
+ 	unsigned long flags;
+ 	void __iomem *addr;
++	u32 reg;
+ 
+ 	if (!pin)
+ 		return -EINVAL;
+@@ -650,7 +658,10 @@ static int cv1800_pin_set_config(struct cv1800_pinctrl *pctrl,
+ 	addr = cv1800_pinctrl_get_component_addr(pctrl, &pin->conf);
+ 
+ 	raw_spin_lock_irqsave(&pctrl->lock, flags);
+-	writel(value, addr);
++	reg = readl(addr);
++	reg &= ~mask;
++	reg |= value;
++	writel(reg, addr);
+ 	raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ 
+ 	return 0;
+@@ -662,16 +673,17 @@ static int cv1800_pconf_set(struct pinctrl_dev *pctldev,
+ {
+ 	struct cv1800_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ 	struct cv1800_pin *pin = cv1800_get_pin(pctrl, pin_id);
+-	u32 value;
++	u32 value, mask;
+ 
+ 	if (!pin)
+ 		return -ENODEV;
+ 
+ 	if (cv1800_pinconf_compute_config(pctrl, pin,
+-					  configs, num_configs, &value))
++					  configs, num_configs,
++					  &value, &mask))
+ 		return -ENOTSUPP;
+ 
+-	return cv1800_pin_set_config(pctrl, pin_id, value);
++	return cv1800_pin_set_config(pctrl, pin_id, value, mask);
+ }
+ 
+ static int cv1800_pconf_group_set(struct pinctrl_dev *pctldev,
+@@ -682,7 +694,7 @@ static int cv1800_pconf_group_set(struct pinctrl_dev *pctldev,
+ 	struct cv1800_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ 	const struct group_desc *group;
+ 	const struct cv1800_pin_mux_config *pinmuxs;
+-	u32 value;
++	u32 value, mask;
+ 	int i;
+ 
+ 	group = pinctrl_generic_get_group(pctldev, gsel);
+@@ -692,11 +704,12 @@ static int cv1800_pconf_group_set(struct pinctrl_dev *pctldev,
+ 	pinmuxs = group->data;
+ 
+ 	if (cv1800_pinconf_compute_config(pctrl, pinmuxs[0].pin,
+-					  configs, num_configs, &value))
++					  configs, num_configs,
++					  &value, &mask))
+ 		return -ENOTSUPP;
+ 
+ 	for (i = 0; i < group->grp.npins; i++)
+-		cv1800_pin_set_config(pctrl, group->grp.pins[i], value);
++		cv1800_pin_set_config(pctrl, group->grp.pins[i], value, mask);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.c b/drivers/pinctrl/tegra/pinctrl-tegra.c
+index 3b046450bd3ff8..edcc78ebce4569 100644
+--- a/drivers/pinctrl/tegra/pinctrl-tegra.c
++++ b/drivers/pinctrl/tegra/pinctrl-tegra.c
+@@ -278,8 +278,8 @@ static int tegra_pinctrl_set_mux(struct pinctrl_dev *pctldev,
+ 	return 0;
+ }
+ 
+-static const struct tegra_pingroup *tegra_pinctrl_get_group(struct pinctrl_dev *pctldev,
+-					unsigned int offset)
++static int tegra_pinctrl_get_group_index(struct pinctrl_dev *pctldev,
++					 unsigned int offset)
+ {
+ 	struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
+ 	unsigned int group, num_pins, j;
+@@ -292,12 +292,35 @@ static const struct tegra_pingroup *tegra_pinctrl_get_group(struct pinctrl_dev *
+ 			continue;
+ 		for (j = 0; j < num_pins; j++) {
+ 			if (offset == pins[j])
+-				return &pmx->soc->groups[group];
++				return group;
+ 		}
+ 	}
+ 
+-	dev_err(pctldev->dev, "Pingroup not found for pin %u\n", offset);
+-	return NULL;
++	return -EINVAL;
++}
++
++static const struct tegra_pingroup *tegra_pinctrl_get_group(struct pinctrl_dev *pctldev,
++							    unsigned int offset,
++							    int group_index)
++{
++	struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
++
++	if (group_index < 0 || group_index >= pmx->soc->ngroups)
++		return NULL;
++
++	return &pmx->soc->groups[group_index];
++}
++
++static struct tegra_pingroup_config *tegra_pinctrl_get_group_config(struct pinctrl_dev *pctldev,
++								    unsigned int offset,
++								    int group_index)
++{
++	struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
++
++	if (group_index < 0)
++		return NULL;
++
++	return &pmx->pingroup_configs[group_index];
+ }
+ 
+ static int tegra_pinctrl_gpio_request_enable(struct pinctrl_dev *pctldev,
+@@ -306,12 +329,15 @@ static int tegra_pinctrl_gpio_request_enable(struct pinctrl_dev *pctldev,
+ {
+ 	struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
+ 	const struct tegra_pingroup *group;
++	struct tegra_pingroup_config *config;
++	int group_index;
+ 	u32 value;
+ 
+ 	if (!pmx->soc->sfsel_in_mux)
+ 		return 0;
+ 
+-	group = tegra_pinctrl_get_group(pctldev, offset);
++	group_index = tegra_pinctrl_get_group_index(pctldev, offset);
++	group = tegra_pinctrl_get_group(pctldev, offset, group_index);
+ 
+ 	if (!group)
+ 		return -EINVAL;
+@@ -319,7 +345,11 @@ static int tegra_pinctrl_gpio_request_enable(struct pinctrl_dev *pctldev,
+ 	if (group->mux_reg < 0 || group->sfsel_bit < 0)
+ 		return -EINVAL;
+ 
++	config = tegra_pinctrl_get_group_config(pctldev, offset, group_index);
++	if (!config)
++		return -EINVAL;
+ 	value = pmx_readl(pmx, group->mux_bank, group->mux_reg);
++	config->is_sfsel = (value & BIT(group->sfsel_bit)) != 0;
+ 	value &= ~BIT(group->sfsel_bit);
+ 	pmx_writel(pmx, value, group->mux_bank, group->mux_reg);
+ 
+@@ -332,12 +362,15 @@ static void tegra_pinctrl_gpio_disable_free(struct pinctrl_dev *pctldev,
+ {
+ 	struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
+ 	const struct tegra_pingroup *group;
++	struct tegra_pingroup_config *config;
++	int group_index;
+ 	u32 value;
+ 
+ 	if (!pmx->soc->sfsel_in_mux)
+ 		return;
+ 
+-	group = tegra_pinctrl_get_group(pctldev, offset);
++	group_index = tegra_pinctrl_get_group_index(pctldev, offset);
++	group = tegra_pinctrl_get_group(pctldev, offset, group_index);
+ 
+ 	if (!group)
+ 		return;
+@@ -345,8 +378,12 @@ static void tegra_pinctrl_gpio_disable_free(struct pinctrl_dev *pctldev,
+ 	if (group->mux_reg < 0 || group->sfsel_bit < 0)
+ 		return;
+ 
++	config = tegra_pinctrl_get_group_config(pctldev, offset, group_index);
++	if (!config)
++		return;
+ 	value = pmx_readl(pmx, group->mux_bank, group->mux_reg);
+-	value |= BIT(group->sfsel_bit);
++	if (config->is_sfsel)
++		value |= BIT(group->sfsel_bit);
+ 	pmx_writel(pmx, value, group->mux_bank, group->mux_reg);
+ }
+ 
+@@ -791,6 +828,12 @@ int tegra_pinctrl_probe(struct platform_device *pdev,
+ 	pmx->dev = &pdev->dev;
+ 	pmx->soc = soc_data;
+ 
++	pmx->pingroup_configs = devm_kcalloc(&pdev->dev,
++					     pmx->soc->ngroups, sizeof(*pmx->pingroup_configs),
++					     GFP_KERNEL);
++	if (!pmx->pingroup_configs)
++		return -ENOMEM;
++
+ 	/*
+ 	 * Each mux group will appear in 4 functions' list of groups.
+ 	 * This over-allocates slightly, since not all groups are mux groups.
+diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.h b/drivers/pinctrl/tegra/pinctrl-tegra.h
+index b3289bdf727d82..b97136685f7a88 100644
+--- a/drivers/pinctrl/tegra/pinctrl-tegra.h
++++ b/drivers/pinctrl/tegra/pinctrl-tegra.h
+@@ -8,6 +8,10 @@
+ #ifndef __PINMUX_TEGRA_H__
+ #define __PINMUX_TEGRA_H__
+ 
++struct tegra_pingroup_config {
++	bool is_sfsel;
++};
++
+ struct tegra_pmx {
+ 	struct device *dev;
+ 	struct pinctrl_dev *pctl;
+@@ -21,6 +25,8 @@ struct tegra_pmx {
+ 	int nbanks;
+ 	void __iomem **regs;
+ 	u32 *backup_regs;
++	/* Array of size soc->ngroups */
++	struct tegra_pingroup_config *pingroup_configs;
+ };
+ 
+ enum tegra_pinconf_param {
+diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
+index a1cff9ff35a929..9d79c5ea8b4953 100644
+--- a/drivers/platform/x86/asus-wmi.c
++++ b/drivers/platform/x86/asus-wmi.c
+@@ -304,6 +304,7 @@ struct asus_wmi {
+ 
+ 	u32 kbd_rgb_dev;
+ 	bool kbd_rgb_state_available;
++	bool oobe_state_available;
+ 
+ 	u8 throttle_thermal_policy_mode;
+ 	u32 throttle_thermal_policy_dev;
+@@ -1826,7 +1827,7 @@ static int asus_wmi_led_init(struct asus_wmi *asus)
+ 			goto error;
+ 	}
+ 
+-	if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_OOBE)) {
++	if (asus->oobe_state_available) {
+ 		/*
+ 		 * Disable OOBE state, so that e.g. the keyboard backlight
+ 		 * works.
+@@ -4741,6 +4742,7 @@ static int asus_wmi_add(struct platform_device *pdev)
+ 	asus->egpu_enable_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_EGPU);
+ 	asus->dgpu_disable_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_DGPU);
+ 	asus->kbd_rgb_state_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_TUF_RGB_STATE);
++	asus->oobe_state_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_OOBE);
+ 	asus->ally_mcu_usb_switch = acpi_has_method(NULL, ASUS_USB0_PWR_EC0_CSEE)
+ 						&& dmi_check_system(asus_ally_mcu_quirk);
+ 
+@@ -4994,6 +4996,13 @@ static int asus_hotk_restore(struct device *device)
+ 	}
+ 	if (!IS_ERR_OR_NULL(asus->kbd_led.dev))
+ 		kbd_led_update(asus);
++	if (asus->oobe_state_available) {
++		/*
++		 * Disable OOBE state, so that e.g. the keyboard backlight
++		 * works.
++		 */
++		asus_wmi_set_devstate(ASUS_WMI_DEVID_OOBE, 1, NULL);
++	}
+ 
+ 	if (asus_wmi_has_fnlock_key(asus))
+ 		asus_wmi_fnlock_update(asus);
+diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c b/drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c
+index 230e6ee966366a..d8f1bf5e58a0f4 100644
+--- a/drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c
++++ b/drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c
+@@ -45,7 +45,7 @@ static ssize_t current_password_store(struct kobject *kobj,
+ 	int length;
+ 
+ 	length = strlen(buf);
+-	if (buf[length-1] == '\n')
++	if (length && buf[length - 1] == '\n')
+ 		length--;
+ 
+ 	/* firmware does verifiation of min/max password length,
+diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
+index c908f52ed717b1..bdb4cbee42058a 100644
+--- a/drivers/platform/x86/ideapad-laptop.c
++++ b/drivers/platform/x86/ideapad-laptop.c
+@@ -1297,6 +1297,16 @@ static const struct key_entry ideapad_keymap[] = {
+ 	/* Specific to some newer models */
+ 	{ KE_KEY,	0x3e | IDEAPAD_WMI_KEY, { KEY_MICMUTE } },
+ 	{ KE_KEY,	0x3f | IDEAPAD_WMI_KEY, { KEY_RFKILL } },
++	/* Star- (User Assignable Key) */
++	{ KE_KEY,	0x44 | IDEAPAD_WMI_KEY, { KEY_PROG1 } },
++	/* Eye */
++	{ KE_KEY,	0x45 | IDEAPAD_WMI_KEY, { KEY_PROG3 } },
++	/* Performance toggle also Fn+Q, handled inside ideapad_wmi_notify() */
++	{ KE_KEY,	0x3d | IDEAPAD_WMI_KEY, { KEY_PROG4 } },
++	/* shift + prtsc */
++	{ KE_KEY,   0x2d | IDEAPAD_WMI_KEY, { KEY_CUT } },
++	{ KE_KEY,   0x29 | IDEAPAD_WMI_KEY, { KEY_TOUCHPAD_TOGGLE } },
++	{ KE_KEY,   0x2a | IDEAPAD_WMI_KEY, { KEY_ROOT_MENU } },
+ 
+ 	{ KE_END },
+ };
+@@ -2083,6 +2093,12 @@ static void ideapad_wmi_notify(struct wmi_device *wdev, union acpi_object *data)
+ 		dev_dbg(&wdev->dev, "WMI fn-key event: 0x%llx\n",
+ 			data->integer.value);
+ 
++		/* performance button triggered by 0x3d */
++		if (data->integer.value == 0x3d && priv->dytc) {
++			platform_profile_cycle();
++			break;
++		}
++
+ 		/* 0x02 FnLock, 0x03 Esc */
+ 		if (data->integer.value == 0x02 || data->integer.value == 0x03)
+ 			ideapad_fn_lock_led_notify(priv, data->integer.value == 0x02);
+diff --git a/drivers/platform/x86/intel/hid.c b/drivers/platform/x86/intel/hid.c
+index 9a609358956f3a..59392f1a0d8ada 100644
+--- a/drivers/platform/x86/intel/hid.c
++++ b/drivers/platform/x86/intel/hid.c
+@@ -44,16 +44,17 @@ MODULE_LICENSE("GPL");
+ MODULE_AUTHOR("Alex Hung");
+ 
+ static const struct acpi_device_id intel_hid_ids[] = {
+-	{"INT33D5", 0},
+-	{"INTC1051", 0},
+-	{"INTC1054", 0},
+-	{"INTC1070", 0},
+-	{"INTC1076", 0},
+-	{"INTC1077", 0},
+-	{"INTC1078", 0},
+-	{"INTC107B", 0},
+-	{"INTC10CB", 0},
+-	{"", 0},
++	{ "INT33D5" },
++	{ "INTC1051" },
++	{ "INTC1054" },
++	{ "INTC1070" },
++	{ "INTC1076" },
++	{ "INTC1077" },
++	{ "INTC1078" },
++	{ "INTC107B" },
++	{ "INTC10CB" },
++	{ "INTC10CC" },
++	{ }
+ };
+ MODULE_DEVICE_TABLE(acpi, intel_hid_ids);
+ 
+diff --git a/drivers/platform/x86/think-lmi.c b/drivers/platform/x86/think-lmi.c
+index 4cfb53206cb848..1abd8378f158d3 100644
+--- a/drivers/platform/x86/think-lmi.c
++++ b/drivers/platform/x86/think-lmi.c
+@@ -995,8 +995,8 @@ static ssize_t current_value_store(struct kobject *kobj,
+ 			ret = -EINVAL;
+ 			goto out;
+ 		}
+-		set_str = kasprintf(GFP_KERNEL, "%s,%s,%s", setting->display_name,
+-					new_setting, tlmi_priv.pwd_admin->signature);
++		set_str = kasprintf(GFP_KERNEL, "%s,%s,%s", setting->name,
++				    new_setting, tlmi_priv.pwd_admin->signature);
+ 		if (!set_str) {
+ 			ret = -ENOMEM;
+ 			goto out;
+@@ -1026,7 +1026,7 @@ static ssize_t current_value_store(struct kobject *kobj,
+ 				goto out;
+ 		}
+ 
+-		set_str = kasprintf(GFP_KERNEL, "%s,%s;", setting->display_name,
++		set_str = kasprintf(GFP_KERNEL, "%s,%s;", setting->name,
+ 				    new_setting);
+ 		if (!set_str) {
+ 			ret = -ENOMEM;
+@@ -1054,11 +1054,11 @@ static ssize_t current_value_store(struct kobject *kobj,
+ 		}
+ 
+ 		if (auth_str)
+-			set_str = kasprintf(GFP_KERNEL, "%s,%s,%s", setting->display_name,
+-					new_setting, auth_str);
++			set_str = kasprintf(GFP_KERNEL, "%s,%s,%s", setting->name,
++					    new_setting, auth_str);
+ 		else
+-			set_str = kasprintf(GFP_KERNEL, "%s,%s;", setting->display_name,
+-					new_setting);
++			set_str = kasprintf(GFP_KERNEL, "%s,%s;", setting->name,
++					    new_setting);
+ 		if (!set_str) {
+ 			ret = -ENOMEM;
+ 			goto out;
+@@ -1568,9 +1568,6 @@ static int tlmi_analyze(void)
+ 			continue;
+ 		}
+ 
+-		/* It is not allowed to have '/' for file name. Convert it into '\'. */
+-		strreplace(item, '/', '\\');
+-
+ 		/* Remove the value part */
+ 		strreplace(item, ',', '\0');
+ 
+@@ -1582,11 +1579,16 @@ static int tlmi_analyze(void)
+ 			goto fail_clear_attr;
+ 		}
+ 		setting->index = i;
++
++		strscpy(setting->name, item);
++		/* It is not allowed to have '/' for file name. Convert it into '\'. */
++		strreplace(item, '/', '\\');
+ 		strscpy(setting->display_name, item);
++
+ 		/* If BIOS selections supported, load those */
+ 		if (tlmi_priv.can_get_bios_selections) {
+-			ret = tlmi_get_bios_selections(setting->display_name,
+-					&setting->possible_values);
++			ret = tlmi_get_bios_selections(setting->name,
++						       &setting->possible_values);
+ 			if (ret || !setting->possible_values)
+ 				pr_info("Error retrieving possible values for %d : %s\n",
+ 						i, setting->display_name);
+diff --git a/drivers/platform/x86/think-lmi.h b/drivers/platform/x86/think-lmi.h
+index e1975ffebeb42d..7f9632a53736f6 100644
+--- a/drivers/platform/x86/think-lmi.h
++++ b/drivers/platform/x86/think-lmi.h
+@@ -84,6 +84,7 @@ struct tlmi_pwd_setting {
+ struct tlmi_attr_setting {
+ 	struct kobject kobj;
+ 	int index;
++	char name[TLMI_SETTINGS_MAXLEN];
+ 	char display_name[TLMI_SETTINGS_MAXLEN];
+ 	char *possible_values;
+ };
+diff --git a/drivers/pmdomain/core.c b/drivers/pmdomain/core.c
+index 88819659df83a2..05913e9fe08211 100644
+--- a/drivers/pmdomain/core.c
++++ b/drivers/pmdomain/core.c
+@@ -3043,7 +3043,7 @@ struct device *genpd_dev_pm_attach_by_id(struct device *dev,
+ 	/* Verify that the index is within a valid range. */
+ 	num_domains = of_count_phandle_with_args(dev->of_node, "power-domains",
+ 						 "#power-domain-cells");
+-	if (index >= num_domains)
++	if (num_domains < 0 || index >= num_domains)
+ 		return NULL;
+ 
+ 	/* Allocate and register device on the genpd bus. */
+diff --git a/drivers/pmdomain/imx/gpcv2.c b/drivers/pmdomain/imx/gpcv2.c
+index e03c2cb39a6936..0dbf1893abfa39 100644
+--- a/drivers/pmdomain/imx/gpcv2.c
++++ b/drivers/pmdomain/imx/gpcv2.c
+@@ -1361,7 +1361,7 @@ static int imx_pgc_domain_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	if (IS_ENABLED(CONFIG_LOCKDEP) &&
+-	    of_property_read_bool(domain->dev->of_node, "power-domains"))
++	    of_property_present(domain->dev->of_node, "power-domains"))
+ 		lockdep_set_subclass(&domain->genpd.mlock, 1);
+ 
+ 	ret = of_genpd_add_provider_simple(domain->dev->of_node,
+diff --git a/drivers/pmdomain/renesas/rcar-gen4-sysc.c b/drivers/pmdomain/renesas/rcar-gen4-sysc.c
+index 66409cff2083fc..e001b5c25bed00 100644
+--- a/drivers/pmdomain/renesas/rcar-gen4-sysc.c
++++ b/drivers/pmdomain/renesas/rcar-gen4-sysc.c
+@@ -338,11 +338,6 @@ static int __init rcar_gen4_sysc_pd_init(void)
+ 		struct rcar_gen4_sysc_pd *pd;
+ 		size_t n;
+ 
+-		if (!area->name) {
+-			/* Skip NULLified area */
+-			continue;
+-		}
+-
+ 		n = strlen(area->name) + 1;
+ 		pd = kzalloc(sizeof(*pd) + n, GFP_KERNEL);
+ 		if (!pd) {
+diff --git a/drivers/pmdomain/renesas/rcar-sysc.c b/drivers/pmdomain/renesas/rcar-sysc.c
+index b99326917330f5..1e29485237894a 100644
+--- a/drivers/pmdomain/renesas/rcar-sysc.c
++++ b/drivers/pmdomain/renesas/rcar-sysc.c
+@@ -396,11 +396,6 @@ static int __init rcar_sysc_pd_init(void)
+ 		struct rcar_sysc_pd *pd;
+ 		size_t n;
+ 
+-		if (!area->name) {
+-			/* Skip NULLified area */
+-			continue;
+-		}
+-
+ 		n = strlen(area->name) + 1;
+ 		pd = kzalloc(sizeof(*pd) + n, GFP_KERNEL);
+ 		if (!pd) {
+diff --git a/drivers/power/supply/axp20x_battery.c b/drivers/power/supply/axp20x_battery.c
+index 57eba1ddb17ba5..88fbae613e8bc3 100644
+--- a/drivers/power/supply/axp20x_battery.c
++++ b/drivers/power/supply/axp20x_battery.c
+@@ -89,6 +89,8 @@
+ #define AXP717_BAT_CC_MIN_UA		0
+ #define AXP717_BAT_CC_MAX_UA		3008000
+ 
++#define AXP717_TS_PIN_DISABLE		BIT(4)
++
+ struct axp20x_batt_ps;
+ 
+ struct axp_data {
+@@ -117,6 +119,7 @@ struct axp20x_batt_ps {
+ 	/* Maximum constant charge current */
+ 	unsigned int max_ccc;
+ 	const struct axp_data	*data;
++	bool ts_disable;
+ };
+ 
+ static int axp20x_battery_get_max_voltage(struct axp20x_batt_ps *axp20x_batt,
+@@ -983,6 +986,24 @@ static void axp717_set_battery_info(struct platform_device *pdev,
+ 	int ccc = info->constant_charge_current_max_ua;
+ 	int val;
+ 
++	axp_batt->ts_disable = (device_property_read_bool(axp_batt->dev,
++							  "x-powers,no-thermistor"));
++
++	/*
++	 * Under rare conditions an incorrectly programmed efuse for
++	 * the temp sensor on the PMIC may trigger a fault condition.
++	 * Allow users to hard-code if the ts pin is not used to work
++	 * around this problem. Note that this requires the battery
++	 * be correctly defined in the device tree with a monitored
++	 * battery node.
++	 */
++	if (axp_batt->ts_disable) {
++		regmap_update_bits(axp_batt->regmap,
++				   AXP717_TS_PIN_CFG,
++				   AXP717_TS_PIN_DISABLE,
++				   AXP717_TS_PIN_DISABLE);
++	}
++
+ 	if (vmin > 0 && axp717_set_voltage_min_design(axp_batt, vmin))
+ 		dev_err(&pdev->dev,
+ 			"couldn't set voltage_min_design\n");
+diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c
+index 1a936829975e11..efbd80db778d66 100644
+--- a/drivers/ptp/ptp_ocp.c
++++ b/drivers/ptp/ptp_ocp.c
+@@ -315,6 +315,8 @@ struct ptp_ocp_serial_port {
+ #define OCP_BOARD_ID_LEN		13
+ #define OCP_SERIAL_LEN			6
+ #define OCP_SMA_NUM			4
++#define OCP_SIGNAL_NUM			4
++#define OCP_FREQ_NUM			4
+ 
+ enum {
+ 	PORT_GNSS,
+@@ -342,8 +344,8 @@ struct ptp_ocp {
+ 	struct dcf_master_reg	__iomem *dcf_out;
+ 	struct dcf_slave_reg	__iomem *dcf_in;
+ 	struct tod_reg		__iomem *nmea_out;
+-	struct frequency_reg	__iomem *freq_in[4];
+-	struct ptp_ocp_ext_src	*signal_out[4];
++	struct frequency_reg	__iomem *freq_in[OCP_FREQ_NUM];
++	struct ptp_ocp_ext_src	*signal_out[OCP_SIGNAL_NUM];
+ 	struct ptp_ocp_ext_src	*pps;
+ 	struct ptp_ocp_ext_src	*ts0;
+ 	struct ptp_ocp_ext_src	*ts1;
+@@ -378,10 +380,12 @@ struct ptp_ocp {
+ 	u32			utc_tai_offset;
+ 	u32			ts_window_adjust;
+ 	u64			fw_cap;
+-	struct ptp_ocp_signal	signal[4];
++	struct ptp_ocp_signal	signal[OCP_SIGNAL_NUM];
+ 	struct ptp_ocp_sma_connector sma[OCP_SMA_NUM];
+ 	const struct ocp_sma_op *sma_op;
+ 	struct dpll_device *dpll;
++	int signals_nr;
++	int freq_in_nr;
+ };
+ 
+ #define OCP_REQ_TIMESTAMP	BIT(0)
+@@ -2693,6 +2697,8 @@ ptp_ocp_fb_board_init(struct ptp_ocp *bp, struct ocp_resource *r)
+ 	bp->eeprom_map = fb_eeprom_map;
+ 	bp->fw_version = ioread32(&bp->image->version);
+ 	bp->sma_op = &ocp_fb_sma_op;
++	bp->signals_nr = 4;
++	bp->freq_in_nr = 4;
+ 
+ 	ptp_ocp_fb_set_version(bp);
+ 
+@@ -2858,6 +2864,8 @@ ptp_ocp_art_board_init(struct ptp_ocp *bp, struct ocp_resource *r)
+ 	bp->fw_version = ioread32(&bp->reg->version);
+ 	bp->fw_tag = 2;
+ 	bp->sma_op = &ocp_art_sma_op;
++	bp->signals_nr = 4;
++	bp->freq_in_nr = 4;
+ 
+ 	/* Enable MAC serial port during initialisation */
+ 	iowrite32(1, &bp->board_config->mro50_serial_activate);
+@@ -2884,6 +2892,8 @@ ptp_ocp_adva_board_init(struct ptp_ocp *bp, struct ocp_resource *r)
+ 	bp->flash_start = 0xA00000;
+ 	bp->eeprom_map = fb_eeprom_map;
+ 	bp->sma_op = &ocp_adva_sma_op;
++	bp->signals_nr = 2;
++	bp->freq_in_nr = 2;
+ 
+ 	version = ioread32(&bp->image->version);
+ 	/* if lower 16 bits are empty, this is the fw loader. */
+@@ -4004,7 +4014,7 @@ _signal_summary_show(struct seq_file *s, struct ptp_ocp *bp, int nr)
+ {
+ 	struct signal_reg __iomem *reg = bp->signal_out[nr]->mem;
+ 	struct ptp_ocp_signal *signal = &bp->signal[nr];
+-	char label[8];
++	char label[16];
+ 	bool on;
+ 	u32 val;
+ 
+@@ -4030,7 +4040,7 @@ static void
+ _frequency_summary_show(struct seq_file *s, int nr,
+ 			struct frequency_reg __iomem *reg)
+ {
+-	char label[8];
++	char label[16];
+ 	bool on;
+ 	u32 val;
+ 
+@@ -4174,11 +4184,11 @@ ptp_ocp_summary_show(struct seq_file *s, void *data)
+ 	}
+ 
+ 	if (bp->fw_cap & OCP_CAP_SIGNAL)
+-		for (i = 0; i < 4; i++)
++		for (i = 0; i < bp->signals_nr; i++)
+ 			_signal_summary_show(s, bp, i);
+ 
+ 	if (bp->fw_cap & OCP_CAP_FREQ)
+-		for (i = 0; i < 4; i++)
++		for (i = 0; i < bp->freq_in_nr; i++)
+ 			_frequency_summary_show(s, i, bp->freq_in[i]);
+ 
+ 	if (bp->irig_out) {
+diff --git a/drivers/regulator/ad5398.c b/drivers/regulator/ad5398.c
+index 40f7dba42b5ad7..404cbe32711e73 100644
+--- a/drivers/regulator/ad5398.c
++++ b/drivers/regulator/ad5398.c
+@@ -14,6 +14,7 @@
+ #include <linux/platform_device.h>
+ #include <linux/regulator/driver.h>
+ #include <linux/regulator/machine.h>
++#include <linux/regulator/of_regulator.h>
+ 
+ #define AD5398_CURRENT_EN_MASK	0x8000
+ 
+@@ -221,15 +222,20 @@ static int ad5398_probe(struct i2c_client *client)
+ 	const struct ad5398_current_data_format *df =
+ 			(struct ad5398_current_data_format *)id->driver_data;
+ 
+-	if (!init_data)
+-		return -EINVAL;
+-
+ 	chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
+ 	if (!chip)
+ 		return -ENOMEM;
+ 
+ 	config.dev = &client->dev;
++	if (client->dev.of_node)
++		init_data = of_get_regulator_init_data(&client->dev,
++						       client->dev.of_node,
++						       &ad5398_reg);
++	if (!init_data)
++		return -EINVAL;
++
+ 	config.init_data = init_data;
++	config.of_node = client->dev.of_node;
+ 	config.driver_data = chip;
+ 
+ 	chip->client = client;
+diff --git a/drivers/remoteproc/qcom_wcnss.c b/drivers/remoteproc/qcom_wcnss.c
+index a7bb9da27029db..a2ae6adf0053a5 100644
+--- a/drivers/remoteproc/qcom_wcnss.c
++++ b/drivers/remoteproc/qcom_wcnss.c
+@@ -117,10 +117,10 @@ static const struct wcnss_data pronto_v1_data = {
+ 	.pmu_offset = 0x1004,
+ 	.spare_offset = 0x1088,
+ 
+-	.pd_names = { "mx", "cx" },
++	.pd_names = { "cx", "mx" },
+ 	.vregs = (struct wcnss_vreg_info[]) {
+-		{ "vddmx", 950000, 1150000, 0 },
+ 		{ "vddcx", .super_turbo = true},
++		{ "vddmx", 950000, 1150000, 0 },
+ 		{ "vddpx", 1800000, 1800000, 0 },
+ 	},
+ 	.num_pd_vregs = 2,
+@@ -131,10 +131,10 @@ static const struct wcnss_data pronto_v2_data = {
+ 	.pmu_offset = 0x1004,
+ 	.spare_offset = 0x1088,
+ 
+-	.pd_names = { "mx", "cx" },
++	.pd_names = { "cx", "mx" },
+ 	.vregs = (struct wcnss_vreg_info[]) {
+-		{ "vddmx", 1287500, 1287500, 0 },
+ 		{ "vddcx", .super_turbo = true },
++		{ "vddmx", 1287500, 1287500, 0 },
+ 		{ "vddpx", 1800000, 1800000, 0 },
+ 	},
+ 	.num_pd_vregs = 2,
+@@ -397,8 +397,17 @@ static irqreturn_t wcnss_stop_ack_interrupt(int irq, void *dev)
+ static int wcnss_init_pds(struct qcom_wcnss *wcnss,
+ 			  const char * const pd_names[WCNSS_MAX_PDS])
+ {
++	struct device *dev = wcnss->dev;
+ 	int i, ret;
+ 
++	/* Handle single power domain */
++	if (dev->pm_domain) {
++		wcnss->pds[0] = dev;
++		wcnss->num_pds = 1;
++		pm_runtime_enable(dev);
++		return 0;
++	}
++
+ 	for (i = 0; i < WCNSS_MAX_PDS; i++) {
+ 		if (!pd_names[i])
+ 			break;
+@@ -418,8 +427,15 @@ static int wcnss_init_pds(struct qcom_wcnss *wcnss,
+ 
+ static void wcnss_release_pds(struct qcom_wcnss *wcnss)
+ {
++	struct device *dev = wcnss->dev;
+ 	int i;
+ 
++	/* Handle single power domain */
++	if (wcnss->num_pds == 1 && dev->pm_domain) {
++		pm_runtime_disable(dev);
++		return;
++	}
++
+ 	for (i = 0; i < wcnss->num_pds; i++)
+ 		dev_pm_domain_detach(wcnss->pds[i], false);
+ }
+@@ -437,10 +453,14 @@ static int wcnss_init_regulators(struct qcom_wcnss *wcnss,
+ 	 * the regulators for the power domains. For old device trees we need to
+ 	 * reserve extra space to manage them through the regulator interface.
+ 	 */
+-	if (wcnss->num_pds)
+-		info += num_pd_vregs;
+-	else
++	if (wcnss->num_pds) {
++		info += wcnss->num_pds;
++		/* Handle single power domain case */
++		if (wcnss->num_pds < num_pd_vregs)
++			num_vregs += num_pd_vregs - wcnss->num_pds;
++	} else {
+ 		num_vregs += num_pd_vregs;
++	}
+ 
+ 	bulk = devm_kcalloc(wcnss->dev,
+ 			    num_vregs, sizeof(struct regulator_bulk_data),
+diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
+index 872e0b679be481..5efbe69bf5ca8c 100644
+--- a/drivers/rtc/rtc-ds1307.c
++++ b/drivers/rtc/rtc-ds1307.c
+@@ -1807,10 +1807,8 @@ static int ds1307_probe(struct i2c_client *client)
+ 		 * For some variants, be sure alarms can trigger when we're
+ 		 * running on Vbackup (BBSQI/BBSQW)
+ 		 */
+-		if (want_irq || ds1307_can_wakeup_device) {
++		if (want_irq || ds1307_can_wakeup_device)
+ 			regs[0] |= DS1337_BIT_INTCN | chip->bbsqi_bit;
+-			regs[0] &= ~(DS1337_BIT_A2IE | DS1337_BIT_A1IE);
+-		}
+ 
+ 		regmap_write(ds1307->regmap, DS1337_REG_CONTROL,
+ 			     regs[0]);
+diff --git a/drivers/rtc/rtc-rv3032.c b/drivers/rtc/rtc-rv3032.c
+index 35b2e36b426a0d..cb01038a2e27fe 100644
+--- a/drivers/rtc/rtc-rv3032.c
++++ b/drivers/rtc/rtc-rv3032.c
+@@ -69,7 +69,7 @@
+ #define RV3032_CLKOUT2_FD_MSK		GENMASK(6, 5)
+ #define RV3032_CLKOUT2_OS		BIT(7)
+ 
+-#define RV3032_CTRL1_EERD		BIT(3)
++#define RV3032_CTRL1_EERD		BIT(2)
+ #define RV3032_CTRL1_WADA		BIT(5)
+ 
+ #define RV3032_CTRL2_STOP		BIT(0)
+diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
+index 9f76f2d7b66e58..3bac163057d896 100644
+--- a/drivers/s390/crypto/vfio_ap_ops.c
++++ b/drivers/s390/crypto/vfio_ap_ops.c
+@@ -851,48 +851,66 @@ static void vfio_ap_mdev_remove(struct mdev_device *mdev)
+ 	vfio_put_device(&matrix_mdev->vdev);
+ }
+ 
+-#define MDEV_SHARING_ERR "Userspace may not re-assign queue %02lx.%04lx " \
+-			 "already assigned to %s"
++#define MDEV_SHARING_ERR "Userspace may not assign queue %02lx.%04lx to mdev: already assigned to %s"
+ 
+-static void vfio_ap_mdev_log_sharing_err(struct ap_matrix_mdev *matrix_mdev,
+-					 unsigned long *apm,
+-					 unsigned long *aqm)
++#define MDEV_IN_USE_ERR "Can not reserve queue %02lx.%04lx for host driver: in use by mdev"
++
++static void vfio_ap_mdev_log_sharing_err(struct ap_matrix_mdev *assignee,
++					 struct ap_matrix_mdev *assigned_to,
++					 unsigned long *apm, unsigned long *aqm)
+ {
+ 	unsigned long apid, apqi;
+-	const struct device *dev = mdev_dev(matrix_mdev->mdev);
+-	const char *mdev_name = dev_name(dev);
+ 
+-	for_each_set_bit_inv(apid, apm, AP_DEVICES)
++	for_each_set_bit_inv(apid, apm, AP_DEVICES) {
++		for_each_set_bit_inv(apqi, aqm, AP_DOMAINS) {
++			dev_warn(mdev_dev(assignee->mdev), MDEV_SHARING_ERR,
++				 apid, apqi, dev_name(mdev_dev(assigned_to->mdev)));
++		}
++	}
++}
++
++static void vfio_ap_mdev_log_in_use_err(struct ap_matrix_mdev *assignee,
++					unsigned long *apm, unsigned long *aqm)
++{
++	unsigned long apid, apqi;
++
++	for_each_set_bit_inv(apid, apm, AP_DEVICES) {
+ 		for_each_set_bit_inv(apqi, aqm, AP_DOMAINS)
+-			dev_warn(dev, MDEV_SHARING_ERR, apid, apqi, mdev_name);
++			dev_warn(mdev_dev(assignee->mdev), MDEV_IN_USE_ERR, apid, apqi);
++	}
+ }
+ 
+ /**
+  * vfio_ap_mdev_verify_no_sharing - verify APQNs are not shared by matrix mdevs
+  *
++ * @assignee: the matrix mdev to which @mdev_apm and @mdev_aqm are being
++ *	      assigned; or, NULL if this function was called by the AP bus
++ *	      driver in_use callback to verify none of the APQNs being reserved
++ *	      for the host device driver are in use by a vfio_ap mediated device
+  * @mdev_apm: mask indicating the APIDs of the APQNs to be verified
+  * @mdev_aqm: mask indicating the APQIs of the APQNs to be verified
+  *
+- * Verifies that each APQN derived from the Cartesian product of a bitmap of
+- * AP adapter IDs and AP queue indexes is not configured for any matrix
+- * mediated device. AP queue sharing is not allowed.
++ * Verifies that each APQN derived from the Cartesian product of APIDs
++ * represented by the bits set in @mdev_apm and the APQIs of the bits set in
++ * @mdev_aqm is not assigned to a mediated device other than the mdev to which
++ * the APQN is being assigned (@assignee). AP queue sharing is not allowed.
+  *
+  * Return: 0 if the APQNs are not shared; otherwise return -EADDRINUSE.
+  */
+-static int vfio_ap_mdev_verify_no_sharing(unsigned long *mdev_apm,
++static int vfio_ap_mdev_verify_no_sharing(struct ap_matrix_mdev *assignee,
++					  unsigned long *mdev_apm,
+ 					  unsigned long *mdev_aqm)
+ {
+-	struct ap_matrix_mdev *matrix_mdev;
++	struct ap_matrix_mdev *assigned_to;
+ 	DECLARE_BITMAP(apm, AP_DEVICES);
+ 	DECLARE_BITMAP(aqm, AP_DOMAINS);
+ 
+-	list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
++	list_for_each_entry(assigned_to, &matrix_dev->mdev_list, node) {
+ 		/*
+-		 * If the input apm and aqm are fields of the matrix_mdev
+-		 * object, then move on to the next matrix_mdev.
++		 * If the mdev to which the mdev_apm and mdev_aqm are being
++		 * assigned is the same as the mdev being verified, skip it
+ 		 */
+-		if (mdev_apm == matrix_mdev->matrix.apm &&
+-		    mdev_aqm == matrix_mdev->matrix.aqm)
++		if (assignee == assigned_to)
+ 			continue;
+ 
+ 		memset(apm, 0, sizeof(apm));
+@@ -902,15 +920,16 @@ static int vfio_ap_mdev_verify_no_sharing(unsigned long *mdev_apm,
+ 		 * We work on full longs, as we can only exclude the leftover
+ 		 * bits in non-inverse order. The leftover is all zeros.
+ 		 */
+-		if (!bitmap_and(apm, mdev_apm, matrix_mdev->matrix.apm,
+-				AP_DEVICES))
+		if (!bitmap_and(apm, mdev_apm, assigned_to->matrix.apm, AP_DEVICES))
+ 			continue;
+ 
+-		if (!bitmap_and(aqm, mdev_aqm, matrix_mdev->matrix.aqm,
+-				AP_DOMAINS))
+		if (!bitmap_and(aqm, mdev_aqm, assigned_to->matrix.aqm, AP_DOMAINS))
+ 			continue;
+ 
+-		vfio_ap_mdev_log_sharing_err(matrix_mdev, apm, aqm);
++		if (assignee)
++			vfio_ap_mdev_log_sharing_err(assignee, assigned_to, apm, aqm);
++		else
++			vfio_ap_mdev_log_in_use_err(assigned_to, apm, aqm);
+ 
+ 		return -EADDRINUSE;
+ 	}
+@@ -939,7 +958,8 @@ static int vfio_ap_mdev_validate_masks(struct ap_matrix_mdev *matrix_mdev)
+ 					       matrix_mdev->matrix.aqm))
+ 		return -EADDRNOTAVAIL;
+ 
+-	return vfio_ap_mdev_verify_no_sharing(matrix_mdev->matrix.apm,
++	return vfio_ap_mdev_verify_no_sharing(matrix_mdev,
++					      matrix_mdev->matrix.apm,
+ 					      matrix_mdev->matrix.aqm);
+ }
+ 
+@@ -2467,7 +2487,7 @@ int vfio_ap_mdev_resource_in_use(unsigned long *apm, unsigned long *aqm)
+ 
+ 	mutex_lock(&matrix_dev->guests_lock);
+ 	mutex_lock(&matrix_dev->mdevs_lock);
+-	ret = vfio_ap_mdev_verify_no_sharing(apm, aqm);
++	ret = vfio_ap_mdev_verify_no_sharing(NULL, apm, aqm);
+ 	mutex_unlock(&matrix_dev->mdevs_lock);
+ 	mutex_unlock(&matrix_dev->guests_lock);
+ 
+diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
+index 6e8d8a96c54fb3..f2e4237ff3d994 100644
+--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
++++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
+@@ -227,10 +227,16 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
+ 	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
+ 		return;
+ 
+-	/* check for recovered fabric node */
+-	if (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE &&
+-	    ndlp->nlp_DID == Fabric_DID)
++	/* Ignore callback for a mismatched (stale) rport */
++	if (ndlp->rport != rport) {
++		lpfc_vlog_msg(vport, KERN_WARNING, LOG_NODE,
++			      "6788 fc rport mismatch: d_id x%06x ndlp x%px "
++			      "fc rport x%px node rport x%px state x%x "
++			      "refcnt %u\n",
++			      ndlp->nlp_DID, ndlp, rport, ndlp->rport,
++			      ndlp->nlp_state, kref_read(&ndlp->kref));
+ 		return;
++	}
+ 
+ 	if (rport->port_name != wwn_to_u64(ndlp->nlp_portname.u.wwn))
+ 		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+@@ -5622,6 +5628,7 @@ static struct lpfc_nodelist *
+ __lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
+ {
+ 	struct lpfc_nodelist *ndlp;
++	struct lpfc_nodelist *np = NULL;
+ 	uint32_t data1;
+ 
+ 	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+@@ -5636,14 +5643,20 @@ __lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
+ 					 ndlp, ndlp->nlp_DID,
+ 					 ndlp->nlp_flag, data1, ndlp->nlp_rpi,
+ 					 ndlp->active_rrqs_xri_bitmap);
+-			return ndlp;
++
++			/* Check for new or potentially stale node */
++			if (ndlp->nlp_state != NLP_STE_UNUSED_NODE)
++				return ndlp;
++			np = ndlp;
+ 		}
+ 	}
+ 
+-	/* FIND node did <did> NOT FOUND */
+-	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+-			 "0932 FIND node did x%x NOT FOUND.\n", did);
+-	return NULL;
++	if (!np)
++		/* FIND node did <did> NOT FOUND */
++		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
++				 "0932 FIND node did x%x NOT FOUND.\n", did);
++
++	return np;
+ }
+ 
+ struct lpfc_nodelist *
+diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
+index a3658ef1141b26..50c761991191ff 100644
+--- a/drivers/scsi/lpfc/lpfc_init.c
++++ b/drivers/scsi/lpfc/lpfc_init.c
+@@ -13190,6 +13190,7 @@ lpfc_sli4_enable_msi(struct lpfc_hba *phba)
+ 	eqhdl = lpfc_get_eq_hdl(0);
+ 	rc = pci_irq_vector(phba->pcidev, 0);
+ 	if (rc < 0) {
++		free_irq(phba->pcidev->irq, phba);
+ 		pci_free_irq_vectors(phba->pcidev);
+ 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ 				"0496 MSI pci_irq_vec failed (%d)\n", rc);
+@@ -13270,6 +13271,7 @@ lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
+ 			eqhdl = lpfc_get_eq_hdl(0);
+ 			retval = pci_irq_vector(phba->pcidev, 0);
+ 			if (retval < 0) {
++				free_irq(phba->pcidev->irq, phba);
+ 				lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ 					"0502 INTR pci_irq_vec failed (%d)\n",
+ 					 retval);
+diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c
+index c0a372868e1d7f..604f37e5c0c355 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr_fw.c
++++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c
+@@ -174,6 +174,9 @@ static void mpi3mr_print_event_data(struct mpi3mr_ioc *mrioc,
+ 	char *desc = NULL;
+ 	u16 event;
+ 
++	if (!(mrioc->logging_level & MPI3_DEBUG_EVENT))
++		return;
++
+ 	event = event_reply->event;
+ 
+ 	switch (event) {
+@@ -2744,7 +2747,10 @@ static void mpi3mr_watchdog_work(struct work_struct *work)
+ 		return;
+ 	}
+ 
+-	if (mrioc->ts_update_counter++ >= mrioc->ts_update_interval) {
++	if (!(mrioc->facts.ioc_capabilities &
++		MPI3_IOCFACTS_CAPABILITY_NON_SUPERVISOR_IOC) &&
++		(mrioc->ts_update_counter++ >= mrioc->ts_update_interval)) {
++
+ 		mrioc->ts_update_counter = 0;
+ 		mpi3mr_sync_timestamp(mrioc);
+ 	}
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+index 87784c96249a7f..47faa27bc35591 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+@@ -679,6 +679,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
+ 	size_t data_in_sz = 0;
+ 	long ret;
+ 	u16 device_handle = MPT3SAS_INVALID_DEVICE_HANDLE;
++	int tm_ret;
+ 
+ 	issue_reset = 0;
+ 
+@@ -1120,18 +1121,25 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
+ 			if (pcie_device && (!ioc->tm_custom_handling) &&
+ 			    (!(mpt3sas_scsih_is_pcie_scsi_device(
+ 			    pcie_device->device_info))))
+-				mpt3sas_scsih_issue_locked_tm(ioc,
++				tm_ret = mpt3sas_scsih_issue_locked_tm(ioc,
+ 				  le16_to_cpu(mpi_request->FunctionDependent1),
+ 				  0, 0, 0,
+ 				  MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
+ 				  0, pcie_device->reset_timeout,
+ 			MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE);
+ 			else
+-				mpt3sas_scsih_issue_locked_tm(ioc,
++				tm_ret = mpt3sas_scsih_issue_locked_tm(ioc,
+ 				  le16_to_cpu(mpi_request->FunctionDependent1),
+ 				  0, 0, 0,
+ 				  MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
+ 				  0, 30, MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET);
++
++			if (tm_ret != SUCCESS) {
++				ioc_info(ioc,
++					 "target reset failed, issue hard reset: handle (0x%04x)\n",
++					 le16_to_cpu(mpi_request->FunctionDependent1));
++				mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
++			}
+ 		} else
+ 			mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
+ 	}
+diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
+index 680ba180a67252..89a2aaccdcfceb 100644
+--- a/drivers/scsi/scsi_debug.c
++++ b/drivers/scsi/scsi_debug.c
+@@ -173,6 +173,10 @@ static const char *sdebug_version_date = "20210520";
+ #define DEF_ZBC_MAX_OPEN_ZONES	8
+ #define DEF_ZBC_NR_CONV_ZONES	1
+ 
++/* Default parameters for tape drives */
++#define TAPE_DEF_DENSITY  0x0
++#define TAPE_DEF_BLKSIZE  0
++
+ #define SDEBUG_LUN_0_VAL 0
+ 
+ /* bit mask values for sdebug_opts */
+@@ -363,6 +367,10 @@ struct sdebug_dev_info {
+ 	ktime_t create_ts;	/* time since bootup that this device was created */
+ 	struct sdeb_zone_state *zstate;
+ 
++	/* For tapes */
++	unsigned int tape_blksize;
++	unsigned int tape_density;
++
+ 	struct dentry *debugfs_entry;
+ 	struct spinlock list_lock;
+ 	struct list_head inject_err_list;
+@@ -773,7 +781,7 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
+ /* 20 */
+ 	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
+ 	    {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+-	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
++	{0, 0x1, 0, 0, NULL, NULL, /* REWIND ?? */
+ 	    {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
+ 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+@@ -2742,7 +2750,7 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
+ 	unsigned char *ap;
+ 	unsigned char *arr __free(kfree);
+ 	unsigned char *cmd = scp->cmnd;
+-	bool dbd, llbaa, msense_6, is_disk, is_zbc;
++	bool dbd, llbaa, msense_6, is_disk, is_zbc, is_tape;
+ 
+ 	arr = kzalloc(SDEBUG_MAX_MSENSE_SZ, GFP_ATOMIC);
+ 	if (!arr)
+@@ -2755,7 +2763,8 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
+ 	llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
+ 	is_disk = (sdebug_ptype == TYPE_DISK);
+ 	is_zbc = devip->zoned;
+-	if ((is_disk || is_zbc) && !dbd)
++	is_tape = (sdebug_ptype == TYPE_TAPE);
++	if ((is_disk || is_zbc || is_tape) && !dbd)
+ 		bd_len = llbaa ? 16 : 8;
+ 	else
+ 		bd_len = 0;
+@@ -2793,15 +2802,25 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
+ 			put_unaligned_be32(0xffffffff, ap + 0);
+ 		else
+ 			put_unaligned_be32(sdebug_capacity, ap + 0);
+-		put_unaligned_be16(sdebug_sector_size, ap + 6);
++		if (is_tape) {
++			ap[0] = devip->tape_density;
++			put_unaligned_be16(devip->tape_blksize, ap + 6);
++		} else
++			put_unaligned_be16(sdebug_sector_size, ap + 6);
+ 		offset += bd_len;
+ 		ap = arr + offset;
+ 	} else if (16 == bd_len) {
++		if (is_tape) {
++			mk_sense_invalid_fld(scp, SDEB_IN_DATA, 1, 4);
++			return check_condition_result;
++		}
+ 		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
+ 		put_unaligned_be32(sdebug_sector_size, ap + 12);
+ 		offset += bd_len;
+ 		ap = arr + offset;
+ 	}
++	if (cmd[2] == 0)
++		goto only_bd; /* Only block descriptor requested */
+ 
+ 	/*
+ 	 * N.B. If len>0 before resp_*_pg() call, then form of that call should be:
+@@ -2902,6 +2921,7 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
+ 	default:
+ 		goto bad_pcode;
+ 	}
++only_bd:
+ 	if (msense_6)
+ 		arr[0] = offset - 1;
+ 	else
+@@ -2945,8 +2965,27 @@ static int resp_mode_select(struct scsi_cmnd *scp,
+ 			    __func__, param_len, res);
+ 	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
+ 	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
+-	off = bd_len + (mselect6 ? 4 : 8);
+-	if (md_len > 2 || off >= res) {
++	off = (mselect6 ? 4 : 8);
++	if (sdebug_ptype == TYPE_TAPE) {
++		int blksize;
++
++		if (bd_len != 8) {
++			mk_sense_invalid_fld(scp, SDEB_IN_DATA,
++					mselect6 ? 3 : 6, -1);
++			return check_condition_result;
++		}
++		blksize = get_unaligned_be16(arr + off + 6);
++		if ((blksize % 4) != 0) {
++			mk_sense_invalid_fld(scp, SDEB_IN_DATA, off + 6, -1);
++			return check_condition_result;
++		}
++		devip->tape_density = arr[off];
++		devip->tape_blksize = blksize;
++	}
++	off += bd_len;
++	if (off >= res)
++		return 0; /* No page written, just descriptors */
++	if (md_len > 2) {
+ 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
+ 		return check_condition_result;
+ 	}
+@@ -5835,6 +5874,10 @@ static struct sdebug_dev_info *sdebug_device_create(
+ 		} else {
+ 			devip->zoned = false;
+ 		}
++		if (sdebug_ptype == TYPE_TAPE) {
++			devip->tape_density = TAPE_DEF_DENSITY;
++			devip->tape_blksize = TAPE_DEF_BLKSIZE;
++		}
+ 		devip->create_ts = ktime_get_boottime();
+ 		atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
+ 		spin_lock_init(&devip->list_lock);
+diff --git a/drivers/scsi/scsi_sysctl.c b/drivers/scsi/scsi_sysctl.c
+index 093774d7753465..daa160459c9b3c 100644
+--- a/drivers/scsi/scsi_sysctl.c
++++ b/drivers/scsi/scsi_sysctl.c
+@@ -17,7 +17,9 @@ static struct ctl_table scsi_table[] = {
+ 	  .data		= &scsi_logging_level,
+ 	  .maxlen	= sizeof(scsi_logging_level),
+ 	  .mode		= 0644,
+-	  .proc_handler	= proc_dointvec },
++	  .proc_handler	= proc_dointvec_minmax,
++	  .extra1	= SYSCTL_ZERO,
++	  .extra2	= SYSCTL_INT_MAX },
+ };
+ 
+ static struct ctl_table_header *scsi_table_header;
+diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
+index a17441635ff3ab..3e982c166baf9b 100644
+--- a/drivers/scsi/st.c
++++ b/drivers/scsi/st.c
+@@ -952,7 +952,6 @@ static void reset_state(struct scsi_tape *STp)
+ 		STp->partition = find_partition(STp);
+ 		if (STp->partition < 0)
+ 			STp->partition = 0;
+-		STp->new_partition = STp->partition;
+ 	}
+ }
+ \f
+@@ -2894,7 +2893,6 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
+ 			timeout = STp->long_timeout * 8;
+ 
+ 		DEBC_printk(STp, "Erasing tape.\n");
+-		fileno = blkno = at_sm = 0;
+ 		break;
+ 	case MTSETBLK:		/* Set block length */
+ 	case MTSETDENSITY:	/* Set tape density */
+@@ -2927,14 +2925,17 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
+ 		if (cmd_in == MTSETDENSITY) {
+ 			(STp->buffer)->b_data[4] = arg;
+ 			STp->density_changed = 1;	/* At least we tried ;-) */
++			STp->changed_density = arg;
+ 		} else if (cmd_in == SET_DENS_AND_BLK)
+ 			(STp->buffer)->b_data[4] = arg >> 24;
+ 		else
+ 			(STp->buffer)->b_data[4] = STp->density;
+ 		if (cmd_in == MTSETBLK || cmd_in == SET_DENS_AND_BLK) {
+ 			ltmp = arg & MT_ST_BLKSIZE_MASK;
+-			if (cmd_in == MTSETBLK)
++			if (cmd_in == MTSETBLK) {
+ 				STp->blksize_changed = 1; /* At least we tried ;-) */
++				STp->changed_blksize = arg;
++			}
+ 		} else
+ 			ltmp = STp->block_size;
+ 		(STp->buffer)->b_data[9] = (ltmp >> 16);
+@@ -3081,7 +3082,9 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
+ 			   cmd_in == MTSETDRVBUFFER ||
+ 			   cmd_in == SET_DENS_AND_BLK) {
+ 			if (cmdstatp->sense_hdr.sense_key == ILLEGAL_REQUEST &&
+-			    !(STp->use_pf & PF_TESTED)) {
++				cmdstatp->sense_hdr.asc == 0x24 &&
++				(STp->device)->scsi_level <= SCSI_2 &&
++				!(STp->use_pf & PF_TESTED)) {
+ 				/* Try the other possible state of Page Format if not
+ 				   already tried */
+ 				STp->use_pf = (STp->use_pf ^ USE_PF) | PF_TESTED;
+@@ -3633,9 +3636,25 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
+ 				retval = (-EIO);
+ 				goto out;
+ 			}
+-			reset_state(STp);
++			reset_state(STp); /* Clears pos_unknown */
+ 			/* remove this when the midlevel properly clears was_reset */
+ 			STp->device->was_reset = 0;
++
++			/* Fix the device settings after reset, ignore errors */
++			if (mtc.mt_op == MTREW || mtc.mt_op == MTSEEK ||
++				mtc.mt_op == MTEOM) {
++				if (STp->can_partitions) {
++					/* STp->new_partition contains the
++					 *  latest partition set
++					 */
++					STp->partition = 0;
++					switch_partition(STp);
++				}
++				if (STp->density_changed)
++					st_int_ioctl(STp, MTSETDENSITY, STp->changed_density);
++				if (STp->blksize_changed)
++					st_int_ioctl(STp, MTSETBLK, STp->changed_blksize);
++			}
+ 		}
+ 
+ 		if (mtc.mt_op != MTNOP && mtc.mt_op != MTSETBLK &&
+diff --git a/drivers/scsi/st.h b/drivers/scsi/st.h
+index 1aaaf5369a40fc..6d31b894ee84cc 100644
+--- a/drivers/scsi/st.h
++++ b/drivers/scsi/st.h
+@@ -165,6 +165,7 @@ struct scsi_tape {
+ 	unsigned char compression_changed;
+ 	unsigned char drv_buffer;
+ 	unsigned char density;
++	unsigned char changed_density;
+ 	unsigned char door_locked;
+ 	unsigned char autorew_dev;   /* auto-rewind device */
+ 	unsigned char rew_at_close;  /* rewind necessary at close */
+@@ -172,6 +173,7 @@ struct scsi_tape {
+ 	unsigned char cleaning_req;  /* cleaning requested? */
+ 	unsigned char first_tur;     /* first TEST UNIT READY */
+ 	int block_size;
++	int changed_blksize;
+ 	int min_block;
+ 	int max_block;
+ 	int recover_count;     /* From tape opening */
+diff --git a/drivers/soc/apple/rtkit-internal.h b/drivers/soc/apple/rtkit-internal.h
+index 27c9fa745fd528..b8d5244678f010 100644
+--- a/drivers/soc/apple/rtkit-internal.h
++++ b/drivers/soc/apple/rtkit-internal.h
+@@ -44,6 +44,7 @@ struct apple_rtkit {
+ 
+ 	struct apple_rtkit_shmem ioreport_buffer;
+ 	struct apple_rtkit_shmem crashlog_buffer;
++	struct apple_rtkit_shmem oslog_buffer;
+ 
+ 	struct apple_rtkit_shmem syslog_buffer;
+ 	char *syslog_msg_buffer;
+diff --git a/drivers/soc/apple/rtkit.c b/drivers/soc/apple/rtkit.c
+index e6d940292c9fbd..45ccbe2cbcd63f 100644
+--- a/drivers/soc/apple/rtkit.c
++++ b/drivers/soc/apple/rtkit.c
+@@ -66,8 +66,9 @@ enum {
+ #define APPLE_RTKIT_SYSLOG_MSG_SIZE  GENMASK_ULL(31, 24)
+ 
+ #define APPLE_RTKIT_OSLOG_TYPE GENMASK_ULL(63, 56)
+-#define APPLE_RTKIT_OSLOG_INIT	1
+-#define APPLE_RTKIT_OSLOG_ACK	3
++#define APPLE_RTKIT_OSLOG_BUFFER_REQUEST 1
++#define APPLE_RTKIT_OSLOG_SIZE GENMASK_ULL(55, 36)
++#define APPLE_RTKIT_OSLOG_IOVA GENMASK_ULL(35, 0)
+ 
+ #define APPLE_RTKIT_MIN_SUPPORTED_VERSION 11
+ #define APPLE_RTKIT_MAX_SUPPORTED_VERSION 12
+@@ -251,15 +252,21 @@ static int apple_rtkit_common_rx_get_buffer(struct apple_rtkit *rtk,
+ 					    struct apple_rtkit_shmem *buffer,
+ 					    u8 ep, u64 msg)
+ {
+-	size_t n_4kpages = FIELD_GET(APPLE_RTKIT_BUFFER_REQUEST_SIZE, msg);
+ 	u64 reply;
+ 	int err;
+ 
++	/* The different size vs. IOVA shifts look odd but are indeed correct this way */
++	if (ep == APPLE_RTKIT_EP_OSLOG) {
++		buffer->size = FIELD_GET(APPLE_RTKIT_OSLOG_SIZE, msg);
++		buffer->iova = FIELD_GET(APPLE_RTKIT_OSLOG_IOVA, msg) << 12;
++	} else {
++		buffer->size = FIELD_GET(APPLE_RTKIT_BUFFER_REQUEST_SIZE, msg) << 12;
++		buffer->iova = FIELD_GET(APPLE_RTKIT_BUFFER_REQUEST_IOVA, msg);
++	}
++
+ 	buffer->buffer = NULL;
+ 	buffer->iomem = NULL;
+ 	buffer->is_mapped = false;
+-	buffer->iova = FIELD_GET(APPLE_RTKIT_BUFFER_REQUEST_IOVA, msg);
+-	buffer->size = n_4kpages << 12;
+ 
+ 	dev_dbg(rtk->dev, "RTKit: buffer request for 0x%zx bytes at %pad\n",
+ 		buffer->size, &buffer->iova);
+@@ -284,11 +291,21 @@ static int apple_rtkit_common_rx_get_buffer(struct apple_rtkit *rtk,
+ 	}
+ 
+ 	if (!buffer->is_mapped) {
+-		reply = FIELD_PREP(APPLE_RTKIT_SYSLOG_TYPE,
+-				   APPLE_RTKIT_BUFFER_REQUEST);
+-		reply |= FIELD_PREP(APPLE_RTKIT_BUFFER_REQUEST_SIZE, n_4kpages);
+-		reply |= FIELD_PREP(APPLE_RTKIT_BUFFER_REQUEST_IOVA,
+-				    buffer->iova);
++		/* oslog uses different fields and needs a shifted IOVA instead of size */
++		if (ep == APPLE_RTKIT_EP_OSLOG) {
++			reply = FIELD_PREP(APPLE_RTKIT_OSLOG_TYPE,
++					   APPLE_RTKIT_OSLOG_BUFFER_REQUEST);
++			reply |= FIELD_PREP(APPLE_RTKIT_OSLOG_SIZE, buffer->size);
++			reply |= FIELD_PREP(APPLE_RTKIT_OSLOG_IOVA,
++					    buffer->iova >> 12);
++		} else {
++			reply = FIELD_PREP(APPLE_RTKIT_SYSLOG_TYPE,
++					   APPLE_RTKIT_BUFFER_REQUEST);
++			reply |= FIELD_PREP(APPLE_RTKIT_BUFFER_REQUEST_SIZE,
++					    buffer->size >> 12);
++			reply |= FIELD_PREP(APPLE_RTKIT_BUFFER_REQUEST_IOVA,
++					    buffer->iova);
++		}
+ 		apple_rtkit_send_message(rtk, ep, reply, NULL, false);
+ 	}
+ 
+@@ -482,25 +499,18 @@ static void apple_rtkit_syslog_rx(struct apple_rtkit *rtk, u64 msg)
+ 	}
+ }
+ 
+-static void apple_rtkit_oslog_rx_init(struct apple_rtkit *rtk, u64 msg)
+-{
+-	u64 ack;
+-
+-	dev_dbg(rtk->dev, "RTKit: oslog init: msg: 0x%llx\n", msg);
+-	ack = FIELD_PREP(APPLE_RTKIT_OSLOG_TYPE, APPLE_RTKIT_OSLOG_ACK);
+-	apple_rtkit_send_message(rtk, APPLE_RTKIT_EP_OSLOG, ack, NULL, false);
+-}
+-
+ static void apple_rtkit_oslog_rx(struct apple_rtkit *rtk, u64 msg)
+ {
+ 	u8 type = FIELD_GET(APPLE_RTKIT_OSLOG_TYPE, msg);
+ 
+ 	switch (type) {
+-	case APPLE_RTKIT_OSLOG_INIT:
+-		apple_rtkit_oslog_rx_init(rtk, msg);
++	case APPLE_RTKIT_OSLOG_BUFFER_REQUEST:
++		apple_rtkit_common_rx_get_buffer(rtk, &rtk->oslog_buffer,
++						 APPLE_RTKIT_EP_OSLOG, msg);
+ 		break;
+ 	default:
+-		dev_warn(rtk->dev, "RTKit: Unknown oslog message: %llx\n", msg);
++		dev_warn(rtk->dev, "RTKit: Unknown oslog message: %llx\n",
++			 msg);
+ 	}
+ }
+ 
+@@ -667,7 +677,7 @@ struct apple_rtkit *apple_rtkit_init(struct device *dev, void *cookie,
+ 	rtk->mbox->rx = apple_rtkit_rx;
+ 	rtk->mbox->cookie = rtk;
+ 
+-	rtk->wq = alloc_ordered_workqueue("rtkit-%s", WQ_MEM_RECLAIM,
++	rtk->wq = alloc_ordered_workqueue("rtkit-%s", WQ_HIGHPRI | WQ_MEM_RECLAIM,
+ 					  dev_name(rtk->dev));
+ 	if (!rtk->wq) {
+ 		ret = -ENOMEM;
+@@ -710,6 +720,7 @@ int apple_rtkit_reinit(struct apple_rtkit *rtk)
+ 
+ 	apple_rtkit_free_buffer(rtk, &rtk->ioreport_buffer);
+ 	apple_rtkit_free_buffer(rtk, &rtk->crashlog_buffer);
++	apple_rtkit_free_buffer(rtk, &rtk->oslog_buffer);
+ 	apple_rtkit_free_buffer(rtk, &rtk->syslog_buffer);
+ 
+ 	kfree(rtk->syslog_msg_buffer);
+@@ -890,6 +901,7 @@ void apple_rtkit_free(struct apple_rtkit *rtk)
+ 
+ 	apple_rtkit_free_buffer(rtk, &rtk->ioreport_buffer);
+ 	apple_rtkit_free_buffer(rtk, &rtk->crashlog_buffer);
++	apple_rtkit_free_buffer(rtk, &rtk->oslog_buffer);
+ 	apple_rtkit_free_buffer(rtk, &rtk->syslog_buffer);
+ 
+ 	kfree(rtk->syslog_msg_buffer);
+diff --git a/drivers/soc/mediatek/mtk-mutex.c b/drivers/soc/mediatek/mtk-mutex.c
+index 5250c1d702eb9b..aaa965d4b050a7 100644
+--- a/drivers/soc/mediatek/mtk-mutex.c
++++ b/drivers/soc/mediatek/mtk-mutex.c
+@@ -155,6 +155,7 @@
+ #define MT8188_MUTEX_MOD_DISP1_VPP_MERGE3	23
+ #define MT8188_MUTEX_MOD_DISP1_VPP_MERGE4	24
+ #define MT8188_MUTEX_MOD_DISP1_DISP_MIXER	30
++#define MT8188_MUTEX_MOD_DISP1_DPI1		38
+ #define MT8188_MUTEX_MOD_DISP1_DP_INTF1		39
+ 
+ #define MT8195_MUTEX_MOD_DISP_OVL0		0
+@@ -289,6 +290,7 @@
+ #define MT8188_MUTEX_SOF_DSI0			1
+ #define MT8188_MUTEX_SOF_DP_INTF0		3
+ #define MT8188_MUTEX_SOF_DP_INTF1		4
++#define MT8188_MUTEX_SOF_DPI1			5
+ #define MT8195_MUTEX_SOF_DSI0			1
+ #define MT8195_MUTEX_SOF_DSI1			2
+ #define MT8195_MUTEX_SOF_DP_INTF0		3
+@@ -301,6 +303,7 @@
+ #define MT8188_MUTEX_EOF_DSI0			(MT8188_MUTEX_SOF_DSI0 << 7)
+ #define MT8188_MUTEX_EOF_DP_INTF0		(MT8188_MUTEX_SOF_DP_INTF0 << 7)
+ #define MT8188_MUTEX_EOF_DP_INTF1		(MT8188_MUTEX_SOF_DP_INTF1 << 7)
++#define MT8188_MUTEX_EOF_DPI1			(MT8188_MUTEX_SOF_DPI1 << 7)
+ #define MT8195_MUTEX_EOF_DSI0			(MT8195_MUTEX_SOF_DSI0 << 7)
+ #define MT8195_MUTEX_EOF_DSI1			(MT8195_MUTEX_SOF_DSI1 << 7)
+ #define MT8195_MUTEX_EOF_DP_INTF0		(MT8195_MUTEX_SOF_DP_INTF0 << 7)
+@@ -472,6 +475,7 @@ static const u8 mt8188_mutex_mod[DDP_COMPONENT_ID_MAX] = {
+ 	[DDP_COMPONENT_PWM0] = MT8188_MUTEX_MOD2_DISP_PWM0,
+ 	[DDP_COMPONENT_DP_INTF0] = MT8188_MUTEX_MOD_DISP_DP_INTF0,
+ 	[DDP_COMPONENT_DP_INTF1] = MT8188_MUTEX_MOD_DISP1_DP_INTF1,
++	[DDP_COMPONENT_DPI1] = MT8188_MUTEX_MOD_DISP1_DPI1,
+ 	[DDP_COMPONENT_ETHDR_MIXER] = MT8188_MUTEX_MOD_DISP1_DISP_MIXER,
+ 	[DDP_COMPONENT_MDP_RDMA0] = MT8188_MUTEX_MOD_DISP1_MDP_RDMA0,
+ 	[DDP_COMPONENT_MDP_RDMA1] = MT8188_MUTEX_MOD_DISP1_MDP_RDMA1,
+@@ -686,6 +690,8 @@ static const u16 mt8188_mutex_sof[DDP_MUTEX_SOF_MAX] = {
+ 	[MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE,
+ 	[MUTEX_SOF_DSI0] =
+ 		MT8188_MUTEX_SOF_DSI0 | MT8188_MUTEX_EOF_DSI0,
++	[MUTEX_SOF_DPI1] =
++		MT8188_MUTEX_SOF_DPI1 | MT8188_MUTEX_EOF_DPI1,
+ 	[MUTEX_SOF_DP_INTF0] =
+ 		MT8188_MUTEX_SOF_DP_INTF0 | MT8188_MUTEX_EOF_DP_INTF0,
+ 	[MUTEX_SOF_DP_INTF1] =
+diff --git a/drivers/soc/samsung/exynos-asv.c b/drivers/soc/samsung/exynos-asv.c
+index 97006cc3b94610..8e681f51952644 100644
+--- a/drivers/soc/samsung/exynos-asv.c
++++ b/drivers/soc/samsung/exynos-asv.c
+@@ -9,6 +9,7 @@
+  * Samsung Exynos SoC Adaptive Supply Voltage support
+  */
+ 
++#include <linux/array_size.h>
+ #include <linux/cpu.h>
+ #include <linux/device.h>
+ #include <linux/energy_model.h>
+diff --git a/drivers/soc/samsung/exynos-chipid.c b/drivers/soc/samsung/exynos-chipid.c
+index bba8d86ae1bb06..dedfe6d0fb3f36 100644
+--- a/drivers/soc/samsung/exynos-chipid.c
++++ b/drivers/soc/samsung/exynos-chipid.c
+@@ -12,6 +12,7 @@
+  * Samsung Exynos SoC Adaptive Supply Voltage and Chip ID support
+  */
+ 
++#include <linux/array_size.h>
+ #include <linux/device.h>
+ #include <linux/errno.h>
+ #include <linux/mfd/syscon.h>
+diff --git a/drivers/soc/samsung/exynos-pmu.c b/drivers/soc/samsung/exynos-pmu.c
+index dd5256e5aae1ae..c40313886a0123 100644
+--- a/drivers/soc/samsung/exynos-pmu.c
++++ b/drivers/soc/samsung/exynos-pmu.c
+@@ -5,6 +5,7 @@
+ //
+ // Exynos - CPU PMU(Power Management Unit) support
+ 
++#include <linux/array_size.h>
+ #include <linux/arm-smccc.h>
+ #include <linux/of.h>
+ #include <linux/of_address.h>
+diff --git a/drivers/soc/samsung/exynos-usi.c b/drivers/soc/samsung/exynos-usi.c
+index 114352695ac2bc..5a93a68dba87fd 100644
+--- a/drivers/soc/samsung/exynos-usi.c
++++ b/drivers/soc/samsung/exynos-usi.c
+@@ -6,6 +6,7 @@
+  * Samsung Exynos USI driver (Universal Serial Interface).
+  */
+ 
++#include <linux/array_size.h>
+ #include <linux/clk.h>
+ #include <linux/mfd/syscon.h>
+ #include <linux/module.h>
+diff --git a/drivers/soc/samsung/exynos3250-pmu.c b/drivers/soc/samsung/exynos3250-pmu.c
+index 30f230ed1769cf..4bad12a995422e 100644
+--- a/drivers/soc/samsung/exynos3250-pmu.c
++++ b/drivers/soc/samsung/exynos3250-pmu.c
+@@ -5,6 +5,7 @@
+ //
+ // Exynos3250 - CPU PMU (Power Management Unit) support
+ 
++#include <linux/array_size.h>
+ #include <linux/soc/samsung/exynos-regs-pmu.h>
+ #include <linux/soc/samsung/exynos-pmu.h>
+ 
+diff --git a/drivers/soc/samsung/exynos5250-pmu.c b/drivers/soc/samsung/exynos5250-pmu.c
+index 7a2d50be6b4ac0..2ae5c3e1b07a37 100644
+--- a/drivers/soc/samsung/exynos5250-pmu.c
++++ b/drivers/soc/samsung/exynos5250-pmu.c
+@@ -5,6 +5,7 @@
+ //
+ // Exynos5250 - CPU PMU (Power Management Unit) support
+ 
++#include <linux/array_size.h>
+ #include <linux/soc/samsung/exynos-regs-pmu.h>
+ #include <linux/soc/samsung/exynos-pmu.h>
+ 
+diff --git a/drivers/soc/samsung/exynos5420-pmu.c b/drivers/soc/samsung/exynos5420-pmu.c
+index 6fedcd78cb4519..58a2209795f78a 100644
+--- a/drivers/soc/samsung/exynos5420-pmu.c
++++ b/drivers/soc/samsung/exynos5420-pmu.c
+@@ -5,6 +5,7 @@
+ //
+ // Exynos5420 - CPU PMU (Power Management Unit) support
+ 
++#include <linux/array_size.h>
+ #include <linux/pm.h>
+ #include <linux/soc/samsung/exynos-regs-pmu.h>
+ #include <linux/soc/samsung/exynos-pmu.h>
+diff --git a/drivers/soc/ti/k3-socinfo.c b/drivers/soc/ti/k3-socinfo.c
+index 4fb0f0a248288b..704039eb3c0784 100644
+--- a/drivers/soc/ti/k3-socinfo.c
++++ b/drivers/soc/ti/k3-socinfo.c
+@@ -105,6 +105,12 @@ k3_chipinfo_variant_to_sr(unsigned int partno, unsigned int variant,
+ 	return -ENODEV;
+ }
+ 
++static const struct regmap_config k3_chipinfo_regmap_cfg = {
++	.reg_bits = 32,
++	.val_bits = 32,
++	.reg_stride = 4,
++};
++
+ static int k3_chipinfo_probe(struct platform_device *pdev)
+ {
+ 	struct device_node *node = pdev->dev.of_node;
+@@ -112,13 +118,18 @@ static int k3_chipinfo_probe(struct platform_device *pdev)
+ 	struct device *dev = &pdev->dev;
+ 	struct soc_device *soc_dev;
+ 	struct regmap *regmap;
++	void __iomem *base;
+ 	u32 partno_id;
+ 	u32 variant;
+ 	u32 jtag_id;
+ 	u32 mfg;
+ 	int ret;
+ 
+-	regmap = device_node_to_regmap(node);
++	base = devm_platform_ioremap_resource(pdev, 0);
++	if (IS_ERR(base))
++		return PTR_ERR(base);
++
++	regmap = regmap_init_mmio(dev, base, &k3_chipinfo_regmap_cfg);
+ 	if (IS_ERR(regmap))
+ 		return PTR_ERR(regmap);
+ 
+diff --git a/drivers/soundwire/amd_manager.c b/drivers/soundwire/amd_manager.c
+index 0d01849c358619..e3d5e6c1d582c3 100644
+--- a/drivers/soundwire/amd_manager.c
++++ b/drivers/soundwire/amd_manager.c
+@@ -1110,6 +1110,7 @@ static int __maybe_unused amd_suspend(struct device *dev)
+ 		amd_sdw_wake_enable(amd_manager, false);
+ 		return amd_sdw_clock_stop(amd_manager);
+ 	} else if (amd_manager->power_mode_mask & AMD_SDW_POWER_OFF_MODE) {
++		amd_sdw_wake_enable(amd_manager, false);
+ 		/*
+ 		 * As per hardware programming sequence on AMD platforms,
+ 		 * clock stop should be invoked first before powering-off
+@@ -1137,6 +1138,7 @@ static int __maybe_unused amd_suspend_runtime(struct device *dev)
+ 		amd_sdw_wake_enable(amd_manager, true);
+ 		return amd_sdw_clock_stop(amd_manager);
+ 	} else if (amd_manager->power_mode_mask & AMD_SDW_POWER_OFF_MODE) {
++		amd_sdw_wake_enable(amd_manager, true);
+ 		ret = amd_sdw_clock_stop(amd_manager);
+ 		if (ret)
+ 			return ret;
+diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c
+index 263ca32f0c5c39..6ca06cce41d3c4 100644
+--- a/drivers/soundwire/bus.c
++++ b/drivers/soundwire/bus.c
+@@ -121,6 +121,10 @@ int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent,
+ 	set_bit(SDW_GROUP13_DEV_NUM, bus->assigned);
+ 	set_bit(SDW_MASTER_DEV_NUM, bus->assigned);
+ 
++	ret = sdw_irq_create(bus, fwnode);
++	if (ret)
++		return ret;
++
+ 	/*
+ 	 * SDW is an enumerable bus, but devices can be powered off. So,
+ 	 * they won't be able to report as present.
+@@ -137,6 +141,7 @@ int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent,
+ 
+ 	if (ret < 0) {
+ 		dev_err(bus->dev, "Finding slaves failed:%d\n", ret);
++		sdw_irq_delete(bus);
+ 		return ret;
+ 	}
+ 
+@@ -155,10 +160,6 @@ int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent,
+ 	bus->params.curr_bank = SDW_BANK0;
+ 	bus->params.next_bank = SDW_BANK1;
+ 
+-	ret = sdw_irq_create(bus, fwnode);
+-	if (ret)
+-		return ret;
+-
+ 	return 0;
+ }
+ EXPORT_SYMBOL(sdw_bus_master_add);
+diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c
+index 05652e983539b4..6f2b5ec5c87c6a 100644
+--- a/drivers/soundwire/cadence_master.c
++++ b/drivers/soundwire/cadence_master.c
+@@ -1341,7 +1341,7 @@ static u32 cdns_set_initial_frame_shape(int n_rows, int n_cols)
+ 	return val;
+ }
+ 
+-static void cdns_init_clock_ctrl(struct sdw_cdns *cdns)
++static int cdns_init_clock_ctrl(struct sdw_cdns *cdns)
+ {
+ 	struct sdw_bus *bus = &cdns->bus;
+ 	struct sdw_master_prop *prop = &bus->prop;
+@@ -1355,14 +1355,25 @@ static void cdns_init_clock_ctrl(struct sdw_cdns *cdns)
+ 		prop->default_row,
+ 		prop->default_col);
+ 
++	if (!prop->default_frame_rate || !prop->default_row) {
++		dev_err(cdns->dev, "Default frame_rate %d or row %d is invalid\n",
++			prop->default_frame_rate, prop->default_row);
++		return -EINVAL;
++	}
++
+ 	/* Set clock divider */
+-	divider	= (prop->mclk_freq / prop->max_clk_freq) - 1;
++	divider	= (prop->mclk_freq * SDW_DOUBLE_RATE_FACTOR /
++		bus->params.curr_dr_freq) - 1;
+ 
+ 	cdns_updatel(cdns, CDNS_MCP_CLK_CTRL0,
+ 		     CDNS_MCP_CLK_MCLKD_MASK, divider);
+ 	cdns_updatel(cdns, CDNS_MCP_CLK_CTRL1,
+ 		     CDNS_MCP_CLK_MCLKD_MASK, divider);
+ 
++	/* Set frame shape based on the actual bus frequency. */
++	prop->default_col = bus->params.curr_dr_freq /
++			    prop->default_frame_rate / prop->default_row;
++
+ 	/*
+ 	 * Frame shape changes after initialization have to be done
+ 	 * with the bank switch mechanism
+@@ -1375,6 +1386,8 @@ static void cdns_init_clock_ctrl(struct sdw_cdns *cdns)
+ 	ssp_interval = prop->default_frame_rate / SDW_CADENCE_GSYNC_HZ;
+ 	cdns_writel(cdns, CDNS_MCP_SSP_CTRL0, ssp_interval);
+ 	cdns_writel(cdns, CDNS_MCP_SSP_CTRL1, ssp_interval);
++
++	return 0;
+ }
+ 
+ /**
+@@ -1383,9 +1396,12 @@ static void cdns_init_clock_ctrl(struct sdw_cdns *cdns)
+  */
+ int sdw_cdns_init(struct sdw_cdns *cdns)
+ {
++	int ret;
+ 	u32 val;
+ 
+-	cdns_init_clock_ctrl(cdns);
++	ret = cdns_init_clock_ctrl(cdns);
++	if (ret)
++		return ret;
+ 
+ 	sdw_cdns_check_self_clearing_bits(cdns, __func__, false, 0);
+ 
+diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
+index 3fa990fb59c78b..7c43df252328dc 100644
+--- a/drivers/spi/spi-fsl-dspi.c
++++ b/drivers/spi/spi-fsl-dspi.c
+@@ -1,7 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0+
+ //
+ // Copyright 2013 Freescale Semiconductor, Inc.
+-// Copyright 2020 NXP
++// Copyright 2020-2025 NXP
+ //
+ // Freescale DSPI driver
+ // This file contains a driver for the Freescale DSPI
+@@ -62,6 +62,7 @@
+ #define SPI_SR_TFIWF			BIT(18)
+ #define SPI_SR_RFDF			BIT(17)
+ #define SPI_SR_CMDFFF			BIT(16)
++#define SPI_SR_TXRXS			BIT(30)
+ #define SPI_SR_CLEAR			(SPI_SR_TCFQF | \
+ 					SPI_SR_TFUF | SPI_SR_TFFF | \
+ 					SPI_SR_CMDTCF | SPI_SR_SPEF | \
+@@ -921,9 +922,20 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr,
+ 	struct spi_transfer *transfer;
+ 	bool cs = false;
+ 	int status = 0;
++	u32 val = 0;
++	bool cs_change = false;
+ 
+ 	message->actual_length = 0;
+ 
++	/* Put DSPI in running mode if halted. */
++	regmap_read(dspi->regmap, SPI_MCR, &val);
++	if (val & SPI_MCR_HALT) {
++		regmap_update_bits(dspi->regmap, SPI_MCR, SPI_MCR_HALT, 0);
++		while (regmap_read(dspi->regmap, SPI_SR, &val) >= 0 &&
++		       !(val & SPI_SR_TXRXS))
++			;
++	}
++
+ 	list_for_each_entry(transfer, &message->transfers, transfer_list) {
+ 		dspi->cur_transfer = transfer;
+ 		dspi->cur_msg = message;
+@@ -953,6 +965,7 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr,
+ 				dspi->tx_cmd |= SPI_PUSHR_CMD_CONT;
+ 		}
+ 
++		cs_change = transfer->cs_change;
+ 		dspi->tx = transfer->tx_buf;
+ 		dspi->rx = transfer->rx_buf;
+ 		dspi->len = transfer->len;
+@@ -962,6 +975,8 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr,
+ 				   SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF,
+ 				   SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF);
+ 
++		regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR);
++
+ 		spi_take_timestamp_pre(dspi->ctlr, dspi->cur_transfer,
+ 				       dspi->progress, !dspi->irq);
+ 
+@@ -988,6 +1003,15 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr,
+ 			dspi_deassert_cs(spi, &cs);
+ 	}
+ 
++	if (status || !cs_change) {
++		/* Put DSPI in stop mode */
++		regmap_update_bits(dspi->regmap, SPI_MCR,
++				   SPI_MCR_HALT, SPI_MCR_HALT);
++		while (regmap_read(dspi->regmap, SPI_SR, &val) >= 0 &&
++		       val & SPI_SR_TXRXS)
++			;
++	}
++
+ 	message->status = status;
+ 	spi_finalize_current_message(ctlr);
+ 
+@@ -1167,6 +1191,20 @@ static int dspi_resume(struct device *dev)
+ 
+ static SIMPLE_DEV_PM_OPS(dspi_pm, dspi_suspend, dspi_resume);
+ 
++static const struct regmap_range dspi_yes_ranges[] = {
++	regmap_reg_range(SPI_MCR, SPI_MCR),
++	regmap_reg_range(SPI_TCR, SPI_CTAR(3)),
++	regmap_reg_range(SPI_SR, SPI_TXFR3),
++	regmap_reg_range(SPI_RXFR0, SPI_RXFR3),
++	regmap_reg_range(SPI_CTARE(0), SPI_CTARE(3)),
++	regmap_reg_range(SPI_SREX, SPI_SREX),
++};
++
++static const struct regmap_access_table dspi_access_table = {
++	.yes_ranges	= dspi_yes_ranges,
++	.n_yes_ranges	= ARRAY_SIZE(dspi_yes_ranges),
++};
++
+ static const struct regmap_range dspi_volatile_ranges[] = {
+ 	regmap_reg_range(SPI_MCR, SPI_TCR),
+ 	regmap_reg_range(SPI_SR, SPI_SR),
+@@ -1184,6 +1222,8 @@ static const struct regmap_config dspi_regmap_config = {
+ 	.reg_stride	= 4,
+ 	.max_register	= 0x88,
+ 	.volatile_table	= &dspi_volatile_table,
++	.rd_table	= &dspi_access_table,
++	.wr_table	= &dspi_access_table,
+ };
+ 
+ static const struct regmap_range dspi_xspi_volatile_ranges[] = {
+@@ -1205,6 +1245,8 @@ static const struct regmap_config dspi_xspi_regmap_config[] = {
+ 		.reg_stride	= 4,
+ 		.max_register	= 0x13c,
+ 		.volatile_table	= &dspi_xspi_volatile_table,
++		.rd_table	= &dspi_access_table,
++		.wr_table	= &dspi_access_table,
+ 	},
+ 	{
+ 		.name		= "pushr",
+@@ -1227,6 +1269,8 @@ static int dspi_init(struct fsl_dspi *dspi)
+ 	if (!spi_controller_is_target(dspi->ctlr))
+ 		mcr |= SPI_MCR_HOST;
+ 
++	mcr |= SPI_MCR_HALT;
++
+ 	regmap_write(dspi->regmap, SPI_MCR, mcr);
+ 	regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR);
+ 
+diff --git a/drivers/spi/spi-mux.c b/drivers/spi/spi-mux.c
+index c02c4204442f5e..0eb35c4e3987ea 100644
+--- a/drivers/spi/spi-mux.c
++++ b/drivers/spi/spi-mux.c
+@@ -68,9 +68,7 @@ static int spi_mux_select(struct spi_device *spi)
+ 
+ 	priv->current_cs = spi_get_chipselect(spi, 0);
+ 
+-	spi_setup(priv->spi);
+-
+-	return 0;
++	return spi_setup(priv->spi);
+ }
+ 
+ static int spi_mux_setup(struct spi_device *spi)
+diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
+index 40a64a598a7495..5008489d6fac8a 100644
+--- a/drivers/spi/spi-rockchip.c
++++ b/drivers/spi/spi-rockchip.c
+@@ -547,7 +547,7 @@ static int rockchip_spi_config(struct rockchip_spi *rs,
+ 	cr0 |= (spi->mode & 0x3U) << CR0_SCPH_OFFSET;
+ 	if (spi->mode & SPI_LSB_FIRST)
+ 		cr0 |= CR0_FBM_LSB << CR0_FBM_OFFSET;
+-	if (spi->mode & SPI_CS_HIGH)
++	if ((spi->mode & SPI_CS_HIGH) && !(spi_get_csgpiod(spi, 0)))
+ 		cr0 |= BIT(spi_get_chipselect(spi, 0)) << CR0_SOI_OFFSET;
+ 
+ 	if (xfer->rx_buf && xfer->tx_buf)
+diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c
+index b9df39e06e7cd4..4b091b4d4ff372 100644
+--- a/drivers/spi/spi-zynqmp-gqspi.c
++++ b/drivers/spi/spi-zynqmp-gqspi.c
+@@ -799,7 +799,6 @@ static void zynqmp_process_dma_irq(struct zynqmp_qspi *xqspi)
+ static irqreturn_t zynqmp_qspi_irq(int irq, void *dev_id)
+ {
+ 	struct zynqmp_qspi *xqspi = (struct zynqmp_qspi *)dev_id;
+-	irqreturn_t ret = IRQ_NONE;
+ 	u32 status, mask, dma_status = 0;
+ 
+ 	status = zynqmp_gqspi_read(xqspi, GQSPI_ISR_OFST);
+@@ -814,27 +813,24 @@ static irqreturn_t zynqmp_qspi_irq(int irq, void *dev_id)
+ 				   dma_status);
+ 	}
+ 
+-	if (mask & GQSPI_ISR_TXNOT_FULL_MASK) {
++	if (!mask && !dma_status)
++		return IRQ_NONE;
++
++	if (mask & GQSPI_ISR_TXNOT_FULL_MASK)
+ 		zynqmp_qspi_filltxfifo(xqspi, GQSPI_TX_FIFO_FILL);
+-		ret = IRQ_HANDLED;
+-	}
+ 
+-	if (dma_status & GQSPI_QSPIDMA_DST_I_STS_DONE_MASK) {
++	if (dma_status & GQSPI_QSPIDMA_DST_I_STS_DONE_MASK)
+ 		zynqmp_process_dma_irq(xqspi);
+-		ret = IRQ_HANDLED;
+-	} else if (!(mask & GQSPI_IER_RXEMPTY_MASK) &&
+-			(mask & GQSPI_IER_GENFIFOEMPTY_MASK)) {
++	else if (!(mask & GQSPI_IER_RXEMPTY_MASK) &&
++			(mask & GQSPI_IER_GENFIFOEMPTY_MASK))
+ 		zynqmp_qspi_readrxfifo(xqspi, GQSPI_RX_FIFO_FILL);
+-		ret = IRQ_HANDLED;
+-	}
+ 
+ 	if (xqspi->bytes_to_receive == 0 && xqspi->bytes_to_transfer == 0 &&
+ 	    ((status & GQSPI_IRQ_MASK) == GQSPI_IRQ_MASK)) {
+ 		zynqmp_gqspi_write(xqspi, GQSPI_IDR_OFST, GQSPI_ISR_IDR_MASK);
+ 		complete(&xqspi->data_completion);
+-		ret = IRQ_HANDLED;
+ 	}
+-	return ret;
++	return IRQ_HANDLED;
+ }
+ 
+ /**
+diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+index 97787002080a18..1a9432646b70ae 100644
+--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
++++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+@@ -588,29 +588,6 @@ static int vchiq_platform_init(struct platform_device *pdev, struct vchiq_state
+ 	return 0;
+ }
+ 
+-int
+-vchiq_platform_init_state(struct vchiq_state *state)
+-{
+-	struct vchiq_arm_state *platform_state;
+-
+-	platform_state = devm_kzalloc(state->dev, sizeof(*platform_state), GFP_KERNEL);
+-	if (!platform_state)
+-		return -ENOMEM;
+-
+-	rwlock_init(&platform_state->susp_res_lock);
+-
+-	init_completion(&platform_state->ka_evt);
+-	atomic_set(&platform_state->ka_use_count, 0);
+-	atomic_set(&platform_state->ka_use_ack_count, 0);
+-	atomic_set(&platform_state->ka_release_count, 0);
+-
+-	platform_state->state = state;
+-
+-	state->platform_state = (struct opaque_platform_state *)platform_state;
+-
+-	return 0;
+-}
+-
+ static struct vchiq_arm_state *vchiq_platform_get_arm_state(struct vchiq_state *state)
+ {
+ 	return (struct vchiq_arm_state *)state->platform_state;
+@@ -1358,6 +1335,39 @@ vchiq_keepalive_thread_func(void *v)
+ 	return 0;
+ }
+ 
++int
++vchiq_platform_init_state(struct vchiq_state *state)
++{
++	struct vchiq_arm_state *platform_state;
++	char threadname[16];
++
++	platform_state = devm_kzalloc(state->dev, sizeof(*platform_state), GFP_KERNEL);
++	if (!platform_state)
++		return -ENOMEM;
++
++	snprintf(threadname, sizeof(threadname), "vchiq-keep/%d",
++		 state->id);
++	platform_state->ka_thread = kthread_create(&vchiq_keepalive_thread_func,
++						   (void *)state, threadname);
++	if (IS_ERR(platform_state->ka_thread)) {
++		dev_err(state->dev, "couldn't create thread %s\n", threadname);
++		return PTR_ERR(platform_state->ka_thread);
++	}
++
++	rwlock_init(&platform_state->susp_res_lock);
++
++	init_completion(&platform_state->ka_evt);
++	atomic_set(&platform_state->ka_use_count, 0);
++	atomic_set(&platform_state->ka_use_ack_count, 0);
++	atomic_set(&platform_state->ka_release_count, 0);
++
++	platform_state->state = state;
++
++	state->platform_state = (struct opaque_platform_state *)platform_state;
++
++	return 0;
++}
++
+ int
+ vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
+ 		   enum USE_TYPE_E use_type)
+@@ -1678,7 +1688,6 @@ void vchiq_platform_conn_state_changed(struct vchiq_state *state,
+ 				       enum vchiq_connstate newstate)
+ {
+ 	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
+-	char threadname[16];
+ 
+ 	dev_dbg(state->dev, "suspend: %d: %s->%s\n",
+ 		state->id, get_conn_state_name(oldstate), get_conn_state_name(newstate));
+@@ -1693,17 +1702,7 @@ void vchiq_platform_conn_state_changed(struct vchiq_state *state,
+ 
+ 	arm_state->first_connect = 1;
+ 	write_unlock_bh(&arm_state->susp_res_lock);
+-	snprintf(threadname, sizeof(threadname), "vchiq-keep/%d",
+-		 state->id);
+-	arm_state->ka_thread = kthread_create(&vchiq_keepalive_thread_func,
+-					      (void *)state,
+-					      threadname);
+-	if (IS_ERR(arm_state->ka_thread)) {
+-		dev_err(state->dev, "suspend: Couldn't create thread %s\n",
+-			threadname);
+-	} else {
+-		wake_up_process(arm_state->ka_thread);
+-	}
++	wake_up_process(arm_state->ka_thread);
+ }
+ 
+ static const struct of_device_id vchiq_of_match[] = {
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index 6002283cbebabc..68bbdf3ee101db 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -4317,8 +4317,8 @@ int iscsit_close_connection(
+ 	spin_unlock(&iscsit_global->ts_bitmap_lock);
+ 
+ 	iscsit_stop_timers_for_cmds(conn);
+-	iscsit_stop_nopin_response_timer(conn);
+ 	iscsit_stop_nopin_timer(conn);
++	iscsit_stop_nopin_response_timer(conn);
+ 
+ 	if (conn->conn_transport->iscsit_wait_conn)
+ 		conn->conn_transport->iscsit_wait_conn(conn);
+diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
+index 61c065702350e0..701dcbd7b63cf0 100644
+--- a/drivers/target/target_core_spc.c
++++ b/drivers/target/target_core_spc.c
+@@ -2151,8 +2151,10 @@ spc_rsoc_get_descr(struct se_cmd *cmd, struct target_opcode_descriptor **opcode)
+ 			if (descr->serv_action_valid)
+ 				return TCM_INVALID_CDB_FIELD;
+ 
+-			if (!descr->enabled || descr->enabled(descr, cmd))
++			if (!descr->enabled || descr->enabled(descr, cmd)) {
+ 				*opcode = descr;
++				return TCM_NO_SENSE;
++			}
+ 			break;
+ 		case 0x2:
+ 			/*
+@@ -2166,8 +2168,10 @@ spc_rsoc_get_descr(struct se_cmd *cmd, struct target_opcode_descriptor **opcode)
+ 			if (descr->serv_action_valid &&
+ 			    descr->service_action == requested_sa) {
+ 				if (!descr->enabled || descr->enabled(descr,
+-								      cmd))
++								      cmd)) {
+ 					*opcode = descr;
++					return TCM_NO_SENSE;
++				}
+ 			} else if (!descr->serv_action_valid)
+ 				return TCM_INVALID_CDB_FIELD;
+ 			break;
+@@ -2180,13 +2184,15 @@ spc_rsoc_get_descr(struct se_cmd *cmd, struct target_opcode_descriptor **opcode)
+ 			 */
+ 			if (descr->service_action == requested_sa)
+ 				if (!descr->enabled || descr->enabled(descr,
+-								      cmd))
++								      cmd)) {
+ 					*opcode = descr;
++					return TCM_NO_SENSE;
++				}
+ 			break;
+ 		}
+ 	}
+ 
+-	return 0;
++	return TCM_NO_SENSE;
+ }
+ 
+ static sense_reason_t
+diff --git a/drivers/thermal/intel/x86_pkg_temp_thermal.c b/drivers/thermal/intel/x86_pkg_temp_thermal.c
+index 65b33b56a9be5e..8c44f378b61ef1 100644
+--- a/drivers/thermal/intel/x86_pkg_temp_thermal.c
++++ b/drivers/thermal/intel/x86_pkg_temp_thermal.c
+@@ -329,6 +329,7 @@ static int pkg_temp_thermal_device_add(unsigned int cpu)
+ 	tj_max = intel_tcc_get_tjmax(cpu);
+ 	if (tj_max < 0)
+ 		return tj_max;
++	tj_max *= 1000;
+ 
+ 	zonedev = kzalloc(sizeof(*zonedev), GFP_KERNEL);
+ 	if (!zonedev)
+diff --git a/drivers/thermal/mediatek/lvts_thermal.c b/drivers/thermal/mediatek/lvts_thermal.c
+index 4b3225377e8f8f..3295b27ab70d2b 100644
+--- a/drivers/thermal/mediatek/lvts_thermal.c
++++ b/drivers/thermal/mediatek/lvts_thermal.c
+@@ -65,7 +65,6 @@
+ #define LVTS_HW_FILTER				0x0
+ #define LVTS_TSSEL_CONF				0x13121110
+ #define LVTS_CALSCALE_CONF			0x300
+-#define LVTS_MONINT_CONF			0x0300318C
+ 
+ #define LVTS_MONINT_OFFSET_SENSOR0		0xC
+ #define LVTS_MONINT_OFFSET_SENSOR1		0x180
+@@ -929,7 +928,7 @@ static int lvts_irq_init(struct lvts_ctrl *lvts_ctrl)
+ 	 * The LVTS_MONINT register layout is the same as the LVTS_MONINTSTS
+ 	 * register, except we set the bits to enable the interrupt.
+ 	 */
+-	writel(LVTS_MONINT_CONF, LVTS_MONINT(lvts_ctrl->base));
++	writel(0, LVTS_MONINT(lvts_ctrl->base));
+ 
+ 	return 0;
+ }
+diff --git a/drivers/thermal/qoriq_thermal.c b/drivers/thermal/qoriq_thermal.c
+index 52e26be8c53df6..aed2729f63d06c 100644
+--- a/drivers/thermal/qoriq_thermal.c
++++ b/drivers/thermal/qoriq_thermal.c
+@@ -18,6 +18,7 @@
+ #define SITES_MAX		16
+ #define TMR_DISABLE		0x0
+ #define TMR_ME			0x80000000
++#define TMR_CMD			BIT(29)
+ #define TMR_ALPF		0x0c000000
+ #define TMR_ALPF_V2		0x03000000
+ #define TMTMIR_DEFAULT	0x0000000f
+@@ -356,6 +357,12 @@ static int qoriq_tmu_suspend(struct device *dev)
+ 	if (ret)
+ 		return ret;
+ 
++	if (data->ver > TMU_VER1) {
++		ret = regmap_set_bits(data->regmap, REGS_TMR, TMR_CMD);
++		if (ret)
++			return ret;
++	}
++
+ 	clk_disable_unprepare(data->clk);
+ 
+ 	return 0;
+@@ -370,6 +377,12 @@ static int qoriq_tmu_resume(struct device *dev)
+ 	if (ret)
+ 		return ret;
+ 
++	if (data->ver > TMU_VER1) {
++		ret = regmap_clear_bits(data->regmap, REGS_TMR, TMR_CMD);
++		if (ret)
++			return ret;
++	}
++
+ 	/* Enable monitoring */
+ 	return regmap_update_bits(data->regmap, REGS_TMR, TMR_ME, TMR_ME);
+ }
+diff --git a/drivers/thunderbolt/retimer.c b/drivers/thunderbolt/retimer.c
+index eeb64433ebbca0..3488be7620674c 100644
+--- a/drivers/thunderbolt/retimer.c
++++ b/drivers/thunderbolt/retimer.c
+@@ -93,9 +93,11 @@ static int tb_retimer_nvm_add(struct tb_retimer *rt)
+ 	if (ret)
+ 		goto err_nvm;
+ 
+-	ret = tb_nvm_add_non_active(nvm, nvm_write);
+-	if (ret)
+-		goto err_nvm;
++	if (!rt->no_nvm_upgrade) {
++		ret = tb_nvm_add_non_active(nvm, nvm_write);
++		if (ret)
++			goto err_nvm;
++	}
+ 
+ 	rt->nvm = nvm;
+ 	dev_dbg(&rt->dev, "NVM version %x.%x\n", nvm->major, nvm->minor);
+diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
+index c1376727642a71..05196799296522 100644
+--- a/drivers/tty/serial/8250/8250_port.c
++++ b/drivers/tty/serial/8250/8250_port.c
+@@ -1657,7 +1657,7 @@ static void serial8250_disable_ms(struct uart_port *port)
+ 	if (up->bugs & UART_BUG_NOMSR)
+ 		return;
+ 
+-	mctrl_gpio_disable_ms(up->gpios);
++	mctrl_gpio_disable_ms_no_sync(up->gpios);
+ 
+ 	up->ier &= ~UART_IER_MSI;
+ 	serial_port_out(port, UART_IER, up->ier);
+diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
+index 09b246c9e389ec..8bd39586a49f7f 100644
+--- a/drivers/tty/serial/atmel_serial.c
++++ b/drivers/tty/serial/atmel_serial.c
+@@ -700,7 +700,7 @@ static void atmel_disable_ms(struct uart_port *port)
+ 
+ 	atmel_port->ms_irq_enabled = false;
+ 
+-	mctrl_gpio_disable_ms(atmel_port->gpios);
++	mctrl_gpio_disable_ms_no_sync(atmel_port->gpios);
+ 
+ 	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS))
+ 		idr |= ATMEL_US_CTSIC;
+diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
+index 90974d338f3c0b..8e3b15534bc72c 100644
+--- a/drivers/tty/serial/imx.c
++++ b/drivers/tty/serial/imx.c
+@@ -1596,7 +1596,7 @@ static void imx_uart_shutdown(struct uart_port *port)
+ 		imx_uart_dma_exit(sport);
+ 	}
+ 
+-	mctrl_gpio_disable_ms(sport->gpios);
++	mctrl_gpio_disable_ms_sync(sport->gpios);
+ 
+ 	uart_port_lock_irqsave(&sport->port, &flags);
+ 	ucr2 = imx_uart_readl(sport, UCR2);
+diff --git a/drivers/tty/serial/serial_mctrl_gpio.c b/drivers/tty/serial/serial_mctrl_gpio.c
+index 8855688a5b6c09..ca55bcc0b61119 100644
+--- a/drivers/tty/serial/serial_mctrl_gpio.c
++++ b/drivers/tty/serial/serial_mctrl_gpio.c
+@@ -322,11 +322,7 @@ void mctrl_gpio_enable_ms(struct mctrl_gpios *gpios)
+ }
+ EXPORT_SYMBOL_GPL(mctrl_gpio_enable_ms);
+ 
+-/**
+- * mctrl_gpio_disable_ms - disable irqs and handling of changes to the ms lines
+- * @gpios: gpios to disable
+- */
+-void mctrl_gpio_disable_ms(struct mctrl_gpios *gpios)
++static void mctrl_gpio_disable_ms(struct mctrl_gpios *gpios, bool sync)
+ {
+ 	enum mctrl_gpio_idx i;
+ 
+@@ -342,10 +338,34 @@ void mctrl_gpio_disable_ms(struct mctrl_gpios *gpios)
+ 		if (!gpios->irq[i])
+ 			continue;
+ 
+-		disable_irq(gpios->irq[i]);
++		if (sync)
++			disable_irq(gpios->irq[i]);
++		else
++			disable_irq_nosync(gpios->irq[i]);
+ 	}
+ }
+-EXPORT_SYMBOL_GPL(mctrl_gpio_disable_ms);
++
++/**
++ * mctrl_gpio_disable_ms_sync - disable irqs and handling of changes to the ms
++ * lines, and wait for any pending IRQ to be processed
++ * @gpios: gpios to disable
++ */
++void mctrl_gpio_disable_ms_sync(struct mctrl_gpios *gpios)
++{
++	mctrl_gpio_disable_ms(gpios, true);
++}
++EXPORT_SYMBOL_GPL(mctrl_gpio_disable_ms_sync);
++
++/**
++ * mctrl_gpio_disable_ms_no_sync - disable irqs and handling of changes to the
++ * ms lines, and return immediately
++ * @gpios: gpios to disable
++ */
++void mctrl_gpio_disable_ms_no_sync(struct mctrl_gpios *gpios)
++{
++	mctrl_gpio_disable_ms(gpios, false);
++}
++EXPORT_SYMBOL_GPL(mctrl_gpio_disable_ms_no_sync);
+ 
+ void mctrl_gpio_enable_irq_wake(struct mctrl_gpios *gpios)
+ {
+diff --git a/drivers/tty/serial/serial_mctrl_gpio.h b/drivers/tty/serial/serial_mctrl_gpio.h
+index fc76910fb105a3..79e97838ebe567 100644
+--- a/drivers/tty/serial/serial_mctrl_gpio.h
++++ b/drivers/tty/serial/serial_mctrl_gpio.h
+@@ -87,9 +87,16 @@ void mctrl_gpio_free(struct device *dev, struct mctrl_gpios *gpios);
+ void mctrl_gpio_enable_ms(struct mctrl_gpios *gpios);
+ 
+ /*
+- * Disable gpio interrupts to report status line changes.
++ * Disable gpio interrupts to report status line changes, and block until
++ * any corresponding IRQ is processed
+  */
+-void mctrl_gpio_disable_ms(struct mctrl_gpios *gpios);
++void mctrl_gpio_disable_ms_sync(struct mctrl_gpios *gpios);
++
++/*
++ * Disable gpio interrupts to report status line changes, and return
++ * immediately
++ */
++void mctrl_gpio_disable_ms_no_sync(struct mctrl_gpios *gpios);
+ 
+ /*
+  * Enable gpio wakeup interrupts to enable wake up source.
+@@ -148,7 +155,11 @@ static inline void mctrl_gpio_enable_ms(struct mctrl_gpios *gpios)
+ {
+ }
+ 
+-static inline void mctrl_gpio_disable_ms(struct mctrl_gpios *gpios)
++static inline void mctrl_gpio_disable_ms_sync(struct mctrl_gpios *gpios)
++{
++}
++
++static inline void mctrl_gpio_disable_ms_no_sync(struct mctrl_gpios *gpios)
+ {
+ }
+ 
+diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
+index f43059e1b5c28e..76cf177b040ebe 100644
+--- a/drivers/tty/serial/sh-sci.c
++++ b/drivers/tty/serial/sh-sci.c
+@@ -104,6 +104,20 @@ struct plat_sci_reg {
+ 	u8 offset, size;
+ };
+ 
++struct sci_suspend_regs {
++	u16 scdl;
++	u16 sccks;
++	u16 scsmr;
++	u16 scscr;
++	u16 scfcr;
++	u16 scsptr;
++	u16 hssrr;
++	u16 scpcr;
++	u16 scpdr;
++	u8 scbrr;
++	u8 semr;
++};
++
+ struct sci_port_params {
+ 	const struct plat_sci_reg regs[SCIx_NR_REGS];
+ 	unsigned int fifosize;
+@@ -134,6 +148,8 @@ struct sci_port {
+ 	struct dma_chan			*chan_tx;
+ 	struct dma_chan			*chan_rx;
+ 
++	struct reset_control		*rstc;
++
+ #ifdef CONFIG_SERIAL_SH_SCI_DMA
+ 	struct dma_chan			*chan_tx_saved;
+ 	struct dma_chan			*chan_rx_saved;
+@@ -153,6 +169,7 @@ struct sci_port {
+ 	int				rx_trigger;
+ 	struct timer_list		rx_fifo_timer;
+ 	int				rx_fifo_timeout;
++	struct sci_suspend_regs		suspend_regs;
+ 	u16				hscif_tot;
+ 
+ 	bool has_rtscts;
+@@ -2297,7 +2314,7 @@ static void sci_shutdown(struct uart_port *port)
+ 	dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
+ 
+ 	s->autorts = false;
+-	mctrl_gpio_disable_ms(to_sci_port(port)->gpios);
++	mctrl_gpio_disable_ms_sync(to_sci_port(port)->gpios);
+ 
+ 	uart_port_lock_irqsave(port, &flags);
+ 	sci_stop_rx(port);
+@@ -3384,6 +3401,7 @@ static struct plat_sci_port *sci_parse_dt(struct platform_device *pdev,
+ 	}
+ 
+ 	sp = &sci_ports[id];
++	sp->rstc = rstc;
+ 	*dev_id = id;
+ 
+ 	p->type = SCI_OF_TYPE(data);
+@@ -3532,13 +3550,77 @@ static int sci_probe(struct platform_device *dev)
+ 	return 0;
+ }
+ 
++static void sci_console_save(struct sci_port *s)
++{
++	struct sci_suspend_regs *regs = &s->suspend_regs;
++	struct uart_port *port = &s->port;
++
++	if (sci_getreg(port, SCDL)->size)
++		regs->scdl = sci_serial_in(port, SCDL);
++	if (sci_getreg(port, SCCKS)->size)
++		regs->sccks = sci_serial_in(port, SCCKS);
++	if (sci_getreg(port, SCSMR)->size)
++		regs->scsmr = sci_serial_in(port, SCSMR);
++	if (sci_getreg(port, SCSCR)->size)
++		regs->scscr = sci_serial_in(port, SCSCR);
++	if (sci_getreg(port, SCFCR)->size)
++		regs->scfcr = sci_serial_in(port, SCFCR);
++	if (sci_getreg(port, SCSPTR)->size)
++		regs->scsptr = sci_serial_in(port, SCSPTR);
++	if (sci_getreg(port, SCBRR)->size)
++		regs->scbrr = sci_serial_in(port, SCBRR);
++	if (sci_getreg(port, HSSRR)->size)
++		regs->hssrr = sci_serial_in(port, HSSRR);
++	if (sci_getreg(port, SCPCR)->size)
++		regs->scpcr = sci_serial_in(port, SCPCR);
++	if (sci_getreg(port, SCPDR)->size)
++		regs->scpdr = sci_serial_in(port, SCPDR);
++	if (sci_getreg(port, SEMR)->size)
++		regs->semr = sci_serial_in(port, SEMR);
++}
++
++static void sci_console_restore(struct sci_port *s)
++{
++	struct sci_suspend_regs *regs = &s->suspend_regs;
++	struct uart_port *port = &s->port;
++
++	if (sci_getreg(port, SCDL)->size)
++		sci_serial_out(port, SCDL, regs->scdl);
++	if (sci_getreg(port, SCCKS)->size)
++		sci_serial_out(port, SCCKS, regs->sccks);
++	if (sci_getreg(port, SCSMR)->size)
++		sci_serial_out(port, SCSMR, regs->scsmr);
++	if (sci_getreg(port, SCSCR)->size)
++		sci_serial_out(port, SCSCR, regs->scscr);
++	if (sci_getreg(port, SCFCR)->size)
++		sci_serial_out(port, SCFCR, regs->scfcr);
++	if (sci_getreg(port, SCSPTR)->size)
++		sci_serial_out(port, SCSPTR, regs->scsptr);
++	if (sci_getreg(port, SCBRR)->size)
++		sci_serial_out(port, SCBRR, regs->scbrr);
++	if (sci_getreg(port, HSSRR)->size)
++		sci_serial_out(port, HSSRR, regs->hssrr);
++	if (sci_getreg(port, SCPCR)->size)
++		sci_serial_out(port, SCPCR, regs->scpcr);
++	if (sci_getreg(port, SCPDR)->size)
++		sci_serial_out(port, SCPDR, regs->scpdr);
++	if (sci_getreg(port, SEMR)->size)
++		sci_serial_out(port, SEMR, regs->semr);
++}
++
+ static __maybe_unused int sci_suspend(struct device *dev)
+ {
+ 	struct sci_port *sport = dev_get_drvdata(dev);
+ 
+-	if (sport)
++	if (sport) {
+ 		uart_suspend_port(&sci_uart_driver, &sport->port);
+ 
++		if (!console_suspend_enabled && uart_console(&sport->port))
++			sci_console_save(sport);
++		else
++			return reset_control_assert(sport->rstc);
++	}
++
+ 	return 0;
+ }
+ 
+@@ -3546,8 +3628,18 @@ static __maybe_unused int sci_resume(struct device *dev)
+ {
+ 	struct sci_port *sport = dev_get_drvdata(dev);
+ 
+-	if (sport)
++	if (sport) {
++		if (!console_suspend_enabled && uart_console(&sport->port)) {
++			sci_console_restore(sport);
++		} else {
++			int ret = reset_control_deassert(sport->rstc);
++
++			if (ret)
++				return ret;
++		}
++
+ 		uart_resume_port(&sci_uart_driver, &sport->port);
++	}
+ 
+ 	return 0;
+ }
+diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
+index 9b9981352b1e1a..e685cace5c8540 100644
+--- a/drivers/tty/serial/stm32-usart.c
++++ b/drivers/tty/serial/stm32-usart.c
+@@ -944,7 +944,7 @@ static void stm32_usart_enable_ms(struct uart_port *port)
+ 
+ static void stm32_usart_disable_ms(struct uart_port *port)
+ {
+-	mctrl_gpio_disable_ms(to_stm32_port(port)->gpios);
++	mctrl_gpio_disable_ms_sync(to_stm32_port(port)->gpios);
+ }
+ 
+ /* Transmit stop */
+diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
+index a9b032d2f4a8db..247e425428c88b 100644
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -278,6 +278,7 @@ static const struct ufs_dev_quirk ufs_fixups[] = {
+ 	  .model = UFS_ANY_MODEL,
+ 	  .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
+ 		   UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE |
++		   UFS_DEVICE_QUIRK_PA_HIBER8TIME |
+ 		   UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS },
+ 	{ .wmanufacturerid = UFS_VENDOR_SKHYNIX,
+ 	  .model = UFS_ANY_MODEL,
+@@ -8459,6 +8460,31 @@ static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
+ 	return ret;
+ }
+ 
++/**
++ * ufshcd_quirk_override_pa_h8time - Ensures proper adjustment of PA_HIBERN8TIME.
++ * @hba: per-adapter instance
++ *
++ * Some UFS devices require specific adjustments to the PA_HIBERN8TIME parameter
++ * to ensure proper hibernation timing. This function retrieves the current
++ * PA_HIBERN8TIME value and increments it by one unit (100 us).
++ */
++static void ufshcd_quirk_override_pa_h8time(struct ufs_hba *hba)
++{
++	u32 pa_h8time;
++	int ret;
++
++	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_HIBERN8TIME), &pa_h8time);
++	if (ret) {
++		dev_err(hba->dev, "Failed to get PA_HIBERN8TIME: %d\n", ret);
++		return;
++	}
++
++	/* Increment by 1 to increase hibernation time by 100 µs */
++	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), pa_h8time + 1);
++	if (ret)
++		dev_err(hba->dev, "Failed updating PA_HIBERN8TIME: %d\n", ret);
++}
++
+ static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
+ {
+ 	ufshcd_vops_apply_dev_quirks(hba);
+@@ -8469,6 +8495,9 @@ static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
+ 
+ 	if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
+ 		ufshcd_quirk_tune_host_pa_tactivate(hba);
++
++	if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_HIBER8TIME)
++		ufshcd_quirk_override_pa_h8time(hba);
+ }
+ 
+ static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
+diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
+index 8c26275696df99..f9c51e0f2e37c6 100644
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -1959,7 +1959,6 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
+ 	xhci->interrupters = NULL;
+ 
+ 	xhci->page_size = 0;
+-	xhci->page_shift = 0;
+ 	xhci->usb2_rhub.bus_state.bus_suspended = 0;
+ 	xhci->usb3_rhub.bus_state.bus_suspended = 0;
+ }
+@@ -2378,6 +2377,22 @@ xhci_create_secondary_interrupter(struct usb_hcd *hcd, unsigned int segs,
+ }
+ EXPORT_SYMBOL_GPL(xhci_create_secondary_interrupter);
+ 
++static void xhci_hcd_page_size(struct xhci_hcd *xhci)
++{
++	u32 page_size;
++
++	page_size = readl(&xhci->op_regs->page_size) & XHCI_PAGE_SIZE_MASK;
++	if (!is_power_of_2(page_size)) {
++		xhci_warn(xhci, "Invalid page size register = 0x%x\n", page_size);
++		/* Fallback to 4K page size, since that's common */
++		page_size = 1;
++	}
++
++	xhci->page_size = page_size << 12;
++	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "HCD page size set to %iK",
++		       xhci->page_size >> 10);
++}
++
+ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
+ {
+ 	struct xhci_interrupter *ir;
+@@ -2385,7 +2400,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
+ 	dma_addr_t	dma;
+ 	unsigned int	val, val2;
+ 	u64		val_64;
+-	u32		page_size, temp;
++	u32		temp;
+ 	int		i;
+ 
+ 	INIT_LIST_HEAD(&xhci->cmd_list);
+@@ -2394,20 +2409,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
+ 	INIT_DELAYED_WORK(&xhci->cmd_timer, xhci_handle_command_timeout);
+ 	init_completion(&xhci->cmd_ring_stop_completion);
+ 
+-	page_size = readl(&xhci->op_regs->page_size);
+-	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+-			"Supported page size register = 0x%x", page_size);
+-	val = ffs(page_size) - 1;
+-	if (val < 16)
+-		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+-			"Supported page size of %iK", (1 << (val + 12)) / 1024);
+-	else
+-		xhci_warn(xhci, "WARN: no supported page size\n");
+-	/* Use 4K pages, since that's common and the minimum the HC supports */
+-	xhci->page_shift = 12;
+-	xhci->page_size = 1 << xhci->page_shift;
+-	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+-			"HCD page size set to %iK", xhci->page_size / 1024);
++	xhci_hcd_page_size(xhci);
+ 
+ 	/*
+ 	 * Program the Number of Device Slots Enabled field in the CONFIG
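
The new xhci_hcd_page_size() above replaces the old ffs() scan: it masks the low 16 bits of the PAGESIZE register, requires the field to be a single set bit (bit n meaning 2^(n+12) bytes), and otherwise falls back to the 4K encoding. A standalone sketch of that decoding; the sample register values are illustrative:

#include <stdint.h>
#include <stdio.h>

#define XHCI_PAGE_SIZE_MASK 0xffff

static int is_power_of_2(uint32_t n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

/* Bit n set in PAGESIZE means the controller supports 2^(n+12) bytes;
 * exactly one bit is expected to be set. */
static uint32_t decode_page_size(uint32_t reg)
{
	uint32_t psz = reg & XHCI_PAGE_SIZE_MASK;

	if (!is_power_of_2(psz))
		psz = 1;	/* fall back to 4K, the common case */
	return psz << 12;
}

int main(void)
{
	printf("%u\n", decode_page_size(0x1));	/* 4096 */
	printf("%u\n", decode_page_size(0x8));	/* 32768 */
	printf("%u\n", decode_page_size(0x3));	/* invalid -> 4096 */
	return 0;
}
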
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 3e70e4f6bf0832..fbc8419a547303 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -1156,7 +1156,14 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
+ 	 */
+ 		switch (GET_EP_CTX_STATE(ep_ctx)) {
+ 		case EP_STATE_HALTED:
+-			xhci_dbg(xhci, "Stop ep completion raced with stall, reset ep\n");
++			xhci_dbg(xhci, "Stop ep completion raced with stall\n");
++			/*
++			 * If the halt happened before Stop Endpoint failed, its transfer event
++			 * should have already been handled and Reset Endpoint should be pending.
++			 */
++			if (ep->ep_state & EP_HALTED)
++				goto reset_done;
++
+ 			if (ep->ep_state & EP_HAS_STREAMS) {
+ 				reset_type = EP_SOFT_RESET;
+ 			} else {
+@@ -1167,8 +1174,11 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
+ 			}
+ 			/* reset ep, reset handler cleans up cancelled tds */
+ 			err = xhci_handle_halted_endpoint(xhci, ep, td, reset_type);
++			xhci_dbg(xhci, "Stop ep completion resetting ep, status %d\n", err);
+ 			if (err)
+ 				break;
++reset_done:
++			/* Reset EP handler will clean up cancelled TDs */
+ 			ep->ep_state &= ~EP_STOP_CMD_PENDING;
+ 			return;
+ 		case EP_STATE_STOPPED:
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index 2a954efa53e80e..c4d5b90ef90a86 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -211,6 +211,9 @@ struct xhci_op_regs {
+ #define CONFIG_CIE		(1 << 9)
+ /* bits 10:31 - reserved and should be preserved */
+ 
++/* bits 15:0 - xHC supported page size bits */
++#define XHCI_PAGE_SIZE_MASK     0xffff
++
+ /**
+  * struct xhci_intr_reg - Interrupt Register Set
+  * @irq_pending:	IMAN - Interrupt Management Register.  Used to enable
+@@ -1503,10 +1506,7 @@ struct xhci_hcd {
+ 	u16		max_interrupters;
+ 	/* imod_interval in ns (I * 250ns) */
+ 	u32		imod_interval;
+-	/* 4KB min, 128MB max */
+-	int		page_size;
+-	/* Valid values are 12 to 20, inclusive */
+-	int		page_shift;
++	u32		page_size;
+ 	/* MSI-X/MSI vectors */
+ 	int		nvecs;
+ 	/* optional clocks */
+diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
+index 5f581e71e20105..76aedac37a788a 100644
+--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
++++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
+@@ -3884,6 +3884,9 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
+ 	ndev->mvdev.max_vqs = max_vqs;
+ 	mvdev = &ndev->mvdev;
+ 	mvdev->mdev = mdev;
++	/* cpu_to_mlx5vdpa16() below depends on this flag */
++	mvdev->actual_features =
++			(device_features & BIT_ULL(VIRTIO_F_VERSION_1));
+ 
+ 	ndev->vqs = kcalloc(max_vqs, sizeof(*ndev->vqs), GFP_KERNEL);
+ 	ndev->event_cbs = kcalloc(max_vqs + 1, sizeof(*ndev->event_cbs), GFP_KERNEL);
+diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
+index ea2745c1ac5e68..8ea38e7421df4f 100644
+--- a/drivers/vfio/pci/vfio_pci_config.c
++++ b/drivers/vfio/pci/vfio_pci_config.c
+@@ -1813,7 +1813,8 @@ int vfio_config_init(struct vfio_pci_core_device *vdev)
+ 					cpu_to_le16(PCI_COMMAND_MEMORY);
+ 	}
+ 
+-	if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) || vdev->nointx)
++	if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) || vdev->nointx ||
++	    vdev->pdev->irq == IRQ_NOTCONNECTED)
+ 		vconfig[PCI_INTERRUPT_PIN] = 0;
+ 
+ 	ret = vfio_cap_init(vdev);
+diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c
+index c9eaba2276365c..087c273a547fa9 100644
+--- a/drivers/vfio/pci/vfio_pci_core.c
++++ b/drivers/vfio/pci/vfio_pci_core.c
+@@ -727,15 +727,7 @@ EXPORT_SYMBOL_GPL(vfio_pci_core_finish_enable);
+ static int vfio_pci_get_irq_count(struct vfio_pci_core_device *vdev, int irq_type)
+ {
+ 	if (irq_type == VFIO_PCI_INTX_IRQ_INDEX) {
+-		u8 pin;
+-
+-		if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) ||
+-		    vdev->nointx || vdev->pdev->is_virtfn)
+-			return 0;
+-
+-		pci_read_config_byte(vdev->pdev, PCI_INTERRUPT_PIN, &pin);
+-
+-		return pin ? 1 : 0;
++		return vdev->vconfig[PCI_INTERRUPT_PIN] ? 1 : 0;
+ 	} else if (irq_type == VFIO_PCI_MSI_IRQ_INDEX) {
+ 		u8 pos;
+ 		u16 flags;
+diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
+index 8382c583433565..565966351dfadc 100644
+--- a/drivers/vfio/pci/vfio_pci_intrs.c
++++ b/drivers/vfio/pci/vfio_pci_intrs.c
+@@ -259,7 +259,7 @@ static int vfio_intx_enable(struct vfio_pci_core_device *vdev,
+ 	if (!is_irq_none(vdev))
+ 		return -EINVAL;
+ 
+-	if (!pdev->irq)
++	if (!pdev->irq || pdev->irq == IRQ_NOTCONNECTED)
+ 		return -ENODEV;
+ 
+ 	name = kasprintf(GFP_KERNEL_ACCOUNT, "vfio-intx(%s)", pci_name(pdev));
+diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
+index 35a03306d13454..38d243d914d00b 100644
+--- a/drivers/vhost/scsi.c
++++ b/drivers/vhost/scsi.c
+@@ -571,6 +571,9 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
+ 	int ret;
+ 
+ 	llnode = llist_del_all(&svq->completion_list);
++
++	mutex_lock(&svq->vq.mutex);
++
+ 	llist_for_each_entry_safe(cmd, t, llnode, tvc_completion_list) {
+ 		se_cmd = &cmd->tvc_se_cmd;
+ 
+@@ -604,6 +607,8 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
+ 		vhost_scsi_release_cmd_res(se_cmd);
+ 	}
+ 
++	mutex_unlock(&svq->vq.mutex);
++
+ 	if (signal)
+ 		vhost_signal(&svq->vs->dev, &svq->vq);
+ }
+@@ -757,7 +762,7 @@ vhost_scsi_copy_iov_to_sgl(struct vhost_scsi_cmd *cmd, struct iov_iter *iter,
+ 	size_t len = iov_iter_count(iter);
+ 	unsigned int nbytes = 0;
+ 	struct page *page;
+-	int i;
++	int i, ret;
+ 
+ 	if (cmd->tvc_data_direction == DMA_FROM_DEVICE) {
+ 		cmd->saved_iter_addr = dup_iter(&cmd->saved_iter, iter,
+@@ -770,6 +775,7 @@ vhost_scsi_copy_iov_to_sgl(struct vhost_scsi_cmd *cmd, struct iov_iter *iter,
+ 		page = alloc_page(GFP_KERNEL);
+ 		if (!page) {
+ 			i--;
++			ret = -ENOMEM;
+ 			goto err;
+ 		}
+ 
+@@ -777,8 +783,10 @@ vhost_scsi_copy_iov_to_sgl(struct vhost_scsi_cmd *cmd, struct iov_iter *iter,
+ 		sg_set_page(&sg[i], page, nbytes, 0);
+ 
+ 		if (cmd->tvc_data_direction == DMA_TO_DEVICE &&
+-		    copy_page_from_iter(page, 0, nbytes, iter) != nbytes)
++		    copy_page_from_iter(page, 0, nbytes, iter) != nbytes) {
++			ret = -EFAULT;
+ 			goto err;
++		}
+ 
+ 		len -= nbytes;
+ 	}
+@@ -793,7 +801,7 @@ vhost_scsi_copy_iov_to_sgl(struct vhost_scsi_cmd *cmd, struct iov_iter *iter,
+ 	for (; i >= 0; i--)
+ 		__free_page(sg_page(&sg[i]));
+ 	kfree(cmd->saved_iter_addr);
+-	return -ENOMEM;
++	return ret;
+ }
+ 
+ static int
+@@ -1277,9 +1285,9 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
+ 			 " %d\n", cmd, exp_data_len, prot_bytes, data_direction);
+ 
+ 		if (data_direction != DMA_NONE) {
+-			if (unlikely(vhost_scsi_mapal(cmd, prot_bytes,
+-						      &prot_iter, exp_data_len,
+-						      &data_iter))) {
++			ret = vhost_scsi_mapal(cmd, prot_bytes, &prot_iter,
++					       exp_data_len, &data_iter);
++			if (unlikely(ret)) {
+ 				vq_err(vq, "Failed to map iov to sgl\n");
+ 				vhost_scsi_release_cmd_res(&cmd->tvc_se_cmd);
+ 				goto err;
+@@ -1346,8 +1354,11 @@ static void vhost_scsi_tmf_resp_work(struct vhost_work *work)
+ 	else
+ 		resp_code = VIRTIO_SCSI_S_FUNCTION_REJECTED;
+ 
++	mutex_lock(&tmf->svq->vq.mutex);
+ 	vhost_scsi_send_tmf_resp(tmf->vhost, &tmf->svq->vq, tmf->in_iovs,
+ 				 tmf->vq_desc, &tmf->resp_iov, resp_code);
++	mutex_unlock(&tmf->svq->vq.mutex);
++
+ 	vhost_scsi_release_tmf_res(tmf);
+ }
+ 
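
Besides taking vq.mutex around the completion and TMF response paths, the vhost-scsi hunk makes vhost_scsi_copy_iov_to_sgl() report the actual cause of failure (-ENOMEM vs -EFAULT) instead of always returning -ENOMEM. A small standalone sketch of that record-errno-then-goto unwind idiom; the buffer sizes and the fault condition are invented:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Allocate n buffers and "fill" them, unwinding exactly what was
 * allocated on failure while preserving the specific errno. */
static int alloc_and_fill(char **bufs, int n, const char *src)
{
	int i, ret;

	for (i = 0; i < n; i++) {
		bufs[i] = calloc(1, 16);
		if (!bufs[i]) {
			i--;		/* nothing to free at [i] */
			ret = -ENOMEM;
			goto err;
		}
		if (!src) {		/* stand-in for a copy fault */
			ret = -EFAULT;
			goto err;
		}
		strncpy(bufs[i], src, 15);
	}
	return 0;

err:
	for (; i >= 0; i--)
		free(bufs[i]);
	return ret;
}

int main(void)
{
	char *bufs[4];

	printf("%d\n", alloc_and_fill(bufs, 4, NULL));	/* -14: EFAULT */
	return 0;
}
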
+diff --git a/drivers/video/fbdev/core/bitblit.c b/drivers/video/fbdev/core/bitblit.c
+index 3ff1b2a8659e87..f9475c14f7339b 100644
+--- a/drivers/video/fbdev/core/bitblit.c
++++ b/drivers/video/fbdev/core/bitblit.c
+@@ -59,12 +59,11 @@ static void bit_bmove(struct vc_data *vc, struct fb_info *info, int sy,
+ }
+ 
+ static void bit_clear(struct vc_data *vc, struct fb_info *info, int sy,
+-		      int sx, int height, int width)
++		      int sx, int height, int width, int fg, int bg)
+ {
+-	int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
+ 	struct fb_fillrect region;
+ 
+-	region.color = attr_bgcol_ec(bgshift, vc, info);
++	region.color = bg;
+ 	region.dx = sx * vc->vc_font.width;
+ 	region.dy = sy * vc->vc_font.height;
+ 	region.width = width * vc->vc_font.width;
+diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
+index e8b4e8c119b5ce..07d127110ca4c9 100644
+--- a/drivers/video/fbdev/core/fbcon.c
++++ b/drivers/video/fbdev/core/fbcon.c
+@@ -1258,7 +1258,7 @@ static void __fbcon_clear(struct vc_data *vc, unsigned int sy, unsigned int sx,
+ {
+ 	struct fb_info *info = fbcon_info_from_console(vc->vc_num);
+ 	struct fbcon_ops *ops = info->fbcon_par;
+-
++	int fg, bg;
+ 	struct fbcon_display *p = &fb_display[vc->vc_num];
+ 	u_int y_break;
+ 
+@@ -1279,16 +1279,18 @@ static void __fbcon_clear(struct vc_data *vc, unsigned int sy, unsigned int sx,
+ 		fbcon_clear_margins(vc, 0);
+ 	}
+ 
++	fg = get_color(vc, info, vc->vc_video_erase_char, 1);
++	bg = get_color(vc, info, vc->vc_video_erase_char, 0);
+ 	/* Split blits that cross physical y_wrap boundary */
+ 
+ 	y_break = p->vrows - p->yscroll;
+ 	if (sy < y_break && sy + height - 1 >= y_break) {
+ 		u_int b = y_break - sy;
+-		ops->clear(vc, info, real_y(p, sy), sx, b, width);
++		ops->clear(vc, info, real_y(p, sy), sx, b, width, fg, bg);
+ 		ops->clear(vc, info, real_y(p, sy + b), sx, height - b,
+-				 width);
++				 width, fg, bg);
+ 	} else
+-		ops->clear(vc, info, real_y(p, sy), sx, height, width);
++		ops->clear(vc, info, real_y(p, sy), sx, height, width, fg, bg);
+ }
+ 
+ static void fbcon_clear(struct vc_data *vc, unsigned int sy, unsigned int sx,
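
With this change __fbcon_clear() decodes the erase colour once via get_color() and passes fg/bg down, instead of each ops->clear() implementation re-deriving them from the attribute shifts. The 13-vs-12 (and 9-vs-8) shifts exist because 512-glyph fonts steal an extra glyph bit and push the attribute nibbles up by one. A standalone sketch of that decode; the cell layout constants follow the usual VGA-style packing and are meant as illustration only:

#include <stdio.h>

/* A 16-bit console cell packs a glyph plus 4-bit fg/bg attributes; a
 * 512-glyph font needs a 9th glyph bit, pushing both fields up one. */
static int attr_fgcol(int fgshift, unsigned short cell)
{
	return (cell >> fgshift) & 0x0f;
}

static int attr_bgcol(int bgshift, unsigned short cell)
{
	return (cell >> bgshift) & 0x0f;
}

int main(void)
{
	unsigned short erase = 0x1720;	/* bg=1, fg=7, glyph=0x20 */
	int hi_font = 0;		/* 256-glyph font */
	int fgshift = hi_font ? 9 : 8;
	int bgshift = hi_font ? 13 : 12;

	/* Decode once, pass plain ints around, as __fbcon_clear() now does */
	printf("fg=%d bg=%d\n", attr_fgcol(fgshift, erase),
	       attr_bgcol(bgshift, erase));	/* fg=7 bg=1 */
	return 0;
}
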
+diff --git a/drivers/video/fbdev/core/fbcon.h b/drivers/video/fbdev/core/fbcon.h
+index df70ea5ec5b379..4d97e6d8a16a24 100644
+--- a/drivers/video/fbdev/core/fbcon.h
++++ b/drivers/video/fbdev/core/fbcon.h
+@@ -55,7 +55,7 @@ struct fbcon_ops {
+ 	void (*bmove)(struct vc_data *vc, struct fb_info *info, int sy,
+ 		      int sx, int dy, int dx, int height, int width);
+ 	void (*clear)(struct vc_data *vc, struct fb_info *info, int sy,
+-		      int sx, int height, int width);
++		      int sx, int height, int width, int fg, int bg);
+ 	void (*putcs)(struct vc_data *vc, struct fb_info *info,
+ 		      const unsigned short *s, int count, int yy, int xx,
+ 		      int fg, int bg);
+@@ -116,42 +116,6 @@ static inline int mono_col(const struct fb_info *info)
+ 	return (~(0xfff << max_len)) & 0xff;
+ }
+ 
+-static inline int attr_col_ec(int shift, struct vc_data *vc,
+-			      struct fb_info *info, int is_fg)
+-{
+-	int is_mono01;
+-	int col;
+-	int fg;
+-	int bg;
+-
+-	if (!vc)
+-		return 0;
+-
+-	if (vc->vc_can_do_color)
+-		return is_fg ? attr_fgcol(shift,vc->vc_video_erase_char)
+-			: attr_bgcol(shift,vc->vc_video_erase_char);
+-
+-	if (!info)
+-		return 0;
+-
+-	col = mono_col(info);
+-	is_mono01 = info->fix.visual == FB_VISUAL_MONO01;
+-
+-	if (attr_reverse(vc->vc_video_erase_char)) {
+-		fg = is_mono01 ? col : 0;
+-		bg = is_mono01 ? 0 : col;
+-	}
+-	else {
+-		fg = is_mono01 ? 0 : col;
+-		bg = is_mono01 ? col : 0;
+-	}
+-
+-	return is_fg ? fg : bg;
+-}
+-
+-#define attr_bgcol_ec(bgshift, vc, info) attr_col_ec(bgshift, vc, info, 0)
+-#define attr_fgcol_ec(fgshift, vc, info) attr_col_ec(fgshift, vc, info, 1)
+-
+     /*
+      *  Scroll Method
+      */
+diff --git a/drivers/video/fbdev/core/fbcon_ccw.c b/drivers/video/fbdev/core/fbcon_ccw.c
+index f9b794ff7d3968..89ef4ba7e8672b 100644
+--- a/drivers/video/fbdev/core/fbcon_ccw.c
++++ b/drivers/video/fbdev/core/fbcon_ccw.c
+@@ -78,14 +78,13 @@ static void ccw_bmove(struct vc_data *vc, struct fb_info *info, int sy,
+ }
+ 
+ static void ccw_clear(struct vc_data *vc, struct fb_info *info, int sy,
+-		     int sx, int height, int width)
++		     int sx, int height, int width, int fg, int bg)
+ {
+ 	struct fbcon_ops *ops = info->fbcon_par;
+ 	struct fb_fillrect region;
+-	int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
+ 	u32 vyres = GETVYRES(ops->p, info);
+ 
+-	region.color = attr_bgcol_ec(bgshift,vc,info);
++	region.color = bg;
+ 	region.dx = sy * vc->vc_font.height;
+ 	region.dy = vyres - ((sx + width) * vc->vc_font.width);
+ 	region.height = width * vc->vc_font.width;
+diff --git a/drivers/video/fbdev/core/fbcon_cw.c b/drivers/video/fbdev/core/fbcon_cw.c
+index 903f6fc174e146..b9dac7940fb777 100644
+--- a/drivers/video/fbdev/core/fbcon_cw.c
++++ b/drivers/video/fbdev/core/fbcon_cw.c
+@@ -63,14 +63,13 @@ static void cw_bmove(struct vc_data *vc, struct fb_info *info, int sy,
+ }
+ 
+ static void cw_clear(struct vc_data *vc, struct fb_info *info, int sy,
+-		     int sx, int height, int width)
++		     int sx, int height, int width, int fg, int bg)
+ {
+ 	struct fbcon_ops *ops = info->fbcon_par;
+ 	struct fb_fillrect region;
+-	int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
+ 	u32 vxres = GETVXRES(ops->p, info);
+ 
+-	region.color = attr_bgcol_ec(bgshift,vc,info);
++	region.color = bg;
+ 	region.dx = vxres - ((sy + height) * vc->vc_font.height);
+ 	region.dy = sx *  vc->vc_font.width;
+ 	region.height = width * vc->vc_font.width;
+diff --git a/drivers/video/fbdev/core/fbcon_ud.c b/drivers/video/fbdev/core/fbcon_ud.c
+index 594331936fd3cf..0af7913a2abdcc 100644
+--- a/drivers/video/fbdev/core/fbcon_ud.c
++++ b/drivers/video/fbdev/core/fbcon_ud.c
+@@ -64,15 +64,14 @@ static void ud_bmove(struct vc_data *vc, struct fb_info *info, int sy,
+ }
+ 
+ static void ud_clear(struct vc_data *vc, struct fb_info *info, int sy,
+-		     int sx, int height, int width)
++		     int sx, int height, int width, int fg, int bg)
+ {
+ 	struct fbcon_ops *ops = info->fbcon_par;
+ 	struct fb_fillrect region;
+-	int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
+ 	u32 vyres = GETVYRES(ops->p, info);
+ 	u32 vxres = GETVXRES(ops->p, info);
+ 
+-	region.color = attr_bgcol_ec(bgshift,vc,info);
++	region.color = bg;
+ 	region.dy = vyres - ((sy + height) * vc->vc_font.height);
+ 	region.dx = vxres - ((sx + width) *  vc->vc_font.width);
+ 	region.width = width * vc->vc_font.width;
+diff --git a/drivers/video/fbdev/core/tileblit.c b/drivers/video/fbdev/core/tileblit.c
+index eff7ec4da1671f..d342b90c42b7fe 100644
+--- a/drivers/video/fbdev/core/tileblit.c
++++ b/drivers/video/fbdev/core/tileblit.c
+@@ -32,16 +32,14 @@ static void tile_bmove(struct vc_data *vc, struct fb_info *info, int sy,
+ }
+ 
+ static void tile_clear(struct vc_data *vc, struct fb_info *info, int sy,
+-		       int sx, int height, int width)
++		       int sx, int height, int width, int fg, int bg)
+ {
+ 	struct fb_tilerect rect;
+-	int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
+-	int fgshift = (vc->vc_hi_font_mask) ? 9 : 8;
+ 
+ 	rect.index = vc->vc_video_erase_char &
+ 		((vc->vc_hi_font_mask) ? 0x1ff : 0xff);
+-	rect.fg = attr_fgcol_ec(fgshift, vc, info);
+-	rect.bg = attr_bgcol_ec(bgshift, vc, info);
++	rect.fg = fg;
++	rect.bg = bg;
+ 	rect.sx = sx;
+ 	rect.sy = sy;
+ 	rect.width = width;
+@@ -76,7 +74,42 @@ static void tile_putcs(struct vc_data *vc, struct fb_info *info,
+ static void tile_clear_margins(struct vc_data *vc, struct fb_info *info,
+ 			       int color, int bottom_only)
+ {
+-	return;
++	unsigned int cw = vc->vc_font.width;
++	unsigned int ch = vc->vc_font.height;
++	unsigned int rw = info->var.xres - (vc->vc_cols*cw);
++	unsigned int bh = info->var.yres - (vc->vc_rows*ch);
++	unsigned int rs = info->var.xres - rw;
++	unsigned int bs = info->var.yres - bh;
++	unsigned int vwt = info->var.xres_virtual / cw;
++	unsigned int vht = info->var.yres_virtual / ch;
++	struct fb_tilerect rect;
++
++	rect.index = vc->vc_video_erase_char &
++		((vc->vc_hi_font_mask) ? 0x1ff : 0xff);
++	rect.fg = color;
++	rect.bg = color;
++
++	if ((int) rw > 0 && !bottom_only) {
++		rect.sx = (info->var.xoffset + rs + cw - 1) / cw;
++		rect.sy = 0;
++		rect.width = (rw + cw - 1) / cw;
++		rect.height = vht;
++		if (rect.width + rect.sx > vwt)
++			rect.width = vwt - rect.sx;
++		if (rect.sx < vwt)
++			info->tileops->fb_tilefill(info, &rect);
++	}
++
++	if ((int) bh > 0) {
++		rect.sx = info->var.xoffset / cw;
++		rect.sy = (info->var.yoffset + bs) / ch;
++		rect.width = rs / cw;
++		rect.height = (bh + ch - 1) / ch;
++		if (rect.height + rect.sy > vht)
++			rect.height = vht - rect.sy;
++		if (rect.sy < vht)
++			info->tileops->fb_tilefill(info, &rect);
++	}
+ }
+ 
+ static void tile_cursor(struct vc_data *vc, struct fb_info *info, bool enable,
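
tile_clear_margins() above converts the leftover pixel margins into tile-grid rectangles: divide with round-up so partially covered tiles are included, then clamp to the virtual tile grid. A standalone sketch of the right-margin case (xoffset omitted for brevity, numbers invented):

#include <stdio.h>

struct rect { unsigned int sx, sy, w, h; };

/* Turn the right-hand pixel margin into a tile-aligned rectangle:
 * round partially covered tiles in, then clamp to the virtual grid. */
static struct rect right_margin(unsigned int xres, unsigned int cols,
				unsigned int cw, unsigned int vwt,
				unsigned int vht)
{
	unsigned int rw = xres - cols * cw;	/* leftover pixels */
	unsigned int rs = xres - rw;		/* margin start, pixels */
	struct rect r = {
		.sx = (rs + cw - 1) / cw,	/* round up to a tile */
		.sy = 0,
		.w  = (rw + cw - 1) / cw,
		.h  = vht,
	};

	if (r.sx + r.w > vwt)			/* clamp to the grid */
		r.w = vwt - r.sx;
	return r;
}

int main(void)
{
	/* 1030px screen, 8px tiles: 128 full columns, 6px margin;
	 * virtual width 1032px gives a 129-column tile grid. */
	struct rect r = right_margin(1030, 128, 8, 129, 96);

	printf("sx=%u w=%u\n", r.sx, r.w);	/* sx=128 w=1 */
	return 0;
}
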
+diff --git a/drivers/video/fbdev/fsl-diu-fb.c b/drivers/video/fbdev/fsl-diu-fb.c
+index 5ac8201c353378..b71d15794ce8b8 100644
+--- a/drivers/video/fbdev/fsl-diu-fb.c
++++ b/drivers/video/fbdev/fsl-diu-fb.c
+@@ -1827,6 +1827,7 @@ static void fsl_diu_remove(struct platform_device *pdev)
+ 	int i;
+ 
+ 	data = dev_get_drvdata(&pdev->dev);
++	device_remove_file(&pdev->dev, &data->dev_attr);
+ 	disable_lcdc(&data->fsl_diu_info[0]);
+ 
+ 	free_irq(data->irq, data->diu_reg);
+diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
+index 1f8a322eb00be0..147926c8bae09d 100644
+--- a/drivers/virtio/virtio_ring.c
++++ b/drivers/virtio/virtio_ring.c
+@@ -2530,7 +2530,7 @@ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
+ 	struct vring_virtqueue *vq = to_vvq(_vq);
+ 
+ 	if (vq->event_triggered)
+-		vq->event_triggered = false;
++		data_race(vq->event_triggered = false);
+ 
+ 	return vq->packed_ring ? virtqueue_enable_cb_delayed_packed(_vq) :
+ 				 virtqueue_enable_cb_delayed_split(_vq);
+diff --git a/drivers/watchdog/aspeed_wdt.c b/drivers/watchdog/aspeed_wdt.c
+index b4773a6aaf8cc7..837e15701c0e27 100644
+--- a/drivers/watchdog/aspeed_wdt.c
++++ b/drivers/watchdog/aspeed_wdt.c
+@@ -11,21 +11,30 @@
+ #include <linux/io.h>
+ #include <linux/kernel.h>
+ #include <linux/kstrtox.h>
++#include <linux/mfd/syscon.h>
+ #include <linux/module.h>
+ #include <linux/of.h>
+ #include <linux/of_irq.h>
+ #include <linux/platform_device.h>
++#include <linux/regmap.h>
+ #include <linux/watchdog.h>
+ 
+ static bool nowayout = WATCHDOG_NOWAYOUT;
+ module_param(nowayout, bool, 0);
+ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
+ 				__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
++struct aspeed_wdt_scu {
++	const char *compatible;
++	u32 reset_status_reg;
++	u32 wdt_reset_mask;
++	u32 wdt_reset_mask_shift;
++};
+ 
+ struct aspeed_wdt_config {
+ 	u32 ext_pulse_width_mask;
+ 	u32 irq_shift;
+ 	u32 irq_mask;
++	struct aspeed_wdt_scu scu;
+ };
+ 
+ struct aspeed_wdt {
+@@ -39,18 +48,36 @@ static const struct aspeed_wdt_config ast2400_config = {
+ 	.ext_pulse_width_mask = 0xff,
+ 	.irq_shift = 0,
+ 	.irq_mask = 0,
++	.scu = {
++		.compatible = "aspeed,ast2400-scu",
++		.reset_status_reg = 0x3c,
++		.wdt_reset_mask = 0x1,
++		.wdt_reset_mask_shift = 1,
++	},
+ };
+ 
+ static const struct aspeed_wdt_config ast2500_config = {
+ 	.ext_pulse_width_mask = 0xfffff,
+ 	.irq_shift = 12,
+ 	.irq_mask = GENMASK(31, 12),
++	.scu = {
++		.compatible = "aspeed,ast2500-scu",
++		.reset_status_reg = 0x3c,
++		.wdt_reset_mask = 0x1,
++		.wdt_reset_mask_shift = 2,
++	},
+ };
+ 
+ static const struct aspeed_wdt_config ast2600_config = {
+ 	.ext_pulse_width_mask = 0xfffff,
+ 	.irq_shift = 0,
+ 	.irq_mask = GENMASK(31, 10),
++	.scu = {
++		.compatible = "aspeed,ast2600-scu",
++		.reset_status_reg = 0x74,
++		.wdt_reset_mask = 0xf,
++		.wdt_reset_mask_shift = 16,
++	},
+ };
+ 
+ static const struct of_device_id aspeed_wdt_of_table[] = {
+@@ -213,6 +240,56 @@ static int aspeed_wdt_restart(struct watchdog_device *wdd,
+ 	return 0;
+ }
+ 
++static void aspeed_wdt_update_bootstatus(struct platform_device *pdev,
++					 struct aspeed_wdt *wdt)
++{
++	const struct resource *res;
++	struct aspeed_wdt_scu scu = wdt->cfg->scu;
++	struct regmap *scu_base;
++	u32 reset_mask_width;
++	u32 reset_mask_shift;
++	u32 idx = 0;
++	u32 status;
++	int ret;
++
++	if (!of_device_is_compatible(pdev->dev.of_node, "aspeed,ast2400-wdt")) {
++		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++		idx = ((intptr_t)wdt->base & 0x00000fff) / (uintptr_t)resource_size(res);
++	}
++
++	scu_base = syscon_regmap_lookup_by_compatible(scu.compatible);
++	if (IS_ERR(scu_base)) {
++		wdt->wdd.bootstatus = WDIOS_UNKNOWN;
++		return;
++	}
++
++	ret = regmap_read(scu_base, scu.reset_status_reg, &status);
++	if (ret) {
++		wdt->wdd.bootstatus = WDIOS_UNKNOWN;
++		return;
++	}
++
++	reset_mask_width = hweight32(scu.wdt_reset_mask);
++	reset_mask_shift = scu.wdt_reset_mask_shift +
++			   reset_mask_width * idx;
++
++	if (status & (scu.wdt_reset_mask << reset_mask_shift))
++		wdt->wdd.bootstatus = WDIOF_CARDRESET;
++
++	/* clear wdt reset event flag */
++	if (of_device_is_compatible(pdev->dev.of_node, "aspeed,ast2400-wdt") ||
++	    of_device_is_compatible(pdev->dev.of_node, "aspeed,ast2500-wdt")) {
++		ret = regmap_read(scu_base, scu.reset_status_reg, &status);
++		if (!ret) {
++			status &= ~(scu.wdt_reset_mask << reset_mask_shift);
++			regmap_write(scu_base, scu.reset_status_reg, status);
++		}
++	} else {
++		regmap_write(scu_base, scu.reset_status_reg,
++			     scu.wdt_reset_mask << reset_mask_shift);
++	}
++}
++
+ /* access_cs0 shows if cs0 is accessible, hence the reverted bit */
+ static ssize_t access_cs0_show(struct device *dev,
+ 			       struct device_attribute *attr, char *buf)
+@@ -458,10 +535,10 @@ static int aspeed_wdt_probe(struct platform_device *pdev)
+ 		writel(duration - 1, wdt->base + WDT_RESET_WIDTH);
+ 	}
+ 
++	aspeed_wdt_update_bootstatus(pdev, wdt);
++
+ 	status = readl(wdt->base + WDT_TIMEOUT_STATUS);
+ 	if (status & WDT_TIMEOUT_STATUS_BOOT_SECONDARY) {
+-		wdt->wdd.bootstatus = WDIOF_CARDRESET;
+-
+ 		if (of_device_is_compatible(np, "aspeed,ast2400-wdt") ||
+ 		    of_device_is_compatible(np, "aspeed,ast2500-wdt"))
+ 			wdt->wdd.groups = bswitch_groups;
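
aspeed_wdt_update_bootstatus() finds this instance's reset flag by widening the per-watchdog field: shift = wdt_reset_mask_shift + hweight32(wdt_reset_mask) * idx. A standalone sketch of that field arithmetic using the AST2600-style layout from the table above; the sample status word is invented:

#include <stdint.h>
#include <stdio.h>

struct scu_layout {
	uint32_t wdt_reset_mask;	/* per-instance field, e.g. 0xf */
	uint32_t wdt_reset_mask_shift;	/* shift of instance 0 */
};

static int wdt_caused_reset(const struct scu_layout *scu,
			    uint32_t status, unsigned int idx)
{
	uint32_t width = (uint32_t)__builtin_popcount(scu->wdt_reset_mask);
	uint32_t shift = scu->wdt_reset_mask_shift + width * idx;

	return !!(status & (scu->wdt_reset_mask << shift));
}

int main(void)
{
	/* AST2600-style layout: a 4-bit field per watchdog from bit 16 */
	const struct scu_layout ast2600 = {
		.wdt_reset_mask = 0xf,
		.wdt_reset_mask_shift = 16,
	};
	uint32_t status = 0x00100000;	/* invented: WDT1 event latched */

	printf("wdt0: %d\n", wdt_caused_reset(&ast2600, status, 0)); /* 0 */
	printf("wdt1: %d\n", wdt_caused_reset(&ast2600, status, 1)); /* 1 */
	return 0;
}
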
+diff --git a/drivers/xen/pci.c b/drivers/xen/pci.c
+index 416f231809cb69..bfe07adb3e3a6c 100644
+--- a/drivers/xen/pci.c
++++ b/drivers/xen/pci.c
+@@ -43,6 +43,18 @@ static int xen_add_device(struct device *dev)
+ 		pci_mcfg_reserved = true;
+ 	}
+ #endif
++
++	if (pci_domain_nr(pci_dev->bus) >> 16) {
++		/*
++		 * The hypercall interface is limited to 16bit PCI segment
++		 * values, do not attempt to register devices with Xen in
++		 * segments greater than or equal to 0x10000.
++		 */
++		dev_info(dev,
++			 "not registering with Xen: invalid PCI segment\n");
++		return 0;
++	}
++
+ 	if (pci_seg_supported) {
+ 		DEFINE_RAW_FLEX(struct physdev_pci_device_add, add, optarr, 1);
+ 
+@@ -149,6 +161,16 @@ static int xen_remove_device(struct device *dev)
+ 	int r;
+ 	struct pci_dev *pci_dev = to_pci_dev(dev);
+ 
++	if (pci_domain_nr(pci_dev->bus) >> 16) {
++		/*
++		 * The hypercall interface is limited to 16bit PCI segment
++		 * values.
++		 */
++		dev_info(dev,
++			 "not unregistering with Xen: invalid PCI segment\n");
++		return 0;
++	}
++
+ 	if (pci_seg_supported) {
+ 		struct physdev_pci_device device = {
+ 			.seg = pci_domain_nr(pci_dev->bus),
+@@ -182,6 +204,16 @@ int xen_reset_device(const struct pci_dev *dev)
+ 		.flags = PCI_DEVICE_RESET_FLR,
+ 	};
+ 
++	if (pci_domain_nr(dev->bus) >> 16) {
++		/*
++		 * The hypercall interface is limited to 16bit PCI segment
++		 * values.
++		 */
++		dev_info(&dev->dev,
++			 "unable to notify Xen of device reset: invalid PCI segment\n");
++		return 0;
++	}
++
+ 	return HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_reset, &device);
+ }
+ EXPORT_SYMBOL_GPL(xen_reset_device);
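
All three xen/pci.c hunks add the same guard: the physdev hypercall structs carry the PCI segment in 16 bits, so domains that do not fit are skipped up front instead of being silently truncated. The check in miniature:

#include <stdint.h>
#include <stdio.h>

/* The physdev hypercall ABI stores the PCI segment in a u16. */
static int segment_fits_hypercall(uint32_t domain)
{
	return (domain >> 16) == 0;
}

int main(void)
{
	printf("%d\n", segment_fits_hypercall(0x0001));  /* 1: register */
	printf("%d\n", segment_fits_hypercall(0x10000)); /* 0: skip */
	return 0;
}
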
+diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c
+index 544d3f9010b92a..1db82da56db62b 100644
+--- a/drivers/xen/platform-pci.c
++++ b/drivers/xen/platform-pci.c
+@@ -26,6 +26,8 @@
+ 
+ #define DRV_NAME    "xen-platform-pci"
+ 
++#define PCI_DEVICE_ID_XEN_PLATFORM_XS61	0x0002
++
+ static unsigned long platform_mmio;
+ static unsigned long platform_mmio_alloc;
+ static unsigned long platform_mmiolen;
+@@ -174,6 +176,8 @@ static int platform_pci_probe(struct pci_dev *pdev,
+ static const struct pci_device_id platform_pci_tbl[] = {
+ 	{PCI_VENDOR_ID_XEN, PCI_DEVICE_ID_XEN_PLATFORM,
+ 		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
++	{PCI_VENDOR_ID_XEN, PCI_DEVICE_ID_XEN_PLATFORM_XS61,
++		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ 	{0,}
+ };
+ 
+diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
+index 6d32ffb0113650..86fe6e77905669 100644
+--- a/drivers/xen/xenbus/xenbus_probe.c
++++ b/drivers/xen/xenbus/xenbus_probe.c
+@@ -966,9 +966,15 @@ static int __init xenbus_init(void)
+ 	if (xen_pv_domain())
+ 		xen_store_domain_type = XS_PV;
+ 	if (xen_hvm_domain())
++	{
+ 		xen_store_domain_type = XS_HVM;
+-	if (xen_hvm_domain() && xen_initial_domain())
+-		xen_store_domain_type = XS_LOCAL;
++		err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v);
++		if (err)
++			goto out_error;
++		xen_store_evtchn = (int)v;
++		if (!v && xen_initial_domain())
++			xen_store_domain_type = XS_LOCAL;
++	}
+ 	if (xen_pv_domain() && !xen_start_info->store_evtchn)
+ 		xen_store_domain_type = XS_LOCAL;
+ 	if (xen_pv_domain() && xen_start_info->store_evtchn)
+@@ -987,10 +993,6 @@ static int __init xenbus_init(void)
+ 		xen_store_interface = gfn_to_virt(xen_store_gfn);
+ 		break;
+ 	case XS_HVM:
+-		err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v);
+-		if (err)
+-			goto out_error;
+-		xen_store_evtchn = (int)v;
+ 		err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v);
+ 		if (err)
+ 			goto out_error;
+diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
+index 4423d8b716a58f..aa8656c8b7e7e7 100644
+--- a/fs/btrfs/block-group.c
++++ b/fs/btrfs/block-group.c
+@@ -1891,6 +1891,17 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
+ 			up_write(&space_info->groups_sem);
+ 			goto next;
+ 		}
++
++		/*
++		 * Cache the zone_unusable value before turning the block group
++		 * to read only. As soon as the block group is read only it's
++		 * to read only. As soon as the block group is read only its
++		 * bytes and isn't available for calculations anymore. We also
++		 * cache it before unlocking the block group, to prevent races
++		 * (reports from KCSAN and such tools) with tasks updating it.
++		 */
++		zone_unusable = bg->zone_unusable;
++
+ 		spin_unlock(&bg->lock);
+ 		spin_unlock(&space_info->lock);
+ 
+@@ -1907,13 +1918,6 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
+ 			goto next;
+ 		}
+ 
+-		/*
+-		 * Cache the zone_unusable value before turning the block group
+-		 * to read only. As soon as the blog group is read only it's
+-		 * zone_unusable value gets moved to the block group's read-only
+-		 * bytes and isn't available for calculations anymore.
+-		 */
+-		zone_unusable = bg->zone_unusable;
+ 		ret = inc_block_group_ro(bg, 0);
+ 		up_write(&space_info->groups_sem);
+ 		if (ret < 0)
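
Per the comment added above, bg->zone_unusable must be sampled while the group's locks are still held, because the value is moved into read-only bytes once the group flips RO and concurrent updaters would otherwise race with a later read. A minimal pthread sketch of the snapshot-under-lock pattern; the struct is illustrative:

#include <pthread.h>
#include <stdio.h>

struct group {
	pthread_mutex_t lock;
	unsigned long zone_unusable;	/* repurposed once the group is RO */
};

static unsigned long snapshot_unusable(struct group *g)
{
	unsigned long cached;

	pthread_mutex_lock(&g->lock);
	cached = g->zone_unusable;	/* read before dropping the lock */
	pthread_mutex_unlock(&g->lock);

	/* From here on use the cached copy: other threads may move or
	 * zero g->zone_unusable the moment the lock is released. */
	return cached;
}

int main(void)
{
	struct group g = { PTHREAD_MUTEX_INITIALIZER, 4096 };

	printf("%lu\n", snapshot_unusable(&g));
	return 0;
}
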
+diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
+index 40332ab62f1018..65d883da86c60b 100644
+--- a/fs/btrfs/compression.c
++++ b/fs/btrfs/compression.c
+@@ -606,7 +606,7 @@ void btrfs_submit_compressed_read(struct btrfs_bio *bbio)
+ 	free_extent_map(em);
+ 
+ 	cb->nr_folios = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
+-	cb->compressed_folios = kcalloc(cb->nr_folios, sizeof(struct page *), GFP_NOFS);
++	cb->compressed_folios = kcalloc(cb->nr_folios, sizeof(struct folio *), GFP_NOFS);
+ 	if (!cb->compressed_folios) {
+ 		ret = BLK_STS_RESOURCE;
+ 		goto out_free_bio;
+diff --git a/fs/btrfs/discard.c b/fs/btrfs/discard.c
+index e9cdc1759dada8..de23c4b3515e58 100644
+--- a/fs/btrfs/discard.c
++++ b/fs/btrfs/discard.c
+@@ -168,13 +168,7 @@ static bool remove_from_discard_list(struct btrfs_discard_ctl *discard_ctl,
+ 	block_group->discard_eligible_time = 0;
+ 	queued = !list_empty(&block_group->discard_list);
+ 	list_del_init(&block_group->discard_list);
+-	/*
+-	 * If the block group is currently running in the discard workfn, we
+-	 * don't want to deref it, since it's still being used by the workfn.
+-	 * The workfn will notice this case and deref the block group when it is
+-	 * finished.
+-	 */
+-	if (queued && !running)
++	if (queued)
+ 		btrfs_put_block_group(block_group);
+ 
+ 	spin_unlock(&discard_ctl->lock);
+@@ -273,9 +267,10 @@ static struct btrfs_block_group *peek_discard_list(
+ 			block_group->discard_cursor = block_group->start;
+ 			block_group->discard_state = BTRFS_DISCARD_EXTENTS;
+ 		}
+-		discard_ctl->block_group = block_group;
+ 	}
+ 	if (block_group) {
++		btrfs_get_block_group(block_group);
++		discard_ctl->block_group = block_group;
+ 		*discard_state = block_group->discard_state;
+ 		*discard_index = block_group->discard_index;
+ 	}
+@@ -506,9 +501,20 @@ static void btrfs_discard_workfn(struct work_struct *work)
+ 
+ 	block_group = peek_discard_list(discard_ctl, &discard_state,
+ 					&discard_index, now);
+-	if (!block_group || !btrfs_run_discard_work(discard_ctl))
++	if (!block_group)
+ 		return;
++	if (!btrfs_run_discard_work(discard_ctl)) {
++		spin_lock(&discard_ctl->lock);
++		btrfs_put_block_group(block_group);
++		discard_ctl->block_group = NULL;
++		spin_unlock(&discard_ctl->lock);
++		return;
++	}
+ 	if (now < block_group->discard_eligible_time) {
++		spin_lock(&discard_ctl->lock);
++		btrfs_put_block_group(block_group);
++		discard_ctl->block_group = NULL;
++		spin_unlock(&discard_ctl->lock);
+ 		btrfs_discard_schedule_work(discard_ctl, false);
+ 		return;
+ 	}
+@@ -560,15 +566,7 @@ static void btrfs_discard_workfn(struct work_struct *work)
+ 	spin_lock(&discard_ctl->lock);
+ 	discard_ctl->prev_discard = trimmed;
+ 	discard_ctl->prev_discard_time = now;
+-	/*
+-	 * If the block group was removed from the discard list while it was
+-	 * running in this workfn, then we didn't deref it, since this function
+-	 * still owned that reference. But we set the discard_ctl->block_group
+-	 * back to NULL, so we can use that condition to know that now we need
+-	 * to deref the block_group.
+-	 */
+-	if (discard_ctl->block_group == NULL)
+-		btrfs_put_block_group(block_group);
++	btrfs_put_block_group(block_group);
+ 	discard_ctl->block_group = NULL;
+ 	__btrfs_discard_schedule_work(discard_ctl, now, false);
+ 	spin_unlock(&discard_ctl->lock);
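
The discard rework drops the old "workfn owns an implicit reference, signalled by NULLing discard_ctl->block_group" convention in favour of plain refcounting: peek_discard_list() takes a reference before publishing the group, and every exit path of the workfn puts it. A compact standalone sketch of that acquire-on-publish / release-on-every-exit discipline, with the actual work reduced to a stub:

#include <stdio.h>

struct block_group {
	int refs;
	int eligible;
};

static void bg_get(struct block_group *bg) { bg->refs++; }
static void bg_put(struct block_group *bg) { bg->refs--; }

/* Peek publishes the group with its own reference already held. */
static struct block_group *peek(struct block_group *bg)
{
	bg_get(bg);
	return bg;
}

static void workfn(struct block_group *bg, int run_allowed)
{
	bg = peek(bg);

	if (!run_allowed || !bg->eligible) {
		bg_put(bg);	/* every early exit drops the reference */
		return;
	}
	/* ... do the discard work ... */
	bg_put(bg);		/* and so does the normal exit */
}

int main(void)
{
	struct block_group bg = { .refs = 1, .eligible = 0 };

	workfn(&bg, 1);
	printf("refs=%d\n", bg.refs);	/* back to 1, nothing leaked */
	return 0;
}
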
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 19e5f8eaae772d..147c50ef912acf 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -4254,6 +4254,14 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
+ 	/* clear out the rbtree of defraggable inodes */
+ 	btrfs_cleanup_defrag_inodes(fs_info);
+ 
++	/*
++	 * Handle the error fs first, as it will flush and wait for all ordered
++	 * extents.  This will generate delayed iputs, thus we want to handle
++	 * it first.
++	 */
++	if (unlikely(BTRFS_FS_ERROR(fs_info)))
++		btrfs_error_commit_super(fs_info);
++
+ 	/*
+ 	 * Wait for any fixup workers to complete.
+ 	 * If we don't wait for them here and they are still running by the time
+@@ -4274,6 +4282,19 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
+ 	 */
+ 	btrfs_flush_workqueue(fs_info->delalloc_workers);
+ 
++	/*
++	 * We can have ordered extents getting their last reference dropped from
++	 * the fs_info->workers queue because for async writes for data bios we
++	 * queue a work for that queue, at btrfs_wq_submit_bio(), that runs
++	 * run_one_async_done() which calls btrfs_bio_end_io() in case the bio
++	 * has an error, and that later function can do the final
++	 * btrfs_put_ordered_extent() on the ordered extent attached to the bio,
++	 * which adds a delayed iput for the inode. So we must flush the queue
++	 * so that we don't have delayed iputs after committing the current
++	 * transaction below and stopping the cleaner and transaction kthreads.
++	 */
++	btrfs_flush_workqueue(fs_info->workers);
++
+ 	/*
+ 	 * When finishing a compressed write bio we schedule a work queue item
+ 	 * to finish an ordered extent - btrfs_finish_compressed_write_work()
+@@ -4343,9 +4364,6 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
+ 			btrfs_err(fs_info, "commit super ret %d", ret);
+ 	}
+ 
+-	if (BTRFS_FS_ERROR(fs_info))
+-		btrfs_error_commit_super(fs_info);
+-
+ 	kthread_stop(fs_info->transaction_kthread);
+ 	kthread_stop(fs_info->cleaner_kthread);
+ 
+@@ -4468,10 +4486,6 @@ static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info)
+ 	/* cleanup FS via transaction */
+ 	btrfs_cleanup_transaction(fs_info);
+ 
+-	mutex_lock(&fs_info->cleaner_mutex);
+-	btrfs_run_delayed_iputs(fs_info);
+-	mutex_unlock(&fs_info->cleaner_mutex);
+-
+ 	down_write(&fs_info->cleanup_work_sem);
+ 	up_write(&fs_info->cleanup_work_sem);
+ }
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index e263d4b0546fa2..d322cf82783f9d 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -2826,10 +2826,10 @@ struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
+ 	return eb;
+ }
+ 
+-#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
+ struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
+ 					u64 start)
+ {
++#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
+ 	struct extent_buffer *eb, *exists = NULL;
+ 	int ret;
+ 
+@@ -2865,8 +2865,11 @@ struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
+ free_eb:
+ 	btrfs_release_extent_buffer(eb);
+ 	return exists;
+-}
++#else
++	/* Stub to avoid linker error when compiled with optimizations turned off. */
++	return NULL;
+ #endif
++}
+ 
+ static struct extent_buffer *grab_extent_buffer(
+ 		struct btrfs_fs_info *fs_info, struct page *page)
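
Moving the #ifdef inside alloc_test_extent_buffer() keeps the symbol defined even when the self-tests are configured out; at -O0 the compiler may not eliminate the dead call site, so the stub prevents an undefined-reference link error. The same pattern in miniature:

#include <stdio.h>

/* Define this to simulate CONFIG_BTRFS_FS_RUN_SANITY_TESTS=y */
/* #define RUN_SANITY_TESTS */

static int self_test(void)
{
#ifdef RUN_SANITY_TESTS
	return 42;	/* real implementation */
#else
	/* Stub: never reached in this config, but keeping the symbol
	 * defined avoids an undefined reference at -O0, where the
	 * compiler may not prune the dead call site. */
	return 0;
#endif
}

int main(void)
{
	printf("%d\n", self_test());
	return 0;
}
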
+diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
+index 8a36117ed4532b..fcb60837d7dc62 100644
+--- a/fs/btrfs/extent_io.h
++++ b/fs/btrfs/extent_io.h
+@@ -293,6 +293,8 @@ static inline int num_extent_pages(const struct extent_buffer *eb)
+  */
+ static inline int num_extent_folios(const struct extent_buffer *eb)
+ {
++	if (!eb->folios[0])
++		return 0;
+ 	if (folio_order(eb->folios[0]))
+ 		return 1;
+ 	return num_extent_pages(eb);
+diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
+index c73a41b1ad5607..d8fcc3eb85c88a 100644
+--- a/fs/btrfs/scrub.c
++++ b/fs/btrfs/scrub.c
+@@ -1541,8 +1541,8 @@ static int scrub_find_fill_first_stripe(struct btrfs_block_group *bg,
+ 	u64 extent_gen;
+ 	int ret;
+ 
+-	if (unlikely(!extent_root)) {
+-		btrfs_err(fs_info, "no valid extent root for scrub");
++	if (unlikely(!extent_root || !csum_root)) {
++		btrfs_err(fs_info, "no valid extent or csum root for scrub");
+ 		return -EUCLEAN;
+ 	}
+ 	memset(stripe->sectors, 0, sizeof(struct scrub_sector_verification) *
+diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
+index b1015f383f75ef..c843b4aefb8ac2 100644
+--- a/fs/btrfs/send.c
++++ b/fs/btrfs/send.c
+@@ -487,10 +487,8 @@ static int fs_path_ensure_buf(struct fs_path *p, int len)
+ 	if (p->buf_len >= len)
+ 		return 0;
+ 
+-	if (len > PATH_MAX) {
+-		WARN_ON(1);
+-		return -ENOMEM;
+-	}
++	if (WARN_ON(len > PATH_MAX))
++		return -ENAMETOOLONG;
+ 
+ 	path_len = p->end - p->start;
+ 	old_buf_len = p->buf_len;
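
The send.c fix relies on WARN_ON() evaluating to its condition, which lets the warning fold into the if, and it now returns -ENAMETOOLONG rather than the misleading -ENOMEM. A userspace approximation; the macro below is a stand-in (a GCC statement expression, as in kernel headers), not the kernel's actual definition:

#include <errno.h>
#include <stdio.h>

#define DEMO_PATH_MAX 4096

/* Stand-in for the kernel's WARN_ON(): print a warning, yield the
 * condition so it can sit directly inside an if (). */
#define WARN_ON(cond) \
	({ int __c = !!(cond); \
	   if (__c) fprintf(stderr, "WARN: %s\n", #cond); \
	   __c; })

static int ensure_buf(int len)
{
	if (WARN_ON(len > DEMO_PATH_MAX))
		return -ENAMETOOLONG;
	/* ... grow the buffer ... */
	return 0;
}

int main(void)
{
	printf("%d\n", ensure_buf(8192));	/* -36: ENAMETOOLONG */
	return 0;
}
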
+diff --git a/fs/buffer.c b/fs/buffer.c
+index 32bd0f4c422360..e9e84512a027af 100644
+--- a/fs/buffer.c
++++ b/fs/buffer.c
+@@ -176,18 +176,8 @@ void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
+ }
+ EXPORT_SYMBOL(end_buffer_write_sync);
+ 
+-/*
+- * Various filesystems appear to want __find_get_block to be non-blocking.
+- * But it's the page lock which protects the buffers.  To get around this,
+- * we get exclusion from try_to_free_buffers with the blockdev mapping's
+- * i_private_lock.
+- *
+- * Hack idea: for the blockdev mapping, i_private_lock contention
+- * may be quite high.  This code could TryLock the page, and if that
+- * succeeds, there is no need to take i_private_lock.
+- */
+ static struct buffer_head *
+-__find_get_block_slow(struct block_device *bdev, sector_t block)
++__find_get_block_slow(struct block_device *bdev, sector_t block, bool atomic)
+ {
+ 	struct address_space *bd_mapping = bdev->bd_mapping;
+ 	const int blkbits = bd_mapping->host->i_blkbits;
+@@ -204,7 +194,16 @@ __find_get_block_slow(struct block_device *bdev, sector_t block)
+ 	if (IS_ERR(folio))
+ 		goto out;
+ 
+-	spin_lock(&bd_mapping->i_private_lock);
++	/*
++	 * Folio lock protects the buffers. Callers that cannot block
++	 * will fall back to serializing vs try_to_free_buffers() via
++	 * the i_private_lock.
++	 */
++	if (atomic)
++		spin_lock(&bd_mapping->i_private_lock);
++	else
++		folio_lock(folio);
++
+ 	head = folio_buffers(folio);
+ 	if (!head)
+ 		goto out_unlock;
+@@ -236,7 +235,10 @@ __find_get_block_slow(struct block_device *bdev, sector_t block)
+ 		       1 << blkbits);
+ 	}
+ out_unlock:
+-	spin_unlock(&bd_mapping->i_private_lock);
++	if (atomic)
++		spin_unlock(&bd_mapping->i_private_lock);
++	else
++		folio_unlock(folio);
+ 	folio_put(folio);
+ out:
+ 	return ret;
+@@ -656,7 +658,9 @@ EXPORT_SYMBOL(generic_buffers_fsync);
+ void write_boundary_block(struct block_device *bdev,
+ 			sector_t bblock, unsigned blocksize)
+ {
+-	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
++	struct buffer_head *bh;
++
++	bh = __find_get_block_nonatomic(bdev, bblock + 1, blocksize);
+ 	if (bh) {
+ 		if (buffer_dirty(bh))
+ 			write_dirty_buffer(bh, 0);
+@@ -1394,14 +1398,15 @@ lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
+  * it in the LRU and mark it as accessed.  If it is not present then return
+  * NULL
+  */
+-struct buffer_head *
+-__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
++static struct buffer_head *
++find_get_block_common(struct block_device *bdev, sector_t block,
++			unsigned size, bool atomic)
+ {
+ 	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
+ 
+ 	if (bh == NULL) {
+ 		/* __find_get_block_slow will mark the page accessed */
+-		bh = __find_get_block_slow(bdev, block);
++		bh = __find_get_block_slow(bdev, block, atomic);
+ 		if (bh)
+ 			bh_lru_install(bh);
+ 	} else
+@@ -1409,8 +1414,23 @@ __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
+ 
+ 	return bh;
+ }
++
++struct buffer_head *
++__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
++{
++	return find_get_block_common(bdev, block, size, true);
++}
+ EXPORT_SYMBOL(__find_get_block);
+ 
++/* same as __find_get_block() but allows sleeping contexts */
++struct buffer_head *
++__find_get_block_nonatomic(struct block_device *bdev, sector_t block,
++			   unsigned size)
++{
++	return find_get_block_common(bdev, block, size, false);
++}
++EXPORT_SYMBOL(__find_get_block_nonatomic);
++
+ /**
+  * bdev_getblk - Get a buffer_head in a block device's buffer cache.
+  * @bdev: The block device.
+@@ -1428,7 +1448,12 @@ EXPORT_SYMBOL(__find_get_block);
+ struct buffer_head *bdev_getblk(struct block_device *bdev, sector_t block,
+ 		unsigned size, gfp_t gfp)
+ {
+-	struct buffer_head *bh = __find_get_block(bdev, block, size);
++	struct buffer_head *bh;
++
++	if (gfpflags_allow_blocking(gfp))
++		bh = __find_get_block_nonatomic(bdev, block, size);
++	else
++		bh = __find_get_block(bdev, block, size);
+ 
+ 	might_alloc(gfp);
+ 	if (bh)
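
The buffer.c rework funnels both lookups through one helper whose bool selects the exclusion mechanism (the i_private_lock spinlock for atomic callers, the folio lock for callers that may sleep), and bdev_getblk() now picks a path via gfpflags_allow_blocking(). A reduced sketch of the wrapper layout; the locking itself is stubbed out and the flag bit is a stand-in:

#include <stdio.h>

#define DEMO_GFP_DIRECT_RECLAIM (1u << 0)	/* stand-in flag bit */

static int gfpflags_allow_blocking(unsigned int gfp)
{
	return !!(gfp & DEMO_GFP_DIRECT_RECLAIM);
}

static const char *find_get_block_common(int atomic)
{
	/* atomic: spinlock exclusion; !atomic: take the folio lock */
	return atomic ? "spinlock path" : "folio-lock path";
}

static const char *find_get_block(void)
{
	return find_get_block_common(1);
}

static const char *find_get_block_nonatomic(void)
{
	return find_get_block_common(0);
}

static const char *getblk(unsigned int gfp)
{
	/* Sleeping allocations can afford the blocking lookup. */
	if (gfpflags_allow_blocking(gfp))
		return find_get_block_nonatomic();
	return find_get_block();
}

int main(void)
{
	printf("%s\n", getblk(DEMO_GFP_DIRECT_RECLAIM)); /* folio-lock path */
	printf("%s\n", getblk(0));			 /* spinlock path */
	return 0;
}
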
+diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
+index f2d88a3581695a..10461451185e82 100644
+--- a/fs/dlm/lowcomms.c
++++ b/fs/dlm/lowcomms.c
+@@ -1826,8 +1826,8 @@ static int dlm_tcp_listen_validate(void)
+ {
+ 	/* We don't support multi-homed hosts */
+ 	if (dlm_local_count > 1) {
+-		log_print("TCP protocol can't handle multi-homed hosts, try SCTP");
+-		return -EINVAL;
++		log_print("Detected multi-homed host, using only the first IP address.");
++		log_print("Try SCTP if you want multi-link support.");
+ 	}
+ 
+ 	return 0;
+diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h
+index edbabb3256c9ac..2c11e8f3048e94 100644
+--- a/fs/erofs/internal.h
++++ b/fs/erofs/internal.h
+@@ -453,6 +453,7 @@ int __init erofs_init_shrinker(void);
+ void erofs_exit_shrinker(void);
+ int __init z_erofs_init_subsystem(void);
+ void z_erofs_exit_subsystem(void);
++int z_erofs_init_super(struct super_block *sb);
+ unsigned long z_erofs_shrink_scan(struct erofs_sb_info *sbi,
+ 				  unsigned long nr_shrink);
+ int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map,
+@@ -462,7 +463,6 @@ void z_erofs_put_gbuf(void *ptr);
+ int z_erofs_gbuf_growsize(unsigned int nrpages);
+ int __init z_erofs_gbuf_init(void);
+ void z_erofs_gbuf_exit(void);
+-int erofs_init_managed_cache(struct super_block *sb);
+ int z_erofs_parse_cfgs(struct super_block *sb, struct erofs_super_block *dsb);
+ #else
+ static inline void erofs_shrinker_register(struct super_block *sb) {}
+@@ -471,7 +471,7 @@ static inline int erofs_init_shrinker(void) { return 0; }
+ static inline void erofs_exit_shrinker(void) {}
+ static inline int z_erofs_init_subsystem(void) { return 0; }
+ static inline void z_erofs_exit_subsystem(void) {}
+-static inline int erofs_init_managed_cache(struct super_block *sb) { return 0; }
++static inline int z_erofs_init_super(struct super_block *sb) { return 0; }
+ #endif	/* !CONFIG_EROFS_FS_ZIP */
+ 
+ #ifdef CONFIG_EROFS_FS_BACKED_BY_FILE
+diff --git a/fs/erofs/super.c b/fs/erofs/super.c
+index 5b279977c9d5d6..3421448fef0e3e 100644
+--- a/fs/erofs/super.c
++++ b/fs/erofs/super.c
+@@ -664,9 +664,16 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
+ 	else
+ 		sb->s_flags &= ~SB_POSIXACL;
+ 
+-#ifdef CONFIG_EROFS_FS_ZIP
+-	xa_init(&sbi->managed_pslots);
+-#endif
++	err = z_erofs_init_super(sb);
++	if (err)
++		return err;
++
++	if (erofs_sb_has_fragments(sbi) && sbi->packed_nid) {
++		inode = erofs_iget(sb, sbi->packed_nid);
++		if (IS_ERR(inode))
++			return PTR_ERR(inode);
++		sbi->packed_inode = inode;
++	}
+ 
+ 	inode = erofs_iget(sb, sbi->root_nid);
+ 	if (IS_ERR(inode))
+@@ -678,24 +685,11 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
+ 		iput(inode);
+ 		return -EINVAL;
+ 	}
+-
+ 	sb->s_root = d_make_root(inode);
+ 	if (!sb->s_root)
+ 		return -ENOMEM;
+ 
+ 	erofs_shrinker_register(sb);
+-	if (erofs_sb_has_fragments(sbi) && sbi->packed_nid) {
+-		sbi->packed_inode = erofs_iget(sb, sbi->packed_nid);
+-		if (IS_ERR(sbi->packed_inode)) {
+-			err = PTR_ERR(sbi->packed_inode);
+-			sbi->packed_inode = NULL;
+-			return err;
+-		}
+-	}
+-	err = erofs_init_managed_cache(sb);
+-	if (err)
+-		return err;
+-
+ 	err = erofs_xattr_prefixes_init(sb);
+ 	if (err)
+ 		return err;
+@@ -831,6 +825,16 @@ static int erofs_init_fs_context(struct fs_context *fc)
+ 	return 0;
+ }
+ 
++static void erofs_drop_internal_inodes(struct erofs_sb_info *sbi)
++{
++	iput(sbi->packed_inode);
++	sbi->packed_inode = NULL;
++#ifdef CONFIG_EROFS_FS_ZIP
++	iput(sbi->managed_cache);
++	sbi->managed_cache = NULL;
++#endif
++}
++
+ static void erofs_kill_sb(struct super_block *sb)
+ {
+ 	struct erofs_sb_info *sbi = EROFS_SB(sb);
+@@ -840,6 +844,7 @@ static void erofs_kill_sb(struct super_block *sb)
+ 		kill_anon_super(sb);
+ 	else
+ 		kill_block_super(sb);
++	erofs_drop_internal_inodes(sbi);
+ 	fs_put_dax(sbi->dif0.dax_dev, NULL);
+ 	erofs_fscache_unregister_fs(sb);
+ 	erofs_sb_free(sbi);
+@@ -850,17 +855,10 @@ static void erofs_put_super(struct super_block *sb)
+ {
+ 	struct erofs_sb_info *const sbi = EROFS_SB(sb);
+ 
+-	DBG_BUGON(!sbi);
+-
+ 	erofs_unregister_sysfs(sb);
+ 	erofs_shrinker_unregister(sb);
+ 	erofs_xattr_prefixes_cleanup(sb);
+-#ifdef CONFIG_EROFS_FS_ZIP
+-	iput(sbi->managed_cache);
+-	sbi->managed_cache = NULL;
+-#endif
+-	iput(sbi->packed_inode);
+-	sbi->packed_inode = NULL;
++	erofs_drop_internal_inodes(sbi);
+ 	erofs_free_dev_context(sbi->devs);
+ 	sbi->devs = NULL;
+ 	erofs_fscache_unregister_fs(sb);
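
The erofs change concentrates both internal inodes in erofs_drop_internal_inodes(), called from put_super and, crucially, from kill_sb, so teardown also happens when fill_super fails before put_super can run. The put-then-NULL shape makes a second call a no-op; a tiny standalone sketch with free() standing in for iput():

#include <stdio.h>
#include <stdlib.h>

struct sbi {
	char *packed_inode;
	char *managed_cache;
};

/* Release both references; NULLing the fields makes a second call,
 * e.g. from both the put_super and kill_sb paths, a harmless no-op. */
static void drop_internal_inodes(struct sbi *s)
{
	free(s->packed_inode);
	s->packed_inode = NULL;
	free(s->managed_cache);
	s->managed_cache = NULL;
}

int main(void)
{
	struct sbi s = { malloc(8), malloc(8) };

	drop_internal_inodes(&s);
	drop_internal_inodes(&s);	/* second call is safe */
	printf("ok\n");
	return 0;
}
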
+diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
+index e5e94afc5af88a..74521d7dbee1d8 100644
+--- a/fs/erofs/zdata.c
++++ b/fs/erofs/zdata.c
+@@ -663,18 +663,18 @@ static const struct address_space_operations z_erofs_cache_aops = {
+ 	.invalidate_folio = z_erofs_cache_invalidate_folio,
+ };
+ 
+-int erofs_init_managed_cache(struct super_block *sb)
++int z_erofs_init_super(struct super_block *sb)
+ {
+ 	struct inode *const inode = new_inode(sb);
+ 
+ 	if (!inode)
+ 		return -ENOMEM;
+-
+ 	set_nlink(inode, 1);
+ 	inode->i_size = OFFSET_MAX;
+ 	inode->i_mapping->a_ops = &z_erofs_cache_aops;
+ 	mapping_set_gfp_mask(inode->i_mapping, GFP_KERNEL);
+ 	EROFS_SB(sb)->managed_cache = inode;
++	xa_init(&EROFS_SB(sb)->managed_pslots);
+ 	return 0;
+ }
+ 
+diff --git a/fs/exfat/inode.c b/fs/exfat/inode.c
+index 3801516ac50716..cc4dcb7a32b57d 100644
+--- a/fs/exfat/inode.c
++++ b/fs/exfat/inode.c
+@@ -274,9 +274,11 @@ static int exfat_get_block(struct inode *inode, sector_t iblock,
+ 	sector_t last_block;
+ 	sector_t phys = 0;
+ 	sector_t valid_blks;
++	loff_t i_size;
+ 
+ 	mutex_lock(&sbi->s_lock);
+-	last_block = EXFAT_B_TO_BLK_ROUND_UP(i_size_read(inode), sb);
++	i_size = i_size_read(inode);
++	last_block = EXFAT_B_TO_BLK_ROUND_UP(i_size, sb);
+ 	if (iblock >= last_block && !create)
+ 		goto done;
+ 
+@@ -305,102 +307,95 @@ static int exfat_get_block(struct inode *inode, sector_t iblock,
+ 	if (buffer_delay(bh_result))
+ 		clear_buffer_delay(bh_result);
+ 
+-	if (create) {
++	/*
++	 * In most cases, we just need to set bh_result to mapped, unmapped
++	 * or new status as follows:
++	 *  1. i_size == valid_size
++	 *  2. write case (create == 1)
++	 *  3. direct_read (!bh_result->b_folio)
++	 *     -> the unwritten part will be zeroed in exfat_direct_IO()
++	 *
++	 * Otherwise, for a buffered read, care must be taken with the last
++	 * partial block when valid_size is not equal to i_size.
++	 */
++	if (i_size == ei->valid_size || create || !bh_result->b_folio)
+ 		valid_blks = EXFAT_B_TO_BLK_ROUND_UP(ei->valid_size, sb);
++	else
++		valid_blks = EXFAT_B_TO_BLK(ei->valid_size, sb);
+ 
+-		if (iblock + max_blocks < valid_blks) {
+-			/* The range has been written, map it */
+-			goto done;
+-		} else if (iblock < valid_blks) {
+-			/*
+-			 * The range has been partially written,
+-			 * map the written part.
+-			 */
+-			max_blocks = valid_blks - iblock;
+-			goto done;
+-		}
++	/* The range has been fully written, map it */
++	if (iblock + max_blocks < valid_blks)
++		goto done;
+ 
+-		/* The area has not been written, map and mark as new. */
+-		set_buffer_new(bh_result);
++	/* The range has been partially written, map the written part */
++	if (iblock < valid_blks) {
++		max_blocks = valid_blks - iblock;
++		goto done;
++	}
+ 
++	/* The area has not been written, map and mark as new for create case */
++	if (create) {
++		set_buffer_new(bh_result);
+ 		ei->valid_size = EXFAT_BLK_TO_B(iblock + max_blocks, sb);
+ 		mark_inode_dirty(inode);
+-	} else {
+-		valid_blks = EXFAT_B_TO_BLK(ei->valid_size, sb);
++		goto done;
++	}
+ 
+-		if (iblock + max_blocks < valid_blks) {
+-			/* The range has been written, map it */
+-			goto done;
+-		} else if (iblock < valid_blks) {
+-			/*
+-			 * The area has been partially written,
+-			 * map the written part.
+-			 */
+-			max_blocks = valid_blks - iblock;
++	/*
++	 * The area has just one block partially written.
++	 * In that case, we should read and fill the unwritten part of
++	 * a block with zero.
++	 */
++	if (bh_result->b_folio && iblock == valid_blks &&
++	    (ei->valid_size & (sb->s_blocksize - 1))) {
++		loff_t size, pos;
++		void *addr;
++
++		max_blocks = 1;
++
++		/*
++		 * No buffer_head is allocated.
++		 * (1) bmap: It's enough to set blocknr without I/O.
++		 * (2) read: The unwritten part should be filled with zero.
++		 *           return -EAGAIN to fall back to
++		 *           let's returns -EAGAIN to fallback to
++		 *           block_read_full_folio() for per-bh IO.
++		 */
++		if (!folio_buffers(bh_result->b_folio)) {
++			err = -EAGAIN;
+ 			goto done;
+-		} else if (iblock == valid_blks &&
+-			   (ei->valid_size & (sb->s_blocksize - 1))) {
+-			/*
+-			 * The block has been partially written,
+-			 * zero the unwritten part and map the block.
+-			 */
+-			loff_t size, pos;
+-			void *addr;
+-
+-			max_blocks = 1;
+-
+-			/*
+-			 * For direct read, the unwritten part will be zeroed in
+-			 * exfat_direct_IO()
+-			 */
+-			if (!bh_result->b_folio)
+-				goto done;
+-
+-			/*
+-			 * No buffer_head is allocated.
+-			 * (1) bmap: It's enough to fill bh_result without I/O.
+-			 * (2) read: The unwritten part should be filled with 0
+-			 *           If a folio does not have any buffers,
+-			 *           let's returns -EAGAIN to fallback to
+-			 *           per-bh IO like block_read_full_folio().
+-			 */
+-			if (!folio_buffers(bh_result->b_folio)) {
+-				err = -EAGAIN;
+-				goto done;
+-			}
++		}
+ 
+-			pos = EXFAT_BLK_TO_B(iblock, sb);
+-			size = ei->valid_size - pos;
+-			addr = folio_address(bh_result->b_folio) +
+-			       offset_in_folio(bh_result->b_folio, pos);
++		pos = EXFAT_BLK_TO_B(iblock, sb);
++		size = ei->valid_size - pos;
++		addr = folio_address(bh_result->b_folio) +
++			offset_in_folio(bh_result->b_folio, pos);
+ 
+-			/* Check if bh->b_data points to proper addr in folio */
+-			if (bh_result->b_data != addr) {
+-				exfat_fs_error_ratelimit(sb,
++		/* Check if bh->b_data points to proper addr in folio */
++		if (bh_result->b_data != addr) {
++			exfat_fs_error_ratelimit(sb,
+ 					"b_data(%p) != folio_addr(%p)",
+ 					bh_result->b_data, addr);
+-				err = -EINVAL;
+-				goto done;
+-			}
+-
+-			/* Read a block */
+-			err = bh_read(bh_result, 0);
+-			if (err < 0)
+-				goto done;
++			err = -EINVAL;
++			goto done;
++		}
+ 
+-			/* Zero unwritten part of a block */
+-			memset(bh_result->b_data + size, 0,
+-			       bh_result->b_size - size);
++		/* Read a block */
++		err = bh_read(bh_result, 0);
++		if (err < 0)
++			goto done;
+ 
+-			err = 0;
+-		} else {
+-			/*
+-			 * The range has not been written, clear the mapped flag
+-			 * to only zero the cache and do not read from disk.
+-			 */
+-			clear_buffer_mapped(bh_result);
+-		}
++		/* Zero unwritten part of a block */
++		memset(bh_result->b_data + size, 0, bh_result->b_size - size);
++		err = 0;
++		goto done;
+ 	}
++
++	/*
++	 * The area has not been written, clear mapped for read/bmap cases.
++	 * It will then be filled with zeros without reading from disk.
++	 */
++	clear_buffer_mapped(bh_result);
+ done:
+ 	bh_result->b_size = EXFAT_BLK_TO_B(max_blocks, sb);
+ 	if (err < 0)
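
The exfat rewrite turns on one decision: convert valid_size to blocks rounding up when the tail block can be mapped as-is (create, direct read, or valid_size == i_size), and rounding down for buffered reads, which must read and zero-fill the straddling block by hand. The two conversions in a standalone sketch:

#include <stdio.h>

#define BLK_BITS 12	/* 4K blocks for the demo */

static unsigned long b_to_blk(unsigned long bytes)
{
	return bytes >> BLK_BITS;			/* round down */
}

static unsigned long b_to_blk_round_up(unsigned long bytes)
{
	return (bytes + (1UL << BLK_BITS) - 1) >> BLK_BITS;
}

int main(void)
{
	unsigned long valid_size = 10000;	/* 2 blocks + 1808 bytes */

	/* create/direct-read paths may treat the straddling block as
	 * written, so any byte in a block counts: */
	printf("round up:   %lu\n", b_to_blk_round_up(valid_size)); /* 3 */
	/* buffered reads stop at fully valid blocks and must read and
	 * zero-fill block 2 by hand: */
	printf("round down: %lu\n", b_to_blk(valid_size));          /* 2 */
	return 0;
}
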
+diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
+index 8042ad87380897..c48fd36b2d74c0 100644
+--- a/fs/ext4/balloc.c
++++ b/fs/ext4/balloc.c
+@@ -649,8 +649,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
+ 	/* Hm, nope.  Are (enough) root reserved clusters available? */
+ 	if (uid_eq(sbi->s_resuid, current_fsuid()) ||
+ 	    (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
+-	    capable(CAP_SYS_RESOURCE) ||
+-	    (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
++	    (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
++	    capable(CAP_SYS_RESOURCE)) {
+ 
+ 		if (free_clusters >= (nclusters + dirty_clusters +
+ 				      resv_clusters))
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index bbffb76d9a9049..c2e6989a568c2a 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -278,7 +278,8 @@ struct ext4_system_blocks {
+ /*
+  * Flags for ext4_io_end->flags
+  */
+-#define	EXT4_IO_END_UNWRITTEN	0x0001
++#define EXT4_IO_END_UNWRITTEN	0x0001
++#define EXT4_IO_END_FAILED	0x0002
+ 
+ struct ext4_io_end_vec {
+ 	struct list_head list;		/* list of io_end_vec */
+@@ -3012,6 +3013,8 @@ extern int ext4_inode_attach_jinode(struct inode *inode);
+ extern int ext4_can_truncate(struct inode *inode);
+ extern int ext4_truncate(struct inode *);
+ extern int ext4_break_layouts(struct inode *);
++extern int ext4_truncate_page_cache_block_range(struct inode *inode,
++						loff_t start, loff_t end);
+ extern int ext4_punch_hole(struct file *file, loff_t offset, loff_t length);
+ extern void ext4_set_inode_flags(struct inode *, bool init);
+ extern int ext4_alloc_da_blocks(struct inode *inode);
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 60909af2d4a537..ba3419958a8320 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -4667,22 +4667,13 @@ static long ext4_zero_range(struct file *file, loff_t offset,
+ 			goto out_mutex;
+ 		}
+ 
+-		/*
+-		 * For journalled data we need to write (and checkpoint) pages
+-		 * before discarding page cache to avoid inconsitent data on
+-		 * disk in case of crash before zeroing trans is committed.
+-		 */
+-		if (ext4_should_journal_data(inode)) {
+-			ret = filemap_write_and_wait_range(mapping, start,
+-							   end - 1);
+-			if (ret) {
+-				filemap_invalidate_unlock(mapping);
+-				goto out_mutex;
+-			}
++		/* Now release the pages and zero block aligned part of pages */
++		ret = ext4_truncate_page_cache_block_range(inode, start, end);
++		if (ret) {
++			filemap_invalidate_unlock(mapping);
++			goto out_mutex;
+ 		}
+ 
+-		/* Now release the pages and zero block aligned part of pages */
+-		truncate_pagecache_range(inode, start, end - 1);
+ 		inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
+ 
+ 		ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size,
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 487d9aec56c9d7..38fe9a213d09b7 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -31,6 +31,7 @@
+ #include <linux/writeback.h>
+ #include <linux/pagevec.h>
+ #include <linux/mpage.h>
++#include <linux/rmap.h>
+ #include <linux/namei.h>
+ #include <linux/uio.h>
+ #include <linux/bio.h>
+@@ -3879,6 +3880,68 @@ int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
+ 	return ret;
+ }
+ 
++static inline void ext4_truncate_folio(struct inode *inode,
++				       loff_t start, loff_t end)
++{
++	unsigned long blocksize = i_blocksize(inode);
++	struct folio *folio;
++
++	/* Nothing to be done if no complete block needs to be truncated. */
++	if (round_up(start, blocksize) >= round_down(end, blocksize))
++		return;
++
++	folio = filemap_lock_folio(inode->i_mapping, start >> PAGE_SHIFT);
++	if (IS_ERR(folio))
++		return;
++
++	if (folio_mkclean(folio))
++		folio_mark_dirty(folio);
++	folio_unlock(folio);
++	folio_put(folio);
++}
++
++int ext4_truncate_page_cache_block_range(struct inode *inode,
++					 loff_t start, loff_t end)
++{
++	unsigned long blocksize = i_blocksize(inode);
++	int ret;
++
++	/*
++	 * For journalled data we need to write (and checkpoint) pages
++	 * before discarding page cache to avoid inconsistent data on disk
++	 * in case of a crash before the freeing or unwritten-conversion
++	 * transaction is committed.
++	 */
++	if (ext4_should_journal_data(inode)) {
++		ret = filemap_write_and_wait_range(inode->i_mapping, start,
++						   end - 1);
++		if (ret)
++			return ret;
++		goto truncate_pagecache;
++	}
++
++	/*
++	 * If the block size is less than the page size, the file's mapped
++	 * blocks within one page could be freed or converted to unwritten.
++	 * So it's necessary to remove writable userspace mappings, and then
++	 * ext4_page_mkwrite() can be called during subsequent write access
++	 * to these partial folios.
++	 */
++	if (!IS_ALIGNED(start | end, PAGE_SIZE) &&
++	    blocksize < PAGE_SIZE && start < inode->i_size) {
++		loff_t page_boundary = round_up(start, PAGE_SIZE);
++
++		ext4_truncate_folio(inode, start, min(page_boundary, end));
++		if (end > page_boundary)
++			ext4_truncate_folio(inode,
++					    round_down(end, PAGE_SIZE), end);
++	}
++
++truncate_pagecache:
++	truncate_pagecache_range(inode, start, end - 1);
++	return 0;
++}
++
+ static void ext4_wait_dax_page(struct inode *inode)
+ {
+ 	filemap_invalidate_unlock(inode->i_mapping);
+@@ -3933,17 +3996,6 @@ int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
+ 
+ 	trace_ext4_punch_hole(inode, offset, length, 0);
+ 
+-	/*
+-	 * Write out all dirty pages to avoid race conditions
+-	 * Then release them.
+-	 */
+-	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
+-		ret = filemap_write_and_wait_range(mapping, offset,
+-						   offset + length - 1);
+-		if (ret)
+-			return ret;
+-	}
+-
+ 	inode_lock(inode);
+ 
+ 	/* No need to punch hole beyond i_size */
+@@ -4005,8 +4057,11 @@ int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
+ 		ret = ext4_update_disksize_before_punch(inode, offset, length);
+ 		if (ret)
+ 			goto out_dio;
+-		truncate_pagecache_range(inode, first_block_offset,
+-					 last_block_offset);
++
++		ret = ext4_truncate_page_cache_block_range(inode,
++				first_block_offset, last_block_offset + 1);
++		if (ret)
++			goto out_dio;
+ 	}
+ 
+ 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
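The new ext4_truncate_folio() above only bothers with a folio when at least one complete block falls inside [start, end). A small standalone sketch of that alignment guard, using hypothetical values and power-of-two helpers in place of the kernel's round_up()/round_down():

/*
 * Does [start, end) contain at least one complete block?  Mirrors the
 * guard in ext4_truncate_folio() above; values are illustrative.
 */
#include <stdio.h>

static unsigned long round_up_pow2(unsigned long x, unsigned long a)
{
	return (x + a - 1) & ~(a - 1);	/* a must be a power of two */
}

static unsigned long round_down_pow2(unsigned long x, unsigned long a)
{
	return x & ~(a - 1);
}

static int covers_complete_block(unsigned long start, unsigned long end,
				 unsigned long blocksize)
{
	return round_up_pow2(start, blocksize) <
	       round_down_pow2(end, blocksize);
}

int main(void)
{
	printf("%d\n", covers_complete_block(1500, 2100, 1024));	/* 0 */
	printf("%d\n", covers_complete_block(1000, 3000, 1024));	/* 1 */
	return 0;
}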
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 92f49d7eb3c001..109cf88e7caacf 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -6644,7 +6644,8 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
+ 		for (i = 0; i < count; i++) {
+ 			cond_resched();
+ 			if (is_metadata)
+-				bh = sb_find_get_block(inode->i_sb, block + i);
++				bh = sb_find_get_block_nonatomic(inode->i_sb,
++								 block + i);
+ 			ext4_forget(handle, is_metadata, inode, bh, block + i);
+ 		}
+ 	}
+diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
+index b7b9261fec3b50..cb023922c93c82 100644
+--- a/fs/ext4/page-io.c
++++ b/fs/ext4/page-io.c
+@@ -181,14 +181,25 @@ static int ext4_end_io_end(ext4_io_end_t *io_end)
+ 		   "list->prev 0x%p\n",
+ 		   io_end, inode->i_ino, io_end->list.next, io_end->list.prev);
+ 
+-	io_end->handle = NULL;	/* Following call will use up the handle */
+-	ret = ext4_convert_unwritten_io_end_vec(handle, io_end);
++	/*
++	 * Do not convert the unwritten extents if data writeback fails,
++	 * or stale data may be exposed.
++	 */
++	io_end->handle = NULL;  /* Following call will use up the handle */
++	if (unlikely(io_end->flag & EXT4_IO_END_FAILED)) {
++		ret = -EIO;
++		if (handle)
++			jbd2_journal_free_reserved(handle);
++	} else {
++		ret = ext4_convert_unwritten_io_end_vec(handle, io_end);
++	}
+ 	if (ret < 0 && !ext4_forced_shutdown(inode->i_sb)) {
+ 		ext4_msg(inode->i_sb, KERN_EMERG,
+ 			 "failed to convert unwritten extents to written "
+ 			 "extents -- potential data loss!  "
+ 			 "(inode %lu, error %d)", inode->i_ino, ret);
+ 	}
++
+ 	ext4_clear_io_unwritten_flag(io_end);
+ 	ext4_release_io_end(io_end);
+ 	return ret;
+@@ -344,6 +355,7 @@ static void ext4_end_bio(struct bio *bio)
+ 			     bio->bi_status, inode->i_ino,
+ 			     (unsigned long long)
+ 			     bi_sector >> (inode->i_blkbits - 9));
++		io_end->flag |= EXT4_IO_END_FAILED;
+ 		mapping_set_error(inode->i_mapping,
+ 				blk_status_to_errno(bio->bi_status));
+ 	}
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 4291ab3c20be67..99117d1e1bdd5b 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -2787,6 +2787,13 @@ static int ext4_check_opt_consistency(struct fs_context *fc,
+ 	}
+ 
+ 	if (is_remount) {
++		if (!sbi->s_journal &&
++		    ctx_test_mount_opt(ctx, EXT4_MOUNT_DATA_ERR_ABORT)) {
++			ext4_msg(NULL, KERN_WARNING,
++				 "Remounting fs w/o journal so ignoring data_err option");
++			ctx_clear_mount_opt(ctx, EXT4_MOUNT_DATA_ERR_ABORT);
++		}
++
+ 		if (ctx_test_mount_opt(ctx, EXT4_MOUNT_DAX_ALWAYS) &&
+ 		    (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)) {
+ 			ext4_msg(NULL, KERN_ERR, "can't mount with "
+@@ -5396,6 +5403,11 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb)
+ 				 "data=, fs mounted w/o journal");
+ 			goto failed_mount3a;
+ 		}
++		if (test_opt(sb, DATA_ERR_ABORT)) {
++			ext4_msg(sb, KERN_ERR,
++				 "can't mount with data_err=abort, fs mounted w/o journal");
++			goto failed_mount3a;
++		}
+ 		sbi->s_def_mount_opt &= ~EXT4_MOUNT_JOURNAL_CHECKSUM;
+ 		clear_opt(sb, JOURNAL_CHECKSUM);
+ 		clear_opt(sb, DATA_FLAGS);
+@@ -6744,6 +6756,7 @@ static int ext4_reconfigure(struct fs_context *fc)
+ {
+ 	struct super_block *sb = fc->root->d_sb;
+ 	int ret;
++	bool old_ro = sb_rdonly(sb);
+ 
+ 	fc->s_fs_info = EXT4_SB(sb);
+ 
+@@ -6755,9 +6768,9 @@ static int ext4_reconfigure(struct fs_context *fc)
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	ext4_msg(sb, KERN_INFO, "re-mounted %pU %s. Quota mode: %s.",
+-		 &sb->s_uuid, sb_rdonly(sb) ? "ro" : "r/w",
+-		 ext4_quota_mode(sb));
++	ext4_msg(sb, KERN_INFO, "re-mounted %pU%s.",
++		 &sb->s_uuid,
++		 (old_ro != sb_rdonly(sb)) ? (sb_rdonly(sb) ? " ro" : " r/w") : "");
+ 
+ 	return 0;
+ }
+diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
+index d9a44f03e558bf..7df638f901a1f5 100644
+--- a/fs/f2fs/sysfs.c
++++ b/fs/f2fs/sysfs.c
+@@ -61,6 +61,12 @@ struct f2fs_attr {
+ 	int id;
+ };
+ 
++struct f2fs_base_attr {
++	struct attribute attr;
++	ssize_t (*show)(struct f2fs_base_attr *a, char *buf);
++	ssize_t (*store)(struct f2fs_base_attr *a, const char *buf, size_t len);
++};
++
+ static ssize_t f2fs_sbi_show(struct f2fs_attr *a,
+ 			     struct f2fs_sb_info *sbi, char *buf);
+ 
+@@ -864,6 +870,25 @@ static void f2fs_sb_release(struct kobject *kobj)
+ 	complete(&sbi->s_kobj_unregister);
+ }
+ 
++static ssize_t f2fs_base_attr_show(struct kobject *kobj,
++				struct attribute *attr, char *buf)
++{
++	struct f2fs_base_attr *a = container_of(attr,
++				struct f2fs_base_attr, attr);
++
++	return a->show ? a->show(a, buf) : 0;
++}
++
++static ssize_t f2fs_base_attr_store(struct kobject *kobj,
++				struct attribute *attr,
++				const char *buf, size_t len)
++{
++	struct f2fs_base_attr *a = container_of(attr,
++				struct f2fs_base_attr, attr);
++
++	return a->store ? a->store(a, buf, len) : 0;
++}
++
+ /*
+  * Note that there are three feature list entries:
+  * 1) /sys/fs/f2fs/features
+@@ -882,14 +907,13 @@ static void f2fs_sb_release(struct kobject *kobj)
+  *     please add new on-disk feature in this list only.
+  *     - ref. F2FS_SB_FEATURE_RO_ATTR()
+  */
+-static ssize_t f2fs_feature_show(struct f2fs_attr *a,
+-		struct f2fs_sb_info *sbi, char *buf)
++static ssize_t f2fs_feature_show(struct f2fs_base_attr *a, char *buf)
+ {
+ 	return sysfs_emit(buf, "supported\n");
+ }
+ 
+ #define F2FS_FEATURE_RO_ATTR(_name)				\
+-static struct f2fs_attr f2fs_attr_##_name = {			\
++static struct f2fs_base_attr f2fs_base_attr_##_name = {		\
+ 	.attr = {.name = __stringify(_name), .mode = 0444 },	\
+ 	.show	= f2fs_feature_show,				\
+ }
+@@ -1258,37 +1282,38 @@ static struct attribute *f2fs_attrs[] = {
+ };
+ ATTRIBUTE_GROUPS(f2fs);
+ 
++#define BASE_ATTR_LIST(name) (&f2fs_base_attr_##name.attr)
+ static struct attribute *f2fs_feat_attrs[] = {
+ #ifdef CONFIG_FS_ENCRYPTION
+-	ATTR_LIST(encryption),
+-	ATTR_LIST(test_dummy_encryption_v2),
++	BASE_ATTR_LIST(encryption),
++	BASE_ATTR_LIST(test_dummy_encryption_v2),
+ #if IS_ENABLED(CONFIG_UNICODE)
+-	ATTR_LIST(encrypted_casefold),
++	BASE_ATTR_LIST(encrypted_casefold),
+ #endif
+ #endif /* CONFIG_FS_ENCRYPTION */
+ #ifdef CONFIG_BLK_DEV_ZONED
+-	ATTR_LIST(block_zoned),
++	BASE_ATTR_LIST(block_zoned),
+ #endif
+-	ATTR_LIST(atomic_write),
+-	ATTR_LIST(extra_attr),
+-	ATTR_LIST(project_quota),
+-	ATTR_LIST(inode_checksum),
+-	ATTR_LIST(flexible_inline_xattr),
+-	ATTR_LIST(quota_ino),
+-	ATTR_LIST(inode_crtime),
+-	ATTR_LIST(lost_found),
++	BASE_ATTR_LIST(atomic_write),
++	BASE_ATTR_LIST(extra_attr),
++	BASE_ATTR_LIST(project_quota),
++	BASE_ATTR_LIST(inode_checksum),
++	BASE_ATTR_LIST(flexible_inline_xattr),
++	BASE_ATTR_LIST(quota_ino),
++	BASE_ATTR_LIST(inode_crtime),
++	BASE_ATTR_LIST(lost_found),
+ #ifdef CONFIG_FS_VERITY
+-	ATTR_LIST(verity),
++	BASE_ATTR_LIST(verity),
+ #endif
+-	ATTR_LIST(sb_checksum),
++	BASE_ATTR_LIST(sb_checksum),
+ #if IS_ENABLED(CONFIG_UNICODE)
+-	ATTR_LIST(casefold),
++	BASE_ATTR_LIST(casefold),
+ #endif
+-	ATTR_LIST(readonly),
++	BASE_ATTR_LIST(readonly),
+ #ifdef CONFIG_F2FS_FS_COMPRESSION
+-	ATTR_LIST(compression),
++	BASE_ATTR_LIST(compression),
+ #endif
+-	ATTR_LIST(pin_file),
++	BASE_ATTR_LIST(pin_file),
+ 	NULL,
+ };
+ ATTRIBUTE_GROUPS(f2fs_feat);
+@@ -1362,9 +1387,14 @@ static struct kset f2fs_kset = {
+ 	.kobj	= {.ktype = &f2fs_ktype},
+ };
+ 
++static const struct sysfs_ops f2fs_feat_attr_ops = {
++	.show	= f2fs_base_attr_show,
++	.store	= f2fs_base_attr_store,
++};
++
+ static const struct kobj_type f2fs_feat_ktype = {
+ 	.default_groups = f2fs_feat_groups,
+-	.sysfs_ops	= &f2fs_attr_ops,
++	.sysfs_ops	= &f2fs_feat_attr_ops,
+ };
+ 
+ static struct kobject f2fs_feat = {
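The f2fs sysfs rework above introduces struct f2fs_base_attr plus show/store wrappers that recover it from the embedded struct attribute via container_of(). A simplified userspace sketch of that dispatch pattern; the types and the macro below are stand-ins for the kernel ones:

/* Recover the wrapping object from a pointer to its embedded member. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct attribute { const char *name; };

struct base_attr {
	struct attribute attr;
	const char *(*show)(void);
};

static const char *feature_show(void) { return "supported\n"; }

static const char *dispatch(struct attribute *attr)
{
	struct base_attr *a = container_of(attr, struct base_attr, attr);

	return a->show ? a->show() : "";
}

int main(void)
{
	struct base_attr enc = { { "encryption" }, feature_show };

	printf("%s", dispatch(&enc.attr));
	return 0;
}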
+diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
+index a1e86ec07c38b5..ff543dc09130e1 100644
+--- a/fs/fuse/dir.c
++++ b/fs/fuse/dir.c
+@@ -1133,6 +1133,8 @@ static int fuse_link(struct dentry *entry, struct inode *newdir,
+ 	else if (err == -EINTR)
+ 		fuse_invalidate_attr(inode);
+ 
++	if (err == -ENOSYS)
++		err = -EPERM;
+ 	return err;
+ }
+ 
+diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
+index a51fe42732c4c2..4f1eca99786b61 100644
+--- a/fs/gfs2/glock.c
++++ b/fs/gfs2/glock.c
+@@ -843,12 +843,13 @@ static void run_queue(struct gfs2_glock *gl, const int nonblock)
+ __releases(&gl->gl_lockref.lock)
+ __acquires(&gl->gl_lockref.lock)
+ {
+-	struct gfs2_holder *gh = NULL;
++	struct gfs2_holder *gh;
+ 
+ 	if (test_bit(GLF_LOCK, &gl->gl_flags))
+ 		return;
+ 	set_bit(GLF_LOCK, &gl->gl_flags);
+ 
++	/* While a demote is in progress, the GLF_LOCK flag must be set. */
+ 	GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));
+ 
+ 	if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
+@@ -860,18 +861,22 @@ __acquires(&gl->gl_lockref.lock)
+ 		set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
+ 		GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
+ 		gl->gl_target = gl->gl_demote_state;
++		do_xmote(gl, NULL, gl->gl_target);
++		return;
+ 	} else {
+ 		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
+ 			gfs2_demote_wake(gl);
+ 		if (do_promote(gl))
+ 			goto out_unlock;
+ 		gh = find_first_waiter(gl);
++		if (!gh)
++			goto out_unlock;
+ 		gl->gl_target = gh->gh_state;
+ 		if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
+ 			do_error(gl, 0); /* Fail queued try locks */
++		do_xmote(gl, gh, gl->gl_target);
++		return;
+ 	}
+-	do_xmote(gl, gh, gl->gl_target);
+-	return;
+ 
+ out_sched:
+ 	clear_bit(GLF_LOCK, &gl->gl_flags);
+diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
+index 667f67342c522f..f85f401526c546 100644
+--- a/fs/jbd2/recovery.c
++++ b/fs/jbd2/recovery.c
+@@ -287,19 +287,20 @@ static int fc_do_one_pass(journal_t *journal,
+ int jbd2_journal_recover(journal_t *journal)
+ {
+ 	int			err, err2;
+-	journal_superblock_t *	sb;
+-
+ 	struct recovery_info	info;
+ 
+ 	memset(&info, 0, sizeof(info));
+-	sb = journal->j_superblock;
+ 
+ 	/*
+ 	 * The journal superblock's s_start field (the current log head)
+ 	 * is always zero if, and only if, the journal was cleanly
+-	 * unmounted.
++	 * unmounted. We use its in-memory version j_tail here because
++	 * jbd2_journal_wipe() could have updated it without updating the
++	 * journal superblock.
+ 	 */
+-	if (!sb->s_start) {
++	if (!journal->j_tail) {
++		journal_superblock_t *sb = journal->j_superblock;
++
+ 		jbd2_debug(1, "No recovery required, last transaction %d, head block %u\n",
+ 			  be32_to_cpu(sb->s_sequence), be32_to_cpu(sb->s_head));
+ 		journal->j_transaction_sequence = be32_to_cpu(sb->s_sequence) + 1;
+diff --git a/fs/jbd2/revoke.c b/fs/jbd2/revoke.c
+index ce63d5fde9c3a8..f68fc8c255f007 100644
+--- a/fs/jbd2/revoke.c
++++ b/fs/jbd2/revoke.c
+@@ -345,7 +345,8 @@ int jbd2_journal_revoke(handle_t *handle, unsigned long long blocknr,
+ 	bh = bh_in;
+ 
+ 	if (!bh) {
+-		bh = __find_get_block(bdev, blocknr, journal->j_blocksize);
++		bh = __find_get_block_nonatomic(bdev, blocknr,
++						journal->j_blocksize);
+ 		if (bh)
+ 			BUFFER_TRACE(bh, "found on hash");
+ 	}
+@@ -355,7 +356,8 @@ int jbd2_journal_revoke(handle_t *handle, unsigned long long blocknr,
+ 
+ 		/* If there is a different buffer_head lying around in
+ 		 * memory anywhere... */
+-		bh2 = __find_get_block(bdev, blocknr, journal->j_blocksize);
++		bh2 = __find_get_block_nonatomic(bdev, blocknr,
++						 journal->j_blocksize);
+ 		if (bh2) {
+ 			/* ... and it has RevokeValid status... */
+ 			if (bh2 != bh && buffer_revokevalid(bh2))
+@@ -466,7 +468,8 @@ int jbd2_journal_cancel_revoke(handle_t *handle, struct journal_head *jh)
+ 	 * state machine will get very upset later on. */
+ 	if (need_cancel) {
+ 		struct buffer_head *bh2;
+-		bh2 = __find_get_block(bh->b_bdev, bh->b_blocknr, bh->b_size);
++		bh2 = __find_get_block_nonatomic(bh->b_bdev, bh->b_blocknr,
++						 bh->b_size);
+ 		if (bh2) {
+ 			if (bh2 != bh)
+ 				clear_buffer_revoked(bh2);
+@@ -495,9 +498,9 @@ void jbd2_clear_buffer_revoked_flags(journal_t *journal)
+ 			struct jbd2_revoke_record_s *record;
+ 			struct buffer_head *bh;
+ 			record = (struct jbd2_revoke_record_s *)list_entry;
+-			bh = __find_get_block(journal->j_fs_dev,
+-					      record->blocknr,
+-					      journal->j_blocksize);
++			bh = __find_get_block_nonatomic(journal->j_fs_dev,
++							record->blocknr,
++							journal->j_blocksize);
+ 			if (bh) {
+ 				clear_buffer_revoked(bh);
+ 				__brelse(bh);
+diff --git a/fs/namespace.c b/fs/namespace.c
+index c3c1e8c644f2e0..c1ac585e41e369 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -750,12 +750,8 @@ int __legitimize_mnt(struct vfsmount *bastard, unsigned seq)
+ 	smp_mb();		// see mntput_no_expire() and do_umount()
+ 	if (likely(!read_seqretry(&mount_lock, seq)))
+ 		return 0;
+-	if (bastard->mnt_flags & MNT_SYNC_UMOUNT) {
+-		mnt_add_count(mnt, -1);
+-		return 1;
+-	}
+ 	lock_mount_hash();
+-	if (unlikely(bastard->mnt_flags & MNT_DOOMED)) {
++	if (unlikely(bastard->mnt_flags & (MNT_SYNC_UMOUNT | MNT_DOOMED))) {
+ 		mnt_add_count(mnt, -1);
+ 		unlock_mount_hash();
+ 		return 1;
+diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
+index 325ba0663a6de2..8bdbc4dca89ca6 100644
+--- a/fs/nfs/delegation.c
++++ b/fs/nfs/delegation.c
+@@ -307,7 +307,8 @@ nfs_start_delegation_return_locked(struct nfs_inode *nfsi)
+ 	if (delegation == NULL)
+ 		goto out;
+ 	spin_lock(&delegation->lock);
+-	if (!test_and_set_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) {
++	if (delegation->inode &&
++	    !test_and_set_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) {
+ 		clear_bit(NFS_DELEGATION_RETURN_DELAYED, &delegation->flags);
+ 		/* Refcount matched in nfs_end_delegation_return() */
+ 		ret = nfs_get_delegation(delegation);
+diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
+index a1cfe4cc60c4b1..8f7ea4076653db 100644
+--- a/fs/nfs/flexfilelayout/flexfilelayout.c
++++ b/fs/nfs/flexfilelayout/flexfilelayout.c
+@@ -1263,6 +1263,7 @@ static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
+ 		case -ECONNRESET:
+ 		case -EHOSTDOWN:
+ 		case -EHOSTUNREACH:
++		case -ENETDOWN:
+ 		case -ENETUNREACH:
+ 		case -EADDRINUSE:
+ 		case -ENOBUFS:
+diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
+index 596f3517013728..330273cf945316 100644
+--- a/fs/nfs/inode.c
++++ b/fs/nfs/inode.c
+@@ -74,6 +74,8 @@ nfs_fattr_to_ino_t(struct nfs_fattr *fattr)
+ 
+ int nfs_wait_bit_killable(struct wait_bit_key *key, int mode)
+ {
++	if (unlikely(nfs_current_task_exiting()))
++		return -EINTR;
+ 	schedule();
+ 	if (signal_pending_state(mode, current))
+ 		return -ERESTARTSYS;
+diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
+index 8b568a514fd1c6..1be4be3d4a2b6b 100644
+--- a/fs/nfs/internal.h
++++ b/fs/nfs/internal.h
+@@ -901,6 +901,11 @@ static inline u32 nfs_stateid_hash(const nfs4_stateid *stateid)
+ 				NFS4_STATEID_OTHER_SIZE);
+ }
+ 
++static inline bool nfs_current_task_exiting(void)
++{
++	return (current->flags & PF_EXITING) != 0;
++}
++
+ static inline bool nfs_error_is_fatal(int err)
+ {
+ 	switch (err) {
+diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
+index 1566163c6d85b9..88b0fb343ae044 100644
+--- a/fs/nfs/nfs3proc.c
++++ b/fs/nfs/nfs3proc.c
+@@ -39,7 +39,7 @@ nfs3_rpc_wrapper(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
+ 		__set_current_state(TASK_KILLABLE|TASK_FREEZABLE_UNSAFE);
+ 		schedule_timeout(NFS_JUKEBOX_RETRY_TIME);
+ 		res = -ERESTARTSYS;
+-	} while (!fatal_signal_pending(current));
++	} while (!fatal_signal_pending(current) && !nfs_current_task_exiting());
+ 	return res;
+ }
+ 
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index ca01f79c82e4ad..11f2b5cb3b06b2 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -434,6 +434,8 @@ static int nfs4_delay_killable(long *timeout)
+ {
+ 	might_sleep();
+ 
++	if (unlikely(nfs_current_task_exiting()))
++		return -EINTR;
+ 	__set_current_state(TASK_KILLABLE|TASK_FREEZABLE_UNSAFE);
+ 	schedule_timeout(nfs4_update_delay(timeout));
+ 	if (!__fatal_signal_pending(current))
+@@ -445,6 +447,8 @@ static int nfs4_delay_interruptible(long *timeout)
+ {
+ 	might_sleep();
+ 
++	if (unlikely(nfs_current_task_exiting()))
++		return -EINTR;
+ 	__set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE_UNSAFE);
+ 	schedule_timeout(nfs4_update_delay(timeout));
+ 	if (!signal_pending(current))
+@@ -1765,7 +1769,8 @@ static void nfs_set_open_stateid_locked(struct nfs4_state *state,
+ 		rcu_read_unlock();
+ 		trace_nfs4_open_stateid_update_wait(state->inode, stateid, 0);
+ 
+-		if (!fatal_signal_pending(current)) {
++		if (!fatal_signal_pending(current) &&
++		    !nfs_current_task_exiting()) {
+ 			if (schedule_timeout(5*HZ) == 0)
+ 				status = -EAGAIN;
+ 			else
+@@ -3569,7 +3574,7 @@ static bool nfs4_refresh_open_old_stateid(nfs4_stateid *dst,
+ 		write_sequnlock(&state->seqlock);
+ 		trace_nfs4_close_stateid_update_wait(state->inode, dst, 0);
+ 
+-		if (fatal_signal_pending(current))
++		if (fatal_signal_pending(current) || nfs_current_task_exiting())
+ 			status = -EINTR;
+ 		else
+ 			if (schedule_timeout(5*HZ) != 0)
+diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
+index dafd61186557f8..397a86011878f6 100644
+--- a/fs/nfs/nfs4state.c
++++ b/fs/nfs/nfs4state.c
+@@ -2740,7 +2740,15 @@ static void nfs4_state_manager(struct nfs_client *clp)
+ 	pr_warn_ratelimited("NFS: state manager%s%s failed on NFSv4 server %s"
+ 			" with error %d\n", section_sep, section,
+ 			clp->cl_hostname, -status);
+-	ssleep(1);
++	switch (status) {
++	case -ENETDOWN:
++	case -ENETUNREACH:
++		nfs_mark_client_ready(clp, -EIO);
++		break;
++	default:
++		ssleep(1);
++		break;
++	}
+ out_drain:
+ 	memalloc_nofs_restore(memflags);
+ 	nfs4_end_drain_session(clp);
+diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
+index ac03fd3c330c66..ecd71c1908859d 100644
+--- a/fs/nilfs2/the_nilfs.c
++++ b/fs/nilfs2/the_nilfs.c
+@@ -693,8 +693,6 @@ int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb)
+ 	int blocksize;
+ 	int err;
+ 
+-	down_write(&nilfs->ns_sem);
+-
+ 	blocksize = sb_min_blocksize(sb, NILFS_MIN_BLOCK_SIZE);
+ 	if (!blocksize) {
+ 		nilfs_err(sb, "unable to set blocksize");
+@@ -767,7 +765,6 @@ int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb)
+ 	set_nilfs_init(nilfs);
+ 	err = 0;
+  out:
+-	up_write(&nilfs->ns_sem);
+ 	return err;
+ 
+  failed_sbh:
+diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
+index 2ebee1dced1b2d..c2a73bfb16aa46 100644
+--- a/fs/ocfs2/journal.c
++++ b/fs/ocfs2/journal.c
+@@ -1271,7 +1271,7 @@ static int ocfs2_force_read_journal(struct inode *inode)
+ 		}
+ 
+ 		for (i = 0; i < p_blocks; i++, p_blkno++) {
+-			bh = __find_get_block(osb->sb->s_bdev, p_blkno,
++			bh = __find_get_block_nonatomic(osb->sb->s_bdev, p_blkno,
+ 					osb->sb->s_blocksize);
+ 			/* block not cached. */
+ 			if (!bh)
+diff --git a/fs/orangefs/inode.c b/fs/orangefs/inode.c
+index aae6d2b8767df0..63d7c1ca0dfd35 100644
+--- a/fs/orangefs/inode.c
++++ b/fs/orangefs/inode.c
+@@ -23,9 +23,9 @@ static int orangefs_writepage_locked(struct page *page,
+ 	struct orangefs_write_range *wr = NULL;
+ 	struct iov_iter iter;
+ 	struct bio_vec bv;
+-	size_t len, wlen;
++	size_t wlen;
+ 	ssize_t ret;
+-	loff_t off;
++	loff_t len, off;
+ 
+ 	set_page_writeback(page);
+ 
+@@ -91,8 +91,7 @@ static int orangefs_writepages_work(struct orangefs_writepages *ow,
+ 	struct orangefs_write_range *wrp, wr;
+ 	struct iov_iter iter;
+ 	ssize_t ret;
+-	size_t len;
+-	loff_t off;
++	loff_t len, off;
+ 	int i;
+ 
+ 	len = i_size_read(inode);
+diff --git a/fs/pidfs.c b/fs/pidfs.c
+index 80675b6bf88459..52b7e4f7673274 100644
+--- a/fs/pidfs.c
++++ b/fs/pidfs.c
+@@ -95,20 +95,21 @@ static void pidfd_show_fdinfo(struct seq_file *m, struct file *f)
+ static __poll_t pidfd_poll(struct file *file, struct poll_table_struct *pts)
+ {
+ 	struct pid *pid = pidfd_pid(file);
+-	bool thread = file->f_flags & PIDFD_THREAD;
+ 	struct task_struct *task;
+ 	__poll_t poll_flags = 0;
+ 
+ 	poll_wait(file, &pid->wait_pidfd, pts);
+ 	/*
+-	 * Depending on PIDFD_THREAD, inform pollers when the thread
+-	 * or the whole thread-group exits.
++	 * Don't wake waiters if the thread-group leader exited
++	 * prematurely. They either get notified when the last subthread
++	 * exits or not at all if one of the remaining subthreads execs
++	 * and assumes the struct pid of the old thread-group leader.
+ 	 */
+ 	guard(rcu)();
+ 	task = pid_task(pid, PIDTYPE_PID);
+ 	if (!task)
+ 		poll_flags = EPOLLIN | EPOLLRDNORM | EPOLLHUP;
+-	else if (task->exit_state && (thread || thread_group_empty(task)))
++	else if (task->exit_state && !delay_group_leader(task))
+ 		poll_flags = EPOLLIN | EPOLLRDNORM;
+ 
+ 	return poll_flags;
+diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c
+index 56815799ce798e..9de6b280c4f411 100644
+--- a/fs/pstore/inode.c
++++ b/fs/pstore/inode.c
+@@ -265,7 +265,7 @@ static void parse_options(char *options)
+ static int pstore_show_options(struct seq_file *m, struct dentry *root)
+ {
+ 	if (kmsg_bytes != CONFIG_PSTORE_DEFAULT_KMSG_BYTES)
+-		seq_printf(m, ",kmsg_bytes=%lu", kmsg_bytes);
++		seq_printf(m, ",kmsg_bytes=%u", kmsg_bytes);
+ 	return 0;
+ }
+ 
+diff --git a/fs/pstore/internal.h b/fs/pstore/internal.h
+index 801d6c0b170c3a..a0fc511969100c 100644
+--- a/fs/pstore/internal.h
++++ b/fs/pstore/internal.h
+@@ -6,7 +6,7 @@
+ #include <linux/time.h>
+ #include <linux/pstore.h>
+ 
+-extern unsigned long kmsg_bytes;
++extern unsigned int kmsg_bytes;
+ 
+ #ifdef CONFIG_PSTORE_FTRACE
+ extern void pstore_register_ftrace(void);
+@@ -35,7 +35,7 @@ static inline void pstore_unregister_pmsg(void) {}
+ 
+ extern struct pstore_info *psinfo;
+ 
+-extern void	pstore_set_kmsg_bytes(int);
++extern void	pstore_set_kmsg_bytes(unsigned int bytes);
+ extern void	pstore_get_records(int);
+ extern void	pstore_get_backend_records(struct pstore_info *psi,
+ 					   struct dentry *root, int quiet);
+diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
+index f56b066ab80ce4..557cf9d40177f6 100644
+--- a/fs/pstore/platform.c
++++ b/fs/pstore/platform.c
+@@ -92,8 +92,8 @@ module_param(compress, charp, 0444);
+ MODULE_PARM_DESC(compress, "compression to use");
+ 
+ /* How much of the kernel log to snapshot */
+-unsigned long kmsg_bytes = CONFIG_PSTORE_DEFAULT_KMSG_BYTES;
+-module_param(kmsg_bytes, ulong, 0444);
++unsigned int kmsg_bytes = CONFIG_PSTORE_DEFAULT_KMSG_BYTES;
++module_param(kmsg_bytes, uint, 0444);
+ MODULE_PARM_DESC(kmsg_bytes, "amount of kernel log to snapshot (in bytes)");
+ 
+ static void *compress_workspace;
+@@ -107,9 +107,9 @@ static void *compress_workspace;
+ static char *big_oops_buf;
+ static size_t max_compressed_size;
+ 
+-void pstore_set_kmsg_bytes(int bytes)
++void pstore_set_kmsg_bytes(unsigned int bytes)
+ {
+-	kmsg_bytes = bytes;
++	WRITE_ONCE(kmsg_bytes, bytes);
+ }
+ 
+ /* Tag each group of saved records with a sequence number */
+@@ -278,6 +278,7 @@ static void pstore_dump(struct kmsg_dumper *dumper,
+ 			struct kmsg_dump_detail *detail)
+ {
+ 	struct kmsg_dump_iter iter;
++	unsigned int	remaining = READ_ONCE(kmsg_bytes);
+ 	unsigned long	total = 0;
+ 	const char	*why;
+ 	unsigned int	part = 1;
+@@ -300,7 +301,7 @@ static void pstore_dump(struct kmsg_dumper *dumper,
+ 	kmsg_dump_rewind(&iter);
+ 
+ 	oopscount++;
+-	while (total < kmsg_bytes) {
++	while (total < remaining) {
+ 		char *dst;
+ 		size_t dst_size;
+ 		int header_size;
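The pstore hunks above switch kmsg_bytes to unsigned int and pair WRITE_ONCE() in pstore_set_kmsg_bytes() with a single READ_ONCE() snapshot in pstore_dump(), so the dump loop's bound cannot change mid-dump. A hedged userspace illustration of the same snapshot pattern, with C11 atomics standing in for the kernel macros:

/*
 * One thread tunes a limit while another snapshots it once per dump.
 * Relaxed C11 atomics approximate READ_ONCE()/WRITE_ONCE() here.
 */
#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned int kmsg_bytes_demo = 10240;

static void set_limit(unsigned int bytes)
{
	/* kernel: WRITE_ONCE(kmsg_bytes, bytes); */
	atomic_store_explicit(&kmsg_bytes_demo, bytes, memory_order_relaxed);
}

static void dump(void)
{
	/* kernel: unsigned int remaining = READ_ONCE(kmsg_bytes);
	 * Snapshot once so the loop bound stays stable for this dump. */
	unsigned int remaining = atomic_load_explicit(&kmsg_bytes_demo,
						      memory_order_relaxed);

	printf("dumping up to %u bytes\n", remaining);
}

int main(void)
{
	set_limit(65536);
	dump();
	return 0;
}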
+diff --git a/fs/smb/client/cifsacl.c b/fs/smb/client/cifsacl.c
+index e36f0e2d7d21e2..9a73478e00688b 100644
+--- a/fs/smb/client/cifsacl.c
++++ b/fs/smb/client/cifsacl.c
+@@ -811,7 +811,23 @@ static void parse_dacl(struct smb_acl *pdacl, char *end_of_acl,
+ 			return;
+ 
+ 		for (i = 0; i < num_aces; ++i) {
++			if (end_of_acl - acl_base < acl_size)
++				break;
++
+ 			ppace[i] = (struct smb_ace *) (acl_base + acl_size);
++			acl_base = (char *)ppace[i];
++			acl_size = offsetof(struct smb_ace, sid) +
++				offsetof(struct smb_sid, sub_auth);
++
++			if (end_of_acl - acl_base < acl_size ||
++			    ppace[i]->sid.num_subauth == 0 ||
++			    ppace[i]->sid.num_subauth > SID_MAX_SUB_AUTHORITIES ||
++			    (end_of_acl - acl_base <
++			     acl_size + sizeof(__le32) * ppace[i]->sid.num_subauth) ||
++			    (le16_to_cpu(ppace[i]->size) <
++			     acl_size + sizeof(__le32) * ppace[i]->sid.num_subauth))
++				break;
++
+ #ifdef CONFIG_CIFS_DEBUG2
+ 			dump_ace(ppace[i], end_of_acl);
+ #endif
+@@ -855,7 +871,6 @@ static void parse_dacl(struct smb_acl *pdacl, char *end_of_acl,
+ 				(void *)ppace[i],
+ 				sizeof(struct smb_ace)); */
+ 
+-			acl_base = (char *)ppace[i];
+ 			acl_size = le16_to_cpu(ppace[i]->size);
+ 		}
+ 
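The parse_dacl() hardening above validates each variable-length ACE twice before touching it: first that the fixed header fits in the remaining buffer, then that the entry's declared size (including its sub-authority array) does too, and only then advances by that size. A simplified sketch of the same walk; the struct below is a stand-in, not the real struct smb_ace, and the demo assumes a little-endian host:

/* Bounds-checked walk over variable-length entries. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct ace_hdr {
	uint16_t size;		/* total size of this entry */
	uint8_t  num_subauth;	/* variable-length tail follows */
};

static int walk_aces(const char *base, const char *end, unsigned int num)
{
	size_t min_size = sizeof(struct ace_hdr);
	unsigned int i;

	for (i = 0; i < num; i++) {
		const struct ace_hdr *ace;

		if ((size_t)(end - base) < min_size)
			return -1;	/* fixed header would overrun */
		ace = (const struct ace_hdr *)base;
		if (ace->size < min_size ||
		    (size_t)(end - base) < ace->size)
			return -1;	/* declared size lies or overruns */
		base += ace->size;	/* advance by validated size */
	}
	return 0;
}

int main(void)
{
	char buf[4] = { 4, 0, 0, 0 };	/* one minimal, valid entry */

	printf("%d\n", walk_aces(buf, buf + sizeof(buf), 1));	/* 0 */
	return 0;
}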
+diff --git a/fs/smb/client/cifspdu.h b/fs/smb/client/cifspdu.h
+index ee78bb6741d62f..28f8ca470770d8 100644
+--- a/fs/smb/client/cifspdu.h
++++ b/fs/smb/client/cifspdu.h
+@@ -1226,10 +1226,9 @@ typedef struct smb_com_query_information_rsp {
+ typedef struct smb_com_setattr_req {
+ 	struct smb_hdr hdr; /* wct = 8 */
+ 	__le16 attr;
+-	__le16 time_low;
+-	__le16 time_high;
++	__le32 last_write_time;
+ 	__le16 reserved[5]; /* must be zero */
+-	__u16  ByteCount;
++	__le16 ByteCount;
+ 	__u8   BufferFormat; /* 4 = ASCII */
+ 	unsigned char fileName[];
+ } __attribute__((packed)) SETATTR_REQ;
+diff --git a/fs/smb/client/cifsproto.h b/fs/smb/client/cifsproto.h
+index 90b7b30abfbd87..6e938b17875f5c 100644
+--- a/fs/smb/client/cifsproto.h
++++ b/fs/smb/client/cifsproto.h
+@@ -31,6 +31,9 @@ extern void cifs_small_buf_release(void *);
+ extern void free_rsp_buf(int, void *);
+ extern int smb_send(struct TCP_Server_Info *, struct smb_hdr *,
+ 			unsigned int /* length */);
++extern int smb_send_kvec(struct TCP_Server_Info *server,
++			 struct msghdr *msg,
++			 size_t *sent);
+ extern unsigned int _get_xid(void);
+ extern void _free_xid(unsigned int);
+ #define get_xid()							\
+@@ -393,6 +396,10 @@ extern int CIFSSMBQFSUnixInfo(const unsigned int xid, struct cifs_tcon *tcon);
+ extern int CIFSSMBQFSPosixInfo(const unsigned int xid, struct cifs_tcon *tcon,
+ 			struct kstatfs *FSData);
+ 
++extern int SMBSetInformation(const unsigned int xid, struct cifs_tcon *tcon,
++			     const char *fileName, __le32 attributes, __le64 write_time,
++			     const struct nls_table *nls_codepage,
++			     struct cifs_sb_info *cifs_sb);
+ extern int CIFSSMBSetPathInfo(const unsigned int xid, struct cifs_tcon *tcon,
+ 			const char *fileName, const FILE_BASIC_INFO *data,
+ 			const struct nls_table *nls_codepage,
+diff --git a/fs/smb/client/cifssmb.c b/fs/smb/client/cifssmb.c
+index e2a14e25da87ce..8667f403a0ab61 100644
+--- a/fs/smb/client/cifssmb.c
++++ b/fs/smb/client/cifssmb.c
+@@ -5199,6 +5199,63 @@ CIFSSMBSetFileSize(const unsigned int xid, struct cifs_tcon *tcon,
+ 	return rc;
+ }
+ 
++int
++SMBSetInformation(const unsigned int xid, struct cifs_tcon *tcon,
++		  const char *fileName, __le32 attributes, __le64 write_time,
++		  const struct nls_table *nls_codepage,
++		  struct cifs_sb_info *cifs_sb)
++{
++	SETATTR_REQ *pSMB;
++	SETATTR_RSP *pSMBr;
++	struct timespec64 ts;
++	int bytes_returned;
++	int name_len;
++	int rc;
++
++	cifs_dbg(FYI, "In %s path %s\n", __func__, fileName);
++
++retry:
++	rc = smb_init(SMB_COM_SETATTR, 8, tcon, (void **) &pSMB,
++		      (void **) &pSMBr);
++	if (rc)
++		return rc;
++
++	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
++		name_len =
++			cifsConvertToUTF16((__le16 *) pSMB->fileName,
++					   fileName, PATH_MAX, nls_codepage,
++					   cifs_remap(cifs_sb));
++		name_len++;     /* trailing null */
++		name_len *= 2;
++	} else {
++		name_len = copy_path_name(pSMB->fileName, fileName);
++	}
++	/* Only a few attributes can be set by this command; others are not accepted by Win9x. */
++	pSMB->attr = cpu_to_le16(le32_to_cpu(attributes) &
++			(ATTR_READONLY | ATTR_HIDDEN | ATTR_SYSTEM | ATTR_ARCHIVE));
++	/* A zero write time value (in both NT and SETATTR formats) means do not change it. */
++	if (le64_to_cpu(write_time) != 0) {
++		ts = cifs_NTtimeToUnix(write_time);
++		pSMB->last_write_time = cpu_to_le32(ts.tv_sec);
++	}
++	pSMB->BufferFormat = 0x04;
++	name_len++; /* account for buffer type byte */
++	inc_rfc1001_len(pSMB, (__u16)name_len);
++	pSMB->ByteCount = cpu_to_le16(name_len);
++
++	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
++			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
++	if (rc)
++		cifs_dbg(FYI, "Send error in %s = %d\n", __func__, rc);
++
++	cifs_buf_release(pSMB);
++
++	if (rc == -EAGAIN)
++		goto retry;
++
++	return rc;
++}
++
+ /* Some legacy servers such as NT4 require that the file times be set on
+    an open handle, rather than by pathname - this is awkward due to
+    potential access conflicts on the open, but it is unavoidable for these
+diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
+index 112057c7ca11c6..8260d0e07a6283 100644
+--- a/fs/smb/client/connect.c
++++ b/fs/smb/client/connect.c
+@@ -3063,8 +3063,10 @@ ip_rfc1001_connect(struct TCP_Server_Info *server)
+ 	 * sessinit is sent but no second negprot
+ 	 */
+ 	struct rfc1002_session_packet req = {};
+-	struct smb_hdr *smb_buf = (struct smb_hdr *)&req;
++	struct msghdr msg = {};
++	struct kvec iov = {};
+ 	unsigned int len;
++	size_t sent;
+ 
+ 	req.trailer.session_req.called_len = sizeof(req.trailer.session_req.called_name);
+ 
+@@ -3093,10 +3095,18 @@ ip_rfc1001_connect(struct TCP_Server_Info *server)
+ 	 * As per rfc1002, @len must be the number of bytes that follows the
+ 	 * length field of a rfc1002 session request payload.
+ 	 */
+-	len = sizeof(req) - offsetof(struct rfc1002_session_packet, trailer.session_req);
++	len = sizeof(req.trailer.session_req);
++	req.type = RFC1002_SESSION_REQUEST;
++	req.flags = 0;
++	req.length = cpu_to_be16(len);
++	len += offsetof(typeof(req), trailer.session_req);
++	iov.iov_base = &req;
++	iov.iov_len = len;
++	iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &iov, 1, len);
++	rc = smb_send_kvec(server, &msg, &sent);
++	if (rc < 0 || len != sent)
++		return (rc == -EINTR || rc == -EAGAIN) ? rc : -ECONNABORTED;
+ 
+-	smb_buf->smb_buf_length = cpu_to_be32((RFC1002_SESSION_REQUEST << 24) | len);
+-	rc = smb_send(server, smb_buf, len);
+ 	/*
+ 	 * RFC1001 layer in at least one server requires very short break before
+ 	 * negprot presumably because not expecting negprot to follow so fast.
+@@ -3105,7 +3115,7 @@ ip_rfc1001_connect(struct TCP_Server_Info *server)
+ 	 */
+ 	usleep_range(1000, 2000);
+ 
+-	return rc;
++	return 0;
+ }
+ 
+ static int
+@@ -3957,11 +3967,13 @@ int
+ cifs_negotiate_protocol(const unsigned int xid, struct cifs_ses *ses,
+ 			struct TCP_Server_Info *server)
+ {
++	bool in_retry = false;
+ 	int rc = 0;
+ 
+ 	if (!server->ops->need_neg || !server->ops->negotiate)
+ 		return -ENOSYS;
+ 
++retry:
+ 	/* only send once per connect */
+ 	spin_lock(&server->srv_lock);
+ 	if (server->tcpStatus != CifsGood &&
+@@ -3981,6 +3993,14 @@ cifs_negotiate_protocol(const unsigned int xid, struct cifs_ses *ses,
+ 	spin_unlock(&server->srv_lock);
+ 
+ 	rc = server->ops->negotiate(xid, ses, server);
++	if (rc == -EAGAIN) {
++		/* Allow one retry attempt */
++		if (!in_retry) {
++			in_retry = true;
++			goto retry;
++		}
++		rc = -EHOSTDOWN;
++	}
+ 	if (rc == 0) {
+ 		spin_lock(&server->srv_lock);
+ 		if (server->tcpStatus == CifsInNegotiate)
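The ip_rfc1001_connect() rework above builds the RFC 1002 session request explicitly: a type byte, a flags byte, and a big-endian 16-bit length that covers only the payload after the header. A self-contained sketch of that framing with illustrative values:

/* RFC 1002 session request framing, simplified. */
#include <stdint.h>
#include <stdio.h>

#define RFC1002_SESSION_REQUEST 0x81

struct rfc1002_hdr {
	uint8_t  type;
	uint8_t  flags;
	uint16_t length;	/* big-endian length of the payload */
};

static uint16_t cpu_to_be16_demo(uint16_t v)
{
	return (uint16_t)((v >> 8) | (v << 8));	/* assumes little-endian host */
}

int main(void)
{
	uint16_t payload_len = 68;	/* e.g. two encoded NetBIOS names */
	struct rfc1002_hdr hdr = {
		.type   = RFC1002_SESSION_REQUEST,
		.flags  = 0,
		.length = cpu_to_be16_demo(payload_len),
	};

	printf("type=0x%02x, total on wire = %zu bytes\n",
	       hdr.type, sizeof(hdr) + payload_len);
	return 0;
}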
+diff --git a/fs/smb/client/fs_context.c b/fs/smb/client/fs_context.c
+index 69cca4f17dbaad..8b70d92f484580 100644
+--- a/fs/smb/client/fs_context.c
++++ b/fs/smb/client/fs_context.c
+@@ -1058,6 +1058,7 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
+ 	int i, opt;
+ 	bool is_smb3 = !strcmp(fc->fs_type->name, "smb3");
+ 	bool skip_parsing = false;
++	char *hostname;
+ 
+ 	cifs_dbg(FYI, "CIFS: parsing cifs mount option '%s'\n", param->key);
+ 
+@@ -1267,6 +1268,7 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
+ 	case Opt_rsize:
+ 		ctx->rsize = result.uint_32;
+ 		ctx->got_rsize = true;
++		ctx->vol_rsize = ctx->rsize;
+ 		break;
+ 	case Opt_wsize:
+ 		ctx->wsize = result.uint_32;
+@@ -1282,6 +1284,7 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
+ 					 ctx->wsize, PAGE_SIZE);
+ 			}
+ 		}
++		ctx->vol_wsize = ctx->wsize;
+ 		break;
+ 	case Opt_acregmax:
+ 		if (result.uint_32 > CIFS_MAX_ACTIMEO / HZ) {
+@@ -1388,6 +1391,16 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
+ 			cifs_errorf(fc, "OOM when copying UNC string\n");
+ 			goto cifs_parse_mount_err;
+ 		}
++		hostname = extract_hostname(ctx->UNC);
++		if (IS_ERR(hostname)) {
++			cifs_errorf(fc, "Cannot extract hostname from UNC string\n");
++			goto cifs_parse_mount_err;
++		}
++		/* last byte, type, is 0x20 for servr type */
++		/* last byte, type, is 0x20 for server type */
++		for (i = 0; i < RFC1001_NAME_LEN && hostname[i] != 0; i++)
++			ctx->target_rfc1001_name[i] = toupper(hostname[i]);
++		kfree(hostname);
+ 		break;
+ 	case Opt_user:
+ 		kfree(ctx->username);
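The fs_context.c hunk above derives the RFC 1001 called name from the UNC hostname: pad the 16-byte buffer with 0x20 (the final byte doubling as the server type), then copy up to RFC1001_NAME_LEN characters uppercased. A standalone sketch of that preparation with a hypothetical hostname:

/* Space-pad and uppercase a hostname into an RFC 1001 called name. */
#include <ctype.h>
#include <stdio.h>
#include <string.h>

#define RFC1001_NAME_LEN 15
#define RFC1001_NAME_LEN_WITH_NULL 16

int main(void)
{
	const char *hostname = "fileserver";	/* hypothetical */
	char name[RFC1001_NAME_LEN_WITH_NULL];
	int i;

	memset(name, 0x20, sizeof(name));	/* pad; last byte = type 0x20 */
	for (i = 0; i < RFC1001_NAME_LEN && hostname[i]; i++)
		name[i] = toupper((unsigned char)hostname[i]);

	printf("[%.16s]\n", name);		/* [FILESERVER      ] */
	return 0;
}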
+diff --git a/fs/smb/client/fs_context.h b/fs/smb/client/fs_context.h
+index ac6baa774ad3a9..c7e00025518f75 100644
+--- a/fs/smb/client/fs_context.h
++++ b/fs/smb/client/fs_context.h
+@@ -263,6 +263,9 @@ struct smb3_fs_context {
+ 	bool use_client_guid:1;
+ 	/* reuse existing guid for multichannel */
+ 	u8 client_guid[SMB2_CLIENT_GUID_SIZE];
++	/* User-specified original r/wsize value */
++	unsigned int vol_rsize;
++	unsigned int vol_wsize;
+ 	unsigned int bsize;
+ 	unsigned int rasize;
+ 	unsigned int rsize;
+diff --git a/fs/smb/client/link.c b/fs/smb/client/link.c
+index 47ddeb7fa1116b..aa45ef6ae99a61 100644
+--- a/fs/smb/client/link.c
++++ b/fs/smb/client/link.c
+@@ -257,7 +257,7 @@ cifs_query_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
+ 	struct cifs_open_parms oparms;
+ 	struct cifs_io_parms io_parms = {0};
+ 	int buf_type = CIFS_NO_BUFFER;
+-	FILE_ALL_INFO file_info;
++	struct cifs_open_info_data query_data;
+ 
+ 	oparms = (struct cifs_open_parms) {
+ 		.tcon = tcon,
+@@ -269,11 +269,11 @@ cifs_query_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
+ 		.fid = &fid,
+ 	};
+ 
+-	rc = CIFS_open(xid, &oparms, &oplock, &file_info);
++	rc = tcon->ses->server->ops->open(xid, &oparms, &oplock, &query_data);
+ 	if (rc)
+ 		return rc;
+ 
+-	if (file_info.EndOfFile != cpu_to_le64(CIFS_MF_SYMLINK_FILE_SIZE)) {
++	if (query_data.fi.EndOfFile != cpu_to_le64(CIFS_MF_SYMLINK_FILE_SIZE)) {
+ 		rc = -ENOENT;
+ 		/* it's not a symlink */
+ 		goto out;
+@@ -312,7 +312,7 @@ cifs_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
+ 		.fid = &fid,
+ 	};
+ 
+-	rc = CIFS_open(xid, &oparms, &oplock, NULL);
++	rc = tcon->ses->server->ops->open(xid, &oparms, &oplock, NULL);
+ 	if (rc)
+ 		return rc;
+ 
+diff --git a/fs/smb/client/readdir.c b/fs/smb/client/readdir.c
+index 50f96259d9adc2..787d6bcb5d1dc4 100644
+--- a/fs/smb/client/readdir.c
++++ b/fs/smb/client/readdir.c
+@@ -733,7 +733,10 @@ find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos,
+ 			else
+ 				cifs_buf_release(cfile->srch_inf.
+ 						ntwrk_buf_start);
++			/* Reset all pointers to the network buffer to prevent stale references */
+ 			cfile->srch_inf.ntwrk_buf_start = NULL;
++			cfile->srch_inf.srch_entries_start = NULL;
++			cfile->srch_inf.last_entry = NULL;
+ 		}
+ 		rc = initiate_cifs_search(xid, file, full_path);
+ 		if (rc) {
+@@ -756,11 +759,11 @@ find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos,
+ 		rc = server->ops->query_dir_next(xid, tcon, &cfile->fid,
+ 						 search_flags,
+ 						 &cfile->srch_inf);
++		if (rc)
++			return -ENOENT;
+ 		/* FindFirst/Next set last_entry to NULL on malformed reply */
+ 		if (cfile->srch_inf.last_entry)
+ 			cifs_save_resume_key(cfile->srch_inf.last_entry, cfile);
+-		if (rc)
+-			return -ENOENT;
+ 	}
+ 	if (index_to_find < cfile->srch_inf.index_of_last_entry) {
+ 		/* we found the buffer that contains the entry */
+diff --git a/fs/smb/client/smb1ops.c b/fs/smb/client/smb1ops.c
+index 55cceb82293236..0385a514f59e9f 100644
+--- a/fs/smb/client/smb1ops.c
++++ b/fs/smb/client/smb1ops.c
+@@ -426,13 +426,6 @@ cifs_negotiate(const unsigned int xid,
+ {
+ 	int rc;
+ 	rc = CIFSSMBNegotiate(xid, ses, server);
+-	if (rc == -EAGAIN) {
+-		/* retry only once on 1st time connection */
+-		set_credits(server, 1);
+-		rc = CIFSSMBNegotiate(xid, ses, server);
+-		if (rc == -EAGAIN)
+-			rc = -EHOSTDOWN;
+-	}
+ 	return rc;
+ }
+ 
+@@ -444,8 +437,8 @@ cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
+ 	unsigned int wsize;
+ 
+ 	/* start with specified wsize, or default */
+-	if (ctx->wsize)
+-		wsize = ctx->wsize;
++	if (ctx->got_wsize)
++		wsize = ctx->vol_wsize;
+ 	else if (tcon->unix_ext && (unix_cap & CIFS_UNIX_LARGE_WRITE_CAP))
+ 		wsize = CIFS_DEFAULT_IOSIZE;
+ 	else
+@@ -497,7 +490,7 @@ cifs_negotiate_rsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
+ 	else
+ 		defsize = server->maxBuf - sizeof(READ_RSP);
+ 
+-	rsize = ctx->rsize ? ctx->rsize : defsize;
++	rsize = ctx->got_rsize ? ctx->vol_rsize : defsize;
+ 
+ 	/*
+ 	 * no CAP_LARGE_READ_X? Then MS-CIFS states that we must limit this to
+@@ -548,24 +541,104 @@ static int cifs_query_path_info(const unsigned int xid,
+ 				const char *full_path,
+ 				struct cifs_open_info_data *data)
+ {
+-	int rc;
++	int rc = -EOPNOTSUPP;
+ 	FILE_ALL_INFO fi = {};
++	struct cifs_search_info search_info = {};
++	bool non_unicode_wildcard = false;
+ 
+ 	data->reparse_point = false;
+ 	data->adjust_tz = false;
+ 
+-	/* could do find first instead but this returns more info */
+-	rc = CIFSSMBQPathInfo(xid, tcon, full_path, &fi, 0 /* not legacy */, cifs_sb->local_nls,
+-			      cifs_remap(cifs_sb));
+ 	/*
+-	 * BB optimize code so we do not make the above call when server claims
+-	 * no NT SMB support and the above call failed at least once - set flag
+-	 * in tcon or mount.
++	 * First try the CIFSSMBQPathInfo() function, which returns more info
++	 * (NumberOfLinks) than the CIFSFindFirst() fallback function.
++	 * Some servers like Win9x do not support SMB_QUERY_FILE_ALL_INFO over
++	 * TRANS2_QUERY_PATH_INFORMATION, but support it with a filehandle over
++	 * TRANS2_QUERY_FILE_INFORMATION (function CIFSSMBQFileInfo()). But the
++	 * SMB Open command on non-NT servers works only for files, not for
++	 * directories. Moreover, the Win9x SMB server returns bogus data in
++	 * the SMB_QUERY_FILE_ALL_INFO Attributes field. So for non-NT servers,
++	 * do not use CIFSSMBQPathInfo() or CIFSSMBQFileInfo() at all.
++	 */
++	if (tcon->ses->capabilities & CAP_NT_SMBS)
++		rc = CIFSSMBQPathInfo(xid, tcon, full_path, &fi, 0 /* not legacy */,
++				      cifs_sb->local_nls, cifs_remap(cifs_sb));
++
++	/*
++	 * The non-UNICODE variants of the fallback functions below expand
++	 * wildcards, so they cannot be used to query paths containing wildcards.
+ 	 */
+-	if ((rc == -EOPNOTSUPP) || (rc == -EINVAL)) {
++	if (rc && !(tcon->ses->capabilities & CAP_UNICODE) && strpbrk(full_path, "*?\"><"))
++		non_unicode_wildcard = true;
++
++	/*
++	 * Then fall back to CIFSFindFirst(), which also works with non-NT
++	 * servers but does not provide NumberOfLinks.
++	 */
++	if ((rc == -EOPNOTSUPP || rc == -EINVAL) &&
++	    !non_unicode_wildcard) {
++		if (!(tcon->ses->capabilities & tcon->ses->server->vals->cap_nt_find))
++			search_info.info_level = SMB_FIND_FILE_INFO_STANDARD;
++		else
++			search_info.info_level = SMB_FIND_FILE_FULL_DIRECTORY_INFO;
++		rc = CIFSFindFirst(xid, tcon, full_path, cifs_sb, NULL,
++				   CIFS_SEARCH_CLOSE_ALWAYS | CIFS_SEARCH_CLOSE_AT_END,
++				   &search_info, false);
++		if (rc == 0) {
++			if (!(tcon->ses->capabilities & tcon->ses->server->vals->cap_nt_find)) {
++				FIND_FILE_STANDARD_INFO *di;
++				int offset = tcon->ses->server->timeAdj;
++
++				di = (FIND_FILE_STANDARD_INFO *)search_info.srch_entries_start;
++				fi.CreationTime = cpu_to_le64(cifs_UnixTimeToNT(cnvrtDosUnixTm(
++						di->CreationDate, di->CreationTime, offset)));
++				fi.LastAccessTime = cpu_to_le64(cifs_UnixTimeToNT(cnvrtDosUnixTm(
++						di->LastAccessDate, di->LastAccessTime, offset)));
++				fi.LastWriteTime = cpu_to_le64(cifs_UnixTimeToNT(cnvrtDosUnixTm(
++						di->LastWriteDate, di->LastWriteTime, offset)));
++				fi.ChangeTime = fi.LastWriteTime;
++				fi.Attributes = cpu_to_le32(le16_to_cpu(di->Attributes));
++				fi.AllocationSize = cpu_to_le64(le32_to_cpu(di->AllocationSize));
++				fi.EndOfFile = cpu_to_le64(le32_to_cpu(di->DataSize));
++			} else {
++				FILE_FULL_DIRECTORY_INFO *di;
++
++				di = (FILE_FULL_DIRECTORY_INFO *)search_info.srch_entries_start;
++				fi.CreationTime = di->CreationTime;
++				fi.LastAccessTime = di->LastAccessTime;
++				fi.LastWriteTime = di->LastWriteTime;
++				fi.ChangeTime = di->ChangeTime;
++				fi.Attributes = di->ExtFileAttributes;
++				fi.AllocationSize = di->AllocationSize;
++				fi.EndOfFile = di->EndOfFile;
++				fi.EASize = di->EaSize;
++			}
++			fi.NumberOfLinks = cpu_to_le32(1);
++			fi.DeletePending = 0;
++			fi.Directory = !!(le32_to_cpu(fi.Attributes) & ATTR_DIRECTORY);
++			cifs_buf_release(search_info.ntwrk_buf_start);
++		} else if (!full_path[0]) {
++			/*
++			 * CIFSFindFirst() does not work on the root path if
++			 * the root path was exported on the server from the
++			 * top-level path (drive letter).
++			 */
++			rc = -EOPNOTSUPP;
++		}
++	}
++
++	/*
++	 * If everything failed, then fall back to the legacy SMB command
++	 * SMB_COM_QUERY_INFORMATION, which works with all servers but
++	 * provides only limited information.
++	 */
++	if ((rc == -EOPNOTSUPP || rc == -EINVAL) && !non_unicode_wildcard) {
+ 		rc = SMBQueryInformation(xid, tcon, full_path, &fi, cifs_sb->local_nls,
+ 					 cifs_remap(cifs_sb));
+ 		data->adjust_tz = true;
++	} else if ((rc == -EOPNOTSUPP || rc == -EINVAL) && non_unicode_wildcard) {
++		/* Path with non-UNICODE wildcard character cannot exist. */
++		rc = -ENOENT;
+ 	}
+ 
+ 	if (!rc) {
+@@ -662,6 +735,13 @@ static int cifs_query_file_info(const unsigned int xid, struct cifs_tcon *tcon,
+ 	int rc;
+ 	FILE_ALL_INFO fi = {};
+ 
++	/*
++	 * CIFSSMBQFileInfo() on non-NT servers returns bogus data in the
++	 * Attributes field, so do not use this command for non-NT servers.
++	 */
++	if (!(tcon->ses->capabilities & CAP_NT_SMBS))
++		return -EOPNOTSUPP;
++
+ 	if (cfile->symlink_target) {
+ 		data->symlink_target = kstrdup(cfile->symlink_target, GFP_KERNEL);
+ 		if (!data->symlink_target)
+@@ -832,6 +912,9 @@ smb_set_file_info(struct inode *inode, const char *full_path,
+ 	struct cifs_fid fid;
+ 	struct cifs_open_parms oparms;
+ 	struct cifsFileInfo *open_file;
++	FILE_BASIC_INFO new_buf;
++	struct cifs_open_info_data query_data;
++	__le64 write_time = buf->LastWriteTime;
+ 	struct cifsInodeInfo *cinode = CIFS_I(inode);
+ 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+ 	struct tcon_link *tlink = NULL;
+@@ -839,20 +922,58 @@ smb_set_file_info(struct inode *inode, const char *full_path,
+ 
+ 	/* if the file is already open for write, just use that fileid */
+ 	open_file = find_writable_file(cinode, FIND_WR_FSUID_ONLY);
++
+ 	if (open_file) {
+ 		fid.netfid = open_file->fid.netfid;
+ 		netpid = open_file->pid;
+ 		tcon = tlink_tcon(open_file->tlink);
+-		goto set_via_filehandle;
++	} else {
++		tlink = cifs_sb_tlink(cifs_sb);
++		if (IS_ERR(tlink)) {
++			rc = PTR_ERR(tlink);
++			tlink = NULL;
++			goto out;
++		}
++		tcon = tlink_tcon(tlink);
+ 	}
+ 
+-	tlink = cifs_sb_tlink(cifs_sb);
+-	if (IS_ERR(tlink)) {
+-		rc = PTR_ERR(tlink);
+-		tlink = NULL;
+-		goto out;
++	/*
++	 * Non-NT servers interpret a zero time value in SMB_SET_FILE_BASIC_INFO
++	 * over TRANS2_SET_FILE_INFORMATION as a valid time value, while NT
++	 * servers interpret it as "do not change the existing value on the
++	 * server". The API of the ->set_file_info() callback expects that a
++	 * zero time value has the NT meaning - do not change. Therefore, if
++	 * the server is non-NT and some time values in "buf" are zero, fetch
++	 * the missing time values.
++	 */
++	if (!(tcon->ses->capabilities & CAP_NT_SMBS) &&
++	    (!buf->CreationTime || !buf->LastAccessTime ||
++	     !buf->LastWriteTime || !buf->ChangeTime)) {
++		rc = cifs_query_path_info(xid, tcon, cifs_sb, full_path, &query_data);
++		if (rc) {
++			if (open_file) {
++				cifsFileInfo_put(open_file);
++				open_file = NULL;
++			}
++			goto out;
++		}
++		/*
++		 * The original write_time from buf->LastWriteTime is preserved,
++		 * as SMBSetInformation() interprets zero as "do not change".
++		 */
++		new_buf = *buf;
++		buf = &new_buf;
++		if (!buf->CreationTime)
++			buf->CreationTime = query_data.fi.CreationTime;
++		if (!buf->LastAccessTime)
++			buf->LastAccessTime = query_data.fi.LastAccessTime;
++		if (!buf->LastWriteTime)
++			buf->LastWriteTime = query_data.fi.LastWriteTime;
++		if (!buf->ChangeTime)
++			buf->ChangeTime = query_data.fi.ChangeTime;
+ 	}
+-	tcon = tlink_tcon(tlink);
++
++	if (open_file)
++		goto set_via_filehandle;
+ 
+ 	rc = CIFSSMBSetPathInfo(xid, tcon, full_path, buf, cifs_sb->local_nls,
+ 				cifs_sb);
+@@ -873,8 +994,45 @@ smb_set_file_info(struct inode *inode, const char *full_path,
+ 		.fid = &fid,
+ 	};
+ 
+-	cifs_dbg(FYI, "calling SetFileInfo since SetPathInfo for times not supported by this server\n");
+-	rc = CIFS_open(xid, &oparms, &oplock, NULL);
++	if (S_ISDIR(inode->i_mode) && !(tcon->ses->capabilities & CAP_NT_SMBS)) {
++		/* Opening directory path is not possible on non-NT servers. */
++		rc = -EOPNOTSUPP;
++	} else {
++		/*
++		 * Use cifs_open_file() instead of CIFS_open(), as
++		 * cifs_open_file() selects the correct function, which
++		 * also works on non-NT servers.
++		 */
++		rc = cifs_open_file(xid, &oparms, &oplock, NULL);
++		/*
++		 * Opening a path for writing on non-NT servers is not
++		 * possible when the read-only attribute is already set;
++		 * a non-NT server returns -EACCES in this case. For those
++		 * servers, the only way to clear the read-only bit is via
++		 * the SMB_COM_SETATTR command.
++		 */
++		if (rc == -EACCES &&
++		    (cinode->cifsAttrs & ATTR_READONLY) &&
++		     le32_to_cpu(buf->Attributes) != 0 && /* 0 = do not change attrs */
++		     !(le32_to_cpu(buf->Attributes) & ATTR_READONLY) &&
++		     !(tcon->ses->capabilities & CAP_NT_SMBS))
++			rc = -EOPNOTSUPP;
++	}
++
++	/* Fall back to the SMB_COM_SETATTR command when absolutely needed. */
++	if (rc == -EOPNOTSUPP) {
++		cifs_dbg(FYI, "calling SetInformation since SetPathInfo for attrs/times not supported by this server\n");
++		rc = SMBSetInformation(xid, tcon, full_path,
++				       buf->Attributes != 0 ? buf->Attributes : cpu_to_le32(cinode->cifsAttrs),
++				       write_time,
++				       cifs_sb->local_nls, cifs_sb);
++		if (rc == 0)
++			cinode->cifsAttrs = le32_to_cpu(buf->Attributes);
++		else
++			rc = -EACCES;
++		goto out;
++	}
++
+ 	if (rc != 0) {
+ 		if (rc == -EIO)
+ 			rc = -EINVAL;
+@@ -882,6 +1040,7 @@ smb_set_file_info(struct inode *inode, const char *full_path,
+ 	}
+ 
+ 	netpid = current->tgid;
++	cifs_dbg(FYI, "calling SetFileInfo since SetPathInfo for attrs/times not supported by this server\n");
+ 
+ set_via_filehandle:
+ 	rc = CIFSSMBSetFileInfo(xid, tcon, buf, fid.netfid, netpid);
+@@ -892,6 +1051,21 @@ smb_set_file_info(struct inode *inode, const char *full_path,
+ 		CIFSSMBClose(xid, tcon, fid.netfid);
+ 	else
+ 		cifsFileInfo_put(open_file);
++
++	/*
++	 * Setting the read-only bit is not honored on non-NT servers when done
++	 * via open semantics, so use the SMB_COM_SETATTR command to set it.
++	 * This command works only after the file is closed, so use it only when
++	 * the operation was called without a filehandle.
++	 */
++	if (open_file == NULL &&
++	    !(tcon->ses->capabilities & CAP_NT_SMBS) &&
++	    le32_to_cpu(buf->Attributes) & ATTR_READONLY) {
++		SMBSetInformation(xid, tcon, full_path,
++				  buf->Attributes,
++				  0 /* do not change write time */,
++				  cifs_sb->local_nls, cifs_sb);
++	}
+ out:
+ 	if (tlink != NULL)
+ 		cifs_put_tlink(tlink);
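The CIFSFindFirst() fallback above feeds FIND_FILE_STANDARD_INFO DOS date/time fields through cnvrtDosUnixTm() before converting to NT time. A sketch of the DOS date/time bit layout that conversion starts from (date = years-since-1980/month/day, time = hours/minutes/2-second units); the timezone adjustment the kernel helper applies is omitted here:

/* Decode SMB/FAT-style DOS date and time fields. */
#include <stdint.h>
#include <stdio.h>

static void decode_dos_datetime(uint16_t date, uint16_t time)
{
	unsigned int day   = date & 0x1f;
	unsigned int month = (date >> 5) & 0x0f;
	unsigned int year  = 1980 + (date >> 9);
	unsigned int sec   = (time & 0x1f) * 2;	/* stored in 2 s units */
	unsigned int min   = (time >> 5) & 0x3f;
	unsigned int hour  = time >> 11;

	printf("%04u-%02u-%02u %02u:%02u:%02u\n",
	       year, month, day, hour, min, sec);
}

int main(void)
{
	/* 2024-07-24 09:17:54 encoded as hypothetical DOS fields */
	uint16_t date = (uint16_t)(((2024 - 1980) << 9) | (7 << 5) | 24);
	uint16_t time = (uint16_t)((9 << 11) | (17 << 5) | (54 / 2));

	decode_dos_datetime(date, time);
	return 0;
}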
+diff --git a/fs/smb/client/smb2file.c b/fs/smb/client/smb2file.c
+index e836bc2193ddd3..b313c128ffbabf 100644
+--- a/fs/smb/client/smb2file.c
++++ b/fs/smb/client/smb2file.c
+@@ -107,16 +107,25 @@ int smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms, __u32
+ 	int err_buftype = CIFS_NO_BUFFER;
+ 	struct cifs_fid *fid = oparms->fid;
+ 	struct network_resiliency_req nr_ioctl_req;
++	bool retry_without_read_attributes = false;
+ 
+ 	smb2_path = cifs_convert_path_to_utf16(oparms->path, oparms->cifs_sb);
+ 	if (smb2_path == NULL)
+ 		return -ENOMEM;
+ 
+-	oparms->desired_access |= FILE_READ_ATTRIBUTES;
++	if (!(oparms->desired_access & FILE_READ_ATTRIBUTES)) {
++		oparms->desired_access |= FILE_READ_ATTRIBUTES;
++		retry_without_read_attributes = true;
++	}
+ 	smb2_oplock = SMB2_OPLOCK_LEVEL_BATCH;
+ 
+ 	rc = SMB2_open(xid, oparms, smb2_path, &smb2_oplock, smb2_data, NULL, &err_iov,
+ 		       &err_buftype);
++	if (rc == -EACCES && retry_without_read_attributes) {
++		oparms->desired_access &= ~FILE_READ_ATTRIBUTES;
++		rc = SMB2_open(xid, oparms, smb2_path, &smb2_oplock, smb2_data, NULL, &err_iov,
++			       &err_buftype);
++	}
+ 	if (rc && data) {
+ 		struct smb2_hdr *hdr = err_iov.iov_base;
+ 
+diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
+index 590b70d71694be..74bcc51ccd32f8 100644
+--- a/fs/smb/client/smb2ops.c
++++ b/fs/smb/client/smb2ops.c
+@@ -464,12 +464,20 @@ smb2_negotiate(const unsigned int xid,
+ 	server->CurrentMid = 0;
+ 	spin_unlock(&server->mid_lock);
+ 	rc = SMB2_negotiate(xid, ses, server);
+-	/* BB we probably don't need to retry with modern servers */
+-	if (rc == -EAGAIN)
+-		rc = -EHOSTDOWN;
+ 	return rc;
+ }
+ 
++static inline unsigned int
++prevent_zero_iosize(unsigned int size, const char *type)
++{
++	if (size == 0) {
++		cifs_dbg(VFS, "SMB: Zero %ssize calculated, using minimum value %u\n",
++			 type, CIFS_MIN_DEFAULT_IOSIZE);
++		return CIFS_MIN_DEFAULT_IOSIZE;
++	}
++	return size;
++}
++
+ static unsigned int
+ smb2_negotiate_wsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
+ {
+@@ -477,12 +485,12 @@ smb2_negotiate_wsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
+ 	unsigned int wsize;
+ 
+ 	/* start with specified wsize, or default */
+-	wsize = ctx->wsize ? ctx->wsize : CIFS_DEFAULT_IOSIZE;
++	wsize = ctx->got_wsize ? ctx->vol_wsize : CIFS_DEFAULT_IOSIZE;
+ 	wsize = min_t(unsigned int, wsize, server->max_write);
+ 	if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
+ 		wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);
+ 
+-	return wsize;
++	return prevent_zero_iosize(wsize, "w");
+ }
+ 
+ static unsigned int
+@@ -492,7 +500,7 @@ smb3_negotiate_wsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
+ 	unsigned int wsize;
+ 
+ 	/* start with specified wsize, or default */
+-	wsize = ctx->wsize ? ctx->wsize : SMB3_DEFAULT_IOSIZE;
++	wsize = ctx->got_wsize ? ctx->vol_wsize : SMB3_DEFAULT_IOSIZE;
+ 	wsize = min_t(unsigned int, wsize, server->max_write);
+ #ifdef CONFIG_CIFS_SMB_DIRECT
+ 	if (server->rdma) {
+@@ -514,7 +522,7 @@ smb3_negotiate_wsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
+ 	if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
+ 		wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);
+ 
+-	return wsize;
++	return prevent_zero_iosize(wsize, "w");
+ }
+ 
+ static unsigned int
+@@ -524,13 +532,13 @@ smb2_negotiate_rsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
+ 	unsigned int rsize;
+ 
+ 	/* start with specified rsize, or default */
+-	rsize = ctx->rsize ? ctx->rsize : CIFS_DEFAULT_IOSIZE;
++	rsize = ctx->got_rsize ? ctx->vol_rsize : CIFS_DEFAULT_IOSIZE;
+ 	rsize = min_t(unsigned int, rsize, server->max_read);
+ 
+ 	if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
+ 		rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE);
+ 
+-	return rsize;
++	return prevent_zero_iosize(rsize, "r");
+ }
+ 
+ static unsigned int
+@@ -540,7 +548,7 @@ smb3_negotiate_rsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
+ 	unsigned int rsize;
+ 
+ 	/* start with specified rsize, or default */
+-	rsize = ctx->rsize ? ctx->rsize : SMB3_DEFAULT_IOSIZE;
++	rsize = ctx->got_rsize ? ctx->vol_rsize : SMB3_DEFAULT_IOSIZE;
+ 	rsize = min_t(unsigned int, rsize, server->max_read);
+ #ifdef CONFIG_CIFS_SMB_DIRECT
+ 	if (server->rdma) {
+@@ -563,7 +571,7 @@ smb3_negotiate_rsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
+ 	if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
+ 		rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE);
+ 
+-	return rsize;
++	return prevent_zero_iosize(rsize, "r");
+ }
+ 
+ /*
+diff --git a/fs/smb/client/transport.c b/fs/smb/client/transport.c
+index 91812150186c01..9f13a705f7f676 100644
+--- a/fs/smb/client/transport.c
++++ b/fs/smb/client/transport.c
+@@ -179,7 +179,7 @@ delete_mid(struct mid_q_entry *mid)
+  * Our basic "send data to server" function. Should be called with srv_mutex
+  * held. The caller is responsible for handling the results.
+  */
+-static int
++int
+ smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
+ 	      size_t *sent)
+ {
+diff --git a/fs/smb/common/smb2pdu.h b/fs/smb/common/smb2pdu.h
+index 9f272cc8f5660d..0a4ca286f41690 100644
+--- a/fs/smb/common/smb2pdu.h
++++ b/fs/smb/common/smb2pdu.h
+@@ -95,6 +95,9 @@
+  */
+ #define SMB3_DEFAULT_IOSIZE (4 * 1024 * 1024)
+ 
++/* According to the MS-SMB2 specification, the minimum recommended value is 65536. */
++#define CIFS_MIN_DEFAULT_IOSIZE (65536)
++
+ /*
+  * SMB2 Header Definition
+  *
+diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
+index f0760d786502fc..08d9a7cfba8cdc 100644
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -1448,7 +1448,7 @@ static int ntlm_authenticate(struct ksmbd_work *work,
+ {
+ 	struct ksmbd_conn *conn = work->conn;
+ 	struct ksmbd_session *sess = work->sess;
+-	struct channel *chann = NULL;
++	struct channel *chann = NULL, *old;
+ 	struct ksmbd_user *user;
+ 	u64 prev_id;
+ 	int sz, rc;
+@@ -1560,7 +1560,12 @@ static int ntlm_authenticate(struct ksmbd_work *work,
+ 				return -ENOMEM;
+ 
+ 			chann->conn = conn;
+-			xa_store(&sess->ksmbd_chann_list, (long)conn, chann, KSMBD_DEFAULT_GFP);
++			old = xa_store(&sess->ksmbd_chann_list, (long)conn, chann,
++					KSMBD_DEFAULT_GFP);
++			if (xa_is_err(old)) {
++				kfree(chann);
++				return xa_err(old);
++			}
+ 		}
+ 	}
+ 
+diff --git a/fs/smb/server/vfs.c b/fs/smb/server/vfs.c
+index e059316be36fd0..59ae63ab868574 100644
+--- a/fs/smb/server/vfs.c
++++ b/fs/smb/server/vfs.c
+@@ -426,10 +426,15 @@ static int ksmbd_vfs_stream_write(struct ksmbd_file *fp, char *buf, loff_t *pos,
+ 	ksmbd_debug(VFS, "write stream data pos : %llu, count : %zd\n",
+ 		    *pos, count);
+ 
++	if (*pos >= XATTR_SIZE_MAX) {
++		pr_err("stream write position %lld is out of bounds\n", *pos);
++		return -EINVAL;
++	}
++
+ 	size = *pos + count;
+ 	if (size > XATTR_SIZE_MAX) {
+ 		size = XATTR_SIZE_MAX;
+-		count = (*pos + count) - XATTR_SIZE_MAX;
++		count = XATTR_SIZE_MAX - *pos;
+ 	}
+ 
+ 	v_len = ksmbd_vfs_getcasexattr(idmap,
+@@ -443,13 +448,6 @@ static int ksmbd_vfs_stream_write(struct ksmbd_file *fp, char *buf, loff_t *pos,
+ 		goto out;
+ 	}
+ 
+-	if (v_len <= *pos) {
+-		pr_err("stream write position %lld is out of bounds (stream length: %zd)\n",
+-				*pos, v_len);
+-		err = -EINVAL;
+-		goto out;
+-	}
+-
+ 	if (v_len < size) {
+ 		wbuf = kvzalloc(size, KSMBD_DEFAULT_GFP);
+ 		if (!wbuf) {
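As a standalone illustration (plain userspace C, not part of the patch) of the clamp fix above: with pos = 65000 and count = 1000 against the 65536-byte xattr limit, the old expression produced the 464-byte overflow amount, while the fixed expression yields the 536 bytes that actually fit below the limit.

#include <stdio.h>

#define XATTR_SIZE_MAX 65536

int main(void)
{
	long long pos = 65000;	/* stream write offset */
	long long count = 1000;	/* requested byte count */

	/* old expression: yields the overflow amount (464), not a clamp */
	long long old_count = (pos + count) - XATTR_SIZE_MAX;
	/* fixed expression: bytes that still fit below the limit (536) */
	long long new_count = XATTR_SIZE_MAX - pos;

	printf("old=%lld new=%lld\n", old_count, new_count);
	return 0;
}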
+diff --git a/include/crypto/hash.h b/include/crypto/hash.h
+index 2d5ea9f9ff43eb..6692253f0b5be5 100644
+--- a/include/crypto/hash.h
++++ b/include/crypto/hash.h
+@@ -132,6 +132,7 @@ struct ahash_request {
+  *	      This is a counterpart to @init_tfm, used to remove
+  *	      various changes set in @init_tfm.
+  * @clone_tfm: Copy transform into new object, may allocate memory.
++ * @reqsize: Size of the request context.
+  * @halg: see struct hash_alg_common
+  */
+ struct ahash_alg {
+@@ -148,6 +149,8 @@ struct ahash_alg {
+ 	void (*exit_tfm)(struct crypto_ahash *tfm);
+ 	int (*clone_tfm)(struct crypto_ahash *dst, struct crypto_ahash *src);
+ 
++	unsigned int reqsize;
++
+ 	struct hash_alg_common halg;
+ };
+ 
+diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
+index 31ca88deb10d26..1ded9a8d4e84d7 100644
+--- a/include/drm/drm_atomic.h
++++ b/include/drm/drm_atomic.h
+@@ -376,8 +376,27 @@ struct drm_atomic_state {
+ 	 *
+ 	 * Allow full modeset. This is used by the ATOMIC IOCTL handler to
+ 	 * implement the DRM_MODE_ATOMIC_ALLOW_MODESET flag. Drivers should
+-	 * never consult this flag, instead looking at the output of
+-	 * drm_atomic_crtc_needs_modeset().
++	 * generally not consult this flag, but instead look at the output of
++	 * drm_atomic_crtc_needs_modeset(). The detailed rules are:
++	 *
++	 * - Drivers must not consult @allow_modeset in the atomic commit path.
++	 *   Use drm_atomic_crtc_needs_modeset() instead.
++	 *
++	 * - Drivers must consult @allow_modeset before adding unrelated struct
++	 *   drm_crtc_state to this commit by calling
++	 *   drm_atomic_get_crtc_state(). See also the warning in the
++	 *   documentation for that function.
++	 *
++	 * - Drivers must never change this flag, it is under the exclusive
++	 *   control of userspace.
++	 *
++	 * - Drivers may consult @allow_modeset in the atomic check path, if
++	 *   they have the choice between an optimal hardware configuration
++	 *   which requires a modeset, and a less optimal configuration which
++	 *   can be committed without a modeset. An example would be suboptimal
++	 *   scanout FIFO allocation resulting in increased idle power
++	 *   consumption. This allows userspace to avoid flickering and delays
++	 *   for the normal composition loop at reasonable cost.
+ 	 */
+ 	bool allow_modeset : 1;
+ 	/**
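A minimal sketch of the one use the expanded rules permit, consulting @allow_modeset in the atomic check path only; foo_fifo_realloc_optimal() and foo_fifo_keep_current() are hypothetical driver helpers, not real API:

static int foo_crtc_atomic_check(struct drm_crtc *crtc,
				 struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state =
		drm_atomic_get_new_crtc_state(state, crtc);

	/* The optimal FIFO split may require a modeset, so try it only
	 * when userspace has allowed one; otherwise keep the current,
	 * possibly suboptimal, allocation. */
	if (state->allow_modeset)
		return foo_fifo_realloc_optimal(crtc_state);

	return foo_fifo_keep_current(crtc_state);
}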
+diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
+index d8b86df2ec0dab..ff251745de18f3 100644
+--- a/include/drm/drm_gem.h
++++ b/include/drm/drm_gem.h
+@@ -35,6 +35,7 @@
+  */
+ 
+ #include <linux/kref.h>
++#include <linux/dma-buf.h>
+ #include <linux/dma-resv.h>
+ #include <linux/list.h>
+ #include <linux/mutex.h>
+@@ -570,6 +571,18 @@ static inline bool drm_gem_object_is_shared_for_memory_stats(struct drm_gem_obje
+ 	return (obj->handle_count > 1) || obj->dma_buf;
+ }
+ 
++/**
++ * drm_gem_is_imported() - Tests if GEM object's buffer has been imported
++ * @obj: the GEM object
++ *
++ * Returns:
++ * True if the GEM object's buffer has been imported, false otherwise
++ */
++static inline bool drm_gem_is_imported(const struct drm_gem_object *obj)
++{
++	return !!obj->import_attach;
++}
++
+ #ifdef CONFIG_LOCKDEP
+ /**
+  * drm_gem_gpuva_set_lock() - Set the lock protecting accesses to the gpuva list.
+diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
+index ce91d9b2acb9f8..7e029c82ae45f0 100644
+--- a/include/linux/bpf-cgroup.h
++++ b/include/linux/bpf-cgroup.h
+@@ -111,6 +111,7 @@ struct bpf_prog_list {
+ 	struct bpf_prog *prog;
+ 	struct bpf_cgroup_link *link;
+ 	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE];
++	u32 flags;
+ };
+ 
+ int cgroup_bpf_inherit(struct cgroup *cgrp);
+diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
+index 932139c5d46f5d..ffcd76d9777038 100644
+--- a/include/linux/buffer_head.h
++++ b/include/linux/buffer_head.h
+@@ -223,6 +223,8 @@ void __wait_on_buffer(struct buffer_head *);
+ wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
+ struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
+ 			unsigned size);
++struct buffer_head *__find_get_block_nonatomic(struct block_device *bdev,
++			sector_t block, unsigned size);
+ struct buffer_head *bdev_getblk(struct block_device *bdev, sector_t block,
+ 		unsigned size, gfp_t gfp);
+ void __brelse(struct buffer_head *);
+@@ -398,6 +400,12 @@ sb_find_get_block(struct super_block *sb, sector_t block)
+ 	return __find_get_block(sb->s_bdev, block, sb->s_blocksize);
+ }
+ 
++static inline struct buffer_head *
++sb_find_get_block_nonatomic(struct super_block *sb, sector_t block)
++{
++	return __find_get_block_nonatomic(sb->s_bdev, block, sb->s_blocksize);
++}
++
+ static inline void
+ map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block)
+ {
+diff --git a/include/linux/device.h b/include/linux/device.h
+index 667cb6db901934..39120b172992ed 100644
+--- a/include/linux/device.h
++++ b/include/linux/device.h
+@@ -26,9 +26,9 @@
+ #include <linux/atomic.h>
+ #include <linux/uidgid.h>
+ #include <linux/gfp.h>
+-#include <linux/overflow.h>
+ #include <linux/device/bus.h>
+ #include <linux/device/class.h>
++#include <linux/device/devres.h>
+ #include <linux/device/driver.h>
+ #include <linux/cleanup.h>
+ #include <asm/device.h>
+@@ -281,123 +281,6 @@ int __must_check device_create_bin_file(struct device *dev,
+ void device_remove_bin_file(struct device *dev,
+ 			    const struct bin_attribute *attr);
+ 
+-/* device resource management */
+-typedef void (*dr_release_t)(struct device *dev, void *res);
+-typedef int (*dr_match_t)(struct device *dev, void *res, void *match_data);
+-
+-void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp,
+-			  int nid, const char *name) __malloc;
+-#define devres_alloc(release, size, gfp) \
+-	__devres_alloc_node(release, size, gfp, NUMA_NO_NODE, #release)
+-#define devres_alloc_node(release, size, gfp, nid) \
+-	__devres_alloc_node(release, size, gfp, nid, #release)
+-
+-void devres_for_each_res(struct device *dev, dr_release_t release,
+-			 dr_match_t match, void *match_data,
+-			 void (*fn)(struct device *, void *, void *),
+-			 void *data);
+-void devres_free(void *res);
+-void devres_add(struct device *dev, void *res);
+-void *devres_find(struct device *dev, dr_release_t release,
+-		  dr_match_t match, void *match_data);
+-void *devres_get(struct device *dev, void *new_res,
+-		 dr_match_t match, void *match_data);
+-void *devres_remove(struct device *dev, dr_release_t release,
+-		    dr_match_t match, void *match_data);
+-int devres_destroy(struct device *dev, dr_release_t release,
+-		   dr_match_t match, void *match_data);
+-int devres_release(struct device *dev, dr_release_t release,
+-		   dr_match_t match, void *match_data);
+-
+-/* devres group */
+-void * __must_check devres_open_group(struct device *dev, void *id, gfp_t gfp);
+-void devres_close_group(struct device *dev, void *id);
+-void devres_remove_group(struct device *dev, void *id);
+-int devres_release_group(struct device *dev, void *id);
+-
+-/* managed devm_k.alloc/kfree for device drivers */
+-void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp) __alloc_size(2);
+-void *devm_krealloc(struct device *dev, void *ptr, size_t size,
+-		    gfp_t gfp) __must_check __realloc_size(3);
+-__printf(3, 0) char *devm_kvasprintf(struct device *dev, gfp_t gfp,
+-				     const char *fmt, va_list ap) __malloc;
+-__printf(3, 4) char *devm_kasprintf(struct device *dev, gfp_t gfp,
+-				    const char *fmt, ...) __malloc;
+-static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp)
+-{
+-	return devm_kmalloc(dev, size, gfp | __GFP_ZERO);
+-}
+-static inline void *devm_kmalloc_array(struct device *dev,
+-				       size_t n, size_t size, gfp_t flags)
+-{
+-	size_t bytes;
+-
+-	if (unlikely(check_mul_overflow(n, size, &bytes)))
+-		return NULL;
+-
+-	return devm_kmalloc(dev, bytes, flags);
+-}
+-static inline void *devm_kcalloc(struct device *dev,
+-				 size_t n, size_t size, gfp_t flags)
+-{
+-	return devm_kmalloc_array(dev, n, size, flags | __GFP_ZERO);
+-}
+-static inline __realloc_size(3, 4) void * __must_check
+-devm_krealloc_array(struct device *dev, void *p, size_t new_n, size_t new_size, gfp_t flags)
+-{
+-	size_t bytes;
+-
+-	if (unlikely(check_mul_overflow(new_n, new_size, &bytes)))
+-		return NULL;
+-
+-	return devm_krealloc(dev, p, bytes, flags);
+-}
+-
+-void devm_kfree(struct device *dev, const void *p);
+-char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) __malloc;
+-const char *devm_kstrdup_const(struct device *dev, const char *s, gfp_t gfp);
+-void *devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp)
+-	__realloc_size(3);
+-
+-unsigned long devm_get_free_pages(struct device *dev,
+-				  gfp_t gfp_mask, unsigned int order);
+-void devm_free_pages(struct device *dev, unsigned long addr);
+-
+-#ifdef CONFIG_HAS_IOMEM
+-void __iomem *devm_ioremap_resource(struct device *dev,
+-				    const struct resource *res);
+-void __iomem *devm_ioremap_resource_wc(struct device *dev,
+-				       const struct resource *res);
+-
+-void __iomem *devm_of_iomap(struct device *dev,
+-			    struct device_node *node, int index,
+-			    resource_size_t *size);
+-#else
+-
+-static inline
+-void __iomem *devm_ioremap_resource(struct device *dev,
+-				    const struct resource *res)
+-{
+-	return ERR_PTR(-EINVAL);
+-}
+-
+-static inline
+-void __iomem *devm_ioremap_resource_wc(struct device *dev,
+-				       const struct resource *res)
+-{
+-	return ERR_PTR(-EINVAL);
+-}
+-
+-static inline
+-void __iomem *devm_of_iomap(struct device *dev,
+-			    struct device_node *node, int index,
+-			    resource_size_t *size)
+-{
+-	return ERR_PTR(-EINVAL);
+-}
+-
+-#endif
+-
+ /* allows to add/remove a custom action to devres stack */
+ void devm_remove_action(struct device *dev, void (*action)(void *), void *data);
+ void devm_release_action(struct device *dev, void (*action)(void *), void *data);
+diff --git a/include/linux/device/devres.h b/include/linux/device/devres.h
+new file mode 100644
+index 00000000000000..9b49f991585086
+--- /dev/null
++++ b/include/linux/device/devres.h
+@@ -0,0 +1,129 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef _DEVICE_DEVRES_H_
++#define _DEVICE_DEVRES_H_
++
++#include <linux/err.h>
++#include <linux/gfp_types.h>
++#include <linux/numa.h>
++#include <linux/overflow.h>
++#include <linux/stdarg.h>
++#include <linux/types.h>
++
++struct device;
++struct device_node;
++struct resource;
++
++/* device resource management */
++typedef void (*dr_release_t)(struct device *dev, void *res);
++typedef int (*dr_match_t)(struct device *dev, void *res, void *match_data);
++
++void * __malloc
++__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid, const char *name);
++#define devres_alloc(release, size, gfp) \
++	__devres_alloc_node(release, size, gfp, NUMA_NO_NODE, #release)
++#define devres_alloc_node(release, size, gfp, nid) \
++	__devres_alloc_node(release, size, gfp, nid, #release)
++
++void devres_for_each_res(struct device *dev, dr_release_t release,
++			 dr_match_t match, void *match_data,
++			 void (*fn)(struct device *, void *, void *),
++			 void *data);
++void devres_free(void *res);
++void devres_add(struct device *dev, void *res);
++void *devres_find(struct device *dev, dr_release_t release, dr_match_t match, void *match_data);
++void *devres_get(struct device *dev, void *new_res, dr_match_t match, void *match_data);
++void *devres_remove(struct device *dev, dr_release_t release, dr_match_t match, void *match_data);
++int devres_destroy(struct device *dev, dr_release_t release, dr_match_t match, void *match_data);
++int devres_release(struct device *dev, dr_release_t release, dr_match_t match, void *match_data);
++
++/* devres group */
++void * __must_check devres_open_group(struct device *dev, void *id, gfp_t gfp);
++void devres_close_group(struct device *dev, void *id);
++void devres_remove_group(struct device *dev, void *id);
++int devres_release_group(struct device *dev, void *id);
++
++/* managed devm_k.alloc/kfree for device drivers */
++void * __alloc_size(2)
++devm_kmalloc(struct device *dev, size_t size, gfp_t gfp);
++void * __must_check __realloc_size(3)
++devm_krealloc(struct device *dev, void *ptr, size_t size, gfp_t gfp);
++static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp)
++{
++	return devm_kmalloc(dev, size, gfp | __GFP_ZERO);
++}
++static inline void *devm_kmalloc_array(struct device *dev, size_t n, size_t size, gfp_t flags)
++{
++	size_t bytes;
++
++	if (unlikely(check_mul_overflow(n, size, &bytes)))
++		return NULL;
++
++	return devm_kmalloc(dev, bytes, flags);
++}
++static inline void *devm_kcalloc(struct device *dev, size_t n, size_t size, gfp_t flags)
++{
++	return devm_kmalloc_array(dev, n, size, flags | __GFP_ZERO);
++}
++static inline __realloc_size(3, 4) void * __must_check
++devm_krealloc_array(struct device *dev, void *p, size_t new_n, size_t new_size, gfp_t flags)
++{
++	size_t bytes;
++
++	if (unlikely(check_mul_overflow(new_n, new_size, &bytes)))
++		return NULL;
++
++	return devm_krealloc(dev, p, bytes, flags);
++}
++
++void devm_kfree(struct device *dev, const void *p);
++
++void * __realloc_size(3)
++devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp);
++static inline void *devm_kmemdup_array(struct device *dev, const void *src,
++				       size_t n, size_t size, gfp_t flags)
++{
++	return devm_kmemdup(dev, src, size_mul(size, n), flags);
++}
++
++char * __malloc
++devm_kstrdup(struct device *dev, const char *s, gfp_t gfp);
++const char *devm_kstrdup_const(struct device *dev, const char *s, gfp_t gfp);
++char * __printf(3, 0) __malloc
++devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt, va_list ap);
++char * __printf(3, 4) __malloc
++devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...);
++
++unsigned long devm_get_free_pages(struct device *dev, gfp_t gfp_mask, unsigned int order);
++void devm_free_pages(struct device *dev, unsigned long addr);
++
++#ifdef CONFIG_HAS_IOMEM
++
++void __iomem *devm_ioremap_resource(struct device *dev, const struct resource *res);
++void __iomem *devm_ioremap_resource_wc(struct device *dev, const struct resource *res);
++
++void __iomem *devm_of_iomap(struct device *dev, struct device_node *node, int index,
++			    resource_size_t *size);
++#else
++
++static inline
++void __iomem *devm_ioremap_resource(struct device *dev, const struct resource *res)
++{
++	return IOMEM_ERR_PTR(-EINVAL);
++}
++
++static inline
++void __iomem *devm_ioremap_resource_wc(struct device *dev, const struct resource *res)
++{
++	return IOMEM_ERR_PTR(-EINVAL);
++}
++
++static inline
++void __iomem *devm_of_iomap(struct device *dev, struct device_node *node, int index,
++			    resource_size_t *size)
++{
++	return IOMEM_ERR_PTR(-EINVAL);
++}
++
++#endif
++
++#endif /* _DEVICE_DEVRES_H_ */
+diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
+index 1524da363734af..22b9099927fad6 100644
+--- a/include/linux/dma-mapping.h
++++ b/include/linux/dma-mapping.h
+@@ -633,10 +633,14 @@ static inline int dma_mmap_wc(struct device *dev,
+ #else
+ #define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
+ #define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
+-#define dma_unmap_addr(PTR, ADDR_NAME)           (0)
+-#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)  do { } while (0)
+-#define dma_unmap_len(PTR, LEN_NAME)             (0)
+-#define dma_unmap_len_set(PTR, LEN_NAME, VAL)    do { } while (0)
++#define dma_unmap_addr(PTR, ADDR_NAME)           \
++	({ typeof(PTR) __p __maybe_unused = PTR; 0; })
++#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)  \
++	do { typeof(PTR) __p __maybe_unused = PTR; } while (0)
++#define dma_unmap_len(PTR, LEN_NAME)             \
++	({ typeof(PTR) __p __maybe_unused = PTR; 0; })
++#define dma_unmap_len_set(PTR, LEN_NAME, VAL)    \
++	do { typeof(PTR) __p __maybe_unused = PTR; } while (0)
+ #endif
+ 
+ #endif /* _LINUX_DMA_MAPPING_H */
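A sketch of the warning the new stubs avoid, using hypothetical driver code (assumes the linux/dma-mapping.h context above): with the old definitions, a local passed only to dma_unmap_addr()/dma_unmap_len() went completely unreferenced on configurations without DMA unmap state, tripping -Wunused-variable; the typeof-based stubs keep the pointer "used" while still compiling to nothing.

struct rx_buf {
	void *data;
	DEFINE_DMA_UNMAP_ADDR(addr);	/* may expand to nothing */
	DEFINE_DMA_UNMAP_LEN(len);	/* may expand to nothing */
};

struct rx_ring {
	struct rx_buf bufs[256];
};

static void rx_buf_unmap(struct device *dev, struct rx_ring *ring, int i)
{
	struct rx_buf *buf = &ring->bufs[i];

	/* With the old (0)/empty stubs, 'buf' was never referenced here
	 * on such configs; now the macros evaluate it regardless. */
	dma_unmap_single(dev, dma_unmap_addr(buf, addr),
			 dma_unmap_len(buf, len), DMA_FROM_DEVICE);
}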
+diff --git a/include/linux/err.h b/include/linux/err.h
+index a4dacd745fcf41..1d60aa86db53b5 100644
+--- a/include/linux/err.h
++++ b/include/linux/err.h
+@@ -44,6 +44,9 @@ static inline void * __must_check ERR_PTR(long error)
+ /* Return the pointer in the percpu address space. */
+ #define ERR_PTR_PCPU(error) ((void __percpu *)(unsigned long)ERR_PTR(error))
+ 
++/* Cast an error pointer to __iomem. */
++#define IOMEM_ERR_PTR(error) (__force void __iomem *)ERR_PTR(error)
++
+ /**
+  * PTR_ERR - Extract the error code from an error pointer.
+  * @ptr: An error pointer.
+diff --git a/include/linux/highmem.h b/include/linux/highmem.h
+index 930a591b9b616c..3c3f6996f29fae 100644
+--- a/include/linux/highmem.h
++++ b/include/linux/highmem.h
+@@ -461,7 +461,7 @@ static inline void memcpy_from_folio(char *to, struct folio *folio,
+ 		const char *from = kmap_local_folio(folio, offset);
+ 		size_t chunk = len;
+ 
+-		if (folio_test_highmem(folio) &&
++		if (folio_test_partial_kmap(folio) &&
+ 		    chunk > PAGE_SIZE - offset_in_page(offset))
+ 			chunk = PAGE_SIZE - offset_in_page(offset);
+ 		memcpy(to, from, chunk);
+@@ -489,7 +489,7 @@ static inline void memcpy_to_folio(struct folio *folio, size_t offset,
+ 		char *to = kmap_local_folio(folio, offset);
+ 		size_t chunk = len;
+ 
+-		if (folio_test_highmem(folio) &&
++		if (folio_test_partial_kmap(folio) &&
+ 		    chunk > PAGE_SIZE - offset_in_page(offset))
+ 			chunk = PAGE_SIZE - offset_in_page(offset);
+ 		memcpy(to, from, chunk);
+@@ -522,7 +522,7 @@ static inline __must_check void *folio_zero_tail(struct folio *folio,
+ {
+ 	size_t len = folio_size(folio) - offset;
+ 
+-	if (folio_test_highmem(folio)) {
++	if (folio_test_partial_kmap(folio)) {
+ 		size_t max = PAGE_SIZE - offset_in_page(offset);
+ 
+ 		while (len > max) {
+@@ -560,7 +560,7 @@ static inline void folio_fill_tail(struct folio *folio, size_t offset,
+ 
+ 	VM_BUG_ON(offset + len > folio_size(folio));
+ 
+-	if (folio_test_highmem(folio)) {
++	if (folio_test_partial_kmap(folio)) {
+ 		size_t max = PAGE_SIZE - offset_in_page(offset);
+ 
+ 		while (len > max) {
+@@ -597,7 +597,7 @@ static inline size_t memcpy_from_file_folio(char *to, struct folio *folio,
+ 	size_t offset = offset_in_folio(folio, pos);
+ 	char *from = kmap_local_folio(folio, offset);
+ 
+-	if (folio_test_highmem(folio)) {
++	if (folio_test_partial_kmap(folio)) {
+ 		offset = offset_in_page(offset);
+ 		len = min_t(size_t, len, PAGE_SIZE - offset);
+ 	} else
+diff --git a/include/linux/io.h b/include/linux/io.h
+index 59ec5eea696c4f..40cb2de73f5ece 100644
+--- a/include/linux/io.h
++++ b/include/linux/io.h
+@@ -65,8 +65,6 @@ static inline void devm_ioport_unmap(struct device *dev, void __iomem *addr)
+ }
+ #endif
+ 
+-#define IOMEM_ERR_PTR(err) (__force void __iomem *)ERR_PTR(err)
+-
+ void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
+ 			   resource_size_t size);
+ void __iomem *devm_ioremap_uc(struct device *dev, resource_size_t offset,
+diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
+index a6e2aadbb91bd3..5aeeed22f35bfc 100644
+--- a/include/linux/ipv6.h
++++ b/include/linux/ipv6.h
+@@ -207,6 +207,7 @@ struct inet6_cork {
+ 	struct ipv6_txoptions *opt;
+ 	u8 hop_limit;
+ 	u8 tclass;
++	u8 dontfrag:1;
+ };
+ 
+ /* struct ipv6_pinfo - ipv6 private area */
+diff --git a/include/linux/lzo.h b/include/linux/lzo.h
+index e95c7d1092b286..4d30e3624acd23 100644
+--- a/include/linux/lzo.h
++++ b/include/linux/lzo.h
+@@ -24,10 +24,18 @@
+ int lzo1x_1_compress(const unsigned char *src, size_t src_len,
+ 		     unsigned char *dst, size_t *dst_len, void *wrkmem);
+ 
++/* Same as above, but never writes more than dst_len bytes to dst. */
++int lzo1x_1_compress_safe(const unsigned char *src, size_t src_len,
++			  unsigned char *dst, size_t *dst_len, void *wrkmem);
++
+ /* This requires 'wrkmem' of size LZO1X_1_MEM_COMPRESS */
+ int lzorle1x_1_compress(const unsigned char *src, size_t src_len,
+ 		     unsigned char *dst, size_t *dst_len, void *wrkmem);
+ 
++/* Same as above, but never writes more than dst_len bytes to dst. */
++int lzorle1x_1_compress_safe(const unsigned char *src, size_t src_len,
++			     unsigned char *dst, size_t *dst_len, void *wrkmem);
++
+ /* safe decompression with overrun testing */
+ int lzo1x_decompress_safe(const unsigned char *src, size_t src_len,
+ 			  unsigned char *dst, size_t *dst_len);
+diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h
+index f4dfc1871a95ba..d1c21ad6440d1a 100644
+--- a/include/linux/mfd/axp20x.h
++++ b/include/linux/mfd/axp20x.h
+@@ -135,6 +135,7 @@ enum axp20x_variants {
+ #define AXP717_IRQ2_STATE		0x4a
+ #define AXP717_IRQ3_STATE		0x4b
+ #define AXP717_IRQ4_STATE		0x4c
++#define AXP717_TS_PIN_CFG		0x50
+ #define AXP717_ICC_CHG_SET		0x62
+ #define AXP717_ITERM_CHG_SET		0x63
+ #define AXP717_CV_CHG_SET		0x64
+diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
+index 27f42f713c891c..86f0f2a25a3d63 100644
+--- a/include/linux/mlx4/device.h
++++ b/include/linux/mlx4/device.h
+@@ -1135,7 +1135,7 @@ int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+ int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+ 		       struct mlx4_buf *buf);
+ 
+-int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order);
++int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, unsigned int order);
+ void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db);
+ 
+ int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
+diff --git a/include/linux/mlx5/eswitch.h b/include/linux/mlx5/eswitch.h
+index df73a2ccc9af3d..67256e776566c6 100644
+--- a/include/linux/mlx5/eswitch.h
++++ b/include/linux/mlx5/eswitch.h
+@@ -147,6 +147,8 @@ u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw,
+ 
+ /* reuse tun_opts for the mapped ipsec obj id when tun_id is 0 (invalid) */
+ #define ESW_IPSEC_RX_MAPPED_ID_MASK GENMASK(ESW_TUN_OPTS_BITS - 1, 0)
++#define ESW_IPSEC_RX_MAPPED_ID_MATCH_MASK \
++	GENMASK(31 - ESW_RESERVED_BITS, ESW_ZONE_ID_BITS)
+ 
+ u8 mlx5_eswitch_mode(const struct mlx5_core_dev *dev);
+ u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev);
+diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
+index b744e554f014d1..db5c9ddef17026 100644
+--- a/include/linux/mlx5/fs.h
++++ b/include/linux/mlx5/fs.h
+@@ -40,6 +40,8 @@
+ 
+ #define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)
+ 
++#define MLX5_FS_MAX_POOL_SIZE BIT(30)
++
+ enum mlx5_flow_destination_type {
+ 	MLX5_FLOW_DESTINATION_TYPE_NONE,
+ 	MLX5_FLOW_DESTINATION_TYPE_VPORT,
+diff --git a/include/linux/mman.h b/include/linux/mman.h
+index a842783ffa62bd..03a91024622258 100644
+--- a/include/linux/mman.h
++++ b/include/linux/mman.h
+@@ -157,7 +157,9 @@ calc_vm_flag_bits(struct file *file, unsigned long flags)
+ 	return _calc_vm_trans(flags, MAP_GROWSDOWN,  VM_GROWSDOWN ) |
+ 	       _calc_vm_trans(flags, MAP_LOCKED,     VM_LOCKED    ) |
+ 	       _calc_vm_trans(flags, MAP_SYNC,	     VM_SYNC      ) |
++#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ 	       _calc_vm_trans(flags, MAP_STACK,	     VM_NOHUGEPAGE) |
++#endif
+ 	       arch_calc_vm_flag_bits(file, flags);
+ }
+ 
+diff --git a/include/linux/msi.h b/include/linux/msi.h
+index 59a421fc42bf07..63d0e51f7a8015 100644
+--- a/include/linux/msi.h
++++ b/include/linux/msi.h
+@@ -165,6 +165,10 @@ struct msi_desc_data {
+  * @dev:	Pointer to the device which uses this descriptor
+  * @msg:	The last set MSI message cached for reuse
+  * @affinity:	Optional pointer to a cpu affinity mask for this descriptor
++ * @iommu_msi_iova: Optional shifted IOVA from the IOMMU to override the msi_addr.
++ *                  Only used if iommu_msi_shift != 0
++ * @iommu_msi_shift: Indicates how many bits of the original address should be
++ *                   preserved when using iommu_msi_iova.
+  * @sysfs_attr:	Pointer to sysfs device attribute
+  *
+  * @write_msi_msg:	Callback that may be called when the MSI message
+@@ -183,7 +187,8 @@ struct msi_desc {
+ 	struct msi_msg			msg;
+ 	struct irq_affinity_desc	*affinity;
+ #ifdef CONFIG_IRQ_MSI_IOMMU
+-	const void			*iommu_cookie;
++	u64				iommu_msi_iova : 58;
++	u64				iommu_msi_shift : 6;
+ #endif
+ #ifdef CONFIG_SYSFS
+ 	struct device_attribute		*sysfs_attrs;
+@@ -284,28 +289,14 @@ struct msi_desc *msi_next_desc(struct device *dev, unsigned int domid,
+ 
+ #define msi_desc_to_dev(desc)		((desc)->dev)
+ 
+-#ifdef CONFIG_IRQ_MSI_IOMMU
+-static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc)
+-{
+-	return desc->iommu_cookie;
+-}
+-
+-static inline void msi_desc_set_iommu_cookie(struct msi_desc *desc,
+-					     const void *iommu_cookie)
+-{
+-	desc->iommu_cookie = iommu_cookie;
+-}
+-#else
+-static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc)
++static inline void msi_desc_set_iommu_msi_iova(struct msi_desc *desc, u64 msi_iova,
++					       unsigned int msi_shift)
+ {
+-	return NULL;
+-}
+-
+-static inline void msi_desc_set_iommu_cookie(struct msi_desc *desc,
+-					     const void *iommu_cookie)
+-{
+-}
++#ifdef CONFIG_IRQ_MSI_IOMMU
++	desc->iommu_msi_iova = msi_iova >> msi_shift;
++	desc->iommu_msi_shift = msi_shift;
+ #endif
++}
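For context, a hedged sketch of how a consumer could reassemble the translated address from the two packed bitfields (the helper name is illustrative; the exact composition used by the IRQ/IOMMU layers may differ):

static inline u64 msi_desc_translated_addr(const struct msi_desc *desc,
					   u64 orig_addr)
{
#ifdef CONFIG_IRQ_MSI_IOMMU
	if (desc->iommu_msi_shift)
		/* high bits come from the stored IOVA; the low
		 * iommu_msi_shift bits of the original are preserved */
		return (desc->iommu_msi_iova << desc->iommu_msi_shift) |
		       (orig_addr & ((1ULL << desc->iommu_msi_shift) - 1));
#endif
	return orig_addr;
}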
+ 
+ int msi_domain_insert_msi_desc(struct device *dev, unsigned int domid,
+ 			       struct msi_desc *init_desc);
+diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
+index a9244291f5067a..4e8c6d0511c56c 100644
+--- a/include/linux/page-flags.h
++++ b/include/linux/page-flags.h
+@@ -573,6 +573,13 @@ FOLIO_FLAG(readahead, FOLIO_HEAD_PAGE)
+ PAGEFLAG_FALSE(HighMem, highmem)
+ #endif
+ 
++/* Does kmap_local_folio() only allow access to one page of the folio? */
++#ifdef CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP
++#define folio_test_partial_kmap(f)	true
++#else
++#define folio_test_partial_kmap(f)	folio_test_highmem(f)
++#endif
++
+ #ifdef CONFIG_SWAP
+ static __always_inline bool folio_test_swapcache(const struct folio *folio)
+ {
+diff --git a/include/linux/pci-ats.h b/include/linux/pci-ats.h
+index 0e8b74e63767a6..75c6c86cf09dcb 100644
+--- a/include/linux/pci-ats.h
++++ b/include/linux/pci-ats.h
+@@ -42,6 +42,7 @@ int pci_enable_pasid(struct pci_dev *pdev, int features);
+ void pci_disable_pasid(struct pci_dev *pdev);
+ int pci_pasid_features(struct pci_dev *pdev);
+ int pci_max_pasids(struct pci_dev *pdev);
++int pci_pasid_status(struct pci_dev *pdev);
+ #else /* CONFIG_PCI_PASID */
+ static inline int pci_enable_pasid(struct pci_dev *pdev, int features)
+ { return -EINVAL; }
+@@ -50,6 +51,8 @@ static inline int pci_pasid_features(struct pci_dev *pdev)
+ { return -EINVAL; }
+ static inline int pci_max_pasids(struct pci_dev *pdev)
+ { return -EINVAL; }
++static inline int pci_pasid_status(struct pci_dev *pdev)
++{ return -EINVAL; }
+ #endif /* CONFIG_PCI_PASID */
+ 
+ #endif /* LINUX_PCI_ATS_H */
+diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
+index 0997077bcc52ad..ce64b4b937f068 100644
+--- a/include/linux/perf_event.h
++++ b/include/linux/perf_event.h
+@@ -1063,7 +1063,13 @@ struct perf_output_handle {
+ 	struct perf_buffer		*rb;
+ 	unsigned long			wakeup;
+ 	unsigned long			size;
+-	u64				aux_flags;
++	union {
++		u64			flags;		/* perf_output*() */
++		u64			aux_flags;	/* perf_aux_output*() */
++		struct {
++			u64		skip_read : 1;
++		};
++	};
+ 	union {
+ 		void			*addr;
+ 		unsigned long		head;
+diff --git a/include/linux/pnp.h b/include/linux/pnp.h
+index b7a7158aaf65e3..23fe3eaf242d63 100644
+--- a/include/linux/pnp.h
++++ b/include/linux/pnp.h
+@@ -290,7 +290,7 @@ static inline void pnp_set_drvdata(struct pnp_dev *pdev, void *data)
+ }
+ 
+ struct pnp_fixup {
+-	char id[7];
++	char id[8];
+ 	void (*quirk_function) (struct pnp_dev *dev);	/* fixup function */
+ };
+ 
+diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
+index bd69ddc102fbc5..0844ab3288519a 100644
+--- a/include/linux/rcupdate.h
++++ b/include/linux/rcupdate.h
+@@ -95,9 +95,9 @@ static inline void __rcu_read_lock(void)
+ 
+ static inline void __rcu_read_unlock(void)
+ {
+-	preempt_enable();
+ 	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
+ 		rcu_read_unlock_strict();
++	preempt_enable();
+ }
+ 
+ static inline int rcu_preempt_depth(void)
+diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
+index 90a684f94776ed..ae8b5cb475a362 100644
+--- a/include/linux/rcutree.h
++++ b/include/linux/rcutree.h
+@@ -104,7 +104,7 @@ extern int rcu_scheduler_active;
+ void rcu_end_inkernel_boot(void);
+ bool rcu_inkernel_boot_has_ended(void);
+ bool rcu_is_watching(void);
+-#ifndef CONFIG_PREEMPTION
++#ifndef CONFIG_PREEMPT_RCU
+ void rcu_all_qs(void);
+ #endif
+ 
+diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
+index 4b95663163e0b2..71ad766932d315 100644
+--- a/include/linux/spi/spi.h
++++ b/include/linux/spi/spi.h
+@@ -247,10 +247,7 @@ struct spi_device {
+ static_assert((SPI_MODE_KERNEL_MASK & SPI_MODE_USER_MASK) == 0,
+ 	      "SPI_MODE_USER_MASK & SPI_MODE_KERNEL_MASK must not overlap");
+ 
+-static inline struct spi_device *to_spi_device(const struct device *dev)
+-{
+-	return dev ? container_of(dev, struct spi_device, dev) : NULL;
+-}
++#define to_spi_device(__dev)	container_of_const(__dev, struct spi_device, dev)
+ 
+ /* Most drivers won't need to care about device refcounting */
+ static inline struct spi_device *spi_dev_get(struct spi_device *spi)
+diff --git a/include/linux/trace.h b/include/linux/trace.h
+index fdcd76b7be83d7..7eaad857dee04f 100644
+--- a/include/linux/trace.h
++++ b/include/linux/trace.h
+@@ -72,8 +72,8 @@ static inline int unregister_ftrace_export(struct trace_export *export)
+ static inline void trace_printk_init_buffers(void)
+ {
+ }
+-static inline int trace_array_printk(struct trace_array *tr, unsigned long ip,
+-				     const char *fmt, ...)
++static inline __printf(3, 4)
++int trace_array_printk(struct trace_array *tr, unsigned long ip, const char *fmt, ...)
+ {
+ 	return 0;
+ }
+diff --git a/include/linux/trace_seq.h b/include/linux/trace_seq.h
+index 1ef95c0287f05d..a93ed5ac322656 100644
+--- a/include/linux/trace_seq.h
++++ b/include/linux/trace_seq.h
+@@ -88,8 +88,8 @@ extern __printf(2, 3)
+ void trace_seq_printf(struct trace_seq *s, const char *fmt, ...);
+ extern __printf(2, 0)
+ void trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args);
+-extern void
+-trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary);
++extern __printf(2, 0)
++void trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary);
+ extern int trace_print_seq(struct seq_file *m, struct trace_seq *s);
+ extern int trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
+ 			     int cnt);
+@@ -113,8 +113,8 @@ static inline __printf(2, 3)
+ void trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
+ {
+ }
+-static inline void
+-trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary)
++static inline __printf(2, 0)
++void trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary)
+ {
+ }
+ 
+diff --git a/include/linux/usb/r8152.h b/include/linux/usb/r8152.h
+index 33a4c146dc19c4..2ca60828f28bb6 100644
+--- a/include/linux/usb/r8152.h
++++ b/include/linux/usb/r8152.h
+@@ -30,6 +30,7 @@
+ #define VENDOR_ID_NVIDIA		0x0955
+ #define VENDOR_ID_TPLINK		0x2357
+ #define VENDOR_ID_DLINK			0x2001
++#define VENDOR_ID_DELL			0x413c
+ #define VENDOR_ID_ASUS			0x0b05
+ 
+ #if IS_REACHABLE(CONFIG_USB_RTL8152)
+diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
+index 8daa0929865cfa..43343f1586d138 100644
+--- a/include/media/v4l2-subdev.h
++++ b/include/media/v4l2-subdev.h
+@@ -821,7 +821,9 @@ struct v4l2_subdev_state {
+  *		     possible configuration from the remote end, likely calling
+  *		     this operation as close as possible to stream on time. The
+  *		     operation shall fail if the pad index it has been called on
+- *		     is not valid or in case of unrecoverable failures.
++ *		     is not valid or in case of unrecoverable failures. The
++ *		     config argument has been memset to 0 just before calling
++ *		     the op.
+  *
+  * @set_routing: Enable or disable data connection routes described in the
+  *		 subdevice routing table. Subdevs that implement this operation
+diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
+index 941dc62f3027cc..8a712ca73f2b07 100644
+--- a/include/net/cfg80211.h
++++ b/include/net/cfg80211.h
+@@ -127,6 +127,8 @@ struct wiphy;
+  *	even if it is otherwise disabled.
+  * @IEEE80211_CHAN_ALLOW_6GHZ_VLP_AP: Allow using this channel for AP operation
+  *	with very low power (VLP), even if otherwise set to NO_IR.
++ * @IEEE80211_CHAN_ALLOW_20MHZ_ACTIVITY: Allow activity on a 20 MHz channel,
++ *	even if otherwise set to NO_IR.
+  */
+ enum ieee80211_channel_flags {
+ 	IEEE80211_CHAN_DISABLED			= BIT(0),
+@@ -155,6 +157,7 @@ enum ieee80211_channel_flags {
+ 	IEEE80211_CHAN_NO_6GHZ_AFC_CLIENT	= BIT(23),
+ 	IEEE80211_CHAN_CAN_MONITOR		= BIT(24),
+ 	IEEE80211_CHAN_ALLOW_6GHZ_VLP_AP	= BIT(25),
++	IEEE80211_CHAN_ALLOW_20MHZ_ACTIVITY     = BIT(26),
+ };
+ 
+ #define IEEE80211_CHAN_NO_HT40 \
+diff --git a/include/net/mac80211.h b/include/net/mac80211.h
+index 3b964f8834e719..fee854892bec58 100644
+--- a/include/net/mac80211.h
++++ b/include/net/mac80211.h
+@@ -7,7 +7,7 @@
+  * Copyright 2007-2010	Johannes Berg <johannes@sipsolutions.net>
+  * Copyright 2013-2014  Intel Mobile Communications GmbH
+  * Copyright (C) 2015 - 2017 Intel Deutschland GmbH
+- * Copyright (C) 2018 - 2024 Intel Corporation
++ * Copyright (C) 2018 - 2025 Intel Corporation
+  */
+ 
+ #ifndef MAC80211_H
+@@ -3803,7 +3803,7 @@ enum ieee80211_reconfig_type {
+  * @was_assoc: set if this call is due to deauth/disassoc
+  *	while just having been associated
+  * @link_id: the link id on which the frame will be TX'ed.
+- *	Only used with the mgd_prepare_tx() method.
++ *	0 for a non-MLO connection.
+  */
+ struct ieee80211_prep_tx_info {
+ 	u16 duration;
+diff --git a/include/net/xfrm.h b/include/net/xfrm.h
+index 83e9ef25b8d0d4..1484dd15a36941 100644
+--- a/include/net/xfrm.h
++++ b/include/net/xfrm.h
+@@ -233,7 +233,6 @@ struct xfrm_state {
+ 
+ 	/* Data for encapsulator */
+ 	struct xfrm_encap_tmpl	*encap;
+-	struct sock __rcu	*encap_sk;
+ 
+ 	/* NAT keepalive */
+ 	u32			nat_keepalive_interval; /* seconds */
+diff --git a/include/rdma/uverbs_std_types.h b/include/rdma/uverbs_std_types.h
+index fe05121169589f..555ea3d142a46b 100644
+--- a/include/rdma/uverbs_std_types.h
++++ b/include/rdma/uverbs_std_types.h
+@@ -34,7 +34,7 @@
+ static inline void *_uobj_get_obj_read(struct ib_uobject *uobj)
+ {
+ 	if (IS_ERR(uobj))
+-		return NULL;
++		return ERR_CAST(uobj);
+ 	return uobj->object;
+ }
+ #define uobj_get_obj_read(_object, _type, _id, _attrs)                         \
+diff --git a/include/sound/hda_codec.h b/include/sound/hda_codec.h
+index 575e55aa08ca93..c1fe6290d04dcb 100644
+--- a/include/sound/hda_codec.h
++++ b/include/sound/hda_codec.h
+@@ -195,6 +195,7 @@ struct hda_codec {
+ 	/* beep device */
+ 	struct hda_beep *beep;
+ 	unsigned int beep_mode;
++	bool beep_just_power_on;
+ 
+ 	/* widget capabilities cache */
+ 	u32 *wcaps;
+diff --git a/include/sound/pcm.h b/include/sound/pcm.h
+index 0bf7d25434d7f8..d9baf24b8cebfd 100644
+--- a/include/sound/pcm.h
++++ b/include/sound/pcm.h
+@@ -1428,6 +1428,8 @@ int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream, struct vm_area_s
+ #define snd_pcm_lib_mmap_iomem	NULL
+ #endif
+ 
++void snd_pcm_runtime_buffer_set_silence(struct snd_pcm_runtime *runtime);
++
+ /**
+  * snd_pcm_limit_isa_dma_size - Get the max size fitting with ISA DMA transfer
+  * @dma: DMA number
+diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
+index af6b3827fb1d01..3b16b0cc1b7a61 100644
+--- a/include/trace/events/btrfs.h
++++ b/include/trace/events/btrfs.h
+@@ -1924,7 +1924,7 @@ DECLARE_EVENT_CLASS(btrfs__prelim_ref,
+ 	TP_PROTO(const struct btrfs_fs_info *fs_info,
+ 		 const struct prelim_ref *oldref,
+ 		 const struct prelim_ref *newref, u64 tree_size),
+-	TP_ARGS(fs_info, newref, oldref, tree_size),
++	TP_ARGS(fs_info, oldref, newref, tree_size),
+ 
+ 	TP_STRUCT__entry_btrfs(
+ 		__field(	u64,  root_id		)
+diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
+index 4a939c90dc2e4b..552fd633f8200d 100644
+--- a/include/uapi/linux/bpf.h
++++ b/include/uapi/linux/bpf.h
+@@ -1206,6 +1206,7 @@ enum bpf_perf_event_type {
+ #define BPF_F_BEFORE		(1U << 3)
+ #define BPF_F_AFTER		(1U << 4)
+ #define BPF_F_ID		(1U << 5)
++#define BPF_F_PREORDER		(1U << 6)
+ #define BPF_F_LINK		BPF_F_LINK /* 1 << 13 */
+ 
+ /* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the
+diff --git a/include/uapi/linux/iommufd.h b/include/uapi/linux/iommufd.h
+index 8c4470742dcd99..41048271a0667c 100644
+--- a/include/uapi/linux/iommufd.h
++++ b/include/uapi/linux/iommufd.h
+@@ -504,9 +504,17 @@ enum iommu_hw_info_type {
+  *                                   IOMMU_HWPT_GET_DIRTY_BITMAP
+  *                                   IOMMU_HWPT_SET_DIRTY_TRACKING
+  *
++ * @IOMMU_HW_CAP_PCI_PASID_EXEC: Execute Permission Supported. The user
++ *                               must ignore it when the struct
++ *                               iommu_hw_info::out_max_pasid_log2 is zero.
++ * @IOMMU_HW_CAP_PCI_PASID_PRIV: Privileged Mode Supported. The user
++ *                               must ignore it when the struct
++ *                               iommu_hw_info::out_max_pasid_log2 is zero.
+  */
+ enum iommufd_hw_capabilities {
+ 	IOMMU_HW_CAP_DIRTY_TRACKING = 1 << 0,
++	IOMMU_HW_CAP_PCI_PASID_EXEC = 1 << 1,
++	IOMMU_HW_CAP_PCI_PASID_PRIV = 1 << 2,
+ };
+ 
+ /**
+@@ -522,6 +530,9 @@ enum iommufd_hw_capabilities {
+  *                 iommu_hw_info_type.
+  * @out_capabilities: Output the generic iommu capability info type as defined
+  *                    in the enum iommu_hw_capabilities.
++ * @out_max_pasid_log2: Output the width of PASIDs. 0 means no PASID support.
++ *                      PCI devices should consult out_capabilities to check
++ *                      whether a specific capability is supported.
+  * @__reserved: Must be 0
+  *
+  * Query an iommu type specific hardware information data from an iommu behind
+@@ -545,7 +556,8 @@ struct iommu_hw_info {
+ 	__u32 data_len;
+ 	__aligned_u64 data_uptr;
+ 	__u32 out_data_type;
+-	__u32 __reserved;
++	__u8 out_max_pasid_log2;
++	__u8 __reserved[3];
+ 	__aligned_u64 out_capabilities;
+ };
+ #define IOMMU_GET_HW_INFO _IO(IOMMUFD_TYPE, IOMMUFD_CMD_GET_HW_INFO)
+diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
+index f97f5adc8d5186..c2d7faf8d87fa3 100644
+--- a/include/uapi/linux/nl80211.h
++++ b/include/uapi/linux/nl80211.h
+@@ -4294,6 +4294,8 @@ enum nl80211_wmm_rule {
+  *	otherwise completely disabled.
+  * @NL80211_FREQUENCY_ATTR_ALLOW_6GHZ_VLP_AP: This channel can be used for a
+  *	very low power (VLP) AP, despite being NO_IR.
++ * @NL80211_FREQUENCY_ATTR_ALLOW_20MHZ_ACTIVITY: This channel can be active in
++ *	20 MHz bandwidth, despite being NO_IR.
+  * @NL80211_FREQUENCY_ATTR_MAX: highest frequency attribute number
+  *	currently defined
+  * @__NL80211_FREQUENCY_ATTR_AFTER_LAST: internal use
+@@ -4338,6 +4340,7 @@ enum nl80211_frequency_attr {
+ 	NL80211_FREQUENCY_ATTR_NO_6GHZ_AFC_CLIENT,
+ 	NL80211_FREQUENCY_ATTR_CAN_MONITOR,
+ 	NL80211_FREQUENCY_ATTR_ALLOW_6GHZ_VLP_AP,
++	NL80211_FREQUENCY_ATTR_ALLOW_20MHZ_ACTIVITY,
+ 
+ 	/* keep last */
+ 	__NL80211_FREQUENCY_ATTR_AFTER_LAST,
+@@ -4549,31 +4552,34 @@ enum nl80211_sched_scan_match_attr {
+  * @NL80211_RRF_NO_6GHZ_AFC_CLIENT: Client connection to AFC AP not allowed
+  * @NL80211_RRF_ALLOW_6GHZ_VLP_AP: Very low power (VLP) AP can be permitted
+  *	despite NO_IR configuration.
++ * @NL80211_RRF_ALLOW_20MHZ_ACTIVITY: Allow activity in 20 MHz bandwidth,
++ *	despite NO_IR configuration.
+  */
+ enum nl80211_reg_rule_flags {
+-	NL80211_RRF_NO_OFDM		= 1<<0,
+-	NL80211_RRF_NO_CCK		= 1<<1,
+-	NL80211_RRF_NO_INDOOR		= 1<<2,
+-	NL80211_RRF_NO_OUTDOOR		= 1<<3,
+-	NL80211_RRF_DFS			= 1<<4,
+-	NL80211_RRF_PTP_ONLY		= 1<<5,
+-	NL80211_RRF_PTMP_ONLY		= 1<<6,
+-	NL80211_RRF_NO_IR		= 1<<7,
+-	__NL80211_RRF_NO_IBSS		= 1<<8,
+-	NL80211_RRF_AUTO_BW		= 1<<11,
+-	NL80211_RRF_IR_CONCURRENT	= 1<<12,
+-	NL80211_RRF_NO_HT40MINUS	= 1<<13,
+-	NL80211_RRF_NO_HT40PLUS		= 1<<14,
+-	NL80211_RRF_NO_80MHZ		= 1<<15,
+-	NL80211_RRF_NO_160MHZ		= 1<<16,
+-	NL80211_RRF_NO_HE		= 1<<17,
+-	NL80211_RRF_NO_320MHZ		= 1<<18,
+-	NL80211_RRF_NO_EHT		= 1<<19,
+-	NL80211_RRF_PSD			= 1<<20,
+-	NL80211_RRF_DFS_CONCURRENT	= 1<<21,
+-	NL80211_RRF_NO_6GHZ_VLP_CLIENT	= 1<<22,
+-	NL80211_RRF_NO_6GHZ_AFC_CLIENT	= 1<<23,
+-	NL80211_RRF_ALLOW_6GHZ_VLP_AP	= 1<<24,
++	NL80211_RRF_NO_OFDM                 = 1 << 0,
++	NL80211_RRF_NO_CCK                  = 1 << 1,
++	NL80211_RRF_NO_INDOOR               = 1 << 2,
++	NL80211_RRF_NO_OUTDOOR              = 1 << 3,
++	NL80211_RRF_DFS                     = 1 << 4,
++	NL80211_RRF_PTP_ONLY                = 1 << 5,
++	NL80211_RRF_PTMP_ONLY               = 1 << 6,
++	NL80211_RRF_NO_IR                   = 1 << 7,
++	__NL80211_RRF_NO_IBSS               = 1 << 8,
++	NL80211_RRF_AUTO_BW                 = 1 << 11,
++	NL80211_RRF_IR_CONCURRENT           = 1 << 12,
++	NL80211_RRF_NO_HT40MINUS            = 1 << 13,
++	NL80211_RRF_NO_HT40PLUS             = 1 << 14,
++	NL80211_RRF_NO_80MHZ                = 1 << 15,
++	NL80211_RRF_NO_160MHZ               = 1 << 16,
++	NL80211_RRF_NO_HE                   = 1 << 17,
++	NL80211_RRF_NO_320MHZ               = 1 << 18,
++	NL80211_RRF_NO_EHT                  = 1 << 19,
++	NL80211_RRF_PSD                     = 1 << 20,
++	NL80211_RRF_DFS_CONCURRENT          = 1 << 21,
++	NL80211_RRF_NO_6GHZ_VLP_CLIENT      = 1 << 22,
++	NL80211_RRF_NO_6GHZ_AFC_CLIENT      = 1 << 23,
++	NL80211_RRF_ALLOW_6GHZ_VLP_AP       = 1 << 24,
++	NL80211_RRF_ALLOW_20MHZ_ACTIVITY    = 1 << 25,
+ };
+ 
+ #define NL80211_RRF_PASSIVE_SCAN	NL80211_RRF_NO_IR
+diff --git a/include/ufs/ufs_quirks.h b/include/ufs/ufs_quirks.h
+index 41ff44dfa1db3f..f52de5ed1b3b6e 100644
+--- a/include/ufs/ufs_quirks.h
++++ b/include/ufs/ufs_quirks.h
+@@ -107,4 +107,10 @@ struct ufs_dev_quirk {
+  */
+ #define UFS_DEVICE_QUIRK_DELAY_AFTER_LPM        (1 << 11)
+ 
++/*
++ * Some UFS devices may need more time to stay in hibern8 before exiting.
++ * Enable this quirk to allow an additional 100us for that.
++ */
++#define UFS_DEVICE_QUIRK_PA_HIBER8TIME          (1 << 12)
++
+ #endif /* UFS_QUIRKS_H_ */
+diff --git a/io_uring/fdinfo.c b/io_uring/fdinfo.c
+index 6b1247664b3555..ecdbe473a49f7a 100644
+--- a/io_uring/fdinfo.c
++++ b/io_uring/fdinfo.c
+@@ -83,11 +83,11 @@ __cold void io_uring_show_fdinfo(struct seq_file *m, struct file *file)
+ 	seq_printf(m, "SqMask:\t0x%x\n", sq_mask);
+ 	seq_printf(m, "SqHead:\t%u\n", sq_head);
+ 	seq_printf(m, "SqTail:\t%u\n", sq_tail);
+-	seq_printf(m, "CachedSqHead:\t%u\n", ctx->cached_sq_head);
++	seq_printf(m, "CachedSqHead:\t%u\n", data_race(ctx->cached_sq_head));
+ 	seq_printf(m, "CqMask:\t0x%x\n", cq_mask);
+ 	seq_printf(m, "CqHead:\t%u\n", cq_head);
+ 	seq_printf(m, "CqTail:\t%u\n", cq_tail);
+-	seq_printf(m, "CachedCqTail:\t%u\n", ctx->cached_cq_tail);
++	seq_printf(m, "CachedCqTail:\t%u\n", data_race(ctx->cached_cq_tail));
+ 	seq_printf(m, "SQEs:\t%u\n", sq_tail - sq_head);
+ 	sq_entries = min(sq_tail - sq_head, ctx->sq_entries);
+ 	for (i = 0; i < sq_entries; i++) {
+diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
+index 8ef0603c07f110..bd3b3f7a6f6cab 100644
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -630,6 +630,7 @@ static void __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool dying)
+ 		 * to care for a non-real case.
+ 		 */
+ 		if (need_resched()) {
++			ctx->cqe_sentinel = ctx->cqe_cached;
+ 			io_cq_unlock_post(ctx);
+ 			mutex_unlock(&ctx->uring_lock);
+ 			cond_resched();
+@@ -874,10 +875,15 @@ bool io_req_post_cqe(struct io_kiocb *req, s32 res, u32 cflags)
+ 	lockdep_assert(!io_wq_current_is_worker());
+ 	lockdep_assert_held(&ctx->uring_lock);
+ 
+-	__io_cq_lock(ctx);
+-	posted = io_fill_cqe_aux(ctx, req->cqe.user_data, res, cflags);
++	if (!ctx->lockless_cq) {
++		spin_lock(&ctx->completion_lock);
++		posted = io_fill_cqe_aux(ctx, req->cqe.user_data, res, cflags);
++		spin_unlock(&ctx->completion_lock);
++	} else {
++		posted = io_fill_cqe_aux(ctx, req->cqe.user_data, res, cflags);
++	}
++
+ 	ctx->submit_state.cq_flush = true;
+-	__io_cq_unlock_post(ctx);
+ 	return posted;
+ }
+ 
+diff --git a/io_uring/msg_ring.c b/io_uring/msg_ring.c
+index 7fd9badcfaf811..35b1b585e9cbe6 100644
+--- a/io_uring/msg_ring.c
++++ b/io_uring/msg_ring.c
+@@ -94,6 +94,7 @@ static int io_msg_remote_post(struct io_ring_ctx *ctx, struct io_kiocb *req,
+ 		kmem_cache_free(req_cachep, req);
+ 		return -EOWNERDEAD;
+ 	}
++	req->opcode = IORING_OP_NOP;
+ 	req->cqe.user_data = user_data;
+ 	io_req_set_res(req, res, cflags);
+ 	percpu_ref_get(&ctx->refs);
+diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c
+index b70d0eef8a284d..477947456371a2 100644
+--- a/kernel/bpf/bpf_struct_ops.c
++++ b/kernel/bpf/bpf_struct_ops.c
+@@ -147,39 +147,6 @@ void bpf_struct_ops_image_free(void *image)
+ }
+ 
+ #define MAYBE_NULL_SUFFIX "__nullable"
+-#define MAX_STUB_NAME 128
+-
+-/* Return the type info of a stub function, if it exists.
+- *
+- * The name of a stub function is made up of the name of the struct_ops and
+- * the name of the function pointer member, separated by "__". For example,
+- * if the struct_ops type is named "foo_ops" and the function pointer
+- * member is named "bar", the stub function name would be "foo_ops__bar".
+- */
+-static const struct btf_type *
+-find_stub_func_proto(const struct btf *btf, const char *st_op_name,
+-		     const char *member_name)
+-{
+-	char stub_func_name[MAX_STUB_NAME];
+-	const struct btf_type *func_type;
+-	s32 btf_id;
+-	int cp;
+-
+-	cp = snprintf(stub_func_name, MAX_STUB_NAME, "%s__%s",
+-		      st_op_name, member_name);
+-	if (cp >= MAX_STUB_NAME) {
+-		pr_warn("Stub function name too long\n");
+-		return NULL;
+-	}
+-	btf_id = btf_find_by_name_kind(btf, stub_func_name, BTF_KIND_FUNC);
+-	if (btf_id < 0)
+-		return NULL;
+-	func_type = btf_type_by_id(btf, btf_id);
+-	if (!func_type)
+-		return NULL;
+-
+-	return btf_type_by_id(btf, func_type->type); /* FUNC_PROTO */
+-}
+ 
+ /* Prepare argument info for every nullable argument of a member of a
+  * struct_ops type.
+@@ -204,27 +171,42 @@ find_stub_func_proto(const struct btf *btf, const char *st_op_name,
+ static int prepare_arg_info(struct btf *btf,
+ 			    const char *st_ops_name,
+ 			    const char *member_name,
+-			    const struct btf_type *func_proto,
++			    const struct btf_type *func_proto, void *stub_func_addr,
+ 			    struct bpf_struct_ops_arg_info *arg_info)
+ {
+ 	const struct btf_type *stub_func_proto, *pointed_type;
+ 	const struct btf_param *stub_args, *args;
+ 	struct bpf_ctx_arg_aux *info, *info_buf;
+ 	u32 nargs, arg_no, info_cnt = 0;
++	char ksym[KSYM_SYMBOL_LEN];
++	const char *stub_fname;
++	s32 stub_func_id;
+ 	u32 arg_btf_id;
+ 	int offset;
+ 
+-	stub_func_proto = find_stub_func_proto(btf, st_ops_name, member_name);
+-	if (!stub_func_proto)
+-		return 0;
++	stub_fname = kallsyms_lookup((unsigned long)stub_func_addr, NULL, NULL, NULL, ksym);
++	if (!stub_fname) {
++		pr_warn("Cannot find the stub function name for the %s in struct %s\n",
++			member_name, st_ops_name);
++		return -ENOENT;
++	}
++
++	stub_func_id = btf_find_by_name_kind(btf, stub_fname, BTF_KIND_FUNC);
++	if (stub_func_id < 0) {
++		pr_warn("Cannot find the stub function %s in btf\n", stub_fname);
++		return -ENOENT;
++	}
++
++	stub_func_proto = btf_type_by_id(btf, stub_func_id);
++	stub_func_proto = btf_type_by_id(btf, stub_func_proto->type);
+ 
+ 	/* Check if the number of arguments of the stub function is the same
+ 	 * as the number of arguments of the function pointer.
+ 	 */
+ 	nargs = btf_type_vlen(func_proto);
+ 	if (nargs != btf_type_vlen(stub_func_proto)) {
+-		pr_warn("the number of arguments of the stub function %s__%s does not match the number of arguments of the member %s of struct %s\n",
+-			st_ops_name, member_name, member_name, st_ops_name);
++		pr_warn("the number of arguments of the stub function %s does not match the number of arguments of the member %s of struct %s\n",
++			stub_fname, member_name, st_ops_name);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -254,21 +236,21 @@ static int prepare_arg_info(struct btf *btf,
+ 						    &arg_btf_id);
+ 		if (!pointed_type ||
+ 		    !btf_type_is_struct(pointed_type)) {
+-			pr_warn("stub function %s__%s has %s tagging to an unsupported type\n",
+-				st_ops_name, member_name, MAYBE_NULL_SUFFIX);
++			pr_warn("stub function %s has %s tagging to an unsupported type\n",
++				stub_fname, MAYBE_NULL_SUFFIX);
+ 			goto err_out;
+ 		}
+ 
+ 		offset = btf_ctx_arg_offset(btf, func_proto, arg_no);
+ 		if (offset < 0) {
+-			pr_warn("stub function %s__%s has an invalid trampoline ctx offset for arg#%u\n",
+-				st_ops_name, member_name, arg_no);
++			pr_warn("stub function %s has an invalid trampoline ctx offset for arg#%u\n",
++				stub_fname, arg_no);
+ 			goto err_out;
+ 		}
+ 
+ 		if (args[arg_no].type != stub_args[arg_no].type) {
+-			pr_warn("arg#%u type in stub function %s__%s does not match with its original func_proto\n",
+-				arg_no, st_ops_name, member_name);
++			pr_warn("arg#%u type in stub function %s does not match with its original func_proto\n",
++				arg_no, stub_fname);
+ 			goto err_out;
+ 		}
+ 
+@@ -325,6 +307,13 @@ static bool is_module_member(const struct btf *btf, u32 id)
+ 	return !strcmp(btf_name_by_offset(btf, t->name_off), "module");
+ }
+ 
++int bpf_struct_ops_supported(const struct bpf_struct_ops *st_ops, u32 moff)
++{
++	void *func_ptr = *(void **)(st_ops->cfi_stubs + moff);
++
++	return func_ptr ? 0 : -ENOTSUPP;
++}
++
+ int bpf_struct_ops_desc_init(struct bpf_struct_ops_desc *st_ops_desc,
+ 			     struct btf *btf,
+ 			     struct bpf_verifier_log *log)
+@@ -388,7 +377,10 @@ int bpf_struct_ops_desc_init(struct bpf_struct_ops_desc *st_ops_desc,
+ 
+ 	for_each_member(i, t, member) {
+ 		const struct btf_type *func_proto;
++		void **stub_func_addr;
++		u32 moff;
+ 
++		moff = __btf_member_bit_offset(t, member) / 8;
+ 		mname = btf_name_by_offset(btf, member->name_off);
+ 		if (!*mname) {
+ 			pr_warn("anon member in struct %s is not supported\n",
+@@ -414,7 +406,11 @@ int bpf_struct_ops_desc_init(struct bpf_struct_ops_desc *st_ops_desc,
+ 		func_proto = btf_type_resolve_func_ptr(btf,
+ 						       member->type,
+ 						       NULL);
+-		if (!func_proto)
++
++		/* The member is not a function pointer or
++		 * the function pointer is not supported.
++		 */
++		if (!func_proto || bpf_struct_ops_supported(st_ops, moff))
+ 			continue;
+ 
+ 		if (btf_distill_func_proto(log, btf,
+@@ -426,8 +422,9 @@ int bpf_struct_ops_desc_init(struct bpf_struct_ops_desc *st_ops_desc,
+ 			goto errout;
+ 		}
+ 
++		stub_func_addr = *(void **)(st_ops->cfi_stubs + moff);
+ 		err = prepare_arg_info(btf, st_ops->name, mname,
+-				       func_proto,
++				       func_proto, stub_func_addr,
+ 				       arg_info + i);
+ 		if (err)
+ 			goto errout;
+@@ -1153,13 +1150,6 @@ void bpf_struct_ops_put(const void *kdata)
+ 	bpf_map_put(&st_map->map);
+ }
+ 
+-int bpf_struct_ops_supported(const struct bpf_struct_ops *st_ops, u32 moff)
+-{
+-	void *func_ptr = *(void **)(st_ops->cfi_stubs + moff);
+-
+-	return func_ptr ? 0 : -ENOTSUPP;
+-}
+-
+ static bool bpf_struct_ops_valid_to_reg(struct bpf_map *map)
+ {
+ 	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
+diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
+index 025d7e2214aeb4..c0d606c40195d4 100644
+--- a/kernel/bpf/cgroup.c
++++ b/kernel/bpf/cgroup.c
+@@ -369,7 +369,7 @@ static struct bpf_prog *prog_list_prog(struct bpf_prog_list *pl)
+ /* count number of elements in the list.
+  * it's slow but the list cannot be long
+  */
+-static u32 prog_list_length(struct hlist_head *head)
++static u32 prog_list_length(struct hlist_head *head, int *preorder_cnt)
+ {
+ 	struct bpf_prog_list *pl;
+ 	u32 cnt = 0;
+@@ -377,6 +377,8 @@ static u32 prog_list_length(struct hlist_head *head)
+ 	hlist_for_each_entry(pl, head, node) {
+ 		if (!prog_list_prog(pl))
+ 			continue;
++		if (preorder_cnt && (pl->flags & BPF_F_PREORDER))
++			(*preorder_cnt)++;
+ 		cnt++;
+ 	}
+ 	return cnt;
+@@ -400,7 +402,7 @@ static bool hierarchy_allows_attach(struct cgroup *cgrp,
+ 
+ 		if (flags & BPF_F_ALLOW_MULTI)
+ 			return true;
+-		cnt = prog_list_length(&p->bpf.progs[atype]);
++		cnt = prog_list_length(&p->bpf.progs[atype], NULL);
+ 		WARN_ON_ONCE(cnt > 1);
+ 		if (cnt == 1)
+ 			return !!(flags & BPF_F_ALLOW_OVERRIDE);
+@@ -423,12 +425,12 @@ static int compute_effective_progs(struct cgroup *cgrp,
+ 	struct bpf_prog_array *progs;
+ 	struct bpf_prog_list *pl;
+ 	struct cgroup *p = cgrp;
+-	int cnt = 0;
++	int i, j, cnt = 0, preorder_cnt = 0, fstart, bstart, init_bstart;
+ 
+ 	/* count number of effective programs by walking parents */
+ 	do {
+ 		if (cnt == 0 || (p->bpf.flags[atype] & BPF_F_ALLOW_MULTI))
+-			cnt += prog_list_length(&p->bpf.progs[atype]);
++			cnt += prog_list_length(&p->bpf.progs[atype], &preorder_cnt);
+ 		p = cgroup_parent(p);
+ 	} while (p);
+ 
+@@ -439,20 +441,34 @@ static int compute_effective_progs(struct cgroup *cgrp,
+ 	/* populate the array with effective progs */
+ 	cnt = 0;
+ 	p = cgrp;
++	fstart = preorder_cnt;
++	bstart = preorder_cnt - 1;
+ 	do {
+ 		if (cnt > 0 && !(p->bpf.flags[atype] & BPF_F_ALLOW_MULTI))
+ 			continue;
+ 
++		init_bstart = bstart;
+ 		hlist_for_each_entry(pl, &p->bpf.progs[atype], node) {
+ 			if (!prog_list_prog(pl))
+ 				continue;
+ 
+-			item = &progs->items[cnt];
++			if (pl->flags & BPF_F_PREORDER) {
++				item = &progs->items[bstart];
++				bstart--;
++			} else {
++				item = &progs->items[fstart];
++				fstart++;
++			}
+ 			item->prog = prog_list_prog(pl);
+ 			bpf_cgroup_storages_assign(item->cgroup_storage,
+ 						   pl->storage);
+ 			cnt++;
+ 		}
++
++		/* reverse pre-ordering progs at this cgroup level */
++		for (i = bstart + 1, j = init_bstart; i < j; i++, j--)
++			swap(progs->items[i], progs->items[j]);
++
+ 	} while ((p = cgroup_parent(p)));
+ 
+ 	*array = progs;
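A standalone toy model (userspace C, not kernel code) of the placement scheme above: preorder entries are written backwards from the end of the preorder region, regular entries forwards after it, and each level's preorder segment is then reversed so attach order is preserved.

#include <stdio.h>

static void swap_int(int *a, int *b) { int t = *a; *a = *b; *b = t; }

int main(void)
{
	int prog[5] = { 1, 2, 3, 4, 5 };	/* attach order, one level */
	int pre[5]  = { 1, 0, 1, 0, 1 };	/* 1 = BPF_F_PREORDER      */
	int out[5], i, j;
	int preorder_cnt = 3;
	int fstart = preorder_cnt, bstart = preorder_cnt - 1;
	int init_bstart = bstart;

	for (i = 0; i < 5; i++) {
		if (pre[i])
			out[bstart--] = prog[i];	/* fill backwards */
		else
			out[fstart++] = prog[i];	/* fill forwards  */
	}
	/* restore attach order inside this level's preorder segment */
	for (i = bstart + 1, j = init_bstart; i < j; i++, j--)
		swap_int(&out[i], &out[j]);

	for (i = 0; i < 5; i++)
		printf("%d ", out[i]);	/* prints: 1 3 5 2 4 */
	printf("\n");
	return 0;
}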
+@@ -663,7 +679,7 @@ static int __cgroup_bpf_attach(struct cgroup *cgrp,
+ 		 */
+ 		return -EPERM;
+ 
+-	if (prog_list_length(progs) >= BPF_CGROUP_MAX_PROGS)
++	if (prog_list_length(progs, NULL) >= BPF_CGROUP_MAX_PROGS)
+ 		return -E2BIG;
+ 
+ 	pl = find_attach_entry(progs, prog, link, replace_prog,
+@@ -698,6 +714,7 @@ static int __cgroup_bpf_attach(struct cgroup *cgrp,
+ 
+ 	pl->prog = prog;
+ 	pl->link = link;
++	pl->flags = flags;
+ 	bpf_cgroup_storages_assign(pl->storage, storage);
+ 	cgrp->bpf.flags[atype] = saved_flags;
+ 
+@@ -1073,7 +1090,7 @@ static int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
+ 							      lockdep_is_held(&cgroup_mutex));
+ 			total_cnt += bpf_prog_array_length(effective);
+ 		} else {
+-			total_cnt += prog_list_length(&cgrp->bpf.progs[atype]);
++			total_cnt += prog_list_length(&cgrp->bpf.progs[atype], NULL);
+ 		}
+ 	}
+ 
+@@ -1105,7 +1122,7 @@ static int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
+ 			u32 id;
+ 
+ 			progs = &cgrp->bpf.progs[atype];
+-			cnt = min_t(int, prog_list_length(progs), total_cnt);
++			cnt = min_t(int, prog_list_length(progs, NULL), total_cnt);
+ 			i = 0;
+ 			hlist_for_each_entry(pl, progs, node) {
+ 				prog = prog_list_prog(pl);
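
The compute_effective_progs() hunk above uses a two-cursor fill: BPF_F_PREORDER programs are written backwards into the front segment while the rest fill forwards, and each level's preorder run is then reversed in place to restore attach order. A minimal userspace sketch of that placement technique for a single level, with illustrative names (not kernel API):

/* Two-cursor fill + in-place reversal, as in compute_effective_progs(). */
#include <stdio.h>

#define PREORDER 1

struct item { int id; int flags; };

static void swap_items(struct item *a, struct item *b)
{
	struct item t = *a; *a = *b; *b = t;
}

int main(void)
{
	struct item in[] = {
		{ 1, PREORDER }, { 2, 0 }, { 3, PREORDER }, { 4, 0 },
	};
	int n = 4, preorder_cnt = 2;
	struct item out[4];
	int fstart = preorder_cnt;      /* forward cursor for normal items */
	int bstart = preorder_cnt - 1;  /* backward cursor for preorder items */
	int init_bstart = bstart, i, j;

	for (i = 0; i < n; i++) {
		if (in[i].flags & PREORDER)
			out[bstart--] = in[i];
		else
			out[fstart++] = in[i];
	}
	/* reverse the preorder run so it executes in attach order */
	for (i = bstart + 1, j = init_bstart; i < j; i++, j--)
		swap_items(&out[i], &out[j]);

	for (i = 0; i < n; i++)
		printf("%d ", out[i].id);	/* prints: 1 3 2 4 */
	printf("\n");
	return 0;
}
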
+diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
+index bb3ba8ebaf3d24..570e2f7231443d 100644
+--- a/kernel/bpf/hashtab.c
++++ b/kernel/bpf/hashtab.c
+@@ -2223,7 +2223,7 @@ static long bpf_for_each_hash_elem(struct bpf_map *map, bpf_callback_t callback_
+ 		b = &htab->buckets[i];
+ 		rcu_read_lock();
+ 		head = &b->head;
+-		hlist_nulls_for_each_entry_rcu(elem, n, head, hash_node) {
++		hlist_nulls_for_each_entry_safe(elem, n, head, hash_node) {
+ 			key = elem->key;
+ 			if (is_percpu) {
+ 				/* current cpu value for percpu map */
+diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
+index 977c0845775652..ab74a226e3d6d9 100644
+--- a/kernel/bpf/syscall.c
++++ b/kernel/bpf/syscall.c
+@@ -4042,7 +4042,8 @@ static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
+ #define BPF_F_ATTACH_MASK_BASE	\
+ 	(BPF_F_ALLOW_OVERRIDE |	\
+ 	 BPF_F_ALLOW_MULTI |	\
+-	 BPF_F_REPLACE)
++	 BPF_F_REPLACE |	\
++	 BPF_F_PREORDER)
+ 
+ #define BPF_F_ATTACH_MASK_MPROG	\
+ 	(BPF_F_REPLACE |	\
+@@ -4606,6 +4607,8 @@ static int bpf_prog_get_info_by_fd(struct file *file,
+ 	info.recursion_misses = stats.misses;
+ 
+ 	info.verified_insns = prog->aux->verified_insns;
++	if (prog->aux->btf)
++		info.btf_id = btf_obj_id(prog->aux->btf);
+ 
+ 	if (!bpf_capable()) {
+ 		info.jited_prog_len = 0;
+@@ -4752,8 +4755,6 @@ static int bpf_prog_get_info_by_fd(struct file *file,
+ 		}
+ 	}
+ 
+-	if (prog->aux->btf)
+-		info.btf_id = btf_obj_id(prog->aux->btf);
+ 	info.attach_btf_id = prog->aux->attach_btf_id;
+ 	if (attach_btf)
+ 		info.attach_btf_obj_id = btf_obj_id(attach_btf);
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 8656208aa4bbb6..39a3d750f2ff94 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -1447,6 +1447,7 @@ static int copy_verifier_state(struct bpf_verifier_state *dst_state,
+ 	dst_state->callback_unroll_depth = src->callback_unroll_depth;
+ 	dst_state->used_as_loop_entry = src->used_as_loop_entry;
+ 	dst_state->may_goto_depth = src->may_goto_depth;
++	dst_state->loop_entry = src->loop_entry;
+ 	for (i = 0; i <= src->curframe; i++) {
+ 		dst = dst_state->frame[i];
+ 		if (!dst) {
+@@ -2987,6 +2988,21 @@ bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
+ 	return res ? &res->func_model : NULL;
+ }
+ 
++static int add_kfunc_in_insns(struct bpf_verifier_env *env,
++			      struct bpf_insn *insn, int cnt)
++{
++	int i, ret;
++
++	for (i = 0; i < cnt; i++, insn++) {
++		if (bpf_pseudo_kfunc_call(insn)) {
++			ret = add_kfunc_call(env, insn->imm, insn->off);
++			if (ret < 0)
++				return ret;
++		}
++	}
++	return 0;
++}
++
+ static int add_subprog_and_kfunc(struct bpf_verifier_env *env)
+ {
+ 	struct bpf_subprog_info *subprog = env->subprog_info;
+@@ -17267,12 +17283,16 @@ static void clean_verifier_state(struct bpf_verifier_env *env,
+ static void clean_live_states(struct bpf_verifier_env *env, int insn,
+ 			      struct bpf_verifier_state *cur)
+ {
++	struct bpf_verifier_state *loop_entry;
+ 	struct bpf_verifier_state_list *sl;
+ 
+ 	sl = *explored_state(env, insn);
+ 	while (sl) {
+ 		if (sl->state.branches)
+ 			goto next;
++		loop_entry = get_loop_entry(&sl->state);
++		if (loop_entry && loop_entry->branches)
++			goto next;
+ 		if (sl->state.insn_idx != insn ||
+ 		    !same_callsites(&sl->state, cur))
+ 			goto next;
+@@ -18701,6 +18721,10 @@ static int do_check(struct bpf_verifier_env *env)
+ 						return err;
+ 					break;
+ 				} else {
++					if (WARN_ON_ONCE(env->cur_state->loop_entry)) {
++						verbose(env, "verifier bug: env->cur_state->loop_entry != NULL\n");
++						return -EFAULT;
++					}
+ 					do_print_state = true;
+ 					continue;
+ 				}
+@@ -19768,7 +19792,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
+ {
+ 	struct bpf_subprog_info *subprogs = env->subprog_info;
+ 	const struct bpf_verifier_ops *ops = env->ops;
+-	int i, cnt, size, ctx_field_size, delta = 0, epilogue_cnt = 0;
++	int i, cnt, size, ctx_field_size, ret, delta = 0, epilogue_cnt = 0;
+ 	const int insn_cnt = env->prog->len;
+ 	struct bpf_insn *epilogue_buf = env->epilogue_buf;
+ 	struct bpf_insn *insn_buf = env->insn_buf;
+@@ -19797,6 +19821,10 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
+ 				return -ENOMEM;
+ 			env->prog = new_prog;
+ 			delta += cnt - 1;
++
++			ret = add_kfunc_in_insns(env, epilogue_buf, epilogue_cnt - 1);
++			if (ret < 0)
++				return ret;
+ 		}
+ 	}
+ 
+@@ -19817,6 +19845,10 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
+ 
+ 			env->prog = new_prog;
+ 			delta += cnt - 1;
++
++			ret = add_kfunc_in_insns(env, insn_buf, cnt - 1);
++			if (ret < 0)
++				return ret;
+ 		}
+ 	}
+ 
+diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
+index e63d6f3b004702..62933468aaf46c 100644
+--- a/kernel/cgroup/cgroup.c
++++ b/kernel/cgroup/cgroup.c
+@@ -90,7 +90,7 @@
+ DEFINE_MUTEX(cgroup_mutex);
+ DEFINE_SPINLOCK(css_set_lock);
+ 
+-#ifdef CONFIG_PROVE_RCU
++#if (defined CONFIG_PROVE_RCU || defined CONFIG_LOCKDEP)
+ EXPORT_SYMBOL_GPL(cgroup_mutex);
+ EXPORT_SYMBOL_GPL(css_set_lock);
+ #endif
+diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c
+index 3e01781aeb7bd0..c4ce2f5a9745f6 100644
+--- a/kernel/cgroup/rstat.c
++++ b/kernel/cgroup/rstat.c
+@@ -323,13 +323,11 @@ static void cgroup_rstat_flush_locked(struct cgroup *cgrp)
+ 			rcu_read_unlock();
+ 		}
+ 
+-		/* play nice and yield if necessary */
+-		if (need_resched() || spin_needbreak(&cgroup_rstat_lock)) {
+-			__cgroup_rstat_unlock(cgrp, cpu);
+-			if (!cond_resched())
+-				cpu_relax();
+-			__cgroup_rstat_lock(cgrp, cpu);
+-		}
++		/* play nice and avoid disabling interrupts for a long time */
++		__cgroup_rstat_unlock(cgrp, cpu);
++		if (!cond_resched())
++			cpu_relax();
++		__cgroup_rstat_lock(cgrp, cpu);
+ 	}
+ }
+ 
+diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
+index 864a1121bf086d..74d453ec750a11 100644
+--- a/kernel/dma/mapping.c
++++ b/kernel/dma/mapping.c
+@@ -897,6 +897,19 @@ int dma_set_coherent_mask(struct device *dev, u64 mask)
+ }
+ EXPORT_SYMBOL(dma_set_coherent_mask);
+ 
++static bool __dma_addressing_limited(struct device *dev)
++{
++	const struct dma_map_ops *ops = get_dma_ops(dev);
++
++	if (min_not_zero(dma_get_mask(dev), dev->bus_dma_limit) <
++			 dma_get_required_mask(dev))
++		return true;
++
++	if (unlikely(ops) || use_dma_iommu(dev))
++		return false;
++	return !dma_direct_all_ram_mapped(dev);
++}
++
+ /**
+  * dma_addressing_limited - return if the device is addressing limited
+  * @dev:	device to check
+@@ -907,15 +920,11 @@ EXPORT_SYMBOL(dma_set_coherent_mask);
+  */
+ bool dma_addressing_limited(struct device *dev)
+ {
+-	const struct dma_map_ops *ops = get_dma_ops(dev);
+-
+-	if (min_not_zero(dma_get_mask(dev), dev->bus_dma_limit) <
+-			 dma_get_required_mask(dev))
+-		return true;
+-
+-	if (unlikely(ops) || use_dma_iommu(dev))
++	if (!__dma_addressing_limited(dev))
+ 		return false;
+-	return !dma_direct_all_ram_mapped(dev);
++
++	dev_dbg(dev, "device is DMA addressing limited\n");
++	return true;
+ }
+ EXPORT_SYMBOL_GPL(dma_addressing_limited);
+ 
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index edafe9fc4bdd06..285a4548450bd2 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -1195,6 +1195,12 @@ static void perf_assert_pmu_disabled(struct pmu *pmu)
+ 	WARN_ON_ONCE(*this_cpu_ptr(pmu->pmu_disable_count) == 0);
+ }
+ 
++static inline void perf_pmu_read(struct perf_event *event)
++{
++	if (event->state == PERF_EVENT_STATE_ACTIVE)
++		event->pmu->read(event);
++}
++
+ static void get_ctx(struct perf_event_context *ctx)
+ {
+ 	refcount_inc(&ctx->refcount);
+@@ -3482,8 +3488,7 @@ static void __perf_event_sync_stat(struct perf_event *event,
+ 	 * we know the event must be on the current CPU, therefore we
+ 	 * don't need to use it.
+ 	 */
+-	if (event->state == PERF_EVENT_STATE_ACTIVE)
+-		event->pmu->read(event);
++	perf_pmu_read(event);
+ 
+ 	perf_event_update_time(event);
+ 
+@@ -4634,15 +4639,8 @@ static void __perf_event_read(void *info)
+ 
+ 	pmu->read(event);
+ 
+-	for_each_sibling_event(sub, event) {
+-		if (sub->state == PERF_EVENT_STATE_ACTIVE) {
+-			/*
+-			 * Use sibling's PMU rather than @event's since
+-			 * sibling could be on different (eg: software) PMU.
+-			 */
+-			sub->pmu->read(sub);
+-		}
+-	}
++	for_each_sibling_event(sub, event)
++		perf_pmu_read(sub);
+ 
+ 	data->ret = pmu->commit_txn(pmu);
+ 
+@@ -7408,9 +7406,8 @@ static void perf_output_read_group(struct perf_output_handle *handle,
+ 	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+ 		values[n++] = running;
+ 
+-	if ((leader != event) &&
+-	    (leader->state == PERF_EVENT_STATE_ACTIVE))
+-		leader->pmu->read(leader);
++	if ((leader != event) && !handle->skip_read)
++		perf_pmu_read(leader);
+ 
+ 	values[n++] = perf_event_count(leader, self);
+ 	if (read_format & PERF_FORMAT_ID)
+@@ -7423,9 +7420,8 @@ static void perf_output_read_group(struct perf_output_handle *handle,
+ 	for_each_sibling_event(sub, leader) {
+ 		n = 0;
+ 
+-		if ((sub != event) &&
+-		    (sub->state == PERF_EVENT_STATE_ACTIVE))
+-			sub->pmu->read(sub);
++		if ((sub != event) && !handle->skip_read)
++			perf_pmu_read(sub);
+ 
+ 		values[n++] = perf_event_count(sub, self);
+ 		if (read_format & PERF_FORMAT_ID)
+@@ -7484,6 +7480,9 @@ void perf_output_sample(struct perf_output_handle *handle,
+ {
+ 	u64 sample_type = data->type;
+ 
++	if (data->sample_flags & PERF_SAMPLE_READ)
++		handle->skip_read = 1;
++
+ 	perf_output_put(handle, *header);
+ 
+ 	if (sample_type & PERF_SAMPLE_IDENTIFIER)
+@@ -11978,40 +11977,51 @@ static int perf_try_init_event(struct pmu *pmu, struct perf_event *event)
+ 	if (ctx)
+ 		perf_event_ctx_unlock(event->group_leader, ctx);
+ 
+-	if (!ret) {
+-		if (!(pmu->capabilities & PERF_PMU_CAP_EXTENDED_REGS) &&
+-		    has_extended_regs(event))
+-			ret = -EOPNOTSUPP;
++	if (ret)
++		goto err_pmu;
+ 
+-		if (pmu->capabilities & PERF_PMU_CAP_NO_EXCLUDE &&
+-		    event_has_any_exclude_flag(event))
+-			ret = -EINVAL;
++	if (!(pmu->capabilities & PERF_PMU_CAP_EXTENDED_REGS) &&
++	    has_extended_regs(event)) {
++		ret = -EOPNOTSUPP;
++		goto err_destroy;
++	}
+ 
+-		if (pmu->scope != PERF_PMU_SCOPE_NONE && event->cpu >= 0) {
+-			const struct cpumask *cpumask = perf_scope_cpu_topology_cpumask(pmu->scope, event->cpu);
+-			struct cpumask *pmu_cpumask = perf_scope_cpumask(pmu->scope);
+-			int cpu;
+-
+-			if (pmu_cpumask && cpumask) {
+-				cpu = cpumask_any_and(pmu_cpumask, cpumask);
+-				if (cpu >= nr_cpu_ids)
+-					ret = -ENODEV;
+-				else
+-					event->event_caps |= PERF_EV_CAP_READ_SCOPE;
+-			} else {
+-				ret = -ENODEV;
+-			}
+-		}
++	if (pmu->capabilities & PERF_PMU_CAP_NO_EXCLUDE &&
++	    event_has_any_exclude_flag(event)) {
++		ret = -EINVAL;
++		goto err_destroy;
++	}
++
++	if (pmu->scope != PERF_PMU_SCOPE_NONE && event->cpu >= 0) {
++		const struct cpumask *cpumask;
++		struct cpumask *pmu_cpumask;
++		int cpu;
+ 
+-		if (ret && event->destroy)
+-			event->destroy(event);
++		cpumask = perf_scope_cpu_topology_cpumask(pmu->scope, event->cpu);
++		pmu_cpumask = perf_scope_cpumask(pmu->scope);
++
++		ret = -ENODEV;
++		if (!pmu_cpumask || !cpumask)
++			goto err_destroy;
++
++		cpu = cpumask_any_and(pmu_cpumask, cpumask);
++		if (cpu >= nr_cpu_ids)
++			goto err_destroy;
++
++		event->event_caps |= PERF_EV_CAP_READ_SCOPE;
+ 	}
+ 
+-	if (ret) {
+-		event->pmu = NULL;
+-		module_put(pmu->module);
++	return 0;
++
++err_destroy:
++	if (event->destroy) {
++		event->destroy(event);
++		event->destroy = NULL;
+ 	}
+ 
++err_pmu:
++	event->pmu = NULL;
++	module_put(pmu->module);
+ 	return ret;
+ }
+ 
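
The perf_try_init_event() hunk above converts nested "accumulate ret" conditionals into goto-based unwinding, where each failure jumps to a label that undoes exactly the state set up so far (err_destroy before err_pmu). A minimal standalone sketch of that error-unwind shape, with illustrative resources:

/* Goto-unwind error handling: later failures release earlier resources. */
#include <stdlib.h>

static int setup(void)
{
	char *a, *b;

	a = malloc(16);
	if (!a)
		goto err;
	b = malloc(16);
	if (!b)
		goto err_free_a;

	/* ... use a and b ... */
	free(b);
	free(a);
	return 0;

err_free_a:
	free(a);
err:
	return -1;
}

int main(void)
{
	return setup() ? 1 : 0;
}
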
+diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c
+index 6c2cb4e4f48dab..8f3f624419aa92 100644
+--- a/kernel/events/hw_breakpoint.c
++++ b/kernel/events/hw_breakpoint.c
+@@ -950,9 +950,10 @@ static int hw_breakpoint_event_init(struct perf_event *bp)
+ 		return -ENOENT;
+ 
+ 	/*
+-	 * no branch sampling for breakpoint events
++	 * Check if breakpoint type is supported before proceeding.
++	 * Also, no branch sampling for breakpoint events.
+ 	 */
+-	if (has_branch_stack(bp))
++	if (!hw_breakpoint_slots_cached(find_slot_idx(bp->attr.bp_type)) || has_branch_stack(bp))
+ 		return -EOPNOTSUPP;
+ 
+ 	err = register_perf_hw_breakpoint(bp);
+diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
+index bbfa22c0a1597a..6ecbbc57cdfde8 100644
+--- a/kernel/events/ring_buffer.c
++++ b/kernel/events/ring_buffer.c
+@@ -185,6 +185,7 @@ __perf_output_begin(struct perf_output_handle *handle,
+ 
+ 	handle->rb    = rb;
+ 	handle->event = event;
++	handle->flags = 0;
+ 
+ 	have_lost = local_read(&rb->lost);
+ 	if (unlikely(have_lost)) {
+diff --git a/kernel/exit.c b/kernel/exit.c
+index 619f0014c33be4..56b8bd9487b4b8 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -742,10 +742,10 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
+ 
+ 	tsk->exit_state = EXIT_ZOMBIE;
+ 	/*
+-	 * sub-thread or delay_group_leader(), wake up the
+-	 * PIDFD_THREAD waiters.
++	 * Ignore thread-group leaders that exited before all
++	 * subthreads did.
+ 	 */
+-	if (!thread_group_empty(tsk))
++	if (!delay_group_leader(tsk))
+ 		do_notify_pidfd(tsk);
+ 
+ 	if (unlikely(tsk->ptrace)) {
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 12decadff468f5..97c9afe3efc38d 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -505,10 +505,6 @@ struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
+ 	vma_numab_state_init(new);
+ 	dup_anon_vma_name(orig, new);
+ 
+-	/* track_pfn_copy() will later take care of copying internal state. */
+-	if (unlikely(new->vm_flags & VM_PFNMAP))
+-		untrack_pfn_clear(new);
+-
+ 	return new;
+ }
+ 
+@@ -699,6 +695,11 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
+ 		tmp = vm_area_dup(mpnt);
+ 		if (!tmp)
+ 			goto fail_nomem;
++
++		/* track_pfn_copy() will later take care of copying internal state. */
++		if (unlikely(tmp->vm_flags & VM_PFNMAP))
++			untrack_pfn_clear(tmp);
++
+ 		retval = vma_dup_policy(mpnt, tmp);
+ 		if (retval)
+ 			goto fail_nomem_policy;
+diff --git a/kernel/padata.c b/kernel/padata.c
+index 22770372bdf329..3e0ef0753e73e1 100644
+--- a/kernel/padata.c
++++ b/kernel/padata.c
+@@ -358,7 +358,8 @@ static void padata_reorder(struct parallel_data *pd)
+ 		 * To avoid UAF issue, add pd ref here, and put pd ref after reorder_work finish.
+ 		 */
+ 		padata_get_pd(pd);
+-		queue_work(pinst->serial_wq, &pd->reorder_work);
++		if (!queue_work(pinst->serial_wq, &pd->reorder_work))
++			padata_put_pd(pd);
+ 	}
+ }
+ 
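
The padata hunk above keys the extra pd reference to queue_work()'s return value: if the reorder work was already pending, no new execution will ever drop that reference, so the caller must put it back immediately. A small userspace sketch of the take-ref-then-maybe-put pattern, using stand-in helpers (not kernel API):

/* Reference taken for a work handler is dropped if nothing was queued. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj { atomic_int refs; };

static void get_ref(struct obj *o) { atomic_fetch_add(&o->refs, 1); }
static void put_ref(struct obj *o)
{
	if (atomic_fetch_sub(&o->refs, 1) == 1)
		printf("freed\n");	/* last reference dropped */
}

/* pretend the work item was already pending, as queue_work() can report */
static bool try_queue(struct obj *o) { (void)o; return false; }

int main(void)
{
	struct obj o = { .refs = 1 };

	get_ref(&o);			/* ref on behalf of the work handler */
	if (!try_queue(&o))
		put_ref(&o);		/* not queued: nobody else will drop it */
	put_ref(&o);			/* drop our own reference */
	return 0;
}
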
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index 881a26e18c658b..3a91b739e8f306 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -3310,7 +3310,12 @@ void console_unblank(void)
+ 	 */
+ 	cookie = console_srcu_read_lock();
+ 	for_each_console_srcu(c) {
+-		if ((console_srcu_read_flags(c) & CON_ENABLED) && c->unblank) {
++		short flags = console_srcu_read_flags(c);
++
++		if (flags & CON_SUSPENDED)
++			continue;
++
++		if ((flags & CON_ENABLED) && c->unblank) {
+ 			found_unblank = true;
+ 			break;
+ 		}
+@@ -3347,7 +3352,12 @@ void console_unblank(void)
+ 
+ 	cookie = console_srcu_read_lock();
+ 	for_each_console_srcu(c) {
+-		if ((console_srcu_read_flags(c) & CON_ENABLED) && c->unblank)
++		short flags = console_srcu_read_flags(c);
++
++		if (flags & CON_SUSPENDED)
++			continue;
++
++		if ((flags & CON_ENABLED) && c->unblank)
+ 			c->unblank();
+ 	}
+ 	console_srcu_read_unlock(cookie);
+diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
+index feb3ac1dc5d590..f87c9d6d36fcbe 100644
+--- a/kernel/rcu/rcu.h
++++ b/kernel/rcu/rcu.h
+@@ -162,7 +162,7 @@ static inline bool rcu_seq_done_exact(unsigned long *sp, unsigned long s)
+ {
+ 	unsigned long cur_s = READ_ONCE(*sp);
+ 
+-	return ULONG_CMP_GE(cur_s, s) || ULONG_CMP_LT(cur_s, s - (2 * RCU_SEQ_STATE_MASK + 1));
++	return ULONG_CMP_GE(cur_s, s) || ULONG_CMP_LT(cur_s, s - (3 * RCU_SEQ_STATE_MASK + 1));
+ }
+ 
+ /*
+diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
+index 8e52c1dd06284c..4ed86321952171 100644
+--- a/kernel/rcu/tree.c
++++ b/kernel/rcu/tree.c
+@@ -1822,10 +1822,14 @@ static noinline_for_stack bool rcu_gp_init(void)
+ 
+ 	/* Advance to a new grace period and initialize state. */
+ 	record_gp_stall_check_time();
++	/*
++	 * A new wait segment must be started before gp_seq is advanced, so
++	 * that previous gp waiters won't observe the new gp_seq.
++	 */
++	start_new_poll = rcu_sr_normal_gp_init();
+ 	/* Record GP times before starting GP, hence rcu_seq_start(). */
+ 	rcu_seq_start(&rcu_state.gp_seq);
+ 	ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq);
+-	start_new_poll = rcu_sr_normal_gp_init();
+ 	trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("start"));
+ 	rcu_poll_gp_seq_start(&rcu_state.gp_seq_polled_snap);
+ 	raw_spin_unlock_irq_rcu_node(rnp);
+@@ -4183,14 +4187,17 @@ EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
+  */
+ void get_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
+ {
+-	struct rcu_node *rnp = rcu_get_root();
+-
+ 	/*
+ 	 * Any prior manipulation of RCU-protected data must happen
+ 	 * before the loads from ->gp_seq and ->expedited_sequence.
+ 	 */
+ 	smp_mb();  /* ^^^ */
+-	rgosp->rgos_norm = rcu_seq_snap(&rnp->gp_seq);
++
++	// Yes, rcu_state.gp_seq, not rnp_root->gp_seq, the latter's use
++	// in poll_state_synchronize_rcu_full() notwithstanding.  Use of
++	// the latter here would result in too-short grace periods due to
++	// interactions with newly onlined CPUs.
++	rgosp->rgos_norm = rcu_seq_snap(&rcu_state.gp_seq);
+ 	rgosp->rgos_exp = rcu_seq_snap(&rcu_state.expedited_sequence);
+ }
+ EXPORT_SYMBOL_GPL(get_state_synchronize_rcu_full);
+diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
+index 1c7cbd145d5e37..304e3405e6ec76 100644
+--- a/kernel/rcu/tree_plugin.h
++++ b/kernel/rcu/tree_plugin.h
+@@ -832,8 +832,17 @@ void rcu_read_unlock_strict(void)
+ {
+ 	struct rcu_data *rdp;
+ 
+-	if (irqs_disabled() || preempt_count() || !rcu_state.gp_kthread)
++	if (irqs_disabled() || in_atomic_preempt_off() || !rcu_state.gp_kthread)
+ 		return;
++
++	/*
++	 * rcu_report_qs_rdp() can only be invoked with a stable rdp and
++	 * from the local CPU.
++	 *
++	 * The in_atomic_preempt_off() check ensures that we come here holding
++	 * the last preempt_count (which will get dropped once we return to
++	 * __rcu_read_unlock()).
++	 */
+ 	rdp = this_cpu_ptr(&rcu_data);
+ 	rdp->cpu_no_qs.b.norm = false;
+ 	rcu_report_qs_rdp(rdp);
+@@ -974,13 +983,16 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
+  */
+ static void rcu_flavor_sched_clock_irq(int user)
+ {
+-	if (user || rcu_is_cpu_rrupt_from_idle()) {
++	if (user || rcu_is_cpu_rrupt_from_idle() ||
++	     (IS_ENABLED(CONFIG_PREEMPT_COUNT) &&
++	      (preempt_count() == HARDIRQ_OFFSET))) {
+ 
+ 		/*
+ 		 * Get here if this CPU took its interrupt from user
+-		 * mode or from the idle loop, and if this is not a
+-		 * nested interrupt.  In this case, the CPU is in
+-		 * a quiescent state, so note it.
++		 * mode, from the idle loop without this being a nested
++		 * interrupt, or while not holding the task preempt count
++		 * (with PREEMPT_COUNT=y). In this case, the CPU is in a
++		 * quiescent state, so note it.
+ 		 *
+ 		 * No memory barrier is required here because rcu_qs()
+ 		 * references only CPU-local variables that other CPUs
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 990d0828bf2a90..443f6a9ef3f8f6 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -71,10 +71,10 @@ unsigned int sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_LOG;
+ /*
+  * Minimal preemption granularity for CPU-bound tasks:
+  *
+- * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
++ * (default: 0.70 msec * (1 + ilog(ncpus)), units: nanoseconds)
+  */
+-unsigned int sysctl_sched_base_slice			= 750000ULL;
+-static unsigned int normalized_sysctl_sched_base_slice	= 750000ULL;
++unsigned int sysctl_sched_base_slice			= 700000ULL;
++static unsigned int normalized_sysctl_sched_base_slice	= 700000ULL;
+ 
+ const_debug unsigned int sysctl_sched_migration_cost	= 500000UL;
+ 
+diff --git a/kernel/signal.c b/kernel/signal.c
+index 2ae45e6eb6bb8e..468b589c39e695 100644
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -2063,8 +2063,7 @@ bool do_notify_parent(struct task_struct *tsk, int sig)
+ 	WARN_ON_ONCE(!tsk->ptrace &&
+ 	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));
+ 	/*
+-	 * tsk is a group leader and has no threads, wake up the
+-	 * non-PIDFD_THREAD waiters.
++	 * Notify for thread-group leaders without subthreads.
+ 	 */
+ 	if (thread_group_empty(tsk))
+ 		do_notify_pidfd(tsk);
+diff --git a/kernel/softirq.c b/kernel/softirq.c
+index 8c4524ce65fafe..00ff1763504132 100644
+--- a/kernel/softirq.c
++++ b/kernel/softirq.c
+@@ -126,6 +126,18 @@ static DEFINE_PER_CPU(struct softirq_ctrl, softirq_ctrl) = {
+ 	.lock	= INIT_LOCAL_LOCK(softirq_ctrl.lock),
+ };
+ 
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++static struct lock_class_key bh_lock_key;
++struct lockdep_map bh_lock_map = {
++	.name			= "local_bh",
++	.key			= &bh_lock_key,
++	.wait_type_outer	= LD_WAIT_FREE,
++	.wait_type_inner	= LD_WAIT_CONFIG, /* PREEMPT_RT makes BH preemptible. */
++	.lock_type		= LD_LOCK_PERCPU,
++};
++EXPORT_SYMBOL_GPL(bh_lock_map);
++#endif
++
+ /**
+  * local_bh_blocked() - Check for idle whether BH processing is blocked
+  *
+@@ -148,6 +160,8 @@ void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
+ 
+ 	WARN_ON_ONCE(in_hardirq());
+ 
++	lock_map_acquire_read(&bh_lock_map);
++
+ 	/* First entry of a task into a BH disabled section? */
+ 	if (!current->softirq_disable_cnt) {
+ 		if (preemptible()) {
+@@ -211,6 +225,8 @@ void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
+ 	WARN_ON_ONCE(in_hardirq());
+ 	lockdep_assert_irqs_enabled();
+ 
++	lock_map_release(&bh_lock_map);
++
+ 	local_irq_save(flags);
+ 	curcnt = __this_cpu_read(softirq_ctrl.cnt);
+ 
+@@ -261,6 +277,8 @@ static inline void ksoftirqd_run_begin(void)
+ /* Counterpart to ksoftirqd_run_begin() */
+ static inline void ksoftirqd_run_end(void)
+ {
++	/* pairs with the lock_map_acquire_read() in ksoftirqd_run_begin() */
++	lock_map_release(&bh_lock_map);
+ 	__local_bh_enable(SOFTIRQ_OFFSET, true);
+ 	WARN_ON_ONCE(in_interrupt());
+ 	local_irq_enable();
+diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
+index db9c06bb23116a..06fbc226341fd0 100644
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -117,16 +117,6 @@ DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
+ 	.csd = CSD_INIT(retrigger_next_event, NULL)
+ };
+ 
+-static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
+-	/* Make sure we catch unsupported clockids */
+-	[0 ... MAX_CLOCKS - 1]	= HRTIMER_MAX_CLOCK_BASES,
+-
+-	[CLOCK_REALTIME]	= HRTIMER_BASE_REALTIME,
+-	[CLOCK_MONOTONIC]	= HRTIMER_BASE_MONOTONIC,
+-	[CLOCK_BOOTTIME]	= HRTIMER_BASE_BOOTTIME,
+-	[CLOCK_TAI]		= HRTIMER_BASE_TAI,
+-};
+-
+ static inline bool hrtimer_base_is_online(struct hrtimer_cpu_base *base)
+ {
+ 	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
+@@ -1597,14 +1587,19 @@ u64 hrtimer_next_event_without(const struct hrtimer *exclude)
+ 
+ static inline int hrtimer_clockid_to_base(clockid_t clock_id)
+ {
+-	if (likely(clock_id < MAX_CLOCKS)) {
+-		int base = hrtimer_clock_to_base_table[clock_id];
+-
+-		if (likely(base != HRTIMER_MAX_CLOCK_BASES))
+-			return base;
++	switch (clock_id) {
++	case CLOCK_REALTIME:
++		return HRTIMER_BASE_REALTIME;
++	case CLOCK_MONOTONIC:
++		return HRTIMER_BASE_MONOTONIC;
++	case CLOCK_BOOTTIME:
++		return HRTIMER_BASE_BOOTTIME;
++	case CLOCK_TAI:
++		return HRTIMER_BASE_TAI;
++	default:
++		WARN(1, "Invalid clockid %d. Using MONOTONIC\n", clock_id);
++		return HRTIMER_BASE_MONOTONIC;
+ 	}
+-	WARN(1, "Invalid clockid %d. Using MONOTONIC\n", clock_id);
+-	return HRTIMER_BASE_MONOTONIC;
+ }
+ 
+ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
+diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
+index 4576aaed13b23b..c5d9bfbb082b86 100644
+--- a/kernel/time/posix-timers.c
++++ b/kernel/time/posix-timers.c
+@@ -118,6 +118,7 @@ static int posix_timer_add(struct k_itimer *timer)
+ 			return id;
+ 		}
+ 		spin_unlock(&hash_lock);
++		cond_resched();
+ 	}
+ 	/* POSIX return code when no timer ID could be allocated */
+ 	return -EAGAIN;
+@@ -513,14 +514,21 @@ static int do_timer_create(clockid_t which_clock, struct sigevent *event,
+ 	if (error)
+ 		goto out;
+ 
+-	spin_lock_irq(&current->sighand->siglock);
+-	/* This makes the timer valid in the hash table */
+-	WRITE_ONCE(new_timer->it_signal, current->signal);
+-	hlist_add_head(&new_timer->list, &current->signal->posix_timers);
+-	spin_unlock_irq(&current->sighand->siglock);
+ 	/*
+-	 * After unlocking sighand::siglock @new_timer is subject to
+-	 * concurrent removal and cannot be touched anymore
++	 * timer::it_lock ensures that __lock_timer() observes a fully
++	 * initialized timer when it observes a valid timer::it_signal.
++	 *
++	 * sighand::siglock is required to protect signal::posix_timers.
++	 */
++	scoped_guard (spinlock_irq, &new_timer->it_lock) {
++		guard(spinlock)(&current->sighand->siglock);
++		/* This makes the timer valid in the hash table */
++		WRITE_ONCE(new_timer->it_signal, current->signal);
++		hlist_add_head(&new_timer->list, &current->signal->posix_timers);
++	}
++	/*
++	 * After unlocking, @new_timer is subject to concurrent removal and
++	 * cannot be touched anymore
+ 	 */
+ 	return 0;
+ out:
+diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
+index 1c311c46da5074..cfbb46cc4e7613 100644
+--- a/kernel/time/timer_list.c
++++ b/kernel/time/timer_list.c
+@@ -46,7 +46,7 @@ static void
+ print_timer(struct seq_file *m, struct hrtimer *taddr, struct hrtimer *timer,
+ 	    int idx, u64 now)
+ {
+-	SEQ_printf(m, " #%d: <%pK>, %ps", idx, taddr, timer->function);
++	SEQ_printf(m, " #%d: <%p>, %ps", idx, taddr, timer->function);
+ 	SEQ_printf(m, ", S:%02x", timer->state);
+ 	SEQ_printf(m, "\n");
+ 	SEQ_printf(m, " # expires at %Lu-%Lu nsecs [in %Ld to %Ld nsecs]\n",
+@@ -98,7 +98,7 @@ print_active_timers(struct seq_file *m, struct hrtimer_clock_base *base,
+ static void
+ print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
+ {
+-	SEQ_printf(m, "  .base:       %pK\n", base);
++	SEQ_printf(m, "  .base:       %p\n", base);
+ 	SEQ_printf(m, "  .index:      %d\n", base->index);
+ 
+ 	SEQ_printf(m, "  .resolution: %u nsecs\n", hrtimer_resolution);
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 06104c2c66ab2a..e773b0adcfc0a2 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -3343,10 +3343,9 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
+ }
+ EXPORT_SYMBOL_GPL(trace_vbprintk);
+ 
+-__printf(3, 0)
+-static int
+-__trace_array_vprintk(struct trace_buffer *buffer,
+-		      unsigned long ip, const char *fmt, va_list args)
++static __printf(3, 0)
++int __trace_array_vprintk(struct trace_buffer *buffer,
++			  unsigned long ip, const char *fmt, va_list args)
+ {
+ 	struct trace_event_call *call = &event_print;
+ 	struct ring_buffer_event *event;
+@@ -3399,7 +3398,6 @@ __trace_array_vprintk(struct trace_buffer *buffer,
+ 	return len;
+ }
+ 
+-__printf(3, 0)
+ int trace_array_vprintk(struct trace_array *tr,
+ 			unsigned long ip, const char *fmt, va_list args)
+ {
+@@ -3429,7 +3427,6 @@ int trace_array_vprintk(struct trace_array *tr,
+  * Note, trace_array_init_printk() must be called on @tr before this
+  * can be used.
+  */
+-__printf(3, 0)
+ int trace_array_printk(struct trace_array *tr,
+ 		       unsigned long ip, const char *fmt, ...)
+ {
+@@ -3474,7 +3471,6 @@ int trace_array_init_printk(struct trace_array *tr)
+ }
+ EXPORT_SYMBOL_GPL(trace_array_init_printk);
+ 
+-__printf(3, 4)
+ int trace_array_printk_buf(struct trace_buffer *buffer,
+ 			   unsigned long ip, const char *fmt, ...)
+ {
+@@ -3490,7 +3486,6 @@ int trace_array_printk_buf(struct trace_buffer *buffer,
+ 	return ret;
+ }
+ 
+-__printf(2, 0)
+ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
+ {
+ 	return trace_array_vprintk(printk_trace, ip, fmt, args);
+diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
+index 04ea327198ba80..82da3ac140242c 100644
+--- a/kernel/trace/trace.h
++++ b/kernel/trace/trace.h
+@@ -818,13 +818,15 @@ static inline void __init disable_tracing_selftest(const char *reason)
+ 
+ extern void *head_page(struct trace_array_cpu *data);
+ extern unsigned long long ns2usecs(u64 nsec);
+-extern int
+-trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
+-extern int
+-trace_vprintk(unsigned long ip, const char *fmt, va_list args);
+-extern int
+-trace_array_vprintk(struct trace_array *tr,
+-		    unsigned long ip, const char *fmt, va_list args);
++
++__printf(2, 0)
++int trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
++__printf(2, 0)
++int trace_vprintk(unsigned long ip, const char *fmt, va_list args);
++__printf(3, 0)
++int trace_array_vprintk(struct trace_array *tr,
++			unsigned long ip, const char *fmt, va_list args);
++__printf(3, 4)
+ int trace_array_printk_buf(struct trace_buffer *buffer,
+ 			   unsigned long ip, const char *fmt, ...);
+ void trace_printk_seq(struct trace_seq *s);
+diff --git a/kernel/vhost_task.c b/kernel/vhost_task.c
+index 2ef2e1b8009165..2f844c279a3e01 100644
+--- a/kernel/vhost_task.c
++++ b/kernel/vhost_task.c
+@@ -111,7 +111,7 @@ EXPORT_SYMBOL_GPL(vhost_task_stop);
+  * @arg: data to be passed to fn and handled_kill
+  * @name: the thread's name
+  *
+- * This returns a specialized task for use by the vhost layer or NULL on
++ * This returns a specialized task for use by the vhost layer or ERR_PTR() on
+  * failure. The returned task is inactive, and the caller must fire it up
+  * through vhost_task_start().
+  */
+diff --git a/lib/dynamic_queue_limits.c b/lib/dynamic_queue_limits.c
+index e49deddd3de9fe..7d1dfbb99b3979 100644
+--- a/lib/dynamic_queue_limits.c
++++ b/lib/dynamic_queue_limits.c
+@@ -190,7 +190,7 @@ EXPORT_SYMBOL(dql_completed);
+ void dql_reset(struct dql *dql)
+ {
+ 	/* Reset all dynamic values */
+-	dql->limit = 0;
++	dql->limit = dql->min_limit;
+ 	dql->num_queued = 0;
+ 	dql->num_completed = 0;
+ 	dql->last_obj_cnt = 0;
+diff --git a/lib/lzo/Makefile b/lib/lzo/Makefile
+index 2f58fafbbdddc0..fc7b2b7ef4b20e 100644
+--- a/lib/lzo/Makefile
++++ b/lib/lzo/Makefile
+@@ -1,5 +1,5 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+-lzo_compress-objs := lzo1x_compress.o
++lzo_compress-objs := lzo1x_compress.o lzo1x_compress_safe.o
+ lzo_decompress-objs := lzo1x_decompress_safe.o
+ 
+ obj-$(CONFIG_LZO_COMPRESS) += lzo_compress.o
+diff --git a/lib/lzo/lzo1x_compress.c b/lib/lzo/lzo1x_compress.c
+index 47d6d43ea9578c..7b10ca86a89300 100644
+--- a/lib/lzo/lzo1x_compress.c
++++ b/lib/lzo/lzo1x_compress.c
+@@ -18,11 +18,22 @@
+ #include <linux/lzo.h>
+ #include "lzodefs.h"
+ 
+-static noinline size_t
+-lzo1x_1_do_compress(const unsigned char *in, size_t in_len,
+-		    unsigned char *out, size_t *out_len,
+-		    size_t ti, void *wrkmem, signed char *state_offset,
+-		    const unsigned char bitstream_version)
++#undef LZO_UNSAFE
++
++#ifndef LZO_SAFE
++#define LZO_UNSAFE 1
++#define LZO_SAFE(name) name
++#define HAVE_OP(x) 1
++#endif
++
++#define NEED_OP(x) if (!HAVE_OP(x)) goto output_overrun
++
++static noinline int
++LZO_SAFE(lzo1x_1_do_compress)(const unsigned char *in, size_t in_len,
++			      unsigned char **out, unsigned char *op_end,
++			      size_t *tp, void *wrkmem,
++			      signed char *state_offset,
++			      const unsigned char bitstream_version)
+ {
+ 	const unsigned char *ip;
+ 	unsigned char *op;
+@@ -30,8 +41,9 @@ lzo1x_1_do_compress(const unsigned char *in, size_t in_len,
+ 	const unsigned char * const ip_end = in + in_len - 20;
+ 	const unsigned char *ii;
+ 	lzo_dict_t * const dict = (lzo_dict_t *) wrkmem;
++	size_t ti = *tp;
+ 
+-	op = out;
++	op = *out;
+ 	ip = in;
+ 	ii = ip;
+ 	ip += ti < 4 ? 4 - ti : 0;
+@@ -116,25 +128,32 @@ lzo1x_1_do_compress(const unsigned char *in, size_t in_len,
+ 		if (t != 0) {
+ 			if (t <= 3) {
+ 				op[*state_offset] |= t;
++				NEED_OP(4);
+ 				COPY4(op, ii);
+ 				op += t;
+ 			} else if (t <= 16) {
++				NEED_OP(17);
+ 				*op++ = (t - 3);
+ 				COPY8(op, ii);
+ 				COPY8(op + 8, ii + 8);
+ 				op += t;
+ 			} else {
+ 				if (t <= 18) {
++					NEED_OP(1);
+ 					*op++ = (t - 3);
+ 				} else {
+ 					size_t tt = t - 18;
++					NEED_OP(1);
+ 					*op++ = 0;
+ 					while (unlikely(tt > 255)) {
+ 						tt -= 255;
++						NEED_OP(1);
+ 						*op++ = 0;
+ 					}
++					NEED_OP(1);
+ 					*op++ = tt;
+ 				}
++				NEED_OP(t);
+ 				do {
+ 					COPY8(op, ii);
+ 					COPY8(op + 8, ii + 8);
+@@ -151,6 +170,7 @@ lzo1x_1_do_compress(const unsigned char *in, size_t in_len,
+ 		if (unlikely(run_length)) {
+ 			ip += run_length;
+ 			run_length -= MIN_ZERO_RUN_LENGTH;
++			NEED_OP(4);
+ 			put_unaligned_le32((run_length << 21) | 0xfffc18
+ 					   | (run_length & 0x7), op);
+ 			op += 4;
+@@ -243,10 +263,12 @@ lzo1x_1_do_compress(const unsigned char *in, size_t in_len,
+ 		ip += m_len;
+ 		if (m_len <= M2_MAX_LEN && m_off <= M2_MAX_OFFSET) {
+ 			m_off -= 1;
++			NEED_OP(2);
+ 			*op++ = (((m_len - 1) << 5) | ((m_off & 7) << 2));
+ 			*op++ = (m_off >> 3);
+ 		} else if (m_off <= M3_MAX_OFFSET) {
+ 			m_off -= 1;
++			NEED_OP(1);
+ 			if (m_len <= M3_MAX_LEN)
+ 				*op++ = (M3_MARKER | (m_len - 2));
+ 			else {
+@@ -254,14 +276,18 @@ lzo1x_1_do_compress(const unsigned char *in, size_t in_len,
+ 				*op++ = M3_MARKER | 0;
+ 				while (unlikely(m_len > 255)) {
+ 					m_len -= 255;
++					NEED_OP(1);
+ 					*op++ = 0;
+ 				}
++				NEED_OP(1);
+ 				*op++ = (m_len);
+ 			}
++			NEED_OP(2);
+ 			*op++ = (m_off << 2);
+ 			*op++ = (m_off >> 6);
+ 		} else {
+ 			m_off -= 0x4000;
++			NEED_OP(1);
+ 			if (m_len <= M4_MAX_LEN)
+ 				*op++ = (M4_MARKER | ((m_off >> 11) & 8)
+ 						| (m_len - 2));
+@@ -282,11 +308,14 @@ lzo1x_1_do_compress(const unsigned char *in, size_t in_len,
+ 				m_len -= M4_MAX_LEN;
+ 				*op++ = (M4_MARKER | ((m_off >> 11) & 8));
+ 				while (unlikely(m_len > 255)) {
++					NEED_OP(1);
+ 					m_len -= 255;
+ 					*op++ = 0;
+ 				}
++				NEED_OP(1);
+ 				*op++ = (m_len);
+ 			}
++			NEED_OP(2);
+ 			*op++ = (m_off << 2);
+ 			*op++ = (m_off >> 6);
+ 		}
+@@ -295,14 +324,20 @@ lzo1x_1_do_compress(const unsigned char *in, size_t in_len,
+ 		ii = ip;
+ 		goto next;
+ 	}
+-	*out_len = op - out;
+-	return in_end - (ii - ti);
++	*out = op;
++	*tp = in_end - (ii - ti);
++	return LZO_E_OK;
++
++output_overrun:
++	return LZO_E_OUTPUT_OVERRUN;
+ }
+ 
+-static int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len,
+-		     unsigned char *out, size_t *out_len,
+-		     void *wrkmem, const unsigned char bitstream_version)
++static int LZO_SAFE(lzogeneric1x_1_compress)(
++	const unsigned char *in, size_t in_len,
++	unsigned char *out, size_t *out_len,
++	void *wrkmem, const unsigned char bitstream_version)
+ {
++	unsigned char * const op_end = out + *out_len;
+ 	const unsigned char *ip = in;
+ 	unsigned char *op = out;
+ 	unsigned char *data_start;
+@@ -326,14 +361,18 @@ static int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len,
+ 	while (l > 20) {
+ 		size_t ll = min_t(size_t, l, m4_max_offset + 1);
+ 		uintptr_t ll_end = (uintptr_t) ip + ll;
++		int err;
++
+ 		if ((ll_end + ((t + ll) >> 5)) <= ll_end)
+ 			break;
+ 		BUILD_BUG_ON(D_SIZE * sizeof(lzo_dict_t) > LZO1X_1_MEM_COMPRESS);
+ 		memset(wrkmem, 0, D_SIZE * sizeof(lzo_dict_t));
+-		t = lzo1x_1_do_compress(ip, ll, op, out_len, t, wrkmem,
+-					&state_offset, bitstream_version);
++		err = LZO_SAFE(lzo1x_1_do_compress)(
++			ip, ll, &op, op_end, &t, wrkmem,
++			&state_offset, bitstream_version);
++		if (err != LZO_E_OK)
++			return err;
+ 		ip += ll;
+-		op += *out_len;
+ 		l  -= ll;
+ 	}
+ 	t += l;
+@@ -342,20 +381,26 @@ static int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len,
+ 		const unsigned char *ii = in + in_len - t;
+ 
+ 		if (op == data_start && t <= 238) {
++			NEED_OP(1);
+ 			*op++ = (17 + t);
+ 		} else if (t <= 3) {
+ 			op[state_offset] |= t;
+ 		} else if (t <= 18) {
++			NEED_OP(1);
+ 			*op++ = (t - 3);
+ 		} else {
+ 			size_t tt = t - 18;
++			NEED_OP(1);
+ 			*op++ = 0;
+ 			while (tt > 255) {
+ 				tt -= 255;
++				NEED_OP(1);
+ 				*op++ = 0;
+ 			}
++			NEED_OP(1);
+ 			*op++ = tt;
+ 		}
++		NEED_OP(t);
+ 		if (t >= 16) do {
+ 			COPY8(op, ii);
+ 			COPY8(op + 8, ii + 8);
+@@ -368,31 +413,38 @@ static int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len,
+ 		} while (--t > 0);
+ 	}
+ 
++	NEED_OP(3);
+ 	*op++ = M4_MARKER | 1;
+ 	*op++ = 0;
+ 	*op++ = 0;
+ 
+ 	*out_len = op - out;
+ 	return LZO_E_OK;
++
++output_overrun:
++	return LZO_E_OUTPUT_OVERRUN;
+ }
+ 
+-int lzo1x_1_compress(const unsigned char *in, size_t in_len,
+-		     unsigned char *out, size_t *out_len,
+-		     void *wrkmem)
++int LZO_SAFE(lzo1x_1_compress)(const unsigned char *in, size_t in_len,
++			       unsigned char *out, size_t *out_len,
++			       void *wrkmem)
+ {
+-	return lzogeneric1x_1_compress(in, in_len, out, out_len, wrkmem, 0);
++	return LZO_SAFE(lzogeneric1x_1_compress)(
++		in, in_len, out, out_len, wrkmem, 0);
+ }
+ 
+-int lzorle1x_1_compress(const unsigned char *in, size_t in_len,
+-		     unsigned char *out, size_t *out_len,
+-		     void *wrkmem)
++int LZO_SAFE(lzorle1x_1_compress)(const unsigned char *in, size_t in_len,
++				  unsigned char *out, size_t *out_len,
++				  void *wrkmem)
+ {
+-	return lzogeneric1x_1_compress(in, in_len, out, out_len,
+-				       wrkmem, LZO_VERSION);
++	return LZO_SAFE(lzogeneric1x_1_compress)(
++		in, in_len, out, out_len, wrkmem, LZO_VERSION);
+ }
+ 
+-EXPORT_SYMBOL_GPL(lzo1x_1_compress);
+-EXPORT_SYMBOL_GPL(lzorle1x_1_compress);
++EXPORT_SYMBOL_GPL(LZO_SAFE(lzo1x_1_compress));
++EXPORT_SYMBOL_GPL(LZO_SAFE(lzorle1x_1_compress));
+ 
++#ifndef LZO_UNSAFE
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("LZO1X-1 Compressor");
++#endif
+diff --git a/lib/lzo/lzo1x_compress_safe.c b/lib/lzo/lzo1x_compress_safe.c
+new file mode 100644
+index 00000000000000..371c9f84949281
+--- /dev/null
++++ b/lib/lzo/lzo1x_compress_safe.c
+@@ -0,0 +1,18 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/*
++ *  LZO1X Compressor from LZO
++ *
++ *  Copyright (C) 1996-2012 Markus F.X.J. Oberhumer <markus@oberhumer.com>
++ *
++ *  The full LZO package can be found at:
++ *  http://www.oberhumer.com/opensource/lzo/
++ *
++ *  Changed for Linux kernel use by:
++ *  Nitin Gupta <nitingupta910@gmail.com>
++ *  Richard Purdie <rpurdie@openedhand.com>
++ */
++
++#define LZO_SAFE(name) name##_safe
++#define HAVE_OP(x) ((size_t)(op_end - op) >= (size_t)(x))
++
++#include "lzo1x_compress.c"
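
lzo1x_compress_safe.c above is the entire "safe" build: it defines LZO_SAFE() and HAVE_OP() and then #includes the shared compressor body, which the bare build compiles with the checks compiled out. A condensed sketch of that double-inclusion trick, with illustrative file and function names:

/* Shared body, compiled twice with different macro definitions. */
#include <stddef.h>

#ifndef LZO_SAFE
#define LZO_UNSAFE 1
#define LZO_SAFE(name) name	/* unsafe build: plain names ...  */
#define HAVE_OP(x) 1		/* ... and no output-space checks */
#endif

int LZO_SAFE(emit_zero)(unsigned char *op, unsigned char *op_end)
{
	if (!HAVE_OP(1))
		return -1;	/* would overrun the output buffer */
	*op = 0;
	return 0;
}

/* A companion emit_safe.c would contain only:
 *   #define LZO_SAFE(name) name##_safe
 *   #define HAVE_OP(x) ((size_t)(op_end - op) >= (size_t)(x))
 *   #include "emit.c"
 * producing emit_zero_safe() with real bounds checking.
 */
#ifdef LZO_UNSAFE
int main(void)
{
	unsigned char b[1];
	return LZO_SAFE(emit_zero)(b, b + 1);
}
#endif
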
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 2d1e402f06f22a..dcd2d0e15e13bd 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -1139,7 +1139,6 @@ void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
+ {
+ 	struct mem_cgroup *iter;
+ 	int ret = 0;
+-	int i = 0;
+ 
+ 	BUG_ON(mem_cgroup_is_root(memcg));
+ 
+@@ -1149,10 +1148,9 @@ void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
+ 
+ 		css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
+ 		while (!ret && (task = css_task_iter_next(&it))) {
+-			/* Avoid potential softlockup warning */
+-			if ((++i & 1023) == 0)
+-				cond_resched();
+ 			ret = fn(task, arg);
++			/* Avoid potential softlockup warning */
++			cond_resched();
+ 		}
+ 		css_task_iter_end(&it);
+ 		if (ret) {
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index ebe1ec66149269..882903f42300b8 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -4381,6 +4381,14 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
+ 	}
+ 
+ retry:
++	/*
++	 * Deal with possible cpuset update races or zonelist updates to avoid
++	 * infinite retries.
++	 */
++	if (check_retry_cpuset(cpuset_mems_cookie, ac) ||
++	    check_retry_zonelist(zonelist_iter_cookie))
++		goto restart;
++
+ 	/* Ensure kswapd doesn't accidentally go to sleep as long as we loop */
+ 	if (alloc_flags & ALLOC_KSWAPD)
+ 		wake_all_kswapds(order, gfp_mask, ac);
+diff --git a/mm/vmalloc.c b/mm/vmalloc.c
+index 358bd3083b8886..cc04e501b1c531 100644
+--- a/mm/vmalloc.c
++++ b/mm/vmalloc.c
+@@ -4097,8 +4097,8 @@ void *vrealloc_noprof(const void *p, size_t size, gfp_t flags)
+ 	 * would be a good heuristic for when to shrink the vm_area?
+ 	 */
+ 	if (size <= old_size) {
+-		/* Zero out "freed" memory. */
+-		if (want_init_on_free())
++		/* Zero out "freed" memory, potentially for future realloc. */
++		if (want_init_on_free() || want_init_on_alloc(flags))
+ 			memset((void *)p + size, 0, old_size - size);
+ 		vm->requested_size = size;
+ 		kasan_poison_vmalloc(p + size, old_size - size);
+@@ -4111,10 +4111,13 @@ void *vrealloc_noprof(const void *p, size_t size, gfp_t flags)
+ 	if (size <= alloced_size) {
+ 		kasan_unpoison_vmalloc(p + old_size, size - old_size,
+ 				       KASAN_VMALLOC_PROT_NORMAL);
+-		/* Zero out "alloced" memory. */
+-		if (want_init_on_alloc(flags))
+-			memset((void *)p + old_size, 0, size - old_size);
++		/*
++		 * No need to zero memory here, as unused memory will have
++		 * already been zeroed at initial allocation time or during
++		 * realloc shrink time.
++		 */
+ 		vm->requested_size = size;
++		return (void *)p;
+ 	}
+ 
+ 	/* TODO: Grow the vm_area, i.e. allocate and map additional pages. */
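
The vmalloc hunk above moves zeroing to shrink time: the released tail is cleared when the buffer shrinks (under init-on-free or init-on-alloc), so a later grow back into the same allocation can return without a second memset. A small userspace sketch of that policy, with plain booleans standing in for want_init_on_free()/want_init_on_alloc():

/* Zero the freed tail on shrink so a later grow finds clean bytes. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool init_on_free;
static bool init_on_alloc = true;

static void shrink_in_place(char *buf, size_t old_size, size_t new_size)
{
	if (new_size < old_size && (init_on_free || init_on_alloc))
		memset(buf + new_size, 0, old_size - new_size);
}

int main(void)
{
	char buf[8] = "ABCDEFG";

	shrink_in_place(buf, 8, 4);
	/* a later grow back to 8 finds zeroed bytes, no second memset */
	printf("%d\n", buf[5]);		/* prints 0 */
	return 0;
}
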
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index bc5b42fce2b801..88946334035193 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -932,6 +932,9 @@ static u8 hci_cc_read_buffer_size(struct hci_dev *hdev, void *data,
+ 		hdev->sco_pkts = 8;
+ 	}
+ 
++	if (!read_voice_setting_capable(hdev))
++		hdev->sco_pkts = 0;
++
+ 	hdev->acl_cnt = hdev->acl_pkts;
+ 	hdev->sco_cnt = hdev->sco_pkts;
+ 
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index c219a8c596d3e5..66fa5d6fea6cad 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -1411,7 +1411,8 @@ static void l2cap_request_info(struct l2cap_conn *conn)
+ 		       sizeof(req), &req);
+ }
+ 
+-static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
++static bool l2cap_check_enc_key_size(struct hci_conn *hcon,
++				     struct l2cap_chan *chan)
+ {
+ 	/* The minimum encryption key size needs to be enforced by the
+ 	 * host stack before establishing any L2CAP connections. The
+@@ -1425,7 +1426,7 @@ static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
+ 	int min_key_size = hcon->hdev->min_enc_key_size;
+ 
+ 	/* On FIPS security level, key size must be 16 bytes */
+-	if (hcon->sec_level == BT_SECURITY_FIPS)
++	if (chan->sec_level == BT_SECURITY_FIPS)
+ 		min_key_size = 16;
+ 
+ 	return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
+@@ -1453,7 +1454,7 @@ static void l2cap_do_start(struct l2cap_chan *chan)
+ 	    !__l2cap_no_conn_pending(chan))
+ 		return;
+ 
+-	if (l2cap_check_enc_key_size(conn->hcon))
++	if (l2cap_check_enc_key_size(conn->hcon, chan))
+ 		l2cap_start_connection(chan);
+ 	else
+ 		__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
+@@ -1528,7 +1529,7 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
+ 				continue;
+ 			}
+ 
+-			if (l2cap_check_enc_key_size(conn->hcon))
++			if (l2cap_check_enc_key_size(conn->hcon, chan))
+ 				l2cap_start_connection(chan);
+ 			else
+ 				l2cap_chan_close(chan, ECONNREFUSED);
+@@ -3957,7 +3958,7 @@ static void l2cap_connect(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd,
+ 	/* Check if the ACL is secure enough (if not SDP) */
+ 	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
+ 	    (!hci_conn_check_link_mode(conn->hcon) ||
+-	    !l2cap_check_enc_key_size(conn->hcon))) {
++	    !l2cap_check_enc_key_size(conn->hcon, pchan))) {
+ 		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
+ 		result = L2CAP_CR_SEC_BLOCK;
+ 		goto response;
+@@ -7317,7 +7318,7 @@ static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
+ 		}
+ 
+ 		if (chan->state == BT_CONNECT) {
+-			if (!status && l2cap_check_enc_key_size(hcon))
++			if (!status && l2cap_check_enc_key_size(hcon, chan))
+ 				l2cap_start_connection(chan);
+ 			else
+ 				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
+@@ -7327,7 +7328,7 @@ static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
+ 			struct l2cap_conn_rsp rsp;
+ 			__u16 res, stat;
+ 
+-			if (!status && l2cap_check_enc_key_size(hcon)) {
++			if (!status && l2cap_check_enc_key_size(hcon, chan)) {
+ 				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
+ 					res = L2CAP_CR_PEND;
+ 					stat = L2CAP_CS_AUTHOR_PEND;
+diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
+index 1a52a0bca086d6..7e1ad229e1330c 100644
+--- a/net/bridge/br_mdb.c
++++ b/net/bridge/br_mdb.c
+@@ -1040,7 +1040,7 @@ static int br_mdb_add_group(const struct br_mdb_config *cfg,
+ 
+ 	/* host join */
+ 	if (!port) {
+-		if (mp->host_joined) {
++		if (mp->host_joined && !(cfg->nlflags & NLM_F_REPLACE)) {
+ 			NL_SET_ERR_MSG_MOD(extack, "Group is already joined by host");
+ 			return -EEXIST;
+ 		}
+diff --git a/net/bridge/br_nf_core.c b/net/bridge/br_nf_core.c
+index 98aea5485aaef4..a8c67035e23c00 100644
+--- a/net/bridge/br_nf_core.c
++++ b/net/bridge/br_nf_core.c
+@@ -65,17 +65,14 @@ static struct dst_ops fake_dst_ops = {
+  * ipt_REJECT needs it.  Future netfilter modules might
+  * require us to fill additional fields.
+  */
+-static const u32 br_dst_default_metrics[RTAX_MAX] = {
+-	[RTAX_MTU - 1] = 1500,
+-};
+-
+ void br_netfilter_rtable_init(struct net_bridge *br)
+ {
+ 	struct rtable *rt = &br->fake_rtable;
+ 
+ 	rcuref_init(&rt->dst.__rcuref, 1);
+ 	rt->dst.dev = br->dev;
+-	dst_init_metrics(&rt->dst, br_dst_default_metrics, true);
++	dst_init_metrics(&rt->dst, br->metrics, false);
++	dst_metric_set(&rt->dst, RTAX_MTU, br->dev->mtu);
+ 	rt->dst.flags	= DST_NOXFRM | DST_FAKE_RTABLE;
+ 	rt->dst.ops = &fake_dst_ops;
+ }
+diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
+index 041f6e571a2097..df502cc1191c3f 100644
+--- a/net/bridge/br_private.h
++++ b/net/bridge/br_private.h
+@@ -505,6 +505,7 @@ struct net_bridge {
+ 		struct rtable		fake_rtable;
+ 		struct rt6_info		fake_rt6_info;
+ 	};
++	u32				metrics[RTAX_MAX];
+ #endif
+ 	u16				group_fwd_mask;
+ 	u16				group_fwd_mask_required;
+diff --git a/net/can/bcm.c b/net/can/bcm.c
+index 217049fa496e9d..e33ff2a5b20ccb 100644
+--- a/net/can/bcm.c
++++ b/net/can/bcm.c
+@@ -58,6 +58,7 @@
+ #include <linux/can/skb.h>
+ #include <linux/can/bcm.h>
+ #include <linux/slab.h>
++#include <linux/spinlock.h>
+ #include <net/sock.h>
+ #include <net/net_namespace.h>
+ 
+@@ -122,6 +123,7 @@ struct bcm_op {
+ 	struct canfd_frame last_sframe;
+ 	struct sock *sk;
+ 	struct net_device *rx_reg_dev;
++	spinlock_t bcm_tx_lock; /* protect currframe/count in runtime updates */
+ };
+ 
+ struct bcm_sock {
+@@ -217,7 +219,9 @@ static int bcm_proc_show(struct seq_file *m, void *v)
+ 	seq_printf(m, " / bound %s", bcm_proc_getifname(net, ifname, bo->ifindex));
+ 	seq_printf(m, " <<<\n");
+ 
+-	list_for_each_entry(op, &bo->rx_ops, list) {
++	rcu_read_lock();
++
++	list_for_each_entry_rcu(op, &bo->rx_ops, list) {
+ 
+ 		unsigned long reduction;
+ 
+@@ -273,6 +277,9 @@ static int bcm_proc_show(struct seq_file *m, void *v)
+ 		seq_printf(m, "# sent %ld\n", op->frames_abs);
+ 	}
+ 	seq_putc(m, '\n');
++
++	rcu_read_unlock();
++
+ 	return 0;
+ }
+ #endif /* CONFIG_PROC_FS */
+@@ -285,13 +292,18 @@ static void bcm_can_tx(struct bcm_op *op)
+ {
+ 	struct sk_buff *skb;
+ 	struct net_device *dev;
+-	struct canfd_frame *cf = op->frames + op->cfsiz * op->currframe;
++	struct canfd_frame *cf;
+ 	int err;
+ 
+ 	/* no target device? => exit */
+ 	if (!op->ifindex)
+ 		return;
+ 
++	/* read currframe under lock protection */
++	spin_lock_bh(&op->bcm_tx_lock);
++	cf = op->frames + op->cfsiz * op->currframe;
++	spin_unlock_bh(&op->bcm_tx_lock);
++
+ 	dev = dev_get_by_index(sock_net(op->sk), op->ifindex);
+ 	if (!dev) {
+ 		/* RFC: should this bcm_op remove itself here? */
+@@ -312,6 +324,10 @@ static void bcm_can_tx(struct bcm_op *op)
+ 	skb->dev = dev;
+ 	can_skb_set_owner(skb, op->sk);
+ 	err = can_send(skb, 1);
++
++	/* update currframe and count under lock protection */
++	spin_lock_bh(&op->bcm_tx_lock);
++
+ 	if (!err)
+ 		op->frames_abs++;
+ 
+@@ -320,6 +336,11 @@ static void bcm_can_tx(struct bcm_op *op)
+ 	/* reached last frame? */
+ 	if (op->currframe >= op->nframes)
+ 		op->currframe = 0;
++
++	if (op->count > 0)
++		op->count--;
++
++	spin_unlock_bh(&op->bcm_tx_lock);
+ out:
+ 	dev_put(dev);
+ }
+@@ -430,7 +451,7 @@ static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
+ 	struct bcm_msg_head msg_head;
+ 
+ 	if (op->kt_ival1 && (op->count > 0)) {
+-		op->count--;
++		bcm_can_tx(op);
+ 		if (!op->count && (op->flags & TX_COUNTEVT)) {
+ 
+ 			/* create notification to user */
+@@ -445,7 +466,6 @@ static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
+ 
+ 			bcm_send_to_user(op, &msg_head, NULL, 0);
+ 		}
+-		bcm_can_tx(op);
+ 
+ 	} else if (op->kt_ival2) {
+ 		bcm_can_tx(op);
+@@ -843,7 +863,7 @@ static int bcm_delete_rx_op(struct list_head *ops, struct bcm_msg_head *mh,
+ 						  REGMASK(op->can_id),
+ 						  bcm_rx_handler, op);
+ 
+-			list_del(&op->list);
++			list_del_rcu(&op->list);
+ 			bcm_remove_op(op);
+ 			return 1; /* done */
+ 		}
+@@ -863,7 +883,7 @@ static int bcm_delete_tx_op(struct list_head *ops, struct bcm_msg_head *mh,
+ 	list_for_each_entry_safe(op, n, ops, list) {
+ 		if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) &&
+ 		    (op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME)) {
+-			list_del(&op->list);
++			list_del_rcu(&op->list);
+ 			bcm_remove_op(op);
+ 			return 1; /* done */
+ 		}
+@@ -956,6 +976,27 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
+ 		}
+ 		op->flags = msg_head->flags;
+ 
++		/* only lock for unlikely count/nframes/currframe changes */
++		if (op->nframes != msg_head->nframes ||
++		    op->flags & TX_RESET_MULTI_IDX ||
++		    op->flags & SETTIMER) {
++
++			spin_lock_bh(&op->bcm_tx_lock);
++
++			if (op->nframes != msg_head->nframes ||
++			    op->flags & TX_RESET_MULTI_IDX) {
++				/* potentially update changed nframes */
++				op->nframes = msg_head->nframes;
++				/* restart multiple frame transmission */
++				op->currframe = 0;
++			}
++
++			if (op->flags & SETTIMER)
++				op->count = msg_head->count;
++
++			spin_unlock_bh(&op->bcm_tx_lock);
++		}
++
+ 	} else {
+ 		/* insert new BCM operation for the given can_id */
+ 
+@@ -963,9 +1004,14 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
+ 		if (!op)
+ 			return -ENOMEM;
+ 
++		spin_lock_init(&op->bcm_tx_lock);
+ 		op->can_id = msg_head->can_id;
+ 		op->cfsiz = CFSIZ(msg_head->flags);
+ 		op->flags = msg_head->flags;
++		op->nframes = msg_head->nframes;
++
++		if (op->flags & SETTIMER)
++			op->count = msg_head->count;
+ 
+ 		/* create array for CAN frames and copy the data */
+ 		if (msg_head->nframes > 1) {
+@@ -1024,22 +1070,8 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
+ 
+ 	} /* if ((op = bcm_find_op(&bo->tx_ops, msg_head->can_id, ifindex))) */
+ 
+-	if (op->nframes != msg_head->nframes) {
+-		op->nframes   = msg_head->nframes;
+-		/* start multiple frame transmission with index 0 */
+-		op->currframe = 0;
+-	}
+-
+-	/* check flags */
+-
+-	if (op->flags & TX_RESET_MULTI_IDX) {
+-		/* start multiple frame transmission with index 0 */
+-		op->currframe = 0;
+-	}
+-
+ 	if (op->flags & SETTIMER) {
+ 		/* set timer values */
+-		op->count = msg_head->count;
+ 		op->ival1 = msg_head->ival1;
+ 		op->ival2 = msg_head->ival2;
+ 		op->kt_ival1 = bcm_timeval_to_ktime(msg_head->ival1);
+@@ -1056,11 +1088,8 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
+ 		op->flags |= TX_ANNOUNCE;
+ 	}
+ 
+-	if (op->flags & TX_ANNOUNCE) {
++	if (op->flags & TX_ANNOUNCE)
+ 		bcm_can_tx(op);
+-		if (op->count)
+-			op->count--;
+-	}
+ 
+ 	if (op->flags & STARTTIMER)
+ 		bcm_tx_start_timer(op);
+@@ -1276,7 +1305,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
+ 					      bcm_rx_handler, op, "bcm", sk);
+ 		if (err) {
+ 			/* this bcm rx op is broken -> remove it */
+-			list_del(&op->list);
++			list_del_rcu(&op->list);
+ 			bcm_remove_op(op);
+ 			return err;
+ 		}
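
The bcm hunks above put currframe/count/nframes updates under the new bcm_tx_lock so the transmit path never observes a half-applied reconfiguration. A userspace sketch of the same snapshot-then-advance-under-lock pattern, with pthread spinlocks standing in for spin_lock_bh():

/* Cyclic frame index read and advanced only under one lock. */
#include <pthread.h>
#include <stdio.h>

static pthread_spinlock_t tx_lock;
static unsigned int currframe, nframes = 3, count = 5;

static void tx_one(void)
{
	unsigned int frame;

	pthread_spin_lock(&tx_lock);
	frame = currframe;		/* snapshot index under the lock */
	pthread_spin_unlock(&tx_lock);

	printf("send frame %u\n", frame);	/* "send" outside the lock */

	pthread_spin_lock(&tx_lock);
	if (++currframe >= nframes)	/* advance/wrap under the lock */
		currframe = 0;
	if (count > 0)
		count--;
	pthread_spin_unlock(&tx_lock);
}

int main(void)
{
	pthread_spin_init(&tx_lock, PTHREAD_PROCESS_PRIVATE);
	for (int i = 0; i < 5; i++)
		tx_one();
	pthread_spin_destroy(&tx_lock);
	return 0;
}
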
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 7b7b36c43c82cc..2ba2160dd093af 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -6034,16 +6034,18 @@ static DEFINE_PER_CPU(struct work_struct, flush_works);
+ static void flush_backlog(struct work_struct *work)
+ {
+ 	struct sk_buff *skb, *tmp;
++	struct sk_buff_head list;
+ 	struct softnet_data *sd;
+ 
++	__skb_queue_head_init(&list);
+ 	local_bh_disable();
+ 	sd = this_cpu_ptr(&softnet_data);
+ 
+ 	backlog_lock_irq_disable(sd);
+ 	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
+-		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
++		if (READ_ONCE(skb->dev->reg_state) == NETREG_UNREGISTERING) {
+ 			__skb_unlink(skb, &sd->input_pkt_queue);
+-			dev_kfree_skb_irq(skb);
++			__skb_queue_tail(&list, skb);
+ 			rps_input_queue_head_incr(sd);
+ 		}
+ 	}
+@@ -6051,14 +6053,16 @@ static void flush_backlog(struct work_struct *work)
+ 
+ 	local_lock_nested_bh(&softnet_data.process_queue_bh_lock);
+ 	skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
+-		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
++		if (READ_ONCE(skb->dev->reg_state) == NETREG_UNREGISTERING) {
+ 			__skb_unlink(skb, &sd->process_queue);
+-			kfree_skb(skb);
++			__skb_queue_tail(&list, skb);
+ 			rps_input_queue_head_incr(sd);
+ 		}
+ 	}
+ 	local_unlock_nested_bh(&softnet_data.process_queue_bh_lock);
+ 	local_bh_enable();
++
++	__skb_queue_purge_reason(&list, SKB_DROP_REASON_DEV_READY);
+ }
+ 
+ static bool flush_required(int cpu)
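
The flush_backlog() hunk above unlinks doomed skbs onto a private list while the backlog lock is held and frees them only after the lock is dropped. A userspace sketch of that collect-then-free-outside-the-lock pattern, with POSIX stand-ins for the kernel list and lock types:

/* Unlink under the lock; free after unlocking. */
#include <pthread.h>
#include <stdlib.h>

struct node { struct node *next; int doomed; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *queue;

static void flush_doomed(void)
{
	struct node *keep = NULL, *free_list = NULL, *n, *next;

	pthread_mutex_lock(&lock);
	for (n = queue; n; n = next) {
		next = n->next;
		if (n->doomed) {
			n->next = free_list;	/* move to private list */
			free_list = n;
		} else {
			n->next = keep;
			keep = n;
		}
	}
	queue = keep;
	pthread_mutex_unlock(&lock);

	for (n = free_list; n; n = next) {	/* free outside the lock */
		next = n->next;
		free(n);
	}
}

int main(void)
{
	for (int i = 0; i < 4; i++) {
		struct node *n = calloc(1, sizeof(*n));
		n->doomed = i & 1;
		n->next = queue;
		queue = n;
	}
	flush_doomed();
	return 0;
}
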
+diff --git a/net/core/dev.h b/net/core/dev.h
+index 2e3bb7669984a6..764e0097ccf220 100644
+--- a/net/core/dev.h
++++ b/net/core/dev.h
+@@ -148,6 +148,18 @@ void xdp_do_check_flushed(struct napi_struct *napi);
+ static inline void xdp_do_check_flushed(struct napi_struct *napi) { }
+ #endif
+ 
++/* Best effort check that NAPI is not idle (can't be scheduled to run) */
++static inline void napi_assert_will_not_race(const struct napi_struct *napi)
++{
++	/* uninitialized instance, can't race */
++	if (!napi->poll_list.next)
++		return;
++
++	/* SCHED bit is set on disabled instances */
++	WARN_ON(!test_bit(NAPI_STATE_SCHED, &napi->state));
++	WARN_ON(READ_ONCE(napi->list_owner) != -1);
++}
++
+ void kick_defer_list_purge(struct softnet_data *sd, unsigned int cpu);
+ 
+ #define XMIT_RECURSION_LIMIT	8
+diff --git a/net/core/page_pool.c b/net/core/page_pool.c
+index 7b20f6fcb82c02..c8ce069605c421 100644
+--- a/net/core/page_pool.c
++++ b/net/core/page_pool.c
+@@ -25,6 +25,7 @@
+ 
+ #include <trace/events/page_pool.h>
+ 
++#include "dev.h"
+ #include "mp_dmabuf_devmem.h"
+ #include "netmem_priv.h"
+ #include "page_pool_priv.h"
+@@ -1108,11 +1109,7 @@ void page_pool_disable_direct_recycling(struct page_pool *pool)
+ 	if (!pool->p.napi)
+ 		return;
+ 
+-	/* To avoid races with recycling and additional barriers make sure
+-	 * pool and NAPI are unlinked when NAPI is disabled.
+-	 */
+-	WARN_ON(!test_bit(NAPI_STATE_SCHED, &pool->p.napi->state));
+-	WARN_ON(READ_ONCE(pool->p.napi->list_owner) != -1);
++	napi_assert_will_not_race(pool->p.napi);
+ 
+ 	WRITE_ONCE(pool->p.napi, NULL);
+ }
+diff --git a/net/core/pktgen.c b/net/core/pktgen.c
+index b6db4910359bb5..762ede02789909 100644
+--- a/net/core/pktgen.c
++++ b/net/core/pktgen.c
+@@ -898,6 +898,10 @@ static ssize_t get_labels(const char __user *buffer, struct pktgen_dev *pkt_dev)
+ 	pkt_dev->nr_labels = 0;
+ 	do {
+ 		__u32 tmp;
++
++		if (n >= MAX_MPLS_LABELS)
++			return -E2BIG;
++
+ 		len = hex32_arg(&buffer[i], 8, &tmp);
+ 		if (len <= 0)
+ 			return len;
+@@ -909,8 +913,6 @@ static ssize_t get_labels(const char __user *buffer, struct pktgen_dev *pkt_dev)
+ 			return -EFAULT;
+ 		i++;
+ 		n++;
+-		if (n >= MAX_MPLS_LABELS)
+-			return -E2BIG;
+ 	} while (c == ',');
+ 
+ 	pkt_dev->nr_labels = n;
+@@ -1896,8 +1898,8 @@ static ssize_t pktgen_thread_write(struct file *file,
+ 	i = len;
+ 
+ 	/* Read variable name */
+-
+-	len = strn_len(&user_buffer[i], sizeof(name) - 1);
++	max = min(sizeof(name) - 1, count - i);
++	len = strn_len(&user_buffer[i], max);
+ 	if (len < 0)
+ 		return len;
+ 
+@@ -1927,7 +1929,8 @@ static ssize_t pktgen_thread_write(struct file *file,
+ 	if (!strcmp(name, "add_device")) {
+ 		char f[32];
+ 		memset(f, 0, 32);
+-		len = strn_len(&user_buffer[i], sizeof(f) - 1);
++		max = min(sizeof(f) - 1, count - i);
++		len = strn_len(&user_buffer[i], max);
+ 		if (len < 0) {
+ 			ret = len;
+ 			goto out;
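
The two pktgen hunks above clamp every scan to the bytes the caller actually supplied (max = min(sizeof(name) - 1, count - i)) instead of trusting a fixed buffer size. A sketch of a bounded token scanner in the same spirit (standalone C, an illustrative stand-in for pktgen's strn_len(), not the kernel function):

    #include <stddef.h>
    #include <sys/types.h>

    /* Scan for the next delimiter, looking at most 'max' bytes ahead.
     * The caller clamps 'max' to the input it really has, e.g.
     *     max = min(sizeof(name) - 1, count - i);
     * so the scan can never run past the end of the user buffer. */
    static ssize_t tok_len(const char *p, size_t max)
    {
        size_t n;

        for (n = 0; n < max; n++) {
            if (p[n] == ' ' || p[n] == '\t' || p[n] == '\n' || p[n] == '=')
                return (ssize_t)n;
        }
        return -1;  /* no delimiter inside the permitted window */
    }
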
+diff --git a/net/dsa/tag_ksz.c b/net/dsa/tag_ksz.c
+index 281bbac5539d10..75197861bc91fd 100644
+--- a/net/dsa/tag_ksz.c
++++ b/net/dsa/tag_ksz.c
+@@ -140,7 +140,12 @@ static struct sk_buff *ksz8795_xmit(struct sk_buff *skb, struct net_device *dev)
+ 
+ static struct sk_buff *ksz8795_rcv(struct sk_buff *skb, struct net_device *dev)
+ {
+-	u8 *tag = skb_tail_pointer(skb) - KSZ_EGRESS_TAG_LEN;
++	u8 *tag;
++
++	if (skb_linearize(skb))
++		return NULL;
++
++	tag = skb_tail_pointer(skb) - KSZ_EGRESS_TAG_LEN;
+ 
+ 	return ksz_common_rcv(skb, dev, tag[0] & KSZ8795_TAIL_TAG_EG_PORT_M,
+ 			      KSZ_EGRESS_TAG_LEN);
+@@ -311,10 +316,16 @@ static struct sk_buff *ksz9477_xmit(struct sk_buff *skb,
+ 
+ static struct sk_buff *ksz9477_rcv(struct sk_buff *skb, struct net_device *dev)
+ {
+-	/* Tag decoding */
+-	u8 *tag = skb_tail_pointer(skb) - KSZ_EGRESS_TAG_LEN;
+-	unsigned int port = tag[0] & KSZ9477_TAIL_TAG_EG_PORT_M;
+ 	unsigned int len = KSZ_EGRESS_TAG_LEN;
++	unsigned int port;
++	u8 *tag;
++
++	if (skb_linearize(skb))
++		return NULL;
++
++	/* Tag decoding */
++	tag = skb_tail_pointer(skb) - KSZ_EGRESS_TAG_LEN;
++	port = tag[0] & KSZ9477_TAIL_TAG_EG_PORT_M;
+ 
+ 	/* Extra 4-bytes PTP timestamp */
+ 	if (tag[0] & KSZ9477_PTP_TAG_INDICATION) {
+diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
+index 44048d7538ddc3..9d0754b3642fde 100644
+--- a/net/hsr/hsr_device.c
++++ b/net/hsr/hsr_device.c
+@@ -543,6 +543,7 @@ static struct hsr_proto_ops hsr_ops = {
+ 	.drop_frame = hsr_drop_frame,
+ 	.fill_frame_info = hsr_fill_frame_info,
+ 	.invalid_dan_ingress_frame = hsr_invalid_dan_ingress_frame,
++	.register_frame_out = hsr_register_frame_out,
+ };
+ 
+ static struct hsr_proto_ops prp_ops = {
+@@ -553,6 +554,7 @@ static struct hsr_proto_ops prp_ops = {
+ 	.fill_frame_info = prp_fill_frame_info,
+ 	.handle_san_frame = prp_handle_san_frame,
+ 	.update_san_info = prp_update_san_info,
++	.register_frame_out = prp_register_frame_out,
+ };
+ 
+ void hsr_dev_setup(struct net_device *dev)
+diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c
+index c0217476eb17f9..ace4e355d16472 100644
+--- a/net/hsr/hsr_forward.c
++++ b/net/hsr/hsr_forward.c
+@@ -524,8 +524,8 @@ static void hsr_forward_do(struct hsr_frame_info *frame)
+ 		 * Also for SAN, this shouldn't be done.
+ 		 */
+ 		if (!frame->is_from_san &&
+-		    hsr_register_frame_out(port, frame->node_src,
+-					   frame->sequence_nr))
++		    hsr->proto_ops->register_frame_out &&
++		    hsr->proto_ops->register_frame_out(port, frame))
+ 			continue;
+ 
+ 		if (frame->is_supervision && port->type == HSR_PT_MASTER &&
+diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c
+index 73bc6f659812f6..85991fab7db584 100644
+--- a/net/hsr/hsr_framereg.c
++++ b/net/hsr/hsr_framereg.c
+@@ -35,6 +35,7 @@ static bool seq_nr_after(u16 a, u16 b)
+ 
+ #define seq_nr_before(a, b)		seq_nr_after((b), (a))
+ #define seq_nr_before_or_eq(a, b)	(!seq_nr_after((a), (b)))
++#define PRP_DROP_WINDOW_LEN 32768
+ 
+ bool hsr_addr_is_redbox(struct hsr_priv *hsr, unsigned char *addr)
+ {
+@@ -176,8 +177,11 @@ static struct hsr_node *hsr_add_node(struct hsr_priv *hsr,
+ 		new_node->time_in[i] = now;
+ 		new_node->time_out[i] = now;
+ 	}
+-	for (i = 0; i < HSR_PT_PORTS; i++)
++	for (i = 0; i < HSR_PT_PORTS; i++) {
+ 		new_node->seq_out[i] = seq_out;
++		new_node->seq_expected[i] = seq_out + 1;
++		new_node->seq_start[i] = seq_out + 1;
++	}
+ 
+ 	if (san && hsr->proto_ops->handle_san_frame)
+ 		hsr->proto_ops->handle_san_frame(san, rx_port, new_node);
+@@ -482,9 +486,11 @@ void hsr_register_frame_in(struct hsr_node *node, struct hsr_port *port,
+  *	 0 otherwise, or
+  *	 negative error code on error
+  */
+-int hsr_register_frame_out(struct hsr_port *port, struct hsr_node *node,
+-			   u16 sequence_nr)
++int hsr_register_frame_out(struct hsr_port *port, struct hsr_frame_info *frame)
+ {
++	struct hsr_node *node = frame->node_src;
++	u16 sequence_nr = frame->sequence_nr;
++
+ 	spin_lock_bh(&node->seq_out_lock);
+ 	if (seq_nr_before_or_eq(sequence_nr, node->seq_out[port->type]) &&
+ 	    time_is_after_jiffies(node->time_out[port->type] +
+@@ -499,6 +505,89 @@ int hsr_register_frame_out(struct hsr_port *port, struct hsr_node *node,
+ 	return 0;
+ }
+ 
++/* Adaptation of the PRP duplicate discard algorithm described in wireshark
++ * wiki (https://wiki.wireshark.org/PRP)
++ *
++ * A drop window is maintained for both LANs with start sequence set to the
++ * first sequence accepted on the LAN that has not been seen on the other LAN,
++ * and expected sequence set to the latest received sequence number plus one.
++ *
++ * When a frame is received on either LAN it is compared against the received
++ * frames on the other LAN. If it is outside the drop window of the other LAN
++ * the frame is accepted and the drop window is updated.
++ * The drop window for the other LAN is reset.
++ *
++ * 'port' is the outgoing interface
++ * 'frame' is the frame to be sent
++ *
++ * Return:
++ *	 1 if frame can be shown to have been sent recently on this interface,
++ *	 0 otherwise
++ */
++int prp_register_frame_out(struct hsr_port *port, struct hsr_frame_info *frame)
++{
++	enum hsr_port_type other_port;
++	enum hsr_port_type rcv_port;
++	struct hsr_node *node;
++	u16 sequence_diff;
++	u16 sequence_exp;
++	u16 sequence_nr;
++
++	/* out-going frames are always in order
++	 * and can be checked the same way as for HSR
++	 */
++	if (frame->port_rcv->type == HSR_PT_MASTER)
++		return hsr_register_frame_out(port, frame);
++
++	/* for PRP we should only forward frames from the slave ports
++	 * to the master port
++	 */
++	if (port->type != HSR_PT_MASTER)
++		return 1;
++
++	node = frame->node_src;
++	sequence_nr = frame->sequence_nr;
++	sequence_exp = sequence_nr + 1;
++	rcv_port = frame->port_rcv->type;
++	other_port = rcv_port == HSR_PT_SLAVE_A ? HSR_PT_SLAVE_B :
++				 HSR_PT_SLAVE_A;
++
++	spin_lock_bh(&node->seq_out_lock);
++	if (time_is_before_jiffies(node->time_out[port->type] +
++	    msecs_to_jiffies(HSR_ENTRY_FORGET_TIME)) ||
++	    (node->seq_start[rcv_port] == node->seq_expected[rcv_port] &&
++	     node->seq_start[other_port] == node->seq_expected[other_port])) {
++		/* the node hasn't been sending for a while
++		 * or both drop windows are empty, forward the frame
++		 */
++		node->seq_start[rcv_port] = sequence_nr;
++	} else if (seq_nr_before(sequence_nr, node->seq_expected[other_port]) &&
++		   seq_nr_before_or_eq(node->seq_start[other_port], sequence_nr)) {
++		/* drop the frame, update the drop window for the other port
++		 * and reset our drop window
++		 */
++		node->seq_start[other_port] = sequence_exp;
++		node->seq_expected[rcv_port] = sequence_exp;
++		node->seq_start[rcv_port] = node->seq_expected[rcv_port];
++		spin_unlock_bh(&node->seq_out_lock);
++		return 1;
++	}
++
++	/* update the drop window for the port where this frame was received
++	 * and clear the drop window for the other port
++	 */
++	node->seq_start[other_port] = node->seq_expected[other_port];
++	node->seq_expected[rcv_port] = sequence_exp;
++	sequence_diff = sequence_exp - node->seq_start[rcv_port];
++	if (sequence_diff > PRP_DROP_WINDOW_LEN)
++		node->seq_start[rcv_port] = sequence_exp - PRP_DROP_WINDOW_LEN;
++
++	node->time_out[port->type] = jiffies;
++	node->seq_out[port->type] = sequence_nr;
++	spin_unlock_bh(&node->seq_out_lock);
++	return 0;
++}
++
+ static struct hsr_port *get_late_port(struct hsr_priv *hsr,
+ 				      struct hsr_node *node)
+ {
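
The comment block in prp_register_frame_out() above describes the per-lane drop windows; the sketch below models the same accept/drop decision in standalone C. It is simplified: the caller's per-node locking and the HSR_ENTRY_FORGET_TIME inactivity check from the patch are omitted, and all names are invented for the example.

    #include <stdbool.h>
    #include <stdint.h>

    #define WIN_MAX 32768u

    /* wrap-safe u16 comparisons, same idea as hsr's seq_nr_before() */
    static bool before(uint16_t a, uint16_t b)    { return (int16_t)(a - b) < 0; }
    static bool before_eq(uint16_t a, uint16_t b) { return !before(b, a); }

    struct node {
        uint16_t start[2];     /* first sequence not yet seen on the other lane */
        uint16_t expected[2];  /* last accepted sequence + 1 */
    };

    /* Returns true if the frame is a duplicate and must be dropped. */
    static bool prp_duplicate(struct node *n, int lane, uint16_t seq)
    {
        int other = lane ^ 1;
        uint16_t exp = seq + 1;

        if (n->start[0] == n->expected[0] && n->start[1] == n->expected[1]) {
            /* both drop windows empty: accept and open a fresh window */
            n->start[lane] = seq;
        } else if (before(seq, n->expected[other]) &&
                   before_eq(n->start[other], seq)) {
            /* inside the other lane's window: duplicate */
            n->start[other] = exp;
            n->expected[lane] = exp;
            n->start[lane] = exp;
            return true;
        }

        /* accept: reset the other window, extend ours, clamp its length */
        n->start[other] = n->expected[other];
        n->expected[lane] = exp;
        if ((uint16_t)(exp - n->start[lane]) > WIN_MAX)
            n->start[lane] = exp - WIN_MAX;
        return false;
    }

With a zeroed struct node, seq 100 arriving on lane 0 is accepted; the same seq 100 then arriving on lane 1 falls inside lane 0's window and is dropped, matching the behaviour the patch comment describes.
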
+diff --git a/net/hsr/hsr_framereg.h b/net/hsr/hsr_framereg.h
+index 993fa950d81449..b04948659d84d8 100644
+--- a/net/hsr/hsr_framereg.h
++++ b/net/hsr/hsr_framereg.h
+@@ -44,8 +44,7 @@ void hsr_addr_subst_dest(struct hsr_node *node_src, struct sk_buff *skb,
+ 
+ void hsr_register_frame_in(struct hsr_node *node, struct hsr_port *port,
+ 			   u16 sequence_nr);
+-int hsr_register_frame_out(struct hsr_port *port, struct hsr_node *node,
+-			   u16 sequence_nr);
++int hsr_register_frame_out(struct hsr_port *port, struct hsr_frame_info *frame);
+ 
+ void hsr_prune_nodes(struct timer_list *t);
+ void hsr_prune_proxy_nodes(struct timer_list *t);
+@@ -73,6 +72,8 @@ void prp_update_san_info(struct hsr_node *node, bool is_sup);
+ bool hsr_is_node_in_db(struct list_head *node_db,
+ 		       const unsigned char addr[ETH_ALEN]);
+ 
++int prp_register_frame_out(struct hsr_port *port, struct hsr_frame_info *frame);
++
+ struct hsr_node {
+ 	struct list_head	mac_list;
+ 	/* Protect R/W access to seq_out */
+@@ -89,6 +90,9 @@ struct hsr_node {
+ 	bool			san_b;
+ 	u16			seq_out[HSR_PT_PORTS];
+ 	bool			removed;
++	/* PRP specific duplicate handling */
++	u16			seq_expected[HSR_PT_PORTS];
++	u16			seq_start[HSR_PT_PORTS];
+ 	struct rcu_head		rcu_head;
+ };
+ 
+diff --git a/net/hsr/hsr_main.h b/net/hsr/hsr_main.h
+index fcfeb79bb04018..e26244456f6396 100644
+--- a/net/hsr/hsr_main.h
++++ b/net/hsr/hsr_main.h
+@@ -183,6 +183,8 @@ struct hsr_proto_ops {
+ 			       struct hsr_frame_info *frame);
+ 	bool (*invalid_dan_ingress_frame)(__be16 protocol);
+ 	void (*update_san_info)(struct hsr_node *node, bool is_sup);
++	int (*register_frame_out)(struct hsr_port *port,
++				  struct hsr_frame_info *frame);
+ };
+ 
+ struct hsr_self_node {
+diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
+index f3281312eb5eba..cbe4c6fc8b8e95 100644
+--- a/net/ipv4/esp4.c
++++ b/net/ipv4/esp4.c
+@@ -120,47 +120,16 @@ static void esp_ssg_unref(struct xfrm_state *x, void *tmp, struct sk_buff *skb)
+ }
+ 
+ #ifdef CONFIG_INET_ESPINTCP
+-struct esp_tcp_sk {
+-	struct sock *sk;
+-	struct rcu_head rcu;
+-};
+-
+-static void esp_free_tcp_sk(struct rcu_head *head)
+-{
+-	struct esp_tcp_sk *esk = container_of(head, struct esp_tcp_sk, rcu);
+-
+-	sock_put(esk->sk);
+-	kfree(esk);
+-}
+-
+ static struct sock *esp_find_tcp_sk(struct xfrm_state *x)
+ {
+ 	struct xfrm_encap_tmpl *encap = x->encap;
+ 	struct net *net = xs_net(x);
+-	struct esp_tcp_sk *esk;
+ 	__be16 sport, dport;
+-	struct sock *nsk;
+ 	struct sock *sk;
+ 
+-	sk = rcu_dereference(x->encap_sk);
+-	if (sk && sk->sk_state == TCP_ESTABLISHED)
+-		return sk;
+-
+ 	spin_lock_bh(&x->lock);
+ 	sport = encap->encap_sport;
+ 	dport = encap->encap_dport;
+-	nsk = rcu_dereference_protected(x->encap_sk,
+-					lockdep_is_held(&x->lock));
+-	if (sk && sk == nsk) {
+-		esk = kmalloc(sizeof(*esk), GFP_ATOMIC);
+-		if (!esk) {
+-			spin_unlock_bh(&x->lock);
+-			return ERR_PTR(-ENOMEM);
+-		}
+-		RCU_INIT_POINTER(x->encap_sk, NULL);
+-		esk->sk = sk;
+-		call_rcu(&esk->rcu, esp_free_tcp_sk);
+-	}
+ 	spin_unlock_bh(&x->lock);
+ 
+ 	sk = inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo, x->id.daddr.a4,
+@@ -173,20 +142,6 @@ static struct sock *esp_find_tcp_sk(struct xfrm_state *x)
+ 		return ERR_PTR(-EINVAL);
+ 	}
+ 
+-	spin_lock_bh(&x->lock);
+-	nsk = rcu_dereference_protected(x->encap_sk,
+-					lockdep_is_held(&x->lock));
+-	if (encap->encap_sport != sport ||
+-	    encap->encap_dport != dport) {
+-		sock_put(sk);
+-		sk = nsk ?: ERR_PTR(-EREMCHG);
+-	} else if (sk == nsk) {
+-		sock_put(sk);
+-	} else {
+-		rcu_assign_pointer(x->encap_sk, sk);
+-	}
+-	spin_unlock_bh(&x->lock);
+-
+ 	return sk;
+ }
+ 
+@@ -199,8 +154,10 @@ static int esp_output_tcp_finish(struct xfrm_state *x, struct sk_buff *skb)
+ 
+ 	sk = esp_find_tcp_sk(x);
+ 	err = PTR_ERR_OR_ZERO(sk);
+-	if (err)
++	if (err) {
++		kfree_skb(skb);
+ 		goto out;
++	}
+ 
+ 	bh_lock_sock(sk);
+ 	if (sock_owned_by_user(sk))
+@@ -209,6 +166,8 @@ static int esp_output_tcp_finish(struct xfrm_state *x, struct sk_buff *skb)
+ 		err = espintcp_push_skb(sk, skb);
+ 	bh_unlock_sock(sk);
+ 
++	sock_put(sk);
++
+ out:
+ 	rcu_read_unlock();
+ 	return err;
+@@ -392,6 +351,8 @@ static struct ip_esp_hdr *esp_output_tcp_encap(struct xfrm_state *x,
+ 	if (IS_ERR(sk))
+ 		return ERR_CAST(sk);
+ 
++	sock_put(sk);
++
+ 	*lenp = htons(len);
+ 	esph = (struct ip_esp_hdr *)(lenp + 1);
+ 
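
After the esp4.c change above, esp_find_tcp_sk() no longer caches the socket in x->encap_sk: every successful call hands back a referenced socket that the caller must release with sock_put(), which is why both call sites gain one. A generic sketch of that ownership rule using C11 atomics (standalone; the kernel uses its own refcount helpers, and all names here are invented):

    #include <stdatomic.h>
    #include <stdlib.h>

    struct obj {
        atomic_int refcnt;
        /* ... payload ... */
    };

    static void obj_put(struct obj *o)
    {
        if (atomic_fetch_sub(&o->refcnt, 1) == 1)
            free(o);
    }

    /* Lookup returns with one reference taken, or NULL.  The caller owns
     * that reference and must obj_put() when done -- no cache keeps a
     * long-lived reference behind the caller's back. */
    static struct obj *lookup(struct obj *table_entry)
    {
        if (!table_entry)
            return NULL;
        atomic_fetch_add(&table_entry->refcnt, 1);
        return table_entry;
    }

    static void use_once(struct obj *entry)
    {
        struct obj *o = lookup(entry);

        if (!o)
            return;
        /* ... use o ... */
        obj_put(o);          /* matches the reference taken by lookup() */
    }
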
+diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
+index 793e6781399a4f..5b7c41333d6fc8 100644
+--- a/net/ipv4/fib_frontend.c
++++ b/net/ipv4/fib_frontend.c
+@@ -829,19 +829,33 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
+ 		}
+ 	}
+ 
++	if (cfg->fc_dst_len > 32) {
++		NL_SET_ERR_MSG(extack, "Invalid prefix length");
++		err = -EINVAL;
++		goto errout;
++	}
++
++	if (cfg->fc_dst_len < 32 && (ntohl(cfg->fc_dst) << cfg->fc_dst_len)) {
++		NL_SET_ERR_MSG(extack, "Invalid prefix for given prefix length");
++		err = -EINVAL;
++		goto errout;
++	}
++
+ 	if (cfg->fc_nh_id) {
+ 		if (cfg->fc_oif || cfg->fc_gw_family ||
+ 		    cfg->fc_encap || cfg->fc_mp) {
+ 			NL_SET_ERR_MSG(extack,
+ 				       "Nexthop specification and nexthop id are mutually exclusive");
+-			return -EINVAL;
++			err = -EINVAL;
++			goto errout;
+ 		}
+ 	}
+ 
+ 	if (has_gw && has_via) {
+ 		NL_SET_ERR_MSG(extack,
+ 			       "Nexthop configuration can not contain both GATEWAY and VIA");
+-		return -EINVAL;
++		err = -EINVAL;
++		goto errout;
+ 	}
+ 
+ 	if (!cfg->fc_table)
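
The two new checks in rtm_to_fib_config() above reject a prefix length over 32 and a destination with host bits set below the prefix length; the equivalent test is removed from fib_trie.c later in this patch. A standalone sketch of the bit test (host-order arithmetic, plain C):

    #include <stdbool.h>
    #include <stdint.h>

    /* dst is the destination in host byte order (i.e. after ntohl()). */
    static bool fib_prefix_valid(uint32_t dst, unsigned int plen)
    {
        if (plen > 32)
            return false;            /* impossible prefix length */
        /* For plen < 32, shifting out the prefix must leave nothing:
         * any remaining bit is a host bit, so 10.0.0.1/24 is rejected
         * while 10.0.0.0/24 and 10.0.0.1/32 pass.  plen == 32 skips
         * the shift, which would otherwise be undefined behaviour. */
        if (plen < 32 && (dst << plen))
            return false;
        return true;
    }
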
+diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
+index b07292d50ee766..4563e5303c1a8c 100644
+--- a/net/ipv4/fib_rules.c
++++ b/net/ipv4/fib_rules.c
+@@ -245,9 +245,9 @@ static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
+ 			       struct nlattr **tb,
+ 			       struct netlink_ext_ack *extack)
+ {
+-	struct net *net = sock_net(skb->sk);
++	struct fib4_rule *rule4 = (struct fib4_rule *)rule;
++	struct net *net = rule->fr_net;
+ 	int err = -EINVAL;
+-	struct fib4_rule *rule4 = (struct fib4_rule *) rule;
+ 
+ 	if (!inet_validate_dscp(frh->tos)) {
+ 		NL_SET_ERR_MSG(extack,
+diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
+index 09e31757e96c74..cc86031d2050f1 100644
+--- a/net/ipv4/fib_trie.c
++++ b/net/ipv4/fib_trie.c
+@@ -1193,22 +1193,6 @@ static int fib_insert_alias(struct trie *t, struct key_vector *tp,
+ 	return 0;
+ }
+ 
+-static bool fib_valid_key_len(u32 key, u8 plen, struct netlink_ext_ack *extack)
+-{
+-	if (plen > KEYLENGTH) {
+-		NL_SET_ERR_MSG(extack, "Invalid prefix length");
+-		return false;
+-	}
+-
+-	if ((plen < KEYLENGTH) && (key << plen)) {
+-		NL_SET_ERR_MSG(extack,
+-			       "Invalid prefix for given prefix length");
+-		return false;
+-	}
+-
+-	return true;
+-}
+-
+ static void fib_remove_alias(struct trie *t, struct key_vector *tp,
+ 			     struct key_vector *l, struct fib_alias *old);
+ 
+@@ -1229,9 +1213,6 @@ int fib_table_insert(struct net *net, struct fib_table *tb,
+ 
+ 	key = ntohl(cfg->fc_dst);
+ 
+-	if (!fib_valid_key_len(key, plen, extack))
+-		return -EINVAL;
+-
+ 	pr_debug("Insert table=%u %08x/%d\n", tb->tb_id, key, plen);
+ 
+ 	fi = fib_create_info(cfg, extack);
+@@ -1723,9 +1704,6 @@ int fib_table_delete(struct net *net, struct fib_table *tb,
+ 
+ 	key = ntohl(cfg->fc_dst);
+ 
+-	if (!fib_valid_key_len(key, plen, extack))
+-		return -EINVAL;
+-
+ 	l = fib_find_node(t, &tp, key);
+ 	if (!l)
+ 		return -ESRCH;
+diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
+index 9bfcfd016e1827..2b4a588247639e 100644
+--- a/net/ipv4/inet_hashtables.c
++++ b/net/ipv4/inet_hashtables.c
+@@ -1230,22 +1230,37 @@ int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo)
+ {
+ 	unsigned int locksz = sizeof(spinlock_t);
+ 	unsigned int i, nblocks = 1;
++	spinlock_t *ptr = NULL;
+ 
+-	if (locksz != 0) {
+-		/* allocate 2 cache lines or at least one spinlock per cpu */
+-		nblocks = max(2U * L1_CACHE_BYTES / locksz, 1U);
+-		nblocks = roundup_pow_of_two(nblocks * num_possible_cpus());
++	if (locksz == 0)
++		goto set_mask;
+ 
+-		/* no more locks than number of hash buckets */
+-		nblocks = min(nblocks, hashinfo->ehash_mask + 1);
++	/* Allocate 2 cache lines or at least one spinlock per cpu. */
++	nblocks = max(2U * L1_CACHE_BYTES / locksz, 1U) * num_possible_cpus();
+ 
+-		hashinfo->ehash_locks = kvmalloc_array(nblocks, locksz, GFP_KERNEL);
+-		if (!hashinfo->ehash_locks)
+-			return -ENOMEM;
++	/* At least one page per NUMA node. */
++	nblocks = max(nblocks, num_online_nodes() * PAGE_SIZE / locksz);
++
++	nblocks = roundup_pow_of_two(nblocks);
++
++	/* No more locks than number of hash buckets. */
++	nblocks = min(nblocks, hashinfo->ehash_mask + 1);
+ 
+-		for (i = 0; i < nblocks; i++)
+-			spin_lock_init(&hashinfo->ehash_locks[i]);
++	if (num_online_nodes() > 1) {
++		/* Use vmalloc() to allow NUMA policy to spread pages
++		 * on all available nodes if desired.
++		 */
++		ptr = vmalloc_array(nblocks, locksz);
++	}
++	if (!ptr) {
++		ptr = kvmalloc_array(nblocks, locksz, GFP_KERNEL);
++		if (!ptr)
++			return -ENOMEM;
+ 	}
++	for (i = 0; i < nblocks; i++)
++		spin_lock_init(&ptr[i]);
++	hashinfo->ehash_locks = ptr;
++set_mask:
+ 	hashinfo->ehash_locks_mask = nblocks - 1;
+ 	return 0;
+ }
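
The resized inet_ehash_locks_alloc() above grows the lock array to at least two cache lines of spinlocks per CPU and one page per online NUMA node, rounds up to a power of two so a mask can pick the lock, and caps at the bucket count. A sketch of just the arithmetic (standalone C; the parameters are passed in rather than read from the system, and roundup_pow2() stands in for the kernel helper):

    static unsigned int roundup_pow2(unsigned int v)
    {
        unsigned int r = 1;

        while (r < v)
            r <<= 1;
        return r;
    }

    static unsigned int ehash_nlocks(unsigned int locksz, unsigned int ncpus,
                                     unsigned int nnodes, unsigned int nbuckets,
                                     unsigned int cacheline, unsigned int pagesz)
    {
        unsigned int n;

        if (locksz == 0)
            return 1;                /* mask becomes 0: one implicit lock */

        /* two cache lines (or at least one lock) per CPU */
        n = 2u * cacheline / locksz;
        if (n < 1)
            n = 1;
        n *= ncpus;

        /* at least one page of locks per NUMA node */
        if (n < nnodes * pagesz / locksz)
            n = nnodes * pagesz / locksz;

        n = roundup_pow2(n);

        /* never more locks than hash buckets */
        if (n > nbuckets)
            n = nbuckets;
        return n;
    }

For example, ehash_nlocks(4, 64, 2, 1 << 20, 64, 4096) yields 2048: 32 locks per CPU times 64 CPUs meets the two-page NUMA floor exactly, is already a power of two, and sits well under the bucket cap.
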
+diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
+index f1f31ebfc79344..9667f27740258e 100644
+--- a/net/ipv4/ip_gre.c
++++ b/net/ipv4/ip_gre.c
+@@ -141,7 +141,6 @@ static int ipgre_err(struct sk_buff *skb, u32 info,
+ 	const struct iphdr *iph;
+ 	const int type = icmp_hdr(skb)->type;
+ 	const int code = icmp_hdr(skb)->code;
+-	unsigned int data_len = 0;
+ 	struct ip_tunnel *t;
+ 
+ 	if (tpi->proto == htons(ETH_P_TEB))
+@@ -182,7 +181,6 @@ static int ipgre_err(struct sk_buff *skb, u32 info,
+ 	case ICMP_TIME_EXCEEDED:
+ 		if (code != ICMP_EXC_TTL)
+ 			return 0;
+-		data_len = icmp_hdr(skb)->un.reserved[1] * 4; /* RFC 4884 4.1 */
+ 		break;
+ 
+ 	case ICMP_REDIRECT:
+@@ -190,10 +188,16 @@ static int ipgre_err(struct sk_buff *skb, u32 info,
+ 	}
+ 
+ #if IS_ENABLED(CONFIG_IPV6)
+-	if (tpi->proto == htons(ETH_P_IPV6) &&
+-	    !ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4 + tpi->hdr_len,
+-					type, data_len))
+-		return 0;
++	if (tpi->proto == htons(ETH_P_IPV6)) {
++		unsigned int data_len = 0;
++
++		if (type == ICMP_TIME_EXCEEDED)
++			data_len = icmp_hdr(skb)->un.reserved[1] * 4; /* RFC 4884 4.1 */
++
++		if (!ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4 + tpi->hdr_len,
++						type, data_len))
++			return 0;
++	}
+ #endif
+ 
+ 	if (t->parms.iph.daddr == 0 ||
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index d93a5a89c5692d..d29219e067b7fd 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -419,6 +419,20 @@ static bool tcp_ecn_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr
+ 	return false;
+ }
+ 
++static void tcp_count_delivered_ce(struct tcp_sock *tp, u32 ecn_count)
++{
++	tp->delivered_ce += ecn_count;
++}
++
++/* Updates the delivered and delivered_ce counts */
++static void tcp_count_delivered(struct tcp_sock *tp, u32 delivered,
++				bool ece_ack)
++{
++	tp->delivered += delivered;
++	if (ece_ack)
++		tcp_count_delivered_ce(tp, delivered);
++}
++
+ /* Buffer size and advertised window tuning.
+  *
+  * 1. Tuning sk->sk_sndbuf, when connection enters established state.
+@@ -1154,15 +1168,6 @@ void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb)
+ 	}
+ }
+ 
+-/* Updates the delivered and delivered_ce counts */
+-static void tcp_count_delivered(struct tcp_sock *tp, u32 delivered,
+-				bool ece_ack)
+-{
+-	tp->delivered += delivered;
+-	if (ece_ack)
+-		tp->delivered_ce += delivered;
+-}
+-
+ /* This procedure tags the retransmission queue when SACKs arrive.
+  *
+  * We have three tag bits: SACKED(S), RETRANS(R) and LOST(L).
+@@ -3862,12 +3867,23 @@ static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag)
+ 	}
+ }
+ 
+-static inline void tcp_in_ack_event(struct sock *sk, u32 flags)
++static void tcp_in_ack_event(struct sock *sk, int flag)
+ {
+ 	const struct inet_connection_sock *icsk = inet_csk(sk);
+ 
+-	if (icsk->icsk_ca_ops->in_ack_event)
+-		icsk->icsk_ca_ops->in_ack_event(sk, flags);
++	if (icsk->icsk_ca_ops->in_ack_event) {
++		u32 ack_ev_flags = 0;
++
++		if (flag & FLAG_WIN_UPDATE)
++			ack_ev_flags |= CA_ACK_WIN_UPDATE;
++		if (flag & FLAG_SLOWPATH) {
++			ack_ev_flags |= CA_ACK_SLOWPATH;
++			if (flag & FLAG_ECE)
++				ack_ev_flags |= CA_ACK_ECE;
++		}
++
++		icsk->icsk_ca_ops->in_ack_event(sk, ack_ev_flags);
++	}
+ }
+ 
+ /* Congestion control has updated the cwnd already. So if we're in
+@@ -3984,12 +4000,8 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
+ 		tcp_snd_una_update(tp, ack);
+ 		flag |= FLAG_WIN_UPDATE;
+ 
+-		tcp_in_ack_event(sk, CA_ACK_WIN_UPDATE);
+-
+ 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPACKS);
+ 	} else {
+-		u32 ack_ev_flags = CA_ACK_SLOWPATH;
+-
+ 		if (ack_seq != TCP_SKB_CB(skb)->end_seq)
+ 			flag |= FLAG_DATA;
+ 		else
+@@ -4001,19 +4013,12 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
+ 			flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una,
+ 							&sack_state);
+ 
+-		if (tcp_ecn_rcv_ecn_echo(tp, tcp_hdr(skb))) {
++		if (tcp_ecn_rcv_ecn_echo(tp, tcp_hdr(skb)))
+ 			flag |= FLAG_ECE;
+-			ack_ev_flags |= CA_ACK_ECE;
+-		}
+ 
+ 		if (sack_state.sack_delivered)
+ 			tcp_count_delivered(tp, sack_state.sack_delivered,
+ 					    flag & FLAG_ECE);
+-
+-		if (flag & FLAG_WIN_UPDATE)
+-			ack_ev_flags |= CA_ACK_WIN_UPDATE;
+-
+-		tcp_in_ack_event(sk, ack_ev_flags);
+ 	}
+ 
+ 	/* This is a deviation from RFC3168 since it states that:
+@@ -4040,6 +4045,8 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
+ 
+ 	tcp_rack_update_reo_wnd(sk, &rs);
+ 
++	tcp_in_ack_event(sk, flag);
++
+ 	if (tp->tlp_high_seq)
+ 		tcp_process_tlp_ack(sk, ack, flag);
+ 
+@@ -4071,6 +4078,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
+ 	return 1;
+ 
+ no_queue:
++	tcp_in_ack_event(sk, flag);
+ 	/* If data was DSACKed, see if we can undo a cwnd reduction. */
+ 	if (flag & FLAG_DSACKING_ACK) {
+ 		tcp_fastretrans_alert(sk, prior_snd_una, num_dupack, &flag,
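
The tcp_input.c hunks above consolidate tcp_in_ack_event() into a single call made after the ACK is fully processed, deriving the congestion-control event mask from the accumulated flag word instead of two divergent call sites. A sketch of the mapping (standalone C; the constants carry invented values, not the kernel's):

    #include <stdint.h>

    #define FLAG_WIN_UPDATE  0x01
    #define FLAG_SLOWPATH    0x02
    #define FLAG_ECE         0x04

    #define CA_ACK_WIN_UPDATE  0x01
    #define CA_ACK_SLOWPATH    0x02
    #define CA_ACK_ECE         0x04

    /* Translate processing flags into the event mask handed to the
     * congestion-control hook; ECE is only reported on the slow path,
     * mirroring the patched tcp_in_ack_event(). */
    static uint32_t ack_event_flags(int flag)
    {
        uint32_t ev = 0;

        if (flag & FLAG_WIN_UPDATE)
            ev |= CA_ACK_WIN_UPDATE;
        if (flag & FLAG_SLOWPATH) {
            ev |= CA_ACK_SLOWPATH;
            if (flag & FLAG_ECE)
                ev |= CA_ACK_ECE;
        }
        return ev;
    }
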
+diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c
+index a620618cc568a5..17d3fc2fab4ccb 100644
+--- a/net/ipv4/xfrm4_input.c
++++ b/net/ipv4/xfrm4_input.c
+@@ -182,11 +182,15 @@ struct sk_buff *xfrm4_gro_udp_encap_rcv(struct sock *sk, struct list_head *head,
+ 	int offset = skb_gro_offset(skb);
+ 	const struct net_offload *ops;
+ 	struct sk_buff *pp = NULL;
+-	int ret;
+-
+-	offset = offset - sizeof(struct udphdr);
++	int len, dlen;
++	__u8 *udpdata;
++	__be32 *udpdata32;
+ 
+-	if (!pskb_pull(skb, offset))
++	len = skb->len - offset;
++	dlen = offset + min(len, 8);
++	udpdata = skb_gro_header(skb, dlen, offset);
++	udpdata32 = (__be32 *)udpdata;
++	if (unlikely(!udpdata))
+ 		return NULL;
+ 
+ 	rcu_read_lock();
+@@ -194,11 +198,10 @@ struct sk_buff *xfrm4_gro_udp_encap_rcv(struct sock *sk, struct list_head *head,
+ 	if (!ops || !ops->callbacks.gro_receive)
+ 		goto out;
+ 
+-	ret = __xfrm4_udp_encap_rcv(sk, skb, false);
+-	if (ret)
++	/* check if it is a keepalive or IKE packet */
++	if (len <= sizeof(struct ip_esp_hdr) || udpdata32[0] == 0)
+ 		goto out;
+ 
+-	skb_push(skb, offset);
+ 	NAPI_GRO_CB(skb)->proto = IPPROTO_UDP;
+ 
+ 	pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
+@@ -208,7 +211,6 @@ struct sk_buff *xfrm4_gro_udp_encap_rcv(struct sock *sk, struct list_head *head,
+ 
+ out:
+ 	rcu_read_unlock();
+-	skb_push(skb, offset);
+ 	NAPI_GRO_CB(skb)->same_flow = 0;
+ 	NAPI_GRO_CB(skb)->flush = 1;
+ 
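
The xfrm4 GRO hunk above peeks at the UDP payload with skb_gro_header() instead of pulling, and skips GRO for anything that is not full ESP-in-UDP: a payload no longer than an ESP header, or one opening with the four-zero-byte non-ESP marker that precedes IKE traffic. Per RFC 3948, a NAT-keepalive is a single 0xFF octet and the zero marker works because an ESP SPI of zero is reserved. A standalone classifier sketch (names invented; not the kernel's code path):

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    enum encap_kind { ENCAP_KEEPALIVE, ENCAP_IKE, ENCAP_ESP, ENCAP_RUNT };

    #define ESP_HDR_LEN 8   /* SPI (4 bytes) + sequence number (4 bytes) */

    static enum encap_kind classify_udp_encap(const uint8_t *p, size_t len)
    {
        uint32_t marker;

        if (len == 1 && p[0] == 0xff)
            return ENCAP_KEEPALIVE;      /* RFC 3948 NAT-keepalive */
        if (len < 4)
            return ENCAP_RUNT;
        memcpy(&marker, p, 4);           /* first 4 bytes, any alignment */
        if (marker == 0)
            return ENCAP_IKE;            /* non-ESP marker: IKE traffic */
        if (len <= ESP_HDR_LEN)
            return ENCAP_RUNT;           /* too short for a real ESP packet */
        return ENCAP_ESP;
    }
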
+diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
+index b2400c226a3258..62d17d7f6d9a99 100644
+--- a/net/ipv6/esp6.c
++++ b/net/ipv6/esp6.c
+@@ -137,47 +137,16 @@ static void esp_ssg_unref(struct xfrm_state *x, void *tmp, struct sk_buff *skb)
+ }
+ 
+ #ifdef CONFIG_INET6_ESPINTCP
+-struct esp_tcp_sk {
+-	struct sock *sk;
+-	struct rcu_head rcu;
+-};
+-
+-static void esp_free_tcp_sk(struct rcu_head *head)
+-{
+-	struct esp_tcp_sk *esk = container_of(head, struct esp_tcp_sk, rcu);
+-
+-	sock_put(esk->sk);
+-	kfree(esk);
+-}
+-
+ static struct sock *esp6_find_tcp_sk(struct xfrm_state *x)
+ {
+ 	struct xfrm_encap_tmpl *encap = x->encap;
+ 	struct net *net = xs_net(x);
+-	struct esp_tcp_sk *esk;
+ 	__be16 sport, dport;
+-	struct sock *nsk;
+ 	struct sock *sk;
+ 
+-	sk = rcu_dereference(x->encap_sk);
+-	if (sk && sk->sk_state == TCP_ESTABLISHED)
+-		return sk;
+-
+ 	spin_lock_bh(&x->lock);
+ 	sport = encap->encap_sport;
+ 	dport = encap->encap_dport;
+-	nsk = rcu_dereference_protected(x->encap_sk,
+-					lockdep_is_held(&x->lock));
+-	if (sk && sk == nsk) {
+-		esk = kmalloc(sizeof(*esk), GFP_ATOMIC);
+-		if (!esk) {
+-			spin_unlock_bh(&x->lock);
+-			return ERR_PTR(-ENOMEM);
+-		}
+-		RCU_INIT_POINTER(x->encap_sk, NULL);
+-		esk->sk = sk;
+-		call_rcu(&esk->rcu, esp_free_tcp_sk);
+-	}
+ 	spin_unlock_bh(&x->lock);
+ 
+ 	sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo, &x->id.daddr.in6,
+@@ -190,20 +159,6 @@ static struct sock *esp6_find_tcp_sk(struct xfrm_state *x)
+ 		return ERR_PTR(-EINVAL);
+ 	}
+ 
+-	spin_lock_bh(&x->lock);
+-	nsk = rcu_dereference_protected(x->encap_sk,
+-					lockdep_is_held(&x->lock));
+-	if (encap->encap_sport != sport ||
+-	    encap->encap_dport != dport) {
+-		sock_put(sk);
+-		sk = nsk ?: ERR_PTR(-EREMCHG);
+-	} else if (sk == nsk) {
+-		sock_put(sk);
+-	} else {
+-		rcu_assign_pointer(x->encap_sk, sk);
+-	}
+-	spin_unlock_bh(&x->lock);
+-
+ 	return sk;
+ }
+ 
+@@ -216,8 +171,10 @@ static int esp_output_tcp_finish(struct xfrm_state *x, struct sk_buff *skb)
+ 
+ 	sk = esp6_find_tcp_sk(x);
+ 	err = PTR_ERR_OR_ZERO(sk);
+-	if (err)
++	if (err) {
++		kfree_skb(skb);
+ 		goto out;
++	}
+ 
+ 	bh_lock_sock(sk);
+ 	if (sock_owned_by_user(sk))
+@@ -226,6 +183,8 @@ static int esp_output_tcp_finish(struct xfrm_state *x, struct sk_buff *skb)
+ 		err = espintcp_push_skb(sk, skb);
+ 	bh_unlock_sock(sk);
+ 
++	sock_put(sk);
++
+ out:
+ 	rcu_read_unlock();
+ 	return err;
+@@ -422,6 +381,8 @@ static struct ip_esp_hdr *esp6_output_tcp_encap(struct xfrm_state *x,
+ 	if (IS_ERR(sk))
+ 		return ERR_CAST(sk);
+ 
++	sock_put(sk);
++
+ 	*lenp = htons(len);
+ 	esph = (struct ip_esp_hdr *)(lenp + 1);
+ 
+diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
+index 04a9ed5e8310f2..29185c9ebd0204 100644
+--- a/net/ipv6/fib6_rules.c
++++ b/net/ipv6/fib6_rules.c
+@@ -365,9 +365,9 @@ static int fib6_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
+ 			       struct nlattr **tb,
+ 			       struct netlink_ext_ack *extack)
+ {
++	struct fib6_rule *rule6 = (struct fib6_rule *)rule;
++	struct net *net = rule->fr_net;
+ 	int err = -EINVAL;
+-	struct net *net = sock_net(skb->sk);
+-	struct fib6_rule *rule6 = (struct fib6_rule *) rule;
+ 
+ 	if (!inet_validate_dscp(frh->tos)) {
+ 		NL_SET_ERR_MSG(extack,
+diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
+index 235808cfec7050..68e9a41eed4914 100644
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -1498,7 +1498,6 @@ static int ip6gre_tunnel_init_common(struct net_device *dev)
+ 	tunnel = netdev_priv(dev);
+ 
+ 	tunnel->dev = dev;
+-	tunnel->net = dev_net(dev);
+ 	strcpy(tunnel->parms.name, dev->name);
+ 
+ 	ret = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL);
+@@ -1882,7 +1881,6 @@ static int ip6erspan_tap_init(struct net_device *dev)
+ 	tunnel = netdev_priv(dev);
+ 
+ 	tunnel->dev = dev;
+-	tunnel->net = dev_net(dev);
+ 	strcpy(tunnel->parms.name, dev->name);
+ 
+ 	ret = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL);
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 434ddf263b88a3..89a61e040e6a18 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -1386,6 +1386,7 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
+ 	}
+ 	v6_cork->hop_limit = ipc6->hlimit;
+ 	v6_cork->tclass = ipc6->tclass;
++	v6_cork->dontfrag = ipc6->dontfrag;
+ 	if (rt->dst.flags & DST_XFRM_TUNNEL)
+ 		mtu = READ_ONCE(np->pmtudisc) >= IPV6_PMTUDISC_PROBE ?
+ 		      READ_ONCE(rt->dst.dev->mtu) : dst_mtu(&rt->dst);
+@@ -1417,7 +1418,7 @@ static int __ip6_append_data(struct sock *sk,
+ 			     int getfrag(void *from, char *to, int offset,
+ 					 int len, int odd, struct sk_buff *skb),
+ 			     void *from, size_t length, int transhdrlen,
+-			     unsigned int flags, struct ipcm6_cookie *ipc6)
++			     unsigned int flags)
+ {
+ 	struct sk_buff *skb, *skb_prev = NULL;
+ 	struct inet_cork *cork = &cork_full->base;
+@@ -1471,7 +1472,7 @@ static int __ip6_append_data(struct sock *sk,
+ 	if (headersize + transhdrlen > mtu)
+ 		goto emsgsize;
+ 
+-	if (cork->length + length > mtu - headersize && ipc6->dontfrag &&
++	if (cork->length + length > mtu - headersize && v6_cork->dontfrag &&
+ 	    (sk->sk_protocol == IPPROTO_UDP ||
+ 	     sk->sk_protocol == IPPROTO_ICMPV6 ||
+ 	     sk->sk_protocol == IPPROTO_RAW)) {
+@@ -1843,7 +1844,7 @@ int ip6_append_data(struct sock *sk,
+ 
+ 	return __ip6_append_data(sk, &sk->sk_write_queue, &inet->cork,
+ 				 &np->cork, sk_page_frag(sk), getfrag,
+-				 from, length, transhdrlen, flags, ipc6);
++				 from, length, transhdrlen, flags);
+ }
+ EXPORT_SYMBOL_GPL(ip6_append_data);
+ 
+@@ -2048,7 +2049,7 @@ struct sk_buff *ip6_make_skb(struct sock *sk,
+ 	err = __ip6_append_data(sk, &queue, cork, &v6_cork,
+ 				&current->task_frag, getfrag, from,
+ 				length + exthdrlen, transhdrlen + exthdrlen,
+-				flags, ipc6);
++				flags);
+ 	if (err) {
+ 		__ip6_flush_pending_frames(sk, &queue, cork, &v6_cork);
+ 		return ERR_PTR(err);
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index 48fd53b9897265..5350c9bb2319bf 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -1878,7 +1878,6 @@ ip6_tnl_dev_init_gen(struct net_device *dev)
+ 	int t_hlen;
+ 
+ 	t->dev = dev;
+-	t->net = dev_net(dev);
+ 
+ 	ret = dst_cache_init(&t->dst_cache, GFP_KERNEL);
+ 	if (ret)
+@@ -1940,6 +1939,7 @@ static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
+ 	struct net *net = dev_net(dev);
+ 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
+ 
++	t->net = net;
+ 	t->parms.proto = IPPROTO_IPV6;
+ 
+ 	rcu_assign_pointer(ip6n->tnls_wc[0], t);
+@@ -2013,6 +2013,7 @@ static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev,
+ 	int err;
+ 
+ 	nt = netdev_priv(dev);
++	nt->net = net;
+ 
+ 	if (ip_tunnel_netlink_encap_parms(data, &ipencap)) {
+ 		err = ip6_tnl_encap_setup(nt, &ipencap);
+diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
+index 590737c2753798..01235046914432 100644
+--- a/net/ipv6/ip6_vti.c
++++ b/net/ipv6/ip6_vti.c
+@@ -925,7 +925,6 @@ static inline int vti6_dev_init_gen(struct net_device *dev)
+ 	struct ip6_tnl *t = netdev_priv(dev);
+ 
+ 	t->dev = dev;
+-	t->net = dev_net(dev);
+ 	netdev_hold(dev, &t->dev_tracker, GFP_KERNEL);
+ 	netdev_lockdep_set_classes(dev);
+ 	return 0;
+@@ -958,6 +957,7 @@ static int __net_init vti6_fb_tnl_dev_init(struct net_device *dev)
+ 	struct net *net = dev_net(dev);
+ 	struct vti6_net *ip6n = net_generic(net, vti6_net_id);
+ 
++	t->net = net;
+ 	t->parms.proto = IPPROTO_IPV6;
+ 
+ 	rcu_assign_pointer(ip6n->tnls_wc[0], t);
+@@ -1008,6 +1008,7 @@ static int vti6_newlink(struct net *src_net, struct net_device *dev,
+ 	vti6_netlink_parms(data, &nt->parms);
+ 
+ 	nt->parms.proto = IPPROTO_IPV6;
++	nt->net = net;
+ 
+ 	if (vti6_locate(net, &nt->parms, 0))
+ 		return -EEXIST;
+diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
+index 39bd8951bfca18..3c15a0ae228e21 100644
+--- a/net/ipv6/sit.c
++++ b/net/ipv6/sit.c
+@@ -269,6 +269,7 @@ static struct ip_tunnel *ipip6_tunnel_locate(struct net *net,
+ 
+ 	nt = netdev_priv(dev);
+ 
++	nt->net = net;
+ 	nt->parms = *parms;
+ 	if (ipip6_tunnel_create(dev) < 0)
+ 		goto failed_free;
+@@ -1449,7 +1450,6 @@ static int ipip6_tunnel_init(struct net_device *dev)
+ 	int err;
+ 
+ 	tunnel->dev = dev;
+-	tunnel->net = dev_net(dev);
+ 	strcpy(tunnel->parms.name, dev->name);
+ 
+ 	ipip6_tunnel_bind_dev(dev);
+@@ -1563,6 +1563,7 @@ static int ipip6_newlink(struct net *src_net, struct net_device *dev,
+ 	int err;
+ 
+ 	nt = netdev_priv(dev);
++	nt->net = net;
+ 
+ 	if (ip_tunnel_netlink_encap_parms(data, &ipencap)) {
+ 		err = ip_tunnel_encap_setup(nt, &ipencap);
+@@ -1858,6 +1859,9 @@ static int __net_init sit_init_net(struct net *net)
+ 	 */
+ 	sitn->fb_tunnel_dev->netns_local = true;
+ 
++	t = netdev_priv(sitn->fb_tunnel_dev);
++	t->net = net;
++
+ 	err = register_netdev(sitn->fb_tunnel_dev);
+ 	if (err)
+ 		goto err_reg_dev;
+@@ -1865,8 +1869,6 @@ static int __net_init sit_init_net(struct net *net)
+ 	ipip6_tunnel_clone_6rd(sitn->fb_tunnel_dev, sitn);
+ 	ipip6_fb_tunnel_init(sitn->fb_tunnel_dev);
+ 
+-	t = netdev_priv(sitn->fb_tunnel_dev);
+-
+ 	strcpy(t->parms.name, sitn->fb_tunnel_dev->name);
+ 	return 0;
+ 
+diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c
+index 4abc5e9d63227a..841c81abaaf4ff 100644
+--- a/net/ipv6/xfrm6_input.c
++++ b/net/ipv6/xfrm6_input.c
+@@ -179,14 +179,18 @@ struct sk_buff *xfrm6_gro_udp_encap_rcv(struct sock *sk, struct list_head *head,
+ 	int offset = skb_gro_offset(skb);
+ 	const struct net_offload *ops;
+ 	struct sk_buff *pp = NULL;
+-	int ret;
++	int len, dlen;
++	__u8 *udpdata;
++	__be32 *udpdata32;
+ 
+ 	if (skb->protocol == htons(ETH_P_IP))
+ 		return xfrm4_gro_udp_encap_rcv(sk, head, skb);
+ 
+-	offset = offset - sizeof(struct udphdr);
+-
+-	if (!pskb_pull(skb, offset))
++	len = skb->len - offset;
++	dlen = offset + min(len, 8);
++	udpdata = skb_gro_header(skb, dlen, offset);
++	udpdata32 = (__be32 *)udpdata;
++	if (unlikely(!udpdata))
+ 		return NULL;
+ 
+ 	rcu_read_lock();
+@@ -194,11 +198,10 @@ struct sk_buff *xfrm6_gro_udp_encap_rcv(struct sock *sk, struct list_head *head,
+ 	if (!ops || !ops->callbacks.gro_receive)
+ 		goto out;
+ 
+-	ret = __xfrm6_udp_encap_rcv(sk, skb, false);
+-	if (ret)
++	/* check if it is a keepalive or IKE packet */
++	if (len <= sizeof(struct ip_esp_hdr) || udpdata32[0] == 0)
+ 		goto out;
+ 
+-	skb_push(skb, offset);
+ 	NAPI_GRO_CB(skb)->proto = IPPROTO_UDP;
+ 
+ 	pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
+@@ -208,7 +211,6 @@ struct sk_buff *xfrm6_gro_udp_encap_rcv(struct sock *sk, struct list_head *head,
+ 
+ out:
+ 	rcu_read_unlock();
+-	skb_push(skb, offset);
+ 	NAPI_GRO_CB(skb)->same_flow = 0;
+ 	NAPI_GRO_CB(skb)->flush = 1;
+ 
+diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
+index 0259cde394ba09..cc77ec5769d828 100644
+--- a/net/llc/af_llc.c
++++ b/net/llc/af_llc.c
+@@ -887,15 +887,15 @@ static int llc_ui_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ 		if (sk->sk_type != SOCK_STREAM)
+ 			goto copy_uaddr;
+ 
++		/* Partial read */
++		if (used + offset < skb_len)
++			continue;
++
+ 		if (!(flags & MSG_PEEK)) {
+ 			skb_unlink(skb, &sk->sk_receive_queue);
+ 			kfree_skb(skb);
+ 			*seq = 0;
+ 		}
+-
+-		/* Partial read */
+-		if (used + offset < skb_len)
+-			continue;
+ 	} while (len > 0);
+ 
+ out:
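
The reordering in llc_ui_recvmsg() above keeps an skb on the receive queue until it has been fully consumed, so a partial read no longer frees data a later read still needs. A sketch of that consume-only-when-finished rule over a queue of buffers (standalone C, invented names):

    #include <stddef.h>
    #include <string.h>

    struct buf {
        const char *data;
        size_t      len;
        size_t      off;    /* how much earlier reads already consumed */
    };

    /* Copy up to 'want' bytes out of 'b' into 'dst'.  The buffer is
     * marked consumed (ready to be unlinked and freed by the caller)
     * only once every byte has been handed out -- never on a partial
     * read. */
    static size_t read_some(struct buf *b, char *dst, size_t want,
                            int *consumed)
    {
        size_t avail = b->len - b->off;
        size_t n = want < avail ? want : avail;

        memcpy(dst, b->data + b->off, n);
        b->off += n;
        *consumed = (b->off == b->len);
        return n;
    }
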
+diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
+index a06644084d15d1..d1c10f5f951604 100644
+--- a/net/mac80211/driver-ops.h
++++ b/net/mac80211/driver-ops.h
+@@ -2,7 +2,7 @@
+ /*
+ * Portions of this file
+ * Copyright(c) 2016 Intel Deutschland GmbH
+-* Copyright (C) 2018-2019, 2021-2024 Intel Corporation
++* Copyright (C) 2018-2019, 2021-2025 Intel Corporation
+ */
+ 
+ #ifndef __MAC80211_DRIVER_OPS
+@@ -955,6 +955,7 @@ static inline void drv_mgd_complete_tx(struct ieee80211_local *local,
+ 		return;
+ 	WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_STATION);
+ 
++	info->link_id = info->link_id < 0 ? 0 : info->link_id;
+ 	trace_drv_mgd_complete_tx(local, sdata, info->duration,
+ 				  info->subtype, info->success);
+ 	if (local->ops->mgd_complete_tx)
+diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
+index cc8c5d18b130db..8fa9b9dd461184 100644
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -8,7 +8,7 @@
+  * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
+  * Copyright 2013-2014  Intel Mobile Communications GmbH
+  * Copyright (C) 2015 - 2017 Intel Deutschland GmbH
+- * Copyright (C) 2018 - 2024 Intel Corporation
++ * Copyright (C) 2018 - 2025 Intel Corporation
+  */
+ 
+ #include <linux/delay.h>
+@@ -3589,7 +3589,8 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
+ 	if (tx)
+ 		ieee80211_flush_queues(local, sdata, false);
+ 
+-	drv_mgd_complete_tx(sdata->local, sdata, &info);
++	if (tx || frame_buf)
++		drv_mgd_complete_tx(sdata->local, sdata, &info);
+ 
+ 	/* clear AP addr only after building the needed mgmt frames */
+ 	eth_zero_addr(sdata->deflink.u.mgd.bssid);
+@@ -4033,7 +4034,7 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
+ 			struct ieee80211_link_data *link;
+ 
+ 			link = sdata_dereference(sdata->link[link_id], sdata);
+-			if (!link)
++			if (!link || !link->conf->bss)
+ 				continue;
+ 			cfg80211_unlink_bss(local->hw.wiphy, link->conf->bss);
+ 			link->conf->bss = NULL;
+@@ -4306,6 +4307,8 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
+ 	auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction);
+ 	status_code = le16_to_cpu(mgmt->u.auth.status_code);
+ 
++	info.link_id = ifmgd->auth_data->link_id;
++
+ 	if (auth_alg != ifmgd->auth_data->algorithm ||
+ 	    (auth_alg != WLAN_AUTH_SAE &&
+ 	     auth_transaction != ifmgd->auth_data->expected_transaction) ||
+@@ -9219,7 +9222,6 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
+ 		ieee80211_report_disconnect(sdata, frame_buf,
+ 					    sizeof(frame_buf), true,
+ 					    req->reason_code, false);
+-		drv_mgd_complete_tx(sdata->local, sdata, &info);
+ 		return 0;
+ 	}
+ 
+diff --git a/net/mptcp/pm_userspace.c b/net/mptcp/pm_userspace.c
+index e35178f5205faa..bb76295d04c56c 100644
+--- a/net/mptcp/pm_userspace.c
++++ b/net/mptcp/pm_userspace.c
+@@ -589,11 +589,9 @@ int mptcp_userspace_pm_set_flags(struct sk_buff *skb, struct genl_info *info)
+ 	if (ret < 0)
+ 		goto set_flags_err;
+ 
+-	if (attr_rem) {
+-		ret = mptcp_pm_parse_entry(attr_rem, info, false, &rem);
+-		if (ret < 0)
+-			goto set_flags_err;
+-	}
++	ret = mptcp_pm_parse_entry(attr_rem, info, false, &rem);
++	if (ret < 0)
++		goto set_flags_err;
+ 
+ 	if (loc.addr.family == AF_UNSPEC ||
+ 	    rem.addr.family == AF_UNSPEC) {
+diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
+index 7d4f0fa8b609d8..3ea60ff7a6a496 100644
+--- a/net/netfilter/nf_conntrack_standalone.c
++++ b/net/netfilter/nf_conntrack_standalone.c
+@@ -619,7 +619,9 @@ static struct ctl_table nf_ct_sysctl_table[] = {
+ 		.data		= &nf_conntrack_max,
+ 		.maxlen		= sizeof(int),
+ 		.mode		= 0644,
+-		.proc_handler	= proc_dointvec,
++		.proc_handler	= proc_dointvec_minmax,
++		.extra1		= SYSCTL_ZERO,
++		.extra2		= SYSCTL_INT_MAX,
+ 	},
+ 	[NF_SYSCTL_CT_COUNT] = {
+ 		.procname	= "nf_conntrack_count",
+@@ -655,7 +657,9 @@ static struct ctl_table nf_ct_sysctl_table[] = {
+ 		.data		= &nf_ct_expect_max,
+ 		.maxlen		= sizeof(int),
+ 		.mode		= 0644,
+-		.proc_handler	= proc_dointvec,
++		.proc_handler	= proc_dointvec_minmax,
++		.extra1		= SYSCTL_ONE,
++		.extra2		= SYSCTL_INT_MAX,
+ 	},
+ 	[NF_SYSCTL_CT_ACCT] = {
+ 		.procname	= "nf_conntrack_acct",
+@@ -948,7 +952,9 @@ static struct ctl_table nf_ct_netfilter_table[] = {
+ 		.data		= &nf_conntrack_max,
+ 		.maxlen		= sizeof(int),
+ 		.mode		= 0644,
+-		.proc_handler	= proc_dointvec,
++		.proc_handler	= proc_dointvec_minmax,
++		.extra1		= SYSCTL_ZERO,
++		.extra2		= SYSCTL_INT_MAX,
+ 	},
+ };
+ 
+diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
+index cb8c525ea20eab..7986145a527cbe 100644
+--- a/net/sched/sch_hfsc.c
++++ b/net/sched/sch_hfsc.c
+@@ -1569,6 +1569,9 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
+ 		return err;
+ 	}
+ 
++	sch->qstats.backlog += len;
++	sch->q.qlen++;
++
+ 	if (first && !cl->cl_nactive) {
+ 		if (cl->cl_flags & HFSC_RSC)
+ 			init_ed(cl, len);
+@@ -1584,9 +1587,6 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
+ 
+ 	}
+ 
+-	sch->qstats.backlog += len;
+-	sch->q.qlen++;
+-
+ 	return NET_XMIT_SUCCESS;
+ }
+ 
+diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c
+index 716808f374a8d1..b391c2ef463f20 100644
+--- a/net/smc/smc_pnet.c
++++ b/net/smc/smc_pnet.c
+@@ -1079,14 +1079,16 @@ static void smc_pnet_find_roce_by_pnetid(struct net_device *ndev,
+ 					 struct smc_init_info *ini)
+ {
+ 	u8 ndev_pnetid[SMC_MAX_PNETID_LEN];
++	struct net_device *base_ndev;
+ 	struct net *net;
+ 
+-	ndev = pnet_find_base_ndev(ndev);
++	base_ndev = pnet_find_base_ndev(ndev);
+ 	net = dev_net(ndev);
+-	if (smc_pnetid_by_dev_port(ndev->dev.parent, ndev->dev_port,
++	if (smc_pnetid_by_dev_port(base_ndev->dev.parent, base_ndev->dev_port,
+ 				   ndev_pnetid) &&
++	    smc_pnet_find_ndev_pnetid_by_table(base_ndev, ndev_pnetid) &&
+ 	    smc_pnet_find_ndev_pnetid_by_table(ndev, ndev_pnetid)) {
+-		smc_pnet_find_rdma_dev(ndev, ini);
++		smc_pnet_find_rdma_dev(base_ndev, ini);
+ 		return; /* pnetid could not be determined */
+ 	}
+ 	_smc_pnet_find_roce_by_pnetid(ndev_pnetid, ini, NULL, net);
+diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
+index 0090162ee8c350..17a4de75bfaf6c 100644
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -270,9 +270,6 @@ static struct rpc_xprt *rpc_clnt_set_transport(struct rpc_clnt *clnt,
+ 	old = rcu_dereference_protected(clnt->cl_xprt,
+ 			lockdep_is_held(&clnt->cl_lock));
+ 
+-	if (!xprt_bound(xprt))
+-		clnt->cl_autobind = 1;
+-
+ 	clnt->cl_timeout = timeout;
+ 	rcu_assign_pointer(clnt->cl_xprt, xprt);
+ 	spin_unlock(&clnt->cl_lock);
+diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
+index 102c3818bc54d4..53bcca365fb1cd 100644
+--- a/net/sunrpc/rpcb_clnt.c
++++ b/net/sunrpc/rpcb_clnt.c
+@@ -820,9 +820,10 @@ static void rpcb_getport_done(struct rpc_task *child, void *data)
+ 	}
+ 
+ 	trace_rpcb_setport(child, map->r_status, map->r_port);
+-	xprt->ops->set_port(xprt, map->r_port);
+-	if (map->r_port)
++	if (map->r_port) {
++		xprt->ops->set_port(xprt, map->r_port);
+ 		xprt_set_bound(xprt);
++	}
+ }
+ 
+ /*
+diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
+index 9b45fbdc90cabe..73bc39281ef5f5 100644
+--- a/net/sunrpc/sched.c
++++ b/net/sunrpc/sched.c
+@@ -276,6 +276,8 @@ EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);
+ 
+ static int rpc_wait_bit_killable(struct wait_bit_key *key, int mode)
+ {
++	if (unlikely(current->flags & PF_EXITING))
++		return -EINTR;
+ 	schedule();
+ 	if (signal_pending_state(mode, current))
+ 		return -ERESTARTSYS;
+diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c
+index c524421ec65252..8584893b478510 100644
+--- a/net/tipc/crypto.c
++++ b/net/tipc/crypto.c
+@@ -817,12 +817,16 @@ static int tipc_aead_encrypt(struct tipc_aead *aead, struct sk_buff *skb,
+ 		goto exit;
+ 	}
+ 
++	/* Get net to avoid freed tipc_crypto when delete namespace */
++	get_net(aead->crypto->net);
++
+ 	/* Now, do encrypt */
+ 	rc = crypto_aead_encrypt(req);
+ 	if (rc == -EINPROGRESS || rc == -EBUSY)
+ 		return rc;
+ 
+ 	tipc_bearer_put(b);
++	put_net(aead->crypto->net);
+ 
+ exit:
+ 	kfree(ctx);
+@@ -860,6 +864,7 @@ static void tipc_aead_encrypt_done(void *data, int err)
+ 	kfree(tx_ctx);
+ 	tipc_bearer_put(b);
+ 	tipc_aead_put(aead);
++	put_net(net);
+ }
+ 
+ /**
+diff --git a/net/wireless/chan.c b/net/wireless/chan.c
+index e579d7e1425fe9..c4f3fefeb35444 100644
+--- a/net/wireless/chan.c
++++ b/net/wireless/chan.c
+@@ -6,7 +6,7 @@
+  *
+  * Copyright 2009	Johannes Berg <johannes@sipsolutions.net>
+  * Copyright 2013-2014  Intel Mobile Communications GmbH
+- * Copyright 2018-2024	Intel Corporation
++ * Copyright 2018-2025	Intel Corporation
+  */
+ 
+ #include <linux/export.h>
+@@ -1621,6 +1621,12 @@ bool cfg80211_reg_check_beaconing(struct wiphy *wiphy,
+ 	if (cfg->reg_power == IEEE80211_REG_VLP_AP)
+ 		permitting_flags |= IEEE80211_CHAN_ALLOW_6GHZ_VLP_AP;
+ 
++	if ((cfg->iftype == NL80211_IFTYPE_P2P_GO ||
++	     cfg->iftype == NL80211_IFTYPE_AP) &&
++	    (chandef->width == NL80211_CHAN_WIDTH_20_NOHT ||
++	     chandef->width == NL80211_CHAN_WIDTH_20))
++		permitting_flags |= IEEE80211_CHAN_ALLOW_20MHZ_ACTIVITY;
++
+ 	return _cfg80211_reg_can_beacon(wiphy, chandef, cfg->iftype,
+ 					check_no_ir ? IEEE80211_CHAN_NO_IR : 0,
+ 					permitting_flags);
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index ecfceddce00fcc..c778ffa1c8efd7 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -1213,6 +1213,10 @@ static int nl80211_msg_put_channel(struct sk_buff *msg, struct wiphy *wiphy,
+ 		if ((chan->flags & IEEE80211_CHAN_ALLOW_6GHZ_VLP_AP) &&
+ 		    nla_put_flag(msg, NL80211_FREQUENCY_ATTR_ALLOW_6GHZ_VLP_AP))
+ 			goto nla_put_failure;
++		if ((chan->flags & IEEE80211_CHAN_ALLOW_20MHZ_ACTIVITY) &&
++		    nla_put_flag(msg,
++				 NL80211_FREQUENCY_ATTR_ALLOW_20MHZ_ACTIVITY))
++			goto nla_put_failure;
+ 	}
+ 
+ 	if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER,
+diff --git a/net/wireless/reg.c b/net/wireless/reg.c
+index 2b626078739c52..f6846eb0f4b84e 100644
+--- a/net/wireless/reg.c
++++ b/net/wireless/reg.c
+@@ -5,7 +5,7 @@
+  * Copyright 2008-2011	Luis R. Rodriguez <mcgrof@qca.qualcomm.com>
+  * Copyright 2013-2014  Intel Mobile Communications GmbH
+  * Copyright      2017  Intel Deutschland GmbH
+- * Copyright (C) 2018 - 2024 Intel Corporation
++ * Copyright (C) 2018 - 2025 Intel Corporation
+  *
+  * Permission to use, copy, modify, and/or distribute this software for any
+  * purpose with or without fee is hereby granted, provided that the above
+@@ -1603,6 +1603,8 @@ static u32 map_regdom_flags(u32 rd_flags)
+ 		channel_flags |= IEEE80211_CHAN_PSD;
+ 	if (rd_flags & NL80211_RRF_ALLOW_6GHZ_VLP_AP)
+ 		channel_flags |= IEEE80211_CHAN_ALLOW_6GHZ_VLP_AP;
++	if (rd_flags & NL80211_RRF_ALLOW_20MHZ_ACTIVITY)
++		channel_flags |= IEEE80211_CHAN_ALLOW_20MHZ_ACTIVITY;
+ 	return channel_flags;
+ }
+ 
+diff --git a/net/xfrm/espintcp.c b/net/xfrm/espintcp.c
+index fe82e2d073006e..fc7a603b04f130 100644
+--- a/net/xfrm/espintcp.c
++++ b/net/xfrm/espintcp.c
+@@ -171,8 +171,10 @@ int espintcp_queue_out(struct sock *sk, struct sk_buff *skb)
+ 	struct espintcp_ctx *ctx = espintcp_getctx(sk);
+ 
+ 	if (skb_queue_len(&ctx->out_queue) >=
+-	    READ_ONCE(net_hotdata.max_backlog))
++	    READ_ONCE(net_hotdata.max_backlog)) {
++		kfree_skb(skb);
+ 		return -ENOBUFS;
++	}
+ 
+ 	__skb_queue_tail(&ctx->out_queue, skb);
+ 
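
Before the espintcp change above, the -ENOBUFS return left the skb alive but unowned, leaking it; with the fix the function disposes of the buffer on every path. A minimal sketch of "callee always takes ownership" (standalone C, invented names):

    #include <errno.h>
    #include <stdlib.h>

    struct pkt { struct pkt *next; };

    struct queue {
        struct pkt *head, **tail;   /* caller initializes tail = &head */
        unsigned int len, max;
    };

    /* Takes ownership of 'p' unconditionally: it is either linked into
     * the queue or freed here.  The caller must not touch 'p' afterwards,
     * even when an error is returned. */
    static int queue_out(struct queue *q, struct pkt *p)
    {
        if (q->len >= q->max) {
            free(p);                /* dispose on the error path too */
            return -ENOBUFS;
        }
        p->next = NULL;
        *q->tail = p;
        q->tail = &p->next;
        q->len++;
        return 0;
    }
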
+diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
+index 8a1b83191a6cdf..2c42d83fbaa2dd 100644
+--- a/net/xfrm/xfrm_policy.c
++++ b/net/xfrm/xfrm_policy.c
+@@ -1581,6 +1581,9 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
+ 	struct xfrm_policy *delpol;
+ 	struct hlist_head *chain;
+ 
++	/* Sanitize mark before store */
++	policy->mark.v &= policy->mark.m;
++
+ 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
+ 	chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
+ 	if (chain)
+diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
+index 711e816fc4041e..abd725386cb604 100644
+--- a/net/xfrm/xfrm_state.c
++++ b/net/xfrm/xfrm_state.c
+@@ -773,9 +773,6 @@ int __xfrm_state_delete(struct xfrm_state *x)
+ 		xfrm_nat_keepalive_state_updated(x);
+ 		spin_unlock(&net->xfrm.xfrm_state_lock);
+ 
+-		if (x->encap_sk)
+-			sock_put(rcu_dereference_raw(x->encap_sk));
+-
+ 		xfrm_dev_state_delete(x);
+ 
+ 		/* All xfrm_state objects are created by xfrm_state_alloc.
+@@ -1656,6 +1653,9 @@ static void __xfrm_state_insert(struct xfrm_state *x)
+ 
+ 	list_add(&x->km.all, &net->xfrm.state_all);
+ 
++	/* Sanitize mark before store */
++	x->mark.v &= x->mark.m;
++
+ 	h = xfrm_dst_hash(net, &x->id.daddr, &x->props.saddr,
+ 			  x->props.reqid, x->props.family);
+ 	XFRM_STATE_INSERT(bydst, &x->bydst, net->xfrm.state_bydst + h,
+diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
+index 87013623773a2b..da2a1c00ca8a63 100644
+--- a/net/xfrm/xfrm_user.c
++++ b/net/xfrm/xfrm_user.c
+@@ -178,6 +178,12 @@ static inline int verify_replay(struct xfrm_usersa_info *p,
+ 				       "Replay seq and seq_hi should be 0 for output SA");
+ 			return -EINVAL;
+ 		}
++		if (rs->oseq_hi && !(p->flags & XFRM_STATE_ESN)) {
++			NL_SET_ERR_MSG(
++				extack,
++				"Replay oseq_hi should be 0 in non-ESN mode for output SA");
++			return -EINVAL;
++		}
+ 		if (rs->bmp_len) {
+ 			NL_SET_ERR_MSG(extack, "Replay bmp_len should 0 for output SA");
+ 			return -EINVAL;
+@@ -190,6 +196,12 @@ static inline int verify_replay(struct xfrm_usersa_info *p,
+ 				       "Replay oseq and oseq_hi should be 0 for input SA");
+ 			return -EINVAL;
+ 		}
++		if (rs->seq_hi && !(p->flags & XFRM_STATE_ESN)) {
++			NL_SET_ERR_MSG(
++				extack,
++				"Replay seq_hi should be 0 in non-ESN mode for input SA");
++			return -EINVAL;
++		}
+ 	}
+ 
+ 	return 0;
+diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
+index 7afe040cf43b3f..f06f88b26183c2 100644
+--- a/samples/bpf/Makefile
++++ b/samples/bpf/Makefile
+@@ -400,7 +400,7 @@ $(obj)/%.o: $(src)/%.c
+ 	@echo "  CLANG-bpf " $@
+ 	$(Q)$(CLANG) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(BPF_EXTRA_CFLAGS) \
+ 		-I$(obj) -I$(srctree)/tools/testing/selftests/bpf/ \
+-		-I$(LIBBPF_INCLUDE) \
++		-I$(LIBBPF_INCLUDE) $(CLANG_SYS_INCLUDES) \
+ 		-D__KERNEL__ -D__BPF_TRACING__ -Wno-unused-value -Wno-pointer-sign \
+ 		-D__TARGET_ARCH_$(SRCARCH) -Wno-compare-distinct-pointer-types \
+ 		-Wno-gnu-variable-sized-type-not-at-end \
+diff --git a/scripts/Makefile.extrawarn b/scripts/Makefile.extrawarn
+index 686197407c3c61..5652d903523209 100644
+--- a/scripts/Makefile.extrawarn
++++ b/scripts/Makefile.extrawarn
+@@ -8,6 +8,7 @@
+ 
+ # Default set of warnings, always enabled
+ KBUILD_CFLAGS += -Wall
++KBUILD_CFLAGS += -Wextra
+ KBUILD_CFLAGS += -Wundef
+ KBUILD_CFLAGS += -Werror=implicit-function-declaration
+ KBUILD_CFLAGS += -Werror=implicit-int
+@@ -15,7 +16,7 @@ KBUILD_CFLAGS += -Werror=return-type
+ KBUILD_CFLAGS += -Werror=strict-prototypes
+ KBUILD_CFLAGS += -Wno-format-security
+ KBUILD_CFLAGS += -Wno-trigraphs
+-KBUILD_CFLAGS += $(call cc-disable-warning,frame-address,)
++KBUILD_CFLAGS += $(call cc-disable-warning, frame-address)
+ KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
+ KBUILD_CFLAGS += -Wmissing-declarations
+ KBUILD_CFLAGS += -Wmissing-prototypes
+@@ -68,6 +69,13 @@ KBUILD_CFLAGS += -Wno-pointer-sign
+ # globally built with -Wcast-function-type.
+ KBUILD_CFLAGS += $(call cc-option, -Wcast-function-type)
+ 
++# Currently, disable -Wstringop-overflow for GCC 11, globally.
++KBUILD_CFLAGS-$(CONFIG_CC_NO_STRINGOP_OVERFLOW) += $(call cc-disable-warning, stringop-overflow)
++KBUILD_CFLAGS-$(CONFIG_CC_STRINGOP_OVERFLOW) += $(call cc-option, -Wstringop-overflow)
++
++# Currently, disable -Wunterminated-string-initialization as broken
++KBUILD_CFLAGS += $(call cc-disable-warning, unterminated-string-initialization)
++
+ # The allocators already balk at large sizes, so silence the compiler
+ # warnings for bounds checks involving those possible values. While
+ # -Wno-alloc-size-larger-than would normally be used here, earlier versions
+@@ -97,7 +105,6 @@ KBUILD_CFLAGS += $(call cc-option,-Wenum-conversion)
+ # Explicitly clear padding bits during variable initialization
+ KBUILD_CFLAGS += $(call cc-option,-fzero-init-padding-bits=all)
+ 
+-KBUILD_CFLAGS += -Wextra
+ KBUILD_CFLAGS += -Wunused
+ 
+ #
+diff --git a/scripts/config b/scripts/config
+index ff88e2faefd35c..ea475c07de283e 100755
+--- a/scripts/config
++++ b/scripts/config
+@@ -32,6 +32,7 @@ commands:
+                              Disable option directly after other option
+ 	--module-after|-M beforeopt option
+                              Turn option into module directly after other option
++	--refresh            Refresh the config using old settings
+ 
+ 	commands can be repeated multiple times
+ 
+@@ -124,16 +125,22 @@ undef_var() {
+ 	txt_delete "^# $name is not set" "$FN"
+ }
+ 
+-if [ "$1" = "--file" ]; then
+-	FN="$2"
+-	if [ "$FN" = "" ] ; then
+-		usage
++FN=.config
++CMDS=()
++while [[ $# -gt 0 ]]; do
++	if [ "$1" = "--file" ]; then
++		if [ "$2" = "" ]; then
++			usage
++		fi
++		FN="$2"
++		shift 2
++	else
++		CMDS+=("$1")
++		shift
+ 	fi
+-	shift 2
+-else
+-	FN=.config
+-fi
++done
+ 
++set -- "${CMDS[@]}"
+ if [ "$1" = "" ] ; then
+ 	usage
+ fi
+@@ -217,9 +224,8 @@ while [ "$1" != "" ] ; do
+ 		set_var "${CONFIG_}$B" "${CONFIG_}$B=m" "${CONFIG_}$A"
+ 		;;
+ 
+-	# undocumented because it ignores --file (fixme)
+ 	--refresh)
+-		yes "" | make oldconfig
++		yes "" | make oldconfig KCONFIG_CONFIG=$FN
+ 		;;
+ 
+ 	*)
+diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
+index 3b55e7a4131d9a..ac95661a1c9dd9 100644
+--- a/scripts/kconfig/confdata.c
++++ b/scripts/kconfig/confdata.c
+@@ -385,7 +385,7 @@ int conf_read_simple(const char *name, int def)
+ 
+ 	def_flags = SYMBOL_DEF << def;
+ 	for_all_symbols(sym) {
+-		sym->flags &= ~(def_flags|SYMBOL_VALID);
++		sym->flags &= ~def_flags;
+ 		switch (sym->type) {
+ 		case S_INT:
+ 		case S_HEX:
+@@ -398,7 +398,11 @@ int conf_read_simple(const char *name, int def)
+ 		}
+ 	}
+ 
+-	expr_invalidate_all();
++	if (def == S_DEF_USER) {
++		for_all_symbols(sym)
++			sym->flags &= ~SYMBOL_VALID;
++		expr_invalidate_all();
++	}
+ 
+ 	while (getline_stripped(&line, &line_asize, in) != -1) {
+ 		struct menu *choice;
+@@ -464,6 +468,9 @@ int conf_read_simple(const char *name, int def)
+ 		if (conf_set_sym_val(sym, def, def_flags, val))
+ 			continue;
+ 
++		if (def != S_DEF_USER)
++			continue;
++
+ 		/*
+ 		 * If this is a choice member, give it the highest priority.
+ 		 * If conflicting CONFIG options are given from an input file,
+@@ -967,10 +974,8 @@ static int conf_touch_deps(void)
+ 	depfile_path[depfile_prefix_len] = 0;
+ 
+ 	conf_read_simple(name, S_DEF_AUTO);
+-	sym_calc_value(modules_sym);
+ 
+ 	for_all_symbols(sym) {
+-		sym_calc_value(sym);
+ 		if (sym_is_choice(sym))
+ 			continue;
+ 		if (sym->flags & SYMBOL_WRITE) {
+@@ -1084,12 +1089,12 @@ int conf_write_autoconf(int overwrite)
+ 	if (ret)
+ 		return -1;
+ 
+-	if (conf_touch_deps())
+-		return 1;
+-
+ 	for_all_symbols(sym)
+ 		sym_calc_value(sym);
+ 
++	if (conf_touch_deps())
++		return 1;
++
+ 	ret = __conf_write_autoconf(conf_get_autoheader_name(),
+ 				    print_symbol_for_c,
+ 				    &comment_style_c);
+diff --git a/scripts/kconfig/merge_config.sh b/scripts/kconfig/merge_config.sh
+index 0b7952471c18f6..79c09b378be816 100755
+--- a/scripts/kconfig/merge_config.sh
++++ b/scripts/kconfig/merge_config.sh
+@@ -112,8 +112,8 @@ INITFILE=$1
+ shift;
+ 
+ if [ ! -r "$INITFILE" ]; then
+-	echo "The base file '$INITFILE' does not exist.  Exit." >&2
+-	exit 1
++	echo "The base file '$INITFILE' does not exist. Creating one..." >&2
++	touch "$INITFILE"
+ fi
+ 
+ MERGE_LIST=$*
+diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
+index a9aab10bebcaa1..2f3f267e721674 100644
+--- a/security/integrity/ima/ima_main.c
++++ b/security/integrity/ima/ima_main.c
+@@ -245,7 +245,9 @@ static int process_measurement(struct file *file, const struct cred *cred,
+ 				&allowed_algos);
+ 	violation_check = ((func == FILE_CHECK || func == MMAP_CHECK ||
+ 			    func == MMAP_CHECK_REQPROT) &&
+-			   (ima_policy_flag & IMA_MEASURE));
++			   (ima_policy_flag & IMA_MEASURE) &&
++			   ((action & IMA_MEASURE) ||
++			    (file->f_mode & FMODE_WRITE)));
+ 	if (!action && !violation_check)
+ 		return 0;
+ 
+diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c
+index 5dd1e164f9b13d..1e35c9f807b2b6 100644
+--- a/security/smack/smackfs.c
++++ b/security/smack/smackfs.c
+@@ -830,7 +830,7 @@ static int smk_open_cipso(struct inode *inode, struct file *file)
+ static ssize_t smk_set_cipso(struct file *file, const char __user *buf,
+ 				size_t count, loff_t *ppos, int format)
+ {
+-	struct netlbl_lsm_catmap *old_cat, *new_cat = NULL;
++	struct netlbl_lsm_catmap *old_cat;
+ 	struct smack_known *skp;
+ 	struct netlbl_lsm_secattr ncats;
+ 	char mapcatset[SMK_CIPSOLEN];
+@@ -917,22 +917,15 @@ static ssize_t smk_set_cipso(struct file *file, const char __user *buf,
+ 
+ 		smack_catset_bit(cat, mapcatset);
+ 	}
+-	ncats.flags = 0;
+-	if (catlen == 0) {
+-		ncats.attr.mls.cat = NULL;
+-		ncats.attr.mls.lvl = maplevel;
+-		new_cat = netlbl_catmap_alloc(GFP_ATOMIC);
+-		if (new_cat)
+-			new_cat->next = ncats.attr.mls.cat;
+-		ncats.attr.mls.cat = new_cat;
+-		skp->smk_netlabel.flags &= ~(1U << 3);
+-		rc = 0;
+-	} else {
+-		rc = smk_netlbl_mls(maplevel, mapcatset, &ncats, SMK_CIPSOLEN);
+-	}
++
++	rc = smk_netlbl_mls(maplevel, mapcatset, &ncats, SMK_CIPSOLEN);
+ 	if (rc >= 0) {
+ 		old_cat = skp->smk_netlabel.attr.mls.cat;
+ 		rcu_assign_pointer(skp->smk_netlabel.attr.mls.cat, ncats.attr.mls.cat);
++		if (ncats.attr.mls.cat)
++			skp->smk_netlabel.flags |= NETLBL_SECATTR_MLS_CAT;
++		else
++			skp->smk_netlabel.flags &= ~(u32)NETLBL_SECATTR_MLS_CAT;
+ 		skp->smk_netlabel.attr.mls.lvl = ncats.attr.mls.lvl;
+ 		synchronize_rcu();
+ 		netlbl_catmap_free(old_cat);
+diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
+index 4683b9139c566a..4ecb17bd5436e7 100644
+--- a/sound/core/oss/pcm_oss.c
++++ b/sound/core/oss/pcm_oss.c
+@@ -1074,8 +1074,7 @@ static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream)
+ 	runtime->oss.params = 0;
+ 	runtime->oss.prepare = 1;
+ 	runtime->oss.buffer_used = 0;
+-	if (runtime->dma_area)
+-		snd_pcm_format_set_silence(runtime->format, runtime->dma_area, bytes_to_samples(runtime, runtime->dma_bytes));
++	snd_pcm_runtime_buffer_set_silence(runtime);
+ 
+ 	runtime->oss.period_frames = snd_pcm_alsa_frames(substream, oss_period_size);
+ 
+diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
+index 0790b5fd917e12..0a1ba26872f846 100644
+--- a/sound/core/pcm_native.c
++++ b/sound/core/pcm_native.c
+@@ -723,6 +723,17 @@ static void snd_pcm_buffer_access_unlock(struct snd_pcm_runtime *runtime)
+ 	atomic_inc(&runtime->buffer_accessing);
+ }
+ 
++/* fill the PCM buffer with the current silence format; called from pcm_oss.c */
++void snd_pcm_runtime_buffer_set_silence(struct snd_pcm_runtime *runtime)
++{
++	snd_pcm_buffer_access_lock(runtime);
++	if (runtime->dma_area)
++		snd_pcm_format_set_silence(runtime->format, runtime->dma_area,
++					   bytes_to_samples(runtime, runtime->dma_bytes));
++	snd_pcm_buffer_access_unlock(runtime);
++}
++EXPORT_SYMBOL_GPL(snd_pcm_runtime_buffer_set_silence);
++
+ #if IS_ENABLED(CONFIG_SND_PCM_OSS)
+ #define is_oss_stream(substream)	((substream)->oss.oss)
+ #else
+diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
+index b74de9c0969fcd..9e59a97f47472f 100644
+--- a/sound/core/seq/seq_clientmgr.c
++++ b/sound/core/seq/seq_clientmgr.c
+@@ -1164,8 +1164,7 @@ static __poll_t snd_seq_poll(struct file *file, poll_table * wait)
+ 	if (snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_OUTPUT) {
+ 
+ 		/* check if data is available in the pool */
+-		if (!snd_seq_write_pool_allocated(client) ||
+-		    snd_seq_pool_poll_wait(client->pool, file, wait))
++		if (snd_seq_pool_poll_wait(client->pool, file, wait))
+ 			mask |= EPOLLOUT | EPOLLWRNORM;
+ 	}
+ 
+@@ -2583,8 +2582,6 @@ int snd_seq_kernel_client_write_poll(int clientid, struct file *file, poll_table
+ 	if (client == NULL)
+ 		return -ENXIO;
+ 
+-	if (! snd_seq_write_pool_allocated(client))
+-		return 1;
+ 	if (snd_seq_pool_poll_wait(client->pool, file, wait))
+ 		return 1;
+ 	return 0;
+diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c
+index 20155e3e87c6ac..ccde0ca3d20823 100644
+--- a/sound/core/seq/seq_memory.c
++++ b/sound/core/seq/seq_memory.c
+@@ -427,6 +427,7 @@ int snd_seq_pool_poll_wait(struct snd_seq_pool *pool, struct file *file,
+ 			   poll_table *wait)
+ {
+ 	poll_wait(file, &pool->output_sleep, wait);
++	guard(spinlock_irq)(&pool->lock);
+ 	return snd_seq_output_ok(pool);
+ }
+ 
+diff --git a/sound/pci/hda/hda_beep.c b/sound/pci/hda/hda_beep.c
+index e51d475725576b..13a7d92e8d8d03 100644
+--- a/sound/pci/hda/hda_beep.c
++++ b/sound/pci/hda/hda_beep.c
+@@ -31,8 +31,9 @@ static void generate_tone(struct hda_beep *beep, int tone)
+ 			beep->power_hook(beep, true);
+ 		beep->playing = 1;
+ 	}
+-	snd_hda_codec_write(codec, beep->nid, 0,
+-			    AC_VERB_SET_BEEP_CONTROL, tone);
++	if (!codec->beep_just_power_on)
++		snd_hda_codec_write(codec, beep->nid, 0,
++				    AC_VERB_SET_BEEP_CONTROL, tone);
+ 	if (!tone && beep->playing) {
+ 		beep->playing = 0;
+ 		if (beep->power_hook)
+@@ -212,10 +213,12 @@ int snd_hda_attach_beep_device(struct hda_codec *codec, int nid)
+ 	struct hda_beep *beep;
+ 	int err;
+ 
+-	if (!snd_hda_get_bool_hint(codec, "beep"))
+-		return 0; /* disabled explicitly by hints */
+-	if (codec->beep_mode == HDA_BEEP_MODE_OFF)
+-		return 0; /* disabled by module option */
++	if (!codec->beep_just_power_on) {
++		if (!snd_hda_get_bool_hint(codec, "beep"))
++			return 0; /* disabled explicitly by hints */
++		if (codec->beep_mode == HDA_BEEP_MODE_OFF)
++			return 0; /* disabled by module option */
++	}
+ 
+ 	beep = kzalloc(sizeof(*beep), GFP_KERNEL);
+ 	if (beep == NULL)
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index db72c5fce9d183..13ffc9a6555f65 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -28,6 +28,7 @@
+ #include <sound/hda_codec.h>
+ #include "hda_local.h"
+ #include "hda_auto_parser.h"
++#include "hda_beep.h"
+ #include "hda_jack.h"
+ #include "hda_generic.h"
+ #include "hda_component.h"
+@@ -6964,6 +6965,41 @@ static void alc285_fixup_hp_spectre_x360_eb1(struct hda_codec *codec,
+ 	}
+ }
+ 
++/* GPIO1 = amplifier on/off */
++static void alc285_fixup_hp_spectre_x360_df1(struct hda_codec *codec,
++					     const struct hda_fixup *fix,
++					     int action)
++{
++	struct alc_spec *spec = codec->spec;
++	static const hda_nid_t conn[] = { 0x02 };
++	static const struct hda_pintbl pincfgs[] = {
++		{ 0x14, 0x90170110 },  /* front/high speakers */
++		{ 0x17, 0x90170130 },  /* back/bass speakers */
++		{ }
++	};
++
++	// enable mute led
++	alc285_fixup_hp_mute_led_coefbit(codec, fix, action);
++
++	switch (action) {
++	case HDA_FIXUP_ACT_PRE_PROBE:
++		/* needed for amp of back speakers */
++		spec->gpio_mask |= 0x01;
++		spec->gpio_dir |= 0x01;
++		snd_hda_apply_pincfgs(codec, pincfgs);
++		/* share DAC to have unified volume control */
++		snd_hda_override_conn_list(codec, 0x14, ARRAY_SIZE(conn), conn);
++		snd_hda_override_conn_list(codec, 0x17, ARRAY_SIZE(conn), conn);
++		break;
++	case HDA_FIXUP_ACT_INIT:
++		/* need to toggle GPIO to enable the amp of back speakers */
++		alc_update_gpio_data(codec, 0x01, true);
++		msleep(100);
++		alc_update_gpio_data(codec, 0x01, false);
++		break;
++	}
++}
++
+ static void alc285_fixup_hp_spectre_x360(struct hda_codec *codec,
+ 					  const struct hda_fixup *fix, int action)
+ {
+@@ -7036,6 +7072,30 @@ static void alc285_fixup_hp_envy_x360(struct hda_codec *codec,
+ 	}
+ }
+ 
++static void alc285_fixup_hp_beep(struct hda_codec *codec,
++				 const struct hda_fixup *fix, int action)
++{
++	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
++		codec->beep_just_power_on = true;
++	} else  if (action == HDA_FIXUP_ACT_INIT) {
++#ifdef CONFIG_SND_HDA_INPUT_BEEP
++		/*
++		 * Just enable loopback to internal speaker and headphone jack.
++		 * Disable amplification to get about the same beep volume as
++		 * was on pure BIOS setup before loading the driver.
++		 */
++		alc_update_coef_idx(codec, 0x36, 0x7070, BIT(13));
++
++		snd_hda_enable_beep_device(codec, 1);
++
++#if !IS_ENABLED(CONFIG_INPUT_PCSPKR)
++		dev_warn_once(hda_codec_dev(codec),
++			      "enable CONFIG_INPUT_PCSPKR to get PC beeps\n");
++#endif
++#endif
++	}
++}
++
+ /* for hda_fixup_thinkpad_acpi() */
+ #include "thinkpad_helper.c"
+ 
+@@ -7705,6 +7765,7 @@ enum {
+ 	ALC280_FIXUP_HP_9480M,
+ 	ALC245_FIXUP_HP_X360_AMP,
+ 	ALC285_FIXUP_HP_SPECTRE_X360_EB1,
++	ALC285_FIXUP_HP_SPECTRE_X360_DF1,
+ 	ALC285_FIXUP_HP_ENVY_X360,
+ 	ALC288_FIXUP_DELL_HEADSET_MODE,
+ 	ALC288_FIXUP_DELL1_MIC_NO_PRESENCE,
+@@ -7806,6 +7867,7 @@ enum {
+ 	ALC285_FIXUP_HP_GPIO_LED,
+ 	ALC285_FIXUP_HP_MUTE_LED,
+ 	ALC285_FIXUP_HP_SPECTRE_X360_MUTE_LED,
++	ALC285_FIXUP_HP_BEEP_MICMUTE_LED,
+ 	ALC236_FIXUP_HP_MUTE_LED_COEFBIT2,
+ 	ALC236_FIXUP_HP_GPIO_LED,
+ 	ALC236_FIXUP_HP_MUTE_LED,
+@@ -9395,6 +9457,12 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = alc285_fixup_hp_spectre_x360_mute_led,
+ 	},
++	[ALC285_FIXUP_HP_BEEP_MICMUTE_LED] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc285_fixup_hp_beep,
++		.chained = true,
++		.chain_id = ALC285_FIXUP_HP_MUTE_LED,
++	},
+ 	[ALC236_FIXUP_HP_MUTE_LED_COEFBIT2] = {
+ 	    .type = HDA_FIXUP_FUNC,
+ 	    .v.func = alc236_fixup_hp_mute_led_coefbit2,
+@@ -9766,6 +9834,10 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = alc285_fixup_hp_spectre_x360_eb1
+ 	},
++	[ALC285_FIXUP_HP_SPECTRE_X360_DF1] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc285_fixup_hp_spectre_x360_df1
++	},
+ 	[ALC285_FIXUP_HP_ENVY_X360] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = alc285_fixup_hp_envy_x360,
+@@ -10483,6 +10555,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x86c1, "HP Laptop 15-da3001TU", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
+ 	SND_PCI_QUIRK(0x103c, 0x86c7, "HP Envy AiO 32", ALC274_FIXUP_HP_ENVY_GPIO),
+ 	SND_PCI_QUIRK(0x103c, 0x86e7, "HP Spectre x360 15-eb0xxx", ALC285_FIXUP_HP_SPECTRE_X360_EB1),
++	SND_PCI_QUIRK(0x103c, 0x863e, "HP Spectre x360 15-df1xxx", ALC285_FIXUP_HP_SPECTRE_X360_DF1),
+ 	SND_PCI_QUIRK(0x103c, 0x86e8, "HP Spectre x360 15-eb0xxx", ALC285_FIXUP_HP_SPECTRE_X360_EB1),
+ 	SND_PCI_QUIRK(0x103c, 0x86f9, "HP Spectre x360 13-aw0xxx", ALC285_FIXUP_HP_SPECTRE_X360_MUTE_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8716, "HP Elite Dragonfly G2 Notebook PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
+@@ -10493,7 +10566,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8730, "HP ProBook 445 G7", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ 	SND_PCI_QUIRK(0x103c, 0x8735, "HP ProBook 435 G7", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ 	SND_PCI_QUIRK(0x103c, 0x8736, "HP", ALC285_FIXUP_HP_GPIO_AMP_INIT),
+-	SND_PCI_QUIRK(0x103c, 0x8760, "HP", ALC285_FIXUP_HP_MUTE_LED),
++	SND_PCI_QUIRK(0x103c, 0x8760, "HP EliteBook 8{4,5}5 G7", ALC285_FIXUP_HP_BEEP_MICMUTE_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x876e, "HP ENVY x360 Convertible 13-ay0xxx", ALC245_FIXUP_HP_X360_MUTE_LEDS),
+ 	SND_PCI_QUIRK(0x103c, 0x877a, "HP", ALC285_FIXUP_HP_MUTE_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x877d, "HP", ALC236_FIXUP_HP_MUTE_LED),
+@@ -11109,6 +11182,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x38fa, "Thinkbook 16P Gen5", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD),
+ 	SND_PCI_QUIRK(0x17aa, 0x38fd, "ThinkBook plus Gen5 Hybrid", ALC287_FIXUP_TAS2781_I2C),
+ 	SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
++	SND_PCI_QUIRK(0x17aa, 0x390d, "Lenovo Yoga Pro 7 14ASP10", ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN),
+ 	SND_PCI_QUIRK(0x17aa, 0x3913, "Lenovo 145", ALC236_FIXUP_LENOVO_INV_DMIC),
+ 	SND_PCI_QUIRK(0x17aa, 0x391f, "Yoga S990-16 pro Quad YC Quad", ALC287_FIXUP_TAS2781_I2C),
+ 	SND_PCI_QUIRK(0x17aa, 0x3920, "Yoga S990-16 pro Quad VECO Quad", ALC287_FIXUP_TAS2781_I2C),
+@@ -11372,6 +11446,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
+ 	{.id = ALC295_FIXUP_HP_OMEN, .name = "alc295-hp-omen"},
+ 	{.id = ALC285_FIXUP_HP_SPECTRE_X360, .name = "alc285-hp-spectre-x360"},
+ 	{.id = ALC285_FIXUP_HP_SPECTRE_X360_EB1, .name = "alc285-hp-spectre-x360-eb1"},
++	{.id = ALC285_FIXUP_HP_SPECTRE_X360_DF1, .name = "alc285-hp-spectre-x360-df1"},
+ 	{.id = ALC285_FIXUP_HP_ENVY_X360, .name = "alc285-hp-envy-x360"},
+ 	{.id = ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP, .name = "alc287-ideapad-bass-spk-amp"},
+ 	{.id = ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN, .name = "alc287-yoga9-bass-spk-pin"},
+diff --git a/sound/soc/codecs/cs42l43-jack.c b/sound/soc/codecs/cs42l43-jack.c
+index 73d764fc853929..984a7f470a31f6 100644
+--- a/sound/soc/codecs/cs42l43-jack.c
++++ b/sound/soc/codecs/cs42l43-jack.c
+@@ -654,6 +654,10 @@ static int cs42l43_run_type_detect(struct cs42l43_codec *priv)
+ 
+ 	reinit_completion(&priv->type_detect);
+ 
++	regmap_update_bits(cs42l43->regmap, CS42L43_STEREO_MIC_CLAMP_CTRL,
++			   CS42L43_SMIC_HPAMP_CLAMP_DIS_FRC_VAL_MASK,
++			   CS42L43_SMIC_HPAMP_CLAMP_DIS_FRC_VAL_MASK);
++
+ 	cs42l43_start_hs_bias(priv, true);
+ 	regmap_update_bits(cs42l43->regmap, CS42L43_HS2,
+ 			   CS42L43_HSDET_MODE_MASK, 0x3 << CS42L43_HSDET_MODE_SHIFT);
+@@ -665,6 +669,9 @@ static int cs42l43_run_type_detect(struct cs42l43_codec *priv)
+ 			   CS42L43_HSDET_MODE_MASK, 0x2 << CS42L43_HSDET_MODE_SHIFT);
+ 	cs42l43_stop_hs_bias(priv);
+ 
++	regmap_update_bits(cs42l43->regmap, CS42L43_STEREO_MIC_CLAMP_CTRL,
++			   CS42L43_SMIC_HPAMP_CLAMP_DIS_FRC_VAL_MASK, 0);
++
+ 	if (!time_left)
+ 		return -ETIMEDOUT;
+ 
+diff --git a/sound/soc/codecs/mt6359-accdet.h b/sound/soc/codecs/mt6359-accdet.h
+index c234f2f4276a12..78ada3a5bfae55 100644
+--- a/sound/soc/codecs/mt6359-accdet.h
++++ b/sound/soc/codecs/mt6359-accdet.h
+@@ -123,6 +123,15 @@ struct mt6359_accdet {
+ 	struct workqueue_struct *jd_workqueue;
+ };
+ 
++#if IS_ENABLED(CONFIG_SND_SOC_MT6359_ACCDET)
+ int mt6359_accdet_enable_jack_detect(struct snd_soc_component *component,
+ 				     struct snd_soc_jack *jack);
++#else
++static inline int
++mt6359_accdet_enable_jack_detect(struct snd_soc_component *component,
++				 struct snd_soc_jack *jack)
++{
++	return -EOPNOTSUPP;
++}
++#endif
+ #endif
+diff --git a/sound/soc/codecs/pcm3168a.c b/sound/soc/codecs/pcm3168a.c
+index fac0617ab95b65..6cbb8d0535b02e 100644
+--- a/sound/soc/codecs/pcm3168a.c
++++ b/sound/soc/codecs/pcm3168a.c
+@@ -493,9 +493,9 @@ static int pcm3168a_hw_params(struct snd_pcm_substream *substream,
+ 		}
+ 		break;
+ 	case 24:
+-		if (provider_mode || (format == SND_SOC_DAIFMT_DSP_A) ||
+-		    		     (format == SND_SOC_DAIFMT_DSP_B)) {
+-			dev_err(component->dev, "24-bit slots not supported in provider mode, or consumer mode using DSP\n");
++		if (!provider_mode && ((format == SND_SOC_DAIFMT_DSP_A) ||
++				       (format == SND_SOC_DAIFMT_DSP_B))) {
++			dev_err(component->dev, "24-bit slots not supported in consumer mode using DSP\n");
+ 			return -EINVAL;
+ 		}
+ 		break;
+diff --git a/sound/soc/codecs/pcm6240.c b/sound/soc/codecs/pcm6240.c
+index 5d99877f883972..e59bb77edf0913 100644
+--- a/sound/soc/codecs/pcm6240.c
++++ b/sound/soc/codecs/pcm6240.c
+@@ -14,7 +14,7 @@
+ 
+ #include <linux/unaligned.h>
+ #include <linux/firmware.h>
+-#include <linux/gpio.h>
++#include <linux/gpio/consumer.h>
+ #include <linux/i2c.h>
+ #include <linux/module.h>
+ #include <linux/of_irq.h>
+@@ -2035,10 +2035,8 @@ static const struct regmap_config pcmdevice_i2c_regmap = {
+ 
+ static void pcmdevice_remove(struct pcmdevice_priv *pcm_dev)
+ {
+-	if (gpio_is_valid(pcm_dev->irq_info.gpio)) {
+-		gpio_free(pcm_dev->irq_info.gpio);
+-		free_irq(pcm_dev->irq_info.nmb, pcm_dev);
+-	}
++	if (pcm_dev->irq)
++		free_irq(pcm_dev->irq, pcm_dev);
+ 	mutex_destroy(&pcm_dev->codec_lock);
+ }
+ 
+@@ -2110,7 +2108,7 @@ static int pcmdevice_i2c_probe(struct i2c_client *i2c)
+ 		ndev = 1;
+ 		dev_addrs[0] = i2c->addr;
+ 	}
+-	pcm_dev->irq_info.gpio = of_irq_get(np, 0);
++	pcm_dev->irq = of_irq_get(np, 0);
+ 
+ 	for (i = 0; i < ndev; i++)
+ 		pcm_dev->addr[i] = dev_addrs[i];
+@@ -2133,22 +2131,10 @@ static int pcmdevice_i2c_probe(struct i2c_client *i2c)
+ 
+ 	if (pcm_dev->chip_id == PCM1690)
+ 		goto skip_interrupt;
+-	if (gpio_is_valid(pcm_dev->irq_info.gpio)) {
+-		dev_dbg(pcm_dev->dev, "irq-gpio = %d", pcm_dev->irq_info.gpio);
+-
+-		ret = gpio_request(pcm_dev->irq_info.gpio, "PCMDEV-IRQ");
+-		if (!ret) {
+-			int gpio = pcm_dev->irq_info.gpio;
+-
+-			gpio_direction_input(gpio);
+-			pcm_dev->irq_info.nmb = gpio_to_irq(gpio);
+-
+-		} else
+-			dev_err(pcm_dev->dev, "%s: GPIO %d request error\n",
+-				__func__, pcm_dev->irq_info.gpio);
++	if (pcm_dev->irq) {
++		dev_dbg(pcm_dev->dev, "irq = %d", pcm_dev->irq);
+ 	} else
+-		dev_err(pcm_dev->dev, "Looking up irq-gpio failed %d\n",
+-			pcm_dev->irq_info.gpio);
++		dev_err(pcm_dev->dev, "No irq provided\n");
+ 
+ skip_interrupt:
+ 	ret = devm_snd_soc_register_component(&i2c->dev,
+diff --git a/sound/soc/codecs/pcm6240.h b/sound/soc/codecs/pcm6240.h
+index 1e125bb9728603..2d8f9e798139ac 100644
+--- a/sound/soc/codecs/pcm6240.h
++++ b/sound/soc/codecs/pcm6240.h
+@@ -208,11 +208,6 @@ struct pcmdevice_regbin {
+ 	struct pcmdevice_config_info **cfg_info;
+ };
+ 
+-struct pcmdevice_irqinfo {
+-	int gpio;
+-	int nmb;
+-};
+-
+ struct pcmdevice_priv {
+ 	struct snd_soc_component *component;
+ 	struct i2c_client *client;
+@@ -221,7 +216,7 @@ struct pcmdevice_priv {
+ 	struct gpio_desc *hw_rst;
+ 	struct regmap *regmap;
+ 	struct pcmdevice_regbin regbin;
+-	struct pcmdevice_irqinfo irq_info;
++	int irq;
+ 	unsigned int addr[PCMDEVICE_MAX_I2C_DEVICES];
+ 	unsigned int chip_id;
+ 	int cur_conf;
+diff --git a/sound/soc/codecs/rt722-sdca-sdw.c b/sound/soc/codecs/rt722-sdca-sdw.c
+index 5449d6b5cf3d11..bf83f4bc94fc18 100644
+--- a/sound/soc/codecs/rt722-sdca-sdw.c
++++ b/sound/soc/codecs/rt722-sdca-sdw.c
+@@ -28,9 +28,50 @@ static bool rt722_sdca_readable_register(struct device *dev, unsigned int reg)
+ 			0):
+ 	case SDW_SDCA_CTL(FUNC_NUM_JACK_CODEC, RT722_SDCA_ENT_GE49, RT722_SDCA_CTL_DETECTED_MODE,
+ 			0):
+-	case SDW_SDCA_CTL(FUNC_NUM_HID, RT722_SDCA_ENT_HID01, RT722_SDCA_CTL_HIDTX_CURRENT_OWNER,
+-			0) ... SDW_SDCA_CTL(FUNC_NUM_HID, RT722_SDCA_ENT_HID01,
+-			RT722_SDCA_CTL_HIDTX_MESSAGE_LENGTH, 0):
++	case SDW_SDCA_CTL(FUNC_NUM_JACK_CODEC, RT722_SDCA_ENT_XU03, RT722_SDCA_CTL_SELECTED_MODE,
++			0):
++	case SDW_SDCA_CTL(FUNC_NUM_JACK_CODEC, RT722_SDCA_ENT_USER_FU05,
++			  RT722_SDCA_CTL_FU_MUTE, CH_L) ...
++	     SDW_SDCA_CTL(FUNC_NUM_JACK_CODEC, RT722_SDCA_ENT_USER_FU05,
++			  RT722_SDCA_CTL_FU_MUTE, CH_R):
++	case SDW_SDCA_CTL(FUNC_NUM_JACK_CODEC, RT722_SDCA_ENT_XU0D,
++			  RT722_SDCA_CTL_SELECTED_MODE, 0):
++	case SDW_SDCA_CTL(FUNC_NUM_JACK_CODEC, RT722_SDCA_ENT_USER_FU0F,
++			  RT722_SDCA_CTL_FU_MUTE, CH_L) ...
++	     SDW_SDCA_CTL(FUNC_NUM_JACK_CODEC, RT722_SDCA_ENT_USER_FU0F,
++			  RT722_SDCA_CTL_FU_MUTE, CH_R):
++	case SDW_SDCA_CTL(FUNC_NUM_JACK_CODEC, RT722_SDCA_ENT_PDE40,
++			  RT722_SDCA_CTL_REQ_POWER_STATE, 0):
++	case SDW_SDCA_CTL(FUNC_NUM_JACK_CODEC, RT722_SDCA_ENT_PDE12,
++			  RT722_SDCA_CTL_REQ_POWER_STATE, 0):
++	case SDW_SDCA_CTL(FUNC_NUM_JACK_CODEC, RT722_SDCA_ENT_CS01,
++			  RT722_SDCA_CTL_SAMPLE_FREQ_INDEX, 0):
++	case SDW_SDCA_CTL(FUNC_NUM_JACK_CODEC, RT722_SDCA_ENT_CS11,
++			  RT722_SDCA_CTL_SAMPLE_FREQ_INDEX, 0):
++	case SDW_SDCA_CTL(FUNC_NUM_MIC_ARRAY, RT722_SDCA_ENT_USER_FU1E,
++			  RT722_SDCA_CTL_FU_MUTE, CH_01) ...
++	     SDW_SDCA_CTL(FUNC_NUM_MIC_ARRAY, RT722_SDCA_ENT_USER_FU1E,
++			  RT722_SDCA_CTL_FU_MUTE, CH_04):
++	case SDW_SDCA_CTL(FUNC_NUM_MIC_ARRAY, RT722_SDCA_ENT_IT26,
++			  RT722_SDCA_CTL_VENDOR_DEF, 0):
++	case SDW_SDCA_CTL(FUNC_NUM_MIC_ARRAY, RT722_SDCA_ENT_PDE2A,
++			  RT722_SDCA_CTL_REQ_POWER_STATE, 0):
++	case SDW_SDCA_CTL(FUNC_NUM_MIC_ARRAY, RT722_SDCA_ENT_CS1F,
++			  RT722_SDCA_CTL_SAMPLE_FREQ_INDEX, 0):
++	case SDW_SDCA_CTL(FUNC_NUM_HID, RT722_SDCA_ENT_HID01,
++			  RT722_SDCA_CTL_HIDTX_CURRENT_OWNER, 0) ...
++	     SDW_SDCA_CTL(FUNC_NUM_HID, RT722_SDCA_ENT_HID01,
++			  RT722_SDCA_CTL_HIDTX_MESSAGE_LENGTH, 0):
++	case SDW_SDCA_CTL(FUNC_NUM_AMP, RT722_SDCA_ENT_USER_FU06,
++			  RT722_SDCA_CTL_FU_MUTE, CH_L) ...
++	     SDW_SDCA_CTL(FUNC_NUM_AMP, RT722_SDCA_ENT_USER_FU06,
++			  RT722_SDCA_CTL_FU_MUTE, CH_R):
++	case SDW_SDCA_CTL(FUNC_NUM_AMP, RT722_SDCA_ENT_OT23,
++			  RT722_SDCA_CTL_VENDOR_DEF, CH_08):
++	case SDW_SDCA_CTL(FUNC_NUM_AMP, RT722_SDCA_ENT_PDE23,
++			  RT722_SDCA_CTL_REQ_POWER_STATE, 0):
++	case SDW_SDCA_CTL(FUNC_NUM_AMP, RT722_SDCA_ENT_CS31,
++			  RT722_SDCA_CTL_SAMPLE_FREQ_INDEX, 0):
+ 	case RT722_BUF_ADDR_HID1 ... RT722_BUF_ADDR_HID2:
+ 		return true;
+ 	default:
+@@ -74,6 +115,7 @@ static bool rt722_sdca_mbq_readable_register(struct device *dev, unsigned int re
+ 	case 0x5600000 ... 0x5600007:
+ 	case 0x5700000 ... 0x5700004:
+ 	case 0x5800000 ... 0x5800004:
++	case 0x5810000:
+ 	case 0x5b00003:
+ 	case 0x5c00011:
+ 	case 0x5d00006:
+@@ -81,6 +123,7 @@ static bool rt722_sdca_mbq_readable_register(struct device *dev, unsigned int re
+ 	case 0x5f00030:
+ 	case 0x6100000 ... 0x6100051:
+ 	case 0x6100055 ... 0x6100057:
++	case 0x6100060:
+ 	case 0x6100062:
+ 	case 0x6100064 ... 0x6100065:
+ 	case 0x6100067:
+diff --git a/sound/soc/codecs/tas2764.c b/sound/soc/codecs/tas2764.c
+index 58315eab492a16..39a7d39536fe6f 100644
+--- a/sound/soc/codecs/tas2764.c
++++ b/sound/soc/codecs/tas2764.c
+@@ -180,33 +180,6 @@ static SOC_ENUM_SINGLE_DECL(
+ static const struct snd_kcontrol_new tas2764_asi1_mux =
+ 	SOC_DAPM_ENUM("ASI1 Source", tas2764_ASI1_src_enum);
+ 
+-static int tas2764_dac_event(struct snd_soc_dapm_widget *w,
+-			     struct snd_kcontrol *kcontrol, int event)
+-{
+-	struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
+-	struct tas2764_priv *tas2764 = snd_soc_component_get_drvdata(component);
+-	int ret;
+-
+-	switch (event) {
+-	case SND_SOC_DAPM_POST_PMU:
+-		tas2764->dac_powered = true;
+-		ret = tas2764_update_pwr_ctrl(tas2764);
+-		break;
+-	case SND_SOC_DAPM_PRE_PMD:
+-		tas2764->dac_powered = false;
+-		ret = tas2764_update_pwr_ctrl(tas2764);
+-		break;
+-	default:
+-		dev_err(tas2764->dev, "Unsupported event\n");
+-		return -EINVAL;
+-	}
+-
+-	if (ret < 0)
+-		return ret;
+-
+-	return 0;
+-}
+-
+ static const struct snd_kcontrol_new isense_switch =
+ 	SOC_DAPM_SINGLE("Switch", TAS2764_PWR_CTRL, TAS2764_ISENSE_POWER_EN, 1, 1);
+ static const struct snd_kcontrol_new vsense_switch =
+@@ -219,8 +192,7 @@ static const struct snd_soc_dapm_widget tas2764_dapm_widgets[] = {
+ 			    1, &isense_switch),
+ 	SND_SOC_DAPM_SWITCH("VSENSE", TAS2764_PWR_CTRL, TAS2764_VSENSE_POWER_EN,
+ 			    1, &vsense_switch),
+-	SND_SOC_DAPM_DAC_E("DAC", NULL, SND_SOC_NOPM, 0, 0, tas2764_dac_event,
+-			   SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
++	SND_SOC_DAPM_DAC("DAC", NULL, SND_SOC_NOPM, 0, 0),
+ 	SND_SOC_DAPM_OUTPUT("OUT"),
+ 	SND_SOC_DAPM_SIGGEN("VMON"),
+ 	SND_SOC_DAPM_SIGGEN("IMON")
+@@ -241,9 +213,28 @@ static int tas2764_mute(struct snd_soc_dai *dai, int mute, int direction)
+ {
+ 	struct tas2764_priv *tas2764 =
+ 			snd_soc_component_get_drvdata(dai->component);
++	int ret;
++
++	if (!mute) {
++		tas2764->dac_powered = true;
++		ret = tas2764_update_pwr_ctrl(tas2764);
++		if (ret)
++			return ret;
++	}
+ 
+ 	tas2764->unmuted = !mute;
+-	return tas2764_update_pwr_ctrl(tas2764);
++	ret = tas2764_update_pwr_ctrl(tas2764);
++	if (ret)
++		return ret;
++
++	if (mute) {
++		tas2764->dac_powered = false;
++		ret = tas2764_update_pwr_ctrl(tas2764);
++		if (ret)
++			return ret;
++	}
++
++	return 0;
+ }
+ 
+ static int tas2764_set_bitwidth(struct tas2764_priv *tas2764, int bitwidth)
+@@ -634,6 +625,7 @@ static const struct reg_default tas2764_reg_defaults[] = {
+ 	{ TAS2764_TDM_CFG2, 0x0a },
+ 	{ TAS2764_TDM_CFG3, 0x10 },
+ 	{ TAS2764_TDM_CFG5, 0x42 },
++	{ TAS2764_INT_CLK_CFG, 0x19 },
+ };
+ 
+ static const struct regmap_range_cfg tas2764_regmap_ranges[] = {
+@@ -651,6 +643,7 @@ static const struct regmap_range_cfg tas2764_regmap_ranges[] = {
+ static bool tas2764_volatile_register(struct device *dev, unsigned int reg)
+ {
+ 	switch (reg) {
++	case TAS2764_SW_RST:
+ 	case TAS2764_INT_LTCH0 ... TAS2764_INT_LTCH4:
+ 	case TAS2764_INT_CLK_CFG:
+ 		return true;
+diff --git a/sound/soc/codecs/wsa883x.c b/sound/soc/codecs/wsa883x.c
+index 47da5674d7c922..e31b7fb104e6c5 100644
+--- a/sound/soc/codecs/wsa883x.c
++++ b/sound/soc/codecs/wsa883x.c
+@@ -529,7 +529,7 @@ static const struct sdw_port_config wsa883x_pconfig[WSA883X_MAX_SWR_PORTS] = {
+ 	},
+ 	[WSA883X_PORT_VISENSE] = {
+ 		.num = WSA883X_PORT_VISENSE + 1,
+-		.ch_mask = 0x3,
++		.ch_mask = 0x1,
+ 	},
+ };
+ 
+diff --git a/sound/soc/codecs/wsa884x.c b/sound/soc/codecs/wsa884x.c
+index 560a2c04b69553..18b0ee8f15a55a 100644
+--- a/sound/soc/codecs/wsa884x.c
++++ b/sound/soc/codecs/wsa884x.c
+@@ -891,7 +891,7 @@ static const struct sdw_port_config wsa884x_pconfig[WSA884X_MAX_SWR_PORTS] = {
+ 	},
+ 	[WSA884X_PORT_VISENSE] = {
+ 		.num = WSA884X_PORT_VISENSE + 1,
+-		.ch_mask = 0x3,
++		.ch_mask = 0x1,
+ 	},
+ 	[WSA884X_PORT_CPS] = {
+ 		.num = WSA884X_PORT_CPS + 1,
+diff --git a/sound/soc/fsl/imx-card.c b/sound/soc/fsl/imx-card.c
+index 93dbe40008c009..e5ae435171d68b 100644
+--- a/sound/soc/fsl/imx-card.c
++++ b/sound/soc/fsl/imx-card.c
+@@ -516,7 +516,7 @@ static int imx_card_parse_of(struct imx_card_data *data)
+ 	if (!card->dai_link)
+ 		return -ENOMEM;
+ 
+-	data->link_data = devm_kcalloc(dev, num_links, sizeof(*link), GFP_KERNEL);
++	data->link_data = devm_kcalloc(dev, num_links, sizeof(*link_data), GFP_KERNEL);
+ 	if (!data->link_data)
+ 		return -ENOMEM;
+ 
+diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
+index 1148e9498d8e83..b6434b4731261b 100644
+--- a/sound/soc/intel/boards/bytcr_rt5640.c
++++ b/sound/soc/intel/boards/bytcr_rt5640.c
+@@ -576,6 +576,19 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
+ 					BYT_RT5640_SSP0_AIF2 |
+ 					BYT_RT5640_MCLK_EN),
+ 	},
++	{       /* Acer Aspire SW3-013 */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Aspire SW3-013"),
++		},
++		.driver_data = (void *)(BYT_RT5640_DMIC1_MAP |
++					BYT_RT5640_JD_SRC_JD2_IN4N |
++					BYT_RT5640_OVCD_TH_2000UA |
++					BYT_RT5640_OVCD_SF_0P75 |
++					BYT_RT5640_DIFF_MIC |
++					BYT_RT5640_SSP0_AIF1 |
++					BYT_RT5640_MCLK_EN),
++	},
+ 	{
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+diff --git a/sound/soc/mediatek/mt8188/mt8188-afe-clk.c b/sound/soc/mediatek/mt8188/mt8188-afe-clk.c
+index e69c1bb2cb2395..7f411b85778237 100644
+--- a/sound/soc/mediatek/mt8188/mt8188-afe-clk.c
++++ b/sound/soc/mediatek/mt8188/mt8188-afe-clk.c
+@@ -58,7 +58,15 @@ static const char *aud_clks[MT8188_CLK_NUM] = {
+ 	[MT8188_CLK_AUD_ADC] = "aud_adc",
+ 	[MT8188_CLK_AUD_DAC_HIRES] = "aud_dac_hires",
+ 	[MT8188_CLK_AUD_A1SYS_HP] = "aud_a1sys_hp",
++	[MT8188_CLK_AUD_AFE_DMIC1] = "aud_afe_dmic1",
++	[MT8188_CLK_AUD_AFE_DMIC2] = "aud_afe_dmic2",
++	[MT8188_CLK_AUD_AFE_DMIC3] = "aud_afe_dmic3",
++	[MT8188_CLK_AUD_AFE_DMIC4] = "aud_afe_dmic4",
+ 	[MT8188_CLK_AUD_ADC_HIRES] = "aud_adc_hires",
++	[MT8188_CLK_AUD_DMIC_HIRES1] = "aud_dmic_hires1",
++	[MT8188_CLK_AUD_DMIC_HIRES2] = "aud_dmic_hires2",
++	[MT8188_CLK_AUD_DMIC_HIRES3] = "aud_dmic_hires3",
++	[MT8188_CLK_AUD_DMIC_HIRES4] = "aud_dmic_hires4",
+ 	[MT8188_CLK_AUD_I2SIN] = "aud_i2sin",
+ 	[MT8188_CLK_AUD_TDM_IN] = "aud_tdm_in",
+ 	[MT8188_CLK_AUD_I2S_OUT] = "aud_i2s_out",
+diff --git a/sound/soc/mediatek/mt8188/mt8188-afe-clk.h b/sound/soc/mediatek/mt8188/mt8188-afe-clk.h
+index ec53c171c170a8..c6c78d684f3ee1 100644
+--- a/sound/soc/mediatek/mt8188/mt8188-afe-clk.h
++++ b/sound/soc/mediatek/mt8188/mt8188-afe-clk.h
+@@ -54,7 +54,15 @@ enum {
+ 	MT8188_CLK_AUD_ADC,
+ 	MT8188_CLK_AUD_DAC_HIRES,
+ 	MT8188_CLK_AUD_A1SYS_HP,
++	MT8188_CLK_AUD_AFE_DMIC1,
++	MT8188_CLK_AUD_AFE_DMIC2,
++	MT8188_CLK_AUD_AFE_DMIC3,
++	MT8188_CLK_AUD_AFE_DMIC4,
+ 	MT8188_CLK_AUD_ADC_HIRES,
++	MT8188_CLK_AUD_DMIC_HIRES1,
++	MT8188_CLK_AUD_DMIC_HIRES2,
++	MT8188_CLK_AUD_DMIC_HIRES3,
++	MT8188_CLK_AUD_DMIC_HIRES4,
+ 	MT8188_CLK_AUD_I2SIN,
+ 	MT8188_CLK_AUD_TDM_IN,
+ 	MT8188_CLK_AUD_I2S_OUT,
+diff --git a/sound/soc/mediatek/mt8188/mt8188-afe-pcm.c b/sound/soc/mediatek/mt8188/mt8188-afe-pcm.c
+index 73e5c63aeec878..d36520c6272dd8 100644
+--- a/sound/soc/mediatek/mt8188/mt8188-afe-pcm.c
++++ b/sound/soc/mediatek/mt8188/mt8188-afe-pcm.c
+@@ -2855,10 +2855,6 @@ static bool mt8188_is_volatile_reg(struct device *dev, unsigned int reg)
+ 	case AFE_DMIC3_SRC_DEBUG_MON0:
+ 	case AFE_DMIC3_UL_SRC_MON0:
+ 	case AFE_DMIC3_UL_SRC_MON1:
+-	case DMIC_GAIN1_CUR:
+-	case DMIC_GAIN2_CUR:
+-	case DMIC_GAIN3_CUR:
+-	case DMIC_GAIN4_CUR:
+ 	case ETDM_IN1_MONITOR:
+ 	case ETDM_IN2_MONITOR:
+ 	case ETDM_OUT1_MONITOR:
+diff --git a/sound/soc/qcom/sm8250.c b/sound/soc/qcom/sm8250.c
+index 19adadedc88a2a..1001fd32138037 100644
+--- a/sound/soc/qcom/sm8250.c
++++ b/sound/soc/qcom/sm8250.c
+@@ -7,6 +7,7 @@
+ #include <sound/soc.h>
+ #include <sound/soc-dapm.h>
+ #include <sound/pcm.h>
++#include <sound/pcm_params.h>
+ #include <linux/soundwire/sdw.h>
+ #include <sound/jack.h>
+ #include <linux/input-event-codes.h>
+@@ -39,9 +40,11 @@ static int sm8250_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
+ 					SNDRV_PCM_HW_PARAM_RATE);
+ 	struct snd_interval *channels = hw_param_interval(params,
+ 					SNDRV_PCM_HW_PARAM_CHANNELS);
++	struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
+ 
+ 	rate->min = rate->max = 48000;
+ 	channels->min = channels->max = 2;
++	snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S16_LE);
+ 
+ 	return 0;
+ }
+diff --git a/sound/soc/sdw_utils/soc_sdw_cs42l43.c b/sound/soc/sdw_utils/soc_sdw_cs42l43.c
+index adb1c008e871d5..2dc7787234c361 100644
+--- a/sound/soc/sdw_utils/soc_sdw_cs42l43.c
++++ b/sound/soc/sdw_utils/soc_sdw_cs42l43.c
+@@ -20,6 +20,8 @@
+ #include <sound/soc-dapm.h>
+ #include <sound/soc_sdw_utils.h>
+ 
++#define CS42L43_SPK_VOLUME_0DB	128 /* 0dB Max */
++
+ static const struct snd_soc_dapm_route cs42l43_hs_map[] = {
+ 	{ "Headphone", NULL, "cs42l43 AMP3_OUT" },
+ 	{ "Headphone", NULL, "cs42l43 AMP4_OUT" },
+@@ -117,6 +119,14 @@ int asoc_sdw_cs42l43_spk_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_so
+ 			return -ENOMEM;
+ 	}
+ 
++	ret = snd_soc_limit_volume(card, "cs42l43 Speaker Digital Volume",
++				   CS42L43_SPK_VOLUME_0DB);
++	if (ret)
++		dev_err(card->dev, "cs42l43 speaker volume limit failed: %d\n", ret);
++	else
++		dev_info(card->dev, "Setting CS42L43 Speaker volume limit to %d\n",
++			 CS42L43_SPK_VOLUME_0DB);
++
+ 	ret = snd_soc_dapm_add_routes(&card->dapm, cs42l43_spk_map,
+ 				      ARRAY_SIZE(cs42l43_spk_map));
+ 	if (ret)
+diff --git a/sound/soc/soc-dai.c b/sound/soc/soc-dai.c
+index 4e08892d24c62d..de09d21add4539 100644
+--- a/sound/soc/soc-dai.c
++++ b/sound/soc/soc-dai.c
+@@ -275,10 +275,11 @@ int snd_soc_dai_set_tdm_slot(struct snd_soc_dai *dai,
+ 
+ 	if (dai->driver->ops &&
+ 	    dai->driver->ops->xlate_tdm_slot_mask)
+-		dai->driver->ops->xlate_tdm_slot_mask(slots,
+-						      &tx_mask, &rx_mask);
++		ret = dai->driver->ops->xlate_tdm_slot_mask(slots, &tx_mask, &rx_mask);
+ 	else
+-		snd_soc_xlate_tdm_slot_mask(slots, &tx_mask, &rx_mask);
++		ret = snd_soc_xlate_tdm_slot_mask(slots, &tx_mask, &rx_mask);
++	if (ret)
++		goto err;
+ 
+ 	for_each_pcm_streams(stream)
+ 		snd_soc_dai_tdm_mask_set(dai, stream, *tdm_mask[stream]);
+@@ -287,6 +288,7 @@ int snd_soc_dai_set_tdm_slot(struct snd_soc_dai *dai,
+ 	    dai->driver->ops->set_tdm_slot)
+ 		ret = dai->driver->ops->set_tdm_slot(dai, tx_mask, rx_mask,
+ 						      slots, slot_width);
++err:
+ 	return soc_dai_ret(dai, ret);
+ }
+ EXPORT_SYMBOL_GPL(snd_soc_dai_set_tdm_slot);
+diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c
+index b0e4e4168f38d5..fb11003d56cf65 100644
+--- a/sound/soc/soc-ops.c
++++ b/sound/soc/soc-ops.c
+@@ -639,6 +639,33 @@ int snd_soc_get_volsw_range(struct snd_kcontrol *kcontrol,
+ }
+ EXPORT_SYMBOL_GPL(snd_soc_get_volsw_range);
+ 
++static int snd_soc_clip_to_platform_max(struct snd_kcontrol *kctl)
++{
++	struct soc_mixer_control *mc = (struct soc_mixer_control *)kctl->private_value;
++	struct snd_ctl_elem_value uctl;
++	int ret;
++
++	if (!mc->platform_max)
++		return 0;
++
++	ret = kctl->get(kctl, &uctl);
++	if (ret < 0)
++		return ret;
++
++	if (uctl.value.integer.value[0] > mc->platform_max)
++		uctl.value.integer.value[0] = mc->platform_max;
++
++	if (snd_soc_volsw_is_stereo(mc) &&
++	    uctl.value.integer.value[1] > mc->platform_max)
++		uctl.value.integer.value[1] = mc->platform_max;
++
++	ret = kctl->put(kctl, &uctl);
++	if (ret < 0)
++		return ret;
++
++	return 0;
++}
++
+ /**
+  * snd_soc_limit_volume - Set new limit to an existing volume control.
+  *
+@@ -663,7 +690,7 @@ int snd_soc_limit_volume(struct snd_soc_card *card,
+ 		struct soc_mixer_control *mc = (struct soc_mixer_control *)kctl->private_value;
+ 		if (max <= mc->max - mc->min) {
+ 			mc->platform_max = max;
+-			ret = 0;
++			ret = snd_soc_clip_to_platform_max(kctl);
+ 		}
+ 	}
+ 	return ret;
+diff --git a/sound/soc/sof/intel/hda-bus.c b/sound/soc/sof/intel/hda-bus.c
+index 1989147aa6a463..5fe237688b9127 100644
+--- a/sound/soc/sof/intel/hda-bus.c
++++ b/sound/soc/sof/intel/hda-bus.c
+@@ -76,7 +76,7 @@ void sof_hda_bus_init(struct snd_sof_dev *sdev, struct device *dev)
+ 
+ 	snd_hdac_ext_bus_init(bus, dev, &bus_core_ops, sof_hda_ext_ops);
+ 
+-	if (chip && chip->hw_ip_version == SOF_INTEL_ACE_2_0)
++	if (chip && chip->hw_ip_version >= SOF_INTEL_ACE_2_0)
+ 		bus->use_pio_for_commands = true;
+ #else
+ 	snd_hdac_ext_bus_init(bus, dev, NULL, NULL);
+diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c
+index c924a998d6f90d..9c8f79e55ec5d8 100644
+--- a/sound/soc/sof/intel/hda.c
++++ b/sound/soc/sof/intel/hda.c
+@@ -1007,7 +1007,21 @@ static void hda_generic_machine_select(struct snd_sof_dev *sdev,
+ 		if (!*mach && codec_num <= 2) {
+ 			bool tplg_fixup = false;
+ 
+-			hda_mach = snd_soc_acpi_intel_hda_machines;
++			/*
++			 * make a local copy of the match array since we might
++			 * be modifying it
++			 */
++			hda_mach = devm_kmemdup_array(sdev->dev,
++					snd_soc_acpi_intel_hda_machines,
++					2, /* we have one entry + sentinel in the array */
++					sizeof(snd_soc_acpi_intel_hda_machines[0]),
++					GFP_KERNEL);
++			if (!hda_mach) {
++				dev_err(bus->dev,
++					"%s: failed to duplicate the HDA match table\n",
++					__func__);
++				return;
++			}
+ 
+ 			dev_info(bus->dev, "using HDA machine driver %s now\n",
+ 				 hda_mach->drv_name);
+diff --git a/sound/soc/sof/ipc4-control.c b/sound/soc/sof/ipc4-control.c
+index 576f407cd456af..976a4794d61000 100644
+--- a/sound/soc/sof/ipc4-control.c
++++ b/sound/soc/sof/ipc4-control.c
+@@ -531,6 +531,14 @@ static int sof_ipc4_bytes_ext_put(struct snd_sof_control *scontrol,
+ 		return -EINVAL;
+ 	}
+ 
++	/* Check header id */
++	if (header.numid != SOF_CTRL_CMD_BINARY) {
++		dev_err_ratelimited(scomp->dev,
++				    "Incorrect numid for bytes put %d\n",
++				    header.numid);
++		return -EINVAL;
++	}
++
+ 	/* Verify the ABI header first */
+ 	if (copy_from_user(&abi_hdr, tlvd->tlv, sizeof(abi_hdr)))
+ 		return -EFAULT;
+@@ -613,7 +621,8 @@ static int _sof_ipc4_bytes_ext_get(struct snd_sof_control *scontrol,
+ 	if (data_size > size)
+ 		return -ENOSPC;
+ 
+-	header.numid = scontrol->comp_id;
++	/* Set header id and length */
++	header.numid = SOF_CTRL_CMD_BINARY;
+ 	header.length = data_size;
+ 
+ 	if (copy_to_user(tlvd, &header, sizeof(struct snd_ctl_tlv)))
+diff --git a/sound/soc/sof/ipc4-pcm.c b/sound/soc/sof/ipc4-pcm.c
+index 4df2be3d39eba0..2fe4969cdc3b47 100644
+--- a/sound/soc/sof/ipc4-pcm.c
++++ b/sound/soc/sof/ipc4-pcm.c
+@@ -794,7 +794,8 @@ static int sof_ipc4_pcm_setup(struct snd_sof_dev *sdev, struct snd_sof_pcm *spcm
+ 
+ 		spcm->stream[stream].private = stream_priv;
+ 
+-		if (!support_info)
++		/* Delay reporting is only supported on playback */
++		if (!support_info || stream == SNDRV_PCM_STREAM_CAPTURE)
+ 			continue;
+ 
+ 		time_info = kzalloc(sizeof(*time_info), GFP_KERNEL);
+diff --git a/sound/soc/sof/topology.c b/sound/soc/sof/topology.c
+index 37ca15cc5728ca..f9708b8fd73b6a 100644
+--- a/sound/soc/sof/topology.c
++++ b/sound/soc/sof/topology.c
+@@ -1059,7 +1059,7 @@ static int sof_connect_dai_widget(struct snd_soc_component *scomp,
+ 				  struct snd_sof_dai *dai)
+ {
+ 	struct snd_soc_card *card = scomp->card;
+-	struct snd_soc_pcm_runtime *rtd;
++	struct snd_soc_pcm_runtime *rtd, *full, *partial;
+ 	struct snd_soc_dai *cpu_dai;
+ 	int stream;
+ 	int i;
+@@ -1076,12 +1076,22 @@ static int sof_connect_dai_widget(struct snd_soc_component *scomp,
+ 	else
+ 		goto end;
+ 
++	full = NULL;
++	partial = NULL;
+ 	list_for_each_entry(rtd, &card->rtd_list, list) {
+ 		/* does stream match DAI link ? */
+-		if (!rtd->dai_link->stream_name ||
+-		    !strstr(rtd->dai_link->stream_name, w->sname))
+-			continue;
++		if (rtd->dai_link->stream_name) {
++			if (!strcmp(rtd->dai_link->stream_name, w->sname)) {
++				full = rtd;
++				break;
++			} else if (strstr(rtd->dai_link->stream_name, w->sname)) {
++				partial = rtd;
++			}
++		}
++	}
+ 
++	rtd = full ? full : partial;
++	if (rtd) {
+ 		for_each_rtd_cpu_dais(rtd, i, cpu_dai) {
+ 			/*
+ 			 * Please create DAI widget in the right order
+diff --git a/sound/soc/sunxi/sun4i-codec.c b/sound/soc/sunxi/sun4i-codec.c
+index 330bc0c09f56bf..93dd88fb805dd7 100644
+--- a/sound/soc/sunxi/sun4i-codec.c
++++ b/sound/soc/sunxi/sun4i-codec.c
+@@ -21,6 +21,7 @@
+ #include <linux/gpio/consumer.h>
+ 
+ #include <sound/core.h>
++#include <sound/jack.h>
+ #include <sound/pcm.h>
+ #include <sound/pcm_params.h>
+ #include <sound/soc.h>
+@@ -235,6 +236,7 @@ struct sun4i_codec {
+ 	struct clk	*clk_module;
+ 	struct reset_control *rst;
+ 	struct gpio_desc *gpio_pa;
++	struct gpio_desc *gpio_hp;
+ 
+ 	/* ADC_FIFOC register is at different offset on different SoCs */
+ 	struct regmap_field *reg_adc_fifoc;
+@@ -1263,6 +1265,49 @@ static struct snd_soc_dai_driver dummy_cpu_dai = {
+ 	.ops = &dummy_dai_ops,
+ };
+ 
++static struct snd_soc_jack sun4i_headphone_jack;
++
++static struct snd_soc_jack_pin sun4i_headphone_jack_pins[] = {
++	{ .pin = "Headphone", .mask = SND_JACK_HEADPHONE },
++};
++
++static struct snd_soc_jack_gpio sun4i_headphone_jack_gpio = {
++	.name = "hp-det",
++	.report = SND_JACK_HEADPHONE,
++	.debounce_time = 150,
++};
++
++static int sun4i_codec_machine_init(struct snd_soc_pcm_runtime *rtd)
++{
++	struct snd_soc_card *card = rtd->card;
++	struct sun4i_codec *scodec = snd_soc_card_get_drvdata(card);
++	int ret;
++
++	if (scodec->gpio_hp) {
++		ret = snd_soc_card_jack_new_pins(card, "Headphone Jack",
++						 SND_JACK_HEADPHONE,
++						 &sun4i_headphone_jack,
++						 sun4i_headphone_jack_pins,
++						 ARRAY_SIZE(sun4i_headphone_jack_pins));
++		if (ret) {
++			dev_err(rtd->dev,
++				"Headphone jack creation failed: %d\n", ret);
++			return ret;
++		}
++
++		sun4i_headphone_jack_gpio.desc = scodec->gpio_hp;
++		ret = snd_soc_jack_add_gpios(&sun4i_headphone_jack, 1,
++					     &sun4i_headphone_jack_gpio);
++
++		if (ret) {
++			dev_err(rtd->dev, "Headphone GPIO not added: %d\n", ret);
++			return ret;
++		}
++	}
++
++	return 0;
++}
++
+ static struct snd_soc_dai_link *sun4i_codec_create_link(struct device *dev,
+ 							int *num_links)
+ {
+@@ -1288,6 +1333,7 @@ static struct snd_soc_dai_link *sun4i_codec_create_link(struct device *dev,
+ 	link->codecs->name	= dev_name(dev);
+ 	link->platforms->name	= dev_name(dev);
+ 	link->dai_fmt		= SND_SOC_DAIFMT_I2S;
++	link->init		= sun4i_codec_machine_init;
+ 
+ 	*num_links = 1;
+ 
+@@ -1728,6 +1774,13 @@ static int sun4i_codec_probe(struct platform_device *pdev)
+ 		return ret;
+ 	}
+ 
++	scodec->gpio_hp = devm_gpiod_get_optional(&pdev->dev, "hp-det", GPIOD_IN);
++	if (IS_ERR(scodec->gpio_hp)) {
++		ret = PTR_ERR(scodec->gpio_hp);
++		dev_err_probe(&pdev->dev, ret, "Failed to get hp-det gpio\n");
++		return ret;
++	}
++
+ 	/* reg_field setup */
+ 	scodec->reg_adc_fifoc = devm_regmap_field_alloc(&pdev->dev,
+ 							scodec->regmap,
+diff --git a/sound/usb/midi.c b/sound/usb/midi.c
+index 826ac870f24690..a792ada18863ac 100644
+--- a/sound/usb/midi.c
++++ b/sound/usb/midi.c
+@@ -1885,10 +1885,18 @@ static void snd_usbmidi_init_substream(struct snd_usb_midi *umidi,
+ 	}
+ 
+ 	port_info = find_port_info(umidi, number);
+-	name_format = port_info ? port_info->name :
+-		(jack_name != default_jack_name  ? "%s %s" : "%s %s %d");
+-	snprintf(substream->name, sizeof(substream->name),
+-		 name_format, umidi->card->shortname, jack_name, number + 1);
++	if (port_info || jack_name == default_jack_name ||
++	    strncmp(umidi->card->shortname, jack_name, strlen(umidi->card->shortname)) != 0) {
++		name_format = port_info ? port_info->name :
++			(jack_name != default_jack_name  ? "%s %s" : "%s %s %d");
++		snprintf(substream->name, sizeof(substream->name),
++			 name_format, umidi->card->shortname, jack_name, number + 1);
++	} else {
++		/* The manufacturer included the iProduct name in the jack
++		 * name, do not use both
++		 */
++		strscpy(substream->name, jack_name);
++	}
+ 
+ 	*rsubstream = substream;
+ }
+diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c
+index 9b75639434b815..0a764426d93586 100644
+--- a/tools/bpf/bpftool/common.c
++++ b/tools/bpf/bpftool/common.c
+@@ -461,10 +461,11 @@ int get_fd_type(int fd)
+ 		p_err("can't read link type: %s", strerror(errno));
+ 		return -1;
+ 	}
+-	if (n == sizeof(path)) {
++	if (n == sizeof(buf)) {
+ 		p_err("can't read link type: path too long!");
+ 		return -1;
+ 	}
++	buf[n] = '\0';
+ 
+ 	if (strstr(buf, "bpf-map"))
+ 		return BPF_OBJ_MAP;
+diff --git a/tools/build/Makefile.build b/tools/build/Makefile.build
+index 5fb3fb3d97e0fd..ffe988867703b4 100644
+--- a/tools/build/Makefile.build
++++ b/tools/build/Makefile.build
+@@ -149,6 +149,10 @@ objprefix    := $(subst ./,,$(OUTPUT)$(dir)/)
+ obj-y        := $(addprefix $(objprefix),$(obj-y))
+ subdir-obj-y := $(addprefix $(objprefix),$(subdir-obj-y))
+ 
++# Separate out test log files from real build objects.
++test-y       := $(filter %_log, $(obj-y))
++obj-y        := $(filter-out %_log, $(obj-y))
++
+ # Final '$(obj)-in.o' object
+ in-target := $(objprefix)$(obj)-in.o
+ 
+@@ -159,7 +163,7 @@ $(subdir-y):
+ 
+ $(sort $(subdir-obj-y)): $(subdir-y) ;
+ 
+-$(in-target): $(obj-y) FORCE
++$(in-target): $(obj-y) $(test-y) FORCE
+ 	$(call rule_mkdir)
+ 	$(call if_changed,$(host)ld_multi)
+ 
+diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
+index 4a939c90dc2e4b..552fd633f8200d 100644
+--- a/tools/include/uapi/linux/bpf.h
++++ b/tools/include/uapi/linux/bpf.h
+@@ -1206,6 +1206,7 @@ enum bpf_perf_event_type {
+ #define BPF_F_BEFORE		(1U << 3)
+ #define BPF_F_AFTER		(1U << 4)
+ #define BPF_F_ID		(1U << 5)
++#define BPF_F_PREORDER		(1U << 6)
+ #define BPF_F_LINK		BPF_F_LINK /* 1 << 13 */
+ 
+ /* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the
+diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
+index 5ff643e60d09ca..6e4d417604fa0f 100644
+--- a/tools/lib/bpf/libbpf.c
++++ b/tools/lib/bpf/libbpf.c
+@@ -2074,7 +2074,7 @@ static int set_kcfg_value_str(struct extern_desc *ext, char *ext_val,
+ 	}
+ 
+ 	len = strlen(value);
+-	if (value[len - 1] != '"') {
++	if (len < 2 || value[len - 1] != '"') {
+ 		pr_warn("extern (kcfg) '%s': invalid string config '%s'\n",
+ 			ext->name, value);
+ 		return -EINVAL;
+diff --git a/tools/net/ynl/lib/ynl.c b/tools/net/ynl/lib/ynl.c
+index ce32cb35007d6f..c4da34048ef858 100644
+--- a/tools/net/ynl/lib/ynl.c
++++ b/tools/net/ynl/lib/ynl.c
+@@ -364,7 +364,7 @@ int ynl_attr_validate(struct ynl_parse_arg *yarg, const struct nlattr *attr)
+ 		     "Invalid attribute (binary %s)", policy->name);
+ 		return -1;
+ 	case YNL_PT_NUL_STR:
+-		if ((!policy->len || len <= policy->len) && !data[len - 1])
++		if (len && (!policy->len || len <= policy->len) && !data[len - 1])
+ 			break;
+ 		yerr(yarg->ys, YNL_ERROR_ATTR_INVALID,
+ 		     "Invalid attribute (string %s)", policy->name);
+diff --git a/tools/net/ynl/ynl-gen-c.py b/tools/net/ynl/ynl-gen-c.py
+index 463f1394ab971b..c78f1c1bca75c3 100755
+--- a/tools/net/ynl/ynl-gen-c.py
++++ b/tools/net/ynl/ynl-gen-c.py
+@@ -2417,6 +2417,9 @@ def render_uapi(family, cw):
+ 
+     defines = []
+     for const in family['definitions']:
++        if const.get('header'):
++            continue
++
+         if const['type'] != 'const':
+             cw.writes_defines(defines)
+             defines = []
+diff --git a/tools/objtool/check.c b/tools/objtool/check.c
+index bea6461ac340d0..4fce0074076f3a 100644
+--- a/tools/objtool/check.c
++++ b/tools/objtool/check.c
+@@ -3353,7 +3353,7 @@ static int handle_insn_ops(struct instruction *insn,
+ 		if (update_cfi_state(insn, next_insn, &state->cfi, op))
+ 			return 1;
+ 
+-		if (!insn->alt_group)
++		if (!opts.uaccess || !insn->alt_group)
+ 			continue;
+ 
+ 		if (op->dest.type == OP_DEST_PUSHF) {
+@@ -3820,6 +3820,9 @@ static int validate_branch(struct objtool_file *file, struct symbol *func,
+ 			return 0;
+ 
+ 		case INSN_STAC:
++			if (!opts.uaccess)
++				break;
++
+ 			if (state.uaccess) {
+ 				WARN_INSN(insn, "recursive UACCESS enable");
+ 				return 1;
+@@ -3829,6 +3832,9 @@ static int validate_branch(struct objtool_file *file, struct symbol *func,
+ 			break;
+ 
+ 		case INSN_CLAC:
++			if (!opts.uaccess)
++				break;
++
+ 			if (!state.uaccess && func) {
+ 				WARN_INSN(insn, "redundant UACCESS disable");
+ 				return 1;
+@@ -4304,7 +4310,8 @@ static int validate_symbol(struct objtool_file *file, struct section *sec,
+ 	if (!insn || insn->ignore || insn->visited)
+ 		return 0;
+ 
+-	state->uaccess = sym->uaccess_safe;
++	if (opts.uaccess)
++		state->uaccess = sym->uaccess_safe;
+ 
+ 	ret = validate_branch(file, insn_func(insn), insn, *state);
+ 	if (ret)
+@@ -4751,8 +4758,10 @@ int check(struct objtool_file *file)
+ 	init_cfi_state(&force_undefined_cfi);
+ 	force_undefined_cfi.force_undefined = true;
+ 
+-	if (!cfi_hash_alloc(1UL << (file->elf->symbol_bits - 3)))
++	if (!cfi_hash_alloc(1UL << (file->elf->symbol_bits - 3))) {
++		ret = -1;
+ 		goto out;
++	}
+ 
+ 	cfi_hash_add(&init_cfi);
+ 	cfi_hash_add(&func_cfi);
+@@ -4769,7 +4778,7 @@ int check(struct objtool_file *file)
+ 	if (opts.retpoline) {
+ 		ret = validate_retpoline(file);
+ 		if (ret < 0)
+-			return ret;
++			goto out;
+ 		warnings += ret;
+ 	}
+ 
+@@ -4805,7 +4814,7 @@ int check(struct objtool_file *file)
+ 		 */
+ 		ret = validate_unrets(file);
+ 		if (ret < 0)
+-			return ret;
++			goto out;
+ 		warnings += ret;
+ 	}
+ 
+@@ -4868,7 +4877,7 @@ int check(struct objtool_file *file)
+ 	if (opts.prefix) {
+ 		ret = add_prefix_symbols(file);
+ 		if (ret < 0)
+-			return ret;
++			goto out;
+ 		warnings += ret;
+ 	}
+ 
+diff --git a/tools/power/x86/turbostat/turbostat.8 b/tools/power/x86/turbostat/turbostat.8
+index a3cf1d17163ae7..e4b00e13302b39 100644
+--- a/tools/power/x86/turbostat/turbostat.8
++++ b/tools/power/x86/turbostat/turbostat.8
+@@ -199,6 +199,7 @@ The system configuration dump (if --quiet is not used) is followed by statistics
+ \fBUncMHz\fP per-package uncore MHz, instantaneous sample.
+ .PP
+ \fBUMHz1.0\fP per-package uncore MHz for domain=1 and fabric_cluster=0, instantaneous sample.  System summary is the average of all packages.
++For the "--show" and "--hide" options, use "UncMHz" to operate on all UMHz*.* as a group.
+ .SH TOO MUCH INFORMATION EXAMPLE
+ By default, turbostat dumps all possible information -- a system configuration header, followed by columns for all counters.
+ This is ideal for remote debugging, use the "--out" option to save everything to a text file, and get that file to the expert helping you debug.
+diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
+index 77ef60980ee581..12424bf08551d0 100644
+--- a/tools/power/x86/turbostat/turbostat.c
++++ b/tools/power/x86/turbostat/turbostat.c
+@@ -6445,7 +6445,18 @@ static void probe_intel_uncore_frequency_cluster(void)
+ 		sprintf(path, "%s/current_freq_khz", path_base);
+ 		sprintf(name_buf, "UMHz%d.%d", domain_id, cluster_id);
+ 
+-		add_counter(0, path, name_buf, 0, SCOPE_PACKAGE, COUNTER_K2M, FORMAT_AVERAGE, 0, package_id);
++		/*
++		 * Once add_couter() is called, that counter is always read
++		 * and reported -- So it is effectively (enabled & present).
++		 * Only call add_counter() here if legacy BIC_UNCORE_MHZ (UncMHz)
++		 * is (enabled).  Since we are in this routine, we
++		 * know we will not probe and set (present) the legacy counter.
++		 *
++		 * This allows "--show/--hide UncMHz" to be effective for
++		 * the clustered MHz counters, as a group.
++		 */
++		if BIC_IS_ENABLED(BIC_UNCORE_MHZ)
++			add_counter(0, path, name_buf, 0, SCOPE_PACKAGE, COUNTER_K2M, FORMAT_AVERAGE, 0, package_id);
+ 
+ 		if (quiet)
+ 			continue;
+diff --git a/tools/testing/kunit/qemu_configs/x86_64.py b/tools/testing/kunit/qemu_configs/x86_64.py
+index dc794907686304..4a6bf4e048f5b0 100644
+--- a/tools/testing/kunit/qemu_configs/x86_64.py
++++ b/tools/testing/kunit/qemu_configs/x86_64.py
+@@ -7,4 +7,6 @@ CONFIG_SERIAL_8250_CONSOLE=y''',
+ 			   qemu_arch='x86_64',
+ 			   kernel_path='arch/x86/boot/bzImage',
+ 			   kernel_command_line='console=ttyS0',
+-			   extra_qemu_params=[])
++			   # qboot is faster than SeaBIOS and doesn't mess up
++			   # the terminal.
++			   extra_qemu_params=['-bios', 'qboot.rom'])
+diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c b/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c
+index 2d0796314862ac..0a99fd404f6dc0 100644
+--- a/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c
++++ b/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c
+@@ -68,7 +68,6 @@ static void test_sockmap_ktls_disconnect_after_delete(int family, int map)
+ 		goto close_cli;
+ 
+ 	err = disconnect(cli);
+-	ASSERT_OK(err, "disconnect");
+ 
+ close_cli:
+ 	close(cli);
+diff --git a/tools/testing/selftests/iommu/iommufd.c b/tools/testing/selftests/iommu/iommufd.c
+index 4927b9add5add9..06f252733660a1 100644
+--- a/tools/testing/selftests/iommu/iommufd.c
++++ b/tools/testing/selftests/iommu/iommufd.c
+@@ -289,6 +289,10 @@ TEST_F(iommufd_ioas, alloc_hwpt_nested)
+ 				    &test_hwpt_id);
+ 		test_err_hwpt_alloc(EINVAL, self->device_id, self->device_id, 0,
+ 				    &test_hwpt_id);
++		test_err_hwpt_alloc(EOPNOTSUPP, self->device_id, self->ioas_id,
++				    IOMMU_HWPT_ALLOC_NEST_PARENT |
++						IOMMU_HWPT_FAULT_ID_VALID,
++				    &test_hwpt_id);
+ 
+ 		test_cmd_hwpt_alloc(self->device_id, self->ioas_id,
+ 				    IOMMU_HWPT_ALLOC_NEST_PARENT,
+diff --git a/tools/testing/selftests/net/forwarding/bridge_mdb.sh b/tools/testing/selftests/net/forwarding/bridge_mdb.sh
+index d9d587454d2079..8c1597ebc2d38b 100755
+--- a/tools/testing/selftests/net/forwarding/bridge_mdb.sh
++++ b/tools/testing/selftests/net/forwarding/bridge_mdb.sh
+@@ -149,7 +149,7 @@ cfg_test_host_common()
+ 	check_err $? "Failed to add $name host entry"
+ 
+ 	bridge mdb replace dev br0 port br0 grp $grp $state vid 10 &> /dev/null
+-	check_fail $? "Managed to replace $name host entry"
++	check_err $? "Failed to replace $name host entry"
+ 
+ 	bridge mdb del dev br0 port br0 grp $grp $state vid 10
+ 	bridge mdb get dev br0 grp $grp vid 10 &> /dev/null
+diff --git a/tools/testing/selftests/net/gro.sh b/tools/testing/selftests/net/gro.sh
+index 02c21ff4ca81fd..aabd6e5480b8e5 100755
+--- a/tools/testing/selftests/net/gro.sh
++++ b/tools/testing/selftests/net/gro.sh
+@@ -100,5 +100,6 @@ trap cleanup EXIT
+ if [[ "${test}" == "all" ]]; then
+   run_all_tests
+ else
+-  run_test "${proto}" "${test}"
++  exit_code=$(run_test "${proto}" "${test}")
++  exit $exit_code
+ fi;


* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2025-05-27 20:05 Mike Pagano
  0 siblings, 0 replies; 82+ messages in thread
From: Mike Pagano @ 2025-05-27 20:05 UTC (permalink / raw
  To: gentoo-commits

commit:     ad3c2f8c5dbe5ded07ce02df94f678c4675c10cb
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue May 27 20:05:29 2025 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue May 27 20:05:29 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=ad3c2f8c

Fix RANDOM_KMALLOC_CACHE(S) typo

https://bugs.gentoo.org/956708

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 4567_distro-Gentoo-Kconfig.patch | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/4567_distro-Gentoo-Kconfig.patch b/4567_distro-Gentoo-Kconfig.patch
index 74e75c40..32f3467b 100644
--- a/4567_distro-Gentoo-Kconfig.patch
+++ b/4567_distro-Gentoo-Kconfig.patch
@@ -207,7 +207,7 @@
 +	select SECURITY_LANDLOCK
 +	select SCHED_CORE if SCHED_SMT
 +	select BUG_ON_DATA_CORRUPTION
-+	select RANDOM_KMALLOC_CACHE if SLUB_TINY=n
++	select RANDOM_KMALLOC_CACHES if SLUB_TINY=n
 +	select SCHED_STACK_END_CHECK
 +	select SECCOMP if HAVE_ARCH_SECCOMP
 +	select SECCOMP_FILTER if HAVE_ARCH_SECCOMP_FILTER


* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2025-05-22 13:37 Mike Pagano
  0 siblings, 0 replies; 82+ messages in thread
From: Mike Pagano @ 2025-05-22 13:37 UTC (permalink / raw
  To: gentoo-commits

commit:     279eb32689200c1342ad326c78428181c6b7a8ce
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu May 22 13:37:30 2025 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu May 22 13:37:30 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=279eb326

Linux patch 6.12.30

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1029_linux-6.12.30.patch | 6374 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 6378 insertions(+)

diff --git a/0000_README b/0000_README
index a161cb90..0b4156fb 100644
--- a/0000_README
+++ b/0000_README
@@ -159,6 +159,10 @@ Patch:  1028_linux-6.12.29.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.12.29
 
+Patch:  1029_linux-6.12.30.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.12.30
+
 Patch:  1500_fortify-copy-size-value-range-tracking-fix.patch
 From:   https://git.kernel.org/
 Desc:   fortify: Hide run-time copy size from value range tracking

diff --git a/1029_linux-6.12.30.patch b/1029_linux-6.12.30.patch
new file mode 100644
index 00000000..0979dd64
--- /dev/null
+++ b/1029_linux-6.12.30.patch
@@ -0,0 +1,6374 @@
+diff --git a/Documentation/netlink/specs/tc.yaml b/Documentation/netlink/specs/tc.yaml
+index b02d59a0349c44..c5579a5412fc9a 100644
+--- a/Documentation/netlink/specs/tc.yaml
++++ b/Documentation/netlink/specs/tc.yaml
+@@ -2017,7 +2017,8 @@ attribute-sets:
+     attributes:
+       -
+         name: act
+-        type: nest
++        type: indexed-array
++        sub-type: nest
+         nested-attributes: tc-act-attrs
+       -
+         name: police
+@@ -2250,7 +2251,8 @@ attribute-sets:
+     attributes:
+       -
+         name: act
+-        type: nest
++        type: indexed-array
++        sub-type: nest
+         nested-attributes: tc-act-attrs
+       -
+         name: police
+@@ -2745,7 +2747,7 @@ attribute-sets:
+         type: u16
+         byte-order: big-endian
+       -
+-        name: key-l2-tpv3-sid
++        name: key-l2tpv3-sid
+         type: u32
+         byte-order: big-endian
+       -
+@@ -3504,7 +3506,7 @@ attribute-sets:
+         name: rate64
+         type: u64
+       -
+-        name: prate4
++        name: prate64
+         type: u64
+       -
+         name: burst
+diff --git a/MAINTAINERS b/MAINTAINERS
+index de04c7ba8571bd..d0f18fdba068b0 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -17503,7 +17503,7 @@ F:	include/uapi/linux/ppdev.h
+ PARAVIRT_OPS INTERFACE
+ M:	Juergen Gross <jgross@suse.com>
+ R:	Ajay Kaher <ajay.kaher@broadcom.com>
+-R:	Alexey Makhalov <alexey.amakhalov@broadcom.com>
++R:	Alexey Makhalov <alexey.makhalov@broadcom.com>
+ R:	Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>
+ L:	virtualization@lists.linux.dev
+ L:	x86@kernel.org
+@@ -24729,7 +24729,7 @@ F:	drivers/misc/vmw_balloon.c
+ 
+ VMWARE HYPERVISOR INTERFACE
+ M:	Ajay Kaher <ajay.kaher@broadcom.com>
+-M:	Alexey Makhalov <alexey.amakhalov@broadcom.com>
++M:	Alexey Makhalov <alexey.makhalov@broadcom.com>
+ R:	Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>
+ L:	virtualization@lists.linux.dev
+ L:	x86@kernel.org
+@@ -24757,7 +24757,7 @@ F:	drivers/scsi/vmw_pvscsi.h
+ VMWARE VIRTUAL PTP CLOCK DRIVER
+ M:	Nick Shi <nick.shi@broadcom.com>
+ R:	Ajay Kaher <ajay.kaher@broadcom.com>
+-R:	Alexey Makhalov <alexey.amakhalov@broadcom.com>
++R:	Alexey Makhalov <alexey.makhalov@broadcom.com>
+ R:	Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>
+ L:	netdev@vger.kernel.org
+ S:	Supported
+diff --git a/Makefile b/Makefile
+index 7a06c48ffbaa5b..6e8afa78bbef66 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 12
+-SUBLEVEL = 29
++SUBLEVEL = 30
+ EXTRAVERSION =
+ NAME = Baby Opossum Posse
+ 
+diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-dreambox.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12b-dreambox.dtsi
+index de35fa2d7a6de3..8e3e3354ed67a9 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-g12b-dreambox.dtsi
++++ b/arch/arm64/boot/dts/amlogic/meson-g12b-dreambox.dtsi
+@@ -116,6 +116,10 @@ &arb {
+ 	status = "okay";
+ };
+ 
++&clkc_audio {
++	status = "okay";
++};
++
+ &frddr_a {
+ 	status = "okay";
+ };
+diff --git a/arch/arm64/boot/dts/freescale/imx8mp-var-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-var-som.dtsi
+index b2ac2583a59292..b59da91fdd041f 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mp-var-som.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mp-var-som.dtsi
+@@ -35,7 +35,6 @@ memory@40000000 {
+ 		      <0x1 0x00000000 0 0xc0000000>;
+ 	};
+ 
+-
+ 	reg_usdhc2_vmmc: regulator-usdhc2-vmmc {
+ 	        compatible = "regulator-fixed";
+ 	        regulator-name = "VSD_3V3";
+@@ -46,6 +45,16 @@ reg_usdhc2_vmmc: regulator-usdhc2-vmmc {
+ 	        startup-delay-us = <100>;
+ 	        off-on-delay-us = <12000>;
+ 	};
++
++	reg_usdhc2_vqmmc: regulator-usdhc2-vqmmc {
++		compatible = "regulator-gpio";
++		regulator-name = "VSD_VSEL";
++		regulator-min-microvolt = <1800000>;
++		regulator-max-microvolt = <3300000>;
++		gpios = <&gpio2 12 GPIO_ACTIVE_HIGH>;
++		states = <3300000 0x0 1800000 0x1>;
++		vin-supply = <&ldo5>;
++	};
+ };
+ 
+ &A53_0 {
+@@ -205,6 +214,7 @@ &usdhc2 {
+         pinctrl-2 = <&pinctrl_usdhc2_200mhz>, <&pinctrl_usdhc2_gpio>;
+         cd-gpios = <&gpio1 14 GPIO_ACTIVE_LOW>;
+         vmmc-supply = <&reg_usdhc2_vmmc>;
++	vqmmc-supply = <&reg_usdhc2_vqmmc>;
+         bus-width = <4>;
+         status = "okay";
+ };
+diff --git a/arch/arm64/boot/dts/rockchip/rk3588-friendlyelec-cm3588.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-friendlyelec-cm3588.dtsi
+index e3a9598b99fca8..cacffc851584fc 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3588-friendlyelec-cm3588.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3588-friendlyelec-cm3588.dtsi
+@@ -222,6 +222,10 @@ rt5616: audio-codec@1b {
+ 		compatible = "realtek,rt5616";
+ 		reg = <0x1b>;
+ 		#sound-dai-cells = <0>;
++		assigned-clocks = <&cru I2S0_8CH_MCLKOUT>;
++		assigned-clock-rates = <12288000>;
++		clocks = <&cru I2S0_8CH_MCLKOUT>;
++		clock-names = "mclk";
+ 	};
+ };
+ 
+diff --git a/arch/arm64/boot/dts/rockchip/rk3588j.dtsi b/arch/arm64/boot/dts/rockchip/rk3588j.dtsi
+index bce72bac4503b5..3045cb3bd68c63 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3588j.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3588j.dtsi
+@@ -11,20 +11,15 @@ cluster0_opp_table: opp-table-cluster0 {
+ 		compatible = "operating-points-v2";
+ 		opp-shared;
+ 
+-		opp-1416000000 {
+-			opp-hz = /bits/ 64 <1416000000>;
++		opp-1200000000 {
++			opp-hz = /bits/ 64 <1200000000>;
+ 			opp-microvolt = <750000 750000 950000>;
+ 			clock-latency-ns = <40000>;
+ 			opp-suspend;
+ 		};
+-		opp-1608000000 {
+-			opp-hz = /bits/ 64 <1608000000>;
+-			opp-microvolt = <887500 887500 950000>;
+-			clock-latency-ns = <40000>;
+-		};
+-		opp-1704000000 {
+-			opp-hz = /bits/ 64 <1704000000>;
+-			opp-microvolt = <937500 937500 950000>;
++		opp-1296000000 {
++			opp-hz = /bits/ 64 <1296000000>;
++			opp-microvolt = <775000 775000 950000>;
+ 			clock-latency-ns = <40000>;
+ 		};
+ 	};
+@@ -33,9 +28,14 @@ cluster1_opp_table: opp-table-cluster1 {
+ 		compatible = "operating-points-v2";
+ 		opp-shared;
+ 
++		opp-1200000000 {
++			opp-hz = /bits/ 64 <1200000000>;
++			opp-microvolt = <750000 750000 950000>;
++			clock-latency-ns = <40000>;
++		};
+ 		opp-1416000000 {
+ 			opp-hz = /bits/ 64 <1416000000>;
+-			opp-microvolt = <750000 750000 950000>;
++			opp-microvolt = <762500 762500 950000>;
+ 			clock-latency-ns = <40000>;
+ 		};
+ 		opp-1608000000 {
+@@ -43,25 +43,20 @@ opp-1608000000 {
+ 			opp-microvolt = <787500 787500 950000>;
+ 			clock-latency-ns = <40000>;
+ 		};
+-		opp-1800000000 {
+-			opp-hz = /bits/ 64 <1800000000>;
+-			opp-microvolt = <875000 875000 950000>;
+-			clock-latency-ns = <40000>;
+-		};
+-		opp-2016000000 {
+-			opp-hz = /bits/ 64 <2016000000>;
+-			opp-microvolt = <950000 950000 950000>;
+-			clock-latency-ns = <40000>;
+-		};
+ 	};
+ 
+ 	cluster2_opp_table: opp-table-cluster2 {
+ 		compatible = "operating-points-v2";
+ 		opp-shared;
+ 
++		opp-1200000000 {
++			opp-hz = /bits/ 64 <1200000000>;
++			opp-microvolt = <750000 750000 950000>;
++			clock-latency-ns = <40000>;
++		};
+ 		opp-1416000000 {
+ 			opp-hz = /bits/ 64 <1416000000>;
+-			opp-microvolt = <750000 750000 950000>;
++			opp-microvolt = <762500 762500 950000>;
+ 			clock-latency-ns = <40000>;
+ 		};
+ 		opp-1608000000 {
+@@ -69,16 +64,6 @@ opp-1608000000 {
+ 			opp-microvolt = <787500 787500 950000>;
+ 			clock-latency-ns = <40000>;
+ 		};
+-		opp-1800000000 {
+-			opp-hz = /bits/ 64 <1800000000>;
+-			opp-microvolt = <875000 875000 950000>;
+-			clock-latency-ns = <40000>;
+-		};
+-		opp-2016000000 {
+-			opp-hz = /bits/ 64 <2016000000>;
+-			opp-microvolt = <950000 950000 950000>;
+-			clock-latency-ns = <40000>;
+-		};
+ 	};
+ 
+ 	gpu_opp_table: opp-table {
+@@ -104,10 +89,6 @@ opp-700000000 {
+ 			opp-hz = /bits/ 64 <700000000>;
+ 			opp-microvolt = <750000 750000 850000>;
+ 		};
+-		opp-850000000 {
+-			opp-hz = /bits/ 64 <800000000>;
+-			opp-microvolt = <787500 787500 850000>;
+-		};
+ 	};
+ };
+ 
+diff --git a/arch/loongarch/include/asm/ptrace.h b/arch/loongarch/include/asm/ptrace.h
+index a5b63c84f8541a..e5d21e836d993c 100644
+--- a/arch/loongarch/include/asm/ptrace.h
++++ b/arch/loongarch/include/asm/ptrace.h
+@@ -55,7 +55,7 @@ static inline void instruction_pointer_set(struct pt_regs *regs, unsigned long v
+ 
+ /* Query offset/name of register from its name/offset */
+ extern int regs_query_register_offset(const char *name);
+-#define MAX_REG_OFFSET (offsetof(struct pt_regs, __last))
++#define MAX_REG_OFFSET (offsetof(struct pt_regs, __last) - sizeof(unsigned long))
+ 
+ /**
+  * regs_get_register() - get register value from its offset
+diff --git a/arch/loongarch/include/asm/uprobes.h b/arch/loongarch/include/asm/uprobes.h
+index 99a0d198927f8b..025fc3f0a1028d 100644
+--- a/arch/loongarch/include/asm/uprobes.h
++++ b/arch/loongarch/include/asm/uprobes.h
+@@ -15,7 +15,6 @@ typedef u32 uprobe_opcode_t;
+ #define UPROBE_XOLBP_INSN	__emit_break(BRK_UPROBE_XOLBP)
+ 
+ struct arch_uprobe {
+-	unsigned long	resume_era;
+ 	u32	insn[2];
+ 	u32	ixol[2];
+ 	bool	simulate;
+diff --git a/arch/loongarch/kernel/genex.S b/arch/loongarch/kernel/genex.S
+index 4f09121417818d..733a7665e434dc 100644
+--- a/arch/loongarch/kernel/genex.S
++++ b/arch/loongarch/kernel/genex.S
+@@ -16,6 +16,7 @@
+ #include <asm/stackframe.h>
+ #include <asm/thread_info.h>
+ 
++	.section .cpuidle.text, "ax"
+ 	.align	5
+ SYM_FUNC_START(__arch_cpu_idle)
+ 	/* start of idle interrupt region */
+@@ -31,14 +32,16 @@ SYM_FUNC_START(__arch_cpu_idle)
+ 	 */
+ 	idle	0
+ 	/* end of idle interrupt region */
+-1:	jr	ra
++idle_exit:
++	jr	ra
+ SYM_FUNC_END(__arch_cpu_idle)
++	.previous
+ 
+ SYM_CODE_START(handle_vint)
+ 	UNWIND_HINT_UNDEFINED
+ 	BACKUP_T0T1
+ 	SAVE_ALL
+-	la_abs	t1, 1b
++	la_abs	t1, idle_exit
+ 	LONG_L	t0, sp, PT_ERA
+ 	/* 3 instructions idle interrupt region */
+ 	ori	t0, t0, 0b1100
+diff --git a/arch/loongarch/kernel/kfpu.c b/arch/loongarch/kernel/kfpu.c
+index ec5b28e570c963..4c476904227f95 100644
+--- a/arch/loongarch/kernel/kfpu.c
++++ b/arch/loongarch/kernel/kfpu.c
+@@ -18,11 +18,28 @@ static unsigned int euen_mask = CSR_EUEN_FPEN;
+ static DEFINE_PER_CPU(bool, in_kernel_fpu);
+ static DEFINE_PER_CPU(unsigned int, euen_current);
+ 
++static inline void fpregs_lock(void)
++{
++	if (IS_ENABLED(CONFIG_PREEMPT_RT))
++		preempt_disable();
++	else
++		local_bh_disable();
++}
++
++static inline void fpregs_unlock(void)
++{
++	if (IS_ENABLED(CONFIG_PREEMPT_RT))
++		preempt_enable();
++	else
++		local_bh_enable();
++}
++
+ void kernel_fpu_begin(void)
+ {
+ 	unsigned int *euen_curr;
+ 
+-	preempt_disable();
++	if (!irqs_disabled())
++		fpregs_lock();
+ 
+ 	WARN_ON(this_cpu_read(in_kernel_fpu));
+ 
+@@ -73,7 +90,8 @@ void kernel_fpu_end(void)
+ 
+ 	this_cpu_write(in_kernel_fpu, false);
+ 
+-	preempt_enable();
++	if (!irqs_disabled())
++		fpregs_unlock();
+ }
+ EXPORT_SYMBOL_GPL(kernel_fpu_end);
+ 
+diff --git a/arch/loongarch/kernel/time.c b/arch/loongarch/kernel/time.c
+index 46d7d40c87e38e..0535e5ddbfb9c9 100644
+--- a/arch/loongarch/kernel/time.c
++++ b/arch/loongarch/kernel/time.c
+@@ -111,7 +111,7 @@ static unsigned long __init get_loops_per_jiffy(void)
+ 	return lpj;
+ }
+ 
+-static long init_offset __nosavedata;
++static long init_offset;
+ 
+ void save_counter(void)
+ {
+diff --git a/arch/loongarch/kernel/uprobes.c b/arch/loongarch/kernel/uprobes.c
+index 87abc7137b738e..6022eb0f71dbce 100644
+--- a/arch/loongarch/kernel/uprobes.c
++++ b/arch/loongarch/kernel/uprobes.c
+@@ -42,7 +42,6 @@ int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
+ 	utask->autask.saved_trap_nr = current->thread.trap_nr;
+ 	current->thread.trap_nr = UPROBE_TRAP_NR;
+ 	instruction_pointer_set(regs, utask->xol_vaddr);
+-	user_enable_single_step(current);
+ 
+ 	return 0;
+ }
+@@ -53,13 +52,7 @@ int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
+ 
+ 	WARN_ON_ONCE(current->thread.trap_nr != UPROBE_TRAP_NR);
+ 	current->thread.trap_nr = utask->autask.saved_trap_nr;
+-
+-	if (auprobe->simulate)
+-		instruction_pointer_set(regs, auprobe->resume_era);
+-	else
+-		instruction_pointer_set(regs, utask->vaddr + LOONGARCH_INSN_SIZE);
+-
+-	user_disable_single_step(current);
++	instruction_pointer_set(regs, utask->vaddr + LOONGARCH_INSN_SIZE);
+ 
+ 	return 0;
+ }
+@@ -70,7 +63,6 @@ void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
+ 
+ 	current->thread.trap_nr = utask->autask.saved_trap_nr;
+ 	instruction_pointer_set(regs, utask->vaddr);
+-	user_disable_single_step(current);
+ }
+ 
+ bool arch_uprobe_xol_was_trapped(struct task_struct *t)
+@@ -90,7 +82,6 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
+ 
+ 	insn.word = auprobe->insn[0];
+ 	arch_simulate_insn(insn, regs);
+-	auprobe->resume_era = regs->csr_era;
+ 
+ 	return true;
+ }
+diff --git a/arch/loongarch/power/hibernate.c b/arch/loongarch/power/hibernate.c
+index 1e0590542f987c..e7b7346592cb2a 100644
+--- a/arch/loongarch/power/hibernate.c
++++ b/arch/loongarch/power/hibernate.c
+@@ -2,6 +2,7 @@
+ #include <asm/fpu.h>
+ #include <asm/loongson.h>
+ #include <asm/sections.h>
++#include <asm/time.h>
+ #include <asm/tlbflush.h>
+ #include <linux/suspend.h>
+ 
+@@ -14,6 +15,7 @@ struct pt_regs saved_regs;
+ 
+ void save_processor_state(void)
+ {
++	save_counter();
+ 	saved_crmd = csr_read32(LOONGARCH_CSR_CRMD);
+ 	saved_prmd = csr_read32(LOONGARCH_CSR_PRMD);
+ 	saved_euen = csr_read32(LOONGARCH_CSR_EUEN);
+@@ -26,6 +28,7 @@ void save_processor_state(void)
+ 
+ void restore_processor_state(void)
+ {
++	sync_counter();
+ 	csr_write32(saved_crmd, LOONGARCH_CSR_CRMD);
+ 	csr_write32(saved_prmd, LOONGARCH_CSR_PRMD);
+ 	csr_write32(saved_euen, LOONGARCH_CSR_EUEN);
+diff --git a/arch/riscv/boot/dts/sophgo/cv18xx.dtsi b/arch/riscv/boot/dts/sophgo/cv18xx.dtsi
+index b724fb6d9689ef..b8063ba6d6d7f3 100644
+--- a/arch/riscv/boot/dts/sophgo/cv18xx.dtsi
++++ b/arch/riscv/boot/dts/sophgo/cv18xx.dtsi
+@@ -309,7 +309,7 @@ dmac: dma-controller@4330000 {
+ 					   1024 1024 1024 1024>;
+ 			snps,priority = <0 1 2 3 4 5 6 7>;
+ 			snps,dma-masters = <2>;
+-			snps,data-width = <4>;
++			snps,data-width = <2>;
+ 			status = "disabled";
+ 		};
+ 
+diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
+index 9242c0649adf1b..4607610ef06283 100644
+--- a/arch/x86/kvm/mmu/mmu.c
++++ b/arch/x86/kvm/mmu/mmu.c
+@@ -7616,9 +7616,30 @@ void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
+ }
+ 
+ #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
++static bool hugepage_test_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
++				int level)
++{
++	return lpage_info_slot(gfn, slot, level)->disallow_lpage & KVM_LPAGE_MIXED_FLAG;
++}
++
++static void hugepage_clear_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
++				 int level)
++{
++	lpage_info_slot(gfn, slot, level)->disallow_lpage &= ~KVM_LPAGE_MIXED_FLAG;
++}
++
++static void hugepage_set_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
++			       int level)
++{
++	lpage_info_slot(gfn, slot, level)->disallow_lpage |= KVM_LPAGE_MIXED_FLAG;
++}
++
+ bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
+ 					struct kvm_gfn_range *range)
+ {
++	struct kvm_memory_slot *slot = range->slot;
++	int level;
++
+ 	/*
+ 	 * Zap SPTEs even if the slot can't be mapped PRIVATE.  KVM x86 only
+ 	 * supports KVM_MEMORY_ATTRIBUTE_PRIVATE, and so it *seems* like KVM
+@@ -7633,27 +7654,49 @@ bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
+ 	if (WARN_ON_ONCE(!kvm_arch_has_private_mem(kvm)))
+ 		return false;
+ 
+-	return kvm_unmap_gfn_range(kvm, range);
+-}
++	if (WARN_ON_ONCE(range->end <= range->start))
++		return false;
+ 
+-static bool hugepage_test_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
+-				int level)
+-{
+-	return lpage_info_slot(gfn, slot, level)->disallow_lpage & KVM_LPAGE_MIXED_FLAG;
+-}
++	/*
++	 * If the head and tail pages of the range currently allow a hugepage,
++	 * i.e. reside fully in the slot and don't have mixed attributes, then
++	 * add each corresponding hugepage range to the ongoing invalidation,
++	 * e.g. to prevent KVM from creating a hugepage in response to a fault
++	 * for a gfn whose attributes aren't changing.  Note, only the range
++	 * of gfns whose attributes are being modified needs to be explicitly
++	 * unmapped, as that will unmap any existing hugepages.
++	 */
++	for (level = PG_LEVEL_2M; level <= KVM_MAX_HUGEPAGE_LEVEL; level++) {
++		gfn_t start = gfn_round_for_level(range->start, level);
++		gfn_t end = gfn_round_for_level(range->end - 1, level);
++		gfn_t nr_pages = KVM_PAGES_PER_HPAGE(level);
+ 
+-static void hugepage_clear_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
+-				 int level)
+-{
+-	lpage_info_slot(gfn, slot, level)->disallow_lpage &= ~KVM_LPAGE_MIXED_FLAG;
+-}
++		if ((start != range->start || start + nr_pages > range->end) &&
++		    start >= slot->base_gfn &&
++		    start + nr_pages <= slot->base_gfn + slot->npages &&
++		    !hugepage_test_mixed(slot, start, level))
++			kvm_mmu_invalidate_range_add(kvm, start, start + nr_pages);
+ 
+-static void hugepage_set_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
+-			       int level)
+-{
+-	lpage_info_slot(gfn, slot, level)->disallow_lpage |= KVM_LPAGE_MIXED_FLAG;
++		if (end == start)
++			continue;
++
++		if ((end + nr_pages) > range->end &&
++		    (end + nr_pages) <= (slot->base_gfn + slot->npages) &&
++		    !hugepage_test_mixed(slot, end, level))
++			kvm_mmu_invalidate_range_add(kvm, end, end + nr_pages);
++	}
++
++	/* Unmap the old attribute page. */
++	if (range->arg.attributes & KVM_MEMORY_ATTRIBUTE_PRIVATE)
++		range->attr_filter = KVM_FILTER_SHARED;
++	else
++		range->attr_filter = KVM_FILTER_PRIVATE;
++
++	return kvm_unmap_gfn_range(kvm, range);
+ }
+ 
++
++
+ static bool hugepage_has_attrs(struct kvm *kvm, struct kvm_memory_slot *slot,
+ 			       gfn_t gfn, int level, unsigned long attrs)
+ {
+diff --git a/block/bio.c b/block/bio.c
+index 43d4ae26f47587..20c74696bf23b1 100644
+--- a/block/bio.c
++++ b/block/bio.c
+@@ -611,7 +611,7 @@ struct bio *bio_kmalloc(unsigned short nr_vecs, gfp_t gfp_mask)
+ {
+ 	struct bio *bio;
+ 
+-	if (nr_vecs > UIO_MAXIOV)
++	if (nr_vecs > BIO_MAX_INLINE_VECS)
+ 		return NULL;
+ 	return kmalloc(struct_size(bio, bi_inline_vecs, nr_vecs), gfp_mask);
+ }
+diff --git a/drivers/accel/ivpu/ivpu_debugfs.c b/drivers/accel/ivpu/ivpu_debugfs.c
+index eccedb0c8886bf..05a0d99ce95c4b 100644
+--- a/drivers/accel/ivpu/ivpu_debugfs.c
++++ b/drivers/accel/ivpu/ivpu_debugfs.c
+@@ -201,7 +201,7 @@ fw_log_fops_write(struct file *file, const char __user *user_buf, size_t size, l
+ 	if (!size)
+ 		return -EINVAL;
+ 
+-	ivpu_fw_log_clear(vdev);
++	ivpu_fw_log_mark_read(vdev);
+ 	return size;
+ }
+ 
+diff --git a/drivers/accel/ivpu/ivpu_fw.c b/drivers/accel/ivpu/ivpu_fw.c
+index 8a9395a2abb5d9..d12188730ac7fa 100644
+--- a/drivers/accel/ivpu/ivpu_fw.c
++++ b/drivers/accel/ivpu/ivpu_fw.c
+@@ -218,7 +218,7 @@ static int ivpu_fw_parse(struct ivpu_device *vdev)
+ 	fw->cold_boot_entry_point = fw_hdr->entry_point;
+ 	fw->entry_point = fw->cold_boot_entry_point;
+ 
+-	fw->trace_level = min_t(u32, ivpu_log_level, IVPU_FW_LOG_FATAL);
++	fw->trace_level = min_t(u32, ivpu_fw_log_level, IVPU_FW_LOG_FATAL);
+ 	fw->trace_destination_mask = VPU_TRACE_DESTINATION_VERBOSE_TRACING;
+ 	fw->trace_hw_component_mask = -1;
+ 
+@@ -323,7 +323,7 @@ static int ivpu_fw_mem_init(struct ivpu_device *vdev)
+ 		goto err_free_fw_mem;
+ 	}
+ 
+-	if (ivpu_log_level <= IVPU_FW_LOG_INFO)
++	if (ivpu_fw_log_level <= IVPU_FW_LOG_INFO)
+ 		log_verb_size = IVPU_FW_VERBOSE_BUFFER_LARGE_SIZE;
+ 	else
+ 		log_verb_size = IVPU_FW_VERBOSE_BUFFER_SMALL_SIZE;
+diff --git a/drivers/accel/ivpu/ivpu_fw_log.c b/drivers/accel/ivpu/ivpu_fw_log.c
+index ef0adb5e0fbeb3..337c906b021072 100644
+--- a/drivers/accel/ivpu/ivpu_fw_log.c
++++ b/drivers/accel/ivpu/ivpu_fw_log.c
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0-only
+ /*
+- * Copyright (C) 2020-2023 Intel Corporation
++ * Copyright (C) 2020-2024 Intel Corporation
+  */
+ 
+ #include <linux/ctype.h>
+@@ -15,19 +15,19 @@
+ #include "ivpu_fw_log.h"
+ #include "ivpu_gem.h"
+ 
+-#define IVPU_FW_LOG_LINE_LENGTH	  256
++#define IVPU_FW_LOG_LINE_LENGTH	256
+ 
+-unsigned int ivpu_log_level = IVPU_FW_LOG_ERROR;
+-module_param(ivpu_log_level, uint, 0444);
+-MODULE_PARM_DESC(ivpu_log_level,
+-		 "NPU firmware default trace level: debug=" __stringify(IVPU_FW_LOG_DEBUG)
++unsigned int ivpu_fw_log_level = IVPU_FW_LOG_ERROR;
++module_param_named(fw_log_level, ivpu_fw_log_level, uint, 0444);
++MODULE_PARM_DESC(fw_log_level,
++		 "NPU firmware default log level: debug=" __stringify(IVPU_FW_LOG_DEBUG)
+ 		 " info=" __stringify(IVPU_FW_LOG_INFO)
+ 		 " warn=" __stringify(IVPU_FW_LOG_WARN)
+ 		 " error=" __stringify(IVPU_FW_LOG_ERROR)
+ 		 " fatal=" __stringify(IVPU_FW_LOG_FATAL));
+ 
+-static int fw_log_ptr(struct ivpu_device *vdev, struct ivpu_bo *bo, u32 *offset,
+-		      struct vpu_tracing_buffer_header **log_header)
++static int fw_log_from_bo(struct ivpu_device *vdev, struct ivpu_bo *bo, u32 *offset,
++			  struct vpu_tracing_buffer_header **out_log)
+ {
+ 	struct vpu_tracing_buffer_header *log;
+ 
+@@ -48,7 +48,7 @@ static int fw_log_ptr(struct ivpu_device *vdev, struct ivpu_bo *bo, u32 *offset,
+ 		return -EINVAL;
+ 	}
+ 
+-	*log_header = log;
++	*out_log = log;
+ 	*offset += log->size;
+ 
+ 	ivpu_dbg(vdev, FW_BOOT,
+@@ -59,7 +59,7 @@ static int fw_log_ptr(struct ivpu_device *vdev, struct ivpu_bo *bo, u32 *offset,
+ 	return 0;
+ }
+ 
+-static void buffer_print(char *buffer, u32 size, struct drm_printer *p)
++static void fw_log_print_lines(char *buffer, u32 size, struct drm_printer *p)
+ {
+ 	char line[IVPU_FW_LOG_LINE_LENGTH];
+ 	u32 index = 0;
+@@ -87,56 +87,89 @@ static void buffer_print(char *buffer, u32 size, struct drm_printer *p)
+ 	}
+ 	line[index] = 0;
+ 	if (index != 0)
+-		drm_printf(p, "%s\n", line);
++		drm_printf(p, "%s", line);
+ }
+ 
+-static void fw_log_print_buffer(struct ivpu_device *vdev, struct vpu_tracing_buffer_header *log,
+-				const char *prefix, bool only_new_msgs, struct drm_printer *p)
++static void fw_log_print_buffer(struct vpu_tracing_buffer_header *log, const char *prefix,
++				bool only_new_msgs, struct drm_printer *p)
+ {
+-	char *log_buffer = (void *)log + log->header_size;
+-	u32 log_size = log->size - log->header_size;
+-	u32 log_start = log->read_index;
+-	u32 log_end = log->write_index;
+-
+-	if (!(log->write_index || log->wrap_count) ||
+-	    (log->write_index == log->read_index && only_new_msgs)) {
+-		drm_printf(p, "==== %s \"%s\" log empty ====\n", prefix, log->name);
+-		return;
++	char *log_data = (void *)log + log->header_size;
++	u32 data_size = log->size - log->header_size;
++	u32 log_start = only_new_msgs ? READ_ONCE(log->read_index) : 0;
++	u32 log_end = READ_ONCE(log->write_index);
++
++	if (log->wrap_count == log->read_wrap_count) {
++		if (log_end <= log_start) {
++			drm_printf(p, "==== %s \"%s\" log empty ====\n", prefix, log->name);
++			return;
++		}
++	} else if (log->wrap_count == log->read_wrap_count + 1) {
++		if (log_end > log_start)
++			log_start = log_end;
++	} else {
++		log_start = log_end;
+ 	}
+ 
+ 	drm_printf(p, "==== %s \"%s\" log start ====\n", prefix, log->name);
+-	if (log->write_index > log->read_index) {
+-		buffer_print(log_buffer + log_start, log_end - log_start, p);
++	if (log_end > log_start) {
++		fw_log_print_lines(log_data + log_start, log_end - log_start, p);
+ 	} else {
+-		buffer_print(log_buffer + log_end, log_size - log_end, p);
+-		buffer_print(log_buffer, log_end, p);
++		fw_log_print_lines(log_data + log_start, data_size - log_start, p);
++		fw_log_print_lines(log_data, log_end, p);
+ 	}
+-	drm_printf(p, "\x1b[0m");
++	drm_printf(p, "\n\x1b[0m"); /* add new line and clear formatting */
+ 	drm_printf(p, "==== %s \"%s\" log end   ====\n", prefix, log->name);
+ }
+ 
+-void ivpu_fw_log_print(struct ivpu_device *vdev, bool only_new_msgs, struct drm_printer *p)
++static void
++fw_log_print_all_in_bo(struct ivpu_device *vdev, const char *name,
++		       struct ivpu_bo *bo, bool only_new_msgs, struct drm_printer *p)
+ {
+-	struct vpu_tracing_buffer_header *log_header;
++	struct vpu_tracing_buffer_header *log;
+ 	u32 next = 0;
+ 
+-	while (fw_log_ptr(vdev, vdev->fw->mem_log_crit, &next, &log_header) == 0)
+-		fw_log_print_buffer(vdev, log_header, "NPU critical", only_new_msgs, p);
++	while (fw_log_from_bo(vdev, bo, &next, &log) == 0)
++		fw_log_print_buffer(log, name, only_new_msgs, p);
++}
++
++void ivpu_fw_log_print(struct ivpu_device *vdev, bool only_new_msgs, struct drm_printer *p)
++{
++	fw_log_print_all_in_bo(vdev, "NPU critical", vdev->fw->mem_log_crit, only_new_msgs, p);
++	fw_log_print_all_in_bo(vdev, "NPU verbose", vdev->fw->mem_log_verb, only_new_msgs, p);
++}
++
++void ivpu_fw_log_mark_read(struct ivpu_device *vdev)
++{
++	struct vpu_tracing_buffer_header *log;
++	u32 next;
++
++	next = 0;
++	while (fw_log_from_bo(vdev, vdev->fw->mem_log_crit, &next, &log) == 0) {
++		log->read_index = READ_ONCE(log->write_index);
++		log->read_wrap_count = READ_ONCE(log->wrap_count);
++	}
+ 
+ 	next = 0;
+-	while (fw_log_ptr(vdev, vdev->fw->mem_log_verb, &next, &log_header) == 0)
+-		fw_log_print_buffer(vdev, log_header, "NPU verbose", only_new_msgs, p);
++	while (fw_log_from_bo(vdev, vdev->fw->mem_log_verb, &next, &log) == 0) {
++		log->read_index = READ_ONCE(log->write_index);
++		log->read_wrap_count = READ_ONCE(log->wrap_count);
++	}
+ }
+ 
+-void ivpu_fw_log_clear(struct ivpu_device *vdev)
++void ivpu_fw_log_reset(struct ivpu_device *vdev)
+ {
+-	struct vpu_tracing_buffer_header *log_header;
+-	u32 next = 0;
++	struct vpu_tracing_buffer_header *log;
++	u32 next;
+ 
+-	while (fw_log_ptr(vdev, vdev->fw->mem_log_crit, &next, &log_header) == 0)
+-		log_header->read_index = log_header->write_index;
++	next = 0;
++	while (fw_log_from_bo(vdev, vdev->fw->mem_log_crit, &next, &log) == 0) {
++		log->read_index = 0;
++		log->read_wrap_count = 0;
++	}
+ 
+ 	next = 0;
+-	while (fw_log_ptr(vdev, vdev->fw->mem_log_verb, &next, &log_header) == 0)
+-		log_header->read_index = log_header->write_index;
++	while (fw_log_from_bo(vdev, vdev->fw->mem_log_verb, &next, &log) == 0) {
++		log->read_index = 0;
++		log->read_wrap_count = 0;
++	}
+ }
+diff --git a/drivers/accel/ivpu/ivpu_fw_log.h b/drivers/accel/ivpu/ivpu_fw_log.h
+index 4b390a99699d66..8bb528a73cb7e5 100644
+--- a/drivers/accel/ivpu/ivpu_fw_log.h
++++ b/drivers/accel/ivpu/ivpu_fw_log.h
+@@ -1,6 +1,6 @@
+ /* SPDX-License-Identifier: GPL-2.0-only */
+ /*
+- * Copyright (C) 2020-2023 Intel Corporation
++ * Copyright (C) 2020-2024 Intel Corporation
+  */
+ 
+ #ifndef __IVPU_FW_LOG_H__
+@@ -17,14 +17,15 @@
+ #define IVPU_FW_LOG_ERROR   4
+ #define IVPU_FW_LOG_FATAL   5
+ 
+-extern unsigned int ivpu_log_level;
+-
+ #define IVPU_FW_VERBOSE_BUFFER_SMALL_SIZE	SZ_1M
+ #define IVPU_FW_VERBOSE_BUFFER_LARGE_SIZE	SZ_8M
+ #define IVPU_FW_CRITICAL_BUFFER_SIZE		SZ_512K
+ 
++extern unsigned int ivpu_fw_log_level;
++
+ void ivpu_fw_log_print(struct ivpu_device *vdev, bool only_new_msgs, struct drm_printer *p);
+-void ivpu_fw_log_clear(struct ivpu_device *vdev);
++void ivpu_fw_log_mark_read(struct ivpu_device *vdev);
++void ivpu_fw_log_reset(struct ivpu_device *vdev);
+ 
+ 
+ #endif /* __IVPU_FW_LOG_H__ */
+diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c
+index d1fbad78f61ba0..2269569bdee7bb 100644
+--- a/drivers/accel/ivpu/ivpu_pm.c
++++ b/drivers/accel/ivpu/ivpu_pm.c
+@@ -38,6 +38,7 @@ static void ivpu_pm_prepare_cold_boot(struct ivpu_device *vdev)
+ 
+ 	ivpu_cmdq_reset_all_contexts(vdev);
+ 	ivpu_ipc_reset(vdev);
++	ivpu_fw_log_reset(vdev);
+ 	ivpu_fw_load(vdev);
+ 	fw->entry_point = fw->cold_boot_entry_point;
+ }
+diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c
+index f73ce6e13065dd..54676e3d82dd59 100644
+--- a/drivers/acpi/pptt.c
++++ b/drivers/acpi/pptt.c
+@@ -231,16 +231,18 @@ static int acpi_pptt_leaf_node(struct acpi_table_header *table_hdr,
+ 			     sizeof(struct acpi_table_pptt));
+ 	proc_sz = sizeof(struct acpi_pptt_processor);
+ 
+-	while ((unsigned long)entry + proc_sz < table_end) {
++	/* ignore subtable types that are smaller than a processor node */
++	while ((unsigned long)entry + proc_sz <= table_end) {
+ 		cpu_node = (struct acpi_pptt_processor *)entry;
++
+ 		if (entry->type == ACPI_PPTT_TYPE_PROCESSOR &&
+ 		    cpu_node->parent == node_entry)
+ 			return 0;
+ 		if (entry->length == 0)
+ 			return 0;
++
+ 		entry = ACPI_ADD_PTR(struct acpi_subtable_header, entry,
+ 				     entry->length);
+-
+ 	}
+ 	return 1;
+ }
+@@ -273,15 +275,18 @@ static struct acpi_pptt_processor *acpi_find_processor_node(struct acpi_table_he
+ 	proc_sz = sizeof(struct acpi_pptt_processor);
+ 
+ 	/* find the processor structure associated with this cpuid */
+-	while ((unsigned long)entry + proc_sz < table_end) {
++	while ((unsigned long)entry + proc_sz <= table_end) {
+ 		cpu_node = (struct acpi_pptt_processor *)entry;
+ 
+ 		if (entry->length == 0) {
+ 			pr_warn("Invalid zero length subtable\n");
+ 			break;
+ 		}
++		/* entry->length may not equal proc_sz, revalidate the processor structure length */
+ 		if (entry->type == ACPI_PPTT_TYPE_PROCESSOR &&
+ 		    acpi_cpu_id == cpu_node->acpi_processor_id &&
++		    (unsigned long)entry + entry->length <= table_end &&
++		    entry->length == proc_sz + cpu_node->number_of_priv_resources * sizeof(u32) &&
+ 		     acpi_pptt_leaf_node(table_hdr, cpu_node)) {
+ 			return (struct acpi_pptt_processor *)entry;
+ 		}
+diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c
+index 84a1ad61c4ad5f..56b875a6b1fba3 100644
+--- a/drivers/bluetooth/btnxpuart.c
++++ b/drivers/bluetooth/btnxpuart.c
+@@ -612,8 +612,10 @@ static int nxp_download_firmware(struct hci_dev *hdev)
+ 							 &nxpdev->tx_state),
+ 					       msecs_to_jiffies(60000));
+ 
+-	release_firmware(nxpdev->fw);
+-	memset(nxpdev->fw_name, 0, sizeof(nxpdev->fw_name));
++	if (nxpdev->fw && strlen(nxpdev->fw_name)) {
++		release_firmware(nxpdev->fw);
++		memset(nxpdev->fw_name, 0, sizeof(nxpdev->fw_name));
++	}
+ 
+ 	if (err == 0) {
+ 		bt_dev_err(hdev, "FW Download Timeout. offset: %d",
+diff --git a/drivers/char/tpm/tpm2-sessions.c b/drivers/char/tpm/tpm2-sessions.c
+index b0f13c8ea79c7a..ecea089157301f 100644
+--- a/drivers/char/tpm/tpm2-sessions.c
++++ b/drivers/char/tpm/tpm2-sessions.c
+@@ -40,11 +40,6 @@
+  *
+  * These are the usage functions:
+  *
+- * tpm2_start_auth_session() which allocates the opaque auth structure
+- *	and gets a session from the TPM.  This must be called before
+- *	any of the following functions.  The session is protected by a
+- *	session_key which is derived from a random salt value
+- *	encrypted to the NULL seed.
+  * tpm2_end_auth_session() kills the session and frees the resources.
+  *	Under normal operation this function is done by
+  *	tpm_buf_check_hmac_response(), so this is only to be used on
+@@ -963,16 +958,13 @@ static int tpm2_load_null(struct tpm_chip *chip, u32 *null_key)
+ }
+ 
+ /**
+- * tpm2_start_auth_session() - create a HMAC authentication session with the TPM
+- * @chip: the TPM chip structure to create the session with
++ * tpm2_start_auth_session() - Create an HMAC authentication session
++ * @chip:	A TPM chip
+  *
+- * This function loads the NULL seed from its saved context and starts
+- * an authentication session on the null seed, fills in the
+- * @chip->auth structure to contain all the session details necessary
+- * for performing the HMAC, encrypt and decrypt operations and
+- * returns.  The NULL seed is flushed before this function returns.
++ * Loads the ephemeral key (null seed), and starts an HMAC authenticated
++ * session. The null seed is flushed before the return.
+  *
+- * Return: zero on success or actual error encountered.
++ * Returns zero on success, or a POSIX error code.
+  */
+ int tpm2_start_auth_session(struct tpm_chip *chip)
+ {
+@@ -1024,7 +1016,7 @@ int tpm2_start_auth_session(struct tpm_chip *chip)
+ 	/* hash algorithm for session */
+ 	tpm_buf_append_u16(&buf, TPM_ALG_SHA256);
+ 
+-	rc = tpm_transmit_cmd(chip, &buf, 0, "start auth session");
++	rc = tpm_ret_to_err(tpm_transmit_cmd(chip, &buf, 0, "StartAuthSession"));
+ 	tpm2_flush_context(chip, null_key);
+ 
+ 	if (rc == TPM2_RC_SUCCESS)
+diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h
+index 970d02c337c7f1..6c3aa480396b64 100644
+--- a/drivers/char/tpm/tpm_tis_core.h
++++ b/drivers/char/tpm/tpm_tis_core.h
+@@ -54,7 +54,7 @@ enum tis_int_flags {
+ enum tis_defaults {
+ 	TIS_MEM_LEN = 0x5000,
+ 	TIS_SHORT_TIMEOUT = 750,	/* ms */
+-	TIS_LONG_TIMEOUT = 2000,	/* 2 sec */
++	TIS_LONG_TIMEOUT = 4000,	/* 4 secs */
+ 	TIS_TIMEOUT_MIN_ATML = 14700,	/* usecs */
+ 	TIS_TIMEOUT_MAX_ATML = 15000,	/* usecs */
+ };
+diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
+index 5f8d010516f07f..b1ef4546346d44 100644
+--- a/drivers/dma-buf/dma-resv.c
++++ b/drivers/dma-buf/dma-resv.c
+@@ -320,8 +320,9 @@ void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence,
+ 	count++;
+ 
+ 	dma_resv_list_set(fobj, i, fence, usage);
+-	/* pointer update must be visible before we extend the num_fences */
+-	smp_store_mb(fobj->num_fences, count);
++	/* fence update must be visible before we extend the num_fences */
++	smp_wmb();
++	fobj->num_fences = count;
+ }
+ EXPORT_SYMBOL(dma_resv_add_fence);
+ 
+diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
+index d891dfca358e20..91b2fbc0b86471 100644
+--- a/drivers/dma/dmatest.c
++++ b/drivers/dma/dmatest.c
+@@ -841,9 +841,9 @@ static int dmatest_func(void *data)
+ 		} else {
+ 			dma_async_issue_pending(chan);
+ 
+-			wait_event_timeout(thread->done_wait,
+-					   done->done,
+-					   msecs_to_jiffies(params->timeout));
++			wait_event_freezable_timeout(thread->done_wait,
++					done->done,
++					msecs_to_jiffies(params->timeout));
+ 
+ 			status = dma_async_is_tx_complete(chan, cookie, NULL,
+ 							  NULL);
+diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
+index 234c1c658ec792..18997f80bdc97a 100644
+--- a/drivers/dma/idxd/init.c
++++ b/drivers/dma/idxd/init.c
+@@ -153,6 +153,25 @@ static void idxd_cleanup_interrupts(struct idxd_device *idxd)
+ 	pci_free_irq_vectors(pdev);
+ }
+ 
++static void idxd_clean_wqs(struct idxd_device *idxd)
++{
++	struct idxd_wq *wq;
++	struct device *conf_dev;
++	int i;
++
++	for (i = 0; i < idxd->max_wqs; i++) {
++		wq = idxd->wqs[i];
++		if (idxd->hw.wq_cap.op_config)
++			bitmap_free(wq->opcap_bmap);
++		kfree(wq->wqcfg);
++		conf_dev = wq_confdev(wq);
++		put_device(conf_dev);
++		kfree(wq);
++	}
++	bitmap_free(idxd->wq_enable_map);
++	kfree(idxd->wqs);
++}
++
+ static int idxd_setup_wqs(struct idxd_device *idxd)
+ {
+ 	struct device *dev = &idxd->pdev->dev;
+@@ -167,8 +186,8 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
+ 
+ 	idxd->wq_enable_map = bitmap_zalloc_node(idxd->max_wqs, GFP_KERNEL, dev_to_node(dev));
+ 	if (!idxd->wq_enable_map) {
+-		kfree(idxd->wqs);
+-		return -ENOMEM;
++		rc = -ENOMEM;
++		goto err_bitmap;
+ 	}
+ 
+ 	for (i = 0; i < idxd->max_wqs; i++) {
+@@ -187,10 +206,8 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
+ 		conf_dev->bus = &dsa_bus_type;
+ 		conf_dev->type = &idxd_wq_device_type;
+ 		rc = dev_set_name(conf_dev, "wq%d.%d", idxd->id, wq->id);
+-		if (rc < 0) {
+-			put_device(conf_dev);
++		if (rc < 0)
+ 			goto err;
+-		}
+ 
+ 		mutex_init(&wq->wq_lock);
+ 		init_waitqueue_head(&wq->err_queue);
+@@ -201,7 +218,6 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
+ 		wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES;
+ 		wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev));
+ 		if (!wq->wqcfg) {
+-			put_device(conf_dev);
+ 			rc = -ENOMEM;
+ 			goto err;
+ 		}
+@@ -209,9 +225,8 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
+ 		if (idxd->hw.wq_cap.op_config) {
+ 			wq->opcap_bmap = bitmap_zalloc(IDXD_MAX_OPCAP_BITS, GFP_KERNEL);
+ 			if (!wq->opcap_bmap) {
+-				put_device(conf_dev);
+ 				rc = -ENOMEM;
+-				goto err;
++				goto err_opcap_bmap;
+ 			}
+ 			bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, IDXD_MAX_OPCAP_BITS);
+ 		}
+@@ -222,15 +237,46 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
+ 
+ 	return 0;
+ 
+- err:
++err_opcap_bmap:
++	kfree(wq->wqcfg);
++
++err:
++	put_device(conf_dev);
++	kfree(wq);
++
+ 	while (--i >= 0) {
+ 		wq = idxd->wqs[i];
++		if (idxd->hw.wq_cap.op_config)
++			bitmap_free(wq->opcap_bmap);
++		kfree(wq->wqcfg);
+ 		conf_dev = wq_confdev(wq);
+ 		put_device(conf_dev);
++		kfree(wq);
++
+ 	}
++	bitmap_free(idxd->wq_enable_map);
++
++err_bitmap:
++	kfree(idxd->wqs);
++
+ 	return rc;
+ }
+ 
++static void idxd_clean_engines(struct idxd_device *idxd)
++{
++	struct idxd_engine *engine;
++	struct device *conf_dev;
++	int i;
++
++	for (i = 0; i < idxd->max_engines; i++) {
++		engine = idxd->engines[i];
++		conf_dev = engine_confdev(engine);
++		put_device(conf_dev);
++		kfree(engine);
++	}
++	kfree(idxd->engines);
++}
++
+ static int idxd_setup_engines(struct idxd_device *idxd)
+ {
+ 	struct idxd_engine *engine;
+@@ -261,6 +307,7 @@ static int idxd_setup_engines(struct idxd_device *idxd)
+ 		rc = dev_set_name(conf_dev, "engine%d.%d", idxd->id, engine->id);
+ 		if (rc < 0) {
+ 			put_device(conf_dev);
++			kfree(engine);
+ 			goto err;
+ 		}
+ 
+@@ -274,10 +321,26 @@ static int idxd_setup_engines(struct idxd_device *idxd)
+ 		engine = idxd->engines[i];
+ 		conf_dev = engine_confdev(engine);
+ 		put_device(conf_dev);
++		kfree(engine);
+ 	}
++	kfree(idxd->engines);
++
+ 	return rc;
+ }
+ 
++static void idxd_clean_groups(struct idxd_device *idxd)
++{
++	struct idxd_group *group;
++	int i;
++
++	for (i = 0; i < idxd->max_groups; i++) {
++		group = idxd->groups[i];
++		put_device(group_confdev(group));
++		kfree(group);
++	}
++	kfree(idxd->groups);
++}
++
+ static int idxd_setup_groups(struct idxd_device *idxd)
+ {
+ 	struct device *dev = &idxd->pdev->dev;
+@@ -308,6 +371,7 @@ static int idxd_setup_groups(struct idxd_device *idxd)
+ 		rc = dev_set_name(conf_dev, "group%d.%d", idxd->id, group->id);
+ 		if (rc < 0) {
+ 			put_device(conf_dev);
++			kfree(group);
+ 			goto err;
+ 		}
+ 
+@@ -332,20 +396,18 @@ static int idxd_setup_groups(struct idxd_device *idxd)
+ 	while (--i >= 0) {
+ 		group = idxd->groups[i];
+ 		put_device(group_confdev(group));
++		kfree(group);
+ 	}
++	kfree(idxd->groups);
++
+ 	return rc;
+ }
+ 
+ static void idxd_cleanup_internals(struct idxd_device *idxd)
+ {
+-	int i;
+-
+-	for (i = 0; i < idxd->max_groups; i++)
+-		put_device(group_confdev(idxd->groups[i]));
+-	for (i = 0; i < idxd->max_engines; i++)
+-		put_device(engine_confdev(idxd->engines[i]));
+-	for (i = 0; i < idxd->max_wqs; i++)
+-		put_device(wq_confdev(idxd->wqs[i]));
++	idxd_clean_groups(idxd);
++	idxd_clean_engines(idxd);
++	idxd_clean_wqs(idxd);
+ 	destroy_workqueue(idxd->wq);
+ }
+ 
+@@ -388,7 +450,7 @@ static int idxd_init_evl(struct idxd_device *idxd)
+ static int idxd_setup_internals(struct idxd_device *idxd)
+ {
+ 	struct device *dev = &idxd->pdev->dev;
+-	int rc, i;
++	int rc;
+ 
+ 	init_waitqueue_head(&idxd->cmd_waitq);
+ 
+@@ -419,14 +481,11 @@ static int idxd_setup_internals(struct idxd_device *idxd)
+  err_evl:
+ 	destroy_workqueue(idxd->wq);
+  err_wkq_create:
+-	for (i = 0; i < idxd->max_groups; i++)
+-		put_device(group_confdev(idxd->groups[i]));
++	idxd_clean_groups(idxd);
+  err_group:
+-	for (i = 0; i < idxd->max_engines; i++)
+-		put_device(engine_confdev(idxd->engines[i]));
++	idxd_clean_engines(idxd);
+  err_engine:
+-	for (i = 0; i < idxd->max_wqs; i++)
+-		put_device(wq_confdev(idxd->wqs[i]));
++	idxd_clean_wqs(idxd);
+  err_wqs:
+ 	return rc;
+ }
+@@ -526,6 +585,17 @@ static void idxd_read_caps(struct idxd_device *idxd)
+ 		idxd->hw.iaa_cap.bits = ioread64(idxd->reg_base + IDXD_IAACAP_OFFSET);
+ }
+ 
++static void idxd_free(struct idxd_device *idxd)
++{
++	if (!idxd)
++		return;
++
++	put_device(idxd_confdev(idxd));
++	bitmap_free(idxd->opcap_bmap);
++	ida_free(&idxd_ida, idxd->id);
++	kfree(idxd);
++}
++
+ static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_data *data)
+ {
+ 	struct device *dev = &pdev->dev;
+@@ -543,28 +613,34 @@ static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_d
+ 	idxd_dev_set_type(&idxd->idxd_dev, idxd->data->type);
+ 	idxd->id = ida_alloc(&idxd_ida, GFP_KERNEL);
+ 	if (idxd->id < 0)
+-		return NULL;
++		goto err_ida;
+ 
+ 	idxd->opcap_bmap = bitmap_zalloc_node(IDXD_MAX_OPCAP_BITS, GFP_KERNEL, dev_to_node(dev));
+-	if (!idxd->opcap_bmap) {
+-		ida_free(&idxd_ida, idxd->id);
+-		return NULL;
+-	}
++	if (!idxd->opcap_bmap)
++		goto err_opcap;
+ 
+ 	device_initialize(conf_dev);
+ 	conf_dev->parent = dev;
+ 	conf_dev->bus = &dsa_bus_type;
+ 	conf_dev->type = idxd->data->dev_type;
+ 	rc = dev_set_name(conf_dev, "%s%d", idxd->data->name_prefix, idxd->id);
+-	if (rc < 0) {
+-		put_device(conf_dev);
+-		return NULL;
+-	}
++	if (rc < 0)
++		goto err_name;
+ 
+ 	spin_lock_init(&idxd->dev_lock);
+ 	spin_lock_init(&idxd->cmd_lock);
+ 
+ 	return idxd;
++
++err_name:
++	put_device(conf_dev);
++	bitmap_free(idxd->opcap_bmap);
++err_opcap:
++	ida_free(&idxd_ida, idxd->id);
++err_ida:
++	kfree(idxd);
++
++	return NULL;
+ }
+ 
+ static int idxd_enable_system_pasid(struct idxd_device *idxd)
+@@ -792,7 +868,7 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+  err:
+ 	pci_iounmap(pdev, idxd->reg_base);
+  err_iomap:
+-	put_device(idxd_confdev(idxd));
++	idxd_free(idxd);
+  err_idxd_alloc:
+ 	pci_disable_device(pdev);
+ 	return rc;
+@@ -829,7 +905,6 @@ static void idxd_shutdown(struct pci_dev *pdev)
+ static void idxd_remove(struct pci_dev *pdev)
+ {
+ 	struct idxd_device *idxd = pci_get_drvdata(pdev);
+-	struct idxd_irq_entry *irq_entry;
+ 
+ 	idxd_unregister_devices(idxd);
+ 	/*
+@@ -842,20 +917,12 @@ static void idxd_remove(struct pci_dev *pdev)
+ 	get_device(idxd_confdev(idxd));
+ 	device_unregister(idxd_confdev(idxd));
+ 	idxd_shutdown(pdev);
+-	if (device_pasid_enabled(idxd))
+-		idxd_disable_system_pasid(idxd);
+ 	idxd_device_remove_debugfs(idxd);
+-
+-	irq_entry = idxd_get_ie(idxd, 0);
+-	free_irq(irq_entry->vector, irq_entry);
+-	pci_free_irq_vectors(pdev);
++	idxd_cleanup(idxd);
+ 	pci_iounmap(pdev, idxd->reg_base);
+-	if (device_user_pasid_enabled(idxd))
+-		idxd_disable_sva(pdev);
+-	pci_disable_device(pdev);
+-	destroy_workqueue(idxd->wq);
+-	perfmon_pmu_remove(idxd);
+ 	put_device(idxd_confdev(idxd));
++	idxd_free(idxd);
++	pci_disable_device(pdev);
+ }
+ 
+ static struct pci_driver idxd_pci_driver = {
+diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
+index b3f27b3f92098a..7d89385c3c450c 100644
+--- a/drivers/dma/ti/k3-udma.c
++++ b/drivers/dma/ti/k3-udma.c
+@@ -1091,8 +1091,11 @@ static void udma_check_tx_completion(struct work_struct *work)
+ 	u32 residue_diff;
+ 	ktime_t time_diff;
+ 	unsigned long delay;
++	unsigned long flags;
+ 
+ 	while (1) {
++		spin_lock_irqsave(&uc->vc.lock, flags);
++
+ 		if (uc->desc) {
+ 			/* Get previous residue and time stamp */
+ 			residue_diff = uc->tx_drain.residue;
+@@ -1127,6 +1130,8 @@ static void udma_check_tx_completion(struct work_struct *work)
+ 				break;
+ 			}
+ 
++			spin_unlock_irqrestore(&uc->vc.lock, flags);
++
+ 			usleep_range(ktime_to_us(delay),
+ 				     ktime_to_us(delay) + 10);
+ 			continue;
+@@ -1143,6 +1148,8 @@ static void udma_check_tx_completion(struct work_struct *work)
+ 
+ 		break;
+ 	}
++
++	spin_unlock_irqrestore(&uc->vc.lock, flags);
+ }
+ 
+ static irqreturn_t udma_ring_irq_handler(int irq, void *data)
+@@ -4246,7 +4253,6 @@ static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
+ 				      struct of_dma *ofdma)
+ {
+ 	struct udma_dev *ud = ofdma->of_dma_data;
+-	dma_cap_mask_t mask = ud->ddev.cap_mask;
+ 	struct udma_filter_param filter_param;
+ 	struct dma_chan *chan;
+ 
+@@ -4278,7 +4284,7 @@ static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
+ 		}
+ 	}
+ 
+-	chan = __dma_request_channel(&mask, udma_dma_filter_fn, &filter_param,
++	chan = __dma_request_channel(&ud->ddev.cap_mask, udma_dma_filter_fn, &filter_param,
+ 				     ofdma->of_node);
+ 	if (!chan) {
+ 		dev_err(ud->dev, "get channel fail in %s.\n", __func__);
+diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
+index d764a3af634670..ef3aee1cabcfd0 100644
+--- a/drivers/gpio/gpio-pca953x.c
++++ b/drivers/gpio/gpio-pca953x.c
+@@ -1203,6 +1203,8 @@ static int pca953x_restore_context(struct pca953x_chip *chip)
+ 
+ 	guard(mutex)(&chip->i2c_lock);
+ 
++	if (chip->client->irq > 0)
++		enable_irq(chip->client->irq);
+ 	regcache_cache_only(chip->regmap, false);
+ 	regcache_mark_dirty(chip->regmap);
+ 	ret = pca953x_regcache_sync(chip);
+@@ -1215,6 +1217,10 @@ static int pca953x_restore_context(struct pca953x_chip *chip)
+ static void pca953x_save_context(struct pca953x_chip *chip)
+ {
+ 	guard(mutex)(&chip->i2c_lock);
++
++	/* Disable IRQ to prevent early triggering while regmap "cache only" is on */
++	if (chip->client->irq > 0)
++		disable_irq(chip->client->irq);
+ 	regcache_cache_only(chip->regmap, true);
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index b7aad43d9ad07b..7edf8d67a0fa5d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -853,6 +853,7 @@ struct amdgpu_device {
+ 	bool				need_swiotlb;
+ 	bool				accel_working;
+ 	struct notifier_block		acpi_nb;
++	struct notifier_block		pm_nb;
+ 	struct amdgpu_i2c_chan		*i2c_bus[AMDGPU_MAX_I2C_BUS];
+ 	struct debugfs_blob_wrapper     debugfs_vbios_blob;
+ 	struct debugfs_blob_wrapper     debugfs_discovery_blob;
+@@ -1570,11 +1571,9 @@ static inline void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_cap
+ #if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND)
+ bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev);
+ bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
+-void amdgpu_choose_low_power_state(struct amdgpu_device *adev);
+ #else
+ static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
+ static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; }
+-static inline void amdgpu_choose_low_power_state(struct amdgpu_device *adev) { }
+ #endif
+ 
+ void amdgpu_register_gpu_instance(struct amdgpu_device *adev);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+index b8d4e07d2043ed..bebfbc1497d8e0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+@@ -1533,22 +1533,4 @@ bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev)
+ #endif /* CONFIG_AMD_PMC */
+ }
+ 
+-/**
+- * amdgpu_choose_low_power_state
+- *
+- * @adev: amdgpu_device_pointer
+- *
+- * Choose the target low power state for the GPU
+- */
+-void amdgpu_choose_low_power_state(struct amdgpu_device *adev)
+-{
+-	if (adev->in_runpm)
+-		return;
+-
+-	if (amdgpu_acpi_is_s0ix_active(adev))
+-		adev->in_s0ix = true;
+-	else if (amdgpu_acpi_is_s3_active(adev))
+-		adev->in_s3 = true;
+-}
+-
+ #endif /* CONFIG_SUSPEND */
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
+index cfdf558b48b648..02138aa557935e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
+@@ -109,7 +109,7 @@ int amdgpu_unmap_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ 	struct drm_exec exec;
+ 	int r;
+ 
+-	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
++	drm_exec_init(&exec, 0, 0);
+ 	drm_exec_until_all_locked(&exec) {
+ 		r = amdgpu_vm_lock_pd(vm, &exec, 0);
+ 		if (likely(!r))
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 24d007715a14ae..cb102ee71d04c6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -145,6 +145,8 @@ const char *amdgpu_asic_name[] = {
+ };
+ 
+ static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev);
++static int amdgpu_device_pm_notifier(struct notifier_block *nb, unsigned long mode,
++				     void *data);
+ 
+ /**
+  * DOC: pcie_replay_count
+@@ -4519,6 +4521,11 @@ int amdgpu_device_init(struct amdgpu_device *adev,
+ 
+ 	amdgpu_device_check_iommu_direct_map(adev);
+ 
++	adev->pm_nb.notifier_call = amdgpu_device_pm_notifier;
++	r = register_pm_notifier(&adev->pm_nb);
++	if (r)
++		goto failed;
++
+ 	return 0;
+ 
+ release_ras_con:
+@@ -4583,6 +4590,8 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
+ 		drain_workqueue(adev->mman.bdev.wq);
+ 	adev->shutdown = true;
+ 
++	unregister_pm_notifier(&adev->pm_nb);
++
+ 	/* make sure IB test finished before entering exclusive mode
+ 	 * to avoid preemption on IB test
+ 	 */
+@@ -4712,6 +4721,33 @@ static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
+ /*
+  * Suspend & resume.
+  */
++/**
++ * amdgpu_device_pm_notifier - Notification block for Suspend/Hibernate events
++ * @nb: notifier block
++ * @mode: suspend mode
++ * @data: data
++ *
++ * This function is called when the system is about to suspend or hibernate.
++ * It is used to set the appropriate flags so that eviction can be optimized
++ * in the pm prepare callback.
++ */
++static int amdgpu_device_pm_notifier(struct notifier_block *nb, unsigned long mode,
++				     void *data)
++{
++	struct amdgpu_device *adev = container_of(nb, struct amdgpu_device, pm_nb);
++
++	switch (mode) {
++	case PM_HIBERNATION_PREPARE:
++		adev->in_s4 = true;
++		break;
++	case PM_POST_HIBERNATION:
++		adev->in_s4 = false;
++		break;
++	}
++
++	return NOTIFY_DONE;
++}
++
+ /**
+  * amdgpu_device_prepare - prepare for device suspend
+  *
+@@ -4726,15 +4762,13 @@ int amdgpu_device_prepare(struct drm_device *dev)
+ 	struct amdgpu_device *adev = drm_to_adev(dev);
+ 	int i, r;
+ 
+-	amdgpu_choose_low_power_state(adev);
+-
+ 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ 		return 0;
+ 
+ 	/* Evict the majority of BOs before starting suspend sequence */
+ 	r = amdgpu_device_evict_resources(adev);
+ 	if (r)
+-		goto unprepare;
++		return r;
+ 
+ 	flush_delayed_work(&adev->gfx.gfx_off_delay_work);
+ 
+@@ -4745,15 +4779,10 @@ int amdgpu_device_prepare(struct drm_device *dev)
+ 			continue;
+ 		r = adev->ip_blocks[i].version->funcs->prepare_suspend((void *)adev);
+ 		if (r)
+-			goto unprepare;
++			return r;
+ 	}
+ 
+ 	return 0;
+-
+-unprepare:
+-	adev->in_s0ix = adev->in_s3 = false;
+-
+-	return r;
+ }
+ 
+ /**
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index a9eb0927a7664f..1b479bd8513548 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -2635,7 +2635,6 @@ static int amdgpu_pmops_freeze(struct device *dev)
+ 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
+ 	int r;
+ 
+-	adev->in_s4 = true;
+ 	r = amdgpu_device_suspend(drm_dev, true);
+ 	if (r)
+ 		return r;
+@@ -2648,13 +2647,8 @@ static int amdgpu_pmops_freeze(struct device *dev)
+ static int amdgpu_pmops_thaw(struct device *dev)
+ {
+ 	struct drm_device *drm_dev = dev_get_drvdata(dev);
+-	struct amdgpu_device *adev = drm_to_adev(drm_dev);
+-	int r;
+-
+-	r = amdgpu_device_resume(drm_dev, true);
+-	adev->in_s4 = false;
+ 
+-	return r;
++	return amdgpu_device_resume(drm_dev, true);
+ }
+ 
+ static int amdgpu_pmops_poweroff(struct device *dev)
+@@ -2667,9 +2661,6 @@ static int amdgpu_pmops_poweroff(struct device *dev)
+ static int amdgpu_pmops_restore(struct device *dev)
+ {
+ 	struct drm_device *drm_dev = dev_get_drvdata(dev);
+-	struct amdgpu_device *adev = drm_to_adev(drm_dev);
+-
+-	adev->in_s4 = false;
+ 
+ 	return amdgpu_device_resume(drm_dev, true);
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+index 4e9c23d65b02ff..87aaf5f1224f41 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+@@ -747,6 +747,18 @@ static int gmc_v11_0_sw_init(void *handle)
+ 	adev->gmc.vram_type = vram_type;
+ 	adev->gmc.vram_vendor = vram_vendor;
+ 
++	/* The mall_size is already calculated as mall_size_per_umc * num_umc.
++	 * However, for gfx1151, which features a 2-to-1 UMC mapping,
++	 * the result must be multiplied by 2 to determine the actual mall size.
++	 */
++	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
++	case IP_VERSION(11, 5, 1):
++		adev->gmc.mall_size *= 2;
++		break;
++	default:
++		break;
++	}
++
+ 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ 	case IP_VERSION(11, 0, 0):
+ 	case IP_VERSION(11, 0, 1):
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 66c50a09d2dfd3..ff33760aa4fae0 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -12546,7 +12546,8 @@ int amdgpu_dm_process_dmub_aux_transfer_sync(
+ 		/* The reply is stored in the top nibble of the command. */
+ 		payload->reply[0] = (adev->dm.dmub_notify->aux_reply.command >> 4) & 0xF;
+ 
+-	if (!payload->write && p_notify->aux_reply.length)
++	/* a write req may also receive a byte indicating the number of partially written bytes */
++	if (p_notify->aux_reply.length)
+ 		memcpy(payload->data, p_notify->aux_reply.data,
+ 				p_notify->aux_reply.length);
+ 
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+index dca8384af95df0..fca0c31e14d8fc 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+@@ -62,6 +62,7 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
+ 	enum aux_return_code_type operation_result;
+ 	struct amdgpu_device *adev;
+ 	struct ddc_service *ddc;
++	uint8_t copy[16];
+ 
+ 	if (WARN_ON(msg->size > 16))
+ 		return -E2BIG;
+@@ -77,6 +78,11 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
+ 			(msg->request & DP_AUX_I2C_WRITE_STATUS_UPDATE) != 0;
+ 	payload.defer_delay = 0;
+ 
++	if (payload.write) {
++		memcpy(copy, msg->buffer, msg->size);
++		payload.data = copy;
++	}
++
+ 	result = dc_link_aux_transfer_raw(TO_DM_AUX(aux)->ddc_service, &payload,
+ 				      &operation_result);
+ 
+@@ -100,9 +106,9 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
+ 	 */
+ 	if (payload.write && result >= 0) {
+ 		if (result) {
+-			/*one byte indicating partially written bytes. Force 0 to retry*/
+-			drm_info(adev_to_drm(adev), "amdgpu: AUX partially written\n");
+-			result = 0;
++			/* one byte indicating partially written bytes */
++			drm_dbg_dp(adev_to_drm(adev), "amdgpu: AUX partially written\n");
++			result = payload.data[0];
+ 		} else if (!payload.reply[0])
+ 			/*I2C_ACK|AUX_ACK*/
+ 			result = msg->size;
+@@ -127,11 +133,11 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
+ 			break;
+ 		}
+ 
+-		drm_info(adev_to_drm(adev), "amdgpu: DP AUX transfer fail:%d\n", operation_result);
++		drm_dbg_dp(adev_to_drm(adev), "amdgpu: DP AUX transfer fail:%d\n", operation_result);
+ 	}
+ 
+ 	if (payload.reply[0])
+-		drm_info(adev_to_drm(adev), "amdgpu: AUX reply command not ACK: 0x%02x.",
++		drm_dbg_dp(adev_to_drm(adev), "amdgpu: AUX reply command not ACK: 0x%02x.",
+ 			payload.reply[0]);
+ 
+ 	return result;
+diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c
+index 1236e0f9a2560c..712aff7e17f7a0 100644
+--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c
++++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c
+@@ -120,10 +120,11 @@ void dpp401_set_cursor_attributes(
+ 	enum dc_cursor_color_format color_format = cursor_attributes->color_format;
+ 	int cur_rom_en = 0;
+ 
+-	// DCN4 should always do Cursor degamma for Cursor Color modes
+ 	if (color_format == CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA ||
+ 		color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA) {
+-		cur_rom_en = 1;
++		if (cursor_attributes->attribute_flags.bits.ENABLE_CURSOR_DEGAMMA) {
++			cur_rom_en = 1;
++		}
+ 	}
+ 
+ 	REG_UPDATE_3(CURSOR0_CONTROL,
+diff --git a/drivers/gpu/drm/drm_fbdev_dma.c b/drivers/gpu/drm/drm_fbdev_dma.c
+index 51c2d742d19980..7c8287c18e381f 100644
+--- a/drivers/gpu/drm/drm_fbdev_dma.c
++++ b/drivers/gpu/drm/drm_fbdev_dma.c
+@@ -105,6 +105,40 @@ static const struct fb_ops drm_fbdev_dma_deferred_fb_ops = {
+ 
+ static int drm_fbdev_dma_helper_fb_probe(struct drm_fb_helper *fb_helper,
+ 					 struct drm_fb_helper_surface_size *sizes)
++{
++	return drm_fbdev_dma_driver_fbdev_probe(fb_helper, sizes);
++}
++
++static int drm_fbdev_dma_helper_fb_dirty(struct drm_fb_helper *helper,
++					 struct drm_clip_rect *clip)
++{
++	struct drm_device *dev = helper->dev;
++	int ret;
++
++	/* Call damage handlers only if necessary */
++	if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2))
++		return 0;
++
++	if (helper->fb->funcs->dirty) {
++		ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
++		if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret))
++			return ret;
++	}
++
++	return 0;
++}
++
++static const struct drm_fb_helper_funcs drm_fbdev_dma_helper_funcs = {
++	.fb_probe = drm_fbdev_dma_helper_fb_probe,
++	.fb_dirty = drm_fbdev_dma_helper_fb_dirty,
++};
++
++/*
++ * struct drm_fb_helper
++ */
++
++int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
++				     struct drm_fb_helper_surface_size *sizes)
+ {
+ 	struct drm_client_dev *client = &fb_helper->client;
+ 	struct drm_device *dev = fb_helper->dev;
+@@ -148,6 +182,7 @@ static int drm_fbdev_dma_helper_fb_probe(struct drm_fb_helper *fb_helper,
+ 		goto err_drm_client_buffer_delete;
+ 	}
+ 
++	fb_helper->funcs = &drm_fbdev_dma_helper_funcs;
+ 	fb_helper->buffer = buffer;
+ 	fb_helper->fb = fb;
+ 
+@@ -211,30 +246,7 @@ static int drm_fbdev_dma_helper_fb_probe(struct drm_fb_helper *fb_helper,
+ 	drm_client_framebuffer_delete(buffer);
+ 	return ret;
+ }
+-
+-static int drm_fbdev_dma_helper_fb_dirty(struct drm_fb_helper *helper,
+-					 struct drm_clip_rect *clip)
+-{
+-	struct drm_device *dev = helper->dev;
+-	int ret;
+-
+-	/* Call damage handlers only if necessary */
+-	if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2))
+-		return 0;
+-
+-	if (helper->fb->funcs->dirty) {
+-		ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
+-		if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret))
+-			return ret;
+-	}
+-
+-	return 0;
+-}
+-
+-static const struct drm_fb_helper_funcs drm_fbdev_dma_helper_funcs = {
+-	.fb_probe = drm_fbdev_dma_helper_fb_probe,
+-	.fb_dirty = drm_fbdev_dma_helper_fb_dirty,
+-};
++EXPORT_SYMBOL(drm_fbdev_dma_driver_fbdev_probe);
+ 
+ /*
+  * struct drm_client_funcs
+diff --git a/drivers/gpu/drm/tiny/Kconfig b/drivers/gpu/drm/tiny/Kconfig
+index f6889f649bc182..ce17143d47a897 100644
+--- a/drivers/gpu/drm/tiny/Kconfig
++++ b/drivers/gpu/drm/tiny/Kconfig
+@@ -67,6 +67,7 @@ config DRM_OFDRM
+ config DRM_PANEL_MIPI_DBI
+ 	tristate "DRM support for MIPI DBI compatible panels"
+ 	depends on DRM && SPI
++	select DRM_CLIENT_SELECTION
+ 	select DRM_KMS_HELPER
+ 	select DRM_GEM_DMA_HELPER
+ 	select DRM_MIPI_DBI
+diff --git a/drivers/gpu/drm/tiny/panel-mipi-dbi.c b/drivers/gpu/drm/tiny/panel-mipi-dbi.c
+index f753cdffe6f827..ac159e8127d561 100644
+--- a/drivers/gpu/drm/tiny/panel-mipi-dbi.c
++++ b/drivers/gpu/drm/tiny/panel-mipi-dbi.c
+@@ -15,6 +15,7 @@
+ #include <linux/spi/spi.h>
+ 
+ #include <drm/drm_atomic_helper.h>
++#include <drm/drm_client_setup.h>
+ #include <drm/drm_drv.h>
+ #include <drm/drm_fbdev_dma.h>
+ #include <drm/drm_gem_atomic_helper.h>
+@@ -264,6 +265,7 @@ static const struct drm_driver panel_mipi_dbi_driver = {
+ 	.driver_features	= DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
+ 	.fops			= &panel_mipi_dbi_fops,
+ 	DRM_GEM_DMA_DRIVER_OPS_VMAP,
++	DRM_FBDEV_DMA_DRIVER_OPS,
+ 	.debugfs_init		= mipi_dbi_debugfs_init,
+ 	.name			= "panel-mipi-dbi",
+ 	.desc			= "MIPI DBI compatible display panel",
+@@ -388,7 +390,10 @@ static int panel_mipi_dbi_spi_probe(struct spi_device *spi)
+ 
+ 	spi_set_drvdata(spi, drm);
+ 
+-	drm_fbdev_dma_setup(drm, 0);
++	if (bpp == 16)
++		drm_client_setup_with_fourcc(drm, DRM_FORMAT_RGB565);
++	else
++		drm_client_setup_with_fourcc(drm, DRM_FORMAT_RGB888);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/xe/instructions/xe_mi_commands.h b/drivers/gpu/drm/xe/instructions/xe_mi_commands.h
+index 10ec2920d31b34..d4033278be9fca 100644
+--- a/drivers/gpu/drm/xe/instructions/xe_mi_commands.h
++++ b/drivers/gpu/drm/xe/instructions/xe_mi_commands.h
+@@ -47,6 +47,10 @@
+ #define   MI_LRI_FORCE_POSTED		REG_BIT(12)
+ #define   MI_LRI_LEN(x)			(((x) & 0xff) + 1)
+ 
++#define MI_STORE_REGISTER_MEM		(__MI_INSTR(0x24) | XE_INSTR_NUM_DW(4))
++#define   MI_SRM_USE_GGTT		REG_BIT(22)
++#define   MI_SRM_ADD_CS_OFFSET		REG_BIT(19)
++
+ #define MI_FLUSH_DW			__MI_INSTR(0x26)
+ #define   MI_FLUSH_DW_STORE_INDEX	REG_BIT(21)
+ #define   MI_INVALIDATE_TLB		REG_BIT(18)
+diff --git a/drivers/gpu/drm/xe/xe_gsc.c b/drivers/gpu/drm/xe/xe_gsc.c
+index 6fbea70d3d36d7..feb680d127e605 100644
+--- a/drivers/gpu/drm/xe/xe_gsc.c
++++ b/drivers/gpu/drm/xe/xe_gsc.c
+@@ -564,6 +564,28 @@ void xe_gsc_remove(struct xe_gsc *gsc)
+ 	xe_gsc_proxy_remove(gsc);
+ }
+ 
++void xe_gsc_stop_prepare(struct xe_gsc *gsc)
++{
++	struct xe_gt *gt = gsc_to_gt(gsc);
++	int ret;
++
++	if (!xe_uc_fw_is_loadable(&gsc->fw) || xe_uc_fw_is_in_error_state(&gsc->fw))
++		return;
++
++	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GSC);
++
++	/*
++	 * If the GSC FW load or the proxy init are interrupted, the only way
++	 * to recover it is to do an FLR and reload the GSC from scratch.
++	 * Therefore, let's wait for the init to complete before stopping
++	 * operations. The proxy init is the last step, so we can just wait on
++	 * that.
++	 */
++	ret = xe_gsc_wait_for_proxy_init_done(gsc);
++	if (ret)
++		xe_gt_err(gt, "failed to wait for GSC init completion before uc stop\n");
++}
++
+ /*
+  * wa_14015076503: if the GSC FW is loaded, we need to alert it before doing a
+  * GSC engine reset by writing a notification bit in the GS1 register and then
+diff --git a/drivers/gpu/drm/xe/xe_gsc.h b/drivers/gpu/drm/xe/xe_gsc.h
+index e282b9ef6ec4d5..c31fe24c4b663c 100644
+--- a/drivers/gpu/drm/xe/xe_gsc.h
++++ b/drivers/gpu/drm/xe/xe_gsc.h
+@@ -16,6 +16,7 @@ struct xe_hw_engine;
+ int xe_gsc_init(struct xe_gsc *gsc);
+ int xe_gsc_init_post_hwconfig(struct xe_gsc *gsc);
+ void xe_gsc_wait_for_worker_completion(struct xe_gsc *gsc);
++void xe_gsc_stop_prepare(struct xe_gsc *gsc);
+ void xe_gsc_load_start(struct xe_gsc *gsc);
+ void xe_gsc_remove(struct xe_gsc *gsc);
+ void xe_gsc_hwe_irq_handler(struct xe_hw_engine *hwe, u16 intr_vec);
+diff --git a/drivers/gpu/drm/xe/xe_gsc_proxy.c b/drivers/gpu/drm/xe/xe_gsc_proxy.c
+index 2d6ea8c0144515..85801de4fab184 100644
+--- a/drivers/gpu/drm/xe/xe_gsc_proxy.c
++++ b/drivers/gpu/drm/xe/xe_gsc_proxy.c
+@@ -71,6 +71,17 @@ bool xe_gsc_proxy_init_done(struct xe_gsc *gsc)
+ 	       HECI1_FWSTS1_PROXY_STATE_NORMAL;
+ }
+ 
++int xe_gsc_wait_for_proxy_init_done(struct xe_gsc *gsc)
++{
++	struct xe_gt *gt = gsc_to_gt(gsc);
++
++	/* Proxy init can take up to 500ms, so wait double that for safety */
++	return xe_mmio_wait32(gt, HECI_FWSTS1(MTL_GSC_HECI1_BASE),
++			      HECI1_FWSTS1_CURRENT_STATE,
++			      HECI1_FWSTS1_PROXY_STATE_NORMAL,
++			      USEC_PER_SEC, NULL, false);
++}
++
+ static void __gsc_proxy_irq_rmw(struct xe_gsc *gsc, u32 clr, u32 set)
+ {
+ 	struct xe_gt *gt = gsc_to_gt(gsc);
+diff --git a/drivers/gpu/drm/xe/xe_gsc_proxy.h b/drivers/gpu/drm/xe/xe_gsc_proxy.h
+index c511ade6b86378..e2498aa6de1881 100644
+--- a/drivers/gpu/drm/xe/xe_gsc_proxy.h
++++ b/drivers/gpu/drm/xe/xe_gsc_proxy.h
+@@ -13,6 +13,7 @@ struct xe_gsc;
+ int xe_gsc_proxy_init(struct xe_gsc *gsc);
+ bool xe_gsc_proxy_init_done(struct xe_gsc *gsc);
+ void xe_gsc_proxy_remove(struct xe_gsc *gsc);
++int xe_gsc_wait_for_proxy_init_done(struct xe_gsc *gsc);
+ int xe_gsc_proxy_start(struct xe_gsc *gsc);
+ 
+ int xe_gsc_proxy_request_handler(struct xe_gsc *gsc);
+diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
+index 17ba15132a9840..3a7628fb5ad328 100644
+--- a/drivers/gpu/drm/xe/xe_gt.c
++++ b/drivers/gpu/drm/xe/xe_gt.c
+@@ -828,7 +828,7 @@ void xe_gt_suspend_prepare(struct xe_gt *gt)
+ {
+ 	XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL));
+ 
+-	xe_uc_stop_prepare(&gt->uc);
++	xe_uc_suspend_prepare(&gt->uc);
+ 
+ 	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
+ }
+diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c
+index aec7db39c061e6..2d4e38b3bab19e 100644
+--- a/drivers/gpu/drm/xe/xe_lrc.c
++++ b/drivers/gpu/drm/xe/xe_lrc.c
+@@ -694,7 +694,7 @@ static inline u32 __xe_lrc_start_seqno_offset(struct xe_lrc *lrc)
+ 
+ static u32 __xe_lrc_ctx_job_timestamp_offset(struct xe_lrc *lrc)
+ {
+-	/* The start seqno is stored in the driver-defined portion of PPHWSP */
++	/* This is stored in the driver-defined portion of PPHWSP */
+ 	return xe_lrc_pphwsp_offset(lrc) + LRC_CTX_JOB_TIMESTAMP_OFFSET;
+ }
+ 
+diff --git a/drivers/gpu/drm/xe/xe_ring_ops.c b/drivers/gpu/drm/xe/xe_ring_ops.c
+index 9f327f27c0726e..fb31e09acb519c 100644
+--- a/drivers/gpu/drm/xe/xe_ring_ops.c
++++ b/drivers/gpu/drm/xe/xe_ring_ops.c
+@@ -229,13 +229,10 @@ static u32 get_ppgtt_flag(struct xe_sched_job *job)
+ 
+ static int emit_copy_timestamp(struct xe_lrc *lrc, u32 *dw, int i)
+ {
+-	dw[i++] = MI_COPY_MEM_MEM | MI_COPY_MEM_MEM_SRC_GGTT |
+-		MI_COPY_MEM_MEM_DST_GGTT;
++	dw[i++] = MI_STORE_REGISTER_MEM | MI_SRM_USE_GGTT | MI_SRM_ADD_CS_OFFSET;
++	dw[i++] = RING_CTX_TIMESTAMP(0).addr;
+ 	dw[i++] = xe_lrc_ctx_job_timestamp_ggtt_addr(lrc);
+ 	dw[i++] = 0;
+-	dw[i++] = xe_lrc_ctx_timestamp_ggtt_addr(lrc);
+-	dw[i++] = 0;
+-	dw[i++] = MI_NOOP;
+ 
+ 	return i;
+ }
+diff --git a/drivers/gpu/drm/xe/xe_uc.c b/drivers/gpu/drm/xe/xe_uc.c
+index 0d073a9987c2e8..bb03c524613f2c 100644
+--- a/drivers/gpu/drm/xe/xe_uc.c
++++ b/drivers/gpu/drm/xe/xe_uc.c
+@@ -241,7 +241,7 @@ void xe_uc_gucrc_disable(struct xe_uc *uc)
+ 
+ void xe_uc_stop_prepare(struct xe_uc *uc)
+ {
+-	xe_gsc_wait_for_worker_completion(&uc->gsc);
++	xe_gsc_stop_prepare(&uc->gsc);
+ 	xe_guc_stop_prepare(&uc->guc);
+ }
+ 
+@@ -275,6 +275,12 @@ static void uc_reset_wait(struct xe_uc *uc)
+ 		goto again;
+ }
+ 
++void xe_uc_suspend_prepare(struct xe_uc *uc)
++{
++	xe_gsc_wait_for_worker_completion(&uc->gsc);
++	xe_guc_stop_prepare(&uc->guc);
++}
++
+ int xe_uc_suspend(struct xe_uc *uc)
+ {
+ 	/* GuC submission not enabled, nothing to do */
+diff --git a/drivers/gpu/drm/xe/xe_uc.h b/drivers/gpu/drm/xe/xe_uc.h
+index 506517c1133397..ba2937ab94cf56 100644
+--- a/drivers/gpu/drm/xe/xe_uc.h
++++ b/drivers/gpu/drm/xe/xe_uc.h
+@@ -18,6 +18,7 @@ int xe_uc_reset_prepare(struct xe_uc *uc);
+ void xe_uc_stop_prepare(struct xe_uc *uc);
+ void xe_uc_stop(struct xe_uc *uc);
+ int xe_uc_start(struct xe_uc *uc);
++void xe_uc_suspend_prepare(struct xe_uc *uc);
+ int xe_uc_suspend(struct xe_uc *uc);
+ int xe_uc_sanitize_reset(struct xe_uc *uc);
+ void xe_uc_remove(struct xe_uc *uc);
+diff --git a/drivers/hid/bpf/hid_bpf_dispatch.c b/drivers/hid/bpf/hid_bpf_dispatch.c
+index 8420c227e21b3a..bd3cc56366480d 100644
+--- a/drivers/hid/bpf/hid_bpf_dispatch.c
++++ b/drivers/hid/bpf/hid_bpf_dispatch.c
+@@ -38,6 +38,9 @@ dispatch_hid_bpf_device_event(struct hid_device *hdev, enum hid_report_type type
+ 	struct hid_bpf_ops *e;
+ 	int ret;
+ 
++	if (unlikely(hdev->bpf.destroyed))
++		return ERR_PTR(-ENODEV);
++
+ 	if (type >= HID_REPORT_TYPES)
+ 		return ERR_PTR(-EINVAL);
+ 
+@@ -93,6 +96,9 @@ int dispatch_hid_bpf_raw_requests(struct hid_device *hdev,
+ 	struct hid_bpf_ops *e;
+ 	int ret, idx;
+ 
++	if (unlikely(hdev->bpf.destroyed))
++		return -ENODEV;
++
+ 	if (rtype >= HID_REPORT_TYPES)
+ 		return -EINVAL;
+ 
+@@ -130,6 +136,9 @@ int dispatch_hid_bpf_output_report(struct hid_device *hdev,
+ 	struct hid_bpf_ops *e;
+ 	int ret, idx;
+ 
++	if (unlikely(hdev->bpf.destroyed))
++		return -ENODEV;
++
+ 	idx = srcu_read_lock(&hdev->bpf.srcu);
+ 	list_for_each_entry_srcu(e, &hdev->bpf.prog_list, list,
+ 				 srcu_read_lock_held(&hdev->bpf.srcu)) {
+diff --git a/drivers/hid/hid-thrustmaster.c b/drivers/hid/hid-thrustmaster.c
+index 3b81468a1df297..0bf70664c35ee1 100644
+--- a/drivers/hid/hid-thrustmaster.c
++++ b/drivers/hid/hid-thrustmaster.c
+@@ -174,6 +174,7 @@ static void thrustmaster_interrupts(struct hid_device *hdev)
+ 	u8 ep_addr[2] = {b_ep, 0};
+ 
+ 	if (!usb_check_int_endpoints(usbif, ep_addr)) {
++		kfree(send_buf);
+ 		hid_err(hdev, "Unexpected non-int endpoint\n");
+ 		return;
+ 	}
+diff --git a/drivers/hid/hid-uclogic-core.c b/drivers/hid/hid-uclogic-core.c
+index d8008933c052f5..321c43fb06ae06 100644
+--- a/drivers/hid/hid-uclogic-core.c
++++ b/drivers/hid/hid-uclogic-core.c
+@@ -142,11 +142,12 @@ static int uclogic_input_configured(struct hid_device *hdev,
+ 			suffix = "System Control";
+ 			break;
+ 		}
+-	}
+-
+-	if (suffix)
++	} else {
+ 		hi->input->name = devm_kasprintf(&hdev->dev, GFP_KERNEL,
+ 						 "%s %s", hdev->name, suffix);
++		if (!hi->input->name)
++			return -ENOMEM;
++	}
+ 
+ 	return 0;
+ }
+diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
+index fb8cd8469328ee..35f26fa1ffe76e 100644
+--- a/drivers/hv/channel.c
++++ b/drivers/hv/channel.c
+@@ -1077,68 +1077,10 @@ int vmbus_sendpacket(struct vmbus_channel *channel, void *buffer,
+ EXPORT_SYMBOL(vmbus_sendpacket);
+ 
+ /*
+- * vmbus_sendpacket_pagebuffer - Send a range of single-page buffer
+- * packets using a GPADL Direct packet type. This interface allows you
+- * to control notifying the host. This will be useful for sending
+- * batched data. Also the sender can control the send flags
+- * explicitly.
+- */
+-int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
+-				struct hv_page_buffer pagebuffers[],
+-				u32 pagecount, void *buffer, u32 bufferlen,
+-				u64 requestid)
+-{
+-	int i;
+-	struct vmbus_channel_packet_page_buffer desc;
+-	u32 descsize;
+-	u32 packetlen;
+-	u32 packetlen_aligned;
+-	struct kvec bufferlist[3];
+-	u64 aligned_data = 0;
+-
+-	if (pagecount > MAX_PAGE_BUFFER_COUNT)
+-		return -EINVAL;
+-
+-	/*
+-	 * Adjust the size down since vmbus_channel_packet_page_buffer is the
+-	 * largest size we support
+-	 */
+-	descsize = sizeof(struct vmbus_channel_packet_page_buffer) -
+-			  ((MAX_PAGE_BUFFER_COUNT - pagecount) *
+-			  sizeof(struct hv_page_buffer));
+-	packetlen = descsize + bufferlen;
+-	packetlen_aligned = ALIGN(packetlen, sizeof(u64));
+-
+-	/* Setup the descriptor */
+-	desc.type = VM_PKT_DATA_USING_GPA_DIRECT;
+-	desc.flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
+-	desc.dataoffset8 = descsize >> 3; /* in 8-bytes granularity */
+-	desc.length8 = (u16)(packetlen_aligned >> 3);
+-	desc.transactionid = VMBUS_RQST_ERROR; /* will be updated in hv_ringbuffer_write() */
+-	desc.reserved = 0;
+-	desc.rangecount = pagecount;
+-
+-	for (i = 0; i < pagecount; i++) {
+-		desc.range[i].len = pagebuffers[i].len;
+-		desc.range[i].offset = pagebuffers[i].offset;
+-		desc.range[i].pfn	 = pagebuffers[i].pfn;
+-	}
+-
+-	bufferlist[0].iov_base = &desc;
+-	bufferlist[0].iov_len = descsize;
+-	bufferlist[1].iov_base = buffer;
+-	bufferlist[1].iov_len = bufferlen;
+-	bufferlist[2].iov_base = &aligned_data;
+-	bufferlist[2].iov_len = (packetlen_aligned - packetlen);
+-
+-	return hv_ringbuffer_write(channel, bufferlist, 3, requestid, NULL);
+-}
+-EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer);
+-
+-/*
+- * vmbus_sendpacket_multipagebuffer - Send a multi-page buffer packet
++ * vmbus_sendpacket_mpb_desc - Send one or more multi-page buffer packets
+  * using a GPADL Direct packet type.
+- * The buffer includes the vmbus descriptor.
++ * The desc argument must include space for the VMBus descriptor. The
++ * rangecount field must already be set.
+  */
+ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
+ 			      struct vmbus_packet_mpb_array *desc,
+@@ -1160,7 +1102,6 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
+ 	desc->length8 = (u16)(packetlen_aligned >> 3);
+ 	desc->transactionid = VMBUS_RQST_ERROR; /* will be updated in hv_ringbuffer_write() */
+ 	desc->reserved = 0;
+-	desc->rangecount = 1;
+ 
+ 	bufferlist[0].iov_base = desc;
+ 	bufferlist[0].iov_len = desc_size;
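vmbus_sendpacket_mpb_desc() rounds the packet length up to a u64 boundary and covers the tail with a third iovec over a zeroed qword. A standalone sketch of that padding arithmetic, not part of the patch (the sizes are made up; ALIGN_UP stands in for the kernel's ALIGN macro):

  #include <stdint.h>
  #include <stdio.h>

  /* Round x up to the next multiple of a (a must be a power of two). */
  #define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((uint32_t)(a) - 1))

  int main(void)
  {
          uint32_t desc_size = 44;  /* hypothetical descriptor size */
          uint32_t bufferlen = 29;  /* hypothetical payload size */
          uint32_t packetlen = desc_size + bufferlen;
          uint32_t packetlen_aligned = ALIGN_UP(packetlen, sizeof(uint64_t));

          /* The third iovec covers exactly the pad bytes from a zeroed u64. */
          printf("packetlen=%u aligned=%u pad=%u\n",
                 packetlen, packetlen_aligned, packetlen_aligned - packetlen);
          return 0;
  }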
+diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
+index 52cb744b4d7fde..e4136cbaa4d406 100644
+--- a/drivers/hv/hyperv_vmbus.h
++++ b/drivers/hv/hyperv_vmbus.h
+@@ -485,4 +485,10 @@ static inline int hv_debug_add_dev_dir(struct hv_device *dev)
+ 
+ #endif /* CONFIG_HYPERV_TESTING */
+ 
++/* Create and remove sysfs entry for memory mapped ring buffers for a channel */
++int hv_create_ring_sysfs(struct vmbus_channel *channel,
++			 int (*hv_mmap_ring_buffer)(struct vmbus_channel *channel,
++						    struct vm_area_struct *vma));
++int hv_remove_ring_sysfs(struct vmbus_channel *channel);
++
+ #endif /* _HYPERV_VMBUS_H */
+diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
+index 2b6749c9712ef2..1f519e925f0601 100644
+--- a/drivers/hv/vmbus_drv.c
++++ b/drivers/hv/vmbus_drv.c
+@@ -1792,6 +1792,27 @@ static ssize_t subchannel_id_show(struct vmbus_channel *channel,
+ }
+ static VMBUS_CHAN_ATTR_RO(subchannel_id);
+ 
++static int hv_mmap_ring_buffer_wrapper(struct file *filp, struct kobject *kobj,
++				       struct bin_attribute *attr,
++				       struct vm_area_struct *vma)
++{
++	struct vmbus_channel *channel = container_of(kobj, struct vmbus_channel, kobj);
++
++	/*
++	 * hv_(create|remove)_ring_sysfs implementation ensures that mmap_ring_buffer
++	 * is not NULL.
++	 */
++	return channel->mmap_ring_buffer(channel, vma);
++}
++
++static struct bin_attribute chan_attr_ring_buffer = {
++	.attr = {
++		.name = "ring",
++		.mode = 0600,
++	},
++	.size = 2 * SZ_2M,
++	.mmap = hv_mmap_ring_buffer_wrapper,
++};
+ static struct attribute *vmbus_chan_attrs[] = {
+ 	&chan_attr_out_mask.attr,
+ 	&chan_attr_in_mask.attr,
+@@ -1811,6 +1832,11 @@ static struct attribute *vmbus_chan_attrs[] = {
+ 	NULL
+ };
+ 
++static struct bin_attribute *vmbus_chan_bin_attrs[] = {
++	&chan_attr_ring_buffer,
++	NULL
++};
++
+ /*
+  * Channel-level attribute_group callback function. Returns the permission for
+  * each attribute, and returns 0 if an attribute is not visible.
+@@ -1831,9 +1857,24 @@ static umode_t vmbus_chan_attr_is_visible(struct kobject *kobj,
+ 	return attr->mode;
+ }
+ 
++static umode_t vmbus_chan_bin_attr_is_visible(struct kobject *kobj,
++					      struct bin_attribute *attr, int idx)
++{
++	const struct vmbus_channel *channel =
++		container_of(kobj, struct vmbus_channel, kobj);
++
++	/* Hide ring attribute if channel's ring_sysfs_visible is set to false */
++	if (attr == &chan_attr_ring_buffer && !channel->ring_sysfs_visible)
++		return 0;
++
++	return attr->attr.mode;
++}
++
+ static const struct attribute_group vmbus_chan_group = {
+ 	.attrs = vmbus_chan_attrs,
+-	.is_visible = vmbus_chan_attr_is_visible
++	.bin_attrs = vmbus_chan_bin_attrs,
++	.is_visible = vmbus_chan_attr_is_visible,
++	.is_bin_visible = vmbus_chan_bin_attr_is_visible,
+ };
+ 
+ static const struct kobj_type vmbus_chan_ktype = {
+@@ -1841,6 +1882,63 @@ static const struct kobj_type vmbus_chan_ktype = {
+ 	.release = vmbus_chan_release,
+ };
+ 
++/**
++ * hv_create_ring_sysfs() - create "ring" sysfs entry corresponding to ring buffers for a channel.
++ * @channel: Pointer to vmbus_channel structure
++ * @hv_mmap_ring_buffer: function pointer for initializing the function to be called on mmap of
++ *                       channel's "ring" sysfs node, which is for the ring buffer of that channel.
++ *                       Function pointer is of below type:
++ *                       int (*hv_mmap_ring_buffer)(struct vmbus_channel *channel,
++ *                                                  struct vm_area_struct *vma))
++ *                       This has a pointer to the channel and a pointer to vm_area_struct,
++ *                       used for mmap, as arguments.
++ *
++ * The sysfs node for a channel's ring buffer is created along with the other attributes;
++ * however, its visibility is disabled by default, and it should be enabled only while the
++ * use-case is running.
++ * For example, an HV_NIC device is used by either uio_hv_generic or hv_netvsc at any given
++ * point in time, and the "ring" sysfs node is needed only when uio_hv_generic is bound to
++ * that device. To avoid exposing the ring buffer by default, this function is responsible
++ * for enabling its visibility for userspace to use.
++ * Note: Race conditions can happen with userspace and it is not encouraged to create new
++ * use-cases for this. This was added to maintain backward compatibility, while solving
++ * one of the race conditions in uio_hv_generic while creating sysfs.
++ *
++ * Returns 0 on success or error code on failure.
++ */
++int hv_create_ring_sysfs(struct vmbus_channel *channel,
++			 int (*hv_mmap_ring_buffer)(struct vmbus_channel *channel,
++						    struct vm_area_struct *vma))
++{
++	struct kobject *kobj = &channel->kobj;
++
++	channel->mmap_ring_buffer = hv_mmap_ring_buffer;
++	channel->ring_sysfs_visible = true;
++
++	return sysfs_update_group(kobj, &vmbus_chan_group);
++}
++EXPORT_SYMBOL_GPL(hv_create_ring_sysfs);
++
++/**
++ * hv_remove_ring_sysfs() - remove ring sysfs entry corresponding to ring buffers for a channel.
++ * @channel: Pointer to vmbus_channel structure
++ *
++ * Hide "ring" sysfs for a channel by changing its is_visible attribute and updating sysfs group.
++ *
++ * Returns 0 on success or error code on failure.
++ */
++int hv_remove_ring_sysfs(struct vmbus_channel *channel)
++{
++	struct kobject *kobj = &channel->kobj;
++	int ret;
++
++	channel->ring_sysfs_visible = false;
++	ret = sysfs_update_group(kobj, &vmbus_chan_group);
++	channel->mmap_ring_buffer = NULL;
++	return ret;
++}
++EXPORT_SYMBOL_GPL(hv_remove_ring_sysfs);
++
+ /*
+  * vmbus_add_channel_kobj - setup a sub-directory under device/channels
+  */
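The is_bin_visible callback added above hides the "ring" node by returning mode 0 until hv_create_ring_sysfs() flips ring_sysfs_visible. A toy userspace model of that gating, not part of the patch (the types and names here are illustrative, in place of the kernel's kobject machinery):

  #include <stdbool.h>
  #include <stdio.h>

  struct channel { bool ring_sysfs_visible; };

  /* Mirrors the is_bin_visible contract: 0 hides, otherwise return mode. */
  static unsigned int ring_attr_mode(const struct channel *ch)
  {
          return ch->ring_sysfs_visible ? 0600 : 0;
  }

  int main(void)
  {
          struct channel ch = { .ring_sysfs_visible = false };

          printf("before bind: mode=%o\n", ring_attr_mode(&ch)); /* 0 */
          ch.ring_sysfs_visible = true; /* what hv_create_ring_sysfs() does */
          printf("after bind:  mode=%o\n", ring_attr_mode(&ch)); /* 600 */
          return 0;
  }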
+diff --git a/drivers/iio/adc/ad7266.c b/drivers/iio/adc/ad7266.c
+index 7949b076fb87ea..d4c9b85135f585 100644
+--- a/drivers/iio/adc/ad7266.c
++++ b/drivers/iio/adc/ad7266.c
+@@ -45,7 +45,7 @@ struct ad7266_state {
+ 	 */
+ 	struct {
+ 		__be16 sample[2];
+-		s64 timestamp;
++		aligned_s64 timestamp;
+ 	} data __aligned(IIO_DMA_MINALIGN);
+ };
+ 
+diff --git a/drivers/iio/adc/ad7768-1.c b/drivers/iio/adc/ad7768-1.c
+index 157a0df97f971b..a9248a85466ea3 100644
+--- a/drivers/iio/adc/ad7768-1.c
++++ b/drivers/iio/adc/ad7768-1.c
+@@ -169,7 +169,7 @@ struct ad7768_state {
+ 	union {
+ 		struct {
+ 			__be32 chan;
+-			s64 timestamp;
++			aligned_s64 timestamp;
+ 		} scan;
+ 		__be32 d32;
+ 		u8 d8[2];
+diff --git a/drivers/iio/chemical/pms7003.c b/drivers/iio/chemical/pms7003.c
+index d0bd94912e0a34..e05ce1f12065c6 100644
+--- a/drivers/iio/chemical/pms7003.c
++++ b/drivers/iio/chemical/pms7003.c
+@@ -5,7 +5,6 @@
+  * Copyright (c) Tomasz Duszynski <tduszyns@gmail.com>
+  */
+ 
+-#include <linux/unaligned.h>
+ #include <linux/completion.h>
+ #include <linux/device.h>
+ #include <linux/errno.h>
+@@ -19,6 +18,8 @@
+ #include <linux/module.h>
+ #include <linux/mutex.h>
+ #include <linux/serdev.h>
++#include <linux/types.h>
++#include <linux/unaligned.h>
+ 
+ #define PMS7003_DRIVER_NAME "pms7003"
+ 
+@@ -76,7 +77,7 @@ struct pms7003_state {
+ 	/* Used to construct scan to push to the IIO buffer */
+ 	struct {
+ 		u16 data[3]; /* PM1, PM2P5, PM10 */
+-		s64 ts;
++		aligned_s64 ts;
+ 	} scan;
+ };
+ 
+diff --git a/drivers/iio/chemical/sps30.c b/drivers/iio/chemical/sps30.c
+index 814ce0aad1cccd..4085a36cd1db75 100644
+--- a/drivers/iio/chemical/sps30.c
++++ b/drivers/iio/chemical/sps30.c
+@@ -108,7 +108,7 @@ static irqreturn_t sps30_trigger_handler(int irq, void *p)
+ 	int ret;
+ 	struct {
+ 		s32 data[4]; /* PM1, PM2P5, PM4, PM10 */
+-		s64 ts;
++		aligned_s64 ts;
+ 	} scan;
+ 
+ 	mutex_lock(&state->lock);
+diff --git a/drivers/iio/light/opt3001.c b/drivers/iio/light/opt3001.c
+index 176e54bb48c33b..d5ca75b128836e 100644
+--- a/drivers/iio/light/opt3001.c
++++ b/drivers/iio/light/opt3001.c
+@@ -692,8 +692,9 @@ static irqreturn_t opt3001_irq(int irq, void *_iio)
+ 	struct opt3001 *opt = iio_priv(iio);
+ 	int ret;
+ 	bool wake_result_ready_queue = false;
++	bool ok_to_ignore_lock = opt->ok_to_ignore_lock;
+ 
+-	if (!opt->ok_to_ignore_lock)
++	if (!ok_to_ignore_lock)
+ 		mutex_lock(&opt->lock);
+ 
+ 	ret = i2c_smbus_read_word_swapped(opt->client, OPT3001_CONFIGURATION);
+@@ -730,7 +731,7 @@ static irqreturn_t opt3001_irq(int irq, void *_iio)
+ 	}
+ 
+ out:
+-	if (!opt->ok_to_ignore_lock)
++	if (!ok_to_ignore_lock)
+ 		mutex_unlock(&opt->lock);
+ 
+ 	if (wake_result_ready_queue)
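The opt3001 fix reads ok_to_ignore_lock once into a local so the lock and unlock decisions cannot diverge if the flag changes while the handler runs. A minimal sketch of the pattern, not part of the patch, with a pthread mutex as a stand-in for the kernel mutex (real code would also need proper synchronization around the flag itself):

  #include <pthread.h>
  #include <stdbool.h>
  #include <stdio.h>

  struct dev {
          pthread_mutex_t lock;
          bool ok_to_ignore_lock; /* set elsewhere, may change at any time */
  };

  static void irq_handler(struct dev *d)
  {
          /* Read the flag exactly once; both decisions use the snapshot. */
          bool ok_to_ignore_lock = d->ok_to_ignore_lock;

          if (!ok_to_ignore_lock)
                  pthread_mutex_lock(&d->lock);

          /* ... work that may race with the flag being flipped ... */

          if (!ok_to_ignore_lock) /* guaranteed to match the lock above */
                  pthread_mutex_unlock(&d->lock);
  }

  int main(void)
  {
          struct dev d = { PTHREAD_MUTEX_INITIALIZER, false };

          irq_handler(&d);
          puts("balanced lock/unlock");
          return 0;
  }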
+diff --git a/drivers/iio/pressure/mprls0025pa.h b/drivers/iio/pressure/mprls0025pa.h
+index 9d5c30afa9d69a..d62a018eaff32b 100644
+--- a/drivers/iio/pressure/mprls0025pa.h
++++ b/drivers/iio/pressure/mprls0025pa.h
+@@ -34,16 +34,6 @@ struct iio_dev;
+ struct mpr_data;
+ struct mpr_ops;
+ 
+-/**
+- * struct mpr_chan
+- * @pres: pressure value
+- * @ts: timestamp
+- */
+-struct mpr_chan {
+-	s32 pres;
+-	s64 ts;
+-};
+-
+ enum mpr_func_id {
+ 	MPR_FUNCTION_A,
+ 	MPR_FUNCTION_B,
+@@ -69,6 +59,8 @@ enum mpr_func_id {
+  *       reading in a loop until data is ready
+  * @completion: handshake from irq to read
+  * @chan: channel values for buffered mode
++ * @chan.pres: pressure value
++ * @chan.ts: timestamp
+  * @buffer: raw conversion data
+  */
+ struct mpr_data {
+@@ -87,7 +79,10 @@ struct mpr_data {
+ 	struct gpio_desc	*gpiod_reset;
+ 	int			irq;
+ 	struct completion	completion;
+-	struct mpr_chan		chan;
++	struct {
++		s32 pres;
++		aligned_s64 ts;
++	} chan;
+ 	u8	    buffer[MPR_MEASUREMENT_RD_SIZE] __aligned(IIO_DMA_MINALIGN);
+ };
+ 
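The aligned_s64 conversions above force the in-scan timestamp onto an 8-byte boundary regardless of the fields before it, which is what the IIO buffer push path expects. A standalone layout check, not part of the patch, with aligned_s64 re-created locally via the GCC/Clang aligned attribute:

  #include <stddef.h>
  #include <stdint.h>
  #include <stdio.h>

  typedef int64_t aligned_s64 __attribute__((aligned(8)));

  struct scan {
          int32_t pres;   /* 4 bytes of channel data */
          aligned_s64 ts; /* always lands at offset 8, never 4 */
  };

  int main(void)
  {
          /* On 32-bit ABIs a plain int64_t may only be 4-byte aligned; the
           * explicit attribute makes the offset 8 on every target. */
          printf("offsetof(ts) = %zu\n", offsetof(struct scan, ts));
          _Static_assert(offsetof(struct scan, ts) == 8, "ts must be at 8");
          return 0;
  }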
+diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
+index 46102f179955ba..df2aa15a5bc9bd 100644
+--- a/drivers/infiniband/core/device.c
++++ b/drivers/infiniband/core/device.c
+@@ -1368,6 +1368,9 @@ static void ib_device_notify_register(struct ib_device *device)
+ 
+ 	down_read(&devices_rwsem);
+ 
++	/* Mark for userspace that device is ready */
++	kobject_uevent(&device->dev.kobj, KOBJ_ADD);
++
+ 	ret = rdma_nl_notify_event(device, 0, RDMA_REGISTER_EVENT);
+ 	if (ret)
+ 		goto out;
+@@ -1484,10 +1487,9 @@ int ib_register_device(struct ib_device *device, const char *name,
+ 		return ret;
+ 	}
+ 	dev_set_uevent_suppress(&device->dev, false);
+-	/* Mark for userspace that device is ready */
+-	kobject_uevent(&device->dev.kobj, KOBJ_ADD);
+ 
+ 	ib_device_notify_register(device);
++
+ 	ib_device_put(device);
+ 
+ 	return 0;
+diff --git a/drivers/infiniband/sw/rxe/rxe_cq.c b/drivers/infiniband/sw/rxe/rxe_cq.c
+index fec87c9030abdc..fffd144d509eb0 100644
+--- a/drivers/infiniband/sw/rxe/rxe_cq.c
++++ b/drivers/infiniband/sw/rxe/rxe_cq.c
+@@ -56,11 +56,8 @@ int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
+ 
+ 	err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, udata,
+ 			   cq->queue->buf, cq->queue->buf_size, &cq->queue->ip);
+-	if (err) {
+-		vfree(cq->queue->buf);
+-		kfree(cq->queue);
++	if (err)
+ 		return err;
+-	}
+ 
+ 	cq->is_user = uresp;
+ 
+diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
+index e072d2b50c9876..0168ad495e6c90 100644
+--- a/drivers/net/dsa/b53/b53_common.c
++++ b/drivers/net/dsa/b53/b53_common.c
+@@ -326,6 +326,26 @@ static void b53_get_vlan_entry(struct b53_device *dev, u16 vid,
+ 	}
+ }
+ 
++static void b53_set_eap_mode(struct b53_device *dev, int port, int mode)
++{
++	u64 eap_conf;
++
++	if (is5325(dev) || is5365(dev) || dev->chip_id == BCM5389_DEVICE_ID)
++		return;
++
++	b53_read64(dev, B53_EAP_PAGE, B53_PORT_EAP_CONF(port), &eap_conf);
++
++	if (is63xx(dev)) {
++		eap_conf &= ~EAP_MODE_MASK_63XX;
++		eap_conf |= (u64)mode << EAP_MODE_SHIFT_63XX;
++	} else {
++		eap_conf &= ~EAP_MODE_MASK;
++		eap_conf |= (u64)mode << EAP_MODE_SHIFT;
++	}
++
++	b53_write64(dev, B53_EAP_PAGE, B53_PORT_EAP_CONF(port), eap_conf);
++}
++
+ static void b53_set_forwarding(struct b53_device *dev, int enable)
+ {
+ 	u8 mgmt;
+@@ -586,6 +606,13 @@ int b53_setup_port(struct dsa_switch *ds, int port)
+ 	b53_port_set_mcast_flood(dev, port, true);
+ 	b53_port_set_learning(dev, port, false);
+ 
++	/* Force all traffic to go to the CPU port to prevent the ASIC from
++	 * trying to forward to bridged ports on matching FDB entries, then
++	 * dropping frames because it isn't allowed to forward there.
++	 */
++	if (dsa_is_user_port(ds, port))
++		b53_set_eap_mode(dev, port, EAP_MODE_SIMPLIFIED);
++
+ 	return 0;
+ }
+ EXPORT_SYMBOL(b53_setup_port);
+@@ -2043,6 +2070,9 @@ int b53_br_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge,
+ 		pvlan |= BIT(i);
+ 	}
+ 
++	/* Disable redirection of unknown SA to the CPU port */
++	b53_set_eap_mode(dev, port, EAP_MODE_BASIC);
++
+ 	/* Configure the local port VLAN control membership to include
+ 	 * remote ports and update the local port bitmask
+ 	 */
+@@ -2078,6 +2108,9 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge)
+ 			pvlan &= ~BIT(i);
+ 	}
+ 
++	/* Enable redirection of unknown SA to the CPU port */
++	b53_set_eap_mode(dev, port, EAP_MODE_SIMPLIFIED);
++
+ 	b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);
+ 	dev->ports[port].vlan_ctl_mask = pvlan;
+ 
+diff --git a/drivers/net/dsa/b53/b53_regs.h b/drivers/net/dsa/b53/b53_regs.h
+index bfbcb66bef6626..5f7a0e5c5709d3 100644
+--- a/drivers/net/dsa/b53/b53_regs.h
++++ b/drivers/net/dsa/b53/b53_regs.h
+@@ -50,6 +50,9 @@
+ /* Jumbo Frame Registers */
+ #define B53_JUMBO_PAGE			0x40
+ 
++/* EAP Registers */
++#define B53_EAP_PAGE			0x42
++
+ /* EEE Control Registers Page */
+ #define B53_EEE_PAGE			0x92
+ 
+@@ -480,6 +483,17 @@
+ #define   JMS_MIN_SIZE			1518
+ #define   JMS_MAX_SIZE			9724
+ 
++/*************************************************************************
++ * EAP Page Registers
++ *************************************************************************/
++#define B53_PORT_EAP_CONF(i)		(0x20 + 8 * (i))
++#define  EAP_MODE_SHIFT			51
++#define  EAP_MODE_SHIFT_63XX		50
++#define  EAP_MODE_MASK			(0x3ull << EAP_MODE_SHIFT)
++#define  EAP_MODE_MASK_63XX		(0x3ull << EAP_MODE_SHIFT_63XX)
++#define  EAP_MODE_BASIC			0
++#define  EAP_MODE_SIMPLIFIED		3
++
+ /*************************************************************************
+  * EEE Configuration Page Registers
+  *************************************************************************/
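b53_set_eap_mode() performs a 64-bit read-modify-write of a two-bit mode field whose position differs between switch families. The shift/mask arithmetic in isolation, not part of the patch (the register value is faked):

  #include <stdint.h>
  #include <stdio.h>

  #define EAP_MODE_SHIFT      51
  #define EAP_MODE_MASK       (0x3ull << EAP_MODE_SHIFT)
  #define EAP_MODE_BASIC      0
  #define EAP_MODE_SIMPLIFIED 3

  /* Clear the two-bit mode field, then set the new mode in place. */
  static uint64_t set_eap_mode(uint64_t eap_conf, int mode)
  {
          eap_conf &= ~EAP_MODE_MASK;
          eap_conf |= (uint64_t)mode << EAP_MODE_SHIFT;
          return eap_conf;
  }

  int main(void)
  {
          uint64_t conf = 0xdeadbeefcafef00dull; /* fake register content */

          conf = set_eap_mode(conf, EAP_MODE_SIMPLIFIED);
          printf("mode bits = %llu\n",
                 (unsigned long long)((conf & EAP_MODE_MASK) >> EAP_MODE_SHIFT));
          return 0;
  }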
+diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
+index d0563ef59acf64..fbac2a647b20b2 100644
+--- a/drivers/net/dsa/sja1105/sja1105_main.c
++++ b/drivers/net/dsa/sja1105/sja1105_main.c
+@@ -2083,6 +2083,7 @@ static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port,
+ 	switch (state) {
+ 	case BR_STATE_DISABLED:
+ 	case BR_STATE_BLOCKING:
++	case BR_STATE_LISTENING:
+ 		/* From UM10944 description of DRPDTAG (why put this there?):
+ 		 * "Management traffic flows to the port regardless of the state
+ 		 * of the INGRESS flag". So BPDUs are still allowed to pass.
+@@ -2092,11 +2093,6 @@ static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port,
+ 		mac[port].egress    = false;
+ 		mac[port].dyn_learn = false;
+ 		break;
+-	case BR_STATE_LISTENING:
+-		mac[port].ingress   = true;
+-		mac[port].egress    = false;
+-		mac[port].dyn_learn = false;
+-		break;
+ 	case BR_STATE_LEARNING:
+ 		mac[port].ingress   = true;
+ 		mac[port].egress    = false;
+diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
+index 60847cdb516eef..ae100ed8ed6b92 100644
+--- a/drivers/net/ethernet/cadence/macb_main.c
++++ b/drivers/net/ethernet/cadence/macb_main.c
+@@ -1016,22 +1016,15 @@ static void macb_update_stats(struct macb *bp)
+ 
+ static int macb_halt_tx(struct macb *bp)
+ {
+-	unsigned long	halt_time, timeout;
+-	u32		status;
++	u32 status;
+ 
+ 	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));
+ 
+-	timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);
+-	do {
+-		halt_time = jiffies;
+-		status = macb_readl(bp, TSR);
+-		if (!(status & MACB_BIT(TGO)))
+-			return 0;
+-
+-		udelay(250);
+-	} while (time_before(halt_time, timeout));
+-
+-	return -ETIMEDOUT;
++	/* Poll TSR until TGO is cleared or timeout. */
++	return read_poll_timeout_atomic(macb_readl, status,
++					!(status & MACB_BIT(TGO)),
++					250, MACB_HALT_TIMEOUT, false,
++					bp, TSR);
+ }
+ 
+ static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb, int budget)
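The macb change swaps the open-coded jiffies loop for read_poll_timeout_atomic(). A userspace approximation of that contract, not part of the patch: poll a condition at a fixed delay until it succeeds or a deadline passes (clock_gettime stands in for jiffies, and the kernel helper additionally rechecks the condition once after timeout):

  #include <errno.h>
  #include <stdbool.h>
  #include <stdio.h>
  #include <time.h>
  #include <unistd.h>

  static long long now_us(void)
  {
          struct timespec ts;

          clock_gettime(CLOCK_MONOTONIC, &ts);
          return ts.tv_sec * 1000000LL + ts.tv_nsec / 1000;
  }

  /* Poll cond() every delay_us until it is true or timeout_us elapses. */
  static int poll_timeout(bool (*cond)(void), long delay_us, long timeout_us)
  {
          long long deadline = now_us() + timeout_us;

          for (;;) {
                  if (cond())
                          return 0;
                  if (now_us() >= deadline)
                          return -ETIMEDOUT;
                  usleep(delay_us);
          }
  }

  static bool tx_halted(void) { return true; } /* fake TGO-cleared check */

  int main(void)
  {
          printf("ret = %d\n", poll_timeout(tx_halted, 250, 16000));
          return 0;
  }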
+diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
+index 44da335d66bda2..6a6efe2b2bc514 100644
+--- a/drivers/net/ethernet/engleder/tsnep_main.c
++++ b/drivers/net/ethernet/engleder/tsnep_main.c
+@@ -67,6 +67,8 @@
+ #define TSNEP_TX_TYPE_XDP_NDO_MAP_PAGE	(TSNEP_TX_TYPE_XDP_NDO | TSNEP_TX_TYPE_MAP_PAGE)
+ #define TSNEP_TX_TYPE_XDP		(TSNEP_TX_TYPE_XDP_TX | TSNEP_TX_TYPE_XDP_NDO)
+ #define TSNEP_TX_TYPE_XSK		BIT(12)
++#define TSNEP_TX_TYPE_TSTAMP		BIT(13)
++#define TSNEP_TX_TYPE_SKB_TSTAMP	(TSNEP_TX_TYPE_SKB | TSNEP_TX_TYPE_TSTAMP)
+ 
+ #define TSNEP_XDP_TX		BIT(0)
+ #define TSNEP_XDP_REDIRECT	BIT(1)
+@@ -387,8 +389,7 @@ static void tsnep_tx_activate(struct tsnep_tx *tx, int index, int length,
+ 	if (entry->skb) {
+ 		entry->properties = length & TSNEP_DESC_LENGTH_MASK;
+ 		entry->properties |= TSNEP_DESC_INTERRUPT_FLAG;
+-		if ((entry->type & TSNEP_TX_TYPE_SKB) &&
+-		    (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS))
++		if ((entry->type & TSNEP_TX_TYPE_SKB_TSTAMP) == TSNEP_TX_TYPE_SKB_TSTAMP)
+ 			entry->properties |= TSNEP_DESC_EXTENDED_WRITEBACK_FLAG;
+ 
+ 		/* toggle user flag to prevent false acknowledge
+@@ -480,7 +481,8 @@ static int tsnep_tx_map_frag(skb_frag_t *frag, struct tsnep_tx_entry *entry,
+ 	return mapped;
+ }
+ 
+-static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count)
++static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count,
++			bool do_tstamp)
+ {
+ 	struct device *dmadev = tx->adapter->dmadev;
+ 	struct tsnep_tx_entry *entry;
+@@ -506,6 +508,9 @@ static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count)
+ 				entry->type = TSNEP_TX_TYPE_SKB_INLINE;
+ 				mapped = 0;
+ 			}
++
++			if (do_tstamp)
++				entry->type |= TSNEP_TX_TYPE_TSTAMP;
+ 		} else {
+ 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
+ 
+@@ -559,11 +564,12 @@ static int tsnep_tx_unmap(struct tsnep_tx *tx, int index, int count)
+ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
+ 					 struct tsnep_tx *tx)
+ {
+-	int count = 1;
+ 	struct tsnep_tx_entry *entry;
++	bool do_tstamp = false;
++	int count = 1;
+ 	int length;
+-	int i;
+ 	int retval;
++	int i;
+ 
+ 	if (skb_shinfo(skb)->nr_frags > 0)
+ 		count += skb_shinfo(skb)->nr_frags;
+@@ -580,7 +586,13 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
+ 	entry = &tx->entry[tx->write];
+ 	entry->skb = skb;
+ 
+-	retval = tsnep_tx_map(skb, tx, count);
++	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
++	    tx->adapter->hwtstamp_config.tx_type == HWTSTAMP_TX_ON) {
++		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
++		do_tstamp = true;
++	}
++
++	retval = tsnep_tx_map(skb, tx, count, do_tstamp);
+ 	if (retval < 0) {
+ 		tsnep_tx_unmap(tx, tx->write, count);
+ 		dev_kfree_skb_any(entry->skb);
+@@ -592,9 +604,6 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
+ 	}
+ 	length = retval;
+ 
+-	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
+-		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+-
+ 	for (i = 0; i < count; i++)
+ 		tsnep_tx_activate(tx, (tx->write + i) & TSNEP_RING_MASK, length,
+ 				  i == count - 1);
+@@ -845,8 +854,7 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
+ 
+ 		length = tsnep_tx_unmap(tx, tx->read, count);
+ 
+-		if ((entry->type & TSNEP_TX_TYPE_SKB) &&
+-		    (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS) &&
++		if (((entry->type & TSNEP_TX_TYPE_SKB_TSTAMP) == TSNEP_TX_TYPE_SKB_TSTAMP) &&
+ 		    (__le32_to_cpu(entry->desc_wb->properties) &
+ 		     TSNEP_DESC_EXTENDED_WRITEBACK_FLAG)) {
+ 			struct skb_shared_hwtstamps hwtstamps;
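TSNEP_TX_TYPE_SKB_TSTAMP is two flag bits OR'd together, and the driver tests (type & mask) == mask so that both must be set. A tiny demonstration, not part of the patch, of why a plain bitwise AND would not be enough (the bit positions are illustrative):

  #include <stdint.h>
  #include <stdio.h>

  #define TX_TYPE_SKB        (1u << 0)
  #define TX_TYPE_TSTAMP     (1u << 13)
  #define TX_TYPE_SKB_TSTAMP (TX_TYPE_SKB | TX_TYPE_TSTAMP)

  int main(void)
  {
          uint32_t skb_only = TX_TYPE_SKB;
          uint32_t skb_ts   = TX_TYPE_SKB | TX_TYPE_TSTAMP;

          /* '& mask' is truthy for either bit; '== mask' needs both. */
          printf("skb_only: any=%d both=%d\n",
                 !!(skb_only & TX_TYPE_SKB_TSTAMP),
                 (skb_only & TX_TYPE_SKB_TSTAMP) == TX_TYPE_SKB_TSTAMP);
          printf("skb_ts:   any=%d both=%d\n",
                 !!(skb_ts & TX_TYPE_SKB_TSTAMP),
                 (skb_ts & TX_TYPE_SKB_TSTAMP) == TX_TYPE_SKB_TSTAMP);
          return 0;
  }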
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+index 8216f843a7cd5f..e43c4608d3ba33 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+@@ -707,6 +707,11 @@ int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat)
+ 
+ 	if (!is_lmac_valid(cgx, lmac_id))
+ 		return -ENODEV;
++
++	/* pass lmac as 0 for CGX_CMR_RX_STAT9-12 */
++	if (idx >= CGX_RX_STAT_GLOBAL_INDEX)
++		lmac_id = 0;
++
+ 	*rx_stat =  cgx_read(cgx, lmac_id, CGXX_CMRX_RX_STAT0 + (idx * 8));
+ 	return 0;
+ }
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
+index 6cc7a78968fc1c..74953f67a2bf9c 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
+@@ -533,7 +533,8 @@ static int cn10k_mcs_write_tx_secy(struct otx2_nic *pfvf,
+ 	if (sw_tx_sc->encrypt)
+ 		sectag_tci |= (MCS_TCI_E | MCS_TCI_C);
+ 
+-	policy = FIELD_PREP(MCS_TX_SECY_PLCY_MTU, secy->netdev->mtu);
++	policy = FIELD_PREP(MCS_TX_SECY_PLCY_MTU,
++			    pfvf->netdev->mtu + OTX2_ETH_HLEN);
+ 	/* Write SecTag excluding AN bits(1..0) */
+ 	policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_TCI, sectag_tci >> 2);
+ 	policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_OFFSET, tag_offset);
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+index f27a3456ae64fe..5b45fd78d2825e 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+@@ -364,6 +364,7 @@ struct otx2_flow_config {
+ 	struct list_head	flow_list_tc;
+ 	u8			ucast_flt_cnt;
+ 	bool			ntuple;
++	u16			ntuple_cnt;
+ };
+ 
+ struct dev_hw_ops {
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
+index 53f14aa944bdba..aaea19345750ed 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
+@@ -41,6 +41,7 @@ static int otx2_dl_mcam_count_set(struct devlink *devlink, u32 id,
+ 	if (!pfvf->flow_cfg)
+ 		return 0;
+ 
++	pfvf->flow_cfg->ntuple_cnt = ctx->val.vu16;
+ 	otx2_alloc_mcam_entries(pfvf, ctx->val.vu16);
+ 
+ 	return 0;
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
+index 58720a161ee24a..2750326bfcf8b1 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
+@@ -252,7 +252,7 @@ int otx2_mcam_entry_init(struct otx2_nic *pfvf)
+ 	mutex_unlock(&pfvf->mbox.lock);
+ 
+ 	/* Allocate entries for Ntuple filters */
+-	count = otx2_alloc_mcam_entries(pfvf, OTX2_DEFAULT_FLOWCOUNT);
++	count = otx2_alloc_mcam_entries(pfvf, flow_cfg->ntuple_cnt);
+ 	if (count <= 0) {
+ 		otx2_clear_ntuple_flow_info(pfvf, flow_cfg);
+ 		return 0;
+@@ -312,6 +312,7 @@ int otx2_mcam_flow_init(struct otx2_nic *pf)
+ 	INIT_LIST_HEAD(&pf->flow_cfg->flow_list_tc);
+ 
+ 	pf->flow_cfg->ucast_flt_cnt = OTX2_DEFAULT_UNICAST_FLOWS;
++	pf->flow_cfg->ntuple_cnt = OTX2_DEFAULT_FLOWCOUNT;
+ 
+ 	/* Allocate bare minimum number of MCAM entries needed for
+ 	 * unicast and ntuple filters.
+diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+index 0a13f7c4684e0e..272f178906d61f 100644
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -4685,7 +4685,7 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
+ 	}
+ 
+ 	if (mtk_is_netsys_v3_or_greater(mac->hw) &&
+-	    MTK_HAS_CAPS(mac->hw->soc->caps, MTK_ESW_BIT) &&
++	    MTK_HAS_CAPS(mac->hw->soc->caps, MTK_ESW) &&
+ 	    id == MTK_GMAC1_ID) {
+ 		mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE |
+ 						       MAC_SYM_PAUSE |
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 1c087fa1ca269b..3e9ad3cb8121df 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -4344,6 +4344,10 @@ static netdev_features_t mlx5e_fix_uplink_rep_features(struct net_device *netdev
+ 	if (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
+ 		netdev_warn(netdev, "Disabling HW_VLAN CTAG FILTERING, not supported in switchdev mode\n");
+ 
++	features &= ~NETIF_F_HW_MACSEC;
++	if (netdev->features & NETIF_F_HW_MACSEC)
++		netdev_warn(netdev, "Disabling HW MACsec offload, not supported in switchdev mode\n");
++
+ 	return features;
+ }
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+index 7d6d859cef3f9f..511cd92e0e3e7c 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+@@ -3014,6 +3014,9 @@ static int mlxsw_sp_neigh_rif_made_sync(struct mlxsw_sp *mlxsw_sp,
+ 		.rif = rif,
+ 	};
+ 
++	if (!mlxsw_sp_dev_lower_is_port(mlxsw_sp_rif_dev(rif)))
++		return 0;
++
+ 	neigh_for_each(&arp_tbl, mlxsw_sp_neigh_rif_made_sync_each, &rms);
+ 	if (rms.err)
+ 		goto err_arp;
+diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
+index 99df00c30b8c6c..b5d744d2586f72 100644
+--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
++++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
+@@ -203,7 +203,7 @@ static struct pci_driver qede_pci_driver = {
+ };
+ 
+ static struct qed_eth_cb_ops qede_ll_ops = {
+-	{
++	.common = {
+ #ifdef CONFIG_RFS_ACCEL
+ 		.arfs_filter_op = qede_arfs_filter_op,
+ #endif
+diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+index 28d24d59efb84f..d57b976b904095 100644
+--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+@@ -1484,8 +1484,11 @@ static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *adapter, u8 cmd_o
+ 	}
+ 
+ 	cmd_op = (cmd.rsp.arg[0] & 0xff);
+-	if (cmd.rsp.arg[0] >> 25 == 2)
+-		return 2;
++	if (cmd.rsp.arg[0] >> 25 == 2) {
++		ret = 2;
++		goto out;
++	}
++
+ 	if (cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT)
+ 		set_bit(QLC_BC_VF_STATE, &vf->state);
+ 	else
+diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
+index e690b95b1bbb4c..a4963766fd996b 100644
+--- a/drivers/net/hyperv/hyperv_net.h
++++ b/drivers/net/hyperv/hyperv_net.h
+@@ -158,7 +158,6 @@ struct hv_netvsc_packet {
+ 	u8 cp_partial; /* partial copy into send buffer */
+ 
+ 	u8 rmsg_size; /* RNDIS header and PPI size */
+-	u8 rmsg_pgcnt; /* page count of RNDIS header and PPI */
+ 	u8 page_buf_cnt;
+ 
+ 	u16 q_idx;
+@@ -893,6 +892,18 @@ struct nvsp_message {
+ 				 sizeof(struct nvsp_message))
+ #define NETVSC_MIN_IN_MSG_SIZE sizeof(struct vmpacket_descriptor)
+ 
++/* Maximum # of contiguous data ranges that can make up a transmitted packet.
++ * Typically it's the max SKB fragments plus 2 for the rndis packet and the
++ * linear portion of the SKB. But if MAX_SKB_FRAGS is large, the value may
++ * need to be limited to MAX_PAGE_BUFFER_COUNT, which is the max # of entries
++ * in a GPA direct packet sent to netvsp over VMBus.
++ */
++#if MAX_SKB_FRAGS + 2 < MAX_PAGE_BUFFER_COUNT
++#define MAX_DATA_RANGES (MAX_SKB_FRAGS + 2)
++#else
++#define MAX_DATA_RANGES MAX_PAGE_BUFFER_COUNT
++#endif
++
+ /* Estimated requestor size:
+  * out_ring_size/min_out_msg_size + in_ring_size/min_in_msg_size
+  */
+diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
+index 2b6ec979a62f21..807465dd4c8e34 100644
+--- a/drivers/net/hyperv/netvsc.c
++++ b/drivers/net/hyperv/netvsc.c
+@@ -947,8 +947,7 @@ static void netvsc_copy_to_send_buf(struct netvsc_device *net_device,
+ 		     + pend_size;
+ 	int i;
+ 	u32 padding = 0;
+-	u32 page_count = packet->cp_partial ? packet->rmsg_pgcnt :
+-		packet->page_buf_cnt;
++	u32 page_count = packet->cp_partial ? 1 : packet->page_buf_cnt;
+ 	u32 remain;
+ 
+ 	/* Add padding */
+@@ -1049,6 +1048,42 @@ static int netvsc_dma_map(struct hv_device *hv_dev,
+ 	return 0;
+ }
+ 
++/* Build an "array" of mpb entries describing the data to be transferred
++ * over VMBus. After the desc header fields, each "array" entry is variable
++ * size, and each entry starts after the end of the previous entry. The
++ * "offset" and "len" fields for each entry imply the size of the entry.
++ *
++ * The pfns are in units of HV_HYP_PAGE_SIZE, because all communication with Hyper-V
++ * uses that granularity, even if the system page size of the guest is larger.
++ * Each entry in the input "pb" array must describe a contiguous range of
++ * guest physical memory so that the pfns are sequential if the range crosses
++ * a page boundary. The offset field must be < HV_HYP_PAGE_SIZE.
++ */
++static inline void netvsc_build_mpb_array(struct hv_page_buffer *pb,
++				u32 page_buffer_count,
++				struct vmbus_packet_mpb_array *desc,
++				u32 *desc_size)
++{
++	struct hv_mpb_array *mpb_entry = &desc->range;
++	int i, j;
++
++	for (i = 0; i < page_buffer_count; i++) {
++		u32 offset = pb[i].offset;
++		u32 len = pb[i].len;
++
++		mpb_entry->offset = offset;
++		mpb_entry->len = len;
++
++		for (j = 0; j < HVPFN_UP(offset + len); j++)
++			mpb_entry->pfn_array[j] = pb[i].pfn + j;
++
++		mpb_entry = (struct hv_mpb_array *)&mpb_entry->pfn_array[j];
++	}
++
++	desc->rangecount = page_buffer_count;
++	*desc_size = (char *)mpb_entry - (char *)desc;
++}
++
+ static inline int netvsc_send_pkt(
+ 	struct hv_device *device,
+ 	struct hv_netvsc_packet *packet,
+@@ -1091,8 +1126,11 @@ static inline int netvsc_send_pkt(
+ 
+ 	packet->dma_range = NULL;
+ 	if (packet->page_buf_cnt) {
++		struct vmbus_channel_packet_page_buffer desc;
++		u32 desc_size;
++
+ 		if (packet->cp_partial)
+-			pb += packet->rmsg_pgcnt;
++			pb++;
+ 
+ 		ret = netvsc_dma_map(ndev_ctx->device_ctx, packet, pb);
+ 		if (ret) {
+@@ -1100,11 +1138,12 @@ static inline int netvsc_send_pkt(
+ 			goto exit;
+ 		}
+ 
+-		ret = vmbus_sendpacket_pagebuffer(out_channel,
+-						  pb, packet->page_buf_cnt,
+-						  &nvmsg, sizeof(nvmsg),
+-						  req_id);
+-
++		netvsc_build_mpb_array(pb, packet->page_buf_cnt,
++				(struct vmbus_packet_mpb_array *)&desc,
++				 &desc_size);
++		ret = vmbus_sendpacket_mpb_desc(out_channel,
++				(struct vmbus_packet_mpb_array *)&desc,
++				desc_size, &nvmsg, sizeof(nvmsg), req_id);
+ 		if (ret)
+ 			netvsc_dma_unmap(ndev_ctx->device_ctx, packet);
+ 	} else {
+@@ -1253,7 +1292,7 @@ int netvsc_send(struct net_device *ndev,
+ 		packet->send_buf_index = section_index;
+ 
+ 		if (packet->cp_partial) {
+-			packet->page_buf_cnt -= packet->rmsg_pgcnt;
++			packet->page_buf_cnt--;
+ 			packet->total_data_buflen = msd_len + packet->rmsg_size;
+ 		} else {
+ 			packet->page_buf_cnt = 0;
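netvsc_build_mpb_array() emits variable-size entries, each a small header followed by one PFN per 4 KiB page the range touches, packed back to back. A standalone model, not part of the patch, that computes the entry sizes and total payload for two hypothetical ranges (the structs are simplified from the Hyper-V definitions):

  #include <stdint.h>
  #include <stdio.h>

  #define HV_HYP_PAGE_SIZE 4096u
  #define HVPFN_UP(x) (((x) + HV_HYP_PAGE_SIZE - 1) / HV_HYP_PAGE_SIZE)

  struct page_buffer { uint32_t offset, len; uint64_t pfn; };

  int main(void)
  {
          /* Two hypothetical ranges: one within a page, one crossing two. */
          struct page_buffer pb[] = {
                  { .offset = 128,  .len = 200,  .pfn = 0x1000 },
                  { .offset = 3900, .len = 5000, .pfn = 0x2000 },
          };
          uint32_t desc_size = 0;

          for (unsigned i = 0; i < 2; i++) {
                  /* pages spanned = pages covering [0, offset + len) */
                  uint32_t npages = HVPFN_UP(pb[i].offset + pb[i].len);
                  /* entry = offset + len headers (u32 each) + PFN array */
                  uint32_t entry = 2 * sizeof(uint32_t)
                                 + npages * sizeof(uint64_t);

                  printf("range %u: %u page(s), entry %u bytes\n",
                         i, npages, entry);
                  desc_size += entry;
          }
          printf("total mpb payload = %u bytes\n", desc_size);
          return 0;
  }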
+diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
+index 23180f7b67b6aa..8ec497023224a4 100644
+--- a/drivers/net/hyperv/netvsc_drv.c
++++ b/drivers/net/hyperv/netvsc_drv.c
+@@ -325,43 +325,10 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
+ 	return txq;
+ }
+ 
+-static u32 fill_pg_buf(unsigned long hvpfn, u32 offset, u32 len,
+-		       struct hv_page_buffer *pb)
+-{
+-	int j = 0;
+-
+-	hvpfn += offset >> HV_HYP_PAGE_SHIFT;
+-	offset = offset & ~HV_HYP_PAGE_MASK;
+-
+-	while (len > 0) {
+-		unsigned long bytes;
+-
+-		bytes = HV_HYP_PAGE_SIZE - offset;
+-		if (bytes > len)
+-			bytes = len;
+-		pb[j].pfn = hvpfn;
+-		pb[j].offset = offset;
+-		pb[j].len = bytes;
+-
+-		offset += bytes;
+-		len -= bytes;
+-
+-		if (offset == HV_HYP_PAGE_SIZE && len) {
+-			hvpfn++;
+-			offset = 0;
+-			j++;
+-		}
+-	}
+-
+-	return j + 1;
+-}
+-
+ static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
+ 			   struct hv_netvsc_packet *packet,
+ 			   struct hv_page_buffer *pb)
+ {
+-	u32 slots_used = 0;
+-	char *data = skb->data;
+ 	int frags = skb_shinfo(skb)->nr_frags;
+ 	int i;
+ 
+@@ -370,28 +337,27 @@ static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
+ 	 * 2. skb linear data
+ 	 * 3. skb fragment data
+ 	 */
+-	slots_used += fill_pg_buf(virt_to_hvpfn(hdr),
+-				  offset_in_hvpage(hdr),
+-				  len,
+-				  &pb[slots_used]);
+ 
++	pb[0].offset = offset_in_hvpage(hdr);
++	pb[0].len = len;
++	pb[0].pfn = virt_to_hvpfn(hdr);
+ 	packet->rmsg_size = len;
+-	packet->rmsg_pgcnt = slots_used;
+ 
+-	slots_used += fill_pg_buf(virt_to_hvpfn(data),
+-				  offset_in_hvpage(data),
+-				  skb_headlen(skb),
+-				  &pb[slots_used]);
++	pb[1].offset = offset_in_hvpage(skb->data);
++	pb[1].len = skb_headlen(skb);
++	pb[1].pfn = virt_to_hvpfn(skb->data);
+ 
+ 	for (i = 0; i < frags; i++) {
+ 		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
++		struct hv_page_buffer *cur_pb = &pb[i + 2];
++		u64 pfn = page_to_hvpfn(skb_frag_page(frag));
++		u32 offset = skb_frag_off(frag);
+ 
+-		slots_used += fill_pg_buf(page_to_hvpfn(skb_frag_page(frag)),
+-					  skb_frag_off(frag),
+-					  skb_frag_size(frag),
+-					  &pb[slots_used]);
++		cur_pb->offset = offset_in_hvpage(offset);
++		cur_pb->len = skb_frag_size(frag);
++		cur_pb->pfn = pfn + (offset >> HV_HYP_PAGE_SHIFT);
+ 	}
+-	return slots_used;
++	return frags + 2;
+ }
+ 
+ static int count_skb_frag_slots(struct sk_buff *skb)
+@@ -482,7 +448,7 @@ static int netvsc_xmit(struct sk_buff *skb, struct net_device *net, bool xdp_tx)
+ 	struct net_device *vf_netdev;
+ 	u32 rndis_msg_size;
+ 	u32 hash;
+-	struct hv_page_buffer pb[MAX_PAGE_BUFFER_COUNT];
++	struct hv_page_buffer pb[MAX_DATA_RANGES];
+ 
+ 	/* If VF is present and up then redirect packets to it.
+ 	 * Skip the VF if it is marked down or has no carrier.
+diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
+index ecc2128ca9b72a..e457f809fe3110 100644
+--- a/drivers/net/hyperv/rndis_filter.c
++++ b/drivers/net/hyperv/rndis_filter.c
+@@ -225,8 +225,7 @@ static int rndis_filter_send_request(struct rndis_device *dev,
+ 				  struct rndis_request *req)
+ {
+ 	struct hv_netvsc_packet *packet;
+-	struct hv_page_buffer page_buf[2];
+-	struct hv_page_buffer *pb = page_buf;
++	struct hv_page_buffer pb;
+ 	int ret;
+ 
+ 	/* Setup the packet to send it */
+@@ -235,27 +234,14 @@ static int rndis_filter_send_request(struct rndis_device *dev,
+ 	packet->total_data_buflen = req->request_msg.msg_len;
+ 	packet->page_buf_cnt = 1;
+ 
+-	pb[0].pfn = virt_to_phys(&req->request_msg) >>
+-					HV_HYP_PAGE_SHIFT;
+-	pb[0].len = req->request_msg.msg_len;
+-	pb[0].offset = offset_in_hvpage(&req->request_msg);
+-
+-	/* Add one page_buf when request_msg crossing page boundary */
+-	if (pb[0].offset + pb[0].len > HV_HYP_PAGE_SIZE) {
+-		packet->page_buf_cnt++;
+-		pb[0].len = HV_HYP_PAGE_SIZE -
+-			pb[0].offset;
+-		pb[1].pfn = virt_to_phys((void *)&req->request_msg
+-			+ pb[0].len) >> HV_HYP_PAGE_SHIFT;
+-		pb[1].offset = 0;
+-		pb[1].len = req->request_msg.msg_len -
+-			pb[0].len;
+-	}
++	pb.pfn = virt_to_phys(&req->request_msg) >> HV_HYP_PAGE_SHIFT;
++	pb.len = req->request_msg.msg_len;
++	pb.offset = offset_in_hvpage(&req->request_msg);
+ 
+ 	trace_rndis_send(dev->ndev, 0, &req->request_msg);
+ 
+ 	rcu_read_lock_bh();
+-	ret = netvsc_send(dev->ndev, packet, NULL, pb, NULL, false);
++	ret = netvsc_send(dev->ndev, packet, NULL, &pb, NULL, false);
+ 	rcu_read_unlock_bh();
+ 
+ 	return ret;
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index fbd1150c33cce7..6d36cb204f9bc5 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -5547,7 +5547,7 @@ static int virtnet_rq_bind_xsk_pool(struct virtnet_info *vi, struct receive_queu
+ 
+ 	virtnet_rx_pause(vi, rq);
+ 
+-	err = virtqueue_reset(rq->vq, virtnet_rq_unmap_free_buf);
++	err = virtqueue_reset(rq->vq, virtnet_rq_unmap_free_buf, NULL);
+ 	if (err) {
+ 		netdev_err(vi->dev, "reset rx fail: rx queue index: %d err: %d\n", qindex, err);
+ 
+@@ -5576,7 +5576,8 @@ static int virtnet_sq_bind_xsk_pool(struct virtnet_info *vi,
+ 
+ 	virtnet_tx_pause(vi, sq);
+ 
+-	err = virtqueue_reset(sq->vq, virtnet_sq_free_unused_buf);
++	err = virtqueue_reset(sq->vq, virtnet_sq_free_unused_buf,
++			      virtnet_sq_free_unused_buf_done);
+ 	if (err) {
+ 		netdev_err(vi->dev, "reset tx fail: tx queue index: %d err: %d\n", qindex, err);
+ 		pool = NULL;
+diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
+index 5f46d6daeaa7c5..8940f8bb7bb509 100644
+--- a/drivers/net/wireless/mediatek/mt76/dma.c
++++ b/drivers/net/wireless/mediatek/mt76/dma.c
+@@ -999,6 +999,7 @@ void mt76_dma_cleanup(struct mt76_dev *dev)
+ 	int i;
+ 
+ 	mt76_worker_disable(&dev->tx_worker);
++	napi_disable(&dev->tx_napi);
+ 	netif_napi_del(&dev->tx_napi);
+ 
+ 	for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index 83ee433b694150..265b3608ae26ef 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -390,7 +390,7 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, __le32 *dbbuf_db,
+  * as it only leads to a small amount of wasted memory for the lifetime of
+  * the I/O.
+  */
+-static int nvme_pci_npages_prp(void)
++static __always_inline int nvme_pci_npages_prp(void)
+ {
+ 	unsigned max_bytes = (NVME_MAX_KB_SZ * 1024) + NVME_CTRL_PAGE_SIZE;
+ 	unsigned nprps = DIV_ROUND_UP(max_bytes, NVME_CTRL_PAGE_SIZE);
+@@ -1202,7 +1202,9 @@ static void nvme_poll_irqdisable(struct nvme_queue *nvmeq)
+ 	WARN_ON_ONCE(test_bit(NVMEQ_POLLED, &nvmeq->flags));
+ 
+ 	disable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
++	spin_lock(&nvmeq->cq_poll_lock);
+ 	nvme_poll_cq(nvmeq, NULL);
++	spin_unlock(&nvmeq->cq_poll_lock);
+ 	enable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
+ }
+ 
+diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
+index 58e1233051526a..513fd35dcaa959 100644
+--- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c
++++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
+@@ -107,7 +107,6 @@ struct rcar_gen3_phy {
+ 	struct rcar_gen3_chan *ch;
+ 	u32 int_enable_bits;
+ 	bool initialized;
+-	bool otg_initialized;
+ 	bool powered;
+ };
+ 
+@@ -320,16 +319,15 @@ static bool rcar_gen3_is_any_rphy_initialized(struct rcar_gen3_chan *ch)
+ 	return false;
+ }
+ 
+-static bool rcar_gen3_needs_init_otg(struct rcar_gen3_chan *ch)
++static bool rcar_gen3_is_any_otg_rphy_initialized(struct rcar_gen3_chan *ch)
+ {
+-	int i;
+-
+-	for (i = 0; i < NUM_OF_PHYS; i++) {
+-		if (ch->rphys[i].otg_initialized)
+-			return false;
++	for (enum rcar_gen3_phy_index i = PHY_INDEX_BOTH_HC; i <= PHY_INDEX_EHCI;
++	     i++) {
++		if (ch->rphys[i].initialized)
++			return true;
+ 	}
+ 
+-	return true;
++	return false;
+ }
+ 
+ static bool rcar_gen3_are_all_rphys_power_off(struct rcar_gen3_chan *ch)
+@@ -351,7 +349,7 @@ static ssize_t role_store(struct device *dev, struct device_attribute *attr,
+ 	bool is_b_device;
+ 	enum phy_mode cur_mode, new_mode;
+ 
+-	if (!ch->is_otg_channel || !rcar_gen3_is_any_rphy_initialized(ch))
++	if (!ch->is_otg_channel || !rcar_gen3_is_any_otg_rphy_initialized(ch))
+ 		return -EIO;
+ 
+ 	if (sysfs_streq(buf, "host"))
+@@ -389,7 +387,7 @@ static ssize_t role_show(struct device *dev, struct device_attribute *attr,
+ {
+ 	struct rcar_gen3_chan *ch = dev_get_drvdata(dev);
+ 
+-	if (!ch->is_otg_channel || !rcar_gen3_is_any_rphy_initialized(ch))
++	if (!ch->is_otg_channel || !rcar_gen3_is_any_otg_rphy_initialized(ch))
+ 		return -EIO;
+ 
+ 	return sprintf(buf, "%s\n", rcar_gen3_is_host(ch) ? "host" :
+@@ -402,6 +400,9 @@ static void rcar_gen3_init_otg(struct rcar_gen3_chan *ch)
+ 	void __iomem *usb2_base = ch->base;
+ 	u32 val;
+ 
++	if (!ch->is_otg_channel || rcar_gen3_is_any_otg_rphy_initialized(ch))
++		return;
++
+ 	/* Should not use functions of read-modify-write a register */
+ 	val = readl(usb2_base + USB2_LINECTRL1);
+ 	val = (val & ~USB2_LINECTRL1_DP_RPD) | USB2_LINECTRL1_DPRPD_EN |
+@@ -462,16 +463,16 @@ static int rcar_gen3_phy_usb2_init(struct phy *p)
+ 	val = readl(usb2_base + USB2_INT_ENABLE);
+ 	val |= USB2_INT_ENABLE_UCOM_INTEN | rphy->int_enable_bits;
+ 	writel(val, usb2_base + USB2_INT_ENABLE);
+-	writel(USB2_SPD_RSM_TIMSET_INIT, usb2_base + USB2_SPD_RSM_TIMSET);
+-	writel(USB2_OC_TIMSET_INIT, usb2_base + USB2_OC_TIMSET);
+-
+-	/* Initialize otg part */
+-	if (channel->is_otg_channel) {
+-		if (rcar_gen3_needs_init_otg(channel))
+-			rcar_gen3_init_otg(channel);
+-		rphy->otg_initialized = true;
++
++	if (!rcar_gen3_is_any_rphy_initialized(channel)) {
++		writel(USB2_SPD_RSM_TIMSET_INIT, usb2_base + USB2_SPD_RSM_TIMSET);
++		writel(USB2_OC_TIMSET_INIT, usb2_base + USB2_OC_TIMSET);
+ 	}
+ 
++	/* Initialize otg part (only if we initialize a PHY with IRQs). */
++	if (rphy->int_enable_bits)
++		rcar_gen3_init_otg(channel);
++
+ 	rphy->initialized = true;
+ 
+ 	return 0;
+@@ -486,9 +487,6 @@ static int rcar_gen3_phy_usb2_exit(struct phy *p)
+ 
+ 	rphy->initialized = false;
+ 
+-	if (channel->is_otg_channel)
+-		rphy->otg_initialized = false;
+-
+ 	val = readl(usb2_base + USB2_INT_ENABLE);
+ 	val &= ~rphy->int_enable_bits;
+ 	if (!rcar_gen3_is_any_rphy_initialized(channel))
+diff --git a/drivers/phy/tegra/xusb-tegra186.c b/drivers/phy/tegra/xusb-tegra186.c
+index fae6242aa730e0..23a23f2d64e586 100644
+--- a/drivers/phy/tegra/xusb-tegra186.c
++++ b/drivers/phy/tegra/xusb-tegra186.c
+@@ -237,6 +237,8 @@
+ #define   DATA0_VAL_PD				BIT(1)
+ #define   USE_XUSB_AO				BIT(4)
+ 
++#define TEGRA_UTMI_PAD_MAX 4
++
+ #define TEGRA186_LANE(_name, _offset, _shift, _mask, _type)		\
+ 	{								\
+ 		.name = _name,						\
+@@ -269,7 +271,7 @@ struct tegra186_xusb_padctl {
+ 
+ 	/* UTMI bias and tracking */
+ 	struct clk *usb2_trk_clk;
+-	unsigned int bias_pad_enable;
++	DECLARE_BITMAP(utmi_pad_enabled, TEGRA_UTMI_PAD_MAX);
+ 
+ 	/* padctl context */
+ 	struct tegra186_xusb_padctl_context context;
+@@ -603,12 +605,8 @@ static void tegra186_utmi_bias_pad_power_on(struct tegra_xusb_padctl *padctl)
+ 	u32 value;
+ 	int err;
+ 
+-	mutex_lock(&padctl->lock);
+-
+-	if (priv->bias_pad_enable++ > 0) {
+-		mutex_unlock(&padctl->lock);
++	if (!bitmap_empty(priv->utmi_pad_enabled, TEGRA_UTMI_PAD_MAX))
+ 		return;
+-	}
+ 
+ 	err = clk_prepare_enable(priv->usb2_trk_clk);
+ 	if (err < 0)
+@@ -658,8 +656,6 @@ static void tegra186_utmi_bias_pad_power_on(struct tegra_xusb_padctl *padctl)
+ 	} else {
+ 		clk_disable_unprepare(priv->usb2_trk_clk);
+ 	}
+-
+-	mutex_unlock(&padctl->lock);
+ }
+ 
+ static void tegra186_utmi_bias_pad_power_off(struct tegra_xusb_padctl *padctl)
+@@ -667,17 +663,8 @@ static void tegra186_utmi_bias_pad_power_off(struct tegra_xusb_padctl *padctl)
+ 	struct tegra186_xusb_padctl *priv = to_tegra186_xusb_padctl(padctl);
+ 	u32 value;
+ 
+-	mutex_lock(&padctl->lock);
+-
+-	if (WARN_ON(priv->bias_pad_enable == 0)) {
+-		mutex_unlock(&padctl->lock);
+-		return;
+-	}
+-
+-	if (--priv->bias_pad_enable > 0) {
+-		mutex_unlock(&padctl->lock);
++	if (!bitmap_empty(priv->utmi_pad_enabled, TEGRA_UTMI_PAD_MAX))
+ 		return;
+-	}
+ 
+ 	value = padctl_readl(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL1);
+ 	value |= USB2_PD_TRK;
+@@ -690,13 +677,13 @@ static void tegra186_utmi_bias_pad_power_off(struct tegra_xusb_padctl *padctl)
+ 		clk_disable_unprepare(priv->usb2_trk_clk);
+ 	}
+ 
+-	mutex_unlock(&padctl->lock);
+ }
+ 
+ static void tegra186_utmi_pad_power_on(struct phy *phy)
+ {
+ 	struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+ 	struct tegra_xusb_padctl *padctl = lane->pad->padctl;
++	struct tegra186_xusb_padctl *priv = to_tegra186_xusb_padctl(padctl);
+ 	struct tegra_xusb_usb2_port *port;
+ 	struct device *dev = padctl->dev;
+ 	unsigned int index = lane->index;
+@@ -705,9 +692,16 @@ static void tegra186_utmi_pad_power_on(struct phy *phy)
+ 	if (!phy)
+ 		return;
+ 
++	mutex_lock(&padctl->lock);
++	if (test_bit(index, priv->utmi_pad_enabled)) {
++		mutex_unlock(&padctl->lock);
++		return;
++	}
++
+ 	port = tegra_xusb_find_usb2_port(padctl, index);
+ 	if (!port) {
+ 		dev_err(dev, "no port found for USB2 lane %u\n", index);
++		mutex_unlock(&padctl->lock);
+ 		return;
+ 	}
+ 
+@@ -724,18 +718,28 @@ static void tegra186_utmi_pad_power_on(struct phy *phy)
+ 	value = padctl_readl(padctl, XUSB_PADCTL_USB2_OTG_PADX_CTL1(index));
+ 	value &= ~USB2_OTG_PD_DR;
+ 	padctl_writel(padctl, value, XUSB_PADCTL_USB2_OTG_PADX_CTL1(index));
++
++	set_bit(index, priv->utmi_pad_enabled);
++	mutex_unlock(&padctl->lock);
+ }
+ 
+ static void tegra186_utmi_pad_power_down(struct phy *phy)
+ {
+ 	struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+ 	struct tegra_xusb_padctl *padctl = lane->pad->padctl;
++	struct tegra186_xusb_padctl *priv = to_tegra186_xusb_padctl(padctl);
+ 	unsigned int index = lane->index;
+ 	u32 value;
+ 
+ 	if (!phy)
+ 		return;
+ 
++	mutex_lock(&padctl->lock);
++	if (!test_bit(index, priv->utmi_pad_enabled)) {
++		mutex_unlock(&padctl->lock);
++		return;
++	}
++
+ 	dev_dbg(padctl->dev, "power down UTMI pad %u\n", index);
+ 
+ 	value = padctl_readl(padctl, XUSB_PADCTL_USB2_OTG_PADX_CTL0(index));
+@@ -748,7 +752,11 @@ static void tegra186_utmi_pad_power_down(struct phy *phy)
+ 
+ 	udelay(2);
+ 
++	clear_bit(index, priv->utmi_pad_enabled);
++
+ 	tegra186_utmi_bias_pad_power_off(padctl);
++
++	mutex_unlock(&padctl->lock);
+ }
+ 
+ static int tegra186_xusb_padctl_vbus_override(struct tegra_xusb_padctl *padctl,
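
/*
 * Not part of the patch: a minimal sketch of the bitmap-based tracking the
 * tegra186 hunks above switch to, with hypothetical names. The shared bias
 * pad is powered only while at least one per-pad bit is set, and the bit
 * and the hardware state change under the same mutex, so the two cannot
 * fall out of sync the way an unlocked reference counter could.
 */
#include <linux/bitmap.h>
#include <linux/mutex.h>

#define NUM_PADS	4

struct pad_tracker {
	struct mutex lock;
	DECLARE_BITMAP(enabled, NUM_PADS);
};

static void pad_power_on(struct pad_tracker *t, unsigned int index)
{
	mutex_lock(&t->lock);
	if (!test_bit(index, t->enabled)) {
		if (bitmap_empty(t->enabled, NUM_PADS)) {
			/* first active pad: power on the shared bias pad */
		}
		set_bit(index, t->enabled);
	}
	mutex_unlock(&t->lock);
}

static void pad_power_off(struct pad_tracker *t, unsigned int index)
{
	mutex_lock(&t->lock);
	if (test_bit(index, t->enabled)) {
		clear_bit(index, t->enabled);
		if (bitmap_empty(t->enabled, NUM_PADS)) {
			/* last active pad gone: power off the bias pad */
		}
	}
	mutex_unlock(&t->lock);
}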
+diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c
+index 342f5ccf611d87..c758145e912df4 100644
+--- a/drivers/phy/tegra/xusb.c
++++ b/drivers/phy/tegra/xusb.c
+@@ -548,16 +548,16 @@ static int tegra_xusb_port_init(struct tegra_xusb_port *port,
+ 
+ 	err = dev_set_name(&port->dev, "%s-%u", name, index);
+ 	if (err < 0)
+-		goto unregister;
++		goto put_device;
+ 
+ 	err = device_add(&port->dev);
+ 	if (err < 0)
+-		goto unregister;
++		goto put_device;
+ 
+ 	return 0;
+ 
+-unregister:
+-	device_unregister(&port->dev);
++put_device:
++	put_device(&port->dev);
+ 	return err;
+ }
+ 
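
/*
 * Not part of the patch: a sketch of the struct device error-handling rule
 * the xusb.c hunk above applies. After device_initialize(), failures before
 * or inside device_add() must drop the reference with put_device(), which
 * runs ->release() and frees the object; device_unregister() is only
 * correct once device_add() has succeeded.
 */
#include <linux/device.h>

static int register_child(struct device *dev)
{
	int err;

	device_initialize(dev);

	err = dev_set_name(dev, "child-%u", 0u);
	if (err < 0)
		goto put;

	err = device_add(dev);
	if (err < 0)
		goto put;

	return 0;

put:
	put_device(dev);
	return err;
}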
+diff --git a/drivers/platform/x86/amd/pmc/pmc-quirks.c b/drivers/platform/x86/amd/pmc/pmc-quirks.c
+index b4f49720c87f62..2e3f6fc67c568d 100644
+--- a/drivers/platform/x86/amd/pmc/pmc-quirks.c
++++ b/drivers/platform/x86/amd/pmc/pmc-quirks.c
+@@ -217,6 +217,13 @@ static const struct dmi_system_id fwbug_list[] = {
+ 			DMI_MATCH(DMI_BIOS_VERSION, "03.05"),
+ 		}
+ 	},
++	{
++		.ident = "MECHREVO Wujie 14X (GX4HRXL)",
++		.driver_data = &quirk_spurious_8042,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_NAME, "WUJIE14-GX4HRXL"),
++		}
++	},
+ 	{}
+ };
+ 
+diff --git a/drivers/platform/x86/amd/pmf/tee-if.c b/drivers/platform/x86/amd/pmf/tee-if.c
+index cb5abab2210a7b..b6bcc1d57f9682 100644
+--- a/drivers/platform/x86/amd/pmf/tee-if.c
++++ b/drivers/platform/x86/amd/pmf/tee-if.c
+@@ -334,6 +334,11 @@ static int amd_pmf_start_policy_engine(struct amd_pmf_dev *dev)
+ 	return 0;
+ }
+ 
++static inline bool amd_pmf_pb_valid(struct amd_pmf_dev *dev)
++{
++	return memchr_inv(dev->policy_buf, 0xff, dev->policy_sz);
++}
++
+ #ifdef CONFIG_AMD_PMF_DEBUG
+ static void amd_pmf_hex_dump_pb(struct amd_pmf_dev *dev)
+ {
+@@ -361,12 +366,22 @@ static ssize_t amd_pmf_get_pb_data(struct file *filp, const char __user *buf,
+ 	dev->policy_buf = new_policy_buf;
+ 	dev->policy_sz = length;
+ 
++	if (!amd_pmf_pb_valid(dev)) {
++		ret = -EINVAL;
++		goto cleanup;
++	}
++
+ 	amd_pmf_hex_dump_pb(dev);
+ 	ret = amd_pmf_start_policy_engine(dev);
+ 	if (ret < 0)
+-		return ret;
++		goto cleanup;
+ 
+ 	return length;
++
++cleanup:
++	kfree(dev->policy_buf);
++	dev->policy_buf = NULL;
++	return ret;
+ }
+ 
+ static const struct file_operations pb_fops = {
+@@ -528,6 +543,12 @@ int amd_pmf_init_smart_pc(struct amd_pmf_dev *dev)
+ 
+ 	memcpy_fromio(dev->policy_buf, dev->policy_base, dev->policy_sz);
+ 
++	if (!amd_pmf_pb_valid(dev)) {
++		dev_info(dev->dev, "No Smart PC policy present\n");
++		ret = -EINVAL;
++		goto err_free_policy;
++	}
++
+ 	amd_pmf_hex_dump_pb(dev);
+ 
+ 	dev->prev_data = kzalloc(sizeof(*dev->prev_data), GFP_KERNEL);
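
/*
 * Not part of the patch: how the memchr_inv() check above works, in
 * isolation. memchr_inv(buf, c, len) returns a pointer to the first byte
 * that differs from c, or NULL when every byte equals c, so an all-0xff
 * (erased or absent) policy buffer makes amd_pmf_pb_valid() return false.
 */
#include <linux/string.h>
#include <linux/types.h>

static bool buffer_has_real_content(const void *buf, size_t len)
{
	return memchr_inv(buf, 0xff, len) != NULL;
}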
+diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
+index 1101e5b2488e52..a1cff9ff35a929 100644
+--- a/drivers/platform/x86/asus-wmi.c
++++ b/drivers/platform/x86/asus-wmi.c
+@@ -4795,7 +4795,8 @@ static int asus_wmi_add(struct platform_device *pdev)
+ 		goto fail_leds;
+ 
+ 	asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_WLAN, &result);
+-	if (result & (ASUS_WMI_DSTS_PRESENCE_BIT | ASUS_WMI_DSTS_USER_BIT))
++	if ((result & (ASUS_WMI_DSTS_PRESENCE_BIT | ASUS_WMI_DSTS_USER_BIT)) ==
++	    (ASUS_WMI_DSTS_PRESENCE_BIT | ASUS_WMI_DSTS_USER_BIT))
+ 		asus->driver->wlan_ctrl_by_user = 1;
+ 
+ 	if (!(asus->driver->wlan_ctrl_by_user && ashs_present())) {
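
/*
 * Not part of the patch: the multi-bit mask pitfall fixed above, as a
 * standalone C program. "result & mask" is true when EITHER bit is set;
 * requiring both bits needs "(result & mask) == mask".
 */
#include <assert.h>
#include <stdint.h>

#define PRESENCE_BIT	(1u << 0)
#define USER_BIT	(1u << 1)
#define BOTH_BITS	(PRESENCE_BIT | USER_BIT)

int main(void)
{
	uint32_t result = PRESENCE_BIT;	/* only one of the two bits set */

	assert(result & BOTH_BITS);		   /* buggy test: passes anyway */
	assert((result & BOTH_BITS) != BOTH_BITS); /* strict test: rejects */
	return 0;
}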
+diff --git a/drivers/regulator/max20086-regulator.c b/drivers/regulator/max20086-regulator.c
+index 59eb23d467ec05..198d45f8e88493 100644
+--- a/drivers/regulator/max20086-regulator.c
++++ b/drivers/regulator/max20086-regulator.c
+@@ -132,7 +132,7 @@ static int max20086_regulators_register(struct max20086 *chip)
+ 
+ static int max20086_parse_regulators_dt(struct max20086 *chip, bool *boot_on)
+ {
+-	struct of_regulator_match matches[MAX20086_MAX_REGULATORS] = { };
++	struct of_regulator_match *matches;
+ 	struct device_node *node;
+ 	unsigned int i;
+ 	int ret;
+@@ -143,6 +143,11 @@ static int max20086_parse_regulators_dt(struct max20086 *chip, bool *boot_on)
+ 		return -ENODEV;
+ 	}
+ 
++	matches = devm_kcalloc(chip->dev, chip->info->num_outputs,
++			       sizeof(*matches), GFP_KERNEL);
++	if (!matches)
++		return -ENOMEM;
++
+ 	for (i = 0; i < chip->info->num_outputs; ++i)
+ 		matches[i].name = max20086_output_names[i];
+ 
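
/*
 * Not part of the patch: the devm allocation pattern used above, reduced
 * to a sketch with hypothetical names. A fixed-size on-stack array of
 * of_regulator_match entries grows with the largest supported chip and
 * risks overflowing the kernel stack; a device-managed heap allocation is
 * sized to the actual output count and freed automatically on unbind, so
 * no error path needs an explicit kfree().
 */
#include <linux/device.h>
#include <linux/slab.h>

static void *alloc_matches(struct device *dev, unsigned int num_outputs,
			   size_t match_size)
{
	return devm_kcalloc(dev, num_outputs, match_size, GFP_KERNEL);
}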
+diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
+index 6ab27f4f487844..b8d42098f0b68e 100644
+--- a/drivers/scsi/sd_zbc.c
++++ b/drivers/scsi/sd_zbc.c
+@@ -169,6 +169,7 @@ static void *sd_zbc_alloc_report_buffer(struct scsi_disk *sdkp,
+ 					unsigned int nr_zones, size_t *buflen)
+ {
+ 	struct request_queue *q = sdkp->disk->queue;
++	unsigned int max_segments;
+ 	size_t bufsize;
+ 	void *buf;
+ 
+@@ -180,12 +181,15 @@ static void *sd_zbc_alloc_report_buffer(struct scsi_disk *sdkp,
+ 	 * Furthermore, since the report zone command cannot be split, make
+ 	 * sure that the allocated buffer can always be mapped by limiting the
+ 	 * number of pages allocated to the HBA max segments limit.
++	 * Since max segments can be larger than the max inline bio vectors,
++	 * further limit the allocated buffer to BIO_MAX_INLINE_VECS.
+ 	 */
+ 	nr_zones = min(nr_zones, sdkp->zone_info.nr_zones);
+ 	bufsize = roundup((nr_zones + 1) * 64, SECTOR_SIZE);
+ 	bufsize = min_t(size_t, bufsize,
+ 			queue_max_hw_sectors(q) << SECTOR_SHIFT);
+-	bufsize = min_t(size_t, bufsize, queue_max_segments(q) << PAGE_SHIFT);
++	max_segments = min(BIO_MAX_INLINE_VECS, queue_max_segments(q));
++	bufsize = min_t(size_t, bufsize, max_segments << PAGE_SHIFT);
+ 
+ 	while (bufsize >= SECTOR_SIZE) {
+ 		buf = kvzalloc(bufsize, GFP_KERNEL | __GFP_NORETRY);
+diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
+index b8186feccdf5aa..48b0ca92b44fb3 100644
+--- a/drivers/scsi/storvsc_drv.c
++++ b/drivers/scsi/storvsc_drv.c
+@@ -1819,6 +1819,7 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
+ 				return SCSI_MLQUEUE_DEVICE_BUSY;
+ 		}
+ 
++		payload->rangecount = 1;
+ 		payload->range.len = length;
+ 		payload->range.offset = offset_in_hvpg;
+ 
+diff --git a/drivers/spi/spi-loopback-test.c b/drivers/spi/spi-loopback-test.c
+index 31a878d9458d95..7740f94847a883 100644
+--- a/drivers/spi/spi-loopback-test.c
++++ b/drivers/spi/spi-loopback-test.c
+@@ -420,7 +420,7 @@ MODULE_LICENSE("GPL");
+ static void spi_test_print_hex_dump(char *pre, const void *ptr, size_t len)
+ {
+ 	/* limit the hex_dump */
+-	if (len < 1024) {
++	if (len <= 1024) {
+ 		print_hex_dump(KERN_INFO, pre,
+ 			       DUMP_PREFIX_OFFSET, 16, 1,
+ 			       ptr, len, 0);
+diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c
+index 5c7eb020943b8c..11db703a0dde3c 100644
+--- a/drivers/spi/spi-tegra114.c
++++ b/drivers/spi/spi-tegra114.c
+@@ -728,9 +728,9 @@ static int tegra_spi_set_hw_cs_timing(struct spi_device *spi)
+ 	u32 inactive_cycles;
+ 	u8 cs_state;
+ 
+-	if ((setup->unit && setup->unit != SPI_DELAY_UNIT_SCK) ||
+-	    (hold->unit && hold->unit != SPI_DELAY_UNIT_SCK) ||
+-	    (inactive->unit && inactive->unit != SPI_DELAY_UNIT_SCK)) {
++	if ((setup->value && setup->unit != SPI_DELAY_UNIT_SCK) ||
++	    (hold->value && hold->unit != SPI_DELAY_UNIT_SCK) ||
++	    (inactive->value && inactive->unit != SPI_DELAY_UNIT_SCK)) {
+ 		dev_err(&spi->dev,
+ 			"Invalid delay unit %d, should be SPI_DELAY_UNIT_SCK\n",
+ 			SPI_DELAY_UNIT_SCK);
+diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c
+index 8704095994118c..c2759bbeed8491 100644
+--- a/drivers/uio/uio_hv_generic.c
++++ b/drivers/uio/uio_hv_generic.c
+@@ -131,15 +131,12 @@ static void hv_uio_rescind(struct vmbus_channel *channel)
+ 	vmbus_device_unregister(channel->device_obj);
+ }
+ 
+-/* Sysfs API to allow mmap of the ring buffers
++/* Function used for mmap of ring buffer sysfs interface.
+  * The ring buffer is allocated as contiguous memory by vmbus_open
+  */
+-static int hv_uio_ring_mmap(struct file *filp, struct kobject *kobj,
+-			    struct bin_attribute *attr,
+-			    struct vm_area_struct *vma)
++static int
++hv_uio_ring_mmap(struct vmbus_channel *channel, struct vm_area_struct *vma)
+ {
+-	struct vmbus_channel *channel
+-		= container_of(kobj, struct vmbus_channel, kobj);
+ 	void *ring_buffer = page_address(channel->ringbuffer_page);
+ 
+ 	if (channel->state != CHANNEL_OPENED_STATE)
+@@ -149,15 +146,6 @@ static int hv_uio_ring_mmap(struct file *filp, struct kobject *kobj,
+ 			       channel->ringbuffer_pagecount << PAGE_SHIFT);
+ }
+ 
+-static const struct bin_attribute ring_buffer_bin_attr = {
+-	.attr = {
+-		.name = "ring",
+-		.mode = 0600,
+-	},
+-	.size = 2 * SZ_2M,
+-	.mmap = hv_uio_ring_mmap,
+-};
+-
+ /* Callback from VMBUS subsystem when new channel created. */
+ static void
+ hv_uio_new_channel(struct vmbus_channel *new_sc)
+@@ -178,8 +166,7 @@ hv_uio_new_channel(struct vmbus_channel *new_sc)
+ 	/* Disable interrupts on sub channel */
+ 	new_sc->inbound.ring_buffer->interrupt_mask = 1;
+ 	set_channel_read_mode(new_sc, HV_CALL_ISR);
+-
+-	ret = sysfs_create_bin_file(&new_sc->kobj, &ring_buffer_bin_attr);
++	ret = hv_create_ring_sysfs(new_sc, hv_uio_ring_mmap);
+ 	if (ret) {
+ 		dev_err(device, "sysfs create ring bin file failed; %d\n", ret);
+ 		vmbus_close(new_sc);
+@@ -350,10 +337,18 @@ hv_uio_probe(struct hv_device *dev,
+ 		goto fail_close;
+ 	}
+ 
+-	ret = sysfs_create_bin_file(&channel->kobj, &ring_buffer_bin_attr);
+-	if (ret)
+-		dev_notice(&dev->device,
+-			   "sysfs create ring bin file failed; %d\n", ret);
++	/*
++	 * This internally calls sysfs_update_group, which returns a non-zero value if it executes
++	 * before sysfs_create_group. This is expected as the 'ring' will be created later in
++	 * vmbus_device_register() -> vmbus_add_channel_kobj(). Thus, there is no need to check
++	 * the return value or print a warning.
++	 *
++	 * Creating/exposing sysfs in driver probe is not encouraged as it can lead to race
++	 * conditions with userspace. For backward compatibility, the "ring" sysfs file cannot be
++	 * removed or decoupled from the uio_hv_generic probe. Userspace programs can use the
++	 * inotify APIs to make sure that the ring file has been created.
++	 */
++	hv_create_ring_sysfs(channel, hv_uio_ring_mmap);
+ 
+ 	hv_set_drvdata(dev, pdata);
+ 
+@@ -375,7 +370,7 @@ hv_uio_remove(struct hv_device *dev)
+ 	if (!pdata)
+ 		return;
+ 
+-	sysfs_remove_bin_file(&dev->channel->kobj, &ring_buffer_bin_attr);
++	hv_remove_ring_sysfs(dev->channel);
+ 	uio_unregister_device(&pdata->info);
+ 	hv_uio_cleanup(dev, pdata);
+ 
+diff --git a/drivers/usb/gadget/function/f_midi2.c b/drivers/usb/gadget/function/f_midi2.c
+index 8c9d0074db588b..0c45936f51b3d7 100644
+--- a/drivers/usb/gadget/function/f_midi2.c
++++ b/drivers/usb/gadget/function/f_midi2.c
+@@ -475,7 +475,7 @@ static void reply_ump_stream_ep_info(struct f_midi2_ep *ep)
+ /* reply a UMP EP device info */
+ static void reply_ump_stream_ep_device(struct f_midi2_ep *ep)
+ {
+-	struct snd_ump_stream_msg_devince_info rep = {
++	struct snd_ump_stream_msg_device_info rep = {
+ 		.type = UMP_MSG_TYPE_STREAM,
+ 		.status = UMP_STREAM_MSG_STATUS_DEVICE_INFO,
+ 		.manufacture_id = ep->info.manufacturer,
+diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c
+index 241d7aa1fbc20f..d35f3a18dd13b0 100644
+--- a/drivers/usb/host/xhci-dbgcap.c
++++ b/drivers/usb/host/xhci-dbgcap.c
+@@ -822,6 +822,7 @@ static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
+ {
+ 	dma_addr_t		deq;
+ 	union xhci_trb		*evt;
++	enum evtreturn		ret = EVT_DONE;
+ 	u32			ctrl, portsc;
+ 	bool			update_erdp = false;
+ 
+@@ -906,6 +907,7 @@ static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
+ 			break;
+ 		case TRB_TYPE(TRB_TRANSFER):
+ 			dbc_handle_xfer_event(dbc, evt);
++			ret = EVT_XFER_DONE;
+ 			break;
+ 		default:
+ 			break;
+@@ -924,7 +926,7 @@ static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
+ 		lo_hi_writeq(deq, &dbc->regs->erdp);
+ 	}
+ 
+-	return EVT_DONE;
++	return ret;
+ }
+ 
+ static void xhci_dbc_handle_events(struct work_struct *work)
+@@ -933,6 +935,7 @@ static void xhci_dbc_handle_events(struct work_struct *work)
+ 	struct xhci_dbc		*dbc;
+ 	unsigned long		flags;
+ 	unsigned int		poll_interval;
++	unsigned long		busypoll_timelimit;
+ 
+ 	dbc = container_of(to_delayed_work(work), struct xhci_dbc, event_work);
+ 	poll_interval = dbc->poll_interval;
+@@ -951,10 +954,20 @@ static void xhci_dbc_handle_events(struct work_struct *work)
+ 			dbc->driver->disconnect(dbc);
+ 		break;
+ 	case EVT_DONE:
+-		/* set fast poll rate if there are pending data transfers */
++		/*
++		 * Set fast poll rate if there are pending out transfers, or
++		 * a transfer was recently processed
++		 */
++		busypoll_timelimit = dbc->xfer_timestamp +
++			msecs_to_jiffies(DBC_XFER_INACTIVITY_TIMEOUT);
++
+ 		if (!list_empty(&dbc->eps[BULK_OUT].list_pending) ||
+-		    !list_empty(&dbc->eps[BULK_IN].list_pending))
+-			poll_interval = 1;
++		    time_is_after_jiffies(busypoll_timelimit))
++			poll_interval = 0;
++		break;
++	case EVT_XFER_DONE:
++		dbc->xfer_timestamp = jiffies;
++		poll_interval = 0;
+ 		break;
+ 	default:
+ 		dev_info(dbc->dev, "stop handling dbc events\n");
+diff --git a/drivers/usb/host/xhci-dbgcap.h b/drivers/usb/host/xhci-dbgcap.h
+index 9dc8f4d8077cc4..47ac72c2286d9a 100644
+--- a/drivers/usb/host/xhci-dbgcap.h
++++ b/drivers/usb/host/xhci-dbgcap.h
+@@ -96,6 +96,7 @@ struct dbc_ep {
+ #define DBC_WRITE_BUF_SIZE		8192
+ #define DBC_POLL_INTERVAL_DEFAULT	64	/* milliseconds */
+ #define DBC_POLL_INTERVAL_MAX		5000	/* milliseconds */
++#define DBC_XFER_INACTIVITY_TIMEOUT	10	/* milliseconds */
+ /*
+  * Private structure for DbC hardware state:
+  */
+@@ -142,6 +143,7 @@ struct xhci_dbc {
+ 	enum dbc_state			state;
+ 	struct delayed_work		event_work;
+ 	unsigned int			poll_interval;	/* ms */
++	unsigned long			xfer_timestamp;
+ 	unsigned			resume_required:1;
+ 	struct dbc_ep			eps[2];
+ 
+@@ -187,6 +189,7 @@ struct dbc_request {
+ enum evtreturn {
+ 	EVT_ERR	= -1,
+ 	EVT_DONE,
++	EVT_XFER_DONE,
+ 	EVT_GSER,
+ 	EVT_DISC,
+ };
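
/*
 * Not part of the patch: a sketch of the jiffies-based inactivity window
 * the dbgcap hunks above implement. Each completed transfer records the
 * current jiffies value; polling stays fast until
 * DBC_XFER_INACTIVITY_TIMEOUT ms pass with no new transfer, then falls
 * back to the slow interval.
 */
#include <linux/jiffies.h>

#define INACTIVITY_TIMEOUT_MS	10

static unsigned int pick_poll_interval(unsigned long last_xfer_jiffies,
				       unsigned int slow_ms)
{
	unsigned long limit = last_xfer_jiffies +
			      msecs_to_jiffies(INACTIVITY_TIMEOUT_MS);

	/* time_is_after_jiffies(x) is true while jiffies has not reached x */
	return time_is_after_jiffies(limit) ? 0 : slow_ms;
}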
+diff --git a/drivers/usb/typec/ucsi/displayport.c b/drivers/usb/typec/ucsi/displayport.c
+index 5d24a2321e152d..8aae80b457d74d 100644
+--- a/drivers/usb/typec/ucsi/displayport.c
++++ b/drivers/usb/typec/ucsi/displayport.c
+@@ -54,7 +54,8 @@ static int ucsi_displayport_enter(struct typec_altmode *alt, u32 *vdo)
+ 	u8 cur = 0;
+ 	int ret;
+ 
+-	mutex_lock(&dp->con->lock);
++	if (!ucsi_con_mutex_lock(dp->con))
++		return -ENOTCONN;
+ 
+ 	if (!dp->override && dp->initialized) {
+ 		const struct typec_altmode *p = typec_altmode_get_partner(alt);
+@@ -100,7 +101,7 @@ static int ucsi_displayport_enter(struct typec_altmode *alt, u32 *vdo)
+ 	schedule_work(&dp->work);
+ 	ret = 0;
+ err_unlock:
+-	mutex_unlock(&dp->con->lock);
++	ucsi_con_mutex_unlock(dp->con);
+ 
+ 	return ret;
+ }
+@@ -112,7 +113,8 @@ static int ucsi_displayport_exit(struct typec_altmode *alt)
+ 	u64 command;
+ 	int ret = 0;
+ 
+-	mutex_lock(&dp->con->lock);
++	if (!ucsi_con_mutex_lock(dp->con))
++		return -ENOTCONN;
+ 
+ 	if (!dp->override) {
+ 		const struct typec_altmode *p = typec_altmode_get_partner(alt);
+@@ -144,7 +146,7 @@ static int ucsi_displayport_exit(struct typec_altmode *alt)
+ 	schedule_work(&dp->work);
+ 
+ out_unlock:
+-	mutex_unlock(&dp->con->lock);
++	ucsi_con_mutex_unlock(dp->con);
+ 
+ 	return ret;
+ }
+@@ -202,20 +204,21 @@ static int ucsi_displayport_vdm(struct typec_altmode *alt,
+ 	int cmd = PD_VDO_CMD(header);
+ 	int svdm_version;
+ 
+-	mutex_lock(&dp->con->lock);
++	if (!ucsi_con_mutex_lock(dp->con))
++		return -ENOTCONN;
+ 
+ 	if (!dp->override && dp->initialized) {
+ 		const struct typec_altmode *p = typec_altmode_get_partner(alt);
+ 
+ 		dev_warn(&p->dev,
+ 			 "firmware doesn't support alternate mode overriding\n");
+-		mutex_unlock(&dp->con->lock);
++		ucsi_con_mutex_unlock(dp->con);
+ 		return -EOPNOTSUPP;
+ 	}
+ 
+ 	svdm_version = typec_altmode_get_svdm_version(alt);
+ 	if (svdm_version < 0) {
+-		mutex_unlock(&dp->con->lock);
++		ucsi_con_mutex_unlock(dp->con);
+ 		return svdm_version;
+ 	}
+ 
+@@ -259,7 +262,7 @@ static int ucsi_displayport_vdm(struct typec_altmode *alt,
+ 		break;
+ 	}
+ 
+-	mutex_unlock(&dp->con->lock);
++	ucsi_con_mutex_unlock(dp->con);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
+index 3f2bc13efa4865..8eee3d8e588a29 100644
+--- a/drivers/usb/typec/ucsi/ucsi.c
++++ b/drivers/usb/typec/ucsi/ucsi.c
+@@ -1903,6 +1903,40 @@ void ucsi_set_drvdata(struct ucsi *ucsi, void *data)
+ }
+ EXPORT_SYMBOL_GPL(ucsi_set_drvdata);
+ 
++/**
++ * ucsi_con_mutex_lock - Acquire the connector mutex
++ * @con: The connector interface to lock
++ *
++ * Returns true on success, false if the connector is disconnected
++ */
++bool ucsi_con_mutex_lock(struct ucsi_connector *con)
++{
++	bool mutex_locked = false;
++	bool connected = true;
++
++	while (connected && !mutex_locked) {
++		mutex_locked = mutex_trylock(&con->lock) != 0;
++		connected = con->status.flags & UCSI_CONSTAT_CONNECTED;
++		if (connected && !mutex_locked)
++			msleep(20);
++	}
++
++	connected = connected && con->partner;
++	if (!connected && mutex_locked)
++		mutex_unlock(&con->lock);
++
++	return connected;
++}
++
++/**
++ * ucsi_con_mutex_unlock - Release the connector mutex
++ * @con: The connector interface to unlock
++ */
++void ucsi_con_mutex_unlock(struct ucsi_connector *con)
++{
++	mutex_unlock(&con->lock);
++}
++
+ /**
+  * ucsi_create - Allocate UCSI instance
+  * @dev: Device interface to the PPM (Platform Policy Manager)
+diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
+index a333006d3496a1..5863a20b6c5dd3 100644
+--- a/drivers/usb/typec/ucsi/ucsi.h
++++ b/drivers/usb/typec/ucsi/ucsi.h
+@@ -91,6 +91,8 @@ int ucsi_register(struct ucsi *ucsi);
+ void ucsi_unregister(struct ucsi *ucsi);
+ void *ucsi_get_drvdata(struct ucsi *ucsi);
+ void ucsi_set_drvdata(struct ucsi *ucsi, void *data);
++bool ucsi_con_mutex_lock(struct ucsi_connector *con);
++void ucsi_con_mutex_unlock(struct ucsi_connector *con);
+ 
+ void ucsi_connector_change(struct ucsi *ucsi, u8 num);
+ 
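
/*
 * Not part of the patch: a sketch of how an altmode operation uses the
 * helpers declared above. ucsi_con_mutex_lock() only succeeds while a
 * partner is still connected, so the body cannot race with a
 * disconnection, and a false return maps naturally onto -ENOTCONN.
 */
static int example_altmode_op(struct ucsi_connector *con)
{
	int ret = 0;

	if (!ucsi_con_mutex_lock(con))
		return -ENOTCONN;

	/* ... operate on the connected partner here ... */

	ucsi_con_mutex_unlock(con);
	return ret;
}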
+diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
+index 0112742e4504b9..1f8a322eb00be0 100644
+--- a/drivers/virtio/virtio_ring.c
++++ b/drivers/virtio/virtio_ring.c
+@@ -2815,6 +2815,7 @@ EXPORT_SYMBOL_GPL(virtqueue_set_dma_premapped);
+  * virtqueue_reset - detach and recycle all unused buffers
+  * @_vq: the struct virtqueue we're talking about.
+  * @recycle: callback to recycle unused buffers
++ * @recycle_done: callback to be invoked when recycling of all unused buffers is done
+  *
+  * Caller must ensure we don't call this with other virtqueue operations
+  * at the same time (except where noted).
+@@ -2826,7 +2827,8 @@ EXPORT_SYMBOL_GPL(virtqueue_set_dma_premapped);
+  * -EPERM: Operation not permitted
+  */
+ int virtqueue_reset(struct virtqueue *_vq,
+-		    void (*recycle)(struct virtqueue *vq, void *buf))
++		    void (*recycle)(struct virtqueue *vq, void *buf),
++		    void (*recycle_done)(struct virtqueue *vq))
+ {
+ 	struct vring_virtqueue *vq = to_vvq(_vq);
+ 	int err;
+@@ -2834,6 +2836,8 @@ int virtqueue_reset(struct virtqueue *_vq,
+ 	err = virtqueue_disable_and_recycle(_vq, recycle);
+ 	if (err)
+ 		return err;
++	if (recycle_done)
++		recycle_done(_vq);
+ 
+ 	if (vq->packed_ring)
+ 		virtqueue_reinit_packed(vq);
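
/*
 * Not part of the patch: a hypothetical driver-side use of the extended
 * virtqueue_reset() signature. The per-buffer recycle callback still
 * releases each detached buffer; the new recycle_done callback fires once,
 * after the last buffer, so per-queue bookkeeping can be torn down exactly
 * once. Passing NULL keeps the old behaviour for callers that need no
 * completion hook.
 */
#include <linux/slab.h>
#include <linux/virtio.h>

static void drv_recycle(struct virtqueue *vq, void *buf)
{
	kfree(buf);			/* release one unused buffer */
}

static void drv_recycle_done(struct virtqueue *vq)
{
	/* all unused buffers recycled: reset per-queue state here */
}

static int drv_reset_queue(struct virtqueue *vq)
{
	return virtqueue_reset(vq, drv_recycle, drv_recycle_done);
}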
+diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
+index 0a216a078c3155..47335a0f4a618f 100644
+--- a/fs/binfmt_elf.c
++++ b/fs/binfmt_elf.c
+@@ -825,6 +825,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
+ 	struct elf_phdr *elf_ppnt, *elf_phdata, *interp_elf_phdata = NULL;
+ 	struct elf_phdr *elf_property_phdata = NULL;
+ 	unsigned long elf_brk;
++	bool brk_moved = false;
+ 	int retval, i;
+ 	unsigned long elf_entry;
+ 	unsigned long e_entry;
+@@ -1092,15 +1093,19 @@ static int load_elf_binary(struct linux_binprm *bprm)
+ 			/* Calculate any requested alignment. */
+ 			alignment = maximum_alignment(elf_phdata, elf_ex->e_phnum);
+ 
+-			/*
+-			 * There are effectively two types of ET_DYN
+-			 * binaries: programs (i.e. PIE: ET_DYN with PT_INTERP)
+-			 * and loaders (ET_DYN without PT_INTERP, since they
+-			 * _are_ the ELF interpreter). The loaders must
+-			 * be loaded away from programs since the program
+-			 * may otherwise collide with the loader (especially
+-			 * for ET_EXEC which does not have a randomized
+-			 * position). For example to handle invocations of
++			/**
++			 * DOC: PIE handling
++			 *
++			 * There are effectively two types of ET_DYN ELF
++			 * binaries: programs (i.e. PIE: ET_DYN with
++			 * PT_INTERP) and loaders (i.e. static PIE: ET_DYN
++			 * without PT_INTERP, usually the ELF interpreter
++			 * itself). Loaders must be loaded away from programs
++			 * since the program may otherwise collide with the
++			 * loader (especially for ET_EXEC which does not have
++			 * a randomized position).
++			 *
++			 * For example, to handle invocations of
+ 			 * "./ld.so someprog" to test out a new version of
+ 			 * the loader, the subsequent program that the
+ 			 * loader loads must avoid the loader itself, so
+@@ -1113,6 +1118,9 @@ static int load_elf_binary(struct linux_binprm *bprm)
+ 			 * ELF_ET_DYN_BASE and loaders are loaded into the
+ 			 * independently randomized mmap region (0 load_bias
+ 			 * without MAP_FIXED nor MAP_FIXED_NOREPLACE).
++			 *
++			 * See below for "brk" handling details, which is
++			 * also affected by program vs loader and ASLR.
+ 			 */
+ 			if (interpreter) {
+ 				/* On ET_DYN with PT_INTERP, we do the ASLR. */
+@@ -1229,8 +1237,6 @@ static int load_elf_binary(struct linux_binprm *bprm)
+ 	start_data += load_bias;
+ 	end_data += load_bias;
+ 
+-	current->mm->start_brk = current->mm->brk = ELF_PAGEALIGN(elf_brk);
+-
+ 	if (interpreter) {
+ 		elf_entry = load_elf_interp(interp_elf_ex,
+ 					    interpreter,
+@@ -1286,27 +1292,44 @@ static int load_elf_binary(struct linux_binprm *bprm)
+ 	mm->end_data = end_data;
+ 	mm->start_stack = bprm->p;
+ 
+-	if ((current->flags & PF_RANDOMIZE) && (snapshot_randomize_va_space > 1)) {
++	/**
++	 * DOC: "brk" handling
++	 *
++	 * For architectures with ELF randomization, when executing a
++	 * loader directly (i.e. static PIE: ET_DYN without PT_INTERP),
++	 * move the brk area out of the mmap region and into the unused
++	 * ELF_ET_DYN_BASE region. Since "brk" grows up it may collide
++	 * early with the stack growing down or other regions being put
++	 * into the mmap region by the kernel (e.g. vdso).
++	 *
++	 * In the CONFIG_COMPAT_BRK case, though, everything is turned
++	 * off because we're not allowed to move the brk at all.
++	 */
++	if (!IS_ENABLED(CONFIG_COMPAT_BRK) &&
++	    IS_ENABLED(CONFIG_ARCH_HAS_ELF_RANDOMIZE) &&
++	    elf_ex->e_type == ET_DYN && !interpreter) {
++		elf_brk = ELF_ET_DYN_BASE;
++		/* This counts as moving the brk, so let brk(2) know. */
++		brk_moved = true;
++	}
++	mm->start_brk = mm->brk = ELF_PAGEALIGN(elf_brk);
++
++	if ((current->flags & PF_RANDOMIZE) && snapshot_randomize_va_space > 1) {
+ 		/*
+-		 * For architectures with ELF randomization, when executing
+-		 * a loader directly (i.e. no interpreter listed in ELF
+-		 * headers), move the brk area out of the mmap region
+-		 * (since it grows up, and may collide early with the stack
+-		 * growing down), and into the unused ELF_ET_DYN_BASE region.
++		 * If we didn't move the brk to ELF_ET_DYN_BASE (above),
++		 * leave a gap between .bss and brk.
+ 		 */
+-		if (IS_ENABLED(CONFIG_ARCH_HAS_ELF_RANDOMIZE) &&
+-		    elf_ex->e_type == ET_DYN && !interpreter) {
+-			mm->brk = mm->start_brk = ELF_ET_DYN_BASE;
+-		} else {
+-			/* Otherwise leave a gap between .bss and brk. */
++		if (!brk_moved)
+ 			mm->brk = mm->start_brk = mm->brk + PAGE_SIZE;
+-		}
+ 
+ 		mm->brk = mm->start_brk = arch_randomize_brk(mm);
++		brk_moved = true;
++	}
++
+ #ifdef compat_brk_randomized
++	if (brk_moved)
+ 		current->brk_randomized = 1;
+ #endif
+-	}
+ 
+ 	if (current->personality & MMAP_PAGE_ZERO) {
+ 		/* Why this, you ask???  Well SVr4 maps page 0 as read-only,
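
/*
 * Not part of the patch: the brk placement above, reduced to its outcomes
 * under hypothetical parameters (simplified - the real code also applies
 * arch_randomize_brk() when ASLR is active).
 */
static unsigned long place_brk(bool static_pie, bool randomize,
			       unsigned long elf_brk)
{
	unsigned long brk = ELF_PAGEALIGN(elf_brk);

	if (!IS_ENABLED(CONFIG_COMPAT_BRK) &&
	    IS_ENABLED(CONFIG_ARCH_HAS_ELF_RANDOMIZE) && static_pie)
		return ELF_ET_DYN_BASE;	/* brk moved out of the mmap region */

	if (randomize)
		brk += PAGE_SIZE;	/* gap between .bss and brk */

	return brk;
}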
+diff --git a/fs/btrfs/discard.c b/fs/btrfs/discard.c
+index e815d165cccc22..e9cdc1759dada8 100644
+--- a/fs/btrfs/discard.c
++++ b/fs/btrfs/discard.c
+@@ -94,8 +94,6 @@ static void __add_to_discard_list(struct btrfs_discard_ctl *discard_ctl,
+ 				  struct btrfs_block_group *block_group)
+ {
+ 	lockdep_assert_held(&discard_ctl->lock);
+-	if (!btrfs_run_discard_work(discard_ctl))
+-		return;
+ 
+ 	if (list_empty(&block_group->discard_list) ||
+ 	    block_group->discard_index == BTRFS_DISCARD_INDEX_UNUSED) {
+@@ -118,6 +116,9 @@ static void add_to_discard_list(struct btrfs_discard_ctl *discard_ctl,
+ 	if (!btrfs_is_block_group_data_only(block_group))
+ 		return;
+ 
++	if (!btrfs_run_discard_work(discard_ctl))
++		return;
++
+ 	spin_lock(&discard_ctl->lock);
+ 	__add_to_discard_list(discard_ctl, block_group);
+ 	spin_unlock(&discard_ctl->lock);
+@@ -250,6 +251,18 @@ static struct btrfs_block_group *peek_discard_list(
+ 		    block_group->used != 0) {
+ 			if (btrfs_is_block_group_data_only(block_group)) {
+ 				__add_to_discard_list(discard_ctl, block_group);
++				/*
++				 * The block group must have been moved to other
++				 * The block group must have been moved to another
++				 * the meantime or a transaction abort happened,
++				 * otherwise we can end up in an infinite loop,
++				 * always jumping into the 'again' label and
++				 * keep getting this block group over and over
++				 * in case there are no other block groups in
++				 * the discard lists.
++				 */
++				ASSERT(block_group->discard_index !=
++				       BTRFS_DISCARD_INDEX_UNUSED);
+ 			} else {
+ 				list_del_init(&block_group->discard_list);
+ 				btrfs_put_block_group(block_group);
+diff --git a/fs/btrfs/fs.h b/fs/btrfs/fs.h
+index cbfb225858a59f..bb822e425d7fa0 100644
+--- a/fs/btrfs/fs.h
++++ b/fs/btrfs/fs.h
+@@ -285,6 +285,7 @@ enum {
+ #define BTRFS_FEATURE_INCOMPAT_SAFE_CLEAR		0ULL
+ 
+ #define BTRFS_DEFAULT_COMMIT_INTERVAL	(30)
++#define BTRFS_WARNING_COMMIT_INTERVAL	(300)
+ #define BTRFS_DEFAULT_MAX_INLINE	(2048)
+ 
+ struct btrfs_dev_replace {
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index bee8852e81554e..9ce1270addb04b 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -1187,6 +1187,7 @@ static void submit_one_async_extent(struct async_chunk *async_chunk,
+ 	struct extent_state *cached = NULL;
+ 	struct extent_map *em;
+ 	int ret = 0;
++	bool free_pages = false;
+ 	u64 start = async_extent->start;
+ 	u64 end = async_extent->start + async_extent->ram_size - 1;
+ 
+@@ -1207,7 +1208,10 @@ static void submit_one_async_extent(struct async_chunk *async_chunk,
+ 	}
+ 
+ 	if (async_extent->compress_type == BTRFS_COMPRESS_NONE) {
++		ASSERT(!async_extent->folios);
++		ASSERT(async_extent->nr_folios == 0);
+ 		submit_uncompressed_range(inode, async_extent, locked_folio);
++		free_pages = true;
+ 		goto done;
+ 	}
+ 
+@@ -1223,6 +1227,7 @@ static void submit_one_async_extent(struct async_chunk *async_chunk,
+ 		 * fall back to uncompressed.
+ 		 */
+ 		submit_uncompressed_range(inode, async_extent, locked_folio);
++		free_pages = true;
+ 		goto done;
+ 	}
+ 
+@@ -1264,6 +1269,8 @@ static void submit_one_async_extent(struct async_chunk *async_chunk,
+ done:
+ 	if (async_chunk->blkcg_css)
+ 		kthread_associate_blkcg(NULL);
++	if (free_pages)
++		free_async_extent_pages(async_extent);
+ 	kfree(async_extent);
+ 	return;
+ 
+diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
+index 08ccf5d5e14407..bcb8def4ade203 100644
+--- a/fs/btrfs/super.c
++++ b/fs/btrfs/super.c
+@@ -570,6 +570,10 @@ static int btrfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
+ 		break;
+ 	case Opt_commit_interval:
+ 		ctx->commit_interval = result.uint_32;
++		if (ctx->commit_interval > BTRFS_WARNING_COMMIT_INTERVAL) {
++			btrfs_warn(NULL, "excessive commit interval %u, use with care",
++				   ctx->commit_interval);
++		}
+ 		if (ctx->commit_interval == 0)
+ 			ctx->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
+ 		break;
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index e7bc99c69743cf..ca01f79c82e4ad 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -7040,10 +7040,18 @@ static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
+ 	struct nfs4_unlockdata *p;
+ 	struct nfs4_state *state = lsp->ls_state;
+ 	struct inode *inode = state->inode;
++	struct nfs_lock_context *l_ctx;
+ 
+ 	p = kzalloc(sizeof(*p), GFP_KERNEL);
+ 	if (p == NULL)
+ 		return NULL;
++	l_ctx = nfs_get_lock_context(ctx);
++	if (!IS_ERR(l_ctx)) {
++		p->l_ctx = l_ctx;
++	} else {
++		kfree(p);
++		return NULL;
++	}
+ 	p->arg.fh = NFS_FH(inode);
+ 	p->arg.fl = &p->fl;
+ 	p->arg.seqid = seqid;
+@@ -7051,7 +7059,6 @@ static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
+ 	p->lsp = lsp;
+ 	/* Ensure we don't close file until we're done freeing locks! */
+ 	p->ctx = get_nfs_open_context(ctx);
+-	p->l_ctx = nfs_get_lock_context(ctx);
+ 	locks_init_lock(&p->fl);
+ 	locks_copy_lock(&p->fl, fl);
+ 	p->server = NFS_SERVER(inode);
+diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
+index 5f582713bf05eb..683e09be25adf3 100644
+--- a/fs/nfs/pnfs.c
++++ b/fs/nfs/pnfs.c
+@@ -745,6 +745,14 @@ pnfs_mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
+ 	return remaining;
+ }
+ 
++static void pnfs_reset_return_info(struct pnfs_layout_hdr *lo)
++{
++	struct pnfs_layout_segment *lseg;
++
++	list_for_each_entry(lseg, &lo->plh_return_segs, pls_list)
++		pnfs_set_plh_return_info(lo, lseg->pls_range.iomode, 0);
++}
++
+ static void
+ pnfs_free_returned_lsegs(struct pnfs_layout_hdr *lo,
+ 		struct list_head *free_me,
+@@ -1292,6 +1300,7 @@ void pnfs_layoutreturn_free_lsegs(struct pnfs_layout_hdr *lo,
+ 		pnfs_mark_matching_lsegs_invalid(lo, &freeme, range, seq);
+ 		pnfs_free_returned_lsegs(lo, &freeme, range, seq);
+ 		pnfs_set_layout_stateid(lo, stateid, NULL, true);
++		pnfs_reset_return_info(lo);
+ 	} else
+ 		pnfs_mark_layout_stateid_invalid(lo, &freeme);
+ out_unlock:
+diff --git a/fs/smb/client/cifs_spnego.c b/fs/smb/client/cifs_spnego.c
+index 2ad067886ec3fa..bc1c1e9b288add 100644
+--- a/fs/smb/client/cifs_spnego.c
++++ b/fs/smb/client/cifs_spnego.c
+@@ -82,6 +82,9 @@ struct key_type cifs_spnego_key_type = {
+ /* strlen of ";pid=0x" */
+ #define PID_KEY_LEN		7
+ 
++/* strlen of ";upcall_target=" */
++#define UPCALL_TARGET_KEY_LEN	15
++
+ /* get a key struct with a SPNEGO security blob, suitable for session setup */
+ struct key *
+ cifs_get_spnego_key(struct cifs_ses *sesInfo,
+@@ -108,6 +111,11 @@ cifs_get_spnego_key(struct cifs_ses *sesInfo,
+ 	if (sesInfo->user_name)
+ 		desc_len += USER_KEY_LEN + strlen(sesInfo->user_name);
+ 
++	if (sesInfo->upcall_target == UPTARGET_MOUNT)
++		desc_len += UPCALL_TARGET_KEY_LEN + 5; // strlen("mount")
++	else
++		desc_len += UPCALL_TARGET_KEY_LEN + 3; // strlen("app")
++
+ 	spnego_key = ERR_PTR(-ENOMEM);
+ 	description = kzalloc(desc_len, GFP_KERNEL);
+ 	if (description == NULL)
+@@ -158,6 +166,14 @@ cifs_get_spnego_key(struct cifs_ses *sesInfo,
+ 	dp = description + strlen(description);
+ 	sprintf(dp, ";pid=0x%x", current->pid);
+ 
++	if (sesInfo->upcall_target == UPTARGET_MOUNT) {
++		dp = description + strlen(description);
++		sprintf(dp, ";upcall_target=mount");
++	} else {
++		dp = description + strlen(description);
++		sprintf(dp, ";upcall_target=app");
++	}
++
+ 	cifs_dbg(FYI, "key description = %s\n", description);
+ 	saved_cred = override_creds(spnego_cred);
+ 	spnego_key = request_key(&cifs_spnego_key_type, description, "");
+diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c
+index 0ceebde38f9fe0..9d96b833015c82 100644
+--- a/fs/smb/client/cifsfs.c
++++ b/fs/smb/client/cifsfs.c
+@@ -547,6 +547,30 @@ static int cifs_show_devname(struct seq_file *m, struct dentry *root)
+ 	return 0;
+ }
+ 
++static void
++cifs_show_upcall_target(struct seq_file *s, struct cifs_sb_info *cifs_sb)
++{
++	if (cifs_sb->ctx->upcall_target == UPTARGET_UNSPECIFIED) {
++		seq_puts(s, ",upcall_target=app");
++		return;
++	}
++
++	seq_puts(s, ",upcall_target=");
++
++	switch (cifs_sb->ctx->upcall_target) {
++	case UPTARGET_APP:
++		seq_puts(s, "app");
++		break;
++	case UPTARGET_MOUNT:
++		seq_puts(s, "mount");
++		break;
++	default:
++		/* shouldn't ever happen */
++		seq_puts(s, "unknown");
++		break;
++	}
++}
++
+ /*
+  * cifs_show_options() is for displaying mount options in /proc/mounts.
+  * Not all settable options are displayed but most of the important
+@@ -563,6 +587,7 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
+ 	seq_show_option(s, "vers", tcon->ses->server->vals->version_string);
+ 	cifs_show_security(s, tcon->ses);
+ 	cifs_show_cache_flavor(s, cifs_sb);
++	cifs_show_upcall_target(s, cifs_sb);
+ 
+ 	if (tcon->no_lease)
+ 		seq_puts(s, ",nolease");
+diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
+index a8484af7a2fbc4..a38b40d68b14f1 100644
+--- a/fs/smb/client/cifsglob.h
++++ b/fs/smb/client/cifsglob.h
+@@ -154,6 +154,12 @@ enum securityEnum {
+ 	IAKerb,			/* Kerberos proxy */
+ };
+ 
++enum upcall_target_enum {
++	UPTARGET_UNSPECIFIED, /* not specified, defaults to app */
++	UPTARGET_MOUNT, /* upcall to the mount namespace */
++	UPTARGET_APP, /* upcall to the application namespace which did the mount */
++};
++
+ enum cifs_reparse_type {
+ 	CIFS_REPARSE_TYPE_NFS,
+ 	CIFS_REPARSE_TYPE_WSL,
+@@ -1085,6 +1091,7 @@ struct cifs_ses {
+ 	struct session_key auth_key;
+ 	struct ntlmssp_auth *ntlmssp; /* ciphertext, flags, server challenge */
+ 	enum securityEnum sectype; /* what security flavor was specified? */
++	enum upcall_target_enum upcall_target; /* what upcall target was specified? */
+ 	bool sign;		/* is signing required? */
+ 	bool domainAuto:1;
+ 	bool expired_pwd;  /* track if access denied or expired pwd so can know if need to update */
+diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
+index d5549e06a533d8..112057c7ca11c6 100644
+--- a/fs/smb/client/connect.c
++++ b/fs/smb/client/connect.c
+@@ -2381,6 +2381,26 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
+ 
+ 	ses->sectype = ctx->sectype;
+ 	ses->sign = ctx->sign;
++
++	/*
++	 * Explicitly mark the upcall_target mount option for easier handling
++	 * by cifs_spnego.c and eventually cifs.upcall.c
++	 */
++
++	switch (ctx->upcall_target) {
++	case UPTARGET_UNSPECIFIED: /* default to app */
++	case UPTARGET_APP:
++		ses->upcall_target = UPTARGET_APP;
++		break;
++	case UPTARGET_MOUNT:
++		ses->upcall_target = UPTARGET_MOUNT;
++		break;
++	default:
++		// should never happen
++		ses->upcall_target = UPTARGET_APP;
++		break;
++	}
++
+ 	ses->local_nls = load_nls(ctx->local_nls->charset);
+ 
+ 	/* add server as first channel */
+diff --git a/fs/smb/client/fs_context.c b/fs/smb/client/fs_context.c
+index 1f1f4586673a7a..69cca4f17dbaad 100644
+--- a/fs/smb/client/fs_context.c
++++ b/fs/smb/client/fs_context.c
+@@ -67,6 +67,12 @@ static const match_table_t cifs_secflavor_tokens = {
+ 	{ Opt_sec_err, NULL }
+ };
+ 
++static const match_table_t cifs_upcall_target = {
++	{ Opt_upcall_target_mount, "mount" },
++	{ Opt_upcall_target_application, "app" },
++	{ Opt_upcall_target_err, NULL }
++};
++
+ const struct fs_parameter_spec smb3_fs_parameters[] = {
+ 	/* Mount options that take no arguments */
+ 	fsparam_flag_no("user_xattr", Opt_user_xattr),
+@@ -179,6 +185,7 @@ const struct fs_parameter_spec smb3_fs_parameters[] = {
+ 	fsparam_string("sec", Opt_sec),
+ 	fsparam_string("cache", Opt_cache),
+ 	fsparam_string("reparse", Opt_reparse),
++	fsparam_string("upcall_target", Opt_upcalltarget),
+ 
+ 	/* Arguments that should be ignored */
+ 	fsparam_flag("guest", Opt_ignore),
+@@ -249,6 +256,29 @@ cifs_parse_security_flavors(struct fs_context *fc, char *value, struct smb3_fs_c
+ 	return 0;
+ }
+ 
++static int
++cifs_parse_upcall_target(struct fs_context *fc, char *value, struct smb3_fs_context *ctx)
++{
++	substring_t args[MAX_OPT_ARGS];
++
++	ctx->upcall_target = UPTARGET_UNSPECIFIED;
++
++	switch (match_token(value, cifs_upcall_target, args)) {
++	case Opt_upcall_target_mount:
++		ctx->upcall_target = UPTARGET_MOUNT;
++		break;
++	case Opt_upcall_target_application:
++		ctx->upcall_target = UPTARGET_APP;
++		break;
++
++	default:
++		cifs_errorf(fc, "bad upcall target: %s\n", value);
++		return 1;
++	}
++
++	return 0;
++}
++
+ static const match_table_t cifs_cacheflavor_tokens = {
+ 	{ Opt_cache_loose, "loose" },
+ 	{ Opt_cache_strict, "strict" },
+@@ -1526,6 +1556,10 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
+ 		if (cifs_parse_security_flavors(fc, param->string, ctx) != 0)
+ 			goto cifs_parse_mount_err;
+ 		break;
++	case Opt_upcalltarget:
++		if (cifs_parse_upcall_target(fc, param->string, ctx) != 0)
++			goto cifs_parse_mount_err;
++		break;
+ 	case Opt_cache:
+ 		if (cifs_parse_cache_flavor(fc, param->string, ctx) != 0)
+ 			goto cifs_parse_mount_err;
+@@ -1703,6 +1737,11 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
+ 	}
+ 	/* case Opt_ignore: - is ignored as expected ... */
+ 
++	if (ctx->multiuser && ctx->upcall_target == UPTARGET_MOUNT) {
++		cifs_errorf(fc, "multiuser mount option not supported with upcalltarget set as 'mount'\n");
++		goto cifs_parse_mount_err;
++	}
++
+ 	return 0;
+ 
+  cifs_parse_mount_err:
+diff --git a/fs/smb/client/fs_context.h b/fs/smb/client/fs_context.h
+index c8c8b4451b3bc7..ac6baa774ad3a9 100644
+--- a/fs/smb/client/fs_context.h
++++ b/fs/smb/client/fs_context.h
+@@ -61,6 +61,12 @@ enum cifs_sec_param {
+ 	Opt_sec_err
+ };
+ 
++enum cifs_upcall_target_param {
++	Opt_upcall_target_mount,
++	Opt_upcall_target_application,
++	Opt_upcall_target_err
++};
++
+ enum cifs_param {
+ 	/* Mount options that take no arguments */
+ 	Opt_user_xattr,
+@@ -114,6 +120,8 @@ enum cifs_param {
+ 	Opt_multichannel,
+ 	Opt_compress,
+ 	Opt_witness,
++	Opt_is_upcall_target_mount,
++	Opt_is_upcall_target_application,
+ 
+ 	/* Mount options which take numeric value */
+ 	Opt_backupuid,
+@@ -157,6 +165,7 @@ enum cifs_param {
+ 	Opt_sec,
+ 	Opt_cache,
+ 	Opt_reparse,
++	Opt_upcalltarget,
+ 
+ 	/* Mount options to be ignored */
+ 	Opt_ignore,
+@@ -198,6 +207,7 @@ struct smb3_fs_context {
+ 	umode_t file_mode;
+ 	umode_t dir_mode;
+ 	enum securityEnum sectype; /* sectype requested via mnt opts */
++	enum upcall_target_enum upcall_target; /* where to upcall for mount */
+ 	bool sign; /* was signing requested via mnt opts? */
+ 	bool ignore_signature:1;
+ 	bool retry:1;
+diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
+index 83022a476e3b3e..176be478cd138f 100644
+--- a/fs/smb/client/smb2pdu.c
++++ b/fs/smb/client/smb2pdu.c
+@@ -2985,7 +2985,7 @@ int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
+ 	/* Eventually save off posix specific response info and timestamps */
+ 
+ err_free_rsp_buf:
+-	free_rsp_buf(resp_buftype, rsp);
++	free_rsp_buf(resp_buftype, rsp_iov.iov_base);
+ 	kfree(pc_buf);
+ err_free_req:
+ 	cifs_small_buf_release(req);
+diff --git a/fs/udf/truncate.c b/fs/udf/truncate.c
+index 4f33a4a4888613..b4071c9cf8c951 100644
+--- a/fs/udf/truncate.c
++++ b/fs/udf/truncate.c
+@@ -115,7 +115,7 @@ void udf_truncate_tail_extent(struct inode *inode)
+ 	}
+ 	/* This inode entry is in-memory only and thus we don't have to mark
+ 	 * the inode dirty */
+-	if (ret == 0)
++	if (ret >= 0)
+ 		iinfo->i_lenExtents = inode->i_size;
+ 	brelse(epos.bh);
+ }
+diff --git a/fs/xattr.c b/fs/xattr.c
+index 05ec7e7d9e87e2..4f5a45338a83ac 100644
+--- a/fs/xattr.c
++++ b/fs/xattr.c
+@@ -1290,6 +1290,15 @@ static bool xattr_is_trusted(const char *name)
+ 	return !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN);
+ }
+ 
++static bool xattr_is_maclabel(const char *name)
++{
++	const char *suffix = name + XATTR_SECURITY_PREFIX_LEN;
++
++	return !strncmp(name, XATTR_SECURITY_PREFIX,
++			XATTR_SECURITY_PREFIX_LEN) &&
++		security_ismaclabel(suffix);
++}
++
+ /**
+  * simple_xattr_list - list all xattr objects
+  * @inode: inode from which to get the xattrs
+@@ -1322,6 +1331,17 @@ ssize_t simple_xattr_list(struct inode *inode, struct simple_xattrs *xattrs,
+ 	if (err)
+ 		return err;
+ 
++	err = security_inode_listsecurity(inode, buffer, remaining_size);
++	if (err < 0)
++		return err;
++
++	if (buffer) {
++		if (remaining_size < err)
++			return -ERANGE;
++		buffer += err;
++	}
++	remaining_size -= err;
++
+ 	read_lock(&xattrs->lock);
+ 	for (rbp = rb_first(&xattrs->rb_root); rbp; rbp = rb_next(rbp)) {
+ 		xattr = rb_entry(rbp, struct simple_xattr, rb_node);
+@@ -1330,6 +1350,10 @@ ssize_t simple_xattr_list(struct inode *inode, struct simple_xattrs *xattrs,
+ 		if (!trusted && xattr_is_trusted(xattr->name))
+ 			continue;
+ 
++		/* skip MAC labels; these are provided by LSM above */
++		if (xattr_is_maclabel(xattr->name))
++			continue;
++
+ 		err = xattr_list_one(&buffer, &remaining_size, xattr->name);
+ 		if (err)
+ 			break;
+diff --git a/include/drm/drm_fbdev_dma.h b/include/drm/drm_fbdev_dma.h
+index 2da7ee7841337c..6ae4de46497ce5 100644
+--- a/include/drm/drm_fbdev_dma.h
++++ b/include/drm/drm_fbdev_dma.h
+@@ -4,12 +4,24 @@
+ #define DRM_FBDEV_DMA_H
+ 
+ struct drm_device;
++struct drm_fb_helper;
++struct drm_fb_helper_surface_size;
+ 
+ #ifdef CONFIG_DRM_FBDEV_EMULATION
++int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
++				     struct drm_fb_helper_surface_size *sizes);
++
++#define DRM_FBDEV_DMA_DRIVER_OPS \
++	.fbdev_probe = drm_fbdev_dma_driver_fbdev_probe
++
+ void drm_fbdev_dma_setup(struct drm_device *dev, unsigned int preferred_bpp);
+ #else
+ static inline void drm_fbdev_dma_setup(struct drm_device *dev, unsigned int preferred_bpp)
+ { }
++
++#define DRM_FBDEV_DMA_DRIVER_OPS \
++	.fbdev_probe = NULL
++
+ #endif
+ 
+ #endif
+diff --git a/include/linux/bio.h b/include/linux/bio.h
+index 66b7620a1b5333..9e98fb87e7ef71 100644
+--- a/include/linux/bio.h
++++ b/include/linux/bio.h
+@@ -11,6 +11,7 @@
+ #include <linux/uio.h>
+ 
+ #define BIO_MAX_VECS		256U
++#define BIO_MAX_INLINE_VECS	UIO_MAXIOV
+ 
+ struct queue_limits;
+ 
+diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
+index 02a226bcf0edc9..44bf8af37901ba 100644
+--- a/include/linux/hyperv.h
++++ b/include/linux/hyperv.h
+@@ -1067,6 +1067,12 @@ struct vmbus_channel {
+ 
+ 	/* The max size of a packet on this channel */
+ 	u32 max_pkt_size;
++
++	/* function to mmap ring buffer memory to the channel's sysfs ring attribute */
++	int (*mmap_ring_buffer)(struct vmbus_channel *channel, struct vm_area_struct *vma);
++
++	/* boolean to control visibility of sysfs for ring buffer */
++	bool ring_sysfs_visible;
+ };
+ 
+ #define lock_requestor(channel, flags)					\
+@@ -1226,13 +1232,6 @@ extern int vmbus_sendpacket(struct vmbus_channel *channel,
+ 				  enum vmbus_packet_type type,
+ 				  u32 flags);
+ 
+-extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
+-					    struct hv_page_buffer pagebuffers[],
+-					    u32 pagecount,
+-					    void *buffer,
+-					    u32 bufferlen,
+-					    u64 requestid);
+-
+ extern int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
+ 				     struct vmbus_packet_mpb_array *mpb,
+ 				     u32 desc_size,
+diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
+index 15206450929d5e..2e836d44f73864 100644
+--- a/include/linux/kvm_host.h
++++ b/include/linux/kvm_host.h
+@@ -260,11 +260,17 @@ union kvm_mmu_notifier_arg {
+ 	unsigned long attributes;
+ };
+ 
++enum kvm_gfn_range_filter {
++	KVM_FILTER_SHARED		= BIT(0),
++	KVM_FILTER_PRIVATE		= BIT(1),
++};
++
+ struct kvm_gfn_range {
+ 	struct kvm_memory_slot *slot;
+ 	gfn_t start;
+ 	gfn_t end;
+ 	union kvm_mmu_notifier_arg arg;
++	enum kvm_gfn_range_filter attr_filter;
+ 	bool may_block;
+ };
+ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
+diff --git a/include/linux/tpm.h b/include/linux/tpm.h
+index 6c3125300c009a..a3d8305e88a51e 100644
+--- a/include/linux/tpm.h
++++ b/include/linux/tpm.h
+@@ -224,7 +224,7 @@ enum tpm2_const {
+ 
+ enum tpm2_timeouts {
+ 	TPM2_TIMEOUT_A          =    750,
+-	TPM2_TIMEOUT_B          =   2000,
++	TPM2_TIMEOUT_B          =   4000,
+ 	TPM2_TIMEOUT_C          =    200,
+ 	TPM2_TIMEOUT_D          =     30,
+ 	TPM2_DURATION_SHORT     =     20,
+@@ -257,6 +257,7 @@ enum tpm2_return_codes {
+ 	TPM2_RC_TESTING		= 0x090A, /* RC_WARN */
+ 	TPM2_RC_REFERENCE_H0	= 0x0910,
+ 	TPM2_RC_RETRY		= 0x0922,
++	TPM2_RC_SESSION_MEMORY	= 0x0903,
+ };
+ 
+ enum tpm2_command_codes {
+@@ -437,6 +438,24 @@ static inline u32 tpm2_rc_value(u32 rc)
+ 	return (rc & BIT(7)) ? rc & 0xbf : rc;
+ }
+ 
++/*
++ * Convert a return value from tpm_transmit_cmd() to a POSIX error code.
++ */
++static inline ssize_t tpm_ret_to_err(ssize_t ret)
++{
++	if (ret < 0)
++		return ret;
++
++	switch (tpm2_rc_value(ret)) {
++	case TPM2_RC_SUCCESS:
++		return 0;
++	case TPM2_RC_SESSION_MEMORY:
++		return -ENOMEM;
++	default:
++		return -EFAULT;
++	}
++}
++
+ #if defined(CONFIG_TCG_TPM) || defined(CONFIG_TCG_TPM_MODULE)
+ 
+ extern int tpm_is_tpm2(struct tpm_chip *chip);
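
/*
 * Not part of the patch: a hypothetical call site for tpm_ret_to_err()
 * above. tpm_transmit_cmd() can return a negative errno from the transport
 * or a positive TPM 2.0 return code; the helper folds both into a single
 * errno, mapping TPM2_RC_SESSION_MEMORY to -ENOMEM and any other nonzero
 * TPM code to -EFAULT.
 */
static int send_example_command(struct tpm_chip *chip, struct tpm_buf *buf)
{
	return tpm_ret_to_err(tpm_transmit_cmd(chip, buf, 0,
					       "example command"));
}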
+diff --git a/include/linux/virtio.h b/include/linux/virtio.h
+index 73c8922e69e095..d791d47eb00edf 100644
+--- a/include/linux/virtio.h
++++ b/include/linux/virtio.h
+@@ -103,7 +103,8 @@ int virtqueue_resize(struct virtqueue *vq, u32 num,
+ 		     void (*recycle)(struct virtqueue *vq, void *buf),
+ 		     void (*recycle_done)(struct virtqueue *vq));
+ int virtqueue_reset(struct virtqueue *vq,
+-		    void (*recycle)(struct virtqueue *vq, void *buf));
++		    void (*recycle)(struct virtqueue *vq, void *buf),
++		    void (*recycle_done)(struct virtqueue *vq));
+ 
+ struct virtio_admin_cmd {
+ 	__le16 opcode;
+diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
+index 24e48af7e8f74a..a9d7e9ecee6b50 100644
+--- a/include/net/sch_generic.h
++++ b/include/net/sch_generic.h
+@@ -1031,6 +1031,21 @@ static inline struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh)
+ 	return skb;
+ }
+ 
++static inline struct sk_buff *qdisc_dequeue_internal(struct Qdisc *sch, bool direct)
++{
++	struct sk_buff *skb;
++
++	skb = __skb_dequeue(&sch->gso_skb);
++	if (skb) {
++		sch->q.qlen--;
++		return skb;
++	}
++	if (direct)
++		return __qdisc_dequeue_head(&sch->q);
++	else
++		return sch->dequeue(sch);
++}
++
+ static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
+ {
+ 	struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);
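
/*
 * Not part of the patch: a hypothetical change() handler showing what
 * qdisc_dequeue_internal() is for. When a qdisc limit is lowered, excess
 * packets are drained; skbs that were requeued on sch->gso_skb must be
 * drained first (with qlen adjusted), which the helper handles before
 * falling back to the normal dequeue path.
 */
#include <net/sch_generic.h>

static int example_qdisc_change(struct Qdisc *sch, u32 new_limit)
{
	struct sk_buff *skb;

	while (sch->q.qlen > new_limit) {
		skb = qdisc_dequeue_internal(sch, true);
		if (!skb)
			break;
		rtnl_kfree_skbs(skb, skb);
	}
	return 0;
}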
+diff --git a/include/sound/ump_msg.h b/include/sound/ump_msg.h
+index 72f60ddfea7535..9556b4755a1ed8 100644
+--- a/include/sound/ump_msg.h
++++ b/include/sound/ump_msg.h
+@@ -604,7 +604,7 @@ struct snd_ump_stream_msg_ep_info {
+ } __packed;
+ 
+ /* UMP Stream Message: Device Info Notification (128bit) */
+-struct snd_ump_stream_msg_devince_info {
++struct snd_ump_stream_msg_device_info {
+ #ifdef __BIG_ENDIAN_BITFIELD
+ 	/* 0 */
+ 	u32 type:4;
+@@ -754,7 +754,7 @@ struct snd_ump_stream_msg_fb_name {
+ union snd_ump_stream_msg {
+ 	struct snd_ump_stream_msg_ep_discovery ep_discovery;
+ 	struct snd_ump_stream_msg_ep_info ep_info;
+-	struct snd_ump_stream_msg_devince_info device_info;
++	struct snd_ump_stream_msg_device_info device_info;
+ 	struct snd_ump_stream_msg_stream_cfg stream_cfg;
+ 	struct snd_ump_stream_msg_fb_discovery fb_discovery;
+ 	struct snd_ump_stream_msg_fb_info fb_info;
+diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
+index c709a05023cd99..d1fb4bfbbd4c3b 100644
+--- a/kernel/cgroup/cpuset.c
++++ b/kernel/cgroup/cpuset.c
+@@ -1100,9 +1100,11 @@ void cpuset_update_tasks_cpumask(struct cpuset *cs, struct cpumask *new_cpus)
+ 
+ 		if (top_cs) {
+ 			/*
+-			 * Percpu kthreads in top_cpuset are ignored
++			 * PF_NO_SETAFFINITY tasks are ignored.
++			 * All per-CPU kthreads should have the PF_NO_SETAFFINITY
++			 * flag set, see kthread_set_per_cpu().
+ 			 */
+-			if (kthread_is_per_cpu(task))
++			if (task->flags & PF_NO_SETAFFINITY)
+ 				continue;
+ 			cpumask_andnot(new_cpus, possible_mask, subpartitions_cpus);
+ 		} else {
+diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
+index 7ed25654820fd4..ace5262642f9ef 100644
+--- a/kernel/sched/ext.c
++++ b/kernel/sched/ext.c
+@@ -6731,6 +6731,12 @@ __bpf_kfunc int bpf_iter_scx_dsq_new(struct bpf_iter_scx_dsq *it, u64 dsq_id,
+ 	BUILD_BUG_ON(__alignof__(struct bpf_iter_scx_dsq_kern) !=
+ 		     __alignof__(struct bpf_iter_scx_dsq));
+ 
++	/*
++	 * next() and destroy() will be called regardless of the return value.
++	 * Always clear $kit->dsq.
++	 */
++	kit->dsq = NULL;
++
+ 	if (flags & ~__SCX_DSQ_ITER_USER_FLAGS)
+ 		return -EINVAL;
+ 
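
/*
 * Not part of the patch: the iterator-constructor invariant behind the
 * sched_ext hunk above, as a generic sketch. BPF calls next() and
 * destroy() even when new() fails, so new() must put the iterator into a
 * state those callbacks tolerate before taking any early-error return.
 */
#include <linux/errno.h>

struct ex_iter {
	void *cursor;	/* NULL means "nothing to walk, nothing to free" */
};

static int ex_iter_new(struct ex_iter *it, unsigned int flags)
{
	it->cursor = NULL;	/* safe for next()/destroy() from here on */

	if (flags)		/* hypothetical flag validation */
		return -EINVAL;

	/* ... point cursor at the first element ... */
	return 0;
}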
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index e1ffbed8cc5eb5..baa5547e977a02 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -1832,10 +1832,12 @@ static void rb_meta_validate_events(struct ring_buffer_per_cpu *cpu_buffer)
+ 
+ 	head_page = cpu_buffer->head_page;
+ 
+-	/* If both the head and commit are on the reader_page then we are done. */
+-	if (head_page == cpu_buffer->reader_page &&
+-	    head_page == cpu_buffer->commit_page)
++	/* If the commit_buffer is the reader page, update the commit page */
++	if (meta->commit_buffer == (unsigned long)cpu_buffer->reader_page->page) {
++		cpu_buffer->commit_page = cpu_buffer->reader_page;
++		/* Nothing more to do, the only page is the reader page */
+ 		goto done;
++	}
+ 
+ 	/* Iterate until finding the commit page */
+ 	for (i = 0; i < meta->nr_subbufs + 1; i++, rb_inc_page(&head_page)) {
+diff --git a/kernel/trace/trace_dynevent.c b/kernel/trace/trace_dynevent.c
+index 4376887e0d8aab..c9b0533407edeb 100644
+--- a/kernel/trace/trace_dynevent.c
++++ b/kernel/trace/trace_dynevent.c
+@@ -16,7 +16,7 @@
+ #include "trace_output.h"	/* for trace_event_sem */
+ #include "trace_dynevent.h"
+ 
+-static DEFINE_MUTEX(dyn_event_ops_mutex);
++DEFINE_MUTEX(dyn_event_ops_mutex);
+ static LIST_HEAD(dyn_event_ops_list);
+ 
+ bool trace_event_dyn_try_get_ref(struct trace_event_call *dyn_call)
+@@ -125,6 +125,20 @@ int dyn_event_release(const char *raw_command, struct dyn_event_operations *type
+ 	return ret;
+ }
+ 
++/*
++ * Locked version of event creation. The event creation must be protected by
++ * dyn_event_ops_mutex, since that mutex also protects trace_probe_log.
++ */
++int dyn_event_create(const char *raw_command, struct dyn_event_operations *type)
++{
++	int ret;
++
++	mutex_lock(&dyn_event_ops_mutex);
++	ret = type->create(raw_command);
++	mutex_unlock(&dyn_event_ops_mutex);
++	return ret;
++}
++
+ static int create_dyn_event(const char *raw_command)
+ {
+ 	struct dyn_event_operations *ops;
+diff --git a/kernel/trace/trace_dynevent.h b/kernel/trace/trace_dynevent.h
+index 936477a111d3e7..beee3f8d754444 100644
+--- a/kernel/trace/trace_dynevent.h
++++ b/kernel/trace/trace_dynevent.h
+@@ -100,6 +100,7 @@ void *dyn_event_seq_next(struct seq_file *m, void *v, loff_t *pos);
+ void dyn_event_seq_stop(struct seq_file *m, void *v);
+ int dyn_events_release_all(struct dyn_event_operations *type);
+ int dyn_event_release(const char *raw_command, struct dyn_event_operations *type);
++int dyn_event_create(const char *raw_command, struct dyn_event_operations *type);
+ 
+ /*
+  * for_each_dyn_event	-	iterate over the dyn_event list
+diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
+index a5e3d6acf1e1e2..27e21488d57417 100644
+--- a/kernel/trace/trace_events_trigger.c
++++ b/kernel/trace/trace_events_trigger.c
+@@ -1581,7 +1581,7 @@ stacktrace_trigger(struct event_trigger_data *data,
+ 	struct trace_event_file *file = data->private_data;
+ 
+ 	if (file)
+-		__trace_stack(file->tr, tracing_gen_ctx(), STACK_SKIP);
++		__trace_stack(file->tr, tracing_gen_ctx_dec(), STACK_SKIP);
+ 	else
+ 		trace_dump_stack(STACK_SKIP);
+ }
+diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
+index fbbc3c719d2f68..fbb6cf8dc04753 100644
+--- a/kernel/trace/trace_functions.c
++++ b/kernel/trace/trace_functions.c
+@@ -574,11 +574,7 @@ ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
+ 
+ static __always_inline void trace_stack(struct trace_array *tr)
+ {
+-	unsigned int trace_ctx;
+-
+-	trace_ctx = tracing_gen_ctx();
+-
+-	__trace_stack(tr, trace_ctx, FTRACE_STACK_SKIP);
++	__trace_stack(tr, tracing_gen_ctx_dec(), FTRACE_STACK_SKIP);
+ }
+ 
+ static void
+diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
+index 935a886af40c90..6b9c3f3f870f4f 100644
+--- a/kernel/trace/trace_kprobe.c
++++ b/kernel/trace/trace_kprobe.c
+@@ -1090,7 +1090,7 @@ static int create_or_delete_trace_kprobe(const char *raw_command)
+ 	if (raw_command[0] == '-')
+ 		return dyn_event_release(raw_command, &trace_kprobe_ops);
+ 
+-	ret = trace_kprobe_create(raw_command);
++	ret = dyn_event_create(raw_command, &trace_kprobe_ops);
+ 	return ret == -ECANCELED ? -EINVAL : ret;
+ }
+ 
+diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
+index 578919962e5dff..ae20ad7f746162 100644
+--- a/kernel/trace/trace_probe.c
++++ b/kernel/trace/trace_probe.c
+@@ -154,9 +154,12 @@ static const struct fetch_type *find_fetch_type(const char *type, unsigned long
+ }
+ 
+ static struct trace_probe_log trace_probe_log;
++extern struct mutex dyn_event_ops_mutex;
+ 
+ void trace_probe_log_init(const char *subsystem, int argc, const char **argv)
+ {
++	lockdep_assert_held(&dyn_event_ops_mutex);
++
+ 	trace_probe_log.subsystem = subsystem;
+ 	trace_probe_log.argc = argc;
+ 	trace_probe_log.argv = argv;
+@@ -165,11 +168,15 @@ void trace_probe_log_init(const char *subsystem, int argc, const char **argv)
+ 
+ void trace_probe_log_clear(void)
+ {
++	lockdep_assert_held(&dyn_event_ops_mutex);
++
+ 	memset(&trace_probe_log, 0, sizeof(trace_probe_log));
+ }
+ 
+ void trace_probe_log_set_index(int index)
+ {
++	lockdep_assert_held(&dyn_event_ops_mutex);
++
+ 	trace_probe_log.index = index;
+ }
+ 
+@@ -178,6 +185,8 @@ void __trace_probe_log_err(int offset, int err_type)
+ 	char *command, *p;
+ 	int i, len = 0, pos = 0;
+ 
++	lockdep_assert_held(&dyn_event_ops_mutex);
++
+ 	if (!trace_probe_log.argv)
+ 		return;
+ 
+diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
+index b085a8a164ea03..9916677acf24e4 100644
+--- a/kernel/trace/trace_uprobe.c
++++ b/kernel/trace/trace_uprobe.c
+@@ -739,7 +739,7 @@ static int create_or_delete_trace_uprobe(const char *raw_command)
+ 	if (raw_command[0] == '-')
+ 		return dyn_event_release(raw_command, &trace_uprobe_ops);
+ 
+-	ret = trace_uprobe_create(raw_command);
++	ret = dyn_event_create(raw_command, &trace_uprobe_ops);
+ 	return ret == -ECANCELED ? -EINVAL : ret;
+ }
+ 
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index d29da0c6a7f293..ebe1ec66149269 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -7041,9 +7041,6 @@ bool has_managed_dma(void)
+ 
+ #ifdef CONFIG_UNACCEPTED_MEMORY
+ 
+-/* Counts number of zones with unaccepted pages. */
+-static DEFINE_STATIC_KEY_FALSE(zones_with_unaccepted_pages);
+-
+ static bool lazy_accept = true;
+ 
+ static int __init accept_memory_parse(char *p)
+@@ -7070,11 +7067,7 @@ static bool page_contains_unaccepted(struct page *page, unsigned int order)
+ static void __accept_page(struct zone *zone, unsigned long *flags,
+ 			  struct page *page)
+ {
+-	bool last;
+-
+ 	list_del(&page->lru);
+-	last = list_empty(&zone->unaccepted_pages);
+-
+ 	account_freepages(zone, -MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE);
+ 	__mod_zone_page_state(zone, NR_UNACCEPTED, -MAX_ORDER_NR_PAGES);
+ 	__ClearPageUnaccepted(page);
+@@ -7083,9 +7076,6 @@ static void __accept_page(struct zone *zone, unsigned long *flags,
+ 	accept_memory(page_to_phys(page), PAGE_SIZE << MAX_PAGE_ORDER);
+ 
+ 	__free_pages_ok(page, MAX_PAGE_ORDER, FPI_TO_TAIL);
+-
+-	if (last)
+-		static_branch_dec(&zones_with_unaccepted_pages);
+ }
+ 
+ void accept_page(struct page *page)
+@@ -7122,19 +7112,11 @@ static bool try_to_accept_memory_one(struct zone *zone)
+ 	return true;
+ }
+ 
+-static inline bool has_unaccepted_memory(void)
+-{
+-	return static_branch_unlikely(&zones_with_unaccepted_pages);
+-}
+-
+ static bool cond_accept_memory(struct zone *zone, unsigned int order)
+ {
+ 	long to_accept, wmark;
+ 	bool ret = false;
+ 
+-	if (!has_unaccepted_memory())
+-		return false;
+-
+ 	if (list_empty(&zone->unaccepted_pages))
+ 		return false;
+ 
+@@ -7168,22 +7150,17 @@ static bool __free_unaccepted(struct page *page)
+ {
+ 	struct zone *zone = page_zone(page);
+ 	unsigned long flags;
+-	bool first = false;
+ 
+ 	if (!lazy_accept)
+ 		return false;
+ 
+ 	spin_lock_irqsave(&zone->lock, flags);
+-	first = list_empty(&zone->unaccepted_pages);
+ 	list_add_tail(&page->lru, &zone->unaccepted_pages);
+ 	account_freepages(zone, MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE);
+ 	__mod_zone_page_state(zone, NR_UNACCEPTED, MAX_ORDER_NR_PAGES);
+ 	__SetPageUnaccepted(page);
+ 	spin_unlock_irqrestore(&zone->lock, flags);
+ 
+-	if (first)
+-		static_branch_inc(&zones_with_unaccepted_pages);
+-
+ 	return true;
+ }
+ 
+diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
+index 748b52ce856755..e06e3d2709610c 100644
+--- a/mm/userfaultfd.c
++++ b/mm/userfaultfd.c
+@@ -1059,8 +1059,13 @@ static int move_present_pte(struct mm_struct *mm,
+ 	src_folio->index = linear_page_index(dst_vma, dst_addr);
+ 
+ 	orig_dst_pte = mk_pte(&src_folio->page, dst_vma->vm_page_prot);
+-	/* Follow mremap() behavior and treat the entry dirty after the move */
+-	orig_dst_pte = pte_mkwrite(pte_mkdirty(orig_dst_pte), dst_vma);
++	/* Set soft dirty bit so userspace can notice the pte was moved */
++#ifdef CONFIG_MEM_SOFT_DIRTY
++	orig_dst_pte = pte_mksoft_dirty(orig_dst_pte);
++#endif
++	if (pte_dirty(orig_src_pte))
++		orig_dst_pte = pte_mkdirty(orig_dst_pte);
++	orig_dst_pte = pte_mkwrite(orig_dst_pte, dst_vma);
+ 
+ 	set_pte_at(mm, dst_addr, dst_pte, orig_dst_pte);
+ out:
+@@ -1094,6 +1099,9 @@ static int move_swap_pte(struct mm_struct *mm, struct vm_area_struct *dst_vma,
+ 	}
+ 
+ 	orig_src_pte = ptep_get_and_clear(mm, src_addr, src_pte);
++#ifdef CONFIG_MEM_SOFT_DIRTY
++	orig_src_pte = pte_swp_mksoft_dirty(orig_src_pte);
++#endif
+ 	set_pte_at(mm, dst_addr, dst_pte, orig_src_pte);
+ 	double_pt_unlock(dst_ptl, src_ptl);
+ 
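The soft-dirty bit set above is what checkpoint/restore style tools observe through /proc/<pid>/pagemap, where it is bit 55 of each 8-byte entry. A minimal userspace reader, purely for illustration (the caller is assumed to have opened the pagemap file):

    #include <stdint.h>
    #include <stdio.h>

    /* return 1 if vaddr's page is soft-dirty, 0 if not, -1 on error */
    static int page_soft_dirty(FILE *pagemap, unsigned long vaddr,
                               long pagesize)
    {
            uint64_t ent;

            if (fseeko(pagemap, (off_t)(vaddr / pagesize) * sizeof(ent),
                       SEEK_SET))
                    return -1;
            if (fread(&ent, sizeof(ent), 1, pagemap) != 1)
                    return -1;
            return (int)((ent >> 55) & 1);  /* bit 55: soft-dirty */
    }

Without the pte_mksoft_dirty() above, such a tracker would miss pages relocated by UFFDIO_MOVE.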
+diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
+index c019f69c593955..d4700f940e8a12 100644
+--- a/net/bluetooth/mgmt.c
++++ b/net/bluetooth/mgmt.c
+@@ -7612,11 +7612,16 @@ static void add_device_complete(struct hci_dev *hdev, void *data, int err)
+ 	struct mgmt_cp_add_device *cp = cmd->param;
+ 
+ 	if (!err) {
++		struct hci_conn_params *params;
++
++		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
++						le_addr_type(cp->addr.type));
++
+ 		device_added(cmd->sk, hdev, &cp->addr.bdaddr, cp->addr.type,
+ 			     cp->action);
+ 		device_flags_changed(NULL, hdev, &cp->addr.bdaddr,
+ 				     cp->addr.type, hdev->conn_flags,
+-				     PTR_UINT(cmd->user_data));
++				     params ? params->flags : 0);
+ 	}
+ 
+ 	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_ADD_DEVICE,
+@@ -7719,8 +7724,6 @@ static int add_device(struct sock *sk, struct hci_dev *hdev,
+ 		goto unlock;
+ 	}
+ 
+-	cmd->user_data = UINT_PTR(current_flags);
+-
+ 	err = hci_cmd_sync_queue(hdev, add_device_sync, cmd,
+ 				 add_device_complete);
+ 	if (err < 0) {
+diff --git a/net/mac80211/main.c b/net/mac80211/main.c
+index ee1211a213d702..caedc939eea19e 100644
+--- a/net/mac80211/main.c
++++ b/net/mac80211/main.c
+@@ -1344,10 +1344,12 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
+ 	hw->wiphy->software_iftypes |= BIT(NL80211_IFTYPE_MONITOR);
+ 
+ 
+-	local->int_scan_req = kzalloc(sizeof(*local->int_scan_req) +
+-				      sizeof(void *) * channels, GFP_KERNEL);
++	local->int_scan_req = kzalloc(struct_size(local->int_scan_req,
++						  channels, channels),
++				      GFP_KERNEL);
+ 	if (!local->int_scan_req)
+ 		return -ENOMEM;
++	local->int_scan_req->n_channels = channels;
+ 
+ 	eth_broadcast_addr(local->int_scan_req->bssid);
+ 
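struct_size(p, member, n), from <linux/overflow.h>, computes sizeof(*p) + n * sizeof(p->member[0]) with overflow checking, so a large count cannot wrap the allocation size the way open-coded arithmetic can. The shape of the fix, on a hypothetical structure (names here are illustrative):

    struct scan_req {
            int n_channels;
            struct channel *channels[];     /* flexible array member */
    };

    req = kzalloc(struct_size(req, channels, n), GFP_KERNEL);
    if (!req)
            return -ENOMEM;
    req->n_channels = n;    /* record the count too; the missing
                             * assignment was the actual bug here */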
+diff --git a/net/mctp/device.c b/net/mctp/device.c
+index 85cc5f31f1e7c0..8d1386601bbe06 100644
+--- a/net/mctp/device.c
++++ b/net/mctp/device.c
+@@ -20,8 +20,7 @@
+ #include <net/sock.h>
+ 
+ struct mctp_dump_cb {
+-	int h;
+-	int idx;
++	unsigned long ifindex;
+ 	size_t a_idx;
+ };
+ 
+@@ -115,43 +114,36 @@ static int mctp_dump_addrinfo(struct sk_buff *skb, struct netlink_callback *cb)
+ {
+ 	struct mctp_dump_cb *mcb = (void *)cb->ctx;
+ 	struct net *net = sock_net(skb->sk);
+-	struct hlist_head *head;
+ 	struct net_device *dev;
+ 	struct ifaddrmsg *hdr;
+ 	struct mctp_dev *mdev;
+-	int ifindex;
+-	int idx = 0, rc;
+-
+-	hdr = nlmsg_data(cb->nlh);
+-	// filter by ifindex if requested
+-	ifindex = hdr->ifa_index;
++	int ifindex = 0, rc;
++
++	/* Filter by ifindex if a header is provided */
++	if (cb->nlh->nlmsg_len >= nlmsg_msg_size(sizeof(*hdr))) {
++		hdr = nlmsg_data(cb->nlh);
++		ifindex = hdr->ifa_index;
++	} else {
++		if (cb->strict_check) {
++			NL_SET_ERR_MSG(cb->extack, "mctp: Invalid header for addr dump request");
++			return -EINVAL;
++		}
++	}
+ 
+ 	rcu_read_lock();
+-	for (; mcb->h < NETDEV_HASHENTRIES; mcb->h++, mcb->idx = 0) {
+-		idx = 0;
+-		head = &net->dev_index_head[mcb->h];
+-		hlist_for_each_entry_rcu(dev, head, index_hlist) {
+-			if (idx >= mcb->idx &&
+-			    (ifindex == 0 || ifindex == dev->ifindex)) {
+-				mdev = __mctp_dev_get(dev);
+-				if (mdev) {
+-					rc = mctp_dump_dev_addrinfo(mdev,
+-								    skb, cb);
+-					mctp_dev_put(mdev);
+-					// Error indicates full buffer, this
+-					// callback will get retried.
+-					if (rc < 0)
+-						goto out;
+-				}
+-			}
+-			idx++;
+-			// reset for next iteration
+-			mcb->a_idx = 0;
+-		}
++	for_each_netdev_dump(net, dev, mcb->ifindex) {
++		if (ifindex && ifindex != dev->ifindex)
++			continue;
++		mdev = __mctp_dev_get(dev);
++		if (!mdev)
++			continue;
++		rc = mctp_dump_dev_addrinfo(mdev, skb, cb);
++		mctp_dev_put(mdev);
++		if (rc < 0)
++			break;
++		mcb->a_idx = 0;
+ 	}
+-out:
+ 	rcu_read_unlock();
+-	mcb->idx = idx;
+ 
+ 	return skb->len;
+ }
+@@ -525,9 +517,12 @@ static struct notifier_block mctp_dev_nb = {
+ };
+ 
+ static const struct rtnl_msg_handler mctp_device_rtnl_msg_handlers[] = {
+-	{THIS_MODULE, PF_MCTP, RTM_NEWADDR, mctp_rtm_newaddr, NULL, 0},
+-	{THIS_MODULE, PF_MCTP, RTM_DELADDR, mctp_rtm_deladdr, NULL, 0},
+-	{THIS_MODULE, PF_MCTP, RTM_GETADDR, NULL, mctp_dump_addrinfo, 0},
++	{.owner = THIS_MODULE, .protocol = PF_MCTP, .msgtype = RTM_NEWADDR,
++	 .doit = mctp_rtm_newaddr},
++	{.owner = THIS_MODULE, .protocol = PF_MCTP, .msgtype = RTM_DELADDR,
++	 .doit = mctp_rtm_deladdr},
++	{.owner = THIS_MODULE, .protocol = PF_MCTP, .msgtype = RTM_GETADDR,
++	 .dumpit = mctp_dump_addrinfo},
+ };
+ 
+ int __init mctp_device_init(void)
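for_each_netdev_dump() walks net->dev_by_index from a caller-supplied cursor and leaves the cursor at the last visited ifindex, which is exactly what a resumable netlink dump needs; it replaces the easy-to-get-wrong two-level hash-bucket/index bookkeeping deleted above. The idiom, sketched (fill_one() is a hypothetical fill helper):

    struct mctp_dump_cb *mcb = (void *)cb->ctx; /* cursor persists across calls */
    struct net_device *dev;

    rcu_read_lock();
    for_each_netdev_dump(net, dev, mcb->ifindex) {
            if (fill_one(skb, dev) < 0)
                    break;  /* buffer full; next callback resumes at mcb->ifindex */
    }
    rcu_read_unlock();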
+diff --git a/net/mctp/route.c b/net/mctp/route.c
+index 4c460160914f01..d9c8e5a5f9ce9a 100644
+--- a/net/mctp/route.c
++++ b/net/mctp/route.c
+@@ -313,8 +313,10 @@ static void mctp_flow_prepare_output(struct sk_buff *skb, struct mctp_dev *dev)
+ 
+ 	key = flow->key;
+ 
+-	if (WARN_ON(key->dev && key->dev != dev))
++	if (key->dev) {
++		WARN_ON(key->dev != dev);
+ 		return;
++	}
+ 
+ 	mctp_dev_set_key(dev, key);
+ }
+diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c
+index e1f6e7618debd4..afd9805cb68e2c 100644
+--- a/net/sched/sch_codel.c
++++ b/net/sched/sch_codel.c
+@@ -143,7 +143,7 @@ static int codel_change(struct Qdisc *sch, struct nlattr *opt,
+ 
+ 	qlen = sch->q.qlen;
+ 	while (sch->q.qlen > sch->limit) {
+-		struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);
++		struct sk_buff *skb = qdisc_dequeue_internal(sch, true);
+ 
+ 		dropped += qdisc_pkt_len(skb);
+ 		qdisc_qstats_backlog_dec(sch, skb);
+diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
+index afefe124d9039e..1af9768cd8ff6e 100644
+--- a/net/sched/sch_fq.c
++++ b/net/sched/sch_fq.c
+@@ -1113,7 +1113,7 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt,
+ 		sch_tree_lock(sch);
+ 	}
+ 	while (sch->q.qlen > sch->limit) {
+-		struct sk_buff *skb = fq_dequeue(sch);
++		struct sk_buff *skb = qdisc_dequeue_internal(sch, false);
+ 
+ 		if (!skb)
+ 			break;
+diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
+index 778f6e5966be80..551b7cbdae90c5 100644
+--- a/net/sched/sch_fq_codel.c
++++ b/net/sched/sch_fq_codel.c
+@@ -440,7 +440,7 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt,
+ 
+ 	while (sch->q.qlen > sch->limit ||
+ 	       q->memory_usage > q->memory_limit) {
+-		struct sk_buff *skb = fq_codel_dequeue(sch);
++		struct sk_buff *skb = qdisc_dequeue_internal(sch, false);
+ 
+ 		q->cstats.drop_len += qdisc_pkt_len(skb);
+ 		rtnl_kfree_skbs(skb, skb);
+diff --git a/net/sched/sch_fq_pie.c b/net/sched/sch_fq_pie.c
+index c38f33ff80bde7..6ed08b705f8a52 100644
+--- a/net/sched/sch_fq_pie.c
++++ b/net/sched/sch_fq_pie.c
+@@ -364,7 +364,7 @@ static int fq_pie_change(struct Qdisc *sch, struct nlattr *opt,
+ 
+ 	/* Drop excess packets if new limit is lower */
+ 	while (sch->q.qlen > sch->limit) {
+-		struct sk_buff *skb = fq_pie_qdisc_dequeue(sch);
++		struct sk_buff *skb = qdisc_dequeue_internal(sch, false);
+ 
+ 		len_dropped += qdisc_pkt_len(skb);
+ 		num_dropped += 1;
+diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
+index 44d9efe1a96a89..5aa434b4670738 100644
+--- a/net/sched/sch_hhf.c
++++ b/net/sched/sch_hhf.c
+@@ -564,7 +564,7 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt,
+ 	qlen = sch->q.qlen;
+ 	prev_backlog = sch->qstats.backlog;
+ 	while (sch->q.qlen > sch->limit) {
+-		struct sk_buff *skb = hhf_dequeue(sch);
++		struct sk_buff *skb = qdisc_dequeue_internal(sch, false);
+ 
+ 		rtnl_kfree_skbs(skb, skb);
+ 	}
+diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c
+index b3dcb845b32759..db61cbc21b1381 100644
+--- a/net/sched/sch_pie.c
++++ b/net/sched/sch_pie.c
+@@ -192,7 +192,7 @@ static int pie_change(struct Qdisc *sch, struct nlattr *opt,
+ 	/* Drop excess packets if new limit is lower */
+ 	qlen = sch->q.qlen;
+ 	while (sch->q.qlen > sch->limit) {
+-		struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);
++		struct sk_buff *skb = qdisc_dequeue_internal(sch, true);
+ 
+ 		dropped += qdisc_pkt_len(skb);
+ 		qdisc_qstats_backlog_dec(sch, skb);
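All six qdisc ->change() handlers in this series switch to qdisc_dequeue_internal() for the same reason: packets can also sit on the qdisc's gso_skb requeue list (left there by a peek), and draining only the main queue when the limit shrinks leaves qlen/backlog accounting inconsistent. The helper lives in include/net/sch_generic.h; the following is a close paraphrase of it, so treat details as approximate:

    static inline struct sk_buff *
    qdisc_dequeue_internal(struct Qdisc *sch, bool direct)
    {
            struct sk_buff *skb;

            /* drain requeued (peeked) packets first */
            skb = __skb_dequeue(&sch->gso_skb);
            if (skb) {
                    sch->q.qlen--;
                    return skb;
            }
            /* then fall back to the queue itself */
            if (direct)
                    return __qdisc_dequeue_head(&sch->q);
            else
                    return sch->dequeue(sch);
    }

The bool argument mirrors what each caller did before: codel and pie dequeue the head list directly, while the flow-queue qdiscs go through their own ->dequeue().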
+diff --git a/net/tls/tls_strp.c b/net/tls/tls_strp.c
+index 77e33e1e340e31..65b0da6fdf6a79 100644
+--- a/net/tls/tls_strp.c
++++ b/net/tls/tls_strp.c
+@@ -396,7 +396,6 @@ static int tls_strp_read_copy(struct tls_strparser *strp, bool qshort)
+ 		return 0;
+ 
+ 	shinfo = skb_shinfo(strp->anchor);
+-	shinfo->frag_list = NULL;
+ 
+ 	/* If we don't know the length go max plus page for cipher overhead */
+ 	need_spc = strp->stm.full_len ?: TLS_MAX_PAYLOAD_SIZE + PAGE_SIZE;
+@@ -412,6 +411,8 @@ static int tls_strp_read_copy(struct tls_strparser *strp, bool qshort)
+ 				   page, 0, 0);
+ 	}
+ 
++	shinfo->frag_list = NULL;
++
+ 	strp->copy_mode = 1;
+ 	strp->stm.offset = 0;
+ 
+diff --git a/samples/ftrace/sample-trace-array.c b/samples/ftrace/sample-trace-array.c
+index d0ee9001c7b376..aaa8fa92e24d52 100644
+--- a/samples/ftrace/sample-trace-array.c
++++ b/samples/ftrace/sample-trace-array.c
+@@ -112,7 +112,7 @@ static int __init sample_trace_array_init(void)
+ 	/*
+ 	 * If context-specific per-cpu buffers haven't already been allocated.
+ 	 */
+-	trace_printk_init_buffers();
++	trace_array_init_printk(tr);
+ 
+ 	simple_tsk = kthread_run(simple_thread, NULL, "sample-instance");
+ 	if (IS_ERR(simple_tsk)) {
+diff --git a/scripts/Makefile.extrawarn b/scripts/Makefile.extrawarn
+index dc081cf46d211c..686197407c3c61 100644
+--- a/scripts/Makefile.extrawarn
++++ b/scripts/Makefile.extrawarn
+@@ -36,6 +36,18 @@ KBUILD_CFLAGS += -Wno-gnu
+ # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=111219
+ KBUILD_CFLAGS += $(call cc-disable-warning, format-overflow-non-kprintf)
+ KBUILD_CFLAGS += $(call cc-disable-warning, format-truncation-non-kprintf)
++
++# Clang may emit a warning when a const variable, such as the dummy variables
++# in typecheck(), or const member of an aggregate type are not initialized,
++# which can result in unexpected behavior. However, in many audited cases of
++# the "field" variant of the warning, this is intentional because the field is
++# never used within a particular call path, the field is within a union with
++# other non-const members, or the containing object is not const so the field
++# can be modified via memcpy() / memset(). While the variable warning also gets
++# disabled with this same switch, there should not be too much coverage lost
++# because -Wuninitialized will still flag when an uninitialized const variable
++# is used.
++KBUILD_CFLAGS += $(call cc-disable-warning, default-const-init-unsafe)
+ else
+ 
+ # gcc inanely warns about local variables called 'main'
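The "field" variant of clang's default-const-init-unsafe warning that the comment describes fires on patterns like the following, which are legal C precisely because the containing object is not const (illustrative example, not kernel code):

    #include <string.h>

    struct cfg {
            const int mode;         /* const member */
            int len;
    };

    void init(struct cfg *out)
    {
            struct cfg tmp;         /* clang warns: 'mode' left uninitialized */

            tmp.len = 0;            /* 'mode' is never read on this path */
            memcpy(out, &tmp, sizeof(tmp)); /* object isn't const, so this is fine */
    }

Since -Wuninitialized still catches actual reads of uninitialized const variables, disabling this diagnostic loses little coverage.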
+diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
+index b30faf731da720..b74de9c0969fcd 100644
+--- a/sound/core/seq/seq_clientmgr.c
++++ b/sound/core/seq/seq_clientmgr.c
+@@ -732,15 +732,21 @@ static int snd_seq_deliver_single_event(struct snd_seq_client *client,
+  */
+ static int __deliver_to_subscribers(struct snd_seq_client *client,
+ 				    struct snd_seq_event *event,
+-				    struct snd_seq_client_port *src_port,
+-				    int atomic, int hop)
++				    int port, int atomic, int hop)
+ {
++	struct snd_seq_client_port *src_port;
+ 	struct snd_seq_subscribers *subs;
+ 	int err, result = 0, num_ev = 0;
+ 	union __snd_seq_event event_saved;
+ 	size_t saved_size;
+ 	struct snd_seq_port_subs_info *grp;
+ 
++	if (port < 0)
++		return 0;
++	src_port = snd_seq_port_use_ptr(client, port);
++	if (!src_port)
++		return 0;
++
+ 	/* save original event record */
+ 	saved_size = snd_seq_event_packet_size(event);
+ 	memcpy(&event_saved, event, saved_size);
+@@ -775,6 +781,7 @@ static int __deliver_to_subscribers(struct snd_seq_client *client,
+ 		read_unlock(&grp->list_lock);
+ 	else
+ 		up_read(&grp->list_mutex);
++	snd_seq_port_unlock(src_port);
+ 	memcpy(event, &event_saved, saved_size);
+ 	return (result < 0) ? result : num_ev;
+ }
+@@ -783,25 +790,32 @@ static int deliver_to_subscribers(struct snd_seq_client *client,
+ 				  struct snd_seq_event *event,
+ 				  int atomic, int hop)
+ {
+-	struct snd_seq_client_port *src_port;
+-	int ret = 0, ret2;
+-
+-	src_port = snd_seq_port_use_ptr(client, event->source.port);
+-	if (src_port) {
+-		ret = __deliver_to_subscribers(client, event, src_port, atomic, hop);
+-		snd_seq_port_unlock(src_port);
+-	}
+-
+-	if (client->ump_endpoint_port < 0 ||
+-	    event->source.port == client->ump_endpoint_port)
+-		return ret;
++	int ret;
++#if IS_ENABLED(CONFIG_SND_SEQ_UMP)
++	int ret2;
++#endif
+ 
+-	src_port = snd_seq_port_use_ptr(client, client->ump_endpoint_port);
+-	if (!src_port)
++	ret = __deliver_to_subscribers(client, event,
++				       event->source.port, atomic, hop);
++#if IS_ENABLED(CONFIG_SND_SEQ_UMP)
++	if (!snd_seq_client_is_ump(client) || client->ump_endpoint_port < 0)
+ 		return ret;
+-	ret2 = __deliver_to_subscribers(client, event, src_port, atomic, hop);
+-	snd_seq_port_unlock(src_port);
+-	return ret2 < 0 ? ret2 : ret;
++	/* If it's an event from EP port (and with a UMP group),
++	 * deliver to subscribers of the corresponding UMP group port, too.
++	 * Or, if it's from non-EP port, deliver to subscribers of EP port, too.
++	 */
++	if (event->source.port == client->ump_endpoint_port)
++		ret2 = __deliver_to_subscribers(client, event,
++						snd_seq_ump_group_port(event),
++						atomic, hop);
++	else
++		ret2 = __deliver_to_subscribers(client, event,
++						client->ump_endpoint_port,
++						atomic, hop);
++	if (ret2 < 0)
++		return ret2;
++#endif
++	return ret;
+ }
+ 
+ /* deliver an event to the destination port(s).
+diff --git a/sound/core/seq/seq_ump_convert.c b/sound/core/seq/seq_ump_convert.c
+index ff7e558b4d51d0..db2f169cae11ea 100644
+--- a/sound/core/seq/seq_ump_convert.c
++++ b/sound/core/seq/seq_ump_convert.c
+@@ -1285,3 +1285,21 @@ int snd_seq_deliver_to_ump(struct snd_seq_client *source,
+ 	else
+ 		return cvt_to_ump_midi1(dest, dest_port, event, atomic, hop);
+ }
++
++/* return the UMP group-port number of the event;
++ * return -1 if groupless or non-UMP event
++ */
++int snd_seq_ump_group_port(const struct snd_seq_event *event)
++{
++	const struct snd_seq_ump_event *ump_ev =
++		(const struct snd_seq_ump_event *)event;
++	unsigned char type;
++
++	if (!snd_seq_ev_is_ump(event))
++		return -1;
++	type = ump_message_type(ump_ev->ump[0]);
++	if (ump_is_groupless_msg(type))
++		return -1;
++	/* group-port number starts from 1 */
++	return ump_message_group(ump_ev->ump[0]) + 1;
++}
+diff --git a/sound/core/seq/seq_ump_convert.h b/sound/core/seq/seq_ump_convert.h
+index 6c146d8032804f..4abf0a7637d701 100644
+--- a/sound/core/seq/seq_ump_convert.h
++++ b/sound/core/seq/seq_ump_convert.h
+@@ -18,5 +18,6 @@ int snd_seq_deliver_to_ump(struct snd_seq_client *source,
+ 			   struct snd_seq_client_port *dest_port,
+ 			   struct snd_seq_event *event,
+ 			   int atomic, int hop);
++int snd_seq_ump_group_port(const struct snd_seq_event *event);
+ 
+ #endif /* __SEQ_UMP_CONVERT_H */
+diff --git a/sound/pci/es1968.c b/sound/pci/es1968.c
+index c6c018b40c69f9..4e0693f0ab0f89 100644
+--- a/sound/pci/es1968.c
++++ b/sound/pci/es1968.c
+@@ -1561,7 +1561,7 @@ static int snd_es1968_capture_open(struct snd_pcm_substream *substream)
+ 	struct snd_pcm_runtime *runtime = substream->runtime;
+ 	struct es1968 *chip = snd_pcm_substream_chip(substream);
+ 	struct esschan *es;
+-	int apu1, apu2;
++	int err, apu1, apu2;
+ 
+ 	apu1 = snd_es1968_alloc_apu_pair(chip, ESM_APU_PCM_CAPTURE);
+ 	if (apu1 < 0)
+@@ -1605,7 +1605,9 @@ static int snd_es1968_capture_open(struct snd_pcm_substream *substream)
+ 	runtime->hw = snd_es1968_capture;
+ 	runtime->hw.buffer_bytes_max = runtime->hw.period_bytes_max =
+ 		calc_available_memory_size(chip) - 1024; /* keep MIXBUF size */
+-	snd_pcm_hw_constraint_pow2(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES);
++	err = snd_pcm_hw_constraint_pow2(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES);
++	if (err < 0)
++		return err;
+ 
+ 	spin_lock_irq(&chip->substream_lock);
+ 	list_add(&es->list, &chip->substream_list);
+diff --git a/sound/sh/Kconfig b/sound/sh/Kconfig
+index b75fbb3236a7b9..f5fa09d740b4c9 100644
+--- a/sound/sh/Kconfig
++++ b/sound/sh/Kconfig
+@@ -14,7 +14,7 @@ if SND_SUPERH
+ 
+ config SND_AICA
+ 	tristate "Dreamcast Yamaha AICA sound"
+-	depends on SH_DREAMCAST
++	depends on SH_DREAMCAST && SH_DMA_API
+ 	select SND_PCM
+ 	select G2_DMA
+ 	help
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 09210fb4ac60c1..c7387081577cd3 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -2240,6 +2240,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ 		   QUIRK_FLAG_CTL_MSG_DELAY_1M),
+ 	DEVICE_FLG(0x0c45, 0x6340, /* Sonix HD USB Camera */
+ 		   QUIRK_FLAG_GET_SAMPLE_RATE),
++	DEVICE_FLG(0x0c45, 0x636b, /* Microdia JP001 USB Camera */
++		   QUIRK_FLAG_GET_SAMPLE_RATE),
+ 	DEVICE_FLG(0x0d8c, 0x0014, /* USB Audio Device */
+ 		   QUIRK_FLAG_CTL_MSG_DELAY_1M),
+ 	DEVICE_FLG(0x0ecb, 0x205c, /* JBL Quantum610 Wireless */
+@@ -2248,6 +2250,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ 		   QUIRK_FLAG_FIXED_RATE),
+ 	DEVICE_FLG(0x0fd9, 0x0008, /* Hauppauge HVR-950Q */
+ 		   QUIRK_FLAG_SHARE_MEDIA_DEVICE | QUIRK_FLAG_ALIGN_TRANSFER),
++	DEVICE_FLG(0x1101, 0x0003, /* Audioengine D1 */
++		   QUIRK_FLAG_GET_SAMPLE_RATE),
+ 	DEVICE_FLG(0x1224, 0x2a25, /* Jieli Technology USB PHY 2.0 */
+ 		   QUIRK_FLAG_GET_SAMPLE_RATE | QUIRK_FLAG_MIC_RES_16),
+ 	DEVICE_FLG(0x1395, 0x740a, /* Sennheiser DECT */
+diff --git a/tools/net/ynl/ethtool.py b/tools/net/ynl/ethtool.py
+index 63c471f075abf8..7e8342f9148164 100755
+--- a/tools/net/ynl/ethtool.py
++++ b/tools/net/ynl/ethtool.py
+@@ -337,16 +337,24 @@ def main():
+         print('Capabilities:')
+         [print(f'\t{v}') for v in bits_to_dict(tsinfo['timestamping'])]
+ 
+-        print(f'PTP Hardware Clock: {tsinfo["phc-index"]}')
++        print(f'PTP Hardware Clock: {tsinfo.get("phc-index", "none")}')
+ 
+-        print('Hardware Transmit Timestamp Modes:')
+-        [print(f'\t{v}') for v in bits_to_dict(tsinfo['tx-types'])]
++        if 'tx-types' in tsinfo:
++            print('Hardware Transmit Timestamp Modes:')
++            [print(f'\t{v}') for v in bits_to_dict(tsinfo['tx-types'])]
++        else:
++            print('Hardware Transmit Timestamp Modes: none')
++
++        if 'rx-filters' in tsinfo:
++            print('Hardware Receive Filter Modes:')
++            [print(f'\t{v}') for v in bits_to_dict(tsinfo['rx-filters'])]
++        else:
++            print('Hardware Receive Filter Modes: none')
+ 
+-        print('Hardware Receive Filter Modes:')
+-        [print(f'\t{v}') for v in bits_to_dict(tsinfo['rx-filters'])]
++        if 'stats' in tsinfo and tsinfo['stats']:
++            print('Statistics:')
++            [print(f'\t{k}: {v}') for k, v in tsinfo['stats'].items()]
+ 
+-        print('Statistics:')
+-        [print(f'\t{k}: {v}') for k, v in tsinfo['stats'].items()]
+         return
+ 
+     print(f'Settings for {args.device}:')
+diff --git a/tools/testing/selftests/net/ncdevmem.c b/tools/testing/selftests/net/ncdevmem.c
+index 64d6805381c50c..8617e6d7698de9 100644
+--- a/tools/testing/selftests/net/ncdevmem.c
++++ b/tools/testing/selftests/net/ncdevmem.c
+@@ -62,7 +62,7 @@
+  */
+ 
+ static char *server_ip = "192.168.1.4";
+-static char *client_ip = "192.168.1.2";
++static char *client_ip;
+ static char *port = "5201";
+ static size_t do_validation;
+ static int start_queue = 8;
+@@ -71,24 +71,107 @@ static char *ifname = "eth1";
+ static unsigned int ifindex;
+ static unsigned int dmabuf_id;
+ 
+-void print_bytes(void *ptr, size_t size)
++struct memory_buffer {
++	int fd;
++	size_t size;
++
++	int devfd;
++	int memfd;
++	char *buf_mem;
++};
++
++struct memory_provider {
++	struct memory_buffer *(*alloc)(size_t size);
++	void (*free)(struct memory_buffer *ctx);
++	void (*memcpy_from_device)(void *dst, struct memory_buffer *src,
++				   size_t off, int n);
++};
++
++static struct memory_buffer *udmabuf_alloc(size_t size)
+ {
+-	unsigned char *p = ptr;
+-	int i;
++	struct udmabuf_create create;
++	struct memory_buffer *ctx;
++	int ret;
+ 
+-	for (i = 0; i < size; i++)
+-		printf("%02hhX ", p[i]);
+-	printf("\n");
++	ctx = malloc(sizeof(*ctx));
++	if (!ctx)
++		error(1, ENOMEM, "malloc failed");
++
++	ctx->size = size;
++
++	ctx->devfd = open("/dev/udmabuf", O_RDWR);
++	if (ctx->devfd < 0)
++		error(1, errno,
++		      "%s: [skip,no-udmabuf: Unable to access DMA buffer device file]\n",
++		      TEST_PREFIX);
++
++	ctx->memfd = memfd_create("udmabuf-test", MFD_ALLOW_SEALING);
++	if (ctx->memfd < 0)
++		error(1, errno, "%s: [skip,no-memfd]\n", TEST_PREFIX);
++
++	ret = fcntl(ctx->memfd, F_ADD_SEALS, F_SEAL_SHRINK);
++	if (ret < 0)
++		error(1, errno, "%s: [skip,fcntl-add-seals]\n", TEST_PREFIX);
++
++	ret = ftruncate(ctx->memfd, size);
++	if (ret == -1)
++		error(1, errno, "%s: [FAIL,memfd-truncate]\n", TEST_PREFIX);
++
++	memset(&create, 0, sizeof(create));
++
++	create.memfd = ctx->memfd;
++	create.offset = 0;
++	create.size = size;
++	ctx->fd = ioctl(ctx->devfd, UDMABUF_CREATE, &create);
++	if (ctx->fd < 0)
++		error(1, errno, "%s: [FAIL, create udmabuf]\n", TEST_PREFIX);
++
++	ctx->buf_mem = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
++			    ctx->fd, 0);
++	if (ctx->buf_mem == MAP_FAILED)
++		error(1, errno, "%s: [FAIL, map udmabuf]\n", TEST_PREFIX);
++
++	return ctx;
++}
++
++static void udmabuf_free(struct memory_buffer *ctx)
++{
++	munmap(ctx->buf_mem, ctx->size);
++	close(ctx->fd);
++	close(ctx->memfd);
++	close(ctx->devfd);
++	free(ctx);
+ }
+ 
+-void print_nonzero_bytes(void *ptr, size_t size)
++static void udmabuf_memcpy_from_device(void *dst, struct memory_buffer *src,
++				       size_t off, int n)
++{
++	struct dma_buf_sync sync = {};
++
++	sync.flags = DMA_BUF_SYNC_START;
++	ioctl(src->fd, DMA_BUF_IOCTL_SYNC, &sync);
++
++	memcpy(dst, src->buf_mem + off, n);
++
++	sync.flags = DMA_BUF_SYNC_END;
++	ioctl(src->fd, DMA_BUF_IOCTL_SYNC, &sync);
++}
++
++static struct memory_provider udmabuf_memory_provider = {
++	.alloc = udmabuf_alloc,
++	.free = udmabuf_free,
++	.memcpy_from_device = udmabuf_memcpy_from_device,
++};
++
++static struct memory_provider *provider = &udmabuf_memory_provider;
++
++static void print_nonzero_bytes(void *ptr, size_t size)
+ {
+ 	unsigned char *p = ptr;
+ 	unsigned int i;
+ 
+ 	for (i = 0; i < size; i++)
+ 		putchar(p[i]);
+-	printf("\n");
+ }
+ 
+ void validate_buffer(void *line, size_t size)
+@@ -120,7 +203,7 @@ void validate_buffer(void *line, size_t size)
+ 		char command[256];                                      \
+ 		memset(command, 0, sizeof(command));                    \
+ 		snprintf(command, sizeof(command), cmd, ##__VA_ARGS__); \
+-		printf("Running: %s\n", command);                       \
++		fprintf(stderr, "Running: %s\n", command);                       \
+ 		system(command);                                        \
+ 	})
+ 
+@@ -128,22 +211,22 @@ static int reset_flow_steering(void)
+ {
+ 	int ret = 0;
+ 
+-	ret = run_command("sudo ethtool -K %s ntuple off", ifname);
++	ret = run_command("sudo ethtool -K %s ntuple off >&2", ifname);
+ 	if (ret)
+ 		return ret;
+ 
+-	return run_command("sudo ethtool -K %s ntuple on", ifname);
++	return run_command("sudo ethtool -K %s ntuple on >&2", ifname);
+ }
+ 
+ static int configure_headersplit(bool on)
+ {
+-	return run_command("sudo ethtool -G %s tcp-data-split %s", ifname,
++	return run_command("sudo ethtool -G %s tcp-data-split %s >&2", ifname,
+ 			   on ? "on" : "off");
+ }
+ 
+ static int configure_rss(void)
+ {
+-	return run_command("sudo ethtool -X %s equal %d", ifname, start_queue);
++	return run_command("sudo ethtool -X %s equal %d >&2", ifname, start_queue);
+ }
+ 
+ static int configure_channels(unsigned int rx, unsigned int tx)
+@@ -151,10 +234,29 @@ static int configure_channels(unsigned int rx, unsigned int tx)
+ 	return run_command("sudo ethtool -L %s rx %u tx %u", ifname, rx, tx);
+ }
+ 
+-static int configure_flow_steering(void)
++static int configure_flow_steering(struct sockaddr_in6 *server_sin)
+ {
+-	return run_command("sudo ethtool -N %s flow-type tcp4 src-ip %s dst-ip %s src-port %s dst-port %s queue %d",
+-			   ifname, client_ip, server_ip, port, port, start_queue);
++	const char *type = "tcp6";
++	const char *server_addr;
++	char buf[40];
++
++	inet_ntop(AF_INET6, &server_sin->sin6_addr, buf, sizeof(buf));
++	server_addr = buf;
++
++	if (IN6_IS_ADDR_V4MAPPED(&server_sin->sin6_addr)) {
++		type = "tcp4";
++		server_addr = strrchr(server_addr, ':') + 1;
++	}
++
++	return run_command("sudo ethtool -N %s flow-type %s %s %s dst-ip %s %s %s dst-port %s queue %d >&2",
++			   ifname,
++			   type,
++			   client_ip ? "src-ip" : "",
++			   client_ip ?: "",
++			   server_addr,
++			   client_ip ? "src-port" : "",
++			   client_ip ? port : "",
++			   port, start_queue);
+ }
+ 
+ static int bind_rx_queue(unsigned int ifindex, unsigned int dmabuf_fd,
+@@ -187,7 +289,7 @@ static int bind_rx_queue(unsigned int ifindex, unsigned int dmabuf_fd,
+ 		goto err_close;
+ 	}
+ 
+-	printf("got dmabuf id=%d\n", rsp->id);
++	fprintf(stderr, "got dmabuf id=%d\n", rsp->id);
+ 	dmabuf_id = rsp->id;
+ 
+ 	netdev_bind_rx_req_free(req);
+@@ -202,66 +304,82 @@ static int bind_rx_queue(unsigned int ifindex, unsigned int dmabuf_fd,
+ 	return -1;
+ }
+ 
+-static void create_udmabuf(int *devfd, int *memfd, int *buf, size_t dmabuf_size)
++static void enable_reuseaddr(int fd)
+ {
+-	struct udmabuf_create create;
++	int opt = 1;
+ 	int ret;
+ 
+-	*devfd = open("/dev/udmabuf", O_RDWR);
+-	if (*devfd < 0) {
+-		error(70, 0,
+-		      "%s: [skip,no-udmabuf: Unable to access DMA buffer device file]\n",
+-		      TEST_PREFIX);
+-	}
++	ret = setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &opt, sizeof(opt));
++	if (ret)
++		error(1, errno, "%s: [FAIL, SO_REUSEPORT]\n", TEST_PREFIX);
+ 
+-	*memfd = memfd_create("udmabuf-test", MFD_ALLOW_SEALING);
+-	if (*memfd < 0)
+-		error(70, 0, "%s: [skip,no-memfd]\n", TEST_PREFIX);
++	ret = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(opt));
++	if (ret)
++		error(1, errno, "%s: [FAIL, SO_REUSEADDR]\n", TEST_PREFIX);
++}
+ 
+-	/* Required for udmabuf */
+-	ret = fcntl(*memfd, F_ADD_SEALS, F_SEAL_SHRINK);
+-	if (ret < 0)
+-		error(73, 0, "%s: [skip,fcntl-add-seals]\n", TEST_PREFIX);
++static int parse_address(const char *str, int port, struct sockaddr_in6 *sin6)
++{
++	int ret;
+ 
+-	ret = ftruncate(*memfd, dmabuf_size);
+-	if (ret == -1)
+-		error(74, 0, "%s: [FAIL,memfd-truncate]\n", TEST_PREFIX);
++	sin6->sin6_family = AF_INET6;
++	sin6->sin6_port = htons(port);
++
++	ret = inet_pton(sin6->sin6_family, str, &sin6->sin6_addr);
++	if (ret != 1) {
++		/* fallback to plain IPv4 */
++		ret = inet_pton(AF_INET, str, &sin6->sin6_addr.s6_addr32[3]);
++		if (ret != 1)
++			return -1;
++
++		/* add ::ffff prefix */
++		sin6->sin6_addr.s6_addr32[0] = 0;
++		sin6->sin6_addr.s6_addr32[1] = 0;
++		sin6->sin6_addr.s6_addr16[4] = 0;
++		sin6->sin6_addr.s6_addr16[5] = 0xffff;
++	}
+ 
+-	memset(&create, 0, sizeof(create));
++	return 0;
++}
+ 
+-	create.memfd = *memfd;
+-	create.offset = 0;
+-	create.size = dmabuf_size;
+-	*buf = ioctl(*devfd, UDMABUF_CREATE, &create);
+-	if (*buf < 0)
+-		error(75, 0, "%s: [FAIL, create udmabuf]\n", TEST_PREFIX);
++static struct netdev_queue_id *create_queues(void)
++{
++	struct netdev_queue_id *queues;
++	size_t i = 0;
++
++	queues = calloc(num_queues, sizeof(*queues));
++	for (i = 0; i < num_queues; i++) {
++		queues[i]._present.type = 1;
++		queues[i]._present.id = 1;
++		queues[i].type = NETDEV_QUEUE_TYPE_RX;
++		queues[i].id = start_queue + i;
++	}
++
++	return queues;
+ }
+ 
+-int do_server(void)
++int do_server(struct memory_buffer *mem)
+ {
+ 	char ctrl_data[sizeof(int) * 20000];
+ 	struct netdev_queue_id *queues;
+ 	size_t non_page_aligned_frags = 0;
+-	struct sockaddr_in client_addr;
+-	struct sockaddr_in server_sin;
++	struct sockaddr_in6 client_addr;
++	struct sockaddr_in6 server_sin;
+ 	size_t page_aligned_frags = 0;
+-	int devfd, memfd, buf, ret;
+ 	size_t total_received = 0;
+ 	socklen_t client_addr_len;
+ 	bool is_devmem = false;
+-	char *buf_mem = NULL;
++	char *tmp_mem = NULL;
+ 	struct ynl_sock *ys;
+-	size_t dmabuf_size;
+ 	char iobuf[819200];
+ 	char buffer[256];
+ 	int socket_fd;
+ 	int client_fd;
+-	size_t i = 0;
+-	int opt = 1;
+-
+-	dmabuf_size = getpagesize() * NUM_PAGES;
++	int ret;
+ 
+-	create_udmabuf(&devfd, &memfd, &buf, dmabuf_size);
++	ret = parse_address(server_ip, atoi(port), &server_sin);
++	if (ret < 0)
++		error(1, 0, "parse server address");
+ 
+ 	if (reset_flow_steering())
+ 		error(1, 0, "Failed to reset flow steering\n");
+@@ -271,92 +389,65 @@ int do_server(void)
+ 		error(1, 0, "Failed to configure rss\n");
+ 
+ 	/* Flow steer our devmem flows to start_queue */
+-	if (configure_flow_steering())
++	if (configure_flow_steering(&server_sin))
+ 		error(1, 0, "Failed to configure flow steering\n");
+ 
+ 	sleep(1);
+ 
+-	queues = malloc(sizeof(*queues) * num_queues);
+-
+-	for (i = 0; i < num_queues; i++) {
+-		queues[i]._present.type = 1;
+-		queues[i]._present.id = 1;
+-		queues[i].type = NETDEV_QUEUE_TYPE_RX;
+-		queues[i].id = start_queue + i;
+-	}
+-
+-	if (bind_rx_queue(ifindex, buf, queues, num_queues, &ys))
++	if (bind_rx_queue(ifindex, mem->fd, create_queues(), num_queues, &ys))
+ 		error(1, 0, "Failed to bind\n");
+ 
+-	buf_mem = mmap(NULL, dmabuf_size, PROT_READ | PROT_WRITE, MAP_SHARED,
+-		       buf, 0);
+-	if (buf_mem == MAP_FAILED)
+-		error(1, 0, "mmap()");
+-
+-	server_sin.sin_family = AF_INET;
+-	server_sin.sin_port = htons(atoi(port));
++	tmp_mem = malloc(mem->size);
++	if (!tmp_mem)
++		error(1, ENOMEM, "malloc failed");
+ 
+-	ret = inet_pton(server_sin.sin_family, server_ip, &server_sin.sin_addr);
+-	if (socket < 0)
+-		error(79, 0, "%s: [FAIL, create socket]\n", TEST_PREFIX);
++	socket_fd = socket(AF_INET6, SOCK_STREAM, 0);
++	if (socket_fd < 0)
++		error(1, errno, "%s: [FAIL, create socket]\n", TEST_PREFIX);
+ 
+-	socket_fd = socket(server_sin.sin_family, SOCK_STREAM, 0);
+-	if (socket < 0)
+-		error(errno, errno, "%s: [FAIL, create socket]\n", TEST_PREFIX);
++	enable_reuseaddr(socket_fd);
+ 
+-	ret = setsockopt(socket_fd, SOL_SOCKET, SO_REUSEPORT, &opt,
+-			 sizeof(opt));
+-	if (ret)
+-		error(errno, errno, "%s: [FAIL, set sock opt]\n", TEST_PREFIX);
+-
+-	ret = setsockopt(socket_fd, SOL_SOCKET, SO_REUSEADDR, &opt,
+-			 sizeof(opt));
+-	if (ret)
+-		error(errno, errno, "%s: [FAIL, set sock opt]\n", TEST_PREFIX);
+-
+-	printf("binding to address %s:%d\n", server_ip,
+-	       ntohs(server_sin.sin_port));
++	fprintf(stderr, "binding to address %s:%d\n", server_ip,
++		ntohs(server_sin.sin6_port));
+ 
+ 	ret = bind(socket_fd, &server_sin, sizeof(server_sin));
+ 	if (ret)
+-		error(errno, errno, "%s: [FAIL, bind]\n", TEST_PREFIX);
++		error(1, errno, "%s: [FAIL, bind]\n", TEST_PREFIX);
+ 
+ 	ret = listen(socket_fd, 1);
+ 	if (ret)
+-		error(errno, errno, "%s: [FAIL, listen]\n", TEST_PREFIX);
++		error(1, errno, "%s: [FAIL, listen]\n", TEST_PREFIX);
+ 
+ 	client_addr_len = sizeof(client_addr);
+ 
+-	inet_ntop(server_sin.sin_family, &server_sin.sin_addr, buffer,
++	inet_ntop(AF_INET6, &server_sin.sin6_addr, buffer,
+ 		  sizeof(buffer));
+-	printf("Waiting or connection on %s:%d\n", buffer,
+-	       ntohs(server_sin.sin_port));
++	fprintf(stderr, "Waiting for connection on %s:%d\n", buffer,
++		ntohs(server_sin.sin6_port));
+ 	client_fd = accept(socket_fd, &client_addr, &client_addr_len);
+ 
+-	inet_ntop(client_addr.sin_family, &client_addr.sin_addr, buffer,
++	inet_ntop(AF_INET6, &client_addr.sin6_addr, buffer,
+ 		  sizeof(buffer));
+-	printf("Got connection from %s:%d\n", buffer,
+-	       ntohs(client_addr.sin_port));
++	fprintf(stderr, "Got connection from %s:%d\n", buffer,
++		ntohs(client_addr.sin6_port));
+ 
+ 	while (1) {
+ 		struct iovec iov = { .iov_base = iobuf,
+ 				     .iov_len = sizeof(iobuf) };
+ 		struct dmabuf_cmsg *dmabuf_cmsg = NULL;
+-		struct dma_buf_sync sync = { 0 };
+ 		struct cmsghdr *cm = NULL;
+ 		struct msghdr msg = { 0 };
+ 		struct dmabuf_token token;
+ 		ssize_t ret;
+ 
+ 		is_devmem = false;
+-		printf("\n\n");
+ 
+ 		msg.msg_iov = &iov;
+ 		msg.msg_iovlen = 1;
+ 		msg.msg_control = ctrl_data;
+ 		msg.msg_controllen = sizeof(ctrl_data);
+ 		ret = recvmsg(client_fd, &msg, MSG_SOCK_DEVMEM);
+-		printf("recvmsg ret=%ld\n", ret);
++		fprintf(stderr, "recvmsg ret=%ld\n", ret);
+ 		if (ret < 0 && (errno == EAGAIN || errno == EWOULDBLOCK))
+ 			continue;
+ 		if (ret < 0) {
+@@ -364,16 +455,15 @@ int do_server(void)
+ 			continue;
+ 		}
+ 		if (ret == 0) {
+-			printf("client exited\n");
++			fprintf(stderr, "client exited\n");
+ 			goto cleanup;
+ 		}
+ 
+-		i++;
+ 		for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
+ 			if (cm->cmsg_level != SOL_SOCKET ||
+ 			    (cm->cmsg_type != SCM_DEVMEM_DMABUF &&
+ 			     cm->cmsg_type != SCM_DEVMEM_LINEAR)) {
+-				fprintf(stdout, "skipping non-devmem cmsg\n");
++				fprintf(stderr, "skipping non-devmem cmsg\n");
+ 				continue;
+ 			}
+ 
+@@ -384,7 +474,7 @@ int do_server(void)
+ 				/* TODO: process data copied from skb's linear
+ 				 * buffer.
+ 				 */
+-				fprintf(stdout,
++				fprintf(stderr,
+ 					"SCM_DEVMEM_LINEAR. dmabuf_cmsg->frag_size=%u\n",
+ 					dmabuf_cmsg->frag_size);
+ 
+@@ -395,12 +485,13 @@ int do_server(void)
+ 			token.token_count = 1;
+ 
+ 			total_received += dmabuf_cmsg->frag_size;
+-			printf("received frag_page=%llu, in_page_offset=%llu, frag_offset=%llu, frag_size=%u, token=%u, total_received=%lu, dmabuf_id=%u\n",
+-			       dmabuf_cmsg->frag_offset >> PAGE_SHIFT,
+-			       dmabuf_cmsg->frag_offset % getpagesize(),
+-			       dmabuf_cmsg->frag_offset, dmabuf_cmsg->frag_size,
+-			       dmabuf_cmsg->frag_token, total_received,
+-			       dmabuf_cmsg->dmabuf_id);
++			fprintf(stderr,
++				"received frag_page=%llu, in_page_offset=%llu, frag_offset=%llu, frag_size=%u, token=%u, total_received=%lu, dmabuf_id=%u\n",
++				dmabuf_cmsg->frag_offset >> PAGE_SHIFT,
++				dmabuf_cmsg->frag_offset % getpagesize(),
++				dmabuf_cmsg->frag_offset,
++				dmabuf_cmsg->frag_size, dmabuf_cmsg->frag_token,
++				total_received, dmabuf_cmsg->dmabuf_id);
+ 
+ 			if (dmabuf_cmsg->dmabuf_id != dmabuf_id)
+ 				error(1, 0,
+@@ -411,22 +502,16 @@ int do_server(void)
+ 			else
+ 				page_aligned_frags++;
+ 
+-			sync.flags = DMA_BUF_SYNC_READ | DMA_BUF_SYNC_START;
+-			ioctl(buf, DMA_BUF_IOCTL_SYNC, &sync);
++			provider->memcpy_from_device(tmp_mem, mem,
++						     dmabuf_cmsg->frag_offset,
++						     dmabuf_cmsg->frag_size);
+ 
+ 			if (do_validation)
+-				validate_buffer(
+-					((unsigned char *)buf_mem) +
+-						dmabuf_cmsg->frag_offset,
+-					dmabuf_cmsg->frag_size);
++				validate_buffer(tmp_mem,
++						dmabuf_cmsg->frag_size);
+ 			else
+-				print_nonzero_bytes(
+-					((unsigned char *)buf_mem) +
+-						dmabuf_cmsg->frag_offset,
+-					dmabuf_cmsg->frag_size);
+-
+-			sync.flags = DMA_BUF_SYNC_READ | DMA_BUF_SYNC_END;
+-			ioctl(buf, DMA_BUF_IOCTL_SYNC, &sync);
++				print_nonzero_bytes(tmp_mem,
++						    dmabuf_cmsg->frag_size);
+ 
+ 			ret = setsockopt(client_fd, SOL_SOCKET,
+ 					 SO_DEVMEM_DONTNEED, &token,
+@@ -438,25 +523,22 @@ int do_server(void)
+ 		if (!is_devmem)
+ 			error(1, 0, "flow steering error\n");
+ 
+-		printf("total_received=%lu\n", total_received);
++		fprintf(stderr, "total_received=%lu\n", total_received);
+ 	}
+ 
+-	fprintf(stdout, "%s: ok\n", TEST_PREFIX);
++	fprintf(stderr, "%s: ok\n", TEST_PREFIX);
+ 
+-	fprintf(stdout, "page_aligned_frags=%lu, non_page_aligned_frags=%lu\n",
++	fprintf(stderr, "page_aligned_frags=%lu, non_page_aligned_frags=%lu\n",
+ 		page_aligned_frags, non_page_aligned_frags);
+ 
+-	fprintf(stdout, "page_aligned_frags=%lu, non_page_aligned_frags=%lu\n",
++	fprintf(stderr, "page_aligned_frags=%lu, non_page_aligned_frags=%lu\n",
+ 		page_aligned_frags, non_page_aligned_frags);
+ 
+ cleanup:
+ 
+-	munmap(buf_mem, dmabuf_size);
++	free(tmp_mem);
+ 	close(client_fd);
+ 	close(socket_fd);
+-	close(buf);
+-	close(memfd);
+-	close(devfd);
+ 	ynl_sock_destroy(ys);
+ 
+ 	return 0;
+@@ -464,52 +546,33 @@ int do_server(void)
+ 
+ void run_devmem_tests(void)
+ {
+-	struct netdev_queue_id *queues;
+-	int devfd, memfd, buf;
++	struct memory_buffer *mem;
+ 	struct ynl_sock *ys;
+-	size_t dmabuf_size;
+-	size_t i = 0;
+ 
+-	dmabuf_size = getpagesize() * NUM_PAGES;
+-
+-	create_udmabuf(&devfd, &memfd, &buf, dmabuf_size);
++	mem = provider->alloc(getpagesize() * NUM_PAGES);
+ 
+ 	/* Configure RSS to divert all traffic from our devmem queues */
+ 	if (configure_rss())
+ 		error(1, 0, "rss error\n");
+ 
+-	queues = calloc(num_queues, sizeof(*queues));
+-
+ 	if (configure_headersplit(1))
+ 		error(1, 0, "Failed to configure header split\n");
+ 
+-	if (!bind_rx_queue(ifindex, buf, queues, num_queues, &ys))
++	if (!bind_rx_queue(ifindex, mem->fd,
++			   calloc(num_queues, sizeof(struct netdev_queue_id)),
++			   num_queues, &ys))
+ 		error(1, 0, "Binding empty queues array should have failed\n");
+ 
+-	for (i = 0; i < num_queues; i++) {
+-		queues[i]._present.type = 1;
+-		queues[i]._present.id = 1;
+-		queues[i].type = NETDEV_QUEUE_TYPE_RX;
+-		queues[i].id = start_queue + i;
+-	}
+-
+ 	if (configure_headersplit(0))
+ 		error(1, 0, "Failed to configure header split\n");
+ 
+-	if (!bind_rx_queue(ifindex, buf, queues, num_queues, &ys))
++	if (!bind_rx_queue(ifindex, mem->fd, create_queues(), num_queues, &ys))
+ 		error(1, 0, "Configure dmabuf with header split off should have failed\n");
+ 
+ 	if (configure_headersplit(1))
+ 		error(1, 0, "Failed to configure header split\n");
+ 
+-	for (i = 0; i < num_queues; i++) {
+-		queues[i]._present.type = 1;
+-		queues[i]._present.id = 1;
+-		queues[i].type = NETDEV_QUEUE_TYPE_RX;
+-		queues[i].id = start_queue + i;
+-	}
+-
+-	if (bind_rx_queue(ifindex, buf, queues, num_queues, &ys))
++	if (bind_rx_queue(ifindex, mem->fd, create_queues(), num_queues, &ys))
+ 		error(1, 0, "Failed to bind\n");
+ 
+ 	/* Deactivating a bound queue should not be legal */
+@@ -518,11 +581,15 @@ void run_devmem_tests(void)
+ 
+ 	/* Closing the netlink socket does an implicit unbind */
+ 	ynl_sock_destroy(ys);
++
++	provider->free(mem);
+ }
+ 
+ int main(int argc, char *argv[])
+ {
++	struct memory_buffer *mem;
+ 	int is_server = 0, opt;
++	int ret;
+ 
+ 	while ((opt = getopt(argc, argv, "ls:c:p:v:q:t:f:")) != -1) {
+ 		switch (opt) {
+@@ -551,7 +618,7 @@ int main(int argc, char *argv[])
+ 			ifname = optarg;
+ 			break;
+ 		case '?':
+-			printf("unknown option: %c\n", optopt);
++			fprintf(stderr, "unknown option: %c\n", optopt);
+ 			break;
+ 		}
+ 	}
+@@ -559,12 +626,13 @@ int main(int argc, char *argv[])
+ 	ifindex = if_nametoindex(ifname);
+ 
+ 	for (; optind < argc; optind++)
+-		printf("extra arguments: %s\n", argv[optind]);
++		fprintf(stderr, "extra arguments: %s\n", argv[optind]);
+ 
+ 	run_devmem_tests();
+ 
+-	if (is_server)
+-		return do_server();
++	mem = provider->alloc(getpagesize() * NUM_PAGES);
++	ret = is_server ? do_server(mem) : 1;
++	provider->free(mem);
+ 
+-	return 0;
++	return ret;
+ }
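Folding IPv4 addresses into IPv4-mapped IPv6 form (::ffff:a.b.c.d) lets a single AF_INET6 socket serve both families, which is why the rewrite above drops the sockaddr_in variants entirely. Usage, as the new code exercises it:

    struct sockaddr_in6 sin6;

    if (parse_address("192.168.1.4", 5201, &sin6))
            error(1, 0, "parse server address");
    /* sin6 now holds ::ffff:192.168.1.4 and can be passed straight to
     * bind()/connect() on an AF_INET6 socket */

The IN6_IS_ADDR_V4MAPPED() check in configure_flow_steering() then recovers the dotted-quad form when the ethtool rule needs flow-type tcp4.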
+diff --git a/tools/testing/vsock/vsock_test.c b/tools/testing/vsock/vsock_test.c
+index 0b7f5bf546da56..0c22ff7a8de2a2 100644
+--- a/tools/testing/vsock/vsock_test.c
++++ b/tools/testing/vsock/vsock_test.c
+@@ -1283,21 +1283,25 @@ static void test_unsent_bytes_client(const struct test_opts *opts, int type)
+ 	send_buf(fd, buf, sizeof(buf), 0, sizeof(buf));
+ 	control_expectln("RECEIVED");
+ 
+-	ret = ioctl(fd, SIOCOUTQ, &sock_bytes_unsent);
+-	if (ret < 0) {
+-		if (errno == EOPNOTSUPP) {
+-			fprintf(stderr, "Test skipped, SIOCOUTQ not supported.\n");
+-		} else {
++	/* SIOCOUTQ isn't guaranteed to instantly track sent data. Even though
++	 * the "RECEIVED" message means that the other side has received the
++	 * data, there can be a delay in our kernel before updating the "unsent
++	 * bytes" counter. Repeat SIOCOUTQ until it returns 0.
++	 */
++	timeout_begin(TIMEOUT);
++	do {
++		ret = ioctl(fd, SIOCOUTQ, &sock_bytes_unsent);
++		if (ret < 0) {
++			if (errno == EOPNOTSUPP) {
++				fprintf(stderr, "Test skipped, SIOCOUTQ not supported.\n");
++				break;
++			}
+ 			perror("ioctl");
+ 			exit(EXIT_FAILURE);
+ 		}
+-	} else if (ret == 0 && sock_bytes_unsent != 0) {
+-		fprintf(stderr,
+-			"Unexpected 'SIOCOUTQ' value, expected 0, got %i\n",
+-			sock_bytes_unsent);
+-		exit(EXIT_FAILURE);
+-	}
+-
++		timeout_check("SIOCOUTQ");
++	} while (sock_bytes_unsent != 0);
++	timeout_end();
+ 	close(fd);
+ }
+ 
+diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
+index 8f079a61a56db7..bb062d3d245723 100644
+--- a/virt/kvm/guest_memfd.c
++++ b/virt/kvm/guest_memfd.c
+@@ -118,6 +118,8 @@ static void kvm_gmem_invalidate_begin(struct kvm_gmem *gmem, pgoff_t start,
+ 			.end = slot->base_gfn + min(pgoff + slot->npages, end) - pgoff,
+ 			.slot = slot,
+ 			.may_block = true,
++			/* guest memfd is relevant only to private mappings. */
++			.attr_filter = KVM_FILTER_PRIVATE,
+ 		};
+ 
+ 		if (!found_memslot) {
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 279e03029ce149..b99de3b5ffbc03 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -632,6 +632,11 @@ static __always_inline kvm_mn_ret_t __kvm_handle_hva_range(struct kvm *kvm,
+ 			 */
+ 			gfn_range.arg = range->arg;
+ 			gfn_range.may_block = range->may_block;
++			/*
++			 * HVA-based notifications aren't relevant to private
++			 * mappings as they don't have a userspace mapping.
++			 */
++			gfn_range.attr_filter = KVM_FILTER_SHARED;
+ 
+ 			/*
+ 			 * {gfn(page) | page intersects with [hva_start, hva_end)} =
+@@ -2454,6 +2459,14 @@ static __always_inline void kvm_handle_gfn_range(struct kvm *kvm,
+ 	gfn_range.arg = range->arg;
+ 	gfn_range.may_block = range->may_block;
+ 
++	/*
++	 * If/when KVM supports more attributes beyond private vs. shared, this
++	 * _could_ set KVM_FILTER_{SHARED,PRIVATE} appropriately if the entire target
++	 * range already has the desired private vs. shared state (it's unclear
++	 * if that is a net win).  For now, KVM reaches this point if and only
++	 * if the private flag is being toggled, i.e. all mappings are in play.
++	 */
++
+ 	for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
+ 		slots = __kvm_memslots(kvm, i);
+ 
+@@ -2510,6 +2523,7 @@ static int kvm_vm_set_mem_attributes(struct kvm *kvm, gfn_t start, gfn_t end,
+ 	struct kvm_mmu_notifier_range pre_set_range = {
+ 		.start = start,
+ 		.end = end,
++		.arg.attributes = attributes,
+ 		.handler = kvm_pre_set_memory_attributes,
+ 		.on_lock = kvm_mmu_invalidate_begin,
+ 		.flush_on_ret = true,


* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2025-05-18 14:32 Mike Pagano
  0 siblings, 0 replies; 82+ messages in thread
From: Mike Pagano @ 2025-05-18 14:32 UTC (permalink / raw
  To: gentoo-commits

commit:     fd40757e709d46d2603e8a780bd43615f1fdab86
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun May 18 14:32:45 2025 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun May 18 14:32:45 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=fd40757e

Linux patch 6.12.29

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1028_linux-6.12.29.patch | 9018 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 9022 insertions(+)

diff --git a/0000_README b/0000_README
index f98bc820..a161cb90 100644
--- a/0000_README
+++ b/0000_README
@@ -155,6 +155,10 @@ Patch:  1027_linux-6.12.28.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.12.28
 
+Patch:  1028_linux-6.12.29.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.12.29
+
 Patch:  1500_fortify-copy-size-value-range-tracking-fix.patch
 From:   https://git.kernel.org/
 Desc:   fortify: Hide run-time copy size from value range tracking

diff --git a/1028_linux-6.12.29.patch b/1028_linux-6.12.29.patch
new file mode 100644
index 00000000..f480b0fd
--- /dev/null
+++ b/1028_linux-6.12.29.patch
@@ -0,0 +1,9018 @@
+diff --git a/.clippy.toml b/.clippy.toml
+index e4c4eef10b28c1..5d99a317f7d6fc 100644
+--- a/.clippy.toml
++++ b/.clippy.toml
+@@ -5,5 +5,5 @@ check-private-items = true
+ disallowed-macros = [
+     # The `clippy::dbg_macro` lint only works with `std::dbg!`, thus we simulate
+     # it here, see: https://github.com/rust-lang/rust-clippy/issues/11303.
+-    { path = "kernel::dbg", reason = "the `dbg!` macro is intended as a debugging tool" },
++    { path = "kernel::dbg", reason = "the `dbg!` macro is intended as a debugging tool", allow-invalid = true },
+ ]
+diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
+index 206079d3bd5b12..6a1acabb29d85f 100644
+--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
++++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
+@@ -511,6 +511,7 @@ Description:	information about CPUs heterogeneity.
+ 
+ What:		/sys/devices/system/cpu/vulnerabilities
+ 		/sys/devices/system/cpu/vulnerabilities/gather_data_sampling
++		/sys/devices/system/cpu/vulnerabilities/indirect_target_selection
+ 		/sys/devices/system/cpu/vulnerabilities/itlb_multihit
+ 		/sys/devices/system/cpu/vulnerabilities/l1tf
+ 		/sys/devices/system/cpu/vulnerabilities/mds
+diff --git a/Documentation/admin-guide/hw-vuln/index.rst b/Documentation/admin-guide/hw-vuln/index.rst
+index ff0b440ef2dc90..d2caa390395e5b 100644
+--- a/Documentation/admin-guide/hw-vuln/index.rst
++++ b/Documentation/admin-guide/hw-vuln/index.rst
+@@ -22,3 +22,4 @@ are configurable at compile, boot or run time.
+    srso
+    gather_data_sampling
+    reg-file-data-sampling
++   indirect-target-selection
+diff --git a/Documentation/admin-guide/hw-vuln/indirect-target-selection.rst b/Documentation/admin-guide/hw-vuln/indirect-target-selection.rst
+new file mode 100644
+index 00000000000000..d9ca64108d2332
+--- /dev/null
++++ b/Documentation/admin-guide/hw-vuln/indirect-target-selection.rst
+@@ -0,0 +1,168 @@
++.. SPDX-License-Identifier: GPL-2.0
++
++Indirect Target Selection (ITS)
++===============================
++
++ITS is a vulnerability in some Intel CPUs that support Enhanced IBRS and were
++released before Alder Lake. ITS may allow an attacker to control the prediction
++of indirect branches and RETs located in the lower half of a cacheline.
++
++ITS is assigned CVE-2024-28956 with a CVSS score of 4.7 (Medium).
++
++Scope of Impact
++---------------
++- **eIBRS Guest/Host Isolation**: Indirect branches in KVM/kernel may still be
++  predicted with an unintended target corresponding to a branch in the guest.
++
++- **Intra-Mode BTI**: In-kernel training such as through cBPF or other native
++  gadgets.
++
++- **Indirect Branch Prediction Barrier (IBPB)**: After an IBPB, indirect
++  branches may still be predicted with targets corresponding to direct branches
++  executed prior to the IBPB. This is fixed by the IPU 2025.1 microcode, which
++  should be available via distro updates. Alternatively microcode can be
++  obtained from Intel's github repository [#f1]_.
++
++Affected CPUs
++-------------
++Below is the list of ITS affected CPUs [#f2]_ [#f3]_:
++
++   ========================  ============  ====================  ===============
++   Common name               Family_Model  eIBRS                 Intra-mode BTI
++                                           Guest/Host Isolation
++   ========================  ============  ====================  ===============
++   SKYLAKE_X (step >= 6)     06_55H        Affected              Affected
++   ICELAKE_X                 06_6AH        Not affected          Affected
++   ICELAKE_D                 06_6CH        Not affected          Affected
++   ICELAKE_L                 06_7EH        Not affected          Affected
++   TIGERLAKE_L               06_8CH        Not affected          Affected
++   TIGERLAKE                 06_8DH        Not affected          Affected
++   KABYLAKE_L (step >= 12)   06_8EH        Affected              Affected
++   KABYLAKE (step >= 13)     06_9EH        Affected              Affected
++   COMETLAKE                 06_A5H        Affected              Affected
++   COMETLAKE_L               06_A6H        Affected              Affected
++   ROCKETLAKE                06_A7H        Not affected          Affected
++   ========================  ============  ====================  ===============
++
++- All affected CPUs enumerate Enhanced IBRS feature.
++- IBPB isolation is affected on all ITS-affected CPUs, and needs a microcode
++  update for mitigation.
++- None of the affected CPUs enumerate BHI_CTRL which was introduced in Golden
++  Cove (Alder Lake and Sapphire Rapids). This can help guests to determine the
++  host's affected status.
++- Intel Atom CPUs are not affected by ITS.
++
++Mitigation
++----------
++As only the indirect branches and RETs that have their last byte of instruction
++in the lower half of the cacheline are vulnerable to ITS, the basic idea behind
++the mitigation is to not allow indirect branches in the lower half.
++
++This is achieved by relying on existing retpoline support in the kernel, and in
++compilers. ITS-vulnerable retpoline sites are runtime patched to point to newly
++added ITS-safe thunks. These safe thunks consist of an indirect branch in the
++second half of the cacheline. Not all retpoline sites are patched to thunks; if
++a retpoline site is evaluated to be ITS-safe, it is replaced with an inline
++indirect branch.
++
++Dynamic thunks
++~~~~~~~~~~~~~~
++From a dynamically allocated pool of safe-thunks, each vulnerable site is
++replaced with a new thunk, such that they get a unique address. This could
++improve the branch prediction accuracy. Also, it is a defense-in-depth measure
++against aliasing.
++
++Note, for simplicity, indirect branches in eBPF programs are always replaced
++with a jump to a static thunk in __x86_indirect_its_thunk_array. If required,
++in future this can be changed to use dynamic thunks.
++
++All vulnerable RETs are replaced with a static thunk; they do not use dynamic
++thunks. This is because RETs mostly get their prediction from the RSB, which
++does not depend on the source address. RETs that underflow the RSB may benefit
++from dynamic thunks. But RETs significantly outnumber indirect branches, and any benefit
++from a unique source address could be outweighed by the increased icache
++footprint and iTLB pressure.
++
++Retpoline
++~~~~~~~~~
++The retpoline sequence also mitigates ITS-unsafe indirect branches. For this
++reason, when retpoline is enabled, the ITS mitigation only relocates the RETs
++to safe thunks, unless the user requested the RSB-stuffing mitigation.
++
++RSB Stuffing
++~~~~~~~~~~~~
++RSB-stuffing via Call Depth Tracking is a mitigation for Retbleed RSB-underflow
++attacks. And it also mitigates RETs that are vulnerable to ITS.
++
++Mitigation in guests
++^^^^^^^^^^^^^^^^^^^^
++All guests deploy ITS mitigation by default, irrespective of eIBRS enumeration
++and Family/Model of the guest. This is because the eIBRS feature could be hidden
++from a guest. One exception to this is when a guest enumerates BHI_DIS_S, which
++indicates that the guest is running on an unaffected host.
++
++To prevent guests from unnecessarily deploying the mitigation on unaffected
++platforms, Intel has defined ITS_NO bit(62) in MSR IA32_ARCH_CAPABILITIES. When
++a guest sees this bit set, it should not enumerate the ITS bug. Note, this bit
++is not set by any hardware, but is **intended for VMMs to synthesize** it for
++guests as per the host's affected status.
++
++Mitigation options
++^^^^^^^^^^^^^^^^^^
++The ITS mitigation can be controlled using the "indirect_target_selection"
++kernel parameter. The available options are:
++
++   ======== ===================================================================
++   on       (default)  Deploy the "Aligned branch/return thunks" mitigation.
++	    If spectre_v2 mitigation enables retpoline, aligned-thunks are only
++	    deployed for the affected RET instructions. Retpoline mitigates
++	    indirect branches.
++
++   off      Disable ITS mitigation.
++
++   vmexit   Equivalent to "=on" if the CPU is affected by guest/host isolation
++	    part of ITS. Otherwise, mitigation is not deployed. This option is
++	    useful when host userspace is not in the threat model, and only
++	    attacks from guest to host are considered.
++
++   stuff    Deploy RSB-fill mitigation when retpoline is also deployed.
++	    Otherwise, deploy the default mitigation. When retpoline mitigation
++	    is enabled, RSB-stuffing via Call-Depth-Tracking also mitigates
++	    ITS.
++
++   force    Force the ITS bug and deploy the default mitigation.
++   ======== ===================================================================
++
++Sysfs reporting
++---------------
++
++The sysfs file showing ITS mitigation status is:
++
++  /sys/devices/system/cpu/vulnerabilities/indirect_target_selection
++
++Note, microcode mitigation status is not reported in this file.
++
++The possible values in this file are:
++
++.. list-table::
++
++   * - Not affected
++     - The processor is not vulnerable.
++   * - Vulnerable
++     - System is vulnerable and no mitigation has been applied.
++   * - Vulnerable, KVM: Not affected
++     - System is vulnerable to intra-mode BTI, but not affected by eIBRS
++       guest/host isolation.
++   * - Mitigation: Aligned branch/return thunks
++     - The mitigation is enabled, affected indirect branches and RETs are
++       relocated to safe thunks.
++   * - Mitigation: Retpolines, Stuffing RSB
++     - The mitigation is enabled using retpoline and RSB stuffing.
++
++References
++----------
++.. [#f1] Microcode repository - https://github.com/intel/Intel-Linux-Processor-Microcode-Data-Files
++
++.. [#f2] Affected Processors list - https://www.intel.com/content/www/us/en/developer/topic-technology/software-security-guidance/processors-affected-consolidated-product-cpu-model.html
++
++.. [#f3] Affected Processors list (machine readable) - https://github.com/intel/Intel-affected-processor-list
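Putting the reporting interface above to work: the status file can be read programmatically as well as with cat, while a parameter such as indirect_target_selection=vmexit selects the policy at boot, per the option table. A minimal sketch of a status probe (the path and the example value come straight from the documentation above; everything else is illustrative):

    #include <stdio.h>

    int main(void)
    {
            char buf[128];
            FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities"
                            "/indirect_target_selection", "r");

            if (!f)
                    return 1; /* file absent: kernel predates ITS reporting */
            if (fgets(buf, sizeof(buf), f))
                    fputs(buf, stdout); /* e.g. "Mitigation: Aligned branch/return thunks" */
            fclose(f);
            return 0;
    }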
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index 607a8937f17549..e691f75c97e7b6 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -2149,6 +2149,23 @@
+ 			different crypto accelerators. This option can be used
+ 			to achieve best performance for particular HW.
+ 
++	indirect_target_selection= [X86,Intel] Mitigation control for the
++			Indirect Target Selection (ITS) bug in Intel CPUs.
++			Updated microcode is also required for a fix in IBPB.
++
++			on:     Enable mitigation (default).
++			off:    Disable mitigation.
++			force:	Force the ITS bug and deploy default
++				mitigation.
++			vmexit: Only deploy mitigation if the CPU is affected
++				by the guest/host isolation part of ITS.
++			stuff:	Deploy RSB-fill mitigation when retpoline is
++				also deployed. Otherwise, deploy the default
++				mitigation.
++
++			For details see:
++			Documentation/admin-guide/hw-vuln/indirect-target-selection.rst
++
+ 	init=		[KNL]
+ 			Format: <full_path>
+ 			Run specified binary instead of /sbin/init as init
+@@ -3510,6 +3527,7 @@
+ 				expose users to several CPU vulnerabilities.
+ 				Equivalent to: if nokaslr then kpti=0 [ARM64]
+ 					       gather_data_sampling=off [X86]
++					       indirect_target_selection=off [X86]
+ 					       kvm.nx_huge_pages=off [X86]
+ 					       l1tf=off [X86]
+ 					       mds=off [X86]
+diff --git a/Makefile b/Makefile
+index f26e0f946f02ee..7a06c48ffbaa5b 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 12
+-SUBLEVEL = 28
++SUBLEVEL = 29
+ EXTRAVERSION =
+ NAME = Baby Opossum Posse
+ 
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
+index aee79a50d0e26a..d9b13c87f93bbe 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
+@@ -165,6 +165,19 @@ reg_usdhc2_vmmc: regulator-usdhc2 {
+ 		startup-delay-us = <20000>;
+ 	};
+ 
++	reg_usdhc2_vqmmc: regulator-usdhc2-vqmmc {
++		compatible = "regulator-gpio";
++		pinctrl-names = "default";
++		pinctrl-0 = <&pinctrl_usdhc2_vsel>;
++		gpios = <&gpio1 4 GPIO_ACTIVE_HIGH>;
++		regulator-max-microvolt = <3300000>;
++		regulator-min-microvolt = <1800000>;
++		states = <1800000 0x1>,
++			 <3300000 0x0>;
++		regulator-name = "PMIC_USDHC_VSELECT";
++		vin-supply = <&reg_nvcc_sd>;
++	};
++
+ 	reserved-memory {
+ 		#address-cells = <2>;
+ 		#size-cells = <2>;
+@@ -290,7 +303,7 @@ &gpio1 {
+ 			  "SODIMM_19",
+ 			  "",
+ 			  "",
+-			  "",
++			  "PMIC_USDHC_VSELECT",
+ 			  "",
+ 			  "",
+ 			  "",
+@@ -801,6 +814,7 @@ &usdhc2 {
+ 	pinctrl-2 = <&pinctrl_usdhc2_200mhz>, <&pinctrl_usdhc2_cd>;
+ 	pinctrl-3 = <&pinctrl_usdhc2_sleep>, <&pinctrl_usdhc2_cd_sleep>;
+ 	vmmc-supply = <&reg_usdhc2_vmmc>;
++	vqmmc-supply = <&reg_usdhc2_vqmmc>;
+ };
+ 
+ &wdog1 {
+@@ -1222,13 +1236,17 @@ pinctrl_usdhc2_pwr_en: usdhc2pwrengrp {
+ 			<MX8MM_IOMUXC_NAND_CLE_GPIO3_IO5		0x6>;	/* SODIMM 76 */
+ 	};
+ 
++	pinctrl_usdhc2_vsel: usdhc2vselgrp {
++		fsl,pins =
++			<MX8MM_IOMUXC_GPIO1_IO04_GPIO1_IO4	0x10>; /* PMIC_USDHC_VSELECT */
++	};
++
+ 	/*
+ 	 * Note: Due to ERR050080 we use discrete external on-module resistors pulling-up to the
+ 	 * on-module +V3.3_1.8_SD (LDO5) rail and explicitly disable the internal pull-ups here.
+ 	 */
+ 	pinctrl_usdhc2: usdhc2grp {
+ 		fsl,pins =
+-			<MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT		0x10>,
+ 			<MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK		0x90>,	/* SODIMM 78 */
+ 			<MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD		0x90>,	/* SODIMM 74 */
+ 			<MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0		0x90>,	/* SODIMM 80 */
+@@ -1239,7 +1257,6 @@ pinctrl_usdhc2: usdhc2grp {
+ 
+ 	pinctrl_usdhc2_100mhz: usdhc2-100mhzgrp {
+ 		fsl,pins =
+-			<MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT		0x10>,
+ 			<MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK		0x94>,
+ 			<MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD		0x94>,
+ 			<MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0		0x94>,
+@@ -1250,7 +1267,6 @@ pinctrl_usdhc2_100mhz: usdhc2-100mhzgrp {
+ 
+ 	pinctrl_usdhc2_200mhz: usdhc2-200mhzgrp {
+ 		fsl,pins =
+-			<MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT		0x10>,
+ 			<MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK		0x96>,
+ 			<MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD		0x96>,
+ 			<MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0		0x96>,
+@@ -1262,7 +1278,6 @@ pinctrl_usdhc2_200mhz: usdhc2-200mhzgrp {
+ 	/* Avoid backfeeding with removed card power */
+ 	pinctrl_usdhc2_sleep: usdhc2slpgrp {
+ 		fsl,pins =
+-			<MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT		0x0>,
+ 			<MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK		0x0>,
+ 			<MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD		0x0>,
+ 			<MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0		0x0>,
+diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
+index 2a4e686e633c62..8a6b7feca3e428 100644
+--- a/arch/arm64/include/asm/cputype.h
++++ b/arch/arm64/include/asm/cputype.h
+@@ -81,6 +81,7 @@
+ #define ARM_CPU_PART_CORTEX_A78AE	0xD42
+ #define ARM_CPU_PART_CORTEX_X1		0xD44
+ #define ARM_CPU_PART_CORTEX_A510	0xD46
++#define ARM_CPU_PART_CORTEX_X1C		0xD4C
+ #define ARM_CPU_PART_CORTEX_A520	0xD80
+ #define ARM_CPU_PART_CORTEX_A710	0xD47
+ #define ARM_CPU_PART_CORTEX_A715	0xD4D
+@@ -166,6 +167,7 @@
+ #define MIDR_CORTEX_A78AE	MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78AE)
+ #define MIDR_CORTEX_X1	MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1)
+ #define MIDR_CORTEX_A510 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A510)
++#define MIDR_CORTEX_X1C MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1C)
+ #define MIDR_CORTEX_A520 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A520)
+ #define MIDR_CORTEX_A710 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A710)
+ #define MIDR_CORTEX_A715 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A715)
+diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
+index bc77869dbd43b2..509c874de5c729 100644
+--- a/arch/arm64/include/asm/insn.h
++++ b/arch/arm64/include/asm/insn.h
+@@ -693,6 +693,7 @@ u32 aarch64_insn_gen_cas(enum aarch64_insn_register result,
+ }
+ #endif
+ u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type);
++u32 aarch64_insn_gen_dsb(enum aarch64_insn_mb_type type);
+ u32 aarch64_insn_gen_mrs(enum aarch64_insn_register result,
+ 			 enum aarch64_insn_system_register sysreg);
+ 
+diff --git a/arch/arm64/include/asm/spectre.h b/arch/arm64/include/asm/spectre.h
+index f1524cdeacf1c4..8fef1262609011 100644
+--- a/arch/arm64/include/asm/spectre.h
++++ b/arch/arm64/include/asm/spectre.h
+@@ -97,6 +97,9 @@ enum mitigation_state arm64_get_meltdown_state(void);
+ 
+ enum mitigation_state arm64_get_spectre_bhb_state(void);
+ bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, int scope);
++extern bool __nospectre_bhb;
++u8 get_spectre_bhb_loop_value(void);
++bool is_spectre_bhb_fw_mitigated(void);
+ void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
+ bool try_emulate_el1_ssbs(struct pt_regs *regs, u32 instr);
+ 
+diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
+index 709f2b51be6df3..05ccf4ec278f78 100644
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -111,7 +111,14 @@ static struct arm64_cpu_capabilities const __ro_after_init *cpucap_ptrs[ARM64_NC
+ 
+ DECLARE_BITMAP(boot_cpucaps, ARM64_NCAPS);
+ 
+-bool arm64_use_ng_mappings = false;
++/*
++ * arm64_use_ng_mappings must be placed in the .data section, otherwise it
++ * ends up in the .bss section where it is initialized in early_map_kernel()
++ * after the MMU (with the idmap) was enabled. create_init_idmap() - which
++ * runs before early_map_kernel() and reads the variable via PTE_MAYBE_NG -
++ * may end up generating an incorrect idmap page table attributes.
++ */
++bool arm64_use_ng_mappings __read_mostly = false;
+ EXPORT_SYMBOL(arm64_use_ng_mappings);
+ 
+ DEFINE_PER_CPU_READ_MOSTLY(const char *, this_cpu_vector) = vectors;
+diff --git a/arch/arm64/kernel/proton-pack.c b/arch/arm64/kernel/proton-pack.c
+index 30e79f111b35e3..8ef3335ecff722 100644
+--- a/arch/arm64/kernel/proton-pack.c
++++ b/arch/arm64/kernel/proton-pack.c
+@@ -891,6 +891,7 @@ static u8 spectre_bhb_loop_affected(void)
+ 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A78AE),
+ 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
+ 		MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
++		MIDR_ALL_VERSIONS(MIDR_CORTEX_X1C),
+ 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
+ 		MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
+ 		MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
+@@ -998,6 +999,11 @@ bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
+ 	return true;
+ }
+ 
++u8 get_spectre_bhb_loop_value(void)
++{
++	return max_bhb_k;
++}
++
+ static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
+ {
+ 	const char *v = arm64_get_bp_hardening_vector(slot);
+@@ -1015,7 +1021,7 @@ static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
+ 	isb();
+ }
+ 
+-static bool __read_mostly __nospectre_bhb;
++bool __read_mostly __nospectre_bhb;
+ static int __init parse_spectre_bhb_param(char *str)
+ {
+ 	__nospectre_bhb = true;
+@@ -1093,6 +1099,11 @@ void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
+ 	update_mitigation_state(&spectre_bhb_state, state);
+ }
+ 
++bool is_spectre_bhb_fw_mitigated(void)
++{
++	return test_bit(BHB_FW, &system_bhb_mitigations);
++}
++
+ /* Patched to NOP when enabled */
+ void noinstr spectre_bhb_patch_loop_mitigation_enable(struct alt_instr *alt,
+ 						     __le32 *origptr,
+diff --git a/arch/arm64/lib/insn.c b/arch/arm64/lib/insn.c
+index b008a9b46a7ff4..36d33e064ea01b 100644
+--- a/arch/arm64/lib/insn.c
++++ b/arch/arm64/lib/insn.c
+@@ -5,6 +5,7 @@
+  *
+  * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
+  */
++#include <linux/bitfield.h>
+ #include <linux/bitops.h>
+ #include <linux/bug.h>
+ #include <linux/printk.h>
+@@ -1471,43 +1472,41 @@ u32 aarch64_insn_gen_extr(enum aarch64_insn_variant variant,
+ 	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, Rm);
+ }
+ 
+-u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type)
++static u32 __get_barrier_crm_val(enum aarch64_insn_mb_type type)
+ {
+-	u32 opt;
+-	u32 insn;
+-
+ 	switch (type) {
+ 	case AARCH64_INSN_MB_SY:
+-		opt = 0xf;
+-		break;
++		return 0xf;
+ 	case AARCH64_INSN_MB_ST:
+-		opt = 0xe;
+-		break;
++		return 0xe;
+ 	case AARCH64_INSN_MB_LD:
+-		opt = 0xd;
+-		break;
++		return 0xd;
+ 	case AARCH64_INSN_MB_ISH:
+-		opt = 0xb;
+-		break;
++		return 0xb;
+ 	case AARCH64_INSN_MB_ISHST:
+-		opt = 0xa;
+-		break;
++		return 0xa;
+ 	case AARCH64_INSN_MB_ISHLD:
+-		opt = 0x9;
+-		break;
++		return 0x9;
+ 	case AARCH64_INSN_MB_NSH:
+-		opt = 0x7;
+-		break;
++		return 0x7;
+ 	case AARCH64_INSN_MB_NSHST:
+-		opt = 0x6;
+-		break;
++		return 0x6;
+ 	case AARCH64_INSN_MB_NSHLD:
+-		opt = 0x5;
+-		break;
++		return 0x5;
+ 	default:
+-		pr_err("%s: unknown dmb type %d\n", __func__, type);
++		pr_err("%s: unknown barrier type %d\n", __func__, type);
+ 		return AARCH64_BREAK_FAULT;
+ 	}
++}
++
++u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type)
++{
++	u32 opt;
++	u32 insn;
++
++	opt = __get_barrier_crm_val(type);
++	if (opt == AARCH64_BREAK_FAULT)
++		return AARCH64_BREAK_FAULT;
+ 
+ 	insn = aarch64_insn_get_dmb_value();
+ 	insn &= ~GENMASK(11, 8);
+@@ -1516,6 +1515,21 @@ u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type)
+ 	return insn;
+ }
+ 
++u32 aarch64_insn_gen_dsb(enum aarch64_insn_mb_type type)
++{
++	u32 opt, insn;
++
++	opt = __get_barrier_crm_val(type);
++	if (opt == AARCH64_BREAK_FAULT)
++		return AARCH64_BREAK_FAULT;
++
++	insn = aarch64_insn_get_dsb_base_value();
++	insn &= ~GENMASK(11, 8);
++	insn |= (opt << 8);
++
++	return insn;
++}
++
+ u32 aarch64_insn_gen_mrs(enum aarch64_insn_register result,
+ 			 enum aarch64_insn_system_register sysreg)
+ {
+diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
+index 27ef366363e4e2..515c411c2c839d 100644
+--- a/arch/arm64/net/bpf_jit_comp.c
++++ b/arch/arm64/net/bpf_jit_comp.c
+@@ -7,6 +7,7 @@
+ 
+ #define pr_fmt(fmt) "bpf_jit: " fmt
+ 
++#include <linux/arm-smccc.h>
+ #include <linux/bitfield.h>
+ #include <linux/bpf.h>
+ #include <linux/filter.h>
+@@ -17,6 +18,7 @@
+ #include <asm/asm-extable.h>
+ #include <asm/byteorder.h>
+ #include <asm/cacheflush.h>
++#include <asm/cpufeature.h>
+ #include <asm/debug-monitors.h>
+ #include <asm/insn.h>
+ #include <asm/patching.h>
+@@ -857,7 +859,51 @@ static void build_plt(struct jit_ctx *ctx)
+ 		plt->target = (u64)&dummy_tramp;
+ }
+ 
+-static void build_epilogue(struct jit_ctx *ctx)
++/* Clobbers BPF registers 1-4, aka x0-x3 */
++static void __maybe_unused build_bhb_mitigation(struct jit_ctx *ctx)
++{
++	const u8 r1 = bpf2a64[BPF_REG_1]; /* aka x0 */
++	u8 k = get_spectre_bhb_loop_value();
++
++	if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY) ||
++	    cpu_mitigations_off() || __nospectre_bhb ||
++	    arm64_get_spectre_v2_state() == SPECTRE_VULNERABLE)
++		return;
++
++	if (capable(CAP_SYS_ADMIN))
++		return;
++
++	if (supports_clearbhb(SCOPE_SYSTEM)) {
++		emit(aarch64_insn_gen_hint(AARCH64_INSN_HINT_CLEARBHB), ctx);
++		return;
++	}
++
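++	/*
++	 * Loop k times over a taken branch (B; SUBS; B.NE) to overwrite the
++	 * branch history, then synchronize with DSB ISH plus ISB.
++	 */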
++	if (k) {
++		emit_a64_mov_i64(r1, k, ctx);
++		emit(A64_B(1), ctx);
++		emit(A64_SUBS_I(true, r1, r1, 1), ctx);
++		emit(A64_B_(A64_COND_NE, -2), ctx);
++		emit(aarch64_insn_gen_dsb(AARCH64_INSN_MB_ISH), ctx);
++		emit(aarch64_insn_get_isb_value(), ctx);
++	}
++
++	if (is_spectre_bhb_fw_mitigated()) {
++		emit(A64_ORR_I(false, r1, AARCH64_INSN_REG_ZR,
++			       ARM_SMCCC_ARCH_WORKAROUND_3), ctx);
++		switch (arm_smccc_1_1_get_conduit()) {
++		case SMCCC_CONDUIT_HVC:
++			emit(aarch64_insn_get_hvc_value(), ctx);
++			break;
++		case SMCCC_CONDUIT_SMC:
++			emit(aarch64_insn_get_smc_value(), ctx);
++			break;
++		default:
++			pr_err_once("Firmware mitigation enabled with unknown conduit\n");
++		}
++	}
++}
++
++static void build_epilogue(struct jit_ctx *ctx, bool was_classic)
+ {
+ 	const u8 r0 = bpf2a64[BPF_REG_0];
+ 	const u8 ptr = bpf2a64[TCCNT_PTR];
+@@ -870,10 +916,13 @@ static void build_epilogue(struct jit_ctx *ctx)
+ 
+ 	emit(A64_POP(A64_ZR, ptr, A64_SP), ctx);
+ 
++	if (was_classic)
++		build_bhb_mitigation(ctx);
++
+ 	/* Restore FP/LR registers */
+ 	emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);
+ 
+-	/* Set return value */
++	/* Move the return value from bpf:r0 (aka x7) to x0 */
+ 	emit(A64_MOV(1, A64_R(0), r0), ctx);
+ 
+ 	/* Authenticate lr */
+@@ -1817,7 +1866,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
+ 	}
+ 
+ 	ctx.epilogue_offset = ctx.idx;
+-	build_epilogue(&ctx);
++	build_epilogue(&ctx, was_classic);
+ 	build_plt(&ctx);
+ 
+ 	extable_align = __alignof__(struct exception_table_entry);
+@@ -1880,7 +1929,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
+ 		goto out_free_hdr;
+ 	}
+ 
+-	build_epilogue(&ctx);
++	build_epilogue(&ctx, was_classic);
+ 	build_plt(&ctx);
+ 
+ 	/* Extra pass to validate JITed code. */
+diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h
+index 4a2b40ce39e091..841612913f0d1b 100644
+--- a/arch/mips/include/asm/ptrace.h
++++ b/arch/mips/include/asm/ptrace.h
+@@ -65,7 +65,8 @@ static inline void instruction_pointer_set(struct pt_regs *regs,
+ 
+ /* Query offset/name of register from its name/offset */
+ extern int regs_query_register_offset(const char *name);
+-#define MAX_REG_OFFSET (offsetof(struct pt_regs, __last))
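++/*
++ * __last marks the end of pt_regs; the offset of the last valid register
++ * is therefore one word before it.
++ */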
++#define MAX_REG_OFFSET \
++	(offsetof(struct pt_regs, __last) - sizeof(unsigned long))
+ 
+ /**
+  * regs_get_register() - get register value from its offset
+diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
+index 8ff8e8b36524b7..9c83848797a78b 100644
+--- a/arch/riscv/kernel/traps.c
++++ b/arch/riscv/kernel/traps.c
+@@ -198,47 +198,57 @@ asmlinkage __visible __trap_section void do_trap_insn_illegal(struct pt_regs *re
+ DO_ERROR_INFO(do_trap_load_fault,
+ 	SIGSEGV, SEGV_ACCERR, "load access fault");
+ 
+-asmlinkage __visible __trap_section void do_trap_load_misaligned(struct pt_regs *regs)
++enum misaligned_access_type {
++	MISALIGNED_STORE,
++	MISALIGNED_LOAD,
++};
++static const struct {
++	const char *type_str;
++	int (*handler)(struct pt_regs *regs);
++} misaligned_handler[] = {
++	[MISALIGNED_STORE] = {
++		.type_str = "Oops - store (or AMO) address misaligned",
++		.handler = handle_misaligned_store,
++	},
++	[MISALIGNED_LOAD] = {
++		.type_str = "Oops - load address misaligned",
++		.handler = handle_misaligned_load,
++	},
++};
++
++static void do_trap_misaligned(struct pt_regs *regs, enum misaligned_access_type type)
+ {
++	irqentry_state_t state;
++
+ 	if (user_mode(regs)) {
+ 		irqentry_enter_from_user_mode(regs);
++		local_irq_enable();
++	} else {
++		state = irqentry_nmi_enter(regs);
++	}
+ 
+-		if (handle_misaligned_load(regs))
+-			do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
+-			      "Oops - load address misaligned");
++	if (misaligned_handler[type].handler(regs))
++		do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
++			      misaligned_handler[type].type_str);
+ 
++	if (user_mode(regs)) {
++		local_irq_disable();
+ 		irqentry_exit_to_user_mode(regs);
+ 	} else {
+-		irqentry_state_t state = irqentry_nmi_enter(regs);
+-
+-		if (handle_misaligned_load(regs))
+-			do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
+-			      "Oops - load address misaligned");
+-
+ 		irqentry_nmi_exit(regs, state);
+ 	}
+ }
+ 
+-asmlinkage __visible __trap_section void do_trap_store_misaligned(struct pt_regs *regs)
++asmlinkage __visible __trap_section void do_trap_load_misaligned(struct pt_regs *regs)
+ {
+-	if (user_mode(regs)) {
+-		irqentry_enter_from_user_mode(regs);
+-
+-		if (handle_misaligned_store(regs))
+-			do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
+-				"Oops - store (or AMO) address misaligned");
+-
+-		irqentry_exit_to_user_mode(regs);
+-	} else {
+-		irqentry_state_t state = irqentry_nmi_enter(regs);
+-
+-		if (handle_misaligned_store(regs))
+-			do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
+-				"Oops - store (or AMO) address misaligned");
++	do_trap_misaligned(regs, MISALIGNED_LOAD);
++}
+ 
+-		irqentry_nmi_exit(regs, state);
+-	}
++asmlinkage __visible __trap_section void do_trap_store_misaligned(struct pt_regs *regs)
++{
++	do_trap_misaligned(regs, MISALIGNED_STORE);
+ }
++
+ DO_ERROR_INFO(do_trap_store_fault,
+ 	SIGSEGV, SEGV_ACCERR, "store (or AMO) access fault");
+ DO_ERROR_INFO(do_trap_ecall_s,
+diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c
+index 9a80a12f6b48f2..d14bfc23e315b0 100644
+--- a/arch/riscv/kernel/traps_misaligned.c
++++ b/arch/riscv/kernel/traps_misaligned.c
+@@ -87,6 +87,13 @@
+ #define INSN_MATCH_C_FSWSP		0xe002
+ #define INSN_MASK_C_FSWSP		0xe003
+ 
++#define INSN_MATCH_C_LHU		0x8400
++#define INSN_MASK_C_LHU			0xfc43
++#define INSN_MATCH_C_LH			0x8440
++#define INSN_MASK_C_LH			0xfc43
++#define INSN_MATCH_C_SH			0x8c00
++#define INSN_MASK_C_SH			0xfc43
++
+ #define INSN_LEN(insn)			((((insn) & 0x3) < 0x3) ? 2 : 4)
+ 
+ #if defined(CONFIG_64BIT)
+@@ -405,6 +412,13 @@ int handle_misaligned_load(struct pt_regs *regs)
+ 		fp = 1;
+ 		len = 4;
+ #endif
++	} else if ((insn & INSN_MASK_C_LHU) == INSN_MATCH_C_LHU) {
++		len = 2;
++		insn = RVC_RS2S(insn) << SH_RD;
++	} else if ((insn & INSN_MASK_C_LH) == INSN_MATCH_C_LH) {
++		len = 2;
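++		/* C.LH is a sign-extending load; shift is used below to sign-extend */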
++		shift = 8 * (sizeof(ulong) - len);
++		insn = RVC_RS2S(insn) << SH_RD;
+ 	} else {
+ 		regs->epc = epc;
+ 		return -1;
+@@ -504,6 +518,9 @@ int handle_misaligned_store(struct pt_regs *regs)
+ 		len = 4;
+ 		val.data_ulong = GET_F32_RS2C(insn, regs);
+ #endif
++	} else if ((insn & INSN_MASK_C_SH) == INSN_MATCH_C_SH) {
++		len = 2;
++		val.data_ulong = GET_RS2S(insn, regs);
+ 	} else {
+ 		regs->epc = epc;
+ 		return -1;
+diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
+index a7de838f803189..669d335c87abaf 100644
+--- a/arch/s390/kernel/entry.S
++++ b/arch/s390/kernel/entry.S
+@@ -636,7 +636,8 @@ SYM_CODE_START(stack_overflow)
+ 	stmg	%r0,%r7,__PT_R0(%r11)
+ 	stmg	%r8,%r9,__PT_PSW(%r11)
+ 	mvc	__PT_R8(64,%r11),0(%r14)
+-	stg	%r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
++	GET_LC	%r2
++	mvc	__PT_ORIG_GPR2(8,%r11),__LC_PGM_LAST_BREAK(%r2)
+ 	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+ 	lgr	%r2,%r11		# pass pointer to pt_regs
+ 	jg	kernel_stack_overflow
+diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
+index 74dac6da03d5bb..8edd13237a1717 100644
+--- a/arch/s390/pci/pci_clp.c
++++ b/arch/s390/pci/pci_clp.c
+@@ -422,6 +422,8 @@ static void __clp_add(struct clp_fh_list_entry *entry, void *data)
+ 		return;
+ 	}
+ 	zdev = zpci_create_device(entry->fid, entry->fh, entry->config_state);
++	if (IS_ERR(zdev))
++		return;
+ 	list_add_tail(&zdev->entry, scan_list);
+ }
+ 
+diff --git a/arch/um/include/linux/time-internal.h b/arch/um/include/linux/time-internal.h
+index 138908b999d76c..b22226634ff609 100644
+--- a/arch/um/include/linux/time-internal.h
++++ b/arch/um/include/linux/time-internal.h
+@@ -83,8 +83,6 @@ extern void time_travel_not_configured(void);
+ #define time_travel_del_event(...) time_travel_not_configured()
+ #endif /* CONFIG_UML_TIME_TRAVEL_SUPPORT */
+ 
+-extern unsigned long tt_extra_sched_jiffies;
+-
+ /*
+  * Without CONFIG_UML_TIME_TRAVEL_SUPPORT this is a linker error if used,
+  * which is intentional since we really shouldn't link it in that case.
+diff --git a/arch/um/kernel/skas/syscall.c b/arch/um/kernel/skas/syscall.c
+index a5beaea2967ec3..b09e85279d2b8c 100644
+--- a/arch/um/kernel/skas/syscall.c
++++ b/arch/um/kernel/skas/syscall.c
+@@ -31,17 +31,6 @@ void handle_syscall(struct uml_pt_regs *r)
+ 		goto out;
+ 
+ 	syscall = UPT_SYSCALL_NR(r);
+-
+-	/*
+-	 * If no time passes, then sched_yield may not actually yield, causing
+-	 * broken spinlock implementations in userspace (ASAN) to hang for long
+-	 * periods of time.
+-	 */
+-	if ((time_travel_mode == TT_MODE_INFCPU ||
+-	     time_travel_mode == TT_MODE_EXTERNAL) &&
+-	    syscall == __NR_sched_yield)
+-		tt_extra_sched_jiffies += 1;
+-
+ 	if (syscall >= 0 && syscall < __NR_syscalls) {
+ 		unsigned long ret = EXECUTE_SYSCALL(syscall, regs);
+ 
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index e54da3b4d334e4..7b3622ba4c3c82 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -2747,6 +2747,18 @@ config MITIGATION_SSB
+ 	  of speculative execution in a similar way to the Meltdown and Spectre
+ 	  security vulnerabilities.
+ 
++config MITIGATION_ITS
++	bool "Enable Indirect Target Selection mitigation"
++	depends on CPU_SUP_INTEL && X86_64
++	depends on MITIGATION_RETPOLINE && MITIGATION_RETHUNK
++	select EXECMEM
++	default y
++	help
++	  Enable Indirect Target Selection (ITS) mitigation. ITS is a bug in
++	  the branch prediction unit (BPU) of some Intel CPUs that may allow
++	  Spectre V2 style attacks. If disabled, the mitigation cannot be
++	  enabled via the cmdline.
++	  See <file:Documentation/admin-guide/hw-vuln/indirect-target-selection.rst>
++
+ endif
+ 
+ config ARCH_HAS_ADD_PAGES
+diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
+index 1b5be07f86698a..9c6a110a52d48c 100644
+--- a/arch/x86/entry/entry_64.S
++++ b/arch/x86/entry/entry_64.S
+@@ -1524,7 +1524,9 @@ SYM_CODE_END(rewind_stack_and_make_dead)
+  * ORC to unwind properly.
+  *
+  * The alignment is for performance and not for safety, and may be safely
+- * refactored in the future if needed.
++ * refactored in the future if needed. The .skips are for safety, to ensure
++ * that all RETs are in the second half of a cacheline to mitigate Indirect
++ * Target Selection, rather than taking the slowpath via its_return_thunk.
+  */
+ SYM_FUNC_START(clear_bhb_loop)
+ 	push	%rbp
+@@ -1534,10 +1536,22 @@ SYM_FUNC_START(clear_bhb_loop)
+ 	call	1f
+ 	jmp	5f
+ 	.align 64, 0xcc
++	/*
++	 * Shift instructions so that the RET is in the upper half of the
++	 * cacheline and don't take the slowpath to its_return_thunk.
++	 */
++	.skip 32 - (.Lret1 - 1f), 0xcc
+ 	ANNOTATE_INTRA_FUNCTION_CALL
+ 1:	call	2f
+-	RET
++.Lret1:	RET
+ 	.align 64, 0xcc
++	/*
++	 * As above, shift instructions so that the RET at .Lret2 also lands
++	 * in the upper half of the cacheline.
++	 *
++	 * This should ideally be: .skip 32 - (.Lret2 - 2f), 0xcc
++	 * but some Clang versions (e.g. 18) don't like this.
++	 */
++	.skip 32 - 18, 0xcc
+ 2:	movl	$5, %eax
+ 3:	jmp	4f
+ 	nop
+@@ -1545,7 +1559,7 @@ SYM_FUNC_START(clear_bhb_loop)
+ 	jnz	3b
+ 	sub	$1, %ecx
+ 	jnz	1b
+-	RET
++.Lret2:	RET
+ 5:	lfence
+ 	pop	%rbp
+ 	RET
+diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
+index ca9ae606aab9ac..f8dab517de8ac7 100644
+--- a/arch/x86/include/asm/alternative.h
++++ b/arch/x86/include/asm/alternative.h
+@@ -5,6 +5,7 @@
+ #include <linux/types.h>
+ #include <linux/stringify.h>
+ #include <asm/asm.h>
++#include <asm/bug.h>
+ 
+ #define ALT_FLAGS_SHIFT		16
+ 
+@@ -134,6 +135,37 @@ static __always_inline int x86_call_depth_emit_accounting(u8 **pprog,
+ }
+ #endif
+ 
++#ifdef CONFIG_MITIGATION_ITS
++extern void its_init_mod(struct module *mod);
++extern void its_fini_mod(struct module *mod);
++extern void its_free_mod(struct module *mod);
++extern u8 *its_static_thunk(int reg);
++#else /* CONFIG_MITIGATION_ITS */
++static inline void its_init_mod(struct module *mod) { }
++static inline void its_fini_mod(struct module *mod) { }
++static inline void its_free_mod(struct module *mod) { }
++static inline u8 *its_static_thunk(int reg)
++{
++	WARN_ONCE(1, "ITS not compiled in");
++
++	return NULL;
++}
++#endif
++
++#if defined(CONFIG_MITIGATION_RETHUNK) && defined(CONFIG_OBJTOOL)
++extern bool cpu_wants_rethunk(void);
++extern bool cpu_wants_rethunk_at(void *addr);
++#else
++static __always_inline bool cpu_wants_rethunk(void)
++{
++	return false;
++}
++static __always_inline bool cpu_wants_rethunk_at(void *addr)
++{
++	return false;
++}
++#endif
++
+ #ifdef CONFIG_SMP
+ extern void alternatives_smp_module_add(struct module *mod, char *name,
+ 					void *locks, void *locks_end,
+diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
+index 64fa42175a1577..308e7d97135cf6 100644
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -475,6 +475,7 @@
+ #define X86_FEATURE_CLEAR_BHB_HW	(21*32+ 3) /* BHI_DIS_S HW control enabled */
+ #define X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT (21*32+ 4) /* Clear branch history at vmexit using SW loop */
+ #define X86_FEATURE_FAST_CPPC		(21*32 + 5) /* AMD Fast CPPC */
++#define X86_FEATURE_INDIRECT_THUNK_ITS	(21*32 + 6) /* Use thunk for indirect branches in lower half of cacheline */
+ 
+ /*
+  * BUG word(s)
+@@ -526,4 +527,6 @@
+ #define X86_BUG_RFDS			X86_BUG(1*32 + 2) /* "rfds" CPU is vulnerable to Register File Data Sampling */
+ #define X86_BUG_BHI			X86_BUG(1*32 + 3) /* "bhi" CPU is affected by Branch History Injection */
+ #define X86_BUG_IBPB_NO_RET	   	X86_BUG(1*32 + 4) /* "ibpb_no_ret" IBPB omits return target predictions */
++#define X86_BUG_ITS			X86_BUG(1*32 + 5) /* "its" CPU is affected by Indirect Target Selection */
++#define X86_BUG_ITS_NATIVE_ONLY		X86_BUG(1*32 + 6) /* "its_native_only" CPU is affected by ITS, VMX is not affected */
+ #endif /* _ASM_X86_CPUFEATURES_H */
+diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
+index 695e569159c1d1..be7cddc414e4fb 100644
+--- a/arch/x86/include/asm/microcode.h
++++ b/arch/x86/include/asm/microcode.h
+@@ -17,10 +17,12 @@ struct ucode_cpu_info {
+ void load_ucode_bsp(void);
+ void load_ucode_ap(void);
+ void microcode_bsp_resume(void);
++bool __init microcode_loader_disabled(void);
+ #else
+ static inline void load_ucode_bsp(void)	{ }
+ static inline void load_ucode_ap(void) { }
+ static inline void microcode_bsp_resume(void) { }
++static inline bool __init microcode_loader_disabled(void) { return false; }
+ #endif
+ 
+ extern unsigned long initrd_start_early;
+diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
+index 61e991507353eb..ac25f9eb591209 100644
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -209,6 +209,14 @@
+ 						 * VERW clears CPU Register
+ 						 * File.
+ 						 */
++#define ARCH_CAP_ITS_NO			BIT_ULL(62) /*
++						     * Not susceptible to
++						     * Indirect Target Selection.
++						     * This bit is not set by
++						     * HW, but is synthesized by
++						     * VMMs for guests to know
++						     * their affected status.
++						     */
+ 
+ #define MSR_IA32_FLUSH_CMD		0x0000010b
+ #define L1D_FLUSH			BIT(0)	/*
+diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
+index 96b410b1d4e841..f7bb0016d7d9e5 100644
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -210,9 +210,8 @@
+ .endm
+ 
+ /*
+- * Equivalent to -mindirect-branch-cs-prefix; emit the 5 byte jmp/call
+- * to the retpoline thunk with a CS prefix when the register requires
+- * a RAX prefix byte to encode. Also see apply_retpolines().
++ * Emits a conditional CS prefix that is compatible with
++ * -mindirect-branch-cs-prefix.
+  */
+ .macro __CS_PREFIX reg:req
+ 	.irp rs,r8,r9,r10,r11,r12,r13,r14,r15
+@@ -356,10 +355,14 @@
+ 	".long 999b\n\t"					\
+ 	".popsection\n\t"
+ 
++#define ITS_THUNK_SIZE	64
++
+ typedef u8 retpoline_thunk_t[RETPOLINE_THUNK_SIZE];
++typedef u8 its_thunk_t[ITS_THUNK_SIZE];
+ extern retpoline_thunk_t __x86_indirect_thunk_array[];
+ extern retpoline_thunk_t __x86_indirect_call_thunk_array[];
+ extern retpoline_thunk_t __x86_indirect_jump_thunk_array[];
++extern its_thunk_t	 __x86_indirect_its_thunk_array[];
+ 
+ #ifdef CONFIG_MITIGATION_RETHUNK
+ extern void __x86_return_thunk(void);
+@@ -383,6 +386,12 @@ static inline void srso_return_thunk(void) {}
+ static inline void srso_alias_return_thunk(void) {}
+ #endif
+ 
++#ifdef CONFIG_MITIGATION_ITS
++extern void its_return_thunk(void);
++#else
++static inline void its_return_thunk(void) {}
++#endif
++
+ extern void retbleed_return_thunk(void);
+ extern void srso_return_thunk(void);
+ extern void srso_alias_return_thunk(void);
+@@ -438,20 +447,23 @@ static inline void call_depth_return_thunk(void) {}
+ 
+ #ifdef CONFIG_X86_64
+ 
++/*
++ * Emits a conditional CS prefix that is compatible with
++ * -mindirect-branch-cs-prefix.
++ */
++#define __CS_PREFIX(reg)				\
++	".irp rs,r8,r9,r10,r11,r12,r13,r14,r15\n"	\
++	".ifc \\rs," reg "\n"				\
++	".byte 0x2e\n"					\
++	".endif\n"					\
++	".endr\n"
++
+ /*
+  * Inline asm uses the %V modifier which is only in newer GCC
+  * which is ensured when CONFIG_MITIGATION_RETPOLINE is defined.
+  */
+-# define CALL_NOSPEC						\
+-	ALTERNATIVE_2(						\
+-	ANNOTATE_RETPOLINE_SAFE					\
+-	"call *%[thunk_target]\n",				\
+-	"call __x86_indirect_thunk_%V[thunk_target]\n",		\
+-	X86_FEATURE_RETPOLINE,					\
+-	"lfence;\n"						\
+-	ANNOTATE_RETPOLINE_SAFE					\
+-	"call *%[thunk_target]\n",				\
+-	X86_FEATURE_RETPOLINE_LFENCE)
++#define CALL_NOSPEC	__CS_PREFIX("%V[thunk_target]")	\
++			"call __x86_indirect_thunk_%V[thunk_target]\n"
+ 
+ # define THUNK_TARGET(addr) [thunk_target] "r" (addr)
+ 
+diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
+index d17518ca19b8b8..66e77bd7d51161 100644
+--- a/arch/x86/kernel/alternative.c
++++ b/arch/x86/kernel/alternative.c
+@@ -18,6 +18,7 @@
+ #include <linux/mmu_context.h>
+ #include <linux/bsearch.h>
+ #include <linux/sync_core.h>
++#include <linux/execmem.h>
+ #include <asm/text-patching.h>
+ #include <asm/alternative.h>
+ #include <asm/sections.h>
+@@ -31,6 +32,8 @@
+ #include <asm/paravirt.h>
+ #include <asm/asm-prototypes.h>
+ #include <asm/cfi.h>
++#include <asm/ibt.h>
++#include <asm/set_memory.h>
+ 
+ int __read_mostly alternatives_patched;
+ 
+@@ -124,6 +127,136 @@ const unsigned char * const x86_nops[ASM_NOP_MAX+1] =
+ #endif
+ };
+ 
++#ifdef CONFIG_MITIGATION_ITS
++
++#ifdef CONFIG_MODULES
++static struct module *its_mod;
++#endif
++static void *its_page;
++static unsigned int its_offset;
++
++/* Initialize a thunk with the "jmp *reg; int3" instructions. */
++static void *its_init_thunk(void *thunk, int reg)
++{
++	u8 *bytes = thunk;
++	int i = 0;
++
++	if (reg >= 8) {
++		bytes[i++] = 0x41; /* REX.B prefix */
++		reg -= 8;
++	}
++	bytes[i++] = 0xff;
++	bytes[i++] = 0xe0 + reg; /* jmp *reg */
++	bytes[i++] = 0xcc;
++
++	return thunk;
++}
++
++#ifdef CONFIG_MODULES
++void its_init_mod(struct module *mod)
++{
++	if (!cpu_feature_enabled(X86_FEATURE_INDIRECT_THUNK_ITS))
++		return;
++
++	mutex_lock(&text_mutex);
++	its_mod = mod;
++	its_page = NULL;
++}
++
++void its_fini_mod(struct module *mod)
++{
++	if (!cpu_feature_enabled(X86_FEATURE_INDIRECT_THUNK_ITS))
++		return;
++
++	WARN_ON_ONCE(its_mod != mod);
++
++	its_mod = NULL;
++	its_page = NULL;
++	mutex_unlock(&text_mutex);
++
++	for (int i = 0; i < mod->its_num_pages; i++) {
++		void *page = mod->its_page_array[i];
++		set_memory_rox((unsigned long)page, 1);
++	}
++}
++
++void its_free_mod(struct module *mod)
++{
++	if (!cpu_feature_enabled(X86_FEATURE_INDIRECT_THUNK_ITS))
++		return;
++
++	for (int i = 0; i < mod->its_num_pages; i++) {
++		void *page = mod->its_page_array[i];
++		execmem_free(page);
++	}
++	kfree(mod->its_page_array);
++}
++#endif /* CONFIG_MODULES */
++
++static void *its_alloc(void)
++{
++	void *page __free(execmem) = execmem_alloc(EXECMEM_MODULE_TEXT, PAGE_SIZE);
++
++	if (!page)
++		return NULL;
++
++#ifdef CONFIG_MODULES
++	if (its_mod) {
++		void *tmp = krealloc(its_mod->its_page_array,
++				     (its_mod->its_num_pages+1) * sizeof(void *),
++				     GFP_KERNEL);
++		if (!tmp)
++			return NULL;
++
++		its_mod->its_page_array = tmp;
++		its_mod->its_page_array[its_mod->its_num_pages++] = page;
++	}
++#endif /* CONFIG_MODULES */
++
++	return no_free_ptr(page);
++}
++
++static void *its_allocate_thunk(int reg)
++{
++	int size = 3 + (reg / 8);
++	void *thunk;
++
++	if (!its_page || (its_offset + size - 1) >= PAGE_SIZE) {
++		its_page = its_alloc();
++		if (!its_page) {
++			pr_err("ITS page allocation failed\n");
++			return NULL;
++		}
++		memset(its_page, INT3_INSN_OPCODE, PAGE_SIZE);
++		its_offset = 32;
++	}
++
++	/*
++	 * If the indirect branch instruction will be in the lower half
++	 * of a cacheline, then update the offset to reach the upper half.
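++	 * E.g. ((40 - 1) | 0x3F) + 33 == 96: an offset of 40 in the lower
++	 * half moves to offset 32 of the next cacheline, i.e. its upper half.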
++	 */
++	if ((its_offset + size - 1) % 64 < 32)
++		its_offset = ((its_offset - 1) | 0x3F) + 33;
++
++	thunk = its_page + its_offset;
++	its_offset += size;
++
++	set_memory_rw((unsigned long)its_page, 1);
++	thunk = its_init_thunk(thunk, reg);
++	set_memory_rox((unsigned long)its_page, 1);
++
++	return thunk;
++}
++
++u8 *its_static_thunk(int reg)
++{
++	u8 *thunk = __x86_indirect_its_thunk_array[reg];
++
++	return thunk;
++}
++
++#endif
++
+ /*
+  * Nomenclature for variable names to simplify and clarify this code and ease
+  * any potential staring at it:
+@@ -581,7 +714,8 @@ static int emit_indirect(int op, int reg, u8 *bytes)
+ 	return i;
+ }
+ 
+-static int emit_call_track_retpoline(void *addr, struct insn *insn, int reg, u8 *bytes)
++static int __emit_trampoline(void *addr, struct insn *insn, u8 *bytes,
++			     void *call_dest, void *jmp_dest)
+ {
+ 	u8 op = insn->opcode.bytes[0];
+ 	int i = 0;
+@@ -602,7 +736,7 @@ static int emit_call_track_retpoline(void *addr, struct insn *insn, int reg, u8
+ 	switch (op) {
+ 	case CALL_INSN_OPCODE:
+ 		__text_gen_insn(bytes+i, op, addr+i,
+-				__x86_indirect_call_thunk_array[reg],
++				call_dest,
+ 				CALL_INSN_SIZE);
+ 		i += CALL_INSN_SIZE;
+ 		break;
+@@ -610,7 +744,7 @@ static int emit_call_track_retpoline(void *addr, struct insn *insn, int reg, u8
+ 	case JMP32_INSN_OPCODE:
+ clang_jcc:
+ 		__text_gen_insn(bytes+i, op, addr+i,
+-				__x86_indirect_jump_thunk_array[reg],
++				jmp_dest,
+ 				JMP32_INSN_SIZE);
+ 		i += JMP32_INSN_SIZE;
+ 		break;
+@@ -625,6 +759,39 @@ static int emit_call_track_retpoline(void *addr, struct insn *insn, int reg, u8
+ 	return i;
+ }
+ 
++static int emit_call_track_retpoline(void *addr, struct insn *insn, int reg, u8 *bytes)
++{
++	return __emit_trampoline(addr, insn, bytes,
++				 __x86_indirect_call_thunk_array[reg],
++				 __x86_indirect_jump_thunk_array[reg]);
++}
++
++#ifdef CONFIG_MITIGATION_ITS
++static int emit_its_trampoline(void *addr, struct insn *insn, int reg, u8 *bytes)
++{
++	u8 *thunk = __x86_indirect_its_thunk_array[reg];
++	u8 *tmp = its_allocate_thunk(reg);
++
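++	/* Fall back to the static ITS thunk if allocation failed. */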
++	if (tmp)
++		thunk = tmp;
++
++	return __emit_trampoline(addr, insn, bytes, thunk, thunk);
++}
++
++/* Check if an indirect branch is at an ITS-unsafe address */
++static bool cpu_wants_indirect_its_thunk_at(unsigned long addr, int reg)
++{
++	if (!cpu_feature_enabled(X86_FEATURE_INDIRECT_THUNK_ITS))
++		return false;
++
++	/* Indirect branch opcode is 2 or 3 bytes depending on reg */
++	addr += 1 + reg / 8;
++
++	/* Lower-half of the cacheline? */
++	return !(addr & 0x20);
++}
++#endif
++
+ /*
+  * Rewrite the compiler generated retpoline thunk calls.
+  *
+@@ -699,6 +866,15 @@ static int patch_retpoline(void *addr, struct insn *insn, u8 *bytes)
+ 		bytes[i++] = 0xe8; /* LFENCE */
+ 	}
+ 
++#ifdef CONFIG_MITIGATION_ITS
++	/*
++	 * Check if the address of the last byte of the emitted indirect
++	 * branch is in the lower half of the cacheline. Such branches need
++	 * the ITS mitigation.
++	 */
++	if (cpu_wants_indirect_its_thunk_at((unsigned long)addr + i, reg))
++		return emit_its_trampoline(addr, insn, reg, bytes);
++#endif
++
+ 	ret = emit_indirect(op, reg, bytes + i);
+ 	if (ret < 0)
+ 		return ret;
+@@ -770,6 +946,21 @@ void __init_or_module noinline apply_retpolines(s32 *start, s32 *end)
+ 
+ #ifdef CONFIG_MITIGATION_RETHUNK
+ 
++bool cpu_wants_rethunk(void)
++{
++	return cpu_feature_enabled(X86_FEATURE_RETHUNK);
++}
++
++bool cpu_wants_rethunk_at(void *addr)
++{
++	if (!cpu_feature_enabled(X86_FEATURE_RETHUNK))
++		return false;
++	if (x86_return_thunk != its_return_thunk)
++		return true;
++
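++	/*
++	 * With the ITS return thunk, only RETs in the lower half of a
++	 * cacheline (bit 5 of the address clear) need the thunk.
++	 */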
++	return !((unsigned long)addr & 0x20);
++}
++
+ /*
+  * Rewrite the compiler generated return thunk tail-calls.
+  *
+@@ -786,7 +977,7 @@ static int patch_return(void *addr, struct insn *insn, u8 *bytes)
+ 	int i = 0;
+ 
+ 	/* Patch the custom return thunks... */
+-	if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) {
++	if (cpu_wants_rethunk_at(addr)) {
+ 		i = JMP32_INSN_SIZE;
+ 		__text_gen_insn(bytes, JMP32_INSN_OPCODE, addr, x86_return_thunk, i);
+ 	} else {
+@@ -803,7 +994,7 @@ void __init_or_module noinline apply_returns(s32 *start, s32 *end)
+ {
+ 	s32 *s;
+ 
+-	if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
++	if (cpu_wants_rethunk())
+ 		static_call_force_reinit();
+ 
+ 	for (s = start; s < end; s++) {
+@@ -1665,6 +1856,8 @@ static noinline void __init alt_reloc_selftest(void)
+ 
+ void __init alternative_instructions(void)
+ {
++	u64 ibt;
++
+ 	int3_selftest();
+ 
+ 	/*
+@@ -1691,6 +1884,9 @@ void __init alternative_instructions(void)
+ 	 */
+ 	paravirt_set_cap();
+ 
++	/* Keep CET-IBT disabled until caller/callee are patched */
++	ibt = ibt_save(/*disable*/ true);
++
+ 	__apply_fineibt(__retpoline_sites, __retpoline_sites_end,
+ 			__cfi_sites, __cfi_sites_end, true);
+ 
+@@ -1714,6 +1910,8 @@ void __init alternative_instructions(void)
+ 	 */
+ 	apply_seal_endbr(__ibt_endbr_seal, __ibt_endbr_seal_end);
+ 
++	ibt_restore(ibt);
++
+ #ifdef CONFIG_SMP
+ 	/* Patch to UP if other cpus not imminent. */
+ 	if (!noreplace_smp && (num_present_cpus() == 1 || setup_max_cpus <= 1)) {
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 46bddb5bb15ffb..c683abd640fdea 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -49,6 +49,7 @@ static void __init srbds_select_mitigation(void);
+ static void __init l1d_flush_select_mitigation(void);
+ static void __init srso_select_mitigation(void);
+ static void __init gds_select_mitigation(void);
++static void __init its_select_mitigation(void);
+ 
+ /* The base value of the SPEC_CTRL MSR without task-specific bits set */
+ u64 x86_spec_ctrl_base;
+@@ -67,6 +68,14 @@ static DEFINE_MUTEX(spec_ctrl_mutex);
+ 
+ void (*x86_return_thunk)(void) __ro_after_init = __x86_return_thunk;
+ 
++static void __init set_return_thunk(void *thunk)
++{
++	if (x86_return_thunk != __x86_return_thunk)
++		pr_warn("x86/bugs: return thunk changed\n");
++
++	x86_return_thunk = thunk;
++}
++
+ /* Update SPEC_CTRL MSR and its cached copy unconditionally */
+ static void update_spec_ctrl(u64 val)
+ {
+@@ -175,6 +184,7 @@ void __init cpu_select_mitigations(void)
+ 	 */
+ 	srso_select_mitigation();
+ 	gds_select_mitigation();
++	its_select_mitigation();
+ }
+ 
+ /*
+@@ -1104,7 +1114,7 @@ static void __init retbleed_select_mitigation(void)
+ 		setup_force_cpu_cap(X86_FEATURE_RETHUNK);
+ 		setup_force_cpu_cap(X86_FEATURE_UNRET);
+ 
+-		x86_return_thunk = retbleed_return_thunk;
++		set_return_thunk(retbleed_return_thunk);
+ 
+ 		if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
+ 		    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
+@@ -1139,7 +1149,7 @@ static void __init retbleed_select_mitigation(void)
+ 		setup_force_cpu_cap(X86_FEATURE_RETHUNK);
+ 		setup_force_cpu_cap(X86_FEATURE_CALL_DEPTH);
+ 
+-		x86_return_thunk = call_depth_return_thunk;
++		set_return_thunk(call_depth_return_thunk);
+ 		break;
+ 
+ 	default:
+@@ -1173,6 +1183,145 @@ static void __init retbleed_select_mitigation(void)
+ 	pr_info("%s\n", retbleed_strings[retbleed_mitigation]);
+ }
+ 
++#undef pr_fmt
++#define pr_fmt(fmt)     "ITS: " fmt
++
++enum its_mitigation_cmd {
++	ITS_CMD_OFF,
++	ITS_CMD_ON,
++	ITS_CMD_VMEXIT,
++	ITS_CMD_RSB_STUFF,
++};
++
++enum its_mitigation {
++	ITS_MITIGATION_OFF,
++	ITS_MITIGATION_VMEXIT_ONLY,
++	ITS_MITIGATION_ALIGNED_THUNKS,
++	ITS_MITIGATION_RETPOLINE_STUFF,
++};
++
++static const char * const its_strings[] = {
++	[ITS_MITIGATION_OFF]			= "Vulnerable",
++	[ITS_MITIGATION_VMEXIT_ONLY]		= "Mitigation: Vulnerable, KVM: Not affected",
++	[ITS_MITIGATION_ALIGNED_THUNKS]		= "Mitigation: Aligned branch/return thunks",
++	[ITS_MITIGATION_RETPOLINE_STUFF]	= "Mitigation: Retpolines, Stuffing RSB",
++};
++
++static enum its_mitigation its_mitigation __ro_after_init = ITS_MITIGATION_ALIGNED_THUNKS;
++
++static enum its_mitigation_cmd its_cmd __ro_after_init =
++	IS_ENABLED(CONFIG_MITIGATION_ITS) ? ITS_CMD_ON : ITS_CMD_OFF;
++
++static int __init its_parse_cmdline(char *str)
++{
++	if (!str)
++		return -EINVAL;
++
++	if (!IS_ENABLED(CONFIG_MITIGATION_ITS)) {
++		pr_err("Mitigation disabled at compile time, ignoring option (%s)", str);
++		return 0;
++	}
++
++	if (!strcmp(str, "off")) {
++		its_cmd = ITS_CMD_OFF;
++	} else if (!strcmp(str, "on")) {
++		its_cmd = ITS_CMD_ON;
++	} else if (!strcmp(str, "force")) {
++		its_cmd = ITS_CMD_ON;
++		setup_force_cpu_bug(X86_BUG_ITS);
++	} else if (!strcmp(str, "vmexit")) {
++		its_cmd = ITS_CMD_VMEXIT;
++	} else if (!strcmp(str, "stuff")) {
++		its_cmd = ITS_CMD_RSB_STUFF;
++	} else {
++		pr_err("Ignoring unknown indirect_target_selection option (%s).", str);
++	}
++
++	return 0;
++}
++early_param("indirect_target_selection", its_parse_cmdline);
++
++static void __init its_select_mitigation(void)
++{
++	enum its_mitigation_cmd cmd = its_cmd;
++
++	if (!boot_cpu_has_bug(X86_BUG_ITS) || cpu_mitigations_off()) {
++		its_mitigation = ITS_MITIGATION_OFF;
++		return;
++	}
++
++	/* Retpoline+CDT mitigates ITS, bail out */
++	if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
++	    boot_cpu_has(X86_FEATURE_CALL_DEPTH)) {
++		its_mitigation = ITS_MITIGATION_RETPOLINE_STUFF;
++		goto out;
++	}
++
++	/* Exit early to avoid irrelevant warnings */
++	if (cmd == ITS_CMD_OFF) {
++		its_mitigation = ITS_MITIGATION_OFF;
++		goto out;
++	}
++	if (spectre_v2_enabled == SPECTRE_V2_NONE) {
++		pr_err("WARNING: Spectre-v2 mitigation is off, disabling ITS\n");
++		its_mitigation = ITS_MITIGATION_OFF;
++		goto out;
++	}
++	if (!IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) ||
++	    !IS_ENABLED(CONFIG_MITIGATION_RETHUNK)) {
++		pr_err("WARNING: ITS mitigation depends on retpoline and rethunk support\n");
++		its_mitigation = ITS_MITIGATION_OFF;
++		goto out;
++	}
++	if (IS_ENABLED(CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B)) {
++		pr_err("WARNING: ITS mitigation is not compatible with CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B\n");
++		its_mitigation = ITS_MITIGATION_OFF;
++		goto out;
++	}
++	if (boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE)) {
++		pr_err("WARNING: ITS mitigation is not compatible with lfence mitigation\n");
++		its_mitigation = ITS_MITIGATION_OFF;
++		goto out;
++	}
++
++	if (cmd == ITS_CMD_RSB_STUFF &&
++	    (!boot_cpu_has(X86_FEATURE_RETPOLINE) || !IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING))) {
++		pr_err("RSB stuff mitigation not supported, using default\n");
++		cmd = ITS_CMD_ON;
++	}
++
++	switch (cmd) {
++	case ITS_CMD_OFF:
++		its_mitigation = ITS_MITIGATION_OFF;
++		break;
++	case ITS_CMD_VMEXIT:
++		if (boot_cpu_has_bug(X86_BUG_ITS_NATIVE_ONLY)) {
++			its_mitigation = ITS_MITIGATION_VMEXIT_ONLY;
++			goto out;
++		}
++		fallthrough;
++	case ITS_CMD_ON:
++		its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
++		if (!boot_cpu_has(X86_FEATURE_RETPOLINE))
++			setup_force_cpu_cap(X86_FEATURE_INDIRECT_THUNK_ITS);
++		setup_force_cpu_cap(X86_FEATURE_RETHUNK);
++		set_return_thunk(its_return_thunk);
++		break;
++	case ITS_CMD_RSB_STUFF:
++		its_mitigation = ITS_MITIGATION_RETPOLINE_STUFF;
++		setup_force_cpu_cap(X86_FEATURE_RETHUNK);
++		setup_force_cpu_cap(X86_FEATURE_CALL_DEPTH);
++		set_return_thunk(call_depth_return_thunk);
++		if (retbleed_mitigation == RETBLEED_MITIGATION_NONE) {
++			retbleed_mitigation = RETBLEED_MITIGATION_STUFF;
++			pr_info("Retbleed mitigation updated to stuffing\n");
++		}
++		break;
++	}
++out:
++	pr_info("%s\n", its_strings[its_mitigation]);
++}
++
+ #undef pr_fmt
+ #define pr_fmt(fmt)     "Spectre V2 : " fmt
+ 
+@@ -1684,11 +1833,11 @@ static void __init bhi_select_mitigation(void)
+ 			return;
+ 	}
+ 
+-	/* Mitigate in hardware if supported */
+-	if (spec_ctrl_bhi_dis())
++	if (!IS_ENABLED(CONFIG_X86_64))
+ 		return;
+ 
+-	if (!IS_ENABLED(CONFIG_X86_64))
++	/* Mitigate in hardware if supported */
++	if (spec_ctrl_bhi_dis())
+ 		return;
+ 
+ 	if (bhi_mitigation == BHI_MITIGATION_VMEXIT_ONLY) {
+@@ -2624,10 +2773,10 @@ static void __init srso_select_mitigation(void)
+ 
+ 			if (boot_cpu_data.x86 == 0x19) {
+ 				setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS);
+-				x86_return_thunk = srso_alias_return_thunk;
++				set_return_thunk(srso_alias_return_thunk);
+ 			} else {
+ 				setup_force_cpu_cap(X86_FEATURE_SRSO);
+-				x86_return_thunk = srso_return_thunk;
++				set_return_thunk(srso_return_thunk);
+ 			}
+ 			if (has_microcode)
+ 				srso_mitigation = SRSO_MITIGATION_SAFE_RET;
+@@ -2802,6 +2951,11 @@ static ssize_t rfds_show_state(char *buf)
+ 	return sysfs_emit(buf, "%s\n", rfds_strings[rfds_mitigation]);
+ }
+ 
++static ssize_t its_show_state(char *buf)
++{
++	return sysfs_emit(buf, "%s\n", its_strings[its_mitigation]);
++}
++
+ static char *stibp_state(void)
+ {
+ 	if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
+@@ -2984,6 +3138,9 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
+ 	case X86_BUG_RFDS:
+ 		return rfds_show_state(buf);
+ 
++	case X86_BUG_ITS:
++		return its_show_state(buf);
++
+ 	default:
+ 		break;
+ 	}
+@@ -3063,6 +3220,11 @@ ssize_t cpu_show_reg_file_data_sampling(struct device *dev, struct device_attrib
+ {
+ 	return cpu_show_common(dev, attr, buf, X86_BUG_RFDS);
+ }
++
++ssize_t cpu_show_indirect_target_selection(struct device *dev, struct device_attribute *attr, char *buf)
++{
++	return cpu_show_common(dev, attr, buf, X86_BUG_ITS);
++}
+ #endif
+ 
+ void __warn_thunk(void)
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index f439763f45ae6f..39e9ec3dea985d 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -1228,6 +1228,10 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
+ #define GDS		BIT(6)
+ /* CPU is affected by Register File Data Sampling */
+ #define RFDS		BIT(7)
++/* CPU is affected by Indirect Target Selection */
++#define ITS		BIT(8)
++/* CPU is affected by Indirect Target Selection, but guest-host isolation is not affected */
++#define ITS_NATIVE_ONLY	BIT(9)
+ 
+ static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
+ 	VULNBL_INTEL_STEPPINGS(INTEL_IVYBRIDGE,		X86_STEPPING_ANY,		SRBDS),
+@@ -1239,22 +1243,25 @@ static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
+ 	VULNBL_INTEL_STEPPINGS(INTEL_BROADWELL_G,	X86_STEPPING_ANY,		SRBDS),
+ 	VULNBL_INTEL_STEPPINGS(INTEL_BROADWELL_X,	X86_STEPPING_ANY,		MMIO),
+ 	VULNBL_INTEL_STEPPINGS(INTEL_BROADWELL,		X86_STEPPING_ANY,		SRBDS),
+-	VULNBL_INTEL_STEPPINGS(INTEL_SKYLAKE_X,		X86_STEPPING_ANY,		MMIO | RETBLEED | GDS),
++	VULNBL_INTEL_STEPPINGS(INTEL_SKYLAKE_X,		X86_STEPPINGS(0x0, 0x5),	MMIO | RETBLEED | GDS),
++	VULNBL_INTEL_STEPPINGS(INTEL_SKYLAKE_X,		X86_STEPPING_ANY,		MMIO | RETBLEED | GDS | ITS),
+ 	VULNBL_INTEL_STEPPINGS(INTEL_SKYLAKE_L,		X86_STEPPING_ANY,		MMIO | RETBLEED | GDS | SRBDS),
+ 	VULNBL_INTEL_STEPPINGS(INTEL_SKYLAKE,		X86_STEPPING_ANY,		MMIO | RETBLEED | GDS | SRBDS),
+-	VULNBL_INTEL_STEPPINGS(INTEL_KABYLAKE_L,	X86_STEPPING_ANY,		MMIO | RETBLEED | GDS | SRBDS),
+-	VULNBL_INTEL_STEPPINGS(INTEL_KABYLAKE,		X86_STEPPING_ANY,		MMIO | RETBLEED | GDS | SRBDS),
++	VULNBL_INTEL_STEPPINGS(INTEL_KABYLAKE_L,	X86_STEPPINGS(0x0, 0xb),	MMIO | RETBLEED | GDS | SRBDS),
++	VULNBL_INTEL_STEPPINGS(INTEL_KABYLAKE_L,	X86_STEPPING_ANY,		MMIO | RETBLEED | GDS | SRBDS | ITS),
++	VULNBL_INTEL_STEPPINGS(INTEL_KABYLAKE,		X86_STEPPINGS(0x0, 0xc),	MMIO | RETBLEED | GDS | SRBDS),
++	VULNBL_INTEL_STEPPINGS(INTEL_KABYLAKE,		X86_STEPPING_ANY,		MMIO | RETBLEED | GDS | SRBDS | ITS),
+ 	VULNBL_INTEL_STEPPINGS(INTEL_CANNONLAKE_L,	X86_STEPPING_ANY,		RETBLEED),
+-	VULNBL_INTEL_STEPPINGS(INTEL_ICELAKE_L,		X86_STEPPING_ANY,		MMIO | MMIO_SBDS | RETBLEED | GDS),
+-	VULNBL_INTEL_STEPPINGS(INTEL_ICELAKE_D,		X86_STEPPING_ANY,		MMIO | GDS),
+-	VULNBL_INTEL_STEPPINGS(INTEL_ICELAKE_X,		X86_STEPPING_ANY,		MMIO | GDS),
+-	VULNBL_INTEL_STEPPINGS(INTEL_COMETLAKE,		X86_STEPPING_ANY,		MMIO | MMIO_SBDS | RETBLEED | GDS),
+-	VULNBL_INTEL_STEPPINGS(INTEL_COMETLAKE_L,	X86_STEPPINGS(0x0, 0x0),	MMIO | RETBLEED),
+-	VULNBL_INTEL_STEPPINGS(INTEL_COMETLAKE_L,	X86_STEPPING_ANY,		MMIO | MMIO_SBDS | RETBLEED | GDS),
+-	VULNBL_INTEL_STEPPINGS(INTEL_TIGERLAKE_L,	X86_STEPPING_ANY,		GDS),
+-	VULNBL_INTEL_STEPPINGS(INTEL_TIGERLAKE,		X86_STEPPING_ANY,		GDS),
++	VULNBL_INTEL_STEPPINGS(INTEL_ICELAKE_L,		X86_STEPPING_ANY,		MMIO | MMIO_SBDS | RETBLEED | GDS | ITS | ITS_NATIVE_ONLY),
++	VULNBL_INTEL_STEPPINGS(INTEL_ICELAKE_D,		X86_STEPPING_ANY,		MMIO | GDS | ITS | ITS_NATIVE_ONLY),
++	VULNBL_INTEL_STEPPINGS(INTEL_ICELAKE_X,		X86_STEPPING_ANY,		MMIO | GDS | ITS | ITS_NATIVE_ONLY),
++	VULNBL_INTEL_STEPPINGS(INTEL_COMETLAKE,		X86_STEPPING_ANY,		MMIO | MMIO_SBDS | RETBLEED | GDS | ITS),
++	VULNBL_INTEL_STEPPINGS(INTEL_COMETLAKE_L,	X86_STEPPINGS(0x0, 0x0),	MMIO | RETBLEED | ITS),
++	VULNBL_INTEL_STEPPINGS(INTEL_COMETLAKE_L,	X86_STEPPING_ANY,		MMIO | MMIO_SBDS | RETBLEED | GDS | ITS),
++	VULNBL_INTEL_STEPPINGS(INTEL_TIGERLAKE_L,	X86_STEPPING_ANY,		GDS | ITS | ITS_NATIVE_ONLY),
++	VULNBL_INTEL_STEPPINGS(INTEL_TIGERLAKE,		X86_STEPPING_ANY,		GDS | ITS | ITS_NATIVE_ONLY),
+ 	VULNBL_INTEL_STEPPINGS(INTEL_LAKEFIELD,		X86_STEPPING_ANY,		MMIO | MMIO_SBDS | RETBLEED),
+-	VULNBL_INTEL_STEPPINGS(INTEL_ROCKETLAKE,	X86_STEPPING_ANY,		MMIO | RETBLEED | GDS),
++	VULNBL_INTEL_STEPPINGS(INTEL_ROCKETLAKE,	X86_STEPPING_ANY,		MMIO | RETBLEED | GDS | ITS | ITS_NATIVE_ONLY),
+ 	VULNBL_INTEL_STEPPINGS(INTEL_ALDERLAKE,		X86_STEPPING_ANY,		RFDS),
+ 	VULNBL_INTEL_STEPPINGS(INTEL_ALDERLAKE_L,	X86_STEPPING_ANY,		RFDS),
+ 	VULNBL_INTEL_STEPPINGS(INTEL_RAPTORLAKE,	X86_STEPPING_ANY,		RFDS),
+@@ -1318,6 +1325,32 @@ static bool __init vulnerable_to_rfds(u64 x86_arch_cap_msr)
+ 	return cpu_matches(cpu_vuln_blacklist, RFDS);
+ }
+ 
++static bool __init vulnerable_to_its(u64 x86_arch_cap_msr)
++{
++	/* The "immunity" bit trumps everything else: */
++	if (x86_arch_cap_msr & ARCH_CAP_ITS_NO)
++		return false;
++	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
++		return false;
++
++	/* None of the affected CPUs have BHI_CTRL */
++	if (boot_cpu_has(X86_FEATURE_BHI_CTRL))
++		return false;
++
++	/*
++	 * If a VMM did not expose ITS_NO, assume that a guest could
++	 * be running on vulnerable hardware or may migrate to such
++	 * hardware.
++	 */
++	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
++		return true;
++
++	if (cpu_matches(cpu_vuln_blacklist, ITS))
++		return true;
++
++	return false;
++}
++
+ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+ {
+ 	u64 x86_arch_cap_msr = x86_read_arch_cap_msr();
+@@ -1437,9 +1470,12 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+ 	if (vulnerable_to_rfds(x86_arch_cap_msr))
+ 		setup_force_cpu_bug(X86_BUG_RFDS);
+ 
+-	/* When virtualized, eIBRS could be hidden, assume vulnerable */
+-	if (!(x86_arch_cap_msr & ARCH_CAP_BHI_NO) &&
+-	    !cpu_matches(cpu_vuln_whitelist, NO_BHI) &&
++	/*
++	 * Intel parts with eIBRS are vulnerable to BHI attacks. Parts with
++	 * BHI_NO still need to use the BHI mitigation to prevent Intra-mode
++	 * attacks.  When virtualized, eIBRS could be hidden, assume vulnerable.
++	 */
++	if (!cpu_matches(cpu_vuln_whitelist, NO_BHI) &&
+ 	    (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED) ||
+ 	     boot_cpu_has(X86_FEATURE_HYPERVISOR)))
+ 		setup_force_cpu_bug(X86_BUG_BHI);
+@@ -1447,6 +1483,12 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+ 	if (cpu_has(c, X86_FEATURE_AMD_IBPB) && !cpu_has(c, X86_FEATURE_AMD_IBPB_RET))
+ 		setup_force_cpu_bug(X86_BUG_IBPB_NO_RET);
+ 
++	if (vulnerable_to_its(x86_arch_cap_msr)) {
++		setup_force_cpu_bug(X86_BUG_ITS);
++		if (cpu_matches(cpu_vuln_blacklist, ITS_NATIVE_ONLY))
++			setup_force_cpu_bug(X86_BUG_ITS_NATIVE_ONLY);
++	}
++
+ 	if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
+ 		return;
+ 
+diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
+index 093d3ca43c4674..2f84164b20e011 100644
+--- a/arch/x86/kernel/cpu/microcode/amd.c
++++ b/arch/x86/kernel/cpu/microcode/amd.c
+@@ -1099,15 +1099,17 @@ static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t siz
+ 
+ static int __init save_microcode_in_initrd(void)
+ {
+-	unsigned int cpuid_1_eax = native_cpuid_eax(1);
+ 	struct cpuinfo_x86 *c = &boot_cpu_data;
+ 	struct cont_desc desc = { 0 };
++	unsigned int cpuid_1_eax;
+ 	enum ucode_state ret;
+ 	struct cpio_data cp;
+ 
+-	if (dis_ucode_ldr || c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10)
++	if (microcode_loader_disabled() || c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10)
+ 		return 0;
+ 
++	cpuid_1_eax = native_cpuid_eax(1);
++
+ 	if (!find_blobs_in_containers(&cp))
+ 		return -EINVAL;
+ 
+diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
+index b3658d11e7b692..079f046ee26d19 100644
+--- a/arch/x86/kernel/cpu/microcode/core.c
++++ b/arch/x86/kernel/cpu/microcode/core.c
+@@ -41,8 +41,8 @@
+ 
+ #include "internal.h"
+ 
+-static struct microcode_ops	*microcode_ops;
+-bool dis_ucode_ldr = true;
++static struct microcode_ops *microcode_ops;
++static bool dis_ucode_ldr = false;
+ 
+ bool force_minrev = IS_ENABLED(CONFIG_MICROCODE_LATE_FORCE_MINREV);
+ module_param(force_minrev, bool, S_IRUSR | S_IWUSR);
+@@ -84,6 +84,9 @@ static bool amd_check_current_patch_level(void)
+ 	u32 lvl, dummy, i;
+ 	u32 *levels;
+ 
++	if (x86_cpuid_vendor() != X86_VENDOR_AMD)
++		return false;
++
+ 	native_rdmsr(MSR_AMD64_PATCH_LEVEL, lvl, dummy);
+ 
+ 	levels = final_levels;
+@@ -95,27 +98,29 @@ static bool amd_check_current_patch_level(void)
+ 	return false;
+ }
+ 
+-static bool __init check_loader_disabled_bsp(void)
++bool __init microcode_loader_disabled(void)
+ {
+-	static const char *__dis_opt_str = "dis_ucode_ldr";
+-	const char *cmdline = boot_command_line;
+-	const char *option  = __dis_opt_str;
++	if (dis_ucode_ldr)
++		return true;
+ 
+ 	/*
+-	 * CPUID(1).ECX[31]: reserved for hypervisor use. This is still not
+-	 * completely accurate as xen pv guests don't see that CPUID bit set but
+-	 * that's good enough as they don't land on the BSP path anyway.
++	 * Disable when:
++	 *
++	 * 1) The CPU does not support CPUID.
++	 *
++	 * 2) Bit 31 in CPUID[1]:ECX is set
++	 *    The bit is reserved for hypervisor use. This is still not
++	 *    completely accurate as XEN PV guests don't see that CPUID bit
++	 *    set, but that's good enough as they don't land on the BSP
++	 *    path anyway.
++	 *
++	 * 3) Certain AMD patch levels are not allowed to be
++	 *    overwritten.
+ 	 */
+-	if (native_cpuid_ecx(1) & BIT(31))
+-		return true;
+-
+-	if (x86_cpuid_vendor() == X86_VENDOR_AMD) {
+-		if (amd_check_current_patch_level())
+-			return true;
+-	}
+-
+-	if (cmdline_find_option_bool(cmdline, option) <= 0)
+-		dis_ucode_ldr = false;
++	if (!have_cpuid_p() ||
++	    native_cpuid_ecx(1) & BIT(31) ||
++	    amd_check_current_patch_level())
++		dis_ucode_ldr = true;
+ 
+ 	return dis_ucode_ldr;
+ }
+@@ -125,7 +130,10 @@ void __init load_ucode_bsp(void)
+ 	unsigned int cpuid_1_eax;
+ 	bool intel = true;
+ 
+-	if (!have_cpuid_p())
++	if (cmdline_find_option_bool(boot_command_line, "dis_ucode_ldr") > 0)
++		dis_ucode_ldr = true;
++
++	if (microcode_loader_disabled())
+ 		return;
+ 
+ 	cpuid_1_eax = native_cpuid_eax(1);
+@@ -146,9 +154,6 @@ void __init load_ucode_bsp(void)
+ 		return;
+ 	}
+ 
+-	if (check_loader_disabled_bsp())
+-		return;
+-
+ 	if (intel)
+ 		load_ucode_intel_bsp(&early_data);
+ 	else
+@@ -159,6 +164,11 @@ void load_ucode_ap(void)
+ {
+ 	unsigned int cpuid_1_eax;
+ 
++	/*
++	 * Can't use microcode_loader_disabled() here - .init section
++	 * hell. It doesn't have to either - the BSP variant must've
++	 * parsed cmdline already anyway.
++	 */
+ 	if (dis_ucode_ldr)
+ 		return;
+ 
+@@ -810,7 +820,7 @@ static int __init microcode_init(void)
+ 	struct cpuinfo_x86 *c = &boot_cpu_data;
+ 	int error;
+ 
+-	if (dis_ucode_ldr)
++	if (microcode_loader_disabled())
+ 		return -EINVAL;
+ 
+ 	if (c->x86_vendor == X86_VENDOR_INTEL)
+diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
+index 815fa67356a2dd..df5650eb3f0881 100644
+--- a/arch/x86/kernel/cpu/microcode/intel.c
++++ b/arch/x86/kernel/cpu/microcode/intel.c
+@@ -395,7 +395,7 @@ static int __init save_builtin_microcode(void)
+ 	if (xchg(&ucode_patch_va, NULL) != UCODE_BSP_LOADED)
+ 		return 0;
+ 
+-	if (dis_ucode_ldr || boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
++	if (microcode_loader_disabled() || boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+ 		return 0;
+ 
+ 	uci.mc = get_microcode_blob(&uci, true);
+diff --git a/arch/x86/kernel/cpu/microcode/internal.h b/arch/x86/kernel/cpu/microcode/internal.h
+index 5df621752fefac..50a9702ae4e2b5 100644
+--- a/arch/x86/kernel/cpu/microcode/internal.h
++++ b/arch/x86/kernel/cpu/microcode/internal.h
+@@ -94,7 +94,6 @@ static inline unsigned int x86_cpuid_family(void)
+ 	return x86_family(eax);
+ }
+ 
+-extern bool dis_ucode_ldr;
+ extern bool force_minrev;
+ 
+ #ifdef CONFIG_CPU_SUP_AMD
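
[Editor's note: the hypervisor test that microcode_loader_disabled() folds in
above is CPUID leaf 1, ECX bit 31, which is reserved for hypervisor use. A
minimal sketch, with cpuid() and BIT() being the usual kernel helpers and the
function name hypothetical:

	static bool running_under_hypervisor_sketch(void)
	{
		unsigned int eax, ebx, ecx, edx;

		cpuid(1, &eax, &ebx, &ecx, &edx);
		return ecx & BIT(31);	/* set by virtually all hypervisors */
	}
]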
+diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
+index 8da0e66ca22dec..bfab966ea56e84 100644
+--- a/arch/x86/kernel/ftrace.c
++++ b/arch/x86/kernel/ftrace.c
+@@ -354,7 +354,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
+ 		goto fail;
+ 
+ 	ip = trampoline + size;
+-	if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
++	if (cpu_wants_rethunk_at(ip))
+ 		__text_gen_insn(ip, JMP32_INSN_OPCODE, ip, x86_return_thunk, JMP32_INSN_SIZE);
+ 	else
+ 		memcpy(ip, retq, sizeof(retq));
+diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
+index de001b2146abf3..375f2d7f1762d4 100644
+--- a/arch/x86/kernel/head32.c
++++ b/arch/x86/kernel/head32.c
+@@ -145,10 +145,6 @@ void __init __no_stack_protector mk_early_pgtbl_32(void)
+ 	*ptr = (unsigned long)ptep + PAGE_OFFSET;
+ 
+ #ifdef CONFIG_MICROCODE_INITRD32
+-	/* Running on a hypervisor? */
+-	if (native_cpuid_ecx(1) & BIT(31))
+-		return;
+-
+ 	params = (struct boot_params *)__pa_nodebug(&boot_params);
+ 	if (!params->hdr.ramdisk_size || !params->hdr.ramdisk_image)
+ 		return;
+diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
+index 837450b6e882f7..1e231dac61e32b 100644
+--- a/arch/x86/kernel/module.c
++++ b/arch/x86/kernel/module.c
+@@ -251,6 +251,8 @@ int module_finalize(const Elf_Ehdr *hdr,
+ 			ibt_endbr = s;
+ 	}
+ 
++	its_init_mod(me);
++
+ 	if (retpolines || cfi) {
+ 		void *rseg = NULL, *cseg = NULL;
+ 		unsigned int rsize = 0, csize = 0;
+@@ -271,6 +273,9 @@ int module_finalize(const Elf_Ehdr *hdr,
+ 		void *rseg = (void *)retpolines->sh_addr;
+ 		apply_retpolines(rseg, rseg + retpolines->sh_size);
+ 	}
++
++	its_fini_mod(me);
++
+ 	if (returns) {
+ 		void *rseg = (void *)returns->sh_addr;
+ 		apply_returns(rseg, rseg + returns->sh_size);
+@@ -318,4 +323,5 @@ int module_finalize(const Elf_Ehdr *hdr,
+ void module_arch_cleanup(struct module *mod)
+ {
+ 	alternatives_smp_module_del(mod);
++	its_free_mod(mod);
+ }
+diff --git a/arch/x86/kernel/static_call.c b/arch/x86/kernel/static_call.c
+index 9e51242ed125ee..aae909d4ed7853 100644
+--- a/arch/x86/kernel/static_call.c
++++ b/arch/x86/kernel/static_call.c
+@@ -81,7 +81,7 @@ static void __ref __static_call_transform(void *insn, enum insn_type type,
+ 		break;
+ 
+ 	case RET:
+-		if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
++		if (cpu_wants_rethunk_at(insn))
+ 			code = text_gen_insn(JMP32_INSN_OPCODE, insn, x86_return_thunk);
+ 		else
+ 			code = &retinsn;
+@@ -90,7 +90,7 @@ static void __ref __static_call_transform(void *insn, enum insn_type type,
+ 	case JCC:
+ 		if (!func) {
+ 			func = __static_call_return;
+-			if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
++			if (cpu_wants_rethunk())
+ 				func = x86_return_thunk;
+ 		}
+ 
+diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
+index feb8102a9ca78c..e2567c8f6bac5d 100644
+--- a/arch/x86/kernel/vmlinux.lds.S
++++ b/arch/x86/kernel/vmlinux.lds.S
+@@ -530,4 +530,14 @@ INIT_PER_CPU(irq_stack_backing_store);
+ 		"SRSO function pair won't alias");
+ #endif
+ 
++#if defined(CONFIG_MITIGATION_ITS) && !defined(CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B)
++. = ASSERT(__x86_indirect_its_thunk_rax & 0x20, "__x86_indirect_thunk_rax not in second half of cacheline");
++. = ASSERT(((__x86_indirect_its_thunk_rcx - __x86_indirect_its_thunk_rax) % 64) == 0, "Indirect thunks are not cacheline apart");
++. = ASSERT(__x86_indirect_its_thunk_array == __x86_indirect_its_thunk_rax, "Gap in ITS thunk array");
++#endif
++
++#if defined(CONFIG_MITIGATION_ITS) && !defined(CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B)
++. = ASSERT(its_return_thunk & 0x20, "its_return_thunk not in second half of cacheline");
++#endif
++
+ #endif /* CONFIG_X86_64 */
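
[Editor's note: the "& 0x20" in the asserts above tests bit 5 of the address,
i.e. whether the symbol lands at byte offset 32..63 of its 64-byte cacheline.
Equivalent check, as a sketch (helper name hypothetical):

	static inline bool in_upper_half_of_cacheline(unsigned long addr)
	{
		return (addr & 0x3f) >= 32;	/* same condition as addr & 0x20 */
	}
]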
+diff --git a/arch/x86/kvm/smm.c b/arch/x86/kvm/smm.c
+index 85241c0c7f569b..b0cab215b08f09 100644
+--- a/arch/x86/kvm/smm.c
++++ b/arch/x86/kvm/smm.c
+@@ -131,6 +131,7 @@ void kvm_smm_changed(struct kvm_vcpu *vcpu, bool entering_smm)
+ 
+ 	kvm_mmu_reset_context(vcpu);
+ }
++EXPORT_SYMBOL_GPL(kvm_smm_changed);
+ 
+ void process_smi(struct kvm_vcpu *vcpu)
+ {
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
+index e39ab7c0be4e9c..7cbacd0439211e 100644
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -2222,6 +2222,10 @@ static int shutdown_interception(struct kvm_vcpu *vcpu)
+ 	 */
+ 	if (!sev_es_guest(vcpu->kvm)) {
+ 		clear_page(svm->vmcb);
++#ifdef CONFIG_KVM_SMM
++		if (is_smm(vcpu))
++			kvm_smm_changed(vcpu, false);
++#endif
+ 		kvm_vcpu_reset(vcpu, true);
+ 	}
+ 
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 7a5367b14518f9..f378d479fea3f9 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1623,7 +1623,7 @@ EXPORT_SYMBOL_GPL(kvm_emulate_rdpmc);
+ 	 ARCH_CAP_PSCHANGE_MC_NO | ARCH_CAP_TSX_CTRL_MSR | ARCH_CAP_TAA_NO | \
+ 	 ARCH_CAP_SBDR_SSDP_NO | ARCH_CAP_FBSDP_NO | ARCH_CAP_PSDP_NO | \
+ 	 ARCH_CAP_FB_CLEAR | ARCH_CAP_RRSBA | ARCH_CAP_PBRSB_NO | ARCH_CAP_GDS_NO | \
+-	 ARCH_CAP_RFDS_NO | ARCH_CAP_RFDS_CLEAR | ARCH_CAP_BHI_NO)
++	 ARCH_CAP_RFDS_NO | ARCH_CAP_RFDS_CLEAR | ARCH_CAP_BHI_NO | ARCH_CAP_ITS_NO)
+ 
+ static u64 kvm_get_arch_capabilities(void)
+ {
+@@ -1657,6 +1657,8 @@ static u64 kvm_get_arch_capabilities(void)
+ 		data |= ARCH_CAP_MDS_NO;
+ 	if (!boot_cpu_has_bug(X86_BUG_RFDS))
+ 		data |= ARCH_CAP_RFDS_NO;
++	if (!boot_cpu_has_bug(X86_BUG_ITS))
++		data |= ARCH_CAP_ITS_NO;
+ 
+ 	if (!boot_cpu_has(X86_FEATURE_RTM)) {
+ 		/*
+diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
+index 391059b2c6fbc4..614fb9aee2ff65 100644
+--- a/arch/x86/lib/retpoline.S
++++ b/arch/x86/lib/retpoline.S
+@@ -366,6 +366,45 @@ SYM_FUNC_END(call_depth_return_thunk)
+ 
+ #endif /* CONFIG_MITIGATION_CALL_DEPTH_TRACKING */
+ 
++#ifdef CONFIG_MITIGATION_ITS
++
++.macro ITS_THUNK reg
++
++SYM_INNER_LABEL(__x86_indirect_its_thunk_\reg, SYM_L_GLOBAL)
++	UNWIND_HINT_UNDEFINED
++	ANNOTATE_NOENDBR
++	ANNOTATE_RETPOLINE_SAFE
++	jmp *%\reg
++	int3
++	.align 32, 0xcc		/* fill to the end of the line */
++	.skip  32, 0xcc		/* skip to the next upper half */
++.endm
++
++/* ITS mitigation requires thunks be aligned to upper half of cacheline */
++.align 64, 0xcc
++.skip 32, 0xcc
++SYM_CODE_START(__x86_indirect_its_thunk_array)
++
++#define GEN(reg) ITS_THUNK reg
++#include <asm/GEN-for-each-reg.h>
++#undef GEN
++
++	.align 64, 0xcc
++SYM_CODE_END(__x86_indirect_its_thunk_array)
++
++.align 64, 0xcc
++.skip 32, 0xcc
++SYM_CODE_START(its_return_thunk)
++	UNWIND_HINT_FUNC
++	ANNOTATE_NOENDBR
++	ANNOTATE_UNRET_SAFE
++	ret
++	int3
++SYM_CODE_END(its_return_thunk)
++EXPORT_SYMBOL(its_return_thunk)
++
++#endif /* CONFIG_MITIGATION_ITS */
++
+ /*
+  * This function name is magical and is used by -mfunction-return=thunk-extern
+  * for the compiler to generate JMPs to it.
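
[Editor's note: every ITS_THUNK expansion above pads itself to a 64-byte
stride (code, ".align 32" fill, ".skip 32"), with the array starting in the
upper half of a cacheline, so the thunk for a given register is reachable by
simple indexing. A hedged sketch of that computation; the its_static_thunk()
helper used by the BPF JIT hunk below is expected to do something equivalent:

	extern u8 __x86_indirect_its_thunk_array[];

	static void *its_thunk_for_reg_sketch(int reg)
	{
		return __x86_indirect_its_thunk_array + reg * 64;
	}
]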
+diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
+index 27d81cb049ff81..8629d90fdcd922 100644
+--- a/arch/x86/mm/tlb.c
++++ b/arch/x86/mm/tlb.c
+@@ -624,7 +624,11 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next,
+ 
+ 		choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush);
+ 
+-		/* Let nmi_uaccess_okay() know that we're changing CR3. */
++		/*
++		 * Indicate that CR3 is about to change. nmi_uaccess_okay()
++		 * and others are sensitive to the window where mm_cpumask(),
++		 * CR3 and cpu_tlbstate.loaded_mm are not all in sync.
++		 */
+ 		this_cpu_write(cpu_tlbstate.loaded_mm, LOADED_MM_SWITCHING);
+ 		barrier();
+ 	}
+@@ -895,8 +899,16 @@ static void flush_tlb_func(void *info)
+ 
+ static bool should_flush_tlb(int cpu, void *data)
+ {
++	struct mm_struct *loaded_mm = per_cpu(cpu_tlbstate.loaded_mm, cpu);
+ 	struct flush_tlb_info *info = data;
+ 
++	/*
++	 * Order the 'loaded_mm' and 'is_lazy' against their
++	 * write ordering in switch_mm_irqs_off(). Ensure
++	 * 'is_lazy' is at least as new as 'loaded_mm'.
++	 */
++	smp_rmb();
++
+ 	/* Lazy TLB will get flushed at the next context switch. */
+ 	if (per_cpu(cpu_tlbstate_shared.is_lazy, cpu))
+ 		return false;
+@@ -905,8 +917,15 @@ static bool should_flush_tlb(int cpu, void *data)
+ 	if (!info->mm)
+ 		return true;
+ 
++	/*
++	 * While switching, the remote CPU could have state from
++	 * either the prev or next mm. Assume the worst and flush.
++	 */
++	if (loaded_mm == LOADED_MM_SWITCHING)
++		return true;
++
+ 	/* The target mm is loaded, and the CPU is not lazy. */
+-	if (per_cpu(cpu_tlbstate.loaded_mm, cpu) == info->mm)
++	if (loaded_mm == info->mm)
+ 		return true;
+ 
+ 	/* In cpumask, but not the loaded mm? Periodically remove by flushing. */
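
[Editor's note: the smp_rmb() added above pairs with the publication order on
the switching CPU. A fragmentary sketch of the contract (not standalone code):

	/* writer, switch_mm_irqs_off() */
	this_cpu_write(cpu_tlbstate.loaded_mm, LOADED_MM_SWITCHING);
	barrier();
	/* ...then is_lazy, mm_cpumask and CR3 are updated... */

	/* reader, should_flush_tlb() */
	loaded_mm = per_cpu(cpu_tlbstate.loaded_mm, cpu);
	smp_rmb();	/* 'is_lazy' at least as new as 'loaded_mm' */
	lazy = per_cpu(cpu_tlbstate_shared.is_lazy, cpu);

A remote CPU is thus either observed mid-switch (LOADED_MM_SWITCHING, flush
unconditionally) or with a lazy flag no staler than the mm it reports.]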
+diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
+index 06b080b61aa578..ccb2f7703c33c9 100644
+--- a/arch/x86/net/bpf_jit_comp.c
++++ b/arch/x86/net/bpf_jit_comp.c
+@@ -41,6 +41,8 @@ static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
+ #define EMIT2(b1, b2)		EMIT((b1) + ((b2) << 8), 2)
+ #define EMIT3(b1, b2, b3)	EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
+ #define EMIT4(b1, b2, b3, b4)   EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
++#define EMIT5(b1, b2, b3, b4, b5) \
++	do { EMIT1(b1); EMIT4(b2, b3, b4, b5); } while (0)
+ 
+ #define EMIT1_off32(b1, off) \
+ 	do { EMIT1(b1); EMIT(off, 4); } while (0)
+@@ -637,7 +639,10 @@ static void emit_indirect_jump(u8 **pprog, int reg, u8 *ip)
+ {
+ 	u8 *prog = *pprog;
+ 
+-	if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
++	if (cpu_feature_enabled(X86_FEATURE_INDIRECT_THUNK_ITS)) {
++		OPTIMIZER_HIDE_VAR(reg);
++		emit_jump(&prog, its_static_thunk(reg), ip);
++	} else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
+ 		EMIT_LFENCE();
+ 		EMIT2(0xFF, 0xE0 + reg);
+ 	} else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) {
+@@ -659,7 +664,7 @@ static void emit_return(u8 **pprog, u8 *ip)
+ {
+ 	u8 *prog = *pprog;
+ 
+-	if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) {
++	if (cpu_wants_rethunk()) {
+ 		emit_jump(&prog, x86_return_thunk, ip);
+ 	} else {
+ 		EMIT1(0xC3);		/* ret */
+@@ -1412,6 +1417,48 @@ static void emit_shiftx(u8 **pprog, u32 dst_reg, u8 src_reg, bool is64, u8 op)
+ #define LOAD_TAIL_CALL_CNT_PTR(stack)				\
+ 	__LOAD_TCC_PTR(BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack))
+ 
++static int emit_spectre_bhb_barrier(u8 **pprog, u8 *ip,
++				    struct bpf_prog *bpf_prog)
++{
++	u8 *prog = *pprog;
++	u8 *func;
++
++	if (cpu_feature_enabled(X86_FEATURE_CLEAR_BHB_LOOP)) {
++		/* The clearing sequence clobbers eax and ecx. */
++		EMIT1(0x50); /* push rax */
++		EMIT1(0x51); /* push rcx */
++		ip += 2;
++
++		func = (u8 *)clear_bhb_loop;
++		ip += x86_call_depth_emit_accounting(&prog, func, ip);
++
++		if (emit_call(&prog, func, ip))
++			return -EINVAL;
++		EMIT1(0x59); /* pop rcx */
++		EMIT1(0x58); /* pop rax */
++	}
++	/* Insert IBHF instruction */
++	if ((cpu_feature_enabled(X86_FEATURE_CLEAR_BHB_LOOP) &&
++	     cpu_feature_enabled(X86_FEATURE_HYPERVISOR)) ||
++	    cpu_feature_enabled(X86_FEATURE_CLEAR_BHB_HW)) {
++		/*
++		 * Add an Indirect Branch History Fence (IBHF). IBHF acts as a
++		 * fence preventing branch history from before the fence from
++		 * affecting indirect branches after the fence. This is
++		 * specifically used in cBPF jitted code to prevent Intra-mode
++		 * BHI attacks. The IBHF instruction is designed to be a NOP on
++		 * hardware that doesn't need or support it.  The REP and REX.W
++		 * prefixes are required by the microcode, and they also ensure
++		 * that the NOP is unlikely to be used in existing code.
++		 *
++		 * IBHF is not a valid instruction in 32-bit mode.
++		 */
++		EMIT5(0xF3, 0x48, 0x0F, 0x1E, 0xF8); /* ibhf */
++	}
++	*pprog = prog;
++	return 0;
++}
++
+ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image,
+ 		  int oldproglen, struct jit_context *ctx, bool jmp_padding)
+ {
+@@ -2402,6 +2449,13 @@ st:			if (is_imm8(insn->off))
+ 			seen_exit = true;
+ 			/* Update cleanup_addr */
+ 			ctx->cleanup_addr = proglen;
++			if (bpf_prog_was_classic(bpf_prog) &&
++			    !capable(CAP_SYS_ADMIN)) {
++				u8 *ip = image + addrs[i - 1];
++
++				if (emit_spectre_bhb_barrier(&prog, ip, bpf_prog))
++					return -EINVAL;
++			}
+ 			if (bpf_prog->aux->exception_boundary) {
+ 				pop_callee_regs(&prog, all_callee_regs_used);
+ 				pop_r12(&prog);
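
[Editor's note: the EMIT5() above encodes the fence as a literal byte
sequence. Spelled out as a sketch, with the bytes taken from the hunk:

	static const u8 ibhf_insn[5] = {
		0xf3,			/* REP prefix, required by the definition */
		0x48,			/* REX.W prefix */
		0x0f, 0x1e, 0xf8,	/* hint-NOP opcode space: decodes as a
					 * plain NOP on hardware without IBHF */
	};
]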
+diff --git a/drivers/accel/ivpu/ivpu_hw.c b/drivers/accel/ivpu/ivpu_hw.c
+index 08b3cef58fd2d7..1214f155afa111 100644
+--- a/drivers/accel/ivpu/ivpu_hw.c
++++ b/drivers/accel/ivpu/ivpu_hw.c
+@@ -106,7 +106,7 @@ static void timeouts_init(struct ivpu_device *vdev)
+ 		else
+ 			vdev->timeout.autosuspend = 100;
+ 		vdev->timeout.d0i3_entry_msg = 5;
+-		vdev->timeout.state_dump_msg = 10;
++		vdev->timeout.state_dump_msg = 100;
+ 	}
+ }
+ 
+diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
+index fdaa24bb641a00..d88f721cf68cde 100644
+--- a/drivers/base/cpu.c
++++ b/drivers/base/cpu.c
+@@ -599,6 +599,7 @@ CPU_SHOW_VULN_FALLBACK(retbleed);
+ CPU_SHOW_VULN_FALLBACK(spec_rstack_overflow);
+ CPU_SHOW_VULN_FALLBACK(gds);
+ CPU_SHOW_VULN_FALLBACK(reg_file_data_sampling);
++CPU_SHOW_VULN_FALLBACK(indirect_target_selection);
+ 
+ static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
+ static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
+@@ -614,6 +615,7 @@ static DEVICE_ATTR(retbleed, 0444, cpu_show_retbleed, NULL);
+ static DEVICE_ATTR(spec_rstack_overflow, 0444, cpu_show_spec_rstack_overflow, NULL);
+ static DEVICE_ATTR(gather_data_sampling, 0444, cpu_show_gds, NULL);
+ static DEVICE_ATTR(reg_file_data_sampling, 0444, cpu_show_reg_file_data_sampling, NULL);
++static DEVICE_ATTR(indirect_target_selection, 0444, cpu_show_indirect_target_selection, NULL);
+ 
+ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
+ 	&dev_attr_meltdown.attr,
+@@ -630,6 +632,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
+ 	&dev_attr_spec_rstack_overflow.attr,
+ 	&dev_attr_gather_data_sampling.attr,
+ 	&dev_attr_reg_file_data_sampling.attr,
++	&dev_attr_indirect_target_selection.attr,
+ 	NULL
+ };
+ 
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index 8827a768284ac4..6bd44ec2c9b1aa 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -493,6 +493,25 @@ static int loop_validate_file(struct file *file, struct block_device *bdev)
+ 	return 0;
+ }
+ 
++static void loop_assign_backing_file(struct loop_device *lo, struct file *file)
++{
++	lo->lo_backing_file = file;
++	lo->old_gfp_mask = mapping_gfp_mask(file->f_mapping);
++	mapping_set_gfp_mask(file->f_mapping,
++			lo->old_gfp_mask & ~(__GFP_IO | __GFP_FS));
++}
++
++static int loop_check_backing_file(struct file *file)
++{
++	if (!file->f_op->read_iter)
++		return -EINVAL;
++
++	if ((file->f_mode & FMODE_WRITE) && !file->f_op->write_iter)
++		return -EINVAL;
++
++	return 0;
++}
++
+ /*
+  * loop_change_fd switched the backing store of a loopback device to
+  * a new file. This is useful for operating system installers to free up
+@@ -513,6 +532,10 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
+ 	if (!file)
+ 		return -EBADF;
+ 
++	error = loop_check_backing_file(file);
++	if (error)
++		return error;
++
+ 	/* suppress uevents while reconfiguring the device */
+ 	dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 1);
+ 
+@@ -545,10 +568,7 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
+ 	disk_force_media_change(lo->lo_disk);
+ 	blk_mq_freeze_queue(lo->lo_queue);
+ 	mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask);
+-	lo->lo_backing_file = file;
+-	lo->old_gfp_mask = mapping_gfp_mask(file->f_mapping);
+-	mapping_set_gfp_mask(file->f_mapping,
+-			     lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
++	loop_assign_backing_file(lo, file);
+ 	loop_update_dio(lo);
+ 	blk_mq_unfreeze_queue(lo->lo_queue);
+ 	partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;
+@@ -694,12 +714,11 @@ static void loop_sysfs_exit(struct loop_device *lo)
+ 				   &loop_attribute_group);
+ }
+ 
+-static void loop_config_discard(struct loop_device *lo,
+-		struct queue_limits *lim)
++static void loop_get_discard_config(struct loop_device *lo,
++				    u32 *granularity, u32 *max_discard_sectors)
+ {
+ 	struct file *file = lo->lo_backing_file;
+ 	struct inode *inode = file->f_mapping->host;
+-	u32 granularity = 0, max_discard_sectors = 0;
+ 	struct kstatfs sbuf;
+ 
+ 	/*
+@@ -710,27 +729,19 @@ static void loop_config_discard(struct loop_device *lo,
+ 	 * file-backed loop devices: discarded regions read back as zero.
+ 	 */
+ 	if (S_ISBLK(inode->i_mode)) {
+-		struct request_queue *backingq = bdev_get_queue(I_BDEV(inode));
++		struct block_device *bdev = I_BDEV(inode);
+ 
+-		max_discard_sectors = backingq->limits.max_write_zeroes_sectors;
+-		granularity = bdev_discard_granularity(I_BDEV(inode)) ?:
+-			queue_physical_block_size(backingq);
++		*max_discard_sectors = bdev_write_zeroes_sectors(bdev);
++		*granularity = bdev_discard_granularity(bdev);
+ 
+ 	/*
+ 	 * We use punch hole to reclaim the free space used by the
+ 	 * image a.k.a. discard.
+ 	 */
+ 	} else if (file->f_op->fallocate && !vfs_statfs(&file->f_path, &sbuf)) {
+-		max_discard_sectors = UINT_MAX >> 9;
+-		granularity = sbuf.f_bsize;
++		*max_discard_sectors = UINT_MAX >> 9;
++		*granularity = sbuf.f_bsize;
+ 	}
+-
+-	lim->max_hw_discard_sectors = max_discard_sectors;
+-	lim->max_write_zeroes_sectors = max_discard_sectors;
+-	if (max_discard_sectors)
+-		lim->discard_granularity = granularity;
+-	else
+-		lim->discard_granularity = 0;
+ }
+ 
+ struct loop_worker {
+@@ -910,12 +921,13 @@ static unsigned int loop_default_blocksize(struct loop_device *lo,
+ 	return SECTOR_SIZE;
+ }
+ 
+-static int loop_reconfigure_limits(struct loop_device *lo, unsigned int bsize)
++static void loop_update_limits(struct loop_device *lo, struct queue_limits *lim,
++		unsigned int bsize)
+ {
+ 	struct file *file = lo->lo_backing_file;
+ 	struct inode *inode = file->f_mapping->host;
+ 	struct block_device *backing_bdev = NULL;
+-	struct queue_limits lim;
++	u32 granularity = 0, max_discard_sectors = 0;
+ 
+ 	if (S_ISBLK(inode->i_mode))
+ 		backing_bdev = I_BDEV(inode);
+@@ -925,17 +937,22 @@ static int loop_reconfigure_limits(struct loop_device *lo, unsigned int bsize)
+ 	if (!bsize)
+ 		bsize = loop_default_blocksize(lo, backing_bdev);
+ 
+-	lim = queue_limits_start_update(lo->lo_queue);
+-	lim.logical_block_size = bsize;
+-	lim.physical_block_size = bsize;
+-	lim.io_min = bsize;
+-	lim.features &= ~(BLK_FEAT_WRITE_CACHE | BLK_FEAT_ROTATIONAL);
++	loop_get_discard_config(lo, &granularity, &max_discard_sectors);
++
++	lim->logical_block_size = bsize;
++	lim->physical_block_size = bsize;
++	lim->io_min = bsize;
++	lim->features &= ~(BLK_FEAT_WRITE_CACHE | BLK_FEAT_ROTATIONAL);
+ 	if (file->f_op->fsync && !(lo->lo_flags & LO_FLAGS_READ_ONLY))
+-		lim.features |= BLK_FEAT_WRITE_CACHE;
++		lim->features |= BLK_FEAT_WRITE_CACHE;
+ 	if (backing_bdev && !bdev_nonrot(backing_bdev))
+-		lim.features |= BLK_FEAT_ROTATIONAL;
+-	loop_config_discard(lo, &lim);
+-	return queue_limits_commit_update(lo->lo_queue, &lim);
++		lim->features |= BLK_FEAT_ROTATIONAL;
++	lim->max_hw_discard_sectors = max_discard_sectors;
++	lim->max_write_zeroes_sectors = max_discard_sectors;
++	if (max_discard_sectors)
++		lim->discard_granularity = granularity;
++	else
++		lim->discard_granularity = 0;
+ }
+ 
+ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
+@@ -943,7 +960,7 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
+ 			  const struct loop_config *config)
+ {
+ 	struct file *file = fget(config->fd);
+-	struct address_space *mapping;
++	struct queue_limits lim;
+ 	int error;
+ 	loff_t size;
+ 	bool partscan;
+@@ -951,6 +968,14 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
+ 
+ 	if (!file)
+ 		return -EBADF;
++
++	if ((mode & BLK_OPEN_WRITE) && !file->f_op->write_iter)
++		return -EINVAL;
++
++	error = loop_check_backing_file(file);
++	if (error)
++		return error;
++
+ 	is_loop = is_loop_device(file);
+ 
+ 	/* This is safe, since we have a reference from open(). */
+@@ -978,8 +1003,6 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
+ 	if (error)
+ 		goto out_unlock;
+ 
+-	mapping = file->f_mapping;
+-
+ 	if ((config->info.lo_flags & ~LOOP_CONFIGURE_SETTABLE_FLAGS) != 0) {
+ 		error = -EINVAL;
+ 		goto out_unlock;
+@@ -1011,11 +1034,11 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
+ 
+ 	lo->use_dio = lo->lo_flags & LO_FLAGS_DIRECT_IO;
+ 	lo->lo_device = bdev;
+-	lo->lo_backing_file = file;
+-	lo->old_gfp_mask = mapping_gfp_mask(mapping);
+-	mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
++	loop_assign_backing_file(lo, file);
+ 
+-	error = loop_reconfigure_limits(lo, config->block_size);
++	lim = queue_limits_start_update(lo->lo_queue);
++	loop_update_limits(lo, &lim, config->block_size);
++	error = queue_limits_commit_update(lo->lo_queue, &lim);
+ 	if (error)
+ 		goto out_unlock;
+ 
+@@ -1383,6 +1406,7 @@ static int loop_set_dio(struct loop_device *lo, unsigned long arg)
+ 
+ static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
+ {
++	struct queue_limits lim;
+ 	int err = 0;
+ 
+ 	if (lo->lo_state != Lo_bound)
+@@ -1395,7 +1419,9 @@ static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
+ 	invalidate_bdev(lo->lo_device);
+ 
+ 	blk_mq_freeze_queue(lo->lo_queue);
+-	err = loop_reconfigure_limits(lo, arg);
++	lim = queue_limits_start_update(lo->lo_queue);
++	loop_update_limits(lo, &lim, arg);
++	err = queue_limits_commit_update(lo->lo_queue, &lim);
+ 	loop_update_dio(lo);
+ 	blk_mq_unfreeze_queue(lo->lo_queue);
+ 
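
[Editor's note: in the file-backed branch above, UINT_MAX >> 9 converts a byte
cap into 512-byte sectors. How the computed pair feeds the queue limits,
condensed into a sketch (helper name hypothetical):

	static void fill_discard_limits_sketch(struct queue_limits *lim,
					       u32 granularity, u32 max_sectors)
	{
		lim->max_hw_discard_sectors = max_sectors;
		lim->max_write_zeroes_sectors = max_sectors;
		lim->discard_granularity = max_sectors ? granularity : 0;
	}
]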
+diff --git a/drivers/bluetooth/btmtk.c b/drivers/bluetooth/btmtk.c
+index af487abe9932aa..05de2e6f563de4 100644
+--- a/drivers/bluetooth/btmtk.c
++++ b/drivers/bluetooth/btmtk.c
+@@ -1329,15 +1329,8 @@ int btmtk_usb_setup(struct hci_dev *hdev)
+ 		fwname = FIRMWARE_MT7668;
+ 		break;
+ 	case 0x7922:
+-	case 0x7961:
+ 	case 0x7925:
+-		/* Reset the device to ensure it's in the initial state before
+-		 * downloading the firmware to ensure.
+-		 */
+-
+-		if (!test_bit(BTMTK_FIRMWARE_LOADED, &btmtk_data->flags))
+-			btmtk_usb_subsys_reset(hdev, dev_id);
+-
++	case 0x7961:
+ 		btmtk_fw_get_filename(fw_bin_name, sizeof(fw_bin_name), dev_id,
+ 				      fw_version, fw_flavor);
+ 
+@@ -1345,12 +1338,9 @@ int btmtk_usb_setup(struct hci_dev *hdev)
+ 						btmtk_usb_hci_wmt_sync);
+ 		if (err < 0) {
+ 			bt_dev_err(hdev, "Failed to set up firmware (%d)", err);
+-			clear_bit(BTMTK_FIRMWARE_LOADED, &btmtk_data->flags);
+ 			return err;
+ 		}
+ 
+-		set_bit(BTMTK_FIRMWARE_LOADED, &btmtk_data->flags);
+-
+ 		/* It's Device EndPoint Reset Option Register */
+ 		err = btmtk_usb_uhw_reg_write(hdev, MTK_EP_RST_OPT,
+ 					      MTK_EP_RST_IN_OUT_OPT);
+diff --git a/drivers/clocksource/i8253.c b/drivers/clocksource/i8253.c
+index 39f7c2d736d169..b603c25f3dfaac 100644
+--- a/drivers/clocksource/i8253.c
++++ b/drivers/clocksource/i8253.c
+@@ -103,7 +103,7 @@ int __init clocksource_i8253_init(void)
+ #ifdef CONFIG_CLKEVT_I8253
+ void clockevent_i8253_disable(void)
+ {
+-	raw_spin_lock(&i8253_lock);
++	guard(raw_spinlock_irqsave)(&i8253_lock);
+ 
+ 	/*
+ 	 * Writing the MODE register should stop the counter, according to
+@@ -132,8 +132,6 @@ void clockevent_i8253_disable(void)
+ 	outb_p(0, PIT_CH0);
+ 
+ 	outb_p(0x30, PIT_MODE);
+-
+-	raw_spin_unlock(&i8253_lock);
+ }
+ 
+ static int pit_shutdown(struct clock_event_device *evt)
+diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
+index f8934d049d686f..993615fa490ebd 100644
+--- a/drivers/firmware/arm_scmi/driver.c
++++ b/drivers/firmware/arm_scmi/driver.c
+@@ -1219,7 +1219,8 @@ static void xfer_put(const struct scmi_protocol_handle *ph,
+ }
+ 
+ static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo,
+-				      struct scmi_xfer *xfer, ktime_t stop)
++				      struct scmi_xfer *xfer, ktime_t stop,
++				      bool *ooo)
+ {
+ 	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
+ 
+@@ -1228,7 +1229,7 @@ static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo,
+ 	 * in case of out-of-order receptions of delayed responses
+ 	 */
+ 	return info->desc->ops->poll_done(cinfo, xfer) ||
+-	       try_wait_for_completion(&xfer->done) ||
++	       (*ooo = try_wait_for_completion(&xfer->done)) ||
+ 	       ktime_after(ktime_get(), stop);
+ }
+ 
+@@ -1245,15 +1246,17 @@ static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc,
+ 		 * itself to support synchronous commands replies.
+ 		 */
+ 		if (!desc->sync_cmds_completed_on_ret) {
++			bool ooo = false;
++
+ 			/*
+ 			 * Poll on xfer using transport provided .poll_done();
+ 			 * assumes no completion interrupt was available.
+ 			 */
+ 			ktime_t stop = ktime_add_ms(ktime_get(), timeout_ms);
+ 
+-			spin_until_cond(scmi_xfer_done_no_timeout(cinfo,
+-								  xfer, stop));
+-			if (ktime_after(ktime_get(), stop)) {
++			spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer,
++								  stop, &ooo));
++			if (!ooo && !info->desc->ops->poll_done(cinfo, xfer)) {
+ 				dev_err(dev,
+ 					"timed out in resp(caller: %pS) - polling\n",
+ 					(void *)_RET_IP_);
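
[Editor's note: the new 'ooo' flag records whether the transfer completed
out-of-band while polling, so the timeout verdict re-checks poll_done()
instead of trusting the clock alone, closing the window where completion raced
the deadline. The shape of the fix, as a fragmentary sketch:

	done = poll_done(...) || (ooo = try_wait_for_completion(...));
	/* spin until done or ktime_after(ktime_get(), stop) */
	...
	if (!ooo && !poll_done(...))	/* truly timed out, not a photo finish */
		ret = -ETIMEDOUT;
]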
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+index 2a1f3dbb14d3f4..3b6254de2c0e5f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+@@ -66,7 +66,6 @@
+ #define VCN_ENC_CMD_REG_WAIT		0x0000000c
+ 
+ #define VCN_AON_SOC_ADDRESS_2_0 	0x1f800
+-#define VCN1_AON_SOC_ADDRESS_3_0 	0x48000
+ #define VCN_VID_IP_ADDRESS_2_0		0x0
+ #define VCN_AON_IP_ADDRESS_2_0		0x30000
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
+index 194026e9be3331..1ca1bbe7784e50 100644
+--- a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
+@@ -42,7 +42,12 @@ static void hdp_v4_0_flush_hdp(struct amdgpu_device *adev,
+ {
+ 	if (!ring || !ring->funcs->emit_wreg) {
+ 		WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
+-		RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
++		/* We just need to read back a register to post the write.
++		 * Reading back the remapped register causes problems on
++		 * some platforms so just read back the memory size register.
++		 */
++		if (adev->nbio.funcs->get_memsize)
++			adev->nbio.funcs->get_memsize(adev);
+ 	} else {
+ 		amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
+ 	}
+diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c
+index d3962d46908811..40705e13ca567b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c
+@@ -33,7 +33,12 @@ static void hdp_v5_0_flush_hdp(struct amdgpu_device *adev,
+ {
+ 	if (!ring || !ring->funcs->emit_wreg) {
+ 		WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
+-		RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
++		/* We just need to read back a register to post the write.
++		 * Reading back the remapped register causes problems on
++		 * some platforms so just read back the memory size register.
++		 */
++		if (adev->nbio.funcs->get_memsize)
++			adev->nbio.funcs->get_memsize(adev);
+ 	} else {
+ 		amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
+ 	}
+diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c b/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c
+index f52552c5fa27b6..6b9f2e1d9d690d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c
++++ b/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c
+@@ -34,7 +34,17 @@ static void hdp_v5_2_flush_hdp(struct amdgpu_device *adev,
+ 	if (!ring || !ring->funcs->emit_wreg) {
+ 		WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2,
+ 			0);
+-		RREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
++		if (amdgpu_sriov_vf(adev)) {
++			/* this is fine because SR_IOV doesn't remap the register */
++			RREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
++		} else {
++			/* We just need to read back a register to post the write.
++			 * Reading back the remapped register causes problems on
++			 * some platforms so just read back the memory size register.
++			 */
++			if (adev->nbio.funcs->get_memsize)
++				adev->nbio.funcs->get_memsize(adev);
++		}
+ 	} else {
+ 		amdgpu_ring_emit_wreg(ring,
+ 			(adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2,
+diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c
+index 6948fe9956ce47..20da813299f04a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c
+@@ -36,7 +36,12 @@ static void hdp_v6_0_flush_hdp(struct amdgpu_device *adev,
+ {
+ 	if (!ring || !ring->funcs->emit_wreg) {
+ 		WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
+-		RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
++		/* We just need to read back a register to post the write.
++		 * Reading back the remapped register causes problems on
++		 * some platforms so just read back the memory size register.
++		 */
++		if (adev->nbio.funcs->get_memsize)
++			adev->nbio.funcs->get_memsize(adev);
+ 	} else {
+ 		amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
+ 	}
+diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c
+index 63820329f67eb6..f7ecdd15d52827 100644
+--- a/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c
+@@ -33,7 +33,12 @@ static void hdp_v7_0_flush_hdp(struct amdgpu_device *adev,
+ {
+ 	if (!ring || !ring->funcs->emit_wreg) {
+ 		WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
+-		RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
++		/* We just need to read back a register to post the write.
++		 * Reading back the remapped register causes problems on
++		 * some platforms so just read back the memory size register.
++		 */
++		if (adev->nbio.funcs->get_memsize)
++			adev->nbio.funcs->get_memsize(adev);
+ 	} else {
+ 		amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
+ 	}
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+index bfd067e2d2f1d2..f0edaabdcde5d0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+@@ -39,6 +39,7 @@
+ 
+ #define VCN_VID_SOC_ADDRESS_2_0					0x1fa00
+ #define VCN1_VID_SOC_ADDRESS_3_0				0x48200
++#define VCN1_AON_SOC_ADDRESS_3_0				0x48000
+ 
+ #define mmUVD_CONTEXT_ID_INTERNAL_OFFSET			0x1fd
+ #define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET			0x503
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+index 04e9e806e3187f..e4d0c0310e76d4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+@@ -39,6 +39,7 @@
+ 
+ #define VCN_VID_SOC_ADDRESS_2_0					0x1fa00
+ #define VCN1_VID_SOC_ADDRESS_3_0				0x48200
++#define VCN1_AON_SOC_ADDRESS_3_0				0x48000
+ 
+ #define mmUVD_CONTEXT_ID_INTERNAL_OFFSET			0x27
+ #define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET			0x0f
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+index 65dd68b322806e..be86f86b49e974 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+@@ -40,6 +40,7 @@
+ 
+ #define VCN_VID_SOC_ADDRESS_2_0					0x1fa00
+ #define VCN1_VID_SOC_ADDRESS_3_0				0x48200
++#define VCN1_AON_SOC_ADDRESS_3_0				0x48000
+ 
+ #define mmUVD_CONTEXT_ID_INTERNAL_OFFSET			0x27
+ #define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET			0x0f
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+index 26c6f10a8c8fae..f391f0c54043db 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+@@ -46,6 +46,7 @@
+ 
+ #define VCN_VID_SOC_ADDRESS_2_0							0x1fb00
+ #define VCN1_VID_SOC_ADDRESS_3_0						0x48300
++#define VCN1_AON_SOC_ADDRESS_3_0						0x48000
+ 
+ #define VCN_HARVEST_MMSCH								0
+ 
+@@ -575,7 +576,8 @@ static void vcn_v4_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx
+ 
+ 	/* VCN global tiling registers */
+ 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
+-		VCN, 0, regUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
++			VCN, inst_idx, regUVD_GFX10_ADDR_CONFIG),
++			adev->gfx.config.gb_addr_config, 0, indirect);
+ }
+ 
+ /**
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
+index 84c6b0f5c4c0b2..77542dabec59f4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
+@@ -44,6 +44,7 @@
+ 
+ #define VCN_VID_SOC_ADDRESS_2_0		0x1fb00
+ #define VCN1_VID_SOC_ADDRESS_3_0	0x48300
++#define VCN1_AON_SOC_ADDRESS_3_0	0x48000
+ 
+ static const struct amdgpu_hwip_reg_entry vcn_reg_list_4_0_3[] = {
+ 	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_POWER_STATUS),
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
+index 9d4f5352a62c8c..e0b02bf1c5639e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
+@@ -46,6 +46,7 @@
+ 
+ #define VCN_VID_SOC_ADDRESS_2_0						0x1fb00
+ #define VCN1_VID_SOC_ADDRESS_3_0					(0x48300 + 0x38000)
++#define VCN1_AON_SOC_ADDRESS_3_0					(0x48000 + 0x38000)
+ 
+ #define VCN_HARVEST_MMSCH							0
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
+index c305386358b4b4..d19eec4d479050 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
+@@ -488,7 +488,8 @@ static void vcn_v5_0_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_i
+ 
+ 	/* VCN global tiling registers */
+ 	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+-		VCN, 0, regUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
++		VCN, inst_idx, regUVD_GFX10_ADDR_CONFIG),
++		adev->gfx.config.gb_addr_config, 0, indirect);
+ 
+ 	return;
+ }
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 115fb3bc456437..66c50a09d2dfd3 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -666,15 +666,21 @@ static void dm_crtc_high_irq(void *interrupt_params)
+ 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
+ 
+ 	if (acrtc->dm_irq_params.stream &&
+-	    acrtc->dm_irq_params.vrr_params.supported &&
+-	    acrtc->dm_irq_params.freesync_config.state ==
+-		    VRR_STATE_ACTIVE_VARIABLE) {
++		acrtc->dm_irq_params.vrr_params.supported) {
++		bool replay_en = acrtc->dm_irq_params.stream->link->replay_settings.replay_feature_enabled;
++		bool psr_en = acrtc->dm_irq_params.stream->link->psr_settings.psr_feature_enabled;
++		bool fs_active_var_en = acrtc->dm_irq_params.freesync_config.state == VRR_STATE_ACTIVE_VARIABLE;
++
+ 		mod_freesync_handle_v_update(adev->dm.freesync_module,
+ 					     acrtc->dm_irq_params.stream,
+ 					     &acrtc->dm_irq_params.vrr_params);
+ 
+-		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
+-					   &acrtc->dm_irq_params.vrr_params.adjust);
++		/* update vmin_vmax only if freesync is enabled, or only if PSR and REPLAY are disabled */
++		if (fs_active_var_en || (!fs_active_var_en && !replay_en && !psr_en)) {
++			dc_stream_adjust_vmin_vmax(adev->dm.dc,
++					acrtc->dm_irq_params.stream,
++					&acrtc->dm_irq_params.vrr_params.adjust);
++		}
+ 	}
+ 
+ 	/*
+@@ -12526,7 +12532,7 @@ int amdgpu_dm_process_dmub_aux_transfer_sync(
+ 		 * Transient states before tunneling is enabled could
+ 		 * lead to this error. We can ignore this for now.
+ 		 */
+-		if (p_notify->result != AUX_RET_ERROR_PROTOCOL_ERROR) {
++		if (p_notify->result == AUX_RET_ERROR_PROTOCOL_ERROR) {
+ 			DRM_WARN("DPIA AUX failed on 0x%x(%d), error %d\n",
+ 					payload->address, payload->length,
+ 					p_notify->result);
+@@ -12535,22 +12541,14 @@ int amdgpu_dm_process_dmub_aux_transfer_sync(
+ 		goto out;
+ 	}
+ 
++	payload->reply[0] = adev->dm.dmub_notify->aux_reply.command & 0xF;
++	if (adev->dm.dmub_notify->aux_reply.command & 0xF0)
++		/* The reply is stored in the top nibble of the command. */
++		payload->reply[0] = (adev->dm.dmub_notify->aux_reply.command >> 4) & 0xF;
+ 
+-	payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
+-	if (!payload->write && p_notify->aux_reply.length &&
+-			(payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK)) {
+-
+-		if (payload->length != p_notify->aux_reply.length) {
+-			DRM_WARN("invalid read length %d from DPIA AUX 0x%x(%d)!\n",
+-				p_notify->aux_reply.length,
+-					payload->address, payload->length);
+-			*operation_result = AUX_RET_ERROR_INVALID_REPLY;
+-			goto out;
+-		}
+-
++	if (!payload->write && p_notify->aux_reply.length)
+ 		memcpy(payload->data, p_notify->aux_reply.data,
+ 				p_notify->aux_reply.length);
+-	}
+ 
+ 	/* success */
+ 	ret = p_notify->aux_reply.length;
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+index 5bdf44c692180c..dca8384af95df0 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+@@ -51,6 +51,9 @@
+ 
+ #define PEAK_FACTOR_X1000 1006
+ 
++/*
++ * This function handles both native AUX and I2C-Over-AUX transactions.
++ */
+ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
+ 				  struct drm_dp_aux_msg *msg)
+ {
+@@ -87,15 +90,25 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
+ 	if (adev->dm.aux_hpd_discon_quirk) {
+ 		if (msg->address == DP_SIDEBAND_MSG_DOWN_REQ_BASE &&
+ 			operation_result == AUX_RET_ERROR_HPD_DISCON) {
+-			result = 0;
++			result = msg->size;
+ 			operation_result = AUX_RET_SUCCESS;
+ 		}
+ 	}
+ 
+-	if (payload.write && result >= 0)
+-		result = msg->size;
++	/*
++	 * result equals to 0 includes the cases of AUX_DEFER/I2C_DEFER
++	 */
++	if (payload.write && result >= 0) {
++		if (result) {
++			/*one byte indicating partially written bytes. Force 0 to retry*/
++			drm_info(adev_to_drm(adev), "amdgpu: AUX partially written\n");
++			result = 0;
++		} else if (!payload.reply[0])
++			/*I2C_ACK|AUX_ACK*/
++			result = msg->size;
++	}
+ 
+-	if (result < 0)
++	if (result < 0) {
+ 		switch (operation_result) {
+ 		case AUX_RET_SUCCESS:
+ 			break;
+@@ -114,6 +127,13 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
+ 			break;
+ 		}
+ 
++		drm_info(adev_to_drm(adev), "amdgpu: DP AUX transfer fail:%d\n", operation_result);
++	}
++
++	if (payload.reply[0])
++		drm_info(adev_to_drm(adev), "amdgpu: AUX reply command not ACK: 0x%02x.",
++			payload.reply[0]);
++
+ 	return result;
+ }
+ 
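
[Editor's note: DP AUX reply codes live in the top nibble of the returned
command byte, which is what the amdgpu_dm hunk earlier in this patch now
decodes. A compact sketch (helper name hypothetical):

	static u8 aux_reply_code_sketch(u8 cmd)
	{
		if (cmd & 0xf0)
			return (cmd >> 4) & 0xf;	/* reply in top nibble */
		return cmd & 0xf;
	}
]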
+diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
+index 81ba8809a3b4c5..92a3fff1e26165 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
++++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
+@@ -929,7 +929,9 @@ static void populate_dml_surface_cfg_from_plane_state(enum dml_project_id dml2_p
+ 	}
+ }
+ 
+-static void get_scaler_data_for_plane(const struct dc_plane_state *in, struct dc_state *context, struct scaler_data *out)
++static struct scaler_data *get_scaler_data_for_plane(
++		const struct dc_plane_state *in,
++		struct dc_state *context)
+ {
+ 	int i;
+ 	struct pipe_ctx *temp_pipe = &context->res_ctx.temp_pipe;
+@@ -950,7 +952,7 @@ static void get_scaler_data_for_plane(const struct dc_plane_state *in, struct dc
+ 	}
+ 
+ 	ASSERT(i < MAX_PIPES);
+-	memcpy(out, &temp_pipe->plane_res.scl_data, sizeof(*out));
++	return &temp_pipe->plane_res.scl_data;
+ }
+ 
+ static void populate_dummy_dml_plane_cfg(struct dml_plane_cfg_st *out, unsigned int location,
+@@ -1013,11 +1015,7 @@ static void populate_dml_plane_cfg_from_plane_state(struct dml_plane_cfg_st *out
+ 						    const struct dc_plane_state *in, struct dc_state *context,
+ 						    const struct soc_bounding_box_st *soc)
+ {
+-	struct scaler_data *scaler_data = kzalloc(sizeof(*scaler_data), GFP_KERNEL);
+-	if (!scaler_data)
+-		return;
+-
+-	get_scaler_data_for_plane(in, context, scaler_data);
++	struct scaler_data *scaler_data = get_scaler_data_for_plane(in, context);
+ 
+ 	out->CursorBPP[location] = dml_cur_32bit;
+ 	out->CursorWidth[location] = 256;
+@@ -1082,8 +1080,6 @@ static void populate_dml_plane_cfg_from_plane_state(struct dml_plane_cfg_st *out
+ 	out->DynamicMetadataTransmittedBytes[location] = 0;
+ 
+ 	out->NumberOfCursors[location] = 1;
+-
+-	kfree(scaler_data);
+ }
+ 
+ static unsigned int map_stream_to_dml_display_cfg(const struct dml2_context *dml2,
+diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
+index 06381c62820975..d041ff542a4eed 100644
+--- a/drivers/gpu/drm/panel/panel-simple.c
++++ b/drivers/gpu/drm/panel/panel-simple.c
+@@ -1027,27 +1027,28 @@ static const struct panel_desc auo_g070vvn01 = {
+ 	},
+ };
+ 
+-static const struct drm_display_mode auo_g101evn010_mode = {
+-	.clock = 68930,
+-	.hdisplay = 1280,
+-	.hsync_start = 1280 + 82,
+-	.hsync_end = 1280 + 82 + 2,
+-	.htotal = 1280 + 82 + 2 + 84,
+-	.vdisplay = 800,
+-	.vsync_start = 800 + 8,
+-	.vsync_end = 800 + 8 + 2,
+-	.vtotal = 800 + 8 + 2 + 6,
++static const struct display_timing auo_g101evn010_timing = {
++	.pixelclock = { 64000000, 68930000, 85000000 },
++	.hactive = { 1280, 1280, 1280 },
++	.hfront_porch = { 8, 64, 256 },
++	.hback_porch = { 8, 64, 256 },
++	.hsync_len = { 40, 168, 767 },
++	.vactive = { 800, 800, 800 },
++	.vfront_porch = { 4, 8, 100 },
++	.vback_porch = { 4, 8, 100 },
++	.vsync_len = { 8, 16, 223 },
+ };
+ 
+ static const struct panel_desc auo_g101evn010 = {
+-	.modes = &auo_g101evn010_mode,
+-	.num_modes = 1,
++	.timings = &auo_g101evn010_timing,
++	.num_timings = 1,
+ 	.bpc = 6,
+ 	.size = {
+ 		.width = 216,
+ 		.height = 135,
+ 	},
+ 	.bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
++	.bus_flags = DRM_BUS_FLAG_DE_HIGH,
+ 	.connector_type = DRM_MODE_CONNECTOR_LVDS,
+ };
+ 
+diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
+index 4a6aa36619fe39..ad32e584deeec4 100644
+--- a/drivers/gpu/drm/v3d/v3d_sched.c
++++ b/drivers/gpu/drm/v3d/v3d_sched.c
+@@ -728,11 +728,16 @@ v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job)
+ 	return DRM_GPU_SCHED_STAT_NOMINAL;
+ }
+ 
+-/* If the current address or return address have changed, then the GPU
+- * has probably made progress and we should delay the reset.  This
+- * could fail if the GPU got in an infinite loop in the CL, but that
+- * is pretty unlikely outside of an i-g-t testcase.
+- */
++static void
++v3d_sched_skip_reset(struct drm_sched_job *sched_job)
++{
++	struct drm_gpu_scheduler *sched = sched_job->sched;
++
++	spin_lock(&sched->job_list_lock);
++	list_add(&sched_job->list, &sched->pending_list);
++	spin_unlock(&sched->job_list_lock);
++}
++
+ static enum drm_gpu_sched_stat
+ v3d_cl_job_timedout(struct drm_sched_job *sched_job, enum v3d_queue q,
+ 		    u32 *timedout_ctca, u32 *timedout_ctra)
+@@ -742,9 +747,16 @@ v3d_cl_job_timedout(struct drm_sched_job *sched_job, enum v3d_queue q,
+ 	u32 ctca = V3D_CORE_READ(0, V3D_CLE_CTNCA(q));
+ 	u32 ctra = V3D_CORE_READ(0, V3D_CLE_CTNRA(q));
+ 
++	/* If the current address or return address have changed, then the GPU
++	 * has probably made progress and we should delay the reset. This
++	 * could fail if the GPU got in an infinite loop in the CL, but that
++	 * is pretty unlikely outside of an i-g-t testcase.
++	 */
+ 	if (*timedout_ctca != ctca || *timedout_ctra != ctra) {
+ 		*timedout_ctca = ctca;
+ 		*timedout_ctra = ctra;
++
++		v3d_sched_skip_reset(sched_job);
+ 		return DRM_GPU_SCHED_STAT_NOMINAL;
+ 	}
+ 
+@@ -784,11 +796,13 @@ v3d_csd_job_timedout(struct drm_sched_job *sched_job)
+ 	struct v3d_dev *v3d = job->base.v3d;
+ 	u32 batches = V3D_CORE_READ(0, V3D_CSD_CURRENT_CFG4(v3d->ver));
+ 
+-	/* If we've made progress, skip reset and let the timer get
+-	 * rearmed.
++	/* If we've made progress, skip reset, add the job to the pending
++	 * list, and let the timer get rearmed.
+ 	 */
+ 	if (job->timedout_batches != batches) {
+ 		job->timedout_batches = batches;
++
++		v3d_sched_skip_reset(sched_job);
+ 		return DRM_GPU_SCHED_STAT_NOMINAL;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/xe/tests/xe_mocs.c b/drivers/gpu/drm/xe/tests/xe_mocs.c
+index 79be73b4a02ba4..61a7d20ce42bfe 100644
+--- a/drivers/gpu/drm/xe/tests/xe_mocs.c
++++ b/drivers/gpu/drm/xe/tests/xe_mocs.c
+@@ -43,12 +43,14 @@ static void read_l3cc_table(struct xe_gt *gt,
+ {
+ 	struct kunit *test = kunit_get_current_test();
+ 	u32 l3cc, l3cc_expected;
+-	unsigned int i;
++	unsigned int fw_ref, i;
+ 	u32 reg_val;
+-	u32 ret;
+ 
+-	ret = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+-	KUNIT_ASSERT_EQ_MSG(test, ret, 0, "Forcewake Failed.\n");
++	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
++	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
++		xe_force_wake_put(gt_to_fw(gt), fw_ref);
++		KUNIT_ASSERT_TRUE_MSG(test, true, "Forcewake Failed.\n");
++	}
+ 
+ 	for (i = 0; i < info->num_mocs_regs; i++) {
+ 		if (!(i & 1)) {
+@@ -72,7 +74,7 @@ static void read_l3cc_table(struct xe_gt *gt,
+ 		KUNIT_EXPECT_EQ_MSG(test, l3cc_expected, l3cc,
+ 				    "l3cc idx=%u has incorrect val.\n", i);
+ 	}
+-	xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
++	xe_force_wake_put(gt_to_fw(gt), fw_ref);
+ }
+ 
+ static void read_mocs_table(struct xe_gt *gt,
+@@ -80,15 +82,14 @@ static void read_mocs_table(struct xe_gt *gt,
+ {
+ 	struct kunit *test = kunit_get_current_test();
+ 	u32 mocs, mocs_expected;
+-	unsigned int i;
++	unsigned int fw_ref, i;
+ 	u32 reg_val;
+-	u32 ret;
+ 
+ 	KUNIT_EXPECT_TRUE_MSG(test, info->unused_entries_index,
+ 			      "Unused entries index should have been defined\n");
+ 
+-	ret = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+-	KUNIT_ASSERT_EQ_MSG(test, ret, 0, "Forcewake Failed.\n");
++	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
++	KUNIT_ASSERT_NE_MSG(test, fw_ref, 0, "Forcewake Failed.\n");
+ 
+ 	for (i = 0; i < info->num_mocs_regs; i++) {
+ 		if (regs_are_mcr(gt))
+@@ -106,7 +107,7 @@ static void read_mocs_table(struct xe_gt *gt,
+ 				    "mocs reg 0x%x has incorrect val.\n", i);
+ 	}
+ 
+-	xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
++	xe_force_wake_put(gt_to_fw(gt), fw_ref);
+ }
+ 
+ static int mocs_kernel_test_run_device(struct xe_device *xe)
+diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c
+index 79c426dc250597..db540c8be6c7c5 100644
+--- a/drivers/gpu/drm/xe/xe_gt_pagefault.c
++++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c
+@@ -423,9 +423,16 @@ static int xe_alloc_pf_queue(struct xe_gt *gt, struct pf_queue *pf_queue)
+ 	num_eus = bitmap_weight(gt->fuse_topo.eu_mask_per_dss,
+ 				XE_MAX_EU_FUSE_BITS) * num_dss;
+ 
+-	/* user can issue separate page faults per EU and per CS */
++	/*
++	 * user can issue separate page faults per EU and per CS
++	 *
++	 * XXX: Multiplier required as compute UMD are getting PF queue errors
++	 * without it. Follow on why this multiplier is required.
++	 */
++#define PF_MULTIPLIER	8
+ 	pf_queue->num_dw =
+-		(num_eus + XE_NUM_HW_ENGINES) * PF_MSG_LEN_DW;
++		(num_eus + XE_NUM_HW_ENGINES) * PF_MSG_LEN_DW * PF_MULTIPLIER;
++#undef PF_MULTIPLIER
+ 
+ 	pf_queue->gt = gt;
+ 	pf_queue->data = devm_kcalloc(xe->drm.dev, pf_queue->num_dw,
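
[Editor's note, with illustrative numbers since PF_MSG_LEN_DW's value is not
shown in this hunk: assuming 1024 EUs, 64 hardware engines and a 4-dword
message, the queue grows from

	(1024 + 64) * 4     =  4352 dwords before the change, to
	(1024 + 64) * 4 * 8 = 34816 dwords with PF_MULTIPLIER applied.
]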
+diff --git a/drivers/iio/accel/adis16201.c b/drivers/iio/accel/adis16201.c
+index d054721859b3b5..99b05548b7bdbb 100644
+--- a/drivers/iio/accel/adis16201.c
++++ b/drivers/iio/accel/adis16201.c
+@@ -211,9 +211,9 @@ static const struct iio_chan_spec adis16201_channels[] = {
+ 			BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14),
+ 	ADIS_AUX_ADC_CHAN(ADIS16201_AUX_ADC_REG, ADIS16201_SCAN_AUX_ADC, 0, 12),
+ 	ADIS_INCLI_CHAN(X, ADIS16201_XINCL_OUT_REG, ADIS16201_SCAN_INCLI_X,
+-			BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14),
++			BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 12),
+ 	ADIS_INCLI_CHAN(Y, ADIS16201_YINCL_OUT_REG, ADIS16201_SCAN_INCLI_Y,
+-			BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14),
++			BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 12),
+ 	IIO_CHAN_SOFT_TIMESTAMP(7)
+ };
+ 
+diff --git a/drivers/iio/accel/adxl355_core.c b/drivers/iio/accel/adxl355_core.c
+index eabaefa92f19d1..5e1946828b968a 100644
+--- a/drivers/iio/accel/adxl355_core.c
++++ b/drivers/iio/accel/adxl355_core.c
+@@ -231,7 +231,7 @@ struct adxl355_data {
+ 		u8 transf_buf[3];
+ 		struct {
+ 			u8 buf[14];
+-			s64 ts;
++			aligned_s64 ts;
+ 		} buffer;
+ 	} __aligned(IIO_DMA_MINALIGN);
+ };
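
[Editor's note: several IIO fixes in this patch (here and in the dln2-adc and
inv_mpu6050 hunks below) converge on the same pattern: the timestamp slot that
the IIO core writes via iio_push_to_buffers_with_timestamp() must be 8-byte
aligned within the scan buffer. The idiom, as a sketch:

	struct scan_buf_sketch {
		u8 data[14];	/* channel payload */
		aligned_s64 ts;	/* naturally aligned timestamp slot */
	};

aligned_s64 keeps the alignment guarantee even when surrounding attributes
would otherwise repack the struct.]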
+diff --git a/drivers/iio/accel/adxl367.c b/drivers/iio/accel/adxl367.c
+index e790a66d86c79f..d44d52e5a51401 100644
+--- a/drivers/iio/accel/adxl367.c
++++ b/drivers/iio/accel/adxl367.c
+@@ -604,18 +604,14 @@ static int _adxl367_set_odr(struct adxl367_state *st, enum adxl367_odr odr)
+ 	if (ret)
+ 		return ret;
+ 
++	st->odr = odr;
++
+ 	/* Activity timers depend on ODR */
+ 	ret = _adxl367_set_act_time_ms(st, st->act_time_ms);
+ 	if (ret)
+ 		return ret;
+ 
+-	ret = _adxl367_set_inact_time_ms(st, st->inact_time_ms);
+-	if (ret)
+-		return ret;
+-
+-	st->odr = odr;
+-
+-	return 0;
++	return _adxl367_set_inact_time_ms(st, st->inact_time_ms);
+ }
+ 
+ static int adxl367_set_odr(struct iio_dev *indio_dev, enum adxl367_odr odr)
+diff --git a/drivers/iio/adc/ad7606_spi.c b/drivers/iio/adc/ad7606_spi.c
+index 62ec1219530799..32a5448116a135 100644
+--- a/drivers/iio/adc/ad7606_spi.c
++++ b/drivers/iio/adc/ad7606_spi.c
+@@ -127,7 +127,7 @@ static int ad7606_spi_reg_read(struct ad7606_state *st, unsigned int addr)
+ 		{
+ 			.tx_buf = &st->d16[0],
+ 			.len = 2,
+-			.cs_change = 0,
++			.cs_change = 1,
+ 		}, {
+ 			.rx_buf = &st->d16[1],
+ 			.len = 2,
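
[Editor's note: setting .cs_change = 1 on a non-final transfer makes the SPI
core deassert chip select between that transfer and the next, which this
register read (address write, then data read) requires. Usage sketch, buffer
names hypothetical:

	struct spi_transfer xfers[2] = {
		{ .tx_buf = addr_buf, .len = 2, .cs_change = 1 },
		{ .rx_buf = data_buf, .len = 2 },
	};

	ret = spi_sync_transfer(spi, xfers, ARRAY_SIZE(xfers));
]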
+diff --git a/drivers/iio/adc/dln2-adc.c b/drivers/iio/adc/dln2-adc.c
+index de7252a10047d5..84c23d3def5978 100644
+--- a/drivers/iio/adc/dln2-adc.c
++++ b/drivers/iio/adc/dln2-adc.c
+@@ -481,7 +481,7 @@ static irqreturn_t dln2_adc_trigger_h(int irq, void *p)
+ 	struct iio_dev *indio_dev = pf->indio_dev;
+ 	struct {
+ 		__le16 values[DLN2_ADC_MAX_CHANNELS];
+-		int64_t timestamp_space;
++		aligned_s64 timestamp_space;
+ 	} data;
+ 	struct dln2_adc_get_all_vals dev_data;
+ 	struct dln2_adc *dln2 = iio_priv(indio_dev);
+diff --git a/drivers/iio/adc/rockchip_saradc.c b/drivers/iio/adc/rockchip_saradc.c
+index dfd47a6e1f4a1b..e4dcd9e782aff9 100644
+--- a/drivers/iio/adc/rockchip_saradc.c
++++ b/drivers/iio/adc/rockchip_saradc.c
+@@ -480,15 +480,6 @@ static int rockchip_saradc_probe(struct platform_device *pdev)
+ 	if (info->reset)
+ 		rockchip_saradc_reset_controller(info->reset);
+ 
+-	/*
+-	 * Use a default value for the converter clock.
+-	 * This may become user-configurable in the future.
+-	 */
+-	ret = clk_set_rate(info->clk, info->data->clk_rate);
+-	if (ret < 0)
+-		return dev_err_probe(&pdev->dev, ret,
+-				     "failed to set adc clk rate\n");
+-
+ 	ret = regulator_enable(info->vref);
+ 	if (ret < 0)
+ 		return dev_err_probe(&pdev->dev, ret,
+@@ -515,6 +506,14 @@ static int rockchip_saradc_probe(struct platform_device *pdev)
+ 	if (IS_ERR(info->clk))
+ 		return dev_err_probe(&pdev->dev, PTR_ERR(info->clk),
+ 				     "failed to get adc clock\n");
++	/*
++	 * Use a default value for the converter clock.
++	 * This may become user-configurable in the future.
++	 */
++	ret = clk_set_rate(info->clk, info->data->clk_rate);
++	if (ret < 0)
++		return dev_err_probe(&pdev->dev, ret,
++				     "failed to set adc clk rate\n");
+ 
+ 	platform_set_drvdata(pdev, indio_dev);
+ 
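
[Editor's note: the reorder matters because info->clk is only obtained further
down in probe; in the old order clk_set_rate() ran on a not-yet-initialised
(NULL) handle, and the clk API treats a NULL clk as a successful no-op, so the
requested rate was presumably never applied. The fixed sequence follows the
usual probe pattern, as a sketch:

	info->clk = devm_clk_get(&pdev->dev, ...);
	if (IS_ERR(info->clk))
		return PTR_ERR(info->clk);

	ret = clk_set_rate(info->clk, info->data->clk_rate);
]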
+diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
+index 3d3b27f28c9d1c..273196e647a2b5 100644
+--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
++++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
+@@ -50,7 +50,7 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
+ 	u16 fifo_count;
+ 	u32 fifo_period;
+ 	s64 timestamp;
+-	u8 data[INV_MPU6050_OUTPUT_DATA_SIZE];
++	u8 data[INV_MPU6050_OUTPUT_DATA_SIZE] __aligned(8);
+ 	size_t i, nb;
+ 
+ 	mutex_lock(&st->lock);
+diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
+index 0a7cd8c1aa3313..8a9d2593576a2a 100644
+--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
++++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
+@@ -392,6 +392,9 @@ int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw)
+ 	if (fifo_status & cpu_to_le16(ST_LSM6DSX_FIFO_EMPTY_MASK))
+ 		return 0;
+ 
++	if (!pattern_len)
++		pattern_len = ST_LSM6DSX_SAMPLE_SIZE;
++
+ 	fifo_len = (le16_to_cpu(fifo_status) & fifo_diff_mask) *
+ 		   ST_LSM6DSX_CHAN_SIZE;
+ 	fifo_len = (fifo_len / pattern_len) * pattern_len;
+@@ -623,6 +626,9 @@ int st_lsm6dsx_read_tagged_fifo(struct st_lsm6dsx_hw *hw)
+ 	if (!fifo_len)
+ 		return 0;
+ 
++	if (!pattern_len)
++		pattern_len = ST_LSM6DSX_TAGGED_SAMPLE_SIZE;
++
+ 	for (read_len = 0; read_len < fifo_len; read_len += pattern_len) {
+ 		err = st_lsm6dsx_read_block(hw,
+ 					    ST_LSM6DSX_REG_FIFO_OUT_TAG_ADDR,
+diff --git a/drivers/iio/temperature/maxim_thermocouple.c b/drivers/iio/temperature/maxim_thermocouple.c
+index c28a7a6dea5f12..555a61e2f3fdd1 100644
+--- a/drivers/iio/temperature/maxim_thermocouple.c
++++ b/drivers/iio/temperature/maxim_thermocouple.c
+@@ -121,9 +121,9 @@ static const struct maxim_thermocouple_chip maxim_thermocouple_chips[] = {
+ struct maxim_thermocouple_data {
+ 	struct spi_device *spi;
+ 	const struct maxim_thermocouple_chip *chip;
++	char tc_type;
+ 
+ 	u8 buffer[16] __aligned(IIO_DMA_MINALIGN);
+-	char tc_type;
+ };
+ 
+ static int maxim_thermocouple_read(struct maxim_thermocouple_data *data,
+diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
+index a6c7951011308c..e14d8f316ad8f2 100644
+--- a/drivers/input/joystick/xpad.c
++++ b/drivers/input/joystick/xpad.c
+@@ -77,12 +77,13 @@
+  * xbox d-pads should map to buttons, as is required for DDR pads
+  * but we map them to axes when possible to simplify things
+  */
+-#define MAP_DPAD_TO_BUTTONS		(1 << 0)
+-#define MAP_TRIGGERS_TO_BUTTONS		(1 << 1)
+-#define MAP_STICKS_TO_NULL		(1 << 2)
+-#define MAP_SELECT_BUTTON		(1 << 3)
+-#define MAP_PADDLES			(1 << 4)
+-#define MAP_PROFILE_BUTTON		(1 << 5)
++#define MAP_DPAD_TO_BUTTONS		BIT(0)
++#define MAP_TRIGGERS_TO_BUTTONS		BIT(1)
++#define MAP_STICKS_TO_NULL		BIT(2)
++#define MAP_SHARE_BUTTON		BIT(3)
++#define MAP_PADDLES			BIT(4)
++#define MAP_PROFILE_BUTTON		BIT(5)
++#define MAP_SHARE_OFFSET		BIT(6)
+ 
+ #define DANCEPAD_MAP_CONFIG	(MAP_DPAD_TO_BUTTONS |			\
+ 				MAP_TRIGGERS_TO_BUTTONS | MAP_STICKS_TO_NULL)
+@@ -135,7 +136,7 @@ static const struct xpad_device {
+ 	{ 0x03f0, 0x048D, "HyperX Clutch", 0, XTYPE_XBOX360 },			/* wireless */
+ 	{ 0x03f0, 0x0495, "HyperX Clutch Gladiate", 0, XTYPE_XBOXONE },
+ 	{ 0x03f0, 0x07A0, "HyperX Clutch Gladiate RGB", 0, XTYPE_XBOXONE },
+-	{ 0x03f0, 0x08B6, "HyperX Clutch Gladiate", 0, XTYPE_XBOXONE },		/* v2 */
++	{ 0x03f0, 0x08B6, "HyperX Clutch Gladiate", MAP_SHARE_BUTTON, XTYPE_XBOXONE },		/* v2 */
+ 	{ 0x03f0, 0x09B4, "HyperX Clutch Tanto", 0, XTYPE_XBOXONE },
+ 	{ 0x044f, 0x0f00, "Thrustmaster Wheel", 0, XTYPE_XBOX },
+ 	{ 0x044f, 0x0f03, "Thrustmaster Wheel", 0, XTYPE_XBOX },
+@@ -159,7 +160,7 @@ static const struct xpad_device {
+ 	{ 0x045e, 0x0719, "Xbox 360 Wireless Receiver", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W },
+ 	{ 0x045e, 0x0b00, "Microsoft X-Box One Elite 2 pad", MAP_PADDLES, XTYPE_XBOXONE },
+ 	{ 0x045e, 0x0b0a, "Microsoft X-Box Adaptive Controller", MAP_PROFILE_BUTTON, XTYPE_XBOXONE },
+-	{ 0x045e, 0x0b12, "Microsoft Xbox Series S|X Controller", MAP_SELECT_BUTTON, XTYPE_XBOXONE },
++	{ 0x045e, 0x0b12, "Microsoft Xbox Series S|X Controller", MAP_SHARE_BUTTON | MAP_SHARE_OFFSET, XTYPE_XBOXONE },
+ 	{ 0x046d, 0xc21d, "Logitech Gamepad F310", 0, XTYPE_XBOX360 },
+ 	{ 0x046d, 0xc21e, "Logitech Gamepad F510", 0, XTYPE_XBOX360 },
+ 	{ 0x046d, 0xc21f, "Logitech Gamepad F710", 0, XTYPE_XBOX360 },
+@@ -205,13 +206,13 @@ static const struct xpad_device {
+ 	{ 0x0738, 0x9871, "Mad Catz Portable Drum", 0, XTYPE_XBOX360 },
+ 	{ 0x0738, 0xb726, "Mad Catz Xbox controller - MW2", 0, XTYPE_XBOX360 },
+ 	{ 0x0738, 0xb738, "Mad Catz MVC2TE Stick 2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+-	{ 0x0738, 0xbeef, "Mad Catz JOYTECH NEO SE Advanced GamePad", XTYPE_XBOX360 },
++	{ 0x0738, 0xbeef, "Mad Catz JOYTECH NEO SE Advanced GamePad", 0, XTYPE_XBOX360 },
+ 	{ 0x0738, 0xcb02, "Saitek Cyborg Rumble Pad - PC/Xbox 360", 0, XTYPE_XBOX360 },
+ 	{ 0x0738, 0xcb03, "Saitek P3200 Rumble Pad - PC/Xbox 360", 0, XTYPE_XBOX360 },
+ 	{ 0x0738, 0xcb29, "Saitek Aviator Stick AV8R02", 0, XTYPE_XBOX360 },
+ 	{ 0x0738, 0xf738, "Super SFIV FightStick TE S", 0, XTYPE_XBOX360 },
+ 	{ 0x07ff, 0xffff, "Mad Catz GamePad", 0, XTYPE_XBOX360 },
+-	{ 0x0b05, 0x1a38, "ASUS ROG RAIKIRI", 0, XTYPE_XBOXONE },
++	{ 0x0b05, 0x1a38, "ASUS ROG RAIKIRI", MAP_SHARE_BUTTON, XTYPE_XBOXONE },
+ 	{ 0x0b05, 0x1abb, "ASUS ROG RAIKIRI PRO", 0, XTYPE_XBOXONE },
+ 	{ 0x0c12, 0x0005, "Intec wireless", 0, XTYPE_XBOX },
+ 	{ 0x0c12, 0x8801, "Nyko Xbox Controller", 0, XTYPE_XBOX },
+@@ -240,7 +241,7 @@ static const struct xpad_device {
+ 	{ 0x0e6f, 0x0146, "Rock Candy Wired Controller for Xbox One", 0, XTYPE_XBOXONE },
+ 	{ 0x0e6f, 0x0147, "PDP Marvel Xbox One Controller", 0, XTYPE_XBOXONE },
+ 	{ 0x0e6f, 0x015c, "PDP Xbox One Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
+-	{ 0x0e6f, 0x015d, "PDP Mirror's Edge Official Wired Controller for Xbox One", XTYPE_XBOXONE },
++	{ 0x0e6f, 0x015d, "PDP Mirror's Edge Official Wired Controller for Xbox One", 0, XTYPE_XBOXONE },
+ 	{ 0x0e6f, 0x0161, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
+ 	{ 0x0e6f, 0x0162, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
+ 	{ 0x0e6f, 0x0163, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
+@@ -387,10 +388,11 @@ static const struct xpad_device {
+ 	{ 0x2dc8, 0x3106, "8BitDo Ultimate Wireless / Pro 2 Wired Controller", 0, XTYPE_XBOX360 },
+ 	{ 0x2dc8, 0x3109, "8BitDo Ultimate Wireless Bluetooth", 0, XTYPE_XBOX360 },
+ 	{ 0x2dc8, 0x310a, "8BitDo Ultimate 2C Wireless Controller", 0, XTYPE_XBOX360 },
++	{ 0x2dc8, 0x310b, "8BitDo Ultimate 2 Wireless Controller", 0, XTYPE_XBOX360 },
+ 	{ 0x2dc8, 0x6001, "8BitDo SN30 Pro", 0, XTYPE_XBOX360 },
+ 	{ 0x2e24, 0x0652, "Hyperkin Duke X-Box One pad", 0, XTYPE_XBOXONE },
+ 	{ 0x2e24, 0x1688, "Hyperkin X91 X-Box One pad", 0, XTYPE_XBOXONE },
+-	{ 0x2e95, 0x0504, "SCUF Gaming Controller", MAP_SELECT_BUTTON, XTYPE_XBOXONE },
++	{ 0x2e95, 0x0504, "SCUF Gaming Controller", MAP_SHARE_BUTTON, XTYPE_XBOXONE },
+ 	{ 0x31e3, 0x1100, "Wooting One", 0, XTYPE_XBOX360 },
+ 	{ 0x31e3, 0x1200, "Wooting Two", 0, XTYPE_XBOX360 },
+ 	{ 0x31e3, 0x1210, "Wooting Lekker", 0, XTYPE_XBOX360 },
+@@ -1027,7 +1029,7 @@ static void xpad360w_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned cha
+  *	The report format was gleaned from
+  *	https://github.com/kylelemons/xbox/blob/master/xbox.go
+  */
+-static void xpadone_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char *data)
++static void xpadone_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char *data, u32 len)
+ {
+ 	struct input_dev *dev = xpad->dev;
+ 	bool do_sync = false;
+@@ -1068,8 +1070,12 @@ static void xpadone_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char
+ 		/* menu/view buttons */
+ 		input_report_key(dev, BTN_START,  data[4] & BIT(2));
+ 		input_report_key(dev, BTN_SELECT, data[4] & BIT(3));
+-		if (xpad->mapping & MAP_SELECT_BUTTON)
+-			input_report_key(dev, KEY_RECORD, data[22] & BIT(0));
++		if (xpad->mapping & MAP_SHARE_BUTTON) {
++			if (xpad->mapping & MAP_SHARE_OFFSET)
++				input_report_key(dev, KEY_RECORD, data[len - 26] & BIT(0));
++			else
++				input_report_key(dev, KEY_RECORD, data[len - 18] & BIT(0));
++		}
+ 
+ 		/* buttons A,B,X,Y */
+ 		input_report_key(dev, BTN_A,	data[4] & BIT(4));
+@@ -1217,7 +1223,7 @@ static void xpad_irq_in(struct urb *urb)
+ 		xpad360w_process_packet(xpad, 0, xpad->idata);
+ 		break;
+ 	case XTYPE_XBOXONE:
+-		xpadone_process_packet(xpad, 0, xpad->idata);
++		xpadone_process_packet(xpad, 0, xpad->idata, urb->actual_length);
+ 		break;
+ 	default:
+ 		xpad_process_packet(xpad, 0, xpad->idata);
+@@ -1974,7 +1980,7 @@ static int xpad_init_input(struct usb_xpad *xpad)
+ 	    xpad->xtype == XTYPE_XBOXONE) {
+ 		for (i = 0; xpad360_btn[i] >= 0; i++)
+ 			input_set_capability(input_dev, EV_KEY, xpad360_btn[i]);
+-		if (xpad->mapping & MAP_SELECT_BUTTON)
++		if (xpad->mapping & MAP_SHARE_BUTTON)
+ 			input_set_capability(input_dev, EV_KEY, KEY_RECORD);
+ 	} else {
+ 		for (i = 0; xpad_btn[i] >= 0; i++)
+diff --git a/drivers/input/keyboard/mtk-pmic-keys.c b/drivers/input/keyboard/mtk-pmic-keys.c
+index 4364c3401ff1c6..486ca8ff86f830 100644
+--- a/drivers/input/keyboard/mtk-pmic-keys.c
++++ b/drivers/input/keyboard/mtk-pmic-keys.c
+@@ -147,8 +147,8 @@ static void mtk_pmic_keys_lp_reset_setup(struct mtk_pmic_keys *keys,
+ 	u32 value, mask;
+ 	int error;
+ 
+-	kregs_home = keys->keys[MTK_PMIC_HOMEKEY_INDEX].regs;
+-	kregs_pwr = keys->keys[MTK_PMIC_PWRKEY_INDEX].regs;
++	kregs_home = &regs->keys_regs[MTK_PMIC_HOMEKEY_INDEX];
++	kregs_pwr = &regs->keys_regs[MTK_PMIC_PWRKEY_INDEX];
+ 
+ 	error = of_property_read_u32(keys->dev->of_node, "power-off-time-sec",
+ 				     &long_press_debounce);
+diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
+index 3d1459b551bb2e..2b8895368437d0 100644
+--- a/drivers/input/mouse/synaptics.c
++++ b/drivers/input/mouse/synaptics.c
+@@ -163,6 +163,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
+ 
+ static const char * const smbus_pnp_ids[] = {
+ 	/* all of the topbuttonpad_pnp_ids are valid, we just add some extras */
++	"DLL060d", /* Dell Precision M3800 */
+ 	"LEN0048", /* X1 Carbon 3 */
+ 	"LEN0046", /* X250 */
+ 	"LEN0049", /* Yoga 11e */
+@@ -189,11 +190,15 @@ static const char * const smbus_pnp_ids[] = {
+ 	"LEN2054", /* E480 */
+ 	"LEN2055", /* E580 */
+ 	"LEN2068", /* T14 Gen 1 */
++	"SYN1221", /* TUXEDO InfinityBook Pro 14 v5 */
++	"SYN3003", /* HP EliteBook 850 G1 */
+ 	"SYN3015", /* HP EliteBook 840 G2 */
+ 	"SYN3052", /* HP EliteBook 840 G4 */
+ 	"SYN3221", /* HP 15-ay000 */
+ 	"SYN323d", /* HP Spectre X360 13-w013dx */
+ 	"SYN3257", /* HP Envy 13-ad105ng */
++	"TOS01f6", /* Dynabook Portege X30L-G */
++	"TOS0213", /* Dynabook Portege X30-D */
+ 	NULL
+ };
+ 
+diff --git a/drivers/input/touchscreen/cyttsp5.c b/drivers/input/touchscreen/cyttsp5.c
+index eafe5a9b896484..071b7c9bf566eb 100644
+--- a/drivers/input/touchscreen/cyttsp5.c
++++ b/drivers/input/touchscreen/cyttsp5.c
+@@ -580,7 +580,7 @@ static int cyttsp5_power_control(struct cyttsp5 *ts, bool on)
+ 	int rc;
+ 
+ 	SET_CMD_REPORT_TYPE(cmd[0], 0);
+-	SET_CMD_REPORT_ID(cmd[0], HID_POWER_SLEEP);
++	SET_CMD_REPORT_ID(cmd[0], state);
+ 	SET_CMD_OPCODE(cmd[1], HID_CMD_SET_POWER);
+ 
+ 	rc = cyttsp5_write(ts, HID_COMMAND_REG, cmd, sizeof(cmd));
+@@ -870,13 +870,16 @@ static int cyttsp5_probe(struct device *dev, struct regmap *regmap, int irq,
+ 	ts->input->phys = ts->phys;
+ 	input_set_drvdata(ts->input, ts);
+ 
+-	/* Reset the gpio to be in a reset state */
++	/* Assert gpio to be in a reset state */
+ 	ts->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ 	if (IS_ERR(ts->reset_gpio)) {
+ 		error = PTR_ERR(ts->reset_gpio);
+ 		dev_err(dev, "Failed to request reset gpio, error %d\n", error);
+ 		return error;
+ 	}
++
++	fsleep(10); /* Ensure long-enough reset pulse (minimum 10us). */
++
+ 	gpiod_set_value_cansleep(ts->reset_gpio, 0);
+ 
+ 	/* Need a delay to have device up */
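
The fsleep() insertion above completes a common reset-pulse pattern:
requesting the line with GPIOD_OUT_HIGH asserts reset immediately, the sleep
holds it for the datasheet minimum, and only then is it released. A minimal
sketch, assuming a device with an active-high reset line:

    reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); /* asserted */
    if (IS_ERR(reset))
        return PTR_ERR(reset);
    fsleep(10);                            /* hold >= 10us */
    gpiod_set_value_cansleep(reset, 0);    /* deassert, device starts up */
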
+diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
+index 9cacc49f2cb04e..3dc5bc3d29d64a 100644
+--- a/drivers/md/dm-table.c
++++ b/drivers/md/dm-table.c
+@@ -1183,7 +1183,7 @@ static int dm_keyslot_evict(struct blk_crypto_profile *profile,
+ 
+ 	t = dm_get_live_table(md, &srcu_idx);
+ 	if (!t)
+-		return 0;
++		goto put_live_table;
+ 
+ 	for (unsigned int i = 0; i < t->num_targets; i++) {
+ 		struct dm_target *ti = dm_table_get_target(t, i);
+@@ -1194,6 +1194,7 @@ static int dm_keyslot_evict(struct blk_crypto_profile *profile,
+ 					  (void *)key);
+ 	}
+ 
++put_live_table:
+ 	dm_put_live_table(md, srcu_idx);
+ 	return 0;
+ }
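
The hunk above converts an early return into the usual goto-unwind idiom:
dm_get_live_table() takes an SRCU read lock whether or not it returns a
table, so every exit path must reach dm_put_live_table(). A reduced sketch
of the shape the function now has:

    t = dm_get_live_table(md, &srcu_idx);
    if (!t)
        goto put_live_table;  /* still drop the SRCU read lock */

    /* ... walk the targets ... */

put_live_table:
    dm_put_live_table(md, srcu_idx);
    return 0;
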
+diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
+index 97cd8bbf2e32a9..dbd4d8796f9b06 100644
+--- a/drivers/net/can/m_can/m_can.c
++++ b/drivers/net/can/m_can/m_can.c
+@@ -2372,6 +2372,7 @@ struct m_can_classdev *m_can_class_allocate_dev(struct device *dev,
+ 	SET_NETDEV_DEV(net_dev, dev);
+ 
+ 	m_can_of_parse_mram(class_dev, mram_config_vals);
++	spin_lock_init(&class_dev->tx_handling_spinlock);
+ out:
+ 	return class_dev;
+ }
+@@ -2456,9 +2457,9 @@ EXPORT_SYMBOL_GPL(m_can_class_register);
+ 
+ void m_can_class_unregister(struct m_can_classdev *cdev)
+ {
++	unregister_candev(cdev->net);
+ 	if (cdev->is_peripheral)
+ 		can_rx_offload_del(&cdev->offload);
+-	unregister_candev(cdev->net);
+ }
+ EXPORT_SYMBOL_GPL(m_can_class_unregister);
+ 
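
The same teardown-order fix recurs below for rockchip_canfd and mcp251xfd:
unregister the candev first so the stack can no longer submit work, then
dismantle the rx-offload machinery it was using. Sketch of the resulting
remove path (assuming a driver that uses both):

    unregister_candev(priv->ndev);       /* quiesce the interface first */
    can_rx_offload_del(&priv->offload);  /* then free offload state */
    free_candev(priv->ndev);
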
+diff --git a/drivers/net/can/rockchip/rockchip_canfd-core.c b/drivers/net/can/rockchip/rockchip_canfd-core.c
+index ac514766d431ce..07510b6f90737f 100644
+--- a/drivers/net/can/rockchip/rockchip_canfd-core.c
++++ b/drivers/net/can/rockchip/rockchip_canfd-core.c
+@@ -942,8 +942,8 @@ static void rkcanfd_remove(struct platform_device *pdev)
+ 	struct rkcanfd_priv *priv = platform_get_drvdata(pdev);
+ 	struct net_device *ndev = priv->ndev;
+ 
+-	can_rx_offload_del(&priv->offload);
+ 	rkcanfd_unregister(priv);
++	can_rx_offload_del(&priv->offload);
+ 	free_candev(ndev);
+ }
+ 
+diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+index 3bc56517fe7a99..c30b04f8fc0df8 100644
+--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
++++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+@@ -75,6 +75,24 @@ static const struct can_bittiming_const mcp251xfd_data_bittiming_const = {
+ 	.brp_inc = 1,
+ };
+ 
++/* The datasheet of the mcp2518fd (DS20006027B) specifies a range of
++ * [-64,63] for TDCO, indicating a relative TDCO.
++ *
++ * Manual tests have shown that using a relative TDCO configuration
++ * results in bus off, while an absolute configuration works.
++ *
++ * For TDCO use the max value (63) from the data sheet, but 0 as the
++ * minimum.
++ */
++static const struct can_tdc_const mcp251xfd_tdc_const = {
++	.tdcv_min = 0,
++	.tdcv_max = 63,
++	.tdco_min = 0,
++	.tdco_max = 63,
++	.tdcf_min = 0,
++	.tdcf_max = 0,
++};
++
+ static const char *__mcp251xfd_get_model_str(enum mcp251xfd_model model)
+ {
+ 	switch (model) {
+@@ -510,8 +528,7 @@ static int mcp251xfd_set_bittiming(const struct mcp251xfd_priv *priv)
+ {
+ 	const struct can_bittiming *bt = &priv->can.bittiming;
+ 	const struct can_bittiming *dbt = &priv->can.data_bittiming;
+-	u32 val = 0;
+-	s8 tdco;
++	u32 tdcmod, val = 0;
+ 	int err;
+ 
+ 	/* CAN Control Register
+@@ -575,11 +592,16 @@ static int mcp251xfd_set_bittiming(const struct mcp251xfd_priv *priv)
+ 		return err;
+ 
+ 	/* Transmitter Delay Compensation */
+-	tdco = clamp_t(int, dbt->brp * (dbt->prop_seg + dbt->phase_seg1),
+-		       -64, 63);
+-	val = FIELD_PREP(MCP251XFD_REG_TDC_TDCMOD_MASK,
+-			 MCP251XFD_REG_TDC_TDCMOD_AUTO) |
+-		FIELD_PREP(MCP251XFD_REG_TDC_TDCO_MASK, tdco);
++	if (priv->can.ctrlmode & CAN_CTRLMODE_TDC_AUTO)
++		tdcmod = MCP251XFD_REG_TDC_TDCMOD_AUTO;
++	else if (priv->can.ctrlmode & CAN_CTRLMODE_TDC_MANUAL)
++		tdcmod = MCP251XFD_REG_TDC_TDCMOD_MANUAL;
++	else
++		tdcmod = MCP251XFD_REG_TDC_TDCMOD_DISABLED;
++
++	val = FIELD_PREP(MCP251XFD_REG_TDC_TDCMOD_MASK, tdcmod) |
++		FIELD_PREP(MCP251XFD_REG_TDC_TDCV_MASK, priv->can.tdc.tdcv) |
++		FIELD_PREP(MCP251XFD_REG_TDC_TDCO_MASK, priv->can.tdc.tdco);
+ 
+ 	return regmap_write(priv->map_reg, MCP251XFD_REG_TDC, val);
+ }
+@@ -2083,10 +2105,12 @@ static int mcp251xfd_probe(struct spi_device *spi)
+ 	priv->can.do_get_berr_counter = mcp251xfd_get_berr_counter;
+ 	priv->can.bittiming_const = &mcp251xfd_bittiming_const;
+ 	priv->can.data_bittiming_const = &mcp251xfd_data_bittiming_const;
++	priv->can.tdc_const = &mcp251xfd_tdc_const;
+ 	priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
+ 		CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_BERR_REPORTING |
+ 		CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO |
+-		CAN_CTRLMODE_CC_LEN8_DLC;
++		CAN_CTRLMODE_CC_LEN8_DLC | CAN_CTRLMODE_TDC_AUTO |
++		CAN_CTRLMODE_TDC_MANUAL;
+ 	set_bit(MCP251XFD_FLAGS_DOWN, priv->flags);
+ 	priv->ndev = ndev;
+ 	priv->spi = spi;
+@@ -2174,8 +2198,8 @@ static void mcp251xfd_remove(struct spi_device *spi)
+ 	struct mcp251xfd_priv *priv = spi_get_drvdata(spi);
+ 	struct net_device *ndev = priv->ndev;
+ 
+-	can_rx_offload_del(&priv->offload);
+ 	mcp251xfd_unregister(priv);
++	can_rx_offload_del(&priv->offload);
+ 	spi->max_speed_hz = priv->spi_max_speed_hz_orig;
+ 	free_candev(ndev);
+ }
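
The TDC register assembly above leans on the bitfield helpers from
<linux/bitfield.h>: FIELD_PREP() shifts a value into the field a mask
describes, FIELD_GET() extracts it back. A self-contained illustration with
made-up masks and values:

    #define EX_MODE_MASK    GENMASK(17, 16)
    #define EX_TDCO_MASK    GENMASK(14, 8)

    u32 reg = FIELD_PREP(EX_MODE_MASK, 2) |   /* bits 17:16 = 2 */
              FIELD_PREP(EX_TDCO_MASK, 63);   /* bits 14:8 = 63 */
    u32 tdco = FIELD_GET(EX_TDCO_MASK, reg);  /* reads back 63 */
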
+diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
+index d4600ab0b70b3b..e072d2b50c9876 100644
+--- a/drivers/net/dsa/b53/b53_common.c
++++ b/drivers/net/dsa/b53/b53_common.c
+@@ -373,15 +373,17 @@ static void b53_enable_vlan(struct b53_device *dev, int port, bool enable,
+ 		b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5, &vc5);
+ 	}
+ 
++	vc1 &= ~VC1_RX_MCST_FWD_EN;
++
+ 	if (enable) {
+ 		vc0 |= VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID;
+-		vc1 |= VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN;
++		vc1 |= VC1_RX_MCST_UNTAG_EN;
+ 		vc4 &= ~VC4_ING_VID_CHECK_MASK;
+ 		if (enable_filtering) {
+ 			vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S;
+ 			vc5 |= VC5_DROP_VTABLE_MISS;
+ 		} else {
+-			vc4 |= VC4_ING_VID_VIO_FWD << VC4_ING_VID_CHECK_S;
++			vc4 |= VC4_NO_ING_VID_CHK << VC4_ING_VID_CHECK_S;
+ 			vc5 &= ~VC5_DROP_VTABLE_MISS;
+ 		}
+ 
+@@ -393,7 +395,7 @@ static void b53_enable_vlan(struct b53_device *dev, int port, bool enable,
+ 
+ 	} else {
+ 		vc0 &= ~(VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID);
+-		vc1 &= ~(VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN);
++		vc1 &= ~VC1_RX_MCST_UNTAG_EN;
+ 		vc4 &= ~VC4_ING_VID_CHECK_MASK;
+ 		vc5 &= ~VC5_DROP_VTABLE_MISS;
+ 
+@@ -576,6 +578,18 @@ static void b53_eee_enable_set(struct dsa_switch *ds, int port, bool enable)
+ 	b53_write16(dev, B53_EEE_PAGE, B53_EEE_EN_CTRL, reg);
+ }
+ 
++int b53_setup_port(struct dsa_switch *ds, int port)
++{
++	struct b53_device *dev = ds->priv;
++
++	b53_port_set_ucast_flood(dev, port, true);
++	b53_port_set_mcast_flood(dev, port, true);
++	b53_port_set_learning(dev, port, false);
++
++	return 0;
++}
++EXPORT_SYMBOL(b53_setup_port);
++
+ int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
+ {
+ 	struct b53_device *dev = ds->priv;
+@@ -588,10 +602,6 @@ int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
+ 
+ 	cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
+ 
+-	b53_port_set_ucast_flood(dev, port, true);
+-	b53_port_set_mcast_flood(dev, port, true);
+-	b53_port_set_learning(dev, port, false);
+-
+ 	if (dev->ops->irq_enable)
+ 		ret = dev->ops->irq_enable(dev, port);
+ 	if (ret)
+@@ -722,10 +732,6 @@ static void b53_enable_cpu_port(struct b53_device *dev, int port)
+ 	b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), port_ctrl);
+ 
+ 	b53_brcm_hdr_setup(dev->ds, port);
+-
+-	b53_port_set_ucast_flood(dev, port, true);
+-	b53_port_set_mcast_flood(dev, port, true);
+-	b53_port_set_learning(dev, port, false);
+ }
+ 
+ static void b53_enable_mib(struct b53_device *dev)
+@@ -761,6 +767,22 @@ static bool b53_vlan_port_needs_forced_tagged(struct dsa_switch *ds, int port)
+ 	return dev->tag_protocol == DSA_TAG_PROTO_NONE && dsa_is_cpu_port(ds, port);
+ }
+ 
++static bool b53_vlan_port_may_join_untagged(struct dsa_switch *ds, int port)
++{
++	struct b53_device *dev = ds->priv;
++	struct dsa_port *dp;
++
++	if (!dev->vlan_filtering)
++		return true;
++
++	dp = dsa_to_port(ds, port);
++
++	if (dsa_port_is_cpu(dp))
++		return true;
++
++	return dp->bridge == NULL;
++}
++
+ int b53_configure_vlan(struct dsa_switch *ds)
+ {
+ 	struct b53_device *dev = ds->priv;
+@@ -779,7 +801,7 @@ int b53_configure_vlan(struct dsa_switch *ds)
+ 		b53_do_vlan_op(dev, VTA_CMD_CLEAR);
+ 	}
+ 
+-	b53_enable_vlan(dev, -1, dev->vlan_enabled, ds->vlan_filtering);
++	b53_enable_vlan(dev, -1, dev->vlan_enabled, dev->vlan_filtering);
+ 
+ 	/* Create an untagged VLAN entry for the default PVID in case
+ 	 * CONFIG_VLAN_8021Q is disabled and there are no calls to
+@@ -787,26 +809,39 @@ int b53_configure_vlan(struct dsa_switch *ds)
+ 	 * entry. Do this only when the tagging protocol is not
+ 	 * DSA_TAG_PROTO_NONE
+ 	 */
++	v = &dev->vlans[def_vid];
+ 	b53_for_each_port(dev, i) {
+-		v = &dev->vlans[def_vid];
+-		v->members |= BIT(i);
++		if (!b53_vlan_port_may_join_untagged(ds, i))
++			continue;
++
++		vl.members |= BIT(i);
+ 		if (!b53_vlan_port_needs_forced_tagged(ds, i))
+-			v->untag = v->members;
+-		b53_write16(dev, B53_VLAN_PAGE,
+-			    B53_VLAN_PORT_DEF_TAG(i), def_vid);
++			vl.untag = vl.members;
++		b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(i),
++			    def_vid);
+ 	}
++	b53_set_vlan_entry(dev, def_vid, &vl);
+ 
+-	/* Upon initial call we have not set-up any VLANs, but upon
+-	 * system resume, we need to restore all VLAN entries.
+-	 */
+-	for (vid = def_vid; vid < dev->num_vlans; vid++) {
+-		v = &dev->vlans[vid];
++	if (dev->vlan_filtering) {
++		/* Upon initial call we have not set-up any VLANs, but upon
++		 * system resume, we need to restore all VLAN entries.
++		 */
++		for (vid = def_vid + 1; vid < dev->num_vlans; vid++) {
++			v = &dev->vlans[vid];
+ 
+-		if (!v->members)
+-			continue;
++			if (!v->members)
++				continue;
++
++			b53_set_vlan_entry(dev, vid, v);
++			b53_fast_age_vlan(dev, vid);
++		}
+ 
+-		b53_set_vlan_entry(dev, vid, v);
+-		b53_fast_age_vlan(dev, vid);
++		b53_for_each_port(dev, i) {
++			if (!dsa_is_cpu_port(ds, i))
++				b53_write16(dev, B53_VLAN_PAGE,
++					    B53_VLAN_PORT_DEF_TAG(i),
++					    dev->ports[i].pvid);
++		}
+ 	}
+ 
+ 	return 0;
+@@ -1126,7 +1161,9 @@ EXPORT_SYMBOL(b53_setup_devlink_resources);
+ static int b53_setup(struct dsa_switch *ds)
+ {
+ 	struct b53_device *dev = ds->priv;
++	struct b53_vlan *vl;
+ 	unsigned int port;
++	u16 pvid;
+ 	int ret;
+ 
+ 	/* Request bridge PVID untagged when DSA_TAG_PROTO_NONE is set
+@@ -1134,12 +1171,26 @@ static int b53_setup(struct dsa_switch *ds)
+ 	 */
+ 	ds->untag_bridge_pvid = dev->tag_protocol == DSA_TAG_PROTO_NONE;
+ 
++	/* The switch does not tell us the original VLAN for untagged
++	 * packets, so keep the CPU port always tagged.
++	 */
++	ds->untag_vlan_aware_bridge_pvid = true;
++
+ 	ret = b53_reset_switch(dev);
+ 	if (ret) {
+ 		dev_err(ds->dev, "failed to reset switch\n");
+ 		return ret;
+ 	}
+ 
++	/* setup default vlan for filtering mode */
++	pvid = b53_default_pvid(dev);
++	vl = &dev->vlans[pvid];
++	b53_for_each_port(dev, port) {
++		vl->members |= BIT(port);
++		if (!b53_vlan_port_needs_forced_tagged(ds, port))
++			vl->untag |= BIT(port);
++	}
++
+ 	b53_reset_mib(dev);
+ 
+ 	ret = b53_apply_config(dev);
+@@ -1493,7 +1544,10 @@ int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
+ {
+ 	struct b53_device *dev = ds->priv;
+ 
+-	b53_enable_vlan(dev, port, dev->vlan_enabled, vlan_filtering);
++	if (dev->vlan_filtering != vlan_filtering) {
++		dev->vlan_filtering = vlan_filtering;
++		b53_apply_config(dev);
++	}
+ 
+ 	return 0;
+ }
+@@ -1518,7 +1572,7 @@ static int b53_vlan_prepare(struct dsa_switch *ds, int port,
+ 	if (vlan->vid >= dev->num_vlans)
+ 		return -ERANGE;
+ 
+-	b53_enable_vlan(dev, port, true, ds->vlan_filtering);
++	b53_enable_vlan(dev, port, true, dev->vlan_filtering);
+ 
+ 	return 0;
+ }
+@@ -1531,18 +1585,29 @@ int b53_vlan_add(struct dsa_switch *ds, int port,
+ 	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ 	bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+ 	struct b53_vlan *vl;
++	u16 old_pvid, new_pvid;
+ 	int err;
+ 
+ 	err = b53_vlan_prepare(ds, port, vlan);
+ 	if (err)
+ 		return err;
+ 
+-	vl = &dev->vlans[vlan->vid];
++	if (vlan->vid == 0)
++		return 0;
+ 
+-	b53_get_vlan_entry(dev, vlan->vid, vl);
++	old_pvid = dev->ports[port].pvid;
++	if (pvid)
++		new_pvid = vlan->vid;
++	else if (!pvid && vlan->vid == old_pvid)
++		new_pvid = b53_default_pvid(dev);
++	else
++		new_pvid = old_pvid;
++	dev->ports[port].pvid = new_pvid;
++
++	vl = &dev->vlans[vlan->vid];
+ 
+-	if (vlan->vid == 0 && vlan->vid == b53_default_pvid(dev))
+-		untagged = true;
++	if (dsa_is_cpu_port(ds, port))
++		untagged = false;
+ 
+ 	vl->members |= BIT(port);
+ 	if (untagged && !b53_vlan_port_needs_forced_tagged(ds, port))
+@@ -1550,13 +1615,16 @@ int b53_vlan_add(struct dsa_switch *ds, int port,
+ 	else
+ 		vl->untag &= ~BIT(port);
+ 
++	if (!dev->vlan_filtering)
++		return 0;
++
+ 	b53_set_vlan_entry(dev, vlan->vid, vl);
+ 	b53_fast_age_vlan(dev, vlan->vid);
+ 
+-	if (pvid && !dsa_is_cpu_port(ds, port)) {
++	if (!dsa_is_cpu_port(ds, port) && new_pvid != old_pvid) {
+ 		b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port),
+-			    vlan->vid);
+-		b53_fast_age_vlan(dev, vlan->vid);
++			    new_pvid);
++		b53_fast_age_vlan(dev, old_pvid);
+ 	}
+ 
+ 	return 0;
+@@ -1571,20 +1639,25 @@ int b53_vlan_del(struct dsa_switch *ds, int port,
+ 	struct b53_vlan *vl;
+ 	u16 pvid;
+ 
+-	b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid);
++	if (vlan->vid == 0)
++		return 0;
+ 
+-	vl = &dev->vlans[vlan->vid];
++	pvid = dev->ports[port].pvid;
+ 
+-	b53_get_vlan_entry(dev, vlan->vid, vl);
++	vl = &dev->vlans[vlan->vid];
+ 
+ 	vl->members &= ~BIT(port);
+ 
+ 	if (pvid == vlan->vid)
+ 		pvid = b53_default_pvid(dev);
++	dev->ports[port].pvid = pvid;
+ 
+ 	if (untagged && !b53_vlan_port_needs_forced_tagged(ds, port))
+ 		vl->untag &= ~(BIT(port));
+ 
++	if (!dev->vlan_filtering)
++		return 0;
++
+ 	b53_set_vlan_entry(dev, vlan->vid, vl);
+ 	b53_fast_age_vlan(dev, vlan->vid);
+ 
+@@ -1917,8 +1990,9 @@ int b53_br_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge,
+ 		bool *tx_fwd_offload, struct netlink_ext_ack *extack)
+ {
+ 	struct b53_device *dev = ds->priv;
++	struct b53_vlan *vl;
+ 	s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
+-	u16 pvlan, reg;
++	u16 pvlan, reg, pvid;
+ 	unsigned int i;
+ 
+ 	/* On 7278, port 7 which connects to the ASP should only receive
+@@ -1927,15 +2001,29 @@ int b53_br_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge,
+ 	if (dev->chip_id == BCM7278_DEVICE_ID && port == 7)
+ 		return -EINVAL;
+ 
+-	/* Make this port leave the all VLANs join since we will have proper
+-	 * VLAN entries from now on
+-	 */
+-	if (is58xx(dev)) {
+-		b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, &reg);
+-		reg &= ~BIT(port);
+-		if ((reg & BIT(cpu_port)) == BIT(cpu_port))
+-			reg &= ~BIT(cpu_port);
+-		b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg);
++	pvid = b53_default_pvid(dev);
++	vl = &dev->vlans[pvid];
++
++	if (dev->vlan_filtering) {
++		/* Make this port leave the all VLANs join since we will have
++		 * proper VLAN entries from now on
++		 */
++		if (is58xx(dev)) {
++			b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN,
++				   &reg);
++			reg &= ~BIT(port);
++			if ((reg & BIT(cpu_port)) == BIT(cpu_port))
++				reg &= ~BIT(cpu_port);
++			b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN,
++				    reg);
++		}
++
++		b53_get_vlan_entry(dev, pvid, vl);
++		vl->members &= ~BIT(port);
++		if (vl->members == BIT(cpu_port))
++			vl->members &= ~BIT(cpu_port);
++		vl->untag = vl->members;
++		b53_set_vlan_entry(dev, pvid, vl);
+ 	}
+ 
+ 	b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan);
+@@ -1968,7 +2056,7 @@ EXPORT_SYMBOL(b53_br_join);
+ void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge)
+ {
+ 	struct b53_device *dev = ds->priv;
+-	struct b53_vlan *vl = &dev->vlans[0];
++	struct b53_vlan *vl;
+ 	s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
+ 	unsigned int i;
+ 	u16 pvlan, reg, pvid;
+@@ -1994,15 +2082,18 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge)
+ 	dev->ports[port].vlan_ctl_mask = pvlan;
+ 
+ 	pvid = b53_default_pvid(dev);
++	vl = &dev->vlans[pvid];
++
++	if (dev->vlan_filtering) {
++		/* Make this port join all VLANs without VLAN entries */
++		if (is58xx(dev)) {
++			b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, &reg);
++			reg |= BIT(port);
++			if (!(reg & BIT(cpu_port)))
++				reg |= BIT(cpu_port);
++			b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg);
++		}
+ 
+-	/* Make this port join all VLANs without VLAN entries */
+-	if (is58xx(dev)) {
+-		b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, &reg);
+-		reg |= BIT(port);
+-		if (!(reg & BIT(cpu_port)))
+-			reg |= BIT(cpu_port);
+-		b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg);
+-	} else {
+ 		b53_get_vlan_entry(dev, pvid, vl);
+ 		vl->members |= BIT(port) | BIT(cpu_port);
+ 		vl->untag |= BIT(port) | BIT(cpu_port);
+@@ -2307,6 +2398,7 @@ static const struct dsa_switch_ops b53_switch_ops = {
+ 	.phy_read		= b53_phy_read16,
+ 	.phy_write		= b53_phy_write16,
+ 	.phylink_get_caps	= b53_phylink_get_caps,
++	.port_setup		= b53_setup_port,
+ 	.port_enable		= b53_enable_port,
+ 	.port_disable		= b53_disable_port,
+ 	.get_mac_eee		= b53_get_mac_eee,
+@@ -2751,6 +2843,7 @@ struct b53_device *b53_switch_alloc(struct device *base,
+ 	ds->ops = &b53_switch_ops;
+ 	ds->phylink_mac_ops = &b53_phylink_mac_ops;
+ 	dev->vlan_enabled = true;
++	dev->vlan_filtering = false;
+ 	/* Let DSA handle the case were multiple bridges span the same switch
+ 	 * device and different VLAN awareness settings are requested, which
+ 	 * would be breaking filtering semantics for any of the other bridge
+diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
+index 05141176daf506..4f8c97098d2a72 100644
+--- a/drivers/net/dsa/b53/b53_priv.h
++++ b/drivers/net/dsa/b53/b53_priv.h
+@@ -95,6 +95,7 @@ struct b53_pcs {
+ 
+ struct b53_port {
+ 	u16		vlan_ctl_mask;
++	u16		pvid;
+ 	struct ethtool_keee eee;
+ };
+ 
+@@ -146,6 +147,7 @@ struct b53_device {
+ 	unsigned int num_vlans;
+ 	struct b53_vlan *vlans;
+ 	bool vlan_enabled;
++	bool vlan_filtering;
+ 	unsigned int num_ports;
+ 	struct b53_port *ports;
+ 
+@@ -380,6 +382,7 @@ enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port,
+ 					   enum dsa_tag_protocol mprot);
+ void b53_mirror_del(struct dsa_switch *ds, int port,
+ 		    struct dsa_mall_mirror_tc_entry *mirror);
++int b53_setup_port(struct dsa_switch *ds, int port);
+ int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy);
+ void b53_disable_port(struct dsa_switch *ds, int port);
+ void b53_brcm_hdr_setup(struct dsa_switch *ds, int port);
+diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
+index 0e663ec0c12a3b..c4771a07878ea6 100644
+--- a/drivers/net/dsa/bcm_sf2.c
++++ b/drivers/net/dsa/bcm_sf2.c
+@@ -1230,6 +1230,7 @@ static const struct dsa_switch_ops bcm_sf2_ops = {
+ 	.resume			= bcm_sf2_sw_resume,
+ 	.get_wol		= bcm_sf2_sw_get_wol,
+ 	.set_wol		= bcm_sf2_sw_set_wol,
++	.port_setup		= b53_setup_port,
+ 	.port_enable		= bcm_sf2_port_setup,
+ 	.port_disable		= bcm_sf2_port_disable,
+ 	.get_mac_eee		= b53_get_mac_eee,
+diff --git a/drivers/net/ethernet/intel/ice/ice_adapter.c b/drivers/net/ethernet/intel/ice/ice_adapter.c
+index f3e195974a8efa..66e070095d1bbe 100644
+--- a/drivers/net/ethernet/intel/ice/ice_adapter.c
++++ b/drivers/net/ethernet/intel/ice/ice_adapter.c
+@@ -1,7 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0-only
+ // SPDX-FileCopyrightText: Copyright Red Hat
+ 
+-#include <linux/bitfield.h>
+ #include <linux/cleanup.h>
+ #include <linux/mutex.h>
+ #include <linux/pci.h>
+@@ -9,27 +8,21 @@
+ #include <linux/spinlock.h>
+ #include <linux/xarray.h>
+ #include "ice_adapter.h"
++#include "ice.h"
+ 
+ static DEFINE_XARRAY(ice_adapters);
+ static DEFINE_MUTEX(ice_adapters_mutex);
+ 
+-/* PCI bus number is 8 bits. Slot is 5 bits. Domain can have the rest. */
+-#define INDEX_FIELD_DOMAIN GENMASK(BITS_PER_LONG - 1, 13)
+-#define INDEX_FIELD_BUS    GENMASK(12, 5)
+-#define INDEX_FIELD_SLOT   GENMASK(4, 0)
+-
+-static unsigned long ice_adapter_index(const struct pci_dev *pdev)
++static unsigned long ice_adapter_index(u64 dsn)
+ {
+-	unsigned int domain = pci_domain_nr(pdev->bus);
+-
+-	WARN_ON(domain > FIELD_MAX(INDEX_FIELD_DOMAIN));
+-
+-	return FIELD_PREP(INDEX_FIELD_DOMAIN, domain) |
+-	       FIELD_PREP(INDEX_FIELD_BUS,    pdev->bus->number) |
+-	       FIELD_PREP(INDEX_FIELD_SLOT,   PCI_SLOT(pdev->devfn));
++#if BITS_PER_LONG == 64
++	return dsn;
++#else
++	return (u32)dsn ^ (u32)(dsn >> 32);
++#endif
+ }
+ 
+-static struct ice_adapter *ice_adapter_new(void)
++static struct ice_adapter *ice_adapter_new(u64 dsn)
+ {
+ 	struct ice_adapter *adapter;
+ 
+@@ -37,6 +30,7 @@ static struct ice_adapter *ice_adapter_new(void)
+ 	if (!adapter)
+ 		return NULL;
+ 
++	adapter->device_serial_number = dsn;
+ 	spin_lock_init(&adapter->ptp_gltsyn_time_lock);
+ 	refcount_set(&adapter->refcount, 1);
+ 
+@@ -67,23 +61,26 @@ static void ice_adapter_free(struct ice_adapter *adapter)
+  * Return:  Pointer to ice_adapter on success.
+  *          ERR_PTR() on error. -ENOMEM is the only possible error.
+  */
+-struct ice_adapter *ice_adapter_get(const struct pci_dev *pdev)
++struct ice_adapter *ice_adapter_get(struct pci_dev *pdev)
+ {
+-	unsigned long index = ice_adapter_index(pdev);
++	u64 dsn = pci_get_dsn(pdev);
+ 	struct ice_adapter *adapter;
++	unsigned long index;
+ 	int err;
+ 
++	index = ice_adapter_index(dsn);
+ 	scoped_guard(mutex, &ice_adapters_mutex) {
+ 		err = xa_insert(&ice_adapters, index, NULL, GFP_KERNEL);
+ 		if (err == -EBUSY) {
+ 			adapter = xa_load(&ice_adapters, index);
+ 			refcount_inc(&adapter->refcount);
++			WARN_ON_ONCE(adapter->device_serial_number != dsn);
+ 			return adapter;
+ 		}
+ 		if (err)
+ 			return ERR_PTR(err);
+ 
+-		adapter = ice_adapter_new();
++		adapter = ice_adapter_new(dsn);
+ 		if (!adapter)
+ 			return ERR_PTR(-ENOMEM);
+ 		xa_store(&ice_adapters, index, adapter, GFP_KERNEL);
+@@ -100,11 +97,13 @@ struct ice_adapter *ice_adapter_get(const struct pci_dev *pdev)
+  *
+  * Context: Process, may sleep.
+  */
+-void ice_adapter_put(const struct pci_dev *pdev)
++void ice_adapter_put(struct pci_dev *pdev)
+ {
+-	unsigned long index = ice_adapter_index(pdev);
++	u64 dsn = pci_get_dsn(pdev);
+ 	struct ice_adapter *adapter;
++	unsigned long index;
+ 
++	index = ice_adapter_index(dsn);
+ 	scoped_guard(mutex, &ice_adapters_mutex) {
+ 		adapter = xa_load(&ice_adapters, index);
+ 		if (WARN_ON(!adapter))
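
ice_adapter_index() above has to squeeze a 64-bit DSN into an unsigned long
xarray index, so on 32-bit builds it XOR-folds the two halves; genuine
collisions are then flagged by the WARN_ON_ONCE() against the cached full
DSN. A standalone example of the fold with a made-up serial number:

    u64 dsn = 0x0123456789abcdefULL;
    unsigned long idx = (u32)dsn ^ (u32)(dsn >> 32);
    /* 0x89abcdef ^ 0x01234567 == 0x88888888 */
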
+diff --git a/drivers/net/ethernet/intel/ice/ice_adapter.h b/drivers/net/ethernet/intel/ice/ice_adapter.h
+index e233225848b384..ac15c0d2bc1a47 100644
+--- a/drivers/net/ethernet/intel/ice/ice_adapter.h
++++ b/drivers/net/ethernet/intel/ice/ice_adapter.h
+@@ -32,6 +32,7 @@ struct ice_port_list {
+  * @refcount: Reference count. struct ice_pf objects hold the references.
+  * @ctrl_pf: Control PF of the adapter
+  * @ports: Ports list
++ * @device_serial_number: DSN cached for collision detection on 32bit systems
+  */
+ struct ice_adapter {
+ 	refcount_t refcount;
+@@ -40,9 +41,10 @@ struct ice_adapter {
+ 
+ 	struct ice_pf *ctrl_pf;
+ 	struct ice_port_list ports;
++	u64 device_serial_number;
+ };
+ 
+-struct ice_adapter *ice_adapter_get(const struct pci_dev *pdev);
+-void ice_adapter_put(const struct pci_dev *pdev);
++struct ice_adapter *ice_adapter_get(struct pci_dev *pdev);
++void ice_adapter_put(struct pci_dev *pdev);
+ 
+ #endif /* _ICE_ADAPTER_H */
+diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+index c5d5b9ff8bc42f..0a13f7c4684e0e 100644
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -3140,11 +3140,19 @@ static int mtk_dma_init(struct mtk_eth *eth)
+ static void mtk_dma_free(struct mtk_eth *eth)
+ {
+ 	const struct mtk_soc_data *soc = eth->soc;
+-	int i;
++	int i, j, txqs = 1;
++
++	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
++		txqs = MTK_QDMA_NUM_QUEUES;
++
++	for (i = 0; i < MTK_MAX_DEVS; i++) {
++		if (!eth->netdev[i])
++			continue;
++
++		for (j = 0; j < txqs; j++)
++			netdev_tx_reset_subqueue(eth->netdev[i], j);
++	}
+ 
+-	for (i = 0; i < MTK_MAX_DEVS; i++)
+-		if (eth->netdev[i])
+-			netdev_reset_queue(eth->netdev[i]);
+ 	if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && eth->scratch_ring) {
+ 		dma_free_coherent(eth->dma_dev,
+ 				  MTK_QDMA_RING_SIZE * soc->tx.desc_size,
+@@ -3419,9 +3427,6 @@ static int mtk_open(struct net_device *dev)
+ 			}
+ 			mtk_gdm_config(eth, target_mac->id, gdm_config);
+ 		}
+-		/* Reset and enable PSE */
+-		mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
+-		mtk_w32(eth, 0, MTK_RST_GL);
+ 
+ 		napi_enable(&eth->tx_napi);
+ 		napi_enable(&eth->rx_napi);
+diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
+index 21db509acbc157..e91b4432fddd72 100644
+--- a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
++++ b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
+@@ -700,8 +700,10 @@ enum {
+ /* PUL User Registers */
+ #define FBNIC_CSR_START_PUL_USER	0x31000	/* CSR section delimiter */
+ #define FBNIC_PUL_OB_TLP_HDR_AW_CFG	0x3103d		/* 0xc40f4 */
++#define FBNIC_PUL_OB_TLP_HDR_AW_CFG_FLUSH	CSR_BIT(19)
+ #define FBNIC_PUL_OB_TLP_HDR_AW_CFG_BME		CSR_BIT(18)
+ #define FBNIC_PUL_OB_TLP_HDR_AR_CFG	0x3103e		/* 0xc40f8 */
++#define FBNIC_PUL_OB_TLP_HDR_AR_CFG_FLUSH	CSR_BIT(19)
+ #define FBNIC_PUL_OB_TLP_HDR_AR_CFG_BME		CSR_BIT(18)
+ #define FBNIC_CSR_END_PUL_USER	0x31080	/* CSR section delimiter */
+ 
+diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
+index 8f7a2a19ddf802..7775418316df53 100644
+--- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
++++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
+@@ -17,11 +17,29 @@ static void __fbnic_mbx_wr_desc(struct fbnic_dev *fbd, int mbx_idx,
+ {
+ 	u32 desc_offset = FBNIC_IPC_MBX(mbx_idx, desc_idx);
+ 
++	/* Write the upper 32b and then the lower 32b. This way the FW
++	 * can read lower, upper, lower to verify that the state of the
++	 * descriptor wasn't changed mid-transaction.
++	 */
+ 	fw_wr32(fbd, desc_offset + 1, upper_32_bits(desc));
+ 	fw_wrfl(fbd);
+ 	fw_wr32(fbd, desc_offset, lower_32_bits(desc));
+ }
+ 
++static void __fbnic_mbx_invalidate_desc(struct fbnic_dev *fbd, int mbx_idx,
++					int desc_idx, u32 desc)
++{
++	u32 desc_offset = FBNIC_IPC_MBX(mbx_idx, desc_idx);
++
++	/* For initialization we write the lower 32b of the descriptor first.
++	 * This way we can set the state to mark it invalid before we clear the
++	 * upper 32b.
++	 */
++	fw_wr32(fbd, desc_offset, desc);
++	fw_wrfl(fbd);
++	fw_wr32(fbd, desc_offset + 1, 0);
++}
++
+ static u64 __fbnic_mbx_rd_desc(struct fbnic_dev *fbd, int mbx_idx, int desc_idx)
+ {
+ 	u32 desc_offset = FBNIC_IPC_MBX(mbx_idx, desc_idx);
+@@ -33,29 +51,41 @@ static u64 __fbnic_mbx_rd_desc(struct fbnic_dev *fbd, int mbx_idx, int desc_idx)
+ 	return desc;
+ }
+ 
+-static void fbnic_mbx_init_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
++static void fbnic_mbx_reset_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
+ {
+ 	int desc_idx;
+ 
++	/* Disable DMA transactions from the device,
++	 * and flush any transactions triggered during cleaning
++	 */
++	switch (mbx_idx) {
++	case FBNIC_IPC_MBX_RX_IDX:
++		wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AW_CFG,
++		     FBNIC_PUL_OB_TLP_HDR_AW_CFG_FLUSH);
++		break;
++	case FBNIC_IPC_MBX_TX_IDX:
++		wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AR_CFG,
++		     FBNIC_PUL_OB_TLP_HDR_AR_CFG_FLUSH);
++		break;
++	}
++
++	wrfl(fbd);
++
+ 	/* Initialize first descriptor to all 0s. Doing this gives us a
+ 	 * solid stop for the firmware to hit when it is done looping
+ 	 * through the ring.
+ 	 */
+-	__fbnic_mbx_wr_desc(fbd, mbx_idx, 0, 0);
+-
+-	fw_wrfl(fbd);
++	__fbnic_mbx_invalidate_desc(fbd, mbx_idx, 0, 0);
+ 
+ 	/* We then fill the rest of the ring starting at the end and moving
+ 	 * back toward descriptor 0 with skip descriptors that have no
+ 	 * length nor address, and tell the firmware that they can skip
+ 	 * them and just move past them to the one we initialized to 0.
+ 	 */
+-	for (desc_idx = FBNIC_IPC_MBX_DESC_LEN; --desc_idx;) {
+-		__fbnic_mbx_wr_desc(fbd, mbx_idx, desc_idx,
+-				    FBNIC_IPC_MBX_DESC_FW_CMPL |
+-				    FBNIC_IPC_MBX_DESC_HOST_CMPL);
+-		fw_wrfl(fbd);
+-	}
++	for (desc_idx = FBNIC_IPC_MBX_DESC_LEN; --desc_idx;)
++		__fbnic_mbx_invalidate_desc(fbd, mbx_idx, desc_idx,
++					    FBNIC_IPC_MBX_DESC_FW_CMPL |
++					    FBNIC_IPC_MBX_DESC_HOST_CMPL);
+ }
+ 
+ void fbnic_mbx_init(struct fbnic_dev *fbd)
+@@ -76,7 +106,7 @@ void fbnic_mbx_init(struct fbnic_dev *fbd)
+ 	wr32(fbd, FBNIC_INTR_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY);
+ 
+ 	for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
+-		fbnic_mbx_init_desc_ring(fbd, i);
++		fbnic_mbx_reset_desc_ring(fbd, i);
+ }
+ 
+ static int fbnic_mbx_map_msg(struct fbnic_dev *fbd, int mbx_idx,
+@@ -141,7 +171,7 @@ static void fbnic_mbx_clean_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
+ {
+ 	int i;
+ 
+-	fbnic_mbx_init_desc_ring(fbd, mbx_idx);
++	fbnic_mbx_reset_desc_ring(fbd, mbx_idx);
+ 
+ 	for (i = FBNIC_IPC_MBX_DESC_LEN; i--;)
+ 		fbnic_mbx_unmap_and_free_msg(fbd, mbx_idx, i);
+@@ -265,67 +295,41 @@ static int fbnic_fw_xmit_simple_msg(struct fbnic_dev *fbd, u32 msg_type)
+ 	return err;
+ }
+ 
+-/**
+- * fbnic_fw_xmit_cap_msg - Allocate and populate a FW capabilities message
+- * @fbd: FBNIC device structure
+- *
+- * Return: NULL on failure to allocate, error pointer on error, or pointer
+- * to new TLV test message.
+- *
+- * Sends a single TLV header indicating the host wants the firmware to
+- * confirm the capabilities and version.
+- **/
+-static int fbnic_fw_xmit_cap_msg(struct fbnic_dev *fbd)
+-{
+-	int err = fbnic_fw_xmit_simple_msg(fbd, FBNIC_TLV_MSG_ID_HOST_CAP_REQ);
+-
+-	/* Return 0 if we are not calling this on ASIC */
+-	return (err == -EOPNOTSUPP) ? 0 : err;
+-}
+-
+-static void fbnic_mbx_postinit_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
++static void fbnic_mbx_init_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
+ {
+ 	struct fbnic_fw_mbx *mbx = &fbd->mbx[mbx_idx];
+ 
+-	/* This is a one time init, so just exit if it is completed */
+-	if (mbx->ready)
+-		return;
+-
+ 	mbx->ready = true;
+ 
+ 	switch (mbx_idx) {
+ 	case FBNIC_IPC_MBX_RX_IDX:
++		/* Enable DMA writes from the device */
++		wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AW_CFG,
++		     FBNIC_PUL_OB_TLP_HDR_AW_CFG_BME);
++
+ 		/* Make sure we have a page for the FW to write to */
+ 		fbnic_mbx_alloc_rx_msgs(fbd);
+ 		break;
+ 	case FBNIC_IPC_MBX_TX_IDX:
+-		/* Force version to 1 if we successfully requested an update
+-		 * from the firmware. This should be overwritten once we get
+-		 * the actual version from the firmware in the capabilities
+-		 * request message.
+-		 */
+-		if (!fbnic_fw_xmit_cap_msg(fbd) &&
+-		    !fbd->fw_cap.running.mgmt.version)
+-			fbd->fw_cap.running.mgmt.version = 1;
++		/* Enable DMA reads from the device */
++		wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AR_CFG,
++		     FBNIC_PUL_OB_TLP_HDR_AR_CFG_BME);
+ 		break;
+ 	}
+ }
+ 
+-static void fbnic_mbx_postinit(struct fbnic_dev *fbd)
++static bool fbnic_mbx_event(struct fbnic_dev *fbd)
+ {
+-	int i;
+-
+-	/* We only need to do this on the first interrupt following init.
++	/* We only need to do this on the first interrupt following reset.
+ 	 * this primes the mailbox so that we will have cleared all the
+ 	 * skip descriptors.
+ 	 */
+ 	if (!(rd32(fbd, FBNIC_INTR_STATUS(0)) & (1u << FBNIC_FW_MSIX_ENTRY)))
+-		return;
++		return false;
+ 
+ 	wr32(fbd, FBNIC_INTR_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY);
+ 
+-	for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
+-		fbnic_mbx_postinit_desc_ring(fbd, i);
++	return true;
+ }
+ 
+ /**
+@@ -726,7 +730,7 @@ static void fbnic_mbx_process_rx_msgs(struct fbnic_dev *fbd)
+ 
+ void fbnic_mbx_poll(struct fbnic_dev *fbd)
+ {
+-	fbnic_mbx_postinit(fbd);
++	fbnic_mbx_event(fbd);
+ 
+ 	fbnic_mbx_process_tx_msgs(fbd);
+ 	fbnic_mbx_process_rx_msgs(fbd);
+@@ -734,60 +738,80 @@ void fbnic_mbx_poll(struct fbnic_dev *fbd)
+ 
+ int fbnic_mbx_poll_tx_ready(struct fbnic_dev *fbd)
+ {
+-	struct fbnic_fw_mbx *tx_mbx;
+-	int attempts = 50;
++	unsigned long timeout = jiffies + 10 * HZ + 1;
++	int err, i;
+ 
+-	/* Immediate fail if BAR4 isn't there */
+-	if (!fbnic_fw_present(fbd))
+-		return -ENODEV;
++	do {
++		if (!time_is_after_jiffies(timeout))
++			return -ETIMEDOUT;
+ 
+-	tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
+-	while (!tx_mbx->ready && --attempts) {
+ 		/* Force the firmware to trigger an interrupt response to
+ 		 * avoid the mailbox getting stuck closed if the interrupt
+ 		 * is reset.
+ 		 */
+-		fbnic_mbx_init_desc_ring(fbd, FBNIC_IPC_MBX_TX_IDX);
++		fbnic_mbx_reset_desc_ring(fbd, FBNIC_IPC_MBX_TX_IDX);
+ 
+-		msleep(200);
++		/* Immediate fail if BAR4 went away */
++		if (!fbnic_fw_present(fbd))
++			return -ENODEV;
+ 
+-		fbnic_mbx_poll(fbd);
+-	}
++		msleep(20);
++	} while (!fbnic_mbx_event(fbd));
++
++	/* FW has shown signs of life. Enable DMA and start Tx/Rx */
++	for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
++		fbnic_mbx_init_desc_ring(fbd, i);
+ 
+-	return attempts ? 0 : -ETIMEDOUT;
++	/* Request an update from the firmware. This should overwrite
++	 * mgmt.version once we get the actual version from the firmware
++	 * in the capabilities request message.
++	 */
++	err = fbnic_fw_xmit_simple_msg(fbd, FBNIC_TLV_MSG_ID_HOST_CAP_REQ);
++	if (err)
++		goto clean_mbx;
++
++	/* Use "1" to indicate we entered the state waiting for a response */
++	fbd->fw_cap.running.mgmt.version = 1;
++
++	return 0;
++clean_mbx:
++	/* Cleanup Rx buffers and disable mailbox */
++	fbnic_mbx_clean(fbd);
++	return err;
+ }
+ 
+ void fbnic_mbx_flush_tx(struct fbnic_dev *fbd)
+ {
++	unsigned long timeout = jiffies + 10 * HZ + 1;
+ 	struct fbnic_fw_mbx *tx_mbx;
+-	int attempts = 50;
+-	u8 count = 0;
+-
+-	/* Nothing to do if there is no mailbox */
+-	if (!fbnic_fw_present(fbd))
+-		return;
++	u8 tail;
+ 
+ 	/* Record current Rx stats */
+ 	tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
+ 
+-	/* Nothing to do if mailbox never got to ready */
+-	if (!tx_mbx->ready)
+-		return;
++	spin_lock_irq(&fbd->fw_tx_lock);
++
++	/* Clear ready to prevent any further attempts to transmit */
++	tx_mbx->ready = false;
++
++	/* Read tail to determine the last tail state for the ring */
++	tail = tx_mbx->tail;
++
++	spin_unlock_irq(&fbd->fw_tx_lock);
+ 
+ 	/* Give firmware time to process packet,
+-	 * we will wait up to 10 seconds which is 50 waits of 200ms.
++	 * we will wait up to 10 seconds which is 500 waits of 20ms.
+ 	 */
+ 	do {
+ 		u8 head = tx_mbx->head;
+ 
+-		if (head == tx_mbx->tail)
++		/* Tx ring is empty once head == tail */
++		if (head == tail)
+ 			break;
+ 
+-		msleep(200);
++		msleep(20);
+ 		fbnic_mbx_process_tx_msgs(fbd);
+-
+-		count += (tx_mbx->head - head) % FBNIC_IPC_MBX_DESC_LEN;
+-	} while (count < FBNIC_IPC_MBX_DESC_LEN && --attempts);
++	} while (time_is_after_jiffies(timeout));
+ }
+ 
+ void fbnic_get_fw_ver_commit_str(struct fbnic_dev *fbd, char *fw_version,
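
The descriptor-write ordering introduced above (upper word first, then the
lower word that carries the state) lets the consumer detect a torn 64-bit
update by re-reading the word that is written last. A hedged sketch of that
reader-side check, with read32() standing in for whatever accessor the
firmware actually uses:

    u32 lo, hi;

    do {
        lo = read32(desc_offset);       /* state word, written last */
        hi = read32(desc_offset + 1);
    } while (read32(desc_offset) != lo); /* unchanged => consistent pair */
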
+diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
+index 7b654d0a6dac66..06fa65e4f35b61 100644
+--- a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
++++ b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
+@@ -79,12 +79,6 @@ static void fbnic_mac_init_axi(struct fbnic_dev *fbd)
+ 	fbnic_init_readrq(fbd, FBNIC_QM_RNI_RBP_CTL, cls, readrq);
+ 	fbnic_init_mps(fbd, FBNIC_QM_RNI_RDE_CTL, cls, mps);
+ 	fbnic_init_mps(fbd, FBNIC_QM_RNI_RCM_CTL, cls, mps);
+-
+-	/* Enable XALI AR/AW outbound */
+-	wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AW_CFG,
+-	     FBNIC_PUL_OB_TLP_HDR_AW_CFG_BME);
+-	wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AR_CFG,
+-	     FBNIC_PUL_OB_TLP_HDR_AR_CFG_BME);
+ }
+ 
+ static void fbnic_mac_init_qm(struct fbnic_dev *fbd)
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index 60027b439021b8..fbd1150c33cce7 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -298,6 +298,10 @@ struct send_queue {
+ 
+ 	/* Record whether sq is in reset state. */
+ 	bool reset;
++
++	struct xsk_buff_pool *xsk_pool;
++
++	dma_addr_t xsk_hdr_dma_addr;
+ };
+ 
+ /* Internal representation of a receive virtqueue */
+@@ -501,6 +505,8 @@ struct virtio_net_common_hdr {
+ 	};
+ };
+ 
++static struct virtio_net_common_hdr xsk_hdr;
++
+ static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
+ static void virtnet_sq_free_unused_buf_done(struct virtqueue *vq);
+ static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
+@@ -5421,6 +5427,10 @@ static void virtnet_get_base_stats(struct net_device *dev,
+ 
+ 	if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED)
+ 		tx->hw_drop_ratelimits = 0;
++
++	netdev_stat_queue_sum(dev,
++			      dev->real_num_rx_queues, vi->max_queue_pairs, rx,
++			      dev->real_num_tx_queues, vi->max_queue_pairs, tx);
+ }
+ 
+ static const struct netdev_stat_ops virtnet_stat_ops = {
+@@ -5556,6 +5566,29 @@ static int virtnet_rq_bind_xsk_pool(struct virtnet_info *vi, struct receive_queu
+ 	return err;
+ }
+ 
++static int virtnet_sq_bind_xsk_pool(struct virtnet_info *vi,
++				    struct send_queue *sq,
++				    struct xsk_buff_pool *pool)
++{
++	int err, qindex;
++
++	qindex = sq - vi->sq;
++
++	virtnet_tx_pause(vi, sq);
++
++	err = virtqueue_reset(sq->vq, virtnet_sq_free_unused_buf);
++	if (err) {
++		netdev_err(vi->dev, "reset tx fail: tx queue index: %d err: %d\n", qindex, err);
++		pool = NULL;
++	}
++
++	sq->xsk_pool = pool;
++
++	virtnet_tx_resume(vi, sq);
++
++	return err;
++}
++
+ static int virtnet_xsk_pool_enable(struct net_device *dev,
+ 				   struct xsk_buff_pool *pool,
+ 				   u16 qid)
+@@ -5564,6 +5597,7 @@ static int virtnet_xsk_pool_enable(struct net_device *dev,
+ 	struct receive_queue *rq;
+ 	struct device *dma_dev;
+ 	struct send_queue *sq;
++	dma_addr_t hdr_dma;
+ 	int err, size;
+ 
+ 	if (vi->hdr_len > xsk_pool_get_headroom(pool))
+@@ -5601,6 +5635,13 @@ static int virtnet_xsk_pool_enable(struct net_device *dev,
+ 	if (!rq->xsk_buffs)
+ 		return -ENOMEM;
+ 
++	hdr_dma = virtqueue_dma_map_single_attrs(sq->vq, &xsk_hdr, vi->hdr_len,
++						 DMA_TO_DEVICE, 0);
++	if (virtqueue_dma_mapping_error(sq->vq, hdr_dma)) {
++		err = -ENOMEM;
++		goto err_free_buffs;
++	}
++
+ 	err = xsk_pool_dma_map(pool, dma_dev, 0);
+ 	if (err)
+ 		goto err_xsk_map;
+@@ -5609,11 +5650,26 @@ static int virtnet_xsk_pool_enable(struct net_device *dev,
+ 	if (err)
+ 		goto err_rq;
+ 
++	err = virtnet_sq_bind_xsk_pool(vi, sq, pool);
++	if (err)
++		goto err_sq;
++
++	/* We do not currently support tx offload (such as tx csum), so the
++	 * tx virtnet hdr is always zero and all tx packets can share a
++	 * single hdr.
++	 */
++	sq->xsk_hdr_dma_addr = hdr_dma;
++
+ 	return 0;
+ 
++err_sq:
++	virtnet_rq_bind_xsk_pool(vi, rq, NULL);
+ err_rq:
+ 	xsk_pool_dma_unmap(pool, 0);
+ err_xsk_map:
++	virtqueue_dma_unmap_single_attrs(rq->vq, hdr_dma, vi->hdr_len,
++					 DMA_TO_DEVICE, 0);
++err_free_buffs:
++	kvfree(rq->xsk_buffs);
+ 	return err;
+ }
+ 
+@@ -5622,19 +5678,24 @@ static int virtnet_xsk_pool_disable(struct net_device *dev, u16 qid)
+ 	struct virtnet_info *vi = netdev_priv(dev);
+ 	struct xsk_buff_pool *pool;
+ 	struct receive_queue *rq;
++	struct send_queue *sq;
+ 	int err;
+ 
+ 	if (qid >= vi->curr_queue_pairs)
+ 		return -EINVAL;
+ 
++	sq = &vi->sq[qid];
+ 	rq = &vi->rq[qid];
+ 
+ 	pool = rq->xsk_pool;
+ 
+ 	err = virtnet_rq_bind_xsk_pool(vi, rq, NULL);
++	err |= virtnet_sq_bind_xsk_pool(vi, sq, NULL);
+ 
+ 	xsk_pool_dma_unmap(pool, 0);
+ 
++	virtqueue_dma_unmap_single_attrs(sq->vq, sq->xsk_hdr_dma_addr,
++					 vi->hdr_len, DMA_TO_DEVICE, 0);
+ 	kvfree(rq->xsk_buffs);
+ 
+ 	return err;
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index f19410723b1795..98dad1bdff440a 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -4473,7 +4473,8 @@ static void nvme_fw_act_work(struct work_struct *work)
+ 		msleep(100);
+ 	}
+ 
+-	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE))
++	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING) ||
++	    !nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE))
+ 		return;
+ 
+ 	nvme_unquiesce_io_queues(ctrl);
+diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c
+index 055518ee354dc9..e9e9aaa91770ae 100644
+--- a/drivers/pci/hotplug/s390_pci_hpc.c
++++ b/drivers/pci/hotplug/s390_pci_hpc.c
+@@ -59,7 +59,6 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
+ 
+ 	pdev = pci_get_slot(zdev->zbus->bus, zdev->devfn);
+ 	if (pdev && pci_num_vf(pdev)) {
+-		pci_dev_put(pdev);
+ 		rc = -EBUSY;
+ 		goto out;
+ 	}
+diff --git a/drivers/staging/axis-fifo/axis-fifo.c b/drivers/staging/axis-fifo/axis-fifo.c
+index 1bbb9a6db5979a..6769f066b0b4e8 100644
+--- a/drivers/staging/axis-fifo/axis-fifo.c
++++ b/drivers/staging/axis-fifo/axis-fifo.c
+@@ -393,16 +393,14 @@ static ssize_t axis_fifo_read(struct file *f, char __user *buf,
+ 
+ 	bytes_available = ioread32(fifo->base_addr + XLLF_RLR_OFFSET);
+ 	if (!bytes_available) {
+-		dev_err(fifo->dt_device, "received a packet of length 0 - fifo core will be reset\n");
+-		reset_ip_core(fifo);
++		dev_err(fifo->dt_device, "received a packet of length 0\n");
+ 		ret = -EIO;
+ 		goto end_unlock;
+ 	}
+ 
+ 	if (bytes_available > len) {
+-		dev_err(fifo->dt_device, "user read buffer too small (available bytes=%zu user buffer bytes=%zu) - fifo core will be reset\n",
++		dev_err(fifo->dt_device, "user read buffer too small (available bytes=%zu user buffer bytes=%zu)\n",
+ 			bytes_available, len);
+-		reset_ip_core(fifo);
+ 		ret = -EINVAL;
+ 		goto end_unlock;
+ 	}
+@@ -411,8 +409,7 @@ static ssize_t axis_fifo_read(struct file *f, char __user *buf,
+ 		/* this probably can't happen unless IP
+ 		 * registers were previously mishandled
+ 		 */
+-		dev_err(fifo->dt_device, "received a packet that isn't word-aligned - fifo core will be reset\n");
+-		reset_ip_core(fifo);
++		dev_err(fifo->dt_device, "received a packet that isn't word-aligned\n");
+ 		ret = -EIO;
+ 		goto end_unlock;
+ 	}
+@@ -433,7 +430,6 @@ static ssize_t axis_fifo_read(struct file *f, char __user *buf,
+ 
+ 		if (copy_to_user(buf + copied * sizeof(u32), tmp_buf,
+ 				 copy * sizeof(u32))) {
+-			reset_ip_core(fifo);
+ 			ret = -EFAULT;
+ 			goto end_unlock;
+ 		}
+@@ -542,7 +538,6 @@ static ssize_t axis_fifo_write(struct file *f, const char __user *buf,
+ 
+ 		if (copy_from_user(tmp_buf, buf + copied * sizeof(u32),
+ 				   copy * sizeof(u32))) {
+-			reset_ip_core(fifo);
+ 			ret = -EFAULT;
+ 			goto end_unlock;
+ 		}
+@@ -775,9 +770,6 @@ static int axis_fifo_parse_dt(struct axis_fifo *fifo)
+ 		goto end;
+ 	}
+ 
+-	/* IP sets TDFV to fifo depth - 4 so we will do the same */
+-	fifo->tx_fifo_depth -= 4;
+-
+ 	ret = get_dts_property(fifo, "xlnx,use-rx-data", &fifo->has_rx_fifo);
+ 	if (ret) {
+ 		dev_err(fifo->dt_device, "missing xlnx,use-rx-data property\n");
+diff --git a/drivers/staging/iio/adc/ad7816.c b/drivers/staging/iio/adc/ad7816.c
+index 6c14d7bcdd6750..081b17f498638b 100644
+--- a/drivers/staging/iio/adc/ad7816.c
++++ b/drivers/staging/iio/adc/ad7816.c
+@@ -136,7 +136,7 @@ static ssize_t ad7816_store_mode(struct device *dev,
+ 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ 	struct ad7816_chip_info *chip = iio_priv(indio_dev);
+ 
+-	if (strcmp(buf, "full")) {
++	if (strcmp(buf, "full") == 0) {
+ 		gpiod_set_value(chip->rdwr_pin, 1);
+ 		chip->mode = AD7816_FULL;
+ 	} else {
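
The one-line fix above is a classic strcmp() polarity slip: strcmp() returns
0 on a match, so the bare test selected full mode for every string except
"full". With the fix:

    if (strcmp(buf, "full") == 0)    /* strcmp() == 0 means equal */
        chip->mode = AD7816_FULL;    /* taken only when buf == "full" */
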
+diff --git a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
+index deec33f63bcf82..e6724329356b92 100644
+--- a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
++++ b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
+@@ -1902,6 +1902,7 @@ static int bcm2835_mmal_probe(struct vchiq_device *device)
+ 				__func__, ret);
+ 			goto free_dev;
+ 		}
++		dev->v4l2_dev.dev = &device->dev;
+ 
+ 		/* setup v4l controls */
+ 		ret = bcm2835_mmal_init_controls(dev, &dev->ctrl_handler);
+diff --git a/drivers/usb/cdns3/cdnsp-gadget.c b/drivers/usb/cdns3/cdnsp-gadget.c
+index 4a3f0f95825698..79d06958d61936 100644
+--- a/drivers/usb/cdns3/cdnsp-gadget.c
++++ b/drivers/usb/cdns3/cdnsp-gadget.c
+@@ -138,6 +138,26 @@ static void cdnsp_clear_port_change_bit(struct cdnsp_device *pdev,
+ 	       (portsc & PORT_CHANGE_BITS), port_regs);
+ }
+ 
++static void cdnsp_set_apb_timeout_value(struct cdnsp_device *pdev)
++{
++	struct cdns *cdns = dev_get_drvdata(pdev->dev);
++	__le32 __iomem *reg;
++	void __iomem *base;
++	u32 offset = 0;
++	u32 val;
++
++	if (!cdns->override_apb_timeout)
++		return;
++
++	base = &pdev->cap_regs->hc_capbase;
++	offset = cdnsp_find_next_ext_cap(base, offset, D_XEC_PRE_REGS_CAP);
++	reg = base + offset + REG_CHICKEN_BITS_3_OFFSET;
++
++	val  = le32_to_cpu(readl(reg));
++	val = CHICKEN_APB_TIMEOUT_SET(val, cdns->override_apb_timeout);
++	writel(cpu_to_le32(val), reg);
++}
++
+ static void cdnsp_set_chicken_bits_2(struct cdnsp_device *pdev, u32 bit)
+ {
+ 	__le32 __iomem *reg;
+@@ -1772,6 +1792,8 @@ static void cdnsp_get_rev_cap(struct cdnsp_device *pdev)
+ 	reg += cdnsp_find_next_ext_cap(reg, 0, RTL_REV_CAP);
+ 	pdev->rev_cap  = reg;
+ 
++	pdev->rtl_revision = readl(&pdev->rev_cap->rtl_revision);
++
+ 	dev_info(pdev->dev, "Rev: %08x/%08x, eps: %08x, buff: %08x/%08x\n",
+ 		 readl(&pdev->rev_cap->ctrl_revision),
+ 		 readl(&pdev->rev_cap->rtl_revision),
+@@ -1797,6 +1819,15 @@ static int cdnsp_gen_setup(struct cdnsp_device *pdev)
+ 	pdev->hci_version = HC_VERSION(pdev->hcc_params);
+ 	pdev->hcc_params = readl(&pdev->cap_regs->hcc_params);
+ 
++	/*
++	 * Override the APB timeout value to give the controller more time for
++	 * enabling UTMI clock and synchronizing APB and UTMI clock domains.
++	 * This fix is platform specific and is required to fix an issue
++	 * with reading an incorrect value from the PORTSC register after
++	 * resuming from L1 state.
++	 */
++	cdnsp_set_apb_timeout_value(pdev);
++
+ 	cdnsp_get_rev_cap(pdev);
+ 
+ 	/* Make sure the Device Controller is halted. */
+diff --git a/drivers/usb/cdns3/cdnsp-gadget.h b/drivers/usb/cdns3/cdnsp-gadget.h
+index 84887dfea7635b..12534be52f39df 100644
+--- a/drivers/usb/cdns3/cdnsp-gadget.h
++++ b/drivers/usb/cdns3/cdnsp-gadget.h
+@@ -520,6 +520,9 @@ struct cdnsp_rev_cap {
+ #define REG_CHICKEN_BITS_2_OFFSET	0x48
+ #define CHICKEN_XDMA_2_TP_CACHE_DIS	BIT(28)
+ 
++#define REG_CHICKEN_BITS_3_OFFSET       0x4C
++#define CHICKEN_APB_TIMEOUT_SET(p, val) (((p) & ~GENMASK(21, 0)) | (val))
++
+ /* XBUF Extended Capability ID. */
+ #define XBUF_CAP_ID			0xCB
+ #define XBUF_RX_TAG_MASK_0_OFFSET	0x1C
+@@ -1357,6 +1360,7 @@ struct cdnsp_port {
+  * @rev_cap: Controller Capabilities Registers.
+  * @hcs_params1: Cached register copies of read-only HCSPARAMS1
+  * @hcc_params: Cached register copies of read-only HCCPARAMS1
++ * @rtl_revision: Cached controller rtl revision.
+  * @setup: Temporary buffer for setup packet.
+  * @ep0_preq: Internal allocated request used during enumeration.
+  * @ep0_stage: ep0 stage during enumeration process.
+@@ -1411,6 +1415,8 @@ struct cdnsp_device {
+ 	__u32 hcs_params1;
+ 	__u32 hcs_params3;
+ 	__u32 hcc_params;
++	#define RTL_REVISION_NEW_LPM 0x2700
++	__u32 rtl_revision;
+ 	/* Lock used in interrupt thread context. */
+ 	spinlock_t lock;
+ 	struct usb_ctrlrequest setup;
+diff --git a/drivers/usb/cdns3/cdnsp-pci.c b/drivers/usb/cdns3/cdnsp-pci.c
+index 2d05368a6745a1..36781ea60f6aa4 100644
+--- a/drivers/usb/cdns3/cdnsp-pci.c
++++ b/drivers/usb/cdns3/cdnsp-pci.c
+@@ -34,6 +34,8 @@
+ #define PCI_CLASS_SERIAL_USB_CDNS_USB3	(PCI_CLASS_SERIAL_USB << 8 | 0x80)
+ #define PCI_CLASS_SERIAL_USB_CDNS_UDC	PCI_CLASS_SERIAL_USB_DEVICE
+ 
++#define CHICKEN_APB_TIMEOUT_VALUE       0x1C20
++
+ static struct pci_dev *cdnsp_get_second_fun(struct pci_dev *pdev)
+ {
+ 	/*
+@@ -145,6 +147,14 @@ static int cdnsp_pci_probe(struct pci_dev *pdev,
+ 		cdnsp->otg_irq = pdev->irq;
+ 	}
+ 
++	/*
++	 * Cadence PCI-based platforms require a longer APB timeout to
++	 * fix a domain clock synchronization issue after resuming the
++	 * controller from L1 state.
++	 */
++	cdnsp->override_apb_timeout = CHICKEN_APB_TIMEOUT_VALUE;
++	pci_set_drvdata(pdev, cdnsp);
++
+ 	if (pci_is_enabled(func)) {
+ 		cdnsp->dev = dev;
+ 		cdnsp->gadget_init = cdnsp_gadget_init;
+@@ -154,8 +164,6 @@ static int cdnsp_pci_probe(struct pci_dev *pdev,
+ 			goto free_cdnsp;
+ 	}
+ 
+-	pci_set_drvdata(pdev, cdnsp);
+-
+ 	device_wakeup_enable(&pdev->dev);
+ 	if (pci_dev_run_wake(pdev))
+ 		pm_runtime_put_noidle(&pdev->dev);
+diff --git a/drivers/usb/cdns3/cdnsp-ring.c b/drivers/usb/cdns3/cdnsp-ring.c
+index 46852529499d16..fd06cb85c4ea84 100644
+--- a/drivers/usb/cdns3/cdnsp-ring.c
++++ b/drivers/usb/cdns3/cdnsp-ring.c
+@@ -308,7 +308,8 @@ static bool cdnsp_ring_ep_doorbell(struct cdnsp_device *pdev,
+ 
+ 	writel(db_value, reg_addr);
+ 
+-	cdnsp_force_l0_go(pdev);
++	if (pdev->rtl_revision < RTL_REVISION_NEW_LPM)
++		cdnsp_force_l0_go(pdev);
+ 
+ 	/* Doorbell was set. */
+ 	return true;
+diff --git a/drivers/usb/cdns3/core.h b/drivers/usb/cdns3/core.h
+index 57d47348dc193b..ac30ee21309d02 100644
+--- a/drivers/usb/cdns3/core.h
++++ b/drivers/usb/cdns3/core.h
+@@ -79,6 +79,8 @@ struct cdns3_platform_data {
+  * @pdata: platform data from glue layer
+  * @lock: spinlock structure
+  * @xhci_plat_data: xhci private data structure pointer
++ * @override_apb_timeout: holds the APB timeout value. If 0, the default
++ *                        value in CHICKEN_BITS_3 is preserved.
+  * @gadget_init: pointer to gadget initialization function
+  */
+ struct cdns {
+@@ -117,6 +119,7 @@ struct cdns {
+ 	struct cdns3_platform_data	*pdata;
+ 	spinlock_t			lock;
+ 	struct xhci_plat_priv		*xhci_plat_data;
++	u32                             override_apb_timeout;
+ 
+ 	int (*gadget_init)(struct cdns *cdns);
+ };
+diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
+index 34e46ef308abfd..740d2d2b19fbe0 100644
+--- a/drivers/usb/class/usbtmc.c
++++ b/drivers/usb/class/usbtmc.c
+@@ -482,6 +482,7 @@ static int usbtmc_get_stb(struct usbtmc_file_data *file_data, __u8 *stb)
+ 	u8 *buffer;
+ 	u8 tag;
+ 	int rv;
++	long wait_rv;
+ 
+ 	dev_dbg(dev, "Enter ioctl_read_stb iin_ep_present: %d\n",
+ 		data->iin_ep_present);
+@@ -511,16 +512,17 @@ static int usbtmc_get_stb(struct usbtmc_file_data *file_data, __u8 *stb)
+ 	}
+ 
+ 	if (data->iin_ep_present) {
+-		rv = wait_event_interruptible_timeout(
++		wait_rv = wait_event_interruptible_timeout(
+ 			data->waitq,
+ 			atomic_read(&data->iin_data_valid) != 0,
+ 			file_data->timeout);
+-		if (rv < 0) {
+-			dev_dbg(dev, "wait interrupted %d\n", rv);
++		if (wait_rv < 0) {
++			dev_dbg(dev, "wait interrupted %ld\n", wait_rv);
++			rv = wait_rv;
+ 			goto exit;
+ 		}
+ 
+-		if (rv == 0) {
++		if (wait_rv == 0) {
+ 			dev_dbg(dev, "wait timed out\n");
+ 			rv = -ETIMEDOUT;
+ 			goto exit;
+@@ -539,6 +541,8 @@ static int usbtmc_get_stb(struct usbtmc_file_data *file_data, __u8 *stb)
+ 
+ 	dev_dbg(dev, "stb:0x%02x received %d\n", (unsigned int)*stb, rv);
+ 
++	rv = 0;
++
+  exit:
+ 	/* bump interrupt bTag */
+ 	data->iin_bTag += 1;
+@@ -602,9 +606,9 @@ static int usbtmc488_ioctl_wait_srq(struct usbtmc_file_data *file_data,
+ {
+ 	struct usbtmc_device_data *data = file_data->data;
+ 	struct device *dev = &data->intf->dev;
+-	int rv;
+ 	u32 timeout;
+ 	unsigned long expire;
++	long wait_rv;
+ 
+ 	if (!data->iin_ep_present) {
+ 		dev_dbg(dev, "no interrupt endpoint present\n");
+@@ -618,25 +622,24 @@ static int usbtmc488_ioctl_wait_srq(struct usbtmc_file_data *file_data,
+ 
+ 	mutex_unlock(&data->io_mutex);
+ 
+-	rv = wait_event_interruptible_timeout(
+-			data->waitq,
+-			atomic_read(&file_data->srq_asserted) != 0 ||
+-			atomic_read(&file_data->closing),
+-			expire);
++	wait_rv = wait_event_interruptible_timeout(
++		data->waitq,
++		atomic_read(&file_data->srq_asserted) != 0 ||
++		atomic_read(&file_data->closing),
++		expire);
+ 
+ 	mutex_lock(&data->io_mutex);
+ 
+ 	/* Note! disconnect or close could be called in the meantime */
+ 	if (atomic_read(&file_data->closing) || data->zombie)
+-		rv = -ENODEV;
++		return -ENODEV;
+ 
+-	if (rv < 0) {
+-		/* dev can be invalid now! */
+-		pr_debug("%s - wait interrupted %d\n", __func__, rv);
+-		return rv;
++	if (wait_rv < 0) {
++		dev_dbg(dev, "%s - wait interrupted %ld\n", __func__, wait_rv);
++		return wait_rv;
+ 	}
+ 
+-	if (rv == 0) {
++	if (wait_rv == 0) {
+ 		dev_dbg(dev, "%s - wait timed out\n", __func__);
+ 		return -ETIMEDOUT;
+ 	}
+@@ -830,6 +833,7 @@ static ssize_t usbtmc_generic_read(struct usbtmc_file_data *file_data,
+ 	unsigned long expire;
+ 	int bufcount = 1;
+ 	int again = 0;
++	long wait_rv;
+ 
+ 	/* mutex already locked */
+ 
+@@ -942,19 +946,24 @@ static ssize_t usbtmc_generic_read(struct usbtmc_file_data *file_data,
+ 		if (!(flags & USBTMC_FLAG_ASYNC)) {
+ 			dev_dbg(dev, "%s: before wait time %lu\n",
+ 				__func__, expire);
+-			retval = wait_event_interruptible_timeout(
++			wait_rv = wait_event_interruptible_timeout(
+ 				file_data->wait_bulk_in,
+ 				usbtmc_do_transfer(file_data),
+ 				expire);
+ 
+-			dev_dbg(dev, "%s: wait returned %d\n",
+-				__func__, retval);
++			dev_dbg(dev, "%s: wait returned %ld\n",
++				__func__, wait_rv);
++
++			if (wait_rv < 0) {
++				retval = wait_rv;
++				goto error;
++			}
+ 
+-			if (retval <= 0) {
+-				if (retval == 0)
+-					retval = -ETIMEDOUT;
++			if (wait_rv == 0) {
++				retval = -ETIMEDOUT;
+ 				goto error;
+ 			}
++
+ 		}
+ 
+ 		urb = usb_get_from_anchor(&file_data->in_anchor);
+@@ -1380,7 +1389,10 @@ static ssize_t usbtmc_read(struct file *filp, char __user *buf,
+ 	if (!buffer)
+ 		return -ENOMEM;
+ 
+-	mutex_lock(&data->io_mutex);
++	retval = mutex_lock_interruptible(&data->io_mutex);
++	if (retval < 0)
++		goto exit_nolock;
++
+ 	if (data->zombie) {
+ 		retval = -ENODEV;
+ 		goto exit;
+@@ -1503,6 +1515,7 @@ static ssize_t usbtmc_read(struct file *filp, char __user *buf,
+ 
+ exit:
+ 	mutex_unlock(&data->io_mutex);
++exit_nolock:
+ 	kfree(buffer);
+ 	return retval;
+ }
+diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
+index f288d88cd10519..30fe7df1c3ca0e 100644
+--- a/drivers/usb/dwc3/core.h
++++ b/drivers/usb/dwc3/core.h
+@@ -1168,6 +1168,9 @@ struct dwc3_scratchpad_array {
+  * @gsbuscfg0_reqinfo: store GSBUSCFG0.DATRDREQINFO, DESRDREQINFO,
+  *		       DATWRREQINFO, and DESWRREQINFO value passed from
+  *		       glue driver.
++ * @wakeup_pending_funcs: Bitmap of interfaces that have requested function
++ *			 wakeup; each bit position corresponds to an
++ *			 interface_id.
+  */
+ struct dwc3 {
+ 	struct work_struct	drd_work;
+@@ -1398,6 +1401,7 @@ struct dwc3 {
+ 	int			num_ep_resized;
+ 	struct dentry		*debug_root;
+ 	u32			gsbuscfg0_reqinfo;
++	u32			wakeup_pending_funcs;
+ };
+ 
+ #define INCRX_BURST_MODE 0
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index e72bac650981de..76e6000c65c789 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -277,8 +277,6 @@ int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned int cmd,
+ 	return ret;
+ }
+ 
+-static int __dwc3_gadget_wakeup(struct dwc3 *dwc, bool async);
+-
+ /**
+  * dwc3_send_gadget_ep_cmd - issue an endpoint command
+  * @dep: the endpoint to which the command is going to be issued
+@@ -2348,10 +2346,8 @@ static int dwc3_gadget_get_frame(struct usb_gadget *g)
+ 	return __dwc3_gadget_get_frame(dwc);
+ }
+ 
+-static int __dwc3_gadget_wakeup(struct dwc3 *dwc, bool async)
++static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
+ {
+-	int			retries;
+-
+ 	int			ret;
+ 	u32			reg;
+ 
+@@ -2379,8 +2375,7 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc, bool async)
+ 		return -EINVAL;
+ 	}
+ 
+-	if (async)
+-		dwc3_gadget_enable_linksts_evts(dwc, true);
++	dwc3_gadget_enable_linksts_evts(dwc, true);
+ 
+ 	ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
+ 	if (ret < 0) {
+@@ -2399,27 +2394,8 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc, bool async)
+ 
+ 	/*
+ 	 * Since link status change events are enabled we will receive
+-	 * an U0 event when wakeup is successful. So bail out.
++	 * a U0 event when wakeup is successful.
+ 	 */
+-	if (async)
+-		return 0;
+-
+-	/* poll until Link State changes to ON */
+-	retries = 20000;
+-
+-	while (retries--) {
+-		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
+-
+-		/* in HS, means ON */
+-		if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
+-			break;
+-	}
+-
+-	if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
+-		dev_err(dwc->dev, "failed to send remote wakeup\n");
+-		return -EINVAL;
+-	}
+-
+ 	return 0;
+ }
+ 
+@@ -2440,7 +2416,7 @@ static int dwc3_gadget_wakeup(struct usb_gadget *g)
+ 		spin_unlock_irqrestore(&dwc->lock, flags);
+ 		return -EINVAL;
+ 	}
+-	ret = __dwc3_gadget_wakeup(dwc, true);
++	ret = __dwc3_gadget_wakeup(dwc);
+ 
+ 	spin_unlock_irqrestore(&dwc->lock, flags);
+ 
+@@ -2468,14 +2444,10 @@ static int dwc3_gadget_func_wakeup(struct usb_gadget *g, int intf_id)
+ 	 */
+ 	link_state = dwc3_gadget_get_link_state(dwc);
+ 	if (link_state == DWC3_LINK_STATE_U3) {
+-		ret = __dwc3_gadget_wakeup(dwc, false);
+-		if (ret) {
+-			spin_unlock_irqrestore(&dwc->lock, flags);
+-			return -EINVAL;
+-		}
+-		dwc3_resume_gadget(dwc);
+-		dwc->suspended = false;
+-		dwc->link_state = DWC3_LINK_STATE_U0;
++		dwc->wakeup_pending_funcs |= BIT(intf_id);
++		ret = __dwc3_gadget_wakeup(dwc);
++		spin_unlock_irqrestore(&dwc->lock, flags);
++		return ret;
+ 	}
+ 
+ 	ret = dwc3_send_gadget_generic_command(dwc, DWC3_DGCMD_DEV_NOTIFICATION,
+@@ -4320,6 +4292,8 @@ static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
+ {
+ 	enum dwc3_link_state	next = evtinfo & DWC3_LINK_STATE_MASK;
+ 	unsigned int		pwropt;
++	int			ret;
++	int			intf_id;
+ 
+ 	/*
+ 	 * WORKAROUND: DWC3 < 2.50a have an issue when configured without
+@@ -4395,7 +4369,7 @@ static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
+ 
+ 	switch (next) {
+ 	case DWC3_LINK_STATE_U0:
+-		if (dwc->gadget->wakeup_armed) {
++		if (dwc->gadget->wakeup_armed || dwc->wakeup_pending_funcs) {
+ 			dwc3_gadget_enable_linksts_evts(dwc, false);
+ 			dwc3_resume_gadget(dwc);
+ 			dwc->suspended = false;
+@@ -4418,6 +4392,18 @@ static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
+ 	}
+ 
+ 	dwc->link_state = next;
++
++	/* Proceed with func wakeup for any interfaces that have requested it */
++	while (dwc->wakeup_pending_funcs && (next == DWC3_LINK_STATE_U0)) {
++		intf_id = ffs(dwc->wakeup_pending_funcs) - 1;
++		ret = dwc3_send_gadget_generic_command(dwc, DWC3_DGCMD_DEV_NOTIFICATION,
++						       DWC3_DGCMDPAR_DN_FUNC_WAKE |
++						       DWC3_DGCMDPAR_INTF_SEL(intf_id));
++		if (ret)
++			dev_err(dwc->dev, "Failed to send DN wake for intf %d\n", intf_id);
++
++		dwc->wakeup_pending_funcs &= ~BIT(intf_id);
++	}
+ }
+ 
+ static void dwc3_gadget_suspend_interrupt(struct dwc3 *dwc,
+diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
+index 8402a86176f48c..301a435b9ee373 100644
+--- a/drivers/usb/gadget/composite.c
++++ b/drivers/usb/gadget/composite.c
+@@ -2011,15 +2011,13 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
+ 
+ 		if (f->get_status) {
+ 			status = f->get_status(f);
++
+ 			if (status < 0)
+ 				break;
+-		} else {
+-			/* Set D0 and D1 bits based on func wakeup capability */
+-			if (f->config->bmAttributes & USB_CONFIG_ATT_WAKEUP) {
+-				status |= USB_INTRF_STAT_FUNC_RW_CAP;
+-				if (f->func_wakeup_armed)
+-					status |= USB_INTRF_STAT_FUNC_RW;
+-			}
++
++		/* if D5 is not set, then the device is not wakeup capable */
++			if (!(f->config->bmAttributes & USB_CONFIG_ATT_WAKEUP))
++				status &= ~(USB_INTRF_STAT_FUNC_RW_CAP | USB_INTRF_STAT_FUNC_RW);
+ 		}
+ 
+ 		put_unaligned_le16(status & 0x0000ffff, req->buf);
+diff --git a/drivers/usb/gadget/function/f_ecm.c b/drivers/usb/gadget/function/f_ecm.c
+index 6cb7771e8a692d..549efc84dd8321 100644
+--- a/drivers/usb/gadget/function/f_ecm.c
++++ b/drivers/usb/gadget/function/f_ecm.c
+@@ -892,6 +892,12 @@ static void ecm_resume(struct usb_function *f)
+ 	gether_resume(&ecm->port);
+ }
+ 
++static int ecm_get_status(struct usb_function *f)
++{
++	return (f->func_wakeup_armed ? USB_INTRF_STAT_FUNC_RW : 0) |
++		USB_INTRF_STAT_FUNC_RW_CAP;
++}
++
+ static void ecm_free(struct usb_function *f)
+ {
+ 	struct f_ecm *ecm;
+@@ -960,6 +966,7 @@ static struct usb_function *ecm_alloc(struct usb_function_instance *fi)
+ 	ecm->port.func.disable = ecm_disable;
+ 	ecm->port.func.free_func = ecm_free;
+ 	ecm->port.func.suspend = ecm_suspend;
++	ecm->port.func.get_status = ecm_get_status;
+ 	ecm->port.func.resume = ecm_resume;
+ 
+ 	return &ecm->port.func;
+diff --git a/drivers/usb/gadget/udc/tegra-xudc.c b/drivers/usb/gadget/udc/tegra-xudc.c
+index 7aa46d426f31b2..9bb54da8a6ae15 100644
+--- a/drivers/usb/gadget/udc/tegra-xudc.c
++++ b/drivers/usb/gadget/udc/tegra-xudc.c
+@@ -1749,6 +1749,10 @@ static int __tegra_xudc_ep_disable(struct tegra_xudc_ep *ep)
+ 		val = xudc_readl(xudc, CTRL);
+ 		val &= ~CTRL_RUN;
+ 		xudc_writel(xudc, val, CTRL);
++
++		val = xudc_readl(xudc, ST);
++		if (val & ST_RC)
++			xudc_writel(xudc, ST_RC, ST);
+ 	}
+ 
+ 	dev_info(xudc->dev, "ep %u disabled\n", ep->index);
+diff --git a/drivers/usb/host/uhci-platform.c b/drivers/usb/host/uhci-platform.c
+index 3dec5dd3a0d5ca..712389599d468c 100644
+--- a/drivers/usb/host/uhci-platform.c
++++ b/drivers/usb/host/uhci-platform.c
+@@ -121,7 +121,7 @@ static int uhci_hcd_platform_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	/* Get and enable clock if any specified */
+-	uhci->clk = devm_clk_get(&pdev->dev, NULL);
++	uhci->clk = devm_clk_get_optional(&pdev->dev, NULL);
+ 	if (IS_ERR(uhci->clk)) {
+ 		ret = PTR_ERR(uhci->clk);
+ 		goto err_rmr;
+diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
+index 76f228e7443cb6..89b3079194d7b3 100644
+--- a/drivers/usb/host/xhci-tegra.c
++++ b/drivers/usb/host/xhci-tegra.c
+@@ -1363,6 +1363,7 @@ static void tegra_xhci_id_work(struct work_struct *work)
+ 	tegra->otg_usb3_port = tegra_xusb_padctl_get_usb3_companion(tegra->padctl,
+ 								    tegra->otg_usb2_port);
+ 
++	pm_runtime_get_sync(tegra->dev);
+ 	if (tegra->host_mode) {
+ 		/* switch to host mode */
+ 		if (tegra->otg_usb3_port >= 0) {
+@@ -1392,6 +1393,7 @@ static void tegra_xhci_id_work(struct work_struct *work)
+ 		}
+ 
+ 		tegra_xhci_set_port_power(tegra, true, true);
++		pm_runtime_mark_last_busy(tegra->dev);
+ 
+ 	} else {
+ 		if (tegra->otg_usb3_port >= 0)
+@@ -1399,6 +1401,7 @@ static void tegra_xhci_id_work(struct work_struct *work)
+ 
+ 		tegra_xhci_set_port_power(tegra, true, false);
+ 	}
++	pm_runtime_put_autosuspend(tegra->dev);
+ }
+ 
+ #if IS_ENABLED(CONFIG_PM) || IS_ENABLED(CONFIG_PM_SLEEP)
+diff --git a/drivers/usb/misc/onboard_usb_dev.c b/drivers/usb/misc/onboard_usb_dev.c
+index 27b0a6e182678b..b4d5408a4371bf 100644
+--- a/drivers/usb/misc/onboard_usb_dev.c
++++ b/drivers/usb/misc/onboard_usb_dev.c
+@@ -569,8 +569,14 @@ static void onboard_dev_usbdev_disconnect(struct usb_device *udev)
+ }
+ 
+ static const struct usb_device_id onboard_dev_id_table[] = {
+-	{ USB_DEVICE(VENDOR_ID_CYPRESS, 0x6504) }, /* CYUSB33{0,1,2}x/CYUSB230x 3.0 HUB */
+-	{ USB_DEVICE(VENDOR_ID_CYPRESS, 0x6506) }, /* CYUSB33{0,1,2}x/CYUSB230x 2.0 HUB */
++	{ USB_DEVICE(VENDOR_ID_CYPRESS, 0x6500) }, /* CYUSB330x 3.0 HUB */
++	{ USB_DEVICE(VENDOR_ID_CYPRESS, 0x6502) }, /* CYUSB330x 2.0 HUB */
++	{ USB_DEVICE(VENDOR_ID_CYPRESS, 0x6503) }, /* CYUSB33{0,1}x 2.0 HUB, Vendor Mode */
++	{ USB_DEVICE(VENDOR_ID_CYPRESS, 0x6504) }, /* CYUSB331x 3.0 HUB */
++	{ USB_DEVICE(VENDOR_ID_CYPRESS, 0x6506) }, /* CYUSB331x 2.0 HUB */
++	{ USB_DEVICE(VENDOR_ID_CYPRESS, 0x6507) }, /* CYUSB332x 2.0 HUB, Vendor Mode */
++	{ USB_DEVICE(VENDOR_ID_CYPRESS, 0x6508) }, /* CYUSB332x 3.0 HUB */
++	{ USB_DEVICE(VENDOR_ID_CYPRESS, 0x650a) }, /* CYUSB332x 2.0 HUB */
+ 	{ USB_DEVICE(VENDOR_ID_CYPRESS, 0x6570) }, /* CY7C6563x 2.0 HUB */
+ 	{ USB_DEVICE(VENDOR_ID_GENESYS, 0x0608) }, /* Genesys Logic GL850G USB 2.0 HUB */
+ 	{ USB_DEVICE(VENDOR_ID_GENESYS, 0x0610) }, /* Genesys Logic GL852G USB 2.0 HUB */
+diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
+index 48ddf27704619d..bbd7f53f7d5982 100644
+--- a/drivers/usb/typec/tcpm/tcpm.c
++++ b/drivers/usb/typec/tcpm/tcpm.c
+@@ -5890,7 +5890,7 @@ static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1,
+ 	case SNK_TRY_WAIT_DEBOUNCE:
+ 		if (!tcpm_port_is_sink(port)) {
+ 			port->max_wait = 0;
+-			tcpm_set_state(port, SRC_TRYWAIT, 0);
++			tcpm_set_state(port, SRC_TRYWAIT, PD_T_PD_DEBOUNCE);
+ 		}
+ 		break;
+ 	case SRC_TRY_WAIT:
+diff --git a/drivers/usb/typec/ucsi/displayport.c b/drivers/usb/typec/ucsi/displayport.c
+index 420af5139c70a3..5d24a2321e152d 100644
+--- a/drivers/usb/typec/ucsi/displayport.c
++++ b/drivers/usb/typec/ucsi/displayport.c
+@@ -296,6 +296,8 @@ void ucsi_displayport_remove_partner(struct typec_altmode *alt)
+ 	if (!dp)
+ 		return;
+ 
++	cancel_work_sync(&dp->work);
++
+ 	dp->data.conf = 0;
+ 	dp->data.status = 0;
+ 	dp->initialized = false;
+diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c
+index 1a4ed5a357d360..c9eaba2276365c 100644
+--- a/drivers/vfio/pci/vfio_pci_core.c
++++ b/drivers/vfio/pci/vfio_pci_core.c
+@@ -1658,14 +1658,14 @@ static vm_fault_t vfio_pci_mmap_huge_fault(struct vm_fault *vmf,
+ {
+ 	struct vm_area_struct *vma = vmf->vma;
+ 	struct vfio_pci_core_device *vdev = vma->vm_private_data;
+-	unsigned long pfn, pgoff = vmf->pgoff - vma->vm_pgoff;
++	unsigned long addr = vmf->address & ~((PAGE_SIZE << order) - 1);
++	unsigned long pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
++	unsigned long pfn = vma_to_pfn(vma) + pgoff;
+ 	vm_fault_t ret = VM_FAULT_SIGBUS;
+ 
+-	pfn = vma_to_pfn(vma) + pgoff;
+-
+-	if (order && (pfn & ((1 << order) - 1) ||
+-		      vmf->address & ((PAGE_SIZE << order) - 1) ||
+-		      vmf->address + (PAGE_SIZE << order) > vma->vm_end)) {
++	if (order && (addr < vma->vm_start ||
++		      addr + (PAGE_SIZE << order) > vma->vm_end ||
++		      pfn & ((1 << order) - 1))) {
+ 		ret = VM_FAULT_FALLBACK;
+ 		goto out;
+ 	}
+diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
+index 1f65795cf5d7a2..ef56a2500ed69a 100644
+--- a/drivers/xen/swiotlb-xen.c
++++ b/drivers/xen/swiotlb-xen.c
+@@ -217,6 +217,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
+ 	 * buffering it.
+ 	 */
+ 	if (dma_capable(dev, dev_addr, size, true) &&
++	    !dma_kmalloc_needs_bounce(dev, size, dir) &&
+ 	    !range_straddles_page_boundary(phys, size) &&
+ 		!xen_arch_need_swiotlb(dev, phys, dev_addr) &&
+ 		!is_swiotlb_force_bounce(dev))
+diff --git a/drivers/xen/xenbus/xenbus.h b/drivers/xen/xenbus/xenbus.h
+index 13821e7e825efb..9ac0427724a301 100644
+--- a/drivers/xen/xenbus/xenbus.h
++++ b/drivers/xen/xenbus/xenbus.h
+@@ -77,6 +77,7 @@ enum xb_req_state {
+ struct xb_req_data {
+ 	struct list_head list;
+ 	wait_queue_head_t wq;
++	struct kref kref;
+ 	struct xsd_sockmsg msg;
+ 	uint32_t caller_req_id;
+ 	enum xsd_sockmsg_type type;
+@@ -103,6 +104,7 @@ int xb_init_comms(void);
+ void xb_deinit_comms(void);
+ int xs_watch_msg(struct xs_watch_event *event);
+ void xs_request_exit(struct xb_req_data *req);
++void xs_free_req(struct kref *kref);
+ 
+ int xenbus_match(struct device *_dev, const struct device_driver *_drv);
+ int xenbus_dev_probe(struct device *_dev);
+diff --git a/drivers/xen/xenbus/xenbus_comms.c b/drivers/xen/xenbus/xenbus_comms.c
+index e5fda0256feb3d..82df2da1b880b8 100644
+--- a/drivers/xen/xenbus/xenbus_comms.c
++++ b/drivers/xen/xenbus/xenbus_comms.c
+@@ -309,8 +309,8 @@ static int process_msg(void)
+ 			virt_wmb();
+ 			req->state = xb_req_state_got_reply;
+ 			req->cb(req);
+-		} else
+-			kfree(req);
++		}
++		kref_put(&req->kref, xs_free_req);
+ 	}
+ 
+ 	mutex_unlock(&xs_response_mutex);
+@@ -386,14 +386,13 @@ static int process_writes(void)
+ 	state.req->msg.type = XS_ERROR;
+ 	state.req->err = err;
+ 	list_del(&state.req->list);
+-	if (state.req->state == xb_req_state_aborted)
+-		kfree(state.req);
+-	else {
++	if (state.req->state != xb_req_state_aborted) {
+ 		/* write err, then update state */
+ 		virt_wmb();
+ 		state.req->state = xb_req_state_got_reply;
+ 		wake_up(&state.req->wq);
+ 	}
++	kref_put(&state.req->kref, xs_free_req);
+ 
+ 	mutex_unlock(&xb_write_mutex);
+ 
+diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
+index 46f8916597e53d..f5c21ba64df571 100644
+--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
++++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
+@@ -406,7 +406,7 @@ void xenbus_dev_queue_reply(struct xb_req_data *req)
+ 	mutex_unlock(&u->reply_mutex);
+ 
+ 	kfree(req->body);
+-	kfree(req);
++	kref_put(&req->kref, xs_free_req);
+ 
+ 	kref_put(&u->kref, xenbus_file_free);
+ 
+diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
+index d32c726f7a12d0..dcf9182c8451ad 100644
+--- a/drivers/xen/xenbus/xenbus_xs.c
++++ b/drivers/xen/xenbus/xenbus_xs.c
+@@ -112,6 +112,12 @@ static void xs_suspend_exit(void)
+ 	wake_up_all(&xs_state_enter_wq);
+ }
+ 
++void xs_free_req(struct kref *kref)
++{
++	struct xb_req_data *req = container_of(kref, struct xb_req_data, kref);
++	kfree(req);
++}
++
+ static uint32_t xs_request_enter(struct xb_req_data *req)
+ {
+ 	uint32_t rq_id;
+@@ -237,6 +243,12 @@ static void xs_send(struct xb_req_data *req, struct xsd_sockmsg *msg)
+ 	req->caller_req_id = req->msg.req_id;
+ 	req->msg.req_id = xs_request_enter(req);
+ 
++	/*
++	 * Take a second reference: one for this thread and one for
++	 * xenbus_thread.
++	 */
++	kref_get(&req->kref);
++
+ 	mutex_lock(&xb_write_mutex);
+ 	list_add_tail(&req->list, &xb_write_list);
+ 	notify = list_is_singular(&xb_write_list);
+@@ -261,8 +273,8 @@ static void *xs_wait_for_reply(struct xb_req_data *req, struct xsd_sockmsg *msg)
+ 	if (req->state == xb_req_state_queued ||
+ 	    req->state == xb_req_state_wait_reply)
+ 		req->state = xb_req_state_aborted;
+-	else
+-		kfree(req);
++
++	kref_put(&req->kref, xs_free_req);
+ 	mutex_unlock(&xb_write_mutex);
+ 
+ 	return ret;
+@@ -291,6 +303,7 @@ int xenbus_dev_request_and_reply(struct xsd_sockmsg *msg, void *par)
+ 	req->cb = xenbus_dev_queue_reply;
+ 	req->par = par;
+ 	req->user_req = true;
++	kref_init(&req->kref);
+ 
+ 	xs_send(req, msg);
+ 
+@@ -319,6 +332,7 @@ static void *xs_talkv(struct xenbus_transaction t,
+ 	req->num_vecs = num_vecs;
+ 	req->cb = xs_wake_up;
+ 	req->user_req = false;
++	kref_init(&req->kref);
+ 
+ 	msg.req_id = 0;
+ 	msg.tx_id = t.id;
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index 587ac07cd19410..8e65018600010b 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -732,82 +732,6 @@ const u8 *btrfs_sb_fsid_ptr(const struct btrfs_super_block *sb)
+ 	return has_metadata_uuid ? sb->metadata_uuid : sb->fsid;
+ }
+ 
+-/*
+- * We can have very weird soft links passed in.
+- * One example is "/proc/self/fd/<fd>", which can be a soft link to
+- * a block device.
+- *
+- * But it's never a good idea to use those weird names.
+- * Here we check if the path (not following symlinks) is a good one inside
+- * "/dev/".
+- */
+-static bool is_good_dev_path(const char *dev_path)
+-{
+-	struct path path = { .mnt = NULL, .dentry = NULL };
+-	char *path_buf = NULL;
+-	char *resolved_path;
+-	bool is_good = false;
+-	int ret;
+-
+-	if (!dev_path)
+-		goto out;
+-
+-	path_buf = kmalloc(PATH_MAX, GFP_KERNEL);
+-	if (!path_buf)
+-		goto out;
+-
+-	/*
+-	 * Do not follow soft link, just check if the original path is inside
+-	 * "/dev/".
+-	 */
+-	ret = kern_path(dev_path, 0, &path);
+-	if (ret)
+-		goto out;
+-	resolved_path = d_path(&path, path_buf, PATH_MAX);
+-	if (IS_ERR(resolved_path))
+-		goto out;
+-	if (strncmp(resolved_path, "/dev/", strlen("/dev/")))
+-		goto out;
+-	is_good = true;
+-out:
+-	kfree(path_buf);
+-	path_put(&path);
+-	return is_good;
+-}
+-
+-static int get_canonical_dev_path(const char *dev_path, char *canonical)
+-{
+-	struct path path = { .mnt = NULL, .dentry = NULL };
+-	char *path_buf = NULL;
+-	char *resolved_path;
+-	int ret;
+-
+-	if (!dev_path) {
+-		ret = -EINVAL;
+-		goto out;
+-	}
+-
+-	path_buf = kmalloc(PATH_MAX, GFP_KERNEL);
+-	if (!path_buf) {
+-		ret = -ENOMEM;
+-		goto out;
+-	}
+-
+-	ret = kern_path(dev_path, LOOKUP_FOLLOW, &path);
+-	if (ret)
+-		goto out;
+-	resolved_path = d_path(&path, path_buf, PATH_MAX);
+-	if (IS_ERR(resolved_path)) {
+-		ret = PTR_ERR(resolved_path);
+-		goto out;
+-	}
+-	ret = strscpy(canonical, resolved_path, PATH_MAX);
+-out:
+-	kfree(path_buf);
+-	path_put(&path);
+-	return ret;
+-}
+-
+ static bool is_same_device(struct btrfs_device *device, const char *new_path)
+ {
+ 	struct path old = { .mnt = NULL, .dentry = NULL };
+@@ -1495,23 +1419,12 @@ struct btrfs_device *btrfs_scan_one_device(const char *path, blk_mode_t flags,
+ 	bool new_device_added = false;
+ 	struct btrfs_device *device = NULL;
+ 	struct file *bdev_file;
+-	char *canonical_path = NULL;
+ 	u64 bytenr;
+ 	dev_t devt;
+ 	int ret;
+ 
+ 	lockdep_assert_held(&uuid_mutex);
+ 
+-	if (!is_good_dev_path(path)) {
+-		canonical_path = kmalloc(PATH_MAX, GFP_KERNEL);
+-		if (canonical_path) {
+-			ret = get_canonical_dev_path(path, canonical_path);
+-			if (ret < 0) {
+-				kfree(canonical_path);
+-				canonical_path = NULL;
+-			}
+-		}
+-	}
+ 	/*
+ 	 * Avoid an exclusive open here, as the systemd-udev may initiate the
+ 	 * device scan which may race with the user's mount or mkfs command,
+@@ -1556,8 +1469,7 @@ struct btrfs_device *btrfs_scan_one_device(const char *path, blk_mode_t flags,
+ 		goto free_disk_super;
+ 	}
+ 
+-	device = device_list_add(canonical_path ? : path, disk_super,
+-				 &new_device_added);
++	device = device_list_add(path, disk_super, &new_device_added);
+ 	if (!IS_ERR(device) && new_device_added)
+ 		btrfs_free_stale_devices(device->devt, device);
+ 
+@@ -1566,7 +1478,6 @@ struct btrfs_device *btrfs_scan_one_device(const char *path, blk_mode_t flags,
+ 
+ error_bdev_put:
+ 	fput(bdev_file);
+-	kfree(canonical_path);
+ 
+ 	return device;
+ }
+diff --git a/fs/erofs/fileio.c b/fs/erofs/fileio.c
+index 17aed5f6c5490d..12e709d93445ea 100644
+--- a/fs/erofs/fileio.c
++++ b/fs/erofs/fileio.c
+@@ -150,10 +150,10 @@ static int erofs_fileio_scan_folio(struct erofs_fileio *io, struct folio *folio)
+ 				io->rq->bio.bi_iter.bi_sector = io->dev.m_pa >> 9;
+ 				attached = 0;
+ 			}
+-			if (!attached++)
+-				erofs_onlinefolio_split(folio);
+ 			if (!bio_add_folio(&io->rq->bio, folio, len, cur))
+ 				goto io_retry;
++			if (!attached++)
++				erofs_onlinefolio_split(folio);
+ 			io->dev.m_pa += len;
+ 		}
+ 		cur += len;
+diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
+index a8fb4b525f5443..e5e94afc5af88a 100644
+--- a/fs/erofs/zdata.c
++++ b/fs/erofs/zdata.c
+@@ -82,9 +82,6 @@ struct z_erofs_pcluster {
+ 	/* L: whether partial decompression or not */
+ 	bool partial;
+ 
+-	/* L: indicate several pageofs_outs or not */
+-	bool multibases;
+-
+ 	/* L: whether extra buffer allocations are best-effort */
+ 	bool besteffort;
+ 
+@@ -1073,8 +1070,6 @@ static int z_erofs_scan_folio(struct z_erofs_decompress_frontend *f,
+ 				break;
+ 
+ 			erofs_onlinefolio_split(folio);
+-			if (f->pcl->pageofs_out != (map->m_la & ~PAGE_MASK))
+-				f->pcl->multibases = true;
+ 			if (f->pcl->length < offset + end - map->m_la) {
+ 				f->pcl->length = offset + end - map->m_la;
+ 				f->pcl->pageofs_out = map->m_la & ~PAGE_MASK;
+@@ -1120,7 +1115,6 @@ struct z_erofs_decompress_backend {
+ 	struct page *onstack_pages[Z_EROFS_ONSTACK_PAGES];
+ 	struct super_block *sb;
+ 	struct z_erofs_pcluster *pcl;
+-
+ 	/* pages with the longest decompressed length for deduplication */
+ 	struct page **decompressed_pages;
+ 	/* pages to keep the compressed data */
+@@ -1129,6 +1123,8 @@ struct z_erofs_decompress_backend {
+ 	struct list_head decompressed_secondary_bvecs;
+ 	struct page **pagepool;
+ 	unsigned int onstack_used, nr_pages;
++	/* indicate if temporary copies should be preserved for later use */
++	bool keepxcpy;
+ };
+ 
+ struct z_erofs_bvec_item {
+@@ -1139,18 +1135,20 @@ struct z_erofs_bvec_item {
+ static void z_erofs_do_decompressed_bvec(struct z_erofs_decompress_backend *be,
+ 					 struct z_erofs_bvec *bvec)
+ {
++	int poff = bvec->offset + be->pcl->pageofs_out;
+ 	struct z_erofs_bvec_item *item;
+-	unsigned int pgnr;
+-
+-	if (!((bvec->offset + be->pcl->pageofs_out) & ~PAGE_MASK) &&
+-	    (bvec->end == PAGE_SIZE ||
+-	     bvec->offset + bvec->end == be->pcl->length)) {
+-		pgnr = (bvec->offset + be->pcl->pageofs_out) >> PAGE_SHIFT;
+-		DBG_BUGON(pgnr >= be->nr_pages);
+-		if (!be->decompressed_pages[pgnr]) {
+-			be->decompressed_pages[pgnr] = bvec->page;
++	struct page **page;
++
++	if (!(poff & ~PAGE_MASK) && (bvec->end == PAGE_SIZE ||
++			bvec->offset + bvec->end == be->pcl->length)) {
++		DBG_BUGON((poff >> PAGE_SHIFT) >= be->nr_pages);
++		page = be->decompressed_pages + (poff >> PAGE_SHIFT);
++		if (!*page) {
++			*page = bvec->page;
+ 			return;
+ 		}
++	} else {
++		be->keepxcpy = true;
+ 	}
+ 
+ 	/* (cold path) one pcluster is requested multiple times */
+@@ -1316,7 +1314,7 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
+ 					.alg = pcl->algorithmformat,
+ 					.inplace_io = overlapped,
+ 					.partial_decoding = pcl->partial,
+-					.fillgaps = pcl->multibases,
++					.fillgaps = be->keepxcpy,
+ 					.gfp = pcl->besteffort ? GFP_KERNEL :
+ 						GFP_NOWAIT | __GFP_NORETRY
+ 				 }, be->pagepool);
+@@ -1370,7 +1368,6 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
+ 
+ 	pcl->length = 0;
+ 	pcl->partial = true;
+-	pcl->multibases = false;
+ 	pcl->besteffort = false;
+ 	pcl->bvset.nextpage = NULL;
+ 	pcl->vcnt = 0;
+diff --git a/fs/namespace.c b/fs/namespace.c
+index bd601ab26e7811..c3c1e8c644f2e0 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -747,7 +747,7 @@ int __legitimize_mnt(struct vfsmount *bastard, unsigned seq)
+ 		return 0;
+ 	mnt = real_mount(bastard);
+ 	mnt_add_count(mnt, 1);
+-	smp_mb();			// see mntput_no_expire()
++	smp_mb();		// see mntput_no_expire() and do_umount()
+ 	if (likely(!read_seqretry(&mount_lock, seq)))
+ 		return 0;
+ 	if (bastard->mnt_flags & MNT_SYNC_UMOUNT) {
+@@ -1916,6 +1916,7 @@ static int do_umount(struct mount *mnt, int flags)
+ 			umount_tree(mnt, UMOUNT_PROPAGATE);
+ 		retval = 0;
+ 	} else {
++		smp_mb(); // paired with __legitimize_mnt()
+ 		shrink_submounts(mnt);
+ 		retval = -EBUSY;
+ 		if (!propagate_mount_busy(mnt, 2)) {
+diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
+index 1bf188b6866a67..2ebee1dced1b2d 100644
+--- a/fs/ocfs2/journal.c
++++ b/fs/ocfs2/journal.c
+@@ -174,7 +174,7 @@ int ocfs2_recovery_init(struct ocfs2_super *osb)
+ 	struct ocfs2_recovery_map *rm;
+ 
+ 	mutex_init(&osb->recovery_lock);
+-	osb->disable_recovery = 0;
++	osb->recovery_state = OCFS2_REC_ENABLED;
+ 	osb->recovery_thread_task = NULL;
+ 	init_waitqueue_head(&osb->recovery_event);
+ 
+@@ -190,31 +190,53 @@ int ocfs2_recovery_init(struct ocfs2_super *osb)
+ 	return 0;
+ }
+ 
+-/* we can't grab the goofy sem lock from inside wait_event, so we use
+- * memory barriers to make sure that we'll see the null task before
+- * being woken up */
+ static int ocfs2_recovery_thread_running(struct ocfs2_super *osb)
+ {
+-	mb();
+ 	return osb->recovery_thread_task != NULL;
+ }
+ 
+-void ocfs2_recovery_exit(struct ocfs2_super *osb)
++static void ocfs2_recovery_disable(struct ocfs2_super *osb,
++				   enum ocfs2_recovery_state state)
+ {
+-	struct ocfs2_recovery_map *rm;
+-
+-	/* disable any new recovery threads and wait for any currently
+-	 * running ones to exit. Do this before setting the vol_state. */
+ 	mutex_lock(&osb->recovery_lock);
+-	osb->disable_recovery = 1;
++	/*
++	 * If the recovery thread is not running, we can transition
++	 * directly to the final state.
++	 */
++	if (!ocfs2_recovery_thread_running(osb)) {
++		osb->recovery_state = state + 1;
++		goto out_lock;
++	}
++	osb->recovery_state = state;
++	/* Wait for recovery thread to acknowledge state transition */
++	wait_event_cmd(osb->recovery_event,
++		       !ocfs2_recovery_thread_running(osb) ||
++				osb->recovery_state >= state + 1,
++		       mutex_unlock(&osb->recovery_lock),
++		       mutex_lock(&osb->recovery_lock));
++out_lock:
+ 	mutex_unlock(&osb->recovery_lock);
+-	wait_event(osb->recovery_event, !ocfs2_recovery_thread_running(osb));
+ 
+-	/* At this point, we know that no more recovery threads can be
+-	 * launched, so wait for any recovery completion work to
+-	 * complete. */
++	/*
++	 * At this point we know that no more recovery work can be queued, so
++	 * wait for any recovery completion work to complete.
++	 */
+ 	if (osb->ocfs2_wq)
+ 		flush_workqueue(osb->ocfs2_wq);
++}
++
++void ocfs2_recovery_disable_quota(struct ocfs2_super *osb)
++{
++	ocfs2_recovery_disable(osb, OCFS2_REC_QUOTA_WANT_DISABLE);
++}
++
++void ocfs2_recovery_exit(struct ocfs2_super *osb)
++{
++	struct ocfs2_recovery_map *rm;
++
++	/* disable any new recovery threads and wait for any currently
++	 * running ones to exit. Do this before setting the vol_state. */
++	ocfs2_recovery_disable(osb, OCFS2_REC_WANT_DISABLE);
+ 
+ 	/*
+ 	 * Now that recovery is shut down, and the osb is about to be
+@@ -1472,6 +1494,18 @@ static int __ocfs2_recovery_thread(void *arg)
+ 		}
+ 	}
+ restart:
++	if (quota_enabled) {
++		mutex_lock(&osb->recovery_lock);
++		/* Confirm that recovery thread will no longer recover quotas */
++		if (osb->recovery_state == OCFS2_REC_QUOTA_WANT_DISABLE) {
++			osb->recovery_state = OCFS2_REC_QUOTA_DISABLED;
++			wake_up(&osb->recovery_event);
++		}
++		if (osb->recovery_state >= OCFS2_REC_QUOTA_DISABLED)
++			quota_enabled = 0;
++		mutex_unlock(&osb->recovery_lock);
++	}
++
+ 	status = ocfs2_super_lock(osb, 1);
+ 	if (status < 0) {
+ 		mlog_errno(status);
+@@ -1569,27 +1603,29 @@ static int __ocfs2_recovery_thread(void *arg)
+ 
+ 	ocfs2_free_replay_slots(osb);
+ 	osb->recovery_thread_task = NULL;
+-	mb(); /* sync with ocfs2_recovery_thread_running */
++	if (osb->recovery_state == OCFS2_REC_WANT_DISABLE)
++		osb->recovery_state = OCFS2_REC_DISABLED;
+ 	wake_up(&osb->recovery_event);
+ 
+ 	mutex_unlock(&osb->recovery_lock);
+ 
+-	if (quota_enabled)
+-		kfree(rm_quota);
++	kfree(rm_quota);
+ 
+ 	return status;
+ }
+ 
+ void ocfs2_recovery_thread(struct ocfs2_super *osb, int node_num)
+ {
++	int was_set = -1;
++
+ 	mutex_lock(&osb->recovery_lock);
++	if (osb->recovery_state < OCFS2_REC_WANT_DISABLE)
++		was_set = ocfs2_recovery_map_set(osb, node_num);
+ 
+ 	trace_ocfs2_recovery_thread(node_num, osb->node_num,
+-		osb->disable_recovery, osb->recovery_thread_task,
+-		osb->disable_recovery ?
+-		-1 : ocfs2_recovery_map_set(osb, node_num));
++		osb->recovery_state, osb->recovery_thread_task, was_set);
+ 
+-	if (osb->disable_recovery)
++	if (osb->recovery_state >= OCFS2_REC_WANT_DISABLE)
+ 		goto out;
+ 
+ 	if (osb->recovery_thread_task)
+diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
+index e3c3a35dc5e0e7..6397170f302f22 100644
+--- a/fs/ocfs2/journal.h
++++ b/fs/ocfs2/journal.h
+@@ -148,6 +148,7 @@ void ocfs2_wait_for_recovery(struct ocfs2_super *osb);
+ 
+ int ocfs2_recovery_init(struct ocfs2_super *osb);
+ void ocfs2_recovery_exit(struct ocfs2_super *osb);
++void ocfs2_recovery_disable_quota(struct ocfs2_super *osb);
+ 
+ int ocfs2_compute_replay_slots(struct ocfs2_super *osb);
+ void ocfs2_free_replay_slots(struct ocfs2_super *osb);
+diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
+index 51c52768132d70..6aaa94c554c12a 100644
+--- a/fs/ocfs2/ocfs2.h
++++ b/fs/ocfs2/ocfs2.h
+@@ -308,6 +308,21 @@ enum ocfs2_journal_trigger_type {
+ void ocfs2_initialize_journal_triggers(struct super_block *sb,
+ 				       struct ocfs2_triggers triggers[]);
+ 
++enum ocfs2_recovery_state {
++	OCFS2_REC_ENABLED = 0,
++	OCFS2_REC_QUOTA_WANT_DISABLE,
++	/*
++	 * Must be OCFS2_REC_QUOTA_WANT_DISABLE + 1 for
++	 * ocfs2_recovery_disable_quota() to work.
++	 */
++	OCFS2_REC_QUOTA_DISABLED,
++	OCFS2_REC_WANT_DISABLE,
++	/*
++	 * Must be OCFS2_REC_WANT_DISABLE + 1 for ocfs2_recovery_exit() to work
++	 */
++	OCFS2_REC_DISABLED,
++};
++
+ struct ocfs2_journal;
+ struct ocfs2_slot_info;
+ struct ocfs2_recovery_map;
+@@ -370,7 +385,7 @@ struct ocfs2_super
+ 	struct ocfs2_recovery_map *recovery_map;
+ 	struct ocfs2_replay_map *replay_map;
+ 	struct task_struct *recovery_thread_task;
+-	int disable_recovery;
++	enum ocfs2_recovery_state recovery_state;
+ 	wait_queue_head_t checkpoint_event;
+ 	struct ocfs2_journal *journal;
+ 	unsigned long osb_commit_interval;
+diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c
+index 2956d888c13145..e272429da3db34 100644
+--- a/fs/ocfs2/quota_local.c
++++ b/fs/ocfs2/quota_local.c
+@@ -453,8 +453,7 @@ struct ocfs2_quota_recovery *ocfs2_begin_quota_recovery(
+ 
+ /* Sync changes in local quota file into global quota file and
+  * reinitialize local quota file.
+- * The function expects local quota file to be already locked and
+- * s_umount locked in shared mode. */
++ * The function expects the local quota file to be already locked. */
+ static int ocfs2_recover_local_quota_file(struct inode *lqinode,
+ 					  int type,
+ 					  struct ocfs2_quota_recovery *rec)
+@@ -588,7 +587,6 @@ int ocfs2_finish_quota_recovery(struct ocfs2_super *osb,
+ {
+ 	unsigned int ino[OCFS2_MAXQUOTAS] = { LOCAL_USER_QUOTA_SYSTEM_INODE,
+ 					      LOCAL_GROUP_QUOTA_SYSTEM_INODE };
+-	struct super_block *sb = osb->sb;
+ 	struct ocfs2_local_disk_dqinfo *ldinfo;
+ 	struct buffer_head *bh;
+ 	handle_t *handle;
+@@ -600,7 +598,6 @@ int ocfs2_finish_quota_recovery(struct ocfs2_super *osb,
+ 	printk(KERN_NOTICE "ocfs2: Finishing quota recovery on device (%s) for "
+ 	       "slot %u\n", osb->dev_str, slot_num);
+ 
+-	down_read(&sb->s_umount);
+ 	for (type = 0; type < OCFS2_MAXQUOTAS; type++) {
+ 		if (list_empty(&(rec->r_list[type])))
+ 			continue;
+@@ -677,7 +674,6 @@ int ocfs2_finish_quota_recovery(struct ocfs2_super *osb,
+ 			break;
+ 	}
+ out:
+-	up_read(&sb->s_umount);
+ 	kfree(rec);
+ 	return status;
+ }
+@@ -843,8 +839,7 @@ static int ocfs2_local_free_info(struct super_block *sb, int type)
+ 	ocfs2_release_local_quota_bitmaps(&oinfo->dqi_chunk);
+ 
+ 	/*
+-	 * s_umount held in exclusive mode protects us against racing with
+-	 * recovery thread...
++	 * ocfs2_dismount_volume() has already aborted quota recovery...
+ 	 */
+ 	if (oinfo->dqi_rec) {
+ 		ocfs2_free_quota_recovery(oinfo->dqi_rec);
+diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
+index f7b483f0de2add..6ac4dcd54588cf 100644
+--- a/fs/ocfs2/suballoc.c
++++ b/fs/ocfs2/suballoc.c
+@@ -698,10 +698,12 @@ static int ocfs2_block_group_alloc(struct ocfs2_super *osb,
+ 
+ 	bg_bh = ocfs2_block_group_alloc_contig(osb, handle, alloc_inode,
+ 					       ac, cl);
+-	if (PTR_ERR(bg_bh) == -ENOSPC)
++	if (PTR_ERR(bg_bh) == -ENOSPC) {
++		ac->ac_which = OCFS2_AC_USE_MAIN_DISCONTIG;
+ 		bg_bh = ocfs2_block_group_alloc_discontig(handle,
+ 							  alloc_inode,
+ 							  ac, cl);
++	}
+ 	if (IS_ERR(bg_bh)) {
+ 		status = PTR_ERR(bg_bh);
+ 		bg_bh = NULL;
+@@ -1794,6 +1796,7 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
+ {
+ 	int status;
+ 	u16 chain;
++	u32 contig_bits;
+ 	u64 next_group;
+ 	struct inode *alloc_inode = ac->ac_inode;
+ 	struct buffer_head *group_bh = NULL;
+@@ -1819,10 +1822,21 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
+ 	status = -ENOSPC;
+ 	/* for now, the chain search is a bit simplistic. We just use
+ 	 * the 1st group with any empty bits. */
+-	while ((status = ac->ac_group_search(alloc_inode, group_bh,
+-					     bits_wanted, min_bits,
+-					     ac->ac_max_block,
+-					     res)) == -ENOSPC) {
++	while (1) {
++		if (ac->ac_which == OCFS2_AC_USE_MAIN_DISCONTIG) {
++			contig_bits = le16_to_cpu(bg->bg_contig_free_bits);
++			if (!contig_bits)
++				contig_bits = ocfs2_find_max_contig_free_bits(bg->bg_bitmap,
++						le16_to_cpu(bg->bg_bits), 0);
++			if (bits_wanted > contig_bits && contig_bits >= min_bits)
++				bits_wanted = contig_bits;
++		}
++
++		status = ac->ac_group_search(alloc_inode, group_bh,
++				bits_wanted, min_bits,
++				ac->ac_max_block, res);
++		if (status != -ENOSPC)
++			break;
+ 		if (!bg->bg_next_group)
+ 			break;
+ 
+@@ -1982,6 +1996,7 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac,
+ 	victim = ocfs2_find_victim_chain(cl);
+ 	ac->ac_chain = victim;
+ 
++search:
+ 	status = ocfs2_search_chain(ac, handle, bits_wanted, min_bits,
+ 				    res, &bits_left);
+ 	if (!status) {
+@@ -2022,6 +2037,16 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac,
+ 		}
+ 	}
+ 
++	/* The chains can't supply bits_wanted contiguous bits.
++	 * We should switch to using every single bit when allocating
++	 * from the global bitmap. */
++	if (i == le16_to_cpu(cl->cl_next_free_rec) &&
++	    status == -ENOSPC && ac->ac_which == OCFS2_AC_USE_MAIN) {
++		ac->ac_which = OCFS2_AC_USE_MAIN_DISCONTIG;
++		ac->ac_chain = victim;
++		goto search;
++	}
++
+ set_hint:
+ 	if (status != -ENOSPC) {
+ 		/* If the next search of this group is not likely to
+@@ -2365,7 +2390,8 @@ int __ocfs2_claim_clusters(handle_t *handle,
+ 	BUG_ON(ac->ac_bits_given >= ac->ac_bits_wanted);
+ 
+ 	BUG_ON(ac->ac_which != OCFS2_AC_USE_LOCAL
+-	       && ac->ac_which != OCFS2_AC_USE_MAIN);
++	       && ac->ac_which != OCFS2_AC_USE_MAIN
++	       && ac->ac_which != OCFS2_AC_USE_MAIN_DISCONTIG);
+ 
+ 	if (ac->ac_which == OCFS2_AC_USE_LOCAL) {
+ 		WARN_ON(min_clusters > 1);
+diff --git a/fs/ocfs2/suballoc.h b/fs/ocfs2/suballoc.h
+index b481b834857d33..bcf2ed4a86310b 100644
+--- a/fs/ocfs2/suballoc.h
++++ b/fs/ocfs2/suballoc.h
+@@ -29,6 +29,7 @@ struct ocfs2_alloc_context {
+ #define OCFS2_AC_USE_MAIN  2
+ #define OCFS2_AC_USE_INODE 3
+ #define OCFS2_AC_USE_META  4
++#define OCFS2_AC_USE_MAIN_DISCONTIG  5
+ 	u32    ac_which;
+ 
+ 	/* these are used by the chain search */
+diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
+index 1e87554f6f4104..868ccdf447386f 100644
+--- a/fs/ocfs2/super.c
++++ b/fs/ocfs2/super.c
+@@ -1867,6 +1867,9 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err)
+ 	/* Orphan scan should be stopped as early as possible */
+ 	ocfs2_orphan_scan_stop(osb);
+ 
++	/* Stop quota recovery so that we can disable quotas */
++	ocfs2_recovery_disable_quota(osb);
++
+ 	ocfs2_disable_quotas(osb);
+ 
+ 	/* All dquots should be freed by now */
+diff --git a/fs/smb/client/cached_dir.c b/fs/smb/client/cached_dir.c
+index 9c0ef4195b5829..74979466729535 100644
+--- a/fs/smb/client/cached_dir.c
++++ b/fs/smb/client/cached_dir.c
+@@ -29,7 +29,6 @@ static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
+ {
+ 	struct cached_fid *cfid;
+ 
+-	spin_lock(&cfids->cfid_list_lock);
+ 	list_for_each_entry(cfid, &cfids->entries, entry) {
+ 		if (!strcmp(cfid->path, path)) {
+ 			/*
+@@ -38,25 +37,20 @@ static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
+ 			 * being deleted due to a lease break.
+ 			 */
+ 			if (!cfid->time || !cfid->has_lease) {
+-				spin_unlock(&cfids->cfid_list_lock);
+ 				return NULL;
+ 			}
+ 			kref_get(&cfid->refcount);
+-			spin_unlock(&cfids->cfid_list_lock);
+ 			return cfid;
+ 		}
+ 	}
+ 	if (lookup_only) {
+-		spin_unlock(&cfids->cfid_list_lock);
+ 		return NULL;
+ 	}
+ 	if (cfids->num_entries >= max_cached_dirs) {
+-		spin_unlock(&cfids->cfid_list_lock);
+ 		return NULL;
+ 	}
+ 	cfid = init_cached_dir(path);
+ 	if (cfid == NULL) {
+-		spin_unlock(&cfids->cfid_list_lock);
+ 		return NULL;
+ 	}
+ 	cfid->cfids = cfids;
+@@ -74,7 +68,6 @@ static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
+ 	 */
+ 	cfid->has_lease = true;
+ 
+-	spin_unlock(&cfids->cfid_list_lock);
+ 	return cfid;
+ }
+ 
+@@ -185,8 +178,10 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
+ 	if (!utf16_path)
+ 		return -ENOMEM;
+ 
++	spin_lock(&cfids->cfid_list_lock);
+ 	cfid = find_or_create_cached_dir(cfids, path, lookup_only, tcon->max_cached_dirs);
+ 	if (cfid == NULL) {
++		spin_unlock(&cfids->cfid_list_lock);
+ 		kfree(utf16_path);
+ 		return -ENOENT;
+ 	}
+@@ -195,7 +190,6 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
+ 	 * Otherwise, it is either a new entry or laundromat worker removed it
+ 	 * from @cfids->entries.  Caller will put last reference if the latter.
+ 	 */
+-	spin_lock(&cfids->cfid_list_lock);
+ 	if (cfid->has_lease && cfid->time) {
+ 		spin_unlock(&cfids->cfid_list_lock);
+ 		*ret_cfid = cfid;
+diff --git a/fs/smb/server/oplock.c b/fs/smb/server/oplock.c
+index 81a29857b1e32f..03f606afad93a0 100644
+--- a/fs/smb/server/oplock.c
++++ b/fs/smb/server/oplock.c
+@@ -1496,7 +1496,7 @@ struct lease_ctx_info *parse_lease_state(void *open_req)
+ 
+ 		if (le16_to_cpu(cc->DataOffset) + le32_to_cpu(cc->DataLength) <
+ 		    sizeof(struct create_lease_v2) - 4)
+-			return NULL;
++			goto err_out;
+ 
+ 		memcpy(lreq->lease_key, lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE);
+ 		lreq->req_state = lc->lcontext.LeaseState;
+@@ -1512,7 +1512,7 @@ struct lease_ctx_info *parse_lease_state(void *open_req)
+ 
+ 		if (le16_to_cpu(cc->DataOffset) + le32_to_cpu(cc->DataLength) <
+ 		    sizeof(struct create_lease))
+-			return NULL;
++			goto err_out;
+ 
+ 		memcpy(lreq->lease_key, lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE);
+ 		lreq->req_state = lc->lcontext.LeaseState;
+@@ -1521,6 +1521,9 @@ struct lease_ctx_info *parse_lease_state(void *open_req)
+ 		lreq->version = 1;
+ 	}
+ 	return lreq;
++err_out:
++	kfree(lreq);
++	return NULL;
+ }
+ 
+ /**
+diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
+index 85348705f2555d..f0760d786502fc 100644
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -633,6 +633,11 @@ smb2_get_name(const char *src, const int maxlen, struct nls_table *local_nls)
+ 		return name;
+ 	}
+ 
++	if (*name == '\0') {
++		kfree(name);
++		return ERR_PTR(-EINVAL);
++	}
++
+ 	if (*name == '\\') {
+ 		pr_err("not allow directory name included leading slash\n");
+ 		kfree(name);
+diff --git a/fs/smb/server/vfs.c b/fs/smb/server/vfs.c
+index a7694aae0b947b..e059316be36fd0 100644
+--- a/fs/smb/server/vfs.c
++++ b/fs/smb/server/vfs.c
+@@ -443,6 +443,13 @@ static int ksmbd_vfs_stream_write(struct ksmbd_file *fp, char *buf, loff_t *pos,
+ 		goto out;
+ 	}
+ 
++	if (v_len <= *pos) {
++		pr_err("stream write position %lld is out of bounds (stream length: %zd)\n",
++				*pos, v_len);
++		err = -EINVAL;
++		goto out;
++	}
++
+ 	if (v_len < size) {
+ 		wbuf = kvzalloc(size, KSMBD_DEFAULT_GFP);
+ 		if (!wbuf) {
+diff --git a/fs/smb/server/vfs_cache.c b/fs/smb/server/vfs_cache.c
+index 1f8fa3468173ab..dfed6fce890498 100644
+--- a/fs/smb/server/vfs_cache.c
++++ b/fs/smb/server/vfs_cache.c
+@@ -661,21 +661,40 @@ __close_file_table_ids(struct ksmbd_file_table *ft,
+ 		       bool (*skip)(struct ksmbd_tree_connect *tcon,
+ 				    struct ksmbd_file *fp))
+ {
+-	unsigned int			id;
+-	struct ksmbd_file		*fp;
+-	int				num = 0;
++	struct ksmbd_file *fp;
++	unsigned int id = 0;
++	int num = 0;
++
++	while (1) {
++		write_lock(&ft->lock);
++		fp = idr_get_next(ft->idr, &id);
++		if (!fp) {
++			write_unlock(&ft->lock);
++			break;
++		}
+ 
+-	idr_for_each_entry(ft->idr, fp, id) {
+-		if (skip(tcon, fp))
++		if (skip(tcon, fp) ||
++		    !atomic_dec_and_test(&fp->refcount)) {
++			id++;
++			write_unlock(&ft->lock);
+ 			continue;
++		}
+ 
+ 		set_close_state_blocked_works(fp);
++		idr_remove(ft->idr, fp->volatile_id);
++		fp->volatile_id = KSMBD_NO_FID;
++		write_unlock(&ft->lock);
++
++		down_write(&fp->f_ci->m_lock);
++		list_del_init(&fp->node);
++		up_write(&fp->f_ci->m_lock);
+ 
+-		if (!atomic_dec_and_test(&fp->refcount))
+-			continue;
+ 		__ksmbd_close_fd(ft, fp);
++
+ 		num++;
++		id++;
+ 	}
++
+ 	return num;
+ }
+ 
+diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
+index 199ec6d10b62af..10df55aea51275 100644
+--- a/fs/userfaultfd.c
++++ b/fs/userfaultfd.c
+@@ -1585,8 +1585,11 @@ static int userfaultfd_copy(struct userfaultfd_ctx *ctx,
+ 	user_uffdio_copy = (struct uffdio_copy __user *) arg;
+ 
+ 	ret = -EAGAIN;
+-	if (atomic_read(&ctx->mmap_changing))
++	if (unlikely(atomic_read(&ctx->mmap_changing))) {
++		if (unlikely(put_user(ret, &user_uffdio_copy->copy)))
++			return -EFAULT;
+ 		goto out;
++	}
+ 
+ 	ret = -EFAULT;
+ 	if (copy_from_user(&uffdio_copy, user_uffdio_copy,
+@@ -1641,8 +1644,11 @@ static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx,
+ 	user_uffdio_zeropage = (struct uffdio_zeropage __user *) arg;
+ 
+ 	ret = -EAGAIN;
+-	if (atomic_read(&ctx->mmap_changing))
++	if (unlikely(atomic_read(&ctx->mmap_changing))) {
++		if (unlikely(put_user(ret, &user_uffdio_zeropage->zeropage)))
++			return -EFAULT;
+ 		goto out;
++	}
+ 
+ 	ret = -EFAULT;
+ 	if (copy_from_user(&uffdio_zeropage, user_uffdio_zeropage,
+@@ -1744,8 +1750,11 @@ static int userfaultfd_continue(struct userfaultfd_ctx *ctx, unsigned long arg)
+ 	user_uffdio_continue = (struct uffdio_continue __user *)arg;
+ 
+ 	ret = -EAGAIN;
+-	if (atomic_read(&ctx->mmap_changing))
++	if (unlikely(atomic_read(&ctx->mmap_changing))) {
++		if (unlikely(put_user(ret, &user_uffdio_continue->mapped)))
++			return -EFAULT;
+ 		goto out;
++	}
+ 
+ 	ret = -EFAULT;
+ 	if (copy_from_user(&uffdio_continue, user_uffdio_continue,
+@@ -1801,8 +1810,11 @@ static inline int userfaultfd_poison(struct userfaultfd_ctx *ctx, unsigned long
+ 	user_uffdio_poison = (struct uffdio_poison __user *)arg;
+ 
+ 	ret = -EAGAIN;
+-	if (atomic_read(&ctx->mmap_changing))
++	if (unlikely(atomic_read(&ctx->mmap_changing))) {
++		if (unlikely(put_user(ret, &user_uffdio_poison->updated)))
++			return -EFAULT;
+ 		goto out;
++	}
+ 
+ 	ret = -EFAULT;
+ 	if (copy_from_user(&uffdio_poison, user_uffdio_poison,
+@@ -1870,8 +1882,12 @@ static int userfaultfd_move(struct userfaultfd_ctx *ctx,
+ 
+ 	user_uffdio_move = (struct uffdio_move __user *) arg;
+ 
+-	if (atomic_read(&ctx->mmap_changing))
+-		return -EAGAIN;
++	ret = -EAGAIN;
++	if (unlikely(atomic_read(&ctx->mmap_changing))) {
++		if (unlikely(put_user(ret, &user_uffdio_move->move)))
++			return -EFAULT;
++		goto out;
++	}
+ 
+ 	if (copy_from_user(&uffdio_move, user_uffdio_move,
+ 			   /* don't copy "move" last field */
+diff --git a/include/linux/cpu.h b/include/linux/cpu.h
+index bdcec173244522..cc668a054d0960 100644
+--- a/include/linux/cpu.h
++++ b/include/linux/cpu.h
+@@ -77,6 +77,8 @@ extern ssize_t cpu_show_gds(struct device *dev,
+ 			    struct device_attribute *attr, char *buf);
+ extern ssize_t cpu_show_reg_file_data_sampling(struct device *dev,
+ 					       struct device_attribute *attr, char *buf);
++extern ssize_t cpu_show_indirect_target_selection(struct device *dev,
++						  struct device_attribute *attr, char *buf);
+ 
+ extern __printf(4, 5)
+ struct device *cpu_device_create(struct device *parent, void *drvdata,
+diff --git a/include/linux/execmem.h b/include/linux/execmem.h
+index 32cef114411796..584e112ca3805f 100644
+--- a/include/linux/execmem.h
++++ b/include/linux/execmem.h
+@@ -4,6 +4,7 @@
+ 
+ #include <linux/types.h>
+ #include <linux/moduleloader.h>
++#include <linux/cleanup.h>
+ 
+ #if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
+ 		!defined(CONFIG_KASAN_VMALLOC)
+@@ -123,6 +124,8 @@ void *execmem_alloc(enum execmem_type type, size_t size);
+  */
+ void execmem_free(void *ptr);
+ 
++DEFINE_FREE(execmem, void *, if (_T) execmem_free(_T));
++
+ #if defined(CONFIG_EXECMEM) && !defined(CONFIG_ARCH_WANTS_EXECMEM_LATE)
+ void execmem_init(void);
+ #else
+diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
+index 3750e56bfcbb36..777f6aa8efa7b2 100644
+--- a/include/linux/ieee80211.h
++++ b/include/linux/ieee80211.h
+@@ -1524,7 +1524,7 @@ struct ieee80211_mgmt {
+ 				struct {
+ 					u8 action_code;
+ 					u8 dialog_token;
+-					u8 status_code;
++					__le16 status_code;
+ 					u8 variable[];
+ 				} __packed ttlm_res;
+ 				struct {
+diff --git a/include/linux/module.h b/include/linux/module.h
+index 82a9527d43c768..7886217c998812 100644
+--- a/include/linux/module.h
++++ b/include/linux/module.h
+@@ -582,6 +582,11 @@ struct module {
+ 	atomic_t refcnt;
+ #endif
+ 
++#ifdef CONFIG_MITIGATION_ITS
++	int its_num_pages;
++	void **its_page_array;
++#endif
++
+ #ifdef CONFIG_CONSTRUCTORS
+ 	/* Constructor functions. */
+ 	ctor_fn_t *ctors;
+diff --git a/include/linux/types.h b/include/linux/types.h
+index 2bc8766ba20cab..2d7b9ae8714ce5 100644
+--- a/include/linux/types.h
++++ b/include/linux/types.h
+@@ -115,8 +115,9 @@ typedef u64			u_int64_t;
+ typedef s64			int64_t;
+ #endif
+ 
+-/* this is a special 64bit data type that is 8-byte aligned */
++/* These are the special 64-bit data types that are 8-byte aligned */
+ #define aligned_u64		__aligned_u64
++#define aligned_s64		__aligned_s64
+ #define aligned_be64		__aligned_be64
+ #define aligned_le64		__aligned_le64
+ 
+diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
+index ad2ce7a6ab7af1..2dcf7621913114 100644
+--- a/include/linux/vmalloc.h
++++ b/include/linux/vmalloc.h
+@@ -61,6 +61,7 @@ struct vm_struct {
+ 	unsigned int		nr_pages;
+ 	phys_addr_t		phys_addr;
+ 	const void		*caller;
++	unsigned long		requested_size;
+ };
+ 
+ struct vmap_area {
+diff --git a/include/net/netdev_queues.h b/include/net/netdev_queues.h
+index 5ca019d294ca3d..173bcfcd868a84 100644
+--- a/include/net/netdev_queues.h
++++ b/include/net/netdev_queues.h
+@@ -92,6 +92,12 @@ struct netdev_stat_ops {
+ 			       struct netdev_queue_stats_tx *tx);
+ };
+ 
++void netdev_stat_queue_sum(struct net_device *netdev,
++			   int rx_start, int rx_end,
++			   struct netdev_queue_stats_rx *rx_sum,
++			   int tx_start, int tx_end,
++			   struct netdev_queue_stats_tx *tx_sum);
++
+ /**
+  * struct netdev_queue_mgmt_ops - netdev ops for queue management
+  *
+diff --git a/include/uapi/linux/types.h b/include/uapi/linux/types.h
+index 6375a06840520d..48b933938877d9 100644
+--- a/include/uapi/linux/types.h
++++ b/include/uapi/linux/types.h
+@@ -53,6 +53,7 @@ typedef __u32 __bitwise __wsum;
+  * No conversions are necessary between 32-bit user-space and a 64-bit kernel.
+  */
+ #define __aligned_u64 __u64 __attribute__((aligned(8)))
++#define __aligned_s64 __s64 __attribute__((aligned(8)))
+ #define __aligned_be64 __be64 __attribute__((aligned(8)))
+ #define __aligned_le64 __le64 __attribute__((aligned(8)))
+ 
+diff --git a/init/Kconfig b/init/Kconfig
+index 2b4969758da836..d3755b2264bdfb 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -134,6 +134,9 @@ config LD_CAN_USE_KEEP_IN_OVERLAY
+ 	# https://github.com/llvm/llvm-project/pull/130661
+ 	def_bool LD_IS_BFD || LLD_VERSION >= 210000
+ 
++config RUSTC_HAS_UNNECESSARY_TRANSMUTES
++	def_bool RUSTC_VERSION >= 108800
++
+ config PAHOLE_VERSION
+ 	int
+ 	default $(shell,$(srctree)/scripts/pahole-version.sh $(PAHOLE))
+diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
+index fef5c6e3b251e2..8ef0603c07f110 100644
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -441,24 +441,6 @@ static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req)
+ 	return req->link;
+ }
+ 
+-static inline struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
+-{
+-	if (likely(!(req->flags & REQ_F_ARM_LTIMEOUT)))
+-		return NULL;
+-	return __io_prep_linked_timeout(req);
+-}
+-
+-static noinline void __io_arm_ltimeout(struct io_kiocb *req)
+-{
+-	io_queue_linked_timeout(__io_prep_linked_timeout(req));
+-}
+-
+-static inline void io_arm_ltimeout(struct io_kiocb *req)
+-{
+-	if (unlikely(req->flags & REQ_F_ARM_LTIMEOUT))
+-		__io_arm_ltimeout(req);
+-}
+-
+ static void io_prep_async_work(struct io_kiocb *req)
+ {
+ 	const struct io_issue_def *def = &io_issue_defs[req->opcode];
+@@ -511,7 +493,6 @@ static void io_prep_async_link(struct io_kiocb *req)
+ 
+ static void io_queue_iowq(struct io_kiocb *req)
+ {
+-	struct io_kiocb *link = io_prep_linked_timeout(req);
+ 	struct io_uring_task *tctx = req->task->io_uring;
+ 
+ 	BUG_ON(!tctx);
+@@ -536,8 +517,6 @@ static void io_queue_iowq(struct io_kiocb *req)
+ 
+ 	trace_io_uring_queue_async_work(req, io_wq_is_hashed(&req->work));
+ 	io_wq_enqueue(tctx->io_wq, &req->work);
+-	if (link)
+-		io_queue_linked_timeout(link);
+ }
+ 
+ static void io_req_queue_iowq_tw(struct io_kiocb *req, struct io_tw_state *ts)
+@@ -884,6 +863,14 @@ bool io_req_post_cqe(struct io_kiocb *req, s32 res, u32 cflags)
+ 	struct io_ring_ctx *ctx = req->ctx;
+ 	bool posted;
+ 
++	/*
++	 * If multishot has already posted deferred completions, ensure that
+	 * those are flushed before posting this one. If not, CQEs
++	 * could get reordered.
++	 */
++	if (!wq_list_empty(&ctx->submit_state.compl_reqs))
++		__io_submit_flush_completions(ctx);
++
+ 	lockdep_assert(!io_wq_current_is_worker());
+ 	lockdep_assert_held(&ctx->uring_lock);
+ 
+@@ -1723,17 +1710,24 @@ static bool io_assign_file(struct io_kiocb *req, const struct io_issue_def *def,
+ 	return !!req->file;
+ }
+ 
++#define REQ_ISSUE_SLOW_FLAGS	(REQ_F_CREDS | REQ_F_ARM_LTIMEOUT)
++
+ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
+ {
+ 	const struct io_issue_def *def = &io_issue_defs[req->opcode];
+ 	const struct cred *creds = NULL;
++	struct io_kiocb *link = NULL;
+ 	int ret;
+ 
+ 	if (unlikely(!io_assign_file(req, def, issue_flags)))
+ 		return -EBADF;
+ 
+-	if (unlikely((req->flags & REQ_F_CREDS) && req->creds != current_cred()))
+-		creds = override_creds(req->creds);
++	if (unlikely(req->flags & REQ_ISSUE_SLOW_FLAGS)) {
++		if ((req->flags & REQ_F_CREDS) && req->creds != current_cred())
++			creds = override_creds(req->creds);
++		if (req->flags & REQ_F_ARM_LTIMEOUT)
++			link = __io_prep_linked_timeout(req);
++	}
+ 
+ 	if (!def->audit_skip)
+ 		audit_uring_entry(req->opcode);
+@@ -1743,8 +1737,12 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
+ 	if (!def->audit_skip)
+ 		audit_uring_exit(!ret, ret);
+ 
+-	if (creds)
+-		revert_creds(creds);
++	if (unlikely(creds || link)) {
++		if (creds)
++			revert_creds(creds);
++		if (link)
++			io_queue_linked_timeout(link);
++	}
+ 
+ 	if (ret == IOU_OK) {
+ 		if (issue_flags & IO_URING_F_COMPLETE_DEFER)
+@@ -1757,7 +1755,6 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
+ 
+ 	if (ret == IOU_ISSUE_SKIP_COMPLETE) {
+ 		ret = 0;
+-		io_arm_ltimeout(req);
+ 
+ 		/* If the op doesn't have a file, we're not polling for it */
+ 		if ((req->ctx->flags & IORING_SETUP_IOPOLL) && def->iopoll_queue)
+@@ -1800,8 +1797,6 @@ void io_wq_submit_work(struct io_wq_work *work)
+ 	else
+ 		req_ref_get(req);
+ 
+-	io_arm_ltimeout(req);
+-
+ 	/* either cancelled or io-wq is dying, so don't touch tctx->iowq */
+ 	if (atomic_read(&work->flags) & IO_WQ_WORK_CANCEL) {
+ fail:
+@@ -1921,15 +1916,11 @@ struct file *io_file_get_normal(struct io_kiocb *req, int fd)
+ static void io_queue_async(struct io_kiocb *req, int ret)
+ 	__must_hold(&req->ctx->uring_lock)
+ {
+-	struct io_kiocb *linked_timeout;
+-
+ 	if (ret != -EAGAIN || (req->flags & REQ_F_NOWAIT)) {
+ 		io_req_defer_failed(req, ret);
+ 		return;
+ 	}
+ 
+-	linked_timeout = io_prep_linked_timeout(req);
+-
+ 	switch (io_arm_poll_handler(req, 0)) {
+ 	case IO_APOLL_READY:
+ 		io_kbuf_recycle(req, 0);
+@@ -1942,9 +1933,6 @@ static void io_queue_async(struct io_kiocb *req, int ret)
+ 	case IO_APOLL_OK:
+ 		break;
+ 	}
+-
+-	if (linked_timeout)
+-		io_queue_linked_timeout(linked_timeout);
+ }
+ 
+ static inline void io_queue_sqe(struct io_kiocb *req)
+diff --git a/io_uring/sqpoll.c b/io_uring/sqpoll.c
+index 5bc54c6df20fd6..430922c541681e 100644
+--- a/io_uring/sqpoll.c
++++ b/io_uring/sqpoll.c
+@@ -20,7 +20,7 @@
+ #include "sqpoll.h"
+ 
+ #define IORING_SQPOLL_CAP_ENTRIES_VALUE 8
+-#define IORING_TW_CAP_ENTRIES_VALUE	8
++#define IORING_TW_CAP_ENTRIES_VALUE	32
+ 
+ enum {
+ 	IO_SQ_THREAD_SHOULD_STOP = 0,
+diff --git a/kernel/params.c b/kernel/params.c
+index 33b2985b31c7fc..9935ff599356b7 100644
+--- a/kernel/params.c
++++ b/kernel/params.c
+@@ -949,7 +949,9 @@ struct kset *module_kset;
+ static void module_kobj_release(struct kobject *kobj)
+ {
+ 	struct module_kobject *mk = to_module_kobject(kobj);
+-	complete(mk->kobj_completion);
++
++	if (mk->kobj_completion)
++		complete(mk->kobj_completion);
+ }
+ 
+ const struct kobj_type module_ktype = {
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index ceb023629d48dd..990d0828bf2a90 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -7182,9 +7182,6 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
+ 		idle_h_nr_running = task_has_idle_policy(p);
+ 		if (!task_sleep && !task_delayed)
+ 			h_nr_delayed = !!se->sched_delayed;
+-	} else {
+-		cfs_rq = group_cfs_rq(se);
+-		slice = cfs_rq_min_slice(cfs_rq);
+ 	}
+ 
+ 	for_each_sched_entity(se) {
+@@ -7194,6 +7191,7 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
+ 			if (p && &p->se == se)
+ 				return -1;
+ 
++			slice = cfs_rq_min_slice(cfs_rq);
+ 			break;
+ 		}
+ 
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index 40ac11e294231e..f94a9d41358555 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -2879,6 +2879,8 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
+ void split_huge_pmd_locked(struct vm_area_struct *vma, unsigned long address,
+ 			   pmd_t *pmd, bool freeze, struct folio *folio)
+ {
++	bool pmd_migration = is_pmd_migration_entry(*pmd);
++
+ 	VM_WARN_ON_ONCE(folio && !folio_test_pmd_mappable(folio));
+ 	VM_WARN_ON_ONCE(!IS_ALIGNED(address, HPAGE_PMD_SIZE));
+ 	VM_WARN_ON_ONCE(folio && !folio_test_locked(folio));
+@@ -2889,9 +2891,12 @@ void split_huge_pmd_locked(struct vm_area_struct *vma, unsigned long address,
+ 	 * require a folio to check the PMD against. Otherwise, there
+ 	 * is a risk of replacing the wrong folio.
+ 	 */
+-	if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd) ||
+-	    is_pmd_migration_entry(*pmd)) {
+-		if (folio && folio != pmd_folio(*pmd))
++	if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd) || pmd_migration) {
++		/*
++		 * Do not apply pmd_folio() to a migration entry; and folio lock
++		 * guarantees that it must be of the wrong folio anyway.
++		 */
++		if (folio && (pmd_migration || folio != pmd_folio(*pmd)))
+ 			return;
+ 		__split_huge_pmd_locked(vma, pmd, address, freeze);
+ 	}
+diff --git a/mm/internal.h b/mm/internal.h
+index 398633d6b6c9f0..9e0577413087c2 100644
+--- a/mm/internal.h
++++ b/mm/internal.h
+@@ -204,11 +204,9 @@ static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
+ 		pte_t *start_ptep, pte_t pte, int max_nr, fpb_t flags,
+ 		bool *any_writable, bool *any_young, bool *any_dirty)
+ {
+-	unsigned long folio_end_pfn = folio_pfn(folio) + folio_nr_pages(folio);
+-	const pte_t *end_ptep = start_ptep + max_nr;
+ 	pte_t expected_pte, *ptep;
+ 	bool writable, young, dirty;
+-	int nr;
++	int nr, cur_nr;
+ 
+ 	if (any_writable)
+ 		*any_writable = false;
+@@ -221,11 +219,15 @@ static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
+ 	VM_WARN_ON_FOLIO(!folio_test_large(folio) || max_nr < 1, folio);
+ 	VM_WARN_ON_FOLIO(page_folio(pfn_to_page(pte_pfn(pte))) != folio, folio);
+ 
++	/* Limit max_nr to the actual remaining PFNs in the folio we could batch. */
++	max_nr = min_t(unsigned long, max_nr,
++		       folio_pfn(folio) + folio_nr_pages(folio) - pte_pfn(pte));
++
+ 	nr = pte_batch_hint(start_ptep, pte);
+ 	expected_pte = __pte_batch_clear_ignored(pte_advance_pfn(pte, nr), flags);
+ 	ptep = start_ptep + nr;
+ 
+-	while (ptep < end_ptep) {
++	while (nr < max_nr) {
+ 		pte = ptep_get(ptep);
+ 		if (any_writable)
+ 			writable = !!pte_write(pte);
+@@ -238,14 +240,6 @@ static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
+ 		if (!pte_same(pte, expected_pte))
+ 			break;
+ 
+-		/*
+-		 * Stop immediately once we reached the end of the folio. In
+-		 * corner cases the next PFN might fall into a different
+-		 * folio.
+-		 */
+-		if (pte_pfn(pte) >= folio_end_pfn)
+-			break;
+-
+ 		if (any_writable)
+ 			*any_writable |= writable;
+ 		if (any_young)
+@@ -253,12 +247,13 @@ static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
+ 		if (any_dirty)
+ 			*any_dirty |= dirty;
+ 
+-		nr = pte_batch_hint(ptep, pte);
+-		expected_pte = pte_advance_pfn(expected_pte, nr);
+-		ptep += nr;
++		cur_nr = pte_batch_hint(ptep, pte);
++		expected_pte = pte_advance_pfn(expected_pte, cur_nr);
++		ptep += cur_nr;
++		nr += cur_nr;
+ 	}
+ 
+-	return min(ptep - start_ptep, max_nr);
++	return min(nr, max_nr);
+ }
+ 
+ /**
+diff --git a/mm/memblock.c b/mm/memblock.c
+index cc5ee323245e3d..3d7b0114442c45 100644
+--- a/mm/memblock.c
++++ b/mm/memblock.c
+@@ -456,7 +456,14 @@ static int __init_memblock memblock_double_array(struct memblock_type *type,
+ 				min(new_area_start, memblock.current_limit),
+ 				new_alloc_size, PAGE_SIZE);
+ 
+-		new_array = addr ? __va(addr) : NULL;
++		if (addr) {
++			/* The memory may not have been accepted, yet. */
++			accept_memory(addr, new_alloc_size);
++
++			new_array = __va(addr);
++		} else {
++			new_array = NULL;
++		}
+ 	}
+ 	if (!addr) {
+ 		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index fd4e0e1cd65e43..d29da0c6a7f293 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -1907,13 +1907,12 @@ static inline bool boost_watermark(struct zone *zone)
+  * can claim the whole pageblock for the requested migratetype. If not, we check
+  * the pageblock for constituent pages; if at least half of the pages are free
+  * or compatible, we can still claim the whole block, so pages freed in the
+- * future will be put on the correct free list. Otherwise, we isolate exactly
+- * the order we need from the fallback block and leave its migratetype alone.
++ * future will be put on the correct free list.
+  */
+ static struct page *
+-steal_suitable_fallback(struct zone *zone, struct page *page,
+-			int current_order, int order, int start_type,
+-			unsigned int alloc_flags, bool whole_block)
++try_to_steal_block(struct zone *zone, struct page *page,
++		   int current_order, int order, int start_type,
++		   unsigned int alloc_flags)
+ {
+ 	int free_pages, movable_pages, alike_pages;
+ 	unsigned long start_pfn;
+@@ -1926,7 +1925,7 @@ steal_suitable_fallback(struct zone *zone, struct page *page,
+ 	 * highatomic accounting.
+ 	 */
+ 	if (is_migrate_highatomic(block_type))
+-		goto single_page;
++		return NULL;
+ 
+ 	/* Take ownership for orders >= pageblock_order */
+ 	if (current_order >= pageblock_order) {
+@@ -1947,14 +1946,10 @@ steal_suitable_fallback(struct zone *zone, struct page *page,
+ 	if (boost_watermark(zone) && (alloc_flags & ALLOC_KSWAPD))
+ 		set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
+ 
+-	/* We are not allowed to try stealing from the whole block */
+-	if (!whole_block)
+-		goto single_page;
+-
+ 	/* moving whole block can fail due to zone boundary conditions */
+ 	if (!prep_move_freepages_block(zone, page, &start_pfn, &free_pages,
+ 				       &movable_pages))
+-		goto single_page;
++		return NULL;
+ 
+ 	/*
+ 	 * Determine how many pages are compatible with our allocation.
+@@ -1987,9 +1982,7 @@ steal_suitable_fallback(struct zone *zone, struct page *page,
+ 		return __rmqueue_smallest(zone, order, start_type);
+ 	}
+ 
+-single_page:
+-	page_del_and_expand(zone, page, order, current_order, block_type);
+-	return page;
++	return NULL;
+ }
+ 
+ /*
+@@ -2171,17 +2164,15 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
+ }
+ 
+ /*
+- * Try finding a free buddy page on the fallback list and put it on the free
+- * list of requested migratetype, possibly along with other pages from the same
+- * block, depending on fragmentation avoidance heuristics. Returns true if
+- * fallback was found so that __rmqueue_smallest() can grab it.
++ * Try to allocate from some fallback migratetype by claiming the entire block,
++ * i.e. converting it to the allocation's start migratetype.
+  *
+  * The use of signed ints for order and current_order is a deliberate
+  * deviation from the rest of this file, to make the for loop
+  * condition simpler.
+  */
+ static __always_inline struct page *
+-__rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
++__rmqueue_claim(struct zone *zone, int order, int start_migratetype,
+ 						unsigned int alloc_flags)
+ {
+ 	struct free_area *area;
+@@ -2212,58 +2203,66 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
+ 		if (fallback_mt == -1)
+ 			continue;
+ 
+-		/*
+-		 * We cannot steal all free pages from the pageblock and the
+-		 * requested migratetype is movable. In that case it's better to
+-		 * steal and split the smallest available page instead of the
+-		 * largest available page, because even if the next movable
+-		 * allocation falls back into a different pageblock than this
+-		 * one, it won't cause permanent fragmentation.
+-		 */
+-		if (!can_steal && start_migratetype == MIGRATE_MOVABLE
+-					&& current_order > order)
+-			goto find_smallest;
++		if (!can_steal)
++			break;
+ 
+-		goto do_steal;
++		page = get_page_from_free_area(area, fallback_mt);
++		page = try_to_steal_block(zone, page, current_order, order,
++					  start_migratetype, alloc_flags);
++		if (page) {
++			trace_mm_page_alloc_extfrag(page, order, current_order,
++						    start_migratetype, fallback_mt);
++			return page;
++		}
+ 	}
+ 
+ 	return NULL;
++}
++
++/*
++ * Try to steal a single page from some fallback migratetype. Leave the rest of
++ * the block as its current migratetype, potentially causing fragmentation.
++ */
++static __always_inline struct page *
++__rmqueue_steal(struct zone *zone, int order, int start_migratetype)
++{
++	struct free_area *area;
++	int current_order;
++	struct page *page;
++	int fallback_mt;
++	bool can_steal;
+ 
+-find_smallest:
+ 	for (current_order = order; current_order < NR_PAGE_ORDERS; current_order++) {
+ 		area = &(zone->free_area[current_order]);
+ 		fallback_mt = find_suitable_fallback(area, current_order,
+ 				start_migratetype, false, &can_steal);
+-		if (fallback_mt != -1)
+-			break;
+-	}
+-
+-	/*
+-	 * This should not happen - we already found a suitable fallback
+-	 * when looking for the largest page.
+-	 */
+-	VM_BUG_ON(current_order > MAX_PAGE_ORDER);
+-
+-do_steal:
+-	page = get_page_from_free_area(area, fallback_mt);
+-
+-	/* take off list, maybe claim block, expand remainder */
+-	page = steal_suitable_fallback(zone, page, current_order, order,
+-				       start_migratetype, alloc_flags, can_steal);
++		if (fallback_mt == -1)
++			continue;
+ 
+-	trace_mm_page_alloc_extfrag(page, order, current_order,
+-		start_migratetype, fallback_mt);
++		page = get_page_from_free_area(area, fallback_mt);
++		page_del_and_expand(zone, page, order, current_order, fallback_mt);
++		trace_mm_page_alloc_extfrag(page, order, current_order,
++					    start_migratetype, fallback_mt);
++		return page;
++	}
+ 
+-	return page;
++	return NULL;
+ }
+ 
++enum rmqueue_mode {
++	RMQUEUE_NORMAL,
++	RMQUEUE_CMA,
++	RMQUEUE_CLAIM,
++	RMQUEUE_STEAL,
++};
++
+ /*
+  * Do the hard work of removing an element from the buddy allocator.
+  * Call me with the zone->lock already held.
+  */
+ static __always_inline struct page *
+ __rmqueue(struct zone *zone, unsigned int order, int migratetype,
+-						unsigned int alloc_flags)
++	  unsigned int alloc_flags, enum rmqueue_mode *mode)
+ {
+ 	struct page *page;
+ 
+@@ -2282,16 +2281,49 @@ __rmqueue(struct zone *zone, unsigned int order, int migratetype,
+ 		}
+ 	}
+ 
+-	page = __rmqueue_smallest(zone, order, migratetype);
+-	if (unlikely(!page)) {
+-		if (alloc_flags & ALLOC_CMA)
++	/*
++	 * First try the freelists of the requested migratetype, then try
++	 * fallbacks modes with increasing levels of fragmentation risk.
++	 *
++	 * The fallback logic is expensive and rmqueue_bulk() calls in
++	 * a loop with the zone->lock held, meaning the freelists are
++	 * not subject to any outside changes. Remember in *mode where
++	 * we found pay dirt, to save us the search on the next call.
++	 */
++	switch (*mode) {
++	case RMQUEUE_NORMAL:
++		page = __rmqueue_smallest(zone, order, migratetype);
++		if (page)
++			return page;
++		fallthrough;
++	case RMQUEUE_CMA:
++		if (alloc_flags & ALLOC_CMA) {
+ 			page = __rmqueue_cma_fallback(zone, order);
+-
+-		if (!page)
+-			page = __rmqueue_fallback(zone, order, migratetype,
+-						  alloc_flags);
++			if (page) {
++				*mode = RMQUEUE_CMA;
++				return page;
++			}
++		}
++		fallthrough;
++	case RMQUEUE_CLAIM:
++		page = __rmqueue_claim(zone, order, migratetype, alloc_flags);
++		if (page) {
++			/* Replenished preferred freelist, back to normal mode. */
++			*mode = RMQUEUE_NORMAL;
++			return page;
++		}
++		fallthrough;
++	case RMQUEUE_STEAL:
++		if (!(alloc_flags & ALLOC_NOFRAGMENT)) {
++			page = __rmqueue_steal(zone, order, migratetype);
++			if (page) {
++				*mode = RMQUEUE_STEAL;
++				return page;
++			}
++		}
+ 	}
+-	return page;
++
++	return NULL;
+ }
+ 
+ /*
+@@ -2303,13 +2335,14 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
+ 			unsigned long count, struct list_head *list,
+ 			int migratetype, unsigned int alloc_flags)
+ {
++	enum rmqueue_mode rmqm = RMQUEUE_NORMAL;
+ 	unsigned long flags;
+ 	int i;
+ 
+ 	spin_lock_irqsave(&zone->lock, flags);
+ 	for (i = 0; i < count; ++i) {
+ 		struct page *page = __rmqueue(zone, order, migratetype,
+-								alloc_flags);
++					      alloc_flags, &rmqm);
+ 		if (unlikely(page == NULL))
+ 			break;
+ 
+@@ -2910,7 +2943,9 @@ struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone,
+ 		if (alloc_flags & ALLOC_HIGHATOMIC)
+ 			page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
+ 		if (!page) {
+-			page = __rmqueue(zone, order, migratetype, alloc_flags);
++			enum rmqueue_mode rmqm = RMQUEUE_NORMAL;
++
++			page = __rmqueue(zone, order, migratetype, alloc_flags, &rmqm);
+ 
+ 			/*
+ 			 * If the allocation fails, allow OOM handling and
+diff --git a/mm/vmalloc.c b/mm/vmalloc.c
+index fd70a7cd1c8fa8..358bd3083b8886 100644
+--- a/mm/vmalloc.c
++++ b/mm/vmalloc.c
+@@ -1940,7 +1940,7 @@ static inline void setup_vmalloc_vm(struct vm_struct *vm,
+ {
+ 	vm->flags = flags;
+ 	vm->addr = (void *)va->va_start;
+-	vm->size = va_size(va);
++	vm->size = vm->requested_size = va_size(va);
+ 	vm->caller = caller;
+ 	va->vm = vm;
+ }
+@@ -3128,6 +3128,7 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
+ 
+ 	area->flags = flags;
+ 	area->caller = caller;
++	area->requested_size = requested_size;
+ 
+ 	va = alloc_vmap_area(size, align, start, end, node, gfp_mask, 0, area);
+ 	if (IS_ERR(va)) {
+@@ -4067,6 +4068,8 @@ EXPORT_SYMBOL(vzalloc_node_noprof);
+  */
+ void *vrealloc_noprof(const void *p, size_t size, gfp_t flags)
+ {
++	struct vm_struct *vm = NULL;
++	size_t alloced_size = 0;
+ 	size_t old_size = 0;
+ 	void *n;
+ 
+@@ -4076,15 +4079,17 @@ void *vrealloc_noprof(const void *p, size_t size, gfp_t flags)
+ 	}
+ 
+ 	if (p) {
+-		struct vm_struct *vm;
+-
+ 		vm = find_vm_area(p);
+ 		if (unlikely(!vm)) {
+ 			WARN(1, "Trying to vrealloc() nonexistent vm area (%p)\n", p);
+ 			return NULL;
+ 		}
+ 
+-		old_size = get_vm_area_size(vm);
++		alloced_size = get_vm_area_size(vm);
++		old_size = vm->requested_size;
++		if (WARN(alloced_size < old_size,
++			 "vrealloc() has mismatched area vs requested sizes (%p)\n", p))
++			return NULL;
+ 	}
+ 
+ 	/*
+@@ -4092,14 +4097,26 @@ void *vrealloc_noprof(const void *p, size_t size, gfp_t flags)
+ 	 * would be a good heuristic for when to shrink the vm_area?
+ 	 */
+ 	if (size <= old_size) {
+-		/* Zero out spare memory. */
+-		if (want_init_on_alloc(flags))
++		/* Zero out "freed" memory. */
++		if (want_init_on_free())
+ 			memset((void *)p + size, 0, old_size - size);
++		vm->requested_size = size;
+ 		kasan_poison_vmalloc(p + size, old_size - size);
+-		kasan_unpoison_vmalloc(p, size, KASAN_VMALLOC_PROT_NORMAL);
+ 		return (void *)p;
+ 	}
+ 
++	/*
++	 * We already have the bytes available in the allocation; use them.
++	 */
++	if (size <= alloced_size) {
++		kasan_unpoison_vmalloc(p + old_size, size - old_size,
++				       KASAN_VMALLOC_PROT_NORMAL);
++		/* Zero out "alloced" memory. */
++		if (want_init_on_alloc(flags))
++			memset((void *)p + old_size, 0, size - old_size);
++		vm->requested_size = size;
++	}
++
+ 	/* TODO: Grow the vm_area, i.e. allocate and map additional pages. */
+ 	n = __vmalloc_noprof(size, flags);
+ 	if (!n)
+diff --git a/net/can/gw.c b/net/can/gw.c
+index 37528826935e74..e65500c52bf5c7 100644
+--- a/net/can/gw.c
++++ b/net/can/gw.c
+@@ -130,7 +130,7 @@ struct cgw_job {
+ 	u32 handled_frames;
+ 	u32 dropped_frames;
+ 	u32 deleted_frames;
+-	struct cf_mod mod;
++	struct cf_mod __rcu *cf_mod;
+ 	union {
+ 		/* CAN frame data source */
+ 		struct net_device *dev;
+@@ -459,6 +459,7 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data)
+ 	struct cgw_job *gwj = (struct cgw_job *)data;
+ 	struct canfd_frame *cf;
+ 	struct sk_buff *nskb;
++	struct cf_mod *mod;
+ 	int modidx = 0;
+ 
+ 	/* process strictly Classic CAN or CAN FD frames */
+@@ -506,7 +507,8 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data)
+ 	 * When there is at least one modification function activated,
+ 	 * we need to copy the skb as we want to modify skb->data.
+ 	 */
+-	if (gwj->mod.modfunc[0])
++	mod = rcu_dereference(gwj->cf_mod);
++	if (mod->modfunc[0])
+ 		nskb = skb_copy(skb, GFP_ATOMIC);
+ 	else
+ 		nskb = skb_clone(skb, GFP_ATOMIC);
+@@ -529,8 +531,8 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data)
+ 	cf = (struct canfd_frame *)nskb->data;
+ 
+ 	/* perform preprocessed modification functions if there are any */
+-	while (modidx < MAX_MODFUNCTIONS && gwj->mod.modfunc[modidx])
+-		(*gwj->mod.modfunc[modidx++])(cf, &gwj->mod);
++	while (modidx < MAX_MODFUNCTIONS && mod->modfunc[modidx])
++		(*mod->modfunc[modidx++])(cf, mod);
+ 
+ 	/* Has the CAN frame been modified? */
+ 	if (modidx) {
+@@ -546,11 +548,11 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data)
+ 		}
+ 
+ 		/* check for checksum updates */
+-		if (gwj->mod.csumfunc.crc8)
+-			(*gwj->mod.csumfunc.crc8)(cf, &gwj->mod.csum.crc8);
++		if (mod->csumfunc.crc8)
++			(*mod->csumfunc.crc8)(cf, &mod->csum.crc8);
+ 
+-		if (gwj->mod.csumfunc.xor)
+-			(*gwj->mod.csumfunc.xor)(cf, &gwj->mod.csum.xor);
++		if (mod->csumfunc.xor)
++			(*mod->csumfunc.xor)(cf, &mod->csum.xor);
+ 	}
+ 
+ 	/* clear the skb timestamp if not configured the other way */
+@@ -581,9 +583,20 @@ static void cgw_job_free_rcu(struct rcu_head *rcu_head)
+ {
+ 	struct cgw_job *gwj = container_of(rcu_head, struct cgw_job, rcu);
+ 
++	/* cgw_job::cf_mod is always accessed from the same cgw_job object within
++	 * the same RCU read section. Once cgw_job is scheduled for removal,
++	 * cf_mod can also be removed without mandating an additional grace period.
++	 */
++	kfree(rcu_access_pointer(gwj->cf_mod));
+ 	kmem_cache_free(cgw_cache, gwj);
+ }
+ 
++/* Return cgw_job::cf_mod while the RTNL lock is held */
++static struct cf_mod *cgw_job_cf_mod(struct cgw_job *gwj)
++{
++	return rcu_dereference_protected(gwj->cf_mod, rtnl_is_locked());
++}
++
+ static int cgw_notifier(struct notifier_block *nb,
+ 			unsigned long msg, void *ptr)
+ {
+@@ -616,6 +629,7 @@ static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj, int type,
+ {
+ 	struct rtcanmsg *rtcan;
+ 	struct nlmsghdr *nlh;
++	struct cf_mod *mod;
+ 
+ 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*rtcan), flags);
+ 	if (!nlh)
+@@ -650,82 +664,83 @@ static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj, int type,
+ 			goto cancel;
+ 	}
+ 
++	mod = cgw_job_cf_mod(gwj);
+ 	if (gwj->flags & CGW_FLAGS_CAN_FD) {
+ 		struct cgw_fdframe_mod mb;
+ 
+-		if (gwj->mod.modtype.and) {
+-			memcpy(&mb.cf, &gwj->mod.modframe.and, sizeof(mb.cf));
+-			mb.modtype = gwj->mod.modtype.and;
++		if (mod->modtype.and) {
++			memcpy(&mb.cf, &mod->modframe.and, sizeof(mb.cf));
++			mb.modtype = mod->modtype.and;
+ 			if (nla_put(skb, CGW_FDMOD_AND, sizeof(mb), &mb) < 0)
+ 				goto cancel;
+ 		}
+ 
+-		if (gwj->mod.modtype.or) {
+-			memcpy(&mb.cf, &gwj->mod.modframe.or, sizeof(mb.cf));
+-			mb.modtype = gwj->mod.modtype.or;
++		if (mod->modtype.or) {
++			memcpy(&mb.cf, &mod->modframe.or, sizeof(mb.cf));
++			mb.modtype = mod->modtype.or;
+ 			if (nla_put(skb, CGW_FDMOD_OR, sizeof(mb), &mb) < 0)
+ 				goto cancel;
+ 		}
+ 
+-		if (gwj->mod.modtype.xor) {
+-			memcpy(&mb.cf, &gwj->mod.modframe.xor, sizeof(mb.cf));
+-			mb.modtype = gwj->mod.modtype.xor;
++		if (mod->modtype.xor) {
++			memcpy(&mb.cf, &mod->modframe.xor, sizeof(mb.cf));
++			mb.modtype = mod->modtype.xor;
+ 			if (nla_put(skb, CGW_FDMOD_XOR, sizeof(mb), &mb) < 0)
+ 				goto cancel;
+ 		}
+ 
+-		if (gwj->mod.modtype.set) {
+-			memcpy(&mb.cf, &gwj->mod.modframe.set, sizeof(mb.cf));
+-			mb.modtype = gwj->mod.modtype.set;
++		if (mod->modtype.set) {
++			memcpy(&mb.cf, &mod->modframe.set, sizeof(mb.cf));
++			mb.modtype = mod->modtype.set;
+ 			if (nla_put(skb, CGW_FDMOD_SET, sizeof(mb), &mb) < 0)
+ 				goto cancel;
+ 		}
+ 	} else {
+ 		struct cgw_frame_mod mb;
+ 
+-		if (gwj->mod.modtype.and) {
+-			memcpy(&mb.cf, &gwj->mod.modframe.and, sizeof(mb.cf));
+-			mb.modtype = gwj->mod.modtype.and;
++		if (mod->modtype.and) {
++			memcpy(&mb.cf, &mod->modframe.and, sizeof(mb.cf));
++			mb.modtype = mod->modtype.and;
+ 			if (nla_put(skb, CGW_MOD_AND, sizeof(mb), &mb) < 0)
+ 				goto cancel;
+ 		}
+ 
+-		if (gwj->mod.modtype.or) {
+-			memcpy(&mb.cf, &gwj->mod.modframe.or, sizeof(mb.cf));
+-			mb.modtype = gwj->mod.modtype.or;
++		if (mod->modtype.or) {
++			memcpy(&mb.cf, &mod->modframe.or, sizeof(mb.cf));
++			mb.modtype = mod->modtype.or;
+ 			if (nla_put(skb, CGW_MOD_OR, sizeof(mb), &mb) < 0)
+ 				goto cancel;
+ 		}
+ 
+-		if (gwj->mod.modtype.xor) {
+-			memcpy(&mb.cf, &gwj->mod.modframe.xor, sizeof(mb.cf));
+-			mb.modtype = gwj->mod.modtype.xor;
++		if (mod->modtype.xor) {
++			memcpy(&mb.cf, &mod->modframe.xor, sizeof(mb.cf));
++			mb.modtype = mod->modtype.xor;
+ 			if (nla_put(skb, CGW_MOD_XOR, sizeof(mb), &mb) < 0)
+ 				goto cancel;
+ 		}
+ 
+-		if (gwj->mod.modtype.set) {
+-			memcpy(&mb.cf, &gwj->mod.modframe.set, sizeof(mb.cf));
+-			mb.modtype = gwj->mod.modtype.set;
++		if (mod->modtype.set) {
++			memcpy(&mb.cf, &mod->modframe.set, sizeof(mb.cf));
++			mb.modtype = mod->modtype.set;
+ 			if (nla_put(skb, CGW_MOD_SET, sizeof(mb), &mb) < 0)
+ 				goto cancel;
+ 		}
+ 	}
+ 
+-	if (gwj->mod.uid) {
+-		if (nla_put_u32(skb, CGW_MOD_UID, gwj->mod.uid) < 0)
++	if (mod->uid) {
++		if (nla_put_u32(skb, CGW_MOD_UID, mod->uid) < 0)
+ 			goto cancel;
+ 	}
+ 
+-	if (gwj->mod.csumfunc.crc8) {
++	if (mod->csumfunc.crc8) {
+ 		if (nla_put(skb, CGW_CS_CRC8, CGW_CS_CRC8_LEN,
+-			    &gwj->mod.csum.crc8) < 0)
++			    &mod->csum.crc8) < 0)
+ 			goto cancel;
+ 	}
+ 
+-	if (gwj->mod.csumfunc.xor) {
++	if (mod->csumfunc.xor) {
+ 		if (nla_put(skb, CGW_CS_XOR, CGW_CS_XOR_LEN,
+-			    &gwj->mod.csum.xor) < 0)
++			    &mod->csum.xor) < 0)
+ 			goto cancel;
+ 	}
+ 
+@@ -1059,7 +1074,7 @@ static int cgw_create_job(struct sk_buff *skb,  struct nlmsghdr *nlh,
+ 	struct net *net = sock_net(skb->sk);
+ 	struct rtcanmsg *r;
+ 	struct cgw_job *gwj;
+-	struct cf_mod mod;
++	struct cf_mod *mod;
+ 	struct can_can_gw ccgw;
+ 	u8 limhops = 0;
+ 	int err = 0;
+@@ -1078,37 +1093,48 @@ static int cgw_create_job(struct sk_buff *skb,  struct nlmsghdr *nlh,
+ 	if (r->gwtype != CGW_TYPE_CAN_CAN)
+ 		return -EINVAL;
+ 
+-	err = cgw_parse_attr(nlh, &mod, CGW_TYPE_CAN_CAN, &ccgw, &limhops);
++	mod = kmalloc(sizeof(*mod), GFP_KERNEL);
++	if (!mod)
++		return -ENOMEM;
++
++	err = cgw_parse_attr(nlh, mod, CGW_TYPE_CAN_CAN, &ccgw, &limhops);
+ 	if (err < 0)
+-		return err;
++		goto out_free_cf;
+ 
+-	if (mod.uid) {
++	if (mod->uid) {
+ 		ASSERT_RTNL();
+ 
+ 		/* check for updating an existing job with identical uid */
+ 		hlist_for_each_entry(gwj, &net->can.cgw_list, list) {
+-			if (gwj->mod.uid != mod.uid)
++			struct cf_mod *old_cf;
++
++			old_cf = cgw_job_cf_mod(gwj);
++			if (old_cf->uid != mod->uid)
+ 				continue;
+ 
+ 			/* interfaces & filters must be identical */
+-			if (memcmp(&gwj->ccgw, &ccgw, sizeof(ccgw)))
+-				return -EINVAL;
++			if (memcmp(&gwj->ccgw, &ccgw, sizeof(ccgw))) {
++				err = -EINVAL;
++				goto out_free_cf;
++			}
+ 
+-			/* update modifications with disabled softirq & quit */
+-			local_bh_disable();
+-			memcpy(&gwj->mod, &mod, sizeof(mod));
+-			local_bh_enable();
++			rcu_assign_pointer(gwj->cf_mod, mod);
++			kfree_rcu_mightsleep(old_cf);
+ 			return 0;
+ 		}
+ 	}
+ 
+ 	/* ifindex == 0 is not allowed for job creation */
+-	if (!ccgw.src_idx || !ccgw.dst_idx)
+-		return -ENODEV;
++	if (!ccgw.src_idx || !ccgw.dst_idx) {
++		err = -ENODEV;
++		goto out_free_cf;
++	}
+ 
+ 	gwj = kmem_cache_alloc(cgw_cache, GFP_KERNEL);
+-	if (!gwj)
+-		return -ENOMEM;
++	if (!gwj) {
++		err = -ENOMEM;
++		goto out_free_cf;
++	}
+ 
+ 	gwj->handled_frames = 0;
+ 	gwj->dropped_frames = 0;
+@@ -1118,7 +1144,7 @@ static int cgw_create_job(struct sk_buff *skb,  struct nlmsghdr *nlh,
+ 	gwj->limit_hops = limhops;
+ 
+ 	/* insert already parsed information */
+-	memcpy(&gwj->mod, &mod, sizeof(mod));
++	RCU_INIT_POINTER(gwj->cf_mod, mod);
+ 	memcpy(&gwj->ccgw, &ccgw, sizeof(ccgw));
+ 
+ 	err = -ENODEV;
+@@ -1152,9 +1178,11 @@ static int cgw_create_job(struct sk_buff *skb,  struct nlmsghdr *nlh,
+ 	if (!err)
+ 		hlist_add_head_rcu(&gwj->list, &net->can.cgw_list);
+ out:
+-	if (err)
++	if (err) {
+ 		kmem_cache_free(cgw_cache, gwj);
+-
++out_free_cf:
++		kfree(mod);
++	}
+ 	return err;
+ }
+ 
+@@ -1214,19 +1242,22 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh,
+ 
+ 	/* remove only the first matching entry */
+ 	hlist_for_each_entry_safe(gwj, nx, &net->can.cgw_list, list) {
++		struct cf_mod *cf_mod;
++
+ 		if (gwj->flags != r->flags)
+ 			continue;
+ 
+ 		if (gwj->limit_hops != limhops)
+ 			continue;
+ 
++		cf_mod = cgw_job_cf_mod(gwj);
+ 		/* we have a match when uid is enabled and identical */
+-		if (gwj->mod.uid || mod.uid) {
+-			if (gwj->mod.uid != mod.uid)
++		if (cf_mod->uid || mod.uid) {
++			if (cf_mod->uid != mod.uid)
+ 				continue;
+ 		} else {
+ 			/* no uid => check for identical modifications */
+-			if (memcmp(&gwj->mod, &mod, sizeof(mod)))
++			if (memcmp(cf_mod, &mod, sizeof(mod)))
+ 				continue;
+ 		}
+ 
+diff --git a/net/core/filter.c b/net/core/filter.c
+index 790345c2546b7b..99b23fd2f509c9 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -2526,6 +2526,7 @@ int skb_do_redirect(struct sk_buff *skb)
+ 			goto out_drop;
+ 		skb->dev = dev;
+ 		dev_sw_netstats_rx_add(dev, skb->len);
++		skb_scrub_packet(skb, false);
+ 		return -EAGAIN;
+ 	}
+ 	return flags & BPF_F_NEIGH ?
+diff --git a/net/core/netdev-genl.c b/net/core/netdev-genl.c
+index ad426b3a03b526..0fe537781bc4d9 100644
+--- a/net/core/netdev-genl.c
++++ b/net/core/netdev-genl.c
+@@ -616,25 +616,66 @@ netdev_nl_stats_by_queue(struct net_device *netdev, struct sk_buff *rsp,
+ 	return 0;
+ }
+ 
++/**
++ * netdev_stat_queue_sum() - add up queue stats from range of queues
++ * @netdev:	net_device
++ * @rx_start:	index of the first Rx queue to query
++ * @rx_end:	index after the last Rx queue (first *not* to query)
++ * @rx_sum:	output Rx stats, should be already initialized
++ * @tx_start:	index of the first Tx queue to query
++ * @tx_end:	index after the last Tx queue (first *not* to query)
++ * @tx_sum:	output Tx stats, should be already initialized
++ *
++ * Add stats from [start, end) range of queue IDs to *x_sum structs.
++ * The sum structs must be already initialized. Usually this
++ * helper is invoked from the .get_base_stats callbacks of drivers
++ * to account for stats of disabled queues. In that case the ranges
++ * are usually [netdev->real_num_*x_queues, netdev->num_*x_queues).
++ */
++void netdev_stat_queue_sum(struct net_device *netdev,
++			   int rx_start, int rx_end,
++			   struct netdev_queue_stats_rx *rx_sum,
++			   int tx_start, int tx_end,
++			   struct netdev_queue_stats_tx *tx_sum)
++{
++	const struct netdev_stat_ops *ops;
++	struct netdev_queue_stats_rx rx;
++	struct netdev_queue_stats_tx tx;
++	int i;
++
++	ops = netdev->stat_ops;
++
++	for (i = rx_start; i < rx_end; i++) {
++		memset(&rx, 0xff, sizeof(rx));
++		if (ops->get_queue_stats_rx)
++			ops->get_queue_stats_rx(netdev, i, &rx);
++		netdev_nl_stats_add(rx_sum, &rx, sizeof(rx));
++	}
++	for (i = tx_start; i < tx_end; i++) {
++		memset(&tx, 0xff, sizeof(tx));
++		if (ops->get_queue_stats_tx)
++			ops->get_queue_stats_tx(netdev, i, &tx);
++		netdev_nl_stats_add(tx_sum, &tx, sizeof(tx));
++	}
++}
++EXPORT_SYMBOL(netdev_stat_queue_sum);
++
+ static int
+ netdev_nl_stats_by_netdev(struct net_device *netdev, struct sk_buff *rsp,
+ 			  const struct genl_info *info)
+ {
+-	struct netdev_queue_stats_rx rx_sum, rx;
+-	struct netdev_queue_stats_tx tx_sum, tx;
+-	const struct netdev_stat_ops *ops;
++	struct netdev_queue_stats_rx rx_sum;
++	struct netdev_queue_stats_tx tx_sum;
+ 	void *hdr;
+-	int i;
+ 
+-	ops = netdev->stat_ops;
+ 	/* Netdev can't guarantee any complete counters */
+-	if (!ops->get_base_stats)
++	if (!netdev->stat_ops->get_base_stats)
+ 		return 0;
+ 
+ 	memset(&rx_sum, 0xff, sizeof(rx_sum));
+ 	memset(&tx_sum, 0xff, sizeof(tx_sum));
+ 
+-	ops->get_base_stats(netdev, &rx_sum, &tx_sum);
++	netdev->stat_ops->get_base_stats(netdev, &rx_sum, &tx_sum);
+ 
+ 	/* The op was there, but nothing reported, don't bother */
+ 	if (!memchr_inv(&rx_sum, 0xff, sizeof(rx_sum)) &&
+@@ -647,18 +688,8 @@ netdev_nl_stats_by_netdev(struct net_device *netdev, struct sk_buff *rsp,
+ 	if (nla_put_u32(rsp, NETDEV_A_QSTATS_IFINDEX, netdev->ifindex))
+ 		goto nla_put_failure;
+ 
+-	for (i = 0; i < netdev->real_num_rx_queues; i++) {
+-		memset(&rx, 0xff, sizeof(rx));
+-		if (ops->get_queue_stats_rx)
+-			ops->get_queue_stats_rx(netdev, i, &rx);
+-		netdev_nl_stats_add(&rx_sum, &rx, sizeof(rx));
+-	}
+-	for (i = 0; i < netdev->real_num_tx_queues; i++) {
+-		memset(&tx, 0xff, sizeof(tx));
+-		if (ops->get_queue_stats_tx)
+-			ops->get_queue_stats_tx(netdev, i, &tx);
+-		netdev_nl_stats_add(&tx_sum, &tx, sizeof(tx));
+-	}
++	netdev_stat_queue_sum(netdev, 0, netdev->real_num_rx_queues, &rx_sum,
++			      0, netdev->real_num_tx_queues, &tx_sum);
+ 
+ 	if (netdev_nl_stats_write_rx(rsp, &rx_sum) ||
+ 	    netdev_nl_stats_write_tx(rsp, &tx_sum))
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index f5d49162f79834..16ba3bb12fc4b9 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -3237,16 +3237,13 @@ static void add_v4_addrs(struct inet6_dev *idev)
+ 	struct in6_addr addr;
+ 	struct net_device *dev;
+ 	struct net *net = dev_net(idev->dev);
+-	int scope, plen, offset = 0;
++	int scope, plen;
+ 	u32 pflags = 0;
+ 
+ 	ASSERT_RTNL();
+ 
+ 	memset(&addr, 0, sizeof(struct in6_addr));
+-	/* in case of IP6GRE the dev_addr is an IPv6 and therefore we use only the last 4 bytes */
+-	if (idev->dev->addr_len == sizeof(struct in6_addr))
+-		offset = sizeof(struct in6_addr) - 4;
+-	memcpy(&addr.s6_addr32[3], idev->dev->dev_addr + offset, 4);
++	memcpy(&addr.s6_addr32[3], idev->dev->dev_addr, 4);
+ 
+ 	if (!(idev->dev->flags & IFF_POINTOPOINT) && idev->dev->type == ARPHRD_SIT) {
+ 		scope = IPV6_ADDR_COMPATv4;
+@@ -3557,7 +3554,13 @@ static void addrconf_gre_config(struct net_device *dev)
+ 		return;
+ 	}
+ 
+-	if (dev->type == ARPHRD_ETHER) {
++	/* Generate the IPv6 link-local address using addrconf_addr_gen(),
++	 * unless we have an IPv4 GRE device not bound to an IP address and
++	 * that is in EUI64 mode (as __ipv6_isatap_ifid() would fail in this
++	 * case). Such devices fall back to add_v4_addrs() instead.
++	 */
++	if (!(dev->type == ARPHRD_IPGRE && *(__be32 *)dev->dev_addr == 0 &&
++	      idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_EUI64)) {
+ 		addrconf_addr_gen(idev, true);
+ 		return;
+ 	}
+diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
+index ad0d040569dcd3..cc8c5d18b130db 100644
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -7177,6 +7177,7 @@ ieee80211_send_neg_ttlm_res(struct ieee80211_sub_if_data *sdata,
+ 	int hdr_len = offsetofend(struct ieee80211_mgmt, u.action.u.ttlm_res);
+ 	int ttlm_max_len = 2 + 1 + sizeof(struct ieee80211_ttlm_elem) + 1 +
+ 		2 * 2 * IEEE80211_TTLM_NUM_TIDS;
++	u16 status_code;
+ 
+ 	skb = dev_alloc_skb(local->tx_headroom + hdr_len + ttlm_max_len);
+ 	if (!skb)
+@@ -7199,19 +7200,18 @@ ieee80211_send_neg_ttlm_res(struct ieee80211_sub_if_data *sdata,
+ 		WARN_ON(1);
+ 		fallthrough;
+ 	case NEG_TTLM_RES_REJECT:
+-		mgmt->u.action.u.ttlm_res.status_code =
+-			WLAN_STATUS_DENIED_TID_TO_LINK_MAPPING;
++		status_code = WLAN_STATUS_DENIED_TID_TO_LINK_MAPPING;
+ 		break;
+ 	case NEG_TTLM_RES_ACCEPT:
+-		mgmt->u.action.u.ttlm_res.status_code = WLAN_STATUS_SUCCESS;
++		status_code = WLAN_STATUS_SUCCESS;
+ 		break;
+ 	case NEG_TTLM_RES_SUGGEST_PREFERRED:
+-		mgmt->u.action.u.ttlm_res.status_code =
+-			WLAN_STATUS_PREF_TID_TO_LINK_MAPPING_SUGGESTED;
++		status_code = WLAN_STATUS_PREF_TID_TO_LINK_MAPPING_SUGGESTED;
+ 		ieee80211_neg_ttlm_add_suggested_map(skb, neg_ttlm);
+ 		break;
+ 	}
+ 
++	mgmt->u.action.u.ttlm_res.status_code = cpu_to_le16(status_code);
+ 	ieee80211_tx_skb(sdata, skb);
+ }
+ 
+@@ -7377,7 +7377,7 @@ void ieee80211_process_neg_ttlm_res(struct ieee80211_sub_if_data *sdata,
+ 	 * This can be better implemented in the future, to handle request
+ 	 * rejections.
+ 	 */
+-	if (mgmt->u.action.u.ttlm_res.status_code != WLAN_STATUS_SUCCESS)
++	if (le16_to_cpu(mgmt->u.action.u.ttlm_res.status_code) != WLAN_STATUS_SUCCESS)
+ 		__ieee80211_disconnect(sdata);
+ }
+ 
+diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h
+index cf3ce72c3de645..5251524b96afac 100644
+--- a/net/netfilter/ipset/ip_set_hash_gen.h
++++ b/net/netfilter/ipset/ip_set_hash_gen.h
+@@ -64,7 +64,7 @@ struct hbucket {
+ #define ahash_sizeof_regions(htable_bits)		\
+ 	(ahash_numof_locks(htable_bits) * sizeof(struct ip_set_region))
+ #define ahash_region(n, htable_bits)		\
+-	((n) % ahash_numof_locks(htable_bits))
++	((n) / jhash_size(HTABLE_REGION_BITS))
+ #define ahash_bucket_start(h,  htable_bits)	\
+ 	((htable_bits) < HTABLE_REGION_BITS ? 0	\
+ 		: (h) * jhash_size(HTABLE_REGION_BITS))
+diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
+index 3313bceb6cc99d..014f077403695f 100644
+--- a/net/netfilter/ipvs/ip_vs_xmit.c
++++ b/net/netfilter/ipvs/ip_vs_xmit.c
+@@ -119,13 +119,12 @@ __mtu_check_toobig_v6(const struct sk_buff *skb, u32 mtu)
+ 	return false;
+ }
+ 
+-/* Get route to daddr, update *saddr, optionally bind route to saddr */
++/* Get route to daddr, optionally bind route to saddr */
+ static struct rtable *do_output_route4(struct net *net, __be32 daddr,
+-				       int rt_mode, __be32 *saddr)
++				       int rt_mode, __be32 *ret_saddr)
+ {
+ 	struct flowi4 fl4;
+ 	struct rtable *rt;
+-	bool loop = false;
+ 
+ 	memset(&fl4, 0, sizeof(fl4));
+ 	fl4.daddr = daddr;
+@@ -135,23 +134,17 @@ static struct rtable *do_output_route4(struct net *net, __be32 daddr,
+ retry:
+ 	rt = ip_route_output_key(net, &fl4);
+ 	if (IS_ERR(rt)) {
+-		/* Invalid saddr ? */
+-		if (PTR_ERR(rt) == -EINVAL && *saddr &&
+-		    rt_mode & IP_VS_RT_MODE_CONNECT && !loop) {
+-			*saddr = 0;
+-			flowi4_update_output(&fl4, 0, daddr, 0);
+-			goto retry;
+-		}
+ 		IP_VS_DBG_RL("ip_route_output error, dest: %pI4\n", &daddr);
+ 		return NULL;
+-	} else if (!*saddr && rt_mode & IP_VS_RT_MODE_CONNECT && fl4.saddr) {
++	}
++	if (rt_mode & IP_VS_RT_MODE_CONNECT && fl4.saddr) {
+ 		ip_rt_put(rt);
+-		*saddr = fl4.saddr;
+ 		flowi4_update_output(&fl4, 0, daddr, fl4.saddr);
+-		loop = true;
++		rt_mode = 0;
+ 		goto retry;
+ 	}
+-	*saddr = fl4.saddr;
++	if (ret_saddr)
++		*ret_saddr = fl4.saddr;
+ 	return rt;
+ }
+ 
+@@ -344,19 +337,15 @@ __ip_vs_get_out_rt(struct netns_ipvs *ipvs, int skb_af, struct sk_buff *skb,
+ 		if (ret_saddr)
+ 			*ret_saddr = dest_dst->dst_saddr.ip;
+ 	} else {
+-		__be32 saddr = htonl(INADDR_ANY);
+-
+ 		noref = 0;
+ 
+ 		/* For such unconfigured boxes avoid many route lookups
+ 		 * for performance reasons because we do not remember saddr
+ 		 */
+ 		rt_mode &= ~IP_VS_RT_MODE_CONNECT;
+-		rt = do_output_route4(net, daddr, rt_mode, &saddr);
++		rt = do_output_route4(net, daddr, rt_mode, ret_saddr);
+ 		if (!rt)
+ 			goto err_unreach;
+-		if (ret_saddr)
+-			*ret_saddr = saddr;
+ 	}
+ 
+ 	local = (rt->rt_flags & RTCF_LOCAL) ? 1 : 0;
+diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
+index 61fea7baae5d5c..2f22ca59586f25 100644
+--- a/net/openvswitch/actions.c
++++ b/net/openvswitch/actions.c
+@@ -975,8 +975,7 @@ static int output_userspace(struct datapath *dp, struct sk_buff *skb,
+ 	upcall.cmd = OVS_PACKET_CMD_ACTION;
+ 	upcall.mru = OVS_CB(skb)->mru;
+ 
+-	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
+-	     a = nla_next(a, &rem)) {
++	nla_for_each_nested(a, attr, rem) {
+ 		switch (nla_type(a)) {
+ 		case OVS_USERSPACE_ATTR_USERDATA:
+ 			upcall.userdata = a;
+diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
+index 12cccc84d58a0e..b2494d24a54253 100644
+--- a/net/sched/sch_htb.c
++++ b/net/sched/sch_htb.c
+@@ -348,7 +348,8 @@ static void htb_add_to_wait_tree(struct htb_sched *q,
+  */
+ static inline void htb_next_rb_node(struct rb_node **n)
+ {
+-	*n = rb_next(*n);
++	if (*n)
++		*n = rb_next(*n);
+ }
+ 
+ /**
+@@ -609,8 +610,8 @@ static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
+  */
+ static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
+ {
+-	WARN_ON(!cl->prio_activity);
+-
++	if (!cl->prio_activity)
++		return;
+ 	htb_deactivate_prios(q, cl);
+ 	cl->prio_activity = 0;
+ }
+@@ -1485,8 +1486,6 @@ static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg)
+ {
+ 	struct htb_class *cl = (struct htb_class *)arg;
+ 
+-	if (!cl->prio_activity)
+-		return;
+ 	htb_deactivate(qdisc_priv(sch), cl);
+ }
+ 
+@@ -1740,8 +1739,7 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg,
+ 	if (cl->parent)
+ 		cl->parent->children--;
+ 
+-	if (cl->prio_activity)
+-		htb_deactivate(q, cl);
++	htb_deactivate(q, cl);
+ 
+ 	if (cl->cmode != HTB_CAN_SEND)
+ 		htb_safe_rb_erase(&cl->pq_node,
+@@ -1949,8 +1947,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
+ 			/* turn parent into inner node */
+ 			qdisc_purge_queue(parent->leaf.q);
+ 			parent_qdisc = parent->leaf.q;
+-			if (parent->prio_activity)
+-				htb_deactivate(q, parent);
++			htb_deactivate(q, parent);
+ 
+ 			/* remove from evt list because of level change */
+ 			if (parent->cmode != HTB_CAN_SEND) {
+diff --git a/net/wireless/scan.c b/net/wireless/scan.c
+index 18e132cdea72a8..f0dd1f448d4d42 100644
+--- a/net/wireless/scan.c
++++ b/net/wireless/scan.c
+@@ -2644,7 +2644,7 @@ cfg80211_defrag_mle(const struct element *mle, const u8 *ie, size_t ielen,
+ 	/* Required length for first defragmentation */
+ 	buf_len = mle->datalen - 1;
+ 	for_each_element(elem, mle->data + mle->datalen,
+-			 ielen - sizeof(*mle) + mle->datalen) {
++			 ie + ielen - mle->data - mle->datalen) {
+ 		if (elem->id != WLAN_EID_FRAGMENT)
+ 			break;
+ 
+diff --git a/rust/bindings/lib.rs b/rust/bindings/lib.rs
+index 014af0d1fc70cb..a08eb5518cac5d 100644
+--- a/rust/bindings/lib.rs
++++ b/rust/bindings/lib.rs
+@@ -26,6 +26,7 @@
+ 
+ #[allow(dead_code)]
+ #[allow(clippy::undocumented_unsafe_blocks)]
++#[cfg_attr(CONFIG_RUSTC_HAS_UNNECESSARY_TRANSMUTES, allow(unnecessary_transmutes))]
+ mod bindings_raw {
+     // Manual definition for blocklisted types.
+     type __kernel_size_t = usize;
+diff --git a/rust/kernel/alloc/kvec.rs b/rust/kernel/alloc/kvec.rs
+index ae9d072741cedb..87a71fd40c3cad 100644
+--- a/rust/kernel/alloc/kvec.rs
++++ b/rust/kernel/alloc/kvec.rs
+@@ -2,6 +2,9 @@
+ 
+ //! Implementation of [`Vec`].
+ 
++// May not be needed in Rust 1.87.0 (pending beta backport).
++#![allow(clippy::ptr_eq)]
++
+ use super::{
+     allocator::{KVmalloc, Kmalloc, Vmalloc},
+     layout::ArrayLayout,
+diff --git a/rust/kernel/list.rs b/rust/kernel/list.rs
+index fb93330f4af48c..3841ba02ef7a38 100644
+--- a/rust/kernel/list.rs
++++ b/rust/kernel/list.rs
+@@ -4,6 +4,9 @@
+ 
+ //! A linked list implementation.
+ 
++// May not be needed in Rust 1.87.0 (pending beta backport).
++#![allow(clippy::ptr_eq)]
++
+ use crate::init::PinInit;
+ use crate::sync::ArcBorrow;
+ use crate::types::Opaque;
+diff --git a/rust/kernel/str.rs b/rust/kernel/str.rs
+index d04c12a1426d1c..78ccfeb7385895 100644
+--- a/rust/kernel/str.rs
++++ b/rust/kernel/str.rs
+@@ -55,7 +55,7 @@ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+                 b'\r' => f.write_str("\\r")?,
+                 // Printable characters.
+                 0x20..=0x7e => f.write_char(b as char)?,
+-                _ => write!(f, "\\x{:02x}", b)?,
++                _ => write!(f, "\\x{b:02x}")?,
+             }
+         }
+         Ok(())
+@@ -90,7 +90,7 @@ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+                 b'\\' => f.write_str("\\\\")?,
+                 // Printable characters.
+                 0x20..=0x7e => f.write_char(b as char)?,
+-                _ => write!(f, "\\x{:02x}", b)?,
++                _ => write!(f, "\\x{b:02x}")?,
+             }
+         }
+         f.write_char('"')
+@@ -397,7 +397,7 @@ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+                 // Printable character.
+                 f.write_char(c as char)?;
+             } else {
+-                write!(f, "\\x{:02x}", c)?;
++                write!(f, "\\x{c:02x}")?;
+             }
+         }
+         Ok(())
+@@ -428,7 +428,7 @@ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+                 // Printable characters.
+                 b'\"' => f.write_str("\\\"")?,
+                 0x20..=0x7e => f.write_char(c as char)?,
+-                _ => write!(f, "\\x{:02x}", c)?,
++                _ => write!(f, "\\x{c:02x}")?,
+             }
+         }
+         f.write_str("\"")
+@@ -588,13 +588,13 @@ fn test_cstr_as_str_unchecked() {
+     #[test]
+     fn test_cstr_display() {
+         let hello_world = CStr::from_bytes_with_nul(b"hello, world!\0").unwrap();
+-        assert_eq!(format!("{}", hello_world), "hello, world!");
++        assert_eq!(format!("{hello_world}"), "hello, world!");
+         let non_printables = CStr::from_bytes_with_nul(b"\x01\x09\x0a\0").unwrap();
+-        assert_eq!(format!("{}", non_printables), "\\x01\\x09\\x0a");
++        assert_eq!(format!("{non_printables}"), "\\x01\\x09\\x0a");
+         let non_ascii = CStr::from_bytes_with_nul(b"d\xe9j\xe0 vu\0").unwrap();
+-        assert_eq!(format!("{}", non_ascii), "d\\xe9j\\xe0 vu");
++        assert_eq!(format!("{non_ascii}"), "d\\xe9j\\xe0 vu");
+         let good_bytes = CStr::from_bytes_with_nul(b"\xf0\x9f\xa6\x80\0").unwrap();
+-        assert_eq!(format!("{}", good_bytes), "\\xf0\\x9f\\xa6\\x80");
++        assert_eq!(format!("{good_bytes}"), "\\xf0\\x9f\\xa6\\x80");
+     }
+ 
+     #[test]
+@@ -605,47 +605,47 @@ fn test_cstr_display_all_bytes() {
+             bytes[i as usize] = i.wrapping_add(1);
+         }
+         let cstr = CStr::from_bytes_with_nul(&bytes).unwrap();
+-        assert_eq!(format!("{}", cstr), ALL_ASCII_CHARS);
++        assert_eq!(format!("{cstr}"), ALL_ASCII_CHARS);
+     }
+ 
+     #[test]
+     fn test_cstr_debug() {
+         let hello_world = CStr::from_bytes_with_nul(b"hello, world!\0").unwrap();
+-        assert_eq!(format!("{:?}", hello_world), "\"hello, world!\"");
++        assert_eq!(format!("{hello_world:?}"), "\"hello, world!\"");
+         let non_printables = CStr::from_bytes_with_nul(b"\x01\x09\x0a\0").unwrap();
+-        assert_eq!(format!("{:?}", non_printables), "\"\\x01\\x09\\x0a\"");
++        assert_eq!(format!("{non_printables:?}"), "\"\\x01\\x09\\x0a\"");
+         let non_ascii = CStr::from_bytes_with_nul(b"d\xe9j\xe0 vu\0").unwrap();
+-        assert_eq!(format!("{:?}", non_ascii), "\"d\\xe9j\\xe0 vu\"");
++        assert_eq!(format!("{non_ascii:?}"), "\"d\\xe9j\\xe0 vu\"");
+         let good_bytes = CStr::from_bytes_with_nul(b"\xf0\x9f\xa6\x80\0").unwrap();
+-        assert_eq!(format!("{:?}", good_bytes), "\"\\xf0\\x9f\\xa6\\x80\"");
++        assert_eq!(format!("{good_bytes:?}"), "\"\\xf0\\x9f\\xa6\\x80\"");
+     }
+ 
+     #[test]
+     fn test_bstr_display() {
+         let hello_world = BStr::from_bytes(b"hello, world!");
+-        assert_eq!(format!("{}", hello_world), "hello, world!");
++        assert_eq!(format!("{hello_world}"), "hello, world!");
+         let escapes = BStr::from_bytes(b"_\t_\n_\r_\\_\'_\"_");
+-        assert_eq!(format!("{}", escapes), "_\\t_\\n_\\r_\\_'_\"_");
++        assert_eq!(format!("{escapes}"), "_\\t_\\n_\\r_\\_'_\"_");
+         let others = BStr::from_bytes(b"\x01");
+-        assert_eq!(format!("{}", others), "\\x01");
++        assert_eq!(format!("{others}"), "\\x01");
+         let non_ascii = BStr::from_bytes(b"d\xe9j\xe0 vu");
+-        assert_eq!(format!("{}", non_ascii), "d\\xe9j\\xe0 vu");
++        assert_eq!(format!("{non_ascii}"), "d\\xe9j\\xe0 vu");
+         let good_bytes = BStr::from_bytes(b"\xf0\x9f\xa6\x80");
+-        assert_eq!(format!("{}", good_bytes), "\\xf0\\x9f\\xa6\\x80");
++        assert_eq!(format!("{good_bytes}"), "\\xf0\\x9f\\xa6\\x80");
+     }
+ 
+     #[test]
+     fn test_bstr_debug() {
+         let hello_world = BStr::from_bytes(b"hello, world!");
+-        assert_eq!(format!("{:?}", hello_world), "\"hello, world!\"");
++        assert_eq!(format!("{hello_world:?}"), "\"hello, world!\"");
+         let escapes = BStr::from_bytes(b"_\t_\n_\r_\\_\'_\"_");
+-        assert_eq!(format!("{:?}", escapes), "\"_\\t_\\n_\\r_\\\\_'_\\\"_\"");
++        assert_eq!(format!("{escapes:?}"), "\"_\\t_\\n_\\r_\\\\_'_\\\"_\"");
+         let others = BStr::from_bytes(b"\x01");
+-        assert_eq!(format!("{:?}", others), "\"\\x01\"");
++        assert_eq!(format!("{others:?}"), "\"\\x01\"");
+         let non_ascii = BStr::from_bytes(b"d\xe9j\xe0 vu");
+-        assert_eq!(format!("{:?}", non_ascii), "\"d\\xe9j\\xe0 vu\"");
++        assert_eq!(format!("{non_ascii:?}"), "\"d\\xe9j\\xe0 vu\"");
+         let good_bytes = BStr::from_bytes(b"\xf0\x9f\xa6\x80");
+-        assert_eq!(format!("{:?}", good_bytes), "\"\\xf0\\x9f\\xa6\\x80\"");
++        assert_eq!(format!("{good_bytes:?}"), "\"\\xf0\\x9f\\xa6\\x80\"");
+     }
+ }
+ 
+diff --git a/rust/macros/module.rs b/rust/macros/module.rs
+index e7a087b7e88494..da2a18b276e0bf 100644
+--- a/rust/macros/module.rs
++++ b/rust/macros/module.rs
+@@ -48,7 +48,7 @@ fn emit_base(&mut self, field: &str, content: &str, builtin: bool) {
+             )
+         } else {
+             // Loadable modules' modinfo strings go as-is.
+-            format!("{field}={content}\0", field = field, content = content)
++            format!("{field}={content}\0")
+         };
+ 
+         write!(
+@@ -124,10 +124,7 @@ fn parse(it: &mut token_stream::IntoIter) -> Self {
+             };
+ 
+             if seen_keys.contains(&key) {
+-                panic!(
+-                    "Duplicated key \"{}\". Keys can only be specified once.",
+-                    key
+-                );
++                panic!("Duplicated key \"{key}\". Keys can only be specified once.");
+             }
+ 
+             assert_eq!(expect_punct(it), ':');
+@@ -140,10 +137,7 @@ fn parse(it: &mut token_stream::IntoIter) -> Self {
+                 "license" => info.license = expect_string_ascii(it),
+                 "alias" => info.alias = Some(expect_string_array(it)),
+                 "firmware" => info.firmware = Some(expect_string_array(it)),
+-                _ => panic!(
+-                    "Unknown key \"{}\". Valid keys are: {:?}.",
+-                    key, EXPECTED_KEYS
+-                ),
++                _ => panic!("Unknown key \"{key}\". Valid keys are: {EXPECTED_KEYS:?}."),
+             }
+ 
+             assert_eq!(expect_punct(it), ',');
+@@ -155,7 +149,7 @@ fn parse(it: &mut token_stream::IntoIter) -> Self {
+ 
+         for key in REQUIRED_KEYS {
+             if !seen_keys.iter().any(|e| e == key) {
+-                panic!("Missing required key \"{}\".", key);
++                panic!("Missing required key \"{key}\".");
+             }
+         }
+ 
+@@ -167,10 +161,7 @@ fn parse(it: &mut token_stream::IntoIter) -> Self {
+         }
+ 
+         if seen_keys != ordered_keys {
+-            panic!(
+-                "Keys are not ordered as expected. Order them like: {:?}.",
+-                ordered_keys
+-            );
++            panic!("Keys are not ordered as expected. Order them like: {ordered_keys:?}.");
+         }
+ 
+         info
+diff --git a/rust/macros/pinned_drop.rs b/rust/macros/pinned_drop.rs
+index 88fb72b2066047..79a52e254f719f 100644
+--- a/rust/macros/pinned_drop.rs
++++ b/rust/macros/pinned_drop.rs
+@@ -25,8 +25,7 @@ pub(crate) fn pinned_drop(_args: TokenStream, input: TokenStream) -> TokenStream
+             // Found the end of the generics, this should be `PinnedDrop`.
+             assert!(
+                 matches!(tt, TokenTree::Ident(i) if i.to_string() == "PinnedDrop"),
+-                "expected 'PinnedDrop', found: '{:?}'",
+-                tt
++                "expected 'PinnedDrop', found: '{tt:?}'"
+             );
+             pinned_drop_idx = Some(i);
+             break;
+diff --git a/rust/uapi/lib.rs b/rust/uapi/lib.rs
+index 13495910271faf..c98d7a8cde77da 100644
+--- a/rust/uapi/lib.rs
++++ b/rust/uapi/lib.rs
+@@ -24,6 +24,7 @@
+     unreachable_pub,
+     unsafe_op_in_unsafe_fn
+ )]
++#![cfg_attr(CONFIG_RUSTC_HAS_UNNECESSARY_TRANSMUTES, allow(unnecessary_transmutes))]
+ 
+ // Manual definition of blocklisted types.
+ type __kernel_size_t = usize;
+diff --git a/tools/objtool/check.c b/tools/objtool/check.c
+index d8aea31ee393a3..bea6461ac340d0 100644
+--- a/tools/objtool/check.c
++++ b/tools/objtool/check.c
+@@ -219,6 +219,7 @@ static bool is_rust_noreturn(const struct symbol *func)
+ 	       str_ends_with(func->name, "_4core9panicking19assert_failed_inner")			||
+ 	       str_ends_with(func->name, "_4core9panicking30panic_null_pointer_dereference")		||
+ 	       str_ends_with(func->name, "_4core9panicking36panic_misaligned_pointer_dereference")	||
++	       str_ends_with(func->name, "_7___rustc17rust_begin_unwind")				||
+ 	       strstr(func->name, "_4core9panicking13assert_failed")					||
+ 	       strstr(func->name, "_4core9panicking11panic_const24panic_const_")			||
+ 	       (strstr(func->name, "_4core5slice5index24slice_") &&
+diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
+index 363d031a16f7e1..9cf769d415687d 100644
+--- a/tools/testing/selftests/Makefile
++++ b/tools/testing/selftests/Makefile
+@@ -115,6 +115,7 @@ TARGETS += user_events
+ TARGETS += vDSO
+ TARGETS += mm
+ TARGETS += x86
++TARGETS += x86/bugs
+ TARGETS += zram
+ #Please keep the TARGETS list alphabetically sorted
+ # Run "make quicktest=1 run_tests" or
+diff --git a/tools/testing/selftests/mm/compaction_test.c b/tools/testing/selftests/mm/compaction_test.c
+index 2c3a0eb6b22d31..9bc4591c7b1699 100644
+--- a/tools/testing/selftests/mm/compaction_test.c
++++ b/tools/testing/selftests/mm/compaction_test.c
+@@ -90,6 +90,8 @@ int check_compaction(unsigned long mem_free, unsigned long hugepage_size,
+ 	int compaction_index = 0;
+ 	char nr_hugepages[20] = {0};
+ 	char init_nr_hugepages[24] = {0};
++	char target_nr_hugepages[24] = {0};
++	int slen;
+ 
+ 	snprintf(init_nr_hugepages, sizeof(init_nr_hugepages),
+ 		 "%lu", initial_nr_hugepages);
+@@ -106,11 +108,18 @@ int check_compaction(unsigned long mem_free, unsigned long hugepage_size,
+ 		goto out;
+ 	}
+ 
+-	/* Request a large number of huge pages. The Kernel will allocate
+-	   as much as it can */
+-	if (write(fd, "100000", (6*sizeof(char))) != (6*sizeof(char))) {
+-		ksft_print_msg("Failed to write 100000 to /proc/sys/vm/nr_hugepages: %s\n",
+-			       strerror(errno));
++	/*
++	 * Request huge pages for about half of the free memory. The kernel
++	 * will allocate as much as it can, and we expect it to get at least 1/3.
++	 */
++	nr_hugepages_ul = mem_free / hugepage_size / 2;
++	snprintf(target_nr_hugepages, sizeof(target_nr_hugepages),
++		 "%lu", nr_hugepages_ul);
++
++	slen = strlen(target_nr_hugepages);
++	if (write(fd, target_nr_hugepages, slen) != slen) {
++		ksft_print_msg("Failed to write %lu to /proc/sys/vm/nr_hugepages: %s\n",
++			       nr_hugepages_ul, strerror(errno));
+ 		goto close_fd;
+ 	}
+ 
+diff --git a/tools/testing/selftests/mm/pkey-powerpc.h b/tools/testing/selftests/mm/pkey-powerpc.h
+index 3d0c0bdae5bc19..e90af82f86883e 100644
+--- a/tools/testing/selftests/mm/pkey-powerpc.h
++++ b/tools/testing/selftests/mm/pkey-powerpc.h
+@@ -102,8 +102,18 @@ void expect_fault_on_read_execonly_key(void *p1, int pkey)
+ 	return;
+ }
+ 
++#define REPEAT_8(s) s s s s s s s s
++#define REPEAT_64(s) REPEAT_8(s) REPEAT_8(s) REPEAT_8(s) REPEAT_8(s) \
++		     REPEAT_8(s) REPEAT_8(s) REPEAT_8(s) REPEAT_8(s)
++#define REPEAT_512(s) REPEAT_64(s) REPEAT_64(s) REPEAT_64(s) REPEAT_64(s) \
++		      REPEAT_64(s) REPEAT_64(s) REPEAT_64(s) REPEAT_64(s)
++#define REPEAT_4096(s) REPEAT_512(s) REPEAT_512(s) REPEAT_512(s) REPEAT_512(s) \
++		       REPEAT_512(s) REPEAT_512(s) REPEAT_512(s) REPEAT_512(s)
++#define REPEAT_16384(s) REPEAT_4096(s) REPEAT_4096(s) \
++			REPEAT_4096(s) REPEAT_4096(s)
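++
++/*
++ * Each REPEAT_* level multiplies the repeat count by 8 (by 4 at the top
++ * level) and relies on C string-literal concatenation, so REPEAT_16384("nop\n")
++ * expands to 16384 adjacent "nop\n" literals forming a single asm string.
++ */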
++
+ /* 4-byte instructions * 16384 = 64K page */
+-#define __page_o_noops() asm(".rept 16384 ; nop; .endr")
++#define __page_o_noops() asm(REPEAT_16384("nop\n"))
+ 
+ void *malloc_pkey_with_mprotect_subpage(long size, int prot, u16 pkey)
+ {
+diff --git a/tools/testing/selftests/x86/bugs/Makefile b/tools/testing/selftests/x86/bugs/Makefile
+new file mode 100644
+index 00000000000000..8ff2d7226c7f3f
+--- /dev/null
++++ b/tools/testing/selftests/x86/bugs/Makefile
+@@ -0,0 +1,3 @@
++TEST_PROGS := its_sysfs.py its_permutations.py its_indirect_alignment.py its_ret_alignment.py
++TEST_FILES := common.py
++include ../../lib.mk
+diff --git a/tools/testing/selftests/x86/bugs/common.py b/tools/testing/selftests/x86/bugs/common.py
+new file mode 100644
+index 00000000000000..2f9664a80617a6
+--- /dev/null
++++ b/tools/testing/selftests/x86/bugs/common.py
+@@ -0,0 +1,164 @@
++#!/usr/bin/env python3
++# SPDX-License-Identifier: GPL-2.0
++#
++# Copyright (c) 2025 Intel Corporation
++#
++# This contains common functions, adapted to the kselftest framework, for
++# testing mitigations for x86 bugs.
++
++import os, sys, re, shutil
++
++sys.path.insert(0, '../../kselftest')
++import ksft
++
++def read_file(path):
++    if not os.path.exists(path):
++        return None
++    with open(path, 'r') as file:
++        return file.read().strip()
++
++def cpuinfo_has(arg):
++    cpuinfo = read_file('/proc/cpuinfo')
++    if arg in cpuinfo:
++        return True
++    return False
++
++def cmdline_has(arg):
++    cmdline = read_file('/proc/cmdline')
++    if arg in cmdline:
++        return True
++    return False
++
++def cmdline_has_either(args):
++    cmdline = read_file('/proc/cmdline')
++    for arg in args:
++        if arg in cmdline:
++            return True
++    return False
++
++def cmdline_has_none(args):
++    return not cmdline_has_either(args)
++
++def cmdline_has_all(args):
++    cmdline = read_file('/proc/cmdline')
++    for arg in args:
++        if arg not in cmdline:
++            return False
++    return True
++
++def get_sysfs(bug):
++    return read_file("/sys/devices/system/cpu/vulnerabilities/" + bug)
++
++def sysfs_has(bug, mitigation):
++    status = get_sysfs(bug)
++    if mitigation in status:
++        return True
++    return False
++
++def sysfs_has_either(bugs, mitigations):
++    for bug in bugs:
++        for mitigation in mitigations:
++            if sysfs_has(bug, mitigation):
++                return True
++    return False
++
++def sysfs_has_none(bugs, mitigations):
++    return not sysfs_has_either(bugs, mitigations)
++
++def sysfs_has_all(bugs, mitigations):
++    for bug in bugs:
++        for mitigation in mitigations:
++            if not sysfs_has(bug, mitigation):
++                return False
++    return True
++
++def bug_check_pass(bug, found):
++    ksft.print_msg(f"\nFound: {found}")
++    # ksft.print_msg(f"\ncmdline: {read_file('/proc/cmdline')}")
++    ksft.test_result_pass(f'{bug}: {found}')
++
++def bug_check_fail(bug, found, expected):
++    ksft.print_msg(f'\nFound:\t {found}')
++    ksft.print_msg(f'Expected:\t {expected}')
++    ksft.print_msg(f"\ncmdline: {read_file('/proc/cmdline')}")
++    ksft.test_result_fail(f'{bug}: {found}')
++
++def bug_status_unknown(bug, found):
++    ksft.print_msg(f'\nUnknown status: {found}')
++    ksft.print_msg(f"\ncmdline: {read_file('/proc/cmdline')}")
++    ksft.test_result_fail(f'{bug}: {found}')
++
++def basic_checks_sufficient(bug, mitigation):
++    if not mitigation:
++        bug_status_unknown(bug, "None")
++        return True
++    elif mitigation == "Not affected":
++        ksft.test_result_pass(bug)
++        return True
++    elif mitigation == "Vulnerable":
++        if cmdline_has_either([f'{bug}=off', 'mitigations=off']):
++            bug_check_pass(bug, mitigation)
++            return True
++    return False
++
++def get_section_info(vmlinux, section_name):
++    from elftools.elf.elffile import ELFFile
++    with open(vmlinux, 'rb') as f:
++        elffile = ELFFile(f)
++        section = elffile.get_section_by_name(section_name)
++        if section is None:
++            ksft.print_msg("Available sections in vmlinux:")
++            for sec in elffile.iter_sections():
++                ksft.print_msg(sec.name)
++            raise ValueError(f"Section {section_name} not found in {vmlinux}")
++        return section['sh_addr'], section['sh_offset'], section['sh_size']
++
++def get_patch_sites(vmlinux, offset, size):
++    import struct
++    output = []
++    with open(vmlinux, 'rb') as f:
++        f.seek(offset)
++        i = 0
++        while i < size:
++            data = f.read(4)  # s32
++            if not data:
++                break
++            sym_offset = struct.unpack('<i', data)[0] + i
++            i += 4
++            output.append(sym_offset)
++    return output
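++
++# Example (vmlinux_path is illustrative): locating the retpoline patch sites
++# of a kernel image:
++#   _, sec_off, sec_size = get_section_info(vmlinux_path, '.retpoline_sites')
++#   sites = get_patch_sites(vmlinux_path, sec_off, sec_size)
++# Each entry is the offset, relative to the section start, of one patch site.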
++
++def get_instruction_from_vmlinux(elffile, section, virtual_address, target_address):
++    from capstone import Cs, CS_ARCH_X86, CS_MODE_64
++    section_start = section['sh_addr']
++    section_end = section_start + section['sh_size']
++
++    if not (section_start <= target_address < section_end):
++        return None
++
++    offset = target_address - section_start
++    code = section.data()[offset:offset + 16]
++
++    cap = init_capstone()
++    for instruction in cap.disasm(code, target_address):
++        if instruction.address == target_address:
++            return instruction
++    return None
++
++def init_capstone():
++    from capstone import Cs, CS_ARCH_X86, CS_MODE_64, CS_OPT_SYNTAX_ATT
++    cap = Cs(CS_ARCH_X86, CS_MODE_64)
++    cap.syntax = CS_OPT_SYNTAX_ATT
++    return cap
++
++def get_runtime_kernel():
++    import drgn
++    return drgn.program_from_kernel()
++
++def check_dependencies_or_skip(modules, script_name="unknown test"):
++    for mod in modules:
++        try:
++            __import__(mod)
++        except ImportError:
++            ksft.test_result_skip(f"Skipping {script_name}: missing module '{mod}'")
++            ksft.finished()
+diff --git a/tools/testing/selftests/x86/bugs/its_indirect_alignment.py b/tools/testing/selftests/x86/bugs/its_indirect_alignment.py
+new file mode 100644
+index 00000000000000..cdc33ae6a91c33
+--- /dev/null
++++ b/tools/testing/selftests/x86/bugs/its_indirect_alignment.py
+@@ -0,0 +1,150 @@
++#!/usr/bin/env python3
++# SPDX-License-Identifier: GPL-2.0
++#
++# Copyright (c) 2025 Intel Corporation
++#
++# Test for indirect target selection (ITS) mitigation.
++#
++# Test if indirect CALL/JMP are correctly patched by evaluating
++# the vmlinux .retpoline_sites in /proc/kcore.
++
++# Install dependencies
++# add-apt-repository ppa:michel-slm/kernel-utils
++# apt update
++# apt install -y python3-drgn python3-pyelftools python3-capstone
++#
++# Best to copy the vmlinux to a standard location:
++# mkdir -p /usr/lib/debug/lib/modules/$(uname -r)
++# cp $VMLINUX /usr/lib/debug/lib/modules/$(uname -r)/vmlinux
++#
++# Usage: ./its_indirect_alignment.py [vmlinux]
++
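++# A site counts as safe when the last byte of the patched instruction has
++# address bit 5 set (insn_end & 0x20), i.e. it sits in the upper half of
++# its 64-byte cacheline. Illustrative addresses: an instruction ending at
++# 0x...38 is safe, one ending at 0x...18 is not.
++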
++import os, sys, argparse
++from pathlib import Path
++
++this_dir = os.path.dirname(os.path.realpath(__file__))
++sys.path.insert(0, this_dir + '/../../kselftest')
++import ksft
++import common as c
++
++bug = "indirect_target_selection"
++
++mitigation = c.get_sysfs(bug)
++if not mitigation or "Aligned branch/return thunks" not in mitigation:
++    ksft.test_result_skip("Skipping its_indirect_alignment.py: Aligned branch/return thunks not enabled")
++    ksft.finished()
++
++if c.sysfs_has("spectre_v2", "Retpolines"):
++    ksft.test_result_skip("Skipping its_indirect_alignment.py: Retpolines deployed")
++    ksft.finished()
++
++c.check_dependencies_or_skip(['drgn', 'elftools', 'capstone'], script_name="its_indirect_alignment.py")
++
++import capstone
++from elftools.elf.elffile import ELFFile
++from drgn.helpers.common.memory import identify_address
++
++cap = c.init_capstone()
++
++if len(os.sys.argv) > 1:
++    arg_vmlinux = os.sys.argv[1]
++    if not os.path.exists(arg_vmlinux):
++        ksft.test_result_fail(f"its_indirect_alignment.py: vmlinux not found at argument path: {arg_vmlinux}")
++        ksft.exit_fail()
++    os.makedirs(f"/usr/lib/debug/lib/modules/{os.uname().release}", exist_ok=True)
++    os.system(f'cp {arg_vmlinux} /usr/lib/debug/lib/modules/$(uname -r)/vmlinux')
++
++vmlinux = f"/usr/lib/debug/lib/modules/{os.uname().release}/vmlinux"
++if not os.path.exists(vmlinux):
++    ksft.test_result_fail(f"its_indirect_alignment.py: vmlinux not found at {vmlinux}")
++    ksft.exit_fail()
++
++ksft.print_msg(f"Using vmlinux: {vmlinux}")
++
++retpolines_start_vmlinux, retpolines_sec_offset, size = c.get_section_info(vmlinux, '.retpoline_sites')
++ksft.print_msg(f"vmlinux: Section .retpoline_sites (0x{retpolines_start_vmlinux:x}) found at 0x{retpolines_sec_offset:x} with size 0x{size:x}")
++
++sites_offset = c.get_patch_sites(vmlinux, retpolines_sec_offset, size)
++total_retpoline_tests = len(sites_offset)
++ksft.print_msg(f"Found {total_retpoline_tests} retpoline sites")
++
++prog = c.get_runtime_kernel()
++retpolines_start_kcore = prog.symbol('__retpoline_sites').address
++ksft.print_msg(f'kcore: __retpoline_sites: 0x{retpolines_start_kcore:x}')
++
++x86_indirect_its_thunk_r15 = prog.symbol('__x86_indirect_its_thunk_r15').address
++ksft.print_msg(f'kcore: __x86_indirect_its_thunk_r15: 0x{x86_indirect_its_thunk_r15:x}')
++
++tests_passed = 0
++tests_failed = 0
++tests_unknown = 0
++
++with open(vmlinux, 'rb') as f:
++    elffile = ELFFile(f)
++    text_section = elffile.get_section_by_name('.text')
++
++    for i in range(0, len(sites_offset)):
++        site = retpolines_start_kcore + sites_offset[i]
++        vmlinux_site = retpolines_start_vmlinux + sites_offset[i]
++        passed = unknown = failed = False
++        try:
++            vmlinux_insn = c.get_instruction_from_vmlinux(elffile, text_section, text_section['sh_addr'], vmlinux_site)
++            kcore_insn = list(cap.disasm(prog.read(site, 16), site))[0]
++            operand = kcore_insn.op_str
++            insn_end = site + kcore_insn.size - 1 # TODO handle Jcc.32 __x86_indirect_thunk_\reg
++            safe_site = insn_end & 0x20
++            site_status = "" if safe_site else "(unsafe)"
++
++            ksft.print_msg(f"\nSite {i}: {identify_address(prog, site)} <0x{site:x}> {site_status}")
++            ksft.print_msg(f"\tvmlinux: 0x{vmlinux_insn.address:x}:\t{vmlinux_insn.mnemonic}\t{vmlinux_insn.op_str}")
++            ksft.print_msg(f"\tkcore:   0x{kcore_insn.address:x}:\t{kcore_insn.mnemonic}\t{kcore_insn.op_str}")
++
++            if (site & 0x20) ^ (insn_end & 0x20):
++                ksft.print_msg(f"\tSite at safe/unsafe boundary: {str(kcore_insn.bytes)} {kcore_insn.mnemonic} {operand}")
++            if safe_site:
++                tests_passed += 1
++                passed = True
++                ksft.print_msg(f"\tPASSED: At safe address")
++                continue
++
++            if operand.startswith('0xffffffff'):
++                thunk = int(operand, 16)
++                if thunk > x86_indirect_its_thunk_r15:
++                    insn_at_thunk = list(cap.disasm(prog.read(thunk, 16), thunk))[0]
++                    operand += ' -> ' + insn_at_thunk.mnemonic + ' ' + insn_at_thunk.op_str + ' <dynamic-thunk?>'
++                    if 'jmp' in insn_at_thunk.mnemonic and thunk & 0x20:
++                        ksft.print_msg(f"\tPASSED: Found {operand} at safe address")
++                        passed = True
++                if not passed:
++                    if kcore_insn.operands[0].type == capstone.CS_OP_IMM:
++                        operand += ' <' + prog.symbol(int(operand, 16)) + '>'
++                        if '__x86_indirect_its_thunk_' in operand:
++                            ksft.print_msg(f"\tPASSED: Found {operand}")
++                        else:
++                            ksft.print_msg(f"\tPASSED: Found direct branch: {kcore_insn}, ITS thunk not required.")
++                        passed = True
++                    else:
++                        unknown = True
++            if passed:
++                tests_passed += 1
++            elif unknown:
++                ksft.print_msg(f"UNKNOWN: unexpected operand: {kcore_insn}")
++                tests_unknown += 1
++            else:
++                ksft.print_msg(f'\t************* FAILED *************')
++                ksft.print_msg(f"\tFound {kcore_insn.bytes} {kcore_insn.mnemonic} {operand}")
++                ksft.print_msg(f'\t**********************************')
++                tests_failed += 1
++        except Exception as e:
++            ksft.print_msg(f"UNKNOWN: An unexpected error occurred: {e}")
++            tests_unknown += 1
++
++ksft.print_msg(f"\n\nSummary:")
++ksft.print_msg(f"PASS:    \t{tests_passed} \t/ {total_retpoline_tests}")
++ksft.print_msg(f"FAIL:    \t{tests_failed} \t/ {total_retpoline_tests}")
++ksft.print_msg(f"UNKNOWN: \t{tests_unknown} \t/ {total_retpoline_tests}")
++
++if tests_failed == 0:
++    ksft.test_result_pass("All ITS return thunk sites passed")
++else:
++    ksft.test_result_fail(f"{tests_failed} ITS return thunk sites failed")
++ksft.finished()
+diff --git a/tools/testing/selftests/x86/bugs/its_permutations.py b/tools/testing/selftests/x86/bugs/its_permutations.py
+new file mode 100644
+index 00000000000000..3204f4728c62cc
+--- /dev/null
++++ b/tools/testing/selftests/x86/bugs/its_permutations.py
+@@ -0,0 +1,109 @@
++#!/usr/bin/env python3
++# SPDX-License-Identifier: GPL-2.0
++#
++# Copyright (c) 2025 Intel Corporation
++#
++# Test for indirect target selection (ITS) cmdline permutations with other bugs
++# like spectre_v2 and retbleed.
++
++import os, sys, subprocess, itertools, re, shutil
++
++test_dir = os.path.dirname(os.path.realpath(__file__))
++sys.path.insert(0, test_dir + '/../../kselftest')
++import ksft
++import common as c
++
++bug = "indirect_target_selection"
++mitigation = c.get_sysfs(bug)
++
++if not mitigation or "Not affected" in mitigation:
++    ksft.test_result_skip("Skipping its_permutations.py: not applicable")
++    ksft.finished()
++
++if shutil.which('vng') is None:
++    ksft.test_result_skip("Skipping its_permutations.py: virtme-ng ('vng') not found in PATH.")
++    ksft.finished()
++
++TEST = f"{test_dir}/its_sysfs.py"
++default_kparam = ['clearcpuid=hypervisor', 'panic=5', 'panic_on_warn=1', 'oops=panic', 'nmi_watchdog=1', 'hung_task_panic=1']
++
++DEBUG = " -v "
++
++# Install dependencies
++# https://github.com/arighi/virtme-ng
++# apt install virtme-ng
++BOOT_CMD = f"vng --run {test_dir}/../../../../../arch/x86/boot/bzImage "
++#BOOT_CMD += DEBUG
++
++bug = "indirect_target_selection"
++
++input_options = {
++    'indirect_target_selection'     : ['off', 'on', 'stuff', 'vmexit'],
++    'retbleed'                      : ['off', 'stuff', 'auto'],
++    'spectre_v2'                    : ['off', 'on', 'eibrs', 'retpoline', 'ibrs', 'eibrs,retpoline'],
++}
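++
++# 4 indirect_target_selection modes x 3 retbleed modes x 6 spectre_v2 modes
++# = 72 boot combinations, each run in a separate vng guest against its_sysfs.py.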
++
++def pretty_print(output):
++    OKBLUE = '\033[94m'
++    OKGREEN = '\033[92m'
++    WARNING = '\033[93m'
++    FAIL = '\033[91m'
++    ENDC = '\033[0m'
++    BOLD = '\033[1m'
++
++    # Define patterns and their corresponding colors
++    patterns = {
++        r"^ok \d+": OKGREEN,
++        r"^not ok \d+": FAIL,
++        r"^# Testing .*": OKBLUE,
++        r"^# Found: .*": WARNING,
++        r"^# Totals: .*": BOLD,
++        r"pass:([1-9]\d*)": OKGREEN,
++        r"fail:([1-9]\d*)": FAIL,
++        r"skip:([1-9]\d*)": WARNING,
++    }
++
++    # Apply colors based on patterns
++    for pattern, color in patterns.items():
++        output = re.sub(pattern, lambda match: f"{color}{match.group(0)}{ENDC}", output, flags=re.MULTILINE)
++
++    print(output)
++
++combinations = list(itertools.product(*input_options.values()))
++ksft.print_header()
++ksft.set_plan(len(combinations))
++
++logs = ""
++
++for combination in combinations:
++    append = ""
++    log = ""
++    for p in default_kparam:
++        append += f' --append={p}'
++    command = BOOT_CMD + append
++    test_params = ""
++    for i, key in enumerate(input_options.keys()):
++        param = f'{key}={combination[i]}'
++        test_params += f' {param}'
++        command += f" --append={param}"
++    command += f" -- {TEST}"
++    test_name = f"{bug} {test_params}"
++    pretty_print(f'# Testing {test_name}')
++    t = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
++    t.wait()
++    output, _ = t.communicate()
++    if t.returncode == 0:
++        ksft.test_result_pass(test_name)
++    else:
++        ksft.test_result_fail(test_name)
++    output = output.decode()
++    log += f" {output}"
++    pretty_print(log)
++    logs += output + "\n"
++
++# Optionally use tappy to parse the output
++# apt install python3-tappy
++with open("logs.txt", "w") as f:
++    f.write(logs)
++
++ksft.finished()
+diff --git a/tools/testing/selftests/x86/bugs/its_ret_alignment.py b/tools/testing/selftests/x86/bugs/its_ret_alignment.py
+new file mode 100644
+index 00000000000000..f40078d9f6ffc1
+--- /dev/null
++++ b/tools/testing/selftests/x86/bugs/its_ret_alignment.py
+@@ -0,0 +1,139 @@
++#!/usr/bin/env python3
++# SPDX-License-Identifier: GPL-2.0
++#
++# Copyright (c) 2025 Intel Corporation
++#
++# Test for indirect target selection (ITS) mitigation.
++#
++# Tests if the RETs are correctly patched by evaluating the
++# vmlinux .return_sites in /proc/kcore.
++#
++# Install dependencies
++# add-apt-repository ppa:michel-slm/kernel-utils
++# apt update
++# apt install -y python3-drgn python3-pyelftools python3-capstone
++#
++# Run on target machine
++# mkdir -p /usr/lib/debug/lib/modules/$(uname -r)
++# cp $VMLINUX /usr/lib/debug/lib/modules/$(uname -r)/vmlinux
++#
++# Usage: ./its_ret_alignment.py
++
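++# Pass criteria, mirroring the loop below: a return site is safe when the
++# last byte of the instruction has address bit 5 set (upper half of its
++# 64-byte cacheline), or when it was already patched into a direct 'jmp';
++# sites holding neither a 'ret' nor a 'jmp' are skipped.
++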
++import os, sys, argparse
++from pathlib import Path
++
++this_dir = os.path.dirname(os.path.realpath(__file__))
++sys.path.insert(0, this_dir + '/../../kselftest')
++import ksft
++import common as c
++
++bug = "indirect_target_selection"
++mitigation = c.get_sysfs(bug)
++if not mitigation or "Aligned branch/return thunks" not in mitigation:
++    ksft.test_result_skip("Skipping its_ret_alignment.py: Aligned branch/return thunks not enabled")
++    ksft.finished()
++
++c.check_dependencies_or_skip(['drgn', 'elftools', 'capstone'], script_name="its_ret_alignment.py")
++
++from elftools.elf.elffile import ELFFile
++from drgn.helpers.common.memory import identify_address
++
++cap = c.init_capstone()
++
++if len(os.sys.argv) > 1:
++    arg_vmlinux = os.sys.argv[1]
++    if not os.path.exists(arg_vmlinux):
++        ksft.test_result_fail(f"its_ret_alignment.py: vmlinux not found at user-supplied path: {arg_vmlinux}")
++        ksft.exit_fail()
++    os.makedirs(f"/usr/lib/debug/lib/modules/{os.uname().release}", exist_ok=True)
++    os.system(f'cp {arg_vmlinux} /usr/lib/debug/lib/modules/$(uname -r)/vmlinux')
++
++vmlinux = f"/usr/lib/debug/lib/modules/{os.uname().release}/vmlinux"
++if not os.path.exists(vmlinux):
++    ksft.test_result_fail(f"its_ret_alignment.py: vmlinux not found at {vmlinux}")
++    ksft.exit_fail()
++
++ksft.print_msg(f"Using vmlinux: {vmlinux}")
++
++rethunks_start_vmlinux, rethunks_sec_offset, size = c.get_section_info(vmlinux, '.return_sites')
++ksft.print_msg(f"vmlinux: Section .return_sites (0x{rethunks_start_vmlinux:x}) found at 0x{rethunks_sec_offset:x} with size 0x{size:x}")
++
++sites_offset = c.get_patch_sites(vmlinux, rethunks_sec_offset, size)
++total_rethunk_tests = len(sites_offset)
++ksft.print_msg(f"Found {total_rethunk_tests} rethunk sites")
++
++prog = c.get_runtime_kernel()
++rethunks_start_kcore = prog.symbol('__return_sites').address
++ksft.print_msg(f'kcore: __return_sites: 0x{rethunks_start_kcore:x}')
++
++its_return_thunk = prog.symbol('its_return_thunk').address
++ksft.print_msg(f'kcore: its_return_thunk: 0x{its_return_thunk:x}')
++
++tests_passed = 0
++tests_failed = 0
++tests_unknown = 0
++tests_skipped = 0
++
++with open(vmlinux, 'rb') as f:
++    elffile = ELFFile(f)
++    text_section = elffile.get_section_by_name('.text')
++
++    for i in range(len(sites_offset)):
++        site = rethunks_start_kcore + sites_offset[i]
++        vmlinux_site = rethunks_start_vmlinux + sites_offset[i]
++        try:
++            passed = unknown = failed = skipped = False
++
++            symbol = identify_address(prog, site)
++            vmlinux_insn = c.get_instruction_from_vmlinux(elffile, text_section, text_section['sh_addr'], vmlinux_site)
++            kcore_insn = list(cap.disasm(prog.read(site, 16), site))[0]
++
++            insn_end = site + kcore_insn.size - 1
++
++            safe_site = insn_end & 0x20
++            site_status = "" if safe_site else "(unsafe)"
++
++            ksft.print_msg(f"\nSite {i}: {symbol} <0x{site:x}> {site_status}")
++            ksft.print_msg(f"\tvmlinux: 0x{vmlinux_insn.address:x}:\t{vmlinux_insn.mnemonic}\t{vmlinux_insn.op_str}")
++            ksft.print_msg(f"\tkcore:   0x{kcore_insn.address:x}:\t{kcore_insn.mnemonic}\t{kcore_insn.op_str}")
++
++            if safe_site:
++                tests_passed += 1
++                passed = True
++                ksft.print_msg(f"\tPASSED: At safe address")
++                continue
++
++            if "jmp" in kcore_insn.mnemonic:
++                passed = True
++            elif "ret" not in kcore_insn.mnemonic:
++                skipped = True
++
++            if passed:
++                ksft.print_msg(f"\tPASSED: Found {kcore_insn.mnemonic} {kcore_insn.op_str}")
++                tests_passed += 1
++            elif skipped:
++                ksft.print_msg(f"\tSKIPPED: Found '{kcore_insn.mnemonic}'")
++                tests_skipped += 1
++            elif unknown:
++                ksft.print_msg(f"UNKNOWN: An unknown instruction: {kcore_insn}")
++                tests_unknown += 1
++            else:
++                ksft.print_msg(f'\t************* FAILED *************')
++                ksft.print_msg(f"\tFound {kcore_insn.mnemonic} {kcore_insn.op_str}")
++                ksft.print_msg(f'\t**********************************')
++                tests_failed += 1
++        except Exception as e:
++            ksft.print_msg(f"UNKNOWN: An unexpected error occurred: {e}")
++            tests_unknown += 1
++
++ksft.print_msg(f"\n\nSummary:")
++ksft.print_msg(f"PASSED: \t{tests_passed} \t/ {total_rethunk_tests}")
++ksft.print_msg(f"FAILED: \t{tests_failed} \t/ {total_rethunk_tests}")
++ksft.print_msg(f"SKIPPED: \t{tests_skipped} \t/ {total_rethunk_tests}")
++ksft.print_msg(f"UNKNOWN: \t{tests_unknown} \t/ {total_rethunk_tests}")
++
++if tests_failed == 0:
++    ksft.test_result_pass("All ITS return thunk sites passed.")
++else:
++    ksft.test_result_fail(f"{tests_failed} failed sites need ITS return thunks.")
++ksft.finished()
+diff --git a/tools/testing/selftests/x86/bugs/its_sysfs.py b/tools/testing/selftests/x86/bugs/its_sysfs.py
+new file mode 100644
+index 00000000000000..7bca81f2f6065b
+--- /dev/null
++++ b/tools/testing/selftests/x86/bugs/its_sysfs.py
+@@ -0,0 +1,65 @@
++#!/usr/bin/env python3
++# SPDX-License-Identifier: GPL-2.0
++#
++# Copyright (c) 2025 Intel Corporation
++#
++# Test for Indirect Target Selection (ITS) mitigation sysfs status.
++
++import sys, os, re
++this_dir = os.path.dirname(os.path.realpath(__file__))
++sys.path.insert(0, this_dir + '/../../kselftest')
++import ksft
++
++from common import *
++
++bug = "indirect_target_selection"
++mitigation = get_sysfs(bug)
++
++ITS_MITIGATION_ALIGNED_THUNKS	= "Mitigation: Aligned branch/return thunks"
++ITS_MITIGATION_RETPOLINE_STUFF	= "Mitigation: Retpolines, Stuffing RSB"
++ITS_MITIGATION_VMEXIT_ONLY		= "Mitigation: Vulnerable, KVM: Not affected"
++ITS_MITIGATION_VULNERABLE       = "Vulnerable"
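++
++# Expected mitigation strings as reported by
++# /sys/devices/system/cpu/vulnerabilities/indirect_target_selection.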
++
++def check_mitigation():
++    if mitigation == ITS_MITIGATION_ALIGNED_THUNKS:
++        if cmdline_has(f'{bug}=stuff') and sysfs_has("spectre_v2", "Retpolines"):
++            bug_check_fail(bug, ITS_MITIGATION_ALIGNED_THUNKS, ITS_MITIGATION_RETPOLINE_STUFF)
++            return
++        if cmdline_has(f'{bug}=vmexit') and cpuinfo_has('its_native_only'):
++            bug_check_fail(bug, ITS_MITIGATION_ALIGNED_THUNKS, ITS_MITIGATION_VMEXIT_ONLY)
++            return
++        bug_check_pass(bug, ITS_MITIGATION_ALIGNED_THUNKS)
++        return
++
++    if mitigation == ITS_MITIGATION_RETPOLINE_STUFF:
++        if cmdline_has(f'{bug}=stuff') and sysfs_has("spectre_v2", "Retpolines"):
++            bug_check_pass(bug, ITS_MITIGATION_RETPOLINE_STUFF)
++            return
++        if sysfs_has('retbleed', 'Stuffing'):
++            bug_check_pass(bug, ITS_MITIGATION_RETPOLINE_STUFF)
++            return
++        bug_check_fail(bug, ITS_MITIGATION_RETPOLINE_STUFF, ITS_MITIGATION_ALIGNED_THUNKS)
++        return
++
++    if mitigation == ITS_MITIGATION_VMEXIT_ONLY:
++        if cmdline_has(f'{bug}=vmexit') and cpuinfo_has('its_native_only'):
++            bug_check_pass(bug, ITS_MITIGATION_VMEXIT_ONLY)
++            return
++        bug_check_fail(bug, ITS_MITIGATION_VMEXIT_ONLY, ITS_MITIGATION_ALIGNED_THUNKS)
++        return
++
++    if mitigation == ITS_MITIGATION_VULNERABLE:
++        if sysfs_has("spectre_v2", "Vulnerable"):
++            bug_check_pass(bug, ITS_MITIGATION_VULNERABLE)
++        else:
++            bug_check_fail(bug, "Mitigation", ITS_MITIGATION_VULNERABLE)
++        return
++
++    bug_status_unknown(bug, mitigation)
++    return
++
++ksft.print_header()
++ksft.set_plan(1)
++ksft.print_msg(f'{bug}: {mitigation} ...')
++
++if not basic_checks_sufficient(bug, mitigation):
++    check_mitigation()
++
++ksft.finished()


^ permalink raw reply related	[flat|nested] 82+ messages in thread
* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2025-05-09 10:57 Mike Pagano
  0 siblings, 0 replies; 82+ messages in thread
From: Mike Pagano @ 2025-05-09 10:57 UTC (permalink / raw
  To: gentoo-commits

commit:     97a927178861131f62887a80fe1e3721bcf3410c
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri May  9 10:56:58 2025 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri May  9 10:56:58 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=97a92717

Linux patch 6.12.28

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1027_linux-6.12.28.patch | 6726 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 6730 insertions(+)

diff --git a/0000_README b/0000_README
index acc6bba1..f98bc820 100644
--- a/0000_README
+++ b/0000_README
@@ -151,6 +151,10 @@ Patch:  1026_linux-6.12.27.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.12.27
 
+Patch:  1027_linux-6.12.28.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.12.28
+
 Patch:  1500_fortify-copy-size-value-range-tracking-fix.patch
 From:   https://git.kernel.org/
 Desc:   fortify: Hide run-time copy size from value range tracking

diff --git a/1027_linux-6.12.28.patch b/1027_linux-6.12.28.patch
new file mode 100644
index 00000000..7a76e822
--- /dev/null
+++ b/1027_linux-6.12.28.patch
@@ -0,0 +1,6726 @@
+diff --git a/Makefile b/Makefile
+index 77f5d180902cd6..f26e0f946f02ee 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 12
+-SUBLEVEL = 27
++SUBLEVEL = 28
+ EXTRAVERSION =
+ NAME = Baby Opossum Posse
+ 
+diff --git a/arch/arm/boot/dts/nxp/imx/imx6ul-imx6ull-opos6ul.dtsi b/arch/arm/boot/dts/nxp/imx/imx6ul-imx6ull-opos6ul.dtsi
+index f2386dcb9ff2c0..dda4fa91b2f2cc 100644
+--- a/arch/arm/boot/dts/nxp/imx/imx6ul-imx6ull-opos6ul.dtsi
++++ b/arch/arm/boot/dts/nxp/imx/imx6ul-imx6ull-opos6ul.dtsi
+@@ -40,6 +40,9 @@ ethphy1: ethernet-phy@1 {
+ 			reg = <1>;
+ 			interrupt-parent = <&gpio4>;
+ 			interrupts = <16 IRQ_TYPE_LEVEL_LOW>;
++			micrel,led-mode = <1>;
++			clocks = <&clks IMX6UL_CLK_ENET_REF>;
++			clock-names = "rmii-ref";
+ 			status = "okay";
+ 		};
+ 	};
+diff --git a/arch/arm64/boot/dts/freescale/imx95.dtsi b/arch/arm64/boot/dts/freescale/imx95.dtsi
+index 40cbb071f265cf..f904d6b1c84bf0 100644
+--- a/arch/arm64/boot/dts/freescale/imx95.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx95.dtsi
+@@ -1478,7 +1478,7 @@ pcie0: pcie@4c300000 {
+ 			reg = <0 0x4c300000 0 0x10000>,
+ 			      <0 0x60100000 0 0xfe00000>,
+ 			      <0 0x4c360000 0 0x10000>,
+-			      <0 0x4c340000 0 0x2000>;
++			      <0 0x4c340000 0 0x4000>;
+ 			reg-names = "dbi", "config", "atu", "app";
+ 			ranges = <0x81000000 0x0 0x00000000 0x0 0x6ff00000 0 0x00100000>,
+ 				 <0x82000000 0x0 0x10000000 0x9 0x10000000 0 0x10000000>;
+@@ -1518,7 +1518,7 @@ pcie0_ep: pcie-ep@4c300000 {
+ 			reg = <0 0x4c300000 0 0x10000>,
+ 			      <0 0x4c360000 0 0x1000>,
+ 			      <0 0x4c320000 0 0x1000>,
+-			      <0 0x4c340000 0 0x2000>,
++			      <0 0x4c340000 0 0x4000>,
+ 			      <0 0x4c370000 0 0x10000>,
+ 			      <0x9 0 1 0>;
+ 			reg-names = "dbi","atu", "dbi2", "app", "dma", "addr_space";
+@@ -1545,7 +1545,7 @@ pcie1: pcie@4c380000 {
+ 			reg = <0 0x4c380000 0 0x10000>,
+ 			      <8 0x80100000 0 0xfe00000>,
+ 			      <0 0x4c3e0000 0 0x10000>,
+-			      <0 0x4c3c0000 0 0x2000>;
++			      <0 0x4c3c0000 0 0x4000>;
+ 			reg-names = "dbi", "config", "atu", "app";
+ 			ranges = <0x81000000 0 0x00000000 0x8 0x8ff00000 0 0x00100000>,
+ 				 <0x82000000 0 0x10000000 0xa 0x10000000 0 0x10000000>;
+@@ -1585,7 +1585,7 @@ pcie1_ep: pcie-ep@4c380000 {
+ 			reg = <0 0x4c380000 0 0x10000>,
+ 			      <0 0x4c3e0000 0 0x1000>,
+ 			      <0 0x4c3a0000 0 0x1000>,
+-			      <0 0x4c3c0000 0 0x2000>,
++			      <0 0x4c3c0000 0 0x4000>,
+ 			      <0 0x4c3f0000 0 0x10000>,
+ 			      <0xa 0 1 0>;
+ 			reg-names = "dbi", "atu", "dbi2", "app", "dma", "addr_space";
+diff --git a/arch/arm64/boot/dts/st/stm32mp251.dtsi b/arch/arm64/boot/dts/st/stm32mp251.dtsi
+index 1167cf63d7e87a..cd9b92144a42cb 100644
+--- a/arch/arm64/boot/dts/st/stm32mp251.dtsi
++++ b/arch/arm64/boot/dts/st/stm32mp251.dtsi
+@@ -114,14 +114,13 @@ scmi_vdda18adc: regulator@7 {
+ 	};
+ 
+ 	intc: interrupt-controller@4ac00000 {
+-		compatible = "arm,cortex-a7-gic";
++		compatible = "arm,gic-400";
+ 		#interrupt-cells = <3>;
+-		#address-cells = <1>;
+ 		interrupt-controller;
+ 		reg = <0x0 0x4ac10000 0x0 0x1000>,
+-		      <0x0 0x4ac20000 0x0 0x2000>,
+-		      <0x0 0x4ac40000 0x0 0x2000>,
+-		      <0x0 0x4ac60000 0x0 0x2000>;
++		      <0x0 0x4ac20000 0x0 0x20000>,
++		      <0x0 0x4ac40000 0x0 0x20000>,
++		      <0x0 0x4ac60000 0x0 0x20000>;
+ 	};
+ 
+ 	psci {
+diff --git a/arch/arm64/kernel/proton-pack.c b/arch/arm64/kernel/proton-pack.c
+index 0f51fd10b4b063..30e79f111b35e3 100644
+--- a/arch/arm64/kernel/proton-pack.c
++++ b/arch/arm64/kernel/proton-pack.c
+@@ -879,10 +879,12 @@ static u8 spectre_bhb_loop_affected(void)
+ 	static const struct midr_range spectre_bhb_k132_list[] = {
+ 		MIDR_ALL_VERSIONS(MIDR_CORTEX_X3),
+ 		MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V2),
++		{},
+ 	};
+ 	static const struct midr_range spectre_bhb_k38_list[] = {
+ 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A715),
+ 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A720),
++		{},
+ 	};
+ 	static const struct midr_range spectre_bhb_k32_list[] = {
+ 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
+diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h
+index 38fc0b188e0842..96831c98860658 100644
+--- a/arch/parisc/include/uapi/asm/socket.h
++++ b/arch/parisc/include/uapi/asm/socket.h
+@@ -132,11 +132,15 @@
+ #define SO_PASSPIDFD		0x404A
+ #define SO_PEERPIDFD		0x404B
+ 
+-#define SO_DEVMEM_LINEAR	78
++#define SCM_TS_OPT_ID		0x404C
++
++#define SO_RCVPRIORITY		0x404D
++
++#define SO_DEVMEM_LINEAR	0x404E
+ #define SCM_DEVMEM_LINEAR	SO_DEVMEM_LINEAR
+-#define SO_DEVMEM_DMABUF	79
++#define SO_DEVMEM_DMABUF	0x404F
+ #define SCM_DEVMEM_DMABUF	SO_DEVMEM_DMABUF
+-#define SO_DEVMEM_DONTNEED	80
++#define SO_DEVMEM_DONTNEED	0x4050
+ 
+ #if !defined(__KERNEL__)
+ 
+diff --git a/arch/parisc/math-emu/driver.c b/arch/parisc/math-emu/driver.c
+index 34495446e051c2..71829cb7bc812a 100644
+--- a/arch/parisc/math-emu/driver.c
++++ b/arch/parisc/math-emu/driver.c
+@@ -97,9 +97,19 @@ handle_fpe(struct pt_regs *regs)
+ 
+ 	memcpy(regs->fr, frcopy, sizeof regs->fr);
+ 	if (signalcode != 0) {
+-	    force_sig_fault(signalcode >> 24, signalcode & 0xffffff,
+-			    (void __user *) regs->iaoq[0]);
+-	    return -1;
++		int sig = signalcode >> 24;
++
++		if (sig == SIGFPE) {
++			/*
++			 * Clear floating point trap bit to avoid trapping
++			 * again on the first floating-point instruction in
++			 * the userspace signal handler.
++			 */
++			regs->fr[0] &= ~(1ULL << 38);
++		}
++		force_sig_fault(sig, signalcode & 0xffffff,
++				(void __user *) regs->iaoq[0]);
++		return -1;
+ 	}
+ 
+ 	return signalcode ? -1 : 0;
+diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper
+index b1f5549a3c9c40..fd6db10ef9e65e 100755
+--- a/arch/powerpc/boot/wrapper
++++ b/arch/powerpc/boot/wrapper
+@@ -234,10 +234,8 @@ fi
+ 
+ # suppress some warnings in recent ld versions
+ nowarn="-z noexecstack"
+-if ! ld_is_lld; then
+-	if [ "$LD_VERSION" -ge "$(echo 2.39 | ld_version)" ]; then
+-		nowarn="$nowarn --no-warn-rwx-segments"
+-	fi
++if "${CROSS}ld" -v --no-warn-rwx-segments >/dev/null 2>&1; then
++	nowarn="$nowarn --no-warn-rwx-segments"
+ fi
+ 
+ platformo=$object/"$platform".o
+diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
+index b0d927009af83c..04189689c127e3 100644
+--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
++++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
+@@ -1132,6 +1132,19 @@ int __meminit radix__vmemmap_populate(unsigned long start, unsigned long end, in
+ 	pmd_t *pmd;
+ 	pte_t *pte;
+ 
++	/*
++	 * Make sure we align the start vmemmap addr so that we calculate
++	 * the correct start_pfn in altmap boundary check to decided whether
++	 * we should use altmap or RAM based backing memory allocation. Also
++	 * the address need to be aligned for set_pte operation.
++
++	 * If the start addr is already PMD_SIZE aligned we will try to use
++	 * a pmd mapping. We don't want to be too aggressive here beacause
++	 * that will cause more allocations in RAM. So only if the namespace
++	 * vmemmap start addr is PMD_SIZE aligned we will use PMD mapping.
++	 */
++
++	start = ALIGN_DOWN(start, PAGE_SIZE);
+ 	for (addr = start; addr < end; addr = next) {
+ 		next = pmd_addr_end(addr, end);
+ 
+@@ -1157,8 +1170,8 @@ int __meminit radix__vmemmap_populate(unsigned long start, unsigned long end, in
+ 			 * in altmap block allocation failures, in which case
+ 			 * we fallback to RAM for vmemmap allocation.
+ 			 */
+-			if (altmap && (!IS_ALIGNED(addr, PMD_SIZE) ||
+-				       altmap_cross_boundary(altmap, addr, PMD_SIZE))) {
++			if (!IS_ALIGNED(addr, PMD_SIZE) || (altmap &&
++			    altmap_cross_boundary(altmap, addr, PMD_SIZE))) {
+ 				/*
+ 				 * make sure we don't create altmap mappings
+ 				 * covering things outside the device.
+diff --git a/arch/x86/boot/compressed/mem.c b/arch/x86/boot/compressed/mem.c
+index f676156d9f3db4..0e9f84ab4bdcd1 100644
+--- a/arch/x86/boot/compressed/mem.c
++++ b/arch/x86/boot/compressed/mem.c
+@@ -34,14 +34,11 @@ static bool early_is_tdx_guest(void)
+ 
+ void arch_accept_memory(phys_addr_t start, phys_addr_t end)
+ {
+-	static bool sevsnp;
+-
+ 	/* Platform-specific memory-acceptance call goes here */
+ 	if (early_is_tdx_guest()) {
+ 		if (!tdx_accept_memory(start, end))
+ 			panic("TDX: Failed to accept memory\n");
+-	} else if (sevsnp || (sev_get_status() & MSR_AMD64_SEV_SNP_ENABLED)) {
+-		sevsnp = true;
++	} else if (early_is_sevsnp_guest()) {
+ 		snp_accept_memory(start, end);
+ 	} else {
+ 		error("Cannot accept memory: unknown platform\n");
+diff --git a/arch/x86/boot/compressed/sev.c b/arch/x86/boot/compressed/sev.c
+index f49f7eef1dba07..a93e3633886699 100644
+--- a/arch/x86/boot/compressed/sev.c
++++ b/arch/x86/boot/compressed/sev.c
+@@ -644,3 +644,43 @@ void sev_prep_identity_maps(unsigned long top_level_pgt)
+ 
+ 	sev_verify_cbit(top_level_pgt);
+ }
++
++bool early_is_sevsnp_guest(void)
++{
++	static bool sevsnp;
++
++	if (sevsnp)
++		return true;
++
++	if (!(sev_get_status() & MSR_AMD64_SEV_SNP_ENABLED))
++		return false;
++
++	sevsnp = true;
++
++	if (!snp_vmpl) {
++		unsigned int eax, ebx, ecx, edx;
++
++		/*
++		 * CPUID Fn8000_001F_EAX[28] - SVSM support
++		 */
++		eax = 0x8000001f;
++		ecx = 0;
++		native_cpuid(&eax, &ebx, &ecx, &edx);
++		if (eax & BIT(28)) {
++			struct msr m;
++
++			/* Obtain the address of the calling area to use */
++			boot_rdmsr(MSR_SVSM_CAA, &m);
++			boot_svsm_caa = (void *)m.q;
++			boot_svsm_caa_pa = m.q;
++
++			/*
++			 * The real VMPL level cannot be discovered, but the
++			 * memory acceptance routines make no use of that so
++			 * any non-zero value suffices here.
++			 */
++			snp_vmpl = U8_MAX;
++		}
++	}
++	return true;
++}
+diff --git a/arch/x86/boot/compressed/sev.h b/arch/x86/boot/compressed/sev.h
+index 4e463f33186df4..d3900384b8abb5 100644
+--- a/arch/x86/boot/compressed/sev.h
++++ b/arch/x86/boot/compressed/sev.h
+@@ -13,12 +13,14 @@
+ bool sev_snp_enabled(void);
+ void snp_accept_memory(phys_addr_t start, phys_addr_t end);
+ u64 sev_get_status(void);
++bool early_is_sevsnp_guest(void);
+ 
+ #else
+ 
+ static inline bool sev_snp_enabled(void) { return false; }
+ static inline void snp_accept_memory(phys_addr_t start, phys_addr_t end) { }
+ static inline u64 sev_get_status(void) { return 0; }
++static inline bool early_is_sevsnp_guest(void) { return false; }
+ 
+ #endif
+ 
+diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
+index d737d53d03aa94..471eaa46d55f8d 100644
+--- a/arch/x86/events/core.c
++++ b/arch/x86/events/core.c
+@@ -753,7 +753,7 @@ void x86_pmu_enable_all(int added)
+ 	}
+ }
+ 
+-static inline int is_x86_event(struct perf_event *event)
++int is_x86_event(struct perf_event *event)
+ {
+ 	int i;
+ 
+diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
+index f86e47afd56099..5e43d390f7a3dd 100644
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -4333,7 +4333,7 @@ static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr, void *data)
+ 	arr[pebs_enable] = (struct perf_guest_switch_msr){
+ 		.msr = MSR_IA32_PEBS_ENABLE,
+ 		.host = cpuc->pebs_enabled & ~cpuc->intel_ctrl_guest_mask,
+-		.guest = pebs_mask & ~cpuc->intel_ctrl_host_mask,
++		.guest = pebs_mask & ~cpuc->intel_ctrl_host_mask & kvm_pmu->pebs_enable,
+ 	};
+ 
+ 	if (arr[pebs_enable].host) {
+diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
+index 8c616656391ec4..76b6d8469dba8c 100644
+--- a/arch/x86/events/perf_event.h
++++ b/arch/x86/events/perf_event.h
+@@ -110,9 +110,16 @@ static inline bool is_topdown_event(struct perf_event *event)
+ 	return is_metric_event(event) || is_slots_event(event);
+ }
+ 
++int is_x86_event(struct perf_event *event);
++
++static inline bool check_leader_group(struct perf_event *leader, int flags)
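++/*
++ * The group leader may belong to another PMU (e.g. a software event), so
++ * confirm it is an x86 event before interpreting its x86-specific hw.flags.
++ */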
++{
++	return is_x86_event(leader) ? !!(leader->hw.flags & flags) : false;
++}
++
+ static inline bool is_branch_counters_group(struct perf_event *event)
+ {
+-	return event->group_leader->hw.flags & PERF_X86_EVENT_BRANCH_COUNTERS;
++	return check_leader_group(event->group_leader, PERF_X86_EVENT_BRANCH_COUNTERS);
+ }
+ 
+ struct amd_nb {
+diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c
+index ad8d6a363f24ae..444798c5374f48 100644
+--- a/block/blk-mq-cpumap.c
++++ b/block/blk-mq-cpumap.c
+@@ -87,7 +87,6 @@ void blk_mq_map_hw_queues(struct blk_mq_queue_map *qmap,
+ 	return;
+ 
+ fallback:
+-	WARN_ON_ONCE(qmap->nr_queues > 1);
+-	blk_mq_clear_mq_map(qmap);
++	blk_mq_map_queues(qmap);
+ }
+ EXPORT_SYMBOL_GPL(blk_mq_map_hw_queues);
+diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c
+index 88df2cdc46b62b..168d03d5aa1d07 100644
+--- a/drivers/accel/ivpu/ivpu_drv.c
++++ b/drivers/accel/ivpu/ivpu_drv.c
+@@ -36,8 +36,6 @@
+ 			   __stringify(DRM_IVPU_DRIVER_MINOR) "."
+ #endif
+ 
+-static struct lock_class_key submitted_jobs_xa_lock_class_key;
+-
+ int ivpu_dbg_mask;
+ module_param_named(dbg_mask, ivpu_dbg_mask, int, 0644);
+ MODULE_PARM_DESC(dbg_mask, "Driver debug mask. See IVPU_DBG_* macros.");
+@@ -260,6 +258,9 @@ static int ivpu_open(struct drm_device *dev, struct drm_file *file)
+ 	if (ret)
+ 		goto err_xa_erase;
+ 
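++	/*
++	 * Job IDs encode the context in bits 31:8 and a per-context counter
++	 * in bits 7:0, giving each context its own 256-entry ID range.
++	 */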
++	file_priv->job_limit.min = FIELD_PREP(IVPU_JOB_ID_CONTEXT_MASK, (file_priv->ctx.id - 1));
++	file_priv->job_limit.max = file_priv->job_limit.min | IVPU_JOB_ID_JOB_MASK;
++
+ 	mutex_unlock(&vdev->context_list_lock);
+ 	drm_dev_exit(idx);
+ 
+@@ -452,26 +453,6 @@ static const struct drm_driver driver = {
+ 	.minor = DRM_IVPU_DRIVER_MINOR,
+ };
+ 
+-static void ivpu_context_abort_invalid(struct ivpu_device *vdev)
+-{
+-	struct ivpu_file_priv *file_priv;
+-	unsigned long ctx_id;
+-
+-	mutex_lock(&vdev->context_list_lock);
+-
+-	xa_for_each(&vdev->context_xa, ctx_id, file_priv) {
+-		if (!file_priv->has_mmu_faults || file_priv->aborted)
+-			continue;
+-
+-		mutex_lock(&file_priv->lock);
+-		ivpu_context_abort_locked(file_priv);
+-		file_priv->aborted = true;
+-		mutex_unlock(&file_priv->lock);
+-	}
+-
+-	mutex_unlock(&vdev->context_list_lock);
+-}
+-
+ static irqreturn_t ivpu_irq_thread_handler(int irq, void *arg)
+ {
+ 	struct ivpu_device *vdev = arg;
+@@ -485,9 +466,6 @@ static irqreturn_t ivpu_irq_thread_handler(int irq, void *arg)
+ 		case IVPU_HW_IRQ_SRC_IPC:
+ 			ivpu_ipc_irq_thread_handler(vdev);
+ 			break;
+-		case IVPU_HW_IRQ_SRC_MMU_EVTQ:
+-			ivpu_context_abort_invalid(vdev);
+-			break;
+ 		case IVPU_HW_IRQ_SRC_DCT:
+ 			ivpu_pm_dct_irq_thread_handler(vdev);
+ 			break;
+@@ -604,13 +582,21 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
+ 	xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
+ 	xa_init_flags(&vdev->submitted_jobs_xa, XA_FLAGS_ALLOC1);
+ 	xa_init_flags(&vdev->db_xa, XA_FLAGS_ALLOC1);
+-	lockdep_set_class(&vdev->submitted_jobs_xa.xa_lock, &submitted_jobs_xa_lock_class_key);
+ 	INIT_LIST_HEAD(&vdev->bo_list);
+ 
++	vdev->db_limit.min = IVPU_MIN_DB;
++	vdev->db_limit.max = IVPU_MAX_DB;
++
++	INIT_WORK(&vdev->context_abort_work, ivpu_context_abort_thread_handler);
++
+ 	ret = drmm_mutex_init(&vdev->drm, &vdev->context_list_lock);
+ 	if (ret)
+ 		goto err_xa_destroy;
+ 
++	ret = drmm_mutex_init(&vdev->drm, &vdev->submitted_jobs_lock);
++	if (ret)
++		goto err_xa_destroy;
++
+ 	ret = drmm_mutex_init(&vdev->drm, &vdev->bo_list_lock);
+ 	if (ret)
+ 		goto err_xa_destroy;
+diff --git a/drivers/accel/ivpu/ivpu_drv.h b/drivers/accel/ivpu/ivpu_drv.h
+index 9430a24994c32e..a5707a85e72556 100644
+--- a/drivers/accel/ivpu/ivpu_drv.h
++++ b/drivers/accel/ivpu/ivpu_drv.h
+@@ -46,6 +46,9 @@
+ #define IVPU_MIN_DB 1
+ #define IVPU_MAX_DB 255
+ 
++#define IVPU_JOB_ID_JOB_MASK		GENMASK(7, 0)
++#define IVPU_JOB_ID_CONTEXT_MASK	GENMASK(31, 8)
++
+ #define IVPU_NUM_ENGINES       2
+ #define IVPU_NUM_PRIORITIES    4
+ #define IVPU_NUM_CMDQS_PER_CTX (IVPU_NUM_ENGINES * IVPU_NUM_PRIORITIES)
+@@ -134,12 +137,16 @@ struct ivpu_device {
+ 	struct mutex context_list_lock; /* Protects user context addition/removal */
+ 	struct xarray context_xa;
+ 	struct xa_limit context_xa_limit;
++	struct work_struct context_abort_work;
+ 
+ 	struct xarray db_xa;
++	struct xa_limit db_limit;
++	u32 db_next;
+ 
+ 	struct mutex bo_list_lock; /* Protects bo_list */
+ 	struct list_head bo_list;
+ 
++	struct mutex submitted_jobs_lock; /* Protects submitted_jobs */
+ 	struct xarray submitted_jobs_xa;
+ 	struct ivpu_ipc_consumer job_done_consumer;
+ 
+@@ -171,6 +178,8 @@ struct ivpu_file_priv {
+ 	struct mutex ms_lock; /* Protects ms_instance_list, ms_info_bo */
+ 	struct list_head ms_instance_list;
+ 	struct ivpu_bo *ms_info_bo;
++	struct xa_limit job_limit;
++	u32 job_id_next;
+ 	bool has_mmu_faults;
+ 	bool bound;
+ 	bool aborted;
+diff --git a/drivers/accel/ivpu/ivpu_hw_btrs.h b/drivers/accel/ivpu/ivpu_hw_btrs.h
+index 71792dab3c2107..3855e2df1e0c83 100644
+--- a/drivers/accel/ivpu/ivpu_hw_btrs.h
++++ b/drivers/accel/ivpu/ivpu_hw_btrs.h
+@@ -14,7 +14,7 @@
+ #define PLL_PROFILING_FREQ_DEFAULT   38400000
+ #define PLL_PROFILING_FREQ_HIGH      400000000
+ 
+-#define DCT_DEFAULT_ACTIVE_PERCENT 15u
++#define DCT_DEFAULT_ACTIVE_PERCENT 30u
+ #define DCT_PERIOD_US		   35300u
+ 
+ int ivpu_hw_btrs_info_init(struct ivpu_device *vdev);
+diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c
+index 91f7f6f3ca675b..27121c66e48f81 100644
+--- a/drivers/accel/ivpu/ivpu_job.c
++++ b/drivers/accel/ivpu/ivpu_job.c
+@@ -21,8 +21,6 @@
+ #include "vpu_boot_api.h"
+ 
+ #define CMD_BUF_IDX	     0
+-#define JOB_ID_JOB_MASK	     GENMASK(7, 0)
+-#define JOB_ID_CONTEXT_MASK  GENMASK(31, 8)
+ #define JOB_MAX_BUFFER_COUNT 65535
+ 
+ static void ivpu_cmdq_ring_db(struct ivpu_device *vdev, struct ivpu_cmdq *cmdq)
+@@ -79,7 +77,6 @@ static void ivpu_preemption_buffers_free(struct ivpu_device *vdev,
+ 
+ static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv)
+ {
+-	struct xa_limit db_xa_limit = {.max = IVPU_MAX_DB, .min = IVPU_MIN_DB};
+ 	struct ivpu_device *vdev = file_priv->vdev;
+ 	struct ivpu_cmdq *cmdq;
+ 	int ret;
+@@ -88,8 +85,9 @@ static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv)
+ 	if (!cmdq)
+ 		return NULL;
+ 
+-	ret = xa_alloc(&vdev->db_xa, &cmdq->db_id, NULL, db_xa_limit, GFP_KERNEL);
+-	if (ret) {
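++	/*
++	 * xa_alloc_cyclic() continues from db_next and wraps within db_limit,
++	 * so a just-released doorbell ID is not reused immediately; it returns
++	 * 1 (not an error) after a wrap, hence the ret < 0 check below.
++	 */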
++	ret = xa_alloc_cyclic(&vdev->db_xa, &cmdq->db_id, NULL, vdev->db_limit, &vdev->db_next,
++			      GFP_KERNEL);
++	if (ret < 0) {
+ 		ivpu_err(vdev, "Failed to allocate doorbell id: %d\n", ret);
+ 		goto err_free_cmdq;
+ 	}
+@@ -337,6 +335,8 @@ void ivpu_context_abort_locked(struct ivpu_file_priv *file_priv)
+ 
+ 	if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_OS)
+ 		ivpu_jsm_context_release(vdev, file_priv->ctx.id);
++
++	file_priv->aborted = true;
+ }
+ 
+ static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job)
+@@ -354,7 +354,7 @@ static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job)
+ 		return -EBUSY;
+ 	}
+ 
+-	entry = &cmdq->jobq->job[tail];
++	entry = &cmdq->jobq->slot[tail].job;
+ 	entry->batch_buf_addr = job->cmd_buf_vpu_addr;
+ 	entry->job_id = job->job_id;
+ 	entry->flags = 0;
+@@ -469,16 +469,14 @@ static struct ivpu_job *ivpu_job_remove_from_submitted_jobs(struct ivpu_device *
+ {
+ 	struct ivpu_job *job;
+ 
+-	xa_lock(&vdev->submitted_jobs_xa);
+-	job = __xa_erase(&vdev->submitted_jobs_xa, job_id);
++	lockdep_assert_held(&vdev->submitted_jobs_lock);
+ 
++	job = xa_erase(&vdev->submitted_jobs_xa, job_id);
+ 	if (xa_empty(&vdev->submitted_jobs_xa) && job) {
+ 		vdev->busy_time = ktime_add(ktime_sub(ktime_get(), vdev->busy_start_ts),
+ 					    vdev->busy_time);
+ 	}
+ 
+-	xa_unlock(&vdev->submitted_jobs_xa);
+-
+ 	return job;
+ }
+ 
+@@ -486,6 +484,28 @@ static int ivpu_job_signal_and_destroy(struct ivpu_device *vdev, u32 job_id, u32
+ {
+ 	struct ivpu_job *job;
+ 
++	lockdep_assert_held(&vdev->submitted_jobs_lock);
++
++	job = xa_load(&vdev->submitted_jobs_xa, job_id);
++	if (!job)
++		return -ENOENT;
++
++	if (job_status == VPU_JSM_STATUS_MVNCI_CONTEXT_VIOLATION_HW) {
++		guard(mutex)(&job->file_priv->lock);
++
++		if (job->file_priv->has_mmu_faults)
++			return 0;
++
++		/*
++		 * Mark the context as faulty and defer destruction of the job to the
++		 * jobs-abort thread handler, so that MMU faults and jobs returning a
++		 * context-violation status are synchronized and handled the same way.
++		 */
++		job->file_priv->has_mmu_faults = true;
++		queue_work(system_wq, &vdev->context_abort_work);
++		return 0;
++	}
++
+ 	job = ivpu_job_remove_from_submitted_jobs(vdev, job_id);
+ 	if (!job)
+ 		return -ENOENT;
+@@ -503,6 +523,10 @@ static int ivpu_job_signal_and_destroy(struct ivpu_device *vdev, u32 job_id, u32
+ 	ivpu_stop_job_timeout_detection(vdev);
+ 
+ 	ivpu_rpm_put(vdev);
++
++	if (!xa_empty(&vdev->submitted_jobs_xa))
++		ivpu_start_job_timeout_detection(vdev);
++
+ 	return 0;
+ }
+ 
+@@ -511,15 +535,18 @@ void ivpu_jobs_abort_all(struct ivpu_device *vdev)
+ 	struct ivpu_job *job;
+ 	unsigned long id;
+ 
++	mutex_lock(&vdev->submitted_jobs_lock);
++
+ 	xa_for_each(&vdev->submitted_jobs_xa, id, job)
+ 		ivpu_job_signal_and_destroy(vdev, id, DRM_IVPU_JOB_STATUS_ABORTED);
++
++	mutex_unlock(&vdev->submitted_jobs_lock);
+ }
+ 
+ static int ivpu_job_submit(struct ivpu_job *job, u8 priority)
+ {
+ 	struct ivpu_file_priv *file_priv = job->file_priv;
+ 	struct ivpu_device *vdev = job->vdev;
+-	struct xa_limit job_id_range;
+ 	struct ivpu_cmdq *cmdq;
+ 	bool is_first_job;
+ 	int ret;
+@@ -528,27 +555,25 @@ static int ivpu_job_submit(struct ivpu_job *job, u8 priority)
+ 	if (ret < 0)
+ 		return ret;
+ 
++	mutex_lock(&vdev->submitted_jobs_lock);
+ 	mutex_lock(&file_priv->lock);
+ 
+-	cmdq = ivpu_cmdq_acquire(job->file_priv, job->engine_idx, priority);
++	cmdq = ivpu_cmdq_acquire(file_priv, job->engine_idx, priority);
+ 	if (!cmdq) {
+ 		ivpu_warn_ratelimited(vdev, "Failed to get job queue, ctx %d engine %d prio %d\n",
+ 				      file_priv->ctx.id, job->engine_idx, priority);
+ 		ret = -EINVAL;
+-		goto err_unlock_file_priv;
++		goto err_unlock;
+ 	}
+ 
+-	job_id_range.min = FIELD_PREP(JOB_ID_CONTEXT_MASK, (file_priv->ctx.id - 1));
+-	job_id_range.max = job_id_range.min | JOB_ID_JOB_MASK;
+-
+-	xa_lock(&vdev->submitted_jobs_xa);
+ 	is_first_job = xa_empty(&vdev->submitted_jobs_xa);
+-	ret = __xa_alloc(&vdev->submitted_jobs_xa, &job->job_id, job, job_id_range, GFP_KERNEL);
+-	if (ret) {
++	ret = xa_alloc_cyclic(&vdev->submitted_jobs_xa, &job->job_id, job, file_priv->job_limit,
++			      &file_priv->job_id_next, GFP_KERNEL);
++	if (ret < 0) {
+ 		ivpu_dbg(vdev, JOB, "Too many active jobs in ctx %d\n",
+ 			 file_priv->ctx.id);
+ 		ret = -EBUSY;
+-		goto err_unlock_submitted_jobs_xa;
++		goto err_unlock;
+ 	}
+ 
+ 	ret = ivpu_cmdq_push_job(cmdq, job);
+@@ -570,20 +595,20 @@ static int ivpu_job_submit(struct ivpu_job *job, u8 priority)
+ 		 job->job_id, file_priv->ctx.id, job->engine_idx, priority,
+ 		 job->cmd_buf_vpu_addr, cmdq->jobq->header.tail);
+ 
+-	xa_unlock(&vdev->submitted_jobs_xa);
+-
+ 	mutex_unlock(&file_priv->lock);
+ 
+-	if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_HW))
++	if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_HW)) {
+ 		ivpu_job_signal_and_destroy(vdev, job->job_id, VPU_JSM_STATUS_SUCCESS);
++	}
++
++	mutex_unlock(&vdev->submitted_jobs_lock);
+ 
+ 	return 0;
+ 
+ err_erase_xa:
+-	__xa_erase(&vdev->submitted_jobs_xa, job->job_id);
+-err_unlock_submitted_jobs_xa:
+-	xa_unlock(&vdev->submitted_jobs_xa);
+-err_unlock_file_priv:
++	xa_erase(&vdev->submitted_jobs_xa, job->job_id);
++err_unlock:
++	mutex_unlock(&vdev->submitted_jobs_lock);
+ 	mutex_unlock(&file_priv->lock);
+ 	ivpu_rpm_put(vdev);
+ 	return ret;
+@@ -753,7 +778,6 @@ ivpu_job_done_callback(struct ivpu_device *vdev, struct ivpu_ipc_hdr *ipc_hdr,
+ 		       struct vpu_jsm_msg *jsm_msg)
+ {
+ 	struct vpu_ipc_msg_payload_job_done *payload;
+-	int ret;
+ 
+ 	if (!jsm_msg) {
+ 		ivpu_err(vdev, "IPC message has no JSM payload\n");
+@@ -766,9 +790,10 @@ ivpu_job_done_callback(struct ivpu_device *vdev, struct ivpu_ipc_hdr *ipc_hdr,
+ 	}
+ 
+ 	payload = (struct vpu_ipc_msg_payload_job_done *)&jsm_msg->payload;
+-	ret = ivpu_job_signal_and_destroy(vdev, payload->job_id, payload->job_status);
+-	if (!ret && !xa_empty(&vdev->submitted_jobs_xa))
+-		ivpu_start_job_timeout_detection(vdev);
++
++	mutex_lock(&vdev->submitted_jobs_lock);
++	ivpu_job_signal_and_destroy(vdev, payload->job_id, payload->job_status);
++	mutex_unlock(&vdev->submitted_jobs_lock);
+ }
+ 
+ void ivpu_job_done_consumer_init(struct ivpu_device *vdev)
+@@ -781,3 +806,41 @@ void ivpu_job_done_consumer_fini(struct ivpu_device *vdev)
+ {
+ 	ivpu_ipc_consumer_del(vdev, &vdev->job_done_consumer);
+ }
++
++void ivpu_context_abort_thread_handler(struct work_struct *work)
++{
++	struct ivpu_device *vdev = container_of(work, struct ivpu_device, context_abort_work);
++	struct ivpu_file_priv *file_priv;
++	unsigned long ctx_id;
++	struct ivpu_job *job;
++	unsigned long id;
++
++	if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW)
++		ivpu_jsm_reset_engine(vdev, 0);
++
++	mutex_lock(&vdev->context_list_lock);
++	xa_for_each(&vdev->context_xa, ctx_id, file_priv) {
++		if (!file_priv->has_mmu_faults || file_priv->aborted)
++			continue;
++
++		mutex_lock(&file_priv->lock);
++		ivpu_context_abort_locked(file_priv);
++		mutex_unlock(&file_priv->lock);
++	}
++	mutex_unlock(&vdev->context_list_lock);
++
++	if (vdev->fw->sched_mode != VPU_SCHEDULING_MODE_HW)
++		return;
++
++	ivpu_jsm_hws_resume_engine(vdev, 0);
++	/*
++	 * In hardware scheduling mode the NPU has already stopped processing
++	 * jobs and won't send us any further notifications, so we have to free
++	 * the job-related resources and notify userspace ourselves.
++	 */
++	mutex_lock(&vdev->submitted_jobs_lock);
++	xa_for_each(&vdev->submitted_jobs_xa, id, job)
++		if (job->file_priv->aborted)
++			ivpu_job_signal_and_destroy(vdev, job->job_id, DRM_IVPU_JOB_STATUS_ABORTED);
++	mutex_unlock(&vdev->submitted_jobs_lock);
++}
+diff --git a/drivers/accel/ivpu/ivpu_job.h b/drivers/accel/ivpu/ivpu_job.h
+index 6accb94028c7a2..0ae77f0638fadc 100644
+--- a/drivers/accel/ivpu/ivpu_job.h
++++ b/drivers/accel/ivpu/ivpu_job.h
+@@ -64,6 +64,7 @@ void ivpu_cmdq_reset_all_contexts(struct ivpu_device *vdev);
+ 
+ void ivpu_job_done_consumer_init(struct ivpu_device *vdev);
+ void ivpu_job_done_consumer_fini(struct ivpu_device *vdev);
++void ivpu_context_abort_thread_handler(struct work_struct *work);
+ 
+ void ivpu_jobs_abort_all(struct ivpu_device *vdev);
+ 
+diff --git a/drivers/accel/ivpu/ivpu_jsm_msg.c b/drivers/accel/ivpu/ivpu_jsm_msg.c
+index f7618b605f0219..ae91ad24d10d86 100644
+--- a/drivers/accel/ivpu/ivpu_jsm_msg.c
++++ b/drivers/accel/ivpu/ivpu_jsm_msg.c
+@@ -48,9 +48,10 @@ const char *ivpu_jsm_msg_type_to_str(enum vpu_ipc_msg_type type)
+ 	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE);
+ 	IVPU_CASE_TO_STR(VPU_JSM_MSG_STATE_DUMP);
+ 	IVPU_CASE_TO_STR(VPU_JSM_MSG_STATE_DUMP_RSP);
+-	IVPU_CASE_TO_STR(VPU_JSM_MSG_BLOB_DEINIT);
++	IVPU_CASE_TO_STR(VPU_JSM_MSG_BLOB_DEINIT_DEPRECATED);
+ 	IVPU_CASE_TO_STR(VPU_JSM_MSG_DYNDBG_CONTROL);
+ 	IVPU_CASE_TO_STR(VPU_JSM_MSG_JOB_DONE);
++	IVPU_CASE_TO_STR(VPU_JSM_MSG_NATIVE_FENCE_SIGNALLED);
+ 	IVPU_CASE_TO_STR(VPU_JSM_MSG_ENGINE_RESET_DONE);
+ 	IVPU_CASE_TO_STR(VPU_JSM_MSG_ENGINE_PREEMPT_DONE);
+ 	IVPU_CASE_TO_STR(VPU_JSM_MSG_REGISTER_DB_DONE);
+diff --git a/drivers/accel/ivpu/ivpu_mmu.c b/drivers/accel/ivpu/ivpu_mmu.c
+index c078e214b22129..fb15eb75b5ba9a 100644
+--- a/drivers/accel/ivpu/ivpu_mmu.c
++++ b/drivers/accel/ivpu/ivpu_mmu.c
+@@ -917,8 +917,7 @@ void ivpu_mmu_irq_evtq_handler(struct ivpu_device *vdev)
+ 		REGV_WR32(IVPU_MMU_REG_EVTQ_CONS_SEC, vdev->mmu->evtq.cons);
+ 	}
+ 
+-	if (!kfifo_put(&vdev->hw->irq.fifo, IVPU_HW_IRQ_SRC_MMU_EVTQ))
+-		ivpu_err_ratelimited(vdev, "IRQ FIFO full\n");
++	queue_work(system_wq, &vdev->context_abort_work);
+ }
+ 
+ void ivpu_mmu_evtq_dump(struct ivpu_device *vdev)
+diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c
+index fbb61a2c3b19ce..d1fbad78f61ba0 100644
+--- a/drivers/accel/ivpu/ivpu_pm.c
++++ b/drivers/accel/ivpu/ivpu_pm.c
+@@ -421,16 +421,17 @@ int ivpu_pm_dct_enable(struct ivpu_device *vdev, u8 active_percent)
+ 	active_us = (DCT_PERIOD_US * active_percent) / 100;
+ 	inactive_us = DCT_PERIOD_US - active_us;
+ 
++	vdev->pm->dct_active_percent = active_percent;
++
++	ivpu_dbg(vdev, PM, "DCT requested %u%% (D0: %uus, D0i2: %uus)\n",
++		 active_percent, active_us, inactive_us);
++
+ 	ret = ivpu_jsm_dct_enable(vdev, active_us, inactive_us);
+ 	if (ret) {
+ 		ivpu_err_ratelimited(vdev, "Failed to enable DCT: %d\n", ret);
+ 		return ret;
+ 	}
+ 
+-	vdev->pm->dct_active_percent = active_percent;
+-
+-	ivpu_dbg(vdev, PM, "DCT set to %u%% (D0: %uus, D0i2: %uus)\n",
+-		 active_percent, active_us, inactive_us);
+ 	return 0;
+ }
+ 
+@@ -438,15 +439,16 @@ int ivpu_pm_dct_disable(struct ivpu_device *vdev)
+ {
+ 	int ret;
+ 
++	vdev->pm->dct_active_percent = 0;
++
++	ivpu_dbg(vdev, PM, "DCT requested to be disabled\n");
++
+ 	ret = ivpu_jsm_dct_disable(vdev);
+ 	if (ret) {
+ 		ivpu_err_ratelimited(vdev, "Filed to disable DCT: %d\n", ret);
+ 		return ret;
+ 	}
+ 
+-	vdev->pm->dct_active_percent = 0;
+-
+-	ivpu_dbg(vdev, PM, "DCT disabled\n");
+ 	return 0;
+ }
+ 
+@@ -458,7 +460,7 @@ void ivpu_pm_dct_irq_thread_handler(struct ivpu_device *vdev)
+ 	if (ivpu_hw_btrs_dct_get_request(vdev, &enable))
+ 		return;
+ 
+-	if (vdev->pm->dct_active_percent)
++	if (enable)
+ 		ret = ivpu_pm_dct_enable(vdev, DCT_DEFAULT_ACTIVE_PERCENT);
+ 	else
+ 		ret = ivpu_pm_dct_disable(vdev);
+diff --git a/drivers/accel/ivpu/ivpu_sysfs.c b/drivers/accel/ivpu/ivpu_sysfs.c
+index 616477fc17fa07..8a616791c32f5e 100644
+--- a/drivers/accel/ivpu/ivpu_sysfs.c
++++ b/drivers/accel/ivpu/ivpu_sysfs.c
+@@ -30,11 +30,12 @@ npu_busy_time_us_show(struct device *dev, struct device_attribute *attr, char *b
+ 	struct ivpu_device *vdev = to_ivpu_device(drm);
+ 	ktime_t total, now = 0;
+ 
+-	xa_lock(&vdev->submitted_jobs_xa);
++	mutex_lock(&vdev->submitted_jobs_lock);
++
+ 	total = vdev->busy_time;
+ 	if (!xa_empty(&vdev->submitted_jobs_xa))
+ 		now = ktime_sub(ktime_get(), vdev->busy_start_ts);
+-	xa_unlock(&vdev->submitted_jobs_xa);
++	mutex_unlock(&vdev->submitted_jobs_lock);
+ 
+ 	return sysfs_emit(buf, "%lld\n", ktime_to_us(ktime_add(total, now)));
+ }
+diff --git a/drivers/accel/ivpu/vpu_boot_api.h b/drivers/accel/ivpu/vpu_boot_api.h
+index 82954b91b7481c..908e68ea1c39c2 100644
+--- a/drivers/accel/ivpu/vpu_boot_api.h
++++ b/drivers/accel/ivpu/vpu_boot_api.h
+@@ -1,14 +1,13 @@
+ /* SPDX-License-Identifier: MIT */
+ /*
+- * Copyright (c) 2020-2023, Intel Corporation.
++ * Copyright (c) 2020-2024, Intel Corporation.
+  */
+ 
+ #ifndef VPU_BOOT_API_H
+ #define VPU_BOOT_API_H
+ 
+ /*
+- * =========== FW API version information beginning ================
+- *  The bellow values will be used to construct the version info this way:
++ *  The below values will be used to construct the version info this way:
+  *  fw_bin_header->api_version[VPU_BOOT_API_VER_ID] = (VPU_BOOT_API_VER_MAJOR << 16) |
+  *  VPU_BOOT_API_VER_MINOR;
+  *  VPU_BOOT_API_VER_PATCH will be ignored. KMD and compatibility is not affected if this changes
+@@ -27,19 +26,18 @@
+  * Minor version changes when API backward compatibility is preserved.
+  * Resets to 0 if Major version is incremented.
+  */
+-#define VPU_BOOT_API_VER_MINOR 24
++#define VPU_BOOT_API_VER_MINOR 26
+ 
+ /*
+  * API header changed (field names, documentation, formatting) but API itself has not been changed
+  */
+-#define VPU_BOOT_API_VER_PATCH 0
++#define VPU_BOOT_API_VER_PATCH 3
+ 
+ /*
+  * Index in the API version table
+  * Must be unique for each API
+  */
+ #define VPU_BOOT_API_VER_INDEX 0
+-/* ------------ FW API version information end ---------------------*/
+ 
+ #pragma pack(push, 4)
+ 
+@@ -164,8 +162,6 @@ enum vpu_trace_destination {
+ /* VPU 30xx HW component IDs are sequential, so define first and last IDs. */
+ #define VPU_TRACE_PROC_BIT_30XX_FIRST VPU_TRACE_PROC_BIT_LRT
+ #define VPU_TRACE_PROC_BIT_30XX_LAST  VPU_TRACE_PROC_BIT_SHV_15
+-#define VPU_TRACE_PROC_BIT_KMB_FIRST  VPU_TRACE_PROC_BIT_30XX_FIRST
+-#define VPU_TRACE_PROC_BIT_KMB_LAST   VPU_TRACE_PROC_BIT_30XX_LAST
+ 
+ struct vpu_boot_l2_cache_config {
+ 	u8 use;
+@@ -199,6 +195,17 @@ struct vpu_warm_boot_section {
+  */
+ #define POWER_PROFILE_SURVIVABILITY 0x1
+ 
++/**
++ * Enum for dvfs_mode boot param.
++ */
++enum vpu_governor {
++	VPU_GOV_DEFAULT = 0, /* Default Governor for the system */
++	VPU_GOV_MAX_PERFORMANCE = 1, /* Maximum performance governor */
++	VPU_GOV_ON_DEMAND = 2, /* On Demand frequency control governor */
++	VPU_GOV_POWER_SAVE = 3, /* Power save governor */
++	VPU_GOV_ON_DEMAND_PRIORITY_AWARE = 4 /* On Demand priority based governor */
++};
++
+ struct vpu_boot_params {
+ 	u32 magic;
+ 	u32 vpu_id;
+@@ -301,7 +308,14 @@ struct vpu_boot_params {
+ 	u32 temp_sensor_period_ms;
+ 	/** PLL ratio for efficient clock frequency */
+ 	u32 pn_freq_pll_ratio;
+-	/** DVFS Mode: Default: 0, Max Performance: 1, On Demand: 2, Power Save: 3 */
++	/**
++	 * DVFS Mode:
++	 * 0 - Default, DVFS mode selected by the firmware
++	 * 1 - Max Performance
++	 * 2 - On Demand
++	 * 3 - Power Save
++	 * 4 - On Demand Priority Aware
++	 */
+ 	u32 dvfs_mode;
+ 	/**
+ 	 * Depending on DVFS Mode:
+@@ -332,8 +346,8 @@ struct vpu_boot_params {
+ 	u64 d0i3_entry_vpu_ts;
+ 	/*
+ 	 * The system time of the host operating system in microseconds.
+-	 * E.g the number of microseconds since 1st of January 1970, or whatever date the
+-	 * host operating system uses to maintain system time.
++	 * E.g the number of microseconds since 1st of January 1970, or whatever
++	 * date the host operating system uses to maintain system time.
+ 	 * This value will be used to track system time on the VPU.
+ 	 * The KMD is required to update this value on every VPU reset.
+ 	 */
+@@ -382,10 +396,7 @@ struct vpu_boot_params {
+ 	u32 pad6[734];
+ };
+ 
+-/*
+- * Magic numbers set between host and vpu to detect corruptio of tracing init
+- */
+-
++/* Magic numbers set between host and vpu to detect corruption of tracing init */
+ #define VPU_TRACING_BUFFER_CANARY (0xCAFECAFE)
+ 
+ /* Tracing buffer message format definitions */
+@@ -405,7 +416,9 @@ struct vpu_tracing_buffer_header {
+ 	u32 host_canary_start;
+ 	/* offset from start of buffer for trace entries */
+ 	u32 read_index;
+-	u32 pad_to_cache_line_size_0[14];
++	/* keeps track of wrapping on the reader side */
++	u32 read_wrap_count;
++	u32 pad_to_cache_line_size_0[13];
+ 	/* End of first cache line */
+ 
+ 	/**
+diff --git a/drivers/accel/ivpu/vpu_jsm_api.h b/drivers/accel/ivpu/vpu_jsm_api.h
+index 33f462b1a25d88..7215c144158cbd 100644
+--- a/drivers/accel/ivpu/vpu_jsm_api.h
++++ b/drivers/accel/ivpu/vpu_jsm_api.h
+@@ -22,7 +22,7 @@
+ /*
+  * Minor version changes when API backward compatibility is preserved.
+  */
+-#define VPU_JSM_API_VER_MINOR 16
++#define VPU_JSM_API_VER_MINOR 25
+ 
+ /*
+  * API header changed (field names, documentation, formatting) but API itself has not been changed
+@@ -36,7 +36,7 @@
+ 
+ /*
+  * Number of Priority Bands for Hardware Scheduling
+- * Bands: RealTime, Focus, Normal, Idle
++ * Bands: Idle(0), Normal(1), Focus(2), RealTime(3)
+  */
+ #define VPU_HWS_NUM_PRIORITY_BANDS 4
+ 
+@@ -74,6 +74,7 @@
+ #define VPU_JSM_STATUS_MVNCI_INTERNAL_ERROR		 0xCU
+ /* Job status returned when the job was preempted mid-inference */
+ #define VPU_JSM_STATUS_PREEMPTED_MID_INFERENCE		 0xDU
++#define VPU_JSM_STATUS_MVNCI_CONTEXT_VIOLATION_HW	 0xEU
+ 
+ /*
+  * Host <-> VPU IPC channels.
+@@ -86,18 +87,58 @@
+ /*
+  * Job flags bit masks.
+  */
+-#define VPU_JOB_FLAGS_NULL_SUBMISSION_MASK 0x00000001
+-#define VPU_JOB_FLAGS_PRIVATE_DATA_MASK	   0xFF000000
++enum {
++	/*
++	 * Null submission mask.
++	 * When set, batch buffer's commands are not processed but returned as
++	 * successful immediately, except fences and timestamps.
++	 * When cleared, batch buffer's commands are processed normally.
++	 * Used for testing and profiling purposes.
++	 */
++	VPU_JOB_FLAGS_NULL_SUBMISSION_MASK = (1 << 0U),
++	/*
++	 * Inline command mask.
++	 * When set, the object in job queue is an inline command (see struct vpu_inline_cmd below).
++	 * When cleared, the object in job queue is a job (see struct vpu_job_queue_entry below).
++	 */
++	VPU_JOB_FLAGS_INLINE_CMD_MASK = (1 << 1U),
++	/*
++	 * VPU private data mask.
++	 * Reserved for the VPU to store private data about the job (or inline command)
++	 * while being processed.
++	 */
++	VPU_JOB_FLAGS_PRIVATE_DATA_MASK = 0xFFFF0000U
++};
+ 
+ /*
+- * Sizes of the reserved areas in jobs, in bytes.
++ * Job queue flags bit masks.
+  */
+-#define VPU_JOB_RESERVED_BYTES 8
++enum {
++	/*
++	 * No job done notification mask.
++	 * When set, indicates that no job done notification should be sent for any
++	 * job from this queue. When cleared, indicates that job done notification
++	 * should be sent for every job completed from this queue.
++	 */
++	VPU_JOB_QUEUE_FLAGS_NO_JOB_DONE_MASK = (1 << 0U),
++	/*
++	 * Native fence usage mask.
++	 * When set, indicates that job queue uses native fences (as inline commands
++	 * in job queue). Such queues may also use legacy fences (as commands in batch buffers).
++	 * When cleared, indicates the job queue only uses legacy fences.
++	 * NOTE: For queues using native fences, VPU expects that all jobs in the queue
++	 * are immediately followed by an inline command object. This object is expected
++	 * to be a fence signal command in most cases, but can also be a NOP in case the host
++	 * does not need per-job fence signalling. Other inline commands objects can be
++	 * inserted between "job and inline command" pairs.
++	 */
++	VPU_JOB_QUEUE_FLAGS_USE_NATIVE_FENCE_MASK = (1 << 1U),
+ 
+-/*
+- * Sizes of the reserved areas in job queues, in bytes.
+- */
+-#define VPU_JOB_QUEUE_RESERVED_BYTES 52
++	/*
++	 * Enable turbo mode for testing NPU performance; not recommended for regular usage.
++	 */
++	VPU_JOB_QUEUE_FLAGS_TURBO_MODE = (1 << 2U)
++};
+ 
+ /*
+  * Max length (including trailing NULL char) of trace entity name (e.g., the
+@@ -140,24 +181,113 @@
+  */
+ #define VPU_HWS_INVALID_CMDQ_HANDLE 0ULL
+ 
++/*
++ * Inline commands types.
++ */
++/*
++ * NOP.
++ * VPU does nothing other than consuming the inline command object.
++ */
++#define VPU_INLINE_CMD_TYPE_NOP		 0x0
++/*
++ * Fence wait.
++ * VPU waits for the fence current value to reach monitored value.
++ * Fence wait operations are executed upon job dispatching. While waiting for
++ * the fence to be satisfied, VPU blocks fetching of the next objects in the queue.
++ * Jobs present in the queue prior to the fence wait object may be processed
++ * concurrently.
++ */
++#define VPU_INLINE_CMD_TYPE_FENCE_WAIT	 0x1
++/*
++ * Fence signal.
++ * VPU sets the fence current value to the provided value. If new current value
++ * is equal to or higher than monitored value, VPU sends fence signalled notification
++ * to the host. Fence signal operations are executed upon completion of all the jobs
++ * present in the queue prior to them, and in-order relative to each other in the queue.
++ * But jobs in-between them may be processed concurrently and may complete out-of-order.
++ */
++#define VPU_INLINE_CMD_TYPE_FENCE_SIGNAL 0x2
++
++/*
++ * Job scheduling priority bands for both hardware scheduling and OS scheduling.
++ */
++enum vpu_job_scheduling_priority_band {
++	VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE = 0,
++	VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL = 1,
++	VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS = 2,
++	VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME = 3,
++	VPU_JOB_SCHEDULING_PRIORITY_BAND_COUNT = 4,
++};
++
+ /*
+  * Job format.
++ * Jobs defines the actual workloads to be executed by a given engine.
+  */
+ struct vpu_job_queue_entry {
+-	u64 batch_buf_addr; /**< Address of VPU commands batch buffer */
+-	u32 job_id;	  /**< Job ID */
+-	u32 flags; /**< Flags bit field, see VPU_JOB_FLAGS_* above */
+-	u64 root_page_table_addr; /**< Address of root page table to use for this job */
+-	u64 root_page_table_update_counter; /**< Page tables update events counter */
+-	u64 primary_preempt_buf_addr;
++	/**< Address of VPU commands batch buffer */
++	u64 batch_buf_addr;
++	/**< Job ID */
++	u32 job_id;
++	/**< Flags bit field, see VPU_JOB_FLAGS_* above */
++	u32 flags;
++	/**
++	 * Doorbell ring timestamp taken by KMD from SoC's global system clock, in
++	 * microseconds. NPU can convert this value to its own fixed clock's timebase,
++	 * to match other profiling timestamps.
++	 */
++	u64 doorbell_timestamp;
++	/**< Extra id for job tracking, used only in the firmware perf traces */
++	u64 host_tracking_id;
+ 	/**< Address of the primary preemption buffer to use for this job */
+-	u32 primary_preempt_buf_size;
++	u64 primary_preempt_buf_addr;
+ 	/**< Size of the primary preemption buffer to use for this job */
+-	u32 secondary_preempt_buf_size;
++	u32 primary_preempt_buf_size;
+ 	/**< Size of secondary preemption buffer to use for this job */
+-	u64 secondary_preempt_buf_addr;
++	u32 secondary_preempt_buf_size;
+ 	/**< Address of secondary preemption buffer to use for this job */
+-	u8 reserved_0[VPU_JOB_RESERVED_BYTES];
++	u64 secondary_preempt_buf_addr;
++	u64 reserved_0;
++};
++
++/*
++ * Inline command format.
++ * Inline commands are the commands executed at scheduler level (typically,
++ * synchronization directives). Inline command and job objects must be of
++ * the same size and have flags field at same offset.
++ */
++struct vpu_inline_cmd {
++	u64 reserved_0;
++	/* Inline command type, see VPU_INLINE_CMD_TYPE_* defines. */
++	u32 type;
++	/* Flags bit field, see VPU_JOB_FLAGS_* above. */
++	u32 flags;
++	/* Inline command payload. Depends on inline command type. */
++	union {
++		/* Fence (wait and signal) commands' payload. */
++		struct {
++			/* Fence object handle. */
++			u64 fence_handle;
++			/* User VA of the current fence value. */
++			u64 current_value_va;
++			/* User VA of the monitored fence value (read-only). */
++			u64 monitored_value_va;
++			/* Value to wait for or write in fence location. */
++			u64 value;
++			/* User VA of the log buffer in which to add log entry on completion. */
++			u64 log_buffer_va;
++		} fence;
++		/* Other commands do not have a payload. */
++		/* Payload definition for future inline commands can be inserted here. */
++		u64 reserved_1[6];
++	} payload;
++};
++
++/*
++ * Job queue slots can be populated either with job objects or inline command objects.
++ */
++union vpu_jobq_slot {
++	struct vpu_job_queue_entry job;
++	struct vpu_inline_cmd inline_cmd;
+ };
+ 
+ /*
+@@ -167,7 +297,21 @@ struct vpu_job_queue_header {
+ 	u32 engine_idx;
+ 	u32 head;
+ 	u32 tail;
+-	u8 reserved_0[VPU_JOB_QUEUE_RESERVED_BYTES];
++	u32 flags;
++	/* Set to 1 to indicate priority_band field is valid */
++	u32 priority_band_valid;
++	/*
++	 * Priority for the work of this job queue, valid only if the HWS is NOT used
++	 * and the `priority_band_valid` is set to 1. It is applied only during
++	 * the VPU_JSM_MSG_REGISTER_DB message processing.
++	 * The device firmware might use the `priority_band` to optimize the power
++	 * management logic, but it will not affect the order of jobs.
++	 * Available priority bands: @see enum vpu_job_scheduling_priority_band
++	 */
++	u32 priority_band;
++	/* Inside realtime band assigns a further priority, limited to 0..31 range */
++	u32 realtime_priority_level;
++	u32 reserved_0[9];
+ };
+ 
+ /*
+@@ -175,7 +319,7 @@ struct vpu_job_queue_header {
+  */
+ struct vpu_job_queue {
+ 	struct vpu_job_queue_header header;
+-	struct vpu_job_queue_entry job[];
++	union vpu_jobq_slot slot[];
+ };
+ 
+ /**
+@@ -197,9 +341,7 @@ enum vpu_trace_entity_type {
+ struct vpu_hws_log_buffer_header {
+ 	/* Written by VPU after adding a log entry. Initialised by host to 0. */
+ 	u32 first_free_entry_index;
+-	/* Incremented by VPU every time the VPU overwrites the 0th entry;
+-	 * initialised by host to 0.
+-	 */
++	/* Incremented by VPU every time the VPU writes the 0th entry; initialised by host to 0. */
+ 	u32 wraparound_count;
+ 	/*
+ 	 * This is the number of buffers that can be stored in the log buffer provided by the host.
+@@ -230,14 +372,80 @@ struct vpu_hws_log_buffer_entry {
+ 	u64 operation_data[2];
+ };
+ 
++/* Native fence log buffer types. */
++enum vpu_hws_native_fence_log_type {
++	VPU_HWS_NATIVE_FENCE_LOG_TYPE_WAITS = 1,
++	VPU_HWS_NATIVE_FENCE_LOG_TYPE_SIGNALS = 2
++};
++
++/* HWS native fence log buffer header. */
++struct vpu_hws_native_fence_log_header {
++	union {
++		struct {
++			/* Index of the first free entry in buffer. */
++			u32 first_free_entry_idx;
++			/* Incremented each time NPU wraps around the buffer to write next entry. */
++			u32 wraparound_count;
++		};
++		/* Field allowing atomic update of both fields above. */
++		u64 atomic_wraparound_and_entry_idx;
++	};
++	/* Log buffer type, see enum vpu_hws_native_fence_log_type. */
++	u64 type;
++	/* Allocated number of entries in the log buffer. */
++	u64 entry_nb;
++	u64 reserved[2];
++};
++
++/* Native fence log operation types. */
++enum vpu_hws_native_fence_log_op {
++	VPU_HWS_NATIVE_FENCE_LOG_OP_SIGNAL_EXECUTED = 0,
++	VPU_HWS_NATIVE_FENCE_LOG_OP_WAIT_UNBLOCKED = 1
++};
++
++/* HWS native fence log entry. */
++struct vpu_hws_native_fence_log_entry {
++	/* Newly signaled/unblocked fence value. */
++	u64 fence_value;
++	/* Native fence object handle to which this operation belongs. */
++	u64 fence_handle;
++	/* Operation type, see enum vpu_hws_native_fence_log_op. */
++	u64 op_type;
++	u64 reserved_0;
++	/*
++	 * VPU_HWS_NATIVE_FENCE_LOG_OP_WAIT_UNBLOCKED only: Timestamp at which fence
++	 * wait was started (in NPU SysTime).
++	 */
++	u64 fence_wait_start_ts;
++	u64 reserved_1;
++	/* Timestamp at which fence operation was completed (in NPU SysTime). */
++	u64 fence_end_ts;
++};
++
++/* Native fence log buffer. */
++struct vpu_hws_native_fence_log_buffer {
++	struct vpu_hws_native_fence_log_header header;
++	struct vpu_hws_native_fence_log_entry entry[];
++};
++
+ /*
+  * Host <-> VPU IPC messages types.
+  */
+ enum vpu_ipc_msg_type {
+ 	VPU_JSM_MSG_UNKNOWN = 0xFFFFFFFF,
++
+ 	/* IPC Host -> Device, Async commands */
+ 	VPU_JSM_MSG_ASYNC_CMD = 0x1100,
+ 	VPU_JSM_MSG_ENGINE_RESET = VPU_JSM_MSG_ASYNC_CMD,
++	/**
++	 * Preempt engine. The NPU stops (preempts) all the jobs currently
++	 * executing on the target engine making the engine become idle and ready to
++	 * execute new jobs.
++	 * NOTE: The NPU does not remove unstarted jobs (if any) from job queues of
++	 * the target engine, but it stops processing them (until the queue doorbell
++	 * is rung again); the host is responsible to reset the job queue, either
++	 * after preemption or when resubmitting jobs to the queue.
++	 */
+ 	VPU_JSM_MSG_ENGINE_PREEMPT = 0x1101,
+ 	VPU_JSM_MSG_REGISTER_DB = 0x1102,
+ 	VPU_JSM_MSG_UNREGISTER_DB = 0x1103,
+@@ -323,9 +531,10 @@ enum vpu_ipc_msg_type {
+ 	 * NOTE: Please introduce new ASYNC commands before this one. *
+ 	 */
+ 	VPU_JSM_MSG_STATE_DUMP = 0x11FF,
++
+ 	/* IPC Host -> Device, General commands */
+ 	VPU_JSM_MSG_GENERAL_CMD = 0x1200,
+-	VPU_JSM_MSG_BLOB_DEINIT = VPU_JSM_MSG_GENERAL_CMD,
++	VPU_JSM_MSG_BLOB_DEINIT_DEPRECATED = VPU_JSM_MSG_GENERAL_CMD,
+ 	/**
+ 	 * Control dyndbg behavior by executing a dyndbg command; equivalent to
+ 	 * Linux command: `echo '<dyndbg_cmd>' > <debugfs>/dynamic_debug/control`.
+@@ -335,8 +544,12 @@ enum vpu_ipc_msg_type {
+ 	 * Perform the save procedure for the D0i3 entry
+ 	 */
+ 	VPU_JSM_MSG_PWR_D0I3_ENTER = 0x1202,
++
+ 	/* IPC Device -> Host, Job completion */
+ 	VPU_JSM_MSG_JOB_DONE = 0x2100,
++	/* IPC Device -> Host, Fence signalled */
++	VPU_JSM_MSG_NATIVE_FENCE_SIGNALLED = 0x2101,
++
+ 	/* IPC Device -> Host, Async command completion */
+ 	VPU_JSM_MSG_ASYNC_CMD_DONE = 0x2200,
+ 	VPU_JSM_MSG_ENGINE_RESET_DONE = VPU_JSM_MSG_ASYNC_CMD_DONE,
+@@ -422,6 +635,7 @@ enum vpu_ipc_msg_type {
+ 	 * NOTE: Please introduce new ASYNC responses before this one. *
+ 	 */
+ 	VPU_JSM_MSG_STATE_DUMP_RSP = 0x22FF,
++
+ 	/* IPC Device -> Host, General command completion */
+ 	VPU_JSM_MSG_GENERAL_CMD_DONE = 0x2300,
+ 	VPU_JSM_MSG_BLOB_DEINIT_DONE = VPU_JSM_MSG_GENERAL_CMD_DONE,
+@@ -600,11 +814,6 @@ struct vpu_jsm_metric_streamer_update {
+ 	u64 next_buffer_size;
+ };
+ 
+-struct vpu_ipc_msg_payload_blob_deinit {
+-	/* 64-bit unique ID for the blob to be de-initialized. */
+-	u64 blob_id;
+-};
+-
+ struct vpu_ipc_msg_payload_job_done {
+ 	/* Engine to which the job was submitted. */
+ 	u32 engine_idx;
+@@ -622,6 +831,21 @@ struct vpu_ipc_msg_payload_job_done {
+ 	u64 cmdq_id;
+ };
+ 
++/*
++ * Notification message upon native fence signalling.
++ * @see VPU_JSM_MSG_NATIVE_FENCE_SIGNALLED
++ */
++struct vpu_ipc_msg_payload_native_fence_signalled {
++	/* Engine ID. */
++	u32 engine_idx;
++	/* Host SSID. */
++	u32 host_ssid;
++	/* CMDQ ID */
++	u64 cmdq_id;
++	/* Fence object handle. */
++	u64 fence_handle;
++};
++
+ struct vpu_jsm_engine_reset_context {
+ 	/* Host SSID */
+ 	u32 host_ssid;
+@@ -700,11 +924,6 @@ struct vpu_ipc_msg_payload_get_power_level_count_done {
+ 	u8 power_limit[16];
+ };
+ 
+-struct vpu_ipc_msg_payload_blob_deinit_done {
+-	/* 64-bit unique ID for the blob de-initialized. */
+-	u64 blob_id;
+-};
+-
+ /* HWS priority band setup request / response */
+ struct vpu_ipc_msg_payload_hws_priority_band_setup {
+ 	/*
+@@ -794,7 +1013,10 @@ struct vpu_ipc_msg_payload_hws_set_context_sched_properties {
+ 	u32 reserved_0;
+ 	/* Command queue id */
+ 	u64 cmdq_id;
+-	/* Priority band to assign to work of this context */
++	/*
++	 * Priority band to assign to work of this context.
++	 * Available priority bands: @see enum vpu_job_scheduling_priority_band
++	 */
+ 	u32 priority_band;
+ 	/* Inside realtime band assigns a further priority */
+ 	u32 realtime_priority_level;
+@@ -869,9 +1091,7 @@ struct vpu_ipc_msg_payload_hws_set_scheduling_log {
+ 	 */
+ 	u64 notify_index;
+ 	/*
+-	 * Enable extra events to be output to log for debug of scheduling algorithm.
+-	 * Interpreted by VPU as a boolean to enable or disable, expected values are
+-	 * 0 and 1.
++	 * Field is now deprecated, will be removed when KMD is updated to support removal
+ 	 */
+ 	u32 enable_extra_events;
+ 	/* Zero Padding */
+@@ -1243,10 +1463,10 @@ union vpu_ipc_msg_payload {
+ 	struct vpu_jsm_metric_streamer_start metric_streamer_start;
+ 	struct vpu_jsm_metric_streamer_stop metric_streamer_stop;
+ 	struct vpu_jsm_metric_streamer_update metric_streamer_update;
+-	struct vpu_ipc_msg_payload_blob_deinit blob_deinit;
+ 	struct vpu_ipc_msg_payload_ssid_release ssid_release;
+ 	struct vpu_jsm_hws_register_db hws_register_db;
+ 	struct vpu_ipc_msg_payload_job_done job_done;
++	struct vpu_ipc_msg_payload_native_fence_signalled native_fence_signalled;
+ 	struct vpu_ipc_msg_payload_engine_reset_done engine_reset_done;
+ 	struct vpu_ipc_msg_payload_engine_preempt_done engine_preempt_done;
+ 	struct vpu_ipc_msg_payload_register_db_done register_db_done;
+@@ -1254,7 +1474,6 @@ union vpu_ipc_msg_payload {
+ 	struct vpu_ipc_msg_payload_query_engine_hb_done query_engine_hb_done;
+ 	struct vpu_ipc_msg_payload_get_power_level_count_done get_power_level_count_done;
+ 	struct vpu_jsm_metric_streamer_done metric_streamer_done;
+-	struct vpu_ipc_msg_payload_blob_deinit_done blob_deinit_done;
+ 	struct vpu_ipc_msg_payload_trace_config trace_config;
+ 	struct vpu_ipc_msg_payload_trace_capability_rsp trace_capability;
+ 	struct vpu_ipc_msg_payload_trace_get_name trace_get_name;
+diff --git a/drivers/android/binder.c b/drivers/android/binder.c
+index ef353ca13c356a..bdf09e8b898d05 100644
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -6374,7 +6374,7 @@ static void print_binder_transaction_ilocked(struct seq_file *m,
+ 		seq_printf(m, " node %d", buffer->target_node->debug_id);
+ 	seq_printf(m, " size %zd:%zd offset %lx\n",
+ 		   buffer->data_size, buffer->offsets_size,
+-		   proc->alloc.buffer - buffer->user_data);
++		   buffer->user_data - proc->alloc.buffer);
+ }
+ 
+ static void print_binder_work_ilocked(struct seq_file *m,
+diff --git a/drivers/base/module.c b/drivers/base/module.c
+index 5bc71bea883a06..218aaa0964552f 100644
+--- a/drivers/base/module.c
++++ b/drivers/base/module.c
+@@ -42,16 +42,13 @@ int module_add_driver(struct module *mod, const struct device_driver *drv)
+ 	if (mod)
+ 		mk = &mod->mkobj;
+ 	else if (drv->mod_name) {
+-		struct kobject *mkobj;
+-
+-		/* Lookup built-in module entry in /sys/modules */
+-		mkobj = kset_find_obj(module_kset, drv->mod_name);
+-		if (mkobj) {
+-			mk = container_of(mkobj, struct module_kobject, kobj);
++		/* Lookup or create built-in module entry in /sys/modules */
++		mk = lookup_or_create_module_kobject(drv->mod_name);
++		if (mk) {
+ 			/* remember our module structure */
+ 			drv->p->mkobj = mk;
+-			/* kset_find_obj took a reference */
+-			kobject_put(mkobj);
++			/* lookup_or_create_module_kobject took a reference */
++			kobject_put(&mk->kobj);
+ 		}
+ 	}
+ 
+diff --git a/drivers/bluetooth/btintel_pcie.c b/drivers/bluetooth/btintel_pcie.c
+index ab465e13c1f60f..d225f0a37f985d 100644
+--- a/drivers/bluetooth/btintel_pcie.c
++++ b/drivers/bluetooth/btintel_pcie.c
+@@ -581,8 +581,10 @@ static int btintel_pcie_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
+ 		/* This is a debug event that comes from IML and OP image when it
+ 		 * starts execution. There is no need pass this event to stack.
+ 		 */
+-		if (skb->data[2] == 0x97)
++		if (skb->data[2] == 0x97) {
++			hci_recv_diag(hdev, skb);
+ 			return 0;
++		}
+ 	}
+ 
+ 	return hci_recv_frame(hdev, skb);
+@@ -598,7 +600,6 @@ static int btintel_pcie_recv_frame(struct btintel_pcie_data *data,
+ 	u8 pkt_type;
+ 	u16 plen;
+ 	u32 pcie_pkt_type;
+-	struct sk_buff *new_skb;
+ 	void *pdata;
+ 	struct hci_dev *hdev = data->hdev;
+ 
+@@ -675,24 +676,20 @@ static int btintel_pcie_recv_frame(struct btintel_pcie_data *data,
+ 
+ 	bt_dev_dbg(hdev, "pkt_type: 0x%2.2x len: %u", pkt_type, plen);
+ 
+-	new_skb = bt_skb_alloc(plen, GFP_ATOMIC);
+-	if (!new_skb) {
+-		bt_dev_err(hdev, "Failed to allocate memory for skb of len: %u",
+-			   skb->len);
+-		ret = -ENOMEM;
+-		goto exit_error;
+-	}
+-
+-	hci_skb_pkt_type(new_skb) = pkt_type;
+-	skb_put_data(new_skb, skb->data, plen);
++	hci_skb_pkt_type(skb) = pkt_type;
+ 	hdev->stat.byte_rx += plen;
++	skb_trim(skb, plen);
+ 
+ 	if (pcie_pkt_type == BTINTEL_PCIE_HCI_EVT_PKT)
+-		ret = btintel_pcie_recv_event(hdev, new_skb);
++		ret = btintel_pcie_recv_event(hdev, skb);
+ 	else
+-		ret = hci_recv_frame(hdev, new_skb);
++		ret = hci_recv_frame(hdev, skb);
++	skb = NULL; /* skb is freed in the callee  */
+ 
+ exit_error:
++	if (skb)
++		kfree_skb(skb);
++
+ 	if (ret)
+ 		hdev->stat.err_rx++;
+ 
+@@ -706,16 +703,10 @@ static void btintel_pcie_rx_work(struct work_struct *work)
+ 	struct btintel_pcie_data *data = container_of(work,
+ 					struct btintel_pcie_data, rx_work);
+ 	struct sk_buff *skb;
+-	int err;
+-	struct hci_dev *hdev = data->hdev;
+ 
+ 	/* Process the sk_buf in queue and send to the HCI layer */
+ 	while ((skb = skb_dequeue(&data->rx_skb_q))) {
+-		err = btintel_pcie_recv_frame(data, skb);
+-		if (err)
+-			bt_dev_err(hdev, "Failed to send received frame: %d",
+-				   err);
+-		kfree_skb(skb);
++		btintel_pcie_recv_frame(data, skb);
+ 	}
+ }
+ 
+@@ -770,10 +761,8 @@ static void btintel_pcie_msix_rx_handle(struct btintel_pcie_data *data)
+ 	bt_dev_dbg(hdev, "RXQ: cr_hia: %u  cr_tia: %u", cr_hia, cr_tia);
+ 
+ 	/* Check CR_TIA and CR_HIA for change */
+-	if (cr_tia == cr_hia) {
+-		bt_dev_warn(hdev, "RXQ: no new CD found");
++	if (cr_tia == cr_hia)
+ 		return;
+-	}
+ 
+ 	rxq = &data->rxq;
+ 
+@@ -809,6 +798,16 @@ static irqreturn_t btintel_pcie_msix_isr(int irq, void *data)
+ 	return IRQ_WAKE_THREAD;
+ }
+ 
++static inline bool btintel_pcie_is_rxq_empty(struct btintel_pcie_data *data)
++{
++	return data->ia.cr_hia[BTINTEL_PCIE_RXQ_NUM] == data->ia.cr_tia[BTINTEL_PCIE_RXQ_NUM];
++}
++
++static inline bool btintel_pcie_is_txackq_empty(struct btintel_pcie_data *data)
++{
++	return data->ia.cr_tia[BTINTEL_PCIE_TXQ_NUM] == data->ia.cr_hia[BTINTEL_PCIE_TXQ_NUM];
++}
++
+ static irqreturn_t btintel_pcie_irq_msix_handler(int irq, void *dev_id)
+ {
+ 	struct msix_entry *entry = dev_id;
+@@ -836,12 +835,18 @@ static irqreturn_t btintel_pcie_irq_msix_handler(int irq, void *dev_id)
+ 		btintel_pcie_msix_gp0_handler(data);
+ 
+ 	/* For TX */
+-	if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0)
++	if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0) {
+ 		btintel_pcie_msix_tx_handle(data);
++		if (!btintel_pcie_is_rxq_empty(data))
++			btintel_pcie_msix_rx_handle(data);
++	}
+ 
+ 	/* For RX */
+-	if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_1)
++	if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_1) {
+ 		btintel_pcie_msix_rx_handle(data);
++		if (!btintel_pcie_is_txackq_empty(data))
++			btintel_pcie_msix_tx_handle(data);
++	}
+ 
+ 	/*
+ 	 * Before sending the interrupt the HW disables it to prevent a nested
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 151054a718522a..7e1f03231b4c90 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -371,6 +371,42 @@ static const struct usb_device_id quirks_table[] = {
+ 	/* QCA WCN785x chipset */
+ 	{ USB_DEVICE(0x0cf3, 0xe700), .driver_info = BTUSB_QCA_WCN6855 |
+ 						     BTUSB_WIDEBAND_SPEECH },
++	{ USB_DEVICE(0x0489, 0xe0fc), .driver_info = BTUSB_QCA_WCN6855 |
++						     BTUSB_WIDEBAND_SPEECH },
++	{ USB_DEVICE(0x0489, 0xe0f3), .driver_info = BTUSB_QCA_WCN6855 |
++						     BTUSB_WIDEBAND_SPEECH },
++	{ USB_DEVICE(0x0489, 0xe100), .driver_info = BTUSB_QCA_WCN6855 |
++						     BTUSB_WIDEBAND_SPEECH },
++	{ USB_DEVICE(0x0489, 0xe103), .driver_info = BTUSB_QCA_WCN6855 |
++						     BTUSB_WIDEBAND_SPEECH },
++	{ USB_DEVICE(0x0489, 0xe10a), .driver_info = BTUSB_QCA_WCN6855 |
++						     BTUSB_WIDEBAND_SPEECH },
++	{ USB_DEVICE(0x0489, 0xe10d), .driver_info = BTUSB_QCA_WCN6855 |
++						     BTUSB_WIDEBAND_SPEECH },
++	{ USB_DEVICE(0x0489, 0xe11b), .driver_info = BTUSB_QCA_WCN6855 |
++						     BTUSB_WIDEBAND_SPEECH },
++	{ USB_DEVICE(0x0489, 0xe11c), .driver_info = BTUSB_QCA_WCN6855 |
++						     BTUSB_WIDEBAND_SPEECH },
++	{ USB_DEVICE(0x0489, 0xe11f), .driver_info = BTUSB_QCA_WCN6855 |
++						     BTUSB_WIDEBAND_SPEECH },
++	{ USB_DEVICE(0x0489, 0xe141), .driver_info = BTUSB_QCA_WCN6855 |
++						     BTUSB_WIDEBAND_SPEECH },
++	{ USB_DEVICE(0x0489, 0xe14a), .driver_info = BTUSB_QCA_WCN6855 |
++						     BTUSB_WIDEBAND_SPEECH },
++	{ USB_DEVICE(0x0489, 0xe14b), .driver_info = BTUSB_QCA_WCN6855 |
++						     BTUSB_WIDEBAND_SPEECH },
++	{ USB_DEVICE(0x0489, 0xe14d), .driver_info = BTUSB_QCA_WCN6855 |
++						     BTUSB_WIDEBAND_SPEECH },
++	{ USB_DEVICE(0x13d3, 0x3623), .driver_info = BTUSB_QCA_WCN6855 |
++						     BTUSB_WIDEBAND_SPEECH },
++	{ USB_DEVICE(0x13d3, 0x3624), .driver_info = BTUSB_QCA_WCN6855 |
++						     BTUSB_WIDEBAND_SPEECH },
++	{ USB_DEVICE(0x2c7c, 0x0130), .driver_info = BTUSB_QCA_WCN6855 |
++						     BTUSB_WIDEBAND_SPEECH },
++	{ USB_DEVICE(0x2c7c, 0x0131), .driver_info = BTUSB_QCA_WCN6855 |
++						     BTUSB_WIDEBAND_SPEECH },
++	{ USB_DEVICE(0x2c7c, 0x0132), .driver_info = BTUSB_QCA_WCN6855 |
++						     BTUSB_WIDEBAND_SPEECH },
+ 
+ 	/* Broadcom BCM2035 */
+ 	{ USB_DEVICE(0x0a5c, 0x2009), .driver_info = BTUSB_BCM92035 },
+@@ -2939,22 +2975,16 @@ static void btusb_coredump_qca(struct hci_dev *hdev)
+ 		bt_dev_err(hdev, "%s: triggle crash failed (%d)", __func__, err);
+ }
+ 
+-/*
+- * ==0: not a dump pkt.
+- * < 0: fails to handle a dump pkt
+- * > 0: otherwise.
+- */
++/* Return: 0 on success, negative errno on failure. */
+ static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb)
+ {
+-	int ret = 1;
++	int ret = 0;
+ 	u8 pkt_type;
+ 	u8 *sk_ptr;
+ 	unsigned int sk_len;
+ 	u16 seqno;
+ 	u32 dump_size;
+ 
+-	struct hci_event_hdr *event_hdr;
+-	struct hci_acl_hdr *acl_hdr;
+ 	struct qca_dump_hdr *dump_hdr;
+ 	struct btusb_data *btdata = hci_get_drvdata(hdev);
+ 	struct usb_device *udev = btdata->udev;
+@@ -2964,30 +2994,14 @@ static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb)
+ 	sk_len = skb->len;
+ 
+ 	if (pkt_type == HCI_ACLDATA_PKT) {
+-		acl_hdr = hci_acl_hdr(skb);
+-		if (le16_to_cpu(acl_hdr->handle) != QCA_MEMDUMP_ACL_HANDLE)
+-			return 0;
+ 		sk_ptr += HCI_ACL_HDR_SIZE;
+ 		sk_len -= HCI_ACL_HDR_SIZE;
+-		event_hdr = (struct hci_event_hdr *)sk_ptr;
+-	} else {
+-		event_hdr = hci_event_hdr(skb);
+ 	}
+ 
+-	if ((event_hdr->evt != HCI_VENDOR_PKT)
+-		|| (event_hdr->plen != (sk_len - HCI_EVENT_HDR_SIZE)))
+-		return 0;
+-
+ 	sk_ptr += HCI_EVENT_HDR_SIZE;
+ 	sk_len -= HCI_EVENT_HDR_SIZE;
+ 
+ 	dump_hdr = (struct qca_dump_hdr *)sk_ptr;
+-	if ((sk_len < offsetof(struct qca_dump_hdr, data))
+-		|| (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS)
+-	    || (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE))
+-		return 0;
+-
+-	/*it is dump pkt now*/
+ 	seqno = le16_to_cpu(dump_hdr->seqno);
+ 	if (seqno == 0) {
+ 		set_bit(BTUSB_HW_SSR_ACTIVE, &btdata->flags);
+@@ -3061,17 +3075,84 @@ static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb)
+ 	return ret;
+ }
+ 
++/* Return: true if the ACL packet is a dump packet, false otherwise. */
++static bool acl_pkt_is_dump_qca(struct hci_dev *hdev, struct sk_buff *skb)
++{
++	u8 *sk_ptr;
++	unsigned int sk_len;
++
++	struct hci_event_hdr *event_hdr;
++	struct hci_acl_hdr *acl_hdr;
++	struct qca_dump_hdr *dump_hdr;
++
++	sk_ptr = skb->data;
++	sk_len = skb->len;
++
++	acl_hdr = hci_acl_hdr(skb);
++	if (le16_to_cpu(acl_hdr->handle) != QCA_MEMDUMP_ACL_HANDLE)
++		return false;
++
++	sk_ptr += HCI_ACL_HDR_SIZE;
++	sk_len -= HCI_ACL_HDR_SIZE;
++	event_hdr = (struct hci_event_hdr *)sk_ptr;
++
++	if ((event_hdr->evt != HCI_VENDOR_PKT) ||
++	    (event_hdr->plen != (sk_len - HCI_EVENT_HDR_SIZE)))
++		return false;
++
++	sk_ptr += HCI_EVENT_HDR_SIZE;
++	sk_len -= HCI_EVENT_HDR_SIZE;
++
++	dump_hdr = (struct qca_dump_hdr *)sk_ptr;
++	if ((sk_len < offsetof(struct qca_dump_hdr, data)) ||
++	    (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) ||
++	    (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE))
++		return false;
++
++	return true;
++}
++
++/* Return: true if the event packet is a dump packet, false otherwise. */
++static bool evt_pkt_is_dump_qca(struct hci_dev *hdev, struct sk_buff *skb)
++{
++	u8 *sk_ptr;
++	unsigned int sk_len;
++
++	struct hci_event_hdr *event_hdr;
++	struct qca_dump_hdr *dump_hdr;
++
++	sk_ptr = skb->data;
++	sk_len = skb->len;
++
++	event_hdr = hci_event_hdr(skb);
++
++	if ((event_hdr->evt != HCI_VENDOR_PKT)
++	    || (event_hdr->plen != (sk_len - HCI_EVENT_HDR_SIZE)))
++		return false;
++
++	sk_ptr += HCI_EVENT_HDR_SIZE;
++	sk_len -= HCI_EVENT_HDR_SIZE;
++
++	dump_hdr = (struct qca_dump_hdr *)sk_ptr;
++	if ((sk_len < offsetof(struct qca_dump_hdr, data)) ||
++	    (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) ||
++	    (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE))
++		return false;
++
++	return true;
++}
++
+ static int btusb_recv_acl_qca(struct hci_dev *hdev, struct sk_buff *skb)
+ {
+-	if (handle_dump_pkt_qca(hdev, skb))
+-		return 0;
++	if (acl_pkt_is_dump_qca(hdev, skb))
++		return handle_dump_pkt_qca(hdev, skb);
+ 	return hci_recv_frame(hdev, skb);
+ }
+ 
+ static int btusb_recv_evt_qca(struct hci_dev *hdev, struct sk_buff *skb)
+ {
+-	if (handle_dump_pkt_qca(hdev, skb))
+-		return 0;
++	if (evt_pkt_is_dump_qca(hdev, skb))
++		return handle_dump_pkt_qca(hdev, skb);
+ 	return hci_recv_frame(hdev, skb);
+ }
+ 
+diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
+index 67b4e3d18ffe22..1f52bced4c2959 100644
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -534,16 +534,18 @@ void cpufreq_disable_fast_switch(struct cpufreq_policy *policy)
+ EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);
+ 
+ static unsigned int __resolve_freq(struct cpufreq_policy *policy,
+-		unsigned int target_freq, unsigned int relation)
++				   unsigned int target_freq,
++				   unsigned int min, unsigned int max,
++				   unsigned int relation)
+ {
+ 	unsigned int idx;
+ 
+-	target_freq = clamp_val(target_freq, policy->min, policy->max);
++	target_freq = clamp_val(target_freq, min, max);
+ 
+ 	if (!policy->freq_table)
+ 		return target_freq;
+ 
+-	idx = cpufreq_frequency_table_target(policy, target_freq, relation);
++	idx = cpufreq_frequency_table_target(policy, target_freq, min, max, relation);
+ 	policy->cached_resolved_idx = idx;
+ 	policy->cached_target_freq = target_freq;
+ 	return policy->freq_table[idx].frequency;
+@@ -563,7 +565,21 @@ static unsigned int __resolve_freq(struct cpufreq_policy *policy,
+ unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
+ 					 unsigned int target_freq)
+ {
+-	return __resolve_freq(policy, target_freq, CPUFREQ_RELATION_LE);
++	unsigned int min = READ_ONCE(policy->min);
++	unsigned int max = READ_ONCE(policy->max);
++
++	/*
++	 * If this function runs in parallel with cpufreq_set_policy(), it may
++	 * read policy->min before the update and policy->max after the update
++	 * or the other way around, so there is no ordering guarantee.
++	 *
++	 * Resolve this by always honoring the max (in case it comes from
++	 * thermal throttling or similar).
++	 */
++	if (unlikely(min > max))
++		min = max;
++
++	return __resolve_freq(policy, target_freq, min, max, CPUFREQ_RELATION_LE);
+ }
+ EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);
+ 
+@@ -2323,7 +2339,8 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
+ 	if (cpufreq_disabled())
+ 		return -ENODEV;
+ 
+-	target_freq = __resolve_freq(policy, target_freq, relation);
++	target_freq = __resolve_freq(policy, target_freq, policy->min,
++				     policy->max, relation);
+ 
+ 	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
+ 		 policy->cpu, target_freq, relation, old_target_freq);
+@@ -2647,11 +2664,18 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
+ 	 * Resolve policy min/max to available frequencies. It ensures
+ 	 * no frequency resolution will neither overshoot the requested maximum
+ 	 * nor undershoot the requested minimum.
++	 *
++	 * Avoid storing intermediate values in policy->max or policy->min and
++	 * compiler optimizations around them because they may be accessed
++	 * concurrently by cpufreq_driver_resolve_freq() during the update.
+ 	 */
+-	policy->min = new_data.min;
+-	policy->max = new_data.max;
+-	policy->min = __resolve_freq(policy, policy->min, CPUFREQ_RELATION_L);
+-	policy->max = __resolve_freq(policy, policy->max, CPUFREQ_RELATION_H);
++	WRITE_ONCE(policy->max, __resolve_freq(policy, new_data.max,
++					       new_data.min, new_data.max,
++					       CPUFREQ_RELATION_H));
++	new_data.min = __resolve_freq(policy, new_data.min, new_data.min,
++				      new_data.max, CPUFREQ_RELATION_L);
++	WRITE_ONCE(policy->min, new_data.min > policy->max ? policy->max : new_data.min);
++
+ 	trace_cpu_frequency_limits(policy);
+ 
+ 	cpufreq_update_pressure(policy);
+diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
+index a7c38b8b3e7890..0e65d37c923113 100644
+--- a/drivers/cpufreq/cpufreq_ondemand.c
++++ b/drivers/cpufreq/cpufreq_ondemand.c
+@@ -76,7 +76,8 @@ static unsigned int generic_powersave_bias_target(struct cpufreq_policy *policy,
+ 		return freq_next;
+ 	}
+ 
+-	index = cpufreq_frequency_table_target(policy, freq_next, relation);
++	index = cpufreq_frequency_table_target(policy, freq_next, policy->min,
++					       policy->max, relation);
+ 	freq_req = freq_table[index].frequency;
+ 	freq_reduc = freq_req * od_tuners->powersave_bias / 1000;
+ 	freq_avg = freq_req - freq_reduc;
+diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
+index 10e80d912b8d85..178e17009a16ef 100644
+--- a/drivers/cpufreq/freq_table.c
++++ b/drivers/cpufreq/freq_table.c
+@@ -116,8 +116,8 @@ int cpufreq_generic_frequency_table_verify(struct cpufreq_policy_data *policy)
+ EXPORT_SYMBOL_GPL(cpufreq_generic_frequency_table_verify);
+ 
+ int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
+-				 unsigned int target_freq,
+-				 unsigned int relation)
++				 unsigned int target_freq, unsigned int min,
++				 unsigned int max, unsigned int relation)
+ {
+ 	struct cpufreq_frequency_table optimal = {
+ 		.driver_data = ~0,
+@@ -148,7 +148,7 @@ int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
+ 	cpufreq_for_each_valid_entry_idx(pos, table, i) {
+ 		freq = pos->frequency;
+ 
+-		if ((freq < policy->min) || (freq > policy->max))
++		if (freq < min || freq > max)
+ 			continue;
+ 		if (freq == target_freq) {
+ 			optimal.driver_data = i;
+diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
+index 400337f3b572da..54e7310454cc64 100644
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -600,6 +600,9 @@ static bool turbo_is_disabled(void)
+ {
+ 	u64 misc_en;
+ 
++	if (!cpu_feature_enabled(X86_FEATURE_IDA))
++		return true;
++
+ 	rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
+ 
+ 	return !!(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
+diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
+index fe89f5c4837f49..8420862c90a4d5 100644
+--- a/drivers/edac/altera_edac.c
++++ b/drivers/edac/altera_edac.c
+@@ -99,7 +99,7 @@ static irqreturn_t altr_sdram_mc_err_handler(int irq, void *dev_id)
+ 	if (status & priv->ecc_stat_ce_mask) {
+ 		regmap_read(drvdata->mc_vbase, priv->ecc_saddr_offset,
+ 			    &err_addr);
+-		if (priv->ecc_uecnt_offset)
++		if (priv->ecc_cecnt_offset)
+ 			regmap_read(drvdata->mc_vbase,  priv->ecc_cecnt_offset,
+ 				    &err_count);
+ 		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, err_count,
+@@ -1005,9 +1005,6 @@ altr_init_a10_ecc_block(struct device_node *np, u32 irq_mask,
+ 		}
+ 	}
+ 
+-	/* Interrupt mode set to every SBERR */
+-	regmap_write(ecc_mgr_map, ALTR_A10_ECC_INTMODE_OFST,
+-		     ALTR_A10_ECC_INTMODE);
+ 	/* Enable ECC */
+ 	ecc_set_bits(ecc_ctrl_en_mask, (ecc_block_base +
+ 					ALTR_A10_ECC_CTRL_OFST));
+@@ -2127,6 +2124,10 @@ static int altr_edac_a10_probe(struct platform_device *pdev)
+ 		return PTR_ERR(edac->ecc_mgr_map);
+ 	}
+ 
++	/* Set irq mask for DDR SBE to avoid any pending irq before registration */
++	regmap_write(edac->ecc_mgr_map, A10_SYSMGR_ECC_INTMASK_SET_OFST,
++		     (A10_SYSMGR_ECC_INTMASK_SDMMCB | A10_SYSMGR_ECC_INTMASK_DDR0));
++
+ 	edac->irq_chip.name = pdev->dev.of_node->name;
+ 	edac->irq_chip.irq_mask = a10_eccmgr_irq_mask;
+ 	edac->irq_chip.irq_unmask = a10_eccmgr_irq_unmask;
+diff --git a/drivers/edac/altera_edac.h b/drivers/edac/altera_edac.h
+index 3727e72c8c2e70..7248d24c4908d7 100644
+--- a/drivers/edac/altera_edac.h
++++ b/drivers/edac/altera_edac.h
+@@ -249,6 +249,8 @@ struct altr_sdram_mc_data {
+ #define A10_SYSMGR_ECC_INTMASK_SET_OFST   0x94
+ #define A10_SYSMGR_ECC_INTMASK_CLR_OFST   0x98
+ #define A10_SYSMGR_ECC_INTMASK_OCRAM      BIT(1)
++#define A10_SYSMGR_ECC_INTMASK_SDMMCB     BIT(16)
++#define A10_SYSMGR_ECC_INTMASK_DDR0       BIT(17)
+ 
+ #define A10_SYSMGR_ECC_INTSTAT_SERR_OFST  0x9C
+ #define A10_SYSMGR_ECC_INTSTAT_DERR_OFST  0xA0
+diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c
+index 2c2ec3c35f1561..dce448687e28e7 100644
+--- a/drivers/firmware/arm_ffa/driver.c
++++ b/drivers/firmware/arm_ffa/driver.c
+@@ -280,7 +280,8 @@ __ffa_partition_info_get(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3,
+ 			memcpy(buffer + idx, drv_info->rx_buffer + idx * sz,
+ 			       buf_sz);
+ 
+-	ffa_rx_release();
++	if (!(flags & PARTITION_INFO_GET_RETURN_COUNT_ONLY))
++		ffa_rx_release();
+ 
+ 	mutex_unlock(&drv_info->rx_lock);
+ 
+diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c
+index 157172a5f2b577..782c9bec8361c5 100644
+--- a/drivers/firmware/arm_scmi/bus.c
++++ b/drivers/firmware/arm_scmi/bus.c
+@@ -260,6 +260,9 @@ static struct scmi_device *scmi_child_dev_find(struct device *parent,
+ 	if (!dev)
+ 		return NULL;
+ 
++	/* Drop the refcnt bumped implicitly by device_find_child */
++	put_device(dev);
++
+ 	return to_scmi_dev(dev);
+ }
+ 
+diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
+index ae53f26da945f8..1160a439e92a85 100644
+--- a/drivers/gpu/drm/Kconfig
++++ b/drivers/gpu/drm/Kconfig
+@@ -185,7 +185,7 @@ config DRM_DEBUG_DP_MST_TOPOLOGY_REFS
+         bool "Enable refcount backtrace history in the DP MST helpers"
+ 	depends on STACKTRACE_SUPPORT
+         select STACKDEPOT
+-        depends on DRM_KMS_HELPER
++        select DRM_KMS_HELPER
+         depends on DEBUG_KERNEL
+         depends on EXPERT
+         help
+diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c
+index 41421da63a0846..a11f556b3ff113 100644
+--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c
++++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c
+@@ -361,7 +361,7 @@ static void nbio_v7_11_get_clockgating_state(struct amdgpu_device *adev,
+ 		*flags |= AMD_CG_SUPPORT_BIF_LS;
+ }
+ 
+-#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
++#define MMIO_REG_HOLE_OFFSET 0x44000
+ 
+ static void nbio_v7_11_set_reg_remap(struct amdgpu_device *adev)
+ {
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index f3cbff86155705..115fb3bc456437 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -1887,26 +1887,6 @@ static enum dmub_ips_disable_type dm_get_default_ips_mode(
+ 
+ 	switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ 	case IP_VERSION(3, 5, 0):
+-		/*
+-		 * On DCN35 systems with Z8 enabled, it's possible for IPS2 + Z8 to
+-		 * cause a hard hang. A fix exists for newer PMFW.
+-		 *
+-		 * As a workaround, for non-fixed PMFW, force IPS1+RCG as the deepest
+-		 * IPS state in all cases, except for s0ix and all displays off (DPMS),
+-		 * where IPS2 is allowed.
+-		 *
+-		 * When checking pmfw version, use the major and minor only.
+-		 */
+-		if ((adev->pm.fw_version & 0x00FFFF00) < 0x005D6300)
+-			ret = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF;
+-		else if (amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(11, 5, 0))
+-			/*
+-			 * Other ASICs with DCN35 that have residency issues with
+-			 * IPS2 in idle.
+-			 * We want them to use IPS2 only in display off cases.
+-			 */
+-			ret =  DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF;
+-		break;
+ 	case IP_VERSION(3, 5, 1):
+ 		ret =  DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF;
+ 		break;
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+index c0dc2324404908..10ba4d7bf63254 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+@@ -172,7 +172,10 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
+ 	struct mod_hdcp_display_adjustment display_adjust;
+ 	unsigned int conn_index = aconnector->base.index;
+ 
+-	mutex_lock(&hdcp_w->mutex);
++	guard(mutex)(&hdcp_w->mutex);
++	drm_connector_get(&aconnector->base);
++	if (hdcp_w->aconnector[conn_index])
++		drm_connector_put(&hdcp_w->aconnector[conn_index]->base);
+ 	hdcp_w->aconnector[conn_index] = aconnector;
+ 
+ 	memset(&link_adjust, 0, sizeof(link_adjust));
+@@ -209,7 +212,6 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
+ 	mod_hdcp_update_display(&hdcp_w->hdcp, conn_index, &link_adjust, &display_adjust, &hdcp_w->output);
+ 
+ 	process_output(hdcp_w);
+-	mutex_unlock(&hdcp_w->mutex);
+ }
+ 
+ static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work,
+@@ -220,8 +222,7 @@ static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work,
+ 	struct drm_connector_state *conn_state = aconnector->base.state;
+ 	unsigned int conn_index = aconnector->base.index;
+ 
+-	mutex_lock(&hdcp_w->mutex);
+-	hdcp_w->aconnector[conn_index] = aconnector;
++	guard(mutex)(&hdcp_w->mutex);
+ 
+ 	/* the removal of display will invoke auth reset -> hdcp destroy and
+ 	 * we'd expect the Content Protection (CP) property changed back to
+@@ -237,9 +238,11 @@ static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work,
+ 	}
+ 
+ 	mod_hdcp_remove_display(&hdcp_w->hdcp, aconnector->base.index, &hdcp_w->output);
+-
++	if (hdcp_w->aconnector[conn_index]) {
++		drm_connector_put(&hdcp_w->aconnector[conn_index]->base);
++		hdcp_w->aconnector[conn_index] = NULL;
++	}
+ 	process_output(hdcp_w);
+-	mutex_unlock(&hdcp_w->mutex);
+ }
+ 
+ void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index)
+@@ -247,7 +250,7 @@ void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_inde
+ 	struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
+ 	unsigned int conn_index;
+ 
+-	mutex_lock(&hdcp_w->mutex);
++	guard(mutex)(&hdcp_w->mutex);
+ 
+ 	mod_hdcp_reset_connection(&hdcp_w->hdcp,  &hdcp_w->output);
+ 
+@@ -256,11 +259,13 @@ void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_inde
+ 	for (conn_index = 0; conn_index < AMDGPU_DM_MAX_DISPLAY_INDEX; conn_index++) {
+ 		hdcp_w->encryption_status[conn_index] =
+ 			MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
++		if (hdcp_w->aconnector[conn_index]) {
++			drm_connector_put(&hdcp_w->aconnector[conn_index]->base);
++			hdcp_w->aconnector[conn_index] = NULL;
++		}
+ 	}
+ 
+ 	process_output(hdcp_w);
+-
+-	mutex_unlock(&hdcp_w->mutex);
+ }
+ 
+ void hdcp_handle_cpirq(struct hdcp_workqueue *hdcp_work, unsigned int link_index)
+@@ -277,7 +282,7 @@ static void event_callback(struct work_struct *work)
+ 	hdcp_work = container_of(to_delayed_work(work), struct hdcp_workqueue,
+ 				 callback_dwork);
+ 
+-	mutex_lock(&hdcp_work->mutex);
++	guard(mutex)(&hdcp_work->mutex);
+ 
+ 	cancel_delayed_work(&hdcp_work->callback_dwork);
+ 
+@@ -285,8 +290,6 @@ static void event_callback(struct work_struct *work)
+ 			       &hdcp_work->output);
+ 
+ 	process_output(hdcp_work);
+-
+-	mutex_unlock(&hdcp_work->mutex);
+ }
+ 
+ static void event_property_update(struct work_struct *work)
+@@ -323,7 +326,7 @@ static void event_property_update(struct work_struct *work)
+ 			continue;
+ 
+ 		drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+-		mutex_lock(&hdcp_work->mutex);
++		guard(mutex)(&hdcp_work->mutex);
+ 
+ 		if (conn_state->commit) {
+ 			ret = wait_for_completion_interruptible_timeout(&conn_state->commit->hw_done,
+@@ -355,7 +358,6 @@ static void event_property_update(struct work_struct *work)
+ 			drm_hdcp_update_content_protection(connector,
+ 							   DRM_MODE_CONTENT_PROTECTION_DESIRED);
+ 		}
+-		mutex_unlock(&hdcp_work->mutex);
+ 		drm_modeset_unlock(&dev->mode_config.connection_mutex);
+ 	}
+ }
+@@ -368,7 +370,7 @@ static void event_property_validate(struct work_struct *work)
+ 	struct amdgpu_dm_connector *aconnector;
+ 	unsigned int conn_index;
+ 
+-	mutex_lock(&hdcp_work->mutex);
++	guard(mutex)(&hdcp_work->mutex);
+ 
+ 	for (conn_index = 0; conn_index < AMDGPU_DM_MAX_DISPLAY_INDEX;
+ 	     conn_index++) {
+@@ -408,8 +410,6 @@ static void event_property_validate(struct work_struct *work)
+ 			schedule_work(&hdcp_work->property_update_work);
+ 		}
+ 	}
+-
+-	mutex_unlock(&hdcp_work->mutex);
+ }
+ 
+ static void event_watchdog_timer(struct work_struct *work)
+@@ -420,7 +420,7 @@ static void event_watchdog_timer(struct work_struct *work)
+ 				 struct hdcp_workqueue,
+ 				      watchdog_timer_dwork);
+ 
+-	mutex_lock(&hdcp_work->mutex);
++	guard(mutex)(&hdcp_work->mutex);
+ 
+ 	cancel_delayed_work(&hdcp_work->watchdog_timer_dwork);
+ 
+@@ -429,8 +429,6 @@ static void event_watchdog_timer(struct work_struct *work)
+ 			       &hdcp_work->output);
+ 
+ 	process_output(hdcp_work);
+-
+-	mutex_unlock(&hdcp_work->mutex);
+ }
+ 
+ static void event_cpirq(struct work_struct *work)
+@@ -439,13 +437,11 @@ static void event_cpirq(struct work_struct *work)
+ 
+ 	hdcp_work = container_of(work, struct hdcp_workqueue, cpirq_work);
+ 
+-	mutex_lock(&hdcp_work->mutex);
++	guard(mutex)(&hdcp_work->mutex);
+ 
+ 	mod_hdcp_process_event(&hdcp_work->hdcp, MOD_HDCP_EVENT_CPIRQ, &hdcp_work->output);
+ 
+ 	process_output(hdcp_work);
+-
+-	mutex_unlock(&hdcp_work->mutex);
+ }
+ 
+ void hdcp_destroy(struct kobject *kobj, struct hdcp_workqueue *hdcp_work)
+@@ -479,7 +475,7 @@ static bool enable_assr(void *handle, struct dc_link *link)
+ 
+ 	dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.context.mem_context.shared_buf;
+ 
+-	mutex_lock(&psp->dtm_context.mutex);
++	guard(mutex)(&psp->dtm_context.mutex);
+ 	memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
+ 
+ 	dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_ASSR_ENABLE;
+@@ -494,8 +490,6 @@ static bool enable_assr(void *handle, struct dc_link *link)
+ 		res = false;
+ 	}
+ 
+-	mutex_unlock(&psp->dtm_context.mutex);
+-
+ 	return res;
+ }
+ 
+@@ -504,6 +498,7 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
+ 	struct hdcp_workqueue *hdcp_work = handle;
+ 	struct amdgpu_dm_connector *aconnector = config->dm_stream_ctx;
+ 	int link_index = aconnector->dc_link->link_index;
++	unsigned int conn_index = aconnector->base.index;
+ 	struct mod_hdcp_display *display = &hdcp_work[link_index].display;
+ 	struct mod_hdcp_link *link = &hdcp_work[link_index].link;
+ 	struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
+@@ -557,13 +552,14 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
+ 			 (!!aconnector->base.state) ?
+ 			 aconnector->base.state->hdcp_content_type : -1);
+ 
+-	mutex_lock(&hdcp_w->mutex);
++	guard(mutex)(&hdcp_w->mutex);
+ 
+ 	mod_hdcp_add_display(&hdcp_w->hdcp, link, display, &hdcp_w->output);
+-
++	drm_connector_get(&aconnector->base);
++	if (hdcp_w->aconnector[conn_index])
++		drm_connector_put(&hdcp_w->aconnector[conn_index]->base);
++	hdcp_w->aconnector[conn_index] = aconnector;
+ 	process_output(hdcp_w);
+-	mutex_unlock(&hdcp_w->mutex);
+-
+ }
+ 
+ /**
+diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
+index ce82c9451dfe7d..e2cf118ff01d33 100644
+--- a/drivers/gpu/drm/drm_file.c
++++ b/drivers/gpu/drm/drm_file.c
+@@ -938,6 +938,10 @@ void drm_show_fdinfo(struct seq_file *m, struct file *f)
+ 	struct drm_file *file = f->private_data;
+ 	struct drm_device *dev = file->minor->dev;
+ 	struct drm_printer p = drm_seq_file_printer(m);
++	int idx;
++
++	if (!drm_dev_enter(dev, &idx))
++		return;
+ 
+ 	drm_printf(&p, "drm-driver:\t%s\n", dev->driver->name);
+ 	drm_printf(&p, "drm-client-id:\t%llu\n", file->client_id);
+@@ -952,6 +956,8 @@ void drm_show_fdinfo(struct seq_file *m, struct file *f)
+ 
+ 	if (dev->driver->show_fdinfo)
+ 		dev->driver->show_fdinfo(&p, file);
++
++	drm_dev_exit(idx);
+ }
+ EXPORT_SYMBOL(drm_show_fdinfo);
+ 
+diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c
+index 34bca756757660..3ea9f23b4f67af 100644
+--- a/drivers/gpu/drm/drm_mipi_dbi.c
++++ b/drivers/gpu/drm/drm_mipi_dbi.c
+@@ -404,12 +404,16 @@ static void mipi_dbi_blank(struct mipi_dbi_dev *dbidev)
+ 	u16 height = drm->mode_config.min_height;
+ 	u16 width = drm->mode_config.min_width;
+ 	struct mipi_dbi *dbi = &dbidev->dbi;
+-	size_t len = width * height * 2;
++	const struct drm_format_info *dst_format;
++	size_t len;
+ 	int idx;
+ 
+ 	if (!drm_dev_enter(drm, &idx))
+ 		return;
+ 
++	dst_format = drm_format_info(dbidev->pixel_format);
++	len = drm_format_info_min_pitch(dst_format, 0, width) * height;
++
+ 	memset(dbidev->tx_buf, 0, len);
+ 
+ 	mipi_dbi_set_window_address(dbidev, 0, width - 1, 0, height - 1);
+diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.h b/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.h
+index 9aae779c4da318..4969d3de2bac3d 100644
+--- a/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.h
++++ b/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.h
+@@ -23,6 +23,7 @@ int intel_pxp_gsccs_init(struct intel_pxp *pxp);
+ 
+ int intel_pxp_gsccs_create_session(struct intel_pxp *pxp, int arb_session_id);
+ void intel_pxp_gsccs_end_arb_fw_session(struct intel_pxp *pxp, u32 arb_session_id);
++bool intel_pxp_gsccs_is_ready_for_sessions(struct intel_pxp *pxp);
+ 
+ #else
+ static inline void intel_pxp_gsccs_fini(struct intel_pxp *pxp)
+@@ -34,8 +35,11 @@ static inline int intel_pxp_gsccs_init(struct intel_pxp *pxp)
+ 	return 0;
+ }
+ 
+-#endif
++static inline bool intel_pxp_gsccs_is_ready_for_sessions(struct intel_pxp *pxp)
++{
++	return false;
++}
+ 
+-bool intel_pxp_gsccs_is_ready_for_sessions(struct intel_pxp *pxp);
++#endif
+ 
+ #endif /*__INTEL_PXP_GSCCS_H__ */
+diff --git a/drivers/gpu/drm/meson/meson_vclk.c b/drivers/gpu/drm/meson/meson_vclk.c
+index 2a942dc6a6dc23..2a82119eb58ed8 100644
+--- a/drivers/gpu/drm/meson/meson_vclk.c
++++ b/drivers/gpu/drm/meson/meson_vclk.c
+@@ -790,13 +790,13 @@ meson_vclk_vic_supported_freq(struct meson_drm *priv, unsigned int phy_freq,
+ 				 FREQ_1000_1001(params[i].pixel_freq));
+ 		DRM_DEBUG_DRIVER("i = %d phy_freq = %d alt = %d\n",
+ 				 i, params[i].phy_freq,
+-				 FREQ_1000_1001(params[i].phy_freq/1000)*1000);
++				 FREQ_1000_1001(params[i].phy_freq/10)*10);
+ 		/* Match strict frequency */
+ 		if (phy_freq == params[i].phy_freq &&
+ 		    vclk_freq == params[i].vclk_freq)
+ 			return MODE_OK;
+ 		/* Match 1000/1001 variant */
+-		if (phy_freq == (FREQ_1000_1001(params[i].phy_freq/1000)*1000) &&
++		if (phy_freq == (FREQ_1000_1001(params[i].phy_freq/10)*10) &&
+ 		    vclk_freq == FREQ_1000_1001(params[i].vclk_freq))
+ 			return MODE_OK;
+ 	}
+@@ -1070,7 +1070,7 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
+ 
+ 	for (freq = 0 ; params[freq].pixel_freq ; ++freq) {
+ 		if ((phy_freq == params[freq].phy_freq ||
+-		     phy_freq == FREQ_1000_1001(params[freq].phy_freq/1000)*1000) &&
++		     phy_freq == FREQ_1000_1001(params[freq].phy_freq/10)*10) &&
+ 		    (vclk_freq == params[freq].vclk_freq ||
+ 		     vclk_freq == FREQ_1000_1001(params[freq].vclk_freq))) {
+ 			if (vclk_freq != params[freq].vclk_freq)
+diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
+index 7cc84472cecec2..edddfc036c6d1e 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
++++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
+@@ -90,7 +90,7 @@ nouveau_fence_context_kill(struct nouveau_fence_chan *fctx, int error)
+ 	while (!list_empty(&fctx->pending)) {
+ 		fence = list_entry(fctx->pending.next, typeof(*fence), head);
+ 
+-		if (error)
++		if (error && !dma_fence_is_signaled_locked(&fence->base))
+ 			dma_fence_set_error(&fence->base, error);
+ 
+ 		if (nouveau_fence_signal(fence))
+diff --git a/drivers/gpu/drm/tests/drm_gem_shmem_test.c b/drivers/gpu/drm/tests/drm_gem_shmem_test.c
+index fd4215e2f982d2..925fbc2cda700a 100644
+--- a/drivers/gpu/drm/tests/drm_gem_shmem_test.c
++++ b/drivers/gpu/drm/tests/drm_gem_shmem_test.c
+@@ -216,6 +216,9 @@ static void drm_gem_shmem_test_get_pages_sgt(struct kunit *test)
+ 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sgt);
+ 	KUNIT_EXPECT_NULL(test, shmem->sgt);
+ 
++	ret = kunit_add_action_or_reset(test, kfree_wrapper, sgt);
++	KUNIT_ASSERT_EQ(test, ret, 0);
++
+ 	ret = kunit_add_action_or_reset(test, sg_free_table_wrapper, sgt);
+ 	KUNIT_ASSERT_EQ(test, ret, 0);
+ 
+diff --git a/drivers/gpu/drm/xe/xe_hw_engine.c b/drivers/gpu/drm/xe/xe_hw_engine.c
+index b11bc0f00dfda1..0248597c6269a3 100644
+--- a/drivers/gpu/drm/xe/xe_hw_engine.c
++++ b/drivers/gpu/drm/xe/xe_hw_engine.c
+@@ -381,12 +381,6 @@ xe_hw_engine_setup_default_lrc_state(struct xe_hw_engine *hwe)
+ 				 blit_cctl_val,
+ 				 XE_RTP_ACTION_FLAG(ENGINE_BASE)))
+ 		},
+-		/* Use Fixed slice CCS mode */
+-		{ XE_RTP_NAME("RCU_MODE_FIXED_SLICE_CCS_MODE"),
+-		  XE_RTP_RULES(FUNC(xe_hw_engine_match_fixed_cslice_mode)),
+-		  XE_RTP_ACTIONS(FIELD_SET(RCU_MODE, RCU_MODE_FIXED_SLICE_CCS_MODE,
+-					   RCU_MODE_FIXED_SLICE_CCS_MODE))
+-		},
+ 		/* Disable WMTP if HW doesn't support it */
+ 		{ XE_RTP_NAME("DISABLE_WMTP_ON_UNSUPPORTED_HW"),
+ 		  XE_RTP_RULES(FUNC(xe_rtp_cfeg_wmtp_disabled)),
+@@ -454,6 +448,12 @@ hw_engine_setup_default_state(struct xe_hw_engine *hwe)
+ 		  XE_RTP_ACTIONS(SET(CSFE_CHICKEN1(0), CS_PRIORITY_MEM_READ,
+ 				     XE_RTP_ACTION_FLAG(ENGINE_BASE)))
+ 		},
++		/* Use Fixed slice CCS mode */
++		{ XE_RTP_NAME("RCU_MODE_FIXED_SLICE_CCS_MODE"),
++		  XE_RTP_RULES(FUNC(xe_hw_engine_match_fixed_cslice_mode)),
++		  XE_RTP_ACTIONS(FIELD_SET(RCU_MODE, RCU_MODE_FIXED_SLICE_CCS_MODE,
++					   RCU_MODE_FIXED_SLICE_CCS_MODE))
++		},
+ 		{}
+ 	};
+ 
+diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c
+index 976d43f73f3830..86b41cd48ad4cb 100644
+--- a/drivers/i2c/busses/i2c-imx-lpi2c.c
++++ b/drivers/i2c/busses/i2c-imx-lpi2c.c
+@@ -652,9 +652,9 @@ static int lpi2c_imx_probe(struct platform_device *pdev)
+ 	return 0;
+ 
+ rpm_disable:
+-	pm_runtime_put(&pdev->dev);
+-	pm_runtime_disable(&pdev->dev);
+ 	pm_runtime_dont_use_autosuspend(&pdev->dev);
++	pm_runtime_put_sync(&pdev->dev);
++	pm_runtime_disable(&pdev->dev);
+ 
+ 	return ret;
+ }
+diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
+index dbe2d13972feff..ff11cd7e5c068c 100644
+--- a/drivers/iommu/amd/init.c
++++ b/drivers/iommu/amd/init.c
+@@ -3655,6 +3655,14 @@ static int __init parse_ivrs_acpihid(char *str)
+ 	while (*uid == '0' && *(uid + 1))
+ 		uid++;
+ 
++	if (strlen(hid) >= ACPIHID_HID_LEN) {
++		pr_err("Invalid command line: hid is too long\n");
++		return 1;
++	} else if (strlen(uid) >= ACPIHID_UID_LEN) {
++		pr_err("Invalid command line: uid is too long\n");
++		return 1;
++	}
++
+ 	i = early_acpihid_map_size++;
+ 	memcpy(early_acpihid_map[i].hid, hid, strlen(hid));
+ 	memcpy(early_acpihid_map[i].uid, uid, strlen(uid));
+diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
+index a7c36654dee5a5..32f3e91a7d7f5d 100644
+--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
++++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
+@@ -397,6 +397,12 @@ struct iommu_domain *arm_smmu_sva_domain_alloc(struct device *dev,
+ 		return ERR_CAST(smmu_domain);
+ 	smmu_domain->domain.type = IOMMU_DOMAIN_SVA;
+ 	smmu_domain->domain.ops = &arm_smmu_sva_domain_ops;
++
++	/*
++	 * Choose page_size as the leaf page size for invalidation when
++	 * ARM_SMMU_FEAT_RANGE_INV is present
++	 */
++	smmu_domain->domain.pgsize_bitmap = PAGE_SIZE;
+ 	smmu_domain->smmu = smmu;
+ 
+ 	ret = xa_alloc(&arm_smmu_asid_xa, &asid, smmu_domain,
+diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+index 6bf8ecbbe0c263..780e2d9e4ea819 100644
+--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
++++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+@@ -3220,6 +3220,7 @@ static int arm_smmu_insert_master(struct arm_smmu_device *smmu,
+ 	mutex_lock(&smmu->streams_mutex);
+ 	for (i = 0; i < fwspec->num_ids; i++) {
+ 		struct arm_smmu_stream *new_stream = &master->streams[i];
++		struct rb_node *existing;
+ 		u32 sid = fwspec->ids[i];
+ 
+ 		new_stream->id = sid;
+@@ -3230,10 +3231,20 @@ static int arm_smmu_insert_master(struct arm_smmu_device *smmu,
+ 			break;
+ 
+ 		/* Insert into SID tree */
+-		if (rb_find_add(&new_stream->node, &smmu->streams,
+-				arm_smmu_streams_cmp_node)) {
+-			dev_warn(master->dev, "stream %u already in tree\n",
+-				 sid);
++		existing = rb_find_add(&new_stream->node, &smmu->streams,
++				       arm_smmu_streams_cmp_node);
++		if (existing) {
++			struct arm_smmu_master *existing_master =
++				rb_entry(existing, struct arm_smmu_stream, node)
++					->master;
++
++			/* Bridged PCI devices may end up with duplicated IDs */
++			if (existing_master == master)
++				continue;
++
++			dev_warn(master->dev,
++				 "stream %u already in tree from dev %s\n", sid,
++				 dev_name(existing_master->dev));
+ 			ret = -EINVAL;
+ 			break;
+ 		}
+diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
+index 038a66388564a8..157542c07aaafa 100644
+--- a/drivers/iommu/intel/iommu.c
++++ b/drivers/iommu/intel/iommu.c
+@@ -4666,6 +4666,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_igfx);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_igfx);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_igfx);
+ 
++/* QM57/QS57 integrated gfx malfunctions with dmar */
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_iommu_igfx);
++
+ /* Broadwell igfx malfunctions with dmar */
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1606, quirk_iommu_igfx);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160B, quirk_iommu_igfx);
+@@ -4743,7 +4746,6 @@ static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
+ 	}
+ }
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
+-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
+ 
+diff --git a/drivers/irqchip/irq-qcom-mpm.c b/drivers/irqchip/irq-qcom-mpm.c
+index 7942d8eb3d00ea..f772deb9cba574 100644
+--- a/drivers/irqchip/irq-qcom-mpm.c
++++ b/drivers/irqchip/irq-qcom-mpm.c
+@@ -227,6 +227,9 @@ static int qcom_mpm_alloc(struct irq_domain *domain, unsigned int virq,
+ 	if (ret)
+ 		return ret;
+ 
++	if (pin == GPIO_NO_WAKE_IRQ)
++		return irq_domain_disconnect_hierarchy(domain, virq);
++
+ 	ret = irq_domain_set_hwirq_and_chip(domain, virq, pin,
+ 					    &qcom_mpm_chip, priv);
+ 	if (ret)
+diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
+index 23e0b71b991e75..ca60ef209df837 100644
+--- a/drivers/md/dm-bufio.c
++++ b/drivers/md/dm-bufio.c
+@@ -68,6 +68,8 @@
+ #define LIST_DIRTY	1
+ #define LIST_SIZE	2
+ 
++#define SCAN_RESCHED_CYCLE	16
++
+ /*--------------------------------------------------------------*/
+ 
+ /*
+@@ -2414,7 +2416,12 @@ static void __scan(struct dm_bufio_client *c)
+ 
+ 			atomic_long_dec(&c->need_shrink);
+ 			freed++;
+-			cond_resched();
++
++			if (unlikely(freed % SCAN_RESCHED_CYCLE == 0)) {
++				dm_bufio_unlock(c);
++				cond_resched();
++				dm_bufio_lock(c);
++			}
+ 		}
+ 	}
+ }
+diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
+index b35b779b170443..450e1a7e7bac7a 100644
+--- a/drivers/md/dm-integrity.c
++++ b/drivers/md/dm-integrity.c
+@@ -5173,7 +5173,7 @@ static void dm_integrity_dtr(struct dm_target *ti)
+ 	BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
+ 	BUG_ON(!list_empty(&ic->wait_list));
+ 
+-	if (ic->mode == 'B')
++	if (ic->mode == 'B' && ic->bitmap_flush_work.work.func)
+ 		cancel_delayed_work_sync(&ic->bitmap_flush_work);
+ 	if (ic->metadata_wq)
+ 		destroy_workqueue(ic->metadata_wq);
+diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
+index dbd39b9722b912..9cacc49f2cb04e 100644
+--- a/drivers/md/dm-table.c
++++ b/drivers/md/dm-table.c
+@@ -523,8 +523,9 @@ static char **realloc_argv(unsigned int *size, char **old_argv)
+ 		gfp = GFP_NOIO;
+ 	}
+ 	argv = kmalloc_array(new_size, sizeof(*argv), gfp);
+-	if (argv && old_argv) {
+-		memcpy(argv, old_argv, *size * sizeof(*argv));
++	if (argv) {
++		if (old_argv)
++			memcpy(argv, old_argv, *size * sizeof(*argv));
+ 		*size = new_size;
+ 	}
+ 
+diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
+index f73b84bae0c4c7..6ebb3d1eeb4d6f 100644
+--- a/drivers/mmc/host/renesas_sdhi_core.c
++++ b/drivers/mmc/host/renesas_sdhi_core.c
+@@ -1112,26 +1112,26 @@ int renesas_sdhi_probe(struct platform_device *pdev,
+ 	num_irqs = platform_irq_count(pdev);
+ 	if (num_irqs < 0) {
+ 		ret = num_irqs;
+-		goto eirq;
++		goto edisclk;
+ 	}
+ 
+ 	/* There must be at least one IRQ source */
+ 	if (!num_irqs) {
+ 		ret = -ENXIO;
+-		goto eirq;
++		goto edisclk;
+ 	}
+ 
+ 	for (i = 0; i < num_irqs; i++) {
+ 		irq = platform_get_irq(pdev, i);
+ 		if (irq < 0) {
+ 			ret = irq;
+-			goto eirq;
++			goto edisclk;
+ 		}
+ 
+ 		ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_irq, 0,
+ 				       dev_name(&pdev->dev), host);
+ 		if (ret)
+-			goto eirq;
++			goto edisclk;
+ 	}
+ 
+ 	ret = tmio_mmc_host_probe(host);
+@@ -1143,8 +1143,6 @@ int renesas_sdhi_probe(struct platform_device *pdev,
+ 
+ 	return ret;
+ 
+-eirq:
+-	tmio_mmc_host_remove(host);
+ edisclk:
+ 	renesas_sdhi_clk_disable(host);
+ efree:
+diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
+index 940f1b71226d64..7b35d24c38d765 100644
+--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
++++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
+@@ -1543,7 +1543,7 @@ static void vsc9959_tas_clock_adjust(struct ocelot *ocelot)
+ 	struct tc_taprio_qopt_offload *taprio;
+ 	struct ocelot_port *ocelot_port;
+ 	struct timespec64 base_ts;
+-	int port;
++	int i, port;
+ 	u32 val;
+ 
+ 	mutex_lock(&ocelot->fwd_domain_lock);
+@@ -1575,6 +1575,9 @@ static void vsc9959_tas_clock_adjust(struct ocelot *ocelot)
+ 			   QSYS_PARAM_CFG_REG_3_BASE_TIME_SEC_MSB_M,
+ 			   QSYS_PARAM_CFG_REG_3);
+ 
++		for (i = 0; i < taprio->num_entries; i++)
++			vsc9959_tas_gcl_set(ocelot, i, &taprio->entries[i]);
++
+ 		ocelot_rmw(ocelot, QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE,
+ 			   QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE,
+ 			   QSYS_TAS_PARAM_CFG_CTRL);
+diff --git a/drivers/net/ethernet/amd/pds_core/auxbus.c b/drivers/net/ethernet/amd/pds_core/auxbus.c
+index b76a9b7e0aed66..889a18962270aa 100644
+--- a/drivers/net/ethernet/amd/pds_core/auxbus.c
++++ b/drivers/net/ethernet/amd/pds_core/auxbus.c
+@@ -172,34 +172,31 @@ static struct pds_auxiliary_dev *pdsc_auxbus_dev_register(struct pdsc *cf,
+ 	return padev;
+ }
+ 
+-int pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf)
++void pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf,
++			 struct pds_auxiliary_dev **pd_ptr)
+ {
+ 	struct pds_auxiliary_dev *padev;
+-	int err = 0;
+ 
+-	if (!cf)
+-		return -ENODEV;
++	if (!*pd_ptr)
++		return;
+ 
+ 	mutex_lock(&pf->config_lock);
+ 
+-	padev = pf->vfs[cf->vf_id].padev;
+-	if (padev) {
+-		pds_client_unregister(pf, padev->client_id);
+-		auxiliary_device_delete(&padev->aux_dev);
+-		auxiliary_device_uninit(&padev->aux_dev);
+-		padev->client_id = 0;
+-	}
+-	pf->vfs[cf->vf_id].padev = NULL;
++	padev = *pd_ptr;
++	pds_client_unregister(pf, padev->client_id);
++	auxiliary_device_delete(&padev->aux_dev);
++	auxiliary_device_uninit(&padev->aux_dev);
++	*pd_ptr = NULL;
+ 
+ 	mutex_unlock(&pf->config_lock);
+-	return err;
+ }
+ 
+-int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf)
++int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf,
++			enum pds_core_vif_types vt,
++			struct pds_auxiliary_dev **pd_ptr)
+ {
+ 	struct pds_auxiliary_dev *padev;
+ 	char devname[PDS_DEVNAME_LEN];
+-	enum pds_core_vif_types vt;
+ 	unsigned long mask;
+ 	u16 vt_support;
+ 	int client_id;
+@@ -208,6 +205,9 @@ int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf)
+ 	if (!cf)
+ 		return -ENODEV;
+ 
++	if (vt >= PDS_DEV_TYPE_MAX)
++		return -EINVAL;
++
+ 	mutex_lock(&pf->config_lock);
+ 
+ 	mask = BIT_ULL(PDSC_S_FW_DEAD) |
+@@ -219,17 +219,10 @@ int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf)
+ 		goto out_unlock;
+ 	}
+ 
+-	/* We only support vDPA so far, so it is the only one to
+-	 * be verified that it is available in the Core device and
+-	 * enabled in the devlink param.  In the future this might
+-	 * become a loop for several VIF types.
+-	 */
+-
+ 	/* Verify that the type is supported and enabled.  It is not
+ 	 * an error if there is no auxbus device support for this
+ 	 * VF, it just means something else needs to happen with it.
+ 	 */
+-	vt = PDS_DEV_TYPE_VDPA;
+ 	vt_support = !!le16_to_cpu(pf->dev_ident.vif_types[vt]);
+ 	if (!(vt_support &&
+ 	      pf->viftype_status[vt].supported &&
+@@ -255,7 +248,7 @@ int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf)
+ 		err = PTR_ERR(padev);
+ 		goto out_unlock;
+ 	}
+-	pf->vfs[cf->vf_id].padev = padev;
++	*pd_ptr = padev;
+ 
+ out_unlock:
+ 	mutex_unlock(&pf->config_lock);
+diff --git a/drivers/net/ethernet/amd/pds_core/core.h b/drivers/net/ethernet/amd/pds_core/core.h
+index ec637dc4327a5d..becd3104473c2e 100644
+--- a/drivers/net/ethernet/amd/pds_core/core.h
++++ b/drivers/net/ethernet/amd/pds_core/core.h
+@@ -303,8 +303,11 @@ void pdsc_health_thread(struct work_struct *work);
+ int pdsc_register_notify(struct notifier_block *nb);
+ void pdsc_unregister_notify(struct notifier_block *nb);
+ void pdsc_notify(unsigned long event, void *data);
+-int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf);
+-int pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf);
++int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf,
++			enum pds_core_vif_types vt,
++			struct pds_auxiliary_dev **pd_ptr);
++void pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf,
++			 struct pds_auxiliary_dev **pd_ptr);
+ 
+ void pdsc_process_adminq(struct pdsc_qcq *qcq);
+ void pdsc_work_thread(struct work_struct *work);
+diff --git a/drivers/net/ethernet/amd/pds_core/devlink.c b/drivers/net/ethernet/amd/pds_core/devlink.c
+index ca23cde385e67b..d8dc39da4161fb 100644
+--- a/drivers/net/ethernet/amd/pds_core/devlink.c
++++ b/drivers/net/ethernet/amd/pds_core/devlink.c
+@@ -56,8 +56,11 @@ int pdsc_dl_enable_set(struct devlink *dl, u32 id,
+ 	for (vf_id = 0; vf_id < pdsc->num_vfs; vf_id++) {
+ 		struct pdsc *vf = pdsc->vfs[vf_id].vf;
+ 
+-		err = ctx->val.vbool ? pdsc_auxbus_dev_add(vf, pdsc) :
+-				       pdsc_auxbus_dev_del(vf, pdsc);
++		if (ctx->val.vbool)
++			err = pdsc_auxbus_dev_add(vf, pdsc, vt_entry->vif_id,
++						  &pdsc->vfs[vf_id].padev);
++		else
++			pdsc_auxbus_dev_del(vf, pdsc, &pdsc->vfs[vf_id].padev);
+ 	}
+ 
+ 	return err;
+diff --git a/drivers/net/ethernet/amd/pds_core/main.c b/drivers/net/ethernet/amd/pds_core/main.c
+index 660268ff95623f..a3a68889137b63 100644
+--- a/drivers/net/ethernet/amd/pds_core/main.c
++++ b/drivers/net/ethernet/amd/pds_core/main.c
+@@ -190,7 +190,8 @@ static int pdsc_init_vf(struct pdsc *vf)
+ 	devl_unlock(dl);
+ 
+ 	pf->vfs[vf->vf_id].vf = vf;
+-	err = pdsc_auxbus_dev_add(vf, pf);
++	err = pdsc_auxbus_dev_add(vf, pf, PDS_DEV_TYPE_VDPA,
++				  &pf->vfs[vf->vf_id].padev);
+ 	if (err) {
+ 		devl_lock(dl);
+ 		devl_unregister(dl);
+@@ -417,7 +418,7 @@ static void pdsc_remove(struct pci_dev *pdev)
+ 
+ 		pf = pdsc_get_pf_struct(pdsc->pdev);
+ 		if (!IS_ERR(pf)) {
+-			pdsc_auxbus_dev_del(pdsc, pf);
++			pdsc_auxbus_dev_del(pdsc, pf, &pf->vfs[pdsc->vf_id].padev);
+ 			pf->vfs[pdsc->vf_id].vf = NULL;
+ 		}
+ 	} else {
+@@ -482,7 +483,8 @@ static void pdsc_reset_prepare(struct pci_dev *pdev)
+ 
+ 		pf = pdsc_get_pf_struct(pdsc->pdev);
+ 		if (!IS_ERR(pf))
+-			pdsc_auxbus_dev_del(pdsc, pf);
++			pdsc_auxbus_dev_del(pdsc, pf,
++					    &pf->vfs[pdsc->vf_id].padev);
+ 	}
+ 
+ 	pdsc_unmap_bars(pdsc);
+@@ -527,7 +529,8 @@ static void pdsc_reset_done(struct pci_dev *pdev)
+ 
+ 		pf = pdsc_get_pf_struct(pdsc->pdev);
+ 		if (!IS_ERR(pf))
+-			pdsc_auxbus_dev_add(pdsc, pf);
++			pdsc_auxbus_dev_add(pdsc, pf, PDS_DEV_TYPE_VDPA,
++					    &pf->vfs[pdsc->vf_id].padev);
+ 	}
+ }
+ 
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+index 230726d7b74f63..d41b58fad37bbf 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+@@ -373,8 +373,13 @@ static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
+ 	}
+ 
+ 	/* Set up the header page info */
+-	xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa,
+-			     XGBE_SKB_ALLOC_SIZE);
++	if (pdata->netdev->features & NETIF_F_RXCSUM) {
++		xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa,
++				     XGBE_SKB_ALLOC_SIZE);
++	} else {
++		xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa,
++				     pdata->rx_buf_size);
++	}
+ 
+ 	/* Set up the buffer page info */
+ 	xgbe_set_buffer_data(&rdata->rx.buf, &ring->rx_buf_pa,
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+index f393228d41c7be..f1b0fb02b3cd14 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+@@ -320,6 +320,18 @@ static void xgbe_config_sph_mode(struct xgbe_prv_data *pdata)
+ 	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE);
+ }
+ 
++static void xgbe_disable_sph_mode(struct xgbe_prv_data *pdata)
++{
++	unsigned int i;
++
++	for (i = 0; i < pdata->channel_count; i++) {
++		if (!pdata->channel[i]->rx_ring)
++			break;
++
++		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, SPH, 0);
++	}
++}
++
+ static int xgbe_write_rss_reg(struct xgbe_prv_data *pdata, unsigned int type,
+ 			      unsigned int index, unsigned int val)
+ {
+@@ -3545,8 +3557,12 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
+ 	xgbe_config_tx_coalesce(pdata);
+ 	xgbe_config_rx_buffer_size(pdata);
+ 	xgbe_config_tso_mode(pdata);
+-	xgbe_config_sph_mode(pdata);
+-	xgbe_config_rss(pdata);
++
++	if (pdata->netdev->features & NETIF_F_RXCSUM) {
++		xgbe_config_sph_mode(pdata);
++		xgbe_config_rss(pdata);
++	}
++
+ 	desc_if->wrapper_tx_desc_init(pdata);
+ 	desc_if->wrapper_rx_desc_init(pdata);
+ 	xgbe_enable_dma_interrupts(pdata);
+@@ -3702,5 +3718,9 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
+ 	hw_if->disable_vxlan = xgbe_disable_vxlan;
+ 	hw_if->set_vxlan_id = xgbe_set_vxlan_id;
+ 
++	/* For Split Header*/
++	hw_if->enable_sph = xgbe_config_sph_mode;
++	hw_if->disable_sph = xgbe_disable_sph_mode;
++
+ 	DBGPR("<--xgbe_init_function_ptrs\n");
+ }
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+index 5475867708f426..8bc49259d71af1 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+@@ -2257,10 +2257,17 @@ static int xgbe_set_features(struct net_device *netdev,
+ 	if (ret)
+ 		return ret;
+ 
+-	if ((features & NETIF_F_RXCSUM) && !rxcsum)
++	if ((features & NETIF_F_RXCSUM) && !rxcsum) {
++		hw_if->enable_sph(pdata);
++		hw_if->enable_vxlan(pdata);
+ 		hw_if->enable_rx_csum(pdata);
+-	else if (!(features & NETIF_F_RXCSUM) && rxcsum)
++		schedule_work(&pdata->restart_work);
++	} else if (!(features & NETIF_F_RXCSUM) && rxcsum) {
++		hw_if->disable_sph(pdata);
++		hw_if->disable_vxlan(pdata);
+ 		hw_if->disable_rx_csum(pdata);
++		schedule_work(&pdata->restart_work);
++	}
+ 
+ 	if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan)
+ 		hw_if->enable_rx_vlan_stripping(pdata);
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
+index d85386cac8d166..ed5d43c16d0e23 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
++++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
+@@ -865,6 +865,10 @@ struct xgbe_hw_if {
+ 	void (*enable_vxlan)(struct xgbe_prv_data *);
+ 	void (*disable_vxlan)(struct xgbe_prv_data *);
+ 	void (*set_vxlan_id)(struct xgbe_prv_data *);
++
++	/* For Split Header */
++	void (*enable_sph)(struct xgbe_prv_data *pdata);
++	void (*disable_sph)(struct xgbe_prv_data *pdata);
+ };
+ 
+ /* This structure represents implementation specific routines for an
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 016dcfec8d4965..12b61a6fcda428 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -1986,6 +1986,7 @@ static struct sk_buff *bnxt_rx_vlan(struct sk_buff *skb, u8 cmp_type,
+ 	}
+ 	return skb;
+ vlan_err:
++	skb_mark_for_recycle(skb);
+ 	dev_kfree_skb(skb);
+ 	return NULL;
+ }
+@@ -3320,6 +3321,9 @@ static void bnxt_free_tx_skbs(struct bnxt *bp)
+ 		}
+ 		netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
+ 	}
++
++	if (bp->ptp_cfg && !(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP))
++		bnxt_ptp_free_txts_skbs(bp->ptp_cfg);
+ }
+ 
+ static void bnxt_free_one_rx_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
+@@ -11076,6 +11080,9 @@ static void bnxt_init_napi(struct bnxt *bp)
+ 		poll_fn = bnxt_poll_p5;
+ 	else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
+ 		cp_nr_rings--;
++
++	set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
++
+ 	for (i = 0; i < cp_nr_rings; i++) {
+ 		bnapi = bp->bnapi[i];
+ 		netif_napi_add(bp->dev, &bnapi->napi, poll_fn);
+@@ -11844,13 +11851,8 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
+ 				set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
+ 				return rc;
+ 			}
++			/* IRQ will be initialized later in bnxt_request_irq()*/
+ 			bnxt_clear_int_mode(bp);
+-			rc = bnxt_init_int_mode(bp);
+-			if (rc) {
+-				clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
+-				netdev_err(bp->dev, "init int mode failed\n");
+-				return rc;
+-			}
+ 		}
+ 		rc = bnxt_cancel_reservations(bp, fw_reset);
+ 	}
+@@ -12249,8 +12251,6 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
+ 	/* VF-reps may need to be re-opened after the PF is re-opened */
+ 	if (BNXT_PF(bp))
+ 		bnxt_vf_reps_open(bp);
+-	if (bp->ptp_cfg && !(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP))
+-		WRITE_ONCE(bp->ptp_cfg->tx_avail, BNXT_MAX_TX_TS);
+ 	bnxt_ptp_init_rtc(bp, true);
+ 	bnxt_ptp_cfg_tstamp_filters(bp);
+ 	if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
+@@ -15421,8 +15421,8 @@ static void bnxt_remove_one(struct pci_dev *pdev)
+ 
+ 	bnxt_rdma_aux_device_del(bp);
+ 
+-	bnxt_ptp_clear(bp);
+ 	unregister_netdev(dev);
++	bnxt_ptp_clear(bp);
+ 
+ 	bnxt_rdma_aux_device_uninit(bp);
+ 
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
+index 4e2b938ed1f7e0..d80ce437435f05 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
+@@ -66,20 +66,30 @@ static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg,
+ 			}
+ 		}
+ 
++		if (cmn_req->req_type ==
++				cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE))
++			info->dest_buf_size += len;
++
+ 		if (info->dest_buf) {
+ 			if ((info->seg_start + off + len) <=
+ 			    BNXT_COREDUMP_BUF_LEN(info->buf_len)) {
+-				memcpy(info->dest_buf + off, dma_buf, len);
++				u16 copylen = min_t(u16, len,
++						    info->dest_buf_size - off);
++
++				memcpy(info->dest_buf + off, dma_buf, copylen);
++				if (copylen < len)
++					break;
+ 			} else {
+ 				rc = -ENOBUFS;
++				if (cmn_req->req_type ==
++				    cpu_to_le16(HWRM_DBG_COREDUMP_LIST)) {
++					kfree(info->dest_buf);
++					info->dest_buf = NULL;
++				}
+ 				break;
+ 			}
+ 		}
+ 
+-		if (cmn_req->req_type ==
+-				cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE))
+-			info->dest_buf_size += len;
+-
+ 		if (!(cmn_resp->flags & HWRM_DBG_CMN_FLAGS_MORE))
+ 			break;
+ 
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+index b901ecb57f2552..54ae90526d8ffc 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+@@ -2041,6 +2041,17 @@ static int bnxt_get_regs_len(struct net_device *dev)
+ 	return reg_len;
+ }
+ 
++#define BNXT_PCIE_32B_ENTRY(start, end)			\
++	 { offsetof(struct pcie_ctx_hw_stats, start),	\
++	   offsetof(struct pcie_ctx_hw_stats, end) }
++
++static const struct {
++	u16 start;
++	u16 end;
++} bnxt_pcie_32b_entries[] = {
++	BNXT_PCIE_32B_ENTRY(pcie_ltssm_histogram[0], pcie_ltssm_histogram[3]),
++};
++
+ static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ 			  void *_p)
+ {
+@@ -2072,12 +2083,27 @@ static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ 	req->pcie_stat_host_addr = cpu_to_le64(hw_pcie_stats_addr);
+ 	rc = hwrm_req_send(bp, req);
+ 	if (!rc) {
+-		__le64 *src = (__le64 *)hw_pcie_stats;
+-		u64 *dst = (u64 *)(_p + BNXT_PXP_REG_LEN);
+-		int i;
+-
+-		for (i = 0; i < sizeof(*hw_pcie_stats) / sizeof(__le64); i++)
+-			dst[i] = le64_to_cpu(src[i]);
++		u8 *dst = (u8 *)(_p + BNXT_PXP_REG_LEN);
++		u8 *src = (u8 *)hw_pcie_stats;
++		int i, j;
++
++		for (i = 0, j = 0; i < sizeof(*hw_pcie_stats); ) {
++			if (i >= bnxt_pcie_32b_entries[j].start &&
++			    i <= bnxt_pcie_32b_entries[j].end) {
++				u32 *dst32 = (u32 *)(dst + i);
++
++				*dst32 = le32_to_cpu(*(__le32 *)(src + i));
++				i += 4;
++				if (i > bnxt_pcie_32b_entries[j].end &&
++				    j < ARRAY_SIZE(bnxt_pcie_32b_entries) - 1)
++					j++;
++			} else {
++				u64 *dst64 = (u64 *)(dst + i);
++
++				*dst64 = le64_to_cpu(*(__le64 *)(src + i));
++				i += 8;
++			}
++		}
+ 	}
+ 	hwrm_req_drop(bp, req);
+ }
+@@ -4848,6 +4874,7 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
+ 	if (!bp->num_tests || !BNXT_PF(bp))
+ 		return;
+ 
++	memset(buf, 0, sizeof(u64) * bp->num_tests);
+ 	if (etest->flags & ETH_TEST_FL_OFFLINE &&
+ 	    bnxt_ulp_registered(bp->edev)) {
+ 		etest->flags |= ETH_TEST_FL_FAILED;
+@@ -4855,7 +4882,6 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
+ 		return;
+ 	}
+ 
+-	memset(buf, 0, sizeof(u64) * bp->num_tests);
+ 	if (!netif_running(dev)) {
+ 		etest->flags |= ETH_TEST_FL_FAILED;
+ 		return;
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+index 781225d3ba8ffc..650034a4bb46d1 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+@@ -777,6 +777,27 @@ static long bnxt_ptp_ts_aux_work(struct ptp_clock_info *ptp_info)
+ 	return HZ;
+ }
+ 
++void bnxt_ptp_free_txts_skbs(struct bnxt_ptp_cfg *ptp)
++{
++	struct bnxt_ptp_tx_req *txts_req;
++	u16 cons = ptp->txts_cons;
++
++	/* make sure ptp aux worker finished with
++	 * possible BNXT_STATE_OPEN set
++	 */
++	ptp_cancel_worker_sync(ptp->ptp_clock);
++
++	ptp->tx_avail = BNXT_MAX_TX_TS;
++	while (cons != ptp->txts_prod) {
++		txts_req = &ptp->txts_req[cons];
++		if (!IS_ERR_OR_NULL(txts_req->tx_skb))
++			dev_kfree_skb_any(txts_req->tx_skb);
++		cons = NEXT_TXTS(cons);
++	}
++	ptp->txts_cons = cons;
++	ptp_schedule_worker(ptp->ptp_clock, 0);
++}
++
+ int bnxt_ptp_get_txts_prod(struct bnxt_ptp_cfg *ptp, u16 *prod)
+ {
+ 	spin_lock_bh(&ptp->ptp_tx_lock);
+@@ -1095,7 +1116,6 @@ int bnxt_ptp_init(struct bnxt *bp)
+ void bnxt_ptp_clear(struct bnxt *bp)
+ {
+ 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+-	int i;
+ 
+ 	if (!ptp)
+ 		return;
+@@ -1107,12 +1127,5 @@ void bnxt_ptp_clear(struct bnxt *bp)
+ 	kfree(ptp->ptp_info.pin_config);
+ 	ptp->ptp_info.pin_config = NULL;
+ 
+-	for (i = 0; i < BNXT_MAX_TX_TS; i++) {
+-		if (ptp->txts_req[i].tx_skb) {
+-			dev_kfree_skb_any(ptp->txts_req[i].tx_skb);
+-			ptp->txts_req[i].tx_skb = NULL;
+-		}
+-	}
+-
+ 	bnxt_unmap_ptp_regs(bp);
+ }
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
+index 61e89bb2d2690c..999b497990bce6 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
+@@ -174,6 +174,7 @@ int bnxt_ptp_cfg_tstamp_filters(struct bnxt *bp);
+ void bnxt_ptp_reapply_pps(struct bnxt *bp);
+ int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr);
+ int bnxt_hwtstamp_get(struct net_device *dev, struct ifreq *ifr);
++void bnxt_ptp_free_txts_skbs(struct bnxt_ptp_cfg *ptp);
+ int bnxt_ptp_get_txts_prod(struct bnxt_ptp_cfg *ptp, u16 *prod);
+ void bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb, u16 prod);
+ int bnxt_get_rx_ts_p5(struct bnxt *bp, u64 *ts, u32 pkt_ts);
+diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
+index d0ea9260787061..6bf8a7aeef9081 100644
+--- a/drivers/net/ethernet/dlink/dl2k.c
++++ b/drivers/net/ethernet/dlink/dl2k.c
+@@ -352,7 +352,7 @@ parse_eeprom (struct net_device *dev)
+ 	eth_hw_addr_set(dev, psrom->mac_addr);
+ 
+ 	if (np->chip_id == CHIP_IP1000A) {
+-		np->led_mode = psrom->led_mode;
++		np->led_mode = le16_to_cpu(psrom->led_mode);
+ 		return 0;
+ 	}
+ 
+diff --git a/drivers/net/ethernet/dlink/dl2k.h b/drivers/net/ethernet/dlink/dl2k.h
+index 195dc6cfd8955c..0e33e2eaae9606 100644
+--- a/drivers/net/ethernet/dlink/dl2k.h
++++ b/drivers/net/ethernet/dlink/dl2k.h
+@@ -335,7 +335,7 @@ typedef struct t_SROM {
+ 	u16 sub_system_id;	/* 0x06 */
+ 	u16 pci_base_1;		/* 0x08 (IP1000A only) */
+ 	u16 pci_base_2;		/* 0x0a (IP1000A only) */
+-	u16 led_mode;		/* 0x0c (IP1000A only) */
++	__le16 led_mode;	/* 0x0c (IP1000A only) */
+ 	u16 reserved1[9];	/* 0x0e-0x1f */
+ 	u8 mac_addr[6];		/* 0x20-0x25 */
+ 	u8 reserved2[10];	/* 0x26-0x2f */
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index 2b05d9c6c21a43..04906897615d87 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -714,7 +714,12 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
+ 	txq->bd.cur = bdp;
+ 
+ 	/* Trigger transmission start */
+-	writel(0, txq->bd.reg_desc_active);
++	if (!(fep->quirks & FEC_QUIRK_ERR007885) ||
++	    !readl(txq->bd.reg_desc_active) ||
++	    !readl(txq->bd.reg_desc_active) ||
++	    !readl(txq->bd.reg_desc_active) ||
++	    !readl(txq->bd.reg_desc_active))
++		writel(0, txq->bd.reg_desc_active);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+index 9bbece25552b17..3d70c97a0bedf6 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+@@ -60,7 +60,7 @@ static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = {
+ 		.name = "tm_qset",
+ 		.cmd = HNAE3_DBG_CMD_TM_QSET,
+ 		.dentry = HNS3_DBG_DENTRY_TM,
+-		.buf_len = HNS3_DBG_READ_LEN,
++		.buf_len = HNS3_DBG_READ_LEN_1MB,
+ 		.init = hns3_dbg_common_file_init,
+ 	},
+ 	{
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+index dc60ac3bde7f2c..24062a40a7793f 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -473,20 +473,14 @@ static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
+ 	writel(mask_en, tqp_vector->mask_addr);
+ }
+ 
+-static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
++static void hns3_irq_enable(struct hns3_enet_tqp_vector *tqp_vector)
+ {
+ 	napi_enable(&tqp_vector->napi);
+ 	enable_irq(tqp_vector->vector_irq);
+-
+-	/* enable vector */
+-	hns3_mask_vector_irq(tqp_vector, 1);
+ }
+ 
+-static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
++static void hns3_irq_disable(struct hns3_enet_tqp_vector *tqp_vector)
+ {
+-	/* disable vector */
+-	hns3_mask_vector_irq(tqp_vector, 0);
+-
+ 	disable_irq(tqp_vector->vector_irq);
+ 	napi_disable(&tqp_vector->napi);
+ 	cancel_work_sync(&tqp_vector->rx_group.dim.work);
+@@ -707,11 +701,42 @@ static int hns3_set_rx_cpu_rmap(struct net_device *netdev)
+ 	return 0;
+ }
+ 
++static void hns3_enable_irqs_and_tqps(struct net_device *netdev)
++{
++	struct hns3_nic_priv *priv = netdev_priv(netdev);
++	struct hnae3_handle *h = priv->ae_handle;
++	u16 i;
++
++	for (i = 0; i < priv->vector_num; i++)
++		hns3_irq_enable(&priv->tqp_vector[i]);
++
++	for (i = 0; i < priv->vector_num; i++)
++		hns3_mask_vector_irq(&priv->tqp_vector[i], 1);
++
++	for (i = 0; i < h->kinfo.num_tqps; i++)
++		hns3_tqp_enable(h->kinfo.tqp[i]);
++}
++
++static void hns3_disable_irqs_and_tqps(struct net_device *netdev)
++{
++	struct hns3_nic_priv *priv = netdev_priv(netdev);
++	struct hnae3_handle *h = priv->ae_handle;
++	u16 i;
++
++	for (i = 0; i < h->kinfo.num_tqps; i++)
++		hns3_tqp_disable(h->kinfo.tqp[i]);
++
++	for (i = 0; i < priv->vector_num; i++)
++		hns3_mask_vector_irq(&priv->tqp_vector[i], 0);
++
++	for (i = 0; i < priv->vector_num; i++)
++		hns3_irq_disable(&priv->tqp_vector[i]);
++}
++
+ static int hns3_nic_net_up(struct net_device *netdev)
+ {
+ 	struct hns3_nic_priv *priv = netdev_priv(netdev);
+ 	struct hnae3_handle *h = priv->ae_handle;
+-	int i, j;
+ 	int ret;
+ 
+ 	ret = hns3_nic_reset_all_ring(h);
+@@ -720,23 +745,13 @@ static int hns3_nic_net_up(struct net_device *netdev)
+ 
+ 	clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
+ 
+-	/* enable the vectors */
+-	for (i = 0; i < priv->vector_num; i++)
+-		hns3_vector_enable(&priv->tqp_vector[i]);
+-
+-	/* enable rcb */
+-	for (j = 0; j < h->kinfo.num_tqps; j++)
+-		hns3_tqp_enable(h->kinfo.tqp[j]);
++	hns3_enable_irqs_and_tqps(netdev);
+ 
+ 	/* start the ae_dev */
+ 	ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
+ 	if (ret) {
+ 		set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
+-		while (j--)
+-			hns3_tqp_disable(h->kinfo.tqp[j]);
+-
+-		for (j = i - 1; j >= 0; j--)
+-			hns3_vector_disable(&priv->tqp_vector[j]);
++		hns3_disable_irqs_and_tqps(netdev);
+ 	}
+ 
+ 	return ret;
+@@ -823,17 +838,9 @@ static void hns3_reset_tx_queue(struct hnae3_handle *h)
+ static void hns3_nic_net_down(struct net_device *netdev)
+ {
+ 	struct hns3_nic_priv *priv = netdev_priv(netdev);
+-	struct hnae3_handle *h = hns3_get_handle(netdev);
+ 	const struct hnae3_ae_ops *ops;
+-	int i;
+ 
+-	/* disable vectors */
+-	for (i = 0; i < priv->vector_num; i++)
+-		hns3_vector_disable(&priv->tqp_vector[i]);
+-
+-	/* disable rcb */
+-	for (i = 0; i < h->kinfo.num_tqps; i++)
+-		hns3_tqp_disable(h->kinfo.tqp[i]);
++	hns3_disable_irqs_and_tqps(netdev);
+ 
+ 	/* stop ae_dev */
+ 	ops = priv->ae_handle->ae_algo->ops;
+@@ -5864,8 +5871,6 @@ int hns3_set_channels(struct net_device *netdev,
+ void hns3_external_lb_prepare(struct net_device *ndev, bool if_running)
+ {
+ 	struct hns3_nic_priv *priv = netdev_priv(ndev);
+-	struct hnae3_handle *h = priv->ae_handle;
+-	int i;
+ 
+ 	if (!if_running)
+ 		return;
+@@ -5876,11 +5881,7 @@ void hns3_external_lb_prepare(struct net_device *ndev, bool if_running)
+ 	netif_carrier_off(ndev);
+ 	netif_tx_disable(ndev);
+ 
+-	for (i = 0; i < priv->vector_num; i++)
+-		hns3_vector_disable(&priv->tqp_vector[i]);
+-
+-	for (i = 0; i < h->kinfo.num_tqps; i++)
+-		hns3_tqp_disable(h->kinfo.tqp[i]);
++	hns3_disable_irqs_and_tqps(ndev);
+ 
+ 	/* delay ring buffer clearing to hns3_reset_notify_uninit_enet
+ 	 * during reset process, because driver may not be able
+@@ -5896,7 +5897,6 @@ void hns3_external_lb_restore(struct net_device *ndev, bool if_running)
+ {
+ 	struct hns3_nic_priv *priv = netdev_priv(ndev);
+ 	struct hnae3_handle *h = priv->ae_handle;
+-	int i;
+ 
+ 	if (!if_running)
+ 		return;
+@@ -5912,11 +5912,7 @@ void hns3_external_lb_restore(struct net_device *ndev, bool if_running)
+ 
+ 	clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
+ 
+-	for (i = 0; i < priv->vector_num; i++)
+-		hns3_vector_enable(&priv->tqp_vector[i]);
+-
+-	for (i = 0; i < h->kinfo.num_tqps; i++)
+-		hns3_tqp_enable(h->kinfo.tqp[i]);
++	hns3_enable_irqs_and_tqps(ndev);
+ 
+ 	netif_tx_wake_all_queues(ndev);
+ 
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
+index 181af419b878d5..0ffda5146bae58 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
+@@ -439,6 +439,13 @@ static int hclge_ptp_create_clock(struct hclge_dev *hdev)
+ 	ptp->info.settime64 = hclge_ptp_settime;
+ 
+ 	ptp->info.n_alarm = 0;
++
++	spin_lock_init(&ptp->lock);
++	ptp->io_base = hdev->hw.hw.io_base + HCLGE_PTP_REG_OFFSET;
++	ptp->ts_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
++	ptp->ts_cfg.tx_type = HWTSTAMP_TX_OFF;
++	hdev->ptp = ptp;
++
+ 	ptp->clock = ptp_clock_register(&ptp->info, &hdev->pdev->dev);
+ 	if (IS_ERR(ptp->clock)) {
+ 		dev_err(&hdev->pdev->dev,
+@@ -450,12 +457,6 @@ static int hclge_ptp_create_clock(struct hclge_dev *hdev)
+ 		return -ENODEV;
+ 	}
+ 
+-	spin_lock_init(&ptp->lock);
+-	ptp->io_base = hdev->hw.hw.io_base + HCLGE_PTP_REG_OFFSET;
+-	ptp->ts_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
+-	ptp->ts_cfg.tx_type = HWTSTAMP_TX_OFF;
+-	hdev->ptp = ptp;
+-
+ 	return 0;
+ }
+ 
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+index fd5992164846b1..8f5a85b97ac0c5 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+@@ -1294,9 +1294,8 @@ static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev)
+ 	rtnl_unlock();
+ }
+ 
+-static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
++static int hclgevf_en_hw_strip_rxvtag_cmd(struct hclgevf_dev *hdev, bool enable)
+ {
+-	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ 	struct hclge_vf_to_pf_msg send_msg;
+ 
+ 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
+@@ -1305,6 +1304,19 @@ static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
+ 	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
+ }
+ 
++static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
++{
++	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
++	int ret;
++
++	ret = hclgevf_en_hw_strip_rxvtag_cmd(hdev, enable);
++	if (ret)
++		return ret;
++
++	hdev->rxvtag_strip_en = enable;
++	return 0;
++}
++
+ static int hclgevf_reset_tqp(struct hnae3_handle *handle)
+ {
+ #define HCLGEVF_RESET_ALL_QUEUE_DONE	1U
+@@ -2206,12 +2218,13 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
+ 					  tc_valid, tc_size);
+ }
+ 
+-static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
++static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev,
++				    bool rxvtag_strip_en)
+ {
+ 	struct hnae3_handle *nic = &hdev->nic;
+ 	int ret;
+ 
+-	ret = hclgevf_en_hw_strip_rxvtag(nic, true);
++	ret = hclgevf_en_hw_strip_rxvtag(nic, rxvtag_strip_en);
+ 	if (ret) {
+ 		dev_err(&hdev->pdev->dev,
+ 			"failed to enable rx vlan offload, ret = %d\n", ret);
+@@ -2881,7 +2894,7 @@ static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
+ 	if (ret)
+ 		return ret;
+ 
+-	ret = hclgevf_init_vlan_config(hdev);
++	ret = hclgevf_init_vlan_config(hdev, hdev->rxvtag_strip_en);
+ 	if (ret) {
+ 		dev_err(&hdev->pdev->dev,
+ 			"failed(%d) to initialize VLAN config\n", ret);
+@@ -2996,7 +3009,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
+ 		goto err_config;
+ 	}
+ 
+-	ret = hclgevf_init_vlan_config(hdev);
++	ret = hclgevf_init_vlan_config(hdev, true);
+ 	if (ret) {
+ 		dev_err(&hdev->pdev->dev,
+ 			"failed(%d) to initialize VLAN config\n", ret);
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+index cccef32284616b..0208425ab594f5 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+@@ -253,6 +253,7 @@ struct hclgevf_dev {
+ 	int *vector_irq;
+ 
+ 	bool gro_en;
++	bool rxvtag_strip_en;
+ 
+ 	unsigned long vlan_del_fail_bmap[BITS_TO_LONGS(VLAN_N_VID)];
+ 
+diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
+index 14e3f0f89c78d6..ef755cee64ca99 100644
+--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
++++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
+@@ -2091,6 +2091,11 @@ int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
+ 	pf = vf->pf;
+ 	dev = ice_pf_to_dev(pf);
+ 	vf_vsi = ice_get_vf_vsi(vf);
++	if (!vf_vsi) {
++		dev_err(dev, "Can not get FDIR vf_vsi for VF %u\n", vf->vf_id);
++		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
++		goto err_exit;
++	}
+ 
+ #define ICE_VF_MAX_FDIR_FILTERS	128
+ 	if (!ice_fdir_num_avail_fltr(&pf->hw, vf_vsi) ||
+diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h
+index 66544faab710aa..aef0e9775a3305 100644
+--- a/drivers/net/ethernet/intel/idpf/idpf.h
++++ b/drivers/net/ethernet/intel/idpf/idpf.h
+@@ -629,13 +629,13 @@ bool idpf_is_capability_ena(struct idpf_adapter *adapter, bool all,
+ 	VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V4	|\
+ 	VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V6)
+ 
+-#define IDPF_CAP_RX_CSUM_L4V4 (\
+-	VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP	|\
+-	VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP)
++#define IDPF_CAP_TX_CSUM_L4V4 (\
++	VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_TCP	|\
++	VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_UDP)
+ 
+-#define IDPF_CAP_RX_CSUM_L4V6 (\
+-	VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP	|\
+-	VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP)
++#define IDPF_CAP_TX_CSUM_L4V6 (\
++	VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_TCP	|\
++	VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_UDP)
+ 
+ #define IDPF_CAP_RX_CSUM (\
+ 	VIRTCHNL2_CAP_RX_CSUM_L3_IPV4		|\
+@@ -644,11 +644,9 @@ bool idpf_is_capability_ena(struct idpf_adapter *adapter, bool all,
+ 	VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP	|\
+ 	VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP)
+ 
+-#define IDPF_CAP_SCTP_CSUM (\
++#define IDPF_CAP_TX_SCTP_CSUM (\
+ 	VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_SCTP	|\
+-	VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP	|\
+-	VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_SCTP	|\
+-	VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_SCTP)
++	VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP)
+ 
+ #define IDPF_CAP_TUNNEL_TX_CSUM (\
+ 	VIRTCHNL2_CAP_TX_CSUM_L3_SINGLE_TUNNEL	|\
+diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
+index a3d6b8f198a86a..5ce663d04de00b 100644
+--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
++++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
+@@ -703,8 +703,10 @@ static int idpf_cfg_netdev(struct idpf_vport *vport)
+ {
+ 	struct idpf_adapter *adapter = vport->adapter;
+ 	struct idpf_vport_config *vport_config;
++	netdev_features_t other_offloads = 0;
++	netdev_features_t csum_offloads = 0;
++	netdev_features_t tso_offloads = 0;
+ 	netdev_features_t dflt_features;
+-	netdev_features_t offloads = 0;
+ 	struct idpf_netdev_priv *np;
+ 	struct net_device *netdev;
+ 	u16 idx = vport->idx;
+@@ -766,53 +768,32 @@ static int idpf_cfg_netdev(struct idpf_vport *vport)
+ 
+ 	if (idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS))
+ 		dflt_features |= NETIF_F_RXHASH;
+-	if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM_L4V4))
+-		dflt_features |= NETIF_F_IP_CSUM;
+-	if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM_L4V6))
+-		dflt_features |= NETIF_F_IPV6_CSUM;
++	if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_TX_CSUM_L4V4))
++		csum_offloads |= NETIF_F_IP_CSUM;
++	if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_TX_CSUM_L4V6))
++		csum_offloads |= NETIF_F_IPV6_CSUM;
+ 	if (idpf_is_cap_ena(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM))
+-		dflt_features |= NETIF_F_RXCSUM;
+-	if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_SCTP_CSUM))
+-		dflt_features |= NETIF_F_SCTP_CRC;
++		csum_offloads |= NETIF_F_RXCSUM;
++	if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_TX_SCTP_CSUM))
++		csum_offloads |= NETIF_F_SCTP_CRC;
+ 
+ 	if (idpf_is_cap_ena(adapter, IDPF_SEG_CAPS, VIRTCHNL2_CAP_SEG_IPV4_TCP))
+-		dflt_features |= NETIF_F_TSO;
++		tso_offloads |= NETIF_F_TSO;
+ 	if (idpf_is_cap_ena(adapter, IDPF_SEG_CAPS, VIRTCHNL2_CAP_SEG_IPV6_TCP))
+-		dflt_features |= NETIF_F_TSO6;
++		tso_offloads |= NETIF_F_TSO6;
+ 	if (idpf_is_cap_ena_all(adapter, IDPF_SEG_CAPS,
+ 				VIRTCHNL2_CAP_SEG_IPV4_UDP |
+ 				VIRTCHNL2_CAP_SEG_IPV6_UDP))
+-		dflt_features |= NETIF_F_GSO_UDP_L4;
++		tso_offloads |= NETIF_F_GSO_UDP_L4;
+ 	if (idpf_is_cap_ena_all(adapter, IDPF_RSC_CAPS, IDPF_CAP_RSC))
+-		offloads |= NETIF_F_GRO_HW;
+-	/* advertise to stack only if offloads for encapsulated packets is
+-	 * supported
+-	 */
+-	if (idpf_is_cap_ena(vport->adapter, IDPF_SEG_CAPS,
+-			    VIRTCHNL2_CAP_SEG_TX_SINGLE_TUNNEL)) {
+-		offloads |= NETIF_F_GSO_UDP_TUNNEL	|
+-			    NETIF_F_GSO_GRE		|
+-			    NETIF_F_GSO_GRE_CSUM	|
+-			    NETIF_F_GSO_PARTIAL		|
+-			    NETIF_F_GSO_UDP_TUNNEL_CSUM	|
+-			    NETIF_F_GSO_IPXIP4		|
+-			    NETIF_F_GSO_IPXIP6		|
+-			    0;
+-
+-		if (!idpf_is_cap_ena_all(vport->adapter, IDPF_CSUM_CAPS,
+-					 IDPF_CAP_TUNNEL_TX_CSUM))
+-			netdev->gso_partial_features |=
+-				NETIF_F_GSO_UDP_TUNNEL_CSUM;
+-
+-		netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
+-		offloads |= NETIF_F_TSO_MANGLEID;
+-	}
++		other_offloads |= NETIF_F_GRO_HW;
+ 	if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_LOOPBACK))
+-		offloads |= NETIF_F_LOOPBACK;
++		other_offloads |= NETIF_F_LOOPBACK;
+ 
+-	netdev->features |= dflt_features;
+-	netdev->hw_features |= dflt_features | offloads;
+-	netdev->hw_enc_features |= dflt_features | offloads;
++	netdev->features |= dflt_features | csum_offloads | tso_offloads;
++	netdev->hw_features |=  netdev->features | other_offloads;
++	netdev->vlan_features |= netdev->features | other_offloads;
++	netdev->hw_enc_features |= dflt_features | other_offloads;
+ 	idpf_set_ethtool_ops(netdev);
+ 	SET_NETDEV_DEV(netdev, &adapter->pdev->dev);
+ 
+@@ -1127,11 +1108,9 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
+ 
+ 	num_max_q = max(max_q->max_txq, max_q->max_rxq);
+ 	vport->q_vector_idxs = kcalloc(num_max_q, sizeof(u16), GFP_KERNEL);
+-	if (!vport->q_vector_idxs) {
+-		kfree(vport);
++	if (!vport->q_vector_idxs)
++		goto free_vport;
+ 
+-		return NULL;
+-	}
+ 	idpf_vport_init(vport, max_q);
+ 
+ 	/* This alloc is done separate from the LUT because it's not strictly
+@@ -1141,11 +1120,9 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
+ 	 */
+ 	rss_data = &adapter->vport_config[idx]->user_config.rss_data;
+ 	rss_data->rss_key = kzalloc(rss_data->rss_key_size, GFP_KERNEL);
+-	if (!rss_data->rss_key) {
+-		kfree(vport);
++	if (!rss_data->rss_key)
++		goto free_vector_idxs;
+ 
+-		return NULL;
+-	}
+ 	/* Initialize default rss key */
+ 	netdev_rss_key_fill((void *)rss_data->rss_key, rss_data->rss_key_size);
+ 
+@@ -1158,6 +1135,13 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
+ 	adapter->next_vport = idpf_get_free_slot(adapter);
+ 
+ 	return vport;
++
++free_vector_idxs:
++	kfree(vport->q_vector_idxs);
++free_vport:
++	kfree(vport);
++
++	return NULL;
+ }
+ 
+ /**
+diff --git a/drivers/net/ethernet/intel/idpf/idpf_main.c b/drivers/net/ethernet/intel/idpf/idpf_main.c
+index 7557bb6694c090..734da1680c5a48 100644
+--- a/drivers/net/ethernet/intel/idpf/idpf_main.c
++++ b/drivers/net/ethernet/intel/idpf/idpf_main.c
+@@ -89,6 +89,7 @@ static void idpf_shutdown(struct pci_dev *pdev)
+ {
+ 	struct idpf_adapter *adapter = pci_get_drvdata(pdev);
+ 
++	cancel_delayed_work_sync(&adapter->serv_task);
+ 	cancel_delayed_work_sync(&adapter->vc_event_task);
+ 	idpf_vc_core_deinit(adapter);
+ 	idpf_deinit_dflt_mbx(adapter);
+diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
+index 612ed26a29c5d4..efc7b30e421133 100644
+--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
++++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
+@@ -1290,6 +1290,8 @@ void igc_ptp_reset(struct igc_adapter *adapter)
+ 	/* reset the tstamp_config */
+ 	igc_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config);
+ 
++	mutex_lock(&adapter->ptm_lock);
++
+ 	spin_lock_irqsave(&adapter->tmreg_lock, flags);
+ 
+ 	switch (adapter->hw.mac.type) {
+@@ -1308,7 +1310,6 @@ void igc_ptp_reset(struct igc_adapter *adapter)
+ 		if (!igc_is_crosststamp_supported(adapter))
+ 			break;
+ 
+-		mutex_lock(&adapter->ptm_lock);
+ 		wr32(IGC_PCIE_DIG_DELAY, IGC_PCIE_DIG_DELAY_DEFAULT);
+ 		wr32(IGC_PCIE_PHY_DELAY, IGC_PCIE_PHY_DELAY_DEFAULT);
+ 
+@@ -1332,7 +1333,6 @@ void igc_ptp_reset(struct igc_adapter *adapter)
+ 			netdev_err(adapter->netdev, "Timeout reading IGC_PTM_STAT register\n");
+ 
+ 		igc_ptm_reset(hw);
+-		mutex_unlock(&adapter->ptm_lock);
+ 		break;
+ 	default:
+ 		/* No work to do. */
+@@ -1349,5 +1349,7 @@ void igc_ptp_reset(struct igc_adapter *adapter)
+ out:
+ 	spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+ 
++	mutex_unlock(&adapter->ptm_lock);
++
+ 	wrfl();
+ }
+diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+index a89f80bac39b8d..1b2f5cae06449d 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
++++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+@@ -1184,7 +1184,7 @@ static void octep_hb_timeout_task(struct work_struct *work)
+ 		miss_cnt);
+ 	rtnl_lock();
+ 	if (netif_running(oct->netdev))
+-		octep_stop(oct->netdev);
++		dev_close(oct->netdev);
+ 	rtnl_unlock();
+ }
+ 
+diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
+index 18c922dd5fc64d..ccb69bc5c95292 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
++++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
+@@ -835,7 +835,9 @@ static void octep_vf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
+ 	struct octep_vf_device *oct = netdev_priv(netdev);
+ 
+ 	netdev_hold(netdev, NULL, GFP_ATOMIC);
+-	schedule_work(&oct->tx_timeout_task);
++	if (!schedule_work(&oct->tx_timeout_task))
++		netdev_put(netdev, NULL);
++
+ }
+ 
+ static int octep_vf_set_mac(struct net_device *netdev, void *p)
+diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+index 223aee1af44304..c5d5b9ff8bc42f 100644
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -269,12 +269,8 @@ static const char * const mtk_clks_source_name[] = {
+ 	"ethwarp_wocpu2",
+ 	"ethwarp_wocpu1",
+ 	"ethwarp_wocpu0",
+-	"top_usxgmii0_sel",
+-	"top_usxgmii1_sel",
+ 	"top_sgm0_sel",
+ 	"top_sgm1_sel",
+-	"top_xfi_phy0_xtal_sel",
+-	"top_xfi_phy1_xtal_sel",
+ 	"top_eth_gmii_sel",
+ 	"top_eth_refck_50m_sel",
+ 	"top_eth_sys_200m_sel",
+@@ -2206,14 +2202,18 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
+ 		ring->data[idx] = new_data;
+ 		rxd->rxd1 = (unsigned int)dma_addr;
+ release_desc:
++		if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) {
++			if (unlikely(dma_addr == DMA_MAPPING_ERROR))
++				addr64 = FIELD_GET(RX_DMA_ADDR64_MASK,
++						   rxd->rxd2);
++			else
++				addr64 = RX_DMA_PREP_ADDR64(dma_addr);
++		}
++
+ 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
+ 			rxd->rxd2 = RX_DMA_LSO;
+ 		else
+-			rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
+-
+-		if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA) &&
+-		    likely(dma_addr != DMA_MAPPING_ERROR))
+-			rxd->rxd2 |= RX_DMA_PREP_ADDR64(dma_addr);
++			rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size) | addr64;
+ 
+ 		ring->calc_idx = idx;
+ 		done++;
+diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c
+index 25989c79c92e61..c2ab87828d8589 100644
+--- a/drivers/net/ethernet/mediatek/mtk_star_emac.c
++++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c
+@@ -1163,6 +1163,7 @@ static int mtk_star_tx_poll(struct napi_struct *napi, int budget)
+ 	struct net_device *ndev = priv->ndev;
+ 	unsigned int head = ring->head;
+ 	unsigned int entry = ring->tail;
++	unsigned long flags;
+ 
+ 	while (entry != head && count < (MTK_STAR_RING_NUM_DESCS - 1)) {
+ 		ret = mtk_star_tx_complete_one(priv);
+@@ -1182,9 +1183,9 @@ static int mtk_star_tx_poll(struct napi_struct *napi, int budget)
+ 		netif_wake_queue(ndev);
+ 
+ 	if (napi_complete(napi)) {
+-		spin_lock(&priv->lock);
++		spin_lock_irqsave(&priv->lock, flags);
+ 		mtk_star_enable_dma_irq(priv, false, true);
+-		spin_unlock(&priv->lock);
++		spin_unlock_irqrestore(&priv->lock, flags);
+ 	}
+ 
+ 	return 0;
+@@ -1341,16 +1342,16 @@ static int mtk_star_rx(struct mtk_star_priv *priv, int budget)
+ static int mtk_star_rx_poll(struct napi_struct *napi, int budget)
+ {
+ 	struct mtk_star_priv *priv;
++	unsigned long flags;
+ 	int work_done = 0;
+ 
+ 	priv = container_of(napi, struct mtk_star_priv, rx_napi);
+ 
+ 	work_done = mtk_star_rx(priv, budget);
+-	if (work_done < budget) {
+-		napi_complete_done(napi, work_done);
+-		spin_lock(&priv->lock);
++	if (work_done < budget && napi_complete_done(napi, work_done)) {
++		spin_lock_irqsave(&priv->lock, flags);
+ 		mtk_star_enable_dma_irq(priv, true, false);
+-		spin_unlock(&priv->lock);
++		spin_unlock_irqrestore(&priv->lock, flags);
+ 	}
+ 
+ 	return work_done;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+index 09433b91be176f..c8adf309ecad04 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+@@ -177,6 +177,7 @@ static int mlx5e_tx_reporter_ptpsq_unhealthy_recover(void *ctx)
+ 
+ 	priv = ptpsq->txqsq.priv;
+ 
++	rtnl_lock();
+ 	mutex_lock(&priv->state_lock);
+ 	chs = &priv->channels;
+ 	netdev = priv->netdev;
+@@ -184,22 +185,19 @@ static int mlx5e_tx_reporter_ptpsq_unhealthy_recover(void *ctx)
+ 	carrier_ok = netif_carrier_ok(netdev);
+ 	netif_carrier_off(netdev);
+ 
+-	rtnl_lock();
+ 	mlx5e_deactivate_priv_channels(priv);
+-	rtnl_unlock();
+ 
+ 	mlx5e_ptp_close(chs->ptp);
+ 	err = mlx5e_ptp_open(priv, &chs->params, chs->c[0]->lag_port, &chs->ptp);
+ 
+-	rtnl_lock();
+ 	mlx5e_activate_priv_channels(priv);
+-	rtnl_unlock();
+ 
+ 	/* return carrier back if needed */
+ 	if (carrier_ok)
+ 		netif_carrier_on(netdev);
+ 
+ 	mutex_unlock(&priv->state_lock);
++	rtnl_unlock();
+ 
+ 	return err;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
+index e4e487c8431b88..b9cf79e2712440 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
+@@ -165,9 +165,6 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
+ 	struct flow_match_enc_keyid enc_keyid;
+ 	void *misc_c, *misc_v;
+ 
+-	misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
+-	misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
+-
+ 	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID))
+ 		return 0;
+ 
+@@ -182,6 +179,30 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
+ 		err = mlx5e_tc_tun_parse_vxlan_gbp_option(priv, spec, f);
+ 		if (err)
+ 			return err;
++
++		/* We can't mix custom tunnel headers with symbolic ones and we
++		 * don't have a symbolic field name for GBP, so we use custom
++		 * tunnel headers in this case. We need hardware support to
++		 * match on custom tunnel headers, but we already know it's
++		 * supported because the previous call successfully checked for
++		 * that.
++		 */
++		misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
++				      misc_parameters_5);
++		misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
++				      misc_parameters_5);
++
++		/* Shift by 8 to account for the reserved bits in the vxlan
++		 * header after the VNI.
++		 */
++		MLX5_SET(fte_match_set_misc5, misc_c, tunnel_header_1,
++			 be32_to_cpu(enc_keyid.mask->keyid) << 8);
++		MLX5_SET(fte_match_set_misc5, misc_v, tunnel_header_1,
++			 be32_to_cpu(enc_keyid.key->keyid) << 8);
++
++		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_5;
++
++		return 0;
+ 	}
+ 
+ 	/* match on VNI is required */
+@@ -195,6 +216,11 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
+ 		return -EOPNOTSUPP;
+ 	}
+ 
++	misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
++			      misc_parameters);
++	misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
++			      misc_parameters);
++
+ 	MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
+ 		 be32_to_cpu(enc_keyid.mask->keyid));
+ 	MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+index 6b3b1afe831214..218d5402cd1a65 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+@@ -1750,9 +1750,6 @@ extra_split_attr_dests_needed(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr
+ 	    !list_is_first(&attr->list, &flow->attrs))
+ 		return 0;
+ 
+-	if (flow_flag_test(flow, SLOW))
+-		return 0;
+-
+ 	esw_attr = attr->esw_attr;
+ 	if (!esw_attr->split_count ||
+ 	    esw_attr->split_count == esw_attr->out_count - 1)
+@@ -1766,7 +1763,7 @@ extra_split_attr_dests_needed(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr
+ 	for (i = esw_attr->split_count; i < esw_attr->out_count; i++) {
+ 		/* external dest with encap is considered as internal by firmware */
+ 		if (esw_attr->dests[i].vport == MLX5_VPORT_UPLINK &&
+-		    !(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP_VALID))
++		    !(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP))
+ 			ext_dest = true;
+ 		else
+ 			int_dest = true;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+index 3950b1d4b3d8e5..988df7047b01d7 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+@@ -3514,7 +3514,9 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
+ 	int err;
+ 
+ 	mutex_init(&esw->offloads.termtbl_mutex);
+-	mlx5_rdma_enable_roce(esw->dev);
++	err = mlx5_rdma_enable_roce(esw->dev);
++	if (err)
++		goto err_roce;
+ 
+ 	err = mlx5_esw_host_number_init(esw);
+ 	if (err)
+@@ -3575,6 +3577,7 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
+ 	esw_offloads_metadata_uninit(esw);
+ err_metadata:
+ 	mlx5_rdma_disable_roce(esw->dev);
++err_roce:
+ 	mutex_destroy(&esw->offloads.termtbl_mutex);
+ 	return err;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
+index a42f6cd99b7448..5c552b71e371c5 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
+@@ -118,8 +118,8 @@ static void mlx5_rdma_make_default_gid(struct mlx5_core_dev *dev, union ib_gid *
+ 
+ static int mlx5_rdma_add_roce_addr(struct mlx5_core_dev *dev)
+ {
++	u8 mac[ETH_ALEN] = {};
+ 	union ib_gid gid;
+-	u8 mac[ETH_ALEN];
+ 
+ 	mlx5_rdma_make_default_gid(dev, &gid);
+ 	return mlx5_core_roce_gid_set(dev, 0,
+@@ -140,17 +140,17 @@ void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev)
+ 	mlx5_nic_vport_disable_roce(dev);
+ }
+ 
+-void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev)
++int mlx5_rdma_enable_roce(struct mlx5_core_dev *dev)
+ {
+ 	int err;
+ 
+ 	if (!MLX5_CAP_GEN(dev, roce))
+-		return;
++		return 0;
+ 
+ 	err = mlx5_nic_vport_enable_roce(dev);
+ 	if (err) {
+ 		mlx5_core_err(dev, "Failed to enable RoCE: %d\n", err);
+-		return;
++		return err;
+ 	}
+ 
+ 	err = mlx5_rdma_add_roce_addr(dev);
+@@ -165,10 +165,11 @@ void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev)
+ 		goto del_roce_addr;
+ 	}
+ 
+-	return;
++	return err;
+ 
+ del_roce_addr:
+ 	mlx5_rdma_del_roce_addr(dev);
+ disable_roce:
+ 	mlx5_nic_vport_disable_roce(dev);
++	return err;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rdma.h b/drivers/net/ethernet/mellanox/mlx5/core/rdma.h
+index 750cff2a71a4bb..3d9e76c3d42fb1 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/rdma.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/rdma.h
+@@ -8,12 +8,12 @@
+ 
+ #ifdef CONFIG_MLX5_ESWITCH
+ 
+-void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev);
++int mlx5_rdma_enable_roce(struct mlx5_core_dev *dev);
+ void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev);
+ 
+ #else /* CONFIG_MLX5_ESWITCH */
+ 
+-static inline void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev) {}
++static inline int mlx5_rdma_enable_roce(struct mlx5_core_dev *dev) { return 0; }
+ static inline void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev) {}
+ 
+ #endif /* CONFIG_MLX5_ESWITCH */
+diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
+index 4dc5adcda6a381..547255ca1c4efa 100644
+--- a/drivers/net/ethernet/microchip/lan743x_main.c
++++ b/drivers/net/ethernet/microchip/lan743x_main.c
+@@ -1815,6 +1815,7 @@ static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx,
+ 	if (nr_frags <= 0) {
+ 		tx->frame_data0 |= TX_DESC_DATA0_LS_;
+ 		tx->frame_data0 |= TX_DESC_DATA0_IOC_;
++		tx->frame_last = tx->frame_first;
+ 	}
+ 	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
+ 	tx_descriptor->data0 = cpu_to_le32(tx->frame_data0);
+@@ -1884,6 +1885,7 @@ static int lan743x_tx_frame_add_fragment(struct lan743x_tx *tx,
+ 		tx->frame_first = 0;
+ 		tx->frame_data0 = 0;
+ 		tx->frame_tail = 0;
++		tx->frame_last = 0;
+ 		return -ENOMEM;
+ 	}
+ 
+@@ -1924,16 +1926,18 @@ static void lan743x_tx_frame_end(struct lan743x_tx *tx,
+ 	    TX_DESC_DATA0_DTYPE_DATA_) {
+ 		tx->frame_data0 |= TX_DESC_DATA0_LS_;
+ 		tx->frame_data0 |= TX_DESC_DATA0_IOC_;
++		tx->frame_last = tx->frame_tail;
+ 	}
+ 
+-	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
+-	buffer_info = &tx->buffer_info[tx->frame_tail];
++	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_last];
++	buffer_info = &tx->buffer_info[tx->frame_last];
+ 	buffer_info->skb = skb;
+ 	if (time_stamp)
+ 		buffer_info->flags |= TX_BUFFER_INFO_FLAG_TIMESTAMP_REQUESTED;
+ 	if (ignore_sync)
+ 		buffer_info->flags |= TX_BUFFER_INFO_FLAG_IGNORE_SYNC;
+ 
++	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
+ 	tx_descriptor->data0 = cpu_to_le32(tx->frame_data0);
+ 	tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail);
+ 	tx->last_tail = tx->frame_tail;
+diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
+index 8ef897c114d3ce..2f0cab0c85e1d0 100644
+--- a/drivers/net/ethernet/microchip/lan743x_main.h
++++ b/drivers/net/ethernet/microchip/lan743x_main.h
+@@ -980,6 +980,7 @@ struct lan743x_tx {
+ 	u32		frame_first;
+ 	u32		frame_data0;
+ 	u32		frame_tail;
++	u32		frame_last;
+ 
+ 	struct lan743x_tx_buffer_info *buffer_info;
+ 
+diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
+index ef93df52088710..08bee56aea35f3 100644
+--- a/drivers/net/ethernet/mscc/ocelot.c
++++ b/drivers/net/ethernet/mscc/ocelot.c
+@@ -830,6 +830,7 @@ EXPORT_SYMBOL(ocelot_vlan_prepare);
+ int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid,
+ 		    bool untagged)
+ {
++	struct ocelot_port *ocelot_port = ocelot->ports[port];
+ 	int err;
+ 
+ 	/* Ignore VID 0 added to our RX filter by the 8021q module, since
+@@ -849,6 +850,11 @@ int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid,
+ 					   ocelot_bridge_vlan_find(ocelot, vid));
+ 		if (err)
+ 			return err;
++	} else if (ocelot_port->pvid_vlan &&
++		   ocelot_bridge_vlan_find(ocelot, vid) == ocelot_port->pvid_vlan) {
++		err = ocelot_port_set_pvid(ocelot, port, NULL);
++		if (err)
++			return err;
+ 	}
+ 
+ 	/* Untagged egress vlan clasification */
+diff --git a/drivers/net/ethernet/realtek/rtase/rtase_main.c b/drivers/net/ethernet/realtek/rtase/rtase_main.c
+index 86dd034fdddc52..3a588aaa89c58a 100644
+--- a/drivers/net/ethernet/realtek/rtase/rtase_main.c
++++ b/drivers/net/ethernet/realtek/rtase/rtase_main.c
+@@ -1924,8 +1924,8 @@ static u16 rtase_calc_time_mitigation(u32 time_us)
+ 
+ 	time_us = min_t(int, time_us, RTASE_MITI_MAX_TIME);
+ 
+-	msb = fls(time_us);
+-	if (msb >= RTASE_MITI_COUNT_BIT_NUM) {
++	if (time_us > RTASE_MITI_TIME_COUNT_MASK) {
++		msb = fls(time_us);
+ 		time_unit = msb - RTASE_MITI_COUNT_BIT_NUM;
+ 		time_count = time_us >> (msb - RTASE_MITI_COUNT_BIT_NUM);
+ 	} else {
+diff --git a/drivers/net/ethernet/vertexcom/mse102x.c b/drivers/net/ethernet/vertexcom/mse102x.c
+index 89dc4c401a8de4..e4d993f3137407 100644
+--- a/drivers/net/ethernet/vertexcom/mse102x.c
++++ b/drivers/net/ethernet/vertexcom/mse102x.c
+@@ -6,6 +6,7 @@
+ 
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+ 
++#include <linux/if_vlan.h>
+ #include <linux/interrupt.h>
+ #include <linux/module.h>
+ #include <linux/kernel.h>
+@@ -33,7 +34,7 @@
+ #define CMD_CTR		(0x2 << CMD_SHIFT)
+ 
+ #define CMD_MASK	GENMASK(15, CMD_SHIFT)
+-#define LEN_MASK	GENMASK(CMD_SHIFT - 1, 0)
++#define LEN_MASK	GENMASK(CMD_SHIFT - 2, 0)
+ 
+ #define DET_CMD_LEN	4
+ #define DET_SOF_LEN	2
+@@ -262,7 +263,7 @@ static int mse102x_tx_frame_spi(struct mse102x_net *mse, struct sk_buff *txp,
+ }
+ 
+ static int mse102x_rx_frame_spi(struct mse102x_net *mse, u8 *buff,
+-				unsigned int frame_len)
++				unsigned int frame_len, bool drop)
+ {
+ 	struct mse102x_net_spi *mses = to_mse102x_spi(mse);
+ 	struct spi_transfer *xfer = &mses->spi_xfer;
+@@ -280,6 +281,9 @@ static int mse102x_rx_frame_spi(struct mse102x_net *mse, u8 *buff,
+ 		netdev_err(mse->ndev, "%s: spi_sync() failed: %d\n",
+ 			   __func__, ret);
+ 		mse->stats.xfer_err++;
++	} else if (drop) {
++		netdev_dbg(mse->ndev, "%s: Drop frame\n", __func__);
++		ret = -EINVAL;
+ 	} else if (*sof != cpu_to_be16(DET_SOF)) {
+ 		netdev_dbg(mse->ndev, "%s: SPI start of frame is invalid (0x%04x)\n",
+ 			   __func__, *sof);
+@@ -307,6 +311,7 @@ static void mse102x_rx_pkt_spi(struct mse102x_net *mse)
+ 	struct sk_buff *skb;
+ 	unsigned int rxalign;
+ 	unsigned int rxlen;
++	bool drop = false;
+ 	__be16 rx = 0;
+ 	u16 cmd_resp;
+ 	u8 *rxpkt;
+@@ -329,7 +334,8 @@ static void mse102x_rx_pkt_spi(struct mse102x_net *mse)
+ 			net_dbg_ratelimited("%s: Unexpected response (0x%04x)\n",
+ 					    __func__, cmd_resp);
+ 			mse->stats.invalid_rts++;
+-			return;
++			drop = true;
++			goto drop;
+ 		}
+ 
+ 		net_dbg_ratelimited("%s: Unexpected response to first CMD\n",
+@@ -337,12 +343,20 @@ static void mse102x_rx_pkt_spi(struct mse102x_net *mse)
+ 	}
+ 
+ 	rxlen = cmd_resp & LEN_MASK;
+-	if (!rxlen) {
+-		net_dbg_ratelimited("%s: No frame length defined\n", __func__);
++	if (rxlen < ETH_ZLEN || rxlen > VLAN_ETH_FRAME_LEN) {
++		net_dbg_ratelimited("%s: Invalid frame length: %d\n", __func__,
++				    rxlen);
+ 		mse->stats.invalid_len++;
+-		return;
++		drop = true;
+ 	}
+ 
++	/* In case of an invalid CMD_RTS, the frame must be consumed anyway.
++	 * So assume the maximum possible frame length.
++	 */
++drop:
++	if (drop)
++		rxlen = VLAN_ETH_FRAME_LEN;
++
+ 	rxalign = ALIGN(rxlen + DET_SOF_LEN + DET_DFT_LEN, 4);
+ 	skb = netdev_alloc_skb_ip_align(mse->ndev, rxalign);
+ 	if (!skb)
+@@ -353,7 +367,7 @@ static void mse102x_rx_pkt_spi(struct mse102x_net *mse)
+ 	 * They are copied, but ignored.
+ 	 */
+ 	rxpkt = skb_put(skb, rxlen) - DET_SOF_LEN;
+-	if (mse102x_rx_frame_spi(mse, rxpkt, rxlen)) {
++	if (mse102x_rx_frame_spi(mse, rxpkt, rxlen, drop)) {
+ 		mse->ndev->stats.rx_errors++;
+ 		dev_kfree_skb(skb);
+ 		return;
+@@ -509,6 +523,7 @@ static irqreturn_t mse102x_irq(int irq, void *_mse)
+ static int mse102x_net_open(struct net_device *ndev)
+ {
+ 	struct mse102x_net *mse = netdev_priv(ndev);
++	struct mse102x_net_spi *mses = to_mse102x_spi(mse);
+ 	int ret;
+ 
+ 	ret = request_threaded_irq(ndev->irq, NULL, mse102x_irq, IRQF_ONESHOT,
+@@ -524,6 +539,13 @@ static int mse102x_net_open(struct net_device *ndev)
+ 
+ 	netif_carrier_on(ndev);
+ 
++	/* The SPI interrupt can get stuck in case of pending packet(s).
++	 * So poll for possible packet(s) to re-arm the interrupt.
++	 */
++	mutex_lock(&mses->lock);
++	mse102x_rx_pkt_spi(mse);
++	mutex_unlock(&mses->lock);
++
+ 	netif_dbg(mse, ifup, ndev, "network device up\n");
+ 
+ 	return 0;
+diff --git a/drivers/net/mdio/mdio-mux-meson-gxl.c b/drivers/net/mdio/mdio-mux-meson-gxl.c
+index 89554021b5ccc4..f9f02d31364921 100644
+--- a/drivers/net/mdio/mdio-mux-meson-gxl.c
++++ b/drivers/net/mdio/mdio-mux-meson-gxl.c
+@@ -17,6 +17,7 @@
+ #define  REG2_LEDACT		GENMASK(23, 22)
+ #define  REG2_LEDLINK		GENMASK(25, 24)
+ #define  REG2_DIV4SEL		BIT(27)
++#define  REG2_REVERSED		BIT(28)
+ #define  REG2_ADCBYPASS		BIT(30)
+ #define  REG2_CLKINSEL		BIT(31)
+ #define ETH_REG3		0x4
+@@ -65,7 +66,7 @@ static void gxl_enable_internal_mdio(struct gxl_mdio_mux *priv)
+ 	 * The only constraint is that it must match the one in
+ 	 * drivers/net/phy/meson-gxl.c to properly match the PHY.
+ 	 */
+-	writel(FIELD_PREP(REG2_PHYID, EPHY_GXL_ID),
++	writel(REG2_REVERSED | FIELD_PREP(REG2_PHYID, EPHY_GXL_ID),
+ 	       priv->regs + ETH_REG2);
+ 
+ 	/* Enable the internal phy */
+diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
+index bb0bf141587274..7b3739b29c8f72 100644
+--- a/drivers/net/usb/rndis_host.c
++++ b/drivers/net/usb/rndis_host.c
+@@ -630,16 +630,6 @@ static const struct driver_info	zte_rndis_info = {
+ 	.tx_fixup =	rndis_tx_fixup,
+ };
+ 
+-static const struct driver_info	wwan_rndis_info = {
+-	.description =	"Mobile Broadband RNDIS device",
+-	.flags =	FLAG_WWAN | FLAG_POINTTOPOINT | FLAG_FRAMING_RN | FLAG_NO_SETINT,
+-	.bind =		rndis_bind,
+-	.unbind =	rndis_unbind,
+-	.status =	rndis_status,
+-	.rx_fixup =	rndis_rx_fixup,
+-	.tx_fixup =	rndis_tx_fixup,
+-};
+-
+ /*-------------------------------------------------------------------------*/
+ 
+ static const struct usb_device_id	products [] = {
+@@ -676,11 +666,9 @@ static const struct usb_device_id	products [] = {
+ 	USB_INTERFACE_INFO(USB_CLASS_WIRELESS_CONTROLLER, 1, 3),
+ 	.driver_info = (unsigned long) &rndis_info,
+ }, {
+-	/* Mobile Broadband Modem, seen in Novatel Verizon USB730L and
+-	 * Telit FN990A (RNDIS)
+-	 */
++	/* Novatel Verizon USB730L */
+ 	USB_INTERFACE_INFO(USB_CLASS_MISC, 4, 1),
+-	.driver_info = (unsigned long)&wwan_rndis_info,
++	.driver_info = (unsigned long) &rndis_info,
+ },
+ 	{ },		// END
+ };
+diff --git a/drivers/net/vxlan/vxlan_vnifilter.c b/drivers/net/vxlan/vxlan_vnifilter.c
+index 6e6e9f05509ab0..06d19e90eadb59 100644
+--- a/drivers/net/vxlan/vxlan_vnifilter.c
++++ b/drivers/net/vxlan/vxlan_vnifilter.c
+@@ -627,7 +627,11 @@ static void vxlan_vni_delete_group(struct vxlan_dev *vxlan,
+ 	 * default dst remote_ip previously added for this vni
+ 	 */
+ 	if (!vxlan_addr_any(&vninode->remote_ip) ||
+-	    !vxlan_addr_any(&dst->remote_ip))
++	    !vxlan_addr_any(&dst->remote_ip)) {
++		u32 hash_index = fdb_head_index(vxlan, all_zeros_mac,
++						vninode->vni);
++
++		spin_lock_bh(&vxlan->hash_lock[hash_index]);
+ 		__vxlan_fdb_delete(vxlan, all_zeros_mac,
+ 				   (vxlan_addr_any(&vninode->remote_ip) ?
+ 				   dst->remote_ip : vninode->remote_ip),
+@@ -635,6 +639,8 @@ static void vxlan_vni_delete_group(struct vxlan_dev *vxlan,
+ 				   vninode->vni, vninode->vni,
+ 				   dst->remote_ifindex,
+ 				   true);
++		spin_unlock_bh(&vxlan->hash_lock[hash_index]);
++	}
+ 
+ 	if (vxlan->dev->flags & IFF_UP) {
+ 		if (vxlan_addr_multicast(&vninode->remote_ip) &&
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
+index 8afbf529c74503..0364b81f6e9163 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
+@@ -896,14 +896,16 @@ brcmf_usb_dl_writeimage(struct brcmf_usbdev_info *devinfo, u8 *fw, int fwlen)
+ 	}
+ 
+ 	/* 1) Prepare USB boot loader for runtime image */
+-	brcmf_usb_dl_cmd(devinfo, DL_START, &state, sizeof(state));
++	err = brcmf_usb_dl_cmd(devinfo, DL_START, &state, sizeof(state));
++	if (err)
++		goto fail;
+ 
+ 	rdlstate = le32_to_cpu(state.state);
+ 	rdlbytes = le32_to_cpu(state.bytes);
+ 
+ 	/* 2) Check we are in the Waiting state */
+ 	if (rdlstate != DL_WAITING) {
+-		brcmf_err("Failed to DL_START\n");
++		brcmf_err("Invalid DL state: %u\n", rdlstate);
+ 		err = -EINVAL;
+ 		goto fail;
+ 	}
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+index 98563757ce2c9d..405bba199fe7fb 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+@@ -148,6 +148,7 @@
+  * during a error FW error.
+  */
+ #define CSR_FUNC_SCRATCH_INIT_VALUE		(0x01010101)
++#define CSR_FUNC_SCRATCH_POWER_OFF_MASK		0xFFFF
+ 
+ /* Bits for CSR_HW_IF_CONFIG_REG */
+ #define CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP_DASH	(0x0000000F)
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
+index 3c9d91496c8267..3b3dcaf33c9d9e 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
+@@ -347,7 +347,6 @@ void __releases(nic_access)
+ iwl_trans_release_nic_access(struct iwl_trans *trans)
+ {
+ 	iwl_trans_pcie_release_nic_access(trans);
+-	__release(nic_access);
+ }
+ IWL_EXPORT_SYMBOL(iwl_trans_release_nic_access);
+ 
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+index 9dd0e0a51ce5cc..9141ea57abfce1 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+@@ -1651,11 +1651,27 @@ static int _iwl_pci_resume(struct device *device, bool restore)
+ 	 * Scratch value was altered, this means the device was powered off, we
+ 	 * need to reset it completely.
+ 	 * Note: MAC (bits 0:7) will be cleared upon suspend even with wowlan,
+-	 * so assume that any bits there mean that the device is usable.
++	 * but not bits [15:8]. So if we have bits set in the lower word, assume
++	 * the device is alive.
++	 * For older devices, just try silently to grab the NIC.
+ 	 */
+-	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ &&
+-	    !iwl_read32(trans, CSR_FUNC_SCRATCH))
+-		device_was_powered_off = true;
++	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
++		if (!(iwl_read32(trans, CSR_FUNC_SCRATCH) &
++		      CSR_FUNC_SCRATCH_POWER_OFF_MASK))
++			device_was_powered_off = true;
++	} else {
++		/*
++		 * bh are re-enabled by iwl_trans_pcie_release_nic_access,
++		 * so re-enable them if _iwl_trans_pcie_grab_nic_access fails.
++		 */
++		local_bh_disable();
++		if (_iwl_trans_pcie_grab_nic_access(trans, true)) {
++			iwl_trans_pcie_release_nic_access(trans);
++		} else {
++			device_was_powered_off = true;
++			local_bh_enable();
++		}
++	}
+ 
+ 	if (restore || device_was_powered_off) {
+ 		trans->state = IWL_TRANS_NO_FW;
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+index ebe9b25cc53a99..6019114a5d1aa6 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+@@ -558,10 +558,10 @@ void iwl_trans_pcie_free(struct iwl_trans *trans);
+ void iwl_trans_pcie_free_pnvm_dram_regions(struct iwl_dram_regions *dram_regions,
+ 					   struct device *dev);
+ 
+-bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans);
+-#define _iwl_trans_pcie_grab_nic_access(trans)			\
++bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent);
++#define _iwl_trans_pcie_grab_nic_access(trans, silent)		\
+ 	__cond_lock(nic_access_nobh,				\
+-		    likely(__iwl_trans_pcie_grab_nic_access(trans)))
++		    likely(__iwl_trans_pcie_grab_nic_access(trans, silent)))
+ 
+ /*****************************************************
+ * RX
+@@ -1102,7 +1102,8 @@ void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
+ int iwl_trans_pcie_read_config32(struct iwl_trans *trans, u32 ofs,
+ 				 u32 *val);
+ bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans);
+-void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans);
++void __releases(nic_access_nobh)
++iwl_trans_pcie_release_nic_access(struct iwl_trans *trans);
+ 
+ /* transport gen 1 exported functions */
+ void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr);
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+index d19b3bd0866bda..18d7d59ae58147 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+@@ -2191,7 +2191,7 @@ EXPORT_SYMBOL(iwl_trans_pcie_remove);
+  * This version doesn't disable BHs but rather assumes they're
+  * already disabled.
+  */
+-bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans)
++bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent)
+ {
+ 	int ret;
+ 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+@@ -2243,6 +2243,11 @@ bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans)
+ 	if (unlikely(ret < 0)) {
+ 		u32 cntrl = iwl_read32(trans, CSR_GP_CNTRL);
+ 
++		if (silent) {
++			spin_unlock(&trans_pcie->reg_lock);
++			return false;
++		}
++
+ 		WARN_ONCE(1,
+ 			  "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n",
+ 			  cntrl);
+@@ -2273,7 +2278,7 @@ bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans)
+ 	bool ret;
+ 
+ 	local_bh_disable();
+-	ret = __iwl_trans_pcie_grab_nic_access(trans);
++	ret = __iwl_trans_pcie_grab_nic_access(trans, false);
+ 	if (ret) {
+ 		/* keep BHs disabled until iwl_trans_pcie_release_nic_access */
+ 		return ret;
+@@ -2282,7 +2287,8 @@ bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans)
+ 	return false;
+ }
+ 
+-void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans)
++void __releases(nic_access_nobh)
++iwl_trans_pcie_release_nic_access(struct iwl_trans *trans)
+ {
+ 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ 
+@@ -2309,6 +2315,7 @@ void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans)
+ 	 * scheduled on different CPUs (after we drop reg_lock).
+ 	 */
+ out:
++	__release(nic_access_nobh);
+ 	spin_unlock_bh(&trans_pcie->reg_lock);
+ }
+ 
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+index 9fcdd06e126ae1..f0213a6b8cf536 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+@@ -1021,7 +1021,7 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
+ 	 * returned. This needs to be done only on NICs that have
+ 	 * apmg_wake_up_wa set (see above.)
+ 	 */
+-	if (!_iwl_trans_pcie_grab_nic_access(trans))
++	if (!_iwl_trans_pcie_grab_nic_access(trans, false))
+ 		return -EIO;
+ 
+ 	/*
+diff --git a/drivers/net/wireless/purelifi/plfxlc/mac.c b/drivers/net/wireless/purelifi/plfxlc/mac.c
+index eae93efa615044..82d1bf7edba20d 100644
+--- a/drivers/net/wireless/purelifi/plfxlc/mac.c
++++ b/drivers/net/wireless/purelifi/plfxlc/mac.c
+@@ -102,7 +102,6 @@ int plfxlc_mac_init_hw(struct ieee80211_hw *hw)
+ void plfxlc_mac_release(struct plfxlc_mac *mac)
+ {
+ 	plfxlc_chip_release(&mac->chip);
+-	lockdep_assert_held(&mac->lock);
+ }
+ 
+ int plfxlc_op_start(struct ieee80211_hw *hw)
+diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
+index 486afe59818454..09ed1f61c9a85a 100644
+--- a/drivers/nvme/host/Kconfig
++++ b/drivers/nvme/host/Kconfig
+@@ -97,6 +97,7 @@ config NVME_TCP_TLS
+ 	depends on NVME_TCP
+ 	select NET_HANDSHAKE
+ 	select KEYS
++	select TLS
+ 	help
+ 	  Enables TLS encryption for NVMe TCP using the netlink handshake API.
+ 
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index e70618e8d35eb4..83ee433b694150 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -3535,7 +3535,7 @@ static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev)
+ 
+ 	dev_info(dev->ctrl.device, "restart after slot reset\n");
+ 	pci_restore_state(pdev);
+-	if (!nvme_try_sched_reset(&dev->ctrl))
++	if (nvme_try_sched_reset(&dev->ctrl))
+ 		nvme_unquiesce_io_queues(&dev->ctrl);
+ 	return PCI_ERS_RESULT_RECOVERED;
+ }
+diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
+index 854aa6a070ca87..4cc72be28c7319 100644
+--- a/drivers/nvme/host/tcp.c
++++ b/drivers/nvme/host/tcp.c
+@@ -1944,7 +1944,7 @@ static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
+ 	cancel_work_sync(&queue->io_work);
+ }
+ 
+-static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
++static void nvme_tcp_stop_queue_nowait(struct nvme_ctrl *nctrl, int qid)
+ {
+ 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
+ 	struct nvme_tcp_queue *queue = &ctrl->queues[qid];
+@@ -1963,6 +1963,31 @@ static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
+ 	mutex_unlock(&queue->queue_lock);
+ }
+ 
++static void nvme_tcp_wait_queue(struct nvme_ctrl *nctrl, int qid)
++{
++	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
++	struct nvme_tcp_queue *queue = &ctrl->queues[qid];
++	int timeout = 100;
++
++	while (timeout > 0) {
++		if (!test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags) ||
++		    !sk_wmem_alloc_get(queue->sock->sk))
++			return;
++		msleep(2);
++		timeout -= 2;
++	}
++	dev_warn(nctrl->device,
++		 "qid %d: timeout draining sock wmem allocation expired\n",
++		 qid);
++}
++
++static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
++{
++	nvme_tcp_stop_queue_nowait(nctrl, qid);
++	nvme_tcp_wait_queue(nctrl, qid);
++}
++
++
+ static void nvme_tcp_setup_sock_ops(struct nvme_tcp_queue *queue)
+ {
+ 	write_lock_bh(&queue->sock->sk->sk_callback_lock);
+@@ -2030,7 +2055,9 @@ static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl)
+ 	int i;
+ 
+ 	for (i = 1; i < ctrl->queue_count; i++)
+-		nvme_tcp_stop_queue(ctrl, i);
++		nvme_tcp_stop_queue_nowait(ctrl, i);
++	for (i = 1; i < ctrl->queue_count; i++)
++		nvme_tcp_wait_queue(ctrl, i);
+ }
+ 
+ static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl,
+diff --git a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig
+index 46be031f91b43b..34438cec32b90b 100644
+--- a/drivers/nvme/target/Kconfig
++++ b/drivers/nvme/target/Kconfig
+@@ -98,6 +98,7 @@ config NVME_TARGET_TCP_TLS
+ 	bool "NVMe over Fabrics TCP target TLS encryption support"
+ 	depends on NVME_TARGET_TCP
+ 	select NET_HANDSHAKE
++	select TLS
+ 	help
+ 	  Enables TLS encryption for the NVMe TCP target using the netlink handshake API.
+ 
+diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
+index d05c2c478e7950..4e1fe457a608a4 100644
+--- a/drivers/pinctrl/freescale/pinctrl-imx.c
++++ b/drivers/pinctrl/freescale/pinctrl-imx.c
+@@ -37,16 +37,16 @@ static inline const struct group_desc *imx_pinctrl_find_group_by_name(
+ 				struct pinctrl_dev *pctldev,
+ 				const char *name)
+ {
+-	const struct group_desc *grp = NULL;
++	const struct group_desc *grp;
+ 	int i;
+ 
+ 	for (i = 0; i < pctldev->num_groups; i++) {
+ 		grp = pinctrl_generic_get_group(pctldev, i);
+ 		if (grp && !strcmp(grp->grp.name, name))
+-			break;
++			return grp;
+ 	}
+ 
+-	return grp;
++	return NULL;
+ }
+ 
+ static void imx_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
+diff --git a/drivers/platform/x86/amd/pmc/pmc.c b/drivers/platform/x86/amd/pmc/pmc.c
+index 4d3acfe849bf4e..dc071b4257d7bf 100644
+--- a/drivers/platform/x86/amd/pmc/pmc.c
++++ b/drivers/platform/x86/amd/pmc/pmc.c
+@@ -892,10 +892,9 @@ static void amd_pmc_s2idle_check(void)
+ 	struct smu_metrics table;
+ 	int rc;
+ 
+-	/* CZN: Ensure that future s0i3 entry attempts at least 10ms passed */
+-	if (pdev->cpu_id == AMD_CPU_ID_CZN && !get_metrics_table(pdev, &table) &&
+-	    table.s0i3_last_entry_status)
+-		usleep_range(10000, 20000);
++	/* Avoid triggering OVP */
++	if (!get_metrics_table(pdev, &table) && table.s0i3_last_entry_status)
++		msleep(2500);
+ 
+ 	/* Dump the IdleMask before we add to the STB */
+ 	amd_pmc_idlemask_read(pdev, pdev->dev, NULL);
+diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c
+index a450b8a6bcec10..529b19833537ac 100644
+--- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c
++++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c
+@@ -146,15 +146,13 @@ static int uncore_event_cpu_online(unsigned int cpu)
+ {
+ 	struct uncore_data *data;
+ 	int target;
++	int ret;
+ 
+ 	/* Check if there is an online cpu in the package for uncore MSR */
+ 	target = cpumask_any_and(&uncore_cpu_mask, topology_die_cpumask(cpu));
+ 	if (target < nr_cpu_ids)
+ 		return 0;
+ 
+-	/* Use this CPU on this die as a control CPU */
+-	cpumask_set_cpu(cpu, &uncore_cpu_mask);
+-
+ 	data = uncore_get_instance(cpu);
+ 	if (!data)
+ 		return 0;
+@@ -163,7 +161,14 @@ static int uncore_event_cpu_online(unsigned int cpu)
+ 	data->die_id = topology_die_id(cpu);
+ 	data->domain_id = UNCORE_DOMAIN_ID_INVALID;
+ 
+-	return uncore_freq_add_entry(data, cpu);
++	ret = uncore_freq_add_entry(data, cpu);
++	if (ret)
++		return ret;
++
++	/* Use this CPU on this die as a control CPU */
++	cpumask_set_cpu(cpu, &uncore_cpu_mask);
++
++	return 0;
+ }
+ 
+ static int uncore_event_cpu_offline(unsigned int cpu)
+diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c
+index 0eeb503e06c230..1a936829975e11 100644
+--- a/drivers/ptp/ptp_ocp.c
++++ b/drivers/ptp/ptp_ocp.c
+@@ -2574,12 +2574,60 @@ static const struct ocp_sma_op ocp_fb_sma_op = {
+ 	.set_output	= ptp_ocp_sma_fb_set_output,
+ };
+ 
++static int
++ptp_ocp_sma_adva_set_output(struct ptp_ocp *bp, int sma_nr, u32 val)
++{
++	u32 reg, mask, shift;
++	unsigned long flags;
++	u32 __iomem *gpio;
++
++	gpio = sma_nr > 2 ? &bp->sma_map1->gpio2 : &bp->sma_map2->gpio2;
++	shift = sma_nr & 1 ? 0 : 16;
++
++	mask = 0xffff << (16 - shift);
++
++	spin_lock_irqsave(&bp->lock, flags);
++
++	reg = ioread32(gpio);
++	reg = (reg & mask) | (val << shift);
++
++	iowrite32(reg, gpio);
++
++	spin_unlock_irqrestore(&bp->lock, flags);
++
++	return 0;
++}
++
++static int
++ptp_ocp_sma_adva_set_inputs(struct ptp_ocp *bp, int sma_nr, u32 val)
++{
++	u32 reg, mask, shift;
++	unsigned long flags;
++	u32 __iomem *gpio;
++
++	gpio = sma_nr > 2 ? &bp->sma_map2->gpio1 : &bp->sma_map1->gpio1;
++	shift = sma_nr & 1 ? 0 : 16;
++
++	mask = 0xffff << (16 - shift);
++
++	spin_lock_irqsave(&bp->lock, flags);
++
++	reg = ioread32(gpio);
++	reg = (reg & mask) | (val << shift);
++
++	iowrite32(reg, gpio);
++
++	spin_unlock_irqrestore(&bp->lock, flags);
++
++	return 0;
++}
++
+ static const struct ocp_sma_op ocp_adva_sma_op = {
+ 	.tbl		= { ptp_ocp_adva_sma_in, ptp_ocp_adva_sma_out },
+ 	.init		= ptp_ocp_sma_fb_init,
+ 	.get		= ptp_ocp_sma_fb_get,
+-	.set_inputs	= ptp_ocp_sma_fb_set_inputs,
+-	.set_output	= ptp_ocp_sma_fb_set_output,
++	.set_inputs	= ptp_ocp_sma_adva_set_inputs,
++	.set_output	= ptp_ocp_sma_adva_set_output,
+ };
+ 
+ static int
+diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c
+index bc7cc4088eea0d..5c7eb020943b8c 100644
+--- a/drivers/spi/spi-tegra114.c
++++ b/drivers/spi/spi-tegra114.c
+@@ -728,9 +728,9 @@ static int tegra_spi_set_hw_cs_timing(struct spi_device *spi)
+ 	u32 inactive_cycles;
+ 	u8 cs_state;
+ 
+-	if (setup->unit != SPI_DELAY_UNIT_SCK ||
+-	    hold->unit != SPI_DELAY_UNIT_SCK ||
+-	    inactive->unit != SPI_DELAY_UNIT_SCK) {
++	if ((setup->unit && setup->unit != SPI_DELAY_UNIT_SCK) ||
++	    (hold->unit && hold->unit != SPI_DELAY_UNIT_SCK) ||
++	    (inactive->unit && inactive->unit != SPI_DELAY_UNIT_SCK)) {
+ 		dev_err(&spi->dev,
+ 			"Invalid delay unit %d, should be SPI_DELAY_UNIT_SCK\n",
+ 			SPI_DELAY_UNIT_SCK);
+diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
+index 8d4a5b8371b63d..a9b032d2f4a8db 100644
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -7256,8 +7256,6 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
+ 			err = -EINVAL;
+ 		}
+ 	}
+-	ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
+-				    (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
+ 
+ 	return err;
+ }
+diff --git a/fs/bcachefs/xattr_format.h b/fs/bcachefs/xattr_format.h
+index c7916011ef34d3..67426e33d04e56 100644
+--- a/fs/bcachefs/xattr_format.h
++++ b/fs/bcachefs/xattr_format.h
+@@ -13,7 +13,13 @@ struct bch_xattr {
+ 	__u8			x_type;
+ 	__u8			x_name_len;
+ 	__le16			x_val_len;
+-	__u8			x_name[] __counted_by(x_name_len);
++	/*
++	 * x_name contains the name and value counted by
++	 * x_name_len + x_val_len. The introduction of
++	 * __counted_by(x_name_len) caused a false positive
++	 * detection of an out of bounds write.
++	 */
++	__u8			x_name[];
+ } __packed __aligned(8);
+ 
+ #endif /* _BCACHEFS_XATTR_FORMAT_H */
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index 6551fb003eed25..e263d4b0546fa2 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -1870,7 +1870,7 @@ static int submit_eb_subpage(struct folio *folio, struct writeback_control *wbc)
+ 			      subpage->bitmaps)) {
+ 			spin_unlock_irqrestore(&subpage->lock, flags);
+ 			spin_unlock(&folio->mapping->i_private_lock);
+-			bit_start++;
++			bit_start += sectors_per_node;
+ 			continue;
+ 		}
+ 
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 5b842276573e82..bee8852e81554e 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -2155,12 +2155,13 @@ static noinline int run_delalloc_nocow(struct btrfs_inode *inode,
+ 
+ 		/*
+ 		 * If the found extent starts after requested offset, then
+-		 * adjust extent_end to be right before this extent begins
++		 * adjust cur_offset to be right before this extent begins.
+ 		 */
+ 		if (found_key.offset > cur_offset) {
+-			extent_end = found_key.offset;
+-			extent_type = 0;
+-			goto must_cow;
++			if (cow_start == (u64)-1)
++				cow_start = cur_offset;
++			cur_offset = found_key.offset;
++			goto next_slot;
+ 		}
+ 
+ 		/*
+diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
+index 1a7b82664255ab..83022a476e3b3e 100644
+--- a/fs/smb/client/smb2pdu.c
++++ b/fs/smb/client/smb2pdu.c
+@@ -2938,6 +2938,7 @@ int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
+ 		req->CreateContextsOffset = cpu_to_le32(
+ 			sizeof(struct smb2_create_req) +
+ 			iov[1].iov_len);
++		le32_add_cpu(&req->CreateContextsLength, iov[n_iov-1].iov_len);
+ 		pc_buf = iov[n_iov-1].iov_base;
+ 	}
+ 
+diff --git a/fs/smb/server/auth.c b/fs/smb/server/auth.c
+index 83caa384974932..b3d121052408cc 100644
+--- a/fs/smb/server/auth.c
++++ b/fs/smb/server/auth.c
+@@ -550,7 +550,19 @@ int ksmbd_krb5_authenticate(struct ksmbd_session *sess, char *in_blob,
+ 		retval = -ENOMEM;
+ 		goto out;
+ 	}
+-	sess->user = user;
++
++	if (!sess->user) {
++		/* First successful authentication */
++		sess->user = user;
++	} else {
++		if (!ksmbd_compare_user(sess->user, user)) {
++			ksmbd_debug(AUTH, "different user tried to reuse session\n");
++			retval = -EPERM;
++			ksmbd_free_user(user);
++			goto out;
++		}
++		ksmbd_free_user(user);
++	}
+ 
+ 	memcpy(sess->sess_key, resp->payload, resp->session_key_len);
+ 	memcpy(out_blob, resp->payload + resp->session_key_len,
+diff --git a/fs/smb/server/mgmt/user_session.c b/fs/smb/server/mgmt/user_session.c
+index 3f45f28f6f0f8e..9dec4c2940bc04 100644
+--- a/fs/smb/server/mgmt/user_session.c
++++ b/fs/smb/server/mgmt/user_session.c
+@@ -59,10 +59,12 @@ static void ksmbd_session_rpc_clear_list(struct ksmbd_session *sess)
+ 	struct ksmbd_session_rpc *entry;
+ 	long index;
+ 
++	down_write(&sess->rpc_lock);
+ 	xa_for_each(&sess->rpc_handle_list, index, entry) {
+ 		xa_erase(&sess->rpc_handle_list, index);
+ 		__session_rpc_close(sess, entry);
+ 	}
++	up_write(&sess->rpc_lock);
+ 
+ 	xa_destroy(&sess->rpc_handle_list);
+ }
+@@ -92,7 +94,7 @@ int ksmbd_session_rpc_open(struct ksmbd_session *sess, char *rpc_name)
+ {
+ 	struct ksmbd_session_rpc *entry, *old;
+ 	struct ksmbd_rpc_command *resp;
+-	int method;
++	int method, id;
+ 
+ 	method = __rpc_method(rpc_name);
+ 	if (!method)
+@@ -102,26 +104,29 @@ int ksmbd_session_rpc_open(struct ksmbd_session *sess, char *rpc_name)
+ 	if (!entry)
+ 		return -ENOMEM;
+ 
++	down_read(&sess->rpc_lock);
+ 	entry->method = method;
+-	entry->id = ksmbd_ipc_id_alloc();
+-	if (entry->id < 0)
++	entry->id = id = ksmbd_ipc_id_alloc();
++	if (id < 0)
+ 		goto free_entry;
+-	old = xa_store(&sess->rpc_handle_list, entry->id, entry, KSMBD_DEFAULT_GFP);
++	old = xa_store(&sess->rpc_handle_list, id, entry, KSMBD_DEFAULT_GFP);
+ 	if (xa_is_err(old))
+ 		goto free_id;
+ 
+-	resp = ksmbd_rpc_open(sess, entry->id);
++	resp = ksmbd_rpc_open(sess, id);
+ 	if (!resp)
+ 		goto erase_xa;
+ 
++	up_read(&sess->rpc_lock);
+ 	kvfree(resp);
+-	return entry->id;
++	return id;
+ erase_xa:
+ 	xa_erase(&sess->rpc_handle_list, entry->id);
+ free_id:
+ 	ksmbd_rpc_id_free(entry->id);
+ free_entry:
+ 	kfree(entry);
++	up_read(&sess->rpc_lock);
+ 	return -EINVAL;
+ }
+ 
+@@ -129,9 +134,11 @@ void ksmbd_session_rpc_close(struct ksmbd_session *sess, int id)
+ {
+ 	struct ksmbd_session_rpc *entry;
+ 
++	down_write(&sess->rpc_lock);
+ 	entry = xa_erase(&sess->rpc_handle_list, id);
+ 	if (entry)
+ 		__session_rpc_close(sess, entry);
++	up_write(&sess->rpc_lock);
+ }
+ 
+ int ksmbd_session_rpc_method(struct ksmbd_session *sess, int id)
+@@ -439,6 +446,7 @@ static struct ksmbd_session *__session_create(int protocol)
+ 	sess->sequence_number = 1;
+ 	rwlock_init(&sess->tree_conns_lock);
+ 	atomic_set(&sess->refcnt, 2);
++	init_rwsem(&sess->rpc_lock);
+ 
+ 	ret = __init_smb2_session(sess);
+ 	if (ret)
+diff --git a/fs/smb/server/mgmt/user_session.h b/fs/smb/server/mgmt/user_session.h
+index f21348381d5984..c5749d6ec7151c 100644
+--- a/fs/smb/server/mgmt/user_session.h
++++ b/fs/smb/server/mgmt/user_session.h
+@@ -63,6 +63,7 @@ struct ksmbd_session {
+ 	rwlock_t			tree_conns_lock;
+ 
+ 	atomic_t			refcnt;
++	struct rw_semaphore		rpc_lock;
+ };
+ 
+ static inline int test_session_flag(struct ksmbd_session *sess, int bit)
+diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
+index 6b9286c9634391..85348705f2555d 100644
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -1600,11 +1600,6 @@ static int krb5_authenticate(struct ksmbd_work *work,
+ 	if (prev_sess_id && prev_sess_id != sess->id)
+ 		destroy_previous_session(conn, sess->user, prev_sess_id);
+ 
+-	if (sess->state == SMB2_SESSION_VALID) {
+-		ksmbd_free_user(sess->user);
+-		sess->user = NULL;
+-	}
+-
+ 	retval = ksmbd_krb5_authenticate(sess, in_blob, in_len,
+ 					 out_blob, &out_len);
+ 	if (retval) {
+@@ -2245,10 +2240,6 @@ int smb2_session_logoff(struct ksmbd_work *work)
+ 	sess->state = SMB2_SESSION_EXPIRED;
+ 	up_write(&conn->session_lock);
+ 
+-	if (sess->user) {
+-		ksmbd_free_user(sess->user);
+-		sess->user = NULL;
+-	}
+ 	ksmbd_all_conn_set_status(sess_id, KSMBD_SESS_NEED_SETUP);
+ 
+ 	rsp->StructureSize = cpu_to_le16(4);
+diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
+index 7fe0981a7e4674..a604c54ae44dad 100644
+--- a/include/linux/cpufreq.h
++++ b/include/linux/cpufreq.h
+@@ -770,8 +770,8 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy,
+ int cpufreq_generic_frequency_table_verify(struct cpufreq_policy_data *policy);
+ 
+ int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
+-				 unsigned int target_freq,
+-				 unsigned int relation);
++				 unsigned int target_freq, unsigned int min,
++				 unsigned int max, unsigned int relation);
+ int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy,
+ 		unsigned int freq);
+ 
+@@ -836,12 +836,12 @@ static inline int cpufreq_table_find_index_dl(struct cpufreq_policy *policy,
+ 	return best;
+ }
+ 
+-/* Works only on sorted freq-tables */
+-static inline int cpufreq_table_find_index_l(struct cpufreq_policy *policy,
+-					     unsigned int target_freq,
+-					     bool efficiencies)
++static inline int find_index_l(struct cpufreq_policy *policy,
++			       unsigned int target_freq,
++			       unsigned int min, unsigned int max,
++			       bool efficiencies)
+ {
+-	target_freq = clamp_val(target_freq, policy->min, policy->max);
++	target_freq = clamp_val(target_freq, min, max);
+ 
+ 	if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
+ 		return cpufreq_table_find_index_al(policy, target_freq,
+@@ -851,6 +851,14 @@ static inline int cpufreq_table_find_index_l(struct cpufreq_policy *policy,
+ 						   efficiencies);
+ }
+ 
++/* Works only on sorted freq-tables */
++static inline int cpufreq_table_find_index_l(struct cpufreq_policy *policy,
++					     unsigned int target_freq,
++					     bool efficiencies)
++{
++	return find_index_l(policy, target_freq, policy->min, policy->max, efficiencies);
++}
++
+ /* Find highest freq at or below target in a table in ascending order */
+ static inline int cpufreq_table_find_index_ah(struct cpufreq_policy *policy,
+ 					      unsigned int target_freq,
+@@ -904,12 +912,12 @@ static inline int cpufreq_table_find_index_dh(struct cpufreq_policy *policy,
+ 	return best;
+ }
+ 
+-/* Works only on sorted freq-tables */
+-static inline int cpufreq_table_find_index_h(struct cpufreq_policy *policy,
+-					     unsigned int target_freq,
+-					     bool efficiencies)
++static inline int find_index_h(struct cpufreq_policy *policy,
++			       unsigned int target_freq,
++			       unsigned int min, unsigned int max,
++			       bool efficiencies)
+ {
+-	target_freq = clamp_val(target_freq, policy->min, policy->max);
++	target_freq = clamp_val(target_freq, min, max);
+ 
+ 	if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
+ 		return cpufreq_table_find_index_ah(policy, target_freq,
+@@ -919,6 +927,14 @@ static inline int cpufreq_table_find_index_h(struct cpufreq_policy *policy,
+ 						   efficiencies);
+ }
+ 
++/* Works only on sorted freq-tables */
++static inline int cpufreq_table_find_index_h(struct cpufreq_policy *policy,
++					     unsigned int target_freq,
++					     bool efficiencies)
++{
++	return find_index_h(policy, target_freq, policy->min, policy->max, efficiencies);
++}
++
+ /* Find closest freq to target in a table in ascending order */
+ static inline int cpufreq_table_find_index_ac(struct cpufreq_policy *policy,
+ 					      unsigned int target_freq,
+@@ -989,12 +1005,12 @@ static inline int cpufreq_table_find_index_dc(struct cpufreq_policy *policy,
+ 	return best;
+ }
+ 
+-/* Works only on sorted freq-tables */
+-static inline int cpufreq_table_find_index_c(struct cpufreq_policy *policy,
+-					     unsigned int target_freq,
+-					     bool efficiencies)
++static inline int find_index_c(struct cpufreq_policy *policy,
++			       unsigned int target_freq,
++			       unsigned int min, unsigned int max,
++			       bool efficiencies)
+ {
+-	target_freq = clamp_val(target_freq, policy->min, policy->max);
++	target_freq = clamp_val(target_freq, min, max);
+ 
+ 	if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
+ 		return cpufreq_table_find_index_ac(policy, target_freq,
+@@ -1004,7 +1020,17 @@ static inline int cpufreq_table_find_index_c(struct cpufreq_policy *policy,
+ 						   efficiencies);
+ }
+ 
+-static inline bool cpufreq_is_in_limits(struct cpufreq_policy *policy, int idx)
++/* Works only on sorted freq-tables */
++static inline int cpufreq_table_find_index_c(struct cpufreq_policy *policy,
++					     unsigned int target_freq,
++					     bool efficiencies)
++{
++	return find_index_c(policy, target_freq, policy->min, policy->max, efficiencies);
++}
++
++static inline bool cpufreq_is_in_limits(struct cpufreq_policy *policy,
++					unsigned int min, unsigned int max,
++					int idx)
+ {
+ 	unsigned int freq;
+ 
+@@ -1013,11 +1039,13 @@ static inline bool cpufreq_is_in_limits(struct cpufreq_policy *policy, int idx)
+ 
+ 	freq = policy->freq_table[idx].frequency;
+ 
+-	return freq == clamp_val(freq, policy->min, policy->max);
++	return freq == clamp_val(freq, min, max);
+ }
+ 
+ static inline int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
+ 						 unsigned int target_freq,
++						 unsigned int min,
++						 unsigned int max,
+ 						 unsigned int relation)
+ {
+ 	bool efficiencies = policy->efficiencies_available &&
+@@ -1028,29 +1056,26 @@ static inline int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
+ 	relation &= ~CPUFREQ_RELATION_E;
+ 
+ 	if (unlikely(policy->freq_table_sorted == CPUFREQ_TABLE_UNSORTED))
+-		return cpufreq_table_index_unsorted(policy, target_freq,
+-						    relation);
++		return cpufreq_table_index_unsorted(policy, target_freq, min,
++						    max, relation);
+ retry:
+ 	switch (relation) {
+ 	case CPUFREQ_RELATION_L:
+-		idx = cpufreq_table_find_index_l(policy, target_freq,
+-						 efficiencies);
++		idx = find_index_l(policy, target_freq, min, max, efficiencies);
+ 		break;
+ 	case CPUFREQ_RELATION_H:
+-		idx = cpufreq_table_find_index_h(policy, target_freq,
+-						 efficiencies);
++		idx = find_index_h(policy, target_freq, min, max, efficiencies);
+ 		break;
+ 	case CPUFREQ_RELATION_C:
+-		idx = cpufreq_table_find_index_c(policy, target_freq,
+-						 efficiencies);
++		idx = find_index_c(policy, target_freq, min, max, efficiencies);
+ 		break;
+ 	default:
+ 		WARN_ON_ONCE(1);
+ 		return 0;
+ 	}
+ 
+-	/* Limit frequency index to honor policy->min/max */
+-	if (!cpufreq_is_in_limits(policy, idx) && efficiencies) {
++	/* Limit frequency index to honor min and max */
++	if (!cpufreq_is_in_limits(policy, min, max, idx) && efficiencies) {
+ 		efficiencies = false;
+ 		goto retry;
+ 	}
+diff --git a/include/linux/iommu.h b/include/linux/iommu.h
+index bd722f47363520..10f7b1df072361 100644
+--- a/include/linux/iommu.h
++++ b/include/linux/iommu.h
+@@ -423,10 +423,10 @@ static inline int __iommu_copy_struct_from_user(
+ 	void *dst_data, const struct iommu_user_data *src_data,
+ 	unsigned int data_type, size_t data_len, size_t min_len)
+ {
+-	if (src_data->type != data_type)
+-		return -EINVAL;
+ 	if (WARN_ON(!dst_data || !src_data))
+ 		return -EINVAL;
++	if (src_data->type != data_type)
++		return -EINVAL;
+ 	if (src_data->len < min_len || data_len < src_data->len)
+ 		return -EINVAL;
+ 	return copy_struct_from_user(dst_data, data_len, src_data->uptr,
+@@ -439,8 +439,8 @@ static inline int __iommu_copy_struct_from_user(
+  *        include/uapi/linux/iommufd.h
+  * @user_data: Pointer to a struct iommu_user_data for user space data info
+  * @data_type: The data type of the @kdst. Must match with @user_data->type
+- * @min_last: The last memember of the data structure @kdst points in the
+- *            initial version.
++ * @min_last: The last member of the data structure @kdst points to in the
++ *            initial version.
+  * Return 0 for success, otherwise -error.
+  */
+ #define iommu_copy_struct_from_user(kdst, user_data, data_type, min_last) \
+diff --git a/include/linux/module.h b/include/linux/module.h
+index 88ecc5e9f52307..82a9527d43c768 100644
+--- a/include/linux/module.h
++++ b/include/linux/module.h
+@@ -162,6 +162,8 @@ extern void cleanup_module(void);
+ #define __INITRODATA_OR_MODULE __INITRODATA
+ #endif /*CONFIG_MODULES*/
+ 
++struct module_kobject *lookup_or_create_module_kobject(const char *name);
++
+ /* Generic info of form tag = "info" */
+ #define MODULE_INFO(tag, info) __MODULE_INFO(tag, tag, info)
+ 
+diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
+index 71d24328764065..40fce4193cc1dd 100644
+--- a/include/net/bluetooth/hci.h
++++ b/include/net/bluetooth/hci.h
+@@ -1913,6 +1913,8 @@ struct hci_cp_le_pa_create_sync {
+ 	__u8      sync_cte_type;
+ } __packed;
+ 
++#define HCI_OP_LE_PA_CREATE_SYNC_CANCEL	0x2045
++
+ #define HCI_OP_LE_PA_TERM_SYNC		0x2046
+ struct hci_cp_le_pa_term_sync {
+ 	__le16    handle;
+@@ -2812,7 +2814,7 @@ struct hci_evt_le_create_big_complete {
+ 	__le16  bis_handle[];
+ } __packed;
+ 
+-#define HCI_EVT_LE_BIG_SYNC_ESTABILISHED 0x1d
++#define HCI_EVT_LE_BIG_SYNC_ESTABLISHED 0x1d
+ struct hci_evt_le_big_sync_estabilished {
+ 	__u8    status;
+ 	__u8    handle;
+diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
+index 4245910ffc4a2d..4f3b537476e106 100644
+--- a/include/net/bluetooth/hci_core.h
++++ b/include/net/bluetooth/hci_core.h
+@@ -1105,10 +1105,8 @@ static inline struct hci_conn *hci_conn_hash_lookup_bis(struct hci_dev *hdev,
+ 	return NULL;
+ }
+ 
+-static inline struct hci_conn *hci_conn_hash_lookup_sid(struct hci_dev *hdev,
+-							__u8 sid,
+-							bdaddr_t *dst,
+-							__u8 dst_type)
++static inline struct hci_conn *
++hci_conn_hash_lookup_create_pa_sync(struct hci_dev *hdev)
+ {
+ 	struct hci_conn_hash *h = &hdev->conn_hash;
+ 	struct hci_conn  *c;
+@@ -1116,8 +1114,10 @@ static inline struct hci_conn *hci_conn_hash_lookup_sid(struct hci_dev *hdev,
+ 	rcu_read_lock();
+ 
+ 	list_for_each_entry_rcu(c, &h->list, list) {
+-		if (c->type != ISO_LINK  || bacmp(&c->dst, dst) ||
+-		    c->dst_type != dst_type || c->sid != sid)
++		if (c->type != ISO_LINK)
++			continue;
++
++		if (!test_bit(HCI_CONN_CREATE_PA_SYNC, &c->flags))
+ 			continue;
+ 
+ 		rcu_read_unlock();
+@@ -1506,8 +1506,6 @@ bool hci_setup_sync(struct hci_conn *conn, __u16 handle);
+ void hci_sco_setup(struct hci_conn *conn, __u8 status);
+ bool hci_iso_setup_path(struct hci_conn *conn);
+ int hci_le_create_cis_pending(struct hci_dev *hdev);
+-int hci_pa_create_sync_pending(struct hci_dev *hdev);
+-int hci_le_big_create_sync_pending(struct hci_dev *hdev);
+ int hci_conn_check_create_cis(struct hci_conn *conn);
+ 
+ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
+@@ -1548,9 +1546,9 @@ struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst,
+ 				 __u8 data_len, __u8 *data);
+ struct hci_conn *hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst,
+ 		       __u8 dst_type, __u8 sid, struct bt_iso_qos *qos);
+-int hci_le_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon,
+-			   struct bt_iso_qos *qos,
+-			   __u16 sync_handle, __u8 num_bis, __u8 bis[]);
++int hci_conn_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon,
++			     struct bt_iso_qos *qos, __u16 sync_handle,
++			     __u8 num_bis, __u8 bis[]);
+ int hci_conn_check_link_mode(struct hci_conn *conn);
+ int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level);
+ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
+diff --git a/include/net/bluetooth/hci_sync.h b/include/net/bluetooth/hci_sync.h
+index f3052cb252efdd..dbabc17b30cdfa 100644
+--- a/include/net/bluetooth/hci_sync.h
++++ b/include/net/bluetooth/hci_sync.h
+@@ -186,3 +186,6 @@ int hci_connect_le_sync(struct hci_dev *hdev, struct hci_conn *conn);
+ int hci_cancel_connect_sync(struct hci_dev *hdev, struct hci_conn *conn);
+ int hci_le_conn_update_sync(struct hci_dev *hdev, struct hci_conn *conn,
+ 			    struct hci_conn_params *params);
++
++int hci_connect_pa_sync(struct hci_dev *hdev, struct hci_conn *conn);
++int hci_connect_big_sync(struct hci_dev *hdev, struct hci_conn *conn);
+diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
+index bfe625b55d55d7..df3f5f07bc7c2e 100644
+--- a/include/net/xdp_sock.h
++++ b/include/net/xdp_sock.h
+@@ -71,9 +71,6 @@ struct xdp_sock {
+ 	 */
+ 	u32 tx_budget_spent;
+ 
+-	/* Protects generic receive. */
+-	spinlock_t rx_lock;
+-
+ 	/* Statistics */
+ 	u64 rx_dropped;
+ 	u64 rx_queue_full;
+diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h
+index bacb33f1e3e581..823fd5c7a3b188 100644
+--- a/include/net/xsk_buff_pool.h
++++ b/include/net/xsk_buff_pool.h
+@@ -55,6 +55,8 @@ struct xsk_buff_pool {
+ 	refcount_t users;
+ 	struct xdp_umem *umem;
+ 	struct work_struct work;
++	/* Protects generic receive in shared and non-shared umem mode. */
++	spinlock_t rx_lock;
+ 	struct list_head free_list;
+ 	struct list_head xskb_list;
+ 	u32 heads_cnt;
+diff --git a/include/sound/ump_convert.h b/include/sound/ump_convert.h
+index d099ae27f8491a..682499b871eac4 100644
+--- a/include/sound/ump_convert.h
++++ b/include/sound/ump_convert.h
+@@ -19,7 +19,7 @@ struct ump_cvt_to_ump_bank {
+ /* context for converting from MIDI1 byte stream to UMP packet */
+ struct ump_cvt_to_ump {
+ 	/* MIDI1 intermediate buffer */
+-	unsigned char buf[4];
++	unsigned char buf[6]; /* up to 6 bytes for SysEx */
+ 	int len;
+ 	int cmd_bytes;
+ 
+diff --git a/kernel/params.c b/kernel/params.c
+index 2e447f8ae183e7..33b2985b31c7fc 100644
+--- a/kernel/params.c
++++ b/kernel/params.c
+@@ -763,7 +763,7 @@ void destroy_params(const struct kernel_param *params, unsigned num)
+ 			params[i].ops->free(params[i].arg);
+ }
+ 
+-static struct module_kobject * __init locate_module_kobject(const char *name)
++struct module_kobject __modinit * lookup_or_create_module_kobject(const char *name)
+ {
+ 	struct module_kobject *mk;
+ 	struct kobject *kobj;
+@@ -805,7 +805,7 @@ static void __init kernel_add_sysfs_param(const char *name,
+ 	struct module_kobject *mk;
+ 	int err;
+ 
+-	mk = locate_module_kobject(name);
++	mk = lookup_or_create_module_kobject(name);
+ 	if (!mk)
+ 		return;
+ 
+@@ -876,7 +876,7 @@ static void __init version_sysfs_builtin(void)
+ 	int err;
+ 
+ 	for (vattr = __start___modver; vattr < __stop___modver; vattr++) {
+-		mk = locate_module_kobject(vattr->module_name);
++		mk = lookup_or_create_module_kobject(vattr->module_name);
+ 		if (mk) {
+ 			err = sysfs_create_file(&mk->kobj, &vattr->mattr.attr);
+ 			WARN_ON_ONCE(err);
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index ffe1422ab03f88..06104c2c66ab2a 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -6703,13 +6703,14 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
+ 		/* Copy the data into the page, so we can start over. */
+ 		ret = trace_seq_to_buffer(&iter->seq,
+ 					  page_address(spd.pages[i]),
+-					  trace_seq_used(&iter->seq));
++					  min((size_t)trace_seq_used(&iter->seq),
++						  PAGE_SIZE));
+ 		if (ret < 0) {
+ 			__free_page(spd.pages[i]);
+ 			break;
+ 		}
+ 		spd.partial[i].offset = 0;
+-		spd.partial[i].len = trace_seq_used(&iter->seq);
++		spd.partial[i].len = ret;
+ 
+ 		trace_seq_init(&iter->seq);
+ 	}
+diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
+index 6e7090e8bf3097..1ecc28a3946a12 100644
+--- a/kernel/trace/trace_output.c
++++ b/kernel/trace/trace_output.c
+@@ -950,11 +950,12 @@ enum print_line_t print_event_fields(struct trace_iterator *iter,
+ 	struct trace_event_call *call;
+ 	struct list_head *head;
+ 
++	lockdep_assert_held_read(&trace_event_sem);
++
+ 	/* ftrace defined events have separate call structures */
+ 	if (event->type <= __TRACE_LAST_TYPE) {
+ 		bool found = false;
+ 
+-		down_read(&trace_event_sem);
+ 		list_for_each_entry(call, &ftrace_events, list) {
+ 			if (call->event.type == event->type) {
+ 				found = true;
+@@ -964,7 +965,6 @@ enum print_line_t print_event_fields(struct trace_iterator *iter,
+ 			if (call->event.type > __TRACE_LAST_TYPE)
+ 				break;
+ 		}
+-		up_read(&trace_event_sem);
+ 		if (!found) {
+ 			trace_seq_printf(&iter->seq, "UNKNOWN TYPE %d\n", event->type);
+ 			goto out;
+diff --git a/mm/memblock.c b/mm/memblock.c
+index 095c18b5c430da..cc5ee323245e3d 100644
+--- a/mm/memblock.c
++++ b/mm/memblock.c
+@@ -2160,11 +2160,14 @@ static void __init memmap_init_reserved_pages(void)
+ 	struct memblock_region *region;
+ 	phys_addr_t start, end;
+ 	int nid;
++	unsigned long max_reserved;
+ 
+ 	/*
+ 	 * set nid on all reserved pages and also treat struct
+ 	 * pages for the NOMAP regions as PageReserved
+ 	 */
++repeat:
++	max_reserved = memblock.reserved.max;
+ 	for_each_mem_region(region) {
+ 		nid = memblock_get_region_node(region);
+ 		start = region->base;
+@@ -2173,8 +2176,15 @@ static void __init memmap_init_reserved_pages(void)
+ 		if (memblock_is_nomap(region))
+ 			reserve_bootmem_region(start, end, nid);
+ 
+-		memblock_set_node(start, end, &memblock.reserved, nid);
++		memblock_set_node(start, region->size, &memblock.reserved, nid);
+ 	}
++	/*
++	 * A changed 'max' means memblock.reserved has doubled its array,
++	 * which may result in a new reserved region before the current
++	 * 'start'. Now we should repeat the procedure to set its node id.
++	 */
++	if (max_reserved != memblock.reserved.max)
++		goto repeat;
+ 
+ 	/*
+ 	 * initialize struct pages for reserved regions that don't have
+diff --git a/mm/slub.c b/mm/slub.c
+index c26d9cd107ccbc..66f86e5328182d 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -2035,18 +2035,6 @@ static inline void free_slab_obj_exts(struct slab *slab)
+ 	slab->obj_exts = 0;
+ }
+ 
+-static inline bool need_slab_obj_ext(void)
+-{
+-	if (mem_alloc_profiling_enabled())
+-		return true;
+-
+-	/*
+-	 * CONFIG_MEMCG creates vector of obj_cgroup objects conditionally
+-	 * inside memcg_slab_post_alloc_hook. No other users for now.
+-	 */
+-	return false;
+-}
+-
+ #else /* CONFIG_SLAB_OBJ_EXT */
+ 
+ static inline void init_slab_obj_exts(struct slab *slab)
+@@ -2063,11 +2051,6 @@ static inline void free_slab_obj_exts(struct slab *slab)
+ {
+ }
+ 
+-static inline bool need_slab_obj_ext(void)
+-{
+-	return false;
+-}
+-
+ #endif /* CONFIG_SLAB_OBJ_EXT */
+ 
+ #ifdef CONFIG_MEM_ALLOC_PROFILING
+@@ -2099,7 +2082,7 @@ prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p)
+ static inline void
+ alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags)
+ {
+-	if (need_slab_obj_ext()) {
++	if (mem_alloc_profiling_enabled()) {
+ 		struct slabobj_ext *obj_exts;
+ 
+ 		obj_exts = prepare_slab_obj_exts_hook(s, flags, object);
+@@ -2577,8 +2560,12 @@ static __always_inline void account_slab(struct slab *slab, int order,
+ static __always_inline void unaccount_slab(struct slab *slab, int order,
+ 					   struct kmem_cache *s)
+ {
+-	if (memcg_kmem_online() || need_slab_obj_ext())
+-		free_slab_obj_exts(slab);
++	/*
++	 * The slab object extensions should now be freed regardless of
++	 * whether mem_alloc_profiling_enabled() is true, because profiling
++	 * might have been disabled after slab->obj_exts was allocated.
++	 */
++	free_slab_obj_exts(slab);
+ 
+ 	mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
+ 			    -(PAGE_SIZE << order));
+diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
+index e6591f487a5119..ae66fa0a5fb584 100644
+--- a/net/bluetooth/hci_conn.c
++++ b/net/bluetooth/hci_conn.c
+@@ -2061,103 +2061,6 @@ static int create_big_sync(struct hci_dev *hdev, void *data)
+ 	return hci_le_create_big(conn, &conn->iso_qos);
+ }
+ 
+-static void create_pa_complete(struct hci_dev *hdev, void *data, int err)
+-{
+-	bt_dev_dbg(hdev, "");
+-
+-	if (err)
+-		bt_dev_err(hdev, "Unable to create PA: %d", err);
+-}
+-
+-static bool hci_conn_check_create_pa_sync(struct hci_conn *conn)
+-{
+-	if (conn->type != ISO_LINK || conn->sid == HCI_SID_INVALID)
+-		return false;
+-
+-	return true;
+-}
+-
+-static int create_pa_sync(struct hci_dev *hdev, void *data)
+-{
+-	struct hci_cp_le_pa_create_sync *cp = NULL;
+-	struct hci_conn *conn;
+-	int err = 0;
+-
+-	hci_dev_lock(hdev);
+-
+-	rcu_read_lock();
+-
+-	/* The spec allows only one pending LE Periodic Advertising Create
+-	 * Sync command at a time. If the command is pending now, don't do
+-	 * anything. We check for pending connections after each PA Sync
+-	 * Established event.
+-	 *
+-	 * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
+-	 * page 2493:
+-	 *
+-	 * If the Host issues this command when another HCI_LE_Periodic_
+-	 * Advertising_Create_Sync command is pending, the Controller shall
+-	 * return the error code Command Disallowed (0x0C).
+-	 */
+-	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
+-		if (test_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags))
+-			goto unlock;
+-	}
+-
+-	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
+-		if (hci_conn_check_create_pa_sync(conn)) {
+-			struct bt_iso_qos *qos = &conn->iso_qos;
+-
+-			cp = kzalloc(sizeof(*cp), GFP_KERNEL);
+-			if (!cp) {
+-				err = -ENOMEM;
+-				goto unlock;
+-			}
+-
+-			cp->options = qos->bcast.options;
+-			cp->sid = conn->sid;
+-			cp->addr_type = conn->dst_type;
+-			bacpy(&cp->addr, &conn->dst);
+-			cp->skip = cpu_to_le16(qos->bcast.skip);
+-			cp->sync_timeout = cpu_to_le16(qos->bcast.sync_timeout);
+-			cp->sync_cte_type = qos->bcast.sync_cte_type;
+-
+-			break;
+-		}
+-	}
+-
+-unlock:
+-	rcu_read_unlock();
+-
+-	hci_dev_unlock(hdev);
+-
+-	if (cp) {
+-		hci_dev_set_flag(hdev, HCI_PA_SYNC);
+-		set_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags);
+-
+-		err = __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_CREATE_SYNC,
+-					    sizeof(*cp), cp, HCI_CMD_TIMEOUT);
+-		if (!err)
+-			err = hci_update_passive_scan_sync(hdev);
+-
+-		kfree(cp);
+-
+-		if (err) {
+-			hci_dev_clear_flag(hdev, HCI_PA_SYNC);
+-			clear_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags);
+-		}
+-	}
+-
+-	return err;
+-}
+-
+-int hci_pa_create_sync_pending(struct hci_dev *hdev)
+-{
+-	/* Queue start pa_create_sync and scan */
+-	return hci_cmd_sync_queue(hdev, create_pa_sync,
+-				  NULL, create_pa_complete);
+-}
+-
+ struct hci_conn *hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst,
+ 				    __u8 dst_type, __u8 sid,
+ 				    struct bt_iso_qos *qos)
+@@ -2172,97 +2075,18 @@ struct hci_conn *hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst,
+ 	conn->dst_type = dst_type;
+ 	conn->sid = sid;
+ 	conn->state = BT_LISTEN;
++	conn->conn_timeout = msecs_to_jiffies(qos->bcast.sync_timeout * 10);
+ 
+ 	hci_conn_hold(conn);
+ 
+-	hci_pa_create_sync_pending(hdev);
++	hci_connect_pa_sync(hdev, conn);
+ 
+ 	return conn;
+ }
+ 
+-static bool hci_conn_check_create_big_sync(struct hci_conn *conn)
+-{
+-	if (!conn->num_bis)
+-		return false;
+-
+-	return true;
+-}
+-
+-static void big_create_sync_complete(struct hci_dev *hdev, void *data, int err)
+-{
+-	bt_dev_dbg(hdev, "");
+-
+-	if (err)
+-		bt_dev_err(hdev, "Unable to create BIG sync: %d", err);
+-}
+-
+-static int big_create_sync(struct hci_dev *hdev, void *data)
+-{
+-	DEFINE_FLEX(struct hci_cp_le_big_create_sync, pdu, bis, num_bis, 0x11);
+-	struct hci_conn *conn;
+-
+-	rcu_read_lock();
+-
+-	pdu->num_bis = 0;
+-
+-	/* The spec allows only one pending LE BIG Create Sync command at
+-	 * a time. If the command is pending now, don't do anything. We
+-	 * check for pending connections after each BIG Sync Established
+-	 * event.
+-	 *
+-	 * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
+-	 * page 2586:
+-	 *
+-	 * If the Host sends this command when the Controller is in the
+-	 * process of synchronizing to any BIG, i.e. the HCI_LE_BIG_Sync_
+-	 * Established event has not been generated, the Controller shall
+-	 * return the error code Command Disallowed (0x0C).
+-	 */
+-	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
+-		if (test_bit(HCI_CONN_CREATE_BIG_SYNC, &conn->flags))
+-			goto unlock;
+-	}
+-
+-	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
+-		if (hci_conn_check_create_big_sync(conn)) {
+-			struct bt_iso_qos *qos = &conn->iso_qos;
+-
+-			set_bit(HCI_CONN_CREATE_BIG_SYNC, &conn->flags);
+-
+-			pdu->handle = qos->bcast.big;
+-			pdu->sync_handle = cpu_to_le16(conn->sync_handle);
+-			pdu->encryption = qos->bcast.encryption;
+-			memcpy(pdu->bcode, qos->bcast.bcode,
+-			       sizeof(pdu->bcode));
+-			pdu->mse = qos->bcast.mse;
+-			pdu->timeout = cpu_to_le16(qos->bcast.timeout);
+-			pdu->num_bis = conn->num_bis;
+-			memcpy(pdu->bis, conn->bis, conn->num_bis);
+-
+-			break;
+-		}
+-	}
+-
+-unlock:
+-	rcu_read_unlock();
+-
+-	if (!pdu->num_bis)
+-		return 0;
+-
+-	return hci_send_cmd(hdev, HCI_OP_LE_BIG_CREATE_SYNC,
+-			    struct_size(pdu, bis, pdu->num_bis), pdu);
+-}
+-
+-int hci_le_big_create_sync_pending(struct hci_dev *hdev)
+-{
+-	/* Queue big_create_sync */
+-	return hci_cmd_sync_queue_once(hdev, big_create_sync,
+-				       NULL, big_create_sync_complete);
+-}
+-
+-int hci_le_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon,
+-			   struct bt_iso_qos *qos,
+-			   __u16 sync_handle, __u8 num_bis, __u8 bis[])
++int hci_conn_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon,
++			     struct bt_iso_qos *qos, __u16 sync_handle,
++			     __u8 num_bis, __u8 bis[])
+ {
+ 	int err;
+ 
+@@ -2279,9 +2103,10 @@ int hci_le_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon,
+ 
+ 		hcon->num_bis = num_bis;
+ 		memcpy(hcon->bis, bis, num_bis);
++		hcon->conn_timeout = msecs_to_jiffies(qos->bcast.timeout * 10);
+ 	}
+ 
+-	return hci_le_big_create_sync_pending(hdev);
++	return hci_connect_big_sync(hdev, hcon);
+ }
+ 
+ static void create_big_complete(struct hci_dev *hdev, void *data, int err)
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index 96ad1b75d1c4d4..bc5b42fce2b801 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -6368,8 +6368,7 @@ static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data,
+ 
+ 	hci_dev_clear_flag(hdev, HCI_PA_SYNC);
+ 
+-	conn = hci_conn_hash_lookup_sid(hdev, ev->sid, &ev->bdaddr,
+-					ev->bdaddr_type);
++	conn = hci_conn_hash_lookup_create_pa_sync(hdev);
+ 	if (!conn) {
+ 		bt_dev_err(hdev,
+ 			   "Unable to find connection for dst %pMR sid 0x%2.2x",
+@@ -6408,9 +6407,6 @@ static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data,
+ 	}
+ 
+ unlock:
+-	/* Handle any other pending PA sync command */
+-	hci_pa_create_sync_pending(hdev);
+-
+ 	hci_dev_unlock(hdev);
+ }
+ 
+@@ -6922,7 +6918,7 @@ static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
+ 
+ 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
+ 
+-	if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_BIG_SYNC_ESTABILISHED,
++	if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_BIG_SYNC_ESTABLISHED,
+ 				flex_array_size(ev, bis, ev->num_bis)))
+ 		return;
+ 
+@@ -6992,9 +6988,6 @@ static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
+ 		}
+ 
+ unlock:
+-	/* Handle any other pending BIG sync command */
+-	hci_le_big_create_sync_pending(hdev);
+-
+ 	hci_dev_unlock(hdev);
+ }
+ 
+@@ -7116,8 +7109,8 @@ static const struct hci_le_ev {
+ 		     hci_le_create_big_complete_evt,
+ 		     sizeof(struct hci_evt_le_create_big_complete),
+ 		     HCI_MAX_EVENT_SIZE),
+-	/* [0x1d = HCI_EV_LE_BIG_SYNC_ESTABILISHED] */
+-	HCI_LE_EV_VL(HCI_EVT_LE_BIG_SYNC_ESTABILISHED,
++	/* [0x1d = HCI_EV_LE_BIG_SYNC_ESTABLISHED] */
++	HCI_LE_EV_VL(HCI_EVT_LE_BIG_SYNC_ESTABLISHED,
+ 		     hci_le_big_sync_established_evt,
+ 		     sizeof(struct hci_evt_le_big_sync_estabilished),
+ 		     HCI_MAX_EVENT_SIZE),
+diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
+index cb4d47ae129e8b..6597936fbd51b9 100644
+--- a/net/bluetooth/hci_sync.c
++++ b/net/bluetooth/hci_sync.c
+@@ -2717,16 +2717,16 @@ static u8 hci_update_accept_list_sync(struct hci_dev *hdev)
+ 
+ 	/* Force address filtering if PA Sync is in progress */
+ 	if (hci_dev_test_flag(hdev, HCI_PA_SYNC)) {
+-		struct hci_cp_le_pa_create_sync *sent;
++		struct hci_conn *conn;
+ 
+-		sent = hci_sent_cmd_data(hdev, HCI_OP_LE_PA_CREATE_SYNC);
+-		if (sent) {
++		conn = hci_conn_hash_lookup_create_pa_sync(hdev);
++		if (conn) {
+ 			struct conn_params pa;
+ 
+ 			memset(&pa, 0, sizeof(pa));
+ 
+-			bacpy(&pa.addr, &sent->addr);
+-			pa.addr_type = sent->addr_type;
++			bacpy(&pa.addr, &conn->dst);
++			pa.addr_type = conn->dst_type;
+ 
+ 			/* Clear first since there could be addresses left
+ 			 * behind.
+@@ -6887,3 +6887,143 @@ int hci_le_conn_update_sync(struct hci_dev *hdev, struct hci_conn *conn,
+ 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_CONN_UPDATE,
+ 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+ }
++
++static void create_pa_complete(struct hci_dev *hdev, void *data, int err)
++{
++	bt_dev_dbg(hdev, "err %d", err);
++
++	if (!err)
++		return;
++
++	hci_dev_clear_flag(hdev, HCI_PA_SYNC);
++
++	if (err == -ECANCELED)
++		return;
++
++	hci_dev_lock(hdev);
++
++	hci_update_passive_scan_sync(hdev);
++
++	hci_dev_unlock(hdev);
++}
++
++static int hci_le_pa_create_sync(struct hci_dev *hdev, void *data)
++{
++	struct hci_cp_le_pa_create_sync cp;
++	struct hci_conn *conn = data;
++	struct bt_iso_qos *qos = &conn->iso_qos;
++	int err;
++
++	if (!hci_conn_valid(hdev, conn))
++		return -ECANCELED;
++
++	if (hci_dev_test_and_set_flag(hdev, HCI_PA_SYNC))
++		return -EBUSY;
++
++	/* Mark HCI_CONN_CREATE_PA_SYNC so hci_update_passive_scan_sync can
++	 * program the address in the allow list so PA advertisements can be
++	 * received.
++	 */
++	set_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags);
++
++	hci_update_passive_scan_sync(hdev);
++
++	memset(&cp, 0, sizeof(cp));
++	cp.options = qos->bcast.options;
++	cp.sid = conn->sid;
++	cp.addr_type = conn->dst_type;
++	bacpy(&cp.addr, &conn->dst);
++	cp.skip = cpu_to_le16(qos->bcast.skip);
++	cp.sync_timeout = cpu_to_le16(qos->bcast.sync_timeout);
++	cp.sync_cte_type = qos->bcast.sync_cte_type;
++
++	/* The spec allows only one pending LE Periodic Advertising Create
++	 * Sync command at a time so we forcefully wait for PA Sync Established
++	 * event since cmd_work can only schedule one command at a time.
++	 *
++	 * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
++	 * page 2493:
++	 *
++	 * If the Host issues this command when another HCI_LE_Periodic_
++	 * Advertising_Create_Sync command is pending, the Controller shall
++	 * return the error code Command Disallowed (0x0C).
++	 */
++	err = __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_PA_CREATE_SYNC,
++				       sizeof(cp), &cp,
++				       HCI_EV_LE_PA_SYNC_ESTABLISHED,
++				       conn->conn_timeout, NULL);
++	if (err == -ETIMEDOUT)
++		__hci_cmd_sync_status(hdev, HCI_OP_LE_PA_CREATE_SYNC_CANCEL,
++				      0, NULL, HCI_CMD_TIMEOUT);
++
++	return err;
++}
++
++int hci_connect_pa_sync(struct hci_dev *hdev, struct hci_conn *conn)
++{
++	return hci_cmd_sync_queue_once(hdev, hci_le_pa_create_sync, conn,
++				       create_pa_complete);
++}
++
++static void create_big_complete(struct hci_dev *hdev, void *data, int err)
++{
++	struct hci_conn *conn = data;
++
++	bt_dev_dbg(hdev, "err %d", err);
++
++	if (err == -ECANCELED)
++		return;
++
++	if (hci_conn_valid(hdev, conn))
++		clear_bit(HCI_CONN_CREATE_BIG_SYNC, &conn->flags);
++}
++
++static int hci_le_big_create_sync(struct hci_dev *hdev, void *data)
++{
++	DEFINE_FLEX(struct hci_cp_le_big_create_sync, cp, bis, num_bis, 0x11);
++	struct hci_conn *conn = data;
++	struct bt_iso_qos *qos = &conn->iso_qos;
++	int err;
++
++	if (!hci_conn_valid(hdev, conn))
++		return -ECANCELED;
++
++	set_bit(HCI_CONN_CREATE_BIG_SYNC, &conn->flags);
++
++	memset(cp, 0, sizeof(*cp));
++	cp->handle = qos->bcast.big;
++	cp->sync_handle = cpu_to_le16(conn->sync_handle);
++	cp->encryption = qos->bcast.encryption;
++	memcpy(cp->bcode, qos->bcast.bcode, sizeof(cp->bcode));
++	cp->mse = qos->bcast.mse;
++	cp->timeout = cpu_to_le16(qos->bcast.timeout);
++	cp->num_bis = conn->num_bis;
++	memcpy(cp->bis, conn->bis, conn->num_bis);
++
++	/* The spec allows only one pending LE BIG Create Sync command at
++	 * a time, so we forcefully wait for BIG Sync Established event since
++	 * cmd_work can only schedule one command at a time.
++	 *
++	 * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
++	 * page 2586:
++	 *
++	 * If the Host sends this command when the Controller is in the
++	 * process of synchronizing to any BIG, i.e. the HCI_LE_BIG_Sync_
++	 * Established event has not been generated, the Controller shall
++	 * return the error code Command Disallowed (0x0C).
++	 */
++	err = __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_BIG_CREATE_SYNC,
++				       struct_size(cp, bis, cp->num_bis), cp,
++				       HCI_EVT_LE_BIG_SYNC_ESTABLISHED,
++				       conn->conn_timeout, NULL);
++	if (err == -ETIMEDOUT)
++		hci_le_big_terminate_sync(hdev, cp->handle);
++
++	return err;
++}
++
++int hci_connect_big_sync(struct hci_dev *hdev, struct hci_conn *conn)
++{
++	return hci_cmd_sync_queue_once(hdev, hci_le_big_create_sync, conn,
++				       create_big_complete);
++}
+diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c
+index 644b606743e212..72bf9b1db22471 100644
+--- a/net/bluetooth/iso.c
++++ b/net/bluetooth/iso.c
+@@ -1414,14 +1414,13 @@ static void iso_conn_big_sync(struct sock *sk)
+ 	lock_sock(sk);
+ 
+ 	if (!test_and_set_bit(BT_SK_BIG_SYNC, &iso_pi(sk)->flags)) {
+-		err = hci_le_big_create_sync(hdev, iso_pi(sk)->conn->hcon,
+-					     &iso_pi(sk)->qos,
+-					     iso_pi(sk)->sync_handle,
+-					     iso_pi(sk)->bc_num_bis,
+-					     iso_pi(sk)->bc_bis);
++		err = hci_conn_big_create_sync(hdev, iso_pi(sk)->conn->hcon,
++					       &iso_pi(sk)->qos,
++					       iso_pi(sk)->sync_handle,
++					       iso_pi(sk)->bc_num_bis,
++					       iso_pi(sk)->bc_bis);
+ 		if (err)
+-			bt_dev_err(hdev, "hci_le_big_create_sync: %d",
+-				   err);
++			bt_dev_err(hdev, "hci_big_create_sync: %d", err);
+ 	}
+ 
+ 	release_sock(sk);
+@@ -1855,7 +1854,7 @@ static void iso_conn_ready(struct iso_conn *conn)
+ 		if (test_bit(HCI_CONN_BIG_SYNC, &hcon->flags) ||
+ 		    test_bit(HCI_CONN_BIG_SYNC_FAILED, &hcon->flags)) {
+ 			ev = hci_recv_event_data(hcon->hdev,
+-						 HCI_EVT_LE_BIG_SYNC_ESTABILISHED);
++						 HCI_EVT_LE_BIG_SYNC_ESTABLISHED);
+ 
+ 			/* Get reference to PA sync parent socket, if it exists */
+ 			parent = iso_get_sock(&hcon->src, &hcon->dst,
+@@ -2047,12 +2046,11 @@ int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags)
+ 
+ 			if (!test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags) &&
+ 			    !test_and_set_bit(BT_SK_BIG_SYNC, &iso_pi(sk)->flags)) {
+-				err = hci_le_big_create_sync(hdev,
+-							     hcon,
+-							     &iso_pi(sk)->qos,
+-							     iso_pi(sk)->sync_handle,
+-							     iso_pi(sk)->bc_num_bis,
+-							     iso_pi(sk)->bc_bis);
++				err = hci_conn_big_create_sync(hdev, hcon,
++							       &iso_pi(sk)->qos,
++							       iso_pi(sk)->sync_handle,
++							       iso_pi(sk)->bc_num_bis,
++							       iso_pi(sk)->bc_bis);
+ 				if (err) {
+ 					bt_dev_err(hdev, "hci_le_big_create_sync: %d",
+ 						   err);
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index a55388fbf07c84..c219a8c596d3e5 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -7380,6 +7380,9 @@ static int l2cap_recv_frag(struct l2cap_conn *conn, struct sk_buff *skb,
+ 			return -ENOMEM;
+ 		/* Init rx_len */
+ 		conn->rx_len = len;
++
++		skb_set_delivery_time(conn->rx_skb, skb->tstamp,
++				      skb->tstamp_type);
+ 	}
+ 
+ 	/* Copy as much as the rx_skb can hold */
+diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
+index 2dfac79dc78b8b..e04ebe651c3347 100644
+--- a/net/ipv4/tcp_offload.c
++++ b/net/ipv4/tcp_offload.c
+@@ -435,7 +435,7 @@ static void tcp4_check_fraglist_gro(struct list_head *head, struct sk_buff *skb,
+ 				       iif, sdif);
+ 	NAPI_GRO_CB(skb)->is_flist = !sk;
+ 	if (sk)
+-		sock_put(sk);
++		sock_gen_put(sk);
+ }
+ 
+ INDIRECT_CALLABLE_SCOPE
+diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
+index ecfca59f31f13e..da5d4aea1b5915 100644
+--- a/net/ipv4/udp_offload.c
++++ b/net/ipv4/udp_offload.c
+@@ -247,6 +247,62 @@ static struct sk_buff *__udpv4_gso_segment_list_csum(struct sk_buff *segs)
+ 	return segs;
+ }
+ 
++static void __udpv6_gso_segment_csum(struct sk_buff *seg,
++				     struct in6_addr *oldip,
++				     const struct in6_addr *newip,
++				     __be16 *oldport, __be16 newport)
++{
++	struct udphdr *uh = udp_hdr(seg);
++
++	if (ipv6_addr_equal(oldip, newip) && *oldport == newport)
++		return;
++
++	if (uh->check) {
++		inet_proto_csum_replace16(&uh->check, seg, oldip->s6_addr32,
++					  newip->s6_addr32, true);
++
++		inet_proto_csum_replace2(&uh->check, seg, *oldport, newport,
++					 false);
++		if (!uh->check)
++			uh->check = CSUM_MANGLED_0;
++	}
++
++	*oldip = *newip;
++	*oldport = newport;
++}
++
++static struct sk_buff *__udpv6_gso_segment_list_csum(struct sk_buff *segs)
++{
++	const struct ipv6hdr *iph;
++	const struct udphdr *uh;
++	struct ipv6hdr *iph2;
++	struct sk_buff *seg;
++	struct udphdr *uh2;
++
++	seg = segs;
++	uh = udp_hdr(seg);
++	iph = ipv6_hdr(seg);
++	uh2 = udp_hdr(seg->next);
++	iph2 = ipv6_hdr(seg->next);
++
++	if (!(*(const u32 *)&uh->source ^ *(const u32 *)&uh2->source) &&
++	    ipv6_addr_equal(&iph->saddr, &iph2->saddr) &&
++	    ipv6_addr_equal(&iph->daddr, &iph2->daddr))
++		return segs;
++
++	while ((seg = seg->next)) {
++		uh2 = udp_hdr(seg);
++		iph2 = ipv6_hdr(seg);
++
++		__udpv6_gso_segment_csum(seg, &iph2->saddr, &iph->saddr,
++					 &uh2->source, uh->source);
++		__udpv6_gso_segment_csum(seg, &iph2->daddr, &iph->daddr,
++					 &uh2->dest, uh->dest);
++	}
++
++	return segs;
++}
++
+ static struct sk_buff *__udp_gso_segment_list(struct sk_buff *skb,
+ 					      netdev_features_t features,
+ 					      bool is_ipv6)
+@@ -259,7 +315,10 @@ static struct sk_buff *__udp_gso_segment_list(struct sk_buff *skb,
+ 
+ 	udp_hdr(skb)->len = htons(sizeof(struct udphdr) + mss);
+ 
+-	return is_ipv6 ? skb : __udpv4_gso_segment_list_csum(skb);
++	if (is_ipv6)
++		return __udpv6_gso_segment_list_csum(skb);
++	else
++		return __udpv4_gso_segment_list_csum(skb);
+ }
+ 
+ struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
+diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c
+index ae2da28f9dfb1c..5ab509a5fbdfcf 100644
+--- a/net/ipv6/tcpv6_offload.c
++++ b/net/ipv6/tcpv6_offload.c
+@@ -42,7 +42,7 @@ static void tcp6_check_fraglist_gro(struct list_head *head, struct sk_buff *skb,
+ 					iif, sdif);
+ 	NAPI_GRO_CB(skb)->is_flist = !sk;
+ 	if (sk)
+-		sock_put(sk);
++		sock_gen_put(sk);
+ #endif /* IS_ENABLED(CONFIG_IPV6) */
+ }
+ 
+diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
+index c69b999fae171c..9b6d79bd873712 100644
+--- a/net/sched/sch_drr.c
++++ b/net/sched/sch_drr.c
+@@ -35,6 +35,11 @@ struct drr_sched {
+ 	struct Qdisc_class_hash		clhash;
+ };
+ 
++static bool cl_is_active(struct drr_class *cl)
++{
++	return !list_empty(&cl->alist);
++}
++
+ static struct drr_class *drr_find_class(struct Qdisc *sch, u32 classid)
+ {
+ 	struct drr_sched *q = qdisc_priv(sch);
+@@ -105,6 +110,7 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
+ 		return -ENOBUFS;
+ 
+ 	gnet_stats_basic_sync_init(&cl->bstats);
++	INIT_LIST_HEAD(&cl->alist);
+ 	cl->common.classid = classid;
+ 	cl->quantum	   = quantum;
+ 	cl->qdisc	   = qdisc_create_dflt(sch->dev_queue,
+@@ -229,7 +235,7 @@ static void drr_qlen_notify(struct Qdisc *csh, unsigned long arg)
+ {
+ 	struct drr_class *cl = (struct drr_class *)arg;
+ 
+-	list_del(&cl->alist);
++	list_del_init(&cl->alist);
+ }
+ 
+ static int drr_dump_class(struct Qdisc *sch, unsigned long arg,
+@@ -336,7 +342,6 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ 	struct drr_sched *q = qdisc_priv(sch);
+ 	struct drr_class *cl;
+ 	int err = 0;
+-	bool first;
+ 
+ 	cl = drr_classify(skb, sch, &err);
+ 	if (cl == NULL) {
+@@ -346,7 +351,6 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ 		return err;
+ 	}
+ 
+-	first = !cl->qdisc->q.qlen;
+ 	err = qdisc_enqueue(skb, cl->qdisc, to_free);
+ 	if (unlikely(err != NET_XMIT_SUCCESS)) {
+ 		if (net_xmit_drop_count(err)) {
+@@ -356,7 +360,7 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ 		return err;
+ 	}
+ 
+-	if (first) {
++	if (!cl_is_active(cl)) {
+ 		list_add_tail(&cl->alist, &q->active);
+ 		cl->deficit = cl->quantum;
+ 	}
+@@ -390,7 +394,7 @@ static struct sk_buff *drr_dequeue(struct Qdisc *sch)
+ 			if (unlikely(skb == NULL))
+ 				goto out;
+ 			if (cl->qdisc->q.qlen == 0)
+-				list_del(&cl->alist);
++				list_del_init(&cl->alist);
+ 
+ 			bstats_update(&cl->bstats, skb);
+ 			qdisc_bstats_update(sch, skb);
+@@ -431,7 +435,7 @@ static void drr_reset_qdisc(struct Qdisc *sch)
+ 	for (i = 0; i < q->clhash.hashsize; i++) {
+ 		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
+ 			if (cl->qdisc->q.qlen)
+-				list_del(&cl->alist);
++				list_del_init(&cl->alist);
+ 			qdisc_reset(cl->qdisc);
+ 		}
+ 	}
+diff --git a/net/sched/sch_ets.c b/net/sched/sch_ets.c
+index 516038a4416380..2c069f0181c62b 100644
+--- a/net/sched/sch_ets.c
++++ b/net/sched/sch_ets.c
+@@ -74,6 +74,11 @@ static const struct nla_policy ets_class_policy[TCA_ETS_MAX + 1] = {
+ 	[TCA_ETS_QUANTA_BAND] = { .type = NLA_U32 },
+ };
+ 
++static bool cl_is_active(struct ets_class *cl)
++{
++	return !list_empty(&cl->alist);
++}
++
+ static int ets_quantum_parse(struct Qdisc *sch, const struct nlattr *attr,
+ 			     unsigned int *quantum,
+ 			     struct netlink_ext_ack *extack)
+@@ -293,7 +298,7 @@ static void ets_class_qlen_notify(struct Qdisc *sch, unsigned long arg)
+ 	 * to remove them.
+ 	 */
+ 	if (!ets_class_is_strict(q, cl) && sch->q.qlen)
+-		list_del(&cl->alist);
++		list_del_init(&cl->alist);
+ }
+ 
+ static int ets_class_dump(struct Qdisc *sch, unsigned long arg,
+@@ -416,7 +421,6 @@ static int ets_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ 	struct ets_sched *q = qdisc_priv(sch);
+ 	struct ets_class *cl;
+ 	int err = 0;
+-	bool first;
+ 
+ 	cl = ets_classify(skb, sch, &err);
+ 	if (!cl) {
+@@ -426,7 +430,6 @@ static int ets_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ 		return err;
+ 	}
+ 
+-	first = !cl->qdisc->q.qlen;
+ 	err = qdisc_enqueue(skb, cl->qdisc, to_free);
+ 	if (unlikely(err != NET_XMIT_SUCCESS)) {
+ 		if (net_xmit_drop_count(err)) {
+@@ -436,7 +439,7 @@ static int ets_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ 		return err;
+ 	}
+ 
+-	if (first && !ets_class_is_strict(q, cl)) {
++	if (!cl_is_active(cl) && !ets_class_is_strict(q, cl)) {
+ 		list_add_tail(&cl->alist, &q->active);
+ 		cl->deficit = cl->quantum;
+ 	}
+@@ -488,7 +491,7 @@ static struct sk_buff *ets_qdisc_dequeue(struct Qdisc *sch)
+ 			if (unlikely(!skb))
+ 				goto out;
+ 			if (cl->qdisc->q.qlen == 0)
+-				list_del(&cl->alist);
++				list_del_init(&cl->alist);
+ 			return ets_qdisc_dequeue_skb(sch, skb);
+ 		}
+ 
+@@ -657,7 +660,7 @@ static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt,
+ 	}
+ 	for (i = q->nbands; i < oldbands; i++) {
+ 		if (i >= q->nstrict && q->classes[i].qdisc->q.qlen)
+-			list_del(&q->classes[i].alist);
++			list_del_init(&q->classes[i].alist);
+ 		qdisc_tree_flush_backlog(q->classes[i].qdisc);
+ 	}
+ 	WRITE_ONCE(q->nstrict, nstrict);
+@@ -713,7 +716,7 @@ static void ets_qdisc_reset(struct Qdisc *sch)
+ 
+ 	for (band = q->nstrict; band < q->nbands; band++) {
+ 		if (q->classes[band].qdisc->q.qlen)
+-			list_del(&q->classes[band].alist);
++			list_del_init(&q->classes[band].alist);
+ 	}
+ 	for (band = 0; band < q->nbands; band++)
+ 		qdisc_reset(q->classes[band].qdisc);
+diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
+index 5bb4ab9941d6e9..cb8c525ea20eab 100644
+--- a/net/sched/sch_hfsc.c
++++ b/net/sched/sch_hfsc.c
+@@ -203,7 +203,10 @@ eltree_insert(struct hfsc_class *cl)
+ static inline void
+ eltree_remove(struct hfsc_class *cl)
+ {
+-	rb_erase(&cl->el_node, &cl->sched->eligible);
++	if (!RB_EMPTY_NODE(&cl->el_node)) {
++		rb_erase(&cl->el_node, &cl->sched->eligible);
++		RB_CLEAR_NODE(&cl->el_node);
++	}
+ }
+ 
+ static inline void
+@@ -1225,7 +1228,8 @@ hfsc_qlen_notify(struct Qdisc *sch, unsigned long arg)
+ 	/* vttree is now handled in update_vf() so that update_vf(cl, 0, 0)
+ 	 * needs to be called explicitly to remove a class from vttree.
+ 	 */
+-	update_vf(cl, 0, 0);
++	if (cl->cl_nactive)
++		update_vf(cl, 0, 0);
+ 	if (cl->cl_flags & HFSC_RSC)
+ 		eltree_remove(cl);
+ }
+@@ -1565,7 +1569,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
+ 		return err;
+ 	}
+ 
+-	if (first) {
++	if (first && !cl->cl_nactive) {
+ 		if (cl->cl_flags & HFSC_RSC)
+ 			init_ed(cl, len);
+ 		if (cl->cl_flags & HFSC_FSC)
+diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
+index ff3de37874e4b3..12cccc84d58a0e 100644
+--- a/net/sched/sch_htb.c
++++ b/net/sched/sch_htb.c
+@@ -1485,6 +1485,8 @@ static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg)
+ {
+ 	struct htb_class *cl = (struct htb_class *)arg;
+ 
++	if (!cl->prio_activity)
++		return;
+ 	htb_deactivate(qdisc_priv(sch), cl);
+ }
+ 
+diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
+index d584c0c25899cc..aa4fbd2fae29eb 100644
+--- a/net/sched/sch_qfq.c
++++ b/net/sched/sch_qfq.c
+@@ -202,6 +202,11 @@ struct qfq_sched {
+  */
+ enum update_reason {enqueue, requeue};
+ 
++static bool cl_is_active(struct qfq_class *cl)
++{
++	return !list_empty(&cl->alist);
++}
++
+ static struct qfq_class *qfq_find_class(struct Qdisc *sch, u32 classid)
+ {
+ 	struct qfq_sched *q = qdisc_priv(sch);
+@@ -347,7 +352,7 @@ static void qfq_deactivate_class(struct qfq_sched *q, struct qfq_class *cl)
+ 	struct qfq_aggregate *agg = cl->agg;
+ 
+ 
+-	list_del(&cl->alist); /* remove from RR queue of the aggregate */
++	list_del_init(&cl->alist); /* remove from RR queue of the aggregate */
+ 	if (list_empty(&agg->active)) /* agg is now inactive */
+ 		qfq_deactivate_agg(q, agg);
+ }
+@@ -477,6 +482,7 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
+ 	gnet_stats_basic_sync_init(&cl->bstats);
+ 	cl->common.classid = classid;
+ 	cl->deficit = lmax;
++	INIT_LIST_HEAD(&cl->alist);
+ 
+ 	cl->qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
+ 				      classid, NULL);
+@@ -985,7 +991,7 @@ static struct sk_buff *agg_dequeue(struct qfq_aggregate *agg,
+ 	cl->deficit -= (int) len;
+ 
+ 	if (cl->qdisc->q.qlen == 0) /* no more packets, remove from list */
+-		list_del(&cl->alist);
++		list_del_init(&cl->alist);
+ 	else if (cl->deficit < qdisc_pkt_len(cl->qdisc->ops->peek(cl->qdisc))) {
+ 		cl->deficit += agg->lmax;
+ 		list_move_tail(&cl->alist, &agg->active);
+@@ -1217,7 +1223,6 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ 	struct qfq_class *cl;
+ 	struct qfq_aggregate *agg;
+ 	int err = 0;
+-	bool first;
+ 
+ 	cl = qfq_classify(skb, sch, &err);
+ 	if (cl == NULL) {
+@@ -1239,7 +1244,6 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ 	}
+ 
+ 	gso_segs = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
+-	first = !cl->qdisc->q.qlen;
+ 	err = qdisc_enqueue(skb, cl->qdisc, to_free);
+ 	if (unlikely(err != NET_XMIT_SUCCESS)) {
+ 		pr_debug("qfq_enqueue: enqueue failed %d\n", err);
+@@ -1255,8 +1259,8 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ 	++sch->q.qlen;
+ 
+ 	agg = cl->agg;
+-	/* if the queue was not empty, then done here */
+-	if (!first) {
++	/* if the class is active, then done here */
++	if (cl_is_active(cl)) {
+ 		if (unlikely(skb == cl->qdisc->ops->peek(cl->qdisc)) &&
+ 		    list_first_entry(&agg->active, struct qfq_class, alist)
+ 		    == cl && cl->deficit < len)
+@@ -1418,6 +1422,8 @@ static void qfq_qlen_notify(struct Qdisc *sch, unsigned long arg)
+ 	struct qfq_sched *q = qdisc_priv(sch);
+ 	struct qfq_class *cl = (struct qfq_class *)arg;
+ 
++	if (list_empty(&cl->alist))
++		return;
+ 	qfq_deactivate_class(q, cl);
+ }
+ 
+diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
+index b57d5d2904eb46..f031b07baa57a9 100644
+--- a/net/xdp/xsk.c
++++ b/net/xdp/xsk.c
+@@ -338,13 +338,14 @@ int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
+ 	u32 len = xdp_get_buff_len(xdp);
+ 	int err;
+ 
+-	spin_lock_bh(&xs->rx_lock);
+ 	err = xsk_rcv_check(xs, xdp, len);
+ 	if (!err) {
++		spin_lock_bh(&xs->pool->rx_lock);
+ 		err = __xsk_rcv(xs, xdp, len);
+ 		xsk_flush(xs);
++		spin_unlock_bh(&xs->pool->rx_lock);
+ 	}
+-	spin_unlock_bh(&xs->rx_lock);
++
+ 	return err;
+ }
+ 
+@@ -1720,7 +1721,6 @@ static int xsk_create(struct net *net, struct socket *sock, int protocol,
+ 	xs = xdp_sk(sk);
+ 	xs->state = XSK_READY;
+ 	mutex_init(&xs->mutex);
+-	spin_lock_init(&xs->rx_lock);
+ 
+ 	INIT_LIST_HEAD(&xs->map_list);
+ 	spin_lock_init(&xs->map_list_lock);
+diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
+index 87e865b9b83af9..b69dbd8615fc47 100644
+--- a/net/xdp/xsk_buff_pool.c
++++ b/net/xdp/xsk_buff_pool.c
+@@ -87,6 +87,7 @@ struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
+ 	pool->addrs = umem->addrs;
+ 	pool->tx_metadata_len = umem->tx_metadata_len;
+ 	pool->tx_sw_csum = umem->flags & XDP_UMEM_TX_SW_CSUM;
++	spin_lock_init(&pool->rx_lock);
+ 	INIT_LIST_HEAD(&pool->free_list);
+ 	INIT_LIST_HEAD(&pool->xskb_list);
+ 	INIT_LIST_HEAD(&pool->xsk_tx_list);
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 4171aa22747c33..db72c5fce9d183 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -440,6 +440,10 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
+ 		alc_update_coef_idx(codec, 0x67, 0xf000, 0x3000);
+ 		fallthrough;
+ 	case 0x10ec0215:
++	case 0x10ec0236:
++	case 0x10ec0245:
++	case 0x10ec0256:
++	case 0x10ec0257:
+ 	case 0x10ec0285:
+ 	case 0x10ec0289:
+ 		alc_update_coef_idx(codec, 0x36, 1<<13, 0);
+@@ -447,12 +451,8 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
+ 	case 0x10ec0230:
+ 	case 0x10ec0233:
+ 	case 0x10ec0235:
+-	case 0x10ec0236:
+-	case 0x10ec0245:
+ 	case 0x10ec0255:
+-	case 0x10ec0256:
+ 	case 0x19e58326:
+-	case 0x10ec0257:
+ 	case 0x10ec0282:
+ 	case 0x10ec0283:
+ 	case 0x10ec0286:
+@@ -10687,8 +10687,8 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8ca7, "HP ZBook Fury", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8caf, "HP Elite mt645 G8 Mobile Thin Client", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ 	SND_PCI_QUIRK(0x103c, 0x8cbd, "HP Pavilion Aero Laptop 13-bg0xxx", ALC245_FIXUP_HP_X360_MUTE_LEDS),
+-	SND_PCI_QUIRK(0x103c, 0x8cdd, "HP Spectre", ALC287_FIXUP_CS35L41_I2C_2),
+-	SND_PCI_QUIRK(0x103c, 0x8cde, "HP Spectre", ALC287_FIXUP_CS35L41_I2C_2),
++	SND_PCI_QUIRK(0x103c, 0x8cdd, "HP Spectre", ALC245_FIXUP_HP_SPECTRE_X360_EU0XXX),
++	SND_PCI_QUIRK(0x103c, 0x8cde, "HP OmniBook Ultra Flip Laptop 14t", ALC245_FIXUP_HP_SPECTRE_X360_EU0XXX),
+ 	SND_PCI_QUIRK(0x103c, 0x8cdf, "HP SnowWhite", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8ce0, "HP SnowWhite", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8cf5, "HP ZBook Studio 16", ALC245_FIXUP_CS35L41_SPI_4_HP_GPIO_LED),
+@@ -10723,10 +10723,10 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1043, 0x12a3, "Asus N7691ZM", ALC269_FIXUP_ASUS_N7601ZM),
+ 	SND_PCI_QUIRK(0x1043, 0x12af, "ASUS UX582ZS", ALC245_FIXUP_CS35L41_SPI_2),
+ 	SND_PCI_QUIRK(0x1043, 0x12b4, "ASUS B3405CCA / P3405CCA", ALC294_FIXUP_ASUS_CS35L41_SPI_2),
+-	SND_PCI_QUIRK(0x1043, 0x12e0, "ASUS X541SA", ALC256_FIXUP_ASUS_MIC),
+-	SND_PCI_QUIRK(0x1043, 0x12f0, "ASUS X541UV", ALC256_FIXUP_ASUS_MIC),
++	SND_PCI_QUIRK(0x1043, 0x12e0, "ASUS X541SA", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1043, 0x12f0, "ASUS X541UV", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1043, 0x1313, "Asus K42JZ", ALC269VB_FIXUP_ASUS_MIC_NO_PRESENCE),
+-	SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC),
++	SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
+ 	SND_PCI_QUIRK(0x1043, 0x1433, "ASUS GX650PY/PZ/PV/PU/PYV/PZV/PIV/PVV", ALC285_FIXUP_ASUS_I2C_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x1460, "Asus VivoBook 15", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE),
+@@ -10780,7 +10780,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1043, 0x1c92, "ASUS ROG Strix G15", ALC285_FIXUP_ASUS_G533Z_PINS),
+ 	SND_PCI_QUIRK(0x1043, 0x1c9f, "ASUS G614JU/JV/JI", ALC285_FIXUP_ASUS_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x1caf, "ASUS G634JY/JZ/JI/JG", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS),
+-	SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC),
++	SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1043, 0x1ccf, "ASUS G814JU/JV/JI", ALC245_FIXUP_CS35L41_SPI_2),
+ 	SND_PCI_QUIRK(0x1043, 0x1cdf, "ASUS G814JY/JZ/JG", ALC245_FIXUP_CS35L41_SPI_2),
+ 	SND_PCI_QUIRK(0x1043, 0x1cef, "ASUS G834JY/JZ/JI/JG", ALC285_FIXUP_ASUS_HEADSET_MIC),
+diff --git a/sound/soc/amd/acp/acp-i2s.c b/sound/soc/amd/acp/acp-i2s.c
+index 92c5ff0deea2cd..607a3eaeb6da8a 100644
+--- a/sound/soc/amd/acp/acp-i2s.c
++++ b/sound/soc/amd/acp/acp-i2s.c
+@@ -101,7 +101,7 @@ static int acp_i2s_set_tdm_slot(struct snd_soc_dai *dai, u32 tx_mask, u32 rx_mas
+ 	struct acp_stream *stream;
+ 	int slot_len, no_of_slots;
+ 
+-	chip = dev_get_platdata(dev);
++	chip = dev_get_drvdata(dev->parent);
+ 	switch (slot_width) {
+ 	case SLOT_WIDTH_8:
+ 		slot_len = 8;
+diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
+index 0d9d1d250f2b5e..6a72561c4189b5 100644
+--- a/sound/soc/codecs/Kconfig
++++ b/sound/soc/codecs/Kconfig
+@@ -746,10 +746,9 @@ config SND_SOC_CS_AMP_LIB
+ 	tristate
+ 
+ config SND_SOC_CS_AMP_LIB_TEST
+-	tristate "KUnit test for Cirrus Logic cs-amp-lib"
+-	depends on KUNIT
++	tristate "KUnit test for Cirrus Logic cs-amp-lib" if !KUNIT_ALL_TESTS
++	depends on SND_SOC_CS_AMP_LIB && KUNIT
+ 	default KUNIT_ALL_TESTS
+-	select SND_SOC_CS_AMP_LIB
+ 	help
+ 	  This builds KUnit tests for the Cirrus Logic common
+ 	  amplifier library.
+diff --git a/sound/soc/generic/simple-card-utils.c b/sound/soc/generic/simple-card-utils.c
+index 975ffd2cad292c..809dbb9ded3658 100644
+--- a/sound/soc/generic/simple-card-utils.c
++++ b/sound/soc/generic/simple-card-utils.c
+@@ -1150,9 +1150,9 @@ void graph_util_parse_link_direction(struct device_node *np,
+ 	bool is_playback_only = of_property_read_bool(np, "playback-only");
+ 	bool is_capture_only  = of_property_read_bool(np, "capture-only");
+ 
+-	if (is_playback_only)
++	if (playback_only)
+ 		*playback_only = is_playback_only;
+-	if (is_capture_only)
++	if (capture_only)
+ 		*capture_only = is_capture_only;
+ }
+ EXPORT_SYMBOL_GPL(graph_util_parse_link_direction);
+diff --git a/sound/soc/sdw_utils/soc_sdw_rt_dmic.c b/sound/soc/sdw_utils/soc_sdw_rt_dmic.c
+index 7f24806d809d9d..74bca3d04e4f1e 100644
+--- a/sound/soc/sdw_utils/soc_sdw_rt_dmic.c
++++ b/sound/soc/sdw_utils/soc_sdw_rt_dmic.c
+@@ -29,6 +29,8 @@ int asoc_sdw_rt_dmic_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_da
+ 		mic_name = devm_kasprintf(card->dev, GFP_KERNEL, "rt715-sdca");
+ 	else
+ 		mic_name = devm_kasprintf(card->dev, GFP_KERNEL, "%s", component->name_prefix);
++	if (!mic_name)
++		return -ENOMEM;
+ 
+ 	card->components = devm_kasprintf(card->dev, GFP_KERNEL,
+ 					  "%s mic:%s", card->components,
+diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
+index 20248a29d1674a..e3c8d4f20b9c13 100644
+--- a/sound/soc/soc-core.c
++++ b/sound/soc/soc-core.c
+@@ -3057,7 +3057,7 @@ int snd_soc_of_parse_pin_switches(struct snd_soc_card *card, const char *prop)
+ 	unsigned int i, nb_controls;
+ 	int ret;
+ 
+-	if (!of_property_read_bool(dev->of_node, prop))
++	if (!of_property_present(dev->of_node, prop))
+ 		return 0;
+ 
+ 	strings = devm_kcalloc(dev, nb_controls_max,
+@@ -3131,23 +3131,17 @@ int snd_soc_of_parse_tdm_slot(struct device_node *np,
+ 	if (rx_mask)
+ 		snd_soc_of_get_slot_mask(np, "dai-tdm-slot-rx-mask", rx_mask);
+ 
+-	if (of_property_read_bool(np, "dai-tdm-slot-num")) {
+-		ret = of_property_read_u32(np, "dai-tdm-slot-num", &val);
+-		if (ret)
+-			return ret;
+-
+-		if (slots)
+-			*slots = val;
+-	}
+-
+-	if (of_property_read_bool(np, "dai-tdm-slot-width")) {
+-		ret = of_property_read_u32(np, "dai-tdm-slot-width", &val);
+-		if (ret)
+-			return ret;
++	ret = of_property_read_u32(np, "dai-tdm-slot-num", &val);
++	if (ret && ret != -EINVAL)
++		return ret;
++	if (!ret && slots)
++		*slots = val;
+ 
+-		if (slot_width)
+-			*slot_width = val;
+-	}
++	ret = of_property_read_u32(np, "dai-tdm-slot-width", &val);
++	if (ret && ret != -EINVAL)
++		return ret;
++	if (!ret && slot_width)
++		*slot_width = val;
+ 
+ 	return 0;
+ }
+@@ -3411,12 +3405,12 @@ unsigned int snd_soc_daifmt_parse_clock_provider_raw(struct device_node *np,
+ 	 * check "[prefix]frame-master"
+ 	 */
+ 	snprintf(prop, sizeof(prop), "%sbitclock-master", prefix);
+-	bit = of_property_read_bool(np, prop);
++	bit = of_property_present(np, prop);
+ 	if (bit && bitclkmaster)
+ 		*bitclkmaster = of_parse_phandle(np, prop, 0);
+ 
+ 	snprintf(prop, sizeof(prop), "%sframe-master", prefix);
+-	frame = of_property_read_bool(np, prop);
++	frame = of_property_present(np, prop);
+ 	if (frame && framemaster)
+ 		*framemaster = of_parse_phandle(np, prop, 0);
+ 
+diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
+index 1102599403c534..0e21ff9f7b74eb 100644
+--- a/sound/soc/soc-pcm.c
++++ b/sound/soc/soc-pcm.c
+@@ -1633,10 +1633,13 @@ static int dpcm_add_paths(struct snd_soc_pcm_runtime *fe, int stream,
+ 		/*
+ 		 * Filter for systems with 'component_chaining' enabled.
+ 		 * This helps to avoid unnecessary re-configuration of an
+-		 * already active BE on such systems.
++		 * already active BE on such systems and ensures the BE DAI
++		 * widget is powered ON after hw_params() BE DAI callback.
+ 		 */
+ 		if (fe->card->component_chaining &&
+ 		    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_NEW) &&
++		    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_OPEN) &&
++		    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_PARAMS) &&
+ 		    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_CLOSE))
+ 			continue;
+ 
+diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
+index a29f28eb7d0c64..f36ec98da4601d 100644
+--- a/sound/usb/endpoint.c
++++ b/sound/usb/endpoint.c
+@@ -926,6 +926,8 @@ static int endpoint_set_interface(struct snd_usb_audio *chip,
+ {
+ 	int altset = set ? ep->altsetting : 0;
+ 	int err;
++	int retries = 0;
++	const int max_retries = 5;
+ 
+ 	if (ep->iface_ref->altset == altset)
+ 		return 0;
+@@ -935,8 +937,13 @@ static int endpoint_set_interface(struct snd_usb_audio *chip,
+ 
+ 	usb_audio_dbg(chip, "Setting usb interface %d:%d for EP 0x%x\n",
+ 		      ep->iface, altset, ep->ep_num);
++retry:
+ 	err = usb_set_interface(chip->dev, ep->iface, altset);
+ 	if (err < 0) {
++		if (err == -EPROTO && ++retries <= max_retries) {
++			msleep(5 * (1 << (retries - 1)));
++			goto retry;
++		}
+ 		usb_audio_err_ratelimited(
+ 			chip, "%d:%d: usb_set_interface failed (%d)\n",
+ 			ep->iface, altset, err);
+diff --git a/sound/usb/format.c b/sound/usb/format.c
+index 6049d957694ca6..a9283b2bd2f4e5 100644
+--- a/sound/usb/format.c
++++ b/sound/usb/format.c
+@@ -260,7 +260,8 @@ static int parse_audio_format_rates_v1(struct snd_usb_audio *chip, struct audiof
+ 	}
+ 
+ 	/* Jabra Evolve 65 headset */
+-	if (chip->usb_id == USB_ID(0x0b0e, 0x030b)) {
++	if (chip->usb_id == USB_ID(0x0b0e, 0x030b) ||
++	    chip->usb_id == USB_ID(0x0b0e, 0x030c)) {
+ 		/* only 48kHz for playback while keeping 16kHz for capture */
+ 		if (fp->nr_rates != 1)
+ 			return set_fixed_rate(fp, 48000, SNDRV_PCM_RATE_48000);


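Several of the sch_drr, sch_ets, and sch_qfq hunks above apply the same fix
pattern: a class's membership on the scheduler's active list is now tracked
by the list node itself (INIT_LIST_HEAD() at class creation, list_del_init()
in place of list_del() on removal, and a list_empty() test in the new
cl_is_active() helpers) instead of being inferred from the child qdisc's
queue length. The child's qlen can change without the class actually being
unlinked, so the old "first = !cl->qdisc->q.qlen" heuristic could re-add an
already linked node and corrupt the list. Below is a self-contained
userspace sketch of the idiom; the mini list implementation and "struct cls"
are stand-ins for the kernel's struct list_head and the various *_class
structures, not the real API:

#include <stdbool.h>
#include <stdio.h>

struct list_head { struct list_head *prev, *next; };

static void INIT_LIST_HEAD(struct list_head *h) { h->prev = h->next = h; }

static bool list_empty(const struct list_head *h) { return h->next == h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

/* Plain unlinking would leave the node's pointers stale; re-initialising
 * the node keeps list_empty() meaningful after removal, which is exactly
 * what the emptiness-based activity test relies on. */
static void list_del_init(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	INIT_LIST_HEAD(n);
}

struct cls { struct list_head alist; };

static bool cl_is_active(const struct cls *cl)
{
	return !list_empty(&cl->alist);	/* same test the patch adds */
}

int main(void)
{
	struct list_head active;
	struct cls cl;

	INIT_LIST_HEAD(&active);
	INIT_LIST_HEAD(&cl.alist);	/* added by the patch at class init */

	printf("active after init:    %d\n", cl_is_active(&cl));	/* 0 */
	list_add_tail(&cl.alist, &active);
	printf("active after enqueue: %d\n", cl_is_active(&cl));	/* 1 */
	list_del_init(&cl.alist);
	printf("active after dequeue: %d\n", cl_is_active(&cl));	/* 0 */
	return 0;
}

With plain unlinking (no re-init), the final test would read the node's
stale pointers and could wrongly report the class as still active.
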
* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2025-05-05 11:31 Mike Pagano
  0 siblings, 0 replies; 82+ messages in thread
From: Mike Pagano @ 2025-05-05 11:31 UTC (permalink / raw
  To: gentoo-commits

commit:     e01303c9fa1284e6cc3792509af1900e248b803b
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon May  5 11:31:22 2025 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon May  5 11:31:22 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=e01303c9

Linux patch 6.12.27

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |  4 ++++
 1026_linux-6.12.27.patch | 24 ++++++++++++++++++++++++
 2 files changed, 28 insertions(+)

diff --git a/0000_README b/0000_README
index 0531656a..acc6bba1 100644
--- a/0000_README
+++ b/0000_README
@@ -147,6 +147,10 @@ Patch:  1025_linux-6.12.26.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.12.26
 
+Patch:  1026_linux-6.12.27.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.12.27
+
 Patch:  1500_fortify-copy-size-value-range-tracking-fix.patch
 From:   https://git.kernel.org/
 Desc:   fortify: Hide run-time copy size from value range tracking

diff --git a/1026_linux-6.12.27.patch b/1026_linux-6.12.27.patch
new file mode 100644
index 00000000..0be0fe9b
--- /dev/null
+++ b/1026_linux-6.12.27.patch
@@ -0,0 +1,24 @@
+diff --git a/Makefile b/Makefile
+index 467d820fa23faf..77f5d180902cd6 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 12
+-SUBLEVEL = 26
++SUBLEVEL = 27
+ EXTRAVERSION =
+ NAME = Baby Opossum Posse
+ 
+diff --git a/kernel/bpf/preload/bpf_preload_kern.c b/kernel/bpf/preload/bpf_preload_kern.c
+index 56a81df7a9d7c1..fdad0eb308fe60 100644
+--- a/kernel/bpf/preload/bpf_preload_kern.c
++++ b/kernel/bpf/preload/bpf_preload_kern.c
+@@ -89,5 +89,5 @@ static void __exit fini(void)
+ }
+ late_initcall(load);
+ module_exit(fini);
+-MODULE_IMPORT_NS("BPF_INTERNAL");
++MODULE_IMPORT_NS(BPF_INTERNAL);
+ MODULE_LICENSE("GPL");


* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2025-05-02 11:35 Mike Pagano
  0 siblings, 0 replies; 82+ messages in thread
From: Mike Pagano @ 2025-05-02 11:35 UTC (permalink / raw
  To: gentoo-commits

commit:     f3f9e12c0aad5499dad4b462f79cfc3b2b7ddf63
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri May  2 11:35:05 2025 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri May  2 11:35:05 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=f3f9e12c

Linux patch 6.12.26

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |     4 +
 1025_linux-6.12.26.patch | 19905 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 19909 insertions(+)

diff --git a/0000_README b/0000_README
index f5e1ddec..0531656a 100644
--- a/0000_README
+++ b/0000_README
@@ -143,6 +143,10 @@ Patch:  1024_linux-6.12.25.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.12.25
 
+Patch:  1025_linux-6.12.26.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.12.26
+
 Patch:  1500_fortify-copy-size-value-range-tracking-fix.patch
 From:   https://git.kernel.org/
 Desc:   fortify: Hide run-time copy size from value range tracking

diff --git a/1025_linux-6.12.26.patch b/1025_linux-6.12.26.patch
new file mode 100644
index 00000000..6e2023c3
--- /dev/null
+++ b/1025_linux-6.12.26.patch
@@ -0,0 +1,19905 @@
+diff --git a/Documentation/bpf/bpf_devel_QA.rst b/Documentation/bpf/bpf_devel_QA.rst
+index de27e1620821c4..0acb4c9b8d90f3 100644
+--- a/Documentation/bpf/bpf_devel_QA.rst
++++ b/Documentation/bpf/bpf_devel_QA.rst
+@@ -382,6 +382,14 @@ In case of new BPF instructions, once the changes have been accepted
+ into the Linux kernel, please implement support into LLVM's BPF back
+ end. See LLVM_ section below for further information.
+ 
++Q: What "BPF_INTERNAL" symbol namespace is for?
++-----------------------------------------------
++A: Symbols exported as BPF_INTERNAL can only be used by BPF infrastructure
++like preload kernel modules with light skeleton. Most symbols outside
++of BPF_INTERNAL are not expected to be used by code outside of BPF either.
++Symbols may lack the designation because they predate the namespaces,
++or due to an oversight.
++
+ Stable submission
+ =================
+ 
+diff --git a/Makefile b/Makefile
+index 93f4ba25a45336..467d820fa23faf 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 12
+-SUBLEVEL = 25
++SUBLEVEL = 26
+ EXTRAVERSION =
+ NAME = Baby Opossum Posse
+ 
+diff --git a/arch/arm/crypto/Kconfig b/arch/arm/crypto/Kconfig
+index 5ff49a5e9afc9d..f87e63b2212eba 100644
+--- a/arch/arm/crypto/Kconfig
++++ b/arch/arm/crypto/Kconfig
+@@ -3,10 +3,12 @@
+ menu "Accelerated Cryptographic Algorithms for CPU (arm)"
+ 
+ config CRYPTO_CURVE25519_NEON
+-	tristate "Public key crypto: Curve25519 (NEON)"
++	tristate
+ 	depends on KERNEL_MODE_NEON
++	select CRYPTO_KPP
+ 	select CRYPTO_LIB_CURVE25519_GENERIC
+ 	select CRYPTO_ARCH_HAVE_LIB_CURVE25519
++	default CRYPTO_LIB_CURVE25519_INTERNAL
+ 	help
+ 	  Curve25519 algorithm
+ 
+@@ -45,9 +47,10 @@ config CRYPTO_NHPOLY1305_NEON
+ 	  - NEON (Advanced SIMD) extensions
+ 
+ config CRYPTO_POLY1305_ARM
+-	tristate "Hash functions: Poly1305 (NEON)"
++	tristate
+ 	select CRYPTO_HASH
+ 	select CRYPTO_ARCH_HAVE_LIB_POLY1305
++	default CRYPTO_LIB_POLY1305_INTERNAL
+ 	help
+ 	  Poly1305 authenticator algorithm (RFC7539)
+ 
+@@ -212,9 +215,10 @@ config CRYPTO_AES_ARM_CE
+ 	  - ARMv8 Crypto Extensions
+ 
+ config CRYPTO_CHACHA20_NEON
+-	tristate "Ciphers: ChaCha20, XChaCha20, XChaCha12 (NEON)"
++	tristate
+ 	select CRYPTO_SKCIPHER
+ 	select CRYPTO_ARCH_HAVE_LIB_CHACHA
++	default CRYPTO_LIB_CHACHA_INTERNAL
+ 	help
+ 	  Length-preserving ciphers: ChaCha20, XChaCha20, and XChaCha12
+ 	  stream cipher algorithms
+diff --git a/arch/arm64/boot/dts/ti/k3-j784s4-j742s2-common.dtsi b/arch/arm64/boot/dts/ti/k3-j784s4-j742s2-common.dtsi
+new file mode 100644
+index 00000000000000..1dceff119a4707
+--- /dev/null
++++ b/arch/arm64/boot/dts/ti/k3-j784s4-j742s2-common.dtsi
+@@ -0,0 +1,148 @@
++// SPDX-License-Identifier: GPL-2.0-only OR MIT
++/*
++ * Device Tree Source for J784S4 and J742S2 SoC Family
++ *
++ * TRM (j784s4) (SPRUJ43 JULY 2022): https://www.ti.com/lit/zip/spruj52
++ * TRM (j742s2): https://www.ti.com/lit/pdf/spruje3
++ *
++ * Copyright (C) 2022-2024 Texas Instruments Incorporated - https://www.ti.com/
++ *
++ */
++
++#include <dt-bindings/interrupt-controller/irq.h>
++#include <dt-bindings/interrupt-controller/arm-gic.h>
++#include <dt-bindings/soc/ti,sci_pm_domain.h>
++
++#include "k3-pinctrl.h"
++
++/ {
++	interrupt-parent = <&gic500>;
++	#address-cells = <2>;
++	#size-cells = <2>;
++
++	L2_0: l2-cache0 {
++		compatible = "cache";
++		cache-level = <2>;
++		cache-unified;
++		cache-size = <0x200000>;
++		cache-line-size = <64>;
++		cache-sets = <1024>;
++		next-level-cache = <&msmc_l3>;
++	};
++
++	L2_1: l2-cache1 {
++		compatible = "cache";
++		cache-level = <2>;
++		cache-unified;
++		cache-size = <0x200000>;
++		cache-line-size = <64>;
++		cache-sets = <1024>;
++		next-level-cache = <&msmc_l3>;
++	};
++
++	msmc_l3: l3-cache0 {
++		compatible = "cache";
++		cache-level = <3>;
++		cache-unified;
++	};
++
++	firmware {
++		optee {
++			compatible = "linaro,optee-tz";
++			method = "smc";
++		};
++
++		psci: psci {
++			compatible = "arm,psci-1.0";
++			method = "smc";
++		};
++	};
++
++	a72_timer0: timer-cl0-cpu0 {
++		compatible = "arm,armv8-timer";
++		interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>, /* cntpsirq */
++			     <GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>, /* cntpnsirq */
++			     <GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>, /* cntvirq */
++			     <GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>; /* cnthpirq */
++	};
++
++	pmu: pmu {
++		compatible = "arm,cortex-a72-pmu";
++		/* Recommendation from GIC500 TRM Table A.3 */
++		interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH>;
++	};
++
++	cbass_main: bus@100000 {
++		bootph-all;
++		compatible = "simple-bus";
++		#address-cells = <2>;
++		#size-cells = <2>;
++		ranges = <0x00 0x00100000 0x00 0x00100000 0x00 0x00020000>, /* ctrl mmr */
++			 <0x00 0x00600000 0x00 0x00600000 0x00 0x00031100>, /* GPIO */
++			 <0x00 0x00700000 0x00 0x00700000 0x00 0x00001000>, /* ESM */
++			 <0x00 0x01000000 0x00 0x01000000 0x00 0x0d000000>, /* Most peripherals */
++			 <0x00 0x04210000 0x00 0x04210000 0x00 0x00010000>, /* VPU0 */
++			 <0x00 0x04220000 0x00 0x04220000 0x00 0x00010000>, /* VPU1 */
++			 <0x00 0x0d000000 0x00 0x0d000000 0x00 0x00800000>, /* PCIe0 Core*/
++			 <0x00 0x0d800000 0x00 0x0d800000 0x00 0x00800000>, /* PCIe1 Core*/
++			 <0x00 0x0e000000 0x00 0x0e000000 0x00 0x00800000>, /* PCIe2 Core*/
++			 <0x00 0x0e800000 0x00 0x0e800000 0x00 0x00800000>, /* PCIe3 Core*/
++			 <0x00 0x10000000 0x00 0x10000000 0x00 0x08000000>, /* PCIe0 DAT0 */
++			 <0x00 0x18000000 0x00 0x18000000 0x00 0x08000000>, /* PCIe1 DAT0 */
++			 <0x00 0x64800000 0x00 0x64800000 0x00 0x0070c000>, /* C71_1 */
++			 <0x00 0x65800000 0x00 0x65800000 0x00 0x0070c000>, /* C71_2 */
++			 <0x00 0x66800000 0x00 0x66800000 0x00 0x0070c000>, /* C71_3 */
++			 <0x00 0x67800000 0x00 0x67800000 0x00 0x0070c000>, /* C71_4 */
++			 <0x00 0x6f000000 0x00 0x6f000000 0x00 0x00310000>, /* A72 PERIPHBASE */
++			 <0x00 0x70000000 0x00 0x70000000 0x00 0x00400000>, /* MSMC RAM */
++			 <0x00 0x30000000 0x00 0x30000000 0x00 0x0c400000>, /* MAIN NAVSS */
++			 <0x40 0x00000000 0x40 0x00000000 0x01 0x00000000>, /* PCIe0 DAT1 */
++			 <0x41 0x00000000 0x41 0x00000000 0x01 0x00000000>, /* PCIe1 DAT1 */
++			 <0x42 0x00000000 0x42 0x00000000 0x01 0x00000000>, /* PCIe2 DAT1 */
++			 <0x43 0x00000000 0x43 0x00000000 0x01 0x00000000>, /* PCIe3 DAT1 */
++			 <0x44 0x00000000 0x44 0x00000000 0x00 0x08000000>, /* PCIe2 DAT0 */
++			 <0x44 0x10000000 0x44 0x10000000 0x00 0x08000000>, /* PCIe3 DAT0 */
++			 <0x4e 0x20000000 0x4e 0x20000000 0x00 0x00080000>, /* GPU */
++
++			 /* MCUSS_WKUP Range */
++			 <0x00 0x28380000 0x00 0x28380000 0x00 0x03880000>,
++			 <0x00 0x40200000 0x00 0x40200000 0x00 0x00998400>,
++			 <0x00 0x40f00000 0x00 0x40f00000 0x00 0x00020000>,
++			 <0x00 0x41000000 0x00 0x41000000 0x00 0x00020000>,
++			 <0x00 0x41400000 0x00 0x41400000 0x00 0x00020000>,
++			 <0x00 0x41c00000 0x00 0x41c00000 0x00 0x00100000>,
++			 <0x00 0x42040000 0x00 0x42040000 0x00 0x03ac2400>,
++			 <0x00 0x45100000 0x00 0x45100000 0x00 0x00c24000>,
++			 <0x00 0x46000000 0x00 0x46000000 0x00 0x00200000>,
++			 <0x00 0x47000000 0x00 0x47000000 0x00 0x00068400>,
++			 <0x00 0x50000000 0x00 0x50000000 0x00 0x10000000>,
++			 <0x04 0x00000000 0x04 0x00000000 0x04 0x00000000>;
++
++		cbass_mcu_wakeup: bus@28380000 {
++			bootph-all;
++			compatible = "simple-bus";
++			#address-cells = <2>;
++			#size-cells = <2>;
++			ranges = <0x00 0x28380000 0x00 0x28380000 0x00 0x03880000>, /* MCU NAVSS*/
++				 <0x00 0x40200000 0x00 0x40200000 0x00 0x00998400>, /* First peripheral window */
++				 <0x00 0x40f00000 0x00 0x40f00000 0x00 0x00020000>, /* CTRL_MMR0 */
++				 <0x00 0x41000000 0x00 0x41000000 0x00 0x00020000>, /* MCU R5F Core0 */
++				 <0x00 0x41400000 0x00 0x41400000 0x00 0x00020000>, /* MCU R5F Core1 */
++				 <0x00 0x41c00000 0x00 0x41c00000 0x00 0x00100000>, /* MCU SRAM */
++				 <0x00 0x42040000 0x00 0x42040000 0x00 0x03ac2400>, /* WKUP peripheral window */
++				 <0x00 0x45100000 0x00 0x45100000 0x00 0x00c24000>, /* MMRs, remaining NAVSS */
++				 <0x00 0x46000000 0x00 0x46000000 0x00 0x00200000>, /* CPSW */
++				 <0x00 0x47000000 0x00 0x47000000 0x00 0x00068400>, /* OSPI register space */
++				 <0x00 0x50000000 0x00 0x50000000 0x00 0x10000000>, /* FSS data region 1 */
++				 <0x04 0x00000000 0x04 0x00000000 0x04 0x00000000>; /* FSS data region 0/3 */
++		};
++	};
++
++	thermal_zones: thermal-zones {
++		#include "k3-j784s4-j742s2-thermal-common.dtsi"
++	};
++};
++
++/* Now include peripherals from each bus segment */
++#include "k3-j784s4-j742s2-main-common.dtsi"
++#include "k3-j784s4-j742s2-mcu-wakeup-common.dtsi"
+diff --git a/arch/arm64/boot/dts/ti/k3-j784s4-j742s2-main-common.dtsi b/arch/arm64/boot/dts/ti/k3-j784s4-j742s2-main-common.dtsi
+new file mode 100644
+index 00000000000000..2bf4547485e1b3
+--- /dev/null
++++ b/arch/arm64/boot/dts/ti/k3-j784s4-j742s2-main-common.dtsi
+@@ -0,0 +1,2673 @@
++// SPDX-License-Identifier: GPL-2.0-only OR MIT
++/*
++ * Device Tree Source for J784S4 and J742S2 SoC Family Main Domain peripherals
++ *
++ * Copyright (C) 2022-2024 Texas Instruments Incorporated - https://www.ti.com/
++ */
++
++#include <dt-bindings/mux/mux.h>
++#include <dt-bindings/phy/phy.h>
++#include <dt-bindings/phy/phy-ti.h>
++
++#include "k3-serdes.h"
++
++/ {
++	serdes_refclk: clock-serdes {
++		#clock-cells = <0>;
++		compatible = "fixed-clock";
++		/* To be enabled when serdes_wiz* is functional */
++		status = "disabled";
++	};
++};
++
++&cbass_main {
++	/*
++	 * MSMC is configured by bootloaders and a runtime fixup is done in the
++	 * DT for this node
++	 */
++	msmc_ram: sram@70000000 {
++		compatible = "mmio-sram";
++		reg = <0x00 0x70000000 0x00 0x800000>;
++		#address-cells = <1>;
++		#size-cells = <1>;
++		ranges = <0x00 0x00 0x70000000 0x800000>;
++
++		atf-sram@0 {
++			reg = <0x00 0x20000>;
++		};
++
++		tifs-sram@1f0000 {
++			reg = <0x1f0000 0x10000>;
++		};
++
++		l3cache-sram@200000 {
++			reg = <0x200000 0x200000>;
++		};
++	};
++
++	scm_conf: bus@100000 {
++		compatible = "simple-bus";
++		reg = <0x00 0x00100000 0x00 0x1c000>;
++		#address-cells = <1>;
++		#size-cells = <1>;
++		ranges = <0x00 0x00 0x00100000 0x1c000>;
++
++		cpsw1_phy_gmii_sel: phy@4034 {
++			compatible = "ti,am654-phy-gmii-sel";
++			reg = <0x4034 0x4>;
++			#phy-cells = <1>;
++		};
++
++		cpsw0_phy_gmii_sel: phy@4044 {
++			compatible = "ti,j784s4-cpsw9g-phy-gmii-sel";
++			reg = <0x4044 0x20>;
++			#phy-cells = <1>;
++			ti,qsgmii-main-ports = <7>, <7>;
++		};
++
++		pcie0_ctrl: pcie0-ctrl@4070 {
++			compatible = "ti,j784s4-pcie-ctrl", "syscon";
++			reg = <0x4070 0x4>;
++		};
++
++		pcie1_ctrl: pcie1-ctrl@4074 {
++			compatible = "ti,j784s4-pcie-ctrl", "syscon";
++			reg = <0x4074 0x4>;
++		};
++
++		serdes_ln_ctrl: mux-controller@4080 {
++			compatible = "reg-mux";
++			reg = <0x00004080 0x30>;
++			#mux-control-cells = <1>;
++			mux-reg-masks = <0x0 0x3>, <0x4 0x3>, /* SERDES0 lane0/1 select */
++					<0x8 0x3>, <0xc 0x3>, /* SERDES0 lane2/3 select */
++					<0x10 0x3>, <0x14 0x3>, /* SERDES1 lane0/1 select */
++					<0x18 0x3>, <0x1c 0x3>, /* SERDES1 lane2/3 select */
++					<0x20 0x3>, <0x24 0x3>, /* SERDES2 lane0/1 select */
++					<0x28 0x3>, <0x2c 0x3>, /* SERDES2 lane2/3 select */
++					<0x40 0x3>, <0x44 0x3>, /* SERDES4 lane0/1 select */
++					<0x48 0x3>, <0x4c 0x3>; /* SERDES4 lane2/3 select */
++			idle-states = <J784S4_SERDES0_LANE0_PCIE1_LANE0>,
++				      <J784S4_SERDES0_LANE1_PCIE1_LANE1>,
++				      <J784S4_SERDES0_LANE2_IP3_UNUSED>,
++				      <J784S4_SERDES0_LANE3_USB>,
++				      <J784S4_SERDES1_LANE0_PCIE0_LANE0>,
++				      <J784S4_SERDES1_LANE1_PCIE0_LANE1>,
++				      <J784S4_SERDES1_LANE2_PCIE0_LANE2>,
++				      <J784S4_SERDES1_LANE3_PCIE0_LANE3>,
++				      <J784S4_SERDES2_LANE0_IP2_UNUSED>,
++				      <J784S4_SERDES2_LANE1_IP2_UNUSED>,
++				      <J784S4_SERDES2_LANE2_QSGMII_LANE1>,
++				      <J784S4_SERDES2_LANE3_QSGMII_LANE2>,
++				      <J784S4_SERDES4_LANE0_EDP_LANE0>,
++				      <J784S4_SERDES4_LANE1_EDP_LANE1>,
++				      <J784S4_SERDES4_LANE2_EDP_LANE2>,
++				      <J784S4_SERDES4_LANE3_EDP_LANE3>;
++		};
++
++		usb_serdes_mux: mux-controller@4000 {
++			compatible = "reg-mux";
++			reg = <0x4000 0x4>;
++			#mux-control-cells = <1>;
++			mux-reg-masks = <0x0 0x8000000>; /* USB0 to SERDES0 lane 3 mux */
++		};
++
++		ehrpwm_tbclk: clock-controller@4140 {
++			compatible = "ti,am654-ehrpwm-tbclk";
++			reg = <0x4140 0x18>;
++			#clock-cells = <1>;
++		};
++
++		audio_refclk1: clock@82e4 {
++			compatible = "ti,am62-audio-refclk";
++			reg = <0x82e4 0x4>;
++			clocks = <&k3_clks 157 34>;
++			assigned-clocks = <&k3_clks 157 34>;
++			assigned-clock-parents = <&k3_clks 157 63>;
++			#clock-cells = <0>;
++		};
++	};
++
++	main_ehrpwm0: pwm@3000000 {
++		compatible = "ti,am654-ehrpwm", "ti,am3352-ehrpwm";
++		reg = <0x00 0x3000000 0x00 0x100>;
++		clocks = <&ehrpwm_tbclk 0>, <&k3_clks 219 0>;
++		clock-names = "tbclk", "fck";
++		power-domains = <&k3_pds 219 TI_SCI_PD_EXCLUSIVE>;
++		#pwm-cells = <3>;
++		status = "disabled";
++	};
++
++	main_ehrpwm1: pwm@3010000 {
++		compatible = "ti,am654-ehrpwm", "ti,am3352-ehrpwm";
++		reg = <0x00 0x3010000 0x00 0x100>;
++		clocks = <&ehrpwm_tbclk 1>, <&k3_clks 220 0>;
++		clock-names = "tbclk", "fck";
++		power-domains = <&k3_pds 220 TI_SCI_PD_EXCLUSIVE>;
++		#pwm-cells = <3>;
++		status = "disabled";
++	};
++
++	main_ehrpwm2: pwm@3020000 {
++		compatible = "ti,am654-ehrpwm", "ti,am3352-ehrpwm";
++		reg = <0x00 0x3020000 0x00 0x100>;
++		clocks = <&ehrpwm_tbclk 2>, <&k3_clks 221 0>;
++		clock-names = "tbclk", "fck";
++		power-domains = <&k3_pds 221 TI_SCI_PD_EXCLUSIVE>;
++		#pwm-cells = <3>;
++		status = "disabled";
++	};
++
++	main_ehrpwm3: pwm@3030000 {
++		compatible = "ti,am654-ehrpwm", "ti,am3352-ehrpwm";
++		reg = <0x00 0x3030000 0x00 0x100>;
++		clocks = <&ehrpwm_tbclk 3>, <&k3_clks 222 0>;
++		clock-names = "tbclk", "fck";
++		power-domains = <&k3_pds 222 TI_SCI_PD_EXCLUSIVE>;
++		#pwm-cells = <3>;
++		status = "disabled";
++	};
++
++	main_ehrpwm4: pwm@3040000 {
++		compatible = "ti,am654-ehrpwm", "ti,am3352-ehrpwm";
++		reg = <0x00 0x3040000 0x00 0x100>;
++		clocks = <&ehrpwm_tbclk 4>, <&k3_clks 223 0>;
++		clock-names = "tbclk", "fck";
++		power-domains = <&k3_pds 223 TI_SCI_PD_EXCLUSIVE>;
++		#pwm-cells = <3>;
++		status = "disabled";
++	};
++
++	main_ehrpwm5: pwm@3050000 {
++		compatible = "ti,am654-ehrpwm", "ti,am3352-ehrpwm";
++		reg = <0x00 0x3050000 0x00 0x100>;
++		clocks = <&ehrpwm_tbclk 5>, <&k3_clks 224 0>;
++		clock-names = "tbclk", "fck";
++		power-domains = <&k3_pds 224 TI_SCI_PD_EXCLUSIVE>;
++		#pwm-cells = <3>;
++		status = "disabled";
++	};
++
++	gic500: interrupt-controller@1800000 {
++		compatible = "arm,gic-v3";
++		#address-cells = <2>;
++		#size-cells = <2>;
++		ranges;
++		#interrupt-cells = <3>;
++		interrupt-controller;
++		reg = <0x00 0x01800000 0x00 0x10000>, /* GICD */
++		      <0x00 0x01900000 0x00 0x100000>, /* GICR */
++		      <0x00 0x6f000000 0x00 0x2000>,   /* GICC */
++		      <0x00 0x6f010000 0x00 0x1000>,   /* GICH */
++		      <0x00 0x6f020000 0x00 0x2000>;   /* GICV */
++
++		/* vcpumntirq: virtual CPU interface maintenance interrupt */
++		interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
++
++		gic_its: msi-controller@1820000 {
++			compatible = "arm,gic-v3-its";
++			reg = <0x00 0x01820000 0x00 0x10000>;
++			socionext,synquacer-pre-its = <0x1000000 0x400000>;
++			msi-controller;
++			#msi-cells = <1>;
++		};
++	};
++
++	main_gpio_intr: interrupt-controller@a00000 {
++		compatible = "ti,sci-intr";
++		reg = <0x00 0x00a00000 0x00 0x800>;
++		ti,intr-trigger-type = <1>;
++		interrupt-controller;
++		interrupt-parent = <&gic500>;
++		#interrupt-cells = <1>;
++		ti,sci = <&sms>;
++		ti,sci-dev-id = <10>;
++		ti,interrupt-ranges = <8 392 56>;
++	};
++
++	main_pmx0: pinctrl@11c000 {
++		compatible = "pinctrl-single";
++		/* Proxy 0 addressing */
++		reg = <0x00 0x11c000 0x00 0x120>;
++		#pinctrl-cells = <1>;
++		pinctrl-single,register-width = <32>;
++		pinctrl-single,function-mask = <0xffffffff>;
++	};
++
++	/* TIMERIO pad input CTRLMMR_TIMER*_CTRL registers */
++	main_timerio_input: pinctrl@104200 {
++		compatible = "pinctrl-single";
++		reg = <0x00 0x104200 0x00 0x50>;
++		#pinctrl-cells = <1>;
++		pinctrl-single,register-width = <32>;
++		pinctrl-single,function-mask = <0x00000007>;
++	};
++
++	/* TIMERIO pad output CTRLMMR_TIMERIO*_CTRL registers */
++	main_timerio_output: pinctrl@104280 {
++		compatible = "pinctrl-single";
++		reg = <0x00 0x104280 0x00 0x20>;
++		#pinctrl-cells = <1>;
++		pinctrl-single,register-width = <32>;
++		pinctrl-single,function-mask = <0x0000001f>;
++	};
++
++	main_crypto: crypto@4e00000 {
++		compatible = "ti,j721e-sa2ul";
++		reg = <0x00 0x4e00000 0x00 0x1200>;
++		power-domains = <&k3_pds 369 TI_SCI_PD_EXCLUSIVE>;
++		#address-cells = <2>;
++		#size-cells = <2>;
++		ranges = <0x00 0x04e00000 0x00 0x04e00000 0x00 0x30000>;
++
++		dmas = <&main_udmap 0xca40>, <&main_udmap 0x4a40>,
++				<&main_udmap 0x4a41>;
++		dma-names = "tx", "rx1", "rx2";
++
++		rng: rng@4e10000 {
++			compatible = "inside-secure,safexcel-eip76";
++			reg = <0x00 0x4e10000 0x00 0x7d>;
++			interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
++		};
++	};
++
++	main_timer0: timer@2400000 {
++		compatible = "ti,am654-timer";
++		reg = <0x00 0x2400000 0x00 0x400>;
++		interrupts = <GIC_SPI 224 IRQ_TYPE_LEVEL_HIGH>;
++		clocks = <&k3_clks 97 2>;
++		clock-names = "fck";
++		assigned-clocks = <&k3_clks 97 2>;
++		assigned-clock-parents = <&k3_clks 97 3>;
++		power-domains = <&k3_pds 97 TI_SCI_PD_EXCLUSIVE>;
++		ti,timer-pwm;
++	};
++
++	main_timer1: timer@2410000 {
++		compatible = "ti,am654-timer";
++		reg = <0x00 0x2410000 0x00 0x400>;
++		interrupts = <GIC_SPI 225 IRQ_TYPE_LEVEL_HIGH>;
++		clocks = <&k3_clks 98 2>;
++		clock-names = "fck";
++		assigned-clocks = <&k3_clks 98 2>;
++		assigned-clock-parents = <&k3_clks 98 3>;
++		power-domains = <&k3_pds 98 TI_SCI_PD_EXCLUSIVE>;
++		ti,timer-pwm;
++	};
++
++	main_timer2: timer@2420000 {
++		compatible = "ti,am654-timer";
++		reg = <0x00 0x2420000 0x00 0x400>;
++		interrupts = <GIC_SPI 226 IRQ_TYPE_LEVEL_HIGH>;
++		clocks = <&k3_clks 99 2>;
++		clock-names = "fck";
++		assigned-clocks = <&k3_clks 99 2>;
++		assigned-clock-parents = <&k3_clks 99 3>;
++		power-domains = <&k3_pds 99 TI_SCI_PD_EXCLUSIVE>;
++		ti,timer-pwm;
++	};
++
++	main_timer3: timer@2430000 {
++		compatible = "ti,am654-timer";
++		reg = <0x00 0x2430000 0x00 0x400>;
++		interrupts = <GIC_SPI 227 IRQ_TYPE_LEVEL_HIGH>;
++		clocks = <&k3_clks 100 2>;
++		clock-names = "fck";
++		assigned-clocks = <&k3_clks 100 2>;
++		assigned-clock-parents = <&k3_clks 100 3>;
++		power-domains = <&k3_pds 100 TI_SCI_PD_EXCLUSIVE>;
++		ti,timer-pwm;
++	};
++
++	main_timer4: timer@2440000 {
++		compatible = "ti,am654-timer";
++		reg = <0x00 0x2440000 0x00 0x400>;
++		interrupts = <GIC_SPI 228 IRQ_TYPE_LEVEL_HIGH>;
++		clocks = <&k3_clks 101 2>;
++		clock-names = "fck";
++		assigned-clocks = <&k3_clks 101 2>;
++		assigned-clock-parents = <&k3_clks 101 3>;
++		power-domains = <&k3_pds 101 TI_SCI_PD_EXCLUSIVE>;
++		ti,timer-pwm;
++	};
++
++	main_timer5: timer@2450000 {
++		compatible = "ti,am654-timer";
++		reg = <0x00 0x2450000 0x00 0x400>;
++		interrupts = <GIC_SPI 229 IRQ_TYPE_LEVEL_HIGH>;
++		clocks = <&k3_clks 102 2>;
++		clock-names = "fck";
++		assigned-clocks = <&k3_clks 102 2>;
++		assigned-clock-parents = <&k3_clks 102 3>;
++		power-domains = <&k3_pds 102 TI_SCI_PD_EXCLUSIVE>;
++		ti,timer-pwm;
++	};
++
++	main_timer6: timer@2460000 {
++		compatible = "ti,am654-timer";
++		reg = <0x00 0x2460000 0x00 0x400>;
++		interrupts = <GIC_SPI 230 IRQ_TYPE_LEVEL_HIGH>;
++		clocks = <&k3_clks 103 2>;
++		clock-names = "fck";
++		assigned-clocks = <&k3_clks 103 2>;
++		assigned-clock-parents = <&k3_clks 103 3>;
++		power-domains = <&k3_pds 103 TI_SCI_PD_EXCLUSIVE>;
++		ti,timer-pwm;
++	};
++
++	main_timer7: timer@2470000 {
++		compatible = "ti,am654-timer";
++		reg = <0x00 0x2470000 0x00 0x400>;
++		interrupts = <GIC_SPI 231 IRQ_TYPE_LEVEL_HIGH>;
++		clocks = <&k3_clks 104 2>;
++		clock-names = "fck";
++		assigned-clocks = <&k3_clks 104 2>;
++		assigned-clock-parents = <&k3_clks 104 3>;
++		power-domains = <&k3_pds 104 TI_SCI_PD_EXCLUSIVE>;
++		ti,timer-pwm;
++	};
++
++	main_timer8: timer@2480000 {
++		compatible = "ti,am654-timer";
++		reg = <0x00 0x2480000 0x00 0x400>;
++		interrupts = <GIC_SPI 232 IRQ_TYPE_LEVEL_HIGH>;
++		clocks = <&k3_clks 105 2>;
++		clock-names = "fck";
++		assigned-clocks = <&k3_clks 105 2>;
++		assigned-clock-parents = <&k3_clks 105 3>;
++		power-domains = <&k3_pds 105 TI_SCI_PD_EXCLUSIVE>;
++		ti,timer-pwm;
++	};
++
++	main_timer9: timer@2490000 {
++		compatible = "ti,am654-timer";
++		reg = <0x00 0x2490000 0x00 0x400>;
++		interrupts = <GIC_SPI 233 IRQ_TYPE_LEVEL_HIGH>;
++		clocks = <&k3_clks 106 2>;
++		clock-names = "fck";
++		assigned-clocks = <&k3_clks 106 2>;
++		assigned-clock-parents = <&k3_clks 106 3>;
++		power-domains = <&k3_pds 106 TI_SCI_PD_EXCLUSIVE>;
++		ti,timer-pwm;
++	};
++
++	main_timer10: timer@24a0000 {
++		compatible = "ti,am654-timer";
++		reg = <0x00 0x24a0000 0x00 0x400>;
++		interrupts = <GIC_SPI 234 IRQ_TYPE_LEVEL_HIGH>;
++		clocks = <&k3_clks 107 2>;
++		clock-names = "fck";
++		assigned-clocks = <&k3_clks 107 2>;
++		assigned-clock-parents = <&k3_clks 107 3>;
++		power-domains = <&k3_pds 107 TI_SCI_PD_EXCLUSIVE>;
++		ti,timer-pwm;
++	};
++
++	main_timer11: timer@24b0000 {
++		compatible = "ti,am654-timer";
++		reg = <0x00 0x24b0000 0x00 0x400>;
++		interrupts = <GIC_SPI 235 IRQ_TYPE_LEVEL_HIGH>;
++		clocks = <&k3_clks 108 2>;
++		clock-names = "fck";
++		assigned-clocks = <&k3_clks 108 2>;
++		assigned-clock-parents = <&k3_clks 108 3>;
++		power-domains = <&k3_pds 108 TI_SCI_PD_EXCLUSIVE>;
++		ti,timer-pwm;
++	};
++
++	main_timer12: timer@24c0000 {
++		compatible = "ti,am654-timer";
++		reg = <0x00 0x24c0000 0x00 0x400>;
++		interrupts = <GIC_SPI 236 IRQ_TYPE_LEVEL_HIGH>;
++		clocks = <&k3_clks 109 2>;
++		clock-names = "fck";
++		assigned-clocks = <&k3_clks 109 2>;
++		assigned-clock-parents = <&k3_clks 109 3>;
++		power-domains = <&k3_pds 109 TI_SCI_PD_EXCLUSIVE>;
++		ti,timer-pwm;
++	};
++
++	main_timer13: timer@24d0000 {
++		compatible = "ti,am654-timer";
++		reg = <0x00 0x24d0000 0x00 0x400>;
++		interrupts = <GIC_SPI 237 IRQ_TYPE_LEVEL_HIGH>;
++		clocks = <&k3_clks 110 2>;
++		clock-names = "fck";
++		assigned-clocks = <&k3_clks 110 2>;
++		assigned-clock-parents = <&k3_clks 110 3>;
++		power-domains = <&k3_pds 110 TI_SCI_PD_EXCLUSIVE>;
++		ti,timer-pwm;
++	};
++
++	main_timer14: timer@24e0000 {
++		compatible = "ti,am654-timer";
++		reg = <0x00 0x24e0000 0x00 0x400>;
++		interrupts = <GIC_SPI 238 IRQ_TYPE_LEVEL_HIGH>;
++		clocks = <&k3_clks 111 2>;
++		clock-names = "fck";
++		assigned-clocks = <&k3_clks 111 2>;
++		assigned-clock-parents = <&k3_clks 111 3>;
++		power-domains = <&k3_pds 111 TI_SCI_PD_EXCLUSIVE>;
++		ti,timer-pwm;
++	};
++
++	main_timer15: timer@24f0000 {
++		compatible = "ti,am654-timer";
++		reg = <0x00 0x24f0000 0x00 0x400>;
++		interrupts = <GIC_SPI 239 IRQ_TYPE_LEVEL_HIGH>;
++		clocks = <&k3_clks 112 2>;
++		clock-names = "fck";
++		assigned-clocks = <&k3_clks 112 2>;
++		assigned-clock-parents = <&k3_clks 112 3>;
++		power-domains = <&k3_pds 112 TI_SCI_PD_EXCLUSIVE>;
++		ti,timer-pwm;
++	};
++
++	main_timer16: timer@2500000 {
++		compatible = "ti,am654-timer";
++		reg = <0x00 0x2500000 0x00 0x400>;
++		interrupts = <GIC_SPI 240 IRQ_TYPE_LEVEL_HIGH>;
++		clocks = <&k3_clks 113 2>;
++		clock-names = "fck";
++		assigned-clocks = <&k3_clks 113 2>;
++		assigned-clock-parents = <&k3_clks 113 3>;
++		power-domains = <&k3_pds 113 TI_SCI_PD_EXCLUSIVE>;
++		ti,timer-pwm;
++	};
++
++	main_timer17: timer@2510000 {
++		compatible = "ti,am654-timer";
++		reg = <0x00 0x2510000 0x00 0x400>;
++		interrupts = <GIC_SPI 241 IRQ_TYPE_LEVEL_HIGH>;
++		clocks = <&k3_clks 114 2>;
++		clock-names = "fck";
++		assigned-clocks = <&k3_clks 114 2>;
++		assigned-clock-parents = <&k3_clks 114 3>;
++		power-domains = <&k3_pds 114 TI_SCI_PD_EXCLUSIVE>;
++		ti,timer-pwm;
++	};
++
++	main_timer18: timer@2520000 {
++		compatible = "ti,am654-timer";
++		reg = <0x00 0x2520000 0x00 0x400>;
++		interrupts = <GIC_SPI 242 IRQ_TYPE_LEVEL_HIGH>;
++		clocks = <&k3_clks 115 2>;
++		clock-names = "fck";
++		assigned-clocks = <&k3_clks 115 2>;
++		assigned-clock-parents = <&k3_clks 115 3>;
++		power-domains = <&k3_pds 115 TI_SCI_PD_EXCLUSIVE>;
++		ti,timer-pwm;
++	};
++
++	main_timer19: timer@2530000 {
++		compatible = "ti,am654-timer";
++		reg = <0x00 0x2530000 0x00 0x400>;
++		interrupts = <GIC_SPI 243 IRQ_TYPE_LEVEL_HIGH>;
++		clocks = <&k3_clks 116 2>;
++		clock-names = "fck";
++		assigned-clocks = <&k3_clks 116 2>;
++		assigned-clock-parents = <&k3_clks 116 3>;
++		power-domains = <&k3_pds 116 TI_SCI_PD_EXCLUSIVE>;
++		ti,timer-pwm;
++	};
++
++	main_uart0: serial@2800000 {
++		compatible = "ti,j721e-uart", "ti,am654-uart";
++		reg = <0x00 0x02800000 0x00 0x200>;
++		interrupts = <GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>;
++		clocks = <&k3_clks 146 0>;
++		clock-names = "fclk";
++		power-domains = <&k3_pds 146 TI_SCI_PD_EXCLUSIVE>;
++		status = "disabled";
++	};
++
++	main_uart1: serial@2810000 {
++		compatible = "ti,j721e-uart", "ti,am654-uart";
++		reg = <0x00 0x02810000 0x00 0x200>;
++		interrupts = <GIC_SPI 193 IRQ_TYPE_LEVEL_HIGH>;
++		clocks = <&k3_clks 388 0>;
++		clock-names = "fclk";
++		power-domains = <&k3_pds 388 TI_SCI_PD_EXCLUSIVE>;
++		status = "disabled";
++	};
++
++	main_uart2: serial@2820000 {
++		compatible = "ti,j721e-uart", "ti,am654-uart";
++		reg = <0x00 0x02820000 0x00 0x200>;
++		interrupts = <GIC_SPI 194 IRQ_TYPE_LEVEL_HIGH>;
++		clocks = <&k3_clks 389 0>;
++		clock-names = "fclk";
++		power-domains = <&k3_pds 389 TI_SCI_PD_EXCLUSIVE>;
++		status = "disabled";
++	};
++
++	main_uart3: serial@2830000 {
++		compatible = "ti,j721e-uart", "ti,am654-uart";
++		reg = <0x00 0x02830000 0x00 0x200>;
++		interrupts = <GIC_SPI 195 IRQ_TYPE_LEVEL_HIGH>;
++		clocks = <&k3_clks 390 0>;
++		clock-names = "fclk";
++		power-domains = <&k3_pds 390 TI_SCI_PD_EXCLUSIVE>;
++		status = "disabled";
++	};
++
++	main_uart4: serial@2840000 {
++		compatible = "ti,j721e-uart", "ti,am654-uart";
++		reg = <0x00 0x02840000 0x00 0x200>;
++		interrupts = <GIC_SPI 196 IRQ_TYPE_LEVEL_HIGH>;
++		clocks = <&k3_clks 391 0>;
++		clock-names = "fclk";
++		power-domains = <&k3_pds 391 TI_SCI_PD_EXCLUSIVE>;
++		status = "disabled";
++	};
++
++	main_uart5: serial@2850000 {
++		compatible = "ti,j721e-uart", "ti,am654-uart";
++		reg = <0x00 0x02850000 0x00 0x200>;
++		interrupts = <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH>;
++		clocks = <&k3_clks 392 0>;
++		clock-names = "fclk";
++		power-domains = <&k3_pds 392 TI_SCI_PD_EXCLUSIVE>;
++		status = "disabled";
++	};
++
++	main_uart6: serial@2860000 {
++		compatible = "ti,j721e-uart", "ti,am654-uart";
++		reg = <0x00 0x02860000 0x00 0x200>;
++		interrupts = <GIC_SPI 198 IRQ_TYPE_LEVEL_HIGH>;
++		clocks = <&k3_clks 393 0>;
++		clock-names = "fclk";
++		power-domains = <&k3_pds 393 TI_SCI_PD_EXCLUSIVE>;
++		status = "disabled";
++	};
++
++	main_uart7: serial@2870000 {
++		compatible = "ti,j721e-uart", "ti,am654-uart";
++		reg = <0x00 0x02870000 0x00 0x200>;
++		interrupts = <GIC_SPI 199 IRQ_TYPE_LEVEL_HIGH>;
++		clocks = <&k3_clks 394 0>;
++		clock-names = "fclk";
++		power-domains = <&k3_pds 394 TI_SCI_PD_EXCLUSIVE>;
++		status = "disabled";
++	};
++
++	main_uart8: serial@2880000 {
++		compatible = "ti,j721e-uart", "ti,am654-uart";
++		reg = <0x00 0x02880000 0x00 0x200>;
++		interrupts = <GIC_SPI 248 IRQ_TYPE_LEVEL_HIGH>;
++		clocks = <&k3_clks 395 0>;
++		clock-names = "fclk";
++		power-domains = <&k3_pds 395 TI_SCI_PD_EXCLUSIVE>;
++		status = "disabled";
++	};
++
++	main_uart9: serial@2890000 {
++		compatible = "ti,j721e-uart", "ti,am654-uart";
++		reg = <0x00 0x02890000 0x00 0x200>;
++		interrupts = <GIC_SPI 249 IRQ_TYPE_LEVEL_HIGH>;
++		clocks = <&k3_clks 396 0>;
++		clock-names = "fclk";
++		power-domains = <&k3_pds 396 TI_SCI_PD_EXCLUSIVE>;
++		status = "disabled";
++	};
++
++	main_gpio0: gpio@600000 {
++		compatible = "ti,j721e-gpio", "ti,keystone-gpio";
++		reg = <0x00 0x00600000 0x00 0x100>;
++		gpio-controller;
++		#gpio-cells = <2>;
++		interrupt-parent = <&main_gpio_intr>;
++		interrupts = <145>, <146>, <147>, <148>, <149>;
++		interrupt-controller;
++		#interrupt-cells = <2>;
++		ti,ngpio = <66>;
++		ti,davinci-gpio-unbanked = <0>;
++		power-domains = <&k3_pds 163 TI_SCI_PD_EXCLUSIVE>;
++		clocks = <&k3_clks 163 0>;
++		clock-names = "gpio";
++		status = "disabled";
++	};
++
++	main_gpio2: gpio@610000 {
++		compatible = "ti,j721e-gpio", "ti,keystone-gpio";
++		reg = <0x00 0x00610000 0x00 0x100>;
++		gpio-controller;
++		#gpio-cells = <2>;
++		interrupt-parent = <&main_gpio_intr>;
++		interrupts = <154>, <155>, <156>, <157>, <158>;
++		interrupt-controller;
++		#interrupt-cells = <2>;
++		ti,ngpio = <66>;
++		ti,davinci-gpio-unbanked = <0>;
++		power-domains = <&k3_pds 164 TI_SCI_PD_EXCLUSIVE>;
++		clocks = <&k3_clks 164 0>;
++		clock-names = "gpio";
++		status = "disabled";
++	};
++
++	main_gpio4: gpio@620000 {
++		compatible = "ti,j721e-gpio", "ti,keystone-gpio";
++		reg = <0x00 0x00620000 0x00 0x100>;
++		gpio-controller;
++		#gpio-cells = <2>;
++		interrupt-parent = <&main_gpio_intr>;
++		interrupts = <163>, <164>, <165>, <166>, <167>;
++		interrupt-controller;
++		#interrupt-cells = <2>;
++		ti,ngpio = <66>;
++		ti,davinci-gpio-unbanked = <0>;
++		power-domains = <&k3_pds 165 TI_SCI_PD_EXCLUSIVE>;
++		clocks = <&k3_clks 165 0>;
++		clock-names = "gpio";
++		status = "disabled";
++	};
++
++	main_gpio6: gpio@630000 {
++		compatible = "ti,j721e-gpio", "ti,keystone-gpio";
++		reg = <0x00 0x00630000 0x00 0x100>;
++		gpio-controller;
++		#gpio-cells = <2>;
++		interrupt-parent = <&main_gpio_intr>;
++		interrupts = <172>, <173>, <174>, <175>, <176>;
++		interrupt-controller;
++		#interrupt-cells = <2>;
++		ti,ngpio = <66>;
++		ti,davinci-gpio-unbanked = <0>;
++		power-domains = <&k3_pds 166 TI_SCI_PD_EXCLUSIVE>;
++		clocks = <&k3_clks 166 0>;
++		clock-names = "gpio";
++		status = "disabled";
++	};
++
++	usbss0: usb@4104000 {
++		bootph-all;
++		compatible = "ti,j721e-usb";
++		reg = <0x00 0x4104000 0x00 0x100>;
++		dma-coherent;
++		power-domains = <&k3_pds 398 TI_SCI_PD_EXCLUSIVE>;
++		clocks = <&k3_clks 398 21>, <&k3_clks 398 2>;
++		clock-names = "ref", "lpm";
++		assigned-clocks = <&k3_clks 398 21>;    /* USB2_REFCLK */
++		assigned-clock-parents = <&k3_clks 398 22>; /* HFOSC0 */
++		#address-cells = <2>;
++		#size-cells = <2>;
++		ranges;
++
++		status = "disabled"; /* Needs lane config */
++
++		usb0: usb@6000000 {
++			bootph-all;
++			compatible = "cdns,usb3";
++			reg = <0x00 0x6000000 0x00 0x10000>,
++			      <0x00 0x6010000 0x00 0x10000>,
++			      <0x00 0x6020000 0x00 0x10000>;
++			reg-names = "otg", "xhci", "dev";
++			interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>,  /* irq.0 */
++				     <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>, /* irq.6 */
++				     <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>; /* otgirq.0 */
++			interrupt-names = "host",
++					  "peripheral",
++					  "otg";
++		};
++	};
++
++	main_i2c0: i2c@2000000 {
++		compatible = "ti,j721e-i2c", "ti,omap4-i2c";
++		reg = <0x00 0x02000000 0x00 0x100>;
++		interrupts = <GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH>;
++		#address-cells = <1>;
++		#size-cells = <0>;
++		clocks = <&k3_clks 270 2>;
++		clock-names = "fck";
++		power-domains = <&k3_pds 270 TI_SCI_PD_EXCLUSIVE>;
++		status = "disabled";
++	};
++
++	main_i2c1: i2c@2010000 {
++		compatible = "ti,j721e-i2c", "ti,omap4-i2c";
++		reg = <0x00 0x02010000 0x00 0x100>;
++		interrupts = <GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH>;
++		#address-cells = <1>;
++		#size-cells = <0>;
++		clocks = <&k3_clks 271 2>;
++		clock-names = "fck";
++		power-domains = <&k3_pds 271 TI_SCI_PD_EXCLUSIVE>;
++		status = "disabled";
++	};
++
++	main_i2c2: i2c@2020000 {
++		compatible = "ti,j721e-i2c", "ti,omap4-i2c";
++		reg = <0x00 0x02020000 0x00 0x100>;
++		interrupts = <GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH>;
++		#address-cells = <1>;
++		#size-cells = <0>;
++		clocks = <&k3_clks 272 2>;
++		clock-names = "fck";
++		power-domains = <&k3_pds 272 TI_SCI_PD_EXCLUSIVE>;
++		status = "disabled";
++	};
++
++	main_i2c3: i2c@2030000 {
++		compatible = "ti,j721e-i2c", "ti,omap4-i2c";
++		reg = <0x00 0x02030000 0x00 0x100>;
++		interrupts = <GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH>;
++		#address-cells = <1>;
++		#size-cells = <0>;
++		clocks = <&k3_clks 273 2>;
++		clock-names = "fck";
++		power-domains = <&k3_pds 273 TI_SCI_PD_EXCLUSIVE>;
++		status = "disabled";
++	};
++
++	main_i2c4: i2c@2040000 {
++		compatible = "ti,j721e-i2c", "ti,omap4-i2c";
++		reg = <0x00 0x02040000 0x00 0x100>;
++		interrupts = <GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH>;
++		#address-cells = <1>;
++		#size-cells = <0>;
++		clocks = <&k3_clks 274 2>;
++		clock-names = "fck";
++		power-domains = <&k3_pds 274 TI_SCI_PD_EXCLUSIVE>;
++		status = "disabled";
++	};
++
++	main_i2c5: i2c@2050000 {
++		compatible = "ti,j721e-i2c", "ti,omap4-i2c";
++		reg = <0x00 0x02050000 0x00 0x100>;
++		interrupts = <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>;
++		#address-cells = <1>;
++		#size-cells = <0>;
++		clocks = <&k3_clks 275 2>;
++		clock-names = "fck";
++		power-domains = <&k3_pds 275 TI_SCI_PD_EXCLUSIVE>;
++		status = "disabled";
++	};
++
++	main_i2c6: i2c@2060000 {
++		compatible = "ti,j721e-i2c", "ti,omap4-i2c";
++		reg = <0x00 0x02060000 0x00 0x100>;
++		interrupts = <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>;
++		#address-cells = <1>;
++		#size-cells = <0>;
++		clocks = <&k3_clks 276 2>;
++		clock-names = "fck";
++		power-domains = <&k3_pds 276 TI_SCI_PD_EXCLUSIVE>;
++		status = "disabled";
++	};
++
++	ti_csi2rx0: ticsi2rx@4500000 {
++		compatible = "ti,j721e-csi2rx-shim";
++		reg = <0x00 0x04500000 0x00 0x00001000>;
++		ranges;
++		#address-cells = <2>;
++		#size-cells = <2>;
++		dmas = <&main_bcdma_csi 0 0x4940 0>;
++		dma-names = "rx0";
++		power-domains = <&k3_pds 72 TI_SCI_PD_EXCLUSIVE>;
++		status = "disabled";
++
++		cdns_csi2rx0: csi-bridge@4504000 {
++			compatible = "ti,j721e-csi2rx", "cdns,csi2rx";
++			reg = <0x00 0x04504000 0x00 0x00001000>;
++			clocks = <&k3_clks 72 2>, <&k3_clks 72 0>, <&k3_clks 72 2>,
++				<&k3_clks 72 2>, <&k3_clks 72 3>, <&k3_clks 72 3>;
++			clock-names = "sys_clk", "p_clk", "pixel_if0_clk",
++				"pixel_if1_clk", "pixel_if2_clk", "pixel_if3_clk";
++			phys = <&dphy0>;
++			phy-names = "dphy";
++
++			ports {
++				#address-cells = <1>;
++				#size-cells = <0>;
++
++				csi0_port0: port@0 {
++					reg = <0>;
++					status = "disabled";
++				};
++
++				csi0_port1: port@1 {
++					reg = <1>;
++					status = "disabled";
++				};
++
++				csi0_port2: port@2 {
++					reg = <2>;
++					status = "disabled";
++				};
++
++				csi0_port3: port@3 {
++					reg = <3>;
++					status = "disabled";
++				};
++
++				csi0_port4: port@4 {
++					reg = <4>;
++					status = "disabled";
++				};
++			};
++		};
++	};
++
++	ti_csi2rx1: ticsi2rx@4510000 {
++		compatible = "ti,j721e-csi2rx-shim";
++		reg = <0x00 0x04510000 0x00 0x1000>;
++		ranges;
++		#address-cells = <2>;
++		#size-cells = <2>;
++		dmas = <&main_bcdma_csi 0 0x4960 0>;
++		dma-names = "rx0";
++		power-domains = <&k3_pds 73 TI_SCI_PD_EXCLUSIVE>;
++		status = "disabled";
++
++		cdns_csi2rx1: csi-bridge@4514000 {
++			compatible = "ti,j721e-csi2rx", "cdns,csi2rx";
++			reg = <0x00 0x04514000 0x00 0x00001000>;
++			clocks = <&k3_clks 73 2>, <&k3_clks 73 0>, <&k3_clks 73 2>,
++				<&k3_clks 73 2>, <&k3_clks 73 3>, <&k3_clks 73 3>;
++			clock-names = "sys_clk", "p_clk", "pixel_if0_clk",
++				"pixel_if1_clk", "pixel_if2_clk", "pixel_if3_clk";
++			phys = <&dphy1>;
++			phy-names = "dphy";
++			ports {
++				#address-cells = <1>;
++				#size-cells = <0>;
++
++				csi1_port0: port@0 {
++					reg = <0>;
++					status = "disabled";
++				};
++
++				csi1_port1: port@1 {
++					reg = <1>;
++					status = "disabled";
++				};
++
++				csi1_port2: port@2 {
++					reg = <2>;
++					status = "disabled";
++				};
++
++				csi1_port3: port@3 {
++					reg = <3>;
++					status = "disabled";
++				};
++
++				csi1_port4: port@4 {
++					reg = <4>;
++					status = "disabled";
++				};
++			};
++		};
++	};
++
++	ti_csi2rx2: ticsi2rx@4520000 {
++		compatible = "ti,j721e-csi2rx-shim";
++		reg = <0x00 0x04520000 0x00 0x00001000>;
++		ranges;
++		#address-cells = <2>;
++		#size-cells = <2>;
++		dmas = <&main_bcdma_csi 0 0x4980 0>;
++		dma-names = "rx0";
++		power-domains = <&k3_pds 74 TI_SCI_PD_EXCLUSIVE>;
++		status = "disabled";
++
++		cdns_csi2rx2: csi-bridge@4524000 {
++			compatible = "ti,j721e-csi2rx", "cdns,csi2rx";
++			reg = <0x00 0x04524000 0x00 0x00001000>;
++			clocks = <&k3_clks 74 2>, <&k3_clks 74 0>, <&k3_clks 74 2>,
++				<&k3_clks 74 2>, <&k3_clks 74 3>, <&k3_clks 74 3>;
++			clock-names = "sys_clk", "p_clk", "pixel_if0_clk",
++				"pixel_if1_clk", "pixel_if2_clk", "pixel_if3_clk";
++			phys = <&dphy2>;
++			phy-names = "dphy";
++
++			ports {
++				#address-cells = <1>;
++				#size-cells = <0>;
++
++				csi2_port0: port@0 {
++					reg = <0>;
++					status = "disabled";
++				};
++
++				csi2_port1: port@1 {
++					reg = <1>;
++					status = "disabled";
++				};
++
++				csi2_port2: port@2 {
++					reg = <2>;
++					status = "disabled";
++				};
++
++				csi2_port3: port@3 {
++					reg = <3>;
++					status = "disabled";
++				};
++
++				csi2_port4: port@4 {
++					reg = <4>;
++					status = "disabled";
++				};
++			};
++		};
++	};
++
++	dphy0: phy@4580000 {
++		compatible = "cdns,dphy-rx";
++		reg = <0x00 0x04580000 0x00 0x00001100>;
++		#phy-cells = <0>;
++		power-domains = <&k3_pds 212 TI_SCI_PD_EXCLUSIVE>;
++		status = "disabled";
++	};
++
++	dphy1: phy@4590000 {
++		compatible = "cdns,dphy-rx";
++		reg = <0x00 0x04590000 0x00 0x00001100>;
++		#phy-cells = <0>;
++		power-domains = <&k3_pds 213 TI_SCI_PD_EXCLUSIVE>;
++		status = "disabled";
++	};
++
++	dphy2: phy@45a0000 {
++		compatible = "cdns,dphy-rx";
++		reg = <0x00 0x045a0000 0x00 0x00001100>;
++		#phy-cells = <0>;
++		power-domains = <&k3_pds 214 TI_SCI_PD_EXCLUSIVE>;
++		status = "disabled";
++	};
++
++	vpu0: video-codec@4210000 {
++		compatible = "ti,j721s2-wave521c", "cnm,wave521c";
++		reg = <0x00 0x4210000 0x00 0x10000>;
++		interrupts = <GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>;
++		clocks = <&k3_clks 241 2>;
++		power-domains = <&k3_pds 241 TI_SCI_PD_EXCLUSIVE>;
++	};
++
++	vpu1: video-codec@4220000 {
++		compatible = "ti,j721s2-wave521c", "cnm,wave521c";
++		reg = <0x00 0x4220000 0x00 0x10000>;
++		interrupts = <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>;
++		clocks = <&k3_clks 242 2>;
++		power-domains = <&k3_pds 242 TI_SCI_PD_EXCLUSIVE>;
++	};
++
++	main_sdhci0: mmc@4f80000 {
++		compatible = "ti,j721e-sdhci-8bit";
++		reg = <0x00 0x04f80000 0x00 0x1000>,
++		      <0x00 0x04f88000 0x00 0x400>;
++		interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
++		power-domains = <&k3_pds 140 TI_SCI_PD_EXCLUSIVE>;
++		clocks = <&k3_clks 140 1>, <&k3_clks 140 2>;
++		clock-names = "clk_ahb", "clk_xin";
++		assigned-clocks = <&k3_clks 140 2>;
++		assigned-clock-parents = <&k3_clks 140 3>;
++		bus-width = <8>;
++		ti,otap-del-sel-legacy = <0x0>;
++		ti,otap-del-sel-mmc-hs = <0x0>;
++		ti,otap-del-sel-ddr52 = <0x6>;
++		ti,otap-del-sel-hs200 = <0x8>;
++		ti,otap-del-sel-hs400 = <0x5>;
++		ti,itap-del-sel-legacy = <0x10>;
++		ti,itap-del-sel-mmc-hs = <0xa>;
++		ti,strobe-sel = <0x77>;
++		ti,clkbuf-sel = <0x7>;
++		ti,trm-icp = <0x8>;
++		mmc-ddr-1_8v;
++		mmc-hs200-1_8v;
++		mmc-hs400-1_8v;
++		dma-coherent;
++		status = "disabled";
++	};
++
++	main_sdhci1: mmc@4fb0000 {
++		compatible = "ti,j721e-sdhci-4bit";
++		reg = <0x00 0x04fb0000 0x00 0x1000>,
++		      <0x00 0x04fb8000 0x00 0x400>;
++		interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
++		power-domains = <&k3_pds 141 TI_SCI_PD_EXCLUSIVE>;
++		clocks = <&k3_clks 141 3>, <&k3_clks 141 4>;
++		clock-names = "clk_ahb", "clk_xin";
++		assigned-clocks = <&k3_clks 141 4>;
++		assigned-clock-parents = <&k3_clks 141 5>;
++		bus-width = <4>;
++		ti,otap-del-sel-legacy = <0x0>;
++		ti,otap-del-sel-sd-hs = <0x0>;
++		ti,otap-del-sel-sdr12 = <0xf>;
++		ti,otap-del-sel-sdr25 = <0xf>;
++		ti,otap-del-sel-sdr50 = <0xc>;
++		ti,otap-del-sel-sdr104 = <0x5>;
++		ti,otap-del-sel-ddr50 = <0xc>;
++		ti,itap-del-sel-legacy = <0x0>;
++		ti,itap-del-sel-sd-hs = <0x0>;
++		ti,itap-del-sel-sdr12 = <0x0>;
++		ti,itap-del-sel-sdr25 = <0x0>;
++		ti,itap-del-sel-ddr50 = <0x2>;
++		ti,clkbuf-sel = <0x7>;
++		ti,trm-icp = <0x8>;
++		dma-coherent;
++		status = "disabled";
++	};
++
++	pcie0_rc: pcie@2900000 {
++		compatible = "ti,j784s4-pcie-host";
++		reg = <0x00 0x02900000 0x00 0x1000>,
++		      <0x00 0x02907000 0x00 0x400>,
++		      <0x00 0x0d000000 0x00 0x00800000>,
++		      <0x00 0x10000000 0x00 0x00001000>;
++		reg-names = "intd_cfg", "user_cfg", "reg", "cfg";
++		interrupt-names = "link_state";
++		interrupts = <GIC_SPI 318 IRQ_TYPE_EDGE_RISING>;
++		device_type = "pci";
++		ti,syscon-pcie-ctrl = <&pcie0_ctrl 0x0>;
++		max-link-speed = <3>;
++		num-lanes = <4>;
++		power-domains = <&k3_pds 332 TI_SCI_PD_EXCLUSIVE>;
++		clocks = <&k3_clks 332 0>;
++		clock-names = "fck";
++		#address-cells = <3>;
++		#size-cells = <2>;
++		bus-range = <0x0 0xff>;
++		vendor-id = <0x104c>;
++		device-id = <0xb012>;
++		msi-map = <0x0 &gic_its 0x0 0x10000>;
++		dma-coherent;
++		ranges = <0x01000000 0x0 0x10001000 0x0 0x10001000 0x0 0x0010000>,
++			 <0x02000000 0x0 0x10011000 0x0 0x10011000 0x0 0x7fef000>;
++		dma-ranges = <0x02000000 0x0 0x0 0x0 0x0 0x10000 0x0>;
++		status = "disabled";
++	};
++
++	pcie1_rc: pcie@2910000 {
++		compatible = "ti,j784s4-pcie-host";
++		reg = <0x00 0x02910000 0x00 0x1000>,
++		      <0x00 0x02917000 0x00 0x400>,
++		      <0x00 0x0d800000 0x00 0x00800000>,
++		      <0x00 0x18000000 0x00 0x00001000>;
++		reg-names = "intd_cfg", "user_cfg", "reg", "cfg";
++		interrupt-names = "link_state";
++		interrupts = <GIC_SPI 330 IRQ_TYPE_EDGE_RISING>;
++		device_type = "pci";
++		ti,syscon-pcie-ctrl = <&pcie1_ctrl 0x0>;
++		max-link-speed = <3>;
++		num-lanes = <4>;
++		power-domains = <&k3_pds 333 TI_SCI_PD_EXCLUSIVE>;
++		clocks = <&k3_clks 333 0>;
++		clock-names = "fck";
++		#address-cells = <3>;
++		#size-cells = <2>;
++		bus-range = <0x0 0xff>;
++		vendor-id = <0x104c>;
++		device-id = <0xb012>;
++		msi-map = <0x0 &gic_its 0x10000 0x10000>;
++		dma-coherent;
++		ranges = <0x01000000 0x0 0x18001000  0x00 0x18001000  0x0 0x0010000>,
++			 <0x02000000 0x0 0x18011000  0x00 0x18011000  0x0 0x7fef000>;
++		dma-ranges = <0x02000000 0x0 0x0 0x0 0x0 0x10000 0x0>;
++		status = "disabled";
++	};
++
++	serdes_wiz0: wiz@5060000 {
++		compatible = "ti,j784s4-wiz-10g";
++		#address-cells = <1>;
++		#size-cells = <1>;
++		power-domains = <&k3_pds 404 TI_SCI_PD_EXCLUSIVE>;
++		clocks = <&k3_clks 404 2>, <&k3_clks 404 6>, <&serdes_refclk>, <&k3_clks 404 5>;
++		clock-names = "fck", "core_ref_clk", "ext_ref_clk", "core_ref1_clk";
++		assigned-clocks = <&k3_clks 404 6>;
++		assigned-clock-parents = <&k3_clks 404 10>;
++		num-lanes = <4>;
++		#reset-cells = <1>;
++		#clock-cells = <1>;
++		ranges = <0x5060000 0x00 0x5060000 0x10000>;
++		status = "disabled";
++
++		serdes0: serdes@5060000 {
++			compatible = "ti,j721e-serdes-10g";
++			reg = <0x05060000 0x010000>;
++			reg-names = "torrent_phy";
++			resets = <&serdes_wiz0 0>;
++			reset-names = "torrent_reset";
++			clocks = <&serdes_wiz0 TI_WIZ_PLL0_REFCLK>,
++				 <&serdes_wiz0 TI_WIZ_PHY_EN_REFCLK>;
++			clock-names = "refclk", "phy_en_refclk";
++			assigned-clocks = <&serdes_wiz0 TI_WIZ_PLL0_REFCLK>,
++					  <&serdes_wiz0 TI_WIZ_PLL1_REFCLK>,
++					  <&serdes_wiz0 TI_WIZ_REFCLK_DIG>;
++			assigned-clock-parents = <&k3_clks 404 6>,
++						 <&k3_clks 404 6>,
++						 <&k3_clks 404 6>;
++			#address-cells = <1>;
++			#size-cells = <0>;
++			#clock-cells = <1>;
++			status = "disabled";
++		};
++	};
++
++	serdes_wiz1: wiz@5070000 {
++		compatible = "ti,j784s4-wiz-10g";
++		#address-cells = <1>;
++		#size-cells = <1>;
++		power-domains = <&k3_pds 405 TI_SCI_PD_EXCLUSIVE>;
++		clocks = <&k3_clks 405 2>, <&k3_clks 405 6>, <&serdes_refclk>, <&k3_clks 405 5>;
++		clock-names = "fck", "core_ref_clk", "ext_ref_clk", "core_ref1_clk";
++		assigned-clocks = <&k3_clks 405 6>;
++		assigned-clock-parents = <&k3_clks 405 10>;
++		num-lanes = <4>;
++		#reset-cells = <1>;
++		#clock-cells = <1>;
++		ranges = <0x05070000 0x00 0x05070000 0x10000>;
++		status = "disabled";
++
++		serdes1: serdes@5070000 {
++			compatible = "ti,j721e-serdes-10g";
++			reg = <0x05070000 0x010000>;
++			reg-names = "torrent_phy";
++			resets = <&serdes_wiz1 0>;
++			reset-names = "torrent_reset";
++			clocks = <&serdes_wiz1 TI_WIZ_PLL0_REFCLK>,
++				 <&serdes_wiz1 TI_WIZ_PHY_EN_REFCLK>;
++			clock-names = "refclk", "phy_en_refclk";
++			assigned-clocks = <&serdes_wiz1 TI_WIZ_PLL0_REFCLK>,
++					  <&serdes_wiz1 TI_WIZ_PLL1_REFCLK>,
++					  <&serdes_wiz1 TI_WIZ_REFCLK_DIG>;
++			assigned-clock-parents = <&k3_clks 405 6>,
++						 <&k3_clks 405 6>,
++						 <&k3_clks 405 6>;
++			#address-cells = <1>;
++			#size-cells = <0>;
++			#clock-cells = <1>;
++			status = "disabled";
++		};
++	};
++
++	serdes_wiz4: wiz@5050000 {
++		compatible = "ti,j784s4-wiz-10g";
++		#address-cells = <1>;
++		#size-cells = <1>;
++		power-domains = <&k3_pds 407 TI_SCI_PD_EXCLUSIVE>;
++		clocks = <&k3_clks 407 2>, <&k3_clks 407 6>, <&serdes_refclk>, <&k3_clks 407 5>;
++		clock-names = "fck", "core_ref_clk", "ext_ref_clk", "core_ref1_clk";
++		assigned-clocks = <&k3_clks 407 6>;
++		assigned-clock-parents = <&k3_clks 407 10>;
++		num-lanes = <4>;
++		#reset-cells = <1>;
++		#clock-cells = <1>;
++		ranges = <0x05050000 0x00 0x05050000 0x10000>,
++			 <0xa030a00 0x00 0xa030a00 0x40>; /* DPTX PHY */
++		status = "disabled";
++
++		serdes4: serdes@5050000 {
++			/*
++			 * Note: we also map DPTX PHY registers as the Torrent
++			 * needs to manage those.
++			 */
++			compatible = "ti,j721e-serdes-10g";
++			reg = <0x05050000 0x010000>,
++			      <0x0a030a00 0x40>; /* DPTX PHY */
++			reg-names = "torrent_phy";
++			resets = <&serdes_wiz4 0>;
++			reset-names = "torrent_reset";
++			clocks = <&serdes_wiz4 TI_WIZ_PLL0_REFCLK>,
++				 <&serdes_wiz4 TI_WIZ_PHY_EN_REFCLK>;
++			clock-names = "refclk", "phy_en_refclk";
++			assigned-clocks = <&serdes_wiz4 TI_WIZ_PLL0_REFCLK>,
++					  <&serdes_wiz4 TI_WIZ_PLL1_REFCLK>,
++					  <&serdes_wiz4 TI_WIZ_REFCLK_DIG>;
++			assigned-clock-parents = <&k3_clks 407 6>,
++						 <&k3_clks 407 6>,
++						 <&k3_clks 407 6>;
++			#address-cells = <1>;
++			#size-cells = <0>;
++			#clock-cells = <1>;
++			status = "disabled";
++		};
++	};
++
++	main_navss: bus@30000000 {
++		bootph-all;
++		compatible = "simple-bus";
++		#address-cells = <2>;
++		#size-cells = <2>;
++		ranges = <0x00 0x30000000 0x00 0x30000000 0x00 0x0c400000>;
++		ti,sci-dev-id = <280>;
++		dma-coherent;
++		dma-ranges;
++
++		main_navss_intr: interrupt-controller@310e0000 {
++			compatible = "ti,sci-intr";
++			reg = <0x00 0x310e0000 0x00 0x4000>;
++			ti,intr-trigger-type = <4>;
++			interrupt-controller;
++			interrupt-parent = <&gic500>;
++			#interrupt-cells = <1>;
++			ti,sci = <&sms>;
++			ti,sci-dev-id = <283>;
++			ti,interrupt-ranges = <0 64 64>,
++					      <64 448 64>,
++					      <128 672 64>;
++		};
++
++		main_udmass_inta: msi-controller@33d00000 {
++			compatible = "ti,sci-inta";
++			reg = <0x00 0x33d00000 0x00 0x100000>;
++			interrupt-controller;
++			#interrupt-cells = <0>;
++			interrupt-parent = <&main_navss_intr>;
++			msi-controller;
++			ti,sci = <&sms>;
++			ti,sci-dev-id = <321>;
++			ti,interrupt-ranges = <0 0 256>;
++			ti,unmapped-event-sources = <&main_bcdma_csi>;
++		};
++
++		secure_proxy_main: mailbox@32c00000 {
++			bootph-all;
++			compatible = "ti,am654-secure-proxy";
++			#mbox-cells = <1>;
++			reg-names = "target_data", "rt", "scfg";
++			reg = <0x00 0x32c00000 0x00 0x100000>,
++			      <0x00 0x32400000 0x00 0x100000>,
++			      <0x00 0x32800000 0x00 0x100000>;
++			interrupt-names = "rx_011";
++			interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
++		};
++
++		hwspinlock: hwlock@30e00000 {
++			compatible = "ti,am654-hwspinlock";
++			reg = <0x00 0x30e00000 0x00 0x1000>;
++			#hwlock-cells = <1>;
++		};
++
++		mailbox0_cluster0: mailbox@31f80000 {
++			compatible = "ti,am654-mailbox";
++			reg = <0x00 0x31f80000 0x00 0x200>;
++			#mbox-cells = <1>;
++			ti,mbox-num-users = <4>;
++			ti,mbox-num-fifos = <16>;
++			interrupt-parent = <&main_navss_intr>;
++			status = "disabled";
++		};
++
++		mailbox0_cluster1: mailbox@31f81000 {
++			compatible = "ti,am654-mailbox";
++			reg = <0x00 0x31f81000 0x00 0x200>;
++			#mbox-cells = <1>;
++			ti,mbox-num-users = <4>;
++			ti,mbox-num-fifos = <16>;
++			interrupt-parent = <&main_navss_intr>;
++			status = "disabled";
++		};
++
++		mailbox0_cluster2: mailbox@31f82000 {
++			compatible = "ti,am654-mailbox";
++			reg = <0x00 0x31f82000 0x00 0x200>;
++			#mbox-cells = <1>;
++			ti,mbox-num-users = <4>;
++			ti,mbox-num-fifos = <16>;
++			interrupt-parent = <&main_navss_intr>;
++			status = "disabled";
++		};
++
++		mailbox0_cluster3: mailbox@31f83000 {
++			compatible = "ti,am654-mailbox";
++			reg = <0x00 0x31f83000 0x00 0x200>;
++			#mbox-cells = <1>;
++			ti,mbox-num-users = <4>;
++			ti,mbox-num-fifos = <16>;
++			interrupt-parent = <&main_navss_intr>;
++			status = "disabled";
++		};
++
++		mailbox0_cluster4: mailbox@31f84000 {
++			compatible = "ti,am654-mailbox";
++			reg = <0x00 0x31f84000 0x00 0x200>;
++			#mbox-cells = <1>;
++			ti,mbox-num-users = <4>;
++			ti,mbox-num-fifos = <16>;
++			interrupt-parent = <&main_navss_intr>;
++			status = "disabled";
++		};
++
++		mailbox0_cluster5: mailbox@31f85000 {
++			compatible = "ti,am654-mailbox";
++			reg = <0x00 0x31f85000 0x00 0x200>;
++			#mbox-cells = <1>;
++			ti,mbox-num-users = <4>;
++			ti,mbox-num-fifos = <16>;
++			interrupt-parent = <&main_navss_intr>;
++			status = "disabled";
++		};
++
++		mailbox0_cluster6: mailbox@31f86000 {
++			compatible = "ti,am654-mailbox";
++			reg = <0x00 0x31f86000 0x00 0x200>;
++			#mbox-cells = <1>;
++			ti,mbox-num-users = <4>;
++			ti,mbox-num-fifos = <16>;
++			interrupt-parent = <&main_navss_intr>;
++			status = "disabled";
++		};
++
++		mailbox0_cluster7: mailbox@31f87000 {
++			compatible = "ti,am654-mailbox";
++			reg = <0x00 0x31f87000 0x00 0x200>;
++			#mbox-cells = <1>;
++			ti,mbox-num-users = <4>;
++			ti,mbox-num-fifos = <16>;
++			interrupt-parent = <&main_navss_intr>;
++			status = "disabled";
++		};
++
++		mailbox0_cluster8: mailbox@31f88000 {
++			compatible = "ti,am654-mailbox";
++			reg = <0x00 0x31f88000 0x00 0x200>;
++			#mbox-cells = <1>;
++			ti,mbox-num-users = <4>;
++			ti,mbox-num-fifos = <16>;
++			interrupt-parent = <&main_navss_intr>;
++			status = "disabled";
++		};
++
++		mailbox0_cluster9: mailbox@31f89000 {
++			compatible = "ti,am654-mailbox";
++			reg = <0x00 0x31f89000 0x00 0x200>;
++			#mbox-cells = <1>;
++			ti,mbox-num-users = <4>;
++			ti,mbox-num-fifos = <16>;
++			interrupt-parent = <&main_navss_intr>;
++			status = "disabled";
++		};
++
++		mailbox0_cluster10: mailbox@31f8a000 {
++			compatible = "ti,am654-mailbox";
++			reg = <0x00 0x31f8a000 0x00 0x200>;
++			#mbox-cells = <1>;
++			ti,mbox-num-users = <4>;
++			ti,mbox-num-fifos = <16>;
++			interrupt-parent = <&main_navss_intr>;
++			status = "disabled";
++		};
++
++		mailbox0_cluster11: mailbox@31f8b000 {
++			compatible = "ti,am654-mailbox";
++			reg = <0x00 0x31f8b000 0x00 0x200>;
++			#mbox-cells = <1>;
++			ti,mbox-num-users = <4>;
++			ti,mbox-num-fifos = <16>;
++			interrupt-parent = <&main_navss_intr>;
++			status = "disabled";
++		};
++
++		mailbox1_cluster0: mailbox@31f90000 {
++			compatible = "ti,am654-mailbox";
++			reg = <0x00 0x31f90000 0x00 0x200>;
++			#mbox-cells = <1>;
++			ti,mbox-num-users = <4>;
++			ti,mbox-num-fifos = <16>;
++			interrupt-parent = <&main_navss_intr>;
++			status = "disabled";
++		};
++
++		mailbox1_cluster1: mailbox@31f91000 {
++			compatible = "ti,am654-mailbox";
++			reg = <0x00 0x31f91000 0x00 0x200>;
++			#mbox-cells = <1>;
++			ti,mbox-num-users = <4>;
++			ti,mbox-num-fifos = <16>;
++			interrupt-parent = <&main_navss_intr>;
++			status = "disabled";
++		};
++
++		mailbox1_cluster2: mailbox@31f92000 {
++			compatible = "ti,am654-mailbox";
++			reg = <0x00 0x31f92000 0x00 0x200>;
++			#mbox-cells = <1>;
++			ti,mbox-num-users = <4>;
++			ti,mbox-num-fifos = <16>;
++			interrupt-parent = <&main_navss_intr>;
++			status = "disabled";
++		};
++
++		mailbox1_cluster3: mailbox@31f93000 {
++			compatible = "ti,am654-mailbox";
++			reg = <0x00 0x31f93000 0x00 0x200>;
++			#mbox-cells = <1>;
++			ti,mbox-num-users = <4>;
++			ti,mbox-num-fifos = <16>;
++			interrupt-parent = <&main_navss_intr>;
++			status = "disabled";
++		};
++
++		mailbox1_cluster4: mailbox@31f94000 {
++			compatible = "ti,am654-mailbox";
++			reg = <0x00 0x31f94000 0x00 0x200>;
++			#mbox-cells = <1>;
++			ti,mbox-num-users = <4>;
++			ti,mbox-num-fifos = <16>;
++			interrupt-parent = <&main_navss_intr>;
++			status = "disabled";
++		};
++
++		mailbox1_cluster5: mailbox@31f95000 {
++			compatible = "ti,am654-mailbox";
++			reg = <0x00 0x31f95000 0x00 0x200>;
++			#mbox-cells = <1>;
++			ti,mbox-num-users = <4>;
++			ti,mbox-num-fifos = <16>;
++			interrupt-parent = <&main_navss_intr>;
++			status = "disabled";
++		};
++
++		mailbox1_cluster6: mailbox@31f96000 {
++			compatible = "ti,am654-mailbox";
++			reg = <0x00 0x31f96000 0x00 0x200>;
++			#mbox-cells = <1>;
++			ti,mbox-num-users = <4>;
++			ti,mbox-num-fifos = <16>;
++			interrupt-parent = <&main_navss_intr>;
++			status = "disabled";
++		};
++
++		mailbox1_cluster7: mailbox@31f97000 {
++			compatible = "ti,am654-mailbox";
++			reg = <0x00 0x31f97000 0x00 0x200>;
++			#mbox-cells = <1>;
++			ti,mbox-num-users = <4>;
++			ti,mbox-num-fifos = <16>;
++			interrupt-parent = <&main_navss_intr>;
++			status = "disabled";
++		};
++
++		mailbox1_cluster8: mailbox@31f98000 {
++			compatible = "ti,am654-mailbox";
++			reg = <0x00 0x31f98000 0x00 0x200>;
++			#mbox-cells = <1>;
++			ti,mbox-num-users = <4>;
++			ti,mbox-num-fifos = <16>;
++			interrupt-parent = <&main_navss_intr>;
++			status = "disabled";
++		};
++
++		mailbox1_cluster9: mailbox@31f99000 {
++			compatible = "ti,am654-mailbox";
++			reg = <0x00 0x31f99000 0x00 0x200>;
++			#mbox-cells = <1>;
++			ti,mbox-num-users = <4>;
++			ti,mbox-num-fifos = <16>;
++			interrupt-parent = <&main_navss_intr>;
++			status = "disabled";
++		};
++
++		mailbox1_cluster10: mailbox@31f9a000 {
++			compatible = "ti,am654-mailbox";
++			reg = <0x00 0x31f9a000 0x00 0x200>;
++			#mbox-cells = <1>;
++			ti,mbox-num-users = <4>;
++			ti,mbox-num-fifos = <16>;
++			interrupt-parent = <&main_navss_intr>;
++			status = "disabled";
++		};
++
++		mailbox1_cluster11: mailbox@31f9b000 {
++			compatible = "ti,am654-mailbox";
++			reg = <0x00 0x31f9b000 0x00 0x200>;
++			#mbox-cells = <1>;
++			ti,mbox-num-users = <4>;
++			ti,mbox-num-fifos = <16>;
++			interrupt-parent = <&main_navss_intr>;
++			status = "disabled";
++		};
++
++		main_ringacc: ringacc@3c000000 {
++			compatible = "ti,am654-navss-ringacc";
++			reg = <0x00 0x3c000000 0x00 0x400000>,
++			      <0x00 0x38000000 0x00 0x400000>,
++			      <0x00 0x31120000 0x00 0x100>,
++			      <0x00 0x33000000 0x00 0x40000>,
++			      <0x00 0x31080000 0x00 0x40000>;
++			reg-names = "rt", "fifos", "proxy_gcfg", "proxy_target", "cfg";
++			ti,num-rings = <1024>;
++			ti,sci-rm-range-gp-rings = <0x1>;
++			ti,sci = <&sms>;
++			ti,sci-dev-id = <315>;
++			msi-parent = <&main_udmass_inta>;
++		};
++
++		main_udmap: dma-controller@31150000 {
++			compatible = "ti,j721e-navss-main-udmap";
++			reg = <0x00 0x31150000 0x00 0x100>,
++			      <0x00 0x34000000 0x00 0x80000>,
++			      <0x00 0x35000000 0x00 0x200000>,
++			      <0x00 0x30b00000 0x00 0x20000>,
++			      <0x00 0x30c00000 0x00 0x8000>,
++			      <0x00 0x30d00000 0x00 0x4000>;
++			reg-names = "gcfg", "rchanrt", "tchanrt",
++				    "tchan", "rchan", "rflow";
++			msi-parent = <&main_udmass_inta>;
++			#dma-cells = <1>;
++
++			ti,sci = <&sms>;
++			ti,sci-dev-id = <319>;
++			ti,ringacc = <&main_ringacc>;
++
++			ti,sci-rm-range-tchan = <0x0d>, /* TX_CHAN */
++						<0x0f>, /* TX_HCHAN */
++						<0x10>; /* TX_UHCHAN */
++			ti,sci-rm-range-rchan = <0x0a>, /* RX_CHAN */
++						<0x0b>, /* RX_HCHAN */
++						<0x0c>; /* RX_UHCHAN */
++			ti,sci-rm-range-rflow = <0x00>; /* GP RFLOW */
++		};
++
++		main_bcdma_csi: dma-controller@311a0000 {
++			compatible = "ti,j721s2-dmss-bcdma-csi";
++			reg = <0x00 0x311a0000 0x00 0x100>,
++			      <0x00 0x35d00000 0x00 0x20000>,
++			      <0x00 0x35c00000 0x00 0x10000>,
++			      <0x00 0x35e00000 0x00 0x80000>;
++			reg-names = "gcfg", "rchanrt", "tchanrt", "ringrt";
++			msi-parent = <&main_udmass_inta>;
++			#dma-cells = <3>;
++			ti,sci = <&sms>;
++			ti,sci-dev-id = <281>;
++			ti,sci-rm-range-rchan = <0x21>;
++			ti,sci-rm-range-tchan = <0x22>;
++		};
++
++		cpts@310d0000 {
++			compatible = "ti,j721e-cpts";
++			reg = <0x00 0x310d0000 0x00 0x400>;
++			reg-names = "cpts";
++			clocks = <&k3_clks 282 0>;
++			clock-names = "cpts";
++			assigned-clocks = <&k3_clks 62 3>; /* CPTS_RFT_CLK */
++			assigned-clock-parents = <&k3_clks 62 5>; /* MAIN_0_HSDIV6_CLK */
++			interrupts-extended = <&main_navss_intr 391>;
++			interrupt-names = "cpts";
++			ti,cpts-periodic-outputs = <6>;
++			ti,cpts-ext-ts-inputs = <8>;
++		};
++	};
++
++	main_cpsw0: ethernet@c000000 {
++		compatible = "ti,j784s4-cpswxg-nuss";
++		reg = <0x00 0xc000000 0x00 0x200000>;
++		reg-names = "cpsw_nuss";
++		ranges = <0x00 0x00 0x00 0xc000000 0x00 0x200000>;
++		#address-cells = <2>;
++		#size-cells = <2>;
++		dma-coherent;
++		clocks = <&k3_clks 64 0>;
++		clock-names = "fck";
++		power-domains = <&k3_pds 64 TI_SCI_PD_EXCLUSIVE>;
++
++		dmas = <&main_udmap 0xca00>,
++		       <&main_udmap 0xca01>,
++		       <&main_udmap 0xca02>,
++		       <&main_udmap 0xca03>,
++		       <&main_udmap 0xca04>,
++		       <&main_udmap 0xca05>,
++		       <&main_udmap 0xca06>,
++		       <&main_udmap 0xca07>,
++		       <&main_udmap 0x4a00>;
++		dma-names = "tx0", "tx1", "tx2", "tx3",
++			    "tx4", "tx5", "tx6", "tx7",
++			    "rx";
++
++		status = "disabled";
++
++		ethernet-ports {
++			#address-cells = <1>;
++			#size-cells = <0>;
++
++			main_cpsw0_port1: port@1 {
++				reg = <1>;
++				label = "port1";
++				ti,mac-only;
++				status = "disabled";
++			};
++
++			main_cpsw0_port2: port@2 {
++				reg = <2>;
++				label = "port2";
++				ti,mac-only;
++				status = "disabled";
++			};
++
++			main_cpsw0_port3: port@3 {
++				reg = <3>;
++				label = "port3";
++				ti,mac-only;
++				status = "disabled";
++			};
++
++			main_cpsw0_port4: port@4 {
++				reg = <4>;
++				label = "port4";
++				ti,mac-only;
++				status = "disabled";
++			};
++
++			main_cpsw0_port5: port@5 {
++				reg = <5>;
++				label = "port5";
++				ti,mac-only;
++				status = "disabled";
++			};
++
++			main_cpsw0_port6: port@6 {
++				reg = <6>;
++				label = "port6";
++				ti,mac-only;
++				status = "disabled";
++			};
++
++			main_cpsw0_port7: port@7 {
++				reg = <7>;
++				label = "port7";
++				ti,mac-only;
++				status = "disabled";
++			};
++
++			main_cpsw0_port8: port@8 {
++				reg = <8>;
++				label = "port8";
++				ti,mac-only;
++				status = "disabled";
++			};
++		};
++
++		main_cpsw0_mdio: mdio@f00 {
++			compatible = "ti,cpsw-mdio", "ti,davinci_mdio";
++			reg = <0x00 0xf00 0x00 0x100>;
++			#address-cells = <1>;
++			#size-cells = <0>;
++			clocks = <&k3_clks 64 0>;
++			clock-names = "fck";
++			bus_freq = <1000000>;
++			status = "disabled";
++		};
++
++		cpts@3d000 {
++			compatible = "ti,am65-cpts";
++			reg = <0x00 0x3d000 0x00 0x400>;
++			clocks = <&k3_clks 64 3>;
++			clock-names = "cpts";
++			interrupts-extended = <&gic500 GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
++			interrupt-names = "cpts";
++			ti,cpts-ext-ts-inputs = <4>;
++			ti,cpts-periodic-outputs = <2>;
++		};
++	};
++
++	main_cpsw1: ethernet@c200000 {
++		compatible = "ti,j721e-cpsw-nuss";
++		reg = <0x00 0xc200000 0x00 0x200000>;
++		reg-names = "cpsw_nuss";
++		ranges = <0x00 0x00 0x00 0xc200000 0x00 0x200000>;
++		#address-cells = <2>;
++		#size-cells = <2>;
++		dma-coherent;
++		clocks = <&k3_clks 62 0>;
++		clock-names = "fck";
++		power-domains = <&k3_pds 62 TI_SCI_PD_EXCLUSIVE>;
++
++		dmas = <&main_udmap 0xc640>,
++			<&main_udmap 0xc641>,
++			<&main_udmap 0xc642>,
++			<&main_udmap 0xc643>,
++			<&main_udmap 0xc644>,
++			<&main_udmap 0xc645>,
++			<&main_udmap 0xc646>,
++			<&main_udmap 0xc647>,
++			<&main_udmap 0x4640>;
++		dma-names = "tx0", "tx1", "tx2", "tx3",
++				"tx4", "tx5", "tx6", "tx7",
++				"rx";
++
++		status = "disabled";
++
++		ethernet-ports {
++			#address-cells = <1>;
++			#size-cells = <0>;
++
++			main_cpsw1_port1: port@1 {
++				reg = <1>;
++				label = "port1";
++				phys = <&cpsw1_phy_gmii_sel 1>;
++				ti,mac-only;
++				status = "disabled";
++			};
++		};
++
++		main_cpsw1_mdio: mdio@f00 {
++			compatible = "ti,cpsw-mdio", "ti,davinci_mdio";
++			reg = <0x00 0xf00 0x00 0x100>;
++			#address-cells = <1>;
++			#size-cells = <0>;
++			clocks = <&k3_clks 62 0>;
++			clock-names = "fck";
++			bus_freq = <1000000>;
++			status = "disabled";
++		};
++
++		cpts@3d000 {
++			compatible = "ti,am65-cpts";
++			reg = <0x00 0x3d000 0x00 0x400>;
++			clocks = <&k3_clks 62 3>;
++			clock-names = "cpts";
++			interrupts-extended = <&gic500 GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
++			interrupt-names = "cpts";
++			ti,cpts-ext-ts-inputs = <4>;
++			ti,cpts-periodic-outputs = <2>;
++		};
++	};
++
++	main_mcan0: can@2701000 {
++		compatible = "bosch,m_can";
++		reg = <0x00 0x02701000 0x00 0x200>,
++		      <0x00 0x02708000 0x00 0x8000>;
++		reg-names = "m_can", "message_ram";
++		power-domains = <&k3_pds 245 TI_SCI_PD_EXCLUSIVE>;
++		clocks = <&k3_clks 245 6>, <&k3_clks 245 1>;
++		clock-names = "hclk", "cclk";
++		interrupts = <GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
++			     <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>;
++		interrupt-names = "int0", "int1";
++		bosch,mram-cfg = <0x00 128 64 64 64 64 32 32>;
++		status = "disabled";
++	};
++
++	main_mcan1: can@2711000 {
++		compatible = "bosch,m_can";
++		reg = <0x00 0x02711000 0x00 0x200>,
++		      <0x00 0x02718000 0x00 0x8000>;
++		reg-names = "m_can", "message_ram";
++		power-domains = <&k3_pds 246 TI_SCI_PD_EXCLUSIVE>;
++		clocks = <&k3_clks 246 6>, <&k3_clks 246 1>;
++		clock-names = "hclk", "cclk";
++		interrupts = <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>,
++			     <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>;
++		interrupt-names = "int0", "int1";
++		bosch,mram-cfg = <0x00 128 64 64 64 64 32 32>;
++		status = "disabled";
++	};
++
++	main_mcan2: can@2721000 {
++		compatible = "bosch,m_can";
++		reg = <0x00 0x02721000 0x00 0x200>,
++		      <0x00 0x02728000 0x00 0x8000>;
++		reg-names = "m_can", "message_ram";
++		power-domains = <&k3_pds 247 TI_SCI_PD_EXCLUSIVE>;
++		clocks = <&k3_clks 247 6>, <&k3_clks 247 1>;
++		clock-names = "hclk", "cclk";
++		interrupts = <GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>,
++			     <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>;
++		interrupt-names = "int0", "int1";
++		bosch,mram-cfg = <0x00 128 64 64 64 64 32 32>;
++		status = "disabled";
++	};
++
++	main_mcan3: can@2731000 {
++		compatible = "bosch,m_can";
++		reg = <0x00 0x02731000 0x00 0x200>,
++		      <0x00 0x02738000 0x00 0x8000>;
++		reg-names = "m_can", "message_ram";
++		power-domains = <&k3_pds 248 TI_SCI_PD_EXCLUSIVE>;
++		clocks = <&k3_clks 248 6>, <&k3_clks 248 1>;
++		clock-names = "hclk", "cclk";
++		interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>,
++			     <GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>;
++		interrupt-names = "int0", "int1";
++		bosch,mram-cfg = <0x00 128 64 64 64 64 32 32>;
++		status = "disabled";
++	};
++
++	main_mcan4: can@2741000 {
++		compatible = "bosch,m_can";
++		reg = <0x00 0x02741000 0x00 0x200>,
++		      <0x00 0x02748000 0x00 0x8000>;
++		reg-names = "m_can", "message_ram";
++		power-domains = <&k3_pds 249 TI_SCI_PD_EXCLUSIVE>;
++		clocks = <&k3_clks 249 6>, <&k3_clks 249 1>;
++		clock-names = "hclk", "cclk";
++		interrupts = <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>,
++			     <GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>;
++		interrupt-names = "int0", "int1";
++		bosch,mram-cfg = <0x00 128 64 64 64 64 32 32>;
++		status = "disabled";
++	};
++
++	main_mcan5: can@2751000 {
++		compatible = "bosch,m_can";
++		reg = <0x00 0x02751000 0x00 0x200>,
++		      <0x00 0x02758000 0x00 0x8000>;
++		reg-names = "m_can", "message_ram";
++		power-domains = <&k3_pds 250 TI_SCI_PD_EXCLUSIVE>;
++		clocks = <&k3_clks 250 6>, <&k3_clks 250 1>;
++		clock-names = "hclk", "cclk";
++		interrupts = <GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>,
++			     <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>;
++		interrupt-names = "int0", "int1";
++		bosch,mram-cfg = <0x00 128 64 64 64 64 32 32>;
++		status = "disabled";
++	};
++
++	main_mcan6: can@2761000 {
++		compatible = "bosch,m_can";
++		reg = <0x00 0x02761000 0x00 0x200>,
++		      <0x00 0x02768000 0x00 0x8000>;
++		reg-names = "m_can", "message_ram";
++		power-domains = <&k3_pds 251 TI_SCI_PD_EXCLUSIVE>;
++		clocks = <&k3_clks 251 6>, <&k3_clks 251 1>;
++		clock-names = "hclk", "cclk";
++		interrupts = <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
++			     <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
++		interrupt-names = "int0", "int1";
++		bosch,mram-cfg = <0x00 128 64 64 64 64 32 32>;
++		status = "disabled";
++	};
++
++	main_mcan7: can@2771000 {
++		compatible = "bosch,m_can";
++		reg = <0x00 0x02771000 0x00 0x200>,
++		      <0x00 0x02778000 0x00 0x8000>;
++		reg-names = "m_can", "message_ram";
++		power-domains = <&k3_pds 252 TI_SCI_PD_EXCLUSIVE>;
++		clocks = <&k3_clks 252 6>, <&k3_clks 252 1>;
++		clock-names = "hclk", "cclk";
++		interrupts = <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>,
++			     <GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>;
++		interrupt-names = "int0", "int1";
++		bosch,mram-cfg = <0x00 128 64 64 64 64 32 32>;
++		status = "disabled";
++	};
++
++	main_mcan8: can@2781000 {
++		compatible = "bosch,m_can";
++		reg = <0x00 0x02781000 0x00 0x200>,
++		      <0x00 0x02788000 0x00 0x8000>;
++		reg-names = "m_can", "message_ram";
++		power-domains = <&k3_pds 253 TI_SCI_PD_EXCLUSIVE>;
++		clocks = <&k3_clks 253 6>, <&k3_clks 253 1>;
++		clock-names = "hclk", "cclk";
++		interrupts = <GIC_SPI 576 IRQ_TYPE_LEVEL_HIGH>,
++			     <GIC_SPI 577 IRQ_TYPE_LEVEL_HIGH>;
++		interrupt-names = "int0", "int1";
++		bosch,mram-cfg = <0x00 128 64 64 64 64 32 32>;
++		status = "disabled";
++	};
++
++	main_mcan9: can@2791000 {
++		compatible = "bosch,m_can";
++		reg = <0x00 0x02791000 0x00 0x200>,
++		      <0x00 0x02798000 0x00 0x8000>;
++		reg-names = "m_can", "message_ram";
++		power-domains = <&k3_pds 254 TI_SCI_PD_EXCLUSIVE>;
++		clocks = <&k3_clks 254 6>, <&k3_clks 254 1>;
++		clock-names = "hclk", "cclk";
++		interrupts = <GIC_SPI 579 IRQ_TYPE_LEVEL_HIGH>,
++			     <GIC_SPI 580 IRQ_TYPE_LEVEL_HIGH>;
++		interrupt-names = "int0", "int1";
++		bosch,mram-cfg = <0x00 128 64 64 64 64 32 32>;
++		status = "disabled";
++	};
++
++	main_mcan10: can@27a1000 {
++		compatible = "bosch,m_can";
++		reg = <0x00 0x027a1000 0x00 0x200>,
++		      <0x00 0x027a8000 0x00 0x8000>;
++		reg-names = "m_can", "message_ram";
++		power-domains = <&k3_pds 255 TI_SCI_PD_EXCLUSIVE>;
++		clocks = <&k3_clks 255 6>, <&k3_clks 255 1>;
++		clock-names = "hclk", "cclk";
++		interrupts = <GIC_SPI 582 IRQ_TYPE_LEVEL_HIGH>,
++			     <GIC_SPI 583 IRQ_TYPE_LEVEL_HIGH>;
++		interrupt-names = "int0", "int1";
++		bosch,mram-cfg = <0x00 128 64 64 64 64 32 32>;
++		status = "disabled";
++	};
++
++	main_mcan11: can@27b1000 {
++		compatible = "bosch,m_can";
++		reg = <0x00 0x027b1000 0x00 0x200>,
++		      <0x00 0x027b8000 0x00 0x8000>;
++		reg-names = "m_can", "message_ram";
++		power-domains = <&k3_pds 256 TI_SCI_PD_EXCLUSIVE>;
++		clocks = <&k3_clks 256 6>, <&k3_clks 256 1>;
++		clock-names = "hclk", "cclk";
++		interrupts = <GIC_SPI 585 IRQ_TYPE_LEVEL_HIGH>,
++			     <GIC_SPI 586 IRQ_TYPE_LEVEL_HIGH>;
++		interrupt-names = "int0", "int1";
++		bosch,mram-cfg = <0x00 128 64 64 64 64 32 32>;
++		status = "disabled";
++	};
++
++	main_mcan12: can@27c1000 {
++		compatible = "bosch,m_can";
++		reg = <0x00 0x027c1000 0x00 0x200>,
++		      <0x00 0x027c8000 0x00 0x8000>;
++		reg-names = "m_can", "message_ram";
++		power-domains = <&k3_pds 257 TI_SCI_PD_EXCLUSIVE>;
++		clocks = <&k3_clks 257 6>, <&k3_clks 257 1>;
++		clock-names = "hclk", "cclk";
++		interrupts = <GIC_SPI 588 IRQ_TYPE_LEVEL_HIGH>,
++			     <GIC_SPI 589 IRQ_TYPE_LEVEL_HIGH>;
++		interrupt-names = "int0", "int1";
++		bosch,mram-cfg = <0x00 128 64 64 64 64 32 32>;
++		status = "disabled";
++	};
++
++	main_mcan13: can@27d1000 {
++		compatible = "bosch,m_can";
++		reg = <0x00 0x027d1000 0x00 0x200>,
++		      <0x00 0x027d8000 0x00 0x8000>;
++		reg-names = "m_can", "message_ram";
++		power-domains = <&k3_pds 258 TI_SCI_PD_EXCLUSIVE>;
++		clocks = <&k3_clks 258 6>, <&k3_clks 258 1>;
++		clock-names = "hclk", "cclk";
++		interrupts = <GIC_SPI 591 IRQ_TYPE_LEVEL_HIGH>,
++			     <GIC_SPI 592 IRQ_TYPE_LEVEL_HIGH>;
++		interrupt-names = "int0", "int1";
++		bosch,mram-cfg = <0x00 128 64 64 64 64 32 32>;
++		status = "disabled";
++	};
++
++	main_mcan14: can@2681000 {
++		compatible = "bosch,m_can";
++		reg = <0x00 0x02681000 0x00 0x200>,
++		      <0x00 0x02688000 0x00 0x8000>;
++		reg-names = "m_can", "message_ram";
++		power-domains = <&k3_pds 259 TI_SCI_PD_EXCLUSIVE>;
++		clocks = <&k3_clks 259 6>, <&k3_clks 259 1>;
++		clock-names = "hclk", "cclk";
++		interrupts = <GIC_SPI 594 IRQ_TYPE_LEVEL_HIGH>,
++			     <GIC_SPI 595 IRQ_TYPE_LEVEL_HIGH>;
++		interrupt-names = "int0", "int1";
++		bosch,mram-cfg = <0x00 128 64 64 64 64 32 32>;
++		status = "disabled";
++	};
++
++	main_mcan15: can@2691000 {
++		compatible = "bosch,m_can";
++		reg = <0x00 0x02691000 0x00 0x200>,
++		      <0x00 0x02698000 0x00 0x8000>;
++		reg-names = "m_can", "message_ram";
++		power-domains = <&k3_pds 260 TI_SCI_PD_EXCLUSIVE>;
++		clocks = <&k3_clks 260 6>, <&k3_clks 260 1>;
++		clock-names = "hclk", "cclk";
++		interrupts = <GIC_SPI 597 IRQ_TYPE_LEVEL_HIGH>,
++			     <GIC_SPI 598 IRQ_TYPE_LEVEL_HIGH>;
++		interrupt-names = "int0", "int1";
++		bosch,mram-cfg = <0x00 128 64 64 64 64 32 32>;
++		status = "disabled";
++	};
++
++	main_mcan16: can@26a1000 {
++		compatible = "bosch,m_can";
++		reg = <0x00 0x026a1000 0x00 0x200>,
++		      <0x00 0x026a8000 0x00 0x8000>;
++		reg-names = "m_can", "message_ram";
++		power-domains = <&k3_pds 261 TI_SCI_PD_EXCLUSIVE>;
++		clocks = <&k3_clks 261 6>, <&k3_clks 261 1>;
++		clock-names = "hclk", "cclk";
++		interrupts = <GIC_SPI 784 IRQ_TYPE_LEVEL_HIGH>,
++			     <GIC_SPI 785 IRQ_TYPE_LEVEL_HIGH>;
++		interrupt-names = "int0", "int1";
++		bosch,mram-cfg = <0x00 128 64 64 64 64 32 32>;
++		status = "disabled";
++	};
++
++	main_mcan17: can@26b1000 {
++		compatible = "bosch,m_can";
++		reg = <0x00 0x026b1000 0x00 0x200>,
++		      <0x00 0x026b8000 0x00 0x8000>;
++		reg-names = "m_can", "message_ram";
++		power-domains = <&k3_pds 262 TI_SCI_PD_EXCLUSIVE>;
++		clocks = <&k3_clks 262 6>, <&k3_clks 262 1>;
++		clock-names = "hclk", "cclk";
++		interrupts = <GIC_SPI 787 IRQ_TYPE_LEVEL_HIGH>,
++			     <GIC_SPI 788 IRQ_TYPE_LEVEL_HIGH>;
++		interrupt-names = "int0", "int1";
++		bosch,mram-cfg = <0x00 128 64 64 64 64 32 32>;
++		status = "disabled";
++	};
++
++	main_spi0: spi@2100000 {
++		compatible = "ti,am654-mcspi","ti,omap4-mcspi";
++		reg = <0x00 0x02100000 0x00 0x400>;
++		interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>;
++		#address-cells = <1>;
++		#size-cells = <0>;
++		power-domains = <&k3_pds 376 TI_SCI_PD_EXCLUSIVE>;
++		clocks = <&k3_clks 376 1>;
++		status = "disabled";
++	};
++
++	main_spi1: spi@2110000 {
++		compatible = "ti,am654-mcspi","ti,omap4-mcspi";
++		reg = <0x00 0x02110000 0x00 0x400>;
++		interrupts = <GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>;
++		#address-cells = <1>;
++		#size-cells = <0>;
++		power-domains = <&k3_pds 377 TI_SCI_PD_EXCLUSIVE>;
++		clocks = <&k3_clks 377 1>;
++		status = "disabled";
++	};
++
++	main_spi2: spi@2120000 {
++		compatible = "ti,am654-mcspi","ti,omap4-mcspi";
++		reg = <0x00 0x02120000 0x00 0x400>;
++		interrupts = <GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>;
++		#address-cells = <1>;
++		#size-cells = <0>;
++		power-domains = <&k3_pds 378 TI_SCI_PD_EXCLUSIVE>;
++		clocks = <&k3_clks 378 1>;
++		status = "disabled";
++	};
++
++	main_spi3: spi@2130000 {
++		compatible = "ti,am654-mcspi","ti,omap4-mcspi";
++		reg = <0x00 0x02130000 0x00 0x400>;
++		interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>;
++		#address-cells = <1>;
++		#size-cells = <0>;
++		power-domains = <&k3_pds 379 TI_SCI_PD_EXCLUSIVE>;
++		clocks = <&k3_clks 379 1>;
++		status = "disabled";
++	};
++
++	main_spi4: spi@2140000 {
++		compatible = "ti,am654-mcspi","ti,omap4-mcspi";
++		reg = <0x00 0x02140000 0x00 0x400>;
++		interrupts = <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>;
++		#address-cells = <1>;
++		#size-cells = <0>;
++		power-domains = <&k3_pds 380 TI_SCI_PD_EXCLUSIVE>;
++		clocks = <&k3_clks 380 1>;
++		status = "disabled";
++	};
++
++	main_spi5: spi@2150000 {
++		compatible = "ti,am654-mcspi","ti,omap4-mcspi";
++		reg = <0x00 0x02150000 0x00 0x400>;
++		interrupts = <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>;
++		#address-cells = <1>;
++		#size-cells = <0>;
++		power-domains = <&k3_pds 381 TI_SCI_PD_EXCLUSIVE>;
++		clocks = <&k3_clks 381 1>;
++		status = "disabled";
++	};
++
++	main_spi6: spi@2160000 {
++		compatible = "ti,am654-mcspi","ti,omap4-mcspi";
++		reg = <0x00 0x02160000 0x00 0x400>;
++		interrupts = <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>;
++		#address-cells = <1>;
++		#size-cells = <0>;
++		power-domains = <&k3_pds 382 TI_SCI_PD_EXCLUSIVE>;
++		clocks = <&k3_clks 382 1>;
++		status = "disabled";
++	};
++
++	main_spi7: spi@2170000 {
++		compatible = "ti,am654-mcspi","ti,omap4-mcspi";
++		reg = <0x00 0x02170000 0x00 0x400>;
++		interrupts = <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>;
++		#address-cells = <1>;
++		#size-cells = <0>;
++		power-domains = <&k3_pds 383 TI_SCI_PD_EXCLUSIVE>;
++		clocks = <&k3_clks 383 1>;
++		status = "disabled";
++	};
++
++	ufs_wrapper: ufs-wrapper@4e80000 {
++		compatible = "ti,j721e-ufs";
++		reg = <0x00 0x4e80000 0x00 0x100>;
++		power-domains = <&k3_pds 387 TI_SCI_PD_EXCLUSIVE>;
++		clocks = <&k3_clks 387 3>;
++		assigned-clocks = <&k3_clks 387 3>;
++		assigned-clock-parents = <&k3_clks 387 6>;
++		ranges;
++		#address-cells = <2>;
++		#size-cells = <2>;
++		status = "disabled";
++
++		ufs@4e84000 {
++			compatible = "cdns,ufshc-m31-16nm", "jedec,ufs-2.0";
++			reg = <0x00 0x4e84000 0x00 0x10000>;
++			interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
++			freq-table-hz = <250000000 250000000>, <19200000 19200000>,
++					<19200000 19200000>;
++			clocks = <&k3_clks 387 1>, <&k3_clks 387 3>, <&k3_clks 387 3>;
++			clock-names = "core_clk", "phy_clk", "ref_clk";
++			dma-coherent;
++		};
++	};
++
++	main_r5fss0: r5fss@5c00000 {
++		compatible = "ti,j721s2-r5fss";
++		ti,cluster-mode = <1>;
++		#address-cells = <1>;
++		#size-cells = <1>;
++		ranges = <0x5c00000 0x00 0x5c00000 0x20000>,
++			 <0x5d00000 0x00 0x5d00000 0x20000>;
++		power-domains = <&k3_pds 336 TI_SCI_PD_EXCLUSIVE>;
++
++		main_r5fss0_core0: r5f@5c00000 {
++			compatible = "ti,j721s2-r5f";
++			reg = <0x5c00000 0x00010000>,
++			      <0x5c10000 0x00010000>;
++			reg-names = "atcm", "btcm";
++			ti,sci = <&sms>;
++			ti,sci-dev-id = <339>;
++			ti,sci-proc-ids = <0x06 0xff>;
++			resets = <&k3_reset 339 1>;
++			firmware-name = "j784s4-main-r5f0_0-fw";
++			ti,atcm-enable = <1>;
++			ti,btcm-enable = <1>;
++			ti,loczrama = <1>;
++		};
++
++		main_r5fss0_core1: r5f@5d00000 {
++			compatible = "ti,j721s2-r5f";
++			reg = <0x5d00000 0x00010000>,
++			      <0x5d10000 0x00010000>;
++			reg-names = "atcm", "btcm";
++			ti,sci = <&sms>;
++			ti,sci-dev-id = <340>;
++			ti,sci-proc-ids = <0x07 0xff>;
++			resets = <&k3_reset 340 1>;
++			firmware-name = "j784s4-main-r5f0_1-fw";
++			ti,atcm-enable = <1>;
++			ti,btcm-enable = <1>;
++			ti,loczrama = <1>;
++		};
++	};
++
++	main_r5fss1: r5fss@5e00000 {
++		compatible = "ti,j721s2-r5fss";
++		ti,cluster-mode = <1>;
++		#address-cells = <1>;
++		#size-cells = <1>;
++		ranges = <0x5e00000 0x00 0x5e00000 0x20000>,
++			 <0x5f00000 0x00 0x5f00000 0x20000>;
++		power-domains = <&k3_pds 337 TI_SCI_PD_EXCLUSIVE>;
++
++		main_r5fss1_core0: r5f@5e00000 {
++			compatible = "ti,j721s2-r5f";
++			reg = <0x5e00000 0x00010000>,
++			      <0x5e10000 0x00010000>;
++			reg-names = "atcm", "btcm";
++			ti,sci = <&sms>;
++			ti,sci-dev-id = <341>;
++			ti,sci-proc-ids = <0x08 0xff>;
++			resets = <&k3_reset 341 1>;
++			firmware-name = "j784s4-main-r5f1_0-fw";
++			ti,atcm-enable = <1>;
++			ti,btcm-enable = <1>;
++			ti,loczrama = <1>;
++		};
++
++		main_r5fss1_core1: r5f@5f00000 {
++			compatible = "ti,j721s2-r5f";
++			reg = <0x5f00000 0x00010000>,
++			      <0x5f10000 0x00010000>;
++			reg-names = "atcm", "btcm";
++			ti,sci = <&sms>;
++			ti,sci-dev-id = <342>;
++			ti,sci-proc-ids = <0x09 0xff>;
++			resets = <&k3_reset 342 1>;
++			firmware-name = "j784s4-main-r5f1_1-fw";
++			ti,atcm-enable = <1>;
++			ti,btcm-enable = <1>;
++			ti,loczrama = <1>;
++		};
++	};
++
++	main_r5fss2: r5fss@5900000 {
++		compatible = "ti,j721s2-r5fss";
++		ti,cluster-mode = <1>;
++		#address-cells = <1>;
++		#size-cells = <1>;
++		ranges = <0x5900000 0x00 0x5900000 0x20000>,
++			 <0x5a00000 0x00 0x5a00000 0x20000>;
++		power-domains = <&k3_pds 338 TI_SCI_PD_EXCLUSIVE>;
++
++		main_r5fss2_core0: r5f@5900000 {
++			compatible = "ti,j721s2-r5f";
++			reg = <0x5900000 0x00010000>,
++			      <0x5910000 0x00010000>;
++			reg-names = "atcm", "btcm";
++			ti,sci = <&sms>;
++			ti,sci-dev-id = <343>;
++			ti,sci-proc-ids = <0x0a 0xff>;
++			resets = <&k3_reset 343 1>;
++			firmware-name = "j784s4-main-r5f2_0-fw";
++			ti,atcm-enable = <1>;
++			ti,btcm-enable = <1>;
++			ti,loczrama = <1>;
++		};
++
++		main_r5fss2_core1: r5f@5a00000 {
++			compatible = "ti,j721s2-r5f";
++			reg = <0x5a00000 0x00010000>,
++			      <0x5a10000 0x00010000>;
++			reg-names = "atcm", "btcm";
++			ti,sci = <&sms>;
++			ti,sci-dev-id = <344>;
++			ti,sci-proc-ids = <0x0b 0xff>;
++			resets = <&k3_reset 344 1>;
++			firmware-name = "j784s4-main-r5f2_1-fw";
++			ti,atcm-enable = <1>;
++			ti,btcm-enable = <1>;
++			ti,loczrama = <1>;
++		};
++	};
++
++	c71_0: dsp@64800000 {
++		compatible = "ti,j721s2-c71-dsp";
++		reg = <0x00 0x64800000 0x00 0x00080000>,
++		      <0x00 0x64e00000 0x00 0x0000c000>;
++		reg-names = "l2sram", "l1dram";
++		ti,sci = <&sms>;
++		ti,sci-dev-id = <30>;
++		ti,sci-proc-ids = <0x30 0xff>;
++		resets = <&k3_reset 30 1>;
++		firmware-name = "j784s4-c71_0-fw";
++		status = "disabled";
++	};
++
++	c71_1: dsp@65800000 {
++		compatible = "ti,j721s2-c71-dsp";
++		reg = <0x00 0x65800000 0x00 0x00080000>,
++		      <0x00 0x65e00000 0x00 0x0000c000>;
++		reg-names = "l2sram", "l1dram";
++		ti,sci = <&sms>;
++		ti,sci-dev-id = <33>;
++		ti,sci-proc-ids = <0x31 0xff>;
++		resets = <&k3_reset 33 1>;
++		firmware-name = "j784s4-c71_1-fw";
++		status = "disabled";
++	};
++
++	c71_2: dsp@66800000 {
++		compatible = "ti,j721s2-c71-dsp";
++		reg = <0x00 0x66800000 0x00 0x00080000>,
++		      <0x00 0x66e00000 0x00 0x0000c000>;
++		reg-names = "l2sram", "l1dram";
++		ti,sci = <&sms>;
++		ti,sci-dev-id = <37>;
++		ti,sci-proc-ids = <0x32 0xff>;
++		resets = <&k3_reset 37 1>;
++		firmware-name = "j784s4-c71_2-fw";
++		status = "disabled";
++	};
++
++	main_esm: esm@700000 {
++		compatible = "ti,j721e-esm";
++		reg = <0x00 0x700000 0x00 0x1000>;
++		ti,esm-pins = <688>, <689>, <690>, <691>, <692>, <693>, <694>,
++			      <695>;
++		bootph-pre-ram;
++	};
++
++	watchdog0: watchdog@2200000 {
++		compatible = "ti,j7-rti-wdt";
++		reg = <0x00 0x2200000 0x00 0x100>;
++		clocks = <&k3_clks 348 0>;
++		power-domains = <&k3_pds 348 TI_SCI_PD_EXCLUSIVE>;
++		assigned-clocks = <&k3_clks 348 0>;
++		assigned-clock-parents = <&k3_clks 348 4>;
++	};
++
++	watchdog1: watchdog@2210000 {
++		compatible = "ti,j7-rti-wdt";
++		reg = <0x00 0x2210000 0x00 0x100>;
++		clocks = <&k3_clks 349 0>;
++		power-domains = <&k3_pds 349 TI_SCI_PD_EXCLUSIVE>;
++		assigned-clocks = <&k3_clks 349 0>;
++		assigned-clock-parents = <&k3_clks 349 4>;
++	};
++
++	watchdog2: watchdog@2220000 {
++		compatible = "ti,j7-rti-wdt";
++		reg = <0x00 0x2220000 0x00 0x100>;
++		clocks = <&k3_clks 350 0>;
++		power-domains = <&k3_pds 350 TI_SCI_PD_EXCLUSIVE>;
++		assigned-clocks = <&k3_clks 350 0>;
++		assigned-clock-parents = <&k3_clks 350 4>;
++	};
++
++	watchdog3: watchdog@2230000 {
++		compatible = "ti,j7-rti-wdt";
++		reg = <0x00 0x2230000 0x00 0x100>;
++		clocks = <&k3_clks 351 0>;
++		power-domains = <&k3_pds 351 TI_SCI_PD_EXCLUSIVE>;
++		assigned-clocks = <&k3_clks 351 0>;
++		assigned-clock-parents = <&k3_clks 351 4>;
++	};
++
++	watchdog4: watchdog@2240000 {
++		compatible = "ti,j7-rti-wdt";
++		reg = <0x00 0x2240000 0x00 0x100>;
++		clocks = <&k3_clks 352 0>;
++		power-domains = <&k3_pds 352 TI_SCI_PD_EXCLUSIVE>;
++		assigned-clocks = <&k3_clks 352 0>;
++		assigned-clock-parents = <&k3_clks 352 4>;
++	};
++
++	watchdog5: watchdog@2250000 {
++		compatible = "ti,j7-rti-wdt";
++		reg = <0x00 0x2250000 0x00 0x100>;
++		clocks = <&k3_clks 353 0>;
++		power-domains = <&k3_pds 353 TI_SCI_PD_EXCLUSIVE>;
++		assigned-clocks = <&k3_clks 353 0>;
++		assigned-clock-parents = <&k3_clks 353 4>;
++	};
++
++	watchdog6: watchdog@2260000 {
++		compatible = "ti,j7-rti-wdt";
++		reg = <0x00 0x2260000 0x00 0x100>;
++		clocks = <&k3_clks 354 0>;
++		power-domains = <&k3_pds 354 TI_SCI_PD_EXCLUSIVE>;
++		assigned-clocks = <&k3_clks 354 0>;
++		assigned-clock-parents = <&k3_clks 354 4>;
++	};
++
++	watchdog7: watchdog@2270000 {
++		compatible = "ti,j7-rti-wdt";
++		reg = <0x00 0x2270000 0x00 0x100>;
++		clocks = <&k3_clks 355 0>;
++		power-domains = <&k3_pds 355 TI_SCI_PD_EXCLUSIVE>;
++		assigned-clocks = <&k3_clks 355 0>;
++		assigned-clock-parents = <&k3_clks 355 4>;
++	};
++
++	/*
++	 * The following RTI instances are coupled with the MCU R5Fs, the
++	 * C7x DSPs and the GPU, so they are kept reserved as they will be
++	 * used by their respective firmware.
++	 */
++	watchdog8: watchdog@22f0000 {
++		compatible = "ti,j7-rti-wdt";
++		reg = <0x00 0x22f0000 0x00 0x100>;
++		clocks = <&k3_clks 360 0>;
++		power-domains = <&k3_pds 360 TI_SCI_PD_EXCLUSIVE>;
++		assigned-clocks = <&k3_clks 360 0>;
++		assigned-clock-parents = <&k3_clks 360 4>;
++		/* reserved for GPU */
++		status = "reserved";
++	};
++
++	watchdog9: watchdog@2300000 {
++		compatible = "ti,j7-rti-wdt";
++		reg = <0x00 0x2300000 0x00 0x100>;
++		clocks = <&k3_clks 356 0>;
++		power-domains = <&k3_pds 356 TI_SCI_PD_EXCLUSIVE>;
++		assigned-clocks = <&k3_clks 356 0>;
++		assigned-clock-parents = <&k3_clks 356 4>;
++		/* reserved for C7X_0 DSP */
++		status = "reserved";
++	};
++
++	watchdog10: watchdog@2310000 {
++		compatible = "ti,j7-rti-wdt";
++		reg = <0x00 0x2310000 0x00 0x100>;
++		clocks = <&k3_clks 357 0>;
++		power-domains = <&k3_pds 357 TI_SCI_PD_EXCLUSIVE>;
++		assigned-clocks = <&k3_clks 357 0>;
++		assigned-clock-parents = <&k3_clks 357 4>;
++		/* reserved for C7X_1 DSP */
++		status = "reserved";
++	};
++
++	watchdog11: watchdog@2320000 {
++		compatible = "ti,j7-rti-wdt";
++		reg = <0x00 0x2320000 0x00 0x100>;
++		clocks = <&k3_clks 358 0>;
++		power-domains = <&k3_pds 358 TI_SCI_PD_EXCLUSIVE>;
++		assigned-clocks = <&k3_clks 358 0>;
++		assigned-clock-parents = <&k3_clks 358 4>;
++		/* reserved for C7X_2 DSP */
++		status = "reserved";
++	};
++
++	watchdog12: watchdog@2330000 {
++		compatible = "ti,j7-rti-wdt";
++		reg = <0x00 0x2330000 0x00 0x100>;
++		clocks = <&k3_clks 359 0>;
++		power-domains = <&k3_pds 359 TI_SCI_PD_EXCLUSIVE>;
++		assigned-clocks = <&k3_clks 359 0>;
++		assigned-clock-parents = <&k3_clks 359 4>;
++		/* reserved for C7X_3 DSP */
++		status = "reserved";
++	};
++
++	watchdog13: watchdog@23c0000 {
++		compatible = "ti,j7-rti-wdt";
++		reg = <0x00 0x23c0000 0x00 0x100>;
++		clocks = <&k3_clks 361 0>;
++		power-domains = <&k3_pds 361 TI_SCI_PD_EXCLUSIVE>;
++		assigned-clocks = <&k3_clks 361 0>;
++		assigned-clock-parents = <&k3_clks 361 4>;
++		/* reserved for MAIN_R5F0_0 */
++		status = "reserved";
++	};
++
++	watchdog14: watchdog@23d0000 {
++		compatible = "ti,j7-rti-wdt";
++		reg = <0x00 0x23d0000 0x00 0x100>;
++		clocks = <&k3_clks 362 0>;
++		power-domains = <&k3_pds 362 TI_SCI_PD_EXCLUSIVE>;
++		assigned-clocks = <&k3_clks 362 0>;
++		assigned-clock-parents = <&k3_clks 362 4>;
++		/* reserved for MAIN_R5F0_1 */
++		status = "reserved";
++	};
++
++	watchdog15: watchdog@23e0000 {
++		compatible = "ti,j7-rti-wdt";
++		reg = <0x00 0x23e0000 0x00 0x100>;
++		clocks = <&k3_clks 363 0>;
++		power-domains = <&k3_pds 363 TI_SCI_PD_EXCLUSIVE>;
++		assigned-clocks = <&k3_clks 363 0>;
++		assigned-clock-parents = <&k3_clks 363 4>;
++		/* reserved for MAIN_R5F1_0 */
++		status = "reserved";
++	};
++
++	watchdog16: watchdog@23f0000 {
++		compatible = "ti,j7-rti-wdt";
++		reg = <0x00 0x23f0000 0x00 0x100>;
++		clocks = <&k3_clks 364 0>;
++		power-domains = <&k3_pds 364 TI_SCI_PD_EXCLUSIVE>;
++		assigned-clocks = <&k3_clks 364 0>;
++		assigned-clock-parents = <&k3_clks 364 4>;
++		/* reserved for MAIN_R5F1_1 */
++		status = "reserved";
++	};
++
++	watchdog17: watchdog@2540000 {
++		compatible = "ti,j7-rti-wdt";
++		reg = <0x00 0x2540000 0x00 0x100>;
++		clocks = <&k3_clks 365 0>;
++		power-domains = <&k3_pds 365 TI_SCI_PD_EXCLUSIVE>;
++		assigned-clocks = <&k3_clks 365 0>;
++		assigned-clock-parents = <&k3_clks 366 4>;
++		/* reserved for MAIN_R5F2_0 */
++		status = "reserved";
++	};
++
++	watchdog18: watchdog@2550000 {
++		compatible = "ti,j7-rti-wdt";
++		reg = <0x00 0x2550000 0x00 0x100>;
++		clocks = <&k3_clks 366 0>;
++		power-domains = <&k3_pds 366 TI_SCI_PD_EXCLUSIVE>;
++		assigned-clocks = <&k3_clks 366 0>;
++		assigned-clock-parents = <&k3_clks 366 4>;
++		/* reserved for MAIN_R5F2_1 */
++		status = "reserved";
++	};
++
++	mhdp: bridge@a000000 {
++		compatible = "ti,j721e-mhdp8546";
++		reg = <0x0 0xa000000 0x0 0x30a00>,
++		      <0x0 0x4f40000 0x0 0x20>;
++		reg-names = "mhdptx", "j721e-intg";
++		clocks = <&k3_clks 217 11>;
++		interrupt-parent = <&gic500>;
++		interrupts = <GIC_SPI 614 IRQ_TYPE_LEVEL_HIGH>;
++		power-domains = <&k3_pds 217 TI_SCI_PD_EXCLUSIVE>;
++		status = "disabled";
++
++		dp0_ports: ports {
++			#address-cells = <1>;
++			#size-cells = <0>;
++			/* Remote endpoints are on the boards, so the
++			 * ports are defined in the platform dt file.
++			 */
++		};
++	};
++
++	dss: dss@4a00000 {
++		compatible = "ti,j721e-dss";
++		reg = <0x00 0x04a00000 0x00 0x10000>, /* common_m */
++		      <0x00 0x04a10000 0x00 0x10000>, /* common_s0*/
++		      <0x00 0x04b00000 0x00 0x10000>, /* common_s1*/
++		      <0x00 0x04b10000 0x00 0x10000>, /* common_s2*/
++		      <0x00 0x04a20000 0x00 0x10000>, /* vidl1 */
++		      <0x00 0x04a30000 0x00 0x10000>, /* vidl2 */
++		      <0x00 0x04a50000 0x00 0x10000>, /* vid1 */
++		      <0x00 0x04a60000 0x00 0x10000>, /* vid2 */
++		      <0x00 0x04a70000 0x00 0x10000>, /* ovr1 */
++		      <0x00 0x04a90000 0x00 0x10000>, /* ovr2 */
++		      <0x00 0x04ab0000 0x00 0x10000>, /* ovr3 */
++		      <0x00 0x04ad0000 0x00 0x10000>, /* ovr4 */
++		      <0x00 0x04a80000 0x00 0x10000>, /* vp1 */
++		      <0x00 0x04aa0000 0x00 0x10000>, /* vp1 */
++		      <0x00 0x04ac0000 0x00 0x10000>, /* vp1 */
++		      <0x00 0x04ae0000 0x00 0x10000>, /* vp4 */
++		      <0x00 0x04af0000 0x00 0x10000>; /* wb */
++		reg-names = "common_m", "common_s0",
++			    "common_s1", "common_s2",
++			    "vidl1", "vidl2","vid1","vid2",
++			    "ovr1", "ovr2", "ovr3", "ovr4",
++			    "vp1", "vp2", "vp3", "vp4",
++			    "wb";
++		clocks = <&k3_clks 218 0>,
++			 <&k3_clks 218 2>,
++			 <&k3_clks 218 5>,
++			 <&k3_clks 218 14>,
++			 <&k3_clks 218 18>;
++		clock-names = "fck", "vp1", "vp2", "vp3", "vp4";
++		power-domains = <&k3_pds 218 TI_SCI_PD_EXCLUSIVE>;
++		interrupts = <GIC_SPI 602 IRQ_TYPE_LEVEL_HIGH>,
++			     <GIC_SPI 603 IRQ_TYPE_LEVEL_HIGH>,
++			     <GIC_SPI 604 IRQ_TYPE_LEVEL_HIGH>,
++			     <GIC_SPI 605 IRQ_TYPE_LEVEL_HIGH>;
++		interrupt-names = "common_m",
++				  "common_s0",
++				  "common_s1",
++				  "common_s2";
++		status = "disabled";
++
++		dss_ports: ports {
++			/* Ports driven by the DSS are platform specific,
++			 * so they are defined in the platform dt file.
++			 */
++		};
++	};
++
++	mcasp0: mcasp@2b00000 {
++		compatible = "ti,am33xx-mcasp-audio";
++		reg = <0x00 0x02b00000 0x00 0x2000>,
++		      <0x00 0x02b08000 0x00 0x1000>;
++		reg-names = "mpu","dat";
++		interrupts = <GIC_SPI 544 IRQ_TYPE_LEVEL_HIGH>,
++			     <GIC_SPI 545 IRQ_TYPE_LEVEL_HIGH>;
++		interrupt-names = "tx", "rx";
++		dmas = <&main_udmap 0xc400>, <&main_udmap 0x4400>;
++		dma-names = "tx", "rx";
++		clocks = <&k3_clks 265 0>;
++		clock-names = "fck";
++		assigned-clocks = <&k3_clks 265 0>;
++		assigned-clock-parents = <&k3_clks 265 1>;
++		power-domains = <&k3_pds 265 TI_SCI_PD_EXCLUSIVE>;
++		status = "disabled";
++	};
++
++	mcasp1: mcasp@2b10000 {
++		compatible = "ti,am33xx-mcasp-audio";
++		reg = <0x00 0x02b10000 0x00 0x2000>,
++		      <0x00 0x02b18000 0x00 0x1000>;
++		reg-names = "mpu","dat";
++		interrupts = <GIC_SPI 546 IRQ_TYPE_LEVEL_HIGH>,
++			     <GIC_SPI 547 IRQ_TYPE_LEVEL_HIGH>;
++		interrupt-names = "tx", "rx";
++		dmas = <&main_udmap 0xc401>, <&main_udmap 0x4401>;
++		dma-names = "tx", "rx";
++		clocks = <&k3_clks 266 0>;
++		clock-names = "fck";
++		assigned-clocks = <&k3_clks 266 0>;
++		assigned-clock-parents = <&k3_clks 266 1>;
++		power-domains = <&k3_pds 266 TI_SCI_PD_EXCLUSIVE>;
++		status = "disabled";
++	};
++
++	mcasp2: mcasp@2b20000 {
++		compatible = "ti,am33xx-mcasp-audio";
++		reg = <0x00 0x02b20000 0x00 0x2000>,
++		      <0x00 0x02b28000 0x00 0x1000>;
++		reg-names = "mpu","dat";
++		interrupts = <GIC_SPI 548 IRQ_TYPE_LEVEL_HIGH>,
++			     <GIC_SPI 549 IRQ_TYPE_LEVEL_HIGH>;
++		interrupt-names = "tx", "rx";
++		dmas = <&main_udmap 0xc402>, <&main_udmap 0x4402>;
++		dma-names = "tx", "rx";
++		clocks = <&k3_clks 267 0>;
++		clock-names = "fck";
++		assigned-clocks = <&k3_clks 267 0>;
++		assigned-clock-parents = <&k3_clks 267 1>;
++		power-domains = <&k3_pds 267 TI_SCI_PD_EXCLUSIVE>;
++		status = "disabled";
++	};
++
++	mcasp3: mcasp@2b30000 {
++		compatible = "ti,am33xx-mcasp-audio";
++		reg = <0x00 0x02b30000 0x00 0x2000>,
++		      <0x00 0x02b38000 0x00 0x1000>;
++		reg-names = "mpu","dat";
++		interrupts = <GIC_SPI 550 IRQ_TYPE_LEVEL_HIGH>,
++			     <GIC_SPI 551 IRQ_TYPE_LEVEL_HIGH>;
++		interrupt-names = "tx", "rx";
++		dmas = <&main_udmap 0xc403>, <&main_udmap 0x4403>;
++		dma-names = "tx", "rx";
++		clocks = <&k3_clks 268 0>;
++		clock-names = "fck";
++		assigned-clocks = <&k3_clks 268 0>;
++		assigned-clock-parents = <&k3_clks 268 1>;
++		power-domains = <&k3_pds 268 TI_SCI_PD_EXCLUSIVE>;
++		status = "disabled";
++	};
++
++	mcasp4: mcasp@2b40000 {
++		compatible = "ti,am33xx-mcasp-audio";
++		reg = <0x00 0x02b40000 0x00 0x2000>,
++		      <0x00 0x02b48000 0x00 0x1000>;
++		reg-names = "mpu","dat";
++		interrupts = <GIC_SPI 552 IRQ_TYPE_LEVEL_HIGH>,
++			     <GIC_SPI 553 IRQ_TYPE_LEVEL_HIGH>;
++		interrupt-names = "tx", "rx";
++		dmas = <&main_udmap 0xc404>, <&main_udmap 0x4404>;
++		dma-names = "tx", "rx";
++		clocks = <&k3_clks 269 0>;
++		clock-names = "fck";
++		assigned-clocks = <&k3_clks 269 0>;
++		assigned-clock-parents = <&k3_clks 269 1>;
++		power-domains = <&k3_pds 269 TI_SCI_PD_EXCLUSIVE>;
++		status = "disabled";
++	};
++};
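
Nearly every peripheral node added above carries status = "disabled"; the expectation is that a board dts enables only the instances it actually wires up. A minimal board-side override for one of the new MCAN instances might look like the sketch below (the pinmux label is illustrative and not part of this patch):

&main_mcan5 {
	pinctrl-names = "default";
	pinctrl-0 = <&main_mcan5_pins_default>;	/* hypothetical pinmux node */
	status = "okay";
};
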
+diff --git a/arch/arm64/boot/dts/ti/k3-j784s4-j742s2-mcu-wakeup-common.dtsi b/arch/arm64/boot/dts/ti/k3-j784s4-j742s2-mcu-wakeup-common.dtsi
+new file mode 100644
+index 00000000000000..cba8d0e64f2e63
+--- /dev/null
++++ b/arch/arm64/boot/dts/ti/k3-j784s4-j742s2-mcu-wakeup-common.dtsi
+@@ -0,0 +1,760 @@
++// SPDX-License-Identifier: GPL-2.0-only OR MIT
++/*
++ * Device Tree Source for J784S4 and J742S2 SoC Family MCU/WAKEUP Domain peripherals
++ *
++ * Copyright (C) 2022-2024 Texas Instruments Incorporated - https://www.ti.com/
++ */
++
++&cbass_mcu_wakeup {
++	sms: system-controller@44083000 {
++		bootph-all;
++		compatible = "ti,k2g-sci";
++		ti,host-id = <12>;
++
++		mbox-names = "rx", "tx";
++
++		mboxes = <&secure_proxy_main 11>,
++			 <&secure_proxy_main 13>;
++
++		reg-names = "debug_messages";
++		reg = <0x00 0x44083000 0x00 0x1000>;
++
++		k3_pds: power-controller {
++			bootph-all;
++			compatible = "ti,sci-pm-domain";
++			#power-domain-cells = <2>;
++		};
++
++		k3_clks: clock-controller {
++			bootph-all;
++			compatible = "ti,k2g-sci-clk";
++			#clock-cells = <2>;
++		};
++
++		k3_reset: reset-controller {
++			bootph-all;
++			compatible = "ti,sci-reset";
++			#reset-cells = <2>;
++		};
++	};
++
++	wkup_conf: bus@43000000 {
++		bootph-all;
++		compatible = "simple-bus";
++		#address-cells = <1>;
++		#size-cells = <1>;
++		ranges = <0x0 0x00 0x43000000 0x20000>;
++
++		chipid: chipid@14 {
++			bootph-all;
++			compatible = "ti,am654-chipid";
++			reg = <0x14 0x4>;
++		};
++	};
++
++	secure_proxy_sa3: mailbox@43600000 {
++		compatible = "ti,am654-secure-proxy";
++		#mbox-cells = <1>;
++		reg-names = "target_data", "rt", "scfg";
++		reg = <0x00 0x43600000 0x00 0x10000>,
++		      <0x00 0x44880000 0x00 0x20000>,
++		      <0x00 0x44860000 0x00 0x20000>;
++		/*
++		 * Marked Disabled:
++		 * Node is incomplete as it is meant for bootloaders and
++		 * firmware on non-MPU processors
++		 */
++		status = "disabled";
++	};
++
++	mcu_ram: sram@41c00000 {
++		compatible = "mmio-sram";
++		reg = <0x00 0x41c00000 0x00 0x100000>;
++		ranges = <0x00 0x00 0x41c00000 0x100000>;
++		#address-cells = <1>;
++		#size-cells = <1>;
++	};
++
++	wkup_pmx0: pinctrl@4301c000 {
++		compatible = "pinctrl-single";
++		/* Proxy 0 addressing */
++		reg = <0x00 0x4301c000 0x00 0x034>;
++		#pinctrl-cells = <1>;
++		pinctrl-single,register-width = <32>;
++		pinctrl-single,function-mask = <0xffffffff>;
++	};
++
++	wkup_pmx1: pinctrl@4301c038 {
++		compatible = "pinctrl-single";
++		/* Proxy 0 addressing */
++		reg = <0x00 0x4301c038 0x00 0x02c>;
++		#pinctrl-cells = <1>;
++		pinctrl-single,register-width = <32>;
++		pinctrl-single,function-mask = <0xffffffff>;
++	};
++
++	wkup_pmx2: pinctrl@4301c068 {
++		compatible = "pinctrl-single";
++		/* Proxy 0 addressing */
++		reg = <0x00 0x4301c068 0x00 0x120>;
++		#pinctrl-cells = <1>;
++		pinctrl-single,register-width = <32>;
++		pinctrl-single,function-mask = <0xffffffff>;
++	};
++
++	wkup_pmx3: pinctrl@4301c190 {
++		compatible = "pinctrl-single";
++		/* Proxy 0 addressing */
++		reg = <0x00 0x4301c190 0x00 0x004>;
++		#pinctrl-cells = <1>;
++		pinctrl-single,register-width = <32>;
++		pinctrl-single,function-mask = <0xffffffff>;
++	};
++
++	wkup_gpio_intr: interrupt-controller@42200000 {
++		compatible = "ti,sci-intr";
++		reg = <0x00 0x42200000 0x00 0x400>;
++		ti,intr-trigger-type = <1>;
++		interrupt-controller;
++		interrupt-parent = <&gic500>;
++		#interrupt-cells = <1>;
++		ti,sci = <&sms>;
++		ti,sci-dev-id = <177>;
++		ti,interrupt-ranges = <16 960 16>;
++	};
++
++	/* MCU_TIMERIO pad input CTRLMMR_MCU_TIMER*_CTRL registers */
++	mcu_timerio_input: pinctrl@40f04200 {
++		compatible = "pinctrl-single";
++		reg = <0x00 0x40f04200 0x00 0x28>;
++		#pinctrl-cells = <1>;
++		pinctrl-single,register-width = <32>;
++		pinctrl-single,function-mask = <0x0000000f>;
++		/* Non-MPU Firmware usage */
++		status = "reserved";
++	};
++
++	/* MCU_TIMERIO pad output CTRLMMR_MCU_TIMERIO*_CTRL registers */
++	mcu_timerio_output: pinctrl@40f04280 {
++		compatible = "pinctrl-single";
++		reg = <0x00 0x40f04280 0x00 0x28>;
++		#pinctrl-cells = <1>;
++		pinctrl-single,register-width = <32>;
++		pinctrl-single,function-mask = <0x0000000f>;
++		/* Non-MPU Firmware usage */
++		status = "reserved";
++	};
++
++	mcu_conf: bus@40f00000 {
++		compatible = "simple-bus";
++		#address-cells = <1>;
++		#size-cells = <1>;
++		ranges = <0x0 0x0 0x40f00000 0x20000>;
++
++		cpsw_mac_syscon: ethernet-mac-syscon@200 {
++			compatible = "ti,am62p-cpsw-mac-efuse", "syscon";
++			reg = <0x200 0x8>;
++		};
++
++		phy_gmii_sel: phy@4040 {
++			compatible = "ti,am654-phy-gmii-sel";
++			reg = <0x4040 0x4>;
++			#phy-cells = <1>;
++		};
++	};
++
++	mcu_timer0: timer@40400000 {
++		compatible = "ti,am654-timer";
++		reg = <0x00 0x40400000 0x00 0x400>;
++		interrupts = <GIC_SPI 816 IRQ_TYPE_LEVEL_HIGH>;
++		clocks = <&k3_clks 35 2>;
++		clock-names = "fck";
++		assigned-clocks = <&k3_clks 35 2>;
++		assigned-clock-parents = <&k3_clks 35 3>;
++		power-domains = <&k3_pds 35 TI_SCI_PD_EXCLUSIVE>;
++		ti,timer-pwm;
++		/* Non-MPU Firmware usage */
++		status = "reserved";
++	};
++
++	mcu_timer1: timer@40410000 {
++		bootph-all;
++		compatible = "ti,am654-timer";
++		reg = <0x00 0x40410000 0x00 0x400>;
++		interrupts = <GIC_SPI 817 IRQ_TYPE_LEVEL_HIGH>;
++		clocks = <&k3_clks 117 2>;
++		clock-names = "fck";
++		assigned-clocks = <&k3_clks 117 2>;
++		assigned-clock-parents = <&k3_clks 117 3>;
++		power-domains = <&k3_pds 117 TI_SCI_PD_EXCLUSIVE>;
++		ti,timer-pwm;
++		/* Non-MPU Firmware usage */
++		status = "reserved";
++	};
++
++	mcu_timer2: timer@40420000 {
++		compatible = "ti,am654-timer";
++		reg = <0x00 0x40420000 0x00 0x400>;
++		interrupts = <GIC_SPI 818 IRQ_TYPE_LEVEL_HIGH>;
++		clocks = <&k3_clks 118 2>;
++		clock-names = "fck";
++		assigned-clocks = <&k3_clks 118 2>;
++		assigned-clock-parents = <&k3_clks 118 3>;
++		power-domains = <&k3_pds 118 TI_SCI_PD_EXCLUSIVE>;
++		ti,timer-pwm;
++		/* Non-MPU Firmware usage */
++		status = "reserved";
++	};
++
++	mcu_timer3: timer@40430000 {
++		compatible = "ti,am654-timer";
++		reg = <0x00 0x40430000 0x00 0x400>;
++		interrupts = <GIC_SPI 819 IRQ_TYPE_LEVEL_HIGH>;
++		clocks = <&k3_clks 119 2>;
++		clock-names = "fck";
++		assigned-clocks = <&k3_clks 119 2>;
++		assigned-clock-parents = <&k3_clks 119 3>;
++		power-domains = <&k3_pds 119 TI_SCI_PD_EXCLUSIVE>;
++		ti,timer-pwm;
++		/* Non-MPU Firmware usage */
++		status = "reserved";
++	};
++
++	mcu_timer4: timer@40440000 {
++		compatible = "ti,am654-timer";
++		reg = <0x00 0x40440000 0x00 0x400>;
++		interrupts = <GIC_SPI 820 IRQ_TYPE_LEVEL_HIGH>;
++		clocks = <&k3_clks 120 2>;
++		clock-names = "fck";
++		assigned-clocks = <&k3_clks 120 2>;
++		assigned-clock-parents = <&k3_clks 120 3>;
++		power-domains = <&k3_pds 120 TI_SCI_PD_EXCLUSIVE>;
++		ti,timer-pwm;
++		/* Non-MPU Firmware usage */
++		status = "reserved";
++	};
++
++	mcu_timer5: timer@40450000 {
++		compatible = "ti,am654-timer";
++		reg = <0x00 0x40450000 0x00 0x400>;
++		interrupts = <GIC_SPI 821 IRQ_TYPE_LEVEL_HIGH>;
++		clocks = <&k3_clks 121 2>;
++		clock-names = "fck";
++		assigned-clocks = <&k3_clks 121 2>;
++		assigned-clock-parents = <&k3_clks 121 3>;
++		power-domains = <&k3_pds 121 TI_SCI_PD_EXCLUSIVE>;
++		ti,timer-pwm;
++		/* Non-MPU Firmware usage */
++		status = "reserved";
++	};
++
++	mcu_timer6: timer@40460000 {
++		compatible = "ti,am654-timer";
++		reg = <0x00 0x40460000 0x00 0x400>;
++		interrupts = <GIC_SPI 822 IRQ_TYPE_LEVEL_HIGH>;
++		clocks = <&k3_clks 122 2>;
++		clock-names = "fck";
++		assigned-clocks = <&k3_clks 122 2>;
++		assigned-clock-parents = <&k3_clks 122 3>;
++		power-domains = <&k3_pds 122 TI_SCI_PD_EXCLUSIVE>;
++		ti,timer-pwm;
++		/* Non-MPU Firmware usage */
++		status = "reserved";
++	};
++
++	mcu_timer7: timer@40470000 {
++		compatible = "ti,am654-timer";
++		reg = <0x00 0x40470000 0x00 0x400>;
++		interrupts = <GIC_SPI 823 IRQ_TYPE_LEVEL_HIGH>;
++		clocks = <&k3_clks 123 2>;
++		clock-names = "fck";
++		assigned-clocks = <&k3_clks 123 2>;
++		assigned-clock-parents = <&k3_clks 123 3>;
++		power-domains = <&k3_pds 123 TI_SCI_PD_EXCLUSIVE>;
++		ti,timer-pwm;
++		/* Non-MPU Firmware usage */
++		status = "reserved";
++	};
++
++	mcu_timer8: timer@40480000 {
++		compatible = "ti,am654-timer";
++		reg = <0x00 0x40480000 0x00 0x400>;
++		interrupts = <GIC_SPI 824 IRQ_TYPE_LEVEL_HIGH>;
++		clocks = <&k3_clks 124 2>;
++		clock-names = "fck";
++		assigned-clocks = <&k3_clks 124 2>;
++		assigned-clock-parents = <&k3_clks 124 3>;
++		power-domains = <&k3_pds 124 TI_SCI_PD_EXCLUSIVE>;
++		ti,timer-pwm;
++		/* Non-MPU Firmware usage */
++		status = "reserved";
++	};
++
++	mcu_timer9: timer@40490000 {
++		compatible = "ti,am654-timer";
++		reg = <0x00 0x40490000 0x00 0x400>;
++		interrupts = <GIC_SPI 825 IRQ_TYPE_LEVEL_HIGH>;
++		clocks = <&k3_clks 125 2>;
++		clock-names = "fck";
++		assigned-clocks = <&k3_clks 125 2>;
++		assigned-clock-parents = <&k3_clks 125 3>;
++		power-domains = <&k3_pds 125 TI_SCI_PD_EXCLUSIVE>;
++		ti,timer-pwm;
++		/* Non-MPU Firmware usage */
++		status = "reserved";
++	};
++
++	wkup_uart0: serial@42300000 {
++		compatible = "ti,j721e-uart", "ti,am654-uart";
++		reg = <0x00 0x42300000 0x00 0x200>;
++		interrupts = <GIC_SPI 897 IRQ_TYPE_LEVEL_HIGH>;
++		clocks = <&k3_clks 397 0>;
++		clock-names = "fclk";
++		power-domains = <&k3_pds 397 TI_SCI_PD_EXCLUSIVE>;
++		status = "disabled";
++	};
++
++	mcu_uart0: serial@40a00000 {
++		compatible = "ti,j721e-uart", "ti,am654-uart";
++		reg = <0x00 0x40a00000 0x00 0x200>;
++		interrupts = <GIC_SPI 846 IRQ_TYPE_LEVEL_HIGH>;
++		clocks = <&k3_clks 149 0>;
++		clock-names = "fclk";
++		power-domains = <&k3_pds 149 TI_SCI_PD_EXCLUSIVE>;
++		status = "disabled";
++	};
++
++	wkup_gpio0: gpio@42110000 {
++		compatible = "ti,j721e-gpio", "ti,keystone-gpio";
++		reg = <0x00 0x42110000 0x00 0x100>;
++		gpio-controller;
++		#gpio-cells = <2>;
++		interrupt-parent = <&wkup_gpio_intr>;
++		interrupts = <103>, <104>, <105>, <106>, <107>, <108>;
++		interrupt-controller;
++		#interrupt-cells = <2>;
++		ti,ngpio = <89>;
++		ti,davinci-gpio-unbanked = <0>;
++		power-domains = <&k3_pds 167 TI_SCI_PD_EXCLUSIVE>;
++		clocks = <&k3_clks 167 0>;
++		clock-names = "gpio";
++		status = "disabled";
++	};
++
++	wkup_gpio1: gpio@42100000 {
++		compatible = "ti,j721e-gpio", "ti,keystone-gpio";
++		reg = <0x00 0x42100000 0x00 0x100>;
++		gpio-controller;
++		#gpio-cells = <2>;
++		interrupt-parent = <&wkup_gpio_intr>;
++		interrupts = <112>, <113>, <114>, <115>, <116>, <117>;
++		interrupt-controller;
++		#interrupt-cells = <2>;
++		ti,ngpio = <89>;
++		ti,davinci-gpio-unbanked = <0>;
++		power-domains = <&k3_pds 168 TI_SCI_PD_EXCLUSIVE>;
++		clocks = <&k3_clks 168 0>;
++		clock-names = "gpio";
++		status = "disabled";
++	};
++
++	wkup_i2c0: i2c@42120000 {
++		compatible = "ti,j721e-i2c", "ti,omap4-i2c";
++		reg = <0x00 0x42120000 0x00 0x100>;
++		interrupts = <GIC_SPI 896 IRQ_TYPE_LEVEL_HIGH>;
++		#address-cells = <1>;
++		#size-cells = <0>;
++		clocks = <&k3_clks 279 2>;
++		clock-names = "fck";
++		power-domains = <&k3_pds 279 TI_SCI_PD_EXCLUSIVE>;
++		status = "disabled";
++	};
++
++	mcu_i2c0: i2c@40b00000 {
++		compatible = "ti,j721e-i2c", "ti,omap4-i2c";
++		reg = <0x00 0x40b00000 0x00 0x100>;
++		interrupts = <GIC_SPI 852 IRQ_TYPE_LEVEL_HIGH>;
++		#address-cells = <1>;
++		#size-cells = <0>;
++		clocks = <&k3_clks 277 2>;
++		clock-names = "fck";
++		power-domains = <&k3_pds 277 TI_SCI_PD_EXCLUSIVE>;
++		status = "disabled";
++	};
++
++	mcu_i2c1: i2c@40b10000 {
++		compatible = "ti,j721e-i2c", "ti,omap4-i2c";
++		reg = <0x00 0x40b10000 0x00 0x100>;
++		interrupts = <GIC_SPI 853 IRQ_TYPE_LEVEL_HIGH>;
++		#address-cells = <1>;
++		#size-cells = <0>;
++		clocks = <&k3_clks 278 2>;
++		clock-names = "fck";
++		power-domains = <&k3_pds 278 TI_SCI_PD_EXCLUSIVE>;
++		status = "disabled";
++	};
++
++	mcu_mcan0: can@40528000 {
++		compatible = "bosch,m_can";
++		reg = <0x00 0x40528000 0x00 0x200>,
++		      <0x00 0x40500000 0x00 0x8000>;
++		reg-names = "m_can", "message_ram";
++		power-domains = <&k3_pds 263 TI_SCI_PD_EXCLUSIVE>;
++		clocks = <&k3_clks 263 6>, <&k3_clks 263 1>;
++		clock-names = "hclk", "cclk";
++		interrupts = <GIC_SPI 832 IRQ_TYPE_LEVEL_HIGH>,
++			     <GIC_SPI 833 IRQ_TYPE_LEVEL_HIGH>;
++		interrupt-names = "int0", "int1";
++		bosch,mram-cfg = <0x00 128 64 64 64 64 32 32>;
++		status = "disabled";
++	};
++
++	mcu_mcan1: can@40568000 {
++		compatible = "bosch,m_can";
++		reg = <0x00 0x40568000 0x00 0x200>,
++		      <0x00 0x40540000 0x00 0x8000>;
++		reg-names = "m_can", "message_ram";
++		power-domains = <&k3_pds 264 TI_SCI_PD_EXCLUSIVE>;
++		clocks = <&k3_clks 264 6>, <&k3_clks 264 1>;
++		clock-names = "hclk", "cclk";
++		interrupts = <GIC_SPI 835 IRQ_TYPE_LEVEL_HIGH>,
++			     <GIC_SPI 836 IRQ_TYPE_LEVEL_HIGH>;
++		interrupt-names = "int0", "int1";
++		bosch,mram-cfg = <0x00 128 64 64 64 64 32 32>;
++		status = "disabled";
++	};
++
++	mcu_spi0: spi@40300000 {
++		compatible = "ti,am654-mcspi", "ti,omap4-mcspi";
++		reg = <0x00 0x040300000 0x00 0x400>;
++		interrupts = <GIC_SPI 848 IRQ_TYPE_LEVEL_HIGH>;
++		#address-cells = <1>;
++		#size-cells = <0>;
++		power-domains = <&k3_pds 384 TI_SCI_PD_EXCLUSIVE>;
++		clocks = <&k3_clks 384 0>;
++		status = "disabled";
++	};
++
++	mcu_spi1: spi@40310000 {
++		compatible = "ti,am654-mcspi", "ti,omap4-mcspi";
++		reg = <0x00 0x040310000 0x00 0x400>;
++		interrupts = <GIC_SPI 849 IRQ_TYPE_LEVEL_HIGH>;
++		#address-cells = <1>;
++		#size-cells = <0>;
++		power-domains = <&k3_pds 385 TI_SCI_PD_EXCLUSIVE>;
++		clocks = <&k3_clks 385 0>;
++		status = "disabled";
++	};
++
++	mcu_spi2: spi@40320000 {
++		compatible = "ti,am654-mcspi", "ti,omap4-mcspi";
++		reg = <0x00 0x040320000 0x00 0x400>;
++		interrupts = <GIC_SPI 850 IRQ_TYPE_LEVEL_HIGH>;
++		#address-cells = <1>;
++		#size-cells = <0>;
++		power-domains = <&k3_pds 386 TI_SCI_PD_EXCLUSIVE>;
++		clocks = <&k3_clks 386 0>;
++		status = "disabled";
++	};
++
++	mcu_navss: bus@28380000 {
++		bootph-all;
++		compatible = "simple-bus";
++		#address-cells = <2>;
++		#size-cells = <2>;
++		ranges = <0x00 0x28380000 0x00 0x28380000 0x00 0x03880000>;
++		ti,sci-dev-id = <323>;
++		dma-coherent;
++		dma-ranges;
++
++		mcu_ringacc: ringacc@2b800000 {
++			bootph-all;
++			compatible = "ti,am654-navss-ringacc";
++			reg = <0x00 0x2b800000 0x00 0x400000>,
++			      <0x00 0x2b000000 0x00 0x400000>,
++			      <0x00 0x28590000 0x00 0x100>,
++			      <0x00 0x2a500000 0x00 0x40000>,
++			      <0x00 0x28440000 0x00 0x40000>;
++			reg-names = "rt", "fifos", "proxy_gcfg", "proxy_target", "cfg";
++			ti,num-rings = <286>;
++			ti,sci-rm-range-gp-rings = <0x1>;
++			ti,sci = <&sms>;
++			ti,sci-dev-id = <328>;
++			msi-parent = <&main_udmass_inta>;
++		};
++
++		mcu_udmap: dma-controller@285c0000 {
++			bootph-all;
++			compatible = "ti,j721e-navss-mcu-udmap";
++			reg = <0x00 0x285c0000 0x00 0x100>,
++			      <0x00 0x2a800000 0x00 0x40000>,
++			      <0x00 0x2aa00000 0x00 0x40000>,
++			      <0x00 0x284a0000 0x00 0x4000>,
++			      <0x00 0x284c0000 0x00 0x4000>,
++			      <0x00 0x28400000 0x00 0x2000>;
++			reg-names = "gcfg", "rchanrt", "tchanrt",
++				    "tchan", "rchan", "rflow";
++			msi-parent = <&main_udmass_inta>;
++			#dma-cells = <1>;
++
++			ti,sci = <&sms>;
++			ti,sci-dev-id = <329>;
++			ti,ringacc = <&mcu_ringacc>;
++			ti,sci-rm-range-tchan = <0x0d>, /* TX_CHAN */
++						<0x0f>; /* TX_HCHAN */
++			ti,sci-rm-range-rchan = <0x0a>, /* RX_CHAN */
++						<0x0b>; /* RX_HCHAN */
++			ti,sci-rm-range-rflow = <0x00>; /* GP RFLOW */
++		};
++	};
++
++	secure_proxy_mcu: mailbox@2a480000 {
++		compatible = "ti,am654-secure-proxy";
++		#mbox-cells = <1>;
++		reg-names = "target_data", "rt", "scfg";
++		reg = <0x00 0x2a480000 0x00 0x80000>,
++		      <0x00 0x2a380000 0x00 0x80000>,
++		      <0x00 0x2a400000 0x00 0x80000>;
++		/*
++		 * Marked Disabled:
++		 * Node is incomplete as it is meant for bootloaders and
++		 * firmware on non-MPU processors
++		 */
++		status = "disabled";
++	};
++
++	mcu_cpsw: ethernet@46000000 {
++		compatible = "ti,j721e-cpsw-nuss";
++		#address-cells = <2>;
++		#size-cells = <2>;
++		reg = <0x00 0x46000000 0x00 0x200000>;
++		reg-names = "cpsw_nuss";
++		ranges = <0x00 0x00 0x00 0x46000000 0x00 0x200000>;
++		dma-coherent;
++		clocks = <&k3_clks 63 0>;
++		clock-names = "fck";
++		power-domains = <&k3_pds 63 TI_SCI_PD_EXCLUSIVE>;
++
++		dmas = <&mcu_udmap 0xf000>,
++		       <&mcu_udmap 0xf001>,
++		       <&mcu_udmap 0xf002>,
++		       <&mcu_udmap 0xf003>,
++		       <&mcu_udmap 0xf004>,
++		       <&mcu_udmap 0xf005>,
++		       <&mcu_udmap 0xf006>,
++		       <&mcu_udmap 0xf007>,
++		       <&mcu_udmap 0x7000>;
++		dma-names = "tx0", "tx1", "tx2", "tx3",
++			    "tx4", "tx5", "tx6", "tx7",
++			    "rx";
++		status = "disabled";
++
++		ethernet-ports {
++			#address-cells = <1>;
++			#size-cells = <0>;
++
++			mcu_cpsw_port1: port@1 {
++				reg = <1>;
++				ti,mac-only;
++				label = "port1";
++				ti,syscon-efuse = <&cpsw_mac_syscon 0x0>;
++				phys = <&phy_gmii_sel 1>;
++			};
++		};
++
++		davinci_mdio: mdio@f00 {
++			compatible = "ti,cpsw-mdio","ti,davinci_mdio";
++			reg = <0x00 0xf00 0x00 0x100>;
++			#address-cells = <1>;
++			#size-cells = <0>;
++			clocks = <&k3_clks 63 0>;
++			clock-names = "fck";
++			bus_freq = <1000000>;
++		};
++
++		cpts@3d000 {
++			compatible = "ti,am65-cpts";
++			reg = <0x00 0x3d000 0x00 0x400>;
++			clocks = <&k3_clks 63 3>;
++			clock-names = "cpts";
++			assigned-clocks = <&k3_clks 63 3>; /* CPTS_RFT_CLK */
++			assigned-clock-parents = <&k3_clks 63 5>; /* MAIN_0_HSDIV6_CLK */
++			interrupts-extended = <&gic500 GIC_SPI 858 IRQ_TYPE_LEVEL_HIGH>;
++			interrupt-names = "cpts";
++			ti,cpts-ext-ts-inputs = <4>;
++			ti,cpts-periodic-outputs = <2>;
++		};
++	};
++
++	mcu_r5fss0: r5fss@41000000 {
++		compatible = "ti,j721s2-r5fss";
++		ti,cluster-mode = <1>;
++		#address-cells = <1>;
++		#size-cells = <1>;
++		ranges = <0x41000000 0x00 0x41000000 0x20000>,
++			 <0x41400000 0x00 0x41400000 0x20000>;
++		power-domains = <&k3_pds 345 TI_SCI_PD_EXCLUSIVE>;
++
++		mcu_r5fss0_core0: r5f@41000000 {
++			compatible = "ti,j721s2-r5f";
++			reg = <0x41000000 0x00010000>,
++			      <0x41010000 0x00010000>;
++			reg-names = "atcm", "btcm";
++			ti,sci = <&sms>;
++			ti,sci-dev-id = <346>;
++			ti,sci-proc-ids = <0x01 0xff>;
++			resets = <&k3_reset 346 1>;
++			firmware-name = "j784s4-mcu-r5f0_0-fw";
++			ti,atcm-enable = <1>;
++			ti,btcm-enable = <1>;
++			ti,loczrama = <1>;
++		};
++
++		mcu_r5fss0_core1: r5f@41400000 {
++			compatible = "ti,j721s2-r5f";
++			reg = <0x41400000 0x00010000>,
++			      <0x41410000 0x00010000>;
++			reg-names = "atcm", "btcm";
++			ti,sci = <&sms>;
++			ti,sci-dev-id = <347>;
++			ti,sci-proc-ids = <0x02 0xff>;
++			resets = <&k3_reset 347 1>;
++			firmware-name = "j784s4-mcu-r5f0_1-fw";
++			ti,atcm-enable = <1>;
++			ti,btcm-enable = <1>;
++			ti,loczrama = <1>;
++		};
++	};
++
++	wkup_vtm0: temperature-sensor@42040000 {
++		compatible = "ti,j7200-vtm";
++		reg = <0x00 0x42040000 0x00 0x350>,
++		      <0x00 0x42050000 0x00 0x350>;
++		power-domains = <&k3_pds 243 TI_SCI_PD_SHARED>;
++		#thermal-sensor-cells = <1>;
++	};
++
++	tscadc0: tscadc@40200000 {
++		compatible = "ti,am3359-tscadc";
++		reg = <0x00 0x40200000 0x00 0x1000>;
++		interrupts = <GIC_SPI 860 IRQ_TYPE_LEVEL_HIGH>;
++		power-domains = <&k3_pds 0 TI_SCI_PD_EXCLUSIVE>;
++		clocks = <&k3_clks 0 0>;
++		assigned-clocks = <&k3_clks 0 2>;
++		assigned-clock-rates = <60000000>;
++		clock-names = "fck";
++		dmas = <&main_udmap 0x7400>,
++			<&main_udmap 0x7401>;
++		dma-names = "fifo0", "fifo1";
++		status = "disabled";
++
++		adc {
++			#io-channel-cells = <1>;
++			compatible = "ti,am3359-adc";
++		};
++	};
++
++	tscadc1: tscadc@40210000 {
++		compatible = "ti,am3359-tscadc";
++		reg = <0x00 0x40210000 0x00 0x1000>;
++		interrupts = <GIC_SPI 861 IRQ_TYPE_LEVEL_HIGH>;
++		power-domains = <&k3_pds 1 TI_SCI_PD_EXCLUSIVE>;
++		clocks = <&k3_clks 1 0>;
++		assigned-clocks = <&k3_clks 1 2>;
++		assigned-clock-rates = <60000000>;
++		clock-names = "fck";
++		dmas = <&main_udmap 0x7402>,
++			<&main_udmap 0x7403>;
++		dma-names = "fifo0", "fifo1";
++		status = "disabled";
++
++		adc {
++			#io-channel-cells = <1>;
++			compatible = "ti,am3359-adc";
++		};
++	};
++
++	fss: bus@47000000 {
++		compatible = "simple-bus";
++		#address-cells = <2>;
++		#size-cells = <2>;
++		ranges = <0x00 0x47000000 0x00 0x47000000 0x00 0x00000100>, /* FSS Control */
++			 <0x00 0x47040000 0x00 0x47040000 0x00 0x00000100>, /* OSPI0 Control */
++			 <0x00 0x47050000 0x00 0x47050000 0x00 0x00000100>, /* OSPI1 Control */
++			 <0x00 0x50000000 0x00 0x50000000 0x00 0x10000000>, /* FSS data region 1 */
++			 <0x04 0x00000000 0x04 0x00000000 0x04 0x00000000>; /* FSS data region 0/3 */
++
++		ospi0: spi@47040000 {
++			compatible = "ti,am654-ospi", "cdns,qspi-nor";
++			reg = <0x00 0x47040000 0x00 0x100>,
++			      <0x05 0x00000000 0x01 0x00000000>;
++			interrupts = <GIC_SPI 840 IRQ_TYPE_LEVEL_HIGH>;
++			cdns,fifo-depth = <256>;
++			cdns,fifo-width = <4>;
++			cdns,trigger-address = <0x0>;
++			clocks = <&k3_clks 161 7>;
++			assigned-clocks = <&k3_clks 161 7>;
++			assigned-clock-parents = <&k3_clks 161 9>;
++			assigned-clock-rates = <166666666>;
++			power-domains = <&k3_pds 161 TI_SCI_PD_EXCLUSIVE>;
++			#address-cells = <1>;
++			#size-cells = <0>;
++			status = "disabled";
++		};
++
++		ospi1: spi@47050000 {
++			compatible = "ti,am654-ospi", "cdns,qspi-nor";
++			reg = <0x00 0x47050000 0x00 0x100>,
++			      <0x07 0x00000000 0x01 0x00000000>;
++			interrupts = <GIC_SPI 841 IRQ_TYPE_LEVEL_HIGH>;
++			cdns,fifo-depth = <256>;
++			cdns,fifo-width = <4>;
++			cdns,trigger-address = <0x0>;
++			clocks = <&k3_clks 162 7>;
++			power-domains = <&k3_pds 162 TI_SCI_PD_EXCLUSIVE>;
++			#address-cells = <1>;
++			#size-cells = <0>;
++			status = "disabled";
++		};
++	};
++
++	mcu_esm: esm@40800000 {
++		compatible = "ti,j721e-esm";
++		reg = <0x00 0x40800000 0x00 0x1000>;
++		ti,esm-pins = <95>;
++		bootph-pre-ram;
++	};
++
++	wkup_esm: esm@42080000 {
++		compatible = "ti,j721e-esm";
++		reg = <0x00 0x42080000 0x00 0x1000>;
++		ti,esm-pins = <63>;
++		bootph-pre-ram;
++	};
++
++	/*
++	 * The two RTI instances are coupled with the MCU R5Fs, so they are
++	 * kept reserved as they will be used by their respective firmware.
++	 */
++	mcu_watchdog0: watchdog@40600000 {
++		compatible = "ti,j7-rti-wdt";
++		reg = <0x00 0x40600000 0x00 0x100>;
++		clocks = <&k3_clks 367 1>;
++		power-domains = <&k3_pds 367 TI_SCI_PD_EXCLUSIVE>;
++		assigned-clocks = <&k3_clks 367 0>;
++		assigned-clock-parents = <&k3_clks 367 4>;
++		/* reserved for MCU_R5F0_0 */
++		status = "reserved";
++	};
++
++	mcu_watchdog1: watchdog@40610000 {
++		compatible = "ti,j7-rti-wdt";
++		reg = <0x00 0x40610000 0x00 0x100>;
++		clocks = <&k3_clks 368 1>;
++		power-domains = <&k3_pds 368 TI_SCI_PD_EXCLUSIVE>;
++		assigned-clocks = <&k3_clks 368 0>;
++		assigned-clock-parents = <&k3_clks 368 4>;
++		/* reserved for MCU_R5F0_1 */
++		status = "reserved";
++	};
++};
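
Consumers in this file reference TI-SCI resources with two cells: a TI-SCI device ID followed by a clock or power-domain index within that device; for example, clocks = <&k3_clks 263 6> above selects clock 6 of device 263 (MCU_MCAN0). A board dts then only has to flip status and add child devices, as in this hedged sketch for wkup_i2c0 (the EEPROM part and address are illustrative, not part of this patch):

&wkup_i2c0 {
	clock-frequency = <400000>;
	status = "okay";

	eeprom@50 {
		/* illustrative child device */
		compatible = "atmel,24c32";
		reg = <0x50>;
	};
};
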
+diff --git a/arch/arm64/boot/dts/ti/k3-j784s4-j742s2-thermal-common.dtsi b/arch/arm64/boot/dts/ti/k3-j784s4-j742s2-thermal-common.dtsi
+new file mode 100644
+index 00000000000000..e3ef61c1658f4b
+--- /dev/null
++++ b/arch/arm64/boot/dts/ti/k3-j784s4-j742s2-thermal-common.dtsi
+@@ -0,0 +1,104 @@
++// SPDX-License-Identifier: GPL-2.0-only OR MIT
++/*
++ * Copyright (C) 2023-2024 Texas Instruments Incorporated - https://www.ti.com/
++ */
++
++#include <dt-bindings/thermal/thermal.h>
++
++wkup0_thermal: wkup0-thermal {
++	polling-delay-passive = <250>; /* milliseconds */
++	polling-delay = <500>; /* milliseconds */
++	thermal-sensors = <&wkup_vtm0 0>;
++
++	trips {
++		wkup0_crit: wkup0-crit {
++			temperature = <125000>; /* milliCelsius */
++			hysteresis = <2000>; /* milliCelsius */
++			type = "critical";
++		};
++	};
++};
++
++wkup1_thermal: wkup1-thermal {
++	polling-delay-passive = <250>; /* milliseconds */
++	polling-delay = <500>; /* milliseconds */
++	thermal-sensors = <&wkup_vtm0 1>;
++
++	trips {
++		wkup1_crit: wkup1-crit {
++			temperature = <125000>; /* milliCelsius */
++			hysteresis = <2000>; /* milliCelsius */
++			type = "critical";
++		};
++	};
++};
++
++main0_thermal: main0-thermal {
++	polling-delay-passive = <250>; /* milliseconds */
++	polling-delay = <500>; /* milliseconds */
++	thermal-sensors = <&wkup_vtm0 2>;
++
++	trips {
++		main0_crit: main0-crit {
++			temperature = <125000>; /* milliCelsius */
++			hysteresis = <2000>; /* milliCelsius */
++			type = "critical";
++		};
++	};
++};
++
++main1_thermal: main1-thermal {
++	polling-delay-passive = <250>; /* milliseconds */
++	polling-delay = <500>; /* milliseconds */
++	thermal-sensors = <&wkup_vtm0 3>;
++
++	trips {
++		main1_crit: main1-crit {
++			temperature = <125000>; /* milliCelsius */
++			hysteresis = <2000>; /* milliCelsius */
++			type = "critical";
++		};
++	};
++};
++
++main2_thermal: main2-thermal {
++	polling-delay-passive = <250>; /* milliseconds */
++	polling-delay = <500>; /* milliseconds */
++	thermal-sensors = <&wkup_vtm0 4>;
++
++	trips {
++		main2_crit: main2-crit {
++			temperature = <125000>; /* milliCelsius */
++			hysteresis = <2000>; /* milliCelsius */
++			type = "critical";
++		};
++	};
++};
++
++main3_thermal: main3-thermal {
++	polling-delay-passive = <250>; /* milliseconds */
++	polling-delay = <500>; /* milliseconds */
++	thermal-sensors = <&wkup_vtm0 5>;
++
++	trips {
++		main3_crit: main3-crit {
++			temperature = <125000>; /* milliCelsius */
++			hysteresis = <2000>; /* milliCelsius */
++			type = "critical";
++		};
++	};
++};
++
++main4_thermal: main4-thermal {
++	polling-delay-passive = <250>; /* milliseconds */
++	polling-delay = <500>; /* milliseconds */
++	thermal-sensors = <&wkup_vtm0 6>;
++
++	trips {
++		main4_crit: main4-crit {
++			temperature = <125000>; /* milliCelsius */
++			hysteresis = <2000>; /* milliCelsius */
++			type = "critical";
++		};
++	};
++};
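
Each zone above defines only a critical trip, so the kernel can perform an emergency shutdown but no throttling. On boards with a usable cooling device, a passive trip plus a cooling map could be layered on top; a sketch under the assumption that a cpufreq cooling device is exposed at a &cpu0 label (both the trip temperature and the label are illustrative; THERMAL_NO_LIMIT comes from the thermal.h binding header included above):

&main0_thermal {
	trips {
		main0_alert: main0-alert {
			temperature = <105000>; /* milliCelsius, illustrative */
			hysteresis = <2000>; /* milliCelsius */
			type = "passive";
		};
	};

	cooling-maps {
		map0 {
			trip = <&main0_alert>;
			/* assumes &cpu0 exposes #cooling-cells */
			cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
		};
	};
};
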
+diff --git a/arch/arm64/boot/dts/ti/k3-j784s4-main.dtsi b/arch/arm64/boot/dts/ti/k3-j784s4-main.dtsi
+index e73bb750b09ad5..0160fe0da98388 100644
+--- a/arch/arm64/boot/dts/ti/k3-j784s4-main.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-j784s4-main.dtsi
+@@ -5,2781 +5,124 @@
+  * Copyright (C) 2022-2024 Texas Instruments Incorporated - https://www.ti.com/
+  */
+ 
+-#include <dt-bindings/mux/mux.h>
+-#include <dt-bindings/phy/phy.h>
+-#include <dt-bindings/phy/phy-ti.h>
+-
+-#include "k3-serdes.h"
+-
+-/ {
+-	serdes_refclk: clock-serdes {
+-		#clock-cells = <0>;
+-		compatible = "fixed-clock";
+-		/* To be enabled when serdes_wiz* is functional */
+-		status = "disabled";
+-	};
+-};
+-
+-&cbass_main {
+-	msmc_ram: sram@70000000 {
+-		compatible = "mmio-sram";
+-		reg = <0x00 0x70000000 0x00 0x800000>;
+-		#address-cells = <1>;
+-		#size-cells = <1>;
+-		ranges = <0x00 0x00 0x70000000 0x800000>;
+-
+-		atf-sram@0 {
+-			reg = <0x00 0x20000>;
+-		};
+-
+-		tifs-sram@1f0000 {
+-			reg = <0x1f0000 0x10000>;
+-		};
+-
+-		l3cache-sram@200000 {
+-			reg = <0x200000 0x200000>;
+-		};
+-	};
+-
+-	scm_conf: bus@100000 {
+-		compatible = "simple-bus";
+-		reg = <0x00 0x00100000 0x00 0x1c000>;
+-		#address-cells = <1>;
+-		#size-cells = <1>;
+-		ranges = <0x00 0x00 0x00100000 0x1c000>;
+-
+-		cpsw1_phy_gmii_sel: phy@4034 {
+-			compatible = "ti,am654-phy-gmii-sel";
+-			reg = <0x4034 0x4>;
+-			#phy-cells = <1>;
+-		};
+-
+-		cpsw0_phy_gmii_sel: phy@4044 {
+-			compatible = "ti,j784s4-cpsw9g-phy-gmii-sel";
+-			reg = <0x4044 0x20>;
+-			#phy-cells = <1>;
+-			ti,qsgmii-main-ports = <7>, <7>;
+-		};
+-
+-		pcie0_ctrl: pcie0-ctrl@4070 {
+-			compatible = "ti,j784s4-pcie-ctrl", "syscon";
+-			reg = <0x4070 0x4>;
+-		};
+-
+-		pcie1_ctrl: pcie1-ctrl@4074 {
+-			compatible = "ti,j784s4-pcie-ctrl", "syscon";
+-			reg = <0x4074 0x4>;
+-		};
+-
+-		pcie2_ctrl: pcie2-ctrl@4078 {
+-			compatible = "ti,j784s4-pcie-ctrl", "syscon";
+-			reg = <0x4078 0x4>;
+-		};
+-
+-		pcie3_ctrl: pcie3-ctrl@407c {
+-			compatible = "ti,j784s4-pcie-ctrl", "syscon";
+-			reg = <0x407c 0x4>;
+-		};
+-
+-		serdes_ln_ctrl: mux-controller@4080 {
+-			compatible = "reg-mux";
+-			reg = <0x00004080 0x30>;
+-			#mux-control-cells = <1>;
+-			mux-reg-masks = <0x0 0x3>, <0x4 0x3>, /* SERDES0 lane0/1 select */
+-					<0x8 0x3>, <0xc 0x3>, /* SERDES0 lane2/3 select */
+-					<0x10 0x3>, <0x14 0x3>, /* SERDES1 lane0/1 select */
+-					<0x18 0x3>, <0x1c 0x3>, /* SERDES1 lane2/3 select */
+-					<0x20 0x3>, <0x24 0x3>, /* SERDES2 lane0/1 select */
+-					<0x28 0x3>, <0x2c 0x3>; /* SERDES2 lane2/3 select */
+-			idle-states = <J784S4_SERDES0_LANE0_PCIE1_LANE0>,
+-				      <J784S4_SERDES0_LANE1_PCIE1_LANE1>,
+-				      <J784S4_SERDES0_LANE2_IP3_UNUSED>,
+-				      <J784S4_SERDES0_LANE3_USB>,
+-				      <J784S4_SERDES1_LANE0_PCIE0_LANE0>,
+-				      <J784S4_SERDES1_LANE1_PCIE0_LANE1>,
+-				      <J784S4_SERDES1_LANE2_PCIE0_LANE2>,
+-				      <J784S4_SERDES1_LANE3_PCIE0_LANE3>,
+-				      <J784S4_SERDES2_LANE0_IP2_UNUSED>,
+-				      <J784S4_SERDES2_LANE1_IP2_UNUSED>,
+-				      <J784S4_SERDES2_LANE2_QSGMII_LANE1>,
+-				      <J784S4_SERDES2_LANE3_QSGMII_LANE2>,
+-				      <J784S4_SERDES4_LANE0_EDP_LANE0>,
+-				      <J784S4_SERDES4_LANE1_EDP_LANE1>,
+-				      <J784S4_SERDES4_LANE2_EDP_LANE2>,
+-				      <J784S4_SERDES4_LANE3_EDP_LANE3>;
+-		};
+-
+-		usb_serdes_mux: mux-controller@4000 {
+-			compatible = "reg-mux";
+-			reg = <0x4000 0x4>;
+-			#mux-control-cells = <1>;
+-			mux-reg-masks = <0x0 0x8000000>; /* USB0 to SERDES0 lane 3 mux */
+-		};
+-
+-		ehrpwm_tbclk: clock-controller@4140 {
+-			compatible = "ti,am654-ehrpwm-tbclk";
+-			reg = <0x4140 0x18>;
+-			#clock-cells = <1>;
+-		};
+-
+-		audio_refclk1: clock@82e4 {
+-			compatible = "ti,am62-audio-refclk";
+-			reg = <0x82e4 0x4>;
+-			clocks = <&k3_clks 157 34>;
+-			assigned-clocks = <&k3_clks 157 34>;
+-			assigned-clock-parents = <&k3_clks 157 63>;
+-			#clock-cells = <0>;
+-		};
+-	};
+-
+-	main_ehrpwm0: pwm@3000000 {
+-		compatible = "ti,am654-ehrpwm", "ti,am3352-ehrpwm";
+-		reg = <0x00 0x3000000 0x00 0x100>;
+-		clocks = <&ehrpwm_tbclk 0>, <&k3_clks 219 0>;
+-		clock-names = "tbclk", "fck";
+-		power-domains = <&k3_pds 219 TI_SCI_PD_EXCLUSIVE>;
+-		#pwm-cells = <3>;
+-		status = "disabled";
+-	};
+-
+-	main_ehrpwm1: pwm@3010000 {
+-		compatible = "ti,am654-ehrpwm", "ti,am3352-ehrpwm";
+-		reg = <0x00 0x3010000 0x00 0x100>;
+-		clocks = <&ehrpwm_tbclk 1>, <&k3_clks 220 0>;
+-		clock-names = "tbclk", "fck";
+-		power-domains = <&k3_pds 220 TI_SCI_PD_EXCLUSIVE>;
+-		#pwm-cells = <3>;
+-		status = "disabled";
+-	};
+-
+-	main_ehrpwm2: pwm@3020000 {
+-		compatible = "ti,am654-ehrpwm", "ti,am3352-ehrpwm";
+-		reg = <0x00 0x3020000 0x00 0x100>;
+-		clocks = <&ehrpwm_tbclk 2>, <&k3_clks 221 0>;
+-		clock-names = "tbclk", "fck";
+-		power-domains = <&k3_pds 221 TI_SCI_PD_EXCLUSIVE>;
+-		#pwm-cells = <3>;
+-		status = "disabled";
+-	};
+-
+-	main_ehrpwm3: pwm@3030000 {
+-		compatible = "ti,am654-ehrpwm", "ti,am3352-ehrpwm";
+-		reg = <0x00 0x3030000 0x00 0x100>;
+-		clocks = <&ehrpwm_tbclk 3>, <&k3_clks 222 0>;
+-		clock-names = "tbclk", "fck";
+-		power-domains = <&k3_pds 222 TI_SCI_PD_EXCLUSIVE>;
+-		#pwm-cells = <3>;
+-		status = "disabled";
+-	};
+-
+-	main_ehrpwm4: pwm@3040000 {
+-		compatible = "ti,am654-ehrpwm", "ti,am3352-ehrpwm";
+-		reg = <0x00 0x3040000 0x00 0x100>;
+-		clocks = <&ehrpwm_tbclk 4>, <&k3_clks 223 0>;
+-		clock-names = "tbclk", "fck";
+-		power-domains = <&k3_pds 223 TI_SCI_PD_EXCLUSIVE>;
+-		#pwm-cells = <3>;
+-		status = "disabled";
+-	};
+-
+-	main_ehrpwm5: pwm@3050000 {
+-		compatible = "ti,am654-ehrpwm", "ti,am3352-ehrpwm";
+-		reg = <0x00 0x3050000 0x00 0x100>;
+-		clocks = <&ehrpwm_tbclk 5>, <&k3_clks 224 0>;
+-		clock-names = "tbclk", "fck";
+-		power-domains = <&k3_pds 224 TI_SCI_PD_EXCLUSIVE>;
+-		#pwm-cells = <3>;
+-		status = "disabled";
+-	};
+-
+-	gic500: interrupt-controller@1800000 {
+-		compatible = "arm,gic-v3";
+-		#address-cells = <2>;
+-		#size-cells = <2>;
+-		ranges;
+-		#interrupt-cells = <3>;
+-		interrupt-controller;
+-		reg = <0x00 0x01800000 0x00 0x200000>, /* GICD */
+-		      <0x00 0x01900000 0x00 0x100000>, /* GICR */
+-		      <0x00 0x6f000000 0x00 0x2000>,   /* GICC */
+-		      <0x00 0x6f010000 0x00 0x1000>,   /* GICH */
+-		      <0x00 0x6f020000 0x00 0x2000>;   /* GICV */
+-
+-		/* vcpumntirq: virtual CPU interface maintenance interrupt */
+-		interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
+-
+-		gic_its: msi-controller@1820000 {
+-			compatible = "arm,gic-v3-its";
+-			reg = <0x00 0x01820000 0x00 0x10000>;
+-			socionext,synquacer-pre-its = <0x1000000 0x400000>;
+-			msi-controller;
+-			#msi-cells = <1>;
+-		};
+-	};
+-
+-	main_gpio_intr: interrupt-controller@a00000 {
+-		compatible = "ti,sci-intr";
+-		reg = <0x00 0x00a00000 0x00 0x800>;
+-		ti,intr-trigger-type = <1>;
+-		interrupt-controller;
+-		interrupt-parent = <&gic500>;
+-		#interrupt-cells = <1>;
+-		ti,sci = <&sms>;
+-		ti,sci-dev-id = <10>;
+-		ti,interrupt-ranges = <8 392 56>;
+-	};
+-
+-	main_pmx0: pinctrl@11c000 {
+-		compatible = "pinctrl-single";
+-		/* Proxy 0 addressing */
+-		reg = <0x00 0x11c000 0x00 0x120>;
+-		#pinctrl-cells = <1>;
+-		pinctrl-single,register-width = <32>;
+-		pinctrl-single,function-mask = <0xffffffff>;
+-	};
+-
+-	/* TIMERIO pad input CTRLMMR_TIMER*_CTRL registers */
+-	main_timerio_input: pinctrl@104200 {
+-		compatible = "pinctrl-single";
+-		reg = <0x00 0x104200 0x00 0x50>;
+-		#pinctrl-cells = <1>;
+-		pinctrl-single,register-width = <32>;
+-		pinctrl-single,function-mask = <0x00000007>;
+-	};
+-
+-	/* TIMERIO pad output CTCTRLMMR_TIMERIO*_CTRL registers */
+-	main_timerio_output: pinctrl@104280 {
+-		compatible = "pinctrl-single";
+-		reg = <0x00 0x104280 0x00 0x20>;
+-		#pinctrl-cells = <1>;
+-		pinctrl-single,register-width = <32>;
+-		pinctrl-single,function-mask = <0x0000001f>;
+-	};
+-
+-	main_crypto: crypto@4e00000 {
+-		compatible = "ti,j721e-sa2ul";
+-		reg = <0x00 0x4e00000 0x00 0x1200>;
+-		power-domains = <&k3_pds 369 TI_SCI_PD_EXCLUSIVE>;
+-		#address-cells = <2>;
+-		#size-cells = <2>;
+-		ranges = <0x00 0x04e00000 0x00 0x04e00000 0x00 0x30000>;
+-
+-		dmas = <&main_udmap 0xca40>, <&main_udmap 0x4a40>,
+-				<&main_udmap 0x4a41>;
+-		dma-names = "tx", "rx1", "rx2";
+-
+-		rng: rng@4e10000 {
+-			compatible = "inside-secure,safexcel-eip76";
+-			reg = <0x00 0x4e10000 0x00 0x7d>;
+-			interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
+-		};
+-	};
+-
+-	main_timer0: timer@2400000 {
+-		compatible = "ti,am654-timer";
+-		reg = <0x00 0x2400000 0x00 0x400>;
+-		interrupts = <GIC_SPI 224 IRQ_TYPE_LEVEL_HIGH>;
+-		clocks = <&k3_clks 97 2>;
+-		clock-names = "fck";
+-		assigned-clocks = <&k3_clks 97 2>;
+-		assigned-clock-parents = <&k3_clks 97 3>;
+-		power-domains = <&k3_pds 97 TI_SCI_PD_EXCLUSIVE>;
+-		ti,timer-pwm;
+-	};
+-
+-	main_timer1: timer@2410000 {
+-		compatible = "ti,am654-timer";
+-		reg = <0x00 0x2410000 0x00 0x400>;
+-		interrupts = <GIC_SPI 225 IRQ_TYPE_LEVEL_HIGH>;
+-		clocks = <&k3_clks 98 2>;
+-		clock-names = "fck";
+-		assigned-clocks = <&k3_clks 98 2>;
+-		assigned-clock-parents = <&k3_clks 98 3>;
+-		power-domains = <&k3_pds 98 TI_SCI_PD_EXCLUSIVE>;
+-		ti,timer-pwm;
+-	};
+-
+-	main_timer2: timer@2420000 {
+-		compatible = "ti,am654-timer";
+-		reg = <0x00 0x2420000 0x00 0x400>;
+-		interrupts = <GIC_SPI 226 IRQ_TYPE_LEVEL_HIGH>;
+-		clocks = <&k3_clks 99 2>;
+-		clock-names = "fck";
+-		assigned-clocks = <&k3_clks 99 2>;
+-		assigned-clock-parents = <&k3_clks 99 3>;
+-		power-domains = <&k3_pds 99 TI_SCI_PD_EXCLUSIVE>;
+-		ti,timer-pwm;
+-	};
+-
+-	main_timer3: timer@2430000 {
+-		compatible = "ti,am654-timer";
+-		reg = <0x00 0x2430000 0x00 0x400>;
+-		interrupts = <GIC_SPI 227 IRQ_TYPE_LEVEL_HIGH>;
+-		clocks = <&k3_clks 100 2>;
+-		clock-names = "fck";
+-		assigned-clocks = <&k3_clks 100 2>;
+-		assigned-clock-parents = <&k3_clks 100 3>;
+-		power-domains = <&k3_pds 100 TI_SCI_PD_EXCLUSIVE>;
+-		ti,timer-pwm;
+-	};
+-
+-	main_timer4: timer@2440000 {
+-		compatible = "ti,am654-timer";
+-		reg = <0x00 0x2440000 0x00 0x400>;
+-		interrupts = <GIC_SPI 228 IRQ_TYPE_LEVEL_HIGH>;
+-		clocks = <&k3_clks 101 2>;
+-		clock-names = "fck";
+-		assigned-clocks = <&k3_clks 101 2>;
+-		assigned-clock-parents = <&k3_clks 101 3>;
+-		power-domains = <&k3_pds 101 TI_SCI_PD_EXCLUSIVE>;
+-		ti,timer-pwm;
+-	};
+-
+-	main_timer5: timer@2450000 {
+-		compatible = "ti,am654-timer";
+-		reg = <0x00 0x2450000 0x00 0x400>;
+-		interrupts = <GIC_SPI 229 IRQ_TYPE_LEVEL_HIGH>;
+-		clocks = <&k3_clks 102 2>;
+-		clock-names = "fck";
+-		assigned-clocks = <&k3_clks 102 2>;
+-		assigned-clock-parents = <&k3_clks 102 3>;
+-		power-domains = <&k3_pds 102 TI_SCI_PD_EXCLUSIVE>;
+-		ti,timer-pwm;
+-	};
+-
+-	main_timer6: timer@2460000 {
+-		compatible = "ti,am654-timer";
+-		reg = <0x00 0x2460000 0x00 0x400>;
+-		interrupts = <GIC_SPI 230 IRQ_TYPE_LEVEL_HIGH>;
+-		clocks = <&k3_clks 103 2>;
+-		clock-names = "fck";
+-		assigned-clocks = <&k3_clks 103 2>;
+-		assigned-clock-parents = <&k3_clks 103 3>;
+-		power-domains = <&k3_pds 103 TI_SCI_PD_EXCLUSIVE>;
+-		ti,timer-pwm;
+-	};
+-
+-	main_timer7: timer@2470000 {
+-		compatible = "ti,am654-timer";
+-		reg = <0x00 0x2470000 0x00 0x400>;
+-		interrupts = <GIC_SPI 231 IRQ_TYPE_LEVEL_HIGH>;
+-		clocks = <&k3_clks 104 2>;
+-		clock-names = "fck";
+-		assigned-clocks = <&k3_clks 104 2>;
+-		assigned-clock-parents = <&k3_clks 104 3>;
+-		power-domains = <&k3_pds 104 TI_SCI_PD_EXCLUSIVE>;
+-		ti,timer-pwm;
+-	};
+-
+-	main_timer8: timer@2480000 {
+-		compatible = "ti,am654-timer";
+-		reg = <0x00 0x2480000 0x00 0x400>;
+-		interrupts = <GIC_SPI 232 IRQ_TYPE_LEVEL_HIGH>;
+-		clocks = <&k3_clks 105 2>;
+-		clock-names = "fck";
+-		assigned-clocks = <&k3_clks 105 2>;
+-		assigned-clock-parents = <&k3_clks 105 3>;
+-		power-domains = <&k3_pds 105 TI_SCI_PD_EXCLUSIVE>;
+-		ti,timer-pwm;
+-	};
+-
+-	main_timer9: timer@2490000 {
+-		compatible = "ti,am654-timer";
+-		reg = <0x00 0x2490000 0x00 0x400>;
+-		interrupts = <GIC_SPI 233 IRQ_TYPE_LEVEL_HIGH>;
+-		clocks = <&k3_clks 106 2>;
+-		clock-names = "fck";
+-		assigned-clocks = <&k3_clks 106 2>;
+-		assigned-clock-parents = <&k3_clks 106 3>;
+-		power-domains = <&k3_pds 106 TI_SCI_PD_EXCLUSIVE>;
+-		ti,timer-pwm;
+-	};
+-
+-	main_timer10: timer@24a0000 {
+-		compatible = "ti,am654-timer";
+-		reg = <0x00 0x24a0000 0x00 0x400>;
+-		interrupts = <GIC_SPI 234 IRQ_TYPE_LEVEL_HIGH>;
+-		clocks = <&k3_clks 107 2>;
+-		clock-names = "fck";
+-		assigned-clocks = <&k3_clks 107 2>;
+-		assigned-clock-parents = <&k3_clks 107 3>;
+-		power-domains = <&k3_pds 107 TI_SCI_PD_EXCLUSIVE>;
+-		ti,timer-pwm;
+-	};
+-
+-	main_timer11: timer@24b0000 {
+-		compatible = "ti,am654-timer";
+-		reg = <0x00 0x24b0000 0x00 0x400>;
+-		interrupts = <GIC_SPI 235 IRQ_TYPE_LEVEL_HIGH>;
+-		clocks = <&k3_clks 108 2>;
+-		clock-names = "fck";
+-		assigned-clocks = <&k3_clks 108 2>;
+-		assigned-clock-parents = <&k3_clks 108 3>;
+-		power-domains = <&k3_pds 108 TI_SCI_PD_EXCLUSIVE>;
+-		ti,timer-pwm;
+-	};
+-
+-	main_timer12: timer@24c0000 {
+-		compatible = "ti,am654-timer";
+-		reg = <0x00 0x24c0000 0x00 0x400>;
+-		interrupts = <GIC_SPI 236 IRQ_TYPE_LEVEL_HIGH>;
+-		clocks = <&k3_clks 109 2>;
+-		clock-names = "fck";
+-		assigned-clocks = <&k3_clks 109 2>;
+-		assigned-clock-parents = <&k3_clks 109 3>;
+-		power-domains = <&k3_pds 109 TI_SCI_PD_EXCLUSIVE>;
+-		ti,timer-pwm;
+-	};
+-
+-	main_timer13: timer@24d0000 {
+-		compatible = "ti,am654-timer";
+-		reg = <0x00 0x24d0000 0x00 0x400>;
+-		interrupts = <GIC_SPI 237 IRQ_TYPE_LEVEL_HIGH>;
+-		clocks = <&k3_clks 110 2>;
+-		clock-names = "fck";
+-		assigned-clocks = <&k3_clks 110 2>;
+-		assigned-clock-parents = <&k3_clks 110 3>;
+-		power-domains = <&k3_pds 110 TI_SCI_PD_EXCLUSIVE>;
+-		ti,timer-pwm;
+-	};
+-
+-	main_timer14: timer@24e0000 {
+-		compatible = "ti,am654-timer";
+-		reg = <0x00 0x24e0000 0x00 0x400>;
+-		interrupts = <GIC_SPI 238 IRQ_TYPE_LEVEL_HIGH>;
+-		clocks = <&k3_clks 111 2>;
+-		clock-names = "fck";
+-		assigned-clocks = <&k3_clks 111 2>;
+-		assigned-clock-parents = <&k3_clks 111 3>;
+-		power-domains = <&k3_pds 111 TI_SCI_PD_EXCLUSIVE>;
+-		ti,timer-pwm;
+-	};
+-
+-	main_timer15: timer@24f0000 {
+-		compatible = "ti,am654-timer";
+-		reg = <0x00 0x24f0000 0x00 0x400>;
+-		interrupts = <GIC_SPI 239 IRQ_TYPE_LEVEL_HIGH>;
+-		clocks = <&k3_clks 112 2>;
+-		clock-names = "fck";
+-		assigned-clocks = <&k3_clks 112 2>;
+-		assigned-clock-parents = <&k3_clks 112 3>;
+-		power-domains = <&k3_pds 112 TI_SCI_PD_EXCLUSIVE>;
+-		ti,timer-pwm;
+-	};
+-
+-	main_timer16: timer@2500000 {
+-		compatible = "ti,am654-timer";
+-		reg = <0x00 0x2500000 0x00 0x400>;
+-		interrupts = <GIC_SPI 240 IRQ_TYPE_LEVEL_HIGH>;
+-		clocks = <&k3_clks 113 2>;
+-		clock-names = "fck";
+-		assigned-clocks = <&k3_clks 113 2>;
+-		assigned-clock-parents = <&k3_clks 113 3>;
+-		power-domains = <&k3_pds 113 TI_SCI_PD_EXCLUSIVE>;
+-		ti,timer-pwm;
+-	};
+-
+-	main_timer17: timer@2510000 {
+-		compatible = "ti,am654-timer";
+-		reg = <0x00 0x2510000 0x00 0x400>;
+-		interrupts = <GIC_SPI 241 IRQ_TYPE_LEVEL_HIGH>;
+-		clocks = <&k3_clks 114 2>;
+-		clock-names = "fck";
+-		assigned-clocks = <&k3_clks 114 2>;
+-		assigned-clock-parents = <&k3_clks 114 3>;
+-		power-domains = <&k3_pds 114 TI_SCI_PD_EXCLUSIVE>;
+-		ti,timer-pwm;
+-	};
+-
+-	main_timer18: timer@2520000 {
+-		compatible = "ti,am654-timer";
+-		reg = <0x00 0x2520000 0x00 0x400>;
+-		interrupts = <GIC_SPI 242 IRQ_TYPE_LEVEL_HIGH>;
+-		clocks = <&k3_clks 115 2>;
+-		clock-names = "fck";
+-		assigned-clocks = <&k3_clks 115 2>;
+-		assigned-clock-parents = <&k3_clks 115 3>;
+-		power-domains = <&k3_pds 115 TI_SCI_PD_EXCLUSIVE>;
+-		ti,timer-pwm;
+-	};
+-
+-	main_timer19: timer@2530000 {
+-		compatible = "ti,am654-timer";
+-		reg = <0x00 0x2530000 0x00 0x400>;
+-		interrupts = <GIC_SPI 243 IRQ_TYPE_LEVEL_HIGH>;
+-		clocks = <&k3_clks 116 2>;
+-		clock-names = "fck";
+-		assigned-clocks = <&k3_clks 116 2>;
+-		assigned-clock-parents = <&k3_clks 116 3>;
+-		power-domains = <&k3_pds 116 TI_SCI_PD_EXCLUSIVE>;
+-		ti,timer-pwm;
+-	};
+-
+-	main_uart0: serial@2800000 {
+-		compatible = "ti,j721e-uart", "ti,am654-uart";
+-		reg = <0x00 0x02800000 0x00 0x200>;
+-		interrupts = <GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>;
+-		clocks = <&k3_clks 146 0>;
+-		clock-names = "fclk";
+-		power-domains = <&k3_pds 146 TI_SCI_PD_EXCLUSIVE>;
+-		status = "disabled";
+-	};
+-
+-	main_uart1: serial@2810000 {
+-		compatible = "ti,j721e-uart", "ti,am654-uart";
+-		reg = <0x00 0x02810000 0x00 0x200>;
+-		interrupts = <GIC_SPI 193 IRQ_TYPE_LEVEL_HIGH>;
+-		clocks = <&k3_clks 388 0>;
+-		clock-names = "fclk";
+-		power-domains = <&k3_pds 388 TI_SCI_PD_EXCLUSIVE>;
+-		status = "disabled";
+-	};
+-
+-	main_uart2: serial@2820000 {
+-		compatible = "ti,j721e-uart", "ti,am654-uart";
+-		reg = <0x00 0x02820000 0x00 0x200>;
+-		interrupts = <GIC_SPI 194 IRQ_TYPE_LEVEL_HIGH>;
+-		clocks = <&k3_clks 389 0>;
+-		clock-names = "fclk";
+-		power-domains = <&k3_pds 389 TI_SCI_PD_EXCLUSIVE>;
+-		status = "disabled";
+-	};
+-
+-	main_uart3: serial@2830000 {
+-		compatible = "ti,j721e-uart", "ti,am654-uart";
+-		reg = <0x00 0x02830000 0x00 0x200>;
+-		interrupts = <GIC_SPI 195 IRQ_TYPE_LEVEL_HIGH>;
+-		clocks = <&k3_clks 390 0>;
+-		clock-names = "fclk";
+-		power-domains = <&k3_pds 390 TI_SCI_PD_EXCLUSIVE>;
+-		status = "disabled";
+-	};
+-
+-	main_uart4: serial@2840000 {
+-		compatible = "ti,j721e-uart", "ti,am654-uart";
+-		reg = <0x00 0x02840000 0x00 0x200>;
+-		interrupts = <GIC_SPI 196 IRQ_TYPE_LEVEL_HIGH>;
+-		clocks = <&k3_clks 391 0>;
+-		clock-names = "fclk";
+-		power-domains = <&k3_pds 391 TI_SCI_PD_EXCLUSIVE>;
+-		status = "disabled";
+-	};
+-
+-	main_uart5: serial@2850000 {
+-		compatible = "ti,j721e-uart", "ti,am654-uart";
+-		reg = <0x00 0x02850000 0x00 0x200>;
+-		interrupts = <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH>;
+-		clocks = <&k3_clks 392 0>;
+-		clock-names = "fclk";
+-		power-domains = <&k3_pds 392 TI_SCI_PD_EXCLUSIVE>;
+-		status = "disabled";
+-	};
+-
+-	main_uart6: serial@2860000 {
+-		compatible = "ti,j721e-uart", "ti,am654-uart";
+-		reg = <0x00 0x02860000 0x00 0x200>;
+-		interrupts = <GIC_SPI 198 IRQ_TYPE_LEVEL_HIGH>;
+-		clocks = <&k3_clks 393 0>;
+-		clock-names = "fclk";
+-		power-domains = <&k3_pds 393 TI_SCI_PD_EXCLUSIVE>;
+-		status = "disabled";
+-	};
+-
+-	main_uart7: serial@2870000 {
+-		compatible = "ti,j721e-uart", "ti,am654-uart";
+-		reg = <0x00 0x02870000 0x00 0x200>;
+-		interrupts = <GIC_SPI 199 IRQ_TYPE_LEVEL_HIGH>;
+-		clocks = <&k3_clks 394 0>;
+-		clock-names = "fclk";
+-		power-domains = <&k3_pds 394 TI_SCI_PD_EXCLUSIVE>;
+-		status = "disabled";
+-	};
+-
+-	main_uart8: serial@2880000 {
+-		compatible = "ti,j721e-uart", "ti,am654-uart";
+-		reg = <0x00 0x02880000 0x00 0x200>;
+-		interrupts = <GIC_SPI 248 IRQ_TYPE_LEVEL_HIGH>;
+-		clocks = <&k3_clks 395 0>;
+-		clock-names = "fclk";
+-		power-domains = <&k3_pds 395 TI_SCI_PD_EXCLUSIVE>;
+-		status = "disabled";
+-	};
+-
+-	main_uart9: serial@2890000 {
+-		compatible = "ti,j721e-uart", "ti,am654-uart";
+-		reg = <0x00 0x02890000 0x00 0x200>;
+-		interrupts = <GIC_SPI 249 IRQ_TYPE_LEVEL_HIGH>;
+-		clocks = <&k3_clks 396 0>;
+-		clock-names = "fclk";
+-		power-domains = <&k3_pds 396 TI_SCI_PD_EXCLUSIVE>;
+-		status = "disabled";
+-	};
+-
+-	main_gpio0: gpio@600000 {
+-		compatible = "ti,j721e-gpio", "ti,keystone-gpio";
+-		reg = <0x00 0x00600000 0x00 0x100>;
+-		gpio-controller;
+-		#gpio-cells = <2>;
+-		interrupt-parent = <&main_gpio_intr>;
+-		interrupts = <145>, <146>, <147>, <148>, <149>;
+-		interrupt-controller;
+-		#interrupt-cells = <2>;
+-		ti,ngpio = <66>;
+-		ti,davinci-gpio-unbanked = <0>;
+-		power-domains = <&k3_pds 163 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 163 0>;
+-		clock-names = "gpio";
+-		status = "disabled";
+-	};
+-
+-	main_gpio2: gpio@610000 {
+-		compatible = "ti,j721e-gpio", "ti,keystone-gpio";
+-		reg = <0x00 0x00610000 0x00 0x100>;
+-		gpio-controller;
+-		#gpio-cells = <2>;
+-		interrupt-parent = <&main_gpio_intr>;
+-		interrupts = <154>, <155>, <156>, <157>, <158>;
+-		interrupt-controller;
+-		#interrupt-cells = <2>;
+-		ti,ngpio = <66>;
+-		ti,davinci-gpio-unbanked = <0>;
+-		power-domains = <&k3_pds 164 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 164 0>;
+-		clock-names = "gpio";
+-		status = "disabled";
+-	};
+-
+-	main_gpio4: gpio@620000 {
+-		compatible = "ti,j721e-gpio", "ti,keystone-gpio";
+-		reg = <0x00 0x00620000 0x00 0x100>;
+-		gpio-controller;
+-		#gpio-cells = <2>;
+-		interrupt-parent = <&main_gpio_intr>;
+-		interrupts = <163>, <164>, <165>, <166>, <167>;
+-		interrupt-controller;
+-		#interrupt-cells = <2>;
+-		ti,ngpio = <66>;
+-		ti,davinci-gpio-unbanked = <0>;
+-		power-domains = <&k3_pds 165 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 165 0>;
+-		clock-names = "gpio";
+-		status = "disabled";
+-	};
+-
+-	main_gpio6: gpio@630000 {
+-		compatible = "ti,j721e-gpio", "ti,keystone-gpio";
+-		reg = <0x00 0x00630000 0x00 0x100>;
+-		gpio-controller;
+-		#gpio-cells = <2>;
+-		interrupt-parent = <&main_gpio_intr>;
+-		interrupts = <172>, <173>, <174>, <175>, <176>;
+-		interrupt-controller;
+-		#interrupt-cells = <2>;
+-		ti,ngpio = <66>;
+-		ti,davinci-gpio-unbanked = <0>;
+-		power-domains = <&k3_pds 166 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 166 0>;
+-		clock-names = "gpio";
+-		status = "disabled";
+-	};
+-
+-	usbss0: usb@4104000 {
+-		bootph-all;
+-		compatible = "ti,j721e-usb";
+-		reg = <0x00 0x4104000 0x00 0x100>;
+-		dma-coherent;
+-		power-domains = <&k3_pds 398 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 398 21>, <&k3_clks 398 2>;
+-		clock-names = "ref", "lpm";
+-		assigned-clocks = <&k3_clks 398 21>;    /* USB2_REFCLK */
+-		assigned-clock-parents = <&k3_clks 398 22>; /* HFOSC0 */
+-		#address-cells = <2>;
+-		#size-cells = <2>;
+-		ranges;
+-
+-		status = "disabled"; /* Needs lane config */
+-
+-		usb0: usb@6000000 {
+-			bootph-all;
+-			compatible = "cdns,usb3";
+-			reg = <0x00 0x6000000 0x00 0x10000>,
+-			      <0x00 0x6010000 0x00 0x10000>,
+-			      <0x00 0x6020000 0x00 0x10000>;
+-			reg-names = "otg", "xhci", "dev";
+-			interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>,  /* irq.0 */
+-				     <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>, /* irq.6 */
+-				     <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>; /* otgirq.0 */
+-			interrupt-names = "host",
+-					  "peripheral",
+-					  "otg";
+-		};
+-	};
+-
+-	main_i2c0: i2c@2000000 {
+-		compatible = "ti,j721e-i2c", "ti,omap4-i2c";
+-		reg = <0x00 0x02000000 0x00 0x100>;
+-		interrupts = <GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH>;
+-		#address-cells = <1>;
+-		#size-cells = <0>;
+-		clocks = <&k3_clks 270 2>;
+-		clock-names = "fck";
+-		power-domains = <&k3_pds 270 TI_SCI_PD_EXCLUSIVE>;
+-		status = "disabled";
+-	};
+-
+-	main_i2c1: i2c@2010000 {
+-		compatible = "ti,j721e-i2c", "ti,omap4-i2c";
+-		reg = <0x00 0x02010000 0x00 0x100>;
+-		interrupts = <GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH>;
+-		#address-cells = <1>;
+-		#size-cells = <0>;
+-		clocks = <&k3_clks 271 2>;
+-		clock-names = "fck";
+-		power-domains = <&k3_pds 271 TI_SCI_PD_EXCLUSIVE>;
+-		status = "disabled";
+-	};
+-
+-	main_i2c2: i2c@2020000 {
+-		compatible = "ti,j721e-i2c", "ti,omap4-i2c";
+-		reg = <0x00 0x02020000 0x00 0x100>;
+-		interrupts = <GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH>;
+-		#address-cells = <1>;
+-		#size-cells = <0>;
+-		clocks = <&k3_clks 272 2>;
+-		clock-names = "fck";
+-		power-domains = <&k3_pds 272 TI_SCI_PD_EXCLUSIVE>;
+-		status = "disabled";
+-	};
+-
+-	main_i2c3: i2c@2030000 {
+-		compatible = "ti,j721e-i2c", "ti,omap4-i2c";
+-		reg = <0x00 0x02030000 0x00 0x100>;
+-		interrupts = <GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH>;
+-		#address-cells = <1>;
+-		#size-cells = <0>;
+-		clocks = <&k3_clks 273 2>;
+-		clock-names = "fck";
+-		power-domains = <&k3_pds 273 TI_SCI_PD_EXCLUSIVE>;
+-		status = "disabled";
+-	};
+-
+-	main_i2c4: i2c@2040000 {
+-		compatible = "ti,j721e-i2c", "ti,omap4-i2c";
+-		reg = <0x00 0x02040000 0x00 0x100>;
+-		interrupts = <GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH>;
+-		#address-cells = <1>;
+-		#size-cells = <0>;
+-		clocks = <&k3_clks 274 2>;
+-		clock-names = "fck";
+-		power-domains = <&k3_pds 274 TI_SCI_PD_EXCLUSIVE>;
+-		status = "disabled";
+-	};
+-
+-	main_i2c5: i2c@2050000 {
+-		compatible = "ti,j721e-i2c", "ti,omap4-i2c";
+-		reg = <0x00 0x02050000 0x00 0x100>;
+-		interrupts = <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>;
+-		#address-cells = <1>;
+-		#size-cells = <0>;
+-		clocks = <&k3_clks 275 2>;
+-		clock-names = "fck";
+-		power-domains = <&k3_pds 275 TI_SCI_PD_EXCLUSIVE>;
+-		status = "disabled";
+-	};
+-
+-	main_i2c6: i2c@2060000 {
+-		compatible = "ti,j721e-i2c", "ti,omap4-i2c";
+-		reg = <0x00 0x02060000 0x00 0x100>;
+-		interrupts = <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>;
+-		#address-cells = <1>;
+-		#size-cells = <0>;
+-		clocks = <&k3_clks 276 2>;
+-		clock-names = "fck";
+-		power-domains = <&k3_pds 276 TI_SCI_PD_EXCLUSIVE>;
+-		status = "disabled";
+-	};
+-
+-	ti_csi2rx0: ticsi2rx@4500000 {
+-		compatible = "ti,j721e-csi2rx-shim";
+-		reg = <0x00 0x04500000 0x00 0x00001000>;
+-		ranges;
+-		#address-cells = <2>;
+-		#size-cells = <2>;
+-		dmas = <&main_bcdma_csi 0 0x4940 0>;
+-		dma-names = "rx0";
+-		power-domains = <&k3_pds 72 TI_SCI_PD_EXCLUSIVE>;
+-		status = "disabled";
+-
+-		cdns_csi2rx0: csi-bridge@4504000 {
+-			compatible = "ti,j721e-csi2rx", "cdns,csi2rx";
+-			reg = <0x00 0x04504000 0x00 0x00001000>;
+-			clocks = <&k3_clks 72 2>, <&k3_clks 72 0>, <&k3_clks 72 2>,
+-				<&k3_clks 72 2>, <&k3_clks 72 3>, <&k3_clks 72 3>;
+-			clock-names = "sys_clk", "p_clk", "pixel_if0_clk",
+-				"pixel_if1_clk", "pixel_if2_clk", "pixel_if3_clk";
+-			phys = <&dphy0>;
+-			phy-names = "dphy";
+-
+-			ports {
+-				#address-cells = <1>;
+-				#size-cells = <0>;
+-
+-				csi0_port0: port@0 {
+-					reg = <0>;
+-					status = "disabled";
+-				};
+-
+-				csi0_port1: port@1 {
+-					reg = <1>;
+-					status = "disabled";
+-				};
+-
+-				csi0_port2: port@2 {
+-					reg = <2>;
+-					status = "disabled";
+-				};
+-
+-				csi0_port3: port@3 {
+-					reg = <3>;
+-					status = "disabled";
+-				};
+-
+-				csi0_port4: port@4 {
+-					reg = <4>;
+-					status = "disabled";
+-				};
+-			};
+-		};
+-	};
+-
+-	ti_csi2rx1: ticsi2rx@4510000 {
+-		compatible = "ti,j721e-csi2rx-shim";
+-		reg = <0x00 0x04510000 0x00 0x1000>;
+-		ranges;
+-		#address-cells = <2>;
+-		#size-cells = <2>;
+-		dmas = <&main_bcdma_csi 0 0x4960 0>;
+-		dma-names = "rx0";
+-		power-domains = <&k3_pds 73 TI_SCI_PD_EXCLUSIVE>;
+-		status = "disabled";
+-
+-		cdns_csi2rx1: csi-bridge@4514000 {
+-			compatible = "ti,j721e-csi2rx", "cdns,csi2rx";
+-			reg = <0x00 0x04514000 0x00 0x00001000>;
+-			clocks = <&k3_clks 73 2>, <&k3_clks 73 0>, <&k3_clks 73 2>,
+-				<&k3_clks 73 2>, <&k3_clks 73 3>, <&k3_clks 73 3>;
+-			clock-names = "sys_clk", "p_clk", "pixel_if0_clk",
+-				"pixel_if1_clk", "pixel_if2_clk", "pixel_if3_clk";
+-			phys = <&dphy1>;
+-			phy-names = "dphy";
+-			ports {
+-				#address-cells = <1>;
+-				#size-cells = <0>;
+-
+-				csi1_port0: port@0 {
+-					reg = <0>;
+-					status = "disabled";
+-				};
+-
+-				csi1_port1: port@1 {
+-					reg = <1>;
+-					status = "disabled";
+-				};
+-
+-				csi1_port2: port@2 {
+-					reg = <2>;
+-					status = "disabled";
+-				};
+-
+-				csi1_port3: port@3 {
+-					reg = <3>;
+-					status = "disabled";
+-				};
+-
+-				csi1_port4: port@4 {
+-					reg = <4>;
+-					status = "disabled";
+-				};
+-			};
+-		};
+-	};
+-
+-	ti_csi2rx2: ticsi2rx@4520000 {
+-		compatible = "ti,j721e-csi2rx-shim";
+-		reg = <0x00 0x04520000 0x00 0x00001000>;
+-		ranges;
+-		#address-cells = <2>;
+-		#size-cells = <2>;
+-		dmas = <&main_bcdma_csi 0 0x4980 0>;
+-		dma-names = "rx0";
+-		power-domains = <&k3_pds 74 TI_SCI_PD_EXCLUSIVE>;
+-		status = "disabled";
+-
+-		cdns_csi2rx2: csi-bridge@4524000 {
+-			compatible = "ti,j721e-csi2rx", "cdns,csi2rx";
+-			reg = <0x00 0x04524000 0x00 0x00001000>;
+-			clocks = <&k3_clks 74 2>, <&k3_clks 74 0>, <&k3_clks 74 2>,
+-				<&k3_clks 74 2>, <&k3_clks 74 3>, <&k3_clks 74 3>;
+-			clock-names = "sys_clk", "p_clk", "pixel_if0_clk",
+-				"pixel_if1_clk", "pixel_if2_clk", "pixel_if3_clk";
+-			phys = <&dphy2>;
+-			phy-names = "dphy";
+-
+-			ports {
+-				#address-cells = <1>;
+-				#size-cells = <0>;
+-
+-				csi2_port0: port@0 {
+-					reg = <0>;
+-					status = "disabled";
+-				};
+-
+-				csi2_port1: port@1 {
+-					reg = <1>;
+-					status = "disabled";
+-				};
+-
+-				csi2_port2: port@2 {
+-					reg = <2>;
+-					status = "disabled";
+-				};
+-
+-				csi2_port3: port@3 {
+-					reg = <3>;
+-					status = "disabled";
+-				};
+-
+-				csi2_port4: port@4 {
+-					reg = <4>;
+-					status = "disabled";
+-				};
+-			};
+-		};
+-	};
+-
+-	dphy0: phy@4580000 {
+-		compatible = "cdns,dphy-rx";
+-		reg = <0x00 0x04580000 0x00 0x00001100>;
+-		#phy-cells = <0>;
+-		power-domains = <&k3_pds 212 TI_SCI_PD_EXCLUSIVE>;
+-		status = "disabled";
+-	};
+-
+-	dphy1: phy@4590000 {
+-		compatible = "cdns,dphy-rx";
+-		reg = <0x00 0x04590000 0x00 0x00001100>;
+-		#phy-cells = <0>;
+-		power-domains = <&k3_pds 213 TI_SCI_PD_EXCLUSIVE>;
+-		status = "disabled";
+-	};
+-
+-	dphy2: phy@45a0000 {
+-		compatible = "cdns,dphy-rx";
+-		reg = <0x00 0x045a0000 0x00 0x00001100>;
+-		#phy-cells = <0>;
+-		power-domains = <&k3_pds 214 TI_SCI_PD_EXCLUSIVE>;
+-		status = "disabled";
+-	};
+-
+-	vpu0: video-codec@4210000 {
+-		compatible = "ti,j721s2-wave521c", "cnm,wave521c";
+-		reg = <0x00 0x4210000 0x00 0x10000>;
+-		interrupts = <GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>;
+-		clocks = <&k3_clks 241 2>;
+-		power-domains = <&k3_pds 241 TI_SCI_PD_EXCLUSIVE>;
+-	};
+-
+-	vpu1: video-codec@4220000 {
+-		compatible = "ti,j721s2-wave521c", "cnm,wave521c";
+-		reg = <0x00 0x4220000 0x00 0x10000>;
+-		interrupts = <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>;
+-		clocks = <&k3_clks 242 2>;
+-		power-domains = <&k3_pds 242 TI_SCI_PD_EXCLUSIVE>;
+-	};
+-
+-	main_sdhci0: mmc@4f80000 {
+-		compatible = "ti,j721e-sdhci-8bit";
+-		reg = <0x00 0x04f80000 0x00 0x1000>,
+-		      <0x00 0x04f88000 0x00 0x400>;
+-		interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
+-		power-domains = <&k3_pds 140 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 140 1>, <&k3_clks 140 2>;
+-		clock-names = "clk_ahb", "clk_xin";
+-		assigned-clocks = <&k3_clks 140 2>;
+-		assigned-clock-parents = <&k3_clks 140 3>;
+-		bus-width = <8>;
+-		ti,otap-del-sel-legacy = <0x0>;
+-		ti,otap-del-sel-mmc-hs = <0x0>;
+-		ti,otap-del-sel-ddr52 = <0x6>;
+-		ti,otap-del-sel-hs200 = <0x8>;
+-		ti,otap-del-sel-hs400 = <0x5>;
+-		ti,itap-del-sel-legacy = <0x10>;
+-		ti,itap-del-sel-mmc-hs = <0xa>;
+-		ti,strobe-sel = <0x77>;
+-		ti,clkbuf-sel = <0x7>;
+-		ti,trm-icp = <0x8>;
+-		mmc-ddr-1_8v;
+-		mmc-hs200-1_8v;
+-		mmc-hs400-1_8v;
+-		dma-coherent;
+-		status = "disabled";
+-	};
+-
+-	main_sdhci1: mmc@4fb0000 {
+-		compatible = "ti,j721e-sdhci-4bit";
+-		reg = <0x00 0x04fb0000 0x00 0x1000>,
+-		      <0x00 0x04fb8000 0x00 0x400>;
+-		interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
+-		power-domains = <&k3_pds 141 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 141 3>, <&k3_clks 141 4>;
+-		clock-names = "clk_ahb", "clk_xin";
+-		assigned-clocks = <&k3_clks 141 4>;
+-		assigned-clock-parents = <&k3_clks 141 5>;
+-		bus-width = <4>;
+-		ti,otap-del-sel-legacy = <0x0>;
+-		ti,otap-del-sel-sd-hs = <0x0>;
+-		ti,otap-del-sel-sdr12 = <0xf>;
+-		ti,otap-del-sel-sdr25 = <0xf>;
+-		ti,otap-del-sel-sdr50 = <0xc>;
+-		ti,otap-del-sel-sdr104 = <0x5>;
+-		ti,otap-del-sel-ddr50 = <0xc>;
+-		ti,itap-del-sel-legacy = <0x0>;
+-		ti,itap-del-sel-sd-hs = <0x0>;
+-		ti,itap-del-sel-sdr12 = <0x0>;
+-		ti,itap-del-sel-sdr25 = <0x0>;
+-		ti,itap-del-sel-ddr50 = <0x2>;
+-		ti,clkbuf-sel = <0x7>;
+-		ti,trm-icp = <0x8>;
+-		dma-coherent;
+-		status = "disabled";
+-	};
+-
+-	pcie0_rc: pcie@2900000 {
+-		compatible = "ti,j784s4-pcie-host";
+-		reg = <0x00 0x02900000 0x00 0x1000>,
+-		      <0x00 0x02907000 0x00 0x400>,
+-		      <0x00 0x0d000000 0x00 0x00800000>,
+-		      <0x00 0x10000000 0x00 0x00001000>;
+-		reg-names = "intd_cfg", "user_cfg", "reg", "cfg";
+-		interrupt-names = "link_state";
+-		interrupts = <GIC_SPI 318 IRQ_TYPE_EDGE_RISING>;
+-		device_type = "pci";
+-		ti,syscon-pcie-ctrl = <&pcie0_ctrl 0x0>;
+-		max-link-speed = <3>;
+-		num-lanes = <4>;
+-		power-domains = <&k3_pds 332 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 332 0>;
+-		clock-names = "fck";
+-		#address-cells = <3>;
+-		#size-cells = <2>;
+-		bus-range = <0x0 0xff>;
+-		vendor-id = <0x104c>;
+-		device-id = <0xb012>;
+-		msi-map = <0x0 &gic_its 0x0 0x10000>;
+-		dma-coherent;
+-		ranges = <0x01000000 0x0 0x10001000 0x0 0x10001000 0x0 0x0010000>,
+-			 <0x02000000 0x0 0x10011000 0x0 0x10011000 0x0 0x7fef000>;
+-		dma-ranges = <0x02000000 0x0 0x0 0x0 0x0 0x10000 0x0>;
+-		status = "disabled";
+-	};
+-
+-	pcie1_rc: pcie@2910000 {
+-		compatible = "ti,j784s4-pcie-host";
+-		reg = <0x00 0x02910000 0x00 0x1000>,
+-		      <0x00 0x02917000 0x00 0x400>,
+-		      <0x00 0x0d800000 0x00 0x00800000>,
+-		      <0x00 0x18000000 0x00 0x00001000>;
+-		reg-names = "intd_cfg", "user_cfg", "reg", "cfg";
+-		interrupt-names = "link_state";
+-		interrupts = <GIC_SPI 330 IRQ_TYPE_EDGE_RISING>;
+-		device_type = "pci";
+-		ti,syscon-pcie-ctrl = <&pcie1_ctrl 0x0>;
+-		max-link-speed = <3>;
+-		num-lanes = <4>;
+-		power-domains = <&k3_pds 333 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 333 0>;
+-		clock-names = "fck";
+-		#address-cells = <3>;
+-		#size-cells = <2>;
+-		bus-range = <0x0 0xff>;
+-		vendor-id = <0x104c>;
+-		device-id = <0xb012>;
+-		msi-map = <0x0 &gic_its 0x10000 0x10000>;
+-		dma-coherent;
+-		ranges = <0x01000000 0x0 0x18001000  0x00 0x18001000  0x0 0x0010000>,
+-			 <0x02000000 0x0 0x18011000  0x00 0x18011000  0x0 0x7fef000>;
+-		dma-ranges = <0x02000000 0x0 0x0 0x0 0x0 0x10000 0x0>;
+-		status = "disabled";
+-	};
+-
+-	pcie2_rc: pcie@2920000 {
+-		compatible = "ti,j784s4-pcie-host";
+-		reg = <0x00 0x02920000 0x00 0x1000>,
+-		      <0x00 0x02927000 0x00 0x400>,
+-		      <0x00 0x0e000000 0x00 0x00800000>,
+-		      <0x44 0x00000000 0x00 0x00001000>;
+-		reg-names = "intd_cfg", "user_cfg", "reg", "cfg";
+-		interrupt-names = "link_state";
+-		interrupts = <GIC_SPI 342 IRQ_TYPE_EDGE_RISING>;
+-		device_type = "pci";
+-		ti,syscon-pcie-ctrl = <&pcie2_ctrl 0x0>;
+-		max-link-speed = <3>;
+-		num-lanes = <2>;
+-		power-domains = <&k3_pds 334 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 334 0>;
+-		clock-names = "fck";
+-		#address-cells = <3>;
+-		#size-cells = <2>;
+-		bus-range = <0x0 0xff>;
+-		vendor-id = <0x104c>;
+-		device-id = <0xb012>;
+-		msi-map = <0x0 &gic_its 0x20000 0x10000>;
+-		dma-coherent;
+-		ranges = <0x01000000 0x0 0x00001000 0x44 0x00001000 0x0 0x0010000>,
+-			 <0x02000000 0x0 0x00011000 0x44 0x00011000 0x0 0x7fef000>;
+-		dma-ranges = <0x02000000 0x0 0x0 0x0 0x0 0x10000 0x0>;
+-		status = "disabled";
+-	};
+-
+-	pcie3_rc: pcie@2930000 {
+-		compatible = "ti,j784s4-pcie-host";
+-		reg = <0x00 0x02930000 0x00 0x1000>,
+-		      <0x00 0x02937000 0x00 0x400>,
+-		      <0x00 0x0e800000 0x00 0x00800000>,
+-		      <0x44 0x10000000 0x00 0x00001000>;
+-		reg-names = "intd_cfg", "user_cfg", "reg", "cfg";
+-		interrupt-names = "link_state";
+-		interrupts = <GIC_SPI 354 IRQ_TYPE_EDGE_RISING>;
+-		device_type = "pci";
+-		ti,syscon-pcie-ctrl = <&pcie3_ctrl 0x0>;
+-		max-link-speed = <3>;
+-		num-lanes = <2>;
+-		power-domains = <&k3_pds 335 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 335 0>;
+-		clock-names = "fck";
+-		#address-cells = <3>;
+-		#size-cells = <2>;
+-		bus-range = <0x0 0xff>;
+-		vendor-id = <0x104c>;
+-		device-id = <0xb012>;
+-		msi-map = <0x0 &gic_its 0x30000 0x10000>;
+-		dma-coherent;
+-		ranges = <0x01000000 0x0 0x00001000 0x44 0x10001000 0x0 0x0010000>,
+-			 <0x02000000 0x0 0x00011000 0x44 0x10011000 0x0 0x7fef000>;
+-		dma-ranges = <0x02000000 0x0 0x0 0x0 0x0 0x10000 0x0>;
+-		status = "disabled";
+-	};
+-
+-	serdes_wiz0: wiz@5060000 {
+-		compatible = "ti,j784s4-wiz-10g";
+-		#address-cells = <1>;
+-		#size-cells = <1>;
+-		power-domains = <&k3_pds 404 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 404 2>, <&k3_clks 404 6>, <&serdes_refclk>, <&k3_clks 404 5>;
+-		clock-names = "fck", "core_ref_clk", "ext_ref_clk", "core_ref1_clk";
+-		assigned-clocks = <&k3_clks 404 6>;
+-		assigned-clock-parents = <&k3_clks 404 10>;
+-		num-lanes = <4>;
+-		#reset-cells = <1>;
+-		#clock-cells = <1>;
+-		ranges = <0x5060000 0x00 0x5060000 0x10000>;
+-		status = "disabled";
+-
+-		serdes0: serdes@5060000 {
+-			compatible = "ti,j721e-serdes-10g";
+-			reg = <0x05060000 0x010000>;
+-			reg-names = "torrent_phy";
+-			resets = <&serdes_wiz0 0>;
+-			reset-names = "torrent_reset";
+-			clocks = <&serdes_wiz0 TI_WIZ_PLL0_REFCLK>,
+-				 <&serdes_wiz0 TI_WIZ_PHY_EN_REFCLK>;
+-			clock-names = "refclk", "phy_en_refclk";
+-			assigned-clocks = <&serdes_wiz0 TI_WIZ_PLL0_REFCLK>,
+-					  <&serdes_wiz0 TI_WIZ_PLL1_REFCLK>,
+-					  <&serdes_wiz0 TI_WIZ_REFCLK_DIG>;
+-			assigned-clock-parents = <&k3_clks 404 6>,
+-						 <&k3_clks 404 6>,
+-						 <&k3_clks 404 6>;
+-			#address-cells = <1>;
+-			#size-cells = <0>;
+-			#clock-cells = <1>;
+-			status = "disabled";
+-		};
+-	};
+-
+-	serdes_wiz1: wiz@5070000 {
+-		compatible = "ti,j784s4-wiz-10g";
+-		#address-cells = <1>;
+-		#size-cells = <1>;
+-		power-domains = <&k3_pds 405 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 405 2>, <&k3_clks 405 6>, <&serdes_refclk>, <&k3_clks 405 5>;
+-		clock-names = "fck", "core_ref_clk", "ext_ref_clk", "core_ref1_clk";
+-		assigned-clocks = <&k3_clks 405 6>;
+-		assigned-clock-parents = <&k3_clks 405 10>;
+-		num-lanes = <4>;
+-		#reset-cells = <1>;
+-		#clock-cells = <1>;
+-		ranges = <0x05070000 0x00 0x05070000 0x10000>;
+-		status = "disabled";
+-
+-		serdes1: serdes@5070000 {
+-			compatible = "ti,j721e-serdes-10g";
+-			reg = <0x05070000 0x010000>;
+-			reg-names = "torrent_phy";
+-			resets = <&serdes_wiz1 0>;
+-			reset-names = "torrent_reset";
+-			clocks = <&serdes_wiz1 TI_WIZ_PLL0_REFCLK>,
+-				 <&serdes_wiz1 TI_WIZ_PHY_EN_REFCLK>;
+-			clock-names = "refclk", "phy_en_refclk";
+-			assigned-clocks = <&serdes_wiz1 TI_WIZ_PLL0_REFCLK>,
+-					  <&serdes_wiz1 TI_WIZ_PLL1_REFCLK>,
+-					  <&serdes_wiz1 TI_WIZ_REFCLK_DIG>;
+-			assigned-clock-parents = <&k3_clks 405 6>,
+-						 <&k3_clks 405 6>,
+-						 <&k3_clks 405 6>;
+-			#address-cells = <1>;
+-			#size-cells = <0>;
+-			#clock-cells = <1>;
+-			status = "disabled";
+-		};
+-	};
+-
+-	serdes_wiz2: wiz@5020000 {
+-		compatible = "ti,j784s4-wiz-10g";
+-		#address-cells = <1>;
+-		#size-cells = <1>;
+-		power-domains = <&k3_pds 406 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 406 2>, <&k3_clks 406 6>, <&serdes_refclk>, <&k3_clks 406 5>;
+-		clock-names = "fck", "core_ref_clk", "ext_ref_clk", "core_ref1_clk";
+-		assigned-clocks = <&k3_clks 406 6>;
+-		assigned-clock-parents = <&k3_clks 406 10>;
+-		num-lanes = <4>;
+-		#reset-cells = <1>;
+-		#clock-cells = <1>;
+-		ranges = <0x05020000 0x00 0x05020000 0x10000>;
+-		status = "disabled";
+-
+-		serdes2: serdes@5020000 {
+-			compatible = "ti,j721e-serdes-10g";
+-			reg = <0x05020000 0x010000>;
+-			reg-names = "torrent_phy";
+-			resets = <&serdes_wiz2 0>;
+-			reset-names = "torrent_reset";
+-			clocks = <&serdes_wiz2 TI_WIZ_PLL0_REFCLK>,
+-				 <&serdes_wiz2 TI_WIZ_PHY_EN_REFCLK>;
+-			clock-names = "refclk", "phy_en_refclk";
+-			assigned-clocks = <&serdes_wiz2 TI_WIZ_PLL0_REFCLK>,
+-					  <&serdes_wiz2 TI_WIZ_PLL1_REFCLK>,
+-					  <&serdes_wiz2 TI_WIZ_REFCLK_DIG>;
+-			assigned-clock-parents = <&k3_clks 406 6>,
+-						 <&k3_clks 406 6>,
+-						 <&k3_clks 406 6>;
+-			#address-cells = <1>;
+-			#size-cells = <0>;
+-			#clock-cells = <1>;
+-			status = "disabled";
+-		};
+-	};
+-
+-	serdes_wiz4: wiz@5050000 {
+-		compatible = "ti,j784s4-wiz-10g";
+-		#address-cells = <1>;
+-		#size-cells = <1>;
+-		power-domains = <&k3_pds 407 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 407 2>, <&k3_clks 407 6>, <&serdes_refclk>, <&k3_clks 407 5>;
+-		clock-names = "fck", "core_ref_clk", "ext_ref_clk", "core_ref1_clk";
+-		assigned-clocks = <&k3_clks 407 6>;
+-		assigned-clock-parents = <&k3_clks 407 10>;
+-		num-lanes = <4>;
+-		#reset-cells = <1>;
+-		#clock-cells = <1>;
+-		ranges = <0x05050000 0x00 0x05050000 0x10000>,
+-			 <0xa030a00 0x00 0xa030a00 0x40>; /* DPTX PHY */
+-		status = "disabled";
+-
+-		serdes4: serdes@5050000 {
+-			/*
+-			 * Note: we also map DPTX PHY registers as the Torrent
+-			 * needs to manage those.
+-			 */
+-			compatible = "ti,j721e-serdes-10g";
+-			reg = <0x05050000 0x010000>,
+-			      <0x0a030a00 0x40>; /* DPTX PHY */
+-			reg-names = "torrent_phy";
+-			resets = <&serdes_wiz4 0>;
+-			reset-names = "torrent_reset";
+-			clocks = <&serdes_wiz4 TI_WIZ_PLL0_REFCLK>,
+-				 <&serdes_wiz4 TI_WIZ_PHY_EN_REFCLK>;
+-			clock-names = "refclk", "phy_en_refclk";
+-			assigned-clocks = <&serdes_wiz4 TI_WIZ_PLL0_REFCLK>,
+-					  <&serdes_wiz4 TI_WIZ_PLL1_REFCLK>,
+-					  <&serdes_wiz4 TI_WIZ_REFCLK_DIG>;
+-			assigned-clock-parents = <&k3_clks 407 6>,
+-						 <&k3_clks 407 6>,
+-						 <&k3_clks 407 6>;
+-			#address-cells = <1>;
+-			#size-cells = <0>;
+-			#clock-cells = <1>;
+-			status = "disabled";
+-		};
+-	};
+-
+-	main_navss: bus@30000000 {
+-		bootph-all;
+-		compatible = "simple-bus";
+-		#address-cells = <2>;
+-		#size-cells = <2>;
+-		ranges = <0x00 0x30000000 0x00 0x30000000 0x00 0x0c400000>;
+-		ti,sci-dev-id = <280>;
+-		dma-coherent;
+-		dma-ranges;
+-
+-		main_navss_intr: interrupt-controller@310e0000 {
+-			compatible = "ti,sci-intr";
+-			reg = <0x00 0x310e0000 0x00 0x4000>;
+-			ti,intr-trigger-type = <4>;
+-			interrupt-controller;
+-			interrupt-parent = <&gic500>;
+-			#interrupt-cells = <1>;
+-			ti,sci = <&sms>;
+-			ti,sci-dev-id = <283>;
+-			ti,interrupt-ranges = <0 64 64>,
+-					      <64 448 64>,
+-					      <128 672 64>;
+-		};
+-
+-		main_udmass_inta: msi-controller@33d00000 {
+-			compatible = "ti,sci-inta";
+-			reg = <0x00 0x33d00000 0x00 0x100000>;
+-			interrupt-controller;
+-			#interrupt-cells = <0>;
+-			interrupt-parent = <&main_navss_intr>;
+-			msi-controller;
+-			ti,sci = <&sms>;
+-			ti,sci-dev-id = <321>;
+-			ti,interrupt-ranges = <0 0 256>;
+-			ti,unmapped-event-sources = <&main_bcdma_csi>;
+-		};
+-
+-		secure_proxy_main: mailbox@32c00000 {
+-			bootph-all;
+-			compatible = "ti,am654-secure-proxy";
+-			#mbox-cells = <1>;
+-			reg-names = "target_data", "rt", "scfg";
+-			reg = <0x00 0x32c00000 0x00 0x100000>,
+-			      <0x00 0x32400000 0x00 0x100000>,
+-			      <0x00 0x32800000 0x00 0x100000>;
+-			interrupt-names = "rx_011";
+-			interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
+-		};
+-
+-		hwspinlock: hwlock@30e00000 {
+-			compatible = "ti,am654-hwspinlock";
+-			reg = <0x00 0x30e00000 0x00 0x1000>;
+-			#hwlock-cells = <1>;
+-		};
+-
+-		mailbox0_cluster0: mailbox@31f80000 {
+-			compatible = "ti,am654-mailbox";
+-			reg = <0x00 0x31f80000 0x00 0x200>;
+-			#mbox-cells = <1>;
+-			ti,mbox-num-users = <4>;
+-			ti,mbox-num-fifos = <16>;
+-			interrupt-parent = <&main_navss_intr>;
+-			status = "disabled";
+-		};
+-
+-		mailbox0_cluster1: mailbox@31f81000 {
+-			compatible = "ti,am654-mailbox";
+-			reg = <0x00 0x31f81000 0x00 0x200>;
+-			#mbox-cells = <1>;
+-			ti,mbox-num-users = <4>;
+-			ti,mbox-num-fifos = <16>;
+-			interrupt-parent = <&main_navss_intr>;
+-			status = "disabled";
+-		};
+-
+-		mailbox0_cluster2: mailbox@31f82000 {
+-			compatible = "ti,am654-mailbox";
+-			reg = <0x00 0x31f82000 0x00 0x200>;
+-			#mbox-cells = <1>;
+-			ti,mbox-num-users = <4>;
+-			ti,mbox-num-fifos = <16>;
+-			interrupt-parent = <&main_navss_intr>;
+-			status = "disabled";
+-		};
+-
+-		mailbox0_cluster3: mailbox@31f83000 {
+-			compatible = "ti,am654-mailbox";
+-			reg = <0x00 0x31f83000 0x00 0x200>;
+-			#mbox-cells = <1>;
+-			ti,mbox-num-users = <4>;
+-			ti,mbox-num-fifos = <16>;
+-			interrupt-parent = <&main_navss_intr>;
+-			status = "disabled";
+-		};
+-
+-		mailbox0_cluster4: mailbox@31f84000 {
+-			compatible = "ti,am654-mailbox";
+-			reg = <0x00 0x31f84000 0x00 0x200>;
+-			#mbox-cells = <1>;
+-			ti,mbox-num-users = <4>;
+-			ti,mbox-num-fifos = <16>;
+-			interrupt-parent = <&main_navss_intr>;
+-			status = "disabled";
+-		};
+-
+-		mailbox0_cluster5: mailbox@31f85000 {
+-			compatible = "ti,am654-mailbox";
+-			reg = <0x00 0x31f85000 0x00 0x200>;
+-			#mbox-cells = <1>;
+-			ti,mbox-num-users = <4>;
+-			ti,mbox-num-fifos = <16>;
+-			interrupt-parent = <&main_navss_intr>;
+-			status = "disabled";
+-		};
+-
+-		mailbox0_cluster6: mailbox@31f86000 {
+-			compatible = "ti,am654-mailbox";
+-			reg = <0x00 0x31f86000 0x00 0x200>;
+-			#mbox-cells = <1>;
+-			ti,mbox-num-users = <4>;
+-			ti,mbox-num-fifos = <16>;
+-			interrupt-parent = <&main_navss_intr>;
+-			status = "disabled";
+-		};
+-
+-		mailbox0_cluster7: mailbox@31f87000 {
+-			compatible = "ti,am654-mailbox";
+-			reg = <0x00 0x31f87000 0x00 0x200>;
+-			#mbox-cells = <1>;
+-			ti,mbox-num-users = <4>;
+-			ti,mbox-num-fifos = <16>;
+-			interrupt-parent = <&main_navss_intr>;
+-			status = "disabled";
+-		};
+-
+-		mailbox0_cluster8: mailbox@31f88000 {
+-			compatible = "ti,am654-mailbox";
+-			reg = <0x00 0x31f88000 0x00 0x200>;
+-			#mbox-cells = <1>;
+-			ti,mbox-num-users = <4>;
+-			ti,mbox-num-fifos = <16>;
+-			interrupt-parent = <&main_navss_intr>;
+-			status = "disabled";
+-		};
+-
+-		mailbox0_cluster9: mailbox@31f89000 {
+-			compatible = "ti,am654-mailbox";
+-			reg = <0x00 0x31f89000 0x00 0x200>;
+-			#mbox-cells = <1>;
+-			ti,mbox-num-users = <4>;
+-			ti,mbox-num-fifos = <16>;
+-			interrupt-parent = <&main_navss_intr>;
+-			status = "disabled";
+-		};
+-
+-		mailbox0_cluster10: mailbox@31f8a000 {
+-			compatible = "ti,am654-mailbox";
+-			reg = <0x00 0x31f8a000 0x00 0x200>;
+-			#mbox-cells = <1>;
+-			ti,mbox-num-users = <4>;
+-			ti,mbox-num-fifos = <16>;
+-			interrupt-parent = <&main_navss_intr>;
+-			status = "disabled";
+-		};
+-
+-		mailbox0_cluster11: mailbox@31f8b000 {
+-			compatible = "ti,am654-mailbox";
+-			reg = <0x00 0x31f8b000 0x00 0x200>;
+-			#mbox-cells = <1>;
+-			ti,mbox-num-users = <4>;
+-			ti,mbox-num-fifos = <16>;
+-			interrupt-parent = <&main_navss_intr>;
+-			status = "disabled";
+-		};
+-
+-		mailbox1_cluster0: mailbox@31f90000 {
+-			compatible = "ti,am654-mailbox";
+-			reg = <0x00 0x31f90000 0x00 0x200>;
+-			#mbox-cells = <1>;
+-			ti,mbox-num-users = <4>;
+-			ti,mbox-num-fifos = <16>;
+-			interrupt-parent = <&main_navss_intr>;
+-			status = "disabled";
+-		};
+-
+-		mailbox1_cluster1: mailbox@31f91000 {
+-			compatible = "ti,am654-mailbox";
+-			reg = <0x00 0x31f91000 0x00 0x200>;
+-			#mbox-cells = <1>;
+-			ti,mbox-num-users = <4>;
+-			ti,mbox-num-fifos = <16>;
+-			interrupt-parent = <&main_navss_intr>;
+-			status = "disabled";
+-		};
+-
+-		mailbox1_cluster2: mailbox@31f92000 {
+-			compatible = "ti,am654-mailbox";
+-			reg = <0x00 0x31f92000 0x00 0x200>;
+-			#mbox-cells = <1>;
+-			ti,mbox-num-users = <4>;
+-			ti,mbox-num-fifos = <16>;
+-			interrupt-parent = <&main_navss_intr>;
+-			status = "disabled";
+-		};
+-
+-		mailbox1_cluster3: mailbox@31f93000 {
+-			compatible = "ti,am654-mailbox";
+-			reg = <0x00 0x31f93000 0x00 0x200>;
+-			#mbox-cells = <1>;
+-			ti,mbox-num-users = <4>;
+-			ti,mbox-num-fifos = <16>;
+-			interrupt-parent = <&main_navss_intr>;
+-			status = "disabled";
+-		};
+-
+-		mailbox1_cluster4: mailbox@31f94000 {
+-			compatible = "ti,am654-mailbox";
+-			reg = <0x00 0x31f94000 0x00 0x200>;
+-			#mbox-cells = <1>;
+-			ti,mbox-num-users = <4>;
+-			ti,mbox-num-fifos = <16>;
+-			interrupt-parent = <&main_navss_intr>;
+-			status = "disabled";
+-		};
+-
+-		mailbox1_cluster5: mailbox@31f95000 {
+-			compatible = "ti,am654-mailbox";
+-			reg = <0x00 0x31f95000 0x00 0x200>;
+-			#mbox-cells = <1>;
+-			ti,mbox-num-users = <4>;
+-			ti,mbox-num-fifos = <16>;
+-			interrupt-parent = <&main_navss_intr>;
+-			status = "disabled";
+-		};
+-
+-		mailbox1_cluster6: mailbox@31f96000 {
+-			compatible = "ti,am654-mailbox";
+-			reg = <0x00 0x31f96000 0x00 0x200>;
+-			#mbox-cells = <1>;
+-			ti,mbox-num-users = <4>;
+-			ti,mbox-num-fifos = <16>;
+-			interrupt-parent = <&main_navss_intr>;
+-			status = "disabled";
+-		};
+-
+-		mailbox1_cluster7: mailbox@31f97000 {
+-			compatible = "ti,am654-mailbox";
+-			reg = <0x00 0x31f97000 0x00 0x200>;
+-			#mbox-cells = <1>;
+-			ti,mbox-num-users = <4>;
+-			ti,mbox-num-fifos = <16>;
+-			interrupt-parent = <&main_navss_intr>;
+-			status = "disabled";
+-		};
+-
+-		mailbox1_cluster8: mailbox@31f98000 {
+-			compatible = "ti,am654-mailbox";
+-			reg = <0x00 0x31f98000 0x00 0x200>;
+-			#mbox-cells = <1>;
+-			ti,mbox-num-users = <4>;
+-			ti,mbox-num-fifos = <16>;
+-			interrupt-parent = <&main_navss_intr>;
+-			status = "disabled";
+-		};
+-
+-		mailbox1_cluster9: mailbox@31f99000 {
+-			compatible = "ti,am654-mailbox";
+-			reg = <0x00 0x31f99000 0x00 0x200>;
+-			#mbox-cells = <1>;
+-			ti,mbox-num-users = <4>;
+-			ti,mbox-num-fifos = <16>;
+-			interrupt-parent = <&main_navss_intr>;
+-			status = "disabled";
+-		};
+-
+-		mailbox1_cluster10: mailbox@31f9a000 {
+-			compatible = "ti,am654-mailbox";
+-			reg = <0x00 0x31f9a000 0x00 0x200>;
+-			#mbox-cells = <1>;
+-			ti,mbox-num-users = <4>;
+-			ti,mbox-num-fifos = <16>;
+-			interrupt-parent = <&main_navss_intr>;
+-			status = "disabled";
+-		};
+-
+-		mailbox1_cluster11: mailbox@31f9b000 {
+-			compatible = "ti,am654-mailbox";
+-			reg = <0x00 0x31f9b000 0x00 0x200>;
+-			#mbox-cells = <1>;
+-			ti,mbox-num-users = <4>;
+-			ti,mbox-num-fifos = <16>;
+-			interrupt-parent = <&main_navss_intr>;
+-			status = "disabled";
+-		};
+-
+-		main_ringacc: ringacc@3c000000 {
+-			compatible = "ti,am654-navss-ringacc";
+-			reg = <0x00 0x3c000000 0x00 0x400000>,
+-			      <0x00 0x38000000 0x00 0x400000>,
+-			      <0x00 0x31120000 0x00 0x100>,
+-			      <0x00 0x33000000 0x00 0x40000>,
+-			      <0x00 0x31080000 0x00 0x40000>;
+-			reg-names = "rt", "fifos", "proxy_gcfg", "proxy_target", "cfg";
+-			ti,num-rings = <1024>;
+-			ti,sci-rm-range-gp-rings = <0x1>;
+-			ti,sci = <&sms>;
+-			ti,sci-dev-id = <315>;
+-			msi-parent = <&main_udmass_inta>;
+-		};
+-
+-		main_udmap: dma-controller@31150000 {
+-			compatible = "ti,j721e-navss-main-udmap";
+-			reg = <0x00 0x31150000 0x00 0x100>,
+-			      <0x00 0x34000000 0x00 0x80000>,
+-			      <0x00 0x35000000 0x00 0x200000>,
+-			      <0x00 0x30b00000 0x00 0x20000>,
+-			      <0x00 0x30c00000 0x00 0x8000>,
+-			      <0x00 0x30d00000 0x00 0x4000>;
+-			reg-names = "gcfg", "rchanrt", "tchanrt",
+-				    "tchan", "rchan", "rflow";
+-			msi-parent = <&main_udmass_inta>;
+-			#dma-cells = <1>;
+-
+-			ti,sci = <&sms>;
+-			ti,sci-dev-id = <319>;
+-			ti,ringacc = <&main_ringacc>;
+-
+-			ti,sci-rm-range-tchan = <0x0d>, /* TX_CHAN */
+-						<0x0f>, /* TX_HCHAN */
+-						<0x10>; /* TX_UHCHAN */
+-			ti,sci-rm-range-rchan = <0x0a>, /* RX_CHAN */
+-						<0x0b>, /* RX_HCHAN */
+-						<0x0c>; /* RX_UHCHAN */
+-			ti,sci-rm-range-rflow = <0x00>; /* GP RFLOW */
+-		};
+-
+-		main_bcdma_csi: dma-controller@311a0000 {
+-			compatible = "ti,j721s2-dmss-bcdma-csi";
+-			reg = <0x00 0x311a0000 0x00 0x100>,
+-			      <0x00 0x35d00000 0x00 0x20000>,
+-			      <0x00 0x35c00000 0x00 0x10000>,
+-			      <0x00 0x35e00000 0x00 0x80000>;
+-			reg-names = "gcfg", "rchanrt", "tchanrt", "ringrt";
+-			msi-parent = <&main_udmass_inta>;
+-			#dma-cells = <3>;
+-			ti,sci = <&sms>;
+-			ti,sci-dev-id = <281>;
+-			ti,sci-rm-range-rchan = <0x21>;
+-			ti,sci-rm-range-tchan = <0x22>;
+-		};
+-
+-		cpts@310d0000 {
+-			compatible = "ti,j721e-cpts";
+-			reg = <0x00 0x310d0000 0x00 0x400>;
+-			reg-names = "cpts";
+-			clocks = <&k3_clks 282 0>;
+-			clock-names = "cpts";
+-			assigned-clocks = <&k3_clks 62 3>; /* CPTS_RFT_CLK */
+-			assigned-clock-parents = <&k3_clks 62 5>; /* MAIN_0_HSDIV6_CLK */
+-			interrupts-extended = <&main_navss_intr 391>;
+-			interrupt-names = "cpts";
+-			ti,cpts-periodic-outputs = <6>;
+-			ti,cpts-ext-ts-inputs = <8>;
+-		};
+-	};
+-
+-	main_cpsw0: ethernet@c000000 {
+-		compatible = "ti,j784s4-cpswxg-nuss";
+-		reg = <0x00 0xc000000 0x00 0x200000>;
+-		reg-names = "cpsw_nuss";
+-		ranges = <0x00 0x00 0x00 0xc000000 0x00 0x200000>;
+-		#address-cells = <2>;
+-		#size-cells = <2>;
+-		dma-coherent;
+-		clocks = <&k3_clks 64 0>;
+-		clock-names = "fck";
+-		power-domains = <&k3_pds 64 TI_SCI_PD_EXCLUSIVE>;
+-
+-		dmas = <&main_udmap 0xca00>,
+-		       <&main_udmap 0xca01>,
+-		       <&main_udmap 0xca02>,
+-		       <&main_udmap 0xca03>,
+-		       <&main_udmap 0xca04>,
+-		       <&main_udmap 0xca05>,
+-		       <&main_udmap 0xca06>,
+-		       <&main_udmap 0xca07>,
+-		       <&main_udmap 0x4a00>;
+-		dma-names = "tx0", "tx1", "tx2", "tx3",
+-			    "tx4", "tx5", "tx6", "tx7",
+-			    "rx";
+-
+-		status = "disabled";
+-
+-		ethernet-ports {
+-			#address-cells = <1>;
+-			#size-cells = <0>;
+-
+-			main_cpsw0_port1: port@1 {
+-				reg = <1>;
+-				label = "port1";
+-				ti,mac-only;
+-				status = "disabled";
+-			};
+-
+-			main_cpsw0_port2: port@2 {
+-				reg = <2>;
+-				label = "port2";
+-				ti,mac-only;
+-				status = "disabled";
+-			};
+-
+-			main_cpsw0_port3: port@3 {
+-				reg = <3>;
+-				label = "port3";
+-				ti,mac-only;
+-				status = "disabled";
+-			};
+-
+-			main_cpsw0_port4: port@4 {
+-				reg = <4>;
+-				label = "port4";
+-				ti,mac-only;
+-				status = "disabled";
+-			};
+-
+-			main_cpsw0_port5: port@5 {
+-				reg = <5>;
+-				label = "port5";
+-				ti,mac-only;
+-				status = "disabled";
+-			};
+-
+-			main_cpsw0_port6: port@6 {
+-				reg = <6>;
+-				label = "port6";
+-				ti,mac-only;
+-				status = "disabled";
+-			};
+-
+-			main_cpsw0_port7: port@7 {
+-				reg = <7>;
+-				label = "port7";
+-				ti,mac-only;
+-				status = "disabled";
+-			};
+-
+-			main_cpsw0_port8: port@8 {
+-				reg = <8>;
+-				label = "port8";
+-				ti,mac-only;
+-				status = "disabled";
+-			};
+-		};
+-
+-		main_cpsw0_mdio: mdio@f00 {
+-			compatible = "ti,cpsw-mdio","ti,davinci_mdio";
+-			reg = <0x00 0xf00 0x00 0x100>;
+-			#address-cells = <1>;
+-			#size-cells = <0>;
+-			clocks = <&k3_clks 64 0>;
+-			clock-names = "fck";
+-			bus_freq = <1000000>;
+-			status = "disabled";
+-		};
+-
+-		cpts@3d000 {
+-			compatible = "ti,am65-cpts";
+-			reg = <0x00 0x3d000 0x00 0x400>;
+-			clocks = <&k3_clks 64 3>;
+-			clock-names = "cpts";
+-			interrupts-extended = <&gic500 GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
+-			interrupt-names = "cpts";
+-			ti,cpts-ext-ts-inputs = <4>;
+-			ti,cpts-periodic-outputs = <2>;
+-		};
+-	};
+-
+-	main_cpsw1: ethernet@c200000 {
+-		compatible = "ti,j721e-cpsw-nuss";
+-		reg = <0x00 0xc200000 0x00 0x200000>;
+-		reg-names = "cpsw_nuss";
+-		ranges = <0x00 0x00 0x00 0xc200000 0x00 0x200000>;
+-		#address-cells = <2>;
+-		#size-cells = <2>;
+-		dma-coherent;
+-		clocks = <&k3_clks 62 0>;
+-		clock-names = "fck";
+-		power-domains = <&k3_pds 62 TI_SCI_PD_EXCLUSIVE>;
+-
+-		dmas = <&main_udmap 0xc640>,
+-			<&main_udmap 0xc641>,
+-			<&main_udmap 0xc642>,
+-			<&main_udmap 0xc643>,
+-			<&main_udmap 0xc644>,
+-			<&main_udmap 0xc645>,
+-			<&main_udmap 0xc646>,
+-			<&main_udmap 0xc647>,
+-			<&main_udmap 0x4640>;
+-		dma-names = "tx0", "tx1", "tx2", "tx3",
+-				"tx4", "tx5", "tx6", "tx7",
+-				"rx";
+-
+-		status = "disabled";
+-
+-		ethernet-ports {
+-			#address-cells = <1>;
+-			#size-cells = <0>;
+-
+-			main_cpsw1_port1: port@1 {
+-				reg = <1>;
+-				label = "port1";
+-				phys = <&cpsw1_phy_gmii_sel 1>;
+-				ti,mac-only;
+-				status = "disabled";
+-			};
+-		};
+-
+-		main_cpsw1_mdio: mdio@f00 {
+-			compatible = "ti,cpsw-mdio", "ti,davinci_mdio";
+-			reg = <0x00 0xf00 0x00 0x100>;
+-			#address-cells = <1>;
+-			#size-cells = <0>;
+-			clocks = <&k3_clks 62 0>;
+-			clock-names = "fck";
+-			bus_freq = <1000000>;
+-			status = "disabled";
+-		};
+-
+-		cpts@3d000 {
+-			compatible = "ti,am65-cpts";
+-			reg = <0x00 0x3d000 0x00 0x400>;
+-			clocks = <&k3_clks 62 3>;
+-			clock-names = "cpts";
+-			interrupts-extended = <&gic500 GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
+-			interrupt-names = "cpts";
+-			ti,cpts-ext-ts-inputs = <4>;
+-			ti,cpts-periodic-outputs = <2>;
+-		};
+-	};
+-
+-	main_mcan0: can@2701000 {
+-		compatible = "bosch,m_can";
+-		reg = <0x00 0x02701000 0x00 0x200>,
+-		      <0x00 0x02708000 0x00 0x8000>;
+-		reg-names = "m_can", "message_ram";
+-		power-domains = <&k3_pds 245 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 245 6>, <&k3_clks 245 1>;
+-		clock-names = "hclk", "cclk";
+-		interrupts = <GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
+-			     <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>;
+-		interrupt-names = "int0", "int1";
+-		bosch,mram-cfg = <0x00 128 64 64 64 64 32 32>;
+-		status = "disabled";
+-	};
+-
+-	main_mcan1: can@2711000 {
+-		compatible = "bosch,m_can";
+-		reg = <0x00 0x02711000 0x00 0x200>,
+-		      <0x00 0x02718000 0x00 0x8000>;
+-		reg-names = "m_can", "message_ram";
+-		power-domains = <&k3_pds 246 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 246 6>, <&k3_clks 246 1>;
+-		clock-names = "hclk", "cclk";
+-		interrupts = <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>,
+-			     <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>;
+-		interrupt-names = "int0", "int1";
+-		bosch,mram-cfg = <0x00 128 64 64 64 64 32 32>;
+-		status = "disabled";
+-	};
+-
+-	main_mcan2: can@2721000 {
+-		compatible = "bosch,m_can";
+-		reg = <0x00 0x02721000 0x00 0x200>,
+-		      <0x00 0x02728000 0x00 0x8000>;
+-		reg-names = "m_can", "message_ram";
+-		power-domains = <&k3_pds 247 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 247 6>, <&k3_clks 247 1>;
+-		clock-names = "hclk", "cclk";
+-		interrupts = <GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>,
+-			     <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>;
+-		interrupt-names = "int0", "int1";
+-		bosch,mram-cfg = <0x00 128 64 64 64 64 32 32>;
+-		status = "disabled";
+-	};
+-
+-	main_mcan3: can@2731000 {
+-		compatible = "bosch,m_can";
+-		reg = <0x00 0x02731000 0x00 0x200>,
+-		      <0x00 0x02738000 0x00 0x8000>;
+-		reg-names = "m_can", "message_ram";
+-		power-domains = <&k3_pds 248 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 248 6>, <&k3_clks 248 1>;
+-		clock-names = "hclk", "cclk";
+-		interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>,
+-			     <GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>;
+-		interrupt-names = "int0", "int1";
+-		bosch,mram-cfg = <0x00 128 64 64 64 64 32 32>;
+-		status = "disabled";
+-	};
+-
+-	main_mcan4: can@2741000 {
+-		compatible = "bosch,m_can";
+-		reg = <0x00 0x02741000 0x00 0x200>,
+-		      <0x00 0x02748000 0x00 0x8000>;
+-		reg-names = "m_can", "message_ram";
+-		power-domains = <&k3_pds 249 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 249 6>, <&k3_clks 249 1>;
+-		clock-names = "hclk", "cclk";
+-		interrupts = <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>,
+-			     <GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>;
+-		interrupt-names = "int0", "int1";
+-		bosch,mram-cfg = <0x00 128 64 64 64 64 32 32>;
+-		status = "disabled";
+-	};
+-
+-	main_mcan5: can@2751000 {
+-		compatible = "bosch,m_can";
+-		reg = <0x00 0x02751000 0x00 0x200>,
+-		      <0x00 0x02758000 0x00 0x8000>;
+-		reg-names = "m_can", "message_ram";
+-		power-domains = <&k3_pds 250 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 250 6>, <&k3_clks 250 1>;
+-		clock-names = "hclk", "cclk";
+-		interrupts = <GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>,
+-			     <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>;
+-		interrupt-names = "int0", "int1";
+-		bosch,mram-cfg = <0x00 128 64 64 64 64 32 32>;
+-		status = "disabled";
+-	};
+-
+-	main_mcan6: can@2761000 {
+-		compatible = "bosch,m_can";
+-		reg = <0x00 0x02761000 0x00 0x200>,
+-		      <0x00 0x02768000 0x00 0x8000>;
+-		reg-names = "m_can", "message_ram";
+-		power-domains = <&k3_pds 251 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 251 6>, <&k3_clks 251 1>;
+-		clock-names = "hclk", "cclk";
+-		interrupts = <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
+-			     <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
+-		interrupt-names = "int0", "int1";
+-		bosch,mram-cfg = <0x00 128 64 64 64 64 32 32>;
+-		status = "disabled";
+-	};
+-
+-	main_mcan7: can@2771000 {
+-		compatible = "bosch,m_can";
+-		reg = <0x00 0x02771000 0x00 0x200>,
+-		      <0x00 0x02778000 0x00 0x8000>;
+-		reg-names = "m_can", "message_ram";
+-		power-domains = <&k3_pds 252 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 252 6>, <&k3_clks 252 1>;
+-		clock-names = "hclk", "cclk";
+-		interrupts = <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>,
+-			     <GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>;
+-		interrupt-names = "int0", "int1";
+-		bosch,mram-cfg = <0x00 128 64 64 64 64 32 32>;
+-		status = "disabled";
+-	};
+-
+-	main_mcan8: can@2781000 {
+-		compatible = "bosch,m_can";
+-		reg = <0x00 0x02781000 0x00 0x200>,
+-		      <0x00 0x02788000 0x00 0x8000>;
+-		reg-names = "m_can", "message_ram";
+-		power-domains = <&k3_pds 253 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 253 6>, <&k3_clks 253 1>;
+-		clock-names = "hclk", "cclk";
+-		interrupts = <GIC_SPI 576 IRQ_TYPE_LEVEL_HIGH>,
+-			     <GIC_SPI 577 IRQ_TYPE_LEVEL_HIGH>;
+-		interrupt-names = "int0", "int1";
+-		bosch,mram-cfg = <0x00 128 64 64 64 64 32 32>;
+-		status = "disabled";
+-	};
+-
+-	main_mcan9: can@2791000 {
+-		compatible = "bosch,m_can";
+-		reg = <0x00 0x02791000 0x00 0x200>,
+-		      <0x00 0x02798000 0x00 0x8000>;
+-		reg-names = "m_can", "message_ram";
+-		power-domains = <&k3_pds 254 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 254 6>, <&k3_clks 254 1>;
+-		clock-names = "hclk", "cclk";
+-		interrupts = <GIC_SPI 579 IRQ_TYPE_LEVEL_HIGH>,
+-			     <GIC_SPI 580 IRQ_TYPE_LEVEL_HIGH>;
+-		interrupt-names = "int0", "int1";
+-		bosch,mram-cfg = <0x00 128 64 64 64 64 32 32>;
+-		status = "disabled";
+-	};
+-
+-	main_mcan10: can@27a1000 {
+-		compatible = "bosch,m_can";
+-		reg = <0x00 0x027a1000 0x00 0x200>,
+-		      <0x00 0x027a8000 0x00 0x8000>;
+-		reg-names = "m_can", "message_ram";
+-		power-domains = <&k3_pds 255 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 255 6>, <&k3_clks 255 1>;
+-		clock-names = "hclk", "cclk";
+-		interrupts = <GIC_SPI 582 IRQ_TYPE_LEVEL_HIGH>,
+-			     <GIC_SPI 583 IRQ_TYPE_LEVEL_HIGH>;
+-		interrupt-names = "int0", "int1";
+-		bosch,mram-cfg = <0x00 128 64 64 64 64 32 32>;
+-		status = "disabled";
+-	};
+-
+-	main_mcan11: can@27b1000 {
+-		compatible = "bosch,m_can";
+-		reg = <0x00 0x027b1000 0x00 0x200>,
+-		      <0x00 0x027b8000 0x00 0x8000>;
+-		reg-names = "m_can", "message_ram";
+-		power-domains = <&k3_pds 256 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 256 6>, <&k3_clks 256 1>;
+-		clock-names = "hclk", "cclk";
+-		interrupts = <GIC_SPI 585 IRQ_TYPE_LEVEL_HIGH>,
+-			     <GIC_SPI 586 IRQ_TYPE_LEVEL_HIGH>;
+-		interrupt-names = "int0", "int1";
+-		bosch,mram-cfg = <0x00 128 64 64 64 64 32 32>;
+-		status = "disabled";
+-	};
+-
+-	main_mcan12: can@27c1000 {
+-		compatible = "bosch,m_can";
+-		reg = <0x00 0x027c1000 0x00 0x200>,
+-		      <0x00 0x027c8000 0x00 0x8000>;
+-		reg-names = "m_can", "message_ram";
+-		power-domains = <&k3_pds 257 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 257 6>, <&k3_clks 257 1>;
+-		clock-names = "hclk", "cclk";
+-		interrupts = <GIC_SPI 588 IRQ_TYPE_LEVEL_HIGH>,
+-			     <GIC_SPI 589 IRQ_TYPE_LEVEL_HIGH>;
+-		interrupt-names = "int0", "int1";
+-		bosch,mram-cfg = <0x00 128 64 64 64 64 32 32>;
+-		status = "disabled";
+-	};
+-
+-	main_mcan13: can@27d1000 {
+-		compatible = "bosch,m_can";
+-		reg = <0x00 0x027d1000 0x00 0x200>,
+-		      <0x00 0x027d8000 0x00 0x8000>;
+-		reg-names = "m_can", "message_ram";
+-		power-domains = <&k3_pds 258 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 258 6>, <&k3_clks 258 1>;
+-		clock-names = "hclk", "cclk";
+-		interrupts = <GIC_SPI 591 IRQ_TYPE_LEVEL_HIGH>,
+-			     <GIC_SPI 592 IRQ_TYPE_LEVEL_HIGH>;
+-		interrupt-names = "int0", "int1";
+-		bosch,mram-cfg = <0x00 128 64 64 64 64 32 32>;
+-		status = "disabled";
+-	};
+-
+-	main_mcan14: can@2681000 {
+-		compatible = "bosch,m_can";
+-		reg = <0x00 0x02681000 0x00 0x200>,
+-		      <0x00 0x02688000 0x00 0x8000>;
+-		reg-names = "m_can", "message_ram";
+-		power-domains = <&k3_pds 259 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 259 6>, <&k3_clks 259 1>;
+-		clock-names = "hclk", "cclk";
+-		interrupts = <GIC_SPI 594 IRQ_TYPE_LEVEL_HIGH>,
+-			     <GIC_SPI 595 IRQ_TYPE_LEVEL_HIGH>;
+-		interrupt-names = "int0", "int1";
+-		bosch,mram-cfg = <0x00 128 64 64 64 64 32 32>;
+-		status = "disabled";
+-	};
+-
+-	main_mcan15: can@2691000 {
+-		compatible = "bosch,m_can";
+-		reg = <0x00 0x02691000 0x00 0x200>,
+-		      <0x00 0x02698000 0x00 0x8000>;
+-		reg-names = "m_can", "message_ram";
+-		power-domains = <&k3_pds 260 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 260 6>, <&k3_clks 260 1>;
+-		clock-names = "hclk", "cclk";
+-		interrupts = <GIC_SPI 597 IRQ_TYPE_LEVEL_HIGH>,
+-			     <GIC_SPI 598 IRQ_TYPE_LEVEL_HIGH>;
+-		interrupt-names = "int0", "int1";
+-		bosch,mram-cfg = <0x00 128 64 64 64 64 32 32>;
+-		status = "disabled";
+-	};
+-
+-	main_mcan16: can@26a1000 {
+-		compatible = "bosch,m_can";
+-		reg = <0x00 0x026a1000 0x00 0x200>,
+-		      <0x00 0x026a8000 0x00 0x8000>;
+-		reg-names = "m_can", "message_ram";
+-		power-domains = <&k3_pds 261 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 261 6>, <&k3_clks 261 1>;
+-		clock-names = "hclk", "cclk";
+-		interrupts = <GIC_SPI 784 IRQ_TYPE_LEVEL_HIGH>,
+-			     <GIC_SPI 785 IRQ_TYPE_LEVEL_HIGH>;
+-		interrupt-names = "int0", "int1";
+-		bosch,mram-cfg = <0x00 128 64 64 64 64 32 32>;
+-		status = "disabled";
+-	};
+-
+-	main_mcan17: can@26b1000 {
+-		compatible = "bosch,m_can";
+-		reg = <0x00 0x026b1000 0x00 0x200>,
+-		      <0x00 0x026b8000 0x00 0x8000>;
+-		reg-names = "m_can", "message_ram";
+-		power-domains = <&k3_pds 262 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 262 6>, <&k3_clks 262 1>;
+-		clock-names = "hclk", "cclk";
+-		interrupts = <GIC_SPI 787 IRQ_TYPE_LEVEL_HIGH>,
+-			     <GIC_SPI 788 IRQ_TYPE_LEVEL_HIGH>;
+-		interrupt-names = "int0", "int1";
+-		bosch,mram-cfg = <0x00 128 64 64 64 64 32 32>;
+-		status = "disabled";
+-	};
+-
+-	main_spi0: spi@2100000 {
+-		compatible = "ti,am654-mcspi","ti,omap4-mcspi";
+-		reg = <0x00 0x02100000 0x00 0x400>;
+-		interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>;
+-		#address-cells = <1>;
+-		#size-cells = <0>;
+-		power-domains = <&k3_pds 376 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 376 1>;
+-		status = "disabled";
+-	};
+-
+-	main_spi1: spi@2110000 {
+-		compatible = "ti,am654-mcspi","ti,omap4-mcspi";
+-		reg = <0x00 0x02110000 0x00 0x400>;
+-		interrupts = <GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>;
+-		#address-cells = <1>;
+-		#size-cells = <0>;
+-		power-domains = <&k3_pds 377 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 377 1>;
+-		status = "disabled";
+-	};
+-
+-	main_spi2: spi@2120000 {
+-		compatible = "ti,am654-mcspi","ti,omap4-mcspi";
+-		reg = <0x00 0x02120000 0x00 0x400>;
+-		interrupts = <GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>;
+-		#address-cells = <1>;
+-		#size-cells = <0>;
+-		power-domains = <&k3_pds 378 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 378 1>;
+-		status = "disabled";
+-	};
+-
+-	main_spi3: spi@2130000 {
+-		compatible = "ti,am654-mcspi","ti,omap4-mcspi";
+-		reg = <0x00 0x02130000 0x00 0x400>;
+-		interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>;
+-		#address-cells = <1>;
+-		#size-cells = <0>;
+-		power-domains = <&k3_pds 379 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 379 1>;
+-		status = "disabled";
+-	};
+-
+-	main_spi4: spi@2140000 {
+-		compatible = "ti,am654-mcspi","ti,omap4-mcspi";
+-		reg = <0x00 0x02140000 0x00 0x400>;
+-		interrupts = <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>;
+-		#address-cells = <1>;
+-		#size-cells = <0>;
+-		power-domains = <&k3_pds 380 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 380 1>;
+-		status = "disabled";
+-	};
+-
+-	main_spi5: spi@2150000 {
+-		compatible = "ti,am654-mcspi","ti,omap4-mcspi";
+-		reg = <0x00 0x02150000 0x00 0x400>;
+-		interrupts = <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>;
+-		#address-cells = <1>;
+-		#size-cells = <0>;
+-		power-domains = <&k3_pds 381 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 381 1>;
+-		status = "disabled";
+-	};
+-
+-	main_spi6: spi@2160000 {
+-		compatible = "ti,am654-mcspi","ti,omap4-mcspi";
+-		reg = <0x00 0x02160000 0x00 0x400>;
+-		interrupts = <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>;
+-		#address-cells = <1>;
+-		#size-cells = <0>;
+-		power-domains = <&k3_pds 382 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 382 1>;
+-		status = "disabled";
+-	};
+-
+-	main_spi7: spi@2170000 {
+-		compatible = "ti,am654-mcspi","ti,omap4-mcspi";
+-		reg = <0x00 0x02170000 0x00 0x400>;
+-		interrupts = <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>;
+-		#address-cells = <1>;
+-		#size-cells = <0>;
+-		power-domains = <&k3_pds 383 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 383 1>;
+-		status = "disabled";
+-	};
+-
+-	ufs_wrapper: ufs-wrapper@4e80000 {
+-		compatible = "ti,j721e-ufs";
+-		reg = <0x00 0x4e80000 0x00 0x100>;
+-		power-domains = <&k3_pds 387 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 387 3>;
+-		assigned-clocks = <&k3_clks 387 3>;
+-		assigned-clock-parents = <&k3_clks 387 6>;
+-		ranges;
+-		#address-cells = <2>;
+-		#size-cells = <2>;
+-		status = "disabled";
+-
+-		ufs@4e84000 {
+-			compatible = "cdns,ufshc-m31-16nm", "jedec,ufs-2.0";
+-			reg = <0x00 0x4e84000 0x00 0x10000>;
+-			interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
+-			freq-table-hz = <250000000 250000000>, <19200000 19200000>,
+-					<19200000 19200000>;
+-			clocks = <&k3_clks 387 1>, <&k3_clks 387 3>, <&k3_clks 387 3>;
+-			clock-names = "core_clk", "phy_clk", "ref_clk";
+-			dma-coherent;
+-		};
+-	};
+-
+-	main_r5fss0: r5fss@5c00000 {
+-		compatible = "ti,j721s2-r5fss";
+-		ti,cluster-mode = <1>;
+-		#address-cells = <1>;
+-		#size-cells = <1>;
+-		ranges = <0x5c00000 0x00 0x5c00000 0x20000>,
+-			 <0x5d00000 0x00 0x5d00000 0x20000>;
+-		power-domains = <&k3_pds 336 TI_SCI_PD_EXCLUSIVE>;
+-
+-		main_r5fss0_core0: r5f@5c00000 {
+-			compatible = "ti,j721s2-r5f";
+-			reg = <0x5c00000 0x00010000>,
+-			      <0x5c10000 0x00010000>;
+-			reg-names = "atcm", "btcm";
+-			ti,sci = <&sms>;
+-			ti,sci-dev-id = <339>;
+-			ti,sci-proc-ids = <0x06 0xff>;
+-			resets = <&k3_reset 339 1>;
+-			firmware-name = "j784s4-main-r5f0_0-fw";
+-			ti,atcm-enable = <1>;
+-			ti,btcm-enable = <1>;
+-			ti,loczrama = <1>;
+-		};
+-
+-		main_r5fss0_core1: r5f@5d00000 {
+-			compatible = "ti,j721s2-r5f";
+-			reg = <0x5d00000 0x00010000>,
+-			      <0x5d10000 0x00010000>;
+-			reg-names = "atcm", "btcm";
+-			ti,sci = <&sms>;
+-			ti,sci-dev-id = <340>;
+-			ti,sci-proc-ids = <0x07 0xff>;
+-			resets = <&k3_reset 340 1>;
+-			firmware-name = "j784s4-main-r5f0_1-fw";
+-			ti,atcm-enable = <1>;
+-			ti,btcm-enable = <1>;
+-			ti,loczrama = <1>;
+-		};
+-	};
+-
+-	main_r5fss1: r5fss@5e00000 {
+-		compatible = "ti,j721s2-r5fss";
+-		ti,cluster-mode = <1>;
+-		#address-cells = <1>;
+-		#size-cells = <1>;
+-		ranges = <0x5e00000 0x00 0x5e00000 0x20000>,
+-			 <0x5f00000 0x00 0x5f00000 0x20000>;
+-		power-domains = <&k3_pds 337 TI_SCI_PD_EXCLUSIVE>;
+-
+-		main_r5fss1_core0: r5f@5e00000 {
+-			compatible = "ti,j721s2-r5f";
+-			reg = <0x5e00000 0x00010000>,
+-			      <0x5e10000 0x00010000>;
+-			reg-names = "atcm", "btcm";
+-			ti,sci = <&sms>;
+-			ti,sci-dev-id = <341>;
+-			ti,sci-proc-ids = <0x08 0xff>;
+-			resets = <&k3_reset 341 1>;
+-			firmware-name = "j784s4-main-r5f1_0-fw";
+-			ti,atcm-enable = <1>;
+-			ti,btcm-enable = <1>;
+-			ti,loczrama = <1>;
+-		};
+-
+-		main_r5fss1_core1: r5f@5f00000 {
+-			compatible = "ti,j721s2-r5f";
+-			reg = <0x5f00000 0x00010000>,
+-			      <0x5f10000 0x00010000>;
+-			reg-names = "atcm", "btcm";
+-			ti,sci = <&sms>;
+-			ti,sci-dev-id = <342>;
+-			ti,sci-proc-ids = <0x09 0xff>;
+-			resets = <&k3_reset 342 1>;
+-			firmware-name = "j784s4-main-r5f1_1-fw";
+-			ti,atcm-enable = <1>;
+-			ti,btcm-enable = <1>;
+-			ti,loczrama = <1>;
+-		};
+-	};
+-
+-	main_r5fss2: r5fss@5900000 {
+-		compatible = "ti,j721s2-r5fss";
+-		ti,cluster-mode = <1>;
+-		#address-cells = <1>;
+-		#size-cells = <1>;
+-		ranges = <0x5900000 0x00 0x5900000 0x20000>,
+-			 <0x5a00000 0x00 0x5a00000 0x20000>;
+-		power-domains = <&k3_pds 338 TI_SCI_PD_EXCLUSIVE>;
+-
+-		main_r5fss2_core0: r5f@5900000 {
+-			compatible = "ti,j721s2-r5f";
+-			reg = <0x5900000 0x00010000>,
+-			      <0x5910000 0x00010000>;
+-			reg-names = "atcm", "btcm";
+-			ti,sci = <&sms>;
+-			ti,sci-dev-id = <343>;
+-			ti,sci-proc-ids = <0x0a 0xff>;
+-			resets = <&k3_reset 343 1>;
+-			firmware-name = "j784s4-main-r5f2_0-fw";
+-			ti,atcm-enable = <1>;
+-			ti,btcm-enable = <1>;
+-			ti,loczrama = <1>;
+-		};
+-
+-		main_r5fss2_core1: r5f@5a00000 {
+-			compatible = "ti,j721s2-r5f";
+-			reg = <0x5a00000 0x00010000>,
+-			      <0x5a10000 0x00010000>;
+-			reg-names = "atcm", "btcm";
+-			ti,sci = <&sms>;
+-			ti,sci-dev-id = <344>;
+-			ti,sci-proc-ids = <0x0b 0xff>;
+-			resets = <&k3_reset 344 1>;
+-			firmware-name = "j784s4-main-r5f2_1-fw";
+-			ti,atcm-enable = <1>;
+-			ti,btcm-enable = <1>;
+-			ti,loczrama = <1>;
+-		};
+-	};
+-
+-	c71_0: dsp@64800000 {
+-		compatible = "ti,j721s2-c71-dsp";
+-		reg = <0x00 0x64800000 0x00 0x00080000>,
+-		      <0x00 0x64e00000 0x00 0x0000c000>;
+-		reg-names = "l2sram", "l1dram";
+-		ti,sci = <&sms>;
+-		ti,sci-dev-id = <30>;
+-		ti,sci-proc-ids = <0x30 0xff>;
+-		resets = <&k3_reset 30 1>;
+-		firmware-name = "j784s4-c71_0-fw";
+-		status = "disabled";
+-	};
+-
+-	c71_1: dsp@65800000 {
+-		compatible = "ti,j721s2-c71-dsp";
+-		reg = <0x00 0x65800000 0x00 0x00080000>,
+-		      <0x00 0x65e00000 0x00 0x0000c000>;
+-		reg-names = "l2sram", "l1dram";
+-		ti,sci = <&sms>;
+-		ti,sci-dev-id = <33>;
+-		ti,sci-proc-ids = <0x31 0xff>;
+-		resets = <&k3_reset 33 1>;
+-		firmware-name = "j784s4-c71_1-fw";
+-		status = "disabled";
+-	};
+-
+-	c71_2: dsp@66800000 {
+-		compatible = "ti,j721s2-c71-dsp";
+-		reg = <0x00 0x66800000 0x00 0x00080000>,
+-		      <0x00 0x66e00000 0x00 0x0000c000>;
+-		reg-names = "l2sram", "l1dram";
+-		ti,sci = <&sms>;
+-		ti,sci-dev-id = <37>;
+-		ti,sci-proc-ids = <0x32 0xff>;
+-		resets = <&k3_reset 37 1>;
+-		firmware-name = "j784s4-c71_2-fw";
+-		status = "disabled";
+-	};
+-
++&cbass_main {
+ 	c71_3: dsp@67800000 {
+ 		compatible = "ti,j721s2-c71-dsp";
+ 		reg = <0x00 0x67800000 0x00 0x00080000>,
+ 		      <0x00 0x67e00000 0x00 0x0000c000>;
+ 		reg-names = "l2sram", "l1dram";
++		resets = <&k3_reset 40 1>;
++		firmware-name = "j784s4-c71_3-fw";
+ 		ti,sci = <&sms>;
+ 		ti,sci-dev-id = <40>;
+ 		ti,sci-proc-ids = <0x33 0xff>;
+-		resets = <&k3_reset 40 1>;
+-		firmware-name = "j784s4-c71_3-fw";
+-		status = "disabled";
+-	};
+-
+-	main_esm: esm@700000 {
+-		compatible = "ti,j721e-esm";
+-		reg = <0x00 0x700000 0x00 0x1000>;
+-		ti,esm-pins = <688>, <689>, <690>, <691>, <692>, <693>, <694>,
+-			      <695>;
+-		bootph-pre-ram;
+-	};
+-
+-	watchdog0: watchdog@2200000 {
+-		compatible = "ti,j7-rti-wdt";
+-		reg = <0x00 0x2200000 0x00 0x100>;
+-		clocks = <&k3_clks 348 0>;
+-		power-domains = <&k3_pds 348 TI_SCI_PD_EXCLUSIVE>;
+-		assigned-clocks = <&k3_clks 348 0>;
+-		assigned-clock-parents = <&k3_clks 348 4>;
+-	};
+-
+-	watchdog1: watchdog@2210000 {
+-		compatible = "ti,j7-rti-wdt";
+-		reg = <0x00 0x2210000 0x00 0x100>;
+-		clocks = <&k3_clks 349 0>;
+-		power-domains = <&k3_pds 349 TI_SCI_PD_EXCLUSIVE>;
+-		assigned-clocks = <&k3_clks 349 0>;
+-		assigned-clock-parents = <&k3_clks 349 4>;
+-	};
+-
+-	watchdog2: watchdog@2220000 {
+-		compatible = "ti,j7-rti-wdt";
+-		reg = <0x00 0x2220000 0x00 0x100>;
+-		clocks = <&k3_clks 350 0>;
+-		power-domains = <&k3_pds 350 TI_SCI_PD_EXCLUSIVE>;
+-		assigned-clocks = <&k3_clks 350 0>;
+-		assigned-clock-parents = <&k3_clks 350 4>;
+-	};
+-
+-	watchdog3: watchdog@2230000 {
+-		compatible = "ti,j7-rti-wdt";
+-		reg = <0x00 0x2230000 0x00 0x100>;
+-		clocks = <&k3_clks 351 0>;
+-		power-domains = <&k3_pds 351 TI_SCI_PD_EXCLUSIVE>;
+-		assigned-clocks = <&k3_clks 351 0>;
+-		assigned-clock-parents = <&k3_clks 351 4>;
+-	};
+-
+-	watchdog4: watchdog@2240000 {
+-		compatible = "ti,j7-rti-wdt";
+-		reg = <0x00 0x2240000 0x00 0x100>;
+-		clocks = <&k3_clks 352 0>;
+-		power-domains = <&k3_pds 352 TI_SCI_PD_EXCLUSIVE>;
+-		assigned-clocks = <&k3_clks 352 0>;
+-		assigned-clock-parents = <&k3_clks 352 4>;
+-	};
+-
+-	watchdog5: watchdog@2250000 {
+-		compatible = "ti,j7-rti-wdt";
+-		reg = <0x00 0x2250000 0x00 0x100>;
+-		clocks = <&k3_clks 353 0>;
+-		power-domains = <&k3_pds 353 TI_SCI_PD_EXCLUSIVE>;
+-		assigned-clocks = <&k3_clks 353 0>;
+-		assigned-clock-parents = <&k3_clks 353 4>;
+-	};
+-
+-	watchdog6: watchdog@2260000 {
+-		compatible = "ti,j7-rti-wdt";
+-		reg = <0x00 0x2260000 0x00 0x100>;
+-		clocks = <&k3_clks 354 0>;
+-		power-domains = <&k3_pds 354 TI_SCI_PD_EXCLUSIVE>;
+-		assigned-clocks = <&k3_clks 354 0>;
+-		assigned-clock-parents = <&k3_clks 354 4>;
+-	};
+-
+-	watchdog7: watchdog@2270000 {
+-		compatible = "ti,j7-rti-wdt";
+-		reg = <0x00 0x2270000 0x00 0x100>;
+-		clocks = <&k3_clks 355 0>;
+-		power-domains = <&k3_pds 355 TI_SCI_PD_EXCLUSIVE>;
+-		assigned-clocks = <&k3_clks 355 0>;
+-		assigned-clock-parents = <&k3_clks 355 4>;
+-	};
+-
+-	/*
+-	 * The following RTI instances are coupled with MCU R5Fs, c7x and
+-	 * GPU so keeping them reserved as these will be used by their
+-	 * respective firmware
+-	 */
+-	watchdog8: watchdog@22f0000 {
+-		compatible = "ti,j7-rti-wdt";
+-		reg = <0x00 0x22f0000 0x00 0x100>;
+-		clocks = <&k3_clks 360 0>;
+-		power-domains = <&k3_pds 360 TI_SCI_PD_EXCLUSIVE>;
+-		assigned-clocks = <&k3_clks 360 0>;
+-		assigned-clock-parents = <&k3_clks 360 4>;
+-		/* reserved for GPU */
+-		status = "reserved";
+-	};
+-
+-	watchdog9: watchdog@2300000 {
+-		compatible = "ti,j7-rti-wdt";
+-		reg = <0x00 0x2300000 0x00 0x100>;
+-		clocks = <&k3_clks 356 0>;
+-		power-domains = <&k3_pds 356 TI_SCI_PD_EXCLUSIVE>;
+-		assigned-clocks = <&k3_clks 356 0>;
+-		assigned-clock-parents = <&k3_clks 356 4>;
+-		/* reserved for C7X_0 DSP */
+-		status = "reserved";
+-	};
+-
+-	watchdog10: watchdog@2310000 {
+-		compatible = "ti,j7-rti-wdt";
+-		reg = <0x00 0x2310000 0x00 0x100>;
+-		clocks = <&k3_clks 357 0>;
+-		power-domains = <&k3_pds 357 TI_SCI_PD_EXCLUSIVE>;
+-		assigned-clocks = <&k3_clks 357 0>;
+-		assigned-clock-parents = <&k3_clks 357 4>;
+-		/* reserved for C7X_1 DSP */
+-		status = "reserved";
+-	};
+-
+-	watchdog11: watchdog@2320000 {
+-		compatible = "ti,j7-rti-wdt";
+-		reg = <0x00 0x2320000 0x00 0x100>;
+-		clocks = <&k3_clks 358 0>;
+-		power-domains = <&k3_pds 358 TI_SCI_PD_EXCLUSIVE>;
+-		assigned-clocks = <&k3_clks 358 0>;
+-		assigned-clock-parents = <&k3_clks 358 4>;
+-		/* reserved for C7X_2 DSP */
+-		status = "reserved";
+-	};
+-
+-	watchdog12: watchdog@2330000 {
+-		compatible = "ti,j7-rti-wdt";
+-		reg = <0x00 0x2330000 0x00 0x100>;
+-		clocks = <&k3_clks 359 0>;
+-		power-domains = <&k3_pds 359 TI_SCI_PD_EXCLUSIVE>;
+-		assigned-clocks = <&k3_clks 359 0>;
+-		assigned-clock-parents = <&k3_clks 359 4>;
+-		/* reserved for C7X_3 DSP */
+-		status = "reserved";
+-	};
+-
+-	watchdog13: watchdog@23c0000 {
+-		compatible = "ti,j7-rti-wdt";
+-		reg = <0x00 0x23c0000 0x00 0x100>;
+-		clocks = <&k3_clks 361 0>;
+-		power-domains = <&k3_pds 361 TI_SCI_PD_EXCLUSIVE>;
+-		assigned-clocks = <&k3_clks 361 0>;
+-		assigned-clock-parents = <&k3_clks 361 4>;
+-		/* reserved for MAIN_R5F0_0 */
+-		status = "reserved";
+-	};
+-
+-	watchdog14: watchdog@23d0000 {
+-		compatible = "ti,j7-rti-wdt";
+-		reg = <0x00 0x23d0000 0x00 0x100>;
+-		clocks = <&k3_clks 362 0>;
+-		power-domains = <&k3_pds 362 TI_SCI_PD_EXCLUSIVE>;
+-		assigned-clocks = <&k3_clks 362 0>;
+-		assigned-clock-parents = <&k3_clks 362 4>;
+-		/* reserved for MAIN_R5F0_1 */
+-		status = "reserved";
+-	};
+-
+-	watchdog15: watchdog@23e0000 {
+-		compatible = "ti,j7-rti-wdt";
+-		reg = <0x00 0x23e0000 0x00 0x100>;
+-		clocks = <&k3_clks 363 0>;
+-		power-domains = <&k3_pds 363 TI_SCI_PD_EXCLUSIVE>;
+-		assigned-clocks = <&k3_clks 363 0>;
+-		assigned-clock-parents = <&k3_clks 363 4>;
+-		/* reserved for MAIN_R5F1_0 */
+-		status = "reserved";
+-	};
+-
+-	watchdog16: watchdog@23f0000 {
+-		compatible = "ti,j7-rti-wdt";
+-		reg = <0x00 0x23f0000 0x00 0x100>;
+-		clocks = <&k3_clks 364 0>;
+-		power-domains = <&k3_pds 364 TI_SCI_PD_EXCLUSIVE>;
+-		assigned-clocks = <&k3_clks 364 0>;
+-		assigned-clock-parents = <&k3_clks 364 4>;
+-		/* reserved for MAIN_R5F1_1 */
+-		status = "reserved";
+-	};
+-
+-	watchdog17: watchdog@2540000 {
+-		compatible = "ti,j7-rti-wdt";
+-		reg = <0x00 0x2540000 0x00 0x100>;
+-		clocks = <&k3_clks 365 0>;
+-		power-domains = <&k3_pds 365 TI_SCI_PD_EXCLUSIVE>;
+-		assigned-clocks = <&k3_clks 365 0>;
+-		assigned-clock-parents = <&k3_clks 366 4>;
+-		/* reserved for MAIN_R5F2_0 */
+-		status = "reserved";
+-	};
+-
+-	watchdog18: watchdog@2550000 {
+-		compatible = "ti,j7-rti-wdt";
+-		reg = <0x00 0x2550000 0x00 0x100>;
+-		clocks = <&k3_clks 366 0>;
+-		power-domains = <&k3_pds 366 TI_SCI_PD_EXCLUSIVE>;
+-		assigned-clocks = <&k3_clks 366 0>;
+-		assigned-clock-parents = <&k3_clks 366 4>;
+-		/* reserved for MAIN_R5F2_1 */
+-		status = "reserved";
+-	};
+-
+-	mhdp: bridge@a000000 {
+-		compatible = "ti,j721e-mhdp8546";
+-		reg = <0x0 0xa000000 0x0 0x30a00>,
+-		      <0x0 0x4f40000 0x0 0x20>;
+-		reg-names = "mhdptx", "j721e-intg";
+-		clocks = <&k3_clks 217 11>;
+-		interrupt-parent = <&gic500>;
+-		interrupts = <GIC_SPI 614 IRQ_TYPE_LEVEL_HIGH>;
+-		power-domains = <&k3_pds 217 TI_SCI_PD_EXCLUSIVE>;
+-		status = "disabled";
+-
+-		dp0_ports: ports {
+-			#address-cells = <1>;
+-			#size-cells = <0>;
+-			/* Remote-endpoints are on the boards so
+-			 * ports are defined in the platform dt file.
+-			 */
+-		};
+-	};
+-
+-	dss: dss@4a00000 {
+-		compatible = "ti,j721e-dss";
+-		reg = <0x00 0x04a00000 0x00 0x10000>, /* common_m */
+-		      <0x00 0x04a10000 0x00 0x10000>, /* common_s0*/
+-		      <0x00 0x04b00000 0x00 0x10000>, /* common_s1*/
+-		      <0x00 0x04b10000 0x00 0x10000>, /* common_s2*/
+-		      <0x00 0x04a20000 0x00 0x10000>, /* vidl1 */
+-		      <0x00 0x04a30000 0x00 0x10000>, /* vidl2 */
+-		      <0x00 0x04a50000 0x00 0x10000>, /* vid1 */
+-		      <0x00 0x04a60000 0x00 0x10000>, /* vid2 */
+-		      <0x00 0x04a70000 0x00 0x10000>, /* ovr1 */
+-		      <0x00 0x04a90000 0x00 0x10000>, /* ovr2 */
+-		      <0x00 0x04ab0000 0x00 0x10000>, /* ovr3 */
+-		      <0x00 0x04ad0000 0x00 0x10000>, /* ovr4 */
+-		      <0x00 0x04a80000 0x00 0x10000>, /* vp1 */
+-		      <0x00 0x04aa0000 0x00 0x10000>, /* vp1 */
+-		      <0x00 0x04ac0000 0x00 0x10000>, /* vp1 */
+-		      <0x00 0x04ae0000 0x00 0x10000>, /* vp4 */
+-		      <0x00 0x04af0000 0x00 0x10000>; /* wb */
+-		reg-names = "common_m", "common_s0",
+-			    "common_s1", "common_s2",
+-			    "vidl1", "vidl2","vid1","vid2",
+-			    "ovr1", "ovr2", "ovr3", "ovr4",
+-			    "vp1", "vp2", "vp3", "vp4",
+-			    "wb";
+-		clocks = <&k3_clks 218 0>,
+-			 <&k3_clks 218 2>,
+-			 <&k3_clks 218 5>,
+-			 <&k3_clks 218 14>,
+-			 <&k3_clks 218 18>;
+-		clock-names = "fck", "vp1", "vp2", "vp3", "vp4";
+-		power-domains = <&k3_pds 218 TI_SCI_PD_EXCLUSIVE>;
+-		interrupts = <GIC_SPI 602 IRQ_TYPE_LEVEL_HIGH>,
+-			     <GIC_SPI 603 IRQ_TYPE_LEVEL_HIGH>,
+-			     <GIC_SPI 604 IRQ_TYPE_LEVEL_HIGH>,
+-			     <GIC_SPI 605 IRQ_TYPE_LEVEL_HIGH>;
+-		interrupt-names = "common_m",
+-				  "common_s0",
+-				  "common_s1",
+-				  "common_s2";
+ 		status = "disabled";
+-
+-		dss_ports: ports {
+-			/* Ports that DSS drives are platform specific
+-			 * so they are defined in platform dt file.
+-			 */
+-		};
+ 	};
+ 
+-	mcasp0: mcasp@2b00000 {
+-		compatible = "ti,am33xx-mcasp-audio";
+-		reg = <0x00 0x02b00000 0x00 0x2000>,
+-		      <0x00 0x02b08000 0x00 0x1000>;
+-		reg-names = "mpu","dat";
+-		interrupts = <GIC_SPI 544 IRQ_TYPE_LEVEL_HIGH>,
+-			     <GIC_SPI 545 IRQ_TYPE_LEVEL_HIGH>;
+-		interrupt-names = "tx", "rx";
+-		dmas = <&main_udmap 0xc400>, <&main_udmap 0x4400>;
+-		dma-names = "tx", "rx";
+-		clocks = <&k3_clks 265 0>;
++	pcie2_rc: pcie@2920000 {
++		compatible = "ti,j784s4-pcie-host";
++		reg = <0x00 0x02920000 0x00 0x1000>,
++		      <0x00 0x02927000 0x00 0x400>,
++		      <0x00 0x0e000000 0x00 0x00800000>,
++		      <0x44 0x00000000 0x00 0x00001000>;
++		ranges = <0x01000000 0x0 0x00001000 0x44 0x00001000 0x0 0x0010000>,
++			 <0x02000000 0x0 0x00011000 0x44 0x00011000 0x0 0x7fef000>;
++		reg-names = "intd_cfg", "user_cfg", "reg", "cfg";
++		interrupt-names = "link_state";
++		interrupts = <GIC_SPI 342 IRQ_TYPE_EDGE_RISING>;
++		device_type = "pci";
++		max-link-speed = <3>;
++		num-lanes = <2>;
++		power-domains = <&k3_pds 334 TI_SCI_PD_EXCLUSIVE>;
++		clocks = <&k3_clks 334 0>;
+ 		clock-names = "fck";
+-		assigned-clocks = <&k3_clks 265 0>;
+-		assigned-clock-parents = <&k3_clks 265 1>;
+-		power-domains = <&k3_pds 265 TI_SCI_PD_EXCLUSIVE>;
++		#address-cells = <3>;
++		#size-cells = <2>;
++		bus-range = <0x0 0xff>;
++		vendor-id = <0x104c>;
++		device-id = <0xb012>;
++		msi-map = <0x0 &gic_its 0x20000 0x10000>;
++		dma-coherent;
++		dma-ranges = <0x02000000 0x0 0x0 0x0 0x0 0x10000 0x0>;
++		ti,syscon-pcie-ctrl = <&pcie2_ctrl 0x0>;
+ 		status = "disabled";
+ 	};
+ 
+-	mcasp1: mcasp@2b10000 {
+-		compatible = "ti,am33xx-mcasp-audio";
+-		reg = <0x00 0x02b10000 0x00 0x2000>,
+-		      <0x00 0x02b18000 0x00 0x1000>;
+-		reg-names = "mpu","dat";
+-		interrupts = <GIC_SPI 546 IRQ_TYPE_LEVEL_HIGH>,
+-			     <GIC_SPI 547 IRQ_TYPE_LEVEL_HIGH>;
+-		interrupt-names = "tx", "rx";
+-		dmas = <&main_udmap 0xc401>, <&main_udmap 0x4401>;
+-		dma-names = "tx", "rx";
+-		clocks = <&k3_clks 266 0>;
++	pcie3_rc: pcie@2930000 {
++		compatible = "ti,j784s4-pcie-host";
++		reg = <0x00 0x02930000 0x00 0x1000>,
++		      <0x00 0x02937000 0x00 0x400>,
++		      <0x00 0x0e800000 0x00 0x00800000>,
++		      <0x44 0x10000000 0x00 0x00001000>;
++		ranges = <0x01000000 0x0 0x00001000 0x44 0x10001000 0x0 0x0010000>,
++			 <0x02000000 0x0 0x00011000 0x44 0x10011000 0x0 0x7fef000>;
++		reg-names = "intd_cfg", "user_cfg", "reg", "cfg";
++		interrupt-names = "link_state";
++		interrupts = <GIC_SPI 354 IRQ_TYPE_EDGE_RISING>;
++		device_type = "pci";
++		max-link-speed = <3>;
++		num-lanes = <2>;
++		power-domains = <&k3_pds 335 TI_SCI_PD_EXCLUSIVE>;
++		clocks = <&k3_clks 335 0>;
+ 		clock-names = "fck";
+-		assigned-clocks = <&k3_clks 266 0>;
+-		assigned-clock-parents = <&k3_clks 266 1>;
+-		power-domains = <&k3_pds 266 TI_SCI_PD_EXCLUSIVE>;
++		#address-cells = <3>;
++		#size-cells = <2>;
++		bus-range = <0x0 0xff>;
++		vendor-id = <0x104c>;
++		device-id = <0xb012>;
++		msi-map = <0x0 &gic_its 0x30000 0x10000>;
++		dma-coherent;
++		dma-ranges = <0x02000000 0x0 0x0 0x0 0x0 0x10000 0x0>;
++		ti,syscon-pcie-ctrl = <&pcie3_ctrl 0x0>;
+ 		status = "disabled";
+ 	};
+ 
+-	mcasp2: mcasp@2b20000 {
+-		compatible = "ti,am33xx-mcasp-audio";
+-		reg = <0x00 0x02b20000 0x00 0x2000>,
+-		      <0x00 0x02b28000 0x00 0x1000>;
+-		reg-names = "mpu","dat";
+-		interrupts = <GIC_SPI 548 IRQ_TYPE_LEVEL_HIGH>,
+-			     <GIC_SPI 549 IRQ_TYPE_LEVEL_HIGH>;
+-		interrupt-names = "tx", "rx";
+-		dmas = <&main_udmap 0xc402>, <&main_udmap 0x4402>;
+-		dma-names = "tx", "rx";
+-		clocks = <&k3_clks 267 0>;
+-		clock-names = "fck";
+-		assigned-clocks = <&k3_clks 267 0>;
+-		assigned-clock-parents = <&k3_clks 267 1>;
+-		power-domains = <&k3_pds 267 TI_SCI_PD_EXCLUSIVE>;
++	serdes_wiz2: wiz@5020000 {
++		compatible = "ti,j784s4-wiz-10g";
++		ranges = <0x05020000 0x00 0x05020000 0x10000>;
++		#address-cells = <1>;
++		#size-cells = <1>;
++		power-domains = <&k3_pds 406 TI_SCI_PD_EXCLUSIVE>;
++		clocks = <&k3_clks 406 2>, <&k3_clks 406 6>, <&serdes_refclk>, <&k3_clks 406 5>;
++		clock-names = "fck", "core_ref_clk", "ext_ref_clk", "core_ref1_clk";
++		assigned-clocks = <&k3_clks 406 6>;
++		assigned-clock-parents = <&k3_clks 406 10>;
++		num-lanes = <4>;
++		#reset-cells = <1>;
++		#clock-cells = <1>;
+ 		status = "disabled";
++
++		serdes2: serdes@5020000 {
++			compatible = "ti,j721e-serdes-10g";
++			reg = <0x05020000 0x010000>;
++			reg-names = "torrent_phy";
++			resets = <&serdes_wiz2 0>;
++			reset-names = "torrent_reset";
++			clocks = <&serdes_wiz2 TI_WIZ_PLL0_REFCLK>,
++				 <&serdes_wiz2 TI_WIZ_PHY_EN_REFCLK>;
++			clock-names = "refclk", "phy_en_refclk";
++			assigned-clocks = <&serdes_wiz2 TI_WIZ_PLL0_REFCLK>,
++					  <&serdes_wiz2 TI_WIZ_PLL1_REFCLK>,
++					  <&serdes_wiz2 TI_WIZ_REFCLK_DIG>;
++			assigned-clock-parents = <&k3_clks 406 6>,
++						 <&k3_clks 406 6>,
++						 <&k3_clks 406 6>;
++			#address-cells = <1>;
++			#size-cells = <0>;
++			#clock-cells = <1>;
++			status = "disabled";
++		};
+ 	};
++};
+ 
+-	mcasp3: mcasp@2b30000 {
+-		compatible = "ti,am33xx-mcasp-audio";
+-		reg = <0x00 0x02b30000 0x00 0x2000>,
+-		      <0x00 0x02b38000 0x00 0x1000>;
+-		reg-names = "mpu","dat";
+-		interrupts = <GIC_SPI 550 IRQ_TYPE_LEVEL_HIGH>,
+-			     <GIC_SPI 551 IRQ_TYPE_LEVEL_HIGH>;
+-		interrupt-names = "tx", "rx";
+-		dmas = <&main_udmap 0xc403>, <&main_udmap 0x4403>;
+-		dma-names = "tx", "rx";
+-		clocks = <&k3_clks 268 0>;
+-		clock-names = "fck";
+-		assigned-clocks = <&k3_clks 268 0>;
+-		assigned-clock-parents = <&k3_clks 268 1>;
+-		power-domains = <&k3_pds 268 TI_SCI_PD_EXCLUSIVE>;
+-		status = "disabled";
++&scm_conf {
++	pcie2_ctrl: pcie2-ctrl@4078 {
++		compatible = "ti,j784s4-pcie-ctrl", "syscon";
++		reg = <0x4078 0x4>;
+ 	};
+ 
+-	mcasp4: mcasp@2b40000 {
+-		compatible = "ti,am33xx-mcasp-audio";
+-		reg = <0x00 0x02b40000 0x00 0x2000>,
+-		      <0x00 0x02b48000 0x00 0x1000>;
+-		reg-names = "mpu","dat";
+-		interrupts = <GIC_SPI 552 IRQ_TYPE_LEVEL_HIGH>,
+-			     <GIC_SPI 553 IRQ_TYPE_LEVEL_HIGH>;
+-		interrupt-names = "tx", "rx";
+-		dmas = <&main_udmap 0xc404>, <&main_udmap 0x4404>;
+-		dma-names = "tx", "rx";
+-		clocks = <&k3_clks 269 0>;
+-		clock-names = "fck";
+-		assigned-clocks = <&k3_clks 269 0>;
+-		assigned-clock-parents = <&k3_clks 269 1>;
+-		power-domains = <&k3_pds 269 TI_SCI_PD_EXCLUSIVE>;
+-		status = "disabled";
++	pcie3_ctrl: pcie3-ctrl@407c {
++		compatible = "ti,j784s4-pcie-ctrl", "syscon";
++		reg = <0x407c 0x4>;
+ 	};
+ };
+diff --git a/arch/arm64/boot/dts/ti/k3-j784s4-mcu-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-j784s4-mcu-wakeup.dtsi
+deleted file mode 100644
+index f603380fc91cf4..00000000000000
+--- a/arch/arm64/boot/dts/ti/k3-j784s4-mcu-wakeup.dtsi
++++ /dev/null
+@@ -1,760 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-only OR MIT
+-/*
+- * Device Tree Source for J784S4 SoC Family MCU/WAKEUP Domain peripherals
+- *
+- * Copyright (C) 2022-2024 Texas Instruments Incorporated - https://www.ti.com/
+- */
+-
+-&cbass_mcu_wakeup {
+-	sms: system-controller@44083000 {
+-		bootph-all;
+-		compatible = "ti,k2g-sci";
+-		ti,host-id = <12>;
+-
+-		mbox-names = "rx", "tx";
+-
+-		mboxes = <&secure_proxy_main 11>,
+-			 <&secure_proxy_main 13>;
+-
+-		reg-names = "debug_messages";
+-		reg = <0x00 0x44083000 0x00 0x1000>;
+-
+-		k3_pds: power-controller {
+-			bootph-all;
+-			compatible = "ti,sci-pm-domain";
+-			#power-domain-cells = <2>;
+-		};
+-
+-		k3_clks: clock-controller {
+-			bootph-all;
+-			compatible = "ti,k2g-sci-clk";
+-			#clock-cells = <2>;
+-		};
+-
+-		k3_reset: reset-controller {
+-			bootph-all;
+-			compatible = "ti,sci-reset";
+-			#reset-cells = <2>;
+-		};
+-	};
+-
+-	wkup_conf: bus@43000000 {
+-		bootph-all;
+-		compatible = "simple-bus";
+-		#address-cells = <1>;
+-		#size-cells = <1>;
+-		ranges = <0x0 0x00 0x43000000 0x20000>;
+-
+-		chipid: chipid@14 {
+-			bootph-all;
+-			compatible = "ti,am654-chipid";
+-			reg = <0x14 0x4>;
+-		};
+-	};
+-
+-	secure_proxy_sa3: mailbox@43600000 {
+-		compatible = "ti,am654-secure-proxy";
+-		#mbox-cells = <1>;
+-		reg-names = "target_data", "rt", "scfg";
+-		reg = <0x00 0x43600000 0x00 0x10000>,
+-		      <0x00 0x44880000 0x00 0x20000>,
+-		      <0x00 0x44860000 0x00 0x20000>;
+-		/*
+-		 * Marked Disabled:
+-		 * Node is incomplete as it is meant for bootloaders and
+-		 * firmware on non-MPU processors
+-		 */
+-		status = "disabled";
+-	};
+-
+-	mcu_ram: sram@41c00000 {
+-		compatible = "mmio-sram";
+-		reg = <0x00 0x41c00000 0x00 0x100000>;
+-		ranges = <0x00 0x00 0x41c00000 0x100000>;
+-		#address-cells = <1>;
+-		#size-cells = <1>;
+-	};
+-
+-	wkup_pmx0: pinctrl@4301c000 {
+-		compatible = "pinctrl-single";
+-		/* Proxy 0 addressing */
+-		reg = <0x00 0x4301c000 0x00 0x034>;
+-		#pinctrl-cells = <1>;
+-		pinctrl-single,register-width = <32>;
+-		pinctrl-single,function-mask = <0xffffffff>;
+-	};
+-
+-	wkup_pmx1: pinctrl@4301c038 {
+-		compatible = "pinctrl-single";
+-		/* Proxy 0 addressing */
+-		reg = <0x00 0x4301c038 0x00 0x02c>;
+-		#pinctrl-cells = <1>;
+-		pinctrl-single,register-width = <32>;
+-		pinctrl-single,function-mask = <0xffffffff>;
+-	};
+-
+-	wkup_pmx2: pinctrl@4301c068 {
+-		compatible = "pinctrl-single";
+-		/* Proxy 0 addressing */
+-		reg = <0x00 0x4301c068 0x00 0x120>;
+-		#pinctrl-cells = <1>;
+-		pinctrl-single,register-width = <32>;
+-		pinctrl-single,function-mask = <0xffffffff>;
+-	};
+-
+-	wkup_pmx3: pinctrl@4301c190 {
+-		compatible = "pinctrl-single";
+-		/* Proxy 0 addressing */
+-		reg = <0x00 0x4301c190 0x00 0x004>;
+-		#pinctrl-cells = <1>;
+-		pinctrl-single,register-width = <32>;
+-		pinctrl-single,function-mask = <0xffffffff>;
+-	};
+-
+-	wkup_gpio_intr: interrupt-controller@42200000 {
+-		compatible = "ti,sci-intr";
+-		reg = <0x00 0x42200000 0x00 0x400>;
+-		ti,intr-trigger-type = <1>;
+-		interrupt-controller;
+-		interrupt-parent = <&gic500>;
+-		#interrupt-cells = <1>;
+-		ti,sci = <&sms>;
+-		ti,sci-dev-id = <177>;
+-		ti,interrupt-ranges = <16 960 16>;
+-	};
+-
+-	/* MCU_TIMERIO pad input CTRLMMR_MCU_TIMER*_CTRL registers */
+-	mcu_timerio_input: pinctrl@40f04200 {
+-		compatible = "pinctrl-single";
+-		reg = <0x00 0x40f04200 0x00 0x28>;
+-		#pinctrl-cells = <1>;
+-		pinctrl-single,register-width = <32>;
+-		pinctrl-single,function-mask = <0x0000000f>;
+-		/* Non-MPU Firmware usage */
+-		status = "reserved";
+-	};
+-
+-	/* MCU_TIMERIO pad output CTRLMMR_MCU_TIMERIO*_CTRL registers */
+-	mcu_timerio_output: pinctrl@40f04280 {
+-		compatible = "pinctrl-single";
+-		reg = <0x00 0x40f04280 0x00 0x28>;
+-		#pinctrl-cells = <1>;
+-		pinctrl-single,register-width = <32>;
+-		pinctrl-single,function-mask = <0x0000000f>;
+-		/* Non-MPU Firmware usage */
+-		status = "reserved";
+-	};
+-
+-	mcu_conf: bus@40f00000 {
+-		compatible = "simple-bus";
+-		#address-cells = <1>;
+-		#size-cells = <1>;
+-		ranges = <0x0 0x0 0x40f00000 0x20000>;
+-
+-		cpsw_mac_syscon: ethernet-mac-syscon@200 {
+-			compatible = "ti,am62p-cpsw-mac-efuse", "syscon";
+-			reg = <0x200 0x8>;
+-		};
+-
+-		phy_gmii_sel: phy@4040 {
+-			compatible = "ti,am654-phy-gmii-sel";
+-			reg = <0x4040 0x4>;
+-			#phy-cells = <1>;
+-		};
+-	};
+-
+-	mcu_timer0: timer@40400000 {
+-		compatible = "ti,am654-timer";
+-		reg = <0x00 0x40400000 0x00 0x400>;
+-		interrupts = <GIC_SPI 816 IRQ_TYPE_LEVEL_HIGH>;
+-		clocks = <&k3_clks 35 2>;
+-		clock-names = "fck";
+-		assigned-clocks = <&k3_clks 35 2>;
+-		assigned-clock-parents = <&k3_clks 35 3>;
+-		power-domains = <&k3_pds 35 TI_SCI_PD_EXCLUSIVE>;
+-		ti,timer-pwm;
+-		/* Non-MPU Firmware usage */
+-		status = "reserved";
+-	};
+-
+-	mcu_timer1: timer@40410000 {
+-		bootph-all;
+-		compatible = "ti,am654-timer";
+-		reg = <0x00 0x40410000 0x00 0x400>;
+-		interrupts = <GIC_SPI 817 IRQ_TYPE_LEVEL_HIGH>;
+-		clocks = <&k3_clks 117 2>;
+-		clock-names = "fck";
+-		assigned-clocks = <&k3_clks 117 2>;
+-		assigned-clock-parents = <&k3_clks 117 3>;
+-		power-domains = <&k3_pds 117 TI_SCI_PD_EXCLUSIVE>;
+-		ti,timer-pwm;
+-		/* Non-MPU Firmware usage */
+-		status = "reserved";
+-	};
+-
+-	mcu_timer2: timer@40420000 {
+-		compatible = "ti,am654-timer";
+-		reg = <0x00 0x40420000 0x00 0x400>;
+-		interrupts = <GIC_SPI 818 IRQ_TYPE_LEVEL_HIGH>;
+-		clocks = <&k3_clks 118 2>;
+-		clock-names = "fck";
+-		assigned-clocks = <&k3_clks 118 2>;
+-		assigned-clock-parents = <&k3_clks 118 3>;
+-		power-domains = <&k3_pds 118 TI_SCI_PD_EXCLUSIVE>;
+-		ti,timer-pwm;
+-		/* Non-MPU Firmware usage */
+-		status = "reserved";
+-	};
+-
+-	mcu_timer3: timer@40430000 {
+-		compatible = "ti,am654-timer";
+-		reg = <0x00 0x40430000 0x00 0x400>;
+-		interrupts = <GIC_SPI 819 IRQ_TYPE_LEVEL_HIGH>;
+-		clocks = <&k3_clks 119 2>;
+-		clock-names = "fck";
+-		assigned-clocks = <&k3_clks 119 2>;
+-		assigned-clock-parents = <&k3_clks 119 3>;
+-		power-domains = <&k3_pds 119 TI_SCI_PD_EXCLUSIVE>;
+-		ti,timer-pwm;
+-		/* Non-MPU Firmware usage */
+-		status = "reserved";
+-	};
+-
+-	mcu_timer4: timer@40440000 {
+-		compatible = "ti,am654-timer";
+-		reg = <0x00 0x40440000 0x00 0x400>;
+-		interrupts = <GIC_SPI 820 IRQ_TYPE_LEVEL_HIGH>;
+-		clocks = <&k3_clks 120 2>;
+-		clock-names = "fck";
+-		assigned-clocks = <&k3_clks 120 2>;
+-		assigned-clock-parents = <&k3_clks 120 3>;
+-		power-domains = <&k3_pds 120 TI_SCI_PD_EXCLUSIVE>;
+-		ti,timer-pwm;
+-		/* Non-MPU Firmware usage */
+-		status = "reserved";
+-	};
+-
+-	mcu_timer5: timer@40450000 {
+-		compatible = "ti,am654-timer";
+-		reg = <0x00 0x40450000 0x00 0x400>;
+-		interrupts = <GIC_SPI 821 IRQ_TYPE_LEVEL_HIGH>;
+-		clocks = <&k3_clks 121 2>;
+-		clock-names = "fck";
+-		assigned-clocks = <&k3_clks 121 2>;
+-		assigned-clock-parents = <&k3_clks 121 3>;
+-		power-domains = <&k3_pds 121 TI_SCI_PD_EXCLUSIVE>;
+-		ti,timer-pwm;
+-		/* Non-MPU Firmware usage */
+-		status = "reserved";
+-	};
+-
+-	mcu_timer6: timer@40460000 {
+-		compatible = "ti,am654-timer";
+-		reg = <0x00 0x40460000 0x00 0x400>;
+-		interrupts = <GIC_SPI 822 IRQ_TYPE_LEVEL_HIGH>;
+-		clocks = <&k3_clks 122 2>;
+-		clock-names = "fck";
+-		assigned-clocks = <&k3_clks 122 2>;
+-		assigned-clock-parents = <&k3_clks 122 3>;
+-		power-domains = <&k3_pds 122 TI_SCI_PD_EXCLUSIVE>;
+-		ti,timer-pwm;
+-		/* Non-MPU Firmware usage */
+-		status = "reserved";
+-	};
+-
+-	mcu_timer7: timer@40470000 {
+-		compatible = "ti,am654-timer";
+-		reg = <0x00 0x40470000 0x00 0x400>;
+-		interrupts = <GIC_SPI 823 IRQ_TYPE_LEVEL_HIGH>;
+-		clocks = <&k3_clks 123 2>;
+-		clock-names = "fck";
+-		assigned-clocks = <&k3_clks 123 2>;
+-		assigned-clock-parents = <&k3_clks 123 3>;
+-		power-domains = <&k3_pds 123 TI_SCI_PD_EXCLUSIVE>;
+-		ti,timer-pwm;
+-		/* Non-MPU Firmware usage */
+-		status = "reserved";
+-	};
+-
+-	mcu_timer8: timer@40480000 {
+-		compatible = "ti,am654-timer";
+-		reg = <0x00 0x40480000 0x00 0x400>;
+-		interrupts = <GIC_SPI 824 IRQ_TYPE_LEVEL_HIGH>;
+-		clocks = <&k3_clks 124 2>;
+-		clock-names = "fck";
+-		assigned-clocks = <&k3_clks 124 2>;
+-		assigned-clock-parents = <&k3_clks 124 3>;
+-		power-domains = <&k3_pds 124 TI_SCI_PD_EXCLUSIVE>;
+-		ti,timer-pwm;
+-		/* Non-MPU Firmware usage */
+-		status = "reserved";
+-	};
+-
+-	mcu_timer9: timer@40490000 {
+-		compatible = "ti,am654-timer";
+-		reg = <0x00 0x40490000 0x00 0x400>;
+-		interrupts = <GIC_SPI 825 IRQ_TYPE_LEVEL_HIGH>;
+-		clocks = <&k3_clks 125 2>;
+-		clock-names = "fck";
+-		assigned-clocks = <&k3_clks 125 2>;
+-		assigned-clock-parents = <&k3_clks 125 3>;
+-		power-domains = <&k3_pds 125 TI_SCI_PD_EXCLUSIVE>;
+-		ti,timer-pwm;
+-		/* Non-MPU Firmware usage */
+-		status = "reserved";
+-	};
+-
+-	wkup_uart0: serial@42300000 {
+-		compatible = "ti,j721e-uart", "ti,am654-uart";
+-		reg = <0x00 0x42300000 0x00 0x200>;
+-		interrupts = <GIC_SPI 897 IRQ_TYPE_LEVEL_HIGH>;
+-		clocks = <&k3_clks 397 0>;
+-		clock-names = "fclk";
+-		power-domains = <&k3_pds 397 TI_SCI_PD_EXCLUSIVE>;
+-		status = "disabled";
+-	};
+-
+-	mcu_uart0: serial@40a00000 {
+-		compatible = "ti,j721e-uart", "ti,am654-uart";
+-		reg = <0x00 0x40a00000 0x00 0x200>;
+-		interrupts = <GIC_SPI 846 IRQ_TYPE_LEVEL_HIGH>;
+-		clocks = <&k3_clks 149 0>;
+-		clock-names = "fclk";
+-		power-domains = <&k3_pds 149 TI_SCI_PD_EXCLUSIVE>;
+-		status = "disabled";
+-	};
+-
+-	wkup_gpio0: gpio@42110000 {
+-		compatible = "ti,j721e-gpio", "ti,keystone-gpio";
+-		reg = <0x00 0x42110000 0x00 0x100>;
+-		gpio-controller;
+-		#gpio-cells = <2>;
+-		interrupt-parent = <&wkup_gpio_intr>;
+-		interrupts = <103>, <104>, <105>, <106>, <107>, <108>;
+-		interrupt-controller;
+-		#interrupt-cells = <2>;
+-		ti,ngpio = <89>;
+-		ti,davinci-gpio-unbanked = <0>;
+-		power-domains = <&k3_pds 167 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 167 0>;
+-		clock-names = "gpio";
+-		status = "disabled";
+-	};
+-
+-	wkup_gpio1: gpio@42100000 {
+-		compatible = "ti,j721e-gpio", "ti,keystone-gpio";
+-		reg = <0x00 0x42100000 0x00 0x100>;
+-		gpio-controller;
+-		#gpio-cells = <2>;
+-		interrupt-parent = <&wkup_gpio_intr>;
+-		interrupts = <112>, <113>, <114>, <115>, <116>, <117>;
+-		interrupt-controller;
+-		#interrupt-cells = <2>;
+-		ti,ngpio = <89>;
+-		ti,davinci-gpio-unbanked = <0>;
+-		power-domains = <&k3_pds 168 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 168 0>;
+-		clock-names = "gpio";
+-		status = "disabled";
+-	};
+-
+-	wkup_i2c0: i2c@42120000 {
+-		compatible = "ti,j721e-i2c", "ti,omap4-i2c";
+-		reg = <0x00 0x42120000 0x00 0x100>;
+-		interrupts = <GIC_SPI 896 IRQ_TYPE_LEVEL_HIGH>;
+-		#address-cells = <1>;
+-		#size-cells = <0>;
+-		clocks = <&k3_clks 279 2>;
+-		clock-names = "fck";
+-		power-domains = <&k3_pds 279 TI_SCI_PD_EXCLUSIVE>;
+-		status = "disabled";
+-	};
+-
+-	mcu_i2c0: i2c@40b00000 {
+-		compatible = "ti,j721e-i2c", "ti,omap4-i2c";
+-		reg = <0x00 0x40b00000 0x00 0x100>;
+-		interrupts = <GIC_SPI 852 IRQ_TYPE_LEVEL_HIGH>;
+-		#address-cells = <1>;
+-		#size-cells = <0>;
+-		clocks = <&k3_clks 277 2>;
+-		clock-names = "fck";
+-		power-domains = <&k3_pds 277 TI_SCI_PD_EXCLUSIVE>;
+-		status = "disabled";
+-	};
+-
+-	mcu_i2c1: i2c@40b10000 {
+-		compatible = "ti,j721e-i2c", "ti,omap4-i2c";
+-		reg = <0x00 0x40b10000 0x00 0x100>;
+-		interrupts = <GIC_SPI 853 IRQ_TYPE_LEVEL_HIGH>;
+-		#address-cells = <1>;
+-		#size-cells = <0>;
+-		clocks = <&k3_clks 278 2>;
+-		clock-names = "fck";
+-		power-domains = <&k3_pds 278 TI_SCI_PD_EXCLUSIVE>;
+-		status = "disabled";
+-	};
+-
+-	mcu_mcan0: can@40528000 {
+-		compatible = "bosch,m_can";
+-		reg = <0x00 0x40528000 0x00 0x200>,
+-		      <0x00 0x40500000 0x00 0x8000>;
+-		reg-names = "m_can", "message_ram";
+-		power-domains = <&k3_pds 263 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 263 6>, <&k3_clks 263 1>;
+-		clock-names = "hclk", "cclk";
+-		interrupts = <GIC_SPI 832 IRQ_TYPE_LEVEL_HIGH>,
+-			     <GIC_SPI 833 IRQ_TYPE_LEVEL_HIGH>;
+-		interrupt-names = "int0", "int1";
+-		bosch,mram-cfg = <0x00 128 64 64 64 64 32 32>;
+-		status = "disabled";
+-	};
+-
+-	mcu_mcan1: can@40568000 {
+-		compatible = "bosch,m_can";
+-		reg = <0x00 0x40568000 0x00 0x200>,
+-		      <0x00 0x40540000 0x00 0x8000>;
+-		reg-names = "m_can", "message_ram";
+-		power-domains = <&k3_pds 264 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 264 6>, <&k3_clks 264 1>;
+-		clock-names = "hclk", "cclk";
+-		interrupts = <GIC_SPI 835 IRQ_TYPE_LEVEL_HIGH>,
+-			     <GIC_SPI 836 IRQ_TYPE_LEVEL_HIGH>;
+-		interrupt-names = "int0", "int1";
+-		bosch,mram-cfg = <0x00 128 64 64 64 64 32 32>;
+-		status = "disabled";
+-	};
+-
+-	mcu_spi0: spi@40300000 {
+-		compatible = "ti,am654-mcspi", "ti,omap4-mcspi";
+-		reg = <0x00 0x040300000 0x00 0x400>;
+-		interrupts = <GIC_SPI 848 IRQ_TYPE_LEVEL_HIGH>;
+-		#address-cells = <1>;
+-		#size-cells = <0>;
+-		power-domains = <&k3_pds 384 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 384 0>;
+-		status = "disabled";
+-	};
+-
+-	mcu_spi1: spi@40310000 {
+-		compatible = "ti,am654-mcspi", "ti,omap4-mcspi";
+-		reg = <0x00 0x040310000 0x00 0x400>;
+-		interrupts = <GIC_SPI 849 IRQ_TYPE_LEVEL_HIGH>;
+-		#address-cells = <1>;
+-		#size-cells = <0>;
+-		power-domains = <&k3_pds 385 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 385 0>;
+-		status = "disabled";
+-	};
+-
+-	mcu_spi2: spi@40320000 {
+-		compatible = "ti,am654-mcspi", "ti,omap4-mcspi";
+-		reg = <0x00 0x040320000 0x00 0x400>;
+-		interrupts = <GIC_SPI 850 IRQ_TYPE_LEVEL_HIGH>;
+-		#address-cells = <1>;
+-		#size-cells = <0>;
+-		power-domains = <&k3_pds 386 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 386 0>;
+-		status = "disabled";
+-	};
+-
+-	mcu_navss: bus@28380000 {
+-		bootph-all;
+-		compatible = "simple-bus";
+-		#address-cells = <2>;
+-		#size-cells = <2>;
+-		ranges = <0x00 0x28380000 0x00 0x28380000 0x00 0x03880000>;
+-		ti,sci-dev-id = <323>;
+-		dma-coherent;
+-		dma-ranges;
+-
+-		mcu_ringacc: ringacc@2b800000 {
+-			bootph-all;
+-			compatible = "ti,am654-navss-ringacc";
+-			reg = <0x00 0x2b800000 0x00 0x400000>,
+-			      <0x00 0x2b000000 0x00 0x400000>,
+-			      <0x00 0x28590000 0x00 0x100>,
+-			      <0x00 0x2a500000 0x00 0x40000>,
+-			      <0x00 0x28440000 0x00 0x40000>;
+-			reg-names = "rt", "fifos", "proxy_gcfg", "proxy_target", "cfg";
+-			ti,num-rings = <286>;
+-			ti,sci-rm-range-gp-rings = <0x1>;
+-			ti,sci = <&sms>;
+-			ti,sci-dev-id = <328>;
+-			msi-parent = <&main_udmass_inta>;
+-		};
+-
+-		mcu_udmap: dma-controller@285c0000 {
+-			bootph-all;
+-			compatible = "ti,j721e-navss-mcu-udmap";
+-			reg = <0x00 0x285c0000 0x00 0x100>,
+-			      <0x00 0x2a800000 0x00 0x40000>,
+-			      <0x00 0x2aa00000 0x00 0x40000>,
+-			      <0x00 0x284a0000 0x00 0x4000>,
+-			      <0x00 0x284c0000 0x00 0x4000>,
+-			      <0x00 0x28400000 0x00 0x2000>;
+-			reg-names = "gcfg", "rchanrt", "tchanrt",
+-				    "tchan", "rchan", "rflow";
+-			msi-parent = <&main_udmass_inta>;
+-			#dma-cells = <1>;
+-
+-			ti,sci = <&sms>;
+-			ti,sci-dev-id = <329>;
+-			ti,ringacc = <&mcu_ringacc>;
+-			ti,sci-rm-range-tchan = <0x0d>, /* TX_CHAN */
+-						<0x0f>; /* TX_HCHAN */
+-			ti,sci-rm-range-rchan = <0x0a>, /* RX_CHAN */
+-						<0x0b>; /* RX_HCHAN */
+-			ti,sci-rm-range-rflow = <0x00>; /* GP RFLOW */
+-		};
+-	};
+-
+-	secure_proxy_mcu: mailbox@2a480000 {
+-		compatible = "ti,am654-secure-proxy";
+-		#mbox-cells = <1>;
+-		reg-names = "target_data", "rt", "scfg";
+-		reg = <0x00 0x2a480000 0x00 0x80000>,
+-		      <0x00 0x2a380000 0x00 0x80000>,
+-		      <0x00 0x2a400000 0x00 0x80000>;
+-		/*
+-		 * Marked Disabled:
+-		 * Node is incomplete as it is meant for bootloaders and
+-		 * firmware on non-MPU processors
+-		 */
+-		status = "disabled";
+-	};
+-
+-	mcu_cpsw: ethernet@46000000 {
+-		compatible = "ti,j721e-cpsw-nuss";
+-		#address-cells = <2>;
+-		#size-cells = <2>;
+-		reg = <0x00 0x46000000 0x00 0x200000>;
+-		reg-names = "cpsw_nuss";
+-		ranges = <0x00 0x00 0x00 0x46000000 0x00 0x200000>;
+-		dma-coherent;
+-		clocks = <&k3_clks 63 0>;
+-		clock-names = "fck";
+-		power-domains = <&k3_pds 63 TI_SCI_PD_EXCLUSIVE>;
+-
+-		dmas = <&mcu_udmap 0xf000>,
+-		       <&mcu_udmap 0xf001>,
+-		       <&mcu_udmap 0xf002>,
+-		       <&mcu_udmap 0xf003>,
+-		       <&mcu_udmap 0xf004>,
+-		       <&mcu_udmap 0xf005>,
+-		       <&mcu_udmap 0xf006>,
+-		       <&mcu_udmap 0xf007>,
+-		       <&mcu_udmap 0x7000>;
+-		dma-names = "tx0", "tx1", "tx2", "tx3",
+-			    "tx4", "tx5", "tx6", "tx7",
+-			    "rx";
+-		status = "disabled";
+-
+-		ethernet-ports {
+-			#address-cells = <1>;
+-			#size-cells = <0>;
+-
+-			mcu_cpsw_port1: port@1 {
+-				reg = <1>;
+-				ti,mac-only;
+-				label = "port1";
+-				ti,syscon-efuse = <&cpsw_mac_syscon 0x0>;
+-				phys = <&phy_gmii_sel 1>;
+-			};
+-		};
+-
+-		davinci_mdio: mdio@f00 {
+-			compatible = "ti,cpsw-mdio","ti,davinci_mdio";
+-			reg = <0x00 0xf00 0x00 0x100>;
+-			#address-cells = <1>;
+-			#size-cells = <0>;
+-			clocks = <&k3_clks 63 0>;
+-			clock-names = "fck";
+-			bus_freq = <1000000>;
+-		};
+-
+-		cpts@3d000 {
+-			compatible = "ti,am65-cpts";
+-			reg = <0x00 0x3d000 0x00 0x400>;
+-			clocks = <&k3_clks 63 3>;
+-			clock-names = "cpts";
+-			assigned-clocks = <&k3_clks 63 3>; /* CPTS_RFT_CLK */
+-			assigned-clock-parents = <&k3_clks 63 5>; /* MAIN_0_HSDIV6_CLK */
+-			interrupts-extended = <&gic500 GIC_SPI 858 IRQ_TYPE_LEVEL_HIGH>;
+-			interrupt-names = "cpts";
+-			ti,cpts-ext-ts-inputs = <4>;
+-			ti,cpts-periodic-outputs = <2>;
+-		};
+-	};
+-
+-	mcu_r5fss0: r5fss@41000000 {
+-		compatible = "ti,j721s2-r5fss";
+-		ti,cluster-mode = <1>;
+-		#address-cells = <1>;
+-		#size-cells = <1>;
+-		ranges = <0x41000000 0x00 0x41000000 0x20000>,
+-			 <0x41400000 0x00 0x41400000 0x20000>;
+-		power-domains = <&k3_pds 345 TI_SCI_PD_EXCLUSIVE>;
+-
+-		mcu_r5fss0_core0: r5f@41000000 {
+-			compatible = "ti,j721s2-r5f";
+-			reg = <0x41000000 0x00010000>,
+-			      <0x41010000 0x00010000>;
+-			reg-names = "atcm", "btcm";
+-			ti,sci = <&sms>;
+-			ti,sci-dev-id = <346>;
+-			ti,sci-proc-ids = <0x01 0xff>;
+-			resets = <&k3_reset 346 1>;
+-			firmware-name = "j784s4-mcu-r5f0_0-fw";
+-			ti,atcm-enable = <1>;
+-			ti,btcm-enable = <1>;
+-			ti,loczrama = <1>;
+-		};
+-
+-		mcu_r5fss0_core1: r5f@41400000 {
+-			compatible = "ti,j721s2-r5f";
+-			reg = <0x41400000 0x00010000>,
+-			      <0x41410000 0x00010000>;
+-			reg-names = "atcm", "btcm";
+-			ti,sci = <&sms>;
+-			ti,sci-dev-id = <347>;
+-			ti,sci-proc-ids = <0x02 0xff>;
+-			resets = <&k3_reset 347 1>;
+-			firmware-name = "j784s4-mcu-r5f0_1-fw";
+-			ti,atcm-enable = <1>;
+-			ti,btcm-enable = <1>;
+-			ti,loczrama = <1>;
+-		};
+-	};
+-
+-	wkup_vtm0: temperature-sensor@42040000 {
+-		compatible = "ti,j7200-vtm";
+-		reg = <0x00 0x42040000 0x00 0x350>,
+-		      <0x00 0x42050000 0x00 0x350>;
+-		power-domains = <&k3_pds 243 TI_SCI_PD_SHARED>;
+-		#thermal-sensor-cells = <1>;
+-	};
+-
+-	tscadc0: tscadc@40200000 {
+-		compatible = "ti,am3359-tscadc";
+-		reg = <0x00 0x40200000 0x00 0x1000>;
+-		interrupts = <GIC_SPI 860 IRQ_TYPE_LEVEL_HIGH>;
+-		power-domains = <&k3_pds 0 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 0 0>;
+-		assigned-clocks = <&k3_clks 0 2>;
+-		assigned-clock-rates = <60000000>;
+-		clock-names = "fck";
+-		dmas = <&main_udmap 0x7400>,
+-			<&main_udmap 0x7401>;
+-		dma-names = "fifo0", "fifo1";
+-		status = "disabled";
+-
+-		adc {
+-			#io-channel-cells = <1>;
+-			compatible = "ti,am3359-adc";
+-		};
+-	};
+-
+-	tscadc1: tscadc@40210000 {
+-		compatible = "ti,am3359-tscadc";
+-		reg = <0x00 0x40210000 0x00 0x1000>;
+-		interrupts = <GIC_SPI 861 IRQ_TYPE_LEVEL_HIGH>;
+-		power-domains = <&k3_pds 1 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 1 0>;
+-		assigned-clocks = <&k3_clks 1 2>;
+-		assigned-clock-rates = <60000000>;
+-		clock-names = "fck";
+-		dmas = <&main_udmap 0x7402>,
+-			<&main_udmap 0x7403>;
+-		dma-names = "fifo0", "fifo1";
+-		status = "disabled";
+-
+-		adc {
+-			#io-channel-cells = <1>;
+-			compatible = "ti,am3359-adc";
+-		};
+-	};
+-
+-	fss: bus@47000000 {
+-		compatible = "simple-bus";
+-		#address-cells = <2>;
+-		#size-cells = <2>;
+-		ranges = <0x00 0x47000000 0x00 0x47000000 0x00 0x00000100>, /* FSS Control */
+-			 <0x00 0x47040000 0x00 0x47040000 0x00 0x00000100>, /* OSPI0 Control */
+-			 <0x00 0x47050000 0x00 0x47050000 0x00 0x00000100>, /* OSPI1 Control */
+-			 <0x00 0x50000000 0x00 0x50000000 0x00 0x10000000>, /* FSS data region 1 */
+-			 <0x04 0x00000000 0x04 0x00000000 0x04 0x00000000>; /* FSS data region 0/3 */
+-
+-		ospi0: spi@47040000 {
+-			compatible = "ti,am654-ospi", "cdns,qspi-nor";
+-			reg = <0x00 0x47040000 0x00 0x100>,
+-			      <0x05 0x00000000 0x01 0x00000000>;
+-			interrupts = <GIC_SPI 840 IRQ_TYPE_LEVEL_HIGH>;
+-			cdns,fifo-depth = <256>;
+-			cdns,fifo-width = <4>;
+-			cdns,trigger-address = <0x0>;
+-			clocks = <&k3_clks 161 7>;
+-			assigned-clocks = <&k3_clks 161 7>;
+-			assigned-clock-parents = <&k3_clks 161 9>;
+-			assigned-clock-rates = <166666666>;
+-			power-domains = <&k3_pds 161 TI_SCI_PD_EXCLUSIVE>;
+-			#address-cells = <1>;
+-			#size-cells = <0>;
+-			status = "disabled";
+-		};
+-
+-		ospi1: spi@47050000 {
+-			compatible = "ti,am654-ospi", "cdns,qspi-nor";
+-			reg = <0x00 0x47050000 0x00 0x100>,
+-			      <0x07 0x00000000 0x01 0x00000000>;
+-			interrupts = <GIC_SPI 841 IRQ_TYPE_LEVEL_HIGH>;
+-			cdns,fifo-depth = <256>;
+-			cdns,fifo-width = <4>;
+-			cdns,trigger-address = <0x0>;
+-			clocks = <&k3_clks 162 7>;
+-			power-domains = <&k3_pds 162 TI_SCI_PD_EXCLUSIVE>;
+-			#address-cells = <1>;
+-			#size-cells = <0>;
+-			status = "disabled";
+-		};
+-	};
+-
+-	mcu_esm: esm@40800000 {
+-		compatible = "ti,j721e-esm";
+-		reg = <0x00 0x40800000 0x00 0x1000>;
+-		ti,esm-pins = <95>;
+-		bootph-pre-ram;
+-	};
+-
+-	wkup_esm: esm@42080000 {
+-		compatible = "ti,j721e-esm";
+-		reg = <0x00 0x42080000 0x00 0x1000>;
+-		ti,esm-pins = <63>;
+-		bootph-pre-ram;
+-	};
+-
+-	/*
+-	 * The 2 RTI instances are couple with MCU R5Fs so keeping them
+-	 * reserved as these will be used by their respective firmware
+-	 */
+-	mcu_watchdog0: watchdog@40600000 {
+-		compatible = "ti,j7-rti-wdt";
+-		reg = <0x00 0x40600000 0x00 0x100>;
+-		clocks = <&k3_clks 367 1>;
+-		power-domains = <&k3_pds 367 TI_SCI_PD_EXCLUSIVE>;
+-		assigned-clocks = <&k3_clks 367 0>;
+-		assigned-clock-parents = <&k3_clks 367 4>;
+-		/* reserved for MCU_R5F0_0 */
+-		status = "reserved";
+-	};
+-
+-	mcu_watchdog1: watchdog@40610000 {
+-		compatible = "ti,j7-rti-wdt";
+-		reg = <0x00 0x40610000 0x00 0x100>;
+-		clocks = <&k3_clks 368 1>;
+-		power-domains = <&k3_pds 368 TI_SCI_PD_EXCLUSIVE>;
+-		assigned-clocks = <&k3_clks 368 0>;
+-		assigned-clock-parents = <&k3_clks 368 4>;
+-		/* reserved for MCU_R5F0_1 */
+-		status = "reserved";
+-	};
+-};
+diff --git a/arch/arm64/boot/dts/ti/k3-j784s4-thermal.dtsi b/arch/arm64/boot/dts/ti/k3-j784s4-thermal.dtsi
+deleted file mode 100644
+index e3ef61c1658f4b..00000000000000
+--- a/arch/arm64/boot/dts/ti/k3-j784s4-thermal.dtsi
++++ /dev/null
+@@ -1,104 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-only OR MIT
+-/*
+- * Copyright (C) 2023-2024 Texas Instruments Incorporated - https://www.ti.com/
+- */
+-
+-#include <dt-bindings/thermal/thermal.h>
+-
+-wkup0_thermal: wkup0-thermal {
+-	polling-delay-passive = <250>; /* milliseconds */
+-	polling-delay = <500>; /* milliseconds */
+-	thermal-sensors = <&wkup_vtm0 0>;
+-
+-	trips {
+-		wkup0_crit: wkup0-crit {
+-			temperature = <125000>; /* milliCelsius */
+-			hysteresis = <2000>; /* milliCelsius */
+-			type = "critical";
+-		};
+-	};
+-};
+-
+-wkup1_thermal: wkup1-thermal {
+-	polling-delay-passive = <250>; /* milliseconds */
+-	polling-delay = <500>; /* milliseconds */
+-	thermal-sensors = <&wkup_vtm0 1>;
+-
+-	trips {
+-		wkup1_crit: wkup1-crit {
+-			temperature = <125000>; /* milliCelsius */
+-			hysteresis = <2000>; /* milliCelsius */
+-			type = "critical";
+-		};
+-	};
+-};
+-
+-main0_thermal: main0-thermal {
+-	polling-delay-passive = <250>; /* milliseconds */
+-	polling-delay = <500>; /* milliseconds */
+-	thermal-sensors = <&wkup_vtm0 2>;
+-
+-	trips {
+-		main0_crit: main0-crit {
+-			temperature = <125000>; /* milliCelsius */
+-			hysteresis = <2000>; /* milliCelsius */
+-			type = "critical";
+-		};
+-	};
+-};
+-
+-main1_thermal: main1-thermal {
+-	polling-delay-passive = <250>; /* milliseconds */
+-	polling-delay = <500>; /* milliseconds */
+-	thermal-sensors = <&wkup_vtm0 3>;
+-
+-	trips {
+-		main1_crit: main1-crit {
+-			temperature = <125000>; /* milliCelsius */
+-			hysteresis = <2000>; /* milliCelsius */
+-			type = "critical";
+-		};
+-	};
+-};
+-
+-main2_thermal: main2-thermal {
+-	polling-delay-passive = <250>; /* milliseconds */
+-	polling-delay = <500>; /* milliseconds */
+-	thermal-sensors = <&wkup_vtm0 4>;
+-
+-	trips {
+-		main2_crit: main2-crit {
+-			temperature = <125000>; /* milliCelsius */
+-			hysteresis = <2000>; /* milliCelsius */
+-			type = "critical";
+-		};
+-	};
+-};
+-
+-main3_thermal: main3-thermal {
+-	polling-delay-passive = <250>; /* milliseconds */
+-	polling-delay = <500>; /* milliseconds */
+-	thermal-sensors = <&wkup_vtm0 5>;
+-
+-	trips {
+-		main3_crit: main3-crit {
+-			temperature = <125000>; /* milliCelsius */
+-			hysteresis = <2000>; /* milliCelsius */
+-			type = "critical";
+-		};
+-	};
+-};
+-
+-main4_thermal: main4-thermal {
+-	polling-delay-passive = <250>; /* milliseconds */
+-	polling-delay = <500>; /* milliseconds */
+-	thermal-sensors = <&wkup_vtm0 6>;
+-
+-	trips {
+-		main4_crit: main4-crit {
+-			temperature = <125000>; /* milliCelsius */
+-			hysteresis = <2000>; /* milliCelsius */
+-			type = "critical";
+-		};
+-	};
+-};
+diff --git a/arch/arm64/boot/dts/ti/k3-j784s4.dtsi b/arch/arm64/boot/dts/ti/k3-j784s4.dtsi
+index 5e84c6b4f5ad48..f5afa32157cb80 100644
+--- a/arch/arm64/boot/dts/ti/k3-j784s4.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-j784s4.dtsi
+@@ -8,18 +8,11 @@
+  *
+  */
+ 
+-#include <dt-bindings/interrupt-controller/irq.h>
+-#include <dt-bindings/interrupt-controller/arm-gic.h>
+-#include <dt-bindings/soc/ti,sci_pm_domain.h>
+-
+-#include "k3-pinctrl.h"
++#include "k3-j784s4-j742s2-common.dtsi"
+ 
+ / {
+ 	model = "Texas Instruments K3 J784S4 SoC";
+ 	compatible = "ti,j784s4";
+-	interrupt-parent = <&gic500>;
+-	#address-cells = <2>;
+-	#size-cells = <2>;
+ 
+ 	cpus {
+ 		#address-cells = <1>;
+@@ -174,130 +167,6 @@ cpu7: cpu@103 {
+ 			next-level-cache = <&L2_1>;
+ 		};
+ 	};
+-
+-	L2_0: l2-cache0 {
+-		compatible = "cache";
+-		cache-level = <2>;
+-		cache-unified;
+-		cache-size = <0x200000>;
+-		cache-line-size = <64>;
+-		cache-sets = <1024>;
+-		next-level-cache = <&msmc_l3>;
+-	};
+-
+-	L2_1: l2-cache1 {
+-		compatible = "cache";
+-		cache-level = <2>;
+-		cache-unified;
+-		cache-size = <0x200000>;
+-		cache-line-size = <64>;
+-		cache-sets = <1024>;
+-		next-level-cache = <&msmc_l3>;
+-	};
+-
+-	msmc_l3: l3-cache0 {
+-		compatible = "cache";
+-		cache-level = <3>;
+-		cache-unified;
+-	};
+-
+-	firmware {
+-		optee {
+-			compatible = "linaro,optee-tz";
+-			method = "smc";
+-		};
+-
+-		psci: psci {
+-			compatible = "arm,psci-1.0";
+-			method = "smc";
+-		};
+-	};
+-
+-	a72_timer0: timer-cl0-cpu0 {
+-		compatible = "arm,armv8-timer";
+-		interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>, /* cntpsirq */
+-			     <GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>, /* cntpnsirq */
+-			     <GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>, /* cntvirq */
+-			     <GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>; /* cnthpirq */
+-	};
+-
+-	pmu: pmu {
+-		compatible = "arm,cortex-a72-pmu";
+-		/* Recommendation from GIC500 TRM Table A.3 */
+-		interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH>;
+-	};
+-
+-	cbass_main: bus@100000 {
+-		bootph-all;
+-		compatible = "simple-bus";
+-		#address-cells = <2>;
+-		#size-cells = <2>;
+-		ranges = <0x00 0x00100000 0x00 0x00100000 0x00 0x00020000>, /* ctrl mmr */
+-			 <0x00 0x00600000 0x00 0x00600000 0x00 0x00031100>, /* GPIO */
+-			 <0x00 0x00700000 0x00 0x00700000 0x00 0x00001000>, /* ESM */
+-			 <0x00 0x01000000 0x00 0x01000000 0x00 0x0d000000>, /* Most peripherals */
+-			 <0x00 0x04210000 0x00 0x04210000 0x00 0x00010000>, /* VPU0 */
+-			 <0x00 0x04220000 0x00 0x04220000 0x00 0x00010000>, /* VPU1 */
+-			 <0x00 0x0d000000 0x00 0x0d000000 0x00 0x00800000>, /* PCIe0 Core*/
+-			 <0x00 0x0d800000 0x00 0x0d800000 0x00 0x00800000>, /* PCIe1 Core*/
+-			 <0x00 0x0e000000 0x00 0x0e000000 0x00 0x00800000>, /* PCIe2 Core*/
+-			 <0x00 0x0e800000 0x00 0x0e800000 0x00 0x00800000>, /* PCIe3 Core*/
+-			 <0x00 0x10000000 0x00 0x10000000 0x00 0x08000000>, /* PCIe0 DAT0 */
+-			 <0x00 0x18000000 0x00 0x18000000 0x00 0x08000000>, /* PCIe1 DAT0 */
+-			 <0x00 0x64800000 0x00 0x64800000 0x00 0x0070c000>, /* C71_1 */
+-			 <0x00 0x65800000 0x00 0x65800000 0x00 0x0070c000>, /* C71_2 */
+-			 <0x00 0x66800000 0x00 0x66800000 0x00 0x0070c000>, /* C71_3 */
+-			 <0x00 0x67800000 0x00 0x67800000 0x00 0x0070c000>, /* C71_4 */
+-			 <0x00 0x6f000000 0x00 0x6f000000 0x00 0x00310000>, /* A72 PERIPHBASE */
+-			 <0x00 0x70000000 0x00 0x70000000 0x00 0x00400000>, /* MSMC RAM */
+-			 <0x00 0x30000000 0x00 0x30000000 0x00 0x0c400000>, /* MAIN NAVSS */
+-			 <0x40 0x00000000 0x40 0x00000000 0x01 0x00000000>, /* PCIe0 DAT1 */
+-			 <0x41 0x00000000 0x41 0x00000000 0x01 0x00000000>, /* PCIe1 DAT1 */
+-			 <0x42 0x00000000 0x42 0x00000000 0x01 0x00000000>, /* PCIe2 DAT1 */
+-			 <0x43 0x00000000 0x43 0x00000000 0x01 0x00000000>, /* PCIe3 DAT1 */
+-			 <0x44 0x00000000 0x44 0x00000000 0x00 0x08000000>, /* PCIe2 DAT0 */
+-			 <0x44 0x10000000 0x44 0x10000000 0x00 0x08000000>, /* PCIe3 DAT0 */
+-			 <0x4e 0x20000000 0x4e 0x20000000 0x00 0x00080000>, /* GPU */
+-
+-			 /* MCUSS_WKUP Range */
+-			 <0x00 0x28380000 0x00 0x28380000 0x00 0x03880000>,
+-			 <0x00 0x40200000 0x00 0x40200000 0x00 0x00998400>,
+-			 <0x00 0x40f00000 0x00 0x40f00000 0x00 0x00020000>,
+-			 <0x00 0x41000000 0x00 0x41000000 0x00 0x00020000>,
+-			 <0x00 0x41400000 0x00 0x41400000 0x00 0x00020000>,
+-			 <0x00 0x41c00000 0x00 0x41c00000 0x00 0x00100000>,
+-			 <0x00 0x42040000 0x00 0x42040000 0x00 0x03ac2400>,
+-			 <0x00 0x45100000 0x00 0x45100000 0x00 0x00c24000>,
+-			 <0x00 0x46000000 0x00 0x46000000 0x00 0x00200000>,
+-			 <0x00 0x47000000 0x00 0x47000000 0x00 0x00068400>,
+-			 <0x00 0x50000000 0x00 0x50000000 0x00 0x10000000>,
+-			 <0x04 0x00000000 0x04 0x00000000 0x04 0x00000000>;
+-
+-		cbass_mcu_wakeup: bus@28380000 {
+-			bootph-all;
+-			compatible = "simple-bus";
+-			#address-cells = <2>;
+-			#size-cells = <2>;
+-			ranges = <0x00 0x28380000 0x00 0x28380000 0x00 0x03880000>, /* MCU NAVSS*/
+-				 <0x00 0x40200000 0x00 0x40200000 0x00 0x00998400>, /* First peripheral window */
+-				 <0x00 0x40f00000 0x00 0x40f00000 0x00 0x00020000>, /* CTRL_MMR0 */
+-				 <0x00 0x41000000 0x00 0x41000000 0x00 0x00020000>, /* MCU R5F Core0 */
+-				 <0x00 0x41400000 0x00 0x41400000 0x00 0x00020000>, /* MCU R5F Core1 */
+-				 <0x00 0x41c00000 0x00 0x41c00000 0x00 0x00100000>, /* MCU SRAM */
+-				 <0x00 0x42040000 0x00 0x42040000 0x00 0x03ac2400>, /* WKUP peripheral window */
+-				 <0x00 0x45100000 0x00 0x45100000 0x00 0x00c24000>, /* MMRs, remaining NAVSS */
+-				 <0x00 0x46000000 0x00 0x46000000 0x00 0x00200000>, /* CPSW */
+-				 <0x00 0x47000000 0x00 0x47000000 0x00 0x00068400>, /* OSPI register space */
+-				 <0x00 0x50000000 0x00 0x50000000 0x00 0x10000000>, /* FSS data region 1 */
+-				 <0x04 0x00000000 0x04 0x00000000 0x04 0x00000000>; /* FSS data region 0/3 */
+-		};
+-	};
+-
+-	thermal_zones: thermal-zones {
+-		#include "k3-j784s4-thermal.dtsi"
+-	};
+ };
+ 
+-/* Now include peripherals from each bus segment */
+ #include "k3-j784s4-main.dtsi"
+-#include "k3-j784s4-mcu-wakeup.dtsi"
+diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig
+index e7d9bd8e4709b6..20c7b828e2fbca 100644
+--- a/arch/arm64/crypto/Kconfig
++++ b/arch/arm64/crypto/Kconfig
+@@ -26,10 +26,11 @@ config CRYPTO_NHPOLY1305_NEON
+ 	  - NEON (Advanced SIMD) extensions
+ 
+ config CRYPTO_POLY1305_NEON
+-	tristate "Hash functions: Poly1305 (NEON)"
++	tristate
+ 	depends on KERNEL_MODE_NEON
+ 	select CRYPTO_HASH
+ 	select CRYPTO_ARCH_HAVE_LIB_POLY1305
++	default CRYPTO_LIB_POLY1305_INTERNAL
+ 	help
+ 	  Poly1305 authenticator algorithm (RFC7539)
+ 
+@@ -186,11 +187,12 @@ config CRYPTO_AES_ARM64_NEON_BLK
+ 	  - NEON (Advanced SIMD) extensions
+ 
+ config CRYPTO_CHACHA20_NEON
+-	tristate "Ciphers: ChaCha (NEON)"
++	tristate
+ 	depends on KERNEL_MODE_NEON
+ 	select CRYPTO_SKCIPHER
+ 	select CRYPTO_LIB_CHACHA_GENERIC
+ 	select CRYPTO_ARCH_HAVE_LIB_CHACHA
++	default CRYPTO_LIB_CHACHA_INTERNAL
+ 	help
+ 	  Length-preserving ciphers: ChaCha20, XChaCha20, and XChaCha12
+ 	  stream cipher algorithms
+diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
+index fe9f895138dba5..a7a1f15bcc6724 100644
+--- a/arch/loongarch/Kconfig
++++ b/arch/loongarch/Kconfig
+@@ -68,6 +68,7 @@ config LOONGARCH
+ 	select ARCH_SUPPORTS_NUMA_BALANCING
+ 	select ARCH_USE_BUILTIN_BSWAP
+ 	select ARCH_USE_CMPXCHG_LOCKREF
++	select ARCH_USE_MEMTEST
+ 	select ARCH_USE_QUEUED_RWLOCKS
+ 	select ARCH_USE_QUEUED_SPINLOCKS
+ 	select ARCH_WANT_DEFAULT_BPF_JIT
+diff --git a/arch/loongarch/include/asm/fpu.h b/arch/loongarch/include/asm/fpu.h
+index 3177674228f896..45514f314664d8 100644
+--- a/arch/loongarch/include/asm/fpu.h
++++ b/arch/loongarch/include/asm/fpu.h
+@@ -22,22 +22,29 @@
+ struct sigcontext;
+ 
+ #define kernel_fpu_available() cpu_has_fpu
+-extern void kernel_fpu_begin(void);
+-extern void kernel_fpu_end(void);
+-
+-extern void _init_fpu(unsigned int);
+-extern void _save_fp(struct loongarch_fpu *);
+-extern void _restore_fp(struct loongarch_fpu *);
+-
+-extern void _save_lsx(struct loongarch_fpu *fpu);
+-extern void _restore_lsx(struct loongarch_fpu *fpu);
+-extern void _init_lsx_upper(void);
+-extern void _restore_lsx_upper(struct loongarch_fpu *fpu);
+-
+-extern void _save_lasx(struct loongarch_fpu *fpu);
+-extern void _restore_lasx(struct loongarch_fpu *fpu);
+-extern void _init_lasx_upper(void);
+-extern void _restore_lasx_upper(struct loongarch_fpu *fpu);
++
++void kernel_fpu_begin(void);
++void kernel_fpu_end(void);
++
++asmlinkage void _init_fpu(unsigned int);
++asmlinkage void _save_fp(struct loongarch_fpu *);
++asmlinkage void _restore_fp(struct loongarch_fpu *);
++asmlinkage int _save_fp_context(void __user *fpregs, void __user *fcc, void __user *csr);
++asmlinkage int _restore_fp_context(void __user *fpregs, void __user *fcc, void __user *csr);
++
++asmlinkage void _save_lsx(struct loongarch_fpu *fpu);
++asmlinkage void _restore_lsx(struct loongarch_fpu *fpu);
++asmlinkage void _init_lsx_upper(void);
++asmlinkage void _restore_lsx_upper(struct loongarch_fpu *fpu);
++asmlinkage int _save_lsx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
++asmlinkage int _restore_lsx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
++
++asmlinkage void _save_lasx(struct loongarch_fpu *fpu);
++asmlinkage void _restore_lasx(struct loongarch_fpu *fpu);
++asmlinkage void _init_lasx_upper(void);
++asmlinkage void _restore_lasx_upper(struct loongarch_fpu *fpu);
++asmlinkage int _save_lasx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
++asmlinkage int _restore_lasx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
+ 
+ static inline void enable_lsx(void);
+ static inline void disable_lsx(void);
+diff --git a/arch/loongarch/include/asm/lbt.h b/arch/loongarch/include/asm/lbt.h
+index e671978bf5523f..38566574e56214 100644
+--- a/arch/loongarch/include/asm/lbt.h
++++ b/arch/loongarch/include/asm/lbt.h
+@@ -12,9 +12,13 @@
+ #include <asm/loongarch.h>
+ #include <asm/processor.h>
+ 
+-extern void _init_lbt(void);
+-extern void _save_lbt(struct loongarch_lbt *);
+-extern void _restore_lbt(struct loongarch_lbt *);
++asmlinkage void _init_lbt(void);
++asmlinkage void _save_lbt(struct loongarch_lbt *);
++asmlinkage void _restore_lbt(struct loongarch_lbt *);
++asmlinkage int _save_lbt_context(void __user *regs, void __user *eflags);
++asmlinkage int _restore_lbt_context(void __user *regs, void __user *eflags);
++asmlinkage int _save_ftop_context(void __user *ftop);
++asmlinkage int _restore_ftop_context(void __user *ftop);
+ 
+ static inline int is_lbt_enabled(void)
+ {
+diff --git a/arch/loongarch/include/asm/ptrace.h b/arch/loongarch/include/asm/ptrace.h
+index f3ddaed9ef7f08..a5b63c84f8541a 100644
+--- a/arch/loongarch/include/asm/ptrace.h
++++ b/arch/loongarch/include/asm/ptrace.h
+@@ -33,9 +33,9 @@ struct pt_regs {
+ 	unsigned long __last[];
+ } __aligned(8);
+ 
+-static inline int regs_irqs_disabled(struct pt_regs *regs)
++static __always_inline bool regs_irqs_disabled(struct pt_regs *regs)
+ {
+-	return arch_irqs_disabled_flags(regs->csr_prmd);
++	return !(regs->csr_prmd & CSR_PRMD_PIE);
+ }
+ 
+ static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
+diff --git a/arch/loongarch/kernel/fpu.S b/arch/loongarch/kernel/fpu.S
+index 6ab640101457cc..28caf416ae36e6 100644
+--- a/arch/loongarch/kernel/fpu.S
++++ b/arch/loongarch/kernel/fpu.S
+@@ -458,6 +458,7 @@ SYM_FUNC_START(_save_fp_context)
+ 	li.w		a0, 0				# success
+ 	jr		ra
+ SYM_FUNC_END(_save_fp_context)
++EXPORT_SYMBOL_GPL(_save_fp_context)
+ 
+ /*
+  * a0: fpregs
+@@ -471,6 +472,7 @@ SYM_FUNC_START(_restore_fp_context)
+ 	li.w		a0, 0				# success
+ 	jr		ra
+ SYM_FUNC_END(_restore_fp_context)
++EXPORT_SYMBOL_GPL(_restore_fp_context)
+ 
+ /*
+  * a0: fpregs
+@@ -484,6 +486,7 @@ SYM_FUNC_START(_save_lsx_context)
+ 	li.w	a0, 0					# success
+ 	jr	ra
+ SYM_FUNC_END(_save_lsx_context)
++EXPORT_SYMBOL_GPL(_save_lsx_context)
+ 
+ /*
+  * a0: fpregs
+@@ -497,6 +500,7 @@ SYM_FUNC_START(_restore_lsx_context)
+ 	li.w	a0, 0					# success
+ 	jr	ra
+ SYM_FUNC_END(_restore_lsx_context)
++EXPORT_SYMBOL_GPL(_restore_lsx_context)
+ 
+ /*
+  * a0: fpregs
+@@ -510,6 +514,7 @@ SYM_FUNC_START(_save_lasx_context)
+ 	li.w	a0, 0					# success
+ 	jr	ra
+ SYM_FUNC_END(_save_lasx_context)
++EXPORT_SYMBOL_GPL(_save_lasx_context)
+ 
+ /*
+  * a0: fpregs
+@@ -523,6 +528,7 @@ SYM_FUNC_START(_restore_lasx_context)
+ 	li.w	a0, 0					# success
+ 	jr	ra
+ SYM_FUNC_END(_restore_lasx_context)
++EXPORT_SYMBOL_GPL(_restore_lasx_context)
+ 
+ .L_fpu_fault:
+ 	li.w	a0, -EFAULT				# failure
+diff --git a/arch/loongarch/kernel/lbt.S b/arch/loongarch/kernel/lbt.S
+index 001f061d226ab5..71678912d24ce2 100644
+--- a/arch/loongarch/kernel/lbt.S
++++ b/arch/loongarch/kernel/lbt.S
+@@ -90,6 +90,7 @@ SYM_FUNC_START(_save_lbt_context)
+ 	li.w		a0, 0			# success
+ 	jr		ra
+ SYM_FUNC_END(_save_lbt_context)
++EXPORT_SYMBOL_GPL(_save_lbt_context)
+ 
+ /*
+  * a0: scr
+@@ -110,6 +111,7 @@ SYM_FUNC_START(_restore_lbt_context)
+ 	li.w		a0, 0			# success
+ 	jr		ra
+ SYM_FUNC_END(_restore_lbt_context)
++EXPORT_SYMBOL_GPL(_restore_lbt_context)
+ 
+ /*
+  * a0: ftop
+@@ -120,6 +122,7 @@ SYM_FUNC_START(_save_ftop_context)
+ 	li.w		a0, 0			# success
+ 	jr		ra
+ SYM_FUNC_END(_save_ftop_context)
++EXPORT_SYMBOL_GPL(_save_ftop_context)
+ 
+ /*
+  * a0: ftop
+@@ -150,6 +153,7 @@ SYM_FUNC_START(_restore_ftop_context)
+ 	li.w		a0, 0			# success
+ 	jr		ra
+ SYM_FUNC_END(_restore_ftop_context)
++EXPORT_SYMBOL_GPL(_restore_ftop_context)
+ 
+ .L_lbt_fault:
+ 	li.w		a0, -EFAULT		# failure
+diff --git a/arch/loongarch/kernel/signal.c b/arch/loongarch/kernel/signal.c
+index 7a555b60017193..4740cb5b238898 100644
+--- a/arch/loongarch/kernel/signal.c
++++ b/arch/loongarch/kernel/signal.c
+@@ -51,27 +51,6 @@
+ #define lock_lbt_owner()	({ preempt_disable(); pagefault_disable(); })
+ #define unlock_lbt_owner()	({ pagefault_enable(); preempt_enable(); })
+ 
+-/* Assembly functions to move context to/from the FPU */
+-extern asmlinkage int
+-_save_fp_context(void __user *fpregs, void __user *fcc, void __user *csr);
+-extern asmlinkage int
+-_restore_fp_context(void __user *fpregs, void __user *fcc, void __user *csr);
+-extern asmlinkage int
+-_save_lsx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
+-extern asmlinkage int
+-_restore_lsx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
+-extern asmlinkage int
+-_save_lasx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
+-extern asmlinkage int
+-_restore_lasx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
+-
+-#ifdef CONFIG_CPU_HAS_LBT
+-extern asmlinkage int _save_lbt_context(void __user *regs, void __user *eflags);
+-extern asmlinkage int _restore_lbt_context(void __user *regs, void __user *eflags);
+-extern asmlinkage int _save_ftop_context(void __user *ftop);
+-extern asmlinkage int _restore_ftop_context(void __user *ftop);
+-#endif
+-
+ struct rt_sigframe {
+ 	struct siginfo rs_info;
+ 	struct ucontext rs_uctx;
+diff --git a/arch/loongarch/kernel/traps.c b/arch/loongarch/kernel/traps.c
+index c57b4134f3e84b..00424b7e34c155 100644
+--- a/arch/loongarch/kernel/traps.c
++++ b/arch/loongarch/kernel/traps.c
+@@ -553,9 +553,10 @@ asmlinkage void noinstr do_ale(struct pt_regs *regs)
+ 	die_if_kernel("Kernel ale access", regs);
+ 	force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr);
+ #else
++	bool pie = regs_irqs_disabled(regs);
+ 	unsigned int *pc;
+ 
+-	if (regs->csr_prmd & CSR_PRMD_PIE)
++	if (!pie)
+ 		local_irq_enable();
+ 
+ 	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, regs->csr_badvaddr);
+@@ -582,7 +583,7 @@ asmlinkage void noinstr do_ale(struct pt_regs *regs)
+ 	die_if_kernel("Kernel ale access", regs);
+ 	force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr);
+ out:
+-	if (regs->csr_prmd & CSR_PRMD_PIE)
++	if (!pie)
+ 		local_irq_disable();
+ #endif
+ 	irqentry_exit(regs, state);
+@@ -614,12 +615,13 @@ static void bug_handler(struct pt_regs *regs)
+ asmlinkage void noinstr do_bce(struct pt_regs *regs)
+ {
+ 	bool user = user_mode(regs);
++	bool pie = regs_irqs_disabled(regs);
+ 	unsigned long era = exception_era(regs);
+ 	u64 badv = 0, lower = 0, upper = ULONG_MAX;
+ 	union loongarch_instruction insn;
+ 	irqentry_state_t state = irqentry_enter(regs);
+ 
+-	if (regs->csr_prmd & CSR_PRMD_PIE)
++	if (!pie)
+ 		local_irq_enable();
+ 
+ 	current->thread.trap_nr = read_csr_excode();
+@@ -685,7 +687,7 @@ asmlinkage void noinstr do_bce(struct pt_regs *regs)
+ 	force_sig_bnderr((void __user *)badv, (void __user *)lower, (void __user *)upper);
+ 
+ out:
+-	if (regs->csr_prmd & CSR_PRMD_PIE)
++	if (!pie)
+ 		local_irq_disable();
+ 
+ 	irqentry_exit(regs, state);
+@@ -703,11 +705,12 @@ asmlinkage void noinstr do_bce(struct pt_regs *regs)
+ asmlinkage void noinstr do_bp(struct pt_regs *regs)
+ {
+ 	bool user = user_mode(regs);
++	bool pie = regs_irqs_disabled(regs);
+ 	unsigned int opcode, bcode;
+ 	unsigned long era = exception_era(regs);
+ 	irqentry_state_t state = irqentry_enter(regs);
+ 
+-	if (regs->csr_prmd & CSR_PRMD_PIE)
++	if (!pie)
+ 		local_irq_enable();
+ 
+ 	if (__get_inst(&opcode, (u32 *)era, user))
+@@ -773,7 +776,7 @@ asmlinkage void noinstr do_bp(struct pt_regs *regs)
+ 	}
+ 
+ out:
+-	if (regs->csr_prmd & CSR_PRMD_PIE)
++	if (!pie)
+ 		local_irq_disable();
+ 
+ 	irqentry_exit(regs, state);
+@@ -1008,6 +1011,7 @@ static void init_restore_lbt(void)
+ 
+ asmlinkage void noinstr do_lbt(struct pt_regs *regs)
+ {
++	bool pie = regs_irqs_disabled(regs);
+ 	irqentry_state_t state = irqentry_enter(regs);
+ 
+ 	/*
+@@ -1017,7 +1021,7 @@ asmlinkage void noinstr do_lbt(struct pt_regs *regs)
+ 	 * (including the user using 'MOVGR2GCSR' to turn on TM, which
+ 	 * will not trigger the BTE), we need to check PRMD first.
+ 	 */
+-	if (regs->csr_prmd & CSR_PRMD_PIE)
++	if (!pie)
+ 		local_irq_enable();
+ 
+ 	if (!cpu_has_lbt) {
+@@ -1031,7 +1035,7 @@ asmlinkage void noinstr do_lbt(struct pt_regs *regs)
+ 	preempt_enable();
+ 
+ out:
+-	if (regs->csr_prmd & CSR_PRMD_PIE)
++	if (!pie)
+ 		local_irq_disable();
+ 
+ 	irqentry_exit(regs, state);
+diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c
+index e7a084de64f7bf..4b0ae29b8acab8 100644
+--- a/arch/loongarch/kvm/vcpu.c
++++ b/arch/loongarch/kvm/vcpu.c
+@@ -294,6 +294,7 @@ static int kvm_pre_enter_guest(struct kvm_vcpu *vcpu)
+ 		vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST;
+ 
+ 		if (kvm_request_pending(vcpu) || xfer_to_guest_mode_work_pending()) {
++			kvm_lose_pmu(vcpu);
+ 			/* make sure the vcpu mode has been written */
+ 			smp_store_mb(vcpu->mode, OUTSIDE_GUEST_MODE);
+ 			local_irq_enable();
+@@ -874,6 +875,13 @@ static int kvm_set_one_reg(struct kvm_vcpu *vcpu,
+ 			vcpu->arch.st.guest_addr = 0;
+ 			memset(&vcpu->arch.irq_pending, 0, sizeof(vcpu->arch.irq_pending));
+ 			memset(&vcpu->arch.irq_clear, 0, sizeof(vcpu->arch.irq_clear));
++
++			/*
++			 * When the vCPU is reset, clear the ESTAT and GINTC registers.
++			 * Other CSR registers are cleared with function _kvm_setcsr().
++			 */
++			kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_GINTC, 0);
++			kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_ESTAT, 0);
+ 			break;
+ 		default:
+ 			ret = -EINVAL;
+diff --git a/arch/loongarch/mm/hugetlbpage.c b/arch/loongarch/mm/hugetlbpage.c
+index e4068906143b33..cea84d7f2b91a1 100644
+--- a/arch/loongarch/mm/hugetlbpage.c
++++ b/arch/loongarch/mm/hugetlbpage.c
+@@ -47,7 +47,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
+ 				pmd = pmd_offset(pud, addr);
+ 		}
+ 	}
+-	return (pte_t *) pmd;
++	return pmd_none(pmdp_get(pmd)) ? NULL : (pte_t *) pmd;
+ }
+ 
+ uint64_t pmd_to_entrylo(unsigned long pmd_val)
+diff --git a/arch/loongarch/mm/init.c b/arch/loongarch/mm/init.c
+index 188b52bbb25427..61497f9c3fef70 100644
+--- a/arch/loongarch/mm/init.c
++++ b/arch/loongarch/mm/init.c
+@@ -65,9 +65,6 @@ void __init paging_init(void)
+ {
+ 	unsigned long max_zone_pfns[MAX_NR_ZONES];
+ 
+-#ifdef CONFIG_ZONE_DMA
+-	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
+-#endif
+ #ifdef CONFIG_ZONE_DMA32
+ 	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
+ #endif
+diff --git a/arch/mips/crypto/Kconfig b/arch/mips/crypto/Kconfig
+index 9003a5c1e879fa..ee9604fd203729 100644
+--- a/arch/mips/crypto/Kconfig
++++ b/arch/mips/crypto/Kconfig
+@@ -12,9 +12,11 @@ config CRYPTO_CRC32_MIPS
+ 	  Architecture: mips
+ 
+ config CRYPTO_POLY1305_MIPS
+-	tristate "Hash functions: Poly1305"
++	tristate
+ 	depends on MIPS
++	select CRYPTO_HASH
+ 	select CRYPTO_ARCH_HAVE_LIB_POLY1305
++	default CRYPTO_LIB_POLY1305_INTERNAL
+ 	help
+ 	  Poly1305 authenticator algorithm (RFC7539)
+ 
+@@ -61,10 +63,11 @@ config CRYPTO_SHA512_OCTEON
+ 	  Architecture: mips OCTEON using crypto instructions, when available
+ 
+ config CRYPTO_CHACHA_MIPS
+-	tristate "Ciphers: ChaCha20, XChaCha20, XChaCha12 (MIPS32r2)"
++	tristate
+ 	depends on CPU_MIPS32_R2
+ 	select CRYPTO_SKCIPHER
+ 	select CRYPTO_ARCH_HAVE_LIB_CHACHA
++	default CRYPTO_LIB_CHACHA_INTERNAL
+ 	help
+ 	  Length-preserving ciphers: ChaCha20, XChaCha20, and XChaCha12
+ 	  stream cipher algorithms
+diff --git a/arch/mips/include/asm/mips-cm.h b/arch/mips/include/asm/mips-cm.h
+index 1e782275850a36..9fb50827090acc 100644
+--- a/arch/mips/include/asm/mips-cm.h
++++ b/arch/mips/include/asm/mips-cm.h
+@@ -59,6 +59,16 @@ extern phys_addr_t mips_cm_l2sync_phys_base(void);
+  */
+ extern int mips_cm_is64;
+ 
++/*
++ * mips_cm_is_l2_hci_broken  - determine if HCI is broken
++ *
++ * Some CM reports show that Hardware Cache Initialization is
++ * complete, but in reality it's not the case. They also incorrectly
++ * indicate that Hardware Cache Initialization is supported. This
++ * flag allows warning about this broken feature.
++ */
++extern bool mips_cm_is_l2_hci_broken;
++
+ /**
+  * mips_cm_error_report - Report CM cache errors
+  */
+@@ -97,6 +107,18 @@ static inline bool mips_cm_present(void)
+ #endif
+ }
+ 
++/**
++ * mips_cm_update_property - update property from the device tree
++ *
++ * Retrieve the properties from the device tree if a CM node exists and
++ * update the internal variable based on this.
++ */
++#ifdef CONFIG_MIPS_CM
++extern void mips_cm_update_property(void);
++#else
++static inline void mips_cm_update_property(void) {}
++#endif
++
+ /**
+  * mips_cm_has_l2sync - determine whether an L2-only sync region is present
+  *
+diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c
+index 3eb2cfb893e19c..9cfabaa94d010f 100644
+--- a/arch/mips/kernel/mips-cm.c
++++ b/arch/mips/kernel/mips-cm.c
+@@ -5,6 +5,7 @@
+  */
+ 
+ #include <linux/errno.h>
++#include <linux/of.h>
+ #include <linux/percpu.h>
+ #include <linux/spinlock.h>
+ 
+@@ -14,6 +15,7 @@
+ void __iomem *mips_gcr_base;
+ void __iomem *mips_cm_l2sync_base;
+ int mips_cm_is64;
++bool mips_cm_is_l2_hci_broken;
+ 
+ static char *cm2_tr[8] = {
+ 	"mem",	"gcr",	"gic",	"mmio",
+@@ -237,6 +239,18 @@ static void mips_cm_probe_l2sync(void)
+ 	mips_cm_l2sync_base = ioremap(addr, MIPS_CM_L2SYNC_SIZE);
+ }
+ 
++void mips_cm_update_property(void)
++{
++	struct device_node *cm_node;
++
++	cm_node = of_find_compatible_node(of_root, NULL, "mobileye,eyeq6-cm");
++	if (!cm_node)
++		return;
++	pr_info("HCI (Hardware Cache Init for the L2 cache) in GCR_L2_RAM_CONFIG from the CM3 is broken");
++	mips_cm_is_l2_hci_broken = true;
++	of_node_put(cm_node);
++}
++
+ int mips_cm_probe(void)
+ {
+ 	phys_addr_t addr;
+diff --git a/arch/parisc/kernel/pdt.c b/arch/parisc/kernel/pdt.c
+index 0f9b3b5914cf69..b70b67adb855f6 100644
+--- a/arch/parisc/kernel/pdt.c
++++ b/arch/parisc/kernel/pdt.c
+@@ -63,6 +63,7 @@ static unsigned long pdt_entry[MAX_PDT_ENTRIES] __page_aligned_bss;
+ #define PDT_ADDR_PERM_ERR	(pdt_type != PDT_PDC ? 2UL : 0UL)
+ #define PDT_ADDR_SINGLE_ERR	1UL
+ 
++#ifdef CONFIG_PROC_FS
+ /* report PDT entries via /proc/meminfo */
+ void arch_report_meminfo(struct seq_file *m)
+ {
+@@ -74,6 +75,7 @@ void arch_report_meminfo(struct seq_file *m)
+ 	seq_printf(m, "PDT_cur_entries: %7lu\n",
+ 			pdt_status.pdt_entries);
+ }
++#endif
+ 
+ static int get_info_pat_new(void)
+ {
+diff --git a/arch/powerpc/crypto/Kconfig b/arch/powerpc/crypto/Kconfig
+index 46a4c85e85e245..7012fa55aceb91 100644
+--- a/arch/powerpc/crypto/Kconfig
++++ b/arch/powerpc/crypto/Kconfig
+@@ -3,10 +3,12 @@
+ menu "Accelerated Cryptographic Algorithms for CPU (powerpc)"
+ 
+ config CRYPTO_CURVE25519_PPC64
+-	tristate "Public key crypto: Curve25519 (PowerPC64)"
++	tristate
+ 	depends on PPC64 && CPU_LITTLE_ENDIAN
++	select CRYPTO_KPP
+ 	select CRYPTO_LIB_CURVE25519_GENERIC
+ 	select CRYPTO_ARCH_HAVE_LIB_CURVE25519
++	default CRYPTO_LIB_CURVE25519_INTERNAL
+ 	help
+ 	  Curve25519 algorithm
+ 
+@@ -124,11 +126,12 @@ config CRYPTO_AES_GCM_P10
+ 	  later CPU. This module supports stitched acceleration for AES/GCM.
+ 
+ config CRYPTO_CHACHA20_P10
+-	tristate "Ciphers: ChaCha20, XChacha20, XChacha12 (P10 or later)"
++	tristate
+ 	depends on PPC64 && CPU_LITTLE_ENDIAN && VSX
+ 	select CRYPTO_SKCIPHER
+ 	select CRYPTO_LIB_CHACHA_GENERIC
+ 	select CRYPTO_ARCH_HAVE_LIB_CHACHA
++	default CRYPTO_LIB_CHACHA_INTERNAL
+ 	help
+ 	  Length-preserving ciphers: ChaCha20, XChaCha20, and XChaCha12
+ 	  stream cipher algorithms
+diff --git a/arch/riscv/crypto/Kconfig b/arch/riscv/crypto/Kconfig
+index ad58dad9a58076..c67095a3d66907 100644
+--- a/arch/riscv/crypto/Kconfig
++++ b/arch/riscv/crypto/Kconfig
+@@ -22,7 +22,6 @@ config CRYPTO_CHACHA_RISCV64
+ 	tristate "Ciphers: ChaCha"
+ 	depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO
+ 	select CRYPTO_SKCIPHER
+-	select CRYPTO_LIB_CHACHA_GENERIC
+ 	help
+ 	  Length-preserving ciphers: ChaCha20 stream cipher algorithm
+ 
+diff --git a/arch/riscv/include/asm/alternative-macros.h b/arch/riscv/include/asm/alternative-macros.h
+index 721ec275ce57e3..231d777d936c2d 100644
+--- a/arch/riscv/include/asm/alternative-macros.h
++++ b/arch/riscv/include/asm/alternative-macros.h
+@@ -115,24 +115,19 @@
+ 	\old_c
+ .endm
+ 
+-#define _ALTERNATIVE_CFG(old_c, ...)	\
+-	ALTERNATIVE_CFG old_c
+-
+-#define _ALTERNATIVE_CFG_2(old_c, ...)	\
+-	ALTERNATIVE_CFG old_c
++#define __ALTERNATIVE_CFG(old_c, ...)		ALTERNATIVE_CFG old_c
++#define __ALTERNATIVE_CFG_2(old_c, ...)		ALTERNATIVE_CFG old_c
+ 
+ #else /* !__ASSEMBLY__ */
+ 
+-#define __ALTERNATIVE_CFG(old_c)	\
+-	old_c "\n"
++#define __ALTERNATIVE_CFG(old_c, ...)		old_c "\n"
++#define __ALTERNATIVE_CFG_2(old_c, ...)		old_c "\n"
+ 
+-#define _ALTERNATIVE_CFG(old_c, ...)	\
+-	__ALTERNATIVE_CFG(old_c)
++#endif /* __ASSEMBLY__ */
+ 
+-#define _ALTERNATIVE_CFG_2(old_c, ...)	\
+-	__ALTERNATIVE_CFG(old_c)
++#define _ALTERNATIVE_CFG(old_c, ...)		__ALTERNATIVE_CFG(old_c)
++#define _ALTERNATIVE_CFG_2(old_c, ...)		__ALTERNATIVE_CFG_2(old_c)
+ 
+-#endif /* __ASSEMBLY__ */
+ #endif /* CONFIG_RISCV_ALTERNATIVE */
+ 
+ /*
+diff --git a/arch/riscv/include/asm/cacheflush.h b/arch/riscv/include/asm/cacheflush.h
+index 8de73f91bfa371..b59ffeb668d6a5 100644
+--- a/arch/riscv/include/asm/cacheflush.h
++++ b/arch/riscv/include/asm/cacheflush.h
+@@ -34,11 +34,6 @@ static inline void flush_dcache_page(struct page *page)
+ 	flush_dcache_folio(page_folio(page));
+ }
+ 
+-/*
+- * RISC-V doesn't have an instruction to flush parts of the instruction cache,
+- * so instead we just flush the whole thing.
+- */
+-#define flush_icache_range(start, end) flush_icache_all()
+ #define flush_icache_user_page(vma, pg, addr, len)	\
+ do {							\
+ 	if (vma->vm_flags & VM_EXEC)			\
+@@ -78,6 +73,16 @@ void flush_icache_mm(struct mm_struct *mm, bool local);
+ 
+ #endif /* CONFIG_SMP */
+ 
++/*
++ * RISC-V doesn't have an instruction to flush parts of the instruction cache,
++ * so instead we just flush the whole thing.
++ */
++#define flush_icache_range flush_icache_range
++static inline void flush_icache_range(unsigned long start, unsigned long end)
++{
++	flush_icache_all();
++}
++
+ extern unsigned int riscv_cbom_block_size;
+ extern unsigned int riscv_cboz_block_size;
+ void riscv_init_cbo_blocksizes(void);
+diff --git a/arch/riscv/kernel/probes/uprobes.c b/arch/riscv/kernel/probes/uprobes.c
+index 4b3dc8beaf77d3..cc15f7ca6cc17b 100644
+--- a/arch/riscv/kernel/probes/uprobes.c
++++ b/arch/riscv/kernel/probes/uprobes.c
+@@ -167,6 +167,7 @@ void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
+ 	/* Initialize the slot */
+ 	void *kaddr = kmap_atomic(page);
+ 	void *dst = kaddr + (vaddr & ~PAGE_MASK);
++	unsigned long start = (unsigned long)dst;
+ 
+ 	memcpy(dst, src, len);
+ 
+@@ -176,13 +177,6 @@ void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
+ 		*(uprobe_opcode_t *)dst = __BUG_INSN_32;
+ 	}
+ 
++	flush_icache_range(start, start + len);
+ 	kunmap_atomic(kaddr);
+-
+-	/*
+-	 * We probably need flush_icache_user_page() but it needs vma.
+-	 * This should work on most of architectures by default. If
+-	 * architecture needs to do something different it can define
+-	 * its own version of the function.
+-	 */
+-	flush_dcache_page(page);
+ }
+diff --git a/arch/s390/crypto/Kconfig b/arch/s390/crypto/Kconfig
+index d3eb3a23369321..16ced220393579 100644
+--- a/arch/s390/crypto/Kconfig
++++ b/arch/s390/crypto/Kconfig
+@@ -120,11 +120,12 @@ config CRYPTO_DES_S390
+ 	  As of z196 the CTR mode is hardware accelerated.
+ 
+ config CRYPTO_CHACHA_S390
+-	tristate "Ciphers: ChaCha20"
++	tristate
+ 	depends on S390
+ 	select CRYPTO_SKCIPHER
+ 	select CRYPTO_LIB_CHACHA_GENERIC
+ 	select CRYPTO_ARCH_HAVE_LIB_CHACHA
++	default CRYPTO_LIB_CHACHA_INTERNAL
+ 	help
+ 	  Length-preserving cipher: ChaCha20 stream cipher (RFC 7539)
+ 
+diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
+index b16352083ff987..f0be263b334ced 100644
+--- a/arch/s390/kvm/intercept.c
++++ b/arch/s390/kvm/intercept.c
+@@ -94,7 +94,7 @@ static int handle_validity(struct kvm_vcpu *vcpu)
+ 
+ 	vcpu->stat.exit_validity++;
+ 	trace_kvm_s390_intercept_validity(vcpu, viwhy);
+-	KVM_EVENT(3, "validity intercept 0x%x for pid %u (kvm 0x%pK)", viwhy,
++	KVM_EVENT(3, "validity intercept 0x%x for pid %u (kvm 0x%p)", viwhy,
+ 		  current->pid, vcpu->kvm);
+ 
+ 	/* do not warn on invalid runtime instrumentation mode */
+diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
+index 4f0e7f61edf788..bc65fa6dc15550 100644
+--- a/arch/s390/kvm/interrupt.c
++++ b/arch/s390/kvm/interrupt.c
+@@ -3161,7 +3161,7 @@ void kvm_s390_gisa_clear(struct kvm *kvm)
+ 	if (!gi->origin)
+ 		return;
+ 	gisa_clear_ipm(gi->origin);
+-	VM_EVENT(kvm, 3, "gisa 0x%pK cleared", gi->origin);
++	VM_EVENT(kvm, 3, "gisa 0x%p cleared", gi->origin);
+ }
+ 
+ void kvm_s390_gisa_init(struct kvm *kvm)
+@@ -3178,7 +3178,7 @@ void kvm_s390_gisa_init(struct kvm *kvm)
+ 	gi->timer.function = gisa_vcpu_kicker;
+ 	memset(gi->origin, 0, sizeof(struct kvm_s390_gisa));
+ 	gi->origin->next_alert = (u32)virt_to_phys(gi->origin);
+-	VM_EVENT(kvm, 3, "gisa 0x%pK initialized", gi->origin);
++	VM_EVENT(kvm, 3, "gisa 0x%p initialized", gi->origin);
+ }
+ 
+ void kvm_s390_gisa_enable(struct kvm *kvm)
+@@ -3219,7 +3219,7 @@ void kvm_s390_gisa_destroy(struct kvm *kvm)
+ 		process_gib_alert_list();
+ 	hrtimer_cancel(&gi->timer);
+ 	gi->origin = NULL;
+-	VM_EVENT(kvm, 3, "gisa 0x%pK destroyed", gisa);
++	VM_EVENT(kvm, 3, "gisa 0x%p destroyed", gisa);
+ }
+ 
+ void kvm_s390_gisa_disable(struct kvm *kvm)
+@@ -3468,7 +3468,7 @@ int __init kvm_s390_gib_init(u8 nisc)
+ 		}
+ 	}
+ 
+-	KVM_EVENT(3, "gib 0x%pK (nisc=%d) initialized", gib, gib->nisc);
++	KVM_EVENT(3, "gib 0x%p (nisc=%d) initialized", gib, gib->nisc);
+ 	goto out;
+ 
+ out_unreg_gal:
+diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
+index bb7134faaebff3..286a224c81ee42 100644
+--- a/arch/s390/kvm/kvm-s390.c
++++ b/arch/s390/kvm/kvm-s390.c
+@@ -998,7 +998,7 @@ static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *att
+ 		}
+ 		mutex_unlock(&kvm->lock);
+ 		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
+-		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
++		VM_EVENT(kvm, 3, "New guest asce: 0x%p",
+ 			 (void *) kvm->arch.gmap->asce);
+ 		break;
+ 	}
+@@ -3421,7 +3421,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
+ 		kvm_s390_gisa_init(kvm);
+ 	INIT_LIST_HEAD(&kvm->arch.pv.need_cleanup);
+ 	kvm->arch.pv.set_aside = NULL;
+-	KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);
++	KVM_EVENT(3, "vm 0x%p created by pid %u", kvm, current->pid);
+ 
+ 	return 0;
+ out_err:
+@@ -3484,7 +3484,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
+ 	kvm_s390_destroy_adapters(kvm);
+ 	kvm_s390_clear_float_irqs(kvm);
+ 	kvm_s390_vsie_destroy(kvm);
+-	KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
++	KVM_EVENT(3, "vm 0x%p destroyed", kvm);
+ }
+ 
+ /* Section: vcpu related */
+@@ -3605,7 +3605,7 @@ static int sca_switch_to_extended(struct kvm *kvm)
+ 
+ 	free_page((unsigned long)old_sca);
+ 
+-	VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
++	VM_EVENT(kvm, 2, "Switched to ESCA (0x%p -> 0x%p)",
+ 		 old_sca, kvm->arch.sca);
+ 	return 0;
+ }
+@@ -3978,7 +3978,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
+ 			goto out_free_sie_block;
+ 	}
+ 
+-	VM_EVENT(vcpu->kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK",
++	VM_EVENT(vcpu->kvm, 3, "create cpu %d at 0x%p, sie block at 0x%p",
+ 		 vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
+ 	trace_kvm_s390_create_vcpu(vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
+ 
+diff --git a/arch/s390/kvm/trace-s390.h b/arch/s390/kvm/trace-s390.h
+index 9ac92dbf680dbb..9e28f165c114ca 100644
+--- a/arch/s390/kvm/trace-s390.h
++++ b/arch/s390/kvm/trace-s390.h
+@@ -56,7 +56,7 @@ TRACE_EVENT(kvm_s390_create_vcpu,
+ 		    __entry->sie_block = sie_block;
+ 		    ),
+ 
+-	    TP_printk("create cpu %d at 0x%pK, sie block at 0x%pK",
++	    TP_printk("create cpu %d at 0x%p, sie block at 0x%p",
+ 		      __entry->id, __entry->vcpu, __entry->sie_block)
+ 	);
+ 
+@@ -255,7 +255,7 @@ TRACE_EVENT(kvm_s390_enable_css,
+ 		    __entry->kvm = kvm;
+ 		    ),
+ 
+-	    TP_printk("enabling channel I/O support (kvm @ %pK)\n",
++	    TP_printk("enabling channel I/O support (kvm @ %p)\n",
+ 		      __entry->kvm)
+ 	);
+ 
+diff --git a/arch/um/include/linux/time-internal.h b/arch/um/include/linux/time-internal.h
+index b22226634ff609..138908b999d76c 100644
+--- a/arch/um/include/linux/time-internal.h
++++ b/arch/um/include/linux/time-internal.h
+@@ -83,6 +83,8 @@ extern void time_travel_not_configured(void);
+ #define time_travel_del_event(...) time_travel_not_configured()
+ #endif /* CONFIG_UML_TIME_TRAVEL_SUPPORT */
+ 
++extern unsigned long tt_extra_sched_jiffies;
++
+ /*
+  * Without CONFIG_UML_TIME_TRAVEL_SUPPORT this is a linker error if used,
+  * which is intentional since we really shouldn't link it in that case.
+diff --git a/arch/um/kernel/skas/syscall.c b/arch/um/kernel/skas/syscall.c
+index b09e85279d2b8c..a5beaea2967ec3 100644
+--- a/arch/um/kernel/skas/syscall.c
++++ b/arch/um/kernel/skas/syscall.c
+@@ -31,6 +31,17 @@ void handle_syscall(struct uml_pt_regs *r)
+ 		goto out;
+ 
+ 	syscall = UPT_SYSCALL_NR(r);
++
++	/*
++	 * If no time passes, then sched_yield may not actually yield, causing
++	 * broken spinlock implementations in userspace (ASAN) to hang for long
++	 * periods of time.
++	 */
++	if ((time_travel_mode == TT_MODE_INFCPU ||
++	     time_travel_mode == TT_MODE_EXTERNAL) &&
++	    syscall == __NR_sched_yield)
++		tt_extra_sched_jiffies += 1;
++
+ 	if (syscall >= 0 && syscall < __NR_syscalls) {
+ 		unsigned long ret = EXECUTE_SYSCALL(syscall, regs);
+ 
+diff --git a/arch/x86/crypto/Kconfig b/arch/x86/crypto/Kconfig
+index 7b1bebed879df3..46b53ab061657a 100644
+--- a/arch/x86/crypto/Kconfig
++++ b/arch/x86/crypto/Kconfig
+@@ -3,10 +3,12 @@
+ menu "Accelerated Cryptographic Algorithms for CPU (x86)"
+ 
+ config CRYPTO_CURVE25519_X86
+-	tristate "Public key crypto: Curve25519 (ADX)"
++	tristate
+ 	depends on X86 && 64BIT
++	select CRYPTO_KPP
+ 	select CRYPTO_LIB_CURVE25519_GENERIC
+ 	select CRYPTO_ARCH_HAVE_LIB_CURVE25519
++	default CRYPTO_LIB_CURVE25519_INTERNAL
+ 	help
+ 	  Curve25519 algorithm
+ 
+@@ -348,11 +350,12 @@ config CRYPTO_ARIA_GFNI_AVX512_X86_64
+ 	  Processes 64 blocks in parallel.
+ 
+ config CRYPTO_CHACHA20_X86_64
+-	tristate "Ciphers: ChaCha20, XChaCha20, XChaCha12 (SSSE3/AVX2/AVX-512VL)"
++	tristate
+ 	depends on X86 && 64BIT
+ 	select CRYPTO_SKCIPHER
+ 	select CRYPTO_LIB_CHACHA_GENERIC
+ 	select CRYPTO_ARCH_HAVE_LIB_CHACHA
++	default CRYPTO_LIB_CHACHA_INTERNAL
+ 	help
+ 	  Length-preserving ciphers: ChaCha20, XChaCha20, and XChaCha12
+ 	  stream cipher algorithms
+@@ -417,10 +420,12 @@ config CRYPTO_POLYVAL_CLMUL_NI
+ 	  - CLMUL-NI (carry-less multiplication new instructions)
+ 
+ config CRYPTO_POLY1305_X86_64
+-	tristate "Hash functions: Poly1305 (SSE2/AVX2)"
++	tristate
+ 	depends on X86 && 64BIT
++	select CRYPTO_HASH
+ 	select CRYPTO_LIB_POLY1305_GENERIC
+ 	select CRYPTO_ARCH_HAVE_LIB_POLY1305
++	default CRYPTO_LIB_POLY1305_INTERNAL
+ 	help
+ 	  Poly1305 authenticator algorithm (RFC7539)
+ 
+diff --git a/arch/x86/entry/entry.S b/arch/x86/entry/entry.S
+index b7ea3e8e9eccd5..58e3124ee2b420 100644
+--- a/arch/x86/entry/entry.S
++++ b/arch/x86/entry/entry.S
+@@ -18,7 +18,7 @@
+ 
+ SYM_FUNC_START(entry_ibpb)
+ 	movl	$MSR_IA32_PRED_CMD, %ecx
+-	movl	$PRED_CMD_IBPB, %eax
++	movl	_ASM_RIP(x86_pred_cmd), %eax
+ 	xorl	%edx, %edx
+ 	wrmsr
+ 
+diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
+index 0d33c85da45355..d737d53d03aa94 100644
+--- a/arch/x86/events/core.c
++++ b/arch/x86/events/core.c
+@@ -628,7 +628,7 @@ int x86_pmu_hw_config(struct perf_event *event)
+ 	if (event->attr.type == event->pmu->type)
+ 		event->hw.config |= x86_pmu_get_event_config(event);
+ 
+-	if (!event->attr.freq && x86_pmu.limit_period) {
++	if (is_sampling_event(event) && !event->attr.freq && x86_pmu.limit_period) {
+ 		s64 left = event->attr.sample_period;
+ 		x86_pmu.limit_period(event, &left);
+ 		if (left > event->attr.sample_period)
+diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
+index 913fd3a7bac650..64fa42175a1577 100644
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -449,6 +449,7 @@
+ #define X86_FEATURE_SME_COHERENT	(19*32+10) /* AMD hardware-enforced cache coherency */
+ #define X86_FEATURE_DEBUG_SWAP		(19*32+14) /* "debug_swap" AMD SEV-ES full debug state swap support */
+ #define X86_FEATURE_SVSM		(19*32+28) /* "svsm" SVSM present */
++#define X86_FEATURE_HV_INUSE_WR_ALLOWED	(19*32+30) /* Allow Write to in-use hypervisor-owned pages */
+ 
+ /* AMD-defined Extended Feature 2 EAX, CPUID level 0x80000021 (EAX), word 20 */
+ #define X86_FEATURE_NO_NESTED_DATA_BP	(20*32+ 0) /* No Nested Data Breakpoints */
+diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
+index 1a42f829667a37..62d8b9448dc5c5 100644
+--- a/arch/x86/include/asm/intel-family.h
++++ b/arch/x86/include/asm/intel-family.h
+@@ -115,6 +115,8 @@
+ #define INTEL_GRANITERAPIDS_X		IFM(6, 0xAD)
+ #define INTEL_GRANITERAPIDS_D		IFM(6, 0xAE)
+ 
++#define INTEL_BARTLETTLAKE		IFM(6, 0xD7) /* Raptor Cove */
++
+ /* "Hybrid" Processors (P-Core/E-Core) */
+ 
+ #define INTEL_LAKEFIELD			IFM(6, 0x8A) /* Sunny Cove / Tremont */
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 5fba44a4f988c0..46bddb5bb15ffb 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -1578,7 +1578,7 @@ static void __init spec_ctrl_disable_kernel_rrsba(void)
+ 	rrsba_disabled = true;
+ }
+ 
+-static void __init spectre_v2_determine_rsb_fill_type_at_vmexit(enum spectre_v2_mitigation mode)
++static void __init spectre_v2_select_rsb_mitigation(enum spectre_v2_mitigation mode)
+ {
+ 	/*
+ 	 * Similar to context switches, there are two types of RSB attacks
+@@ -1602,27 +1602,30 @@ static void __init spectre_v2_determine_rsb_fill_type_at_vmexit(enum spectre_v2_
+ 	 */
+ 	switch (mode) {
+ 	case SPECTRE_V2_NONE:
+-		return;
++		break;
+ 
+-	case SPECTRE_V2_EIBRS_LFENCE:
+ 	case SPECTRE_V2_EIBRS:
++	case SPECTRE_V2_EIBRS_LFENCE:
++	case SPECTRE_V2_EIBRS_RETPOLINE:
+ 		if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
+-			setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT_LITE);
+ 			pr_info("Spectre v2 / PBRSB-eIBRS: Retire a single CALL on VMEXIT\n");
++			setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT_LITE);
+ 		}
+-		return;
++		break;
+ 
+-	case SPECTRE_V2_EIBRS_RETPOLINE:
+ 	case SPECTRE_V2_RETPOLINE:
+ 	case SPECTRE_V2_LFENCE:
+ 	case SPECTRE_V2_IBRS:
++		pr_info("Spectre v2 / SpectreRSB: Filling RSB on context switch and VMEXIT\n");
++		setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
+ 		setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT);
+-		pr_info("Spectre v2 / SpectreRSB : Filling RSB on VMEXIT\n");
+-		return;
+-	}
++		break;
+ 
+-	pr_warn_once("Unknown Spectre v2 mode, disabling RSB mitigation at VM exit");
+-	dump_stack();
++	default:
++		pr_warn_once("Unknown Spectre v2 mode, disabling RSB mitigation\n");
++		dump_stack();
++		break;
++	}
+ }
+ 
+ /*
+@@ -1854,10 +1857,7 @@ static void __init spectre_v2_select_mitigation(void)
+ 	 *
+ 	 * FIXME: Is this pointless for retbleed-affected AMD?
+ 	 */
+-	setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
+-	pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
+-
+-	spectre_v2_determine_rsb_fill_type_at_vmexit(mode);
++	spectre_v2_select_rsb_mitigation(mode);
+ 
+ 	/*
+ 	 * Retpoline protects the kernel, but doesn't protect firmware.  IBRS
+diff --git a/arch/x86/kernel/i8253.c b/arch/x86/kernel/i8253.c
+index 80e262bb627fe1..cb9852ad609893 100644
+--- a/arch/x86/kernel/i8253.c
++++ b/arch/x86/kernel/i8253.c
+@@ -46,7 +46,8 @@ bool __init pit_timer_init(void)
+ 		 * VMMs otherwise steal CPU time just to pointlessly waggle
+ 		 * the (masked) IRQ.
+ 		 */
+-		clockevent_i8253_disable();
++		scoped_guard(irq)
++			clockevent_i8253_disable();
+ 		return false;
+ 	}
+ 	clockevent_i8253_init(true);
+diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
+index 4b74ea91f4e6bb..63dea8ecd7efc3 100644
+--- a/arch/x86/kvm/svm/avic.c
++++ b/arch/x86/kvm/svm/avic.c
+@@ -820,7 +820,7 @@ static int svm_ir_list_add(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
+ 	 * Allocating new amd_iommu_pi_data, which will get
+ 	 * add to the per-vcpu ir_list.
+ 	 */
+-	ir = kzalloc(sizeof(struct amd_svm_iommu_ir), GFP_KERNEL_ACCOUNT);
++	ir = kzalloc(sizeof(struct amd_svm_iommu_ir), GFP_ATOMIC | __GFP_ACCOUNT);
+ 	if (!ir) {
+ 		ret = -ENOMEM;
+ 		goto out;
+@@ -896,6 +896,7 @@ int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
+ {
+ 	struct kvm_kernel_irq_routing_entry *e;
+ 	struct kvm_irq_routing_table *irq_rt;
++	bool enable_remapped_mode = true;
+ 	int idx, ret = 0;
+ 
+ 	if (!kvm_arch_has_assigned_device(kvm) ||
+@@ -933,6 +934,8 @@ int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
+ 		    kvm_vcpu_apicv_active(&svm->vcpu)) {
+ 			struct amd_iommu_pi_data pi;
+ 
++			enable_remapped_mode = false;
++
+ 			/* Try to enable guest_mode in IRTE */
+ 			pi.base = __sme_set(page_to_phys(svm->avic_backing_page) &
+ 					    AVIC_HPA_MASK);
+@@ -951,33 +954,6 @@ int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
+ 			 */
+ 			if (!ret && pi.is_guest_mode)
+ 				svm_ir_list_add(svm, &pi);
+-		} else {
+-			/* Use legacy mode in IRTE */
+-			struct amd_iommu_pi_data pi;
+-
+-			/**
+-			 * Here, pi is used to:
+-			 * - Tell IOMMU to use legacy mode for this interrupt.
+-			 * - Retrieve ga_tag of prior interrupt remapping data.
+-			 */
+-			pi.prev_ga_tag = 0;
+-			pi.is_guest_mode = false;
+-			ret = irq_set_vcpu_affinity(host_irq, &pi);
+-
+-			/**
+-			 * Check if the posted interrupt was previously
+-			 * setup with the guest_mode by checking if the ga_tag
+-			 * was cached. If so, we need to clean up the per-vcpu
+-			 * ir_list.
+-			 */
+-			if (!ret && pi.prev_ga_tag) {
+-				int id = AVIC_GATAG_TO_VCPUID(pi.prev_ga_tag);
+-				struct kvm_vcpu *vcpu;
+-
+-				vcpu = kvm_get_vcpu_by_id(kvm, id);
+-				if (vcpu)
+-					svm_ir_list_del(to_svm(vcpu), &pi);
+-			}
+ 		}
+ 
+ 		if (!ret && svm) {
+@@ -993,6 +969,34 @@ int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
+ 	}
+ 
+ 	ret = 0;
++	if (enable_remapped_mode) {
++		/* Use legacy mode in IRTE */
++		struct amd_iommu_pi_data pi;
++
++		/**
++		 * Here, pi is used to:
++		 * - Tell IOMMU to use legacy mode for this interrupt.
++		 * - Retrieve ga_tag of prior interrupt remapping data.
++		 */
++		pi.prev_ga_tag = 0;
++		pi.is_guest_mode = false;
++		ret = irq_set_vcpu_affinity(host_irq, &pi);
++
++		/**
++		 * Check if the posted interrupt was previously
++		 * setup with the guest_mode by checking if the ga_tag
++		 * was cached. If so, we need to clean up the per-vcpu
++		 * ir_list.
++		 */
++		if (!ret && pi.prev_ga_tag) {
++			int id = AVIC_GATAG_TO_VCPUID(pi.prev_ga_tag);
++			struct kvm_vcpu *vcpu;
++
++			vcpu = kvm_get_vcpu_by_id(kvm, id);
++			if (vcpu)
++				svm_ir_list_del(to_svm(vcpu), &pi);
++		}
++	}
+ out:
+ 	srcu_read_unlock(&kvm->irq_srcu, idx);
+ 	return ret;
+@@ -1199,6 +1203,12 @@ bool avic_hardware_setup(void)
+ 		return false;
+ 	}
+ 
++	if (cc_platform_has(CC_ATTR_HOST_SEV_SNP) &&
++	    !boot_cpu_has(X86_FEATURE_HV_INUSE_WR_ALLOWED)) {
++		pr_warn("AVIC disabled: missing HvInUseWrAllowed on SNP-enabled system\n");
++		return false;
++	}
++
+ 	if (boot_cpu_has(X86_FEATURE_AVIC)) {
+ 		pr_info("AVIC enabled\n");
+ 	} else if (force_avic) {
+diff --git a/arch/x86/kvm/vmx/posted_intr.c b/arch/x86/kvm/vmx/posted_intr.c
+index ec08fa3caf43ce..6b803324a981a2 100644
+--- a/arch/x86/kvm/vmx/posted_intr.c
++++ b/arch/x86/kvm/vmx/posted_intr.c
+@@ -274,6 +274,7 @@ int vmx_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
+ {
+ 	struct kvm_kernel_irq_routing_entry *e;
+ 	struct kvm_irq_routing_table *irq_rt;
++	bool enable_remapped_mode = true;
+ 	struct kvm_lapic_irq irq;
+ 	struct kvm_vcpu *vcpu;
+ 	struct vcpu_data vcpu_info;
+@@ -312,21 +313,8 @@ int vmx_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
+ 
+ 		kvm_set_msi_irq(kvm, e, &irq);
+ 		if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu) ||
+-		    !kvm_irq_is_postable(&irq)) {
+-			/*
+-			 * Make sure the IRTE is in remapped mode if
+-			 * we don't handle it in posted mode.
+-			 */
+-			ret = irq_set_vcpu_affinity(host_irq, NULL);
+-			if (ret < 0) {
+-				printk(KERN_INFO
+-				   "failed to back to remapped mode, irq: %u\n",
+-				   host_irq);
+-				goto out;
+-			}
+-
++		    !kvm_irq_is_postable(&irq))
+ 			continue;
+-		}
+ 
+ 		vcpu_info.pi_desc_addr = __pa(vcpu_to_pi_desc(vcpu));
+ 		vcpu_info.vector = irq.vector;
+@@ -334,11 +322,12 @@ int vmx_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
+ 		trace_kvm_pi_irte_update(host_irq, vcpu->vcpu_id, e->gsi,
+ 				vcpu_info.vector, vcpu_info.pi_desc_addr, set);
+ 
+-		if (set)
+-			ret = irq_set_vcpu_affinity(host_irq, &vcpu_info);
+-		else
+-			ret = irq_set_vcpu_affinity(host_irq, NULL);
++		if (!set)
++			continue;
+ 
++		enable_remapped_mode = false;
++
++		ret = irq_set_vcpu_affinity(host_irq, &vcpu_info);
+ 		if (ret < 0) {
+ 			printk(KERN_INFO "%s: failed to update PI IRTE\n",
+ 					__func__);
+@@ -346,6 +335,9 @@ int vmx_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
+ 		}
+ 	}
+ 
++	if (enable_remapped_mode)
++		ret = irq_set_vcpu_affinity(host_irq, NULL);
++
+ 	ret = 0;
+ out:
+ 	srcu_read_unlock(&kvm->irq_srcu, idx);
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 1a4ca471d63df6..7a5367b14518f9 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -13555,15 +13555,22 @@ int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
+ {
+ 	struct kvm_kernel_irqfd *irqfd =
+ 		container_of(cons, struct kvm_kernel_irqfd, consumer);
++	struct kvm *kvm = irqfd->kvm;
+ 	int ret;
+ 
+-	irqfd->producer = prod;
+ 	kvm_arch_start_assignment(irqfd->kvm);
++
++	spin_lock_irq(&kvm->irqfds.lock);
++	irqfd->producer = prod;
++
+ 	ret = kvm_x86_call(pi_update_irte)(irqfd->kvm,
+ 					   prod->irq, irqfd->gsi, 1);
+ 	if (ret)
+ 		kvm_arch_end_assignment(irqfd->kvm);
+ 
++	spin_unlock_irq(&kvm->irqfds.lock);
++
++
+ 	return ret;
+ }
+ 
+@@ -13573,9 +13580,9 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
+ 	int ret;
+ 	struct kvm_kernel_irqfd *irqfd =
+ 		container_of(cons, struct kvm_kernel_irqfd, consumer);
++	struct kvm *kvm = irqfd->kvm;
+ 
+ 	WARN_ON(irqfd->producer != prod);
+-	irqfd->producer = NULL;
+ 
+ 	/*
+ 	 * When producer of consumer is unregistered, we change back to
+@@ -13583,12 +13590,18 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
+ 	 * when the irq is masked/disabled or the consumer side (KVM
+ 	 * in this case) doesn't want to receive the interrupts.
+ 	*/
++	spin_lock_irq(&kvm->irqfds.lock);
++	irqfd->producer = NULL;
++
+ 	ret = kvm_x86_call(pi_update_irte)(irqfd->kvm,
+ 					   prod->irq, irqfd->gsi, 0);
+ 	if (ret)
+ 		printk(KERN_INFO "irq bypass consumer (token %p) unregistration"
+ 		       " fails: %d\n", irqfd->consumer.token, ret);
+ 
++	spin_unlock_irq(&kvm->irqfds.lock);
++
++
+ 	kvm_arch_end_assignment(irqfd->kvm);
+ }
+ 
+@@ -13601,7 +13614,8 @@ int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
+ bool kvm_arch_irqfd_route_changed(struct kvm_kernel_irq_routing_entry *old,
+ 				  struct kvm_kernel_irq_routing_entry *new)
+ {
+-	if (new->type != KVM_IRQ_ROUTING_MSI)
++	if (old->type != KVM_IRQ_ROUTING_MSI ||
++	    new->type != KVM_IRQ_ROUTING_MSI)
+ 		return true;
+ 
+ 	return !!memcmp(&old->msi, &new->msi, sizeof(new->msi));
+diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt
+index caedb3ef6688fc..f5dd84eb55dcda 100644
+--- a/arch/x86/lib/x86-opcode-map.txt
++++ b/arch/x86/lib/x86-opcode-map.txt
+@@ -996,8 +996,8 @@ AVXcode: 4
+ 83: Grp1 Ev,Ib (1A),(es)
+ # CTESTSCC instructions are: CTESTB, CTESTBE, CTESTF, CTESTL, CTESTLE, CTESTNB, CTESTNBE, CTESTNL,
+ #			     CTESTNLE, CTESTNO, CTESTNS, CTESTNZ, CTESTO, CTESTS, CTESTT, CTESTZ
+-84: CTESTSCC (ev)
+-85: CTESTSCC (es) | CTESTSCC (66),(es)
++84: CTESTSCC Eb,Gb (ev)
++85: CTESTSCC Ev,Gv (es) | CTESTSCC Ev,Gv (66),(es)
+ 88: POPCNT Gv,Ev (es) | POPCNT Gv,Ev (66),(es)
+ 8f: POP2 Bq,Rq (000),(11B),(ev)
+ a5: SHLD Ev,Gv,CL (es) | SHLD Ev,Gv,CL (66),(es)
+diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
+index 00ffa74d0dd0bf..27d81cb049ff81 100644
+--- a/arch/x86/mm/tlb.c
++++ b/arch/x86/mm/tlb.c
+@@ -389,9 +389,9 @@ static void cond_mitigation(struct task_struct *next)
+ 	prev_mm = this_cpu_read(cpu_tlbstate.last_user_mm_spec);
+ 
+ 	/*
+-	 * Avoid user/user BTB poisoning by flushing the branch predictor
+-	 * when switching between processes. This stops one process from
+-	 * doing Spectre-v2 attacks on another.
++	 * Avoid user->user BTB/RSB poisoning by flushing them when switching
++	 * between processes. This stops one process from doing Spectre-v2
++	 * attacks on another.
+ 	 *
+ 	 * Both, the conditional and the always IBPB mode use the mm
+ 	 * pointer to avoid the IBPB when switching between tasks of the
+diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
+index 0f2fe524f60dcd..b8755cde241993 100644
+--- a/arch/x86/pci/xen.c
++++ b/arch/x86/pci/xen.c
+@@ -436,7 +436,8 @@ static struct msi_domain_ops xen_pci_msi_domain_ops = {
+ };
+ 
+ static struct msi_domain_info xen_pci_msi_domain_info = {
+-	.flags			= MSI_FLAG_PCI_MSIX | MSI_FLAG_FREE_MSI_DESCS | MSI_FLAG_DEV_SYSFS,
++	.flags			= MSI_FLAG_PCI_MSIX | MSI_FLAG_FREE_MSI_DESCS |
++				  MSI_FLAG_DEV_SYSFS | MSI_FLAG_NO_MASK,
+ 	.ops			= &xen_pci_msi_domain_ops,
+ };
+ 
+@@ -484,11 +485,6 @@ static __init void xen_setup_pci_msi(void)
+ 	 * in allocating the native domain and never use it.
+ 	 */
+ 	x86_init.irqs.create_pci_msi_domain = xen_create_pci_msi_domain;
+-	/*
+-	 * With XEN PIRQ/Eventchannels in use PCI/MSI[-X] masking is solely
+-	 * controlled by the hypervisor.
+-	 */
+-	pci_msi_ignore_mask = 1;
+ }
+ 
+ #else /* CONFIG_PCI_MSI */
+diff --git a/arch/x86/xen/enlighten_pvh.c b/arch/x86/xen/enlighten_pvh.c
+index 0e3d930bcb89e8..9d25d9373945cb 100644
+--- a/arch/x86/xen/enlighten_pvh.c
++++ b/arch/x86/xen/enlighten_pvh.c
+@@ -1,5 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0
+ #include <linux/acpi.h>
++#include <linux/cpufreq.h>
++#include <linux/cpuidle.h>
+ #include <linux/export.h>
+ #include <linux/mm.h>
+ 
+@@ -123,8 +125,23 @@ static void __init pvh_arch_setup(void)
+ {
+ 	pvh_reserve_extra_memory();
+ 
+-	if (xen_initial_domain())
++	if (xen_initial_domain()) {
+ 		xen_add_preferred_consoles();
++
++		/*
++		 * Disable usage of CPU idle and frequency drivers: when
++		 * running as hardware domain the exposed native ACPI tables
++		 * cause idle and/or frequency drivers to attach and
++		 * malfunction.  Xen is the entity that controls the idle and
++		 * frequency states.
++		 *
++		 * For unprivileged domains the exposed ACPI tables are
++		 * fabricated and don't contain such data.
++		 */
++		disable_cpuidle();
++		disable_cpufreq();
++		WARN_ON(xen_set_default_idle());
++	}
+ }
+ 
+ void __init xen_pvh_init(struct boot_params *boot_params)
+diff --git a/block/blk-merge.c b/block/blk-merge.c
+index ceac64e796ea82..f575cc1705b3f4 100644
+--- a/block/blk-merge.c
++++ b/block/blk-merge.c
+@@ -864,12 +864,13 @@ static struct request *attempt_merge(struct request_queue *q,
+ 	if (rq_data_dir(req) != rq_data_dir(next))
+ 		return NULL;
+ 
+-	/* Don't merge requests with different write hints. */
+-	if (req->write_hint != next->write_hint)
+-		return NULL;
+-
+-	if (req->ioprio != next->ioprio)
+-		return NULL;
++	if (req->bio && next->bio) {
++		/* Don't merge requests with different write hints. */
++		if (req->bio->bi_write_hint != next->bio->bi_write_hint)
++			return NULL;
++		if (req->bio->bi_ioprio != next->bio->bi_ioprio)
++			return NULL;
++	}
+ 
+ 	if (!blk_atomic_write_mergeable_rqs(req, next))
+ 		return NULL;
+@@ -998,12 +999,13 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
+ 	if (!bio_crypt_rq_ctx_compatible(rq, bio))
+ 		return false;
+ 
+-	/* Don't merge requests with different write hints. */
+-	if (rq->write_hint != bio->bi_write_hint)
+-		return false;
+-
+-	if (rq->ioprio != bio_prio(bio))
+-		return false;
++	if (rq->bio) {
++		/* Don't merge requests with different write hints. */
++		if (rq->bio->bi_write_hint != bio->bi_write_hint)
++			return false;
++		if (rq->bio->bi_ioprio != bio->bi_ioprio)
++			return false;
++	}
+ 
+ 	if (blk_atomic_write_mergeable_rq_bio(rq, bio) == false)
+ 		return false;
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index f26bee56269363..a7765e96cf40e5 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -870,7 +870,7 @@ static void blk_print_req_error(struct request *req, blk_status_t status)
+ 		blk_op_str(req_op(req)),
+ 		(__force u32)(req->cmd_flags & ~REQ_OP_MASK),
+ 		req->nr_phys_segments,
+-		IOPRIO_PRIO_CLASS(req->ioprio));
++		IOPRIO_PRIO_CLASS(req_get_ioprio(req)));
+ }
+ 
+ /*
+@@ -2654,7 +2654,6 @@ static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
+ 		rq->cmd_flags |= REQ_FAILFAST_MASK;
+ 
+ 	rq->__sector = bio->bi_iter.bi_sector;
+-	rq->write_hint = bio->bi_write_hint;
+ 	blk_rq_bio_prep(rq, bio, nr_segs);
+ 	if (bio_integrity(bio))
+ 		rq->nr_integrity_segments = blk_rq_count_integrity_sg(rq->q,
+@@ -3307,8 +3306,7 @@ int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
+ 		rq->special_vec = rq_src->special_vec;
+ 	}
+ 	rq->nr_phys_segments = rq_src->nr_phys_segments;
+-	rq->ioprio = rq_src->ioprio;
+-	rq->write_hint = rq_src->write_hint;
++	rq->nr_integrity_segments = rq_src->nr_integrity_segments;
+ 
+ 	if (rq->bio && blk_crypto_rq_bio_prep(rq, rq->bio, gfp_mask) < 0)
+ 		goto free_and_out;
+diff --git a/block/blk-settings.c b/block/blk-settings.c
+index 7abf034089cd96..1e63e3dd544020 100644
+--- a/block/blk-settings.c
++++ b/block/blk-settings.c
+@@ -61,8 +61,14 @@ void blk_apply_bdi_limits(struct backing_dev_info *bdi,
+ 	/*
+ 	 * For read-ahead of large files to be effective, we need to read ahead
+ 	 * at least twice the optimal I/O size.
++	 *
++	 * There is no hardware limitation for the read-ahead size and the user
++	 * might have increased the read-ahead size through sysfs, so don't ever
++	 * decrease it.
+ 	 */
+-	bdi->ra_pages = max(lim->io_opt * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
++	bdi->ra_pages = max3(bdi->ra_pages,
++				lim->io_opt * 2 / PAGE_SIZE,
++				VM_READAHEAD_PAGES);
+ 	bdi->io_pages = lim->max_sectors >> PAGE_SECTORS_SHIFT;
+ }
+ 
+diff --git a/block/mq-deadline.c b/block/mq-deadline.c
+index acdc28756d9d77..91b3789f710e7a 100644
+--- a/block/mq-deadline.c
++++ b/block/mq-deadline.c
+@@ -685,10 +685,9 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+ 
+ 	prio = ioprio_class_to_prio[ioprio_class];
+ 	per_prio = &dd->per_prio[prio];
+-	if (!rq->elv.priv[0]) {
++	if (!rq->elv.priv[0])
+ 		per_prio->stats.inserted++;
+-		rq->elv.priv[0] = (void *)(uintptr_t)1;
+-	}
++	rq->elv.priv[0] = per_prio;
+ 
+ 	if (blk_mq_sched_try_insert_merge(q, rq, free))
+ 		return;
+@@ -753,18 +752,14 @@ static void dd_prepare_request(struct request *rq)
+  */
+ static void dd_finish_request(struct request *rq)
+ {
+-	struct request_queue *q = rq->q;
+-	struct deadline_data *dd = q->elevator->elevator_data;
+-	const u8 ioprio_class = dd_rq_ioclass(rq);
+-	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
+-	struct dd_per_prio *per_prio = &dd->per_prio[prio];
++	struct dd_per_prio *per_prio = rq->elv.priv[0];
+ 
+ 	/*
+ 	 * The block layer core may call dd_finish_request() without having
+ 	 * called dd_insert_requests(). Skip requests that bypassed I/O
+ 	 * scheduling. See also blk_mq_request_bypass_insert().
+ 	 */
+-	if (rq->elv.priv[0])
++	if (per_prio)
+ 		atomic_inc(&per_prio->stats.completed);
+ }
+ 
+diff --git a/crypto/Kconfig b/crypto/Kconfig
+index a779cab668c24c..e7528986e94f94 100644
+--- a/crypto/Kconfig
++++ b/crypto/Kconfig
+@@ -317,6 +317,7 @@ config CRYPTO_CURVE25519
+ 	tristate "Curve25519"
+ 	select CRYPTO_KPP
+ 	select CRYPTO_LIB_CURVE25519_GENERIC
++	select CRYPTO_LIB_CURVE25519_INTERNAL
+ 	help
+ 	  Curve25519 elliptic curve (RFC7748)
+ 
+@@ -615,6 +616,7 @@ config CRYPTO_ARC4
+ config CRYPTO_CHACHA20
+ 	tristate "ChaCha"
+ 	select CRYPTO_LIB_CHACHA_GENERIC
++	select CRYPTO_LIB_CHACHA_INTERNAL
+ 	select CRYPTO_SKCIPHER
+ 	help
+ 	  The ChaCha20, XChaCha20, and XChaCha12 stream cipher algorithms
+@@ -944,6 +946,7 @@ config CRYPTO_POLY1305
+ 	tristate "Poly1305"
+ 	select CRYPTO_HASH
+ 	select CRYPTO_LIB_POLY1305_GENERIC
++	select CRYPTO_LIB_POLY1305_INTERNAL
+ 	help
+ 	  Poly1305 authenticator algorithm (RFC7539)
+ 
+diff --git a/crypto/crypto_null.c b/crypto/crypto_null.c
+index 5b84b0f7cc178f..3378670286535a 100644
+--- a/crypto/crypto_null.c
++++ b/crypto/crypto_null.c
+@@ -17,10 +17,10 @@
+ #include <crypto/internal/skcipher.h>
+ #include <linux/init.h>
+ #include <linux/module.h>
+-#include <linux/mm.h>
++#include <linux/spinlock.h>
+ #include <linux/string.h>
+ 
+-static DEFINE_MUTEX(crypto_default_null_skcipher_lock);
++static DEFINE_SPINLOCK(crypto_default_null_skcipher_lock);
+ static struct crypto_sync_skcipher *crypto_default_null_skcipher;
+ static int crypto_default_null_skcipher_refcnt;
+ 
+@@ -152,23 +152,32 @@ MODULE_ALIAS_CRYPTO("cipher_null");
+ 
+ struct crypto_sync_skcipher *crypto_get_default_null_skcipher(void)
+ {
++	struct crypto_sync_skcipher *ntfm = NULL;
+ 	struct crypto_sync_skcipher *tfm;
+ 
+-	mutex_lock(&crypto_default_null_skcipher_lock);
++	spin_lock_bh(&crypto_default_null_skcipher_lock);
+ 	tfm = crypto_default_null_skcipher;
+ 
+ 	if (!tfm) {
+-		tfm = crypto_alloc_sync_skcipher("ecb(cipher_null)", 0, 0);
+-		if (IS_ERR(tfm))
+-			goto unlock;
+-
+-		crypto_default_null_skcipher = tfm;
++		spin_unlock_bh(&crypto_default_null_skcipher_lock);
++
++		ntfm = crypto_alloc_sync_skcipher("ecb(cipher_null)", 0, 0);
++		if (IS_ERR(ntfm))
++			return ntfm;
++
++		spin_lock_bh(&crypto_default_null_skcipher_lock);
++		tfm = crypto_default_null_skcipher;
++		if (!tfm) {
++			tfm = ntfm;
++			ntfm = NULL;
++			crypto_default_null_skcipher = tfm;
++		}
+ 	}
+ 
+ 	crypto_default_null_skcipher_refcnt++;
++	spin_unlock_bh(&crypto_default_null_skcipher_lock);
+ 
+-unlock:
+-	mutex_unlock(&crypto_default_null_skcipher_lock);
++	crypto_free_sync_skcipher(ntfm);
+ 
+ 	return tfm;
+ }
+@@ -176,12 +185,16 @@ EXPORT_SYMBOL_GPL(crypto_get_default_null_skcipher);
+ 
+ void crypto_put_default_null_skcipher(void)
+ {
+-	mutex_lock(&crypto_default_null_skcipher_lock);
++	struct crypto_sync_skcipher *tfm = NULL;
++
++	spin_lock_bh(&crypto_default_null_skcipher_lock);
+ 	if (!--crypto_default_null_skcipher_refcnt) {
+-		crypto_free_sync_skcipher(crypto_default_null_skcipher);
++		tfm = crypto_default_null_skcipher;
+ 		crypto_default_null_skcipher = NULL;
+ 	}
+-	mutex_unlock(&crypto_default_null_skcipher_lock);
++	spin_unlock_bh(&crypto_default_null_skcipher_lock);
++
++	crypto_free_sync_skcipher(tfm);
+ }
+ EXPORT_SYMBOL_GPL(crypto_put_default_null_skcipher);
+ 
+diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c
+index 38b4158f52784b..88df2cdc46b62b 100644
+--- a/drivers/accel/ivpu/ivpu_drv.c
++++ b/drivers/accel/ivpu/ivpu_drv.c
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0-only
+ /*
+- * Copyright (C) 2020-2024 Intel Corporation
++ * Copyright (C) 2020-2025 Intel Corporation
+  */
+ 
+ #include <linux/firmware.h>
+@@ -54,9 +54,9 @@ u8 ivpu_pll_max_ratio = U8_MAX;
+ module_param_named(pll_max_ratio, ivpu_pll_max_ratio, byte, 0644);
+ MODULE_PARM_DESC(pll_max_ratio, "Maximum PLL ratio used to set NPU frequency");
+ 
+-int ivpu_sched_mode;
++int ivpu_sched_mode = IVPU_SCHED_MODE_AUTO;
+ module_param_named(sched_mode, ivpu_sched_mode, int, 0444);
+-MODULE_PARM_DESC(sched_mode, "Scheduler mode: 0 - Default scheduler, 1 - Force HW scheduler");
++MODULE_PARM_DESC(sched_mode, "Scheduler mode: -1 - Use default scheduler, 0 - Use OS scheduler, 1 - Use HW scheduler");
+ 
+ bool ivpu_disable_mmu_cont_pages;
+ module_param_named(disable_mmu_cont_pages, ivpu_disable_mmu_cont_pages, bool, 0444);
+@@ -165,7 +165,7 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f
+ 		args->value = vdev->platform;
+ 		break;
+ 	case DRM_IVPU_PARAM_CORE_CLOCK_RATE:
+-		args->value = ivpu_hw_ratio_to_freq(vdev, vdev->hw->pll.max_ratio);
++		args->value = ivpu_hw_dpu_max_freq_get(vdev);
+ 		break;
+ 	case DRM_IVPU_PARAM_NUM_CONTEXTS:
+ 		args->value = ivpu_get_context_count(vdev);
+@@ -347,7 +347,7 @@ static int ivpu_hw_sched_init(struct ivpu_device *vdev)
+ {
+ 	int ret = 0;
+ 
+-	if (vdev->hw->sched_mode == VPU_SCHEDULING_MODE_HW) {
++	if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) {
+ 		ret = ivpu_jsm_hws_setup_priority_bands(vdev);
+ 		if (ret) {
+ 			ivpu_err(vdev, "Failed to enable hw scheduler: %d", ret);
+diff --git a/drivers/accel/ivpu/ivpu_drv.h b/drivers/accel/ivpu/ivpu_drv.h
+index 2b30cc2e9272e4..9430a24994c32e 100644
+--- a/drivers/accel/ivpu/ivpu_drv.h
++++ b/drivers/accel/ivpu/ivpu_drv.h
+@@ -57,6 +57,8 @@
+ #define IVPU_PLATFORM_FPGA    3
+ #define IVPU_PLATFORM_INVALID 8
+ 
++#define IVPU_SCHED_MODE_AUTO -1
++
+ #define IVPU_DBG_REG	 BIT(0)
+ #define IVPU_DBG_IRQ	 BIT(1)
+ #define IVPU_DBG_MMU	 BIT(2)
+diff --git a/drivers/accel/ivpu/ivpu_fw.c b/drivers/accel/ivpu/ivpu_fw.c
+index b2b6d89f06537f..8a9395a2abb5d9 100644
+--- a/drivers/accel/ivpu/ivpu_fw.c
++++ b/drivers/accel/ivpu/ivpu_fw.c
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0-only
+ /*
+- * Copyright (C) 2020-2024 Intel Corporation
++ * Copyright (C) 2020-2025 Intel Corporation
+  */
+ 
+ #include <linux/firmware.h>
+@@ -134,6 +134,15 @@ static bool is_within_range(u64 addr, size_t size, u64 range_start, size_t range
+ 	return true;
+ }
+ 
++static u32
++ivpu_fw_sched_mode_select(struct ivpu_device *vdev, const struct vpu_firmware_header *fw_hdr)
++{
++	if (ivpu_sched_mode != IVPU_SCHED_MODE_AUTO)
++		return ivpu_sched_mode;
++
++	return VPU_SCHEDULING_MODE_OS;
++}
++
+ static int ivpu_fw_parse(struct ivpu_device *vdev)
+ {
+ 	struct ivpu_fw_info *fw = vdev->fw;
+@@ -215,8 +224,10 @@ static int ivpu_fw_parse(struct ivpu_device *vdev)
+ 
+ 	fw->dvfs_mode = 0;
+ 
++	fw->sched_mode = ivpu_fw_sched_mode_select(vdev, fw_hdr);
+ 	fw->primary_preempt_buf_size = fw_hdr->preemption_buffer_1_size;
+ 	fw->secondary_preempt_buf_size = fw_hdr->preemption_buffer_2_size;
++	ivpu_info(vdev, "Scheduler mode: %s\n", fw->sched_mode ? "HW" : "OS");
+ 
+ 	if (fw_hdr->ro_section_start_address && !is_within_range(fw_hdr->ro_section_start_address,
+ 								 fw_hdr->ro_section_size,
+@@ -545,7 +556,6 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
+ 
+ 	boot_params->magic = VPU_BOOT_PARAMS_MAGIC;
+ 	boot_params->vpu_id = to_pci_dev(vdev->drm.dev)->bus->number;
+-	boot_params->frequency = ivpu_hw_pll_freq_get(vdev);
+ 
+ 	/*
+ 	 * This param is a debug firmware feature.  It switches default clock
+@@ -605,8 +615,8 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
+ 	boot_params->punit_telemetry_sram_base = ivpu_hw_telemetry_offset_get(vdev);
+ 	boot_params->punit_telemetry_sram_size = ivpu_hw_telemetry_size_get(vdev);
+ 	boot_params->vpu_telemetry_enable = ivpu_hw_telemetry_enable_get(vdev);
+-	boot_params->vpu_scheduling_mode = vdev->hw->sched_mode;
+-	if (vdev->hw->sched_mode == VPU_SCHEDULING_MODE_HW)
++	boot_params->vpu_scheduling_mode = vdev->fw->sched_mode;
++	if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW)
+ 		boot_params->vpu_focus_present_timer_ms = IVPU_FOCUS_PRESENT_TIMER_MS;
+ 	boot_params->dvfs_mode = vdev->fw->dvfs_mode;
+ 	if (!IVPU_WA(disable_d0i3_msg))
+diff --git a/drivers/accel/ivpu/ivpu_fw.h b/drivers/accel/ivpu/ivpu_fw.h
+index 5e8eb608b70f1f..1d0b2bd9d65cf0 100644
+--- a/drivers/accel/ivpu/ivpu_fw.h
++++ b/drivers/accel/ivpu/ivpu_fw.h
+@@ -6,6 +6,8 @@
+ #ifndef __IVPU_FW_H__
+ #define __IVPU_FW_H__
+ 
++#include "vpu_jsm_api.h"
++
+ #define FW_VERSION_HEADER_SIZE	SZ_4K
+ #define FW_VERSION_STR_SIZE	SZ_256
+ 
+@@ -36,6 +38,7 @@ struct ivpu_fw_info {
+ 	u32 secondary_preempt_buf_size;
+ 	u64 read_only_addr;
+ 	u32 read_only_size;
++	u32 sched_mode;
+ };
+ 
+ int ivpu_fw_init(struct ivpu_device *vdev);
+diff --git a/drivers/accel/ivpu/ivpu_hw.h b/drivers/accel/ivpu/ivpu_hw.h
+index a96a05b2acda9a..1e85306bcd0653 100644
+--- a/drivers/accel/ivpu/ivpu_hw.h
++++ b/drivers/accel/ivpu/ivpu_hw.h
+@@ -1,6 +1,6 @@
+ /* SPDX-License-Identifier: GPL-2.0-only */
+ /*
+- * Copyright (C) 2020-2024 Intel Corporation
++ * Copyright (C) 2020-2025 Intel Corporation
+  */
+ 
+ #ifndef __IVPU_HW_H__
+@@ -46,7 +46,6 @@ struct ivpu_hw_info {
+ 		u32 profiling_freq;
+ 	} pll;
+ 	u32 tile_fuse;
+-	u32 sched_mode;
+ 	u32 sku;
+ 	u16 config;
+ 	int dma_bits;
+@@ -87,9 +86,9 @@ static inline u64 ivpu_hw_range_size(const struct ivpu_addr_range *range)
+ 	return range->end - range->start;
+ }
+ 
+-static inline u32 ivpu_hw_ratio_to_freq(struct ivpu_device *vdev, u32 ratio)
++static inline u32 ivpu_hw_dpu_max_freq_get(struct ivpu_device *vdev)
+ {
+-	return ivpu_hw_btrs_ratio_to_freq(vdev, ratio);
++	return ivpu_hw_btrs_dpu_max_freq_get(vdev);
+ }
+ 
+ static inline void ivpu_hw_irq_clear(struct ivpu_device *vdev)
+@@ -97,11 +96,6 @@ static inline void ivpu_hw_irq_clear(struct ivpu_device *vdev)
+ 	ivpu_hw_ip_irq_clear(vdev);
+ }
+ 
+-static inline u32 ivpu_hw_pll_freq_get(struct ivpu_device *vdev)
+-{
+-	return ivpu_hw_btrs_pll_freq_get(vdev);
+-}
+-
+ static inline u32 ivpu_hw_profiling_freq_get(struct ivpu_device *vdev)
+ {
+ 	return vdev->hw->pll.profiling_freq;
+diff --git a/drivers/accel/ivpu/ivpu_hw_btrs.c b/drivers/accel/ivpu/ivpu_hw_btrs.c
+index 745e5248803daf..2d88357b9a3a4b 100644
+--- a/drivers/accel/ivpu/ivpu_hw_btrs.c
++++ b/drivers/accel/ivpu/ivpu_hw_btrs.c
+@@ -1,8 +1,10 @@
+ // SPDX-License-Identifier: GPL-2.0-only
+ /*
+- * Copyright (C) 2020-2024 Intel Corporation
++ * Copyright (C) 2020-2025 Intel Corporation
+  */
+ 
++#include <linux/units.h>
++
+ #include "ivpu_drv.h"
+ #include "ivpu_hw.h"
+ #include "ivpu_hw_btrs.h"
+@@ -28,17 +30,13 @@
+ 
+ #define BTRS_LNL_ALL_IRQ_MASK ((u32)-1)
+ 
+-#define BTRS_MTL_WP_CONFIG_1_TILE_5_3_RATIO WP_CONFIG(MTL_CONFIG_1_TILE, MTL_PLL_RATIO_5_3)
+-#define BTRS_MTL_WP_CONFIG_1_TILE_4_3_RATIO WP_CONFIG(MTL_CONFIG_1_TILE, MTL_PLL_RATIO_4_3)
+-#define BTRS_MTL_WP_CONFIG_2_TILE_5_3_RATIO WP_CONFIG(MTL_CONFIG_2_TILE, MTL_PLL_RATIO_5_3)
+-#define BTRS_MTL_WP_CONFIG_2_TILE_4_3_RATIO WP_CONFIG(MTL_CONFIG_2_TILE, MTL_PLL_RATIO_4_3)
+-#define BTRS_MTL_WP_CONFIG_0_TILE_PLL_OFF   WP_CONFIG(0, 0)
+ 
+ #define PLL_CDYN_DEFAULT               0x80
+ #define PLL_EPP_DEFAULT                0x80
+ #define PLL_CONFIG_DEFAULT             0x0
+-#define PLL_SIMULATION_FREQ            10000000
+-#define PLL_REF_CLK_FREQ               50000000
++#define PLL_REF_CLK_FREQ               50000000ull
++#define PLL_RATIO_TO_FREQ(x)           ((x) * PLL_REF_CLK_FREQ)
++
+ #define PLL_TIMEOUT_US		       (1500 * USEC_PER_MSEC)
+ #define IDLE_TIMEOUT_US		       (5 * USEC_PER_MSEC)
+ #define TIMEOUT_US                     (150 * USEC_PER_MSEC)
+@@ -62,6 +60,8 @@
+ #define DCT_ENABLE                     0x1
+ #define DCT_DISABLE                    0x0
+ 
++static u32 pll_ratio_to_dpu_freq(struct ivpu_device *vdev, u32 ratio);
++
+ int ivpu_hw_btrs_irqs_clear_with_0_mtl(struct ivpu_device *vdev)
+ {
+ 	REGB_WR32(VPU_HW_BTRS_MTL_INTERRUPT_STAT, BTRS_MTL_ALL_IRQ_MASK);
+@@ -162,8 +162,7 @@ static int info_init_mtl(struct ivpu_device *vdev)
+ 
+ 	hw->tile_fuse = BTRS_MTL_TILE_FUSE_ENABLE_BOTH;
+ 	hw->sku = BTRS_MTL_TILE_SKU_BOTH;
+-	hw->config = BTRS_MTL_WP_CONFIG_2_TILE_4_3_RATIO;
+-	hw->sched_mode = ivpu_sched_mode;
++	hw->config = WP_CONFIG(MTL_CONFIG_2_TILE, MTL_PLL_RATIO_4_3);
+ 
+ 	return 0;
+ }
+@@ -178,7 +177,6 @@ static int info_init_lnl(struct ivpu_device *vdev)
+ 	if (ret)
+ 		return ret;
+ 
+-	hw->sched_mode = ivpu_sched_mode;
+ 	hw->tile_fuse = tile_fuse_config;
+ 	hw->pll.profiling_freq = PLL_PROFILING_FREQ_DEFAULT;
+ 
+@@ -346,8 +344,8 @@ int ivpu_hw_btrs_wp_drive(struct ivpu_device *vdev, bool enable)
+ 
+ 	prepare_wp_request(vdev, &wp, enable);
+ 
+-	ivpu_dbg(vdev, PM, "PLL workpoint request: %u Hz, config: 0x%x, epp: 0x%x, cdyn: 0x%x\n",
+-		 PLL_RATIO_TO_FREQ(wp.target), wp.cfg, wp.epp, wp.cdyn);
++	ivpu_dbg(vdev, PM, "PLL workpoint request: %lu MHz, config: 0x%x, epp: 0x%x, cdyn: 0x%x\n",
++		 pll_ratio_to_dpu_freq(vdev, wp.target) / HZ_PER_MHZ, wp.cfg, wp.epp, wp.cdyn);
+ 
+ 	ret = wp_request_send(vdev, &wp);
+ 	if (ret) {
+@@ -588,6 +586,39 @@ int ivpu_hw_btrs_wait_for_idle(struct ivpu_device *vdev)
+ 		return REGB_POLL_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, IDLE, 0x1, IDLE_TIMEOUT_US);
+ }
+ 
++static u32 pll_config_get_mtl(struct ivpu_device *vdev)
++{
++	return REGB_RD32(VPU_HW_BTRS_MTL_CURRENT_PLL);
++}
++
++static u32 pll_config_get_lnl(struct ivpu_device *vdev)
++{
++	return REGB_RD32(VPU_HW_BTRS_LNL_PLL_FREQ);
++}
++
++static u32 pll_ratio_to_dpu_freq_mtl(u16 ratio)
++{
++	return (PLL_RATIO_TO_FREQ(ratio) * 2) / 3;
++}
++
++static u32 pll_ratio_to_dpu_freq_lnl(u16 ratio)
++{
++	return PLL_RATIO_TO_FREQ(ratio) / 2;
++}
++
++static u32 pll_ratio_to_dpu_freq(struct ivpu_device *vdev, u32 ratio)
++{
++	if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
++		return pll_ratio_to_dpu_freq_mtl(ratio);
++	else
++		return pll_ratio_to_dpu_freq_lnl(ratio);
++}
++
++u32 ivpu_hw_btrs_dpu_max_freq_get(struct ivpu_device *vdev)
++{
++	return pll_ratio_to_dpu_freq(vdev, vdev->hw->pll.max_ratio);
++}
++
+ /* Handler for IRQs from Buttress core (irqB) */
+ bool ivpu_hw_btrs_irq_handler_mtl(struct ivpu_device *vdev, int irq)
+ {
+@@ -597,9 +628,12 @@ bool ivpu_hw_btrs_irq_handler_mtl(struct ivpu_device *vdev, int irq)
+ 	if (!status)
+ 		return false;
+ 
+-	if (REG_TEST_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, FREQ_CHANGE, status))
+-		ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq: %08x",
+-			 REGB_RD32(VPU_HW_BTRS_MTL_CURRENT_PLL));
++	if (REG_TEST_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, FREQ_CHANGE, status)) {
++		u32 pll = pll_config_get_mtl(vdev);
++
++		ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq, wp %08x, %lu MHz",
++			 pll, pll_ratio_to_dpu_freq_mtl(pll) / HZ_PER_MHZ);
++	}
+ 
+ 	if (REG_TEST_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, ATS_ERR, status)) {
+ 		ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(VPU_HW_BTRS_MTL_ATS_ERR_LOG_0));
+@@ -649,8 +683,12 @@ bool ivpu_hw_btrs_irq_handler_lnl(struct ivpu_device *vdev, int irq)
+ 			ivpu_err_ratelimited(vdev, "IRQ FIFO full\n");
+ 	}
+ 
+-	if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, FREQ_CHANGE, status))
+-		ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq: %08x", REGB_RD32(VPU_HW_BTRS_LNL_PLL_FREQ));
++	if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, FREQ_CHANGE, status)) {
++		u32 pll = pll_config_get_lnl(vdev);
++
++		ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq, wp %08x, %lu MHz",
++			 pll, pll_ratio_to_dpu_freq_lnl(pll) / HZ_PER_MHZ);
++	}
+ 
+ 	if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, ATS_ERR, status)) {
+ 		ivpu_err(vdev, "ATS_ERR LOG1 0x%08x ATS_ERR_LOG2 0x%08x\n",
+@@ -733,60 +771,6 @@ void ivpu_hw_btrs_dct_set_status(struct ivpu_device *vdev, bool enable, u32 acti
+ 	REGB_WR32(VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS, val);
+ }
+ 
+-static u32 pll_ratio_to_freq_mtl(u32 ratio, u32 config)
+-{
+-	u32 pll_clock = PLL_REF_CLK_FREQ * ratio;
+-	u32 cpu_clock;
+-
+-	if ((config & 0xff) == MTL_PLL_RATIO_4_3)
+-		cpu_clock = pll_clock * 2 / 4;
+-	else
+-		cpu_clock = pll_clock * 2 / 5;
+-
+-	return cpu_clock;
+-}
+-
+-u32 ivpu_hw_btrs_ratio_to_freq(struct ivpu_device *vdev, u32 ratio)
+-{
+-	struct ivpu_hw_info *hw = vdev->hw;
+-
+-	if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
+-		return pll_ratio_to_freq_mtl(ratio, hw->config);
+-	else
+-		return PLL_RATIO_TO_FREQ(ratio);
+-}
+-
+-static u32 pll_freq_get_mtl(struct ivpu_device *vdev)
+-{
+-	u32 pll_curr_ratio;
+-
+-	pll_curr_ratio = REGB_RD32(VPU_HW_BTRS_MTL_CURRENT_PLL);
+-	pll_curr_ratio &= VPU_HW_BTRS_MTL_CURRENT_PLL_RATIO_MASK;
+-
+-	if (!ivpu_is_silicon(vdev))
+-		return PLL_SIMULATION_FREQ;
+-
+-	return pll_ratio_to_freq_mtl(pll_curr_ratio, vdev->hw->config);
+-}
+-
+-static u32 pll_freq_get_lnl(struct ivpu_device *vdev)
+-{
+-	u32 pll_curr_ratio;
+-
+-	pll_curr_ratio = REGB_RD32(VPU_HW_BTRS_LNL_PLL_FREQ);
+-	pll_curr_ratio &= VPU_HW_BTRS_LNL_PLL_FREQ_RATIO_MASK;
+-
+-	return PLL_RATIO_TO_FREQ(pll_curr_ratio);
+-}
+-
+-u32 ivpu_hw_btrs_pll_freq_get(struct ivpu_device *vdev)
+-{
+-	if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
+-		return pll_freq_get_mtl(vdev);
+-	else
+-		return pll_freq_get_lnl(vdev);
+-}
+-
+ u32 ivpu_hw_btrs_telemetry_offset_get(struct ivpu_device *vdev)
+ {
+ 	if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
+diff --git a/drivers/accel/ivpu/ivpu_hw_btrs.h b/drivers/accel/ivpu/ivpu_hw_btrs.h
+index 04f14f50fed62e..71792dab3c2107 100644
+--- a/drivers/accel/ivpu/ivpu_hw_btrs.h
++++ b/drivers/accel/ivpu/ivpu_hw_btrs.h
+@@ -1,6 +1,6 @@
+ /* SPDX-License-Identifier: GPL-2.0-only */
+ /*
+- * Copyright (C) 2020-2024 Intel Corporation
++ * Copyright (C) 2020-2025 Intel Corporation
+  */
+ 
+ #ifndef __IVPU_HW_BTRS_H__
+@@ -13,7 +13,6 @@
+ 
+ #define PLL_PROFILING_FREQ_DEFAULT   38400000
+ #define PLL_PROFILING_FREQ_HIGH      400000000
+-#define PLL_RATIO_TO_FREQ(x)         ((x) * PLL_REF_CLK_FREQ)
+ 
+ #define DCT_DEFAULT_ACTIVE_PERCENT 15u
+ #define DCT_PERIOD_US		   35300u
+@@ -32,12 +31,11 @@ int ivpu_hw_btrs_ip_reset(struct ivpu_device *vdev);
+ void ivpu_hw_btrs_profiling_freq_reg_set_lnl(struct ivpu_device *vdev);
+ void ivpu_hw_btrs_ats_print_lnl(struct ivpu_device *vdev);
+ void ivpu_hw_btrs_clock_relinquish_disable_lnl(struct ivpu_device *vdev);
++u32 ivpu_hw_btrs_dpu_max_freq_get(struct ivpu_device *vdev);
+ bool ivpu_hw_btrs_irq_handler_mtl(struct ivpu_device *vdev, int irq);
+ bool ivpu_hw_btrs_irq_handler_lnl(struct ivpu_device *vdev, int irq);
+ int ivpu_hw_btrs_dct_get_request(struct ivpu_device *vdev, bool *enable);
+ void ivpu_hw_btrs_dct_set_status(struct ivpu_device *vdev, bool enable, u32 dct_percent);
+-u32 ivpu_hw_btrs_pll_freq_get(struct ivpu_device *vdev);
+-u32 ivpu_hw_btrs_ratio_to_freq(struct ivpu_device *vdev, u32 ratio);
+ u32 ivpu_hw_btrs_telemetry_offset_get(struct ivpu_device *vdev);
+ u32 ivpu_hw_btrs_telemetry_size_get(struct ivpu_device *vdev);
+ u32 ivpu_hw_btrs_telemetry_enable_get(struct ivpu_device *vdev);
+diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c
+index be2e2bf0f43f02..91f7f6f3ca675b 100644
+--- a/drivers/accel/ivpu/ivpu_job.c
++++ b/drivers/accel/ivpu/ivpu_job.c
+@@ -37,7 +37,7 @@ static int ivpu_preemption_buffers_create(struct ivpu_device *vdev,
+ 	u64 secondary_size = ALIGN(vdev->fw->secondary_preempt_buf_size, PAGE_SIZE);
+ 	struct ivpu_addr_range range;
+ 
+-	if (vdev->hw->sched_mode != VPU_SCHEDULING_MODE_HW)
++	if (vdev->fw->sched_mode != VPU_SCHEDULING_MODE_HW)
+ 		return 0;
+ 
+ 	range.start = vdev->hw->ranges.user.end - (primary_size * IVPU_NUM_CMDQS_PER_CTX);
+@@ -68,7 +68,7 @@ static int ivpu_preemption_buffers_create(struct ivpu_device *vdev,
+ static void ivpu_preemption_buffers_free(struct ivpu_device *vdev,
+ 					 struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
+ {
+-	if (vdev->hw->sched_mode != VPU_SCHEDULING_MODE_HW)
++	if (vdev->fw->sched_mode != VPU_SCHEDULING_MODE_HW)
+ 		return;
+ 
+ 	drm_WARN_ON(&vdev->drm, !cmdq->primary_preempt_buf);
+@@ -149,7 +149,7 @@ static int ivpu_register_db(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *
+ 	struct ivpu_device *vdev = file_priv->vdev;
+ 	int ret;
+ 
+-	if (vdev->hw->sched_mode == VPU_SCHEDULING_MODE_HW)
++	if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW)
+ 		ret = ivpu_jsm_hws_register_db(vdev, file_priv->ctx.id, cmdq->db_id, cmdq->db_id,
+ 					       cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem));
+ 	else
+@@ -184,7 +184,7 @@ ivpu_cmdq_init(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq, u16 eng
+ 	jobq_header->tail = 0;
+ 	wmb(); /* Flush WC buffer for jobq->header */
+ 
+-	if (vdev->hw->sched_mode == VPU_SCHEDULING_MODE_HW) {
++	if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) {
+ 		ret = ivpu_hws_cmdq_init(file_priv, cmdq, engine, priority);
+ 		if (ret)
+ 			return ret;
+@@ -211,7 +211,7 @@ static int ivpu_cmdq_fini(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cm
+ 
+ 	cmdq->db_registered = false;
+ 
+-	if (vdev->hw->sched_mode == VPU_SCHEDULING_MODE_HW) {
++	if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) {
+ 		ret = ivpu_jsm_hws_destroy_cmdq(vdev, file_priv->ctx.id, cmdq->db_id);
+ 		if (!ret)
+ 			ivpu_dbg(vdev, JOB, "Command queue %d destroyed\n", cmdq->db_id);
+@@ -335,7 +335,7 @@ void ivpu_context_abort_locked(struct ivpu_file_priv *file_priv)
+ 
+ 	ivpu_cmdq_fini_all(file_priv);
+ 
+-	if (vdev->hw->sched_mode == VPU_SCHEDULING_MODE_OS)
++	if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_OS)
+ 		ivpu_jsm_context_release(vdev, file_priv->ctx.id);
+ }
+ 
+@@ -361,7 +361,7 @@ static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job)
+ 	if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_SUBMISSION))
+ 		entry->flags = VPU_JOB_FLAGS_NULL_SUBMISSION_MASK;
+ 
+-	if (vdev->hw->sched_mode == VPU_SCHEDULING_MODE_HW &&
++	if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW &&
+ 	    (unlikely(!(ivpu_test_mode & IVPU_TEST_MODE_PREEMPTION_DISABLE)))) {
+ 		entry->primary_preempt_buf_addr = cmdq->primary_preempt_buf->vpu_addr;
+ 		entry->primary_preempt_buf_size = ivpu_bo_size(cmdq->primary_preempt_buf);
+diff --git a/drivers/accel/ivpu/ivpu_sysfs.c b/drivers/accel/ivpu/ivpu_sysfs.c
+index 913669f1786e86..616477fc17fa07 100644
+--- a/drivers/accel/ivpu/ivpu_sysfs.c
++++ b/drivers/accel/ivpu/ivpu_sysfs.c
+@@ -6,6 +6,8 @@
+ #include <linux/device.h>
+ #include <linux/err.h>
+ 
++#include "ivpu_drv.h"
++#include "ivpu_fw.h"
+ #include "ivpu_hw.h"
+ #include "ivpu_sysfs.h"
+ 
+@@ -39,8 +41,30 @@ npu_busy_time_us_show(struct device *dev, struct device_attribute *attr, char *b
+ 
+ static DEVICE_ATTR_RO(npu_busy_time_us);
+ 
++/**
++ * DOC: sched_mode
++ *
++ * The sched_mode attribute reports the current NPU scheduling mode.
++ *
++ * It returns one of the following strings:
++ * - "HW"		- Hardware Scheduler mode
++ * - "OS"		- Operating System Scheduler mode
++ *
++ */
++static ssize_t
++sched_mode_show(struct device *dev, struct device_attribute *attr, char *buf)
++{
++	struct drm_device *drm = dev_get_drvdata(dev);
++	struct ivpu_device *vdev = to_ivpu_device(drm);
++
++	return sysfs_emit(buf, "%s\n", vdev->fw->sched_mode ? "HW" : "OS");
++}
++
++static DEVICE_ATTR_RO(sched_mode);
++
+ static struct attribute *ivpu_dev_attrs[] = {
+ 	&dev_attr_npu_busy_time_us.attr,
++	&dev_attr_sched_mode.attr,
+ 	NULL,
+ };
+ 
+diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
+index 25399f6dde7e27..e614e4bef9ea1b 100644
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -2301,6 +2301,34 @@ static const struct dmi_system_id acpi_ec_no_wakeup[] = {
+ 			DMI_MATCH(DMI_PRODUCT_FAMILY, "103C_5336AN HP ZHAN 66 Pro"),
+ 		},
+ 	},
++	/*
++	 * Lenovo Legion Go S; touchscreen blocks HW sleep when woken up from EC
++	 * https://gitlab.freedesktop.org/drm/amd/-/issues/3929
++	 */
++	{
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "83L3"),
++		}
++	},
++	{
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "83N6"),
++		}
++	},
++	{
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "83Q2"),
++		}
++	},
++	{
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "83Q3"),
++		}
++	},
+ 	{ },
+ };
+ 
+diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c
+index a35dd0e41c2704..f73ce6e13065dd 100644
+--- a/drivers/acpi/pptt.c
++++ b/drivers/acpi/pptt.c
+@@ -229,7 +229,7 @@ static int acpi_pptt_leaf_node(struct acpi_table_header *table_hdr,
+ 	node_entry = ACPI_PTR_DIFF(node, table_hdr);
+ 	entry = ACPI_ADD_PTR(struct acpi_subtable_header, table_hdr,
+ 			     sizeof(struct acpi_table_pptt));
+-	proc_sz = sizeof(struct acpi_pptt_processor *);
++	proc_sz = sizeof(struct acpi_pptt_processor);
+ 
+ 	while ((unsigned long)entry + proc_sz < table_end) {
+ 		cpu_node = (struct acpi_pptt_processor *)entry;
+@@ -270,7 +270,7 @@ static struct acpi_pptt_processor *acpi_find_processor_node(struct acpi_table_he
+ 	table_end = (unsigned long)table_hdr + table_hdr->length;
+ 	entry = ACPI_ADD_PTR(struct acpi_subtable_header, table_hdr,
+ 			     sizeof(struct acpi_table_pptt));
+-	proc_sz = sizeof(struct acpi_pptt_processor *);
++	proc_sz = sizeof(struct acpi_pptt_processor);
+ 
+ 	/* find the processor structure associated with this cpuid */
+ 	while ((unsigned long)entry + proc_sz < table_end) {
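
The acpi/pptt.c fix above corrects a classic sizeof-on-pointer bug:
sizeof(struct acpi_pptt_processor *) is the size of a pointer (8 bytes on
LP64), not of the structure, so the loop bound
"entry + proc_sz < table_end" could accept a truncated entry near the end
of the table and read past it. A standalone illustration with a simplified
stand-in struct (not the real ACPI layout):

#include <stdio.h>

struct proc_node {	/* stand-in for struct acpi_pptt_processor */
	unsigned int flags;
	unsigned int parent;
	unsigned int acpi_processor_id;
	unsigned int number_of_priv_resources;
};

int main(void)
{
	/* On LP64 this prints 8 vs 16: the pointer sizeof under-counts,
	 * so a bounds check using it admits partially out-of-range
	 * entries. */
	printf("sizeof(ptr)    = %zu\n", sizeof(struct proc_node *));
	printf("sizeof(struct) = %zu\n", sizeof(struct proc_node));
	return 0;
}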
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index f915e3df57a9a0..1660f46dc08b59 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -2325,8 +2325,8 @@ static unsigned int ata_msense_control_ata_feature(struct ata_device *dev,
+ 	 */
+ 	put_unaligned_be16(ATA_FEATURE_SUB_MPAGE_LEN - 4, &buf[2]);
+ 
+-	if (dev->flags & ATA_DFLAG_CDL)
+-		buf[4] = 0x02; /* Support T2A and T2B pages */
++	if (dev->flags & ATA_DFLAG_CDL_ENABLED)
++		buf[4] = 0x02; /* T2A and T2B pages enabled */
+ 	else
+ 		buf[4] = 0;
+ 
+@@ -3734,12 +3734,11 @@ static int ata_mselect_control_spg0(struct ata_queued_cmd *qc,
+ }
+ 
+ /*
+- * Translate MODE SELECT control mode page, sub-pages f2h (ATA feature mode
++ * Translate MODE SELECT control mode page, sub-page f2h (ATA feature mode
+  * page) into a SET FEATURES command.
+  */
+-static unsigned int ata_mselect_control_ata_feature(struct ata_queued_cmd *qc,
+-						    const u8 *buf, int len,
+-						    u16 *fp)
++static int ata_mselect_control_ata_feature(struct ata_queued_cmd *qc,
++					   const u8 *buf, int len, u16 *fp)
+ {
+ 	struct ata_device *dev = qc->dev;
+ 	struct ata_taskfile *tf = &qc->tf;
+@@ -3757,17 +3756,27 @@ static unsigned int ata_mselect_control_ata_feature(struct ata_queued_cmd *qc,
+ 	/* Check cdl_ctrl */
+ 	switch (buf[0] & 0x03) {
+ 	case 0:
+-		/* Disable CDL */
++		/* Disable CDL if it is enabled */
++		if (!(dev->flags & ATA_DFLAG_CDL_ENABLED))
++			return 0;
++		ata_dev_dbg(dev, "Disabling CDL\n");
+ 		cdl_action = 0;
+ 		dev->flags &= ~ATA_DFLAG_CDL_ENABLED;
+ 		break;
+ 	case 0x02:
+-		/* Enable CDL T2A/T2B: NCQ priority must be disabled */
++		/*
++		 * Enable CDL if not already enabled. Since this is mutually
++		 * exclusive with NCQ priority, allow this only if NCQ priority
++		 * is disabled.
++		 */
++		if (dev->flags & ATA_DFLAG_CDL_ENABLED)
++			return 0;
+ 		if (dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLED) {
+ 			ata_dev_err(dev,
+ 				"NCQ priority must be disabled to enable CDL\n");
+ 			return -EINVAL;
+ 		}
++		ata_dev_dbg(dev, "Enabling CDL\n");
+ 		cdl_action = 1;
+ 		dev->flags |= ATA_DFLAG_CDL_ENABLED;
+ 		break;
+diff --git a/drivers/base/base.h b/drivers/base/base.h
+index 8cf04a557bdb0d..c4ffd099504337 100644
+--- a/drivers/base/base.h
++++ b/drivers/base/base.h
+@@ -73,6 +73,7 @@ static inline void subsys_put(struct subsys_private *sp)
+ 		kset_put(&sp->subsys);
+ }
+ 
++struct subsys_private *bus_to_subsys(const struct bus_type *bus);
+ struct subsys_private *class_to_subsys(const struct class *class);
+ 
+ struct driver_private {
+@@ -179,6 +180,22 @@ int driver_add_groups(const struct device_driver *drv, const struct attribute_gr
+ void driver_remove_groups(const struct device_driver *drv, const struct attribute_group **groups);
+ void device_driver_detach(struct device *dev);
+ 
++static inline void device_set_driver(struct device *dev, const struct device_driver *drv)
++{
++	/*
++	 * The majority (all?) of read accesses to dev->driver happen either
++	 * while holding the device lock or in bus/driver code that is only
++	 * invoked when the device is bound to a driver, where there is no
++	 * concern of the pointer being changed while it is being read.
++	 * However, when reading a device's uevent file we read the driver
++	 * pointer without taking the device lock (so we do not block there
++	 * for an arbitrary amount of time). We use WRITE_ONCE() here to prevent
++	 * tearing so that READ_ONCE() can safely be used in uevent code.
++	 */
++	// FIXME - this cast should not be needed "soon"
++	WRITE_ONCE(dev->driver, (struct device_driver *)drv);
++}
++
+ int devres_release_all(struct device *dev);
+ void device_block_probing(void);
+ void device_unblock_probing(void);
+diff --git a/drivers/base/bus.c b/drivers/base/bus.c
+index 657c93c38b0dc2..eaf38a6f6091c2 100644
+--- a/drivers/base/bus.c
++++ b/drivers/base/bus.c
+@@ -57,7 +57,7 @@ static int __must_check bus_rescan_devices_helper(struct device *dev,
+  * NULL.  A call to subsys_put() must be done when finished with the pointer in
+  * order for it to be properly freed.
+  */
+-static struct subsys_private *bus_to_subsys(const struct bus_type *bus)
++struct subsys_private *bus_to_subsys(const struct bus_type *bus)
+ {
+ 	struct subsys_private *sp = NULL;
+ 	struct kobject *kobj;
+diff --git a/drivers/base/core.c b/drivers/base/core.c
+index ec0ef6a0de9427..ba9b4cbef9e08d 100644
+--- a/drivers/base/core.c
++++ b/drivers/base/core.c
+@@ -2624,6 +2624,35 @@ static const char *dev_uevent_name(const struct kobject *kobj)
+ 	return NULL;
+ }
+ 
++/*
++ * Try filling "DRIVER=<name>" uevent variable for a device. Because this
++ * function may race with binding and unbinding the device from a driver,
++ * we need to be careful. Binding is generally safe: at worst we miss the
++ * fact that the device is already bound to a driver (but the driver
++ * information delivered through uevents is best-effort anyway; it may
++ * become obsolete as soon as it is generated). Unbinding is more risky,
++ * as the driver pointer is transitioning to NULL, so READ_ONCE() should
++ * be used to make sure we are dealing with the same pointer, and to
++ * ensure that the driver structure is not going to disappear from under
++ * us we take the bus' drivers klist lock. We rely on the assumption that
++ * only a registered driver can be bound to a device, and that bus code
++ * takes the same lock to unregister a driver.
++ */
++static void dev_driver_uevent(const struct device *dev, struct kobj_uevent_env *env)
++{
++	struct subsys_private *sp = bus_to_subsys(dev->bus);
++
++	if (sp) {
++		scoped_guard(spinlock, &sp->klist_drivers.k_lock) {
++			struct device_driver *drv = READ_ONCE(dev->driver);
++			if (drv)
++				add_uevent_var(env, "DRIVER=%s", drv->name);
++		}
++
++		subsys_put(sp);
++	}
++}
++
+ static int dev_uevent(const struct kobject *kobj, struct kobj_uevent_env *env)
+ {
+ 	const struct device *dev = kobj_to_dev(kobj);
+@@ -2655,8 +2684,8 @@ static int dev_uevent(const struct kobject *kobj, struct kobj_uevent_env *env)
+ 	if (dev->type && dev->type->name)
+ 		add_uevent_var(env, "DEVTYPE=%s", dev->type->name);
+ 
+-	if (dev->driver)
+-		add_uevent_var(env, "DRIVER=%s", dev->driver->name);
++	/* Add "DRIVER=%s" variable if the device is bound to a driver */
++	dev_driver_uevent(dev, env);
+ 
+ 	/* Add common DT information about the device */
+ 	of_device_uevent(dev, env);
+@@ -2726,11 +2755,8 @@ static ssize_t uevent_show(struct device *dev, struct device_attribute *attr,
+ 	if (!env)
+ 		return -ENOMEM;
+ 
+-	/* Synchronize with really_probe() */
+-	device_lock(dev);
+ 	/* let the kset specific function add its keys */
+ 	retval = kset->uevent_ops->uevent(&dev->kobj, env);
+-	device_unlock(dev);
+ 	if (retval)
+ 		goto out;
+ 
+@@ -3700,7 +3726,7 @@ int device_add(struct device *dev)
+ 	device_pm_remove(dev);
+ 	dpm_sysfs_remove(dev);
+  DPMError:
+-	dev->driver = NULL;
++	device_set_driver(dev, NULL);
+ 	bus_remove_device(dev);
+  BusError:
+ 	device_remove_attrs(dev);
+diff --git a/drivers/base/dd.c b/drivers/base/dd.c
+index f0e4b4aba885c6..b526e0e0f52d79 100644
+--- a/drivers/base/dd.c
++++ b/drivers/base/dd.c
+@@ -550,7 +550,7 @@ static void device_unbind_cleanup(struct device *dev)
+ 	arch_teardown_dma_ops(dev);
+ 	kfree(dev->dma_range_map);
+ 	dev->dma_range_map = NULL;
+-	dev->driver = NULL;
++	device_set_driver(dev, NULL);
+ 	dev_set_drvdata(dev, NULL);
+ 	if (dev->pm_domain && dev->pm_domain->dismiss)
+ 		dev->pm_domain->dismiss(dev);
+@@ -629,8 +629,7 @@ static int really_probe(struct device *dev, const struct device_driver *drv)
+ 	}
+ 
+ re_probe:
+-	// FIXME - this cast should not be needed "soon"
+-	dev->driver = (struct device_driver *)drv;
++	device_set_driver(dev, drv);
+ 
+ 	/* If using pinctrl, bind pins now before probing */
+ 	ret = pinctrl_bind_pins(dev);
+@@ -1014,7 +1013,7 @@ static int __device_attach(struct device *dev, bool allow_async)
+ 		if (ret == 0)
+ 			ret = 1;
+ 		else {
+-			dev->driver = NULL;
++			device_set_driver(dev, NULL);
+ 			ret = 0;
+ 		}
+ 	} else {
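
The device_set_driver() helper introduced above pairs a WRITE_ONCE() store
with the READ_ONCE() load in the new uevent path, so a lockless reader can
never observe a half-written (torn) pointer. In portable C11 the same
guarantee comes from relaxed atomics; a hedged sketch of just that aspect
(these are not the kernel macros, and the klist locking that keeps the
driver structure alive is omitted):

#include <stdatomic.h>
#include <stddef.h>

struct driver { const char *name; };

struct device {
	/* dev->driver equivalent: written under lock, read locklessly */
	_Atomic(struct driver *) driver;
};

/* Writer side, as in device_set_driver(); callers hold the device lock. */
static void device_set_driver(struct device *dev, struct driver *drv)
{
	atomic_store_explicit(&dev->driver, drv, memory_order_relaxed);
}

/* Lockless reader, as in the DRIVER= uevent path: load one coherent
 * pointer value into a local (possibly stale, never torn) and test
 * that local, so a concurrent unbind cannot invalidate the check. */
static const char *device_driver_name(struct device *dev)
{
	struct driver *drv =
		atomic_load_explicit(&dev->driver, memory_order_relaxed);

	return drv ? drv->name : NULL;
}

The atomics only rule out tearing; the hunk in core.c still takes the bus'
klist_drivers spinlock so the driver structure cannot be freed between the
load and the drv->name dereference.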
+diff --git a/drivers/char/misc.c b/drivers/char/misc.c
+index f7dd455dd0dd3c..dda466f9181acf 100644
+--- a/drivers/char/misc.c
++++ b/drivers/char/misc.c
+@@ -315,7 +315,7 @@ static int __init misc_init(void)
+ 		goto fail_remove;
+ 
+ 	err = -EIO;
+-	if (register_chrdev(MISC_MAJOR, "misc", &misc_fops))
++	if (__register_chrdev(MISC_MAJOR, 0, MINORMASK + 1, "misc", &misc_fops))
+ 		goto fail_printk;
+ 	return 0;
+ 
+diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
+index c62b208b42f131..abcfdd3c291835 100644
+--- a/drivers/char/virtio_console.c
++++ b/drivers/char/virtio_console.c
+@@ -1579,8 +1579,8 @@ static void handle_control_message(struct virtio_device *vdev,
+ 		break;
+ 	case VIRTIO_CONSOLE_RESIZE: {
+ 		struct {
+-			__u16 rows;
+-			__u16 cols;
++			__virtio16 rows;
++			__virtio16 cols;
+ 		} size;
+ 
+ 		if (!is_console_port(port))
+@@ -1588,7 +1588,8 @@ static void handle_control_message(struct virtio_device *vdev,
+ 
+ 		memcpy(&size, buf->buf + buf->offset + sizeof(*cpkt),
+ 		       sizeof(size));
+-		set_console_size(port, size.rows, size.cols);
++		set_console_size(port, virtio16_to_cpu(vdev, size.rows),
++				 virtio16_to_cpu(vdev, size.cols));
+ 
+ 		port->cons.hvc->irq_requested = 1;
+ 		resize_console(port);
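
The virtio_console fix above is an endianness correction: the resize
payload comes off the virtqueue in virtio byte order, so the fields are
declared __virtio16 and converted with virtio16_to_cpu(), which also
honors legacy devices that use guest-native byte order. For a modern
(always little-endian) device the conversion reduces to the sketch below,
with le16toh() from endian.h standing in for virtio16_to_cpu():

#include <endian.h>
#include <stdint.h>
#include <string.h>

struct console_resize {	/* wire format: two little-endian 16-bit fields */
	uint16_t rows;
	uint16_t cols;
};

/* Parse a little-endian resize payload into host byte order. Without
 * the le16toh() calls, a big-endian host would read e.g. 80 columns
 * (0x0050) as 20480 (0x5000). */
static void parse_resize(const uint8_t *buf, uint16_t *rows, uint16_t *cols)
{
	struct console_resize size;

	memcpy(&size, buf, sizeof(size));
	*rows = le16toh(size.rows);
	*cols = le16toh(size.cols);
}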
+diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
+index 5b4ab94193c2b0..7de3dfdae4b500 100644
+--- a/drivers/clk/clk.c
++++ b/drivers/clk/clk.c
+@@ -5264,6 +5264,10 @@ of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec)
+ 	if (!clkspec)
+ 		return ERR_PTR(-EINVAL);
+ 
++	/* Check if node in clkspec is in disabled/fail state */
++	if (!of_device_is_available(clkspec->np))
++		return ERR_PTR(-ENOENT);
++
+ 	mutex_lock(&of_clk_mutex);
+ 	list_for_each_entry(provider, &of_clk_providers, link) {
+ 		if (provider->node == clkspec->np) {
+diff --git a/drivers/comedi/drivers/jr3_pci.c b/drivers/comedi/drivers/jr3_pci.c
+index 951c23fa0369ea..75dce1ff24193b 100644
+--- a/drivers/comedi/drivers/jr3_pci.c
++++ b/drivers/comedi/drivers/jr3_pci.c
+@@ -758,7 +758,7 @@ static void jr3_pci_detach(struct comedi_device *dev)
+ 	struct jr3_pci_dev_private *devpriv = dev->private;
+ 
+ 	if (devpriv)
+-		del_timer_sync(&devpriv->timer);
++		timer_shutdown_sync(&devpriv->timer);
+ 
+ 	comedi_pci_detach(dev);
+ }
+diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
+index e67b2326671c9c..71f4b612dd971a 100644
+--- a/drivers/cpufreq/Kconfig.arm
++++ b/drivers/cpufreq/Kconfig.arm
+@@ -67,7 +67,7 @@ config ARM_VEXPRESS_SPC_CPUFREQ
+ config ARM_BRCMSTB_AVS_CPUFREQ
+ 	tristate "Broadcom STB AVS CPUfreq driver"
+ 	depends on (ARCH_BRCMSTB && !ARM_SCMI_CPUFREQ) || COMPILE_TEST
+-	default y
++	default y if ARCH_BRCMSTB && !ARM_SCMI_CPUFREQ
+ 	help
+ 	  Some Broadcom STB SoCs use a co-processor running proprietary firmware
+ 	  ("AVS") to handle voltage and frequency scaling. This driver provides
+@@ -79,7 +79,7 @@ config ARM_HIGHBANK_CPUFREQ
+ 	tristate "Calxeda Highbank-based"
+ 	depends on ARCH_HIGHBANK || COMPILE_TEST
+ 	depends on CPUFREQ_DT && REGULATOR && PL320_MBOX
+-	default m
++	default m if ARCH_HIGHBANK
+ 	help
+ 	  This adds the CPUFreq driver for Calxeda Highbank SoC
+ 	  based boards.
+@@ -124,7 +124,7 @@ config ARM_MEDIATEK_CPUFREQ
+ config ARM_MEDIATEK_CPUFREQ_HW
+ 	tristate "MediaTek CPUFreq HW driver"
+ 	depends on ARCH_MEDIATEK || COMPILE_TEST
+-	default m
++	default m if ARCH_MEDIATEK
+ 	help
+ 	  Support for the CPUFreq HW driver.
+ 	  Some MediaTek chipsets have a HW engine to offload the steps
+@@ -172,7 +172,7 @@ config ARM_RASPBERRYPI_CPUFREQ
+ config ARM_S3C64XX_CPUFREQ
+ 	bool "Samsung S3C64XX"
+ 	depends on CPU_S3C6410 || COMPILE_TEST
+-	default y
++	default CPU_S3C6410
+ 	help
+ 	  This adds the CPUFreq driver for Samsung S3C6410 SoC.
+ 
+@@ -181,7 +181,7 @@ config ARM_S3C64XX_CPUFREQ
+ config ARM_S5PV210_CPUFREQ
+ 	bool "Samsung S5PV210 and S5PC110"
+ 	depends on CPU_S5PV210 || COMPILE_TEST
+-	default y
++	default CPU_S5PV210
+ 	help
+ 	  This adds the CPUFreq driver for Samsung S5PV210 and
+ 	  S5PC110 SoCs.
+@@ -205,7 +205,7 @@ config ARM_SCMI_CPUFREQ
+ config ARM_SPEAR_CPUFREQ
+ 	bool "SPEAr CPUFreq support"
+ 	depends on PLAT_SPEAR || COMPILE_TEST
+-	default y
++	default PLAT_SPEAR
+ 	help
+ 	  This adds the CPUFreq driver support for SPEAr SOCs.
+ 
+@@ -224,7 +224,7 @@ config ARM_TEGRA20_CPUFREQ
+ 	tristate "Tegra20/30 CPUFreq support"
+ 	depends on ARCH_TEGRA || COMPILE_TEST
+ 	depends on CPUFREQ_DT
+-	default y
++	default ARCH_TEGRA
+ 	help
+ 	  This adds the CPUFreq driver support for Tegra20/30 SOCs.
+ 
+@@ -232,7 +232,7 @@ config ARM_TEGRA124_CPUFREQ
+ 	bool "Tegra124 CPUFreq support"
+ 	depends on ARCH_TEGRA || COMPILE_TEST
+ 	depends on CPUFREQ_DT
+-	default y
++	default ARCH_TEGRA
+ 	help
+ 	  This adds the CPUFreq driver support for Tegra124 SOCs.
+ 
+@@ -247,14 +247,14 @@ config ARM_TEGRA194_CPUFREQ
+ 	tristate "Tegra194 CPUFreq support"
+ 	depends on ARCH_TEGRA_194_SOC || ARCH_TEGRA_234_SOC || (64BIT && COMPILE_TEST)
+ 	depends on TEGRA_BPMP
+-	default y
++	default ARCH_TEGRA_194_SOC || ARCH_TEGRA_234_SOC
+ 	help
+ 	  This adds CPU frequency driver support for Tegra194 SOCs.
+ 
+ config ARM_TI_CPUFREQ
+ 	bool "Texas Instruments CPUFreq support"
+ 	depends on ARCH_OMAP2PLUS || ARCH_K3 || COMPILE_TEST
+-	default y
++	default ARCH_OMAP2PLUS || ARCH_K3
+ 	help
+ 	  This driver enables valid OPPs on the running platform based on
+ 	  values contained within the SoC in use. Enable this in order to
+diff --git a/drivers/cpufreq/apple-soc-cpufreq.c b/drivers/cpufreq/apple-soc-cpufreq.c
+index 4dcacab9b4bf25..ddf7dcb3e9b0b5 100644
+--- a/drivers/cpufreq/apple-soc-cpufreq.c
++++ b/drivers/cpufreq/apple-soc-cpufreq.c
+@@ -103,11 +103,17 @@ static const struct of_device_id apple_soc_cpufreq_of_match[] __maybe_unused = {
+ 
+ static unsigned int apple_soc_cpufreq_get_rate(unsigned int cpu)
+ {
+-	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
+-	struct apple_cpu_priv *priv = policy->driver_data;
++	struct cpufreq_policy *policy;
++	struct apple_cpu_priv *priv;
+ 	struct cpufreq_frequency_table *p;
+ 	unsigned int pstate;
+ 
++	policy = cpufreq_cpu_get_raw(cpu);
++	if (unlikely(!policy))
++		return 0;
++
++	priv = policy->driver_data;
++
+ 	if (priv->info->cur_pstate_mask) {
+ 		u64 reg = readq_relaxed(priv->reg_base + APPLE_DVFS_STATUS);
+ 
+diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
+index c1cdf0f4d0ddda..36ea181260c7ee 100644
+--- a/drivers/cpufreq/cppc_cpufreq.c
++++ b/drivers/cpufreq/cppc_cpufreq.c
+@@ -767,7 +767,7 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
+ 	int ret;
+ 
+ 	if (!policy)
+-		return -ENODEV;
++		return 0;
+ 
+ 	cpu_data = policy->driver_data;
+ 
+diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
+index 07d6f9a9b7c820..7e7c1613a67c6d 100644
+--- a/drivers/cpufreq/scmi-cpufreq.c
++++ b/drivers/cpufreq/scmi-cpufreq.c
+@@ -34,11 +34,17 @@ static struct cpufreq_driver scmi_cpufreq_driver;
+ 
+ static unsigned int scmi_cpufreq_get_rate(unsigned int cpu)
+ {
+-	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
+-	struct scmi_data *priv = policy->driver_data;
++	struct cpufreq_policy *policy;
++	struct scmi_data *priv;
+ 	unsigned long rate;
+ 	int ret;
+ 
++	policy = cpufreq_cpu_get_raw(cpu);
++	if (unlikely(!policy))
++		return 0;
++
++	priv = policy->driver_data;
++
+ 	ret = perf_ops->freq_get(ph, priv->domain_id, &rate, false);
+ 	if (ret)
+ 		return 0;
+diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
+index f2d913a91be9e0..a191d9bdf667ad 100644
+--- a/drivers/cpufreq/scpi-cpufreq.c
++++ b/drivers/cpufreq/scpi-cpufreq.c
+@@ -29,9 +29,16 @@ static struct scpi_ops *scpi_ops;
+ 
+ static unsigned int scpi_cpufreq_get_rate(unsigned int cpu)
+ {
+-	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
+-	struct scpi_data *priv = policy->driver_data;
+-	unsigned long rate = clk_get_rate(priv->clk);
++	struct cpufreq_policy *policy;
++	struct scpi_data *priv;
++	unsigned long rate;
++
++	policy = cpufreq_cpu_get_raw(cpu);
++	if (unlikely(!policy))
++		return 0;
++
++	priv = policy->driver_data;
++	rate = clk_get_rate(priv->clk);
+ 
+ 	return rate / 1000;
+ }
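
The get_rate fixes above (apple-soc, cppc, scmi, scpi) share one trap: the
->get() hook returns unsigned int, so returning -ENODEV silently wraps to
a huge bogus frequency, and dereferencing a NULL policy oopses outright;
returning 0 is the conventional "rate unknown" value. A tiny demonstration
of the wraparound:

#include <errno.h>
#include <stdio.h>

/* Mimics a cpufreq ->get() hook: note the unsigned return type. */
static unsigned int broken_get_rate(int have_policy)
{
	if (!have_policy)
		return -ENODEV;	/* wraps to 4294967277 as unsigned int */
	return 1800000;		/* kHz */
}

int main(void)
{
	/* Prints "rate = 4294967277 kHz" - a nonsense frequency rather
	 * than a recognizable error; returning 0 instead is unambiguous. */
	printf("rate = %u kHz\n", broken_get_rate(0));
	return 0;
}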
+diff --git a/drivers/cpufreq/sun50i-cpufreq-nvmem.c b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
+index 293921acec9378..0599dbf851ebe5 100644
+--- a/drivers/cpufreq/sun50i-cpufreq-nvmem.c
++++ b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
+@@ -167,7 +167,9 @@ static int sun50i_cpufreq_get_efuse(void)
+ 	struct nvmem_cell *speedbin_nvmem;
+ 	const struct of_device_id *match;
+ 	struct device *cpu_dev;
+-	u32 *speedbin;
++	void *speedbin_ptr;
++	u32 speedbin = 0;
++	size_t len;
+ 	int ret;
+ 
+ 	cpu_dev = get_cpu_device(0);
+@@ -190,14 +192,18 @@ static int sun50i_cpufreq_get_efuse(void)
+ 		return dev_err_probe(cpu_dev, PTR_ERR(speedbin_nvmem),
+ 				     "Could not get nvmem cell\n");
+ 
+-	speedbin = nvmem_cell_read(speedbin_nvmem, NULL);
++	speedbin_ptr = nvmem_cell_read(speedbin_nvmem, &len);
+ 	nvmem_cell_put(speedbin_nvmem);
+-	if (IS_ERR(speedbin))
+-		return PTR_ERR(speedbin);
++	if (IS_ERR(speedbin_ptr))
++		return PTR_ERR(speedbin_ptr);
+ 
+-	ret = opp_data->efuse_xlate(*speedbin);
++	if (len <= 4)
++		memcpy(&speedbin, speedbin_ptr, len);
++	speedbin = le32_to_cpu(speedbin);
+ 
+-	kfree(speedbin);
++	ret = opp_data->efuse_xlate(speedbin);
++
++	kfree(speedbin_ptr);
+ 
+ 	return ret;
+ };
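
The sun50i change stops treating the nvmem cell as a guaranteed 4-byte
u32: the returned buffer may be shorter, so the value is assembled into a
zero-initialized u32 bounded by the actual length and then byte-swapped
from little-endian. A userspace sketch of that read pattern (function and
type names are illustrative):

#include <endian.h>
#include <stdint.h>
#include <string.h>

/* Widen an nvmem-style cell of up to 4 little-endian bytes into a
 * host-order u32. Short cells fill only the low-order bytes; an
 * oversized cell yields 0, mirroring the hunk above. */
static uint32_t cell_to_u32(const void *cell, size_t len)
{
	uint32_t val = 0;	/* zero-fill covers short cells */

	if (len <= 4)
		memcpy(&val, cell, len);
	return le32toh(val);
}

For example, a 2-byte cell containing {0x2a, 0x00} decodes to 42 on both
little- and big-endian hosts.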
+diff --git a/drivers/crypto/atmel-sha204a.c b/drivers/crypto/atmel-sha204a.c
+index a02d496f4c410f..b9658e38cb034c 100644
+--- a/drivers/crypto/atmel-sha204a.c
++++ b/drivers/crypto/atmel-sha204a.c
+@@ -163,6 +163,12 @@ static int atmel_sha204a_probe(struct i2c_client *client)
+ 	i2c_priv->hwrng.name = dev_name(&client->dev);
+ 	i2c_priv->hwrng.read = atmel_sha204a_rng_read;
+ 
++	/*
++	 * According to review by Bill Cox [1], this HWRNG has very low entropy.
++	 * [1] https://www.metzdowd.com/pipermail/cryptography/2014-December/023858.html
++	 */
++	i2c_priv->hwrng.quality = 1;
++
+ 	ret = devm_hwrng_register(&client->dev, &i2c_priv->hwrng);
+ 	if (ret)
+ 		dev_warn(&client->dev, "failed to register RNG (%d)\n", ret);
+diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
+index 157f9a9ed63616..2ebc878da16095 100644
+--- a/drivers/crypto/ccp/sp-pci.c
++++ b/drivers/crypto/ccp/sp-pci.c
+@@ -532,6 +532,7 @@ static const struct pci_device_id sp_pci_table[] = {
+ 	{ PCI_VDEVICE(AMD, 0x14CA), (kernel_ulong_t)&dev_vdata[5] },
+ 	{ PCI_VDEVICE(AMD, 0x15C7), (kernel_ulong_t)&dev_vdata[6] },
+ 	{ PCI_VDEVICE(AMD, 0x1649), (kernel_ulong_t)&dev_vdata[6] },
++	{ PCI_VDEVICE(AMD, 0x1134), (kernel_ulong_t)&dev_vdata[7] },
+ 	{ PCI_VDEVICE(AMD, 0x17E0), (kernel_ulong_t)&dev_vdata[7] },
+ 	{ PCI_VDEVICE(AMD, 0x156E), (kernel_ulong_t)&dev_vdata[8] },
+ 	/* Last entry must be zero */
+diff --git a/drivers/cxl/core/regs.c b/drivers/cxl/core/regs.c
+index e1082e749c69e6..a9c39025535f2c 100644
+--- a/drivers/cxl/core/regs.c
++++ b/drivers/cxl/core/regs.c
+@@ -513,7 +513,6 @@ resource_size_t __rcrb_to_component(struct device *dev, struct cxl_rcrb_info *ri
+ 	resource_size_t rcrb = ri->base;
+ 	void __iomem *addr;
+ 	u32 bar0, bar1;
+-	u16 cmd;
+ 	u32 id;
+ 
+ 	if (which == CXL_RCRB_UPSTREAM)
+@@ -535,7 +534,6 @@ resource_size_t __rcrb_to_component(struct device *dev, struct cxl_rcrb_info *ri
+ 	}
+ 
+ 	id = readl(addr + PCI_VENDOR_ID);
+-	cmd = readw(addr + PCI_COMMAND);
+ 	bar0 = readl(addr + PCI_BASE_ADDRESS_0);
+ 	bar1 = readl(addr + PCI_BASE_ADDRESS_1);
+ 	iounmap(addr);
+@@ -550,8 +548,6 @@ resource_size_t __rcrb_to_component(struct device *dev, struct cxl_rcrb_info *ri
+ 			dev_err(dev, "Failed to access Downstream Port RCRB\n");
+ 		return CXL_RESOURCE_NONE;
+ 	}
+-	if (!(cmd & PCI_COMMAND_MEMORY))
+-		return CXL_RESOURCE_NONE;
+ 	/* The RCRB is a Memory Window, and the MEM_TYPE_1M bit is obsolete */
+ 	if (bar0 & (PCI_BASE_ADDRESS_MEM_TYPE_1M | PCI_BASE_ADDRESS_SPACE_IO))
+ 		return CXL_RESOURCE_NONE;
+diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
+index 91b2fbc0b86471..d891dfca358e20 100644
+--- a/drivers/dma/dmatest.c
++++ b/drivers/dma/dmatest.c
+@@ -841,9 +841,9 @@ static int dmatest_func(void *data)
+ 		} else {
+ 			dma_async_issue_pending(chan);
+ 
+-			wait_event_freezable_timeout(thread->done_wait,
+-					done->done,
+-					msecs_to_jiffies(params->timeout));
++			wait_event_timeout(thread->done_wait,
++					   done->done,
++					   msecs_to_jiffies(params->timeout));
+ 
+ 			status = dma_async_is_tx_complete(chan, cookie, NULL,
+ 							  NULL);
+diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c
+index 528f37417aea48..f9429dd52fd9bf 100644
+--- a/drivers/firmware/stratix10-svc.c
++++ b/drivers/firmware/stratix10-svc.c
+@@ -1227,22 +1227,28 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev)
+ 	if (!svc->intel_svc_fcs) {
+ 		dev_err(dev, "failed to allocate %s device\n", INTEL_FCS);
+ 		ret = -ENOMEM;
+-		goto err_unregister_dev;
++		goto err_unregister_rsu_dev;
+ 	}
+ 
+ 	ret = platform_device_add(svc->intel_svc_fcs);
+ 	if (ret) {
+ 		platform_device_put(svc->intel_svc_fcs);
+-		goto err_unregister_dev;
++		goto err_unregister_rsu_dev;
+ 	}
+ 
++	ret = of_platform_default_populate(dev_of_node(dev), NULL, dev);
++	if (ret)
++		goto err_unregister_fcs_dev;
++
+ 	dev_set_drvdata(dev, svc);
+ 
+ 	pr_info("Intel Service Layer Driver Initialized\n");
+ 
+ 	return 0;
+ 
+-err_unregister_dev:
++err_unregister_fcs_dev:
++	platform_device_unregister(svc->intel_svc_fcs);
++err_unregister_rsu_dev:
+ 	platform_device_unregister(svc->stratix10_svc_rsu);
+ err_free_kfifo:
+ 	kfifo_free(&controller->svc_fifo);
+@@ -1256,6 +1262,8 @@ static void stratix10_svc_drv_remove(struct platform_device *pdev)
+ 	struct stratix10_svc *svc = dev_get_drvdata(&pdev->dev);
+ 	struct stratix10_svc_controller *ctrl = platform_get_drvdata(pdev);
+ 
++	of_platform_depopulate(ctrl->dev);
++
+ 	platform_device_unregister(svc->intel_svc_fcs);
+ 	platform_device_unregister(svc->stratix10_svc_rsu);
+ 
+diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
+index e543129d360500..626daedb016987 100644
+--- a/drivers/gpio/gpiolib-of.c
++++ b/drivers/gpio/gpiolib-of.c
+@@ -259,6 +259,9 @@ static void of_gpio_set_polarity_by_property(const struct device_node *np,
+ 		{ "fsl,imx8qm-fec",  "phy-reset-gpios", "phy-reset-active-high" },
+ 		{ "fsl,s32v234-fec", "phy-reset-gpios", "phy-reset-active-high" },
+ #endif
++#if IS_ENABLED(CONFIG_MMC_ATMELMCI)
++		{ "atmel,hsmci",       "cd-gpios",     "cd-inverted" },
++#endif
+ #if IS_ENABLED(CONFIG_PCI_IMX6)
+ 		{ "fsl,imx6q-pcie",  "reset-gpio", "reset-gpio-active-high" },
+ 		{ "fsl,imx6sx-pcie", "reset-gpio", "reset-gpio-active-high" },
+@@ -284,9 +287,6 @@ static void of_gpio_set_polarity_by_property(const struct device_node *np,
+ #if IS_ENABLED(CONFIG_REGULATOR_GPIO)
+ 		{ "regulator-gpio",    "enable-gpio",  "enable-active-high" },
+ 		{ "regulator-gpio",    "enable-gpios", "enable-active-high" },
+-#endif
+-#if IS_ENABLED(CONFIG_MMC_ATMELMCI)
+-		{ "atmel,hsmci",       "cd-gpios",     "cd-inverted" },
+ #endif
+ 	};
+ 	unsigned int i;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index 9b1e0ede05a452..b7aad43d9ad07b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -350,7 +350,6 @@ enum amdgpu_kiq_irq {
+ 	AMDGPU_CP_KIQ_IRQ_DRIVER0 = 0,
+ 	AMDGPU_CP_KIQ_IRQ_LAST
+ };
+-#define SRIOV_USEC_TIMEOUT  1200000 /* wait 12 * 100ms for SRIOV */
+ #define MAX_KIQ_REG_WAIT       5000 /* in usecs, 5ms */
+ #define MAX_KIQ_REG_BAILOUT_INTERVAL   5 /* in msecs, 5ms */
+ #define MAX_KIQ_REG_TRY 1000
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+index 05ebb8216a55a5..3c2ac5f4e814b7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+@@ -1426,9 +1426,11 @@ static int amdgpu_gfx_run_cleaner_shader_job(struct amdgpu_ring *ring)
+ 	struct amdgpu_device *adev = ring->adev;
+ 	struct drm_gpu_scheduler *sched = &ring->sched;
+ 	struct drm_sched_entity entity;
++	static atomic_t counter;
+ 	struct dma_fence *f;
+ 	struct amdgpu_job *job;
+ 	struct amdgpu_ib *ib;
++	void *owner;
+ 	int i, r;
+ 
+ 	/* Initialize the scheduler entity */
+@@ -1439,9 +1441,15 @@ static int amdgpu_gfx_run_cleaner_shader_job(struct amdgpu_ring *ring)
+ 		goto err;
+ 	}
+ 
+-	r = amdgpu_job_alloc_with_ib(ring->adev, &entity, NULL,
+-				     64, 0,
+-				     &job);
++	/*
++	 * Use some unique dummy value as the owner to make sure we execute
++	 * the cleaner shader on each submission. The value just needs to change
++	 * for each submission and is otherwise meaningless.
++	 */
++	owner = (void *)(unsigned long)atomic_inc_return(&counter);
++
++	r = amdgpu_job_alloc_with_ib(ring->adev, &entity, owner,
++				     64, 0, &job);
+ 	if (r)
+ 		goto err;
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+index 17a19d49d30a57..9d130d3af0b392 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+@@ -678,12 +678,10 @@ int amdgpu_gmc_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid,
+ 				   uint32_t flush_type, bool all_hub,
+ 				   uint32_t inst)
+ {
+-	u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT :
+-		adev->usec_timeout;
+ 	struct amdgpu_ring *ring = &adev->gfx.kiq[inst].ring;
+ 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[inst];
+ 	unsigned int ndw;
+-	int r;
++	int r, cnt = 0;
+ 	uint32_t seq;
+ 
+ 	/*
+@@ -740,10 +738,21 @@ int amdgpu_gmc_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid,
+ 
+ 		amdgpu_ring_commit(ring);
+ 		spin_unlock(&adev->gfx.kiq[inst].ring_lock);
+-		if (amdgpu_fence_wait_polling(ring, seq, usec_timeout) < 1) {
++
++		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
++
++		might_sleep();
++		while (r < 1 && cnt++ < MAX_KIQ_REG_TRY &&
++		       !amdgpu_reset_pending(adev->reset_domain)) {
++			msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
++			r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
++		}
++
++		if (cnt > MAX_KIQ_REG_TRY) {
+ 			dev_err(adev->dev, "timeout waiting for kiq fence\n");
+ 			r = -ETIME;
+-		}
++		} else
++			r = 0;
+ 	}
+ 
+ error_unlock_reset:
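
The amdgpu_gmc change above swaps one long poll (the removed
SRIOV_USEC_TIMEOUT was 1.2 seconds) for the usual KIQ idiom: a short poll,
then up to MAX_KIQ_REG_TRY retries with an msleep() between attempts,
bailing out early when a GPU reset is already pending. The control flow as
a self-contained userspace sketch (poll_once(), reset_pending() and the
constants are stand-ins):

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define MAX_TRIES      1000	/* stand-in for MAX_KIQ_REG_TRY */
#define RETRY_DELAY_US 5000	/* stand-in for the 5 ms bailout interval */

static int polls;		/* simulated fence: signals on poll 3 */
static bool poll_once(void) { return ++polls >= 3; }
static bool reset_pending(void) { return false; }

/* Short poll first, then bounded sleepy retries, aborting early when a
 * reset is queued - the same shape as the hunk above. Returns 0 on
 * success or early abort, -1 (~ -ETIME) after exhausting all retries. */
static int wait_with_retries(void)
{
	int cnt = 0;

	if (poll_once())
		return 0;
	while (cnt++ < MAX_TRIES && !reset_pending()) {
		usleep(RETRY_DELAY_US);
		if (poll_once())
			return 0;
	}
	return cnt > MAX_TRIES ? -1 : 0;
}

int main(void)
{
	printf("wait_with_retries() = %d after %d polls\n",
	       wait_with_retries(), polls);
	return 0;
}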
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+index 45ed97038df0c8..24d711b0e6346c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+@@ -5998,7 +5998,7 @@ static int gfx_v10_0_cp_gfx_load_pfp_microcode(struct amdgpu_device *adev)
+ 	}
+ 
+ 	if (amdgpu_emu_mode == 1)
+-		adev->hdp.funcs->flush_hdp(adev, NULL);
++		amdgpu_device_flush_hdp(adev, NULL);
+ 
+ 	tmp = RREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_CNTL);
+ 	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
+@@ -6076,7 +6076,7 @@ static int gfx_v10_0_cp_gfx_load_ce_microcode(struct amdgpu_device *adev)
+ 	}
+ 
+ 	if (amdgpu_emu_mode == 1)
+-		adev->hdp.funcs->flush_hdp(adev, NULL);
++		amdgpu_device_flush_hdp(adev, NULL);
+ 
+ 	tmp = RREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_CNTL);
+ 	tmp = REG_SET_FIELD(tmp, CP_CE_IC_BASE_CNTL, VMID, 0);
+@@ -6153,7 +6153,7 @@ static int gfx_v10_0_cp_gfx_load_me_microcode(struct amdgpu_device *adev)
+ 	}
+ 
+ 	if (amdgpu_emu_mode == 1)
+-		adev->hdp.funcs->flush_hdp(adev, NULL);
++		amdgpu_device_flush_hdp(adev, NULL);
+ 
+ 	tmp = RREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_CNTL);
+ 	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
+@@ -6528,7 +6528,7 @@ static int gfx_v10_0_cp_compute_load_microcode(struct amdgpu_device *adev)
+ 	}
+ 
+ 	if (amdgpu_emu_mode == 1)
+-		adev->hdp.funcs->flush_hdp(adev, NULL);
++		amdgpu_device_flush_hdp(adev, NULL);
+ 
+ 	tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL);
+ 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+index 84cf5fd297b7f6..0357fea8ae1dff 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+@@ -2327,7 +2327,7 @@ static int gfx_v11_0_config_me_cache(struct amdgpu_device *adev, uint64_t addr)
+ 	}
+ 
+ 	if (amdgpu_emu_mode == 1)
+-		adev->hdp.funcs->flush_hdp(adev, NULL);
++		amdgpu_device_flush_hdp(adev, NULL);
+ 
+ 	tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL);
+ 	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
+@@ -2371,7 +2371,7 @@ static int gfx_v11_0_config_pfp_cache(struct amdgpu_device *adev, uint64_t addr)
+ 	}
+ 
+ 	if (amdgpu_emu_mode == 1)
+-		adev->hdp.funcs->flush_hdp(adev, NULL);
++		amdgpu_device_flush_hdp(adev, NULL);
+ 
+ 	tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL);
+ 	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
+@@ -2416,7 +2416,7 @@ static int gfx_v11_0_config_mec_cache(struct amdgpu_device *adev, uint64_t addr)
+ 	}
+ 
+ 	if (amdgpu_emu_mode == 1)
+-		adev->hdp.funcs->flush_hdp(adev, NULL);
++		amdgpu_device_flush_hdp(adev, NULL);
+ 
+ 	tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL);
+ 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
+@@ -3051,7 +3051,7 @@ static int gfx_v11_0_cp_gfx_load_pfp_microcode_rs64(struct amdgpu_device *adev)
+ 	amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_data_obj);
+ 
+ 	if (amdgpu_emu_mode == 1)
+-		adev->hdp.funcs->flush_hdp(adev, NULL);
++		amdgpu_device_flush_hdp(adev, NULL);
+ 
+ 	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO,
+ 		lower_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr));
+@@ -3269,7 +3269,7 @@ static int gfx_v11_0_cp_gfx_load_me_microcode_rs64(struct amdgpu_device *adev)
+ 	amdgpu_bo_unreserve(adev->gfx.me.me_fw_data_obj);
+ 
+ 	if (amdgpu_emu_mode == 1)
+-		adev->hdp.funcs->flush_hdp(adev, NULL);
++		amdgpu_device_flush_hdp(adev, NULL);
+ 
+ 	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO,
+ 		lower_32_bits(adev->gfx.me.me_fw_gpu_addr));
+@@ -4487,7 +4487,7 @@ static int gfx_v11_0_gfxhub_enable(struct amdgpu_device *adev)
+ 	if (r)
+ 		return r;
+ 
+-	adev->hdp.funcs->flush_hdp(adev, NULL);
++	amdgpu_device_flush_hdp(adev, NULL);
+ 
+ 	value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
+ 		false : true;
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
+index b259e217930c75..241619ee10e4be 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
+@@ -2264,7 +2264,7 @@ static int gfx_v12_0_cp_gfx_load_pfp_microcode_rs64(struct amdgpu_device *adev)
+ 	amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_data_obj);
+ 
+ 	if (amdgpu_emu_mode == 1)
+-		adev->hdp.funcs->flush_hdp(adev, NULL);
++		amdgpu_device_flush_hdp(adev, NULL);
+ 
+ 	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO,
+ 		lower_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr));
+@@ -2408,7 +2408,7 @@ static int gfx_v12_0_cp_gfx_load_me_microcode_rs64(struct amdgpu_device *adev)
+ 	amdgpu_bo_unreserve(adev->gfx.me.me_fw_data_obj);
+ 
+ 	if (amdgpu_emu_mode == 1)
+-		adev->hdp.funcs->flush_hdp(adev, NULL);
++		amdgpu_device_flush_hdp(adev, NULL);
+ 
+ 	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO,
+ 		lower_32_bits(adev->gfx.me.me_fw_gpu_addr));
+@@ -3429,7 +3429,7 @@ static int gfx_v12_0_gfxhub_enable(struct amdgpu_device *adev)
+ 	if (r)
+ 		return r;
+ 
+-	adev->hdp.funcs->flush_hdp(adev, NULL);
++	amdgpu_device_flush_hdp(adev, NULL);
+ 
+ 	value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
+ 		false : true;
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+index 9784a28921853f..c6e74292128276 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+@@ -265,7 +265,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
+ 	ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
+ 
+ 	/* flush hdp cache */
+-	adev->hdp.funcs->flush_hdp(adev, NULL);
++	amdgpu_device_flush_hdp(adev, NULL);
+ 
+ 	/* This is necessary for SRIOV as well as for GFXOFF to function
+ 	 * properly under bare metal
+@@ -966,7 +966,7 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
+ 	adev->hdp.funcs->init_registers(adev);
+ 
+ 	/* Flush HDP after it is initialized */
+-	adev->hdp.funcs->flush_hdp(adev, NULL);
++	amdgpu_device_flush_hdp(adev, NULL);
+ 
+ 	value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
+ 		false : true;
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+index 2797fd84432b22..4e9c23d65b02ff 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+@@ -226,7 +226,7 @@ static void gmc_v11_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
+ 	ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
+ 
+ 	/* flush hdp cache */
+-	adev->hdp.funcs->flush_hdp(adev, NULL);
++	amdgpu_device_flush_hdp(adev, NULL);
+ 
+ 	/* This is necessary for SRIOV as well as for GFXOFF to function
+ 	 * properly under bare metal
+@@ -893,7 +893,7 @@ static int gmc_v11_0_gart_enable(struct amdgpu_device *adev)
+ 		return r;
+ 
+ 	/* Flush HDP after it is initialized */
+-	adev->hdp.funcs->flush_hdp(adev, NULL);
++	amdgpu_device_flush_hdp(adev, NULL);
+ 
+ 	value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
+ 		false : true;
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
+index 60acf676000b34..525e435ee22d8e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
+@@ -294,7 +294,7 @@ static void gmc_v12_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
+ 		return;
+ 
+ 	/* flush hdp cache */
+-	adev->hdp.funcs->flush_hdp(adev, NULL);
++	amdgpu_device_flush_hdp(adev, NULL);
+ 
+ 	/* This is necessary for SRIOV as well as for GFXOFF to function
+ 	 * properly under bare metal
+@@ -862,7 +862,7 @@ static int gmc_v12_0_gart_enable(struct amdgpu_device *adev)
+ 		return r;
+ 
+ 	/* Flush HDP after it is initialized */
+-	adev->hdp.funcs->flush_hdp(adev, NULL);
++	amdgpu_device_flush_hdp(adev, NULL);
+ 
+ 	value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
+ 		false : true;
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+index 7a45f3fdc73410..9a212413c6d3a6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+@@ -2351,7 +2351,7 @@ static int gmc_v9_0_hw_init(void *handle)
+ 	adev->hdp.funcs->init_registers(adev);
+ 
+ 	/* After HDP is initialized, flush HDP.*/
+-	adev->hdp.funcs->flush_hdp(adev, NULL);
++	amdgpu_device_flush_hdp(adev, NULL);
+ 
+ 	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
+ 		value = false;
+diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+index 2395f1856962ad..e77a467af7ac31 100644
+--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+@@ -532,7 +532,7 @@ static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops)
+ 			}
+ 
+ 			memcpy_toio(adev->mman.aper_base_kaddr, buf, sz);
+-			adev->hdp.funcs->flush_hdp(adev, NULL);
++			amdgpu_device_flush_hdp(adev, NULL);
+ 			vfree(buf);
+ 			drm_dev_exit(idx);
+ 		} else {
+diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
+index 51e470e8d67d9e..bf00de763acb0e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
+@@ -600,7 +600,7 @@ static int psp_v13_0_memory_training(struct psp_context *psp, uint32_t ops)
+ 			}
+ 
+ 			memcpy_toio(adev->mman.aper_base_kaddr, buf, sz);
+-			adev->hdp.funcs->flush_hdp(adev, NULL);
++			amdgpu_device_flush_hdp(adev, NULL);
+ 			vfree(buf);
+ 			drm_dev_exit(idx);
+ 		} else {
+diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c
+index 4d33c95a511631..89f6c06946c51b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c
+@@ -488,7 +488,7 @@ static int psp_v14_0_memory_training(struct psp_context *psp, uint32_t ops)
+ 			}
+ 
+ 			memcpy_toio(adev->mman.aper_base_kaddr, buf, sz);
+-			adev->hdp.funcs->flush_hdp(adev, NULL);
++			amdgpu_device_flush_hdp(adev, NULL);
+ 			vfree(buf);
+ 			drm_dev_exit(idx);
+ 		} else {
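
[The gmc_v10/v11/v12/v9 and psp_v11/v13/v14 hunks above all replace direct calls
through the HDP callback with the amdgpu_device_flush_hdp() wrapper. The sketch
below only illustrates the general shape such a wrapper takes -- one central
place to guard the callback -- and is an assumption, not the actual amdgpu
implementation:]

	/* Hedged sketch: centralising the HDP flush behind one helper so
	 * common guards live in a single place rather than at every call
	 * site. Guard conditions here are illustrative only.
	 */
	static inline void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
						   struct amdgpu_ring *ring)
	{
		if (!adev->hdp.funcs || !adev->hdp.funcs->flush_hdp)
			return;	/* no HDP block: nothing to flush */

		adev->hdp.funcs->flush_hdp(adev, ring);
	}
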
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index c22da13859bd51..f3cbff86155705 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -3216,16 +3216,16 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state,
+ 	for (k = 0; k < dc_state->stream_count; k++) {
+ 		bundle->stream_update.stream = dc_state->streams[k];
+ 
+-		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
++		for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
+ 			bundle->surface_updates[m].surface =
+-				dc_state->stream_status->plane_states[m];
++				dc_state->stream_status[k].plane_states[m];
+ 			bundle->surface_updates[m].surface->force_full_update =
+ 				true;
+ 		}
+ 
+ 		update_planes_and_stream_adapter(dm->dc,
+ 					 UPDATE_TYPE_FULL,
+-					 dc_state->stream_status->plane_count,
++					 dc_state->stream_status[k].plane_count,
+ 					 dc_state->streams[k],
+ 					 &bundle->stream_update,
+ 					 bundle->surface_updates);
+@@ -10775,6 +10775,9 @@ static bool should_reset_plane(struct drm_atomic_state *state,
+ 	    state->allow_modeset)
+ 		return true;
+ 
++	if (amdgpu_in_reset(adev) && state->allow_modeset)
++		return true;
++
+ 	/* Exit early if we know that we're adding or removing the plane. */
+ 	if (old_plane_state->crtc != new_plane_state->crtc)
+ 		return true;
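
[The dm_gpureset_commit_state() hunk above fixes an indexing bug: the loop
iterates streams with k but previously always read dc_state->stream_status[0].
A minimal illustration of the pattern being corrected:]

	/* Before: bound and plane list always came from entry 0 */
	plane_count = dc_state->stream_status->plane_count;	/* == [0] */

	/* After: each stream k pairs with its own status entry */
	plane_count = dc_state->stream_status[k].plane_count;
	surface     = dc_state->stream_status[k].plane_states[m];
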
+diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
+index cb187604744e96..e3e4f40bd41238 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
++++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
+@@ -2,6 +2,7 @@
+ //
+ // Copyright 2024 Advanced Micro Devices, Inc.
+ 
++#include <linux/vmalloc.h>
+ 
+ #include "dml2_internal_types.h"
+ #include "dml_top.h"
+@@ -13,11 +14,11 @@
+ 
+ static bool dml21_allocate_memory(struct dml2_context **dml_ctx)
+ {
+-	*dml_ctx = (struct dml2_context *)kzalloc(sizeof(struct dml2_context), GFP_KERNEL);
++	*dml_ctx = vzalloc(sizeof(struct dml2_context));
+ 	if (!(*dml_ctx))
+ 		return false;
+ 
+-	(*dml_ctx)->v21.dml_init.dml2_instance = (struct dml2_instance *)kzalloc(sizeof(struct dml2_instance), GFP_KERNEL);
++	(*dml_ctx)->v21.dml_init.dml2_instance = vzalloc(sizeof(struct dml2_instance));
+ 	if (!((*dml_ctx)->v21.dml_init.dml2_instance))
+ 		return false;
+ 
+@@ -27,7 +28,7 @@ static bool dml21_allocate_memory(struct dml2_context **dml_ctx)
+ 	(*dml_ctx)->v21.mode_support.display_config = &(*dml_ctx)->v21.display_config;
+ 	(*dml_ctx)->v21.mode_programming.display_config = (*dml_ctx)->v21.mode_support.display_config;
+ 
+-	(*dml_ctx)->v21.mode_programming.programming = (struct dml2_display_cfg_programming *)kzalloc(sizeof(struct dml2_display_cfg_programming), GFP_KERNEL);
++	(*dml_ctx)->v21.mode_programming.programming = vzalloc(sizeof(struct dml2_display_cfg_programming));
+ 	if (!((*dml_ctx)->v21.mode_programming.programming))
+ 		return false;
+ 
+@@ -116,8 +117,8 @@ bool dml21_create(const struct dc *in_dc, struct dml2_context **dml_ctx, const s
+ 
+ void dml21_destroy(struct dml2_context *dml2)
+ {
+-	kfree(dml2->v21.dml_init.dml2_instance);
+-	kfree(dml2->v21.mode_programming.programming);
++	vfree(dml2->v21.dml_init.dml2_instance);
++	vfree(dml2->v21.mode_programming.programming);
+ }
+ 
+ static void dml21_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_state *context, struct resource_context *out_new_hw_state,
+diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
+index cb2cb89dfecb2c..03812f862b3d68 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
++++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
+@@ -24,6 +24,8 @@
+  *
+  */
+ 
++#include <linux/vmalloc.h>
++
+ #include "display_mode_core.h"
+ #include "dml2_internal_types.h"
+ #include "dml2_utils.h"
+@@ -749,7 +751,7 @@ bool dml2_validate(const struct dc *in_dc, struct dc_state *context, struct dml2
+ 
+ static inline struct dml2_context *dml2_allocate_memory(void)
+ {
+-	return (struct dml2_context *) kzalloc(sizeof(struct dml2_context), GFP_KERNEL);
++	return (struct dml2_context *) vzalloc(sizeof(struct dml2_context));
+ }
+ 
+ static void dml2_init(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
+@@ -820,7 +822,7 @@ void dml2_destroy(struct dml2_context *dml2)
+ 
+ 	if (dml2->architecture == dml2_architecture_21)
+ 		dml21_destroy(dml2);
+-	kfree(dml2);
++	vfree(dml2);
+ }
+ 
+ void dml2_extract_dram_and_fclk_change_support(struct dml2_context *dml2,
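
[Both DML wrapper hunks switch the context allocations from kzalloc() to
vzalloc(). The DML state structures are large, so this trades physically
contiguous memory (high-order pages that can fail under fragmentation) for
virtually contiguous memory. A sketch of the idiom; the size comment is an
assumption for illustration:]

	#include <linux/vmalloc.h>

	/* Large, long-lived, never handed to DMA: vzalloc() is safer. */
	static struct dml2_context *alloc_ctx(void)
	{
		struct dml2_context *ctx = vzalloc(sizeof(*ctx)); /* multi-KiB */

		return ctx;	/* caller releases with vfree(), never kfree() */
	}
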
+diff --git a/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c b/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
+index 7d68a8acfe2ea4..eb0f8373258c34 100644
+--- a/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
++++ b/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
+@@ -129,11 +129,11 @@ static int jadard_unprepare(struct drm_panel *panel)
+ {
+ 	struct jadard *jadard = panel_to_jadard(panel);
+ 
+-	gpiod_set_value(jadard->reset, 1);
++	gpiod_set_value(jadard->reset, 0);
+ 	msleep(120);
+ 
+ 	if (jadard->desc->reset_before_power_off_vcioo) {
+-		gpiod_set_value(jadard->reset, 0);
++		gpiod_set_value(jadard->reset, 1);
+ 
+ 		usleep_range(1000, 2000);
+ 	}
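
[The jadard hunk above inverts the logical values written to the reset line,
which suggests the driver previously disagreed with the polarity declared in
the device tree. gpiod_set_value() works in logical terms, as sketched:]

	/* gpiod values are logical: 1 asserts the named function, and the
	 * DT flag (GPIO_ACTIVE_LOW/HIGH) supplies the electrical level. */
	gpiod_set_value(jadard->reset, 1);	/* assert reset */
	gpiod_set_value(jadard->reset, 0);	/* release reset */
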
+diff --git a/drivers/gpu/drm/xe/xe_wa_oob.rules b/drivers/gpu/drm/xe/xe_wa_oob.rules
+index 264d6e116499ce..93fa2708ee3786 100644
+--- a/drivers/gpu/drm/xe/xe_wa_oob.rules
++++ b/drivers/gpu/drm/xe/xe_wa_oob.rules
+@@ -29,8 +29,10 @@
+ 13011645652	GRAPHICS_VERSION(2004)
+ 14022293748	GRAPHICS_VERSION(2001)
+ 		GRAPHICS_VERSION(2004)
++		GRAPHICS_VERSION_RANGE(3000, 3001)
+ 22019794406	GRAPHICS_VERSION(2001)
+ 		GRAPHICS_VERSION(2004)
++		GRAPHICS_VERSION_RANGE(3000, 3001)
+ 22019338487	MEDIA_VERSION(2000)
+ 		GRAPHICS_VERSION(2001)
+ 		MEDIA_VERSION(3000), MEDIA_STEP(A0, B0)
+diff --git a/drivers/i3c/master/svc-i3c-master.c b/drivers/i3c/master/svc-i3c-master.c
+index 42102baabcddad..7911814ad82ac6 100644
+--- a/drivers/i3c/master/svc-i3c-master.c
++++ b/drivers/i3c/master/svc-i3c-master.c
+@@ -158,6 +158,10 @@ struct svc_i3c_regs_save {
+ 	u32 mdynaddr;
+ };
+ 
++struct svc_i3c_drvdata {
++	u32 quirks;
++};
++
+ /**
+  * struct svc_i3c_master - Silvaco I3C Master structure
+  * @base: I3C master controller
+@@ -183,6 +187,7 @@ struct svc_i3c_regs_save {
+  * @ibi.tbq_slot: To be queued IBI slot
+  * @ibi.lock: IBI lock
+  * @lock: Transfer lock, protect between IBI work thread and callbacks from master
++ * @drvdata: Driver data
+  * @enabled_events: Bit masks for enable events (IBI, HotJoin).
+  * @mctrl_config: Configuration value in SVC_I3C_MCTRL for setting speed back.
+  */
+@@ -214,6 +219,7 @@ struct svc_i3c_master {
+ 		spinlock_t lock;
+ 	} ibi;
+ 	struct mutex lock;
++	const struct svc_i3c_drvdata *drvdata;
+ 	u32 enabled_events;
+ 	u32 mctrl_config;
+ };
+@@ -1768,6 +1774,10 @@ static int svc_i3c_master_probe(struct platform_device *pdev)
+ 	if (!master)
+ 		return -ENOMEM;
+ 
++	master->drvdata = of_device_get_match_data(dev);
++	if (!master->drvdata)
++		return -EINVAL;
++
+ 	master->regs = devm_platform_ioremap_resource(pdev, 0);
+ 	if (IS_ERR(master->regs))
+ 		return PTR_ERR(master->regs);
+@@ -1909,8 +1919,13 @@ static const struct dev_pm_ops svc_i3c_pm_ops = {
+ 			   svc_i3c_runtime_resume, NULL)
+ };
+ 
++static const struct svc_i3c_drvdata npcm845_drvdata = {};
++
++static const struct svc_i3c_drvdata svc_default_drvdata = {};
++
+ static const struct of_device_id svc_i3c_master_of_match_tbl[] = {
+-	{ .compatible = "silvaco,i3c-master-v1"},
++	{ .compatible = "nuvoton,npcm845-i3c", .data = &npcm845_drvdata },
++	{ .compatible = "silvaco,i3c-master-v1", .data = &svc_default_drvdata },
+ 	{ /* sentinel */ },
+ };
+ MODULE_DEVICE_TABLE(of, svc_i3c_master_of_match_tbl);
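
[The svc-i3c hunk introduces per-compatible driver data so the new Nuvoton
variant can carry quirk flags later; both entries are empty here. Assuming a
hypothetical quirk bit (not defined by this patch), consumption would look
like:]

	#define SVC_I3C_QUIRK_EXAMPLE	BIT(0)	/* hypothetical */

	static bool svc_i3c_has_quirk(const struct svc_i3c_master *master,
				      u32 quirk)
	{
		/* drvdata was validated non-NULL at probe time above */
		return master->drvdata->quirks & quirk;
	}
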
+diff --git a/drivers/iio/adc/ad7768-1.c b/drivers/iio/adc/ad7768-1.c
+index 6f8816483f1a02..157a0df97f971b 100644
+--- a/drivers/iio/adc/ad7768-1.c
++++ b/drivers/iio/adc/ad7768-1.c
+@@ -142,7 +142,7 @@ static const struct iio_chan_spec ad7768_channels[] = {
+ 		.channel = 0,
+ 		.scan_index = 0,
+ 		.scan_type = {
+-			.sign = 'u',
++			.sign = 's',
+ 			.realbits = 24,
+ 			.storagebits = 32,
+ 			.shift = 8,
+@@ -370,12 +370,11 @@ static int ad7768_read_raw(struct iio_dev *indio_dev,
+ 			return ret;
+ 
+ 		ret = ad7768_scan_direct(indio_dev);
+-		if (ret >= 0)
+-			*val = ret;
+ 
+ 		iio_device_release_direct_mode(indio_dev);
+ 		if (ret < 0)
+ 			return ret;
++		*val = sign_extend32(ret, chan->scan_type.realbits - 1);
+ 
+ 		return IIO_VAL_INT;
+ 
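
[The ad7768-1 hunks mark the channel as signed and sign-extend the raw 24-bit
sample before reporting it. A worked example of the conversion, with values
chosen purely for illustration:]

	#include <linux/bitops.h>

	u32 raw = 0x00FFFFFF;			/* 24-bit two's-complement -1 */
	s32 val = sign_extend32(raw, 23);	/* realbits - 1 = 23 */
	/* val == -1; without the extension it would read as 16777215 */
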
+diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
+index b27791029fa934..b9f4a2937c3acc 100644
+--- a/drivers/infiniband/hw/qib/qib_fs.c
++++ b/drivers/infiniband/hw/qib/qib_fs.c
+@@ -55,6 +55,7 @@ static int qibfs_mknod(struct inode *dir, struct dentry *dentry,
+ 	struct inode *inode = new_inode(dir->i_sb);
+ 
+ 	if (!inode) {
++		dput(dentry);
+ 		error = -EPERM;
+ 		goto bail;
+ 	}
+diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
+index a24a97a2c6469b..f61e48f2373249 100644
+--- a/drivers/iommu/amd/iommu.c
++++ b/drivers/iommu/amd/iommu.c
+@@ -3660,7 +3660,7 @@ static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
+ 	 * we should not modify the IRTE
+ 	 */
+ 	if (!dev_data || !dev_data->use_vapic)
+-		return 0;
++		return -EINVAL;
+ 
+ 	ir_data->cfg = irqd_cfg(data);
+ 	pi_data->ir_data = ir_data;
+diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
+index 83c8e617a2c588..cac3dce111689c 100644
+--- a/drivers/iommu/iommu.c
++++ b/drivers/iommu/iommu.c
+@@ -503,6 +503,9 @@ static void iommu_deinit_device(struct device *dev)
+ 	dev->iommu_group = NULL;
+ 	module_put(ops->owner);
+ 	dev_iommu_free(dev);
++#ifdef CONFIG_IOMMU_DMA
++	dev->dma_iommu = false;
++#endif
+ }
+ 
+ DEFINE_MUTEX(iommu_probe_device_lock);
+@@ -3112,6 +3115,11 @@ int iommu_device_use_default_domain(struct device *dev)
+ 		return 0;
+ 
+ 	mutex_lock(&group->mutex);
++	/* We may race against bus_iommu_probe() finalising groups here */
++	if (!group->default_domain) {
++		ret = -EPROBE_DEFER;
++		goto unlock_out;
++	}
+ 	if (group->owner_cnt) {
+ 		if (group->domain != group->default_domain || group->owner ||
+ 		    !xa_empty(&group->pasid_array)) {
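
[The iommu.c hunk closes a startup race: a driver can reach
iommu_device_use_default_domain() while bus_iommu_probe() is still finalising
the group, in which case the default domain is simply not ready yet. From a
probe-path consumer's side, the semantics are:]

	/* -EPROBE_DEFER means "retry later", not failure */
	ret = iommu_device_use_default_domain(dev);
	if (ret)
		return ret;	/* the driver core requeues the probe on
				 * -EPROBE_DEFER and runs it again later */
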
+diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c
+index be35c5349986aa..a1e370d0200f15 100644
+--- a/drivers/irqchip/irq-gic-v2m.c
++++ b/drivers/irqchip/irq-gic-v2m.c
+@@ -423,7 +423,7 @@ static int __init gicv2m_of_init(struct fwnode_handle *parent_handle,
+ #ifdef CONFIG_ACPI
+ static int acpi_num_msi;
+ 
+-static __init struct fwnode_handle *gicv2m_get_fwnode(struct device *dev)
++static struct fwnode_handle *gicv2m_get_fwnode(struct device *dev)
+ {
+ 	struct v2m_data *data;
+ 
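
[Dropping __init from gicv2m_get_fwnode() matters because the function is
installed as a callback and can be invoked long after boot, when .init.text
has been discarded. A minimal reproduction of the hazard, independent of this
driver:]

	static int __init boot_only(void)	/* placed in .init.text */
	{
		return 42;
	}

	static int (*cb)(void) = boot_only;	/* pointer outlives the section */

	/* Calling cb() after free_initmem() jumps into freed memory. */
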
+diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
+index 82102a4c5d6883..f8215a8f656a46 100644
+--- a/drivers/mailbox/pcc.c
++++ b/drivers/mailbox/pcc.c
+@@ -313,6 +313,10 @@ static irqreturn_t pcc_mbox_irq(int irq, void *p)
+ 	int ret;
+ 
+ 	pchan = chan->con_priv;
++
++	if (pcc_chan_reg_read_modify_write(&pchan->plat_irq_ack))
++		return IRQ_NONE;
++
+ 	if (pchan->type == ACPI_PCCT_TYPE_EXT_PCC_MASTER_SUBSPACE &&
+ 	    !pchan->chan_in_use)
+ 		return IRQ_NONE;
+@@ -330,13 +334,16 @@ static irqreturn_t pcc_mbox_irq(int irq, void *p)
+ 		return IRQ_NONE;
+ 	}
+ 
+-	if (pcc_chan_reg_read_modify_write(&pchan->plat_irq_ack))
+-		return IRQ_NONE;
+-
++	/*
++	 * Clear this flag after updating interrupt ack register and just
++	 * before mbox_chan_received_data() which might call pcc_send_data()
++	 * where the flag is set again to start a new transfer. This is
++	 * required to avoid any possible race in the update of this flag.
++	 */
++	pchan->chan_in_use = false;
+ 	mbox_chan_received_data(chan, NULL);
+ 
+ 	check_and_ack(pchan, chan);
+-	pchan->chan_in_use = false;
+ 
+ 	return IRQ_HANDLED;
+ }
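
[Condensing the ordering established by the pcc.c hunk: the platform interrupt
is acknowledged first, and chan_in_use is cleared immediately before
mbox_chan_received_data(), because that callback may itself call
pcc_send_data(), which sets the flag again for the next transfer:]

	/* 1. ack the platform interrupt (or bail if it wasn't ours)      */
	/* 2. mark the channel free *before* handing data to the client   */
	pchan->chan_in_use = false;
	/* 3. the client may start the next transfer right away, and      */
	/*    pcc_send_data() sets chan_in_use = true again               */
	mbox_chan_received_data(chan, NULL);
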
+diff --git a/drivers/mcb/mcb-parse.c b/drivers/mcb/mcb-parse.c
+index a5f8ab9a09103d..7acff8e02eb4d4 100644
+--- a/drivers/mcb/mcb-parse.c
++++ b/drivers/mcb/mcb-parse.c
+@@ -96,7 +96,7 @@ static int chameleon_parse_gdd(struct mcb_bus *bus,
+ 
+ 	ret = mcb_device_register(bus, mdev);
+ 	if (ret < 0)
+-		goto err;
++		return ret;
+ 
+ 	return 0;
+ 
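
[The mcb-parse hunk stops jumping to the common error label when
mcb_device_register() fails -- presumably because registration failure already
releases the device internally via put_device(), so running the label's
cleanup as well would release it twice. The general rule being applied, as a
sketch rather than the mcb code itself:]

	ret = device_register(&mdev->dev);
	if (ret) {
		put_device(&mdev->dev);	/* final put frees the object */
		return ret;		/* caller must NOT free mdev again */
	}
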
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index 8a994a1975ca7b..6b6cd753d61a9a 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -2156,14 +2156,9 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
+ 				if (!rdev_set_badblocks(rdev, sect, s, 0))
+ 					abort = 1;
+ 			}
+-			if (abort) {
+-				conf->recovery_disabled =
+-					mddev->recovery_disabled;
+-				set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+-				md_done_sync(mddev, r1_bio->sectors, 0);
+-				put_buf(r1_bio);
++			if (abort)
+ 				return 0;
+-			}
++
+ 			/* Try next page */
+ 			sectors -= s;
+ 			sect += s;
+@@ -2302,10 +2297,21 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
+ 	int disks = conf->raid_disks * 2;
+ 	struct bio *wbio;
+ 
+-	if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
+-		/* ouch - failed to read all of that. */
+-		if (!fix_sync_read_error(r1_bio))
++	if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) {
++		/*
++		 * ouch - failed to read all of that.
++		 * No need to fix read error for check/repair
++		 * because all member disks are read.
++		 */
++		if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) ||
++		    !fix_sync_read_error(r1_bio)) {
++			conf->recovery_disabled = mddev->recovery_disabled;
++			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
++			md_done_sync(mddev, r1_bio->sectors, 0);
++			put_buf(r1_bio);
+ 			return;
++		}
++	}
+ 
+ 	if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
+ 		process_checks(r1_bio);
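
[The raid1 hunks move the abort bookkeeping out of fix_sync_read_error() into
its single caller and skip the fix entirely for check/repair passes: a
user-requested check reads every member disk, so there is no spare good copy
to reconstruct a failed read from. That condition, expressed as a hypothetical
helper for clarity:]

	static bool r1_can_fix_sync_read_error(struct mddev *mddev)
	{
		/* check/repair (MD_RECOVERY_REQUESTED) reads all disks */
		return !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
	}
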
+diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
+index 8ba096b8ebca24..85ecb2aeefdbff 100644
+--- a/drivers/media/i2c/Kconfig
++++ b/drivers/media/i2c/Kconfig
+@@ -140,6 +140,7 @@ config VIDEO_IMX214
+ 	tristate "Sony IMX214 sensor support"
+ 	depends on GPIOLIB
+ 	select REGMAP_I2C
++	select V4L2_CCI_I2C
+ 	help
+ 	  This is a Video4Linux2 sensor driver for the Sony
+ 	  IMX214 camera.
+diff --git a/drivers/media/i2c/imx214.c b/drivers/media/i2c/imx214.c
+index 6a393e18267f42..ea5e294327e7be 100644
+--- a/drivers/media/i2c/imx214.c
++++ b/drivers/media/i2c/imx214.c
+@@ -15,26 +15,152 @@
+ #include <linux/regmap.h>
+ #include <linux/regulator/consumer.h>
+ #include <media/media-entity.h>
++#include <media/v4l2-cci.h>
+ #include <media/v4l2-ctrls.h>
+ #include <media/v4l2-fwnode.h>
+ #include <media/v4l2-subdev.h>
+ 
+-#define IMX214_REG_MODE_SELECT		0x0100
++#define IMX214_REG_MODE_SELECT		CCI_REG8(0x0100)
+ #define IMX214_MODE_STANDBY		0x00
+ #define IMX214_MODE_STREAMING		0x01
+ 
++#define IMX214_REG_FAST_STANDBY_CTRL	CCI_REG8(0x0106)
++
+ #define IMX214_DEFAULT_CLK_FREQ	24000000
+-#define IMX214_DEFAULT_LINK_FREQ 480000000
++#define IMX214_DEFAULT_LINK_FREQ	600000000
++/* Keep wrong link frequency for backward compatibility */
++#define IMX214_DEFAULT_LINK_FREQ_LEGACY	480000000
+ #define IMX214_DEFAULT_PIXEL_RATE ((IMX214_DEFAULT_LINK_FREQ * 8LL) / 10)
+ #define IMX214_FPS 30
+ #define IMX214_MBUS_CODE MEDIA_BUS_FMT_SRGGB10_1X10
+ 
++/* V-TIMING internal */
++#define IMX214_REG_FRM_LENGTH_LINES	CCI_REG16(0x0340)
++
+ /* Exposure control */
+-#define IMX214_REG_EXPOSURE		0x0202
++#define IMX214_REG_EXPOSURE		CCI_REG16(0x0202)
+ #define IMX214_EXPOSURE_MIN		0
+ #define IMX214_EXPOSURE_MAX		3184
+ #define IMX214_EXPOSURE_STEP		1
+ #define IMX214_EXPOSURE_DEFAULT		3184
++#define IMX214_REG_EXPOSURE_RATIO	CCI_REG8(0x0222)
++#define IMX214_REG_SHORT_EXPOSURE	CCI_REG16(0x0224)
++
++/* Analog gain control */
++#define IMX214_REG_ANALOG_GAIN		CCI_REG16(0x0204)
++#define IMX214_REG_SHORT_ANALOG_GAIN	CCI_REG16(0x0216)
++
++/* Digital gain control */
++#define IMX214_REG_DIG_GAIN_GREENR	CCI_REG16(0x020e)
++#define IMX214_REG_DIG_GAIN_RED		CCI_REG16(0x0210)
++#define IMX214_REG_DIG_GAIN_BLUE	CCI_REG16(0x0212)
++#define IMX214_REG_DIG_GAIN_GREENB	CCI_REG16(0x0214)
++
++#define IMX214_REG_ORIENTATION		CCI_REG8(0x0101)
++
++#define IMX214_REG_MASK_CORR_FRAMES	CCI_REG8(0x0105)
++#define IMX214_CORR_FRAMES_TRANSMIT	0
++#define IMX214_CORR_FRAMES_MASK		1
++
++#define IMX214_REG_CSI_DATA_FORMAT	CCI_REG16(0x0112)
++#define IMX214_CSI_DATA_FORMAT_RAW8	0x0808
++#define IMX214_CSI_DATA_FORMAT_RAW10	0x0A0A
++#define IMX214_CSI_DATA_FORMAT_COMP6	0x0A06
++#define IMX214_CSI_DATA_FORMAT_COMP8	0x0A08
++
++#define IMX214_REG_CSI_LANE_MODE	CCI_REG8(0x0114)
++#define IMX214_CSI_2_LANE_MODE		1
++#define IMX214_CSI_4_LANE_MODE		3
++
++#define IMX214_REG_EXCK_FREQ		CCI_REG16(0x0136)
++#define IMX214_EXCK_FREQ(n)		((n) * 256)	/* n expressed in MHz */
++
++#define IMX214_REG_TEMP_SENSOR_CONTROL	CCI_REG8(0x0138)
++
++#define IMX214_REG_HDR_MODE		CCI_REG8(0x0220)
++#define IMX214_HDR_MODE_OFF		0
++#define IMX214_HDR_MODE_ON		1
++
++#define IMX214_REG_HDR_RES_REDUCTION	CCI_REG8(0x0221)
++#define IMX214_HDR_RES_REDU_THROUGH	0x11
++#define IMX214_HDR_RES_REDU_2_BINNING	0x22
++
++/* PLL settings */
++#define IMX214_REG_VTPXCK_DIV		CCI_REG8(0x0301)
++#define IMX214_REG_VTSYCK_DIV		CCI_REG8(0x0303)
++#define IMX214_REG_PREPLLCK_VT_DIV	CCI_REG8(0x0305)
++#define IMX214_REG_PLL_VT_MPY		CCI_REG16(0x0306)
++#define IMX214_REG_OPPXCK_DIV		CCI_REG8(0x0309)
++#define IMX214_REG_OPSYCK_DIV		CCI_REG8(0x030b)
++#define IMX214_REG_PLL_MULT_DRIV	CCI_REG8(0x0310)
++#define IMX214_PLL_SINGLE		0
++#define IMX214_PLL_DUAL			1
++
++#define IMX214_REG_LINE_LENGTH_PCK	CCI_REG16(0x0342)
++#define IMX214_REG_X_ADD_STA		CCI_REG16(0x0344)
++#define IMX214_REG_Y_ADD_STA		CCI_REG16(0x0346)
++#define IMX214_REG_X_ADD_END		CCI_REG16(0x0348)
++#define IMX214_REG_Y_ADD_END		CCI_REG16(0x034a)
++#define IMX214_REG_X_OUTPUT_SIZE	CCI_REG16(0x034c)
++#define IMX214_REG_Y_OUTPUT_SIZE	CCI_REG16(0x034e)
++#define IMX214_REG_X_EVEN_INC		CCI_REG8(0x0381)
++#define IMX214_REG_X_ODD_INC		CCI_REG8(0x0383)
++#define IMX214_REG_Y_EVEN_INC		CCI_REG8(0x0385)
++#define IMX214_REG_Y_ODD_INC		CCI_REG8(0x0387)
++
++#define IMX214_REG_SCALE_MODE		CCI_REG8(0x0401)
++#define IMX214_SCALE_NONE		0
++#define IMX214_SCALE_HORIZONTAL		1
++#define IMX214_SCALE_FULL		2
++#define IMX214_REG_SCALE_M		CCI_REG16(0x0404)
++
++#define IMX214_REG_DIG_CROP_X_OFFSET	CCI_REG16(0x0408)
++#define IMX214_REG_DIG_CROP_Y_OFFSET	CCI_REG16(0x040a)
++#define IMX214_REG_DIG_CROP_WIDTH	CCI_REG16(0x040c)
++#define IMX214_REG_DIG_CROP_HEIGHT	CCI_REG16(0x040e)
++
++#define IMX214_REG_REQ_LINK_BIT_RATE	CCI_REG32(0x0820)
++#define IMX214_LINK_BIT_RATE_MBPS(n)	((n) << 16)
++
++/* Binning mode */
++#define IMX214_REG_BINNING_MODE		CCI_REG8(0x0900)
++#define IMX214_BINNING_NONE		0
++#define IMX214_BINNING_ENABLE		1
++#define IMX214_REG_BINNING_TYPE		CCI_REG8(0x0901)
++#define IMX214_REG_BINNING_WEIGHTING	CCI_REG8(0x0902)
++#define IMX214_BINNING_AVERAGE		0x00
++#define IMX214_BINNING_SUMMED		0x01
++#define IMX214_BINNING_BAYER		0x02
++
++#define IMX214_REG_SING_DEF_CORR_EN	CCI_REG8(0x0b06)
++#define IMX214_SING_DEF_CORR_OFF	0
++#define IMX214_SING_DEF_CORR_ON		1
++
++/* AWB control */
++#define IMX214_REG_ABS_GAIN_GREENR	CCI_REG16(0x0b8e)
++#define IMX214_REG_ABS_GAIN_RED		CCI_REG16(0x0b90)
++#define IMX214_REG_ABS_GAIN_BLUE	CCI_REG16(0x0b92)
++#define IMX214_REG_ABS_GAIN_GREENB	CCI_REG16(0x0b94)
++
++#define IMX214_REG_RMSC_NR_MODE		CCI_REG8(0x3001)
++#define IMX214_REG_STATS_OUT_EN		CCI_REG8(0x3013)
++#define IMX214_STATS_OUT_OFF		0
++#define IMX214_STATS_OUT_ON		1
++
++/* Chroma noise reduction */
++#define IMX214_REG_NML_NR_EN		CCI_REG8(0x30a2)
++#define IMX214_NML_NR_OFF		0
++#define IMX214_NML_NR_ON		1
++
++#define IMX214_REG_EBD_SIZE_V		CCI_REG8(0x5041)
++#define IMX214_EBD_NO			0
++#define IMX214_EBD_4_LINE		4
++
++#define IMX214_REG_RG_STATS_LMT		CCI_REG16(0x6d12)
++#define IMX214_RG_STATS_LMT_10_BIT	0x03FF
++#define IMX214_RG_STATS_LMT_14_BIT	0x3FFF
++
++#define IMX214_REG_ATR_FAST_MOVE	CCI_REG8(0x9300)
+ 
+ /* IMX214 native and active pixel array size */
+ #define IMX214_NATIVE_WIDTH		4224U
+@@ -59,8 +185,6 @@ struct imx214 {
+ 
+ 	struct v4l2_subdev sd;
+ 	struct media_pad pad;
+-	struct v4l2_mbus_framefmt fmt;
+-	struct v4l2_rect crop;
+ 
+ 	struct v4l2_ctrl_handler ctrls;
+ 	struct v4l2_ctrl *pixel_rate;
+@@ -71,353 +195,266 @@ struct imx214 {
+ 	struct regulator_bulk_data	supplies[IMX214_NUM_SUPPLIES];
+ 
+ 	struct gpio_desc *enable_gpio;
+-
+-	/*
+-	 * Serialize control access, get/set format, get selection
+-	 * and start streaming.
+-	 */
+-	struct mutex mutex;
+-};
+-
+-struct reg_8 {
+-	u16 addr;
+-	u8 val;
+-};
+-
+-enum {
+-	IMX214_TABLE_WAIT_MS = 0,
+-	IMX214_TABLE_END,
+-	IMX214_MAX_RETRIES,
+-	IMX214_WAIT_MS
+ };
+ 
+ /*From imx214_mode_tbls.h*/
+-static const struct reg_8 mode_4096x2304[] = {
+-	{0x0114, 0x03},
+-	{0x0220, 0x00},
+-	{0x0221, 0x11},
+-	{0x0222, 0x01},
+-	{0x0340, 0x0C},
+-	{0x0341, 0x7A},
+-	{0x0342, 0x13},
+-	{0x0343, 0x90},
+-	{0x0344, 0x00},
+-	{0x0345, 0x38},
+-	{0x0346, 0x01},
+-	{0x0347, 0x98},
+-	{0x0348, 0x10},
+-	{0x0349, 0x37},
+-	{0x034A, 0x0A},
+-	{0x034B, 0x97},
+-	{0x0381, 0x01},
+-	{0x0383, 0x01},
+-	{0x0385, 0x01},
+-	{0x0387, 0x01},
+-	{0x0900, 0x00},
+-	{0x0901, 0x00},
+-	{0x0902, 0x00},
+-	{0x3000, 0x35},
+-	{0x3054, 0x01},
+-	{0x305C, 0x11},
+-
+-	{0x0112, 0x0A},
+-	{0x0113, 0x0A},
+-	{0x034C, 0x10},
+-	{0x034D, 0x00},
+-	{0x034E, 0x09},
+-	{0x034F, 0x00},
+-	{0x0401, 0x00},
+-	{0x0404, 0x00},
+-	{0x0405, 0x10},
+-	{0x0408, 0x00},
+-	{0x0409, 0x00},
+-	{0x040A, 0x00},
+-	{0x040B, 0x00},
+-	{0x040C, 0x10},
+-	{0x040D, 0x00},
+-	{0x040E, 0x09},
+-	{0x040F, 0x00},
+-
+-	{0x0301, 0x05},
+-	{0x0303, 0x02},
+-	{0x0305, 0x03},
+-	{0x0306, 0x00},
+-	{0x0307, 0x96},
+-	{0x0309, 0x0A},
+-	{0x030B, 0x01},
+-	{0x0310, 0x00},
+-
+-	{0x0820, 0x12},
+-	{0x0821, 0xC0},
+-	{0x0822, 0x00},
+-	{0x0823, 0x00},
+-
+-	{0x3A03, 0x09},
+-	{0x3A04, 0x50},
+-	{0x3A05, 0x01},
+-
+-	{0x0B06, 0x01},
+-	{0x30A2, 0x00},
+-
+-	{0x30B4, 0x00},
+-
+-	{0x3A02, 0xFF},
+-
+-	{0x3011, 0x00},
+-	{0x3013, 0x01},
+-
+-	{0x0202, 0x0C},
+-	{0x0203, 0x70},
+-	{0x0224, 0x01},
+-	{0x0225, 0xF4},
+-
+-	{0x0204, 0x00},
+-	{0x0205, 0x00},
+-	{0x020E, 0x01},
+-	{0x020F, 0x00},
+-	{0x0210, 0x01},
+-	{0x0211, 0x00},
+-	{0x0212, 0x01},
+-	{0x0213, 0x00},
+-	{0x0214, 0x01},
+-	{0x0215, 0x00},
+-	{0x0216, 0x00},
+-	{0x0217, 0x00},
+-
+-	{0x4170, 0x00},
+-	{0x4171, 0x10},
+-	{0x4176, 0x00},
+-	{0x4177, 0x3C},
+-	{0xAE20, 0x04},
+-	{0xAE21, 0x5C},
+-
+-	{IMX214_TABLE_WAIT_MS, 10},
+-	{0x0138, 0x01},
+-	{IMX214_TABLE_END, 0x00}
++static const struct cci_reg_sequence mode_4096x2304[] = {
++	{ IMX214_REG_HDR_MODE, IMX214_HDR_MODE_OFF },
++	{ IMX214_REG_HDR_RES_REDUCTION, IMX214_HDR_RES_REDU_THROUGH },
++	{ IMX214_REG_EXPOSURE_RATIO, 1 },
++	{ IMX214_REG_FRM_LENGTH_LINES, 3194 },
++	{ IMX214_REG_LINE_LENGTH_PCK, 5008 },
++	{ IMX214_REG_X_ADD_STA, 56 },
++	{ IMX214_REG_Y_ADD_STA, 408 },
++	{ IMX214_REG_X_ADD_END, 4151 },
++	{ IMX214_REG_Y_ADD_END, 2711 },
++	{ IMX214_REG_X_EVEN_INC, 1 },
++	{ IMX214_REG_X_ODD_INC, 1 },
++	{ IMX214_REG_Y_EVEN_INC, 1 },
++	{ IMX214_REG_Y_ODD_INC, 1 },
++	{ IMX214_REG_BINNING_MODE, IMX214_BINNING_NONE },
++	{ IMX214_REG_BINNING_TYPE, 0 },
++	{ IMX214_REG_BINNING_WEIGHTING, IMX214_BINNING_AVERAGE },
++	{ CCI_REG8(0x3000), 0x35 },
++	{ CCI_REG8(0x3054), 0x01 },
++	{ CCI_REG8(0x305C), 0x11 },
++
++	{ IMX214_REG_CSI_DATA_FORMAT, IMX214_CSI_DATA_FORMAT_RAW10 },
++	{ IMX214_REG_X_OUTPUT_SIZE, 4096 },
++	{ IMX214_REG_Y_OUTPUT_SIZE, 2304 },
++	{ IMX214_REG_SCALE_MODE, IMX214_SCALE_NONE },
++	{ IMX214_REG_SCALE_M, 2 },
++	{ IMX214_REG_DIG_CROP_X_OFFSET, 0 },
++	{ IMX214_REG_DIG_CROP_Y_OFFSET, 0 },
++	{ IMX214_REG_DIG_CROP_WIDTH, 4096 },
++	{ IMX214_REG_DIG_CROP_HEIGHT, 2304 },
++
++	{ IMX214_REG_VTPXCK_DIV, 5 },
++	{ IMX214_REG_VTSYCK_DIV, 2 },
++	{ IMX214_REG_PREPLLCK_VT_DIV, 3 },
++	{ IMX214_REG_PLL_VT_MPY, 150 },
++	{ IMX214_REG_OPPXCK_DIV, 10 },
++	{ IMX214_REG_OPSYCK_DIV, 1 },
++	{ IMX214_REG_PLL_MULT_DRIV, IMX214_PLL_SINGLE },
++
++	{ IMX214_REG_REQ_LINK_BIT_RATE, IMX214_LINK_BIT_RATE_MBPS(4800) },
++
++	{ CCI_REG8(0x3A03), 0x09 },
++	{ CCI_REG8(0x3A04), 0x50 },
++	{ CCI_REG8(0x3A05), 0x01 },
++
++	{ IMX214_REG_SING_DEF_CORR_EN, IMX214_SING_DEF_CORR_ON },
++	{ IMX214_REG_NML_NR_EN, IMX214_NML_NR_OFF },
++
++	{ CCI_REG8(0x30B4), 0x00 },
++
++	{ CCI_REG8(0x3A02), 0xFF },
++
++	{ CCI_REG8(0x3011), 0x00 },
++	{ IMX214_REG_STATS_OUT_EN, IMX214_STATS_OUT_ON },
++
++	{ IMX214_REG_EXPOSURE, IMX214_EXPOSURE_DEFAULT },
++	{ IMX214_REG_SHORT_EXPOSURE, 500 },
++
++	{ IMX214_REG_ANALOG_GAIN, 0 },
++	{ IMX214_REG_DIG_GAIN_GREENR, 256 },
++	{ IMX214_REG_DIG_GAIN_RED, 256 },
++	{ IMX214_REG_DIG_GAIN_BLUE, 256 },
++	{ IMX214_REG_DIG_GAIN_GREENB, 256 },
++	{ IMX214_REG_SHORT_ANALOG_GAIN, 0 },
++
++	{ CCI_REG8(0x4170), 0x00 },
++	{ CCI_REG8(0x4171), 0x10 },
++	{ CCI_REG8(0x4176), 0x00 },
++	{ CCI_REG8(0x4177), 0x3C },
++	{ CCI_REG8(0xAE20), 0x04 },
++	{ CCI_REG8(0xAE21), 0x5C },
+ };
+ 
+-static const struct reg_8 mode_1920x1080[] = {
+-	{0x0114, 0x03},
+-	{0x0220, 0x00},
+-	{0x0221, 0x11},
+-	{0x0222, 0x01},
+-	{0x0340, 0x0C},
+-	{0x0341, 0x7A},
+-	{0x0342, 0x13},
+-	{0x0343, 0x90},
+-	{0x0344, 0x04},
+-	{0x0345, 0x78},
+-	{0x0346, 0x03},
+-	{0x0347, 0xFC},
+-	{0x0348, 0x0B},
+-	{0x0349, 0xF7},
+-	{0x034A, 0x08},
+-	{0x034B, 0x33},
+-	{0x0381, 0x01},
+-	{0x0383, 0x01},
+-	{0x0385, 0x01},
+-	{0x0387, 0x01},
+-	{0x0900, 0x00},
+-	{0x0901, 0x00},
+-	{0x0902, 0x00},
+-	{0x3000, 0x35},
+-	{0x3054, 0x01},
+-	{0x305C, 0x11},
+-
+-	{0x0112, 0x0A},
+-	{0x0113, 0x0A},
+-	{0x034C, 0x07},
+-	{0x034D, 0x80},
+-	{0x034E, 0x04},
+-	{0x034F, 0x38},
+-	{0x0401, 0x00},
+-	{0x0404, 0x00},
+-	{0x0405, 0x10},
+-	{0x0408, 0x00},
+-	{0x0409, 0x00},
+-	{0x040A, 0x00},
+-	{0x040B, 0x00},
+-	{0x040C, 0x07},
+-	{0x040D, 0x80},
+-	{0x040E, 0x04},
+-	{0x040F, 0x38},
+-
+-	{0x0301, 0x05},
+-	{0x0303, 0x02},
+-	{0x0305, 0x03},
+-	{0x0306, 0x00},
+-	{0x0307, 0x96},
+-	{0x0309, 0x0A},
+-	{0x030B, 0x01},
+-	{0x0310, 0x00},
+-
+-	{0x0820, 0x12},
+-	{0x0821, 0xC0},
+-	{0x0822, 0x00},
+-	{0x0823, 0x00},
+-
+-	{0x3A03, 0x04},
+-	{0x3A04, 0xF8},
+-	{0x3A05, 0x02},
+-
+-	{0x0B06, 0x01},
+-	{0x30A2, 0x00},
+-
+-	{0x30B4, 0x00},
+-
+-	{0x3A02, 0xFF},
+-
+-	{0x3011, 0x00},
+-	{0x3013, 0x01},
+-
+-	{0x0202, 0x0C},
+-	{0x0203, 0x70},
+-	{0x0224, 0x01},
+-	{0x0225, 0xF4},
+-
+-	{0x0204, 0x00},
+-	{0x0205, 0x00},
+-	{0x020E, 0x01},
+-	{0x020F, 0x00},
+-	{0x0210, 0x01},
+-	{0x0211, 0x00},
+-	{0x0212, 0x01},
+-	{0x0213, 0x00},
+-	{0x0214, 0x01},
+-	{0x0215, 0x00},
+-	{0x0216, 0x00},
+-	{0x0217, 0x00},
+-
+-	{0x4170, 0x00},
+-	{0x4171, 0x10},
+-	{0x4176, 0x00},
+-	{0x4177, 0x3C},
+-	{0xAE20, 0x04},
+-	{0xAE21, 0x5C},
+-
+-	{IMX214_TABLE_WAIT_MS, 10},
+-	{0x0138, 0x01},
+-	{IMX214_TABLE_END, 0x00}
++static const struct cci_reg_sequence mode_1920x1080[] = {
++	{ IMX214_REG_HDR_MODE, IMX214_HDR_MODE_OFF },
++	{ IMX214_REG_HDR_RES_REDUCTION, IMX214_HDR_RES_REDU_THROUGH },
++	{ IMX214_REG_EXPOSURE_RATIO, 1 },
++	{ IMX214_REG_FRM_LENGTH_LINES, 3194 },
++	{ IMX214_REG_LINE_LENGTH_PCK, 5008 },
++	{ IMX214_REG_X_ADD_STA, 1144 },
++	{ IMX214_REG_Y_ADD_STA, 1020 },
++	{ IMX214_REG_X_ADD_END, 3063 },
++	{ IMX214_REG_Y_ADD_END, 2099 },
++	{ IMX214_REG_X_EVEN_INC, 1 },
++	{ IMX214_REG_X_ODD_INC, 1 },
++	{ IMX214_REG_Y_EVEN_INC, 1 },
++	{ IMX214_REG_Y_ODD_INC, 1 },
++	{ IMX214_REG_BINNING_MODE, IMX214_BINNING_NONE },
++	{ IMX214_REG_BINNING_TYPE, 0 },
++	{ IMX214_REG_BINNING_WEIGHTING, IMX214_BINNING_AVERAGE },
++	{ CCI_REG8(0x3000), 0x35 },
++	{ CCI_REG8(0x3054), 0x01 },
++	{ CCI_REG8(0x305C), 0x11 },
++
++	{ IMX214_REG_CSI_DATA_FORMAT, IMX214_CSI_DATA_FORMAT_RAW10 },
++	{ IMX214_REG_X_OUTPUT_SIZE, 1920 },
++	{ IMX214_REG_Y_OUTPUT_SIZE, 1080 },
++	{ IMX214_REG_SCALE_MODE, IMX214_SCALE_NONE },
++	{ IMX214_REG_SCALE_M, 2 },
++	{ IMX214_REG_DIG_CROP_X_OFFSET, 0 },
++	{ IMX214_REG_DIG_CROP_Y_OFFSET, 0 },
++	{ IMX214_REG_DIG_CROP_WIDTH, 1920 },
++	{ IMX214_REG_DIG_CROP_HEIGHT, 1080 },
++
++	{ IMX214_REG_VTPXCK_DIV, 5 },
++	{ IMX214_REG_VTSYCK_DIV, 2 },
++	{ IMX214_REG_PREPLLCK_VT_DIV, 3 },
++	{ IMX214_REG_PLL_VT_MPY, 150 },
++	{ IMX214_REG_OPPXCK_DIV, 10 },
++	{ IMX214_REG_OPSYCK_DIV, 1 },
++	{ IMX214_REG_PLL_MULT_DRIV, IMX214_PLL_SINGLE },
++
++	{ IMX214_REG_REQ_LINK_BIT_RATE, IMX214_LINK_BIT_RATE_MBPS(4800) },
++
++	{ CCI_REG8(0x3A03), 0x04 },
++	{ CCI_REG8(0x3A04), 0xF8 },
++	{ CCI_REG8(0x3A05), 0x02 },
++
++	{ IMX214_REG_SING_DEF_CORR_EN, IMX214_SING_DEF_CORR_ON },
++	{ IMX214_REG_NML_NR_EN, IMX214_NML_NR_OFF },
++
++	{ CCI_REG8(0x30B4), 0x00 },
++
++	{ CCI_REG8(0x3A02), 0xFF },
++
++	{ CCI_REG8(0x3011), 0x00 },
++	{ IMX214_REG_STATS_OUT_EN, IMX214_STATS_OUT_ON },
++
++	{ IMX214_REG_EXPOSURE, IMX214_EXPOSURE_DEFAULT },
++	{ IMX214_REG_SHORT_EXPOSURE, 500 },
++
++	{ IMX214_REG_ANALOG_GAIN, 0 },
++	{ IMX214_REG_DIG_GAIN_GREENR, 256 },
++	{ IMX214_REG_DIG_GAIN_RED, 256 },
++	{ IMX214_REG_DIG_GAIN_BLUE, 256 },
++	{ IMX214_REG_DIG_GAIN_GREENB, 256 },
++	{ IMX214_REG_SHORT_ANALOG_GAIN, 0 },
++
++	{ CCI_REG8(0x4170), 0x00 },
++	{ CCI_REG8(0x4171), 0x10 },
++	{ CCI_REG8(0x4176), 0x00 },
++	{ CCI_REG8(0x4177), 0x3C },
++	{ CCI_REG8(0xAE20), 0x04 },
++	{ CCI_REG8(0xAE21), 0x5C },
+ };
+ 
+-static const struct reg_8 mode_table_common[] = {
++static const struct cci_reg_sequence mode_table_common[] = {
+ 	/* software reset */
+ 
+ 	/* software standby settings */
+-	{0x0100, 0x00},
++	{ IMX214_REG_MODE_SELECT, IMX214_MODE_STANDBY },
+ 
+ 	/* ATR setting */
+-	{0x9300, 0x02},
++	{ IMX214_REG_ATR_FAST_MOVE, 2 },
+ 
+ 	/* external clock setting */
+-	{0x0136, 0x18},
+-	{0x0137, 0x00},
++	{ IMX214_REG_EXCK_FREQ, IMX214_EXCK_FREQ(IMX214_DEFAULT_CLK_FREQ / 1000000) },
+ 
+ 	/* global setting */
+ 	/* basic config */
+-	{0x0101, 0x00},
+-	{0x0105, 0x01},
+-	{0x0106, 0x01},
+-	{0x4550, 0x02},
+-	{0x4601, 0x00},
+-	{0x4642, 0x05},
+-	{0x6227, 0x11},
+-	{0x6276, 0x00},
+-	{0x900E, 0x06},
+-	{0xA802, 0x90},
+-	{0xA803, 0x11},
+-	{0xA804, 0x62},
+-	{0xA805, 0x77},
+-	{0xA806, 0xAE},
+-	{0xA807, 0x34},
+-	{0xA808, 0xAE},
+-	{0xA809, 0x35},
+-	{0xA80A, 0x62},
+-	{0xA80B, 0x83},
+-	{0xAE33, 0x00},
++	{ IMX214_REG_ORIENTATION, 0 },
++	{ IMX214_REG_MASK_CORR_FRAMES, IMX214_CORR_FRAMES_MASK },
++	{ IMX214_REG_FAST_STANDBY_CTRL, 1 },
++	{ CCI_REG8(0x4550), 0x02 },
++	{ CCI_REG8(0x4601), 0x00 },
++	{ CCI_REG8(0x4642), 0x05 },
++	{ CCI_REG8(0x6227), 0x11 },
++	{ CCI_REG8(0x6276), 0x00 },
++	{ CCI_REG8(0x900E), 0x06 },
++	{ CCI_REG8(0xA802), 0x90 },
++	{ CCI_REG8(0xA803), 0x11 },
++	{ CCI_REG8(0xA804), 0x62 },
++	{ CCI_REG8(0xA805), 0x77 },
++	{ CCI_REG8(0xA806), 0xAE },
++	{ CCI_REG8(0xA807), 0x34 },
++	{ CCI_REG8(0xA808), 0xAE },
++	{ CCI_REG8(0xA809), 0x35 },
++	{ CCI_REG8(0xA80A), 0x62 },
++	{ CCI_REG8(0xA80B), 0x83 },
++	{ CCI_REG8(0xAE33), 0x00 },
+ 
+ 	/* analog setting */
+-	{0x4174, 0x00},
+-	{0x4175, 0x11},
+-	{0x4612, 0x29},
+-	{0x461B, 0x12},
+-	{0x461F, 0x06},
+-	{0x4635, 0x07},
+-	{0x4637, 0x30},
+-	{0x463F, 0x18},
+-	{0x4641, 0x0D},
+-	{0x465B, 0x12},
+-	{0x465F, 0x11},
+-	{0x4663, 0x11},
+-	{0x4667, 0x0F},
+-	{0x466F, 0x0F},
+-	{0x470E, 0x09},
+-	{0x4909, 0xAB},
+-	{0x490B, 0x95},
+-	{0x4915, 0x5D},
+-	{0x4A5F, 0xFF},
+-	{0x4A61, 0xFF},
+-	{0x4A73, 0x62},
+-	{0x4A85, 0x00},
+-	{0x4A87, 0xFF},
++	{ CCI_REG8(0x4174), 0x00 },
++	{ CCI_REG8(0x4175), 0x11 },
++	{ CCI_REG8(0x4612), 0x29 },
++	{ CCI_REG8(0x461B), 0x12 },
++	{ CCI_REG8(0x461F), 0x06 },
++	{ CCI_REG8(0x4635), 0x07 },
++	{ CCI_REG8(0x4637), 0x30 },
++	{ CCI_REG8(0x463F), 0x18 },
++	{ CCI_REG8(0x4641), 0x0D },
++	{ CCI_REG8(0x465B), 0x12 },
++	{ CCI_REG8(0x465F), 0x11 },
++	{ CCI_REG8(0x4663), 0x11 },
++	{ CCI_REG8(0x4667), 0x0F },
++	{ CCI_REG8(0x466F), 0x0F },
++	{ CCI_REG8(0x470E), 0x09 },
++	{ CCI_REG8(0x4909), 0xAB },
++	{ CCI_REG8(0x490B), 0x95 },
++	{ CCI_REG8(0x4915), 0x5D },
++	{ CCI_REG8(0x4A5F), 0xFF },
++	{ CCI_REG8(0x4A61), 0xFF },
++	{ CCI_REG8(0x4A73), 0x62 },
++	{ CCI_REG8(0x4A85), 0x00 },
++	{ CCI_REG8(0x4A87), 0xFF },
+ 
+ 	/* embedded data */
+-	{0x5041, 0x04},
+-	{0x583C, 0x04},
+-	{0x620E, 0x04},
+-	{0x6EB2, 0x01},
+-	{0x6EB3, 0x00},
+-	{0x9300, 0x02},
++	{ IMX214_REG_EBD_SIZE_V, IMX214_EBD_4_LINE },
++	{ CCI_REG8(0x583C), 0x04 },
++	{ CCI_REG8(0x620E), 0x04 },
++	{ CCI_REG8(0x6EB2), 0x01 },
++	{ CCI_REG8(0x6EB3), 0x00 },
++	{ IMX214_REG_ATR_FAST_MOVE, 2 },
+ 
+ 	/* imagequality */
+ 	/* HDR setting */
+-	{0x3001, 0x07},
+-	{0x6D12, 0x3F},
+-	{0x6D13, 0xFF},
+-	{0x9344, 0x03},
+-	{0x9706, 0x10},
+-	{0x9707, 0x03},
+-	{0x9708, 0x03},
+-	{0x9E04, 0x01},
+-	{0x9E05, 0x00},
+-	{0x9E0C, 0x01},
+-	{0x9E0D, 0x02},
+-	{0x9E24, 0x00},
+-	{0x9E25, 0x8C},
+-	{0x9E26, 0x00},
+-	{0x9E27, 0x94},
+-	{0x9E28, 0x00},
+-	{0x9E29, 0x96},
++	{ IMX214_REG_RMSC_NR_MODE, 0x07 },
++	{ IMX214_REG_RG_STATS_LMT, IMX214_RG_STATS_LMT_14_BIT },
++	{ CCI_REG8(0x9344), 0x03 },
++	{ CCI_REG8(0x9706), 0x10 },
++	{ CCI_REG8(0x9707), 0x03 },
++	{ CCI_REG8(0x9708), 0x03 },
++	{ CCI_REG8(0x9E04), 0x01 },
++	{ CCI_REG8(0x9E05), 0x00 },
++	{ CCI_REG8(0x9E0C), 0x01 },
++	{ CCI_REG8(0x9E0D), 0x02 },
++	{ CCI_REG8(0x9E24), 0x00 },
++	{ CCI_REG8(0x9E25), 0x8C },
++	{ CCI_REG8(0x9E26), 0x00 },
++	{ CCI_REG8(0x9E27), 0x94 },
++	{ CCI_REG8(0x9E28), 0x00 },
++	{ CCI_REG8(0x9E29), 0x96 },
+ 
+ 	/* CNR parameter setting */
+-	{0x69DB, 0x01},
++	{ CCI_REG8(0x69DB), 0x01 },
+ 
+ 	/* Moire reduction */
+-	{0x6957, 0x01},
++	{ CCI_REG8(0x6957), 0x01 },
+ 
+ 	/* image enhancement */
+-	{0x6987, 0x17},
+-	{0x698A, 0x03},
+-	{0x698B, 0x03},
++	{ CCI_REG8(0x6987), 0x17 },
++	{ CCI_REG8(0x698A), 0x03 },
++	{ CCI_REG8(0x698B), 0x03 },
+ 
+ 	/* white balanace */
+-	{0x0B8E, 0x01},
+-	{0x0B8F, 0x00},
+-	{0x0B90, 0x01},
+-	{0x0B91, 0x00},
+-	{0x0B92, 0x01},
+-	{0x0B93, 0x00},
+-	{0x0B94, 0x01},
+-	{0x0B95, 0x00},
++	{ IMX214_REG_ABS_GAIN_GREENR, 0x0100 },
++	{ IMX214_REG_ABS_GAIN_RED, 0x0100 },
++	{ IMX214_REG_ABS_GAIN_BLUE, 0x0100 },
++	{ IMX214_REG_ABS_GAIN_GREENB, 0x0100 },
+ 
+ 	/* ATR setting */
+-	{0x6E50, 0x00},
+-	{0x6E51, 0x32},
+-	{0x9340, 0x00},
+-	{0x9341, 0x3C},
+-	{0x9342, 0x03},
+-	{0x9343, 0xFF},
+-	{IMX214_TABLE_END, 0x00}
++	{ CCI_REG8(0x6E50), 0x00 },
++	{ CCI_REG8(0x6E51), 0x32 },
++	{ CCI_REG8(0x9340), 0x00 },
++	{ CCI_REG8(0x9341), 0x3C },
++	{ CCI_REG8(0x9342), 0x03 },
++	{ CCI_REG8(0x9343), 0xFF },
+ };
+ 
+ /*
+@@ -427,16 +464,19 @@ static const struct reg_8 mode_table_common[] = {
+ static const struct imx214_mode {
+ 	u32 width;
+ 	u32 height;
+-	const struct reg_8 *reg_table;
++	unsigned int num_of_regs;
++	const struct cci_reg_sequence *reg_table;
+ } imx214_modes[] = {
+ 	{
+ 		.width = 4096,
+ 		.height = 2304,
++		.num_of_regs = ARRAY_SIZE(mode_4096x2304),
+ 		.reg_table = mode_4096x2304,
+ 	},
+ 	{
+ 		.width = 1920,
+ 		.height = 1080,
++		.num_of_regs = ARRAY_SIZE(mode_1920x1080),
+ 		.reg_table = mode_1920x1080,
+ 	},
+ };
+@@ -490,6 +530,22 @@ static int __maybe_unused imx214_power_off(struct device *dev)
+ 	return 0;
+ }
+ 
++static void imx214_update_pad_format(struct imx214 *imx214,
++				     const struct imx214_mode *mode,
++				     struct v4l2_mbus_framefmt *fmt, u32 code)
++{
++	fmt->code = IMX214_MBUS_CODE;
++	fmt->width = mode->width;
++	fmt->height = mode->height;
++	fmt->field = V4L2_FIELD_NONE;
++	fmt->colorspace = V4L2_COLORSPACE_SRGB;
++	fmt->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(fmt->colorspace);
++	fmt->quantization = V4L2_MAP_QUANTIZATION_DEFAULT(true,
++							  fmt->colorspace,
++							  fmt->ycbcr_enc);
++	fmt->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(fmt->colorspace);
++}
++
+ static int imx214_enum_mbus_code(struct v4l2_subdev *sd,
+ 				 struct v4l2_subdev_state *sd_state,
+ 				 struct v4l2_subdev_mbus_code_enum *code)
+@@ -549,52 +605,6 @@ static const struct v4l2_subdev_core_ops imx214_core_ops = {
+ #endif
+ };
+ 
+-static struct v4l2_mbus_framefmt *
+-__imx214_get_pad_format(struct imx214 *imx214,
+-			struct v4l2_subdev_state *sd_state,
+-			unsigned int pad,
+-			enum v4l2_subdev_format_whence which)
+-{
+-	switch (which) {
+-	case V4L2_SUBDEV_FORMAT_TRY:
+-		return v4l2_subdev_state_get_format(sd_state, pad);
+-	case V4L2_SUBDEV_FORMAT_ACTIVE:
+-		return &imx214->fmt;
+-	default:
+-		return NULL;
+-	}
+-}
+-
+-static int imx214_get_format(struct v4l2_subdev *sd,
+-			     struct v4l2_subdev_state *sd_state,
+-			     struct v4l2_subdev_format *format)
+-{
+-	struct imx214 *imx214 = to_imx214(sd);
+-
+-	mutex_lock(&imx214->mutex);
+-	format->format = *__imx214_get_pad_format(imx214, sd_state,
+-						  format->pad,
+-						  format->which);
+-	mutex_unlock(&imx214->mutex);
+-
+-	return 0;
+-}
+-
+-static struct v4l2_rect *
+-__imx214_get_pad_crop(struct imx214 *imx214,
+-		      struct v4l2_subdev_state *sd_state,
+-		      unsigned int pad, enum v4l2_subdev_format_whence which)
+-{
+-	switch (which) {
+-	case V4L2_SUBDEV_FORMAT_TRY:
+-		return v4l2_subdev_state_get_crop(sd_state, pad);
+-	case V4L2_SUBDEV_FORMAT_ACTIVE:
+-		return &imx214->crop;
+-	default:
+-		return NULL;
+-	}
+-}
+-
+ static int imx214_set_format(struct v4l2_subdev *sd,
+ 			     struct v4l2_subdev_state *sd_state,
+ 			     struct v4l2_subdev_format *format)
+@@ -604,34 +614,20 @@ static int imx214_set_format(struct v4l2_subdev *sd,
+ 	struct v4l2_rect *__crop;
+ 	const struct imx214_mode *mode;
+ 
+-	mutex_lock(&imx214->mutex);
+-
+-	__crop = __imx214_get_pad_crop(imx214, sd_state, format->pad,
+-				       format->which);
+-
+ 	mode = v4l2_find_nearest_size(imx214_modes,
+ 				      ARRAY_SIZE(imx214_modes), width, height,
+ 				      format->format.width,
+ 				      format->format.height);
+ 
+-	__crop->width = mode->width;
+-	__crop->height = mode->height;
+-
+-	__format = __imx214_get_pad_format(imx214, sd_state, format->pad,
+-					   format->which);
+-	__format->width = __crop->width;
+-	__format->height = __crop->height;
+-	__format->code = IMX214_MBUS_CODE;
+-	__format->field = V4L2_FIELD_NONE;
+-	__format->colorspace = V4L2_COLORSPACE_SRGB;
+-	__format->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(__format->colorspace);
+-	__format->quantization = V4L2_MAP_QUANTIZATION_DEFAULT(true,
+-				__format->colorspace, __format->ycbcr_enc);
+-	__format->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(__format->colorspace);
++	imx214_update_pad_format(imx214, mode, &format->format,
++				 format->format.code);
++	__format = v4l2_subdev_state_get_format(sd_state, 0);
+ 
+-	format->format = *__format;
++	*__format = format->format;
+ 
+-	mutex_unlock(&imx214->mutex);
++	__crop = v4l2_subdev_state_get_crop(sd_state, 0);
++	__crop->width = mode->width;
++	__crop->height = mode->height;
+ 
+ 	return 0;
+ }
+@@ -640,14 +636,9 @@ static int imx214_get_selection(struct v4l2_subdev *sd,
+ 				struct v4l2_subdev_state *sd_state,
+ 				struct v4l2_subdev_selection *sel)
+ {
+-	struct imx214 *imx214 = to_imx214(sd);
+-
+ 	switch (sel->target) {
+ 	case V4L2_SEL_TGT_CROP:
+-		mutex_lock(&imx214->mutex);
+-		sel->r = *__imx214_get_pad_crop(imx214, sd_state, sel->pad,
+-						sel->which);
+-		mutex_unlock(&imx214->mutex);
++		sel->r = *v4l2_subdev_state_get_crop(sd_state, 0);
+ 		return 0;
+ 
+ 	case V4L2_SEL_TGT_NATIVE_SIZE:
+@@ -687,7 +678,6 @@ static int imx214_set_ctrl(struct v4l2_ctrl *ctrl)
+ {
+ 	struct imx214 *imx214 = container_of(ctrl->handler,
+ 					     struct imx214, ctrls);
+-	u8 vals[2];
+ 	int ret;
+ 
+ 	/*
+@@ -699,12 +689,7 @@ static int imx214_set_ctrl(struct v4l2_ctrl *ctrl)
+ 
+ 	switch (ctrl->id) {
+ 	case V4L2_CID_EXPOSURE:
+-		vals[1] = ctrl->val;
+-		vals[0] = ctrl->val >> 8;
+-		ret = regmap_bulk_write(imx214->regmap, IMX214_REG_EXPOSURE, vals, 2);
+-		if (ret < 0)
+-			dev_err(imx214->dev, "Error %d\n", ret);
+-		ret = 0;
++		cci_write(imx214->regmap, IMX214_REG_EXPOSURE, ctrl->val, &ret);
+ 		break;
+ 
+ 	default:
+@@ -790,76 +775,52 @@ static int imx214_ctrls_init(struct imx214 *imx214)
+ 	return 0;
+ };
+ 
+-#define MAX_CMD 4
+-static int imx214_write_table(struct imx214 *imx214,
+-			      const struct reg_8 table[])
+-{
+-	u8 vals[MAX_CMD];
+-	int i;
+-	int ret;
+-
+-	for (; table->addr != IMX214_TABLE_END ; table++) {
+-		if (table->addr == IMX214_TABLE_WAIT_MS) {
+-			usleep_range(table->val * 1000,
+-				     table->val * 1000 + 500);
+-			continue;
+-		}
+-
+-		for (i = 0; i < MAX_CMD; i++) {
+-			if (table[i].addr != (table[0].addr + i))
+-				break;
+-			vals[i] = table[i].val;
+-		}
+-
+-		ret = regmap_bulk_write(imx214->regmap, table->addr, vals, i);
+-
+-		if (ret) {
+-			dev_err(imx214->dev, "write_table error: %d\n", ret);
+-			return ret;
+-		}
+-
+-		table += i - 1;
+-	}
+-
+-	return 0;
+-}
+-
+ static int imx214_start_streaming(struct imx214 *imx214)
+ {
++	const struct v4l2_mbus_framefmt *fmt;
++	struct v4l2_subdev_state *state;
+ 	const struct imx214_mode *mode;
+ 	int ret;
+ 
+-	mutex_lock(&imx214->mutex);
+-	ret = imx214_write_table(imx214, mode_table_common);
++	ret = cci_multi_reg_write(imx214->regmap, mode_table_common,
++				  ARRAY_SIZE(mode_table_common), NULL);
+ 	if (ret < 0) {
+ 		dev_err(imx214->dev, "could not sent common table %d\n", ret);
+-		goto error;
++		return ret;
+ 	}
+ 
+-	mode = v4l2_find_nearest_size(imx214_modes,
+-				ARRAY_SIZE(imx214_modes), width, height,
+-				imx214->fmt.width, imx214->fmt.height);
+-	ret = imx214_write_table(imx214, mode->reg_table);
++	ret = cci_write(imx214->regmap, IMX214_REG_CSI_LANE_MODE,
++			IMX214_CSI_4_LANE_MODE, NULL);
++	if (ret) {
++		dev_err(imx214->dev, "failed to configure lanes\n");
++		return ret;
++	}
++
++	state = v4l2_subdev_get_locked_active_state(&imx214->sd);
++	fmt = v4l2_subdev_state_get_format(state, 0);
++	mode = v4l2_find_nearest_size(imx214_modes, ARRAY_SIZE(imx214_modes),
++				      width, height, fmt->width, fmt->height);
++	ret = cci_multi_reg_write(imx214->regmap, mode->reg_table,
++				  mode->num_of_regs, NULL);
+ 	if (ret < 0) {
+ 		dev_err(imx214->dev, "could not sent mode table %d\n", ret);
+-		goto error;
++		return ret;
+ 	}
++
++	usleep_range(10000, 10500);
++
++	cci_write(imx214->regmap, IMX214_REG_TEMP_SENSOR_CONTROL, 0x01, NULL);
++
+ 	ret = __v4l2_ctrl_handler_setup(&imx214->ctrls);
+ 	if (ret < 0) {
+ 		dev_err(imx214->dev, "could not sync v4l2 controls\n");
+-		goto error;
++		return ret;
+ 	}
+-	ret = regmap_write(imx214->regmap, IMX214_REG_MODE_SELECT, IMX214_MODE_STREAMING);
+-	if (ret < 0) {
++	ret = cci_write(imx214->regmap, IMX214_REG_MODE_SELECT,
++			IMX214_MODE_STREAMING, NULL);
++	if (ret < 0)
+ 		dev_err(imx214->dev, "could not sent start table %d\n", ret);
+-		goto error;
+-	}
+ 
+-	mutex_unlock(&imx214->mutex);
+-	return 0;
+-
+-error:
+-	mutex_unlock(&imx214->mutex);
+ 	return ret;
+ }
+ 
+@@ -867,7 +828,8 @@ static int imx214_stop_streaming(struct imx214 *imx214)
+ {
+ 	int ret;
+ 
+-	ret = regmap_write(imx214->regmap, IMX214_REG_MODE_SELECT, IMX214_MODE_STANDBY);
++	ret = cci_write(imx214->regmap, IMX214_REG_MODE_SELECT,
++			IMX214_MODE_STANDBY, NULL);
+ 	if (ret < 0)
+ 		dev_err(imx214->dev, "could not sent stop table %d\n",	ret);
+ 
+@@ -877,14 +839,17 @@ static int imx214_stop_streaming(struct imx214 *imx214)
+ static int imx214_s_stream(struct v4l2_subdev *subdev, int enable)
+ {
+ 	struct imx214 *imx214 = to_imx214(subdev);
+-	int ret;
++	struct v4l2_subdev_state *state;
++	int ret = 0;
+ 
+ 	if (enable) {
+ 		ret = pm_runtime_resume_and_get(imx214->dev);
+ 		if (ret < 0)
+ 			return ret;
+ 
++		state = v4l2_subdev_lock_and_get_active_state(subdev);
+ 		ret = imx214_start_streaming(imx214);
++		v4l2_subdev_unlock_state(state);
+ 		if (ret < 0)
+ 			goto err_rpm_put;
+ 	} else {
+@@ -948,7 +913,7 @@ static const struct v4l2_subdev_pad_ops imx214_subdev_pad_ops = {
+ 	.enum_mbus_code = imx214_enum_mbus_code,
+ 	.enum_frame_size = imx214_enum_frame_size,
+ 	.enum_frame_interval = imx214_enum_frame_interval,
+-	.get_fmt = imx214_get_format,
++	.get_fmt = v4l2_subdev_get_fmt,
+ 	.set_fmt = imx214_set_format,
+ 	.get_selection = imx214_get_selection,
+ 	.get_frame_interval = imx214_get_frame_interval,
+@@ -965,12 +930,6 @@ static const struct v4l2_subdev_internal_ops imx214_internal_ops = {
+ 	.init_state = imx214_entity_init_state,
+ };
+ 
+-static const struct regmap_config sensor_regmap_config = {
+-	.reg_bits = 16,
+-	.val_bits = 8,
+-	.cache_type = REGCACHE_MAPLE,
+-};
+-
+ static int imx214_get_regulators(struct device *dev, struct imx214 *imx214)
+ {
+ 	unsigned int i;
+@@ -992,28 +951,42 @@ static int imx214_parse_fwnode(struct device *dev)
+ 	int ret;
+ 
+ 	endpoint = fwnode_graph_get_next_endpoint(dev_fwnode(dev), NULL);
+-	if (!endpoint) {
+-		dev_err(dev, "endpoint node not found\n");
+-		return -EINVAL;
+-	}
++	if (!endpoint)
++		return dev_err_probe(dev, -EINVAL, "endpoint node not found\n");
+ 
+ 	ret = v4l2_fwnode_endpoint_alloc_parse(endpoint, &bus_cfg);
+ 	if (ret) {
+-		dev_err(dev, "parsing endpoint node failed\n");
++		dev_err_probe(dev, ret, "parsing endpoint node failed\n");
++		goto done;
++	}
++
++	/* Check the number of MIPI CSI2 data lanes */
++	if (bus_cfg.bus.mipi_csi2.num_data_lanes != 4) {
++		ret = dev_err_probe(dev, -EINVAL,
++				    "only 4 data lanes are currently supported\n");
+ 		goto done;
+ 	}
+ 
+-	for (i = 0; i < bus_cfg.nr_of_link_frequencies; i++)
++	if (bus_cfg.nr_of_link_frequencies != 1)
++		dev_warn(dev, "Only one link-frequency supported, please review your DT. Continuing anyway\n");
++
++	for (i = 0; i < bus_cfg.nr_of_link_frequencies; i++) {
+ 		if (bus_cfg.link_frequencies[i] == IMX214_DEFAULT_LINK_FREQ)
+ 			break;
+-
+-	if (i == bus_cfg.nr_of_link_frequencies) {
+-		dev_err(dev, "link-frequencies %d not supported, Please review your DT\n",
+-			IMX214_DEFAULT_LINK_FREQ);
+-		ret = -EINVAL;
+-		goto done;
++		if (bus_cfg.link_frequencies[i] ==
++		    IMX214_DEFAULT_LINK_FREQ_LEGACY) {
++			dev_warn(dev,
++				 "link-frequencies %d not supported, please review your DT. Continuing anyway\n",
++				 IMX214_DEFAULT_LINK_FREQ);
++			break;
++		}
+ 	}
+ 
++	if (i == bus_cfg.nr_of_link_frequencies)
++		ret = dev_err_probe(dev, -EINVAL,
++				    "link-frequencies %d not supported, please review your DT\n",
++				    IMX214_DEFAULT_LINK_FREQ);
++
+ done:
+ 	v4l2_fwnode_endpoint_free(&bus_cfg);
+ 	fwnode_handle_put(endpoint);
+@@ -1037,34 +1010,28 @@ static int imx214_probe(struct i2c_client *client)
+ 	imx214->dev = dev;
+ 
+ 	imx214->xclk = devm_clk_get(dev, NULL);
+-	if (IS_ERR(imx214->xclk)) {
+-		dev_err(dev, "could not get xclk");
+-		return PTR_ERR(imx214->xclk);
+-	}
++	if (IS_ERR(imx214->xclk))
++		return dev_err_probe(dev, PTR_ERR(imx214->xclk),
++				     "failed to get xclk\n");
+ 
+ 	ret = clk_set_rate(imx214->xclk, IMX214_DEFAULT_CLK_FREQ);
+-	if (ret) {
+-		dev_err(dev, "could not set xclk frequency\n");
+-		return ret;
+-	}
++	if (ret)
++		return dev_err_probe(dev, ret,
++				     "failed to set xclk frequency\n");
+ 
+ 	ret = imx214_get_regulators(dev, imx214);
+-	if (ret < 0) {
+-		dev_err(dev, "cannot get regulators\n");
+-		return ret;
+-	}
++	if (ret < 0)
++		return dev_err_probe(dev, ret, "failed to get regulators\n");
+ 
+ 	imx214->enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
+-	if (IS_ERR(imx214->enable_gpio)) {
+-		dev_err(dev, "cannot get enable gpio\n");
+-		return PTR_ERR(imx214->enable_gpio);
+-	}
++	if (IS_ERR(imx214->enable_gpio))
++		return dev_err_probe(dev, PTR_ERR(imx214->enable_gpio),
++				     "failed to get enable gpio\n");
+ 
+-	imx214->regmap = devm_regmap_init_i2c(client, &sensor_regmap_config);
+-	if (IS_ERR(imx214->regmap)) {
+-		dev_err(dev, "regmap init failed\n");
+-		return PTR_ERR(imx214->regmap);
+-	}
++	imx214->regmap = devm_cci_regmap_init_i2c(client, 16);
++	if (IS_ERR(imx214->regmap))
++		return dev_err_probe(dev, PTR_ERR(imx214->regmap),
++				     "failed to initialize CCI\n");
+ 
+ 	v4l2_i2c_subdev_init(&imx214->sd, client, &imx214_subdev_ops);
+ 	imx214->sd.internal_ops = &imx214_internal_ops;
+@@ -1079,9 +1046,6 @@ static int imx214_probe(struct i2c_client *client)
+ 	if (ret < 0)
+ 		goto error_power_off;
+ 
+-	mutex_init(&imx214->mutex);
+-	imx214->ctrls.lock = &imx214->mutex;
+-
+ 	imx214->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ 	imx214->pad.flags = MEDIA_PAD_FL_SOURCE;
+ 	imx214->sd.dev = &client->dev;
+@@ -1089,32 +1053,40 @@ static int imx214_probe(struct i2c_client *client)
+ 
+ 	ret = media_entity_pads_init(&imx214->sd.entity, 1, &imx214->pad);
+ 	if (ret < 0) {
+-		dev_err(dev, "could not register media entity\n");
++		dev_err_probe(dev, ret, "failed to init entity pads\n");
+ 		goto free_ctrl;
+ 	}
+ 
+-	imx214_entity_init_state(&imx214->sd, NULL);
++	imx214->sd.state_lock = imx214->ctrls.lock;
++	ret = v4l2_subdev_init_finalize(&imx214->sd);
++	if (ret < 0) {
++		dev_err_probe(dev, ret, "subdev init error\n");
++		goto free_entity;
++	}
+ 
+ 	pm_runtime_set_active(imx214->dev);
+ 	pm_runtime_enable(imx214->dev);
+ 
+ 	ret = v4l2_async_register_subdev_sensor(&imx214->sd);
+ 	if (ret < 0) {
+-		dev_err(dev, "could not register v4l2 device\n");
+-		goto free_entity;
++		dev_err_probe(dev, ret,
++			      "failed to register sensor sub-device\n");
++		goto error_subdev_cleanup;
+ 	}
+ 
+ 	pm_runtime_idle(imx214->dev);
+ 
+ 	return 0;
+ 
+-free_entity:
++error_subdev_cleanup:
+ 	pm_runtime_disable(imx214->dev);
+ 	pm_runtime_set_suspended(&client->dev);
++	v4l2_subdev_cleanup(&imx214->sd);
++
++free_entity:
+ 	media_entity_cleanup(&imx214->sd.entity);
+ 
+ free_ctrl:
+-	mutex_destroy(&imx214->mutex);
+ 	v4l2_ctrl_handler_free(&imx214->ctrls);
+ 
+ error_power_off:
+@@ -1129,9 +1101,9 @@ static void imx214_remove(struct i2c_client *client)
+ 	struct imx214 *imx214 = to_imx214(sd);
+ 
+ 	v4l2_async_unregister_subdev(&imx214->sd);
++	v4l2_subdev_cleanup(sd);
+ 	media_entity_cleanup(&imx214->sd.entity);
+ 	v4l2_ctrl_handler_free(&imx214->ctrls);
+-	mutex_destroy(&imx214->mutex);
+ 	pm_runtime_disable(&client->dev);
+ 	if (!pm_runtime_status_suspended(&client->dev)) {
+ 		imx214_power_off(imx214->dev);
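
[The imx214 conversion above leans on the v4l2-cci helpers, whose error
handling accumulates into an optional int pointer so a burst of register
writes needs only one check at the end. Usage sketch, with register names
taken from the driver above:]

	int ret = 0;

	cci_write(imx214->regmap, IMX214_REG_EXPOSURE, 3184, &ret);
	cci_write(imx214->regmap, IMX214_REG_ANALOG_GAIN, 0, &ret);
	/* writes after the first failure become no-ops; check once */
	if (ret)
		return ret;
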
+diff --git a/drivers/media/i2c/ov08x40.c b/drivers/media/i2c/ov08x40.c
+index 67b86dabc67eb1..1fe8e9b584f80a 100644
+--- a/drivers/media/i2c/ov08x40.c
++++ b/drivers/media/i2c/ov08x40.c
+@@ -1869,6 +1869,32 @@ static int ov08x40_stop_streaming(struct ov08x40 *ov08x)
+ 				 OV08X40_REG_VALUE_08BIT, OV08X40_MODE_STANDBY);
+ }
+ 
++/* Verify chip ID */
++static int ov08x40_identify_module(struct ov08x40 *ov08x)
++{
++	struct i2c_client *client = v4l2_get_subdevdata(&ov08x->sd);
++	int ret;
++	u32 val;
++
++	if (ov08x->identified)
++		return 0;
++
++	ret = ov08x40_read_reg(ov08x, OV08X40_REG_CHIP_ID,
++			       OV08X40_REG_VALUE_24BIT, &val);
++	if (ret)
++		return ret;
++
++	if (val != OV08X40_CHIP_ID) {
++		dev_err(&client->dev, "chip id mismatch: %x!=%x\n",
++			OV08X40_CHIP_ID, val);
++		return -ENXIO;
++	}
++
++	ov08x->identified = true;
++
++	return 0;
++}
++
+ static int ov08x40_set_stream(struct v4l2_subdev *sd, int enable)
+ {
+ 	struct ov08x40 *ov08x = to_ov08x40(sd);
+@@ -1882,6 +1908,10 @@ static int ov08x40_set_stream(struct v4l2_subdev *sd, int enable)
+ 		if (ret < 0)
+ 			goto err_unlock;
+ 
++		ret = ov08x40_identify_module(ov08x);
++		if (ret)
++			goto err_rpm_put;
++
+ 		/*
+ 		 * Apply default & customized values
+ 		 * and then start streaming.
+@@ -1906,32 +1936,6 @@ static int ov08x40_set_stream(struct v4l2_subdev *sd, int enable)
+ 	return ret;
+ }
+ 
+-/* Verify chip ID */
+-static int ov08x40_identify_module(struct ov08x40 *ov08x)
+-{
+-	struct i2c_client *client = v4l2_get_subdevdata(&ov08x->sd);
+-	int ret;
+-	u32 val;
+-
+-	if (ov08x->identified)
+-		return 0;
+-
+-	ret = ov08x40_read_reg(ov08x, OV08X40_REG_CHIP_ID,
+-			       OV08X40_REG_VALUE_24BIT, &val);
+-	if (ret)
+-		return ret;
+-
+-	if (val != OV08X40_CHIP_ID) {
+-		dev_err(&client->dev, "chip id mismatch: %x!=%x\n",
+-			OV08X40_CHIP_ID, val);
+-		return -ENXIO;
+-	}
+-
+-	ov08x->identified = true;
+-
+-	return 0;
+-}
+-
+ static const struct v4l2_subdev_video_ops ov08x40_video_ops = {
+ 	.s_stream = ov08x40_set_stream,
+ };
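
[Relocating ov08x40_identify_module() into the streaming path means the
chip-ID probe now runs only after pm_runtime_resume_and_get() has powered the
sensor, and the cached 'identified' flag keeps it a one-time cost. Ordering
sketch, mirroring the labels in the hunk above:]

	ret = pm_runtime_resume_and_get(&client->dev);	/* sensor powered */
	if (ret < 0)
		goto err_unlock;

	ret = ov08x40_identify_module(ov08x);	/* safe to touch registers */
	if (ret)
		goto err_rpm_put;
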
+diff --git a/drivers/misc/lkdtm/perms.c b/drivers/misc/lkdtm/perms.c
+index 5b861dbff27e9a..6c24426104ba6f 100644
+--- a/drivers/misc/lkdtm/perms.c
++++ b/drivers/misc/lkdtm/perms.c
+@@ -28,6 +28,13 @@ static const unsigned long rodata = 0xAA55AA55;
+ /* This is marked __ro_after_init, so it should ultimately be .rodata. */
+ static unsigned long ro_after_init __ro_after_init = 0x55AA5500;
+ 
++/*
++ * This is a pointer to do_nothing() which is initialized at runtime rather
++ * than build time to avoid objtool IBT validation warnings caused by an
++ * inlined unrolled memcpy() in execute_location().
++ */
++static void __ro_after_init *do_nothing_ptr;
++
+ /*
+  * This just returns to the caller. It is designed to be copied into
+  * non-executable memory regions.
+@@ -65,13 +72,12 @@ static noinline __nocfi void execute_location(void *dst, bool write)
+ {
+ 	void (*func)(void);
+ 	func_desc_t fdesc;
+-	void *do_nothing_text = dereference_function_descriptor(do_nothing);
+ 
+-	pr_info("attempting ok execution at %px\n", do_nothing_text);
++	pr_info("attempting ok execution at %px\n", do_nothing_ptr);
+ 	do_nothing();
+ 
+ 	if (write == CODE_WRITE) {
+-		memcpy(dst, do_nothing_text, EXEC_SIZE);
++		memcpy(dst, do_nothing_ptr, EXEC_SIZE);
+ 		flush_icache_range((unsigned long)dst,
+ 				   (unsigned long)dst + EXEC_SIZE);
+ 	}
+@@ -267,6 +273,8 @@ static void lkdtm_ACCESS_NULL(void)
+ 
+ void __init lkdtm_perms_init(void)
+ {
++	do_nothing_ptr = dereference_function_descriptor(do_nothing);
++
+ 	/* Make sure we can write to __ro_after_init values during __init */
+ 	ro_after_init |= 0xAA;
+ }
+diff --git a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c
+index 3c1359d8d4e692..55b892f982e93e 100644
+--- a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c
++++ b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c
+@@ -37,6 +37,7 @@
+ struct pci1xxxx_gpio {
+ 	struct auxiliary_device *aux_dev;
+ 	void __iomem *reg_base;
++	raw_spinlock_t wa_lock;
+ 	struct gpio_chip gpio;
+ 	spinlock_t lock;
+ 	int irq_base;
+@@ -164,7 +165,7 @@ static void pci1xxxx_gpio_irq_ack(struct irq_data *data)
+ 	unsigned long flags;
+ 
+ 	spin_lock_irqsave(&priv->lock, flags);
+-	pci1xxx_assign_bit(priv->reg_base, INTR_STAT_OFFSET(gpio), (gpio % 32), true);
++	writel(BIT(gpio % 32), priv->reg_base + INTR_STAT_OFFSET(gpio));
+ 	spin_unlock_irqrestore(&priv->lock, flags);
+ }
+ 
+@@ -254,6 +255,7 @@ static irqreturn_t pci1xxxx_gpio_irq_handler(int irq, void *dev_id)
+ 	struct pci1xxxx_gpio *priv = dev_id;
+ 	struct gpio_chip *gc =  &priv->gpio;
+ 	unsigned long int_status = 0;
++	unsigned long wa_flags;
+ 	unsigned long flags;
+ 	u8 pincount;
+ 	int bit;
+@@ -277,7 +279,9 @@ static irqreturn_t pci1xxxx_gpio_irq_handler(int irq, void *dev_id)
+ 			writel(BIT(bit), priv->reg_base + INTR_STATUS_OFFSET(gpiobank));
+ 			spin_unlock_irqrestore(&priv->lock, flags);
+ 			irq = irq_find_mapping(gc->irq.domain, (bit + (gpiobank * 32)));
+-			handle_nested_irq(irq);
++			raw_spin_lock_irqsave(&priv->wa_lock, wa_flags);
++			generic_handle_irq(irq);
++			raw_spin_unlock_irqrestore(&priv->wa_lock, wa_flags);
+ 		}
+ 	}
+ 	spin_lock_irqsave(&priv->lock, flags);
+diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
+index a5f88ec97df753..bc40b940ae2145 100644
+--- a/drivers/misc/mei/hw-me-regs.h
++++ b/drivers/misc/mei/hw-me-regs.h
+@@ -117,6 +117,7 @@
+ 
+ #define MEI_DEV_ID_LNL_M      0xA870  /* Lunar Lake Point M */
+ 
++#define MEI_DEV_ID_PTL_H      0xE370  /* Panther Lake H */
+ #define MEI_DEV_ID_PTL_P      0xE470  /* Panther Lake P */
+ 
+ /*
+diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
+index d6ff9d82ae94b3..3f9c60b579ae48 100644
+--- a/drivers/misc/mei/pci-me.c
++++ b/drivers/misc/mei/pci-me.c
+@@ -124,6 +124,7 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
+ 
+ 	{MEI_PCI_DEVICE(MEI_DEV_ID_LNL_M, MEI_ME_PCH15_CFG)},
+ 
++	{MEI_PCI_DEVICE(MEI_DEV_ID_PTL_H, MEI_ME_PCH15_CFG)},
+ 	{MEI_PCI_DEVICE(MEI_DEV_ID_PTL_P, MEI_ME_PCH15_CFG)},
+ 
+ 	/* required last entry */
+diff --git a/drivers/misc/mei/vsc-tp.c b/drivers/misc/mei/vsc-tp.c
+index ef0a9f423c8f8d..eb51fbe8d92fb4 100644
+--- a/drivers/misc/mei/vsc-tp.c
++++ b/drivers/misc/mei/vsc-tp.c
+@@ -36,20 +36,24 @@
+ #define VSC_TP_XFER_TIMEOUT_BYTES		700
+ #define VSC_TP_PACKET_PADDING_SIZE		1
+ #define VSC_TP_PACKET_SIZE(pkt) \
+-	(sizeof(struct vsc_tp_packet) + le16_to_cpu((pkt)->len) + VSC_TP_CRC_SIZE)
++	(sizeof(struct vsc_tp_packet_hdr) + le16_to_cpu((pkt)->hdr.len) + VSC_TP_CRC_SIZE)
+ #define VSC_TP_MAX_PACKET_SIZE \
+-	(sizeof(struct vsc_tp_packet) + VSC_TP_MAX_MSG_SIZE + VSC_TP_CRC_SIZE)
++	(sizeof(struct vsc_tp_packet_hdr) + VSC_TP_MAX_MSG_SIZE + VSC_TP_CRC_SIZE)
+ #define VSC_TP_MAX_XFER_SIZE \
+ 	(VSC_TP_MAX_PACKET_SIZE + VSC_TP_XFER_TIMEOUT_BYTES)
+ #define VSC_TP_NEXT_XFER_LEN(len, offset) \
+-	(len + sizeof(struct vsc_tp_packet) + VSC_TP_CRC_SIZE - offset + VSC_TP_PACKET_PADDING_SIZE)
++	(len + sizeof(struct vsc_tp_packet_hdr) + VSC_TP_CRC_SIZE - offset + VSC_TP_PACKET_PADDING_SIZE)
+ 
+-struct vsc_tp_packet {
++struct vsc_tp_packet_hdr {
+ 	__u8 sync;
+ 	__u8 cmd;
+ 	__le16 len;
+ 	__le32 seq;
+-	__u8 buf[] __counted_by(len);
++};
++
++struct vsc_tp_packet {
++	struct vsc_tp_packet_hdr hdr;
++	__u8 buf[VSC_TP_MAX_XFER_SIZE - sizeof(struct vsc_tp_packet_hdr)];
+ };
+ 
+ struct vsc_tp {
+@@ -158,12 +162,12 @@ static int vsc_tp_dev_xfer(struct vsc_tp *tp, void *obuf, void *ibuf, size_t len
+ static int vsc_tp_xfer_helper(struct vsc_tp *tp, struct vsc_tp_packet *pkt,
+ 			      void *ibuf, u16 ilen)
+ {
+-	int ret, offset = 0, cpy_len, src_len, dst_len = sizeof(struct vsc_tp_packet);
++	int ret, offset = 0, cpy_len, src_len, dst_len = sizeof(struct vsc_tp_packet_hdr);
+ 	int next_xfer_len = VSC_TP_PACKET_SIZE(pkt) + VSC_TP_XFER_TIMEOUT_BYTES;
+ 	u8 *src, *crc_src, *rx_buf = tp->rx_buf;
+ 	int count_down = VSC_TP_MAX_XFER_COUNT;
+ 	u32 recv_crc = 0, crc = ~0;
+-	struct vsc_tp_packet ack;
++	struct vsc_tp_packet_hdr ack;
+ 	u8 *dst = (u8 *)&ack;
+ 	bool synced = false;
+ 
+@@ -280,10 +284,10 @@ int vsc_tp_xfer(struct vsc_tp *tp, u8 cmd, const void *obuf, size_t olen,
+ 
+ 	guard(mutex)(&tp->mutex);
+ 
+-	pkt->sync = VSC_TP_PACKET_SYNC;
+-	pkt->cmd = cmd;
+-	pkt->len = cpu_to_le16(olen);
+-	pkt->seq = cpu_to_le32(++tp->seq);
++	pkt->hdr.sync = VSC_TP_PACKET_SYNC;
++	pkt->hdr.cmd = cmd;
++	pkt->hdr.len = cpu_to_le16(olen);
++	pkt->hdr.seq = cpu_to_le32(++tp->seq);
+ 	memcpy(pkt->buf, obuf, olen);
+ 
+ 	crc = ~crc32(~0, (u8 *)pkt, sizeof(pkt) + olen);
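[Note: splitting vsc_tp_packet into a fixed vsc_tp_packet_hdr plus a bounded payload buffer lets the size macros count only the on-wire header. A userspace sketch of the layout arithmetic — field widths mirror the hunk, names are invented:

	#include <stdint.h>
	#include <stdio.h>

	struct pkt_hdr {            /* fixed-size on-wire header, 8 bytes */
		uint8_t  sync;
		uint8_t  cmd;
		uint16_t len;       /* payload length (little-endian on wire) */
		uint32_t seq;
	};

	#define CRC_SIZE 4
	#define PKT_SIZE(hdr) (sizeof(struct pkt_hdr) + (hdr)->len + CRC_SIZE)

	int main(void)
	{
		struct pkt_hdr h = { .sync = 0x31, .cmd = 1, .len = 16, .seq = 7 };

		/* sizeof() now covers only the header, so the total transfer
		 * is header + payload + CRC, as in VSC_TP_PACKET_SIZE(). */
		printf("header %zu bytes, packet %zu bytes\n",
		       sizeof(struct pkt_hdr), PKT_SIZE(&h));
		return 0;
	}
]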
+diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
+index 945d08531de376..82808cc373f68b 100644
+--- a/drivers/mmc/host/sdhci-msm.c
++++ b/drivers/mmc/host/sdhci-msm.c
+@@ -1866,7 +1866,7 @@ static int sdhci_msm_ice_init(struct sdhci_msm_host *msm_host,
+ 	if (!(cqhci_readl(cq_host, CQHCI_CAP) & CQHCI_CAP_CS))
+ 		return 0;
+ 
+-	ice = of_qcom_ice_get(dev);
++	ice = devm_of_qcom_ice_get(dev);
+ 	if (ice == ERR_PTR(-EOPNOTSUPP)) {
+ 		dev_warn(dev, "Disabling inline encryption support\n");
+ 		ice = NULL;
+diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
+index abc979fbb45d18..93bf085a61d39f 100644
+--- a/drivers/net/dsa/mt7530.c
++++ b/drivers/net/dsa/mt7530.c
+@@ -2540,6 +2540,9 @@ mt7531_setup_common(struct dsa_switch *ds)
+ 	struct mt7530_priv *priv = ds->priv;
+ 	int ret, i;
+ 
++	ds->assisted_learning_on_cpu_port = true;
++	ds->mtu_enforcement_ingress = true;
++
+ 	mt753x_trap_frames(priv);
+ 
+ 	/* Enable and reset MIB counters */
+@@ -2687,9 +2690,6 @@ mt7531_setup(struct dsa_switch *ds)
+ 	if (ret)
+ 		return ret;
+ 
+-	ds->assisted_learning_on_cpu_port = true;
+-	ds->mtu_enforcement_ingress = true;
+-
+ 	return 0;
+ }
+ 
+diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
+index df1df601541217..211c219dd52db8 100644
+--- a/drivers/net/dsa/mv88e6xxx/chip.c
++++ b/drivers/net/dsa/mv88e6xxx/chip.c
+@@ -5186,6 +5186,7 @@ static const struct mv88e6xxx_ops mv88e6320_ops = {
+ 	.port_set_rgmii_delay = mv88e6320_port_set_rgmii_delay,
+ 	.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
+ 	.port_tag_remap = mv88e6095_port_tag_remap,
++	.port_set_policy = mv88e6352_port_set_policy,
+ 	.port_set_frame_mode = mv88e6351_port_set_frame_mode,
+ 	.port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
+ 	.port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
+@@ -5210,8 +5211,10 @@ static const struct mv88e6xxx_ops mv88e6320_ops = {
+ 	.hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ 	.hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
+ 	.reset = mv88e6352_g1_reset,
+-	.vtu_getnext = mv88e6185_g1_vtu_getnext,
+-	.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
++	.vtu_getnext = mv88e6352_g1_vtu_getnext,
++	.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
++	.stu_getnext = mv88e6352_g1_stu_getnext,
++	.stu_loadpurge = mv88e6352_g1_stu_loadpurge,
+ 	.gpio_ops = &mv88e6352_gpio_ops,
+ 	.avb_ops = &mv88e6352_avb_ops,
+ 	.ptp_ops = &mv88e6352_ptp_ops,
+@@ -5236,6 +5239,7 @@ static const struct mv88e6xxx_ops mv88e6321_ops = {
+ 	.port_set_rgmii_delay = mv88e6320_port_set_rgmii_delay,
+ 	.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
+ 	.port_tag_remap = mv88e6095_port_tag_remap,
++	.port_set_policy = mv88e6352_port_set_policy,
+ 	.port_set_frame_mode = mv88e6351_port_set_frame_mode,
+ 	.port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
+ 	.port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
+@@ -5259,8 +5263,10 @@ static const struct mv88e6xxx_ops mv88e6321_ops = {
+ 	.hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ 	.hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
+ 	.reset = mv88e6352_g1_reset,
+-	.vtu_getnext = mv88e6185_g1_vtu_getnext,
+-	.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
++	.vtu_getnext = mv88e6352_g1_vtu_getnext,
++	.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
++	.stu_getnext = mv88e6352_g1_stu_getnext,
++	.stu_loadpurge = mv88e6352_g1_stu_loadpurge,
+ 	.gpio_ops = &mv88e6352_gpio_ops,
+ 	.avb_ops = &mv88e6352_avb_ops,
+ 	.ptp_ops = &mv88e6352_ptp_ops,
+@@ -5852,7 +5858,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
+ 		.global1_addr = 0x1b,
+ 		.global2_addr = 0x1c,
+ 		.age_time_coeff = 3750,
+-		.atu_move_port_mask = 0x1f,
++		.atu_move_port_mask = 0xf,
+ 		.g1_irqs = 9,
+ 		.g2_irqs = 10,
+ 		.pvt = true,
+@@ -6182,8 +6188,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
+ 		.num_databases = 4096,
+ 		.num_macs = 8192,
+ 		.num_ports = 7,
+-		.num_internal_phys = 2,
+-		.internal_phys_offset = 3,
++		.num_internal_phys = 5,
+ 		.num_gpio = 15,
+ 		.max_vid = 4095,
+ 		.max_sid = 63,
+@@ -6257,6 +6262,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
+ 		.num_internal_phys = 5,
+ 		.num_gpio = 15,
+ 		.max_vid = 4095,
++		.max_sid = 63,
+ 		.port_base_addr = 0x10,
+ 		.phy_base_addr = 0x0,
+ 		.global1_addr = 0x1b,
+@@ -6282,6 +6288,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
+ 		.num_internal_phys = 5,
+ 		.num_gpio = 15,
+ 		.max_vid = 4095,
++		.max_sid = 63,
+ 		.port_base_addr = 0x10,
+ 		.phy_base_addr = 0x0,
+ 		.global1_addr = 0x1b,
+@@ -6290,6 +6297,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
+ 		.g1_irqs = 8,
+ 		.g2_irqs = 10,
+ 		.atu_move_port_mask = 0xf,
++		.pvt = true,
+ 		.multi_chip = true,
+ 		.edsa_support = MV88E6XXX_EDSA_SUPPORTED,
+ 		.ptp_support = true,
+@@ -6312,7 +6320,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
+ 		.global1_addr = 0x1b,
+ 		.global2_addr = 0x1c,
+ 		.age_time_coeff = 3750,
+-		.atu_move_port_mask = 0x1f,
++		.atu_move_port_mask = 0xf,
+ 		.g1_irqs = 9,
+ 		.g2_irqs = 10,
+ 		.pvt = true,
+@@ -6377,8 +6385,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
+ 		.num_databases = 4096,
+ 		.num_macs = 8192,
+ 		.num_ports = 7,
+-		.num_internal_phys = 2,
+-		.internal_phys_offset = 3,
++		.num_internal_phys = 5,
+ 		.num_gpio = 15,
+ 		.max_vid = 4095,
+ 		.max_sid = 63,
+diff --git a/drivers/net/ethernet/amd/pds_core/adminq.c b/drivers/net/ethernet/amd/pds_core/adminq.c
+index c83a0a80d5334e..506f682d15c10a 100644
+--- a/drivers/net/ethernet/amd/pds_core/adminq.c
++++ b/drivers/net/ethernet/amd/pds_core/adminq.c
+@@ -5,11 +5,6 @@
+ 
+ #include "core.h"
+ 
+-struct pdsc_wait_context {
+-	struct pdsc_qcq *qcq;
+-	struct completion wait_completion;
+-};
+-
+ static int pdsc_process_notifyq(struct pdsc_qcq *qcq)
+ {
+ 	union pds_core_notifyq_comp *comp;
+@@ -109,10 +104,10 @@ void pdsc_process_adminq(struct pdsc_qcq *qcq)
+ 		q_info = &q->info[q->tail_idx];
+ 		q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
+ 
+-		/* Copy out the completion data */
+-		memcpy(q_info->dest, comp, sizeof(*comp));
+-
+-		complete_all(&q_info->wc->wait_completion);
++		if (!completion_done(&q_info->completion)) {
++			memcpy(q_info->dest, comp, sizeof(*comp));
++			complete(&q_info->completion);
++		}
+ 
+ 		if (cq->tail_idx == cq->num_descs - 1)
+ 			cq->done_color = !cq->done_color;
+@@ -162,8 +157,7 @@ irqreturn_t pdsc_adminq_isr(int irq, void *data)
+ static int __pdsc_adminq_post(struct pdsc *pdsc,
+ 			      struct pdsc_qcq *qcq,
+ 			      union pds_core_adminq_cmd *cmd,
+-			      union pds_core_adminq_comp *comp,
+-			      struct pdsc_wait_context *wc)
++			      union pds_core_adminq_comp *comp)
+ {
+ 	struct pdsc_queue *q = &qcq->q;
+ 	struct pdsc_q_info *q_info;
+@@ -205,9 +199,9 @@ static int __pdsc_adminq_post(struct pdsc *pdsc,
+ 	/* Post the request */
+ 	index = q->head_idx;
+ 	q_info = &q->info[index];
+-	q_info->wc = wc;
+ 	q_info->dest = comp;
+ 	memcpy(q_info->desc, cmd, sizeof(*cmd));
++	reinit_completion(&q_info->completion);
+ 
+ 	dev_dbg(pdsc->dev, "head_idx %d tail_idx %d\n",
+ 		q->head_idx, q->tail_idx);
+@@ -231,16 +225,13 @@ int pdsc_adminq_post(struct pdsc *pdsc,
+ 		     union pds_core_adminq_comp *comp,
+ 		     bool fast_poll)
+ {
+-	struct pdsc_wait_context wc = {
+-		.wait_completion =
+-			COMPLETION_INITIALIZER_ONSTACK(wc.wait_completion),
+-	};
+ 	unsigned long poll_interval = 1;
+ 	unsigned long poll_jiffies;
+ 	unsigned long time_limit;
+ 	unsigned long time_start;
+ 	unsigned long time_done;
+ 	unsigned long remaining;
++	struct completion *wc;
+ 	int err = 0;
+ 	int index;
+ 
+@@ -250,20 +241,19 @@ int pdsc_adminq_post(struct pdsc *pdsc,
+ 		return -ENXIO;
+ 	}
+ 
+-	wc.qcq = &pdsc->adminqcq;
+-	index = __pdsc_adminq_post(pdsc, &pdsc->adminqcq, cmd, comp, &wc);
++	index = __pdsc_adminq_post(pdsc, &pdsc->adminqcq, cmd, comp);
+ 	if (index < 0) {
+ 		err = index;
+ 		goto err_out;
+ 	}
+ 
++	wc = &pdsc->adminqcq.q.info[index].completion;
+ 	time_start = jiffies;
+ 	time_limit = time_start + HZ * pdsc->devcmd_timeout;
+ 	do {
+ 		/* Timeslice the actual wait to catch IO errors etc early */
+ 		poll_jiffies = msecs_to_jiffies(poll_interval);
+-		remaining = wait_for_completion_timeout(&wc.wait_completion,
+-							poll_jiffies);
++		remaining = wait_for_completion_timeout(wc, poll_jiffies);
+ 		if (remaining)
+ 			break;
+ 
+@@ -292,9 +282,11 @@ int pdsc_adminq_post(struct pdsc *pdsc,
+ 	dev_dbg(pdsc->dev, "%s: elapsed %d msecs\n",
+ 		__func__, jiffies_to_msecs(time_done - time_start));
+ 
+-	/* Check the results */
+-	if (time_after_eq(time_done, time_limit))
++	/* Check the results and clear an un-completed timeout */
++	if (time_after_eq(time_done, time_limit) && !completion_done(wc)) {
+ 		err = -ETIMEDOUT;
++		complete(wc);
++	}
+ 
+ 	dev_dbg(pdsc->dev, "read admin queue completion idx %d:\n", index);
+ 	dynamic_hex_dump("comp ", DUMP_PREFIX_OFFSET, 16, 1,
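[Note: the adminq rework above moves the wait object from the caller's stack into the long-lived queue slot, so a command that times out can later complete without the ISR touching freed stack memory; completion_done() lets each side detect that race. A rough pthread analogue of the pattern — not the driver's code, all names invented:

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>
	#include <time.h>
	#include <unistd.h>

	struct slot {
		pthread_mutex_t lock;
		pthread_cond_t  cond;
		bool            done;      /* mirrors completion_done() */
	};

	static void slot_complete(struct slot *s)
	{
		pthread_mutex_lock(&s->lock);
		if (!s->done) {            /* late IRQ for timed-out cmd: skip */
			s->done = true;
			pthread_cond_signal(&s->cond);
		}
		pthread_mutex_unlock(&s->lock);
	}

	static int slot_wait(struct slot *s, int timeout_ms)
	{
		struct timespec ts;
		int err = 0;

		clock_gettime(CLOCK_REALTIME, &ts);
		ts.tv_sec  += timeout_ms / 1000;
		ts.tv_nsec += (long)(timeout_ms % 1000) * 1000000L;
		if (ts.tv_nsec >= 1000000000L) {
			ts.tv_sec++;
			ts.tv_nsec -= 1000000000L;
		}

		pthread_mutex_lock(&s->lock);
		while (!s->done && err == 0)
			err = pthread_cond_timedwait(&s->cond, &s->lock, &ts);
		if (err)
			s->done = true;    /* like complete(wc) on timeout */
		pthread_mutex_unlock(&s->lock);
		return err ? -1 : 0;
	}

	static void *irq_thread(void *arg)
	{
		usleep(100 * 1000);        /* device completes after 100 ms */
		slot_complete(arg);
		return NULL;
	}

	int main(void)
	{
		struct slot s = { PTHREAD_MUTEX_INITIALIZER,
				  PTHREAD_COND_INITIALIZER, false };
		pthread_t t;

		pthread_create(&t, NULL, irq_thread, &s);
		printf("wait -> %d\n", slot_wait(&s, 1000)); /* 0: completed */
		pthread_join(t, NULL);
		return 0;
	}

Build with -pthread. Marking the slot done on timeout is what makes the late completion path skip the memcpy into a buffer the waiter has abandoned.]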
+diff --git a/drivers/net/ethernet/amd/pds_core/auxbus.c b/drivers/net/ethernet/amd/pds_core/auxbus.c
+index 2babea11099179..b76a9b7e0aed66 100644
+--- a/drivers/net/ethernet/amd/pds_core/auxbus.c
++++ b/drivers/net/ethernet/amd/pds_core/auxbus.c
+@@ -107,9 +107,6 @@ int pds_client_adminq_cmd(struct pds_auxiliary_dev *padev,
+ 	dev_dbg(pf->dev, "%s: %s opcode %d\n",
+ 		__func__, dev_name(&padev->aux_dev.dev), req->opcode);
+ 
+-	if (pf->state)
+-		return -ENXIO;
+-
+ 	/* Wrap the client's request */
+ 	cmd.client_request.opcode = PDS_AQ_CMD_CLIENT_CMD;
+ 	cmd.client_request.client_id = cpu_to_le16(padev->client_id);
+diff --git a/drivers/net/ethernet/amd/pds_core/core.c b/drivers/net/ethernet/amd/pds_core/core.c
+index 536635e5772799..3c60d4cf9d0e17 100644
+--- a/drivers/net/ethernet/amd/pds_core/core.c
++++ b/drivers/net/ethernet/amd/pds_core/core.c
+@@ -167,8 +167,10 @@ static void pdsc_q_map(struct pdsc_queue *q, void *base, dma_addr_t base_pa)
+ 	q->base = base;
+ 	q->base_pa = base_pa;
+ 
+-	for (i = 0, cur = q->info; i < q->num_descs; i++, cur++)
++	for (i = 0, cur = q->info; i < q->num_descs; i++, cur++) {
+ 		cur->desc = base + (i * q->desc_size);
++		init_completion(&cur->completion);
++	}
+ }
+ 
+ static void pdsc_cq_map(struct pdsc_cq *cq, void *base, dma_addr_t base_pa)
+@@ -325,10 +327,7 @@ static int pdsc_core_init(struct pdsc *pdsc)
+ 	size_t sz;
+ 	int err;
+ 
+-	/* Scale the descriptor ring length based on number of CPUs and VFs */
+-	numdescs = max_t(int, PDSC_ADMINQ_MIN_LENGTH, num_online_cpus());
+-	numdescs += 2 * pci_sriov_get_totalvfs(pdsc->pdev);
+-	numdescs = roundup_pow_of_two(numdescs);
++	numdescs = PDSC_ADMINQ_MAX_LENGTH;
+ 	err = pdsc_qcq_alloc(pdsc, PDS_CORE_QTYPE_ADMINQ, 0, "adminq",
+ 			     PDS_CORE_QCQ_F_CORE | PDS_CORE_QCQ_F_INTR,
+ 			     numdescs,
+diff --git a/drivers/net/ethernet/amd/pds_core/core.h b/drivers/net/ethernet/amd/pds_core/core.h
+index 14522d6d5f86bb..ec637dc4327a5d 100644
+--- a/drivers/net/ethernet/amd/pds_core/core.h
++++ b/drivers/net/ethernet/amd/pds_core/core.h
+@@ -16,7 +16,7 @@
+ 
+ #define PDSC_WATCHDOG_SECS	5
+ #define PDSC_QUEUE_NAME_MAX_SZ  16
+-#define PDSC_ADMINQ_MIN_LENGTH	16	/* must be a power of two */
++#define PDSC_ADMINQ_MAX_LENGTH	16	/* must be a power of two */
+ #define PDSC_NOTIFYQ_LENGTH	64	/* must be a power of two */
+ #define PDSC_TEARDOWN_RECOVERY	false
+ #define PDSC_TEARDOWN_REMOVING	true
+@@ -96,7 +96,7 @@ struct pdsc_q_info {
+ 	unsigned int bytes;
+ 	unsigned int nbufs;
+ 	struct pdsc_buf_info bufs[PDS_CORE_MAX_FRAGS];
+-	struct pdsc_wait_context *wc;
++	struct completion completion;
+ 	void *dest;
+ };
+ 
+diff --git a/drivers/net/ethernet/amd/pds_core/devlink.c b/drivers/net/ethernet/amd/pds_core/devlink.c
+index 44971e71991ff5..ca23cde385e67b 100644
+--- a/drivers/net/ethernet/amd/pds_core/devlink.c
++++ b/drivers/net/ethernet/amd/pds_core/devlink.c
+@@ -102,7 +102,7 @@ int pdsc_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
+ 		.fw_control.opcode = PDS_CORE_CMD_FW_CONTROL,
+ 		.fw_control.oper = PDS_CORE_FW_GET_LIST,
+ 	};
+-	struct pds_core_fw_list_info fw_list;
++	struct pds_core_fw_list_info fw_list = {};
+ 	struct pdsc *pdsc = devlink_priv(dl);
+ 	union pds_core_dev_comp comp;
+ 	char buf[32];
+@@ -115,8 +115,6 @@ int pdsc_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
+ 	if (!err)
+ 		memcpy_fromio(&fw_list, pdsc->cmd_regs->data, sizeof(fw_list));
+ 	mutex_unlock(&pdsc->devcmd_lock);
+-	if (err && err != -EIO)
+-		return err;
+ 
+ 	listlen = min(fw_list.num_fw_slots, ARRAY_SIZE(fw_list.fw_names));
+ 	for (i = 0; i < listlen; i++) {
+diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+index d408dcda76d794..223aee1af44304 100644
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -3997,11 +3997,27 @@ static int mtk_hw_init(struct mtk_eth *eth, bool reset)
+ 	mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
+ 
+ 	if (mtk_is_netsys_v3_or_greater(eth)) {
+-		/* PSE should not drop port1, port8 and port9 packets */
+-		mtk_w32(eth, 0x00000302, PSE_DROP_CFG);
++		/* PSE dummy page mechanism */
++		mtk_w32(eth, PSE_DUMMY_WORK_GDM(1) | PSE_DUMMY_WORK_GDM(2) |
++			PSE_DUMMY_WORK_GDM(3) | DUMMY_PAGE_THR, PSE_DUMY_REQ);
++
++		/* PSE free buffer drop threshold */
++		mtk_w32(eth, 0x00600009, PSE_IQ_REV(8));
++
++		/* PSE should not drop port8, port9 and port13 packets from
++		 * WDMA Tx
++		 */
++		mtk_w32(eth, 0x00002300, PSE_DROP_CFG);
++
++		/* PSE should drop packets to port8, port9 and port13 on WDMA Rx
++		 * ring full
++		 */
++		mtk_w32(eth, 0x00002300, PSE_PPE_DROP(0));
++		mtk_w32(eth, 0x00002300, PSE_PPE_DROP(1));
++		mtk_w32(eth, 0x00002300, PSE_PPE_DROP(2));
+ 
+ 		/* GDM and CDM Threshold */
+-		mtk_w32(eth, 0x00000707, MTK_CDMW0_THRES);
++		mtk_w32(eth, 0x08000707, MTK_CDMW0_THRES);
+ 		mtk_w32(eth, 0x00000077, MTK_CDMW1_THRES);
+ 
+ 		/* Disable GDM1 RX CRC stripping */
+@@ -4018,7 +4034,7 @@ static int mtk_hw_init(struct mtk_eth *eth, bool reset)
+ 		mtk_w32(eth, 0x00000300, PSE_DROP_CFG);
+ 
+ 		/* PSE should drop packets to port 8/9 on WDMA Rx ring full */
+-		mtk_w32(eth, 0x00000300, PSE_PPE0_DROP);
++		mtk_w32(eth, 0x00000300, PSE_PPE_DROP(0));
+ 
+ 		/* PSE Free Queue Flow Control  */
+ 		mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2);
+diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+index 8d7b6818d86012..0570623e569d5e 100644
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+@@ -151,7 +151,15 @@
+ #define PSE_FQFC_CFG1		0x100
+ #define PSE_FQFC_CFG2		0x104
+ #define PSE_DROP_CFG		0x108
+-#define PSE_PPE0_DROP		0x110
++#define PSE_PPE_DROP(x)		(0x110 + ((x) * 0x4))
++
++/* PSE Last FreeQ Page Request Control */
++#define PSE_DUMY_REQ		0x10C
++/* PSE_DUMY_REQ is not a typo but actually called like that also in
++ * MediaTek's datasheet
++ */
++#define PSE_DUMMY_WORK_GDM(x)	BIT(16 + (x))
++#define DUMMY_PAGE_THR		0x1
+ 
+ /* PSE Input Queue Reservation Register*/
+ #define PSE_IQ_REV(x)		(0x140 + (((x) - 1) << 2))
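[Note: PSE_PPE_DROP(x) generalises the old PSE_PPE0_DROP define — the PPE drop-configuration registers sit 4 bytes apart, so netsys v3 can program all three. A trivial check of the offset arithmetic:

	#include <stdio.h>

	#define PSE_PPE_DROP(x) (0x110 + ((x) * 0x4))

	int main(void)
	{
		for (int i = 0; i < 3; i++)
			printf("PSE_PPE_DROP(%d) = 0x%x\n",
			       i, (unsigned)PSE_PPE_DROP(i));
		return 0;   /* prints 0x110, 0x114, 0x118 */
	}
]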
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c
+index 9f13cea164465e..43b2216bc0a22b 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c
+@@ -618,10 +618,6 @@ struct mlx5_ttc_table *mlx5_create_inner_ttc_table(struct mlx5_core_dev *dev,
+ 	bool use_l4_type;
+ 	int err;
+ 
+-	ttc = kvzalloc(sizeof(*ttc), GFP_KERNEL);
+-	if (!ttc)
+-		return ERR_PTR(-ENOMEM);
+-
+ 	switch (params->ns_type) {
+ 	case MLX5_FLOW_NAMESPACE_PORT_SEL:
+ 		use_l4_type = MLX5_CAP_GEN_2(dev, pcc_ifa2) &&
+@@ -635,7 +631,16 @@ struct mlx5_ttc_table *mlx5_create_inner_ttc_table(struct mlx5_core_dev *dev,
+ 		return ERR_PTR(-EINVAL);
+ 	}
+ 
++	ttc = kvzalloc(sizeof(*ttc), GFP_KERNEL);
++	if (!ttc)
++		return ERR_PTR(-ENOMEM);
++
+ 	ns = mlx5_get_flow_namespace(dev, params->ns_type);
++	if (!ns) {
++		kvfree(ttc);
++		return ERR_PTR(-EOPNOTSUPP);
++	}
++
+ 	groups = use_l4_type ? &inner_ttc_groups[TTC_GROUPS_USE_L4_TYPE] :
+ 			       &inner_ttc_groups[TTC_GROUPS_DEFAULT];
+ 
+@@ -691,10 +696,6 @@ struct mlx5_ttc_table *mlx5_create_ttc_table(struct mlx5_core_dev *dev,
+ 	bool use_l4_type;
+ 	int err;
+ 
+-	ttc = kvzalloc(sizeof(*ttc), GFP_KERNEL);
+-	if (!ttc)
+-		return ERR_PTR(-ENOMEM);
+-
+ 	switch (params->ns_type) {
+ 	case MLX5_FLOW_NAMESPACE_PORT_SEL:
+ 		use_l4_type = MLX5_CAP_GEN_2(dev, pcc_ifa2) &&
+@@ -708,7 +709,16 @@ struct mlx5_ttc_table *mlx5_create_ttc_table(struct mlx5_core_dev *dev,
+ 		return ERR_PTR(-EINVAL);
+ 	}
+ 
++	ttc = kvzalloc(sizeof(*ttc), GFP_KERNEL);
++	if (!ttc)
++		return ERR_PTR(-ENOMEM);
++
+ 	ns = mlx5_get_flow_namespace(dev, params->ns_type);
++	if (!ns) {
++		kvfree(ttc);
++		return ERR_PTR(-EOPNOTSUPP);
++	}
++
+ 	groups = use_l4_type ? &ttc_groups[TTC_GROUPS_USE_L4_TYPE] :
+ 			       &ttc_groups[TTC_GROUPS_DEFAULT];
+ 
+diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
+index 41a27ae58cedf6..f5449b73b9a76b 100644
+--- a/drivers/net/ethernet/sun/niu.c
++++ b/drivers/net/ethernet/sun/niu.c
+@@ -9058,6 +9058,8 @@ static void niu_try_msix(struct niu *np, u8 *ldg_num_map)
+ 		msi_vec[i].entry = i;
+ 	}
+ 
++	pdev->dev_flags |= PCI_DEV_FLAGS_MSIX_TOUCH_ENTRY_DATA_FIRST;
++
+ 	num_irqs = pci_enable_msix_range(pdev, msi_vec, 1, num_irqs);
+ 	if (num_irqs < 0) {
+ 		np->flags &= ~NIU_FLAGS_MSIX;
+diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c
+index 691969a4910f2b..e3a5961dced9bb 100644
+--- a/drivers/net/phy/microchip.c
++++ b/drivers/net/phy/microchip.c
+@@ -37,47 +37,6 @@ static int lan88xx_write_page(struct phy_device *phydev, int page)
+ 	return __phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, page);
+ }
+ 
+-static int lan88xx_phy_config_intr(struct phy_device *phydev)
+-{
+-	int rc;
+-
+-	if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+-		/* unmask all source and clear them before enable */
+-		rc = phy_write(phydev, LAN88XX_INT_MASK, 0x7FFF);
+-		rc = phy_read(phydev, LAN88XX_INT_STS);
+-		rc = phy_write(phydev, LAN88XX_INT_MASK,
+-			       LAN88XX_INT_MASK_MDINTPIN_EN_ |
+-			       LAN88XX_INT_MASK_LINK_CHANGE_);
+-	} else {
+-		rc = phy_write(phydev, LAN88XX_INT_MASK, 0);
+-		if (rc)
+-			return rc;
+-
+-		/* Ack interrupts after they have been disabled */
+-		rc = phy_read(phydev, LAN88XX_INT_STS);
+-	}
+-
+-	return rc < 0 ? rc : 0;
+-}
+-
+-static irqreturn_t lan88xx_handle_interrupt(struct phy_device *phydev)
+-{
+-	int irq_status;
+-
+-	irq_status = phy_read(phydev, LAN88XX_INT_STS);
+-	if (irq_status < 0) {
+-		phy_error(phydev);
+-		return IRQ_NONE;
+-	}
+-
+-	if (!(irq_status & LAN88XX_INT_STS_LINK_CHANGE_))
+-		return IRQ_NONE;
+-
+-	phy_trigger_machine(phydev);
+-
+-	return IRQ_HANDLED;
+-}
+-
+ static int lan88xx_suspend(struct phy_device *phydev)
+ {
+ 	struct lan88xx_priv *priv = phydev->priv;
+@@ -528,8 +487,9 @@ static struct phy_driver microchip_phy_driver[] = {
+ 	.config_aneg	= lan88xx_config_aneg,
+ 	.link_change_notify = lan88xx_link_change_notify,
+ 
+-	.config_intr	= lan88xx_phy_config_intr,
+-	.handle_interrupt = lan88xx_handle_interrupt,
++	/* Interrupt handling is broken, do not define related
++	 * functions to force polling.
++	 */
+ 
+ 	.suspend	= lan88xx_suspend,
+ 	.resume		= genphy_resume,
+diff --git a/drivers/net/phy/phy_led_triggers.c b/drivers/net/phy/phy_led_triggers.c
+index f550576eb9dae7..6f9d8da76c4dfb 100644
+--- a/drivers/net/phy/phy_led_triggers.c
++++ b/drivers/net/phy/phy_led_triggers.c
+@@ -91,9 +91,8 @@ int phy_led_triggers_register(struct phy_device *phy)
+ 	if (!phy->phy_num_led_triggers)
+ 		return 0;
+ 
+-	phy->led_link_trigger = devm_kzalloc(&phy->mdio.dev,
+-					     sizeof(*phy->led_link_trigger),
+-					     GFP_KERNEL);
++	phy->led_link_trigger = kzalloc(sizeof(*phy->led_link_trigger),
++					GFP_KERNEL);
+ 	if (!phy->led_link_trigger) {
+ 		err = -ENOMEM;
+ 		goto out_clear;
+@@ -103,10 +102,9 @@ int phy_led_triggers_register(struct phy_device *phy)
+ 	if (err)
+ 		goto out_free_link;
+ 
+-	phy->phy_led_triggers = devm_kcalloc(&phy->mdio.dev,
+-					    phy->phy_num_led_triggers,
+-					    sizeof(struct phy_led_trigger),
+-					    GFP_KERNEL);
++	phy->phy_led_triggers = kcalloc(phy->phy_num_led_triggers,
++					sizeof(struct phy_led_trigger),
++					GFP_KERNEL);
+ 	if (!phy->phy_led_triggers) {
+ 		err = -ENOMEM;
+ 		goto out_unreg_link;
+@@ -127,11 +125,11 @@ int phy_led_triggers_register(struct phy_device *phy)
+ out_unreg:
+ 	while (i--)
+ 		phy_led_trigger_unregister(&phy->phy_led_triggers[i]);
+-	devm_kfree(&phy->mdio.dev, phy->phy_led_triggers);
++	kfree(phy->phy_led_triggers);
+ out_unreg_link:
+ 	phy_led_trigger_unregister(phy->led_link_trigger);
+ out_free_link:
+-	devm_kfree(&phy->mdio.dev, phy->led_link_trigger);
++	kfree(phy->led_link_trigger);
+ 	phy->led_link_trigger = NULL;
+ out_clear:
+ 	phy->phy_num_led_triggers = 0;
+@@ -145,8 +143,13 @@ void phy_led_triggers_unregister(struct phy_device *phy)
+ 
+ 	for (i = 0; i < phy->phy_num_led_triggers; i++)
+ 		phy_led_trigger_unregister(&phy->phy_led_triggers[i]);
++	kfree(phy->phy_led_triggers);
++	phy->phy_led_triggers = NULL;
+ 
+-	if (phy->led_link_trigger)
++	if (phy->led_link_trigger) {
+ 		phy_led_trigger_unregister(phy->led_link_trigger);
++		kfree(phy->led_link_trigger);
++		phy->led_link_trigger = NULL;
++	}
+ }
+ EXPORT_SYMBOL_GPL(phy_led_triggers_unregister);
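[Note: the phy_led_triggers change swaps devm allocations for plain kzalloc/kfree because the trigger memory must be released at unregister time, not when the underlying MDIO device is finally destroyed; with devm, repeated register/unregister cycles accumulate allocations. A userspace sketch of the paired lifetime — hypothetical structs:

	#include <stdlib.h>

	struct phy { void *led_link_trigger; void *phy_led_triggers; };

	static int triggers_register(struct phy *p, int n)
	{
		p->led_link_trigger = calloc(1, 16);
		p->phy_led_triggers = calloc(n, 32);
		if (!p->led_link_trigger || !p->phy_led_triggers) {
			free(p->led_link_trigger);
			free(p->phy_led_triggers);
			p->led_link_trigger = p->phy_led_triggers = NULL;
			return -1;
		}
		return 0;
	}

	static void triggers_unregister(struct phy *p)
	{
		free(p->phy_led_triggers);   /* freed here, not via devm */
		p->phy_led_triggers = NULL;
		free(p->led_link_trigger);
		p->led_link_trigger = NULL;  /* safe to register again */
	}

	int main(void)
	{
		struct phy p = { 0 };

		for (int i = 0; i < 2; i++) { /* two cycles, no leak */
			if (triggers_register(&p, 4) == 0)
				triggers_unregister(&p);
		}
		return 0;
	}
]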
+diff --git a/drivers/net/vmxnet3/vmxnet3_xdp.c b/drivers/net/vmxnet3/vmxnet3_xdp.c
+index 616ecc38d1726c..5f470499e60024 100644
+--- a/drivers/net/vmxnet3/vmxnet3_xdp.c
++++ b/drivers/net/vmxnet3/vmxnet3_xdp.c
+@@ -397,7 +397,7 @@ vmxnet3_process_xdp(struct vmxnet3_adapter *adapter,
+ 
+ 	xdp_init_buff(&xdp, PAGE_SIZE, &rq->xdp_rxq);
+ 	xdp_prepare_buff(&xdp, page_address(page), rq->page_pool->p.offset,
+-			 rbi->len, false);
++			 rcd->len, false);
+ 	xdp_buff_clear_frags_flag(&xdp);
+ 
+ 	xdp_prog = rcu_dereference(rq->adapter->xdp_bpf_prog);
+diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
+index 63fe51d0e64db3..809b407cece15e 100644
+--- a/drivers/net/xen-netfront.c
++++ b/drivers/net/xen-netfront.c
+@@ -985,20 +985,27 @@ static u32 xennet_run_xdp(struct netfront_queue *queue, struct page *pdata,
+ 	act = bpf_prog_run_xdp(prog, xdp);
+ 	switch (act) {
+ 	case XDP_TX:
+-		get_page(pdata);
+ 		xdpf = xdp_convert_buff_to_frame(xdp);
++		if (unlikely(!xdpf)) {
++			trace_xdp_exception(queue->info->netdev, prog, act);
++			break;
++		}
++		get_page(pdata);
+ 		err = xennet_xdp_xmit(queue->info->netdev, 1, &xdpf, 0);
+-		if (unlikely(!err))
++		if (unlikely(err <= 0)) {
++			if (err < 0)
++				trace_xdp_exception(queue->info->netdev, prog, act);
+ 			xdp_return_frame_rx_napi(xdpf);
+-		else if (unlikely(err < 0))
+-			trace_xdp_exception(queue->info->netdev, prog, act);
++		}
+ 		break;
+ 	case XDP_REDIRECT:
+ 		get_page(pdata);
+ 		err = xdp_do_redirect(queue->info->netdev, xdp, prog);
+ 		*need_xdp_flush = true;
+-		if (unlikely(err))
++		if (unlikely(err)) {
+ 			trace_xdp_exception(queue->info->netdev, prog, act);
++			xdp_return_buff(xdp);
++		}
+ 		break;
+ 	case XDP_PASS:
+ 	case XDP_DROP:
+diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.c b/drivers/ntb/hw/amd/ntb_hw_amd.c
+index d687e8c2cc78dc..63ceed89b62ef9 100644
+--- a/drivers/ntb/hw/amd/ntb_hw_amd.c
++++ b/drivers/ntb/hw/amd/ntb_hw_amd.c
+@@ -1318,6 +1318,7 @@ static const struct pci_device_id amd_ntb_pci_tbl[] = {
+ 	{ PCI_VDEVICE(AMD, 0x148b), (kernel_ulong_t)&dev_data[1] },
+ 	{ PCI_VDEVICE(AMD, 0x14c0), (kernel_ulong_t)&dev_data[1] },
+ 	{ PCI_VDEVICE(AMD, 0x14c3), (kernel_ulong_t)&dev_data[1] },
++	{ PCI_VDEVICE(AMD, 0x155a), (kernel_ulong_t)&dev_data[1] },
+ 	{ PCI_VDEVICE(HYGON, 0x145b), (kernel_ulong_t)&dev_data[0] },
+ 	{ 0, }
+ };
+diff --git a/drivers/ntb/hw/idt/ntb_hw_idt.c b/drivers/ntb/hw/idt/ntb_hw_idt.c
+index 6fc9dfe8247477..419de7038570df 100644
+--- a/drivers/ntb/hw/idt/ntb_hw_idt.c
++++ b/drivers/ntb/hw/idt/ntb_hw_idt.c
+@@ -1041,7 +1041,7 @@ static inline char *idt_get_mw_name(enum idt_mw_type mw_type)
+ static struct idt_mw_cfg *idt_scan_mws(struct idt_ntb_dev *ndev, int port,
+ 				       unsigned char *mw_cnt)
+ {
+-	struct idt_mw_cfg mws[IDT_MAX_NR_MWS], *ret_mws;
++	struct idt_mw_cfg *mws;
+ 	const struct idt_ntb_bar *bars;
+ 	enum idt_mw_type mw_type;
+ 	unsigned char widx, bidx, en_cnt;
+@@ -1049,6 +1049,11 @@ static struct idt_mw_cfg *idt_scan_mws(struct idt_ntb_dev *ndev, int port,
+ 	int aprt_size;
+ 	u32 data;
+ 
++	mws = devm_kcalloc(&ndev->ntb.pdev->dev, IDT_MAX_NR_MWS,
++			   sizeof(*mws), GFP_KERNEL);
++	if (!mws)
++		return ERR_PTR(-ENOMEM);
++
+ 	/* Retrieve the array of the BARs registers */
+ 	bars = portdata_tbl[port].bars;
+ 
+@@ -1103,16 +1108,7 @@ static struct idt_mw_cfg *idt_scan_mws(struct idt_ntb_dev *ndev, int port,
+ 		}
+ 	}
+ 
+-	/* Allocate memory for memory window descriptors */
+-	ret_mws = devm_kcalloc(&ndev->ntb.pdev->dev, *mw_cnt, sizeof(*ret_mws),
+-			       GFP_KERNEL);
+-	if (!ret_mws)
+-		return ERR_PTR(-ENOMEM);
+-
+-	/* Copy the info of detected memory windows */
+-	memcpy(ret_mws, mws, (*mw_cnt)*sizeof(*ret_mws));
+-
+-	return ret_mws;
++	return mws;
+ }
+ 
+ /*
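[Note: idt_scan_mws() previously built the window table in an IDT_MAX_NR_MWS-sized array on the kernel stack and copied it into a devm allocation afterwards; allocating the full table up front avoids the large stack frame and the copy. A userspace sketch of the pattern — sizes and fields illustrative:

	#include <stdio.h>
	#include <stdlib.h>

	#define MAX_NR_MWS 29
	struct mw_cfg { int type, bar; long size; };

	static struct mw_cfg *scan_mws(unsigned char *cnt)
	{
		/* was: struct mw_cfg mws[MAX_NR_MWS]; on the stack */
		struct mw_cfg *mws = calloc(MAX_NR_MWS, sizeof(*mws));

		if (!mws)
			return NULL;
		*cnt = 2;                 /* pretend two windows were found */
		mws[0] = (struct mw_cfg){ 0, 2, 1L << 20 };
		mws[1] = (struct mw_cfg){ 1, 4, 1L << 16 };
		return mws;               /* caller owns (devm_* in the driver) */
	}

	int main(void)
	{
		unsigned char cnt;
		struct mw_cfg *mws = scan_mws(&cnt);

		printf("found %d windows\n", mws ? cnt : 0);
		free(mws);
		return 0;
	}
]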
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 9bdf6fc53697c0..f19410723b1795 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -4273,6 +4273,15 @@ static void nvme_scan_work(struct work_struct *work)
+ 			nvme_scan_ns_sequential(ctrl);
+ 	}
+ 	mutex_unlock(&ctrl->scan_lock);
++
++	/* Requeue if we have missed AENs */
++	if (test_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events))
++		nvme_queue_scan(ctrl);
++#ifdef CONFIG_NVME_MULTIPATH
++	else if (ctrl->ana_log_buf)
++		/* Re-read the ANA log page to not miss updates */
++		queue_work(nvme_wq, &ctrl->ana_work);
++#endif
+ }
+ 
+ /*
+diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
+index f25582e4d88bb0..561dd08022c061 100644
+--- a/drivers/nvme/host/multipath.c
++++ b/drivers/nvme/host/multipath.c
+@@ -427,7 +427,7 @@ static bool nvme_available_path(struct nvme_ns_head *head)
+ 	struct nvme_ns *ns;
+ 
+ 	if (!test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags))
+-		return NULL;
++		return false;
+ 
+ 	list_for_each_entry_srcu(ns, &head->list, siblings,
+ 				 srcu_read_lock_held(&head->srcu)) {
+diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
+index 7318b736d41417..ef8c5961e10c89 100644
+--- a/drivers/nvme/target/fc.c
++++ b/drivers/nvme/target/fc.c
+@@ -1028,33 +1028,24 @@ nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
+ 	struct nvmet_fc_hostport *newhost, *match = NULL;
+ 	unsigned long flags;
+ 
++	/*
++	 * Caller holds a reference on tgtport.
++	 */
++
+ 	/* if LLDD not implemented, leave as NULL */
+ 	if (!hosthandle)
+ 		return NULL;
+ 
+-	/*
+-	 * take reference for what will be the newly allocated hostport if
+-	 * we end up using a new allocation
+-	 */
+-	if (!nvmet_fc_tgtport_get(tgtport))
+-		return ERR_PTR(-EINVAL);
+-
+ 	spin_lock_irqsave(&tgtport->lock, flags);
+ 	match = nvmet_fc_match_hostport(tgtport, hosthandle);
+ 	spin_unlock_irqrestore(&tgtport->lock, flags);
+ 
+-	if (match) {
+-		/* no new allocation - release reference */
+-		nvmet_fc_tgtport_put(tgtport);
++	if (match)
+ 		return match;
+-	}
+ 
+ 	newhost = kzalloc(sizeof(*newhost), GFP_KERNEL);
+-	if (!newhost) {
+-		/* no new allocation - release reference */
+-		nvmet_fc_tgtport_put(tgtport);
++	if (!newhost)
+ 		return ERR_PTR(-ENOMEM);
+-	}
+ 
+ 	spin_lock_irqsave(&tgtport->lock, flags);
+ 	match = nvmet_fc_match_hostport(tgtport, hosthandle);
+@@ -1063,6 +1054,7 @@ nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
+ 		kfree(newhost);
+ 		newhost = match;
+ 	} else {
++		nvmet_fc_tgtport_get(tgtport);
+ 		newhost->tgtport = tgtport;
+ 		newhost->hosthandle = hosthandle;
+ 		INIT_LIST_HEAD(&newhost->host_list);
+@@ -1097,7 +1089,8 @@ static void
+ nvmet_fc_schedule_delete_assoc(struct nvmet_fc_tgt_assoc *assoc)
+ {
+ 	nvmet_fc_tgtport_get(assoc->tgtport);
+-	queue_work(nvmet_wq, &assoc->del_work);
++	if (!queue_work(nvmet_wq, &assoc->del_work))
++		nvmet_fc_tgtport_put(assoc->tgtport);
+ }
+ 
+ static bool
+diff --git a/drivers/of/resolver.c b/drivers/of/resolver.c
+index 5cf96776dd7d31..7d935908b54317 100644
+--- a/drivers/of/resolver.c
++++ b/drivers/of/resolver.c
+@@ -249,25 +249,22 @@ static int adjust_local_phandle_references(struct device_node *local_fixups,
+  */
+ int of_resolve_phandles(struct device_node *overlay)
+ {
+-	struct device_node *child, *local_fixups, *refnode;
+-	struct device_node *tree_symbols, *overlay_fixups;
++	struct device_node *child, *refnode;
++	struct device_node *overlay_fixups;
++	struct device_node __free(device_node) *local_fixups = NULL;
+ 	struct property *prop;
+ 	const char *refpath;
+ 	phandle phandle, phandle_delta;
+ 	int err;
+ 
+-	tree_symbols = NULL;
+-
+ 	if (!overlay) {
+ 		pr_err("null overlay\n");
+-		err = -EINVAL;
+-		goto out;
++		return -EINVAL;
+ 	}
+ 
+ 	if (!of_node_check_flag(overlay, OF_DETACHED)) {
+ 		pr_err("overlay not detached\n");
+-		err = -EINVAL;
+-		goto out;
++		return -EINVAL;
+ 	}
+ 
+ 	phandle_delta = live_tree_max_phandle() + 1;
+@@ -279,7 +276,7 @@ int of_resolve_phandles(struct device_node *overlay)
+ 
+ 	err = adjust_local_phandle_references(local_fixups, overlay, phandle_delta);
+ 	if (err)
+-		goto out;
++		return err;
+ 
+ 	overlay_fixups = NULL;
+ 
+@@ -288,16 +285,13 @@ int of_resolve_phandles(struct device_node *overlay)
+ 			overlay_fixups = child;
+ 	}
+ 
+-	if (!overlay_fixups) {
+-		err = 0;
+-		goto out;
+-	}
++	if (!overlay_fixups)
++		return 0;
+ 
+-	tree_symbols = of_find_node_by_path("/__symbols__");
++	struct device_node __free(device_node) *tree_symbols = of_find_node_by_path("/__symbols__");
+ 	if (!tree_symbols) {
+ 		pr_err("no symbols in root of device tree.\n");
+-		err = -EINVAL;
+-		goto out;
++		return -EINVAL;
+ 	}
+ 
+ 	for_each_property_of_node(overlay_fixups, prop) {
+@@ -311,14 +305,12 @@ int of_resolve_phandles(struct device_node *overlay)
+ 		if (err) {
+ 			pr_err("node label '%s' not found in live devicetree symbols table\n",
+ 			       prop->name);
+-			goto out;
++			return err;
+ 		}
+ 
+ 		refnode = of_find_node_by_path(refpath);
+-		if (!refnode) {
+-			err = -ENOENT;
+-			goto out;
+-		}
++		if (!refnode)
++			return -ENOENT;
+ 
+ 		phandle = refnode->phandle;
+ 		of_node_put(refnode);
+@@ -328,11 +320,8 @@ int of_resolve_phandles(struct device_node *overlay)
+ 			break;
+ 	}
+ 
+-out:
+ 	if (err)
+ 		pr_err("overlay phandle fixup failed: %d\n", err);
+-	of_node_put(tree_symbols);
+-
+ 	return err;
+ }
+ EXPORT_SYMBOL_GPL(of_resolve_phandles);
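[Note: the of_resolve_phandles() rewrite uses the kernel's scope-based cleanup helpers — __free(device_node) drops the node reference automatically on every return path, so the out: label and the manual of_node_put() disappear. The same idea in plain C, using the compiler cleanup attribute that the kernel macro wraps (GCC/Clang extension; names invented):

	#include <stdio.h>
	#include <stdlib.h>

	static void free_charp(char **p)
	{
		free(*p);           /* runs when the variable leaves scope */
	}
	#define __free_charp __attribute__((cleanup(free_charp)))

	static int resolve(const char *label)
	{
		__free_charp char *buf = malloc(64);

		if (!buf)
			return -12; /* -ENOMEM; free(NULL) in cleanup is fine */
		if (!label)
			return -22; /* -EINVAL; early return, no goto needed */

		snprintf(buf, 64, "resolved %s", label);
		puts(buf);
		return 0;
	}

	int main(void)
	{
		resolve("/__symbols__");
		resolve(NULL);
		return 0;
	}

Every exit path runs the cleanup handler, which is exactly why the error-unwinding labels in the hunk above could be deleted.]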
+diff --git a/drivers/pci/msi/msi.c b/drivers/pci/msi/msi.c
+index 2f647cac4cae34..8b884878861842 100644
+--- a/drivers/pci/msi/msi.c
++++ b/drivers/pci/msi/msi.c
+@@ -10,12 +10,12 @@
+ #include <linux/err.h>
+ #include <linux/export.h>
+ #include <linux/irq.h>
++#include <linux/irqdomain.h>
+ 
+ #include "../pci.h"
+ #include "msi.h"
+ 
+ int pci_msi_enable = 1;
+-int pci_msi_ignore_mask;
+ 
+ /**
+  * pci_msi_supported - check whether MSI may be enabled on a device
+@@ -295,8 +295,7 @@ static int msi_setup_msi_desc(struct pci_dev *dev, int nvec,
+ 	/* Lies, damned lies, and MSIs */
+ 	if (dev->dev_flags & PCI_DEV_FLAGS_HAS_MSI_MASKING)
+ 		control |= PCI_MSI_FLAGS_MASKBIT;
+-	/* Respect XEN's mask disabling */
+-	if (pci_msi_ignore_mask)
++	if (pci_msi_domain_supports(dev, MSI_FLAG_NO_MASK, DENY_LEGACY))
+ 		control &= ~PCI_MSI_FLAGS_MASKBIT;
+ 
+ 	desc.nvec_used			= nvec;
+@@ -609,12 +608,16 @@ void msix_prepare_msi_desc(struct pci_dev *dev, struct msi_desc *desc)
+ 	desc->pci.msi_attrib.is_64		= 1;
+ 	desc->pci.msi_attrib.default_irq	= dev->irq;
+ 	desc->pci.mask_base			= dev->msix_base;
+-	desc->pci.msi_attrib.can_mask		= !pci_msi_ignore_mask &&
+-						  !desc->pci.msi_attrib.is_virtual;
+ 
+-	if (desc->pci.msi_attrib.can_mask) {
++
++	if (!pci_msi_domain_supports(dev, MSI_FLAG_NO_MASK, DENY_LEGACY) &&
++	    !desc->pci.msi_attrib.is_virtual) {
+ 		void __iomem *addr = pci_msix_desc_addr(desc);
+ 
++		desc->pci.msi_attrib.can_mask = 1;
++		/* Workaround for SUN NIU insanity, which requires write before read */
++		if (dev->dev_flags & PCI_DEV_FLAGS_MSIX_TOUCH_ENTRY_DATA_FIRST)
++			writel(0, addr + PCI_MSIX_ENTRY_DATA);
+ 		desc->pci.msix_ctrl = readl(addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
+ 	}
+ }
+@@ -659,9 +662,6 @@ static void msix_mask_all(void __iomem *base, int tsize)
+ 	u32 ctrl = PCI_MSIX_ENTRY_CTRL_MASKBIT;
+ 	int i;
+ 
+-	if (pci_msi_ignore_mask)
+-		return;
+-
+ 	for (i = 0; i < tsize; i++, base += PCI_MSIX_ENTRY_SIZE)
+ 		writel(ctrl, base + PCI_MSIX_ENTRY_VECTOR_CTRL);
+ }
+@@ -744,15 +744,17 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
+ 	/* Disable INTX */
+ 	pci_intx_for_msi(dev, 0);
+ 
+-	/*
+-	 * Ensure that all table entries are masked to prevent
+-	 * stale entries from firing in a crash kernel.
+-	 *
+-	 * Done late to deal with a broken Marvell NVME device
+-	 * which takes the MSI-X mask bits into account even
+-	 * when MSI-X is disabled, which prevents MSI delivery.
+-	 */
+-	msix_mask_all(dev->msix_base, tsize);
++	if (!pci_msi_domain_supports(dev, MSI_FLAG_NO_MASK, DENY_LEGACY)) {
++		/*
++		 * Ensure that all table entries are masked to prevent
++		 * stale entries from firing in a crash kernel.
++		 *
++		 * Done late to deal with a broken Marvell NVME device
++		 * which takes the MSI-X mask bits into account even
++		 * when MSI-X is disabled, which prevents MSI delivery.
++		 */
++		msix_mask_all(dev->msix_base, tsize);
++	}
+ 	pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
+ 
+ 	pcibios_free_irq(dev);
+diff --git a/drivers/phy/rockchip/phy-rockchip-usbdp.c b/drivers/phy/rockchip/phy-rockchip-usbdp.c
+index 2c51e5c62d3eb5..f5c6d264d89ed9 100644
+--- a/drivers/phy/rockchip/phy-rockchip-usbdp.c
++++ b/drivers/phy/rockchip/phy-rockchip-usbdp.c
+@@ -1045,7 +1045,6 @@ static int rk_udphy_dp_phy_init(struct phy *phy)
+ 	mutex_lock(&udphy->mutex);
+ 
+ 	udphy->dp_in_use = true;
+-	rk_udphy_dp_hpd_event_trigger(udphy, udphy->dp_sink_hpd_cfg);
+ 
+ 	mutex_unlock(&udphy->mutex);
+ 
+diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
+index f384c72d955452..70d7485ada3643 100644
+--- a/drivers/pinctrl/pinctrl-mcp23s08.c
++++ b/drivers/pinctrl/pinctrl-mcp23s08.c
+@@ -382,6 +382,7 @@ static irqreturn_t mcp23s08_irq(int irq, void *data)
+ {
+ 	struct mcp23s08 *mcp = data;
+ 	int intcap, intcon, intf, i, gpio, gpio_orig, intcap_mask, defval, gpinten;
++	bool need_unmask = false;
+ 	unsigned long int enabled_interrupts;
+ 	unsigned int child_irq;
+ 	bool intf_set, intcap_changed, gpio_bit_changed,
+@@ -396,9 +397,6 @@ static irqreturn_t mcp23s08_irq(int irq, void *data)
+ 		goto unlock;
+ 	}
+ 
+-	if (mcp_read(mcp, MCP_INTCAP, &intcap))
+-		goto unlock;
+-
+ 	if (mcp_read(mcp, MCP_INTCON, &intcon))
+ 		goto unlock;
+ 
+@@ -408,6 +406,16 @@ static irqreturn_t mcp23s08_irq(int irq, void *data)
+ 	if (mcp_read(mcp, MCP_DEFVAL, &defval))
+ 		goto unlock;
+ 
++	/* Mask level interrupts to avoid their immediate reactivation after clearing */
++	if (intcon) {
++		need_unmask = true;
++		if (mcp_write(mcp, MCP_GPINTEN, gpinten & ~intcon))
++			goto unlock;
++	}
++
++	if (mcp_read(mcp, MCP_INTCAP, &intcap))
++		goto unlock;
++
+ 	/* This clears the interrupt(configurable on S18) */
+ 	if (mcp_read(mcp, MCP_GPIO, &gpio))
+ 		goto unlock;
+@@ -470,9 +478,18 @@ static irqreturn_t mcp23s08_irq(int irq, void *data)
+ 		}
+ 	}
+ 
++	if (need_unmask) {
++		mutex_lock(&mcp->lock);
++		goto unlock;
++	}
++
+ 	return IRQ_HANDLED;
+ 
+ unlock:
++	if (need_unmask)
++		if (mcp_write(mcp, MCP_GPINTEN, gpinten))
++			dev_err(mcp->chip.parent, "can't unmask GPINTEN\n");
++
+ 	mutex_unlock(&mcp->lock);
+ 	return IRQ_HANDLED;
+ }
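[Note: the mcp23s08 handler now masks level-triggered sources (the INTCON bits) in GPINTEN before reading INTCAP/GPIO, because those reads clear the latch and a still-asserted level line would otherwise re-raise the interrupt immediately; GPINTEN is restored once handling finishes. A simulated sketch of that ordering — the register model is invented:

	#include <stdint.h>
	#include <stdio.h>

	static uint8_t gpinten = 0xFF;  /* enabled interrupt sources */
	static uint8_t intcon  = 0x10;  /* bit 4 is level-triggered */

	int main(void)
	{
		uint8_t saved = gpinten;

		gpinten &= ~intcon;     /* mask level sources first */
		/* ... read INTCAP and GPIO here: the latch clears, but the
		 * still-asserted level line cannot retrigger while masked */
		gpinten = saved;        /* unmask once handling is done */

		printf("gpinten=0x%02x\n", gpinten);
		return 0;
	}
]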
+diff --git a/drivers/pinctrl/renesas/pinctrl-rza2.c b/drivers/pinctrl/renesas/pinctrl-rza2.c
+index 773eaf508565b0..8369fab61758dd 100644
+--- a/drivers/pinctrl/renesas/pinctrl-rza2.c
++++ b/drivers/pinctrl/renesas/pinctrl-rza2.c
+@@ -243,6 +243,9 @@ static int rza2_gpio_register(struct rza2_pinctrl_priv *priv)
+ 	int ret;
+ 
+ 	chip.label = devm_kasprintf(priv->dev, GFP_KERNEL, "%pOFn", np);
++	if (!chip.label)
++		return -ENOMEM;
++
+ 	chip.parent = priv->dev;
+ 	chip.ngpio = priv->npins;
+ 
+diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c
+index 37476d2558fda7..72df554b6375b8 100644
+--- a/drivers/regulator/rk808-regulator.c
++++ b/drivers/regulator/rk808-regulator.c
+@@ -270,8 +270,8 @@ static const unsigned int rk817_buck1_4_ramp_table[] = {
+ 
+ static int rk806_set_mode_dcdc(struct regulator_dev *rdev, unsigned int mode)
+ {
+-	int rid = rdev_get_id(rdev);
+-	int ctr_bit, reg;
++	unsigned int rid = rdev_get_id(rdev);
++	unsigned int ctr_bit, reg;
+ 
+ 	reg = RK806_POWER_FPWM_EN0 + rid / 8;
+ 	ctr_bit = rid % 8;
+diff --git a/drivers/rtc/rtc-pcf85063.c b/drivers/rtc/rtc-pcf85063.c
+index 905986c616559b..73848f764559b4 100644
+--- a/drivers/rtc/rtc-pcf85063.c
++++ b/drivers/rtc/rtc-pcf85063.c
+@@ -35,6 +35,7 @@
+ #define PCF85063_REG_CTRL1_CAP_SEL	BIT(0)
+ #define PCF85063_REG_CTRL1_STOP		BIT(5)
+ #define PCF85063_REG_CTRL1_EXT_TEST	BIT(7)
++#define PCF85063_REG_CTRL1_SWR		0x58
+ 
+ #define PCF85063_REG_CTRL2		0x01
+ #define PCF85063_CTRL2_AF		BIT(6)
+@@ -589,7 +590,7 @@ static int pcf85063_probe(struct i2c_client *client)
+ 
+ 	i2c_set_clientdata(client, pcf85063);
+ 
+-	err = regmap_read(pcf85063->regmap, PCF85063_REG_CTRL1, &tmp);
++	err = regmap_read(pcf85063->regmap, PCF85063_REG_SC, &tmp);
+ 	if (err) {
+ 		dev_err(&client->dev, "RTC chip is not present\n");
+ 		return err;
+@@ -599,6 +600,22 @@ static int pcf85063_probe(struct i2c_client *client)
+ 	if (IS_ERR(pcf85063->rtc))
+ 		return PTR_ERR(pcf85063->rtc);
+ 
++	/*
++	 * If a Power loss is detected, SW reset the device.
++	 * From PCF85063A datasheet:
++	 * There is a low probability that some devices will have corruption
++	 * of the registers after the automatic power-on reset...
++	 */
++	if (tmp & PCF85063_REG_SC_OS) {
++		dev_warn(&client->dev,
++			 "POR issue detected, sending a SW reset\n");
++		err = regmap_write(pcf85063->regmap, PCF85063_REG_CTRL1,
++				   PCF85063_REG_CTRL1_SWR);
++		if (err < 0)
++			dev_warn(&client->dev,
++				 "SW reset failed, trying to continue\n");
++	}
++
+ 	err = pcf85063_load_capacitance(pcf85063, client->dev.of_node,
+ 					config->force_cap_7000 ? 7000 : 0);
+ 	if (err < 0)
+diff --git a/drivers/s390/char/sclp_con.c b/drivers/s390/char/sclp_con.c
+index e5d947c763ea5d..6a030ba38bf360 100644
+--- a/drivers/s390/char/sclp_con.c
++++ b/drivers/s390/char/sclp_con.c
+@@ -263,6 +263,19 @@ static struct console sclp_console =
+ 	.index = 0 /* ttyS0 */
+ };
+ 
++/*
++ *  Release allocated pages.
++ */
++static void __init __sclp_console_free_pages(void)
++{
++	struct list_head *page, *p;
++
++	list_for_each_safe(page, p, &sclp_con_pages) {
++		list_del(page);
++		free_page((unsigned long)page);
++	}
++}
++
+ /*
+  * called by console_init() in drivers/char/tty_io.c at boot-time.
+  */
+@@ -282,6 +295,10 @@ sclp_console_init(void)
+ 	/* Allocate pages for output buffering */
+ 	for (i = 0; i < sclp_console_pages; i++) {
+ 		page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
++		if (!page) {
++			__sclp_console_free_pages();
++			return -ENOMEM;
++		}
+ 		list_add_tail(page, &sclp_con_pages);
+ 	}
+ 	sclp_conbuf = NULL;
+diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c
+index 892c18d2f87e90..d3edacb6ee148b 100644
+--- a/drivers/s390/char/sclp_tty.c
++++ b/drivers/s390/char/sclp_tty.c
+@@ -490,6 +490,17 @@ static const struct tty_operations sclp_ops = {
+ 	.flush_buffer = sclp_tty_flush_buffer,
+ };
+ 
++/* Release allocated pages. */
++static void __init __sclp_tty_free_pages(void)
++{
++	struct list_head *page, *p;
++
++	list_for_each_safe(page, p, &sclp_tty_pages) {
++		list_del(page);
++		free_page((unsigned long)page);
++	}
++}
++
+ static int __init
+ sclp_tty_init(void)
+ {
+@@ -516,6 +527,7 @@ sclp_tty_init(void)
+ 	for (i = 0; i < MAX_KMEM_PAGES; i++) {
+ 		page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ 		if (page == NULL) {
++			__sclp_tty_free_pages();
+ 			tty_driver_kref_put(driver);
+ 			return -ENOMEM;
+ 		}
+diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
+index ffd15fa4f9e596..e98e6b2b9f5700 100644
+--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
++++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
+@@ -912,8 +912,28 @@ static void hisi_sas_phyup_work_common(struct work_struct *work,
+ 		container_of(work, typeof(*phy), works[event]);
+ 	struct hisi_hba *hisi_hba = phy->hisi_hba;
+ 	struct asd_sas_phy *sas_phy = &phy->sas_phy;
++	struct asd_sas_port *sas_port = sas_phy->port;
++	struct hisi_sas_port *port = phy->port;
++	struct device *dev = hisi_hba->dev;
++	struct domain_device *port_dev;
+ 	int phy_no = sas_phy->id;
+ 
++	if (!test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags) &&
++	    sas_port && port && (port->id != phy->port_id)) {
++		dev_info(dev, "phy%d's hw port id changed from %d to %llu\n",
++				phy_no, port->id, phy->port_id);
++		port_dev = sas_port->port_dev;
++		if (port_dev && !dev_is_expander(port_dev->dev_type)) {
++			/*
++			 * Set the device state to gone to block
++			 * sending IO to the device.
++			 */
++			set_bit(SAS_DEV_GONE, &port_dev->state);
++			hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
++			return;
++		}
++	}
++
+ 	phy->wait_phyup_cnt = 0;
+ 	if (phy->identify.target_port_protocols == SAS_PROTOCOL_SSP)
+ 		hisi_hba->hw->sl_notify_ssp(hisi_hba, phy_no);
+diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c
+index ec5b1ab2871776..c0a372868e1d7f 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr_fw.c
++++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c
+@@ -563,7 +563,7 @@ int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
+ 		WRITE_ONCE(op_req_q->ci, le16_to_cpu(reply_desc->request_queue_ci));
+ 		mpi3mr_process_op_reply_desc(mrioc, reply_desc, &reply_dma,
+ 		    reply_qidx);
+-		atomic_dec(&op_reply_q->pend_ios);
++
+ 		if (reply_dma)
+ 			mpi3mr_repost_reply_buf(mrioc, reply_dma);
+ 		num_op_reply++;
+diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
+index ee2da8e49d4cfb..a9d6dac4133466 100644
+--- a/drivers/scsi/pm8001/pm8001_sas.c
++++ b/drivers/scsi/pm8001/pm8001_sas.c
+@@ -719,6 +719,7 @@ static void pm8001_dev_gone_notify(struct domain_device *dev)
+ 			spin_lock_irqsave(&pm8001_ha->lock, flags);
+ 		}
+ 		PM8001_CHIP_DISP->dereg_dev_req(pm8001_ha, device_id);
++		pm8001_ha->phy[pm8001_dev->attached_phy].phy_attached = 0;
+ 		pm8001_free_dev(pm8001_dev);
+ 	} else {
+ 		pm8001_dbg(pm8001_ha, DISC, "Found dev has gone.\n");
+diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
+index a77e0499b738a6..9d2db5bc8ee7ad 100644
+--- a/drivers/scsi/scsi.c
++++ b/drivers/scsi/scsi.c
+@@ -695,26 +695,23 @@ void scsi_cdl_check(struct scsi_device *sdev)
+  */
+ int scsi_cdl_enable(struct scsi_device *sdev, bool enable)
+ {
+-	struct scsi_mode_data data;
+-	struct scsi_sense_hdr sshdr;
+-	struct scsi_vpd *vpd;
+-	bool is_ata = false;
+ 	char buf[64];
++	bool is_ata;
+ 	int ret;
+ 
+ 	if (!sdev->cdl_supported)
+ 		return -EOPNOTSUPP;
+ 
+ 	rcu_read_lock();
+-	vpd = rcu_dereference(sdev->vpd_pg89);
+-	if (vpd)
+-		is_ata = true;
++	is_ata = rcu_dereference(sdev->vpd_pg89);
+ 	rcu_read_unlock();
+ 
+ 	/*
+ 	 * For ATA devices, CDL needs to be enabled with a SET FEATURES command.
+ 	 */
+ 	if (is_ata) {
++		struct scsi_mode_data data;
++		struct scsi_sense_hdr sshdr;
+ 		char *buf_data;
+ 		int len;
+ 
+@@ -723,16 +720,30 @@ int scsi_cdl_enable(struct scsi_device *sdev, bool enable)
+ 		if (ret)
+ 			return -EINVAL;
+ 
+-		/* Enable CDL using the ATA feature page */
++		/* Enable or disable CDL using the ATA feature page */
+ 		len = min_t(size_t, sizeof(buf),
+ 			    data.length - data.header_length -
+ 			    data.block_descriptor_length);
+ 		buf_data = buf + data.header_length +
+ 			data.block_descriptor_length;
+-		if (enable)
+-			buf_data[4] = 0x02;
+-		else
+-			buf_data[4] = 0;
++
++		/*
++		 * If we want to enable CDL and CDL is already enabled on the
++		 * device, do nothing. This avoids needlessly resetting the CDL
++		 * statistics on the device as that is implied by the CDL enable
++		 * action. Similar to this, there is no need to do anything if
++		 * we want to disable CDL and CDL is already disabled.
++		 */
++		if (enable) {
++			if ((buf_data[4] & 0x03) == 0x02)
++				goto out;
++			buf_data[4] &= ~0x03;
++			buf_data[4] |= 0x02;
++		} else {
++			if ((buf_data[4] & 0x03) == 0x00)
++				goto out;
++			buf_data[4] &= ~0x03;
++		}
+ 
+ 		ret = scsi_mode_select(sdev, 1, 0, buf_data, len, 5 * HZ, 3,
+ 				       &data, &sshdr);
+@@ -744,6 +755,7 @@ int scsi_cdl_enable(struct scsi_device *sdev, bool enable)
+ 		}
+ 	}
+ 
++out:
+ 	sdev->cdl_enable = enable;
+ 
+ 	return 0;
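[Note: scsi_cdl_enable() now performs a read-modify-write of only the two CDL mode bits and skips the MODE SELECT entirely when the device is already in the requested state, since a redundant enable would needlessly reset the device's CDL statistics. A small sketch of the bit handling — values mirror the hunk:

	#include <stdbool.h>
	#include <stdio.h>

	#define CDL_BITS   0x03
	#define CDL_T2A_EN 0x02        /* 0b10: CDL enabled */

	static bool cdl_update_byte(unsigned char *b, bool enable)
	{
		unsigned char want = enable ? CDL_T2A_EN : 0x00;

		if ((*b & CDL_BITS) == want)
			return false;          /* already in desired state */

		*b = (*b & ~CDL_BITS) | want;  /* keep the other six bits */
		return true;                   /* caller issues MODE SELECT */
	}

	int main(void)
	{
		unsigned char byte4 = 0xA0;    /* unrelated bits must survive */

		printf("enable: changed=%d byte=0x%02x\n",
		       cdl_update_byte(&byte4, true), byte4);
		printf("again:  changed=%d byte=0x%02x\n",
		       cdl_update_byte(&byte4, true), byte4);
		return 0;
	}
]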
+diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
+index 3023b07dc483b5..ce4b428b63f832 100644
+--- a/drivers/scsi/scsi_lib.c
++++ b/drivers/scsi/scsi_lib.c
+@@ -1237,8 +1237,12 @@ EXPORT_SYMBOL_GPL(scsi_alloc_request);
+  */
+ static void scsi_cleanup_rq(struct request *rq)
+ {
++	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
++
++	cmd->flags = 0;
++
+ 	if (rq->rq_flags & RQF_DONTPREP) {
+-		scsi_mq_uninit_cmd(blk_mq_rq_to_pdu(rq));
++		scsi_mq_uninit_cmd(cmd);
+ 		rq->rq_flags &= ~RQF_DONTPREP;
+ 	}
+ }
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index ca4bc0ac76adcf..8947dab132d789 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -1190,8 +1190,8 @@ static u8 sd_group_number(struct scsi_cmnd *cmd)
+ 	if (!sdkp->rscs)
+ 		return 0;
+ 
+-	return min3((u32)rq->write_hint, (u32)sdkp->permanent_stream_count,
+-		    0x3fu);
++	return min3((u32)rq->bio->bi_write_hint,
++		    (u32)sdkp->permanent_stream_count, 0x3fu);
+ }
+ 
+ static blk_status_t sd_setup_rw32_cmnd(struct scsi_cmnd *cmd, bool write,
+@@ -1389,7 +1389,7 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
+ 		ret = sd_setup_rw16_cmnd(cmd, write, lba, nr_blocks,
+ 					 protect | fua, dld);
+ 	} else if ((nr_blocks > 0xff) || (lba > 0x1fffff) ||
+-		   sdp->use_10_for_rw || protect || rq->write_hint) {
++		   sdp->use_10_for_rw || protect || rq->bio->bi_write_hint) {
+ 		ret = sd_setup_rw10_cmnd(cmd, write, lba, nr_blocks,
+ 					 protect | fua);
+ 	} else {
+diff --git a/drivers/soc/qcom/ice.c b/drivers/soc/qcom/ice.c
+index 50be7a9274a14e..9d89bfc50e8b86 100644
+--- a/drivers/soc/qcom/ice.c
++++ b/drivers/soc/qcom/ice.c
+@@ -11,6 +11,7 @@
+ #include <linux/cleanup.h>
+ #include <linux/clk.h>
+ #include <linux/delay.h>
++#include <linux/device.h>
+ #include <linux/iopoll.h>
+ #include <linux/of.h>
+ #include <linux/of_platform.h>
+@@ -324,6 +325,53 @@ struct qcom_ice *of_qcom_ice_get(struct device *dev)
+ }
+ EXPORT_SYMBOL_GPL(of_qcom_ice_get);
+ 
++static void qcom_ice_put(const struct qcom_ice *ice)
++{
++	struct platform_device *pdev = to_platform_device(ice->dev);
++
++	if (!platform_get_resource_byname(pdev, IORESOURCE_MEM, "ice"))
++		platform_device_put(pdev);
++}
++
++static void devm_of_qcom_ice_put(struct device *dev, void *res)
++{
++	qcom_ice_put(*(struct qcom_ice **)res);
++}
++
++/**
++ * devm_of_qcom_ice_get() - Devres managed helper to get an ICE instance from
++ * a DT node.
++ * @dev: device pointer for the consumer device.
++ *
++ * This function will provide an ICE instance either by creating one for the
++ * consumer device if its DT node provides the 'ice' reg range and the 'ice'
++ * clock (for legacy DT style). On the other hand, if consumer provides a
++ * phandle via 'qcom,ice' property to an ICE DT, the ICE instance will already
++ * be created and so this function will return that instead.
++ *
++ * Return: ICE pointer on success, NULL if there is no ICE data provided by the
++ * consumer or ERR_PTR() on error.
++ */
++struct qcom_ice *devm_of_qcom_ice_get(struct device *dev)
++{
++	struct qcom_ice *ice, **dr;
++
++	dr = devres_alloc(devm_of_qcom_ice_put, sizeof(*dr), GFP_KERNEL);
++	if (!dr)
++		return ERR_PTR(-ENOMEM);
++
++	ice = of_qcom_ice_get(dev);
++	if (!IS_ERR_OR_NULL(ice)) {
++		*dr = ice;
++		devres_add(dev, dr);
++	} else {
++		devres_free(dr);
++	}
++
++	return ice;
++}
++EXPORT_SYMBOL_GPL(devm_of_qcom_ice_get);
++
+ static int qcom_ice_probe(struct platform_device *pdev)
+ {
+ 	struct qcom_ice *engine;
+diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
+index 4c31d36f3130a9..810541eed213e2 100644
+--- a/drivers/spi/spi-imx.c
++++ b/drivers/spi/spi-imx.c
+@@ -1614,10 +1614,13 @@ static int spi_imx_transfer_one(struct spi_controller *controller,
+ 				struct spi_device *spi,
+ 				struct spi_transfer *transfer)
+ {
++	int ret;
+ 	struct spi_imx_data *spi_imx = spi_controller_get_devdata(spi->controller);
+ 	unsigned long hz_per_byte, byte_limit;
+ 
+-	spi_imx_setupxfer(spi, transfer);
++	ret = spi_imx_setupxfer(spi, transfer);
++	if (ret < 0)
++		return ret;
+ 	transfer->effective_speed_hz = spi_imx->spi_bus_clk;
+ 
+ 	/* flush rxfifo before transfer */
+diff --git a/drivers/spi/spi-tegra210-quad.c b/drivers/spi/spi-tegra210-quad.c
+index 43f11b0e9e765c..2d48ad844fb80b 100644
+--- a/drivers/spi/spi-tegra210-quad.c
++++ b/drivers/spi/spi-tegra210-quad.c
+@@ -1117,9 +1117,9 @@ static int tegra_qspi_combined_seq_xfer(struct tegra_qspi *tqspi,
+ 					(&tqspi->xfer_completion,
+ 					QSPI_DMA_TIMEOUT);
+ 
+-			if (WARN_ON(ret == 0)) {
+-				dev_err(tqspi->dev, "QSPI Transfer failed with timeout: %d\n",
+-					ret);
++			if (WARN_ON_ONCE(ret == 0)) {
++				dev_err_ratelimited(tqspi->dev,
++						    "QSPI Transfer failed with timeout\n");
+ 				if (tqspi->is_curr_dma_xfer &&
+ 				    (tqspi->cur_direction & DATA_DIR_TX))
+ 					dmaengine_terminate_all
+diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
+index a7c6919fbf9788..e1da433a9e7fbe 100644
+--- a/drivers/thunderbolt/tb.c
++++ b/drivers/thunderbolt/tb.c
+@@ -1295,11 +1295,15 @@ static void tb_scan_port(struct tb_port *port)
+ 		goto out_rpm_put;
+ 	}
+ 
+-	tb_retimer_scan(port, true);
+-
+ 	sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
+ 			     tb_downstream_route(port));
+ 	if (IS_ERR(sw)) {
++		/*
++		 * Make the downstream retimers available even if there
++		 * is no router connected.
++		 */
++		tb_retimer_scan(port, true);
++
+ 		/*
+ 		 * If there is an error accessing the connected switch
+ 		 * it may be connected to another domain. Also we allow
+@@ -1349,6 +1353,14 @@ static void tb_scan_port(struct tb_port *port)
+ 	upstream_port = tb_upstream_port(sw);
+ 	tb_configure_link(port, upstream_port, sw);
+ 
++	/*
++	 * Scan for downstream retimers. We only scan them after the
++	 * router has been enumerated to avoid issues with certain
++	 * Pluggable devices that expect the host to enumerate them
++	 * within certain timeout.
++	 */
++	tb_retimer_scan(port, true);
++
+ 	/*
+ 	 * CL0s and CL1 are enabled and supported together.
+ 	 * Silently ignore CLx enabling in case CLx is not supported.
+diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
+index 0a9c5219df88bf..9eeabedfb4d0ef 100644
+--- a/drivers/tty/serial/msm_serial.c
++++ b/drivers/tty/serial/msm_serial.c
+@@ -1746,6 +1746,12 @@ msm_serial_early_console_setup_dm(struct earlycon_device *device,
+ 	if (!device->port.membase)
+ 		return -ENODEV;
+ 
++	/* Disable DM / single-character modes */
++	msm_write(&device->port, 0, UARTDM_DMEN);
++	msm_write(&device->port, MSM_UART_CR_CMD_RESET_RX, MSM_UART_CR);
++	msm_write(&device->port, MSM_UART_CR_CMD_RESET_TX, MSM_UART_CR);
++	msm_write(&device->port, MSM_UART_CR_TX_ENABLE, MSM_UART_CR);
++
+ 	device->con->write = msm_serial_early_write_dm;
+ 	return 0;
+ }
+diff --git a/drivers/tty/serial/sifive.c b/drivers/tty/serial/sifive.c
+index cbfce65c9d221a..7fbb170cf0f431 100644
+--- a/drivers/tty/serial/sifive.c
++++ b/drivers/tty/serial/sifive.c
+@@ -563,8 +563,11 @@ static void sifive_serial_break_ctl(struct uart_port *port, int break_state)
+ static int sifive_serial_startup(struct uart_port *port)
+ {
+ 	struct sifive_serial_port *ssp = port_to_sifive_serial_port(port);
++	unsigned long flags;
+ 
++	uart_port_lock_irqsave(&ssp->port, &flags);
+ 	__ssp_enable_rxwm(ssp);
++	uart_port_unlock_irqrestore(&ssp->port, flags);
+ 
+ 	return 0;
+ }
+@@ -572,9 +575,12 @@ static int sifive_serial_startup(struct uart_port *port)
+ static void sifive_serial_shutdown(struct uart_port *port)
+ {
+ 	struct sifive_serial_port *ssp = port_to_sifive_serial_port(port);
++	unsigned long flags;
+ 
++	uart_port_lock_irqsave(&ssp->port, &flags);
+ 	__ssp_disable_rxwm(ssp);
+ 	__ssp_disable_txwm(ssp);
++	uart_port_unlock_irqrestore(&ssp->port, flags);
+ }
+ 
+ /**
+diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c
+index 0bd6544e30a6b3..791e2f1f7c0b65 100644
+--- a/drivers/tty/vt/selection.c
++++ b/drivers/tty/vt/selection.c
+@@ -193,13 +193,12 @@ int set_selection_user(const struct tiocl_selection __user *sel,
+ 		return -EFAULT;
+ 
+ 	/*
+-	 * TIOCL_SELCLEAR, TIOCL_SELPOINTER and TIOCL_SELMOUSEREPORT are OK to
+-	 * use without CAP_SYS_ADMIN as they do not modify the selection.
++	 * TIOCL_SELCLEAR and TIOCL_SELPOINTER are OK to use without
++	 * CAP_SYS_ADMIN as they do not modify the selection.
+ 	 */
+ 	switch (v.sel_mode) {
+ 	case TIOCL_SELCLEAR:
+ 	case TIOCL_SELPOINTER:
+-	case TIOCL_SELMOUSEREPORT:
+ 		break;
+ 	default:
+ 		if (!capable(CAP_SYS_ADMIN))
+diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
+index dba935c712d64b..45b04f3c377643 100644
+--- a/drivers/ufs/core/ufs-mcq.c
++++ b/drivers/ufs/core/ufs-mcq.c
+@@ -673,13 +673,6 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
+ 	unsigned long flags;
+ 	int err;
+ 
+-	if (!ufshcd_cmd_inflight(lrbp->cmd)) {
+-		dev_err(hba->dev,
+-			"%s: skip abort. cmd at tag %d already completed.\n",
+-			__func__, tag);
+-		return FAILED;
+-	}
+-
+ 	/* Skip the task abort if previous aborts failed, and report failure */
+ 	if (lrbp->req_abort_skip) {
+ 		dev_err(hba->dev, "%s: skip abort. tag %d failed earlier\n",
+@@ -688,6 +681,11 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
+ 	}
+ 
+ 	hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
++	if (!hwq) {
++		dev_err(hba->dev, "%s: skip abort. cmd at tag %d already completed.\n",
++			__func__, tag);
++		return FAILED;
++	}
+ 
+ 	if (ufshcd_mcq_sqe_search(hba, hwq, tag)) {
+ 		/*
+diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
+index 89fc0b5662919b..8d4a5b8371b63d 100644
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -5689,6 +5689,8 @@ static void ufshcd_mcq_compl_pending_transfer(struct ufs_hba *hba,
+ 			continue;
+ 
+ 		hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
++		if (!hwq)
++			continue;
+ 
+ 		if (force_compl) {
+ 			ufshcd_mcq_compl_all_cqes_lock(hba, hwq);
+diff --git a/drivers/ufs/host/ufs-exynos.c b/drivers/ufs/host/ufs-exynos.c
+index f2cbfc2d399cdb..5ba17ccf6417fe 100644
+--- a/drivers/ufs/host/ufs-exynos.c
++++ b/drivers/ufs/host/ufs-exynos.c
+@@ -34,7 +34,7 @@
+  * Exynos's Vendor specific registers for UFSHCI
+  */
+ #define HCI_TXPRDT_ENTRY_SIZE	0x00
+-#define PRDT_PREFECT_EN		BIT(31)
++#define PRDT_PREFETCH_EN	BIT(31)
+ #define HCI_RXPRDT_ENTRY_SIZE	0x04
+ #define HCI_1US_TO_CNT_VAL	0x0C
+ #define CNT_VAL_1US_MASK	0x3FF
+@@ -86,11 +86,16 @@
+ 				 UIC_TRANSPORT_NO_CONNECTION_RX |\
+ 				 UIC_TRANSPORT_BAD_TC)
+ 
+-/* FSYS UFS Shareability */
+-#define UFS_WR_SHARABLE		BIT(2)
+-#define UFS_RD_SHARABLE		BIT(1)
+-#define UFS_SHARABLE		(UFS_WR_SHARABLE | UFS_RD_SHARABLE)
+-#define UFS_SHAREABILITY_OFFSET	0x710
++/* UFS Shareability */
++#define UFS_EXYNOSAUTO_WR_SHARABLE	BIT(2)
++#define UFS_EXYNOSAUTO_RD_SHARABLE	BIT(1)
++#define UFS_EXYNOSAUTO_SHARABLE		(UFS_EXYNOSAUTO_WR_SHARABLE | \
++					 UFS_EXYNOSAUTO_RD_SHARABLE)
++#define UFS_GS101_WR_SHARABLE		BIT(1)
++#define UFS_GS101_RD_SHARABLE		BIT(0)
++#define UFS_GS101_SHARABLE		(UFS_GS101_WR_SHARABLE | \
++					 UFS_GS101_RD_SHARABLE)
++#define UFS_SHAREABILITY_OFFSET		0x710
+ 
+ /* Multi-host registers */
+ #define MHCTRL			0xC4
+@@ -198,20 +203,15 @@ static inline void exynos_ufs_ungate_clks(struct exynos_ufs *ufs)
+ 	exynos_ufs_ctrl_clkstop(ufs, false);
+ }
+ 
+-static int exynos7_ufs_drv_init(struct device *dev, struct exynos_ufs *ufs)
+-{
+-	return 0;
+-}
+-
+-static int exynosauto_ufs_drv_init(struct device *dev, struct exynos_ufs *ufs)
++static int exynos_ufs_shareability(struct exynos_ufs *ufs)
+ {
+ 	struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;
+ 
+ 	/* IO Coherency setting */
+ 	if (ufs->sysreg) {
+ 		return regmap_update_bits(ufs->sysreg,
+-					  ufs->shareability_reg_offset,
+-					  UFS_SHARABLE, UFS_SHARABLE);
++					  ufs->iocc_offset,
++					  ufs->iocc_mask, ufs->iocc_val);
+ 	}
+ 
+ 	attr->tx_dif_p_nsec = 3200000;
+@@ -219,6 +219,21 @@ static int exynosauto_ufs_drv_init(struct device *dev, struct exynos_ufs *ufs)
+ 	return 0;
+ }
+ 
++static int gs101_ufs_drv_init(struct exynos_ufs *ufs)
++{
++	struct ufs_hba *hba = ufs->hba;
++
++	/* Enable WriteBooster */
++	hba->caps |= UFSHCD_CAP_WB_EN;
++
++	return exynos_ufs_shareability(ufs);
++}
++
++static int exynosauto_ufs_drv_init(struct exynos_ufs *ufs)
++{
++	return exynos_ufs_shareability(ufs);
++}
++
+ static int exynosauto_ufs_post_hce_enable(struct exynos_ufs *ufs)
+ {
+ 	struct ufs_hba *hba = ufs->hba;
+@@ -1013,9 +1028,14 @@ static int exynos_ufs_pre_link(struct ufs_hba *hba)
+ 	exynos_ufs_config_intr(ufs, DFES_DEF_L4_ERRS, UNIPRO_L4);
+ 	exynos_ufs_set_unipro_pclk_div(ufs);
+ 
++	exynos_ufs_setup_clocks(hba, true, PRE_CHANGE);
++
+ 	/* unipro */
+ 	exynos_ufs_config_unipro(ufs);
+ 
++	if (ufs->drv_data->pre_link)
++		ufs->drv_data->pre_link(ufs);
++
+ 	/* m-phy */
+ 	exynos_ufs_phy_init(ufs);
+ 	if (!(ufs->opts & EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR)) {
+@@ -1023,11 +1043,6 @@ static int exynos_ufs_pre_link(struct ufs_hba *hba)
+ 		exynos_ufs_config_phy_cap_attr(ufs);
+ 	}
+ 
+-	exynos_ufs_setup_clocks(hba, true, PRE_CHANGE);
+-
+-	if (ufs->drv_data->pre_link)
+-		ufs->drv_data->pre_link(ufs);
+-
+ 	return 0;
+ }
+ 
+@@ -1051,12 +1066,17 @@ static int exynos_ufs_post_link(struct ufs_hba *hba)
+ 	struct exynos_ufs *ufs = ufshcd_get_variant(hba);
+ 	struct phy *generic_phy = ufs->phy;
+ 	struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;
++	u32 val = ilog2(DATA_UNIT_SIZE);
+ 
+ 	exynos_ufs_establish_connt(ufs);
+ 	exynos_ufs_fit_aggr_timeout(ufs);
+ 
+ 	hci_writel(ufs, 0xa, HCI_DATA_REORDER);
+-	hci_writel(ufs, ilog2(DATA_UNIT_SIZE), HCI_TXPRDT_ENTRY_SIZE);
++
++	if (hba->caps & UFSHCD_CAP_CRYPTO)
++		val |= PRDT_PREFETCH_EN;
++	hci_writel(ufs, val, HCI_TXPRDT_ENTRY_SIZE);
++
+ 	hci_writel(ufs, ilog2(DATA_UNIT_SIZE), HCI_RXPRDT_ENTRY_SIZE);
+ 	hci_writel(ufs, (1 << hba->nutrs) - 1, HCI_UTRL_NEXUS_TYPE);
+ 	hci_writel(ufs, (1 << hba->nutmrs) - 1, HCI_UTMRL_NEXUS_TYPE);
+@@ -1132,12 +1152,22 @@ static int exynos_ufs_parse_dt(struct device *dev, struct exynos_ufs *ufs)
+ 		ufs->sysreg = NULL;
+ 	else {
+ 		if (of_property_read_u32_index(np, "samsung,sysreg", 1,
+-					       &ufs->shareability_reg_offset)) {
++					       &ufs->iocc_offset)) {
+ 			dev_warn(dev, "can't get an offset from sysreg. Set to default value\n");
+-			ufs->shareability_reg_offset = UFS_SHAREABILITY_OFFSET;
++			ufs->iocc_offset = UFS_SHAREABILITY_OFFSET;
+ 		}
+ 	}
+ 
++	ufs->iocc_mask = ufs->drv_data->iocc_mask;
++	/*
++	 * No 'dma-coherent' property means the descriptors are
++	 * non-cacheable, so iocc shareability should be disabled.
++	 */
++	if (of_dma_is_coherent(dev->of_node))
++		ufs->iocc_val = ufs->iocc_mask;
++	else
++		ufs->iocc_val = 0;
++
+ 	ufs->pclk_avail_min = PCLK_AVAIL_MIN;
+ 	ufs->pclk_avail_max = PCLK_AVAIL_MAX;
+ 
+@@ -1438,7 +1468,7 @@ static int exynos_ufs_init(struct ufs_hba *hba)
+ 	exynos_ufs_fmp_init(hba, ufs);
+ 
+ 	if (ufs->drv_data->drv_init) {
+-		ret = ufs->drv_data->drv_init(dev, ufs);
++		ret = ufs->drv_data->drv_init(ufs);
+ 		if (ret) {
+ 			dev_err(dev, "failed to init drv-data\n");
+ 			goto out;
+@@ -1460,6 +1490,14 @@ static int exynos_ufs_init(struct ufs_hba *hba)
+ 	return ret;
+ }
+ 
++static void exynos_ufs_exit(struct ufs_hba *hba)
++{
++	struct exynos_ufs *ufs = ufshcd_get_variant(hba);
++
++	phy_power_off(ufs->phy);
++	phy_exit(ufs->phy);
++}
++
+ static int exynos_ufs_host_reset(struct ufs_hba *hba)
+ {
+ 	struct exynos_ufs *ufs = ufshcd_get_variant(hba);
+@@ -1649,6 +1687,12 @@ static void exynos_ufs_hibern8_notify(struct ufs_hba *hba,
+ 	}
+ }
+ 
++static int gs101_ufs_suspend(struct exynos_ufs *ufs)
++{
++	hci_writel(ufs, 0 << 0, HCI_GPIO_OUT);
++	return 0;
++}
++
+ static int exynos_ufs_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
+ 	enum ufs_notify_change_status status)
+ {
+@@ -1657,6 +1701,9 @@ static int exynos_ufs_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
+ 	if (status == PRE_CHANGE)
+ 		return 0;
+ 
++	if (ufs->drv_data->suspend)
++		ufs->drv_data->suspend(ufs);
++
+ 	if (!ufshcd_is_link_active(hba))
+ 		phy_power_off(ufs->phy);
+ 
+@@ -1928,6 +1975,7 @@ static int gs101_ufs_pre_pwr_change(struct exynos_ufs *ufs,
+ static const struct ufs_hba_variant_ops ufs_hba_exynos_ops = {
+ 	.name				= "exynos_ufs",
+ 	.init				= exynos_ufs_init,
++	.exit				= exynos_ufs_exit,
+ 	.hce_enable_notify		= exynos_ufs_hce_enable_notify,
+ 	.link_startup_notify		= exynos_ufs_link_startup_notify,
+ 	.pwr_change_notify		= exynos_ufs_pwr_change_notify,
+@@ -1966,13 +2014,7 @@ static int exynos_ufs_probe(struct platform_device *pdev)
+ 
+ static void exynos_ufs_remove(struct platform_device *pdev)
+ {
+-	struct ufs_hba *hba =  platform_get_drvdata(pdev);
+-	struct exynos_ufs *ufs = ufshcd_get_variant(hba);
+-
+ 	ufshcd_pltfrm_remove(pdev);
+-
+-	phy_power_off(ufs->phy);
+-	phy_exit(ufs->phy);
+ }
+ 
+ static struct exynos_ufs_uic_attr exynos7_uic_attr = {
+@@ -2011,6 +2053,7 @@ static const struct exynos_ufs_drv_data exynosauto_ufs_drvs = {
+ 	.opts			= EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL |
+ 				  EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR |
+ 				  EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX,
++	.iocc_mask		= UFS_EXYNOSAUTO_SHARABLE,
+ 	.drv_init		= exynosauto_ufs_drv_init,
+ 	.post_hce_enable	= exynosauto_ufs_post_hce_enable,
+ 	.pre_link		= exynosauto_ufs_pre_link,
+@@ -2044,7 +2087,6 @@ static const struct exynos_ufs_drv_data exynos_ufs_drvs = {
+ 				  EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX |
+ 				  EXYNOS_UFS_OPT_SKIP_CONNECTION_ESTAB |
+ 				  EXYNOS_UFS_OPT_USE_SW_HIBERN8_TIMER,
+-	.drv_init		= exynos7_ufs_drv_init,
+ 	.pre_link		= exynos7_ufs_pre_link,
+ 	.post_link		= exynos7_ufs_post_link,
+ 	.pre_pwr_change		= exynos7_ufs_pre_pwr_change,
+@@ -2134,10 +2176,12 @@ static const struct exynos_ufs_drv_data gs101_ufs_drvs = {
+ 				  EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR |
+ 				  EXYNOS_UFS_OPT_UFSPR_SECURE |
+ 				  EXYNOS_UFS_OPT_TIMER_TICK_SELECT,
+-	.drv_init		= exynosauto_ufs_drv_init,
++	.iocc_mask		= UFS_GS101_SHARABLE,
++	.drv_init		= gs101_ufs_drv_init,
+ 	.pre_link		= gs101_ufs_pre_link,
+ 	.post_link		= gs101_ufs_post_link,
+ 	.pre_pwr_change		= gs101_ufs_pre_pwr_change,
++	.suspend		= gs101_ufs_suspend,
+ };
+ 
+ static const struct of_device_id exynos_ufs_of_match[] = {
+diff --git a/drivers/ufs/host/ufs-exynos.h b/drivers/ufs/host/ufs-exynos.h
+index 1646c4a9bb088d..3c6fe5132190ab 100644
+--- a/drivers/ufs/host/ufs-exynos.h
++++ b/drivers/ufs/host/ufs-exynos.h
+@@ -181,8 +181,9 @@ struct exynos_ufs_drv_data {
+ 	struct exynos_ufs_uic_attr *uic_attr;
+ 	unsigned int quirks;
+ 	unsigned int opts;
++	u32 iocc_mask;
+ 	/* SoC's specific operations */
+-	int (*drv_init)(struct device *dev, struct exynos_ufs *ufs);
++	int (*drv_init)(struct exynos_ufs *ufs);
+ 	int (*pre_link)(struct exynos_ufs *ufs);
+ 	int (*post_link)(struct exynos_ufs *ufs);
+ 	int (*pre_pwr_change)(struct exynos_ufs *ufs,
+@@ -191,6 +192,7 @@ struct exynos_ufs_drv_data {
+ 				struct ufs_pa_layer_attr *pwr);
+ 	int (*pre_hce_enable)(struct exynos_ufs *ufs);
+ 	int (*post_hce_enable)(struct exynos_ufs *ufs);
++	int (*suspend)(struct exynos_ufs *ufs);
+ };
+ 
+ struct ufs_phy_time_cfg {
+@@ -230,7 +232,9 @@ struct exynos_ufs {
+ 	ktime_t entry_hibern8_t;
+ 	const struct exynos_ufs_drv_data *drv_data;
+ 	struct regmap *sysreg;
+-	u32 shareability_reg_offset;
++	u32 iocc_offset;
++	u32 iocc_mask;
++	u32 iocc_val;
+ 
+ 	u32 opts;
+ #define EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL		BIT(0)
+diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
+index e12c5f9f795638..4557b1bcd6356b 100644
+--- a/drivers/ufs/host/ufs-qcom.c
++++ b/drivers/ufs/host/ufs-qcom.c
+@@ -118,7 +118,7 @@ static int ufs_qcom_ice_init(struct ufs_qcom_host *host)
+ 	struct device *dev = hba->dev;
+ 	struct qcom_ice *ice;
+ 
+-	ice = of_qcom_ice_get(dev);
++	ice = devm_of_qcom_ice_get(dev);
+ 	if (ice == ERR_PTR(-EOPNOTSUPP)) {
+ 		dev_warn(dev, "Disabling inline encryption support\n");
+ 		ice = NULL;
+diff --git a/drivers/usb/cdns3/cdns3-gadget.c b/drivers/usb/cdns3/cdns3-gadget.c
+index fd1beb10bba726..19101ff1cf1bd7 100644
+--- a/drivers/usb/cdns3/cdns3-gadget.c
++++ b/drivers/usb/cdns3/cdns3-gadget.c
+@@ -1963,6 +1963,7 @@ static irqreturn_t cdns3_device_thread_irq_handler(int irq, void *data)
+ 	unsigned int bit;
+ 	unsigned long reg;
+ 
++	local_bh_disable();
+ 	spin_lock_irqsave(&priv_dev->lock, flags);
+ 
+ 	reg = readl(&priv_dev->regs->usb_ists);
+@@ -2004,6 +2005,7 @@ static irqreturn_t cdns3_device_thread_irq_handler(int irq, void *data)
+ irqend:
+ 	writel(~0, &priv_dev->regs->ep_ien);
+ 	spin_unlock_irqrestore(&priv_dev->lock, flags);
++	local_bh_enable();
+ 
+ 	return ret;
+ }
+diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
+index 46d1a4428b9a98..2174f6e1f82aa6 100644
+--- a/drivers/usb/chipidea/ci_hdrc_imx.c
++++ b/drivers/usb/chipidea/ci_hdrc_imx.c
+@@ -336,6 +336,13 @@ static int ci_hdrc_imx_notify_event(struct ci_hdrc *ci, unsigned int event)
+ 	return ret;
+ }
+ 
++static void ci_hdrc_imx_disable_regulator(void *arg)
++{
++	struct ci_hdrc_imx_data *data = arg;
++
++	regulator_disable(data->hsic_pad_regulator);
++}
++
+ static int ci_hdrc_imx_probe(struct platform_device *pdev)
+ {
+ 	struct ci_hdrc_imx_data *data;
+@@ -394,6 +401,13 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
+ 					"Failed to enable HSIC pad regulator\n");
+ 				goto err_put;
+ 			}
++			ret = devm_add_action_or_reset(dev,
++					ci_hdrc_imx_disable_regulator, data);
++			if (ret) {
++				dev_err(dev,
++					"Failed to add regulator devm action\n");
++				goto err_put;
++			}
+ 		}
+ 	}
+ 
+@@ -432,11 +446,11 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
+ 
+ 	ret = imx_get_clks(dev);
+ 	if (ret)
+-		goto disable_hsic_regulator;
++		goto qos_remove_request;
+ 
+ 	ret = imx_prepare_enable_clks(dev);
+ 	if (ret)
+-		goto disable_hsic_regulator;
++		goto qos_remove_request;
+ 
+ 	ret = clk_prepare_enable(data->clk_wakeup);
+ 	if (ret)
+@@ -470,7 +484,11 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
+ 	    of_usb_get_phy_mode(np) == USBPHY_INTERFACE_MODE_ULPI) {
+ 		pdata.flags |= CI_HDRC_OVERRIDE_PHY_CONTROL;
+ 		data->override_phy_control = true;
+-		usb_phy_init(pdata.usb_phy);
++		ret = usb_phy_init(pdata.usb_phy);
++		if (ret) {
++			dev_err(dev, "Failed to init phy\n");
++			goto err_clk;
++		}
+ 	}
+ 
+ 	if (pdata.flags & CI_HDRC_SUPPORTS_RUNTIME_PM)
+@@ -479,7 +497,7 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
+ 	ret = imx_usbmisc_init(data->usbmisc_data);
+ 	if (ret) {
+ 		dev_err(dev, "usbmisc init failed, ret=%d\n", ret);
+-		goto err_clk;
++		goto phy_shutdown;
+ 	}
+ 
+ 	data->ci_pdev = ci_hdrc_add_device(dev,
+@@ -488,7 +506,7 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
+ 	if (IS_ERR(data->ci_pdev)) {
+ 		ret = PTR_ERR(data->ci_pdev);
+ 		dev_err_probe(dev, ret, "ci_hdrc_add_device failed\n");
+-		goto err_clk;
++		goto phy_shutdown;
+ 	}
+ 
+ 	if (data->usbmisc_data) {
+@@ -522,19 +540,20 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
+ 
+ disable_device:
+ 	ci_hdrc_remove_device(data->ci_pdev);
++phy_shutdown:
++	if (data->override_phy_control)
++		usb_phy_shutdown(data->phy);
+ err_clk:
+ 	clk_disable_unprepare(data->clk_wakeup);
+ err_wakeup_clk:
+ 	imx_disable_unprepare_clks(dev);
+-disable_hsic_regulator:
+-	if (data->hsic_pad_regulator)
+-		/* don't overwrite original ret (cf. EPROBE_DEFER) */
+-		regulator_disable(data->hsic_pad_regulator);
++qos_remove_request:
+ 	if (pdata.flags & CI_HDRC_PMQOS)
+ 		cpu_latency_qos_remove_request(&data->pm_qos_req);
+ 	data->ci_pdev = NULL;
+ err_put:
+-	put_device(data->usbmisc_data->dev);
++	if (data->usbmisc_data)
++		put_device(data->usbmisc_data->dev);
+ 	return ret;
+ }
+ 
+@@ -556,10 +575,9 @@ static void ci_hdrc_imx_remove(struct platform_device *pdev)
+ 		clk_disable_unprepare(data->clk_wakeup);
+ 		if (data->plat_data->flags & CI_HDRC_PMQOS)
+ 			cpu_latency_qos_remove_request(&data->pm_qos_req);
+-		if (data->hsic_pad_regulator)
+-			regulator_disable(data->hsic_pad_regulator);
+ 	}
+-	put_device(data->usbmisc_data->dev);
++	if (data->usbmisc_data)
++		put_device(data->usbmisc_data->dev);
+ }
+ 
+ static void ci_hdrc_imx_shutdown(struct platform_device *pdev)
+diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
+index 86ee39db013f39..16e7fa4d488d37 100644
+--- a/drivers/usb/class/cdc-wdm.c
++++ b/drivers/usb/class/cdc-wdm.c
+@@ -726,7 +726,7 @@ static int wdm_open(struct inode *inode, struct file *file)
+ 		rv = -EBUSY;
+ 		goto out;
+ 	}
+-
++	smp_rmb(); /* ordered against wdm_wwan_port_stop() */
+ 	rv = usb_autopm_get_interface(desc->intf);
+ 	if (rv < 0) {
+ 		dev_err(&desc->intf->dev, "Error autopm - %d\n", rv);
+@@ -829,6 +829,7 @@ static struct usb_class_driver wdm_class = {
+ static int wdm_wwan_port_start(struct wwan_port *port)
+ {
+ 	struct wdm_device *desc = wwan_port_get_drvdata(port);
++	int rv;
+ 
+ 	/* The interface is both exposed via the WWAN framework and as a
+ 	 * legacy usbmisc chardev. If chardev is already open, just fail
+@@ -848,7 +849,15 @@ static int wdm_wwan_port_start(struct wwan_port *port)
+ 	wwan_port_txon(port);
+ 
+ 	/* Start getting events */
+-	return usb_submit_urb(desc->validity, GFP_KERNEL);
++	rv = usb_submit_urb(desc->validity, GFP_KERNEL);
++	if (rv < 0) {
++		wwan_port_txoff(port);
++		desc->manage_power(desc->intf, 0);
++		/* this must be last lest we race with chardev open */
++		clear_bit(WDM_WWAN_IN_USE, &desc->flags);
++	}
++
++	return rv;
+ }
+ 
+ static void wdm_wwan_port_stop(struct wwan_port *port)
+@@ -859,8 +868,10 @@ static void wdm_wwan_port_stop(struct wwan_port *port)
+ 	poison_urbs(desc);
+ 	desc->manage_power(desc->intf, 0);
+ 	clear_bit(WDM_READ, &desc->flags);
+-	clear_bit(WDM_WWAN_IN_USE, &desc->flags);
+ 	unpoison_urbs(desc);
++	smp_wmb(); /* ordered against wdm_open() */
++	/* this must be last lest we open a poisoned device */
++	clear_bit(WDM_WWAN_IN_USE, &desc->flags);
+ }
+ 
+ static void wdm_wwan_port_tx_complete(struct urb *urb)
+@@ -868,7 +879,7 @@ static void wdm_wwan_port_tx_complete(struct urb *urb)
+ 	struct sk_buff *skb = urb->context;
+ 	struct wdm_device *desc = skb_shinfo(skb)->destructor_arg;
+ 
+-	usb_autopm_put_interface(desc->intf);
++	usb_autopm_put_interface_async(desc->intf);
+ 	wwan_port_txon(desc->wwanp);
+ 	kfree_skb(skb);
+ }
+@@ -898,7 +909,7 @@ static int wdm_wwan_port_tx(struct wwan_port *port, struct sk_buff *skb)
+ 	req->bRequestType = (USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE);
+ 	req->bRequest = USB_CDC_SEND_ENCAPSULATED_COMMAND;
+ 	req->wValue = 0;
+-	req->wIndex = desc->inum;
++	req->wIndex = desc->inum; /* already converted */
+ 	req->wLength = cpu_to_le16(skb->len);
+ 
+ 	skb_shinfo(skb)->destructor_arg = desc;
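/*
 * Minimal userspace sketch (C11 atomics; hypothetical names, not the
 * driver's API) of the ordering the smp_wmb()/smp_rmb() pair above
 * relies on: the stop path must finish tearing the port down before it
 * clears WDM_WWAN_IN_USE, and an open that sees the flag clear must
 * also see the completed teardown.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool in_use = true;	/* stands in for WDM_WWAN_IN_USE */
static int port_live = 1;		/* state the stop path tears down */

static void port_stop(void)
{
	port_live = 0;
	/* release store: the teardown is visible before the flag clears */
	atomic_store_explicit(&in_use, false, memory_order_release);
}

static int chardev_open(void)
{
	/* acquire load pairs with the release store in port_stop() */
	if (atomic_load_explicit(&in_use, memory_order_acquire))
		return -1;	/* -EBUSY in the driver */
	return port_live;	/* guaranteed to read 0 here */
}

int main(void)
{
	port_stop();
	printf("open -> %d\n", chardev_open());	/* prints 0 */
	return 0;
}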
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 6926bd639ec6ff..4903c733d37ae7 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -369,6 +369,9 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	{ USB_DEVICE(0x0781, 0x5583), .driver_info = USB_QUIRK_NO_LPM },
+ 	{ USB_DEVICE(0x0781, 0x5591), .driver_info = USB_QUIRK_NO_LPM },
+ 
++	/* SanDisk Corp. SanDisk 3.2Gen1 */
++	{ USB_DEVICE(0x0781, 0x55a3), .driver_info = USB_QUIRK_DELAY_INIT },
++
+ 	/* Realforce 87U Keyboard */
+ 	{ USB_DEVICE(0x0853, 0x011b), .driver_info = USB_QUIRK_NO_LPM },
+ 
+@@ -383,6 +386,9 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	{ USB_DEVICE(0x0904, 0x6103), .driver_info =
+ 			USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL },
+ 
++	/* Silicon Motion Flash Drive */
++	{ USB_DEVICE(0x090c, 0x1000), .driver_info = USB_QUIRK_DELAY_INIT },
++
+ 	/* Sound Devices USBPre2 */
+ 	{ USB_DEVICE(0x0926, 0x0202), .driver_info =
+ 			USB_QUIRK_ENDPOINT_IGNORE },
+@@ -536,6 +542,9 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	{ USB_DEVICE(0x2040, 0x7200), .driver_info =
+ 			USB_QUIRK_CONFIG_INTF_STRINGS },
+ 
++	/* VLI disk */
++	{ USB_DEVICE(0x2109, 0x0711), .driver_info = USB_QUIRK_NO_LPM },
++
+ 	/* Raydium Touchscreen */
+ 	{ USB_DEVICE(0x2386, 0x3114), .driver_info = USB_QUIRK_NO_LPM },
+ 
+diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
+index 052852f8014676..54a4ee2b90b7f4 100644
+--- a/drivers/usb/dwc3/dwc3-pci.c
++++ b/drivers/usb/dwc3/dwc3-pci.c
+@@ -148,11 +148,21 @@ static const struct property_entry dwc3_pci_intel_byt_properties[] = {
+ 	{}
+ };
+ 
++/*
++ * Intel Merrifield SoC uses these endpoints for tracing, and they cannot
++ * be reallocated while in use because the sideband flow control signals
++ * are hard-wired to certain endpoints:
++ * - 1 High BW Bulk IN (IN#1) (RTIT)
++ * - 1 1KB BW Bulk IN (IN#8) + 1 1KB BW Bulk OUT (Run Control) (OUT#8)
++ */
++static const u8 dwc3_pci_mrfld_reserved_endpoints[] = { 3, 16, 17 };
++
+ static const struct property_entry dwc3_pci_mrfld_properties[] = {
+ 	PROPERTY_ENTRY_STRING("dr_mode", "otg"),
+ 	PROPERTY_ENTRY_STRING("linux,extcon-name", "mrfld_bcove_pwrsrc"),
+ 	PROPERTY_ENTRY_BOOL("snps,dis_u3_susphy_quirk"),
+ 	PROPERTY_ENTRY_BOOL("snps,dis_u2_susphy_quirk"),
++	PROPERTY_ENTRY_U8_ARRAY("snps,reserved-endpoints", dwc3_pci_mrfld_reserved_endpoints),
+ 	PROPERTY_ENTRY_BOOL("snps,usb2-gadget-lpm-disable"),
+ 	PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
+ 	{}
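/*
 * A quick standalone check (not part of the patch) of the endpoint
 * index encoding behind the reserved-endpoints list above: dwc3 is
 * assumed to number endpoints as (ep_number * 2) + direction, with
 * OUT = 0 and IN = 1, so { 3, 16, 17 } maps to IN#1, OUT#8 and IN#8.
 */
#include <stdio.h>

int main(void)
{
	static const unsigned char reserved[] = { 3, 16, 17 };

	for (unsigned int i = 0; i < sizeof(reserved); i++) {
		unsigned int idx = reserved[i];

		printf("index %2u -> EP%u %s\n", idx, idx / 2,
		       (idx & 1) ? "IN" : "OUT");
	}
	return 0;	/* prints EP1 IN, EP8 OUT, EP8 IN */
}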
+diff --git a/drivers/usb/dwc3/dwc3-xilinx.c b/drivers/usb/dwc3/dwc3-xilinx.c
+index 96c87dc4757f22..47e891c9233770 100644
+--- a/drivers/usb/dwc3/dwc3-xilinx.c
++++ b/drivers/usb/dwc3/dwc3-xilinx.c
+@@ -207,15 +207,13 @@ static int dwc3_xlnx_init_zynqmp(struct dwc3_xlnx *priv_data)
+ 
+ skip_usb3_phy:
+ 	/* ulpi reset via gpio-modepin or gpio-framework driver */
+-	reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
++	reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ 	if (IS_ERR(reset_gpio)) {
+ 		return dev_err_probe(dev, PTR_ERR(reset_gpio),
+ 				     "Failed to request reset GPIO\n");
+ 	}
+ 
+ 	if (reset_gpio) {
+-		/* Toggle ulpi to reset the phy. */
+-		gpiod_set_value_cansleep(reset_gpio, 1);
+ 		usleep_range(5000, 10000);
+ 		gpiod_set_value_cansleep(reset_gpio, 0);
+ 		usleep_range(5000, 10000);
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index 309a871453bfad..e72bac650981de 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -548,6 +548,7 @@ static int dwc3_gadget_set_xfer_resource(struct dwc3_ep *dep)
+ int dwc3_gadget_start_config(struct dwc3 *dwc, unsigned int resource_index)
+ {
+ 	struct dwc3_gadget_ep_cmd_params params;
++	struct dwc3_ep		*dep;
+ 	u32			cmd;
+ 	int			i;
+ 	int			ret;
+@@ -564,8 +565,13 @@ int dwc3_gadget_start_config(struct dwc3 *dwc, unsigned int resource_index)
+ 		return ret;
+ 
+ 	/* Reset resource allocation flags */
+-	for (i = resource_index; i < dwc->num_eps && dwc->eps[i]; i++)
+-		dwc->eps[i]->flags &= ~DWC3_EP_RESOURCE_ALLOCATED;
++	for (i = resource_index; i < dwc->num_eps; i++) {
++		dep = dwc->eps[i];
++		if (!dep)
++			continue;
++
++		dep->flags &= ~DWC3_EP_RESOURCE_ALLOCATED;
++	}
+ 
+ 	return 0;
+ }
+@@ -752,9 +758,11 @@ void dwc3_gadget_clear_tx_fifos(struct dwc3 *dwc)
+ 
+ 	dwc->last_fifo_depth = fifo_depth;
+ 	/* Clear existing TXFIFO for all IN eps except ep0 */
+-	for (num = 3; num < min_t(int, dwc->num_eps, DWC3_ENDPOINTS_NUM);
+-	     num += 2) {
++	for (num = 3; num < min_t(int, dwc->num_eps, DWC3_ENDPOINTS_NUM); num += 2) {
+ 		dep = dwc->eps[num];
++		if (!dep)
++			continue;
++
+ 		/* Don't change TXFRAMNUM on usb31 version */
+ 		size = DWC3_IP_IS(DWC3) ? 0 :
+ 			dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(num >> 1)) &
+@@ -3670,6 +3678,8 @@ static bool dwc3_gadget_endpoint_trbs_complete(struct dwc3_ep *dep,
+ 
+ 		for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
+ 			dep = dwc->eps[i];
++			if (!dep)
++				continue;
+ 
+ 			if (!(dep->flags & DWC3_EP_ENABLED))
+ 				continue;
+@@ -3858,6 +3868,10 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
+ 	u8			epnum = event->endpoint_number;
+ 
+ 	dep = dwc->eps[epnum];
++	if (!dep) {
++		dev_warn(dwc->dev, "spurious event, endpoint %u is not allocated\n", epnum);
++		return;
++	}
+ 
+ 	if (!(dep->flags & DWC3_EP_ENABLED)) {
+ 		if ((epnum > 1) && !(dep->flags & DWC3_EP_TRANSFER_STARTED))
+@@ -4570,6 +4584,12 @@ static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt)
+ 	if (!count)
+ 		return IRQ_NONE;
+ 
++	if (count > evt->length) {
++		dev_err_ratelimited(dwc->dev, "invalid count(%u) > evt->length(%u)\n",
++			count, evt->length);
++		return IRQ_NONE;
++	}
++
+ 	evt->count = count;
+ 	evt->flags |= DWC3_EVENT_PENDING;
+ 
+diff --git a/drivers/usb/gadget/udc/aspeed-vhub/dev.c b/drivers/usb/gadget/udc/aspeed-vhub/dev.c
+index 573109ca5b7990..a09f72772e6e95 100644
+--- a/drivers/usb/gadget/udc/aspeed-vhub/dev.c
++++ b/drivers/usb/gadget/udc/aspeed-vhub/dev.c
+@@ -548,6 +548,9 @@ int ast_vhub_init_dev(struct ast_vhub *vhub, unsigned int idx)
+ 	d->vhub = vhub;
+ 	d->index = idx;
+ 	d->name = devm_kasprintf(parent, GFP_KERNEL, "port%d", idx+1);
++	if (!d->name)
++		return -ENOMEM;
++
+ 	d->regs = vhub->regs + 0x100 + 0x10 * idx;
+ 
+ 	ast_vhub_init_ep0(vhub, &d->ep0, d);
+diff --git a/drivers/usb/host/max3421-hcd.c b/drivers/usb/host/max3421-hcd.c
+index 0881fdd1823e0b..dcf31a592f5d11 100644
+--- a/drivers/usb/host/max3421-hcd.c
++++ b/drivers/usb/host/max3421-hcd.c
+@@ -1946,6 +1946,12 @@ max3421_remove(struct spi_device *spi)
+ 	usb_put_hcd(hcd);
+ }
+ 
++static const struct spi_device_id max3421_spi_ids[] = {
++	{ "max3421" },
++	{ },
++};
++MODULE_DEVICE_TABLE(spi, max3421_spi_ids);
++
+ static const struct of_device_id max3421_of_match_table[] = {
+ 	{ .compatible = "maxim,max3421", },
+ 	{},
+@@ -1955,6 +1961,7 @@ MODULE_DEVICE_TABLE(of, max3421_of_match_table);
+ static struct spi_driver max3421_driver = {
+ 	.probe		= max3421_probe,
+ 	.remove		= max3421_remove,
++	.id_table	= max3421_spi_ids,
+ 	.driver		= {
+ 		.name	= "max3421-hcd",
+ 		.of_match_table = max3421_of_match_table,
+diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
+index 900ea0d368e034..9f0a6b27e47cb6 100644
+--- a/drivers/usb/host/ohci-pci.c
++++ b/drivers/usb/host/ohci-pci.c
+@@ -165,6 +165,25 @@ static int ohci_quirk_amd700(struct usb_hcd *hcd)
+ 	return 0;
+ }
+ 
++static int ohci_quirk_loongson(struct usb_hcd *hcd)
++{
++	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
++
++	/*
++	 * Loongson's LS7A OHCI controller (rev 0x02) has a
++	 * flaw: the MMIO registers at offsets 0x60/0x64 are
++	 * treated as a legacy PS2-compatible keyboard/mouse
++	 * interface. Since OHCI only uses a 4KB BAR, the LS7A's
++	 * 32KB BAR is wrapped around (the 2nd 4KB BAR space
++	 * is the same as the 1st 4KB internally). So add a 4KB
++	 * offset (0x1000) to the OHCI registers as a quirk.
++	 */
++	if (pdev->revision == 0x2)
++		hcd->regs += SZ_4K;	/* SZ_4K = 0x1000 */
++
++	return 0;
++}
++
+ static int ohci_quirk_qemu(struct usb_hcd *hcd)
+ {
+ 	struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+@@ -224,6 +243,10 @@ static const struct pci_device_id ohci_pci_quirks[] = {
+ 		PCI_DEVICE(PCI_VENDOR_ID_ATI, 0x4399),
+ 		.driver_data = (unsigned long)ohci_quirk_amd700,
+ 	},
++	{
++		PCI_DEVICE(PCI_VENDOR_ID_LOONGSON, 0x7a24),
++		.driver_data = (unsigned long)ohci_quirk_loongson,
++	},
+ 	{
+ 		.vendor		= PCI_VENDOR_ID_APPLE,
+ 		.device		= 0x003f,
+diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
+index 2fe3a92978fa29..1952e05033407f 100644
+--- a/drivers/usb/host/xhci-hub.c
++++ b/drivers/usb/host/xhci-hub.c
+@@ -1878,9 +1878,10 @@ int xhci_bus_resume(struct usb_hcd *hcd)
+ 	int max_ports, port_index;
+ 	int sret;
+ 	u32 next_state;
+-	u32 temp, portsc;
++	u32 portsc;
+ 	struct xhci_hub *rhub;
+ 	struct xhci_port **ports;
++	bool disabled_irq = false;
+ 
+ 	rhub = xhci_get_rhub(hcd);
+ 	ports = rhub->ports;
+@@ -1896,17 +1897,20 @@ int xhci_bus_resume(struct usb_hcd *hcd)
+ 		return -ESHUTDOWN;
+ 	}
+ 
+-	/* delay the irqs */
+-	temp = readl(&xhci->op_regs->command);
+-	temp &= ~CMD_EIE;
+-	writel(temp, &xhci->op_regs->command);
+-
+ 	/* bus specific resume for ports we suspended at bus_suspend */
+-	if (hcd->speed >= HCD_USB3)
++	if (hcd->speed >= HCD_USB3) {
+ 		next_state = XDEV_U0;
+-	else
++	} else {
+ 		next_state = XDEV_RESUME;
+-
++		if (bus_state->bus_suspended) {
++			/*
++			 * prevent port event interrupts from interfering
++			 * with usb2 port resume process
++			 */
++			xhci_disable_interrupter(xhci->interrupters[0]);
++			disabled_irq = true;
++		}
++	}
+ 	port_index = max_ports;
+ 	while (port_index--) {
+ 		portsc = readl(ports[port_index]->addr);
+@@ -1974,11 +1978,9 @@ int xhci_bus_resume(struct usb_hcd *hcd)
+ 	(void) readl(&xhci->op_regs->command);
+ 
+ 	bus_state->next_statechange = jiffies + msecs_to_jiffies(5);
+-	/* re-enable irqs */
+-	temp = readl(&xhci->op_regs->command);
+-	temp |= CMD_EIE;
+-	writel(temp, &xhci->op_regs->command);
+-	temp = readl(&xhci->op_regs->command);
++	/* re-enable interrupter */
++	if (disabled_irq)
++		xhci_enable_interrupter(xhci->interrupters[0]);
+ 
+ 	spin_unlock_irqrestore(&xhci->lock, flags);
+ 	return 0;
+diff --git a/drivers/usb/host/xhci-mvebu.c b/drivers/usb/host/xhci-mvebu.c
+index 87f1597a0e5ab7..257e4d79971fda 100644
+--- a/drivers/usb/host/xhci-mvebu.c
++++ b/drivers/usb/host/xhci-mvebu.c
+@@ -73,13 +73,3 @@ int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd)
+ 
+ 	return 0;
+ }
+-
+-int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd)
+-{
+-	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
+-
+-	/* Without reset on resume, the HC won't work at all */
+-	xhci->quirks |= XHCI_RESET_ON_RESUME;
+-
+-	return 0;
+-}
+diff --git a/drivers/usb/host/xhci-mvebu.h b/drivers/usb/host/xhci-mvebu.h
+index 3be021793cc8b0..9d26e22c48422f 100644
+--- a/drivers/usb/host/xhci-mvebu.h
++++ b/drivers/usb/host/xhci-mvebu.h
+@@ -12,16 +12,10 @@ struct usb_hcd;
+ 
+ #if IS_ENABLED(CONFIG_USB_XHCI_MVEBU)
+ int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd);
+-int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd);
+ #else
+ static inline int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd)
+ {
+ 	return 0;
+ }
+-
+-static inline int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd)
+-{
+-	return 0;
+-}
+ #endif
+ #endif /* __LINUX_XHCI_MVEBU_H */
+diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
+index e6660472501e4d..2379a67e34e125 100644
+--- a/drivers/usb/host/xhci-plat.c
++++ b/drivers/usb/host/xhci-plat.c
+@@ -106,7 +106,7 @@ static const struct xhci_plat_priv xhci_plat_marvell_armada = {
+ };
+ 
+ static const struct xhci_plat_priv xhci_plat_marvell_armada3700 = {
+-	.init_quirk = xhci_mvebu_a3700_init_quirk,
++	.quirks = XHCI_RESET_ON_RESUME,
+ };
+ 
+ static const struct xhci_plat_priv xhci_plat_brcm = {
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 2fad9563dca40b..3e70e4f6bf0832 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -1190,16 +1190,19 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
+ 			 * Stopped state, but it will soon change to Running.
+ 			 *
+ 			 * Assume this bug on unexpected Stop Endpoint failures.
+-			 * Keep retrying until the EP starts and stops again, on
+-			 * chips where this is known to help. Wait for 100ms.
++			 * Keep retrying until the EP starts and stops again.
+ 			 */
+-			if (time_is_before_jiffies(ep->stop_time + msecs_to_jiffies(100)))
+-				break;
+ 			fallthrough;
+ 		case EP_STATE_RUNNING:
+ 			/* Race, HW handled stop ep cmd before ep was running */
+ 			xhci_dbg(xhci, "Stop ep completion ctx error, ctx_state %d\n",
+ 					GET_EP_CTX_STATE(ep_ctx));
++			/*
++			 * Don't retry forever if we guessed wrong or a defective HC never starts
++			 * the EP or says 'Running' but fails the command. We must give back TDs.
++			 */
++			if (time_is_before_jiffies(ep->stop_time + msecs_to_jiffies(100)))
++				break;
+ 
+ 			command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
+ 			if (!command) {
+@@ -2642,6 +2645,22 @@ static int handle_transferless_tx_event(struct xhci_hcd *xhci, struct xhci_virt_
+ 	return 0;
+ }
+ 
++static bool xhci_spurious_success_tx_event(struct xhci_hcd *xhci,
++					   struct xhci_ring *ring)
++{
++	switch (ring->old_trb_comp_code) {
++	case COMP_SHORT_PACKET:
++		return xhci->quirks & XHCI_SPURIOUS_SUCCESS;
++	case COMP_USB_TRANSACTION_ERROR:
++	case COMP_BABBLE_DETECTED_ERROR:
++	case COMP_ISOCH_BUFFER_OVERRUN:
++		return xhci->quirks & XHCI_ETRON_HOST &&
++			ring->type == TYPE_ISOC;
++	default:
++		return false;
++	}
++}
++
+ /*
+  * If this function returns an error condition, it means it got a Transfer
+  * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
+@@ -2662,6 +2681,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
+ 	int status = -EINPROGRESS;
+ 	struct xhci_ep_ctx *ep_ctx;
+ 	u32 trb_comp_code;
++	bool ring_xrun_event = false;
+ 
+ 	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
+ 	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
+@@ -2695,8 +2715,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
+ 	case COMP_SUCCESS:
+ 		if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
+ 			trb_comp_code = COMP_SHORT_PACKET;
+-			xhci_dbg(xhci, "Successful completion on short TX for slot %u ep %u with last td short %d\n",
+-				 slot_id, ep_index, ep_ring->last_td_was_short);
++			xhci_dbg(xhci, "Successful completion on short TX for slot %u ep %u with last td comp code %d\n",
++				 slot_id, ep_index, ep_ring->old_trb_comp_code);
+ 		}
+ 		break;
+ 	case COMP_SHORT_PACKET:
+@@ -2768,14 +2788,12 @@ static int handle_tx_event(struct xhci_hcd *xhci,
+ 		 * Underrun Event for OUT Isoch endpoint.
+ 		 */
+ 		xhci_dbg(xhci, "Underrun event on slot %u ep %u\n", slot_id, ep_index);
+-		if (ep->skip)
+-			break;
+-		return 0;
++		ring_xrun_event = true;
++		break;
+ 	case COMP_RING_OVERRUN:
+ 		xhci_dbg(xhci, "Overrun event on slot %u ep %u\n", slot_id, ep_index);
+-		if (ep->skip)
+-			break;
+-		return 0;
++		ring_xrun_event = true;
++		break;
+ 	case COMP_MISSED_SERVICE_ERROR:
+ 		/*
+ 		 * When we encounter a missed service error, one or more isoc tds
+@@ -2787,7 +2805,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
+ 		xhci_dbg(xhci,
+ 			 "Miss service interval error for slot %u ep %u, set skip flag\n",
+ 			 slot_id, ep_index);
+-		return 0;
++		break;
+ 	case COMP_NO_PING_RESPONSE_ERROR:
+ 		ep->skip = true;
+ 		xhci_dbg(xhci,
+@@ -2838,6 +2856,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
+ 		xhci_td_cleanup(xhci, td, ep_ring, td->status);
+ 	}
+ 
++	/* Missed TDs will be skipped on the next event */
++	if (trb_comp_code == COMP_MISSED_SERVICE_ERROR)
++		return 0;
++
+ 	if (list_empty(&ep_ring->td_list)) {
+ 		/*
+ 		 * Don't print warnings if ring is empty due to a stopped endpoint generating an
+@@ -2847,7 +2869,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
+ 		 */
+ 		if (trb_comp_code != COMP_STOPPED &&
+ 		    trb_comp_code != COMP_STOPPED_LENGTH_INVALID &&
+-		    !ep_ring->last_td_was_short) {
++		    !ring_xrun_event &&
++		    !xhci_spurious_success_tx_event(xhci, ep_ring)) {
+ 			xhci_warn(xhci, "Event TRB for slot %u ep %u with no TDs queued\n",
+ 				  slot_id, ep_index);
+ 		}
+@@ -2881,6 +2904,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
+ 				goto check_endpoint_halted;
+ 			}
+ 
++			/* TD was queued after xrun, maybe xrun was on a link, don't panic yet */
++			if (ring_xrun_event)
++				return 0;
++
+ 			/*
+ 			 * Skip the Force Stopped Event. The 'ep_trb' of FSE is not in the current
+ 			 * TD pointed by 'ep_ring->dequeue' because that the hardware dequeue
+@@ -2895,11 +2922,12 @@ static int handle_tx_event(struct xhci_hcd *xhci,
+ 
+ 			/*
+ 			 * Some hosts give a spurious success event after a short
+-			 * transfer. Ignore it.
++			 * transfer or error on last TRB. Ignore it.
+ 			 */
+-			if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
+-			    ep_ring->last_td_was_short) {
+-				ep_ring->last_td_was_short = false;
++			if (xhci_spurious_success_tx_event(xhci, ep_ring)) {
++				xhci_dbg(xhci, "Spurious event dma %pad, comp_code %u after %u\n",
++					 &ep_trb_dma, trb_comp_code, ep_ring->old_trb_comp_code);
++				ep_ring->old_trb_comp_code = 0;
+ 				return 0;
+ 			}
+ 
+@@ -2927,10 +2955,11 @@ static int handle_tx_event(struct xhci_hcd *xhci,
+ 	 */
+ 	} while (ep->skip);
+ 
+-	if (trb_comp_code == COMP_SHORT_PACKET)
+-		ep_ring->last_td_was_short = true;
+-	else
+-		ep_ring->last_td_was_short = false;
++	ep_ring->old_trb_comp_code = trb_comp_code;
++
++	/* Get out if a TD was queued at enqueue after the xrun occurred */
++	if (ring_xrun_event)
++		return 0;
+ 
+ 	ep_trb = &ep_seg->trbs[(ep_trb_dma - ep_seg->dma) / sizeof(*ep_trb)];
+ 	trace_xhci_handle_transfer(ep_ring, (struct xhci_generic_trb *) ep_trb);
+@@ -3778,7 +3807,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ 		 * enqueue a No Op TRB, this can prevent the Setup and Data Stage
+ 		 * TRB from being broken by the Link TRB.
+ 		 */
+-		if (trb_is_link(ep_ring->enqueue + 1)) {
++		if (last_trb_on_seg(ep_ring->enq_seg, ep_ring->enqueue + 1)) {
+ 			field = TRB_TYPE(TRB_TR_NOOP) | ep_ring->cycle_state;
+ 			queue_trb(xhci, ep_ring, false, 0, 0,
+ 					TRB_INTR_TARGET(0), field);
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 358ed674f782fb..799941b6ad6c6a 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -321,7 +321,7 @@ static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
+ 		xhci_info(xhci, "Fault detected\n");
+ }
+ 
+-static int xhci_enable_interrupter(struct xhci_interrupter *ir)
++int xhci_enable_interrupter(struct xhci_interrupter *ir)
+ {
+ 	u32 iman;
+ 
+@@ -334,7 +334,7 @@ static int xhci_enable_interrupter(struct xhci_interrupter *ir)
+ 	return 0;
+ }
+ 
+-static int xhci_disable_interrupter(struct xhci_interrupter *ir)
++int xhci_disable_interrupter(struct xhci_interrupter *ir)
+ {
+ 	u32 iman;
+ 
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index 71588e4db0e34b..2a954efa53e80e 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -1360,7 +1360,7 @@ struct xhci_ring {
+ 	unsigned int		num_trbs_free; /* used only by xhci DbC */
+ 	unsigned int		bounce_buf_len;
+ 	enum xhci_ring_type	type;
+-	bool			last_td_was_short;
++	u32			old_trb_comp_code;
+ 	struct radix_tree_root	*trb_address_map;
+ };
+ 
+@@ -1881,6 +1881,8 @@ int xhci_alloc_tt_info(struct xhci_hcd *xhci,
+ 		struct usb_tt *tt, gfp_t mem_flags);
+ int xhci_set_interrupter_moderation(struct xhci_interrupter *ir,
+ 				    u32 imod_interval);
++int xhci_enable_interrupter(struct xhci_interrupter *ir);
++int xhci_disable_interrupter(struct xhci_interrupter *ir);
+ 
+ /* xHCI ring, segment, TRB, and TD functions */
+ dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg, union xhci_trb *trb);
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 236205ce350030..eef614be7db579 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -1093,6 +1093,8 @@ static const struct usb_device_id id_table_combined[] = {
+ 	{ USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602E_PID, 1) },
+ 	{ USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602E_PID, 2) },
+ 	{ USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602E_PID, 3) },
++	/* Abacus Electrics */
++	{ USB_DEVICE(FTDI_VID, ABACUS_OPTICAL_PROBE_PID) },
+ 	{ }					/* Terminating entry */
+ };
+ 
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 52be47d684ea66..9acb6f83732763 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -442,6 +442,11 @@
+ #define LINX_FUTURE_1_PID   0xF44B	/* Linx future device */
+ #define LINX_FUTURE_2_PID   0xF44C	/* Linx future device */
+ 
++/*
++ * Abacus Electrics
++ */
++#define ABACUS_OPTICAL_PROBE_PID	0xf458 /* ABACUS ELECTRICS Optical Probe */
++
+ /*
+  * Oceanic product ids
+  */
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 5cd26dac2069fa..27879cc575365c 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -611,6 +611,7 @@ static void option_instat_callback(struct urb *urb);
+ /* Sierra Wireless products */
+ #define SIERRA_VENDOR_ID			0x1199
+ #define SIERRA_PRODUCT_EM9191			0x90d3
++#define SIERRA_PRODUCT_EM9291			0x90e3
+ 
+ /* UNISOC (Spreadtrum) products */
+ #define UNISOC_VENDOR_ID			0x1782
+@@ -2432,6 +2433,8 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x30) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x40) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0, 0) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9291, 0xff, 0xff, 0x30) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9291, 0xff, 0xff, 0x40) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, TOZED_PRODUCT_LT70C, 0xff, 0, 0) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, LUAT_PRODUCT_AIR720U, 0xff, 0, 0) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x1bbb, 0x0530, 0xff),			/* TCL IK512 MBIM */
+diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
+index 2c12449ff60c51..a0afaf254d1229 100644
+--- a/drivers/usb/serial/usb-serial-simple.c
++++ b/drivers/usb/serial/usb-serial-simple.c
+@@ -100,6 +100,11 @@ DEVICE(nokia, NOKIA_IDS);
+ 	{ USB_DEVICE(0x09d7, 0x0100) }	/* NovAtel FlexPack GPS */
+ DEVICE_N(novatel_gps, NOVATEL_IDS, 3);
+ 
++/* OWON electronic test and measurement equipment driver */
++#define OWON_IDS()			\
++	{ USB_DEVICE(0x5345, 0x1234) } /* HDS200 oscilloscopes and others */
++DEVICE(owon, OWON_IDS);
++
+ /* Siemens USB/MPI adapter */
+ #define SIEMENS_IDS()			\
+ 	{ USB_DEVICE(0x908, 0x0004) }
+@@ -134,6 +139,7 @@ static struct usb_serial_driver * const serial_drivers[] = {
+ 	&motorola_tetra_device,
+ 	&nokia_device,
+ 	&novatel_gps_device,
++	&owon_device,
+ 	&siemens_mpi_device,
+ 	&suunto_device,
+ 	&vivopay_device,
+@@ -153,6 +159,7 @@ static const struct usb_device_id id_table[] = {
+ 	MOTOROLA_TETRA_IDS(),
+ 	NOKIA_IDS(),
+ 	NOVATEL_IDS(),
++	OWON_IDS(),
+ 	SIEMENS_IDS(),
+ 	SUUNTO_IDS(),
+ 	VIVOPAY_IDS(),
+diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
+index 1f8c9b16a0fb85..d460d71b425783 100644
+--- a/drivers/usb/storage/unusual_uas.h
++++ b/drivers/usb/storage/unusual_uas.h
+@@ -83,6 +83,13 @@ UNUSUAL_DEV(0x0bc2, 0x331a, 0x0000, 0x9999,
+ 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ 		US_FL_NO_REPORT_LUNS),
+ 
++/* Reported-by: Oliver Neukum <oneukum@suse.com> */
++UNUSUAL_DEV(0x125f, 0xa94a, 0x0160, 0x0160,
++		"ADATA",
++		"Portable HDD CH94",
++		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++		US_FL_NO_ATA_1X),
++
+ /* Reported-by: Benjamin Tissoires <benjamin.tissoires@redhat.com> */
+ UNUSUAL_DEV(0x13fd, 0x3940, 0x0000, 0x9999,
+ 		"Initio Corporation",
+diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
+index 58f40156de5622..5c75634b8fa380 100644
+--- a/drivers/usb/typec/class.c
++++ b/drivers/usb/typec/class.c
+@@ -932,9 +932,11 @@ struct typec_partner *typec_register_partner(struct typec_port *port,
+ 	partner->dev.type = &typec_partner_dev_type;
+ 	dev_set_name(&partner->dev, "%s-partner", dev_name(&port->dev));
+ 
++	mutex_lock(&port->partner_link_lock);
+ 	ret = device_register(&partner->dev);
+ 	if (ret) {
+ 		dev_err(&port->dev, "failed to register partner (%d)\n", ret);
++		mutex_unlock(&port->partner_link_lock);
+ 		put_device(&partner->dev);
+ 		return ERR_PTR(ret);
+ 	}
+@@ -943,6 +945,7 @@ struct typec_partner *typec_register_partner(struct typec_port *port,
+ 		typec_partner_link_device(partner, port->usb2_dev);
+ 	if (port->usb3_dev)
+ 		typec_partner_link_device(partner, port->usb3_dev);
++	mutex_unlock(&port->partner_link_lock);
+ 
+ 	return partner;
+ }
+@@ -963,12 +966,18 @@ void typec_unregister_partner(struct typec_partner *partner)
+ 
+ 	port = to_typec_port(partner->dev.parent);
+ 
+-	if (port->usb2_dev)
++	mutex_lock(&port->partner_link_lock);
++	if (port->usb2_dev) {
+ 		typec_partner_unlink_device(partner, port->usb2_dev);
+-	if (port->usb3_dev)
++		port->usb2_dev = NULL;
++	}
++	if (port->usb3_dev) {
+ 		typec_partner_unlink_device(partner, port->usb3_dev);
++		port->usb3_dev = NULL;
++	}
+ 
+ 	device_unregister(&partner->dev);
++	mutex_unlock(&port->partner_link_lock);
+ }
+ EXPORT_SYMBOL_GPL(typec_unregister_partner);
+ 
+@@ -1862,25 +1871,30 @@ static struct typec_partner *typec_get_partner(struct typec_port *port)
+ static void typec_partner_attach(struct typec_connector *con, struct device *dev)
+ {
+ 	struct typec_port *port = container_of(con, struct typec_port, con);
+-	struct typec_partner *partner = typec_get_partner(port);
++	struct typec_partner *partner;
+ 	struct usb_device *udev = to_usb_device(dev);
+ 
++	mutex_lock(&port->partner_link_lock);
+ 	if (udev->speed < USB_SPEED_SUPER)
+ 		port->usb2_dev = dev;
+ 	else
+ 		port->usb3_dev = dev;
+ 
++	partner = typec_get_partner(port);
+ 	if (partner) {
+ 		typec_partner_link_device(partner, dev);
+ 		put_device(&partner->dev);
+ 	}
++	mutex_unlock(&port->partner_link_lock);
+ }
+ 
+ static void typec_partner_deattach(struct typec_connector *con, struct device *dev)
+ {
+ 	struct typec_port *port = container_of(con, struct typec_port, con);
+-	struct typec_partner *partner = typec_get_partner(port);
++	struct typec_partner *partner;
+ 
++	mutex_lock(&port->partner_link_lock);
++	partner = typec_get_partner(port);
+ 	if (partner) {
+ 		typec_partner_unlink_device(partner, dev);
+ 		put_device(&partner->dev);
+@@ -1890,6 +1904,7 @@ static void typec_partner_deattach(struct typec_connector *con, struct device *d
+ 		port->usb2_dev = NULL;
+ 	else if (port->usb3_dev == dev)
+ 		port->usb3_dev = NULL;
++	mutex_unlock(&port->partner_link_lock);
+ }
+ 
+ /**
+@@ -2425,6 +2440,7 @@ struct typec_port *typec_register_port(struct device *parent,
+ 
+ 	ida_init(&port->mode_ids);
+ 	mutex_init(&port->port_type_lock);
++	mutex_init(&port->partner_link_lock);
+ 
+ 	port->id = id;
+ 	port->ops = cap->ops;
+diff --git a/drivers/usb/typec/class.h b/drivers/usb/typec/class.h
+index 7485cdb9dd2017..300312a1c15279 100644
+--- a/drivers/usb/typec/class.h
++++ b/drivers/usb/typec/class.h
+@@ -56,6 +56,7 @@ struct typec_port {
+ 	enum typec_pwr_opmode		pwr_opmode;
+ 	enum typec_port_type		port_type;
+ 	struct mutex			port_type_lock;
++	struct mutex			partner_link_lock;
+ 
+ 	enum typec_orientation		orientation;
+ 	struct typec_switch		*sw;
+diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
+index 7aeff435c1d873..35a03306d13454 100644
+--- a/drivers/vhost/scsi.c
++++ b/drivers/vhost/scsi.c
+@@ -630,7 +630,7 @@ vhost_scsi_get_cmd(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
+ 
+ 	tag = sbitmap_get(&svq->scsi_tags);
+ 	if (tag < 0) {
+-		pr_err("Unable to obtain tag for vhost_scsi_cmd\n");
++		pr_warn_once("Guest sent too many cmds. Returning TASK_SET_FULL.\n");
+ 		return ERR_PTR(-ENOMEM);
+ 	}
+ 
+@@ -930,24 +930,69 @@ static void vhost_scsi_target_queue_cmd(struct vhost_scsi_cmd *cmd)
+ }
+ 
+ static void
+-vhost_scsi_send_bad_target(struct vhost_scsi *vs,
+-			   struct vhost_virtqueue *vq,
+-			   int head, unsigned out)
++vhost_scsi_send_status(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
++		       struct vhost_scsi_ctx *vc, u8 status)
+ {
+-	struct virtio_scsi_cmd_resp __user *resp;
+ 	struct virtio_scsi_cmd_resp rsp;
++	struct iov_iter iov_iter;
+ 	int ret;
+ 
+ 	memset(&rsp, 0, sizeof(rsp));
+-	rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
+-	resp = vq->iov[out].iov_base;
+-	ret = __copy_to_user(resp, &rsp, sizeof(rsp));
+-	if (!ret)
+-		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
++	rsp.status = status;
++
++	iov_iter_init(&iov_iter, ITER_DEST, &vq->iov[vc->out], vc->in,
++		      sizeof(rsp));
++
++	ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
++
++	if (likely(ret == sizeof(rsp)))
++		vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
+ 	else
+ 		pr_err("Faulted on virtio_scsi_cmd_resp\n");
+ }
+ 
++#define TYPE_IO_CMD    0
++#define TYPE_CTRL_TMF  1
++#define TYPE_CTRL_AN   2
++
++static void
++vhost_scsi_send_bad_target(struct vhost_scsi *vs,
++			   struct vhost_virtqueue *vq,
++			   struct vhost_scsi_ctx *vc, int type)
++{
++	union {
++		struct virtio_scsi_cmd_resp cmd;
++		struct virtio_scsi_ctrl_tmf_resp tmf;
++		struct virtio_scsi_ctrl_an_resp an;
++	} rsp;
++	struct iov_iter iov_iter;
++	size_t rsp_size;
++	int ret;
++
++	memset(&rsp, 0, sizeof(rsp));
++
++	if (type == TYPE_IO_CMD) {
++		rsp_size = sizeof(struct virtio_scsi_cmd_resp);
++		rsp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
++	} else if (type == TYPE_CTRL_TMF) {
++		rsp_size = sizeof(struct virtio_scsi_ctrl_tmf_resp);
++		rsp.tmf.response = VIRTIO_SCSI_S_BAD_TARGET;
++	} else {
++		rsp_size = sizeof(struct virtio_scsi_ctrl_an_resp);
++		rsp.an.response = VIRTIO_SCSI_S_BAD_TARGET;
++	}
++
++	iov_iter_init(&iov_iter, ITER_DEST, &vq->iov[vc->out], vc->in,
++		      rsp_size);
++
++	ret = copy_to_iter(&rsp, rsp_size, &iov_iter);
++
++	if (likely(ret == rsp_size))
++		vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
++	else
++		pr_err("Faulted on virtio scsi type=%d\n", type);
++}
++
+ static int
+ vhost_scsi_get_desc(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
+ 		    struct vhost_scsi_ctx *vc)
+@@ -1216,8 +1261,8 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
+ 					 exp_data_len + prot_bytes,
+ 					 data_direction);
+ 		if (IS_ERR(cmd)) {
+-			vq_err(vq, "vhost_scsi_get_cmd failed %ld\n",
+-			       PTR_ERR(cmd));
++			ret = PTR_ERR(cmd);
++			vq_err(vq, "vhost_scsi_get_tag failed %dd\n", ret);
+ 			goto err;
+ 		}
+ 		cmd->tvc_vhost = vs;
+@@ -1254,11 +1299,15 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
+ 		 * EINVAL: Invalid response buffer, drop the request
+ 		 * EIO:    Respond with bad target
+ 		 * EAGAIN: Pending request
++		 * ENOMEM: Could not allocate resources for request
+ 		 */
+ 		if (ret == -ENXIO)
+ 			break;
+ 		else if (ret == -EIO)
+-			vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
++			vhost_scsi_send_bad_target(vs, vq, &vc, TYPE_IO_CMD);
++		else if (ret == -ENOMEM)
++			vhost_scsi_send_status(vs, vq, &vc,
++					       SAM_STAT_TASK_SET_FULL);
+ 	} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
+ out:
+ 	mutex_unlock(&vq->mutex);
+@@ -1488,7 +1537,10 @@ vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
+ 		if (ret == -ENXIO)
+ 			break;
+ 		else if (ret == -EIO)
+-			vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
++			vhost_scsi_send_bad_target(vs, vq, &vc,
++						   v_req.type == VIRTIO_SCSI_T_TMF ?
++						   TYPE_CTRL_TMF :
++						   TYPE_CTRL_AN);
+ 	} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
+ out:
+ 	mutex_unlock(&vq->mutex);
+diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
+index f7d6f47971fdf8..24f485827e0399 100644
+--- a/drivers/xen/Kconfig
++++ b/drivers/xen/Kconfig
+@@ -278,7 +278,7 @@ config XEN_PRIVCMD_EVENTFD
+ 
+ config XEN_ACPI_PROCESSOR
+ 	tristate "Xen ACPI processor"
+-	depends on XEN && XEN_PV_DOM0 && X86 && ACPI_PROCESSOR && CPU_FREQ
++	depends on XEN && XEN_DOM0 && X86 && ACPI_PROCESSOR && CPU_FREQ
+ 	default m
+ 	help
+ 	  This ACPI processor uploads Power Management information to the Xen
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index 78c4a3765002eb..eaa991e6980492 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -2235,15 +2235,20 @@ static void btrfs_punch_hole_lock_range(struct inode *inode,
+ 	 * will always return true.
+ 	 * So here we need to do extra page alignment for
+ 	 * filemap_range_has_page().
++	 *
++	 * Also, do not decrement page_lockend yet, as it can be 0.
+ 	 */
+ 	const u64 page_lockstart = round_up(lockstart, PAGE_SIZE);
+-	const u64 page_lockend = round_down(lockend + 1, PAGE_SIZE) - 1;
++	const u64 page_lockend = round_down(lockend + 1, PAGE_SIZE);
+ 
+ 	while (1) {
+ 		truncate_pagecache_range(inode, lockstart, lockend);
+ 
+ 		lock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
+ 			    cached_state);
++		/* The same page or adjacent pages. */
++		if (page_lockend <= page_lockstart)
++			break;
+ 		/*
+ 		 * We can't have ordered extents in the range, nor dirty/writeback
+ 		 * pages, because we have locked the inode's VFS lock in exclusive
+@@ -2255,7 +2260,7 @@ static void btrfs_punch_hole_lock_range(struct inode *inode,
+ 		 * we do, unlock the range and retry.
+ 		 */
+ 		if (!filemap_range_has_page(inode->i_mapping, page_lockstart,
+-					    page_lockend))
++					    page_lockend - 1))
+ 			break;
+ 
+ 		unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
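/*
 * Standalone sketch of the alignment arithmetic behind the punch-hole
 * fix above (hypothetical values; PAGE_SIZE assumed to be 4096). For a
 * hole inside a single page, round_down(lockend + 1, PAGE_SIZE) can be
 * 0 or equal to page_lockstart, so subtracting 1 up front would wrap a
 * u64; the fixed code compares first and only then decrements.
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096ULL
#define round_up(x, y)   ((((x) + (y) - 1) / (y)) * (y))
#define round_down(x, y) (((x) / (y)) * (y))

int main(void)
{
	uint64_t lockstart = 512, lockend = 1023;	/* sub-page hole */
	uint64_t page_lockstart = round_up(lockstart, PAGE_SIZE);   /* 4096 */
	uint64_t page_lockend = round_down(lockend + 1, PAGE_SIZE); /* 0 */

	if (page_lockend <= page_lockstart)
		printf("same or adjacent pages, skip the page check\n");
	else
		printf("check pages [%llu, %llu]\n",
		       (unsigned long long)page_lockstart,
		       (unsigned long long)(page_lockend - 1));
	return 0;
}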
+diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
+index 2bb7e32ad94588..2603c9d60fd21b 100644
+--- a/fs/btrfs/zoned.c
++++ b/fs/btrfs/zoned.c
+@@ -1655,7 +1655,6 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
+ 		 * stripe.
+ 		 */
+ 		cache->alloc_offset = cache->zone_capacity;
+-		ret = 0;
+ 	}
+ 
+ out:
+diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
+index 315ef02f9a3fa6..f7875e6f302902 100644
+--- a/fs/ceph/inode.c
++++ b/fs/ceph/inode.c
+@@ -2362,7 +2362,7 @@ static int fill_fscrypt_truncate(struct inode *inode,
+ 
+ 	/* Try to writeback the dirty pagecaches */
+ 	if (issued & (CEPH_CAP_FILE_BUFFER)) {
+-		loff_t lend = orig_pos + CEPH_FSCRYPT_BLOCK_SHIFT - 1;
++		loff_t lend = orig_pos + CEPH_FSCRYPT_BLOCK_SIZE - 1;
+ 
+ 		ret = filemap_write_and_wait_range(inode->i_mapping,
+ 						   orig_pos, lend);
+diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c
+index 87ee3a17bd29c9..e8c5525afc67a2 100644
+--- a/fs/ext4/block_validity.c
++++ b/fs/ext4/block_validity.c
+@@ -351,10 +351,9 @@ int ext4_check_blockref(const char *function, unsigned int line,
+ {
+ 	__le32 *bref = p;
+ 	unsigned int blk;
++	journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
+ 
+-	if (ext4_has_feature_journal(inode->i_sb) &&
+-	    (inode->i_ino ==
+-	     le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum)))
++	if (journal && inode == journal->j_inode)
+ 		return 0;
+ 
+ 	while (bref < p+max) {
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index ffa6aa55a1a7a8..487d9aec56c9d7 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -383,10 +383,11 @@ static int __check_block_validity(struct inode *inode, const char *func,
+ 				unsigned int line,
+ 				struct ext4_map_blocks *map)
+ {
+-	if (ext4_has_feature_journal(inode->i_sb) &&
+-	    (inode->i_ino ==
+-	     le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum)))
++	journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
++
++	if (journal && inode == journal->j_inode)
+ 		return 0;
++
+ 	if (!ext4_inode_block_valid(inode, map->m_pblk, map->m_len)) {
+ 		ext4_error_inode(inode, func, line, map->m_pblk,
+ 				 "lblock %lu mapped to illegal pblock %llu "
+@@ -5467,7 +5468,7 @@ int ext4_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
+ 			    oldsize & (inode->i_sb->s_blocksize - 1)) {
+ 				error = ext4_inode_attach_jinode(inode);
+ 				if (error)
+-					goto err_out;
++					goto out_mmap_sem;
+ 			}
+ 
+ 			handle = ext4_journal_start(inode, EXT4_HT_INODE, 3);
+diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
+index 1bad460275ebe2..d4b990938399cf 100644
+--- a/fs/iomap/buffered-io.c
++++ b/fs/iomap/buffered-io.c
+@@ -263,7 +263,7 @@ static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
+ 		}
+ 
+ 		/* truncate len if we find any trailing uptodate block(s) */
+-		for ( ; i <= last; i++) {
++		while (++i <= last) {
+ 			if (ifs_block_is_uptodate(ifs, i)) {
+ 				plen -= (last - i + 1) * block_size;
+ 				last = i - 1;
+diff --git a/fs/namespace.c b/fs/namespace.c
+index f898de3a6f7056..bd601ab26e7811 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -2596,56 +2596,62 @@ static struct mountpoint *do_lock_mount(struct path *path, bool beneath)
+ 	struct vfsmount *mnt = path->mnt;
+ 	struct dentry *dentry;
+ 	struct mountpoint *mp = ERR_PTR(-ENOENT);
++	struct path under = {};
+ 
+ 	for (;;) {
+-		struct mount *m;
++		struct mount *m = real_mount(mnt);
+ 
+ 		if (beneath) {
+-			m = real_mount(mnt);
++			path_put(&under);
+ 			read_seqlock_excl(&mount_lock);
+-			dentry = dget(m->mnt_mountpoint);
++			under.mnt = mntget(&m->mnt_parent->mnt);
++			under.dentry = dget(m->mnt_mountpoint);
+ 			read_sequnlock_excl(&mount_lock);
++			dentry = under.dentry;
+ 		} else {
+ 			dentry = path->dentry;
+ 		}
+ 
+ 		inode_lock(dentry->d_inode);
+-		if (unlikely(cant_mount(dentry))) {
+-			inode_unlock(dentry->d_inode);
+-			goto out;
+-		}
+-
+ 		namespace_lock();
+ 
+-		if (beneath && (!is_mounted(mnt) || m->mnt_mountpoint != dentry)) {
++		if (unlikely(cant_mount(dentry) || !is_mounted(mnt)))
++			break;		// not to be mounted on
++
++		if (beneath && unlikely(m->mnt_mountpoint != dentry ||
++				        &m->mnt_parent->mnt != under.mnt)) {
+ 			namespace_unlock();
+ 			inode_unlock(dentry->d_inode);
+-			goto out;
++			continue;	// got moved
+ 		}
+ 
+ 		mnt = lookup_mnt(path);
+-		if (likely(!mnt))
++		if (unlikely(mnt)) {
++			namespace_unlock();
++			inode_unlock(dentry->d_inode);
++			path_put(path);
++			path->mnt = mnt;
++			path->dentry = dget(mnt->mnt_root);
++			continue;	// got overmounted
++		}
++		mp = get_mountpoint(dentry);
++		if (IS_ERR(mp))
+ 			break;
+-
+-		namespace_unlock();
+-		inode_unlock(dentry->d_inode);
+-		if (beneath)
+-			dput(dentry);
+-		path_put(path);
+-		path->mnt = mnt;
+-		path->dentry = dget(mnt->mnt_root);
+-	}
+-
+-	mp = get_mountpoint(dentry);
+-	if (IS_ERR(mp)) {
+-		namespace_unlock();
+-		inode_unlock(dentry->d_inode);
++		if (beneath) {
++			/*
++			 * @under duplicates the references that will stay
++			 * at least until namespace_unlock(), so the path_put()
++			 * below is safe (and OK to do under namespace_lock -
++			 * we are not dropping the final references here).
++			 */
++			path_put(&under);
++		}
++		return mp;
+ 	}
+-
+-out:
++	namespace_unlock();
++	inode_unlock(dentry->d_inode);
+ 	if (beneath)
+-		dput(dentry);
+-
++		path_put(&under);
+ 	return mp;
+ }
+ 
+@@ -2656,14 +2662,11 @@ static inline struct mountpoint *lock_mount(struct path *path)
+ 
+ static void unlock_mount(struct mountpoint *where)
+ {
+-	struct dentry *dentry = where->m_dentry;
+-
++	inode_unlock(where->m_dentry->d_inode);
+ 	read_seqlock_excl(&mount_lock);
+ 	put_mountpoint(where);
+ 	read_sequnlock_excl(&mount_lock);
+-
+ 	namespace_unlock();
+-	inode_unlock(dentry->d_inode);
+ }
+ 
+ static int graft_tree(struct mount *mnt, struct mount *p, struct mountpoint *mp)
+diff --git a/fs/netfs/main.c b/fs/netfs/main.c
+index 6c7be1377ee0ed..3a8433e802cc22 100644
+--- a/fs/netfs/main.c
++++ b/fs/netfs/main.c
+@@ -125,11 +125,13 @@ static int __init netfs_init(void)
+ 	if (mempool_init_slab_pool(&netfs_subrequest_pool, 100, netfs_subrequest_slab) < 0)
+ 		goto error_subreqpool;
+ 
++#ifdef CONFIG_PROC_FS
+ 	if (!proc_mkdir("fs/netfs", NULL))
+ 		goto error_proc;
+ 	if (!proc_create_seq("fs/netfs/requests", S_IFREG | 0444, NULL,
+ 			     &netfs_requests_seq_ops))
+ 		goto error_procfile;
++#endif
+ #ifdef CONFIG_FSCACHE_STATS
+ 	if (!proc_create_single("fs/netfs/stats", S_IFREG | 0444, NULL,
+ 				netfs_stats_show))
+@@ -142,9 +144,11 @@ static int __init netfs_init(void)
+ 	return 0;
+ 
+ error_fscache:
++#ifdef CONFIG_PROC_FS
+ error_procfile:
+ 	remove_proc_subtree("fs/netfs", NULL);
+ error_proc:
++#endif
+ 	mempool_exit(&netfs_subrequest_pool);
+ error_subreqpool:
+ 	kmem_cache_destroy(netfs_subrequest_slab);
+diff --git a/fs/ntfs3/file.c b/fs/ntfs3/file.c
+index 7976ac4611c8d0..748c4be912db56 100644
+--- a/fs/ntfs3/file.c
++++ b/fs/ntfs3/file.c
+@@ -428,6 +428,7 @@ static int ntfs_extend(struct inode *inode, loff_t pos, size_t count,
+ 	}
+ 
+ 	if (extend_init && !is_compressed(ni)) {
++		WARN_ON(ni->i_valid >= pos);
+ 		err = ntfs_extend_initialized_size(file, ni, ni->i_valid, pos);
+ 		if (err)
+ 			goto out;
+@@ -1238,21 +1239,22 @@ static ssize_t ntfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
+ 	ssize_t ret;
+ 	int err;
+ 
+-	err = check_write_restriction(inode);
+-	if (err)
+-		return err;
+-
+-	if (is_compressed(ni) && (iocb->ki_flags & IOCB_DIRECT)) {
+-		ntfs_inode_warn(inode, "direct i/o + compressed not supported");
+-		return -EOPNOTSUPP;
+-	}
+-
+ 	if (!inode_trylock(inode)) {
+ 		if (iocb->ki_flags & IOCB_NOWAIT)
+ 			return -EAGAIN;
+ 		inode_lock(inode);
+ 	}
+ 
++	ret = check_write_restriction(inode);
++	if (ret)
++		goto out;
++
++	if (is_compressed(ni) && (iocb->ki_flags & IOCB_DIRECT)) {
++		ntfs_inode_warn(inode, "direct i/o + compressed not supported");
++		ret = -EOPNOTSUPP;
++		goto out;
++	}
++
+ 	ret = generic_write_checks(iocb, from);
+ 	if (ret <= 0)
+ 		goto out;
+diff --git a/fs/smb/client/sess.c b/fs/smb/client/sess.c
+index 2426fa7405173c..9b32f7821b718f 100644
+--- a/fs/smb/client/sess.c
++++ b/fs/smb/client/sess.c
+@@ -707,6 +707,22 @@ unicode_oslm_strings(char **pbcc_area, const struct nls_table *nls_cp)
+ 	*pbcc_area = bcc_ptr;
+ }
+ 
++static void
++ascii_oslm_strings(char **pbcc_area, const struct nls_table *nls_cp)
++{
++	char *bcc_ptr = *pbcc_area;
++
++	strcpy(bcc_ptr, "Linux version ");
++	bcc_ptr += strlen("Linux version ");
++	strcpy(bcc_ptr, init_utsname()->release);
++	bcc_ptr += strlen(init_utsname()->release) + 1;
++
++	strcpy(bcc_ptr, CIFS_NETWORK_OPSYS);
++	bcc_ptr += strlen(CIFS_NETWORK_OPSYS) + 1;
++
++	*pbcc_area = bcc_ptr;
++}
++
+ static void unicode_domain_string(char **pbcc_area, struct cifs_ses *ses,
+ 				   const struct nls_table *nls_cp)
+ {
+@@ -731,6 +747,25 @@ static void unicode_domain_string(char **pbcc_area, struct cifs_ses *ses,
+ 	*pbcc_area = bcc_ptr;
+ }
+ 
++static void ascii_domain_string(char **pbcc_area, struct cifs_ses *ses,
++				const struct nls_table *nls_cp)
++{
++	char *bcc_ptr = *pbcc_area;
++	int len;
++
++	/* copy domain */
++	if (ses->domainName != NULL) {
++		len = strscpy(bcc_ptr, ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
++		if (WARN_ON_ONCE(len < 0))
++			len = CIFS_MAX_DOMAINNAME_LEN - 1;
++		bcc_ptr += len;
++	} /* else we send a null domain name so server will default to its own domain */
++	*bcc_ptr = 0;
++	bcc_ptr++;
++
++	*pbcc_area = bcc_ptr;
++}
++
+ static void unicode_ssetup_strings(char **pbcc_area, struct cifs_ses *ses,
+ 				   const struct nls_table *nls_cp)
+ {
+@@ -776,25 +811,10 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifs_ses *ses,
+ 	*bcc_ptr = 0;
+ 	bcc_ptr++; /* account for null termination */
+ 
+-	/* copy domain */
+-	if (ses->domainName != NULL) {
+-		len = strscpy(bcc_ptr, ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
+-		if (WARN_ON_ONCE(len < 0))
+-			len = CIFS_MAX_DOMAINNAME_LEN - 1;
+-		bcc_ptr += len;
+-	} /* else we send a null domain name so server will default to its own domain */
+-	*bcc_ptr = 0;
+-	bcc_ptr++;
+-
+ 	/* BB check for overflow here */
+ 
+-	strcpy(bcc_ptr, "Linux version ");
+-	bcc_ptr += strlen("Linux version ");
+-	strcpy(bcc_ptr, init_utsname()->release);
+-	bcc_ptr += strlen(init_utsname()->release) + 1;
+-
+-	strcpy(bcc_ptr, CIFS_NETWORK_OPSYS);
+-	bcc_ptr += strlen(CIFS_NETWORK_OPSYS) + 1;
++	ascii_domain_string(&bcc_ptr, ses, nls_cp);
++	ascii_oslm_strings(&bcc_ptr, nls_cp);
+ 
+ 	*pbcc_area = bcc_ptr;
+ }
+@@ -1597,7 +1617,7 @@ sess_auth_kerberos(struct sess_data *sess_data)
+ 	sess_data->iov[1].iov_len = msg->secblob_len;
+ 	pSMB->req.SecurityBlobLength = cpu_to_le16(sess_data->iov[1].iov_len);
+ 
+-	if (ses->capabilities & CAP_UNICODE) {
++	if (pSMB->req.hdr.Flags2 & SMBFLG2_UNICODE) {
+ 		/* unicode strings must be word aligned */
+ 		if (!IS_ALIGNED(sess_data->iov[0].iov_len + sess_data->iov[1].iov_len, 2)) {
+ 			*bcc_ptr = 0;
+@@ -1606,8 +1626,8 @@ sess_auth_kerberos(struct sess_data *sess_data)
+ 		unicode_oslm_strings(&bcc_ptr, sess_data->nls_cp);
+ 		unicode_domain_string(&bcc_ptr, ses, sess_data->nls_cp);
+ 	} else {
+-		/* BB: is this right? */
+-		ascii_ssetup_strings(&bcc_ptr, ses, sess_data->nls_cp);
++		ascii_oslm_strings(&bcc_ptr, sess_data->nls_cp);
++		ascii_domain_string(&bcc_ptr, ses, sess_data->nls_cp);
+ 	}
+ 
+ 	sess_data->iov[2].iov_len = (long) bcc_ptr -
+diff --git a/fs/smb/client/smb1ops.c b/fs/smb/client/smb1ops.c
+index bd791aa54681f6..55cceb82293236 100644
+--- a/fs/smb/client/smb1ops.c
++++ b/fs/smb/client/smb1ops.c
+@@ -597,6 +597,42 @@ static int cifs_query_path_info(const unsigned int xid,
+ 			CIFSSMBClose(xid, tcon, fid.netfid);
+ 	}
+ 
++#ifdef CONFIG_CIFS_XATTR
++	/*
++	 * For WSL CHR and BLK reparse points it is required to fetch
++	 * EA $LXDEV which contains major and minor device numbers.
++	 */
++	if (!rc && data->reparse_point) {
++		struct smb2_file_full_ea_info *ea;
++
++		ea = (struct smb2_file_full_ea_info *)data->wsl.eas;
++		rc = CIFSSMBQAllEAs(xid, tcon, full_path, SMB2_WSL_XATTR_DEV,
++				    &ea->ea_data[SMB2_WSL_XATTR_NAME_LEN + 1],
++				    SMB2_WSL_XATTR_DEV_SIZE, cifs_sb);
++		if (rc == SMB2_WSL_XATTR_DEV_SIZE) {
++			ea->next_entry_offset = cpu_to_le32(0);
++			ea->flags = 0;
++			ea->ea_name_length = SMB2_WSL_XATTR_NAME_LEN;
++			ea->ea_value_length = cpu_to_le16(SMB2_WSL_XATTR_DEV_SIZE);
++			memcpy(&ea->ea_data[0], SMB2_WSL_XATTR_DEV, SMB2_WSL_XATTR_NAME_LEN + 1);
++			data->wsl.eas_len = sizeof(*ea) + SMB2_WSL_XATTR_NAME_LEN + 1 +
++					    SMB2_WSL_XATTR_DEV_SIZE;
++			rc = 0;
++		} else if (rc >= 0) {
++			/* It is an error if EA $LXDEV has wrong size. */
++			rc = -EINVAL;
++		} else {
++			/*
++			 * In all other cases ignore error if fetching
++			 * of EA $LXDEV failed. It is needed only for
++			 * WSL CHR and BLK reparse points and wsl_to_fattr()
++			 * handle the case when EA is missing.
++			 */
++			rc = 0;
++		}
++	}
++#endif
++
+ 	return rc;
+ }
+ 
+diff --git a/fs/smb/server/asn1.c b/fs/smb/server/asn1.c
+index b931a99ab9c85e..5c4c5121fece1c 100644
+--- a/fs/smb/server/asn1.c
++++ b/fs/smb/server/asn1.c
+@@ -104,7 +104,7 @@ int build_spnego_ntlmssp_neg_blob(unsigned char **pbuffer, u16 *buflen,
+ 			oid_len + ntlmssp_len) * 2 +
+ 			neg_result_len + oid_len + ntlmssp_len;
+ 
+-	buf = kmalloc(total_len, GFP_KERNEL);
++	buf = kmalloc(total_len, KSMBD_DEFAULT_GFP);
+ 	if (!buf)
+ 		return -ENOMEM;
+ 
+@@ -140,7 +140,7 @@ int build_spnego_ntlmssp_auth_blob(unsigned char **pbuffer, u16 *buflen,
+ 	int total_len = 4 + compute_asn_hdr_len_bytes(neg_result_len) * 2 +
+ 		neg_result_len;
+ 
+-	buf = kmalloc(total_len, GFP_KERNEL);
++	buf = kmalloc(total_len, KSMBD_DEFAULT_GFP);
+ 	if (!buf)
+ 		return -ENOMEM;
+ 
+@@ -217,7 +217,7 @@ static int ksmbd_neg_token_alloc(void *context, size_t hdrlen,
+ 	if (!vlen)
+ 		return -EINVAL;
+ 
+-	conn->mechToken = kmemdup_nul(value, vlen, GFP_KERNEL);
++	conn->mechToken = kmemdup_nul(value, vlen, KSMBD_DEFAULT_GFP);
+ 	if (!conn->mechToken)
+ 		return -ENOMEM;
+ 
+diff --git a/fs/smb/server/auth.c b/fs/smb/server/auth.c
+index 95449751368314..83caa384974932 100644
+--- a/fs/smb/server/auth.c
++++ b/fs/smb/server/auth.c
+@@ -151,7 +151,7 @@ static int calc_ntlmv2_hash(struct ksmbd_conn *conn, struct ksmbd_session *sess,
+ 
+ 	/* convert user_name to unicode */
+ 	len = strlen(user_name(sess->user));
+-	uniname = kzalloc(2 + UNICODE_LEN(len), GFP_KERNEL);
++	uniname = kzalloc(2 + UNICODE_LEN(len), KSMBD_DEFAULT_GFP);
+ 	if (!uniname) {
+ 		ret = -ENOMEM;
+ 		goto out;
+@@ -175,7 +175,7 @@ static int calc_ntlmv2_hash(struct ksmbd_conn *conn, struct ksmbd_session *sess,
+ 
+ 	/* Convert domain name or conn name to unicode and uppercase */
+ 	len = strlen(dname);
+-	domain = kzalloc(2 + UNICODE_LEN(len), GFP_KERNEL);
++	domain = kzalloc(2 + UNICODE_LEN(len), KSMBD_DEFAULT_GFP);
+ 	if (!domain) {
+ 		ret = -ENOMEM;
+ 		goto out;
+@@ -254,7 +254,7 @@ int ksmbd_auth_ntlmv2(struct ksmbd_conn *conn, struct ksmbd_session *sess,
+ 	}
+ 
+ 	len = CIFS_CRYPTO_KEY_SIZE + blen;
+-	construct = kzalloc(len, GFP_KERNEL);
++	construct = kzalloc(len, KSMBD_DEFAULT_GFP);
+ 	if (!construct) {
+ 		rc = -ENOMEM;
+ 		goto out;
+@@ -361,7 +361,7 @@ int ksmbd_decode_ntlmssp_auth_blob(struct authenticate_message *authblob,
+ 		if (sess_key_len > CIFS_KEY_SIZE)
+ 			return -EINVAL;
+ 
+-		ctx_arc4 = kmalloc(sizeof(*ctx_arc4), GFP_KERNEL);
++		ctx_arc4 = kmalloc(sizeof(*ctx_arc4), KSMBD_DEFAULT_GFP);
+ 		if (!ctx_arc4)
+ 			return -ENOMEM;
+ 
+@@ -451,7 +451,7 @@ ksmbd_build_ntlmssp_challenge_blob(struct challenge_message *chgblob,
+ 
+ 	chgblob->NegotiateFlags = cpu_to_le32(flags);
+ 	len = strlen(ksmbd_netbios_name());
+-	name = kmalloc(2 + UNICODE_LEN(len), GFP_KERNEL);
++	name = kmalloc(2 + UNICODE_LEN(len), KSMBD_DEFAULT_GFP);
+ 	if (!name)
+ 		return -ENOMEM;
+ 
+@@ -1045,7 +1045,7 @@ static struct scatterlist *ksmbd_init_sg(struct kvec *iov, unsigned int nvec,
+ 	if (!nvec)
+ 		return NULL;
+ 
+-	nr_entries = kcalloc(nvec, sizeof(int), GFP_KERNEL);
++	nr_entries = kcalloc(nvec, sizeof(int), KSMBD_DEFAULT_GFP);
+ 	if (!nr_entries)
+ 		return NULL;
+ 
+@@ -1065,7 +1065,8 @@ static struct scatterlist *ksmbd_init_sg(struct kvec *iov, unsigned int nvec,
+ 	/* Add two entries for transform header and signature */
+ 	total_entries += 2;
+ 
+-	sg = kmalloc_array(total_entries, sizeof(struct scatterlist), GFP_KERNEL);
++	sg = kmalloc_array(total_entries, sizeof(struct scatterlist),
++			   KSMBD_DEFAULT_GFP);
+ 	if (!sg) {
+ 		kfree(nr_entries);
+ 		return NULL;
+@@ -1165,7 +1166,7 @@ int ksmbd_crypt_message(struct ksmbd_work *work, struct kvec *iov,
+ 		goto free_ctx;
+ 	}
+ 
+-	req = aead_request_alloc(tfm, GFP_KERNEL);
++	req = aead_request_alloc(tfm, KSMBD_DEFAULT_GFP);
+ 	if (!req) {
+ 		rc = -ENOMEM;
+ 		goto free_ctx;
+@@ -1184,7 +1185,7 @@ int ksmbd_crypt_message(struct ksmbd_work *work, struct kvec *iov,
+ 	}
+ 
+ 	iv_len = crypto_aead_ivsize(tfm);
+-	iv = kzalloc(iv_len, GFP_KERNEL);
++	iv = kzalloc(iv_len, KSMBD_DEFAULT_GFP);
+ 	if (!iv) {
+ 		rc = -ENOMEM;
+ 		goto free_sg;
+diff --git a/fs/smb/server/connection.c b/fs/smb/server/connection.c
+index ab11246ccd8a09..7aaea71a4f2061 100644
+--- a/fs/smb/server/connection.c
++++ b/fs/smb/server/connection.c
+@@ -39,8 +39,10 @@ void ksmbd_conn_free(struct ksmbd_conn *conn)
+ 	xa_destroy(&conn->sessions);
+ 	kvfree(conn->request_buf);
+ 	kfree(conn->preauth_info);
+-	if (atomic_dec_and_test(&conn->refcnt))
++	if (atomic_dec_and_test(&conn->refcnt)) {
++		ksmbd_free_transport(conn->transport);
+ 		kfree(conn);
++	}
+ }
+ 
+ /**
+@@ -52,7 +54,7 @@ struct ksmbd_conn *ksmbd_conn_alloc(void)
+ {
+ 	struct ksmbd_conn *conn;
+ 
+-	conn = kzalloc(sizeof(struct ksmbd_conn), GFP_KERNEL);
++	conn = kzalloc(sizeof(struct ksmbd_conn), KSMBD_DEFAULT_GFP);
+ 	if (!conn)
+ 		return NULL;
+ 
+@@ -369,7 +371,7 @@ int ksmbd_conn_handler_loop(void *p)
+ 		/* 4 for rfc1002 length field */
+ 		/* 1 for implied bcc[0] */
+ 		size = pdu_size + 4 + 1;
+-		conn->request_buf = kvmalloc(size, GFP_KERNEL);
++		conn->request_buf = kvmalloc(size, KSMBD_DEFAULT_GFP);
+ 		if (!conn->request_buf)
+ 			break;
+ 
+diff --git a/fs/smb/server/crypto_ctx.c b/fs/smb/server/crypto_ctx.c
+index 81488d04199da8..ce733dc9a4a35b 100644
+--- a/fs/smb/server/crypto_ctx.c
++++ b/fs/smb/server/crypto_ctx.c
+@@ -89,7 +89,7 @@ static struct shash_desc *alloc_shash_desc(int id)
+ 		return NULL;
+ 
+ 	shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(tfm),
+-			GFP_KERNEL);
++			KSMBD_DEFAULT_GFP);
+ 	if (!shash)
+ 		crypto_free_shash(tfm);
+ 	else
+@@ -133,7 +133,7 @@ static struct ksmbd_crypto_ctx *ksmbd_find_crypto_ctx(void)
+ 		ctx_list.avail_ctx++;
+ 		spin_unlock(&ctx_list.ctx_lock);
+ 
+-		ctx = kzalloc(sizeof(struct ksmbd_crypto_ctx), GFP_KERNEL);
++		ctx = kzalloc(sizeof(struct ksmbd_crypto_ctx), KSMBD_DEFAULT_GFP);
+ 		if (!ctx) {
+ 			spin_lock(&ctx_list.ctx_lock);
+ 			ctx_list.avail_ctx--;
+@@ -258,7 +258,7 @@ int ksmbd_crypto_create(void)
+ 	init_waitqueue_head(&ctx_list.ctx_wait);
+ 	ctx_list.avail_ctx = 1;
+ 
+-	ctx = kzalloc(sizeof(struct ksmbd_crypto_ctx), GFP_KERNEL);
++	ctx = kzalloc(sizeof(struct ksmbd_crypto_ctx), KSMBD_DEFAULT_GFP);
+ 	if (!ctx)
+ 		return -ENOMEM;
+ 	list_add(&ctx->list, &ctx_list.idle_ctx);
+diff --git a/fs/smb/server/glob.h b/fs/smb/server/glob.h
+index d528b20b37a85f..4ea187af234803 100644
+--- a/fs/smb/server/glob.h
++++ b/fs/smb/server/glob.h
+@@ -44,4 +44,6 @@ extern int ksmbd_debug_types;
+ 
+ #define UNICODE_LEN(x)		((x) * 2)
+ 
++#define KSMBD_DEFAULT_GFP	(GFP_KERNEL | __GFP_RETRY_MAYFAIL)
++
+ #endif /* __KSMBD_GLOB_H */
+diff --git a/fs/smb/server/ksmbd_netlink.h b/fs/smb/server/ksmbd_netlink.h
+index 3d01d9d1529341..3f07a612c05b40 100644
+--- a/fs/smb/server/ksmbd_netlink.h
++++ b/fs/smb/server/ksmbd_netlink.h
+@@ -111,7 +111,8 @@ struct ksmbd_startup_request {
+ 	__u32	smb2_max_credits;	/* MAX credits */
+ 	__u32	smbd_max_io_size;	/* smbd read write size */
+ 	__u32	max_connections;	/* Number of maximum simultaneous connections */
+-	__u32	reserved[126];		/* Reserved room */
++	__s8	bind_interfaces_only;
++	__s8	reserved[503];		/* Reserved room */
+ 	__u32	ifc_list_sz;		/* interfaces list size */
+ 	__s8	____payload[];
+ };
+diff --git a/fs/smb/server/ksmbd_work.c b/fs/smb/server/ksmbd_work.c
+index 544d8ccd29b0a0..72b00ca6e45517 100644
+--- a/fs/smb/server/ksmbd_work.c
++++ b/fs/smb/server/ksmbd_work.c
+@@ -18,7 +18,7 @@ static struct workqueue_struct *ksmbd_wq;
+ 
+ struct ksmbd_work *ksmbd_alloc_work_struct(void)
+ {
+-	struct ksmbd_work *work = kmem_cache_zalloc(work_cache, GFP_KERNEL);
++	struct ksmbd_work *work = kmem_cache_zalloc(work_cache, KSMBD_DEFAULT_GFP);
+ 
+ 	if (work) {
+ 		work->compound_fid = KSMBD_NO_FID;
+@@ -29,7 +29,7 @@ struct ksmbd_work *ksmbd_alloc_work_struct(void)
+ 		INIT_LIST_HEAD(&work->aux_read_list);
+ 		work->iov_alloc_cnt = 4;
+ 		work->iov = kcalloc(work->iov_alloc_cnt, sizeof(struct kvec),
+-				    GFP_KERNEL);
++				    KSMBD_DEFAULT_GFP);
+ 		if (!work->iov) {
+ 			kmem_cache_free(work_cache, work);
+ 			work = NULL;
+@@ -111,7 +111,7 @@ static int __ksmbd_iov_pin_rsp(struct ksmbd_work *work, void *ib, int len,
+ 
+ 	if (aux_size) {
+ 		need_iov_cnt++;
+-		ar = kmalloc(sizeof(struct aux_read), GFP_KERNEL);
++		ar = kmalloc(sizeof(struct aux_read), KSMBD_DEFAULT_GFP);
+ 		if (!ar)
+ 			return -ENOMEM;
+ 	}
+@@ -122,7 +122,7 @@ static int __ksmbd_iov_pin_rsp(struct ksmbd_work *work, void *ib, int len,
+ 		work->iov_alloc_cnt += 4;
+ 		new = krealloc(work->iov,
+ 			       sizeof(struct kvec) * work->iov_alloc_cnt,
+-			       GFP_KERNEL | __GFP_ZERO);
++			       KSMBD_DEFAULT_GFP | __GFP_ZERO);
+ 		if (!new) {
+ 			kfree(ar);
+ 			work->iov_alloc_cnt -= 4;
+@@ -166,7 +166,7 @@ int ksmbd_iov_pin_rsp_read(struct ksmbd_work *work, void *ib, int len,
+ 
+ int allocate_interim_rsp_buf(struct ksmbd_work *work)
+ {
+-	work->response_buf = kzalloc(MAX_CIFS_SMALL_BUFFER_SIZE, GFP_KERNEL);
++	work->response_buf = kzalloc(MAX_CIFS_SMALL_BUFFER_SIZE, KSMBD_DEFAULT_GFP);
+ 	if (!work->response_buf)
+ 		return -ENOMEM;
+ 	work->response_sz = MAX_CIFS_SMALL_BUFFER_SIZE;
+diff --git a/fs/smb/server/mgmt/ksmbd_ida.c b/fs/smb/server/mgmt/ksmbd_ida.c
+index a18e27e9e0cd93..0e2ae994ab5257 100644
+--- a/fs/smb/server/mgmt/ksmbd_ida.c
++++ b/fs/smb/server/mgmt/ksmbd_ida.c
+@@ -4,31 +4,32 @@
+  */
+ 
+ #include "ksmbd_ida.h"
++#include "../glob.h"
+ 
+ int ksmbd_acquire_smb2_tid(struct ida *ida)
+ {
+-	return ida_alloc_range(ida, 1, 0xFFFFFFFE, GFP_KERNEL);
++	return ida_alloc_range(ida, 1, 0xFFFFFFFE, KSMBD_DEFAULT_GFP);
+ }
+ 
+ int ksmbd_acquire_smb2_uid(struct ida *ida)
+ {
+ 	int id;
+ 
+-	id = ida_alloc_min(ida, 1, GFP_KERNEL);
++	id = ida_alloc_min(ida, 1, KSMBD_DEFAULT_GFP);
+ 	if (id == 0xFFFE)
+-		id = ida_alloc_min(ida, 1, GFP_KERNEL);
++		id = ida_alloc_min(ida, 1, KSMBD_DEFAULT_GFP);
+ 
+ 	return id;
+ }
+ 
+ int ksmbd_acquire_async_msg_id(struct ida *ida)
+ {
+-	return ida_alloc_min(ida, 1, GFP_KERNEL);
++	return ida_alloc_min(ida, 1, KSMBD_DEFAULT_GFP);
+ }
+ 
+ int ksmbd_acquire_id(struct ida *ida)
+ {
+-	return ida_alloc(ida, GFP_KERNEL);
++	return ida_alloc(ida, KSMBD_DEFAULT_GFP);
+ }
+ 
+ void ksmbd_release_id(struct ida *ida, int id)
+diff --git a/fs/smb/server/mgmt/share_config.c b/fs/smb/server/mgmt/share_config.c
+index d8d03070ae44b4..d3d5f99bdd34ed 100644
+--- a/fs/smb/server/mgmt/share_config.c
++++ b/fs/smb/server/mgmt/share_config.c
+@@ -102,11 +102,11 @@ static int parse_veto_list(struct ksmbd_share_config *share,
+ 		if (!sz)
+ 			break;
+ 
+-		p = kzalloc(sizeof(struct ksmbd_veto_pattern), GFP_KERNEL);
++		p = kzalloc(sizeof(struct ksmbd_veto_pattern), KSMBD_DEFAULT_GFP);
+ 		if (!p)
+ 			return -ENOMEM;
+ 
+-		p->pattern = kstrdup(veto_list, GFP_KERNEL);
++		p->pattern = kstrdup(veto_list, KSMBD_DEFAULT_GFP);
+ 		if (!p->pattern) {
+ 			kfree(p);
+ 			return -ENOMEM;
+@@ -150,14 +150,14 @@ static struct ksmbd_share_config *share_config_request(struct ksmbd_work *work,
+ 			goto out;
+ 	}
+ 
+-	share = kzalloc(sizeof(struct ksmbd_share_config), GFP_KERNEL);
++	share = kzalloc(sizeof(struct ksmbd_share_config), KSMBD_DEFAULT_GFP);
+ 	if (!share)
+ 		goto out;
+ 
+ 	share->flags = resp->flags;
+ 	atomic_set(&share->refcount, 1);
+ 	INIT_LIST_HEAD(&share->veto_list);
+-	share->name = kstrdup(name, GFP_KERNEL);
++	share->name = kstrdup(name, KSMBD_DEFAULT_GFP);
+ 
+ 	if (!test_share_config_flag(share, KSMBD_SHARE_FLAG_PIPE)) {
+ 		int path_len = PATH_MAX;
+@@ -166,7 +166,7 @@ static struct ksmbd_share_config *share_config_request(struct ksmbd_work *work,
+ 			path_len = resp->payload_sz - resp->veto_list_sz;
+ 
+ 		share->path = kstrndup(ksmbd_share_config_path(resp), path_len,
+-				      GFP_KERNEL);
++				      KSMBD_DEFAULT_GFP);
+ 		if (share->path) {
+ 			share->path_sz = strlen(share->path);
+ 			while (share->path_sz > 1 &&
+diff --git a/fs/smb/server/mgmt/tree_connect.c b/fs/smb/server/mgmt/tree_connect.c
+index 94a52a75014a43..ecfc5750867124 100644
+--- a/fs/smb/server/mgmt/tree_connect.c
++++ b/fs/smb/server/mgmt/tree_connect.c
+@@ -31,7 +31,8 @@ ksmbd_tree_conn_connect(struct ksmbd_work *work, const char *share_name)
+ 	if (!sc)
+ 		return status;
+ 
+-	tree_conn = kzalloc(sizeof(struct ksmbd_tree_connect), GFP_KERNEL);
++	tree_conn = kzalloc(sizeof(struct ksmbd_tree_connect),
++			    KSMBD_DEFAULT_GFP);
+ 	if (!tree_conn) {
+ 		status.ret = -ENOMEM;
+ 		goto out_error;
+@@ -80,7 +81,7 @@ ksmbd_tree_conn_connect(struct ksmbd_work *work, const char *share_name)
+ 	init_waitqueue_head(&tree_conn->refcount_q);
+ 
+ 	ret = xa_err(xa_store(&sess->tree_conns, tree_conn->id, tree_conn,
+-			      GFP_KERNEL));
++			      KSMBD_DEFAULT_GFP));
+ 	if (ret) {
+ 		status.ret = -ENOMEM;
+ 		goto out_error;
+diff --git a/fs/smb/server/mgmt/user_config.c b/fs/smb/server/mgmt/user_config.c
+index 421a4a95e216aa..56c9a38ca87890 100644
+--- a/fs/smb/server/mgmt/user_config.c
++++ b/fs/smb/server/mgmt/user_config.c
+@@ -36,16 +36,16 @@ struct ksmbd_user *ksmbd_alloc_user(struct ksmbd_login_response *resp,
+ {
+ 	struct ksmbd_user *user;
+ 
+-	user = kmalloc(sizeof(struct ksmbd_user), GFP_KERNEL);
++	user = kmalloc(sizeof(struct ksmbd_user), KSMBD_DEFAULT_GFP);
+ 	if (!user)
+ 		return NULL;
+ 
+-	user->name = kstrdup(resp->account, GFP_KERNEL);
++	user->name = kstrdup(resp->account, KSMBD_DEFAULT_GFP);
+ 	user->flags = resp->status;
+ 	user->gid = resp->gid;
+ 	user->uid = resp->uid;
+ 	user->passkey_sz = resp->hash_sz;
+-	user->passkey = kmalloc(resp->hash_sz, GFP_KERNEL);
++	user->passkey = kmalloc(resp->hash_sz, KSMBD_DEFAULT_GFP);
+ 	if (user->passkey)
+ 		memcpy(user->passkey, resp->hash, resp->hash_sz);
+ 
+@@ -64,7 +64,7 @@ struct ksmbd_user *ksmbd_alloc_user(struct ksmbd_login_response *resp,
+ 
+ 		user->sgid = kmemdup(resp_ext->____payload,
+ 				     resp_ext->ngroups * sizeof(gid_t),
+-				     GFP_KERNEL);
++				     KSMBD_DEFAULT_GFP);
+ 		if (!user->sgid)
+ 			goto err_free;
+ 
+diff --git a/fs/smb/server/mgmt/user_session.c b/fs/smb/server/mgmt/user_session.c
+index f83daf72f877e2..3f45f28f6f0f8e 100644
+--- a/fs/smb/server/mgmt/user_session.c
++++ b/fs/smb/server/mgmt/user_session.c
+@@ -98,7 +98,7 @@ int ksmbd_session_rpc_open(struct ksmbd_session *sess, char *rpc_name)
+ 	if (!method)
+ 		return -EINVAL;
+ 
+-	entry = kzalloc(sizeof(struct ksmbd_session_rpc), GFP_KERNEL);
++	entry = kzalloc(sizeof(struct ksmbd_session_rpc), KSMBD_DEFAULT_GFP);
+ 	if (!entry)
+ 		return -ENOMEM;
+ 
+@@ -106,7 +106,7 @@ int ksmbd_session_rpc_open(struct ksmbd_session *sess, char *rpc_name)
+ 	entry->id = ksmbd_ipc_id_alloc();
+ 	if (entry->id < 0)
+ 		goto free_entry;
+-	old = xa_store(&sess->rpc_handle_list, entry->id, entry, GFP_KERNEL);
++	old = xa_store(&sess->rpc_handle_list, entry->id, entry, KSMBD_DEFAULT_GFP);
+ 	if (xa_is_err(old))
+ 		goto free_id;
+ 
+@@ -201,7 +201,7 @@ int ksmbd_session_register(struct ksmbd_conn *conn,
+ 	sess->dialect = conn->dialect;
+ 	memcpy(sess->ClientGUID, conn->ClientGUID, SMB2_CLIENT_GUID_SIZE);
+ 	ksmbd_expire_session(conn);
+-	return xa_err(xa_store(&conn->sessions, sess->id, sess, GFP_KERNEL));
++	return xa_err(xa_store(&conn->sessions, sess->id, sess, KSMBD_DEFAULT_GFP));
+ }
+ 
+ static int ksmbd_chann_del(struct ksmbd_conn *conn, struct ksmbd_session *sess)
+@@ -339,7 +339,7 @@ struct preauth_session *ksmbd_preauth_session_alloc(struct ksmbd_conn *conn,
+ {
+ 	struct preauth_session *sess;
+ 
+-	sess = kmalloc(sizeof(struct preauth_session), GFP_KERNEL);
++	sess = kmalloc(sizeof(struct preauth_session), KSMBD_DEFAULT_GFP);
+ 	if (!sess)
+ 		return NULL;
+ 
+@@ -423,7 +423,7 @@ static struct ksmbd_session *__session_create(int protocol)
+ 	if (protocol != CIFDS_SESSION_FLAG_SMB2)
+ 		return NULL;
+ 
+-	sess = kzalloc(sizeof(struct ksmbd_session), GFP_KERNEL);
++	sess = kzalloc(sizeof(struct ksmbd_session), KSMBD_DEFAULT_GFP);
+ 	if (!sess)
+ 		return NULL;
+ 
+diff --git a/fs/smb/server/misc.c b/fs/smb/server/misc.c
+index 1a5faa6f6e7bc3..cb2a11ffb23fe3 100644
+--- a/fs/smb/server/misc.c
++++ b/fs/smb/server/misc.c
+@@ -165,7 +165,7 @@ char *convert_to_nt_pathname(struct ksmbd_share_config *share,
+ 	char *pathname, *ab_pathname, *nt_pathname;
+ 	int share_path_len = share->path_sz;
+ 
+-	pathname = kmalloc(PATH_MAX, GFP_KERNEL);
++	pathname = kmalloc(PATH_MAX, KSMBD_DEFAULT_GFP);
+ 	if (!pathname)
+ 		return ERR_PTR(-EACCES);
+ 
+@@ -180,7 +180,8 @@ char *convert_to_nt_pathname(struct ksmbd_share_config *share,
+ 		goto free_pathname;
+ 	}
+ 
+-	nt_pathname = kzalloc(strlen(&ab_pathname[share_path_len]) + 2, GFP_KERNEL);
++	nt_pathname = kzalloc(strlen(&ab_pathname[share_path_len]) + 2,
++			      KSMBD_DEFAULT_GFP);
+ 	if (!nt_pathname) {
+ 		nt_pathname = ERR_PTR(-ENOMEM);
+ 		goto free_pathname;
+@@ -232,7 +233,7 @@ char *ksmbd_casefold_sharename(struct unicode_map *um, const char *name)
+ 	char *cf_name;
+ 	int cf_len;
+ 
+-	cf_name = kzalloc(KSMBD_REQ_MAX_SHARE_NAME, GFP_KERNEL);
++	cf_name = kzalloc(KSMBD_REQ_MAX_SHARE_NAME, KSMBD_DEFAULT_GFP);
+ 	if (!cf_name)
+ 		return ERR_PTR(-ENOMEM);
+ 
+@@ -294,7 +295,7 @@ char *convert_to_unix_name(struct ksmbd_share_config *share, const char *name)
+ 
+ 	path_len = share->path_sz;
+ 	name_len = strlen(name);
+-	new_name = kmalloc(path_len + name_len + 2, GFP_KERNEL);
++	new_name = kmalloc(path_len + name_len + 2, KSMBD_DEFAULT_GFP);
+ 	if (!new_name)
+ 		return new_name;
+ 
+@@ -320,7 +321,7 @@ char *ksmbd_convert_dir_info_name(struct ksmbd_dir_info *d_info,
+ 	if (!sz)
+ 		return NULL;
+ 
+-	conv = kmalloc(sz, GFP_KERNEL);
++	conv = kmalloc(sz, KSMBD_DEFAULT_GFP);
+ 	if (!conv)
+ 		return NULL;
+ 
+diff --git a/fs/smb/server/ndr.c b/fs/smb/server/ndr.c
+index 3507d8f8907499..58d71560f626b4 100644
+--- a/fs/smb/server/ndr.c
++++ b/fs/smb/server/ndr.c
+@@ -18,7 +18,7 @@ static int try_to_realloc_ndr_blob(struct ndr *n, size_t sz)
+ {
+ 	char *data;
+ 
+-	data = krealloc(n->data, n->offset + sz + 1024, GFP_KERNEL);
++	data = krealloc(n->data, n->offset + sz + 1024, KSMBD_DEFAULT_GFP);
+ 	if (!data)
+ 		return -ENOMEM;
+ 
+@@ -174,7 +174,7 @@ int ndr_encode_dos_attr(struct ndr *n, struct xattr_dos_attrib *da)
+ 
+ 	n->offset = 0;
+ 	n->length = 1024;
+-	n->data = kzalloc(n->length, GFP_KERNEL);
++	n->data = kzalloc(n->length, KSMBD_DEFAULT_GFP);
+ 	if (!n->data)
+ 		return -ENOMEM;
+ 
+@@ -350,7 +350,7 @@ int ndr_encode_posix_acl(struct ndr *n,
+ 
+ 	n->offset = 0;
+ 	n->length = 1024;
+-	n->data = kzalloc(n->length, GFP_KERNEL);
++	n->data = kzalloc(n->length, KSMBD_DEFAULT_GFP);
+ 	if (!n->data)
+ 		return -ENOMEM;
+ 
+@@ -401,7 +401,7 @@ int ndr_encode_v4_ntacl(struct ndr *n, struct xattr_ntacl *acl)
+ 
+ 	n->offset = 0;
+ 	n->length = 2048;
+-	n->data = kzalloc(n->length, GFP_KERNEL);
++	n->data = kzalloc(n->length, KSMBD_DEFAULT_GFP);
+ 	if (!n->data)
+ 		return -ENOMEM;
+ 
+@@ -505,7 +505,7 @@ int ndr_decode_v4_ntacl(struct ndr *n, struct xattr_ntacl *acl)
+ 		return ret;
+ 
+ 	acl->sd_size = n->length - n->offset;
+-	acl->sd_buf = kzalloc(acl->sd_size, GFP_KERNEL);
++	acl->sd_buf = kzalloc(acl->sd_size, KSMBD_DEFAULT_GFP);
+ 	if (!acl->sd_buf)
+ 		return -ENOMEM;
+ 
+diff --git a/fs/smb/server/oplock.c b/fs/smb/server/oplock.c
+index e2ba0dadb5fbf7..81a29857b1e32f 100644
+--- a/fs/smb/server/oplock.c
++++ b/fs/smb/server/oplock.c
+@@ -34,7 +34,7 @@ static struct oplock_info *alloc_opinfo(struct ksmbd_work *work,
+ 	struct ksmbd_session *sess = work->sess;
+ 	struct oplock_info *opinfo;
+ 
+-	opinfo = kzalloc(sizeof(struct oplock_info), GFP_KERNEL);
++	opinfo = kzalloc(sizeof(struct oplock_info), KSMBD_DEFAULT_GFP);
+ 	if (!opinfo)
+ 		return NULL;
+ 
+@@ -93,7 +93,7 @@ static int alloc_lease(struct oplock_info *opinfo, struct lease_ctx_info *lctx)
+ {
+ 	struct lease *lease;
+ 
+-	lease = kmalloc(sizeof(struct lease), GFP_KERNEL);
++	lease = kmalloc(sizeof(struct lease), KSMBD_DEFAULT_GFP);
+ 	if (!lease)
+ 		return -ENOMEM;
+ 
+@@ -701,7 +701,7 @@ static int smb2_oplock_break_noti(struct oplock_info *opinfo)
+ 	if (!work)
+ 		return -ENOMEM;
+ 
+-	br_info = kmalloc(sizeof(struct oplock_break_info), GFP_KERNEL);
++	br_info = kmalloc(sizeof(struct oplock_break_info), KSMBD_DEFAULT_GFP);
+ 	if (!br_info) {
+ 		ksmbd_free_work_struct(work);
+ 		return -ENOMEM;
+@@ -806,7 +806,7 @@ static int smb2_lease_break_noti(struct oplock_info *opinfo)
+ 	if (!work)
+ 		return -ENOMEM;
+ 
+-	br_info = kmalloc(sizeof(struct lease_break_info), GFP_KERNEL);
++	br_info = kmalloc(sizeof(struct lease_break_info), KSMBD_DEFAULT_GFP);
+ 	if (!br_info) {
+ 		ksmbd_free_work_struct(work);
+ 		return -ENOMEM;
+@@ -1049,7 +1049,7 @@ static int add_lease_global_list(struct oplock_info *opinfo)
+ 	}
+ 	read_unlock(&lease_list_lock);
+ 
+-	lb = kmalloc(sizeof(struct lease_table), GFP_KERNEL);
++	lb = kmalloc(sizeof(struct lease_table), KSMBD_DEFAULT_GFP);
+ 	if (!lb)
+ 		return -ENOMEM;
+ 
+@@ -1487,7 +1487,7 @@ struct lease_ctx_info *parse_lease_state(void *open_req)
+ 	if (IS_ERR_OR_NULL(cc))
+ 		return NULL;
+ 
+-	lreq = kzalloc(sizeof(struct lease_ctx_info), GFP_KERNEL);
++	lreq = kzalloc(sizeof(struct lease_ctx_info), KSMBD_DEFAULT_GFP);
+ 	if (!lreq)
+ 		return NULL;
+ 
+diff --git a/fs/smb/server/server.c b/fs/smb/server/server.c
+index d523b860236ab3..ab533c6029879f 100644
+--- a/fs/smb/server/server.c
++++ b/fs/smb/server/server.c
+@@ -47,7 +47,7 @@ static int ___server_conf_set(int idx, char *val)
+ 		return -EINVAL;
+ 
+ 	kfree(server_conf.conf[idx]);
+-	server_conf.conf[idx] = kstrdup(val, GFP_KERNEL);
++	server_conf.conf[idx] = kstrdup(val, KSMBD_DEFAULT_GFP);
+ 	if (!server_conf.conf[idx])
+ 		return -ENOMEM;
+ 	return 0;
+@@ -404,7 +404,7 @@ static int __queue_ctrl_work(int type)
+ {
+ 	struct server_ctrl_struct *ctrl;
+ 
+-	ctrl = kmalloc(sizeof(struct server_ctrl_struct), GFP_KERNEL);
++	ctrl = kmalloc(sizeof(struct server_ctrl_struct), KSMBD_DEFAULT_GFP);
+ 	if (!ctrl)
+ 		return -ENOMEM;
+ 
+diff --git a/fs/smb/server/server.h b/fs/smb/server/server.h
+index 94187628ff089f..995555febe7d16 100644
+--- a/fs/smb/server/server.h
++++ b/fs/smb/server/server.h
+@@ -46,6 +46,7 @@ struct ksmbd_server_config {
+ 
+ 	char			*conf[SERVER_CONF_WORK_GROUP + 1];
+ 	struct task_struct	*dh_task;
++	bool			bind_interfaces_only;
+ };
+ 
+ extern struct ksmbd_server_config server_conf;
+diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
+index 129517a0c5c739..6b9286c9634391 100644
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -38,6 +38,7 @@
+ #include "mgmt/user_session.h"
+ #include "mgmt/ksmbd_ida.h"
+ #include "ndr.h"
++#include "transport_tcp.h"
+ 
+ static void __wbuf(struct ksmbd_work *work, void **req, void **rsp)
+ {
+@@ -553,7 +554,7 @@ int smb2_allocate_rsp_buf(struct ksmbd_work *work)
+ 	if (le32_to_cpu(hdr->NextCommand) > 0)
+ 		sz = large_sz;
+ 
+-	work->response_buf = kvzalloc(sz, GFP_KERNEL);
++	work->response_buf = kvzalloc(sz, KSMBD_DEFAULT_GFP);
+ 	if (!work->response_buf)
+ 		return -ENOMEM;
+ 
+@@ -1150,7 +1151,7 @@ int smb2_handle_negotiate(struct ksmbd_work *work)
+ 	case SMB311_PROT_ID:
+ 		conn->preauth_info =
+ 			kzalloc(sizeof(struct preauth_integrity_info),
+-				GFP_KERNEL);
++				KSMBD_DEFAULT_GFP);
+ 		if (!conn->preauth_info) {
+ 			rc = -ENOMEM;
+ 			rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+@@ -1272,7 +1273,7 @@ static int alloc_preauth_hash(struct ksmbd_session *sess,
+ 		return -ENOMEM;
+ 
+ 	sess->Preauth_HashValue = kmemdup(conn->preauth_info->Preauth_HashValue,
+-					  PREAUTH_HASHVALUE_SIZE, GFP_KERNEL);
++					  PREAUTH_HASHVALUE_SIZE, KSMBD_DEFAULT_GFP);
+ 	if (!sess->Preauth_HashValue)
+ 		return -ENOMEM;
+ 
+@@ -1358,7 +1359,7 @@ static int ntlm_negotiate(struct ksmbd_work *work,
+ 	sz = sizeof(struct challenge_message);
+ 	sz += (strlen(ksmbd_netbios_name()) * 2 + 1 + 4) * 6;
+ 
+-	neg_blob = kzalloc(sz, GFP_KERNEL);
++	neg_blob = kzalloc(sz, KSMBD_DEFAULT_GFP);
+ 	if (!neg_blob)
+ 		return -ENOMEM;
+ 
+@@ -1549,12 +1550,12 @@ static int ntlm_authenticate(struct ksmbd_work *work,
+ 	if (conn->dialect >= SMB30_PROT_ID) {
+ 		chann = lookup_chann_list(sess, conn);
+ 		if (!chann) {
+-			chann = kmalloc(sizeof(struct channel), GFP_KERNEL);
++			chann = kmalloc(sizeof(struct channel), KSMBD_DEFAULT_GFP);
+ 			if (!chann)
+ 				return -ENOMEM;
+ 
+ 			chann->conn = conn;
+-			xa_store(&sess->ksmbd_chann_list, (long)conn, chann, GFP_KERNEL);
++			xa_store(&sess->ksmbd_chann_list, (long)conn, chann, KSMBD_DEFAULT_GFP);
+ 		}
+ 	}
+ 
+@@ -1632,12 +1633,12 @@ static int krb5_authenticate(struct ksmbd_work *work,
+ 	if (conn->dialect >= SMB30_PROT_ID) {
+ 		chann = lookup_chann_list(sess, conn);
+ 		if (!chann) {
+-			chann = kmalloc(sizeof(struct channel), GFP_KERNEL);
++			chann = kmalloc(sizeof(struct channel), KSMBD_DEFAULT_GFP);
+ 			if (!chann)
+ 				return -ENOMEM;
+ 
+ 			chann->conn = conn;
+-			xa_store(&sess->ksmbd_chann_list, (long)conn, chann, GFP_KERNEL);
++			xa_store(&sess->ksmbd_chann_list, (long)conn, chann, KSMBD_DEFAULT_GFP);
+ 		}
+ 	}
+ 
+@@ -2356,7 +2357,7 @@ static int smb2_set_ea(struct smb2_ea_info *eabuf, unsigned int buf_len,
+ 			le16_to_cpu(eabuf->EaValueLength))
+ 		return -EINVAL;
+ 
+-	attr_name = kmalloc(XATTR_NAME_MAX + 1, GFP_KERNEL);
++	attr_name = kmalloc(XATTR_NAME_MAX + 1, KSMBD_DEFAULT_GFP);
+ 	if (!attr_name)
+ 		return -ENOMEM;
+ 
+@@ -2928,7 +2929,7 @@ int smb2_open(struct ksmbd_work *work)
+ 			goto err_out2;
+ 		}
+ 	} else {
+-		name = kstrdup("", GFP_KERNEL);
++		name = kstrdup("", KSMBD_DEFAULT_GFP);
+ 		if (!name) {
+ 			rc = -ENOMEM;
+ 			goto err_out2;
+@@ -3369,7 +3370,7 @@ int smb2_open(struct ksmbd_work *work)
+ 							sizeof(struct smb_sid) * 3 +
+ 							sizeof(struct smb_acl) +
+ 							sizeof(struct smb_ace) * ace_num * 2,
+-							GFP_KERNEL);
++							KSMBD_DEFAULT_GFP);
+ 					if (!pntsd) {
+ 						posix_acl_release(fattr.cf_acls);
+ 						posix_acl_release(fattr.cf_dacls);
+@@ -5007,7 +5008,7 @@ static int get_file_stream_info(struct ksmbd_work *work,
+ 
+ 		/* plus : size */
+ 		streamlen += 1;
+-		stream_buf = kmalloc(streamlen + 1, GFP_KERNEL);
++		stream_buf = kmalloc(streamlen + 1, KSMBD_DEFAULT_GFP);
+ 		if (!stream_buf)
+ 			break;
+ 
+@@ -6002,7 +6003,7 @@ static int smb2_create_link(struct ksmbd_work *work,
+ 		return -EINVAL;
+ 
+ 	ksmbd_debug(SMB, "setting FILE_LINK_INFORMATION\n");
+-	pathname = kmalloc(PATH_MAX, GFP_KERNEL);
++	pathname = kmalloc(PATH_MAX, KSMBD_DEFAULT_GFP);
+ 	if (!pathname)
+ 		return -ENOMEM;
+ 
+@@ -6562,7 +6563,7 @@ static noinline int smb2_read_pipe(struct ksmbd_work *work)
+ 		}
+ 
+ 		aux_payload_buf =
+-			kvmalloc(rpc_resp->payload_sz, GFP_KERNEL);
++			kvmalloc(rpc_resp->payload_sz, KSMBD_DEFAULT_GFP);
+ 		if (!aux_payload_buf) {
+ 			err = -ENOMEM;
+ 			goto out;
+@@ -6745,7 +6746,7 @@ int smb2_read(struct ksmbd_work *work)
+ 	ksmbd_debug(SMB, "filename %pD, offset %lld, len %zu\n",
+ 		    fp->filp, offset, length);
+ 
+-	aux_payload_buf = kvzalloc(length, GFP_KERNEL);
++	aux_payload_buf = kvzalloc(length, KSMBD_DEFAULT_GFP);
+ 	if (!aux_payload_buf) {
+ 		err = -ENOMEM;
+ 		goto out;
+@@ -6897,7 +6898,7 @@ static ssize_t smb2_write_rdma_channel(struct ksmbd_work *work,
+ 	int ret;
+ 	ssize_t nbytes;
+ 
+-	data_buf = kvzalloc(length, GFP_KERNEL);
++	data_buf = kvzalloc(length, KSMBD_DEFAULT_GFP);
+ 	if (!data_buf)
+ 		return -ENOMEM;
+ 
+@@ -7228,7 +7229,7 @@ static struct ksmbd_lock *smb2_lock_init(struct file_lock *flock,
+ {
+ 	struct ksmbd_lock *lock;
+ 
+-	lock = kzalloc(sizeof(struct ksmbd_lock), GFP_KERNEL);
++	lock = kzalloc(sizeof(struct ksmbd_lock), KSMBD_DEFAULT_GFP);
+ 	if (!lock)
+ 		return NULL;
+ 
+@@ -7496,7 +7497,7 @@ int smb2_lock(struct ksmbd_work *work)
+ 					    "would have to wait for getting lock\n");
+ 				list_add(&smb_lock->llist, &rollback_list);
+ 
+-				argv = kmalloc(sizeof(void *), GFP_KERNEL);
++				argv = kmalloc(sizeof(void *), KSMBD_DEFAULT_GFP);
+ 				if (!argv) {
+ 					err = -ENOMEM;
+ 					goto out;
+@@ -7771,6 +7772,9 @@ static int fsctl_query_iface_info_ioctl(struct ksmbd_conn *conn,
+ 		if (netdev->type == ARPHRD_LOOPBACK)
+ 			continue;
+ 
++		if (!ksmbd_find_netdev_name_iface_list(netdev->name))
++			continue;
++
+ 		flags = dev_get_flags(netdev);
+ 		if (!(flags & IFF_RUNNING))
+ 			continue;
+@@ -8990,7 +8994,7 @@ int smb3_encrypt_resp(struct ksmbd_work *work)
+ 	int rc = -ENOMEM;
+ 	void *tr_buf;
+ 
+-	tr_buf = kzalloc(sizeof(struct smb2_transform_hdr) + 4, GFP_KERNEL);
++	tr_buf = kzalloc(sizeof(struct smb2_transform_hdr) + 4, KSMBD_DEFAULT_GFP);
+ 	if (!tr_buf)
+ 		return rc;
+ 
+diff --git a/fs/smb/server/smb_common.c b/fs/smb/server/smb_common.c
+index af8e24163bf261..191df59748e003 100644
+--- a/fs/smb/server/smb_common.c
++++ b/fs/smb/server/smb_common.c
+@@ -358,7 +358,7 @@ static int smb1_check_user_session(struct ksmbd_work *work)
+ static int smb1_allocate_rsp_buf(struct ksmbd_work *work)
+ {
+ 	work->response_buf = kzalloc(MAX_CIFS_SMALL_BUFFER_SIZE,
+-			GFP_KERNEL);
++			KSMBD_DEFAULT_GFP);
+ 	work->response_sz = MAX_CIFS_SMALL_BUFFER_SIZE;
+ 
+ 	if (!work->response_buf) {
+diff --git a/fs/smb/server/smbacl.c b/fs/smb/server/smbacl.c
+index 376ae68144afa0..5aa7a66334d93d 100644
+--- a/fs/smb/server/smbacl.c
++++ b/fs/smb/server/smbacl.c
+@@ -350,10 +350,10 @@ int init_acl_state(struct posix_acl_state *state, u16 cnt)
+ 	 */
+ 	alloc = sizeof(struct posix_ace_state_array)
+ 		+ cnt * sizeof(struct posix_user_ace_state);
+-	state->users = kzalloc(alloc, GFP_KERNEL);
++	state->users = kzalloc(alloc, KSMBD_DEFAULT_GFP);
+ 	if (!state->users)
+ 		return -ENOMEM;
+-	state->groups = kzalloc(alloc, GFP_KERNEL);
++	state->groups = kzalloc(alloc, KSMBD_DEFAULT_GFP);
+ 	if (!state->groups) {
+ 		kfree(state->users);
+ 		return -ENOMEM;
+@@ -417,7 +417,7 @@ static void parse_dacl(struct mnt_idmap *idmap,
+ 		return;
+ 	}
+ 
+-	ppace = kmalloc_array(num_aces, sizeof(struct smb_ace *), GFP_KERNEL);
++	ppace = kmalloc_array(num_aces, sizeof(struct smb_ace *), KSMBD_DEFAULT_GFP);
+ 	if (!ppace) {
+ 		free_acl_state(&default_acl_state);
+ 		free_acl_state(&acl_state);
+@@ -561,7 +561,7 @@ static void parse_dacl(struct mnt_idmap *idmap,
+ 		if (IS_ENABLED(CONFIG_FS_POSIX_ACL)) {
+ 			fattr->cf_acls =
+ 				posix_acl_alloc(acl_state.users->n +
+-					acl_state.groups->n + 4, GFP_KERNEL);
++					acl_state.groups->n + 4, KSMBD_DEFAULT_GFP);
+ 			if (fattr->cf_acls) {
+ 				cf_pace = fattr->cf_acls->a_entries;
+ 				posix_state_to_acl(&acl_state, cf_pace);
+@@ -575,7 +575,7 @@ static void parse_dacl(struct mnt_idmap *idmap,
+ 		if (IS_ENABLED(CONFIG_FS_POSIX_ACL)) {
+ 			fattr->cf_dacls =
+ 				posix_acl_alloc(default_acl_state.users->n +
+-				default_acl_state.groups->n + 4, GFP_KERNEL);
++				default_acl_state.groups->n + 4, KSMBD_DEFAULT_GFP);
+ 			if (fattr->cf_dacls) {
+ 				cf_pdace = fattr->cf_dacls->a_entries;
+ 				posix_state_to_acl(&default_acl_state, cf_pdace);
+@@ -603,7 +603,7 @@ static void set_posix_acl_entries_dacl(struct mnt_idmap *idmap,
+ 	for (i = 0; i < fattr->cf_acls->a_count; i++, pace++) {
+ 		int flags = 0;
+ 
+-		sid = kmalloc(sizeof(struct smb_sid), GFP_KERNEL);
++		sid = kmalloc(sizeof(struct smb_sid), KSMBD_DEFAULT_GFP);
+ 		if (!sid)
+ 			break;
+ 
+@@ -670,7 +670,7 @@ static void set_posix_acl_entries_dacl(struct mnt_idmap *idmap,
+ 
+ 	pace = fattr->cf_dacls->a_entries;
+ 	for (i = 0; i < fattr->cf_dacls->a_count; i++, pace++) {
+-		sid = kmalloc(sizeof(struct smb_sid), GFP_KERNEL);
++		sid = kmalloc(sizeof(struct smb_sid), KSMBD_DEFAULT_GFP);
+ 		if (!sid)
+ 			break;
+ 
+@@ -930,7 +930,7 @@ int build_sec_desc(struct mnt_idmap *idmap,
+ 	gid_t gid;
+ 	unsigned int sid_type = SIDOWNER;
+ 
+-	nowner_sid_ptr = kmalloc(sizeof(struct smb_sid), GFP_KERNEL);
++	nowner_sid_ptr = kmalloc(sizeof(struct smb_sid), KSMBD_DEFAULT_GFP);
+ 	if (!nowner_sid_ptr)
+ 		return -ENOMEM;
+ 
+@@ -939,7 +939,7 @@ int build_sec_desc(struct mnt_idmap *idmap,
+ 		sid_type = SIDUNIX_USER;
+ 	id_to_sid(uid, sid_type, nowner_sid_ptr);
+ 
+-	ngroup_sid_ptr = kmalloc(sizeof(struct smb_sid), GFP_KERNEL);
++	ngroup_sid_ptr = kmalloc(sizeof(struct smb_sid), KSMBD_DEFAULT_GFP);
+ 	if (!ngroup_sid_ptr) {
+ 		kfree(nowner_sid_ptr);
+ 		return -ENOMEM;
+@@ -1062,7 +1062,8 @@ int smb_inherit_dacl(struct ksmbd_conn *conn,
+ 		goto free_parent_pntsd;
+ 	}
+ 
+-	aces_base = kmalloc(sizeof(struct smb_ace) * num_aces * 2, GFP_KERNEL);
++	aces_base = kmalloc(sizeof(struct smb_ace) * num_aces * 2,
++			    KSMBD_DEFAULT_GFP);
+ 	if (!aces_base) {
+ 		rc = -ENOMEM;
+ 		goto free_parent_pntsd;
+@@ -1156,7 +1157,7 @@ int smb_inherit_dacl(struct ksmbd_conn *conn,
+ 		pntsd_alloc_size = sizeof(struct smb_ntsd) + powner_sid_size +
+ 			pgroup_sid_size + sizeof(struct smb_acl) + nt_size;
+ 
+-		pntsd = kzalloc(pntsd_alloc_size, GFP_KERNEL);
++		pntsd = kzalloc(pntsd_alloc_size, KSMBD_DEFAULT_GFP);
+ 		if (!pntsd) {
+ 			rc = -ENOMEM;
+ 			goto free_aces_base;
+diff --git a/fs/smb/server/transport_ipc.c b/fs/smb/server/transport_ipc.c
+index 9b3c68014aee28..2da2a5f6b983a5 100644
+--- a/fs/smb/server/transport_ipc.c
++++ b/fs/smb/server/transport_ipc.c
+@@ -244,7 +244,7 @@ static struct ksmbd_ipc_msg *ipc_msg_alloc(size_t sz)
+ 	struct ksmbd_ipc_msg *msg;
+ 	size_t msg_sz = sz + sizeof(struct ksmbd_ipc_msg);
+ 
+-	msg = kvzalloc(msg_sz, GFP_KERNEL);
++	msg = kvzalloc(msg_sz, KSMBD_DEFAULT_GFP);
+ 	if (msg)
+ 		msg->sz = sz;
+ 	return msg;
+@@ -284,7 +284,7 @@ static int handle_response(int type, void *payload, size_t sz)
+ 			continue;
+ 		}
+ 
+-		entry->response = kvzalloc(sz, GFP_KERNEL);
++		entry->response = kvzalloc(sz, KSMBD_DEFAULT_GFP);
+ 		if (!entry->response) {
+ 			ret = -ENOMEM;
+ 			break;
+@@ -338,6 +338,7 @@ static int ipc_server_config_on_startup(struct ksmbd_startup_request *req)
+ 	ret = ksmbd_set_netbios_name(req->netbios_name);
+ 	ret |= ksmbd_set_server_string(req->server_string);
+ 	ret |= ksmbd_set_work_group(req->work_group);
++	server_conf.bind_interfaces_only = req->bind_interfaces_only;
+ 	ret |= ksmbd_tcp_set_interfaces(KSMBD_STARTUP_CONFIG_INTERFACES(req),
+ 					req->ifc_list_sz);
+ out:
+@@ -453,7 +454,7 @@ static int ipc_msg_send(struct ksmbd_ipc_msg *msg)
+ 	if (!ksmbd_tools_pid)
+ 		return ret;
+ 
+-	skb = genlmsg_new(msg->sz, GFP_KERNEL);
++	skb = genlmsg_new(msg->sz, KSMBD_DEFAULT_GFP);
+ 	if (!skb)
+ 		return -ENOMEM;
+ 
+diff --git a/fs/smb/server/transport_rdma.c b/fs/smb/server/transport_rdma.c
+index 17c76713c6d086..7c5a0d712873d2 100644
+--- a/fs/smb/server/transport_rdma.c
++++ b/fs/smb/server/transport_rdma.c
+@@ -362,7 +362,7 @@ static struct smb_direct_transport *alloc_transport(struct rdma_cm_id *cm_id)
+ 	struct smb_direct_transport *t;
+ 	struct ksmbd_conn *conn;
+ 
+-	t = kzalloc(sizeof(*t), GFP_KERNEL);
++	t = kzalloc(sizeof(*t), KSMBD_DEFAULT_GFP);
+ 	if (!t)
+ 		return NULL;
+ 
+@@ -462,7 +462,7 @@ static struct smb_direct_sendmsg
+ {
+ 	struct smb_direct_sendmsg *msg;
+ 
+-	msg = mempool_alloc(t->sendmsg_mempool, GFP_KERNEL);
++	msg = mempool_alloc(t->sendmsg_mempool, KSMBD_DEFAULT_GFP);
+ 	if (!msg)
+ 		return ERR_PTR(-ENOMEM);
+ 	msg->transport = t;
+@@ -1406,7 +1406,7 @@ static int smb_direct_rdma_xmit(struct smb_direct_transport *t,
+ 	desc_buf = buf;
+ 	for (i = 0; i < desc_num; i++) {
+ 		msg = kzalloc(struct_size(msg, sg_list, SG_CHUNK_SIZE),
+-			      GFP_KERNEL);
++			      KSMBD_DEFAULT_GFP);
+ 		if (!msg) {
+ 			ret = -ENOMEM;
+ 			goto out;
+@@ -1852,7 +1852,7 @@ static int smb_direct_create_pools(struct smb_direct_transport *t)
+ 	INIT_LIST_HEAD(&t->recvmsg_queue);
+ 
+ 	for (i = 0; i < t->recv_credit_max; i++) {
+-		recvmsg = mempool_alloc(t->recvmsg_mempool, GFP_KERNEL);
++		recvmsg = mempool_alloc(t->recvmsg_mempool, KSMBD_DEFAULT_GFP);
+ 		if (!recvmsg)
+ 			goto err;
+ 		recvmsg->transport = t;
+@@ -2144,7 +2144,7 @@ static int smb_direct_ib_client_add(struct ib_device *ib_dev)
+ 	if (!rdma_frwr_is_supported(&ib_dev->attrs))
+ 		return 0;
+ 
+-	smb_dev = kzalloc(sizeof(*smb_dev), GFP_KERNEL);
++	smb_dev = kzalloc(sizeof(*smb_dev), KSMBD_DEFAULT_GFP);
+ 	if (!smb_dev)
+ 		return -ENOMEM;
+ 	smb_dev->ib_dev = ib_dev;
+diff --git a/fs/smb/server/transport_tcp.c b/fs/smb/server/transport_tcp.c
+index aaed9e293b2e02..abedf510899a74 100644
+--- a/fs/smb/server/transport_tcp.c
++++ b/fs/smb/server/transport_tcp.c
+@@ -76,7 +76,7 @@ static struct tcp_transport *alloc_transport(struct socket *client_sk)
+ 	struct tcp_transport *t;
+ 	struct ksmbd_conn *conn;
+ 
+-	t = kzalloc(sizeof(*t), GFP_KERNEL);
++	t = kzalloc(sizeof(*t), KSMBD_DEFAULT_GFP);
+ 	if (!t)
+ 		return NULL;
+ 	t->sock = client_sk;
+@@ -93,17 +93,21 @@ static struct tcp_transport *alloc_transport(struct socket *client_sk)
+ 	return t;
+ }
+ 
+-static void free_transport(struct tcp_transport *t)
++void ksmbd_free_transport(struct ksmbd_transport *kt)
+ {
+-	kernel_sock_shutdown(t->sock, SHUT_RDWR);
+-	sock_release(t->sock);
+-	t->sock = NULL;
++	struct tcp_transport *t = TCP_TRANS(kt);
+ 
+-	ksmbd_conn_free(KSMBD_TRANS(t)->conn);
++	sock_release(t->sock);
+ 	kfree(t->iov);
+ 	kfree(t);
+ }
+ 
++static void free_transport(struct tcp_transport *t)
++{
++	kernel_sock_shutdown(t->sock, SHUT_RDWR);
++	ksmbd_conn_free(KSMBD_TRANS(t)->conn);
++}
++
+ /**
+  * kvec_array_init() - initialize a IO vector segment
+  * @new:	IO vector to be initialized
+@@ -151,7 +155,7 @@ static struct kvec *get_conn_iovec(struct tcp_transport *t, unsigned int nr_segs
+ 		return t->iov;
+ 
+ 	/* not big enough -- allocate a new one and release the old */
+-	new_iov = kmalloc_array(nr_segs, sizeof(*new_iov), GFP_KERNEL);
++	new_iov = kmalloc_array(nr_segs, sizeof(*new_iov), KSMBD_DEFAULT_GFP);
+ 	if (new_iov) {
+ 		kfree(t->iov);
+ 		t->iov = new_iov;
+@@ -504,52 +508,61 @@ static int create_socket(struct interface *iface)
+ 	return ret;
+ }
+ 
++struct interface *ksmbd_find_netdev_name_iface_list(char *netdev_name)
++{
++	struct interface *iface;
++
++	list_for_each_entry(iface, &iface_list, entry)
++		if (!strcmp(iface->name, netdev_name))
++			return iface;
++	return NULL;
++}
++
+ static int ksmbd_netdev_event(struct notifier_block *nb, unsigned long event,
+ 			      void *ptr)
+ {
+ 	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
+ 	struct interface *iface;
+-	int ret, found = 0;
++	int ret;
+ 
+ 	switch (event) {
+ 	case NETDEV_UP:
+ 		if (netif_is_bridge_port(netdev))
+ 			return NOTIFY_OK;
+ 
+-		list_for_each_entry(iface, &iface_list, entry) {
+-			if (!strcmp(iface->name, netdev->name)) {
+-				found = 1;
+-				if (iface->state != IFACE_STATE_DOWN)
+-					break;
+-				ret = create_socket(iface);
+-				if (ret)
+-					return NOTIFY_OK;
+-				break;
+-			}
++		iface = ksmbd_find_netdev_name_iface_list(netdev->name);
++		if (iface && iface->state == IFACE_STATE_DOWN) {
++			ksmbd_debug(CONN, "netdev-up event: netdev(%s) is going up\n",
++					iface->name);
++			ret = create_socket(iface);
++			if (ret)
++				return NOTIFY_OK;
+ 		}
+-		if (!found && bind_additional_ifaces) {
+-			iface = alloc_iface(kstrdup(netdev->name, GFP_KERNEL));
++		if (!iface && bind_additional_ifaces) {
++			iface = alloc_iface(kstrdup(netdev->name, KSMBD_DEFAULT_GFP));
+ 			if (!iface)
+ 				return NOTIFY_OK;
++			ksmbd_debug(CONN, "netdev-up event: netdev(%s) is going up\n",
++				    iface->name);
+ 			ret = create_socket(iface);
+ 			if (ret)
+ 				break;
+ 		}
+ 		break;
+ 	case NETDEV_DOWN:
+-		list_for_each_entry(iface, &iface_list, entry) {
+-			if (!strcmp(iface->name, netdev->name) &&
+-			    iface->state == IFACE_STATE_CONFIGURED) {
+-				tcp_stop_kthread(iface->ksmbd_kthread);
+-				iface->ksmbd_kthread = NULL;
+-				mutex_lock(&iface->sock_release_lock);
+-				tcp_destroy_socket(iface->ksmbd_socket);
+-				iface->ksmbd_socket = NULL;
+-				mutex_unlock(&iface->sock_release_lock);
+-
+-				iface->state = IFACE_STATE_DOWN;
+-				break;
+-			}
++		iface = ksmbd_find_netdev_name_iface_list(netdev->name);
++		if (iface && iface->state == IFACE_STATE_CONFIGURED) {
++			ksmbd_debug(CONN, "netdev-down event: netdev(%s) is going down\n",
++					iface->name);
++			tcp_stop_kthread(iface->ksmbd_kthread);
++			iface->ksmbd_kthread = NULL;
++			mutex_lock(&iface->sock_release_lock);
++			tcp_destroy_socket(iface->ksmbd_socket);
++			iface->ksmbd_socket = NULL;
++			mutex_unlock(&iface->sock_release_lock);
++
++			iface->state = IFACE_STATE_DOWN;
++			break;
+ 		}
+ 		break;
+ 	}
+@@ -600,7 +613,7 @@ static struct interface *alloc_iface(char *ifname)
+ 	if (!ifname)
+ 		return NULL;
+ 
+-	iface = kzalloc(sizeof(struct interface), GFP_KERNEL);
++	iface = kzalloc(sizeof(struct interface), KSMBD_DEFAULT_GFP);
+ 	if (!iface) {
+ 		kfree(ifname);
+ 		return NULL;
+@@ -618,24 +631,12 @@ int ksmbd_tcp_set_interfaces(char *ifc_list, int ifc_list_sz)
+ 	int sz = 0;
+ 
+ 	if (!ifc_list_sz) {
+-		struct net_device *netdev;
+-
+-		rtnl_lock();
+-		for_each_netdev(&init_net, netdev) {
+-			if (netif_is_bridge_port(netdev))
+-				continue;
+-			if (!alloc_iface(kstrdup(netdev->name, GFP_KERNEL))) {
+-				rtnl_unlock();
+-				return -ENOMEM;
+-			}
+-		}
+-		rtnl_unlock();
+ 		bind_additional_ifaces = 1;
+ 		return 0;
+ 	}
+ 
+ 	while (ifc_list_sz > 0) {
+-		if (!alloc_iface(kstrdup(ifc_list, GFP_KERNEL)))
++		if (!alloc_iface(kstrdup(ifc_list, KSMBD_DEFAULT_GFP)))
+ 			return -ENOMEM;
+ 
+ 		sz = strlen(ifc_list);
+diff --git a/fs/smb/server/transport_tcp.h b/fs/smb/server/transport_tcp.h
+index e338bebe322f10..1e51675ee1b209 100644
+--- a/fs/smb/server/transport_tcp.h
++++ b/fs/smb/server/transport_tcp.h
+@@ -7,6 +7,8 @@
+ #define __KSMBD_TRANSPORT_TCP_H__
+ 
+ int ksmbd_tcp_set_interfaces(char *ifc_list, int ifc_list_sz);
++struct interface *ksmbd_find_netdev_name_iface_list(char *netdev_name);
++void ksmbd_free_transport(struct ksmbd_transport *kt);
+ int ksmbd_tcp_init(void);
+ void ksmbd_tcp_destroy(void);
+ 
+diff --git a/fs/smb/server/unicode.c b/fs/smb/server/unicode.c
+index 217106ff7b8287..85e6791745ec8e 100644
+--- a/fs/smb/server/unicode.c
++++ b/fs/smb/server/unicode.c
+@@ -297,7 +297,7 @@ char *smb_strndup_from_utf16(const char *src, const int maxlen,
+ 	if (is_unicode) {
+ 		len = smb_utf16_bytes((__le16 *)src, maxlen, codepage);
+ 		len += nls_nullsize(codepage);
+-		dst = kmalloc(len, GFP_KERNEL);
++		dst = kmalloc(len, KSMBD_DEFAULT_GFP);
+ 		if (!dst)
+ 			return ERR_PTR(-ENOMEM);
+ 		ret = smb_from_utf16(dst, (__le16 *)src, len, maxlen, codepage,
+@@ -309,7 +309,7 @@ char *smb_strndup_from_utf16(const char *src, const int maxlen,
+ 	} else {
+ 		len = strnlen(src, maxlen);
+ 		len++;
+-		dst = kmalloc(len, GFP_KERNEL);
++		dst = kmalloc(len, KSMBD_DEFAULT_GFP);
+ 		if (!dst)
+ 			return ERR_PTR(-ENOMEM);
+ 		strscpy(dst, src, len);
+diff --git a/fs/smb/server/vfs.c b/fs/smb/server/vfs.c
+index 8fd070e31fa7dd..a7694aae0b947b 100644
+--- a/fs/smb/server/vfs.c
++++ b/fs/smb/server/vfs.c
+@@ -444,7 +444,7 @@ static int ksmbd_vfs_stream_write(struct ksmbd_file *fp, char *buf, loff_t *pos,
+ 	}
+ 
+ 	if (v_len < size) {
+-		wbuf = kvzalloc(size, GFP_KERNEL);
++		wbuf = kvzalloc(size, KSMBD_DEFAULT_GFP);
+ 		if (!wbuf) {
+ 			err = -ENOMEM;
+ 			goto out;
+@@ -866,7 +866,7 @@ ssize_t ksmbd_vfs_listxattr(struct dentry *dentry, char **list)
+ 	if (size <= 0)
+ 		return size;
+ 
+-	vlist = kvzalloc(size, GFP_KERNEL);
++	vlist = kvzalloc(size, KSMBD_DEFAULT_GFP);
+ 	if (!vlist)
+ 		return -ENOMEM;
+ 
+@@ -908,7 +908,7 @@ ssize_t ksmbd_vfs_getxattr(struct mnt_idmap *idmap,
+ 	if (xattr_len < 0)
+ 		return xattr_len;
+ 
+-	buf = kmalloc(xattr_len + 1, GFP_KERNEL);
++	buf = kmalloc(xattr_len + 1, KSMBD_DEFAULT_GFP);
+ 	if (!buf)
+ 		return -ENOMEM;
+ 
+@@ -1413,7 +1413,7 @@ static struct xattr_smb_acl *ksmbd_vfs_make_xattr_posix_acl(struct mnt_idmap *id
+ 
+ 	smb_acl = kzalloc(sizeof(struct xattr_smb_acl) +
+ 			  sizeof(struct xattr_acl_entry) * posix_acls->a_count,
+-			  GFP_KERNEL);
++			  KSMBD_DEFAULT_GFP);
+ 	if (!smb_acl)
+ 		goto out;
+ 
+@@ -1769,7 +1769,7 @@ int ksmbd_vfs_xattr_stream_name(char *stream_name, char **xattr_stream_name,
+ 	else
+ 		type = ":$DATA";
+ 
+-	buf = kasprintf(GFP_KERNEL, "%s%s%s",
++	buf = kasprintf(KSMBD_DEFAULT_GFP, "%s%s%s",
+ 			XATTR_NAME_STREAM, stream_name,	type);
+ 	if (!buf)
+ 		return -ENOMEM;
+@@ -1898,7 +1898,7 @@ int ksmbd_vfs_set_init_posix_acl(struct mnt_idmap *idmap,
+ 		acl_state.group.allow;
+ 	acl_state.mask.allow = 0x07;
+ 
+-	acls = posix_acl_alloc(6, GFP_KERNEL);
++	acls = posix_acl_alloc(6, KSMBD_DEFAULT_GFP);
+ 	if (!acls) {
+ 		free_acl_state(&acl_state);
+ 		return -ENOMEM;
+diff --git a/fs/smb/server/vfs_cache.c b/fs/smb/server/vfs_cache.c
+index a19f4e563c7e54..1f8fa3468173ab 100644
+--- a/fs/smb/server/vfs_cache.c
++++ b/fs/smb/server/vfs_cache.c
+@@ -188,7 +188,7 @@ static struct ksmbd_inode *ksmbd_inode_get(struct ksmbd_file *fp)
+ 	if (ci)
+ 		return ci;
+ 
+-	ci = kmalloc(sizeof(struct ksmbd_inode), GFP_KERNEL);
++	ci = kmalloc(sizeof(struct ksmbd_inode), KSMBD_DEFAULT_GFP);
+ 	if (!ci)
+ 		return NULL;
+ 
+@@ -577,7 +577,7 @@ static int __open_id(struct ksmbd_file_table *ft, struct ksmbd_file *fp,
+ 		return -EMFILE;
+ 	}
+ 
+-	idr_preload(GFP_KERNEL);
++	idr_preload(KSMBD_DEFAULT_GFP);
+ 	write_lock(&ft->lock);
+ 	ret = idr_alloc_cyclic(ft->idr, fp, 0, INT_MAX - 1, GFP_NOWAIT);
+ 	if (ret >= 0) {
+@@ -605,7 +605,7 @@ struct ksmbd_file *ksmbd_open_fd(struct ksmbd_work *work, struct file *filp)
+ 	struct ksmbd_file *fp;
+ 	int ret;
+ 
+-	fp = kmem_cache_zalloc(filp_cache, GFP_KERNEL);
++	fp = kmem_cache_zalloc(filp_cache, KSMBD_DEFAULT_GFP);
+ 	if (!fp) {
+ 		pr_err("Failed to allocate memory\n");
+ 		return ERR_PTR(-ENOMEM);
+@@ -713,12 +713,8 @@ static bool tree_conn_fd_check(struct ksmbd_tree_connect *tcon,
+ 
+ static bool ksmbd_durable_scavenger_alive(void)
+ {
+-	mutex_lock(&durable_scavenger_lock);
+-	if (!durable_scavenger_running) {
+-		mutex_unlock(&durable_scavenger_lock);
++	if (!durable_scavenger_running)
+ 		return false;
+-	}
+-	mutex_unlock(&durable_scavenger_lock);
+ 
+ 	if (kthread_should_stop())
+ 		return false;
+@@ -799,9 +795,7 @@ static int ksmbd_durable_scavenger(void *dummy)
+ 			break;
+ 	}
+ 
+-	mutex_lock(&durable_scavenger_lock);
+ 	durable_scavenger_running = false;
+-	mutex_unlock(&durable_scavenger_lock);
+ 
+ 	module_put(THIS_MODULE);
+ 
+@@ -923,7 +917,7 @@ int ksmbd_validate_name_reconnect(struct ksmbd_share_config *share,
+ 	char *pathname, *ab_pathname;
+ 	int ret = 0;
+ 
+-	pathname = kmalloc(PATH_MAX, GFP_KERNEL);
++	pathname = kmalloc(PATH_MAX, KSMBD_DEFAULT_GFP);
+ 	if (!pathname)
+ 		return -EACCES;
+ 
+@@ -983,7 +977,7 @@ int ksmbd_reopen_durable_fd(struct ksmbd_work *work, struct ksmbd_file *fp)
+ 
+ int ksmbd_init_file_table(struct ksmbd_file_table *ft)
+ {
+-	ft->idr = kzalloc(sizeof(struct idr), GFP_KERNEL);
++	ft->idr = kzalloc(sizeof(struct idr), KSMBD_DEFAULT_GFP);
+ 	if (!ft->idr)
+ 		return -ENOMEM;
+ 
+diff --git a/fs/splice.c b/fs/splice.c
+index 06232d7e505f63..38f8c94267315d 100644
+--- a/fs/splice.c
++++ b/fs/splice.c
+@@ -45,7 +45,7 @@
+  * here if set to avoid blocking other users of this pipe if splice is
+  * being done on it.
+  */
+-static noinline void noinline pipe_clear_nowait(struct file *file)
++static noinline void pipe_clear_nowait(struct file *file)
+ {
+ 	fmode_t fmode = READ_ONCE(file->f_mode);
+ 
+diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
+index 559a3a57709748..ba6b4a180e8081 100644
+--- a/fs/xfs/xfs_aops.c
++++ b/fs/xfs/xfs_aops.c
+@@ -19,6 +19,7 @@
+ #include "xfs_reflink.h"
+ #include "xfs_errortag.h"
+ #include "xfs_error.h"
++#include "xfs_icache.h"
+ 
+ struct xfs_writepage_ctx {
+ 	struct iomap_writepage_ctx ctx;
+@@ -528,12 +529,44 @@ xfs_vm_readahead(
+ }
+ 
+ static int
+-xfs_iomap_swapfile_activate(
++xfs_vm_swap_activate(
+ 	struct swap_info_struct		*sis,
+ 	struct file			*swap_file,
+ 	sector_t			*span)
+ {
+-	sis->bdev = xfs_inode_buftarg(XFS_I(file_inode(swap_file)))->bt_bdev;
++	struct xfs_inode		*ip = XFS_I(file_inode(swap_file));
++
++	/*
++	 * Swap file activation can race against concurrent shared extent
++	 * removal in files that have been cloned.  If this happens,
++	 * iomap_swapfile_iter() can fail because it encountered a shared
++	 * extent even though an operation is in progress to remove those
++	 * shared extents.
++	 *
++	 * This race becomes problematic when we defer extent removal
++	 * operations beyond the end of a syscall (i.e. use async background
++	 * processing algorithms).  Users think the extents are no longer
++	 * shared, but iomap_swapfile_iter() still sees them as shared
++	 * because the refcountbt entries for the extents being removed have
++	 * not yet been updated.  Hence the swapon call fails unexpectedly.
++	 *
++	 * The race condition is currently most obvious from the unlink()
++	 * operation as extent removal is deferred until after the last
++	 * reference to the inode goes away.  We then process the extent
++	 * removal asynchronously, hence triggers the "syscall completed but
++	 * work not done" condition mentioned above.  To close this race
++	 * window, we need to flush any pending inodegc operations to ensure
++	 * they have updated the refcountbt records before we try to map the
++	 * swapfile.
++	 */
++	xfs_inodegc_flush(ip->i_mount);
++
++	/*
++	 * Direct the swap code to the correct block device when this file
++	 * sits on the RT device.
++	 */
++	sis->bdev = xfs_inode_buftarg(ip)->bt_bdev;
++
+ 	return iomap_swapfile_activate(sis, swap_file, span,
+ 			&xfs_read_iomap_ops);
+ }
+@@ -549,11 +582,11 @@ const struct address_space_operations xfs_address_space_operations = {
+ 	.migrate_folio		= filemap_migrate_folio,
+ 	.is_partially_uptodate  = iomap_is_partially_uptodate,
+ 	.error_remove_folio	= generic_error_remove_folio,
+-	.swap_activate		= xfs_iomap_swapfile_activate,
++	.swap_activate		= xfs_vm_swap_activate,
+ };
+ 
+ const struct address_space_operations xfs_dax_aops = {
+ 	.writepages		= xfs_dax_writepages,
+ 	.dirty_folio		= noop_dirty_folio,
+-	.swap_activate		= xfs_iomap_swapfile_activate,
++	.swap_activate		= xfs_vm_swap_activate,
+ };
+diff --git a/fs/xfs/xfs_qm_bhv.c b/fs/xfs/xfs_qm_bhv.c
+index ed1d597c30ca25..dabb1d6d7e463e 100644
+--- a/fs/xfs/xfs_qm_bhv.c
++++ b/fs/xfs/xfs_qm_bhv.c
+@@ -79,6 +79,28 @@ xfs_qm_statvfs(
+ 	}
+ }
+ 
++STATIC int
++xfs_qm_validate_state_change(
++	struct xfs_mount	*mp,
++	uint			uqd,
++	uint			gqd,
++	uint			pqd)
++{
++	int state;
++
++	/* Is quota state changing? */
++	state = ((uqd && !XFS_IS_UQUOTA_ON(mp)) ||
++		(!uqd &&  XFS_IS_UQUOTA_ON(mp)) ||
++		 (gqd && !XFS_IS_GQUOTA_ON(mp)) ||
++		(!gqd &&  XFS_IS_GQUOTA_ON(mp)) ||
++		 (pqd && !XFS_IS_PQUOTA_ON(mp)) ||
++		(!pqd &&  XFS_IS_PQUOTA_ON(mp)));
++
++	return  state &&
++		(xfs_dev_is_read_only(mp, "changing quota state") ||
++		xfs_has_norecovery(mp));
++}
++
+ int
+ xfs_qm_newmount(
+ 	xfs_mount_t	*mp,
+@@ -98,24 +120,21 @@ xfs_qm_newmount(
+ 	}
+ 
+ 	/*
+-	 * If the device itself is read-only, we can't allow
+-	 * the user to change the state of quota on the mount -
+-	 * this would generate a transaction on the ro device,
+-	 * which would lead to an I/O error and shutdown
++	 * If the device itself is read-only and/or in norecovery
++	 * mode, we can't allow the user to change the state of
++	 * quota on the mount - this would generate a transaction
++	 * on the ro device, which would lead to an I/O error and
++	 * shutdown.
+ 	 */
+ 
+-	if (((uquotaondisk && !XFS_IS_UQUOTA_ON(mp)) ||
+-	    (!uquotaondisk &&  XFS_IS_UQUOTA_ON(mp)) ||
+-	     (gquotaondisk && !XFS_IS_GQUOTA_ON(mp)) ||
+-	    (!gquotaondisk &&  XFS_IS_GQUOTA_ON(mp)) ||
+-	     (pquotaondisk && !XFS_IS_PQUOTA_ON(mp)) ||
+-	    (!pquotaondisk &&  XFS_IS_PQUOTA_ON(mp)))  &&
+-	    xfs_dev_is_read_only(mp, "changing quota state")) {
++	if (xfs_qm_validate_state_change(mp, uquotaondisk,
++			    gquotaondisk, pquotaondisk)) {
++
+ 		xfs_warn(mp, "please mount with%s%s%s%s.",
+-			(!quotaondisk ? "out quota" : ""),
+-			(uquotaondisk ? " usrquota" : ""),
+-			(gquotaondisk ? " grpquota" : ""),
+-			(pquotaondisk ? " prjquota" : ""));
++				(!quotaondisk ? "out quota" : ""),
++				(uquotaondisk ? " usrquota" : ""),
++				(gquotaondisk ? " grpquota" : ""),
++				(pquotaondisk ? " prjquota" : ""));
+ 		return -EPERM;
+ 	}
+ 
+diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
+index 8f7c9eaeb36090..201a86b3574da5 100644
+--- a/fs/xfs/xfs_super.c
++++ b/fs/xfs/xfs_super.c
+@@ -1619,8 +1619,12 @@ xfs_fs_fill_super(
+ #endif
+ 	}
+ 
+-	/* Filesystem claims it needs repair, so refuse the mount. */
+-	if (xfs_has_needsrepair(mp)) {
++	/*
++	 * Filesystem claims it needs repair, so refuse the mount unless
++	 * norecovery is also specified, in which case the filesystem can
++	 * be mounted with no risk of further damage.
++	 */
++	if (xfs_has_needsrepair(mp) && !xfs_has_norecovery(mp)) {
+ 		xfs_warn(mp, "Filesystem needs repair.  Please run xfs_repair.");
+ 		error = -EFSCORRUPTED;
+ 		goto out_free_sb;
+diff --git a/include/drm/intel/i915_pciids.h b/include/drm/intel/i915_pciids.h
+index dacea289acaf5a..1ff00e3d4418ef 100644
+--- a/include/drm/intel/i915_pciids.h
++++ b/include/drm/intel/i915_pciids.h
+@@ -810,6 +810,7 @@
+ 	MACRO__(0xE20C, ## __VA_ARGS__), \
+ 	MACRO__(0xE20D, ## __VA_ARGS__), \
+ 	MACRO__(0xE210, ## __VA_ARGS__), \
++	MACRO__(0xE211, ## __VA_ARGS__), \
+ 	MACRO__(0xE212, ## __VA_ARGS__), \
+ 	MACRO__(0xE215, ## __VA_ARGS__), \
+ 	MACRO__(0xE216, ## __VA_ARGS__)
+diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
+index 318245b4e38fb3..959f8f82a65098 100644
+--- a/include/linux/blk-mq.h
++++ b/include/linux/blk-mq.h
+@@ -156,9 +156,6 @@ struct request {
+ 	struct blk_crypto_keyslot *crypt_keyslot;
+ #endif
+ 
+-	enum rw_hint write_hint;
+-	unsigned short ioprio;
+-
+ 	enum mq_rq_state state;
+ 	atomic_t ref;
+ 
+@@ -222,7 +219,9 @@ static inline bool blk_rq_is_passthrough(struct request *rq)
+ 
+ static inline unsigned short req_get_ioprio(struct request *req)
+ {
+-	return req->ioprio;
++	if (req->bio)
++		return req->bio->bi_ioprio;
++	return 0;
+ }
+ 
+ #define rq_data_dir(rq)		(op_is_write(req_op(rq)) ? WRITE : READ)
+@@ -1010,7 +1009,6 @@ static inline void blk_rq_bio_prep(struct request *rq, struct bio *bio,
+ 	rq->nr_phys_segments = nr_segs;
+ 	rq->__data_len = bio->bi_iter.bi_size;
+ 	rq->bio = rq->biotail = bio;
+-	rq->ioprio = bio_prio(bio);
+ }
+ 
+ void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx,
+diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h
+index 1ff52020cf7576..34498652f78027 100644
+--- a/include/linux/energy_model.h
++++ b/include/linux/energy_model.h
+@@ -163,13 +163,13 @@ struct em_data_callback {
+ struct em_perf_domain *em_cpu_get(int cpu);
+ struct em_perf_domain *em_pd_get(struct device *dev);
+ int em_dev_update_perf_domain(struct device *dev,
+-			      struct em_perf_table __rcu *new_table);
++			      struct em_perf_table *new_table);
+ int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states,
+ 				struct em_data_callback *cb, cpumask_t *span,
+ 				bool microwatts);
+ void em_dev_unregister_perf_domain(struct device *dev);
+-struct em_perf_table __rcu *em_table_alloc(struct em_perf_domain *pd);
+-void em_table_free(struct em_perf_table __rcu *table);
++struct em_perf_table *em_table_alloc(struct em_perf_domain *pd);
++void em_table_free(struct em_perf_table *table);
+ int em_dev_compute_costs(struct device *dev, struct em_perf_state *table,
+ 			 int nr_states);
+ int em_dev_update_chip_binning(struct device *dev);
+@@ -365,14 +365,14 @@ static inline int em_pd_nr_perf_states(struct em_perf_domain *pd)
+ 	return 0;
+ }
+ static inline
+-struct em_perf_table __rcu *em_table_alloc(struct em_perf_domain *pd)
++struct em_perf_table *em_table_alloc(struct em_perf_domain *pd)
+ {
+ 	return NULL;
+ }
+-static inline void em_table_free(struct em_perf_table __rcu *table) {}
++static inline void em_table_free(struct em_perf_table *table) {}
+ static inline
+ int em_dev_update_perf_domain(struct device *dev,
+-			      struct em_perf_table __rcu *new_table)
++			      struct em_perf_table *new_table)
+ {
+ 	return -EINVAL;
+ }
+diff --git a/include/linux/msi.h b/include/linux/msi.h
+index b10093c4d00ea5..59a421fc42bf07 100644
+--- a/include/linux/msi.h
++++ b/include/linux/msi.h
+@@ -73,7 +73,6 @@ struct msi_msg {
+ 	};
+ };
+ 
+-extern int pci_msi_ignore_mask;
+ /* Helper functions */
+ struct msi_desc;
+ struct pci_dev;
+@@ -556,6 +555,8 @@ enum {
+ 	MSI_FLAG_PCI_MSIX_ALLOC_DYN	= (1 << 20),
+ 	/* PCI MSIs cannot be steered separately to CPU cores */
+ 	MSI_FLAG_NO_AFFINITY		= (1 << 21),
++	/* Inhibit usage of entry masking */
++	MSI_FLAG_NO_MASK		= (1 << 22),
+ };
+ 
+ /**
+diff --git a/include/linux/pci.h b/include/linux/pci.h
+index 74114acbb07fbb..ade889ded4e1e9 100644
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -245,6 +245,8 @@ enum pci_dev_flags {
+ 	PCI_DEV_FLAGS_NO_RELAXED_ORDERING = (__force pci_dev_flags_t) (1 << 11),
+ 	/* Device does honor MSI masking despite saying otherwise */
+ 	PCI_DEV_FLAGS_HAS_MSI_MASKING = (__force pci_dev_flags_t) (1 << 12),
++	/* Device requires write to PCI_MSIX_ENTRY_DATA before any MSIX reads */
++	PCI_DEV_FLAGS_MSIX_TOUCH_ENTRY_DATA_FIRST = (__force pci_dev_flags_t) (1 << 13),
+ };
+ 
+ enum pci_irq_reroute_variant {
+diff --git a/include/net/netfilter/nft_fib.h b/include/net/netfilter/nft_fib.h
+index 38cae7113de462..6e202ed5e63f3c 100644
+--- a/include/net/netfilter/nft_fib.h
++++ b/include/net/netfilter/nft_fib.h
+@@ -18,6 +18,27 @@ nft_fib_is_loopback(const struct sk_buff *skb, const struct net_device *in)
+ 	return skb->pkt_type == PACKET_LOOPBACK || in->flags & IFF_LOOPBACK;
+ }
+ 
++static inline bool nft_fib_can_skip(const struct nft_pktinfo *pkt)
++{
++	const struct net_device *indev = nft_in(pkt);
++	const struct sock *sk;
++
++	switch (nft_hook(pkt)) {
++	case NF_INET_PRE_ROUTING:
++	case NF_INET_INGRESS:
++	case NF_INET_LOCAL_IN:
++		break;
++	default:
++		return false;
++	}
++
++	sk = pkt->skb->sk;
++	if (sk && sk_fullsock(sk))
++	       return sk->sk_rx_dst_ifindex == indev->ifindex;
++
++	return nft_fib_is_loopback(pkt->skb, indev);
++}
++
+ int nft_fib_dump(struct sk_buff *skb, const struct nft_expr *expr, bool reset);
+ int nft_fib_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
+ 		 const struct nlattr * const tb[]);
+diff --git a/include/soc/qcom/ice.h b/include/soc/qcom/ice.h
+index 5870a94599a258..d5f6a228df6594 100644
+--- a/include/soc/qcom/ice.h
++++ b/include/soc/qcom/ice.h
+@@ -34,4 +34,6 @@ int qcom_ice_program_key(struct qcom_ice *ice,
+ 			 int slot);
+ int qcom_ice_evict_key(struct qcom_ice *ice, int slot);
+ struct qcom_ice *of_qcom_ice_get(struct device *dev);
++struct qcom_ice *devm_of_qcom_ice_get(struct device *dev);
++
+ #endif /* __QCOM_ICE_H__ */
+diff --git a/include/trace/events/block.h b/include/trace/events/block.h
+index 1527d5d45e01a4..bd0ea07338eb6c 100644
+--- a/include/trace/events/block.h
++++ b/include/trace/events/block.h
+@@ -99,7 +99,7 @@ TRACE_EVENT(block_rq_requeue,
+ 		__entry->dev	   = rq->q->disk ? disk_devt(rq->q->disk) : 0;
+ 		__entry->sector    = blk_rq_trace_sector(rq);
+ 		__entry->nr_sector = blk_rq_trace_nr_sectors(rq);
+-		__entry->ioprio    = rq->ioprio;
++		__entry->ioprio    = req_get_ioprio(rq);
+ 
+ 		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags);
+ 		__get_str(cmd)[0] = '\0';
+@@ -136,7 +136,7 @@ DECLARE_EVENT_CLASS(block_rq_completion,
+ 		__entry->sector    = blk_rq_pos(rq);
+ 		__entry->nr_sector = nr_bytes >> 9;
+ 		__entry->error     = blk_status_to_errno(error);
+-		__entry->ioprio    = rq->ioprio;
++		__entry->ioprio    = req_get_ioprio(rq);
+ 
+ 		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags);
+ 		__get_str(cmd)[0] = '\0';
+@@ -209,7 +209,7 @@ DECLARE_EVENT_CLASS(block_rq,
+ 		__entry->sector    = blk_rq_trace_sector(rq);
+ 		__entry->nr_sector = blk_rq_trace_nr_sectors(rq);
+ 		__entry->bytes     = blk_rq_bytes(rq);
+-		__entry->ioprio	   = rq->ioprio;
++		__entry->ioprio	   = req_get_ioprio(rq);
+ 
+ 		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags);
+ 		__get_str(cmd)[0] = '\0';
+diff --git a/include/trace/stages/stage3_trace_output.h b/include/trace/stages/stage3_trace_output.h
+index c1fb1355d3094b..1e7b0bef95f525 100644
+--- a/include/trace/stages/stage3_trace_output.h
++++ b/include/trace/stages/stage3_trace_output.h
+@@ -119,6 +119,14 @@
+ 		trace_print_array_seq(p, array, count, el_size);	\
+ 	})
+ 
++#undef __print_dynamic_array
++#define __print_dynamic_array(array, el_size)				\
++	({								\
++		__print_array(__get_dynamic_array(array),		\
++			      __get_dynamic_array_len(array) / (el_size), \
++			      (el_size));				\
++	})
++
+ #undef __print_hex_dump
+ #define __print_hex_dump(prefix_str, prefix_type,			\
+ 			 rowsize, groupsize, buf, len, ascii)		\
+diff --git a/include/trace/stages/stage7_class_define.h b/include/trace/stages/stage7_class_define.h
+index bcb960d16fc0ed..fcd564a590f434 100644
+--- a/include/trace/stages/stage7_class_define.h
++++ b/include/trace/stages/stage7_class_define.h
+@@ -22,6 +22,7 @@
+ #undef __get_rel_cpumask
+ #undef __get_rel_sockaddr
+ #undef __print_array
++#undef __print_dynamic_array
+ #undef __print_hex_dump
+ #undef __get_buf
+ 
+diff --git a/include/uapi/drm/ivpu_accel.h b/include/uapi/drm/ivpu_accel.h
+index 084fb529e1e96c..13001da141c336 100644
+--- a/include/uapi/drm/ivpu_accel.h
++++ b/include/uapi/drm/ivpu_accel.h
+@@ -1,6 +1,6 @@
+ /* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+ /*
+- * Copyright (C) 2020-2024 Intel Corporation
++ * Copyright (C) 2020-2025 Intel Corporation
+  */
+ 
+ #ifndef __UAPI_IVPU_DRM_H__
+@@ -131,7 +131,7 @@ struct drm_ivpu_param {
+ 	 * platform type when executing on a simulator or emulator (read-only)
+ 	 *
+ 	 * %DRM_IVPU_PARAM_CORE_CLOCK_RATE:
+-	 * Current PLL frequency (read-only)
++	 * Maximum frequency of the NPU data processing unit clock (read-only)
+ 	 *
+ 	 * %DRM_IVPU_PARAM_NUM_CONTEXTS:
+ 	 * Maximum number of simultaneously existing contexts (read-only)
+diff --git a/init/Kconfig b/init/Kconfig
+index 243d0087f94458..2b4969758da836 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -708,7 +708,7 @@ endmenu # "CPU/Task time and stats accounting"
+ 
+ config CPU_ISOLATION
+ 	bool "CPU isolation"
+-	depends on SMP || COMPILE_TEST
++	depends on SMP
+ 	default y
+ 	help
+ 	  Make sure that CPUs running critical tasks are not disturbed by
+diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
+index 19de7129ae0b35..fef5c6e3b251e2 100644
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -1103,21 +1103,22 @@ static __cold void io_fallback_tw(struct io_uring_task *tctx, bool sync)
+ 	while (node) {
+ 		req = container_of(node, struct io_kiocb, io_task_work.node);
+ 		node = node->next;
+-		if (sync && last_ctx != req->ctx) {
++		if (last_ctx != req->ctx) {
+ 			if (last_ctx) {
+-				flush_delayed_work(&last_ctx->fallback_work);
++				if (sync)
++					flush_delayed_work(&last_ctx->fallback_work);
+ 				percpu_ref_put(&last_ctx->refs);
+ 			}
+ 			last_ctx = req->ctx;
+ 			percpu_ref_get(&last_ctx->refs);
+ 		}
+-		if (llist_add(&req->io_task_work.node,
+-			      &req->ctx->fallback_llist))
+-			schedule_delayed_work(&req->ctx->fallback_work, 1);
++		if (llist_add(&req->io_task_work.node, &last_ctx->fallback_llist))
++			schedule_delayed_work(&last_ctx->fallback_work, 1);
+ 	}
+ 
+ 	if (last_ctx) {
+-		flush_delayed_work(&last_ctx->fallback_work);
++		if (sync)
++			flush_delayed_work(&last_ctx->fallback_work);
+ 		percpu_ref_put(&last_ctx->refs);
+ 	}
+ }
+@@ -1777,7 +1778,7 @@ struct io_wq_work *io_wq_free_work(struct io_wq_work *work)
+ 	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
+ 	struct io_kiocb *nxt = NULL;
+ 
+-	if (req_ref_put_and_test(req)) {
++	if (req_ref_put_and_test_atomic(req)) {
+ 		if (req->flags & IO_REQ_LINK_FLAGS)
+ 			nxt = io_req_find_next(req);
+ 		io_free_req(req);
+diff --git a/io_uring/refs.h b/io_uring/refs.h
+index 63982ead9f7dab..0d928d87c4ed13 100644
+--- a/io_uring/refs.h
++++ b/io_uring/refs.h
+@@ -17,6 +17,13 @@ static inline bool req_ref_inc_not_zero(struct io_kiocb *req)
+ 	return atomic_inc_not_zero(&req->refs);
+ }
+ 
++static inline bool req_ref_put_and_test_atomic(struct io_kiocb *req)
++{
++	WARN_ON_ONCE(!(data_race(req->flags) & REQ_F_REFCOUNT));
++	WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
++	return atomic_dec_and_test(&req->refs);
++}
++
+ static inline bool req_ref_put_and_test(struct io_kiocb *req)
+ {
+ 	if (likely(!(req->flags & REQ_F_REFCOUNT)))
+diff --git a/kernel/bpf/bpf_cgrp_storage.c b/kernel/bpf/bpf_cgrp_storage.c
+index 6547fb7ac0dcb2..129a51b1da1b12 100644
+--- a/kernel/bpf/bpf_cgrp_storage.c
++++ b/kernel/bpf/bpf_cgrp_storage.c
+@@ -162,6 +162,7 @@ BPF_CALL_5(bpf_cgrp_storage_get, struct bpf_map *, map, struct cgroup *, cgroup,
+ 	   void *, value, u64, flags, gfp_t, gfp_flags)
+ {
+ 	struct bpf_local_storage_data *sdata;
++	bool nobusy;
+ 
+ 	WARN_ON_ONCE(!bpf_rcu_lock_held());
+ 	if (flags & ~(BPF_LOCAL_STORAGE_GET_F_CREATE))
+@@ -170,21 +171,21 @@ BPF_CALL_5(bpf_cgrp_storage_get, struct bpf_map *, map, struct cgroup *, cgroup,
+ 	if (!cgroup)
+ 		return (unsigned long)NULL;
+ 
+-	if (!bpf_cgrp_storage_trylock())
+-		return (unsigned long)NULL;
++	nobusy = bpf_cgrp_storage_trylock();
+ 
+-	sdata = cgroup_storage_lookup(cgroup, map, true);
++	sdata = cgroup_storage_lookup(cgroup, map, nobusy);
+ 	if (sdata)
+ 		goto unlock;
+ 
+ 	/* only allocate new storage, when the cgroup is refcounted */
+ 	if (!percpu_ref_is_dying(&cgroup->self.refcnt) &&
+-	    (flags & BPF_LOCAL_STORAGE_GET_F_CREATE))
++	    (flags & BPF_LOCAL_STORAGE_GET_F_CREATE) && nobusy)
+ 		sdata = bpf_local_storage_update(cgroup, (struct bpf_local_storage_map *)map,
+ 						 value, BPF_NOEXIST, gfp_flags);
+ 
+ unlock:
+-	bpf_cgrp_storage_unlock();
++	if (nobusy)
++		bpf_cgrp_storage_unlock();
+ 	return IS_ERR_OR_NULL(sdata) ? (unsigned long)NULL : (unsigned long)sdata->data;
+ }
+ 
+diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
+index 3ec941a0ea41c5..bb3ba8ebaf3d24 100644
+--- a/kernel/bpf/hashtab.c
++++ b/kernel/bpf/hashtab.c
+@@ -198,12 +198,12 @@ static bool htab_is_percpu(const struct bpf_htab *htab)
+ static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size,
+ 				     void __percpu *pptr)
+ {
+-	*(void __percpu **)(l->key + key_size) = pptr;
++	*(void __percpu **)(l->key + roundup(key_size, 8)) = pptr;
+ }
+ 
+ static inline void __percpu *htab_elem_get_ptr(struct htab_elem *l, u32 key_size)
+ {
+-	return *(void __percpu **)(l->key + key_size);
++	return *(void __percpu **)(l->key + roundup(key_size, 8));
+ }
+ 
+ static void *fd_htab_map_get_ptr(const struct bpf_map *map, struct htab_elem *l)
+@@ -2355,7 +2355,7 @@ static int htab_percpu_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn
+ 	*insn++ = BPF_EMIT_CALL(__htab_map_lookup_elem);
+ 	*insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3);
+ 	*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_0,
+-				offsetof(struct htab_elem, key) + map->key_size);
++				offsetof(struct htab_elem, key) + roundup(map->key_size, 8));
+ 	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0);
+ 	*insn++ = BPF_MOV64_PERCPU_REG(BPF_REG_0, BPF_REG_0);
+ 
+diff --git a/kernel/bpf/preload/bpf_preload_kern.c b/kernel/bpf/preload/bpf_preload_kern.c
+index 0c63bc2cd895a2..56a81df7a9d7c1 100644
+--- a/kernel/bpf/preload/bpf_preload_kern.c
++++ b/kernel/bpf/preload/bpf_preload_kern.c
+@@ -89,4 +89,5 @@ static void __exit fini(void)
+ }
+ late_initcall(load);
+ module_exit(fini);
++MODULE_IMPORT_NS("BPF_INTERNAL");
+ MODULE_LICENSE("GPL");
+diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
+index 696e5a2cbea2e8..977c0845775652 100644
+--- a/kernel/bpf/syscall.c
++++ b/kernel/bpf/syscall.c
+@@ -1457,7 +1457,7 @@ struct bpf_map *bpf_map_get(u32 ufd)
+ 
+ 	return map;
+ }
+-EXPORT_SYMBOL(bpf_map_get);
++EXPORT_SYMBOL_NS(bpf_map_get, BPF_INTERNAL);
+ 
+ struct bpf_map *bpf_map_get_with_uref(u32 ufd)
+ {
+@@ -3223,7 +3223,7 @@ struct bpf_link *bpf_link_get_from_fd(u32 ufd)
+ 	bpf_link_inc(link);
+ 	return link;
+ }
+-EXPORT_SYMBOL(bpf_link_get_from_fd);
++EXPORT_SYMBOL_NS(bpf_link_get_from_fd, BPF_INTERNAL);
+ 
+ static void bpf_tracing_link_release(struct bpf_link *link)
+ {
+@@ -5853,7 +5853,7 @@ int kern_sys_bpf(int cmd, union bpf_attr *attr, unsigned int size)
+ 		return ____bpf_sys_bpf(cmd, attr, size);
+ 	}
+ }
+-EXPORT_SYMBOL(kern_sys_bpf);
++EXPORT_SYMBOL_NS(kern_sys_bpf, BPF_INTERNAL);
+ 
+ static const struct bpf_func_proto bpf_sys_bpf_proto = {
+ 	.func		= bpf_sys_bpf,
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index d2ef289993f20d..8656208aa4bbb6 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -22289,6 +22289,33 @@ BTF_ID(func, __rcu_read_unlock)
+ #endif
+ BTF_SET_END(btf_id_deny)
+ 
++/* fexit and fmod_ret can't be used to attach to __noreturn functions.
++ * Currently, we must manually list all __noreturn functions here. Once a more
++ * robust solution is implemented, this workaround can be removed.
++ */
++BTF_SET_START(noreturn_deny)
++#ifdef CONFIG_IA32_EMULATION
++BTF_ID(func, __ia32_sys_exit)
++BTF_ID(func, __ia32_sys_exit_group)
++#endif
++#ifdef CONFIG_KUNIT
++BTF_ID(func, __kunit_abort)
++BTF_ID(func, kunit_try_catch_throw)
++#endif
++#ifdef CONFIG_MODULES
++BTF_ID(func, __module_put_and_kthread_exit)
++#endif
++#ifdef CONFIG_X86_64
++BTF_ID(func, __x64_sys_exit)
++BTF_ID(func, __x64_sys_exit_group)
++#endif
++BTF_ID(func, do_exit)
++BTF_ID(func, do_group_exit)
++BTF_ID(func, kthread_complete_and_exit)
++BTF_ID(func, kthread_exit)
++BTF_ID(func, make_task_dead)
++BTF_SET_END(noreturn_deny)
++
+ static bool can_be_sleepable(struct bpf_prog *prog)
+ {
+ 	if (prog->type == BPF_PROG_TYPE_TRACING) {
+@@ -22377,6 +22404,11 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
+ 	} else if (prog->type == BPF_PROG_TYPE_TRACING &&
+ 		   btf_id_set_contains(&btf_id_deny, btf_id)) {
+ 		return -EINVAL;
++	} else if ((prog->expected_attach_type == BPF_TRACE_FEXIT ||
++		   prog->expected_attach_type == BPF_MODIFY_RETURN) &&
++		   btf_id_set_contains(&noreturn_deny, btf_id)) {
++		verbose(env, "Attaching fexit/fmod_ret to __noreturn functions is rejected.\n");
++		return -EINVAL;
+ 	}
+ 
+ 	key = bpf_trampoline_compute_key(tgt_prog, prog->aux->attach_btf, btf_id);
+diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
+index 4378f3eff25d25..e63d6f3b004702 100644
+--- a/kernel/cgroup/cgroup.c
++++ b/kernel/cgroup/cgroup.c
+@@ -2339,9 +2339,37 @@ static struct file_system_type cgroup2_fs_type = {
+ };
+ 
+ #ifdef CONFIG_CPUSETS_V1
++enum cpuset_param {
++	Opt_cpuset_v2_mode,
++};
++
++static const struct fs_parameter_spec cpuset_fs_parameters[] = {
++	fsparam_flag  ("cpuset_v2_mode", Opt_cpuset_v2_mode),
++	{}
++};
++
++static int cpuset_parse_param(struct fs_context *fc, struct fs_parameter *param)
++{
++	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
++	struct fs_parse_result result;
++	int opt;
++
++	opt = fs_parse(fc, cpuset_fs_parameters, param, &result);
++	if (opt < 0)
++		return opt;
++
++	switch (opt) {
++	case Opt_cpuset_v2_mode:
++		ctx->flags |= CGRP_ROOT_CPUSET_V2_MODE;
++		return 0;
++	}
++	return -EINVAL;
++}
++
+ static const struct fs_context_operations cpuset_fs_context_ops = {
+ 	.get_tree	= cgroup1_get_tree,
+ 	.free		= cgroup_fs_context_free,
++	.parse_param	= cpuset_parse_param,
+ };
+ 
+ /*
+@@ -2378,6 +2406,7 @@ static int cpuset_init_fs_context(struct fs_context *fc)
+ static struct file_system_type cpuset_fs_type = {
+ 	.name			= "cpuset",
+ 	.init_fs_context	= cpuset_init_fs_context,
++	.parameters		= cpuset_fs_parameters,
+ 	.fs_flags		= FS_USERNS_MOUNT,
+ };
+ #endif
+diff --git a/kernel/cgroup/cpuset-internal.h b/kernel/cgroup/cpuset-internal.h
+index 976a8bc3ff6031..383963e28ac69c 100644
+--- a/kernel/cgroup/cpuset-internal.h
++++ b/kernel/cgroup/cpuset-internal.h
+@@ -33,6 +33,7 @@ enum prs_errcode {
+ 	PERR_CPUSEMPTY,
+ 	PERR_HKEEPING,
+ 	PERR_ACCESS,
++	PERR_REMOTE,
+ };
+ 
+ /* bits in struct cpuset flags field */
+diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
+index 839f88ba17f7d3..c709a05023cd99 100644
+--- a/kernel/cgroup/cpuset.c
++++ b/kernel/cgroup/cpuset.c
+@@ -62,6 +62,7 @@ static const char * const perr_strings[] = {
+ 	[PERR_CPUSEMPTY] = "cpuset.cpus and cpuset.cpus.exclusive are empty",
+ 	[PERR_HKEEPING]  = "partition config conflicts with housekeeping setup",
+ 	[PERR_ACCESS]    = "Enable partition not permitted",
++	[PERR_REMOTE]    = "Have remote partition underneath",
+ };
+ 
+ /*
+@@ -2824,6 +2825,19 @@ static int update_prstate(struct cpuset *cs, int new_prs)
+ 			goto out;
+ 		}
+ 
++		/*
++		 * We don't support the creation of a new local partition with
++		 * a remote partition underneath it. This unsupported
++		 * setting can happen only if parent is the top_cpuset because
++		 * a remote partition cannot be created underneath an existing
++		 * local or remote partition.
++		 */
++		if ((parent == &top_cpuset) &&
++		    cpumask_intersects(cs->exclusive_cpus, subpartitions_cpus)) {
++			err = PERR_REMOTE;
++			goto out;
++		}
++
+ 		/*
+ 		 * If parent is valid partition, enable local partiion.
+ 		 * Otherwise, enable a remote partition.
+diff --git a/kernel/dma/contiguous.c b/kernel/dma/contiguous.c
+index 055da410ac71d6..8df0dfaaca18ee 100644
+--- a/kernel/dma/contiguous.c
++++ b/kernel/dma/contiguous.c
+@@ -64,8 +64,7 @@ struct cma *dma_contiguous_default_area;
+  * Users, who want to set the size of global CMA area for their system
+  * should use cma= kernel parameter.
+  */
+-static const phys_addr_t size_bytes __initconst =
+-	(phys_addr_t)CMA_SIZE_MBYTES * SZ_1M;
++#define size_bytes ((phys_addr_t)CMA_SIZE_MBYTES * SZ_1M)
+ static phys_addr_t  size_cmdline __initdata = -1;
+ static phys_addr_t base_cmdline __initdata;
+ static phys_addr_t limit_cmdline __initdata;
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 97af53c43608e4..edafe9fc4bdd06 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -13661,6 +13661,9 @@ inherit_event(struct perf_event *parent_event,
+ 	if (IS_ERR(child_event))
+ 		return child_event;
+ 
++	get_ctx(child_ctx);
++	child_event->ctx = child_ctx;
++
+ 	pmu_ctx = find_get_pmu_context(child_event->pmu, child_ctx, child_event);
+ 	if (IS_ERR(pmu_ctx)) {
+ 		free_event(child_event);
+@@ -13683,8 +13686,6 @@ inherit_event(struct perf_event *parent_event,
+ 		return NULL;
+ 	}
+ 
+-	get_ctx(child_ctx);
+-
+ 	/*
+ 	 * Make the child state follow the state of the parent event,
+ 	 * not its attr.disabled bit.  We hold the parent's mutex,
+@@ -13705,7 +13706,6 @@ inherit_event(struct perf_event *parent_event,
+ 		local64_set(&hwc->period_left, sample_period);
+ 	}
+ 
+-	child_event->ctx = child_ctx;
+ 	child_event->overflow_handler = parent_event->overflow_handler;
+ 	child_event->overflow_handler_context
+ 		= parent_event->overflow_handler_context;
+diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c
+index 396a067a8a56b5..7682c36cbccc63 100644
+--- a/kernel/irq/msi.c
++++ b/kernel/irq/msi.c
+@@ -1143,7 +1143,7 @@ static bool msi_check_reservation_mode(struct irq_domain *domain,
+ 	if (!(info->flags & MSI_FLAG_MUST_REACTIVATE))
+ 		return false;
+ 
+-	if (IS_ENABLED(CONFIG_PCI_MSI) && pci_msi_ignore_mask)
++	if (info->flags & MSI_FLAG_NO_MASK)
+ 		return false;
+ 
+ 	/*
+diff --git a/kernel/module/Kconfig b/kernel/module/Kconfig
+index 7c6588148d42d3..0c746a150e34e2 100644
+--- a/kernel/module/Kconfig
++++ b/kernel/module/Kconfig
+@@ -231,6 +231,7 @@ comment "Do not forget to sign required modules with scripts/sign-file"
+ choice
+ 	prompt "Hash algorithm to sign modules"
+ 	depends on MODULE_SIG || IMA_APPRAISE_MODSIG
++	default MODULE_SIG_SHA512
+ 	help
+ 	  This determines which sort of hashing algorithm will be used during
+ 	  signature generation.  This algorithm _must_ be built into the kernel
+diff --git a/kernel/panic.c b/kernel/panic.c
+index fbc59b3b64d0b5..ddad0578355bb5 100644
+--- a/kernel/panic.c
++++ b/kernel/panic.c
+@@ -832,9 +832,15 @@ device_initcall(register_warn_debugfs);
+  */
+ __visible noinstr void __stack_chk_fail(void)
+ {
++	unsigned long flags;
++
+ 	instrumentation_begin();
++	flags = user_access_save();
++
+ 	panic("stack-protector: Kernel stack is corrupted in: %pB",
+ 		__builtin_return_address(0));
++
++	user_access_restore(flags);
+ 	instrumentation_end();
+ }
+ EXPORT_SYMBOL(__stack_chk_fail);
+diff --git a/kernel/power/energy_model.c b/kernel/power/energy_model.c
+index 927cc55ba0b3d1..4e1778071d704e 100644
+--- a/kernel/power/energy_model.c
++++ b/kernel/power/energy_model.c
+@@ -161,22 +161,10 @@ static void em_debug_create_pd(struct device *dev) {}
+ static void em_debug_remove_pd(struct device *dev) {}
+ #endif
+ 
+-static void em_destroy_table_rcu(struct rcu_head *rp)
+-{
+-	struct em_perf_table __rcu *table;
+-
+-	table = container_of(rp, struct em_perf_table, rcu);
+-	kfree(table);
+-}
+-
+ static void em_release_table_kref(struct kref *kref)
+ {
+-	struct em_perf_table __rcu *table;
+-
+ 	/* It was the last owner of this table so we can free */
+-	table = container_of(kref, struct em_perf_table, kref);
+-
+-	call_rcu(&table->rcu, em_destroy_table_rcu);
++	kfree_rcu(container_of(kref, struct em_perf_table, kref), rcu);
+ }
+ 
+ /**
+@@ -185,7 +173,7 @@ static void em_release_table_kref(struct kref *kref)
+  *
+  * No return values.
+  */
+-void em_table_free(struct em_perf_table __rcu *table)
++void em_table_free(struct em_perf_table *table)
+ {
+ 	kref_put(&table->kref, em_release_table_kref);
+ }
+@@ -198,9 +186,9 @@ void em_table_free(struct em_perf_table __rcu *table)
+  * has a user.
+  * Returns allocated table or NULL.
+  */
+-struct em_perf_table __rcu *em_table_alloc(struct em_perf_domain *pd)
++struct em_perf_table *em_table_alloc(struct em_perf_domain *pd)
+ {
+-	struct em_perf_table __rcu *table;
++	struct em_perf_table *table;
+ 	int table_size;
+ 
+ 	table_size = sizeof(struct em_perf_state) * pd->nr_perf_states;
+@@ -308,9 +296,9 @@ int em_dev_compute_costs(struct device *dev, struct em_perf_state *table,
+  * Return 0 on success or an error code on failure.
+  */
+ int em_dev_update_perf_domain(struct device *dev,
+-			      struct em_perf_table __rcu *new_table)
++			      struct em_perf_table *new_table)
+ {
+-	struct em_perf_table __rcu *old_table;
++	struct em_perf_table *old_table;
+ 	struct em_perf_domain *pd;
+ 
+ 	if (!dev)
+@@ -327,7 +315,8 @@ int em_dev_update_perf_domain(struct device *dev,
+ 
+ 	kref_get(&new_table->kref);
+ 
+-	old_table = pd->em_table;
++	old_table = rcu_dereference_protected(pd->em_table,
++					      lockdep_is_held(&em_pd_mutex));
+ 	rcu_assign_pointer(pd->em_table, new_table);
+ 
+ 	em_cpufreq_update_efficiencies(dev, new_table->state);
+@@ -399,7 +388,7 @@ static int em_create_pd(struct device *dev, int nr_states,
+ 			struct em_data_callback *cb, cpumask_t *cpus,
+ 			unsigned long flags)
+ {
+-	struct em_perf_table __rcu *em_table;
++	struct em_perf_table *em_table;
+ 	struct em_perf_domain *pd;
+ 	struct device *cpu_dev;
+ 	int cpu, ret, num_cpus;
+@@ -559,6 +548,7 @@ int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states,
+ 				struct em_data_callback *cb, cpumask_t *cpus,
+ 				bool microwatts)
+ {
++	struct em_perf_table *em_table;
+ 	unsigned long cap, prev_cap = 0;
+ 	unsigned long flags = 0;
+ 	int cpu, ret;
+@@ -629,7 +619,9 @@ int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states,
+ 
+ 	dev->em_pd->flags |= flags;
+ 
+-	em_cpufreq_update_efficiencies(dev, dev->em_pd->em_table->state);
++	em_table = rcu_dereference_protected(dev->em_pd->em_table,
++					     lockdep_is_held(&em_pd_mutex));
++	em_cpufreq_update_efficiencies(dev, em_table->state);
+ 
+ 	em_debug_create_pd(dev);
+ 	dev_info(dev, "EM: created perf domain\n");
+@@ -666,7 +658,8 @@ void em_dev_unregister_perf_domain(struct device *dev)
+ 	mutex_lock(&em_pd_mutex);
+ 	em_debug_remove_pd(dev);
+ 
+-	em_table_free(dev->em_pd->em_table);
++	em_table_free(rcu_dereference_protected(dev->em_pd->em_table,
++						lockdep_is_held(&em_pd_mutex)));
+ 
+ 	kfree(dev->em_pd);
+ 	dev->em_pd = NULL;
+@@ -674,9 +667,9 @@ void em_dev_unregister_perf_domain(struct device *dev)
+ }
+ EXPORT_SYMBOL_GPL(em_dev_unregister_perf_domain);
+ 
+-static struct em_perf_table __rcu *em_table_dup(struct em_perf_domain *pd)
++static struct em_perf_table *em_table_dup(struct em_perf_domain *pd)
+ {
+-	struct em_perf_table __rcu *em_table;
++	struct em_perf_table *em_table;
+ 	struct em_perf_state *ps, *new_ps;
+ 	int ps_size;
+ 
+@@ -698,7 +691,7 @@ static struct em_perf_table __rcu *em_table_dup(struct em_perf_domain *pd)
+ }
+ 
+ static int em_recalc_and_update(struct device *dev, struct em_perf_domain *pd,
+-				struct em_perf_table __rcu *em_table)
++				struct em_perf_table *em_table)
+ {
+ 	int ret;
+ 
+@@ -729,7 +722,7 @@ static void em_adjust_new_capacity(struct device *dev,
+ 				   struct em_perf_domain *pd,
+ 				   u64 max_cap)
+ {
+-	struct em_perf_table __rcu *em_table;
++	struct em_perf_table *em_table;
+ 
+ 	em_table = em_table_dup(pd);
+ 	if (!em_table) {
+@@ -820,7 +813,7 @@ static void em_update_workfn(struct work_struct *work)
+  */
+ int em_dev_update_chip_binning(struct device *dev)
+ {
+-	struct em_perf_table __rcu *em_table;
++	struct em_perf_table *em_table;
+ 	struct em_perf_domain *pd;
+ 	int i, ret;
+ 
+diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
+index fcf968490308b9..7ed25654820fd4 100644
+--- a/kernel/sched/ext.c
++++ b/kernel/sched/ext.c
+@@ -4530,7 +4530,7 @@ static void scx_ops_bypass(bool bypass)
+ 
+ static void free_exit_info(struct scx_exit_info *ei)
+ {
+-	kfree(ei->dump);
++	kvfree(ei->dump);
+ 	kfree(ei->msg);
+ 	kfree(ei->bt);
+ 	kfree(ei);
+@@ -4546,7 +4546,7 @@ static struct scx_exit_info *alloc_exit_info(size_t exit_dump_len)
+ 
+ 	ei->bt = kcalloc(SCX_EXIT_BT_LEN, sizeof(ei->bt[0]), GFP_KERNEL);
+ 	ei->msg = kzalloc(SCX_EXIT_MSG_LEN, GFP_KERNEL);
+-	ei->dump = kzalloc(exit_dump_len, GFP_KERNEL);
++	ei->dump = kvzalloc(exit_dump_len, GFP_KERNEL);
+ 
+ 	if (!ei->bt || !ei->msg || !ei->dump) {
+ 		free_exit_info(ei);
+diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
+index a47bcf71defcf5..9a3859443c042c 100644
+--- a/kernel/time/tick-common.c
++++ b/kernel/time/tick-common.c
+@@ -509,6 +509,7 @@ void tick_resume(void)
+ 
+ #ifdef CONFIG_SUSPEND
+ static DEFINE_RAW_SPINLOCK(tick_freeze_lock);
++static DEFINE_WAIT_OVERRIDE_MAP(tick_freeze_map, LD_WAIT_SLEEP);
+ static unsigned int tick_freeze_depth;
+ 
+ /**
+@@ -528,9 +529,22 @@ void tick_freeze(void)
+ 	if (tick_freeze_depth == num_online_cpus()) {
+ 		trace_suspend_resume(TPS("timekeeping_freeze"),
+ 				     smp_processor_id(), true);
++		/*
++		 * All other CPUs have their interrupts disabled and are
++		 * suspended to idle. Other tasks have been frozen so there
++		 * is no scheduling happening. This means that there is no
++		 * concurrency in the system at this point. Therefore it is
++		 * okay to acquire a sleeping lock on PREEMPT_RT, such as a
++		 * spinlock, because the lock cannot be held by other CPUs
++		 * or threads and acquiring it cannot block.
++		 *
++		 * Inform lockdep about the situation.
++		 */
++		lock_map_acquire_try(&tick_freeze_map);
+ 		system_state = SYSTEM_SUSPEND;
+ 		sched_clock_suspend();
+ 		timekeeping_suspend();
++		lock_map_release(&tick_freeze_map);
+ 	} else {
+ 		tick_suspend_local();
+ 	}
+@@ -552,8 +566,16 @@ void tick_unfreeze(void)
+ 	raw_spin_lock(&tick_freeze_lock);
+ 
+ 	if (tick_freeze_depth == num_online_cpus()) {
++		/*
++		 * Similar to tick_freeze(). On resumption the first CPU may
++		 * acquire uncontended sleeping locks while other CPUs block on
++		 * tick_freeze_lock.
++		 */
++		lock_map_acquire_try(&tick_freeze_map);
+ 		timekeeping_resume();
+ 		sched_clock_resume();
++		lock_map_release(&tick_freeze_map);
++
+ 		system_state = SYSTEM_RUNNING;
+ 		trace_suspend_resume(TPS("timekeeping_freeze"),
+ 				     smp_processor_id(), false);
+diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
+index 55f279ddfd63d5..e5c063fc8ef97a 100644
+--- a/kernel/trace/bpf_trace.c
++++ b/kernel/trace/bpf_trace.c
+@@ -403,7 +403,7 @@ static const struct bpf_func_proto bpf_trace_printk_proto = {
+ 	.arg2_type	= ARG_CONST_SIZE,
+ };
+ 
+-static void __set_printk_clr_event(void)
++static void __set_printk_clr_event(struct work_struct *work)
+ {
+ 	/*
+ 	 * This program might be calling bpf_trace_printk,
+@@ -416,10 +416,11 @@ static void __set_printk_clr_event(void)
+ 	if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1))
+ 		pr_warn_ratelimited("could not enable bpf_trace_printk events");
+ }
++static DECLARE_WORK(set_printk_work, __set_printk_clr_event);
+ 
+ const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
+ {
+-	__set_printk_clr_event();
++	schedule_work(&set_printk_work);
+ 	return &bpf_trace_printk_proto;
+ }
+ 
+@@ -462,7 +463,7 @@ static const struct bpf_func_proto bpf_trace_vprintk_proto = {
+ 
+ const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void)
+ {
+-	__set_printk_clr_event();
++	schedule_work(&set_printk_work);
+ 	return &bpf_trace_vprintk_proto;
+ }
+ 
+diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
+index 11dea25ef880a5..15fb255733fb63 100644
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -470,6 +470,7 @@ static void test_event_printk(struct trace_event_call *call)
+ 			case '%':
+ 				continue;
+ 			case 'p':
++ do_pointer:
+ 				/* Find dereferencing fields */
+ 				switch (fmt[i + 1]) {
+ 				case 'B': case 'R': case 'r':
+@@ -498,6 +499,12 @@ static void test_event_printk(struct trace_event_call *call)
+ 						continue;
+ 					if (fmt[i + j] == '*') {
+ 						star = true;
++						/* Handle %*pbl case */
++						if (!j && fmt[i + 1] == 'p') {
++							arg++;
++							i++;
++							goto do_pointer;
++						}
+ 						continue;
+ 					}
+ 					if ((fmt[i + j] == 's')) {
+diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan
+index 1d4aa7a83b3a55..37655f58b8554e 100644
+--- a/lib/Kconfig.ubsan
++++ b/lib/Kconfig.ubsan
+@@ -118,7 +118,6 @@ config UBSAN_UNREACHABLE
+ 
+ config UBSAN_SIGNED_WRAP
+ 	bool "Perform checking for signed arithmetic wrap-around"
+-	default UBSAN
+ 	depends on !COMPILE_TEST
+ 	# The no_sanitize attribute was introduced in GCC with version 8.
+ 	depends on !CC_IS_GCC || GCC_VERSION >= 80000
+diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig
+index b01253cac70a74..b09e78da959ac3 100644
+--- a/lib/crypto/Kconfig
++++ b/lib/crypto/Kconfig
+@@ -42,7 +42,7 @@ config CRYPTO_LIB_BLAKE2S_GENERIC
+ 	  of CRYPTO_LIB_BLAKE2S.
+ 
+ config CRYPTO_ARCH_HAVE_LIB_CHACHA
+-	tristate
++	bool
+ 	help
+ 	  Declares whether the architecture provides an arch-specific
+ 	  accelerated implementation of the ChaCha library interface,
+@@ -58,17 +58,21 @@ config CRYPTO_LIB_CHACHA_GENERIC
+ 	  implementation is enabled, this implementation serves the users
+ 	  of CRYPTO_LIB_CHACHA.
+ 
++config CRYPTO_LIB_CHACHA_INTERNAL
++	tristate
++	select CRYPTO_LIB_CHACHA_GENERIC if CRYPTO_ARCH_HAVE_LIB_CHACHA=n
++
+ config CRYPTO_LIB_CHACHA
+ 	tristate "ChaCha library interface"
+-	depends on CRYPTO_ARCH_HAVE_LIB_CHACHA || !CRYPTO_ARCH_HAVE_LIB_CHACHA
+-	select CRYPTO_LIB_CHACHA_GENERIC if CRYPTO_ARCH_HAVE_LIB_CHACHA=n
++	select CRYPTO
++	select CRYPTO_LIB_CHACHA_INTERNAL
+ 	help
+ 	  Enable the ChaCha library interface. This interface may be fulfilled
+ 	  by either the generic implementation or an arch-specific one, if one
+ 	  is available and enabled.
+ 
+ config CRYPTO_ARCH_HAVE_LIB_CURVE25519
+-	tristate
++	bool
+ 	help
+ 	  Declares whether the architecture provides an arch-specific
+ 	  accelerated implementation of the Curve25519 library interface,
+@@ -76,6 +80,7 @@ config CRYPTO_ARCH_HAVE_LIB_CURVE25519
+ 
+ config CRYPTO_LIB_CURVE25519_GENERIC
+ 	tristate
++	select CRYPTO_LIB_UTILS
+ 	help
+ 	  This symbol can be depended upon by arch implementations of the
+ 	  Curve25519 library interface that require the generic code as a
+@@ -83,11 +88,14 @@ config CRYPTO_LIB_CURVE25519_GENERIC
+ 	  implementation is enabled, this implementation serves the users
+ 	  of CRYPTO_LIB_CURVE25519.
+ 
++config CRYPTO_LIB_CURVE25519_INTERNAL
++	tristate
++	select CRYPTO_LIB_CURVE25519_GENERIC if CRYPTO_ARCH_HAVE_LIB_CURVE25519=n
++
+ config CRYPTO_LIB_CURVE25519
+ 	tristate "Curve25519 scalar multiplication library"
+-	depends on CRYPTO_ARCH_HAVE_LIB_CURVE25519 || !CRYPTO_ARCH_HAVE_LIB_CURVE25519
+-	select CRYPTO_LIB_CURVE25519_GENERIC if CRYPTO_ARCH_HAVE_LIB_CURVE25519=n
+-	select CRYPTO_LIB_UTILS
++	select CRYPTO
++	select CRYPTO_LIB_CURVE25519_INTERNAL
+ 	help
+ 	  Enable the Curve25519 library interface. This interface may be
+ 	  fulfilled by either the generic implementation or an arch-specific
+@@ -104,7 +112,7 @@ config CRYPTO_LIB_POLY1305_RSIZE
+ 	default 1
+ 
+ config CRYPTO_ARCH_HAVE_LIB_POLY1305
+-	tristate
++	bool
+ 	help
+ 	  Declares whether the architecture provides an arch-specific
+ 	  accelerated implementation of the Poly1305 library interface,
+@@ -119,10 +127,14 @@ config CRYPTO_LIB_POLY1305_GENERIC
+ 	  implementation is enabled, this implementation serves the users
+ 	  of CRYPTO_LIB_POLY1305.
+ 
++config CRYPTO_LIB_POLY1305_INTERNAL
++	tristate
++	select CRYPTO_LIB_POLY1305_GENERIC if CRYPTO_ARCH_HAVE_LIB_POLY1305=n
++
+ config CRYPTO_LIB_POLY1305
+ 	tristate "Poly1305 library interface"
+-	depends on CRYPTO_ARCH_HAVE_LIB_POLY1305 || !CRYPTO_ARCH_HAVE_LIB_POLY1305
+-	select CRYPTO_LIB_POLY1305_GENERIC if CRYPTO_ARCH_HAVE_LIB_POLY1305=n
++	select CRYPTO
++	select CRYPTO_LIB_POLY1305_INTERNAL
+ 	help
+ 	  Enable the Poly1305 library interface. This interface may be fulfilled
+ 	  by either the generic implementation or an arch-specific one, if one
+@@ -130,11 +142,10 @@ config CRYPTO_LIB_POLY1305
+ 
+ config CRYPTO_LIB_CHACHA20POLY1305
+ 	tristate "ChaCha20-Poly1305 AEAD support (8-byte nonce library version)"
+-	depends on CRYPTO_ARCH_HAVE_LIB_CHACHA || !CRYPTO_ARCH_HAVE_LIB_CHACHA
+-	depends on CRYPTO_ARCH_HAVE_LIB_POLY1305 || !CRYPTO_ARCH_HAVE_LIB_POLY1305
+-	depends on CRYPTO
++	select CRYPTO
+ 	select CRYPTO_LIB_CHACHA
+ 	select CRYPTO_LIB_POLY1305
++	select CRYPTO_LIB_UTILS
+ 	select CRYPTO_ALGAPI
+ 
+ config CRYPTO_LIB_SHA1
+diff --git a/lib/test_ubsan.c b/lib/test_ubsan.c
+index 5d7b10e9861070..63b7566e78639e 100644
+--- a/lib/test_ubsan.c
++++ b/lib/test_ubsan.c
+@@ -68,18 +68,22 @@ static void test_ubsan_shift_out_of_bounds(void)
+ 
+ static void test_ubsan_out_of_bounds(void)
+ {
+-	volatile int i = 4, j = 5, k = -1;
+-	volatile char above[4] = { }; /* Protect surrounding memory. */
+-	volatile int arr[4];
+-	volatile char below[4] = { }; /* Protect surrounding memory. */
++	int i = 4, j = 4, k = -1;
++	volatile struct {
++		char above[4]; /* Protect surrounding memory. */
++		int arr[4];
++		char below[4]; /* Protect surrounding memory. */
++	} data;
+ 
+-	above[0] = below[0];
++	OPTIMIZER_HIDE_VAR(i);
++	OPTIMIZER_HIDE_VAR(j);
++	OPTIMIZER_HIDE_VAR(k);
+ 
+ 	UBSAN_TEST(CONFIG_UBSAN_BOUNDS, "above");
+-	arr[j] = i;
++	data.arr[j] = i;
+ 
+ 	UBSAN_TEST(CONFIG_UBSAN_BOUNDS, "below");
+-	arr[k] = i;
++	data.arr[k] = i;
+ }
+ 
+ enum ubsan_test_enum {
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index 39b3c7f35ea85c..0eb5d510d4f6b6 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -1079,6 +1079,13 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
+ 		if (!folio_trylock(folio))
+ 			goto keep;
+ 
++		if (folio_contain_hwpoisoned_page(folio)) {
++			unmap_poisoned_folio(folio, folio_pfn(folio), false);
++			folio_unlock(folio);
++			folio_put(folio);
++			continue;
++		}
++
+ 		VM_BUG_ON_FOLIO(folio_test_active(folio), folio);
+ 
+ 		nr_pages = folio_nr_pages(folio);
+diff --git a/net/9p/client.c b/net/9p/client.c
+index 09f8ced9f8bb7f..52a5497cfca794 100644
+--- a/net/9p/client.c
++++ b/net/9p/client.c
+@@ -1548,7 +1548,8 @@ p9_client_read_once(struct p9_fid *fid, u64 offset, struct iov_iter *to,
+ 	struct p9_client *clnt = fid->clnt;
+ 	struct p9_req_t *req;
+ 	int count = iov_iter_count(to);
+-	int rsize, received, non_zc = 0;
++	u32 rsize, received;
++	bool non_zc = false;
+ 	char *dataptr;
+ 
+ 	*err = 0;
+@@ -1571,7 +1572,7 @@ p9_client_read_once(struct p9_fid *fid, u64 offset, struct iov_iter *to,
+ 				       0, 11, "dqd", fid->fid,
+ 				       offset, rsize);
+ 	} else {
+-		non_zc = 1;
++		non_zc = true;
+ 		req = p9_client_rpc(clnt, P9_TREAD, "dqd", fid->fid, offset,
+ 				    rsize);
+ 	}
+@@ -1592,11 +1593,11 @@ p9_client_read_once(struct p9_fid *fid, u64 offset, struct iov_iter *to,
+ 		return 0;
+ 	}
+ 	if (rsize < received) {
+-		pr_err("bogus RREAD count (%d > %d)\n", received, rsize);
++		pr_err("bogus RREAD count (%u > %u)\n", received, rsize);
+ 		received = rsize;
+ 	}
+ 
+-	p9_debug(P9_DEBUG_9P, "<<< RREAD count %d\n", received);
++	p9_debug(P9_DEBUG_9P, "<<< RREAD count %u\n", received);
+ 
+ 	if (non_zc) {
+ 		int n = copy_to_iter(dataptr, received, to);
+@@ -1623,9 +1624,9 @@ p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err)
+ 	*err = 0;
+ 
+ 	while (iov_iter_count(from)) {
+-		int count = iov_iter_count(from);
+-		int rsize = fid->iounit;
+-		int written;
++		size_t count = iov_iter_count(from);
++		u32 rsize = fid->iounit;
++		u32 written;
+ 
+ 		if (!rsize || rsize > clnt->msize - P9_IOHDRSZ)
+ 			rsize = clnt->msize - P9_IOHDRSZ;
+@@ -1633,7 +1634,7 @@ p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err)
+ 		if (count < rsize)
+ 			rsize = count;
+ 
+-		p9_debug(P9_DEBUG_9P, ">>> TWRITE fid %d offset %llu count %d (/%d)\n",
++		p9_debug(P9_DEBUG_9P, ">>> TWRITE fid %d offset %llu count %u (/%zu)\n",
+ 			 fid->fid, offset, rsize, count);
+ 
+ 		/* Don't bother zerocopy for small IO (< 1024) */
+@@ -1659,11 +1660,11 @@ p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err)
+ 			break;
+ 		}
+ 		if (rsize < written) {
+-			pr_err("bogus RWRITE count (%d > %d)\n", written, rsize);
++			pr_err("bogus RWRITE count (%u > %u)\n", written, rsize);
+ 			written = rsize;
+ 		}
+ 
+-		p9_debug(P9_DEBUG_9P, "<<< RWRITE count %d\n", written);
++		p9_debug(P9_DEBUG_9P, "<<< RWRITE count %u\n", written);
+ 
+ 		p9_req_put(clnt, req);
+ 		iov_iter_revert(from, count - written - iov_iter_count(from));
+@@ -2098,7 +2099,8 @@ EXPORT_SYMBOL_GPL(p9_client_xattrcreate);
+ 
+ int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset)
+ {
+-	int err, rsize, non_zc = 0;
++	int err, non_zc = 0;
++	u32 rsize;
+ 	struct p9_client *clnt;
+ 	struct p9_req_t *req;
+ 	char *dataptr;
+@@ -2107,7 +2109,7 @@ int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset)
+ 
+ 	iov_iter_kvec(&to, ITER_DEST, &kv, 1, count);
+ 
+-	p9_debug(P9_DEBUG_9P, ">>> TREADDIR fid %d offset %llu count %d\n",
++	p9_debug(P9_DEBUG_9P, ">>> TREADDIR fid %d offset %llu count %u\n",
+ 		 fid->fid, offset, count);
+ 
+ 	clnt = fid->clnt;
+@@ -2142,11 +2144,11 @@ int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset)
+ 		goto free_and_error;
+ 	}
+ 	if (rsize < count) {
+-		pr_err("bogus RREADDIR count (%d > %d)\n", count, rsize);
++		pr_err("bogus RREADDIR count (%u > %u)\n", count, rsize);
+ 		count = rsize;
+ 	}
+ 
+-	p9_debug(P9_DEBUG_9P, "<<< RREADDIR count %d\n", count);
++	p9_debug(P9_DEBUG_9P, "<<< RREADDIR count %u\n", count);
+ 
+ 	if (non_zc)
+ 		memmove(data, dataptr, count);
+diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
+index 196060dc6138af..791e4868f2d4e1 100644
+--- a/net/9p/trans_fd.c
++++ b/net/9p/trans_fd.c
+@@ -191,12 +191,13 @@ static void p9_conn_cancel(struct p9_conn *m, int err)
+ 
+ 	spin_lock(&m->req_lock);
+ 
+-	if (m->err) {
++	if (READ_ONCE(m->err)) {
+ 		spin_unlock(&m->req_lock);
+ 		return;
+ 	}
+ 
+-	m->err = err;
++	WRITE_ONCE(m->err, err);
++	ASSERT_EXCLUSIVE_WRITER(m->err);
+ 
+ 	list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) {
+ 		list_move(&req->req_list, &cancel_list);
+@@ -283,7 +284,7 @@ static void p9_read_work(struct work_struct *work)
+ 
+ 	m = container_of(work, struct p9_conn, rq);
+ 
+-	if (m->err < 0)
++	if (READ_ONCE(m->err) < 0)
+ 		return;
+ 
+ 	p9_debug(P9_DEBUG_TRANS, "start mux %p pos %zd\n", m, m->rc.offset);
+@@ -450,7 +451,7 @@ static void p9_write_work(struct work_struct *work)
+ 
+ 	m = container_of(work, struct p9_conn, wq);
+ 
+-	if (m->err < 0) {
++	if (READ_ONCE(m->err) < 0) {
+ 		clear_bit(Wworksched, &m->wsched);
+ 		return;
+ 	}
+@@ -622,7 +623,7 @@ static void p9_poll_mux(struct p9_conn *m)
+ 	__poll_t n;
+ 	int err = -ECONNRESET;
+ 
+-	if (m->err < 0)
++	if (READ_ONCE(m->err) < 0)
+ 		return;
+ 
+ 	n = p9_fd_poll(m->client, NULL, &err);
+@@ -665,6 +666,7 @@ static void p9_poll_mux(struct p9_conn *m)
+ static int p9_fd_request(struct p9_client *client, struct p9_req_t *req)
+ {
+ 	__poll_t n;
++	int err;
+ 	struct p9_trans_fd *ts = client->trans;
+ 	struct p9_conn *m = &ts->conn;
+ 
+@@ -673,9 +675,10 @@ static int p9_fd_request(struct p9_client *client, struct p9_req_t *req)
+ 
+ 	spin_lock(&m->req_lock);
+ 
+-	if (m->err < 0) {
++	err = READ_ONCE(m->err);
++	if (err < 0) {
+ 		spin_unlock(&m->req_lock);
+-		return m->err;
++		return err;
+ 	}
+ 
+ 	WRITE_ONCE(req->status, REQ_STATUS_UNSENT);
+diff --git a/net/core/lwtunnel.c b/net/core/lwtunnel.c
+index 4417a18b3e951a..f63586c9ce0216 100644
+--- a/net/core/lwtunnel.c
++++ b/net/core/lwtunnel.c
+@@ -332,6 +332,8 @@ int lwtunnel_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+ 	struct dst_entry *dst;
+ 	int ret;
+ 
++	local_bh_disable();
++
+ 	if (dev_xmit_recursion()) {
+ 		net_crit_ratelimited("%s(): recursion limit reached on datapath\n",
+ 				     __func__);
+@@ -347,8 +349,10 @@ int lwtunnel_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+ 	lwtstate = dst->lwtstate;
+ 
+ 	if (lwtstate->type == LWTUNNEL_ENCAP_NONE ||
+-	    lwtstate->type > LWTUNNEL_ENCAP_MAX)
+-		return 0;
++	    lwtstate->type > LWTUNNEL_ENCAP_MAX) {
++		ret = 0;
++		goto out;
++	}
+ 
+ 	ret = -EOPNOTSUPP;
+ 	rcu_read_lock();
+@@ -363,11 +367,13 @@ int lwtunnel_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+ 	if (ret == -EOPNOTSUPP)
+ 		goto drop;
+ 
+-	return ret;
++	goto out;
+ 
+ drop:
+ 	kfree_skb(skb);
+ 
++out:
++	local_bh_enable();
+ 	return ret;
+ }
+ EXPORT_SYMBOL_GPL(lwtunnel_output);
+@@ -379,6 +385,8 @@ int lwtunnel_xmit(struct sk_buff *skb)
+ 	struct dst_entry *dst;
+ 	int ret;
+ 
++	local_bh_disable();
++
+ 	if (dev_xmit_recursion()) {
+ 		net_crit_ratelimited("%s(): recursion limit reached on datapath\n",
+ 				     __func__);
+@@ -395,8 +403,10 @@ int lwtunnel_xmit(struct sk_buff *skb)
+ 	lwtstate = dst->lwtstate;
+ 
+ 	if (lwtstate->type == LWTUNNEL_ENCAP_NONE ||
+-	    lwtstate->type > LWTUNNEL_ENCAP_MAX)
+-		return 0;
++	    lwtstate->type > LWTUNNEL_ENCAP_MAX) {
++		ret = 0;
++		goto out;
++	}
+ 
+ 	ret = -EOPNOTSUPP;
+ 	rcu_read_lock();
+@@ -411,11 +421,13 @@ int lwtunnel_xmit(struct sk_buff *skb)
+ 	if (ret == -EOPNOTSUPP)
+ 		goto drop;
+ 
+-	return ret;
++	goto out;
+ 
+ drop:
+ 	kfree_skb(skb);
+ 
++out:
++	local_bh_enable();
+ 	return ret;
+ }
+ EXPORT_SYMBOL_GPL(lwtunnel_xmit);
+@@ -427,6 +439,8 @@ int lwtunnel_input(struct sk_buff *skb)
+ 	struct dst_entry *dst;
+ 	int ret;
+ 
++	DEBUG_NET_WARN_ON_ONCE(!in_softirq());
++
+ 	if (dev_xmit_recursion()) {
+ 		net_crit_ratelimited("%s(): recursion limit reached on datapath\n",
+ 				     __func__);
+diff --git a/net/core/selftests.c b/net/core/selftests.c
+index 8f801e6e3b91b4..561653f9d71d44 100644
+--- a/net/core/selftests.c
++++ b/net/core/selftests.c
+@@ -100,10 +100,10 @@ static struct sk_buff *net_test_get_skb(struct net_device *ndev,
+ 	ehdr->h_proto = htons(ETH_P_IP);
+ 
+ 	if (attr->tcp) {
++		memset(thdr, 0, sizeof(*thdr));
+ 		thdr->source = htons(attr->sport);
+ 		thdr->dest = htons(attr->dport);
+ 		thdr->doff = sizeof(struct tcphdr) / 4;
+-		thdr->check = 0;
+ 	} else {
+ 		uhdr->source = htons(attr->sport);
+ 		uhdr->dest = htons(attr->dport);
+@@ -144,10 +144,18 @@ static struct sk_buff *net_test_get_skb(struct net_device *ndev,
+ 	attr->id = net_test_next_id;
+ 	shdr->id = net_test_next_id++;
+ 
+-	if (attr->size)
+-		skb_put(skb, attr->size);
+-	if (attr->max_size && attr->max_size > skb->len)
+-		skb_put(skb, attr->max_size - skb->len);
++	if (attr->size) {
++		void *payload = skb_put(skb, attr->size);
++
++		memset(payload, 0, attr->size);
++	}
++
++	if (attr->max_size && attr->max_size > skb->len) {
++		size_t pad_len = attr->max_size - skb->len;
++		void *pad = skb_put(skb, pad_len);
++
++		memset(pad, 0, pad_len);
++	}
+ 
+ 	skb->csum = 0;
+ 	skb->ip_summed = CHECKSUM_PARTIAL;
+diff --git a/net/ipv4/netfilter/nft_fib_ipv4.c b/net/ipv4/netfilter/nft_fib_ipv4.c
+index 09fff5d424efc2..d25d717c121f0c 100644
+--- a/net/ipv4/netfilter/nft_fib_ipv4.c
++++ b/net/ipv4/netfilter/nft_fib_ipv4.c
+@@ -70,6 +70,11 @@ void nft_fib4_eval(const struct nft_expr *expr, struct nft_regs *regs,
+ 	const struct net_device *oif;
+ 	const struct net_device *found;
+ 
++	if (nft_fib_can_skip(pkt)) {
++		nft_fib_store_result(dest, priv, nft_in(pkt));
++		return;
++	}
++
+ 	/*
+ 	 * Do not set flowi4_oif, it restricts results (for example, asking
+ 	 * for oif 3 will get RTN_UNICAST result even if the daddr exits
+@@ -84,12 +89,6 @@ void nft_fib4_eval(const struct nft_expr *expr, struct nft_regs *regs,
+ 	else
+ 		oif = NULL;
+ 
+-	if (nft_hook(pkt) == NF_INET_PRE_ROUTING &&
+-	    nft_fib_is_loopback(pkt->skb, nft_in(pkt))) {
+-		nft_fib_store_result(dest, priv, nft_in(pkt));
+-		return;
+-	}
+-
+ 	iph = skb_header_pointer(pkt->skb, noff, sizeof(_iph), &_iph);
+ 	if (!iph) {
+ 		regs->verdict.code = NFT_BREAK;
+diff --git a/net/ipv6/netfilter/nft_fib_ipv6.c b/net/ipv6/netfilter/nft_fib_ipv6.c
+index c9f1634b3838ae..7fd9d7b21cd42d 100644
+--- a/net/ipv6/netfilter/nft_fib_ipv6.c
++++ b/net/ipv6/netfilter/nft_fib_ipv6.c
+@@ -170,6 +170,11 @@ void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs,
+ 	struct rt6_info *rt;
+ 	int lookup_flags;
+ 
++	if (nft_fib_can_skip(pkt)) {
++		nft_fib_store_result(dest, priv, nft_in(pkt));
++		return;
++	}
++
+ 	if (priv->flags & NFTA_FIB_F_IIF)
+ 		oif = nft_in(pkt);
+ 	else if (priv->flags & NFTA_FIB_F_OIF)
+@@ -181,17 +186,13 @@ void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs,
+ 		return;
+ 	}
+ 
+-	lookup_flags = nft_fib6_flowi_init(&fl6, priv, pkt, oif, iph);
+-
+-	if (nft_hook(pkt) == NF_INET_PRE_ROUTING ||
+-	    nft_hook(pkt) == NF_INET_INGRESS) {
+-		if (nft_fib_is_loopback(pkt->skb, nft_in(pkt)) ||
+-		    nft_fib_v6_skip_icmpv6(pkt->skb, pkt->tprot, iph)) {
+-			nft_fib_store_result(dest, priv, nft_in(pkt));
+-			return;
+-		}
++	if (nft_fib_v6_skip_icmpv6(pkt->skb, pkt->tprot, iph)) {
++		nft_fib_store_result(dest, priv, nft_in(pkt));
++		return;
+ 	}
+ 
++	lookup_flags = nft_fib6_flowi_init(&fl6, priv, pkt, oif, iph);
++
+ 	*dest = 0;
+ 	rt = (void *)ip6_route_lookup(nft_net(pkt), &fl6, pkt->skb,
+ 				      lookup_flags);
+diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
+index c287bf8423b47b..5bb4ab9941d6e9 100644
+--- a/net/sched/sch_hfsc.c
++++ b/net/sched/sch_hfsc.c
+@@ -958,6 +958,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
+ 
+ 	if (cl != NULL) {
+ 		int old_flags;
++		int len = 0;
+ 
+ 		if (parentid) {
+ 			if (cl->cl_parent &&
+@@ -988,9 +989,13 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
+ 		if (usc != NULL)
+ 			hfsc_change_usc(cl, usc, cur_time);
+ 
++		if (cl->qdisc->q.qlen != 0)
++			len = qdisc_peek_len(cl->qdisc);
++		/* Check queue length again since some qdisc implementations
++		 * (e.g., netem/codel) might empty the queue during the peek
++		 * operation.
++		 */
+ 		if (cl->qdisc->q.qlen != 0) {
+-			int len = qdisc_peek_len(cl->qdisc);
+-
+ 			if (cl->cl_flags & HFSC_RSC) {
+ 				if (old_flags & HFSC_RSC)
+ 					update_ed(cl, len);
+@@ -1632,10 +1637,16 @@ hfsc_dequeue(struct Qdisc *sch)
+ 		if (cl->qdisc->q.qlen != 0) {
+ 			/* update ed */
+ 			next_len = qdisc_peek_len(cl->qdisc);
+-			if (realtime)
+-				update_ed(cl, next_len);
+-			else
+-				update_d(cl, next_len);
++			/* Check queue length again since some qdisc implementations
++			 * (e.g., netem/codel) might empty the queue during the peek
++			 * operation.
++			 */
++			if (cl->qdisc->q.qlen != 0) {
++				if (realtime)
++					update_ed(cl, next_len);
++				else
++					update_d(cl, next_len);
++			}
+ 		} else {
+ 			/* the class becomes passive */
+ 			eltree_remove(cl);
+diff --git a/net/tipc/monitor.c b/net/tipc/monitor.c
+index e2f19627e43d52..b45c5b91bc7afb 100644
+--- a/net/tipc/monitor.c
++++ b/net/tipc/monitor.c
+@@ -716,7 +716,8 @@ void tipc_mon_reinit_self(struct net *net)
+ 		if (!mon)
+ 			continue;
+ 		write_lock_bh(&mon->lock);
+-		mon->self->addr = tipc_own_addr(net);
++		if (mon->self)
++			mon->self->addr = tipc_own_addr(net);
+ 		write_unlock_bh(&mon->lock);
+ 	}
+ }
+diff --git a/rust/kernel/firmware.rs b/rust/kernel/firmware.rs
+index c5162fdc95ff05..74c61bd61fbc8a 100644
+--- a/rust/kernel/firmware.rs
++++ b/rust/kernel/firmware.rs
+@@ -4,7 +4,7 @@
+ //!
+ //! C header: [`include/linux/firmware.h`](srctree/include/linux/firmware.h)
+ 
+-use crate::{bindings, device::Device, error::Error, error::Result, str::CStr};
++use crate::{bindings, device::Device, error::Error, error::Result, ffi, str::CStr};
+ use core::ptr::NonNull;
+ 
+ /// # Invariants
+@@ -12,7 +12,11 @@
+ /// One of the following: `bindings::request_firmware`, `bindings::firmware_request_nowarn`,
+ /// `bindings::firmware_request_platform`, `bindings::request_firmware_direct`.
+ struct FwFunc(
+-    unsafe extern "C" fn(*mut *const bindings::firmware, *const u8, *mut bindings::device) -> i32,
++    unsafe extern "C" fn(
++        *mut *const bindings::firmware,
++        *const ffi::c_char,
++        *mut bindings::device,
++    ) -> i32,
+ );
+ 
+ impl FwFunc {
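
The switch from *const u8 to *const ffi::c_char matters because C's
plain char has implementation-defined signedness: signed on x86-64
Linux, unsigned on arm64 and s390. A standalone C check of the
property the binding accommodates:

    #include <limits.h>
    #include <stdio.h>

    int main(void)
    {
            /* CHAR_MIN is negative where plain char is signed (x86-64),
             * zero where it is unsigned (e.g. arm64). */
            printf("plain char is %s\n",
                   CHAR_MIN < 0 ? "signed" : "unsigned");
            return 0;
    }
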
+diff --git a/samples/trace_events/trace-events-sample.h b/samples/trace_events/trace-events-sample.h
+index 55f9a3da92d5f9..1a05fc15335315 100644
+--- a/samples/trace_events/trace-events-sample.h
++++ b/samples/trace_events/trace-events-sample.h
+@@ -319,7 +319,8 @@ TRACE_EVENT(foo_bar,
+ 		__assign_cpumask(cpum, cpumask_bits(mask));
+ 	),
+ 
+-	TP_printk("foo %s %d %s %s %s %s %s (%s) (%s) %s", __entry->foo, __entry->bar,
++	TP_printk("foo %s %d %s %s %s %s %s %s (%s) (%s) %s [%d] %*pbl",
++		  __entry->foo, __entry->bar,
+ 
+ /*
+  * Notice here the use of some helper functions. This includes:
+@@ -363,9 +364,17 @@ TRACE_EVENT(foo_bar,
+ 		  __print_array(__get_dynamic_array(list),
+ 				__get_dynamic_array_len(list) / sizeof(int),
+ 				sizeof(int)),
++
++/*     A shortcut is to use __print_dynamic_array for dynamic arrays */
++
++		  __print_dynamic_array(list, sizeof(int)),
++
+ 		  __get_str(str), __get_str(lstr),
+ 		  __get_bitmask(cpus), __get_cpumask(cpum),
+-		  __get_str(vstr))
++		  __get_str(vstr),
++		  __get_dynamic_array_len(cpus),
++		  __get_dynamic_array_len(cpus),
++		  __get_dynamic_array(cpus))
+ );
+ 
+ /*
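
The reworked format string leans on the kernel's bitmap extension to
vsnprintf(): %*pb prints a bitmap in hex and %*pbl as a ranged list
such as "0-3,5", with the '*' width consuming an int that gives the
number of valid bits. That is why __get_dynamic_array_len(cpus) is
passed twice, once for the literal [%d] and once as the width for
%*pbl. The same specifier works in plain printk, e.g.:

    /* cpumask_pr_args(mask) expands to: nr_cpu_ids, cpumask_bits(mask) */
    printk("online: %*pbl\n", cpumask_pr_args(cpu_online_mask));
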
+diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
+index fe5e132fcea89a..85e41c68c2c508 100644
+--- a/scripts/Makefile.lib
++++ b/scripts/Makefile.lib
+@@ -257,7 +257,7 @@ objtool-args-$(CONFIG_MITIGATION_SLS)			+= --sls
+ objtool-args-$(CONFIG_STACK_VALIDATION)			+= --stackval
+ objtool-args-$(CONFIG_HAVE_STATIC_CALL_INLINE)		+= --static-call
+ objtool-args-$(CONFIG_HAVE_UACCESS_VALIDATION)		+= --uaccess
+-objtool-args-$(CONFIG_GCOV_KERNEL)			+= --no-unreachable
++objtool-args-$(or $(CONFIG_GCOV_KERNEL),$(CONFIG_KCOV))	+= --no-unreachable
+ objtool-args-$(CONFIG_PREFIX_SYMBOLS)			+= --prefix=$(CONFIG_FUNCTION_PADDING_BYTES)
+ 
+ objtool-args = $(objtool-args-y)					\
+diff --git a/scripts/Makefile.vmlinux b/scripts/Makefile.vmlinux
+index 1284f05555b97f..0c2494ffcaf873 100644
+--- a/scripts/Makefile.vmlinux
++++ b/scripts/Makefile.vmlinux
+@@ -33,6 +33,10 @@ targets += vmlinux
+ vmlinux: scripts/link-vmlinux.sh vmlinux.o $(KBUILD_LDS) FORCE
+ 	+$(call if_changed_dep,link_vmlinux)
+ 
++ifdef CONFIG_BUILDTIME_TABLE_SORT
++vmlinux: scripts/sorttable
++endif
++
+ # module.builtin.ranges
+ # ---------------------------------------------------------------------------
+ ifdef CONFIG_BUILTIN_MODULE_RANGES
+diff --git a/sound/soc/codecs/wcd934x.c b/sound/soc/codecs/wcd934x.c
+index 910852eb9698c1..c7f1b28f3b2302 100644
+--- a/sound/soc/codecs/wcd934x.c
++++ b/sound/soc/codecs/wcd934x.c
+@@ -2273,7 +2273,7 @@ static irqreturn_t wcd934x_slim_irq_handler(int irq, void *data)
+ {
+ 	struct wcd934x_codec *wcd = data;
+ 	unsigned long status = 0;
+-	int i, j, port_id;
++	unsigned int i, j, port_id;
+ 	unsigned int val, int_val = 0;
+ 	irqreturn_t ret = IRQ_NONE;
+ 	bool tx;
+diff --git a/sound/soc/fsl/fsl_asrc_dma.c b/sound/soc/fsl/fsl_asrc_dma.c
+index f501f47242fb0b..1bba48318e2ddf 100644
+--- a/sound/soc/fsl/fsl_asrc_dma.c
++++ b/sound/soc/fsl/fsl_asrc_dma.c
+@@ -156,11 +156,24 @@ static int fsl_asrc_dma_hw_params(struct snd_soc_component *component,
+ 	for_each_dpcm_be(rtd, stream, dpcm) {
+ 		struct snd_soc_pcm_runtime *be = dpcm->be;
+ 		struct snd_pcm_substream *substream_be;
+-		struct snd_soc_dai *dai = snd_soc_rtd_to_cpu(be, 0);
++		struct snd_soc_dai *dai_cpu = snd_soc_rtd_to_cpu(be, 0);
++		struct snd_soc_dai *dai_codec = snd_soc_rtd_to_codec(be, 0);
++		struct snd_soc_dai *dai;
+ 
+ 		if (dpcm->fe != rtd)
+ 			continue;
+ 
++		/*
++		 * With audio graph card, original cpu dai is changed to codec
++		 * device in backend, so if cpu dai is dummy device in backend,
++		 * get the codec dai device, which is the real hardware device
++		 * connected.
++		 */
++		if (!snd_soc_dai_is_dummy(dai_cpu))
++			dai = dai_cpu;
++		else
++			dai = dai_codec;
++
+ 		substream_be = snd_soc_dpcm_get_substream(be, stream);
+ 		dma_params_be = snd_soc_dai_get_dma_data(dai, substream_be);
+ 		dev_be = dai->dev;
+diff --git a/sound/virtio/virtio_pcm.c b/sound/virtio/virtio_pcm.c
+index 967e4c45be9bb3..2f7c5e709f0755 100644
+--- a/sound/virtio/virtio_pcm.c
++++ b/sound/virtio/virtio_pcm.c
+@@ -339,6 +339,21 @@ int virtsnd_pcm_parse_cfg(struct virtio_snd *snd)
+ 	if (!snd->substreams)
+ 		return -ENOMEM;
+ 
++	/*
++	 * Initialize critical substream fields early in case we hit an
++	 * error path and end up trying to clean up uninitialized structures
++	 * elsewhere.
++	 */
++	for (i = 0; i < snd->nsubstreams; ++i) {
++		struct virtio_pcm_substream *vss = &snd->substreams[i];
++
++		vss->snd = snd;
++		vss->sid = i;
++		INIT_WORK(&vss->elapsed_period, virtsnd_pcm_period_elapsed);
++		init_waitqueue_head(&vss->msg_empty);
++		spin_lock_init(&vss->lock);
++	}
++
+ 	info = kcalloc(snd->nsubstreams, sizeof(*info), GFP_KERNEL);
+ 	if (!info)
+ 		return -ENOMEM;
+@@ -352,12 +367,6 @@ int virtsnd_pcm_parse_cfg(struct virtio_snd *snd)
+ 		struct virtio_pcm_substream *vss = &snd->substreams[i];
+ 		struct virtio_pcm *vpcm;
+ 
+-		vss->snd = snd;
+-		vss->sid = i;
+-		INIT_WORK(&vss->elapsed_period, virtsnd_pcm_period_elapsed);
+-		init_waitqueue_head(&vss->msg_empty);
+-		spin_lock_init(&vss->lock);
+-
+ 		rc = virtsnd_pcm_build_hw(vss, &info[i]);
+ 		if (rc)
+ 			goto on_exit;
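
The reordering is the usual cure for cleanup-before-init bugs: give
every element a safe default before entering any loop that can fail,
so a shared error path may walk all elements unconditionally. Sketched
with hypothetical init_item()/setup_item()/teardown_item() helpers:

    /* pass 1: cheap, infallible initialization of every element */
    for (i = 0; i < n; i++)
            init_item(&items[i]);

    /* pass 2: fallible setup; on failure the error path can still
     * tear down every element, initialized or not */
    for (i = 0; i < n; i++) {
            rc = setup_item(&items[i]);
            if (rc)
                    goto on_exit;
    }
    return 0;

    on_exit:
    for (i = 0; i < n; i++)
            teardown_item(&items[i]);
    return rc;
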
+diff --git a/tools/arch/x86/lib/x86-opcode-map.txt b/tools/arch/x86/lib/x86-opcode-map.txt
+index caedb3ef6688fc..f5dd84eb55dcda 100644
+--- a/tools/arch/x86/lib/x86-opcode-map.txt
++++ b/tools/arch/x86/lib/x86-opcode-map.txt
+@@ -996,8 +996,8 @@ AVXcode: 4
+ 83: Grp1 Ev,Ib (1A),(es)
+ # CTESTSCC instructions are: CTESTB, CTESTBE, CTESTF, CTESTL, CTESTLE, CTESTNB, CTESTNBE, CTESTNL,
+ #			     CTESTNLE, CTESTNO, CTESTNS, CTESTNZ, CTESTO, CTESTS, CTESTT, CTESTZ
+-84: CTESTSCC (ev)
+-85: CTESTSCC (es) | CTESTSCC (66),(es)
++84: CTESTSCC Eb,Gb (ev)
++85: CTESTSCC Ev,Gv (es) | CTESTSCC Ev,Gv (66),(es)
+ 88: POPCNT Gv,Ev (es) | POPCNT Gv,Ev (66),(es)
+ 8f: POP2 Bq,Rq (000),(11B),(ev)
+ a5: SHLD Ev,Gv,CL (es) | SHLD Ev,Gv,CL (66),(es)
+diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
+index e71be67f1d8658..52ffb74ae4e89a 100644
+--- a/tools/bpf/bpftool/prog.c
++++ b/tools/bpf/bpftool/prog.c
+@@ -1928,6 +1928,7 @@ static int do_loader(int argc, char **argv)
+ 
+ 	obj = bpf_object__open_file(file, &open_opts);
+ 	if (!obj) {
++		err = -1;
+ 		p_err("failed to open object file");
+ 		goto err_close_obj;
+ 	}
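
The one-line fix addresses a classic goto-cleanup bug: err still held
0 from the earlier steps, so a failed bpf_object__open_file() jumped
to cleanup and then reported success. The general shape of the hazard,
with hypothetical open_object()/cleanup() helpers:

    int err = 0;

    obj = open_object(file);
    if (!obj) {
            err = -1;           /* without this, we return 0 below */
            goto out;
    }
    /* ... more fallible steps that set err ... */
    out:
    cleanup(obj);
    return err;
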
+diff --git a/tools/objtool/check.c b/tools/objtool/check.c
+index ce3ea0c2de0425..d8aea31ee393a3 100644
+--- a/tools/objtool/check.c
++++ b/tools/objtool/check.c
+@@ -1243,12 +1243,15 @@ static const char *uaccess_safe_builtin[] = {
+ 	"__ubsan_handle_load_invalid_value",
+ 	/* STACKLEAK */
+ 	"stackleak_track_stack",
++	/* TRACE_BRANCH_PROFILING */
++	"ftrace_likely_update",
++	/* STACKPROTECTOR */
++	"__stack_chk_fail",
+ 	/* misc */
+ 	"csum_partial_copy_generic",
+ 	"copy_mc_fragile",
+ 	"copy_mc_fragile_handle_tail",
+ 	"copy_mc_enhanced_fast_string",
+-	"ftrace_likely_update", /* CONFIG_TRACE_BRANCH_PROFILING */
+ 	"rep_stos_alternative",
+ 	"rep_movs_alternative",
+ 	"__copy_user_nocache",
+@@ -1567,6 +1570,8 @@ static int add_jump_destinations(struct objtool_file *file)
+ 	unsigned long dest_off;
+ 
+ 	for_each_insn(file, insn) {
++		struct symbol *func = insn_func(insn);
++
+ 		if (insn->jump_dest) {
+ 			/*
+ 			 * handle_group_alt() may have previously set
+@@ -1590,7 +1595,7 @@ static int add_jump_destinations(struct objtool_file *file)
+ 		} else if (reloc->sym->return_thunk) {
+ 			add_return_call(file, insn, true);
+ 			continue;
+-		} else if (insn_func(insn)) {
++		} else if (func) {
+ 			/*
+ 			 * External sibling call or internal sibling call with
+ 			 * STT_FUNC reloc.
+@@ -1623,6 +1628,15 @@ static int add_jump_destinations(struct objtool_file *file)
+ 				continue;
+ 			}
+ 
++			/*
++			 * GCOV/KCOV dead code can jump to the end of the
++			 * function/section.
++			 */
++			if (file->ignore_unreachables && func &&
++			    dest_sec == insn->sec &&
++			    dest_off == func->offset + func->len)
++				continue;
++
+ 			WARN_INSN(insn, "can't find jump dest instruction at %s+0x%lx",
+ 				  dest_sec->name, dest_off);
+ 			return -1;
+@@ -1647,8 +1661,7 @@ static int add_jump_destinations(struct objtool_file *file)
+ 		/*
+ 		 * Cross-function jump.
+ 		 */
+-		if (insn_func(insn) && insn_func(jump_dest) &&
+-		    insn_func(insn) != insn_func(jump_dest)) {
++		if (func && insn_func(jump_dest) && func != insn_func(jump_dest)) {
+ 
+ 			/*
+ 			 * For GCC 8+, create parent/child links for any cold
+@@ -1665,10 +1678,10 @@ static int add_jump_destinations(struct objtool_file *file)
+ 			 * case where the parent function's only reference to a
+ 			 * subfunction is through a jump table.
+ 			 */
+-			if (!strstr(insn_func(insn)->name, ".cold") &&
++			if (!strstr(func->name, ".cold") &&
+ 			    strstr(insn_func(jump_dest)->name, ".cold")) {
+-				insn_func(insn)->cfunc = insn_func(jump_dest);
+-				insn_func(jump_dest)->pfunc = insn_func(insn);
++				func->cfunc = insn_func(jump_dest);
++				insn_func(jump_dest)->pfunc = func;
+ 			}
+ 		}
+ 
+@@ -3634,6 +3647,9 @@ static int validate_branch(struct objtool_file *file, struct symbol *func,
+ 			    !strncmp(func->name, "__pfx_", 6))
+ 				return 0;
+ 
++			if (file->ignore_unreachables)
++				return 0;
++
+ 			WARN("%s() falls through to next function %s()",
+ 			     func->name, insn_func(insn)->name);
+ 			return 1;
+@@ -3853,6 +3869,9 @@ static int validate_branch(struct objtool_file *file, struct symbol *func,
+ 		if (!next_insn) {
+ 			if (state.cfi.cfa.base == CFI_UNDEFINED)
+ 				return 0;
++			if (file->ignore_unreachables)
++				return 0;
++
+ 			WARN("%s: unexpected end of section", sec->name);
+ 			return 1;
+ 		}
+@@ -4005,6 +4024,9 @@ static int validate_unret(struct objtool_file *file, struct instruction *insn)
+ 			break;
+ 		}
+ 
++		if (insn->dead_end)
++			return 0;
++
+ 		if (!next) {
+ 			WARN_INSN(insn, "teh end!");
+ 			return -1;
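
Several of these hunks hinge on the same address arithmetic: a symbol
occupying [offset, offset + len) has its first past-the-end byte at
offset + len, and GCOV/KCOV instrumentation can emit dead jumps that
land exactly there. A standalone restatement of the new tolerance
check:

    #include <stdbool.h>
    #include <stdint.h>

    /* true when a jump destination is 'one past the end' of the
     * function -- coverage dead code, not a real control-flow target */
    static bool jump_past_end(uint64_t dest_off, uint64_t sym_off,
                              uint64_t sym_len)
    {
            return dest_off == sym_off + sym_len;
    }
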
+diff --git a/tools/testing/selftests/bpf/network_helpers.c b/tools/testing/selftests/bpf/network_helpers.c
+index 27784946b01b8b..af0ee70a53f9f0 100644
+--- a/tools/testing/selftests/bpf/network_helpers.c
++++ b/tools/testing/selftests/bpf/network_helpers.c
+@@ -771,12 +771,13 @@ static const char *pkt_type_str(u16 pkt_type)
+ 	return "Unknown";
+ }
+ 
++#define MAX_FLAGS_STRLEN 21
+ /* Show the information of the transport layer in the packet */
+ static void show_transport(const u_char *packet, u16 len, u32 ifindex,
+ 			   const char *src_addr, const char *dst_addr,
+ 			   u16 proto, bool ipv6, u8 pkt_type)
+ {
+-	char *ifname, _ifname[IF_NAMESIZE];
++	char *ifname, _ifname[IF_NAMESIZE], flags[MAX_FLAGS_STRLEN] = "";
+ 	const char *transport_str;
+ 	u16 src_port, dst_port;
+ 	struct udphdr *udp;
+@@ -817,29 +818,21 @@ static void show_transport(const u_char *packet, u16 len, u32 ifindex,
+ 
+ 	/* TCP or UDP*/
+ 
+-	flockfile(stdout);
++	if (proto == IPPROTO_TCP)
++		snprintf(flags, MAX_FLAGS_STRLEN, "%s%s%s%s",
++			 tcp->fin ? ", FIN" : "",
++			 tcp->syn ? ", SYN" : "",
++			 tcp->rst ? ", RST" : "",
++			 tcp->ack ? ", ACK" : "");
++
+ 	if (ipv6)
+-		printf("%-7s %-3s IPv6 %s.%d > %s.%d: %s, length %d",
++		printf("%-7s %-3s IPv6 %s.%d > %s.%d: %s, length %d%s\n",
+ 		       ifname, pkt_type_str(pkt_type), src_addr, src_port,
+-		       dst_addr, dst_port, transport_str, len);
++		       dst_addr, dst_port, transport_str, len, flags);
+ 	else
+-		printf("%-7s %-3s IPv4 %s:%d > %s:%d: %s, length %d",
++		printf("%-7s %-3s IPv4 %s:%d > %s:%d: %s, length %d%s\n",
+ 		       ifname, pkt_type_str(pkt_type), src_addr, src_port,
+-		       dst_addr, dst_port, transport_str, len);
+-
+-	if (proto == IPPROTO_TCP) {
+-		if (tcp->fin)
+-			printf(", FIN");
+-		if (tcp->syn)
+-			printf(", SYN");
+-		if (tcp->rst)
+-			printf(", RST");
+-		if (tcp->ack)
+-			printf(", ACK");
+-	}
+-
+-	printf("\n");
+-	funlockfile(stdout);
++		       dst_addr, dst_port, transport_str, len, flags);
+ }
+ 
+ static void show_ipv6_packet(const u_char *packet, u32 ifindex, u8 pkt_type)
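
MAX_FLAGS_STRLEN is sized for the worst case: four ", XXX" fragments
of 5 bytes each plus the NUL terminator, i.e. 4 * 5 + 1 = 21. Building
the string once with snprintf() also removes the need to hold stdout
locked across several printf() calls. A standalone demonstration of
the worst case:

    #include <stdio.h>

    #define MAX_FLAGS_STRLEN 21         /* 4 * strlen(", FIN") + 1 */

    int main(void)
    {
            char flags[MAX_FLAGS_STRLEN] = "";
            int fin = 1, syn = 1, rst = 1, ack = 1;  /* worst case */

            snprintf(flags, sizeof(flags), "%s%s%s%s",
                     fin ? ", FIN" : "", syn ? ", SYN" : "",
                     rst ? ", RST" : "", ack ? ", ACK" : "");
            puts(flags);        /* ", FIN, SYN, RST, ACK" -- 20 chars */
            return 0;
    }
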
+diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c b/tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c
+index 481626a875d1c3..df27535995af8d 100644
+--- a/tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c
++++ b/tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c
+@@ -2,35 +2,41 @@
+ #include <uapi/linux/bpf.h>
+ #include <linux/if_link.h>
+ #include <test_progs.h>
++#include <network_helpers.h>
+ 
+ #include "test_xdp_with_cpumap_frags_helpers.skel.h"
+ #include "test_xdp_with_cpumap_helpers.skel.h"
+ 
+ #define IFINDEX_LO	1
++#define TEST_NS "cpu_attach_ns"
+ 
+ static void test_xdp_with_cpumap_helpers(void)
+ {
+-	struct test_xdp_with_cpumap_helpers *skel;
++	struct test_xdp_with_cpumap_helpers *skel = NULL;
+ 	struct bpf_prog_info info = {};
+ 	__u32 len = sizeof(info);
+ 	struct bpf_cpumap_val val = {
+ 		.qsize = 192,
+ 	};
+-	int err, prog_fd, map_fd;
++	int err, prog_fd, prog_redir_fd, map_fd;
++	struct nstoken *nstoken = NULL;
+ 	__u32 idx = 0;
+ 
++	SYS(out_close, "ip netns add %s", TEST_NS);
++	nstoken = open_netns(TEST_NS);
++	if (!ASSERT_OK_PTR(nstoken, "open_netns"))
++		goto out_close;
++	SYS(out_close, "ip link set dev lo up");
++
+ 	skel = test_xdp_with_cpumap_helpers__open_and_load();
+ 	if (!ASSERT_OK_PTR(skel, "test_xdp_with_cpumap_helpers__open_and_load"))
+ 		return;
+ 
+-	prog_fd = bpf_program__fd(skel->progs.xdp_redir_prog);
+-	err = bpf_xdp_attach(IFINDEX_LO, prog_fd, XDP_FLAGS_SKB_MODE, NULL);
++	prog_redir_fd = bpf_program__fd(skel->progs.xdp_redir_prog);
++	err = bpf_xdp_attach(IFINDEX_LO, prog_redir_fd, XDP_FLAGS_SKB_MODE, NULL);
+ 	if (!ASSERT_OK(err, "Generic attach of program with 8-byte CPUMAP"))
+ 		goto out_close;
+ 
+-	err = bpf_xdp_detach(IFINDEX_LO, XDP_FLAGS_SKB_MODE, NULL);
+-	ASSERT_OK(err, "XDP program detach");
+-
+ 	prog_fd = bpf_program__fd(skel->progs.xdp_dummy_cm);
+ 	map_fd = bpf_map__fd(skel->maps.cpu_map);
+ 	err = bpf_prog_get_info_by_fd(prog_fd, &info, &len);
+@@ -45,6 +51,26 @@ static void test_xdp_with_cpumap_helpers(void)
+ 	ASSERT_OK(err, "Read cpumap entry");
+ 	ASSERT_EQ(info.id, val.bpf_prog.id, "Match program id to cpumap entry prog_id");
+ 
++	/* send a packet to trigger any potential bugs in there */
++	char data[ETH_HLEN] = {};
++	DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
++			    .data_in = &data,
++			    .data_size_in = sizeof(data),
++			    .flags = BPF_F_TEST_XDP_LIVE_FRAMES,
++			    .repeat = 1,
++		);
++	err = bpf_prog_test_run_opts(prog_redir_fd, &opts);
++	ASSERT_OK(err, "XDP test run");
++
++	/* wait for the packets to be flushed, then check that redirect has been
++	 * performed
++	 */
++	kern_sync_rcu();
++	ASSERT_NEQ(skel->bss->redirect_count, 0, "redirected packets");
++
++	err = bpf_xdp_detach(IFINDEX_LO, XDP_FLAGS_SKB_MODE, NULL);
++	ASSERT_OK(err, "XDP program detach");
++
+ 	/* can not attach BPF_XDP_CPUMAP program to a device */
+ 	err = bpf_xdp_attach(IFINDEX_LO, prog_fd, XDP_FLAGS_SKB_MODE, NULL);
+ 	if (!ASSERT_NEQ(err, 0, "Attach of BPF_XDP_CPUMAP program"))
+@@ -65,6 +91,8 @@ static void test_xdp_with_cpumap_helpers(void)
+ 	ASSERT_NEQ(err, 0, "Add BPF_XDP program with frags to cpumap entry");
+ 
+ out_close:
++	close_netns(nstoken);
++	SYS_NOFAIL("ip netns del %s", TEST_NS);
+ 	test_xdp_with_cpumap_helpers__destroy(skel);
+ }
+ 
+@@ -111,7 +139,7 @@ static void test_xdp_with_cpumap_frags_helpers(void)
+ 	test_xdp_with_cpumap_frags_helpers__destroy(skel);
+ }
+ 
+-void serial_test_xdp_cpumap_attach(void)
++void test_xdp_cpumap_attach(void)
+ {
+ 	if (test__start_subtest("CPUMAP with programs in entries"))
+ 		test_xdp_with_cpumap_helpers();
+diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c b/tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c
+index 27ffed17d4be33..461ab18705d5c0 100644
+--- a/tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c
++++ b/tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c
+@@ -23,7 +23,7 @@ static void test_xdp_with_devmap_helpers(void)
+ 	__u32 len = sizeof(info);
+ 	int err, dm_fd, dm_fd_redir, map_fd;
+ 	struct nstoken *nstoken = NULL;
+-	char data[10] = {};
++	char data[ETH_HLEN] = {};
+ 	__u32 idx = 0;
+ 
+ 	SYS(out_close, "ip netns add %s", TEST_NS);
+@@ -58,7 +58,7 @@ static void test_xdp_with_devmap_helpers(void)
+ 	/* send a packet to trigger any potential bugs in there */
+ 	DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
+ 			    .data_in = &data,
+-			    .data_size_in = 10,
++			    .data_size_in = sizeof(data),
+ 			    .flags = BPF_F_TEST_XDP_LIVE_FRAMES,
+ 			    .repeat = 1,
+ 		);
+@@ -158,7 +158,7 @@ static void test_xdp_with_devmap_helpers_veth(void)
+ 	struct nstoken *nstoken = NULL;
+ 	__u32 len = sizeof(info);
+ 	int err, dm_fd, dm_fd_redir, map_fd, ifindex_dst;
+-	char data[10] = {};
++	char data[ETH_HLEN] = {};
+ 	__u32 idx = 0;
+ 
+ 	SYS(out_close, "ip netns add %s", TEST_NS);
+@@ -208,7 +208,7 @@ static void test_xdp_with_devmap_helpers_veth(void)
+ 	/* send a packet to trigger any potential bugs in there */
+ 	DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
+ 			    .data_in = &data,
+-			    .data_size_in = 10,
++			    .data_size_in = sizeof(data),
+ 			    .flags = BPF_F_TEST_XDP_LIVE_FRAMES,
+ 			    .repeat = 1,
+ 		);
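
Both size bumps reflect the same constraint: a frame injected with
BPF_F_TEST_XDP_LIVE_FRAMES must be at least a full Ethernet header
long (the XDP test-run path rejects anything shorter), and the old
10-byte buffer fell short. ETH_HLEN is 14:

    #include <stdio.h>
    #include <linux/if_ether.h>

    int main(void)
    {
            /* 6 (dst MAC) + 6 (src MAC) + 2 (EtherType) = 14 bytes */
            char data[ETH_HLEN] = {};

            printf("minimum XDP test frame: %zu bytes\n", sizeof(data));
            return 0;
    }
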
+diff --git a/tools/testing/selftests/bpf/progs/test_xdp_with_cpumap_helpers.c b/tools/testing/selftests/bpf/progs/test_xdp_with_cpumap_helpers.c
+index 20ec6723df18a6..3619239b01b741 100644
+--- a/tools/testing/selftests/bpf/progs/test_xdp_with_cpumap_helpers.c
++++ b/tools/testing/selftests/bpf/progs/test_xdp_with_cpumap_helpers.c
+@@ -12,10 +12,12 @@ struct {
+ 	__uint(max_entries, 4);
+ } cpu_map SEC(".maps");
+ 
++__u32 redirect_count = 0;
++
+ SEC("xdp")
+ int xdp_redir_prog(struct xdp_md *ctx)
+ {
+-	return bpf_redirect_map(&cpu_map, 1, 0);
++	return bpf_redirect_map(&cpu_map, 0, 0);
+ }
+ 
+ SEC("xdp")
+@@ -27,6 +29,9 @@ int xdp_dummy_prog(struct xdp_md *ctx)
+ SEC("xdp/cpumap")
+ int xdp_dummy_cm(struct xdp_md *ctx)
+ {
++	if (bpf_get_smp_processor_id() == 0)
++		redirect_count++;
++
+ 	if (ctx->ingress_ifindex == IFINDEX_LO)
+ 		return XDP_DROP;
+ 
+diff --git a/tools/testing/selftests/mincore/mincore_selftest.c b/tools/testing/selftests/mincore/mincore_selftest.c
+index e949a43a614508..efabfcbe0b498c 100644
+--- a/tools/testing/selftests/mincore/mincore_selftest.c
++++ b/tools/testing/selftests/mincore/mincore_selftest.c
+@@ -261,9 +261,6 @@ TEST(check_file_mmap)
+ 		TH_LOG("No read-ahead pages found in memory");
+ 	}
+ 
+-	EXPECT_LT(i, vec_size) {
+-		TH_LOG("Read-ahead pages reached the end of the file");
+-	}
+ 	/*
+ 	 * End of the readahead window. The rest of the pages shouldn't
+ 	 * be in memory.
+diff --git a/tools/testing/selftests/ublk/test_stripe_04.sh b/tools/testing/selftests/ublk/test_stripe_04.sh
+new file mode 100755
+index 00000000000000..1f2b642381d179
+--- /dev/null
++++ b/tools/testing/selftests/ublk/test_stripe_04.sh
+@@ -0,0 +1,24 @@
++#!/bin/bash
++# SPDX-License-Identifier: GPL-2.0
++
++. "$(cd "$(dirname "$0")" && pwd)"/test_common.sh
++
++TID="stripe_04"
++ERR_CODE=0
++
++_prep_test "stripe" "mkfs & mount & umount on zero copy"
++
++backfile_0=$(_create_backfile 256M)
++backfile_1=$(_create_backfile 256M)
++dev_id=$(_add_ublk_dev -t stripe -z -q 2 "$backfile_0" "$backfile_1")
++_check_add_dev $TID $? "$backfile_0" "$backfile_1"
++
++_mkfs_mount_test /dev/ublkb"${dev_id}"
++ERR_CODE=$?
++
++_cleanup_test "stripe"
++
++_remove_backfile "$backfile_0"
++_remove_backfile "$backfile_1"
++
++_show_result $TID $ERR_CODE


* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2025-04-25 11:54 Mike Pagano
  0 siblings, 0 replies; 82+ messages in thread
From: Mike Pagano @ 2025-04-25 11:54 UTC (permalink / raw
  To: gentoo-commits

commit:     13c5d1f1019dc81393a5e15a0b7f0da86eb07edd
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Apr 25 11:53:47 2025 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Apr 25 11:53:47 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=13c5d1f1

Remove no longer applying BMQ patch

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                  |     8 -
 5020_BMQ-and-PDS-io-scheduler-v6.12-r1.patch | 11302 -------------------------
 5021_BMQ-and-PDS-gentoo-defaults.patch       |    13 -
 3 files changed, 11323 deletions(-)

diff --git a/0000_README b/0000_README
index e07d8d2e..f5e1ddec 100644
--- a/0000_README
+++ b/0000_README
@@ -194,11 +194,3 @@ Desc:   Add Gentoo Linux support config settings and defaults.
 Patch:  5010_enable-cpu-optimizations-universal.patch
 From:   https://github.com/graysky2/kernel_compiler_patch
 Desc:   Kernel >= 5.15 patch enables gcc = v11.1+ optimizations for additional CPUs.
-
-Patch:  5020_BMQ-and-PDS-io-scheduler-v6.12-r1.patch
-From:   https://gitlab.com/alfredchen/projectc
-Desc:   BMQ(BitMap Queue) Scheduler. A new CPU scheduler developed from PDS(incld). Inspired by the scheduler in zircon.
-
-Patch:  5021_BMQ-and-PDS-gentoo-defaults.patch
-From:   https://gitweb.gentoo.org/proj/linux-patches.git/
-Desc:   Set defaults for BMQ. default to n

diff --git a/5020_BMQ-and-PDS-io-scheduler-v6.12-r1.patch b/5020_BMQ-and-PDS-io-scheduler-v6.12-r1.patch
deleted file mode 100644
index 532813fd..00000000
--- a/5020_BMQ-and-PDS-io-scheduler-v6.12-r1.patch
+++ /dev/null
@@ -1,11302 +0,0 @@
-diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst
-index f8bc1630eba0..1b90768a0916 100644
---- a/Documentation/admin-guide/sysctl/kernel.rst
-+++ b/Documentation/admin-guide/sysctl/kernel.rst
-@@ -1673,3 +1673,12 @@ is 10 seconds.
- 
- The softlockup threshold is (``2 * watchdog_thresh``). Setting this
- tunable to zero will disable lockup detection altogether.
-+
-+yield_type:
-+===========
-+
-+BMQ/PDS CPU scheduler only. This determines what type of yield will be
-+performed for calls to sched_yield().
-+
-+  0 - No yield.
-+  1 - Requeue task. (default)
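
On a kernel built with SCHED_ALT, this tunable is exposed at
/proc/sys/kernel/yield_type. A minimal C equivalent of
"echo 0 > /proc/sys/kernel/yield_type" (illustrative only):

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/proc/sys/kernel/yield_type", "w");

            if (!f) {
                    perror("yield_type");   /* not a BMQ/PDS kernel? */
                    return 1;
            }
            fputs("0\n", f);    /* 0 = no yield, 1 = requeue (default) */
            return fclose(f) ? 1 : 0;
    }
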
-diff --git a/Documentation/scheduler/sched-BMQ.txt b/Documentation/scheduler/sched-BMQ.txt
-new file mode 100644
-index 000000000000..05c84eec0f31
---- /dev/null
-+++ b/Documentation/scheduler/sched-BMQ.txt
-@@ -0,0 +1,110 @@
-+                         BitMap queue CPU Scheduler
-+                         --------------------------
-+
-+CONTENT
-+========
-+
-+ Background
-+ Design
-+   Overview
-+   Task policy
-+   Priority management
-+   BitMap Queue
-+   CPU Assignment and Migration
-+
-+
-+Background
-+==========
-+
-+BitMap Queue CPU scheduler, referred to as BMQ from here on, is an evolution
-+of the previous Priority and Deadline based Skiplist multiple queue scheduler (PDS),
-+and inspired by the Zircon scheduler. Its goal is to keep the scheduler code
-+simple, while staying efficient and scalable for interactive tasks such as
-+desktop use, movie playback and gaming.
-+
-+Design
-+======
-+
-+Overview
-+--------
-+
-+BMQ uses a per-CPU run queue design: each (logical) CPU has its own run queue,
-+and each CPU is responsible for scheduling the tasks that are put into its
-+run queue.
-+
-+The run queue is a set of priority queues. Note that these queues are fifo
-+queue for non-rt tasks or priority queue for rt tasks in data structure. See
-+BitMap Queue below for details. BMQ is optimized for non-rt tasks in the fact
-+that most applications are non-rt tasks. No matter the queue is fifo or
-+priority, In each queue is an ordered list of runnable tasks awaiting execution
-+and the data structures are the same. When it is time for a new task to run,
-+the scheduler simply looks the lowest numbered queueue that contains a task,
-+and runs the first task from the head of that queue. And per CPU idle task is
-+also in the run queue, so the scheduler can always find a task to run on from
-+its run queue.
-+
-+Each task will be assigned the same timeslice (default 4ms) when it is picked to
-+start running. The task will be reinserted at the end of the appropriate priority
-+queue when it uses its whole timeslice. When the scheduler selects a new task
-+from the priority queue it sets the CPU's preemption timer for the remainder of
-+the previous timeslice. When that timer fires the scheduler will stop execution
-+on that task, select another task and start over again.
-+
-+If a task blocks waiting for a shared resource then it's taken out of its
-+priority queue and is placed in a wait queue for the shared resource. When it
-+is unblocked it will be reinserted in the appropriate priority queue of an
-+eligible CPU.
-+
-+Task policy
-+-----------
-+
-+BMQ supports DEADLINE, FIFO, RR, NORMAL, BATCH and IDLE task policy like the
-+mainline CFS scheduler. But BMQ is heavy optimized for non-rt task, that's
-+NORMAL/BATCH/IDLE policy tasks. Below is the implementation detail of each
-+policy.
-+
-+DEADLINE
-+	It is squashed as priority 0 FIFO task.
-+
-+FIFO/RR
-+	All RT tasks share one single priority queue in the BMQ run queue design.
-+The complexity of the insert operation is O(n). BMQ is not designed for systems
-+running mostly rt policy tasks.
-+
-+NORMAL/BATCH/IDLE
-+	BATCH and IDLE tasks are treated as the same policy. They compete for CPU with
-+NORMAL policy tasks, but they just don't boost. To control the priority of
-+NORMAL/BATCH/IDLE tasks, simply use nice level.
-+
-+ISO
-+	ISO policy is not supported in BMQ. Please use nice level -20 NORMAL policy
-+task instead.
-+
-+Priority management
-+-------------------
-+
-+RT tasks have priority from 0-99. For non-rt tasks, there are three different
-+factors used to determine the effective priority of a task. The effective
-+priority being what is used to determine which queue it will be in.
-+
-+The first factor is simply the task’s static priority, which is assigned from
-+the task's nice level, within [-20, 19] from userland's point of view and [0, 39]
-+internally.
-+
-+The second factor is the priority boost. This is a value bounded between
-+[-MAX_PRIORITY_ADJ, MAX_PRIORITY_ADJ] used to offset the base priority; it is
-+modified in the following cases:
-+
-+*When a thread has used up its entire timeslice, its boost is always weakened
-+by increasing the value by one.
-+*When a thread gives up cpu control (voluntary or non-voluntary) to reschedule,
-+and its switch-in time (time after last switch and run) is below the threshold
-+based on its priority boost, its boost is strengthened by decreasing the value
-+by one, but it is capped at 0 (won’t go negative).
-+
-+The intent in this system is to ensure that interactive threads are serviced
-+quickly. These are usually the threads that interact directly with the user
-+and cause user-perceivable latency. These threads usually do little work and
-+spend most of their time blocked awaiting another user event. So they get the
-+priority boost from unblocking while background threads that do most of the
-+processing receive the priority penalty for using their entire timeslice.
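
The boost mechanism is easier to see with numbers: BMQ's
MAX_PRIORITY_ADJ is 12 (defined later in this patch), so a nice-0 task
at internal static priority 20 can float between effective priority 8
(fully boosted, interactive) and 32 (fully deboosted, CPU-bound). An
illustrative helper, not the BMQ code itself:

    #define MAX_PRIORITY_ADJ 12         /* BMQ value from this patch */

    /* clamp the boost, then offset the static priority (0..39,
     * i.e. nice + 20) to pick the run queue */
    static int effective_prio(int static_prio, int boost)
    {
            if (boost < -MAX_PRIORITY_ADJ)
                    boost = -MAX_PRIORITY_ADJ;
            if (boost > MAX_PRIORITY_ADJ)
                    boost = MAX_PRIORITY_ADJ;
            return static_prio + boost; /* nice 0: 8..32 */
    }
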
-diff --git a/fs/proc/base.c b/fs/proc/base.c
-index b31283d81c52..e27c5c7b05f6 100644
---- a/fs/proc/base.c
-+++ b/fs/proc/base.c
-@@ -516,7 +516,7 @@ static int proc_pid_schedstat(struct seq_file *m, struct pid_namespace *ns,
- 		seq_puts(m, "0 0 0\n");
- 	else
- 		seq_printf(m, "%llu %llu %lu\n",
--		   (unsigned long long)task->se.sum_exec_runtime,
-+		   (unsigned long long)tsk_seruntime(task),
- 		   (unsigned long long)task->sched_info.run_delay,
- 		   task->sched_info.pcount);
- 
-diff --git a/include/asm-generic/resource.h b/include/asm-generic/resource.h
-index 8874f681b056..59eb72bf7d5f 100644
---- a/include/asm-generic/resource.h
-+++ b/include/asm-generic/resource.h
-@@ -23,7 +23,7 @@
- 	[RLIMIT_LOCKS]		= {  RLIM_INFINITY,  RLIM_INFINITY },	\
- 	[RLIMIT_SIGPENDING]	= { 		0,	       0 },	\
- 	[RLIMIT_MSGQUEUE]	= {   MQ_BYTES_MAX,   MQ_BYTES_MAX },	\
--	[RLIMIT_NICE]		= { 0, 0 },				\
-+	[RLIMIT_NICE]		= { 30, 30 },				\
- 	[RLIMIT_RTPRIO]		= { 0, 0 },				\
- 	[RLIMIT_RTTIME]		= {  RLIM_INFINITY,  RLIM_INFINITY },	\
- }
-diff --git a/include/linux/sched.h b/include/linux/sched.h
-index bb343136ddd0..6adfea989b7b 100644
---- a/include/linux/sched.h
-+++ b/include/linux/sched.h
-@@ -804,9 +804,13 @@ struct task_struct {
- 	struct alloc_tag		*alloc_tag;
- #endif
- 
--#ifdef CONFIG_SMP
-+#if defined(CONFIG_SMP) || defined(CONFIG_SCHED_ALT)
- 	int				on_cpu;
-+#endif
-+
-+#ifdef CONFIG_SMP
- 	struct __call_single_node	wake_entry;
-+#ifndef CONFIG_SCHED_ALT
- 	unsigned int			wakee_flips;
- 	unsigned long			wakee_flip_decay_ts;
- 	struct task_struct		*last_wakee;
-@@ -820,6 +824,7 @@ struct task_struct {
- 	 */
- 	int				recent_used_cpu;
- 	int				wake_cpu;
-+#endif /* !CONFIG_SCHED_ALT */
- #endif
- 	int				on_rq;
- 
-@@ -828,6 +833,19 @@ struct task_struct {
- 	int				normal_prio;
- 	unsigned int			rt_priority;
- 
-+#ifdef CONFIG_SCHED_ALT
-+	u64				last_ran;
-+	s64				time_slice;
-+	struct list_head		sq_node;
-+#ifdef CONFIG_SCHED_BMQ
-+	int				boost_prio;
-+#endif /* CONFIG_SCHED_BMQ */
-+#ifdef CONFIG_SCHED_PDS
-+	u64				deadline;
-+#endif /* CONFIG_SCHED_PDS */
-+	/* sched_clock time spent running */
-+	u64				sched_time;
-+#else /* !CONFIG_SCHED_ALT */
- 	struct sched_entity		se;
- 	struct sched_rt_entity		rt;
- 	struct sched_dl_entity		dl;
-@@ -842,6 +860,7 @@ struct task_struct {
- 	unsigned long			core_cookie;
- 	unsigned int			core_occupation;
- #endif
-+#endif /* !CONFIG_SCHED_ALT */
- 
- #ifdef CONFIG_CGROUP_SCHED
- 	struct task_group		*sched_task_group;
-@@ -878,11 +897,15 @@ struct task_struct {
- 	const cpumask_t			*cpus_ptr;
- 	cpumask_t			*user_cpus_ptr;
- 	cpumask_t			cpus_mask;
-+#ifndef CONFIG_SCHED_ALT
- 	void				*migration_pending;
-+#endif
- #ifdef CONFIG_SMP
- 	unsigned short			migration_disabled;
- #endif
-+#ifndef CONFIG_SCHED_ALT
- 	unsigned short			migration_flags;
-+#endif
- 
- #ifdef CONFIG_PREEMPT_RCU
- 	int				rcu_read_lock_nesting;
-@@ -914,8 +937,10 @@ struct task_struct {
- 
- 	struct list_head		tasks;
- #ifdef CONFIG_SMP
-+#ifndef CONFIG_SCHED_ALT
- 	struct plist_node		pushable_tasks;
- 	struct rb_node			pushable_dl_tasks;
-+#endif
- #endif
- 
- 	struct mm_struct		*mm;
-@@ -1609,6 +1634,15 @@ struct task_struct {
- 	 */
- };
- 
-+#ifdef CONFIG_SCHED_ALT
-+#define tsk_seruntime(t)		((t)->sched_time)
-+/* replace the uncertain rt_timeout with 0UL */
-+#define tsk_rttimeout(t)		(0UL)
-+#else /* CFS */
-+#define tsk_seruntime(t)	((t)->se.sum_exec_runtime)
-+#define tsk_rttimeout(t)	((t)->rt.timeout)
-+#endif /* !CONFIG_SCHED_ALT */
-+
- #define TASK_REPORT_IDLE	(TASK_REPORT + 1)
- #define TASK_REPORT_MAX		(TASK_REPORT_IDLE << 1)
- 
-@@ -2135,7 +2169,11 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
- 
- static inline bool task_is_runnable(struct task_struct *p)
- {
-+#ifdef CONFIG_SCHED_ALT
-+	return p->on_rq;
-+#else
- 	return p->on_rq && !p->se.sched_delayed;
-+#endif /* !CONFIG_SCHED_ALT */
- }
- 
- extern bool sched_task_on_rq(struct task_struct *p);
-diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h
-index 3a912ab42bb5..269a1513a153 100644
---- a/include/linux/sched/deadline.h
-+++ b/include/linux/sched/deadline.h
-@@ -2,6 +2,25 @@
- #ifndef _LINUX_SCHED_DEADLINE_H
- #define _LINUX_SCHED_DEADLINE_H
- 
-+#ifdef CONFIG_SCHED_ALT
-+
-+static inline int dl_task(struct task_struct *p)
-+{
-+	return 0;
-+}
-+
-+#ifdef CONFIG_SCHED_BMQ
-+#define __tsk_deadline(p)	(0UL)
-+#endif
-+
-+#ifdef CONFIG_SCHED_PDS
-+#define __tsk_deadline(p)	((((u64) ((p)->prio))<<56) | (p)->deadline)
-+#endif
-+
-+#else
-+
-+#define __tsk_deadline(p)	((p)->dl.deadline)
-+
- /*
-  * SCHED_DEADLINE tasks has negative priorities, reflecting
-  * the fact that any of them has higher prio than RT and
-@@ -23,6 +42,7 @@ static inline bool dl_task(struct task_struct *p)
- {
- 	return dl_prio(p->prio);
- }
-+#endif /* CONFIG_SCHED_ALT */
- 
- static inline bool dl_time_before(u64 a, u64 b)
- {
-diff --git a/include/linux/sched/prio.h b/include/linux/sched/prio.h
-index 6ab43b4f72f9..ef1cff556c5e 100644
---- a/include/linux/sched/prio.h
-+++ b/include/linux/sched/prio.h
-@@ -19,6 +19,28 @@
- #define MAX_PRIO		(MAX_RT_PRIO + NICE_WIDTH)
- #define DEFAULT_PRIO		(MAX_RT_PRIO + NICE_WIDTH / 2)
- 
-+#ifdef CONFIG_SCHED_ALT
-+
-+/* Undefine MAX_PRIO and DEFAULT_PRIO */
-+#undef MAX_PRIO
-+#undef DEFAULT_PRIO
-+
-+/* +/- priority levels from the base priority */
-+#ifdef CONFIG_SCHED_BMQ
-+#define MAX_PRIORITY_ADJ	(12)
-+#endif
-+
-+#ifdef CONFIG_SCHED_PDS
-+#define MAX_PRIORITY_ADJ	(0)
-+#endif
-+
-+#define MIN_NORMAL_PRIO		(128)
-+#define NORMAL_PRIO_NUM		(64)
-+#define MAX_PRIO		(MIN_NORMAL_PRIO + NORMAL_PRIO_NUM)
-+#define DEFAULT_PRIO		(MAX_PRIO - MAX_PRIORITY_ADJ - NICE_WIDTH / 2)
-+
-+#endif /* CONFIG_SCHED_ALT */
-+
- /*
-  * Convert user-nice values [ -20 ... 0 ... 19 ]
-  * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
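
Plugging the constants together (NICE_WIDTH is 40, covering nice -20
through 19): MAX_PRIO becomes 128 + 64 = 192, so DEFAULT_PRIO works
out to 192 - 12 - 20 = 160 under BMQ and 192 - 0 - 20 = 172 under PDS.
A compile-time restatement:

    #define MIN_NORMAL_PRIO 128
    #define NORMAL_PRIO_NUM 64
    #define NICE_WIDTH      40
    #define MAX_PRIO        (MIN_NORMAL_PRIO + NORMAL_PRIO_NUM)

    _Static_assert(MAX_PRIO - 12 - NICE_WIDTH / 2 == 160, "BMQ DEFAULT_PRIO");
    _Static_assert(MAX_PRIO -  0 - NICE_WIDTH / 2 == 172, "PDS DEFAULT_PRIO");
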
-diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h
-index 4e3338103654..6dfef878fe3b 100644
---- a/include/linux/sched/rt.h
-+++ b/include/linux/sched/rt.h
-@@ -45,8 +45,10 @@ static inline bool rt_or_dl_task_policy(struct task_struct *tsk)
- 
- 	if (policy == SCHED_FIFO || policy == SCHED_RR)
- 		return true;
-+#ifndef CONFIG_SCHED_ALT
- 	if (policy == SCHED_DEADLINE)
- 		return true;
-+#endif
- 	return false;
- }
- 
-diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
-index 4237daa5ac7a..3cebd93c49c8 100644
---- a/include/linux/sched/topology.h
-+++ b/include/linux/sched/topology.h
-@@ -244,7 +244,8 @@ static inline bool cpus_share_resources(int this_cpu, int that_cpu)
- 
- #endif	/* !CONFIG_SMP */
- 
--#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
-+#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) && \
-+	!defined(CONFIG_SCHED_ALT)
- extern void rebuild_sched_domains_energy(void);
- #else
- static inline void rebuild_sched_domains_energy(void)
-diff --git a/init/Kconfig b/init/Kconfig
-index c521e1421ad4..4a397b48a453 100644
---- a/init/Kconfig
-+++ b/init/Kconfig
-@@ -652,6 +652,7 @@ config TASK_IO_ACCOUNTING
- 
- config PSI
- 	bool "Pressure stall information tracking"
-+	depends on !SCHED_ALT
- 	select KERNFS
- 	help
- 	  Collect metrics that indicate how overcommitted the CPU, memory,
-@@ -863,6 +864,35 @@ config UCLAMP_BUCKETS_COUNT
- 
- 	  If in doubt, use the default value.
- 
-+menuconfig SCHED_ALT
-+	bool "Alternative CPU Schedulers"
-+	default y
-+	help
-+	  This feature enables the alternative CPU schedulers.
-+
-+if SCHED_ALT
-+
-+choice
-+	prompt "Alternative CPU Scheduler"
-+	default SCHED_BMQ
-+
-+config SCHED_BMQ
-+	bool "BMQ CPU scheduler"
-+	help
-+	  The BitMap Queue CPU scheduler for excellent interactivity and
-+	  responsiveness on the desktop and solid scalability on normal
-+	  hardware and commodity servers.
-+
-+config SCHED_PDS
-+	bool "PDS CPU scheduler"
-+	help
-+	  The Priority and Deadline based Skip list multiple queue CPU
-+	  Scheduler.
-+
-+endchoice
-+
-+endif
-+
- endmenu
- 
- #
-@@ -928,6 +958,7 @@ config NUMA_BALANCING
- 	depends on ARCH_SUPPORTS_NUMA_BALANCING
- 	depends on !ARCH_WANT_NUMA_VARIABLE_LOCALITY
- 	depends on SMP && NUMA && MIGRATION && !PREEMPT_RT
-+	depends on !SCHED_ALT
- 	help
- 	  This option adds support for automatic NUMA aware memory/task placement.
- 	  The mechanism is quite primitive and is based on migrating memory when
-@@ -1334,6 +1365,7 @@ config CHECKPOINT_RESTORE
- 
- config SCHED_AUTOGROUP
- 	bool "Automatic process group scheduling"
-+	depends on !SCHED_ALT
- 	select CGROUPS
- 	select CGROUP_SCHED
- 	select FAIR_GROUP_SCHED
-diff --git a/init/init_task.c b/init/init_task.c
-index 136a8231355a..12c01ab8e718 100644
---- a/init/init_task.c
-+++ b/init/init_task.c
-@@ -71,9 +71,16 @@ struct task_struct init_task __aligned(L1_CACHE_BYTES) = {
- 	.stack		= init_stack,
- 	.usage		= REFCOUNT_INIT(2),
- 	.flags		= PF_KTHREAD,
-+#ifdef CONFIG_SCHED_ALT
-+	.on_cpu		= 1,
-+	.prio		= DEFAULT_PRIO,
-+	.static_prio	= DEFAULT_PRIO,
-+	.normal_prio	= DEFAULT_PRIO,
-+#else
- 	.prio		= MAX_PRIO - 20,
- 	.static_prio	= MAX_PRIO - 20,
- 	.normal_prio	= MAX_PRIO - 20,
-+#endif
- 	.policy		= SCHED_NORMAL,
- 	.cpus_ptr	= &init_task.cpus_mask,
- 	.user_cpus_ptr	= NULL,
-@@ -86,6 +93,16 @@ struct task_struct init_task __aligned(L1_CACHE_BYTES) = {
- 	.restart_block	= {
- 		.fn = do_no_restart_syscall,
- 	},
-+#ifdef CONFIG_SCHED_ALT
-+	.sq_node	= LIST_HEAD_INIT(init_task.sq_node),
-+#ifdef CONFIG_SCHED_BMQ
-+	.boost_prio	= 0,
-+#endif
-+#ifdef CONFIG_SCHED_PDS
-+	.deadline	= 0,
-+#endif
-+	.time_slice	= HZ,
-+#else
- 	.se		= {
- 		.group_node 	= LIST_HEAD_INIT(init_task.se.group_node),
- 	},
-@@ -93,10 +110,13 @@ struct task_struct init_task __aligned(L1_CACHE_BYTES) = {
- 		.run_list	= LIST_HEAD_INIT(init_task.rt.run_list),
- 		.time_slice	= RR_TIMESLICE,
- 	},
-+#endif
- 	.tasks		= LIST_HEAD_INIT(init_task.tasks),
-+#ifndef CONFIG_SCHED_ALT
- #ifdef CONFIG_SMP
- 	.pushable_tasks	= PLIST_NODE_INIT(init_task.pushable_tasks, MAX_PRIO),
- #endif
-+#endif
- #ifdef CONFIG_CGROUP_SCHED
- 	.sched_task_group = &root_task_group,
- #endif
-diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
-index fe782cd77388..d27d2154d71a 100644
---- a/kernel/Kconfig.preempt
-+++ b/kernel/Kconfig.preempt
-@@ -117,7 +117,7 @@ config PREEMPT_DYNAMIC
- 
- config SCHED_CORE
- 	bool "Core Scheduling for SMT"
--	depends on SCHED_SMT
-+	depends on SCHED_SMT && !SCHED_ALT
- 	help
- 	  This option permits Core Scheduling, a means of coordinated task
- 	  selection across SMT siblings. When enabled -- see
-@@ -135,7 +135,7 @@ config SCHED_CORE
- 
- config SCHED_CLASS_EXT
- 	bool "Extensible Scheduling Class"
--	depends on BPF_SYSCALL && BPF_JIT && DEBUG_INFO_BTF
-+	depends on BPF_SYSCALL && BPF_JIT && DEBUG_INFO_BTF && !SCHED_ALT
- 	select STACKTRACE if STACKTRACE_SUPPORT
- 	help
- 	  This option enables a new scheduler class sched_ext (SCX), which
-diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
-index a4dd285cdf39..5b4ebe58d032 100644
---- a/kernel/cgroup/cpuset.c
-+++ b/kernel/cgroup/cpuset.c
-@@ -620,7 +620,7 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial)
- 	return ret;
- }
- 
--#ifdef CONFIG_SMP
-+#if defined(CONFIG_SMP) && !defined(CONFIG_SCHED_ALT)
- /*
-  * Helper routine for generate_sched_domains().
-  * Do cpusets a, b have overlapping effective cpus_allowed masks?
-@@ -1031,7 +1031,7 @@ void rebuild_sched_domains_locked(void)
- 	/* Have scheduler rebuild the domains */
- 	partition_and_rebuild_sched_domains(ndoms, doms, attr);
- }
--#else /* !CONFIG_SMP */
-+#else /* !CONFIG_SMP || CONFIG_SCHED_ALT */
- void rebuild_sched_domains_locked(void)
- {
- }
-@@ -2926,12 +2926,15 @@ static int cpuset_can_attach(struct cgroup_taskset *tset)
- 				goto out_unlock;
- 		}
- 
-+#ifndef CONFIG_SCHED_ALT
- 		if (dl_task(task)) {
- 			cs->nr_migrate_dl_tasks++;
- 			cs->sum_migrate_dl_bw += task->dl.dl_bw;
- 		}
-+#endif
- 	}
- 
-+#ifndef CONFIG_SCHED_ALT
- 	if (!cs->nr_migrate_dl_tasks)
- 		goto out_success;
- 
-@@ -2952,6 +2955,7 @@ static int cpuset_can_attach(struct cgroup_taskset *tset)
- 	}
- 
- out_success:
-+#endif
- 	/*
- 	 * Mark attach is in progress.  This makes validate_change() fail
- 	 * changes which zero cpus/mems_allowed.
-@@ -2973,12 +2977,14 @@ static void cpuset_cancel_attach(struct cgroup_taskset *tset)
- 	mutex_lock(&cpuset_mutex);
- 	dec_attach_in_progress_locked(cs);
- 
-+#ifndef CONFIG_SCHED_ALT
- 	if (cs->nr_migrate_dl_tasks) {
- 		int cpu = cpumask_any(cs->effective_cpus);
- 
- 		dl_bw_free(cpu, cs->sum_migrate_dl_bw);
- 		reset_migrate_dl_data(cs);
- 	}
-+#endif
- 
- 	mutex_unlock(&cpuset_mutex);
- }
-diff --git a/kernel/delayacct.c b/kernel/delayacct.c
-index dead51de8eb5..8edef9676ab3 100644
---- a/kernel/delayacct.c
-+++ b/kernel/delayacct.c
-@@ -149,7 +149,7 @@ int delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
- 	 */
- 	t1 = tsk->sched_info.pcount;
- 	t2 = tsk->sched_info.run_delay;
--	t3 = tsk->se.sum_exec_runtime;
-+	t3 = tsk_seruntime(tsk);
- 
- 	d->cpu_count += t1;
- 
-diff --git a/kernel/exit.c b/kernel/exit.c
-index 619f0014c33b..7dc53ddd45a8 100644
---- a/kernel/exit.c
-+++ b/kernel/exit.c
-@@ -175,7 +175,7 @@ static void __exit_signal(struct task_struct *tsk)
- 			sig->curr_target = next_thread(tsk);
- 	}
- 
--	add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
-+	add_device_randomness((const void*) &tsk_seruntime(tsk),
- 			      sizeof(unsigned long long));
- 
- 	/*
-@@ -196,7 +196,7 @@ static void __exit_signal(struct task_struct *tsk)
- 	sig->inblock += task_io_get_inblock(tsk);
- 	sig->oublock += task_io_get_oublock(tsk);
- 	task_io_accounting_add(&sig->ioac, &tsk->ioac);
--	sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
-+	sig->sum_sched_runtime += tsk_seruntime(tsk);
- 	sig->nr_threads--;
- 	__unhash_process(tsk, group_dead);
- 	write_sequnlock(&sig->stats_lock);
-diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
-index ebebd0eec7f6..802112207855 100644
---- a/kernel/locking/rtmutex.c
-+++ b/kernel/locking/rtmutex.c
-@@ -363,7 +363,7 @@ waiter_update_prio(struct rt_mutex_waiter *waiter, struct task_struct *task)
- 	lockdep_assert(RB_EMPTY_NODE(&waiter->tree.entry));
- 
- 	waiter->tree.prio = __waiter_prio(task);
--	waiter->tree.deadline = task->dl.deadline;
-+	waiter->tree.deadline = __tsk_deadline(task);
- }
- 
- /*
-@@ -384,16 +384,20 @@ waiter_clone_prio(struct rt_mutex_waiter *waiter, struct task_struct *task)
-  * Only use with rt_waiter_node_{less,equal}()
-  */
- #define task_to_waiter_node(p)	\
--	&(struct rt_waiter_node){ .prio = __waiter_prio(p), .deadline = (p)->dl.deadline }
-+	&(struct rt_waiter_node){ .prio = __waiter_prio(p), .deadline = __tsk_deadline(p) }
- #define task_to_waiter(p)	\
- 	&(struct rt_mutex_waiter){ .tree = *task_to_waiter_node(p) }
- 
- static __always_inline int rt_waiter_node_less(struct rt_waiter_node *left,
- 					       struct rt_waiter_node *right)
- {
-+#ifdef CONFIG_SCHED_PDS
-+	return (left->deadline < right->deadline);
-+#else
- 	if (left->prio < right->prio)
- 		return 1;
- 
-+#ifndef CONFIG_SCHED_BMQ
- 	/*
- 	 * If both waiters have dl_prio(), we check the deadlines of the
- 	 * associated tasks.
-@@ -402,16 +406,22 @@ static __always_inline int rt_waiter_node_less(struct rt_waiter_node *left,
- 	 */
- 	if (dl_prio(left->prio))
- 		return dl_time_before(left->deadline, right->deadline);
-+#endif
- 
- 	return 0;
-+#endif
- }
- 
- static __always_inline int rt_waiter_node_equal(struct rt_waiter_node *left,
- 						 struct rt_waiter_node *right)
- {
-+#ifdef CONFIG_SCHED_PDS
-+	return (left->deadline == right->deadline);
-+#else
- 	if (left->prio != right->prio)
- 		return 0;
- 
-+#ifndef CONFIG_SCHED_BMQ
- 	/*
- 	 * If both waiters have dl_prio(), we check the deadlines of the
- 	 * associated tasks.
-@@ -420,8 +430,10 @@ static __always_inline int rt_waiter_node_equal(struct rt_waiter_node *left,
- 	 */
- 	if (dl_prio(left->prio))
- 		return left->deadline == right->deadline;
-+#endif
- 
- 	return 1;
-+#endif
- }
- 
- static inline bool rt_mutex_steal(struct rt_mutex_waiter *waiter,
-diff --git a/kernel/locking/ww_mutex.h b/kernel/locking/ww_mutex.h
-index 76d204b7d29c..de1a52f963e5 100644
---- a/kernel/locking/ww_mutex.h
-+++ b/kernel/locking/ww_mutex.h
-@@ -247,6 +247,7 @@ __ww_ctx_less(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
- 
- 		/* equal static prio */
- 
-+#ifndef	CONFIG_SCHED_ALT
- 		if (dl_prio(a_prio)) {
- 			if (dl_time_before(b->task->dl.deadline,
- 					   a->task->dl.deadline))
-@@ -256,6 +257,7 @@ __ww_ctx_less(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
- 					   b->task->dl.deadline))
- 				return false;
- 		}
-+#endif
- 
- 		/* equal prio */
- 	}
-diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
-index 976092b7bd45..31d587c16ec1 100644
---- a/kernel/sched/Makefile
-+++ b/kernel/sched/Makefile
-@@ -28,7 +28,12 @@ endif
- # These compilation units have roughly the same size and complexity - so their
- # build parallelizes well and finishes roughly at once:
- #
-+ifdef CONFIG_SCHED_ALT
-+obj-y += alt_core.o
-+obj-$(CONFIG_SCHED_DEBUG) += alt_debug.o
-+else
- obj-y += core.o
- obj-y += fair.o
-+endif
- obj-y += build_policy.o
- obj-y += build_utility.o
-diff --git a/kernel/sched/alt_core.c b/kernel/sched/alt_core.c
-new file mode 100644
-index 000000000000..0a08bc0176ac
---- /dev/null
-+++ b/kernel/sched/alt_core.c
-@@ -0,0 +1,7515 @@
-+/*
-+ *  kernel/sched/alt_core.c
-+ *
-+ *  Core alternative kernel scheduler code and related syscalls
-+ *
-+ *  Copyright (C) 1991-2002  Linus Torvalds
-+ *
-+ *  2009-08-13	Brainfuck deadline scheduling policy by Con Kolivas deletes
-+ *		a whole lot of those previous things.
-+ *  2017-09-06	Priority and Deadline based Skip list multiple queue kernel
-+ *		scheduler by Alfred Chen.
-+ *  2019-02-20	BMQ(BitMap Queue) kernel scheduler by Alfred Chen.
-+ */
-+#include <linux/sched/clock.h>
-+#include <linux/sched/cputime.h>
-+#include <linux/sched/debug.h>
-+#include <linux/sched/hotplug.h>
-+#include <linux/sched/init.h>
-+#include <linux/sched/isolation.h>
-+#include <linux/sched/loadavg.h>
-+#include <linux/sched/mm.h>
-+#include <linux/sched/nohz.h>
-+#include <linux/sched/stat.h>
-+#include <linux/sched/wake_q.h>
-+
-+#include <linux/blkdev.h>
-+#include <linux/context_tracking.h>
-+#include <linux/cpuset.h>
-+#include <linux/delayacct.h>
-+#include <linux/init_task.h>
-+#include <linux/kcov.h>
-+#include <linux/kprobes.h>
-+#include <linux/nmi.h>
-+#include <linux/rseq.h>
-+#include <linux/scs.h>
-+
-+#include <uapi/linux/sched/types.h>
-+
-+#include <asm/irq_regs.h>
-+#include <asm/switch_to.h>
-+
-+#define CREATE_TRACE_POINTS
-+#include <trace/events/sched.h>
-+#include <trace/events/ipi.h>
-+#undef CREATE_TRACE_POINTS
-+
-+#include "sched.h"
-+#include "smp.h"
-+
-+#include "pelt.h"
-+
-+#include "../../io_uring/io-wq.h"
-+#include "../smpboot.h"
-+
-+EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpu);
-+EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpumask);
-+
-+/*
-+ * Export tracepoints that act as a bare tracehook (ie: have no trace event
-+ * associated with them) to allow external modules to probe them.
-+ */
-+EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp);
-+
-+#ifdef CONFIG_SCHED_DEBUG
-+#define sched_feat(x)	(1)
-+/*
-+ * Print a warning if need_resched is set for the given duration (if
-+ * LATENCY_WARN is enabled).
-+ *
-+ * If sysctl_resched_latency_warn_once is set, only one warning will be shown
-+ * per boot.
-+ */
-+__read_mostly int sysctl_resched_latency_warn_ms = 100;
-+__read_mostly int sysctl_resched_latency_warn_once = 1;
-+#else
-+#define sched_feat(x)	(0)
-+#endif /* CONFIG_SCHED_DEBUG */
-+
-+#define ALT_SCHED_VERSION "v6.12-r1"
-+
-+#define STOP_PRIO		(MAX_RT_PRIO - 1)
-+
-+/*
-+ * Time slice
-+ * (default: 4 msec, units: nanoseconds)
-+ */
-+unsigned int sysctl_sched_base_slice __read_mostly	= (4 << 20);
-+
-+#include "alt_core.h"
-+#include "alt_topology.h"
-+
-+/* Reschedule if less than this many μs left */
-+#define RESCHED_NS		(100 << 10)
-+
-+/**
-+ * sched_yield_type - Type of sched_yield() will be performed.
-+ * 0: No yield.
-+ * 1: Requeue task. (default)
-+ */
-+int sched_yield_type __read_mostly = 1;
-+
-+#ifdef CONFIG_SMP
-+cpumask_t sched_rq_pending_mask ____cacheline_aligned_in_smp;
-+
-+DEFINE_PER_CPU_ALIGNED(cpumask_t [NR_CPU_AFFINITY_LEVELS], sched_cpu_topo_masks);
-+DEFINE_PER_CPU_ALIGNED(cpumask_t *, sched_cpu_llc_mask);
-+DEFINE_PER_CPU_ALIGNED(cpumask_t *, sched_cpu_topo_end_mask);
-+
-+#ifdef CONFIG_SCHED_SMT
-+DEFINE_STATIC_KEY_FALSE(sched_smt_present);
-+EXPORT_SYMBOL_GPL(sched_smt_present);
-+
-+cpumask_t sched_smt_mask ____cacheline_aligned_in_smp;
-+#endif
-+
-+/*
-+ * Keep a unique ID per domain (we use the first CPUs number in the cpumask of
-+ * the domain), this allows us to quickly tell if two cpus are in the same cache
-+ * domain, see cpus_share_cache().
-+ */
-+DEFINE_PER_CPU(int, sd_llc_id);
-+#endif /* CONFIG_SMP */
-+
-+DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
-+
-+#ifndef prepare_arch_switch
-+# define prepare_arch_switch(next)	do { } while (0)
-+#endif
-+#ifndef finish_arch_post_lock_switch
-+# define finish_arch_post_lock_switch()	do { } while (0)
-+#endif
-+
-+static cpumask_t sched_preempt_mask[SCHED_QUEUE_BITS + 2] ____cacheline_aligned_in_smp;
-+
-+cpumask_t *const sched_idle_mask = &sched_preempt_mask[SCHED_QUEUE_BITS - 1];
-+cpumask_t *const sched_sg_idle_mask = &sched_preempt_mask[SCHED_QUEUE_BITS];
-+cpumask_t *const sched_pcore_idle_mask = &sched_preempt_mask[SCHED_QUEUE_BITS];
-+cpumask_t *const sched_ecore_idle_mask = &sched_preempt_mask[SCHED_QUEUE_BITS + 1];
-+
-+/* task function */
-+static inline const struct cpumask *task_user_cpus(struct task_struct *p)
-+{
-+	if (!p->user_cpus_ptr)
-+		return cpu_possible_mask; /* &init_task.cpus_mask */
-+	return p->user_cpus_ptr;
-+}
-+
-+/* sched_queue related functions */
-+static inline void sched_queue_init(struct sched_queue *q)
-+{
-+	int i;
-+
-+	bitmap_zero(q->bitmap, SCHED_QUEUE_BITS);
-+	for(i = 0; i < SCHED_LEVELS; i++)
-+		INIT_LIST_HEAD(&q->heads[i]);
-+}
-+
-+/*
-+ * Init idle task and put into queue structure of rq
-+ * IMPORTANT: may be called multiple times for a single cpu
-+ */
-+static inline void sched_queue_init_idle(struct sched_queue *q,
-+					 struct task_struct *idle)
-+{
-+	INIT_LIST_HEAD(&q->heads[IDLE_TASK_SCHED_PRIO]);
-+	list_add_tail(&idle->sq_node, &q->heads[IDLE_TASK_SCHED_PRIO]);
-+	idle->on_rq = TASK_ON_RQ_QUEUED;
-+}
-+
-+#define CLEAR_CACHED_PREEMPT_MASK(pr, low, high, cpu)		\
-+	if (low < pr && pr <= high)				\
-+		cpumask_clear_cpu(cpu, sched_preempt_mask + pr);
-+
-+#define SET_CACHED_PREEMPT_MASK(pr, low, high, cpu)		\
-+	if (low < pr && pr <= high)				\
-+		cpumask_set_cpu(cpu, sched_preempt_mask + pr);
-+
-+static atomic_t sched_prio_record = ATOMIC_INIT(0);
-+
-+/* water mark related functions */
-+static inline void update_sched_preempt_mask(struct rq *rq)
-+{
-+	int prio = find_first_bit(rq->queue.bitmap, SCHED_QUEUE_BITS);
-+	int last_prio = rq->prio;
-+	int cpu, pr;
-+
-+	if (prio == last_prio)
-+		return;
-+
-+	rq->prio = prio;
-+#ifdef CONFIG_SCHED_PDS
-+	rq->prio_idx = sched_prio2idx(rq->prio, rq);
-+#endif
-+	cpu = cpu_of(rq);
-+	pr = atomic_read(&sched_prio_record);
-+
-+	if (prio < last_prio) {
-+		if (IDLE_TASK_SCHED_PRIO == last_prio) {
-+			rq->clear_idle_mask_func(cpu, sched_idle_mask);
-+			last_prio -= 2;
-+		}
-+		CLEAR_CACHED_PREEMPT_MASK(pr, prio, last_prio, cpu);
-+
-+		return;
-+	}
-+	/* last_prio < prio */
-+	if (IDLE_TASK_SCHED_PRIO == prio) {
-+		rq->set_idle_mask_func(cpu, sched_idle_mask);
-+		prio -= 2;
-+	}
-+	SET_CACHED_PREEMPT_MASK(pr, last_prio, prio, cpu);
-+}
-+
-+/*
-+ * Serialization rules:
-+ *
-+ * Lock order:
-+ *
-+ *   p->pi_lock
-+ *     rq->lock
-+ *       hrtimer_cpu_base->lock (hrtimer_start() for bandwidth controls)
-+ *
-+ *  rq1->lock
-+ *    rq2->lock  where: rq1 < rq2
-+ *
-+ * Regular state:
-+ *
-+ * Normal scheduling state is serialized by rq->lock. __schedule() takes the
-+ * local CPU's rq->lock, it optionally removes the task from the runqueue and
-+ * always looks at the local rq data structures to find the most eligible task
-+ * to run next.
-+ *
-+ * Task enqueue is also under rq->lock, possibly taken from another CPU.
-+ * Wakeups from another LLC domain might use an IPI to transfer the enqueue to
-+ * the local CPU to avoid bouncing the runqueue state around [ see
-+ * ttwu_queue_wakelist() ]
-+ *
-+ * Task wakeup, specifically wakeups that involve migration, are horribly
-+ * complicated to avoid having to take two rq->locks.
-+ *
-+ * Special state:
-+ *
-+ * System-calls and anything external will use task_rq_lock() which acquires
-+ * both p->pi_lock and rq->lock. As a consequence the state they change is
-+ * stable while holding either lock:
-+ *
-+ *  - sched_setaffinity()/
-+ *    set_cpus_allowed_ptr():	p->cpus_ptr, p->nr_cpus_allowed
-+ *  - set_user_nice():		p->se.load, p->*prio
-+ *  - __sched_setscheduler():	p->sched_class, p->policy, p->*prio,
-+ *				p->se.load, p->rt_priority,
-+ *				p->dl.dl_{runtime, deadline, period, flags, bw, density}
-+ *  - sched_setnuma():		p->numa_preferred_nid
-+ *  - sched_move_task():        p->sched_task_group
-+ *  - uclamp_update_active()	p->uclamp*
-+ *
-+ * p->state <- TASK_*:
-+ *
-+ *   is changed locklessly using set_current_state(), __set_current_state() or
-+ *   set_special_state(), see their respective comments, or by
-+ *   try_to_wake_up(). This latter uses p->pi_lock to serialize against
-+ *   concurrent self.
-+ *
-+ * p->on_rq <- { 0, 1 = TASK_ON_RQ_QUEUED, 2 = TASK_ON_RQ_MIGRATING }:
-+ *
-+ *   is set by activate_task() and cleared by deactivate_task(), under
-+ *   rq->lock. Non-zero indicates the task is runnable, the special
-+ *   ON_RQ_MIGRATING state is used for migration without holding both
-+ *   rq->locks. It indicates task_cpu() is not stable, see task_rq_lock().
-+ *
-+ *   Additionally it is possible to be ->on_rq but still be considered not
-+ *   runnable when p->se.sched_delayed is true. These tasks are on the runqueue
-+ *   but will be dequeued as soon as they get picked again. See the
-+ *   task_is_runnable() helper.
-+ *
-+ * p->on_cpu <- { 0, 1 }:
-+ *
-+ *   is set by prepare_task() and cleared by finish_task() such that it will be
-+ *   set before p is scheduled-in and cleared after p is scheduled-out, both
-+ *   under rq->lock. Non-zero indicates the task is running on its CPU.
-+ *
-+ *   [ The astute reader will observe that it is possible for two tasks on one
-+ *     CPU to have ->on_cpu = 1 at the same time. ]
-+ *
-+ * task_cpu(p): is changed by set_task_cpu(), the rules are:
-+ *
-+ *  - Don't call set_task_cpu() on a blocked task:
-+ *
-+ *    We don't care what CPU we're not running on, this simplifies hotplug,
-+ *    the CPU assignment of blocked tasks isn't required to be valid.
-+ *
-+ *  - for try_to_wake_up(), called under p->pi_lock:
-+ *
-+ *    This allows try_to_wake_up() to only take one rq->lock, see its comment.
-+ *
-+ *  - for migration called under rq->lock:
-+ *    [ see task_on_rq_migrating() in task_rq_lock() ]
-+ *
-+ *    o move_queued_task()
-+ *    o detach_task()
-+ *
-+ *  - for migration called under double_rq_lock():
-+ *
-+ *    o __migrate_swap_task()
-+ *    o push_rt_task() / pull_rt_task()
-+ *    o push_dl_task() / pull_dl_task()
-+ *    o dl_task_offline_migration()
-+ *
-+ */
-+
-+/*
-+ * Context: p->pi_lock
-+ */
-+static inline struct rq *
-+task_access_lock_irqsave(struct task_struct *p, raw_spinlock_t **plock, unsigned long *flags)
-+{
-+	struct rq *rq;
-+	for (;;) {
-+		rq = task_rq(p);
-+		if (p->on_cpu || task_on_rq_queued(p)) {
-+			raw_spin_lock_irqsave(&rq->lock, *flags);
-+			if (likely((p->on_cpu || task_on_rq_queued(p)) && rq == task_rq(p))) {
-+				*plock = &rq->lock;
-+				return rq;
-+			}
-+			raw_spin_unlock_irqrestore(&rq->lock, *flags);
-+		} else if (task_on_rq_migrating(p)) {
-+			do {
-+				cpu_relax();
-+			} while (unlikely(task_on_rq_migrating(p)));
-+		} else {
-+			raw_spin_lock_irqsave(&p->pi_lock, *flags);
-+			if (likely(!p->on_cpu && !p->on_rq && rq == task_rq(p))) {
-+				*plock = &p->pi_lock;
-+				return rq;
-+			}
-+			raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
-+		}
-+	}
-+}
-+
-+static inline void
-+task_access_unlock_irqrestore(struct task_struct *p, raw_spinlock_t *lock, unsigned long *flags)
-+{
-+	raw_spin_unlock_irqrestore(lock, *flags);
-+}
-+
-+/*
-+ * __task_rq_lock - lock the rq @p resides on.
-+ */
-+struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
-+	__acquires(rq->lock)
-+{
-+	struct rq *rq;
-+
-+	lockdep_assert_held(&p->pi_lock);
-+
-+	for (;;) {
-+		rq = task_rq(p);
-+		raw_spin_lock(&rq->lock);
-+		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
-+			return rq;
-+		raw_spin_unlock(&rq->lock);
-+
-+		while (unlikely(task_on_rq_migrating(p)))
-+			cpu_relax();
-+	}
-+}
-+
-+/*
-+ * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
-+ */
-+struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
-+	__acquires(p->pi_lock)
-+	__acquires(rq->lock)
-+{
-+	struct rq *rq;
-+
-+	for (;;) {
-+		raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
-+		rq = task_rq(p);
-+		raw_spin_lock(&rq->lock);
-+		/*
-+		 *	move_queued_task()		task_rq_lock()
-+		 *
-+		 *	ACQUIRE (rq->lock)
-+		 *	[S] ->on_rq = MIGRATING		[L] rq = task_rq()
-+		 *	WMB (__set_task_cpu())		ACQUIRE (rq->lock);
-+		 *	[S] ->cpu = new_cpu		[L] task_rq()
-+		 *					[L] ->on_rq
-+		 *	RELEASE (rq->lock)
-+		 *
-+		 * If we observe the old CPU in task_rq_lock(), the acquire of
-+		 * the old rq->lock will fully serialize against the stores.
-+		 *
-+		 * If we observe the new CPU in task_rq_lock(), the address
-+		 * dependency headed by '[L] rq = task_rq()' and the acquire
-+		 * will pair with the WMB to ensure we then also see migrating.
-+		 */
-+		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
-+			return rq;
-+		}
-+		raw_spin_unlock(&rq->lock);
-+		raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
-+
-+		while (unlikely(task_on_rq_migrating(p)))
-+			cpu_relax();
-+	}
-+}
-+
-+static inline void rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
-+	__acquires(rq->lock)
-+{
-+	raw_spin_lock_irqsave(&rq->lock, rf->flags);
-+}
-+
-+static inline void rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf)
-+	__releases(rq->lock)
-+{
-+	raw_spin_unlock_irqrestore(&rq->lock, rf->flags);
-+}
-+
-+DEFINE_LOCK_GUARD_1(rq_lock_irqsave, struct rq,
-+		    rq_lock_irqsave(_T->lock, &_T->rf),
-+		    rq_unlock_irqrestore(_T->lock, &_T->rf),
-+		    struct rq_flags rf)
-+
-+void raw_spin_rq_lock_nested(struct rq *rq, int subclass)
-+{
-+	raw_spinlock_t *lock;
-+
-+	/* Matches synchronize_rcu() in __sched_core_enable() */
-+	preempt_disable();
-+
-+	for (;;) {
-+		lock = __rq_lockp(rq);
-+		raw_spin_lock_nested(lock, subclass);
-+		if (likely(lock == __rq_lockp(rq))) {
-+			/* preempt_count *MUST* be > 1 */
-+			preempt_enable_no_resched();
-+			return;
-+		}
-+		raw_spin_unlock(lock);
-+	}
-+}
-+
-+void raw_spin_rq_unlock(struct rq *rq)
-+{
-+	raw_spin_unlock(rq_lockp(rq));
-+}
-+
-+/*
-+ * RQ-clock updating methods:
-+ */
-+
-+static void update_rq_clock_task(struct rq *rq, s64 delta)
-+{
-+/*
-+ * In theory, the compiler should just see 0 here, and optimize out the call
-+ * to sched_rt_avg_update. But I don't trust it...
-+ */
-+	s64 __maybe_unused steal = 0, irq_delta = 0;
-+
-+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
-+	irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
-+
-+	/*
-+	 * Since irq_time is only updated on {soft,}irq_exit, we might run into
-+	 * this case when a previous update_rq_clock() happened inside a
-+	 * {soft,}IRQ region.
-+	 *
-+	 * When this happens, we stop ->clock_task and only update the
-+	 * prev_irq_time stamp to account for the part that fit, so that a next
-+	 * update will consume the rest. This ensures ->clock_task is
-+	 * monotonic.
-+	 *
-+	 * It does, however, cause some slight misattribution of {soft,}IRQ
-+	 * time, a more accurate solution would be to update the irq_time using
-+	 * the current rq->clock timestamp, except that would require using
-+	 * atomic ops.
-+	 */
-+	if (irq_delta > delta)
-+		irq_delta = delta;
-+
-+	rq->prev_irq_time += irq_delta;
-+	delta -= irq_delta;
-+	delayacct_irq(rq->curr, irq_delta);
-+#endif
-+#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
-+	if (static_key_false((&paravirt_steal_rq_enabled))) {
-+		steal = paravirt_steal_clock(cpu_of(rq));
-+		steal -= rq->prev_steal_time_rq;
-+
-+		if (unlikely(steal > delta))
-+			steal = delta;
-+
-+		rq->prev_steal_time_rq += steal;
-+		delta -= steal;
-+	}
-+#endif
-+
-+	rq->clock_task += delta;
-+
-+#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
-+	if ((irq_delta + steal))
-+		update_irq_load_avg(rq, irq_delta + steal);
-+#endif
-+}
-+
-+static inline void update_rq_clock(struct rq *rq)
-+{
-+	s64 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
-+
-+	if (unlikely(delta <= 0))
-+		return;
-+	rq->clock += delta;
-+	sched_update_rq_clock(rq);
-+	update_rq_clock_task(rq, delta);
-+}
-+
-+/*
-+ * RQ Load update routine
-+ */
-+#define RQ_LOAD_HISTORY_BITS		(sizeof(s32) * 8ULL)
-+#define RQ_UTIL_SHIFT			(8)
-+#define RQ_LOAD_HISTORY_TO_UTIL(l)	(((l) >> (RQ_LOAD_HISTORY_BITS - 1 - RQ_UTIL_SHIFT)) & 0xff)
-+
-+#define LOAD_BLOCK(t)		((t) >> 17)
-+#define LOAD_HALF_BLOCK(t)	((t) >> 16)
-+#define BLOCK_MASK(t)		((t) & ((0x01 << 18) - 1))
-+#define LOAD_BLOCK_BIT(b)	(1UL << (RQ_LOAD_HISTORY_BITS - 1 - (b)))
-+#define CURRENT_LOAD_BIT	LOAD_BLOCK_BIT(0)
-+
-+static inline void rq_load_update(struct rq *rq)
-+{
-+	u64 time = rq->clock;
-+	u64 delta = min(LOAD_BLOCK(time) - LOAD_BLOCK(rq->load_stamp), RQ_LOAD_HISTORY_BITS - 1);
-+	u64 prev = !!(rq->load_history & CURRENT_LOAD_BIT);
-+	u64 curr = !!rq->nr_running;
-+
-+	if (delta) {
-+		rq->load_history = rq->load_history >> delta;
-+
-+		if (delta < RQ_UTIL_SHIFT) {
-+			rq->load_block += (~BLOCK_MASK(rq->load_stamp)) * prev;
-+			if (!!LOAD_HALF_BLOCK(rq->load_block) ^ curr)
-+				rq->load_history ^= LOAD_BLOCK_BIT(delta);
-+		}
-+
-+		rq->load_block = BLOCK_MASK(time) * prev;
-+	} else {
-+		rq->load_block += (time - rq->load_stamp) * prev;
-+	}
-+	if (prev ^ curr)
-+		rq->load_history ^= CURRENT_LOAD_BIT;
-+	rq->load_stamp = time;
-+}
-+
-+unsigned long rq_load_util(struct rq *rq, unsigned long max)
-+{
-+	return RQ_LOAD_HISTORY_TO_UTIL(rq->load_history) * (max >> RQ_UTIL_SHIFT);
-+}
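-+
-+/*
-+ * Worked example (illustrative): with RQ_UTIL_SHIFT == 8, the high-order
-+ * bits of the busy/idle history are scaled into a 0..255 value. A
-+ * runqueue that was busy in every recent block has
-+ * load_history == 0xffffffff, so RQ_LOAD_HISTORY_TO_UTIL() yields 0xff
-+ * and, for max == 1024:
-+ *
-+ *	rq_load_util(rq, 1024) == 0xff * (1024 >> 8) == 1020
-+ *
-+ * while a fully idle history (load_history == 0) yields 0.
-+ */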
-+
-+#ifdef CONFIG_SMP
-+unsigned long sched_cpu_util(int cpu)
-+{
-+	return rq_load_util(cpu_rq(cpu), arch_scale_cpu_capacity(cpu));
-+}
-+#endif /* CONFIG_SMP */
-+
-+#ifdef CONFIG_CPU_FREQ
-+/**
-+ * cpufreq_update_util - Take a note about CPU utilization changes.
-+ * @rq: Runqueue to carry out the update for.
-+ * @flags: Update reason flags.
-+ *
-+ * This function is called by the scheduler on the CPU whose utilization is
-+ * being updated.
-+ *
-+ * It can only be called from RCU-sched read-side critical sections.
-+ *
-+ * The way cpufreq is currently arranged requires it to evaluate the CPU
-+ * performance state (frequency/voltage) on a regular basis to prevent it from
-+ * being stuck in a completely inadequate performance level for too long.
-+ * That is not guaranteed to happen if the updates are only triggered from CFS
-+ * and DL, though, because those updates may not be coming in if only RT tasks
-+ * are active all the time.
-+ *
-+ * As a workaround for that issue, this function is called periodically by the
-+ * RT sched class to trigger extra cpufreq updates to prevent it from stalling,
-+ * but that really is a band-aid.  Going forward it should be replaced with
-+ * solutions targeted more specifically at RT tasks.
-+ */
-+static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
-+{
-+	struct update_util_data *data;
-+
-+#ifdef CONFIG_SMP
-+	rq_load_update(rq);
-+#endif
-+	data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data, cpu_of(rq)));
-+	if (data)
-+		data->func(data, rq_clock(rq), flags);
-+}
-+#else
-+static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
-+{
-+#ifdef CONFIG_SMP
-+	rq_load_update(rq);
-+#endif
-+}
-+#endif /* CONFIG_CPU_FREQ */
-+
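-+/*
-+ * A cpufreq governor consumes the callback above by registering a per-CPU
-+ * hook; a minimal sketch, assuming the generic
-+ * cpufreq_add_update_util_hook() API and a hypothetical my_data/my_func:
-+ *
-+ *	static struct update_util_data my_data;
-+ *
-+ *	static void my_func(struct update_util_data *data, u64 time,
-+ *			    unsigned int flags)
-+ *	{
-+ *		... re-evaluate the CPU's frequency ...
-+ *	}
-+ *
-+ *	cpufreq_add_update_util_hook(cpu, &my_data, my_func);
-+ */
-+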
-+#ifdef CONFIG_NO_HZ_FULL
-+/*
-+ * Tick may be needed by tasks in the runqueue depending on their policy and
-+ * requirements. If the tick is needed, let's send the target an IPI to kick
-+ * it out of nohz mode if necessary.
-+ */
-+static inline void sched_update_tick_dependency(struct rq *rq)
-+{
-+	int cpu = cpu_of(rq);
-+
-+	if (!tick_nohz_full_cpu(cpu))
-+		return;
-+
-+	if (rq->nr_running < 2)
-+		tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED);
-+	else
-+		tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);
-+}
-+#else /* !CONFIG_NO_HZ_FULL */
-+static inline void sched_update_tick_dependency(struct rq *rq) { }
-+#endif
-+
-+bool sched_task_on_rq(struct task_struct *p)
-+{
-+	return task_on_rq_queued(p);
-+}
-+
-+unsigned long get_wchan(struct task_struct *p)
-+{
-+	unsigned long ip = 0;
-+	unsigned int state;
-+
-+	if (!p || p == current)
-+		return 0;
-+
-+	/* Only get wchan if task is blocked and we can keep it that way. */
-+	raw_spin_lock_irq(&p->pi_lock);
-+	state = READ_ONCE(p->__state);
-+	smp_rmb(); /* see try_to_wake_up() */
-+	if (state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq)
-+		ip = __get_wchan(p);
-+	raw_spin_unlock_irq(&p->pi_lock);
-+
-+	return ip;
-+}
-+
-+/*
-+ * Add/Remove/Requeue task to/from the runqueue routines
-+ * Context: rq->lock
-+ */
-+#define __SCHED_DEQUEUE_TASK(p, rq, flags, func)					\
-+	sched_info_dequeue(rq, p);							\
-+											\
-+	__list_del_entry(&p->sq_node);							\
-+	if (p->sq_node.prev == p->sq_node.next) {					\
-+		clear_bit(sched_idx2prio(p->sq_node.next - &rq->queue.heads[0], rq),	\
-+			  rq->queue.bitmap);						\
-+		func;									\
-+	}
-+
-+#define __SCHED_ENQUEUE_TASK(p, rq, flags, func)					\
-+	sched_info_enqueue(rq, p);							\
-+	{										\
-+	int idx, prio;									\
-+	TASK_SCHED_PRIO_IDX(p, rq, idx, prio);						\
-+	list_add_tail(&p->sq_node, &rq->queue.heads[idx]);				\
-+	if (list_is_first(&p->sq_node, &rq->queue.heads[idx])) {			\
-+		set_bit(prio, rq->queue.bitmap);					\
-+		func;									\
-+	}										\
-+	}
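-+
-+/*
-+ * The macros above keep the invariant that a bit is set in
-+ * rq->queue.bitmap iff the matching list head is non-empty, so picking
-+ * the first runnable task reduces to a bitmap scan (sketch; assumes the
-+ * sched_prio2idx() mapping used elsewhere in this scheduler):
-+ *
-+ *	prio = find_first_bit(rq->queue.bitmap, SCHED_QUEUE_BITS);
-+ *	p = list_first_entry(&rq->queue.heads[sched_prio2idx(prio, rq)],
-+ *			     struct task_struct, sq_node);
-+ */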
-+
-+static inline void dequeue_task(struct task_struct *p, struct rq *rq, int flags)
-+{
-+#ifdef ALT_SCHED_DEBUG
-+	lockdep_assert_held(&rq->lock);
-+
-+	/*printk(KERN_INFO "sched: dequeue(%d) %px %016llx\n", cpu_of(rq), p, p->deadline);*/
-+	WARN_ONCE(task_rq(p) != rq, "sched: dequeue task reside on cpu%d from cpu%d\n",
-+		  task_cpu(p), cpu_of(rq));
-+#endif
-+
-+	__SCHED_DEQUEUE_TASK(p, rq, flags, update_sched_preempt_mask(rq));
-+	--rq->nr_running;
-+#ifdef CONFIG_SMP
-+	if (1 == rq->nr_running)
-+		cpumask_clear_cpu(cpu_of(rq), &sched_rq_pending_mask);
-+#endif
-+
-+	sched_update_tick_dependency(rq);
-+}
-+
-+static inline void enqueue_task(struct task_struct *p, struct rq *rq, int flags)
-+{
-+#ifdef ALT_SCHED_DEBUG
-+	lockdep_assert_held(&rq->lock);
-+
-+	/*printk(KERN_INFO "sched: enqueue(%d) %px %d\n", cpu_of(rq), p, p->prio);*/
-+	WARN_ONCE(task_rq(p) != rq, "sched: enqueue task reside on cpu%d to cpu%d\n",
-+		  task_cpu(p), cpu_of(rq));
-+#endif
-+
-+	__SCHED_ENQUEUE_TASK(p, rq, flags, update_sched_preempt_mask(rq));
-+	++rq->nr_running;
-+#ifdef CONFIG_SMP
-+	if (2 == rq->nr_running)
-+		cpumask_set_cpu(cpu_of(rq), &sched_rq_pending_mask);
-+#endif
-+
-+	sched_update_tick_dependency(rq);
-+}
-+
-+void requeue_task(struct task_struct *p, struct rq *rq)
-+{
-+	struct list_head *node = &p->sq_node;
-+	int deq_idx, idx, prio;
-+
-+	TASK_SCHED_PRIO_IDX(p, rq, idx, prio);
-+#ifdef ALT_SCHED_DEBUG
-+	lockdep_assert_held(&rq->lock);
-+	/*printk(KERN_INFO "sched: requeue(%d) %px %016llx\n", cpu_of(rq), p, p->deadline);*/
-+	WARN_ONCE(task_rq(p) != rq, "sched: cpu[%d] requeue task reside on cpu%d\n",
-+		  cpu_of(rq), task_cpu(p));
-+#endif
-+	if (list_is_last(node, &rq->queue.heads[idx]))
-+		return;
-+
-+	__list_del_entry(node);
-+	if (node->prev == node->next && (deq_idx = node->next - &rq->queue.heads[0]) != idx)
-+		clear_bit(sched_idx2prio(deq_idx, rq), rq->queue.bitmap);
-+
-+	list_add_tail(node, &rq->queue.heads[idx]);
-+	if (list_is_first(node, &rq->queue.heads[idx]))
-+		set_bit(prio, rq->queue.bitmap);
-+	update_sched_preempt_mask(rq);
-+}
-+
-+/*
-+ * try_cmpxchg based fetch_or() macro so it works for different integer types:
-+ */
-+#define fetch_or(ptr, mask)						\
-+	({								\
-+		typeof(ptr) _ptr = (ptr);				\
-+		typeof(mask) _mask = (mask);				\
-+		typeof(*_ptr) _val = *_ptr;				\
-+									\
-+		do {							\
-+		} while (!try_cmpxchg(_ptr, &_val, _val | _mask));	\
-+	_val;								\
-+})
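-+
-+/*
-+ * Usage sketch: atomically set bits and learn the previous value in one
-+ * step, exactly as set_nr_and_not_polling() below does:
-+ *
-+ *	old = fetch_or(&ti->flags, _TIF_NEED_RESCHED);
-+ *	if (!(old & _TIF_POLLING_NRFLAG))
-+ *		... the target was not polling, an IPI is required ...
-+ */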
-+
-+#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
-+/*
-+ * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
-+ * this avoids any races wrt polling state changes and thereby avoids
-+ * spurious IPIs.
-+ */
-+static inline bool set_nr_and_not_polling(struct task_struct *p)
-+{
-+	struct thread_info *ti = task_thread_info(p);
-+	return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
-+}
-+
-+/*
-+ * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
-+ *
-+ * If this returns true, then the idle task promises to call
-+ * sched_ttwu_pending() and reschedule soon.
-+ */
-+static bool set_nr_if_polling(struct task_struct *p)
-+{
-+	struct thread_info *ti = task_thread_info(p);
-+	typeof(ti->flags) val = READ_ONCE(ti->flags);
-+
-+	do {
-+		if (!(val & _TIF_POLLING_NRFLAG))
-+			return false;
-+		if (val & _TIF_NEED_RESCHED)
-+			return true;
-+	} while (!try_cmpxchg(&ti->flags, &val, val | _TIF_NEED_RESCHED));
-+
-+	return true;
-+}
-+
-+#else
-+static inline bool set_nr_and_not_polling(struct task_struct *p)
-+{
-+	set_tsk_need_resched(p);
-+	return true;
-+}
-+
-+#ifdef CONFIG_SMP
-+static inline bool set_nr_if_polling(struct task_struct *p)
-+{
-+	return false;
-+}
-+#endif
-+#endif
-+
-+static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
-+{
-+	struct wake_q_node *node = &task->wake_q;
-+
-+	/*
-+	 * Atomically grab the task, if ->wake_q is !nil already it means
-+	 * it's already queued (either by us or someone else) and will get the
-+	 * wakeup due to that.
-+	 *
-+	 * In order to ensure that a pending wakeup will observe our pending
-+	 * state, even in the failed case, an explicit smp_mb() must be used.
-+	 */
-+	smp_mb__before_atomic();
-+	if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)))
-+		return false;
-+
-+	/*
-+	 * The head is context local, there can be no concurrency.
-+	 */
-+	*head->lastp = node;
-+	head->lastp = &node->next;
-+	return true;
-+}
-+
-+/**
-+ * wake_q_add() - queue a wakeup for 'later' waking.
-+ * @head: the wake_q_head to add @task to
-+ * @task: the task to queue for 'later' wakeup
-+ *
-+ * Queue a task for later wakeup, most likely by the wake_up_q() call in the
-+ * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
-+ * instantly.
-+ *
-+ * This function must be used as-if it were wake_up_process(); IOW the task
-+ * must be ready to be woken at this location.
-+ */
-+void wake_q_add(struct wake_q_head *head, struct task_struct *task)
-+{
-+	if (__wake_q_add(head, task))
-+		get_task_struct(task);
-+}
-+
-+/**
-+ * wake_q_add_safe() - safely queue a wakeup for 'later' waking.
-+ * @head: the wake_q_head to add @task to
-+ * @task: the task to queue for 'later' wakeup
-+ *
-+ * Queue a task for later wakeup, most likely by the wake_up_q() call in the
-+ * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
-+ * instantly.
-+ *
-+ * This function must be used as-if it were wake_up_process(); IOW the task
-+ * must be ready to be woken at this location.
-+ *
-+ * This function is essentially a task-safe equivalent to wake_q_add(). Callers
-+ * that already hold reference to @task can call the 'safe' version and trust
-+ * wake_q to do the right thing depending whether or not the @task is already
-+ * queued for wakeup.
-+ */
-+void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task)
-+{
-+	if (!__wake_q_add(head, task))
-+		put_task_struct(task);
-+}
-+
-+void wake_up_q(struct wake_q_head *head)
-+{
-+	struct wake_q_node *node = head->first;
-+
-+	while (node != WAKE_Q_TAIL) {
-+		struct task_struct *task;
-+
-+		task = container_of(node, struct task_struct, wake_q);
-+		/* task can safely be re-inserted now: */
-+		node = node->next;
-+		task->wake_q.next = NULL;
-+
-+		/*
-+		 * wake_up_process() executes a full barrier, which pairs with
-+		 * the queueing in wake_q_add() so as not to miss wakeups.
-+		 */
-+		wake_up_process(task);
-+		put_task_struct(task);
-+	}
-+}
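-+
-+/*
-+ * Typical usage (sketch): batch wakeups under a lock, then issue them
-+ * after dropping it so the woken tasks never contend with the waker:
-+ *
-+ *	DEFINE_WAKE_Q(head);
-+ *
-+ *	spin_lock(&obj->lock);
-+ *	wake_q_add(&head, waiter);	(takes a task reference)
-+ *	spin_unlock(&obj->lock);
-+ *	wake_up_q(&head);		(wakes and drops the references)
-+ */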
-+
-+/*
-+ * resched_curr - mark rq's current task 'to be rescheduled now'.
-+ *
-+ * On UP this means the setting of the need_resched flag, on SMP it
-+ * might also involve a cross-CPU call to trigger the scheduler on
-+ * the target CPU.
-+ */
-+static inline void resched_curr(struct rq *rq)
-+{
-+	struct task_struct *curr = rq->curr;
-+	int cpu;
-+
-+	lockdep_assert_held(&rq->lock);
-+
-+	if (test_tsk_need_resched(curr))
-+		return;
-+
-+	cpu = cpu_of(rq);
-+	if (cpu == smp_processor_id()) {
-+		set_tsk_need_resched(curr);
-+		set_preempt_need_resched();
-+		return;
-+	}
-+
-+	if (set_nr_and_not_polling(curr))
-+		smp_send_reschedule(cpu);
-+	else
-+		trace_sched_wake_idle_without_ipi(cpu);
-+}
-+
-+void resched_cpu(int cpu)
-+{
-+	struct rq *rq = cpu_rq(cpu);
-+	unsigned long flags;
-+
-+	raw_spin_lock_irqsave(&rq->lock, flags);
-+	if (cpu_online(cpu) || cpu == smp_processor_id())
-+		resched_curr(cpu_rq(cpu));
-+	raw_spin_unlock_irqrestore(&rq->lock, flags);
-+}
-+
-+#ifdef CONFIG_SMP
-+#ifdef CONFIG_NO_HZ_COMMON
-+/*
-+ * This routine would record that the CPU is going idle with tick stopped,
-+ * for use by idle load balancing; it is intentionally a no-op here.
-+ */
-+void nohz_balance_enter_idle(int cpu) {}
-+
-+/*
-+ * In the semi idle case, use the nearest busy CPU for migrating timers
-+ * from an idle CPU.  This is good for power-savings.
-+ *
-+ * We don't do similar optimization for completely idle system, as
-+ * selecting an idle CPU will add more delays to the timers than intended
-+ * (as that CPU's timer base may not be up to date wrt jiffies etc).
-+ */
-+int get_nohz_timer_target(void)
-+{
-+	int i, cpu = smp_processor_id(), default_cpu = -1;
-+	struct cpumask *mask;
-+	const struct cpumask *hk_mask;
-+
-+	if (housekeeping_cpu(cpu, HK_TYPE_TIMER)) {
-+		if (!idle_cpu(cpu))
-+			return cpu;
-+		default_cpu = cpu;
-+	}
-+
-+	hk_mask = housekeeping_cpumask(HK_TYPE_TIMER);
-+
-+	for (mask = per_cpu(sched_cpu_topo_masks, cpu);
-+	     mask < per_cpu(sched_cpu_topo_end_mask, cpu); mask++)
-+		for_each_cpu_and(i, mask, hk_mask)
-+			if (!idle_cpu(i))
-+				return i;
-+
-+	if (default_cpu == -1)
-+		default_cpu = housekeeping_any_cpu(HK_TYPE_TIMER);
-+	cpu = default_cpu;
-+
-+	return cpu;
-+}
-+
-+/*
-+ * When add_timer_on() enqueues a timer into the timer wheel of an
-+ * idle CPU then this timer might expire before the next timer event
-+ * which is scheduled to wake up that CPU. In case of a completely
-+ * idle system the next event might even be infinite time into the
-+ * future. wake_up_idle_cpu() ensures that the CPU is woken up and
-+ * leaves the inner idle loop so the newly added timer is taken into
-+ * account when the CPU goes back to idle and evaluates the timer
-+ * wheel for the next timer event.
-+ */
-+static inline void wake_up_idle_cpu(int cpu)
-+{
-+	struct rq *rq = cpu_rq(cpu);
-+
-+	if (cpu == smp_processor_id())
-+		return;
-+
-+	/*
-+	 * Set TIF_NEED_RESCHED and send an IPI if in the non-polling
-+	 * part of the idle loop. This forces an exit from the idle loop
-+	 * and a round trip to schedule(). Now this could be optimized
-+	 * because a simple new idle loop iteration is enough to
-+	 * re-evaluate the next tick. Provided some re-ordering of tick
-+	 *   nohz functions that would need to follow TIF_POLLING_NRFLAG
-+	 * clearing:
-+	 *
-+	 * - On most architectures, a simple fetch_or on ti::flags with a
-+	 *   "0" value would be enough to know if an IPI needs to be sent.
-+	 *
-+	 * - x86 needs to perform a last need_resched() check between
-+	 *   monitor and mwait which doesn't take timers into account.
-+	 *   There a dedicated TIF_TIMER flag would be required to
-+	 *   fetch_or here and be checked along with TIF_NEED_RESCHED
-+	 *   before mwait().
-+	 *
-+	 * However, remote timer enqueue is not such a frequent event
-+	 * and testing of the above solutions didn't appear to report
-+	 *   much benefit.
-+	 */
-+	if (set_nr_and_not_polling(rq->idle))
-+		smp_send_reschedule(cpu);
-+	else
-+		trace_sched_wake_idle_without_ipi(cpu);
-+}
-+
-+static inline bool wake_up_full_nohz_cpu(int cpu)
-+{
-+	/*
-+	 * We just need the target to call irq_exit() and re-evaluate
-+	 * the next tick. The nohz full kick at least implies that.
-+	 * If needed we can still optimize that later with an
-+	 * empty IRQ.
-+	 */
-+	if (cpu_is_offline(cpu))
-+		return true;  /* Don't try to wake offline CPUs. */
-+	if (tick_nohz_full_cpu(cpu)) {
-+		if (cpu != smp_processor_id() ||
-+		    tick_nohz_tick_stopped())
-+			tick_nohz_full_kick_cpu(cpu);
-+		return true;
-+	}
-+
-+	return false;
-+}
-+
-+void wake_up_nohz_cpu(int cpu)
-+{
-+	if (!wake_up_full_nohz_cpu(cpu))
-+		wake_up_idle_cpu(cpu);
-+}
-+
-+static void nohz_csd_func(void *info)
-+{
-+	struct rq *rq = info;
-+	int cpu = cpu_of(rq);
-+	unsigned int flags;
-+
-+	/*
-+	 * Release the rq::nohz_csd.
-+	 */
-+	flags = atomic_fetch_andnot(NOHZ_KICK_MASK, nohz_flags(cpu));
-+	WARN_ON(!(flags & NOHZ_KICK_MASK));
-+
-+	rq->idle_balance = idle_cpu(cpu);
-+	if (rq->idle_balance && !need_resched()) {
-+		rq->nohz_idle_balance = flags;
-+		raise_softirq_irqoff(SCHED_SOFTIRQ);
-+	}
-+}
-+
-+#endif /* CONFIG_NO_HZ_COMMON */
-+#endif /* CONFIG_SMP */
-+
-+static inline void wakeup_preempt(struct rq *rq)
-+{
-+	if (sched_rq_first_task(rq) != rq->curr)
-+		resched_curr(rq);
-+}
-+
-+static __always_inline
-+int __task_state_match(struct task_struct *p, unsigned int state)
-+{
-+	if (READ_ONCE(p->__state) & state)
-+		return 1;
-+
-+	if (READ_ONCE(p->saved_state) & state)
-+		return -1;
-+
-+	return 0;
-+}
-+
-+static __always_inline
-+int task_state_match(struct task_struct *p, unsigned int state)
-+{
-+	/*
-+	 * Serialize against current_save_and_set_rtlock_wait_state(),
-+	 * current_restore_rtlock_saved_state(), and __refrigerator().
-+	 */
-+	guard(raw_spinlock_irq)(&p->pi_lock);
-+
-+	return __task_state_match(p, state);
-+}
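-+
-+/*
-+ * Return-value sketch for the helpers above: 1 when p->__state matches,
-+ * -1 when only p->saved_state matches (e.g. the task's state was stashed
-+ * while it blocks on a PREEMPT_RT spinlock), 0 when neither matches:
-+ *
-+ *	switch (task_state_match(p, TASK_UNINTERRUPTIBLE)) {
-+ *	case  1: ... blocked in TASK_UNINTERRUPTIBLE proper ...
-+ *	case -1: ... parked in ->saved_state, treat as still blocked ...
-+ *	case  0: ... no longer in that state ...
-+ *	}
-+ */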
-+
-+/*
-+ * wait_task_inactive - wait for a thread to unschedule.
-+ *
-+ * Wait for the thread to block in any of the states set in @match_state.
-+ * If it changes, i.e. @p might have woken up, then return zero.  When we
-+ * succeed in waiting for @p to be off its CPU, we return a positive number
-+ * (its total switch count).  If a second call a short while later returns the
-+ * same number, the caller can be sure that @p has remained unscheduled the
-+ * whole time.
-+ *
-+ * The caller must ensure that the task *will* unschedule sometime soon,
-+ * else this function might spin for a *long* time. This function can't
-+ * be called with interrupts off, or it may introduce deadlock with
-+ * smp_call_function() if an IPI is sent by the same process we are
-+ * waiting to become inactive.
-+ */
-+unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state)
-+{
-+	unsigned long flags;
-+	int running, queued, match;
-+	unsigned long ncsw;
-+	struct rq *rq;
-+	raw_spinlock_t *lock;
-+
-+	for (;;) {
-+		rq = task_rq(p);
-+
-+		/*
-+		 * If the task is actively running on another CPU
-+		 * still, just relax and busy-wait without holding
-+		 * any locks.
-+		 *
-+		 * NOTE! Since we don't hold any locks, it's not
-+		 * even certain that "rq" stays as the right runqueue!
-+		 * But we don't care, since this will return false
-+		 * if the runqueue has changed and p is actually now
-+		 * running somewhere else!
-+		 */
-+		while (task_on_cpu(p)) {
-+			if (!task_state_match(p, match_state))
-+				return 0;
-+			cpu_relax();
-+		}
-+
-+		/*
-+		 * Ok, time to look more closely! We need the rq
-+		 * lock now, to be *sure*. If we're wrong, we'll
-+		 * just go back and repeat.
-+		 */
-+		task_access_lock_irqsave(p, &lock, &flags);
-+		trace_sched_wait_task(p);
-+		running = task_on_cpu(p);
-+		queued = p->on_rq;
-+		ncsw = 0;
-+		if ((match = __task_state_match(p, match_state))) {
-+			/*
-+			 * When matching on p->saved_state, consider this task
-+			 * still queued so it will wait.
-+			 */
-+			if (match < 0)
-+				queued = 1;
-+			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
-+		}
-+		task_access_unlock_irqrestore(p, lock, &flags);
-+
-+		/*
-+		 * If it changed from the expected state, bail out now.
-+		 */
-+		if (unlikely(!ncsw))
-+			break;
-+
-+		/*
-+		 * Was it really running after all now that we
-+		 * checked with the proper locks actually held?
-+		 *
-+		 * Oops. Go back and try again..
-+		 */
-+		if (unlikely(running)) {
-+			cpu_relax();
-+			continue;
-+		}
-+
-+		/*
-+		 * It's not enough that it's not actively running,
-+		 * it must be off the runqueue _entirely_, and not
-+		 * preempted!
-+		 *
-+		 * So if it was still runnable (but just not actively
-+		 * running right now), it's preempted, and we should
-+		 * yield - it could be a while.
-+		 */
-+		if (unlikely(queued)) {
-+			ktime_t to = NSEC_PER_SEC / HZ;
-+
-+			set_current_state(TASK_UNINTERRUPTIBLE);
-+			schedule_hrtimeout(&to, HRTIMER_MODE_REL_HARD);
-+			continue;
-+		}
-+
-+		/*
-+		 * Ahh, all good. It wasn't running, and it wasn't
-+		 * runnable, which means that it will never become
-+		 * running in the future either. We're all done!
-+		 */
-+		break;
-+	}
-+
-+	return ncsw;
-+}
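-+
-+/*
-+ * Usage sketch of the "same switch count" contract described above, as
-+ * the ptrace attach path does (illustrative):
-+ *
-+ *	unsigned long ncsw = wait_task_inactive(p, __TASK_TRACED);
-+ *
-+ *	... a short while later ...
-+ *	if (ncsw && wait_task_inactive(p, __TASK_TRACED) == ncsw)
-+ *		... p has remained unscheduled the whole time ...
-+ */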
-+
-+#ifdef CONFIG_SCHED_HRTICK
-+/*
-+ * Use HR-timers to deliver accurate preemption points.
-+ */
-+
-+static void hrtick_clear(struct rq *rq)
-+{
-+	if (hrtimer_active(&rq->hrtick_timer))
-+		hrtimer_cancel(&rq->hrtick_timer);
-+}
-+
-+/*
-+ * High-resolution timer tick.
-+ * Runs from hardirq context with interrupts disabled.
-+ */
-+static enum hrtimer_restart hrtick(struct hrtimer *timer)
-+{
-+	struct rq *rq = container_of(timer, struct rq, hrtick_timer);
-+
-+	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
-+
-+	raw_spin_lock(&rq->lock);
-+	resched_curr(rq);
-+	raw_spin_unlock(&rq->lock);
-+
-+	return HRTIMER_NORESTART;
-+}
-+
-+/*
-+ * Use hrtick when:
-+ *  - enabled by features
-+ *  - hrtimer is actually high res
-+ */
-+static inline int hrtick_enabled(struct rq *rq)
-+{
-+	/**
-+	 * Alt schedule FW doesn't support sched_feat yet
-+	if (!sched_feat(HRTICK))
-+		return 0;
-+	*/
-+	if (!cpu_active(cpu_of(rq)))
-+		return 0;
-+	return hrtimer_is_hres_active(&rq->hrtick_timer);
-+}
-+
-+#ifdef CONFIG_SMP
-+
-+static void __hrtick_restart(struct rq *rq)
-+{
-+	struct hrtimer *timer = &rq->hrtick_timer;
-+	ktime_t time = rq->hrtick_time;
-+
-+	hrtimer_start(timer, time, HRTIMER_MODE_ABS_PINNED_HARD);
-+}
-+
-+/*
-+ * called from hardirq (IPI) context
-+ */
-+static void __hrtick_start(void *arg)
-+{
-+	struct rq *rq = arg;
-+
-+	raw_spin_lock(&rq->lock);
-+	__hrtick_restart(rq);
-+	raw_spin_unlock(&rq->lock);
-+}
-+
-+/*
-+ * Called to set the hrtick timer state.
-+ *
-+ * called with rq->lock held and IRQs disabled
-+ */
-+static inline void hrtick_start(struct rq *rq, u64 delay)
-+{
-+	struct hrtimer *timer = &rq->hrtick_timer;
-+	s64 delta;
-+
-+	/*
-+	 * Don't schedule slices shorter than 10000ns, that just
-+	 * doesn't make sense and can cause timer DoS.
-+	 */
-+	delta = max_t(s64, delay, 10000LL);
-+
-+	rq->hrtick_time = ktime_add_ns(timer->base->get_time(), delta);
-+
-+	if (rq == this_rq())
-+		__hrtick_restart(rq);
-+	else
-+		smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
-+}
-+
-+#else
-+/*
-+ * Called to set the hrtick timer state.
-+ *
-+ * called with rq->lock held and IRQs disabled
-+ */
-+static inline void hrtick_start(struct rq *rq, u64 delay)
-+{
-+	/*
-+	 * Don't schedule slices shorter than 10000ns, that just
-+	 * doesn't make sense. Rely on vruntime for fairness.
-+	 */
-+	delay = max_t(u64, delay, 10000LL);
-+	hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
-+		      HRTIMER_MODE_REL_PINNED_HARD);
-+}
-+#endif /* CONFIG_SMP */
-+
-+static void hrtick_rq_init(struct rq *rq)
-+{
-+#ifdef CONFIG_SMP
-+	INIT_CSD(&rq->hrtick_csd, __hrtick_start, rq);
-+#endif
-+
-+	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
-+	rq->hrtick_timer.function = hrtick;
-+}
-+#else	/* CONFIG_SCHED_HRTICK */
-+static inline int hrtick_enabled(struct rq *rq)
-+{
-+	return 0;
-+}
-+
-+static inline void hrtick_clear(struct rq *rq)
-+{
-+}
-+
-+static inline void hrtick_rq_init(struct rq *rq)
-+{
-+}
-+#endif	/* CONFIG_SCHED_HRTICK */
-+
-+/*
-+ * activate_task - move a task to the runqueue.
-+ *
-+ * Context: rq->lock
-+ */
-+static void activate_task(struct task_struct *p, struct rq *rq)
-+{
-+	enqueue_task(p, rq, ENQUEUE_WAKEUP);
-+
-+	WRITE_ONCE(p->on_rq, TASK_ON_RQ_QUEUED);
-+	ASSERT_EXCLUSIVE_WRITER(p->on_rq);
-+
-+	/*
-+	 * If in_iowait is set, the code below may not trigger any cpufreq
-+	 * utilization updates, so do it here explicitly with the IOWAIT flag
-+	 * passed.
-+	 */
-+	cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT * p->in_iowait);
-+}
-+
-+static void block_task(struct rq *rq, struct task_struct *p)
-+{
-+	dequeue_task(p, rq, DEQUEUE_SLEEP);
-+
-+	WRITE_ONCE(p->on_rq, 0);
-+	ASSERT_EXCLUSIVE_WRITER(p->on_rq);
-+	if (p->sched_contributes_to_load)
-+		rq->nr_uninterruptible++;
-+
-+	if (p->in_iowait) {
-+		atomic_inc(&rq->nr_iowait);
-+		delayacct_blkio_start();
-+	}
-+}
-+
-+static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
-+{
-+#ifdef CONFIG_SMP
-+	/*
-+	 * After ->cpu is set up to a new value, task_access_lock(p, ...) can be
-+	 * successfully executed on another CPU. We must ensure that updates of
-+	 * per-task data have been completed by this moment.
-+	 */
-+	smp_wmb();
-+
-+	WRITE_ONCE(task_thread_info(p)->cpu, cpu);
-+#endif
-+}
-+
-+#ifdef CONFIG_SMP
-+
-+void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
-+{
-+#ifdef CONFIG_SCHED_DEBUG
-+	unsigned int state = READ_ONCE(p->__state);
-+
-+	/*
-+	 * We should never call set_task_cpu() on a blocked task,
-+	 * ttwu() will sort out the placement.
-+	 */
-+	WARN_ON_ONCE(state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq);
-+
-+#ifdef CONFIG_LOCKDEP
-+	/*
-+	 * The caller should hold either p->pi_lock or rq->lock, when changing
-+	 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
-+	 *
-+	 * sched_move_task() holds both and thus holding either pins the cgroup,
-+	 * see task_group().
-+	 */
-+	WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
-+				      lockdep_is_held(&task_rq(p)->lock)));
-+#endif
-+	/*
-+	 * Clearly, migrating tasks to offline CPUs is a fairly daft thing.
-+	 */
-+	WARN_ON_ONCE(!cpu_online(new_cpu));
-+
-+	WARN_ON_ONCE(is_migration_disabled(p));
-+#endif
-+	trace_sched_migrate_task(p, new_cpu);
-+
-+	if (task_cpu(p) != new_cpu) {
-+		rseq_migrate(p);
-+		sched_mm_cid_migrate_from(p);
-+		perf_event_task_migrate(p);
-+	}
-+
-+	__set_task_cpu(p, new_cpu);
-+}
-+
-+static void
-+__do_set_cpus_ptr(struct task_struct *p, const struct cpumask *new_mask)
-+{
-+	/*
-+	 * This here violates the locking rules for affinity, since we're only
-+	 * supposed to change these variables while holding both rq->lock and
-+	 * p->pi_lock.
-+	 *
-+	 * HOWEVER, it magically works, because ttwu() is the only code that
-+	 * accesses these variables under p->pi_lock and only does so after
-+	 * smp_cond_load_acquire(&p->on_cpu, !VAL), and we're in __schedule()
-+	 * before finish_task().
-+	 *
-+	 * XXX do further audits, this smells like something putrid.
-+	 */
-+	SCHED_WARN_ON(!p->on_cpu);
-+	p->cpus_ptr = new_mask;
-+}
-+
-+void migrate_disable(void)
-+{
-+	struct task_struct *p = current;
-+	int cpu;
-+
-+	if (p->migration_disabled) {
-+#ifdef CONFIG_DEBUG_PREEMPT
-+		/*
-+		 * Warn about overflow half-way through the range.
-+		 */
-+		WARN_ON_ONCE((s16)p->migration_disabled < 0);
-+#endif
-+		p->migration_disabled++;
-+		return;
-+	}
-+
-+	guard(preempt)();
-+	cpu = smp_processor_id();
-+	if (cpumask_test_cpu(cpu, &p->cpus_mask)) {
-+		cpu_rq(cpu)->nr_pinned++;
-+		p->migration_disabled = 1;
-+		/*
-+		 * Violates locking rules! see comment in __do_set_cpus_ptr().
-+		 */
-+		if (p->cpus_ptr == &p->cpus_mask)
-+			__do_set_cpus_ptr(p, cpumask_of(cpu));
-+	}
-+}
-+EXPORT_SYMBOL_GPL(migrate_disable);
-+
-+void migrate_enable(void)
-+{
-+	struct task_struct *p = current;
-+
-+#ifdef CONFIG_DEBUG_PREEMPT
-+	/*
-+	 * Check both overflow from migrate_disable() and superfluous
-+	 * migrate_enable().
-+	 */
-+	if (WARN_ON_ONCE((s16)p->migration_disabled <= 0))
-+		return;
-+#endif
-+
-+	if (p->migration_disabled > 1) {
-+		p->migration_disabled--;
-+		return;
-+	}
-+
-+	/*
-+	 * Ensure stop_task runs either before or after this, and that
-+	 * __set_cpus_allowed_ptr(SCA_MIGRATE_ENABLE) doesn't schedule().
-+	 */
-+	guard(preempt)();
-+	/*
-+	 * Assumption: current should be running on allowed cpu
-+	 */
-+	WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(), &p->cpus_mask));
-+	if (p->cpus_ptr != &p->cpus_mask)
-+		__do_set_cpus_ptr(p, &p->cpus_mask);
-+	/*
-+	 * Mustn't clear migration_disabled() until cpus_ptr points back at the
-+	 * regular cpus_mask, otherwise things that race (eg.
-+	 * select_fallback_rq) get confused.
-+	 */
-+	barrier();
-+	p->migration_disabled = 0;
-+	this_rq()->nr_pinned--;
-+}
-+EXPORT_SYMBOL_GPL(migrate_enable);
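-+
-+/*
-+ * Usage sketch: the pair above pins the caller to its current CPU
-+ * without disabling preemption, e.g. around a per-CPU fast path:
-+ *
-+ *	migrate_disable();
-+ *	... this_cpu_*() accesses stay on one CPU, though the task
-+ *	    may still be preempted ...
-+ *	migrate_enable();
-+ */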
-+
-+static void __migrate_force_enable(struct task_struct *p, struct rq *rq)
-+{
-+	if (likely(p->cpus_ptr != &p->cpus_mask))
-+		__do_set_cpus_ptr(p, &p->cpus_mask);
-+	p->migration_disabled = 0;
-+	/* When p is migrate_disabled, rq->lock should be held */
-+	rq->nr_pinned--;
-+}
-+
-+static inline bool rq_has_pinned_tasks(struct rq *rq)
-+{
-+	return rq->nr_pinned;
-+}
-+
-+/*
-+ * Per-CPU kthreads are allowed to run on !active && online CPUs, see
-+ * __set_cpus_allowed_ptr() and select_fallback_rq().
-+ */
-+static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
-+{
-+	/* When not in the task's cpumask, no point in looking further. */
-+	if (!cpumask_test_cpu(cpu, p->cpus_ptr))
-+		return false;
-+
-+	/* migrate_disabled() must be allowed to finish. */
-+	if (is_migration_disabled(p))
-+		return cpu_online(cpu);
-+
-+	/* Non kernel threads are not allowed during either online or offline. */
-+	if (!(p->flags & PF_KTHREAD))
-+		return cpu_active(cpu) && task_cpu_possible(cpu, p);
-+
-+	/* KTHREAD_IS_PER_CPU is always allowed. */
-+	if (kthread_is_per_cpu(p))
-+		return cpu_online(cpu);
-+
-+	/* Regular kernel threads don't get to stay during offline. */
-+	if (cpu_dying(cpu))
-+		return false;
-+
-+	/* But are allowed during online. */
-+	return cpu_online(cpu);
-+}
-+
-+/*
-+ * This is how migration works:
-+ *
-+ * 1) we invoke migration_cpu_stop() on the target CPU using
-+ *    stop_one_cpu().
-+ * 2) stopper starts to run (implicitly forcing the migrated thread
-+ *    off the CPU)
-+ * 3) it checks whether the migrated task is still in the wrong runqueue.
-+ * 4) if it's in the wrong runqueue then the migration thread removes
-+ *    it and puts it into the right queue.
-+ * 5) stopper completes and stop_one_cpu() returns and the migration
-+ *    is done; a caller-side sketch follows.
-+ */
-+
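-+/*
-+ * Caller-side sketch of the sequence above; affine_move_task() below
-+ * uses exactly this shape:
-+ *
-+ *	struct migration_arg arg = { p, dest_cpu };
-+ *
-+ *	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-+ *	stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
-+ */
-+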
-+/*
-+ * move_queued_task - move a queued task to new rq.
-+ *
-+ * Returns (locked) new rq. Old rq's lock is released.
-+ */
-+struct rq *move_queued_task(struct rq *rq, struct task_struct *p, int new_cpu)
-+{
-+	lockdep_assert_held(&rq->lock);
-+
-+	WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING);
-+	dequeue_task(p, rq, 0);
-+	set_task_cpu(p, new_cpu);
-+	raw_spin_unlock(&rq->lock);
-+
-+	rq = cpu_rq(new_cpu);
-+
-+	raw_spin_lock(&rq->lock);
-+	WARN_ON_ONCE(task_cpu(p) != new_cpu);
-+
-+	sched_mm_cid_migrate_to(rq, p);
-+
-+	sched_task_sanity_check(p, rq);
-+	enqueue_task(p, rq, 0);
-+	WRITE_ONCE(p->on_rq, TASK_ON_RQ_QUEUED);
-+	wakeup_preempt(rq);
-+
-+	return rq;
-+}
-+
-+struct migration_arg {
-+	struct task_struct *task;
-+	int dest_cpu;
-+};
-+
-+/*
-+ * Move (not current) task off this CPU, onto the destination CPU. We're doing
-+ * this because either it can't run here any more (set_cpus_allowed()
-+ * away from this CPU, or CPU going down), or because we're
-+ * attempting to rebalance this task on exec (sched_exec).
-+ *
-+ * So we race with normal scheduler movements, but that's OK, as long
-+ * as the task is no longer on this CPU.
-+ */
-+static struct rq *__migrate_task(struct rq *rq, struct task_struct *p, int dest_cpu)
-+{
-+	/* Affinity changed (again). */
-+	if (!is_cpu_allowed(p, dest_cpu))
-+		return rq;
-+
-+	return move_queued_task(rq, p, dest_cpu);
-+}
-+
-+/*
-+ * migration_cpu_stop - this will be executed by a high-prio stopper thread
-+ * and performs thread migration by bumping thread off CPU then
-+ * 'pushing' onto another runqueue.
-+ */
-+static int migration_cpu_stop(void *data)
-+{
-+	struct migration_arg *arg = data;
-+	struct task_struct *p = arg->task;
-+	struct rq *rq = this_rq();
-+	unsigned long flags;
-+
-+	/*
-+	 * The original target CPU might have gone down and we might
-+	 * be on another CPU but it doesn't matter.
-+	 */
-+	local_irq_save(flags);
-+	/*
-+	 * We need to explicitly wake pending tasks before running
-+	 * __migrate_task() such that we will not miss enforcing cpus_ptr
-+	 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
-+	 */
-+	flush_smp_call_function_queue();
-+
-+	raw_spin_lock(&p->pi_lock);
-+	raw_spin_lock(&rq->lock);
-+	/*
-+	 * If task_rq(p) != rq, it cannot be migrated here, because we're
-+	 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because
-+	 * we're holding p->pi_lock.
-+	 */
-+	if (task_rq(p) == rq && task_on_rq_queued(p)) {
-+		update_rq_clock(rq);
-+		rq = __migrate_task(rq, p, arg->dest_cpu);
-+	}
-+	raw_spin_unlock(&rq->lock);
-+	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-+
-+	return 0;
-+}
-+
-+static inline void
-+set_cpus_allowed_common(struct task_struct *p, struct affinity_context *ctx)
-+{
-+	cpumask_copy(&p->cpus_mask, ctx->new_mask);
-+	p->nr_cpus_allowed = cpumask_weight(ctx->new_mask);
-+
-+	/*
-+	 * Swap in a new user_cpus_ptr if SCA_USER flag set
-+	 */
-+	if (ctx->flags & SCA_USER)
-+		swap(p->user_cpus_ptr, ctx->user_mask);
-+}
-+
-+static void
-+__do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx)
-+{
-+	lockdep_assert_held(&p->pi_lock);
-+	set_cpus_allowed_common(p, ctx);
-+}
-+
-+/*
-+ * Used for kthread_bind() and select_fallback_rq(); in both cases the user
-+ * affinity (if any) should be destroyed too.
-+ */
-+void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
-+{
-+	struct affinity_context ac = {
-+		.new_mask  = new_mask,
-+		.user_mask = NULL,
-+		.flags     = SCA_USER,	/* clear the user requested mask */
-+	};
-+	union cpumask_rcuhead {
-+		cpumask_t cpumask;
-+		struct rcu_head rcu;
-+	};
-+
-+	__do_set_cpus_allowed(p, &ac);
-+
-+	if (is_migration_disabled(p) && !cpumask_test_cpu(task_cpu(p), &p->cpus_mask))
-+		__migrate_force_enable(p, task_rq(p));
-+
-+	/*
-+	 * Because this is called with p->pi_lock held, it is not possible
-+	 * to use kfree() here (when PREEMPT_RT=y), therefore punt to using
-+	 * kfree_rcu().
-+	 */
-+	kfree_rcu((union cpumask_rcuhead *)ac.user_mask, rcu);
-+}
-+
-+int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src,
-+		      int node)
-+{
-+	cpumask_t *user_mask;
-+	unsigned long flags;
-+
-+	/*
-+	 * Always clear dst->user_cpus_ptr first as their user_cpus_ptr's
-+	 * may differ by now due to racing.
-+	 */
-+	dst->user_cpus_ptr = NULL;
-+
-+	/*
-+	 * This check is racy and losing the race is a valid situation.
-+	 * It is not worth the extra overhead of taking the pi_lock on
-+	 * every fork/clone.
-+	 */
-+	if (data_race(!src->user_cpus_ptr))
-+		return 0;
-+
-+	user_mask = alloc_user_cpus_ptr(node);
-+	if (!user_mask)
-+		return -ENOMEM;
-+
-+	/*
-+	 * Use pi_lock to protect content of user_cpus_ptr
-+	 *
-+	 * Though unlikely, user_cpus_ptr can be reset to NULL by a concurrent
-+	 * do_set_cpus_allowed().
-+	 */
-+	raw_spin_lock_irqsave(&src->pi_lock, flags);
-+	if (src->user_cpus_ptr) {
-+		swap(dst->user_cpus_ptr, user_mask);
-+		cpumask_copy(dst->user_cpus_ptr, src->user_cpus_ptr);
-+	}
-+	raw_spin_unlock_irqrestore(&src->pi_lock, flags);
-+
-+	if (unlikely(user_mask))
-+		kfree(user_mask);
-+
-+	return 0;
-+}
-+
-+static inline struct cpumask *clear_user_cpus_ptr(struct task_struct *p)
-+{
-+	struct cpumask *user_mask = NULL;
-+
-+	swap(p->user_cpus_ptr, user_mask);
-+
-+	return user_mask;
-+}
-+
-+void release_user_cpus_ptr(struct task_struct *p)
-+{
-+	kfree(clear_user_cpus_ptr(p));
-+}
-+
-+#endif
-+
-+/**
-+ * task_curr - is this task currently executing on a CPU?
-+ * @p: the task in question.
-+ *
-+ * Return: 1 if the task is currently executing. 0 otherwise.
-+ */
-+inline int task_curr(const struct task_struct *p)
-+{
-+	return cpu_curr(task_cpu(p)) == p;
-+}
-+
-+#ifdef CONFIG_SMP
-+/***
-+ * kick_process - kick a running thread to enter/exit the kernel
-+ * @p: the to-be-kicked thread
-+ *
-+ * Cause a process which is running on another CPU to enter
-+ * kernel-mode, without any delay. (to get signals handled.)
-+ *
-+ * NOTE: this function doesn't have to take the runqueue lock,
-+ * because all it wants to ensure is that the remote task enters
-+ * the kernel. If the IPI races and the task has been migrated
-+ * to another CPU then no harm is done and the purpose has been
-+ * achieved as well.
-+ */
-+void kick_process(struct task_struct *p)
-+{
-+	guard(preempt)();
-+	int cpu = task_cpu(p);
-+
-+	if ((cpu != smp_processor_id()) && task_curr(p))
-+		smp_send_reschedule(cpu);
-+}
-+EXPORT_SYMBOL_GPL(kick_process);
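-+
-+/*
-+ * Usage sketch (illustrative): the signal code relies on this to make a
-+ * remote task notice TIF_SIGPENDING promptly when it cannot be woken:
-+ *
-+ *	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
-+ *		kick_process(t);
-+ */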
-+
-+/*
-+ * ->cpus_ptr is protected by both rq->lock and p->pi_lock
-+ *
-+ * A few notes on cpu_active vs cpu_online:
-+ *
-+ *  - cpu_active must be a subset of cpu_online
-+ *
-+ *  - on CPU-up we allow per-CPU kthreads on the online && !active CPU,
-+ *    see __set_cpus_allowed_ptr(). At this point the newly online
-+ *    CPU isn't yet part of the sched domains, and balancing will not
-+ *    see it.
-+ *
-+ *  - on CPU-down we clear cpu_active() to mask the sched domains and
-+ *    prevent the load balancer from placing new tasks on the to-be-removed
-+ *    CPU. Existing tasks will remain running there and will be taken
-+ *    off.
-+ *
-+ * This means that fallback selection must not select !active CPUs.
-+ * And can assume that any active CPU must be online. Conversely
-+ * select_task_rq() below may allow selection of !active CPUs in order
-+ * to satisfy the above rules.
-+ */
-+static int select_fallback_rq(int cpu, struct task_struct *p)
-+{
-+	int nid = cpu_to_node(cpu);
-+	const struct cpumask *nodemask = NULL;
-+	enum { cpuset, possible, fail } state = cpuset;
-+	int dest_cpu;
-+
-+	/*
-+	 * If the node that the CPU is on has been offlined, cpu_to_node()
-+	 * will return -1. There is no CPU on the node, and we should
-+	 * select the CPU on the other node.
-+	 */
-+	if (nid != -1) {
-+		nodemask = cpumask_of_node(nid);
-+
-+		/* Look for allowed, online CPU in same node. */
-+		for_each_cpu(dest_cpu, nodemask) {
-+			if (is_cpu_allowed(p, dest_cpu))
-+				return dest_cpu;
-+		}
-+	}
-+
-+	for (;;) {
-+		/* Any allowed, online CPU? */
-+		for_each_cpu(dest_cpu, p->cpus_ptr) {
-+			if (!is_cpu_allowed(p, dest_cpu))
-+				continue;
-+			goto out;
-+		}
-+
-+		/* No more Mr. Nice Guy. */
-+		switch (state) {
-+		case cpuset:
-+			if (cpuset_cpus_allowed_fallback(p)) {
-+				state = possible;
-+				break;
-+			}
-+			fallthrough;
-+		case possible:
-+			/*
-+			 * XXX When called from select_task_rq() we only
-+			 * hold p->pi_lock and again violate locking order.
-+			 *
-+			 * More yuck to audit.
-+			 */
-+			do_set_cpus_allowed(p, task_cpu_possible_mask(p));
-+			state = fail;
-+			break;
-+
-+		case fail:
-+			BUG();
-+			break;
-+		}
-+	}
-+
-+out:
-+	if (state != cpuset) {
-+		/*
-+		 * Don't tell them about moving exiting tasks or
-+		 * kernel threads (both mm NULL), since they never
-+		 * leave kernel.
-+		 */
-+		if (p->mm && printk_ratelimit()) {
-+			printk_deferred("process %d (%s) no longer affine to cpu%d\n",
-+					task_pid_nr(p), p->comm, cpu);
-+		}
-+	}
-+
-+	return dest_cpu;
-+}
-+
-+static inline void
-+sched_preempt_mask_flush(cpumask_t *mask, int prio, int ref)
-+{
-+	int cpu;
-+
-+	cpumask_copy(mask, sched_preempt_mask + ref);
-+	if (prio < ref) {
-+		for_each_clear_bit(cpu, cpumask_bits(mask), nr_cpumask_bits) {
-+			if (prio < cpu_rq(cpu)->prio)
-+				cpumask_set_cpu(cpu, mask);
-+		}
-+	} else {
-+		for_each_cpu_andnot(cpu, mask, sched_idle_mask) {
-+			if (prio >= cpu_rq(cpu)->prio)
-+				cpumask_clear_cpu(cpu, mask);
-+		}
-+	}
-+}
-+
-+static inline int
-+preempt_mask_check(cpumask_t *preempt_mask, cpumask_t *allow_mask, int prio)
-+{
-+	cpumask_t *mask = sched_preempt_mask + prio;
-+	int pr = atomic_read(&sched_prio_record);
-+
-+	if (pr != prio && SCHED_QUEUE_BITS - 1 != prio) {
-+		sched_preempt_mask_flush(mask, prio, pr);
-+		atomic_set(&sched_prio_record, prio);
-+	}
-+
-+	return cpumask_and(preempt_mask, allow_mask, mask);
-+}
-+
-+__read_mostly idle_select_func_t idle_select_func ____cacheline_aligned_in_smp = cpumask_and;
-+
-+static inline int select_task_rq(struct task_struct *p)
-+{
-+	cpumask_t allow_mask, mask;
-+
-+	if (unlikely(!cpumask_and(&allow_mask, p->cpus_ptr, cpu_active_mask)))
-+		return select_fallback_rq(task_cpu(p), p);
-+
-+	if (idle_select_func(&mask, &allow_mask, sched_idle_mask)	||
-+	    preempt_mask_check(&mask, &allow_mask, task_sched_prio(p)))
-+		return best_mask_cpu(task_cpu(p), &mask);
-+
-+	return best_mask_cpu(task_cpu(p), &allow_mask);
-+}
-+
-+void sched_set_stop_task(int cpu, struct task_struct *stop)
-+{
-+	static struct lock_class_key stop_pi_lock;
-+	struct sched_param stop_param = { .sched_priority = STOP_PRIO };
-+	struct sched_param start_param = { .sched_priority = 0 };
-+	struct task_struct *old_stop = cpu_rq(cpu)->stop;
-+
-+	if (stop) {
-+		/*
-+		 * Make it appear like a SCHED_FIFO task; it's something
-+		 * userspace knows about and won't get confused about.
-+		 *
-+		 * Also, it will make PI more or less work without too
-+		 * much confusion -- but then, stop work should not
-+		 * rely on PI working anyway.
-+		 */
-+		sched_setscheduler_nocheck(stop, SCHED_FIFO, &stop_param);
-+
-+		/*
-+		 * The PI code calls rt_mutex_setprio() with ->pi_lock held to
-+		 * adjust the effective priority of a task. As a result,
-+		 * rt_mutex_setprio() can trigger (RT) balancing operations,
-+		 * which can then trigger wakeups of the stop thread to push
-+		 * around the current task.
-+		 *
-+		 * The stop task itself will never be part of the PI-chain, it
-+		 * never blocks, therefore that ->pi_lock recursion is safe.
-+		 * Tell lockdep about this by placing the stop->pi_lock in its
-+		 * own class.
-+		 */
-+		lockdep_set_class(&stop->pi_lock, &stop_pi_lock);
-+	}
-+
-+	cpu_rq(cpu)->stop = stop;
-+
-+	if (old_stop) {
-+		/*
-+		 * Reset it back to a normal scheduling policy so that
-+		 * it can die in pieces.
-+		 */
-+		sched_setscheduler_nocheck(old_stop, SCHED_NORMAL, &start_param);
-+	}
-+}
-+
-+static int affine_move_task(struct rq *rq, struct task_struct *p, int dest_cpu,
-+			    raw_spinlock_t *lock, unsigned long irq_flags)
-+	__releases(rq->lock)
-+	__releases(p->pi_lock)
-+{
-+	/* Can the task run on the task's current CPU? If so, we're done */
-+	if (!cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) {
-+		if (is_migration_disabled(p))
-+			__migrate_force_enable(p, rq);
-+
-+		if (task_on_cpu(p) || READ_ONCE(p->__state) == TASK_WAKING) {
-+			struct migration_arg arg = { p, dest_cpu };
-+
-+			/* Need help from migration thread: drop lock and wait. */
-+			__task_access_unlock(p, lock);
-+			raw_spin_unlock_irqrestore(&p->pi_lock, irq_flags);
-+			stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
-+			return 0;
-+		}
-+		if (task_on_rq_queued(p)) {
-+			/*
-+			 * OK, since we're going to drop the lock immediately
-+			 * afterwards anyway.
-+			 */
-+			update_rq_clock(rq);
-+			rq = move_queued_task(rq, p, dest_cpu);
-+			lock = &rq->lock;
-+		}
-+	}
-+	__task_access_unlock(p, lock);
-+	raw_spin_unlock_irqrestore(&p->pi_lock, irq_flags);
-+	return 0;
-+}
-+
-+static int __set_cpus_allowed_ptr_locked(struct task_struct *p,
-+					 struct affinity_context *ctx,
-+					 struct rq *rq,
-+					 raw_spinlock_t *lock,
-+					 unsigned long irq_flags)
-+{
-+	const struct cpumask *cpu_allowed_mask = task_cpu_possible_mask(p);
-+	const struct cpumask *cpu_valid_mask = cpu_active_mask;
-+	bool kthread = p->flags & PF_KTHREAD;
-+	int dest_cpu;
-+	int ret = 0;
-+
-+	if (kthread || is_migration_disabled(p)) {
-+		/*
-+		 * Kernel threads are allowed on online && !active CPUs,
-+		 * however, during cpu-hot-unplug, even these might get pushed
-+		 * away if not KTHREAD_IS_PER_CPU.
-+		 *
-+		 * Specifically, migration_disabled() tasks must not fail the
-+		 * cpumask_any_and_distribute() pick below, esp. so on
-+		 * SCA_MIGRATE_ENABLE, otherwise we'll not call
-+		 * set_cpus_allowed_common() and actually reset p->cpus_ptr.
-+		 */
-+		cpu_valid_mask = cpu_online_mask;
-+	}
-+
-+	if (!kthread && !cpumask_subset(ctx->new_mask, cpu_allowed_mask)) {
-+		ret = -EINVAL;
-+		goto out;
-+	}
-+
-+	/*
-+	 * Must re-check here, to close a race against __kthread_bind(),
-+	 * sched_setaffinity() is not guaranteed to observe the flag.
-+	 */
-+	if ((ctx->flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) {
-+		ret = -EINVAL;
-+		goto out;
-+	}
-+
-+	if (cpumask_equal(&p->cpus_mask, ctx->new_mask))
-+		goto out;
-+
-+	dest_cpu = cpumask_any_and(cpu_valid_mask, ctx->new_mask);
-+	if (dest_cpu >= nr_cpu_ids) {
-+		ret = -EINVAL;
-+		goto out;
-+	}
-+
-+	__do_set_cpus_allowed(p, ctx);
-+
-+	return affine_move_task(rq, p, dest_cpu, lock, irq_flags);
-+
-+out:
-+	__task_access_unlock(p, lock);
-+	raw_spin_unlock_irqrestore(&p->pi_lock, irq_flags);
-+
-+	return ret;
-+}
-+
-+/*
-+ * Change a given task's CPU affinity. Migrate the thread to a
-+ * proper CPU and schedule it away if the CPU it's executing on
-+ * is removed from the allowed bitmask.
-+ *
-+ * NOTE: the caller must have a valid reference to the task, the
-+ * task must not exit() & deallocate itself prematurely. The
-+ * call is not atomic; no spinlocks may be held.
-+ */
-+int __set_cpus_allowed_ptr(struct task_struct *p,
-+			   struct affinity_context *ctx)
-+{
-+	unsigned long irq_flags;
-+	struct rq *rq;
-+	raw_spinlock_t *lock;
-+
-+	raw_spin_lock_irqsave(&p->pi_lock, irq_flags);
-+	rq = __task_access_lock(p, &lock);
-+	/*
-+	 * Masking should be skipped if SCA_USER or any of the SCA_MIGRATE_*
-+	 * flags are set.
-+	 */
-+	if (p->user_cpus_ptr &&
-+	    !(ctx->flags & SCA_USER) &&
-+	    cpumask_and(rq->scratch_mask, ctx->new_mask, p->user_cpus_ptr))
-+		ctx->new_mask = rq->scratch_mask;
-+
-+	return __set_cpus_allowed_ptr_locked(p, ctx, rq, lock, irq_flags);
-+}
-+
-+int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
-+{
-+	struct affinity_context ac = {
-+		.new_mask  = new_mask,
-+		.flags     = 0,
-+	};
-+
-+	return __set_cpus_allowed_ptr(p, &ac);
-+}
-+EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
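-+
-+/*
-+ * Usage sketch: pin a freshly created kernel thread to one CPU
-+ * (illustrative; kthread_bind() itself uses do_set_cpus_allowed()):
-+ *
-+ *	struct task_struct *p = kthread_create(fn, data, "worker/%d", cpu);
-+ *
-+ *	if (!IS_ERR(p))
-+ *		set_cpus_allowed_ptr(p, cpumask_of(cpu));
-+ */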
-+
-+/*
-+ * Change a given task's CPU affinity to the intersection of its current
-+ * affinity mask and @subset_mask, writing the resulting mask to @new_mask.
-+ * If user_cpus_ptr is defined, use it as the basis for restricting CPU
-+ * affinity; otherwise, use cpu_online_mask instead.
-+ *
-+ * If the resulting mask is empty, leave the affinity unchanged and return
-+ * -EINVAL.
-+ */
-+static int restrict_cpus_allowed_ptr(struct task_struct *p,
-+				     struct cpumask *new_mask,
-+				     const struct cpumask *subset_mask)
-+{
-+	struct affinity_context ac = {
-+		.new_mask  = new_mask,
-+		.flags     = 0,
-+	};
-+	unsigned long irq_flags;
-+	raw_spinlock_t *lock;
-+	struct rq *rq;
-+	int err;
-+
-+	raw_spin_lock_irqsave(&p->pi_lock, irq_flags);
-+	rq = __task_access_lock(p, &lock);
-+
-+	if (!cpumask_and(new_mask, task_user_cpus(p), subset_mask)) {
-+		err = -EINVAL;
-+		goto err_unlock;
-+	}
-+
-+	return __set_cpus_allowed_ptr_locked(p, &ac, rq, lock, irq_flags);
-+
-+err_unlock:
-+	__task_access_unlock(p, lock);
-+	raw_spin_unlock_irqrestore(&p->pi_lock, irq_flags);
-+	return err;
-+}
-+
-+/*
-+ * Restrict the CPU affinity of task @p so that it is a subset of
-+ * task_cpu_possible_mask() and point @p->user_cpus_ptr to a copy of the
-+ * old affinity mask. If the resulting mask is empty, we warn and walk
-+ * up the cpuset hierarchy until we find a suitable mask.
-+ */
-+void force_compatible_cpus_allowed_ptr(struct task_struct *p)
-+{
-+	cpumask_var_t new_mask;
-+	const struct cpumask *override_mask = task_cpu_possible_mask(p);
-+
-+	alloc_cpumask_var(&new_mask, GFP_KERNEL);
-+
-+	/*
-+	 * __migrate_task() can fail silently in the face of concurrent
-+	 * offlining of the chosen destination CPU, so take the hotplug
-+	 * lock to ensure that the migration succeeds.
-+	 */
-+	cpus_read_lock();
-+	if (!cpumask_available(new_mask))
-+		goto out_set_mask;
-+
-+	if (!restrict_cpus_allowed_ptr(p, new_mask, override_mask))
-+		goto out_free_mask;
-+
-+	/*
-+	 * We failed to find a valid subset of the affinity mask for the
-+	 * task, so override it based on its cpuset hierarchy.
-+	 */
-+	cpuset_cpus_allowed(p, new_mask);
-+	override_mask = new_mask;
-+
-+out_set_mask:
-+	if (printk_ratelimit()) {
-+		printk_deferred("Overriding affinity for process %d (%s) to CPUs %*pbl\n",
-+				task_pid_nr(p), p->comm,
-+				cpumask_pr_args(override_mask));
-+	}
-+
-+	WARN_ON(set_cpus_allowed_ptr(p, override_mask));
-+out_free_mask:
-+	cpus_read_unlock();
-+	free_cpumask_var(new_mask);
-+}
-+
-+/*
-+ * Restore the affinity of a task @p which was previously restricted by a
-+ * call to force_compatible_cpus_allowed_ptr().
-+ *
-+ * It is the caller's responsibility to serialise this with any calls to
-+ * force_compatible_cpus_allowed_ptr(@p).
-+ */
-+void relax_compatible_cpus_allowed_ptr(struct task_struct *p)
-+{
-+	struct affinity_context ac = {
-+		.new_mask  = task_user_cpus(p),
-+		.flags     = 0,
-+	};
-+	int ret;
-+
-+	/*
-+	 * Try to restore the old affinity mask with __sched_setaffinity().
-+	 * Cpuset masking will be done there too.
-+	 */
-+	ret = __sched_setaffinity(p, &ac);
-+	WARN_ON_ONCE(ret);
-+}
-+
-+#else /* CONFIG_SMP */
-+
-+static inline int select_task_rq(struct task_struct *p)
-+{
-+	return 0;
-+}
-+
-+static inline bool rq_has_pinned_tasks(struct rq *rq)
-+{
-+	return false;
-+}
-+
-+#endif /* !CONFIG_SMP */
-+
-+static void
-+ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
-+{
-+	struct rq *rq;
-+
-+	if (!schedstat_enabled())
-+		return;
-+
-+	rq = this_rq();
-+
-+#ifdef CONFIG_SMP
-+	if (cpu == rq->cpu) {
-+		__schedstat_inc(rq->ttwu_local);
-+		__schedstat_inc(p->stats.nr_wakeups_local);
-+	} else {
-+		/** Alt schedule FW ToDo:
-+		 * How to do ttwu_wake_remote
-+		 */
-+	}
-+#endif /* CONFIG_SMP */
-+
-+	__schedstat_inc(rq->ttwu_count);
-+	__schedstat_inc(p->stats.nr_wakeups);
-+}
-+
-+/*
-+ * Mark the task runnable.
-+ */
-+static inline void ttwu_do_wakeup(struct task_struct *p)
-+{
-+	WRITE_ONCE(p->__state, TASK_RUNNING);
-+	trace_sched_wakeup(p);
-+}
-+
-+static inline void
-+ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
-+{
-+	if (p->sched_contributes_to_load)
-+		rq->nr_uninterruptible--;
-+
-+	if (
-+#ifdef CONFIG_SMP
-+	    !(wake_flags & WF_MIGRATED) &&
-+#endif
-+	    p->in_iowait) {
-+		delayacct_blkio_end(p);
-+		atomic_dec(&task_rq(p)->nr_iowait);
-+	}
-+
-+	activate_task(p, rq);
-+	wakeup_preempt(rq);
-+
-+	ttwu_do_wakeup(p);
-+}
-+
-+/*
-+ * Consider @p being inside a wait loop:
-+ *
-+ *   for (;;) {
-+ *      set_current_state(TASK_UNINTERRUPTIBLE);
-+ *
-+ *      if (CONDITION)
-+ *         break;
-+ *
-+ *      schedule();
-+ *   }
-+ *   __set_current_state(TASK_RUNNING);
-+ *
-+ * between set_current_state() and schedule(). In this case @p is still
-+ * runnable, so all that needs doing is change p->state back to TASK_RUNNING in
-+ * an atomic manner.
-+ *
-+ * By taking task_rq(p)->lock we serialize against schedule(), if @p->on_rq
-+ * then schedule() must still happen and p->state can be changed to
-+ * TASK_RUNNING. Otherwise we lost the race, schedule() has happened, and we
-+ * need to do a full wakeup with enqueue.
-+ *
-+ * Returns: %true when the wakeup is done,
-+ *          %false otherwise.
-+ */
-+static int ttwu_runnable(struct task_struct *p, int wake_flags)
-+{
-+	struct rq *rq;
-+	raw_spinlock_t *lock;
-+	int ret = 0;
-+
-+	rq = __task_access_lock(p, &lock);
-+	if (task_on_rq_queued(p)) {
-+		if (!task_on_cpu(p)) {
-+			/*
-+			 * When on_rq && !on_cpu the task is preempted, see if
-+			 * it should preempt the task that is current now.
-+			 */
-+			update_rq_clock(rq);
-+			wakeup_preempt(rq);
-+		}
-+		ttwu_do_wakeup(p);
-+		ret = 1;
-+	}
-+	__task_access_unlock(p, lock);
-+
-+	return ret;
-+}
-+
-+#ifdef CONFIG_SMP
-+void sched_ttwu_pending(void *arg)
-+{
-+	struct llist_node *llist = arg;
-+	struct rq *rq = this_rq();
-+	struct task_struct *p, *t;
-+	struct rq_flags rf;
-+
-+	if (!llist)
-+		return;
-+
-+	rq_lock_irqsave(rq, &rf);
-+	update_rq_clock(rq);
-+
-+	llist_for_each_entry_safe(p, t, llist, wake_entry.llist) {
-+		if (WARN_ON_ONCE(p->on_cpu))
-+			smp_cond_load_acquire(&p->on_cpu, !VAL);
-+
-+		if (WARN_ON_ONCE(task_cpu(p) != cpu_of(rq)))
-+			set_task_cpu(p, cpu_of(rq));
-+
-+		ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0);
-+	}
-+
-+	/*
-+	 * Must be after enqueueing at least once task such that
-+	 * idle_cpu() does not observe a false-negative -- if it does,
-+	 * it is possible for select_idle_siblings() to stack a number
-+	 * of tasks on this CPU during that window.
-+	 *
-+	 * It is OK to clear ttwu_pending when another task is pending;
-+	 * we will receive an IPI after local IRQs are enabled and then enqueue it.
-+	 * Since now nr_running > 0, idle_cpu() will always get correct result.
-+	 */
-+	WRITE_ONCE(rq->ttwu_pending, 0);
-+	rq_unlock_irqrestore(rq, &rf);
-+}
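-+
-+/*
-+ * Illustrative userspace analogue of the wakelist handoff drained above,
-+ * sketched with C11 <stdatomic.h> (roughly what the waker-side llist push
-+ * and the take-all detach feeding sched_ttwu_pending() amount to):
-+ *
-+ *   struct node { struct node *next; };
-+ *   _Atomic(struct node *) head;
-+ *
-+ *   void push(struct node *n)            // remote waker side
-+ *   {
-+ *       struct node *old = atomic_load(&head);
-+ *       do {
-+ *           n->next = old;
-+ *       } while (!atomic_compare_exchange_weak(&head, &old, n));
-+ *   }
-+ *
-+ *   struct node *take_all(void)          // IPI handler side
-+ *   {
-+ *       return atomic_exchange(&head, NULL);
-+ *   }
-+ */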
-+
-+/*
-+ * Prepare the scene for sending an IPI for a remote smp_call
-+ *
-+ * Returns true if the caller can proceed with sending the IPI.
-+ * Returns false otherwise.
-+ */
-+bool call_function_single_prep_ipi(int cpu)
-+{
-+	if (set_nr_if_polling(cpu_rq(cpu)->idle)) {
-+		trace_sched_wake_idle_without_ipi(cpu);
-+		return false;
-+	}
-+
-+	return true;
-+}
-+
-+/*
-+ * Queue a task on the target CPU's wake_list and wake the CPU via IPI if
-+ * necessary. The wakee CPU on receipt of the IPI will queue the task
-+ * via sched_ttwu_pending() for activation so the wakee incurs the cost
-+ * of the wakeup instead of the waker.
-+ */
-+static void __ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
-+{
-+	struct rq *rq = cpu_rq(cpu);
-+
-+	p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED);
-+
-+	WRITE_ONCE(rq->ttwu_pending, 1);
-+	__smp_call_single_queue(cpu, &p->wake_entry.llist);
-+}
-+
-+static inline bool ttwu_queue_cond(struct task_struct *p, int cpu)
-+{
-+	/*
-+	 * Do not complicate things with the async wake_list while the CPU is
-+	 * in hotplug state.
-+	 */
-+	if (!cpu_active(cpu))
-+		return false;
-+
-+	/* Ensure the task will still be allowed to run on the CPU. */
-+	if (!cpumask_test_cpu(cpu, p->cpus_ptr))
-+		return false;
-+
-+	/*
-+	 * If the CPU does not share cache, then queue the task on the
-+	 * remote rq's wakelist to avoid accessing remote data.
-+	 */
-+	if (!cpus_share_cache(smp_processor_id(), cpu))
-+		return true;
-+
-+	if (cpu == smp_processor_id())
-+		return false;
-+
-+	/*
-+	 * If the wakee CPU is idle, or the task is descheduling and the
-+	 * only running task on the CPU, then use the wakelist to offload
-+	 * the task activation to the idle (or soon-to-be-idle) CPU as
-+	 * the current CPU is likely busy. nr_running is checked to
-+	 * avoid unnecessary task stacking.
-+	 *
-+	 * Note that we can only get here with (wakee) p->on_rq=0;
-+	 * p->on_cpu can be whatever, the dequeue has been done, so
-+	 * the wakee has been accounted out of ->nr_running.
-+	 */
-+	if (!cpu_rq(cpu)->nr_running)
-+		return true;
-+
-+	return false;
-+}
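-+
-+/*
-+ * Summary of ttwu_queue_cond() above, in evaluation order (illustrative):
-+ *
-+ *   CPU not active                 -> false (no wakelist during hotplug)
-+ *   CPU not in p->cpus_ptr         -> false (task may not run there)
-+ *   no shared cache with waker     -> true  (avoid touching the remote rq)
-+ *   CPU == waker's CPU             -> false (local activation is cheap)
-+ *   wakee rq has no running tasks  -> true  (offload to the idle CPU)
-+ *   otherwise                      -> false
-+ */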
-+
-+static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
-+{
-+	if (__is_defined(ALT_SCHED_TTWU_QUEUE) && ttwu_queue_cond(p, cpu)) {
-+		sched_clock_cpu(cpu); /* Sync clocks across CPUs */
-+		__ttwu_queue_wakelist(p, cpu, wake_flags);
-+		return true;
-+	}
-+
-+	return false;
-+}
-+
-+void wake_up_if_idle(int cpu)
-+{
-+	struct rq *rq = cpu_rq(cpu);
-+
-+	guard(rcu)();
-+	if (is_idle_task(rcu_dereference(rq->curr))) {
-+		guard(raw_spinlock_irqsave)(&rq->lock);
-+		if (is_idle_task(rq->curr))
-+			resched_curr(rq);
-+	}
-+}
-+
-+extern struct static_key_false sched_asym_cpucapacity;
-+
-+static __always_inline bool sched_asym_cpucap_active(void)
-+{
-+	return static_branch_unlikely(&sched_asym_cpucapacity);
-+}
-+
-+bool cpus_equal_capacity(int this_cpu, int that_cpu)
-+{
-+	if (!sched_asym_cpucap_active())
-+		return true;
-+
-+	if (this_cpu == that_cpu)
-+		return true;
-+
-+	return arch_scale_cpu_capacity(this_cpu) == arch_scale_cpu_capacity(that_cpu);
-+}
-+
-+bool cpus_share_cache(int this_cpu, int that_cpu)
-+{
-+	if (this_cpu == that_cpu)
-+		return true;
-+
-+	return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
-+}
-+#else /* !CONFIG_SMP */
-+
-+static inline bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
-+{
-+	return false;
-+}
-+
-+#endif /* CONFIG_SMP */
-+
-+static inline void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
-+{
-+	struct rq *rq = cpu_rq(cpu);
-+
-+	if (ttwu_queue_wakelist(p, cpu, wake_flags))
-+		return;
-+
-+	raw_spin_lock(&rq->lock);
-+	update_rq_clock(rq);
-+	ttwu_do_activate(rq, p, wake_flags);
-+	raw_spin_unlock(&rq->lock);
-+}
-+
-+/*
-+ * Invoked from try_to_wake_up() to check whether the task can be woken up.
-+ *
-+ * The caller holds p::pi_lock if p != current or has preemption
-+ * disabled when p == current.
-+ *
-+ * The rules of saved_state:
-+ *
-+ *   The related locking code always holds p::pi_lock when updating
-+ *   p::saved_state, which means the code is fully serialized in both cases.
-+ *
-+ *  For PREEMPT_RT, the lock wait and lock wakeups happen via TASK_RTLOCK_WAIT.
-+ *  No other bits set. This allows to distinguish all wakeup scenarios.
-+ *
-+ *  For FREEZER, the wakeup happens via TASK_FROZEN. No other bits set. This
-+ *  allows us to prevent early wakeup of tasks before they can be run on
-+ *  asymmetric ISA architectures (e.g. ARMv9).
-+ */
-+static __always_inline
-+bool ttwu_state_match(struct task_struct *p, unsigned int state, int *success)
-+{
-+	int match;
-+
-+	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) {
-+		WARN_ON_ONCE((state & TASK_RTLOCK_WAIT) &&
-+			     state != TASK_RTLOCK_WAIT);
-+	}
-+
-+	*success = !!(match = __task_state_match(p, state));
-+
-+	/*
-+	 * Saved state preserves the task state across blocking on an RT
-+	 * lock or for TASK_FREEZABLE tasks.  If the state matches,
-+	 * set p::saved_state to TASK_RUNNING, but do not wake the task
-+	 * because it waits for a lock wakeup or __thaw_task(). Also
-+	 * indicate success because from the regular waker's point of
-+	 * view this has succeeded.
-+	 *
-+	 * After acquiring the lock the task will restore p::__state
-+	 * from p::saved_state which ensures that the regular
-+	 * wakeup is not lost. The restore will also set
-+	 * p::saved_state to TASK_RUNNING so any further tests will
-+	 * not result in false positives vs. @success
-+	 */
-+	if (match < 0)
-+		p->saved_state = TASK_RUNNING;
-+
-+	return match > 0;
-+}
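-+
-+/*
-+ * Illustrative summary of the tri-state contract above (assuming, as the
-+ * code implies, that __task_state_match() returns >0 when p->__state
-+ * matches, <0 when p->saved_state matches, and 0 on no match):
-+ *
-+ *   match > 0:  normal wakeup; *success = 1 and the wakeup proceeds
-+ *   match < 0:  deferred wakeup; p->saved_state = TASK_RUNNING and
-+ *               *success = 1, but the task is not woken here
-+ *   match == 0: no state match; *success = 0, nothing to do
-+ */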
-+
-+/*
-+ * Notes on Program-Order guarantees on SMP systems.
-+ *
-+ *  MIGRATION
-+ *
-+ * The basic program-order guarantee on SMP systems is that when a task [t]
-+ * migrates, all its activity on its old CPU [c0] happens-before any subsequent
-+ * execution on its new CPU [c1].
-+ *
-+ * For migration (of runnable tasks) this is provided by the following means:
-+ *
-+ *  A) UNLOCK of the rq(c0)->lock scheduling out task t
-+ *  B) migration for t is required to synchronize *both* rq(c0)->lock and
-+ *     rq(c1)->lock (if not at the same time, then in that order).
-+ *  C) LOCK of the rq(c1)->lock scheduling in task
-+ *
-+ * Transitivity guarantees that B happens after A and C after B.
-+ * Note: we only require RCpc transitivity.
-+ * Note: the CPU doing B need not be c0 or c1
-+ *
-+ * Example:
-+ *
-+ *   CPU0            CPU1            CPU2
-+ *
-+ *   LOCK rq(0)->lock
-+ *   sched-out X
-+ *   sched-in Y
-+ *   UNLOCK rq(0)->lock
-+ *
-+ *                                   LOCK rq(0)->lock // orders against CPU0
-+ *                                   dequeue X
-+ *                                   UNLOCK rq(0)->lock
-+ *
-+ *                                   LOCK rq(1)->lock
-+ *                                   enqueue X
-+ *                                   UNLOCK rq(1)->lock
-+ *
-+ *                   LOCK rq(1)->lock // orders against CPU2
-+ *                   sched-out Z
-+ *                   sched-in X
-+ *                   UNLOCK rq(1)->lock
-+ *
-+ *
-+ *  BLOCKING -- aka. SLEEP + WAKEUP
-+ *
-+ * For blocking we (obviously) need to provide the same guarantee as for
-+ * migration. However the means are completely different as there is no lock
-+ * chain to provide order. Instead we do:
-+ *
-+ *   1) smp_store_release(X->on_cpu, 0)   -- finish_task()
-+ *   2) smp_cond_load_acquire(!X->on_cpu) -- try_to_wake_up()
-+ *
-+ * Example:
-+ *
-+ *   CPU0 (schedule)  CPU1 (try_to_wake_up) CPU2 (schedule)
-+ *
-+ *   LOCK rq(0)->lock LOCK X->pi_lock
-+ *   dequeue X
-+ *   sched-out X
-+ *   smp_store_release(X->on_cpu, 0);
-+ *
-+ *                    smp_cond_load_acquire(&X->on_cpu, !VAL);
-+ *                    X->state = WAKING
-+ *                    set_task_cpu(X,2)
-+ *
-+ *                    LOCK rq(2)->lock
-+ *                    enqueue X
-+ *                    X->state = RUNNING
-+ *                    UNLOCK rq(2)->lock
-+ *
-+ *                                          LOCK rq(2)->lock // orders against CPU1
-+ *                                          sched-out Z
-+ *                                          sched-in X
-+ *                                          UNLOCK rq(2)->lock
-+ *
-+ *                    UNLOCK X->pi_lock
-+ *   UNLOCK rq(0)->lock
-+ *
-+ *
-+ * However, for wakeups there is a second guarantee we must provide, namely we
-+ * must observe the state that led to our wakeup. That is, not only must our
-+ * task observe its own prior state, it must also observe the stores prior to
-+ * its wakeup.
-+ *
-+ * This means that any means of doing remote wakeups must order the CPU doing
-+ * the wakeup against the CPU the task is going to end up running on. This,
-+ * however, is already required for the regular Program-Order guarantee above,
-+ * since the waking CPU is the one issuing the ACQUIRE (smp_cond_load_acquire).
-+ *
-+ */
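-+
-+/*
-+ * A minimal userspace sketch of the release/acquire pairing above, using
-+ * C11 <stdatomic.h> (illustrative only):
-+ *
-+ *   _Atomic int on_cpu = 1;
-+ *   int data;                    // stores done while the task was "on cpu"
-+ *
-+ *   // CPU0, finish_task():
-+ *   data = 42;
-+ *   atomic_store_explicit(&on_cpu, 0, memory_order_release);
-+ *
-+ *   // CPU1, try_to_wake_up():
-+ *   while (atomic_load_explicit(&on_cpu, memory_order_acquire))
-+ *       ;                        // smp_cond_load_acquire(&X->on_cpu, !VAL)
-+ *   assert(data == 42);          // prior stores are guaranteed visible
-+ */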
-+
-+/**
-+ * try_to_wake_up - wake up a thread
-+ * @p: the thread to be awakened
-+ * @state: the mask of task states that can be woken
-+ * @wake_flags: wake modifier flags (WF_*)
-+ *
-+ * Conceptually does:
-+ *
-+ *   If (@state & @p->state) @p->state = TASK_RUNNING.
-+ *
-+ * If the task was not queued/runnable, also place it back on a runqueue.
-+ *
-+ * This function is atomic against schedule() which would dequeue the task.
-+ *
-+ * It issues a full memory barrier before accessing @p->state, see the comment
-+ * with set_current_state().
-+ *
-+ * Uses p->pi_lock to serialize against concurrent wake-ups.
-+ *
-+ * Relies on p->pi_lock stabilizing:
-+ *  - p->sched_class
-+ *  - p->cpus_ptr
-+ *  - p->sched_task_group
-+ * in order to do migration, see its use of select_task_rq()/set_task_cpu().
-+ *
-+ * Tries really hard to only take one task_rq(p)->lock for performance.
-+ * Takes rq->lock in:
-+ *  - ttwu_runnable()    -- old rq, unavoidable, see comment there;
-+ *  - ttwu_queue()       -- new rq, for enqueue of the task;
-+ *  - psi_ttwu_dequeue() -- much sadness :-( accounting will kill us.
-+ *
-+ * As a consequence we race really badly with just about everything. See the
-+ * many memory barriers and their comments for details.
-+ *
-+ * Return: %true if @p->state changes (an actual wakeup was done),
-+ *	   %false otherwise.
-+ */
-+int try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
-+{
-+	guard(preempt)();
-+	int cpu, success = 0;
-+
-+	if (p == current) {
-+		/*
-+		 * We're waking current, this means 'p->on_rq' and 'task_cpu(p)
-+		 * == smp_processor_id()'. Together this means we can special
-+		 * case the whole 'p->on_rq && ttwu_runnable()' case below
-+		 * without taking any locks.
-+		 *
-+		 * In particular:
-+		 *  - we rely on Program-Order guarantees for all the ordering,
-+		 *  - we're serialized against set_special_state() by virtue of
-+		 *    it disabling IRQs (this allows not taking ->pi_lock).
-+		 */
-+		if (!ttwu_state_match(p, state, &success))
-+			goto out;
-+
-+		trace_sched_waking(p);
-+		ttwu_do_wakeup(p);
-+		goto out;
-+	}
-+
-+	/*
-+	 * If we are going to wake up a thread waiting for CONDITION we
-+	 * need to ensure that CONDITION=1 done by the caller can not be
-+	 * reordered with p->state check below. This pairs with smp_store_mb()
-+	 * in set_current_state() that the waiting thread does.
-+	 */
-+	scoped_guard (raw_spinlock_irqsave, &p->pi_lock) {
-+		smp_mb__after_spinlock();
-+		if (!ttwu_state_match(p, state, &success))
-+			break;
-+
-+		trace_sched_waking(p);
-+
-+		/*
-+		 * Ensure we load p->on_rq _after_ p->state, otherwise it would
-+		 * be possible to, falsely, observe p->on_rq == 0 and get stuck
-+		 * in smp_cond_load_acquire() below.
-+		 *
-+		 * sched_ttwu_pending()			try_to_wake_up()
-+		 *   STORE p->on_rq = 1			  LOAD p->state
-+		 *   UNLOCK rq->lock
-+		 *
-+		 * __schedule() (switch to task 'p')
-+		 *   LOCK rq->lock			  smp_rmb();
-+		 *   smp_mb__after_spinlock();
-+		 *   UNLOCK rq->lock
-+		 *
-+		 * [task p]
-+		 *   STORE p->state = UNINTERRUPTIBLE	  LOAD p->on_rq
-+		 *
-+		 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
-+		 * __schedule().  See the comment for smp_mb__after_spinlock().
-+		 *
-+		 * A similar smp_rmb() lives in __task_needs_rq_lock().
-+		 */
-+		smp_rmb();
-+		if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags))
-+			break;
-+
-+#ifdef CONFIG_SMP
-+		/*
-+		 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
-+		 * possible to, falsely, observe p->on_cpu == 0.
-+		 *
-+		 * One must be running (->on_cpu == 1) in order to remove oneself
-+		 * from the runqueue.
-+		 *
-+		 * __schedule() (switch to task 'p')	try_to_wake_up()
-+		 *   STORE p->on_cpu = 1		  LOAD p->on_rq
-+		 *   UNLOCK rq->lock
-+		 *
-+		 * __schedule() (put 'p' to sleep)
-+		 *   LOCK rq->lock			  smp_rmb();
-+		 *   smp_mb__after_spinlock();
-+		 *   STORE p->on_rq = 0			  LOAD p->on_cpu
-+		 *
-+		 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
-+		 * __schedule().  See the comment for smp_mb__after_spinlock().
-+		 *
-+		 * Form a control-dep-acquire with p->on_rq == 0 above, to ensure
-+		 * schedule()'s deactivate_task() has 'happened' and p will no longer
-+		 * care about its own p->state. See the comment in __schedule().
-+		 */
-+		smp_acquire__after_ctrl_dep();
-+
-+		/*
-+		 * We're doing the wakeup (@success == 1), they did a dequeue (p->on_rq
-+		 * == 0), which means we need to do an enqueue, change p->state to
-+		 * TASK_WAKING such that we can unlock p->pi_lock before doing the
-+		 * enqueue, such as ttwu_queue_wakelist().
-+		 */
-+		WRITE_ONCE(p->__state, TASK_WAKING);
-+
-+		/*
-+		 * If the owning (remote) CPU is still in the middle of schedule() with
-+		 * this task as prev, consider queueing p on the remote CPU's wake_list
-+		 * which potentially sends an IPI instead of spinning on p->on_cpu to
-+		 * let the waker make forward progress. This is safe because IRQs are
-+		 * disabled and the IPI will deliver after on_cpu is cleared.
-+		 *
-+		 * Ensure we load task_cpu(p) after p->on_cpu:
-+		 *
-+		 * set_task_cpu(p, cpu);
-+		 *   STORE p->cpu = @cpu
-+		 * __schedule() (switch to task 'p')
-+		 *   LOCK rq->lock
-+		 *   smp_mb__after_spinlock()           smp_cond_load_acquire(&p->on_cpu)
-+		 *   STORE p->on_cpu = 1                LOAD p->cpu
-+		 *
-+		 * to ensure we observe the correct CPU on which the task is currently
-+		 * scheduling.
-+		 */
-+		if (smp_load_acquire(&p->on_cpu) &&
-+		    ttwu_queue_wakelist(p, task_cpu(p), wake_flags))
-+			break;
-+
-+		/*
-+		 * If the owning (remote) CPU is still in the middle of schedule() with
-+		 * this task as prev, wait until it's done referencing the task.
-+		 *
-+		 * Pairs with the smp_store_release() in finish_task().
-+		 *
-+		 * This ensures that tasks getting woken will be fully ordered against
-+		 * their previous state and preserve Program Order.
-+		 */
-+		smp_cond_load_acquire(&p->on_cpu, !VAL);
-+
-+		sched_task_ttwu(p);
-+
-+		if ((wake_flags & WF_CURRENT_CPU) &&
-+		    cpumask_test_cpu(smp_processor_id(), p->cpus_ptr))
-+			cpu = smp_processor_id();
-+		else
-+			cpu = select_task_rq(p);
-+
-+		if (cpu != task_cpu(p)) {
-+			if (p->in_iowait) {
-+				delayacct_blkio_end(p);
-+				atomic_dec(&task_rq(p)->nr_iowait);
-+			}
-+
-+			wake_flags |= WF_MIGRATED;
-+			set_task_cpu(p, cpu);
-+		}
-+#else
-+		sched_task_ttwu(p);
-+
-+		cpu = task_cpu(p);
-+#endif /* CONFIG_SMP */
-+
-+		ttwu_queue(p, cpu, wake_flags);
-+	}
-+out:
-+	if (success)
-+		ttwu_stat(p, task_cpu(p), wake_flags);
-+
-+	return success;
-+}
-+
-+static bool __task_needs_rq_lock(struct task_struct *p)
-+{
-+	unsigned int state = READ_ONCE(p->__state);
-+
-+	/*
-+	 * Since pi->lock blocks try_to_wake_up(), we don't need rq->lock when
-+	 * the task is blocked. Make sure to check @state since ttwu() can drop
-+	 * locks at the end, see ttwu_queue_wakelist().
-+	 */
-+	if (state == TASK_RUNNING || state == TASK_WAKING)
-+		return true;
-+
-+	/*
-+	 * Ensure we load p->on_rq after p->__state, otherwise it would be
-+	 * possible to, falsely, observe p->on_rq == 0.
-+	 *
-+	 * See try_to_wake_up() for a longer comment.
-+	 */
-+	smp_rmb();
-+	if (p->on_rq)
-+		return true;
-+
-+#ifdef CONFIG_SMP
-+	/*
-+	 * Ensure the task has finished __schedule() and will not be referenced
-+	 * anymore. Again, see try_to_wake_up() for a longer comment.
-+	 */
-+	smp_rmb();
-+	smp_cond_load_acquire(&p->on_cpu, !VAL);
-+#endif
-+
-+	return false;
-+}
-+
-+/**
-+ * task_call_func - Invoke a function on task in fixed state
-+ * @p: Process for which the function is to be invoked, can be @current.
-+ * @func: Function to invoke.
-+ * @arg: Argument to function.
-+ *
-+ * Fix the task in its current state by avoiding wakeups and/or rq operations
-+ * and call @func(@arg) on it.  This function can use task_is_runnable() and
-+ * task_curr() to work out what the state is, if required.  Given that @func
-+ * can be invoked with a runqueue lock held, it had better be quite
-+ * lightweight.
-+ *
-+ * Returns:
-+ *   Whatever @func returns
-+ */
-+int task_call_func(struct task_struct *p, task_call_f func, void *arg)
-+{
-+	struct rq *rq = NULL;
-+	struct rq_flags rf;
-+	int ret;
-+
-+	raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
-+
-+	if (__task_needs_rq_lock(p))
-+		rq = __task_rq_lock(p, &rf);
-+
-+	/*
-+	 * At this point the task is pinned; either:
-+	 *  - blocked and we're holding off wakeups      (pi->lock)
-+	 *  - woken, and we're holding off enqueue       (rq->lock)
-+	 *  - queued, and we're holding off schedule     (rq->lock)
-+	 *  - running, and we're holding off de-schedule (rq->lock)
-+	 *
-+	 * The called function (@func) can use: task_curr(), p->on_rq and
-+	 * p->__state to differentiate between these states.
-+	 */
-+	ret = func(p, arg);
-+
-+	if (rq)
-+		__task_rq_unlock(rq, &rf);
-+
-+	raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
-+	return ret;
-+}
-+
-+/**
-+ * cpu_curr_snapshot - Return a snapshot of the currently running task
-+ * @cpu: The CPU on which to snapshot the task.
-+ *
-+ * Returns the task_struct pointer of the task "currently" running on
-+ * the specified CPU.  If the same task is running on that CPU throughout,
-+ * the return value will be a pointer to that task's task_struct structure.
-+ * If the CPU did any context switches even vaguely concurrently with the
-+ * execution of this function, the return value will be a pointer to the
-+ * task_struct structure of a randomly chosen task that was running on
-+ * that CPU somewhere around the time that this function was executing.
-+ *
-+ * If the specified CPU was offline, the return value is whatever it
-+ * is, perhaps a pointer to the task_struct structure of that CPU's idle
-+ * task, but there is no guarantee.  Callers wishing a useful return
-+ * value must take some action to ensure that the specified CPU remains
-+ * online throughout.
-+ *
-+ * This function executes full memory barriers before and after fetching
-+ * the pointer, which permits the caller to confine this function's fetch
-+ * with respect to the caller's accesses to other shared variables.
-+ */
-+struct task_struct *cpu_curr_snapshot(int cpu)
-+{
-+	struct task_struct *t;
-+
-+	smp_mb(); /* Pairing determined by caller's synchronization design. */
-+	t = rcu_dereference(cpu_curr(cpu));
-+	smp_mb(); /* Pairing determined by caller's synchronization design. */
-+	return t;
-+}
-+
-+/**
-+ * wake_up_process - Wake up a specific process
-+ * @p: The process to be woken up.
-+ *
-+ * Attempt to wake up the nominated process and move it to the set of runnable
-+ * processes.
-+ *
-+ * Return: 1 if the process was woken up, 0 if it was already running.
-+ *
-+ * This function executes a full memory barrier before accessing the task state.
-+ */
-+int wake_up_process(struct task_struct *p)
-+{
-+	return try_to_wake_up(p, TASK_NORMAL, 0);
-+}
-+EXPORT_SYMBOL(wake_up_process);
-+
-+int wake_up_state(struct task_struct *p, unsigned int state)
-+{
-+	return try_to_wake_up(p, state, 0);
-+}
-+
-+/*
-+ * Perform scheduler related setup for a newly forked process p.
-+ * p is forked by current.
-+ *
-+ * __sched_fork() is basic setup used by init_idle() too:
-+ */
-+static inline void __sched_fork(unsigned long clone_flags, struct task_struct *p)
-+{
-+	p->on_rq			= 0;
-+	p->on_cpu			= 0;
-+	p->utime			= 0;
-+	p->stime			= 0;
-+	p->sched_time			= 0;
-+
-+#ifdef CONFIG_SCHEDSTATS
-+	/* Even if schedstat is disabled, there should not be garbage */
-+	memset(&p->stats, 0, sizeof(p->stats));
-+#endif
-+
-+#ifdef CONFIG_PREEMPT_NOTIFIERS
-+	INIT_HLIST_HEAD(&p->preempt_notifiers);
-+#endif
-+
-+#ifdef CONFIG_COMPACTION
-+	p->capture_control = NULL;
-+#endif
-+#ifdef CONFIG_SMP
-+	p->wake_entry.u_flags = CSD_TYPE_TTWU;
-+#endif
-+	init_sched_mm_cid(p);
-+}
-+
-+/*
-+ * fork()/clone()-time setup:
-+ */
-+int sched_fork(unsigned long clone_flags, struct task_struct *p)
-+{
-+	__sched_fork(clone_flags, p);
-+	/*
-+	 * We mark the process as NEW here. This guarantees that
-+	 * nobody will actually run it, and a signal or other external
-+	 * event cannot wake it up and insert it on the runqueue either.
-+	 */
-+	p->__state = TASK_NEW;
-+
-+	/*
-+	 * Make sure we do not leak PI boosting priority to the child.
-+	 */
-+	p->prio = current->normal_prio;
-+
-+	/*
-+	 * Revert to default priority/policy on fork if requested.
-+	 */
-+	if (unlikely(p->sched_reset_on_fork)) {
-+		if (task_has_rt_policy(p)) {
-+			p->policy = SCHED_NORMAL;
-+			p->static_prio = NICE_TO_PRIO(0);
-+			p->rt_priority = 0;
-+		} else if (PRIO_TO_NICE(p->static_prio) < 0)
-+			p->static_prio = NICE_TO_PRIO(0);
-+
-+		p->prio = p->normal_prio = p->static_prio;
-+
-+		/*
-+		 * We don't need the reset flag anymore after the fork. It has
-+		 * fulfilled its duty:
-+		 */
-+		p->sched_reset_on_fork = 0;
-+	}
-+
-+#ifdef CONFIG_SCHED_INFO
-+	if (unlikely(sched_info_on()))
-+		memset(&p->sched_info, 0, sizeof(p->sched_info));
-+#endif
-+	init_task_preempt_count(p);
-+
-+	return 0;
-+}
-+
-+int sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs)
-+{
-+	unsigned long flags;
-+	struct rq *rq;
-+
-+	/*
-+	 * Because we're not yet on the pid-hash, p->pi_lock isn't strictly
-+	 * required yet, but lockdep gets upset if rules are violated.
-+	 */
-+	raw_spin_lock_irqsave(&p->pi_lock, flags);
-+	/*
-+	 * Share the timeslice between parent and child, thus the
-+	 * total amount of pending timeslices in the system doesn't change,
-+	 * resulting in more scheduling fairness.
-+	 */
-+	rq = this_rq();
-+	raw_spin_lock(&rq->lock);
-+
-+	rq->curr->time_slice /= 2;
-+	p->time_slice = rq->curr->time_slice;
-+#ifdef CONFIG_SCHED_HRTICK
-+	hrtick_start(rq, rq->curr->time_slice);
-+#endif
-+
-+	if (p->time_slice < RESCHED_NS) {
-+		p->time_slice = sysctl_sched_base_slice;
-+		resched_curr(rq);
-+	}
-+	sched_task_fork(p, rq);
-+	raw_spin_unlock(&rq->lock);
-+
-+	rseq_migrate(p);
-+	/*
-+	 * We're setting the CPU for the first time, we don't migrate,
-+	 * so use __set_task_cpu().
-+	 */
-+	__set_task_cpu(p, smp_processor_id());
-+	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-+
-+	return 0;
-+}
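-+
-+/*
-+ * Worked example for the timeslice split above: if the parent has 4ms of
-+ * slice left at fork time, parent and child continue with 2ms each, so the
-+ * total pending slice in the system is unchanged. Should the halved slice
-+ * fall below RESCHED_NS, the child instead gets a fresh
-+ * sysctl_sched_base_slice and the parent is marked for rescheduling.
-+ */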
-+
-+void sched_cancel_fork(struct task_struct *p)
-+{
-+}
-+
-+void sched_post_fork(struct task_struct *p)
-+{
-+}
-+
-+#ifdef CONFIG_SCHEDSTATS
-+
-+DEFINE_STATIC_KEY_FALSE(sched_schedstats);
-+
-+static void set_schedstats(bool enabled)
-+{
-+	if (enabled)
-+		static_branch_enable(&sched_schedstats);
-+	else
-+		static_branch_disable(&sched_schedstats);
-+}
-+
-+void force_schedstat_enabled(void)
-+{
-+	if (!schedstat_enabled()) {
-+		pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n");
-+		static_branch_enable(&sched_schedstats);
-+	}
-+}
-+
-+static int __init setup_schedstats(char *str)
-+{
-+	int ret = 0;
-+	if (!str)
-+		goto out;
-+
-+	if (!strcmp(str, "enable")) {
-+		set_schedstats(true);
-+		ret = 1;
-+	} else if (!strcmp(str, "disable")) {
-+		set_schedstats(false);
-+		ret = 1;
-+	}
-+out:
-+	if (!ret)
-+		pr_warn("Unable to parse schedstats=\n");
-+
-+	return ret;
-+}
-+__setup("schedstats=", setup_schedstats);
-+
-+#ifdef CONFIG_PROC_SYSCTL
-+static int sysctl_schedstats(const struct ctl_table *table, int write, void *buffer,
-+		size_t *lenp, loff_t *ppos)
-+{
-+	struct ctl_table t;
-+	int err;
-+	int state = static_branch_likely(&sched_schedstats);
-+
-+	if (write && !capable(CAP_SYS_ADMIN))
-+		return -EPERM;
-+
-+	t = *table;
-+	t.data = &state;
-+	err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
-+	if (err < 0)
-+		return err;
-+	if (write)
-+		set_schedstats(state);
-+	return err;
-+}
-+
-+static struct ctl_table sched_core_sysctls[] = {
-+	{
-+		.procname       = "sched_schedstats",
-+		.data           = NULL,
-+		.maxlen         = sizeof(unsigned int),
-+		.mode           = 0644,
-+		.proc_handler   = sysctl_schedstats,
-+		.extra1         = SYSCTL_ZERO,
-+		.extra2         = SYSCTL_ONE,
-+	},
-+};
-+static int __init sched_core_sysctl_init(void)
-+{
-+	register_sysctl_init("kernel", sched_core_sysctls);
-+	return 0;
-+}
-+late_initcall(sched_core_sysctl_init);
-+#endif /* CONFIG_PROC_SYSCTL */
-+#endif /* CONFIG_SCHEDSTATS */
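-+
-+/*
-+ * Usage of the knobs above: schedstats can be toggled at boot time with
-+ * the "schedstats=enable" / "schedstats=disable" kernel parameters, or
-+ * (with CONFIG_PROC_SYSCTL) at runtime through the sysctl registered
-+ * above, e.g.:
-+ *
-+ *   sysctl kernel.sched_schedstats=1
-+ */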
-+
-+/*
-+ * wake_up_new_task - wake up a newly created task for the first time.
-+ *
-+ * This function will do some initial scheduler statistics housekeeping
-+ * that must be done for every newly created context, then puts the task
-+ * on the runqueue and wakes it.
-+ */
-+void wake_up_new_task(struct task_struct *p)
-+{
-+	unsigned long flags;
-+	struct rq *rq;
-+
-+	raw_spin_lock_irqsave(&p->pi_lock, flags);
-+	WRITE_ONCE(p->__state, TASK_RUNNING);
-+	rq = cpu_rq(select_task_rq(p));
-+#ifdef CONFIG_SMP
-+	rseq_migrate(p);
-+	/*
-+	 * Fork balancing, do it here and not earlier because:
-+	 * - cpus_ptr can change in the fork path
-+	 * - any previously selected CPU might disappear through hotplug
-+	 *
-+	 * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
-+	 * as we're not fully set-up yet.
-+	 */
-+	__set_task_cpu(p, cpu_of(rq));
-+#endif
-+
-+	raw_spin_lock(&rq->lock);
-+	update_rq_clock(rq);
-+
-+	activate_task(p, rq);
-+	trace_sched_wakeup_new(p);
-+	wakeup_preempt(rq);
-+
-+	raw_spin_unlock(&rq->lock);
-+	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-+}
-+
-+#ifdef CONFIG_PREEMPT_NOTIFIERS
-+
-+static DEFINE_STATIC_KEY_FALSE(preempt_notifier_key);
-+
-+void preempt_notifier_inc(void)
-+{
-+	static_branch_inc(&preempt_notifier_key);
-+}
-+EXPORT_SYMBOL_GPL(preempt_notifier_inc);
-+
-+void preempt_notifier_dec(void)
-+{
-+	static_branch_dec(&preempt_notifier_key);
-+}
-+EXPORT_SYMBOL_GPL(preempt_notifier_dec);
-+
-+/**
-+ * preempt_notifier_register - tell me when current is being preempted & rescheduled
-+ * @notifier: notifier struct to register
-+ */
-+void preempt_notifier_register(struct preempt_notifier *notifier)
-+{
-+	if (!static_branch_unlikely(&preempt_notifier_key))
-+		WARN(1, "registering preempt_notifier while notifiers disabled\n");
-+
-+	hlist_add_head(&notifier->link, &current->preempt_notifiers);
-+}
-+EXPORT_SYMBOL_GPL(preempt_notifier_register);
-+
-+/**
-+ * preempt_notifier_unregister - no longer interested in preemption notifications
-+ * @notifier: notifier struct to unregister
-+ *
-+ * This is *not* safe to call from within a preemption notifier.
-+ */
-+void preempt_notifier_unregister(struct preempt_notifier *notifier)
-+{
-+	hlist_del(&notifier->link);
-+}
-+EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
-+
-+static void __fire_sched_in_preempt_notifiers(struct task_struct *curr)
-+{
-+	struct preempt_notifier *notifier;
-+
-+	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
-+		notifier->ops->sched_in(notifier, raw_smp_processor_id());
-+}
-+
-+static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
-+{
-+	if (static_branch_unlikely(&preempt_notifier_key))
-+		__fire_sched_in_preempt_notifiers(curr);
-+}
-+
-+static void
-+__fire_sched_out_preempt_notifiers(struct task_struct *curr,
-+				   struct task_struct *next)
-+{
-+	struct preempt_notifier *notifier;
-+
-+	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
-+		notifier->ops->sched_out(notifier, next);
-+}
-+
-+static __always_inline void
-+fire_sched_out_preempt_notifiers(struct task_struct *curr,
-+				 struct task_struct *next)
-+{
-+	if (static_branch_unlikely(&preempt_notifier_key))
-+		__fire_sched_out_preempt_notifiers(curr, next);
-+}
-+
-+#else /* !CONFIG_PREEMPT_NOTIFIERS */
-+
-+static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
-+{
-+}
-+
-+static inline void
-+fire_sched_out_preempt_notifiers(struct task_struct *curr,
-+				 struct task_struct *next)
-+{
-+}
-+
-+#endif /* CONFIG_PREEMPT_NOTIFIERS */
-+
-+static inline void prepare_task(struct task_struct *next)
-+{
-+	/*
-+	 * Claim the task as running, we do this before switching to it
-+	 * such that any running task will have this set.
-+	 *
-+	 * See the smp_load_acquire(&p->on_cpu) case in ttwu() and
-+	 * its ordering comment.
-+	 */
-+	WRITE_ONCE(next->on_cpu, 1);
-+}
-+
-+static inline void finish_task(struct task_struct *prev)
-+{
-+#ifdef CONFIG_SMP
-+	/*
-+	 * This must be the very last reference to @prev from this CPU. After
-+	 * p->on_cpu is cleared, the task can be moved to a different CPU. We
-+	 * must ensure this doesn't happen until the switch is completely
-+	 * finished.
-+	 *
-+	 * In particular, the load of prev->state in finish_task_switch() must
-+	 * happen before this.
-+	 *
-+	 * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
-+	 */
-+	smp_store_release(&prev->on_cpu, 0);
-+#else
-+	prev->on_cpu = 0;
-+#endif
-+}
-+
-+#ifdef CONFIG_SMP
-+
-+static void do_balance_callbacks(struct rq *rq, struct balance_callback *head)
-+{
-+	void (*func)(struct rq *rq);
-+	struct balance_callback *next;
-+
-+	lockdep_assert_held(&rq->lock);
-+
-+	while (head) {
-+		func = (void (*)(struct rq *))head->func;
-+		next = head->next;
-+		head->next = NULL;
-+		head = next;
-+
-+		func(rq);
-+	}
-+}
-+
-+static void balance_push(struct rq *rq);
-+
-+/*
-+ * balance_push_callback is a right abuse of the callback interface and plays
-+ * by significantly different rules.
-+ *
-+ * Where the normal balance_callback's purpose is to be run in the same context
-+ * that queued it (only later, when it's safe to drop rq->lock again),
-+ * balance_push_callback is specifically targeted at __schedule().
-+ *
-+ * This abuse is tolerated because it places all the unlikely/odd cases behind
-+ * a single test, namely: rq->balance_callback == NULL.
-+ */
-+struct balance_callback balance_push_callback = {
-+	.next = NULL,
-+	.func = balance_push,
-+};
-+
-+static inline struct balance_callback *
-+__splice_balance_callbacks(struct rq *rq, bool split)
-+{
-+	struct balance_callback *head = rq->balance_callback;
-+
-+	if (likely(!head))
-+		return NULL;
-+
-+	lockdep_assert_rq_held(rq);
-+	/*
-+	 * Must not take balance_push_callback off the list when
-+	 * splice_balance_callbacks() and balance_callbacks() are not
-+	 * in the same rq->lock section.
-+	 *
-+	 * In that case it would be possible for __schedule() to interleave
-+	 * and observe the list empty.
-+	 */
-+	if (split && head == &balance_push_callback)
-+		head = NULL;
-+	else
-+		rq->balance_callback = NULL;
-+
-+	return head;
-+}
-+
-+struct balance_callback *splice_balance_callbacks(struct rq *rq)
-+{
-+	return __splice_balance_callbacks(rq, true);
-+}
-+
-+static void __balance_callbacks(struct rq *rq)
-+{
-+	do_balance_callbacks(rq, __splice_balance_callbacks(rq, false));
-+}
-+
-+void balance_callbacks(struct rq *rq, struct balance_callback *head)
-+{
-+	unsigned long flags;
-+
-+	if (unlikely(head)) {
-+		raw_spin_lock_irqsave(&rq->lock, flags);
-+		do_balance_callbacks(rq, head);
-+		raw_spin_unlock_irqrestore(&rq->lock, flags);
-+	}
-+}
-+
-+#else
-+
-+static inline void __balance_callbacks(struct rq *rq)
-+{
-+}
-+#endif
-+
-+static inline void
-+prepare_lock_switch(struct rq *rq, struct task_struct *next)
-+{
-+	/*
-+	 * The runqueue lock will be released by the next
-+	 * task (which is an invalid locking op but in the case
-+	 * of the scheduler it's an obvious special-case), so we
-+	 * do an early lockdep release here:
-+	 */
-+	spin_release(&rq->lock.dep_map, _THIS_IP_);
-+#ifdef CONFIG_DEBUG_SPINLOCK
-+	/* this is a valid case when another task releases the spinlock */
-+	rq->lock.owner = next;
-+#endif
-+}
-+
-+static inline void finish_lock_switch(struct rq *rq)
-+{
-+	/*
-+	 * If we are tracking spinlock dependencies then we have to
-+	 * fix up the runqueue lock - which gets 'carried over' from
-+	 * prev into current:
-+	 */
-+	spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
-+	__balance_callbacks(rq);
-+	raw_spin_unlock_irq(&rq->lock);
-+}
-+
-+/*
-+ * NOP if the arch has not defined these:
-+ */
-+
-+#ifndef prepare_arch_switch
-+# define prepare_arch_switch(next)	do { } while (0)
-+#endif
-+
-+#ifndef finish_arch_post_lock_switch
-+# define finish_arch_post_lock_switch()	do { } while (0)
-+#endif
-+
-+static inline void kmap_local_sched_out(void)
-+{
-+#ifdef CONFIG_KMAP_LOCAL
-+	if (unlikely(current->kmap_ctrl.idx))
-+		__kmap_local_sched_out();
-+#endif
-+}
-+
-+static inline void kmap_local_sched_in(void)
-+{
-+#ifdef CONFIG_KMAP_LOCAL
-+	if (unlikely(current->kmap_ctrl.idx))
-+		__kmap_local_sched_in();
-+#endif
-+}
-+
-+/**
-+ * prepare_task_switch - prepare to switch tasks
-+ * @rq: the runqueue preparing to switch
-+ * @next: the task we are going to switch to.
-+ *
-+ * This is called with the rq lock held and interrupts off. It must
-+ * be paired with a subsequent finish_task_switch after the context
-+ * switch.
-+ *
-+ * prepare_task_switch sets up locking and calls architecture specific
-+ * hooks.
-+ */
-+static inline void
-+prepare_task_switch(struct rq *rq, struct task_struct *prev,
-+		    struct task_struct *next)
-+{
-+	kcov_prepare_switch(prev);
-+	sched_info_switch(rq, prev, next);
-+	perf_event_task_sched_out(prev, next);
-+	rseq_preempt(prev);
-+	fire_sched_out_preempt_notifiers(prev, next);
-+	kmap_local_sched_out();
-+	prepare_task(next);
-+	prepare_arch_switch(next);
-+}
-+
-+/**
-+ * finish_task_switch - clean up after a task-switch
-+ * @rq: runqueue associated with task-switch
-+ * @prev: the thread we just switched away from.
-+ *
-+ * finish_task_switch must be called after the context switch, paired
-+ * with a prepare_task_switch call before the context switch.
-+ * finish_task_switch will reconcile locking set up by prepare_task_switch,
-+ * and do any other architecture-specific cleanup actions.
-+ *
-+ * Note that we may have delayed dropping an mm in context_switch(). If
-+ * so, we finish that here outside of the runqueue lock.  (Doing it
-+ * with the lock held can cause deadlocks; see schedule() for
-+ * details.)
-+ *
-+ * The context switch has flipped the stack from under us and restored the
-+ * local variables which were saved when this task called schedule() in the
-+ * past. 'prev == current' is still correct but we need to recalculate this_rq
-+ * because prev may have moved to another CPU.
-+ */
-+static struct rq *finish_task_switch(struct task_struct *prev)
-+	__releases(rq->lock)
-+{
-+	struct rq *rq = this_rq();
-+	struct mm_struct *mm = rq->prev_mm;
-+	unsigned int prev_state;
-+
-+	/*
-+	 * The previous task will have left us with a preempt_count of 2
-+	 * because it left us after:
-+	 *
-+	 *	schedule()
-+	 *	  preempt_disable();			// 1
-+	 *	  __schedule()
-+	 *	    raw_spin_lock_irq(&rq->lock)	// 2
-+	 *
-+	 * Also, see FORK_PREEMPT_COUNT.
-+	 */
-+	if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET,
-+		      "corrupted preempt_count: %s/%d/0x%x\n",
-+		      current->comm, current->pid, preempt_count()))
-+		preempt_count_set(FORK_PREEMPT_COUNT);
-+
-+	rq->prev_mm = NULL;
-+
-+	/*
-+	 * A task struct has one reference for the use as "current".
-+	 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
-+	 * schedule one last time. The schedule call will never return, and
-+	 * the scheduled task must drop that reference.
-+	 *
-+	 * We must observe prev->state before clearing prev->on_cpu (in
-+	 * finish_task), otherwise a concurrent wakeup can get prev
-+	 * running on another CPU and we could race with its RUNNING -> DEAD
-+	 * transition, resulting in a double drop.
-+	 */
-+	prev_state = READ_ONCE(prev->__state);
-+	vtime_task_switch(prev);
-+	perf_event_task_sched_in(prev, current);
-+	finish_task(prev);
-+	tick_nohz_task_switch();
-+	finish_lock_switch(rq);
-+	finish_arch_post_lock_switch();
-+	kcov_finish_switch(current);
-+	/*
-+	 * kmap_local_sched_out() is invoked with rq::lock held and
-+	 * interrupts disabled. There is no requirement for that, but the
-+	 * sched out code does not have an interrupt enabled section.
-+	 * Restoring the maps on sched in does not require interrupts being
-+	 * disabled either.
-+	 */
-+	kmap_local_sched_in();
-+
-+	fire_sched_in_preempt_notifiers(current);
-+	/*
-+	 * When switching through a kernel thread, the loop in
-+	 * membarrier_{private,global}_expedited() may have observed that
-+	 * kernel thread and not issued an IPI. It is therefore possible to
-+	 * schedule between user->kernel->user threads without passing through
-+	 * switch_mm(). Membarrier requires a barrier after storing to
-+	 * rq->curr, before returning to userspace, so provide them here:
-+	 *
-+	 * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly
-+	 *   provided by mmdrop(),
-+	 * - a sync_core for SYNC_CORE.
-+	 */
-+	if (mm) {
-+		membarrier_mm_sync_core_before_usermode(mm);
-+		mmdrop_sched(mm);
-+	}
-+	if (unlikely(prev_state == TASK_DEAD)) {
-+		/* Task is done with its stack. */
-+		put_task_stack(prev);
-+
-+		put_task_struct_rcu_user(prev);
-+	}
-+
-+	return rq;
-+}
-+
-+/**
-+ * schedule_tail - first thing a freshly forked thread must call.
-+ * @prev: the thread we just switched away from.
-+ */
-+asmlinkage __visible void schedule_tail(struct task_struct *prev)
-+	__releases(rq->lock)
-+{
-+	/*
-+	 * New tasks start with FORK_PREEMPT_COUNT, see there and
-+	 * finish_task_switch() for details.
-+	 *
-+	 * finish_task_switch() will drop rq->lock() and lower preempt_count
-+	 * and the preempt_enable() will end up enabling preemption (on
-+	 * PREEMPT_COUNT kernels).
-+	 */
-+
-+	finish_task_switch(prev);
-+	preempt_enable();
-+
-+	if (current->set_child_tid)
-+		put_user(task_pid_vnr(current), current->set_child_tid);
-+
-+	calculate_sigpending();
-+}
-+
-+/*
-+ * context_switch - switch to the new MM and the new thread's register state.
-+ */
-+static __always_inline struct rq *
-+context_switch(struct rq *rq, struct task_struct *prev,
-+	       struct task_struct *next)
-+{
-+	prepare_task_switch(rq, prev, next);
-+
-+	/*
-+	 * For paravirt, this is coupled with an exit in switch_to to
-+	 * combine the page table reload and the switch backend into
-+	 * one hypercall.
-+	 */
-+	arch_start_context_switch(prev);
-+
-+	/*
-+	 * kernel -> kernel   lazy + transfer active
-+	 *   user -> kernel   lazy + mmgrab() active
-+	 *
-+	 * kernel ->   user   switch + mmdrop() active
-+	 *   user ->   user   switch
-+	 *
-+	 * switch_mm_cid() needs to be updated if the barriers provided
-+	 * by context_switch() are modified.
-+	 */
-+	if (!next->mm) {                                // to kernel
-+		enter_lazy_tlb(prev->active_mm, next);
-+
-+		next->active_mm = prev->active_mm;
-+		if (prev->mm)                           // from user
-+			mmgrab(prev->active_mm);
-+		else
-+			prev->active_mm = NULL;
-+	} else {                                        // to user
-+		membarrier_switch_mm(rq, prev->active_mm, next->mm);
-+		/*
-+		 * sys_membarrier() requires an smp_mb() between setting
-+		 * rq->curr / membarrier_switch_mm() and returning to userspace.
-+		 *
-+		 * The below provides this either through switch_mm(), or in
-+		 * case 'prev->active_mm == next->mm' through
-+		 * finish_task_switch()'s mmdrop().
-+		 */
-+		switch_mm_irqs_off(prev->active_mm, next->mm, next);
-+		lru_gen_use_mm(next->mm);
-+
-+		if (!prev->mm) {                        // from kernel
-+			/* will mmdrop() in finish_task_switch(). */
-+			rq->prev_mm = prev->active_mm;
-+			prev->active_mm = NULL;
-+		}
-+	}
-+
-+	/* switch_mm_cid() requires the memory barriers above. */
-+	switch_mm_cid(rq, prev, next);
-+
-+	prepare_lock_switch(rq, next);
-+
-+	/* Here we just switch the register state and the stack. */
-+	switch_to(prev, next, prev);
-+	barrier();
-+
-+	return finish_task_switch(prev);
-+}
-+
-+/*
-+ * nr_running, nr_uninterruptible and nr_context_switches:
-+ *
-+ * externally visible scheduler statistics: current number of runnable
-+ * threads, total number of context switches performed since bootup.
-+ */
-+unsigned int nr_running(void)
-+{
-+	unsigned int i, sum = 0;
-+
-+	for_each_online_cpu(i)
-+		sum += cpu_rq(i)->nr_running;
-+
-+	return sum;
-+}
-+
-+/*
-+ * Check if only the current task is running on the CPU.
-+ *
-+ * Caution: this function does not check that the caller has disabled
-+ * preemption, thus the result might have a time-of-check-to-time-of-use
-+ * race.  The caller is responsible to use it correctly, for example:
-+ *
-+ * - from a non-preemptible section (of course)
-+ *
-+ * - from a thread that is bound to a single CPU
-+ *
-+ * - in a loop with very short iterations (e.g. a polling loop)
-+ */
-+bool single_task_running(void)
-+{
-+	return raw_rq()->nr_running == 1;
-+}
-+EXPORT_SYMBOL(single_task_running);
-+
-+unsigned long long nr_context_switches_cpu(int cpu)
-+{
-+	return cpu_rq(cpu)->nr_switches;
-+}
-+
-+unsigned long long nr_context_switches(void)
-+{
-+	int i;
-+	unsigned long long sum = 0;
-+
-+	for_each_possible_cpu(i)
-+		sum += cpu_rq(i)->nr_switches;
-+
-+	return sum;
-+}
-+
-+/*
-+ * Consumers of these two interfaces, like for example the cpuidle menu
-+ * governor, are using nonsensical data: they prefer shallow idle state
-+ * selection for a CPU that has IO-wait pending, even though that CPU might
-+ * not even end up running the task when it does become runnable.
-+ */
-+
-+unsigned int nr_iowait_cpu(int cpu)
-+{
-+	return atomic_read(&cpu_rq(cpu)->nr_iowait);
-+}
-+
-+/*
-+ * IO-wait accounting, and how it's mostly bollocks (on SMP).
-+ *
-+ * The idea behind IO-wait accounting is to account the idle time that we could
-+ * have spent running if it were not for IO. That is, if we were to improve the
-+ * storage performance, we'd have a proportional reduction in IO-wait time.
-+ *
-+ * This all works nicely on UP, where, when a task blocks on IO, we account
-+ * idle time as IO-wait, because if the storage were faster, it could've been
-+ * running and we'd not be idle.
-+ *
-+ * This has been extended to SMP, by doing the same for each CPU. This however
-+ * is broken.
-+ *
-+ * Imagine for instance the case where two tasks block on one CPU, only the one
-+ * CPU will have IO-wait accounted, while the other has regular idle. Even
-+ * though, if the storage were faster, both could've run at the same time,
-+ * utilising both CPUs.
-+ *
-+ * This means, that when looking globally, the current IO-wait accounting on
-+ * SMP is a lower bound, due to under-accounting.
-+ *
-+ * Worse, since the numbers are provided per CPU, they are sometimes
-+ * interpreted per CPU, and that is nonsensical. A blocked task isn't strictly
-+ * associated with any one particular CPU; it can wake up on a different CPU
-+ * than the one it blocked on. This means the per-CPU IO-wait number is
-+ * meaningless.
-+ *
-+ * Task CPU affinities can make all that even more 'interesting'.
-+ */
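-+
-+/*
-+ * A concrete instance of the problem described above: tasks A and B both
-+ * block on IO while running on CPU0, and CPU1 idles. nr_iowait_cpu(0) is 2
-+ * while nr_iowait_cpu(1) is 0, so only CPU0's idle time shows up as
-+ * IO-wait, even though faster storage would have let A and B run on both
-+ * CPUs at once.
-+ */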
-+
-+unsigned int nr_iowait(void)
-+{
-+	unsigned int i, sum = 0;
-+
-+	for_each_possible_cpu(i)
-+		sum += nr_iowait_cpu(i);
-+
-+	return sum;
-+}
-+
-+#ifdef CONFIG_SMP
-+
-+/*
-+ * sched_exec - execve() is a valuable balancing opportunity, because at
-+ * this point the task has the smallest effective memory and cache
-+ * footprint.
-+ */
-+void sched_exec(void)
-+{
-+}
-+
-+#endif
-+
-+DEFINE_PER_CPU(struct kernel_stat, kstat);
-+DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
-+
-+EXPORT_PER_CPU_SYMBOL(kstat);
-+EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
-+
-+static inline void update_curr(struct rq *rq, struct task_struct *p)
-+{
-+	s64 ns = rq->clock_task - p->last_ran;
-+
-+	p->sched_time += ns;
-+	cgroup_account_cputime(p, ns);
-+	account_group_exec_runtime(p, ns);
-+
-+	p->time_slice -= ns;
-+	p->last_ran = rq->clock_task;
-+}
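-+
-+/*
-+ * e.g. if rq->clock_task - p->last_ran == 1200000 (1.2ms of runtime),
-+ * p->sched_time grows by 1.2ms, p->time_slice shrinks by 1.2ms, and
-+ * p->last_ran is reset so the next update only accounts the new delta.
-+ */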
-+
-+/*
-+ * Return accounted runtime for the task.
-+ * Separately return current's pending runtime that has not been
-+ * accounted yet.
-+ */
-+unsigned long long task_sched_runtime(struct task_struct *p)
-+{
-+	unsigned long flags;
-+	struct rq *rq;
-+	raw_spinlock_t *lock;
-+	u64 ns;
-+
-+#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
-+	/*
-+	 * 64-bit doesn't need locks to atomically read a 64-bit value.
-+	 * So we have an optimization chance when the task's delta_exec is 0.
-+	 * Reading ->on_cpu is racy, but this is OK.
-+	 *
-+	 * If we race with it leaving CPU, we'll take a lock. So we're correct.
-+	 * If we race with it entering CPU, unaccounted time is 0. This is
-+	 * indistinguishable from the read occurring a few cycles earlier.
-+	 * If we see ->on_cpu without ->on_rq, the task is leaving, and has
-+	 * been accounted, so we're correct here as well.
-+	 */
-+	if (!p->on_cpu || !task_on_rq_queued(p))
-+		return tsk_seruntime(p);
-+#endif
-+
-+	rq = task_access_lock_irqsave(p, &lock, &flags);
-+	/*
-+	 * Must be ->curr _and_ ->on_rq.  If dequeued, we would
-+	 * project cycles that may never be accounted to this
-+	 * thread, breaking clock_gettime().
-+	 */
-+	if (p == rq->curr && task_on_rq_queued(p)) {
-+		update_rq_clock(rq);
-+		update_curr(rq, p);
-+	}
-+	ns = tsk_seruntime(p);
-+	task_access_unlock_irqrestore(p, lock, &flags);
-+
-+	return ns;
-+}
-+
-+/* This manages tasks that have run out of timeslice during a scheduler_tick */
-+static inline void scheduler_task_tick(struct rq *rq)
-+{
-+	struct task_struct *p = rq->curr;
-+
-+	if (is_idle_task(p))
-+		return;
-+
-+	update_curr(rq, p);
-+	cpufreq_update_util(rq, 0);
-+
-+	/*
-+	 * Tasks that have less than RESCHED_NS of time slice left will be
-+	 * rescheduled.
-+	 */
-+	if (p->time_slice >= RESCHED_NS)
-+		return;
-+	set_tsk_need_resched(p);
-+	set_preempt_need_resched();
-+}
-+
-+#ifdef CONFIG_SCHED_DEBUG
-+static u64 cpu_resched_latency(struct rq *rq)
-+{
-+	int latency_warn_ms = READ_ONCE(sysctl_resched_latency_warn_ms);
-+	u64 resched_latency, now = rq_clock(rq);
-+	static bool warned_once;
-+
-+	if (sysctl_resched_latency_warn_once && warned_once)
-+		return 0;
-+
-+	if (!need_resched() || !latency_warn_ms)
-+		return 0;
-+
-+	if (system_state == SYSTEM_BOOTING)
-+		return 0;
-+
-+	if (!rq->last_seen_need_resched_ns) {
-+		rq->last_seen_need_resched_ns = now;
-+		rq->ticks_without_resched = 0;
-+		return 0;
-+	}
-+
-+	rq->ticks_without_resched++;
-+	resched_latency = now - rq->last_seen_need_resched_ns;
-+	if (resched_latency <= latency_warn_ms * NSEC_PER_MSEC)
-+		return 0;
-+
-+	warned_once = true;
-+
-+	return resched_latency;
-+}
-+
-+static int __init setup_resched_latency_warn_ms(char *str)
-+{
-+	long val;
-+
-+	if ((kstrtol(str, 0, &val))) {
-+		pr_warn("Unable to set resched_latency_warn_ms\n");
-+		return 1;
-+	}
-+
-+	sysctl_resched_latency_warn_ms = val;
-+	return 1;
-+}
-+__setup("resched_latency_warn_ms=", setup_resched_latency_warn_ms);
-+#else
-+static inline u64 cpu_resched_latency(struct rq *rq) { return 0; }
-+#endif /* CONFIG_SCHED_DEBUG */
-+
-+/*
-+ * This function gets called by the timer code, with HZ frequency.
-+ * We call it with interrupts disabled.
-+ */
-+void sched_tick(void)
-+{
-+	int cpu __maybe_unused = smp_processor_id();
-+	struct rq *rq = cpu_rq(cpu);
-+	struct task_struct *curr = rq->curr;
-+	u64 resched_latency;
-+
-+	if (housekeeping_cpu(cpu, HK_TYPE_TICK))
-+		arch_scale_freq_tick();
-+
-+	sched_clock_tick();
-+
-+	raw_spin_lock(&rq->lock);
-+	update_rq_clock(rq);
-+
-+	scheduler_task_tick(rq);
-+	if (sched_feat(LATENCY_WARN))
-+		resched_latency = cpu_resched_latency(rq);
-+	calc_global_load_tick(rq);
-+
-+	task_tick_mm_cid(rq, rq->curr);
-+
-+	raw_spin_unlock(&rq->lock);
-+
-+	if (sched_feat(LATENCY_WARN) && resched_latency)
-+		resched_latency_warn(cpu, resched_latency);
-+
-+	perf_event_task_tick();
-+
-+	if (curr->flags & PF_WQ_WORKER)
-+		wq_worker_tick(curr);
-+}
-+
-+#ifdef CONFIG_NO_HZ_FULL
-+
-+struct tick_work {
-+	int			cpu;
-+	atomic_t		state;
-+	struct delayed_work	work;
-+};
-+/* Values for ->state, see diagram below. */
-+#define TICK_SCHED_REMOTE_OFFLINE	0
-+#define TICK_SCHED_REMOTE_OFFLINING	1
-+#define TICK_SCHED_REMOTE_RUNNING	2
-+
-+/*
-+ * State diagram for ->state:
-+ *
-+ *
-+ *          TICK_SCHED_REMOTE_OFFLINE
-+ *                    |   ^
-+ *                    |   |
-+ *                    |   | sched_tick_remote()
-+ *                    |   |
-+ *                    |   |
-+ *                    +--TICK_SCHED_REMOTE_OFFLINING
-+ *                    |   ^
-+ *                    |   |
-+ * sched_tick_start() |   | sched_tick_stop()
-+ *                    |   |
-+ *                    V   |
-+ *          TICK_SCHED_REMOTE_RUNNING
-+ *
-+ *
-+ * Other transitions get WARN_ON_ONCE(), except that sched_tick_remote()
-+ * and sched_tick_start() are happy to leave the state in RUNNING.
-+ */
-+
-+static struct tick_work __percpu *tick_work_cpu;
-+
-+static void sched_tick_remote(struct work_struct *work)
-+{
-+	struct delayed_work *dwork = to_delayed_work(work);
-+	struct tick_work *twork = container_of(dwork, struct tick_work, work);
-+	int cpu = twork->cpu;
-+	struct rq *rq = cpu_rq(cpu);
-+	int os;
-+
-+	/*
-+	 * Handle the tick only if it appears the remote CPU is running in full
-+	 * dynticks mode. The check is racy by nature, but missing a tick or
-+	 * having one too much is no big deal because the scheduler tick updates
-+	 * statistics and checks timeslices in a time-independent way, regardless
-+	 * of when exactly it is running.
-+	 */
-+	if (tick_nohz_tick_stopped_cpu(cpu)) {
-+		guard(raw_spinlock_irqsave)(&rq->lock);
-+		struct task_struct *curr = rq->curr;
-+
-+		if (cpu_online(cpu)) {
-+			update_rq_clock(rq);
-+
-+			if (!is_idle_task(curr)) {
-+				/*
-+				 * Make sure the next tick runs within a
-+				 * reasonable amount of time.
-+				 */
-+				u64 delta = rq_clock_task(rq) - curr->last_ran;
-+				WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
-+			}
-+			scheduler_task_tick(rq);
-+
-+			calc_load_nohz_remote(rq);
-+		}
-+	}
-+
-+	/*
-+	 * Run the remote tick once per second (1Hz). This arbitrary
-+	 * period is long enough to avoid overload but short enough
-+	 * to keep scheduler internal stats reasonably up to date.  But
-+	 * first update state to reflect hotplug activity if required.
-+	 */
-+	os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING);
-+	WARN_ON_ONCE(os == TICK_SCHED_REMOTE_OFFLINE);
-+	if (os == TICK_SCHED_REMOTE_RUNNING)
-+		queue_delayed_work(system_unbound_wq, dwork, HZ);
-+}
-+
-+static void sched_tick_start(int cpu)
-+{
-+	int os;
-+	struct tick_work *twork;
-+
-+	if (housekeeping_cpu(cpu, HK_TYPE_TICK))
-+		return;
-+
-+	WARN_ON_ONCE(!tick_work_cpu);
-+
-+	twork = per_cpu_ptr(tick_work_cpu, cpu);
-+	os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING);
-+	WARN_ON_ONCE(os == TICK_SCHED_REMOTE_RUNNING);
-+	if (os == TICK_SCHED_REMOTE_OFFLINE) {
-+		twork->cpu = cpu;
-+		INIT_DELAYED_WORK(&twork->work, sched_tick_remote);
-+		queue_delayed_work(system_unbound_wq, &twork->work, HZ);
-+	}
-+}
-+
-+#ifdef CONFIG_HOTPLUG_CPU
-+static void sched_tick_stop(int cpu)
-+{
-+	struct tick_work *twork;
-+	int os;
-+
-+	if (housekeeping_cpu(cpu, HK_TYPE_TICK))
-+		return;
-+
-+	WARN_ON_ONCE(!tick_work_cpu);
-+
-+	twork = per_cpu_ptr(tick_work_cpu, cpu);
-+	/* There cannot be competing actions, but don't rely on stop-machine. */
-+	os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_OFFLINING);
-+	WARN_ON_ONCE(os != TICK_SCHED_REMOTE_RUNNING);
-+	/* Don't cancel, as this would mess up the state machine. */
-+}
-+#endif /* CONFIG_HOTPLUG_CPU */
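-+
-+/*
-+ * The atomic transitions implementing the diagram above:
-+ *
-+ *   sched_tick_start():  atomic_xchg(&state, RUNNING); an OFFLINE -> RUNNING
-+ *                        transition initializes and queues the work, while
-+ *                        OFFLINING -> RUNNING merely revives the pending work.
-+ *   sched_tick_stop():   atomic_xchg(&state, OFFLINING).
-+ *   sched_tick_remote(): atomic_fetch_add_unless(&state, -1, RUNNING) turns
-+ *                        OFFLINING into OFFLINE; a RUNNING state is left
-+ *                        untouched and the work re-queues itself.
-+ */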
-+
-+int __init sched_tick_offload_init(void)
-+{
-+	tick_work_cpu = alloc_percpu(struct tick_work);
-+	BUG_ON(!tick_work_cpu);
-+	return 0;
-+}
-+
-+#else /* !CONFIG_NO_HZ_FULL */
-+static inline void sched_tick_start(int cpu) { }
-+static inline void sched_tick_stop(int cpu) { }
-+#endif
-+
-+#if defined(CONFIG_PREEMPTION) && (defined(CONFIG_DEBUG_PREEMPT) || \
-+				defined(CONFIG_PREEMPT_TRACER))
-+/*
-+ * If the value passed in is equal to the current preempt count
-+ * then we just disabled preemption. Start timing the latency.
-+ */
-+static inline void preempt_latency_start(int val)
-+{
-+	if (preempt_count() == val) {
-+		unsigned long ip = get_lock_parent_ip();
-+#ifdef CONFIG_DEBUG_PREEMPT
-+		current->preempt_disable_ip = ip;
-+#endif
-+		trace_preempt_off(CALLER_ADDR0, ip);
-+	}
-+}
-+
-+void preempt_count_add(int val)
-+{
-+#ifdef CONFIG_DEBUG_PREEMPT
-+	/*
-+	 * Underflow?
-+	 */
-+	if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
-+		return;
-+#endif
-+	__preempt_count_add(val);
-+#ifdef CONFIG_DEBUG_PREEMPT
-+	/*
-+	 * Spinlock count overflowing soon?
-+	 */
-+	DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
-+				PREEMPT_MASK - 10);
-+#endif
-+	preempt_latency_start(val);
-+}
-+EXPORT_SYMBOL(preempt_count_add);
-+NOKPROBE_SYMBOL(preempt_count_add);
-+
-+/*
-+ * If the value passed in equals the current preempt count
-+ * then we just enabled preemption. Stop timing the latency.
-+ */
-+static inline void preempt_latency_stop(int val)
-+{
-+	if (preempt_count() == val)
-+		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
-+}
-+
-+void preempt_count_sub(int val)
-+{
-+#ifdef CONFIG_DEBUG_PREEMPT
-+	/*
-+	 * Underflow?
-+	 */
-+	if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
-+		return;
-+	/*
-+	 * Is the spinlock portion underflowing?
-+	 */
-+	if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
-+			!(preempt_count() & PREEMPT_MASK)))
-+		return;
-+#endif
-+
-+	preempt_latency_stop(val);
-+	__preempt_count_sub(val);
-+}
-+EXPORT_SYMBOL(preempt_count_sub);
-+NOKPROBE_SYMBOL(preempt_count_sub);
-+
-+#else
-+static inline void preempt_latency_start(int val) { }
-+static inline void preempt_latency_stop(int val) { }
-+#endif
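-+
-+/*
-+ * Why the "preempt_count() == val" checks above fire only on the
-+ * outermost transition (illustrative trace, starting from count 0):
-+ *
-+ *   preempt_disable();   // add(1): count 0 -> 1, 1 == val -> start timing
-+ *   preempt_disable();   // add(1): count 1 -> 2, 2 != val -> no-op
-+ *   preempt_enable();    // sub(1): count 2 != val(1)      -> no-op
-+ *   preempt_enable();    // sub(1): count 1 == val(1)      -> stop timing
-+ */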
-+
-+static inline unsigned long get_preempt_disable_ip(struct task_struct *p)
-+{
-+#ifdef CONFIG_DEBUG_PREEMPT
-+	return p->preempt_disable_ip;
-+#else
-+	return 0;
-+#endif
-+}
-+
-+/*
-+ * Print scheduling while atomic bug:
-+ */
-+static noinline void __schedule_bug(struct task_struct *prev)
-+{
-+	/* Save this before calling printk(), since that will clobber it */
-+	unsigned long preempt_disable_ip = get_preempt_disable_ip(current);
-+
-+	if (oops_in_progress)
-+		return;
-+
-+	printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
-+		prev->comm, prev->pid, preempt_count());
-+
-+	debug_show_held_locks(prev);
-+	print_modules();
-+	if (irqs_disabled())
-+		print_irqtrace_events(prev);
-+	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) {
-+		pr_err("Preemption disabled at:");
-+		print_ip_sym(KERN_ERR, preempt_disable_ip);
-+	}
-+	check_panic_on_warn("scheduling while atomic");
-+
-+	dump_stack();
-+	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
-+}
-+
-+/*
-+ * Various schedule()-time debugging checks and statistics:
-+ */
-+static inline void schedule_debug(struct task_struct *prev, bool preempt)
-+{
-+#ifdef CONFIG_SCHED_STACK_END_CHECK
-+	if (task_stack_end_corrupted(prev))
-+		panic("corrupted stack end detected inside scheduler\n");
-+
-+	if (task_scs_end_corrupted(prev))
-+		panic("corrupted shadow stack detected inside scheduler\n");
-+#endif
-+
-+#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
-+	if (!preempt && READ_ONCE(prev->__state) && prev->non_block_count) {
-+		printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n",
-+			prev->comm, prev->pid, prev->non_block_count);
-+		dump_stack();
-+		add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
-+	}
-+#endif
-+
-+	if (unlikely(in_atomic_preempt_off())) {
-+		__schedule_bug(prev);
-+		preempt_count_set(PREEMPT_DISABLED);
-+	}
-+	rcu_sleep_check();
-+	SCHED_WARN_ON(ct_state() == CT_STATE_USER);
-+
-+	profile_hit(SCHED_PROFILING, __builtin_return_address(0));
-+
-+	schedstat_inc(this_rq()->sched_count);
-+}
-+
-+#ifdef ALT_SCHED_DEBUG
-+void alt_sched_debug(void)
-+{
-+	printk(KERN_INFO "sched: pending: 0x%04lx, idle: 0x%04lx, sg_idle: 0x%04lx,"
-+	       " ecore_idle: 0x%04lx\n",
-+	       sched_rq_pending_mask.bits[0],
-+	       sched_idle_mask->bits[0],
-+	       sched_pcore_idle_mask->bits[0],
-+	       sched_ecore_idle_mask->bits[0]);
-+}
-+#endif
-+
-+#ifdef	CONFIG_SMP
-+
-+#ifdef CONFIG_PREEMPT_RT
-+#define SCHED_NR_MIGRATE_BREAK 8
-+#else
-+#define SCHED_NR_MIGRATE_BREAK 32
-+#endif
-+
-+const_debug unsigned int sysctl_sched_nr_migrate = SCHED_NR_MIGRATE_BREAK;
-+
-+/*
-+ * Migrate pending tasks in @rq to @dest_cpu
-+ */
-+static inline int
-+migrate_pending_tasks(struct rq *rq, struct rq *dest_rq, const int dest_cpu)
-+{
-+	struct task_struct *p, *skip = rq->curr;
-+	int nr_migrated = 0;
-+	int nr_tries = min(rq->nr_running / 2, sysctl_sched_nr_migrate);
-+
-+	/* Workaround to check that rq->curr is still on the rq */
-+	if (!task_on_rq_queued(skip))
-+		return 0;
-+
-+	while (skip != rq->idle && nr_tries &&
-+	       (p = sched_rq_next_task(skip, rq)) != rq->idle) {
-+		skip = sched_rq_next_task(p, rq);
-+		if (cpumask_test_cpu(dest_cpu, p->cpus_ptr)) {
-+			__SCHED_DEQUEUE_TASK(p, rq, 0, );
-+			set_task_cpu(p, dest_cpu);
-+			sched_task_sanity_check(p, dest_rq);
-+			sched_mm_cid_migrate_to(dest_rq, p);
-+			__SCHED_ENQUEUE_TASK(p, dest_rq, 0, );
-+			nr_migrated++;
-+		}
-+		nr_tries--;
-+	}
-+
-+	return nr_migrated;
-+}
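/*
 * Editor's note (illustrative, not part of the original patch): the
 * nr_tries bound above caps how much work one pull can do.  With 10
 * queued tasks and the default sysctl_sched_nr_migrate of 32 (8 on
 * PREEMPT_RT), at most min(10 / 2, 32) == 5 candidates are examined,
 * keeping rq->lock hold times short.
 */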
-+
-+static inline int take_other_rq_tasks(struct rq *rq, int cpu)
-+{
-+	cpumask_t *topo_mask, *end_mask, chk;
-+
-+	if (unlikely(!rq->online))
-+		return 0;
-+
-+	if (cpumask_empty(&sched_rq_pending_mask))
-+		return 0;
-+
-+	topo_mask = per_cpu(sched_cpu_topo_masks, cpu);
-+	end_mask = per_cpu(sched_cpu_topo_end_mask, cpu);
-+	do {
-+		int i;
-+
-+		if (!cpumask_and(&chk, &sched_rq_pending_mask, topo_mask))
-+			continue;
-+
-+		for_each_cpu_wrap(i, &chk, cpu) {
-+			int nr_migrated;
-+			struct rq *src_rq;
-+
-+			src_rq = cpu_rq(i);
-+			if (!do_raw_spin_trylock(&src_rq->lock))
-+				continue;
-+			spin_acquire(&src_rq->lock.dep_map,
-+				     SINGLE_DEPTH_NESTING, 1, _RET_IP_);
-+
-+			if ((nr_migrated = migrate_pending_tasks(src_rq, rq, cpu))) {
-+				src_rq->nr_running -= nr_migrated;
-+				if (src_rq->nr_running < 2)
-+					cpumask_clear_cpu(i, &sched_rq_pending_mask);
-+
-+				spin_release(&src_rq->lock.dep_map, _RET_IP_);
-+				do_raw_spin_unlock(&src_rq->lock);
-+
-+				rq->nr_running += nr_migrated;
-+				if (rq->nr_running > 1)
-+					cpumask_set_cpu(cpu, &sched_rq_pending_mask);
-+
-+				update_sched_preempt_mask(rq);
-+				cpufreq_update_util(rq, 0);
-+
-+				return 1;
-+			}
-+
-+			spin_release(&src_rq->lock.dep_map, _RET_IP_);
-+			do_raw_spin_unlock(&src_rq->lock);
-+		}
-+	} while (++topo_mask < end_mask);
-+
-+	return 0;
-+}
-+#endif
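/*
 * Editor's sketch (not part of the original patch): the per-CPU
 * sched_cpu_topo_masks array walked above is ordered from the tightest
 * sharing level outwards, so an idle CPU pulls from its SMT siblings
 * first, then its LLC, and only then from the rest of the system:
 *
 *	topo_mask:  [ smt | cluster | coregroup | core | others ]
 *	              ^ tried first                       ^ tried last
 */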
-+
-+static inline void time_slice_expired(struct task_struct *p, struct rq *rq)
-+{
-+	p->time_slice = sysctl_sched_base_slice;
-+
-+	sched_task_renew(p, rq);
-+
-+	if (SCHED_FIFO != p->policy && task_on_rq_queued(p))
-+		requeue_task(p, rq);
-+}
-+
-+/*
-+ * Timeslices below RESCHED_NS are treated as already expired, since there is
-+ * no point rescheduling when so little time is left.
-+ */
-+static inline void check_curr(struct task_struct *p, struct rq *rq)
-+{
-+	if (unlikely(rq->idle == p))
-+		return;
-+
-+	update_curr(rq, p);
-+
-+	if (p->time_slice < RESCHED_NS)
-+		time_slice_expired(p, rq);
-+}
-+
-+static inline struct task_struct *
-+choose_next_task(struct rq *rq, int cpu)
-+{
-+	struct task_struct *next = sched_rq_first_task(rq);
-+
-+	if (next == rq->idle) {
-+#ifdef	CONFIG_SMP
-+		if (!take_other_rq_tasks(rq, cpu)) {
-+			if (likely(rq->balance_func && rq->online))
-+				rq->balance_func(rq, cpu);
-+#endif /* CONFIG_SMP */
-+
-+			schedstat_inc(rq->sched_goidle);
-+			/*printk(KERN_INFO "sched: choose_next_task(%d) idle %px\n", cpu, next);*/
-+			return next;
-+#ifdef	CONFIG_SMP
-+		}
-+		next = sched_rq_first_task(rq);
-+#endif
-+	}
-+#ifdef CONFIG_HIGH_RES_TIMERS
-+	hrtick_start(rq, next->time_slice);
-+#endif
-+	/*printk(KERN_INFO "sched: choose_next_task(%d) next %px\n", cpu, next);*/
-+	return next;
-+}
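/*
 * Editor's summary (not part of the original patch) of the pick path
 * above:
 *
 *	next = sched_rq_first_task(rq)
 *	if next is the idle task:
 *		try take_other_rq_tasks(); if nothing could be pulled,
 *		run the balance callback and go idle;
 *		otherwise re-read the (now non-empty) queue head
 *	arm the hrtick for next->time_slice when HIGH_RES_TIMERS is on
 */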
-+
-+/*
-+ * Constants for the sched_mode argument of __schedule().
-+ *
-+ * The mode argument allows RT enabled kernels to differentiate a
-+ * preemption from blocking on an 'sleeping' spin/rwlock.
-+ */
-+#define SM_IDLE		(-1)
-+#define SM_NONE		0
-+#define SM_PREEMPT		1
-+#define SM_RTLOCK_WAIT		2
-+
-+/*
-+ * schedule() is the main scheduler function.
-+ *
-+ * The main means of driving the scheduler and thus entering this function are:
-+ *
-+ *   1. Explicit blocking: mutex, semaphore, waitqueue, etc.
-+ *
-+ *   2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
-+ *      paths. For example, see arch/x86/entry_64.S.
-+ *
-+ *      To drive preemption between tasks, the scheduler sets the flag in timer
-+ *      interrupt handler sched_tick().
-+ *
-+ *   3. Wakeups don't really cause entry into schedule(). They add a
-+ *      task to the run-queue and that's it.
-+ *
-+ *      Now, if the new task added to the run-queue preempts the current
-+ *      task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
-+ *      called on the nearest possible occasion:
-+ *
-+ *       - If the kernel is preemptible (CONFIG_PREEMPTION=y):
-+ *
-+ *         - in syscall or exception context, at the next outmost
-+ *           preempt_enable(). (this might be as soon as the wake_up()'s
-+ *           spin_unlock()!)
-+ *
-+ *         - in IRQ context, return from interrupt-handler to
-+ *           preemptible context
-+ *
-+ *       - If the kernel is not preemptible (CONFIG_PREEMPTION is not set)
-+ *         then at the next:
-+ *
-+ *          - cond_resched() call
-+ *          - explicit schedule() call
-+ *          - return from syscall or exception to user-space
-+ *          - return from interrupt-handler to user-space
-+ *
-+ * WARNING: must be called with preemption disabled!
-+ */
-+static void __sched notrace __schedule(int sched_mode)
-+{
-+	struct task_struct *prev, *next;
-+	/*
-+	 * On PREEMPT_RT kernel, SM_RTLOCK_WAIT is noted
-+	 * as a preemption by schedule_debug() and RCU.
-+	 */
-+	bool preempt = sched_mode > SM_NONE;
-+	unsigned long *switch_count;
-+	unsigned long prev_state;
-+	struct rq *rq;
-+	int cpu;
-+
-+	cpu = smp_processor_id();
-+	rq = cpu_rq(cpu);
-+	prev = rq->curr;
-+
-+	schedule_debug(prev, preempt);
-+
-+	/* Bypass the sched_feat(HRTICK) check, which Alt schedule FW doesn't support */
-+	hrtick_clear(rq);
-+
-+	local_irq_disable();
-+	rcu_note_context_switch(preempt);
-+
-+	/*
-+	 * Make sure that signal_pending_state()->signal_pending() below
-+	 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
-+	 * done by the caller to avoid the race with signal_wake_up():
-+	 *
-+	 * __set_current_state(@state)		signal_wake_up()
-+	 * schedule()				  set_tsk_thread_flag(p, TIF_SIGPENDING)
-+	 *					  wake_up_state(p, state)
-+	 *   LOCK rq->lock			    LOCK p->pi_state
-+	 *   smp_mb__after_spinlock()		    smp_mb__after_spinlock()
-+	 *     if (signal_pending_state())	    if (p->state & @state)
-+	 *
-+	 * Also, the membarrier system call requires a full memory barrier
-+	 * after coming from user-space, before storing to rq->curr; this
-+	 * barrier matches a full barrier in the proximity of the membarrier
-+	 * system call exit.
-+	 */
-+	raw_spin_lock(&rq->lock);
-+	smp_mb__after_spinlock();
-+
-+	update_rq_clock(rq);
-+
-+	switch_count = &prev->nivcsw;
-+
-+	/* Task state handling only considers SM_PREEMPT to be a preemption */
-+	preempt = sched_mode == SM_PREEMPT;
-+
-+	/*
-+	 * We must load prev->state once (task_struct::state is volatile), such
-+	 * that we form a control dependency vs deactivate_task() below.
-+	 */
-+	prev_state = READ_ONCE(prev->__state);
-+	if (sched_mode == SM_IDLE) {
-+		if (!rq->nr_running) {
-+			next = prev;
-+			goto picked;
-+		}
-+	} else if (!preempt && prev_state) {
-+		if (signal_pending_state(prev_state, prev)) {
-+			WRITE_ONCE(prev->__state, TASK_RUNNING);
-+		} else {
-+			prev->sched_contributes_to_load =
-+				(prev_state & TASK_UNINTERRUPTIBLE) &&
-+				!(prev_state & TASK_NOLOAD) &&
-+				!(prev_state & TASK_FROZEN);
-+
-+			/*
-+			 * __schedule()			ttwu()
-+			 *   prev_state = prev->state;    if (p->on_rq && ...)
-+			 *   if (prev_state)		    goto out;
-+			 *     p->on_rq = 0;		  smp_acquire__after_ctrl_dep();
-+			 *				  p->state = TASK_WAKING
-+			 *
-+			 * Where __schedule() and ttwu() have matching control dependencies.
-+			 *
-+			 * After this, schedule() must not care about p->state any more.
-+			 */
-+			sched_task_deactivate(prev, rq);
-+			block_task(rq, prev);
-+		}
-+		switch_count = &prev->nvcsw;
-+	}
-+
-+	check_curr(prev, rq);
-+
-+	next = choose_next_task(rq, cpu);
-+picked:
-+	clear_tsk_need_resched(prev);
-+	clear_preempt_need_resched();
-+#ifdef CONFIG_SCHED_DEBUG
-+	rq->last_seen_need_resched_ns = 0;
-+#endif
-+
-+	if (likely(prev != next)) {
-+		next->last_ran = rq->clock_task;
-+
-+		/*printk(KERN_INFO "sched: %px -> %px\n", prev, next);*/
-+		rq->nr_switches++;
-+		/*
-+		 * RCU users of rcu_dereference(rq->curr) may not see
-+		 * changes to task_struct made by pick_next_task().
-+		 */
-+		RCU_INIT_POINTER(rq->curr, next);
-+		/*
-+		 * The membarrier system call requires each architecture
-+		 * to have a full memory barrier after updating
-+		 * rq->curr, before returning to user-space.
-+		 *
-+		 * Here are the schemes providing that barrier on the
-+		 * various architectures:
-+		 * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC,
-+		 *   RISC-V.  switch_mm() relies on membarrier_arch_switch_mm()
-+		 *   on PowerPC and on RISC-V.
-+		 * - finish_lock_switch() for weakly-ordered
-+		 *   architectures where spin_unlock is a full barrier,
-+		 * - switch_to() for arm64 (weakly-ordered, spin_unlock
-+		 *   is a RELEASE barrier),
-+		 *
-+		 * The barrier matches a full barrier in the proximity of
-+		 * the membarrier system call entry.
-+		 *
-+		 * On RISC-V, this barrier pairing is also needed for the
-+		 * SYNC_CORE command when switching between processes, cf.
-+		 * the inline comments in membarrier_arch_switch_mm().
-+		 */
-+		++*switch_count;
-+
-+		trace_sched_switch(preempt, prev, next, prev_state);
-+
-+		/* Also unlocks the rq: */
-+		rq = context_switch(rq, prev, next);
-+
-+		cpu = cpu_of(rq);
-+	} else {
-+		__balance_callbacks(rq);
-+		raw_spin_unlock_irq(&rq->lock);
-+	}
-+}
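/*
 * Editor's cheat-sheet (not part of the original patch) for the
 * sched_mode handling in __schedule() above:
 *
 *	SM_IDLE        : from the idle loop; an empty runqueue
 *	                 short-circuits to "next = prev"
 *	SM_NONE        : voluntary schedule(); a sleeping prev may be
 *	                 blocked (dequeued) here
 *	SM_PREEMPT     : involuntary; prev stays queued, state untouched
 *	SM_RTLOCK_WAIT : RT spin/rwlock wait; counted as preemption only
 *	                 by schedule_debug() and RCU
 */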
-+
-+void __noreturn do_task_dead(void)
-+{
-+	/* Causes final put_task_struct in finish_task_switch(): */
-+	set_special_state(TASK_DEAD);
-+
-+	/* Tell freezer to ignore us: */
-+	current->flags |= PF_NOFREEZE;
-+
-+	__schedule(SM_NONE);
-+	BUG();
-+
-+	/* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */
-+	for (;;)
-+		cpu_relax();
-+}
-+
-+static inline void sched_submit_work(struct task_struct *tsk)
-+{
-+	static DEFINE_WAIT_OVERRIDE_MAP(sched_map, LD_WAIT_CONFIG);
-+	unsigned int task_flags;
-+
-+	/*
-+	 * Establish LD_WAIT_CONFIG context to ensure none of the code called
-+	 * will use a blocking primitive -- which would lead to recursion.
-+	 */
-+	lock_map_acquire_try(&sched_map);
-+
-+	task_flags = tsk->flags;
-+	/*
-+	 * If a worker goes to sleep, notify and ask workqueue whether it
-+	 * wants to wake up a task to maintain concurrency.
-+	 */
-+	if (task_flags & PF_WQ_WORKER)
-+		wq_worker_sleeping(tsk);
-+	else if (task_flags & PF_IO_WORKER)
-+		io_wq_worker_sleeping(tsk);
-+
-+	/*
-+	 * spinlock and rwlock must not flush block requests.  This will
-+	 * deadlock if the callback attempts to acquire a lock which is
-+	 * already acquired.
-+	 */
-+	SCHED_WARN_ON(current->__state & TASK_RTLOCK_WAIT);
-+
-+	/*
-+	 * If we are going to sleep and we have plugged IO queued,
-+	 * make sure to submit it to avoid deadlocks.
-+	 */
-+	blk_flush_plug(tsk->plug, true);
-+
-+	lock_map_release(&sched_map);
-+}
-+
-+static void sched_update_worker(struct task_struct *tsk)
-+{
-+	if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER | PF_BLOCK_TS)) {
-+		if (tsk->flags & PF_BLOCK_TS)
-+			blk_plug_invalidate_ts(tsk);
-+		if (tsk->flags & PF_WQ_WORKER)
-+			wq_worker_running(tsk);
-+		else if (tsk->flags & PF_IO_WORKER)
-+			io_wq_worker_running(tsk);
-+	}
-+}
-+
-+static __always_inline void __schedule_loop(int sched_mode)
-+{
-+	do {
-+		preempt_disable();
-+		__schedule(sched_mode);
-+		sched_preempt_enable_no_resched();
-+	} while (need_resched());
-+}
-+
-+asmlinkage __visible void __sched schedule(void)
-+{
-+	struct task_struct *tsk = current;
-+
-+#ifdef CONFIG_RT_MUTEXES
-+	lockdep_assert(!tsk->sched_rt_mutex);
-+#endif
-+
-+	if (!task_is_running(tsk))
-+		sched_submit_work(tsk);
-+	__schedule_loop(SM_NONE);
-+	sched_update_worker(tsk);
-+}
-+EXPORT_SYMBOL(schedule);
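/*
 * Editor's call-flow sketch (not part of the original patch) for
 * schedule() above:
 *
 *	schedule()
 *	  sched_submit_work()       // flush plugged IO, notify workqueue
 *	  __schedule_loop(SM_NONE)  // preempt_disable(); __schedule(); ...
 *	  sched_update_worker()     // tell workqueue/io_wq we are back
 */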
-+
-+/*
-+ * synchronize_rcu_tasks() makes sure that no task is stuck in preempted
-+ * state (have scheduled out non-voluntarily) by making sure that all
-+ * tasks have either left the run queue or have gone into user space.
-+ * As idle tasks do not do either, they must not ever be preempted
-+ * (schedule out non-voluntarily).
-+ *
-+ * schedule_idle() is similar to schedule_preempt_disabled() except that it
-+ * never enables preemption because it does not call sched_submit_work().
-+ */
-+void __sched schedule_idle(void)
-+{
-+	/*
-+	 * As this skips calling sched_submit_work(), which the idle task does
-+	 * regardless because that function is a NOP when the task is in a
-+	 * TASK_RUNNING state, make sure this isn't used someplace that the
-+	 * current task can be in any other state. Note, idle is always in the
-+	 * TASK_RUNNING state.
-+	 */
-+	WARN_ON_ONCE(current->__state);
-+	do {
-+		__schedule(SM_IDLE);
-+	} while (need_resched());
-+}
-+
-+#if defined(CONFIG_CONTEXT_TRACKING_USER) && !defined(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK)
-+asmlinkage __visible void __sched schedule_user(void)
-+{
-+	/*
-+	 * If we come here after a random call to set_need_resched(),
-+	 * or we have been woken up remotely but the IPI has not yet arrived,
-+	 * we haven't yet exited the RCU idle mode. Do it here manually until
-+	 * we find a better solution.
-+	 *
-+	 * NB: There are buggy callers of this function.  Ideally we
-+	 * should warn if prev_state != CT_STATE_USER, but that will trigger
-+	 * too frequently to make sense yet.
-+	 */
-+	enum ctx_state prev_state = exception_enter();
-+	schedule();
-+	exception_exit(prev_state);
-+}
-+#endif
-+
-+/**
-+ * schedule_preempt_disabled - called with preemption disabled
-+ *
-+ * Returns with preemption disabled. Note: preempt_count must be 1
-+ */
-+void __sched schedule_preempt_disabled(void)
-+{
-+	sched_preempt_enable_no_resched();
-+	schedule();
-+	preempt_disable();
-+}
-+
-+#ifdef CONFIG_PREEMPT_RT
-+void __sched notrace schedule_rtlock(void)
-+{
-+	__schedule_loop(SM_RTLOCK_WAIT);
-+}
-+NOKPROBE_SYMBOL(schedule_rtlock);
-+#endif
-+
-+static void __sched notrace preempt_schedule_common(void)
-+{
-+	do {
-+		/*
-+		 * Because the function tracer can trace preempt_count_sub()
-+		 * and it also uses preempt_enable/disable_notrace(), if
-+		 * NEED_RESCHED is set, the preempt_enable_notrace() called
-+		 * by the function tracer will call this function again and
-+		 * cause infinite recursion.
-+		 *
-+		 * Preemption must be disabled here before the function
-+		 * tracer can trace. Break up preempt_disable() into two
-+		 * calls. One to disable preemption without fear of being
-+		 * traced. The other to still record the preemption latency,
-+		 * which can also be traced by the function tracer.
-+		 */
-+		preempt_disable_notrace();
-+		preempt_latency_start(1);
-+		__schedule(SM_PREEMPT);
-+		preempt_latency_stop(1);
-+		preempt_enable_no_resched_notrace();
-+
-+		/*
-+		 * Check again in case we missed a preemption opportunity
-+		 * between schedule and now.
-+		 */
-+	} while (need_resched());
-+}
-+
-+#ifdef CONFIG_PREEMPTION
-+/*
-+ * This is the entry point to schedule() from in-kernel preemption
-+ * off of preempt_enable.
-+ */
-+asmlinkage __visible void __sched notrace preempt_schedule(void)
-+{
-+	/*
-+	 * If there is a non-zero preempt_count or interrupts are disabled,
-+	 * we do not want to preempt the current task. Just return.
-+	 */
-+	if (likely(!preemptible()))
-+		return;
-+
-+	preempt_schedule_common();
-+}
-+NOKPROBE_SYMBOL(preempt_schedule);
-+EXPORT_SYMBOL(preempt_schedule);
-+
-+#ifdef CONFIG_PREEMPT_DYNAMIC
-+#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
-+#ifndef preempt_schedule_dynamic_enabled
-+#define preempt_schedule_dynamic_enabled	preempt_schedule
-+#define preempt_schedule_dynamic_disabled	NULL
-+#endif
-+DEFINE_STATIC_CALL(preempt_schedule, preempt_schedule_dynamic_enabled);
-+EXPORT_STATIC_CALL_TRAMP(preempt_schedule);
-+#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
-+static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule);
-+void __sched notrace dynamic_preempt_schedule(void)
-+{
-+	if (!static_branch_unlikely(&sk_dynamic_preempt_schedule))
-+		return;
-+	preempt_schedule();
-+}
-+NOKPROBE_SYMBOL(dynamic_preempt_schedule);
-+EXPORT_SYMBOL(dynamic_preempt_schedule);
-+#endif
-+#endif
-+
-+/**
-+ * preempt_schedule_notrace - preempt_schedule called by tracing
-+ *
-+ * The tracing infrastructure uses preempt_enable_notrace to prevent
-+ * recursion and tracing preempt enabling caused by the tracing
-+ * infrastructure itself. But as tracing can happen in areas coming
-+ * from userspace or just about to enter userspace, a preempt enable
-+ * can occur before user_exit() is called. This will cause the scheduler
-+ * to be called when the system is still in usermode.
-+ *
-+ * To prevent this, the preempt_enable_notrace will use this function
-+ * instead of preempt_schedule() to exit user context if needed before
-+ * calling the scheduler.
-+ */
-+asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
-+{
-+	enum ctx_state prev_ctx;
-+
-+	if (likely(!preemptible()))
-+		return;
-+
-+	do {
-+		/*
-+		 * Because the function tracer can trace preempt_count_sub()
-+		 * and it also uses preempt_enable/disable_notrace(), if
-+		 * NEED_RESCHED is set, the preempt_enable_notrace() called
-+		 * by the function tracer will call this function again and
-+		 * cause infinite recursion.
-+		 *
-+		 * Preemption must be disabled here before the function
-+		 * tracer can trace. Break up preempt_disable() into two
-+		 * calls. One to disable preemption without fear of being
-+		 * traced. The other to still record the preemption latency,
-+		 * which can also be traced by the function tracer.
-+		 */
-+		preempt_disable_notrace();
-+		preempt_latency_start(1);
-+		/*
-+		 * Needs preempt disabled in case user_exit() is traced
-+		 * and the tracer calls preempt_enable_notrace() causing
-+		 * an infinite recursion.
-+		 */
-+		prev_ctx = exception_enter();
-+		__schedule(SM_PREEMPT);
-+		exception_exit(prev_ctx);
-+
-+		preempt_latency_stop(1);
-+		preempt_enable_no_resched_notrace();
-+	} while (need_resched());
-+}
-+EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
-+
-+#ifdef CONFIG_PREEMPT_DYNAMIC
-+#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
-+#ifndef preempt_schedule_notrace_dynamic_enabled
-+#define preempt_schedule_notrace_dynamic_enabled	preempt_schedule_notrace
-+#define preempt_schedule_notrace_dynamic_disabled	NULL
-+#endif
-+DEFINE_STATIC_CALL(preempt_schedule_notrace, preempt_schedule_notrace_dynamic_enabled);
-+EXPORT_STATIC_CALL_TRAMP(preempt_schedule_notrace);
-+#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
-+static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule_notrace);
-+void __sched notrace dynamic_preempt_schedule_notrace(void)
-+{
-+	if (!static_branch_unlikely(&sk_dynamic_preempt_schedule_notrace))
-+		return;
-+	preempt_schedule_notrace();
-+}
-+NOKPROBE_SYMBOL(dynamic_preempt_schedule_notrace);
-+EXPORT_SYMBOL(dynamic_preempt_schedule_notrace);
-+#endif
-+#endif
-+
-+#endif /* CONFIG_PREEMPTION */
-+
-+/*
-+ * This is the entry point to schedule() from kernel preemption
-+ * off of IRQ context.
-+ * Note that this is called and returns with IRQs disabled. This will
-+ * protect us against recursive calling from IRQ contexts.
-+ */
-+asmlinkage __visible void __sched preempt_schedule_irq(void)
-+{
-+	enum ctx_state prev_state;
-+
-+	/* Catch callers which need to be fixed */
-+	BUG_ON(preempt_count() || !irqs_disabled());
-+
-+	prev_state = exception_enter();
-+
-+	do {
-+		preempt_disable();
-+		local_irq_enable();
-+		__schedule(SM_PREEMPT);
-+		local_irq_disable();
-+		sched_preempt_enable_no_resched();
-+	} while (need_resched());
-+
-+	exception_exit(prev_state);
-+}
-+
-+int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags,
-+			  void *key)
-+{
-+	WARN_ON_ONCE(IS_ENABLED(CONFIG_SCHED_DEBUG) && wake_flags & ~(WF_SYNC|WF_CURRENT_CPU));
-+	return try_to_wake_up(curr->private, mode, wake_flags);
-+}
-+EXPORT_SYMBOL(default_wake_function);
-+
-+void check_task_changed(struct task_struct *p, struct rq *rq)
-+{
-+	/* Trigger resched if task sched_prio has been modified. */
-+	if (task_on_rq_queued(p)) {
-+		update_rq_clock(rq);
-+		requeue_task(p, rq);
-+		wakeup_preempt(rq);
-+	}
-+}
-+
-+void __setscheduler_prio(struct task_struct *p, int prio)
-+{
-+	p->prio = prio;
-+}
-+
-+#ifdef CONFIG_RT_MUTEXES
-+
-+/*
-+ * Would be more useful with typeof()/auto_type but they don't mix with
-+ * bit-fields. Since it's a local thing, use int. Keep the generic-sounding
-+ * name such that if someone were to implement this function we get to compare
-+ * notes.
-+ */
-+#define fetch_and_set(x, v) ({ int _x = (x); (x) = (v); _x; })
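/*
 * Editor's usage sketch (not part of the original patch) for the
 * fetch_and_set() macro above:
 */
static inline void example_fetch_and_set(void)
{
	int flag = 0;
	int old = fetch_and_set(flag, 1);	/* old == 0, flag == 1 */

	(void)old;
}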
-+
-+void rt_mutex_pre_schedule(void)
-+{
-+	lockdep_assert(!fetch_and_set(current->sched_rt_mutex, 1));
-+	sched_submit_work(current);
-+}
-+
-+void rt_mutex_schedule(void)
-+{
-+	lockdep_assert(current->sched_rt_mutex);
-+	__schedule_loop(SM_NONE);
-+}
-+
-+void rt_mutex_post_schedule(void)
-+{
-+	sched_update_worker(current);
-+	lockdep_assert(fetch_and_set(current->sched_rt_mutex, 0));
-+}
-+
-+/*
-+ * rt_mutex_setprio - set the current priority of a task
-+ * @p: task to boost
-+ * @pi_task: donor task
-+ *
-+ * This function changes the 'effective' priority of a task. It does
-+ * not touch ->normal_prio like __setscheduler().
-+ *
-+ * Used by the rt_mutex code to implement priority inheritance
-+ * logic. Call site only calls if the priority of the task changed.
-+ */
-+void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
-+{
-+	int prio;
-+	struct rq *rq;
-+	raw_spinlock_t *lock;
-+
-+	/* XXX used to be waiter->prio, not waiter->task->prio */
-+	prio = __rt_effective_prio(pi_task, p->normal_prio);
-+
-+	/*
-+	 * If nothing changed; bail early.
-+	 */
-+	if (p->pi_top_task == pi_task && prio == p->prio)
-+		return;
-+
-+	rq = __task_access_lock(p, &lock);
-+	/*
-+	 * Set under pi_lock && rq->lock, such that the value can be used under
-+	 * either lock.
-+	 *
-+	 * Note that there is a load of trickiness in making this pointer cache work
-+	 * right. rt_mutex_slowunlock()+rt_mutex_postunlock() work together to
-+	 * ensure a task is de-boosted (pi_task is set to NULL) before the
-+	 * task is allowed to run again (and can exit). This ensures the pointer
-+	 * points to a blocked task -- which guarantees the task is present.
-+	 */
-+	p->pi_top_task = pi_task;
-+
-+	/*
-+	 * For FIFO/RR we only need to set prio, if that matches we're done.
-+	 */
-+	if (prio == p->prio)
-+		goto out_unlock;
-+
-+	/*
-+	 * Idle task boosting is a no-no in general. There is one
-+	 * exception, when PREEMPT_RT and NOHZ is active:
-+	 *
-+	 * The idle task calls get_next_timer_interrupt() and holds
-+	 * the timer wheel base->lock on the CPU and another CPU wants
-+	 * to access the timer (probably to cancel it). We can safely
-+	 * ignore the boosting request, as the idle CPU runs this code
-+	 * with interrupts disabled and will complete the lock
-+	 * protected section without being interrupted. So there is no
-+	 * real need to boost.
-+	 */
-+	if (unlikely(p == rq->idle)) {
-+		WARN_ON(p != rq->curr);
-+		WARN_ON(p->pi_blocked_on);
-+		goto out_unlock;
-+	}
-+
-+	trace_sched_pi_setprio(p, pi_task);
-+
-+	__setscheduler_prio(p, prio);
-+
-+	check_task_changed(p, rq);
-+out_unlock:
-+	/* Avoid rq from going away on us: */
-+	preempt_disable();
-+
-+	if (task_on_rq_queued(p))
-+		__balance_callbacks(rq);
-+	__task_access_unlock(p, lock);
-+
-+	preempt_enable();
-+}
-+#endif
-+
-+#if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
-+int __sched __cond_resched(void)
-+{
-+	if (should_resched(0)) {
-+		preempt_schedule_common();
-+		return 1;
-+	}
-+	/*
-+	 * In preemptible kernels, ->rcu_read_lock_nesting tells the tick
-+	 * whether the current CPU is in an RCU read-side critical section,
-+	 * so the tick can report quiescent states even for CPUs looping
-+	 * in kernel context.  In contrast, in non-preemptible kernels,
-+	 * RCU readers leave no in-memory hints, which means that CPU-bound
-+	 * processes executing in kernel context might never report an
-+	 * RCU quiescent state.  Therefore, the following code causes
-+	 * cond_resched() to report a quiescent state, but only when RCU
-+	 * is in urgent need of one.
-+	 */
-+#ifndef CONFIG_PREEMPT_RCU
-+	rcu_all_qs();
-+#endif
-+	return 0;
-+}
-+EXPORT_SYMBOL(__cond_resched);
-+#endif
-+
-+#ifdef CONFIG_PREEMPT_DYNAMIC
-+#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
-+#define cond_resched_dynamic_enabled	__cond_resched
-+#define cond_resched_dynamic_disabled	((void *)&__static_call_return0)
-+DEFINE_STATIC_CALL_RET0(cond_resched, __cond_resched);
-+EXPORT_STATIC_CALL_TRAMP(cond_resched);
-+
-+#define might_resched_dynamic_enabled	__cond_resched
-+#define might_resched_dynamic_disabled	((void *)&__static_call_return0)
-+DEFINE_STATIC_CALL_RET0(might_resched, __cond_resched);
-+EXPORT_STATIC_CALL_TRAMP(might_resched);
-+#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
-+static DEFINE_STATIC_KEY_FALSE(sk_dynamic_cond_resched);
-+int __sched dynamic_cond_resched(void)
-+{
-+	klp_sched_try_switch();
-+	if (!static_branch_unlikely(&sk_dynamic_cond_resched))
-+		return 0;
-+	return __cond_resched();
-+}
-+EXPORT_SYMBOL(dynamic_cond_resched);
-+
-+static DEFINE_STATIC_KEY_FALSE(sk_dynamic_might_resched);
-+int __sched dynamic_might_resched(void)
-+{
-+	if (!static_branch_unlikely(&sk_dynamic_might_resched))
-+		return 0;
-+	return __cond_resched();
-+}
-+EXPORT_SYMBOL(dynamic_might_resched);
-+#endif
-+#endif
-+
-+/*
-+ * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
-+ * call schedule, and on return reacquire the lock.
-+ *
-+ * This works OK both with and without CONFIG_PREEMPTION.  We do strange low-level
-+ * operations here to prevent schedule() from being called twice (once via
-+ * spin_unlock(), once by hand).
-+ */
-+int __cond_resched_lock(spinlock_t *lock)
-+{
-+	int resched = should_resched(PREEMPT_LOCK_OFFSET);
-+	int ret = 0;
-+
-+	lockdep_assert_held(lock);
-+
-+	if (spin_needbreak(lock) || resched) {
-+		spin_unlock(lock);
-+		if (!_cond_resched())
-+			cpu_relax();
-+		ret = 1;
-+		spin_lock(lock);
-+	}
-+	return ret;
-+}
-+EXPORT_SYMBOL(__cond_resched_lock);
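/*
 * Editor's usage note (not part of the original patch): callers normally
 * reach __cond_resched_lock() through the cond_resched_lock() wrapper
 * from long lock-holding loops, e.g.:
 *
 *	spin_lock(&lock);
 *	list_for_each_entry(pos, &head, node) {
 *		...
 *		cond_resched_lock(&lock);	// may drop and retake 'lock'
 *	}
 *	spin_unlock(&lock);
 *
 * Any state cached while holding the lock must be revalidated afterwards,
 * since the lock may have been dropped.
 */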
-+
-+int __cond_resched_rwlock_read(rwlock_t *lock)
-+{
-+	int resched = should_resched(PREEMPT_LOCK_OFFSET);
-+	int ret = 0;
-+
-+	lockdep_assert_held_read(lock);
-+
-+	if (rwlock_needbreak(lock) || resched) {
-+		read_unlock(lock);
-+		if (!_cond_resched())
-+			cpu_relax();
-+		ret = 1;
-+		read_lock(lock);
-+	}
-+	return ret;
-+}
-+EXPORT_SYMBOL(__cond_resched_rwlock_read);
-+
-+int __cond_resched_rwlock_write(rwlock_t *lock)
-+{
-+	int resched = should_resched(PREEMPT_LOCK_OFFSET);
-+	int ret = 0;
-+
-+	lockdep_assert_held_write(lock);
-+
-+	if (rwlock_needbreak(lock) || resched) {
-+		write_unlock(lock);
-+		if (!_cond_resched())
-+			cpu_relax();
-+		ret = 1;
-+		write_lock(lock);
-+	}
-+	return ret;
-+}
-+EXPORT_SYMBOL(__cond_resched_rwlock_write);
-+
-+#ifdef CONFIG_PREEMPT_DYNAMIC
-+
-+#ifdef CONFIG_GENERIC_ENTRY
-+#include <linux/entry-common.h>
-+#endif
-+
-+/*
-+ * SC:cond_resched
-+ * SC:might_resched
-+ * SC:preempt_schedule
-+ * SC:preempt_schedule_notrace
-+ * SC:irqentry_exit_cond_resched
-+ *
-+ *
-+ * NONE:
-+ *   cond_resched               <- __cond_resched
-+ *   might_resched              <- RET0
-+ *   preempt_schedule           <- NOP
-+ *   preempt_schedule_notrace   <- NOP
-+ *   irqentry_exit_cond_resched <- NOP
-+ *
-+ * VOLUNTARY:
-+ *   cond_resched               <- __cond_resched
-+ *   might_resched              <- __cond_resched
-+ *   preempt_schedule           <- NOP
-+ *   preempt_schedule_notrace   <- NOP
-+ *   irqentry_exit_cond_resched <- NOP
-+ *
-+ * FULL:
-+ *   cond_resched               <- RET0
-+ *   might_resched              <- RET0
-+ *   preempt_schedule           <- preempt_schedule
-+ *   preempt_schedule_notrace   <- preempt_schedule_notrace
-+ *   irqentry_exit_cond_resched <- irqentry_exit_cond_resched
-+ */
-+
-+enum {
-+	preempt_dynamic_undefined = -1,
-+	preempt_dynamic_none,
-+	preempt_dynamic_voluntary,
-+	preempt_dynamic_full,
-+};
-+
-+int preempt_dynamic_mode = preempt_dynamic_undefined;
-+
-+int sched_dynamic_mode(const char *str)
-+{
-+	if (!strcmp(str, "none"))
-+		return preempt_dynamic_none;
-+
-+	if (!strcmp(str, "voluntary"))
-+		return preempt_dynamic_voluntary;
-+
-+	if (!strcmp(str, "full"))
-+		return preempt_dynamic_full;
-+
-+	return -EINVAL;
-+}
-+
-+#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
-+#define preempt_dynamic_enable(f)	static_call_update(f, f##_dynamic_enabled)
-+#define preempt_dynamic_disable(f)	static_call_update(f, f##_dynamic_disabled)
-+#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
-+#define preempt_dynamic_enable(f)	static_key_enable(&sk_dynamic_##f.key)
-+#define preempt_dynamic_disable(f)	static_key_disable(&sk_dynamic_##f.key)
-+#else
-+#error "Unsupported PREEMPT_DYNAMIC mechanism"
-+#endif
-+
-+static DEFINE_MUTEX(sched_dynamic_mutex);
-+static bool klp_override;
-+
-+static void __sched_dynamic_update(int mode)
-+{
-+	/*
-+	 * Avoid {NONE,VOLUNTARY} -> FULL transitions from ever ending up in
-+	 * the ZERO state, which is invalid.
-+	 */
-+	if (!klp_override)
-+		preempt_dynamic_enable(cond_resched);
-+	preempt_dynamic_enable(might_resched);
-+	preempt_dynamic_enable(preempt_schedule);
-+	preempt_dynamic_enable(preempt_schedule_notrace);
-+	preempt_dynamic_enable(irqentry_exit_cond_resched);
-+
-+	switch (mode) {
-+	case preempt_dynamic_none:
-+		if (!klp_override)
-+			preempt_dynamic_enable(cond_resched);
-+		preempt_dynamic_disable(might_resched);
-+		preempt_dynamic_disable(preempt_schedule);
-+		preempt_dynamic_disable(preempt_schedule_notrace);
-+		preempt_dynamic_disable(irqentry_exit_cond_resched);
-+		if (mode != preempt_dynamic_mode)
-+			pr_info("Dynamic Preempt: none\n");
-+		break;
-+
-+	case preempt_dynamic_voluntary:
-+		if (!klp_override)
-+			preempt_dynamic_enable(cond_resched);
-+		preempt_dynamic_enable(might_resched);
-+		preempt_dynamic_disable(preempt_schedule);
-+		preempt_dynamic_disable(preempt_schedule_notrace);
-+		preempt_dynamic_disable(irqentry_exit_cond_resched);
-+		if (mode != preempt_dynamic_mode)
-+			pr_info("Dynamic Preempt: voluntary\n");
-+		break;
-+
-+	case preempt_dynamic_full:
-+		if (!klp_override)
-+			preempt_dynamic_enable(cond_resched);
-+		preempt_dynamic_disable(might_resched);
-+		preempt_dynamic_enable(preempt_schedule);
-+		preempt_dynamic_enable(preempt_schedule_notrace);
-+		preempt_dynamic_enable(irqentry_exit_cond_resched);
-+		if (mode != preempt_dynamic_mode)
-+			pr_info("Dynamic Preempt: full\n");
-+		break;
-+	}
-+
-+	preempt_dynamic_mode = mode;
-+}
-+
-+void sched_dynamic_update(int mode)
-+{
-+	mutex_lock(&sched_dynamic_mutex);
-+	__sched_dynamic_update(mode);
-+	mutex_unlock(&sched_dynamic_mutex);
-+}
-+
-+#ifdef CONFIG_HAVE_PREEMPT_DYNAMIC_CALL
-+
-+static int klp_cond_resched(void)
-+{
-+	__klp_sched_try_switch();
-+	return __cond_resched();
-+}
-+
-+void sched_dynamic_klp_enable(void)
-+{
-+	mutex_lock(&sched_dynamic_mutex);
-+
-+	klp_override = true;
-+	static_call_update(cond_resched, klp_cond_resched);
-+
-+	mutex_unlock(&sched_dynamic_mutex);
-+}
-+
-+void sched_dynamic_klp_disable(void)
-+{
-+	mutex_lock(&sched_dynamic_mutex);
-+
-+	klp_override = false;
-+	__sched_dynamic_update(preempt_dynamic_mode);
-+
-+	mutex_unlock(&sched_dynamic_mutex);
-+}
-+
-+#endif /* CONFIG_HAVE_PREEMPT_DYNAMIC_CALL */
-+
-+static int __init setup_preempt_mode(char *str)
-+{
-+	int mode = sched_dynamic_mode(str);
-+
-+	if (mode < 0) {
-+		pr_warn("Dynamic Preempt: unsupported mode: %s\n", str);
-+		return 0;
-+	}
-+
-+	sched_dynamic_update(mode);
-+	return 1;
-+}
-+__setup("preempt=", setup_preempt_mode);
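/*
 * Editor's usage note (not part of the original patch): booting with,
 * e.g., "preempt=voluntary" selects preempt_dynamic_voluntary here at
 * init time; on kernels that expose it, the mode can also be flipped at
 * runtime via /sys/kernel/debug/sched/preempt.
 */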
-+
-+static void __init preempt_dynamic_init(void)
-+{
-+	if (preempt_dynamic_mode == preempt_dynamic_undefined) {
-+		if (IS_ENABLED(CONFIG_PREEMPT_NONE)) {
-+			sched_dynamic_update(preempt_dynamic_none);
-+		} else if (IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY)) {
-+			sched_dynamic_update(preempt_dynamic_voluntary);
-+		} else {
-+			/* Default static call setting, nothing to do */
-+			WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT));
-+			preempt_dynamic_mode = preempt_dynamic_full;
-+			pr_info("Dynamic Preempt: full\n");
-+		}
-+	}
-+}
-+
-+#define PREEMPT_MODEL_ACCESSOR(mode) \
-+	bool preempt_model_##mode(void)						 \
-+	{									 \
-+		WARN_ON_ONCE(preempt_dynamic_mode == preempt_dynamic_undefined); \
-+		return preempt_dynamic_mode == preempt_dynamic_##mode;		 \
-+	}									 \
-+	EXPORT_SYMBOL_GPL(preempt_model_##mode)
-+
-+PREEMPT_MODEL_ACCESSOR(none);
-+PREEMPT_MODEL_ACCESSOR(voluntary);
-+PREEMPT_MODEL_ACCESSOR(full);
-+
-+#else /* !CONFIG_PREEMPT_DYNAMIC: */
-+
-+static inline void preempt_dynamic_init(void) { }
-+
-+#endif /* CONFIG_PREEMPT_DYNAMIC */
-+
-+int io_schedule_prepare(void)
-+{
-+	int old_iowait = current->in_iowait;
-+
-+	current->in_iowait = 1;
-+	blk_flush_plug(current->plug, true);
-+	return old_iowait;
-+}
-+
-+void io_schedule_finish(int token)
-+{
-+	current->in_iowait = token;
-+}
-+
-+/*
-+ * This task is about to go to sleep on IO.  Increment rq->nr_iowait so
-+ * that process accounting knows that this is a task in IO wait state.
-+ *
-+ * But don't do that if it is a deliberate, throttling IO wait (this task
-+ * has set its backing_dev_info: the queue against which it should throttle)
-+ */
-+
-+long __sched io_schedule_timeout(long timeout)
-+{
-+	int token;
-+	long ret;
-+
-+	token = io_schedule_prepare();
-+	ret = schedule_timeout(timeout);
-+	io_schedule_finish(token);
-+
-+	return ret;
-+}
-+EXPORT_SYMBOL(io_schedule_timeout);
-+
-+void __sched io_schedule(void)
-+{
-+	int token;
-+
-+	token = io_schedule_prepare();
-+	schedule();
-+	io_schedule_finish(token);
-+}
-+EXPORT_SYMBOL(io_schedule);
-+
-+void sched_show_task(struct task_struct *p)
-+{
-+	unsigned long free;
-+	int ppid;
-+
-+	if (!try_get_task_stack(p))
-+		return;
-+
-+	pr_info("task:%-15.15s state:%c", p->comm, task_state_to_char(p));
-+
-+	if (task_is_running(p))
-+		pr_cont("  running task    ");
-+	free = stack_not_used(p);
-+	ppid = 0;
-+	rcu_read_lock();
-+	if (pid_alive(p))
-+		ppid = task_pid_nr(rcu_dereference(p->real_parent));
-+	rcu_read_unlock();
-+	pr_cont(" stack:%-5lu pid:%-5d tgid:%-5d ppid:%-6d flags:0x%08lx\n",
-+		free, task_pid_nr(p), task_tgid_nr(p),
-+		ppid, read_task_thread_flags(p));
-+
-+	print_worker_info(KERN_INFO, p);
-+	print_stop_info(KERN_INFO, p);
-+	show_stack(p, NULL, KERN_INFO);
-+	put_task_stack(p);
-+}
-+EXPORT_SYMBOL_GPL(sched_show_task);
-+
-+static inline bool
-+state_filter_match(unsigned long state_filter, struct task_struct *p)
-+{
-+	unsigned int state = READ_ONCE(p->__state);
-+
-+	/* no filter, everything matches */
-+	if (!state_filter)
-+		return true;
-+
-+	/* filter, but doesn't match */
-+	if (!(state & state_filter))
-+		return false;
-+
-+	/*
-+	 * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows
-+	 * TASK_KILLABLE).
-+	 */
-+	if (state_filter == TASK_UNINTERRUPTIBLE && (state & TASK_NOLOAD))
-+		return false;
-+
-+	return true;
-+}
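/*
 * Editor's example (not part of the original patch): SysRq "show blocked
 * tasks" calls show_state_filter(TASK_UNINTERRUPTIBLE), so ordinary
 * D-state tasks match while TASK_IDLE kthreads (TASK_UNINTERRUPTIBLE |
 * TASK_NOLOAD) are deliberately skipped by the check above.
 */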
-+
-+void show_state_filter(unsigned int state_filter)
-+{
-+	struct task_struct *g, *p;
-+
-+	rcu_read_lock();
-+	for_each_process_thread(g, p) {
-+		/*
-+		 * Reset the NMI watchdog timeout; listing all tasks on a slow
-+		 * console might take a lot of time.
-+		 * Also, reset softlockup watchdogs on all CPUs, because
-+		 * another CPU might be blocked waiting for us to process
-+		 * an IPI.
-+		 */
-+		touch_nmi_watchdog();
-+		touch_all_softlockup_watchdogs();
-+		if (state_filter_match(state_filter, p))
-+			sched_show_task(p);
-+	}
-+
-+#ifdef CONFIG_SCHED_DEBUG
-+	/* TODO: Alt schedule FW should support this
-+	if (!state_filter)
-+		sysrq_sched_debug_show();
-+	*/
-+#endif
-+	rcu_read_unlock();
-+	/*
-+	 * Only show locks if all tasks are dumped:
-+	 */
-+	if (!state_filter)
-+		debug_show_all_locks();
-+}
-+
-+void dump_cpu_task(int cpu)
-+{
-+	if (in_hardirq() && cpu == smp_processor_id()) {
-+		struct pt_regs *regs;
-+
-+		regs = get_irq_regs();
-+		if (regs) {
-+			show_regs(regs);
-+			return;
-+		}
-+	}
-+
-+	if (trigger_single_cpu_backtrace(cpu))
-+		return;
-+
-+	pr_info("Task dump for CPU %d:\n", cpu);
-+	sched_show_task(cpu_curr(cpu));
-+}
-+
-+/**
-+ * init_idle - set up an idle thread for a given CPU
-+ * @idle: task in question
-+ * @cpu: CPU the idle task belongs to
-+ *
-+ * NOTE: this function does not set the idle thread's NEED_RESCHED
-+ * flag, to make booting more robust.
-+ */
-+void __init init_idle(struct task_struct *idle, int cpu)
-+{
-+#ifdef CONFIG_SMP
-+	struct affinity_context ac = (struct affinity_context) {
-+		.new_mask  = cpumask_of(cpu),
-+		.flags     = 0,
-+	};
-+#endif
-+	struct rq *rq = cpu_rq(cpu);
-+	unsigned long flags;
-+
-+	__sched_fork(0, idle);
-+
-+	raw_spin_lock_irqsave(&idle->pi_lock, flags);
-+	raw_spin_lock(&rq->lock);
-+
-+	idle->last_ran = rq->clock_task;
-+	idle->__state = TASK_RUNNING;
-+	/*
-+	 * PF_KTHREAD should already be set at this point; regardless, make it
-+	 * look like a proper per-CPU kthread.
-+	 */
-+	idle->flags |= PF_KTHREAD | PF_NO_SETAFFINITY;
-+	kthread_set_per_cpu(idle, cpu);
-+
-+	sched_queue_init_idle(&rq->queue, idle);
-+
-+#ifdef CONFIG_SMP
-+	/*
-+	 * It's possible that init_idle() gets called multiple times on a task,
-+	 * in that case do_set_cpus_allowed() will not do the right thing.
-+	 *
-+	 * And since this is boot we can forgo the serialisation.
-+	 */
-+	set_cpus_allowed_common(idle, &ac);
-+#endif
-+
-+	/* Silence PROVE_RCU */
-+	rcu_read_lock();
-+	__set_task_cpu(idle, cpu);
-+	rcu_read_unlock();
-+
-+	rq->idle = idle;
-+	rcu_assign_pointer(rq->curr, idle);
-+	idle->on_cpu = 1;
-+
-+	raw_spin_unlock(&rq->lock);
-+	raw_spin_unlock_irqrestore(&idle->pi_lock, flags);
-+
-+	/* Set the preempt count _outside_ the spinlocks! */
-+	init_idle_preempt_count(idle, cpu);
-+
-+	ftrace_graph_init_idle_task(idle, cpu);
-+	vtime_init_idle(idle, cpu);
-+#ifdef CONFIG_SMP
-+	sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
-+#endif
-+}
-+
-+#ifdef CONFIG_SMP
-+
-+int cpuset_cpumask_can_shrink(const struct cpumask __maybe_unused *cur,
-+			      const struct cpumask __maybe_unused *trial)
-+{
-+	return 1;
-+}
-+
-+int task_can_attach(struct task_struct *p)
-+{
-+	int ret = 0;
-+
-+	/*
-+	 * Kthreads which disallow setaffinity shouldn't be moved
-+	 * to a new cpuset; we don't want to change their CPU
-+	 * affinity and isolating such threads by their set of
-+	 * allowed nodes is unnecessary.  Thus, cpusets are not
-+	 * applicable for such threads.  This prevents checking for
-+	 * success of set_cpus_allowed_ptr() on all attached tasks
-+	 * before cpus_mask may be changed.
-+	 */
-+	if (p->flags & PF_NO_SETAFFINITY)
-+		ret = -EINVAL;
-+
-+	return ret;
-+}
-+
-+bool sched_smp_initialized __read_mostly;
-+
-+#ifdef CONFIG_HOTPLUG_CPU
-+/*
-+ * Ensures that the idle task is using init_mm right before its CPU goes
-+ * offline.
-+ */
-+void idle_task_exit(void)
-+{
-+	struct mm_struct *mm = current->active_mm;
-+
-+	BUG_ON(current != this_rq()->idle);
-+
-+	if (mm != &init_mm) {
-+		switch_mm(mm, &init_mm, current);
-+		finish_arch_post_lock_switch();
-+	}
-+
-+	/* finish_cpu(), as run on the BP, will clean up the active_mm state */
-+}
-+
-+static int __balance_push_cpu_stop(void *arg)
-+{
-+	struct task_struct *p = arg;
-+	struct rq *rq = this_rq();
-+	struct rq_flags rf;
-+	int cpu;
-+
-+	raw_spin_lock_irq(&p->pi_lock);
-+	rq_lock(rq, &rf);
-+
-+	update_rq_clock(rq);
-+
-+	if (task_rq(p) == rq && task_on_rq_queued(p)) {
-+		cpu = select_fallback_rq(rq->cpu, p);
-+		rq = __migrate_task(rq, p, cpu);
-+	}
-+
-+	rq_unlock(rq, &rf);
-+	raw_spin_unlock_irq(&p->pi_lock);
-+
-+	put_task_struct(p);
-+
-+	return 0;
-+}
-+
-+static DEFINE_PER_CPU(struct cpu_stop_work, push_work);
-+
-+/*
-+ * This is enabled below SCHED_AP_ACTIVE, i.e. when !cpu_active(), but it
-+ * only takes effect while the CPU is going down.
-+ */
-+static void balance_push(struct rq *rq)
-+{
-+	struct task_struct *push_task = rq->curr;
-+
-+	lockdep_assert_held(&rq->lock);
-+
-+	/*
-+	 * Ensure the thing is persistent until balance_push_set(.on = false);
-+	 */
-+	rq->balance_callback = &balance_push_callback;
-+
-+	/*
-+	 * Only active while going offline and when invoked on the outgoing
-+	 * CPU.
-+	 */
-+	if (!cpu_dying(rq->cpu) || rq != this_rq())
-+		return;
-+
-+	/*
-+	 * Both the cpu-hotplug and stop task are in this case and are
-+	 * required to complete the hotplug process.
-+	 */
-+	if (kthread_is_per_cpu(push_task) ||
-+	    is_migration_disabled(push_task)) {
-+
-+		/*
-+		 * If this is the idle task on the outgoing CPU try to wake
-+		 * up the hotplug control thread which might wait for the
-+		 * last task to vanish. The rcuwait_active() check is
-+		 * accurate here because the waiter is pinned on this CPU
-+		 * and obviously can't be running in parallel.
-+		 *
-+		 * On RT kernels this also has to check whether there are
-+		 * pinned and scheduled out tasks on the runqueue. They
-+		 * need to leave the migrate disabled section first.
-+		 */
-+		if (!rq->nr_running && !rq_has_pinned_tasks(rq) &&
-+		    rcuwait_active(&rq->hotplug_wait)) {
-+			raw_spin_unlock(&rq->lock);
-+			rcuwait_wake_up(&rq->hotplug_wait);
-+			raw_spin_lock(&rq->lock);
-+		}
-+		return;
-+	}
-+
-+	get_task_struct(push_task);
-+	/*
-+	 * Temporarily drop rq->lock such that we can wake-up the stop task.
-+	 * Both preemption and IRQs are still disabled.
-+	 */
-+	preempt_disable();
-+	raw_spin_unlock(&rq->lock);
-+	stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task,
-+			    this_cpu_ptr(&push_work));
-+	preempt_enable();
-+	/*
-+	 * At this point need_resched() is true and we'll take the loop in
-+	 * schedule(). The next pick is obviously going to be the stop task
-+	 * which is a per-CPU kthread (kthread_is_per_cpu()) and will push
-+	 * this task away.
-+	 */
-+	raw_spin_lock(&rq->lock);
-+}
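/*
 * Editor's sequence sketch (not part of the original patch) for a CPU
 * going down with balance_push() above:
 *
 *	1. rq->curr is neither a per-CPU kthread nor migration-disabled
 *	2. drop rq->lock and queue __balance_push_cpu_stop() on the stopper
 *	3. the stopper preempts and migrates the task via select_fallback_rq()
 *	4. repeat until only pinned kthreads remain, then wake the hotplug
 *	   control thread parked in balance_hotplug_wait()
 */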
-+
-+static void balance_push_set(int cpu, bool on)
-+{
-+	struct rq *rq = cpu_rq(cpu);
-+	struct rq_flags rf;
-+
-+	rq_lock_irqsave(rq, &rf);
-+	if (on) {
-+		WARN_ON_ONCE(rq->balance_callback);
-+		rq->balance_callback = &balance_push_callback;
-+	} else if (rq->balance_callback == &balance_push_callback) {
-+		rq->balance_callback = NULL;
-+	}
-+	rq_unlock_irqrestore(rq, &rf);
-+}
-+
-+/*
-+ * Invoked from a CPU's hotplug control thread after the CPU has been marked
-+ * inactive. All tasks which are not per CPU kernel threads are either
-+ * pushed off this CPU now via balance_push() or placed on a different CPU
-+ * during wakeup. Wait until the CPU is quiescent.
-+ */
-+static void balance_hotplug_wait(void)
-+{
-+	struct rq *rq = this_rq();
-+
-+	rcuwait_wait_event(&rq->hotplug_wait,
-+			   rq->nr_running == 1 && !rq_has_pinned_tasks(rq),
-+			   TASK_UNINTERRUPTIBLE);
-+}
-+
-+#else
-+
-+static void balance_push(struct rq *rq)
-+{
-+}
-+
-+static void balance_push_set(int cpu, bool on)
-+{
-+}
-+
-+static inline void balance_hotplug_wait(void)
-+{
-+}
-+#endif /* CONFIG_HOTPLUG_CPU */
-+
-+static void set_rq_offline(struct rq *rq)
-+{
-+	if (rq->online) {
-+		update_rq_clock(rq);
-+		rq->online = false;
-+	}
-+}
-+
-+static void set_rq_online(struct rq *rq)
-+{
-+	if (!rq->online)
-+		rq->online = true;
-+}
-+
-+static inline void sched_set_rq_online(struct rq *rq, int cpu)
-+{
-+	unsigned long flags;
-+
-+	raw_spin_lock_irqsave(&rq->lock, flags);
-+	set_rq_online(rq);
-+	raw_spin_unlock_irqrestore(&rq->lock, flags);
-+}
-+
-+static inline void sched_set_rq_offline(struct rq *rq, int cpu)
-+{
-+	unsigned long flags;
-+
-+	raw_spin_lock_irqsave(&rq->lock, flags);
-+	set_rq_offline(rq);
-+	raw_spin_unlock_irqrestore(&rq->lock, flags);
-+}
-+
-+/*
-+ * used to mark begin/end of suspend/resume:
-+ */
-+static int num_cpus_frozen;
-+
-+/*
-+ * Update cpusets according to cpu_active mask.  If cpusets are
-+ * disabled, cpuset_update_active_cpus() becomes a simple wrapper
-+ * around partition_sched_domains().
-+ *
-+ * If we come here as part of a suspend/resume, don't touch cpusets because we
-+ * want to restore them to their original state upon resume anyway.
-+ */
-+static void cpuset_cpu_active(void)
-+{
-+	if (cpuhp_tasks_frozen) {
-+		/*
-+		 * num_cpus_frozen tracks how many CPUs are involved in the
-+		 * suspend/resume sequence. As long as this is not the last online
-+		 * operation in the resume sequence, just build a single sched
-+		 * domain, ignoring cpusets.
-+		 */
-+		partition_sched_domains(1, NULL, NULL);
-+		if (--num_cpus_frozen)
-+			return;
-+		/*
-+		 * This is the last CPU online operation. So fall through and
-+		 * restore the original sched domains by considering the
-+		 * cpuset configurations.
-+		 */
-+		cpuset_force_rebuild();
-+	}
-+
-+	cpuset_update_active_cpus();
-+}
-+
-+static int cpuset_cpu_inactive(unsigned int cpu)
-+{
-+	if (!cpuhp_tasks_frozen) {
-+		cpuset_update_active_cpus();
-+	} else {
-+		num_cpus_frozen++;
-+		partition_sched_domains(1, NULL, NULL);
-+	}
-+	return 0;
-+}
-+
-+static inline void sched_smt_present_inc(int cpu)
-+{
-+#ifdef CONFIG_SCHED_SMT
-+	if (cpumask_weight(cpu_smt_mask(cpu)) == 2) {
-+		static_branch_inc_cpuslocked(&sched_smt_present);
-+		cpumask_or(&sched_smt_mask, &sched_smt_mask, cpu_smt_mask(cpu));
-+	}
-+#endif
-+}
-+
-+static inline void sched_smt_present_dec(int cpu)
-+{
-+#ifdef CONFIG_SCHED_SMT
-+	if (cpumask_weight(cpu_smt_mask(cpu)) == 2) {
-+		static_branch_dec_cpuslocked(&sched_smt_present);
-+		if (!static_branch_likely(&sched_smt_present))
-+			cpumask_clear(sched_pcore_idle_mask);
-+		cpumask_andnot(&sched_smt_mask, &sched_smt_mask, cpu_smt_mask(cpu));
-+	}
-+#endif
-+}
-+
-+int sched_cpu_activate(unsigned int cpu)
-+{
-+	struct rq *rq = cpu_rq(cpu);
-+
-+	/*
-+	 * Clear the balance_push callback and prepare to schedule
-+	 * regular tasks.
-+	 */
-+	balance_push_set(cpu, false);
-+
-+	set_cpu_active(cpu, true);
-+
-+	if (sched_smp_initialized)
-+		cpuset_cpu_active();
-+
-+	/*
-+	 * Put the rq online, if not already. This happens:
-+	 *
-+	 * 1) In the early boot process, because we build the real domains
-+	 *    after all cpus have been brought up.
-+	 *
-+	 * 2) At runtime, if cpuset_cpu_active() fails to rebuild the
-+	 *    domains.
-+	 */
-+	sched_set_rq_online(rq, cpu);
-+
-+	/*
-+	 * When going up, increment the number of cores with SMT present.
-+	 */
-+	sched_smt_present_inc(cpu);
-+
-+	return 0;
-+}
-+
-+int sched_cpu_deactivate(unsigned int cpu)
-+{
-+	struct rq *rq = cpu_rq(cpu);
-+	int ret;
-+
-+	set_cpu_active(cpu, false);
-+
-+	/*
-+	 * From this point forward, this CPU will refuse to run any task that
-+	 * is not: migrate_disable() or KTHREAD_IS_PER_CPU, and will actively
-+	 * push those tasks away until this gets cleared, see
-+	 * sched_cpu_dying().
-+	 */
-+	balance_push_set(cpu, true);
-+
-+	/*
-+	 * We've cleared cpu_active_mask, wait for all preempt-disabled and RCU
-+	 * users of this state to go away such that all new such users will
-+	 * observe it.
-+	 *
-+	 * Specifically, we rely on ttwu to no longer target this CPU, see
-+	 * ttwu_queue_cond() and is_cpu_allowed().
-+	 *
-+	 * Do the sync before parking the smpboot threads to take care of
-+	 * the RCU boost case.
-+	 */
-+	synchronize_rcu();
-+
-+	sched_set_rq_offline(rq, cpu);
-+
-+	/*
-+	 * When going down, decrement the number of cores with SMT present.
-+	 */
-+	sched_smt_present_dec(cpu);
-+
-+	if (!sched_smp_initialized)
-+		return 0;
-+
-+	ret = cpuset_cpu_inactive(cpu);
-+	if (ret) {
-+		sched_smt_present_inc(cpu);
-+		sched_set_rq_online(rq, cpu);
-+		balance_push_set(cpu, false);
-+		set_cpu_active(cpu, true);
-+		return ret;
-+	}
-+
-+	return 0;
-+}
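/*
 * Editor's note (not part of the original patch): on the error path above,
 * a failed cpuset_cpu_inactive() unwinds every earlier step in reverse
 * order (SMT accounting, rq online state, balance_push, cpu_active), so
 * the CPU ends up fully online again rather than half-offline.
 */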
-+
-+static void sched_rq_cpu_starting(unsigned int cpu)
-+{
-+	struct rq *rq = cpu_rq(cpu);
-+
-+	rq->calc_load_update = calc_load_update;
-+}
-+
-+int sched_cpu_starting(unsigned int cpu)
-+{
-+	sched_rq_cpu_starting(cpu);
-+	sched_tick_start(cpu);
-+	return 0;
-+}
-+
-+#ifdef CONFIG_HOTPLUG_CPU
-+
-+/*
-+ * Invoked immediately before the stopper thread is invoked to bring the
-+ * CPU down completely. At this point all per CPU kthreads except the
-+ * hotplug thread (current) and the stopper thread (inactive) have been
-+ * either parked or have been unbound from the outgoing CPU. Ensure that
-+ * any of those which might be on the way out are gone.
-+ *
-+ * If after this point a bound task is being woken on this CPU then the
-+ * responsible hotplug callback has failed to do its job.
-+ * sched_cpu_dying() will catch it with the appropriate fireworks.
-+ */
-+int sched_cpu_wait_empty(unsigned int cpu)
-+{
-+	balance_hotplug_wait();
-+	return 0;
-+}
-+
-+/*
-+ * Since this CPU is going 'away' for a while, fold any nr_active delta we
-+ * might have. Called from the CPU stopper task after ensuring that the
-+ * stopper is the last running task on the CPU, so nr_active count is
-+ * stable. We need to take the tear-down thread which is calling this into
-+ * account, so we hand in adjust = 1 to the load calculation.
-+ *
-+ * Also see the comment "Global load-average calculations".
-+ */
-+static void calc_load_migrate(struct rq *rq)
-+{
-+	long delta = calc_load_fold_active(rq, 1);
-+
-+	if (delta)
-+		atomic_long_add(delta, &calc_load_tasks);
-+}
-+
-+static void dump_rq_tasks(struct rq *rq, const char *loglvl)
-+{
-+	struct task_struct *g, *p;
-+	int cpu = cpu_of(rq);
-+
-+	lockdep_assert_held(&rq->lock);
-+
-+	printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running);
-+	for_each_process_thread(g, p) {
-+		if (task_cpu(p) != cpu)
-+			continue;
-+
-+		if (!task_on_rq_queued(p))
-+			continue;
-+
-+		printk("%s\tpid: %d, name: %s\n", loglvl, p->pid, p->comm);
-+	}
-+}
-+
-+int sched_cpu_dying(unsigned int cpu)
-+{
-+	struct rq *rq = cpu_rq(cpu);
-+	unsigned long flags;
-+
-+	/* Handle pending wakeups and then migrate everything off */
-+	sched_tick_stop(cpu);
-+
-+	raw_spin_lock_irqsave(&rq->lock, flags);
-+	if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) {
-+		WARN(true, "Dying CPU not properly vacated!");
-+		dump_rq_tasks(rq, KERN_WARNING);
-+	}
-+	raw_spin_unlock_irqrestore(&rq->lock, flags);
-+
-+	calc_load_migrate(rq);
-+	hrtick_clear(rq);
-+	return 0;
-+}
-+#endif
-+
-+#ifdef CONFIG_SMP
-+static void sched_init_topology_cpumask_early(void)
-+{
-+	int cpu;
-+	cpumask_t *tmp;
-+
-+	for_each_possible_cpu(cpu) {
-+		/* init topo masks */
-+		tmp = per_cpu(sched_cpu_topo_masks, cpu);
-+
-+		cpumask_copy(tmp, cpu_possible_mask);
-+		per_cpu(sched_cpu_llc_mask, cpu) = tmp;
-+		per_cpu(sched_cpu_topo_end_mask, cpu) = ++tmp;
-+	}
-+}
-+
-+#define TOPOLOGY_CPUMASK(name, mask, last)\
-+	if (cpumask_and(topo, topo, mask)) {					\
-+		cpumask_copy(topo, mask);					\
-+		printk(KERN_INFO "sched: cpu#%02d topo: 0x%08lx - "#name,	\
-+		       cpu, (topo++)->bits[0]);					\
-+	}									\
-+	if (!last)								\
-+		bitmap_complement(cpumask_bits(topo), cpumask_bits(mask),	\
-+				  nr_cpumask_bits);
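/*
 * Editor's expansion note (not part of the original patch): each
 * TOPOLOGY_CPUMASK() invocation records one topology level and advances
 * 'topo'.  On entry the slot holds the complement of the previous level's
 * mask, so cpumask_and() succeeds only when this level covers new CPUs,
 * which skips duplicate levels on machines where, e.g., cluster == LLC.
 */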
-+
-+static void sched_init_topology_cpumask(void)
-+{
-+	int cpu;
-+	cpumask_t *topo;
-+
-+	for_each_online_cpu(cpu) {
-+		topo = per_cpu(sched_cpu_topo_masks, cpu);
-+
-+		bitmap_complement(cpumask_bits(topo), cpumask_bits(cpumask_of(cpu)),
-+				  nr_cpumask_bits);
-+#ifdef CONFIG_SCHED_SMT
-+		TOPOLOGY_CPUMASK(smt, topology_sibling_cpumask(cpu), false);
-+#endif
-+		TOPOLOGY_CPUMASK(cluster, topology_cluster_cpumask(cpu), false);
-+
-+		per_cpu(sd_llc_id, cpu) = cpumask_first(cpu_coregroup_mask(cpu));
-+		per_cpu(sched_cpu_llc_mask, cpu) = topo;
-+		TOPOLOGY_CPUMASK(coregroup, cpu_coregroup_mask(cpu), false);
-+
-+		TOPOLOGY_CPUMASK(core, topology_core_cpumask(cpu), false);
-+
-+		TOPOLOGY_CPUMASK(others, cpu_online_mask, true);
-+
-+		per_cpu(sched_cpu_topo_end_mask, cpu) = topo;
-+		printk(KERN_INFO "sched: cpu#%02d llc_id = %d, llc_mask idx = %d\n",
-+		       cpu, per_cpu(sd_llc_id, cpu),
-+		       (int) (per_cpu(sched_cpu_llc_mask, cpu) -
-+			      per_cpu(sched_cpu_topo_masks, cpu)));
-+	}
-+}
-+#endif
-+
-+void __init sched_init_smp(void)
-+{
-+	/* Move init over to a non-isolated CPU */
-+	if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_TYPE_DOMAIN)) < 0)
-+		BUG();
-+	current->flags &= ~PF_NO_SETAFFINITY;
-+
-+	sched_init_topology();
-+	sched_init_topology_cpumask();
-+
-+	sched_smp_initialized = true;
-+}
-+
-+static int __init migration_init(void)
-+{
-+	sched_cpu_starting(smp_processor_id());
-+	return 0;
-+}
-+early_initcall(migration_init);
-+
-+#else
-+void __init sched_init_smp(void)
-+{
-+	cpu_rq(0)->idle->time_slice = sysctl_sched_base_slice;
-+}
-+#endif /* CONFIG_SMP */
-+
-+int in_sched_functions(unsigned long addr)
-+{
-+	return in_lock_functions(addr) ||
-+		(addr >= (unsigned long)__sched_text_start
-+		&& addr < (unsigned long)__sched_text_end);
-+}
-+
-+#ifdef CONFIG_CGROUP_SCHED
-+/*
-+ * Default task group.
-+ * Every task in the system belongs to this group at bootup.
-+ */
-+struct task_group root_task_group;
-+LIST_HEAD(task_groups);
-+
-+/* Cacheline aligned slab cache for task_group */
-+static struct kmem_cache *task_group_cache __ro_after_init;
-+#endif /* CONFIG_CGROUP_SCHED */
-+
-+void __init sched_init(void)
-+{
-+	int i;
-+	struct rq *rq;
-+
-+	printk(KERN_INFO "sched/alt: "ALT_SCHED_NAME" CPU Scheduler "ALT_SCHED_VERSION\
-+			 " by Alfred Chen.\n");
-+
-+	wait_bit_init();
-+
-+#ifdef CONFIG_SMP
-+	for (i = 0; i < SCHED_QUEUE_BITS; i++)
-+		cpumask_copy(sched_preempt_mask + i, cpu_present_mask);
-+#endif
-+
-+#ifdef CONFIG_CGROUP_SCHED
-+	task_group_cache = KMEM_CACHE(task_group, 0);
-+
-+	list_add(&root_task_group.list, &task_groups);
-+	INIT_LIST_HEAD(&root_task_group.children);
-+	INIT_LIST_HEAD(&root_task_group.siblings);
-+#endif /* CONFIG_CGROUP_SCHED */
-+	for_each_possible_cpu(i) {
-+		rq = cpu_rq(i);
-+
-+		sched_queue_init(&rq->queue);
-+		rq->prio = IDLE_TASK_SCHED_PRIO;
-+#ifdef CONFIG_SCHED_PDS
-+		rq->prio_idx = rq->prio;
-+#endif
-+
-+		raw_spin_lock_init(&rq->lock);
-+		rq->nr_running = rq->nr_uninterruptible = 0;
-+		rq->calc_load_active = 0;
-+		rq->calc_load_update = jiffies + LOAD_FREQ;
-+#ifdef CONFIG_SMP
-+		rq->online = false;
-+		rq->cpu = i;
-+
-+		rq->clear_idle_mask_func = cpumask_clear_cpu;
-+		rq->set_idle_mask_func = cpumask_set_cpu;
-+		rq->balance_func = NULL;
-+		rq->active_balance_arg.active = 0;
-+
-+#ifdef CONFIG_NO_HZ_COMMON
-+		INIT_CSD(&rq->nohz_csd, nohz_csd_func, rq);
-+#endif
-+		rq->balance_callback = &balance_push_callback;
-+#ifdef CONFIG_HOTPLUG_CPU
-+		rcuwait_init(&rq->hotplug_wait);
-+#endif
-+#endif /* CONFIG_SMP */
-+		rq->nr_switches = 0;
-+
-+		hrtick_rq_init(rq);
-+		atomic_set(&rq->nr_iowait, 0);
-+
-+		zalloc_cpumask_var_node(&rq->scratch_mask, GFP_KERNEL, cpu_to_node(i));
-+	}
-+#ifdef CONFIG_SMP
-+	/* Set rq->online for cpu 0 */
-+	cpu_rq(0)->online = true;
-+#endif
-+	/*
-+	 * The boot idle thread does lazy MMU switching as well:
-+	 */
-+	mmgrab(&init_mm);
-+	enter_lazy_tlb(&init_mm, current);
-+
-+	/*
-+	 * The idle task doesn't need the kthread struct to function, but it
-+	 * is dressed up as a per-CPU kthread and thus needs to play the part
-+	 * if we want to avoid special-casing it in code that deals with per-CPU
-+	 * kthreads.
-+	 */
-+	WARN_ON(!set_kthread_struct(current));
-+
-+	/*
-+	 * Make us the idle thread. Technically, schedule() should not be
-+	 * called from this thread, however somewhere below it might be,
-+	 * but because we are the idle thread, we just pick up running again
-+	 * when this runqueue becomes "idle".
-+	 */
-+	init_idle(current, smp_processor_id());
-+
-+	calc_load_update = jiffies + LOAD_FREQ;
-+
-+#ifdef CONFIG_SMP
-+	idle_thread_set_boot_cpu();
-+	balance_push_set(smp_processor_id(), false);
-+
-+	sched_init_topology_cpumask_early();
-+#endif /* SMP */
-+
-+	preempt_dynamic_init();
-+}
-+
-+#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
-+
-+void __might_sleep(const char *file, int line)
-+{
-+	unsigned int state = get_current_state();
-+	/*
-+	 * Blocking primitives will set (and therefore destroy) current->state,
-+	 * since we will exit with TASK_RUNNING, make sure we enter with it,
-+	 * otherwise we will destroy state.
-+	 */
-+	WARN_ONCE(state != TASK_RUNNING && current->task_state_change,
-+			"do not call blocking ops when !TASK_RUNNING; "
-+			"state=%x set at [<%p>] %pS\n", state,
-+			(void *)current->task_state_change,
-+			(void *)current->task_state_change);
-+
-+	__might_resched(file, line, 0);
-+}
-+EXPORT_SYMBOL(__might_sleep);
-+
-+static void print_preempt_disable_ip(int preempt_offset, unsigned long ip)
-+{
-+	if (!IS_ENABLED(CONFIG_DEBUG_PREEMPT))
-+		return;
-+
-+	if (preempt_count() == preempt_offset)
-+		return;
-+
-+	pr_err("Preemption disabled at:");
-+	print_ip_sym(KERN_ERR, ip);
-+}
-+
-+static inline bool resched_offsets_ok(unsigned int offsets)
-+{
-+	unsigned int nested = preempt_count();
-+
-+	nested += rcu_preempt_depth() << MIGHT_RESCHED_RCU_SHIFT;
-+
-+	return nested == offsets;
-+}
-+
-+void __might_resched(const char *file, int line, unsigned int offsets)
-+{
-+	/* Ratelimiting timestamp: */
-+	static unsigned long prev_jiffy;
-+
-+	unsigned long preempt_disable_ip;
-+
-+	/* WARN_ON_ONCE() by default, no rate limit required: */
-+	rcu_sleep_check();
-+
-+	if ((resched_offsets_ok(offsets) && !irqs_disabled() &&
-+	     !is_idle_task(current) && !current->non_block_count) ||
-+	    system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING ||
-+	    oops_in_progress)
-+		return;
-+	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
-+		return;
-+	prev_jiffy = jiffies;
-+
-+	/* Save this before calling printk(), since that will clobber it: */
-+	preempt_disable_ip = get_preempt_disable_ip(current);
-+
-+	pr_err("BUG: sleeping function called from invalid context at %s:%d\n",
-+	       file, line);
-+	pr_err("in_atomic(): %d, irqs_disabled(): %d, non_block: %d, pid: %d, name: %s\n",
-+	       in_atomic(), irqs_disabled(), current->non_block_count,
-+	       current->pid, current->comm);
-+	pr_err("preempt_count: %x, expected: %x\n", preempt_count(),
-+	       offsets & MIGHT_RESCHED_PREEMPT_MASK);
-+
-+	if (IS_ENABLED(CONFIG_PREEMPT_RCU)) {
-+		pr_err("RCU nest depth: %d, expected: %u\n",
-+		       rcu_preempt_depth(), offsets >> MIGHT_RESCHED_RCU_SHIFT);
-+	}
-+
-+	if (task_stack_end_corrupted(current))
-+		pr_emerg("Thread overran stack, or stack corrupted\n");
-+
-+	debug_show_held_locks(current);
-+	if (irqs_disabled())
-+		print_irqtrace_events(current);
-+
-+	print_preempt_disable_ip(offsets & MIGHT_RESCHED_PREEMPT_MASK,
-+				 preempt_disable_ip);
-+
-+	dump_stack();
-+	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
-+}
-+EXPORT_SYMBOL(__might_resched);
-+
-+void __cant_sleep(const char *file, int line, int preempt_offset)
-+{
-+	static unsigned long prev_jiffy;
-+
-+	if (irqs_disabled())
-+		return;
-+
-+	if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
-+		return;
-+
-+	if (preempt_count() > preempt_offset)
-+		return;
-+
-+	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
-+		return;
-+	prev_jiffy = jiffies;
-+
-+	printk(KERN_ERR "BUG: assuming atomic context at %s:%d\n", file, line);
-+	printk(KERN_ERR "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
-+			in_atomic(), irqs_disabled(),
-+			current->pid, current->comm);
-+
-+	debug_show_held_locks(current);
-+	dump_stack();
-+	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
-+}
-+EXPORT_SYMBOL_GPL(__cant_sleep);
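
As an illustrative aside, not part of the patch: the prev_jiffy test used by
__might_resched() and __cant_sleep() above (and __cant_migrate() below) is the
kernel's open-coded once-per-second ratelimit; prev_jiffy == 0 means the
warning has never fired, so the first occurrence always prints. A minimal
stand-alone sketch of the same shape, with hypothetical names:

/* Hedged sketch of the prev_jiffy ratelimit pattern; ratelimit_1hz()
 * and the use of time() are illustrative assumptions, not kernel API. */
#include <stdbool.h>
#include <time.h>

static bool ratelimit_1hz(void)
{
	static time_t prev;             /* 0: never fired yet */
	time_t now = time(NULL);

	if (prev && now < prev + 1)     /* fired within the last second */
		return false;
	prev = now;                     /* record this firing */
	return true;
}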
-+
-+#ifdef CONFIG_SMP
-+void __cant_migrate(const char *file, int line)
-+{
-+	static unsigned long prev_jiffy;
-+
-+	if (irqs_disabled())
-+		return;
-+
-+	if (is_migration_disabled(current))
-+		return;
-+
-+	if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
-+		return;
-+
-+	if (preempt_count() > 0)
-+		return;
-+
-+	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
-+		return;
-+	prev_jiffy = jiffies;
-+
-+	pr_err("BUG: assuming non migratable context at %s:%d\n", file, line);
-+	pr_err("in_atomic(): %d, irqs_disabled(): %d, migration_disabled() %u pid: %d, name: %s\n",
-+	       in_atomic(), irqs_disabled(), is_migration_disabled(current),
-+	       current->pid, current->comm);
-+
-+	debug_show_held_locks(current);
-+	dump_stack();
-+	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
-+}
-+EXPORT_SYMBOL_GPL(__cant_migrate);
-+#endif
-+#endif
-+
-+#ifdef CONFIG_MAGIC_SYSRQ
-+void normalize_rt_tasks(void)
-+{
-+	struct task_struct *g, *p;
-+	struct sched_attr attr = {
-+		.sched_policy = SCHED_NORMAL,
-+	};
-+
-+	read_lock(&tasklist_lock);
-+	for_each_process_thread(g, p) {
-+		/*
-+		 * Only normalize user tasks:
-+		 */
-+		if (p->flags & PF_KTHREAD)
-+			continue;
-+
-+		schedstat_set(p->stats.wait_start,  0);
-+		schedstat_set(p->stats.sleep_start, 0);
-+		schedstat_set(p->stats.block_start, 0);
-+
-+		if (!rt_or_dl_task(p)) {
-+			/*
-+			 * Renice negative nice level userspace
-+			 * tasks back to 0:
-+			 */
-+			if (task_nice(p) < 0)
-+				set_user_nice(p, 0);
-+			continue;
-+		}
-+
-+		__sched_setscheduler(p, &attr, false, false);
-+	}
-+	read_unlock(&tasklist_lock);
-+}
-+#endif /* CONFIG_MAGIC_SYSRQ */
-+
-+#if defined(CONFIG_KGDB_KDB)
-+/*
-+ * These functions are only useful for KDB.
-+ *
-+ * They can only be called when the whole system has been
-+ * stopped - every CPU needs to be quiescent, and no scheduling
-+ * activity can take place. Using them for anything else would
-+ * be a serious bug, and as a result, they aren't even visible
-+ * under any other configuration.
-+ */
-+
-+/**
-+ * curr_task - return the current task for a given CPU.
-+ * @cpu: the processor in question.
-+ *
-+ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
-+ *
-+ * Return: The current task for @cpu.
-+ */
-+struct task_struct *curr_task(int cpu)
-+{
-+	return cpu_curr(cpu);
-+}
-+
-+#endif /* defined(CONFIG_KGDB_KDB) */
-+
-+#ifdef CONFIG_CGROUP_SCHED
-+static void sched_free_group(struct task_group *tg)
-+{
-+	kmem_cache_free(task_group_cache, tg);
-+}
-+
-+static void sched_free_group_rcu(struct rcu_head *rhp)
-+{
-+	sched_free_group(container_of(rhp, struct task_group, rcu));
-+}
-+
-+static void sched_unregister_group(struct task_group *tg)
-+{
-+	/*
-+	 * We have to wait for yet another RCU grace period to expire, as
-+	 * print_cfs_stats() might run concurrently.
-+	 */
-+	call_rcu(&tg->rcu, sched_free_group_rcu);
-+}
-+
-+/* allocate runqueue etc for a new task group */
-+struct task_group *sched_create_group(struct task_group *parent)
-+{
-+	struct task_group *tg;
-+
-+	tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO);
-+	if (!tg)
-+		return ERR_PTR(-ENOMEM);
-+
-+	return tg;
-+}
-+
-+void sched_online_group(struct task_group *tg, struct task_group *parent)
-+{
-+}
-+
-+/* RCU callback to free various structures associated with a task group */
-+static void sched_unregister_group_rcu(struct rcu_head *rhp)
-+{
-+	/* Now it should be safe to free those cfs_rqs: */
-+	sched_unregister_group(container_of(rhp, struct task_group, rcu));
-+}
-+
-+void sched_destroy_group(struct task_group *tg)
-+{
-+	/* Wait for possible concurrent references to cfs_rqs to complete: */
-+	call_rcu(&tg->rcu, sched_unregister_group_rcu);
-+}
-+
-+void sched_release_group(struct task_group *tg)
-+{
-+}
-+
-+static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
-+{
-+	return css ? container_of(css, struct task_group, css) : NULL;
-+}
-+
-+static struct cgroup_subsys_state *
-+cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
-+{
-+	struct task_group *parent = css_tg(parent_css);
-+	struct task_group *tg;
-+
-+	if (!parent) {
-+		/* This is early initialization for the top cgroup */
-+		return &root_task_group.css;
-+	}
-+
-+	tg = sched_create_group(parent);
-+	if (IS_ERR(tg))
-+		return ERR_PTR(-ENOMEM);
-+	return &tg->css;
-+}
-+
-+/* Expose task group only after completing cgroup initialization */
-+static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
-+{
-+	struct task_group *tg = css_tg(css);
-+	struct task_group *parent = css_tg(css->parent);
-+
-+	if (parent)
-+		sched_online_group(tg, parent);
-+	return 0;
-+}
-+
-+static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
-+{
-+	struct task_group *tg = css_tg(css);
-+
-+	sched_release_group(tg);
-+}
-+
-+static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
-+{
-+	struct task_group *tg = css_tg(css);
-+
-+	/*
-+	 * Relies on the RCU grace period between css_released() and this.
-+	 */
-+	sched_unregister_group(tg);
-+}
-+
-+#ifdef CONFIG_RT_GROUP_SCHED
-+static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
-+{
-+	return 0;
-+}
-+#endif
-+
-+static void cpu_cgroup_attach(struct cgroup_taskset *tset)
-+{
-+}
-+
-+#ifdef CONFIG_GROUP_SCHED_WEIGHT
-+static int sched_group_set_shares(struct task_group *tg, unsigned long shares)
-+{
-+	return 0;
-+}
-+
-+static int sched_group_set_idle(struct task_group *tg, long idle)
-+{
-+	return 0;
-+}
-+
-+static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
-+				struct cftype *cftype, u64 shareval)
-+{
-+	return sched_group_set_shares(css_tg(css), shareval);
-+}
-+
-+static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
-+			       struct cftype *cft)
-+{
-+	return 0;
-+}
-+
-+static s64 cpu_idle_read_s64(struct cgroup_subsys_state *css,
-+			       struct cftype *cft)
-+{
-+	return 0;
-+}
-+
-+static int cpu_idle_write_s64(struct cgroup_subsys_state *css,
-+				struct cftype *cft, s64 idle)
-+{
-+	return sched_group_set_idle(css_tg(css), idle);
-+}
-+#endif
-+
-+#ifdef CONFIG_CFS_BANDWIDTH
-+static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css,
-+				  struct cftype *cft)
-+{
-+	return 0;
-+}
-+
-+static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css,
-+				   struct cftype *cftype, s64 cfs_quota_us)
-+{
-+	return 0;
-+}
-+
-+static u64 cpu_cfs_period_read_u64(struct cgroup_subsys_state *css,
-+				   struct cftype *cft)
-+{
-+	return 0;
-+}
-+
-+static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css,
-+				    struct cftype *cftype, u64 cfs_period_us)
-+{
-+	return 0;
-+}
-+
-+static u64 cpu_cfs_burst_read_u64(struct cgroup_subsys_state *css,
-+				  struct cftype *cft)
-+{
-+	return 0;
-+}
-+
-+static int cpu_cfs_burst_write_u64(struct cgroup_subsys_state *css,
-+				   struct cftype *cftype, u64 cfs_burst_us)
-+{
-+	return 0;
-+}
-+
-+static int cpu_cfs_stat_show(struct seq_file *sf, void *v)
-+{
-+	return 0;
-+}
-+
-+static int cpu_cfs_local_stat_show(struct seq_file *sf, void *v)
-+{
-+	return 0;
-+}
-+#endif
-+
-+#ifdef CONFIG_RT_GROUP_SCHED
-+static int cpu_rt_runtime_write(struct cgroup_subsys_state *css,
-+				struct cftype *cft, s64 val)
-+{
-+	return 0;
-+}
-+
-+static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css,
-+			       struct cftype *cft)
-+{
-+	return 0;
-+}
-+
-+static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css,
-+				    struct cftype *cftype, u64 rt_period_us)
-+{
-+	return 0;
-+}
-+
-+static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
-+				   struct cftype *cft)
-+{
-+	return 0;
-+}
-+#endif
-+
-+#ifdef CONFIG_UCLAMP_TASK_GROUP
-+static int cpu_uclamp_min_show(struct seq_file *sf, void *v)
-+{
-+	return 0;
-+}
-+
-+static int cpu_uclamp_max_show(struct seq_file *sf, void *v)
-+{
-+	return 0;
-+}
-+
-+static ssize_t cpu_uclamp_min_write(struct kernfs_open_file *of,
-+				    char *buf, size_t nbytes,
-+				    loff_t off)
-+{
-+	return nbytes;
-+}
-+
-+static ssize_t cpu_uclamp_max_write(struct kernfs_open_file *of,
-+				    char *buf, size_t nbytes,
-+				    loff_t off)
-+{
-+	return nbytes;
-+}
-+#endif
-+
-+static struct cftype cpu_legacy_files[] = {
-+#ifdef CONFIG_GROUP_SCHED_WEIGHT
-+	{
-+		.name = "shares",
-+		.read_u64 = cpu_shares_read_u64,
-+		.write_u64 = cpu_shares_write_u64,
-+	},
-+	{
-+		.name = "idle",
-+		.read_s64 = cpu_idle_read_s64,
-+		.write_s64 = cpu_idle_write_s64,
-+	},
-+#endif
-+#ifdef CONFIG_CFS_BANDWIDTH
-+	{
-+		.name = "cfs_quota_us",
-+		.read_s64 = cpu_cfs_quota_read_s64,
-+		.write_s64 = cpu_cfs_quota_write_s64,
-+	},
-+	{
-+		.name = "cfs_period_us",
-+		.read_u64 = cpu_cfs_period_read_u64,
-+		.write_u64 = cpu_cfs_period_write_u64,
-+	},
-+	{
-+		.name = "cfs_burst_us",
-+		.read_u64 = cpu_cfs_burst_read_u64,
-+		.write_u64 = cpu_cfs_burst_write_u64,
-+	},
-+	{
-+		.name = "stat",
-+		.seq_show = cpu_cfs_stat_show,
-+	},
-+	{
-+		.name = "stat.local",
-+		.seq_show = cpu_cfs_local_stat_show,
-+	},
-+#endif
-+#ifdef CONFIG_RT_GROUP_SCHED
-+	{
-+		.name = "rt_runtime_us",
-+		.read_s64 = cpu_rt_runtime_read,
-+		.write_s64 = cpu_rt_runtime_write,
-+	},
-+	{
-+		.name = "rt_period_us",
-+		.read_u64 = cpu_rt_period_read_uint,
-+		.write_u64 = cpu_rt_period_write_uint,
-+	},
-+#endif
-+#ifdef CONFIG_UCLAMP_TASK_GROUP
-+	{
-+		.name = "uclamp.min",
-+		.flags = CFTYPE_NOT_ON_ROOT,
-+		.seq_show = cpu_uclamp_min_show,
-+		.write = cpu_uclamp_min_write,
-+	},
-+	{
-+		.name = "uclamp.max",
-+		.flags = CFTYPE_NOT_ON_ROOT,
-+		.seq_show = cpu_uclamp_max_show,
-+		.write = cpu_uclamp_max_write,
-+	},
-+#endif
-+	{ }	/* Terminate */
-+};
-+
-+#ifdef CONFIG_GROUP_SCHED_WEIGHT
-+static u64 cpu_weight_read_u64(struct cgroup_subsys_state *css,
-+			       struct cftype *cft)
-+{
-+	return 0;
-+}
-+
-+static int cpu_weight_write_u64(struct cgroup_subsys_state *css,
-+				struct cftype *cft, u64 weight)
-+{
-+	return 0;
-+}
-+
-+static s64 cpu_weight_nice_read_s64(struct cgroup_subsys_state *css,
-+				    struct cftype *cft)
-+{
-+	return 0;
-+}
-+
-+static int cpu_weight_nice_write_s64(struct cgroup_subsys_state *css,
-+				     struct cftype *cft, s64 nice)
-+{
-+	return 0;
-+}
-+#endif
-+
-+#ifdef CONFIG_CFS_BANDWIDTH
-+static int cpu_max_show(struct seq_file *sf, void *v)
-+{
-+	return 0;
-+}
-+
-+static ssize_t cpu_max_write(struct kernfs_open_file *of,
-+			     char *buf, size_t nbytes, loff_t off)
-+{
-+	return nbytes;
-+}
-+#endif
-+
-+static struct cftype cpu_files[] = {
-+#ifdef CONFIG_GROUP_SCHED_WEIGHT
-+	{
-+		.name = "weight",
-+		.flags = CFTYPE_NOT_ON_ROOT,
-+		.read_u64 = cpu_weight_read_u64,
-+		.write_u64 = cpu_weight_write_u64,
-+	},
-+	{
-+		.name = "weight.nice",
-+		.flags = CFTYPE_NOT_ON_ROOT,
-+		.read_s64 = cpu_weight_nice_read_s64,
-+		.write_s64 = cpu_weight_nice_write_s64,
-+	},
-+	{
-+		.name = "idle",
-+		.flags = CFTYPE_NOT_ON_ROOT,
-+		.read_s64 = cpu_idle_read_s64,
-+		.write_s64 = cpu_idle_write_s64,
-+	},
-+#endif
-+#ifdef CONFIG_CFS_BANDWIDTH
-+	{
-+		.name = "max",
-+		.flags = CFTYPE_NOT_ON_ROOT,
-+		.seq_show = cpu_max_show,
-+		.write = cpu_max_write,
-+	},
-+	{
-+		.name = "max.burst",
-+		.flags = CFTYPE_NOT_ON_ROOT,
-+		.read_u64 = cpu_cfs_burst_read_u64,
-+		.write_u64 = cpu_cfs_burst_write_u64,
-+	},
-+#endif
-+#ifdef CONFIG_UCLAMP_TASK_GROUP
-+	{
-+		.name = "uclamp.min",
-+		.flags = CFTYPE_NOT_ON_ROOT,
-+		.seq_show = cpu_uclamp_min_show,
-+		.write = cpu_uclamp_min_write,
-+	},
-+	{
-+		.name = "uclamp.max",
-+		.flags = CFTYPE_NOT_ON_ROOT,
-+		.seq_show = cpu_uclamp_max_show,
-+		.write = cpu_uclamp_max_write,
-+	},
-+#endif
-+	{ }	/* terminate */
-+};
-+
-+static int cpu_extra_stat_show(struct seq_file *sf,
-+			       struct cgroup_subsys_state *css)
-+{
-+	return 0;
-+}
-+
-+static int cpu_local_stat_show(struct seq_file *sf,
-+			       struct cgroup_subsys_state *css)
-+{
-+	return 0;
-+}
-+
-+struct cgroup_subsys cpu_cgrp_subsys = {
-+	.css_alloc	= cpu_cgroup_css_alloc,
-+	.css_online	= cpu_cgroup_css_online,
-+	.css_released	= cpu_cgroup_css_released,
-+	.css_free	= cpu_cgroup_css_free,
-+	.css_extra_stat_show = cpu_extra_stat_show,
-+	.css_local_stat_show = cpu_local_stat_show,
-+#ifdef CONFIG_RT_GROUP_SCHED
-+	.can_attach	= cpu_cgroup_can_attach,
-+#endif
-+	.attach		= cpu_cgroup_attach,
-+	.legacy_cftypes	= cpu_legacy_files,
-+	.dfl_cftypes	= cpu_files,
-+	.early_init	= true,
-+	.threaded	= true,
-+};
-+#endif	/* CONFIG_CGROUP_SCHED */
-+
-+#undef CREATE_TRACE_POINTS
-+
-+#ifdef CONFIG_SCHED_MM_CID
-+
-+/*
-+ * @cid_lock: Guarantee forward-progress of cid allocation.
-+ *
-+ * Concurrency ID allocation within a bitmap is mostly lock-free. The cid_lock
-+ * is only used when contention is detected by the lock-free allocation so
-+ * forward progress can be guaranteed.
-+ */
-+DEFINE_RAW_SPINLOCK(cid_lock);
-+
-+/*
-+ * @use_cid_lock: Select cid allocation behavior: lock-free vs spinlock.
-+ *
-+ * When @use_cid_lock is 0, the cid allocation is lock-free. When contention is
-+ * detected, it is set to 1 to ensure that all newly coming allocations are
-+ * serialized by @cid_lock until the allocation which detected contention
-+ * completes and sets @use_cid_lock back to 0. This guarantees forward progress
-+ * of a cid allocation.
-+ */
-+int use_cid_lock;
-+
-+/*
-+ * mm_cid remote-clear implements a lock-free algorithm to clear per-mm/cpu cid
-+ * concurrently with respect to the execution of the source runqueue context
-+ * switch.
-+ *
-+ * There is one basic property we want to guarantee here:
-+ *
-+ * (1) Remote-clear should _never_ mark a per-cpu cid UNSET when it is actively
-+ * used by a task. That would lead to concurrent allocation of the cid and
-+ * userspace corruption.
-+ *
-+ * Provide this guarantee by introducing a Dekker memory ordering to guarantee
-+ * that a pair of loads observe at least one of a pair of stores, which can be
-+ * shown as:
-+ *
-+ *      X = Y = 0
-+ *
-+ *      w[X]=1          w[Y]=1
-+ *      MB              MB
-+ *      r[Y]=y          r[X]=x
-+ *
-+ * Which guarantees that x==0 && y==0 is impossible. But rather than using
-+ * values 0 and 1, this algorithm cares about specific state transitions of the
-+ * runqueue current task (as updated by the scheduler context switch), and the
-+ * per-mm/cpu cid value.
-+ *
-+ * Let's introduce task (Y) which has task->mm == mm and task (N) which has
-+ * task->mm != mm for the rest of the discussion. There are two scheduler state
-+ * transitions on context switch we care about:
-+ *
-+ * (TSA) Store to rq->curr with transition from (N) to (Y)
-+ *
-+ * (TSB) Store to rq->curr with transition from (Y) to (N)
-+ *
-+ * On the remote-clear side, there is one transition we care about:
-+ *
-+ * (TMA) cmpxchg to *pcpu_cid to set the LAZY flag
-+ *
-+ * There is also a transition to UNSET state which can be performed from all
-+ * sides (scheduler, remote-clear). It is always performed with a cmpxchg which
-+ * guarantees that only a single thread will succeed:
-+ *
-+ * (TMB) cmpxchg to *pcpu_cid to mark UNSET
-+ *
-+ * Just to be clear, what we do _not_ want to happen is a transition to UNSET
-+ * when a thread is actively using the cid (property (1)).
-+ *
-+ * Let's look at the relevant combinations of TSA/TSB, and TMA transitions.
-+ *
-+ * Scenario A) (TSA)+(TMA) (from next task perspective)
-+ *
-+ * CPU0                                      CPU1
-+ *
-+ * Context switch CS-1                       Remote-clear
-+ *   - store to rq->curr: (N)->(Y) (TSA)     - cmpxchg to *pcpu_id to LAZY (TMA)
-+ *                                             (implied barrier after cmpxchg)
-+ *   - switch_mm_cid()
-+ *     - memory barrier (see switch_mm_cid()
-+ *       comment explaining how this barrier
-+ *       is combined with other scheduler
-+ *       barriers)
-+ *     - mm_cid_get (next)
-+ *       - READ_ONCE(*pcpu_cid)              - rcu_dereference(src_rq->curr)
-+ *
-+ * This Dekker ensures that either task (Y) is observed by the
-+ * rcu_dereference() or the LAZY flag is observed by READ_ONCE(), or both are
-+ * observed.
-+ *
-+ * If task (Y) store is observed by rcu_dereference(), it means that there is
-+ * still an active task on the cpu. Remote-clear will therefore not transition
-+ * to UNSET, which fulfills property (1).
-+ *
-+ * If task (Y) is not observed, but the lazy flag is observed by READ_ONCE(),
-+ * it will move its state to UNSET, which clears the percpu cid perhaps
-+ * uselessly (which is not an issue for correctness). Because task (Y) is not
-+ * observed, CPU1 can move ahead to set the state to UNSET. Because moving
-+ * state to UNSET is done with a cmpxchg expecting that the old state has the
-+ * LAZY flag set, only one thread will successfully UNSET.
-+ *
-+ * If both states (LAZY flag and task (Y)) are observed, the thread on CPU0
-+ * will observe the LAZY flag and transition to UNSET (perhaps uselessly), and
-+ * CPU1 will observe task (Y) and do nothing more, which is fine.
-+ *
-+ * What we are effectively preventing with this Dekker is a scenario where
-+ * neither LAZY flag nor store (Y) are observed, which would fail property (1)
-+ * because this would UNSET a cid which is actively used.
-+ */
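
As an illustrative aside, not part of the patch: the store/barrier/load
diagram above can be modelled with stand-alone C11 atomics. The fences below
play the role of the kernel's implied full barriers; the names side_a/side_b
are hypothetical:

#include <stdatomic.h>

static atomic_int X, Y;                 /* both start at 0 */

void side_a(int *y_seen)                /* models the context-switch side */
{
	atomic_store_explicit(&X, 1, memory_order_relaxed);        /* w[X]=1 */
	atomic_thread_fence(memory_order_seq_cst);                 /* MB     */
	*y_seen = atomic_load_explicit(&Y, memory_order_relaxed);  /* r[Y]   */
}

void side_b(int *x_seen)                /* models the remote-clear side */
{
	atomic_store_explicit(&Y, 1, memory_order_relaxed);        /* w[Y]=1 */
	atomic_thread_fence(memory_order_seq_cst);                 /* MB     */
	*x_seen = atomic_load_explicit(&X, memory_order_relaxed);  /* r[X]   */
}

/* When both run concurrently, x_seen == 0 && y_seen == 0 is impossible:
 * at least one load observes the other side's store. */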
-+
-+void sched_mm_cid_migrate_from(struct task_struct *t)
-+{
-+	t->migrate_from_cpu = task_cpu(t);
-+}
-+
-+static
-+int __sched_mm_cid_migrate_from_fetch_cid(struct rq *src_rq,
-+					  struct task_struct *t,
-+					  struct mm_cid *src_pcpu_cid)
-+{
-+	struct mm_struct *mm = t->mm;
-+	struct task_struct *src_task;
-+	int src_cid, last_mm_cid;
-+
-+	if (!mm)
-+		return -1;
-+
-+	last_mm_cid = t->last_mm_cid;
-+	/*
-+	 * If the migrated task has no last cid, or if the current
-+	 * task on src rq uses the cid, it means the source cid does not need
-+	 * to be moved to the destination cpu.
-+	 */
-+	if (last_mm_cid == -1)
-+		return -1;
-+	src_cid = READ_ONCE(src_pcpu_cid->cid);
-+	if (!mm_cid_is_valid(src_cid) || last_mm_cid != src_cid)
-+		return -1;
-+
-+	/*
-+	 * If we observe an active task using the mm on this rq, it means we
-+	 * are not the last task to be migrated from this cpu for this mm, so
-+	 * there is no need to move src_cid to the destination cpu.
-+	 */
-+	guard(rcu)();
-+	src_task = rcu_dereference(src_rq->curr);
-+	if (READ_ONCE(src_task->mm_cid_active) && src_task->mm == mm) {
-+		t->last_mm_cid = -1;
-+		return -1;
-+	}
-+
-+	return src_cid;
-+}
-+
-+static
-+int __sched_mm_cid_migrate_from_try_steal_cid(struct rq *src_rq,
-+					      struct task_struct *t,
-+					      struct mm_cid *src_pcpu_cid,
-+					      int src_cid)
-+{
-+	struct task_struct *src_task;
-+	struct mm_struct *mm = t->mm;
-+	int lazy_cid;
-+
-+	if (src_cid == -1)
-+		return -1;
-+
-+	/*
-+	 * Attempt to clear the source cpu cid to move it to the destination
-+	 * cpu.
-+	 */
-+	lazy_cid = mm_cid_set_lazy_put(src_cid);
-+	if (!try_cmpxchg(&src_pcpu_cid->cid, &src_cid, lazy_cid))
-+		return -1;
-+
-+	/*
-+	 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
-+	 * rq->curr->mm matches the scheduler barrier in context_switch()
-+	 * between store to rq->curr and load of prev and next task's
-+	 * per-mm/cpu cid.
-+	 *
-+	 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
-+	 * rq->curr->mm_cid_active matches the barrier in
-+	 * sched_mm_cid_exit_signals(), sched_mm_cid_before_execve(), and
-+	 * sched_mm_cid_after_execve() between store to t->mm_cid_active and
-+	 * load of per-mm/cpu cid.
-+	 */
-+
-+	/*
-+	 * If we observe an active task using the mm on this rq after setting
-+	 * the lazy-put flag, this task will be responsible for transitioning
-+	 * from lazy-put flag set to MM_CID_UNSET.
-+	 */
-+	scoped_guard (rcu) {
-+		src_task = rcu_dereference(src_rq->curr);
-+		if (READ_ONCE(src_task->mm_cid_active) && src_task->mm == mm) {
-+			/*
-+			 * We observed an active task for this mm, there is therefore
-+			 * no point in moving this cid to the destination cpu.
-+			 */
-+			t->last_mm_cid = -1;
-+			return -1;
-+		}
-+	}
-+
-+	/*
-+	 * The src_cid is unused, so it can be unset.
-+	 */
-+	if (!try_cmpxchg(&src_pcpu_cid->cid, &lazy_cid, MM_CID_UNSET))
-+		return -1;
-+	return src_cid;
-+}
-+
-+/*
-+ * Migration to dst cpu. Called with dst_rq lock held.
-+ * Interrupts are disabled, which keeps the window of cid ownership without the
-+ * source rq lock held small.
-+ */
-+void sched_mm_cid_migrate_to(struct rq *dst_rq, struct task_struct *t)
-+{
-+	struct mm_cid *src_pcpu_cid, *dst_pcpu_cid;
-+	struct mm_struct *mm = t->mm;
-+	int src_cid, dst_cid, src_cpu;
-+	struct rq *src_rq;
-+
-+	lockdep_assert_rq_held(dst_rq);
-+
-+	if (!mm)
-+		return;
-+	src_cpu = t->migrate_from_cpu;
-+	if (src_cpu == -1) {
-+		t->last_mm_cid = -1;
-+		return;
-+	}
-+	/*
-+	 * Move the src cid if the dst cid is unset. This keeps id
-+	 * allocation closest to 0 in cases where few threads migrate around
-+	 * many CPUs.
-+	 *
-+	 * If destination cid is already set, we may have to just clear
-+	 * the src cid to ensure compactness in frequent migrations
-+	 * scenarios.
-+	 *
-+	 * It is not useful to clear the src cid when the number of threads is
-+	 * greater than or equal to the number of allowed CPUs, because user-space
-+	 * can expect that the number of allowed cids can reach the number of
-+	 * allowed CPUs.
-+	 */
-+	dst_pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu_of(dst_rq));
-+	dst_cid = READ_ONCE(dst_pcpu_cid->cid);
-+	if (!mm_cid_is_unset(dst_cid) &&
-+	    atomic_read(&mm->mm_users) >= t->nr_cpus_allowed)
-+		return;
-+	src_pcpu_cid = per_cpu_ptr(mm->pcpu_cid, src_cpu);
-+	src_rq = cpu_rq(src_cpu);
-+	src_cid = __sched_mm_cid_migrate_from_fetch_cid(src_rq, t, src_pcpu_cid);
-+	if (src_cid == -1)
-+		return;
-+	src_cid = __sched_mm_cid_migrate_from_try_steal_cid(src_rq, t, src_pcpu_cid,
-+							    src_cid);
-+	if (src_cid == -1)
-+		return;
-+	if (!mm_cid_is_unset(dst_cid)) {
-+		__mm_cid_put(mm, src_cid);
-+		return;
-+	}
-+	/* Move src_cid to dst cpu. */
-+	mm_cid_snapshot_time(dst_rq, mm);
-+	WRITE_ONCE(dst_pcpu_cid->cid, src_cid);
-+}
-+
-+static void sched_mm_cid_remote_clear(struct mm_struct *mm, struct mm_cid *pcpu_cid,
-+				      int cpu)
-+{
-+	struct rq *rq = cpu_rq(cpu);
-+	struct task_struct *t;
-+	int cid, lazy_cid;
-+
-+	cid = READ_ONCE(pcpu_cid->cid);
-+	if (!mm_cid_is_valid(cid))
-+		return;
-+
-+	/*
-+	 * Clear the cpu cid if it is set to keep cid allocation compact.  If
-+	 * there happens to be other tasks left on the source cpu using this
-+	 * mm, the next task using this mm will reallocate its cid on context
-+	 * switch.
-+	 */
-+	lazy_cid = mm_cid_set_lazy_put(cid);
-+	if (!try_cmpxchg(&pcpu_cid->cid, &cid, lazy_cid))
-+		return;
-+
-+	/*
-+	 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
-+	 * rq->curr->mm matches the scheduler barrier in context_switch()
-+	 * between store to rq->curr and load of prev and next task's
-+	 * per-mm/cpu cid.
-+	 *
-+	 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
-+	 * rq->curr->mm_cid_active matches the barrier in
-+	 * sched_mm_cid_exit_signals(), sched_mm_cid_before_execve(), and
-+	 * sched_mm_cid_after_execve() between store to t->mm_cid_active and
-+	 * load of per-mm/cpu cid.
-+	 */
-+
-+	/*
-+	 * If we observe an active task using the mm on this rq after setting
-+	 * the lazy-put flag, that task will be responsible for transitioning
-+	 * from lazy-put flag set to MM_CID_UNSET.
-+	 */
-+	scoped_guard (rcu) {
-+		t = rcu_dereference(rq->curr);
-+		if (READ_ONCE(t->mm_cid_active) && t->mm == mm)
-+			return;
-+	}
-+
-+	/*
-+	 * The cid is unused, so it can be unset.
-+	 * Disable interrupts to keep the window of cid ownership without rq
-+	 * lock small.
-+	 */
-+	scoped_guard (irqsave) {
-+		if (try_cmpxchg(&pcpu_cid->cid, &lazy_cid, MM_CID_UNSET))
-+			__mm_cid_put(mm, cid);
-+	}
-+}
-+
-+static void sched_mm_cid_remote_clear_old(struct mm_struct *mm, int cpu)
-+{
-+	struct rq *rq = cpu_rq(cpu);
-+	struct mm_cid *pcpu_cid;
-+	struct task_struct *curr;
-+	u64 rq_clock;
-+
-+	/*
-+	 * rq->clock load is racy on 32-bit but one spurious clear once in a
-+	 * while is irrelevant.
-+	 */
-+	rq_clock = READ_ONCE(rq->clock);
-+	pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu);
-+
-+	/*
-+	 * In order to take care of infrequently scheduled tasks, bump the time
-+	 * snapshot associated with this cid if an active task using the mm is
-+	 * observed on this rq.
-+	 */
-+	scoped_guard (rcu) {
-+		curr = rcu_dereference(rq->curr);
-+		if (READ_ONCE(curr->mm_cid_active) && curr->mm == mm) {
-+			WRITE_ONCE(pcpu_cid->time, rq_clock);
-+			return;
-+		}
-+	}
-+
-+	if (rq_clock < pcpu_cid->time + SCHED_MM_CID_PERIOD_NS)
-+		return;
-+	sched_mm_cid_remote_clear(mm, pcpu_cid, cpu);
-+}
-+
-+static void sched_mm_cid_remote_clear_weight(struct mm_struct *mm, int cpu,
-+					     int weight)
-+{
-+	struct mm_cid *pcpu_cid;
-+	int cid;
-+
-+	pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu);
-+	cid = READ_ONCE(pcpu_cid->cid);
-+	if (!mm_cid_is_valid(cid) || cid < weight)
-+		return;
-+	sched_mm_cid_remote_clear(mm, pcpu_cid, cpu);
-+}
-+
-+static void task_mm_cid_work(struct callback_head *work)
-+{
-+	unsigned long now = jiffies, old_scan, next_scan;
-+	struct task_struct *t = current;
-+	struct cpumask *cidmask;
-+	struct mm_struct *mm;
-+	int weight, cpu;
-+
-+	SCHED_WARN_ON(t != container_of(work, struct task_struct, cid_work));
-+
-+	work->next = work;	/* Prevent double-add */
-+	if (t->flags & PF_EXITING)
-+		return;
-+	mm = t->mm;
-+	if (!mm)
-+		return;
-+	old_scan = READ_ONCE(mm->mm_cid_next_scan);
-+	next_scan = now + msecs_to_jiffies(MM_CID_SCAN_DELAY);
-+	if (!old_scan) {
-+		unsigned long res;
-+
-+		res = cmpxchg(&mm->mm_cid_next_scan, old_scan, next_scan);
-+		if (res != old_scan)
-+			old_scan = res;
-+		else
-+			old_scan = next_scan;
-+	}
-+	if (time_before(now, old_scan))
-+		return;
-+	if (!try_cmpxchg(&mm->mm_cid_next_scan, &old_scan, next_scan))
-+		return;
-+	cidmask = mm_cidmask(mm);
-+	/* Clear cids that were not recently used. */
-+	for_each_possible_cpu(cpu)
-+		sched_mm_cid_remote_clear_old(mm, cpu);
-+	weight = cpumask_weight(cidmask);
-+	/*
-+	 * Clear cids that are greater than or equal to the cidmask weight to
-+	 * recompact it.
-+	 */
-+	for_each_possible_cpu(cpu)
-+		sched_mm_cid_remote_clear_weight(mm, cpu, weight);
-+}
-+
-+void init_sched_mm_cid(struct task_struct *t)
-+{
-+	struct mm_struct *mm = t->mm;
-+	int mm_users = 0;
-+
-+	if (mm) {
-+		mm_users = atomic_read(&mm->mm_users);
-+		if (mm_users == 1)
-+			mm->mm_cid_next_scan = jiffies + msecs_to_jiffies(MM_CID_SCAN_DELAY);
-+	}
-+	t->cid_work.next = &t->cid_work;	/* Protect against double add */
-+	init_task_work(&t->cid_work, task_mm_cid_work);
-+}
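
As an illustrative aside, not part of the patch: pointing cid_work.next at
itself is a sentinel meaning "not queued", which is exactly what
task_tick_mm_cid() below tests before re-adding the callback. A toy model
with hypothetical types:

#include <stdbool.h>
#include <stddef.h>

struct cb { struct cb *next; };     /* stand-in for struct callback_head */

static void cb_init(struct cb *c)   { c->next = c; }  /* self-pointer: idle */
static bool cb_queued(const struct cb *c) { return c->next != c; }

static bool cb_try_queue(struct cb *c, struct cb **head)
{
	if (cb_queued(c))
		return false;           /* prevent double-add */
	c->next = *head;                /* push onto a simple LIFO list */
	*head = c;
	return true;
}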
-+
-+void task_tick_mm_cid(struct rq *rq, struct task_struct *curr)
-+{
-+	struct callback_head *work = &curr->cid_work;
-+	unsigned long now = jiffies;
-+
-+	if (!curr->mm || (curr->flags & (PF_EXITING | PF_KTHREAD)) ||
-+	    work->next != work)
-+		return;
-+	if (time_before(now, READ_ONCE(curr->mm->mm_cid_next_scan)))
-+		return;
-+
-+	/* No page allocation under rq lock */
-+	task_work_add(curr, work, TWA_RESUME | TWAF_NO_ALLOC);
-+}
-+
-+void sched_mm_cid_exit_signals(struct task_struct *t)
-+{
-+	struct mm_struct *mm = t->mm;
-+	struct rq *rq;
-+
-+	if (!mm)
-+		return;
-+
-+	preempt_disable();
-+	rq = this_rq();
-+	guard(rq_lock_irqsave)(rq);
-+	preempt_enable_no_resched();	/* holding spinlock */
-+	WRITE_ONCE(t->mm_cid_active, 0);
-+	/*
-+	 * Store t->mm_cid_active before loading per-mm/cpu cid.
-+	 * Matches barrier in sched_mm_cid_remote_clear_old().
-+	 */
-+	smp_mb();
-+	mm_cid_put(mm);
-+	t->last_mm_cid = t->mm_cid = -1;
-+}
-+
-+void sched_mm_cid_before_execve(struct task_struct *t)
-+{
-+	struct mm_struct *mm = t->mm;
-+	struct rq *rq;
-+
-+	if (!mm)
-+		return;
-+
-+	preempt_disable();
-+	rq = this_rq();
-+	guard(rq_lock_irqsave)(rq);
-+	preempt_enable_no_resched();	/* holding spinlock */
-+	WRITE_ONCE(t->mm_cid_active, 0);
-+	/*
-+	 * Store t->mm_cid_active before loading per-mm/cpu cid.
-+	 * Matches barrier in sched_mm_cid_remote_clear_old().
-+	 */
-+	smp_mb();
-+	mm_cid_put(mm);
-+	t->last_mm_cid = t->mm_cid = -1;
-+}
-+
-+void sched_mm_cid_after_execve(struct task_struct *t)
-+{
-+	struct mm_struct *mm = t->mm;
-+	struct rq *rq;
-+
-+	if (!mm)
-+		return;
-+
-+	preempt_disable();
-+	rq = this_rq();
-+	scoped_guard (rq_lock_irqsave, rq) {
-+		preempt_enable_no_resched();	/* holding spinlock */
-+		WRITE_ONCE(t->mm_cid_active, 1);
-+		/*
-+		 * Store t->mm_cid_active before loading per-mm/cpu cid.
-+		 * Matches barrier in sched_mm_cid_remote_clear_old().
-+		 */
-+		smp_mb();
-+		t->last_mm_cid = t->mm_cid = mm_cid_get(rq, mm);
-+	}
-+	rseq_set_notify_resume(t);
-+}
-+
-+void sched_mm_cid_fork(struct task_struct *t)
-+{
-+	WARN_ON_ONCE(!t->mm || t->mm_cid != -1);
-+	t->mm_cid_active = 1;
-+}
-+#endif
-diff --git a/kernel/sched/alt_core.h b/kernel/sched/alt_core.h
-new file mode 100644
-index 000000000000..12d76d9d290e
---- /dev/null
-+++ b/kernel/sched/alt_core.h
-@@ -0,0 +1,213 @@
-+#ifndef _KERNEL_SCHED_ALT_CORE_H
-+#define _KERNEL_SCHED_ALT_CORE_H
-+
-+/*
-+ * Compile time debug macro
-+ * #define ALT_SCHED_DEBUG
-+ */
-+
-+/*
-+ * Task related inlined functions
-+ */
-+static inline bool is_migration_disabled(struct task_struct *p)
-+{
-+#ifdef CONFIG_SMP
-+	return p->migration_disabled;
-+#else
-+	return false;
-+#endif
-+}
-+
-+/* rt_prio(prio) defined in include/linux/sched/rt.h */
-+#define rt_task(p)		rt_prio((p)->prio)
-+#define rt_policy(policy)	((policy) == SCHED_FIFO || (policy) == SCHED_RR)
-+#define task_has_rt_policy(p)	(rt_policy((p)->policy))
-+
-+struct affinity_context {
-+	const struct cpumask	*new_mask;
-+	struct cpumask		*user_mask;
-+	unsigned int		flags;
-+};
-+
-+/* CONFIG_SCHED_CLASS_EXT is not supported */
-+#define scx_switched_all()	false
-+
-+#define SCA_CHECK		0x01
-+#define SCA_MIGRATE_DISABLE	0x02
-+#define SCA_MIGRATE_ENABLE	0x04
-+#define SCA_USER		0x08
-+
-+#ifdef CONFIG_SMP
-+
-+extern int __set_cpus_allowed_ptr(struct task_struct *p, struct affinity_context *ctx);
-+
-+static inline cpumask_t *alloc_user_cpus_ptr(int node)
-+{
-+	/*
-+	 * See do_set_cpus_allowed() above for the rcu_head usage.
-+	 */
-+	int size = max_t(int, cpumask_size(), sizeof(struct rcu_head));
-+
-+	return kmalloc_node(size, GFP_KERNEL, node);
-+}
-+
-+#else /* !CONFIG_SMP: */
-+
-+static inline int __set_cpus_allowed_ptr(struct task_struct *p,
-+					 struct affinity_context *ctx)
-+{
-+	return set_cpus_allowed_ptr(p, ctx->new_mask);
-+}
-+
-+static inline cpumask_t *alloc_user_cpus_ptr(int node)
-+{
-+	return NULL;
-+}
-+
-+#endif /* !CONFIG_SMP */
-+
-+#ifdef CONFIG_RT_MUTEXES
-+
-+static inline int __rt_effective_prio(struct task_struct *pi_task, int prio)
-+{
-+	if (pi_task)
-+		prio = min(prio, pi_task->prio);
-+
-+	return prio;
-+}
-+
-+static inline int rt_effective_prio(struct task_struct *p, int prio)
-+{
-+	struct task_struct *pi_task = rt_mutex_get_top_task(p);
-+
-+	return __rt_effective_prio(pi_task, prio);
-+}
-+
-+#else /* !CONFIG_RT_MUTEXES: */
-+
-+static inline int rt_effective_prio(struct task_struct *p, int prio)
-+{
-+	return prio;
-+}
-+
-+#endif /* !CONFIG_RT_MUTEXES */
-+
-+extern int __sched_setscheduler(struct task_struct *p, const struct sched_attr *attr, bool user, bool pi);
-+extern int __sched_setaffinity(struct task_struct *p, struct affinity_context *ctx);
-+extern void __setscheduler_prio(struct task_struct *p, int prio);
-+
-+/*
-+ * Context API
-+ */
-+static inline struct rq *__task_access_lock(struct task_struct *p, raw_spinlock_t **plock)
-+{
-+	struct rq *rq;
-+	for (;;) {
-+		rq = task_rq(p);
-+		if (p->on_cpu || task_on_rq_queued(p)) {
-+			raw_spin_lock(&rq->lock);
-+			if (likely((p->on_cpu || task_on_rq_queued(p)) && rq == task_rq(p))) {
-+				*plock = &rq->lock;
-+				return rq;
-+			}
-+			raw_spin_unlock(&rq->lock);
-+		} else if (task_on_rq_migrating(p)) {
-+			do {
-+				cpu_relax();
-+			} while (unlikely(task_on_rq_migrating(p)));
-+		} else {
-+			*plock = NULL;
-+			return rq;
-+		}
-+	}
-+}
-+
-+static inline void __task_access_unlock(struct task_struct *p, raw_spinlock_t *lock)
-+{
-+	if (NULL != lock)
-+		raw_spin_unlock(lock);
-+}
-+
-+void check_task_changed(struct task_struct *p, struct rq *rq);
-+
-+/*
-+ * RQ related inlined functions
-+ */
-+
-+/*
-+ * This routine assumes that the idle task is always in the queue.
-+ */
-+static inline struct task_struct *sched_rq_first_task(struct rq *rq)
-+{
-+	const struct list_head *head = &rq->queue.heads[sched_rq_prio_idx(rq)];
-+
-+	return list_first_entry(head, struct task_struct, sq_node);
-+}
-+
-+static inline struct task_struct *sched_rq_next_task(struct task_struct *p, struct rq *rq)
-+{
-+	struct list_head *next = p->sq_node.next;
-+
-+	if (&rq->queue.heads[0] <= next && next < &rq->queue.heads[SCHED_LEVELS]) {
-+		struct list_head *head;
-+		unsigned long idx = next - &rq->queue.heads[0];
-+
-+		idx = find_next_bit(rq->queue.bitmap, SCHED_QUEUE_BITS,
-+				    sched_idx2prio(idx, rq) + 1);
-+		head = &rq->queue.heads[sched_prio2idx(idx, rq)];
-+
-+		return list_first_entry(head, struct task_struct, sq_node);
-+	}
-+
-+	return list_next_entry(p, sq_node);
-+}
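
As an illustrative aside, not part of the patch: the two lookup helpers above
rely on a bitmap with one bit per non-empty priority level, so "find the next
runnable task" reduces to a find-next-set-bit scan. A stand-alone toy model
(hypothetical names, no locking, plain loop instead of find_next_bit()):

#include <stdint.h>
#include <stdio.h>

#define LEVELS 65                   /* mirrors SCHED_LEVELS above */

struct toy_queue {
	uint64_t bitmap[2];         /* bit i set => level i non-empty */
	int      head[LEVELS];      /* head "task id" per level, -1 if none */
};

/* First non-empty level at or after 'from'; lower level = higher prio. */
static int first_level_from(const struct toy_queue *q, int from)
{
	for (int i = from; i < LEVELS; i++)
		if (q->bitmap[i / 64] & (1ULL << (i % 64)))
			return i;
	return LEVELS;              /* nothing queued */
}

int main(void)
{
	struct toy_queue q = { 0 };

	for (int i = 0; i < LEVELS; i++)
		q.head[i] = -1;
	q.bitmap[0] |= 1ULL << 32;  /* one task at NORMAL prio 32 */
	q.head[32] = 42;

	printf("first runnable level: %d (task %d)\n",
	       first_level_from(&q, 0), q.head[32]);
	return 0;
}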
-+
-+extern void requeue_task(struct task_struct *p, struct rq *rq);
-+
-+#ifdef ALT_SCHED_DEBUG
-+extern void alt_sched_debug(void);
-+#else
-+static inline void alt_sched_debug(void) {}
-+#endif
-+
-+extern int sched_yield_type;
-+
-+#ifdef CONFIG_SMP
-+extern cpumask_t sched_rq_pending_mask ____cacheline_aligned_in_smp;
-+
-+DECLARE_STATIC_KEY_FALSE(sched_smt_present);
-+DECLARE_PER_CPU_ALIGNED(cpumask_t *, sched_cpu_llc_mask);
-+
-+extern cpumask_t sched_smt_mask ____cacheline_aligned_in_smp;
-+
-+extern cpumask_t *const sched_idle_mask;
-+extern cpumask_t *const sched_sg_idle_mask;
-+extern cpumask_t *const sched_pcore_idle_mask;
-+extern cpumask_t *const sched_ecore_idle_mask;
-+
-+extern struct rq *move_queued_task(struct rq *rq, struct task_struct *p, int new_cpu);
-+
-+typedef bool (*idle_select_func_t)(struct cpumask *dstp, const struct cpumask *src1p,
-+				   const struct cpumask *src2p);
-+
-+extern idle_select_func_t idle_select_func;
-+#endif
-+
-+/* balance callback */
-+#ifdef CONFIG_SMP
-+extern struct balance_callback *splice_balance_callbacks(struct rq *rq);
-+extern void balance_callbacks(struct rq *rq, struct balance_callback *head);
-+#else
-+
-+static inline struct balance_callback *splice_balance_callbacks(struct rq *rq)
-+{
-+	return NULL;
-+}
-+
-+static inline void balance_callbacks(struct rq *rq, struct balance_callback *head)
-+{
-+}
-+
-+#endif
-+
-+#endif /* _KERNEL_SCHED_ALT_CORE_H */
-diff --git a/kernel/sched/alt_debug.c b/kernel/sched/alt_debug.c
-new file mode 100644
-index 000000000000..1dbd7eb6a434
---- /dev/null
-+++ b/kernel/sched/alt_debug.c
-@@ -0,0 +1,32 @@
-+/*
-+ * kernel/sched/alt_debug.c
-+ *
-+ * Print the alt scheduler debugging details
-+ *
-+ * Author: Alfred Chen
-+ * Date  : 2020
-+ */
-+#include "sched.h"
-+#include "linux/sched/debug.h"
-+
-+/*
-+ * This allows printing both to /proc/sched_debug and
-+ * to the console
-+ */
-+#define SEQ_printf(m, x...)			\
-+ do {						\
-+	if (m)					\
-+		seq_printf(m, x);		\
-+	else					\
-+		pr_cont(x);			\
-+ } while (0)
-+
-+void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
-+			  struct seq_file *m)
-+{
-+	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),
-+						get_nr_threads(p));
-+}
-+
-+void proc_sched_set_task(struct task_struct *p)
-+{}
-diff --git a/kernel/sched/alt_sched.h b/kernel/sched/alt_sched.h
-new file mode 100644
-index 000000000000..7fb3433c5c41
---- /dev/null
-+++ b/kernel/sched/alt_sched.h
-@@ -0,0 +1,997 @@
-+#ifndef _KERNEL_SCHED_ALT_SCHED_H
-+#define _KERNEL_SCHED_ALT_SCHED_H
-+
-+#include <linux/context_tracking.h>
-+#include <linux/profile.h>
-+#include <linux/stop_machine.h>
-+#include <linux/syscalls.h>
-+#include <linux/tick.h>
-+
-+#include <trace/events/power.h>
-+#include <trace/events/sched.h>
-+
-+#include "../workqueue_internal.h"
-+
-+#include "cpupri.h"
-+
-+#ifdef CONFIG_CGROUP_SCHED
-+/* task group related information */
-+struct task_group {
-+	struct cgroup_subsys_state css;
-+
-+	struct rcu_head rcu;
-+	struct list_head list;
-+
-+	struct task_group *parent;
-+	struct list_head siblings;
-+	struct list_head children;
-+};
-+
-+extern struct task_group *sched_create_group(struct task_group *parent);
-+extern void sched_online_group(struct task_group *tg,
-+			       struct task_group *parent);
-+extern void sched_destroy_group(struct task_group *tg);
-+extern void sched_release_group(struct task_group *tg);
-+#endif /* CONFIG_CGROUP_SCHED */
-+
-+#define MIN_SCHED_NORMAL_PRIO	(32)
-+/*
-+ * levels: RT(0-24), reserved(25-31), NORMAL(32-63), cpu idle task(64)
-+ *
-+ * -- BMQ --
-+ * NORMAL: (lower boost range 12, NICE_WIDTH 40, higher boost range 12) / 2
-+ * -- PDS --
-+ * NORMAL: SCHED_EDGE_DELTA + ((NICE_WIDTH 40) / 2)
-+ */
-+#define SCHED_LEVELS		(64 + 1)
-+
-+#define IDLE_TASK_SCHED_PRIO	(SCHED_LEVELS - 1)
-+
-+#ifdef CONFIG_SCHED_DEBUG
-+# define SCHED_WARN_ON(x)	WARN_ONCE(x, #x)
-+extern void resched_latency_warn(int cpu, u64 latency);
-+#else
-+# define SCHED_WARN_ON(x)	({ (void)(x), 0; })
-+static inline void resched_latency_warn(int cpu, u64 latency) {}
-+#endif
-+
-+/*
-+ * Increase resolution of nice-level calculations for 64-bit architectures.
-+ * The extra resolution improves shares distribution and load balancing of
-+ * low-weight task groups (e.g. nice +19 on an autogroup), deeper taskgroup
-+ * hierarchies, especially on larger systems. This is not a user-visible change
-+ * and does not change the user-interface for setting shares/weights.
-+ *
-+ * We increase resolution only if we have enough bits to allow this increased
-+ * resolution (i.e. 64-bit). The costs for increasing resolution when 32-bit
-+ * are pretty high and the returns do not justify the increased costs.
-+ *
-+ * Really only required when CONFIG_FAIR_GROUP_SCHED=y is also set, but to
-+ * increase coverage and consistency always enable it on 64-bit platforms.
-+ */
-+#ifdef CONFIG_64BIT
-+# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
-+# define scale_load(w)		((w) << SCHED_FIXEDPOINT_SHIFT)
-+# define scale_load_down(w) \
-+({ \
-+	unsigned long __w = (w); \
-+	if (__w) \
-+		__w = max(2UL, __w >> SCHED_FIXEDPOINT_SHIFT); \
-+	__w; \
-+})
-+#else
-+# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT)
-+# define scale_load(w)		(w)
-+# define scale_load_down(w)	(w)
-+#endif
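
As an illustrative aside, not part of the patch: with SCHED_FIXEDPOINT_SHIFT
equal to 10 (its usual kernel value), scale_load() multiplies a weight by 1024
and scale_load_down() divides it back, clamping non-zero results to at least 2.
Worked numbers: scale_load(1024) = 1048576; scale_load_down(1048576) = 1024;
scale_load_down(1) = 2, because 1 >> 10 would otherwise round down to 0.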
-+
-+/*
-+ * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
-+ */
-+#ifdef CONFIG_SCHED_DEBUG
-+# define const_debug __read_mostly
-+#else
-+# define const_debug const
-+#endif
-+
-+/* task_struct::on_rq states: */
-+#define TASK_ON_RQ_QUEUED	1
-+#define TASK_ON_RQ_MIGRATING	2
-+
-+static inline int task_on_rq_queued(struct task_struct *p)
-+{
-+	return p->on_rq == TASK_ON_RQ_QUEUED;
-+}
-+
-+static inline int task_on_rq_migrating(struct task_struct *p)
-+{
-+	return READ_ONCE(p->on_rq) == TASK_ON_RQ_MIGRATING;
-+}
-+
-+/* Wake flags. The first three directly map to some SD flag value */
-+#define WF_EXEC         0x02 /* Wakeup after exec; maps to SD_BALANCE_EXEC */
-+#define WF_FORK         0x04 /* Wakeup after fork; maps to SD_BALANCE_FORK */
-+#define WF_TTWU         0x08 /* Wakeup;            maps to SD_BALANCE_WAKE */
-+
-+#define WF_SYNC         0x10 /* Waker goes to sleep after wakeup */
-+#define WF_MIGRATED     0x20 /* Internal use, task got migrated */
-+#define WF_CURRENT_CPU  0x40 /* Prefer to move the wakee to the current CPU. */
-+
-+#ifdef CONFIG_SMP
-+static_assert(WF_EXEC == SD_BALANCE_EXEC);
-+static_assert(WF_FORK == SD_BALANCE_FORK);
-+static_assert(WF_TTWU == SD_BALANCE_WAKE);
-+#endif
-+
-+#define SCHED_QUEUE_BITS	(SCHED_LEVELS - 1)
-+
-+struct sched_queue {
-+	DECLARE_BITMAP(bitmap, SCHED_QUEUE_BITS);
-+	struct list_head heads[SCHED_LEVELS];
-+};
-+
-+struct rq;
-+struct cpuidle_state;
-+
-+struct balance_callback {
-+	struct balance_callback *next;
-+	void (*func)(struct rq *rq);
-+};
-+
-+typedef void (*balance_func_t)(struct rq *rq, int cpu);
-+typedef void (*set_idle_mask_func_t)(unsigned int cpu, struct cpumask *dstp);
-+typedef void (*clear_idle_mask_func_t)(int cpu, struct cpumask *dstp);
-+
-+struct balance_arg {
-+	struct task_struct	*task;
-+	int			active;
-+	cpumask_t		*cpumask;
-+};
-+
-+/*
-+ * This is the main, per-CPU runqueue data structure.
-+ * This data should only be modified by the local cpu.
-+ */
-+struct rq {
-+	/* runqueue lock: */
-+	raw_spinlock_t			lock;
-+
-+	struct task_struct __rcu	*curr;
-+	struct task_struct		*idle;
-+	struct task_struct		*stop;
-+	struct mm_struct		*prev_mm;
-+
-+	struct sched_queue		queue		____cacheline_aligned;
-+
-+	int				prio;
-+#ifdef CONFIG_SCHED_PDS
-+	int				prio_idx;
-+	u64				time_edge;
-+#endif
-+
-+	/* switch count */
-+	u64 nr_switches;
-+
-+	atomic_t nr_iowait;
-+
-+#ifdef CONFIG_SCHED_DEBUG
-+	u64 last_seen_need_resched_ns;
-+	int ticks_without_resched;
-+#endif
-+
-+#ifdef CONFIG_MEMBARRIER
-+	int membarrier_state;
-+#endif
-+
-+	set_idle_mask_func_t	set_idle_mask_func;
-+	clear_idle_mask_func_t	clear_idle_mask_func;
-+
-+#ifdef CONFIG_SMP
-+	int cpu;		/* cpu of this runqueue */
-+	bool online;
-+
-+	unsigned int		ttwu_pending;
-+	unsigned char		nohz_idle_balance;
-+	unsigned char		idle_balance;
-+
-+#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
-+	struct sched_avg	avg_irq;
-+#endif
-+
-+	balance_func_t		balance_func;
-+	struct balance_arg	active_balance_arg		____cacheline_aligned;
-+	struct cpu_stop_work	active_balance_work;
-+
-+	struct balance_callback	*balance_callback;
-+#ifdef CONFIG_HOTPLUG_CPU
-+	struct rcuwait		hotplug_wait;
-+#endif
-+	unsigned int		nr_pinned;
-+
-+#endif /* CONFIG_SMP */
-+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
-+	u64 prev_irq_time;
-+#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
-+#ifdef CONFIG_PARAVIRT
-+	u64 prev_steal_time;
-+#endif /* CONFIG_PARAVIRT */
-+#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
-+	u64 prev_steal_time_rq;
-+#endif /* CONFIG_PARAVIRT_TIME_ACCOUNTING */
-+
-+	/* For general cpu load util */
-+	s32 load_history;
-+	u64 load_block;
-+	u64 load_stamp;
-+
-+	/* calc_load related fields */
-+	unsigned long calc_load_update;
-+	long calc_load_active;
-+
-+	/* Ensure that all clocks are in the same cache line */
-+	u64			clock ____cacheline_aligned;
-+	u64			clock_task;
-+
-+	unsigned int  nr_running;
-+	unsigned long nr_uninterruptible;
-+
-+#ifdef CONFIG_SCHED_HRTICK
-+#ifdef CONFIG_SMP
-+	call_single_data_t hrtick_csd;
-+#endif
-+	struct hrtimer		hrtick_timer;
-+	ktime_t			hrtick_time;
-+#endif
-+
-+#ifdef CONFIG_SCHEDSTATS
-+
-+	/* latency stats */
-+	struct sched_info rq_sched_info;
-+	unsigned long long rq_cpu_time;
-+	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
-+
-+	/* sys_sched_yield() stats */
-+	unsigned int yld_count;
-+
-+	/* schedule() stats */
-+	unsigned int sched_switch;
-+	unsigned int sched_count;
-+	unsigned int sched_goidle;
-+
-+	/* try_to_wake_up() stats */
-+	unsigned int ttwu_count;
-+	unsigned int ttwu_local;
-+#endif /* CONFIG_SCHEDSTATS */
-+
-+#ifdef CONFIG_CPU_IDLE
-+	/* Must be inspected within a rcu lock section */
-+	struct cpuidle_state *idle_state;
-+#endif
-+
-+#ifdef CONFIG_NO_HZ_COMMON
-+#ifdef CONFIG_SMP
-+	call_single_data_t	nohz_csd;
-+#endif
-+	atomic_t		nohz_flags;
-+#endif /* CONFIG_NO_HZ_COMMON */
-+
-+	/* Scratch cpumask to be temporarily used under rq_lock */
-+	cpumask_var_t		scratch_mask;
-+};
-+
-+extern unsigned int sysctl_sched_base_slice;
-+
-+extern unsigned long rq_load_util(struct rq *rq, unsigned long max);
-+
-+extern unsigned long calc_load_update;
-+extern atomic_long_t calc_load_tasks;
-+
-+extern void calc_global_load_tick(struct rq *this_rq);
-+extern long calc_load_fold_active(struct rq *this_rq, long adjust);
-+
-+DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
-+#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
-+#define this_rq()		this_cpu_ptr(&runqueues)
-+#define task_rq(p)		cpu_rq(task_cpu(p))
-+#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
-+#define raw_rq()		raw_cpu_ptr(&runqueues)
-+
-+#ifdef CONFIG_SMP
-+#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
-+void register_sched_domain_sysctl(void);
-+void unregister_sched_domain_sysctl(void);
-+#else
-+static inline void register_sched_domain_sysctl(void)
-+{
-+}
-+static inline void unregister_sched_domain_sysctl(void)
-+{
-+}
-+#endif
-+
-+extern bool sched_smp_initialized;
-+
-+enum {
-+#ifdef CONFIG_SCHED_SMT
-+	SMT_LEVEL_SPACE_HOLDER,
-+#endif
-+	COREGROUP_LEVEL_SPACE_HOLDER,
-+	CORE_LEVEL_SPACE_HOLDER,
-+	OTHER_LEVEL_SPACE_HOLDER,
-+	NR_CPU_AFFINITY_LEVELS
-+};
-+
-+DECLARE_PER_CPU_ALIGNED(cpumask_t [NR_CPU_AFFINITY_LEVELS], sched_cpu_topo_masks);
-+
-+static inline int
-+__best_mask_cpu(const cpumask_t *cpumask, const cpumask_t *mask)
-+{
-+	int cpu;
-+
-+	while ((cpu = cpumask_any_and(cpumask, mask)) >= nr_cpu_ids)
-+		mask++;
-+
-+	return cpu;
-+}
-+
-+static inline int best_mask_cpu(int cpu, const cpumask_t *mask)
-+{
-+	return __best_mask_cpu(mask, per_cpu(sched_cpu_topo_masks, cpu));
-+}
-+
-+#endif
-+
-+#ifndef arch_scale_freq_tick
-+static __always_inline
-+void arch_scale_freq_tick(void)
-+{
-+}
-+#endif
-+
-+#ifndef arch_scale_freq_capacity
-+static __always_inline
-+unsigned long arch_scale_freq_capacity(int cpu)
-+{
-+	return SCHED_CAPACITY_SCALE;
-+}
-+#endif
-+
-+static inline u64 __rq_clock_broken(struct rq *rq)
-+{
-+	return READ_ONCE(rq->clock);
-+}
-+
-+static inline u64 rq_clock(struct rq *rq)
-+{
-+	/*
-+	 * Relax lockdep_assert_held() checking as in VRQ; calls to
-+	 * sched_info_xxxx() may not hold rq->lock
-+	 * lockdep_assert_held(&rq->lock);
-+	 */
-+	return rq->clock;
-+}
-+
-+static inline u64 rq_clock_task(struct rq *rq)
-+{
-+	/*
-+	 * Relax lockdep_assert_held() checking as in VRQ; calls to
-+	 * sched_info_xxxx() may not hold rq->lock
-+	 * lockdep_assert_held(&rq->lock);
-+	 */
-+	return rq->clock_task;
-+}
-+
-+/*
-+ * {de,en}queue flags:
-+ *
-+ * DEQUEUE_SLEEP  - task is no longer runnable
-+ * ENQUEUE_WAKEUP - task just became runnable
-+ *
-+ */
-+
-+#define DEQUEUE_SLEEP		0x01
-+
-+#define ENQUEUE_WAKEUP		0x01
-+
-+
-+/*
-+ * Below are scheduler APIs used in other kernel code.
-+ * They use a dummy rq_flags.
-+ * TODO: BMQ needs to support these APIs for compatibility with mainline
-+ * scheduler code.
-+ */
-+struct rq_flags {
-+	unsigned long flags;
-+};
-+
-+struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
-+	__acquires(rq->lock);
-+
-+struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
-+	__acquires(p->pi_lock)
-+	__acquires(rq->lock);
-+
-+static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
-+	__releases(rq->lock)
-+{
-+	raw_spin_unlock(&rq->lock);
-+}
-+
-+static inline void
-+task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
-+	__releases(rq->lock)
-+	__releases(p->pi_lock)
-+{
-+	raw_spin_unlock(&rq->lock);
-+	raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
-+}
-+
-+static inline void
-+rq_lock(struct rq *rq, struct rq_flags *rf)
-+	__acquires(rq->lock)
-+{
-+	raw_spin_lock(&rq->lock);
-+}
-+
-+static inline void
-+rq_unlock(struct rq *rq, struct rq_flags *rf)
-+	__releases(rq->lock)
-+{
-+	raw_spin_unlock(&rq->lock);
-+}
-+
-+static inline void
-+rq_lock_irq(struct rq *rq, struct rq_flags *rf)
-+	__acquires(rq->lock)
-+{
-+	raw_spin_lock_irq(&rq->lock);
-+}
-+
-+static inline void
-+rq_unlock_irq(struct rq *rq, struct rq_flags *rf)
-+	__releases(rq->lock)
-+{
-+	raw_spin_unlock_irq(&rq->lock);
-+}
-+
-+static inline struct rq *
-+this_rq_lock_irq(struct rq_flags *rf)
-+	__acquires(rq->lock)
-+{
-+	struct rq *rq;
-+
-+	local_irq_disable();
-+	rq = this_rq();
-+	raw_spin_lock(&rq->lock);
-+
-+	return rq;
-+}
-+
-+static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
-+{
-+	return &rq->lock;
-+}
-+
-+static inline raw_spinlock_t *rq_lockp(struct rq *rq)
-+{
-+	return __rq_lockp(rq);
-+}
-+
-+static inline void lockdep_assert_rq_held(struct rq *rq)
-+{
-+	lockdep_assert_held(__rq_lockp(rq));
-+}
-+
-+extern void raw_spin_rq_lock_nested(struct rq *rq, int subclass);
-+extern void raw_spin_rq_unlock(struct rq *rq);
-+
-+static inline void raw_spin_rq_lock(struct rq *rq)
-+{
-+	raw_spin_rq_lock_nested(rq, 0);
-+}
-+
-+static inline void raw_spin_rq_lock_irq(struct rq *rq)
-+{
-+	local_irq_disable();
-+	raw_spin_rq_lock(rq);
-+}
-+
-+static inline void raw_spin_rq_unlock_irq(struct rq *rq)
-+{
-+	raw_spin_rq_unlock(rq);
-+	local_irq_enable();
-+}
-+
-+static inline int task_current(struct rq *rq, struct task_struct *p)
-+{
-+	return rq->curr == p;
-+}
-+
-+static inline bool task_on_cpu(struct task_struct *p)
-+{
-+	return p->on_cpu;
-+}
-+
-+extern struct static_key_false sched_schedstats;
-+
-+#ifdef CONFIG_CPU_IDLE
-+static inline void idle_set_state(struct rq *rq,
-+				  struct cpuidle_state *idle_state)
-+{
-+	rq->idle_state = idle_state;
-+}
-+
-+static inline struct cpuidle_state *idle_get_state(struct rq *rq)
-+{
-+	WARN_ON(!rcu_read_lock_held());
-+	return rq->idle_state;
-+}
-+#else
-+static inline void idle_set_state(struct rq *rq,
-+				  struct cpuidle_state *idle_state)
-+{
-+}
-+
-+static inline struct cpuidle_state *idle_get_state(struct rq *rq)
-+{
-+	return NULL;
-+}
-+#endif
-+
-+static inline int cpu_of(const struct rq *rq)
-+{
-+#ifdef CONFIG_SMP
-+	return rq->cpu;
-+#else
-+	return 0;
-+#endif
-+}
-+
-+extern void resched_cpu(int cpu);
-+
-+#include "stats.h"
-+
-+#ifdef CONFIG_NO_HZ_COMMON
-+#define NOHZ_BALANCE_KICK_BIT	0
-+#define NOHZ_STATS_KICK_BIT	1
-+
-+#define NOHZ_BALANCE_KICK	BIT(NOHZ_BALANCE_KICK_BIT)
-+#define NOHZ_STATS_KICK		BIT(NOHZ_STATS_KICK_BIT)
-+
-+#define NOHZ_KICK_MASK	(NOHZ_BALANCE_KICK | NOHZ_STATS_KICK)
-+
-+#define nohz_flags(cpu)	(&cpu_rq(cpu)->nohz_flags)
-+
-+/* TODO: needed?
-+extern void nohz_balance_exit_idle(struct rq *rq);
-+#else
-+static inline void nohz_balance_exit_idle(struct rq *rq) { }
-+*/
-+#endif
-+
-+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
-+struct irqtime {
-+	u64			total;
-+	u64			tick_delta;
-+	u64			irq_start_time;
-+	struct u64_stats_sync	sync;
-+};
-+
-+DECLARE_PER_CPU(struct irqtime, cpu_irqtime);
-+
-+/*
-+ * Returns the irqtime minus the softirq time computed by ksoftirqd.
-+ * Otherwise ksoftirqd's sum_exec_runtime would have its own runtime
-+ * subtracted and would never move forward.
-+ */
-+static inline u64 irq_time_read(int cpu)
-+{
-+	struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
-+	unsigned int seq;
-+	u64 total;
-+
-+	do {
-+		seq = __u64_stats_fetch_begin(&irqtime->sync);
-+		total = irqtime->total;
-+	} while (__u64_stats_fetch_retry(&irqtime->sync, seq));
-+
-+	return total;
-+}
-+#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
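
As an illustrative aside, not part of the patch: irq_time_read() above is a
lock-free seqcount reader — retry while the sequence is odd (writer active) or
changed between the two reads. A stand-alone C11 sketch of that retry loop,
with hypothetical names and a 64-bit value split across two words to motivate
the pattern:

#include <stdatomic.h>

struct seq_pair {
	atomic_uint seq;                     /* odd while a writer is active */
	_Atomic unsigned int lo, hi;         /* payload: a split 64-bit value */
};

void pair_write(struct seq_pair *p, unsigned long long v)
{
	atomic_fetch_add_explicit(&p->seq, 1, memory_order_relaxed); /* -> odd */
	atomic_thread_fence(memory_order_release);
	atomic_store_explicit(&p->lo, (unsigned int)v, memory_order_relaxed);
	atomic_store_explicit(&p->hi, (unsigned int)(v >> 32), memory_order_relaxed);
	atomic_fetch_add_explicit(&p->seq, 1, memory_order_release); /* -> even */
}

unsigned long long pair_read(struct seq_pair *p)
{
	unsigned int s1, s2, lo, hi;

	do {
		s1 = atomic_load_explicit(&p->seq, memory_order_acquire);
		lo = atomic_load_explicit(&p->lo, memory_order_relaxed);
		hi = atomic_load_explicit(&p->hi, memory_order_relaxed);
		atomic_thread_fence(memory_order_acquire);
		s2 = atomic_load_explicit(&p->seq, memory_order_relaxed);
	} while ((s1 & 1) || s1 != s2);      /* writer seen: retry */

	return ((unsigned long long)hi << 32) | lo;
}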
-+
-+#ifdef CONFIG_CPU_FREQ
-+DECLARE_PER_CPU(struct update_util_data __rcu *, cpufreq_update_util_data);
-+#endif /* CONFIG_CPU_FREQ */
-+
-+#ifdef CONFIG_NO_HZ_FULL
-+extern int __init sched_tick_offload_init(void);
-+#else
-+static inline int sched_tick_offload_init(void) { return 0; }
-+#endif
-+
-+#ifdef arch_scale_freq_capacity
-+#ifndef arch_scale_freq_invariant
-+#define arch_scale_freq_invariant()	(true)
-+#endif
-+#else /* arch_scale_freq_capacity */
-+#define arch_scale_freq_invariant()	(false)
-+#endif
-+
-+#ifdef CONFIG_SMP
-+unsigned long sugov_effective_cpu_perf(int cpu, unsigned long actual,
-+				 unsigned long min,
-+				 unsigned long max);
-+#endif /* CONFIG_SMP */
-+
-+extern void schedule_idle(void);
-+
-+#define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
-+
-+/*
-+ * !! For sched_setattr_nocheck() (kernel) only !!
-+ *
-+ * This is actually gross. :(
-+ *
-+ * It is used to make schedutil kworker(s) higher priority than SCHED_DEADLINE
-+ * tasks, but still be able to sleep. We need this on platforms that cannot
-+ * atomically change clock frequency. Remove once fast switching will be
-+ * available on such platforms.
-+ *
-+ * SUGOV stands for SchedUtil GOVernor.
-+ */
-+#define SCHED_FLAG_SUGOV	0x10000000
-+
-+#ifdef CONFIG_MEMBARRIER
-+/*
-+ * The scheduler provides memory barriers required by membarrier between:
-+ * - prior user-space memory accesses and store to rq->membarrier_state,
-+ * - store to rq->membarrier_state and following user-space memory accesses.
-+ * In the same way it provides those guarantees around store to rq->curr.
-+ */
-+static inline void membarrier_switch_mm(struct rq *rq,
-+					struct mm_struct *prev_mm,
-+					struct mm_struct *next_mm)
-+{
-+	int membarrier_state;
-+
-+	if (prev_mm == next_mm)
-+		return;
-+
-+	membarrier_state = atomic_read(&next_mm->membarrier_state);
-+	if (READ_ONCE(rq->membarrier_state) == membarrier_state)
-+		return;
-+
-+	WRITE_ONCE(rq->membarrier_state, membarrier_state);
-+}
-+#else
-+static inline void membarrier_switch_mm(struct rq *rq,
-+					struct mm_struct *prev_mm,
-+					struct mm_struct *next_mm)
-+{
-+}
-+#endif
-+
-+#ifdef CONFIG_NUMA
-+extern int sched_numa_find_closest(const struct cpumask *cpus, int cpu);
-+#else
-+static inline int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
-+{
-+	return nr_cpu_ids;
-+}
-+#endif
-+
-+extern void swake_up_all_locked(struct swait_queue_head *q);
-+extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
-+
-+extern int try_to_wake_up(struct task_struct *tsk, unsigned int state, int wake_flags);
-+
-+#ifdef CONFIG_PREEMPT_DYNAMIC
-+extern int preempt_dynamic_mode;
-+extern int sched_dynamic_mode(const char *str);
-+extern void sched_dynamic_update(int mode);
-+#endif
-+
-+static inline void nohz_run_idle_balance(int cpu) { }
-+
-+static inline unsigned long
-+uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id)
-+{
-+	if (clamp_id == UCLAMP_MIN)
-+		return 0;
-+
-+	return SCHED_CAPACITY_SCALE;
-+}
-+
-+static inline bool uclamp_rq_is_capped(struct rq *rq) { return false; }
-+
-+static inline bool uclamp_is_used(void)
-+{
-+	return false;
-+}
-+
-+static inline unsigned long
-+uclamp_rq_get(struct rq *rq, enum uclamp_id clamp_id)
-+{
-+	if (clamp_id == UCLAMP_MIN)
-+		return 0;
-+
-+	return SCHED_CAPACITY_SCALE;
-+}
-+
-+static inline void
-+uclamp_rq_set(struct rq *rq, enum uclamp_id clamp_id, unsigned int value)
-+{
-+}
-+
-+static inline bool uclamp_rq_is_idle(struct rq *rq)
-+{
-+	return false;
-+}
-+
-+#ifdef CONFIG_SCHED_MM_CID
-+
-+#define SCHED_MM_CID_PERIOD_NS	(100ULL * 1000000)	/* 100ms */
-+#define MM_CID_SCAN_DELAY	100			/* 100ms */
-+
-+extern raw_spinlock_t cid_lock;
-+extern int use_cid_lock;
-+
-+extern void sched_mm_cid_migrate_from(struct task_struct *t);
-+extern void sched_mm_cid_migrate_to(struct rq *dst_rq, struct task_struct *t);
-+extern void task_tick_mm_cid(struct rq *rq, struct task_struct *curr);
-+extern void init_sched_mm_cid(struct task_struct *t);
-+
-+static inline void __mm_cid_put(struct mm_struct *mm, int cid)
-+{
-+	if (cid < 0)
-+		return;
-+	cpumask_clear_cpu(cid, mm_cidmask(mm));
-+}
-+
-+/*
-+ * The per-mm/cpu cid can have the MM_CID_LAZY_PUT flag set or transition to
-+ * the MM_CID_UNSET state without holding the rq lock, but the rq lock needs to
-+ * be held to transition to other states.
-+ *
-+ * State transitions synchronized with cmpxchg or try_cmpxchg need to be
-+ * consistent across cpus, which prevents use of this_cpu_cmpxchg.
-+ */
-+static inline void mm_cid_put_lazy(struct task_struct *t)
-+{
-+	struct mm_struct *mm = t->mm;
-+	struct mm_cid __percpu *pcpu_cid = mm->pcpu_cid;
-+	int cid;
-+
-+	lockdep_assert_irqs_disabled();
-+	cid = __this_cpu_read(pcpu_cid->cid);
-+	if (!mm_cid_is_lazy_put(cid) ||
-+	    !try_cmpxchg(&this_cpu_ptr(pcpu_cid)->cid, &cid, MM_CID_UNSET))
-+		return;
-+	__mm_cid_put(mm, mm_cid_clear_lazy_put(cid));
-+}
-+
-+static inline int mm_cid_pcpu_unset(struct mm_struct *mm)
-+{
-+	struct mm_cid __percpu *pcpu_cid = mm->pcpu_cid;
-+	int cid, res;
-+
-+	lockdep_assert_irqs_disabled();
-+	cid = __this_cpu_read(pcpu_cid->cid);
-+	for (;;) {
-+		if (mm_cid_is_unset(cid))
-+			return MM_CID_UNSET;
-+		/*
-+		 * Attempt transition from valid or lazy-put to unset.
-+		 */
-+		res = cmpxchg(&this_cpu_ptr(pcpu_cid)->cid, cid, MM_CID_UNSET);
-+		if (res == cid)
-+			break;
-+		cid = res;
-+	}
-+	return cid;
-+}
-+
-+static inline void mm_cid_put(struct mm_struct *mm)
-+{
-+	int cid;
-+
-+	lockdep_assert_irqs_disabled();
-+	cid = mm_cid_pcpu_unset(mm);
-+	if (cid == MM_CID_UNSET)
-+		return;
-+	__mm_cid_put(mm, mm_cid_clear_lazy_put(cid));
-+}
-+
-+static inline int __mm_cid_try_get(struct mm_struct *mm)
-+{
-+	struct cpumask *cpumask;
-+	int cid;
-+
-+	cpumask = mm_cidmask(mm);
-+	/*
-+	 * Retry finding the first zero bit if the mask is temporarily
-+	 * full. This only happens while a concurrent remote clear
-+	 * owns a cid without holding a rq lock.
-+	 */
-+	for (;;) {
-+		cid = cpumask_first_zero(cpumask);
-+		if (cid < nr_cpu_ids)
-+			break;
-+		cpu_relax();
-+	}
-+	if (cpumask_test_and_set_cpu(cid, cpumask))
-+		return -1;
-+	return cid;
-+}
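
__mm_cid_try_get() above is a lock-free bitmap allocation: find the first clear bit, then claim it with an atomic test-and-set, reporting failure if another CPU claimed the bit first. A userspace sketch of the same step, with a fixed 64-id pool standing in for the kernel cpumask helpers (all names illustrative):

#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint64_t id_mask;		/* bit set => id in use */

static int id_try_get(void)
{
	uint64_t mask, bit;
	int id;

	for (;;) {
		mask = atomic_load(&id_mask);
		if (mask != UINT64_MAX)
			break;			/* a zero bit exists */
		/* temporarily full: a remote clear is in flight, spin */
	}
	id = __builtin_ctzll(~mask);		/* first zero bit (gcc/clang builtin) */
	bit = 1ULL << id;
	/* test-and-set: only one contender wins the bit */
	if (atomic_fetch_or(&id_mask, bit) & bit)
		return -1;			/* lost the race, caller retries */
	return id;
}
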
-+
-+/*
-+ * Save a snapshot of the current runqueue time of this cpu
-+ * with the per-cpu cid value, which allows estimating how recently it was used.
-+ */
-+static inline void mm_cid_snapshot_time(struct rq *rq, struct mm_struct *mm)
-+{
-+	struct mm_cid *pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu_of(rq));
-+
-+	lockdep_assert_rq_held(rq);
-+	WRITE_ONCE(pcpu_cid->time, rq->clock);
-+}
-+
-+static inline int __mm_cid_get(struct rq *rq, struct mm_struct *mm)
-+{
-+	int cid;
-+
-+	/*
-+	 * All allocations (even those using the cid_lock) are lock-free. If
-+	 * use_cid_lock is set, hold the cid_lock to perform cid allocation to
-+	 * guarantee forward progress.
-+	 */
-+	if (!READ_ONCE(use_cid_lock)) {
-+		cid = __mm_cid_try_get(mm);
-+		if (cid >= 0)
-+			goto end;
-+		raw_spin_lock(&cid_lock);
-+	} else {
-+		raw_spin_lock(&cid_lock);
-+		cid = __mm_cid_try_get(mm);
-+		if (cid >= 0)
-+			goto unlock;
-+	}
-+
-+	/*
-+	 * The cid was concurrently allocated. Retry while forcing subsequent
-+	 * allocations to use the cid_lock to ensure forward progress.
-+	 */
-+	WRITE_ONCE(use_cid_lock, 1);
-+	/*
-+	 * Set use_cid_lock before allocation. Only care about program order
-+	 * because this is only required for forward progress.
-+	 */
-+	barrier();
-+	/*
-+	 * Retry until it succeeds. It is guaranteed to eventually succeed once
-+	 * all incoming allocations observe the use_cid_lock flag set.
-+	 */
-+	do {
-+		cid = __mm_cid_try_get(mm);
-+		cpu_relax();
-+	} while (cid < 0);
-+	/*
-+	 * Allocate before clearing use_cid_lock. Only care about
-+	 * program order because this is for forward progress.
-+	 */
-+	barrier();
-+	WRITE_ONCE(use_cid_lock, 0);
-+unlock:
-+	raw_spin_unlock(&cid_lock);
-+end:
-+	mm_cid_snapshot_time(rq, mm);
-+	return cid;
-+}
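
__mm_cid_get() above wraps that allocator in a forward-progress protocol: a loser of the race sets use_cid_lock so that every later allocation serializes on cid_lock while the loser retries until it wins. A sketch under the same assumptions as the previous one, reusing id_try_get() and with a pthread mutex standing in for cid_lock:

#include <pthread.h>
#include <stdatomic.h>

static pthread_mutex_t id_lock = PTHREAD_MUTEX_INITIALIZER;
static _Atomic int use_lock;

static int id_get(void)
{
	int id;

	if (!atomic_load(&use_lock)) {
		id = id_try_get();		/* lock-free fast path */
		if (id >= 0)
			return id;
		pthread_mutex_lock(&id_lock);
	} else {
		pthread_mutex_lock(&id_lock);
		id = id_try_get();
		if (id >= 0)
			goto unlock;
	}

	/* lost a race: force later allocations onto the lock, then retry */
	atomic_store(&use_lock, 1);
	do {
		id = id_try_get();
	} while (id < 0);
	atomic_store(&use_lock, 0);
unlock:
	pthread_mutex_unlock(&id_lock);
	return id;
}
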
-+
-+static inline int mm_cid_get(struct rq *rq, struct mm_struct *mm)
-+{
-+	struct mm_cid __percpu *pcpu_cid = mm->pcpu_cid;
-+	struct cpumask *cpumask;
-+	int cid;
-+
-+	lockdep_assert_rq_held(rq);
-+	cpumask = mm_cidmask(mm);
-+	cid = __this_cpu_read(pcpu_cid->cid);
-+	if (mm_cid_is_valid(cid)) {
-+		mm_cid_snapshot_time(rq, mm);
-+		return cid;
-+	}
-+	if (mm_cid_is_lazy_put(cid)) {
-+		if (try_cmpxchg(&this_cpu_ptr(pcpu_cid)->cid, &cid, MM_CID_UNSET))
-+			__mm_cid_put(mm, mm_cid_clear_lazy_put(cid));
-+	}
-+	cid = __mm_cid_get(rq, mm);
-+	__this_cpu_write(pcpu_cid->cid, cid);
-+	return cid;
-+}
-+
-+static inline void switch_mm_cid(struct rq *rq,
-+				 struct task_struct *prev,
-+				 struct task_struct *next)
-+{
-+	/*
-+	 * Provide a memory barrier between rq->curr store and load of
-+	 * {prev,next}->mm->pcpu_cid[cpu] on rq->curr->mm transition.
-+	 *
-+	 * Should be adapted if context_switch() is modified.
-+	 */
-+	if (!next->mm) {                                // to kernel
-+		/*
-+		 * user -> kernel transition does not guarantee a barrier, but
-+		 * we can use the fact that it performs an atomic operation in
-+		 * mmgrab().
-+		 */
-+		if (prev->mm)                           // from user
-+			smp_mb__after_mmgrab();
-+		/*
-+		 * kernel -> kernel transition does not change rq->curr->mm
-+		 * state. It stays NULL.
-+		 */
-+	} else {                                        // to user
-+		/*
-+		 * kernel -> user transition does not provide a barrier
-+		 * between rq->curr store and load of {prev,next}->mm->pcpu_cid[cpu].
-+		 * Provide it here.
-+		 */
-+		if (!prev->mm)                          // from kernel
-+			smp_mb();
-+		/*
-+		 * user -> user transition guarantees a memory barrier through
-+		 * switch_mm() when current->mm changes. If current->mm is
-+		 * unchanged, no barrier is needed.
-+		 */
-+	}
-+	if (prev->mm_cid_active) {
-+		mm_cid_snapshot_time(rq, prev->mm);
-+		mm_cid_put_lazy(prev);
-+		prev->mm_cid = -1;
-+	}
-+	if (next->mm_cid_active)
-+		next->last_mm_cid = next->mm_cid = mm_cid_get(rq, next->mm);
-+}
-+
-+#else
-+static inline void switch_mm_cid(struct rq *rq, struct task_struct *prev, struct task_struct *next) { }
-+static inline void sched_mm_cid_migrate_from(struct task_struct *t) { }
-+static inline void sched_mm_cid_migrate_to(struct rq *dst_rq, struct task_struct *t) { }
-+static inline void task_tick_mm_cid(struct rq *rq, struct task_struct *curr) { }
-+static inline void init_sched_mm_cid(struct task_struct *t) { }
-+#endif
-+
-+#ifdef CONFIG_SMP
-+extern struct balance_callback balance_push_callback;
-+
-+static inline void
-+queue_balance_callback(struct rq *rq,
-+		       struct balance_callback *head,
-+		       void (*func)(struct rq *rq))
-+{
-+	lockdep_assert_rq_held(rq);
-+
-+	/*
-+	 * Don't (re)queue an already queued item; nor queue anything when
-+	 * balance_push() is active, see the comment with
-+	 * balance_push_callback.
-+	 */
-+	if (unlikely(head->next || rq->balance_callback == &balance_push_callback))
-+		return;
-+
-+	head->func = func;
-+	head->next = rq->balance_callback;
-+	rq->balance_callback = head;
-+}
-+#endif /* CONFIG_SMP */
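
queue_balance_callback() above is an intrusive push onto a per-runqueue singly linked stack in which a non-NULL next pointer doubles as the "already queued" marker (hence the balance_push_callback sentinel rather than plain NULL termination). A single-threaded illustrative sketch of that structure, using a dummy sentinel node; the kernel relies on the rq lock for serialization instead:

#include <stddef.h>

struct callback {
	struct callback *next;
	void (*func)(void *ctx);
};

static struct callback cb_tail;			/* sentinel: never run */

struct cb_queue {
	struct callback *head;			/* initialize to &cb_tail */
};

static void cb_push(struct cb_queue *q, struct callback *cb,
		    void (*func)(void *ctx))
{
	if (cb->next)				/* non-NULL next: already queued */
		return;
	cb->func = func;
	cb->next = q->head;			/* never NULL, thanks to the sentinel */
	q->head = cb;
}

static void cb_flush(struct cb_queue *q, void *ctx)
{
	struct callback *cb = q->head, *next;

	q->head = &cb_tail;
	while (cb != &cb_tail) {
		next = cb->next;
		cb->next = NULL;		/* re-arm: the node may be queued again */
		cb->func(ctx);
		cb = next;
	}
}
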
-+
-+#ifdef CONFIG_SCHED_BMQ
-+#include "bmq.h"
-+#endif
-+#ifdef CONFIG_SCHED_PDS
-+#include "pds.h"
-+#endif
-+
-+#endif /* _KERNEL_SCHED_ALT_SCHED_H */
-diff --git a/kernel/sched/alt_topology.c b/kernel/sched/alt_topology.c
-new file mode 100644
-index 000000000000..2266138ee783
---- /dev/null
-+++ b/kernel/sched/alt_topology.c
-@@ -0,0 +1,350 @@
-+#include "alt_core.h"
-+#include "alt_topology.h"
-+
-+#ifdef CONFIG_SMP
-+
-+static cpumask_t sched_pcore_mask ____cacheline_aligned_in_smp;
-+
-+static int __init sched_pcore_mask_setup(char *str)
-+{
-+	if (cpulist_parse(str, &sched_pcore_mask))
-+		pr_warn("sched/alt: pcore_cpus= incorrect CPU range\n");
-+
-+	return 0;
-+}
-+__setup("pcore_cpus=", sched_pcore_mask_setup);
-+
-+/*
-+ * set/clear idle mask functions
-+ */
-+#ifdef CONFIG_SCHED_SMT
-+static void set_idle_mask_smt(unsigned int cpu, struct cpumask *dstp)
-+{
-+	cpumask_set_cpu(cpu, dstp);
-+	if (cpumask_subset(cpu_smt_mask(cpu), sched_idle_mask))
-+		cpumask_or(sched_sg_idle_mask, sched_sg_idle_mask, cpu_smt_mask(cpu));
-+}
-+
-+static void clear_idle_mask_smt(int cpu, struct cpumask *dstp)
-+{
-+	cpumask_clear_cpu(cpu, dstp);
-+	cpumask_andnot(sched_sg_idle_mask, sched_sg_idle_mask, cpu_smt_mask(cpu));
-+}
-+#endif
-+
-+static void set_idle_mask_pcore(unsigned int cpu, struct cpumask *dstp)
-+{
-+	cpumask_set_cpu(cpu, dstp);
-+	cpumask_set_cpu(cpu, sched_pcore_idle_mask);
-+}
-+
-+static void clear_idle_mask_pcore(int cpu, struct cpumask *dstp)
-+{
-+	cpumask_clear_cpu(cpu, dstp);
-+	cpumask_clear_cpu(cpu, sched_pcore_idle_mask);
-+}
-+
-+static void set_idle_mask_ecore(unsigned int cpu, struct cpumask *dstp)
-+{
-+	cpumask_set_cpu(cpu, dstp);
-+	cpumask_set_cpu(cpu, sched_ecore_idle_mask);
-+}
-+
-+static void clear_idle_mask_ecore(int cpu, struct cpumask *dstp)
-+{
-+	cpumask_clear_cpu(cpu, dstp);
-+	cpumask_clear_cpu(cpu, sched_ecore_idle_mask);
-+}
-+
-+/*
-+ * Idle cpu/rq selection functions
-+ */
-+#ifdef CONFIG_SCHED_SMT
-+static bool p1_idle_select_func(struct cpumask *dstp, const struct cpumask *src1p,
-+				 const struct cpumask *src2p)
-+{
-+	return cpumask_and(dstp, src1p, src2p + 1)	||
-+	       cpumask_and(dstp, src1p, src2p);
-+}
-+#endif
-+
-+static bool p1p2_idle_select_func(struct cpumask *dstp, const struct cpumask *src1p,
-+					const struct cpumask *src2p)
-+{
-+	return cpumask_and(dstp, src1p, src2p + 1)	||
-+	       cpumask_and(dstp, src1p, src2p + 2)	||
-+	       cpumask_and(dstp, src1p, src2p);
-+}
-+
-+/* common balance functions */
-+static int active_balance_cpu_stop(void *data)
-+{
-+	struct balance_arg *arg = data;
-+	struct task_struct *p = arg->task;
-+	struct rq *rq = this_rq();
-+	unsigned long flags;
-+	cpumask_t tmp;
-+
-+	local_irq_save(flags);
-+
-+	raw_spin_lock(&p->pi_lock);
-+	raw_spin_lock(&rq->lock);
-+
-+	arg->active = 0;
-+
-+	if (task_on_rq_queued(p) && task_rq(p) == rq &&
-+	    cpumask_and(&tmp, p->cpus_ptr, arg->cpumask) &&
-+	    !is_migration_disabled(p)) {
-+		int dcpu = __best_mask_cpu(&tmp, per_cpu(sched_cpu_llc_mask, cpu_of(rq)));
-+		rq = move_queued_task(rq, p, dcpu);
-+	}
-+
-+	raw_spin_unlock(&rq->lock);
-+	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-+
-+	return 0;
-+}
-+
-+/* trigger_active_balance - for @rq */
-+static inline int
-+trigger_active_balance(struct rq *src_rq, struct rq *rq, cpumask_t *target_mask)
-+{
-+	struct balance_arg *arg;
-+	unsigned long flags;
-+	struct task_struct *p;
-+	int res;
-+
-+	if (!raw_spin_trylock_irqsave(&rq->lock, flags))
-+		return 0;
-+
-+	arg = &rq->active_balance_arg;
-+	res = (1 == rq->nr_running) &&
-+	      !is_migration_disabled((p = sched_rq_first_task(rq))) &&
-+	      cpumask_intersects(p->cpus_ptr, target_mask) &&
-+	      !arg->active;
-+	if (res) {
-+		arg->task = p;
-+		arg->cpumask = target_mask;
-+
-+		arg->active = 1;
-+	}
-+
-+	raw_spin_unlock_irqrestore(&rq->lock, flags);
-+
-+	if (res) {
-+		preempt_disable();
-+		raw_spin_unlock(&src_rq->lock);
-+
-+		stop_one_cpu_nowait(cpu_of(rq), active_balance_cpu_stop, arg,
-+				    &rq->active_balance_work);
-+
-+		preempt_enable();
-+		raw_spin_lock(&src_rq->lock);
-+	}
-+
-+	return res;
-+}
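
trigger_active_balance() above follows a claim-before-kick idiom: trylock the remote runqueue rather than spinning on it, verify the preconditions and set the active flag under that lock so the same work cannot be queued twice, and only then dispatch the stopper with the source lock dropped. A minimal userspace sketch of the claim step, with illustrative names and a pthread trylock standing in for raw_spin_trylock:

#include <pthread.h>
#include <stdbool.h>

struct queue {
	pthread_mutex_t lock;
	bool active;			/* balance work already pending? */
	int nr_running;
};

/* return true if the work was claimed and a helper should be kicked */
static bool try_claim_balance(struct queue *q)
{
	bool claimed = false;

	if (pthread_mutex_trylock(&q->lock))
		return false;		/* busy: skip it, never spin here */

	if (q->nr_running == 1 && !q->active) {
		q->active = true;	/* claim under q->lock: no double queue */
		claimed = true;
	}
	pthread_mutex_unlock(&q->lock);

	return claimed;			/* caller kicks the helper outside q->lock */
}
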
-+
-+static inline int
-+ecore_source_balance(struct rq *rq, cpumask_t *single_task_mask, cpumask_t *target_mask)
-+{
-+	if (cpumask_andnot(single_task_mask, single_task_mask, &sched_pcore_mask)) {
-+		int i, cpu = cpu_of(rq);
-+
-+		for_each_cpu_wrap(i, single_task_mask, cpu)
-+			if (trigger_active_balance(rq, cpu_rq(i), target_mask))
-+				return 1;
-+	}
-+
-+	return 0;
-+}
-+
-+static DEFINE_PER_CPU(struct balance_callback, active_balance_head);
-+
-+#ifdef CONFIG_SCHED_SMT
-+static inline int
-+smt_pcore_source_balance(struct rq *rq, cpumask_t *single_task_mask, cpumask_t *target_mask)
-+{
-+	cpumask_t smt_single_mask;
-+
-+	if (cpumask_and(&smt_single_mask, single_task_mask, &sched_smt_mask)) {
-+		int i, cpu = cpu_of(rq);
-+
-+		for_each_cpu_wrap(i, &smt_single_mask, cpu) {
-+			if (cpumask_subset(cpu_smt_mask(i), &smt_single_mask) &&
-+			    trigger_active_balance(rq, cpu_rq(i), target_mask))
-+				return 1;
-+		}
-+	}
-+
-+	return 0;
-+}
-+
-+/* smt p core balance functions */
-+static inline void smt_pcore_balance(struct rq *rq)
-+{
-+	cpumask_t single_task_mask;
-+
-+	if (cpumask_andnot(&single_task_mask, cpu_active_mask, sched_idle_mask) &&
-+	    cpumask_andnot(&single_task_mask, &single_task_mask, &sched_rq_pending_mask) &&
-+	    (/* smt core group balance */
-+	     (static_key_count(&sched_smt_present.key) > 1 &&
-+	      smt_pcore_source_balance(rq, &single_task_mask, sched_sg_idle_mask)
-+	     ) ||
-+	     /* e core to idle smt core balance */
-+	     ecore_source_balance(rq, &single_task_mask, sched_sg_idle_mask)))
-+		return;
-+}
-+
-+static void smt_pcore_balance_func(struct rq *rq, const int cpu)
-+{
-+	if (cpumask_test_cpu(cpu, sched_sg_idle_mask))
-+		queue_balance_callback(rq, &per_cpu(active_balance_head, cpu), smt_pcore_balance);
-+}
-+
-+/* smt balance functions */
-+static inline void smt_balance(struct rq *rq)
-+{
-+	cpumask_t single_task_mask;
-+
-+	if (cpumask_andnot(&single_task_mask, cpu_active_mask, sched_idle_mask) &&
-+	    cpumask_andnot(&single_task_mask, &single_task_mask, &sched_rq_pending_mask) &&
-+	    static_key_count(&sched_smt_present.key) > 1 &&
-+	    smt_pcore_source_balance(rq, &single_task_mask, sched_sg_idle_mask))
-+		return;
-+}
-+
-+static void smt_balance_func(struct rq *rq, const int cpu)
-+{
-+	if (cpumask_test_cpu(cpu, sched_sg_idle_mask))
-+		queue_balance_callback(rq, &per_cpu(active_balance_head, cpu), smt_balance);
-+}
-+
-+/* e core balance functions */
-+static inline void ecore_balance(struct rq *rq)
-+{
-+	cpumask_t single_task_mask;
-+
-+	if (cpumask_andnot(&single_task_mask, cpu_active_mask, sched_idle_mask) &&
-+	    cpumask_andnot(&single_task_mask, &single_task_mask, &sched_rq_pending_mask) &&
-+	    /* smt occupied p core to idle e core balance */
-+	    smt_pcore_source_balance(rq, &single_task_mask, sched_ecore_idle_mask))
-+		return;
-+}
-+
-+static void ecore_balance_func(struct rq *rq, const int cpu)
-+{
-+	queue_balance_callback(rq, &per_cpu(active_balance_head, cpu), ecore_balance);
-+}
-+#endif /* CONFIG_SCHED_SMT */
-+
-+/* p core balance functions */
-+static inline void pcore_balance(struct rq *rq)
-+{
-+	cpumask_t single_task_mask;
-+
-+	if (cpumask_andnot(&single_task_mask, cpu_active_mask, sched_idle_mask) &&
-+	    cpumask_andnot(&single_task_mask, &single_task_mask, &sched_rq_pending_mask) &&
-+	    /* idle e core to p core balance */
-+	    ecore_source_balance(rq, &single_task_mask, sched_pcore_idle_mask))
-+		return;
-+}
-+
-+static void pcore_balance_func(struct rq *rq, const int cpu)
-+{
-+	queue_balance_callback(rq, &per_cpu(active_balance_head, cpu), pcore_balance);
-+}
-+
-+#ifdef ALT_SCHED_DEBUG
-+#define SCHED_DEBUG_INFO(...)	printk(KERN_INFO __VA_ARGS__)
-+#else
-+#define SCHED_DEBUG_INFO(...)	do { } while(0)
-+#endif
-+
-+#define SET_IDLE_SELECT_FUNC(func)						\
-+{										\
-+	idle_select_func = func;						\
-+	printk(KERN_INFO "sched: "#func);					\
-+}
-+
-+#define SET_RQ_BALANCE_FUNC(rq, cpu, func)					\
-+{										\
-+	rq->balance_func = func;						\
-+	SCHED_DEBUG_INFO("sched: cpu#%02d -> "#func, cpu);			\
-+}
-+
-+#define SET_RQ_IDLE_MASK_FUNC(rq, cpu, set_func, clear_func)			\
-+{										\
-+	rq->set_idle_mask_func		= set_func;				\
-+	rq->clear_idle_mask_func	= clear_func;				\
-+	SCHED_DEBUG_INFO("sched: cpu#%02d -> "#set_func" "#clear_func, cpu);	\
-+}
-+
-+void sched_init_topology(void)
-+{
-+	int cpu;
-+	struct rq *rq;
-+	cpumask_t sched_ecore_mask = { CPU_BITS_NONE };
-+	int ecore_present = 0;
-+
-+#ifdef CONFIG_SCHED_SMT
-+	if (!cpumask_empty(&sched_smt_mask))
-+		printk(KERN_INFO "sched: smt mask: 0x%08lx\n", sched_smt_mask.bits[0]);
-+#endif
-+
-+	if (!cpumask_empty(&sched_pcore_mask)) {
-+		cpumask_andnot(&sched_ecore_mask, cpu_online_mask, &sched_pcore_mask);
-+		printk(KERN_INFO "sched: pcore mask: 0x%08lx, ecore mask: 0x%08lx\n",
-+		       sched_pcore_mask.bits[0], sched_ecore_mask.bits[0]);
-+
-+		ecore_present = !cpumask_empty(&sched_ecore_mask);
-+	}
-+
-+#ifdef CONFIG_SCHED_SMT
-+	/* idle select function */
-+	if (cpumask_equal(&sched_smt_mask, cpu_online_mask)) {
-+		SET_IDLE_SELECT_FUNC(p1_idle_select_func);
-+	} else
-+#endif
-+	if (!cpumask_empty(&sched_pcore_mask)) {
-+		SET_IDLE_SELECT_FUNC(p1p2_idle_select_func);
-+	}
-+
-+	for_each_online_cpu(cpu) {
-+		rq = cpu_rq(cpu);
-+		/* take the chance to reset the time slice for idle tasks */
-+		rq->idle->time_slice = sysctl_sched_base_slice;
-+
-+#ifdef CONFIG_SCHED_SMT
-+		if (cpumask_weight(cpu_smt_mask(cpu)) > 1) {
-+			SET_RQ_IDLE_MASK_FUNC(rq, cpu, set_idle_mask_smt, clear_idle_mask_smt);
-+
-+			if (cpumask_test_cpu(cpu, &sched_pcore_mask) &&
-+			    !cpumask_intersects(&sched_ecore_mask, &sched_smt_mask)) {
-+				SET_RQ_BALANCE_FUNC(rq, cpu, smt_pcore_balance_func);
-+			} else {
-+				SET_RQ_BALANCE_FUNC(rq, cpu, smt_balance_func);
-+			}
-+
-+			continue;
-+		}
-+#endif
-+		/* !SMT or only one cpu in sg */
-+		if (cpumask_test_cpu(cpu, &sched_pcore_mask)) {
-+			SET_RQ_IDLE_MASK_FUNC(rq, cpu, set_idle_mask_pcore, clear_idle_mask_pcore);
-+
-+			if (ecore_present)
-+				SET_RQ_BALANCE_FUNC(rq, cpu, pcore_balance_func);
-+
-+			continue;
-+		}
-+		if (cpumask_test_cpu(cpu, &sched_ecore_mask)) {
-+			SET_RQ_IDLE_MASK_FUNC(rq, cpu, set_idle_mask_ecore, clear_idle_mask_ecore);
-+#ifdef CONFIG_SCHED_SMT
-+			if (cpumask_intersects(&sched_pcore_mask, &sched_smt_mask))
-+				SET_RQ_BALANCE_FUNC(rq, cpu, ecore_balance_func);
-+#endif
-+		}
-+	}
-+}
-+#endif /* CONFIG_SMP */
-diff --git a/kernel/sched/alt_topology.h b/kernel/sched/alt_topology.h
-new file mode 100644
-index 000000000000..076174cd2bc6
---- /dev/null
-+++ b/kernel/sched/alt_topology.h
-@@ -0,0 +1,6 @@
-+#ifndef _KERNEL_SCHED_ALT_TOPOLOGY_H
-+#define _KERNEL_SCHED_ALT_TOPOLOGY_H
-+
-+extern void sched_init_topology(void);
-+
-+#endif /* _KERNEL_SCHED_ALT_TOPOLOGY_H */
-diff --git a/kernel/sched/bmq.h b/kernel/sched/bmq.h
-new file mode 100644
-index 000000000000..5a7835246ec3
---- /dev/null
-+++ b/kernel/sched/bmq.h
-@@ -0,0 +1,103 @@
-+#ifndef _KERNEL_SCHED_BMQ_H
-+#define _KERNEL_SCHED_BMQ_H
-+
-+#define ALT_SCHED_NAME "BMQ"
-+
-+/*
-+ * BMQ only routines
-+ */
-+static inline void boost_task(struct task_struct *p, int n)
-+{
-+	int limit;
-+
-+	switch (p->policy) {
-+	case SCHED_NORMAL:
-+		limit = -MAX_PRIORITY_ADJ;
-+		break;
-+	case SCHED_BATCH:
-+		limit = 0;
-+		break;
-+	default:
-+		return;
-+	}
-+
-+	p->boost_prio = max(limit, p->boost_prio - n);
-+}
-+
-+static inline void deboost_task(struct task_struct *p)
-+{
-+	if (p->boost_prio < MAX_PRIORITY_ADJ)
-+		p->boost_prio++;
-+}
-+
-+/*
-+ * Common interfaces
-+ */
-+static inline void sched_timeslice_imp(const int timeslice_ms) {}
-+
-+/* This API is used in task_prio(); the return value is read by human users */
-+static inline int
-+task_sched_prio_normal(const struct task_struct *p, const struct rq *rq)
-+{
-+	return p->prio + p->boost_prio - MIN_NORMAL_PRIO;
-+}
-+
-+static inline int task_sched_prio(const struct task_struct *p)
-+{
-+	return (p->prio < MIN_NORMAL_PRIO)? (p->prio >> 2) :
-+		MIN_SCHED_NORMAL_PRIO + (p->prio + p->boost_prio - MIN_NORMAL_PRIO) / 2;
-+}
-+
-+#define TASK_SCHED_PRIO_IDX(p, rq, idx, prio)	\
-+	prio = task_sched_prio(p);		\
-+	idx = prio;
-+
-+static inline int sched_prio2idx(int prio, struct rq *rq)
-+{
-+	return prio;
-+}
-+
-+static inline int sched_idx2prio(int idx, struct rq *rq)
-+{
-+	return idx;
-+}
-+
-+static inline int sched_rq_prio_idx(struct rq *rq)
-+{
-+	return rq->prio;
-+}
-+
-+static inline int task_running_nice(struct task_struct *p)
-+{
-+	return (p->prio + p->boost_prio > DEFAULT_PRIO);
-+}
-+
-+static inline void sched_update_rq_clock(struct rq *rq) {}
-+
-+static inline void sched_task_renew(struct task_struct *p, const struct rq *rq)
-+{
-+	deboost_task(p);
-+}
-+
-+static inline void sched_task_sanity_check(struct task_struct *p, struct rq *rq) {}
-+static inline void sched_task_fork(struct task_struct *p, struct rq *rq) {}
-+
-+static inline void do_sched_yield_type_1(struct task_struct *p, struct rq *rq)
-+{
-+	p->boost_prio = MAX_PRIORITY_ADJ;
-+}
-+
-+static inline void sched_task_ttwu(struct task_struct *p)
-+{
-+	/* roughly one boost step per ~4ms slept (nanoseconds >> 22) */
-+	s64 delta = this_rq()->clock_task - p->last_ran;
-+
-+	if (likely(delta > 0))
-+		boost_task(p, delta >> 22);
-+}
-+
-+static inline void sched_task_deactivate(struct task_struct *p, struct rq *rq)
-+{
-+	boost_task(p, 1);
-+}
-+
-+#endif /* _KERNEL_SCHED_BMQ_H */
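
To summarize the boost machinery above: a wake-up nudges boost_prio downward, which raises the task's effective priority since BMQ queues on p->prio + p->boost_prio, while every expired slice nudges it back up, so interactive tasks float above CPU hogs at the same nice level. A toy model, with an illustrative clamp value standing in for MAX_PRIORITY_ADJ:

#include <stdio.h>

#define LIMIT 7		/* illustrative stand-in for MAX_PRIORITY_ADJ */

static int clampi(int v, int lo, int hi)
{
	return v < lo ? lo : v > hi ? hi : v;
}

int main(void)
{
	int boost = 0, i;

	for (i = 0; i < 2; i++) {		/* two wake-ups: boost_task() */
		boost = clampi(boost - 1, -LIMIT, LIMIT);
		printf("wakeup:        boost_prio=%+d\n", boost);
	}
	for (i = 0; i < 4; i++) {		/* four expired slices: deboost_task() */
		boost = clampi(boost + 1, -LIMIT, LIMIT);
		printf("slice expired: boost_prio=%+d\n", boost);
	}
	return 0;
}
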
-diff --git a/kernel/sched/build_policy.c b/kernel/sched/build_policy.c
-index fae1f5c921eb..1e06434b5b9b 100644
---- a/kernel/sched/build_policy.c
-+++ b/kernel/sched/build_policy.c
-@@ -49,15 +49,21 @@
- 
- #include "idle.c"
- 
-+#ifndef CONFIG_SCHED_ALT
- #include "rt.c"
-+#endif
- 
- #ifdef CONFIG_SMP
-+#ifndef CONFIG_SCHED_ALT
- # include "cpudeadline.c"
-+#endif
- # include "pelt.c"
- #endif
- 
- #include "cputime.c"
-+#ifndef CONFIG_SCHED_ALT
- #include "deadline.c"
-+#endif
- 
- #ifdef CONFIG_SCHED_CLASS_EXT
- # include "ext.c"
-diff --git a/kernel/sched/build_utility.c b/kernel/sched/build_utility.c
-index 80a3df49ab47..58d04aa73634 100644
---- a/kernel/sched/build_utility.c
-+++ b/kernel/sched/build_utility.c
-@@ -56,6 +56,10 @@
- 
- #include "clock.c"
- 
-+#ifdef CONFIG_SCHED_ALT
-+# include "alt_topology.c"
-+#endif
-+
- #ifdef CONFIG_CGROUP_CPUACCT
- # include "cpuacct.c"
- #endif
-@@ -84,7 +88,9 @@
- 
- #ifdef CONFIG_SMP
- # include "cpupri.c"
-+#ifndef CONFIG_SCHED_ALT
- # include "stop_task.c"
-+#endif
- # include "topology.c"
- #endif
- 
-diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
-index c6ba15388ea7..56590821f074 100644
---- a/kernel/sched/cpufreq_schedutil.c
-+++ b/kernel/sched/cpufreq_schedutil.c
-@@ -197,6 +197,7 @@ unsigned long sugov_effective_cpu_perf(int cpu, unsigned long actual,
- 
- static void sugov_get_util(struct sugov_cpu *sg_cpu, unsigned long boost)
- {
-+#ifndef CONFIG_SCHED_ALT
- 	unsigned long min, max, util = scx_cpuperf_target(sg_cpu->cpu);
- 
- 	if (!scx_switched_all())
-@@ -205,6 +206,10 @@ static void sugov_get_util(struct sugov_cpu *sg_cpu, unsigned long boost)
- 	util = max(util, boost);
- 	sg_cpu->bw_min = min;
- 	sg_cpu->util = sugov_effective_cpu_perf(sg_cpu->cpu, util, min, max);
-+#else /* CONFIG_SCHED_ALT */
-+	sg_cpu->bw_min = 0;
-+	sg_cpu->util = rq_load_util(cpu_rq(sg_cpu->cpu), arch_scale_cpu_capacity(sg_cpu->cpu));
-+#endif /* CONFIG_SCHED_ALT */
- }
- 
- /**
-@@ -364,8 +369,10 @@ static inline bool sugov_hold_freq(struct sugov_cpu *sg_cpu) { return false; }
-  */
- static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu)
- {
-+#ifndef CONFIG_SCHED_ALT
- 	if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_min)
- 		sg_cpu->sg_policy->limits_changed = true;
-+#endif
- }
- 
- static inline bool sugov_update_single_common(struct sugov_cpu *sg_cpu,
-@@ -684,6 +691,7 @@ static int sugov_kthread_create(struct sugov_policy *sg_policy)
- 	}
- 
- 	ret = sched_setattr_nocheck(thread, &attr);
-+
- 	if (ret) {
- 		kthread_stop(thread);
- 		pr_warn("%s: failed to set SCHED_DEADLINE\n", __func__);
-diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
-index 0bed0fa1acd9..031affa09446 100644
---- a/kernel/sched/cputime.c
-+++ b/kernel/sched/cputime.c
-@@ -126,7 +126,7 @@ void account_user_time(struct task_struct *p, u64 cputime)
- 	p->utime += cputime;
- 	account_group_user_time(p, cputime);
- 
--	index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;
-+	index = task_running_nice(p) ? CPUTIME_NICE : CPUTIME_USER;
- 
- 	/* Add user time to cpustat. */
- 	task_group_account_field(p, index, cputime);
-@@ -150,7 +150,7 @@ void account_guest_time(struct task_struct *p, u64 cputime)
- 	p->gtime += cputime;
- 
- 	/* Add guest time to cpustat. */
--	if (task_nice(p) > 0) {
-+	if (task_running_nice(p)) {
- 		task_group_account_field(p, CPUTIME_NICE, cputime);
- 		cpustat[CPUTIME_GUEST_NICE] += cputime;
- 	} else {
-@@ -288,7 +288,7 @@ static inline u64 account_other_time(u64 max)
- #ifdef CONFIG_64BIT
- static inline u64 read_sum_exec_runtime(struct task_struct *t)
- {
--	return t->se.sum_exec_runtime;
-+	return tsk_seruntime(t);
- }
- #else
- static u64 read_sum_exec_runtime(struct task_struct *t)
-@@ -298,7 +298,7 @@ static u64 read_sum_exec_runtime(struct task_struct *t)
- 	struct rq *rq;
- 
- 	rq = task_rq_lock(t, &rf);
--	ns = t->se.sum_exec_runtime;
-+	ns = tsk_seruntime(t);
- 	task_rq_unlock(rq, t, &rf);
- 
- 	return ns;
-@@ -623,7 +623,7 @@ void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
- void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
- {
- 	struct task_cputime cputime = {
--		.sum_exec_runtime = p->se.sum_exec_runtime,
-+		.sum_exec_runtime = tsk_seruntime(p),
- 	};
- 
- 	if (task_cputime(p, &cputime.utime, &cputime.stime))
-diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
-index f4035c7a0fa1..4df4ad88d6a9 100644
---- a/kernel/sched/debug.c
-+++ b/kernel/sched/debug.c
-@@ -7,6 +7,7 @@
-  * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
-  */
- 
-+#ifndef CONFIG_SCHED_ALT
- /*
-  * This allows printing both to /sys/kernel/debug/sched/debug and
-  * to the console
-@@ -215,6 +216,7 @@ static const struct file_operations sched_scaling_fops = {
- };
- 
- #endif /* SMP */
-+#endif /* !CONFIG_SCHED_ALT */
- 
- #ifdef CONFIG_PREEMPT_DYNAMIC
- 
-@@ -278,6 +280,7 @@ static const struct file_operations sched_dynamic_fops = {
- 
- #endif /* CONFIG_PREEMPT_DYNAMIC */
- 
-+#ifndef CONFIG_SCHED_ALT
- __read_mostly bool sched_debug_verbose;
- 
- #ifdef CONFIG_SMP
-@@ -468,9 +471,11 @@ static const struct file_operations fair_server_period_fops = {
- 	.llseek		= seq_lseek,
- 	.release	= single_release,
- };
-+#endif /* !CONFIG_SCHED_ALT */
- 
- static struct dentry *debugfs_sched;
- 
-+#ifndef CONFIG_SCHED_ALT
- static void debugfs_fair_server_init(void)
- {
- 	struct dentry *d_fair;
-@@ -491,6 +496,7 @@ static void debugfs_fair_server_init(void)
- 		debugfs_create_file("period", 0644, d_cpu, (void *) cpu, &fair_server_period_fops);
- 	}
- }
-+#endif /* !CONFIG_SCHED_ALT */
- 
- static __init int sched_init_debug(void)
- {
-@@ -498,14 +504,17 @@ static __init int sched_init_debug(void)
- 
- 	debugfs_sched = debugfs_create_dir("sched", NULL);
- 
-+#ifndef CONFIG_SCHED_ALT
- 	debugfs_create_file("features", 0644, debugfs_sched, NULL, &sched_feat_fops);
- 	debugfs_create_file_unsafe("verbose", 0644, debugfs_sched, &sched_debug_verbose, &sched_verbose_fops);
-+#endif /* !CONFIG_SCHED_ALT */
- #ifdef CONFIG_PREEMPT_DYNAMIC
- 	debugfs_create_file("preempt", 0644, debugfs_sched, NULL, &sched_dynamic_fops);
- #endif
- 
- 	debugfs_create_u32("base_slice_ns", 0644, debugfs_sched, &sysctl_sched_base_slice);
- 
-+#ifndef CONFIG_SCHED_ALT
- 	debugfs_create_u32("latency_warn_ms", 0644, debugfs_sched, &sysctl_resched_latency_warn_ms);
- 	debugfs_create_u32("latency_warn_once", 0644, debugfs_sched, &sysctl_resched_latency_warn_once);
- 
-@@ -530,13 +539,17 @@ static __init int sched_init_debug(void)
- #endif
- 
- 	debugfs_create_file("debug", 0444, debugfs_sched, NULL, &sched_debug_fops);
-+#endif /* !CONFIG_SCHED_ALT */
- 
-+#ifndef CONFIG_SCHED_ALT
- 	debugfs_fair_server_init();
-+#endif /* !CONFIG_SCHED_ALT */
- 
- 	return 0;
- }
- late_initcall(sched_init_debug);
- 
-+#ifndef CONFIG_SCHED_ALT
- #ifdef CONFIG_SMP
- 
- static cpumask_var_t		sd_sysctl_cpus;
-@@ -1288,6 +1301,7 @@ void proc_sched_set_task(struct task_struct *p)
- 	memset(&p->stats, 0, sizeof(p->stats));
- #endif
- }
-+#endif /* !CONFIG_SCHED_ALT */
- 
- void resched_latency_warn(int cpu, u64 latency)
- {
-diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
-index d2f096bb274c..36071f4b7b7f 100644
---- a/kernel/sched/idle.c
-+++ b/kernel/sched/idle.c
-@@ -424,6 +424,7 @@ void cpu_startup_entry(enum cpuhp_state state)
- 		do_idle();
- }
- 
-+#ifndef CONFIG_SCHED_ALT
- /*
-  * idle-task scheduling class.
-  */
-@@ -538,3 +539,4 @@ DEFINE_SCHED_CLASS(idle) = {
- 	.switched_to		= switched_to_idle,
- 	.update_curr		= update_curr_idle,
- };
-+#endif
-diff --git a/kernel/sched/pds.h b/kernel/sched/pds.h
-new file mode 100644
-index 000000000000..fe3099071eb7
---- /dev/null
-+++ b/kernel/sched/pds.h
-@@ -0,0 +1,139 @@
-+#ifndef _KERNEL_SCHED_PDS_H
-+#define _KERNEL_SCHED_PDS_H
-+
-+#define ALT_SCHED_NAME "PDS"
-+
-+static const u64 RT_MASK = ((1ULL << MIN_SCHED_NORMAL_PRIO) - 1);
-+
-+#define SCHED_NORMAL_PRIO_NUM	(32)
-+#define SCHED_EDGE_DELTA	(SCHED_NORMAL_PRIO_NUM - NICE_WIDTH / 2)
-+
-+/* PDS assumes SCHED_NORMAL_PRIO_NUM is a power of 2 */
-+#define SCHED_NORMAL_PRIO_MOD(x)	((x) & (SCHED_NORMAL_PRIO_NUM - 1))
-+
-+/* default time slice 4ms -> shift 22, 2 time slice slots -> shift 23 */
-+static __read_mostly int sched_timeslice_shift = 23;
-+
-+/*
-+ * Common interfaces
-+ */
-+static inline int
-+task_sched_prio_normal(const struct task_struct *p, const struct rq *rq)
-+{
-+	u64 sched_dl = max(p->deadline, rq->time_edge);
-+
-+#ifdef ALT_SCHED_DEBUG
-+	if (WARN_ONCE(sched_dl - rq->time_edge > NORMAL_PRIO_NUM - 1,
-+		      "pds: task_sched_prio_normal() delta %lld\n", sched_dl - rq->time_edge))
-+		return SCHED_NORMAL_PRIO_NUM - 1;
-+#endif
-+
-+	return sched_dl - rq->time_edge;
-+}
-+
-+static inline int task_sched_prio(const struct task_struct *p)
-+{
-+	return (p->prio < MIN_NORMAL_PRIO) ? (p->prio >> 2) :
-+		MIN_SCHED_NORMAL_PRIO + task_sched_prio_normal(p, task_rq(p));
-+}
-+
-+#define TASK_SCHED_PRIO_IDX(p, rq, idx, prio)							\
-+	if (p->prio < MIN_NORMAL_PRIO) {							\
-+		prio = p->prio >> 2;								\
-+		idx = prio;									\
-+	} else {										\
-+		u64 sched_dl = max(p->deadline, rq->time_edge);					\
-+		prio = MIN_SCHED_NORMAL_PRIO + sched_dl - rq->time_edge;			\
-+		idx = MIN_SCHED_NORMAL_PRIO + SCHED_NORMAL_PRIO_MOD(sched_dl);			\
-+	}
-+
-+static inline int sched_prio2idx(int sched_prio, struct rq *rq)
-+{
-+	return (IDLE_TASK_SCHED_PRIO == sched_prio || sched_prio < MIN_SCHED_NORMAL_PRIO) ?
-+		sched_prio :
-+		MIN_SCHED_NORMAL_PRIO + SCHED_NORMAL_PRIO_MOD(sched_prio + rq->time_edge);
-+}
-+
-+static inline int sched_idx2prio(int sched_idx, struct rq *rq)
-+{
-+	return (sched_idx < MIN_SCHED_NORMAL_PRIO) ?
-+		sched_idx :
-+		MIN_SCHED_NORMAL_PRIO + SCHED_NORMAL_PRIO_MOD(sched_idx - rq->time_edge);
-+}
-+
-+static inline int sched_rq_prio_idx(struct rq *rq)
-+{
-+	return rq->prio_idx;
-+}
-+
-+static inline int task_running_nice(struct task_struct *p)
-+{
-+	return (p->prio > DEFAULT_PRIO);
-+}
-+
-+static inline void sched_update_rq_clock(struct rq *rq)
-+{
-+	struct list_head head;
-+	u64 old = rq->time_edge;
-+	u64 now = rq->clock >> sched_timeslice_shift;
-+	u64 prio, delta;
-+	DECLARE_BITMAP(normal, SCHED_QUEUE_BITS);
-+
-+	if (now == old)
-+		return;
-+
-+	rq->time_edge = now;
-+	delta = min_t(u64, SCHED_NORMAL_PRIO_NUM, now - old);
-+	INIT_LIST_HEAD(&head);
-+
-+	prio = MIN_SCHED_NORMAL_PRIO;
-+	for_each_set_bit_from(prio, rq->queue.bitmap, MIN_SCHED_NORMAL_PRIO + delta)
-+		list_splice_tail_init(rq->queue.heads + MIN_SCHED_NORMAL_PRIO +
-+				      SCHED_NORMAL_PRIO_MOD(prio + old), &head);
-+
-+	bitmap_shift_right(normal, rq->queue.bitmap, delta, SCHED_QUEUE_BITS);
-+	if (!list_empty(&head)) {
-+		u64 idx = MIN_SCHED_NORMAL_PRIO + SCHED_NORMAL_PRIO_MOD(now);
-+
-+		__list_splice(&head, rq->queue.heads + idx, rq->queue.heads[idx].next);
-+		set_bit(MIN_SCHED_NORMAL_PRIO, normal);
-+	}
-+	bitmap_replace(rq->queue.bitmap, normal, rq->queue.bitmap,
-+		       (const unsigned long *)&RT_MASK, SCHED_QUEUE_BITS);
-+
-+	if (rq->prio < MIN_SCHED_NORMAL_PRIO || IDLE_TASK_SCHED_PRIO == rq->prio)
-+		return;
-+
-+	rq->prio = max_t(u64, MIN_SCHED_NORMAL_PRIO, rq->prio - delta);
-+	rq->prio_idx = sched_prio2idx(rq->prio, rq);
-+}
-+
-+static inline void sched_task_renew(struct task_struct *p, const struct rq *rq)
-+{
-+	if (p->prio >= MIN_NORMAL_PRIO)
-+		p->deadline = rq->time_edge + SCHED_EDGE_DELTA +
-+			      (p->static_prio - (MAX_PRIO - NICE_WIDTH)) / 2;
-+}
-+
-+static inline void sched_task_sanity_check(struct task_struct *p, struct rq *rq)
-+{
-+	u64 max_dl = rq->time_edge + SCHED_EDGE_DELTA + NICE_WIDTH / 2 - 1;
-+	if (unlikely(p->deadline > max_dl))
-+		p->deadline = max_dl;
-+}
-+
-+static inline void sched_task_fork(struct task_struct *p, struct rq *rq)
-+{
-+	sched_task_renew(p, rq);
-+}
-+
-+static inline void do_sched_yield_type_1(struct task_struct *p, struct rq *rq)
-+{
-+	p->time_slice = sysctl_sched_base_slice;
-+	sched_task_renew(p, rq);
-+}
-+
-+static inline void sched_task_ttwu(struct task_struct *p) {}
-+static inline void sched_task_deactivate(struct task_struct *p, struct rq *rq) {}
-+
-+#endif /* _KERNEL_SCHED_PDS_H */
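
PDS stores deadline-relative priorities in a ring: a task's queue slot is (prio + time_edge) mod SCHED_NORMAL_PRIO_NUM, so advancing time_edge re-labels every slot instead of moving list heads, which is what sched_update_rq_clock() above exploits when it splices expired levels onto the current edge. A toy model of the index mapping, with an illustrative queue size:

#include <assert.h>
#include <stdio.h>

#define QN	32			/* stands in for SCHED_NORMAL_PRIO_NUM */
#define QMOD(x)	((int)((x) & (QN - 1)))	/* QN must be a power of 2 */

static int prio2idx(int prio, unsigned long long edge)
{
	return QMOD(prio + edge);
}

static int idx2prio(int idx, unsigned long long edge)
{
	return QMOD(idx - edge);
}

int main(void)
{
	unsigned long long edge;
	int prio;

	/* the mapping is a bijection at every time edge */
	for (edge = 0; edge < 100; edge++)
		for (prio = 0; prio < QN; prio++)
			assert(idx2prio(prio2idx(prio, edge), edge) == prio);

	/* the same slot is re-labelled as the edge advances */
	printf("prio 0 at edge 5 lives in slot %d\n", prio2idx(0, 5));	/* 5 */
	printf("slot 5 at edge 6 now means prio %d\n", idx2prio(5, 6));	/* 31 */
	return 0;
}
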
-diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
-index a9c65d97b3ca..a66431e6527c 100644
---- a/kernel/sched/pelt.c
-+++ b/kernel/sched/pelt.c
-@@ -266,6 +266,7 @@ ___update_load_avg(struct sched_avg *sa, unsigned long load)
- 	WRITE_ONCE(sa->util_avg, sa->util_sum / divider);
- }
- 
-+#ifndef CONFIG_SCHED_ALT
- /*
-  * sched_entity:
-  *
-@@ -383,8 +384,9 @@ int update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
- 
- 	return 0;
- }
-+#endif
- 
--#ifdef CONFIG_SCHED_HW_PRESSURE
-+#if defined(CONFIG_SCHED_HW_PRESSURE) && !defined(CONFIG_SCHED_ALT)
- /*
-  * hardware:
-  *
-@@ -468,6 +470,7 @@ int update_irq_load_avg(struct rq *rq, u64 running)
- }
- #endif
- 
-+#ifndef CONFIG_SCHED_ALT
- /*
-  * Load avg and utilization metrics need to be updated periodically and before
-  * consumption. This function updates the metrics for all subsystems except for
-@@ -487,3 +490,4 @@ bool update_other_load_avgs(struct rq *rq)
- 		update_hw_load_avg(rq_clock_task(rq), rq, hw_pressure) |
- 		update_irq_load_avg(rq, 0);
- }
-+#endif /* !CONFIG_SCHED_ALT */
-diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h
-index f4f6a0875c66..ee780f2b6c17 100644
---- a/kernel/sched/pelt.h
-+++ b/kernel/sched/pelt.h
-@@ -1,14 +1,16 @@
- #ifdef CONFIG_SMP
- #include "sched-pelt.h"
- 
-+#ifndef CONFIG_SCHED_ALT
- int __update_load_avg_blocked_se(u64 now, struct sched_entity *se);
- int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se);
- int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq);
- int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
- int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
- bool update_other_load_avgs(struct rq *rq);
-+#endif
- 
--#ifdef CONFIG_SCHED_HW_PRESSURE
-+#if defined(CONFIG_SCHED_HW_PRESSURE) && !defined(CONFIG_SCHED_ALT)
- int update_hw_load_avg(u64 now, struct rq *rq, u64 capacity);
- 
- static inline u64 hw_load_avg(struct rq *rq)
-@@ -45,6 +47,7 @@ static inline u32 get_pelt_divider(struct sched_avg *avg)
- 	return PELT_MIN_DIVIDER + avg->period_contrib;
- }
- 
-+#ifndef CONFIG_SCHED_ALT
- static inline void cfs_se_util_change(struct sched_avg *avg)
- {
- 	unsigned int enqueued;
-@@ -181,9 +184,11 @@ static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
- 	return rq_clock_pelt(rq_of(cfs_rq));
- }
- #endif
-+#endif /* CONFIG_SCHED_ALT */
- 
- #else
- 
-+#ifndef CONFIG_SCHED_ALT
- static inline int
- update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
- {
-@@ -201,6 +206,7 @@ update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
- {
- 	return 0;
- }
-+#endif
- 
- static inline int
- update_hw_load_avg(u64 now, struct rq *rq, u64 capacity)
-diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
-index c03b3d7b320e..08ee4a9cd6a5 100644
---- a/kernel/sched/sched.h
-+++ b/kernel/sched/sched.h
-@@ -5,6 +5,10 @@
- #ifndef _KERNEL_SCHED_SCHED_H
- #define _KERNEL_SCHED_SCHED_H
- 
-+#ifdef CONFIG_SCHED_ALT
-+#include "alt_sched.h"
-+#else
-+
- #include <linux/sched/affinity.h>
- #include <linux/sched/autogroup.h>
- #include <linux/sched/cpufreq.h>
-@@ -3878,4 +3882,9 @@ void sched_enq_and_set_task(struct sched_enq_and_set_ctx *ctx);
- 
- #include "ext.h"
- 
-+static inline int task_running_nice(struct task_struct *p)
-+{
-+	return (task_nice(p) > 0);
-+}
-+#endif /* !CONFIG_SCHED_ALT */
- #endif /* _KERNEL_SCHED_SCHED_H */
-diff --git a/kernel/sched/stats.c b/kernel/sched/stats.c
-index eb0cdcd4d921..72224ecb5cbf 100644
---- a/kernel/sched/stats.c
-+++ b/kernel/sched/stats.c
-@@ -115,8 +115,10 @@ static int show_schedstat(struct seq_file *seq, void *v)
- 	} else {
- 		struct rq *rq;
- #ifdef CONFIG_SMP
-+#ifndef CONFIG_SCHED_ALT
- 		struct sched_domain *sd;
- 		int dcount = 0;
-+#endif
- #endif
- 		cpu = (unsigned long)(v - 2);
- 		rq = cpu_rq(cpu);
-@@ -133,6 +135,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
- 		seq_printf(seq, "\n");
- 
- #ifdef CONFIG_SMP
-+#ifndef CONFIG_SCHED_ALT
- 		/* domain-specific stats */
- 		rcu_read_lock();
- 		for_each_domain(cpu, sd) {
-@@ -160,6 +163,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
- 			    sd->ttwu_move_balance);
- 		}
- 		rcu_read_unlock();
-+#endif
- #endif
- 	}
- 	return 0;
-diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h
-index 767e098a3bd1..4cbf4d3e611e 100644
---- a/kernel/sched/stats.h
-+++ b/kernel/sched/stats.h
-@@ -89,6 +89,7 @@ static inline void rq_sched_info_depart  (struct rq *rq, unsigned long long delt
- 
- #endif /* CONFIG_SCHEDSTATS */
- 
-+#ifndef CONFIG_SCHED_ALT
- #ifdef CONFIG_FAIR_GROUP_SCHED
- struct sched_entity_stats {
- 	struct sched_entity     se;
-@@ -105,6 +106,7 @@ __schedstats_from_se(struct sched_entity *se)
- #endif
- 	return &task_of(se)->stats;
- }
-+#endif /* CONFIG_SCHED_ALT */
- 
- #ifdef CONFIG_PSI
- void psi_task_change(struct task_struct *task, int clear, int set);
-diff --git a/kernel/sched/syscalls.c b/kernel/sched/syscalls.c
-index 24f9f90b6574..9aa01e45c920 100644
---- a/kernel/sched/syscalls.c
-+++ b/kernel/sched/syscalls.c
-@@ -16,6 +16,14 @@
- #include "sched.h"
- #include "autogroup.h"
- 
-+#ifdef CONFIG_SCHED_ALT
-+#include "alt_core.h"
-+
-+static inline int __normal_prio(int policy, int rt_prio, int static_prio)
-+{
-+	return rt_policy(policy) ? (MAX_RT_PRIO - 1 - rt_prio) : static_prio;
-+}
-+#else /* !CONFIG_SCHED_ALT */
- static inline int __normal_prio(int policy, int rt_prio, int nice)
- {
- 	int prio;
-@@ -29,6 +37,7 @@ static inline int __normal_prio(int policy, int rt_prio, int nice)
- 
- 	return prio;
- }
-+#endif /* !CONFIG_SCHED_ALT */
- 
- /*
-  * Calculate the expected normal priority: i.e. priority
-@@ -39,7 +48,11 @@ static inline int __normal_prio(int policy, int rt_prio, int nice)
-  */
- static inline int normal_prio(struct task_struct *p)
- {
-+#ifdef CONFIG_SCHED_ALT
-+	return __normal_prio(p->policy, p->rt_priority, p->static_prio);
-+#else /* !CONFIG_SCHED_ALT */
- 	return __normal_prio(p->policy, p->rt_priority, PRIO_TO_NICE(p->static_prio));
-+#endif /* !CONFIG_SCHED_ALT */
- }
- 
- /*
-@@ -64,6 +77,37 @@ static int effective_prio(struct task_struct *p)
- 
- void set_user_nice(struct task_struct *p, long nice)
- {
-+#ifdef CONFIG_SCHED_ALT
-+	unsigned long flags;
-+	struct rq *rq;
-+	raw_spinlock_t *lock;
-+
-+	if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
-+		return;
-+	/*
-+	 * We have to be careful, if called from sys_setpriority(),
-+	 * the task might be in the middle of scheduling on another CPU.
-+	 */
-+	raw_spin_lock_irqsave(&p->pi_lock, flags);
-+	rq = __task_access_lock(p, &lock);
-+
-+	p->static_prio = NICE_TO_PRIO(nice);
-+	/*
-+	 * The RT priorities are set via sched_setscheduler(), but we still
-+	 * allow the 'normal' nice value to be set - but as expected
-+	 * it won't have any effect on scheduling until the task becomes
-+	 * SCHED_NORMAL/SCHED_BATCH again:
-+	 */
-+	if (task_has_rt_policy(p))
-+		goto out_unlock;
-+
-+	p->prio = effective_prio(p);
-+
-+	check_task_changed(p, rq);
-+out_unlock:
-+	__task_access_unlock(p, lock);
-+	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-+#else
- 	bool queued, running;
- 	struct rq *rq;
- 	int old_prio;
-@@ -112,6 +156,7 @@ void set_user_nice(struct task_struct *p, long nice)
- 	 * lowered its priority, then reschedule its CPU:
- 	 */
- 	p->sched_class->prio_changed(rq, p, old_prio);
-+#endif /* !CONFIG_SCHED_ALT */
- }
- EXPORT_SYMBOL(set_user_nice);
- 
-@@ -190,7 +235,19 @@ SYSCALL_DEFINE1(nice, int, increment)
-  */
- int task_prio(const struct task_struct *p)
- {
-+#ifdef CONFIG_SCHED_ALT
-+/*
-+ * sched policy               return value    kernel prio     user prio/nice
-+ *
-+ * (BMQ) normal, batch, idle  [0 ... 53]      [100 ... 139]   0/[-20 ... 19]/[-7 ... 7]
-+ * (PDS) normal, batch, idle  [0 ... 39]      100             0/[-20 ... 19]
-+ * fifo, rr                   [-1 ... -100]   [99 ... 0]      [0 ... 99]
-+ */
-+	return (p->prio < MAX_RT_PRIO) ? p->prio - MAX_RT_PRIO :
-+		task_sched_prio_normal(p, task_rq(p));
-+#else
- 	return p->prio - MAX_RT_PRIO;
-+#endif /* !CONFIG_SCHED_ALT */
- }
- 
- /**
-@@ -300,10 +357,13 @@ static void __setscheduler_params(struct task_struct *p,
- 
- 	p->policy = policy;
- 
-+#ifndef CONFIG_SCHED_ALT
- 	if (dl_policy(policy)) {
- 		__setparam_dl(p, attr);
- 	} else if (fair_policy(policy)) {
-+#endif /* !CONFIG_SCHED_ALT */
- 		p->static_prio = NICE_TO_PRIO(attr->sched_nice);
-+#ifndef CONFIG_SCHED_ALT
- 		if (attr->sched_runtime) {
- 			p->se.custom_slice = 1;
- 			p->se.slice = clamp_t(u64, attr->sched_runtime,
-@@ -322,6 +382,7 @@ static void __setscheduler_params(struct task_struct *p,
- 		/* when switching back to non-rt policy, restore timerslack */
- 		p->timer_slack_ns = p->default_timer_slack_ns;
- 	}
-+#endif /* !CONFIG_SCHED_ALT */
- 
- 	/*
- 	 * __sched_setscheduler() ensures attr->sched_priority == 0 when
-@@ -330,7 +391,9 @@ static void __setscheduler_params(struct task_struct *p,
- 	 */
- 	p->rt_priority = attr->sched_priority;
- 	p->normal_prio = normal_prio(p);
-+#ifndef CONFIG_SCHED_ALT
- 	set_load_weight(p, true);
-+#endif /* !CONFIG_SCHED_ALT */
- }
- 
- /*
-@@ -346,6 +409,8 @@ static bool check_same_owner(struct task_struct *p)
- 		uid_eq(cred->euid, pcred->uid));
- }
- 
-+#ifndef CONFIG_SCHED_ALT
-+
- #ifdef CONFIG_UCLAMP_TASK
- 
- static int uclamp_validate(struct task_struct *p,
-@@ -459,6 +524,7 @@ static inline int uclamp_validate(struct task_struct *p,
- static void __setscheduler_uclamp(struct task_struct *p,
- 				  const struct sched_attr *attr) { }
- #endif
-+#endif /* !CONFIG_SCHED_ALT */
- 
- /*
-  * Allow unprivileged RT tasks to decrease priority.
-@@ -469,11 +535,13 @@ static int user_check_sched_setscheduler(struct task_struct *p,
- 					 const struct sched_attr *attr,
- 					 int policy, int reset_on_fork)
- {
-+#ifndef CONFIG_SCHED_ALT
- 	if (fair_policy(policy)) {
- 		if (attr->sched_nice < task_nice(p) &&
- 		    !is_nice_reduction(p, attr->sched_nice))
- 			goto req_priv;
- 	}
-+#endif /* !CONFIG_SCHED_ALT */
- 
- 	if (rt_policy(policy)) {
- 		unsigned long rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO);
-@@ -488,6 +556,7 @@ static int user_check_sched_setscheduler(struct task_struct *p,
- 			goto req_priv;
- 	}
- 
-+#ifndef CONFIG_SCHED_ALT
- 	/*
- 	 * Can't set/change SCHED_DEADLINE policy at all for now
- 	 * (safest behavior); in the future we would like to allow
-@@ -505,6 +574,7 @@ static int user_check_sched_setscheduler(struct task_struct *p,
- 		if (!is_nice_reduction(p, task_nice(p)))
- 			goto req_priv;
- 	}
-+#endif /* !CONFIG_SCHED_ALT */
- 
- 	/* Can't change other user's priorities: */
- 	if (!check_same_owner(p))
-@@ -527,6 +597,158 @@ int __sched_setscheduler(struct task_struct *p,
- 			 const struct sched_attr *attr,
- 			 bool user, bool pi)
- {
-+#ifdef CONFIG_SCHED_ALT
-+	const struct sched_attr dl_squash_attr = {
-+		.size		= sizeof(struct sched_attr),
-+		.sched_policy	= SCHED_FIFO,
-+		.sched_nice	= 0,
-+		.sched_priority = 99,
-+	};
-+	int oldpolicy = -1, policy = attr->sched_policy;
-+	int retval, newprio;
-+	struct balance_callback *head;
-+	unsigned long flags;
-+	struct rq *rq;
-+	int reset_on_fork;
-+	raw_spinlock_t *lock;
-+
-+	/* The pi code expects interrupts enabled */
-+	BUG_ON(pi && in_interrupt());
-+
-+	/*
-+	 * Alt schedule FW supports SCHED_DEADLINE by squashing it into
-+	 * SCHED_FIFO at priority 99 (kernel prio 0), see dl_squash_attr above.
-+	 */
-+	if (unlikely(SCHED_DEADLINE == policy)) {
-+		attr = &dl_squash_attr;
-+		policy = attr->sched_policy;
-+	}
-+recheck:
-+	/* Double check policy once rq lock held */
-+	if (policy < 0) {
-+		reset_on_fork = p->sched_reset_on_fork;
-+		policy = oldpolicy = p->policy;
-+	} else {
-+		reset_on_fork = !!(attr->sched_flags & SCHED_RESET_ON_FORK);
-+
-+		if (policy > SCHED_IDLE)
-+			return -EINVAL;
-+	}
-+
-+	if (attr->sched_flags & ~(SCHED_FLAG_ALL))
-+		return -EINVAL;
-+
-+	/*
-+	 * Valid priorities for SCHED_FIFO and SCHED_RR are
-+	 * 1..MAX_RT_PRIO-1, valid priority for SCHED_NORMAL and
-+	 * SCHED_BATCH and SCHED_IDLE is 0.
-+	 */
-+	if (attr->sched_priority < 0 ||
-+	    attr->sched_priority > MAX_RT_PRIO - 1)
-+		return -EINVAL;
-+	if ((SCHED_RR == policy || SCHED_FIFO == policy) !=
-+	    (attr->sched_priority != 0))
-+		return -EINVAL;
-+
-+	if (user) {
-+		retval = user_check_sched_setscheduler(p, attr, policy, reset_on_fork);
-+		if (retval)
-+			return retval;
-+
-+		retval = security_task_setscheduler(p);
-+		if (retval)
-+			return retval;
-+	}
-+
-+	/*
-+	 * Make sure no PI-waiters arrive (or leave) while we are
-+	 * changing the priority of the task:
-+	 */
-+	raw_spin_lock_irqsave(&p->pi_lock, flags);
-+
-+	/*
-+	 * To be able to change p->policy safely, task_access_lock()
-+	 * must be called.
-+	 * If task_access_lock() is used here:
-+	 * for a task p which is not running, reading rq->stop is
-+	 * racy but acceptable, as ->stop doesn't change much.
-+	 * An enhancement could be made to read rq->stop safely.
-+	 */
-+	rq = __task_access_lock(p, &lock);
-+
-+	/*
-+	 * Changing the policy of the stop thread is a very bad idea
-+	 */
-+	if (p == rq->stop) {
-+		retval = -EINVAL;
-+		goto unlock;
-+	}
-+
-+	/*
-+	 * If not changing anything there's no need to proceed further:
-+	 */
-+	if (unlikely(policy == p->policy)) {
-+		if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
-+			goto change;
-+		if (!rt_policy(policy) &&
-+		    NICE_TO_PRIO(attr->sched_nice) != p->static_prio)
-+			goto change;
-+
-+		p->sched_reset_on_fork = reset_on_fork;
-+		retval = 0;
-+		goto unlock;
-+	}
-+change:
-+
-+	/* Re-check policy now with rq lock held */
-+	if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
-+		policy = oldpolicy = -1;
-+		__task_access_unlock(p, lock);
-+		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-+		goto recheck;
-+	}
-+
-+	p->sched_reset_on_fork = reset_on_fork;
-+
-+	newprio = __normal_prio(policy, attr->sched_priority, NICE_TO_PRIO(attr->sched_nice));
-+	if (pi) {
-+		/*
-+		 * Take priority boosted tasks into account. If the new
-+		 * effective priority is unchanged, we just store the new
-+		 * normal parameters and do not touch the scheduler class and
-+		 * the runqueue. This will be done when the task deboosts
-+		 * itself.
-+		 */
-+		newprio = rt_effective_prio(p, newprio);
-+	}
-+
-+	if (!(attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)) {
-+		__setscheduler_params(p, attr);
-+		__setscheduler_prio(p, newprio);
-+	}
-+
-+	check_task_changed(p, rq);
-+
-+	/* Avoid rq from going away on us: */
-+	preempt_disable();
-+	head = splice_balance_callbacks(rq);
-+	__task_access_unlock(p, lock);
-+	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-+
-+	if (pi)
-+		rt_mutex_adjust_pi(p);
-+
-+	/* Run balance callbacks after we've adjusted the PI chain: */
-+	balance_callbacks(rq, head);
-+	preempt_enable();
-+
-+	return 0;
-+
-+unlock:
-+	__task_access_unlock(p, lock);
-+	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-+	return retval;
-+#else /* !CONFIG_SCHED_ALT */
- 	int oldpolicy = -1, policy = attr->sched_policy;
- 	int retval, oldprio, newprio, queued, running;
- 	const struct sched_class *prev_class, *next_class;
-@@ -764,6 +986,7 @@ int __sched_setscheduler(struct task_struct *p,
- 	if (cpuset_locked)
- 		cpuset_unlock();
- 	return retval;
-+#endif /* !CONFIG_SCHED_ALT */
- }
- 
- static int _sched_setscheduler(struct task_struct *p, int policy,
-@@ -775,8 +998,10 @@ static int _sched_setscheduler(struct task_struct *p, int policy,
- 		.sched_nice	= PRIO_TO_NICE(p->static_prio),
- 	};
- 
-+#ifndef CONFIG_SCHED_ALT
- 	if (p->se.custom_slice)
- 		attr.sched_runtime = p->se.slice;
-+#endif /* !CONFIG_SCHED_ALT */
- 
- 	/* Fixup the legacy SCHED_RESET_ON_FORK hack. */
- 	if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) {
-@@ -944,13 +1169,18 @@ static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *a
- 
- static void get_params(struct task_struct *p, struct sched_attr *attr)
- {
--	if (task_has_dl_policy(p)) {
-+#ifndef CONFIG_SCHED_ALT
-+	if (task_has_dl_policy(p))
- 		__getparam_dl(p, attr);
--	} else if (task_has_rt_policy(p)) {
-+	else
-+#endif
-+	if (task_has_rt_policy(p)) {
- 		attr->sched_priority = p->rt_priority;
- 	} else {
- 		attr->sched_nice = task_nice(p);
-+#ifndef CONFIG_SCHED_ALT
- 		attr->sched_runtime = p->se.slice;
-+#endif
- 	}
- }
- 
-@@ -1170,6 +1400,7 @@ SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
- #ifdef CONFIG_SMP
- int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask)
- {
-+#ifndef CONFIG_SCHED_ALT
- 	/*
- 	 * If the task isn't a deadline task or admission control is
- 	 * disabled then we don't care about affinity changes.
-@@ -1186,6 +1417,7 @@ int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask)
- 	guard(rcu)();
- 	if (!cpumask_subset(task_rq(p)->rd->span, mask))
- 		return -EBUSY;
-+#endif
- 
- 	return 0;
- }
-@@ -1210,9 +1442,11 @@ int __sched_setaffinity(struct task_struct *p, struct affinity_context *ctx)
- 	ctx->new_mask = new_mask;
- 	ctx->flags |= SCA_CHECK;
- 
-+#ifndef CONFIG_SCHED_ALT
- 	retval = dl_task_check_affinity(p, new_mask);
- 	if (retval)
- 		goto out_free_new_mask;
-+#endif
- 
- 	retval = __set_cpus_allowed_ptr(p, ctx);
- 	if (retval)
-@@ -1392,13 +1626,34 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
- 
- static void do_sched_yield(void)
- {
--	struct rq_flags rf;
- 	struct rq *rq;
-+	struct rq_flags rf;
-+
-+#ifdef CONFIG_SCHED_ALT
-+	struct task_struct *p;
-+
-+	if (!sched_yield_type)
-+		return;
- 
- 	rq = this_rq_lock_irq(&rf);
- 
-+	schedstat_inc(rq->yld_count);
-+
-+	p = current;
-+	if (rt_task(p)) {
-+		if (task_on_rq_queued(p))
-+			requeue_task(p, rq);
-+	} else if (rq->nr_running > 1) {
-+		do_sched_yield_type_1(p, rq);
-+		if (task_on_rq_queued(p))
-+			requeue_task(p, rq);
-+	}
-+#else /* !CONFIG_SCHED_ALT */
-+	rq = this_rq_lock_irq(&rf);
-+
- 	schedstat_inc(rq->yld_count);
- 	current->sched_class->yield_task(rq);
-+#endif /* !CONFIG_SCHED_ALT */
- 
- 	preempt_disable();
- 	rq_unlock_irq(rq, &rf);
-@@ -1467,6 +1722,9 @@ EXPORT_SYMBOL(yield);
-  */
- int __sched yield_to(struct task_struct *p, bool preempt)
- {
-+#ifdef CONFIG_SCHED_ALT
-+	return 0;
-+#else /* !CONFIG_SCHED_ALT */
- 	struct task_struct *curr = current;
- 	struct rq *rq, *p_rq;
- 	int yielded = 0;
-@@ -1512,6 +1770,7 @@ int __sched yield_to(struct task_struct *p, bool preempt)
- 		schedule();
- 
- 	return yielded;
-+#endif /* !CONFIG_SCHED_ALT */
- }
- EXPORT_SYMBOL_GPL(yield_to);
- 
-@@ -1532,7 +1791,9 @@ SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
- 	case SCHED_RR:
- 		ret = MAX_RT_PRIO-1;
- 		break;
-+#ifndef CONFIG_SCHED_ALT
- 	case SCHED_DEADLINE:
-+#endif
- 	case SCHED_NORMAL:
- 	case SCHED_BATCH:
- 	case SCHED_IDLE:
-@@ -1560,7 +1821,9 @@ SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
- 	case SCHED_RR:
- 		ret = 1;
- 		break;
-+#ifndef CONFIG_SCHED_ALT
- 	case SCHED_DEADLINE:
-+#endif
- 	case SCHED_NORMAL:
- 	case SCHED_BATCH:
- 	case SCHED_IDLE:
-@@ -1572,7 +1835,9 @@ SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
- 
- static int sched_rr_get_interval(pid_t pid, struct timespec64 *t)
- {
-+#ifndef CONFIG_SCHED_ALT
- 	unsigned int time_slice = 0;
-+#endif
- 	int retval;
- 
- 	if (pid < 0)
-@@ -1587,6 +1852,7 @@ static int sched_rr_get_interval(pid_t pid, struct timespec64 *t)
- 		if (retval)
- 			return retval;
- 
-+#ifndef CONFIG_SCHED_ALT
- 		scoped_guard (task_rq_lock, p) {
- 			struct rq *rq = scope.rq;
- 			if (p->sched_class->get_rr_interval)
-@@ -1595,6 +1861,13 @@ static int sched_rr_get_interval(pid_t pid, struct timespec64 *t)
- 	}
- 
- 	jiffies_to_timespec64(time_slice, t);
-+#else
-+	}
-+
-+	alt_sched_debug();
-+
-+	*t = ns_to_timespec64(sysctl_sched_base_slice);
-+#endif /* !CONFIG_SCHED_ALT */
- 	return 0;
- }
- 
-diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
-index 9748a4c8d668..1e2bdd70d69a 100644
---- a/kernel/sched/topology.c
-+++ b/kernel/sched/topology.c
-@@ -3,6 +3,7 @@
-  * Scheduler topology setup/handling methods
-  */
- 
-+#ifndef CONFIG_SCHED_ALT
- #include <linux/bsearch.h>
- 
- DEFINE_MUTEX(sched_domains_mutex);
-@@ -1459,8 +1460,10 @@ static void asym_cpu_capacity_scan(void)
-  */
- 
- static int default_relax_domain_level = -1;
-+#endif /* CONFIG_SCHED_ALT */
- int sched_domain_level_max;
- 
-+#ifndef CONFIG_SCHED_ALT
- static int __init setup_relax_domain_level(char *str)
- {
- 	if (kstrtoint(str, 0, &default_relax_domain_level))
-@@ -1695,6 +1698,7 @@ sd_init(struct sched_domain_topology_level *tl,
- 
- 	return sd;
- }
-+#endif /* CONFIG_SCHED_ALT */
- 
- /*
-  * Topology list, bottom-up.
-@@ -1731,6 +1735,7 @@ void __init set_sched_topology(struct sched_domain_topology_level *tl)
- 	sched_domain_topology_saved = NULL;
- }
- 
-+#ifndef CONFIG_SCHED_ALT
- #ifdef CONFIG_NUMA
- 
- static const struct cpumask *sd_numa_mask(int cpu)
-@@ -2797,3 +2802,28 @@ void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
- 	partition_sched_domains_locked(ndoms_new, doms_new, dattr_new);
- 	mutex_unlock(&sched_domains_mutex);
- }
-+#else /* CONFIG_SCHED_ALT */
-+DEFINE_STATIC_KEY_FALSE(sched_asym_cpucapacity);
-+
-+void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
-+			     struct sched_domain_attr *dattr_new)
-+{}
-+
-+#ifdef CONFIG_NUMA
-+int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
-+{
-+	return best_mask_cpu(cpu, cpus);
-+}
-+
-+int sched_numa_find_nth_cpu(const struct cpumask *cpus, int cpu, int node)
-+{
-+	return cpumask_nth(cpu, cpus);
-+}
-+
-+const struct cpumask *sched_numa_hop_mask(unsigned int node, unsigned int hops)
-+{
-+	return ERR_PTR(-EOPNOTSUPP);
-+}
-+EXPORT_SYMBOL_GPL(sched_numa_hop_mask);
-+#endif /* CONFIG_NUMA */
-+#endif
-diff --git a/kernel/sysctl.c b/kernel/sysctl.c
-index 79e6cb1d5c48..61bc0352e233 100644
---- a/kernel/sysctl.c
-+++ b/kernel/sysctl.c
-@@ -92,6 +92,10 @@ EXPORT_SYMBOL_GPL(sysctl_long_vals);
- 
- /* Constants used for minimum and maximum */
- 
-+#ifdef CONFIG_SCHED_ALT
-+extern int sched_yield_type;
-+#endif
-+
- #ifdef CONFIG_PERF_EVENTS
- static const int six_hundred_forty_kb = 640 * 1024;
- #endif
-@@ -1907,6 +1911,17 @@ static struct ctl_table kern_table[] = {
- 		.proc_handler	= proc_dointvec,
- 	},
- #endif
-+#ifdef CONFIG_SCHED_ALT
-+	{
-+		.procname	= "yield_type",
-+		.data		= &sched_yield_type,
-+		.maxlen		= sizeof (int),
-+		.mode		= 0644,
-+		.proc_handler	= &proc_dointvec_minmax,
-+		.extra1		= SYSCTL_ZERO,
-+		.extra2		= SYSCTL_TWO,
-+	},
-+#endif
- #if defined(CONFIG_S390) && defined(CONFIG_SMP)
- 	{
- 		.procname	= "spin_retry",
-diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
-index 6bcee4704059..cf88205fd4a2 100644
---- a/kernel/time/posix-cpu-timers.c
-+++ b/kernel/time/posix-cpu-timers.c
-@@ -223,7 +223,7 @@ static void task_sample_cputime(struct task_struct *p, u64 *samples)
- 	u64 stime, utime;
- 
- 	task_cputime(p, &utime, &stime);
--	store_samples(samples, stime, utime, p->se.sum_exec_runtime);
-+	store_samples(samples, stime, utime, tsk_seruntime(p));
- }
- 
- static void proc_sample_cputime_atomic(struct task_cputime_atomic *at,
-@@ -830,6 +830,7 @@ static void collect_posix_cputimers(struct posix_cputimers *pct, u64 *samples,
- 	}
- }
- 
-+#ifndef CONFIG_SCHED_ALT
- static inline void check_dl_overrun(struct task_struct *tsk)
- {
- 	if (tsk->dl.dl_overrun) {
-@@ -837,6 +838,7 @@ static inline void check_dl_overrun(struct task_struct *tsk)
- 		send_signal_locked(SIGXCPU, SEND_SIG_PRIV, tsk, PIDTYPE_TGID);
- 	}
- }
-+#endif
- 
- static bool check_rlimit(u64 time, u64 limit, int signo, bool rt, bool hard)
- {
-@@ -864,8 +866,10 @@ static void check_thread_timers(struct task_struct *tsk,
- 	u64 samples[CPUCLOCK_MAX];
- 	unsigned long soft;
- 
-+#ifndef CONFIG_SCHED_ALT
- 	if (dl_task(tsk))
- 		check_dl_overrun(tsk);
-+#endif
- 
- 	if (expiry_cache_is_inactive(pct))
- 		return;
-@@ -879,7 +883,7 @@ static void check_thread_timers(struct task_struct *tsk,
- 	soft = task_rlimit(tsk, RLIMIT_RTTIME);
- 	if (soft != RLIM_INFINITY) {
- 		/* Task RT timeout is accounted in jiffies. RTTIME is usec */
--		unsigned long rttime = tsk->rt.timeout * (USEC_PER_SEC / HZ);
-+		unsigned long rttime = tsk_rttimeout(tsk) * (USEC_PER_SEC / HZ);
- 		unsigned long hard = task_rlimit_max(tsk, RLIMIT_RTTIME);
- 
- 		/* At the hard limit, send SIGKILL. No further action. */
-@@ -1115,8 +1119,10 @@ static inline bool fastpath_timer_check(struct task_struct *tsk)
- 			return true;
- 	}
- 
-+#ifndef CONFIG_SCHED_ALT
- 	if (dl_task(tsk) && tsk->dl.dl_overrun)
- 		return true;
-+#endif
- 
- 	return false;
- }
-diff --git a/kernel/trace/trace_osnoise.c b/kernel/trace/trace_osnoise.c
-index a50ed23bee77..be0477666049 100644
---- a/kernel/trace/trace_osnoise.c
-+++ b/kernel/trace/trace_osnoise.c
-@@ -1665,6 +1665,9 @@ static void osnoise_sleep(bool skip_period)
-  */
- static inline int osnoise_migration_pending(void)
- {
-+#ifdef CONFIG_SCHED_ALT
-+	return 0;
-+#else
- 	if (!current->migration_pending)
- 		return 0;
- 
-@@ -1686,6 +1689,7 @@ static inline int osnoise_migration_pending(void)
- 	mutex_unlock(&interface_lock);
- 
- 	return 1;
-+#endif
- }
- 
- /*
-diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
-index 1469dd8075fa..803527a0e48a 100644
---- a/kernel/trace/trace_selftest.c
-+++ b/kernel/trace/trace_selftest.c
-@@ -1419,10 +1419,15 @@ static int trace_wakeup_test_thread(void *data)
- {
- 	/* Make this a -deadline thread */
- 	static const struct sched_attr attr = {
-+#ifdef CONFIG_SCHED_ALT
-+		/* No deadline on BMQ/PDS, use RR */
-+		.sched_policy = SCHED_RR,
-+#else
- 		.sched_policy = SCHED_DEADLINE,
- 		.sched_runtime = 100000ULL,
- 		.sched_deadline = 10000000ULL,
- 		.sched_period = 10000000ULL
-+#endif
- 	};
- 	struct wakeup_test_data *x = data;
- 
-diff --git a/kernel/workqueue.c b/kernel/workqueue.c
-index 9949ffad8df0..90eac9d802a8 100644
---- a/kernel/workqueue.c
-+++ b/kernel/workqueue.c
-@@ -1247,6 +1247,7 @@ static bool kick_pool(struct worker_pool *pool)
- 
- 	p = worker->task;
- 
-+#ifndef CONFIG_SCHED_ALT
- #ifdef CONFIG_SMP
- 	/*
- 	 * Idle @worker is about to execute @work and waking up provides an
-@@ -1276,6 +1277,8 @@ static bool kick_pool(struct worker_pool *pool)
- 		}
- 	}
- #endif
-+#endif /* !CONFIG_SCHED_ALT */
-+
- 	wake_up_process(p);
- 	return true;
- }
-@@ -1404,7 +1407,11 @@ void wq_worker_running(struct task_struct *task)
- 	 * CPU intensive auto-detection cares about how long a work item hogged
- 	 * CPU without sleeping. Reset the starting timestamp on wakeup.
- 	 */
-+#ifdef CONFIG_SCHED_ALT
-+	worker->current_at = worker->task->sched_time;
-+#else
- 	worker->current_at = worker->task->se.sum_exec_runtime;
-+#endif
- 
- 	WRITE_ONCE(worker->sleeping, 0);
- }
-@@ -1489,7 +1496,11 @@ void wq_worker_tick(struct task_struct *task)
- 	 * We probably want to make this prettier in the future.
- 	 */
- 	if ((worker->flags & WORKER_NOT_RUNNING) || READ_ONCE(worker->sleeping) ||
-+#ifdef CONFIG_SCHED_ALT
-+	    worker->task->sched_time - worker->current_at <
-+#else
- 	    worker->task->se.sum_exec_runtime - worker->current_at <
-+#endif
- 	    wq_cpu_intensive_thresh_us * NSEC_PER_USEC)
- 		return;
- 
-@@ -3157,7 +3168,11 @@ __acquires(&pool->lock)
- 	worker->current_func = work->func;
- 	worker->current_pwq = pwq;
- 	if (worker->task)
-+#ifdef CONFIG_SCHED_ALT
-+		worker->current_at = worker->task->sched_time;
-+#else
- 		worker->current_at = worker->task->se.sum_exec_runtime;
-+#endif
- 	work_data = *work_data_bits(work);
- 	worker->current_color = get_work_color(work_data);
- 

diff --git a/5021_BMQ-and-PDS-gentoo-defaults.patch b/5021_BMQ-and-PDS-gentoo-defaults.patch
deleted file mode 100644
index 7748d78c..00000000
--- a/5021_BMQ-and-PDS-gentoo-defaults.patch
+++ /dev/null
@@ -1,13 +0,0 @@
---- a/init/Kconfig	2024-11-13 14:45:36.566335895 -0500
-+++ b/init/Kconfig	2024-11-13 14:47:02.670787774 -0500
-@@ -860,8 +860,9 @@ config UCLAMP_BUCKETS_COUNT
- 	  If in doubt, use the default value.
- 
- menuconfig SCHED_ALT
-+	depends on X86_64
- 	bool "Alternative CPU Schedulers"
--	default y
-+	default n
- 	help
- 	  This feature enable alternative CPU scheduler"
- 


* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2025-04-25 11:47 Mike Pagano
  0 siblings, 0 replies; 82+ messages in thread
From: Mike Pagano @ 2025-04-25 11:47 UTC (permalink / raw
  To: gentoo-commits

commit:     a8bb1202a1b4d7cdd7d8e5bf773b6e5b93796b80
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Apr 25 11:47:35 2025 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Apr 25 11:47:35 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=a8bb1202

Linux patch 6.12.25

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1024_linux-6.12.25.patch | 8312 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 8316 insertions(+)

diff --git a/0000_README b/0000_README
index b04d2cdd..e07d8d2e 100644
--- a/0000_README
+++ b/0000_README
@@ -139,6 +139,10 @@ Patch:  1023_linux-6.12.24.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.12.24
 
+Patch:  1024_linux-6.12.25.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.12.25
+
 Patch:  1500_fortify-copy-size-value-range-tracking-fix.patch
 From:   https://git.kernel.org/
 Desc:   fortify: Hide run-time copy size from value range tracking

diff --git a/1024_linux-6.12.25.patch b/1024_linux-6.12.25.patch
new file mode 100644
index 00000000..0bba333c
--- /dev/null
+++ b/1024_linux-6.12.25.patch
@@ -0,0 +1,8312 @@
+diff --git a/Documentation/arch/arm64/booting.rst b/Documentation/arch/arm64/booting.rst
+index b57776a68f156d..15bcd1b4003a73 100644
+--- a/Documentation/arch/arm64/booting.rst
++++ b/Documentation/arch/arm64/booting.rst
+@@ -285,6 +285,12 @@ Before jumping into the kernel, the following conditions must be met:
+ 
+     - SCR_EL3.FGTEn (bit 27) must be initialised to 0b1.
+ 
++  For CPUs with the Fine Grained Traps 2 (FEAT_FGT2) extension present:
++
++  - If EL3 is present and the kernel is entered at EL2:
++
++    - SCR_EL3.FGTEn2 (bit 59) must be initialised to 0b1.
++
+   For CPUs with support for HCRX_EL2 (FEAT_HCX) present:
+ 
+   - If EL3 is present and the kernel is entered at EL2:
+@@ -379,6 +385,22 @@ Before jumping into the kernel, the following conditions must be met:
+ 
+     - SMCR_EL2.EZT0 (bit 30) must be initialised to 0b1.
+ 
++  For CPUs with the Performance Monitors Extension (FEAT_PMUv3p9):
++
++ - If EL3 is present:
++
++    - MDCR_EL3.EnPM2 (bit 7) must be initialised to 0b1.
++
++ - If the kernel is entered at EL1 and EL2 is present:
++
++    - HDFGRTR2_EL2.nPMICNTR_EL0 (bit 2) must be initialised to 0b1.
++    - HDFGRTR2_EL2.nPMICFILTR_EL0 (bit 3) must be initialised to 0b1.
++    - HDFGRTR2_EL2.nPMUACR_EL1 (bit 4) must be initialised to 0b1.
++
++    - HDFGWTR2_EL2.nPMICNTR_EL0 (bit 2) must be initialised to 0b1.
++    - HDFGWTR2_EL2.nPMICFILTR_EL0 (bit 3) must be initialised to 0b1.
++    - HDFGWTR2_EL2.nPMUACR_EL1 (bit 4) must be initialised to 0b1.
++
+   For CPUs with Memory Copy and Memory Set instructions (FEAT_MOPS):
+ 
+   - If the kernel is entered at EL1 and EL2 is present:
+diff --git a/Documentation/devicetree/bindings/soc/fsl/fsl,ls1028a-reset.yaml b/Documentation/devicetree/bindings/soc/fsl/fsl,ls1028a-reset.yaml
+index 31295be910130c..234089b5954ddb 100644
+--- a/Documentation/devicetree/bindings/soc/fsl/fsl,ls1028a-reset.yaml
++++ b/Documentation/devicetree/bindings/soc/fsl/fsl,ls1028a-reset.yaml
+@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
+ title: Freescale Layerscape Reset Registers Module
+ 
+ maintainers:
+-  - Frank Li
++  - Frank Li <Frank.Li@nxp.com>
+ 
+ description:
+   Reset Module includes chip reset, service processor control and Reset Control
+diff --git a/Documentation/netlink/specs/ovs_vport.yaml b/Documentation/netlink/specs/ovs_vport.yaml
+index 86ba9ac2a52103..b538bb99ee9b5f 100644
+--- a/Documentation/netlink/specs/ovs_vport.yaml
++++ b/Documentation/netlink/specs/ovs_vport.yaml
+@@ -123,12 +123,12 @@ attribute-sets:
+ 
+ operations:
+   name-prefix: ovs-vport-cmd-
++  fixed-header: ovs-header
+   list:
+     -
+       name: new
+       doc: Create a new OVS vport
+       attribute-set: vport
+-      fixed-header: ovs-header
+       do:
+         request:
+           attributes:
+@@ -141,7 +141,6 @@ operations:
+       name: del
+       doc: Delete existing OVS vport from a data path
+       attribute-set: vport
+-      fixed-header: ovs-header
+       do:
+         request:
+           attributes:
+@@ -152,7 +151,6 @@ operations:
+       name: get
+       doc: Get / dump OVS vport configuration and state
+       attribute-set: vport
+-      fixed-header: ovs-header
+       do: &vport-get-op
+         request:
+           attributes:
+diff --git a/Documentation/netlink/specs/rt_link.yaml b/Documentation/netlink/specs/rt_link.yaml
+index 0c4d5d40cae905..a048fc30389d68 100644
+--- a/Documentation/netlink/specs/rt_link.yaml
++++ b/Documentation/netlink/specs/rt_link.yaml
+@@ -1094,11 +1094,10 @@ attribute-sets:
+       -
+         name: prop-list
+         type: nest
+-        nested-attributes: link-attrs
++        nested-attributes: prop-list-link-attrs
+       -
+         name: alt-ifname
+         type: string
+-        multi-attr: true
+       -
+         name: perm-address
+         type: binary
+@@ -1137,6 +1136,13 @@ attribute-sets:
+         name: dpll-pin
+         type: nest
+         nested-attributes: link-dpll-pin-attrs
++  -
++    name: prop-list-link-attrs
++    subset-of: link-attrs
++    attributes:
++      -
++        name: alt-ifname
++        multi-attr: true
+   -
+     name: af-spec-attrs
+     attributes:
+@@ -2071,9 +2077,10 @@ attribute-sets:
+         type: u32
+   -
+     name: mctp-attrs
++    name-prefix: ifla-mctp-
+     attributes:
+       -
+-        name: mctp-net
++        name: net
+         type: u32
+   -
+     name: stats-attrs
+@@ -2319,7 +2326,6 @@ operations:
+             - min-mtu
+             - max-mtu
+             - prop-list
+-            - alt-ifname
+             - perm-address
+             - proto-down-reason
+             - parent-dev-name
+diff --git a/Documentation/wmi/devices/msi-wmi-platform.rst b/Documentation/wmi/devices/msi-wmi-platform.rst
+index 31a13694289238..73197b31926a57 100644
+--- a/Documentation/wmi/devices/msi-wmi-platform.rst
++++ b/Documentation/wmi/devices/msi-wmi-platform.rst
+@@ -138,6 +138,10 @@ input data, the meaning of which depends on the subfeature being accessed.
+ The output buffer contains a single byte which signals success or failure (``0x00`` on failure)
+ and 31 bytes of output data, the meaning if which depends on the subfeature being accessed.
+ 
++.. note::
++   The ACPI control method responsible for handling the WMI method calls is not thread-safe.
++   This is a firmware bug that needs to be handled inside the driver itself.
++
+ WMI method Get_EC()
+ -------------------
+ 
+diff --git a/Makefile b/Makefile
+index e1fa425089c220..93f4ba25a45336 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 12
+-SUBLEVEL = 24
++SUBLEVEL = 25
+ EXTRAVERSION =
+ NAME = Baby Opossum Posse
+ 
+@@ -455,7 +455,6 @@ export rust_common_flags := --edition=2021 \
+ 			    -Wclippy::ignored_unit_patterns \
+ 			    -Wclippy::mut_mut \
+ 			    -Wclippy::needless_bitwise_bool \
+-			    -Wclippy::needless_continue \
+ 			    -Aclippy::needless_lifetimes \
+ 			    -Wclippy::no_mangle_with_rust_abi \
+ 			    -Wclippy::undocumented_unsafe_blocks \
+@@ -1016,6 +1015,9 @@ endif
+ # Ensure compilers do not transform certain loops into calls to wcslen()
+ KBUILD_CFLAGS += -fno-builtin-wcslen
+ 
++# Ensure compilers do not transform certain loops into calls to wcslen()
++KBUILD_CFLAGS += -fno-builtin-wcslen
++
+ # change __FILE__ to the relative path from the srctree
+ KBUILD_CPPFLAGS += $(call cc-option,-fmacro-prefix-map=$(srctree)/=)
+ 
+diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h
+index e0ffdf13a18b3f..bdbe9e08664a69 100644
+--- a/arch/arm64/include/asm/el2_setup.h
++++ b/arch/arm64/include/asm/el2_setup.h
+@@ -215,6 +215,30 @@
+ .Lskip_fgt_\@:
+ .endm
+ 
++.macro __init_el2_fgt2
++	mrs	x1, id_aa64mmfr0_el1
++	ubfx	x1, x1, #ID_AA64MMFR0_EL1_FGT_SHIFT, #4
++	cmp	x1, #ID_AA64MMFR0_EL1_FGT_FGT2
++	b.lt	.Lskip_fgt2_\@
++
++	mov	x0, xzr
++	mrs	x1, id_aa64dfr0_el1
++	ubfx	x1, x1, #ID_AA64DFR0_EL1_PMUVer_SHIFT, #4
++	cmp	x1, #ID_AA64DFR0_EL1_PMUVer_V3P9
++	b.lt	.Lskip_pmuv3p9_\@
++
++	orr	x0, x0, #HDFGRTR2_EL2_nPMICNTR_EL0
++	orr	x0, x0, #HDFGRTR2_EL2_nPMICFILTR_EL0
++	orr	x0, x0, #HDFGRTR2_EL2_nPMUACR_EL1
++.Lskip_pmuv3p9_\@:
++	msr_s   SYS_HDFGRTR2_EL2, x0
++	msr_s   SYS_HDFGWTR2_EL2, x0
++	msr_s   SYS_HFGRTR2_EL2, xzr
++	msr_s   SYS_HFGWTR2_EL2, xzr
++	msr_s   SYS_HFGITR2_EL2, xzr
++.Lskip_fgt2_\@:
++.endm
++
+ .macro __init_el2_nvhe_prepare_eret
+ 	mov	x0, #INIT_PSTATE_EL1
+ 	msr	spsr_el2, x0
+@@ -240,6 +264,7 @@
+ 	__init_el2_nvhe_idregs
+ 	__init_el2_cptr
+ 	__init_el2_fgt
++	__init_el2_fgt2
+ .endm
+ 
+ #ifndef __KVM_NVHE_HYPERVISOR__
+diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg
+index 8d637ac4b7c6b9..362bcfa0aed18f 100644
+--- a/arch/arm64/tools/sysreg
++++ b/arch/arm64/tools/sysreg
+@@ -1238,6 +1238,7 @@ UnsignedEnum	11:8	PMUVer
+ 	0b0110	V3P5
+ 	0b0111	V3P7
+ 	0b1000	V3P8
++	0b1001	V3P9
+ 	0b1111	IMP_DEF
+ EndEnum
+ UnsignedEnum	7:4	TraceVer
+@@ -1556,6 +1557,7 @@ EndEnum
+ UnsignedEnum	59:56	FGT
+ 	0b0000	NI
+ 	0b0001	IMP
++	0b0010	FGT2
+ EndEnum
+ Res0	55:48
+ UnsignedEnum	47:44	EXS
+@@ -1617,6 +1619,7 @@ Enum	3:0	PARANGE
+ 	0b0100	44
+ 	0b0101	48
+ 	0b0110	52
++	0b0111	56
+ EndEnum
+ EndSysreg
+ 
+@@ -2463,6 +2466,101 @@ Field	1	ICIALLU
+ Field	0	ICIALLUIS
+ EndSysreg
+ 
++Sysreg HDFGRTR2_EL2	3	4	3	1	0
++Res0	63:25
++Field	24	nPMBMAR_EL1
++Field	23	nMDSTEPOP_EL1
++Field	22	nTRBMPAM_EL1
++Res0	21
++Field	20	nTRCITECR_EL1
++Field	19	nPMSDSFR_EL1
++Field	18	nSPMDEVAFF_EL1
++Field	17	nSPMID
++Field	16	nSPMSCR_EL1
++Field	15	nSPMACCESSR_EL1
++Field	14	nSPMCR_EL0
++Field	13	nSPMOVS
++Field	12	nSPMINTEN
++Field	11	nSPMCNTEN
++Field	10	nSPMSELR_EL0
++Field	9	nSPMEVTYPERn_EL0
++Field	8	nSPMEVCNTRn_EL0
++Field	7	nPMSSCR_EL1
++Field	6	nPMSSDATA
++Field	5	nMDSELR_EL1
++Field	4	nPMUACR_EL1
++Field	3	nPMICFILTR_EL0
++Field	2	nPMICNTR_EL0
++Field	1	nPMIAR_EL1
++Field	0	nPMECR_EL1
++EndSysreg
++
++Sysreg HDFGWTR2_EL2	3	4	3	1	1
++Res0	63:25
++Field	24	nPMBMAR_EL1
++Field	23	nMDSTEPOP_EL1
++Field	22	nTRBMPAM_EL1
++Field	21	nPMZR_EL0
++Field	20	nTRCITECR_EL1
++Field	19	nPMSDSFR_EL1
++Res0	18:17
++Field	16	nSPMSCR_EL1
++Field	15	nSPMACCESSR_EL1
++Field	14	nSPMCR_EL0
++Field	13	nSPMOVS
++Field	12	nSPMINTEN
++Field	11	nSPMCNTEN
++Field	10	nSPMSELR_EL0
++Field	9	nSPMEVTYPERn_EL0
++Field	8	nSPMEVCNTRn_EL0
++Field	7	nPMSSCR_EL1
++Res0	6
++Field	5	nMDSELR_EL1
++Field	4	nPMUACR_EL1
++Field	3	nPMICFILTR_EL0
++Field	2	nPMICNTR_EL0
++Field	1	nPMIAR_EL1
++Field	0	nPMECR_EL1
++EndSysreg
++
++Sysreg	HFGRTR2_EL2	3	4	3	1	2
++Res0	63:15
++Field	14	nACTLRALIAS_EL1
++Field	13	nACTLRMASK_EL1
++Field	12	nTCR2ALIAS_EL1
++Field	11	nTCRALIAS_EL1
++Field	10	nSCTLRALIAS2_EL1
++Field	9	nSCTLRALIAS_EL1
++Field	8	nCPACRALIAS_EL1
++Field	7	nTCR2MASK_EL1
++Field	6	nTCRMASK_EL1
++Field	5	nSCTLR2MASK_EL1
++Field	4	nSCTLRMASK_EL1
++Field	3	nCPACRMASK_EL1
++Field	2	nRCWSMASK_EL1
++Field	1	nERXGSR_EL1
++Field	0	nPFAR_EL1
++EndSysreg
++
++Sysreg	HFGWTR2_EL2	3	4	3	1	3
++Res0	63:15
++Field	14	nACTLRALIAS_EL1
++Field	13	nACTLRMASK_EL1
++Field	12	nTCR2ALIAS_EL1
++Field	11	nTCRALIAS_EL1
++Field	10	nSCTLRALIAS2_EL1
++Field	9	nSCTLRALIAS_EL1
++Field	8	nCPACRALIAS_EL1
++Field	7	nTCR2MASK_EL1
++Field	6	nTCRMASK_EL1
++Field	5	nSCTLR2MASK_EL1
++Field	4	nSCTLRMASK_EL1
++Field	3	nCPACRMASK_EL1
++Field	2	nRCWSMASK_EL1
++Res0	1
++Field	0	nPFAR_EL1
++EndSysreg
++
+ Sysreg HDFGRTR_EL2	3	4	3	1	4
+ Field	63	PMBIDR_EL1
+ Field	62	nPMSNEVFR_EL1
+@@ -2635,6 +2733,12 @@ Field	1	AMEVCNTR00_EL0
+ Field	0	AMCNTEN0
+ EndSysreg
+ 
++Sysreg	HFGITR2_EL2	3	4	3	1	7
++Res0	63:2
++Field	1	nDCCIVAPS
++Field	0	TSBCSYNC
++EndSysreg
++
+ Sysreg	ZCR_EL2	3	4	1	2	0
+ Fields	ZCR_ELx
+ EndSysreg
+diff --git a/arch/loongarch/kernel/acpi.c b/arch/loongarch/kernel/acpi.c
+index 382a09a7152c30..1120ac2824f6e8 100644
+--- a/arch/loongarch/kernel/acpi.c
++++ b/arch/loongarch/kernel/acpi.c
+@@ -249,18 +249,6 @@ static __init int setup_node(int pxm)
+ 	return acpi_map_pxm_to_node(pxm);
+ }
+ 
+-/*
+- * Callback for SLIT parsing.  pxm_to_node() returns NUMA_NO_NODE for
+- * I/O localities since SRAT does not list them.  I/O localities are
+- * not supported at this point.
+- */
+-unsigned int numa_distance_cnt;
+-
+-static inline unsigned int get_numa_distances_cnt(struct acpi_table_slit *slit)
+-{
+-	return slit->locality_count;
+-}
+-
+ void __init numa_set_distance(int from, int to, int distance)
+ {
+ 	if ((u8)distance != distance || (from == to && distance != LOCAL_DISTANCE)) {
+diff --git a/arch/mips/dec/prom/init.c b/arch/mips/dec/prom/init.c
+index cb12eb211a49e0..8d74d7d6c05b47 100644
+--- a/arch/mips/dec/prom/init.c
++++ b/arch/mips/dec/prom/init.c
+@@ -42,7 +42,7 @@ int (*__pmax_close)(int);
+  * Detect which PROM the DECSTATION has, and set the callback vectors
+  * appropriately.
+  */
+-void __init which_prom(s32 magic, s32 *prom_vec)
++static void __init which_prom(s32 magic, s32 *prom_vec)
+ {
+ 	/*
+ 	 * No sign of the REX PROM's magic number means we assume a non-REX
+diff --git a/arch/mips/include/asm/ds1287.h b/arch/mips/include/asm/ds1287.h
+index 46cfb01f9a14e7..51cb61fd4c0330 100644
+--- a/arch/mips/include/asm/ds1287.h
++++ b/arch/mips/include/asm/ds1287.h
+@@ -8,7 +8,7 @@
+ #define __ASM_DS1287_H
+ 
+ extern int ds1287_timer_state(void);
+-extern void ds1287_set_base_clock(unsigned int clock);
++extern int ds1287_set_base_clock(unsigned int hz);
+ extern int ds1287_clockevent_init(int irq);
+ 
+ #endif
+diff --git a/arch/mips/kernel/cevt-ds1287.c b/arch/mips/kernel/cevt-ds1287.c
+index 9a47fbcd4638a6..de64d6bb7ba36c 100644
+--- a/arch/mips/kernel/cevt-ds1287.c
++++ b/arch/mips/kernel/cevt-ds1287.c
+@@ -10,6 +10,7 @@
+ #include <linux/mc146818rtc.h>
+ #include <linux/irq.h>
+ 
++#include <asm/ds1287.h>
+ #include <asm/time.h>
+ 
+ int ds1287_timer_state(void)
+diff --git a/arch/riscv/include/asm/kgdb.h b/arch/riscv/include/asm/kgdb.h
+index 46677daf708bd0..cc11c4544cffd1 100644
+--- a/arch/riscv/include/asm/kgdb.h
++++ b/arch/riscv/include/asm/kgdb.h
+@@ -19,16 +19,9 @@
+ 
+ #ifndef	__ASSEMBLY__
+ 
++void arch_kgdb_breakpoint(void);
+ extern unsigned long kgdb_compiled_break;
+ 
+-static inline void arch_kgdb_breakpoint(void)
+-{
+-	asm(".global kgdb_compiled_break\n"
+-	    ".option norvc\n"
+-	    "kgdb_compiled_break: ebreak\n"
+-	    ".option rvc\n");
+-}
+-
+ #endif /* !__ASSEMBLY__ */
+ 
+ #define DBG_REG_ZERO "zero"
+diff --git a/arch/riscv/include/asm/syscall.h b/arch/riscv/include/asm/syscall.h
+index 121fff429dce66..eceabf59ae482a 100644
+--- a/arch/riscv/include/asm/syscall.h
++++ b/arch/riscv/include/asm/syscall.h
+@@ -62,8 +62,11 @@ static inline void syscall_get_arguments(struct task_struct *task,
+ 					 unsigned long *args)
+ {
+ 	args[0] = regs->orig_a0;
+-	args++;
+-	memcpy(args, &regs->a1, 5 * sizeof(args[0]));
++	args[1] = regs->a1;
++	args[2] = regs->a2;
++	args[3] = regs->a3;
++	args[4] = regs->a4;
++	args[5] = regs->a5;
+ }
+ 
+ static inline int syscall_get_arch(struct task_struct *task)
+diff --git a/arch/riscv/kernel/kgdb.c b/arch/riscv/kernel/kgdb.c
+index 2e0266ae6bd728..9f3db3503dabd6 100644
+--- a/arch/riscv/kernel/kgdb.c
++++ b/arch/riscv/kernel/kgdb.c
+@@ -254,6 +254,12 @@ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
+ 	regs->epc = pc;
+ }
+ 
++noinline void arch_kgdb_breakpoint(void)
++{
++	asm(".global kgdb_compiled_break\n"
++	    "kgdb_compiled_break: ebreak\n");
++}
++
+ void kgdb_arch_handle_qxfer_pkt(char *remcom_in_buffer,
+ 				char *remcom_out_buffer)
+ {
+diff --git a/arch/riscv/kernel/module-sections.c b/arch/riscv/kernel/module-sections.c
+index e264e59e596e80..91d0b355ceeff6 100644
+--- a/arch/riscv/kernel/module-sections.c
++++ b/arch/riscv/kernel/module-sections.c
+@@ -73,16 +73,17 @@ static bool duplicate_rela(const Elf_Rela *rela, int idx)
+ static void count_max_entries(Elf_Rela *relas, int num,
+ 			      unsigned int *plts, unsigned int *gots)
+ {
+-	unsigned int type, i;
+-
+-	for (i = 0; i < num; i++) {
+-		type = ELF_RISCV_R_TYPE(relas[i].r_info);
+-		if (type == R_RISCV_CALL_PLT) {
++	for (int i = 0; i < num; i++) {
++		switch (ELF_R_TYPE(relas[i].r_info)) {
++		case R_RISCV_CALL_PLT:
++		case R_RISCV_PLT32:
+ 			if (!duplicate_rela(relas, i))
+ 				(*plts)++;
+-		} else if (type == R_RISCV_GOT_HI20) {
++			break;
++		case R_RISCV_GOT_HI20:
+ 			if (!duplicate_rela(relas, i))
+ 				(*gots)++;
++			break;
+ 		}
+ 	}
+ }
+diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c
+index 47d0ebeec93c23..7f6147c18033b2 100644
+--- a/arch/riscv/kernel/module.c
++++ b/arch/riscv/kernel/module.c
+@@ -648,7 +648,7 @@ process_accumulated_relocations(struct module *me,
+ 		kfree(bucket_iter);
+ 	}
+ 
+-	kfree(*relocation_hashtable);
++	kvfree(*relocation_hashtable);
+ }
+ 
+ static int add_relocation_to_accumulate(struct module *me, int type,
+@@ -752,9 +752,10 @@ initialize_relocation_hashtable(unsigned int num_relocations,
+ 
+ 	hashtable_size <<= should_double_size;
+ 
+-	*relocation_hashtable = kmalloc_array(hashtable_size,
+-					      sizeof(**relocation_hashtable),
+-					      GFP_KERNEL);
++	/* Number of relocations may be large, so kvmalloc it */
++	*relocation_hashtable = kvmalloc_array(hashtable_size,
++					       sizeof(**relocation_hashtable),
++					       GFP_KERNEL);
+ 	if (!*relocation_hashtable)
+ 		return 0;
+ 
+@@ -859,7 +860,7 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
+ 				}
+ 
+ 				j++;
+-				if (j > sechdrs[relsec].sh_size / sizeof(*rel))
++				if (j == num_relocations)
+ 					j = 0;
+ 
+ 			} while (j_idx != j);
+diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
+index 7934613a98c883..194bda6d74ce72 100644
+--- a/arch/riscv/kernel/setup.c
++++ b/arch/riscv/kernel/setup.c
+@@ -66,6 +66,9 @@ static struct resource bss_res = { .name = "Kernel bss", };
+ static struct resource elfcorehdr_res = { .name = "ELF Core hdr", };
+ #endif
+ 
++static int num_standard_resources;
++static struct resource *standard_resources;
++
+ static int __init add_resource(struct resource *parent,
+ 				struct resource *res)
+ {
+@@ -139,7 +142,7 @@ static void __init init_resources(void)
+ 	struct resource *res = NULL;
+ 	struct resource *mem_res = NULL;
+ 	size_t mem_res_sz = 0;
+-	int num_resources = 0, res_idx = 0;
++	int num_resources = 0, res_idx = 0, non_resv_res = 0;
+ 	int ret = 0;
+ 
+ 	/* + 1 as memblock_alloc() might increase memblock.reserved.cnt */
+@@ -195,6 +198,7 @@ static void __init init_resources(void)
+ 	/* Add /memory regions to the resource tree */
+ 	for_each_mem_region(region) {
+ 		res = &mem_res[res_idx--];
++		non_resv_res++;
+ 
+ 		if (unlikely(memblock_is_nomap(region))) {
+ 			res->name = "Reserved";
+@@ -212,6 +216,9 @@ static void __init init_resources(void)
+ 			goto error;
+ 	}
+ 
++	num_standard_resources = non_resv_res;
++	standard_resources = &mem_res[res_idx + 1];
++
+ 	/* Clean-up any unused pre-allocated resources */
+ 	if (res_idx >= 0)
+ 		memblock_free(mem_res, (res_idx + 1) * sizeof(*mem_res));
+@@ -223,6 +230,33 @@ static void __init init_resources(void)
+ 	memblock_free(mem_res, mem_res_sz);
+ }
+ 
++static int __init reserve_memblock_reserved_regions(void)
++{
++	u64 i, j;
++
++	for (i = 0; i < num_standard_resources; i++) {
++		struct resource *mem = &standard_resources[i];
++		phys_addr_t r_start, r_end, mem_size = resource_size(mem);
++
++		if (!memblock_is_region_reserved(mem->start, mem_size))
++			continue;
++
++		for_each_reserved_mem_range(j, &r_start, &r_end) {
++			resource_size_t start, end;
++
++			start = max(PFN_PHYS(PFN_DOWN(r_start)), mem->start);
++			end = min(PFN_PHYS(PFN_UP(r_end)) - 1, mem->end);
++
++			if (start > mem->end || end < mem->start)
++				continue;
++
++			reserve_region_with_split(mem, start, end, "Reserved");
++		}
++	}
++
++	return 0;
++}
++arch_initcall(reserve_memblock_reserved_regions);
+ 
+ static void __init parse_dtb(void)
+ {
+diff --git a/arch/x86/boot/compressed/mem.c b/arch/x86/boot/compressed/mem.c
+index dbba332e4a12d7..f676156d9f3db4 100644
+--- a/arch/x86/boot/compressed/mem.c
++++ b/arch/x86/boot/compressed/mem.c
+@@ -34,11 +34,14 @@ static bool early_is_tdx_guest(void)
+ 
+ void arch_accept_memory(phys_addr_t start, phys_addr_t end)
+ {
++	static bool sevsnp;
++
+ 	/* Platform-specific memory-acceptance call goes here */
+ 	if (early_is_tdx_guest()) {
+ 		if (!tdx_accept_memory(start, end))
+ 			panic("TDX: Failed to accept memory\n");
+-	} else if (sev_snp_enabled()) {
++	} else if (sevsnp || (sev_get_status() & MSR_AMD64_SEV_SNP_ENABLED)) {
++		sevsnp = true;
+ 		snp_accept_memory(start, end);
+ 	} else {
+ 		error("Cannot accept memory: unknown platform\n");
+diff --git a/arch/x86/boot/compressed/sev.c b/arch/x86/boot/compressed/sev.c
+index cd44e120fe5377..f49f7eef1dba07 100644
+--- a/arch/x86/boot/compressed/sev.c
++++ b/arch/x86/boot/compressed/sev.c
+@@ -164,10 +164,7 @@ bool sev_snp_enabled(void)
+ 
+ static void __page_state_change(unsigned long paddr, enum psc_op op)
+ {
+-	u64 val;
+-
+-	if (!sev_snp_enabled())
+-		return;
++	u64 val, msr;
+ 
+ 	/*
+ 	 * If private -> shared then invalidate the page before requesting the
+@@ -176,6 +173,9 @@ static void __page_state_change(unsigned long paddr, enum psc_op op)
+ 	if (op == SNP_PAGE_STATE_SHARED)
+ 		pvalidate_4k_page(paddr, paddr, false);
+ 
++	/* Save the current GHCB MSR value */
++	msr = sev_es_rd_ghcb_msr();
++
+ 	/* Issue VMGEXIT to change the page state in RMP table. */
+ 	sev_es_wr_ghcb_msr(GHCB_MSR_PSC_REQ_GFN(paddr >> PAGE_SHIFT, op));
+ 	VMGEXIT();
+@@ -185,6 +185,9 @@ static void __page_state_change(unsigned long paddr, enum psc_op op)
+ 	if ((GHCB_RESP_CODE(val) != GHCB_MSR_PSC_RESP) || GHCB_MSR_PSC_RESP_VAL(val))
+ 		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
+ 
++	/* Restore the GHCB MSR value */
++	sev_es_wr_ghcb_msr(msr);
++
+ 	/*
+ 	 * Now that page state is changed in the RMP table, validate it so that it is
+ 	 * consistent with the RMP entry.
+@@ -195,11 +198,17 @@ static void __page_state_change(unsigned long paddr, enum psc_op op)
+ 
+ void snp_set_page_private(unsigned long paddr)
+ {
++	if (!sev_snp_enabled())
++		return;
++
+ 	__page_state_change(paddr, SNP_PAGE_STATE_PRIVATE);
+ }
+ 
+ void snp_set_page_shared(unsigned long paddr)
+ {
++	if (!sev_snp_enabled())
++		return;
++
+ 	__page_state_change(paddr, SNP_PAGE_STATE_SHARED);
+ }
+ 
+@@ -223,56 +232,10 @@ static bool early_setup_ghcb(void)
+ 	return true;
+ }
+ 
+-static phys_addr_t __snp_accept_memory(struct snp_psc_desc *desc,
+-				       phys_addr_t pa, phys_addr_t pa_end)
+-{
+-	struct psc_hdr *hdr;
+-	struct psc_entry *e;
+-	unsigned int i;
+-
+-	hdr = &desc->hdr;
+-	memset(hdr, 0, sizeof(*hdr));
+-
+-	e = desc->entries;
+-
+-	i = 0;
+-	while (pa < pa_end && i < VMGEXIT_PSC_MAX_ENTRY) {
+-		hdr->end_entry = i;
+-
+-		e->gfn = pa >> PAGE_SHIFT;
+-		e->operation = SNP_PAGE_STATE_PRIVATE;
+-		if (IS_ALIGNED(pa, PMD_SIZE) && (pa_end - pa) >= PMD_SIZE) {
+-			e->pagesize = RMP_PG_SIZE_2M;
+-			pa += PMD_SIZE;
+-		} else {
+-			e->pagesize = RMP_PG_SIZE_4K;
+-			pa += PAGE_SIZE;
+-		}
+-
+-		e++;
+-		i++;
+-	}
+-
+-	if (vmgexit_psc(boot_ghcb, desc))
+-		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
+-
+-	pvalidate_pages(desc);
+-
+-	return pa;
+-}
+-
+ void snp_accept_memory(phys_addr_t start, phys_addr_t end)
+ {
+-	struct snp_psc_desc desc = {};
+-	unsigned int i;
+-	phys_addr_t pa;
+-
+-	if (!boot_ghcb && !early_setup_ghcb())
+-		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
+-
+-	pa = start;
+-	while (pa < end)
+-		pa = __snp_accept_memory(&desc, pa, end);
++	for (phys_addr_t pa = start; pa < end; pa += PAGE_SIZE)
++		__page_state_change(pa, SNP_PAGE_STATE_PRIVATE);
+ }
+ 
+ void sev_es_shutdown_ghcb(void)
+diff --git a/arch/x86/boot/compressed/sev.h b/arch/x86/boot/compressed/sev.h
+index fc725a981b093b..4e463f33186df4 100644
+--- a/arch/x86/boot/compressed/sev.h
++++ b/arch/x86/boot/compressed/sev.h
+@@ -12,11 +12,13 @@
+ 
+ bool sev_snp_enabled(void);
+ void snp_accept_memory(phys_addr_t start, phys_addr_t end);
++u64 sev_get_status(void);
+ 
+ #else
+ 
+ static inline bool sev_snp_enabled(void) { return false; }
+ static inline void snp_accept_memory(phys_addr_t start, phys_addr_t end) { }
++static inline u64 sev_get_status(void) { return 0; }
+ 
+ #endif
+ 
+diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
+index 1617aa3efd68b1..1b82bcc6fa5564 100644
+--- a/arch/x86/events/intel/ds.c
++++ b/arch/x86/events/intel/ds.c
+@@ -1317,8 +1317,10 @@ static u64 pebs_update_adaptive_cfg(struct perf_event *event)
+ 	 * + precise_ip < 2 for the non event IP
+ 	 * + For RTM TSX weight we need GPRs for the abort code.
+ 	 */
+-	gprs = (sample_type & PERF_SAMPLE_REGS_INTR) &&
+-	       (attr->sample_regs_intr & PEBS_GP_REGS);
++	gprs = ((sample_type & PERF_SAMPLE_REGS_INTR) &&
++		(attr->sample_regs_intr & PEBS_GP_REGS)) ||
++	       ((sample_type & PERF_SAMPLE_REGS_USER) &&
++		(attr->sample_regs_user & PEBS_GP_REGS));
+ 
+ 	tsx_weight = (sample_type & PERF_SAMPLE_WEIGHT_TYPE) &&
+ 		     ((attr->config & INTEL_ARCH_EVENT_MASK) ==
+@@ -1970,7 +1972,7 @@ static void setup_pebs_adaptive_sample_data(struct perf_event *event,
+ 			regs->flags &= ~PERF_EFLAGS_EXACT;
+ 		}
+ 
+-		if (sample_type & PERF_SAMPLE_REGS_INTR)
++		if (sample_type & (PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER))
+ 			adaptive_pebs_save_regs(regs, gprs);
+ 	}
+ 
+diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
+index ca98744343b89e..543609d1231efc 100644
+--- a/arch/x86/events/intel/uncore_snbep.c
++++ b/arch/x86/events/intel/uncore_snbep.c
+@@ -4891,28 +4891,28 @@ static struct uncore_event_desc snr_uncore_iio_freerunning_events[] = {
+ 	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
+ 	/* Free-Running IIO BANDWIDTH IN Counters */
+ 	INTEL_UNCORE_EVENT_DESC(bw_in_port0,		"event=0xff,umask=0x20"),
+-	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,	"3.814697266e-6"),
++	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,	"3.0517578125e-5"),
+ 	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,	"MiB"),
+ 	INTEL_UNCORE_EVENT_DESC(bw_in_port1,		"event=0xff,umask=0x21"),
+-	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,	"3.814697266e-6"),
++	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,	"3.0517578125e-5"),
+ 	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,	"MiB"),
+ 	INTEL_UNCORE_EVENT_DESC(bw_in_port2,		"event=0xff,umask=0x22"),
+-	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,	"3.814697266e-6"),
++	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,	"3.0517578125e-5"),
+ 	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,	"MiB"),
+ 	INTEL_UNCORE_EVENT_DESC(bw_in_port3,		"event=0xff,umask=0x23"),
+-	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,	"3.814697266e-6"),
++	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,	"3.0517578125e-5"),
+ 	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,	"MiB"),
+ 	INTEL_UNCORE_EVENT_DESC(bw_in_port4,		"event=0xff,umask=0x24"),
+-	INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale,	"3.814697266e-6"),
++	INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale,	"3.0517578125e-5"),
+ 	INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit,	"MiB"),
+ 	INTEL_UNCORE_EVENT_DESC(bw_in_port5,		"event=0xff,umask=0x25"),
+-	INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale,	"3.814697266e-6"),
++	INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale,	"3.0517578125e-5"),
+ 	INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit,	"MiB"),
+ 	INTEL_UNCORE_EVENT_DESC(bw_in_port6,		"event=0xff,umask=0x26"),
+-	INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale,	"3.814697266e-6"),
++	INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale,	"3.0517578125e-5"),
+ 	INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit,	"MiB"),
+ 	INTEL_UNCORE_EVENT_DESC(bw_in_port7,		"event=0xff,umask=0x27"),
+-	INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale,	"3.814697266e-6"),
++	INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale,	"3.0517578125e-5"),
+ 	INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit,	"MiB"),
+ 	{ /* end: all zeroes */ },
+ };
+@@ -5485,37 +5485,6 @@ static struct freerunning_counters icx_iio_freerunning[] = {
+ 	[ICX_IIO_MSR_BW_IN]	= { 0xaa0, 0x1, 0x10, 8, 48, icx_iio_bw_freerunning_box_offsets },
+ };
+ 
+-static struct uncore_event_desc icx_uncore_iio_freerunning_events[] = {
+-	/* Free-Running IIO CLOCKS Counter */
+-	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
+-	/* Free-Running IIO BANDWIDTH IN Counters */
+-	INTEL_UNCORE_EVENT_DESC(bw_in_port0,		"event=0xff,umask=0x20"),
+-	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,	"3.814697266e-6"),
+-	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,	"MiB"),
+-	INTEL_UNCORE_EVENT_DESC(bw_in_port1,		"event=0xff,umask=0x21"),
+-	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,	"3.814697266e-6"),
+-	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,	"MiB"),
+-	INTEL_UNCORE_EVENT_DESC(bw_in_port2,		"event=0xff,umask=0x22"),
+-	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,	"3.814697266e-6"),
+-	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,	"MiB"),
+-	INTEL_UNCORE_EVENT_DESC(bw_in_port3,		"event=0xff,umask=0x23"),
+-	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,	"3.814697266e-6"),
+-	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,	"MiB"),
+-	INTEL_UNCORE_EVENT_DESC(bw_in_port4,		"event=0xff,umask=0x24"),
+-	INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale,	"3.814697266e-6"),
+-	INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit,	"MiB"),
+-	INTEL_UNCORE_EVENT_DESC(bw_in_port5,		"event=0xff,umask=0x25"),
+-	INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale,	"3.814697266e-6"),
+-	INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit,	"MiB"),
+-	INTEL_UNCORE_EVENT_DESC(bw_in_port6,		"event=0xff,umask=0x26"),
+-	INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale,	"3.814697266e-6"),
+-	INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit,	"MiB"),
+-	INTEL_UNCORE_EVENT_DESC(bw_in_port7,		"event=0xff,umask=0x27"),
+-	INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale,	"3.814697266e-6"),
+-	INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit,	"MiB"),
+-	{ /* end: all zeroes */ },
+-};
+-
+ static struct intel_uncore_type icx_uncore_iio_free_running = {
+ 	.name			= "iio_free_running",
+ 	.num_counters		= 9,
+@@ -5523,7 +5492,7 @@ static struct intel_uncore_type icx_uncore_iio_free_running = {
+ 	.num_freerunning_types	= ICX_IIO_FREERUNNING_TYPE_MAX,
+ 	.freerunning		= icx_iio_freerunning,
+ 	.ops			= &skx_uncore_iio_freerunning_ops,
+-	.event_descs		= icx_uncore_iio_freerunning_events,
++	.event_descs		= snr_uncore_iio_freerunning_events,
+ 	.format_group		= &skx_uncore_iio_freerunning_format_group,
+ };
+ 
+@@ -6320,69 +6289,13 @@ static struct freerunning_counters spr_iio_freerunning[] = {
+ 	[SPR_IIO_MSR_BW_OUT]	= { 0x3808, 0x1, 0x10, 8, 48 },
+ };
+ 
+-static struct uncore_event_desc spr_uncore_iio_freerunning_events[] = {
+-	/* Free-Running IIO CLOCKS Counter */
+-	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
+-	/* Free-Running IIO BANDWIDTH IN Counters */
+-	INTEL_UNCORE_EVENT_DESC(bw_in_port0,		"event=0xff,umask=0x20"),
+-	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,	"3.814697266e-6"),
+-	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,	"MiB"),
+-	INTEL_UNCORE_EVENT_DESC(bw_in_port1,		"event=0xff,umask=0x21"),
+-	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,	"3.814697266e-6"),
+-	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,	"MiB"),
+-	INTEL_UNCORE_EVENT_DESC(bw_in_port2,		"event=0xff,umask=0x22"),
+-	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,	"3.814697266e-6"),
+-	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,	"MiB"),
+-	INTEL_UNCORE_EVENT_DESC(bw_in_port3,		"event=0xff,umask=0x23"),
+-	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,	"3.814697266e-6"),
+-	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,	"MiB"),
+-	INTEL_UNCORE_EVENT_DESC(bw_in_port4,		"event=0xff,umask=0x24"),
+-	INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale,	"3.814697266e-6"),
+-	INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit,	"MiB"),
+-	INTEL_UNCORE_EVENT_DESC(bw_in_port5,		"event=0xff,umask=0x25"),
+-	INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale,	"3.814697266e-6"),
+-	INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit,	"MiB"),
+-	INTEL_UNCORE_EVENT_DESC(bw_in_port6,		"event=0xff,umask=0x26"),
+-	INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale,	"3.814697266e-6"),
+-	INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit,	"MiB"),
+-	INTEL_UNCORE_EVENT_DESC(bw_in_port7,		"event=0xff,umask=0x27"),
+-	INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale,	"3.814697266e-6"),
+-	INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit,	"MiB"),
+-	/* Free-Running IIO BANDWIDTH OUT Counters */
+-	INTEL_UNCORE_EVENT_DESC(bw_out_port0,		"event=0xff,umask=0x30"),
+-	INTEL_UNCORE_EVENT_DESC(bw_out_port0.scale,	"3.814697266e-6"),
+-	INTEL_UNCORE_EVENT_DESC(bw_out_port0.unit,	"MiB"),
+-	INTEL_UNCORE_EVENT_DESC(bw_out_port1,		"event=0xff,umask=0x31"),
+-	INTEL_UNCORE_EVENT_DESC(bw_out_port1.scale,	"3.814697266e-6"),
+-	INTEL_UNCORE_EVENT_DESC(bw_out_port1.unit,	"MiB"),
+-	INTEL_UNCORE_EVENT_DESC(bw_out_port2,		"event=0xff,umask=0x32"),
+-	INTEL_UNCORE_EVENT_DESC(bw_out_port2.scale,	"3.814697266e-6"),
+-	INTEL_UNCORE_EVENT_DESC(bw_out_port2.unit,	"MiB"),
+-	INTEL_UNCORE_EVENT_DESC(bw_out_port3,		"event=0xff,umask=0x33"),
+-	INTEL_UNCORE_EVENT_DESC(bw_out_port3.scale,	"3.814697266e-6"),
+-	INTEL_UNCORE_EVENT_DESC(bw_out_port3.unit,	"MiB"),
+-	INTEL_UNCORE_EVENT_DESC(bw_out_port4,		"event=0xff,umask=0x34"),
+-	INTEL_UNCORE_EVENT_DESC(bw_out_port4.scale,	"3.814697266e-6"),
+-	INTEL_UNCORE_EVENT_DESC(bw_out_port4.unit,	"MiB"),
+-	INTEL_UNCORE_EVENT_DESC(bw_out_port5,		"event=0xff,umask=0x35"),
+-	INTEL_UNCORE_EVENT_DESC(bw_out_port5.scale,	"3.814697266e-6"),
+-	INTEL_UNCORE_EVENT_DESC(bw_out_port5.unit,	"MiB"),
+-	INTEL_UNCORE_EVENT_DESC(bw_out_port6,		"event=0xff,umask=0x36"),
+-	INTEL_UNCORE_EVENT_DESC(bw_out_port6.scale,	"3.814697266e-6"),
+-	INTEL_UNCORE_EVENT_DESC(bw_out_port6.unit,	"MiB"),
+-	INTEL_UNCORE_EVENT_DESC(bw_out_port7,		"event=0xff,umask=0x37"),
+-	INTEL_UNCORE_EVENT_DESC(bw_out_port7.scale,	"3.814697266e-6"),
+-	INTEL_UNCORE_EVENT_DESC(bw_out_port7.unit,	"MiB"),
+-	{ /* end: all zeroes */ },
+-};
+-
+ static struct intel_uncore_type spr_uncore_iio_free_running = {
+ 	.name			= "iio_free_running",
+ 	.num_counters		= 17,
+ 	.num_freerunning_types	= SPR_IIO_FREERUNNING_TYPE_MAX,
+ 	.freerunning		= spr_iio_freerunning,
+ 	.ops			= &skx_uncore_iio_freerunning_ops,
+-	.event_descs		= spr_uncore_iio_freerunning_events,
++	.event_descs		= snr_uncore_iio_freerunning_events,
+ 	.format_group		= &skx_uncore_iio_freerunning_format_group,
+ };
+ 
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index 425bed00b2e071..e432910859cb1a 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -862,6 +862,16 @@ static void init_amd_zen1(struct cpuinfo_x86 *c)
+ 
+ 	pr_notice_once("AMD Zen1 DIV0 bug detected. Disable SMT for full protection.\n");
+ 	setup_force_cpu_bug(X86_BUG_DIV0);
++
++	/*
++	 * Turn off the Instructions Retired free counter on machines that are
++	 * susceptible to erratum #1054 "Instructions Retired Performance
++	 * Counter May Be Inaccurate".
++	 */
++	if (c->x86_model < 0x30) {
++		msr_clear_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);
++		clear_cpu_cap(c, X86_FEATURE_IRPERF);
++	}
+ }
+ 
+ static bool cpu_has_zenbleed_microcode(void)
+@@ -1045,13 +1055,8 @@ static void init_amd(struct cpuinfo_x86 *c)
+ 	if (!cpu_feature_enabled(X86_FEATURE_XENPV))
+ 		set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
+ 
+-	/*
+-	 * Turn on the Instructions Retired free counter on machines not
+-	 * susceptible to erratum #1054 "Instructions Retired Performance
+-	 * Counter May Be Inaccurate".
+-	 */
+-	if (cpu_has(c, X86_FEATURE_IRPERF) &&
+-	    (boot_cpu_has(X86_FEATURE_ZEN1) && c->x86_model > 0x2f))
++	/* Enable the Instructions Retired free counter */
++	if (cpu_has(c, X86_FEATURE_IRPERF))
+ 		msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);
+ 
+ 	check_null_seg_clears_base(c);
+diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
+index 5cd735728fa028..093d3ca43c4674 100644
+--- a/arch/x86/kernel/cpu/microcode/amd.c
++++ b/arch/x86/kernel/cpu/microcode/amd.c
+@@ -199,6 +199,12 @@ static bool need_sha_check(u32 cur_rev)
+ 	case 0xa70c0: return cur_rev <= 0xa70C009; break;
+ 	case 0xaa001: return cur_rev <= 0xaa00116; break;
+ 	case 0xaa002: return cur_rev <= 0xaa00218; break;
++	case 0xb0021: return cur_rev <= 0xb002146; break;
++	case 0xb1010: return cur_rev <= 0xb101046; break;
++	case 0xb2040: return cur_rev <= 0xb204031; break;
++	case 0xb4040: return cur_rev <= 0xb404031; break;
++	case 0xb6000: return cur_rev <= 0xb600031; break;
++	case 0xb7000: return cur_rev <= 0xb700031; break;
+ 	default: break;
+ 	}
+ 
+@@ -214,8 +220,7 @@ static bool verify_sha256_digest(u32 patch_id, u32 cur_rev, const u8 *data, unsi
+ 	struct sha256_state s;
+ 	int i;
+ 
+-	if (x86_family(bsp_cpuid_1_eax) < 0x17 ||
+-	    x86_family(bsp_cpuid_1_eax) > 0x19)
++	if (x86_family(bsp_cpuid_1_eax) < 0x17)
+ 		return true;
+ 
+ 	if (!need_sha_check(cur_rev))
+diff --git a/arch/x86/xen/multicalls.c b/arch/x86/xen/multicalls.c
+index 10c660fae8b300..7237d56a9d3f01 100644
+--- a/arch/x86/xen/multicalls.c
++++ b/arch/x86/xen/multicalls.c
+@@ -54,14 +54,20 @@ struct mc_debug_data {
+ 
+ static DEFINE_PER_CPU(struct mc_buffer, mc_buffer);
+ static struct mc_debug_data mc_debug_data_early __initdata;
+-static DEFINE_PER_CPU(struct mc_debug_data *, mc_debug_data) =
+-	&mc_debug_data_early;
+ static struct mc_debug_data __percpu *mc_debug_data_ptr;
+ DEFINE_PER_CPU(unsigned long, xen_mc_irq_flags);
+ 
+ static struct static_key mc_debug __ro_after_init;
+ static bool mc_debug_enabled __initdata;
+ 
++static struct mc_debug_data * __ref get_mc_debug(void)
++{
++	if (!mc_debug_data_ptr)
++		return &mc_debug_data_early;
++
++	return this_cpu_ptr(mc_debug_data_ptr);
++}
++
+ static int __init xen_parse_mc_debug(char *arg)
+ {
+ 	mc_debug_enabled = true;
+@@ -71,20 +77,16 @@ static int __init xen_parse_mc_debug(char *arg)
+ }
+ early_param("xen_mc_debug", xen_parse_mc_debug);
+ 
+-void mc_percpu_init(unsigned int cpu)
+-{
+-	per_cpu(mc_debug_data, cpu) = per_cpu_ptr(mc_debug_data_ptr, cpu);
+-}
+-
+ static int __init mc_debug_enable(void)
+ {
+ 	unsigned long flags;
++	struct mc_debug_data __percpu *mcdb;
+ 
+ 	if (!mc_debug_enabled)
+ 		return 0;
+ 
+-	mc_debug_data_ptr = alloc_percpu(struct mc_debug_data);
+-	if (!mc_debug_data_ptr) {
++	mcdb = alloc_percpu(struct mc_debug_data);
++	if (!mcdb) {
+ 		pr_err("xen_mc_debug inactive\n");
+ 		static_key_slow_dec(&mc_debug);
+ 		return -ENOMEM;
+@@ -93,7 +95,7 @@ static int __init mc_debug_enable(void)
+ 	/* Be careful when switching to percpu debug data. */
+ 	local_irq_save(flags);
+ 	xen_mc_flush();
+-	mc_percpu_init(0);
++	mc_debug_data_ptr = mcdb;
+ 	local_irq_restore(flags);
+ 
+ 	pr_info("xen_mc_debug active\n");
+@@ -155,7 +157,7 @@ void xen_mc_flush(void)
+ 	trace_xen_mc_flush(b->mcidx, b->argidx, b->cbidx);
+ 
+ 	if (static_key_false(&mc_debug)) {
+-		mcdb = __this_cpu_read(mc_debug_data);
++		mcdb = get_mc_debug();
+ 		memcpy(mcdb->entries, b->entries,
+ 		       b->mcidx * sizeof(struct multicall_entry));
+ 	}
+@@ -235,7 +237,7 @@ struct multicall_space __xen_mc_entry(size_t args)
+ 
+ 	ret.mc = &b->entries[b->mcidx];
+ 	if (static_key_false(&mc_debug)) {
+-		struct mc_debug_data *mcdb = __this_cpu_read(mc_debug_data);
++		struct mc_debug_data *mcdb = get_mc_debug();
+ 
+ 		mcdb->caller[b->mcidx] = __builtin_return_address(0);
+ 		mcdb->argsz[b->mcidx] = args;
+diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c
+index 6863d3da7decfc..7ea57f728b89db 100644
+--- a/arch/x86/xen/smp_pv.c
++++ b/arch/x86/xen/smp_pv.c
+@@ -305,7 +305,6 @@ static int xen_pv_kick_ap(unsigned int cpu, struct task_struct *idle)
+ 		return rc;
+ 
+ 	xen_pmu_init(cpu);
+-	mc_percpu_init(cpu);
+ 
+ 	/*
+ 	 * Why is this a BUG? If the hypercall fails then everything can be
+diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
+index 63c13a2ccf556a..25e318ef27d6b0 100644
+--- a/arch/x86/xen/xen-ops.h
++++ b/arch/x86/xen/xen-ops.h
+@@ -261,9 +261,6 @@ void xen_mc_callback(void (*fn)(void *), void *data);
+  */
+ struct multicall_space xen_mc_extend_args(unsigned long op, size_t arg_size);
+ 
+-/* Do percpu data initialization for multicalls. */
+-void mc_percpu_init(unsigned int cpu);
+-
+ extern bool is_xen_pmu;
+ 
+ irqreturn_t xen_pmu_irq_handler(int irq, void *dev_id);
+diff --git a/block/bio-integrity.c b/block/bio-integrity.c
+index 9aed61fcd0bf94..456026c4a3c962 100644
+--- a/block/bio-integrity.c
++++ b/block/bio-integrity.c
+@@ -104,16 +104,12 @@ struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
+ }
+ EXPORT_SYMBOL(bio_integrity_alloc);
+ 
+-static void bio_integrity_unpin_bvec(struct bio_vec *bv, int nr_vecs,
+-				     bool dirty)
++static void bio_integrity_unpin_bvec(struct bio_vec *bv, int nr_vecs)
+ {
+ 	int i;
+ 
+-	for (i = 0; i < nr_vecs; i++) {
+-		if (dirty && !PageCompound(bv[i].bv_page))
+-			set_page_dirty_lock(bv[i].bv_page);
++	for (i = 0; i < nr_vecs; i++)
+ 		unpin_user_page(bv[i].bv_page);
+-	}
+ }
+ 
+ static void bio_integrity_uncopy_user(struct bio_integrity_payload *bip)
+@@ -129,7 +125,7 @@ static void bio_integrity_uncopy_user(struct bio_integrity_payload *bip)
+ 	ret = copy_to_iter(bvec_virt(bounce_bvec), bytes, &orig_iter);
+ 	WARN_ON_ONCE(ret != bytes);
+ 
+-	bio_integrity_unpin_bvec(orig_bvecs, orig_nr_vecs, true);
++	bio_integrity_unpin_bvec(orig_bvecs, orig_nr_vecs);
+ }
+ 
+ /**
+@@ -149,8 +145,7 @@ void bio_integrity_unmap_user(struct bio *bio)
+ 		return;
+ 	}
+ 
+-	bio_integrity_unpin_bvec(bip->bip_vec, bip->bip_max_vcnt,
+-			bio_data_dir(bio) == READ);
++	bio_integrity_unpin_bvec(bip->bip_vec, bip->bip_max_vcnt);
+ }
+ 
+ /**
+@@ -236,7 +231,7 @@ static int bio_integrity_copy_user(struct bio *bio, struct bio_vec *bvec,
+ 	}
+ 
+ 	if (write)
+-		bio_integrity_unpin_bvec(bvec, nr_vecs, false);
++		bio_integrity_unpin_bvec(bvec, nr_vecs);
+ 	else
+ 		memcpy(&bip->bip_vec[1], bvec, nr_vecs * sizeof(*bvec));
+ 
+@@ -362,7 +357,7 @@ int bio_integrity_map_user(struct bio *bio, void __user *ubuf, ssize_t bytes,
+ 	return 0;
+ 
+ release_pages:
+-	bio_integrity_unpin_bvec(bvec, nr_bvecs, false);
++	bio_integrity_unpin_bvec(bvec, nr_bvecs);
+ free_bvec:
+ 	if (bvec != stack_vec)
+ 		kfree(bvec);
+diff --git a/block/blk-core.c b/block/blk-core.c
+index 42023addf9cda6..c7b6c1f7635978 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -1121,8 +1121,8 @@ void blk_start_plug_nr_ios(struct blk_plug *plug, unsigned short nr_ios)
+ 		return;
+ 
+ 	plug->cur_ktime = 0;
+-	plug->mq_list = NULL;
+-	plug->cached_rq = NULL;
++	rq_list_init(&plug->mq_list);
++	rq_list_init(&plug->cached_rqs);
+ 	plug->nr_ios = min_t(unsigned short, nr_ios, BLK_MAX_REQUEST_COUNT);
+ 	plug->rq_count = 0;
+ 	plug->multiple_queues = false;
+@@ -1218,7 +1218,7 @@ void __blk_flush_plug(struct blk_plug *plug, bool from_schedule)
+ 	 * queue for cached requests, we don't want a blocked task holding
+ 	 * up a queue freeze/quiesce event.
+ 	 */
+-	if (unlikely(!rq_list_empty(plug->cached_rq)))
++	if (unlikely(!rq_list_empty(&plug->cached_rqs)))
+ 		blk_mq_free_plug_rqs(plug);
+ 
+ 	plug->cur_ktime = 0;
+diff --git a/block/blk-merge.c b/block/blk-merge.c
+index 5baa950f34fe21..ceac64e796ea82 100644
+--- a/block/blk-merge.c
++++ b/block/blk-merge.c
+@@ -1175,7 +1175,7 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
+ 	struct blk_plug *plug = current->plug;
+ 	struct request *rq;
+ 
+-	if (!plug || rq_list_empty(plug->mq_list))
++	if (!plug || rq_list_empty(&plug->mq_list))
+ 		return false;
+ 
+ 	rq_list_for_each(&plug->mq_list, rq) {
+diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c
+index 9638b25fd52124..ad8d6a363f24ae 100644
+--- a/block/blk-mq-cpumap.c
++++ b/block/blk-mq-cpumap.c
+@@ -11,6 +11,7 @@
+ #include <linux/smp.h>
+ #include <linux/cpu.h>
+ #include <linux/group_cpus.h>
++#include <linux/device/bus.h>
+ 
+ #include "blk.h"
+ #include "blk-mq.h"
+@@ -54,3 +55,39 @@ int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int index)
+ 
+ 	return NUMA_NO_NODE;
+ }
++
++/**
++ * blk_mq_map_hw_queues - Create CPU to hardware queue mapping
++ * @qmap:	CPU to hardware queue map
++ * @dev:	The device to map queues
++ * @offset:	Queue offset to use for the device
++ *
++ * Create a CPU to hardware queue mapping in @qmap. The struct bus_type
++ * irq_get_affinity callback will be used to retrieve the affinity.
++ */
++void blk_mq_map_hw_queues(struct blk_mq_queue_map *qmap,
++			  struct device *dev, unsigned int offset)
++
++{
++	const struct cpumask *mask;
++	unsigned int queue, cpu;
++
++	if (!dev->bus->irq_get_affinity)
++		goto fallback;
++
++	for (queue = 0; queue < qmap->nr_queues; queue++) {
++		mask = dev->bus->irq_get_affinity(dev, queue + offset);
++		if (!mask)
++			goto fallback;
++
++		for_each_cpu(cpu, mask)
++			qmap->mq_map[cpu] = qmap->queue_offset + queue;
++	}
++
++	return;
++
++fallback:
++	WARN_ON_ONCE(qmap->nr_queues > 1);
++	blk_mq_clear_mq_map(qmap);
++}
++EXPORT_SYMBOL_GPL(blk_mq_map_hw_queues);
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index 662e52ab06467f..f26bee56269363 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -506,7 +506,7 @@ __blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data)
+ 		prefetch(tags->static_rqs[tag]);
+ 		tag_mask &= ~(1UL << i);
+ 		rq = blk_mq_rq_ctx_init(data, tags, tag);
+-		rq_list_add(data->cached_rq, rq);
++		rq_list_add_head(data->cached_rqs, rq);
+ 		nr++;
+ 	}
+ 	if (!(data->rq_flags & RQF_SCHED_TAGS))
+@@ -515,7 +515,7 @@ __blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data)
+ 	percpu_ref_get_many(&data->q->q_usage_counter, nr - 1);
+ 	data->nr_tags -= nr;
+ 
+-	return rq_list_pop(data->cached_rq);
++	return rq_list_pop(data->cached_rqs);
+ }
+ 
+ static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
+@@ -612,7 +612,7 @@ static struct request *blk_mq_rq_cache_fill(struct request_queue *q,
+ 		.flags		= flags,
+ 		.cmd_flags	= opf,
+ 		.nr_tags	= plug->nr_ios,
+-		.cached_rq	= &plug->cached_rq,
++		.cached_rqs	= &plug->cached_rqs,
+ 	};
+ 	struct request *rq;
+ 
+@@ -637,14 +637,14 @@ static struct request *blk_mq_alloc_cached_request(struct request_queue *q,
+ 	if (!plug)
+ 		return NULL;
+ 
+-	if (rq_list_empty(plug->cached_rq)) {
++	if (rq_list_empty(&plug->cached_rqs)) {
+ 		if (plug->nr_ios == 1)
+ 			return NULL;
+ 		rq = blk_mq_rq_cache_fill(q, plug, opf, flags);
+ 		if (!rq)
+ 			return NULL;
+ 	} else {
+-		rq = rq_list_peek(&plug->cached_rq);
++		rq = rq_list_peek(&plug->cached_rqs);
+ 		if (!rq || rq->q != q)
+ 			return NULL;
+ 
+@@ -653,7 +653,7 @@ static struct request *blk_mq_alloc_cached_request(struct request_queue *q,
+ 		if (op_is_flush(rq->cmd_flags) != op_is_flush(opf))
+ 			return NULL;
+ 
+-		plug->cached_rq = rq_list_next(rq);
++		rq_list_pop(&plug->cached_rqs);
+ 		blk_mq_rq_time_init(rq, 0);
+ 	}
+ 
+@@ -830,7 +830,7 @@ void blk_mq_free_plug_rqs(struct blk_plug *plug)
+ {
+ 	struct request *rq;
+ 
+-	while ((rq = rq_list_pop(&plug->cached_rq)) != NULL)
++	while ((rq = rq_list_pop(&plug->cached_rqs)) != NULL)
+ 		blk_mq_free_request(rq);
+ }
+ 
+@@ -1386,8 +1386,7 @@ static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
+ 	 */
+ 	if (!plug->has_elevator && (rq->rq_flags & RQF_SCHED_TAGS))
+ 		plug->has_elevator = true;
+-	rq->rq_next = NULL;
+-	rq_list_add(&plug->mq_list, rq);
++	rq_list_add_tail(&plug->mq_list, rq);
+ 	plug->rq_count++;
+ }
+ 
+@@ -2781,7 +2780,7 @@ static void blk_mq_plug_issue_direct(struct blk_plug *plug)
+ 	blk_status_t ret = BLK_STS_OK;
+ 
+ 	while ((rq = rq_list_pop(&plug->mq_list))) {
+-		bool last = rq_list_empty(plug->mq_list);
++		bool last = rq_list_empty(&plug->mq_list);
+ 
+ 		if (hctx != rq->mq_hctx) {
+ 			if (hctx) {
+@@ -2824,8 +2823,7 @@ static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched)
+ {
+ 	struct blk_mq_hw_ctx *this_hctx = NULL;
+ 	struct blk_mq_ctx *this_ctx = NULL;
+-	struct request *requeue_list = NULL;
+-	struct request **requeue_lastp = &requeue_list;
++	struct rq_list requeue_list = {};
+ 	unsigned int depth = 0;
+ 	bool is_passthrough = false;
+ 	LIST_HEAD(list);
+@@ -2839,12 +2837,12 @@ static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched)
+ 			is_passthrough = blk_rq_is_passthrough(rq);
+ 		} else if (this_hctx != rq->mq_hctx || this_ctx != rq->mq_ctx ||
+ 			   is_passthrough != blk_rq_is_passthrough(rq)) {
+-			rq_list_add_tail(&requeue_lastp, rq);
++			rq_list_add_tail(&requeue_list, rq);
+ 			continue;
+ 		}
+-		list_add(&rq->queuelist, &list);
++		list_add_tail(&rq->queuelist, &list);
+ 		depth++;
+-	} while (!rq_list_empty(plug->mq_list));
++	} while (!rq_list_empty(&plug->mq_list));
+ 
+ 	plug->mq_list = requeue_list;
+ 	trace_block_unplug(this_hctx->queue, depth, !from_sched);
+@@ -2899,19 +2897,19 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
+ 		if (q->mq_ops->queue_rqs) {
+ 			blk_mq_run_dispatch_ops(q,
+ 				__blk_mq_flush_plug_list(q, plug));
+-			if (rq_list_empty(plug->mq_list))
++			if (rq_list_empty(&plug->mq_list))
+ 				return;
+ 		}
+ 
+ 		blk_mq_run_dispatch_ops(q,
+ 				blk_mq_plug_issue_direct(plug));
+-		if (rq_list_empty(plug->mq_list))
++		if (rq_list_empty(&plug->mq_list))
+ 			return;
+ 	}
+ 
+ 	do {
+ 		blk_mq_dispatch_plug_list(plug, from_schedule);
+-	} while (!rq_list_empty(plug->mq_list));
++	} while (!rq_list_empty(&plug->mq_list));
+ }
+ 
+ static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
+@@ -2976,7 +2974,7 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
+ 	if (plug) {
+ 		data.nr_tags = plug->nr_ios;
+ 		plug->nr_ios = 1;
+-		data.cached_rq = &plug->cached_rq;
++		data.cached_rqs = &plug->cached_rqs;
+ 	}
+ 
+ 	rq = __blk_mq_alloc_requests(&data);
+@@ -2999,7 +2997,7 @@ static struct request *blk_mq_peek_cached_request(struct blk_plug *plug,
+ 
+ 	if (!plug)
+ 		return NULL;
+-	rq = rq_list_peek(&plug->cached_rq);
++	rq = rq_list_peek(&plug->cached_rqs);
+ 	if (!rq || rq->q != q)
+ 		return NULL;
+ 	if (type != rq->mq_hctx->type &&
+@@ -3013,14 +3011,14 @@ static struct request *blk_mq_peek_cached_request(struct blk_plug *plug,
+ static void blk_mq_use_cached_rq(struct request *rq, struct blk_plug *plug,
+ 		struct bio *bio)
+ {
+-	WARN_ON_ONCE(rq_list_peek(&plug->cached_rq) != rq);
++	if (rq_list_pop(&plug->cached_rqs) != rq)
++		WARN_ON_ONCE(1);
+ 
+ 	/*
+ 	 * If any qos ->throttle() end up blocking, we will have flushed the
+ 	 * plug and hence killed the cached_rq list as well. Pop this entry
+ 	 * before we throttle.
+ 	 */
+-	plug->cached_rq = rq_list_next(rq);
+ 	rq_qos_throttle(rq->q, bio);
+ 
+ 	blk_mq_rq_time_init(rq, 0);
+diff --git a/block/blk-mq.h b/block/blk-mq.h
+index 364c0415293cf7..a80d3b3105f9ed 100644
+--- a/block/blk-mq.h
++++ b/block/blk-mq.h
+@@ -155,7 +155,7 @@ struct blk_mq_alloc_data {
+ 
+ 	/* allocate multiple requests/tags in one go */
+ 	unsigned int nr_tags;
+-	struct request **cached_rq;
++	struct rq_list *cached_rqs;
+ 
+ 	/* input & output parameter */
+ 	struct blk_mq_ctx *ctx;
+diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
+index 692b27266220fe..0e2520d929e1db 100644
+--- a/block/blk-sysfs.c
++++ b/block/blk-sysfs.c
+@@ -813,6 +813,8 @@ int blk_register_queue(struct gendisk *disk)
+ out_debugfs_remove:
+ 	blk_debugfs_remove(disk);
+ 	mutex_unlock(&q->sysfs_lock);
++	if (queue_is_mq(q))
++		blk_mq_sysfs_unregister(disk);
+ out_put_queue_kobj:
+ 	kobject_put(&disk->queue_kobj);
+ 	mutex_unlock(&q->sysfs_dir_lock);
+diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c
+index 9c76fb1ad2ec50..a7442dc0bd8e10 100644
+--- a/drivers/ata/libata-sata.c
++++ b/drivers/ata/libata-sata.c
+@@ -1510,6 +1510,8 @@ int ata_eh_get_ncq_success_sense(struct ata_link *link)
+ 	unsigned int err_mask, tag;
+ 	u8 *sense, sk = 0, asc = 0, ascq = 0;
+ 	u64 sense_valid, val;
++	u16 extended_sense;
++	bool aux_icc_valid;
+ 	int ret = 0;
+ 
+ 	err_mask = ata_read_log_page(dev, ATA_LOG_SENSE_NCQ, 0, buf, 2);
+@@ -1529,6 +1531,8 @@ int ata_eh_get_ncq_success_sense(struct ata_link *link)
+ 
+ 	sense_valid = (u64)buf[8] | ((u64)buf[9] << 8) |
+ 		((u64)buf[10] << 16) | ((u64)buf[11] << 24);
++	extended_sense = get_unaligned_le16(&buf[14]);
++	aux_icc_valid = extended_sense & BIT(15);
+ 
+ 	ata_qc_for_each_raw(ap, qc, tag) {
+ 		if (!(qc->flags & ATA_QCFLAG_EH) ||
+@@ -1556,6 +1560,17 @@ int ata_eh_get_ncq_success_sense(struct ata_link *link)
+ 			continue;
+ 		}
+ 
++		qc->result_tf.nsect = sense[6];
++		qc->result_tf.hob_nsect = sense[7];
++		qc->result_tf.lbal = sense[8];
++		qc->result_tf.lbam = sense[9];
++		qc->result_tf.lbah = sense[10];
++		qc->result_tf.hob_lbal = sense[11];
++		qc->result_tf.hob_lbam = sense[12];
++		qc->result_tf.hob_lbah = sense[13];
++		if (aux_icc_valid)
++			qc->result_tf.auxiliary = get_unaligned_le32(&sense[16]);
++
+ 		/* Set sense without also setting scsicmd->result */
+ 		scsi_build_sense_buffer(dev->flags & ATA_DFLAG_D_SENSE,
+ 					qc->scsicmd->sense_buffer, sk,
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index 86cc3b19faae86..8827a768284ac4 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -233,72 +233,6 @@ static void loop_set_size(struct loop_device *lo, loff_t size)
+ 		kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE);
+ }
+ 
+-static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos)
+-{
+-	struct iov_iter i;
+-	ssize_t bw;
+-
+-	iov_iter_bvec(&i, ITER_SOURCE, bvec, 1, bvec->bv_len);
+-
+-	bw = vfs_iter_write(file, &i, ppos, 0);
+-
+-	if (likely(bw ==  bvec->bv_len))
+-		return 0;
+-
+-	printk_ratelimited(KERN_ERR
+-		"loop: Write error at byte offset %llu, length %i.\n",
+-		(unsigned long long)*ppos, bvec->bv_len);
+-	if (bw >= 0)
+-		bw = -EIO;
+-	return bw;
+-}
+-
+-static int lo_write_simple(struct loop_device *lo, struct request *rq,
+-		loff_t pos)
+-{
+-	struct bio_vec bvec;
+-	struct req_iterator iter;
+-	int ret = 0;
+-
+-	rq_for_each_segment(bvec, rq, iter) {
+-		ret = lo_write_bvec(lo->lo_backing_file, &bvec, &pos);
+-		if (ret < 0)
+-			break;
+-		cond_resched();
+-	}
+-
+-	return ret;
+-}
+-
+-static int lo_read_simple(struct loop_device *lo, struct request *rq,
+-		loff_t pos)
+-{
+-	struct bio_vec bvec;
+-	struct req_iterator iter;
+-	struct iov_iter i;
+-	ssize_t len;
+-
+-	rq_for_each_segment(bvec, rq, iter) {
+-		iov_iter_bvec(&i, ITER_DEST, &bvec, 1, bvec.bv_len);
+-		len = vfs_iter_read(lo->lo_backing_file, &i, &pos, 0);
+-		if (len < 0)
+-			return len;
+-
+-		flush_dcache_page(bvec.bv_page);
+-
+-		if (len != bvec.bv_len) {
+-			struct bio *bio;
+-
+-			__rq_for_each_bio(bio, rq)
+-				zero_fill_bio(bio);
+-			break;
+-		}
+-		cond_resched();
+-	}
+-
+-	return 0;
+-}
+-
+ static void loop_clear_limits(struct loop_device *lo, int mode)
+ {
+ 	struct queue_limits lim = queue_limits_start_update(lo->lo_queue);
+@@ -357,7 +291,7 @@ static void lo_complete_rq(struct request *rq)
+ 	struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
+ 	blk_status_t ret = BLK_STS_OK;
+ 
+-	if (!cmd->use_aio || cmd->ret < 0 || cmd->ret == blk_rq_bytes(rq) ||
++	if (cmd->ret < 0 || cmd->ret == blk_rq_bytes(rq) ||
+ 	    req_op(rq) != REQ_OP_READ) {
+ 		if (cmd->ret < 0)
+ 			ret = errno_to_blk_status(cmd->ret);
+@@ -373,14 +307,13 @@ static void lo_complete_rq(struct request *rq)
+ 		cmd->ret = 0;
+ 		blk_mq_requeue_request(rq, true);
+ 	} else {
+-		if (cmd->use_aio) {
+-			struct bio *bio = rq->bio;
++		struct bio *bio = rq->bio;
+ 
+-			while (bio) {
+-				zero_fill_bio(bio);
+-				bio = bio->bi_next;
+-			}
++		while (bio) {
++			zero_fill_bio(bio);
++			bio = bio->bi_next;
+ 		}
++
+ 		ret = BLK_STS_IOERR;
+ end_io:
+ 		blk_mq_end_request(rq, ret);
+@@ -460,9 +393,14 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
+ 
+ 	cmd->iocb.ki_pos = pos;
+ 	cmd->iocb.ki_filp = file;
+-	cmd->iocb.ki_complete = lo_rw_aio_complete;
+-	cmd->iocb.ki_flags = IOCB_DIRECT;
+-	cmd->iocb.ki_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0);
++	cmd->iocb.ki_ioprio = req_get_ioprio(rq);
++	if (cmd->use_aio) {
++		cmd->iocb.ki_complete = lo_rw_aio_complete;
++		cmd->iocb.ki_flags = IOCB_DIRECT;
++	} else {
++		cmd->iocb.ki_complete = NULL;
++		cmd->iocb.ki_flags = 0;
++	}
+ 
+ 	if (rw == ITER_SOURCE)
+ 		ret = file->f_op->write_iter(&cmd->iocb, &iter);
+@@ -473,7 +411,7 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
+ 
+ 	if (ret != -EIOCBQUEUED)
+ 		lo_rw_aio_complete(&cmd->iocb, ret);
+-	return 0;
++	return -EIOCBQUEUED;
+ }
+ 
+ static int do_req_filebacked(struct loop_device *lo, struct request *rq)
+@@ -481,15 +419,6 @@ static int do_req_filebacked(struct loop_device *lo, struct request *rq)
+ 	struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
+ 	loff_t pos = ((loff_t) blk_rq_pos(rq) << 9) + lo->lo_offset;
+ 
+-	/*
+-	 * lo_write_simple and lo_read_simple should have been covered
+-	 * by io submit style function like lo_rw_aio(), one blocker
+-	 * is that lo_read_simple() need to call flush_dcache_page after
+-	 * the page is written from kernel, and it isn't easy to handle
+-	 * this in io submit style function which submits all segments
+-	 * of the req at one time. And direct read IO doesn't need to
+-	 * run flush_dcache_page().
+-	 */
+ 	switch (req_op(rq)) {
+ 	case REQ_OP_FLUSH:
+ 		return lo_req_flush(lo, rq);
+@@ -505,15 +434,9 @@ static int do_req_filebacked(struct loop_device *lo, struct request *rq)
+ 	case REQ_OP_DISCARD:
+ 		return lo_fallocate(lo, rq, pos, FALLOC_FL_PUNCH_HOLE);
+ 	case REQ_OP_WRITE:
+-		if (cmd->use_aio)
+-			return lo_rw_aio(lo, cmd, pos, ITER_SOURCE);
+-		else
+-			return lo_write_simple(lo, rq, pos);
++		return lo_rw_aio(lo, cmd, pos, ITER_SOURCE);
+ 	case REQ_OP_READ:
+-		if (cmd->use_aio)
+-			return lo_rw_aio(lo, cmd, pos, ITER_DEST);
+-		else
+-			return lo_read_simple(lo, rq, pos);
++		return lo_rw_aio(lo, cmd, pos, ITER_DEST);
+ 	default:
+ 		WARN_ON_ONCE(1);
+ 		return -EIO;
+@@ -645,19 +568,20 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
+ 	 * dependency.
+ 	 */
+ 	fput(old_file);
++	dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0);
+ 	if (partscan)
+ 		loop_reread_partitions(lo);
+ 
+ 	error = 0;
+ done:
+-	/* enable and uncork uevent now that we are done */
+-	dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0);
++	kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE);
+ 	return error;
+ 
+ out_err:
+ 	loop_global_unlock(lo, is_loop);
+ out_putf:
+ 	fput(file);
++	dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0);
+ 	goto done;
+ }
+ 
+@@ -1111,8 +1035,8 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
+ 	if (partscan)
+ 		clear_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state);
+ 
+-	/* enable and uncork uevent now that we are done */
+ 	dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0);
++	kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE);
+ 
+ 	loop_global_unlock(lo, is_loop);
+ 	if (partscan)
+@@ -1888,7 +1812,6 @@ static void loop_handle_cmd(struct loop_cmd *cmd)
+ 	struct loop_device *lo = rq->q->queuedata;
+ 	int ret = 0;
+ 	struct mem_cgroup *old_memcg = NULL;
+-	const bool use_aio = cmd->use_aio;
+ 
+ 	if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) {
+ 		ret = -EIO;
+@@ -1918,7 +1841,7 @@ static void loop_handle_cmd(struct loop_cmd *cmd)
+ 	}
+  failed:
+ 	/* complete non-aio request */
+-	if (!use_aio || ret) {
++	if (ret != -EIOCBQUEUED) {
+ 		if (ret == -EOPNOTSUPP)
+ 			cmd->ret = ret;
+ 		else
+diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
+index c479348ce8ff69..f10369ad90f768 100644
+--- a/drivers/block/null_blk/main.c
++++ b/drivers/block/null_blk/main.c
+@@ -1638,10 +1638,9 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
+ 	return BLK_STS_OK;
+ }
+ 
+-static void null_queue_rqs(struct request **rqlist)
++static void null_queue_rqs(struct rq_list *rqlist)
+ {
+-	struct request *requeue_list = NULL;
+-	struct request **requeue_lastp = &requeue_list;
++	struct rq_list requeue_list = {};
+ 	struct blk_mq_queue_data bd = { };
+ 	blk_status_t ret;
+ 
+@@ -1651,8 +1650,8 @@ static void null_queue_rqs(struct request **rqlist)
+ 		bd.rq = rq;
+ 		ret = null_queue_rq(rq->mq_hctx, &bd);
+ 		if (ret != BLK_STS_OK)
+-			rq_list_add_tail(&requeue_lastp, rq);
+-	} while (!rq_list_empty(*rqlist));
++			rq_list_add_tail(&requeue_list, rq);
++	} while (!rq_list_empty(rqlist));
+ 
+ 	*rqlist = requeue_list;
+ }
+diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
+index 44a6937a4b65cc..fd6c565f8a507c 100644
+--- a/drivers/block/virtio_blk.c
++++ b/drivers/block/virtio_blk.c
+@@ -472,7 +472,7 @@ static bool virtblk_prep_rq_batch(struct request *req)
+ }
+ 
+ static void virtblk_add_req_batch(struct virtio_blk_vq *vq,
+-					struct request **rqlist)
++		struct rq_list *rqlist)
+ {
+ 	struct request *req;
+ 	unsigned long flags;
+@@ -499,11 +499,10 @@ static void virtblk_add_req_batch(struct virtio_blk_vq *vq,
+ 		virtqueue_notify(vq->vq);
+ }
+ 
+-static void virtio_queue_rqs(struct request **rqlist)
++static void virtio_queue_rqs(struct rq_list *rqlist)
+ {
+-	struct request *submit_list = NULL;
+-	struct request *requeue_list = NULL;
+-	struct request **requeue_lastp = &requeue_list;
++	struct rq_list submit_list = { };
++	struct rq_list requeue_list = { };
+ 	struct virtio_blk_vq *vq = NULL;
+ 	struct request *req;
+ 
+@@ -515,9 +514,9 @@ static void virtio_queue_rqs(struct request **rqlist)
+ 		vq = this_vq;
+ 
+ 		if (virtblk_prep_rq_batch(req))
+-			rq_list_add(&submit_list, req); /* reverse order */
++			rq_list_add_tail(&submit_list, req);
+ 		else
+-			rq_list_add_tail(&requeue_lastp, req);
++			rq_list_add_tail(&requeue_list, req);
+ 	}
+ 
+ 	if (vq)
+diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
+index 0a6ca6dfb94841..59eb9486642232 100644
+--- a/drivers/bluetooth/btrtl.c
++++ b/drivers/bluetooth/btrtl.c
+@@ -1215,6 +1215,8 @@ struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev,
+ 			rtl_dev_err(hdev, "mandatory config file %s not found",
+ 				    btrtl_dev->ic_info->cfg_name);
+ 			ret = btrtl_dev->cfg_len;
++			if (!ret)
++				ret = -EINVAL;
+ 			goto err_free;
+ 		}
+ 	}
+diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
+index 7651321d351ccd..9ac22e4a070bef 100644
+--- a/drivers/bluetooth/hci_vhci.c
++++ b/drivers/bluetooth/hci_vhci.c
+@@ -289,18 +289,18 @@ static void vhci_coredump(struct hci_dev *hdev)
+ 
+ static void vhci_coredump_hdr(struct hci_dev *hdev, struct sk_buff *skb)
+ {
+-	char buf[80];
++	const char *buf;
+ 
+-	snprintf(buf, sizeof(buf), "Controller Name: vhci_ctrl\n");
++	buf = "Controller Name: vhci_ctrl\n";
+ 	skb_put_data(skb, buf, strlen(buf));
+ 
+-	snprintf(buf, sizeof(buf), "Firmware Version: vhci_fw\n");
++	buf = "Firmware Version: vhci_fw\n";
+ 	skb_put_data(skb, buf, strlen(buf));
+ 
+-	snprintf(buf, sizeof(buf), "Driver: vhci_drv\n");
++	buf = "Driver: vhci_drv\n";
+ 	skb_put_data(skb, buf, strlen(buf));
+ 
+-	snprintf(buf, sizeof(buf), "Vendor: vhci\n");
++	buf = "Vendor: vhci\n";
+ 	skb_put_data(skb, buf, strlen(buf));
+ }
+ 
+diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
+index f98c9438760c97..67b4e3d18ffe22 100644
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -2748,10 +2748,18 @@ EXPORT_SYMBOL(cpufreq_update_policy);
+  */
+ void cpufreq_update_limits(unsigned int cpu)
+ {
++	struct cpufreq_policy *policy;
++
++	policy = cpufreq_cpu_get(cpu);
++	if (!policy)
++		return;
++
+ 	if (cpufreq_driver->update_limits)
+ 		cpufreq_driver->update_limits(cpu);
+ 	else
+ 		cpufreq_update_policy(cpu);
++
++	cpufreq_cpu_put(policy);
+ }
+ EXPORT_SYMBOL_GPL(cpufreq_update_limits);
+ 
+diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
+index 8ed2bb01a619fd..44860630050019 100644
+--- a/drivers/crypto/caam/qi.c
++++ b/drivers/crypto/caam/qi.c
+@@ -122,12 +122,12 @@ int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req)
+ 	qm_fd_addr_set64(&fd, addr);
+ 
+ 	do {
++		refcount_inc(&req->drv_ctx->refcnt);
+ 		ret = qman_enqueue(req->drv_ctx->req_fq, &fd);
+-		if (likely(!ret)) {
+-			refcount_inc(&req->drv_ctx->refcnt);
++		if (likely(!ret))
+ 			return 0;
+-		}
+ 
++		refcount_dec(&req->drv_ctx->refcnt);
+ 		if (ret != -EBUSY)
+ 			break;
+ 		num_retries++;
+diff --git a/drivers/crypto/tegra/tegra-se-aes.c b/drivers/crypto/tegra/tegra-se-aes.c
+index 0ed0515e1ed54c..cd52807e76afdb 100644
+--- a/drivers/crypto/tegra/tegra-se-aes.c
++++ b/drivers/crypto/tegra/tegra-se-aes.c
+@@ -263,13 +263,7 @@ static int tegra_aes_do_one_req(struct crypto_engine *engine, void *areq)
+ 	unsigned int cmdlen;
+ 	int ret;
+ 
+-	rctx->datbuf.buf = dma_alloc_coherent(se->dev, SE_AES_BUFLEN,
+-					      &rctx->datbuf.addr, GFP_KERNEL);
+-	if (!rctx->datbuf.buf)
+-		return -ENOMEM;
+-
+-	rctx->datbuf.size = SE_AES_BUFLEN;
+-	rctx->iv = (u32 *)req->iv;
++	rctx->iv = (ctx->alg == SE_ALG_ECB) ? NULL : (u32 *)req->iv;
+ 	rctx->len = req->cryptlen;
+ 
+ 	/* Pad input to AES Block size */
+@@ -278,6 +272,12 @@ static int tegra_aes_do_one_req(struct crypto_engine *engine, void *areq)
+ 			rctx->len += AES_BLOCK_SIZE - (rctx->len % AES_BLOCK_SIZE);
+ 	}
+ 
++	rctx->datbuf.size = rctx->len;
++	rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->datbuf.size,
++					      &rctx->datbuf.addr, GFP_KERNEL);
++	if (!rctx->datbuf.buf)
++		return -ENOMEM;
++
+ 	scatterwalk_map_and_copy(rctx->datbuf.buf, req->src, 0, req->cryptlen, 0);
+ 
+ 	/* Prepare the command and submit for execution */
+@@ -289,7 +289,7 @@ static int tegra_aes_do_one_req(struct crypto_engine *engine, void *areq)
+ 	scatterwalk_map_and_copy(rctx->datbuf.buf, req->dst, 0, req->cryptlen, 1);
+ 
+ 	/* Free the buffer */
+-	dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
++	dma_free_coherent(ctx->se->dev, rctx->datbuf.size,
+ 			  rctx->datbuf.buf, rctx->datbuf.addr);
+ 
+ 	crypto_finalize_skcipher_request(se->engine, req, ret);
+@@ -443,9 +443,6 @@ static int tegra_aes_crypt(struct skcipher_request *req, bool encrypt)
+ 	if (!req->cryptlen)
+ 		return 0;
+ 
+-	if (ctx->alg == SE_ALG_ECB)
+-		req->iv = NULL;
+-
+ 	rctx->encrypt = encrypt;
+ 	rctx->config = tegra234_aes_cfg(ctx->alg, encrypt);
+ 	rctx->crypto_config = tegra234_aes_crypto_cfg(ctx->alg, encrypt);
+@@ -1120,6 +1117,11 @@ static int tegra_ccm_crypt_init(struct aead_request *req, struct tegra_se *se,
+ 	rctx->assoclen = req->assoclen;
+ 	rctx->authsize = crypto_aead_authsize(tfm);
+ 
++	if (rctx->encrypt)
++		rctx->cryptlen = req->cryptlen;
++	else
++		rctx->cryptlen = req->cryptlen - rctx->authsize;
++
+ 	memcpy(iv, req->iv, 16);
+ 
+ 	ret = tegra_ccm_check_iv(iv);
+@@ -1148,30 +1150,26 @@ static int tegra_ccm_do_one_req(struct crypto_engine *engine, void *areq)
+ 	struct tegra_se *se = ctx->se;
+ 	int ret;
+ 
++	ret = tegra_ccm_crypt_init(req, se, rctx);
++	if (ret)
++		return ret;
++
+ 	/* Allocate buffers required */
+-	rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN,
++	rctx->inbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen + 100;
++	rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->inbuf.size,
+ 					     &rctx->inbuf.addr, GFP_KERNEL);
+ 	if (!rctx->inbuf.buf)
+ 		return -ENOMEM;
+ 
+-	rctx->inbuf.size = SE_AES_BUFLEN;
+-
+-	rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN,
++	rctx->outbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen + 100;
++	rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->outbuf.size,
+ 					      &rctx->outbuf.addr, GFP_KERNEL);
+ 	if (!rctx->outbuf.buf) {
+ 		ret = -ENOMEM;
+ 		goto outbuf_err;
+ 	}
+ 
+-	rctx->outbuf.size = SE_AES_BUFLEN;
+-
+-	ret = tegra_ccm_crypt_init(req, se, rctx);
+-	if (ret)
+-		goto out;
+-
+ 	if (rctx->encrypt) {
+-		rctx->cryptlen = req->cryptlen;
+-
+ 		/* CBC MAC Operation */
+ 		ret = tegra_ccm_compute_auth(ctx, rctx);
+ 		if (ret)
+@@ -1182,10 +1180,6 @@ static int tegra_ccm_do_one_req(struct crypto_engine *engine, void *areq)
+ 		if (ret)
+ 			goto out;
+ 	} else {
+-		rctx->cryptlen = req->cryptlen - ctx->authsize;
+-		if (ret)
+-			goto out;
+-
+ 		/* CTR operation */
+ 		ret = tegra_ccm_do_ctr(ctx, rctx);
+ 		if (ret)
+@@ -1198,11 +1192,11 @@ static int tegra_ccm_do_one_req(struct crypto_engine *engine, void *areq)
+ 	}
+ 
+ out:
+-	dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
++	dma_free_coherent(ctx->se->dev, rctx->inbuf.size,
+ 			  rctx->outbuf.buf, rctx->outbuf.addr);
+ 
+ outbuf_err:
+-	dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
++	dma_free_coherent(ctx->se->dev, rctx->outbuf.size,
+ 			  rctx->inbuf.buf, rctx->inbuf.addr);
+ 
+ 	crypto_finalize_aead_request(ctx->se->engine, req, ret);
+@@ -1218,23 +1212,6 @@ static int tegra_gcm_do_one_req(struct crypto_engine *engine, void *areq)
+ 	struct tegra_aead_reqctx *rctx = aead_request_ctx(req);
+ 	int ret;
+ 
+-	/* Allocate buffers required */
+-	rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN,
+-					     &rctx->inbuf.addr, GFP_KERNEL);
+-	if (!rctx->inbuf.buf)
+-		return -ENOMEM;
+-
+-	rctx->inbuf.size = SE_AES_BUFLEN;
+-
+-	rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN,
+-					      &rctx->outbuf.addr, GFP_KERNEL);
+-	if (!rctx->outbuf.buf) {
+-		ret = -ENOMEM;
+-		goto outbuf_err;
+-	}
+-
+-	rctx->outbuf.size = SE_AES_BUFLEN;
+-
+ 	rctx->src_sg = req->src;
+ 	rctx->dst_sg = req->dst;
+ 	rctx->assoclen = req->assoclen;
+@@ -1248,6 +1225,21 @@ static int tegra_gcm_do_one_req(struct crypto_engine *engine, void *areq)
+ 	memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE);
+ 	rctx->iv[3] = (1 << 24);
+ 
++	/* Allocate buffers required */
++	rctx->inbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen;
++	rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->inbuf.size,
++					     &rctx->inbuf.addr, GFP_KERNEL);
++	if (!rctx->inbuf.buf)
++		return -ENOMEM;
++
++	rctx->outbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen;
++	rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->outbuf.size,
++					      &rctx->outbuf.addr, GFP_KERNEL);
++	if (!rctx->outbuf.buf) {
++		ret = -ENOMEM;
++		goto outbuf_err;
++	}
++
+ 	/* If there is associated data perform GMAC operation */
+ 	if (rctx->assoclen) {
+ 		ret = tegra_gcm_do_gmac(ctx, rctx);
+@@ -1271,11 +1263,11 @@ static int tegra_gcm_do_one_req(struct crypto_engine *engine, void *areq)
+ 		ret = tegra_gcm_do_verify(ctx->se, rctx);
+ 
+ out:
+-	dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
++	dma_free_coherent(ctx->se->dev, rctx->outbuf.size,
+ 			  rctx->outbuf.buf, rctx->outbuf.addr);
+ 
+ outbuf_err:
+-	dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
++	dma_free_coherent(ctx->se->dev, rctx->inbuf.size,
+ 			  rctx->inbuf.buf, rctx->inbuf.addr);
+ 
+ 	/* Finalize the request if there are no errors */
+@@ -1502,6 +1494,11 @@ static int tegra_cmac_do_update(struct ahash_request *req)
+ 		return 0;
+ 	}
+ 
++	rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->datbuf.size,
++					      &rctx->datbuf.addr, GFP_KERNEL);
++	if (!rctx->datbuf.buf)
++		return -ENOMEM;
++
+ 	/* Copy the previous residue first */
+ 	if (rctx->residue.size)
+ 		memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
+@@ -1527,6 +1524,9 @@ static int tegra_cmac_do_update(struct ahash_request *req)
+ 
+ 	tegra_cmac_copy_result(ctx->se, rctx);
+ 
++	dma_free_coherent(ctx->se->dev, rctx->datbuf.size,
++			  rctx->datbuf.buf, rctx->datbuf.addr);
++
+ 	return ret;
+ }
+ 
+@@ -1541,10 +1541,20 @@ static int tegra_cmac_do_final(struct ahash_request *req)
+ 
+ 	if (!req->nbytes && !rctx->total_len && ctx->fallback_tfm) {
+ 		return crypto_shash_tfm_digest(ctx->fallback_tfm,
+-					rctx->datbuf.buf, 0, req->result);
++					NULL, 0, req->result);
++	}
++
++	if (rctx->residue.size) {
++		rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->residue.size,
++						      &rctx->datbuf.addr, GFP_KERNEL);
++		if (!rctx->datbuf.buf) {
++			ret = -ENOMEM;
++			goto out_free;
++		}
++
++		memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
+ 	}
+ 
+-	memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
+ 	rctx->datbuf.size = rctx->residue.size;
+ 	rctx->total_len += rctx->residue.size;
+ 	rctx->config = tegra234_aes_cfg(SE_ALG_CMAC, 0);
+@@ -1570,8 +1580,10 @@ static int tegra_cmac_do_final(struct ahash_request *req)
+ 		writel(0, se->base + se->hw->regs->result + (i * 4));
+ 
+ out:
+-	dma_free_coherent(se->dev, SE_SHA_BUFLEN,
+-			  rctx->datbuf.buf, rctx->datbuf.addr);
++	if (rctx->residue.size)
++		dma_free_coherent(se->dev, rctx->datbuf.size,
++				  rctx->datbuf.buf, rctx->datbuf.addr);
++out_free:
+ 	dma_free_coherent(se->dev, crypto_ahash_blocksize(tfm) * 2,
+ 			  rctx->residue.buf, rctx->residue.addr);
+ 	return ret;
+@@ -1683,28 +1695,15 @@ static int tegra_cmac_init(struct ahash_request *req)
+ 	rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size * 2,
+ 					       &rctx->residue.addr, GFP_KERNEL);
+ 	if (!rctx->residue.buf)
+-		goto resbuf_fail;
++		return -ENOMEM;
+ 
+ 	rctx->residue.size = 0;
+ 
+-	rctx->datbuf.buf = dma_alloc_coherent(se->dev, SE_SHA_BUFLEN,
+-					      &rctx->datbuf.addr, GFP_KERNEL);
+-	if (!rctx->datbuf.buf)
+-		goto datbuf_fail;
+-
+-	rctx->datbuf.size = 0;
+-
+ 	/* Clear any previous result */
+ 	for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
+ 		writel(0, se->base + se->hw->regs->result + (i * 4));
+ 
+ 	return 0;
+-
+-datbuf_fail:
+-	dma_free_coherent(se->dev, rctx->blk_size, rctx->residue.buf,
+-			  rctx->residue.addr);
+-resbuf_fail:
+-	return -ENOMEM;
+ }
+ 
+ static int tegra_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
+diff --git a/drivers/crypto/tegra/tegra-se-hash.c b/drivers/crypto/tegra/tegra-se-hash.c
+index 726e30c0e63ebb..451b8eaab16aab 100644
+--- a/drivers/crypto/tegra/tegra-se-hash.c
++++ b/drivers/crypto/tegra/tegra-se-hash.c
+@@ -332,6 +332,11 @@ static int tegra_sha_do_update(struct ahash_request *req)
+ 		return 0;
+ 	}
+ 
++	rctx->datbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->datbuf.size,
++					      &rctx->datbuf.addr, GFP_KERNEL);
++	if (!rctx->datbuf.buf)
++		return -ENOMEM;
++
+ 	/* Copy the previous residue first */
+ 	if (rctx->residue.size)
+ 		memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
+@@ -368,6 +373,9 @@ static int tegra_sha_do_update(struct ahash_request *req)
+ 	if (!(rctx->task & SHA_FINAL))
+ 		tegra_sha_copy_hash_result(se, rctx);
+ 
++	dma_free_coherent(ctx->se->dev, rctx->datbuf.size,
++			  rctx->datbuf.buf, rctx->datbuf.addr);
++
+ 	return ret;
+ }
+ 
+@@ -380,7 +388,17 @@ static int tegra_sha_do_final(struct ahash_request *req)
+ 	u32 *cpuvaddr = se->cmdbuf->addr;
+ 	int size, ret = 0;
+ 
+-	memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
++	if (rctx->residue.size) {
++		rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->residue.size,
++						      &rctx->datbuf.addr, GFP_KERNEL);
++		if (!rctx->datbuf.buf) {
++			ret = -ENOMEM;
++			goto out_free;
++		}
++
++		memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
++	}
++
+ 	rctx->datbuf.size = rctx->residue.size;
+ 	rctx->total_len += rctx->residue.size;
+ 
+@@ -397,8 +415,10 @@ static int tegra_sha_do_final(struct ahash_request *req)
+ 	memcpy(req->result, rctx->digest.buf, rctx->digest.size);
+ 
+ out:
+-	dma_free_coherent(se->dev, SE_SHA_BUFLEN,
+-			  rctx->datbuf.buf, rctx->datbuf.addr);
++	if (rctx->residue.size)
++		dma_free_coherent(se->dev, rctx->datbuf.size,
++				  rctx->datbuf.buf, rctx->datbuf.addr);
++out_free:
+ 	dma_free_coherent(se->dev, crypto_ahash_blocksize(tfm),
+ 			  rctx->residue.buf, rctx->residue.addr);
+ 	dma_free_coherent(se->dev, rctx->digest.size, rctx->digest.buf,
+@@ -534,19 +554,11 @@ static int tegra_sha_init(struct ahash_request *req)
+ 	if (!rctx->residue.buf)
+ 		goto resbuf_fail;
+ 
+-	rctx->datbuf.buf = dma_alloc_coherent(se->dev, SE_SHA_BUFLEN,
+-					      &rctx->datbuf.addr, GFP_KERNEL);
+-	if (!rctx->datbuf.buf)
+-		goto datbuf_fail;
+-
+ 	return 0;
+ 
+-datbuf_fail:
+-	dma_free_coherent(se->dev, rctx->blk_size, rctx->residue.buf,
+-			  rctx->residue.addr);
+ resbuf_fail:
+-	dma_free_coherent(se->dev, SE_SHA_BUFLEN, rctx->datbuf.buf,
+-			  rctx->datbuf.addr);
++	dma_free_coherent(se->dev, rctx->digest.size, rctx->digest.buf,
++			  rctx->digest.addr);
+ digbuf_fail:
+ 	return -ENOMEM;
+ }
+diff --git a/drivers/crypto/tegra/tegra-se.h b/drivers/crypto/tegra/tegra-se.h
+index b54aefe717a174..e196a90eedb92c 100644
+--- a/drivers/crypto/tegra/tegra-se.h
++++ b/drivers/crypto/tegra/tegra-se.h
+@@ -340,8 +340,6 @@
+ #define SE_CRYPTO_CTR_REG_COUNT			4
+ #define SE_MAX_KEYSLOT				15
+ #define SE_MAX_MEM_ALLOC			SZ_4M
+-#define SE_AES_BUFLEN				0x8000
+-#define SE_SHA_BUFLEN				0x2000
+ 
+ #define SHA_FIRST	BIT(0)
+ #define SHA_UPDATE	BIT(1)
+diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
+index c353029789cf1a..1290886f065e33 100644
+--- a/drivers/dma-buf/sw_sync.c
++++ b/drivers/dma-buf/sw_sync.c
+@@ -444,15 +444,17 @@ static int sw_sync_ioctl_get_deadline(struct sync_timeline *obj, unsigned long a
+ 		return -EINVAL;
+ 
+ 	pt = dma_fence_to_sync_pt(fence);
+-	if (!pt)
+-		return -EINVAL;
++	if (!pt) {
++		ret = -EINVAL;
++		goto put_fence;
++	}
+ 
+ 	spin_lock_irqsave(fence->lock, flags);
+-	if (test_bit(SW_SYNC_HAS_DEADLINE_BIT, &fence->flags)) {
+-		data.deadline_ns = ktime_to_ns(pt->deadline);
+-	} else {
++	if (!test_bit(SW_SYNC_HAS_DEADLINE_BIT, &fence->flags)) {
+ 		ret = -ENOENT;
++		goto unlock;
+ 	}
++	data.deadline_ns = ktime_to_ns(pt->deadline);
+ 	spin_unlock_irqrestore(fence->lock, flags);
+ 
+ 	dma_fence_put(fence);
+@@ -464,6 +466,13 @@ static int sw_sync_ioctl_get_deadline(struct sync_timeline *obj, unsigned long a
+ 		return -EFAULT;
+ 
+ 	return 0;
++
++unlock:
++	spin_unlock_irqrestore(fence->lock, flags);
++put_fence:
++	dma_fence_put(fence);
++
++	return ret;
+ }
+ 
+ static long sw_sync_ioctl(struct file *file, unsigned int cmd,
+diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
+index 685098f9626f2b..eebcdf653d0729 100644
+--- a/drivers/firmware/efi/libstub/efistub.h
++++ b/drivers/firmware/efi/libstub/efistub.h
+@@ -171,7 +171,7 @@ void efi_set_u64_split(u64 data, u32 *lo, u32 *hi)
+  * the EFI memory map. Other related structures, e.g. x86 e820ext, need
+  * to factor in this headroom requirement as well.
+  */
+-#define EFI_MMAP_NR_SLACK_SLOTS	8
++#define EFI_MMAP_NR_SLACK_SLOTS	32
+ 
+ typedef struct efi_generic_dev_path efi_device_path_protocol_t;
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+index 45affc02548c16..a3a7d20ab4fea9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+@@ -437,6 +437,13 @@ static bool amdgpu_get_bios_apu(struct amdgpu_device *adev)
+ 	return true;
+ }
+ 
++static bool amdgpu_prefer_rom_resource(struct amdgpu_device *adev)
++{
++	struct resource *res = &adev->pdev->resource[PCI_ROM_RESOURCE];
++
++	return (res->flags & IORESOURCE_ROM_SHADOW);
++}
++
+ static bool amdgpu_get_bios_dgpu(struct amdgpu_device *adev)
+ {
+ 	if (amdgpu_atrm_get_bios(adev)) {
+@@ -455,14 +462,27 @@ static bool amdgpu_get_bios_dgpu(struct amdgpu_device *adev)
+ 		goto success;
+ 	}
+ 
+-	if (amdgpu_read_platform_bios(adev)) {
+-		dev_info(adev->dev, "Fetched VBIOS from platform\n");
+-		goto success;
+-	}
++	if (amdgpu_prefer_rom_resource(adev)) {
++		if (amdgpu_read_bios(adev)) {
++			dev_info(adev->dev, "Fetched VBIOS from ROM BAR\n");
++			goto success;
++		}
+ 
+-	if (amdgpu_read_bios(adev)) {
+-		dev_info(adev->dev, "Fetched VBIOS from ROM BAR\n");
+-		goto success;
++		if (amdgpu_read_platform_bios(adev)) {
++			dev_info(adev->dev, "Fetched VBIOS from platform\n");
++			goto success;
++		}
++
++	} else {
++		if (amdgpu_read_platform_bios(adev)) {
++			dev_info(adev->dev, "Fetched VBIOS from platform\n");
++			goto success;
++		}
++
++		if (amdgpu_read_bios(adev)) {
++			dev_info(adev->dev, "Fetched VBIOS from ROM BAR\n");
++			goto success;
++		}
+ 	}
+ 
+ 	if (amdgpu_read_bios_from_rom(adev)) {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 31d4df96889812..24d007715a14ae 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -3322,6 +3322,7 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
+ 			amdgpu_device_mem_scratch_fini(adev);
+ 			amdgpu_ib_pool_fini(adev);
+ 			amdgpu_seq64_fini(adev);
++			amdgpu_doorbell_fini(adev);
+ 		}
+ 
+ 		r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
+@@ -4670,7 +4671,6 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
+ 
+ 		iounmap(adev->rmmio);
+ 		adev->rmmio = NULL;
+-		amdgpu_doorbell_fini(adev);
+ 		drm_dev_exit(idx);
+ 	}
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+index 8e81a83d37d846..2f90fff1b9ddc0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+@@ -181,7 +181,7 @@ static void amdgpu_dma_buf_unmap(struct dma_buf_attachment *attach,
+ 				 struct sg_table *sgt,
+ 				 enum dma_data_direction dir)
+ {
+-	if (sgt->sgl->page_link) {
++	if (sg_page(sgt->sgl)) {
+ 		dma_unmap_sgtable(attach->dev, sgt, dir, 0);
+ 		sg_free_table(sgt);
+ 		kfree(sgt);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index 7978d5189c37d4..a9eb0927a7664f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -1795,7 +1795,6 @@ static const u16 amdgpu_unsupported_pciidlist[] = {
+ };
+ 
+ static const struct pci_device_id pciidlist[] = {
+-#ifdef CONFIG_DRM_AMDGPU_SI
+ 	{0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+ 	{0x1002, 0x6784, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+ 	{0x1002, 0x6788, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+@@ -1868,8 +1867,6 @@ static const struct pci_device_id pciidlist[] = {
+ 	{0x1002, 0x6665, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
+ 	{0x1002, 0x6667, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
+ 	{0x1002, 0x666F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
+-#endif
+-#ifdef CONFIG_DRM_AMDGPU_CIK
+ 	/* Kaveri */
+ 	{0x1002, 0x1304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
+ 	{0x1002, 0x1305, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
+@@ -1952,7 +1949,6 @@ static const struct pci_device_id pciidlist[] = {
+ 	{0x1002, 0x985D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
+ 	{0x1002, 0x985E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
+ 	{0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
+-#endif
+ 	/* topaz */
+ 	{0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
+ 	{0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
+@@ -2284,14 +2280,14 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
+ 		return -ENOTSUPP;
+ 	}
+ 
++	switch (flags & AMD_ASIC_MASK) {
++	case CHIP_TAHITI:
++	case CHIP_PITCAIRN:
++	case CHIP_VERDE:
++	case CHIP_OLAND:
++	case CHIP_HAINAN:
+ #ifdef CONFIG_DRM_AMDGPU_SI
+-	if (!amdgpu_si_support) {
+-		switch (flags & AMD_ASIC_MASK) {
+-		case CHIP_TAHITI:
+-		case CHIP_PITCAIRN:
+-		case CHIP_VERDE:
+-		case CHIP_OLAND:
+-		case CHIP_HAINAN:
++		if (!amdgpu_si_support) {
+ 			dev_info(&pdev->dev,
+ 				 "SI support provided by radeon.\n");
+ 			dev_info(&pdev->dev,
+@@ -2299,16 +2295,18 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
+ 				);
+ 			return -ENODEV;
+ 		}
+-	}
++		break;
++#else
++		dev_info(&pdev->dev, "amdgpu is built without SI support.\n");
++		return -ENODEV;
+ #endif
++	case CHIP_KAVERI:
++	case CHIP_BONAIRE:
++	case CHIP_HAWAII:
++	case CHIP_KABINI:
++	case CHIP_MULLINS:
+ #ifdef CONFIG_DRM_AMDGPU_CIK
+-	if (!amdgpu_cik_support) {
+-		switch (flags & AMD_ASIC_MASK) {
+-		case CHIP_KAVERI:
+-		case CHIP_BONAIRE:
+-		case CHIP_HAWAII:
+-		case CHIP_KABINI:
+-		case CHIP_MULLINS:
++		if (!amdgpu_cik_support) {
+ 			dev_info(&pdev->dev,
+ 				 "CIK support provided by radeon.\n");
+ 			dev_info(&pdev->dev,
+@@ -2316,8 +2314,14 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
+ 				);
+ 			return -ENODEV;
+ 		}
+-	}
++		break;
++#else
++		dev_info(&pdev->dev, "amdgpu is built without CIK support.\n");
++		return -ENODEV;
+ #endif
++	default:
++		break;
++	}
+ 
+ 	adev = devm_drm_dev_alloc(&pdev->dev, &amdgpu_kms_driver, typeof(*adev), ddev);
+ 	if (IS_ERR(adev))
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+index 971419e3a9bbdf..4c4bdc4f51b294 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -161,8 +161,8 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
+ 		 * When GTT is just an alternative to VRAM make sure that we
+ 		 * only use it as fallback and still try to fill up VRAM first.
+ 		 */
+-		if (domain & abo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM &&
+-		    !(adev->flags & AMD_IS_APU))
++		if (abo->tbo.resource && !(adev->flags & AMD_IS_APU) &&
++		    domain & abo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM)
+ 			places[c].flags |= TTM_PL_FLAG_FALLBACK;
+ 		c++;
+ 	}
+diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+index 231a3d490ea8e3..7a773fcd7752c2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+@@ -859,6 +859,10 @@ static void mes_v11_0_get_fw_version(struct amdgpu_device *adev)
+ {
+ 	int pipe;
+ 
++	/* return early if we have already fetched these */
++	if (adev->mes.sched_version && adev->mes.kiq_version)
++		return;
++
+ 	/* get MES scheduler/KIQ versions */
+ 	mutex_lock(&adev->srbm_mutex);
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
+index b3175ff676f33c..459f7b8d72b4d1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
+@@ -1225,17 +1225,20 @@ static int mes_v12_0_queue_init(struct amdgpu_device *adev,
+ 		mes_v12_0_queue_init_register(ring);
+ 	}
+ 
+-	/* get MES scheduler/KIQ versions */
+-	mutex_lock(&adev->srbm_mutex);
+-	soc21_grbm_select(adev, 3, pipe, 0, 0);
++	if (((pipe == AMDGPU_MES_SCHED_PIPE) && !adev->mes.sched_version) ||
++	    ((pipe == AMDGPU_MES_KIQ_PIPE) && !adev->mes.kiq_version)) {
++		/* get MES scheduler/KIQ versions */
++		mutex_lock(&adev->srbm_mutex);
++		soc21_grbm_select(adev, 3, pipe, 0, 0);
+ 
+-	if (pipe == AMDGPU_MES_SCHED_PIPE)
+-		adev->mes.sched_version = RREG32_SOC15(GC, 0, regCP_MES_GP3_LO);
+-	else if (pipe == AMDGPU_MES_KIQ_PIPE && adev->enable_mes_kiq)
+-		adev->mes.kiq_version = RREG32_SOC15(GC, 0, regCP_MES_GP3_LO);
++		if (pipe == AMDGPU_MES_SCHED_PIPE)
++			adev->mes.sched_version = RREG32_SOC15(GC, 0, regCP_MES_GP3_LO);
++		else if (pipe == AMDGPU_MES_KIQ_PIPE && adev->enable_mes_kiq)
++			adev->mes.kiq_version = RREG32_SOC15(GC, 0, regCP_MES_GP3_LO);
+ 
+-	soc21_grbm_select(adev, 0, 0, 0, 0);
+-	mutex_unlock(&adev->srbm_mutex);
++		soc21_grbm_select(adev, 0, 0, 0, 0);
++		mutex_unlock(&adev->srbm_mutex);
++	}
+ 
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 260b6b8d29fd6c..c22da13859bd51 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -1690,6 +1690,13 @@ static const struct dmi_system_id dmi_quirk_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "HP Elite mt645 G8 Mobile Thin Client"),
+ 		},
+ 	},
++	{
++		.callback = edp0_on_dp1_callback,
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 645 14 inch G11 Notebook PC"),
++		},
++	},
+ 	{
+ 		.callback = edp0_on_dp1_callback,
+ 		.matches = {
+@@ -1697,6 +1704,20 @@ static const struct dmi_system_id dmi_quirk_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 665 16 inch G11 Notebook PC"),
+ 		},
+ 	},
++	{
++		.callback = edp0_on_dp1_callback,
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook 445 14 inch G11 Notebook PC"),
++		},
++	},
++	{
++		.callback = edp0_on_dp1_callback,
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook 465 16 inch G11 Notebook PC"),
++		},
++	},
+ 	{}
+ 	/* TODO: refactor this from a fixed table to a dynamic option */
+ };
+@@ -8458,14 +8479,39 @@ static void manage_dm_interrupts(struct amdgpu_device *adev,
+ 	int offdelay;
+ 
+ 	if (acrtc_state) {
+-		if (amdgpu_ip_version(adev, DCE_HWIP, 0) <
+-		    IP_VERSION(3, 5, 0) ||
+-		    acrtc_state->stream->link->psr_settings.psr_version <
+-		    DC_PSR_VERSION_UNSUPPORTED ||
+-		    !(adev->flags & AMD_IS_APU)) {
+-			timing = &acrtc_state->stream->timing;
+-
+-			/* at least 2 frames */
++		timing = &acrtc_state->stream->timing;
++
++		/*
++		 * Depending on when the HW latching event of double-buffered
++		 * registers happen relative to the PSR SDP deadline, and how
++		 * bad the Panel clock has drifted since the last ALPM off
++		 * event, there can be up to 3 frames of delay between sending
++		 * the PSR exit cmd to DMUB fw, and when the panel starts
++		 * displaying live frames.
++		 *
++		 * We can set:
++		 *
++		 * 20/100 * offdelay_ms = 3_frames_ms
++		 * => offdelay_ms = 5 * 3_frames_ms
++		 *
++		 * This ensures that `3_frames_ms` will only be experienced as a
++		 * 20% delay on top how long the display has been static, and
++		 * thus make the delay less perceivable.
++		 */
++		if (acrtc_state->stream->link->psr_settings.psr_version <
++		    DC_PSR_VERSION_UNSUPPORTED) {
++			offdelay = DIV64_U64_ROUND_UP((u64)5 * 3 * 10 *
++						      timing->v_total *
++						      timing->h_total,
++						      timing->pix_clk_100hz);
++			config.offdelay_ms = offdelay ?: 30;
++		} else if (amdgpu_ip_version(adev, DCE_HWIP, 0) <
++			   IP_VERSION(3, 5, 0) ||
++			   !(adev->flags & AMD_IS_APU)) {
++			/*
++			 * Older HW and DGPU have issues with instant off;
++			 * use a 2 frame offdelay.
++			 */
+ 			offdelay = DIV64_U64_ROUND_UP((u64)20 *
+ 						      timing->v_total *
+ 						      timing->h_total,
+@@ -8473,6 +8519,8 @@ static void manage_dm_interrupts(struct amdgpu_device *adev,
+ 
+ 			config.offdelay_ms = offdelay ?: 30;
+ 		} else {
++			/* offdelay_ms = 0 will never disable vblank */
++			config.offdelay_ms = 1;
+ 			config.disable_immediate = true;
+ 		}
+ 
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+index 70fcfae8e4c552..2ac56e79df05e6 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+@@ -113,6 +113,7 @@ bool amdgpu_dm_crtc_vrr_active(const struct dm_crtc_state *dm_state)
+  *
+  * Panel Replay and PSR SU
+  * - Enable when:
++ *      - VRR is disabled
+  *      - vblank counter is disabled
+  *      - entry is allowed: usermode demonstrates an adequate number of fast
+  *        commits)
+@@ -131,19 +132,20 @@ static void amdgpu_dm_crtc_set_panel_sr_feature(
+ 	bool is_sr_active = (link->replay_settings.replay_allow_active ||
+ 				 link->psr_settings.psr_allow_active);
+ 	bool is_crc_window_active = false;
++	bool vrr_active = amdgpu_dm_crtc_vrr_active_irq(vblank_work->acrtc);
+ 
+ #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
+ 	is_crc_window_active =
+ 		amdgpu_dm_crc_window_is_activated(&vblank_work->acrtc->base);
+ #endif
+ 
+-	if (link->replay_settings.replay_feature_enabled &&
++	if (link->replay_settings.replay_feature_enabled && !vrr_active &&
+ 		allow_sr_entry && !is_sr_active && !is_crc_window_active) {
+ 		amdgpu_dm_replay_enable(vblank_work->stream, true);
+ 	} else if (vblank_enabled) {
+ 		if (link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 && is_sr_active)
+ 			amdgpu_dm_psr_disable(vblank_work->stream, false);
+-	} else if (link->psr_settings.psr_feature_enabled &&
++	} else if (link->psr_settings.psr_feature_enabled && !vrr_active &&
+ 		allow_sr_entry && !is_sr_active && !is_crc_window_active) {
+ 
+ 		struct amdgpu_dm_connector *aconn =
+diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
+index d35dd507cb9f85..cb187604744e96 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
++++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
+@@ -87,6 +87,8 @@ static void dml21_init(const struct dc *in_dc, struct dml2_context **dml_ctx, co
+ 	/* Store configuration options */
+ 	(*dml_ctx)->config = *config;
+ 
++	DC_FP_START();
++
+ 	/*Initialize SOCBB and DCNIP params */
+ 	dml21_initialize_soc_bb_params(&(*dml_ctx)->v21.dml_init, config, in_dc);
+ 	dml21_initialize_ip_params(&(*dml_ctx)->v21.dml_init, config, in_dc);
+@@ -97,6 +99,8 @@ static void dml21_init(const struct dc *in_dc, struct dml2_context **dml_ctx, co
+ 
+ 	/*Initialize DML21 instance */
+ 	dml2_initialize_instance(&(*dml_ctx)->v21.dml_init);
++
++	DC_FP_END();
+ }
+ 
+ bool dml21_create(const struct dc *in_dc, struct dml2_context **dml_ctx, const struct dml2_configuration_options *config)
+@@ -277,11 +281,16 @@ bool dml21_validate(const struct dc *in_dc, struct dc_state *context, struct dml
+ {
+ 	bool out = false;
+ 
++	DC_FP_START();
++
+ 	/* Use dml_validate_only for fast_validate path */
+-	if (fast_validate) {
++	if (fast_validate)
+ 		out = dml21_check_mode_support(in_dc, context, dml_ctx);
+-	} else
++	else
+ 		out = dml21_mode_check_and_programming(in_dc, context, dml_ctx);
++
++	DC_FP_END();
++
+ 	return out;
+ }
+ 
+@@ -420,8 +429,12 @@ void dml21_copy(struct dml2_context *dst_dml_ctx,
+ 
+ 	dst_dml_ctx->v21.mode_programming.programming = dst_dml2_programming;
+ 
++	DC_FP_START();
++
+ 	/* need to initialize copied instance for internal references to be correct */
+ 	dml2_initialize_instance(&dst_dml_ctx->v21.dml_init);
++
++	DC_FP_END();
+ }
+ 
+ bool dml21_create_copy(struct dml2_context **dst_dml_ctx,
+diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
+index 4d64c45930da49..cb2cb89dfecb2c 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
++++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
+@@ -734,11 +734,16 @@ bool dml2_validate(const struct dc *in_dc, struct dc_state *context, struct dml2
+ 		return out;
+ 	}
+ 
++	DC_FP_START();
++
+ 	/* Use dml_validate_only for fast_validate path */
+ 	if (fast_validate)
+ 		out = dml2_validate_only(context);
+ 	else
+ 		out = dml2_validate_and_build_resource(in_dc, context);
++
++	DC_FP_END();
++
+ 	return out;
+ }
+ 
+@@ -779,11 +784,15 @@ static void dml2_init(const struct dc *in_dc, const struct dml2_configuration_op
+ 		break;
+ 	}
+ 
++	DC_FP_START();
++
+ 	initialize_dml2_ip_params(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.ip);
+ 
+ 	initialize_dml2_soc_bbox(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.soc);
+ 
+ 	initialize_dml2_soc_states(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.soc, &(*dml2)->v20.dml_core_ctx.states);
++
++	DC_FP_END();
+ }
+ 
+ bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
+diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
+index 36d12db8d02256..f5f1ccd8303cf3 100644
+--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
+@@ -3003,7 +3003,11 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
+ 		dccg->funcs->set_dpstreamclk(dccg, DTBCLK0, tg->inst, dp_hpo_inst);
+ 
+ 		phyd32clk = get_phyd32clk_src(link);
+-		dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk);
++		if (link->cur_link_settings.link_rate == LINK_RATE_UNKNOWN) {
++			dccg->funcs->disable_symclk32_se(dccg, dp_hpo_inst);
++		} else {
++			dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk);
++		}
+ 	} else {
+ 		if (dccg->funcs->enable_symclk_se)
+ 			dccg->funcs->enable_symclk_se(dccg, stream_enc->stream_enc_inst,
+diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
+index 0b743669f23b44..62f1e597787e69 100644
+--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
+@@ -1001,8 +1001,11 @@ void dcn401_enable_stream(struct pipe_ctx *pipe_ctx)
+ 	if (dc_is_dp_signal(pipe_ctx->stream->signal) || dc_is_virtual_signal(pipe_ctx->stream->signal)) {
+ 		if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
+ 			dccg->funcs->set_dpstreamclk(dccg, DPREFCLK, tg->inst, dp_hpo_inst);
+-
+-			dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk);
++			if (link->cur_link_settings.link_rate == LINK_RATE_UNKNOWN) {
++				dccg->funcs->disable_symclk32_se(dccg, dp_hpo_inst);
++			} else {
++				dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk);
++			}
+ 		} else {
+ 			/* need to set DTBCLK_P source to DPREFCLK for DP8B10B */
+ 			dccg->funcs->set_dtbclk_p_src(dccg, DPREFCLK, tg->inst);
+diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
+index 80386f698ae4de..0ca6358a9782e2 100644
+--- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
+@@ -891,7 +891,7 @@ static const struct dc_debug_options debug_defaults_drv = {
+ 	.disable_z10 = true,
+ 	.enable_legacy_fast_update = true,
+ 	.enable_z9_disable_interface = true, /* Allow support for the PMFW interface for disable Z9*/
+-	.dml_hostvm_override = DML_HOSTVM_NO_OVERRIDE,
++	.dml_hostvm_override = DML_HOSTVM_OVERRIDE_FALSE,
+ 	.using_dml2 = false,
+ };
+ 
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c
+index a8fc0fa44db69d..ba5c1237fcfe1a 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c
+@@ -267,10 +267,10 @@ int smu7_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
+ 	if (hwmgr->thermal_controller.fanInfo.bNoFan ||
+ 			(hwmgr->thermal_controller.fanInfo.
+ 			ucTachometerPulsesPerRevolution == 0) ||
+-			speed == 0 ||
++			(!speed || speed > UINT_MAX/8) ||
+ 			(speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) ||
+ 			(speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM))
+-		return 0;
++		return -EINVAL;
+ 
+ 	if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
+ 		smu7_fan_ctrl_stop_smc_fan_control(hwmgr);
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
+index 379012494da57b..56423aedf3fa7c 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
+@@ -307,10 +307,10 @@ int vega10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
+ 	int result = 0;
+ 
+ 	if (hwmgr->thermal_controller.fanInfo.bNoFan ||
+-	    speed == 0 ||
++	    (!speed || speed > UINT_MAX/8) ||
+ 	    (speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) ||
+ 	    (speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM))
+-		return -1;
++		return -EINVAL;
+ 
+ 	if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
+ 		result = vega10_fan_ctrl_stop_smc_fan_control(hwmgr);
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c
+index a3331ffb2daf7f..1b1c88590156cd 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c
+@@ -191,7 +191,7 @@ int vega20_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
+ 	uint32_t tach_period, crystal_clock_freq;
+ 	int result = 0;
+ 
+-	if (!speed)
++	if (!speed || speed > UINT_MAX/8)
+ 		return -EINVAL;
+ 
+ 	if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) {
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+index fc1297fecc62e0..d4b954b22441c5 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+@@ -1267,6 +1267,9 @@ static int arcturus_set_fan_speed_rpm(struct smu_context *smu,
+ 	uint32_t crystal_clock_freq = 2500;
+ 	uint32_t tach_period;
+ 
++	if (!speed || speed > UINT_MAX/8)
++		return -EINVAL;
++
+ 	tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
+ 	WREG32_SOC15(THM, 0, mmCG_TACH_CTRL_ARCT,
+ 		     REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL_ARCT),
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+index 16fcd9dcd202e0..6c61e87359dd48 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+@@ -1199,7 +1199,7 @@ int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
+ 	uint32_t crystal_clock_freq = 2500;
+ 	uint32_t tach_period;
+ 
+-	if (speed == 0)
++	if (!speed || speed > UINT_MAX/8)
+ 		return -EINVAL;
+ 	/*
+ 	 * To prevent from possible overheat, some ASICs may have requirement
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+index 2024a85fa11bd5..4f78c84da780c7 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+@@ -1228,7 +1228,7 @@ int smu_v13_0_set_fan_speed_rpm(struct smu_context *smu,
+ 	uint32_t tach_period;
+ 	int ret;
+ 
+-	if (!speed)
++	if (!speed || speed > UINT_MAX/8)
+ 		return -EINVAL;
+ 
+ 	ret = smu_v13_0_auto_fan_control(smu, 0);
+diff --git a/drivers/gpu/drm/ast/ast_dp.c b/drivers/gpu/drm/ast/ast_dp.c
+index 00b364f9a71e54..5dadc895e7f26b 100644
+--- a/drivers/gpu/drm/ast/ast_dp.c
++++ b/drivers/gpu/drm/ast/ast_dp.c
+@@ -17,6 +17,12 @@ static bool ast_astdp_is_connected(struct ast_device *ast)
+ {
+ 	if (!ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDF, AST_IO_VGACRDF_HPD))
+ 		return false;
++	/*
++	 * HPD might be set even if no monitor is connected, so also check that
++	 * the link training was successful.
++	 */
++	if (!ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDC, AST_IO_VGACRDC_LINK_SUCCESS))
++		return false;
+ 	return true;
+ }
+ 
+diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
+index d5eb8de645a9a3..4f8899cd125d9d 100644
+--- a/drivers/gpu/drm/i915/display/intel_display.c
++++ b/drivers/gpu/drm/i915/display/intel_display.c
+@@ -1006,7 +1006,9 @@ static bool vrr_params_changed(const struct intel_crtc_state *old_crtc_state,
+ 		old_crtc_state->vrr.vmin != new_crtc_state->vrr.vmin ||
+ 		old_crtc_state->vrr.vmax != new_crtc_state->vrr.vmax ||
+ 		old_crtc_state->vrr.guardband != new_crtc_state->vrr.guardband ||
+-		old_crtc_state->vrr.pipeline_full != new_crtc_state->vrr.pipeline_full;
++		old_crtc_state->vrr.pipeline_full != new_crtc_state->vrr.pipeline_full ||
++		old_crtc_state->vrr.vsync_start != new_crtc_state->vrr.vsync_start ||
++		old_crtc_state->vrr.vsync_end != new_crtc_state->vrr.vsync_end;
+ }
+ 
+ static bool cmrr_params_changed(const struct intel_crtc_state *old_crtc_state,
+diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c
+index 908f910420c20c..4ef45520e199af 100644
+--- a/drivers/gpu/drm/i915/gvt/opregion.c
++++ b/drivers/gpu/drm/i915/gvt/opregion.c
+@@ -222,7 +222,6 @@ int intel_vgpu_init_opregion(struct intel_vgpu *vgpu)
+ 	u8 *buf;
+ 	struct opregion_header *header;
+ 	struct vbt v;
+-	const char opregion_signature[16] = OPREGION_SIGNATURE;
+ 
+ 	gvt_dbg_core("init vgpu%d opregion\n", vgpu->id);
+ 	vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_KERNEL |
+@@ -236,8 +235,10 @@ int intel_vgpu_init_opregion(struct intel_vgpu *vgpu)
+ 	/* emulated opregion with VBT mailbox only */
+ 	buf = (u8 *)vgpu_opregion(vgpu)->va;
+ 	header = (struct opregion_header *)buf;
+-	memcpy(header->signature, opregion_signature,
+-	       sizeof(opregion_signature));
++
++	static_assert(sizeof(header->signature) == sizeof(OPREGION_SIGNATURE) - 1);
++	memcpy(header->signature, OPREGION_SIGNATURE, sizeof(header->signature));
++
+ 	header->size = 0x8;
+ 	header->opregion_ver = 0x02000000;
+ 	header->mboxes = MBOX_VBT;
+diff --git a/drivers/gpu/drm/imagination/pvr_fw.c b/drivers/gpu/drm/imagination/pvr_fw.c
+index 3debc9870a82ae..d09c4c68411627 100644
+--- a/drivers/gpu/drm/imagination/pvr_fw.c
++++ b/drivers/gpu/drm/imagination/pvr_fw.c
+@@ -732,7 +732,7 @@ pvr_fw_process(struct pvr_device *pvr_dev)
+ 					       fw_mem->core_data, fw_mem->core_code_alloc_size);
+ 
+ 	if (err)
+-		goto err_free_fw_core_data_obj;
++		goto err_free_kdata;
+ 
+ 	memcpy(fw_code_ptr, fw_mem->code, fw_mem->code_alloc_size);
+ 	memcpy(fw_data_ptr, fw_mem->data, fw_mem->data_alloc_size);
+@@ -742,10 +742,14 @@ pvr_fw_process(struct pvr_device *pvr_dev)
+ 		memcpy(fw_core_data_ptr, fw_mem->core_data, fw_mem->core_data_alloc_size);
+ 
+ 	/* We're finished with the firmware section memory on the CPU, unmap. */
+-	if (fw_core_data_ptr)
++	if (fw_core_data_ptr) {
+ 		pvr_fw_object_vunmap(fw_mem->core_data_obj);
+-	if (fw_core_code_ptr)
++		fw_core_data_ptr = NULL;
++	}
++	if (fw_core_code_ptr) {
+ 		pvr_fw_object_vunmap(fw_mem->core_code_obj);
++		fw_core_code_ptr = NULL;
++	}
+ 	pvr_fw_object_vunmap(fw_mem->data_obj);
+ 	fw_data_ptr = NULL;
+ 	pvr_fw_object_vunmap(fw_mem->code_obj);
+@@ -753,7 +757,7 @@ pvr_fw_process(struct pvr_device *pvr_dev)
+ 
+ 	err = pvr_fw_create_fwif_connection_ctl(pvr_dev);
+ 	if (err)
+-		goto err_free_fw_core_data_obj;
++		goto err_free_kdata;
+ 
+ 	return 0;
+ 
+@@ -763,13 +767,16 @@ pvr_fw_process(struct pvr_device *pvr_dev)
+ 	kfree(fw_mem->data);
+ 	kfree(fw_mem->code);
+ 
+-err_free_fw_core_data_obj:
+ 	if (fw_core_data_ptr)
+-		pvr_fw_object_unmap_and_destroy(fw_mem->core_data_obj);
++		pvr_fw_object_vunmap(fw_mem->core_data_obj);
++	if (fw_mem->core_data_obj)
++		pvr_fw_object_destroy(fw_mem->core_data_obj);
+ 
+ err_free_fw_core_code_obj:
+ 	if (fw_core_code_ptr)
+-		pvr_fw_object_unmap_and_destroy(fw_mem->core_code_obj);
++		pvr_fw_object_vunmap(fw_mem->core_code_obj);
++	if (fw_mem->core_code_obj)
++		pvr_fw_object_destroy(fw_mem->core_code_obj);
+ 
+ err_free_fw_data_obj:
+ 	if (fw_data_ptr)
+@@ -836,6 +843,12 @@ pvr_fw_cleanup(struct pvr_device *pvr_dev)
+ 	struct pvr_fw_mem *fw_mem = &pvr_dev->fw_dev.mem;
+ 
+ 	pvr_fw_fini_fwif_connection_ctl(pvr_dev);
++
++	kfree(fw_mem->core_data);
++	kfree(fw_mem->core_code);
++	kfree(fw_mem->data);
++	kfree(fw_mem->code);
++
+ 	if (fw_mem->core_code_obj)
+ 		pvr_fw_object_destroy(fw_mem->core_code_obj);
+ 	if (fw_mem->core_data_obj)
+diff --git a/drivers/gpu/drm/imagination/pvr_job.c b/drivers/gpu/drm/imagination/pvr_job.c
+index 78c2f3c6dce019..6a15c1d2d871d3 100644
+--- a/drivers/gpu/drm/imagination/pvr_job.c
++++ b/drivers/gpu/drm/imagination/pvr_job.c
+@@ -684,6 +684,13 @@ pvr_jobs_link_geom_frag(struct pvr_job_data *job_data, u32 *job_count)
+ 		geom_job->paired_job = frag_job;
+ 		frag_job->paired_job = geom_job;
+ 
++		/* The geometry job pvr_job structure is used when the fragment
++		 * job is being prepared by the GPU scheduler. Have the fragment
++		 * job hold a reference on the geometry job to prevent it being
++		 * freed until the fragment job has finished with it.
++		 */
++		pvr_job_get(geom_job);
++
+ 		/* Skip the fragment job we just paired to the geometry job. */
+ 		i++;
+ 	}
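
The comment in the hunk above states the lifetime problem: the scheduler may still dereference the geometry job through the fragment job's paired_job pointer, so pairing must take a reference that is only dropped when the fragment job is freed. A minimal userspace sketch of that get/put pairing with C11 atomics (the types and names are illustrative, not the pvr_job API):

    #include <stdatomic.h>
    #include <stdlib.h>

    struct job {
        atomic_int refcount;        /* starts at 1 for the creator */
        struct job *paired_job;
    };

    static struct job *job_get(struct job *j)
    {
        atomic_fetch_add(&j->refcount, 1);
        return j;
    }

    static void job_put(struct job *j)
    {
        if (atomic_fetch_sub(&j->refcount, 1) == 1)
            free(j);                /* last reference dropped */
    }

    static void pair_jobs(struct job *geom, struct job *frag)
    {
        geom->paired_job = frag;
        frag->paired_job = job_get(geom);   /* keep geom alive for frag */
    }

    static void free_fragment_job(struct job *frag)
    {
        if (frag->paired_job)
            job_put(frag->paired_job);      /* mirror of the pairing above */
        job_put(frag);
    }
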
+diff --git a/drivers/gpu/drm/imagination/pvr_queue.c b/drivers/gpu/drm/imagination/pvr_queue.c
+index 87780cc7c0c322..130473cfdfc9b7 100644
+--- a/drivers/gpu/drm/imagination/pvr_queue.c
++++ b/drivers/gpu/drm/imagination/pvr_queue.c
+@@ -866,6 +866,10 @@ static void pvr_queue_free_job(struct drm_sched_job *sched_job)
+ 	struct pvr_job *job = container_of(sched_job, struct pvr_job, base);
+ 
+ 	drm_sched_job_cleanup(sched_job);
++
++	if (job->type == DRM_PVR_JOB_TYPE_FRAGMENT && job->paired_job)
++		pvr_job_put(job->paired_job);
++
+ 	job->paired_job = NULL;
+ 	pvr_job_put(job);
+ }
+diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
+index fb71658c3117b2..6067d08aeee34b 100644
+--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
++++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
+@@ -223,7 +223,7 @@ void mgag200_set_mode_regs(struct mga_device *mdev, const struct drm_display_mod
+ 	vsyncstr = mode->crtc_vsync_start - 1;
+ 	vsyncend = mode->crtc_vsync_end - 1;
+ 	vtotal = mode->crtc_vtotal - 2;
+-	vblkstr = mode->crtc_vblank_start;
++	vblkstr = mode->crtc_vblank_start - 1;
+ 	vblkend = vtotal + 1;
+ 
+ 	linecomp = vdispend;
+diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+index e386b059187acf..67fa528f546d33 100644
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+@@ -1126,49 +1126,50 @@ static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
+ 	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ 	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ 	u32 val;
++	int ret;
+ 
+ 	/*
+-	 * The GMU may still be in slumber unless the GPU started so check and
+-	 * skip putting it back into slumber if so
++	 * GMU firmware's internal power state gets messed up if we send "prepare_slumber" hfi when
++	 * oob_gpu handshake wasn't done after the last wake up. So do a dummy handshake here when
++	 * required
+ 	 */
+-	val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);
++	if (adreno_gpu->base.needs_hw_init) {
++		if (a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET))
++			goto force_off;
+ 
+-	if (val != 0xf) {
+-		int ret = a6xx_gmu_wait_for_idle(gmu);
++		a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
++	}
+ 
+-		/* If the GMU isn't responding assume it is hung */
+-		if (ret) {
+-			a6xx_gmu_force_off(gmu);
+-			return;
+-		}
++	ret = a6xx_gmu_wait_for_idle(gmu);
+ 
+-		a6xx_bus_clear_pending_transactions(adreno_gpu, a6xx_gpu->hung);
++	/* If the GMU isn't responding assume it is hung */
++	if (ret)
++		goto force_off;
+ 
+-		/* tell the GMU we want to slumber */
+-		ret = a6xx_gmu_notify_slumber(gmu);
+-		if (ret) {
+-			a6xx_gmu_force_off(gmu);
+-			return;
+-		}
++	a6xx_bus_clear_pending_transactions(adreno_gpu, a6xx_gpu->hung);
+ 
+-		ret = gmu_poll_timeout(gmu,
+-			REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, val,
+-			!(val & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB),
+-			100, 10000);
++	/* tell the GMU we want to slumber */
++	ret = a6xx_gmu_notify_slumber(gmu);
++	if (ret)
++		goto force_off;
+ 
+-		/*
+-		 * Let the user know we failed to slumber but don't worry too
+-		 * much because we are powering down anyway
+-		 */
++	ret = gmu_poll_timeout(gmu,
++		REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, val,
++		!(val & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB),
++		100, 10000);
+ 
+-		if (ret)
+-			DRM_DEV_ERROR(gmu->dev,
+-				"Unable to slumber GMU: status = 0%x/0%x\n",
+-				gmu_read(gmu,
+-					REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS),
+-				gmu_read(gmu,
+-					REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2));
+-	}
++	/*
++	 * Let the user know we failed to slumber but don't worry too
++	 * much because we are powering down anyway
++	 */
++
++	if (ret)
++		DRM_DEV_ERROR(gmu->dev,
++			"Unable to slumber GMU: status = 0%x/0%x\n",
++			gmu_read(gmu,
++				REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS),
++			gmu_read(gmu,
++				REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2));
+ 
+ 	/* Turn off HFI */
+ 	a6xx_hfi_stop(gmu);
+@@ -1178,6 +1179,11 @@ static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
+ 
+ 	/* Tell RPMh to power off the GPU */
+ 	a6xx_rpmh_stop(gmu);
++
++	return;
++
++force_off:
++	a6xx_gmu_force_off(gmu);
+ }
+ 
+ 
+diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+index 702b8d4b349723..d903ad9c0b5fb8 100644
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+@@ -233,10 +233,10 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
+ 				break;
+ 			fallthrough;
+ 		case MSM_SUBMIT_CMD_BUF:
+-			OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
++			OUT_PKT7(ring, CP_INDIRECT_BUFFER, 3);
+ 			OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
+ 			OUT_RING(ring, upper_32_bits(submit->cmd[i].iova));
+-			OUT_RING(ring, submit->cmd[i].size);
++			OUT_RING(ring, A5XX_CP_INDIRECT_BUFFER_2_IB_SIZE(submit->cmd[i].size));
+ 			ibs++;
+ 			break;
+ 		}
+@@ -319,10 +319,10 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
+ 				break;
+ 			fallthrough;
+ 		case MSM_SUBMIT_CMD_BUF:
+-			OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
++			OUT_PKT7(ring, CP_INDIRECT_BUFFER, 3);
+ 			OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
+ 			OUT_RING(ring, upper_32_bits(submit->cmd[i].iova));
+-			OUT_RING(ring, submit->cmd[i].size);
++			OUT_RING(ring, A5XX_CP_INDIRECT_BUFFER_2_IB_SIZE(submit->cmd[i].size));
+ 			ibs++;
+ 			break;
+ 		}
+diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
+index 7459fb8c517746..d22e01751f5eeb 100644
+--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
++++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
+@@ -1827,8 +1827,15 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
+ 			__func__, ret);
+ 		goto err;
+ 	}
+-	if (!ret)
++	if (!ret) {
+ 		msm_dsi->te_source = devm_kstrdup(dev, te_source, GFP_KERNEL);
++		if (!msm_dsi->te_source) {
++			DRM_DEV_ERROR(dev, "%s: failed to allocate te_source\n",
++				__func__);
++			ret = -ENOMEM;
++			goto err;
++		}
++	}
+ 	ret = 0;
+ 
+ 	if (of_property_read_bool(np, "syscon-sfpb")) {
+diff --git a/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml b/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml
+index cab01af55d2226..c6cdc5c003dc07 100644
+--- a/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml
++++ b/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml
+@@ -2264,5 +2264,12 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
+ 	</reg32>
+ </domain>
+ 
++<domain name="CP_INDIRECT_BUFFER" width="32" varset="chip" prefix="chip" variants="A5XX-">
++	<reg64 offset="0" name="IB_BASE" type="address"/>
++	<reg32 offset="2" name="2">
++		<bitfield name="IB_SIZE" low="0" high="19"/>
++	</reg32>
++</domain>
++
+ </database>
+ 
+diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
+index db961eade2257f..2016c1e7242fe3 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
++++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
+@@ -144,6 +144,9 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
+ 	nouveau_bo_del_io_reserve_lru(bo);
+ 	nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
+ 
++	if (bo->base.import_attach)
++		drm_prime_gem_destroy(&bo->base, bo->sg);
++
+ 	/*
+ 	 * If nouveau_bo_new() allocated this buffer, the GEM object was never
+ 	 * initialized, so don't attempt to release it.
+diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
+index 9ae2cee1c7c580..67e3c99de73ae6 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
++++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
+@@ -87,9 +87,6 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
+ 		return;
+ 	}
+ 
+-	if (gem->import_attach)
+-		drm_prime_gem_destroy(gem, nvbo->bo.sg);
+-
+ 	ttm_bo_put(&nvbo->bo);
+ 
+ 	pm_runtime_mark_last_busy(dev);
+diff --git a/drivers/gpu/drm/sti/Makefile b/drivers/gpu/drm/sti/Makefile
+index f203ac5514ae0b..f778a4eee7c9cf 100644
+--- a/drivers/gpu/drm/sti/Makefile
++++ b/drivers/gpu/drm/sti/Makefile
+@@ -7,8 +7,6 @@ sti-drm-y := \
+ 	sti_compositor.o \
+ 	sti_crtc.o \
+ 	sti_plane.o \
+-	sti_crtc.o \
+-	sti_plane.o \
+ 	sti_hdmi.o \
+ 	sti_hdmi_tx3g4c28phy.o \
+ 	sti_dvo.o \
+diff --git a/drivers/gpu/drm/tiny/repaper.c b/drivers/gpu/drm/tiny/repaper.c
+index 1f78aa3d26bbd4..768dfea15aec09 100644
+--- a/drivers/gpu/drm/tiny/repaper.c
++++ b/drivers/gpu/drm/tiny/repaper.c
+@@ -455,7 +455,7 @@ static void repaper_frame_fixed_repeat(struct repaper_epd *epd, u8 fixed_value,
+ 				       enum repaper_stage stage)
+ {
+ 	u64 start = local_clock();
+-	u64 end = start + (epd->factored_stage_time * 1000 * 1000);
++	u64 end = start + ((u64)epd->factored_stage_time * 1000 * 1000);
+ 
+ 	do {
+ 		repaper_frame_fixed(epd, fixed_value, stage);
+@@ -466,7 +466,7 @@ static void repaper_frame_data_repeat(struct repaper_epd *epd, const u8 *image,
+ 				      const u8 *mask, enum repaper_stage stage)
+ {
+ 	u64 start = local_clock();
+-	u64 end = start + (epd->factored_stage_time * 1000 * 1000);
++	u64 end = start + ((u64)epd->factored_stage_time * 1000 * 1000);
+ 
+ 	do {
+ 		repaper_frame_data(epd, image, mask, stage);
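
Both hunks above fix the same arithmetic bug: factored_stage_time is a 32-bit quantity (hence the cast), so multiplying it by 1000 * 1000 in 32-bit arithmetic wraps once the stage time exceeds roughly 4.29 seconds, and the computed deadline lands in the past. Casting one operand to u64 widens the whole multiplication. A standalone demonstration of the wrap (the 5000 ms stage time is an illustrative value):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t stage_ms = 5000;   /* anything above 4294 ms overflows 32 bits */
        uint64_t wrong = stage_ms * 1000 * 1000;            /* wraps first */
        uint64_t right = (uint64_t)stage_ms * 1000 * 1000;  /* widens first */

        printf("wrong=%llu right=%llu\n",
               (unsigned long long)wrong, (unsigned long long)right);
        return 0;   /* prints wrong=705032704 right=5000000000 */
    }
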
+diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
+index 3066cfdb054cc0..4a6aa36619fe39 100644
+--- a/drivers/gpu/drm/v3d/v3d_sched.c
++++ b/drivers/gpu/drm/v3d/v3d_sched.c
+@@ -410,7 +410,8 @@ v3d_rewrite_csd_job_wg_counts_from_indirect(struct v3d_cpu_job *job)
+ 	struct v3d_bo *bo = to_v3d_bo(job->base.bo[0]);
+ 	struct v3d_bo *indirect = to_v3d_bo(indirect_csd->indirect);
+ 	struct drm_v3d_submit_csd *args = &indirect_csd->job->args;
+-	u32 *wg_counts;
++	struct v3d_dev *v3d = job->base.v3d;
++	u32 num_batches, *wg_counts;
+ 
+ 	v3d_get_bo_vaddr(bo);
+ 	v3d_get_bo_vaddr(indirect);
+@@ -423,8 +424,17 @@ v3d_rewrite_csd_job_wg_counts_from_indirect(struct v3d_cpu_job *job)
+ 	args->cfg[0] = wg_counts[0] << V3D_CSD_CFG012_WG_COUNT_SHIFT;
+ 	args->cfg[1] = wg_counts[1] << V3D_CSD_CFG012_WG_COUNT_SHIFT;
+ 	args->cfg[2] = wg_counts[2] << V3D_CSD_CFG012_WG_COUNT_SHIFT;
+-	args->cfg[4] = DIV_ROUND_UP(indirect_csd->wg_size, 16) *
+-		       (wg_counts[0] * wg_counts[1] * wg_counts[2]) - 1;
++
++	num_batches = DIV_ROUND_UP(indirect_csd->wg_size, 16) *
++		      (wg_counts[0] * wg_counts[1] * wg_counts[2]);
++
++	/* V3D 7.1.6 and later don't subtract 1 from the number of batches */
++	if (v3d->ver < 71 || (v3d->ver == 71 && v3d->rev < 6))
++		args->cfg[4] = num_batches - 1;
++	else
++		args->cfg[4] = num_batches;
++
++	WARN_ON(args->cfg[4] == ~0);
+ 
+ 	for (int i = 0; i < 3; i++) {
+ 		/* 0xffffffff indicates that the uniform rewrite is not needed */
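
The hunk above splits the batch-count computation out so it can be programmed either as-is or minus one, depending on the hardware revision. A compact sketch of that version gate (DIV_ROUND_UP is redefined locally; the encoding itself is as described in the hunk's comment):

    #include <stdint.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    static uint32_t csd_cfg4(int ver, int rev, uint32_t wg_size,
                             const uint32_t wg_counts[3])
    {
        uint32_t num_batches = DIV_ROUND_UP(wg_size, 16) *
                               wg_counts[0] * wg_counts[1] * wg_counts[2];

        /* V3D before 7.1.6 expects the batch count minus one. */
        if (ver < 71 || (ver == 71 && rev < 6))
            return num_batches - 1;
        return num_batches;
    }
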
+diff --git a/drivers/gpu/drm/xe/xe_dma_buf.c b/drivers/gpu/drm/xe/xe_dma_buf.c
+index f3bf7d3157b479..78204578443f46 100644
+--- a/drivers/gpu/drm/xe/xe_dma_buf.c
++++ b/drivers/gpu/drm/xe/xe_dma_buf.c
+@@ -145,10 +145,7 @@ static void xe_dma_buf_unmap(struct dma_buf_attachment *attach,
+ 			     struct sg_table *sgt,
+ 			     enum dma_data_direction dir)
+ {
+-	struct dma_buf *dma_buf = attach->dmabuf;
+-	struct xe_bo *bo = gem_to_xe_bo(dma_buf->priv);
+-
+-	if (!xe_bo_is_vram(bo)) {
++	if (sg_page(sgt->sgl)) {
+ 		dma_unmap_sgtable(attach->dev, sgt, dir, 0);
+ 		sg_free_table(sgt);
+ 		kfree(sgt);
+diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
+index ace1fe831a7b72..98a450271f5cee 100644
+--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
++++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
+@@ -310,6 +310,13 @@ int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt)
+ 	return 0;
+ }
+ 
++/*
++ * Ensure that roundup_pow_of_two(length) doesn't overflow.
++ * Note that roundup_pow_of_two() operates on unsigned long,
++ * not on u64.
++ */
++#define MAX_RANGE_TLB_INVALIDATION_LENGTH (rounddown_pow_of_two(ULONG_MAX))
++
+ /**
+  * xe_gt_tlb_invalidation_range - Issue a TLB invalidation on this GT for an
+  * address range
+@@ -334,6 +341,7 @@ int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
+ 	struct xe_device *xe = gt_to_xe(gt);
+ #define MAX_TLB_INVALIDATION_LEN	7
+ 	u32 action[MAX_TLB_INVALIDATION_LEN];
++	u64 length = end - start;
+ 	int len = 0;
+ 
+ 	xe_gt_assert(gt, fence);
+@@ -346,11 +354,11 @@ int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
+ 
+ 	action[len++] = XE_GUC_ACTION_TLB_INVALIDATION;
+ 	action[len++] = 0; /* seqno, replaced in send_tlb_invalidation */
+-	if (!xe->info.has_range_tlb_invalidation) {
++	if (!xe->info.has_range_tlb_invalidation ||
++	    length > MAX_RANGE_TLB_INVALIDATION_LENGTH) {
+ 		action[len++] = MAKE_INVAL_OP(XE_GUC_TLB_INVAL_FULL);
+ 	} else {
+ 		u64 orig_start = start;
+-		u64 length = end - start;
+ 		u64 align;
+ 
+ 		if (length < SZ_4K)
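
The new MAX_RANGE_TLB_INVALIDATION_LENGTH guard exists because roundup_pow_of_two() takes an unsigned long, and rounding a length above the largest representable power of two would overflow; such ranges now fall back to a full invalidation. A sketch of the guard, under the assumption of a 64-bit unsigned long:

    #include <stdbool.h>
    #include <stdint.h>

    /* Largest length whose power-of-two round-up still fits in 64 bits. */
    #define MAX_RANGE_LEN (UINT64_C(1) << 63)

    static bool can_use_range_invalidation(bool has_range_inval,
                                           uint64_t start, uint64_t end)
    {
        uint64_t length = end - start;

        /* Anything larger must take the full-flush path instead. */
        return has_range_inval && length <= MAX_RANGE_LEN;
    }
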
+diff --git a/drivers/gpu/drm/xe/xe_guc_ads.c b/drivers/gpu/drm/xe/xe_guc_ads.c
+index d1902a8581ca11..e144fd41c0a762 100644
+--- a/drivers/gpu/drm/xe/xe_guc_ads.c
++++ b/drivers/gpu/drm/xe/xe_guc_ads.c
+@@ -483,24 +483,52 @@ static void fill_engine_enable_masks(struct xe_gt *gt,
+ 		       engine_enable_mask(gt, XE_ENGINE_CLASS_OTHER));
+ }
+ 
+-static void guc_prep_golden_lrc_null(struct xe_guc_ads *ads)
++/*
++ * Write the offsets corresponding to the golden LRCs. The actual data is
++ * populated later by guc_golden_lrc_populate()
++ */
++static void guc_golden_lrc_init(struct xe_guc_ads *ads)
+ {
+ 	struct xe_device *xe = ads_to_xe(ads);
++	struct xe_gt *gt = ads_to_gt(ads);
+ 	struct iosys_map info_map = IOSYS_MAP_INIT_OFFSET(ads_to_map(ads),
+ 			offsetof(struct __guc_ads_blob, system_info));
+-	u8 guc_class;
++	size_t alloc_size, real_size;
++	u32 addr_ggtt, offset;
++	int class;
++
++	offset = guc_ads_golden_lrc_offset(ads);
++	addr_ggtt = xe_bo_ggtt_addr(ads->bo) + offset;
++
++	for (class = 0; class < XE_ENGINE_CLASS_MAX; ++class) {
++		u8 guc_class;
++
++		guc_class = xe_engine_class_to_guc_class(class);
+ 
+-	for (guc_class = 0; guc_class <= GUC_MAX_ENGINE_CLASSES; ++guc_class) {
+ 		if (!info_map_read(xe, &info_map,
+ 				   engine_enabled_masks[guc_class]))
+ 			continue;
+ 
++		real_size = xe_gt_lrc_size(gt, class);
++		alloc_size = PAGE_ALIGN(real_size);
++
++		/*
++		 * This interface is slightly confusing. We need to pass the
++		 * base address of the full golden context and the size of just
++		 * the engine state, which is the section of the context image
++		 * that starts after the execlists LRC registers. This is
++		 * required to allow the GuC to restore just the engine state
++		 * when a watchdog reset occurs.
++		 * We calculate the engine state size by removing the size of
++		 * what comes before it in the context image (which is identical
++		 * on all engines).
++		 */
+ 		ads_blob_write(ads, ads.eng_state_size[guc_class],
+-			       guc_ads_golden_lrc_size(ads) -
+-			       xe_lrc_skip_size(xe));
++			       real_size - xe_lrc_skip_size(xe));
+ 		ads_blob_write(ads, ads.golden_context_lrca[guc_class],
+-			       xe_bo_ggtt_addr(ads->bo) +
+-			       guc_ads_golden_lrc_offset(ads));
++			       addr_ggtt);
++
++		addr_ggtt += alloc_size;
+ 	}
+ }
+ 
+@@ -710,7 +738,7 @@ void xe_guc_ads_populate_minimal(struct xe_guc_ads *ads)
+ 
+ 	xe_map_memset(ads_to_xe(ads), ads_to_map(ads), 0, 0, ads->bo->size);
+ 	guc_policies_init(ads);
+-	guc_prep_golden_lrc_null(ads);
++	guc_golden_lrc_init(ads);
+ 	guc_mapping_table_init_invalid(gt, &info_map);
+ 	guc_doorbell_init(ads);
+ 
+@@ -736,7 +764,7 @@ void xe_guc_ads_populate(struct xe_guc_ads *ads)
+ 	guc_policies_init(ads);
+ 	fill_engine_enable_masks(gt, &info_map);
+ 	guc_mmio_reg_state_init(ads);
+-	guc_prep_golden_lrc_null(ads);
++	guc_golden_lrc_init(ads);
+ 	guc_mapping_table_init(gt, &info_map);
+ 	guc_capture_list_init(ads);
+ 	guc_doorbell_init(ads);
+@@ -756,18 +784,22 @@ void xe_guc_ads_populate(struct xe_guc_ads *ads)
+ 		       guc_ads_private_data_offset(ads));
+ }
+ 
+-static void guc_populate_golden_lrc(struct xe_guc_ads *ads)
++/*
++ * After the golden LRC's are recorded for each engine class by the first
++ * submission, copy them to the ADS, as initialized earlier by
++ * guc_golden_lrc_init().
++ */
++static void guc_golden_lrc_populate(struct xe_guc_ads *ads)
+ {
+ 	struct xe_device *xe = ads_to_xe(ads);
+ 	struct xe_gt *gt = ads_to_gt(ads);
+ 	struct iosys_map info_map = IOSYS_MAP_INIT_OFFSET(ads_to_map(ads),
+ 			offsetof(struct __guc_ads_blob, system_info));
+ 	size_t total_size = 0, alloc_size, real_size;
+-	u32 addr_ggtt, offset;
++	u32 offset;
+ 	int class;
+ 
+ 	offset = guc_ads_golden_lrc_offset(ads);
+-	addr_ggtt = xe_bo_ggtt_addr(ads->bo) + offset;
+ 
+ 	for (class = 0; class < XE_ENGINE_CLASS_MAX; ++class) {
+ 		u8 guc_class;
+@@ -784,26 +816,9 @@ static void guc_populate_golden_lrc(struct xe_guc_ads *ads)
+ 		alloc_size = PAGE_ALIGN(real_size);
+ 		total_size += alloc_size;
+ 
+-		/*
+-		 * This interface is slightly confusing. We need to pass the
+-		 * base address of the full golden context and the size of just
+-		 * the engine state, which is the section of the context image
+-		 * that starts after the execlists LRC registers. This is
+-		 * required to allow the GuC to restore just the engine state
+-		 * when a watchdog reset occurs.
+-		 * We calculate the engine state size by removing the size of
+-		 * what comes before it in the context image (which is identical
+-		 * on all engines).
+-		 */
+-		ads_blob_write(ads, ads.eng_state_size[guc_class],
+-			       real_size - xe_lrc_skip_size(xe));
+-		ads_blob_write(ads, ads.golden_context_lrca[guc_class],
+-			       addr_ggtt);
+-
+ 		xe_map_memcpy_to(xe, ads_to_map(ads), offset,
+ 				 gt->default_lrc[class], real_size);
+ 
+-		addr_ggtt += alloc_size;
+ 		offset += alloc_size;
+ 	}
+ 
+@@ -812,7 +827,7 @@ static void guc_populate_golden_lrc(struct xe_guc_ads *ads)
+ 
+ void xe_guc_ads_populate_post_load(struct xe_guc_ads *ads)
+ {
+-	guc_populate_golden_lrc(ads);
++	guc_golden_lrc_populate(ads);
+ }
+ 
+ static int guc_ads_action_update_policies(struct xe_guc_ads *ads, u32 policy_offset)
+diff --git a/drivers/gpu/drm/xe/xe_hmm.c b/drivers/gpu/drm/xe/xe_hmm.c
+index f6bc4f29d7538e..3d0278c3db9355 100644
+--- a/drivers/gpu/drm/xe/xe_hmm.c
++++ b/drivers/gpu/drm/xe/xe_hmm.c
+@@ -19,29 +19,6 @@ static u64 xe_npages_in_range(unsigned long start, unsigned long end)
+ 	return (end - start) >> PAGE_SHIFT;
+ }
+ 
+-/**
+- * xe_mark_range_accessed() - mark a range is accessed, so core mm
+- * have such information for memory eviction or write back to
+- * hard disk
+- * @range: the range to mark
+- * @write: if write to this range, we mark pages in this range
+- * as dirty
+- */
+-static void xe_mark_range_accessed(struct hmm_range *range, bool write)
+-{
+-	struct page *page;
+-	u64 i, npages;
+-
+-	npages = xe_npages_in_range(range->start, range->end);
+-	for (i = 0; i < npages; i++) {
+-		page = hmm_pfn_to_page(range->hmm_pfns[i]);
+-		if (write)
+-			set_page_dirty_lock(page);
+-
+-		mark_page_accessed(page);
+-	}
+-}
+-
+ static int xe_alloc_sg(struct xe_device *xe, struct sg_table *st,
+ 		       struct hmm_range *range, struct rw_semaphore *notifier_sem)
+ {
+@@ -331,7 +308,6 @@ int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma,
+ 	if (ret)
+ 		goto out_unlock;
+ 
+-	xe_mark_range_accessed(&hmm_range, write);
+ 	userptr->sg = &userptr->sgt;
+ 	xe_hmm_userptr_set_mapped(uvma);
+ 	userptr->notifier_seq = hmm_range.notifier_seq;
+diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
+index 1b97d90aaddaf4..6431697c616939 100644
+--- a/drivers/gpu/drm/xe/xe_migrate.c
++++ b/drivers/gpu/drm/xe/xe_migrate.c
+@@ -1177,7 +1177,7 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
+ err_sync:
+ 		/* Sync partial copies if any. FIXME: job_mutex? */
+ 		if (fence) {
+-			dma_fence_wait(m->fence, false);
++			dma_fence_wait(fence, false);
+ 			dma_fence_put(fence);
+ 		}
+ 
+diff --git a/drivers/i2c/busses/i2c-cros-ec-tunnel.c b/drivers/i2c/busses/i2c-cros-ec-tunnel.c
+index ab2688bd4d338a..e19cb62d6796d9 100644
+--- a/drivers/i2c/busses/i2c-cros-ec-tunnel.c
++++ b/drivers/i2c/busses/i2c-cros-ec-tunnel.c
+@@ -247,6 +247,9 @@ static int ec_i2c_probe(struct platform_device *pdev)
+ 	u32 remote_bus;
+ 	int err;
+ 
++	if (!ec)
++		return dev_err_probe(dev, -EPROBE_DEFER, "couldn't find parent EC device\n");
++
+ 	if (!ec->cmd_xfer) {
+ 		dev_err(dev, "Missing sendrecv\n");
+ 		return -EINVAL;
+diff --git a/drivers/i2c/i2c-atr.c b/drivers/i2c/i2c-atr.c
+index 0d54d0b5e32731..5342e934aa5e40 100644
+--- a/drivers/i2c/i2c-atr.c
++++ b/drivers/i2c/i2c-atr.c
+@@ -8,12 +8,12 @@
+  * Originally based on i2c-mux.c
+  */
+ 
+-#include <linux/fwnode.h>
+ #include <linux/i2c-atr.h>
+ #include <linux/i2c.h>
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/mutex.h>
++#include <linux/property.h>
+ #include <linux/slab.h>
+ #include <linux/spinlock.h>
+ 
+diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
+index 91db10515d7472..176d0b3e448870 100644
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -72,6 +72,8 @@ static const char * const cma_events[] = {
+ static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid,
+ 			      enum ib_gid_type gid_type);
+ 
++static void cma_netevent_work_handler(struct work_struct *_work);
++
+ const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event)
+ {
+ 	size_t index = event;
+@@ -1033,6 +1035,7 @@ __rdma_create_id(struct net *net, rdma_cm_event_handler event_handler,
+ 	get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);
+ 	id_priv->id.route.addr.dev_addr.net = get_net(net);
+ 	id_priv->seq_num &= 0x00ffffff;
++	INIT_WORK(&id_priv->id.net_work, cma_netevent_work_handler);
+ 
+ 	rdma_restrack_new(&id_priv->res, RDMA_RESTRACK_CM_ID);
+ 	if (parent)
+@@ -5227,7 +5230,6 @@ static int cma_netevent_callback(struct notifier_block *self,
+ 		if (!memcmp(current_id->id.route.addr.dev_addr.dst_dev_addr,
+ 			   neigh->ha, ETH_ALEN))
+ 			continue;
+-		INIT_WORK(&current_id->id.net_work, cma_netevent_work_handler);
+ 		cma_id_get(current_id);
+ 		queue_work(cma_wq, &current_id->id.net_work);
+ 	}
+diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
+index e9fa22d31c2332..c48ef608302055 100644
+--- a/drivers/infiniband/core/umem_odp.c
++++ b/drivers/infiniband/core/umem_odp.c
+@@ -76,12 +76,14 @@ static inline int ib_init_umem_odp(struct ib_umem_odp *umem_odp,
+ 
+ 		npfns = (end - start) >> PAGE_SHIFT;
+ 		umem_odp->pfn_list = kvcalloc(
+-			npfns, sizeof(*umem_odp->pfn_list), GFP_KERNEL);
++			npfns, sizeof(*umem_odp->pfn_list),
++			GFP_KERNEL | __GFP_NOWARN);
+ 		if (!umem_odp->pfn_list)
+ 			return -ENOMEM;
+ 
+ 		umem_odp->dma_list = kvcalloc(
+-			ndmas, sizeof(*umem_odp->dma_list), GFP_KERNEL);
++			ndmas, sizeof(*umem_odp->dma_list),
++			GFP_KERNEL | __GFP_NOWARN);
+ 		if (!umem_odp->dma_list) {
+ 			ret = -ENOMEM;
+ 			goto out_pfn_list;
+diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
+index cf89a8db4f64cd..8d0b63d4b50a6c 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_main.c
++++ b/drivers/infiniband/hw/hns/hns_roce_main.c
+@@ -763,7 +763,7 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
+ 		if (ret)
+ 			return ret;
+ 	}
+-	dma_set_max_seg_size(dev, UINT_MAX);
++	dma_set_max_seg_size(dev, SZ_2G);
+ 	ret = ib_register_device(ib_dev, "hns_%d", dev);
+ 	if (ret) {
+ 		dev_err(dev, "ib_register_device failed!\n");
+diff --git a/drivers/infiniband/hw/usnic/usnic_ib_main.c b/drivers/infiniband/hw/usnic/usnic_ib_main.c
+index 13b654ddd3cc8d..bcf7d8607d56ef 100644
+--- a/drivers/infiniband/hw/usnic/usnic_ib_main.c
++++ b/drivers/infiniband/hw/usnic/usnic_ib_main.c
+@@ -380,7 +380,7 @@ static void *usnic_ib_device_add(struct pci_dev *dev)
+ 	if (!us_ibdev) {
+ 		usnic_err("Device %s context alloc failed\n",
+ 				netdev_name(pci_get_drvdata(dev)));
+-		return ERR_PTR(-EFAULT);
++		return NULL;
+ 	}
+ 
+ 	us_ibdev->ufdev = usnic_fwd_dev_alloc(dev);
+@@ -500,8 +500,8 @@ static struct usnic_ib_dev *usnic_ib_discover_pf(struct usnic_vnic *vnic)
+ 	}
+ 
+ 	us_ibdev = usnic_ib_device_add(parent_pci);
+-	if (IS_ERR_OR_NULL(us_ibdev)) {
+-		us_ibdev = us_ibdev ? us_ibdev : ERR_PTR(-EFAULT);
++	if (!us_ibdev) {
++		us_ibdev = ERR_PTR(-EFAULT);
+ 		goto out;
+ 	}
+ 
+@@ -569,10 +569,10 @@ static int usnic_ib_pci_probe(struct pci_dev *pdev,
+ 	}
+ 
+ 	pf = usnic_ib_discover_pf(vf->vnic);
+-	if (IS_ERR_OR_NULL(pf)) {
+-		usnic_err("Failed to discover pf of vnic %s with err%ld\n",
+-				pci_name(pdev), PTR_ERR(pf));
+-		err = pf ? PTR_ERR(pf) : -EFAULT;
++	if (IS_ERR(pf)) {
++		err = PTR_ERR(pf);
++		usnic_err("Failed to discover pf of vnic %s with err%d\n",
++				pci_name(pdev), err);
+ 		goto out_clean_vnic;
+ 	}
+ 
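
The usnic change above is about the kernel's pointer-error convention: a function should consistently return either NULL or an ERR_PTR on failure, so callers can test IS_ERR() (or a plain NULL check) alone instead of IS_ERR_OR_NULL(). Simplified versions of the linux/err.h helpers, for reference (the real ones encode errnos in the top 4095 addresses):

    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long error)
    {
        return (void *)error;           /* e.g. ERR_PTR(-EFAULT) */
    }

    static inline long PTR_ERR(const void *ptr)
    {
        return (long)ptr;
    }

    static inline int IS_ERR(const void *ptr)
    {
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }
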
+diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
+index 2e3087556adb37..fbb4f57010da69 100644
+--- a/drivers/md/md-bitmap.c
++++ b/drivers/md/md-bitmap.c
+@@ -2355,9 +2355,8 @@ static int bitmap_get_stats(void *data, struct md_bitmap_stats *stats)
+ 
+ 	if (!bitmap)
+ 		return -ENOENT;
+-	if (bitmap->mddev->bitmap_info.external)
+-		return -ENOENT;
+-	if (!bitmap->storage.sb_page) /* no superblock */
++	if (!bitmap->mddev->bitmap_info.external &&
++	    !bitmap->storage.sb_page)
+ 		return -EINVAL;
+ 	sb = kmap_local_page(bitmap->storage.sb_page);
+ 	stats->sync_size = le64_to_cpu(sb->sync_size);
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index fff28aea23c89e..7809b951e09aa0 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -629,6 +629,12 @@ static void __mddev_put(struct mddev *mddev)
+ 	queue_work(md_misc_wq, &mddev->del_work);
+ }
+ 
++static void mddev_put_locked(struct mddev *mddev)
++{
++	if (atomic_dec_and_test(&mddev->active))
++		__mddev_put(mddev);
++}
++
+ void mddev_put(struct mddev *mddev)
+ {
+ 	if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
+@@ -8461,9 +8467,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
+ 	if (mddev == list_last_entry(&all_mddevs, struct mddev, all_mddevs))
+ 		status_unused(seq);
+ 
+-	if (atomic_dec_and_test(&mddev->active))
+-		__mddev_put(mddev);
+-
++	mddev_put_locked(mddev);
+ 	return 0;
+ }
+ 
+@@ -9886,11 +9890,11 @@ EXPORT_SYMBOL_GPL(rdev_clear_badblocks);
+ static int md_notify_reboot(struct notifier_block *this,
+ 			    unsigned long code, void *x)
+ {
+-	struct mddev *mddev, *n;
++	struct mddev *mddev;
+ 	int need_delay = 0;
+ 
+ 	spin_lock(&all_mddevs_lock);
+-	list_for_each_entry_safe(mddev, n, &all_mddevs, all_mddevs) {
++	list_for_each_entry(mddev, &all_mddevs, all_mddevs) {
+ 		if (!mddev_get(mddev))
+ 			continue;
+ 		spin_unlock(&all_mddevs_lock);
+@@ -9902,8 +9906,8 @@ static int md_notify_reboot(struct notifier_block *this,
+ 			mddev_unlock(mddev);
+ 		}
+ 		need_delay = 1;
+-		mddev_put(mddev);
+ 		spin_lock(&all_mddevs_lock);
++		mddev_put_locked(mddev);
+ 	}
+ 	spin_unlock(&all_mddevs_lock);
+ 
+@@ -10236,7 +10240,7 @@ void md_autostart_arrays(int part)
+ 
+ static __exit void md_exit(void)
+ {
+-	struct mddev *mddev, *n;
++	struct mddev *mddev;
+ 	int delay = 1;
+ 
+ 	unregister_blkdev(MD_MAJOR,"md");
+@@ -10257,7 +10261,7 @@ static __exit void md_exit(void)
+ 	remove_proc_entry("mdstat", NULL);
+ 
+ 	spin_lock(&all_mddevs_lock);
+-	list_for_each_entry_safe(mddev, n, &all_mddevs, all_mddevs) {
++	list_for_each_entry(mddev, &all_mddevs, all_mddevs) {
+ 		if (!mddev_get(mddev))
+ 			continue;
+ 		spin_unlock(&all_mddevs_lock);
+@@ -10269,8 +10273,8 @@ static __exit void md_exit(void)
+ 		 * the mddev for destruction by a workqueue, and the
+ 		 * destroy_workqueue() below will wait for that to complete.
+ 		 */
+-		mddev_put(mddev);
+ 		spin_lock(&all_mddevs_lock);
++		mddev_put_locked(mddev);
+ 	}
+ 	spin_unlock(&all_mddevs_lock);
+ 
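
The hunks above move the final reference drop inside all_mddevs_lock (via the new mddev_put_locked()), which is what lets the loops go back from list_for_each_entry_safe() to plain list_for_each_entry(): the cursor can no longer be freed between the put and the next list step. A minimal userspace sketch of the put-under-lock idiom (a pthread mutex stands in for the spinlock; destruction is stubbed):

    #include <pthread.h>
    #include <stdatomic.h>

    struct obj {
        atomic_int active;
        /* list linkage would live here */
    };

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

    static void obj_schedule_destroy(struct obj *o)
    {
        (void)o;    /* stub: a real driver queues asynchronous teardown */
    }

    /* Must be called with list_lock held: the object cannot be unlinked
     * concurrently, so the iterator's next pointer stays valid. */
    static void obj_put_locked(struct obj *o)
    {
        if (atomic_fetch_sub(&o->active, 1) == 1)
            obj_schedule_destroy(o);
    }
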
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index a214fed4f16226..cc194f6ec18dab 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -1687,6 +1687,7 @@ static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
+ 	 * The discard bio returns only first r10bio finishes
+ 	 */
+ 	if (first_copy) {
++		md_account_bio(mddev, &bio);
+ 		r10_bio->master_bio = bio;
+ 		set_bit(R10BIO_Discard, &r10_bio->state);
+ 		first_copy = false;
+diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c
+index 8dea2b44fd8bfe..e22afb420d099e 100644
+--- a/drivers/misc/pci_endpoint_test.c
++++ b/drivers/misc/pci_endpoint_test.c
+@@ -251,6 +251,9 @@ static bool pci_endpoint_test_request_irq(struct pci_endpoint_test *test)
+ 		break;
+ 	}
+ 
++	test->num_irqs = i;
++	pci_endpoint_test_release_irq(test);
++
+ 	return false;
+ }
+ 
+@@ -738,6 +741,7 @@ static bool pci_endpoint_test_set_irq(struct pci_endpoint_test *test,
+ 	if (!pci_endpoint_test_request_irq(test))
+ 		goto err;
+ 
++	irq_type = test->irq_type;
+ 	return true;
+ 
+ err:
+diff --git a/drivers/net/can/rockchip/rockchip_canfd-core.c b/drivers/net/can/rockchip/rockchip_canfd-core.c
+index d9a937ba126c3c..ac514766d431ce 100644
+--- a/drivers/net/can/rockchip/rockchip_canfd-core.c
++++ b/drivers/net/can/rockchip/rockchip_canfd-core.c
+@@ -907,15 +907,16 @@ static int rkcanfd_probe(struct platform_device *pdev)
+ 	priv->can.data_bittiming_const = &rkcanfd_data_bittiming_const;
+ 	priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
+ 		CAN_CTRLMODE_BERR_REPORTING;
+-	if (!(priv->devtype_data.quirks & RKCANFD_QUIRK_CANFD_BROKEN))
+-		priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD;
+ 	priv->can.do_set_mode = rkcanfd_set_mode;
+ 	priv->can.do_get_berr_counter = rkcanfd_get_berr_counter;
+ 	priv->ndev = ndev;
+ 
+ 	match = device_get_match_data(&pdev->dev);
+-	if (match)
++	if (match) {
+ 		priv->devtype_data = *(struct rkcanfd_devtype_data *)match;
++		if (!(priv->devtype_data.quirks & RKCANFD_QUIRK_CANFD_BROKEN))
++			priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD;
++	}
+ 
+ 	err = can_rx_offload_add_manual(ndev, &priv->offload,
+ 					RKCANFD_NAPI_WEIGHT);
+diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
+index c39cb119e760db..d4600ab0b70b3b 100644
+--- a/drivers/net/dsa/b53/b53_common.c
++++ b/drivers/net/dsa/b53/b53_common.c
+@@ -737,6 +737,15 @@ static void b53_enable_mib(struct b53_device *dev)
+ 	b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc);
+ }
+ 
++static void b53_enable_stp(struct b53_device *dev)
++{
++	u8 gc;
++
++	b53_read8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &gc);
++	gc |= GC_RX_BPDU_EN;
++	b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc);
++}
++
+ static u16 b53_default_pvid(struct b53_device *dev)
+ {
+ 	if (is5325(dev) || is5365(dev))
+@@ -876,6 +885,7 @@ static int b53_switch_reset(struct b53_device *dev)
+ 	}
+ 
+ 	b53_enable_mib(dev);
++	b53_enable_stp(dev);
+ 
+ 	return b53_flush_arl(dev, FAST_AGE_STATIC);
+ }
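
b53_enable_stp() above is a plain read-modify-write of the global-config register, setting the BPDU-receive bit without disturbing the others. The same shape in a standalone sketch (the register array and bit position are illustrative stand-ins for the switch's MMIO/MDIO accessors):

    #include <stdint.h>

    #define GLOBAL_CONFIG 0x00
    #define RX_BPDU_EN    (1u << 1)     /* illustrative bit position */

    static uint8_t regs[256];           /* stands in for switch registers */

    static void enable_stp(void)
    {
        uint8_t gc = regs[GLOBAL_CONFIG];   /* read   */
        gc |= RX_BPDU_EN;                   /* modify */
        regs[GLOBAL_CONFIG] = gc;           /* write  */
    }
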
+diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
+index e20d9d62032e31..df1df601541217 100644
+--- a/drivers/net/dsa/mv88e6xxx/chip.c
++++ b/drivers/net/dsa/mv88e6xxx/chip.c
+@@ -1878,6 +1878,8 @@ static int mv88e6xxx_vtu_get(struct mv88e6xxx_chip *chip, u16 vid,
+ 	if (!chip->info->ops->vtu_getnext)
+ 		return -EOPNOTSUPP;
+ 
++	memset(entry, 0, sizeof(*entry));
++
+ 	entry->vid = vid ? vid - 1 : mv88e6xxx_max_vid(chip);
+ 	entry->valid = false;
+ 
+@@ -2013,7 +2015,16 @@ static int mv88e6xxx_mst_put(struct mv88e6xxx_chip *chip, u8 sid)
+ 	struct mv88e6xxx_mst *mst, *tmp;
+ 	int err;
+ 
+-	if (!sid)
++	/* If the SID is zero, it is for a VLAN mapped to the default MSTI,
++	 * and mv88e6xxx_stu_setup() made sure it is always present, and thus,
++	 * should not be removed here.
++	 *
++	 * If the chip lacks STU support, numerically the "sid" variable will
++	 * happen to also be zero, but we don't want to rely on that fact, so
++	 * we explicitly test that first. In that case, there is also nothing
++	 * to do here.
++	 */
++	if (!mv88e6xxx_has_stu(chip) || !sid)
+ 		return 0;
+ 
+ 	list_for_each_entry_safe(mst, tmp, &chip->msts, node) {
+diff --git a/drivers/net/dsa/mv88e6xxx/devlink.c b/drivers/net/dsa/mv88e6xxx/devlink.c
+index a08dab75e0c0c1..f57fde02077d22 100644
+--- a/drivers/net/dsa/mv88e6xxx/devlink.c
++++ b/drivers/net/dsa/mv88e6xxx/devlink.c
+@@ -743,7 +743,8 @@ void mv88e6xxx_teardown_devlink_regions_global(struct dsa_switch *ds)
+ 	int i;
+ 
+ 	for (i = 0; i < ARRAY_SIZE(mv88e6xxx_regions); i++)
+-		dsa_devlink_region_destroy(chip->regions[i]);
++		if (chip->regions[i])
++			dsa_devlink_region_destroy(chip->regions[i]);
+ }
+ 
+ void mv88e6xxx_teardown_devlink_regions_port(struct dsa_switch *ds, int port)
+diff --git a/drivers/net/ethernet/amd/pds_core/debugfs.c b/drivers/net/ethernet/amd/pds_core/debugfs.c
+index ac37a4e738ae7d..04c5e3abd8d706 100644
+--- a/drivers/net/ethernet/amd/pds_core/debugfs.c
++++ b/drivers/net/ethernet/amd/pds_core/debugfs.c
+@@ -154,8 +154,9 @@ void pdsc_debugfs_add_qcq(struct pdsc *pdsc, struct pdsc_qcq *qcq)
+ 		debugfs_create_u32("index", 0400, intr_dentry, &intr->index);
+ 		debugfs_create_u32("vector", 0400, intr_dentry, &intr->vector);
+ 
+-		intr_ctrl_regset = kzalloc(sizeof(*intr_ctrl_regset),
+-					   GFP_KERNEL);
++		intr_ctrl_regset = devm_kzalloc(pdsc->dev,
++						sizeof(*intr_ctrl_regset),
++						GFP_KERNEL);
+ 		if (!intr_ctrl_regset)
+ 			return;
+ 		intr_ctrl_regset->regs = intr_ctrl_regs;
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index e7580df13229a6..016dcfec8d4965 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -758,7 +758,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ 	dev_kfree_skb_any(skb);
+ tx_kick_pending:
+ 	if (BNXT_TX_PTP_IS_SET(lflags)) {
+-		txr->tx_buf_ring[txr->tx_prod].is_ts_pkt = 0;
++		txr->tx_buf_ring[RING_TX(bp, txr->tx_prod)].is_ts_pkt = 0;
+ 		atomic64_inc(&bp->ptp_cfg->stats.ts_err);
+ 		if (!(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP))
+ 			/* set SKB to err so PTP worker will clean up */
+@@ -766,7 +766,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ 	}
+ 	if (txr->kick_pending)
+ 		bnxt_txr_db_kick(bp, txr, txr->tx_prod);
+-	txr->tx_buf_ring[txr->tx_prod].skb = NULL;
++	txr->tx_buf_ring[RING_TX(bp, txr->tx_prod)].skb = NULL;
+ 	dev_core_stats_tx_dropped_inc(dev);
+ 	return NETDEV_TX_OK;
+ }
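
The bnxt fix above matters because the transmit producer index counts past the ring length on newer chips, so the error path must mask it (via RING_TX()) before using it as an array index, exactly as the fast path does. The generic power-of-two ring idiom, as a sketch with an assumed ring size:

    #include <stdint.h>

    #define RING_SIZE 256                   /* must be a power of two */
    #define RING_MASK (RING_SIZE - 1)

    struct tx_buf { void *skb; };

    static struct tx_buf tx_ring[RING_SIZE];

    /* prod is a free-running counter; always mask before indexing. */
    static struct tx_buf *tx_slot(uint32_t prod)
    {
        return &tx_ring[prod & RING_MASK];
    }
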
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+index 7f3f5afa864f4a..1546c3db08f093 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+@@ -2270,6 +2270,7 @@ int cxgb4_init_ethtool_filters(struct adapter *adap)
+ 		eth_filter->port[i].bmap = bitmap_zalloc(nentries, GFP_KERNEL);
+ 		if (!eth_filter->port[i].bmap) {
+ 			ret = -ENOMEM;
++			kvfree(eth_filter->port[i].loc_array);
+ 			goto free_eth_finfo;
+ 		}
+ 	}
+diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
+index eac0f966e0e4c5..323db1e2be3886 100644
+--- a/drivers/net/ethernet/intel/igc/igc.h
++++ b/drivers/net/ethernet/intel/igc/igc.h
+@@ -319,6 +319,7 @@ struct igc_adapter {
+ 	struct timespec64 prev_ptp_time; /* Pre-reset PTP clock */
+ 	ktime_t ptp_reset_start; /* Reset time in clock mono */
+ 	struct system_time_snapshot snapshot;
++	struct mutex ptm_lock; /* Only allow one PTM transaction at a time */
+ 
+ 	char fw_version[32];
+ 
+diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
+index 8e449904aa7dbd..d19325b0e6e0ba 100644
+--- a/drivers/net/ethernet/intel/igc/igc_defines.h
++++ b/drivers/net/ethernet/intel/igc/igc_defines.h
+@@ -574,7 +574,10 @@
+ #define IGC_PTM_CTRL_SHRT_CYC(usec)	(((usec) & 0x3f) << 2)
+ #define IGC_PTM_CTRL_PTM_TO(usec)	(((usec) & 0xff) << 8)
+ 
+-#define IGC_PTM_SHORT_CYC_DEFAULT	1   /* Default short cycle interval */
++/* A short cycle time of 1us theoretically should work, but appears to be too
++ * short in practice.
++ */
++#define IGC_PTM_SHORT_CYC_DEFAULT	4   /* Default short cycle interval */
+ #define IGC_PTM_CYC_TIME_DEFAULT	5   /* Default PTM cycle time */
+ #define IGC_PTM_TIMEOUT_DEFAULT		255 /* Default timeout for PTM errors */
+ 
+@@ -593,6 +596,7 @@
+ #define IGC_PTM_STAT_T4M1_OVFL		BIT(3) /* T4 minus T1 overflow */
+ #define IGC_PTM_STAT_ADJUST_1ST		BIT(4) /* 1588 timer adjusted during 1st PTM cycle */
+ #define IGC_PTM_STAT_ADJUST_CYC		BIT(5) /* 1588 timer adjusted during non-1st PTM cycle */
++#define IGC_PTM_STAT_ALL		GENMASK(5, 0) /* Used to clear all status */
+ 
+ /* PCIe PTM Cycle Control */
+ #define IGC_PTM_CYCLE_CTRL_CYC_TIME(msec)	((msec) & 0x3ff) /* PTM Cycle Time (msec) */
+diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
+index 1ec9e8cc99d947..082b0baf5d37c5 100644
+--- a/drivers/net/ethernet/intel/igc/igc_main.c
++++ b/drivers/net/ethernet/intel/igc/igc_main.c
+@@ -7173,6 +7173,7 @@ static int igc_probe(struct pci_dev *pdev,
+ 
+ err_register:
+ 	igc_release_hw_control(adapter);
++	igc_ptp_stop(adapter);
+ err_eeprom:
+ 	if (!igc_check_reset_block(hw))
+ 		igc_reset_phy(hw);
+diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
+index 946edbad43022c..612ed26a29c5d4 100644
+--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
++++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
+@@ -974,45 +974,62 @@ static void igc_ptm_log_error(struct igc_adapter *adapter, u32 ptm_stat)
+ 	}
+ }
+ 
++/* The PTM lock: adapter->ptm_lock must be held when calling igc_ptm_trigger() */
++static void igc_ptm_trigger(struct igc_hw *hw)
++{
++	u32 ctrl;
++
++	/* To "manually" start the PTM cycle we need to set the
++	 * trigger (TRIG) bit
++	 */
++	ctrl = rd32(IGC_PTM_CTRL);
++	ctrl |= IGC_PTM_CTRL_TRIG;
++	wr32(IGC_PTM_CTRL, ctrl);
++	/* Perform flush after write to CTRL register otherwise
++	 * transaction may not start
++	 */
++	wrfl();
++}
++
++/* The PTM lock: adapter->ptm_lock must be held when calling igc_ptm_reset() */
++static void igc_ptm_reset(struct igc_hw *hw)
++{
++	u32 ctrl;
++
++	ctrl = rd32(IGC_PTM_CTRL);
++	ctrl &= ~IGC_PTM_CTRL_TRIG;
++	wr32(IGC_PTM_CTRL, ctrl);
++	/* Write to clear all status */
++	wr32(IGC_PTM_STAT, IGC_PTM_STAT_ALL);
++}
++
+ static int igc_phc_get_syncdevicetime(ktime_t *device,
+ 				      struct system_counterval_t *system,
+ 				      void *ctx)
+ {
+-	u32 stat, t2_curr_h, t2_curr_l, ctrl;
+ 	struct igc_adapter *adapter = ctx;
+ 	struct igc_hw *hw = &adapter->hw;
++	u32 stat, t2_curr_h, t2_curr_l;
+ 	int err, count = 100;
+ 	ktime_t t1, t2_curr;
+ 
+-	/* Get a snapshot of system clocks to use as historic value. */
+-	ktime_get_snapshot(&adapter->snapshot);
+-
++	/* Doing this in a loop because in the event of a
++	 * badly timed (ha!) system clock adjustment, we may
++	 * get PTM errors from the PCI root, but these errors
++	 * are transitory. Repeating the process returns valid
++	 * data eventually.
++	 */
+ 	do {
+-		/* Doing this in a loop because in the event of a
+-		 * badly timed (ha!) system clock adjustment, we may
+-		 * get PTM errors from the PCI root, but these errors
+-		 * are transitory. Repeating the process returns valid
+-		 * data eventually.
+-		 */
++		/* Get a snapshot of system clocks to use as historic value. */
++		ktime_get_snapshot(&adapter->snapshot);
+ 
+-		/* To "manually" start the PTM cycle we need to clear and
+-		 * then set again the TRIG bit.
+-		 */
+-		ctrl = rd32(IGC_PTM_CTRL);
+-		ctrl &= ~IGC_PTM_CTRL_TRIG;
+-		wr32(IGC_PTM_CTRL, ctrl);
+-		ctrl |= IGC_PTM_CTRL_TRIG;
+-		wr32(IGC_PTM_CTRL, ctrl);
+-
+-		/* The cycle only starts "for real" when software notifies
+-		 * that it has read the registers, this is done by setting
+-		 * VALID bit.
+-		 */
+-		wr32(IGC_PTM_STAT, IGC_PTM_STAT_VALID);
++		igc_ptm_trigger(hw);
+ 
+ 		err = readx_poll_timeout(rd32, IGC_PTM_STAT, stat,
+ 					 stat, IGC_PTM_STAT_SLEEP,
+ 					 IGC_PTM_STAT_TIMEOUT);
++		igc_ptm_reset(hw);
++
+ 		if (err < 0) {
+ 			netdev_err(adapter->netdev, "Timeout reading IGC_PTM_STAT register\n");
+ 			return err;
+@@ -1021,15 +1038,7 @@ static int igc_phc_get_syncdevicetime(ktime_t *device,
+ 		if ((stat & IGC_PTM_STAT_VALID) == IGC_PTM_STAT_VALID)
+ 			break;
+ 
+-		if (stat & ~IGC_PTM_STAT_VALID) {
+-			/* An error occurred, log it. */
+-			igc_ptm_log_error(adapter, stat);
+-			/* The STAT register is write-1-to-clear (W1C),
+-			 * so write the previous error status to clear it.
+-			 */
+-			wr32(IGC_PTM_STAT, stat);
+-			continue;
+-		}
++		igc_ptm_log_error(adapter, stat);
+ 	} while (--count);
+ 
+ 	if (!count) {
+@@ -1061,9 +1070,16 @@ static int igc_ptp_getcrosststamp(struct ptp_clock_info *ptp,
+ {
+ 	struct igc_adapter *adapter = container_of(ptp, struct igc_adapter,
+ 						   ptp_caps);
++	int ret;
++
++	/* This blocks until any in progress PTM transactions complete */
++	mutex_lock(&adapter->ptm_lock);
+ 
+-	return get_device_system_crosststamp(igc_phc_get_syncdevicetime,
+-					     adapter, &adapter->snapshot, cts);
++	ret = get_device_system_crosststamp(igc_phc_get_syncdevicetime,
++					    adapter, &adapter->snapshot, cts);
++	mutex_unlock(&adapter->ptm_lock);
++
++	return ret;
+ }
+ 
+ static int igc_ptp_getcyclesx64(struct ptp_clock_info *ptp,
+@@ -1162,6 +1178,7 @@ void igc_ptp_init(struct igc_adapter *adapter)
+ 	spin_lock_init(&adapter->ptp_tx_lock);
+ 	spin_lock_init(&adapter->free_timer_lock);
+ 	spin_lock_init(&adapter->tmreg_lock);
++	mutex_init(&adapter->ptm_lock);
+ 
+ 	adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+ 	adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
+@@ -1174,6 +1191,7 @@ void igc_ptp_init(struct igc_adapter *adapter)
+ 	if (IS_ERR(adapter->ptp_clock)) {
+ 		adapter->ptp_clock = NULL;
+ 		netdev_err(netdev, "ptp_clock_register failed\n");
++		mutex_destroy(&adapter->ptm_lock);
+ 	} else if (adapter->ptp_clock) {
+ 		netdev_info(netdev, "PHC added\n");
+ 		adapter->ptp_flags |= IGC_PTP_ENABLED;
+@@ -1203,10 +1221,12 @@ static void igc_ptm_stop(struct igc_adapter *adapter)
+ 	struct igc_hw *hw = &adapter->hw;
+ 	u32 ctrl;
+ 
++	mutex_lock(&adapter->ptm_lock);
+ 	ctrl = rd32(IGC_PTM_CTRL);
+ 	ctrl &= ~IGC_PTM_CTRL_EN;
+ 
+ 	wr32(IGC_PTM_CTRL, ctrl);
++	mutex_unlock(&adapter->ptm_lock);
+ }
+ 
+ /**
+@@ -1237,13 +1257,18 @@ void igc_ptp_suspend(struct igc_adapter *adapter)
+  **/
+ void igc_ptp_stop(struct igc_adapter *adapter)
+ {
++	if (!(adapter->ptp_flags & IGC_PTP_ENABLED))
++		return;
++
+ 	igc_ptp_suspend(adapter);
+ 
++	adapter->ptp_flags &= ~IGC_PTP_ENABLED;
+ 	if (adapter->ptp_clock) {
+ 		ptp_clock_unregister(adapter->ptp_clock);
+ 		netdev_info(adapter->netdev, "PHC removed\n");
+ 		adapter->ptp_flags &= ~IGC_PTP_ENABLED;
+ 	}
++	mutex_destroy(&adapter->ptm_lock);
+ }
+ 
+ /**
+@@ -1255,10 +1280,13 @@ void igc_ptp_stop(struct igc_adapter *adapter)
+ void igc_ptp_reset(struct igc_adapter *adapter)
+ {
+ 	struct igc_hw *hw = &adapter->hw;
+-	u32 cycle_ctrl, ctrl;
++	u32 cycle_ctrl, ctrl, stat;
+ 	unsigned long flags;
+ 	u32 timadj;
+ 
++	if (!(adapter->ptp_flags & IGC_PTP_ENABLED))
++		return;
++
+ 	/* reset the tstamp_config */
+ 	igc_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config);
+ 
+@@ -1280,6 +1308,7 @@ void igc_ptp_reset(struct igc_adapter *adapter)
+ 		if (!igc_is_crosststamp_supported(adapter))
+ 			break;
+ 
++		mutex_lock(&adapter->ptm_lock);
+ 		wr32(IGC_PCIE_DIG_DELAY, IGC_PCIE_DIG_DELAY_DEFAULT);
+ 		wr32(IGC_PCIE_PHY_DELAY, IGC_PCIE_PHY_DELAY_DEFAULT);
+ 
+@@ -1290,14 +1319,20 @@ void igc_ptp_reset(struct igc_adapter *adapter)
+ 		ctrl = IGC_PTM_CTRL_EN |
+ 			IGC_PTM_CTRL_START_NOW |
+ 			IGC_PTM_CTRL_SHRT_CYC(IGC_PTM_SHORT_CYC_DEFAULT) |
+-			IGC_PTM_CTRL_PTM_TO(IGC_PTM_TIMEOUT_DEFAULT) |
+-			IGC_PTM_CTRL_TRIG;
++			IGC_PTM_CTRL_PTM_TO(IGC_PTM_TIMEOUT_DEFAULT);
+ 
+ 		wr32(IGC_PTM_CTRL, ctrl);
+ 
+ 		/* Force the first cycle to run. */
+-		wr32(IGC_PTM_STAT, IGC_PTM_STAT_VALID);
++		igc_ptm_trigger(hw);
++
++		if (readx_poll_timeout_atomic(rd32, IGC_PTM_STAT, stat,
++					      stat, IGC_PTM_STAT_SLEEP,
++					      IGC_PTM_STAT_TIMEOUT))
++			netdev_err(adapter->netdev, "Timeout reading IGC_PTM_STAT register\n");
+ 
++		igc_ptm_reset(hw);
++		mutex_unlock(&adapter->ptm_lock);
+ 		break;
+ 	default:
+ 		/* No work to do. */
+diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
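
Two patterns carry the igc change above: a mutex (ptm_lock) serializes entire PTM transactions end to end, and IGC_PTM_STAT is a write-1-to-clear register, so writing IGC_PTM_STAT_ALL back clears every latched status bit in one access. A runnable emulation of the W1C semantics:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t ptm_stat;           /* stands in for the MMIO register */

    /* Write-1-to-clear: each 1 written clears the matching latched bit. */
    static void ptm_stat_write(uint32_t val)
    {
        ptm_stat &= ~val;
    }

    int main(void)
    {
        ptm_stat = 0x2a;                /* some latched error bits */
        ptm_stat_write(0x3f);           /* the clear-all mask */
        printf("stat=0x%x\n", ptm_stat);    /* prints stat=0x0 */
        return 0;
    }
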
+index ed7313c10a0524..d408dcda76d794 100644
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -734,7 +734,7 @@ static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx,
+ 		case SPEED_100:
+ 			val |= MTK_QTX_SCH_MAX_RATE_EN |
+ 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 103) |
+-			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 3);
++			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 3) |
+ 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
+ 			break;
+ 		case SPEED_1000:
+@@ -757,13 +757,13 @@ static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx,
+ 		case SPEED_100:
+ 			val |= MTK_QTX_SCH_MAX_RATE_EN |
+ 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
+-			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5);
++			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) |
+ 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
+ 			break;
+ 		case SPEED_1000:
+ 			val |= MTK_QTX_SCH_MAX_RATE_EN |
+-			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 10) |
+-			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) |
++			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
++			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 6) |
+ 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 10);
+ 			break;
+ 		default:
+@@ -823,9 +823,25 @@ static const struct phylink_mac_ops mtk_phylink_ops = {
+ 	.mac_link_up = mtk_mac_link_up,
+ };
+ 
++static void mtk_mdio_config(struct mtk_eth *eth)
++{
++	u32 val;
++
++	/* Configure MDC Divider */
++	val = FIELD_PREP(PPSC_MDC_CFG, eth->mdc_divider);
++
++	/* Configure MDC Turbo Mode */
++	if (mtk_is_netsys_v3_or_greater(eth))
++		mtk_m32(eth, 0, MISC_MDC_TURBO, MTK_MAC_MISC_V3);
++	else
++		val |= PPSC_MDC_TURBO;
++
++	mtk_m32(eth, PPSC_MDC_CFG, val, MTK_PPSC);
++}
++
+ static int mtk_mdio_init(struct mtk_eth *eth)
+ {
+-	unsigned int max_clk = 2500000, divider;
++	unsigned int max_clk = 2500000;
+ 	struct device_node *mii_np;
+ 	int ret;
+ 	u32 val;
+@@ -865,20 +881,9 @@ static int mtk_mdio_init(struct mtk_eth *eth)
+ 		}
+ 		max_clk = val;
+ 	}
+-	divider = min_t(unsigned int, DIV_ROUND_UP(MDC_MAX_FREQ, max_clk), 63);
+-
+-	/* Configure MDC Turbo Mode */
+-	if (mtk_is_netsys_v3_or_greater(eth))
+-		mtk_m32(eth, 0, MISC_MDC_TURBO, MTK_MAC_MISC_V3);
+-
+-	/* Configure MDC Divider */
+-	val = FIELD_PREP(PPSC_MDC_CFG, divider);
+-	if (!mtk_is_netsys_v3_or_greater(eth))
+-		val |= PPSC_MDC_TURBO;
+-	mtk_m32(eth, PPSC_MDC_CFG, val, MTK_PPSC);
+-
+-	dev_dbg(eth->dev, "MDC is running on %d Hz\n", MDC_MAX_FREQ / divider);
+-
++	eth->mdc_divider = min_t(unsigned int, DIV_ROUND_UP(MDC_MAX_FREQ, max_clk), 63);
++	mtk_mdio_config(eth);
++	dev_dbg(eth->dev, "MDC is running on %d Hz\n", MDC_MAX_FREQ / eth->mdc_divider);
+ 	ret = of_mdiobus_register(eth->mii_bus, mii_np);
+ 
+ err_put_node:
+@@ -3269,7 +3274,7 @@ static int mtk_start_dma(struct mtk_eth *eth)
+ 		if (mtk_is_netsys_v2_or_greater(eth))
+ 			val |= MTK_MUTLI_CNT | MTK_RESV_BUF |
+ 			       MTK_WCOMP_EN | MTK_DMAD_WR_WDONE |
+-			       MTK_CHK_DDONE_EN | MTK_LEAKY_BUCKET_EN;
++			       MTK_CHK_DDONE_EN;
+ 		else
+ 			val |= MTK_RX_BT_32DWORDS;
+ 		mtk_w32(eth, val, reg_map->qdma.glo_cfg);
+@@ -3928,6 +3933,10 @@ static int mtk_hw_init(struct mtk_eth *eth, bool reset)
+ 	else
+ 		mtk_hw_reset(eth);
+ 
++	/* No MT7628/88 support yet */
++	if (reset && !MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
++		mtk_mdio_config(eth);
++
+ 	if (mtk_is_netsys_v3_or_greater(eth)) {
+ 		/* Set FE to PDMAv2 if necessary */
+ 		val = mtk_r32(eth, MTK_FE_GLO_MISC);
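
The MDC divider above is now computed once and cached in eth->mdc_divider so mtk_mdio_config() can reprogram it after a reset: divide the controller clock by the requested ceiling, round up, and clamp to the 6-bit PPSC_MDC_CFG field. The arithmetic on its own (the 25 MHz controller clock is an assumption for illustration):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
    #define MDC_MAX_FREQ 25000000u      /* assumed controller clock, Hz */

    int main(void)
    {
        unsigned int max_clk = 2500000; /* requested MDC ceiling, Hz */
        unsigned int div = DIV_ROUND_UP(MDC_MAX_FREQ, max_clk);

        if (div > 63)                   /* clamp to the 6-bit field */
            div = 63;

        printf("divider=%u -> MDC=%u Hz\n", div, MDC_MAX_FREQ / div);
        return 0;                       /* divider=10 -> MDC=2500000 Hz */
    }
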
+diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+index 0d5225f1d3eef6..8d7b6818d86012 100644
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+@@ -1260,6 +1260,7 @@ struct mtk_eth {
+ 	struct clk			*clks[MTK_CLK_MAX];
+ 
+ 	struct mii_bus			*mii_bus;
++	unsigned int			mdc_divider;
+ 	struct work_struct		pending_work;
+ 	unsigned long			state;
+ 
+diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+index 308a2b72a65de3..a21e7c0afbfdc8 100644
+--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
++++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+@@ -2680,7 +2680,7 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
+ 				of_property_read_bool(port_np, "ti,mac-only");
+ 
+ 		/* get phy/link info */
+-		port->slave.port_np = port_np;
++		port->slave.port_np = of_node_get(port_np);
+ 		ret = of_get_phy_mode(port_np, &port->slave.phy_if);
+ 		if (ret) {
+ 			dev_err(dev, "%pOF read phy-mode err %d\n",
+@@ -2741,6 +2741,17 @@ static void am65_cpsw_nuss_phylink_cleanup(struct am65_cpsw_common *common)
+ 	}
+ }
+ 
++static void am65_cpsw_remove_dt(struct am65_cpsw_common *common)
++{
++	struct am65_cpsw_port *port;
++	int i;
++
++	for (i = 0; i < common->port_num; i++) {
++		port = &common->ports[i];
++		of_node_put(port->slave.port_np);
++	}
++}
++
+ static int
+ am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
+ {
+@@ -3647,6 +3658,7 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
+ 	am65_cpsw_nuss_cleanup_ndev(common);
+ 	am65_cpsw_nuss_phylink_cleanup(common);
+ 	am65_cpts_release(common->cpts);
++	am65_cpsw_remove_dt(common);
+ err_of_clear:
+ 	if (common->mdio_dev)
+ 		of_platform_device_destroy(common->mdio_dev, NULL);
+@@ -3686,6 +3698,7 @@ static void am65_cpsw_nuss_remove(struct platform_device *pdev)
+ 	am65_cpsw_nuss_phylink_cleanup(common);
+ 	am65_cpts_release(common->cpts);
+ 	am65_cpsw_disable_serdes_phy(common);
++	am65_cpsw_remove_dt(common);
+ 
+ 	if (common->mdio_dev)
+ 		of_platform_device_destroy(common->mdio_dev, NULL);
+diff --git a/drivers/net/ethernet/ti/icssg/icss_iep.c b/drivers/net/ethernet/ti/icssg/icss_iep.c
+index d59c1744840af2..2a1c43316f462b 100644
+--- a/drivers/net/ethernet/ti/icssg/icss_iep.c
++++ b/drivers/net/ethernet/ti/icssg/icss_iep.c
+@@ -406,66 +406,79 @@ static void icss_iep_update_to_next_boundary(struct icss_iep *iep, u64 start_ns)
+ static int icss_iep_perout_enable_hw(struct icss_iep *iep,
+ 				     struct ptp_perout_request *req, int on)
+ {
++	struct timespec64 ts;
++	u64 ns_start;
++	u64 ns_width;
+ 	int ret;
+ 	u64 cmp;
+ 
++	if (!on) {
++		/* Disable CMP 1 */
++		regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
++				   IEP_CMP_CFG_CMP_EN(1), 0);
++
++		/* clear CMP regs */
++		regmap_write(iep->map, ICSS_IEP_CMP1_REG0, 0);
++		if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
++			regmap_write(iep->map, ICSS_IEP_CMP1_REG1, 0);
++
++		/* Disable sync */
++		regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG, 0);
++
++		return 0;
++	}
++
++	/* Calculate width of the signal for PPS/PEROUT handling */
++	ts.tv_sec = req->on.sec;
++	ts.tv_nsec = req->on.nsec;
++	ns_width = timespec64_to_ns(&ts);
++
++	if (req->flags & PTP_PEROUT_PHASE) {
++		ts.tv_sec = req->phase.sec;
++		ts.tv_nsec = req->phase.nsec;
++		ns_start = timespec64_to_ns(&ts);
++	} else {
++		ns_start = 0;
++	}
++
+ 	if (iep->ops && iep->ops->perout_enable) {
+ 		ret = iep->ops->perout_enable(iep->clockops_data, req, on, &cmp);
+ 		if (ret)
+ 			return ret;
+ 
+-		if (on) {
+-			/* Configure CMP */
+-			regmap_write(iep->map, ICSS_IEP_CMP1_REG0, lower_32_bits(cmp));
+-			if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
+-				regmap_write(iep->map, ICSS_IEP_CMP1_REG1, upper_32_bits(cmp));
+-			/* Configure SYNC, 1ms pulse width */
+-			regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG, 1000000);
+-			regmap_write(iep->map, ICSS_IEP_SYNC0_PERIOD_REG, 0);
+-			regmap_write(iep->map, ICSS_IEP_SYNC_START_REG, 0);
+-			regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG, 0); /* one-shot mode */
+-			/* Enable CMP 1 */
+-			regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
+-					   IEP_CMP_CFG_CMP_EN(1), IEP_CMP_CFG_CMP_EN(1));
+-		} else {
+-			/* Disable CMP 1 */
+-			regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
+-					   IEP_CMP_CFG_CMP_EN(1), 0);
+-
+-			/* clear regs */
+-			regmap_write(iep->map, ICSS_IEP_CMP1_REG0, 0);
+-			if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
+-				regmap_write(iep->map, ICSS_IEP_CMP1_REG1, 0);
+-		}
++		/* Configure CMP */
++		regmap_write(iep->map, ICSS_IEP_CMP1_REG0, lower_32_bits(cmp));
++		if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
++			regmap_write(iep->map, ICSS_IEP_CMP1_REG1, upper_32_bits(cmp));
++		/* Configure SYNC, based on req on width */
++		regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG,
++			     div_u64(ns_width, iep->def_inc));
++		regmap_write(iep->map, ICSS_IEP_SYNC0_PERIOD_REG, 0);
++		regmap_write(iep->map, ICSS_IEP_SYNC_START_REG,
++			     div_u64(ns_start, iep->def_inc));
++		regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG, 0); /* one-shot mode */
++		/* Enable CMP 1 */
++		regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
++				   IEP_CMP_CFG_CMP_EN(1), IEP_CMP_CFG_CMP_EN(1));
+ 	} else {
+-		if (on) {
+-			u64 start_ns;
+-
+-			iep->period = ((u64)req->period.sec * NSEC_PER_SEC) +
+-				      req->period.nsec;
+-			start_ns = ((u64)req->period.sec * NSEC_PER_SEC)
+-				   + req->period.nsec;
+-			icss_iep_update_to_next_boundary(iep, start_ns);
+-
+-			/* Enable Sync in single shot mode  */
+-			regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG,
+-				     IEP_SYNC_CTRL_SYNC_N_EN(0) | IEP_SYNC_CTRL_SYNC_EN);
+-			/* Enable CMP 1 */
+-			regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
+-					   IEP_CMP_CFG_CMP_EN(1), IEP_CMP_CFG_CMP_EN(1));
+-		} else {
+-			/* Disable CMP 1 */
+-			regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
+-					   IEP_CMP_CFG_CMP_EN(1), 0);
+-
+-			/* clear CMP regs */
+-			regmap_write(iep->map, ICSS_IEP_CMP1_REG0, 0);
+-			if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
+-				regmap_write(iep->map, ICSS_IEP_CMP1_REG1, 0);
+-
+-			/* Disable sync */
+-			regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG, 0);
+-		}
++		u64 start_ns;
++
++		iep->period = ((u64)req->period.sec * NSEC_PER_SEC) +
++				req->period.nsec;
++		start_ns = ((u64)req->period.sec * NSEC_PER_SEC)
++				+ req->period.nsec;
++		icss_iep_update_to_next_boundary(iep, start_ns);
++
++		regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG,
++			     div_u64(ns_width, iep->def_inc));
++		regmap_write(iep->map, ICSS_IEP_SYNC_START_REG,
++			     div_u64(ns_start, iep->def_inc));
++		/* Enable Sync in single shot mode  */
++		regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG,
++			     IEP_SYNC_CTRL_SYNC_N_EN(0) | IEP_SYNC_CTRL_SYNC_EN);
++		/* Enable CMP 1 */
++		regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
++				   IEP_CMP_CFG_CMP_EN(1), IEP_CMP_CFG_CMP_EN(1));
+ 	}
+ 
+ 	return 0;
+@@ -474,7 +487,41 @@ static int icss_iep_perout_enable_hw(struct icss_iep *iep,
+ static int icss_iep_perout_enable(struct icss_iep *iep,
+ 				  struct ptp_perout_request *req, int on)
+ {
+-	return -EOPNOTSUPP;
++	int ret = 0;
++
++	if (!on)
++		goto disable;
++
++	/* Reject requests with unsupported flags */
++	if (req->flags & ~(PTP_PEROUT_DUTY_CYCLE |
++			  PTP_PEROUT_PHASE))
++		return -EOPNOTSUPP;
++
++	/* Set default "on" time (1ms) for the signal if not passed by the app */
++	if (!(req->flags & PTP_PEROUT_DUTY_CYCLE)) {
++		req->on.sec = 0;
++		req->on.nsec = NSEC_PER_MSEC;
++	}
++
++disable:
++	mutex_lock(&iep->ptp_clk_mutex);
++
++	if (iep->pps_enabled) {
++		ret = -EBUSY;
++		goto exit;
++	}
++
++	if (iep->perout_enabled == !!on)
++		goto exit;
++
++	ret = icss_iep_perout_enable_hw(iep, req, on);
++	if (!ret)
++		iep->perout_enabled = !!on;
++
++exit:
++	mutex_unlock(&iep->ptp_clk_mutex);
++
++	return ret;
+ }
+ 
+ static void icss_iep_cap_cmp_work(struct work_struct *work)
+@@ -549,10 +596,13 @@ static int icss_iep_pps_enable(struct icss_iep *iep, int on)
+ 	if (on) {
+ 		ns = icss_iep_gettime(iep, NULL);
+ 		ts = ns_to_timespec64(ns);
++		rq.perout.flags = 0;
+ 		rq.perout.period.sec = 1;
+ 		rq.perout.period.nsec = 0;
+ 		rq.perout.start.sec = ts.tv_sec + 2;
+ 		rq.perout.start.nsec = 0;
++		rq.perout.on.sec = 0;
++		rq.perout.on.nsec = NSEC_PER_MSEC;
+ 		ret = icss_iep_perout_enable_hw(iep, &rq.perout, on);
+ 	} else {
+ 		ret = icss_iep_perout_enable_hw(iep, &rq.perout, on);
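
The perout path above now validates the request flags and supplies a 1 ms
default pulse width when PTP_PEROUT_DUTY_CYCLE is absent. A minimal
userspace sketch of driving it through the PTP character device; the
/dev/ptp0 path, channel index and timing values are assumptions for
illustration, not part of the patch:

    #include <fcntl.h>
    #include <string.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/ptp_clock.h>

    int main(void)
    {
        struct ptp_perout_request req;
        int fd = open("/dev/ptp0", O_RDWR);    /* assumed device node */

        if (fd < 0)
            return 1;

        memset(&req, 0, sizeof(req));
        req.index = 0;                  /* periodic output channel 0 */
        req.flags = PTP_PEROUT_DUTY_CYCLE;
        req.period.sec = 1;             /* 1 Hz signal */
        req.on.nsec = 250000000;        /* 250 ms high time per period */

        /* PTP_PEROUT_REQUEST2 is needed for flag-carrying requests */
        if (ioctl(fd, PTP_PEROUT_REQUEST2, &req))
            perror("PTP_PEROUT_REQUEST2");
        return 0;
    }
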
+diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
+index 53aeae2f884b01..1be2a5cc4a83c3 100644
+--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
++++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
+@@ -607,7 +607,7 @@ static int ngbe_probe(struct pci_dev *pdev,
+ 	/* setup the private structure */
+ 	err = ngbe_sw_init(wx);
+ 	if (err)
+-		goto err_free_mac_table;
++		goto err_pci_release_regions;
+ 
+ 	/* check if flash load is done after hw power up */
+ 	err = wx_check_flash_load(wx, NGBE_SPI_ILDR_STATUS_PERST);
+@@ -701,6 +701,7 @@ static int ngbe_probe(struct pci_dev *pdev,
+ err_clear_interrupt_scheme:
+ 	wx_clear_interrupt_scheme(wx);
+ err_free_mac_table:
++	kfree(wx->rss_key);
+ 	kfree(wx->mac_table);
+ err_pci_release_regions:
+ 	pci_release_selected_regions(pdev,
+diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+index f7745026803643..7e352837184fad 100644
+--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
++++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+@@ -559,7 +559,7 @@ static int txgbe_probe(struct pci_dev *pdev,
+ 	/* setup the private structure */
+ 	err = txgbe_sw_init(wx);
+ 	if (err)
+-		goto err_free_mac_table;
++		goto err_pci_release_regions;
+ 
+ 	/* check if flash load is done after hw power up */
+ 	err = wx_check_flash_load(wx, TXGBE_SPI_ILDR_STATUS_PERST);
+@@ -717,6 +717,7 @@ static int txgbe_probe(struct pci_dev *pdev,
+ 	wx_clear_interrupt_scheme(wx);
+ 	wx_control_hw(wx, false);
+ err_free_mac_table:
++	kfree(wx->rss_key);
+ 	kfree(wx->mac_table);
+ err_pci_release_regions:
+ 	pci_release_selected_regions(pdev,
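
Both Wangxun probe fixes apply the same unwind discipline: a failure in
sw_init() now jumps straight to releasing the PCI regions rather than
freeing buffers it did not leave behind, while the later failure paths
additionally free wx->rss_key alongside the MAC table. A minimal sketch
of the goto-unwind idiom; step_a()/step_b()/undo_*() are placeholders:

    static int demo_probe(void)
    {
        int err;

        err = step_a();             /* e.g. request PCI regions */
        if (err)
            return err;

        err = step_b();             /* e.g. sw_init(): allocates rss_key, mac_table */
        if (err)
            goto err_release_a;     /* step_b left nothing behind to free */

        err = step_c();
        if (err)
            goto err_undo_b;

        return 0;

    err_undo_b:
        undo_b();                   /* kfree(rss_key); kfree(mac_table); */
    err_release_a:
        undo_a();                   /* release the PCI regions */
        return err;
    }
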
+diff --git a/drivers/net/wireless/ath/ath12k/dp_mon.c b/drivers/net/wireless/ath/ath12k/dp_mon.c
+index 1706ec27eb9c0f..4c98b9de1e5840 100644
+--- a/drivers/net/wireless/ath/ath12k/dp_mon.c
++++ b/drivers/net/wireless/ath/ath12k/dp_mon.c
+@@ -2118,7 +2118,7 @@ int ath12k_dp_mon_srng_process(struct ath12k *ar, int mac_id, int *budget,
+ 		dest_idx = 0;
+ move_next:
+ 		ath12k_dp_mon_buf_replenish(ab, buf_ring, 1);
+-		ath12k_hal_srng_src_get_next_entry(ab, srng);
++		ath12k_hal_srng_dst_get_next_entry(ab, srng);
+ 		num_buffs_reaped++;
+ 	}
+ 
+@@ -2533,7 +2533,7 @@ int ath12k_dp_mon_rx_process_stats(struct ath12k *ar, int mac_id,
+ 		dest_idx = 0;
+ move_next:
+ 		ath12k_dp_mon_buf_replenish(ab, buf_ring, 1);
+-		ath12k_hal_srng_dst_get_next_entry(ab, srng);
++		ath12k_hal_srng_src_get_next_entry(ab, srng);
+ 		num_buffs_reaped++;
+ 	}
+ 
+diff --git a/drivers/net/wireless/atmel/at76c50x-usb.c b/drivers/net/wireless/atmel/at76c50x-usb.c
+index 504e05ea30f298..97ea7ab0f49102 100644
+--- a/drivers/net/wireless/atmel/at76c50x-usb.c
++++ b/drivers/net/wireless/atmel/at76c50x-usb.c
+@@ -2552,7 +2552,7 @@ static void at76_disconnect(struct usb_interface *interface)
+ 
+ 	wiphy_info(priv->hw->wiphy, "disconnecting\n");
+ 	at76_delete_device(priv);
+-	usb_put_dev(priv->udev);
++	usb_put_dev(interface_to_usbdev(interface));
+ 	dev_info(&interface->dev, "disconnected\n");
+ }
+ 
+diff --git a/drivers/net/wireless/ti/wl1251/tx.c b/drivers/net/wireless/ti/wl1251/tx.c
+index 474b603c121cba..adb4840b048932 100644
+--- a/drivers/net/wireless/ti/wl1251/tx.c
++++ b/drivers/net/wireless/ti/wl1251/tx.c
+@@ -342,8 +342,10 @@ void wl1251_tx_work(struct work_struct *work)
+ 	while ((skb = skb_dequeue(&wl->tx_queue))) {
+ 		if (!woken_up) {
+ 			ret = wl1251_ps_elp_wakeup(wl);
+-			if (ret < 0)
++			if (ret < 0) {
++				skb_queue_head(&wl->tx_queue, skb);
+ 				goto out;
++			}
+ 			woken_up = true;
+ 		}
+ 
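
The wl1251 change closes a frame leak: skb_dequeue() had already removed
the skb from the front of the queue, so a failed power-save wakeup must
put it back with skb_queue_head() before bailing out, preserving both the
frame and its ordering for the next work-queue run. The shape of the
pattern in isolation, with prepare_hw() and transmit() as placeholders:

    struct sk_buff *skb;

    while ((skb = skb_dequeue(&wl->tx_queue))) {
        if (prepare_hw() < 0) {
            skb_queue_head(&wl->tx_queue, skb); /* undo the dequeue */
            break;                              /* retry later, nothing lost */
        }
        transmit(skb);
    }
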
+diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c
+index e79a0adf13950b..328f5a103628fe 100644
+--- a/drivers/nvme/host/apple.c
++++ b/drivers/nvme/host/apple.c
+@@ -650,7 +650,7 @@ static bool apple_nvme_handle_cq(struct apple_nvme_queue *q, bool force)
+ 
+ 	found = apple_nvme_poll_cq(q, &iob);
+ 
+-	if (!rq_list_empty(iob.req_list))
++	if (!rq_list_empty(&iob.req_list))
+ 		apple_nvme_complete_batch(&iob);
+ 
+ 	return found;
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index af45a1b865ee10..e70618e8d35eb4 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -985,7 +985,7 @@ static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
+ 	return BLK_STS_OK;
+ }
+ 
+-static void nvme_submit_cmds(struct nvme_queue *nvmeq, struct request **rqlist)
++static void nvme_submit_cmds(struct nvme_queue *nvmeq, struct rq_list *rqlist)
+ {
+ 	struct request *req;
+ 
+@@ -1013,11 +1013,10 @@ static bool nvme_prep_rq_batch(struct nvme_queue *nvmeq, struct request *req)
+ 	return nvme_prep_rq(nvmeq->dev, req) == BLK_STS_OK;
+ }
+ 
+-static void nvme_queue_rqs(struct request **rqlist)
++static void nvme_queue_rqs(struct rq_list *rqlist)
+ {
+-	struct request *submit_list = NULL;
+-	struct request *requeue_list = NULL;
+-	struct request **requeue_lastp = &requeue_list;
++	struct rq_list submit_list = { };
++	struct rq_list requeue_list = { };
+ 	struct nvme_queue *nvmeq = NULL;
+ 	struct request *req;
+ 
+@@ -1027,9 +1026,9 @@ static void nvme_queue_rqs(struct request **rqlist)
+ 		nvmeq = req->mq_hctx->driver_data;
+ 
+ 		if (nvme_prep_rq_batch(nvmeq, req))
+-			rq_list_add(&submit_list, req); /* reverse order */
++			rq_list_add_tail(&submit_list, req);
+ 		else
+-			rq_list_add_tail(&requeue_lastp, req);
++			rq_list_add_tail(&requeue_list, req);
+ 	}
+ 
+ 	if (nvmeq)
+@@ -1176,7 +1175,7 @@ static irqreturn_t nvme_irq(int irq, void *data)
+ 	DEFINE_IO_COMP_BATCH(iob);
+ 
+ 	if (nvme_poll_cq(nvmeq, &iob)) {
+-		if (!rq_list_empty(iob.req_list))
++		if (!rq_list_empty(&iob.req_list))
+ 			nvme_pci_complete_batch(&iob);
+ 		return IRQ_HANDLED;
+ 	}
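
These nvme hunks are part of the wider conversion from bare struct
request * list heads to struct rq_list, which tracks head and tail so
rq_list_add_tail() appends in O(1). That removes both the old
reverse-order rq_list_add() and the extra requeue_lastp cursor. A sketch
of the consuming pattern, with prep_ok() and submit_all() as stand-ins:

    static void demo_queue_rqs(struct rq_list *rqlist)
    {
        struct rq_list submit_list = { };   /* empty: head == tail == NULL */
        struct rq_list requeue_list = { };
        struct request *req;

        while ((req = rq_list_pop(rqlist))) {
            if (prep_ok(req))
                rq_list_add_tail(&submit_list, req);    /* order preserved */
            else
                rq_list_add_tail(&requeue_list, req);
        }

        if (!rq_list_empty(&submit_list))
            submit_all(&submit_list);
        *rqlist = requeue_list;             /* hand back what didn't prep */
    }
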
+diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
+index 3ef4beacde3257..7318b736d41417 100644
+--- a/drivers/nvme/target/fc.c
++++ b/drivers/nvme/target/fc.c
+@@ -172,20 +172,6 @@ struct nvmet_fc_tgt_assoc {
+ 	struct work_struct		del_work;
+ };
+ 
+-
+-static inline int
+-nvmet_fc_iodnum(struct nvmet_fc_ls_iod *iodptr)
+-{
+-	return (iodptr - iodptr->tgtport->iod);
+-}
+-
+-static inline int
+-nvmet_fc_fodnum(struct nvmet_fc_fcp_iod *fodptr)
+-{
+-	return (fodptr - fodptr->queue->fod);
+-}
+-
+-
+ /*
+  * Association and Connection IDs:
+  *
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index be61fa93d39712..25c07af1686b9b 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -5534,8 +5534,6 @@ static bool pci_bus_resettable(struct pci_bus *bus)
+ 		return false;
+ 
+ 	list_for_each_entry(dev, &bus->devices, bus_list) {
+-		if (!pci_reset_supported(dev))
+-			return false;
+ 		if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
+ 		    (dev->subordinate && !pci_bus_resettable(dev->subordinate)))
+ 			return false;
+@@ -5612,8 +5610,6 @@ static bool pci_slot_resettable(struct pci_slot *slot)
+ 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
+ 		if (!dev->slot || dev->slot != slot)
+ 			continue;
+-		if (!pci_reset_supported(dev))
+-			return false;
+ 		if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
+ 		    (dev->subordinate && !pci_bus_resettable(dev->subordinate)))
+ 			return false;
+diff --git a/drivers/platform/x86/amd/pmf/auto-mode.c b/drivers/platform/x86/amd/pmf/auto-mode.c
+index 02ff68be10d012..a184922bba8d65 100644
+--- a/drivers/platform/x86/amd/pmf/auto-mode.c
++++ b/drivers/platform/x86/amd/pmf/auto-mode.c
+@@ -120,9 +120,9 @@ static void amd_pmf_set_automode(struct amd_pmf_dev *dev, int idx,
+ 	amd_pmf_send_cmd(dev, SET_SPPT_APU_ONLY, false, pwr_ctrl->sppt_apu_only, NULL);
+ 	amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, false, pwr_ctrl->stt_min, NULL);
+ 	amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false,
+-			 pwr_ctrl->stt_skin_temp[STT_TEMP_APU], NULL);
++			 fixp_q88_fromint(pwr_ctrl->stt_skin_temp[STT_TEMP_APU]), NULL);
+ 	amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false,
+-			 pwr_ctrl->stt_skin_temp[STT_TEMP_HS2], NULL);
++			 fixp_q88_fromint(pwr_ctrl->stt_skin_temp[STT_TEMP_HS2]), NULL);
+ 
+ 	if (is_apmf_func_supported(dev, APMF_FUNC_SET_FAN_IDX))
+ 		apmf_update_fan_idx(dev, config_store.mode_set[idx].fan_control.manual,
+diff --git a/drivers/platform/x86/amd/pmf/cnqf.c b/drivers/platform/x86/amd/pmf/cnqf.c
+index bc8899e15c914b..207a0b33d8d368 100644
+--- a/drivers/platform/x86/amd/pmf/cnqf.c
++++ b/drivers/platform/x86/amd/pmf/cnqf.c
+@@ -81,10 +81,10 @@ static int amd_pmf_set_cnqf(struct amd_pmf_dev *dev, int src, int idx,
+ 	amd_pmf_send_cmd(dev, SET_SPPT, false, pc->sppt, NULL);
+ 	amd_pmf_send_cmd(dev, SET_SPPT_APU_ONLY, false, pc->sppt_apu_only, NULL);
+ 	amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, false, pc->stt_min, NULL);
+-	amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false, pc->stt_skin_temp[STT_TEMP_APU],
+-			 NULL);
+-	amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false, pc->stt_skin_temp[STT_TEMP_HS2],
+-			 NULL);
++	amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false,
++			 fixp_q88_fromint(pc->stt_skin_temp[STT_TEMP_APU]), NULL);
++	amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false,
++			 fixp_q88_fromint(pc->stt_skin_temp[STT_TEMP_HS2]), NULL);
+ 
+ 	if (is_apmf_func_supported(dev, APMF_FUNC_SET_FAN_IDX))
+ 		apmf_update_fan_idx(dev,
+diff --git a/drivers/platform/x86/amd/pmf/core.c b/drivers/platform/x86/amd/pmf/core.c
+index 347bb43a5f2b75..719caa2a00f056 100644
+--- a/drivers/platform/x86/amd/pmf/core.c
++++ b/drivers/platform/x86/amd/pmf/core.c
+@@ -176,6 +176,20 @@ static void __maybe_unused amd_pmf_dump_registers(struct amd_pmf_dev *dev)
+ 	dev_dbg(dev->dev, "AMD_PMF_REGISTER_MESSAGE:%x\n", value);
+ }
+ 
++/**
++ * fixp_q88_fromint: Convert integer to Q8.8
++ * @val: input value
++ *
++ * Converts an integer into binary fixed point format where 8 bits
++ * are used for integer and 8 bits are used for the decimal.
++ *
++ * Return: unsigned integer converted to Q8.8 format
++ */
++u32 fixp_q88_fromint(u32 val)
++{
++	return val << 8;
++}
++
+ int amd_pmf_send_cmd(struct amd_pmf_dev *dev, u8 message, bool get, u32 arg, u32 *data)
+ {
+ 	int rc;
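
A worked example for the new helper: the skin-temperature limits handled
below are whole degrees, so a 42 degree limit becomes
fixp_q88_fromint(42) == 42 << 8 == 0x2A00 (integer part 42, fractional
part 0). Passing the raw 42 instead, as the code did before this series,
presumably had the firmware read it as the Q8.8 value 42/256, roughly
0.16 of a degree:

    u32 limit = fixp_q88_fromint(42);   /* 0x2A00 == 42.0 in Q8.8 */
    /* A fractional value such as 42.5 would be 0x2A80; this helper can
     * only produce whole-number Q8.8 values. */
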
+diff --git a/drivers/platform/x86/amd/pmf/pmf.h b/drivers/platform/x86/amd/pmf/pmf.h
+index 43ba1b9aa1811a..34ba0309a33a2f 100644
+--- a/drivers/platform/x86/amd/pmf/pmf.h
++++ b/drivers/platform/x86/amd/pmf/pmf.h
+@@ -746,6 +746,7 @@ int apmf_install_handler(struct amd_pmf_dev *pmf_dev);
+ int apmf_os_power_slider_update(struct amd_pmf_dev *dev, u8 flag);
+ int amd_pmf_set_dram_addr(struct amd_pmf_dev *dev, bool alloc_buffer);
+ int amd_pmf_notify_sbios_heartbeat_event_v2(struct amd_pmf_dev *dev, u8 flag);
++u32 fixp_q88_fromint(u32 val);
+ 
+ /* SPS Layer */
+ int amd_pmf_get_pprof_modes(struct amd_pmf_dev *pmf);
+diff --git a/drivers/platform/x86/amd/pmf/sps.c b/drivers/platform/x86/amd/pmf/sps.c
+index 92f7fb22277dca..3a24209f7df03e 100644
+--- a/drivers/platform/x86/amd/pmf/sps.c
++++ b/drivers/platform/x86/amd/pmf/sps.c
+@@ -198,9 +198,11 @@ static void amd_pmf_update_slider_v2(struct amd_pmf_dev *dev, int idx)
+ 	amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, false,
+ 			 apts_config_store.val[idx].stt_min_limit, NULL);
+ 	amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false,
+-			 apts_config_store.val[idx].stt_skin_temp_limit_apu, NULL);
++			 fixp_q88_fromint(apts_config_store.val[idx].stt_skin_temp_limit_apu),
++			 NULL);
+ 	amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false,
+-			 apts_config_store.val[idx].stt_skin_temp_limit_hs2, NULL);
++			 fixp_q88_fromint(apts_config_store.val[idx].stt_skin_temp_limit_hs2),
++			 NULL);
+ }
+ 
+ void amd_pmf_update_slider(struct amd_pmf_dev *dev, bool op, int idx,
+@@ -217,9 +219,11 @@ void amd_pmf_update_slider(struct amd_pmf_dev *dev, bool op, int idx,
+ 		amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, false,
+ 				 config_store.prop[src][idx].stt_min, NULL);
+ 		amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false,
+-				 config_store.prop[src][idx].stt_skin_temp[STT_TEMP_APU], NULL);
++				 fixp_q88_fromint(config_store.prop[src][idx].stt_skin_temp[STT_TEMP_APU]),
++				 NULL);
+ 		amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false,
+-				 config_store.prop[src][idx].stt_skin_temp[STT_TEMP_HS2], NULL);
++				 fixp_q88_fromint(config_store.prop[src][idx].stt_skin_temp[STT_TEMP_HS2]),
++				 NULL);
+ 	} else if (op == SLIDER_OP_GET) {
+ 		amd_pmf_send_cmd(dev, GET_SPL, true, ARG_NONE, &table->prop[src][idx].spl);
+ 		amd_pmf_send_cmd(dev, GET_FPPT, true, ARG_NONE, &table->prop[src][idx].fppt);
+diff --git a/drivers/platform/x86/amd/pmf/tee-if.c b/drivers/platform/x86/amd/pmf/tee-if.c
+index 09131507d7a925..cb5abab2210a7b 100644
+--- a/drivers/platform/x86/amd/pmf/tee-if.c
++++ b/drivers/platform/x86/amd/pmf/tee-if.c
+@@ -123,7 +123,8 @@ static void amd_pmf_apply_policies(struct amd_pmf_dev *dev, struct ta_pmf_enact_
+ 
+ 		case PMF_POLICY_STT_SKINTEMP_APU:
+ 			if (dev->prev_data->stt_skintemp_apu != val) {
+-				amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false, val, NULL);
++				amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false,
++						 fixp_q88_fromint(val), NULL);
+ 				dev_dbg(dev->dev, "update STT_SKINTEMP_APU: %u\n", val);
+ 				dev->prev_data->stt_skintemp_apu = val;
+ 			}
+@@ -131,7 +132,8 @@ static void amd_pmf_apply_policies(struct amd_pmf_dev *dev, struct ta_pmf_enact_
+ 
+ 		case PMF_POLICY_STT_SKINTEMP_HS2:
+ 			if (dev->prev_data->stt_skintemp_hs2 != val) {
+-				amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false, val, NULL);
++				amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false,
++						 fixp_q88_fromint(val), NULL);
+ 				dev_dbg(dev->dev, "update STT_SKINTEMP_HS2: %u\n", val);
+ 				dev->prev_data->stt_skintemp_hs2 = val;
+ 			}
+diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
+index 9d7e6b712abf11..8d2e6d8be9e54a 100644
+--- a/drivers/platform/x86/asus-laptop.c
++++ b/drivers/platform/x86/asus-laptop.c
+@@ -426,11 +426,14 @@ static int asus_pega_lucid_set(struct asus_laptop *asus, int unit, bool enable)
+ 
+ static int pega_acc_axis(struct asus_laptop *asus, int curr, char *method)
+ {
++	unsigned long long val = (unsigned long long)curr;
++	acpi_status status;
+ 	int i, delta;
+-	unsigned long long val;
+-	for (i = 0; i < PEGA_ACC_RETRIES; i++) {
+-		acpi_evaluate_integer(asus->handle, method, NULL, &val);
+ 
++	for (i = 0; i < PEGA_ACC_RETRIES; i++) {
++		status = acpi_evaluate_integer(asus->handle, method, NULL, &val);
++		if (ACPI_FAILURE(status))
++			continue;
+ 		/* The output is noisy.  From reading the ASL
+ 		 * disassembly, timeout errors are returned with 1's
+ 		 * in the high word, and the lack of locking around
+diff --git a/drivers/platform/x86/msi-wmi-platform.c b/drivers/platform/x86/msi-wmi-platform.c
+index 9b5c7f8c79b0dd..dc5e9878cb6822 100644
+--- a/drivers/platform/x86/msi-wmi-platform.c
++++ b/drivers/platform/x86/msi-wmi-platform.c
+@@ -10,6 +10,7 @@
+ #include <linux/acpi.h>
+ #include <linux/bits.h>
+ #include <linux/bitfield.h>
++#include <linux/cleanup.h>
+ #include <linux/debugfs.h>
+ #include <linux/device.h>
+ #include <linux/device/driver.h>
+@@ -17,6 +18,7 @@
+ #include <linux/hwmon.h>
+ #include <linux/kernel.h>
+ #include <linux/module.h>
++#include <linux/mutex.h>
+ #include <linux/printk.h>
+ #include <linux/rwsem.h>
+ #include <linux/types.h>
+@@ -76,8 +78,13 @@ enum msi_wmi_platform_method {
+ 	MSI_PLATFORM_GET_WMI		= 0x1d,
+ };
+ 
+-struct msi_wmi_platform_debugfs_data {
++struct msi_wmi_platform_data {
+ 	struct wmi_device *wdev;
++	struct mutex wmi_lock;	/* Necessary when calling WMI methods */
++};
++
++struct msi_wmi_platform_debugfs_data {
++	struct msi_wmi_platform_data *data;
+ 	enum msi_wmi_platform_method method;
+ 	struct rw_semaphore buffer_lock;	/* Protects debugfs buffer */
+ 	size_t length;
+@@ -132,8 +139,9 @@ static int msi_wmi_platform_parse_buffer(union acpi_object *obj, u8 *output, siz
+ 	return 0;
+ }
+ 
+-static int msi_wmi_platform_query(struct wmi_device *wdev, enum msi_wmi_platform_method method,
+-				  u8 *input, size_t input_length, u8 *output, size_t output_length)
++static int msi_wmi_platform_query(struct msi_wmi_platform_data *data,
++				  enum msi_wmi_platform_method method, u8 *input,
++				  size_t input_length, u8 *output, size_t output_length)
+ {
+ 	struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL };
+ 	struct acpi_buffer in = {
+@@ -147,9 +155,15 @@ static int msi_wmi_platform_query(struct wmi_device *wdev, enum msi_wmi_platform
+ 	if (!input_length || !output_length)
+ 		return -EINVAL;
+ 
+-	status = wmidev_evaluate_method(wdev, 0x0, method, &in, &out);
+-	if (ACPI_FAILURE(status))
+-		return -EIO;
++	/*
++	 * The ACPI control method responsible for handling the WMI method calls
++	 * is not thread-safe. Because of this we have to do the locking ourselves.
++	 */
++	scoped_guard(mutex, &data->wmi_lock) {
++		status = wmidev_evaluate_method(data->wdev, 0x0, method, &in, &out);
++		if (ACPI_FAILURE(status))
++			return -EIO;
++	}
+ 
+ 	obj = out.pointer;
+ 	if (!obj)
+@@ -170,22 +184,22 @@ static umode_t msi_wmi_platform_is_visible(const void *drvdata, enum hwmon_senso
+ static int msi_wmi_platform_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ 				 int channel, long *val)
+ {
+-	struct wmi_device *wdev = dev_get_drvdata(dev);
++	struct msi_wmi_platform_data *data = dev_get_drvdata(dev);
+ 	u8 input[32] = { 0 };
+ 	u8 output[32];
+-	u16 data;
++	u16 value;
+ 	int ret;
+ 
+-	ret = msi_wmi_platform_query(wdev, MSI_PLATFORM_GET_FAN, input, sizeof(input), output,
++	ret = msi_wmi_platform_query(data, MSI_PLATFORM_GET_FAN, input, sizeof(input), output,
+ 				     sizeof(output));
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	data = get_unaligned_be16(&output[channel * 2 + 1]);
+-	if (!data)
++	value = get_unaligned_be16(&output[channel * 2 + 1]);
++	if (!value)
+ 		*val = 0;
+ 	else
+-		*val = 480000 / data;
++		*val = 480000 / value;
+ 
+ 	return 0;
+ }
+@@ -231,7 +245,7 @@ static ssize_t msi_wmi_platform_write(struct file *fp, const char __user *input,
+ 		return ret;
+ 
+ 	down_write(&data->buffer_lock);
+-	ret = msi_wmi_platform_query(data->wdev, data->method, payload, data->length, data->buffer,
++	ret = msi_wmi_platform_query(data->data, data->method, payload, data->length, data->buffer,
+ 				     data->length);
+ 	up_write(&data->buffer_lock);
+ 
+@@ -277,17 +291,17 @@ static void msi_wmi_platform_debugfs_remove(void *data)
+ 	debugfs_remove_recursive(dir);
+ }
+ 
+-static void msi_wmi_platform_debugfs_add(struct wmi_device *wdev, struct dentry *dir,
++static void msi_wmi_platform_debugfs_add(struct msi_wmi_platform_data *drvdata, struct dentry *dir,
+ 					 const char *name, enum msi_wmi_platform_method method)
+ {
+ 	struct msi_wmi_platform_debugfs_data *data;
+ 	struct dentry *entry;
+ 
+-	data = devm_kzalloc(&wdev->dev, sizeof(*data), GFP_KERNEL);
++	data = devm_kzalloc(&drvdata->wdev->dev, sizeof(*data), GFP_KERNEL);
+ 	if (!data)
+ 		return;
+ 
+-	data->wdev = wdev;
++	data->data = drvdata;
+ 	data->method = method;
+ 	init_rwsem(&data->buffer_lock);
+ 
+@@ -298,82 +312,82 @@ static void msi_wmi_platform_debugfs_add(struct wmi_device *wdev, struct dentry
+ 
+ 	entry = debugfs_create_file(name, 0600, dir, data, &msi_wmi_platform_debugfs_fops);
+ 	if (IS_ERR(entry))
+-		devm_kfree(&wdev->dev, data);
++		devm_kfree(&drvdata->wdev->dev, data);
+ }
+ 
+-static void msi_wmi_platform_debugfs_init(struct wmi_device *wdev)
++static void msi_wmi_platform_debugfs_init(struct msi_wmi_platform_data *data)
+ {
+ 	struct dentry *dir;
+ 	char dir_name[64];
+ 	int ret, method;
+ 
+-	scnprintf(dir_name, ARRAY_SIZE(dir_name), "%s-%s", DRIVER_NAME, dev_name(&wdev->dev));
++	scnprintf(dir_name, ARRAY_SIZE(dir_name), "%s-%s", DRIVER_NAME, dev_name(&data->wdev->dev));
+ 
+ 	dir = debugfs_create_dir(dir_name, NULL);
+ 	if (IS_ERR(dir))
+ 		return;
+ 
+-	ret = devm_add_action_or_reset(&wdev->dev, msi_wmi_platform_debugfs_remove, dir);
++	ret = devm_add_action_or_reset(&data->wdev->dev, msi_wmi_platform_debugfs_remove, dir);
+ 	if (ret < 0)
+ 		return;
+ 
+ 	for (method = MSI_PLATFORM_GET_PACKAGE; method <= MSI_PLATFORM_GET_WMI; method++)
+-		msi_wmi_platform_debugfs_add(wdev, dir, msi_wmi_platform_debugfs_names[method - 1],
++		msi_wmi_platform_debugfs_add(data, dir, msi_wmi_platform_debugfs_names[method - 1],
+ 					     method);
+ }
+ 
+-static int msi_wmi_platform_hwmon_init(struct wmi_device *wdev)
++static int msi_wmi_platform_hwmon_init(struct msi_wmi_platform_data *data)
+ {
+ 	struct device *hdev;
+ 
+-	hdev = devm_hwmon_device_register_with_info(&wdev->dev, "msi_wmi_platform", wdev,
++	hdev = devm_hwmon_device_register_with_info(&data->wdev->dev, "msi_wmi_platform", data,
+ 						    &msi_wmi_platform_chip_info, NULL);
+ 
+ 	return PTR_ERR_OR_ZERO(hdev);
+ }
+ 
+-static int msi_wmi_platform_ec_init(struct wmi_device *wdev)
++static int msi_wmi_platform_ec_init(struct msi_wmi_platform_data *data)
+ {
+ 	u8 input[32] = { 0 };
+ 	u8 output[32];
+ 	u8 flags;
+ 	int ret;
+ 
+-	ret = msi_wmi_platform_query(wdev, MSI_PLATFORM_GET_EC, input, sizeof(input), output,
++	ret = msi_wmi_platform_query(data, MSI_PLATFORM_GET_EC, input, sizeof(input), output,
+ 				     sizeof(output));
+ 	if (ret < 0)
+ 		return ret;
+ 
+ 	flags = output[MSI_PLATFORM_EC_FLAGS_OFFSET];
+ 
+-	dev_dbg(&wdev->dev, "EC RAM version %lu.%lu\n",
++	dev_dbg(&data->wdev->dev, "EC RAM version %lu.%lu\n",
+ 		FIELD_GET(MSI_PLATFORM_EC_MAJOR_MASK, flags),
+ 		FIELD_GET(MSI_PLATFORM_EC_MINOR_MASK, flags));
+-	dev_dbg(&wdev->dev, "EC firmware version %.28s\n",
++	dev_dbg(&data->wdev->dev, "EC firmware version %.28s\n",
+ 		&output[MSI_PLATFORM_EC_VERSION_OFFSET]);
+ 
+ 	if (!(flags & MSI_PLATFORM_EC_IS_TIGERLAKE)) {
+ 		if (!force)
+ 			return -ENODEV;
+ 
+-		dev_warn(&wdev->dev, "Loading on a non-Tigerlake platform\n");
++		dev_warn(&data->wdev->dev, "Loading on a non-Tigerlake platform\n");
+ 	}
+ 
+ 	return 0;
+ }
+ 
+-static int msi_wmi_platform_init(struct wmi_device *wdev)
++static int msi_wmi_platform_init(struct msi_wmi_platform_data *data)
+ {
+ 	u8 input[32] = { 0 };
+ 	u8 output[32];
+ 	int ret;
+ 
+-	ret = msi_wmi_platform_query(wdev, MSI_PLATFORM_GET_WMI, input, sizeof(input), output,
++	ret = msi_wmi_platform_query(data, MSI_PLATFORM_GET_WMI, input, sizeof(input), output,
+ 				     sizeof(output));
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	dev_dbg(&wdev->dev, "WMI interface version %u.%u\n",
++	dev_dbg(&data->wdev->dev, "WMI interface version %u.%u\n",
+ 		output[MSI_PLATFORM_WMI_MAJOR_OFFSET],
+ 		output[MSI_PLATFORM_WMI_MINOR_OFFSET]);
+ 
+@@ -381,7 +395,8 @@ static int msi_wmi_platform_init(struct wmi_device *wdev)
+ 		if (!force)
+ 			return -ENODEV;
+ 
+-		dev_warn(&wdev->dev, "Loading despite unsupported WMI interface version (%u.%u)\n",
++		dev_warn(&data->wdev->dev,
++			 "Loading despite unsupported WMI interface version (%u.%u)\n",
+ 			 output[MSI_PLATFORM_WMI_MAJOR_OFFSET],
+ 			 output[MSI_PLATFORM_WMI_MINOR_OFFSET]);
+ 	}
+@@ -391,19 +406,31 @@ static int msi_wmi_platform_init(struct wmi_device *wdev)
+ 
+ static int msi_wmi_platform_probe(struct wmi_device *wdev, const void *context)
+ {
++	struct msi_wmi_platform_data *data;
+ 	int ret;
+ 
+-	ret = msi_wmi_platform_init(wdev);
++	data = devm_kzalloc(&wdev->dev, sizeof(*data), GFP_KERNEL);
++	if (!data)
++		return -ENOMEM;
++
++	data->wdev = wdev;
++	dev_set_drvdata(&wdev->dev, data);
++
++	ret = devm_mutex_init(&wdev->dev, &data->wmi_lock);
++	if (ret < 0)
++		return ret;
++
++	ret = msi_wmi_platform_init(data);
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	ret = msi_wmi_platform_ec_init(wdev);
++	ret = msi_wmi_platform_ec_init(data);
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	msi_wmi_platform_debugfs_init(wdev);
++	msi_wmi_platform_debugfs_init(data);
+ 
+-	return msi_wmi_platform_hwmon_init(wdev);
++	return msi_wmi_platform_hwmon_init(data);
+ }
+ 
+ static const struct wmi_device_id msi_wmi_platform_id_table[] = {
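
scoped_guard() comes from <linux/cleanup.h>: it acquires the lock on
entry to the braced block and releases it on every exit path, so the
-EIO return inside the guard above cannot leave wmi_lock held. A
self-contained sketch of the construct:

    #include <linux/cleanup.h>
    #include <linux/mutex.h>

    static DEFINE_MUTEX(demo_lock);
    static int shared_state;

    static int demo_update(int val)
    {
        scoped_guard(mutex, &demo_lock) {
            if (val < 0)
                return -EINVAL;     /* demo_lock is dropped automatically */
            shared_state = val;
        }
        return 0;
    }
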
+diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c
+index 120db96d9e95d6..0eeb503e06c230 100644
+--- a/drivers/ptp/ptp_ocp.c
++++ b/drivers/ptp/ptp_ocp.c
+@@ -2067,6 +2067,7 @@ ptp_ocp_signal_set(struct ptp_ocp *bp, int gen, struct ptp_ocp_signal *s)
+ 	if (!s->start) {
+ 		/* roundup() does not work on 32-bit systems */
+ 		s->start = DIV64_U64_ROUND_UP(start_ns, s->period);
++		s->start *= s->period;
+ 		s->start = ktime_add(s->start, s->phase);
+ 	}
+ 
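
The ptp_ocp fix restores the multiplication that turns the rounded-up
quotient back into a timestamp; DIV64_U64_ROUND_UP() alone yields a count
of whole periods, not a time. Worked numbers with the fix applied:

    u64 start_ns = 1000000123ULL;   /* requested start */
    u64 period   = 500000000ULL;    /* 0.5 s */
    u64 start    = DIV64_U64_ROUND_UP(start_ns, period);    /* = 3 periods */

    start *= period;    /* = 1500000000 ns: first boundary >= start_ns */
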
+diff --git a/drivers/ras/amd/atl/internal.h b/drivers/ras/amd/atl/internal.h
+index 143d04c779a821..b7c7d5ba4d9dd1 100644
+--- a/drivers/ras/amd/atl/internal.h
++++ b/drivers/ras/amd/atl/internal.h
+@@ -361,4 +361,7 @@ static inline void atl_debug_on_bad_intlv_mode(struct addr_ctx *ctx)
+ 	atl_debug(ctx, "Unrecognized interleave mode: %u", ctx->map.intlv_mode);
+ }
+ 
++#define MI300_UMC_MCA_COL	GENMASK(5, 1)
++#define MI300_UMC_MCA_ROW13	BIT(23)
++
+ #endif /* __AMD_ATL_INTERNAL_H__ */
+diff --git a/drivers/ras/amd/atl/umc.c b/drivers/ras/amd/atl/umc.c
+index dc8aa12f63c811..6e072b7667e98b 100644
+--- a/drivers/ras/amd/atl/umc.c
++++ b/drivers/ras/amd/atl/umc.c
+@@ -229,7 +229,6 @@ int get_umc_info_mi300(void)
+  * Additionally, the PC and Bank bits may be hashed. This must be accounted for before
+  * reconstructing the normalized address.
+  */
+-#define MI300_UMC_MCA_COL	GENMASK(5, 1)
+ #define MI300_UMC_MCA_BANK	GENMASK(9, 6)
+ #define MI300_UMC_MCA_ROW	GENMASK(24, 10)
+ #define MI300_UMC_MCA_PC	BIT(25)
+@@ -320,7 +319,7 @@ static unsigned long convert_dram_to_norm_addr_mi300(unsigned long addr)
+  * See amd_atl::convert_dram_to_norm_addr_mi300() for MI300 address formats.
+  */
+ #define MI300_NUM_COL		BIT(HWEIGHT(MI300_UMC_MCA_COL))
+-static void retire_row_mi300(struct atl_err *a_err)
++static void _retire_row_mi300(struct atl_err *a_err)
+ {
+ 	unsigned long addr;
+ 	struct page *p;
+@@ -351,6 +350,22 @@ static void retire_row_mi300(struct atl_err *a_err)
+ 	}
+ }
+ 
++/*
++ * In addition to the column bits, the row[13] bit should also be included when
++ * calculating addresses affected by a physical row.
++ *
++ * Instead of running through another loop over a single bit, just run through
++ * the column bits twice and flip the row[13] bit in-between.
++ *
++ * See MI300_UMC_MCA_ROW for the row bits in MCA_ADDR_UMC value.
++ */
++static void retire_row_mi300(struct atl_err *a_err)
++{
++	_retire_row_mi300(a_err);
++	a_err->addr ^= MI300_UMC_MCA_ROW13;
++	_retire_row_mi300(a_err);
++}
++
+ void amd_retire_dram_row(struct atl_err *a_err)
+ {
+ 	if (df_cfg.rev == DF4p5 && df_cfg.flags.heterogeneous)
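
Each _retire_row_mi300() pass presumably still enumerates all
MI300_NUM_COL (= 32) column values for the current row[13] setting; the
wrapper then flips row[13] and runs the enumeration again, covering the
whole physical row. A hedged sketch of that inner loop, using
FIELD_PREP() from <linux/bitfield.h>, with offline_page() standing in
for the translate-and-retire step:

    unsigned long base = a_err->addr & ~MI300_UMC_MCA_COL;
    u32 col;

    for (col = 0; col < MI300_NUM_COL; col++) {
        unsigned long addr = base | FIELD_PREP(MI300_UMC_MCA_COL, col);

        offline_page(addr);     /* translate to system address, retire page */
    }
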
+diff --git a/drivers/ras/amd/fmpm.c b/drivers/ras/amd/fmpm.c
+index 90de737fbc9097..8877c6ff64c468 100644
+--- a/drivers/ras/amd/fmpm.c
++++ b/drivers/ras/amd/fmpm.c
+@@ -250,6 +250,13 @@ static bool rec_has_valid_entries(struct fru_rec *rec)
+ 	return true;
+ }
+ 
++/*
++ * Row retirement is done on MI300 systems, and some bits are 'don't
++ * care' for comparing addresses with unique physical rows.  This
++ * includes all column bits and the row[13] bit.
++ */
++#define MASK_ADDR(addr)	((addr) & ~(MI300_UMC_MCA_ROW13 | MI300_UMC_MCA_COL))
++
+ static bool fpds_equal(struct cper_fru_poison_desc *old, struct cper_fru_poison_desc *new)
+ {
+ 	/*
+@@ -258,7 +265,7 @@ static bool fpds_equal(struct cper_fru_poison_desc *old, struct cper_fru_poison_
+ 	 *
+ 	 * Also, order the checks from most->least likely to fail to shortcut the code.
+ 	 */
+-	if (old->addr != new->addr)
++	if (MASK_ADDR(old->addr) != MASK_ADDR(new->addr))
+ 		return false;
+ 
+ 	if (old->hw_id != new->hw_id)
+diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
+index adec0df24bc475..1cb517f731f4ac 100644
+--- a/drivers/scsi/fnic/fnic_main.c
++++ b/drivers/scsi/fnic/fnic_main.c
+@@ -16,7 +16,6 @@
+ #include <linux/spinlock.h>
+ #include <linux/workqueue.h>
+ #include <linux/if_ether.h>
+-#include <linux/blk-mq-pci.h>
+ #include <scsi/fc/fc_fip.h>
+ #include <scsi/scsi_host.h>
+ #include <scsi/scsi_transport.h>
+@@ -601,7 +600,7 @@ void fnic_mq_map_queues_cpus(struct Scsi_Host *host)
+ 		return;
+ 	}
+ 
+-	blk_mq_pci_map_queues(qmap, l_pdev, FNIC_PCI_OFFSET);
++	blk_mq_map_hw_queues(qmap, &l_pdev->dev, FNIC_PCI_OFFSET);
+ }
+ 
+ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
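
The fnic hunk sets the template for the long run of SCSI conversions
below: drop the <linux/blk-mq-pci.h> include and replace the PCI-only
mapping helper with the bus-agnostic one, which takes a generic struct
device and derives the queue-to-CPU mapping from its IRQ affinity:

    /* old, PCI-specific */
    blk_mq_pci_map_queues(qmap, pdev, offset);

    /* new, works for any device exposing IRQ affinity */
    blk_mq_map_hw_queues(qmap, &pdev->dev, offset);
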
+diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
+index d223f482488fc6..010479a354eeeb 100644
+--- a/drivers/scsi/hisi_sas/hisi_sas.h
++++ b/drivers/scsi/hisi_sas/hisi_sas.h
+@@ -9,7 +9,6 @@
+ 
+ #include <linux/acpi.h>
+ #include <linux/blk-mq.h>
+-#include <linux/blk-mq-pci.h>
+ #include <linux/clk.h>
+ #include <linux/debugfs.h>
+ #include <linux/dmapool.h>
+diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+index 342d75f12051d2..89ff33daba4041 100644
+--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
++++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+@@ -2501,6 +2501,7 @@ static void prep_ata_v2_hw(struct hisi_hba *hisi_hba,
+ 	struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
+ 	struct sas_ata_task *ata_task = &task->ata_task;
+ 	struct sas_tmf_task *tmf = slot->tmf;
++	int phy_id;
+ 	u8 *buf_cmd;
+ 	int has_data = 0, hdr_tag = 0;
+ 	u32 dw0, dw1 = 0, dw2 = 0;
+@@ -2508,10 +2509,14 @@ static void prep_ata_v2_hw(struct hisi_hba *hisi_hba,
+ 	/* create header */
+ 	/* dw0 */
+ 	dw0 = port->id << CMD_HDR_PORT_OFF;
+-	if (parent_dev && dev_is_expander(parent_dev->dev_type))
++	if (parent_dev && dev_is_expander(parent_dev->dev_type)) {
+ 		dw0 |= 3 << CMD_HDR_CMD_OFF;
+-	else
++	} else {
++		phy_id = device->phy->identify.phy_identifier;
++		dw0 |= (1U << phy_id) << CMD_HDR_PHY_ID_OFF;
++		dw0 |= CMD_HDR_FORCE_PHY_MSK;
+ 		dw0 |= 4 << CMD_HDR_CMD_OFF;
++	}
+ 
+ 	if (tmf && ata_task->force_phy) {
+ 		dw0 |= CMD_HDR_FORCE_PHY_MSK;
+diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+index cd394d8c9f07f0..2b04556681a1ac 100644
+--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
++++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+@@ -358,6 +358,10 @@
+ #define CMD_HDR_RESP_REPORT_MSK		(0x1 << CMD_HDR_RESP_REPORT_OFF)
+ #define CMD_HDR_TLR_CTRL_OFF		6
+ #define CMD_HDR_TLR_CTRL_MSK		(0x3 << CMD_HDR_TLR_CTRL_OFF)
++#define CMD_HDR_PHY_ID_OFF		8
++#define CMD_HDR_PHY_ID_MSK		(0x1ff << CMD_HDR_PHY_ID_OFF)
++#define CMD_HDR_FORCE_PHY_OFF		17
++#define CMD_HDR_FORCE_PHY_MSK		(0x1U << CMD_HDR_FORCE_PHY_OFF)
+ #define CMD_HDR_PORT_OFF		18
+ #define CMD_HDR_PORT_MSK		(0xf << CMD_HDR_PORT_OFF)
+ #define CMD_HDR_PRIORITY_OFF		27
+@@ -1425,15 +1429,21 @@ static void prep_ata_v3_hw(struct hisi_hba *hisi_hba,
+ 	struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
+ 	struct asd_sas_port *sas_port = device->port;
+ 	struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
++	int phy_id;
+ 	u8 *buf_cmd;
+ 	int has_data = 0, hdr_tag = 0;
+ 	u32 dw1 = 0, dw2 = 0;
+ 
+ 	hdr->dw0 = cpu_to_le32(port->id << CMD_HDR_PORT_OFF);
+-	if (parent_dev && dev_is_expander(parent_dev->dev_type))
++	if (parent_dev && dev_is_expander(parent_dev->dev_type)) {
+ 		hdr->dw0 |= cpu_to_le32(3 << CMD_HDR_CMD_OFF);
+-	else
++	} else {
++		phy_id = device->phy->identify.phy_identifier;
++		hdr->dw0 |= cpu_to_le32((1U << phy_id)
++				<< CMD_HDR_PHY_ID_OFF);
++		hdr->dw0 |= CMD_HDR_FORCE_PHY_MSK;
+ 		hdr->dw0 |= cpu_to_le32(4U << CMD_HDR_CMD_OFF);
++	}
+ 
+ 	switch (task->data_dir) {
+ 	case DMA_TO_DEVICE:
+@@ -3323,8 +3333,8 @@ static void hisi_sas_map_queues(struct Scsi_Host *shost)
+ 		if (i == HCTX_TYPE_POLL)
+ 			blk_mq_map_queues(qmap);
+ 		else
+-			blk_mq_pci_map_queues(qmap, hisi_hba->pci_dev,
+-					      BASE_VECTORS_V3_HW);
++			blk_mq_map_hw_queues(qmap, hisi_hba->dev,
++					     BASE_VECTORS_V3_HW);
+ 		qoff += qmap->nr_queues;
+ 	}
+ }
+diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
+index 50f1dcb6d58460..21f22e913cd08d 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_base.c
++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
+@@ -37,7 +37,6 @@
+ #include <linux/poll.h>
+ #include <linux/vmalloc.h>
+ #include <linux/irq_poll.h>
+-#include <linux/blk-mq-pci.h>
+ 
+ #include <scsi/scsi.h>
+ #include <scsi/scsi_cmnd.h>
+@@ -2104,6 +2103,9 @@ static int megasas_device_configure(struct scsi_device *sdev,
+ 	/* This sdev property may change post OCR */
+ 	megasas_set_dynamic_target_properties(sdev, lim, is_target_prop);
+ 
++	if (!MEGASAS_IS_LOGICAL(sdev))
++		sdev->no_vpd_size = 1;
++
+ 	mutex_unlock(&instance->reset_mutex);
+ 
+ 	return 0;
+@@ -3193,7 +3195,7 @@ static void megasas_map_queues(struct Scsi_Host *shost)
+ 	map = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
+ 	map->nr_queues = instance->msix_vectors - offset;
+ 	map->queue_offset = 0;
+-	blk_mq_pci_map_queues(map, instance->pdev, offset);
++	blk_mq_map_hw_queues(map, &instance->pdev->dev, offset);
+ 	qoff += map->nr_queues;
+ 	offset += map->nr_queues;
+ 
+@@ -3663,8 +3665,10 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
+ 
+ 		case MFI_STAT_SCSI_IO_FAILED:
+ 		case MFI_STAT_LD_INIT_IN_PROGRESS:
+-			cmd->scmd->result =
+-			    (DID_ERROR << 16) | hdr->scsi_status;
++			if (hdr->scsi_status == 0xf0)
++				cmd->scmd->result = (DID_ERROR << 16) | SAM_STAT_CHECK_CONDITION;
++			else
++				cmd->scmd->result = (DID_ERROR << 16) | hdr->scsi_status;
+ 			break;
+ 
+ 		case MFI_STAT_SCSI_DONE_WITH_ERROR:
+diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
+index 1eec23da28e2d6..1eea4df9e47d35 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
++++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
+@@ -2043,7 +2043,10 @@ map_cmd_status(struct fusion_context *fusion,
+ 
+ 	case MFI_STAT_SCSI_IO_FAILED:
+ 	case MFI_STAT_LD_INIT_IN_PROGRESS:
+-		scmd->result = (DID_ERROR << 16) | ext_status;
++		if (ext_status == 0xf0)
++			scmd->result = (DID_ERROR << 16) | SAM_STAT_CHECK_CONDITION;
++		else
++			scmd->result = (DID_ERROR << 16) | ext_status;
+ 		break;
+ 
+ 	case MFI_STAT_SCSI_DONE_WITH_ERROR:
+diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h
+index ee5a75a4b3bb80..ab7c5f1fc04121 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr.h
++++ b/drivers/scsi/mpi3mr/mpi3mr.h
+@@ -12,7 +12,6 @@
+ 
+ #include <linux/blkdev.h>
+ #include <linux/blk-mq.h>
+-#include <linux/blk-mq-pci.h>
+ #include <linux/delay.h>
+ #include <linux/dmapool.h>
+ #include <linux/errno.h>
+diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
+index 1bef88130d0c06..1e8735538b238e 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr_os.c
++++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
+@@ -4042,7 +4042,7 @@ static void mpi3mr_map_queues(struct Scsi_Host *shost)
+ 		 */
+ 		map->queue_offset = qoff;
+ 		if (i != HCTX_TYPE_POLL)
+-			blk_mq_pci_map_queues(map, mrioc->pdev, offset);
++			blk_mq_map_hw_queues(map, &mrioc->pdev->dev, offset);
+ 		else
+ 			blk_mq_map_queues(map);
+ 
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+index f2a55aa5fe6503..9599d7a5002868 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+@@ -53,7 +53,6 @@
+ #include <linux/pci.h>
+ #include <linux/interrupt.h>
+ #include <linux/raid_class.h>
+-#include <linux/blk-mq-pci.h>
+ #include <linux/unaligned.h>
+ 
+ #include "mpt3sas_base.h"
+@@ -11890,7 +11889,7 @@ static void scsih_map_queues(struct Scsi_Host *shost)
+ 		 */
+ 		map->queue_offset = qoff;
+ 		if (i != HCTX_TYPE_POLL)
+-			blk_mq_pci_map_queues(map, ioc->pdev, offset);
++			blk_mq_map_hw_queues(map, &ioc->pdev->dev, offset);
+ 		else
+ 			blk_mq_map_queues(map);
+ 
+diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
+index 33e1eba62ca12c..b53b1ae5b74c30 100644
+--- a/drivers/scsi/pm8001/pm8001_init.c
++++ b/drivers/scsi/pm8001/pm8001_init.c
+@@ -101,7 +101,7 @@ static void pm8001_map_queues(struct Scsi_Host *shost)
+ 	struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
+ 
+ 	if (pm8001_ha->number_of_intr > 1) {
+-		blk_mq_pci_map_queues(qmap, pm8001_ha->pdev, 1);
++		blk_mq_map_hw_queues(qmap, &pm8001_ha->pdev->dev, 1);
+ 		return;
+ 	}
+ 
+diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
+index ced6721380a853..c46470e0cf63b7 100644
+--- a/drivers/scsi/pm8001/pm8001_sas.h
++++ b/drivers/scsi/pm8001/pm8001_sas.h
+@@ -56,7 +56,6 @@
+ #include <scsi/sas_ata.h>
+ #include <linux/atomic.h>
+ #include <linux/blk-mq.h>
+-#include <linux/blk-mq-pci.h>
+ #include "pm8001_defs.h"
+ 
+ #define DRV_NAME		"pm80xx"
+diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
+index 8f4cc136a9c9c4..8ee2e337c9e1b7 100644
+--- a/drivers/scsi/qla2xxx/qla_nvme.c
++++ b/drivers/scsi/qla2xxx/qla_nvme.c
+@@ -8,7 +8,6 @@
+ #include <linux/delay.h>
+ #include <linux/nvme.h>
+ #include <linux/nvme-fc.h>
+-#include <linux/blk-mq-pci.h>
+ #include <linux/blk-mq.h>
+ 
+ static struct nvme_fc_port_template qla_nvme_fc_transport;
+@@ -841,7 +840,7 @@ static void qla_nvme_map_queues(struct nvme_fc_local_port *lport,
+ {
+ 	struct scsi_qla_host *vha = lport->private;
+ 
+-	blk_mq_pci_map_queues(map, vha->hw->pdev, vha->irq_offset);
++	blk_mq_map_hw_queues(map, &vha->hw->pdev->dev, vha->irq_offset);
+ }
+ 
+ static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
+diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
+index 7ab717ed72327e..31535beaaa161c 100644
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -13,7 +13,6 @@
+ #include <linux/mutex.h>
+ #include <linux/kobject.h>
+ #include <linux/slab.h>
+-#include <linux/blk-mq-pci.h>
+ #include <linux/refcount.h>
+ #include <linux/crash_dump.h>
+ #include <linux/trace_events.h>
+@@ -8071,7 +8070,8 @@ static void qla2xxx_map_queues(struct Scsi_Host *shost)
+ 	if (USER_CTRL_IRQ(vha->hw) || !vha->hw->mqiobase)
+ 		blk_mq_map_queues(qmap);
+ 	else
+-		blk_mq_pci_map_queues(qmap, vha->hw->pdev, vha->irq_offset);
++		blk_mq_map_hw_queues(qmap, &vha->hw->pdev->dev,
++				       vha->irq_offset);
+ }
+ 
+ struct scsi_host_template qla2xxx_driver_template = {
+diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
+index 9b47f91c5b9720..8274fe0ec7146f 100644
+--- a/drivers/scsi/scsi_transport_iscsi.c
++++ b/drivers/scsi/scsi_transport_iscsi.c
+@@ -3209,11 +3209,14 @@ iscsi_set_host_param(struct iscsi_transport *transport,
+ 	}
+ 
+ 	/* see similar check in iscsi_if_set_param() */
+-	if (strlen(data) > ev->u.set_host_param.len)
+-		return -EINVAL;
++	if (strlen(data) > ev->u.set_host_param.len) {
++		err = -EINVAL;
++		goto out;
++	}
+ 
+ 	err = transport->set_host_param(shost, ev->u.set_host_param.param,
+ 					data, ev->u.set_host_param.len);
++out:
+ 	scsi_host_put(shost);
+ 	return err;
+ }
+diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
+index 870f37b7054644..d919a74746a056 100644
+--- a/drivers/scsi/smartpqi/smartpqi_init.c
++++ b/drivers/scsi/smartpqi/smartpqi_init.c
+@@ -19,7 +19,7 @@
+ #include <linux/bcd.h>
+ #include <linux/reboot.h>
+ #include <linux/cciss_ioctl.h>
+-#include <linux/blk-mq-pci.h>
++#include <linux/crash_dump.h>
+ #include <scsi/scsi_host.h>
+ #include <scsi/scsi_cmnd.h>
+ #include <scsi/scsi_device.h>
+@@ -5247,7 +5247,7 @@ static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
+ 	ctrl_info->error_buffer_length =
+ 		ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;
+ 
+-	if (reset_devices)
++	if (is_kdump_kernel())
+ 		max_transfer_size = min(ctrl_info->max_transfer_size,
+ 			PQI_MAX_TRANSFER_SIZE_KDUMP);
+ 	else
+@@ -5276,7 +5276,7 @@ static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
+ 	u16 num_elements_per_iq;
+ 	u16 num_elements_per_oq;
+ 
+-	if (reset_devices) {
++	if (is_kdump_kernel()) {
+ 		num_queue_groups = 1;
+ 	} else {
+ 		int num_cpus;
+@@ -6547,10 +6547,10 @@ static void pqi_map_queues(struct Scsi_Host *shost)
+ 	struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
+ 
+ 	if (!ctrl_info->disable_managed_interrupts)
+-		return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
+-			      ctrl_info->pci_dev, 0);
++		blk_mq_map_hw_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
++				       &ctrl_info->pci_dev->dev, 0);
+ 	else
+-		return blk_mq_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT]);
++		blk_mq_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT]);
+ }
+ 
+ static inline bool pqi_is_tape_changer_device(struct pqi_scsi_dev *device)
+@@ -8288,12 +8288,12 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
+ 	u32 product_id;
+ 
+ 	if (reset_devices) {
+-		if (pqi_is_fw_triage_supported(ctrl_info)) {
++		if (is_kdump_kernel() && pqi_is_fw_triage_supported(ctrl_info)) {
+ 			rc = sis_wait_for_fw_triage_completion(ctrl_info);
+ 			if (rc)
+ 				return rc;
+ 		}
+-		if (sis_is_ctrl_logging_supported(ctrl_info)) {
++		if (is_kdump_kernel() && sis_is_ctrl_logging_supported(ctrl_info)) {
+ 			sis_notify_kdump(ctrl_info);
+ 			rc = sis_wait_for_ctrl_logging_completion(ctrl_info);
+ 			if (rc)
+@@ -8344,7 +8344,7 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
+ 	ctrl_info->product_id = (u8)product_id;
+ 	ctrl_info->product_revision = (u8)(product_id >> 8);
+ 
+-	if (reset_devices) {
++	if (is_kdump_kernel()) {
+ 		if (ctrl_info->max_outstanding_requests >
+ 			PQI_MAX_OUTSTANDING_REQUESTS_KDUMP)
+ 				ctrl_info->max_outstanding_requests =
+@@ -8480,7 +8480,7 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
+ 	if (rc)
+ 		return rc;
+ 
+-	if (ctrl_info->ctrl_logging_supported && !reset_devices) {
++	if (ctrl_info->ctrl_logging_supported && !is_kdump_kernel()) {
+ 		pqi_host_setup_buffer(ctrl_info, &ctrl_info->ctrl_log_memory, PQI_CTRL_LOG_TOTAL_SIZE, PQI_CTRL_LOG_MIN_SIZE);
+ 		pqi_host_memory_update(ctrl_info, &ctrl_info->ctrl_log_memory, PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE);
+ 	}
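
The smartpqi hunks swap the reset_devices checks for is_kdump_kernel()
from <linux/crash_dump.h>: reset_devices is just a command-line flag and
can be set on a normal boot, whereas is_kdump_kernel() is true only in a
crash-capture kernel, which is when the cut-down queue counts and
transfer sizes are actually wanted. The test in isolation:

    #include <linux/crash_dump.h>

    if (is_kdump_kernel())
        num_queue_groups = 1;           /* minimal footprint for kdump */
    else
        num_queue_groups = normal_sizing();     /* placeholder: CPU-based path */
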
+diff --git a/drivers/ufs/host/ufs-exynos.c b/drivers/ufs/host/ufs-exynos.c
+index 98505c68103d0e..f2cbfc2d399cdb 100644
+--- a/drivers/ufs/host/ufs-exynos.c
++++ b/drivers/ufs/host/ufs-exynos.c
+@@ -915,6 +915,12 @@ static int exynos_ufs_phy_init(struct exynos_ufs *ufs)
+ 	}
+ 
+ 	phy_set_bus_width(generic_phy, ufs->avail_ln_rx);
++
++	if (generic_phy->power_count) {
++		phy_power_off(generic_phy);
++		phy_exit(generic_phy);
++	}
++
+ 	ret = phy_init(generic_phy);
+ 	if (ret) {
+ 		dev_err(hba->dev, "%s: phy init failed, ret = %d\n",
+diff --git a/fs/Kconfig b/fs/Kconfig
+index aae170fc279524..3117304676331c 100644
+--- a/fs/Kconfig
++++ b/fs/Kconfig
+@@ -369,6 +369,7 @@ config GRACE_PERIOD
+ config LOCKD
+ 	tristate
+ 	depends on FILE_LOCKING
++	select CRC32
+ 	select GRACE_PERIOD
+ 
+ config LOCKD_V4
+diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
+index 73343503ea60e4..08ccf5d5e14407 100644
+--- a/fs/btrfs/super.c
++++ b/fs/btrfs/super.c
+@@ -1140,8 +1140,7 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
+ 	subvol_name = btrfs_get_subvol_name_from_objectid(info,
+ 			btrfs_root_id(BTRFS_I(d_inode(dentry))->root));
+ 	if (!IS_ERR(subvol_name)) {
+-		seq_puts(seq, ",subvol=");
+-		seq_escape(seq, subvol_name, " \t\n\\");
++		seq_show_option(seq, "subvol", subvol_name);
+ 		kfree(subvol_name);
+ 	}
+ 	return 0;
+diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c
+index d220e28e755fef..749c9f66d74c6d 100644
+--- a/fs/fuse/virtio_fs.c
++++ b/fs/fuse/virtio_fs.c
+@@ -1663,6 +1663,9 @@ static int virtio_fs_get_tree(struct fs_context *fsc)
+ 	unsigned int virtqueue_size;
+ 	int err = -EIO;
+ 
++	if (!fsc->source)
++		return invalf(fsc, "No source specified");
++
+ 	/* This gets a reference on virtio_fs object. This ptr gets installed
+ 	 * in fc->iq->priv. Once fuse_conn is going away, it calls ->put()
+ 	 * to drop the reference to this object.
+diff --git a/fs/hfs/bnode.c b/fs/hfs/bnode.c
+index 6add6ebfef8967..cb823a8a6ba960 100644
+--- a/fs/hfs/bnode.c
++++ b/fs/hfs/bnode.c
+@@ -67,6 +67,12 @@ void hfs_bnode_read_key(struct hfs_bnode *node, void *key, int off)
+ 	else
+ 		key_len = tree->max_key_len + 1;
+ 
++	if (key_len > sizeof(hfs_btree_key) || key_len < 1) {
++		memset(key, 0, sizeof(hfs_btree_key));
++		pr_err("hfs: Invalid key length: %d\n", key_len);
++		return;
++	}
++
+ 	hfs_bnode_read(node, key, off, key_len);
+ }
+ 
+diff --git a/fs/hfsplus/bnode.c b/fs/hfsplus/bnode.c
+index 87974d5e679156..079ea80534f7de 100644
+--- a/fs/hfsplus/bnode.c
++++ b/fs/hfsplus/bnode.c
+@@ -67,6 +67,12 @@ void hfs_bnode_read_key(struct hfs_bnode *node, void *key, int off)
+ 	else
+ 		key_len = tree->max_key_len + 2;
+ 
++	if (key_len > sizeof(hfsplus_btree_key) || key_len < 1) {
++		memset(key, 0, sizeof(hfsplus_btree_key));
++		pr_err("hfsplus: Invalid key length: %d\n", key_len);
++		return;
++	}
++
+ 	hfs_bnode_read(node, key, off, key_len);
+ }
+ 
+diff --git a/fs/isofs/export.c b/fs/isofs/export.c
+index 35768a63fb1d23..421d247fae5230 100644
+--- a/fs/isofs/export.c
++++ b/fs/isofs/export.c
+@@ -180,7 +180,7 @@ static struct dentry *isofs_fh_to_parent(struct super_block *sb,
+ 		return NULL;
+ 
+ 	return isofs_export_iget(sb,
+-			fh_len > 2 ? ifid->parent_block : 0,
++			fh_len > 3 ? ifid->parent_block : 0,
+ 			ifid->parent_offset,
+ 			fh_len > 4 ? ifid->parent_generation : 0);
+ }
+diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
+index d3f76101ad4b91..07932ce9246c17 100644
+--- a/fs/nfs/Kconfig
++++ b/fs/nfs/Kconfig
+@@ -2,6 +2,7 @@
+ config NFS_FS
+ 	tristate "NFS client support"
+ 	depends on INET && FILE_LOCKING && MULTIUSER
++	select CRC32
+ 	select LOCKD
+ 	select SUNRPC
+ 	select NFS_COMMON
+@@ -196,7 +197,6 @@ config NFS_USE_KERNEL_DNS
+ config NFS_DEBUG
+ 	bool
+ 	depends on NFS_FS && SUNRPC_DEBUG
+-	select CRC32
+ 	default y
+ 
+ config NFS_DISABLE_UDP_SUPPORT
+diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
+index 6bcc4b0e00ab72..8b568a514fd1c6 100644
+--- a/fs/nfs/internal.h
++++ b/fs/nfs/internal.h
+@@ -895,18 +895,11 @@ u64 nfs_timespec_to_change_attr(const struct timespec64 *ts)
+ 	return ((u64)ts->tv_sec << 30) + ts->tv_nsec;
+ }
+ 
+-#ifdef CONFIG_CRC32
+ static inline u32 nfs_stateid_hash(const nfs4_stateid *stateid)
+ {
+ 	return ~crc32_le(0xFFFFFFFF, &stateid->other[0],
+ 				NFS4_STATEID_OTHER_SIZE);
+ }
+-#else
+-static inline u32 nfs_stateid_hash(nfs4_stateid *stateid)
+-{
+-	return 0;
+-}
+-#endif
+ 
+ static inline bool nfs_error_is_fatal(int err)
+ {
+diff --git a/fs/nfs/nfs4session.h b/fs/nfs/nfs4session.h
+index 351616c61df541..f9c291e2165cd8 100644
+--- a/fs/nfs/nfs4session.h
++++ b/fs/nfs/nfs4session.h
+@@ -148,16 +148,12 @@ static inline void nfs4_copy_sessionid(struct nfs4_sessionid *dst,
+ 	memcpy(dst->data, src->data, NFS4_MAX_SESSIONID_LEN);
+ }
+ 
+-#ifdef CONFIG_CRC32
+ /*
+  * nfs_session_id_hash - calculate the crc32 hash for the session id
+  * @session - pointer to session
+  */
+ #define nfs_session_id_hash(sess_id) \
+ 	(~crc32_le(0xFFFFFFFF, &(sess_id)->data[0], sizeof((sess_id)->data)))
+-#else
+-#define nfs_session_id_hash(session) (0)
+-#endif
+ #else /* defined(CONFIG_NFS_V4_1) */
+ 
+ static inline int nfs4_init_session(struct nfs_client *clp)
+diff --git a/fs/nfsd/Kconfig b/fs/nfsd/Kconfig
+index c0bd1509ccd480..9eb2e795c43c4c 100644
+--- a/fs/nfsd/Kconfig
++++ b/fs/nfsd/Kconfig
+@@ -4,6 +4,7 @@ config NFSD
+ 	depends on INET
+ 	depends on FILE_LOCKING
+ 	depends on FSNOTIFY
++	select CRC32
+ 	select LOCKD
+ 	select SUNRPC
+ 	select EXPORTFS
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 5e81c819c3846a..c50839a015e94f 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -5287,7 +5287,7 @@ static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
+ 	queued = nfsd4_run_cb(&dp->dl_recall);
+ 	WARN_ON_ONCE(!queued);
+ 	if (!queued)
+-		nfs4_put_stid(&dp->dl_stid);
++		refcount_dec(&dp->dl_stid.sc_count);
+ }
+ 
+ /* Called from break_lease() with flc_lock held. */
+diff --git a/fs/nfsd/nfsfh.h b/fs/nfsd/nfsfh.h
+index 876152a91f122f..5103c2f4d2253a 100644
+--- a/fs/nfsd/nfsfh.h
++++ b/fs/nfsd/nfsfh.h
+@@ -267,7 +267,6 @@ static inline bool fh_fsid_match(const struct knfsd_fh *fh1,
+ 	return true;
+ }
+ 
+-#ifdef CONFIG_CRC32
+ /**
+  * knfsd_fh_hash - calculate the crc32 hash for the filehandle
+  * @fh - pointer to filehandle
+@@ -279,12 +278,6 @@ static inline u32 knfsd_fh_hash(const struct knfsd_fh *fh)
+ {
+ 	return ~crc32_le(0xFFFFFFFF, fh->fh_raw, fh->fh_size);
+ }
+-#else
+-static inline u32 knfsd_fh_hash(const struct knfsd_fh *fh)
+-{
+-	return 0;
+-}
+-#endif
+ 
+ /**
+  * fh_clear_pre_post_attrs - Reset pre/post attributes
+diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
+index 844874b4a91a94..500a9634ad5334 100644
+--- a/fs/overlayfs/overlayfs.h
++++ b/fs/overlayfs/overlayfs.h
+@@ -547,8 +547,6 @@ int ovl_set_metacopy_xattr(struct ovl_fs *ofs, struct dentry *d,
+ bool ovl_is_metacopy_dentry(struct dentry *dentry);
+ char *ovl_get_redirect_xattr(struct ovl_fs *ofs, const struct path *path, int padding);
+ int ovl_ensure_verity_loaded(struct path *path);
+-int ovl_get_verity_xattr(struct ovl_fs *ofs, const struct path *path,
+-			 u8 *digest_buf, int *buf_length);
+ int ovl_validate_verity(struct ovl_fs *ofs,
+ 			struct path *metapath,
+ 			struct path *datapath);
+diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
+index fe511192f83cb0..87a36c6eea5f36 100644
+--- a/fs/overlayfs/super.c
++++ b/fs/overlayfs/super.c
+@@ -1119,6 +1119,11 @@ static struct ovl_entry *ovl_get_lowerstack(struct super_block *sb,
+ 		return ERR_PTR(-EINVAL);
+ 	}
+ 
++	if (ctx->nr == ctx->nr_data) {
++		pr_err("at least one non-data lowerdir is required\n");
++		return ERR_PTR(-EINVAL);
++	}
++
+ 	err = -EINVAL;
+ 	for (i = 0; i < ctx->nr; i++) {
+ 		l = &ctx->lower[i];
+diff --git a/fs/smb/client/cifsproto.h b/fs/smb/client/cifsproto.h
+index 907af3cbffd1bc..90b7b30abfbd87 100644
+--- a/fs/smb/client/cifsproto.h
++++ b/fs/smb/client/cifsproto.h
+@@ -160,6 +160,8 @@ extern int cifs_get_writable_path(struct cifs_tcon *tcon, const char *name,
+ extern struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *, bool);
+ extern int cifs_get_readable_path(struct cifs_tcon *tcon, const char *name,
+ 				  struct cifsFileInfo **ret_file);
++extern int cifs_get_hardlink_path(struct cifs_tcon *tcon, struct inode *inode,
++				  struct file *file);
+ extern unsigned int smbCalcSize(void *buf);
+ extern int decode_negTokenInit(unsigned char *security_blob, int length,
+ 			struct TCP_Server_Info *server);
+diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
+index 3aaf5cdce1b720..d5549e06a533d8 100644
+--- a/fs/smb/client/connect.c
++++ b/fs/smb/client/connect.c
+@@ -316,7 +316,6 @@ cifs_abort_connection(struct TCP_Server_Info *server)
+ 			 server->ssocket->flags);
+ 		sock_release(server->ssocket);
+ 		server->ssocket = NULL;
+-		put_net(cifs_net_ns(server));
+ 	}
+ 	server->sequence_number = 0;
+ 	server->session_estab = false;
+@@ -988,13 +987,9 @@ clean_demultiplex_info(struct TCP_Server_Info *server)
+ 	msleep(125);
+ 	if (cifs_rdma_enabled(server))
+ 		smbd_destroy(server);
+-
+ 	if (server->ssocket) {
+ 		sock_release(server->ssocket);
+ 		server->ssocket = NULL;
+-
+-		/* Release netns reference for the socket. */
+-		put_net(cifs_net_ns(server));
+ 	}
+ 
+ 	if (!list_empty(&server->pending_mid_q)) {
+@@ -1042,7 +1037,6 @@ clean_demultiplex_info(struct TCP_Server_Info *server)
+ 		 */
+ 	}
+ 
+-	/* Release netns reference for this server. */
+ 	put_net(cifs_net_ns(server));
+ 	kfree(server->leaf_fullpath);
+ 	kfree(server->hostname);
+@@ -1718,8 +1712,6 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx,
+ 
+ 	tcp_ses->ops = ctx->ops;
+ 	tcp_ses->vals = ctx->vals;
+-
+-	/* Grab netns reference for this server. */
+ 	cifs_set_net_ns(tcp_ses, get_net(current->nsproxy->net_ns));
+ 
+ 	tcp_ses->sign = ctx->sign;
+@@ -1852,7 +1844,6 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx,
+ out_err_crypto_release:
+ 	cifs_crypto_secmech_release(tcp_ses);
+ 
+-	/* Release netns reference for this server. */
+ 	put_net(cifs_net_ns(tcp_ses));
+ 
+ out_err:
+@@ -1861,10 +1852,8 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx,
+ 			cifs_put_tcp_session(tcp_ses->primary_server, false);
+ 		kfree(tcp_ses->hostname);
+ 		kfree(tcp_ses->leaf_fullpath);
+-		if (tcp_ses->ssocket) {
++		if (tcp_ses->ssocket)
+ 			sock_release(tcp_ses->ssocket);
+-			put_net(cifs_net_ns(tcp_ses));
+-		}
+ 		kfree(tcp_ses);
+ 	}
+ 	return ERR_PTR(rc);
+@@ -3132,24 +3121,20 @@ generic_ip_connect(struct TCP_Server_Info *server)
+ 		socket = server->ssocket;
+ 	} else {
+ 		struct net *net = cifs_net_ns(server);
++		struct sock *sk;
+ 
+-		rc = sock_create_kern(net, sfamily, SOCK_STREAM, IPPROTO_TCP, &server->ssocket);
++		rc = __sock_create(net, sfamily, SOCK_STREAM,
++				   IPPROTO_TCP, &server->ssocket, 1);
+ 		if (rc < 0) {
+ 			cifs_server_dbg(VFS, "Error %d creating socket\n", rc);
+ 			return rc;
+ 		}
+ 
+-		/*
+-		 * Grab netns reference for the socket.
+-		 *
+-		 * This reference will be released in several situations:
+-		 * - In the failure path before the cifsd thread is started.
+-		 * - In the all place where server->socket is released, it is
+-		 *   also set to NULL.
+-		 * - Ultimately in clean_demultiplex_info(), during the final
+-		 *   teardown.
+-		 */
+-		get_net(net);
++		sk = server->ssocket->sk;
++		__netns_tracker_free(net, &sk->ns_tracker, false);
++		sk->sk_net_refcnt = 1;
++		get_net_track(net, &sk->ns_tracker, GFP_KERNEL);
++		sock_inuse_add(net, 1);
+ 
+ 		/* BB other socket options to set KEEPALIVE, NODELAY? */
+ 		cifs_dbg(FYI, "Socket created\n");
+@@ -3201,7 +3186,6 @@ generic_ip_connect(struct TCP_Server_Info *server)
+ 	if (rc < 0) {
+ 		cifs_dbg(FYI, "Error %d connecting to server\n", rc);
+ 		trace_smb3_connect_err(server->hostname, server->conn_id, &server->dstaddr, rc);
+-		put_net(cifs_net_ns(server));
+ 		sock_release(socket);
+ 		server->ssocket = NULL;
+ 		return rc;
+diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c
+index 313c851fc1c122..0f6fec042f6a03 100644
+--- a/fs/smb/client/file.c
++++ b/fs/smb/client/file.c
+@@ -1002,6 +1002,11 @@ int cifs_open(struct inode *inode, struct file *file)
+ 		} else {
+ 			_cifsFileInfo_put(cfile, true, false);
+ 		}
++	} else {
++		/* hard link on the deferred close file */
++		rc = cifs_get_hardlink_path(tcon, inode, file);
++		if (rc)
++			cifs_close_deferred_file(CIFS_I(inode));
+ 	}
+ 
+ 	if (server->oplocks)
+@@ -2066,6 +2071,29 @@ cifs_move_llist(struct list_head *source, struct list_head *dest)
+ 		list_move(li, dest);
+ }
+ 
++int
++cifs_get_hardlink_path(struct cifs_tcon *tcon, struct inode *inode,
++				struct file *file)
++{
++	struct cifsFileInfo *open_file = NULL;
++	struct cifsInodeInfo *cinode = CIFS_I(inode);
++	int rc = 0;
++
++	spin_lock(&tcon->open_file_lock);
++	spin_lock(&cinode->open_file_lock);
++
++	list_for_each_entry(open_file, &cinode->openFileList, flist) {
++		if (file->f_flags == open_file->f_flags) {
++			rc = -EINVAL;
++			break;
++		}
++	}
++
++	spin_unlock(&cinode->open_file_lock);
++	spin_unlock(&tcon->open_file_lock);
++	return rc;
++}
++
+ void
+ cifs_free_llist(struct list_head *llist)
+ {
+diff --git a/fs/smb/server/oplock.c b/fs/smb/server/oplock.c
+index deacf78b4400cc..e2ba0dadb5fbf7 100644
+--- a/fs/smb/server/oplock.c
++++ b/fs/smb/server/oplock.c
+@@ -129,14 +129,6 @@ static void free_opinfo(struct oplock_info *opinfo)
+ 	kfree(opinfo);
+ }
+ 
+-static inline void opinfo_free_rcu(struct rcu_head *rcu_head)
+-{
+-	struct oplock_info *opinfo;
+-
+-	opinfo = container_of(rcu_head, struct oplock_info, rcu_head);
+-	free_opinfo(opinfo);
+-}
+-
+ struct oplock_info *opinfo_get(struct ksmbd_file *fp)
+ {
+ 	struct oplock_info *opinfo;
+@@ -157,8 +149,8 @@ static struct oplock_info *opinfo_get_list(struct ksmbd_inode *ci)
+ 	if (list_empty(&ci->m_op_list))
+ 		return NULL;
+ 
+-	rcu_read_lock();
+-	opinfo = list_first_or_null_rcu(&ci->m_op_list, struct oplock_info,
++	down_read(&ci->m_lock);
++	opinfo = list_first_entry(&ci->m_op_list, struct oplock_info,
+ 					op_entry);
+ 	if (opinfo) {
+ 		if (opinfo->conn == NULL ||
+@@ -171,8 +163,7 @@ static struct oplock_info *opinfo_get_list(struct ksmbd_inode *ci)
+ 			}
+ 		}
+ 	}
+-
+-	rcu_read_unlock();
++	up_read(&ci->m_lock);
+ 
+ 	return opinfo;
+ }
+@@ -185,7 +176,7 @@ void opinfo_put(struct oplock_info *opinfo)
+ 	if (!atomic_dec_and_test(&opinfo->refcount))
+ 		return;
+ 
+-	call_rcu(&opinfo->rcu_head, opinfo_free_rcu);
++	free_opinfo(opinfo);
+ }
+ 
+ static void opinfo_add(struct oplock_info *opinfo)
+@@ -193,7 +184,7 @@ static void opinfo_add(struct oplock_info *opinfo)
+ 	struct ksmbd_inode *ci = opinfo->o_fp->f_ci;
+ 
+ 	down_write(&ci->m_lock);
+-	list_add_rcu(&opinfo->op_entry, &ci->m_op_list);
++	list_add(&opinfo->op_entry, &ci->m_op_list);
+ 	up_write(&ci->m_lock);
+ }
+ 
+@@ -207,7 +198,7 @@ static void opinfo_del(struct oplock_info *opinfo)
+ 		write_unlock(&lease_list_lock);
+ 	}
+ 	down_write(&ci->m_lock);
+-	list_del_rcu(&opinfo->op_entry);
++	list_del(&opinfo->op_entry);
+ 	up_write(&ci->m_lock);
+ }
+ 
+@@ -1347,8 +1338,8 @@ void smb_break_all_levII_oplock(struct ksmbd_work *work, struct ksmbd_file *fp,
+ 	ci = fp->f_ci;
+ 	op = opinfo_get(fp);
+ 
+-	rcu_read_lock();
+-	list_for_each_entry_rcu(brk_op, &ci->m_op_list, op_entry) {
++	down_read(&ci->m_lock);
++	list_for_each_entry(brk_op, &ci->m_op_list, op_entry) {
+ 		if (brk_op->conn == NULL)
+ 			continue;
+ 
+@@ -1358,7 +1349,6 @@ void smb_break_all_levII_oplock(struct ksmbd_work *work, struct ksmbd_file *fp,
+ 		if (ksmbd_conn_releasing(brk_op->conn))
+ 			continue;
+ 
+-		rcu_read_unlock();
+ 		if (brk_op->is_lease && (brk_op->o_lease->state &
+ 		    (~(SMB2_LEASE_READ_CACHING_LE |
+ 				SMB2_LEASE_HANDLE_CACHING_LE)))) {
+@@ -1388,9 +1378,8 @@ void smb_break_all_levII_oplock(struct ksmbd_work *work, struct ksmbd_file *fp,
+ 		oplock_break(brk_op, SMB2_OPLOCK_LEVEL_NONE, NULL);
+ next:
+ 		opinfo_put(brk_op);
+-		rcu_read_lock();
+ 	}
+-	rcu_read_unlock();
++	up_read(&ci->m_lock);
+ 
+ 	if (op)
+ 		opinfo_put(op);
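
The ksmbd hunks above trade RCU list traversal for a plain list walked under the ci->m_lock rwsem, so a walker may block while holding the lock and opinfo structures can be freed synchronously once their refcount drops. A minimal userspace sketch of the same reader/writer pattern, using a pthreads rwlock as a stand-in for the rwsem (all names below are illustrative, not kernel symbols):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct opinfo {
        int level;
        struct opinfo *next;
};

static struct opinfo *op_list;
static pthread_rwlock_t m_lock = PTHREAD_RWLOCK_INITIALIZER;

/* writer side: like opinfo_add() under down_write(&ci->m_lock) */
static void opinfo_add(struct opinfo *op)
{
        pthread_rwlock_wrlock(&m_lock);
        op->next = op_list;
        op_list = op;
        pthread_rwlock_unlock(&m_lock);
}

/* reader side: like smb_break_all_levII_oplock() under down_read() */
static void break_all(void)
{
        pthread_rwlock_rdlock(&m_lock);
        for (struct opinfo *op = op_list; op; op = op->next)
                printf("breaking oplock, level %d\n", op->level);
        pthread_rwlock_unlock(&m_lock);
}

int main(void)
{
        struct opinfo *a = calloc(1, sizeof(*a));
        struct opinfo *b = calloc(1, sizeof(*b));

        a->level = 2;
        b->level = 1;
        opinfo_add(a);
        opinfo_add(b);
        break_all();
        free(b);
        free(a);
        return 0;
}

The trade-off is that readers now serialize against writers, but the walk no longer has to drop and reacquire protection around blocking work, which is what the removed rcu_read_unlock()/rcu_read_lock() pair in smb_break_all_levII_oplock() was doing.
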
+diff --git a/fs/smb/server/oplock.h b/fs/smb/server/oplock.h
+index 3f64f07872638e..9a56eaadd0dd8f 100644
+--- a/fs/smb/server/oplock.h
++++ b/fs/smb/server/oplock.h
+@@ -71,7 +71,6 @@ struct oplock_info {
+ 	struct list_head        lease_entry;
+ 	wait_queue_head_t oplock_q; /* Other server threads */
+ 	wait_queue_head_t oplock_brk; /* oplock breaking wait */
+-	struct rcu_head		rcu_head;
+ };
+ 
+ struct lease_break_info {
+diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
+index 7fea86edc71763..129517a0c5c739 100644
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -1599,8 +1599,10 @@ static int krb5_authenticate(struct ksmbd_work *work,
+ 	if (prev_sess_id && prev_sess_id != sess->id)
+ 		destroy_previous_session(conn, sess->user, prev_sess_id);
+ 
+-	if (sess->state == SMB2_SESSION_VALID)
++	if (sess->state == SMB2_SESSION_VALID) {
+ 		ksmbd_free_user(sess->user);
++		sess->user = NULL;
++	}
+ 
+ 	retval = ksmbd_krb5_authenticate(sess, in_blob, in_len,
+ 					 out_blob, &out_len);
+diff --git a/fs/smb/server/transport_ipc.c b/fs/smb/server/transport_ipc.c
+index 87af57cf35a157..9b3c68014aee28 100644
+--- a/fs/smb/server/transport_ipc.c
++++ b/fs/smb/server/transport_ipc.c
+@@ -310,7 +310,11 @@ static int ipc_server_config_on_startup(struct ksmbd_startup_request *req)
+ 	server_conf.signing = req->signing;
+ 	server_conf.tcp_port = req->tcp_port;
+ 	server_conf.ipc_timeout = req->ipc_timeout * HZ;
+-	server_conf.deadtime = req->deadtime * SMB_ECHO_INTERVAL;
++	if (check_mul_overflow(req->deadtime, SMB_ECHO_INTERVAL,
++					&server_conf.deadtime)) {
++		ret = -EINVAL;
++		goto out;
++	}
+ 	server_conf.share_fake_fscaps = req->share_fake_fscaps;
+ 	ksmbd_init_domain(req->sub_auth);
+ 
+@@ -336,6 +340,7 @@ static int ipc_server_config_on_startup(struct ksmbd_startup_request *req)
+ 	ret |= ksmbd_set_work_group(req->work_group);
+ 	ret |= ksmbd_tcp_set_interfaces(KSMBD_STARTUP_CONFIG_INTERFACES(req),
+ 					req->ifc_list_sz);
++out:
+ 	if (ret) {
+ 		pr_err("Server configuration error: %s %s %s\n",
+ 		       req->netbios_name, req->server_string,
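
The deadtime change guards a user-controlled multiplication with check_mul_overflow(), failing the IPC request instead of letting the product wrap. A small userspace sketch of the same pattern via __builtin_mul_overflow(), the compiler primitive the kernel helper is built on (ECHO_INTERVAL and set_deadtime() are illustrative stand-ins, not the kernel symbols):

#include <stdio.h>
#include <limits.h>

#define ECHO_INTERVAL 60UL        /* stand-in for SMB_ECHO_INTERVAL */

static int set_deadtime(unsigned long req_deadtime, unsigned long *out)
{
        unsigned long product;

        /* true if the multiplication wrapped; mirrors check_mul_overflow() */
        if (__builtin_mul_overflow(req_deadtime, ECHO_INTERVAL, &product))
                return -1;        /* corresponds to the -EINVAL path */

        *out = product;
        return 0;
}

int main(void)
{
        unsigned long dt;

        if (!set_deadtime(10, &dt))
                printf("ok: deadtime=%lu\n", dt);
        if (set_deadtime(ULONG_MAX / 2, &dt))
                printf("rejected: product would overflow\n");
        return 0;
}
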
+diff --git a/fs/smb/server/vfs.c b/fs/smb/server/vfs.c
+index ee825971abd9ab..8fd070e31fa7dd 100644
+--- a/fs/smb/server/vfs.c
++++ b/fs/smb/server/vfs.c
+@@ -496,7 +496,8 @@ int ksmbd_vfs_write(struct ksmbd_work *work, struct ksmbd_file *fp,
+ 	int err = 0;
+ 
+ 	if (work->conn->connection_type) {
+-		if (!(fp->daccess & (FILE_WRITE_DATA_LE | FILE_APPEND_DATA_LE))) {
++		if (!(fp->daccess & (FILE_WRITE_DATA_LE | FILE_APPEND_DATA_LE)) ||
++		    S_ISDIR(file_inode(fp->filp)->i_mode)) {
+ 			pr_err("no right to write(%pD)\n", fp->filp);
+ 			err = -EACCES;
+ 			goto out;
+diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
+index 8e7af9a03b41dd..e721148c95d07d 100644
+--- a/include/linux/backing-dev.h
++++ b/include/linux/backing-dev.h
+@@ -249,6 +249,7 @@ static inline struct bdi_writeback *inode_to_wb(const struct inode *inode)
+ {
+ #ifdef CONFIG_LOCKDEP
+ 	WARN_ON_ONCE(debug_locks &&
++		     (inode->i_sb->s_iflags & SB_I_CGROUPWB) &&
+ 		     (!lockdep_is_held(&inode->i_lock) &&
+ 		      !lockdep_is_held(&inode->i_mapping->i_pages.xa_lock) &&
+ 		      !lockdep_is_held(&inode->i_wb->list_lock)));
+diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
+index 7b5e5388c3801a..318245b4e38fb3 100644
+--- a/include/linux/blk-mq.h
++++ b/include/linux/blk-mq.h
+@@ -230,62 +230,61 @@ static inline unsigned short req_get_ioprio(struct request *req)
+ #define rq_dma_dir(rq) \
+ 	(op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)
+ 
+-#define rq_list_add(listptr, rq)	do {		\
+-	(rq)->rq_next = *(listptr);			\
+-	*(listptr) = rq;				\
+-} while (0)
+-
+-#define rq_list_add_tail(lastpptr, rq)	do {		\
+-	(rq)->rq_next = NULL;				\
+-	**(lastpptr) = rq;				\
+-	*(lastpptr) = &rq->rq_next;			\
+-} while (0)
+-
+-#define rq_list_pop(listptr)				\
+-({							\
+-	struct request *__req = NULL;			\
+-	if ((listptr) && *(listptr))	{		\
+-		__req = *(listptr);			\
+-		*(listptr) = __req->rq_next;		\
+-	}						\
+-	__req;						\
+-})
++static inline int rq_list_empty(const struct rq_list *rl)
++{
++	return rl->head == NULL;
++}
+ 
+-#define rq_list_peek(listptr)				\
+-({							\
+-	struct request *__req = NULL;			\
+-	if ((listptr) && *(listptr))			\
+-		__req = *(listptr);			\
+-	__req;						\
+-})
++static inline void rq_list_init(struct rq_list *rl)
++{
++	rl->head = NULL;
++	rl->tail = NULL;
++}
+ 
+-#define rq_list_for_each(listptr, pos)			\
+-	for (pos = rq_list_peek((listptr)); pos; pos = rq_list_next(pos))
++static inline void rq_list_add_tail(struct rq_list *rl, struct request *rq)
++{
++	rq->rq_next = NULL;
++	if (rl->tail)
++		rl->tail->rq_next = rq;
++	else
++		rl->head = rq;
++	rl->tail = rq;
++}
+ 
+-#define rq_list_for_each_safe(listptr, pos, nxt)			\
+-	for (pos = rq_list_peek((listptr)), nxt = rq_list_next(pos);	\
+-		pos; pos = nxt, nxt = pos ? rq_list_next(pos) : NULL)
++static inline void rq_list_add_head(struct rq_list *rl, struct request *rq)
++{
++	rq->rq_next = rl->head;
++	rl->head = rq;
++	if (!rl->tail)
++		rl->tail = rq;
++}
+ 
+-#define rq_list_next(rq)	(rq)->rq_next
+-#define rq_list_empty(list)	((list) == (struct request *) NULL)
++static inline struct request *rq_list_pop(struct rq_list *rl)
++{
++	struct request *rq = rl->head;
+ 
+-/**
+- * rq_list_move() - move a struct request from one list to another
+- * @src: The source list @rq is currently in
+- * @dst: The destination list that @rq will be appended to
+- * @rq: The request to move
+- * @prev: The request preceding @rq in @src (NULL if @rq is the head)
+- */
+-static inline void rq_list_move(struct request **src, struct request **dst,
+-				struct request *rq, struct request *prev)
++	if (rq) {
++		rl->head = rl->head->rq_next;
++		if (!rl->head)
++			rl->tail = NULL;
++		rq->rq_next = NULL;
++	}
++
++	return rq;
++}
++
++static inline struct request *rq_list_peek(struct rq_list *rl)
+ {
+-	if (prev)
+-		prev->rq_next = rq->rq_next;
+-	else
+-		*src = rq->rq_next;
+-	rq_list_add(dst, rq);
++	return rl->head;
+ }
+ 
++#define rq_list_for_each(rl, pos)					\
++	for (pos = rq_list_peek((rl)); (pos); pos = pos->rq_next)
++
++#define rq_list_for_each_safe(rl, pos, nxt)				\
++	for (pos = rq_list_peek((rl)), nxt = pos->rq_next;		\
++		pos; pos = nxt, nxt = pos ? pos->rq_next : NULL)
++
+ /**
+  * enum blk_eh_timer_return - How the timeout handler should proceed
+  * @BLK_EH_DONE: The block driver completed the command or will complete it at
+@@ -577,7 +576,7 @@ struct blk_mq_ops {
+ 	 * empty the @rqlist completely, then the rest will be queued
+ 	 * individually by the block layer upon return.
+ 	 */
+-	void (*queue_rqs)(struct request **rqlist);
++	void (*queue_rqs)(struct rq_list *rqlist);
+ 
+ 	/**
+ 	 * @get_budget: Reserve budget before queue request, once .queue_rq is
+@@ -910,7 +909,7 @@ static inline bool blk_mq_add_to_batch(struct request *req,
+ 	else if (iob->complete != complete)
+ 		return false;
+ 	iob->need_ts |= blk_mq_need_time_stamp(req);
+-	rq_list_add(&iob->req_list, req);
++	rq_list_add_head(&iob->req_list, req);
+ 	return true;
+ }
+ 
+@@ -947,6 +946,8 @@ void blk_mq_unfreeze_queue_non_owner(struct request_queue *q);
+ void blk_freeze_queue_start_non_owner(struct request_queue *q);
+ 
+ void blk_mq_map_queues(struct blk_mq_queue_map *qmap);
++void blk_mq_map_hw_queues(struct blk_mq_queue_map *qmap,
++			  struct device *dev, unsigned int offset);
+ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);
+ 
+ void blk_mq_quiesce_queue_nowait(struct request_queue *q);
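
The blk-mq hunk replaces the open-coded macro list with struct rq_list carrying explicit head and tail pointers, which makes tail insertion O(1) without the old **lastpptr double-indirection trick. A self-contained sketch of the data structure, mirroring the helpers above outside the kernel:

#include <stdio.h>

struct request {
        int tag;
        struct request *rq_next;
};

struct rq_list {
        struct request *head;
        struct request *tail;
};

static void rq_list_add_tail(struct rq_list *rl, struct request *rq)
{
        rq->rq_next = NULL;
        if (rl->tail)
                rl->tail->rq_next = rq;
        else
                rl->head = rq;        /* first entry */
        rl->tail = rq;
}

static struct request *rq_list_pop(struct rq_list *rl)
{
        struct request *rq = rl->head;

        if (rq) {
                rl->head = rq->rq_next;
                if (!rl->head)
                        rl->tail = NULL;        /* list became empty */
                rq->rq_next = NULL;
        }
        return rq;
}

int main(void)
{
        struct rq_list rl = { NULL, NULL };
        struct request a = { .tag = 1 }, b = { .tag = 2 };
        struct request *rq;

        rq_list_add_tail(&rl, &a);
        rq_list_add_tail(&rl, &b);
        while ((rq = rq_list_pop(&rl)))
                printf("popped tag %d\n", rq->tag);
        return 0;
}

Keeping the tail pointer coherent in the pop path is the subtle part: when the head becomes NULL the tail must be cleared too, or the next add_tail would write through a dangling pointer.
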
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index 8f37c5dd52b215..b94dc4b796f5a1 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -995,6 +995,11 @@ extern void blk_put_queue(struct request_queue *);
+ 
+ void blk_mark_disk_dead(struct gendisk *disk);
+ 
++struct rq_list {
++	struct request *head;
++	struct request *tail;
++};
++
+ #ifdef CONFIG_BLOCK
+ /*
+  * blk_plug permits building a queue of related requests by holding the I/O
+@@ -1008,10 +1013,10 @@ void blk_mark_disk_dead(struct gendisk *disk);
+  * blk_flush_plug() is called.
+  */
+ struct blk_plug {
+-	struct request *mq_list; /* blk-mq requests */
++	struct rq_list mq_list; /* blk-mq requests */
+ 
+ 	/* if ios_left is > 1, we can batch tag/rq allocations */
+-	struct request *cached_rq;
++	struct rq_list cached_rqs;
+ 	u64 cur_ktime;
+ 	unsigned short nr_ios;
+ 
+@@ -1660,7 +1665,7 @@ int bdev_thaw(struct block_device *bdev);
+ void bdev_fput(struct file *bdev_file);
+ 
+ struct io_comp_batch {
+-	struct request *req_list;
++	struct rq_list req_list;
+ 	bool need_ts;
+ 	void (*complete)(struct io_comp_batch *);
+ };
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index a7af13f550e0d4..1150a595aa54c2 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -1499,6 +1499,7 @@ struct bpf_prog_aux {
+ 	bool exception_cb;
+ 	bool exception_boundary;
+ 	bool is_extended; /* true if extended by freplace program */
++	bool changes_pkt_data;
+ 	u64 prog_array_member_cnt; /* counts how many times as member of prog_array */
+ 	struct mutex ext_mutex; /* mutex for is_extended and prog_array_member_cnt */
+ 	struct bpf_arena *arena;
+diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
+index 4513372c5bc8e0..50eeb5b86ed70b 100644
+--- a/include/linux/bpf_verifier.h
++++ b/include/linux/bpf_verifier.h
+@@ -668,6 +668,7 @@ struct bpf_subprog_info {
+ 	bool args_cached: 1;
+ 	/* true if bpf_fastcall stack region is used by functions that can't be inlined */
+ 	bool keep_fastcall_stack: 1;
++	bool changes_pkt_data: 1;
+ 
+ 	u8 arg_cnt;
+ 	struct bpf_subprog_arg_info args[MAX_BPF_FUNC_REG_ARGS];
+diff --git a/include/linux/device/bus.h b/include/linux/device/bus.h
+index cdc4757217f9bb..b18658bce2c381 100644
+--- a/include/linux/device/bus.h
++++ b/include/linux/device/bus.h
+@@ -48,6 +48,7 @@ struct fwnode_handle;
+  *		will never get called until they do.
+  * @remove:	Called when a device removed from this bus.
+  * @shutdown:	Called at shut-down time to quiesce the device.
++ * @irq_get_affinity:	Get IRQ affinity mask for the device on this bus.
+  *
+  * @online:	Called to put the device back online (after offlining it).
+  * @offline:	Called to put the device offline for hot-removal. May fail.
+@@ -87,6 +88,8 @@ struct bus_type {
+ 	void (*sync_state)(struct device *dev);
+ 	void (*remove)(struct device *dev);
+ 	void (*shutdown)(struct device *dev);
++	const struct cpumask *(*irq_get_affinity)(struct device *dev,
++			unsigned int irq_vec);
+ 
+ 	int (*online)(struct device *dev);
+ 	int (*offline)(struct device *dev);
+diff --git a/include/linux/nfs.h b/include/linux/nfs.h
+index 9ad727ddfedb34..0906a0b40c6aa5 100644
+--- a/include/linux/nfs.h
++++ b/include/linux/nfs.h
+@@ -55,7 +55,6 @@ enum nfs3_stable_how {
+ 	NFS_INVALID_STABLE_HOW = -1
+ };
+ 
+-#ifdef CONFIG_CRC32
+ /**
+  * nfs_fhandle_hash - calculate the crc32 hash for the filehandle
+  * @fh - pointer to filehandle
+@@ -67,10 +66,4 @@ static inline u32 nfs_fhandle_hash(const struct nfs_fh *fh)
+ {
+ 	return ~crc32_le(0xFFFFFFFF, &fh->data[0], fh->size);
+ }
+-#else /* CONFIG_CRC32 */
+-static inline u32 nfs_fhandle_hash(const struct nfs_fh *fh)
+-{
+-	return 0;
+-}
+-#endif /* CONFIG_CRC32 */
+ #endif /* _LINUX_NFS_H */
+diff --git a/io_uring/rw.c b/io_uring/rw.c
+index 6abc495602a4e9..a1ed64760eba2d 100644
+--- a/io_uring/rw.c
++++ b/io_uring/rw.c
+@@ -1190,12 +1190,12 @@ int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
+ 			poll_flags |= BLK_POLL_ONESHOT;
+ 
+ 		/* iopoll may have completed current req */
+-		if (!rq_list_empty(iob.req_list) ||
++		if (!rq_list_empty(&iob.req_list) ||
+ 		    READ_ONCE(req->iopoll_completed))
+ 			break;
+ 	}
+ 
+-	if (!rq_list_empty(iob.req_list))
++	if (!rq_list_empty(&iob.req_list))
+ 		iob.complete(&iob);
+ 	else if (!pos)
+ 		return 0;
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 9000806ee3bae8..d2ef289993f20d 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -2528,16 +2528,36 @@ static int cmp_subprogs(const void *a, const void *b)
+ 	       ((struct bpf_subprog_info *)b)->start;
+ }
+ 
++/* Find subprogram that contains instruction at 'off' */
++static struct bpf_subprog_info *find_containing_subprog(struct bpf_verifier_env *env, int off)
++{
++	struct bpf_subprog_info *vals = env->subprog_info;
++	int l, r, m;
++
++	if (off >= env->prog->len || off < 0 || env->subprog_cnt == 0)
++		return NULL;
++
++	l = 0;
++	r = env->subprog_cnt - 1;
++	while (l < r) {
++		m = l + (r - l + 1) / 2;
++		if (vals[m].start <= off)
++			l = m;
++		else
++			r = m - 1;
++	}
++	return &vals[l];
++}
++
++/* Find subprogram that starts exactly at 'off' */
+ static int find_subprog(struct bpf_verifier_env *env, int off)
+ {
+ 	struct bpf_subprog_info *p;
+ 
+-	p = bsearch(&off, env->subprog_info, env->subprog_cnt,
+-		    sizeof(env->subprog_info[0]), cmp_subprogs);
+-	if (!p)
++	p = find_containing_subprog(env, off);
++	if (!p || p->start != off)
+ 		return -ENOENT;
+ 	return p - env->subprog_info;
+-
+ }
+ 
+ static int add_subprog(struct bpf_verifier_env *env, int off)
+@@ -9811,6 +9831,8 @@ static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
+ 
+ 		verbose(env, "Func#%d ('%s') is global and assumed valid.\n",
+ 			subprog, sub_name);
++		if (env->subprog_info[subprog].changes_pkt_data)
++			clear_all_pkt_pointers(env);
+ 		/* mark global subprog for verifying after main prog */
+ 		subprog_aux(env, subprog)->called = true;
+ 		clear_caller_saved_regs(env, caller->regs);
+@@ -16001,6 +16023,29 @@ static int check_return_code(struct bpf_verifier_env *env, int regno, const char
+ 	return 0;
+ }
+ 
++static void mark_subprog_changes_pkt_data(struct bpf_verifier_env *env, int off)
++{
++	struct bpf_subprog_info *subprog;
++
++	subprog = find_containing_subprog(env, off);
++	subprog->changes_pkt_data = true;
++}
++
++/* 't' is an index of a call-site.
++ * 'w' is a callee entry point.
++ * Eventually this function would be called when env->cfg.insn_state[w] == EXPLORED.
++ * Rely on DFS traversal order and absence of recursive calls to guarantee that
++ * callee's changes_pkt_data marks would be correct at that moment.
++ */
++static void merge_callee_effects(struct bpf_verifier_env *env, int t, int w)
++{
++	struct bpf_subprog_info *caller, *callee;
++
++	caller = find_containing_subprog(env, t);
++	callee = find_containing_subprog(env, w);
++	caller->changes_pkt_data |= callee->changes_pkt_data;
++}
++
+ /* non-recursive DFS pseudo code
+  * 1  procedure DFS-iterative(G,v):
+  * 2      label v as discovered
+@@ -16134,6 +16179,7 @@ static int visit_func_call_insn(int t, struct bpf_insn *insns,
+ 				bool visit_callee)
+ {
+ 	int ret, insn_sz;
++	int w;
+ 
+ 	insn_sz = bpf_is_ldimm64(&insns[t]) ? 2 : 1;
+ 	ret = push_insn(t, t + insn_sz, FALLTHROUGH, env);
+@@ -16145,8 +16191,10 @@ static int visit_func_call_insn(int t, struct bpf_insn *insns,
+ 	mark_jmp_point(env, t + insn_sz);
+ 
+ 	if (visit_callee) {
++		w = t + insns[t].imm + 1;
+ 		mark_prune_point(env, t);
+-		ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env);
++		merge_callee_effects(env, t, w);
++		ret = push_insn(t, w, BRANCH, env);
+ 	}
+ 	return ret;
+ }
+@@ -16466,6 +16514,8 @@ static int visit_insn(int t, struct bpf_verifier_env *env)
+ 			mark_prune_point(env, t);
+ 			mark_jmp_point(env, t);
+ 		}
++		if (bpf_helper_call(insn) && bpf_helper_changes_pkt_data(insn->imm))
++			mark_subprog_changes_pkt_data(env, t);
+ 		if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) {
+ 			struct bpf_kfunc_call_arg_meta meta;
+ 
+@@ -16600,6 +16650,7 @@ static int check_cfg(struct bpf_verifier_env *env)
+ 		}
+ 	}
+ 	ret = 0; /* cfg looks good */
++	env->prog->aux->changes_pkt_data = env->subprog_info[0].changes_pkt_data;
+ 
+ err_free:
+ 	kvfree(insn_state);
+@@ -20102,6 +20153,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
+ 		func[i]->aux->num_exentries = num_exentries;
+ 		func[i]->aux->tail_call_reachable = env->subprog_info[i].tail_call_reachable;
+ 		func[i]->aux->exception_cb = env->subprog_info[i].is_exception_cb;
++		func[i]->aux->changes_pkt_data = env->subprog_info[i].changes_pkt_data;
+ 		if (!i)
+ 			func[i]->aux->exception_boundary = env->seen_exception;
+ 		func[i] = bpf_int_jit_compile(func[i]);
+@@ -21938,6 +21990,7 @@ int bpf_check_attach_target(struct bpf_verifier_log *log,
+ 	}
+ 	if (tgt_prog) {
+ 		struct bpf_prog_aux *aux = tgt_prog->aux;
++		bool tgt_changes_pkt_data;
+ 
+ 		if (bpf_prog_is_dev_bound(prog->aux) &&
+ 		    !bpf_prog_dev_bound_match(prog, tgt_prog)) {
+@@ -21972,6 +22025,14 @@ int bpf_check_attach_target(struct bpf_verifier_log *log,
+ 					"Extension programs should be JITed\n");
+ 				return -EINVAL;
+ 			}
++			tgt_changes_pkt_data = aux->func
++					       ? aux->func[subprog]->aux->changes_pkt_data
++					       : aux->changes_pkt_data;
++			if (prog->aux->changes_pkt_data && !tgt_changes_pkt_data) {
++				bpf_log(log,
++					"Extension program changes packet data, while original does not\n");
++				return -EINVAL;
++			}
+ 		}
+ 		if (!tgt_prog->jited) {
+ 			bpf_log(log, "Can attach to only JITed progs\n");
+@@ -22437,10 +22498,6 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u3
+ 	if (ret < 0)
+ 		goto skip_full_check;
+ 
+-	ret = check_attach_btf_id(env);
+-	if (ret)
+-		goto skip_full_check;
+-
+ 	ret = resolve_pseudo_ldimm64(env);
+ 	if (ret < 0)
+ 		goto skip_full_check;
+@@ -22455,6 +22512,10 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u3
+ 	if (ret < 0)
+ 		goto skip_full_check;
+ 
++	ret = check_attach_btf_id(env);
++	if (ret)
++		goto skip_full_check;
++
+ 	ret = mark_fastcall_patterns(env);
+ 	if (ret < 0)
+ 		goto skip_full_check;
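
find_containing_subprog() above is the "rightmost entry with start <= off" variant of binary search: m is biased upward by (r - l + 1) / 2 so that the l = m branch always makes progress. A standalone sketch under the same invariant, over an ascending array of subprogram start offsets (names and values illustrative):

#include <stdio.h>

static int find_containing(const int *starts, int n, int off)
{
        int l = 0, r = n - 1, m;

        if (n == 0 || off < starts[0])
                return -1;

        while (l < r) {
                m = l + (r - l + 1) / 2;        /* round up: avoids l == m looping */
                if (starts[m] <= off)
                        l = m;                  /* candidate; search right half */
                else
                        r = m - 1;
        }
        return l;        /* last index with starts[l] <= off */
}

int main(void)
{
        int starts[] = { 0, 10, 25 };        /* subprog entry offsets */

        printf("%d\n", find_containing(starts, 3, 12));        /* 1 */
        printf("%d\n", find_containing(starts, 3, 25));        /* 2 */
        return 0;
}
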
+diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
+index e51d5ce730be15..e5ced97d9681c1 100644
+--- a/kernel/sched/cpufreq_schedutil.c
++++ b/kernel/sched/cpufreq_schedutil.c
+@@ -81,9 +81,20 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
+ 	if (!cpufreq_this_cpu_can_update(sg_policy->policy))
+ 		return false;
+ 
+-	if (unlikely(sg_policy->limits_changed)) {
+-		sg_policy->limits_changed = false;
+-		sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);
++	if (unlikely(READ_ONCE(sg_policy->limits_changed))) {
++		WRITE_ONCE(sg_policy->limits_changed, false);
++		sg_policy->need_freq_update = true;
++
++		/*
++		 * The above limits_changed update must occur before the reads
++		 * of policy limits in cpufreq_driver_resolve_freq() or a policy
++		 * limits update might be missed, so use a memory barrier to
++		 * ensure it.
++		 *
++		 * This pairs with the write memory barrier in sugov_limits().
++		 */
++		smp_mb();
++
+ 		return true;
+ 	}
+ 
+@@ -95,10 +106,22 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
+ static bool sugov_update_next_freq(struct sugov_policy *sg_policy, u64 time,
+ 				   unsigned int next_freq)
+ {
+-	if (sg_policy->need_freq_update)
++	if (sg_policy->need_freq_update) {
+ 		sg_policy->need_freq_update = false;
+-	else if (sg_policy->next_freq == next_freq)
++		/*
++		 * The policy limits have changed, but if the return value of
++		 * cpufreq_driver_resolve_freq() after applying the new limits
++		 * is still equal to the previously selected frequency, the
++		 * driver callback need not be invoked unless the driver
++		 * specifically wants that to happen on every update of the
++		 * policy limits.
++		 */
++		if (sg_policy->next_freq == next_freq &&
++		    !cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS))
++			return false;
++	} else if (sg_policy->next_freq == next_freq) {
+ 		return false;
++	}
+ 
+ 	sg_policy->next_freq = next_freq;
+ 	sg_policy->last_freq_update_time = time;
+@@ -365,7 +388,7 @@ static inline bool sugov_hold_freq(struct sugov_cpu *sg_cpu) { return false; }
+ static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu)
+ {
+ 	if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_min)
+-		sg_cpu->sg_policy->limits_changed = true;
++		WRITE_ONCE(sg_cpu->sg_policy->limits_changed, true);
+ }
+ 
+ static inline bool sugov_update_single_common(struct sugov_cpu *sg_cpu,
+@@ -888,7 +911,16 @@ static void sugov_limits(struct cpufreq_policy *policy)
+ 		mutex_unlock(&sg_policy->work_lock);
+ 	}
+ 
+-	sg_policy->limits_changed = true;
++	/*
++	 * The limits_changed update below must take place before the updates
++	 * of policy limits in cpufreq_set_policy() or a policy limits update
++	 * might be missed, so use a memory barrier to ensure it.
++	 *
++	 * This pairs with the memory barrier in sugov_should_update_freq().
++	 */
++	smp_wmb();
++
++	WRITE_ONCE(sg_policy->limits_changed, true);
+ }
+ 
+ struct cpufreq_governor schedutil_gov = {
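
The schedutil hunks pair a write barrier in the publisher with a full barrier in the consumer: the limits are stored before limits_changed is set, and limits_changed is cleared before the limits are re-read, so a reader that observes the flag can never act on stale limits. A single-threaded sketch of the ordering shape using C11 fences in place of smp_wmb()/smp_mb() (an illustration only, not a faithful model of kernel barrier semantics):

#include <stdatomic.h>
#include <stdio.h>

static int limits;                     /* stands in for the policy limits */
static atomic_bool limits_changed;

static void sugov_limits_like(int new_limit)
{
        limits = new_limit;            /* publish the new limits first ... */
        atomic_thread_fence(memory_order_release);        /* ~ smp_wmb() */
        atomic_store_explicit(&limits_changed, 1, memory_order_relaxed);
}

static void should_update_freq_like(void)
{
        if (atomic_load_explicit(&limits_changed, memory_order_relaxed)) {
                atomic_store_explicit(&limits_changed, 0, memory_order_relaxed);
                atomic_thread_fence(memory_order_seq_cst);        /* ~ smp_mb() */
                /* only now is it safe to re-read the limits */
                printf("re-resolving frequency against limit %d\n", limits);
        }
}

int main(void)
{
        sugov_limits_like(2400);
        should_update_freq_like();
        return 0;
}
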
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 90b59c627bb8e7..e67d67f7b90650 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -5944,9 +5944,10 @@ int register_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
+ 
+ 	/* Make a copy hash to place the new and the old entries in */
+ 	size = hash->count + direct_functions->count;
+-	if (size > 32)
+-		size = 32;
+-	new_hash = alloc_ftrace_hash(fls(size));
++	size = fls(size);
++	if (size > FTRACE_HASH_MAX_BITS)
++		size = FTRACE_HASH_MAX_BITS;
++	new_hash = alloc_ftrace_hash(size);
+ 	if (!new_hash)
+ 		goto out_unlock;
+ 
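
The ftrace fix moves the clamp from the entry count into the bit domain: fls() returns the position of the most significant set bit, i.e. the hash order needed for a given count, and it is that order which must be capped at FTRACE_HASH_MAX_BITS. A sketch of the sizing arithmetic (HASH_MAX_BITS and fls_() are local stand-ins for the kernel symbols):

#include <stdio.h>

#define HASH_MAX_BITS 12        /* stand-in for FTRACE_HASH_MAX_BITS */

/* position of the most significant set bit, 1-based; 0 for 0 */
static int fls_(unsigned int x)
{
        int r = 0;

        while (x) {
                r++;
                x >>= 1;
        }
        return r;
}

int main(void)
{
        unsigned int count = 100000;        /* combined entry count */
        int bits = fls_(count);             /* 17 bits for 100000 entries */

        if (bits > HASH_MAX_BITS)
                bits = HASH_MAX_BITS;       /* clamp the order, not the count */

        printf("allocating hash with %d bits (%u buckets)\n",
               bits, 1u << bits);
        return 0;
}
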
+diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
+index 0c611b281a5b5f..f50c2ad43f3d82 100644
+--- a/kernel/trace/trace_events_filter.c
++++ b/kernel/trace/trace_events_filter.c
+@@ -808,7 +808,7 @@ static __always_inline char *test_string(char *str)
+ 	kstr = ubuf->buffer;
+ 
+ 	/* For safety, do not trust the string pointer */
+-	if (!strncpy_from_kernel_nofault(kstr, str, USTRING_BUF_SIZE))
++	if (strncpy_from_kernel_nofault(kstr, str, USTRING_BUF_SIZE) < 0)
+ 		return NULL;
+ 	return kstr;
+ }
+@@ -827,7 +827,7 @@ static __always_inline char *test_ustring(char *str)
+ 
+ 	/* user space address? */
+ 	ustr = (char __user *)str;
+-	if (!strncpy_from_user_nofault(kstr, ustr, USTRING_BUF_SIZE))
++	if (strncpy_from_user_nofault(kstr, ustr, USTRING_BUF_SIZE) < 0)
+ 		return NULL;
+ 
+ 	return kstr;
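
The two filter hunks fix an inverted error check: strncpy_from_kernel_nofault() and strncpy_from_user_nofault() return a byte count (>= 0) on success and a negative errno on fault, so testing the result with ! let -EFAULT slip through as success. A userspace sketch of the contract (fake_copy() is an illustrative stand-in, not the kernel helper):

#include <stdio.h>
#include <errno.h>

/* returns bytes copied (>= 0) or -EFAULT, like the nofault helpers */
static long fake_copy(const char *src, char *dst, long n)
{
        long i = 0;

        if (!src)
                return -EFAULT;        /* unreadable source address */
        while (i < n - 1 && src[i]) {
                dst[i] = src[i];
                i++;
        }
        dst[i] = '\0';
        return i;
}

int main(void)
{
        char buf[16];
        long ret = fake_copy(NULL, buf, sizeof(buf));

        /* old check: !ret misses the fault, since -EFAULT is nonzero */
        printf("!ret flags the fault?    %s\n", !ret ? "yes" : "no");
        /* fixed check: a negative return is the error indication */
        printf("ret < 0 flags the fault? %s\n", ret < 0 ? "yes" : "no");
        return 0;
}
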
+diff --git a/lib/string.c b/lib/string.c
+index 76327b51e36f25..e657809fa71892 100644
+--- a/lib/string.c
++++ b/lib/string.c
+@@ -113,6 +113,7 @@ ssize_t sized_strscpy(char *dest, const char *src, size_t count)
+ 	if (count == 0 || WARN_ON_ONCE(count > INT_MAX))
+ 		return -E2BIG;
+ 
++#ifndef CONFIG_DCACHE_WORD_ACCESS
+ #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ 	/*
+ 	 * If src is unaligned, don't cross a page boundary,
+@@ -127,12 +128,14 @@ ssize_t sized_strscpy(char *dest, const char *src, size_t count)
+ 	/* If src or dest is unaligned, don't do word-at-a-time. */
+ 	if (((long) dest | (long) src) & (sizeof(long) - 1))
+ 		max = 0;
++#endif
+ #endif
+ 
+ 	/*
+-	 * read_word_at_a_time() below may read uninitialized bytes after the
+-	 * trailing zero and use them in comparisons. Disable this optimization
+-	 * under KMSAN to prevent false positive reports.
++	 * load_unaligned_zeropad() or read_word_at_a_time() below may read
++	 * uninitialized bytes after the trailing zero and use them in
++	 * comparisons. Disable this optimization under KMSAN to prevent
++	 * false positive reports.
+ 	 */
+ 	if (IS_ENABLED(CONFIG_KMSAN))
+ 		max = 0;
+@@ -140,7 +143,11 @@ ssize_t sized_strscpy(char *dest, const char *src, size_t count)
+ 	while (max >= sizeof(unsigned long)) {
+ 		unsigned long c, data;
+ 
++#ifdef CONFIG_DCACHE_WORD_ACCESS
++		c = load_unaligned_zeropad(src+res);
++#else
+ 		c = read_word_at_a_time(src+res);
++#endif
+ 		if (has_zero(c, &data, &constants)) {
+ 			data = prep_zero_mask(c, data, &constants);
+ 			data = create_zero_mask(data);
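
load_unaligned_zeropad() and read_word_at_a_time() both feed the word-at-a-time scan above, which loads eight bytes per iteration and detects an embedded NUL with a bit trick rather than a byte loop. A standalone sketch of that zero-byte test, the classic has_zero() construction the kernel's word-at-a-time constants implement:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define ONES  0x0101010101010101ULL
#define HIGHS 0x8080808080808080ULL

/* nonzero iff some byte of w is zero */
static int word_has_zero_byte(uint64_t w)
{
        return ((w - ONES) & ~w & HIGHS) != 0;
}

int main(void)
{
        uint64_t w;

        memcpy(&w, "abcdefgh", 8);        /* no NUL in these 8 bytes */
        printf("%d\n", word_has_zero_byte(w));        /* 0 */

        memcpy(&w, "abc\0defg", 8);       /* NUL at byte 3 */
        printf("%d\n", word_has_zero_byte(w));        /* 1 */
        return 0;
}
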
+diff --git a/mm/compaction.c b/mm/compaction.c
+index 77dbb9022b47f0..eb5474dea04d9d 100644
+--- a/mm/compaction.c
++++ b/mm/compaction.c
+@@ -980,13 +980,13 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
+ 		}
+ 
+ 		if (PageHuge(page)) {
++			const unsigned int order = compound_order(page);
+ 			/*
+ 			 * skip hugetlbfs if we are not compacting for pages
+ 			 * bigger than its order. THPs and other compound pages
+ 			 * are handled below.
+ 			 */
+ 			if (!cc->alloc_contig) {
+-				const unsigned int order = compound_order(page);
+ 
+ 				if (order <= MAX_PAGE_ORDER) {
+ 					low_pfn += (1UL << order) - 1;
+@@ -1010,8 +1010,8 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
+ 				 /* Do not report -EBUSY down the chain */
+ 				if (ret == -EBUSY)
+ 					ret = 0;
+-				low_pfn += compound_nr(page) - 1;
+-				nr_scanned += compound_nr(page) - 1;
++				low_pfn += (1UL << order) - 1;
++				nr_scanned += (1UL << order) - 1;
+ 				goto isolate_fail;
+ 			}
+ 
+diff --git a/mm/filemap.c b/mm/filemap.c
+index 3c37ad6c598bbb..fa18e71f9c8895 100644
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -2222,6 +2222,7 @@ unsigned filemap_get_folios_contig(struct address_space *mapping,
+ 			*start = folio->index + nr;
+ 			goto out;
+ 		}
++		xas_advance(&xas, folio_next_index(folio) - 1);
+ 		continue;
+ put_folio:
+ 		folio_put(folio);
+diff --git a/mm/gup.c b/mm/gup.c
+index d27e7c9e2596ce..90866b827b60f4 100644
+--- a/mm/gup.c
++++ b/mm/gup.c
+@@ -2213,8 +2213,8 @@ size_t fault_in_safe_writeable(const char __user *uaddr, size_t size)
+ 	} while (start != end);
+ 	mmap_read_unlock(mm);
+ 
+-	if (size > (unsigned long)uaddr - start)
+-		return size - ((unsigned long)uaddr - start);
++	if (size > start - (unsigned long)uaddr)
++		return size - (start - (unsigned long)uaddr);
+ 	return 0;
+ }
+ EXPORT_SYMBOL(fault_in_safe_writeable);
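
The gup fix swaps the operands of an unsigned subtraction: start advances forward from uaddr, so the number of bytes faulted in is start - uaddr; the reversed form wraps around zero to a huge unsigned value. A tiny arithmetic illustration (addresses made up):

#include <stdio.h>

int main(void)
{
        unsigned long uaddr = 0x1000;        /* base of the user range */
        unsigned long start = 0x3000;        /* how far fault-in progressed */
        unsigned long size  = 0x4000;

        /* start moved forward from uaddr, so this is the faulted length */
        unsigned long done = start - uaddr;

        printf("remaining = %#lx\n", size - done);        /* 0x2000 */
        /* the reversed subtraction wraps to a huge unsigned value */
        printf("wrapped   = %#lx\n", uaddr - start);
        return 0;
}
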
+diff --git a/mm/memory.c b/mm/memory.c
+index 99dceaf6a10579..b6daa0e673a549 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -2811,11 +2811,11 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
+ 	if (fn) {
+ 		do {
+ 			if (create || !pte_none(ptep_get(pte))) {
+-				err = fn(pte++, addr, data);
++				err = fn(pte, addr, data);
+ 				if (err)
+ 					break;
+ 			}
+-		} while (addr += PAGE_SIZE, addr != end);
++		} while (pte++, addr += PAGE_SIZE, addr != end);
+ 	}
+ 	*mask |= PGTBL_PTE_MODIFIED;
+ 
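
The apply_to_pte_range() bug was advancing the pte cursor inside fn(pte++, ...), so it moved only when the callback actually ran; with create == false and a pte_none() slot, addr advanced but pte did not, and the two cursors drifted apart. A toy model of the fixed loop shape (values and names illustrative):

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
        int ptes[4] = { 1, 0, 1, 1 };        /* 0 models pte_none() */
        int *pte = ptes;
        unsigned long addr = 0, end = 4 * PAGE_SIZE;

        do {
                if (*pte)        /* fn() runs only for present entries */
                        printf("visit pte[%ld] at addr %#lx\n",
                               (long)(pte - ptes), addr);
                /* fixed shape: pte and addr advance together, always */
        } while (pte++, addr += PAGE_SIZE, addr != end);

        return 0;
}
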
+diff --git a/mm/slub.c b/mm/slub.c
+index b9447a955f6112..c26d9cd107ccbc 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -1960,6 +1960,11 @@ static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
+ #define OBJCGS_CLEAR_MASK	(__GFP_DMA | __GFP_RECLAIMABLE | \
+ 				__GFP_ACCOUNT | __GFP_NOFAIL)
+ 
++static inline void init_slab_obj_exts(struct slab *slab)
++{
++	slab->obj_exts = 0;
++}
++
+ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
+ 		        gfp_t gfp, bool new_slab)
+ {
+@@ -2044,6 +2049,10 @@ static inline bool need_slab_obj_ext(void)
+ 
+ #else /* CONFIG_SLAB_OBJ_EXT */
+ 
++static inline void init_slab_obj_exts(struct slab *slab)
++{
++}
++
+ static int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
+ 			       gfp_t gfp, bool new_slab)
+ {
+@@ -2613,6 +2622,7 @@ static struct slab *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
+ 	slab->objects = oo_objects(oo);
+ 	slab->inuse = 0;
+ 	slab->frozen = 0;
++	init_slab_obj_exts(slab);
+ 
+ 	account_slab(slab, oo_order(oo), s, flags);
+ 
+diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
+index 080a00d916f6b6..748b52ce856755 100644
+--- a/mm/userfaultfd.c
++++ b/mm/userfaultfd.c
+@@ -1873,6 +1873,14 @@ struct vm_area_struct *userfaultfd_clear_vma(struct vma_iterator *vmi,
+ 					     unsigned long end)
+ {
+ 	struct vm_area_struct *ret;
++	bool give_up_on_oom = false;
++
++	/*
++	 * If we are modifying only and not splitting, just give up on the merge
++	 * if OOM prevents us from merging successfully.
++	 */
++	if (start == vma->vm_start && end == vma->vm_end)
++		give_up_on_oom = true;
+ 
+ 	/* Reset ptes for the whole vma range if wr-protected */
+ 	if (userfaultfd_wp(vma))
+@@ -1880,7 +1888,7 @@ struct vm_area_struct *userfaultfd_clear_vma(struct vma_iterator *vmi,
+ 
+ 	ret = vma_modify_flags_uffd(vmi, prev, vma, start, end,
+ 				    vma->vm_flags & ~__VM_UFFD_FLAGS,
+-				    NULL_VM_UFFD_CTX);
++				    NULL_VM_UFFD_CTX, give_up_on_oom);
+ 
+ 	/*
+ 	 * In the vma_merge() successful mprotect-like case 8:
+@@ -1931,7 +1939,8 @@ int userfaultfd_register_range(struct userfaultfd_ctx *ctx,
+ 		new_flags = (vma->vm_flags & ~__VM_UFFD_FLAGS) | vm_flags;
+ 		vma = vma_modify_flags_uffd(&vmi, prev, vma, start, vma_end,
+ 					    new_flags,
+-					    (struct vm_userfaultfd_ctx){ctx});
++					    (struct vm_userfaultfd_ctx){ctx},
++					    /* give_up_on_oom = */false);
+ 		if (IS_ERR(vma))
+ 			return PTR_ERR(vma);
+ 
+diff --git a/mm/vma.c b/mm/vma.c
+index c9ddc06b672a52..9b4517944901dd 100644
+--- a/mm/vma.c
++++ b/mm/vma.c
+@@ -846,7 +846,13 @@ static struct vm_area_struct *vma_merge_existing_range(struct vma_merge_struct *
+ 		if (anon_dup)
+ 			unlink_anon_vmas(anon_dup);
+ 
+-		vmg->state = VMA_MERGE_ERROR_NOMEM;
++		/*
++		 * We've cleaned up any cloned anon_vma's, no VMAs have been
++		 * modified, no harm no foul if the user requests that we not
++		 * report this and just give up, leaving the VMAs unmerged.
++		 */
++		if (!vmg->give_up_on_oom)
++			vmg->state = VMA_MERGE_ERROR_NOMEM;
+ 		return NULL;
+ 	}
+ 
+@@ -859,7 +865,15 @@ static struct vm_area_struct *vma_merge_existing_range(struct vma_merge_struct *
+ abort:
+ 	vma_iter_set(vmg->vmi, start);
+ 	vma_iter_load(vmg->vmi);
+-	vmg->state = VMA_MERGE_ERROR_NOMEM;
++
++	/*
++	 * This means we have failed to clone anon_vma's correctly, but no
++	 * actual changes to VMAs have occurred, so no harm no foul - if the
++	 * user doesn't want this reported and instead just wants to give up on
++	 * the merge, allow it.
++	 */
++	if (!vmg->give_up_on_oom)
++		vmg->state = VMA_MERGE_ERROR_NOMEM;
+ 	return NULL;
+ }
+ 
+@@ -1033,9 +1047,15 @@ int vma_expand(struct vma_merge_struct *vmg)
+ 	return 0;
+ 
+ nomem:
+-	vmg->state = VMA_MERGE_ERROR_NOMEM;
+ 	if (anon_dup)
+ 		unlink_anon_vmas(anon_dup);
++	/*
++	 * If the user requests that we just give up on OOM, we are safe to do so
++	 * here, as commit merge provides this contract to us. Nothing has been
++	 * changed - no harm no foul, just don't report it.
++	 */
++	if (!vmg->give_up_on_oom)
++		vmg->state = VMA_MERGE_ERROR_NOMEM;
+ 	return -ENOMEM;
+ }
+ 
+@@ -1428,6 +1448,13 @@ static struct vm_area_struct *vma_modify(struct vma_merge_struct *vmg)
+ 	if (vmg_nomem(vmg))
+ 		return ERR_PTR(-ENOMEM);
+ 
++	/*
++	 * Split can fail for reasons other than OOM, so if the user requests
++	 * this it's probably a mistake.
++	 */
++	VM_WARN_ON(vmg->give_up_on_oom &&
++		   (vma->vm_start != start || vma->vm_end != end));
++
+ 	/* Split any preceding portion of the VMA. */
+ 	if (vma->vm_start < start) {
+ 		int err = split_vma(vmg->vmi, vma, start, 1);
+@@ -1496,12 +1523,15 @@ struct vm_area_struct
+ 		       struct vm_area_struct *vma,
+ 		       unsigned long start, unsigned long end,
+ 		       unsigned long new_flags,
+-		       struct vm_userfaultfd_ctx new_ctx)
++		       struct vm_userfaultfd_ctx new_ctx,
++		       bool give_up_on_oom)
+ {
+ 	VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);
+ 
+ 	vmg.flags = new_flags;
+ 	vmg.uffd_ctx = new_ctx;
++	if (give_up_on_oom)
++		vmg.give_up_on_oom = true;
+ 
+ 	return vma_modify(&vmg);
+ }
+diff --git a/mm/vma.h b/mm/vma.h
+index d58068c0ff2eaa..729fe3741e897b 100644
+--- a/mm/vma.h
++++ b/mm/vma.h
+@@ -87,6 +87,12 @@ struct vma_merge_struct {
+ 	struct anon_vma_name *anon_name;
+ 	enum vma_merge_flags merge_flags;
+ 	enum vma_merge_state state;
++
++	/*
++	 * If a merge is possible, but an OOM error occurs, give up and don't
++	 * execute the merge, returning NULL.
++	 */
++	bool give_up_on_oom :1;
+ };
+ 
+ static inline bool vmg_nomem(struct vma_merge_struct *vmg)
+@@ -303,7 +309,8 @@ struct vm_area_struct
+ 		       struct vm_area_struct *vma,
+ 		       unsigned long start, unsigned long end,
+ 		       unsigned long new_flags,
+-		       struct vm_userfaultfd_ctx new_ctx);
++		       struct vm_userfaultfd_ctx new_ctx,
++		       bool give_up_on_oom);
+ 
+ struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg);
+ 
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index d64117be62cc44..96ad1b75d1c4d4 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -6150,11 +6150,12 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
+ 	 * event or send an immediate device found event if the data
+ 	 * should not be stored for later.
+ 	 */
+-	if (!ext_adv &&	!has_pending_adv_report(hdev)) {
++	if (!has_pending_adv_report(hdev)) {
+ 		/* If the report will trigger a SCAN_REQ store it for
+ 		 * later merging.
+ 		 */
+-		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
++		if (!ext_adv && (type == LE_ADV_IND ||
++				 type == LE_ADV_SCAN_IND)) {
+ 			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
+ 						 rssi, flags, data, len);
+ 			return;
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index c27ea70f71e1e1..a55388fbf07c84 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -3956,7 +3956,8 @@ static void l2cap_connect(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd,
+ 
+ 	/* Check if the ACL is secure enough (if not SDP) */
+ 	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
+-	    !hci_conn_check_link_mode(conn->hcon)) {
++	    (!hci_conn_check_link_mode(conn->hcon) ||
++	    !l2cap_check_enc_key_size(conn->hcon))) {
+ 		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
+ 		result = L2CAP_CR_SEC_BLOCK;
+ 		goto response;
+@@ -7503,8 +7504,24 @@ void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
+ 		if (skb->len > len) {
+ 			BT_ERR("Frame is too long (len %u, expected len %d)",
+ 			       skb->len, len);
++			/* PTS test cases L2CAP/COS/CED/BI-14-C and BI-15-C
++			 * (Multiple Signaling Command in one PDU, Data
++			 * Truncated, BR/EDR) send a C-frame to the IUT with
++			 * PDU Length set to 8 and Channel ID set to the
++			 * correct signaling channel for the logical link.
++			 * The Information payload contains one L2CAP_ECHO_REQ
++			 * packet with Data Length set to 0 with 0 octets of
++			 * echo data and one invalid command packet due to
++			 * data truncated in PDU but present in HCI packet.
++			 *
++			 * Shorten the socket buffer to the PDU length to
++			 * allow to process valid commands from the PDU before
++			 * setting the socket unreliable.
++			 */
++			skb->len = len;
++			l2cap_recv_frame(conn, skb);
+ 			l2cap_conn_unreliable(conn, ECOMM);
+-			goto drop;
++			goto unlock;
+ 		}
+ 
+ 		/* Append fragment into frame (with header) */
+diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
+index 89f51ea4cabece..f2efb58d152bc2 100644
+--- a/net/bridge/br_vlan.c
++++ b/net/bridge/br_vlan.c
+@@ -715,8 +715,8 @@ static int br_vlan_add_existing(struct net_bridge *br,
+ 				u16 flags, bool *changed,
+ 				struct netlink_ext_ack *extack)
+ {
+-	bool would_change = __vlan_flags_would_change(vlan, flags);
+ 	bool becomes_brentry = false;
++	bool would_change = false;
+ 	int err;
+ 
+ 	if (!br_vlan_is_brentry(vlan)) {
+@@ -725,6 +725,8 @@ static int br_vlan_add_existing(struct net_bridge *br,
+ 			return -EINVAL;
+ 
+ 		becomes_brentry = true;
++	} else {
++		would_change = __vlan_flags_would_change(vlan, flags);
+ 	}
+ 
+ 	/* Master VLANs that aren't brentries weren't notified before,
+diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
+index 1664547deffd07..ac3a252969cb61 100644
+--- a/net/dsa/dsa.c
++++ b/net/dsa/dsa.c
+@@ -862,6 +862,16 @@ static void dsa_tree_teardown_lags(struct dsa_switch_tree *dst)
+ 	kfree(dst->lags);
+ }
+ 
++static void dsa_tree_teardown_routing_table(struct dsa_switch_tree *dst)
++{
++	struct dsa_link *dl, *next;
++
++	list_for_each_entry_safe(dl, next, &dst->rtable, list) {
++		list_del(&dl->list);
++		kfree(dl);
++	}
++}
++
+ static int dsa_tree_setup(struct dsa_switch_tree *dst)
+ {
+ 	bool complete;
+@@ -879,7 +889,7 @@ static int dsa_tree_setup(struct dsa_switch_tree *dst)
+ 
+ 	err = dsa_tree_setup_cpu_ports(dst);
+ 	if (err)
+-		return err;
++		goto teardown_rtable;
+ 
+ 	err = dsa_tree_setup_switches(dst);
+ 	if (err)
+@@ -911,14 +921,14 @@ static int dsa_tree_setup(struct dsa_switch_tree *dst)
+ 	dsa_tree_teardown_switches(dst);
+ teardown_cpu_ports:
+ 	dsa_tree_teardown_cpu_ports(dst);
++teardown_rtable:
++	dsa_tree_teardown_routing_table(dst);
+ 
+ 	return err;
+ }
+ 
+ static void dsa_tree_teardown(struct dsa_switch_tree *dst)
+ {
+-	struct dsa_link *dl, *next;
+-
+ 	if (!dst->setup)
+ 		return;
+ 
+@@ -932,10 +942,7 @@ static void dsa_tree_teardown(struct dsa_switch_tree *dst)
+ 
+ 	dsa_tree_teardown_cpu_ports(dst);
+ 
+-	list_for_each_entry_safe(dl, next, &dst->rtable, list) {
+-		list_del(&dl->list);
+-		kfree(dl);
+-	}
++	dsa_tree_teardown_routing_table(dst);
+ 
+ 	pr_info("DSA: tree %d torn down\n", dst->index);
+ 
+@@ -1478,12 +1485,44 @@ static int dsa_switch_parse(struct dsa_switch *ds, struct dsa_chip_data *cd)
+ 
+ static void dsa_switch_release_ports(struct dsa_switch *ds)
+ {
++	struct dsa_mac_addr *a, *tmp;
+ 	struct dsa_port *dp, *next;
++	struct dsa_vlan *v, *n;
+ 
+ 	dsa_switch_for_each_port_safe(dp, next, ds) {
+-		WARN_ON(!list_empty(&dp->fdbs));
+-		WARN_ON(!list_empty(&dp->mdbs));
+-		WARN_ON(!list_empty(&dp->vlans));
++		/* These are either entries that upper layers lost track of
++		 * (probably due to bugs), or installed through interfaces
++		 * where one does not necessarily have to remove them, like
++		 * ndo_dflt_fdb_add().
++		 */
++		list_for_each_entry_safe(a, tmp, &dp->fdbs, list) {
++			dev_info(ds->dev,
++				 "Cleaning up unicast address %pM vid %u from port %d\n",
++				 a->addr, a->vid, dp->index);
++			list_del(&a->list);
++			kfree(a);
++		}
++
++		list_for_each_entry_safe(a, tmp, &dp->mdbs, list) {
++			dev_info(ds->dev,
++				 "Cleaning up multicast address %pM vid %u from port %d\n",
++				 a->addr, a->vid, dp->index);
++			list_del(&a->list);
++			kfree(a);
++		}
++
++		/* These are entries that upper layers have lost track of,
++		 * probably due to bugs, but also due to dsa_port_do_vlan_del()
++		 * having failed and the VLAN entry still lingering on.
++		 */
++		list_for_each_entry_safe(v, n, &dp->vlans, list) {
++			dev_info(ds->dev,
++				 "Cleaning up vid %u from port %d\n",
++				 v->vid, dp->index);
++			list_del(&v->list);
++			kfree(v);
++		}
++
+ 		list_del(&dp->list);
+ 		kfree(dp);
+ 	}
+diff --git a/net/dsa/tag_8021q.c b/net/dsa/tag_8021q.c
+index 3ee53e28ec2e9f..53e03fd8071b4a 100644
+--- a/net/dsa/tag_8021q.c
++++ b/net/dsa/tag_8021q.c
+@@ -197,7 +197,7 @@ static int dsa_port_do_tag_8021q_vlan_del(struct dsa_port *dp, u16 vid)
+ 
+ 	err = ds->ops->tag_8021q_vlan_del(ds, port, vid);
+ 	if (err) {
+-		refcount_inc(&v->refcount);
++		refcount_set(&v->refcount, 1);
+ 		return err;
+ 	}
+ 
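
On the tag_8021q error path the last reference has already been dropped, and refcount_inc() from zero is treated as a bug by the kernel's refcount API (it WARNs and saturates rather than resurrecting the object), so restoring ownership needs refcount_set(..., 1). A toy model of the rule (this sketch only mimics the semantics; the real API lives in <linux/refcount.h>):

#include <stdio.h>

struct refcount {
        int refs;
};

static void toy_refcount_inc(struct refcount *r)
{
        if (r->refs == 0) {
                printf("WARN: increment on zero refcount\n");
                return;        /* the kernel saturates instead of resurrecting */
        }
        r->refs++;
}

static void toy_refcount_set(struct refcount *r, int n)
{
        r->refs = n;
}

int main(void)
{
        struct refcount v = { .refs = 1 };

        v.refs--;                        /* last reference dropped */
        toy_refcount_inc(&v);            /* buggy restore: WARNs, stays 0 */
        printf("after inc: %d\n", v.refs);

        toy_refcount_set(&v, 1);         /* correct restore on the error path */
        printf("after set: %d\n", v.refs);
        return 0;
}
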
+diff --git a/net/ethtool/cmis_cdb.c b/net/ethtool/cmis_cdb.c
+index 4d558114795203..8bf99295bfbe96 100644
+--- a/net/ethtool/cmis_cdb.c
++++ b/net/ethtool/cmis_cdb.c
+@@ -346,7 +346,7 @@ ethtool_cmis_module_poll(struct net_device *dev,
+ 	struct netlink_ext_ack extack = {};
+ 	int err;
+ 
+-	ethtool_cmis_page_init(&page_data, 0, offset, sizeof(rpl));
++	ethtool_cmis_page_init(&page_data, 0, offset, sizeof(*rpl));
+ 	page_data.data = (u8 *)rpl;
+ 
+ 	err = ops->get_module_eeprom_by_page(dev, &page_data, &extack);
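
The ethtool fix is the classic sizeof-on-a-pointer bug: rpl is a pointer, so sizeof(rpl) yields the pointer width (8 bytes on 64-bit) rather than the size of the reply buffer, truncating the EEPROM page read. A minimal illustration (struct cdb_rpl is a hypothetical layout, not the real ethtool structure):

#include <stdio.h>

struct cdb_rpl {
        unsigned char hdr[8];
        unsigned char payload[120];
};

int main(void)
{
        struct cdb_rpl reply;
        struct cdb_rpl *rpl = &reply;

        printf("sizeof(rpl)  = %zu\n", sizeof(rpl));         /* pointer: 8 */
        printf("sizeof(*rpl) = %zu\n", sizeof(*rpl));        /* struct: 128 */
        return 0;
}
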
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index bae8ece3e881e0..d9ab070e78e052 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -1771,6 +1771,7 @@ static int rt6_insert_exception(struct rt6_info *nrt,
+ 	if (!err) {
+ 		spin_lock_bh(&f6i->fib6_table->tb6_lock);
+ 		fib6_update_sernum(net, f6i);
++		fib6_add_gc_list(f6i);
+ 		spin_unlock_bh(&f6i->fib6_table->tb6_lock);
+ 		fib6_force_start_gc(net);
+ 	}
+diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
+index dbcd75c5d778e6..7e1e561ef76c1c 100644
+--- a/net/mac80211/iface.c
++++ b/net/mac80211/iface.c
+@@ -667,6 +667,9 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do
+ 	if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+ 		ieee80211_txq_remove_vlan(local, sdata);
+ 
++	if (sdata->vif.txq)
++		ieee80211_txq_purge(sdata->local, to_txq_info(sdata->vif.txq));
++
+ 	sdata->bss = NULL;
+ 
+ 	if (local->open_count == 0)
+diff --git a/net/mctp/af_mctp.c b/net/mctp/af_mctp.c
+index f6de136008f6f9..57850d4dac5db9 100644
+--- a/net/mctp/af_mctp.c
++++ b/net/mctp/af_mctp.c
+@@ -630,6 +630,9 @@ static int mctp_sk_hash(struct sock *sk)
+ {
+ 	struct net *net = sock_net(sk);
+ 
++	/* Bind lookup runs under RCU, remain live during that. */
++	sock_set_flag(sk, SOCK_RCU_FREE);
++
+ 	mutex_lock(&net->mctp.bind_lock);
+ 	sk_add_node_rcu(sk, &net->mctp.binds);
+ 	mutex_unlock(&net->mctp.bind_lock);
+diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
+index 0df89240b73361..305daf57a4f9dd 100644
+--- a/net/openvswitch/flow_netlink.c
++++ b/net/openvswitch/flow_netlink.c
+@@ -2876,7 +2876,8 @@ static int validate_set(const struct nlattr *a,
+ 	size_t key_len;
+ 
+ 	/* There can be only one key in an action */
+-	if (nla_total_size(nla_len(ovs_key)) != nla_len(a))
++	if (!nla_ok(ovs_key, nla_len(a)) ||
++	    nla_total_size(nla_len(ovs_key)) != nla_len(a))
+ 		return -EINVAL;
+ 
+ 	key_len = nla_len(ovs_key);
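
The openvswitch change adds nla_ok() before trusting nla_len(ovs_key): a nested attribute is only usable if its header fits in the remaining space and its claimed length does not reach past the enclosing attribute. A minimal model of that bounds check (simplified; the real netlink macros also handle alignment padding):

#include <stdio.h>
#include <stdint.h>

/* an attribute is a 4-byte header (len, type), then len - 4 payload bytes */
struct nlattr {
        uint16_t nla_len;        /* header + payload */
        uint16_t nla_type;
};

static int nla_ok(const struct nlattr *nla, int remaining)
{
        return remaining >= (int)sizeof(*nla) &&
               nla->nla_len >= sizeof(*nla) &&
               nla->nla_len <= remaining;
}

int main(void)
{
        struct nlattr evil = { .nla_len = 64, .nla_type = 1 };

        /* only 8 bytes actually remain in the enclosing attribute */
        printf("accepted? %s\n", nla_ok(&evil, 8) ? "yes" : "no");
        return 0;
}
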
+diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
+index ebc41a7b13dbec..78b0e6dba0a2b7 100644
+--- a/net/smc/af_smc.c
++++ b/net/smc/af_smc.c
+@@ -362,6 +362,9 @@ static void smc_destruct(struct sock *sk)
+ 		return;
+ }
+ 
++static struct lock_class_key smc_key;
++static struct lock_class_key smc_slock_key;
++
+ void smc_sk_init(struct net *net, struct sock *sk, int protocol)
+ {
+ 	struct smc_sock *smc = smc_sk(sk);
+@@ -375,6 +378,8 @@ void smc_sk_init(struct net *net, struct sock *sk, int protocol)
+ 	INIT_WORK(&smc->connect_work, smc_connect_work);
+ 	INIT_DELAYED_WORK(&smc->conn.tx_work, smc_tx_work);
+ 	INIT_LIST_HEAD(&smc->accept_q);
++	sock_lock_init_class_and_name(sk, "slock-AF_SMC", &smc_slock_key,
++				      "sk_lock-AF_SMC", &smc_key);
+ 	spin_lock_init(&smc->accept_q_lock);
+ 	spin_lock_init(&smc->conn.send_lock);
+ 	sk->sk_prot->hash(sk);
+diff --git a/scripts/Makefile.compiler b/scripts/Makefile.compiler
+index e0842496d26ed7..c6cd729b65cbfb 100644
+--- a/scripts/Makefile.compiler
++++ b/scripts/Makefile.compiler
+@@ -75,8 +75,8 @@ ld-option = $(call try-run, $(LD) $(KBUILD_LDFLAGS) $(1) -v,$(1),$(2),$(3))
+ # Usage: MY_RUSTFLAGS += $(call __rustc-option,$(RUSTC),$(MY_RUSTFLAGS),-Cinstrument-coverage,-Zinstrument-coverage)
+ # TODO: remove RUSTC_BOOTSTRAP=1 when we raise the minimum GNU Make version to 4.4
+ __rustc-option = $(call try-run,\
+-	echo '#![allow(missing_docs)]#![feature(no_core)]#![no_core]' | RUSTC_BOOTSTRAP=1\
+-	$(1) --sysroot=/dev/null $(filter-out --sysroot=/dev/null,$(2)) $(3)\
++	echo '$(pound)![allow(missing_docs)]$(pound)![feature(no_core)]$(pound)![no_core]' | RUSTC_BOOTSTRAP=1\
++	$(1) --sysroot=/dev/null $(filter-out --sysroot=/dev/null --target=%,$(2)) $(3)\
+ 	--crate-type=rlib --out-dir=$(TMPOUT) --emit=obj=- - >/dev/null,$(3),$(4))
+ 
+ # rustc-option
+diff --git a/scripts/generate_rust_analyzer.py b/scripts/generate_rust_analyzer.py
+index d1f5adbf33f91c..690f9830f06482 100755
+--- a/scripts/generate_rust_analyzer.py
++++ b/scripts/generate_rust_analyzer.py
+@@ -90,6 +90,12 @@ def generate_crates(srctree, objtree, sysroot_src, external_src, cfgs):
+         ["core", "compiler_builtins"],
+     )
+ 
++    append_crate(
++        "ffi",
++        srctree / "rust" / "ffi.rs",
++        ["core", "compiler_builtins"],
++    )
++
+     def append_crate_with_generated(
+         display_name,
+         deps,
+@@ -109,9 +115,9 @@ def generate_crates(srctree, objtree, sysroot_src, external_src, cfgs):
+             "exclude_dirs": [],
+         }
+ 
+-    append_crate_with_generated("bindings", ["core"])
+-    append_crate_with_generated("uapi", ["core"])
+-    append_crate_with_generated("kernel", ["core", "macros", "build_error", "bindings", "uapi"])
++    append_crate_with_generated("bindings", ["core", "ffi"])
++    append_crate_with_generated("uapi", ["core", "ffi"])
++    append_crate_with_generated("kernel", ["core", "macros", "build_error", "ffi", "bindings", "uapi"])
+ 
+     def is_root_crate(build_file, target):
+         try:
+diff --git a/sound/pci/hda/Kconfig b/sound/pci/hda/Kconfig
+index dbf933c18a8219..fd9391e61b3d98 100644
+--- a/sound/pci/hda/Kconfig
++++ b/sound/pci/hda/Kconfig
+@@ -96,9 +96,7 @@ config SND_HDA_CIRRUS_SCODEC
+ 
+ config SND_HDA_CIRRUS_SCODEC_KUNIT_TEST
+ 	tristate "KUnit test for Cirrus side-codec library" if !KUNIT_ALL_TESTS
+-	select SND_HDA_CIRRUS_SCODEC
+-	select GPIOLIB
+-	depends on KUNIT
++	depends on SND_HDA_CIRRUS_SCODEC && GPIOLIB && KUNIT
+ 	default KUNIT_ALL_TESTS
+ 	help
+ 	  This builds KUnit tests for the cirrus side-codec library.
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 0bf833c9602155..4171aa22747c33 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -6603,6 +6603,16 @@ static void alc285_fixup_speaker2_to_dac1(struct hda_codec *codec,
+ 	}
+ }
+ 
++/* disable DAC3 (0x06) selection on NID 0x15 - share Speaker/Bass Speaker DAC 0x03 */
++static void alc294_fixup_bass_speaker_15(struct hda_codec *codec,
++					 const struct hda_fixup *fix, int action)
++{
++	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
++		static const hda_nid_t conn[] = { 0x02, 0x03 };
++		snd_hda_override_conn_list(codec, 0x15, ARRAY_SIZE(conn), conn);
++	}
++}
++
+ /* Hook to update amp GPIO4 for automute */
+ static void alc280_hp_gpio4_automute_hook(struct hda_codec *codec,
+ 					  struct hda_jack_callback *jack)
+@@ -7587,6 +7597,16 @@ static void alc287_fixup_lenovo_thinkpad_with_alc1318(struct hda_codec *codec,
+ 	spec->gen.pcm_playback_hook = alc287_alc1318_playback_pcm_hook;
+ }
+ 
++/*
++ * Clear COEF 0x0d (PCBEEP passthrough) bit 0x40 where BIOS sets it wrongly
++ * at PM resume
++ */
++static void alc283_fixup_dell_hp_resume(struct hda_codec *codec,
++					const struct hda_fixup *fix, int action)
++{
++	if (action == HDA_FIXUP_ACT_INIT)
++		alc_write_coef_idx(codec, 0xd, 0x2800);
++}
+ 
+ enum {
+ 	ALC269_FIXUP_GPIO2,
+@@ -7888,6 +7908,9 @@ enum {
+ 	ALC245_FIXUP_CLEVO_NOISY_MIC,
+ 	ALC269_FIXUP_VAIO_VJFH52_MIC_NO_PRESENCE,
+ 	ALC233_FIXUP_MEDION_MTL_SPK,
++	ALC294_FIXUP_BASS_SPEAKER_15,
++	ALC283_FIXUP_DELL_HP_RESUME,
++	ALC294_FIXUP_ASUS_CS35L41_SPI_2,
+ };
+ 
+ /* A special fixup for Lenovo C940 and Yoga Duet 7;
+@@ -10222,6 +10245,20 @@ static const struct hda_fixup alc269_fixups[] = {
+ 			{ }
+ 		},
+ 	},
++	[ALC294_FIXUP_BASS_SPEAKER_15] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc294_fixup_bass_speaker_15,
++	},
++	[ALC283_FIXUP_DELL_HP_RESUME] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc283_fixup_dell_hp_resume,
++	},
++	[ALC294_FIXUP_ASUS_CS35L41_SPI_2] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = cs35l41_fixup_spi_two,
++		.chained = true,
++		.chain_id = ALC294_FIXUP_ASUS_HEADSET_MIC,
++	},
+ };
+ 
+ static const struct hda_quirk alc269_fixup_tbl[] = {
+@@ -10282,6 +10319,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1028, 0x05f4, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x05f5, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x05f6, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1028, 0x0604, "Dell Venue 11 Pro 7130", ALC283_FIXUP_DELL_HP_RESUME),
+ 	SND_PCI_QUIRK(0x1028, 0x0615, "Dell Vostro 5470", ALC290_FIXUP_SUBWOOFER_HSJACK),
+ 	SND_PCI_QUIRK(0x1028, 0x0616, "Dell Vostro 5470", ALC290_FIXUP_SUBWOOFER_HSJACK),
+ 	SND_PCI_QUIRK(0x1028, 0x062c, "Dell Latitude E5550", ALC292_FIXUP_DELL_E7X),
+@@ -10684,7 +10722,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1043, 0x12a0, "ASUS X441UV", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1043, 0x12a3, "Asus N7691ZM", ALC269_FIXUP_ASUS_N7601ZM),
+ 	SND_PCI_QUIRK(0x1043, 0x12af, "ASUS UX582ZS", ALC245_FIXUP_CS35L41_SPI_2),
+-	SND_PCI_QUIRK(0x1043, 0x12b4, "ASUS B3405CCA / P3405CCA", ALC245_FIXUP_CS35L41_SPI_2),
++	SND_PCI_QUIRK(0x1043, 0x12b4, "ASUS B3405CCA / P3405CCA", ALC294_FIXUP_ASUS_CS35L41_SPI_2),
+ 	SND_PCI_QUIRK(0x1043, 0x12e0, "ASUS X541SA", ALC256_FIXUP_ASUS_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x12f0, "ASUS X541UV", ALC256_FIXUP_ASUS_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x1313, "Asus K42JZ", ALC269VB_FIXUP_ASUS_MIC_NO_PRESENCE),
+@@ -10750,6 +10788,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1043, 0x1d42, "ASUS Zephyrus G14 2022", ALC289_FIXUP_ASUS_GA401),
+ 	SND_PCI_QUIRK(0x1043, 0x1d4e, "ASUS TM420", ALC256_FIXUP_ASUS_HPE),
+ 	SND_PCI_QUIRK(0x1043, 0x1da2, "ASUS UP6502ZA/ZD", ALC245_FIXUP_CS35L41_SPI_2),
++	SND_PCI_QUIRK(0x1043, 0x1df3, "ASUS UM5606WA", ALC294_FIXUP_BASS_SPEAKER_15),
+ 	SND_PCI_QUIRK(0x1043, 0x1e02, "ASUS UX3402ZA", ALC245_FIXUP_CS35L41_SPI_2),
+ 	SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502),
+ 	SND_PCI_QUIRK(0x1043, 0x1e12, "ASUS UM3402", ALC287_FIXUP_CS35L41_I2C_2),
+@@ -10772,14 +10811,14 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1043, 0x1fb3, "ASUS ROG Flow Z13 GZ302EA", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x1043, 0x3011, "ASUS B5605CVA", ALC245_FIXUP_CS35L41_SPI_2),
+ 	SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2),
+-	SND_PCI_QUIRK(0x1043, 0x3061, "ASUS B3405CCA", ALC245_FIXUP_CS35L41_SPI_2),
+-	SND_PCI_QUIRK(0x1043, 0x3071, "ASUS B5405CCA", ALC245_FIXUP_CS35L41_SPI_2),
+-	SND_PCI_QUIRK(0x1043, 0x30c1, "ASUS B3605CCA / P3605CCA", ALC245_FIXUP_CS35L41_SPI_2),
+-	SND_PCI_QUIRK(0x1043, 0x30d1, "ASUS B5405CCA", ALC245_FIXUP_CS35L41_SPI_2),
+-	SND_PCI_QUIRK(0x1043, 0x30e1, "ASUS B5605CCA", ALC245_FIXUP_CS35L41_SPI_2),
++	SND_PCI_QUIRK(0x1043, 0x3061, "ASUS B3405CCA", ALC294_FIXUP_ASUS_CS35L41_SPI_2),
++	SND_PCI_QUIRK(0x1043, 0x3071, "ASUS B5405CCA", ALC294_FIXUP_ASUS_CS35L41_SPI_2),
++	SND_PCI_QUIRK(0x1043, 0x30c1, "ASUS B3605CCA / P3605CCA", ALC294_FIXUP_ASUS_CS35L41_SPI_2),
++	SND_PCI_QUIRK(0x1043, 0x30d1, "ASUS B5405CCA", ALC294_FIXUP_ASUS_CS35L41_SPI_2),
++	SND_PCI_QUIRK(0x1043, 0x30e1, "ASUS B5605CCA", ALC294_FIXUP_ASUS_CS35L41_SPI_2),
+ 	SND_PCI_QUIRK(0x1043, 0x31d0, "ASUS Zen AIO 27 Z272SD_A272SD", ALC274_FIXUP_ASUS_ZEN_AIO_27),
+-	SND_PCI_QUIRK(0x1043, 0x31e1, "ASUS B5605CCA", ALC245_FIXUP_CS35L41_SPI_2),
+-	SND_PCI_QUIRK(0x1043, 0x31f1, "ASUS B3605CCA", ALC245_FIXUP_CS35L41_SPI_2),
++	SND_PCI_QUIRK(0x1043, 0x31e1, "ASUS B5605CCA", ALC294_FIXUP_ASUS_CS35L41_SPI_2),
++	SND_PCI_QUIRK(0x1043, 0x31f1, "ASUS B3605CCA", ALC294_FIXUP_ASUS_CS35L41_SPI_2),
+ 	SND_PCI_QUIRK(0x1043, 0x3a20, "ASUS G614JZR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS),
+ 	SND_PCI_QUIRK(0x1043, 0x3a30, "ASUS G814JVR/JIR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS),
+ 	SND_PCI_QUIRK(0x1043, 0x3a40, "ASUS G814JZR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS),
+diff --git a/sound/soc/codecs/cs42l43-jack.c b/sound/soc/codecs/cs42l43-jack.c
+index d9ab003e166bfa..73d764fc853929 100644
+--- a/sound/soc/codecs/cs42l43-jack.c
++++ b/sound/soc/codecs/cs42l43-jack.c
+@@ -702,6 +702,9 @@ static void cs42l43_clear_jack(struct cs42l43_codec *priv)
+ 			   CS42L43_PGA_WIDESWING_MODE_EN_MASK, 0);
+ 	regmap_update_bits(cs42l43->regmap, CS42L43_STEREO_MIC_CTRL,
+ 			   CS42L43_JACK_STEREO_CONFIG_MASK, 0);
++	regmap_update_bits(cs42l43->regmap, CS42L43_STEREO_MIC_CLAMP_CTRL,
++			   CS42L43_SMIC_HPAMP_CLAMP_DIS_FRC_MASK,
++			   CS42L43_SMIC_HPAMP_CLAMP_DIS_FRC_MASK);
+ 	regmap_update_bits(cs42l43->regmap, CS42L43_HS2,
+ 			   CS42L43_HSDET_MODE_MASK | CS42L43_HSDET_MANUAL_MODE_MASK,
+ 			   0x2 << CS42L43_HSDET_MODE_SHIFT);
+diff --git a/sound/soc/codecs/lpass-wsa-macro.c b/sound/soc/codecs/lpass-wsa-macro.c
+index c989d82d1d3c17..81bab8299eae4b 100644
+--- a/sound/soc/codecs/lpass-wsa-macro.c
++++ b/sound/soc/codecs/lpass-wsa-macro.c
+@@ -63,6 +63,10 @@
+ #define CDC_WSA_TX_SPKR_PROT_CLK_DISABLE	0
+ #define CDC_WSA_TX_SPKR_PROT_PCM_RATE_MASK	GENMASK(3, 0)
+ #define CDC_WSA_TX_SPKR_PROT_PCM_RATE_8K	0
++#define CDC_WSA_TX_SPKR_PROT_PCM_RATE_16K	1
++#define CDC_WSA_TX_SPKR_PROT_PCM_RATE_24K	2
++#define CDC_WSA_TX_SPKR_PROT_PCM_RATE_32K	3
++#define CDC_WSA_TX_SPKR_PROT_PCM_RATE_48K	4
+ #define CDC_WSA_TX0_SPKR_PROT_PATH_CFG0		(0x0248)
+ #define CDC_WSA_TX1_SPKR_PROT_PATH_CTL		(0x0264)
+ #define CDC_WSA_TX1_SPKR_PROT_PATH_CFG0		(0x0268)
+@@ -407,6 +411,7 @@ struct wsa_macro {
+ 	int ear_spkr_gain;
+ 	int spkr_gain_offset;
+ 	int spkr_mode;
++	u32 pcm_rate_vi;
+ 	int is_softclip_on[WSA_MACRO_SOFTCLIP_MAX];
+ 	int softclip_clk_users[WSA_MACRO_SOFTCLIP_MAX];
+ 	struct regmap *regmap;
+@@ -1280,6 +1285,7 @@ static int wsa_macro_hw_params(struct snd_pcm_substream *substream,
+ 			       struct snd_soc_dai *dai)
+ {
+ 	struct snd_soc_component *component = dai->component;
++	struct wsa_macro *wsa = snd_soc_component_get_drvdata(component);
+ 	int ret;
+ 
+ 	switch (substream->stream) {
+@@ -1291,6 +1297,11 @@ static int wsa_macro_hw_params(struct snd_pcm_substream *substream,
+ 				__func__, params_rate(params));
+ 			return ret;
+ 		}
++		break;
++	case SNDRV_PCM_STREAM_CAPTURE:
++		if (dai->id == WSA_MACRO_AIF_VI)
++			wsa->pcm_rate_vi = params_rate(params);
++
+ 		break;
+ 	default:
+ 		break;
+@@ -1448,35 +1459,11 @@ static void wsa_macro_mclk_enable(struct wsa_macro *wsa, bool mclk_enable)
+ 	}
+ }
+ 
+-static int wsa_macro_mclk_event(struct snd_soc_dapm_widget *w,
+-				struct snd_kcontrol *kcontrol, int event)
++static void wsa_macro_enable_disable_vi_sense(struct snd_soc_component *component, bool enable,
++						u32 tx_reg0, u32 tx_reg1, u32 val)
+ {
+-	struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
+-	struct wsa_macro *wsa = snd_soc_component_get_drvdata(component);
+-
+-	wsa_macro_mclk_enable(wsa, event == SND_SOC_DAPM_PRE_PMU);
+-	return 0;
+-}
+-
+-static int wsa_macro_enable_vi_feedback(struct snd_soc_dapm_widget *w,
+-					struct snd_kcontrol *kcontrol,
+-					int event)
+-{
+-	struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
+-	struct wsa_macro *wsa = snd_soc_component_get_drvdata(component);
+-	u32 tx_reg0, tx_reg1;
+-
+-	if (test_bit(WSA_MACRO_TX0, &wsa->active_ch_mask[WSA_MACRO_AIF_VI])) {
+-		tx_reg0 = CDC_WSA_TX0_SPKR_PROT_PATH_CTL;
+-		tx_reg1 = CDC_WSA_TX1_SPKR_PROT_PATH_CTL;
+-	} else if (test_bit(WSA_MACRO_TX1, &wsa->active_ch_mask[WSA_MACRO_AIF_VI])) {
+-		tx_reg0 = CDC_WSA_TX2_SPKR_PROT_PATH_CTL;
+-		tx_reg1 = CDC_WSA_TX3_SPKR_PROT_PATH_CTL;
+-	}
+-
+-	switch (event) {
+-	case SND_SOC_DAPM_POST_PMU:
+-			/* Enable V&I sensing */
++	if (enable) {
++		/* Enable V&I sensing */
+ 		snd_soc_component_update_bits(component, tx_reg0,
+ 					      CDC_WSA_TX_SPKR_PROT_RESET_MASK,
+ 					      CDC_WSA_TX_SPKR_PROT_RESET);
+@@ -1485,10 +1472,10 @@ static int wsa_macro_enable_vi_feedback(struct snd_soc_dapm_widget *w,
+ 					      CDC_WSA_TX_SPKR_PROT_RESET);
+ 		snd_soc_component_update_bits(component, tx_reg0,
+ 					      CDC_WSA_TX_SPKR_PROT_PCM_RATE_MASK,
+-					      CDC_WSA_TX_SPKR_PROT_PCM_RATE_8K);
++					      val);
+ 		snd_soc_component_update_bits(component, tx_reg1,
+ 					      CDC_WSA_TX_SPKR_PROT_PCM_RATE_MASK,
+-					      CDC_WSA_TX_SPKR_PROT_PCM_RATE_8K);
++					      val);
+ 		snd_soc_component_update_bits(component, tx_reg0,
+ 					      CDC_WSA_TX_SPKR_PROT_CLK_EN_MASK,
+ 					      CDC_WSA_TX_SPKR_PROT_CLK_ENABLE);
+@@ -1501,9 +1488,7 @@ static int wsa_macro_enable_vi_feedback(struct snd_soc_dapm_widget *w,
+ 		snd_soc_component_update_bits(component, tx_reg1,
+ 					      CDC_WSA_TX_SPKR_PROT_RESET_MASK,
+ 					      CDC_WSA_TX_SPKR_PROT_NO_RESET);
+-		break;
+-	case SND_SOC_DAPM_POST_PMD:
+-		/* Disable V&I sensing */
++	} else {
+ 		snd_soc_component_update_bits(component, tx_reg0,
+ 					      CDC_WSA_TX_SPKR_PROT_RESET_MASK,
+ 					      CDC_WSA_TX_SPKR_PROT_RESET);
+@@ -1516,6 +1501,72 @@ static int wsa_macro_enable_vi_feedback(struct snd_soc_dapm_widget *w,
+ 		snd_soc_component_update_bits(component, tx_reg1,
+ 					      CDC_WSA_TX_SPKR_PROT_CLK_EN_MASK,
+ 					      CDC_WSA_TX_SPKR_PROT_CLK_DISABLE);
++	}
++}
++
++static void wsa_macro_enable_disable_vi_feedback(struct snd_soc_component *component,
++						 bool enable, u32 rate)
++{
++	struct wsa_macro *wsa = snd_soc_component_get_drvdata(component);
++
++	if (test_bit(WSA_MACRO_TX0, &wsa->active_ch_mask[WSA_MACRO_AIF_VI]))
++		wsa_macro_enable_disable_vi_sense(component, enable,
++				CDC_WSA_TX0_SPKR_PROT_PATH_CTL,
++				CDC_WSA_TX1_SPKR_PROT_PATH_CTL, rate);
++
++	if (test_bit(WSA_MACRO_TX1, &wsa->active_ch_mask[WSA_MACRO_AIF_VI]))
++		wsa_macro_enable_disable_vi_sense(component, enable,
++				CDC_WSA_TX2_SPKR_PROT_PATH_CTL,
++				CDC_WSA_TX3_SPKR_PROT_PATH_CTL, rate);
++}
++
++static int wsa_macro_mclk_event(struct snd_soc_dapm_widget *w,
++				struct snd_kcontrol *kcontrol, int event)
++{
++	struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
++	struct wsa_macro *wsa = snd_soc_component_get_drvdata(component);
++
++	wsa_macro_mclk_enable(wsa, event == SND_SOC_DAPM_PRE_PMU);
++	return 0;
++}
++
++static int wsa_macro_enable_vi_feedback(struct snd_soc_dapm_widget *w,
++					struct snd_kcontrol *kcontrol,
++					int event)
++{
++	struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
++	struct wsa_macro *wsa = snd_soc_component_get_drvdata(component);
++	u32 rate_val;
++
++	switch (wsa->pcm_rate_vi) {
++	case 8000:
++		rate_val = CDC_WSA_TX_SPKR_PROT_PCM_RATE_8K;
++		break;
++	case 16000:
++		rate_val = CDC_WSA_TX_SPKR_PROT_PCM_RATE_16K;
++		break;
++	case 24000:
++		rate_val = CDC_WSA_TX_SPKR_PROT_PCM_RATE_24K;
++		break;
++	case 32000:
++		rate_val = CDC_WSA_TX_SPKR_PROT_PCM_RATE_32K;
++		break;
++	case 48000:
++		rate_val = CDC_WSA_TX_SPKR_PROT_PCM_RATE_48K;
++		break;
++	default:
++		rate_val = CDC_WSA_TX_SPKR_PROT_PCM_RATE_8K;
++		break;
++	}
++
++	switch (event) {
++	case SND_SOC_DAPM_POST_PMU:
++		/* Enable V&I sensing */
++		wsa_macro_enable_disable_vi_feedback(component, true, rate_val);
++		break;
++	case SND_SOC_DAPM_POST_PMD:
++		/* Disable V&I sensing */
++		wsa_macro_enable_disable_vi_feedback(component, false, rate_val);
+ 		break;
+ 	}
+ 
+diff --git a/sound/soc/dwc/dwc-i2s.c b/sound/soc/dwc/dwc-i2s.c
+index 57b789d7fbedd4..5b4f20dbf7bba4 100644
+--- a/sound/soc/dwc/dwc-i2s.c
++++ b/sound/soc/dwc/dwc-i2s.c
+@@ -199,12 +199,10 @@ static void i2s_start(struct dw_i2s_dev *dev,
+ 	else
+ 		i2s_write_reg(dev->i2s_base, IRER, 1);
+ 
+-	/* I2S needs to enable IRQ to make a handshake with DMAC on the JH7110 SoC */
+-	if (dev->use_pio || dev->is_jh7110)
+-		i2s_enable_irqs(dev, substream->stream, config->chan_nr);
+-	else
++	if (!(dev->use_pio || dev->is_jh7110))
+ 		i2s_enable_dma(dev, substream->stream);
+ 
++	i2s_enable_irqs(dev, substream->stream, config->chan_nr);
+ 	i2s_write_reg(dev->i2s_base, CER, 1);
+ }
+ 
+@@ -218,11 +216,12 @@ static void i2s_stop(struct dw_i2s_dev *dev,
+ 	else
+ 		i2s_write_reg(dev->i2s_base, IRER, 0);
+ 
+-	if (dev->use_pio || dev->is_jh7110)
+-		i2s_disable_irqs(dev, substream->stream, 8);
+-	else
++	if (!(dev->use_pio || dev->is_jh7110))
+ 		i2s_disable_dma(dev, substream->stream);
+ 
++	i2s_disable_irqs(dev, substream->stream, 8);
++
++
+ 	if (!dev->active) {
+ 		i2s_write_reg(dev->i2s_base, CER, 0);
+ 		i2s_write_reg(dev->i2s_base, IER, 0);
+diff --git a/sound/soc/fsl/fsl_qmc_audio.c b/sound/soc/fsl/fsl_qmc_audio.c
+index 8668abd3520800..d41cb6f3efcacc 100644
+--- a/sound/soc/fsl/fsl_qmc_audio.c
++++ b/sound/soc/fsl/fsl_qmc_audio.c
+@@ -250,6 +250,9 @@ static int qmc_audio_pcm_trigger(struct snd_soc_component *component,
+ 	switch (cmd) {
+ 	case SNDRV_PCM_TRIGGER_START:
+ 		bitmap_zero(prtd->chans_pending, 64);
++		prtd->buffer_ended = 0;
++		prtd->ch_dma_addr_current = prtd->ch_dma_addr_start;
++
+ 		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ 			for (i = 0; i < prtd->channels; i++)
+ 				prtd->qmc_dai->chans[i].prtd_tx = prtd;
+diff --git a/sound/soc/intel/avs/pcm.c b/sound/soc/intel/avs/pcm.c
+index 945f9c0a6a5455..15defce0f3eb84 100644
+--- a/sound/soc/intel/avs/pcm.c
++++ b/sound/soc/intel/avs/pcm.c
+@@ -925,7 +925,8 @@ static int avs_component_probe(struct snd_soc_component *component)
+ 		else
+ 			mach->tplg_filename = devm_kasprintf(adev->dev, GFP_KERNEL,
+ 							     "hda-generic-tplg.bin");
+-
++		if (!mach->tplg_filename)
++			return -ENOMEM;
+ 		filename = kasprintf(GFP_KERNEL, "%s/%s", component->driver->topology_name_prefix,
+ 				     mach->tplg_filename);
+ 		if (!filename)
+diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
+index 380fc3be8c932e..5911a055865160 100644
+--- a/sound/soc/intel/boards/sof_sdw.c
++++ b/sound/soc/intel/boards/sof_sdw.c
+@@ -688,6 +688,7 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
+ 
+ static const struct snd_pci_quirk sof_sdw_ssid_quirk_table[] = {
+ 	SND_PCI_QUIRK(0x1043, 0x1e13, "ASUS Zenbook S14", SOC_SDW_CODEC_MIC),
++	SND_PCI_QUIRK(0x1043, 0x1f43, "ASUS Zenbook S16", SOC_SDW_CODEC_MIC),
+ 	{}
+ };
+ 
+diff --git a/sound/soc/qcom/lpass.h b/sound/soc/qcom/lpass.h
+index 27a2bf9a661393..de3ec6f594c11c 100644
+--- a/sound/soc/qcom/lpass.h
++++ b/sound/soc/qcom/lpass.h
+@@ -13,10 +13,11 @@
+ #include <linux/platform_device.h>
+ #include <linux/regmap.h>
+ #include <dt-bindings/sound/qcom,lpass.h>
++#include <dt-bindings/sound/qcom,q6afe.h>
+ #include "lpass-hdmi.h"
+ 
+ #define LPASS_AHBIX_CLOCK_FREQUENCY		131072000
+-#define LPASS_MAX_PORTS			(LPASS_CDC_DMA_VA_TX8 + 1)
++#define LPASS_MAX_PORTS			(DISPLAY_PORT_RX_7 + 1)
+ #define LPASS_MAX_MI2S_PORTS			(8)
+ #define LPASS_MAX_DMA_CHANNELS			(8)
+ #define LPASS_MAX_HDMI_DMA_CHANNELS		(4)
+diff --git a/tools/objtool/check.c b/tools/objtool/check.c
+index 127862fa05c619..ce3ea0c2de0425 100644
+--- a/tools/objtool/check.c
++++ b/tools/objtool/check.c
+@@ -217,6 +217,7 @@ static bool is_rust_noreturn(const struct symbol *func)
+ 	       str_ends_with(func->name, "_4core9panicking14panic_nounwind")				||
+ 	       str_ends_with(func->name, "_4core9panicking18panic_bounds_check")			||
+ 	       str_ends_with(func->name, "_4core9panicking19assert_failed_inner")			||
++	       str_ends_with(func->name, "_4core9panicking30panic_null_pointer_dereference")		||
+ 	       str_ends_with(func->name, "_4core9panicking36panic_misaligned_pointer_dereference")	||
+ 	       strstr(func->name, "_4core9panicking13assert_failed")					||
+ 	       strstr(func->name, "_4core9panicking11panic_const24panic_const_")			||
+diff --git a/tools/testing/kunit/qemu_configs/sh.py b/tools/testing/kunit/qemu_configs/sh.py
+index 78a474a5b95f3a..f00cb89fdef6aa 100644
+--- a/tools/testing/kunit/qemu_configs/sh.py
++++ b/tools/testing/kunit/qemu_configs/sh.py
+@@ -7,7 +7,9 @@ CONFIG_CPU_SUBTYPE_SH7751R=y
+ CONFIG_MEMORY_START=0x0c000000
+ CONFIG_SH_RTS7751R2D=y
+ CONFIG_RTS7751R2D_PLUS=y
+-CONFIG_SERIAL_SH_SCI=y''',
++CONFIG_SERIAL_SH_SCI=y
++CONFIG_CMDLINE_EXTEND=y
++''',
+ 			   qemu_arch='sh4',
+ 			   kernel_path='arch/sh/boot/zImage',
+ 			   kernel_command_line='console=ttySC1',
+diff --git a/tools/testing/selftests/bpf/prog_tests/changes_pkt_data.c b/tools/testing/selftests/bpf/prog_tests/changes_pkt_data.c
+new file mode 100644
+index 00000000000000..7526de3790814c
+--- /dev/null
++++ b/tools/testing/selftests/bpf/prog_tests/changes_pkt_data.c
+@@ -0,0 +1,107 @@
++// SPDX-License-Identifier: GPL-2.0
++#include "bpf/libbpf.h"
++#include "changes_pkt_data_freplace.skel.h"
++#include "changes_pkt_data.skel.h"
++#include <test_progs.h>
++
++static void print_verifier_log(const char *log)
++{
++	if (env.verbosity >= VERBOSE_VERY)
++		fprintf(stdout, "VERIFIER LOG:\n=============\n%s=============\n", log);
++}
++
++static void test_aux(const char *main_prog_name,
++		     const char *to_be_replaced,
++		     const char *replacement,
++		     bool expect_load)
++{
++	struct changes_pkt_data_freplace *freplace = NULL;
++	struct bpf_program *freplace_prog = NULL;
++	struct bpf_program *main_prog = NULL;
++	LIBBPF_OPTS(bpf_object_open_opts, opts);
++	struct changes_pkt_data *main = NULL;
++	char log[16*1024];
++	int err;
++
++	opts.kernel_log_buf = log;
++	opts.kernel_log_size = sizeof(log);
++	if (env.verbosity >= VERBOSE_SUPER)
++		opts.kernel_log_level = 1 | 2 | 4;
++	main = changes_pkt_data__open_opts(&opts);
++	if (!ASSERT_OK_PTR(main, "changes_pkt_data__open"))
++		goto out;
++	main_prog = bpf_object__find_program_by_name(main->obj, main_prog_name);
++	if (!ASSERT_OK_PTR(main_prog, "main_prog"))
++		goto out;
++	bpf_program__set_autoload(main_prog, true);
++	err = changes_pkt_data__load(main);
++	print_verifier_log(log);
++	if (!ASSERT_OK(err, "changes_pkt_data__load"))
++		goto out;
++	freplace = changes_pkt_data_freplace__open_opts(&opts);
++	if (!ASSERT_OK_PTR(freplace, "changes_pkt_data_freplace__open"))
++		goto out;
++	freplace_prog = bpf_object__find_program_by_name(freplace->obj, replacement);
++	if (!ASSERT_OK_PTR(freplace_prog, "freplace_prog"))
++		goto out;
++	bpf_program__set_autoload(freplace_prog, true);
++	bpf_program__set_autoattach(freplace_prog, true);
++	bpf_program__set_attach_target(freplace_prog,
++				       bpf_program__fd(main_prog),
++				       to_be_replaced);
++	err = changes_pkt_data_freplace__load(freplace);
++	print_verifier_log(log);
++	if (expect_load) {
++		ASSERT_OK(err, "changes_pkt_data_freplace__load");
++	} else {
++		ASSERT_ERR(err, "changes_pkt_data_freplace__load");
++		ASSERT_HAS_SUBSTR(log, "Extension program changes packet data", "error log");
++	}
++
++out:
++	changes_pkt_data_freplace__destroy(freplace);
++	changes_pkt_data__destroy(main);
++}
++
++/* There are two global subprograms in both changes_pkt_data.skel.h:
++ * - one changes packet data;
++ * - another does not.
++ * It is ok to freplace subprograms that change packet data with those
++ * that either do or do not. It is only ok to freplace subprograms
++ * that do not change packet data with those that do not as well.
++ * The below tests check outcomes for each combination of such freplace.
++ * Also test a case when main subprogram itself is replaced and is a single
++ * subprogram in a program.
++ */
++void test_changes_pkt_data_freplace(void)
++{
++	struct {
++		const char *main;
++		const char *to_be_replaced;
++		bool changes;
++	} mains[] = {
++		{ "main_with_subprogs",   "changes_pkt_data",         true },
++		{ "main_with_subprogs",   "does_not_change_pkt_data", false },
++		{ "main_changes",         "main_changes",             true },
++		{ "main_does_not_change", "main_does_not_change",     false },
++	};
++	struct {
++		const char *func;
++		bool changes;
++	} replacements[] = {
++		{ "changes_pkt_data",         true },
++		{ "does_not_change_pkt_data", false }
++	};
++	char buf[64];
++
++	for (int i = 0; i < ARRAY_SIZE(mains); ++i) {
++		for (int j = 0; j < ARRAY_SIZE(replacements); ++j) {
++			snprintf(buf, sizeof(buf), "%s_with_%s",
++				 mains[i].to_be_replaced, replacements[j].func);
++			if (!test__start_subtest(buf))
++				continue;
++			test_aux(mains[i].main, mains[i].to_be_replaced, replacements[j].func,
++				 mains[i].changes || !replacements[j].changes);
++		}
++	}
++}
+diff --git a/tools/testing/selftests/bpf/progs/changes_pkt_data.c b/tools/testing/selftests/bpf/progs/changes_pkt_data.c
+new file mode 100644
+index 00000000000000..43cada48b28ad4
+--- /dev/null
++++ b/tools/testing/selftests/bpf/progs/changes_pkt_data.c
+@@ -0,0 +1,39 @@
++// SPDX-License-Identifier: GPL-2.0
++
++#include <linux/bpf.h>
++#include <bpf/bpf_helpers.h>
++
++__noinline
++long changes_pkt_data(struct __sk_buff *sk)
++{
++	return bpf_skb_pull_data(sk, 0);
++}
++
++__noinline __weak
++long does_not_change_pkt_data(struct __sk_buff *sk)
++{
++	return 0;
++}
++
++SEC("?tc")
++int main_with_subprogs(struct __sk_buff *sk)
++{
++	changes_pkt_data(sk);
++	does_not_change_pkt_data(sk);
++	return 0;
++}
++
++SEC("?tc")
++int main_changes(struct __sk_buff *sk)
++{
++	bpf_skb_pull_data(sk, 0);
++	return 0;
++}
++
++SEC("?tc")
++int main_does_not_change(struct __sk_buff *sk)
++{
++	return 0;
++}
++
++char _license[] SEC("license") = "GPL";
+diff --git a/tools/testing/selftests/bpf/progs/changes_pkt_data_freplace.c b/tools/testing/selftests/bpf/progs/changes_pkt_data_freplace.c
+new file mode 100644
+index 00000000000000..f9a622705f1b3b
+--- /dev/null
++++ b/tools/testing/selftests/bpf/progs/changes_pkt_data_freplace.c
+@@ -0,0 +1,18 @@
++// SPDX-License-Identifier: GPL-2.0
++
++#include <linux/bpf.h>
++#include <bpf/bpf_helpers.h>
++
++SEC("?freplace")
++long changes_pkt_data(struct __sk_buff *sk)
++{
++	return bpf_skb_pull_data(sk, 0);
++}
++
++SEC("?freplace")
++long does_not_change_pkt_data(struct __sk_buff *sk)
++{
++	return 0;
++}
++
++char _license[] SEC("license") = "GPL";
+diff --git a/tools/testing/selftests/bpf/progs/raw_tp_null.c b/tools/testing/selftests/bpf/progs/raw_tp_null.c
+index 457f34c151e32f..5927054b6dd96f 100644
+--- a/tools/testing/selftests/bpf/progs/raw_tp_null.c
++++ b/tools/testing/selftests/bpf/progs/raw_tp_null.c
+@@ -3,6 +3,7 @@
+ 
+ #include <vmlinux.h>
+ #include <bpf/bpf_tracing.h>
++#include "bpf_misc.h"
+ 
+ char _license[] SEC("license") = "GPL";
+ 
+@@ -17,16 +18,14 @@ int BPF_PROG(test_raw_tp_null, struct sk_buff *skb)
+ 	if (task->pid != tid)
+ 		return 0;
+ 
+-	i = i + skb->mark + 1;
+-	/* The compiler may move the NULL check before this deref, which causes
+-	 * the load to fail as deref of scalar. Prevent that by using a barrier.
++	/* If dead code elimination kicks in, the increment +=2 will be
++	 * removed. For raw_tp programs attaching to tracepoints in kernel
++	 * modules, we mark input arguments as PTR_MAYBE_NULL, so branch
++	 * prediction should never kick in.
+ 	 */
+-	barrier();
+-	/* If dead code elimination kicks in, the increment below will
+-	 * be removed. For raw_tp programs, we mark input arguments as
+-	 * PTR_MAYBE_NULL, so branch prediction should never kick in.
+-	 */
+-	if (!skb)
+-		i += 2;
++	asm volatile ("%[i] += 1; if %[ctx] != 0 goto +1; %[i] += 2;"
++			: [i]"+r"(i)
++			: [ctx]"r"(skb)
++			: "memory");
+ 	return 0;
+ }
+diff --git a/tools/testing/selftests/bpf/progs/verifier_sock.c b/tools/testing/selftests/bpf/progs/verifier_sock.c
+index ee76b51005abe7..3c8f6646e33dae 100644
+--- a/tools/testing/selftests/bpf/progs/verifier_sock.c
++++ b/tools/testing/selftests/bpf/progs/verifier_sock.c
+@@ -50,6 +50,13 @@ struct {
+ 	__uint(map_flags, BPF_F_NO_PREALLOC);
+ } sk_storage_map SEC(".maps");
+ 
++struct {
++	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
++	__uint(max_entries, 1);
++	__uint(key_size, sizeof(__u32));
++	__uint(value_size, sizeof(__u32));
++} jmp_table SEC(".maps");
++
+ SEC("cgroup/skb")
+ __description("skb->sk: no NULL check")
+ __failure __msg("invalid mem access 'sock_common_or_null'")
+@@ -977,4 +984,53 @@ l1_%=:	r0 = *(u8*)(r7 + 0);				\
+ 	: __clobber_all);
+ }
+ 
++__noinline
++long skb_pull_data2(struct __sk_buff *sk, __u32 len)
++{
++	return bpf_skb_pull_data(sk, len);
++}
++
++__noinline
++long skb_pull_data1(struct __sk_buff *sk, __u32 len)
++{
++	return skb_pull_data2(sk, len);
++}
++
++/* global function calls bpf_skb_pull_data(), which invalidates packet
++ * pointers established before global function call.
++ */
++SEC("tc")
++__failure __msg("invalid mem access")
++int invalidate_pkt_pointers_from_global_func(struct __sk_buff *sk)
++{
++	int *p = (void *)(long)sk->data;
++
++	if ((void *)(p + 1) > (void *)(long)sk->data_end)
++		return TCX_DROP;
++	skb_pull_data1(sk, 0);
++	*p = 42; /* this is unsafe */
++	return TCX_PASS;
++}
++
++__noinline
++int tail_call(struct __sk_buff *sk)
++{
++	bpf_tail_call_static(sk, &jmp_table, 0);
++	return 0;
++}
++
++/* Tail calls invalidate packet pointers. */
++SEC("tc")
++__failure __msg("invalid mem access")
++int invalidate_pkt_pointers_by_tail_call(struct __sk_buff *sk)
++{
++	int *p = (void *)(long)sk->data;
++
++	if ((void *)(p + 1) > (void *)(long)sk->data_end)
++		return TCX_DROP;
++	tail_call(sk);
++	*p = 42; /* this is unsafe */
++	return TCX_PASS;
++}
++
+ char _license[] SEC("license") = "GPL";
+diff --git a/tools/testing/selftests/mm/charge_reserved_hugetlb.sh b/tools/testing/selftests/mm/charge_reserved_hugetlb.sh
+index 67df7b47087f03..e1fe16bcbbe880 100755
+--- a/tools/testing/selftests/mm/charge_reserved_hugetlb.sh
++++ b/tools/testing/selftests/mm/charge_reserved_hugetlb.sh
+@@ -29,7 +29,7 @@ fi
+ if [[ $cgroup2 ]]; then
+   cgroup_path=$(mount -t cgroup2 | head -1 | awk '{print $3}')
+   if [[ -z "$cgroup_path" ]]; then
+-    cgroup_path=/dev/cgroup/memory
++    cgroup_path=$(mktemp -d)
+     mount -t cgroup2 none $cgroup_path
+     do_umount=1
+   fi
+@@ -37,7 +37,7 @@ if [[ $cgroup2 ]]; then
+ else
+   cgroup_path=$(mount -t cgroup | grep ",hugetlb" | awk '{print $3}')
+   if [[ -z "$cgroup_path" ]]; then
+-    cgroup_path=/dev/cgroup/memory
++    cgroup_path=$(mktemp -d)
+     mount -t cgroup memory,hugetlb $cgroup_path
+     do_umount=1
+   fi
+diff --git a/tools/testing/selftests/mm/hugetlb_reparenting_test.sh b/tools/testing/selftests/mm/hugetlb_reparenting_test.sh
+index 11f9bbe7dc222b..0b0d4ba1af2771 100755
+--- a/tools/testing/selftests/mm/hugetlb_reparenting_test.sh
++++ b/tools/testing/selftests/mm/hugetlb_reparenting_test.sh
+@@ -23,7 +23,7 @@ fi
+ if [[ $cgroup2 ]]; then
+   CGROUP_ROOT=$(mount -t cgroup2 | head -1 | awk '{print $3}')
+   if [[ -z "$CGROUP_ROOT" ]]; then
+-    CGROUP_ROOT=/dev/cgroup/memory
++    CGROUP_ROOT=$(mktemp -d)
+     mount -t cgroup2 none $CGROUP_ROOT
+     do_umount=1
+   fi
+diff --git a/tools/testing/shared/linux.c b/tools/testing/shared/linux.c
+index 17263696b5d880..61b3f571f7a708 100644
+--- a/tools/testing/shared/linux.c
++++ b/tools/testing/shared/linux.c
+@@ -147,7 +147,7 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
+ void kmem_cache_free_bulk(struct kmem_cache *cachep, size_t size, void **list)
+ {
+ 	if (kmalloc_verbose)
+-		pr_debug("Bulk free %p[0-%lu]\n", list, size - 1);
++		pr_debug("Bulk free %p[0-%zu]\n", list, size - 1);
+ 
+ 	pthread_mutex_lock(&cachep->lock);
+ 	for (int i = 0; i < size; i++)
+@@ -165,7 +165,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *cachep, gfp_t gfp, size_t size,
+ 	size_t i;
+ 
+ 	if (kmalloc_verbose)
+-		pr_debug("Bulk alloc %lu\n", size);
++		pr_debug("Bulk alloc %zu\n", size);
+ 
+ 	pthread_mutex_lock(&cachep->lock);
+ 	if (cachep->nr_objs >= size) {


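A note on the verifier_sock.c additions in the patch above: they exercise
only the rejected pattern, where a packet pointer established before a
data-changing call is dereferenced afterwards. For contrast, a minimal
hypothetical sketch (not part of the patch) of the accepted counterpart,
which re-derives the packet pointers after the call:

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Hypothetical safe counterpart to invalidate_pkt_pointers_from_global_func:
 * skb->data and skb->data_end are reloaded after bpf_skb_pull_data(), so no
 * stale packet pointer is ever used and the verifier accepts the store.
 */
SEC("tc")
int revalidate_pkt_pointers(struct __sk_buff *sk)
{
	int *p;

	if (bpf_skb_pull_data(sk, 0))
		return TCX_DROP;

	/* reload packet pointers after the helper invalidated them */
	p = (void *)(long)sk->data;
	if ((void *)(p + 1) > (void *)(long)sk->data_end)
		return TCX_DROP;

	*p = 42; /* safe: p was derived after the pull */
	return TCX_PASS;
}

char _license[] SEC("license") = "GPL";
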
* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2025-04-22 18:48 Mike Pagano
From: Mike Pagano @ 2025-04-22 18:48 UTC
  To: gentoo-commits

commit:     f9872df78d5468a6053ce8e99dbdd4a344188222
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Apr 22 18:48:35 2025 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Apr 22 18:48:35 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=f9872df7

x86/insn_decoder_test: allow longer symbol-names

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                        |  4 ++
 ...sn-decoder-test-allow-longer-symbol-names.patch | 49 ++++++++++++++++++++++
 2 files changed, 53 insertions(+)

diff --git a/0000_README b/0000_README
index 6b594792..b04d2cdd 100644
--- a/0000_README
+++ b/0000_README
@@ -155,6 +155,10 @@ Patch:  1730_parisc-Disable-prctl.patch
 From:   https://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux.git
 Desc:   prctl: Temporarily disable prctl(PR_SET_MDWE) on parisc
 
+Patch:  1740_x86-insn-decoder-test-allow-longer-symbol-names.patch
+From:   https://gitlab.com/cki-project/kernel-ark/-/commit/8d4a52c3921d278f27241fc0c6949d8fdc13a7f5
+Desc:   x86/insn_decoder_test: allow longer symbol-names
+
 Patch:  2000_BT-Check-key-sizes-only-if-Secure-Simple-Pairing-enabled.patch
 From:   https://lore.kernel.org/linux-bluetooth/20190522070540.48895-1-marcel@holtmann.org/raw
 Desc:   Bluetooth: Check key sizes only when Secure Simple Pairing is enabled. See bug #686758

diff --git a/1740_x86-insn-decoder-test-allow-longer-symbol-names.patch b/1740_x86-insn-decoder-test-allow-longer-symbol-names.patch
new file mode 100644
index 00000000..70c706ba
--- /dev/null
+++ b/1740_x86-insn-decoder-test-allow-longer-symbol-names.patch
@@ -0,0 +1,49 @@
+From 8d4a52c3921d278f27241fc0c6949d8fdc13a7f5 Mon Sep 17 00:00:00 2001
+From: David Rheinsberg <david@readahead.eu>
+Date: Tue, 24 Jan 2023 12:04:59 +0100
+Subject: [PATCH] x86/insn_decoder_test: allow longer symbol-names
+
+Increase the allowed line-length of the insn-decoder-test to 4k to allow
+for symbol-names longer than 256 characters.
+
+The insn-decoder-test takes objdump output as input, which may contain
+symbol-names as instruction arguments. With rust-code entering the
+kernel, those symbol-names will include mangled-symbols which might
+exceed the current line-length-limit of the tool.
+
+By bumping the line-length-limit of the tool to 4k, we get a reasonable
+buffer for all objdump outputs I have seen so far. Unfortunately, ELF
+symbol-names are not restricted in length, so technically this might
+still end up failing if we encounter longer names in the future.
+
+My compile-failure looks like this:
+
+    arch/x86/tools/insn_decoder_test: error: malformed line 1152000:
+    tBb_+0xf2>
+
+..which overflowed by 10 characters reading this line:
+
+    ffffffff81458193:   74 3d                   je     ffffffff814581d2 <_RNvXse_NtNtNtCshGpAVYOtgW1_4core4iter8adapters7flattenINtB5_13FlattenCompatINtNtB7_3map3MapNtNtNtBb_3str4iter5CharsNtB1v_17CharEscapeDefaultENtNtBb_4char13EscapeDefaultENtNtBb_3fmt5Debug3fmtBb_+0xf2>
+
+Signed-off-by: David Rheinsberg <david@readahead.eu>
+Signed-off-by: Scott Weaver <scweaver@redhat.com>
+---
+ arch/x86/tools/insn_decoder_test.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/x86/tools/insn_decoder_test.c b/arch/x86/tools/insn_decoder_test.c
+index 472540aeabc23..366e07546344b 100644
+--- a/arch/x86/tools/insn_decoder_test.c
++++ b/arch/x86/tools/insn_decoder_test.c
+@@ -106,7 +106,7 @@ static void parse_args(int argc, char **argv)
+ 	}
+ }
+ 
+-#define BUFSIZE 256
++#define BUFSIZE 4096
+ 
+ int main(int argc, char **argv)
+ {
+-- 
+GitLab
+


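A note on the overflow described in the commit message above: it is the
usual fixed-size fgets() line-buffer limitation. A standalone sketch
(hypothetical, not the decoder test's actual parser) showing how a
256-byte buffer splits an over-long objdump line, which is visible as a
read that has no trailing newline:

#include <stdio.h>
#include <string.h>

#define BUFSIZE 256	/* the old limit; the patch raises it to 4096 */

int main(void)
{
	char line[BUFSIZE];
	unsigned long nr = 0;

	/* fgets() returns at most BUFSIZE - 1 bytes per call, so a line
	 * carrying a mangled Rust symbol longer than the buffer arrives
	 * split across two reads; any read lacking '\n' marks the split
	 * (the final line of input, if unterminated, is flagged too).
	 */
	while (fgets(line, BUFSIZE, stdin)) {
		nr++;
		if (!strchr(line, '\n'))
			fprintf(stderr, "split at read %lu: %.20s...\n", nr, line);
	}
	return 0;
}

Piping objdump output of a Rust-enabled kernel through such a reader
flags exactly the kind of over-long symbol line quoted above.
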
* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2025-04-20  9:38 Mike Pagano
From: Mike Pagano @ 2025-04-20  9:38 UTC
  To: gentoo-commits

commit:     a28004010231768f718bec49a133cfe566a60c83
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Apr 20 09:38:11 2025 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Apr 20 09:38:11 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=a2800401

Linux patch 6.12.24

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |     4 +
 1023_linux-6.12.24.patch | 16325 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 16329 insertions(+)

diff --git a/0000_README b/0000_README
index 7e2e4141..6b594792 100644
--- a/0000_README
+++ b/0000_README
@@ -135,6 +135,10 @@ Patch:  1022_linux-6.12.23.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.12.23
 
+Patch:  1023_linux-6.12.24.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.12.24
+
 Patch:  1500_fortify-copy-size-value-range-tracking-fix.patch
 From:   https://git.kernel.org/
 Desc:   fortify: Hide run-time copy size from value range tracking

diff --git a/1023_linux-6.12.24.patch b/1023_linux-6.12.24.patch
new file mode 100644
index 00000000..a8202e32
--- /dev/null
+++ b/1023_linux-6.12.24.patch
@@ -0,0 +1,16325 @@
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index d401577b5a6ace..607a8937f17549 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -3028,6 +3028,8 @@
+ 			* max_sec_lba48: Set or clear transfer size limit to
+ 			  65535 sectors.
+ 
++			* external: Mark port as external (hotplug-capable).
++
+ 			* [no]lpm: Enable or disable link power management.
+ 
+ 			* [no]setxfer: Indicate if transfer speed mode setting
+diff --git a/Documentation/devicetree/bindings/arm/qcom,coresight-tpda.yaml b/Documentation/devicetree/bindings/arm/qcom,coresight-tpda.yaml
+index 76163abed655a2..5ed40f21b8eb5d 100644
+--- a/Documentation/devicetree/bindings/arm/qcom,coresight-tpda.yaml
++++ b/Documentation/devicetree/bindings/arm/qcom,coresight-tpda.yaml
+@@ -55,8 +55,7 @@ properties:
+       - const: arm,primecell
+ 
+   reg:
+-    minItems: 1
+-    maxItems: 2
++    maxItems: 1
+ 
+   clocks:
+     maxItems: 1
+diff --git a/Documentation/devicetree/bindings/arm/qcom,coresight-tpdm.yaml b/Documentation/devicetree/bindings/arm/qcom,coresight-tpdm.yaml
+index 8eec07d9d45428..07d21a3617f5b2 100644
+--- a/Documentation/devicetree/bindings/arm/qcom,coresight-tpdm.yaml
++++ b/Documentation/devicetree/bindings/arm/qcom,coresight-tpdm.yaml
+@@ -41,8 +41,7 @@ properties:
+       - const: arm,primecell
+ 
+   reg:
+-    minItems: 1
+-    maxItems: 2
++    maxItems: 1
+ 
+   qcom,dsb-element-bits:
+     description:
+diff --git a/Documentation/devicetree/bindings/media/i2c/st,st-mipid02.yaml b/Documentation/devicetree/bindings/media/i2c/st,st-mipid02.yaml
+index b68141264c0e9f..4d40e75b4e1eff 100644
+--- a/Documentation/devicetree/bindings/media/i2c/st,st-mipid02.yaml
++++ b/Documentation/devicetree/bindings/media/i2c/st,st-mipid02.yaml
+@@ -71,7 +71,7 @@ properties:
+                 description:
+                   Any lane can be inverted or not.
+                 minItems: 1
+-                maxItems: 2
++                maxItems: 3
+ 
+             required:
+               - data-lanes
+diff --git a/Makefile b/Makefile
+index 6a2a60eb67a3e7..e1fa425089c220 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 12
+-SUBLEVEL = 23
++SUBLEVEL = 24
+ EXTRAVERSION =
+ NAME = Baby Opossum Posse
+ 
+@@ -1013,6 +1013,9 @@ ifdef CONFIG_CC_IS_GCC
+ KBUILD_CFLAGS   += -fconserve-stack
+ endif
+ 
++# Ensure compilers do not transform certain loops into calls to wcslen()
++KBUILD_CFLAGS += -fno-builtin-wcslen
++
+ # change __FILE__ to the relative path from the srctree
+ KBUILD_CPPFLAGS += $(call cc-option,-fmacro-prefix-map=$(srctree)/=)
+ 
+diff --git a/arch/arm64/boot/dts/exynos/google/gs101.dtsi b/arch/arm64/boot/dts/exynos/google/gs101.dtsi
+index 302c5beb224aa4..b8f8255f840b13 100644
+--- a/arch/arm64/boot/dts/exynos/google/gs101.dtsi
++++ b/arch/arm64/boot/dts/exynos/google/gs101.dtsi
+@@ -1451,6 +1451,7 @@ pinctrl_gsacore: pinctrl@17a80000 {
+ 			/* TODO: update once support for this CMU exists */
+ 			clocks = <0>;
+ 			clock-names = "pclk";
++			status = "disabled";
+ 		};
+ 
+ 		cmu_top: clock-controller@1e080000 {
+diff --git a/arch/arm64/boot/dts/mediatek/mt8173.dtsi b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
+index 3458be7f7f6114..f49ec749590609 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8173.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
+@@ -1255,8 +1255,7 @@ dpi0_out: endpoint {
+ 		};
+ 
+ 		pwm0: pwm@1401e000 {
+-			compatible = "mediatek,mt8173-disp-pwm",
+-				     "mediatek,mt6595-disp-pwm";
++			compatible = "mediatek,mt8173-disp-pwm";
+ 			reg = <0 0x1401e000 0 0x1000>;
+ 			#pwm-cells = <2>;
+ 			clocks = <&mmsys CLK_MM_DISP_PWM026M>,
+@@ -1266,8 +1265,7 @@ pwm0: pwm@1401e000 {
+ 		};
+ 
+ 		pwm1: pwm@1401f000 {
+-			compatible = "mediatek,mt8173-disp-pwm",
+-				     "mediatek,mt6595-disp-pwm";
++			compatible = "mediatek,mt8173-disp-pwm";
+ 			reg = <0 0x1401f000 0 0x1000>;
+ 			#pwm-cells = <2>;
+ 			clocks = <&mmsys CLK_MM_DISP_PWM126M>,
+diff --git a/arch/arm64/boot/dts/nvidia/tegra234-p3768-0000+p3767.dtsi b/arch/arm64/boot/dts/nvidia/tegra234-p3768-0000+p3767.dtsi
+index 19340d13f789f0..41821354bbdae6 100644
+--- a/arch/arm64/boot/dts/nvidia/tegra234-p3768-0000+p3767.dtsi
++++ b/arch/arm64/boot/dts/nvidia/tegra234-p3768-0000+p3767.dtsi
+@@ -227,13 +227,6 @@ key-power {
+ 			wakeup-event-action = <EV_ACT_ASSERTED>;
+ 			wakeup-source;
+ 		};
+-
+-		key-suspend {
+-			label = "Suspend";
+-			gpios = <&gpio TEGRA234_MAIN_GPIO(G, 2) GPIO_ACTIVE_LOW>;
+-			linux,input-type = <EV_KEY>;
+-			linux,code = <KEY_SLEEP>;
+-		};
+ 	};
+ 
+ 	fan: pwm-fan {
+diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
+index 488f8e75134959..2a4e686e633c62 100644
+--- a/arch/arm64/include/asm/cputype.h
++++ b/arch/arm64/include/asm/cputype.h
+@@ -75,6 +75,7 @@
+ #define ARM_CPU_PART_CORTEX_A76		0xD0B
+ #define ARM_CPU_PART_NEOVERSE_N1	0xD0C
+ #define ARM_CPU_PART_CORTEX_A77		0xD0D
++#define ARM_CPU_PART_CORTEX_A76AE	0xD0E
+ #define ARM_CPU_PART_NEOVERSE_V1	0xD40
+ #define ARM_CPU_PART_CORTEX_A78		0xD41
+ #define ARM_CPU_PART_CORTEX_A78AE	0xD42
+@@ -119,6 +120,7 @@
+ #define QCOM_CPU_PART_KRYO		0x200
+ #define QCOM_CPU_PART_KRYO_2XX_GOLD	0x800
+ #define QCOM_CPU_PART_KRYO_2XX_SILVER	0x801
++#define QCOM_CPU_PART_KRYO_3XX_GOLD	0x802
+ #define QCOM_CPU_PART_KRYO_3XX_SILVER	0x803
+ #define QCOM_CPU_PART_KRYO_4XX_GOLD	0x804
+ #define QCOM_CPU_PART_KRYO_4XX_SILVER	0x805
+@@ -158,6 +160,7 @@
+ #define MIDR_CORTEX_A76	MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76)
+ #define MIDR_NEOVERSE_N1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N1)
+ #define MIDR_CORTEX_A77	MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A77)
++#define MIDR_CORTEX_A76AE	MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76AE)
+ #define MIDR_NEOVERSE_V1	MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V1)
+ #define MIDR_CORTEX_A78	MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78)
+ #define MIDR_CORTEX_A78AE	MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78AE)
+@@ -195,6 +198,7 @@
+ #define MIDR_QCOM_KRYO MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO)
+ #define MIDR_QCOM_KRYO_2XX_GOLD MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_2XX_GOLD)
+ #define MIDR_QCOM_KRYO_2XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_2XX_SILVER)
++#define MIDR_QCOM_KRYO_3XX_GOLD MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_3XX_GOLD)
+ #define MIDR_QCOM_KRYO_3XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_3XX_SILVER)
+ #define MIDR_QCOM_KRYO_4XX_GOLD MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_4XX_GOLD)
+ #define MIDR_QCOM_KRYO_4XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_4XX_SILVER)
+diff --git a/arch/arm64/include/asm/spectre.h b/arch/arm64/include/asm/spectre.h
+index 0c4d9045c31f47..f1524cdeacf1c4 100644
+--- a/arch/arm64/include/asm/spectre.h
++++ b/arch/arm64/include/asm/spectre.h
+@@ -97,7 +97,6 @@ enum mitigation_state arm64_get_meltdown_state(void);
+ 
+ enum mitigation_state arm64_get_spectre_bhb_state(void);
+ bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, int scope);
+-u8 spectre_bhb_loop_affected(int scope);
+ void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
+ bool try_emulate_el1_ssbs(struct pt_regs *regs, u32 instr);
+ 
+diff --git a/arch/arm64/include/asm/traps.h b/arch/arm64/include/asm/traps.h
+index d780d1bd2eacb9..82cf1f879c61df 100644
+--- a/arch/arm64/include/asm/traps.h
++++ b/arch/arm64/include/asm/traps.h
+@@ -109,10 +109,9 @@ static inline void arm64_mops_reset_regs(struct user_pt_regs *regs, unsigned lon
+ 	int dstreg = ESR_ELx_MOPS_ISS_DESTREG(esr);
+ 	int srcreg = ESR_ELx_MOPS_ISS_SRCREG(esr);
+ 	int sizereg = ESR_ELx_MOPS_ISS_SIZEREG(esr);
+-	unsigned long dst, src, size;
++	unsigned long dst, size;
+ 
+ 	dst = regs->regs[dstreg];
+-	src = regs->regs[srcreg];
+ 	size = regs->regs[sizereg];
+ 
+ 	/*
+@@ -129,6 +128,7 @@ static inline void arm64_mops_reset_regs(struct user_pt_regs *regs, unsigned lon
+ 		}
+ 	} else {
+ 		/* CPY* instruction */
++		unsigned long src = regs->regs[srcreg];
+ 		if (!(option_a ^ wrong_option)) {
+ 			/* Format is from Option B */
+ 			if (regs->pstate & PSR_N_BIT) {
+diff --git a/arch/arm64/kernel/proton-pack.c b/arch/arm64/kernel/proton-pack.c
+index da53722f95d41a..0f51fd10b4b063 100644
+--- a/arch/arm64/kernel/proton-pack.c
++++ b/arch/arm64/kernel/proton-pack.c
+@@ -845,52 +845,86 @@ static unsigned long system_bhb_mitigations;
+  * This must be called with SCOPE_LOCAL_CPU for each type of CPU, before any
+  * SCOPE_SYSTEM call will give the right answer.
+  */
+-u8 spectre_bhb_loop_affected(int scope)
++static bool is_spectre_bhb_safe(int scope)
++{
++	static const struct midr_range spectre_bhb_safe_list[] = {
++		MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
++		MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
++		MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
++		MIDR_ALL_VERSIONS(MIDR_CORTEX_A510),
++		MIDR_ALL_VERSIONS(MIDR_CORTEX_A520),
++		MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
++		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_2XX_SILVER),
++		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
++		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
++		{},
++	};
++	static bool all_safe = true;
++
++	if (scope != SCOPE_LOCAL_CPU)
++		return all_safe;
++
++	if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_safe_list))
++		return true;
++
++	all_safe = false;
++
++	return false;
++}
++
++static u8 spectre_bhb_loop_affected(void)
+ {
+ 	u8 k = 0;
+-	static u8 max_bhb_k;
+-
+-	if (scope == SCOPE_LOCAL_CPU) {
+-		static const struct midr_range spectre_bhb_k32_list[] = {
+-			MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
+-			MIDR_ALL_VERSIONS(MIDR_CORTEX_A78AE),
+-			MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
+-			MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
+-			MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
+-			MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
+-			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
+-			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
+-			{},
+-		};
+-		static const struct midr_range spectre_bhb_k24_list[] = {
+-			MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
+-			MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
+-			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
+-			{},
+-		};
+-		static const struct midr_range spectre_bhb_k11_list[] = {
+-			MIDR_ALL_VERSIONS(MIDR_AMPERE1),
+-			{},
+-		};
+-		static const struct midr_range spectre_bhb_k8_list[] = {
+-			MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
+-			MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
+-			{},
+-		};
+-
+-		if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list))
+-			k = 32;
+-		else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list))
+-			k = 24;
+-		else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k11_list))
+-			k = 11;
+-		else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list))
+-			k =  8;
+-
+-		max_bhb_k = max(max_bhb_k, k);
+-	} else {
+-		k = max_bhb_k;
+-	}
++
++	static const struct midr_range spectre_bhb_k132_list[] = {
++		MIDR_ALL_VERSIONS(MIDR_CORTEX_X3),
++		MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V2),
++	};
++	static const struct midr_range spectre_bhb_k38_list[] = {
++		MIDR_ALL_VERSIONS(MIDR_CORTEX_A715),
++		MIDR_ALL_VERSIONS(MIDR_CORTEX_A720),
++	};
++	static const struct midr_range spectre_bhb_k32_list[] = {
++		MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
++		MIDR_ALL_VERSIONS(MIDR_CORTEX_A78AE),
++		MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
++		MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
++		MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
++		MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
++		MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
++		MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
++		{},
++	};
++	static const struct midr_range spectre_bhb_k24_list[] = {
++		MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
++		MIDR_ALL_VERSIONS(MIDR_CORTEX_A76AE),
++		MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
++		MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
++		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_GOLD),
++		{},
++	};
++	static const struct midr_range spectre_bhb_k11_list[] = {
++		MIDR_ALL_VERSIONS(MIDR_AMPERE1),
++		{},
++	};
++	static const struct midr_range spectre_bhb_k8_list[] = {
++		MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
++		MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
++		{},
++	};
++
++	if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k132_list))
++		k = 132;
++	else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k38_list))
++		k = 38;
++	else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list))
++		k = 32;
++	else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list))
++		k = 24;
++	else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k11_list))
++		k = 11;
++	else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list))
++		k =  8;
+ 
+ 	return k;
+ }
+@@ -916,29 +950,13 @@ static enum mitigation_state spectre_bhb_get_cpu_fw_mitigation_state(void)
+ 	}
+ }
+ 
+-static bool is_spectre_bhb_fw_affected(int scope)
++static bool has_spectre_bhb_fw_mitigation(void)
+ {
+-	static bool system_affected;
+ 	enum mitigation_state fw_state;
+ 	bool has_smccc = arm_smccc_1_1_get_conduit() != SMCCC_CONDUIT_NONE;
+-	static const struct midr_range spectre_bhb_firmware_mitigated_list[] = {
+-		MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
+-		MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
+-		{},
+-	};
+-	bool cpu_in_list = is_midr_in_range_list(read_cpuid_id(),
+-					 spectre_bhb_firmware_mitigated_list);
+-
+-	if (scope != SCOPE_LOCAL_CPU)
+-		return system_affected;
+ 
+ 	fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
+-	if (cpu_in_list || (has_smccc && fw_state == SPECTRE_MITIGATED)) {
+-		system_affected = true;
+-		return true;
+-	}
+-
+-	return false;
++	return has_smccc && fw_state == SPECTRE_MITIGATED;
+ }
+ 
+ static bool supports_ecbhb(int scope)
+@@ -954,6 +972,8 @@ static bool supports_ecbhb(int scope)
+ 						    ID_AA64MMFR1_EL1_ECBHB_SHIFT);
+ }
+ 
++static u8 max_bhb_k;
++
+ bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
+ 			     int scope)
+ {
+@@ -962,16 +982,18 @@ bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
+ 	if (supports_csv2p3(scope))
+ 		return false;
+ 
+-	if (supports_clearbhb(scope))
+-		return true;
+-
+-	if (spectre_bhb_loop_affected(scope))
+-		return true;
++	if (is_spectre_bhb_safe(scope))
++		return false;
+ 
+-	if (is_spectre_bhb_fw_affected(scope))
+-		return true;
++	/*
++	 * At this point the core isn't known to be "safe" so we're going to
++	 * assume it's vulnerable. We still need to update `max_bhb_k` though,
++	 * but only if we aren't mitigating with clearbhb though.
++	 */
++	if (scope == SCOPE_LOCAL_CPU && !supports_clearbhb(SCOPE_LOCAL_CPU))
++		max_bhb_k = max(max_bhb_k, spectre_bhb_loop_affected());
+ 
+-	return false;
++	return true;
+ }
+ 
+ static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
+@@ -1002,7 +1024,7 @@ early_param("nospectre_bhb", parse_spectre_bhb_param);
+ void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
+ {
+ 	bp_hardening_cb_t cpu_cb;
+-	enum mitigation_state fw_state, state = SPECTRE_VULNERABLE;
++	enum mitigation_state state = SPECTRE_VULNERABLE;
+ 	struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);
+ 
+ 	if (!is_spectre_bhb_affected(entry, SCOPE_LOCAL_CPU))
+@@ -1028,7 +1050,7 @@ void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
+ 		this_cpu_set_vectors(EL1_VECTOR_BHB_CLEAR_INSN);
+ 		state = SPECTRE_MITIGATED;
+ 		set_bit(BHB_INSN, &system_bhb_mitigations);
+-	} else if (spectre_bhb_loop_affected(SCOPE_LOCAL_CPU)) {
++	} else if (spectre_bhb_loop_affected()) {
+ 		/*
+ 		 * Ensure KVM uses the indirect vector which will have the
+ 		 * branchy-loop added. A57/A72-r0 will already have selected
+@@ -1041,32 +1063,29 @@ void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
+ 		this_cpu_set_vectors(EL1_VECTOR_BHB_LOOP);
+ 		state = SPECTRE_MITIGATED;
+ 		set_bit(BHB_LOOP, &system_bhb_mitigations);
+-	} else if (is_spectre_bhb_fw_affected(SCOPE_LOCAL_CPU)) {
+-		fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
+-		if (fw_state == SPECTRE_MITIGATED) {
+-			/*
+-			 * Ensure KVM uses one of the spectre bp_hardening
+-			 * vectors. The indirect vector doesn't include the EL3
+-			 * call, so needs upgrading to
+-			 * HYP_VECTOR_SPECTRE_INDIRECT.
+-			 */
+-			if (!data->slot || data->slot == HYP_VECTOR_INDIRECT)
+-				data->slot += 1;
+-
+-			this_cpu_set_vectors(EL1_VECTOR_BHB_FW);
+-
+-			/*
+-			 * The WA3 call in the vectors supersedes the WA1 call
+-			 * made during context-switch. Uninstall any firmware
+-			 * bp_hardening callback.
+-			 */
+-			cpu_cb = spectre_v2_get_sw_mitigation_cb();
+-			if (__this_cpu_read(bp_hardening_data.fn) != cpu_cb)
+-				__this_cpu_write(bp_hardening_data.fn, NULL);
+-
+-			state = SPECTRE_MITIGATED;
+-			set_bit(BHB_FW, &system_bhb_mitigations);
+-		}
++	} else if (has_spectre_bhb_fw_mitigation()) {
++		/*
++		 * Ensure KVM uses one of the spectre bp_hardening
++		 * vectors. The indirect vector doesn't include the EL3
++		 * call, so needs upgrading to
++		 * HYP_VECTOR_SPECTRE_INDIRECT.
++		 */
++		if (!data->slot || data->slot == HYP_VECTOR_INDIRECT)
++			data->slot += 1;
++
++		this_cpu_set_vectors(EL1_VECTOR_BHB_FW);
++
++		/*
++		 * The WA3 call in the vectors supersedes the WA1 call
++		 * made during context-switch. Uninstall any firmware
++		 * bp_hardening callback.
++		 */
++		cpu_cb = spectre_v2_get_sw_mitigation_cb();
++		if (__this_cpu_read(bp_hardening_data.fn) != cpu_cb)
++			__this_cpu_write(bp_hardening_data.fn, NULL);
++
++		state = SPECTRE_MITIGATED;
++		set_bit(BHB_FW, &system_bhb_mitigations);
+ 	}
+ 
+ 	update_mitigation_state(&spectre_bhb_state, state);
+@@ -1100,7 +1119,6 @@ void noinstr spectre_bhb_patch_loop_iter(struct alt_instr *alt,
+ {
+ 	u8 rd;
+ 	u32 insn;
+-	u16 loop_count = spectre_bhb_loop_affected(SCOPE_SYSTEM);
+ 
+ 	BUG_ON(nr_inst != 1); /* MOV -> MOV */
+ 
+@@ -1109,7 +1127,7 @@ void noinstr spectre_bhb_patch_loop_iter(struct alt_instr *alt,
+ 
+ 	insn = le32_to_cpu(*origptr);
+ 	rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);
+-	insn = aarch64_insn_gen_movewide(rd, loop_count, 0,
++	insn = aarch64_insn_gen_movewide(rd, max_bhb_k, 0,
+ 					 AARCH64_INSN_VARIANT_64BIT,
+ 					 AARCH64_INSN_MOVEWIDE_ZERO);
+ 	*updptr++ = cpu_to_le32(insn);
+diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
+index 634d3f62481827..7d301da8ff2899 100644
+--- a/arch/arm64/kvm/arm.c
++++ b/arch/arm64/kvm/arm.c
+@@ -493,7 +493,11 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
+ 	if (err)
+ 		return err;
+ 
+-	return kvm_share_hyp(vcpu, vcpu + 1);
++	err = kvm_share_hyp(vcpu, vcpu + 1);
++	if (err)
++		kvm_vgic_vcpu_destroy(vcpu);
++
++	return err;
+ }
+ 
+ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
+diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
+index e59c628c93f20d..9bcd51fd67d4e0 100644
+--- a/arch/arm64/mm/mmu.c
++++ b/arch/arm64/mm/mmu.c
+@@ -1360,7 +1360,8 @@ int arch_add_memory(int nid, u64 start, u64 size,
+ 		__remove_pgd_mapping(swapper_pg_dir,
+ 				     __phys_to_virt(start), size);
+ 	else {
+-		max_pfn = PFN_UP(start + size);
++		/* Address of hotplugged memory can be smaller */
++		max_pfn = max(max_pfn, PFN_UP(start + size));
+ 		max_low_pfn = max_pfn;
+ 	}
+ 
+diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
+index f14329989e9a71..4b6ce4f07bc2c3 100644
+--- a/arch/powerpc/kvm/powerpc.c
++++ b/arch/powerpc/kvm/powerpc.c
+@@ -550,12 +550,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
+ 
+ #ifdef CONFIG_PPC_BOOK3S_64
+ 	case KVM_CAP_SPAPR_TCE:
++		fallthrough;
+ 	case KVM_CAP_SPAPR_TCE_64:
+-		r = 1;
+-		break;
+ 	case KVM_CAP_SPAPR_TCE_VFIO:
+-		r = !!cpu_has_feature(CPU_FTR_HVMODE);
+-		break;
+ 	case KVM_CAP_PPC_RTAS:
+ 	case KVM_CAP_PPC_FIXUP_HCALL:
+ 	case KVM_CAP_PPC_ENABLE_HCALL:
+diff --git a/arch/s390/Makefile b/arch/s390/Makefile
+index 9b772093278704..5b97af31170928 100644
+--- a/arch/s390/Makefile
++++ b/arch/s390/Makefile
+@@ -15,7 +15,7 @@ KBUILD_CFLAGS_MODULE += -fPIC
+ KBUILD_AFLAGS	+= -m64
+ KBUILD_CFLAGS	+= -m64
+ KBUILD_CFLAGS	+= -fPIC
+-LDFLAGS_vmlinux	:= -no-pie --emit-relocs --discard-none
++LDFLAGS_vmlinux	:= $(call ld-option,-no-pie) --emit-relocs --discard-none
+ extra_tools	:= relocs
+ aflags_dwarf	:= -Wa,-gdwarf-2
+ KBUILD_AFLAGS_DECOMPRESSOR := $(CLANG_FLAGS) -m64 -D__ASSEMBLY__
+diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
+index c3075e4a8efc31..6d6b057b562fda 100644
+--- a/arch/s390/kernel/perf_cpum_cf.c
++++ b/arch/s390/kernel/perf_cpum_cf.c
+@@ -858,18 +858,13 @@ static int cpumf_pmu_event_type(struct perf_event *event)
+ static int cpumf_pmu_event_init(struct perf_event *event)
+ {
+ 	unsigned int type = event->attr.type;
+-	int err;
++	int err = -ENOENT;
+ 
+ 	if (type == PERF_TYPE_HARDWARE || type == PERF_TYPE_RAW)
+ 		err = __hw_perf_event_init(event, type);
+ 	else if (event->pmu->type == type)
+ 		/* Registered as unknown PMU */
+ 		err = __hw_perf_event_init(event, cpumf_pmu_event_type(event));
+-	else
+-		return -ENOENT;
+-
+-	if (unlikely(err) && event->destroy)
+-		event->destroy(event);
+ 
+ 	return err;
+ }
+@@ -1819,8 +1814,6 @@ static int cfdiag_event_init(struct perf_event *event)
+ 	event->destroy = hw_perf_event_destroy;
+ 
+ 	err = cfdiag_event_init2(event);
+-	if (unlikely(err))
+-		event->destroy(event);
+ out:
+ 	return err;
+ }
+diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
+index 331e0654d61d78..efdd6ead7ba812 100644
+--- a/arch/s390/kernel/perf_cpum_sf.c
++++ b/arch/s390/kernel/perf_cpum_sf.c
+@@ -898,9 +898,6 @@ static int cpumsf_pmu_event_init(struct perf_event *event)
+ 		event->attr.exclude_idle = 0;
+ 
+ 	err = __hw_perf_event_init(event);
+-	if (unlikely(err))
+-		if (event->destroy)
+-			event->destroy(event);
+ 	return err;
+ }
+ 
+diff --git a/arch/s390/pci/pci_bus.c b/arch/s390/pci/pci_bus.c
+index c3854682934557..23c27c6320130b 100644
+--- a/arch/s390/pci/pci_bus.c
++++ b/arch/s390/pci/pci_bus.c
+@@ -335,6 +335,9 @@ static bool zpci_bus_is_isolated_vf(struct zpci_bus *zbus, struct zpci_dev *zdev
+ {
+ 	struct pci_dev *pdev;
+ 
++	if (!zdev->vfn)
++		return false;
++
+ 	pdev = zpci_iov_find_parent_pf(zbus, zdev);
+ 	if (!pdev)
+ 		return true;
+diff --git a/arch/s390/pci/pci_mmio.c b/arch/s390/pci/pci_mmio.c
+index de5c0b389a3ec8..4779c3cb6cfab2 100644
+--- a/arch/s390/pci/pci_mmio.c
++++ b/arch/s390/pci/pci_mmio.c
+@@ -171,8 +171,12 @@ SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr,
+ 	args.address = mmio_addr;
+ 	args.vma = vma;
+ 	ret = follow_pfnmap_start(&args);
+-	if (ret)
+-		goto out_unlock_mmap;
++	if (ret) {
++		fixup_user_fault(current->mm, mmio_addr, FAULT_FLAG_WRITE, NULL);
++		ret = follow_pfnmap_start(&args);
++		if (ret)
++			goto out_unlock_mmap;
++	}
+ 
+ 	io_addr = (void __iomem *)((args.pfn << PAGE_SHIFT) |
+ 			(mmio_addr & ~PAGE_MASK));
+@@ -305,14 +309,18 @@ SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr,
+ 	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
+ 		goto out_unlock_mmap;
+ 	ret = -EACCES;
+-	if (!(vma->vm_flags & VM_WRITE))
++	if (!(vma->vm_flags & VM_READ))
+ 		goto out_unlock_mmap;
+ 
+ 	args.vma = vma;
+ 	args.address = mmio_addr;
+ 	ret = follow_pfnmap_start(&args);
+-	if (ret)
+-		goto out_unlock_mmap;
++	if (ret) {
++		fixup_user_fault(current->mm, mmio_addr, 0, NULL);
++		ret = follow_pfnmap_start(&args);
++		if (ret)
++			goto out_unlock_mmap;
++	}
+ 
+ 	io_addr = (void __iomem *)((args.pfn << PAGE_SHIFT) |
+ 			(mmio_addr & ~PAGE_MASK));
+diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
+index 2b7f358762c187..dc28f2c4eee3f2 100644
+--- a/arch/sparc/include/asm/pgtable_64.h
++++ b/arch/sparc/include/asm/pgtable_64.h
+@@ -936,7 +936,6 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
+ static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
+ 		pte_t *ptep, pte_t pte, unsigned int nr)
+ {
+-	arch_enter_lazy_mmu_mode();
+ 	for (;;) {
+ 		__set_pte_at(mm, addr, ptep, pte, 0);
+ 		if (--nr == 0)
+@@ -945,7 +944,6 @@ static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
+ 		pte_val(pte) += PAGE_SIZE;
+ 		addr += PAGE_SIZE;
+ 	}
+-	arch_leave_lazy_mmu_mode();
+ }
+ #define set_ptes set_ptes
+ 
+diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
+index 8648a50afe8899..a35ddcca5e7668 100644
+--- a/arch/sparc/mm/tlb.c
++++ b/arch/sparc/mm/tlb.c
+@@ -52,8 +52,10 @@ void flush_tlb_pending(void)
+ 
+ void arch_enter_lazy_mmu_mode(void)
+ {
+-	struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);
++	struct tlb_batch *tb;
+ 
++	preempt_disable();
++	tb = this_cpu_ptr(&tlb_batch);
+ 	tb->active = 1;
+ }
+ 
+@@ -64,6 +66,7 @@ void arch_leave_lazy_mmu_mode(void)
+ 	if (tb->tlb_nr)
+ 		flush_tlb_pending();
+ 	tb->active = 0;
++	preempt_enable();
+ }
+ 
+ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index db38d2b9b78868..e54da3b4d334e4 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -2434,18 +2434,20 @@ config CC_HAS_NAMED_AS
+ 	def_bool $(success,echo 'int __seg_fs fs; int __seg_gs gs;' | $(CC) -x c - -S -o /dev/null)
+ 	depends on CC_IS_GCC
+ 
++#
++# -fsanitize=kernel-address (KASAN) and -fsanitize=thread (KCSAN)
++# are incompatible with named address spaces with GCC < 13.3
++# (see GCC PR sanitizer/111736 and also PR sanitizer/115172).
++#
++
+ config CC_HAS_NAMED_AS_FIXED_SANITIZERS
+-	def_bool CC_IS_GCC && GCC_VERSION >= 130300
++	def_bool y
++	depends on !(KASAN || KCSAN) || GCC_VERSION >= 130300
++	depends on !(UBSAN_BOOL && KASAN) || GCC_VERSION >= 140200
+ 
+ config USE_X86_SEG_SUPPORT
+-	def_bool y
+-	depends on CC_HAS_NAMED_AS
+-	#
+-	# -fsanitize=kernel-address (KASAN) and -fsanitize=thread
+-	# (KCSAN) are incompatible with named address spaces with
+-	# GCC < 13.3 - see GCC PR sanitizer/111736.
+-	#
+-	depends on !(KASAN || KCSAN) || CC_HAS_NAMED_AS_FIXED_SANITIZERS
++	def_bool CC_HAS_NAMED_AS
++	depends on CC_HAS_NAMED_AS_FIXED_SANITIZERS
+ 
+ config CC_HAS_SLS
+ 	def_bool $(cc-option,-mharden-sls=all)
+diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
+index cf7fc2b8e3ce1f..1c2db11a2c3cb9 100644
+--- a/arch/x86/include/asm/irqflags.h
++++ b/arch/x86/include/asm/irqflags.h
+@@ -76,6 +76,28 @@ static __always_inline void native_local_irq_restore(unsigned long flags)
+ 
+ #endif
+ 
++#ifndef CONFIG_PARAVIRT
++#ifndef __ASSEMBLY__
++/*
++ * Used in the idle loop; sti takes one instruction cycle
++ * to complete:
++ */
++static __always_inline void arch_safe_halt(void)
++{
++	native_safe_halt();
++}
++
++/*
++ * Used when interrupts are already enabled or to
++ * shutdown the processor:
++ */
++static __always_inline void halt(void)
++{
++	native_halt();
++}
++#endif /* __ASSEMBLY__ */
++#endif /* CONFIG_PARAVIRT */
++
+ #ifdef CONFIG_PARAVIRT_XXL
+ #include <asm/paravirt.h>
+ #else
+@@ -97,24 +119,6 @@ static __always_inline void arch_local_irq_enable(void)
+ 	native_irq_enable();
+ }
+ 
+-/*
+- * Used in the idle loop; sti takes one instruction cycle
+- * to complete:
+- */
+-static __always_inline void arch_safe_halt(void)
+-{
+-	native_safe_halt();
+-}
+-
+-/*
+- * Used when interrupts are already enabled or to
+- * shutdown the processor:
+- */
+-static __always_inline void halt(void)
+-{
+-	native_halt();
+-}
+-
+ /*
+  * For spinlocks, etc:
+  */
+diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
+index d4eb9e1d61b8ef..75d4c994f5e2a5 100644
+--- a/arch/x86/include/asm/paravirt.h
++++ b/arch/x86/include/asm/paravirt.h
+@@ -107,6 +107,16 @@ static inline void notify_page_enc_status_changed(unsigned long pfn,
+ 	PVOP_VCALL3(mmu.notify_page_enc_status_changed, pfn, npages, enc);
+ }
+ 
++static __always_inline void arch_safe_halt(void)
++{
++	PVOP_VCALL0(irq.safe_halt);
++}
++
++static inline void halt(void)
++{
++	PVOP_VCALL0(irq.halt);
++}
++
+ #ifdef CONFIG_PARAVIRT_XXL
+ static inline void load_sp0(unsigned long sp0)
+ {
+@@ -170,16 +180,6 @@ static inline void __write_cr4(unsigned long x)
+ 	PVOP_VCALL1(cpu.write_cr4, x);
+ }
+ 
+-static __always_inline void arch_safe_halt(void)
+-{
+-	PVOP_VCALL0(irq.safe_halt);
+-}
+-
+-static inline void halt(void)
+-{
+-	PVOP_VCALL0(irq.halt);
+-}
+-
+ extern noinstr void pv_native_wbinvd(void);
+ 
+ static __always_inline void wbinvd(void)
+diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
+index 8d4fbe1be48954..9334fdd1f63502 100644
+--- a/arch/x86/include/asm/paravirt_types.h
++++ b/arch/x86/include/asm/paravirt_types.h
+@@ -122,10 +122,9 @@ struct pv_irq_ops {
+ 	struct paravirt_callee_save save_fl;
+ 	struct paravirt_callee_save irq_disable;
+ 	struct paravirt_callee_save irq_enable;
+-
++#endif
+ 	void (*safe_halt)(void);
+ 	void (*halt)(void);
+-#endif
+ } __no_randomize_layout;
+ 
+ struct pv_mmu_ops {
+diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
+index c70b86f1f2954f..63adda8a143f93 100644
+--- a/arch/x86/kernel/acpi/boot.c
++++ b/arch/x86/kernel/acpi/boot.c
+@@ -23,6 +23,8 @@
+ #include <linux/serial_core.h>
+ #include <linux/pgtable.h>
+ 
++#include <xen/xen.h>
++
+ #include <asm/e820/api.h>
+ #include <asm/irqdomain.h>
+ #include <asm/pci_x86.h>
+@@ -1730,6 +1732,15 @@ int __init acpi_mps_check(void)
+ {
+ #if defined(CONFIG_X86_LOCAL_APIC) && !defined(CONFIG_X86_MPPARSE)
+ /* mptable code is not built-in*/
++
++	/*
++	 * Xen disables ACPI in PV DomU guests but it still emulates APIC and
++	 * supports SMP. Returning early here ensures that APIC is not disabled
++	 * unnecessarily and the guest is not limited to a single vCPU.
++	 */
++	if (xen_pv_domain() && !xen_initial_domain())
++		return 0;
++
+ 	if (acpi_disabled || acpi_noirq) {
+ 		pr_warn("MPS support code is not built-in, using acpi=off or acpi=noirq or pci=noacpi may have problem\n");
+ 		return 1;
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index 79d2e17f6582e9..425bed00b2e071 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -627,7 +627,7 @@ static void init_amd_k8(struct cpuinfo_x86 *c)
+ 	 * (model = 0x14) and later actually support it.
+ 	 * (AMD Erratum #110, docId: 25759).
+ 	 */
+-	if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
++	if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM) && !cpu_has(c, X86_FEATURE_HYPERVISOR)) {
+ 		clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
+ 		if (!rdmsrl_amd_safe(0xc001100d, &value)) {
+ 			value &= ~BIT_64(32);
+diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
+index 4893d30ce43844..b4746eb8b11526 100644
+--- a/arch/x86/kernel/e820.c
++++ b/arch/x86/kernel/e820.c
+@@ -754,22 +754,21 @@ void __init e820__memory_setup_extended(u64 phys_addr, u32 data_len)
+ void __init e820__register_nosave_regions(unsigned long limit_pfn)
+ {
+ 	int i;
+-	unsigned long pfn = 0;
++	u64 last_addr = 0;
+ 
+ 	for (i = 0; i < e820_table->nr_entries; i++) {
+ 		struct e820_entry *entry = &e820_table->entries[i];
+ 
+-		if (pfn < PFN_UP(entry->addr))
+-			register_nosave_region(pfn, PFN_UP(entry->addr));
+-
+-		pfn = PFN_DOWN(entry->addr + entry->size);
+-
+ 		if (entry->type != E820_TYPE_RAM && entry->type != E820_TYPE_RESERVED_KERN)
+-			register_nosave_region(PFN_UP(entry->addr), pfn);
++			continue;
+ 
+-		if (pfn >= limit_pfn)
+-			break;
++		if (last_addr < entry->addr)
++			register_nosave_region(PFN_DOWN(last_addr), PFN_UP(entry->addr));
++
++		last_addr = entry->addr + entry->size;
+ 	}
++
++	register_nosave_region(PFN_DOWN(last_addr), limit_pfn);
+ }
+ 
+ #ifdef CONFIG_ACPI
+diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
+index fec38153355581..0c1b915d7efac8 100644
+--- a/arch/x86/kernel/paravirt.c
++++ b/arch/x86/kernel/paravirt.c
+@@ -100,6 +100,11 @@ int paravirt_disable_iospace(void)
+ 	return request_resource(&ioport_resource, &reserve_ioports);
+ }
+ 
++static noinstr void pv_native_safe_halt(void)
++{
++	native_safe_halt();
++}
++
+ #ifdef CONFIG_PARAVIRT_XXL
+ static noinstr void pv_native_write_cr2(unsigned long val)
+ {
+@@ -121,10 +126,6 @@ noinstr void pv_native_wbinvd(void)
+ 	native_wbinvd();
+ }
+ 
+-static noinstr void pv_native_safe_halt(void)
+-{
+-	native_safe_halt();
+-}
+ #endif
+ 
+ struct pv_info pv_info = {
+@@ -182,9 +183,11 @@ struct paravirt_patch_template pv_ops = {
+ 	.irq.save_fl		= __PV_IS_CALLEE_SAVE(pv_native_save_fl),
+ 	.irq.irq_disable	= __PV_IS_CALLEE_SAVE(pv_native_irq_disable),
+ 	.irq.irq_enable		= __PV_IS_CALLEE_SAVE(pv_native_irq_enable),
++#endif /* CONFIG_PARAVIRT_XXL */
++
++	/* Irq HLT ops. */
+ 	.irq.safe_halt		= pv_native_safe_halt,
+ 	.irq.halt		= native_halt,
+-#endif /* CONFIG_PARAVIRT_XXL */
+ 
+ 	/* Mmu ops. */
+ 	.mmu.flush_tlb_user	= native_flush_tlb_local,
+diff --git a/arch/x86/kernel/signal_32.c b/arch/x86/kernel/signal_32.c
+index ef654530bf5a93..98123ff10506c6 100644
+--- a/arch/x86/kernel/signal_32.c
++++ b/arch/x86/kernel/signal_32.c
+@@ -33,25 +33,55 @@
+ #include <asm/smap.h>
+ #include <asm/gsseg.h>
+ 
++/*
++ * The first GDT descriptor is reserved as 'NULL descriptor'.  As bits 0
++ * and 1 of a segment selector, i.e., the RPL bits, are NOT used to index
++ * GDT, selector values 0~3 all point to the NULL descriptor, thus values
++ * 0, 1, 2 and 3 are all valid NULL selector values.
++ *
++ * However IRET zeros ES, FS, GS, and DS segment registers if any of them
++ * is found to have any nonzero NULL selector value, which can be used by
++ * userspace in pre-FRED systems to spot any interrupt/exception by loading
++ * a nonzero NULL selector and waiting for it to become zero.  Before FRED
++ * there was nothing software could do to prevent such an information leak.
++ *
++ * ERETU, the only legitimate instruction for returning to userspace from
++ * the kernel under FRED, by design does NOT zero any segment register,
++ * avoiding this problematic behavior.
++ *
++ * As such, leave NULL selector values 0~3 unchanged.
++ */
++static inline u16 fixup_rpl(u16 sel)
++{
++	return sel <= 3 ? sel : sel | 3;
++}
++
+ #ifdef CONFIG_IA32_EMULATION
+ #include <asm/unistd_32_ia32.h>
+ 
+ static inline void reload_segments(struct sigcontext_32 *sc)
+ {
+-	unsigned int cur;
++	u16 cur;
+ 
++	/*
++	 * Reload fs and gs if they have changed in the signal
++	 * handler.  This does not handle long fs/gs base changes in
++	 * the handler, but does not clobber them at least in the
++	 * normal case.
++	 */
+ 	savesegment(gs, cur);
+-	if ((sc->gs | 0x03) != cur)
+-		load_gs_index(sc->gs | 0x03);
++	if (fixup_rpl(sc->gs) != cur)
++		load_gs_index(fixup_rpl(sc->gs));
+ 	savesegment(fs, cur);
+-	if ((sc->fs | 0x03) != cur)
+-		loadsegment(fs, sc->fs | 0x03);
++	if (fixup_rpl(sc->fs) != cur)
++		loadsegment(fs, fixup_rpl(sc->fs));
++
+ 	savesegment(ds, cur);
+-	if ((sc->ds | 0x03) != cur)
+-		loadsegment(ds, sc->ds | 0x03);
++	if (fixup_rpl(sc->ds) != cur)
++		loadsegment(ds, fixup_rpl(sc->ds));
+ 	savesegment(es, cur);
+-	if ((sc->es | 0x03) != cur)
+-		loadsegment(es, sc->es | 0x03);
++	if (fixup_rpl(sc->es) != cur)
++		loadsegment(es, fixup_rpl(sc->es));
+ }
+ 
+ #define sigset32_t			compat_sigset_t
+@@ -105,18 +135,12 @@ static bool ia32_restore_sigcontext(struct pt_regs *regs,
+ 	regs->orig_ax = -1;
+ 
+ #ifdef CONFIG_IA32_EMULATION
+-	/*
+-	 * Reload fs and gs if they have changed in the signal
+-	 * handler.  This does not handle long fs/gs base changes in
+-	 * the handler, but does not clobber them at least in the
+-	 * normal case.
+-	 */
+ 	reload_segments(&sc);
+ #else
+-	loadsegment(gs, sc.gs);
+-	regs->fs = sc.fs;
+-	regs->es = sc.es;
+-	regs->ds = sc.ds;
++	loadsegment(gs, fixup_rpl(sc.gs));
++	regs->fs = fixup_rpl(sc.fs);
++	regs->es = fixup_rpl(sc.es);
++	regs->ds = fixup_rpl(sc.ds);
+ #endif
+ 
+ 	return fpu__restore_sig(compat_ptr(sc.fpstate), 1);
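The fixup_rpl() rule is small enough to exercise standalone - selectors 0..3
all name the NULL descriptor and pass through untouched, everything else gets
its RPL forced to 3:

    #include <stdint.h>
    #include <stdio.h>

    static uint16_t fixup_rpl(uint16_t sel)
    {
            return sel <= 3 ? sel : sel | 3;
    }

    int main(void)
    {
            uint16_t samples[] = { 0, 1, 2, 3, 0x28, 0x2b };
            unsigned int i;

            for (i = 0; i < 6; i++)
                    printf("%#06x -> %#06x\n", (unsigned int)samples[i],
                           (unsigned int)fixup_rpl(samples[i]));
            return 0;
    }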
+diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
+index 9157b4485dedce..c92e43f2d0c4ec 100644
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -1047,8 +1047,8 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
+ 		}
+ 		break;
+ 	case 0xa: { /* Architectural Performance Monitoring */
+-		union cpuid10_eax eax;
+-		union cpuid10_edx edx;
++		union cpuid10_eax eax = { };
++		union cpuid10_edx edx = { };
+ 
+ 		if (!enable_pmu || !static_cpu_has(X86_FEATURE_ARCH_PERFMON)) {
+ 			entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
+@@ -1064,8 +1064,6 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
+ 
+ 		if (kvm_pmu_cap.version)
+ 			edx.split.anythread_deprecated = 1;
+-		edx.split.reserved1 = 0;
+-		edx.split.reserved2 = 0;
+ 
+ 		entry->eax = eax.full;
+ 		entry->ebx = kvm_pmu_cap.events_mask;
+@@ -1383,7 +1381,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
+ 		break;
+ 	/* AMD Extended Performance Monitoring and Debug */
+ 	case 0x80000022: {
+-		union cpuid_0x80000022_ebx ebx;
++		union cpuid_0x80000022_ebx ebx = { };
+ 
+ 		entry->ecx = entry->edx = 0;
+ 		if (!enable_pmu || !kvm_cpu_cap_has(X86_FEATURE_PERFMON_V2)) {
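The point of the '= { }' initializers is that assigning a few bitfields of a
union leaves the remaining bits undefined unless the object was
zero-initialized first. A toy demonstration (the union below is illustrative,
not the real cpuid10 layout):

    #include <stdint.h>
    #include <stdio.h>

    union toy_eax {
            struct {
                    uint32_t version  : 8;
                    uint32_t counters : 8;
                    uint32_t reserved : 16;
            } split;
            uint32_t full;
    };

    int main(void)
    {
            union toy_eax a = { 0 };   /* mirrors the patch's '= { }' */

            a.split.version  = 2;
            a.split.counters = 4;
            /* reserved bits are guaranteed zero only because of the
             * zero-initialization above */
            printf("full = %#010x\n", a.full);
            return 0;
    }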
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 45337a3fc03cd7..1a4ca471d63df6 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -11769,6 +11769,8 @@ int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
+ 	if (kvm_mpx_supported())
+ 		kvm_load_guest_fpu(vcpu);
+ 
++	kvm_vcpu_srcu_read_lock(vcpu);
++
+ 	r = kvm_apic_accept_events(vcpu);
+ 	if (r < 0)
+ 		goto out;
+@@ -11782,6 +11784,8 @@ int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
+ 		mp_state->mp_state = vcpu->arch.mp_state;
+ 
+ out:
++	kvm_vcpu_srcu_read_unlock(vcpu);
++
+ 	if (kvm_mpx_supported())
+ 		kvm_put_guest_fpu(vcpu);
+ 	vcpu_put(vcpu);
+diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
+index 44f7b2ea6a073f..69ceb967d73e9c 100644
+--- a/arch/x86/mm/pat/set_memory.c
++++ b/arch/x86/mm/pat/set_memory.c
+@@ -2422,7 +2422,7 @@ static int __set_pages_np(struct page *page, int numpages)
+ 				.pgd = NULL,
+ 				.numpages = numpages,
+ 				.mask_set = __pgprot(0),
+-				.mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW),
++				.mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY),
+ 				.flags = CPA_NO_CHECK_ALIAS };
+ 
+ 	/*
+@@ -2501,7 +2501,7 @@ int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
+ 		.pgd = pgd,
+ 		.numpages = numpages,
+ 		.mask_set = __pgprot(0),
+-		.mask_clr = __pgprot(~page_flags & (_PAGE_NX|_PAGE_RW)),
++		.mask_clr = __pgprot(~page_flags & (_PAGE_NX|_PAGE_RW|_PAGE_DIRTY)),
+ 		.flags = CPA_NO_CHECK_ALIAS,
+ 	};
+ 
+@@ -2544,7 +2544,7 @@ int __init kernel_unmap_pages_in_pgd(pgd_t *pgd, unsigned long address,
+ 		.pgd		= pgd,
+ 		.numpages	= numpages,
+ 		.mask_set	= __pgprot(0),
+-		.mask_clr	= __pgprot(_PAGE_PRESENT | _PAGE_RW),
++		.mask_clr	= __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY),
+ 		.flags		= CPA_NO_CHECK_ALIAS,
+ 	};
+ 
+diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
+index b4f3784f27e956..0c950bbca309ff 100644
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -70,6 +70,9 @@ EXPORT_SYMBOL(xen_start_flags);
+  */
+ struct shared_info *HYPERVISOR_shared_info = &xen_dummy_shared_info;
+ 
++/* Number of pages released from the initial allocation. */
++unsigned long xen_released_pages;
++
+ static __ref void xen_get_vendor(void)
+ {
+ 	init_cpu_devs();
+@@ -465,6 +468,13 @@ int __init arch_xen_unpopulated_init(struct resource **res)
+ 			xen_free_unpopulated_pages(1, &pg);
+ 		}
+ 
++		/*
++		 * Account for the region being in the physmap but unpopulated.
++		 * The value in xen_released_pages is used by the balloon
++		 * driver to know how much of the physmap is unpopulated and
++		 * set an accurate initial memory target.
++		 */
++		xen_released_pages += xen_extra_mem[i].n_pfns;
+ 		/* Zero so region is not also added to the balloon driver. */
+ 		xen_extra_mem[i].n_pfns = 0;
+ 	}
+diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
+index c3db71d96c434a..3823e52aef523c 100644
+--- a/arch/x86/xen/setup.c
++++ b/arch/x86/xen/setup.c
+@@ -37,9 +37,6 @@
+ 
+ #define GB(x) ((uint64_t)(x) * 1024 * 1024 * 1024)
+ 
+-/* Number of pages released from the initial allocation. */
+-unsigned long xen_released_pages;
+-
+ /* Memory map would allow PCI passthrough. */
+ bool xen_pv_pci_possible;
+ 
+diff --git a/drivers/accel/ivpu/ivpu_debugfs.c b/drivers/accel/ivpu/ivpu_debugfs.c
+index 8d50981594d153..eccedb0c8886bf 100644
+--- a/drivers/accel/ivpu/ivpu_debugfs.c
++++ b/drivers/accel/ivpu/ivpu_debugfs.c
+@@ -331,7 +331,7 @@ ivpu_force_recovery_fn(struct file *file, const char __user *user_buf, size_t si
+ 		return -EINVAL;
+ 
+ 	ret = ivpu_rpm_get(vdev);
+-	if (ret)
++	if (ret < 0)
+ 		return ret;
+ 
+ 	ivpu_pm_trigger_recovery(vdev, "debugfs");
+@@ -408,7 +408,7 @@ static int dct_active_set(void *data, u64 active_percent)
+ 		return -EINVAL;
+ 
+ 	ret = ivpu_rpm_get(vdev);
+-	if (ret)
++	if (ret < 0)
+ 		return ret;
+ 
+ 	if (active_percent)
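The 'ret < 0' change matters because runtime-PM get helpers in this family
can return a positive value on success (pm_runtime_get_sync(), for instance,
returns 1 if the device was already active), so 'if (ret)' misreads a success
as a failure. A toy illustration, with fake_rpm_get() as a stand-in for the
driver's helper:

    #include <stdio.h>

    /* Stand-in: returns 1 when the device was already active, 0 when it
     * had to be resumed - both are success. */
    static int fake_rpm_get(int already_active)
    {
            return already_active ? 1 : 0;
    }

    int main(void)
    {
            int ret = fake_rpm_get(1);

            if (ret)        /* old check: misreads success as failure */
                    printf("old check bails out, ret=%d\n", ret);
            if (ret < 0)    /* fixed check: only real errors bail out */
                    printf("new check bails out\n");
            else
                    printf("new check proceeds, ret=%d\n", ret);
            return 0;
    }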
+diff --git a/drivers/accel/ivpu/ivpu_ipc.c b/drivers/accel/ivpu/ivpu_ipc.c
+index 13c8a12162e89e..f0402dc847582a 100644
+--- a/drivers/accel/ivpu/ivpu_ipc.c
++++ b/drivers/accel/ivpu/ivpu_ipc.c
+@@ -299,7 +299,8 @@ ivpu_ipc_send_receive_internal(struct ivpu_device *vdev, struct vpu_jsm_msg *req
+ 	struct ivpu_ipc_consumer cons;
+ 	int ret;
+ 
+-	drm_WARN_ON(&vdev->drm, pm_runtime_status_suspended(vdev->drm.dev));
++	drm_WARN_ON(&vdev->drm, pm_runtime_status_suspended(vdev->drm.dev) &&
++		    pm_runtime_enabled(vdev->drm.dev));
+ 
+ 	ivpu_ipc_consumer_add(vdev, &cons, channel, NULL);
+ 
+diff --git a/drivers/accel/ivpu/ivpu_ms.c b/drivers/accel/ivpu/ivpu_ms.c
+index 2f9d37f5c208a9..a961002fe25b2b 100644
+--- a/drivers/accel/ivpu/ivpu_ms.c
++++ b/drivers/accel/ivpu/ivpu_ms.c
+@@ -4,6 +4,7 @@
+  */
+ 
+ #include <drm/drm_file.h>
++#include <linux/pm_runtime.h>
+ 
+ #include "ivpu_drv.h"
+ #include "ivpu_gem.h"
+@@ -44,6 +45,10 @@ int ivpu_ms_start_ioctl(struct drm_device *dev, void *data, struct drm_file *fil
+ 	    args->sampling_period_ns < MS_MIN_SAMPLE_PERIOD_NS)
+ 		return -EINVAL;
+ 
++	ret = ivpu_rpm_get(vdev);
++	if (ret < 0)
++		return ret;
++
+ 	mutex_lock(&file_priv->ms_lock);
+ 
+ 	if (get_instance_by_mask(file_priv, args->metric_group_mask)) {
+@@ -96,6 +101,8 @@ int ivpu_ms_start_ioctl(struct drm_device *dev, void *data, struct drm_file *fil
+ 	kfree(ms);
+ unlock:
+ 	mutex_unlock(&file_priv->ms_lock);
++
++	ivpu_rpm_put(vdev);
+ 	return ret;
+ }
+ 
+@@ -160,6 +167,10 @@ int ivpu_ms_get_data_ioctl(struct drm_device *dev, void *data, struct drm_file *
+ 	if (!args->metric_group_mask)
+ 		return -EINVAL;
+ 
++	ret = ivpu_rpm_get(vdev);
++	if (ret < 0)
++		return ret;
++
+ 	mutex_lock(&file_priv->ms_lock);
+ 
+ 	ms = get_instance_by_mask(file_priv, args->metric_group_mask);
+@@ -187,6 +198,7 @@ int ivpu_ms_get_data_ioctl(struct drm_device *dev, void *data, struct drm_file *
+ unlock:
+ 	mutex_unlock(&file_priv->ms_lock);
+ 
++	ivpu_rpm_put(vdev);
+ 	return ret;
+ }
+ 
+@@ -204,11 +216,17 @@ int ivpu_ms_stop_ioctl(struct drm_device *dev, void *data, struct drm_file *file
+ {
+ 	struct ivpu_file_priv *file_priv = file->driver_priv;
+ 	struct drm_ivpu_metric_streamer_stop *args = data;
++	struct ivpu_device *vdev = file_priv->vdev;
+ 	struct ivpu_ms_instance *ms;
++	int ret;
+ 
+ 	if (!args->metric_group_mask)
+ 		return -EINVAL;
+ 
++	ret = ivpu_rpm_get(vdev);
++	if (ret < 0)
++		return ret;
++
+ 	mutex_lock(&file_priv->ms_lock);
+ 
+ 	ms = get_instance_by_mask(file_priv, args->metric_group_mask);
+@@ -217,6 +235,7 @@ int ivpu_ms_stop_ioctl(struct drm_device *dev, void *data, struct drm_file *file
+ 
+ 	mutex_unlock(&file_priv->ms_lock);
+ 
++	ivpu_rpm_put(vdev);
+ 	return ms ? 0 : -EINVAL;
+ }
+ 
+@@ -281,6 +300,9 @@ int ivpu_ms_get_info_ioctl(struct drm_device *dev, void *data, struct drm_file *
+ void ivpu_ms_cleanup(struct ivpu_file_priv *file_priv)
+ {
+ 	struct ivpu_ms_instance *ms, *tmp;
++	struct ivpu_device *vdev = file_priv->vdev;
++
++	pm_runtime_get_sync(vdev->drm.dev);
+ 
+ 	mutex_lock(&file_priv->ms_lock);
+ 
+@@ -293,6 +315,8 @@ void ivpu_ms_cleanup(struct ivpu_file_priv *file_priv)
+ 		free_instance(file_priv, ms);
+ 
+ 	mutex_unlock(&file_priv->ms_lock);
++
++	pm_runtime_put_autosuspend(vdev->drm.dev);
+ }
+ 
+ void ivpu_ms_cleanup_all(struct ivpu_device *vdev)
+diff --git a/drivers/acpi/platform_profile.c b/drivers/acpi/platform_profile.c
+index d2f7fd7743a13d..11278f785526d4 100644
+--- a/drivers/acpi/platform_profile.c
++++ b/drivers/acpi/platform_profile.c
+@@ -22,8 +22,8 @@ static const char * const profile_names[] = {
+ };
+ static_assert(ARRAY_SIZE(profile_names) == PLATFORM_PROFILE_LAST);
+ 
+-static ssize_t platform_profile_choices_show(struct device *dev,
+-					struct device_attribute *attr,
++static ssize_t platform_profile_choices_show(struct kobject *kobj,
++					struct kobj_attribute *attr,
+ 					char *buf)
+ {
+ 	int len = 0;
+@@ -49,8 +49,8 @@ static ssize_t platform_profile_choices_show(struct device *dev,
+ 	return len;
+ }
+ 
+-static ssize_t platform_profile_show(struct device *dev,
+-					struct device_attribute *attr,
++static ssize_t platform_profile_show(struct kobject *kobj,
++					struct kobj_attribute *attr,
+ 					char *buf)
+ {
+ 	enum platform_profile_option profile = PLATFORM_PROFILE_BALANCED;
+@@ -77,8 +77,8 @@ static ssize_t platform_profile_show(struct device *dev,
+ 	return sysfs_emit(buf, "%s\n", profile_names[profile]);
+ }
+ 
+-static ssize_t platform_profile_store(struct device *dev,
+-			    struct device_attribute *attr,
++static ssize_t platform_profile_store(struct kobject *kobj,
++			    struct kobj_attribute *attr,
+ 			    const char *buf, size_t count)
+ {
+ 	int err, i;
+@@ -115,12 +115,12 @@ static ssize_t platform_profile_store(struct device *dev,
+ 	return count;
+ }
+ 
+-static DEVICE_ATTR_RO(platform_profile_choices);
+-static DEVICE_ATTR_RW(platform_profile);
++static struct kobj_attribute attr_platform_profile_choices = __ATTR_RO(platform_profile_choices);
++static struct kobj_attribute attr_platform_profile = __ATTR_RW(platform_profile);
+ 
+ static struct attribute *platform_profile_attrs[] = {
+-	&dev_attr_platform_profile_choices.attr,
+-	&dev_attr_platform_profile.attr,
++	&attr_platform_profile_choices.attr,
++	&attr_platform_profile.attr,
+ 	NULL
+ };
+ 
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index 45f63b09828a1a..650122deb480d0 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -63,6 +63,7 @@ enum board_ids {
+ 	board_ahci_pcs_quirk_no_devslp,
+ 	board_ahci_pcs_quirk_no_sntf,
+ 	board_ahci_yes_fbs,
++	board_ahci_yes_fbs_atapi_dma,
+ 
+ 	/* board IDs for specific chipsets in alphabetical order */
+ 	board_ahci_al,
+@@ -188,6 +189,14 @@ static const struct ata_port_info ahci_port_info[] = {
+ 		.udma_mask	= ATA_UDMA6,
+ 		.port_ops	= &ahci_ops,
+ 	},
++	[board_ahci_yes_fbs_atapi_dma] = {
++		AHCI_HFLAGS	(AHCI_HFLAG_YES_FBS |
++				 AHCI_HFLAG_ATAPI_DMA_QUIRK),
++		.flags		= AHCI_FLAG_COMMON,
++		.pio_mask	= ATA_PIO4,
++		.udma_mask	= ATA_UDMA6,
++		.port_ops	= &ahci_ops,
++	},
+ 	/* by chipsets */
+ 	[board_ahci_al] = {
+ 		AHCI_HFLAGS	(AHCI_HFLAG_NO_PMP | AHCI_HFLAG_NO_MSI),
+@@ -589,6 +598,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ 	  .driver_data = board_ahci_yes_fbs },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x91a3),
+ 	  .driver_data = board_ahci_yes_fbs },
++	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9215),
++	  .driver_data = board_ahci_yes_fbs_atapi_dma },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9230),
+ 	  .driver_data = board_ahci_yes_fbs },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9235),
+diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
+index 8f40f75ba08cff..10a5fe02f0a453 100644
+--- a/drivers/ata/ahci.h
++++ b/drivers/ata/ahci.h
+@@ -246,6 +246,7 @@ enum {
+ 	AHCI_HFLAG_NO_SXS		= BIT(26), /* SXS not supported */
+ 	AHCI_HFLAG_43BIT_ONLY		= BIT(27), /* 43bit DMA addr limit */
+ 	AHCI_HFLAG_INTEL_PCS_QUIRK	= BIT(28), /* apply Intel PCS quirk */
++	AHCI_HFLAG_ATAPI_DMA_QUIRK	= BIT(29), /* force ATAPI to use DMA */
+ 
+ 	/* ap->flags bits */
+ 
+diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
+index fdfa7b2662180b..a28ffe1e596918 100644
+--- a/drivers/ata/libahci.c
++++ b/drivers/ata/libahci.c
+@@ -1321,6 +1321,10 @@ static void ahci_dev_config(struct ata_device *dev)
+ {
+ 	struct ahci_host_priv *hpriv = dev->link->ap->host->private_data;
+ 
++	if ((dev->class == ATA_DEV_ATAPI) &&
++	    (hpriv->flags & AHCI_HFLAG_ATAPI_DMA_QUIRK))
++		dev->quirks |= ATA_QUIRK_ATAPI_MOD16_DMA;
++
+ 	if (hpriv->flags & AHCI_HFLAG_SECT255) {
+ 		dev->max_sectors = 255;
+ 		ata_dev_info(dev,
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index d956735e2a7645..0cb97181d10a9e 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -88,6 +88,7 @@ struct ata_force_param {
+ 	unsigned int	xfer_mask;
+ 	unsigned int	quirk_on;
+ 	unsigned int	quirk_off;
++	unsigned int	pflags_on;
+ 	u16		lflags_on;
+ 	u16		lflags_off;
+ };
+@@ -331,6 +332,35 @@ void ata_force_cbl(struct ata_port *ap)
+ 	}
+ }
+ 
++/**
++ *	ata_force_pflags - force port flags according to libata.force
++ *	@ap: ATA port of interest
++ *
++ *	Force port flags according to libata.force and whine about it.
++ *
++ *	LOCKING:
++ *	EH context.
++ */
++static void ata_force_pflags(struct ata_port *ap)
++{
++	int i;
++
++	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
++		const struct ata_force_ent *fe = &ata_force_tbl[i];
++
++		if (fe->port != -1 && fe->port != ap->print_id)
++			continue;
++
++		/* let pflags stack */
++		if (fe->param.pflags_on) {
++			ap->pflags |= fe->param.pflags_on;
++			ata_port_notice(ap,
++					"FORCE: port flag 0x%x forced -> 0x%x\n",
++					fe->param.pflags_on, ap->pflags);
++		}
++	}
++}
++
+ /**
+  *	ata_force_link_limits - force link limits according to libata.force
+  *	@link: ATA link of interest
+@@ -486,6 +516,7 @@ static void ata_force_quirks(struct ata_device *dev)
+ 	}
+ }
+ #else
++static inline void ata_force_pflags(struct ata_port *ap) { }
+ static inline void ata_force_link_limits(struct ata_link *link) { }
+ static inline void ata_force_xfermask(struct ata_device *dev) { }
+ static inline void ata_force_quirks(struct ata_device *dev) { }
+@@ -5460,6 +5491,8 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
+ #endif
+ 	ata_sff_port_init(ap);
+ 
++	ata_force_pflags(ap);
++
+ 	return ap;
+ }
+ EXPORT_SYMBOL_GPL(ata_port_alloc);
+@@ -6272,6 +6305,9 @@ EXPORT_SYMBOL_GPL(ata_platform_remove_one);
+ 	{ "no" #name,	.lflags_on	= (flags) },	\
+ 	{ #name,	.lflags_off	= (flags) }
+ 
++#define force_pflag_on(name, flags)			\
++	{ #name,	.pflags_on	= (flags) }
++
+ #define force_quirk_on(name, flag)			\
+ 	{ #name,	.quirk_on	= (flag) }
+ 
+@@ -6331,6 +6367,8 @@ static const struct ata_force_param force_tbl[] __initconst = {
+ 	force_lflag_on(rstonce,		ATA_LFLAG_RST_ONCE),
+ 	force_lflag_onoff(dbdelay,	ATA_LFLAG_NO_DEBOUNCE_DELAY),
+ 
++	force_pflag_on(external,	ATA_PFLAG_EXTERNAL),
++
+ 	force_quirk_onoff(ncq,		ATA_QUIRK_NONCQ),
+ 	force_quirk_onoff(ncqtrim,	ATA_QUIRK_NO_NCQ_TRIM),
+ 	force_quirk_onoff(ncqati,	ATA_QUIRK_NO_NCQ_ON_ATI),
+diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
+index 3b303d4ae37a01..16cd676eae1f9a 100644
+--- a/drivers/ata/libata-eh.c
++++ b/drivers/ata/libata-eh.c
+@@ -1542,8 +1542,15 @@ unsigned int atapi_eh_request_sense(struct ata_device *dev,
+ 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+ 	tf.command = ATA_CMD_PACKET;
+ 
+-	/* is it pointless to prefer PIO for "safety reasons"? */
+-	if (ap->flags & ATA_FLAG_PIO_DMA) {
++	/*
++	 * Do not use DMA if the connected device only supports PIO, even if the
++	 * port prefers PIO commands via DMA.
++	 *
++	 * Ideally, we should call atapi_check_dma() to check if it is safe for
++	 * the LLD to use DMA for REQUEST_SENSE, but we don't have a qc.
++	 * Since we can't check the command, perhaps we should only use PIO?
++	 */
++	if ((ap->flags & ATA_FLAG_PIO_DMA) && !(dev->flags & ATA_DFLAG_PIO)) {
+ 		tf.protocol = ATAPI_PROT_DMA;
+ 		tf.feature |= ATAPI_PKT_DMA;
+ 	} else {
+diff --git a/drivers/ata/pata_pxa.c b/drivers/ata/pata_pxa.c
+index 538bd3423d859d..1bdcd6ee741d36 100644
+--- a/drivers/ata/pata_pxa.c
++++ b/drivers/ata/pata_pxa.c
+@@ -223,10 +223,16 @@ static int pxa_ata_probe(struct platform_device *pdev)
+ 
+ 	ap->ioaddr.cmd_addr	= devm_ioremap(&pdev->dev, cmd_res->start,
+ 						resource_size(cmd_res));
++	if (!ap->ioaddr.cmd_addr)
++		return -ENOMEM;
+ 	ap->ioaddr.ctl_addr	= devm_ioremap(&pdev->dev, ctl_res->start,
+ 						resource_size(ctl_res));
++	if (!ap->ioaddr.ctl_addr)
++		return -ENOMEM;
+ 	ap->ioaddr.bmdma_addr	= devm_ioremap(&pdev->dev, dma_res->start,
+ 						resource_size(dma_res));
++	if (!ap->ioaddr.bmdma_addr)
++		return -ENOMEM;
+ 
+ 	/*
+ 	 * Adjust register offsets
+diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
+index a482741eb181ff..c3042eca6332df 100644
+--- a/drivers/ata/sata_sx4.c
++++ b/drivers/ata/sata_sx4.c
+@@ -1117,9 +1117,14 @@ static int pdc20621_prog_dimm0(struct ata_host *host)
+ 	mmio += PDC_CHIP0_OFS;
+ 
+ 	for (i = 0; i < ARRAY_SIZE(pdc_i2c_read_data); i++)
+-		pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
+-				  pdc_i2c_read_data[i].reg,
+-				  &spd0[pdc_i2c_read_data[i].ofs]);
++		if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
++				       pdc_i2c_read_data[i].reg,
++				       &spd0[pdc_i2c_read_data[i].ofs])) {
++			dev_err(host->dev,
++				"Failed in i2c read at index %d: device=%#x, reg=%#x\n",
++				i, PDC_DIMM0_SPD_DEV_ADDRESS, pdc_i2c_read_data[i].reg);
++			return -EIO;
++		}
+ 
+ 	data |= (spd0[4] - 8) | ((spd0[21] != 0) << 3) | ((spd0[3]-11) << 4);
+ 	data |= ((spd0[17] / 4) << 6) | ((spd0[5] / 2) << 7) |
+@@ -1284,6 +1289,8 @@ static unsigned int pdc20621_dimm_init(struct ata_host *host)
+ 
+ 	/* Programming DIMM0 Module Control Register (index_CID0:80h) */
+ 	size = pdc20621_prog_dimm0(host);
++	if (size < 0)
++		return size;
+ 	dev_dbg(host->dev, "Local DIMM Size = %dMB\n", size);
+ 
+ 	/* Programming DIMM Module Global Control Register (index_CID0:88h) */
+diff --git a/drivers/auxdisplay/hd44780.c b/drivers/auxdisplay/hd44780.c
+index 025dc6855cb253..41807ce363399d 100644
+--- a/drivers/auxdisplay/hd44780.c
++++ b/drivers/auxdisplay/hd44780.c
+@@ -313,7 +313,7 @@ static int hd44780_probe(struct platform_device *pdev)
+ fail3:
+ 	kfree(hd);
+ fail2:
+-	kfree(lcd);
++	charlcd_free(lcd);
+ fail1:
+ 	kfree(hdc);
+ 	return ret;
+@@ -328,7 +328,7 @@ static void hd44780_remove(struct platform_device *pdev)
+ 	kfree(hdc->hd44780);
+ 	kfree(lcd->drvdata);
+ 
+-	kfree(lcd);
++	charlcd_free(lcd);
+ }
+ 
+ static const struct of_device_id hd44780_of_match[] = {
+diff --git a/drivers/base/devres.c b/drivers/base/devres.c
+index 2152eec0c1352c..68224f2f83fff2 100644
+--- a/drivers/base/devres.c
++++ b/drivers/base/devres.c
+@@ -687,6 +687,13 @@ int devres_release_group(struct device *dev, void *id)
+ 		spin_unlock_irqrestore(&dev->devres_lock, flags);
+ 
+ 		release_nodes(dev, &todo);
++	} else if (list_empty(&dev->devres_head)) {
++		/*
++		 * dev is probably dying via devres_release_all(): groups
++		 * have already been removed and are on the process of
++		 * being released - don't touch and don't warn.
++		 */
++		spin_unlock_irqrestore(&dev->devres_lock, flags);
+ 	} else {
+ 		WARN_ON(1);
+ 		spin_unlock_irqrestore(&dev->devres_lock, flags);
+diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
+index 79b7bd8bfd4584..38b9e485e520d5 100644
+--- a/drivers/block/ublk_drv.c
++++ b/drivers/block/ublk_drv.c
+@@ -681,22 +681,44 @@ static int ublk_max_cmd_buf_size(void)
+ 	return __ublk_queue_cmd_buf_size(UBLK_MAX_QUEUE_DEPTH);
+ }
+ 
+-static inline bool ublk_queue_can_use_recovery_reissue(
+-		struct ublk_queue *ubq)
++/*
++ * Should I/O outstanding to the ublk server be reissued when the server
++ * exits? If not, outstanding I/O will get errors.
++ */
++static inline bool ublk_nosrv_should_reissue_outstanding(struct ublk_device *ub)
+ {
+-	return (ubq->flags & UBLK_F_USER_RECOVERY) &&
+-			(ubq->flags & UBLK_F_USER_RECOVERY_REISSUE);
++	return (ub->dev_info.flags & UBLK_F_USER_RECOVERY) &&
++	       (ub->dev_info.flags & UBLK_F_USER_RECOVERY_REISSUE);
+ }
+ 
+-static inline bool ublk_queue_can_use_recovery(
+-		struct ublk_queue *ubq)
++/*
++ * Should I/O issued while there is no ublk server be queued? If not,
++ * I/O issued while there is no ublk server will get errors.
++ */
++static inline bool ublk_nosrv_dev_should_queue_io(struct ublk_device *ub)
++{
++	return ub->dev_info.flags & UBLK_F_USER_RECOVERY;
++}
++
++/*
++ * Same as ublk_nosrv_dev_should_queue_io, but uses a queue-local copy
++ * of the device flags for a smaller cache footprint - better for fast
++ * paths.
++ */
++static inline bool ublk_nosrv_should_queue_io(struct ublk_queue *ubq)
+ {
+ 	return ubq->flags & UBLK_F_USER_RECOVERY;
+ }
+ 
+-static inline bool ublk_can_use_recovery(struct ublk_device *ub)
++/*
++ * Should ublk devices be stopped (i.e. no recovery possible) when the
++ * ublk server exits? If not, devices can be used again by a future
++ * incarnation of a ublk server via the start_recovery/end_recovery
++ * commands.
++ */
++static inline bool ublk_nosrv_should_stop_dev(struct ublk_device *ub)
+ {
+-	return ub->dev_info.flags & UBLK_F_USER_RECOVERY;
++	return !(ub->dev_info.flags & UBLK_F_USER_RECOVERY);
+ }
+ 
+ static void ublk_free_disk(struct gendisk *disk)
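The renamed predicates reduce to a two-flag decision table. A standalone
sketch (the bit positions mirror the UBLK_F_USER_RECOVERY{,_REISSUE} UAPI
flags but are only illustrative here):

    #include <stdbool.h>
    #include <stdio.h>

    #define F_RECOVERY (1u << 3)
    #define F_REISSUE  (1u << 4)

    static bool should_reissue(unsigned int f)
    {
            return (f & F_RECOVERY) && (f & F_REISSUE);
    }

    static bool should_queue(unsigned int f) { return f & F_RECOVERY; }
    static bool should_stop(unsigned int f)  { return !(f & F_RECOVERY); }

    int main(void)
    {
            unsigned int cases[] = { 0, F_RECOVERY, F_RECOVERY | F_REISSUE };
            unsigned int i;

            for (i = 0; i < 3; i++)
                    printf("flags=%#04x stop=%d queue=%d reissue=%d\n",
                           cases[i], should_stop(cases[i]),
                           should_queue(cases[i]), should_reissue(cases[i]));
            return 0;
    }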
+@@ -1059,6 +1081,25 @@ static void ublk_complete_rq(struct kref *ref)
+ 	__ublk_complete_rq(req);
+ }
+ 
++static void ublk_do_fail_rq(struct request *req)
++{
++	struct ublk_queue *ubq = req->mq_hctx->driver_data;
++
++	if (ublk_nosrv_should_reissue_outstanding(ubq->dev))
++		blk_mq_requeue_request(req, false);
++	else
++		__ublk_complete_rq(req);
++}
++
++static void ublk_fail_rq_fn(struct kref *ref)
++{
++	struct ublk_rq_data *data = container_of(ref, struct ublk_rq_data,
++			ref);
++	struct request *req = blk_mq_rq_from_pdu(data);
++
++	ublk_do_fail_rq(req);
++}
++
+ /*
+  * Since __ublk_rq_task_work always fails requests immediately during
+  * exiting, __ublk_fail_req() is only called from abort context during
+@@ -1072,10 +1113,13 @@ static void __ublk_fail_req(struct ublk_queue *ubq, struct ublk_io *io,
+ {
+ 	WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_ACTIVE);
+ 
+-	if (ublk_queue_can_use_recovery_reissue(ubq))
+-		blk_mq_requeue_request(req, false);
+-	else
+-		ublk_put_req_ref(ubq, req);
++	if (ublk_need_req_ref(ubq)) {
++		struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
++
++		kref_put(&data->ref, ublk_fail_rq_fn);
++	} else {
++		ublk_do_fail_rq(req);
++	}
+ }
+ 
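The shape of this fix: when a request is reference-counted, the failure
action must run from the release callback once the last reference drops, not
inline while I/O paths may still hold references. A userspace sketch with a
plain counter standing in for struct kref:

    #include <stdio.h>

    struct req { int refs; };

    static void fail_rq(struct req *r)
    {
            (void)r;
            printf("failure action runs now\n");
    }

    /* Plain-counter stand-in for kref_put(&data->ref, ublk_fail_rq_fn) */
    static void req_put(struct req *r, void (*release)(struct req *))
    {
            if (--r->refs == 0)
                    release(r);
    }

    int main(void)
    {
            struct req r = { .refs = 2 };

            req_put(&r, fail_rq);   /* still referenced: nothing happens */
            req_put(&r, fail_rq);   /* last reference: failure action runs */
            return 0;
    }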
+ static void ubq_complete_io_cmd(struct ublk_io *io, int res,
+@@ -1100,7 +1144,7 @@ static inline void __ublk_abort_rq(struct ublk_queue *ubq,
+ 		struct request *rq)
+ {
+ 	/* We cannot process this rq so just requeue it. */
+-	if (ublk_queue_can_use_recovery(ubq))
++	if (ublk_nosrv_dev_should_queue_io(ubq->dev))
+ 		blk_mq_requeue_request(rq, false);
+ 	else
+ 		blk_mq_end_request(rq, BLK_STS_IOERR);
+@@ -1245,10 +1289,10 @@ static enum blk_eh_timer_return ublk_timeout(struct request *rq)
+ 		struct ublk_device *ub = ubq->dev;
+ 
+ 		if (ublk_abort_requests(ub, ubq)) {
+-			if (ublk_can_use_recovery(ub))
+-				schedule_work(&ub->quiesce_work);
+-			else
++			if (ublk_nosrv_should_stop_dev(ub))
+ 				schedule_work(&ub->stop_work);
++			else
++				schedule_work(&ub->quiesce_work);
+ 		}
+ 		return BLK_EH_DONE;
+ 	}
+@@ -1277,7 +1321,7 @@ static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
+ 	 * Note: force_abort is guaranteed to be seen because it is set
+ 	 * before request queue is unqiuesced.
+ 	 */
+-	if (ublk_queue_can_use_recovery(ubq) && unlikely(ubq->force_abort))
++	if (ublk_nosrv_should_queue_io(ubq) && unlikely(ubq->force_abort))
+ 		return BLK_STS_IOERR;
+ 
+ 	if (unlikely(ubq->canceling)) {
+@@ -1517,10 +1561,10 @@ static void ublk_uring_cmd_cancel_fn(struct io_uring_cmd *cmd,
+ 	ublk_cancel_cmd(ubq, io, issue_flags);
+ 
+ 	if (need_schedule) {
+-		if (ublk_can_use_recovery(ub))
+-			schedule_work(&ub->quiesce_work);
+-		else
++		if (ublk_nosrv_should_stop_dev(ub))
+ 			schedule_work(&ub->stop_work);
++		else
++			schedule_work(&ub->quiesce_work);
+ 	}
+ }
+ 
+@@ -1640,7 +1684,7 @@ static void ublk_stop_dev(struct ublk_device *ub)
+ 	mutex_lock(&ub->mutex);
+ 	if (ub->dev_info.state == UBLK_S_DEV_DEAD)
+ 		goto unlock;
+-	if (ublk_can_use_recovery(ub)) {
++	if (ublk_nosrv_dev_should_queue_io(ub)) {
+ 		if (ub->dev_info.state == UBLK_S_DEV_LIVE)
+ 			__ublk_quiesce_dev(ub);
+ 		ublk_unquiesce_dev(ub);
+@@ -2738,7 +2782,7 @@ static int ublk_ctrl_start_recovery(struct ublk_device *ub,
+ 	int i;
+ 
+ 	mutex_lock(&ub->mutex);
+-	if (!ublk_can_use_recovery(ub))
++	if (ublk_nosrv_should_stop_dev(ub))
+ 		goto out_unlock;
+ 	if (!ub->nr_queues_ready)
+ 		goto out_unlock;
+@@ -2791,7 +2835,7 @@ static int ublk_ctrl_end_recovery(struct ublk_device *ub,
+ 			__func__, ub->dev_info.nr_hw_queues, header->dev_id);
+ 
+ 	mutex_lock(&ub->mutex);
+-	if (!ublk_can_use_recovery(ub))
++	if (ublk_nosrv_should_stop_dev(ub))
+ 		goto out_unlock;
+ 
+ 	if (ub->dev_info.state != UBLK_S_DEV_QUIESCED) {
+diff --git a/drivers/bluetooth/btintel_pcie.c b/drivers/bluetooth/btintel_pcie.c
+index 53f6b4f76bccdd..ab465e13c1f60f 100644
+--- a/drivers/bluetooth/btintel_pcie.c
++++ b/drivers/bluetooth/btintel_pcie.c
+@@ -36,6 +36,7 @@
+ /* Intel Bluetooth PCIe device id table */
+ static const struct pci_device_id btintel_pcie_table[] = {
+ 	{ BTINTEL_PCI_DEVICE(0xA876, PCI_ANY_ID) },
++	{ BTINTEL_PCI_DEVICE(0xE476, PCI_ANY_ID) },
+ 	{ 0 }
+ };
+ MODULE_DEVICE_TABLE(pci, btintel_pcie_table);
+diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
+index 04d02c746ec0fd..dd2c0485b9848d 100644
+--- a/drivers/bluetooth/btqca.c
++++ b/drivers/bluetooth/btqca.c
+@@ -785,6 +785,7 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
+ 		   const char *firmware_name)
+ {
+ 	struct qca_fw_config config = {};
++	const char *variant = "";
+ 	int err;
+ 	u8 rom_ver = 0;
+ 	u32 soc_ver;
+@@ -879,13 +880,11 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
+ 		case QCA_WCN3990:
+ 		case QCA_WCN3991:
+ 		case QCA_WCN3998:
+-			if (le32_to_cpu(ver.soc_id) == QCA_WCN3991_SOC_ID) {
+-				snprintf(config.fwname, sizeof(config.fwname),
+-					 "qca/crnv%02xu.bin", rom_ver);
+-			} else {
+-				snprintf(config.fwname, sizeof(config.fwname),
+-					 "qca/crnv%02x.bin", rom_ver);
+-			}
++			if (le32_to_cpu(ver.soc_id) == QCA_WCN3991_SOC_ID)
++				variant = "u";
++
++			snprintf(config.fwname, sizeof(config.fwname),
++				 "qca/crnv%02x%s.bin", rom_ver, variant);
+ 			break;
+ 		case QCA_WCN3988:
+ 			snprintf(config.fwname, sizeof(config.fwname),
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 3a0b9dc98707f5..151054a718522a 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -626,6 +626,10 @@ static const struct usb_device_id quirks_table[] = {
+ 						     BTUSB_WIDEBAND_SPEECH },
+ 	{ USB_DEVICE(0x0489, 0xe102), .driver_info = BTUSB_MEDIATEK |
+ 						     BTUSB_WIDEBAND_SPEECH },
++	{ USB_DEVICE(0x0489, 0xe152), .driver_info = BTUSB_MEDIATEK |
++						     BTUSB_WIDEBAND_SPEECH },
++	{ USB_DEVICE(0x0489, 0xe153), .driver_info = BTUSB_MEDIATEK |
++						     BTUSB_WIDEBAND_SPEECH },
+ 	{ USB_DEVICE(0x04ca, 0x3804), .driver_info = BTUSB_MEDIATEK |
+ 						     BTUSB_WIDEBAND_SPEECH },
+ 	{ USB_DEVICE(0x04ca, 0x38e4), .driver_info = BTUSB_MEDIATEK |
+diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
+index 395d66e32a2ea9..2f322f890b81f2 100644
+--- a/drivers/bluetooth/hci_ldisc.c
++++ b/drivers/bluetooth/hci_ldisc.c
+@@ -102,7 +102,8 @@ static inline struct sk_buff *hci_uart_dequeue(struct hci_uart *hu)
+ 	if (!skb) {
+ 		percpu_down_read(&hu->proto_lock);
+ 
+-		if (test_bit(HCI_UART_PROTO_READY, &hu->flags))
++		if (test_bit(HCI_UART_PROTO_READY, &hu->flags) ||
++		    test_bit(HCI_UART_PROTO_INIT, &hu->flags))
+ 			skb = hu->proto->dequeue(hu);
+ 
+ 		percpu_up_read(&hu->proto_lock);
+@@ -124,7 +125,8 @@ int hci_uart_tx_wakeup(struct hci_uart *hu)
+ 	if (!percpu_down_read_trylock(&hu->proto_lock))
+ 		return 0;
+ 
+-	if (!test_bit(HCI_UART_PROTO_READY, &hu->flags))
++	if (!test_bit(HCI_UART_PROTO_READY, &hu->flags) &&
++	    !test_bit(HCI_UART_PROTO_INIT, &hu->flags))
+ 		goto no_schedule;
+ 
+ 	set_bit(HCI_UART_TX_WAKEUP, &hu->tx_state);
+@@ -278,7 +280,8 @@ static int hci_uart_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
+ 
+ 	percpu_down_read(&hu->proto_lock);
+ 
+-	if (!test_bit(HCI_UART_PROTO_READY, &hu->flags)) {
++	if (!test_bit(HCI_UART_PROTO_READY, &hu->flags) &&
++	    !test_bit(HCI_UART_PROTO_INIT, &hu->flags)) {
+ 		percpu_up_read(&hu->proto_lock);
+ 		return -EUNATCH;
+ 	}
+@@ -585,7 +588,8 @@ static void hci_uart_tty_wakeup(struct tty_struct *tty)
+ 	if (tty != hu->tty)
+ 		return;
+ 
+-	if (test_bit(HCI_UART_PROTO_READY, &hu->flags))
++	if (test_bit(HCI_UART_PROTO_READY, &hu->flags) ||
++	    test_bit(HCI_UART_PROTO_INIT, &hu->flags))
+ 		hci_uart_tx_wakeup(hu);
+ }
+ 
+@@ -611,7 +615,8 @@ static void hci_uart_tty_receive(struct tty_struct *tty, const u8 *data,
+ 
+ 	percpu_down_read(&hu->proto_lock);
+ 
+-	if (!test_bit(HCI_UART_PROTO_READY, &hu->flags)) {
++	if (!test_bit(HCI_UART_PROTO_READY, &hu->flags) &&
++	    !test_bit(HCI_UART_PROTO_INIT, &hu->flags)) {
+ 		percpu_up_read(&hu->proto_lock);
+ 		return;
+ 	}
+@@ -707,12 +712,16 @@ static int hci_uart_set_proto(struct hci_uart *hu, int id)
+ 
+ 	hu->proto = p;
+ 
++	set_bit(HCI_UART_PROTO_INIT, &hu->flags);
++
+ 	err = hci_uart_register_dev(hu);
+ 	if (err) {
+ 		return err;
+ 	}
+ 
+ 	set_bit(HCI_UART_PROTO_READY, &hu->flags);
++	clear_bit(HCI_UART_PROTO_INIT, &hu->flags);
++
+ 	return 0;
+ }
+ 
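HCI_UART_PROTO_INIT opens a window during registration in which the proto ops
may already be used before PROTO_READY is set. A minimal model of the gate
the data-path helpers now apply (the mask values are illustrative; the driver
uses test_bit() on the bit numbers):

    #include <stdbool.h>
    #include <stdio.h>

    #define PROTO_READY (1u << 2)
    #define PROTO_INIT  (1u << 4)

    static bool may_use_proto(unsigned int flags)
    {
            return (flags & PROTO_READY) || (flags & PROTO_INIT);
    }

    int main(void)
    {
            unsigned int flags = 0;

            printf("before set_proto: %d\n", may_use_proto(flags));
            flags |= PROTO_INIT;    /* registration window opens */
            printf("during init:      %d\n", may_use_proto(flags));
            flags |= PROTO_READY;
            flags &= ~PROTO_INIT;   /* window closes, fully ready */
            printf("after ready:      %d\n", may_use_proto(flags));
            return 0;
    }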
+diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
+index 37fddf6055bebb..1837622ea625a8 100644
+--- a/drivers/bluetooth/hci_qca.c
++++ b/drivers/bluetooth/hci_qca.c
+@@ -2353,6 +2353,7 @@ static int qca_serdev_probe(struct serdev_device *serdev)
+ 	switch (qcadev->btsoc_type) {
+ 	case QCA_WCN6855:
+ 	case QCA_WCN7850:
++	case QCA_WCN6750:
+ 		if (!device_property_present(&serdev->dev, "enable-gpios")) {
+ 			/*
+ 			 * Backward compatibility with old DT sources. If the
+@@ -2372,7 +2373,6 @@ static int qca_serdev_probe(struct serdev_device *serdev)
+ 	case QCA_WCN3990:
+ 	case QCA_WCN3991:
+ 	case QCA_WCN3998:
+-	case QCA_WCN6750:
+ 		qcadev->bt_power->dev = &serdev->dev;
+ 		err = qca_init_regulators(qcadev->bt_power, data->vregs,
+ 					  data->num_vregs);
+diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
+index fbf3079b92a533..5ea5dd80e297c7 100644
+--- a/drivers/bluetooth/hci_uart.h
++++ b/drivers/bluetooth/hci_uart.h
+@@ -90,6 +90,7 @@ struct hci_uart {
+ #define HCI_UART_REGISTERED		1
+ #define HCI_UART_PROTO_READY		2
+ #define HCI_UART_NO_SUSPEND_NOTIFIER	3
++#define HCI_UART_PROTO_INIT		4
+ 
+ /* TX states  */
+ #define HCI_UART_SENDING	1
+diff --git a/drivers/bus/mhi/host/main.c b/drivers/bus/mhi/host/main.c
+index 4de75674f19350..aa8a0ef697c779 100644
+--- a/drivers/bus/mhi/host/main.c
++++ b/drivers/bus/mhi/host/main.c
+@@ -1207,11 +1207,16 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
+ 	struct mhi_ring_element *mhi_tre;
+ 	struct mhi_buf_info *buf_info;
+ 	int eot, eob, chain, bei;
+-	int ret;
++	int ret = 0;
+ 
+ 	/* Protect accesses for reading and incrementing WP */
+ 	write_lock_bh(&mhi_chan->lock);
+ 
++	if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) {
++		ret = -ENODEV;
++		goto out;
++	}
++
+ 	buf_ring = &mhi_chan->buf_ring;
+ 	tre_ring = &mhi_chan->tre_ring;
+ 
+@@ -1229,10 +1234,8 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
+ 
+ 	if (!info->pre_mapped) {
+ 		ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
+-		if (ret) {
+-			write_unlock_bh(&mhi_chan->lock);
+-			return ret;
+-		}
++		if (ret)
++			goto out;
+ 	}
+ 
+ 	eob = !!(flags & MHI_EOB);
+@@ -1250,9 +1253,10 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
+ 	mhi_add_ring_element(mhi_cntrl, tre_ring);
+ 	mhi_add_ring_element(mhi_cntrl, buf_ring);
+ 
++out:
+ 	write_unlock_bh(&mhi_chan->lock);
+ 
+-	return 0;
++	return ret;
+ }
+ 
+ int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
+diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
+index 7df7abaf3e526b..e25daf2396d37b 100644
+--- a/drivers/char/tpm/tpm-chip.c
++++ b/drivers/char/tpm/tpm-chip.c
+@@ -168,6 +168,11 @@ int tpm_try_get_ops(struct tpm_chip *chip)
+ 		goto out_ops;
+ 
+ 	mutex_lock(&chip->tpm_mutex);
++
++	/* tpm_chip_start may issue IO that is denied while suspended */
++	if (chip->flags & TPM_CHIP_FLAG_SUSPENDED)
++		goto out_lock;
++
+ 	rc = tpm_chip_start(chip);
+ 	if (rc)
+ 		goto out_lock;
+@@ -300,6 +305,7 @@ int tpm_class_shutdown(struct device *dev)
+ 	down_write(&chip->ops_sem);
+ 	if (chip->flags & TPM_CHIP_FLAG_TPM2) {
+ 		if (!tpm_chip_start(chip)) {
++			tpm2_end_auth_session(chip);
+ 			tpm2_shutdown(chip, TPM2_SU_CLEAR);
+ 			tpm_chip_stop(chip);
+ 		}
+diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
+index b1daa0d7b341b1..f62f7871edbdb0 100644
+--- a/drivers/char/tpm/tpm-interface.c
++++ b/drivers/char/tpm/tpm-interface.c
+@@ -445,18 +445,11 @@ int tpm_get_random(struct tpm_chip *chip, u8 *out, size_t max)
+ 	if (!chip)
+ 		return -ENODEV;
+ 
+-	/* Give back zero bytes, as TPM chip has not yet fully resumed: */
+-	if (chip->flags & TPM_CHIP_FLAG_SUSPENDED) {
+-		rc = 0;
+-		goto out;
+-	}
+-
+ 	if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ 		rc = tpm2_get_random(chip, out, max);
+ 	else
+ 		rc = tpm1_get_random(chip, out, max);
+ 
+-out:
+ 	tpm_put_ops(chip);
+ 	return rc;
+ }
+diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
+index fdef214b9f6bff..ed0d3d8449b306 100644
+--- a/drivers/char/tpm/tpm_tis_core.c
++++ b/drivers/char/tpm/tpm_tis_core.c
+@@ -114,11 +114,10 @@ static int wait_for_tpm_stat(struct tpm_chip *chip, u8 mask,
+ 		return 0;
+ 	/* process status changes without irq support */
+ 	do {
++		usleep_range(priv->timeout_min, priv->timeout_max);
+ 		status = chip->ops->status(chip);
+ 		if ((status & mask) == mask)
+ 			return 0;
+-		usleep_range(priv->timeout_min,
+-			     priv->timeout_max);
+ 	} while (time_before(jiffies, stop));
+ 	return -ETIME;
+ }
+@@ -464,7 +463,10 @@ static int tpm_tis_send_data(struct tpm_chip *chip, const u8 *buf, size_t len)
+ 
+ 		if (wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c,
+ 					&priv->int_queue, false) < 0) {
+-			rc = -ETIME;
++			if (test_bit(TPM_TIS_STATUS_VALID_RETRY, &priv->flags))
++				rc = -EAGAIN;
++			else
++				rc = -ETIME;
+ 			goto out_err;
+ 		}
+ 		status = tpm_tis_status(chip);
+@@ -481,7 +483,10 @@ static int tpm_tis_send_data(struct tpm_chip *chip, const u8 *buf, size_t len)
+ 
+ 	if (wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c,
+ 				&priv->int_queue, false) < 0) {
+-		rc = -ETIME;
++		if (test_bit(TPM_TIS_STATUS_VALID_RETRY, &priv->flags))
++			rc = -EAGAIN;
++		else
++			rc = -ETIME;
+ 		goto out_err;
+ 	}
+ 	status = tpm_tis_status(chip);
+@@ -546,9 +551,11 @@ static int tpm_tis_send_main(struct tpm_chip *chip, const u8 *buf, size_t len)
+ 		if (rc >= 0)
+ 			/* Data transfer done successfully */
+ 			break;
+-		else if (rc != -EIO)
++		else if (rc != -EAGAIN && rc != -EIO)
+ 			/* Data transfer failed, not recoverable */
+ 			return rc;
++
++		usleep_range(priv->timeout_min, priv->timeout_max);
+ 	}
+ 
+ 	/* go and do it */
+@@ -1144,6 +1151,9 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
+ 		priv->timeout_max = TIS_TIMEOUT_MAX_ATML;
+ 	}
+ 
++	if (priv->manufacturer_id == TPM_VID_IFX)
++		set_bit(TPM_TIS_STATUS_VALID_RETRY, &priv->flags);
++
+ 	if (is_bsw()) {
+ 		priv->ilb_base_addr = ioremap(INTEL_LEGACY_BLK_BASE_ADDR,
+ 					ILB_REMAP_SIZE);
+diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h
+index 690ad8e9b73190..970d02c337c7f1 100644
+--- a/drivers/char/tpm/tpm_tis_core.h
++++ b/drivers/char/tpm/tpm_tis_core.h
+@@ -89,6 +89,7 @@ enum tpm_tis_flags {
+ 	TPM_TIS_INVALID_STATUS		= 1,
+ 	TPM_TIS_DEFAULT_CANCELLATION	= 2,
+ 	TPM_TIS_IRQ_TESTED		= 3,
++	TPM_TIS_STATUS_VALID_RETRY	= 4,
+ };
+ 
+ struct tpm_tis_data {
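The Infineon workaround turns a failed send into a retry: -EAGAIN now loops
back after a short delay instead of aborting like -ETIME. A toy version of
the retry loop (send_data() is a stand-in that succeeds on the third
attempt):

    #include <errno.h>
    #include <stdio.h>

    static int attempts;

    /* Stand-in for tpm_tis_send_data(): fails twice, then succeeds. */
    static int send_data(void)
    {
            return ++attempts < 3 ? -EAGAIN : 0;
    }

    int main(void)
    {
            int rc = -EAGAIN;
            int try;

            for (try = 0; try < 5; try++) {
                    rc = send_data();
                    if (rc >= 0)
                            break;          /* transferred */
                    if (rc != -EAGAIN && rc != -EIO)
                            return 1;       /* not recoverable */
                    /* the real driver sleeps usleep_range(min, max) here */
            }
            printf("rc=%d after %d attempts\n", rc, attempts);
            return 0;
    }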
+diff --git a/drivers/clk/qcom/clk-branch.c b/drivers/clk/qcom/clk-branch.c
+index 229480c5b075a0..0f10090d4ae681 100644
+--- a/drivers/clk/qcom/clk-branch.c
++++ b/drivers/clk/qcom/clk-branch.c
+@@ -28,7 +28,7 @@ static bool clk_branch_in_hwcg_mode(const struct clk_branch *br)
+ 
+ static bool clk_branch_check_halt(const struct clk_branch *br, bool enabling)
+ {
+-	bool invert = (br->halt_check == BRANCH_HALT_ENABLE);
++	bool invert = (br->halt_check & BRANCH_HALT_ENABLE);
+ 	u32 val;
+ 
+ 	regmap_read(br->clkr.regmap, br->halt_reg, &val);
+@@ -44,7 +44,7 @@ static bool clk_branch2_check_halt(const struct clk_branch *br, bool enabling)
+ {
+ 	u32 val;
+ 	u32 mask;
+-	bool invert = (br->halt_check == BRANCH_HALT_ENABLE);
++	bool invert = (br->halt_check & BRANCH_HALT_ENABLE);
+ 
+ 	mask = CBCR_NOC_FSM_STATUS;
+ 	mask |= CBCR_CLK_OFF;
+diff --git a/drivers/clk/qcom/gdsc.c b/drivers/clk/qcom/gdsc.c
+index fa5fe4c2a2ee77..208fc430ec98f1 100644
+--- a/drivers/clk/qcom/gdsc.c
++++ b/drivers/clk/qcom/gdsc.c
+@@ -292,6 +292,9 @@ static int gdsc_enable(struct generic_pm_domain *domain)
+ 	 */
+ 	udelay(1);
+ 
++	if (sc->flags & RETAIN_FF_ENABLE)
++		gdsc_retain_ff_on(sc);
++
+ 	/* Turn on HW trigger mode if supported */
+ 	if (sc->flags & HW_CTRL) {
+ 		ret = gdsc_hwctrl(sc, true);
+@@ -308,9 +311,6 @@ static int gdsc_enable(struct generic_pm_domain *domain)
+ 		udelay(1);
+ 	}
+ 
+-	if (sc->flags & RETAIN_FF_ENABLE)
+-		gdsc_retain_ff_on(sc);
+-
+ 	return 0;
+ }
+ 
+@@ -457,13 +457,6 @@ static int gdsc_init(struct gdsc *sc)
+ 				goto err_disable_supply;
+ 		}
+ 
+-		/* Turn on HW trigger mode if supported */
+-		if (sc->flags & HW_CTRL) {
+-			ret = gdsc_hwctrl(sc, true);
+-			if (ret < 0)
+-				goto err_disable_supply;
+-		}
+-
+ 		/*
+ 		 * Make sure the retain bit is set if the GDSC is already on,
+ 		 * otherwise we end up turning off the GDSC and destroying all
+@@ -471,6 +464,14 @@ static int gdsc_init(struct gdsc *sc)
+ 		 */
+ 		if (sc->flags & RETAIN_FF_ENABLE)
+ 			gdsc_retain_ff_on(sc);
++
++		/* Turn on HW trigger mode if supported */
++		if (sc->flags & HW_CTRL) {
++			ret = gdsc_hwctrl(sc, true);
++			if (ret < 0)
++				goto err_disable_supply;
++		}
++
+ 	} else if (sc->flags & ALWAYS_ON) {
+ 		/* If ALWAYS_ON GDSCs are not ON, turn them ON */
+ 		gdsc_enable(&sc->pd);
+@@ -506,6 +507,23 @@ static int gdsc_init(struct gdsc *sc)
+ 	return ret;
+ }
+ 
++static void gdsc_pm_subdomain_remove(struct gdsc_desc *desc, size_t num)
++{
++	struct device *dev = desc->dev;
++	struct gdsc **scs = desc->scs;
++	int i;
++
++	/* Remove subdomains */
++	for (i = num - 1; i >= 0; i--) {
++		if (!scs[i])
++			continue;
++		if (scs[i]->parent)
++			pm_genpd_remove_subdomain(scs[i]->parent, &scs[i]->pd);
++		else if (!IS_ERR_OR_NULL(dev->pm_domain))
++			pm_genpd_remove_subdomain(pd_to_genpd(dev->pm_domain), &scs[i]->pd);
++	}
++}
++
+ int gdsc_register(struct gdsc_desc *desc,
+ 		  struct reset_controller_dev *rcdev, struct regmap *regmap)
+ {
+@@ -555,30 +573,27 @@ int gdsc_register(struct gdsc_desc *desc,
+ 		if (!scs[i])
+ 			continue;
+ 		if (scs[i]->parent)
+-			pm_genpd_add_subdomain(scs[i]->parent, &scs[i]->pd);
++			ret = pm_genpd_add_subdomain(scs[i]->parent, &scs[i]->pd);
+ 		else if (!IS_ERR_OR_NULL(dev->pm_domain))
+-			pm_genpd_add_subdomain(pd_to_genpd(dev->pm_domain), &scs[i]->pd);
++			ret = pm_genpd_add_subdomain(pd_to_genpd(dev->pm_domain), &scs[i]->pd);
++		if (ret)
++			goto err_pm_subdomain_remove;
+ 	}
+ 
+ 	return of_genpd_add_provider_onecell(dev->of_node, data);
++
++err_pm_subdomain_remove:
++	gdsc_pm_subdomain_remove(desc, i);
++
++	return ret;
+ }
+ 
+ void gdsc_unregister(struct gdsc_desc *desc)
+ {
+-	int i;
+ 	struct device *dev = desc->dev;
+-	struct gdsc **scs = desc->scs;
+ 	size_t num = desc->num;
+ 
+-	/* Remove subdomains */
+-	for (i = 0; i < num; i++) {
+-		if (!scs[i])
+-			continue;
+-		if (scs[i]->parent)
+-			pm_genpd_remove_subdomain(scs[i]->parent, &scs[i]->pd);
+-		else if (!IS_ERR_OR_NULL(dev->pm_domain))
+-			pm_genpd_remove_subdomain(pd_to_genpd(dev->pm_domain), &scs[i]->pd);
+-	}
++	gdsc_pm_subdomain_remove(desc, num);
+ 	of_genpd_del_provider(dev->of_node);
+ }
+ 
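gdsc_register() now follows the usual partial-failure pattern: if adding
subdomain i fails, the ones added before it are removed in reverse order.
The generic shape, with trivial stand-ins for the genpd calls:

    #include <stdio.h>

    static int add_subdomain(int i)
    {
            return i == 2 ? -1 : 0;         /* third registration fails */
    }

    static void remove_subdomain(int i)
    {
            printf("removed subdomain %d\n", i);
    }

    int main(void)
    {
            int i, ret = 0, n = 4;

            for (i = 0; i < n; i++) {
                    ret = add_subdomain(i);
                    if (ret)
                            goto unwind;
            }
            return 0;

    unwind:
            /* mirrors gdsc_pm_subdomain_remove(desc, i): undo [0, i)
             * in reverse order */
            while (--i >= 0)
                    remove_subdomain(i);
            return 1;
    }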
+diff --git a/drivers/clk/renesas/r9a07g043-cpg.c b/drivers/clk/renesas/r9a07g043-cpg.c
+index c3c2b0c4398330..fce2eecfa8c03c 100644
+--- a/drivers/clk/renesas/r9a07g043-cpg.c
++++ b/drivers/clk/renesas/r9a07g043-cpg.c
+@@ -89,7 +89,9 @@ static const struct clk_div_table dtable_1_32[] = {
+ 
+ /* Mux clock tables */
+ static const char * const sel_pll3_3[] = { ".pll3_533", ".pll3_400" };
++#ifdef CONFIG_ARM64
+ static const char * const sel_pll6_2[]	= { ".pll6_250", ".pll5_250" };
++#endif
+ static const char * const sel_sdhi[] = { ".clk_533", ".clk_400", ".clk_266" };
+ 
+ static const u32 mtable_sdhi[] = { 1, 2, 3 };
+@@ -137,7 +139,12 @@ static const struct cpg_core_clk r9a07g043_core_clks[] __initconst = {
+ 	DEF_DIV("P2", R9A07G043_CLK_P2, CLK_PLL3_DIV2_4_2, DIVPL3A, dtable_1_32),
+ 	DEF_FIXED("M0", R9A07G043_CLK_M0, CLK_PLL3_DIV2_4, 1, 1),
+ 	DEF_FIXED("ZT", R9A07G043_CLK_ZT, CLK_PLL3_DIV2_4_2, 1, 1),
++#ifdef CONFIG_ARM64
+ 	DEF_MUX("HP", R9A07G043_CLK_HP, SEL_PLL6_2, sel_pll6_2),
++#endif
++#ifdef CONFIG_RISCV
++	DEF_FIXED("HP", R9A07G043_CLK_HP, CLK_PLL6_250, 1, 1),
++#endif
+ 	DEF_FIXED("SPI0", R9A07G043_CLK_SPI0, CLK_DIV_PLL3_C, 1, 2),
+ 	DEF_FIXED("SPI1", R9A07G043_CLK_SPI1, CLK_DIV_PLL3_C, 1, 4),
+ 	DEF_SD_MUX("SD0", R9A07G043_CLK_SD0, SEL_SDHI0, SEL_SDHI0_STS, sel_sdhi,
+diff --git a/drivers/clocksource/timer-stm32-lp.c b/drivers/clocksource/timer-stm32-lp.c
+index a4c95161cb22c4..193e4f643358bc 100644
+--- a/drivers/clocksource/timer-stm32-lp.c
++++ b/drivers/clocksource/timer-stm32-lp.c
+@@ -168,9 +168,7 @@ static int stm32_clkevent_lp_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	if (of_property_read_bool(pdev->dev.parent->of_node, "wakeup-source")) {
+-		ret = device_init_wakeup(&pdev->dev, true);
+-		if (ret)
+-			goto out_clk_disable;
++		device_set_wakeup_capable(&pdev->dev, true);
+ 
+ 		ret = dev_pm_set_wake_irq(&pdev->dev, irq);
+ 		if (ret)
+diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
+index 248d98fd8c48d0..157f9a9ed63616 100644
+--- a/drivers/crypto/ccp/sp-pci.c
++++ b/drivers/crypto/ccp/sp-pci.c
+@@ -189,14 +189,17 @@ static bool sp_pci_is_master(struct sp_device *sp)
+ 	pdev_new = to_pci_dev(dev_new);
+ 	pdev_cur = to_pci_dev(dev_cur);
+ 
+-	if (pdev_new->bus->number < pdev_cur->bus->number)
+-		return true;
++	if (pci_domain_nr(pdev_new->bus) != pci_domain_nr(pdev_cur->bus))
++		return pci_domain_nr(pdev_new->bus) < pci_domain_nr(pdev_cur->bus);
+ 
+-	if (PCI_SLOT(pdev_new->devfn) < PCI_SLOT(pdev_cur->devfn))
+-		return true;
++	if (pdev_new->bus->number != pdev_cur->bus->number)
++		return pdev_new->bus->number < pdev_cur->bus->number;
+ 
+-	if (PCI_FUNC(pdev_new->devfn) < PCI_FUNC(pdev_cur->devfn))
+-		return true;
++	if (PCI_SLOT(pdev_new->devfn) != PCI_SLOT(pdev_cur->devfn))
++		return PCI_SLOT(pdev_new->devfn) < PCI_SLOT(pdev_cur->devfn);
++
++	if (PCI_FUNC(pdev_new->devfn) != PCI_FUNC(pdev_cur->devfn))
++		return PCI_FUNC(pdev_new->devfn) < PCI_FUNC(pdev_cur->devfn);
+ 
+ 	return false;
+ }
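The old chain returned true as soon as any later field compared smaller, even
when an earlier field already ordered the devices the other way. The fix is a
standard lexicographic compare over (domain, bus, slot, function); a
standalone version:

    #include <stdbool.h>
    #include <stdio.h>

    struct loc { int domain, bus, slot, fn; };

    static bool is_before(struct loc a, struct loc b)
    {
            if (a.domain != b.domain) return a.domain < b.domain;
            if (a.bus    != b.bus)    return a.bus    < b.bus;
            if (a.slot   != b.slot)   return a.slot   < b.slot;
            if (a.fn     != b.fn)     return a.fn     < b.fn;
            return false;
    }

    int main(void)
    {
            struct loc a = { 0, 3, 0, 1 };
            struct loc b = { 0, 2, 7, 0 };

            /* the old chain would fall through the bus test and compare
             * slots (0 < 7), wrongly putting a before b despite its
             * higher bus number */
            printf("a before b: %d\n", is_before(a, b));
            return 0;
    }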
+diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c
+index 1ecb733a5e88b4..45543ab5073f66 100644
+--- a/drivers/gpio/gpio-tegra186.c
++++ b/drivers/gpio/gpio-tegra186.c
+@@ -823,6 +823,7 @@ static int tegra186_gpio_probe(struct platform_device *pdev)
+ 	struct gpio_irq_chip *irq;
+ 	struct tegra_gpio *gpio;
+ 	struct device_node *np;
++	struct resource *res;
+ 	char **names;
+ 	int err;
+ 
+@@ -842,19 +843,19 @@ static int tegra186_gpio_probe(struct platform_device *pdev)
+ 	gpio->num_banks++;
+ 
+ 	/* get register apertures */
+-	gpio->secure = devm_platform_ioremap_resource_byname(pdev, "security");
+-	if (IS_ERR(gpio->secure)) {
+-		gpio->secure = devm_platform_ioremap_resource(pdev, 0);
+-		if (IS_ERR(gpio->secure))
+-			return PTR_ERR(gpio->secure);
+-	}
+-
+-	gpio->base = devm_platform_ioremap_resource_byname(pdev, "gpio");
+-	if (IS_ERR(gpio->base)) {
+-		gpio->base = devm_platform_ioremap_resource(pdev, 1);
+-		if (IS_ERR(gpio->base))
+-			return PTR_ERR(gpio->base);
+-	}
++	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "security");
++	if (!res)
++		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++	gpio->secure = devm_ioremap_resource(&pdev->dev, res);
++	if (IS_ERR(gpio->secure))
++		return PTR_ERR(gpio->secure);
++
++	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gpio");
++	if (!res)
++		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
++	gpio->base = devm_ioremap_resource(&pdev->dev, res);
++	if (IS_ERR(gpio->base))
++		return PTR_ERR(gpio->base);
+ 
+ 	err = platform_irq_count(pdev);
+ 	if (err < 0)
+diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c
+index 1a42336dfc1d4a..cc53e6940ad7e6 100644
+--- a/drivers/gpio/gpio-zynq.c
++++ b/drivers/gpio/gpio-zynq.c
+@@ -1011,6 +1011,7 @@ static void zynq_gpio_remove(struct platform_device *pdev)
+ 	ret = pm_runtime_get_sync(&pdev->dev);
+ 	if (ret < 0)
+ 		dev_warn(&pdev->dev, "pm_runtime_get_sync() Failed\n");
++	device_init_wakeup(&pdev->dev, 0);
+ 	gpiochip_remove(&gpio->chip);
+ 	device_set_wakeup_capable(&pdev->dev, 0);
+ 	pm_runtime_disable(&pdev->dev);
+diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
+index 880f1efcaca534..e543129d360500 100644
+--- a/drivers/gpio/gpiolib-of.c
++++ b/drivers/gpio/gpiolib-of.c
+@@ -193,6 +193,8 @@ static void of_gpio_try_fixup_polarity(const struct device_node *np,
+ 		 */
+ 		{ "himax,hx8357",	"gpios-reset",	false },
+ 		{ "himax,hx8369",	"gpios-reset",	false },
++#endif
++#if IS_ENABLED(CONFIG_MTD_NAND_JZ4780)
+ 		/*
+ 		 * The rb-gpios semantics was undocumented and qi,lb60 (along with
+ 		 * the ingenic driver) got it wrong. The active state encodes the
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 96845541b2d255..31d4df96889812 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -6575,18 +6575,26 @@ struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
+ {
+ 	struct dma_fence *old = NULL;
+ 
++	dma_fence_get(gang);
+ 	do {
+ 		dma_fence_put(old);
+ 		old = amdgpu_device_get_gang(adev);
+ 		if (old == gang)
+ 			break;
+ 
+-		if (!dma_fence_is_signaled(old))
++		if (!dma_fence_is_signaled(old)) {
++			dma_fence_put(gang);
+ 			return old;
++		}
+ 
+ 	} while (cmpxchg((struct dma_fence __force **)&adev->gang_submit,
+ 			 old, gang) != old);
+ 
++	/*
++	 * Drop it once for the exchanged reference in adev and once for the
++	 * thread local reference acquired in amdgpu_device_get_gang().
++	 */
++	dma_fence_put(old);
+ 	dma_fence_put(old);
+ 	return NULL;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 73e02141a6e215..37d53578825b33 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -2434,8 +2434,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ 	spin_lock_init(&vm->status_lock);
+ 	INIT_LIST_HEAD(&vm->freed);
+ 	INIT_LIST_HEAD(&vm->done);
+-	INIT_LIST_HEAD(&vm->pt_freed);
+-	INIT_WORK(&vm->pt_free_work, amdgpu_vm_pt_free_work);
+ 	INIT_KFIFO(vm->faults);
+ 
+ 	r = amdgpu_vm_init_entities(adev, vm);
+@@ -2607,8 +2605,6 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
+ 
+ 	amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);
+ 
+-	flush_work(&vm->pt_free_work);
+-
+ 	root = amdgpu_bo_ref(vm->root.bo);
+ 	amdgpu_bo_reserve(root, true);
+ 	amdgpu_vm_put_task_info(vm->task_info);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+index 52dd7cdfdc8145..ee893527a4f1db 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+@@ -360,10 +360,6 @@ struct amdgpu_vm {
+ 	/* BOs which are invalidated, has been updated in the PTs */
+ 	struct list_head        done;
+ 
+-	/* PT BOs scheduled to free and fill with zero if vm_resv is not hold */
+-	struct list_head	pt_freed;
+-	struct work_struct	pt_free_work;
+-
+ 	/* contains the page directory */
+ 	struct amdgpu_vm_bo_base     root;
+ 	struct dma_fence	*last_update;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
+index f78a0434a48fa2..54ae0e9bc6d772 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
+@@ -546,27 +546,6 @@ static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry)
+ 	amdgpu_bo_unref(&entry->bo);
+ }
+ 
+-void amdgpu_vm_pt_free_work(struct work_struct *work)
+-{
+-	struct amdgpu_vm_bo_base *entry, *next;
+-	struct amdgpu_vm *vm;
+-	LIST_HEAD(pt_freed);
+-
+-	vm = container_of(work, struct amdgpu_vm, pt_free_work);
+-
+-	spin_lock(&vm->status_lock);
+-	list_splice_init(&vm->pt_freed, &pt_freed);
+-	spin_unlock(&vm->status_lock);
+-
+-	/* flush_work in amdgpu_vm_fini ensure vm->root.bo is valid. */
+-	amdgpu_bo_reserve(vm->root.bo, true);
+-
+-	list_for_each_entry_safe(entry, next, &pt_freed, vm_status)
+-		amdgpu_vm_pt_free(entry);
+-
+-	amdgpu_bo_unreserve(vm->root.bo);
+-}
+-
+ /**
+  * amdgpu_vm_pt_free_list - free PD/PT levels
+  *
+@@ -579,19 +558,15 @@ void amdgpu_vm_pt_free_list(struct amdgpu_device *adev,
+ 			    struct amdgpu_vm_update_params *params)
+ {
+ 	struct amdgpu_vm_bo_base *entry, *next;
+-	struct amdgpu_vm *vm = params->vm;
+ 	bool unlocked = params->unlocked;
+ 
+ 	if (list_empty(&params->tlb_flush_waitlist))
+ 		return;
+ 
+-	if (unlocked) {
+-		spin_lock(&vm->status_lock);
+-		list_splice_init(&params->tlb_flush_waitlist, &vm->pt_freed);
+-		spin_unlock(&vm->status_lock);
+-		schedule_work(&vm->pt_free_work);
+-		return;
+-	}
++	/*
++	 * An unlocked unmap only clears page table leaves; warn if asked to
++	 * free a page entry.
++	 */
++	WARN_ON(unlocked);
+ 
+ 	list_for_each_entry_safe(entry, next, &params->tlb_flush_waitlist, vm_status)
+ 		amdgpu_vm_pt_free(entry);
+@@ -899,7 +874,15 @@ int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params,
+ 		incr = (uint64_t)AMDGPU_GPU_PAGE_SIZE << shift;
+ 		mask = amdgpu_vm_pt_entries_mask(adev, cursor.level);
+ 		pe_start = ((cursor.pfn >> shift) & mask) * 8;
+-		entry_end = ((uint64_t)mask + 1) << shift;
++
++		if (cursor.level < AMDGPU_VM_PTB && params->unlocked)
++			/*
++			 * An unlocked unmap from the MMU notifier callback may hit a huge
++			 * page whose leaf is a PDE entry; clear only that one entry, then
++			 * search again for the next PDE or PTE leaf.
++			 */
++			entry_end = 1ULL << shift;
++		else
++			entry_end = ((uint64_t)mask + 1) << shift;
+ 		entry_end += cursor.pfn & ~(entry_end - 1);
+ 		entry_end = min(entry_end, end);
+ 
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+index 3e6b4736a7feaa..67b5f3d7ff8e91 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -212,6 +212,11 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
+ 		return -EINVAL;
+ 	}
+ 
++	if (args->ring_size < KFD_MIN_QUEUE_RING_SIZE) {
++		args->ring_size = KFD_MIN_QUEUE_RING_SIZE;
++		pr_debug("Size lower. clamped to KFD_MIN_QUEUE_RING_SIZE");
++	}
++
+ 	if (!access_ok((const void __user *) args->read_pointer_address,
+ 			sizeof(uint32_t))) {
+ 		pr_err("Can't access read pointer\n");
+@@ -461,6 +466,11 @@ static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
+ 		return -EINVAL;
+ 	}
+ 
++	if (args->ring_size < KFD_MIN_QUEUE_RING_SIZE) {
++		args->ring_size = KFD_MIN_QUEUE_RING_SIZE;
++		pr_debug("Size lower. clamped to KFD_MIN_QUEUE_RING_SIZE");
++	}
++
+ 	properties.queue_address = args->ring_base_address;
+ 	properties.queue_size = args->ring_size;
+ 	properties.queue_percent = args->queue_percentage & 0xFF;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+index d350c7ce35b3d6..9186ef0bd2a32a 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+@@ -1493,6 +1493,11 @@ int kfd_debugfs_hang_hws(struct kfd_node *dev)
+ 		return -EINVAL;
+ 	}
+ 
++	if (dev->kfd->shared_resources.enable_mes) {
++		dev_err(dev->adev->dev, "Inducing MES hang is not supported\n");
++		return -EINVAL;
++	}
++
+ 	return dqm_debugfs_hang_hws(dev->dqm);
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+index 264bd764f6f27d..0ec8b457494bd7 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+@@ -35,6 +35,7 @@
+ #include <linux/pm_runtime.h>
+ #include "amdgpu_amdkfd.h"
+ #include "amdgpu.h"
++#include "amdgpu_reset.h"
+ 
+ struct mm_struct;
+ 
+@@ -1140,6 +1141,17 @@ static void kfd_process_remove_sysfs(struct kfd_process *p)
+ 	p->kobj = NULL;
+ }
+ 
++/*
++ * If any GPU is ongoing reset, wait for reset complete.
++ */
++static void kfd_process_wait_gpu_reset_complete(struct kfd_process *p)
++{
++	int i;
++
++	for (i = 0; i < p->n_pdds; i++)
++		flush_workqueue(p->pdds[i]->dev->adev->reset_domain->wq);
++}
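++
++/*
++ * flush_workqueue() returns only once every reset work item queued so far
++ * on that reset domain's workqueue has finished, so any in-flight GPU
++ * reset is guaranteed to be complete before teardown continues.
++ */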
++
+ /* No process locking is needed in this function, because the process
+  * is not findable any more. We must assume that no other thread is
+  * using it any more, otherwise we couldn't safely free the process
+@@ -1154,6 +1166,11 @@ static void kfd_process_wq_release(struct work_struct *work)
+ 	kfd_process_dequeue_from_all_devices(p);
+ 	pqm_uninit(&p->pqm);
+ 
++	/*
++	 * If a GPU is in reset, user queues may still be running; wait for the reset to complete.
++	 */
++	kfd_process_wait_gpu_reset_complete(p);
++
+ 	/* Signal the eviction fence after user mode queues are
+ 	 * destroyed. This allows any BOs to be freed without
+ 	 * triggering pointless evictions or waiting for fences.
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+index ac777244ee0a18..4078a81761871c 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+@@ -546,7 +546,7 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
+ 			pr_err("Pasid 0x%x destroy queue %d failed, ret %d\n",
+ 				pqm->process->pasid,
+ 				pqn->q->properties.queue_id, retval);
+-			if (retval != -ETIME)
++			if (retval != -ETIME && retval != -EIO)
+ 				goto err_destroy_queue;
+ 		}
+ 		kfd_procfs_del_queue(pqn->q);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+index 8c61dee5ca0db1..b50283864dcd26 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+@@ -2992,19 +2992,6 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
+ 		goto out;
+ 	}
+ 
+-	/* check if this page fault time stamp is before svms->checkpoint_ts */
+-	if (svms->checkpoint_ts[gpuidx] != 0) {
+-		if (amdgpu_ih_ts_after(ts,  svms->checkpoint_ts[gpuidx])) {
+-			pr_debug("draining retry fault, drop fault 0x%llx\n", addr);
+-			r = 0;
+-			goto out;
+-		} else
+-			/* ts is after svms->checkpoint_ts now, reset svms->checkpoint_ts
+-			 * to zero to avoid following ts wrap around give wrong comparing
+-			 */
+-			svms->checkpoint_ts[gpuidx] = 0;
+-	}
+-
+ 	if (!p->xnack_enabled) {
+ 		pr_debug("XNACK not enabled for pasid 0x%x\n", pasid);
+ 		r = -EFAULT;
+@@ -3024,6 +3011,21 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
+ 	mmap_read_lock(mm);
+ retry_write_locked:
+ 	mutex_lock(&svms->lock);
++
++	/* check if this page fault time stamp is before svms->checkpoint_ts */
++	if (svms->checkpoint_ts[gpuidx] != 0) {
++		if (amdgpu_ih_ts_after(ts,  svms->checkpoint_ts[gpuidx])) {
++			pr_debug("draining retry fault, drop fault 0x%llx\n", addr);
++			r = -EAGAIN;
++			goto out_unlock_svms;
++		} else {
++			/* ts is now after svms->checkpoint_ts, so reset svms->checkpoint_ts
++			 * to zero to keep a later ts wraparound from giving a wrong comparison
++			 */
++			svms->checkpoint_ts[gpuidx] = 0;
++		}
++	}
++
+ 	prange = svm_range_from_addr(svms, addr, NULL);
+ 	if (!prange) {
+ 		pr_debug("failed to find prange svms 0x%p address [0x%llx]\n",
+@@ -3148,7 +3150,8 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
+ 	mutex_unlock(&svms->lock);
+ 	mmap_read_unlock(mm);
+ 
+-	svm_range_count_fault(node, p, gpuidx);
++	if (r != -EAGAIN)
++		svm_range_count_fault(node, p, gpuidx);
+ 
+ 	mmput(mm);
+ out:
+diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
+index 1ed21c1b86a5bb..a966abd4078810 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
++++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
+@@ -532,26 +532,6 @@ static void calculate_odm_slices(const struct dc_stream_state *stream, unsigned
+ 	odm_slice_end_x[odm_factor - 1] = stream->src.width - 1;
+ }
+ 
+-static bool is_plane_in_odm_slice(const struct dc_plane_state *plane, unsigned int slice_index, unsigned int *odm_slice_end_x, unsigned int num_slices)
+-{
+-	unsigned int slice_start_x, slice_end_x;
+-
+-	if (slice_index == 0)
+-		slice_start_x = 0;
+-	else
+-		slice_start_x = odm_slice_end_x[slice_index - 1] + 1;
+-
+-	slice_end_x = odm_slice_end_x[slice_index];
+-
+-	if (plane->clip_rect.x + plane->clip_rect.width < slice_start_x)
+-		return false;
+-
+-	if (plane->clip_rect.x > slice_end_x)
+-		return false;
+-
+-	return true;
+-}
+-
+ static void add_odm_slice_to_odm_tree(struct dml2_context *ctx,
+ 		struct dc_state *state,
+ 		struct dc_pipe_mapping_scratch *scratch,
+@@ -791,12 +771,6 @@ static void map_pipes_for_plane(struct dml2_context *ctx, struct dc_state *state
+ 	sort_pipes_for_splitting(&scratch->pipe_pool);
+ 
+ 	for (odm_slice_index = 0; odm_slice_index < scratch->odm_info.odm_factor; odm_slice_index++) {
+-		// We build the tree for one ODM slice at a time.
+-		// Each ODM slice shares a common OPP
+-		if (!is_plane_in_odm_slice(plane, odm_slice_index, scratch->odm_info.odm_slice_end_x, scratch->odm_info.odm_factor)) {
+-			continue;
+-		}
+-
+ 		// Now we have a list of all pipes to be used for this plane/stream, now setup the tree.
+ 		scratch->odm_info.next_higher_pipe_for_odm_slice[odm_slice_index] = add_plane_to_blend_tree(ctx, state,
+ 				plane,
+diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c
+index a65a0ddee64672..c671908ba7d06c 100644
+--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c
++++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c
+@@ -44,7 +44,7 @@ void hubp31_set_unbounded_requesting(struct hubp *hubp, bool enable)
+ 	struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+ 
+ 	REG_UPDATE(DCHUBP_CNTL, HUBP_UNBOUNDED_REQ_MODE, enable);
+-	REG_UPDATE(CURSOR_CONTROL, CURSOR_REQ_MODE, enable);
++	REG_UPDATE(CURSOR_CONTROL, CURSOR_REQ_MODE, 1);
+ }
+ 
+ void hubp31_soft_reset(struct hubp *hubp, bool reset)
+diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
+index fd0530251c6e5a..d725af14af371a 100644
+--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
+@@ -1992,20 +1992,11 @@ static void delay_cursor_until_vupdate(struct dc *dc, struct pipe_ctx *pipe_ctx)
+ 	dc->hwss.get_position(&pipe_ctx, 1, &position);
+ 	vpos = position.vertical_count;
+ 
+-	/* Avoid wraparound calculation issues */
+-	vupdate_start += stream->timing.v_total;
+-	vupdate_end += stream->timing.v_total;
+-	vpos += stream->timing.v_total;
+-
+ 	if (vpos <= vupdate_start) {
+ 		/* VPOS is in VACTIVE or back porch. */
+ 		lines_to_vupdate = vupdate_start - vpos;
+-	} else if (vpos > vupdate_end) {
+-		/* VPOS is in the front porch. */
+-		return;
+ 	} else {
+-		/* VPOS is in VUPDATE. */
+-		lines_to_vupdate = 0;
++		lines_to_vupdate = stream->timing.v_total - vpos + vupdate_start;
+ 	}
+ 
+ 	/* Calculate time until VUPDATE in microseconds. */
+@@ -2013,13 +2004,18 @@ static void delay_cursor_until_vupdate(struct dc *dc, struct pipe_ctx *pipe_ctx)
+ 		stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz;
+ 	us_to_vupdate = lines_to_vupdate * us_per_line;
+ 
++	/* Stall out until the cursor update completes. */
++	if (vupdate_end < vupdate_start)
++		vupdate_end += stream->timing.v_total;
++
++	/* Position is in the range of vupdate start and end */
++	if (lines_to_vupdate > stream->timing.v_total - vupdate_end + vupdate_start)
++		us_to_vupdate = 0;
++
+ 	/* 70 us is a conservative estimate of cursor update time*/
+ 	if (us_to_vupdate > 70)
+ 		return;
+ 
+-	/* Stall out until the cursor update completes. */
+-	if (vupdate_end < vupdate_start)
+-		vupdate_end += stream->timing.v_total;
+ 	us_vupdate = (vupdate_end - vupdate_start + 1) * us_per_line;
+ 	udelay(us_to_vupdate + us_vupdate);
+ }
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
+index a71c6117d7e547..0115d26b5af92d 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
+@@ -51,6 +51,11 @@ static int amd_powerplay_create(struct amdgpu_device *adev)
+ 	hwmgr->adev = adev;
+ 	hwmgr->not_vf = !amdgpu_sriov_vf(adev);
+ 	hwmgr->device = amdgpu_cgs_create_device(adev);
++	if (!hwmgr->device) {
++		kfree(hwmgr);
++		return -ENOMEM;
++	}
++
+ 	mutex_init(&hwmgr->msg_lock);
+ 	hwmgr->chip_family = adev->family;
+ 	hwmgr->chip_id = adev->asic_type;
+diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
+index 5186d2114a5037..32902f77f00dd8 100644
+--- a/drivers/gpu/drm/drm_atomic_helper.c
++++ b/drivers/gpu/drm/drm_atomic_helper.c
+@@ -1376,7 +1376,7 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
+ 		mode = &new_crtc_state->mode;
+ 		adjusted_mode = &new_crtc_state->adjusted_mode;
+ 
+-		if (!new_crtc_state->mode_changed)
++		if (!new_crtc_state->mode_changed && !new_crtc_state->connectors_changed)
+ 			continue;
+ 
+ 		drm_dbg_atomic(dev, "modeset on [ENCODER:%d:%s]\n",
+diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
+index 9d3e6dd68810e3..98a37dc3324e4f 100644
+--- a/drivers/gpu/drm/drm_debugfs.c
++++ b/drivers/gpu/drm/drm_debugfs.c
+@@ -743,7 +743,7 @@ static int bridges_show(struct seq_file *m, void *data)
+ 	unsigned int idx = 0;
+ 
+ 	drm_for_each_bridge_in_chain(encoder, bridge) {
+-		drm_printf(&p, "bridge[%d]: %ps\n", idx++, bridge->funcs);
++		drm_printf(&p, "bridge[%u]: %ps\n", idx++, bridge->funcs);
+ 		drm_printf(&p, "\ttype: [%d] %s\n",
+ 			   bridge->type,
+ 			   drm_get_connector_type_name(bridge->type));
+diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c
+index 19ab0a794add31..fd8fa2e0ef6fac 100644
+--- a/drivers/gpu/drm/drm_panel.c
++++ b/drivers/gpu/drm/drm_panel.c
+@@ -49,7 +49,7 @@ static LIST_HEAD(panel_list);
+  * @dev: parent device of the panel
+  * @funcs: panel operations
+  * @connector_type: the connector type (DRM_MODE_CONNECTOR_*) corresponding to
+- *	the panel interface
++ *	the panel interface (must NOT be DRM_MODE_CONNECTOR_Unknown)
+  *
+  * Initialize the panel structure for subsequent registration with
+  * drm_panel_add().
+@@ -57,6 +57,9 @@ static LIST_HEAD(panel_list);
+ void drm_panel_init(struct drm_panel *panel, struct device *dev,
+ 		    const struct drm_panel_funcs *funcs, int connector_type)
+ {
++	if (connector_type == DRM_MODE_CONNECTOR_Unknown)
++		DRM_WARN("%s: %s: a valid connector type is required!\n", __func__, dev_name(dev));
++
+ 	INIT_LIST_HEAD(&panel->list);
+ 	INIT_LIST_HEAD(&panel->followers);
+ 	mutex_init(&panel->follower_lock);
+diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
+index 4a73821b81f6fd..c554ad8f246b65 100644
+--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
++++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
+@@ -93,6 +93,12 @@ static const struct drm_dmi_panel_orientation_data onegx1_pro = {
+ 	.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
+ };
+ 
++static const struct drm_dmi_panel_orientation_data lcd640x960_leftside_up = {
++	.width = 640,
++	.height = 960,
++	.orientation = DRM_MODE_PANEL_ORIENTATION_LEFT_UP,
++};
++
+ static const struct drm_dmi_panel_orientation_data lcd720x1280_rightside_up = {
+ 	.width = 720,
+ 	.height = 1280,
+@@ -123,6 +129,12 @@ static const struct drm_dmi_panel_orientation_data lcd1080x1920_rightside_up = {
+ 	.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
+ };
+ 
++static const struct drm_dmi_panel_orientation_data lcd1200x1920_leftside_up = {
++	.width = 1200,
++	.height = 1920,
++	.orientation = DRM_MODE_PANEL_ORIENTATION_LEFT_UP,
++};
++
+ static const struct drm_dmi_panel_orientation_data lcd1200x1920_rightside_up = {
+ 	.width = 1200,
+ 	.height = 1920,
+@@ -184,10 +196,10 @@ static const struct dmi_system_id orientation_data[] = {
+ 		  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T103HAF"),
+ 		},
+ 		.driver_data = (void *)&lcd800x1280_rightside_up,
+-	}, {	/* AYA NEO AYANEO 2 */
++	}, {	/* AYA NEO AYANEO 2/2S */
+ 		.matches = {
+ 		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYANEO"),
+-		  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "AYANEO 2"),
++		  DMI_MATCH(DMI_PRODUCT_NAME, "AYANEO 2"),
+ 		},
+ 		.driver_data = (void *)&lcd1200x1920_rightside_up,
+ 	}, {	/* AYA NEO 2021 */
+@@ -202,6 +214,18 @@ static const struct dmi_system_id orientation_data[] = {
+ 		  DMI_MATCH(DMI_PRODUCT_NAME, "AIR"),
+ 		},
+ 		.driver_data = (void *)&lcd1080x1920_leftside_up,
++	}, {    /* AYA NEO Flip DS Bottom Screen */
++		.matches = {
++		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYANEO"),
++		  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "FLIP DS"),
++		},
++		.driver_data = (void *)&lcd640x960_leftside_up,
++	}, {    /* AYA NEO Flip KB/DS Top Screen */
++		.matches = {
++		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYANEO"),
++		  DMI_MATCH(DMI_PRODUCT_NAME, "FLIP"),
++		},
++		.driver_data = (void *)&lcd1080x1920_leftside_up,
+ 	}, {	/* AYA NEO Founder */
+ 		.matches = {
+ 		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYA NEO"),
+@@ -226,6 +250,12 @@ static const struct dmi_system_id orientation_data[] = {
+ 		  DMI_MATCH(DMI_BOARD_NAME, "KUN"),
+ 		},
+ 		.driver_data = (void *)&lcd1600x2560_rightside_up,
++	}, {	/* AYA NEO SLIDE */
++		.matches = {
++		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYANEO"),
++		  DMI_MATCH(DMI_PRODUCT_NAME, "SLIDE"),
++		},
++		.driver_data = (void *)&lcd1080x1920_leftside_up,
+ 	}, {    /* AYN Loki Max */
+ 		.matches = {
+ 			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ayn"),
+@@ -315,6 +345,12 @@ static const struct dmi_system_id orientation_data[] = {
+ 		  DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"),
+ 		},
+ 		.driver_data = (void *)&gpd_win2,
++	}, {	/* GPD Win 2 (correct DMI strings) */
++		.matches = {
++		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "GPD"),
++		  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "WIN2")
++		},
++		.driver_data = (void *)&lcd720x1280_rightside_up,
+ 	}, {	/* GPD Win 3 */
+ 		.matches = {
+ 		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "GPD"),
+@@ -443,6 +479,12 @@ static const struct dmi_system_id orientation_data[] = {
+ 		  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ONE XPLAYER"),
+ 		},
+ 		.driver_data = (void *)&lcd1600x2560_leftside_up,
++	}, {	/* OneXPlayer Mini (Intel) */
++		.matches = {
++		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ONE-NETBOOK TECHNOLOGY CO., LTD."),
++		  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ONE XPLAYER"),
++		},
++		.driver_data = (void *)&lcd1200x1920_leftside_up,
+ 	}, {	/* OrangePi Neo */
+ 		.matches = {
+ 		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "OrangePi"),
+diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
+index 9378d5901c4939..9ca42589da4dad 100644
+--- a/drivers/gpu/drm/i915/gt/intel_rc6.c
++++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
+@@ -117,21 +117,10 @@ static void gen11_rc6_enable(struct intel_rc6 *rc6)
+ 			GEN6_RC_CTL_RC6_ENABLE |
+ 			GEN6_RC_CTL_EI_MODE(1);
+ 
+-	/*
+-	 * BSpec 52698 - Render powergating must be off.
+-	 * FIXME BSpec is outdated, disabling powergating for MTL is just
+-	 * temporary wa and should be removed after fixing real cause
+-	 * of forcewake timeouts.
+-	 */
+-	if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74)))
+-		pg_enable =
+-			GEN9_MEDIA_PG_ENABLE |
+-			GEN11_MEDIA_SAMPLER_PG_ENABLE;
+-	else
+-		pg_enable =
+-			GEN9_RENDER_PG_ENABLE |
+-			GEN9_MEDIA_PG_ENABLE |
+-			GEN11_MEDIA_SAMPLER_PG_ENABLE;
++	pg_enable =
++		GEN9_RENDER_PG_ENABLE |
++		GEN9_MEDIA_PG_ENABLE |
++		GEN11_MEDIA_SAMPLER_PG_ENABLE;
+ 
+ 	if (GRAPHICS_VER(gt->i915) >= 12 && !IS_DG1(gt->i915)) {
+ 		for (i = 0; i < I915_MAX_VCS; i++)
+diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
+index 2d9152eb728255..24fdce844d9e3e 100644
+--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c
++++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
+@@ -317,6 +317,11 @@ void intel_huc_init_early(struct intel_huc *huc)
+ 	}
+ }
+ 
++void intel_huc_fini_late(struct intel_huc *huc)
++{
++	delayed_huc_load_fini(huc);
++}
++
+ #define HUC_LOAD_MODE_STRING(x) (x ? "GSC" : "legacy")
+ static int check_huc_loading_mode(struct intel_huc *huc)
+ {
+@@ -414,12 +419,6 @@ int intel_huc_init(struct intel_huc *huc)
+ 
+ void intel_huc_fini(struct intel_huc *huc)
+ {
+-	/*
+-	 * the fence is initialized in init_early, so we need to clean it up
+-	 * even if HuC loading is off.
+-	 */
+-	delayed_huc_load_fini(huc);
+-
+ 	if (huc->heci_pkt)
+ 		i915_vma_unpin_and_release(&huc->heci_pkt, 0);
+ 
+diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.h b/drivers/gpu/drm/i915/gt/uc/intel_huc.h
+index ba5cb08e9e7bf1..09aff3148f7ddb 100644
+--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.h
++++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.h
+@@ -55,6 +55,7 @@ struct intel_huc {
+ 
+ int intel_huc_sanitize(struct intel_huc *huc);
+ void intel_huc_init_early(struct intel_huc *huc);
++void intel_huc_fini_late(struct intel_huc *huc);
+ int intel_huc_init(struct intel_huc *huc);
+ void intel_huc_fini(struct intel_huc *huc);
+ void intel_huc_suspend(struct intel_huc *huc);
+diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+index 5b8080ec5315b6..4f751ce74214d4 100644
+--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
++++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+@@ -136,6 +136,7 @@ void intel_uc_init_late(struct intel_uc *uc)
+ 
+ void intel_uc_driver_late_release(struct intel_uc *uc)
+ {
++	intel_huc_fini_late(&uc->huc);
+ }
+ 
+ /**
+diff --git a/drivers/gpu/drm/i915/selftests/i915_selftest.c b/drivers/gpu/drm/i915/selftests/i915_selftest.c
+index fee76c1d2f4500..889281819c5b13 100644
+--- a/drivers/gpu/drm/i915/selftests/i915_selftest.c
++++ b/drivers/gpu/drm/i915/selftests/i915_selftest.c
+@@ -23,7 +23,9 @@
+ 
+ #include <linux/random.h>
+ 
++#include "gt/intel_gt.h"
+ #include "gt/intel_gt_pm.h"
++#include "gt/intel_gt_regs.h"
+ #include "gt/uc/intel_gsc_fw.h"
+ 
+ #include "i915_driver.h"
+@@ -253,11 +255,27 @@ int i915_mock_selftests(void)
+ int i915_live_selftests(struct pci_dev *pdev)
+ {
+ 	struct drm_i915_private *i915 = pdev_to_i915(pdev);
++	struct intel_uncore *uncore = &i915->uncore;
+ 	int err;
++	u32 pg_enable;
++	intel_wakeref_t wakeref;
+ 
+ 	if (!i915_selftest.live)
+ 		return 0;
+ 
++	/*
++	 * FIXME Disable render powergating; this is a temporary workaround and
++	 * should be removed after fixing the real cause of the forcewake timeouts.
++	 */
++	with_intel_runtime_pm(uncore->rpm, wakeref) {
++		if (IS_GFX_GT_IP_RANGE(to_gt(i915), IP_VER(12, 00), IP_VER(12, 74))) {
++			pg_enable = intel_uncore_read(uncore, GEN9_PG_ENABLE);
++			if (pg_enable & GEN9_RENDER_PG_ENABLE)
++				intel_uncore_write_fw(uncore, GEN9_PG_ENABLE,
++						      pg_enable & ~GEN9_RENDER_PG_ENABLE);
++		}
++	}
++
+ 	__wait_gsc_proxy_completed(i915);
+ 	__wait_gsc_huc_load_completed(i915);
+ 
+diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
+index a08d2065495432..9c11d3158324c1 100644
+--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
++++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
+@@ -127,14 +127,14 @@ struct mtk_dpi_yc_limit {
+  * @is_ck_de_pol: Support CK/DE polarity.
+  * @swap_input_support: Support input swap function.
+  * @support_direct_pin: IP supports direct connection to dpi panels.
+- * @input_2pixel: Input pixel of dp_intf is 2 pixel per round, so enable this
+- *		  config to enable this feature.
+  * @dimension_mask: Mask used for HWIDTH, HPORCH, VSYNC_WIDTH and VSYNC_PORCH
+  *		    (no shift).
+  * @hvsize_mask: Mask of HSIZE and VSIZE mask (no shift).
+  * @channel_swap_shift: Shift value of channel swap.
+  * @yuv422_en_bit: Enable bit of yuv422.
+  * @csc_enable_bit: Enable bit of CSC.
++ * @input_2p_en_bit: Enable bit for the two-pixels-per-round input feature.
++ *		     If present, the feature must be enabled.
+  * @pixels_per_iter: Quantity of transferred pixels per iteration.
+  * @edge_cfg_in_mmsys: If the edge configuration for DPI's output needs to be set in MMSYS.
+  */
+@@ -148,12 +148,12 @@ struct mtk_dpi_conf {
+ 	bool is_ck_de_pol;
+ 	bool swap_input_support;
+ 	bool support_direct_pin;
+-	bool input_2pixel;
+ 	u32 dimension_mask;
+ 	u32 hvsize_mask;
+ 	u32 channel_swap_shift;
+ 	u32 yuv422_en_bit;
+ 	u32 csc_enable_bit;
++	u32 input_2p_en_bit;
+ 	u32 pixels_per_iter;
+ 	bool edge_cfg_in_mmsys;
+ };
+@@ -471,6 +471,7 @@ static void mtk_dpi_power_off(struct mtk_dpi *dpi)
+ 
+ 	mtk_dpi_disable(dpi);
+ 	clk_disable_unprepare(dpi->pixel_clk);
++	clk_disable_unprepare(dpi->tvd_clk);
+ 	clk_disable_unprepare(dpi->engine_clk);
+ }
+ 
+@@ -487,6 +488,12 @@ static int mtk_dpi_power_on(struct mtk_dpi *dpi)
+ 		goto err_refcount;
+ 	}
+ 
++	ret = clk_prepare_enable(dpi->tvd_clk);
++	if (ret) {
++		dev_err(dpi->dev, "Failed to enable tvd pll: %d\n", ret);
++		goto err_engine;
++	}
++
+ 	ret = clk_prepare_enable(dpi->pixel_clk);
+ 	if (ret) {
+ 		dev_err(dpi->dev, "Failed to enable pixel clock: %d\n", ret);
+@@ -496,6 +503,8 @@ static int mtk_dpi_power_on(struct mtk_dpi *dpi)
+ 	return 0;
+ 
+ err_pixel:
++	clk_disable_unprepare(dpi->tvd_clk);
++err_engine:
+ 	clk_disable_unprepare(dpi->engine_clk);
+ err_refcount:
+ 	dpi->refcount--;
+@@ -610,9 +619,9 @@ static int mtk_dpi_set_display_mode(struct mtk_dpi *dpi,
+ 		mtk_dpi_dual_edge(dpi);
+ 		mtk_dpi_config_disable_edge(dpi);
+ 	}
+-	if (dpi->conf->input_2pixel) {
+-		mtk_dpi_mask(dpi, DPI_CON, DPINTF_INPUT_2P_EN,
+-			     DPINTF_INPUT_2P_EN);
++	if (dpi->conf->input_2p_en_bit) {
++		mtk_dpi_mask(dpi, DPI_CON, dpi->conf->input_2p_en_bit,
++			     dpi->conf->input_2p_en_bit);
+ 	}
+ 	mtk_dpi_sw_reset(dpi, false);
+ 
+@@ -992,12 +1001,12 @@ static const struct mtk_dpi_conf mt8195_dpintf_conf = {
+ 	.output_fmts = mt8195_output_fmts,
+ 	.num_output_fmts = ARRAY_SIZE(mt8195_output_fmts),
+ 	.pixels_per_iter = 4,
+-	.input_2pixel = true,
+ 	.dimension_mask = DPINTF_HPW_MASK,
+ 	.hvsize_mask = DPINTF_HSIZE_MASK,
+ 	.channel_swap_shift = DPINTF_CH_SWAP,
+ 	.yuv422_en_bit = DPINTF_YUV422_EN,
+ 	.csc_enable_bit = DPINTF_CSC_ENABLE,
++	.input_2p_en_bit = DPINTF_INPUT_2P_EN,
+ };
+ 
+ static int mtk_dpi_probe(struct platform_device *pdev)
+diff --git a/drivers/gpu/drm/tests/drm_client_modeset_test.c b/drivers/gpu/drm/tests/drm_client_modeset_test.c
+index 7516f6cb36e4e3..3e9518d7b8b7eb 100644
+--- a/drivers/gpu/drm/tests/drm_client_modeset_test.c
++++ b/drivers/gpu/drm/tests/drm_client_modeset_test.c
+@@ -95,6 +95,9 @@ static void drm_test_pick_cmdline_res_1920_1080_60(struct kunit *test)
+ 	expected_mode = drm_mode_find_dmt(priv->drm, 1920, 1080, 60, false);
+ 	KUNIT_ASSERT_NOT_NULL(test, expected_mode);
+ 
++	ret = drm_kunit_add_mode_destroy_action(test, expected_mode);
++	KUNIT_ASSERT_EQ(test, ret, 0);
++
+ 	KUNIT_ASSERT_TRUE(test,
+ 			  drm_mode_parse_command_line_for_connector(cmdline,
+ 								    connector,
+diff --git a/drivers/gpu/drm/tests/drm_cmdline_parser_test.c b/drivers/gpu/drm/tests/drm_cmdline_parser_test.c
+index 59c8408c453c2e..1cfcb597b088b4 100644
+--- a/drivers/gpu/drm/tests/drm_cmdline_parser_test.c
++++ b/drivers/gpu/drm/tests/drm_cmdline_parser_test.c
+@@ -7,6 +7,7 @@
+ #include <kunit/test.h>
+ 
+ #include <drm/drm_connector.h>
++#include <drm/drm_kunit_helpers.h>
+ #include <drm/drm_modes.h>
+ 
+ static const struct drm_connector no_connector = {};
+@@ -955,8 +956,15 @@ struct drm_cmdline_tv_option_test {
+ static void drm_test_cmdline_tv_options(struct kunit *test)
+ {
+ 	const struct drm_cmdline_tv_option_test *params = test->param_value;
+-	const struct drm_display_mode *expected_mode = params->mode_fn(NULL);
++	struct drm_display_mode *expected_mode;
+ 	struct drm_cmdline_mode mode = { };
++	int ret;
++
++	expected_mode = params->mode_fn(NULL);
++	KUNIT_ASSERT_NOT_NULL(test, expected_mode);
++
++	ret = drm_kunit_add_mode_destroy_action(test, expected_mode);
++	KUNIT_ASSERT_EQ(test, ret, 0);
+ 
+ 	KUNIT_EXPECT_TRUE(test, drm_mode_parse_command_line_for_connector(params->cmdline,
+ 									  &no_connector, &mode));
+diff --git a/drivers/gpu/drm/tests/drm_kunit_helpers.c b/drivers/gpu/drm/tests/drm_kunit_helpers.c
+index 3c0b7824c0be37..922c4b6ed1dc9b 100644
+--- a/drivers/gpu/drm/tests/drm_kunit_helpers.c
++++ b/drivers/gpu/drm/tests/drm_kunit_helpers.c
+@@ -319,6 +319,28 @@ static void kunit_action_drm_mode_destroy(void *ptr)
+ 	drm_mode_destroy(NULL, mode);
+ }
+ 
++/**
++ * drm_kunit_add_mode_destroy_action() - Add a drm_destroy_mode kunit action
++ * @test: The test context object
++ * @mode: The drm_display_mode to destroy eventually
++ *
++ * Registers a kunit action that will destroy the drm_display_mode at
++ * the end of the test.
++ *
++ * If the action cannot be registered, the drm_display_mode is destroyed right away.
++ *
++ * Returns:
++ * 0 on success, an error code otherwise.
++ */
++int drm_kunit_add_mode_destroy_action(struct kunit *test,
++				      struct drm_display_mode *mode)
++{
++	return kunit_add_action_or_reset(test,
++					 kunit_action_drm_mode_destroy,
++					 mode);
++}
++EXPORT_SYMBOL_GPL(drm_kunit_add_mode_destroy_action);
++
+ /**
+  * drm_kunit_display_mode_from_cea_vic() - return a mode for CEA VIC for a KUnit test
+  * @test: The test context object
+diff --git a/drivers/gpu/drm/tests/drm_modes_test.c b/drivers/gpu/drm/tests/drm_modes_test.c
+index 6ed51f99e133c9..7ba646d87856f5 100644
+--- a/drivers/gpu/drm/tests/drm_modes_test.c
++++ b/drivers/gpu/drm/tests/drm_modes_test.c
+@@ -40,6 +40,7 @@ static void drm_test_modes_analog_tv_ntsc_480i(struct kunit *test)
+ {
+ 	struct drm_test_modes_priv *priv = test->priv;
+ 	struct drm_display_mode *mode;
++	int ret;
+ 
+ 	mode = drm_analog_tv_mode(priv->drm,
+ 				  DRM_MODE_TV_MODE_NTSC,
+@@ -47,6 +48,9 @@ static void drm_test_modes_analog_tv_ntsc_480i(struct kunit *test)
+ 				  true);
+ 	KUNIT_ASSERT_NOT_NULL(test, mode);
+ 
++	ret = drm_kunit_add_mode_destroy_action(test, mode);
++	KUNIT_ASSERT_EQ(test, ret, 0);
++
+ 	KUNIT_EXPECT_EQ(test, drm_mode_vrefresh(mode), 60);
+ 	KUNIT_EXPECT_EQ(test, mode->hdisplay, 720);
+ 
+@@ -70,6 +74,7 @@ static void drm_test_modes_analog_tv_ntsc_480i_inlined(struct kunit *test)
+ {
+ 	struct drm_test_modes_priv *priv = test->priv;
+ 	struct drm_display_mode *expected, *mode;
++	int ret;
+ 
+ 	expected = drm_analog_tv_mode(priv->drm,
+ 				      DRM_MODE_TV_MODE_NTSC,
+@@ -77,9 +82,15 @@ static void drm_test_modes_analog_tv_ntsc_480i_inlined(struct kunit *test)
+ 				      true);
+ 	KUNIT_ASSERT_NOT_NULL(test, expected);
+ 
++	ret = drm_kunit_add_mode_destroy_action(test, expected);
++	KUNIT_ASSERT_EQ(test, ret, 0);
++
+ 	mode = drm_mode_analog_ntsc_480i(priv->drm);
+ 	KUNIT_ASSERT_NOT_NULL(test, mode);
+ 
++	ret = drm_kunit_add_mode_destroy_action(test, mode);
++	KUNIT_ASSERT_EQ(test, ret, 0);
++
+ 	KUNIT_EXPECT_TRUE(test, drm_mode_equal(expected, mode));
+ }
+ 
+@@ -87,6 +98,7 @@ static void drm_test_modes_analog_tv_pal_576i(struct kunit *test)
+ {
+ 	struct drm_test_modes_priv *priv = test->priv;
+ 	struct drm_display_mode *mode;
++	int ret;
+ 
+ 	mode = drm_analog_tv_mode(priv->drm,
+ 				  DRM_MODE_TV_MODE_PAL,
+@@ -94,6 +106,9 @@ static void drm_test_modes_analog_tv_pal_576i(struct kunit *test)
+ 				  true);
+ 	KUNIT_ASSERT_NOT_NULL(test, mode);
+ 
++	ret = drm_kunit_add_mode_destroy_action(test, mode);
++	KUNIT_ASSERT_EQ(test, ret, 0);
++
+ 	KUNIT_EXPECT_EQ(test, drm_mode_vrefresh(mode), 50);
+ 	KUNIT_EXPECT_EQ(test, mode->hdisplay, 720);
+ 
+@@ -117,6 +132,7 @@ static void drm_test_modes_analog_tv_pal_576i_inlined(struct kunit *test)
+ {
+ 	struct drm_test_modes_priv *priv = test->priv;
+ 	struct drm_display_mode *expected, *mode;
++	int ret;
+ 
+ 	expected = drm_analog_tv_mode(priv->drm,
+ 				      DRM_MODE_TV_MODE_PAL,
+@@ -124,9 +140,15 @@ static void drm_test_modes_analog_tv_pal_576i_inlined(struct kunit *test)
+ 				      true);
+ 	KUNIT_ASSERT_NOT_NULL(test, expected);
+ 
++	ret = drm_kunit_add_mode_destroy_action(test, expected);
++	KUNIT_ASSERT_EQ(test, ret, 0);
++
+ 	mode = drm_mode_analog_pal_576i(priv->drm);
+ 	KUNIT_ASSERT_NOT_NULL(test, mode);
+ 
++	ret = drm_kunit_add_mode_destroy_action(test, mode);
++	KUNIT_ASSERT_EQ(test, ret, 0);
++
+ 	KUNIT_EXPECT_TRUE(test, drm_mode_equal(expected, mode));
+ }
+ 
+diff --git a/drivers/gpu/drm/tests/drm_probe_helper_test.c b/drivers/gpu/drm/tests/drm_probe_helper_test.c
+index bc09ff38aca18e..db0e4f5df275e8 100644
+--- a/drivers/gpu/drm/tests/drm_probe_helper_test.c
++++ b/drivers/gpu/drm/tests/drm_probe_helper_test.c
+@@ -98,7 +98,7 @@ drm_test_connector_helper_tv_get_modes_check(struct kunit *test)
+ 	struct drm_connector *connector = &priv->connector;
+ 	struct drm_cmdline_mode *cmdline = &connector->cmdline_mode;
+ 	struct drm_display_mode *mode;
+-	const struct drm_display_mode *expected;
++	struct drm_display_mode *expected;
+ 	size_t len;
+ 	int ret;
+ 
+@@ -134,6 +134,9 @@ drm_test_connector_helper_tv_get_modes_check(struct kunit *test)
+ 
+ 		KUNIT_EXPECT_TRUE(test, drm_mode_equal(mode, expected));
+ 		KUNIT_EXPECT_TRUE(test, mode->type & DRM_MODE_TYPE_PREFERRED);
++
++		ret = drm_kunit_add_mode_destroy_action(test, expected);
++		KUNIT_ASSERT_EQ(test, ret, 0);
+ 	}
+ 
+ 	if (params->num_expected_modes >= 2) {
+@@ -145,6 +148,9 @@ drm_test_connector_helper_tv_get_modes_check(struct kunit *test)
+ 
+ 		KUNIT_EXPECT_TRUE(test, drm_mode_equal(mode, expected));
+ 		KUNIT_EXPECT_FALSE(test, mode->type & DRM_MODE_TYPE_PREFERRED);
++
++		ret = drm_kunit_add_mode_destroy_action(test, expected);
++		KUNIT_ASSERT_EQ(test, ret, 0);
+ 	}
+ 
+ 	mutex_unlock(&priv->drm->mode_config.mutex);
+diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
+index 98fe8573e054e9..17ba15132a9840 100644
+--- a/drivers/gpu/drm/xe/xe_gt.c
++++ b/drivers/gpu/drm/xe/xe_gt.c
+@@ -32,6 +32,7 @@
+ #include "xe_gt_pagefault.h"
+ #include "xe_gt_printk.h"
+ #include "xe_gt_sriov_pf.h"
++#include "xe_gt_sriov_vf.h"
+ #include "xe_gt_sysfs.h"
+ #include "xe_gt_tlb_invalidation.h"
+ #include "xe_gt_topology.h"
+@@ -647,6 +648,9 @@ static int do_gt_reset(struct xe_gt *gt)
+ {
+ 	int err;
+ 
++	if (IS_SRIOV_VF(gt_to_xe(gt)))
++		return xe_gt_sriov_vf_reset(gt);
++
+ 	xe_gsc_wa_14015076503(gt, true);
+ 
+ 	xe_mmio_write32(gt, GDRST, GRDOM_FULL);
+diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
+index 4ebc82e607af65..f982d6f9f218d8 100644
+--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
++++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
+@@ -57,6 +57,22 @@ static int vf_reset_guc_state(struct xe_gt *gt)
+ 	return err;
+ }
+ 
++/**
++ * xe_gt_sriov_vf_reset - Reset GuC VF internal state.
++ * @gt: the &xe_gt
++ *
++ * It requires functional `GuC MMIO based communication`_.
++ *
++ * Return: 0 on success or a negative error code on failure.
++ */
++int xe_gt_sriov_vf_reset(struct xe_gt *gt)
++{
++	if (!xe_device_uc_enabled(gt_to_xe(gt)))
++		return -ENODEV;
++
++	return vf_reset_guc_state(gt);
++}
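++
++/*
++ * Note: a VF is not allowed to perform a full GT reset on its own, which
++ * is why do_gt_reset() routes VFs here to reset GuC-tracked VF state over
++ * MMIO instead of writing GDRST.
++ */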
++
+ static int guc_action_match_version(struct xe_guc *guc,
+ 				    u32 wanted_branch, u32 wanted_major, u32 wanted_minor,
+ 				    u32 *branch, u32 *major, u32 *minor, u32 *patch)
+diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.h b/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
+index e541ce57bec246..576ff5e795a8b0 100644
+--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
++++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
+@@ -12,6 +12,7 @@ struct drm_printer;
+ struct xe_gt;
+ struct xe_reg;
+ 
++int xe_gt_sriov_vf_reset(struct xe_gt *gt);
+ int xe_gt_sriov_vf_bootstrap(struct xe_gt *gt);
+ int xe_gt_sriov_vf_query_config(struct xe_gt *gt);
+ int xe_gt_sriov_vf_connect(struct xe_gt *gt);
+diff --git a/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c b/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c
+index b53e8d2accdbd7..a440442b4d7270 100644
+--- a/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c
++++ b/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c
+@@ -32,14 +32,61 @@ bool xe_hw_engine_timeout_in_range(u64 timeout, u64 min, u64 max)
+ 	return timeout >= min && timeout <= max;
+ }
+ 
+-static void kobj_xe_hw_engine_release(struct kobject *kobj)
++static void xe_hw_engine_sysfs_kobj_release(struct kobject *kobj)
+ {
+ 	kfree(kobj);
+ }
+ 
++static ssize_t xe_hw_engine_class_sysfs_attr_show(struct kobject *kobj,
++						  struct attribute *attr,
++						  char *buf)
++{
++	struct xe_device *xe = kobj_to_xe(kobj);
++	struct kobj_attribute *kattr;
++	ssize_t ret = -EIO;
++
++	kattr = container_of(attr, struct kobj_attribute, attr);
++	if (kattr->show) {
++		xe_pm_runtime_get(xe);
++		ret = kattr->show(kobj, kattr, buf);
++		xe_pm_runtime_put(xe);
++	}
++
++	return ret;
++}
++
++static ssize_t xe_hw_engine_class_sysfs_attr_store(struct kobject *kobj,
++						   struct attribute *attr,
++						   const char *buf,
++						   size_t count)
++{
++	struct xe_device *xe = kobj_to_xe(kobj);
++	struct kobj_attribute *kattr;
++	ssize_t ret = -EIO;
++
++	kattr = container_of(attr, struct kobj_attribute, attr);
++	if (kattr->store) {
++		xe_pm_runtime_get(xe);
++		ret = kattr->store(kobj, kattr, buf, count);
++		xe_pm_runtime_put(xe);
++	}
++
++	return ret;
++}
++
++static const struct sysfs_ops xe_hw_engine_class_sysfs_ops = {
++	.show = xe_hw_engine_class_sysfs_attr_show,
++	.store = xe_hw_engine_class_sysfs_attr_store,
++};
++
+ static const struct kobj_type kobj_xe_hw_engine_type = {
+-	.release = kobj_xe_hw_engine_release,
+-	.sysfs_ops = &kobj_sysfs_ops
++	.release = xe_hw_engine_sysfs_kobj_release,
++	.sysfs_ops = &xe_hw_engine_class_sysfs_ops,
++};
++
++static const struct kobj_type kobj_xe_hw_engine_type_def = {
++	.release = xe_hw_engine_sysfs_kobj_release,
++	.sysfs_ops = &kobj_sysfs_ops,
+ };
+ 
+ static ssize_t job_timeout_max_store(struct kobject *kobj,
+@@ -543,7 +590,7 @@ static int xe_add_hw_engine_class_defaults(struct xe_device *xe,
+ 	if (!kobj)
+ 		return -ENOMEM;
+ 
+-	kobject_init(kobj, &kobj_xe_hw_engine_type);
++	kobject_init(kobj, &kobj_xe_hw_engine_type_def);
+ 	err = kobject_add(kobj, parent, "%s", ".defaults");
+ 	if (err)
+ 		goto err_object;
+@@ -559,57 +606,6 @@ static int xe_add_hw_engine_class_defaults(struct xe_device *xe,
+ 	return err;
+ }
+ 
+-static void xe_hw_engine_sysfs_kobj_release(struct kobject *kobj)
+-{
+-	kfree(kobj);
+-}
+-
+-static ssize_t xe_hw_engine_class_sysfs_attr_show(struct kobject *kobj,
+-						  struct attribute *attr,
+-						  char *buf)
+-{
+-	struct xe_device *xe = kobj_to_xe(kobj);
+-	struct kobj_attribute *kattr;
+-	ssize_t ret = -EIO;
+-
+-	kattr = container_of(attr, struct kobj_attribute, attr);
+-	if (kattr->show) {
+-		xe_pm_runtime_get(xe);
+-		ret = kattr->show(kobj, kattr, buf);
+-		xe_pm_runtime_put(xe);
+-	}
+-
+-	return ret;
+-}
+-
+-static ssize_t xe_hw_engine_class_sysfs_attr_store(struct kobject *kobj,
+-						   struct attribute *attr,
+-						   const char *buf,
+-						   size_t count)
+-{
+-	struct xe_device *xe = kobj_to_xe(kobj);
+-	struct kobj_attribute *kattr;
+-	ssize_t ret = -EIO;
+-
+-	kattr = container_of(attr, struct kobj_attribute, attr);
+-	if (kattr->store) {
+-		xe_pm_runtime_get(xe);
+-		ret = kattr->store(kobj, kattr, buf, count);
+-		xe_pm_runtime_put(xe);
+-	}
+-
+-	return ret;
+-}
+-
+-static const struct sysfs_ops xe_hw_engine_class_sysfs_ops = {
+-	.show = xe_hw_engine_class_sysfs_attr_show,
+-	.store = xe_hw_engine_class_sysfs_attr_store,
+-};
+-
+-static const struct kobj_type xe_hw_engine_sysfs_kobj_type = {
+-	.release = xe_hw_engine_sysfs_kobj_release,
+-	.sysfs_ops = &xe_hw_engine_class_sysfs_ops,
+-};
+ 
+ static void hw_engine_class_sysfs_fini(void *arg)
+ {
+@@ -640,7 +636,7 @@ int xe_hw_engine_class_sysfs_init(struct xe_gt *gt)
+ 	if (!kobj)
+ 		return -ENOMEM;
+ 
+-	kobject_init(kobj, &xe_hw_engine_sysfs_kobj_type);
++	kobject_init(kobj, &kobj_xe_hw_engine_type);
+ 
+ 	err = kobject_add(kobj, gt->sysfs, "engines");
+ 	if (err)
+diff --git a/drivers/gpu/drm/xe/xe_tuning.c b/drivers/gpu/drm/xe/xe_tuning.c
+index 0d5e04158917be..1fb12da21c9e4c 100644
+--- a/drivers/gpu/drm/xe/xe_tuning.c
++++ b/drivers/gpu/drm/xe/xe_tuning.c
+@@ -97,14 +97,6 @@ static const struct xe_rtp_entry_sr engine_tunings[] = {
+ };
+ 
+ static const struct xe_rtp_entry_sr lrc_tunings[] = {
+-	{ XE_RTP_NAME("Tuning: ganged timer, also known as 16011163337"),
+-	  XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, 1210), ENGINE_CLASS(RENDER)),
+-	  /* read verification is ignored due to 1608008084. */
+-	  XE_RTP_ACTIONS(FIELD_SET_NO_READ_MASK(FF_MODE2,
+-						FF_MODE2_GS_TIMER_MASK,
+-						FF_MODE2_GS_TIMER_224))
+-	},
+-
+ 	/* DG2 */
+ 
+ 	{ XE_RTP_NAME("Tuning: L3 cache"),
+diff --git a/drivers/gpu/drm/xe/xe_wa.c b/drivers/gpu/drm/xe/xe_wa.c
+index 37e592b2bf062a..0a1905f8d380a8 100644
+--- a/drivers/gpu/drm/xe/xe_wa.c
++++ b/drivers/gpu/drm/xe/xe_wa.c
+@@ -606,6 +606,13 @@ static const struct xe_rtp_entry_sr engine_was[] = {
+ };
+ 
+ static const struct xe_rtp_entry_sr lrc_was[] = {
++	{ XE_RTP_NAME("16011163337"),
++	  XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, 1210), ENGINE_CLASS(RENDER)),
++	  /* read verification is ignored due to 1608008084. */
++	  XE_RTP_ACTIONS(FIELD_SET_NO_READ_MASK(FF_MODE2,
++						FF_MODE2_GS_TIMER_MASK,
++						FF_MODE2_GS_TIMER_224))
++	},
+ 	{ XE_RTP_NAME("1409342910, 14010698770, 14010443199, 1408979724, 1409178076, 1409207793, 1409217633, 1409252684, 1409347922, 1409142259"),
+ 	  XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, 1210)),
+ 	  XE_RTP_ACTIONS(SET(COMMON_SLICE_CHICKEN3,
+diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
+index 4500d7653b05ee..95a4ede2709917 100644
+--- a/drivers/hid/Kconfig
++++ b/drivers/hid/Kconfig
+@@ -1205,6 +1205,20 @@ config HID_U2FZERO
+ 	  allow setting the brightness to anything but 1, which will
+ 	  trigger a single blink and immediately reset back to 0.
+ 
++config HID_UNIVERSAL_PIDFF
++	tristate "universal-pidff: extended USB PID driver compatibility and usage"
++	depends on USB_HID
++	depends on HID_PID
++	help
++	  Extended PID support for selected devices.
++
++	  Contains report fixups, an extended usable button range and
++	  pidff quirk management to improve compatibility with slightly
++	  non-compliant USB PID devices, plus better fuzz/flat values for
++	  high precision direct drive devices.
++
++	  Supports Moza Racing, Cammus, VRS, FFBeast and more.
++
+ config HID_WACOM
+ 	tristate "Wacom Intuos/Graphire tablet support (USB)"
+ 	depends on USB_HID
+diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
+index f2900ee2ef8582..27ee02bf6f26d3 100644
+--- a/drivers/hid/Makefile
++++ b/drivers/hid/Makefile
+@@ -139,6 +139,7 @@ hid-uclogic-objs		:= hid-uclogic-core.o \
+ 				   hid-uclogic-params.o
+ obj-$(CONFIG_HID_UCLOGIC)	+= hid-uclogic.o
+ obj-$(CONFIG_HID_UDRAW_PS3)	+= hid-udraw-ps3.o
++obj-$(CONFIG_HID_UNIVERSAL_PIDFF)	+= hid-universal-pidff.o
+ obj-$(CONFIG_HID_LED)		+= hid-led.o
+ obj-$(CONFIG_HID_XIAOMI)	+= hid-xiaomi.o
+ obj-$(CONFIG_HID_XINMO)		+= hid-xinmo.o
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index c6ae7c4268b84c..92baa34f42f28a 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -190,6 +190,12 @@
+ #define USB_DEVICE_ID_APPLE_TOUCHBAR_BACKLIGHT 0x8102
+ #define USB_DEVICE_ID_APPLE_TOUCHBAR_DISPLAY 0x8302
+ 
++#define USB_VENDOR_ID_ASETEK			0x2433
++#define USB_DEVICE_ID_ASETEK_INVICTA		0xf300
++#define USB_DEVICE_ID_ASETEK_FORTE		0xf301
++#define USB_DEVICE_ID_ASETEK_LA_PRIMA		0xf303
++#define USB_DEVICE_ID_ASETEK_TONY_KANAAN	0xf306
++
+ #define USB_VENDOR_ID_ASUS		0x0486
+ #define USB_DEVICE_ID_ASUS_T91MT	0x0185
+ #define USB_DEVICE_ID_ASUSTEK_MULTITOUCH_YFO	0x0186
+@@ -262,6 +268,10 @@
+ #define USB_DEVICE_ID_BTC_EMPREX_REMOTE	0x5578
+ #define USB_DEVICE_ID_BTC_EMPREX_REMOTE_2	0x5577
+ 
++#define USB_VENDOR_ID_CAMMUS		0x3416
++#define USB_DEVICE_ID_CAMMUS_C5		0x0301
++#define USB_DEVICE_ID_CAMMUS_C12	0x0302
++
+ #define USB_VENDOR_ID_CANDO		0x2087
+ #define USB_DEVICE_ID_CANDO_PIXCIR_MULTI_TOUCH 0x0703
+ #define USB_DEVICE_ID_CANDO_MULTI_TOUCH	0x0a01
+@@ -453,6 +463,11 @@
+ #define USB_VENDOR_ID_EVISION           0x320f
+ #define USB_DEVICE_ID_EVISION_ICL01     0x5041
+ 
++#define USB_VENDOR_ID_FFBEAST		0x045b
++#define USB_DEVICE_ID_FFBEAST_JOYSTICK	0x58f9
++#define USB_DEVICE_ID_FFBEAST_RUDDER	0x5968
++#define USB_DEVICE_ID_FFBEAST_WHEEL	0x59d7
++
+ #define USB_VENDOR_ID_FLATFROG		0x25b5
+ #define USB_DEVICE_ID_MULTITOUCH_3200	0x0002
+ 
+@@ -813,6 +828,13 @@
+ #define I2C_DEVICE_ID_LG_8001		0x8001
+ #define I2C_DEVICE_ID_LG_7010		0x7010
+ 
++#define USB_VENDOR_ID_LITE_STAR		0x11ff
++#define USB_DEVICE_ID_PXN_V10		0x3245
++#define USB_DEVICE_ID_PXN_V12		0x1212
++#define USB_DEVICE_ID_PXN_V12_LITE	0x1112
++#define USB_DEVICE_ID_PXN_V12_LITE_2	0x1211
++#define USB_DEVICE_LITE_STAR_GT987_FF	0x2141
++
+ #define USB_VENDOR_ID_LOGITECH		0x046d
+ #define USB_DEVICE_ID_LOGITECH_Z_10_SPK	0x0a07
+ #define USB_DEVICE_ID_LOGITECH_AUDIOHUB 0x0a0e
+@@ -960,6 +982,18 @@
+ #define USB_VENDOR_ID_MONTEREY		0x0566
+ #define USB_DEVICE_ID_GENIUS_KB29E	0x3004
+ 
++#define USB_VENDOR_ID_MOZA		0x346e
++#define USB_DEVICE_ID_MOZA_R3		0x0005
++#define USB_DEVICE_ID_MOZA_R3_2		0x0015
++#define USB_DEVICE_ID_MOZA_R5		0x0004
++#define USB_DEVICE_ID_MOZA_R5_2		0x0014
++#define USB_DEVICE_ID_MOZA_R9		0x0002
++#define USB_DEVICE_ID_MOZA_R9_2		0x0012
++#define USB_DEVICE_ID_MOZA_R12		0x0006
++#define USB_DEVICE_ID_MOZA_R12_2	0x0016
++#define USB_DEVICE_ID_MOZA_R16_R21	0x0000
++#define USB_DEVICE_ID_MOZA_R16_R21_2	0x0010
++
+ #define USB_VENDOR_ID_MSI		0x1770
+ #define USB_DEVICE_ID_MSI_GT683R_LED_PANEL 0xff00
+ 
+@@ -1371,6 +1405,9 @@
+ #define USB_DEVICE_ID_VELLEMAN_K8061_FIRST	0x8061
+ #define USB_DEVICE_ID_VELLEMAN_K8061_LAST	0x8068
+ 
++#define USB_VENDOR_ID_VRS	0x0483
++#define USB_DEVICE_ID_VRS_DFP	0xa355
++
+ #define USB_VENDOR_ID_VTL		0x0306
+ #define USB_DEVICE_ID_VTL_MULTITOUCH_FF3F	0xff3f
+ 
+diff --git a/drivers/hid/hid-universal-pidff.c b/drivers/hid/hid-universal-pidff.c
+new file mode 100644
+index 00000000000000..5b89ec7b5c26c5
+--- /dev/null
++++ b/drivers/hid/hid-universal-pidff.c
+@@ -0,0 +1,202 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ * HID UNIVERSAL PIDFF
++ * hid-pidff wrapper for PID-enabled devices
++ * Handles device reports, quirks and extends usable button range
++ *
++ * Copyright (c) 2024, 2025 Oleg Makarenko
++ * Copyright (c) 2024, 2025 Tomasz Pakuła
++ */
++
++#include <linux/device.h>
++#include <linux/hid.h>
++#include <linux/module.h>
++#include <linux/input-event-codes.h>
++#include "hid-ids.h"
++#include "usbhid/hid-pidff.h"
++
++#define JOY_RANGE (BTN_DEAD - BTN_JOYSTICK + 1)
++
++/*
++ * Map buttons manually to extend the default joystick button limit
++ */
++static int universal_pidff_input_mapping(struct hid_device *hdev,
++	struct hid_input *hi, struct hid_field *field, struct hid_usage *usage,
++	unsigned long **bit, int *max)
++{
++	if ((usage->hid & HID_USAGE_PAGE) != HID_UP_BUTTON)
++		return 0;
++
++	if (field->application != HID_GD_JOYSTICK)
++		return 0;
++
++	int button = ((usage->hid - 1) & HID_USAGE);
++	int code = button + BTN_JOYSTICK;
++
++	/* Detect the end of JOYSTICK buttons range */
++	if (code > BTN_DEAD)
++		code = button + KEY_NEXT_FAVORITE - JOY_RANGE;
++
++	/*
++	 * Map overflowing buttons to KEY_RESERVED so they are not
++	 * ignored and can still trigger MSC_SCAN
++	 */
++	if (code > KEY_MAX)
++		code = KEY_RESERVED;
++
++	hid_map_usage(hi, usage, bit, max, EV_KEY, code);
++	hid_dbg(hdev, "Button %d: usage %d", button, code);
++	return 1;
++}
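++
++/*
++ * Worked example: BTN_JOYSTICK is 0x120 and BTN_DEAD is 0x12f, so
++ * JOY_RANGE is 16. Button index 16 (the 17th button) would fall past
++ * BTN_DEAD and is therefore remapped to KEY_NEXT_FAVORITE (0x270);
++ * indices that would map beyond KEY_MAX end up as KEY_RESERVED.
++ */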
++
++/*
++ * Check if the device is PID and initialize it
++ * Add quirks after initialisation
++ */
++static int universal_pidff_probe(struct hid_device *hdev,
++				 const struct hid_device_id *id)
++{
++	int i, error;
++	error = hid_parse(hdev);
++	if (error) {
++		hid_err(hdev, "HID parse failed\n");
++		goto err;
++	}
++
++	error = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF);
++	if (error) {
++		hid_err(hdev, "HID hw start failed\n");
++		goto err;
++	}
++
++	/* Check if device contains PID usage page */
++	error = 1;
++	for (i = 0; i < hdev->collection_size; i++)
++		if ((hdev->collection[i].usage & HID_USAGE_PAGE) == HID_UP_PID) {
++			error = 0;
++			hid_dbg(hdev, "PID usage page found\n");
++			break;
++		}
++
++	/*
++	 * Do not fail as this might be the second "device"
++	 * just for additional buttons/axes. Exit cleanly if the force
++	 * feedback usage page wasn't found (the included devices were
++	 * tested and confirmed to be USB PID after all).
++	 */
++	if (error) {
++		hid_dbg(hdev, "PID usage page not found in the descriptor\n");
++		return 0;
++	}
++
++	/* Check if HID_PID support is enabled */
++	int (*init_function)(struct hid_device *, u32);
++	init_function = hid_pidff_init_with_quirks;
++
++	if (!init_function) {
++		hid_warn(hdev, "HID_PID support not enabled!\n");
++		return 0;
++	}
++
++	error = init_function(hdev, id->driver_data);
++	if (error) {
++		hid_warn(hdev, "Error initialising force feedback\n");
++		goto err;
++	}
++
++	hid_info(hdev, "Universal pidff driver loaded successfully!");
++
++	return 0;
++err:
++	return error;
++}
++
++static int universal_pidff_input_configured(struct hid_device *hdev,
++					    struct hid_input *hidinput)
++{
++	int axis;
++	struct input_dev *input = hidinput->input;
++
++	if (!input->absinfo)
++		return 0;
++
++	/* Decrease fuzz and deadzone on available axes */
++	for (axis = ABS_X; axis <= ABS_BRAKE; axis++) {
++		if (!test_bit(axis, input->absbit))
++			continue;
++
++		input_set_abs_params(input, axis,
++			input->absinfo[axis].minimum,
++			input->absinfo[axis].maximum,
++			axis == ABS_X ? 0 : 8, 0);
++	}
++
++	/* Remove fuzz and deadzone from the second joystick axis */
++	if (hdev->vendor == USB_VENDOR_ID_FFBEAST &&
++	    hdev->product == USB_DEVICE_ID_FFBEAST_JOYSTICK)
++		input_set_abs_params(input, ABS_Y,
++			input->absinfo[ABS_Y].minimum,
++			input->absinfo[ABS_Y].maximum, 0, 0);
++
++	return 0;
++}
++
++static const struct hid_device_id universal_pidff_devices[] = {
++	{ HID_USB_DEVICE(USB_VENDOR_ID_MOZA, USB_DEVICE_ID_MOZA_R3),
++		.driver_data = HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_MOZA, USB_DEVICE_ID_MOZA_R3_2),
++		.driver_data = HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_MOZA, USB_DEVICE_ID_MOZA_R5),
++		.driver_data = HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_MOZA, USB_DEVICE_ID_MOZA_R5_2),
++		.driver_data = HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_MOZA, USB_DEVICE_ID_MOZA_R9),
++		.driver_data = HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_MOZA, USB_DEVICE_ID_MOZA_R9_2),
++		.driver_data = HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_MOZA, USB_DEVICE_ID_MOZA_R12),
++		.driver_data = HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_MOZA, USB_DEVICE_ID_MOZA_R12_2),
++		.driver_data = HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_MOZA, USB_DEVICE_ID_MOZA_R16_R21),
++		.driver_data = HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_MOZA, USB_DEVICE_ID_MOZA_R16_R21_2),
++		.driver_data = HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_CAMMUS, USB_DEVICE_ID_CAMMUS_C5) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_CAMMUS, USB_DEVICE_ID_CAMMUS_C12) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_VRS, USB_DEVICE_ID_VRS_DFP),
++		.driver_data = HID_PIDFF_QUIRK_PERMISSIVE_CONTROL },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_FFBEAST, USB_DEVICE_ID_FFBEAST_JOYSTICK), },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_FFBEAST, USB_DEVICE_ID_FFBEAST_RUDDER), },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_FFBEAST, USB_DEVICE_ID_FFBEAST_WHEEL) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_LITE_STAR, USB_DEVICE_ID_PXN_V10),
++		.driver_data = HID_PIDFF_QUIRK_PERIODIC_SINE_ONLY },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_LITE_STAR, USB_DEVICE_ID_PXN_V12),
++		.driver_data = HID_PIDFF_QUIRK_PERIODIC_SINE_ONLY },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_LITE_STAR, USB_DEVICE_ID_PXN_V12_LITE),
++		.driver_data = HID_PIDFF_QUIRK_PERIODIC_SINE_ONLY },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_LITE_STAR, USB_DEVICE_ID_PXN_V12_LITE_2),
++		.driver_data = HID_PIDFF_QUIRK_PERIODIC_SINE_ONLY },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_LITE_STAR, USB_DEVICE_LITE_STAR_GT987_FF),
++		.driver_data = HID_PIDFF_QUIRK_PERIODIC_SINE_ONLY },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_ASETEK, USB_DEVICE_ID_ASETEK_INVICTA) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_ASETEK, USB_DEVICE_ID_ASETEK_FORTE) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_ASETEK, USB_DEVICE_ID_ASETEK_LA_PRIMA) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_ASETEK, USB_DEVICE_ID_ASETEK_TONY_KANAAN) },
++	{ }
++};
++MODULE_DEVICE_TABLE(hid, universal_pidff_devices);
++
++static struct hid_driver universal_pidff = {
++	.name = "hid-universal-pidff",
++	.id_table = universal_pidff_devices,
++	.input_mapping = universal_pidff_input_mapping,
++	.probe = universal_pidff_probe,
++	.input_configured = universal_pidff_input_configured
++};
++module_hid_driver(universal_pidff);
++
++MODULE_DESCRIPTION("Universal driver for USB PID Force Feedback devices");
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Oleg Makarenko <oleg@makarenk.ooo>");
++MODULE_AUTHOR("Tomasz Pakuła <tomasz.pakula.oficjalny@gmail.com>");
+diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
+index a9e85bdd4cc656..bf0f51ef0149ff 100644
+--- a/drivers/hid/usbhid/hid-core.c
++++ b/drivers/hid/usbhid/hid-core.c
+@@ -35,6 +35,7 @@
+ #include <linux/hid-debug.h>
+ #include <linux/hidraw.h>
+ #include "usbhid.h"
++#include "hid-pidff.h"
+ 
+ /*
+  * Version Information
+diff --git a/drivers/hid/usbhid/hid-pidff.c b/drivers/hid/usbhid/hid-pidff.c
+index 3b4ee21cd81119..8dfd2c554a2762 100644
+--- a/drivers/hid/usbhid/hid-pidff.c
++++ b/drivers/hid/usbhid/hid-pidff.c
+@@ -3,27 +3,27 @@
+  *  Force feedback driver for USB HID PID compliant devices
+  *
+  *  Copyright (c) 2005, 2006 Anssi Hannula <anssi.hannula@gmail.com>
++ *  Upgraded 2025 by Oleg Makarenko and Tomasz Pakuła
+  */
+ 
+-/*
+- */
+-
+-/* #define DEBUG */
+-
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+ 
++#include "hid-pidff.h"
+ #include <linux/input.h>
+ #include <linux/slab.h>
+ #include <linux/usb.h>
+-
+ #include <linux/hid.h>
++#include <linux/minmax.h>
+ 
+-#include "usbhid.h"
+ 
+ #define	PID_EFFECTS_MAX		64
++#define	PID_INFINITE		U16_MAX
+ 
+-/* Report usage table used to put reports into an array */
++/* The Linux Force Feedback API uses milliseconds as its time unit */
++#define FF_TIME_EXPONENT	-3
++#define FF_INFINITE		0
+ 
++/* Report usage table used to put reports into an array */
+ #define PID_SET_EFFECT		0
+ #define PID_EFFECT_OPERATION	1
+ #define PID_DEVICE_GAIN		2
+@@ -44,12 +44,19 @@ static const u8 pidff_reports[] = {
+ 	0x21, 0x77, 0x7d, 0x7f, 0x89, 0x90, 0x96, 0xab,
+ 	0x5a, 0x5f, 0x6e, 0x73, 0x74
+ };
++/*
++ * device_control is really 0x95, but 0x96 is specified
++ * as it is the usage of the only field in that report.
++ */
+ 
+-/* device_control is really 0x95, but 0x96 specified as it is the usage of
+-the only field in that report */
++/* PID special fields */
++#define PID_EFFECT_TYPE			0x25
++#define PID_DIRECTION			0x57
++#define PID_EFFECT_OPERATION_ARRAY	0x78
++#define PID_BLOCK_LOAD_STATUS		0x8b
++#define PID_DEVICE_CONTROL_ARRAY	0x96
+ 
+ /* Value usage tables used to put fields and values into arrays */
+-
+ #define PID_EFFECT_BLOCK_INDEX	0
+ 
+ #define PID_DURATION		1
+@@ -107,10 +114,13 @@ static const u8 pidff_device_gain[] = { 0x7e };
+ static const u8 pidff_pool[] = { 0x80, 0x83, 0xa9 };
+ 
+ /* Special field key tables used to put special field keys into arrays */
+-
+ #define PID_ENABLE_ACTUATORS	0
+-#define PID_RESET		1
+-static const u8 pidff_device_control[] = { 0x97, 0x9a };
++#define PID_DISABLE_ACTUATORS	1
++#define PID_STOP_ALL_EFFECTS	2
++#define PID_RESET		3
++#define PID_PAUSE		4
++#define PID_CONTINUE		5
++static const u8 pidff_device_control[] = { 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c };
+ 
+ #define PID_CONSTANT	0
+ #define PID_RAMP	1
+@@ -130,12 +140,16 @@ static const u8 pidff_effect_types[] = {
+ 
+ #define PID_BLOCK_LOAD_SUCCESS	0
+ #define PID_BLOCK_LOAD_FULL	1
+-static const u8 pidff_block_load_status[] = { 0x8c, 0x8d };
++#define PID_BLOCK_LOAD_ERROR	2
++static const u8 pidff_block_load_status[] = { 0x8c, 0x8d, 0x8e };
+ 
+ #define PID_EFFECT_START	0
+ #define PID_EFFECT_STOP		1
+ static const u8 pidff_effect_operation_status[] = { 0x79, 0x7b };
+ 
++/* Polar direction 90 degrees (East) */
++#define PIDFF_FIXED_WHEEL_DIRECTION	0x4000
++
+ struct pidff_usage {
+ 	struct hid_field *field;
+ 	s32 *value;
+@@ -159,8 +173,10 @@ struct pidff_device {
+ 	struct pidff_usage effect_operation[sizeof(pidff_effect_operation)];
+ 	struct pidff_usage block_free[sizeof(pidff_block_free)];
+ 
+-	/* Special field is a field that is not composed of
+-	   usage<->value pairs that pidff_usage values are */
++	/*
++	 * Special field is a field that is not composed of
++	 * usage<->value pairs that pidff_usage values are
++	 */
+ 
+ 	/* Special field in create_new_effect */
+ 	struct hid_field *create_new_effect_type;
+@@ -184,30 +200,61 @@ struct pidff_device {
+ 	int operation_id[sizeof(pidff_effect_operation_status)];
+ 
+ 	int pid_id[PID_EFFECTS_MAX];
++
++	u32 quirks;
++	u8 effect_count;
+ };
+ 
++/*
++ * Clamp value for a given field
++ */
++static s32 pidff_clamp(s32 i, struct hid_field *field)
++{
++	s32 clamped = clamp(i, field->logical_minimum, field->logical_maximum);
++	pr_debug("clamped from %d to %d", i, clamped);
++	return clamped;
++}
++
+ /*
+  * Scale an unsigned value with range 0..max for the given field
+  */
+ static int pidff_rescale(int i, int max, struct hid_field *field)
+ {
+ 	return i * (field->logical_maximum - field->logical_minimum) / max +
+-	    field->logical_minimum;
++		field->logical_minimum;
+ }
+ 
+ /*
+- * Scale a signed value in range -0x8000..0x7fff for the given field
++ * Scale a signed value in range S16_MIN..S16_MAX for the given field
+  */
+ static int pidff_rescale_signed(int i, struct hid_field *field)
+ {
+-	return i == 0 ? 0 : i >
+-	    0 ? i * field->logical_maximum / 0x7fff : i *
+-	    field->logical_minimum / -0x8000;
++	if (i > 0) return i * field->logical_maximum / S16_MAX;
++	if (i < 0) return i * field->logical_minimum / S16_MIN;
++	return 0;
++}
++
++/*
++ * Scale time value from Linux default (ms) to field units
++ */
++static u32 pidff_rescale_time(u16 time, struct hid_field *field)
++{
++	u32 scaled_time = time;
++	int exponent = field->unit_exponent;
++	pr_debug("time field exponent: %d\n", exponent);
++
++	for (; exponent < FF_TIME_EXPONENT; exponent++)
++		scaled_time *= 10;
++	for (; exponent > FF_TIME_EXPONENT; exponent--)
++		scaled_time /= 10;
++
++	pr_debug("time calculated from %d to %d\n", time, scaled_time);
++	return scaled_time;
+ }
+ 
+ static void pidff_set(struct pidff_usage *usage, u16 value)
+ {
+-	usage->value[0] = pidff_rescale(value, 0xffff, usage->field);
++	usage->value[0] = pidff_rescale(value, U16_MAX, usage->field);
+ 	pr_debug("calculated from %d to %d\n", value, usage->value[0]);
+ }
+ 
+@@ -218,14 +265,35 @@ static void pidff_set_signed(struct pidff_usage *usage, s16 value)
+ 	else {
+ 		if (value < 0)
+ 			usage->value[0] =
+-			    pidff_rescale(-value, 0x8000, usage->field);
++			    pidff_rescale(-value, -S16_MIN, usage->field);
+ 		else
+ 			usage->value[0] =
+-			    pidff_rescale(value, 0x7fff, usage->field);
++			    pidff_rescale(value, S16_MAX, usage->field);
+ 	}
+ 	pr_debug("calculated from %d to %d\n", value, usage->value[0]);
+ }
+ 
++static void pidff_set_time(struct pidff_usage *usage, u16 time)
++{
++	u32 modified_time = pidff_rescale_time(time, usage->field);
++	usage->value[0] = pidff_clamp(modified_time, usage->field);
++}
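++
++/*
++ * Worked example: for a field with unit_exponent -6 (the device counts
++ * in microseconds), a 10 ms value from the Linux API is multiplied by 10
++ * three times by pidff_rescale_time(), giving 10000, and then clamped to
++ * the field's logical range.
++ */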
++
++static void pidff_set_duration(struct pidff_usage *usage, u16 duration)
++{
++	/* Infinite value conversion from Linux API -> PID */
++	if (duration == FF_INFINITE)
++		duration = PID_INFINITE;
++
++	/* PID defines INFINITE as the max possible value for duration field */
++	if (duration == PID_INFINITE) {
++		usage->value[0] = (1U << usage->field->report_size) - 1;
++		return;
++	}
++
++	pidff_set_time(usage, duration);
++}
++
+ /*
+  * Send envelope report to the device
+  */
+@@ -233,19 +301,21 @@ static void pidff_set_envelope_report(struct pidff_device *pidff,
+ 				      struct ff_envelope *envelope)
+ {
+ 	pidff->set_envelope[PID_EFFECT_BLOCK_INDEX].value[0] =
+-	    pidff->block_load[PID_EFFECT_BLOCK_INDEX].value[0];
++		pidff->block_load[PID_EFFECT_BLOCK_INDEX].value[0];
+ 
+ 	pidff->set_envelope[PID_ATTACK_LEVEL].value[0] =
+-	    pidff_rescale(envelope->attack_level >
+-			  0x7fff ? 0x7fff : envelope->attack_level, 0x7fff,
+-			  pidff->set_envelope[PID_ATTACK_LEVEL].field);
++		pidff_rescale(envelope->attack_level >
++			S16_MAX ? S16_MAX : envelope->attack_level, S16_MAX,
++			pidff->set_envelope[PID_ATTACK_LEVEL].field);
+ 	pidff->set_envelope[PID_FADE_LEVEL].value[0] =
+-	    pidff_rescale(envelope->fade_level >
+-			  0x7fff ? 0x7fff : envelope->fade_level, 0x7fff,
+-			  pidff->set_envelope[PID_FADE_LEVEL].field);
++		pidff_rescale(envelope->fade_level >
++			S16_MAX ? S16_MAX : envelope->fade_level, S16_MAX,
++			pidff->set_envelope[PID_FADE_LEVEL].field);
+ 
+-	pidff->set_envelope[PID_ATTACK_TIME].value[0] = envelope->attack_length;
+-	pidff->set_envelope[PID_FADE_TIME].value[0] = envelope->fade_length;
++	pidff_set_time(&pidff->set_envelope[PID_ATTACK_TIME],
++			envelope->attack_length);
++	pidff_set_time(&pidff->set_envelope[PID_FADE_TIME],
++			envelope->fade_length);
+ 
+ 	hid_dbg(pidff->hid, "attack %u => %d\n",
+ 		envelope->attack_level,
+@@ -261,10 +331,22 @@ static void pidff_set_envelope_report(struct pidff_device *pidff,
+ static int pidff_needs_set_envelope(struct ff_envelope *envelope,
+ 				    struct ff_envelope *old)
+ {
+-	return envelope->attack_level != old->attack_level ||
+-	       envelope->fade_level != old->fade_level ||
++	bool needs_new_envelope;
++	needs_new_envelope = envelope->attack_level  != 0 ||
++			     envelope->fade_level    != 0 ||
++			     envelope->attack_length != 0 ||
++			     envelope->fade_length   != 0;
++
++	if (!needs_new_envelope)
++		return false;
++
++	if (!old)
++		return needs_new_envelope;
++
++	return envelope->attack_level  != old->attack_level  ||
++	       envelope->fade_level    != old->fade_level    ||
+ 	       envelope->attack_length != old->attack_length ||
+-	       envelope->fade_length != old->fade_length;
++	       envelope->fade_length   != old->fade_length;
+ }
+ 
+ /*
+@@ -301,17 +383,27 @@ static void pidff_set_effect_report(struct pidff_device *pidff,
+ 		pidff->block_load[PID_EFFECT_BLOCK_INDEX].value[0];
+ 	pidff->set_effect_type->value[0] =
+ 		pidff->create_new_effect_type->value[0];
+-	pidff->set_effect[PID_DURATION].value[0] = effect->replay.length;
++
++	pidff_set_duration(&pidff->set_effect[PID_DURATION],
++		effect->replay.length);
++
+ 	pidff->set_effect[PID_TRIGGER_BUTTON].value[0] = effect->trigger.button;
+-	pidff->set_effect[PID_TRIGGER_REPEAT_INT].value[0] =
+-		effect->trigger.interval;
++	pidff_set_time(&pidff->set_effect[PID_TRIGGER_REPEAT_INT],
++			effect->trigger.interval);
+ 	pidff->set_effect[PID_GAIN].value[0] =
+ 		pidff->set_effect[PID_GAIN].field->logical_maximum;
+ 	pidff->set_effect[PID_DIRECTION_ENABLE].value[0] = 1;
+-	pidff->effect_direction->value[0] =
+-		pidff_rescale(effect->direction, 0xffff,
+-				pidff->effect_direction);
+-	pidff->set_effect[PID_START_DELAY].value[0] = effect->replay.delay;
++
++	/* Use fixed direction if needed */
++	pidff->effect_direction->value[0] = pidff_rescale(
++		pidff->quirks & HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION ?
++		PIDFF_FIXED_WHEEL_DIRECTION : effect->direction,
++		U16_MAX, pidff->effect_direction);
++
++	/* Omit setting delay field if it's missing */
++	if (!(pidff->quirks & HID_PIDFF_QUIRK_MISSING_DELAY))
++		pidff_set_time(&pidff->set_effect[PID_START_DELAY],
++				effect->replay.delay);
+ 
+ 	hid_hw_request(pidff->hid, pidff->reports[PID_SET_EFFECT],
+ 			HID_REQ_SET_REPORT);
+@@ -343,11 +435,11 @@ static void pidff_set_periodic_report(struct pidff_device *pidff,
+ 	pidff_set_signed(&pidff->set_periodic[PID_OFFSET],
+ 			 effect->u.periodic.offset);
+ 	pidff_set(&pidff->set_periodic[PID_PHASE], effect->u.periodic.phase);
+-	pidff->set_periodic[PID_PERIOD].value[0] = effect->u.periodic.period;
++	pidff_set_time(&pidff->set_periodic[PID_PERIOD],
++			effect->u.periodic.period);
+ 
+ 	hid_hw_request(pidff->hid, pidff->reports[PID_SET_PERIODIC],
+ 			HID_REQ_SET_REPORT);
+-
+ }
+ 
+ /*
+@@ -368,13 +460,19 @@ static int pidff_needs_set_periodic(struct ff_effect *effect,
+ static void pidff_set_condition_report(struct pidff_device *pidff,
+ 				       struct ff_effect *effect)
+ {
+-	int i;
++	int i, max_axis;
++
++	/* Devices missing Parameter Block Offset can only have one axis */
++	max_axis = pidff->quirks & HID_PIDFF_QUIRK_MISSING_PBO ? 1 : 2;
+ 
+ 	pidff->set_condition[PID_EFFECT_BLOCK_INDEX].value[0] =
+ 		pidff->block_load[PID_EFFECT_BLOCK_INDEX].value[0];
+ 
+-	for (i = 0; i < 2; i++) {
+-		pidff->set_condition[PID_PARAM_BLOCK_OFFSET].value[0] = i;
++	for (i = 0; i < max_axis; i++) {
++		/* Omit Parameter Block Offset if missing */
++		if (!(pidff->quirks & HID_PIDFF_QUIRK_MISSING_PBO))
++			pidff->set_condition[PID_PARAM_BLOCK_OFFSET].value[0] = i;
++
+ 		pidff_set_signed(&pidff->set_condition[PID_CP_OFFSET],
+ 				 effect->u.condition[i].center);
+ 		pidff_set_signed(&pidff->set_condition[PID_POS_COEFFICIENT],
+@@ -441,9 +539,104 @@ static int pidff_needs_set_ramp(struct ff_effect *effect, struct ff_effect *old)
+ 	       effect->u.ramp.end_level != old->u.ramp.end_level;
+ }
+ 
++/*
++ * Set device gain
++ */
++static void pidff_set_gain_report(struct pidff_device *pidff, u16 gain)
++{
++	if (!pidff->device_gain[PID_DEVICE_GAIN_FIELD].field)
++		return;
++
++	pidff_set(&pidff->device_gain[PID_DEVICE_GAIN_FIELD], gain);
++	hid_hw_request(pidff->hid, pidff->reports[PID_DEVICE_GAIN],
++			HID_REQ_SET_REPORT);
++}
++
++/*
++ * Send device control report to the device
++ */
++static void pidff_set_device_control(struct pidff_device *pidff, int field)
++{
++	int i, index;
++	int field_index = pidff->control_id[field];
++
++	if (field_index < 1)
++		return;
++
++	/* Detect if the field is a bitmask variable or an array */
++	if (pidff->device_control->flags & HID_MAIN_ITEM_VARIABLE) {
++		hid_dbg(pidff->hid, "DEVICE_CONTROL is a bitmask\n");
++
++		/* Clear current bitmask */
++		for (i = 0; i < sizeof(pidff_device_control); i++) {
++			index = pidff->control_id[i];
++			if (index < 1)
++				continue;
++
++			pidff->device_control->value[index - 1] = 0;
++		}
++
++		pidff->device_control->value[field_index - 1] = 1;
++	} else {
++		hid_dbg(pidff->hid, "DEVICE_CONTROL is an array\n");
++		pidff->device_control->value[0] = field_index;
++	}
++
++	hid_hw_request(pidff->hid, pidff->reports[PID_DEVICE_CONTROL], HID_REQ_SET_REPORT);
++	hid_hw_wait(pidff->hid);
++}
++
++/*
++ * Modify actuators state
++ */
++static void pidff_set_actuators(struct pidff_device *pidff, bool enable)
++{
++	hid_dbg(pidff->hid, "%s actuators\n", enable ? "Enable" : "Disable");
++	pidff_set_device_control(pidff,
++		enable ? PID_ENABLE_ACTUATORS : PID_DISABLE_ACTUATORS);
++}
++
++/*
++ * Reset the device, stop all effects, enable actuators
++ */
++static void pidff_reset(struct pidff_device *pidff)
++{
++	/* We reset twice as sometimes hid_wait_io isn't waiting long enough */
++	pidff_set_device_control(pidff, PID_RESET);
++	pidff_set_device_control(pidff, PID_RESET);
++	pidff->effect_count = 0;
++
++	pidff_set_device_control(pidff, PID_STOP_ALL_EFFECTS);
++	pidff_set_actuators(pidff, 1);
++}
++
++/*
++ * Fetch pool report
++ */
++static void pidff_fetch_pool(struct pidff_device *pidff)
++{
++	int i;
++	struct hid_device *hid = pidff->hid;
++
++	/* Repeat if PID_SIMULTANEOUS_MAX < 2 to make sure it's correct */
++	for (i = 0; i < 20; i++) {
++		hid_hw_request(hid, pidff->reports[PID_POOL], HID_REQ_GET_REPORT);
++		hid_hw_wait(hid);
++
++		if (!pidff->pool[PID_SIMULTANEOUS_MAX].value)
++			return;
++		if (pidff->pool[PID_SIMULTANEOUS_MAX].value[0] >= 2)
++			return;
++	}
++	hid_warn(hid, "device reports %d simultaneous effects\n",
++		 pidff->pool[PID_SIMULTANEOUS_MAX].value[0]);
++}
++
+ /*
+  * Send a request for effect upload to the device
+  *
++ * Reset and enable actuators if no effects were present on the device
++ *
+  * Returns 0 if device reported success, -ENOSPC if the device reported memory
+  * is full. Upon unknown response the function will retry for 60 times, if
+  * still unsuccessful -EIO is returned.
+@@ -452,6 +645,9 @@ static int pidff_request_effect_upload(struct pidff_device *pidff, int efnum)
+ {
+ 	int j;
+ 
++	if (!pidff->effect_count)
++		pidff_reset(pidff);
++
+ 	pidff->create_new_effect_type->value[0] = efnum;
+ 	hid_hw_request(pidff->hid, pidff->reports[PID_CREATE_NEW_EFFECT],
+ 			HID_REQ_SET_REPORT);
+@@ -471,6 +667,8 @@ static int pidff_request_effect_upload(struct pidff_device *pidff, int efnum)
+ 			hid_dbg(pidff->hid, "device reported free memory: %d bytes\n",
+ 				 pidff->block_load[PID_RAM_POOL_AVAILABLE].value ?
+ 				 pidff->block_load[PID_RAM_POOL_AVAILABLE].value[0] : -1);
++
++			pidff->effect_count++;
+ 			return 0;
+ 		}
+ 		if (pidff->block_load_status->value[0] ==
+@@ -480,6 +678,11 @@ static int pidff_request_effect_upload(struct pidff_device *pidff, int efnum)
+ 				pidff->block_load[PID_RAM_POOL_AVAILABLE].value[0] : -1);
+ 			return -ENOSPC;
+ 		}
++		if (pidff->block_load_status->value[0] ==
++		    pidff->status_id[PID_BLOCK_LOAD_ERROR]) {
++			hid_dbg(pidff->hid, "device error during effect creation\n");
++			return -EREMOTEIO;
++		}
+ 	}
+ 	hid_err(pidff->hid, "pid_block_load failed 60 times\n");
+ 	return -EIO;
+@@ -498,7 +701,8 @@ static void pidff_playback_pid(struct pidff_device *pidff, int pid_id, int n)
+ 	} else {
+ 		pidff->effect_operation_status->value[0] =
+ 			pidff->operation_id[PID_EFFECT_START];
+-		pidff->effect_operation[PID_LOOP_COUNT].value[0] = n;
++		pidff->effect_operation[PID_LOOP_COUNT].value[0] =
++			pidff_clamp(n, pidff->effect_operation[PID_LOOP_COUNT].field);
+ 	}
+ 
+ 	hid_hw_request(pidff->hid, pidff->reports[PID_EFFECT_OPERATION],
+@@ -511,20 +715,22 @@ static void pidff_playback_pid(struct pidff_device *pidff, int pid_id, int n)
+ static int pidff_playback(struct input_dev *dev, int effect_id, int value)
+ {
+ 	struct pidff_device *pidff = dev->ff->private;
+-
+ 	pidff_playback_pid(pidff, pidff->pid_id[effect_id], value);
+-
+ 	return 0;
+ }
+ 
+ /*
+  * Erase effect with PID id
++ * Decrease the device effect counter
+  */
+ static void pidff_erase_pid(struct pidff_device *pidff, int pid_id)
+ {
+ 	pidff->block_free[PID_EFFECT_BLOCK_INDEX].value[0] = pid_id;
+ 	hid_hw_request(pidff->hid, pidff->reports[PID_BLOCK_FREE],
+ 			HID_REQ_SET_REPORT);
++
++	if (pidff->effect_count > 0)
++		pidff->effect_count--;
+ }
+ 
+ /*
+@@ -537,8 +743,11 @@ static int pidff_erase_effect(struct input_dev *dev, int effect_id)
+ 
+ 	hid_dbg(pidff->hid, "starting to erase %d/%d\n",
+ 		effect_id, pidff->pid_id[effect_id]);
+-	/* Wait for the queue to clear. We do not want a full fifo to
+-	   prevent the effect removal. */
++
++	/*
++	 * Wait for the queue to clear. We do not want
++	 * a full fifo to prevent the effect removal.
++	 */
+ 	hid_hw_wait(pidff->hid);
+ 	pidff_playback_pid(pidff, pid_id, 0);
+ 	pidff_erase_pid(pidff, pid_id);
+@@ -574,11 +783,9 @@ static int pidff_upload_effect(struct input_dev *dev, struct ff_effect *effect,
+ 			pidff_set_effect_report(pidff, effect);
+ 		if (!old || pidff_needs_set_constant(effect, old))
+ 			pidff_set_constant_force_report(pidff, effect);
+-		if (!old ||
+-		    pidff_needs_set_envelope(&effect->u.constant.envelope,
+-					&old->u.constant.envelope))
+-			pidff_set_envelope_report(pidff,
+-					&effect->u.constant.envelope);
++		if (pidff_needs_set_envelope(&effect->u.constant.envelope,
++					old ? &old->u.constant.envelope : NULL))
++			pidff_set_envelope_report(pidff, &effect->u.constant.envelope);
+ 		break;
+ 
+ 	case FF_PERIODIC:
+@@ -604,6 +811,9 @@ static int pidff_upload_effect(struct input_dev *dev, struct ff_effect *effect,
+ 				return -EINVAL;
+ 			}
+ 
++			if (pidff->quirks & HID_PIDFF_QUIRK_PERIODIC_SINE_ONLY)
++				type_id = PID_SINE;
++
+ 			error = pidff_request_effect_upload(pidff,
+ 					pidff->type_id[type_id]);
+ 			if (error)
+@@ -613,11 +823,9 @@ static int pidff_upload_effect(struct input_dev *dev, struct ff_effect *effect,
+ 			pidff_set_effect_report(pidff, effect);
+ 		if (!old || pidff_needs_set_periodic(effect, old))
+ 			pidff_set_periodic_report(pidff, effect);
+-		if (!old ||
+-		    pidff_needs_set_envelope(&effect->u.periodic.envelope,
+-					&old->u.periodic.envelope))
+-			pidff_set_envelope_report(pidff,
+-					&effect->u.periodic.envelope);
++		if (pidff_needs_set_envelope(&effect->u.periodic.envelope,
++					old ? &old->u.periodic.envelope : NULL))
++			pidff_set_envelope_report(pidff, &effect->u.periodic.envelope);
+ 		break;
+ 
+ 	case FF_RAMP:
+@@ -631,56 +839,32 @@ static int pidff_upload_effect(struct input_dev *dev, struct ff_effect *effect,
+ 			pidff_set_effect_report(pidff, effect);
+ 		if (!old || pidff_needs_set_ramp(effect, old))
+ 			pidff_set_ramp_force_report(pidff, effect);
+-		if (!old ||
+-		    pidff_needs_set_envelope(&effect->u.ramp.envelope,
+-					&old->u.ramp.envelope))
+-			pidff_set_envelope_report(pidff,
+-					&effect->u.ramp.envelope);
++		if (pidff_needs_set_envelope(&effect->u.ramp.envelope,
++					old ? &old->u.ramp.envelope : NULL))
++			pidff_set_envelope_report(pidff, &effect->u.ramp.envelope);
+ 		break;
+ 
+ 	case FF_SPRING:
+-		if (!old) {
+-			error = pidff_request_effect_upload(pidff,
+-					pidff->type_id[PID_SPRING]);
+-			if (error)
+-				return error;
+-		}
+-		if (!old || pidff_needs_set_effect(effect, old))
+-			pidff_set_effect_report(pidff, effect);
+-		if (!old || pidff_needs_set_condition(effect, old))
+-			pidff_set_condition_report(pidff, effect);
+-		break;
+-
+-	case FF_FRICTION:
+-		if (!old) {
+-			error = pidff_request_effect_upload(pidff,
+-					pidff->type_id[PID_FRICTION]);
+-			if (error)
+-				return error;
+-		}
+-		if (!old || pidff_needs_set_effect(effect, old))
+-			pidff_set_effect_report(pidff, effect);
+-		if (!old || pidff_needs_set_condition(effect, old))
+-			pidff_set_condition_report(pidff, effect);
+-		break;
+-
+ 	case FF_DAMPER:
+-		if (!old) {
+-			error = pidff_request_effect_upload(pidff,
+-					pidff->type_id[PID_DAMPER]);
+-			if (error)
+-				return error;
+-		}
+-		if (!old || pidff_needs_set_effect(effect, old))
+-			pidff_set_effect_report(pidff, effect);
+-		if (!old || pidff_needs_set_condition(effect, old))
+-			pidff_set_condition_report(pidff, effect);
+-		break;
+-
+ 	case FF_INERTIA:
++	case FF_FRICTION:
+ 		if (!old) {
++			switch (effect->type) {
++			case FF_SPRING:
++				type_id = PID_SPRING;
++				break;
++			case FF_DAMPER:
++				type_id = PID_DAMPER;
++				break;
++			case FF_INERTIA:
++				type_id = PID_INERTIA;
++				break;
++			case FF_FRICTION:
++				type_id = PID_FRICTION;
++				break;
++			}
+ 			error = pidff_request_effect_upload(pidff,
+-					pidff->type_id[PID_INERTIA]);
++					pidff->type_id[type_id]);
+ 			if (error)
+ 				return error;
+ 		}
+@@ -709,11 +893,7 @@ static int pidff_upload_effect(struct input_dev *dev, struct ff_effect *effect,
+  */
+ static void pidff_set_gain(struct input_dev *dev, u16 gain)
+ {
+-	struct pidff_device *pidff = dev->ff->private;
+-
+-	pidff_set(&pidff->device_gain[PID_DEVICE_GAIN_FIELD], gain);
+-	hid_hw_request(pidff->hid, pidff->reports[PID_DEVICE_GAIN],
+-			HID_REQ_SET_REPORT);
++	pidff_set_gain_report(dev->ff->private, gain);
+ }
+ 
+ static void pidff_autocenter(struct pidff_device *pidff, u16 magnitude)
+@@ -736,7 +916,10 @@ static void pidff_autocenter(struct pidff_device *pidff, u16 magnitude)
+ 	pidff->set_effect[PID_TRIGGER_REPEAT_INT].value[0] = 0;
+ 	pidff_set(&pidff->set_effect[PID_GAIN], magnitude);
+ 	pidff->set_effect[PID_DIRECTION_ENABLE].value[0] = 1;
+-	pidff->set_effect[PID_START_DELAY].value[0] = 0;
++
++	/* Omit setting delay field if it's missing */
++	if (!(pidff->quirks & HID_PIDFF_QUIRK_MISSING_DELAY))
++		pidff->set_effect[PID_START_DELAY].value[0] = 0;
+ 
+ 	hid_hw_request(pidff->hid, pidff->reports[PID_SET_EFFECT],
+ 			HID_REQ_SET_REPORT);
+@@ -747,9 +930,7 @@ static void pidff_autocenter(struct pidff_device *pidff, u16 magnitude)
+  */
+ static void pidff_set_autocenter(struct input_dev *dev, u16 magnitude)
+ {
+-	struct pidff_device *pidff = dev->ff->private;
+-
+-	pidff_autocenter(pidff, magnitude);
++	pidff_autocenter(dev->ff->private, magnitude);
+ }
+ 
+ /*
+@@ -758,7 +939,13 @@ static void pidff_set_autocenter(struct input_dev *dev, u16 magnitude)
+ static int pidff_find_fields(struct pidff_usage *usage, const u8 *table,
+ 			     struct hid_report *report, int count, int strict)
+ {
++	if (!report) {
++		pr_debug("pidff_find_fields, null report\n");
++		return -1;
++	}
++
+ 	int i, j, k, found;
++	int return_value = 0;
+ 
+ 	for (k = 0; k < count; k++) {
+ 		found = 0;
+@@ -783,12 +970,22 @@ static int pidff_find_fields(struct pidff_usage *usage, const u8 *table,
+ 			if (found)
+ 				break;
+ 		}
+-		if (!found && strict) {
++		if (!found && table[k] == pidff_set_effect[PID_START_DELAY]) {
++			pr_debug("Delay field not found, but that's OK\n");
++			pr_debug("Setting MISSING_DELAY quirk\n");
++			return_value |= HID_PIDFF_QUIRK_MISSING_DELAY;
++		}
++		else if (!found && table[k] == pidff_set_condition[PID_PARAM_BLOCK_OFFSET]) {
++			pr_debug("PBO field not found, but that's OK\n");
++			pr_debug("Setting MISSING_PBO quirk\n");
++			return_value |= HID_PIDFF_QUIRK_MISSING_PBO;
++		}
++		else if (!found && strict) {
+ 			pr_debug("failed to locate %d\n", k);
+ 			return -1;
+ 		}
+ 	}
+-	return 0;
++	return return_value;
+ }
+ 
+ /*
+@@ -871,6 +1068,11 @@ static int pidff_reports_ok(struct pidff_device *pidff)
+ static struct hid_field *pidff_find_special_field(struct hid_report *report,
+ 						  int usage, int enforce_min)
+ {
++	if (!report) {
++		pr_debug("pidff_find_special_field, null report\n");
++		return NULL;
++	}
++
+ 	int i;
+ 
+ 	for (i = 0; i < report->maxfield; i++) {
+@@ -923,22 +1125,24 @@ static int pidff_find_special_fields(struct pidff_device *pidff)
+ 
+ 	pidff->create_new_effect_type =
+ 		pidff_find_special_field(pidff->reports[PID_CREATE_NEW_EFFECT],
+-					 0x25, 1);
++					 PID_EFFECT_TYPE, 1);
+ 	pidff->set_effect_type =
+ 		pidff_find_special_field(pidff->reports[PID_SET_EFFECT],
+-					 0x25, 1);
++					 PID_EFFECT_TYPE, 1);
+ 	pidff->effect_direction =
+ 		pidff_find_special_field(pidff->reports[PID_SET_EFFECT],
+-					 0x57, 0);
++					 PID_DIRECTION, 0);
+ 	pidff->device_control =
+ 		pidff_find_special_field(pidff->reports[PID_DEVICE_CONTROL],
+-					 0x96, 1);
++			PID_DEVICE_CONTROL_ARRAY,
++			!(pidff->quirks & HID_PIDFF_QUIRK_PERMISSIVE_CONTROL));
++
+ 	pidff->block_load_status =
+ 		pidff_find_special_field(pidff->reports[PID_BLOCK_LOAD],
+-					 0x8b, 1);
++					 PID_BLOCK_LOAD_STATUS, 1);
+ 	pidff->effect_operation_status =
+ 		pidff_find_special_field(pidff->reports[PID_EFFECT_OPERATION],
+-					 0x78, 1);
++					 PID_EFFECT_OPERATION_ARRAY, 1);
+ 
+ 	hid_dbg(pidff->hid, "search done\n");
+ 
+@@ -967,10 +1171,6 @@ static int pidff_find_special_fields(struct pidff_device *pidff)
+ 		return -1;
+ 	}
+ 
+-	pidff_find_special_keys(pidff->control_id, pidff->device_control,
+-				pidff_device_control,
+-				sizeof(pidff_device_control));
+-
+ 	PIDFF_FIND_SPECIAL_KEYS(control_id, device_control, device_control);
+ 
+ 	if (!PIDFF_FIND_SPECIAL_KEYS(type_id, create_new_effect_type,
+@@ -1049,7 +1249,6 @@ static int pidff_find_effects(struct pidff_device *pidff,
+ 		set_bit(FF_FRICTION, dev->ffbit);
+ 
+ 	return 0;
+-
+ }
+ 
+ #define PIDFF_FIND_FIELDS(name, report, strict) \
+@@ -1062,12 +1261,19 @@ static int pidff_find_effects(struct pidff_device *pidff,
+  */
+ static int pidff_init_fields(struct pidff_device *pidff, struct input_dev *dev)
+ {
+-	int envelope_ok = 0;
++	int status = 0;
+ 
+-	if (PIDFF_FIND_FIELDS(set_effect, PID_SET_EFFECT, 1)) {
++	/* Save info about the device not having the DELAY ffb field. */
++	status = PIDFF_FIND_FIELDS(set_effect, PID_SET_EFFECT, 1);
++	if (status == -1) {
+ 		hid_err(pidff->hid, "unknown set_effect report layout\n");
+ 		return -ENODEV;
+ 	}
++	pidff->quirks |= status;
++
++	if (status & HID_PIDFF_QUIRK_MISSING_DELAY)
++		hid_dbg(pidff->hid, "Adding MISSING_DELAY quirk\n");
++
+ 
+ 	PIDFF_FIND_FIELDS(block_load, PID_BLOCK_LOAD, 0);
+ 	if (!pidff->block_load[PID_EFFECT_BLOCK_INDEX].value) {
+@@ -1085,13 +1291,10 @@ static int pidff_init_fields(struct pidff_device *pidff, struct input_dev *dev)
+ 		return -ENODEV;
+ 	}
+ 
+-	if (!PIDFF_FIND_FIELDS(set_envelope, PID_SET_ENVELOPE, 1))
+-		envelope_ok = 1;
+-
+ 	if (pidff_find_special_fields(pidff) || pidff_find_effects(pidff, dev))
+ 		return -ENODEV;
+ 
+-	if (!envelope_ok) {
++	if (PIDFF_FIND_FIELDS(set_envelope, PID_SET_ENVELOPE, 1)) {
+ 		if (test_and_clear_bit(FF_CONSTANT, dev->ffbit))
+ 			hid_warn(pidff->hid,
+ 				 "has constant effect but no envelope\n");
+@@ -1116,16 +1319,20 @@ static int pidff_init_fields(struct pidff_device *pidff, struct input_dev *dev)
+ 		clear_bit(FF_RAMP, dev->ffbit);
+ 	}
+ 
+-	if ((test_bit(FF_SPRING, dev->ffbit) ||
+-	     test_bit(FF_DAMPER, dev->ffbit) ||
+-	     test_bit(FF_FRICTION, dev->ffbit) ||
+-	     test_bit(FF_INERTIA, dev->ffbit)) &&
+-	    PIDFF_FIND_FIELDS(set_condition, PID_SET_CONDITION, 1)) {
+-		hid_warn(pidff->hid, "unknown condition effect layout\n");
+-		clear_bit(FF_SPRING, dev->ffbit);
+-		clear_bit(FF_DAMPER, dev->ffbit);
+-		clear_bit(FF_FRICTION, dev->ffbit);
+-		clear_bit(FF_INERTIA, dev->ffbit);
++	if (test_bit(FF_SPRING, dev->ffbit) ||
++	    test_bit(FF_DAMPER, dev->ffbit) ||
++	    test_bit(FF_FRICTION, dev->ffbit) ||
++	    test_bit(FF_INERTIA, dev->ffbit)) {
++		status = PIDFF_FIND_FIELDS(set_condition, PID_SET_CONDITION, 1);
++
++		if (status < 0) {
++			hid_warn(pidff->hid, "unknown condition effect layout\n");
++			clear_bit(FF_SPRING, dev->ffbit);
++			clear_bit(FF_DAMPER, dev->ffbit);
++			clear_bit(FF_FRICTION, dev->ffbit);
++			clear_bit(FF_INERTIA, dev->ffbit);
++		}
++		pidff->quirks |= status;
+ 	}
+ 
+ 	if (test_bit(FF_PERIODIC, dev->ffbit) &&
+@@ -1142,46 +1349,6 @@ static int pidff_init_fields(struct pidff_device *pidff, struct input_dev *dev)
+ 	return 0;
+ }
+ 
+-/*
+- * Reset the device
+- */
+-static void pidff_reset(struct pidff_device *pidff)
+-{
+-	struct hid_device *hid = pidff->hid;
+-	int i = 0;
+-
+-	pidff->device_control->value[0] = pidff->control_id[PID_RESET];
+-	/* We reset twice as sometimes hid_wait_io isn't waiting long enough */
+-	hid_hw_request(hid, pidff->reports[PID_DEVICE_CONTROL], HID_REQ_SET_REPORT);
+-	hid_hw_wait(hid);
+-	hid_hw_request(hid, pidff->reports[PID_DEVICE_CONTROL], HID_REQ_SET_REPORT);
+-	hid_hw_wait(hid);
+-
+-	pidff->device_control->value[0] =
+-		pidff->control_id[PID_ENABLE_ACTUATORS];
+-	hid_hw_request(hid, pidff->reports[PID_DEVICE_CONTROL], HID_REQ_SET_REPORT);
+-	hid_hw_wait(hid);
+-
+-	/* pool report is sometimes messed up, refetch it */
+-	hid_hw_request(hid, pidff->reports[PID_POOL], HID_REQ_GET_REPORT);
+-	hid_hw_wait(hid);
+-
+-	if (pidff->pool[PID_SIMULTANEOUS_MAX].value) {
+-		while (pidff->pool[PID_SIMULTANEOUS_MAX].value[0] < 2) {
+-			if (i++ > 20) {
+-				hid_warn(pidff->hid,
+-					 "device reports %d simultaneous effects\n",
+-					 pidff->pool[PID_SIMULTANEOUS_MAX].value[0]);
+-				break;
+-			}
+-			hid_dbg(pidff->hid, "pid_pool requested again\n");
+-			hid_hw_request(hid, pidff->reports[PID_POOL],
+-					  HID_REQ_GET_REPORT);
+-			hid_hw_wait(hid);
+-		}
+-	}
+-}
+-
+ /*
+  * Test if autocenter modification is using the supported method
+  */
+@@ -1206,24 +1373,23 @@ static int pidff_check_autocenter(struct pidff_device *pidff,
+ 
+ 	if (pidff->block_load[PID_EFFECT_BLOCK_INDEX].value[0] ==
+ 	    pidff->block_load[PID_EFFECT_BLOCK_INDEX].field->logical_minimum + 1) {
+-		pidff_autocenter(pidff, 0xffff);
++		pidff_autocenter(pidff, U16_MAX);
+ 		set_bit(FF_AUTOCENTER, dev->ffbit);
+ 	} else {
+ 		hid_notice(pidff->hid,
+ 			   "device has unknown autocenter control method\n");
+ 	}
+-
+ 	pidff_erase_pid(pidff,
+ 			pidff->block_load[PID_EFFECT_BLOCK_INDEX].value[0]);
+ 
+ 	return 0;
+-
+ }
+ 
+ /*
+  * Check if the device is PID and initialize it
++ * Set initial quirks
+  */
+-int hid_pidff_init(struct hid_device *hid)
++int hid_pidff_init_with_quirks(struct hid_device *hid, u32 initial_quirks)
+ {
+ 	struct pidff_device *pidff;
+ 	struct hid_input *hidinput = list_entry(hid->inputs.next,
+@@ -1245,6 +1411,8 @@ int hid_pidff_init(struct hid_device *hid)
+ 		return -ENOMEM;
+ 
+ 	pidff->hid = hid;
++	pidff->quirks = initial_quirks;
++	pidff->effect_count = 0;
+ 
+ 	hid_device_io_start(hid);
+ 
+@@ -1261,14 +1429,9 @@ int hid_pidff_init(struct hid_device *hid)
+ 	if (error)
+ 		goto fail;
+ 
+-	pidff_reset(pidff);
+-
+-	if (test_bit(FF_GAIN, dev->ffbit)) {
+-		pidff_set(&pidff->device_gain[PID_DEVICE_GAIN_FIELD], 0xffff);
+-		hid_hw_request(hid, pidff->reports[PID_DEVICE_GAIN],
+-				     HID_REQ_SET_REPORT);
+-	}
+-
++	/* pool report is sometimes messed up, refetch it */
++	pidff_fetch_pool(pidff);
++	pidff_set_gain_report(pidff, U16_MAX);
+ 	error = pidff_check_autocenter(pidff, dev);
+ 	if (error)
+ 		goto fail;
+@@ -1311,6 +1474,7 @@ int hid_pidff_init(struct hid_device *hid)
+ 	ff->playback = pidff_playback;
+ 
+ 	hid_info(dev, "Force feedback for USB HID PID devices by Anssi Hannula <anssi.hannula@gmail.com>\n");
++	hid_dbg(dev, "Active quirks mask: 0x%x\n", pidff->quirks);
+ 
+ 	hid_device_io_stop(hid);
+ 
+@@ -1322,3 +1486,14 @@ int hid_pidff_init(struct hid_device *hid)
+ 	kfree(pidff);
+ 	return error;
+ }
++EXPORT_SYMBOL_GPL(hid_pidff_init_with_quirks);
++
++/*
++ * Check if the device is PID and initialize it
++ * Wrapper kept for compatibility with the old
++ * init function
++ */
++int hid_pidff_init(struct hid_device *hid)
++{
++	return hid_pidff_init_with_quirks(hid, 0);
++}
+diff --git a/drivers/hid/usbhid/hid-pidff.h b/drivers/hid/usbhid/hid-pidff.h
+new file mode 100644
+index 00000000000000..dda571e0a5bd38
+--- /dev/null
++++ b/drivers/hid/usbhid/hid-pidff.h
+@@ -0,0 +1,33 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++#ifndef __HID_PIDFF_H
++#define __HID_PIDFF_H
++
++#include <linux/hid.h>
++
++/* HID PIDFF quirks */
++
++/* Delay field (0xA7) missing. Skip it during set effect report upload */
++#define HID_PIDFF_QUIRK_MISSING_DELAY		BIT(0)
++
++/* Missing Parameter Block Offset (0x23). Skip it during SET_CONDITION
++   report upload */
++#define HID_PIDFF_QUIRK_MISSING_PBO		BIT(1)
++
++/* Initialise device control field even if logical_minimum != 1 */
++#define HID_PIDFF_QUIRK_PERMISSIVE_CONTROL	BIT(2)
++
++/* Use fixed 0x4000 direction during SET_EFFECT report upload */
++#define HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION	BIT(3)
++
++/* Force all periodic effects to be uploaded as SINE */
++#define HID_PIDFF_QUIRK_PERIODIC_SINE_ONLY	BIT(4)
++
++#ifdef CONFIG_HID_PID
++int hid_pidff_init(struct hid_device *hid);
++int hid_pidff_init_with_quirks(struct hid_device *hid, u32 initial_quirks);
++#else
++#define hid_pidff_init NULL
++#define hid_pidff_init_with_quirks NULL
++#endif
++
++#endif
+diff --git a/drivers/hsi/clients/ssi_protocol.c b/drivers/hsi/clients/ssi_protocol.c
+index afe470f3661c77..6105ea9a6c6aa2 100644
+--- a/drivers/hsi/clients/ssi_protocol.c
++++ b/drivers/hsi/clients/ssi_protocol.c
+@@ -401,6 +401,7 @@ static void ssip_reset(struct hsi_client *cl)
+ 	del_timer(&ssi->rx_wd);
+ 	del_timer(&ssi->tx_wd);
+ 	del_timer(&ssi->keep_alive);
++	cancel_work_sync(&ssi->work);
+ 	ssi->main_state = 0;
+ 	ssi->send_state = 0;
+ 	ssi->recv_state = 0;
+diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
+index 53ab814b676ffd..7c1dc42b809bfc 100644
+--- a/drivers/i3c/master.c
++++ b/drivers/i3c/master.c
+@@ -2553,6 +2553,9 @@ static void i3c_master_unregister_i3c_devs(struct i3c_master_controller *master)
+  */
+ void i3c_master_queue_ibi(struct i3c_dev_desc *dev, struct i3c_ibi_slot *slot)
+ {
++	if (!dev->ibi || !slot)
++		return;
++
+ 	atomic_inc(&dev->ibi->pending_ibis);
+ 	queue_work(dev->ibi->wq, &slot->work);
+ }
+diff --git a/drivers/i3c/master/svc-i3c-master.c b/drivers/i3c/master/svc-i3c-master.c
+index 87f98fa8afd582..42102baabcddad 100644
+--- a/drivers/i3c/master/svc-i3c-master.c
++++ b/drivers/i3c/master/svc-i3c-master.c
+@@ -378,7 +378,7 @@ static int svc_i3c_master_handle_ibi(struct svc_i3c_master *master,
+ 	       slot->len < SVC_I3C_FIFO_SIZE) {
+ 		mdatactrl = readl(master->regs + SVC_I3C_MDATACTRL);
+ 		count = SVC_I3C_MDATACTRL_RXCOUNT(mdatactrl);
+-		readsl(master->regs + SVC_I3C_MRDATAB, buf, count);
++		readsb(master->regs + SVC_I3C_MRDATAB, buf, count);
+ 		slot->len += count;
+ 		buf += count;
+ 	}
+diff --git a/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c b/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
+index d525ab43a4aebf..dd7d030d2e8909 100644
+--- a/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
++++ b/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
+@@ -487,17 +487,6 @@ static int tegra241_cmdqv_hw_reset(struct arm_smmu_device *smmu)
+ 
+ /* VCMDQ Resource Helpers */
+ 
+-static void tegra241_vcmdq_free_smmu_cmdq(struct tegra241_vcmdq *vcmdq)
+-{
+-	struct arm_smmu_queue *q = &vcmdq->cmdq.q;
+-	size_t nents = 1 << q->llq.max_n_shift;
+-	size_t qsz = nents << CMDQ_ENT_SZ_SHIFT;
+-
+-	if (!q->base)
+-		return;
+-	dmam_free_coherent(vcmdq->cmdqv->smmu.dev, qsz, q->base, q->base_dma);
+-}
+-
+ static int tegra241_vcmdq_alloc_smmu_cmdq(struct tegra241_vcmdq *vcmdq)
+ {
+ 	struct arm_smmu_device *smmu = &vcmdq->cmdqv->smmu;
+@@ -560,7 +549,8 @@ static void tegra241_vintf_free_lvcmdq(struct tegra241_vintf *vintf, u16 lidx)
+ 	struct tegra241_vcmdq *vcmdq = vintf->lvcmdqs[lidx];
+ 	char header[64];
+ 
+-	tegra241_vcmdq_free_smmu_cmdq(vcmdq);
++	/* Note that the lvcmdq queue memory space is managed by devres */
++
+ 	tegra241_vintf_deinit_lvcmdq(vintf, lidx);
+ 
+ 	dev_dbg(vintf->cmdqv->dev,
+@@ -768,13 +758,13 @@ static int tegra241_cmdqv_init_structures(struct arm_smmu_device *smmu)
+ 
+ 	vintf = kzalloc(sizeof(*vintf), GFP_KERNEL);
+ 	if (!vintf)
+-		goto out_fallback;
++		return -ENOMEM;
+ 
+ 	/* Init VINTF0 for in-kernel use */
+ 	ret = tegra241_cmdqv_init_vintf(cmdqv, 0, vintf);
+ 	if (ret) {
+ 		dev_err(cmdqv->dev, "failed to init vintf0: %d\n", ret);
+-		goto free_vintf;
++		return ret;
+ 	}
+ 
+ 	/* Preallocate logical VCMDQs to VINTF0 */
+@@ -783,24 +773,12 @@ static int tegra241_cmdqv_init_structures(struct arm_smmu_device *smmu)
+ 
+ 		vcmdq = tegra241_vintf_alloc_lvcmdq(vintf, lidx);
+ 		if (IS_ERR(vcmdq))
+-			goto free_lvcmdq;
++			return PTR_ERR(vcmdq);
+ 	}
+ 
+ 	/* Now, we are ready to run all the impl ops */
+ 	smmu->impl_ops = &tegra241_cmdqv_impl_ops;
+ 	return 0;
+-
+-free_lvcmdq:
+-	for (lidx--; lidx >= 0; lidx--)
+-		tegra241_vintf_free_lvcmdq(vintf, lidx);
+-	tegra241_cmdqv_deinit_vintf(cmdqv, vintf->idx);
+-free_vintf:
+-	kfree(vintf);
+-out_fallback:
+-	dev_info(smmu->impl_dev, "Falling back to standard SMMU CMDQ\n");
+-	smmu->options &= ~ARM_SMMU_OPT_TEGRA241_CMDQV;
+-	tegra241_cmdqv_remove(smmu);
+-	return 0;
+ }
+ 
+ #ifdef CONFIG_IOMMU_DEBUGFS
+diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
+index c666ecab955d21..7465dbb6fa80c8 100644
+--- a/drivers/iommu/exynos-iommu.c
++++ b/drivers/iommu/exynos-iommu.c
+@@ -832,7 +832,7 @@ static int __maybe_unused exynos_sysmmu_suspend(struct device *dev)
+ 		struct exynos_iommu_owner *owner = dev_iommu_priv_get(master);
+ 
+ 		mutex_lock(&owner->rpm_lock);
+-		if (&data->domain->domain != &exynos_identity_domain) {
++		if (data->domain) {
+ 			dev_dbg(data->sysmmu, "saving state\n");
+ 			__sysmmu_disable(data);
+ 		}
+@@ -850,7 +850,7 @@ static int __maybe_unused exynos_sysmmu_resume(struct device *dev)
+ 		struct exynos_iommu_owner *owner = dev_iommu_priv_get(master);
+ 
+ 		mutex_lock(&owner->rpm_lock);
+-		if (&data->domain->domain != &exynos_identity_domain) {
++		if (data->domain) {
+ 			dev_dbg(data->sysmmu, "restoring state\n");
+ 			__sysmmu_enable(data);
+ 		}
+diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
+index 9c46a4cd384842..038a66388564a8 100644
+--- a/drivers/iommu/intel/iommu.c
++++ b/drivers/iommu/intel/iommu.c
+@@ -3174,6 +3174,7 @@ static int __init probe_acpi_namespace_devices(void)
+ 			if (dev->bus != &acpi_bus_type)
+ 				continue;
+ 
++			up_read(&dmar_global_lock);
+ 			adev = to_acpi_device(dev);
+ 			mutex_lock(&adev->physical_node_lock);
+ 			list_for_each_entry(pn,
+@@ -3183,6 +3184,7 @@ static int __init probe_acpi_namespace_devices(void)
+ 					break;
+ 			}
+ 			mutex_unlock(&adev->physical_node_lock);
++			down_read(&dmar_global_lock);
+ 
+ 			if (ret)
+ 				return ret;
+diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
+index 7a6d188e3bea09..71b3383b7115cb 100644
+--- a/drivers/iommu/intel/irq_remapping.c
++++ b/drivers/iommu/intel/irq_remapping.c
+@@ -26,11 +26,6 @@
+ #include "../iommu-pages.h"
+ #include "cap_audit.h"
+ 
+-enum irq_mode {
+-	IRQ_REMAPPING,
+-	IRQ_POSTING,
+-};
+-
+ struct ioapic_scope {
+ 	struct intel_iommu *iommu;
+ 	unsigned int id;
+@@ -50,8 +45,8 @@ struct irq_2_iommu {
+ 	u16 irte_index;
+ 	u16 sub_handle;
+ 	u8  irte_mask;
+-	enum irq_mode mode;
+ 	bool posted_msi;
++	bool posted_vcpu;
+ };
+ 
+ struct intel_ir_data {
+@@ -139,7 +134,6 @@ static int alloc_irte(struct intel_iommu *iommu,
+ 		irq_iommu->irte_index =  index;
+ 		irq_iommu->sub_handle = 0;
+ 		irq_iommu->irte_mask = mask;
+-		irq_iommu->mode = IRQ_REMAPPING;
+ 	}
+ 	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+ 
+@@ -194,8 +188,6 @@ static int modify_irte(struct irq_2_iommu *irq_iommu,
+ 
+ 	rc = qi_flush_iec(iommu, index, 0);
+ 
+-	/* Update iommu mode according to the IRTE mode */
+-	irq_iommu->mode = irte->pst ? IRQ_POSTING : IRQ_REMAPPING;
+ 	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+ 
+ 	return rc;
+@@ -1173,7 +1165,26 @@ static void intel_ir_reconfigure_irte_posted(struct irq_data *irqd)
+ static inline void intel_ir_reconfigure_irte_posted(struct irq_data *irqd) {}
+ #endif
+ 
+-static void intel_ir_reconfigure_irte(struct irq_data *irqd, bool force)
++static void __intel_ir_reconfigure_irte(struct irq_data *irqd, bool force_host)
++{
++	struct intel_ir_data *ir_data = irqd->chip_data;
++
++	/*
++	 * Don't modify IRTEs for IRQs that are being posted to vCPUs if the
++	 * host CPU affinity changes.
++	 */
++	if (ir_data->irq_2_iommu.posted_vcpu && !force_host)
++		return;
++
++	ir_data->irq_2_iommu.posted_vcpu = false;
++
++	if (ir_data->irq_2_iommu.posted_msi)
++		intel_ir_reconfigure_irte_posted(irqd);
++	else
++		modify_irte(&ir_data->irq_2_iommu, &ir_data->irte_entry);
++}
++
++static void intel_ir_reconfigure_irte(struct irq_data *irqd, bool force_host)
+ {
+ 	struct intel_ir_data *ir_data = irqd->chip_data;
+ 	struct irte *irte = &ir_data->irte_entry;
+@@ -1186,10 +1197,7 @@ static void intel_ir_reconfigure_irte(struct irq_data *irqd, bool force)
+ 	irte->vector = cfg->vector;
+ 	irte->dest_id = IRTE_DEST(cfg->dest_apicid);
+ 
+-	if (ir_data->irq_2_iommu.posted_msi)
+-		intel_ir_reconfigure_irte_posted(irqd);
+-	else if (force || ir_data->irq_2_iommu.mode == IRQ_REMAPPING)
+-		modify_irte(&ir_data->irq_2_iommu, irte);
++	__intel_ir_reconfigure_irte(irqd, force_host);
+ }
+ 
+ /*
+@@ -1244,7 +1252,7 @@ static int intel_ir_set_vcpu_affinity(struct irq_data *data, void *info)
+ 
+ 	/* stop posting interrupts, back to the default mode */
+ 	if (!vcpu_pi_info) {
+-		modify_irte(&ir_data->irq_2_iommu, &ir_data->irte_entry);
++		__intel_ir_reconfigure_irte(data, true);
+ 	} else {
+ 		struct irte irte_pi;
+ 
+@@ -1267,6 +1275,7 @@ static int intel_ir_set_vcpu_affinity(struct irq_data *data, void *info)
+ 		irte_pi.pda_h = (vcpu_pi_info->pi_desc_addr >> 32) &
+ 				~(-1UL << PDA_HIGH_BIT);
+ 
++		ir_data->irq_2_iommu.posted_vcpu = true;
+ 		modify_irte(&ir_data->irq_2_iommu, &irte_pi);
+ 	}
+ 
+@@ -1282,43 +1291,44 @@ static struct irq_chip intel_ir_chip = {
+ };
+ 
+ /*
+- * With posted MSIs, all vectors are multiplexed into a single notification
+- * vector. Devices MSIs are then dispatched in a demux loop where
+- * EOIs can be coalesced as well.
++ * With posted MSIs, the MSI vectors are multiplexed into a single notification
++ * vector, and only the notification vector is sent to the APIC IRR.  Device
++ * MSIs are then dispatched in a demux loop that harvests the MSIs from the
++ * CPU's Posted Interrupt Request bitmap.  I.e. Posted MSIs never get sent to
++ * the APIC IRR, and thus do not need an EOI.  The notification handler instead
++ * performs a single EOI after processing the PIR.
+  *
+- * "INTEL-IR-POST" IRQ chip does not do EOI on ACK, thus the dummy irq_ack()
+- * function. Instead EOI is performed by the posted interrupt notification
+- * handler.
++ * Note!  Pending SMP/CPU affinity changes, which are per MSI, must still be
++ * honored, only the APIC EOI is omitted.
+  *
+  * For the example below, 3 MSIs are coalesced into one CPU notification. Only
+- * one apic_eoi() is needed.
++ * one apic_eoi() is needed, but each MSI needs to process pending changes to
++ * its CPU affinity.
+  *
+  * __sysvec_posted_msi_notification()
+  *	irq_enter();
+  *		handle_edge_irq()
+  *			irq_chip_ack_parent()
+- *				dummy(); // No EOI
++ *				irq_move_irq(); // No EOI
+  *			handle_irq_event()
+  *				driver_handler()
+  *		handle_edge_irq()
+  *			irq_chip_ack_parent()
+- *				dummy(); // No EOI
++ *				irq_move_irq(); // No EOI
+  *			handle_irq_event()
+  *				driver_handler()
+  *		handle_edge_irq()
+  *			irq_chip_ack_parent()
+- *				dummy(); // No EOI
++ *				irq_move_irq(); // No EOI
+  *			handle_irq_event()
+  *				driver_handler()
+  *	apic_eoi()
+  *	irq_exit()
++ *
+  */
+-
+-static void dummy_ack(struct irq_data *d) { }
+-
+ static struct irq_chip intel_ir_chip_post_msi = {
+ 	.name			= "INTEL-IR-POST",
+-	.irq_ack		= dummy_ack,
++	.irq_ack		= irq_move_irq,
+ 	.irq_set_affinity	= intel_ir_set_affinity,
+ 	.irq_compose_msi_msg	= intel_ir_compose_msi_msg,
+ 	.irq_set_vcpu_affinity	= intel_ir_set_vcpu_affinity,
+@@ -1494,6 +1504,9 @@ static void intel_irq_remapping_deactivate(struct irq_domain *domain,
+ 	struct intel_ir_data *data = irq_data->chip_data;
+ 	struct irte entry;
+ 
++	WARN_ON_ONCE(data->irq_2_iommu.posted_vcpu);
++	data->irq_2_iommu.posted_vcpu = false;
++
+ 	memset(&entry, 0, sizeof(entry));
+ 	modify_irte(&data->irq_2_iommu, &entry);
+ }
+diff --git a/drivers/iommu/iommufd/device.c b/drivers/iommu/iommufd/device.c
+index 5fd3dd42029015..3fd8920e79ffb9 100644
+--- a/drivers/iommu/iommufd/device.c
++++ b/drivers/iommu/iommufd/device.c
+@@ -352,6 +352,122 @@ iommufd_device_attach_reserved_iova(struct iommufd_device *idev,
+ 	return 0;
+ }
+ 
++/* The device attach/detach/replace helpers for attach_handle */
++
++/* Check if idev is attached to igroup->hwpt */
++static bool iommufd_device_is_attached(struct iommufd_device *idev)
++{
++	struct iommufd_device *cur;
++
++	list_for_each_entry(cur, &idev->igroup->device_list, group_item)
++		if (cur == idev)
++			return true;
++	return false;
++}
++
++static int iommufd_hwpt_attach_device(struct iommufd_hw_pagetable *hwpt,
++				      struct iommufd_device *idev)
++{
++	struct iommufd_attach_handle *handle;
++	int rc;
++
++	lockdep_assert_held(&idev->igroup->lock);
++
++	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
++	if (!handle)
++		return -ENOMEM;
++
++	if (hwpt->fault) {
++		rc = iommufd_fault_iopf_enable(idev);
++		if (rc)
++			goto out_free_handle;
++	}
++
++	handle->idev = idev;
++	rc = iommu_attach_group_handle(hwpt->domain, idev->igroup->group,
++				       &handle->handle);
++	if (rc)
++		goto out_disable_iopf;
++
++	return 0;
++
++out_disable_iopf:
++	if (hwpt->fault)
++		iommufd_fault_iopf_disable(idev);
++out_free_handle:
++	kfree(handle);
++	return rc;
++}
++
++static struct iommufd_attach_handle *
++iommufd_device_get_attach_handle(struct iommufd_device *idev)
++{
++	struct iommu_attach_handle *handle;
++
++	lockdep_assert_held(&idev->igroup->lock);
++
++	handle =
++		iommu_attach_handle_get(idev->igroup->group, IOMMU_NO_PASID, 0);
++	if (IS_ERR(handle))
++		return NULL;
++	return to_iommufd_handle(handle);
++}
++
++static void iommufd_hwpt_detach_device(struct iommufd_hw_pagetable *hwpt,
++				       struct iommufd_device *idev)
++{
++	struct iommufd_attach_handle *handle;
++
++	handle = iommufd_device_get_attach_handle(idev);
++	iommu_detach_group_handle(hwpt->domain, idev->igroup->group);
++	if (hwpt->fault) {
++		iommufd_auto_response_faults(hwpt, handle);
++		iommufd_fault_iopf_disable(idev);
++	}
++	kfree(handle);
++}
++
++static int iommufd_hwpt_replace_device(struct iommufd_device *idev,
++				       struct iommufd_hw_pagetable *hwpt,
++				       struct iommufd_hw_pagetable *old)
++{
++	struct iommufd_attach_handle *handle, *old_handle =
++		iommufd_device_get_attach_handle(idev);
++	int rc;
++
++	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
++	if (!handle)
++		return -ENOMEM;
++
++	if (hwpt->fault && !old->fault) {
++		rc = iommufd_fault_iopf_enable(idev);
++		if (rc)
++			goto out_free_handle;
++	}
++
++	handle->idev = idev;
++	rc = iommu_replace_group_handle(idev->igroup->group, hwpt->domain,
++					&handle->handle);
++	if (rc)
++		goto out_disable_iopf;
++
++	if (old->fault) {
++		iommufd_auto_response_faults(hwpt, old_handle);
++		if (!hwpt->fault)
++			iommufd_fault_iopf_disable(idev);
++	}
++	kfree(old_handle);
++
++	return 0;
++
++out_disable_iopf:
++	if (hwpt->fault && !old->fault)
++		iommufd_fault_iopf_disable(idev);
++out_free_handle:
++	kfree(handle);
++	return rc;
++}
++
+ int iommufd_hw_pagetable_attach(struct iommufd_hw_pagetable *hwpt,
+ 				struct iommufd_device *idev)
+ {
+@@ -488,6 +604,11 @@ iommufd_device_do_replace(struct iommufd_device *idev,
+ 		goto err_unlock;
+ 	}
+ 
++	if (!iommufd_device_is_attached(idev)) {
++		rc = -EINVAL;
++		goto err_unlock;
++	}
++
+ 	if (hwpt == igroup->hwpt) {
+ 		mutex_unlock(&idev->igroup->lock);
+ 		return NULL;
+@@ -1127,7 +1248,7 @@ int iommufd_access_rw(struct iommufd_access *access, unsigned long iova,
+ 	struct io_pagetable *iopt;
+ 	struct iopt_area *area;
+ 	unsigned long last_iova;
+-	int rc;
++	int rc = -EINVAL;
+ 
+ 	if (!length)
+ 		return -EINVAL;
+diff --git a/drivers/iommu/iommufd/fault.c b/drivers/iommu/iommufd/fault.c
+index 95e2e99ab27241..1b0812f8bf840a 100644
+--- a/drivers/iommu/iommufd/fault.c
++++ b/drivers/iommu/iommufd/fault.c
+@@ -16,7 +16,7 @@
+ #include "../iommu-priv.h"
+ #include "iommufd_private.h"
+ 
+-static int iommufd_fault_iopf_enable(struct iommufd_device *idev)
++int iommufd_fault_iopf_enable(struct iommufd_device *idev)
+ {
+ 	struct device *dev = idev->dev;
+ 	int ret;
+@@ -45,7 +45,7 @@ static int iommufd_fault_iopf_enable(struct iommufd_device *idev)
+ 	return ret;
+ }
+ 
+-static void iommufd_fault_iopf_disable(struct iommufd_device *idev)
++void iommufd_fault_iopf_disable(struct iommufd_device *idev)
+ {
+ 	mutex_lock(&idev->iopf_lock);
+ 	if (!WARN_ON(idev->iopf_enabled == 0)) {
+@@ -93,8 +93,8 @@ int iommufd_fault_domain_attach_dev(struct iommufd_hw_pagetable *hwpt,
+ 	return ret;
+ }
+ 
+-static void iommufd_auto_response_faults(struct iommufd_hw_pagetable *hwpt,
+-					 struct iommufd_attach_handle *handle)
++void iommufd_auto_response_faults(struct iommufd_hw_pagetable *hwpt,
++				  struct iommufd_attach_handle *handle)
+ {
+ 	struct iommufd_fault *fault = hwpt->fault;
+ 	struct iopf_group *group, *next;
+diff --git a/drivers/iommu/iommufd/iommufd_private.h b/drivers/iommu/iommufd/iommufd_private.h
+index c1f82cb6824256..18cdf1391a0348 100644
+--- a/drivers/iommu/iommufd/iommufd_private.h
++++ b/drivers/iommu/iommufd/iommufd_private.h
+@@ -523,35 +523,10 @@ int iommufd_fault_domain_replace_dev(struct iommufd_device *idev,
+ 				     struct iommufd_hw_pagetable *hwpt,
+ 				     struct iommufd_hw_pagetable *old);
+ 
+-static inline int iommufd_hwpt_attach_device(struct iommufd_hw_pagetable *hwpt,
+-					     struct iommufd_device *idev)
+-{
+-	if (hwpt->fault)
+-		return iommufd_fault_domain_attach_dev(hwpt, idev);
+-
+-	return iommu_attach_group(hwpt->domain, idev->igroup->group);
+-}
+-
+-static inline void iommufd_hwpt_detach_device(struct iommufd_hw_pagetable *hwpt,
+-					      struct iommufd_device *idev)
+-{
+-	if (hwpt->fault) {
+-		iommufd_fault_domain_detach_dev(hwpt, idev);
+-		return;
+-	}
+-
+-	iommu_detach_group(hwpt->domain, idev->igroup->group);
+-}
+-
+-static inline int iommufd_hwpt_replace_device(struct iommufd_device *idev,
+-					      struct iommufd_hw_pagetable *hwpt,
+-					      struct iommufd_hw_pagetable *old)
+-{
+-	if (old->fault || hwpt->fault)
+-		return iommufd_fault_domain_replace_dev(idev, hwpt, old);
+-
+-	return iommu_group_replace_domain(idev->igroup->group, hwpt->domain);
+-}
++int iommufd_fault_iopf_enable(struct iommufd_device *idev);
++void iommufd_fault_iopf_disable(struct iommufd_device *idev);
++void iommufd_auto_response_faults(struct iommufd_hw_pagetable *hwpt,
++				  struct iommufd_attach_handle *handle);
+ 
+ #ifdef CONFIG_IOMMUFD_TEST
+ int iommufd_test(struct iommufd_ucmd *ucmd);
+diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
+index 6a2707fe7a78c0..32deab732209ec 100644
+--- a/drivers/iommu/mtk_iommu.c
++++ b/drivers/iommu/mtk_iommu.c
+@@ -1371,15 +1371,6 @@ static int mtk_iommu_probe(struct platform_device *pdev)
+ 	platform_set_drvdata(pdev, data);
+ 	mutex_init(&data->mutex);
+ 
+-	ret = iommu_device_sysfs_add(&data->iommu, dev, NULL,
+-				     "mtk-iommu.%pa", &ioaddr);
+-	if (ret)
+-		goto out_link_remove;
+-
+-	ret = iommu_device_register(&data->iommu, &mtk_iommu_ops, dev);
+-	if (ret)
+-		goto out_sysfs_remove;
+-
+ 	if (MTK_IOMMU_HAS_FLAG(data->plat_data, SHARE_PGTABLE)) {
+ 		list_add_tail(&data->list, data->plat_data->hw_list);
+ 		data->hw_list = data->plat_data->hw_list;
+@@ -1389,19 +1380,28 @@ static int mtk_iommu_probe(struct platform_device *pdev)
+ 		data->hw_list = &data->hw_list_head;
+ 	}
+ 
++	ret = iommu_device_sysfs_add(&data->iommu, dev, NULL,
++				     "mtk-iommu.%pa", &ioaddr);
++	if (ret)
++		goto out_list_del;
++
++	ret = iommu_device_register(&data->iommu, &mtk_iommu_ops, dev);
++	if (ret)
++		goto out_sysfs_remove;
++
+ 	if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) {
+ 		ret = component_master_add_with_match(dev, &mtk_iommu_com_ops, match);
+ 		if (ret)
+-			goto out_list_del;
++			goto out_device_unregister;
+ 	}
+ 	return ret;
+ 
+-out_list_del:
+-	list_del(&data->list);
++out_device_unregister:
+ 	iommu_device_unregister(&data->iommu);
+ out_sysfs_remove:
+ 	iommu_device_sysfs_remove(&data->iommu);
+-out_link_remove:
++out_list_del:
++	list_del(&data->list);
+ 	if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM))
+ 		device_link_remove(data->smicomm_dev, dev);
+ out_runtime_disable:
+diff --git a/drivers/leds/rgb/leds-qcom-lpg.c b/drivers/leds/rgb/leds-qcom-lpg.c
+index f3c9ef2bfa572f..5d8e27e2e7ae71 100644
+--- a/drivers/leds/rgb/leds-qcom-lpg.c
++++ b/drivers/leds/rgb/leds-qcom-lpg.c
+@@ -461,7 +461,7 @@ static int lpg_calc_freq(struct lpg_channel *chan, uint64_t period)
+ 		max_res = LPG_RESOLUTION_9BIT;
+ 	}
+ 
+-	min_period = div64_u64((u64)NSEC_PER_SEC * (1 << pwm_resolution_arr[0]),
++	min_period = div64_u64((u64)NSEC_PER_SEC * ((1 << pwm_resolution_arr[0]) - 1),
+ 			       clk_rate_arr[clk_len - 1]);
+ 	if (period <= min_period)
+ 		return -EINVAL;
+@@ -482,7 +482,7 @@ static int lpg_calc_freq(struct lpg_channel *chan, uint64_t period)
+ 	 */
+ 
+ 	for (i = 0; i < pwm_resolution_count; i++) {
+-		resolution = 1 << pwm_resolution_arr[i];
++		resolution = (1 << pwm_resolution_arr[i]) - 1;
+ 		for (clk_sel = 1; clk_sel < clk_len; clk_sel++) {
+ 			u64 numerator = period * clk_rate_arr[clk_sel];
+ 
+@@ -529,7 +529,7 @@ static void lpg_calc_duty(struct lpg_channel *chan, uint64_t duty)
+ 	unsigned int clk_rate;
+ 
+ 	if (chan->subtype == LPG_SUBTYPE_HI_RES_PWM) {
+-		max = LPG_RESOLUTION_15BIT - 1;
++		max = BIT(lpg_pwm_resolution_hi_res[chan->pwm_resolution_sel]) - 1;
+ 		clk_rate = lpg_clk_rates_hi_res[chan->clk_sel];
+ 	} else {
+ 		max = LPG_RESOLUTION_9BIT - 1;
+@@ -1291,7 +1291,7 @@ static int lpg_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ 		if (ret)
+ 			return ret;
+ 
+-		state->period = DIV_ROUND_UP_ULL((u64)NSEC_PER_SEC * (1 << resolution) *
++		state->period = DIV_ROUND_UP_ULL((u64)NSEC_PER_SEC * ((1 << resolution) - 1) *
+ 						 pre_div * (1 << m), refclk);
+ 		state->duty_cycle = DIV_ROUND_UP_ULL((u64)NSEC_PER_SEC * pwm_value * pre_div * (1 << m), refclk);
+ 	} else {
+diff --git a/drivers/mailbox/tegra-hsp.c b/drivers/mailbox/tegra-hsp.c
+index 46c921000a34cf..76f54f8b6b6c5e 100644
+--- a/drivers/mailbox/tegra-hsp.c
++++ b/drivers/mailbox/tegra-hsp.c
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0-only
+ /*
+- * Copyright (c) 2016-2023, NVIDIA CORPORATION.  All rights reserved.
++ * Copyright (c) 2016-2025, NVIDIA CORPORATION.  All rights reserved.
+  */
+ 
+ #include <linux/delay.h>
+@@ -28,12 +28,6 @@
+ #define HSP_INT_FULL_MASK	0xff
+ 
+ #define HSP_INT_DIMENSIONING	0x380
+-#define HSP_nSM_SHIFT		0
+-#define HSP_nSS_SHIFT		4
+-#define HSP_nAS_SHIFT		8
+-#define HSP_nDB_SHIFT		12
+-#define HSP_nSI_SHIFT		16
+-#define HSP_nINT_MASK		0xf
+ 
+ #define HSP_DB_TRIGGER	0x0
+ #define HSP_DB_ENABLE	0x4
+@@ -97,6 +91,20 @@ struct tegra_hsp_soc {
+ 	bool has_per_mb_ie;
+ 	bool has_128_bit_mb;
+ 	unsigned int reg_stride;
++
++	/* Shifts for dimensioning register. */
++	unsigned int si_shift;
++	unsigned int db_shift;
++	unsigned int as_shift;
++	unsigned int ss_shift;
++	unsigned int sm_shift;
++
++	/* Masks for dimensioning register. */
++	unsigned int si_mask;
++	unsigned int db_mask;
++	unsigned int as_mask;
++	unsigned int ss_mask;
++	unsigned int sm_mask;
+ };
+ 
+ struct tegra_hsp {
+@@ -747,11 +755,11 @@ static int tegra_hsp_probe(struct platform_device *pdev)
+ 		return PTR_ERR(hsp->regs);
+ 
+ 	value = tegra_hsp_readl(hsp, HSP_INT_DIMENSIONING);
+-	hsp->num_sm = (value >> HSP_nSM_SHIFT) & HSP_nINT_MASK;
+-	hsp->num_ss = (value >> HSP_nSS_SHIFT) & HSP_nINT_MASK;
+-	hsp->num_as = (value >> HSP_nAS_SHIFT) & HSP_nINT_MASK;
+-	hsp->num_db = (value >> HSP_nDB_SHIFT) & HSP_nINT_MASK;
+-	hsp->num_si = (value >> HSP_nSI_SHIFT) & HSP_nINT_MASK;
++	hsp->num_sm = (value >> hsp->soc->sm_shift) & hsp->soc->sm_mask;
++	hsp->num_ss = (value >> hsp->soc->ss_shift) & hsp->soc->ss_mask;
++	hsp->num_as = (value >> hsp->soc->as_shift) & hsp->soc->as_mask;
++	hsp->num_db = (value >> hsp->soc->db_shift) & hsp->soc->db_mask;
++	hsp->num_si = (value >> hsp->soc->si_shift) & hsp->soc->si_mask;
+ 
+ 	err = platform_get_irq_byname_optional(pdev, "doorbell");
+ 	if (err >= 0)
+@@ -915,6 +923,16 @@ static const struct tegra_hsp_soc tegra186_hsp_soc = {
+ 	.has_per_mb_ie = false,
+ 	.has_128_bit_mb = false,
+ 	.reg_stride = 0x100,
++	.si_shift = 16,
++	.db_shift = 12,
++	.as_shift = 8,
++	.ss_shift = 4,
++	.sm_shift = 0,
++	.si_mask = 0xf,
++	.db_mask = 0xf,
++	.as_mask = 0xf,
++	.ss_mask = 0xf,
++	.sm_mask = 0xf,
+ };
+ 
+ static const struct tegra_hsp_soc tegra194_hsp_soc = {
+@@ -922,6 +940,16 @@ static const struct tegra_hsp_soc tegra194_hsp_soc = {
+ 	.has_per_mb_ie = true,
+ 	.has_128_bit_mb = false,
+ 	.reg_stride = 0x100,
++	.si_shift = 16,
++	.db_shift = 12,
++	.as_shift = 8,
++	.ss_shift = 4,
++	.sm_shift = 0,
++	.si_mask = 0xf,
++	.db_mask = 0xf,
++	.as_mask = 0xf,
++	.ss_mask = 0xf,
++	.sm_mask = 0xf,
+ };
+ 
+ static const struct tegra_hsp_soc tegra234_hsp_soc = {
+@@ -929,6 +957,16 @@ static const struct tegra_hsp_soc tegra234_hsp_soc = {
+ 	.has_per_mb_ie = false,
+ 	.has_128_bit_mb = true,
+ 	.reg_stride = 0x100,
++	.si_shift = 16,
++	.db_shift = 12,
++	.as_shift = 8,
++	.ss_shift = 4,
++	.sm_shift = 0,
++	.si_mask = 0xf,
++	.db_mask = 0xf,
++	.as_mask = 0xf,
++	.ss_mask = 0xf,
++	.sm_mask = 0xf,
+ };
+ 
+ static const struct tegra_hsp_soc tegra264_hsp_soc = {
+@@ -936,6 +974,16 @@ static const struct tegra_hsp_soc tegra264_hsp_soc = {
+ 	.has_per_mb_ie = false,
+ 	.has_128_bit_mb = true,
+ 	.reg_stride = 0x1000,
++	.si_shift = 17,
++	.db_shift = 12,
++	.as_shift = 8,
++	.ss_shift = 4,
++	.sm_shift = 0,
++	.si_mask = 0x1f,
++	.db_mask = 0x1f,
++	.as_mask = 0xf,
++	.ss_mask = 0xf,
++	.sm_mask = 0xf,
+ };
+ 
+ static const struct of_device_id tegra_hsp_match[] = {
+diff --git a/drivers/md/dm-ebs-target.c b/drivers/md/dm-ebs-target.c
+index 18ae45dcbfb28b..b19b0142a690a3 100644
+--- a/drivers/md/dm-ebs-target.c
++++ b/drivers/md/dm-ebs-target.c
+@@ -390,6 +390,12 @@ static int ebs_map(struct dm_target *ti, struct bio *bio)
+ 	return DM_MAPIO_REMAPPED;
+ }
+ 
++static void ebs_postsuspend(struct dm_target *ti)
++{
++	struct ebs_c *ec = ti->private;
++	dm_bufio_client_reset(ec->bufio);
++}
++
+ static void ebs_status(struct dm_target *ti, status_type_t type,
+ 		       unsigned int status_flags, char *result, unsigned int maxlen)
+ {
+@@ -447,6 +453,7 @@ static struct target_type ebs_target = {
+ 	.ctr		 = ebs_ctr,
+ 	.dtr		 = ebs_dtr,
+ 	.map		 = ebs_map,
++	.postsuspend	 = ebs_postsuspend,
+ 	.status		 = ebs_status,
+ 	.io_hints	 = ebs_io_hints,
+ 	.prepare_ioctl	 = ebs_prepare_ioctl,
+diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
+index 555dc06b942287..b35b779b170443 100644
+--- a/drivers/md/dm-integrity.c
++++ b/drivers/md/dm-integrity.c
+@@ -21,6 +21,7 @@
+ #include <linux/reboot.h>
+ #include <crypto/hash.h>
+ #include <crypto/skcipher.h>
++#include <crypto/utils.h>
+ #include <linux/async_tx.h>
+ #include <linux/dm-bufio.h>
+ 
+@@ -516,7 +517,7 @@ static int sb_mac(struct dm_integrity_c *ic, bool wr)
+ 			dm_integrity_io_error(ic, "crypto_shash_digest", r);
+ 			return r;
+ 		}
+-		if (memcmp(mac, actual_mac, mac_size)) {
++		if (crypto_memneq(mac, actual_mac, mac_size)) {
+ 			dm_integrity_io_error(ic, "superblock mac", -EILSEQ);
+ 			dm_audit_log_target(DM_MSG_PREFIX, "mac-superblock", ic->ti, 0);
+ 			return -EILSEQ;
+@@ -859,7 +860,7 @@ static void rw_section_mac(struct dm_integrity_c *ic, unsigned int section, bool
+ 		if (likely(wr))
+ 			memcpy(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR);
+ 		else {
+-			if (memcmp(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR)) {
++			if (crypto_memneq(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR)) {
+ 				dm_integrity_io_error(ic, "journal mac", -EILSEQ);
+ 				dm_audit_log_target(DM_MSG_PREFIX, "mac-journal", ic->ti, 0);
+ 			}
+@@ -1401,10 +1402,9 @@ static bool find_newer_committed_node(struct dm_integrity_c *ic, struct journal_
+ static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, sector_t *metadata_block,
+ 			       unsigned int *metadata_offset, unsigned int total_size, int op)
+ {
+-#define MAY_BE_FILLER		1
+-#define MAY_BE_HASH		2
+ 	unsigned int hash_offset = 0;
+-	unsigned int may_be = MAY_BE_HASH | (ic->discard ? MAY_BE_FILLER : 0);
++	unsigned char mismatch_hash = 0;
++	unsigned char mismatch_filler = !ic->discard;
+ 
+ 	do {
+ 		unsigned char *data, *dp;
+@@ -1425,7 +1425,7 @@ static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, se
+ 		if (op == TAG_READ) {
+ 			memcpy(tag, dp, to_copy);
+ 		} else if (op == TAG_WRITE) {
+-			if (memcmp(dp, tag, to_copy)) {
++			if (crypto_memneq(dp, tag, to_copy)) {
+ 				memcpy(dp, tag, to_copy);
+ 				dm_bufio_mark_partial_buffer_dirty(b, *metadata_offset, *metadata_offset + to_copy);
+ 			}
+@@ -1433,29 +1433,30 @@ static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, se
+ 			/* e.g.: op == TAG_CMP */
+ 
+ 			if (likely(is_power_of_2(ic->tag_size))) {
+-				if (unlikely(memcmp(dp, tag, to_copy)))
+-					if (unlikely(!ic->discard) ||
+-					    unlikely(memchr_inv(dp, DISCARD_FILLER, to_copy) != NULL)) {
+-						goto thorough_test;
+-				}
++				if (unlikely(crypto_memneq(dp, tag, to_copy)))
++					goto thorough_test;
+ 			} else {
+ 				unsigned int i, ts;
+ thorough_test:
+ 				ts = total_size;
+ 
+ 				for (i = 0; i < to_copy; i++, ts--) {
+-					if (unlikely(dp[i] != tag[i]))
+-						may_be &= ~MAY_BE_HASH;
+-					if (likely(dp[i] != DISCARD_FILLER))
+-						may_be &= ~MAY_BE_FILLER;
++					/*
++					 * Warning: the control flow must not be
++					 * dependent on match/mismatch of
++					 * individual bytes.
++					 */
++					mismatch_hash |= dp[i] ^ tag[i];
++					mismatch_filler |= dp[i] ^ DISCARD_FILLER;
+ 					hash_offset++;
+ 					if (unlikely(hash_offset == ic->tag_size)) {
+-						if (unlikely(!may_be)) {
++						if (unlikely(mismatch_hash) && unlikely(mismatch_filler)) {
+ 							dm_bufio_release(b);
+ 							return ts;
+ 						}
+ 						hash_offset = 0;
+-						may_be = MAY_BE_HASH | (ic->discard ? MAY_BE_FILLER : 0);
++						mismatch_hash = 0;
++						mismatch_filler = !ic->discard;
+ 					}
+ 				}
+ 			}
+@@ -1476,8 +1477,6 @@ static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, se
+ 	} while (unlikely(total_size));
+ 
+ 	return 0;
+-#undef MAY_BE_FILLER
+-#undef MAY_BE_HASH
+ }
+ 
+ struct flush_request {
+@@ -2076,7 +2075,7 @@ static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio,
+ 					char checksums_onstack[MAX_T(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
+ 
+ 					integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack);
+-					if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) {
++					if (unlikely(crypto_memneq(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) {
+ 						DMERR_LIMIT("Checksum failed when reading from journal, at sector 0x%llx",
+ 							    logical_sector);
+ 						dm_audit_log_bio(DM_MSG_PREFIX, "journal-checksum",
+@@ -2595,7 +2594,7 @@ static void dm_integrity_inline_recheck(struct work_struct *w)
+ 		bio_put(outgoing_bio);
+ 
+ 		integrity_sector_checksum(ic, dio->bio_details.bi_iter.bi_sector, outgoing_data, digest);
+-		if (unlikely(memcmp(digest, dio->integrity_payload, min(crypto_shash_digestsize(ic->internal_hash), ic->tag_size)))) {
++		if (unlikely(crypto_memneq(digest, dio->integrity_payload, min(crypto_shash_digestsize(ic->internal_hash), ic->tag_size)))) {
+ 			DMERR_LIMIT("%pg: Checksum failed at sector 0x%llx",
+ 				ic->dev->bdev, dio->bio_details.bi_iter.bi_sector);
+ 			atomic64_inc(&ic->number_of_mismatches);
+@@ -2634,7 +2633,7 @@ static int dm_integrity_end_io(struct dm_target *ti, struct bio *bio, blk_status
+ 				char *mem = bvec_kmap_local(&bv);
+ 				//memset(mem, 0xff, ic->sectors_per_block << SECTOR_SHIFT);
+ 				integrity_sector_checksum(ic, dio->bio_details.bi_iter.bi_sector, mem, digest);
+-				if (unlikely(memcmp(digest, dio->integrity_payload + pos,
++				if (unlikely(crypto_memneq(digest, dio->integrity_payload + pos,
+ 						min(crypto_shash_digestsize(ic->internal_hash), ic->tag_size)))) {
+ 					kunmap_local(mem);
+ 					dm_integrity_free_payload(dio);
+@@ -2911,7 +2910,7 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned int write_start
+ 
+ 					integrity_sector_checksum(ic, sec + ((l - j) << ic->sb->log2_sectors_per_block),
+ 								  (char *)access_journal_data(ic, i, l), test_tag);
+-					if (unlikely(memcmp(test_tag, journal_entry_tag(ic, je2), ic->tag_size))) {
++					if (unlikely(crypto_memneq(test_tag, journal_entry_tag(ic, je2), ic->tag_size))) {
+ 						dm_integrity_io_error(ic, "tag mismatch when replaying journal", -EILSEQ);
+ 						dm_audit_log_target(DM_MSG_PREFIX, "integrity-replay-journal", ic->ti, 0);
+ 					}
+@@ -5081,16 +5080,19 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned int argc, char **argv
+ 
+ 		ic->recalc_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages);
+ 		if (!ic->recalc_bitmap) {
++			ti->error = "Could not allocate memory for bitmap";
+ 			r = -ENOMEM;
+ 			goto bad;
+ 		}
+ 		ic->may_write_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages);
+ 		if (!ic->may_write_bitmap) {
++			ti->error = "Could not allocate memory for bitmap";
+ 			r = -ENOMEM;
+ 			goto bad;
+ 		}
+ 		ic->bbs = kvmalloc_array(ic->n_bitmap_blocks, sizeof(struct bitmap_block_status), GFP_KERNEL);
+ 		if (!ic->bbs) {
++			ti->error = "Could not allocate memory for bitmap";
+ 			r = -ENOMEM;
+ 			goto bad;
+ 		}
+diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
+index c142ec5458b70f..53ba0fbdf495c8 100644
+--- a/drivers/md/dm-verity-target.c
++++ b/drivers/md/dm-verity-target.c
+@@ -796,6 +796,13 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
+ 	return DM_MAPIO_SUBMITTED;
+ }
+ 
++static void verity_postsuspend(struct dm_target *ti)
++{
++	struct dm_verity *v = ti->private;
++	flush_workqueue(v->verify_wq);
++	dm_bufio_client_reset(v->bufio);
++}
++
+ /*
+  * Status: V (valid) or C (corruption found)
+  */
+@@ -1766,6 +1773,7 @@ static struct target_type verity_target = {
+ 	.ctr		= verity_ctr,
+ 	.dtr		= verity_dtr,
+ 	.map		= verity_map,
++	.postsuspend	= verity_postsuspend,
+ 	.status		= verity_status,
+ 	.prepare_ioctl	= verity_prepare_ioctl,
+ 	.iterate_devices = verity_iterate_devices,
+diff --git a/drivers/media/common/siano/smsdvb-main.c b/drivers/media/common/siano/smsdvb-main.c
+index 44d8fe8b220e79..9b1a650ed055c9 100644
+--- a/drivers/media/common/siano/smsdvb-main.c
++++ b/drivers/media/common/siano/smsdvb-main.c
+@@ -1243,6 +1243,8 @@ static int __init smsdvb_module_init(void)
+ 	smsdvb_debugfs_register();
+ 
+ 	rc = smscore_register_hotplug(smsdvb_hotplug);
++	if (rc)
++		smsdvb_debugfs_unregister();
+ 
+ 	pr_debug("\n");
+ 
+diff --git a/drivers/media/i2c/adv748x/adv748x.h b/drivers/media/i2c/adv748x/adv748x.h
+index 9bc0121d0eff39..2c1db5968af8e7 100644
+--- a/drivers/media/i2c/adv748x/adv748x.h
++++ b/drivers/media/i2c/adv748x/adv748x.h
+@@ -320,7 +320,7 @@ struct adv748x_state {
+ 
+ /* Free run pattern select */
+ #define ADV748X_SDP_FRP			0x14
+-#define ADV748X_SDP_FRP_MASK		GENMASK(3, 1)
++#define ADV748X_SDP_FRP_MASK		GENMASK(2, 0)
+ 
+ /* Saturation */
+ #define ADV748X_SDP_SD_SAT_U		0xe3	/* user_map_rw_reg_e3 */
+diff --git a/drivers/media/i2c/ccs/ccs-core.c b/drivers/media/i2c/ccs/ccs-core.c
+index cb21df46bab169..4b7d8039b1c9fc 100644
+--- a/drivers/media/i2c/ccs/ccs-core.c
++++ b/drivers/media/i2c/ccs/ccs-core.c
+@@ -3562,6 +3562,7 @@ static int ccs_probe(struct i2c_client *client)
+ out_disable_runtime_pm:
+ 	pm_runtime_put_noidle(&client->dev);
+ 	pm_runtime_disable(&client->dev);
++	pm_runtime_set_suspended(&client->dev);
+ 
+ out_cleanup:
+ 	ccs_cleanup(sensor);
+@@ -3591,9 +3592,10 @@ static void ccs_remove(struct i2c_client *client)
+ 	v4l2_async_unregister_subdev(subdev);
+ 
+ 	pm_runtime_disable(&client->dev);
+-	if (!pm_runtime_status_suspended(&client->dev))
++	if (!pm_runtime_status_suspended(&client->dev)) {
+ 		ccs_power_off(&client->dev);
+-	pm_runtime_set_suspended(&client->dev);
++		pm_runtime_set_suspended(&client->dev);
++	}
+ 
+ 	for (i = 0; i < sensor->ssds_used; i++)
+ 		v4l2_device_unregister_subdev(&sensor->ssds[i].sd);
+diff --git a/drivers/media/i2c/hi556.c b/drivers/media/i2c/hi556.c
+index f31f9886c924e4..0e89aff9c664da 100644
+--- a/drivers/media/i2c/hi556.c
++++ b/drivers/media/i2c/hi556.c
+@@ -1230,12 +1230,13 @@ static int hi556_check_hwcfg(struct device *dev)
+ 	ret = fwnode_property_read_u32(fwnode, "clock-frequency", &mclk);
+ 	if (ret) {
+ 		dev_err(dev, "can't get clock frequency");
+-		return ret;
++		goto check_hwcfg_error;
+ 	}
+ 
+ 	if (mclk != HI556_MCLK) {
+ 		dev_err(dev, "external clock %d is not supported", mclk);
+-		return -EINVAL;
++		ret = -EINVAL;
++		goto check_hwcfg_error;
+ 	}
+ 
+ 	if (bus_cfg.bus.mipi_csi2.num_data_lanes != 2) {
+diff --git a/drivers/media/i2c/imx214.c b/drivers/media/i2c/imx214.c
+index 4962cfe7c83d62..6a393e18267f42 100644
+--- a/drivers/media/i2c/imx214.c
++++ b/drivers/media/i2c/imx214.c
+@@ -1075,10 +1075,6 @@ static int imx214_probe(struct i2c_client *client)
+ 	 */
+ 	imx214_power_on(imx214->dev);
+ 
+-	pm_runtime_set_active(imx214->dev);
+-	pm_runtime_enable(imx214->dev);
+-	pm_runtime_idle(imx214->dev);
+-
+ 	ret = imx214_ctrls_init(imx214);
+ 	if (ret < 0)
+ 		goto error_power_off;
+@@ -1099,22 +1095,30 @@ static int imx214_probe(struct i2c_client *client)
+ 
+ 	imx214_entity_init_state(&imx214->sd, NULL);
+ 
++	pm_runtime_set_active(imx214->dev);
++	pm_runtime_enable(imx214->dev);
++
+ 	ret = v4l2_async_register_subdev_sensor(&imx214->sd);
+ 	if (ret < 0) {
+ 		dev_err(dev, "could not register v4l2 device\n");
+ 		goto free_entity;
+ 	}
+ 
++	pm_runtime_idle(imx214->dev);
++
+ 	return 0;
+ 
+ free_entity:
++	pm_runtime_disable(imx214->dev);
++	pm_runtime_set_suspended(&client->dev);
+ 	media_entity_cleanup(&imx214->sd.entity);
++
+ free_ctrl:
+ 	mutex_destroy(&imx214->mutex);
+ 	v4l2_ctrl_handler_free(&imx214->ctrls);
++
+ error_power_off:
+-	pm_runtime_disable(imx214->dev);
+-	regulator_bulk_disable(IMX214_NUM_SUPPLIES, imx214->supplies);
++	imx214_power_off(imx214->dev);
+ 
+ 	return ret;
+ }
+@@ -1127,11 +1131,12 @@ static void imx214_remove(struct i2c_client *client)
+ 	v4l2_async_unregister_subdev(&imx214->sd);
+ 	media_entity_cleanup(&imx214->sd.entity);
+ 	v4l2_ctrl_handler_free(&imx214->ctrls);
+-
+-	pm_runtime_disable(&client->dev);
+-	pm_runtime_set_suspended(&client->dev);
+-
+ 	mutex_destroy(&imx214->mutex);
++	pm_runtime_disable(&client->dev);
++	if (!pm_runtime_status_suspended(&client->dev)) {
++		imx214_power_off(imx214->dev);
++		pm_runtime_set_suspended(&client->dev);
++	}
+ }
+ 
+ static const struct of_device_id imx214_of_match[] = {
+diff --git a/drivers/media/i2c/imx219.c b/drivers/media/i2c/imx219.c
+index e78a80b2bb2e45..906aa314b7f84c 100644
+--- a/drivers/media/i2c/imx219.c
++++ b/drivers/media/i2c/imx219.c
+@@ -134,10 +134,11 @@
+ 
+ /* Pixel rate is fixed for all the modes */
+ #define IMX219_PIXEL_RATE		182400000
+-#define IMX219_PIXEL_RATE_4LANE		280800000
++#define IMX219_PIXEL_RATE_4LANE		281600000
+ 
+ #define IMX219_DEFAULT_LINK_FREQ	456000000
+-#define IMX219_DEFAULT_LINK_FREQ_4LANE	363000000
++#define IMX219_DEFAULT_LINK_FREQ_4LANE_UNSUPPORTED	363000000
++#define IMX219_DEFAULT_LINK_FREQ_4LANE	364000000
+ 
+ /* IMX219 native and active pixel array size. */
+ #define IMX219_NATIVE_WIDTH		3296U
+@@ -169,15 +170,6 @@ static const struct cci_reg_sequence imx219_common_regs[] = {
+ 	{ CCI_REG8(0x30eb), 0x05 },
+ 	{ CCI_REG8(0x30eb), 0x09 },
+ 
+-	/* PLL Clock Table */
+-	{ IMX219_REG_VTPXCK_DIV, 5 },
+-	{ IMX219_REG_VTSYCK_DIV, 1 },
+-	{ IMX219_REG_PREPLLCK_VT_DIV, 3 },	/* 0x03 = AUTO set */
+-	{ IMX219_REG_PREPLLCK_OP_DIV, 3 },	/* 0x03 = AUTO set */
+-	{ IMX219_REG_PLL_VT_MPY, 57 },
+-	{ IMX219_REG_OPSYCK_DIV, 1 },
+-	{ IMX219_REG_PLL_OP_MPY, 114 },
+-
+ 	/* Undocumented registers */
+ 	{ CCI_REG8(0x455e), 0x00 },
+ 	{ CCI_REG8(0x471e), 0x4b },
+@@ -202,12 +194,45 @@ static const struct cci_reg_sequence imx219_common_regs[] = {
+ 	{ IMX219_REG_EXCK_FREQ, IMX219_EXCK_FREQ(IMX219_XCLK_FREQ / 1000000) },
+ };
+ 
++static const struct cci_reg_sequence imx219_2lane_regs[] = {
++	/* PLL Clock Table */
++	{ IMX219_REG_VTPXCK_DIV, 5 },
++	{ IMX219_REG_VTSYCK_DIV, 1 },
++	{ IMX219_REG_PREPLLCK_VT_DIV, 3 },	/* 0x03 = AUTO set */
++	{ IMX219_REG_PREPLLCK_OP_DIV, 3 },	/* 0x03 = AUTO set */
++	{ IMX219_REG_PLL_VT_MPY, 57 },
++	{ IMX219_REG_OPSYCK_DIV, 1 },
++	{ IMX219_REG_PLL_OP_MPY, 114 },
++
++	/* 2-Lane CSI Mode */
++	{ IMX219_REG_CSI_LANE_MODE, IMX219_CSI_2_LANE_MODE },
++};
++
++static const struct cci_reg_sequence imx219_4lane_regs[] = {
++	/* PLL Clock Table */
++	{ IMX219_REG_VTPXCK_DIV, 5 },
++	{ IMX219_REG_VTSYCK_DIV, 1 },
++	{ IMX219_REG_PREPLLCK_VT_DIV, 3 },	/* 0x03 = AUTO set */
++	{ IMX219_REG_PREPLLCK_OP_DIV, 3 },	/* 0x03 = AUTO set */
++	{ IMX219_REG_PLL_VT_MPY, 88 },
++	{ IMX219_REG_OPSYCK_DIV, 1 },
++	{ IMX219_REG_PLL_OP_MPY, 91 },
++
++	/* 4-Lane CSI Mode */
++	{ IMX219_REG_CSI_LANE_MODE, IMX219_CSI_4_LANE_MODE },
++};
++
+ static const s64 imx219_link_freq_menu[] = {
+ 	IMX219_DEFAULT_LINK_FREQ,
+ };
+ 
+ static const s64 imx219_link_freq_4lane_menu[] = {
+ 	IMX219_DEFAULT_LINK_FREQ_4LANE,
++	/*
++	 * This will never be advertised to userspace, but will be used for
++	 * v4l2_link_freq_to_bitmap
++	 */
++	IMX219_DEFAULT_LINK_FREQ_4LANE_UNSUPPORTED,
+ };
+ 
+ static const char * const imx219_test_pattern_menu[] = {
+@@ -663,9 +688,11 @@ static int imx219_set_framefmt(struct imx219 *imx219,
+ 
+ static int imx219_configure_lanes(struct imx219 *imx219)
+ {
+-	return cci_write(imx219->regmap, IMX219_REG_CSI_LANE_MODE,
+-			 imx219->lanes == 2 ? IMX219_CSI_2_LANE_MODE :
+-			 IMX219_CSI_4_LANE_MODE, NULL);
++	/* Write the appropriate PLL settings for the number of MIPI lanes */
++	return cci_multi_reg_write(imx219->regmap,
++				  imx219->lanes == 2 ? imx219_2lane_regs : imx219_4lane_regs,
++				  imx219->lanes == 2 ? ARRAY_SIZE(imx219_2lane_regs) :
++				  ARRAY_SIZE(imx219_4lane_regs), NULL);
+ };
+ 
+ static int imx219_start_streaming(struct imx219 *imx219,
+@@ -1042,6 +1069,7 @@ static int imx219_check_hwcfg(struct device *dev, struct imx219 *imx219)
+ 	struct v4l2_fwnode_endpoint ep_cfg = {
+ 		.bus_type = V4L2_MBUS_CSI2_DPHY
+ 	};
++	unsigned long link_freq_bitmap;
+ 	int ret = -EINVAL;
+ 
+ 	endpoint = fwnode_graph_get_next_endpoint(dev_fwnode(dev), NULL);
+@@ -1063,23 +1091,40 @@ static int imx219_check_hwcfg(struct device *dev, struct imx219 *imx219)
+ 	imx219->lanes = ep_cfg.bus.mipi_csi2.num_data_lanes;
+ 
+ 	/* Check the link frequency set in device tree */
+-	if (!ep_cfg.nr_of_link_frequencies) {
+-		dev_err_probe(dev, -EINVAL,
+-			      "link-frequency property not found in DT\n");
+-		goto error_out;
++	switch (imx219->lanes) {
++	case 2:
++		ret = v4l2_link_freq_to_bitmap(dev,
++					       ep_cfg.link_frequencies,
++					       ep_cfg.nr_of_link_frequencies,
++					       imx219_link_freq_menu,
++					       ARRAY_SIZE(imx219_link_freq_menu),
++					       &link_freq_bitmap);
++		break;
++	case 4:
++		ret = v4l2_link_freq_to_bitmap(dev,
++					       ep_cfg.link_frequencies,
++					       ep_cfg.nr_of_link_frequencies,
++					       imx219_link_freq_4lane_menu,
++					       ARRAY_SIZE(imx219_link_freq_4lane_menu),
++					       &link_freq_bitmap);
++
++		if (!ret && (link_freq_bitmap & BIT(1))) {
++			dev_warn(dev, "Link frequency of %d not supported, but has been incorrectly advertised previously\n",
++				 IMX219_DEFAULT_LINK_FREQ_4LANE_UNSUPPORTED);
++			dev_warn(dev, "Using link frequency of %d\n",
++				 IMX219_DEFAULT_LINK_FREQ_4LANE);
++			link_freq_bitmap |= BIT(0);
++		}
++		break;
+ 	}
+ 
+-	if (ep_cfg.nr_of_link_frequencies != 1 ||
+-	   (ep_cfg.link_frequencies[0] != ((imx219->lanes == 2) ?
+-	    IMX219_DEFAULT_LINK_FREQ : IMX219_DEFAULT_LINK_FREQ_4LANE))) {
++	if (ret || !(link_freq_bitmap & BIT(0))) {
++		ret = -EINVAL;
+ 		dev_err_probe(dev, -EINVAL,
+ 			      "Link frequency not supported: %lld\n",
+ 			      ep_cfg.link_frequencies[0]);
+-		goto error_out;
+ 	}
+ 
+-	ret = 0;
+-
+ error_out:
+ 	v4l2_fwnode_endpoint_free(&ep_cfg);
+ 	fwnode_handle_put(endpoint);
+@@ -1186,6 +1231,9 @@ static int imx219_probe(struct i2c_client *client)
+ 		goto error_media_entity;
+ 	}
+ 
++	pm_runtime_set_active(dev);
++	pm_runtime_enable(dev);
++
+ 	ret = v4l2_async_register_subdev_sensor(&imx219->sd);
+ 	if (ret < 0) {
+ 		dev_err_probe(dev, ret,
+@@ -1193,15 +1241,14 @@ static int imx219_probe(struct i2c_client *client)
+ 		goto error_subdev_cleanup;
+ 	}
+ 
+-	/* Enable runtime PM and turn off the device */
+-	pm_runtime_set_active(dev);
+-	pm_runtime_enable(dev);
+ 	pm_runtime_idle(dev);
+ 
+ 	return 0;
+ 
+ error_subdev_cleanup:
+ 	v4l2_subdev_cleanup(&imx219->sd);
++	pm_runtime_disable(dev);
++	pm_runtime_set_suspended(dev);
+ 
+ error_media_entity:
+ 	media_entity_cleanup(&imx219->sd.entity);
+@@ -1226,9 +1273,10 @@ static void imx219_remove(struct i2c_client *client)
+ 	imx219_free_controls(imx219);
+ 
+ 	pm_runtime_disable(&client->dev);
+-	if (!pm_runtime_status_suspended(&client->dev))
++	if (!pm_runtime_status_suspended(&client->dev)) {
+ 		imx219_power_off(&client->dev);
+-	pm_runtime_set_suspended(&client->dev);
++		pm_runtime_set_suspended(&client->dev);
++	}
+ }
+ 
+ static const struct of_device_id imx219_dt_ids[] = {
+diff --git a/drivers/media/i2c/imx319.c b/drivers/media/i2c/imx319.c
+index dd1b4ff983dcb1..701840f4a5cc00 100644
+--- a/drivers/media/i2c/imx319.c
++++ b/drivers/media/i2c/imx319.c
+@@ -2442,17 +2442,19 @@ static int imx319_probe(struct i2c_client *client)
+ 	if (full_power)
+ 		pm_runtime_set_active(&client->dev);
+ 	pm_runtime_enable(&client->dev);
+-	pm_runtime_idle(&client->dev);
+ 
+ 	ret = v4l2_async_register_subdev_sensor(&imx319->sd);
+ 	if (ret < 0)
+ 		goto error_media_entity_pm;
+ 
++	pm_runtime_idle(&client->dev);
++
+ 	return 0;
+ 
+ error_media_entity_pm:
+ 	pm_runtime_disable(&client->dev);
+-	pm_runtime_set_suspended(&client->dev);
++	if (full_power)
++		pm_runtime_set_suspended(&client->dev);
+ 	media_entity_cleanup(&imx319->sd.entity);
+ 
+ error_handler_free:
+@@ -2474,7 +2476,8 @@ static void imx319_remove(struct i2c_client *client)
+ 	v4l2_ctrl_handler_free(sd->ctrl_handler);
+ 
+ 	pm_runtime_disable(&client->dev);
+-	pm_runtime_set_suspended(&client->dev);
++	if (!pm_runtime_status_suspended(&client->dev))
++		pm_runtime_set_suspended(&client->dev);
+ 
+ 	mutex_destroy(&imx319->mutex);
+ }
+diff --git a/drivers/media/i2c/ov7251.c b/drivers/media/i2c/ov7251.c
+index 30f61e04ecaf51..3226888d77e9c7 100644
+--- a/drivers/media/i2c/ov7251.c
++++ b/drivers/media/i2c/ov7251.c
+@@ -922,6 +922,8 @@ static int ov7251_set_power_on(struct device *dev)
+ 		return ret;
+ 	}
+ 
++	usleep_range(1000, 1100);
++
+ 	gpiod_set_value_cansleep(ov7251->enable_gpio, 1);
+ 
+ 	/* wait at least 65536 external clock cycles */
+@@ -1696,7 +1698,7 @@ static int ov7251_probe(struct i2c_client *client)
+ 		return PTR_ERR(ov7251->analog_regulator);
+ 	}
+ 
+-	ov7251->enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_HIGH);
++	ov7251->enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
+ 	if (IS_ERR(ov7251->enable_gpio)) {
+ 		dev_err(dev, "cannot get enable gpio\n");
+ 		return PTR_ERR(ov7251->enable_gpio);
+diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys-video.c b/drivers/media/pci/intel/ipu6/ipu6-isys-video.c
+index b37561352ead38..48388c0c851ca1 100644
+--- a/drivers/media/pci/intel/ipu6/ipu6-isys-video.c
++++ b/drivers/media/pci/intel/ipu6/ipu6-isys-video.c
+@@ -1296,6 +1296,7 @@ int ipu6_isys_video_init(struct ipu6_isys_video *av)
+ 	av->vdev.release = video_device_release_empty;
+ 	av->vdev.fops = &isys_fops;
+ 	av->vdev.v4l2_dev = &av->isys->v4l2_dev;
++	av->vdev.dev_parent = &av->isys->adev->isp->pdev->dev;
+ 	if (!av->vdev.ioctl_ops)
+ 		av->vdev.ioctl_ops = &ipu6_v4l2_ioctl_ops;
+ 	av->vdev.queue = &av->aq.vbq;
+diff --git a/drivers/media/pci/mgb4/mgb4_cmt.c b/drivers/media/pci/mgb4/mgb4_cmt.c
+index a25b68403bc608..c22ef51436ed5d 100644
+--- a/drivers/media/pci/mgb4/mgb4_cmt.c
++++ b/drivers/media/pci/mgb4/mgb4_cmt.c
+@@ -135,8 +135,8 @@ static const u16 cmt_vals_out[][15] = {
+ };
+ 
+ static const u16 cmt_vals_in[][13] = {
+-	{0x1082, 0x0000, 0x5104, 0x0000, 0x11C7, 0x0000, 0x1041, 0x02BC, 0x7C01, 0xFFE9, 0x9900, 0x9908, 0x8100},
+ 	{0x1104, 0x0000, 0x9208, 0x0000, 0x138E, 0x0000, 0x1041, 0x015E, 0x7C01, 0xFFE9, 0x0100, 0x0908, 0x1000},
++	{0x1082, 0x0000, 0x5104, 0x0000, 0x11C7, 0x0000, 0x1041, 0x02BC, 0x7C01, 0xFFE9, 0x9900, 0x9908, 0x8100},
+ };
+ 
+ static const u32 cmt_addrs_out[][15] = {
+@@ -206,10 +206,11 @@ u32 mgb4_cmt_set_vout_freq(struct mgb4_vout_dev *voutdev, unsigned int freq)
+ 
+ 	mgb4_write_reg(video, regs->config, 0x1 | (config & ~0x3));
+ 
++	mgb4_mask_reg(video, regs->config, 0x100, 0x100);
++
+ 	for (i = 0; i < ARRAY_SIZE(cmt_addrs_out[0]); i++)
+ 		mgb4_write_reg(&voutdev->mgbdev->cmt, addr[i], reg_set[i]);
+ 
+-	mgb4_mask_reg(video, regs->config, 0x100, 0x100);
+ 	mgb4_mask_reg(video, regs->config, 0x100, 0x0);
+ 
+ 	mgb4_write_reg(video, regs->config, config & ~0x1);
+@@ -236,10 +237,11 @@ void mgb4_cmt_set_vin_freq_range(struct mgb4_vin_dev *vindev,
+ 
+ 	mgb4_write_reg(video, regs->config, 0x1 | (config & ~0x3));
+ 
++	mgb4_mask_reg(video, regs->config, 0x1000, 0x1000);
++
+ 	for (i = 0; i < ARRAY_SIZE(cmt_addrs_in[0]); i++)
+ 		mgb4_write_reg(&vindev->mgbdev->cmt, addr[i], reg_set[i]);
+ 
+-	mgb4_mask_reg(video, regs->config, 0x1000, 0x1000);
+ 	mgb4_mask_reg(video, regs->config, 0x1000, 0x0);
+ 
+ 	mgb4_write_reg(video, regs->config, config & ~0x1);
+diff --git a/drivers/media/platform/chips-media/wave5/wave5-hw.c b/drivers/media/platform/chips-media/wave5/wave5-hw.c
+index c89aafabc74213..710311d8511386 100644
+--- a/drivers/media/platform/chips-media/wave5/wave5-hw.c
++++ b/drivers/media/platform/chips-media/wave5/wave5-hw.c
+@@ -576,7 +576,7 @@ int wave5_vpu_build_up_dec_param(struct vpu_instance *inst,
+ 		vpu_write_reg(inst->dev, W5_CMD_NUM_CQ_DEPTH_M1,
+ 			      WAVE521_COMMAND_QUEUE_DEPTH - 1);
+ 	}
+-
++	vpu_write_reg(inst->dev, W5_CMD_ERR_CONCEAL, 0);
+ 	ret = send_firmware_command(inst, W5_CREATE_INSTANCE, true, NULL, NULL);
+ 	if (ret) {
+ 		wave5_vdi_free_dma_memory(vpu_dev, &p_dec_info->vb_work);
+diff --git a/drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c b/drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c
+index 0c5c9a8de91faa..e238447c88bbf3 100644
+--- a/drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c
++++ b/drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c
+@@ -1424,10 +1424,24 @@ static int wave5_vpu_dec_start_streaming(struct vb2_queue *q, unsigned int count
+ 		if (ret)
+ 			goto free_bitstream_vbuf;
+ 	} else if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
++		struct dec_initial_info *initial_info =
++			&inst->codec_info->dec_info.initial_info;
++
+ 		if (inst->state == VPU_INST_STATE_STOP)
+ 			ret = switch_state(inst, VPU_INST_STATE_INIT_SEQ);
+ 		if (ret)
+ 			goto return_buffers;
++
++		if (inst->state == VPU_INST_STATE_INIT_SEQ &&
++		    inst->dev->product_code == WAVE521C_CODE) {
++			if (initial_info->luma_bitdepth != 8) {
++				dev_info(inst->dev->dev, "%s: no support for %d bit depth",
++					 __func__, initial_info->luma_bitdepth);
++				ret = -EINVAL;
++				goto return_buffers;
++			}
++		}
++
+ 	}
+ 
+ 	return ret;
+@@ -1446,6 +1460,16 @@ static int streamoff_output(struct vb2_queue *q)
+ 	struct vb2_v4l2_buffer *buf;
+ 	int ret;
+ 	dma_addr_t new_rd_ptr;
++	struct dec_output_info dec_info;
++	unsigned int i;
++
++	for (i = 0; i < v4l2_m2m_num_dst_bufs_ready(m2m_ctx); i++) {
++		ret = wave5_vpu_dec_set_disp_flag(inst, i);
++		if (ret)
++			dev_dbg(inst->dev->dev,
++				"%s: Setting display flag of buf index: %u, fail: %d\n",
++				__func__, i, ret);
++	}
+ 
+ 	while ((buf = v4l2_m2m_src_buf_remove(m2m_ctx))) {
+ 		dev_dbg(inst->dev->dev, "%s: (Multiplanar) buf type %4u | index %4u\n",
+@@ -1453,6 +1477,11 @@ static int streamoff_output(struct vb2_queue *q)
+ 		v4l2_m2m_buf_done(buf, VB2_BUF_STATE_ERROR);
+ 	}
+ 
++	while (wave5_vpu_dec_get_output_info(inst, &dec_info) == 0) {
++		if (dec_info.index_frame_display >= 0)
++			wave5_vpu_dec_set_disp_flag(inst, dec_info.index_frame_display);
++	}
++
+ 	ret = wave5_vpu_flush_instance(inst);
+ 	if (ret)
+ 		return ret;
+@@ -1535,7 +1564,7 @@ static void wave5_vpu_dec_stop_streaming(struct vb2_queue *q)
+ 			break;
+ 
+ 		if (wave5_vpu_dec_get_output_info(inst, &dec_output_info))
+-			dev_dbg(inst->dev->dev, "Getting decoding results from fw, fail\n");
++			dev_dbg(inst->dev->dev, "there is no output info\n");
+ 	}
+ 
+ 	v4l2_m2m_update_stop_streaming_state(m2m_ctx, q);
+diff --git a/drivers/media/platform/chips-media/wave5/wave5-vpu.c b/drivers/media/platform/chips-media/wave5/wave5-vpu.c
+index 7273254ecb0349..b13c5cd46d7e48 100644
+--- a/drivers/media/platform/chips-media/wave5/wave5-vpu.c
++++ b/drivers/media/platform/chips-media/wave5/wave5-vpu.c
+@@ -54,12 +54,12 @@ static void wave5_vpu_handle_irq(void *dev_id)
+ 	struct vpu_device *dev = dev_id;
+ 
+ 	irq_reason = wave5_vdi_read_register(dev, W5_VPU_VINT_REASON);
++	seq_done = wave5_vdi_read_register(dev, W5_RET_SEQ_DONE_INSTANCE_INFO);
++	cmd_done = wave5_vdi_read_register(dev, W5_RET_QUEUE_CMD_DONE_INST);
+ 	wave5_vdi_write_register(dev, W5_VPU_VINT_REASON_CLR, irq_reason);
+ 	wave5_vdi_write_register(dev, W5_VPU_VINT_CLEAR, 0x1);
+ 
+ 	list_for_each_entry(inst, &dev->instances, list) {
+-		seq_done = wave5_vdi_read_register(dev, W5_RET_SEQ_DONE_INSTANCE_INFO);
+-		cmd_done = wave5_vdi_read_register(dev, W5_RET_QUEUE_CMD_DONE_INST);
+ 
+ 		if (irq_reason & BIT(INT_WAVE5_INIT_SEQ) ||
+ 		    irq_reason & BIT(INT_WAVE5_ENC_SET_PARAM)) {
+diff --git a/drivers/media/platform/chips-media/wave5/wave5-vpuapi.c b/drivers/media/platform/chips-media/wave5/wave5-vpuapi.c
+index 1a3efb638dde5a..65fdabcd9d2921 100644
+--- a/drivers/media/platform/chips-media/wave5/wave5-vpuapi.c
++++ b/drivers/media/platform/chips-media/wave5/wave5-vpuapi.c
+@@ -73,6 +73,16 @@ int wave5_vpu_flush_instance(struct vpu_instance *inst)
+ 				 inst->type == VPU_INST_TYPE_DEC ? "DECODER" : "ENCODER", inst->id);
+ 			mutex_unlock(&inst->dev->hw_lock);
+ 			return -ETIMEDOUT;
++		} else if (ret == -EBUSY) {
++			struct dec_output_info dec_info;
++
++			mutex_unlock(&inst->dev->hw_lock);
++			wave5_vpu_dec_get_output_info(inst, &dec_info);
++			ret = mutex_lock_interruptible(&inst->dev->hw_lock);
++			if (ret)
++				return ret;
++			if (dec_info.index_frame_display > 0)
++				wave5_vpu_dec_set_disp_flag(inst, dec_info.index_frame_display);
+ 		}
+ 	} while (ret != 0);
+ 	mutex_unlock(&inst->dev->hw_lock);
+diff --git a/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_scp.c b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_scp.c
+index ff23b225db705a..1b0bc47355c05f 100644
+--- a/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_scp.c
++++ b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_scp.c
+@@ -79,8 +79,11 @@ struct mtk_vcodec_fw *mtk_vcodec_fw_scp_init(void *priv, enum mtk_vcodec_fw_use
+ 	}
+ 
+ 	fw = devm_kzalloc(&plat_dev->dev, sizeof(*fw), GFP_KERNEL);
+-	if (!fw)
++	if (!fw) {
++		scp_put(scp);
+ 		return ERR_PTR(-ENOMEM);
++	}
++
+ 	fw->type = SCP;
+ 	fw->ops = &mtk_vcodec_rproc_msg;
+ 	fw->scp = scp;
+diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_req_lat_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_req_lat_if.c
+index eea709d9382091..47c302745c1de9 100644
+--- a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_req_lat_if.c
++++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_req_lat_if.c
+@@ -1188,7 +1188,8 @@ static int vdec_vp9_slice_setup_lat(struct vdec_vp9_slice_instance *instance,
+ 	return ret;
+ }
+ 
+-static
++/* clang stack usage explodes if this is inlined */
++static noinline_for_stack
+ void vdec_vp9_slice_map_counts_eob_coef(unsigned int i, unsigned int j, unsigned int k,
+ 					struct vdec_vp9_slice_frame_counts *counts,
+ 					struct v4l2_vp9_frame_symbol_counts *counts_helper)
+diff --git a/drivers/media/platform/mediatek/vcodec/encoder/venc/venc_h264_if.c b/drivers/media/platform/mediatek/vcodec/encoder/venc/venc_h264_if.c
+index f8145998fcaf78..8522f71fc901d5 100644
+--- a/drivers/media/platform/mediatek/vcodec/encoder/venc/venc_h264_if.c
++++ b/drivers/media/platform/mediatek/vcodec/encoder/venc/venc_h264_if.c
+@@ -594,7 +594,11 @@ static int h264_enc_init(struct mtk_vcodec_enc_ctx *ctx)
+ 
+ 	inst->ctx = ctx;
+ 	inst->vpu_inst.ctx = ctx;
+-	inst->vpu_inst.id = is_ext ? SCP_IPI_VENC_H264 : IPI_VENC_H264;
++	if (is_ext)
++		inst->vpu_inst.id = SCP_IPI_VENC_H264;
++	else
++		inst->vpu_inst.id = IPI_VENC_H264;
++
+ 	inst->hw_base = mtk_vcodec_get_reg_addr(inst->ctx->dev->reg_base, VENC_SYS);
+ 
+ 	ret = vpu_enc_init(&inst->vpu_inst);
+diff --git a/drivers/media/platform/nuvoton/npcm-video.c b/drivers/media/platform/nuvoton/npcm-video.c
+index db454c9d2641f8..e0dee768a3be1d 100644
+--- a/drivers/media/platform/nuvoton/npcm-video.c
++++ b/drivers/media/platform/nuvoton/npcm-video.c
+@@ -1650,8 +1650,8 @@ static int npcm_video_setup_video(struct npcm_video *video)
+ 
+ static int npcm_video_ece_init(struct npcm_video *video)
+ {
++	struct device_node *ece_node __free(device_node) = NULL;
+ 	struct device *dev = video->dev;
+-	struct device_node *ece_node;
+ 	struct platform_device *ece_pdev;
+ 	void __iomem *regs;
+ 
+@@ -1671,7 +1671,7 @@ static int npcm_video_ece_init(struct npcm_video *video)
+ 			dev_err(dev, "Failed to find ECE device\n");
+ 			return -ENODEV;
+ 		}
+-		of_node_put(ece_node);
++		struct device *ece_dev __free(put_device) = &ece_pdev->dev;
+ 
+ 		regs = devm_platform_ioremap_resource(ece_pdev, 0);
+ 		if (IS_ERR(regs)) {
+@@ -1686,7 +1686,7 @@ static int npcm_video_ece_init(struct npcm_video *video)
+ 			return PTR_ERR(video->ece.regmap);
+ 		}
+ 
+-		video->ece.reset = devm_reset_control_get(&ece_pdev->dev, NULL);
++		video->ece.reset = devm_reset_control_get(ece_dev, NULL);
+ 		if (IS_ERR(video->ece.reset)) {
+ 			dev_err(dev, "Failed to get ECE reset control in DTS\n");
+ 			return PTR_ERR(video->ece.reset);
+diff --git a/drivers/media/platform/qcom/venus/hfi_parser.c b/drivers/media/platform/qcom/venus/hfi_parser.c
+index 3df241dc3a118b..1b3db2caa99fe4 100644
+--- a/drivers/media/platform/qcom/venus/hfi_parser.c
++++ b/drivers/media/platform/qcom/venus/hfi_parser.c
+@@ -19,6 +19,8 @@ static void init_codecs(struct venus_core *core)
+ 	struct hfi_plat_caps *caps = core->caps, *cap;
+ 	unsigned long bit;
+ 
++	core->codecs_count = 0;
++
+ 	if (hweight_long(core->dec_codecs) + hweight_long(core->enc_codecs) > MAX_CODEC_NUM)
+ 		return;
+ 
+@@ -62,7 +64,7 @@ fill_buf_mode(struct hfi_plat_caps *cap, const void *data, unsigned int num)
+ 		cap->cap_bufs_mode_dynamic = true;
+ }
+ 
+-static void
++static int
+ parse_alloc_mode(struct venus_core *core, u32 codecs, u32 domain, void *data)
+ {
+ 	struct hfi_buffer_alloc_mode_supported *mode = data;
+@@ -70,7 +72,7 @@ parse_alloc_mode(struct venus_core *core, u32 codecs, u32 domain, void *data)
+ 	u32 *type;
+ 
+ 	if (num_entries > MAX_ALLOC_MODE_ENTRIES)
+-		return;
++		return -EINVAL;
+ 
+ 	type = mode->data;
+ 
+@@ -82,6 +84,8 @@ parse_alloc_mode(struct venus_core *core, u32 codecs, u32 domain, void *data)
+ 
+ 		type++;
+ 	}
++
++	return sizeof(*mode);
+ }
+ 
+ static void fill_profile_level(struct hfi_plat_caps *cap, const void *data,
+@@ -96,7 +100,7 @@ static void fill_profile_level(struct hfi_plat_caps *cap, const void *data,
+ 	cap->num_pl += num;
+ }
+ 
+-static void
++static int
+ parse_profile_level(struct venus_core *core, u32 codecs, u32 domain, void *data)
+ {
+ 	struct hfi_profile_level_supported *pl = data;
+@@ -104,12 +108,14 @@ parse_profile_level(struct venus_core *core, u32 codecs, u32 domain, void *data)
+ 	struct hfi_profile_level pl_arr[HFI_MAX_PROFILE_COUNT] = {};
+ 
+ 	if (pl->profile_count > HFI_MAX_PROFILE_COUNT)
+-		return;
++		return -EINVAL;
+ 
+ 	memcpy(pl_arr, proflevel, pl->profile_count * sizeof(*proflevel));
+ 
+ 	for_each_codec(core->caps, ARRAY_SIZE(core->caps), codecs, domain,
+ 		       fill_profile_level, pl_arr, pl->profile_count);
++
++	return pl->profile_count * sizeof(*proflevel) + sizeof(u32);
+ }
+ 
+ static void
+@@ -124,7 +130,7 @@ fill_caps(struct hfi_plat_caps *cap, const void *data, unsigned int num)
+ 	cap->num_caps += num;
+ }
+ 
+-static void
++static int
+ parse_caps(struct venus_core *core, u32 codecs, u32 domain, void *data)
+ {
+ 	struct hfi_capabilities *caps = data;
+@@ -133,12 +139,14 @@ parse_caps(struct venus_core *core, u32 codecs, u32 domain, void *data)
+ 	struct hfi_capability caps_arr[MAX_CAP_ENTRIES] = {};
+ 
+ 	if (num_caps > MAX_CAP_ENTRIES)
+-		return;
++		return -EINVAL;
+ 
+ 	memcpy(caps_arr, cap, num_caps * sizeof(*cap));
+ 
+ 	for_each_codec(core->caps, ARRAY_SIZE(core->caps), codecs, domain,
+ 		       fill_caps, caps_arr, num_caps);
++
++	return sizeof(*caps);
+ }
+ 
+ static void fill_raw_fmts(struct hfi_plat_caps *cap, const void *fmts,
+@@ -153,7 +161,7 @@ static void fill_raw_fmts(struct hfi_plat_caps *cap, const void *fmts,
+ 	cap->num_fmts += num_fmts;
+ }
+ 
+-static void
++static int
+ parse_raw_formats(struct venus_core *core, u32 codecs, u32 domain, void *data)
+ {
+ 	struct hfi_uncompressed_format_supported *fmt = data;
+@@ -162,7 +170,8 @@ parse_raw_formats(struct venus_core *core, u32 codecs, u32 domain, void *data)
+ 	struct raw_formats rawfmts[MAX_FMT_ENTRIES] = {};
+ 	u32 entries = fmt->format_entries;
+ 	unsigned int i = 0;
+-	u32 num_planes;
++	u32 num_planes = 0;
++	u32 size;
+ 
+ 	while (entries) {
+ 		num_planes = pinfo->num_planes;
+@@ -172,7 +181,7 @@ parse_raw_formats(struct venus_core *core, u32 codecs, u32 domain, void *data)
+ 		i++;
+ 
+ 		if (i >= MAX_FMT_ENTRIES)
+-			return;
++			return -EINVAL;
+ 
+ 		if (pinfo->num_planes > MAX_PLANES)
+ 			break;
+@@ -184,9 +193,13 @@ parse_raw_formats(struct venus_core *core, u32 codecs, u32 domain, void *data)
+ 
+ 	for_each_codec(core->caps, ARRAY_SIZE(core->caps), codecs, domain,
+ 		       fill_raw_fmts, rawfmts, i);
++	size = fmt->format_entries * (sizeof(*constr) * num_planes + 2 * sizeof(u32))
++		+ 2 * sizeof(u32);
++
++	return size;
+ }
+ 
+-static void parse_codecs(struct venus_core *core, void *data)
++static int parse_codecs(struct venus_core *core, void *data)
+ {
+ 	struct hfi_codec_supported *codecs = data;
+ 
+@@ -198,21 +211,27 @@ static void parse_codecs(struct venus_core *core, void *data)
+ 		core->dec_codecs &= ~HFI_VIDEO_CODEC_SPARK;
+ 		core->enc_codecs &= ~HFI_VIDEO_CODEC_HEVC;
+ 	}
++
++	return sizeof(*codecs);
+ }
+ 
+-static void parse_max_sessions(struct venus_core *core, const void *data)
++static int parse_max_sessions(struct venus_core *core, const void *data)
+ {
+ 	const struct hfi_max_sessions_supported *sessions = data;
+ 
+ 	core->max_sessions_supported = sessions->max_sessions;
++
++	return sizeof(*sessions);
+ }
+ 
+-static void parse_codecs_mask(u32 *codecs, u32 *domain, void *data)
++static int parse_codecs_mask(u32 *codecs, u32 *domain, void *data)
+ {
+ 	struct hfi_codec_mask_supported *mask = data;
+ 
+ 	*codecs = mask->codecs;
+ 	*domain = mask->video_domains;
++
++	return sizeof(*mask);
+ }
+ 
+ static void parser_init(struct venus_inst *inst, u32 *codecs, u32 *domain)
+@@ -281,8 +300,9 @@ static int hfi_platform_parser(struct venus_core *core, struct venus_inst *inst)
+ u32 hfi_parser(struct venus_core *core, struct venus_inst *inst, void *buf,
+ 	       u32 size)
+ {
+-	unsigned int words_count = size >> 2;
+-	u32 *word = buf, *data, codecs = 0, domain = 0;
++	u32 *words = buf, *payload, codecs = 0, domain = 0;
++	u32 *frame_size = buf + size;
++	u32 rem_bytes = size;
+ 	int ret;
+ 
+ 	ret = hfi_platform_parser(core, inst);
+@@ -299,38 +319,66 @@ u32 hfi_parser(struct venus_core *core, struct venus_inst *inst, void *buf,
+ 		memset(core->caps, 0, sizeof(core->caps));
+ 	}
+ 
+-	while (words_count) {
+-		data = word + 1;
++	while (words < frame_size) {
++		payload = words + 1;
+ 
+-		switch (*word) {
++		switch (*words) {
+ 		case HFI_PROPERTY_PARAM_CODEC_SUPPORTED:
+-			parse_codecs(core, data);
++			if (rem_bytes <= sizeof(struct hfi_codec_supported))
++				return HFI_ERR_SYS_INSUFFICIENT_RESOURCES;
++
++			ret = parse_codecs(core, payload);
++			if (ret < 0)
++				return HFI_ERR_SYS_INSUFFICIENT_RESOURCES;
++
+ 			init_codecs(core);
+ 			break;
+ 		case HFI_PROPERTY_PARAM_MAX_SESSIONS_SUPPORTED:
+-			parse_max_sessions(core, data);
++			if (rem_bytes <= sizeof(struct hfi_max_sessions_supported))
++				return HFI_ERR_SYS_INSUFFICIENT_RESOURCES;
++
++			ret = parse_max_sessions(core, payload);
+ 			break;
+ 		case HFI_PROPERTY_PARAM_CODEC_MASK_SUPPORTED:
+-			parse_codecs_mask(&codecs, &domain, data);
++			if (rem_bytes <= sizeof(struct hfi_codec_mask_supported))
++				return HFI_ERR_SYS_INSUFFICIENT_RESOURCES;
++
++			ret = parse_codecs_mask(&codecs, &domain, payload);
+ 			break;
+ 		case HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SUPPORTED:
+-			parse_raw_formats(core, codecs, domain, data);
++			if (rem_bytes <= sizeof(struct hfi_uncompressed_format_supported))
++				return HFI_ERR_SYS_INSUFFICIENT_RESOURCES;
++
++			ret = parse_raw_formats(core, codecs, domain, payload);
+ 			break;
+ 		case HFI_PROPERTY_PARAM_CAPABILITY_SUPPORTED:
+-			parse_caps(core, codecs, domain, data);
++			if (rem_bytes <= sizeof(struct hfi_capabilities))
++				return HFI_ERR_SYS_INSUFFICIENT_RESOURCES;
++
++			ret = parse_caps(core, codecs, domain, payload);
+ 			break;
+ 		case HFI_PROPERTY_PARAM_PROFILE_LEVEL_SUPPORTED:
+-			parse_profile_level(core, codecs, domain, data);
++			if (rem_bytes <= sizeof(struct hfi_profile_level_supported))
++				return HFI_ERR_SYS_INSUFFICIENT_RESOURCES;
++
++			ret = parse_profile_level(core, codecs, domain, payload);
+ 			break;
+ 		case HFI_PROPERTY_PARAM_BUFFER_ALLOC_MODE_SUPPORTED:
+-			parse_alloc_mode(core, codecs, domain, data);
++			if (rem_bytes <= sizeof(struct hfi_buffer_alloc_mode_supported))
++				return HFI_ERR_SYS_INSUFFICIENT_RESOURCES;
++
++			ret = parse_alloc_mode(core, codecs, domain, payload);
+ 			break;
+ 		default:
++			ret = sizeof(u32);
+ 			break;
+ 		}
+ 
+-		word++;
+-		words_count--;
++		if (ret < 0)
++			return HFI_ERR_SYS_INSUFFICIENT_RESOURCES;
++
++		words += ret / sizeof(u32);
++		rem_bytes -= ret;
+ 	}
+ 
+ 	if (!core->max_sessions_supported)
+diff --git a/drivers/media/platform/qcom/venus/hfi_venus.c b/drivers/media/platform/qcom/venus/hfi_venus.c
+index f9437b6412b91c..ab93757fff4b31 100644
+--- a/drivers/media/platform/qcom/venus/hfi_venus.c
++++ b/drivers/media/platform/qcom/venus/hfi_venus.c
+@@ -187,6 +187,9 @@ static int venus_write_queue(struct venus_hfi_device *hdev,
+ 	/* ensure rd/wr indices's are read from memory */
+ 	rmb();
+ 
++	if (qsize > IFACEQ_QUEUE_SIZE / 4)
++		return -EINVAL;
++
+ 	if (wr_idx >= rd_idx)
+ 		empty_space = qsize - (wr_idx - rd_idx);
+ 	else
+@@ -255,6 +258,9 @@ static int venus_read_queue(struct venus_hfi_device *hdev,
+ 	wr_idx = qhdr->write_idx;
+ 	qsize = qhdr->q_size;
+ 
++	if (qsize > IFACEQ_QUEUE_SIZE / 4)
++		return -EINVAL;
++
+ 	/* make sure data is valid before using it */
+ 	rmb();
+ 
+@@ -1035,18 +1041,26 @@ static void venus_sfr_print(struct venus_hfi_device *hdev)
+ {
+ 	struct device *dev = hdev->core->dev;
+ 	struct hfi_sfr *sfr = hdev->sfr.kva;
++	u32 size;
+ 	void *p;
+ 
+ 	if (!sfr)
+ 		return;
+ 
+-	p = memchr(sfr->data, '\0', sfr->buf_size);
++	size = sfr->buf_size;
++	if (!size)
++		return;
++
++	if (size > ALIGNED_SFR_SIZE)
++		size = ALIGNED_SFR_SIZE;
++
++	p = memchr(sfr->data, '\0', size);
+ 	/*
+ 	 * SFR isn't guaranteed to be NULL terminated since SYS_ERROR indicates
+ 	 * that Venus is in the process of crashing.
+ 	 */
+ 	if (!p)
+-		sfr->data[sfr->buf_size - 1] = '\0';
++		sfr->data[size - 1] = '\0';
+ 
+ 	dev_err_ratelimited(dev, "SFR message from FW: %s\n", sfr->data);
+ }
+diff --git a/drivers/media/platform/rockchip/rga/rga-hw.c b/drivers/media/platform/rockchip/rga/rga-hw.c
+index 11c3d72347572b..b2ef3beec5258a 100644
+--- a/drivers/media/platform/rockchip/rga/rga-hw.c
++++ b/drivers/media/platform/rockchip/rga/rga-hw.c
+@@ -376,7 +376,7 @@ static void rga_cmd_set_dst_info(struct rga_ctx *ctx,
+ 	 * Configure the dest framebuffer base address with pixel offset.
+ 	 */
+ 	offsets = rga_get_addr_offset(&ctx->out, offset, dst_x, dst_y, dst_w, dst_h);
+-	dst_offset = rga_lookup_draw_pos(&offsets, mir_mode, rot_mode);
++	dst_offset = rga_lookup_draw_pos(&offsets, rot_mode, mir_mode);
+ 
+ 	dest[(RGA_DST_Y_RGB_BASE_ADDR - RGA_MODE_BASE_REG) >> 2] =
+ 		dst_offset->y_off;
+diff --git a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v6.c b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v6.c
+index 73f7af674c01bd..0c636090d723de 100644
+--- a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v6.c
++++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v6.c
+@@ -549,8 +549,9 @@ static void s5p_mfc_enc_calc_src_size_v6(struct s5p_mfc_ctx *ctx)
+ 		case V4L2_PIX_FMT_NV21M:
+ 			ctx->stride[0] = ALIGN(ctx->img_width, S5P_FIMV_NV12M_HALIGN_V6);
+ 			ctx->stride[1] = ALIGN(ctx->img_width, S5P_FIMV_NV12M_HALIGN_V6);
+-			ctx->luma_size = ctx->stride[0] * ALIGN(ctx->img_height, 16);
+-			ctx->chroma_size =  ctx->stride[0] * ALIGN(ctx->img_height / 2, 16);
++			ctx->luma_size = ALIGN(ctx->stride[0] * ALIGN(ctx->img_height, 16), 256);
++			ctx->chroma_size = ALIGN(ctx->stride[0] * ALIGN(ctx->img_height / 2, 16),
++					256);
+ 			break;
+ 		case V4L2_PIX_FMT_YUV420M:
+ 		case V4L2_PIX_FMT_YVU420M:
+diff --git a/drivers/media/platform/st/stm32/dma2d/dma2d.c b/drivers/media/platform/st/stm32/dma2d/dma2d.c
+index 92f1edee58f899..3c64e91260250e 100644
+--- a/drivers/media/platform/st/stm32/dma2d/dma2d.c
++++ b/drivers/media/platform/st/stm32/dma2d/dma2d.c
+@@ -492,7 +492,8 @@ static void device_run(void *prv)
+ 	dst->sequence = frm_cap->sequence++;
+ 	v4l2_m2m_buf_copy_metadata(src, dst, true);
+ 
+-	clk_enable(dev->gate);
++	if (clk_enable(dev->gate))
++		goto end;
+ 
+ 	dma2d_config_fg(dev, frm_out,
+ 			vb2_dma_contig_plane_dma_addr(&src->vb2_buf, 0));
+diff --git a/drivers/media/rc/streamzap.c b/drivers/media/rc/streamzap.c
+index 2ce62fe5d60f5a..d3b48a0dd1f474 100644
+--- a/drivers/media/rc/streamzap.c
++++ b/drivers/media/rc/streamzap.c
+@@ -138,39 +138,10 @@ static void sz_push_half_space(struct streamzap_ir *sz,
+ 	sz_push_full_space(sz, value & SZ_SPACE_MASK);
+ }
+ 
+-/*
+- * streamzap_callback - usb IRQ handler callback
+- *
+- * This procedure is invoked on reception of data from
+- * the usb remote.
+- */
+-static void streamzap_callback(struct urb *urb)
++static void sz_process_ir_data(struct streamzap_ir *sz, int len)
+ {
+-	struct streamzap_ir *sz;
+ 	unsigned int i;
+-	int len;
+-
+-	if (!urb)
+-		return;
+-
+-	sz = urb->context;
+-	len = urb->actual_length;
+-
+-	switch (urb->status) {
+-	case -ECONNRESET:
+-	case -ENOENT:
+-	case -ESHUTDOWN:
+-		/*
+-		 * this urb is terminated, clean up.
+-		 * sz might already be invalid at this point
+-		 */
+-		dev_err(sz->dev, "urb terminated, status: %d\n", urb->status);
+-		return;
+-	default:
+-		break;
+-	}
+ 
+-	dev_dbg(sz->dev, "%s: received urb, len %d\n", __func__, len);
+ 	for (i = 0; i < len; i++) {
+ 		dev_dbg(sz->dev, "sz->buf_in[%d]: %x\n",
+ 			i, (unsigned char)sz->buf_in[i]);
+@@ -219,6 +190,43 @@ static void streamzap_callback(struct urb *urb)
+ 	}
+ 
+ 	ir_raw_event_handle(sz->rdev);
++}
++
++/*
++ * streamzap_callback - usb IRQ handler callback
++ *
++ * This procedure is invoked on reception of data from
++ * the usb remote.
++ */
++static void streamzap_callback(struct urb *urb)
++{
++	struct streamzap_ir *sz;
++	int len;
++
++	if (!urb)
++		return;
++
++	sz = urb->context;
++	len = urb->actual_length;
++
++	switch (urb->status) {
++	case 0:
++		dev_dbg(sz->dev, "%s: received urb, len %d\n", __func__, len);
++		sz_process_ir_data(sz, len);
++		break;
++	case -ECONNRESET:
++	case -ENOENT:
++	case -ESHUTDOWN:
++		/*
++		 * this urb is terminated, clean up.
++		 * sz might already be invalid at this point
++		 */
++		dev_err(sz->dev, "urb terminated, status: %d\n", urb->status);
++		return;
++	default:
++		break;
++	}
++
+ 	usb_submit_urb(urb, GFP_ATOMIC);
+ }
+ 
+diff --git a/drivers/media/test-drivers/vim2m.c b/drivers/media/test-drivers/vim2m.c
+index 3e3b424b486058..8ca6459286ba6e 100644
+--- a/drivers/media/test-drivers/vim2m.c
++++ b/drivers/media/test-drivers/vim2m.c
+@@ -1316,9 +1316,6 @@ static int vim2m_probe(struct platform_device *pdev)
+ 	vfd->v4l2_dev = &dev->v4l2_dev;
+ 
+ 	video_set_drvdata(vfd, dev);
+-	v4l2_info(&dev->v4l2_dev,
+-		  "Device registered as /dev/video%d\n", vfd->num);
+-
+ 	platform_set_drvdata(pdev, dev);
+ 
+ 	dev->m2m_dev = v4l2_m2m_init(&m2m_ops);
+@@ -1345,6 +1342,9 @@ static int vim2m_probe(struct platform_device *pdev)
+ 		goto error_m2m;
+ 	}
+ 
++	v4l2_info(&dev->v4l2_dev,
++		  "Device registered as /dev/video%d\n", vfd->num);
++
+ #ifdef CONFIG_MEDIA_CONTROLLER
+ 	ret = v4l2_m2m_register_media_controller(dev->m2m_dev, vfd,
+ 						 MEDIA_ENT_F_PROC_VIDEO_SCALER);
+diff --git a/drivers/media/test-drivers/visl/visl-core.c b/drivers/media/test-drivers/visl/visl-core.c
+index c46464bcaf2e13..93239391f2cf64 100644
+--- a/drivers/media/test-drivers/visl/visl-core.c
++++ b/drivers/media/test-drivers/visl/visl-core.c
+@@ -161,9 +161,15 @@ static const struct visl_ctrl_desc visl_h264_ctrl_descs[] = {
+ 	},
+ 	{
+ 		.cfg.id = V4L2_CID_STATELESS_H264_DECODE_MODE,
++		.cfg.min = V4L2_STATELESS_H264_DECODE_MODE_SLICE_BASED,
++		.cfg.max = V4L2_STATELESS_H264_DECODE_MODE_FRAME_BASED,
++		.cfg.def = V4L2_STATELESS_H264_DECODE_MODE_SLICE_BASED,
+ 	},
+ 	{
+ 		.cfg.id = V4L2_CID_STATELESS_H264_START_CODE,
++		.cfg.min = V4L2_STATELESS_H264_START_CODE_NONE,
++		.cfg.max = V4L2_STATELESS_H264_START_CODE_ANNEX_B,
++		.cfg.def = V4L2_STATELESS_H264_START_CODE_NONE,
+ 	},
+ 	{
+ 		.cfg.id = V4L2_CID_STATELESS_H264_SLICE_PARAMS,
+@@ -198,9 +204,15 @@ static const struct visl_ctrl_desc visl_hevc_ctrl_descs[] = {
+ 	},
+ 	{
+ 		.cfg.id = V4L2_CID_STATELESS_HEVC_DECODE_MODE,
++		.cfg.min = V4L2_STATELESS_HEVC_DECODE_MODE_SLICE_BASED,
++		.cfg.max = V4L2_STATELESS_HEVC_DECODE_MODE_FRAME_BASED,
++		.cfg.def = V4L2_STATELESS_HEVC_DECODE_MODE_SLICE_BASED,
+ 	},
+ 	{
+ 		.cfg.id = V4L2_CID_STATELESS_HEVC_START_CODE,
++		.cfg.min = V4L2_STATELESS_HEVC_START_CODE_NONE,
++		.cfg.max = V4L2_STATELESS_HEVC_START_CODE_ANNEX_B,
++		.cfg.def = V4L2_STATELESS_HEVC_START_CODE_NONE,
+ 	},
+ 	{
+ 		.cfg.id = V4L2_CID_STATELESS_HEVC_ENTRY_POINT_OFFSETS,
+diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
+index 4d8e00b425f443..a0d683d2664719 100644
+--- a/drivers/media/usb/uvc/uvc_driver.c
++++ b/drivers/media/usb/uvc/uvc_driver.c
+@@ -3039,6 +3039,15 @@ static const struct usb_device_id uvc_ids[] = {
+ 	  .bInterfaceProtocol	= 0,
+ 	  .driver_info		= UVC_INFO_QUIRK(UVC_QUIRK_PROBE_MINMAX
+ 					| UVC_QUIRK_IGNORE_SELECTOR_UNIT) },
++	/* Actions Microelectronics Co. Display capture-UVC05 */
++	{ .match_flags		= USB_DEVICE_ID_MATCH_DEVICE
++				| USB_DEVICE_ID_MATCH_INT_INFO,
++	  .idVendor		= 0x1de1,
++	  .idProduct		= 0xf105,
++	  .bInterfaceClass	= USB_CLASS_VIDEO,
++	  .bInterfaceSubClass	= 1,
++	  .bInterfaceProtocol	= 0,
++	  .driver_info		= UVC_INFO_QUIRK(UVC_QUIRK_DISABLE_AUTOSUSPEND) },
+ 	/* NXP Semiconductors IR VIDEO */
+ 	{ .match_flags		= USB_DEVICE_ID_MATCH_DEVICE
+ 				| USB_DEVICE_ID_MATCH_INT_INFO,
+diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c
+index 2cf5dcee0ce800..4d05873892c168 100644
+--- a/drivers/media/v4l2-core/v4l2-dv-timings.c
++++ b/drivers/media/v4l2-core/v4l2-dv-timings.c
+@@ -764,7 +764,7 @@ bool v4l2_detect_gtf(unsigned int frame_height,
+ 		u64 num;
+ 		u32 den;
+ 
+-		num = ((image_width * GTF_D_C_PRIME * (u64)hfreq) -
++		num = (((u64)image_width * GTF_D_C_PRIME * hfreq) -
+ 		      ((u64)image_width * GTF_D_M_PRIME * 1000));
+ 		den = (hfreq * (100 - GTF_D_C_PRIME) + GTF_D_M_PRIME * 1000) *
+ 		      (2 * GTF_CELL_GRAN);
+@@ -774,7 +774,7 @@ bool v4l2_detect_gtf(unsigned int frame_height,
+ 		u64 num;
+ 		u32 den;
+ 
+-		num = ((image_width * GTF_S_C_PRIME * (u64)hfreq) -
++		num = (((u64)image_width * GTF_S_C_PRIME * hfreq) -
+ 		      ((u64)image_width * GTF_S_M_PRIME * 1000));
+ 		den = (hfreq * (100 - GTF_S_C_PRIME) + GTF_S_M_PRIME * 1000) *
+ 		      (2 * GTF_CELL_GRAN);
+diff --git a/drivers/mfd/ene-kb3930.c b/drivers/mfd/ene-kb3930.c
+index fa0ad2f14a3961..9460a67acb0b5e 100644
+--- a/drivers/mfd/ene-kb3930.c
++++ b/drivers/mfd/ene-kb3930.c
+@@ -162,7 +162,7 @@ static int kb3930_probe(struct i2c_client *client)
+ 			devm_gpiod_get_array_optional(dev, "off", GPIOD_IN);
+ 		if (IS_ERR(ddata->off_gpios))
+ 			return PTR_ERR(ddata->off_gpios);
+-		if (ddata->off_gpios->ndescs < 2) {
++		if (ddata->off_gpios && ddata->off_gpios->ndescs < 2) {
+ 			dev_err(dev, "invalid off-gpios property\n");
+ 			return -EINVAL;
+ 		}
+diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c
+index 3aaaf47fa4ee20..8dea2b44fd8bfe 100644
+--- a/drivers/misc/pci_endpoint_test.c
++++ b/drivers/misc/pci_endpoint_test.c
+@@ -85,7 +85,6 @@
+ #define PCI_DEVICE_ID_RENESAS_R8A774E1		0x0025
+ #define PCI_DEVICE_ID_RENESAS_R8A779F0		0x0031
+ 
+-#define PCI_VENDOR_ID_ROCKCHIP			0x1d87
+ #define PCI_DEVICE_ID_ROCKCHIP_RK3588		0x3588
+ 
+ static DEFINE_IDA(pci_endpoint_test_ida);
+@@ -235,7 +234,7 @@ static bool pci_endpoint_test_request_irq(struct pci_endpoint_test *test)
+ 	return true;
+ 
+ fail:
+-	switch (irq_type) {
++	switch (test->irq_type) {
+ 	case IRQ_TYPE_INTX:
+ 		dev_err(dev, "Failed to request IRQ %d for Legacy\n",
+ 			pci_irq_vector(pdev, i));
+diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
+index e9f6e4e622901a..55158540c28cfd 100644
+--- a/drivers/mmc/host/dw_mmc.c
++++ b/drivers/mmc/host/dw_mmc.c
+@@ -2579,6 +2579,91 @@ static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
+ 	}
+ }
+ 
++static void dw_mci_push_data64_32(struct dw_mci *host, void *buf, int cnt)
++{
++	struct mmc_data *data = host->data;
++	int init_cnt = cnt;
++
++	/* try and push anything in the part_buf */
++	if (unlikely(host->part_buf_count)) {
++		int len = dw_mci_push_part_bytes(host, buf, cnt);
++
++		buf += len;
++		cnt -= len;
++
++		if (host->part_buf_count == 8) {
++			mci_fifo_l_writeq(host->fifo_reg, host->part_buf);
++			host->part_buf_count = 0;
++		}
++	}
++#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
++	if (unlikely((unsigned long)buf & 0x7)) {
++		while (cnt >= 8) {
++			u64 aligned_buf[16];
++			int len = min(cnt & -8, (int)sizeof(aligned_buf));
++			int items = len >> 3;
++			int i;
++			/* memcpy from input buffer into aligned buffer */
++			memcpy(aligned_buf, buf, len);
++			buf += len;
++			cnt -= len;
++			/* push data from aligned buffer into fifo */
++			for (i = 0; i < items; ++i)
++				mci_fifo_l_writeq(host->fifo_reg, aligned_buf[i]);
++		}
++	} else
++#endif
++	{
++		u64 *pdata = buf;
++
++		for (; cnt >= 8; cnt -= 8)
++			mci_fifo_l_writeq(host->fifo_reg, *pdata++);
++		buf = pdata;
++	}
++	/* put anything remaining in the part_buf */
++	if (cnt) {
++		dw_mci_set_part_bytes(host, buf, cnt);
++		/* Push data if we have reached the expected data length */
++		if ((data->bytes_xfered + init_cnt) ==
++		    (data->blksz * data->blocks))
++			mci_fifo_l_writeq(host->fifo_reg, host->part_buf);
++	}
++}
++
++static void dw_mci_pull_data64_32(struct dw_mci *host, void *buf, int cnt)
++{
++#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
++	if (unlikely((unsigned long)buf & 0x7)) {
++		while (cnt >= 8) {
++			/* pull data from fifo into aligned buffer */
++			u64 aligned_buf[16];
++			int len = min(cnt & -8, (int)sizeof(aligned_buf));
++			int items = len >> 3;
++			int i;
++
++			for (i = 0; i < items; ++i)
++				aligned_buf[i] = mci_fifo_l_readq(host->fifo_reg);
++
++			/* memcpy from aligned buffer into output buffer */
++			memcpy(buf, aligned_buf, len);
++			buf += len;
++			cnt -= len;
++		}
++	} else
++#endif
++	{
++		u64 *pdata = buf;
++
++		for (; cnt >= 8; cnt -= 8)
++			*pdata++ = mci_fifo_l_readq(host->fifo_reg);
++		buf = pdata;
++	}
++	if (cnt) {
++		host->part_buf = mci_fifo_l_readq(host->fifo_reg);
++		dw_mci_pull_final_bytes(host, buf, cnt);
++	}
++}
++
+ static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
+ {
+ 	int len;
+@@ -3379,8 +3464,13 @@ int dw_mci_probe(struct dw_mci *host)
+ 		width = 16;
+ 		host->data_shift = 1;
+ 	} else if (i == 2) {
+-		host->push_data = dw_mci_push_data64;
+-		host->pull_data = dw_mci_pull_data64;
++		if ((host->quirks & DW_MMC_QUIRK_FIFO64_32)) {
++			host->push_data = dw_mci_push_data64_32;
++			host->pull_data = dw_mci_pull_data64_32;
++		} else {
++			host->push_data = dw_mci_push_data64;
++			host->pull_data = dw_mci_pull_data64;
++		}
+ 		width = 64;
+ 		host->data_shift = 3;
+ 	} else {
+diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
+index 6447b916990dcd..5463392dc81105 100644
+--- a/drivers/mmc/host/dw_mmc.h
++++ b/drivers/mmc/host/dw_mmc.h
+@@ -281,6 +281,8 @@ struct dw_mci_board {
+ 
+ /* Support for longer data read timeout */
+ #define DW_MMC_QUIRK_EXTENDED_TMOUT            BIT(0)
++/* Force 32-bit access to the FIFO */
++#define DW_MMC_QUIRK_FIFO64_32                 BIT(1)
+ 
+ #define DW_MMC_240A		0x240a
+ #define DW_MMC_280A		0x280a
+@@ -472,6 +474,31 @@ struct dw_mci_board {
+ #define mci_fifo_writel(__value, __reg)	__raw_writel(__reg, __value)
+ #define mci_fifo_writeq(__value, __reg)	__raw_writeq(__reg, __value)
+ 
++/*
++ * Some dw_mmc devices have 64-bit FIFOs, but expect them to be
++ * accessed using two 32-bit accesses. If such controller is used
++ * with a 64-bit kernel, this has to be done explicitly.
++ */
++static inline u64 mci_fifo_l_readq(void __iomem *addr)
++{
++	u64 ans;
++	u32 proxy[2];
++
++	proxy[0] = mci_fifo_readl(addr);
++	proxy[1] = mci_fifo_readl(addr + 4);
++	memcpy(&ans, proxy, 8);
++	return ans;
++}
++
++static inline void mci_fifo_l_writeq(void __iomem *addr, u64 value)
++{
++	u32 proxy[2];
++
++	memcpy(proxy, &value, 8);
++	mci_fifo_writel(addr, proxy[0]);
++	mci_fifo_writel(addr + 4, proxy[1]);
++}
++
+ /* Register access macros */
+ #define mci_readl(dev, reg)			\
+ 	readl_relaxed((dev)->regs + SDMMC_##reg)
+diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
+index 9739387cff8c91..58c6e1743f5c65 100644
+--- a/drivers/mtd/inftlcore.c
++++ b/drivers/mtd/inftlcore.c
+@@ -482,10 +482,11 @@ static inline u16 INFTL_findwriteunit(struct INFTLrecord *inftl, unsigned block)
+ 		silly = MAX_LOOPS;
+ 
+ 		while (thisEUN <= inftl->lastEUN) {
+-			inftl_read_oob(mtd, (thisEUN * inftl->EraseSize) +
+-				       blockofs, 8, &retlen, (char *)&bci);
+-
+-			status = bci.Status | bci.Status1;
++			if (inftl_read_oob(mtd, (thisEUN * inftl->EraseSize) +
++				       blockofs, 8, &retlen, (char *)&bci) < 0)
++				status = SECTOR_IGNORE;
++			else
++				status = bci.Status | bci.Status1;
+ 			pr_debug("INFTL: status of block %d in EUN %d is %x\n",
+ 					block , writeEUN, status);
+ 
+diff --git a/drivers/mtd/mtdpstore.c b/drivers/mtd/mtdpstore.c
+index 7ac8ac90130685..9cf3872e37ae14 100644
+--- a/drivers/mtd/mtdpstore.c
++++ b/drivers/mtd/mtdpstore.c
+@@ -417,11 +417,14 @@ static void mtdpstore_notify_add(struct mtd_info *mtd)
+ 	}
+ 
+ 	longcnt = BITS_TO_LONGS(div_u64(mtd->size, info->kmsg_size));
+-	cxt->rmmap = kcalloc(longcnt, sizeof(long), GFP_KERNEL);
+-	cxt->usedmap = kcalloc(longcnt, sizeof(long), GFP_KERNEL);
++	cxt->rmmap = devm_kcalloc(&mtd->dev, longcnt, sizeof(long), GFP_KERNEL);
++	cxt->usedmap = devm_kcalloc(&mtd->dev, longcnt, sizeof(long), GFP_KERNEL);
+ 
+ 	longcnt = BITS_TO_LONGS(div_u64(mtd->size, mtd->erasesize));
+-	cxt->badmap = kcalloc(longcnt, sizeof(long), GFP_KERNEL);
++	cxt->badmap = devm_kcalloc(&mtd->dev, longcnt, sizeof(long), GFP_KERNEL);
++
++	if (!cxt->rmmap || !cxt->usedmap || !cxt->badmap)
++		return;
+ 
+ 	/* just support dmesg right now */
+ 	cxt->dev.flags = PSTORE_FLAGS_DMESG;
+@@ -527,9 +530,6 @@ static void mtdpstore_notify_remove(struct mtd_info *mtd)
+ 	mtdpstore_flush_removed(cxt);
+ 
+ 	unregister_pstore_device(&cxt->dev);
+-	kfree(cxt->badmap);
+-	kfree(cxt->usedmap);
+-	kfree(cxt->rmmap);
+ 	cxt->mtd = NULL;
+ 	cxt->index = -1;
+ }
+diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+index e76df6a00ed4f5..2eb44c1428fbc2 100644
+--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
++++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+@@ -3008,7 +3008,7 @@ static int brcmnand_resume(struct device *dev)
+ 		brcmnand_save_restore_cs_config(host, 1);
+ 
+ 		/* Reset the chip, required by some chips after power-up */
+-		nand_reset_op(chip);
++		nand_reset(chip, 0);
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/mtd/nand/raw/r852.c b/drivers/mtd/nand/raw/r852.c
+index ed0cf732d20e40..36cfe03cd4ac3b 100644
+--- a/drivers/mtd/nand/raw/r852.c
++++ b/drivers/mtd/nand/raw/r852.c
+@@ -387,6 +387,9 @@ static int r852_wait(struct nand_chip *chip)
+ static int r852_ready(struct nand_chip *chip)
+ {
+ 	struct r852_device *dev = r852_get_dev(nand_to_mtd(chip));
++	if (dev->card_unstable)
++		return 0;
++
+ 	return !(r852_read_reg(dev, R852_CARD_STA) & R852_CARD_STA_BUSY);
+ }
+ 
+diff --git a/drivers/net/can/flexcan/flexcan-core.c b/drivers/net/can/flexcan/flexcan-core.c
+index b080740bcb104f..fca290afb5329a 100644
+--- a/drivers/net/can/flexcan/flexcan-core.c
++++ b/drivers/net/can/flexcan/flexcan-core.c
+@@ -386,6 +386,16 @@ static const struct flexcan_devtype_data fsl_lx2160a_r1_devtype_data = {
+ 		FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR,
+ };
+ 
++static const struct flexcan_devtype_data nxp_s32g2_devtype_data = {
++	.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
++		FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_BROKEN_PERR_STATE |
++		FLEXCAN_QUIRK_USE_RX_MAILBOX | FLEXCAN_QUIRK_SUPPORT_FD |
++		FLEXCAN_QUIRK_SUPPORT_ECC | FLEXCAN_QUIRK_NR_IRQ_3 |
++		FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
++		FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR |
++		FLEXCAN_QUIRK_SECONDARY_MB_IRQ,
++};
++
+ static const struct can_bittiming_const flexcan_bittiming_const = {
+ 	.name = DRV_NAME,
+ 	.tseg1_min = 4,
+@@ -1762,14 +1772,25 @@ static int flexcan_open(struct net_device *dev)
+ 			goto out_free_irq_boff;
+ 	}
+ 
++	if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SECONDARY_MB_IRQ) {
++		err = request_irq(priv->irq_secondary_mb,
++				  flexcan_irq, IRQF_SHARED, dev->name, dev);
++		if (err)
++			goto out_free_irq_err;
++	}
++
+ 	flexcan_chip_interrupts_enable(dev);
+ 
+ 	netif_start_queue(dev);
+ 
+ 	return 0;
+ 
++ out_free_irq_err:
++	if (priv->devtype_data.quirks & FLEXCAN_QUIRK_NR_IRQ_3)
++		free_irq(priv->irq_err, dev);
+  out_free_irq_boff:
+-	free_irq(priv->irq_boff, dev);
++	if (priv->devtype_data.quirks & FLEXCAN_QUIRK_NR_IRQ_3)
++		free_irq(priv->irq_boff, dev);
+  out_free_irq:
+ 	free_irq(dev->irq, dev);
+  out_can_rx_offload_disable:
+@@ -1794,6 +1815,9 @@ static int flexcan_close(struct net_device *dev)
+ 	netif_stop_queue(dev);
+ 	flexcan_chip_interrupts_disable(dev);
+ 
++	if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SECONDARY_MB_IRQ)
++		free_irq(priv->irq_secondary_mb, dev);
++
+ 	if (priv->devtype_data.quirks & FLEXCAN_QUIRK_NR_IRQ_3) {
+ 		free_irq(priv->irq_err, dev);
+ 		free_irq(priv->irq_boff, dev);
+@@ -2041,6 +2065,7 @@ static const struct of_device_id flexcan_of_match[] = {
+ 	{ .compatible = "fsl,vf610-flexcan", .data = &fsl_vf610_devtype_data, },
+ 	{ .compatible = "fsl,ls1021ar2-flexcan", .data = &fsl_ls1021a_r2_devtype_data, },
+ 	{ .compatible = "fsl,lx2160ar1-flexcan", .data = &fsl_lx2160a_r1_devtype_data, },
++	{ .compatible = "nxp,s32g2-flexcan", .data = &nxp_s32g2_devtype_data, },
+ 	{ /* sentinel */ },
+ };
+ MODULE_DEVICE_TABLE(of, flexcan_of_match);
+@@ -2187,6 +2212,14 @@ static int flexcan_probe(struct platform_device *pdev)
+ 		}
+ 	}
+ 
++	if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SECONDARY_MB_IRQ) {
++		priv->irq_secondary_mb = platform_get_irq_byname(pdev, "mb-1");
++		if (priv->irq_secondary_mb < 0) {
++			err = priv->irq_secondary_mb;
++			goto failed_platform_get_irq;
++		}
++	}
++
+ 	if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SUPPORT_FD) {
+ 		priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD |
+ 			CAN_CTRLMODE_FD_NON_ISO;
+diff --git a/drivers/net/can/flexcan/flexcan.h b/drivers/net/can/flexcan/flexcan.h
+index 4933d8c7439e62..2cf886618c9621 100644
+--- a/drivers/net/can/flexcan/flexcan.h
++++ b/drivers/net/can/flexcan/flexcan.h
+@@ -70,6 +70,10 @@
+ #define FLEXCAN_QUIRK_SUPPORT_RX_FIFO BIT(16)
+ /* Setup stop mode with ATF SCMI protocol to support wakeup */
+ #define FLEXCAN_QUIRK_SETUP_STOP_MODE_SCMI BIT(17)
++/* Device has two separate interrupt lines for two mailbox ranges, which
++ * both need to have an interrupt handler registered.
++ */
++#define FLEXCAN_QUIRK_SECONDARY_MB_IRQ	BIT(18)
+ 
+ struct flexcan_devtype_data {
+ 	u32 quirks;		/* quirks needed for different IP cores */
+@@ -107,6 +111,7 @@ struct flexcan_priv {
+ 
+ 	int irq_boff;
+ 	int irq_err;
++	int irq_secondary_mb;
+ 
+ 	/* IPC handle when setup stop mode by System Controller firmware(scfw) */
+ 	struct imx_sc_ipc *sc_ipc_handle;
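
The flexcan changes above add a third, quirk-gated interrupt line and extend
the open/close paths accordingly; note how the new error label frees
previously requested IRQs in reverse order, and only the ones guarded by the
same quirk that requested them. The general unwind idiom, with illustrative
names (a sketch, not the driver code):

	#include <linux/interrupt.h>
	#include <linux/netdevice.h>

	/* Sketch: request two IRQs, unwinding in reverse order on failure. */
	static int example_open(struct net_device *dev, unsigned int irq_a,
				unsigned int irq_b, irq_handler_t handler)
	{
		int err;

		err = request_irq(irq_a, handler, IRQF_SHARED, dev->name, dev);
		if (err)
			return err;

		err = request_irq(irq_b, handler, IRQF_SHARED, dev->name, dev);
		if (err)
			goto out_free_irq_a;

		return 0;

	 out_free_irq_a:
		free_irq(irq_a, dev);
		return err;
	}
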
+diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
+index 5935100e7d65f8..e20d9d62032e31 100644
+--- a/drivers/net/dsa/mv88e6xxx/chip.c
++++ b/drivers/net/dsa/mv88e6xxx/chip.c
+@@ -3691,6 +3691,21 @@ static int mv88e6xxx_stats_setup(struct mv88e6xxx_chip *chip)
+ 	return mv88e6xxx_g1_stats_clear(chip);
+ }
+ 
++static int mv88e6320_setup_errata(struct mv88e6xxx_chip *chip)
++{
++	u16 dummy;
++	int err;
++
++	/* Workaround for erratum
++	 *   3.3 RGMII timing may be out of spec when transmit delay is enabled
++	 */
++	err = mv88e6xxx_port_hidden_write(chip, 0, 0xf, 0x7, 0xe000);
++	if (err)
++		return err;
++
++	return mv88e6xxx_port_hidden_read(chip, 0, 0xf, 0x7, &dummy);
++}
++
+ /* Check if the errata has already been applied. */
+ static bool mv88e6390_setup_errata_applied(struct mv88e6xxx_chip *chip)
+ {
+@@ -5144,6 +5159,7 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
+ 
+ static const struct mv88e6xxx_ops mv88e6320_ops = {
+ 	/* MV88E6XXX_FAMILY_6320 */
++	.setup_errata = mv88e6320_setup_errata,
+ 	.ieee_pri_map = mv88e6085_g1_ieee_pri_map,
+ 	.ip_pri_map = mv88e6085_g1_ip_pri_map,
+ 	.irl_init_all = mv88e6352_g2_irl_init_all,
+@@ -5193,6 +5209,7 @@ static const struct mv88e6xxx_ops mv88e6320_ops = {
+ 
+ static const struct mv88e6xxx_ops mv88e6321_ops = {
+ 	/* MV88E6XXX_FAMILY_6320 */
++	.setup_errata = mv88e6320_setup_errata,
+ 	.ieee_pri_map = mv88e6085_g1_ieee_pri_map,
+ 	.ip_pri_map = mv88e6085_g1_ip_pri_map,
+ 	.irl_init_all = mv88e6352_g2_irl_init_all,
+@@ -6154,7 +6171,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
+ 		.num_databases = 4096,
+ 		.num_macs = 8192,
+ 		.num_ports = 7,
+-		.num_internal_phys = 5,
++		.num_internal_phys = 2,
++		.internal_phys_offset = 3,
+ 		.num_gpio = 15,
+ 		.max_vid = 4095,
+ 		.max_sid = 63,
+@@ -6348,7 +6366,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
+ 		.num_databases = 4096,
+ 		.num_macs = 8192,
+ 		.num_ports = 7,
+-		.num_internal_phys = 5,
++		.num_internal_phys = 2,
++		.internal_phys_offset = 3,
+ 		.num_gpio = 15,
+ 		.max_vid = 4095,
+ 		.max_sid = 63,
+diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
+index bdfc6e77b2af56..1f5db1096d4a40 100644
+--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
++++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
+@@ -392,7 +392,9 @@ gve_get_ethtool_stats(struct net_device *netdev,
+ 				 */
+ 				data[i++] = 0;
+ 				data[i++] = 0;
+-				data[i++] = tx->dqo_tx.tail - tx->dqo_tx.head;
++				data[i++] =
++					(tx->dqo_tx.tail - tx->dqo_tx.head) &
++					tx->mask;
+ 			}
+ 			do {
+ 				start =
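
The gve fix above masks the head/tail difference with the ring mask so the
reported fill level stays within the power-of-two ring size even after the
32-bit counters wrap. The arithmetic in isolation (a sketch, not driver code):

	#include <linux/types.h>

	/* For a power-of-two ring, unsigned subtraction is modular, so
	 * (tail - head) & mask is the number of in-flight entries even
	 * across counter wraparound; mask == ring_size - 1. */
	static inline u32 ring_fill_level(u32 tail, u32 head, u32 mask)
	{
		return (tail - head) & mask;
	}
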
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
+index 0f844c14485a0e..35acc07bd96489 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
+@@ -165,6 +165,11 @@ static void __otx2_qos_txschq_cfg(struct otx2_nic *pfvf,
+ 
+ 		otx2_config_sched_shaping(pfvf, node, cfg, &num_regs);
+ 	} else if (level == NIX_TXSCH_LVL_TL2) {
++		/* configure parent txschq */
++		cfg->reg[num_regs] = NIX_AF_TL2X_PARENT(node->schq);
++		cfg->regval[num_regs] = (u64)hw->tx_link << 16;
++		num_regs++;
++
+ 		/* configure link cfg */
+ 		if (level == pfvf->qos.link_cfg_lvl) {
+ 			cfg->reg[num_regs] = NIX_AF_TL3_TL2X_LINKX_CFG(node->schq, hw->tx_link);
+diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
+index b2d206dec70c8a..12c22261dd3a8b 100644
+--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
++++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
+@@ -636,30 +636,16 @@ int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu, int num_qu
+ 	mpc->rxbpre_total = 0;
+ 
+ 	for (i = 0; i < num_rxb; i++) {
+-		if (mpc->rxbpre_alloc_size > PAGE_SIZE) {
+-			va = netdev_alloc_frag(mpc->rxbpre_alloc_size);
+-			if (!va)
+-				goto error;
+-
+-			page = virt_to_head_page(va);
+-			/* Check if the frag falls back to single page */
+-			if (compound_order(page) <
+-			    get_order(mpc->rxbpre_alloc_size)) {
+-				put_page(page);
+-				goto error;
+-			}
+-		} else {
+-			page = dev_alloc_page();
+-			if (!page)
+-				goto error;
++		page = dev_alloc_pages(get_order(mpc->rxbpre_alloc_size));
++		if (!page)
++			goto error;
+ 
+-			va = page_to_virt(page);
+-		}
++		va = page_to_virt(page);
+ 
+ 		da = dma_map_single(dev, va + mpc->rxbpre_headroom,
+ 				    mpc->rxbpre_datasize, DMA_FROM_DEVICE);
+ 		if (dma_mapping_error(dev, da)) {
+-			put_page(virt_to_head_page(va));
++			put_page(page);
+ 			goto error;
+ 		}
+ 
+@@ -1618,7 +1604,7 @@ static void mana_rx_skb(void *buf_va, bool from_pool,
+ }
+ 
+ static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
+-			     dma_addr_t *da, bool *from_pool, bool is_napi)
++			     dma_addr_t *da, bool *from_pool)
+ {
+ 	struct page *page;
+ 	void *va;
+@@ -1629,21 +1615,6 @@ static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
+ 	if (rxq->xdp_save_va) {
+ 		va = rxq->xdp_save_va;
+ 		rxq->xdp_save_va = NULL;
+-	} else if (rxq->alloc_size > PAGE_SIZE) {
+-		if (is_napi)
+-			va = napi_alloc_frag(rxq->alloc_size);
+-		else
+-			va = netdev_alloc_frag(rxq->alloc_size);
+-
+-		if (!va)
+-			return NULL;
+-
+-		page = virt_to_head_page(va);
+-		/* Check if the frag falls back to single page */
+-		if (compound_order(page) < get_order(rxq->alloc_size)) {
+-			put_page(page);
+-			return NULL;
+-		}
+ 	} else {
+ 		page = page_pool_dev_alloc_pages(rxq->page_pool);
+ 		if (!page)
+@@ -1676,7 +1647,7 @@ static void mana_refill_rx_oob(struct device *dev, struct mana_rxq *rxq,
+ 	dma_addr_t da;
+ 	void *va;
+ 
+-	va = mana_get_rxfrag(rxq, dev, &da, &from_pool, true);
++	va = mana_get_rxfrag(rxq, dev, &da, &from_pool);
+ 	if (!va)
+ 		return;
+ 
+@@ -2083,7 +2054,7 @@ static int mana_fill_rx_oob(struct mana_recv_buf_oob *rx_oob, u32 mem_key,
+ 	if (mpc->rxbufs_pre)
+ 		va = mana_get_rxbuf_pre(rxq, &da);
+ 	else
+-		va = mana_get_rxfrag(rxq, dev, &da, &from_pool, false);
++		va = mana_get_rxfrag(rxq, dev, &da, &from_pool);
+ 
+ 	if (!va)
+ 		return -ENOMEM;
+@@ -2169,6 +2140,7 @@ static int mana_create_page_pool(struct mana_rxq *rxq, struct gdma_context *gc)
+ 	pprm.nid = gc->numa_node;
+ 	pprm.napi = &rxq->rx_cq.napi;
+ 	pprm.netdev = rxq->ndev;
++	pprm.order = get_order(rxq->alloc_size);
+ 
+ 	rxq->page_pool = page_pool_create(&pprm);
+ 
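
The mana change above drops the frag-allocator fallback entirely and instead
sizes the page pool for the full receive buffer via pprm.order, so
page_pool_dev_alloc_pages() hands back compound pages that are large enough on
their own. A hedged sketch of creating such a pool (the pool depth and names
are illustrative, not taken from the driver):

	#include <linux/mm.h>
	#include <net/page_pool/types.h>

	/* Sketch: a pool serving order-N compound pages for @alloc_size. */
	static struct page_pool *example_create_pool(unsigned int alloc_size,
						     int numa_node)
	{
		struct page_pool_params pprm = {
			.pool_size = 256,		/* illustrative */
			.nid = numa_node,
			.order = get_order(alloc_size),	/* >PAGE_SIZE buffers */
		};

		return page_pool_create(&pprm);	/* ERR_PTR() on failure */
	}
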
+diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
+index 2b3d6586f44a53..71c891d14fb626 100644
+--- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c
++++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
+@@ -309,7 +309,8 @@ static bool wx_alloc_mapped_page(struct wx_ring *rx_ring,
+ 		return true;
+ 
+ 	page = page_pool_dev_alloc_pages(rx_ring->page_pool);
+-	WARN_ON(!page);
++	if (unlikely(!page))
++		return false;
+ 	dma = page_pool_get_dma_addr(page);
+ 
+ 	bi->page_dma = dma;
+diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
+index 119dfa2d6643a9..8af44224480f15 100644
+--- a/drivers/net/phy/phy_device.c
++++ b/drivers/net/phy/phy_device.c
+@@ -289,6 +289,46 @@ static bool phy_drv_wol_enabled(struct phy_device *phydev)
+ 	return wol.wolopts != 0;
+ }
+ 
++static void phy_link_change(struct phy_device *phydev, bool up)
++{
++	struct net_device *netdev = phydev->attached_dev;
++
++	if (up)
++		netif_carrier_on(netdev);
++	else
++		netif_carrier_off(netdev);
++	phydev->adjust_link(netdev);
++	if (phydev->mii_ts && phydev->mii_ts->link_state)
++		phydev->mii_ts->link_state(phydev->mii_ts, phydev);
++}
++
++/**
++ * phy_uses_state_machine - test whether consumer driver uses PAL state machine
++ * @phydev: the target PHY device structure
++ *
++ * Ultimately, this aims to indirectly determine whether the PHY is attached
++ * to a consumer which uses the state machine by calling phy_start() and
++ * phy_stop().
++ *
++ * When the PHY driver consumer uses phylib, it must have previously called
++ * phy_connect_direct() or one of its derivatives, so that phy_prepare_link()
++ * has set up a hook for monitoring state changes.
++ *
++ * When the PHY driver is used by the MAC driver consumer through phylink (the
++ * only other provider of a phy_link_change() method), using the PHY state
++ * machine is not optional.
++ *
++ * Return: true if consumer calls phy_start() and phy_stop(), false otherwise.
++ */
++static bool phy_uses_state_machine(struct phy_device *phydev)
++{
++	if (phydev->phy_link_change == phy_link_change)
++		return phydev->attached_dev && phydev->adjust_link;
++
++	/* phydev->phy_link_change is implicitly phylink_phy_change() */
++	return true;
++}
++
+ static bool mdio_bus_phy_may_suspend(struct phy_device *phydev)
+ {
+ 	struct device_driver *drv = phydev->mdio.dev.driver;
+@@ -355,7 +395,7 @@ static __maybe_unused int mdio_bus_phy_suspend(struct device *dev)
+ 	 * may call phy routines that try to grab the same lock, and that may
+ 	 * lead to a deadlock.
+ 	 */
+-	if (phydev->attached_dev && phydev->adjust_link)
++	if (phy_uses_state_machine(phydev))
+ 		phy_stop_machine(phydev);
+ 
+ 	if (!mdio_bus_phy_may_suspend(phydev))
+@@ -409,7 +449,7 @@ static __maybe_unused int mdio_bus_phy_resume(struct device *dev)
+ 		}
+ 	}
+ 
+-	if (phydev->attached_dev && phydev->adjust_link)
++	if (phy_uses_state_machine(phydev))
+ 		phy_start_machine(phydev);
+ 
+ 	return 0;
+@@ -1101,19 +1141,6 @@ struct phy_device *phy_find_first(struct mii_bus *bus)
+ }
+ EXPORT_SYMBOL(phy_find_first);
+ 
+-static void phy_link_change(struct phy_device *phydev, bool up)
+-{
+-	struct net_device *netdev = phydev->attached_dev;
+-
+-	if (up)
+-		netif_carrier_on(netdev);
+-	else
+-		netif_carrier_off(netdev);
+-	phydev->adjust_link(netdev);
+-	if (phydev->mii_ts && phydev->mii_ts->link_state)
+-		phydev->mii_ts->link_state(phydev->mii_ts, phydev);
+-}
+-
+ /**
+  * phy_prepare_link - prepares the PHY layer to monitor link status
+  * @phydev: target phy_device struct
+diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
+index dcec92625cf651..7b33993f7001e4 100644
+--- a/drivers/net/phy/sfp.c
++++ b/drivers/net/phy/sfp.c
+@@ -385,7 +385,7 @@ static void sfp_fixup_rollball(struct sfp *sfp)
+ 	sfp->phy_t_retry = msecs_to_jiffies(1000);
+ }
+ 
+-static void sfp_fixup_fs_2_5gt(struct sfp *sfp)
++static void sfp_fixup_rollball_wait4s(struct sfp *sfp)
+ {
+ 	sfp_fixup_rollball(sfp);
+ 
+@@ -399,7 +399,7 @@ static void sfp_fixup_fs_2_5gt(struct sfp *sfp)
+ static void sfp_fixup_fs_10gt(struct sfp *sfp)
+ {
+ 	sfp_fixup_10gbaset_30m(sfp);
+-	sfp_fixup_fs_2_5gt(sfp);
++	sfp_fixup_rollball_wait4s(sfp);
+ }
+ 
+ static void sfp_fixup_halny_gsfp(struct sfp *sfp)
+@@ -479,9 +479,10 @@ static const struct sfp_quirk sfp_quirks[] = {
+ 	// PHY.
+ 	SFP_QUIRK_F("FS", "SFP-10G-T", sfp_fixup_fs_10gt),
+ 
+-	// Fiberstore SFP-2.5G-T uses Rollball protocol to talk to the PHY and
+-	// needs 4 sec wait before probing the PHY.
+-	SFP_QUIRK_F("FS", "SFP-2.5G-T", sfp_fixup_fs_2_5gt),
++	// Fiberstore SFP-2.5G-T and SFP-10GM-T use Rollball protocol to talk
++	// to the PHY and need 4 sec wait before probing the PHY.
++	SFP_QUIRK_F("FS", "SFP-2.5G-T", sfp_fixup_rollball_wait4s),
++	SFP_QUIRK_F("FS", "SFP-10GM-T", sfp_fixup_rollball_wait4s),
+ 
+ 	// Fiberstore GPON-ONU-34-20BI can operate at 2500base-X, but report 1.2GBd
+ 	// NRZ in their EEPROM
+@@ -515,6 +516,8 @@ static const struct sfp_quirk sfp_quirks[] = {
+ 
+ 	SFP_QUIRK_F("OEM", "SFP-10G-T", sfp_fixup_rollball_cc),
+ 	SFP_QUIRK_M("OEM", "SFP-2.5G-T", sfp_quirk_oem_2_5g),
++	SFP_QUIRK_M("OEM", "SFP-2.5G-BX10-D", sfp_quirk_2500basex),
++	SFP_QUIRK_M("OEM", "SFP-2.5G-BX10-U", sfp_quirk_2500basex),
+ 	SFP_QUIRK_F("OEM", "RTSFP-10", sfp_fixup_rollball_cc),
+ 	SFP_QUIRK_F("OEM", "RTSFP-10G", sfp_fixup_rollball_cc),
+ 	SFP_QUIRK_F("Turris", "RTSFP-2.5G", sfp_fixup_rollball),
+diff --git a/drivers/net/ppp/ppp_synctty.c b/drivers/net/ppp/ppp_synctty.c
+index 644e99fc3623f5..9c4932198931f3 100644
+--- a/drivers/net/ppp/ppp_synctty.c
++++ b/drivers/net/ppp/ppp_synctty.c
+@@ -506,6 +506,11 @@ ppp_sync_txmunge(struct syncppp *ap, struct sk_buff *skb)
+ 	unsigned char *data;
+ 	int islcp;
+ 
++	/* Ensure we can safely access protocol field and LCP code */
++	if (!pskb_may_pull(skb, 3)) {
++		kfree_skb(skb);
++		return NULL;
++	}
+ 	data  = skb->data;
+ 	proto = get_unaligned_be16(data);
+ 
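
The ppp_synctty fix above validates that the first three bytes (the two-byte
protocol field plus the LCP code) are actually present in the skb's linear
area before they are read; without that, a short frame leads to an
out-of-bounds read. The guard idiom, sketched with hypothetical names:

	#include <linux/skbuff.h>

	/* Sketch: make sure the bytes are linear before parsing them. */
	static int example_parse_proto(struct sk_buff *skb, u16 *proto)
	{
		if (!pskb_may_pull(skb, 3)) {	/* proto (2) + LCP code (1) */
			kfree_skb(skb);
			return -EINVAL;
		}

		*proto = (skb->data[0] << 8) | skb->data[1];
		return 0;
	}
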
+diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
+index 57d6e5abc30e88..da24941a6e4446 100644
+--- a/drivers/net/usb/asix_devices.c
++++ b/drivers/net/usb/asix_devices.c
+@@ -1421,6 +1421,19 @@ static const struct driver_info hg20f9_info = {
+ 	.data = FLAG_EEPROM_MAC,
+ };
+ 
++static const struct driver_info lyconsys_fibergecko100_info = {
++	.description = "LyconSys FiberGecko 100 USB 2.0 to SFP Adapter",
++	.bind = ax88178_bind,
++	.status = asix_status,
++	.link_reset = ax88178_link_reset,
++	.reset = ax88178_link_reset,
++	.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR |
++		 FLAG_MULTI_PACKET,
++	.rx_fixup = asix_rx_fixup_common,
++	.tx_fixup = asix_tx_fixup,
++	.data = 0x20061201,
++};
++
+ static const struct usb_device_id	products [] = {
+ {
+ 	// Linksys USB200M
+@@ -1578,6 +1591,10 @@ static const struct usb_device_id	products [] = {
+ 	// Linux Automation GmbH USB 10Base-T1L
+ 	USB_DEVICE(0x33f7, 0x0004),
+ 	.driver_info = (unsigned long) &lxausb_t1l_info,
++}, {
++	/* LyconSys FiberGecko 100 */
++	USB_DEVICE(0x1d2a, 0x0801),
++	.driver_info = (unsigned long) &lyconsys_fibergecko100_info,
+ },
+ 	{ },		// END
+ };
+diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
+index a6469235d904e7..a032c1ded40634 100644
+--- a/drivers/net/usb/cdc_ether.c
++++ b/drivers/net/usb/cdc_ether.c
+@@ -783,6 +783,13 @@ static const struct usb_device_id	products[] = {
+ 	.driver_info = 0,
+ },
+ 
++/* Lenovo ThinkPad Hybrid USB-C with USB-A Dock (40af0135eu, based on Realtek RTL8153) */
++{
++	USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0xa359, USB_CLASS_COMM,
++			USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
++	.driver_info = 0,
++},
++
+ /* Aquantia AQtion USB to 5GbE Controller (based on AQC111U) */
+ {
+ 	USB_DEVICE_AND_INTERFACE_INFO(AQUANTIA_VENDOR_ID, 0xc101,
+diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
+index 468c739740463d..96fa3857d8e257 100644
+--- a/drivers/net/usb/r8152.c
++++ b/drivers/net/usb/r8152.c
+@@ -785,6 +785,7 @@ enum rtl8152_flags {
+ #define DEVICE_ID_THINKPAD_USB_C_DONGLE			0x720c
+ #define DEVICE_ID_THINKPAD_USB_C_DOCK_GEN2		0xa387
+ #define DEVICE_ID_THINKPAD_USB_C_DOCK_GEN3		0x3062
++#define DEVICE_ID_THINKPAD_HYBRID_USB_C_DOCK		0xa359
+ 
+ struct tally_counter {
+ 	__le64	tx_packets;
+@@ -9787,6 +9788,7 @@ static bool rtl8152_supports_lenovo_macpassthru(struct usb_device *udev)
+ 		case DEVICE_ID_THINKPAD_USB_C_DOCK_GEN2:
+ 		case DEVICE_ID_THINKPAD_USB_C_DOCK_GEN3:
+ 		case DEVICE_ID_THINKPAD_USB_C_DONGLE:
++		case DEVICE_ID_THINKPAD_HYBRID_USB_C_DOCK:
+ 			return 1;
+ 		}
+ 	} else if (vendor_id == VENDOR_ID_REALTEK && parent_vendor_id == VENDOR_ID_LENOVO) {
+@@ -10064,6 +10066,8 @@ static const struct usb_device_id rtl8152_table[] = {
+ 	{ USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0927) },
+ 	{ USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0c5e) },
+ 	{ USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101) },
++
++	/* Lenovo */
+ 	{ USB_DEVICE(VENDOR_ID_LENOVO,  0x304f) },
+ 	{ USB_DEVICE(VENDOR_ID_LENOVO,  0x3054) },
+ 	{ USB_DEVICE(VENDOR_ID_LENOVO,  0x3062) },
+@@ -10074,7 +10078,9 @@ static const struct usb_device_id rtl8152_table[] = {
+ 	{ USB_DEVICE(VENDOR_ID_LENOVO,  0x720c) },
+ 	{ USB_DEVICE(VENDOR_ID_LENOVO,  0x7214) },
+ 	{ USB_DEVICE(VENDOR_ID_LENOVO,  0x721e) },
++	{ USB_DEVICE(VENDOR_ID_LENOVO,  0xa359) },
+ 	{ USB_DEVICE(VENDOR_ID_LENOVO,  0xa387) },
++
+ 	{ USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041) },
+ 	{ USB_DEVICE(VENDOR_ID_NVIDIA,  0x09ff) },
+ 	{ USB_DEVICE(VENDOR_ID_TPLINK,  0x0601) },
+diff --git a/drivers/net/usb/r8153_ecm.c b/drivers/net/usb/r8153_ecm.c
+index 20b2df8d74ae1b..8d860dacdf49b2 100644
+--- a/drivers/net/usb/r8153_ecm.c
++++ b/drivers/net/usb/r8153_ecm.c
+@@ -135,6 +135,12 @@ static const struct usb_device_id products[] = {
+ 				      USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ 	.driver_info = (unsigned long)&r8153_info,
+ },
++/* Lenovo ThinkPad Hybrid USB-C with USB-A Dock (40af0135eu, based on Realtek RTL8153) */
++{
++	USB_DEVICE_AND_INTERFACE_INFO(VENDOR_ID_LENOVO, 0xa359, USB_CLASS_COMM,
++				      USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
++	.driver_info = (unsigned long)&r8153_info,
++},
+ 
+ 	{ },		/* END */
+ };
+diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c
+index 97b12f51ef28c0..9389dc5f4a3dac 100644
+--- a/drivers/net/wireless/ath/ath11k/ahb.c
++++ b/drivers/net/wireless/ath/ath11k/ahb.c
+@@ -1,7 +1,7 @@
+ // SPDX-License-Identifier: BSD-3-Clause-Clear
+ /*
+  * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+- * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+  */
+ 
+ #include <linux/module.h>
+@@ -1290,6 +1290,7 @@ static void ath11k_ahb_remove(struct platform_device *pdev)
+ 	ath11k_core_deinit(ab);
+ 
+ qmi_fail:
++	ath11k_fw_destroy(ab);
+ 	ath11k_ahb_free_resources(ab);
+ }
+ 
+@@ -1309,6 +1310,7 @@ static void ath11k_ahb_shutdown(struct platform_device *pdev)
+ 	ath11k_core_deinit(ab);
+ 
+ free_resources:
++	ath11k_fw_destroy(ab);
+ 	ath11k_ahb_free_resources(ab);
+ }
+ 
+diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
+index ccf4ad35fdc335..7eba6ee054ffef 100644
+--- a/drivers/net/wireless/ath/ath11k/core.c
++++ b/drivers/net/wireless/ath/ath11k/core.c
+@@ -1,7 +1,7 @@
+ // SPDX-License-Identifier: BSD-3-Clause-Clear
+ /*
+  * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+  */
+ 
+ #include <linux/module.h>
+@@ -2214,7 +2214,6 @@ void ath11k_core_deinit(struct ath11k_base *ab)
+ 	ath11k_hif_power_down(ab);
+ 	ath11k_mac_destroy(ab);
+ 	ath11k_core_soc_destroy(ab);
+-	ath11k_fw_destroy(ab);
+ }
+ EXPORT_SYMBOL(ath11k_core_deinit);
+ 
+diff --git a/drivers/net/wireless/ath/ath11k/dp.c b/drivers/net/wireless/ath/ath11k/dp.c
+index fbf666d0ecf1dc..f124b7329e1ac2 100644
+--- a/drivers/net/wireless/ath/ath11k/dp.c
++++ b/drivers/net/wireless/ath/ath11k/dp.c
+@@ -1,7 +1,7 @@
+ // SPDX-License-Identifier: BSD-3-Clause-Clear
+ /*
+  * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+  */
+ 
+ #include <crypto/hash.h>
+@@ -104,14 +104,12 @@ void ath11k_dp_srng_cleanup(struct ath11k_base *ab, struct dp_srng *ring)
+ 	if (!ring->vaddr_unaligned)
+ 		return;
+ 
+-	if (ring->cached) {
+-		dma_unmap_single(ab->dev, ring->paddr_unaligned, ring->size,
+-				 DMA_FROM_DEVICE);
+-		kfree(ring->vaddr_unaligned);
+-	} else {
++	if (ring->cached)
++		dma_free_noncoherent(ab->dev, ring->size, ring->vaddr_unaligned,
++				     ring->paddr_unaligned, DMA_FROM_DEVICE);
++	else
+ 		dma_free_coherent(ab->dev, ring->size, ring->vaddr_unaligned,
+ 				  ring->paddr_unaligned);
+-	}
+ 
+ 	ring->vaddr_unaligned = NULL;
+ }
+@@ -249,25 +247,14 @@ int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring,
+ 		default:
+ 			cached = false;
+ 		}
+-
+-		if (cached) {
+-			ring->vaddr_unaligned = kzalloc(ring->size, GFP_KERNEL);
+-			if (!ring->vaddr_unaligned)
+-				return -ENOMEM;
+-
+-			ring->paddr_unaligned = dma_map_single(ab->dev,
+-							       ring->vaddr_unaligned,
+-							       ring->size,
+-							       DMA_FROM_DEVICE);
+-			if (dma_mapping_error(ab->dev, ring->paddr_unaligned)) {
+-				kfree(ring->vaddr_unaligned);
+-				ring->vaddr_unaligned = NULL;
+-				return -ENOMEM;
+-			}
+-		}
+ 	}
+ 
+-	if (!cached)
++	if (cached)
++		ring->vaddr_unaligned = dma_alloc_noncoherent(ab->dev, ring->size,
++							      &ring->paddr_unaligned,
++							      DMA_FROM_DEVICE,
++							      GFP_KERNEL);
++	else
+ 		ring->vaddr_unaligned = dma_alloc_coherent(ab->dev, ring->size,
+ 							   &ring->paddr_unaligned,
+ 							   GFP_KERNEL);
+diff --git a/drivers/net/wireless/ath/ath11k/fw.c b/drivers/net/wireless/ath/ath11k/fw.c
+index 4e36292a79db89..cbbd8e57119f28 100644
+--- a/drivers/net/wireless/ath/ath11k/fw.c
++++ b/drivers/net/wireless/ath/ath11k/fw.c
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: BSD-3-Clause-Clear
+ /*
+- * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+  */
+ 
+ #include "core.h"
+@@ -166,3 +166,4 @@ void ath11k_fw_destroy(struct ath11k_base *ab)
+ {
+ 	release_firmware(ab->fw.fw);
+ }
++EXPORT_SYMBOL(ath11k_fw_destroy);
+diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c
+index be9d2c69cc4137..6ebfa5d02e2e54 100644
+--- a/drivers/net/wireless/ath/ath11k/pci.c
++++ b/drivers/net/wireless/ath/ath11k/pci.c
+@@ -1,7 +1,7 @@
+ // SPDX-License-Identifier: BSD-3-Clause-Clear
+ /*
+  * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
+- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+  */
+ 
+ #include <linux/module.h>
+@@ -981,6 +981,7 @@ static void ath11k_pci_remove(struct pci_dev *pdev)
+ 	ath11k_core_deinit(ab);
+ 
+ qmi_fail:
++	ath11k_fw_destroy(ab);
+ 	ath11k_mhi_unregister(ab_pci);
+ 
+ 	ath11k_pcic_free_irq(ab);
+diff --git a/drivers/net/wireless/ath/ath12k/dp_mon.c b/drivers/net/wireless/ath/ath12k/dp_mon.c
+index 5c6749bc4039d2..1706ec27eb9c0f 100644
+--- a/drivers/net/wireless/ath/ath12k/dp_mon.c
++++ b/drivers/net/wireless/ath/ath12k/dp_mon.c
+@@ -2533,7 +2533,7 @@ int ath12k_dp_mon_rx_process_stats(struct ath12k *ar, int mac_id,
+ 		dest_idx = 0;
+ move_next:
+ 		ath12k_dp_mon_buf_replenish(ab, buf_ring, 1);
+-		ath12k_hal_srng_src_get_next_entry(ab, srng);
++		ath12k_hal_srng_dst_get_next_entry(ab, srng);
+ 		num_buffs_reaped++;
+ 	}
+ 
+diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.c b/drivers/net/wireless/ath/ath12k/dp_rx.c
+index 91e3393f7b5f40..4cbba96121a114 100644
+--- a/drivers/net/wireless/ath/ath12k/dp_rx.c
++++ b/drivers/net/wireless/ath/ath12k/dp_rx.c
+@@ -2470,6 +2470,29 @@ static void ath12k_dp_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *nap
+ 	ieee80211_rx_napi(ath12k_ar_to_hw(ar), pubsta, msdu, napi);
+ }
+ 
++static bool ath12k_dp_rx_check_nwifi_hdr_len_valid(struct ath12k_base *ab,
++						   struct hal_rx_desc *rx_desc,
++						   struct sk_buff *msdu)
++{
++	struct ieee80211_hdr *hdr;
++	u8 decap_type;
++	u32 hdr_len;
++
++	decap_type = ath12k_dp_rx_h_decap_type(ab, rx_desc);
++	if (decap_type != DP_RX_DECAP_TYPE_NATIVE_WIFI)
++		return true;
++
++	hdr = (struct ieee80211_hdr *)msdu->data;
++	hdr_len = ieee80211_hdrlen(hdr->frame_control);
++
++	if ((likely(hdr_len <= DP_MAX_NWIFI_HDR_LEN)))
++		return true;
++
++	ab->soc_stats.invalid_rbm++;
++	WARN_ON_ONCE(1);
++	return false;
++}
++
+ static int ath12k_dp_rx_process_msdu(struct ath12k *ar,
+ 				     struct sk_buff *msdu,
+ 				     struct sk_buff_head *msdu_list,
+@@ -2528,6 +2551,11 @@ static int ath12k_dp_rx_process_msdu(struct ath12k *ar,
+ 		}
+ 	}
+ 
++	if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, rx_desc, msdu))) {
++		ret = -EINVAL;
++		goto free_out;
++	}
++
+ 	ath12k_dp_rx_h_ppdu(ar, rx_desc, rx_status);
+ 	ath12k_dp_rx_h_mpdu(ar, msdu, rx_desc, rx_status);
+ 
+@@ -2880,6 +2908,9 @@ static int ath12k_dp_rx_h_verify_tkip_mic(struct ath12k *ar, struct ath12k_peer
+ 		    RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED;
+ 	skb_pull(msdu, hal_rx_desc_sz);
+ 
++	if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, rx_desc, msdu)))
++		return -EINVAL;
++
+ 	ath12k_dp_rx_h_ppdu(ar, rx_desc, rxs);
+ 	ath12k_dp_rx_h_undecap(ar, msdu, rx_desc,
+ 			       HAL_ENCRYPT_TYPE_TKIP_MIC, rxs, true);
+@@ -3600,6 +3631,9 @@ static int ath12k_dp_rx_h_null_q_desc(struct ath12k *ar, struct sk_buff *msdu,
+ 		skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
+ 		skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
+ 	}
++	if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, desc, msdu)))
++		return -EINVAL;
++
+ 	ath12k_dp_rx_h_ppdu(ar, desc, status);
+ 
+ 	ath12k_dp_rx_h_mpdu(ar, msdu, desc, status);
+@@ -3644,7 +3678,7 @@ static bool ath12k_dp_rx_h_reo_err(struct ath12k *ar, struct sk_buff *msdu,
+ 	return drop;
+ }
+ 
+-static void ath12k_dp_rx_h_tkip_mic_err(struct ath12k *ar, struct sk_buff *msdu,
++static bool ath12k_dp_rx_h_tkip_mic_err(struct ath12k *ar, struct sk_buff *msdu,
+ 					struct ieee80211_rx_status *status)
+ {
+ 	struct ath12k_base *ab = ar->ab;
+@@ -3662,6 +3696,9 @@ static void ath12k_dp_rx_h_tkip_mic_err(struct ath12k *ar, struct sk_buff *msdu,
+ 	skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
+ 	skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
+ 
++	if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, desc, msdu)))
++		return true;
++
+ 	ath12k_dp_rx_h_ppdu(ar, desc, status);
+ 
+ 	status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR |
+@@ -3669,6 +3706,7 @@ static void ath12k_dp_rx_h_tkip_mic_err(struct ath12k *ar, struct sk_buff *msdu,
+ 
+ 	ath12k_dp_rx_h_undecap(ar, msdu, desc,
+ 			       HAL_ENCRYPT_TYPE_TKIP_MIC, status, false);
++	return false;
+ }
+ 
+ static bool ath12k_dp_rx_h_rxdma_err(struct ath12k *ar,  struct sk_buff *msdu,
+@@ -3687,7 +3725,7 @@ static bool ath12k_dp_rx_h_rxdma_err(struct ath12k *ar,  struct sk_buff *msdu,
+ 	case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR:
+ 		err_bitmap = ath12k_dp_rx_h_mpdu_err(ab, rx_desc);
+ 		if (err_bitmap & HAL_RX_MPDU_ERR_TKIP_MIC) {
+-			ath12k_dp_rx_h_tkip_mic_err(ar, msdu, status);
++			drop = ath12k_dp_rx_h_tkip_mic_err(ar, msdu, status);
+ 			break;
+ 		}
+ 		fallthrough;
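
The ath12k hunks above add one shared validation helper and call it on every
receive path: for native-wifi decap the header length is derived from the
frame_control field of the frame itself, so it must be checked against
DP_MAX_NWIFI_HDR_LEN before the undecap code trusts it. The TKIP handler also
changes from void to bool so its caller learns whether to drop the frame. The
core of the check, as a standalone sketch:

	#include <linux/ieee80211.h>
	#include <linux/skbuff.h>

	/* Sketch: reject frames whose derived 802.11 header length exceeds
	 * the driver's maximum before any further parsing happens. */
	static bool example_hdr_len_valid(struct sk_buff *msdu, u32 max_len)
	{
		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;

		return ieee80211_hdrlen(hdr->frame_control) <= max_len;
	}
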
+diff --git a/drivers/net/wireless/ath/ath12k/pci.c b/drivers/net/wireless/ath/ath12k/pci.c
+index bd269aa1740bcd..2ff866e1d7d5bb 100644
+--- a/drivers/net/wireless/ath/ath12k/pci.c
++++ b/drivers/net/wireless/ath/ath12k/pci.c
+@@ -1541,6 +1541,7 @@ static void ath12k_pci_remove(struct pci_dev *pdev)
+ 	ath12k_core_deinit(ab);
+ 
+ qmi_fail:
++	ath12k_fw_unmap(ab);
+ 	ath12k_mhi_unregister(ab_pci);
+ 
+ 	ath12k_pci_free_irq(ab);
+diff --git a/drivers/net/wireless/mediatek/mt76/eeprom.c b/drivers/net/wireless/mediatek/mt76/eeprom.c
+index 0bc66cc19acd1e..443517d06c9fa9 100644
+--- a/drivers/net/wireless/mediatek/mt76/eeprom.c
++++ b/drivers/net/wireless/mediatek/mt76/eeprom.c
+@@ -95,6 +95,10 @@ int mt76_get_of_data_from_mtd(struct mt76_dev *dev, void *eep, int offset, int l
+ 
+ #ifdef CONFIG_NL80211_TESTMODE
+ 	dev->test_mtd.name = devm_kstrdup(dev->dev, part, GFP_KERNEL);
++	if (!dev->test_mtd.name) {
++		ret = -ENOMEM;
++		goto out_put_node;
++	}
+ 	dev->test_mtd.offset = offset;
+ #endif
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
+index 0b75a45ad2e821..e2e9b5ece74e21 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76.h
++++ b/drivers/net/wireless/mediatek/mt76/mt76.h
+@@ -755,6 +755,7 @@ struct mt76_testmode_data {
+ 
+ struct mt76_vif {
+ 	u8 idx;
++	u8 link_idx;
+ 	u8 omac_idx;
+ 	u8 band_idx;
+ 	u8 wmm_idx;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+index 7d07e720e4ec1d..452579ccc49228 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+@@ -1164,7 +1164,7 @@ int mt76_connac_mcu_uni_add_dev(struct mt76_phy *phy,
+ 			.tag = cpu_to_le16(DEV_INFO_ACTIVE),
+ 			.len = cpu_to_le16(sizeof(struct req_tlv)),
+ 			.active = enable,
+-			.link_idx = mvif->idx,
++			.link_idx = mvif->link_idx,
+ 		},
+ 	};
+ 	struct {
+@@ -1187,7 +1187,7 @@ int mt76_connac_mcu_uni_add_dev(struct mt76_phy *phy,
+ 			.bmc_tx_wlan_idx = cpu_to_le16(wcid->idx),
+ 			.sta_idx = cpu_to_le16(wcid->idx),
+ 			.conn_state = 1,
+-			.link_idx = mvif->idx,
++			.link_idx = mvif->link_idx,
+ 		},
+ 	};
+ 	int err, idx, cmd, len;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
+index e832ad53e2393b..a4f4d12f904e7c 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
+@@ -22,6 +22,7 @@ static const struct usb_device_id mt76x2u_device_table[] = {
+ 	{ USB_DEVICE(0x0846, 0x9053) },	/* Netgear A6210 */
+ 	{ USB_DEVICE(0x045e, 0x02e6) },	/* XBox One Wireless Adapter */
+ 	{ USB_DEVICE(0x045e, 0x02fe) },	/* XBox One Wireless Adapter */
++	{ USB_DEVICE(0x2357, 0x0137) },	/* TP-Link TL-WDN6200 */
+ 	{ },
+ };
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/main.c b/drivers/net/wireless/mediatek/mt76/mt7925/main.c
+index ddc67423efe2cb..d2a98c92e1147d 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7925/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7925/main.c
+@@ -256,7 +256,7 @@ int mt7925_init_mlo_caps(struct mt792x_phy *phy)
+ 
+ 	ext_capab[0].eml_capabilities = phy->eml_cap;
+ 	ext_capab[0].mld_capa_and_ops =
+-		u16_encode_bits(1, IEEE80211_MLD_CAP_OP_MAX_SIMUL_LINKS);
++		u16_encode_bits(0, IEEE80211_MLD_CAP_OP_MAX_SIMUL_LINKS);
+ 
+ 	wiphy->flags |= WIPHY_FLAG_SUPPORTS_MLO;
+ 	wiphy->iftype_ext_capab = ext_capab;
+@@ -356,10 +356,15 @@ static int mt7925_mac_link_bss_add(struct mt792x_dev *dev,
+ 	struct mt76_txq *mtxq;
+ 	int idx, ret = 0;
+ 
+-	mconf->mt76.idx = __ffs64(~dev->mt76.vif_mask);
+-	if (mconf->mt76.idx >= MT792x_MAX_INTERFACES) {
+-		ret = -ENOSPC;
+-		goto out;
++	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
++		mconf->mt76.idx = MT792x_MAX_INTERFACES;
++	} else {
++		mconf->mt76.idx = __ffs64(~dev->mt76.vif_mask);
++
++		if (mconf->mt76.idx >= MT792x_MAX_INTERFACES) {
++			ret = -ENOSPC;
++			goto out;
++		}
+ 	}
+ 
+ 	mconf->mt76.omac_idx = ieee80211_vif_is_mld(vif) ?
+@@ -367,6 +372,7 @@ static int mt7925_mac_link_bss_add(struct mt792x_dev *dev,
+ 	mconf->mt76.band_idx = 0xff;
+ 	mconf->mt76.wmm_idx = ieee80211_vif_is_mld(vif) ?
+ 			      0 : mconf->mt76.idx % MT76_CONNAC_MAX_WMM_SETS;
++	mconf->mt76.link_idx = hweight16(mvif->valid_links);
+ 
+ 	if (mvif->phy->mt76->chandef.chan->band != NL80211_BAND_2GHZ)
+ 		mconf->mt76.basic_rates_idx = MT792x_BASIC_RATES_TBL + 4;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
+index c7eba60897d276..8476f9caa98dbf 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
+@@ -3119,13 +3119,14 @@ __mt7925_mcu_set_clc(struct mt792x_dev *dev, u8 *alpha2,
+ 		.env = env_cap,
+ 	};
+ 	int ret, valid_cnt = 0;
+-	u8 i, *pos;
++	u8 *pos, *last_pos;
+ 
+ 	if (!clc)
+ 		return 0;
+ 
+ 	pos = clc->data + sizeof(*seg) * clc->nr_seg;
+-	for (i = 0; i < clc->nr_country; i++) {
++	last_pos = clc->data + le32_to_cpu(*(__le32 *)(clc->data + 4));
++	while (pos < last_pos) {
+ 		struct mt7925_clc_rule *rule = (struct mt7925_clc_rule *)pos;
+ 
+ 		pos += sizeof(*rule);
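
The mt7925 CLC fix above stops trusting the nr_country record count and
instead walks the blob by offset until the end position stored in the data
itself, which avoids reading past the buffer when the count and the payload
disagree. A generic sketch of offset-bounded iteration over variable-length
records (the types are hypothetical; a production version must also validate
each record's own length against the remaining space):

	#include <linux/types.h>

	struct example_rule {
		u8 len;		/* length of the trailing payload */
		u8 data[];
	};

	static void example_walk(const u8 *buf, size_t buflen)
	{
		const u8 *pos = buf, *end = buf + buflen;

		while (pos + sizeof(struct example_rule) <= end) {
			const struct example_rule *r = (const void *)pos;

			/* ... consume r ... */
			pos += sizeof(*r) + r->len;
		}
	}
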
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h
+index fe6a613ba00889..887427e0760aed 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h
+@@ -566,8 +566,8 @@ struct mt7925_wow_pattern_tlv {
+ 	u8 offset;
+ 	u8 mask[MT76_CONNAC_WOW_MASK_MAX_LEN];
+ 	u8 pattern[MT76_CONNAC_WOW_PATTEN_MAX_LEN];
+-	u8 rsv[7];
+-} __packed;
++	u8 rsv[4];
++};
+ 
+ struct roc_acquire_tlv {
+ 	__le16 tag;
+diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
+index a22ea4a4b202bd..4f775c3e218f45 100644
+--- a/drivers/ntb/ntb_transport.c
++++ b/drivers/ntb/ntb_transport.c
+@@ -1353,7 +1353,7 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
+ 	qp_count = ilog2(qp_bitmap);
+ 	if (nt->use_msi) {
+ 		qp_count -= 1;
+-		nt->msi_db_mask = 1 << qp_count;
++		nt->msi_db_mask = BIT_ULL(qp_count);
+ 		ntb_db_clear_mask(ndev, nt->msi_db_mask);
+ 	}
+ 
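
In the ntb_transport fix above, `1 << qp_count` is evaluated as a plain int,
so the mask is wrong from bit 31 upward even though msi_db_mask is 64-bit;
BIT_ULL() performs the shift in u64. In short:

	#include <linux/bits.h>
	#include <linux/types.h>

	static u64 example_db_mask(unsigned int qp_count)
	{
		/* Wrong for qp_count >= 31: '1 << qp_count' shifts an int.
		 * BIT_ULL(n) expands to 1ULL << n, keeping all 64 bits. */
		return BIT_ULL(qp_count);
	}
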
+diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
+index e1abb27927ff74..da195d61a9664c 100644
+--- a/drivers/nvme/target/fcloop.c
++++ b/drivers/nvme/target/fcloop.c
+@@ -478,7 +478,7 @@ fcloop_t2h_xmt_ls_rsp(struct nvme_fc_local_port *localport,
+ 	if (targetport) {
+ 		tport = targetport->private;
+ 		spin_lock(&tport->lock);
+-		list_add_tail(&tport->ls_list, &tls_req->ls_list);
++		list_add_tail(&tls_req->ls_list, &tport->ls_list);
+ 		spin_unlock(&tport->lock);
+ 		queue_work(nvmet_wq, &tport->ls_work);
+ 	}
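
The fcloop fix above is a classic argument swap: list_add_tail() takes the new
entry first and the list head second, so the original call spliced the
per-port list head into the request's own list instead of queueing the
request. For reference:

	#include <linux/list.h>

	/* list_add_tail(new, head): insert @new before @head, i.e. append
	 * it at the tail of the list that @head anchors. */
	static void example_enqueue(struct list_head *entry,
				    struct list_head *queue)
	{
		list_add_tail(entry, queue);
	}
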
+diff --git a/drivers/of/irq.c b/drivers/of/irq.c
+index 1fb329c0a55b8c..5fbfc4d4e06e49 100644
+--- a/drivers/of/irq.c
++++ b/drivers/of/irq.c
+@@ -16,6 +16,7 @@
+ 
+ #define pr_fmt(fmt)	"OF: " fmt
+ 
++#include <linux/cleanup.h>
+ #include <linux/device.h>
+ #include <linux/errno.h>
+ #include <linux/list.h>
+@@ -38,11 +39,15 @@
+ unsigned int irq_of_parse_and_map(struct device_node *dev, int index)
+ {
+ 	struct of_phandle_args oirq;
++	unsigned int ret;
+ 
+ 	if (of_irq_parse_one(dev, index, &oirq))
+ 		return 0;
+ 
+-	return irq_create_of_mapping(&oirq);
++	ret = irq_create_of_mapping(&oirq);
++	of_node_put(oirq.np);
++
++	return ret;
+ }
+ EXPORT_SYMBOL_GPL(irq_of_parse_and_map);
+ 
+@@ -165,6 +170,8 @@ const __be32 *of_irq_parse_imap_parent(const __be32 *imap, int len, struct of_ph
+  * the specifier for each map, and then returns the translated map.
+  *
+  * Return: 0 on success and a negative number on error
++ *
++ * Note: refcount of node @out_irq->np is increased by 1 on success.
+  */
+ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
+ {
+@@ -310,6 +317,12 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
+ 		addrsize = (imap - match_array) - intsize;
+ 
+ 		if (ipar == newpar) {
++			/*
++			 * We got @ipar's refcount, but the refcount was
++			 * gotten again by of_irq_parse_imap_parent() via its
++			 * alias @newpar.
++			 */
++			of_node_put(ipar);
+ 			pr_debug("%pOF interrupt-map entry to self\n", ipar);
+ 			return 0;
+ 		}
+@@ -339,10 +352,12 @@ EXPORT_SYMBOL_GPL(of_irq_parse_raw);
+  * This function resolves an interrupt for a node by walking the interrupt tree,
+  * finding which interrupt controller node it is attached to, and returning the
+  * interrupt specifier that can be used to retrieve a Linux IRQ number.
++ *
++ * Note: refcount of node @out_irq->np is increased by 1 on success.
+  */
+ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_args *out_irq)
+ {
+-	struct device_node *p;
++	struct device_node __free(device_node) *p = NULL;
+ 	const __be32 *addr;
+ 	u32 intsize;
+ 	int i, res, addr_len;
+@@ -367,41 +382,33 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar
+ 	/* Try the new-style interrupts-extended first */
+ 	res = of_parse_phandle_with_args(device, "interrupts-extended",
+ 					"#interrupt-cells", index, out_irq);
+-	if (!res)
+-		return of_irq_parse_raw(addr_buf, out_irq);
+-
+-	/* Look for the interrupt parent. */
+-	p = of_irq_find_parent(device);
+-	if (p == NULL)
+-		return -EINVAL;
++	if (!res) {
++		p = out_irq->np;
++	} else {
++		/* Look for the interrupt parent. */
++		p = of_irq_find_parent(device);
++		/* Get size of interrupt specifier */
++		if (!p || of_property_read_u32(p, "#interrupt-cells", &intsize))
++			return -EINVAL;
++
++		pr_debug(" parent=%pOF, intsize=%d\n", p, intsize);
++
++		/* Copy intspec into irq structure */
++		out_irq->np = p;
++		out_irq->args_count = intsize;
++		for (i = 0; i < intsize; i++) {
++			res = of_property_read_u32_index(device, "interrupts",
++							(index * intsize) + i,
++							out_irq->args + i);
++			if (res)
++				return res;
++		}
+ 
+-	/* Get size of interrupt specifier */
+-	if (of_property_read_u32(p, "#interrupt-cells", &intsize)) {
+-		res = -EINVAL;
+-		goto out;
++		pr_debug(" intspec=%d\n", *out_irq->args);
+ 	}
+ 
+-	pr_debug(" parent=%pOF, intsize=%d\n", p, intsize);
+-
+-	/* Copy intspec into irq structure */
+-	out_irq->np = p;
+-	out_irq->args_count = intsize;
+-	for (i = 0; i < intsize; i++) {
+-		res = of_property_read_u32_index(device, "interrupts",
+-						 (index * intsize) + i,
+-						 out_irq->args + i);
+-		if (res)
+-			goto out;
+-	}
+-
+-	pr_debug(" intspec=%d\n", *out_irq->args);
+-
+-
+ 	/* Check if there are any interrupt-map translations to process */
+-	res = of_irq_parse_raw(addr_buf, out_irq);
+- out:
+-	of_node_put(p);
+-	return res;
++	return of_irq_parse_raw(addr_buf, out_irq);
+ }
+ EXPORT_SYMBOL_GPL(of_irq_parse_one);
+ 
+@@ -505,8 +512,10 @@ int of_irq_count(struct device_node *dev)
+ 	struct of_phandle_args irq;
+ 	int nr = 0;
+ 
+-	while (of_irq_parse_one(dev, nr, &irq) == 0)
++	while (of_irq_parse_one(dev, nr, &irq) == 0) {
++		of_node_put(irq.np);
+ 		nr++;
++	}
+ 
+ 	return nr;
+ }
+@@ -623,6 +632,8 @@ void __init of_irq_init(const struct of_device_id *matches)
+ 				       __func__, desc->dev, desc->dev,
+ 				       desc->interrupt_parent);
+ 				of_node_clear_flag(desc->dev, OF_POPULATED);
++				of_node_put(desc->interrupt_parent);
++				of_node_put(desc->dev);
+ 				kfree(desc);
+ 				continue;
+ 			}
+@@ -653,6 +664,7 @@ void __init of_irq_init(const struct of_device_id *matches)
+ err:
+ 	list_for_each_entry_safe(desc, temp_desc, &intc_desc_list, list) {
+ 		list_del(&desc->list);
++		of_node_put(desc->interrupt_parent);
+ 		of_node_put(desc->dev);
+ 		kfree(desc);
+ 	}
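
The of/irq.c hunks above all plug device-node refcount leaks; the rewritten
of_irq_parse_one() uses the scope-based __free(device_node) cleanup attribute
from <linux/cleanup.h>, which calls of_node_put() automatically when the
pointer goes out of scope on every return path. A minimal sketch of that
pattern (the function body is illustrative):

	#include <linux/cleanup.h>
	#include <linux/of.h>
	#include <linux/of_irq.h>

	static int example_use_parent(struct device_node *dev)
	{
		struct device_node __free(device_node) *p =
			of_irq_find_parent(dev);

		if (!p)
			return -EINVAL;

		/* ... use p; no explicit of_node_put() on any path ... */
		return 0;
	}
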
+diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c
+index e091c3e55b5c6f..bae829ac759e12 100644
+--- a/drivers/pci/controller/cadence/pci-j721e.c
++++ b/drivers/pci/controller/cadence/pci-j721e.c
+@@ -355,6 +355,7 @@ static const struct j721e_pcie_data j7200_pcie_rc_data = {
+ static const struct j721e_pcie_data j7200_pcie_ep_data = {
+ 	.mode = PCI_MODE_EP,
+ 	.quirk_detect_quiet_flag = true,
++	.linkdown_irq_regfield = J7200_LINK_DOWN,
+ 	.quirk_disable_flr = true,
+ 	.max_lanes = 2,
+ };
+@@ -376,13 +377,13 @@ static const struct j721e_pcie_data j784s4_pcie_rc_data = {
+ 	.mode = PCI_MODE_RC,
+ 	.quirk_retrain_flag = true,
+ 	.byte_access_allowed = false,
+-	.linkdown_irq_regfield = LINK_DOWN,
++	.linkdown_irq_regfield = J7200_LINK_DOWN,
+ 	.max_lanes = 4,
+ };
+ 
+ static const struct j721e_pcie_data j784s4_pcie_ep_data = {
+ 	.mode = PCI_MODE_EP,
+-	.linkdown_irq_regfield = LINK_DOWN,
++	.linkdown_irq_regfield = J7200_LINK_DOWN,
+ 	.max_lanes = 4,
+ };
+ 
+diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c
+index 582fa110708781..792d24cea5747b 100644
+--- a/drivers/pci/controller/pcie-brcmstb.c
++++ b/drivers/pci/controller/pcie-brcmstb.c
+@@ -1786,7 +1786,7 @@ static struct pci_ops brcm7425_pcie_ops = {
+ 
+ static int brcm_pcie_probe(struct platform_device *pdev)
+ {
+-	struct device_node *np = pdev->dev.of_node, *msi_np;
++	struct device_node *np = pdev->dev.of_node;
+ 	struct pci_host_bridge *bridge;
+ 	const struct pcie_cfg_data *data;
+ 	struct brcm_pcie *pcie;
+@@ -1890,9 +1890,14 @@ static int brcm_pcie_probe(struct platform_device *pdev)
+ 		goto fail;
+ 	}
+ 
+-	msi_np = of_parse_phandle(pcie->np, "msi-parent", 0);
+-	if (pci_msi_enabled() && msi_np == pcie->np) {
+-		ret = brcm_pcie_enable_msi(pcie);
++	if (pci_msi_enabled()) {
++		struct device_node *msi_np = of_parse_phandle(pcie->np, "msi-parent", 0);
++
++		if (msi_np == pcie->np)
++			ret = brcm_pcie_enable_msi(pcie);
++
++		of_node_put(msi_np);
++
+ 		if (ret) {
+ 			dev_err(pcie->dev, "probe of internal MSI failed");
+ 			goto fail;
+diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c
+index cbec711148253a..481dcc476c556b 100644
+--- a/drivers/pci/controller/pcie-rockchip-host.c
++++ b/drivers/pci/controller/pcie-rockchip-host.c
+@@ -367,7 +367,7 @@ static int rockchip_pcie_host_init_port(struct rockchip_pcie *rockchip)
+ 		}
+ 	}
+ 
+-	rockchip_pcie_write(rockchip, ROCKCHIP_VENDOR_ID,
++	rockchip_pcie_write(rockchip, PCI_VENDOR_ID_ROCKCHIP,
+ 			    PCIE_CORE_CONFIG_VENDOR);
+ 	rockchip_pcie_write(rockchip,
+ 			    PCI_CLASS_BRIDGE_PCI_NORMAL << 8,
+diff --git a/drivers/pci/controller/pcie-rockchip.h b/drivers/pci/controller/pcie-rockchip.h
+index 15ee949f2485e3..688f51d9bde631 100644
+--- a/drivers/pci/controller/pcie-rockchip.h
++++ b/drivers/pci/controller/pcie-rockchip.h
+@@ -188,7 +188,6 @@
+ #define AXI_WRAPPER_NOR_MSG			0xc
+ 
+ #define PCIE_RC_SEND_PME_OFF			0x11960
+-#define ROCKCHIP_VENDOR_ID			0x1d87
+ #define PCIE_LINK_IS_L2(x) \
+ 	(((x) & PCIE_CLIENT_DEBUG_LTSSM_MASK) == PCIE_CLIENT_DEBUG_LTSSM_L2)
+ #define PCIE_LINK_UP(x) \
+diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
+index 9d9596947350f5..94ceec50a2b94c 100644
+--- a/drivers/pci/controller/vmd.c
++++ b/drivers/pci/controller/vmd.c
+@@ -125,7 +125,7 @@ struct vmd_irq_list {
+ struct vmd_dev {
+ 	struct pci_dev		*dev;
+ 
+-	spinlock_t		cfg_lock;
++	raw_spinlock_t		cfg_lock;
+ 	void __iomem		*cfgbar;
+ 
+ 	int msix_count;
+@@ -391,7 +391,7 @@ static int vmd_pci_read(struct pci_bus *bus, unsigned int devfn, int reg,
+ 	if (!addr)
+ 		return -EFAULT;
+ 
+-	spin_lock_irqsave(&vmd->cfg_lock, flags);
++	raw_spin_lock_irqsave(&vmd->cfg_lock, flags);
+ 	switch (len) {
+ 	case 1:
+ 		*value = readb(addr);
+@@ -406,7 +406,7 @@ static int vmd_pci_read(struct pci_bus *bus, unsigned int devfn, int reg,
+ 		ret = -EINVAL;
+ 		break;
+ 	}
+-	spin_unlock_irqrestore(&vmd->cfg_lock, flags);
++	raw_spin_unlock_irqrestore(&vmd->cfg_lock, flags);
+ 	return ret;
+ }
+ 
+@@ -426,7 +426,7 @@ static int vmd_pci_write(struct pci_bus *bus, unsigned int devfn, int reg,
+ 	if (!addr)
+ 		return -EFAULT;
+ 
+-	spin_lock_irqsave(&vmd->cfg_lock, flags);
++	raw_spin_lock_irqsave(&vmd->cfg_lock, flags);
+ 	switch (len) {
+ 	case 1:
+ 		writeb(value, addr);
+@@ -444,7 +444,7 @@ static int vmd_pci_write(struct pci_bus *bus, unsigned int devfn, int reg,
+ 		ret = -EINVAL;
+ 		break;
+ 	}
+-	spin_unlock_irqrestore(&vmd->cfg_lock, flags);
++	raw_spin_unlock_irqrestore(&vmd->cfg_lock, flags);
+ 	return ret;
+ }
+ 
+@@ -1009,7 +1009,7 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ 	if (features & VMD_FEAT_OFFSET_FIRST_VECTOR)
+ 		vmd->first_vec = 1;
+ 
+-	spin_lock_init(&vmd->cfg_lock);
++	raw_spin_lock_init(&vmd->cfg_lock);
+ 	pci_set_drvdata(dev, vmd);
+ 	err = vmd_enable_domain(vmd, features);
+ 	if (err)
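
The vmd conversion above matters on PREEMPT_RT, where a plain spinlock_t
becomes a sleeping lock: PCI config accessors can be entered from contexts
that must not sleep, so the lock guarding the shadow config window has to be a
raw_spinlock_t, which spins unconditionally. The pattern in isolation:

	#include <linux/spinlock.h>

	static DEFINE_RAW_SPINLOCK(example_cfg_lock);

	static void example_cfg_access(void)
	{
		unsigned long flags;

		/* raw_spin_lock_irqsave() never sleeps, even on RT, so it
		 * is safe in atomic config-space accessor paths. */
		raw_spin_lock_irqsave(&example_cfg_lock, flags);
		/* ... short, non-sleeping MMIO access ... */
		raw_spin_unlock_irqrestore(&example_cfg_lock, flags);
	}
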
+diff --git a/drivers/pci/devres.c b/drivers/pci/devres.c
+index 643f85849ef64b..3f2691888c35d3 100644
+--- a/drivers/pci/devres.c
++++ b/drivers/pci/devres.c
+@@ -40,7 +40,7 @@
+  * Legacy struct storing addresses to whole mapped BARs.
+  */
+ struct pcim_iomap_devres {
+-	void __iomem *table[PCI_STD_NUM_BARS];
++	void __iomem *table[PCI_NUM_RESOURCES];
+ };
+ 
+ /* Used to restore the old INTx state on driver detach. */
+@@ -577,7 +577,7 @@ static int pcim_add_mapping_to_legacy_table(struct pci_dev *pdev,
+ {
+ 	void __iomem **legacy_iomap_table;
+ 
+-	if (bar >= PCI_STD_NUM_BARS)
++	if (!pci_bar_index_is_valid(bar))
+ 		return -EINVAL;
+ 
+ 	legacy_iomap_table = (void __iomem **)pcim_iomap_table(pdev);
+@@ -622,7 +622,7 @@ static void pcim_remove_bar_from_legacy_table(struct pci_dev *pdev, int bar)
+ {
+ 	void __iomem **legacy_iomap_table;
+ 
+-	if (bar >= PCI_STD_NUM_BARS)
++	if (!pci_bar_index_is_valid(bar))
+ 		return;
+ 
+ 	legacy_iomap_table = (void __iomem **)pcim_iomap_table(pdev);
+@@ -655,6 +655,9 @@ void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
+ 	void __iomem *mapping;
+ 	struct pcim_addr_devres *res;
+ 
++	if (!pci_bar_index_is_valid(bar))
++		return NULL;
++
+ 	res = pcim_addr_devres_alloc(pdev);
+ 	if (!res)
+ 		return NULL;
+@@ -722,6 +725,9 @@ void __iomem *pcim_iomap_region(struct pci_dev *pdev, int bar,
+ 	int ret;
+ 	struct pcim_addr_devres *res;
+ 
++	if (!pci_bar_index_is_valid(bar))
++		return IOMEM_ERR_PTR(-EINVAL);
++
+ 	res = pcim_addr_devres_alloc(pdev);
+ 	if (!res)
+ 		return IOMEM_ERR_PTR(-ENOMEM);
+@@ -822,6 +828,9 @@ static int _pcim_request_region(struct pci_dev *pdev, int bar, const char *name,
+ 	int ret;
+ 	struct pcim_addr_devres *res;
+ 
++	if (!pci_bar_index_is_valid(bar))
++		return -EINVAL;
++
+ 	res = pcim_addr_devres_alloc(pdev);
+ 	if (!res)
+ 		return -ENOMEM;
+@@ -1043,6 +1052,9 @@ void __iomem *pcim_iomap_range(struct pci_dev *pdev, int bar,
+ 	void __iomem *mapping;
+ 	struct pcim_addr_devres *res;
+ 
++	if (!pci_bar_index_is_valid(bar))
++		return IOMEM_ERR_PTR(-EINVAL);
++
+ 	res = pcim_addr_devres_alloc(pdev);
+ 	if (!res)
+ 		return IOMEM_ERR_PTR(-ENOMEM);
+diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
+index ff458e692fedb3..997841c6989359 100644
+--- a/drivers/pci/hotplug/pciehp_core.c
++++ b/drivers/pci/hotplug/pciehp_core.c
+@@ -286,9 +286,12 @@ static int pciehp_suspend(struct pcie_device *dev)
+ 
+ static bool pciehp_device_replaced(struct controller *ctrl)
+ {
+-	struct pci_dev *pdev __free(pci_dev_put);
++	struct pci_dev *pdev __free(pci_dev_put) = NULL;
+ 	u32 reg;
+ 
++	if (pci_dev_is_disconnected(ctrl->pcie->port))
++		return false;
++
+ 	pdev = pci_get_slot(ctrl->pcie->port->subordinate, PCI_DEVFN(0, 0));
+ 	if (!pdev)
+ 		return true;
+diff --git a/drivers/pci/iomap.c b/drivers/pci/iomap.c
+index 9fb7cacc15cdef..fe706ed946dfd2 100644
+--- a/drivers/pci/iomap.c
++++ b/drivers/pci/iomap.c
+@@ -9,6 +9,8 @@
+ 
+ #include <linux/export.h>
+ 
++#include "pci.h" /* for pci_bar_index_is_valid() */
++
+ /**
+  * pci_iomap_range - create a virtual mapping cookie for a PCI BAR
+  * @dev: PCI device that owns the BAR
+@@ -33,12 +35,19 @@ void __iomem *pci_iomap_range(struct pci_dev *dev,
+ 			      unsigned long offset,
+ 			      unsigned long maxlen)
+ {
+-	resource_size_t start = pci_resource_start(dev, bar);
+-	resource_size_t len = pci_resource_len(dev, bar);
+-	unsigned long flags = pci_resource_flags(dev, bar);
++	resource_size_t start, len;
++	unsigned long flags;
++
++	if (!pci_bar_index_is_valid(bar))
++		return NULL;
++
++	start = pci_resource_start(dev, bar);
++	len = pci_resource_len(dev, bar);
++	flags = pci_resource_flags(dev, bar);
+ 
+ 	if (len <= offset || !start)
+ 		return NULL;
++
+ 	len -= offset;
+ 	start += offset;
+ 	if (maxlen && len > maxlen)
+@@ -77,16 +86,20 @@ void __iomem *pci_iomap_wc_range(struct pci_dev *dev,
+ 				 unsigned long offset,
+ 				 unsigned long maxlen)
+ {
+-	resource_size_t start = pci_resource_start(dev, bar);
+-	resource_size_t len = pci_resource_len(dev, bar);
+-	unsigned long flags = pci_resource_flags(dev, bar);
++	resource_size_t start, len;
++	unsigned long flags;
+ 
+-
+-	if (flags & IORESOURCE_IO)
++	if (!pci_bar_index_is_valid(bar))
+ 		return NULL;
+ 
++	start = pci_resource_start(dev, bar);
++	len = pci_resource_len(dev, bar);
++	flags = pci_resource_flags(dev, bar);
++
+ 	if (len <= offset || !start)
+ 		return NULL;
++	if (flags & IORESOURCE_IO)
++		return NULL;
+ 
+ 	len -= offset;
+ 	start += offset;
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 169aa8fd74a11f..be61fa93d39712 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -3922,6 +3922,9 @@ EXPORT_SYMBOL(pci_enable_atomic_ops_to_root);
+  */
+ void pci_release_region(struct pci_dev *pdev, int bar)
+ {
++	if (!pci_bar_index_is_valid(bar))
++		return;
++
+ 	/*
+ 	 * This is done for backwards compatibility, because the old PCI devres
+ 	 * API had a mode in which the function became managed if it had been
+@@ -3967,6 +3970,9 @@ EXPORT_SYMBOL(pci_release_region);
+ static int __pci_request_region(struct pci_dev *pdev, int bar,
+ 				const char *res_name, int exclusive)
+ {
++	if (!pci_bar_index_is_valid(bar))
++		return -EINVAL;
++
+ 	if (pci_is_managed(pdev)) {
+ 		if (exclusive == IORESOURCE_EXCLUSIVE)
+ 			return pcim_request_region_exclusive(pdev, bar, res_name);
+diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
+index 1cdc2c9547a7e1..65df6d2ac0032e 100644
+--- a/drivers/pci/pci.h
++++ b/drivers/pci/pci.h
+@@ -165,6 +165,22 @@ static inline void pci_wakeup_event(struct pci_dev *dev)
+ 	pm_wakeup_event(&dev->dev, 100);
+ }
+ 
++/**
++ * pci_bar_index_is_valid - Check whether a BAR index is within valid range
++ * @bar: BAR index
++ *
++ * Protects against overflowing &struct pci_dev.resource array.
++ *
++ * Return: true for valid index, false otherwise.
++ */
++static inline bool pci_bar_index_is_valid(int bar)
++{
++	if (bar >= 0 && bar < PCI_NUM_RESOURCES)
++		return true;
++
++	return false;
++}
++
+ static inline bool pci_has_subordinate(struct pci_dev *pci_dev)
+ {
+ 	return !!(pci_dev->subordinate);
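
pci_bar_index_is_valid() above guards every BAR-indexed access into the
pci_dev resource array (PCI_NUM_RESOURCES covers the standard BARs plus the
ROM and bridge windows, which is also why the devres table grew). Typical use,
assuming a caller inside drivers/pci with access to the private header:

	#include <linux/pci.h>
	#include "pci.h"	/* drivers/pci internal header */

	static resource_size_t example_bar_len(struct pci_dev *pdev, int bar)
	{
		/* Reject out-of-range indices before touching resource[]. */
		if (!pci_bar_index_is_valid(bar))
			return 0;

		return pci_resource_len(pdev, bar);
	}
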
+diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
+index 0e757b23a09f0f..cf7c7886b64203 100644
+--- a/drivers/pci/probe.c
++++ b/drivers/pci/probe.c
+@@ -908,6 +908,7 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
+ 	resource_size_t offset, next_offset;
+ 	LIST_HEAD(resources);
+ 	struct resource *res, *next_res;
++	bool bus_registered = false;
+ 	char addr[64], *fmt;
+ 	const char *name;
+ 	int err;
+@@ -971,6 +972,7 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
+ 	name = dev_name(&bus->dev);
+ 
+ 	err = device_register(&bus->dev);
++	bus_registered = true;
+ 	if (err)
+ 		goto unregister;
+ 
+@@ -1057,12 +1059,15 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
+ unregister:
+ 	put_device(&bridge->dev);
+ 	device_del(&bridge->dev);
+-
+ free:
+ #ifdef CONFIG_PCI_DOMAINS_GENERIC
+ 	pci_bus_release_domain_nr(parent, bus->domain_nr);
+ #endif
+-	kfree(bus);
++	if (bus_registered)
++		put_device(&bus->dev);
++	else
++		kfree(bus);
++
+ 	return err;
+ }
+ 
+@@ -1171,7 +1176,10 @@ static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
+ add_dev:
+ 	pci_set_bus_msi_domain(child);
+ 	ret = device_register(&child->dev);
+-	WARN_ON(ret < 0);
++	if (WARN_ON(ret < 0)) {
++		put_device(&child->dev);
++		return NULL;
++	}
+ 
+ 	pcibios_add_bus(child);
+ 
+@@ -1327,8 +1335,6 @@ static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev,
+ 	pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
+ 			      bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);
+ 
+-	pci_enable_rrs_sv(dev);
+-
+ 	if ((secondary || subordinate) && !pcibios_assign_all_busses() &&
+ 	    !is_cardbus && !broken) {
+ 		unsigned int cmax, buses;
+@@ -1569,6 +1575,11 @@ void set_pcie_port_type(struct pci_dev *pdev)
+ 	pdev->pcie_cap = pos;
+ 	pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
+ 	pdev->pcie_flags_reg = reg16;
++
++	type = pci_pcie_type(pdev);
++	if (type == PCI_EXP_TYPE_ROOT_PORT)
++		pci_enable_rrs_sv(pdev);
++
+ 	pci_read_config_dword(pdev, pos + PCI_EXP_DEVCAP, &pdev->devcap);
+ 	pdev->pcie_mpss = FIELD_GET(PCI_EXP_DEVCAP_PAYLOAD, pdev->devcap);
+ 
+@@ -1585,7 +1596,6 @@ void set_pcie_port_type(struct pci_dev *pdev)
+ 	 * correctly so detect impossible configurations here and correct
+ 	 * the port type accordingly.
+ 	 */
+-	type = pci_pcie_type(pdev);
+ 	if (type == PCI_EXP_TYPE_DOWNSTREAM) {
+ 		/*
+ 		 * If pdev claims to be downstream port but the parent
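
The pci_register_host_bridge() rework above fixes a subtle ownership rule:
once device_register() has been called on an embedded struct device, even
unsuccessfully, the memory belongs to the driver core and must be released
with put_device() (which runs the release callback), never kfree(). Hence the
bus_registered flag selecting between the two. The rule in miniature:

	#include <linux/device.h>
	#include <linux/slab.h>

	static int example_register(struct device *dev)
	{
		int err = device_register(dev);

		if (err) {
			/* Ownership already passed to the core: drop the
			 * reference instead of freeing the memory directly. */
			put_device(dev);
			return err;
		}
		return 0;
	}
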
+diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
+index 398cce3d76fc44..2f33e69a8caf20 100644
+--- a/drivers/perf/arm_pmu.c
++++ b/drivers/perf/arm_pmu.c
+@@ -342,12 +342,10 @@ armpmu_add(struct perf_event *event, int flags)
+ 	if (idx < 0)
+ 		return idx;
+ 
+-	/*
+-	 * If there is an event in the counter we are going to use then make
+-	 * sure it is disabled.
+-	 */
++	/* The newly-allocated counter should be empty */
++	WARN_ON_ONCE(hw_events->events[idx]);
++
+ 	event->hw.idx = idx;
+-	armpmu->disable(event);
+ 	hw_events->events[idx] = event;
+ 
+ 	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+diff --git a/drivers/perf/dwc_pcie_pmu.c b/drivers/perf/dwc_pcie_pmu.c
+index 4ca50f9b6dfed8..7dbda36884c8d3 100644
+--- a/drivers/perf/dwc_pcie_pmu.c
++++ b/drivers/perf/dwc_pcie_pmu.c
+@@ -567,8 +567,10 @@ static int dwc_pcie_register_dev(struct pci_dev *pdev)
+ 		return PTR_ERR(plat_dev);
+ 
+ 	dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
+-	if (!dev_info)
++	if (!dev_info) {
++		platform_device_unregister(plat_dev);
+ 		return -ENOMEM;
++	}
+ 
+ 	/* Cache platform device to handle pci device hotplug */
+ 	dev_info->plat_dev = plat_dev;
+@@ -724,6 +726,15 @@ static struct platform_driver dwc_pcie_pmu_driver = {
+ 	.driver = {.name = "dwc_pcie_pmu",},
+ };
+ 
++static void dwc_pcie_cleanup_devices(void)
++{
++	struct dwc_pcie_dev_info *dev_info, *tmp;
++
++	list_for_each_entry_safe(dev_info, tmp, &dwc_pcie_dev_info_head, dev_node) {
++		dwc_pcie_unregister_dev(dev_info);
++	}
++}
++
+ static int __init dwc_pcie_pmu_init(void)
+ {
+ 	struct pci_dev *pdev = NULL;
+@@ -736,7 +747,7 @@ static int __init dwc_pcie_pmu_init(void)
+ 		ret = dwc_pcie_register_dev(pdev);
+ 		if (ret) {
+ 			pci_dev_put(pdev);
+-			return ret;
++			goto err_cleanup;
+ 		}
+ 	}
+ 
+@@ -745,35 +756,35 @@ static int __init dwc_pcie_pmu_init(void)
+ 				      dwc_pcie_pmu_online_cpu,
+ 				      dwc_pcie_pmu_offline_cpu);
+ 	if (ret < 0)
+-		return ret;
++		goto err_cleanup;
+ 
+ 	dwc_pcie_pmu_hp_state = ret;
+ 
+ 	ret = platform_driver_register(&dwc_pcie_pmu_driver);
+ 	if (ret)
+-		goto platform_driver_register_err;
++		goto err_remove_cpuhp;
+ 
+ 	ret = bus_register_notifier(&pci_bus_type, &dwc_pcie_pmu_nb);
+ 	if (ret)
+-		goto platform_driver_register_err;
++		goto err_unregister_driver;
+ 	notify = true;
+ 
+ 	return 0;
+ 
+-platform_driver_register_err:
++err_unregister_driver:
++	platform_driver_unregister(&dwc_pcie_pmu_driver);
++err_remove_cpuhp:
+ 	cpuhp_remove_multi_state(dwc_pcie_pmu_hp_state);
+-
++err_cleanup:
++	dwc_pcie_cleanup_devices();
+ 	return ret;
+ }
+ 
+ static void __exit dwc_pcie_pmu_exit(void)
+ {
+-	struct dwc_pcie_dev_info *dev_info, *tmp;
+-
+ 	if (notify)
+ 		bus_unregister_notifier(&pci_bus_type, &dwc_pcie_pmu_nb);
+-	list_for_each_entry_safe(dev_info, tmp, &dwc_pcie_dev_info_head, dev_node)
+-		dwc_pcie_unregister_dev(dev_info);
++	dwc_pcie_cleanup_devices();
+ 	platform_driver_unregister(&dwc_pcie_pmu_driver);
+ 	cpuhp_remove_multi_state(dwc_pcie_pmu_hp_state);
+ }
+diff --git a/drivers/phy/freescale/phy-fsl-imx8m-pcie.c b/drivers/phy/freescale/phy-fsl-imx8m-pcie.c
+index e98361dcdeadfe..afd52392cd5301 100644
+--- a/drivers/phy/freescale/phy-fsl-imx8m-pcie.c
++++ b/drivers/phy/freescale/phy-fsl-imx8m-pcie.c
+@@ -162,6 +162,16 @@ static int imx8_pcie_phy_power_on(struct phy *phy)
+ 	return ret;
+ }
+ 
++static int imx8_pcie_phy_power_off(struct phy *phy)
++{
++	struct imx8_pcie_phy *imx8_phy = phy_get_drvdata(phy);
++
++	reset_control_assert(imx8_phy->reset);
++	reset_control_assert(imx8_phy->perst);
++
++	return 0;
++}
++
+ static int imx8_pcie_phy_init(struct phy *phy)
+ {
+ 	struct imx8_pcie_phy *imx8_phy = phy_get_drvdata(phy);
+@@ -182,6 +192,7 @@ static const struct phy_ops imx8_pcie_phy_ops = {
+ 	.init		= imx8_pcie_phy_init,
+ 	.exit		= imx8_pcie_phy_exit,
+ 	.power_on	= imx8_pcie_phy_power_on,
++	.power_off	= imx8_pcie_phy_power_off,
+ 	.owner		= THIS_MODULE,
+ };
+ 
+diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
+index aeaf0d1958f56a..a6bdff7a0bb254 100644
+--- a/drivers/pinctrl/qcom/pinctrl-msm.c
++++ b/drivers/pinctrl/qcom/pinctrl-msm.c
+@@ -1044,8 +1044,7 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
+ 	const struct msm_pingroup *g;
+ 	u32 intr_target_mask = GENMASK(2, 0);
+ 	unsigned long flags;
+-	bool was_enabled;
+-	u32 val;
++	u32 val, oldval;
+ 
+ 	if (msm_gpio_needs_dual_edge_parent_workaround(d, type)) {
+ 		set_bit(d->hwirq, pctrl->dual_edge_irqs);
+@@ -1107,8 +1106,7 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
+ 	 * internal circuitry of TLMM, toggling the RAW_STATUS
+ 	 * could cause the INTR_STATUS to be set for EDGE interrupts.
+ 	 */
+-	val = msm_readl_intr_cfg(pctrl, g);
+-	was_enabled = val & BIT(g->intr_raw_status_bit);
++	val = oldval = msm_readl_intr_cfg(pctrl, g);
+ 	val |= BIT(g->intr_raw_status_bit);
+ 	if (g->intr_detection_width == 2) {
+ 		val &= ~(3 << g->intr_detection_bit);
+@@ -1161,9 +1159,11 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
+ 	/*
+ 	 * The first time we set RAW_STATUS_EN it could trigger an interrupt.
+ 	 * Clear the interrupt.  This is safe because we have
+-	 * IRQCHIP_SET_TYPE_MASKED.
++	 * IRQCHIP_SET_TYPE_MASKED. When changing the interrupt type, we could
++	 * also still have a non-matching interrupt latched, so clear whenever
++	 * making changes to the interrupt configuration.
+ 	 */
+-	if (!was_enabled)
++	if (val != oldval)
+ 		msm_ack_intr_status(pctrl, g);
+ 
+ 	if (test_bit(d->hwirq, pctrl->dual_edge_irqs))
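+
+The msm_gpio_irq_set_type() change above generalizes the ack condition: instead of remembering a single bit (whether RAW_STATUS was already enabled), it snapshots the whole configuration register, applies every edit to a copy, and acks a possibly latched interrupt whenever the written value differs from the old one. A small sketch of that read-modify-write shape, with an invented register layout:
+
+	#include <stdint.h>
+	#include <stdio.h>
+
+	static uint32_t intr_cfg; /* stand-in for the hardware register */
+	static int acks;
+
+	static void set_type(uint32_t type)
+	{
+		uint32_t val, oldval;
+
+		val = oldval = intr_cfg;      /* snapshot before editing */
+		val |= 1u << 4;               /* raw status enable (example bit) */
+		val &= ~(3u << 0);            /* clear the detection field */
+		val |= type & 3u;             /* program the new edge/level type */
+		intr_cfg = val;               /* "write back" */
+
+		if (val != oldval)
+			acks++;               /* clear any stale latched interrupt */
+	}
+
+	int main(void)
+	{
+		set_type(1); /* first configuration: ack */
+		set_type(1); /* nothing changed: no ack */
+		set_type(2); /* type changed: ack again */
+		printf("acked %d times\n", acks); /* prints 2 */
+		return 0;
+	}
+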
+diff --git a/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c b/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c
+index 5480e0884abecf..23b4bc1e5da81c 100644
+--- a/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c
++++ b/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c
+@@ -939,83 +939,83 @@ const struct samsung_pinctrl_of_match_data fsd_of_data __initconst = {
+ 
+ /* pin banks of gs101 pin-controller (ALIVE) */
+ static const struct samsung_pin_bank_data gs101_pin_alive[] = {
+-	EXYNOS850_PIN_BANK_EINTW(8, 0x0, "gpa0", 0x00),
+-	EXYNOS850_PIN_BANK_EINTW(7, 0x20, "gpa1", 0x04),
+-	EXYNOS850_PIN_BANK_EINTW(5, 0x40, "gpa2", 0x08),
+-	EXYNOS850_PIN_BANK_EINTW(4, 0x60, "gpa3", 0x0c),
+-	EXYNOS850_PIN_BANK_EINTW(4, 0x80, "gpa4", 0x10),
+-	EXYNOS850_PIN_BANK_EINTW(7, 0xa0, "gpa5", 0x14),
+-	EXYNOS850_PIN_BANK_EINTW(8, 0xc0, "gpa9", 0x18),
+-	EXYNOS850_PIN_BANK_EINTW(2, 0xe0, "gpa10", 0x1c),
++	GS101_PIN_BANK_EINTW(8, 0x0, "gpa0", 0x00, 0x00),
++	GS101_PIN_BANK_EINTW(7, 0x20, "gpa1", 0x04, 0x08),
++	GS101_PIN_BANK_EINTW(5, 0x40, "gpa2", 0x08, 0x10),
++	GS101_PIN_BANK_EINTW(4, 0x60, "gpa3", 0x0c, 0x18),
++	GS101_PIN_BANK_EINTW(4, 0x80, "gpa4", 0x10, 0x1c),
++	GS101_PIN_BANK_EINTW(7, 0xa0, "gpa5", 0x14, 0x20),
++	GS101_PIN_BANK_EINTW(8, 0xc0, "gpa9", 0x18, 0x28),
++	GS101_PIN_BANK_EINTW(2, 0xe0, "gpa10", 0x1c, 0x30),
+ };
+ 
+ /* pin banks of gs101 pin-controller (FAR_ALIVE) */
+ static const struct samsung_pin_bank_data gs101_pin_far_alive[] = {
+-	EXYNOS850_PIN_BANK_EINTW(8, 0x0, "gpa6", 0x00),
+-	EXYNOS850_PIN_BANK_EINTW(4, 0x20, "gpa7", 0x04),
+-	EXYNOS850_PIN_BANK_EINTW(8, 0x40, "gpa8", 0x08),
+-	EXYNOS850_PIN_BANK_EINTW(2, 0x60, "gpa11", 0x0c),
++	GS101_PIN_BANK_EINTW(8, 0x0, "gpa6", 0x00, 0x00),
++	GS101_PIN_BANK_EINTW(4, 0x20, "gpa7", 0x04, 0x08),
++	GS101_PIN_BANK_EINTW(8, 0x40, "gpa8", 0x08, 0x0c),
++	GS101_PIN_BANK_EINTW(2, 0x60, "gpa11", 0x0c, 0x14),
+ };
+ 
+ /* pin banks of gs101 pin-controller (GSACORE) */
+ static const struct samsung_pin_bank_data gs101_pin_gsacore[] = {
+-	EXYNOS850_PIN_BANK_EINTG(2, 0x0, "gps0", 0x00),
+-	EXYNOS850_PIN_BANK_EINTG(8, 0x20, "gps1", 0x04),
+-	EXYNOS850_PIN_BANK_EINTG(3, 0x40, "gps2", 0x08),
++	GS101_PIN_BANK_EINTG(2, 0x0, "gps0", 0x00, 0x00),
++	GS101_PIN_BANK_EINTG(8, 0x20, "gps1", 0x04, 0x04),
++	GS101_PIN_BANK_EINTG(3, 0x40, "gps2", 0x08, 0x0c),
+ };
+ 
+ /* pin banks of gs101 pin-controller (GSACTRL) */
+ static const struct samsung_pin_bank_data gs101_pin_gsactrl[] = {
+-	EXYNOS850_PIN_BANK_EINTW(6, 0x0, "gps3", 0x00),
++	GS101_PIN_BANK_EINTW(6, 0x0, "gps3", 0x00, 0x00),
+ };
+ 
+ /* pin banks of gs101 pin-controller (PERIC0) */
+ static const struct samsung_pin_bank_data gs101_pin_peric0[] = {
+-	EXYNOS850_PIN_BANK_EINTG(5, 0x0, "gpp0", 0x00),
+-	EXYNOS850_PIN_BANK_EINTG(4, 0x20, "gpp1", 0x04),
+-	EXYNOS850_PIN_BANK_EINTG(4, 0x40, "gpp2", 0x08),
+-	EXYNOS850_PIN_BANK_EINTG(2, 0x60, "gpp3", 0x0c),
+-	EXYNOS850_PIN_BANK_EINTG(4, 0x80, "gpp4", 0x10),
+-	EXYNOS850_PIN_BANK_EINTG(2, 0xa0, "gpp5", 0x14),
+-	EXYNOS850_PIN_BANK_EINTG(4, 0xc0, "gpp6", 0x18),
+-	EXYNOS850_PIN_BANK_EINTG(2, 0xe0, "gpp7", 0x1c),
+-	EXYNOS850_PIN_BANK_EINTG(4, 0x100, "gpp8", 0x20),
+-	EXYNOS850_PIN_BANK_EINTG(2, 0x120, "gpp9", 0x24),
+-	EXYNOS850_PIN_BANK_EINTG(4, 0x140, "gpp10", 0x28),
+-	EXYNOS850_PIN_BANK_EINTG(2, 0x160, "gpp11", 0x2c),
+-	EXYNOS850_PIN_BANK_EINTG(4, 0x180, "gpp12", 0x30),
+-	EXYNOS850_PIN_BANK_EINTG(2, 0x1a0, "gpp13", 0x34),
+-	EXYNOS850_PIN_BANK_EINTG(4, 0x1c0, "gpp14", 0x38),
+-	EXYNOS850_PIN_BANK_EINTG(2, 0x1e0, "gpp15", 0x3c),
+-	EXYNOS850_PIN_BANK_EINTG(4, 0x200, "gpp16", 0x40),
+-	EXYNOS850_PIN_BANK_EINTG(2, 0x220, "gpp17", 0x44),
+-	EXYNOS850_PIN_BANK_EINTG(4, 0x240, "gpp18", 0x48),
+-	EXYNOS850_PIN_BANK_EINTG(4, 0x260, "gpp19", 0x4c),
++	GS101_PIN_BANK_EINTG(5, 0x0, "gpp0", 0x00, 0x00),
++	GS101_PIN_BANK_EINTG(4, 0x20, "gpp1", 0x04, 0x08),
++	GS101_PIN_BANK_EINTG(4, 0x40, "gpp2", 0x08, 0x0c),
++	GS101_PIN_BANK_EINTG(2, 0x60, "gpp3", 0x0c, 0x10),
++	GS101_PIN_BANK_EINTG(4, 0x80, "gpp4", 0x10, 0x14),
++	GS101_PIN_BANK_EINTG(2, 0xa0, "gpp5", 0x14, 0x18),
++	GS101_PIN_BANK_EINTG(4, 0xc0, "gpp6", 0x18, 0x1c),
++	GS101_PIN_BANK_EINTG(2, 0xe0, "gpp7", 0x1c, 0x20),
++	GS101_PIN_BANK_EINTG(4, 0x100, "gpp8", 0x20, 0x24),
++	GS101_PIN_BANK_EINTG(2, 0x120, "gpp9", 0x24, 0x28),
++	GS101_PIN_BANK_EINTG(4, 0x140, "gpp10", 0x28, 0x2c),
++	GS101_PIN_BANK_EINTG(2, 0x160, "gpp11", 0x2c, 0x30),
++	GS101_PIN_BANK_EINTG(4, 0x180, "gpp12", 0x30, 0x34),
++	GS101_PIN_BANK_EINTG(2, 0x1a0, "gpp13", 0x34, 0x38),
++	GS101_PIN_BANK_EINTG(4, 0x1c0, "gpp14", 0x38, 0x3c),
++	GS101_PIN_BANK_EINTG(2, 0x1e0, "gpp15", 0x3c, 0x40),
++	GS101_PIN_BANK_EINTG(4, 0x200, "gpp16", 0x40, 0x44),
++	GS101_PIN_BANK_EINTG(2, 0x220, "gpp17", 0x44, 0x48),
++	GS101_PIN_BANK_EINTG(4, 0x240, "gpp18", 0x48, 0x4c),
++	GS101_PIN_BANK_EINTG(4, 0x260, "gpp19", 0x4c, 0x50),
+ };
+ 
+ /* pin banks of gs101 pin-controller (PERIC1) */
+ static const struct samsung_pin_bank_data gs101_pin_peric1[] = {
+-	EXYNOS850_PIN_BANK_EINTG(8, 0x0, "gpp20", 0x00),
+-	EXYNOS850_PIN_BANK_EINTG(4, 0x20, "gpp21", 0x04),
+-	EXYNOS850_PIN_BANK_EINTG(2, 0x40, "gpp22", 0x08),
+-	EXYNOS850_PIN_BANK_EINTG(8, 0x60, "gpp23", 0x0c),
+-	EXYNOS850_PIN_BANK_EINTG(4, 0x80, "gpp24", 0x10),
+-	EXYNOS850_PIN_BANK_EINTG(4, 0xa0, "gpp25", 0x14),
+-	EXYNOS850_PIN_BANK_EINTG(5, 0xc0, "gpp26", 0x18),
+-	EXYNOS850_PIN_BANK_EINTG(4, 0xe0, "gpp27", 0x1c),
++	GS101_PIN_BANK_EINTG(8, 0x0, "gpp20", 0x00, 0x00),
++	GS101_PIN_BANK_EINTG(4, 0x20, "gpp21", 0x04, 0x08),
++	GS101_PIN_BANK_EINTG(2, 0x40, "gpp22", 0x08, 0x0c),
++	GS101_PIN_BANK_EINTG(8, 0x60, "gpp23", 0x0c, 0x10),
++	GS101_PIN_BANK_EINTG(4, 0x80, "gpp24", 0x10, 0x18),
++	GS101_PIN_BANK_EINTG(4, 0xa0, "gpp25", 0x14, 0x1c),
++	GS101_PIN_BANK_EINTG(5, 0xc0, "gpp26", 0x18, 0x20),
++	GS101_PIN_BANK_EINTG(4, 0xe0, "gpp27", 0x1c, 0x28),
+ };
+ 
+ /* pin banks of gs101 pin-controller (HSI1) */
+ static const struct samsung_pin_bank_data gs101_pin_hsi1[] = {
+-	EXYNOS850_PIN_BANK_EINTG(6, 0x0, "gph0", 0x00),
+-	EXYNOS850_PIN_BANK_EINTG(7, 0x20, "gph1", 0x04),
++	GS101_PIN_BANK_EINTG(6, 0x0, "gph0", 0x00, 0x00),
++	GS101_PIN_BANK_EINTG(7, 0x20, "gph1", 0x04, 0x08),
+ };
+ 
+ /* pin banks of gs101 pin-controller (HSI2) */
+ static const struct samsung_pin_bank_data gs101_pin_hsi2[] = {
+-	EXYNOS850_PIN_BANK_EINTG(6, 0x0, "gph2", 0x00),
+-	EXYNOS850_PIN_BANK_EINTG(2, 0x20, "gph3", 0x04),
+-	EXYNOS850_PIN_BANK_EINTG(6, 0x40, "gph4", 0x08),
++	GS101_PIN_BANK_EINTG(6, 0x0, "gph2", 0x00, 0x00),
++	GS101_PIN_BANK_EINTG(2, 0x20, "gph3", 0x04, 0x08),
++	GS101_PIN_BANK_EINTG(6, 0x40, "gph4", 0x08, 0x0c),
+ };
+ 
+ static const struct samsung_pin_ctrl gs101_pin_ctrl[] __initconst = {
+diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.h b/drivers/pinctrl/samsung/pinctrl-exynos.h
+index 305cb1d31de491..97a43fa4dfc567 100644
+--- a/drivers/pinctrl/samsung/pinctrl-exynos.h
++++ b/drivers/pinctrl/samsung/pinctrl-exynos.h
+@@ -165,6 +165,28 @@
+ 		.name			= id				\
+ 	}
+ 
++#define GS101_PIN_BANK_EINTG(pins, reg, id, offs, fltcon_offs) \
++	{							\
++		.type			= &exynos850_bank_type_off,	\
++		.pctl_offset		= reg,			\
++		.nr_pins		= pins,			\
++		.eint_type		= EINT_TYPE_GPIO,	\
++		.eint_offset		= offs,			\
++		.eint_fltcon_offset	= fltcon_offs,		\
++		.name			= id			\
++	}
++
++#define GS101_PIN_BANK_EINTW(pins, reg, id, offs, fltcon_offs) \
++	{								\
++		.type			= &exynos850_bank_type_alive,	\
++		.pctl_offset		= reg,				\
++		.nr_pins		= pins,				\
++		.eint_type		= EINT_TYPE_WKUP,		\
++		.eint_offset		= offs,				\
++		.eint_fltcon_offset	= fltcon_offs,			\
++		.name			= id				\
++	}
++
+ /**
+  * struct exynos_weint_data: irq specific data for all the wakeup interrupts
+  * generated by the external wakeup interrupt controller.
+diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
+index c142cd7920307f..63ac89a802d301 100644
+--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
++++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
+@@ -1230,6 +1230,7 @@ samsung_pinctrl_get_soc_data(struct samsung_pinctrl_drv_data *d,
+ 		bank->eint_con_offset = bdata->eint_con_offset;
+ 		bank->eint_mask_offset = bdata->eint_mask_offset;
+ 		bank->eint_pend_offset = bdata->eint_pend_offset;
++		bank->eint_fltcon_offset = bdata->eint_fltcon_offset;
+ 		bank->name = bdata->name;
+ 
+ 		raw_spin_lock_init(&bank->slock);
+diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.h b/drivers/pinctrl/samsung/pinctrl-samsung.h
+index a1e7377bd890b7..14c3b6b965851e 100644
+--- a/drivers/pinctrl/samsung/pinctrl-samsung.h
++++ b/drivers/pinctrl/samsung/pinctrl-samsung.h
+@@ -144,6 +144,7 @@ struct samsung_pin_bank_type {
+  * @eint_con_offset: ExynosAuto SoC-specific EINT control register offset of bank.
+  * @eint_mask_offset: ExynosAuto SoC-specific EINT mask register offset of bank.
+  * @eint_pend_offset: ExynosAuto SoC-specific EINT pend register offset of bank.
++ * @eint_fltcon_offset: GS101 SoC-specific EINT filter config register offset.
+  * @name: name to be prefixed for each pin in this pin bank.
+  */
+ struct samsung_pin_bank_data {
+@@ -158,6 +159,7 @@ struct samsung_pin_bank_data {
+ 	u32		eint_con_offset;
+ 	u32		eint_mask_offset;
+ 	u32		eint_pend_offset;
++	u32		eint_fltcon_offset;
+ 	const char	*name;
+ };
+ 
+@@ -175,6 +177,7 @@ struct samsung_pin_bank_data {
+  * @eint_con_offset: ExynosAuto SoC-specific EINT register or interrupt offset of bank.
+  * @eint_mask_offset: ExynosAuto SoC-specific EINT mask register offset of bank.
+  * @eint_pend_offset: ExynosAuto SoC-specific EINT pend register offset of bank.
++ * @eint_fltcon_offset: GS101 SoC-specific EINT filter config register offset.
+  * @name: name to be prefixed for each pin in this pin bank.
+  * @id: id of the bank, propagated to the pin range.
+  * @pin_base: starting pin number of the bank.
+@@ -201,6 +204,7 @@ struct samsung_pin_bank {
+ 	u32		eint_con_offset;
+ 	u32		eint_mask_offset;
+ 	u32		eint_pend_offset;
++	u32		eint_fltcon_offset;
+ 	const char	*name;
+ 	u32		id;
+ 
+diff --git a/drivers/platform/chrome/cros_ec_lpc.c b/drivers/platform/chrome/cros_ec_lpc.c
+index 626e2635e3da70..ac198d1fd17073 100644
+--- a/drivers/platform/chrome/cros_ec_lpc.c
++++ b/drivers/platform/chrome/cros_ec_lpc.c
+@@ -30,6 +30,7 @@
+ 
+ #define DRV_NAME "cros_ec_lpcs"
+ #define ACPI_DRV_NAME "GOOG0004"
++#define FRMW_ACPI_DRV_NAME "FRMWC004"
+ 
+ /* True if ACPI device is present */
+ static bool cros_ec_lpc_acpi_device_found;
+@@ -460,7 +461,7 @@ static int cros_ec_lpc_probe(struct platform_device *pdev)
+ 	acpi_status status;
+ 	struct cros_ec_device *ec_dev;
+ 	struct cros_ec_lpc *ec_lpc;
+-	struct lpc_driver_data *driver_data;
++	const struct lpc_driver_data *driver_data;
+ 	u8 buf[2] = {};
+ 	int irq, ret;
+ 	u32 quirks;
+@@ -472,6 +473,9 @@ static int cros_ec_lpc_probe(struct platform_device *pdev)
+ 	ec_lpc->mmio_memory_base = EC_LPC_ADDR_MEMMAP;
+ 
+ 	driver_data = platform_get_drvdata(pdev);
++	if (!driver_data)
++		driver_data = acpi_device_get_match_data(dev);
++
+ 	if (driver_data) {
+ 		quirks = driver_data->quirks;
+ 
+@@ -625,12 +629,6 @@ static void cros_ec_lpc_remove(struct platform_device *pdev)
+ 	cros_ec_unregister(ec_dev);
+ }
+ 
+-static const struct acpi_device_id cros_ec_lpc_acpi_device_ids[] = {
+-	{ ACPI_DRV_NAME, 0 },
+-	{ }
+-};
+-MODULE_DEVICE_TABLE(acpi, cros_ec_lpc_acpi_device_ids);
+-
+ static const struct lpc_driver_data framework_laptop_npcx_lpc_driver_data __initconst = {
+ 	.quirks = CROS_EC_LPC_QUIRK_REMAP_MEMORY,
+ 	.quirk_mmio_memory_base = 0xE00,
+@@ -642,6 +640,13 @@ static const struct lpc_driver_data framework_laptop_mec_lpc_driver_data __initc
+ 	.quirk_aml_mutex_name = "ECMT",
+ };
+ 
++static const struct acpi_device_id cros_ec_lpc_acpi_device_ids[] = {
++	{ ACPI_DRV_NAME, 0 },
++	{ FRMW_ACPI_DRV_NAME, (kernel_ulong_t)&framework_laptop_npcx_lpc_driver_data },
++	{ }
++};
++MODULE_DEVICE_TABLE(acpi, cros_ec_lpc_acpi_device_ids);
++
+ static const struct dmi_system_id cros_ec_lpc_dmi_table[] __initconst = {
+ 	{
+ 		/*
+@@ -795,7 +800,8 @@ static int __init cros_ec_lpc_init(void)
+ 	int ret;
+ 	const struct dmi_system_id *dmi_match;
+ 
+-	cros_ec_lpc_acpi_device_found = !!cros_ec_lpc_get_device(ACPI_DRV_NAME);
++	cros_ec_lpc_acpi_device_found = !!cros_ec_lpc_get_device(ACPI_DRV_NAME) ||
++		!!cros_ec_lpc_get_device(FRMW_ACPI_DRV_NAME);
+ 
+ 	dmi_match = dmi_first_match(cros_ec_lpc_dmi_table);
+ 
+diff --git a/drivers/platform/x86/x86-android-tablets/Kconfig b/drivers/platform/x86/x86-android-tablets/Kconfig
+index 88d9e8f2ff24ec..c98dfbdfb9dda3 100644
+--- a/drivers/platform/x86/x86-android-tablets/Kconfig
++++ b/drivers/platform/x86/x86-android-tablets/Kconfig
+@@ -8,6 +8,7 @@ config X86_ANDROID_TABLETS
+ 	depends on I2C && SPI && SERIAL_DEV_BUS && ACPI && EFI && GPIOLIB && PMIC_OPREGION
+ 	select NEW_LEDS
+ 	select LEDS_CLASS
++	select POWER_SUPPLY
+ 	help
+ 	  X86 tablets which ship with Android as (part of) the factory image
+ 	  typically have various problems with their DSDTs. The factory kernels
+diff --git a/drivers/pwm/pwm-fsl-ftm.c b/drivers/pwm/pwm-fsl-ftm.c
+index 2510c10ca47303..c45a5fca4cbbd2 100644
+--- a/drivers/pwm/pwm-fsl-ftm.c
++++ b/drivers/pwm/pwm-fsl-ftm.c
+@@ -118,6 +118,9 @@ static unsigned int fsl_pwm_ticks_to_ns(struct fsl_pwm_chip *fpc,
+ 	unsigned long long exval;
+ 
+ 	rate = clk_get_rate(fpc->clk[fpc->period.clk_select]);
++	if (rate >> fpc->period.clk_ps == 0)
++		return 0;
++
+ 	exval = ticks;
+ 	exval *= 1000000000UL;
+ 	do_div(exval, rate >> fpc->period.clk_ps);
+@@ -190,6 +193,9 @@ static unsigned int fsl_pwm_calculate_duty(struct fsl_pwm_chip *fpc,
+ 	unsigned int period = fpc->period.mod_period + 1;
+ 	unsigned int period_ns = fsl_pwm_ticks_to_ns(fpc, period);
+ 
++	if (!period_ns)
++		return 0;
++
+ 	duty = (unsigned long long)duty_ns * period;
+ 	do_div(duty, period_ns);
+ 
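+
+Both guards above protect the same division: the period in nanoseconds is ticks * NSEC_PER_SEC / (rate >> prescaler), so a slow clock combined with a large prescaler can shift the divisor down to zero, and a zero period would then poison the duty computation that divides by it. A hedged sketch of the conversion with the guard in place (illustrative constants, not the FTM register model):
+
+	#include <stdint.h>
+	#include <stdio.h>
+
+	static unsigned int ticks_to_ns(uint64_t ticks, unsigned long rate,
+					unsigned int prescaler)
+	{
+		uint64_t exval;
+
+		if ((rate >> prescaler) == 0)
+			return 0; /* divisor collapsed: report an unusable period */
+
+		exval = ticks * 1000000000ULL;
+		return exval / (rate >> prescaler);
+	}
+
+	int main(void)
+	{
+		/* A 1 kHz clock behind a 2^12 prescaler shifts down to 0 Hz. */
+		printf("%u\n", ticks_to_ns(100, 1000, 12));    /* guarded: 0 */
+		printf("%u\n", ticks_to_ns(100, 1000000, 0));  /* 100000 ns */
+		return 0;
+	}
+
+The second hunk is the mirror image of the first: the duty calculation divides by this period, so a zero period must be rejected before that division too.
+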
+diff --git a/drivers/pwm/pwm-mediatek.c b/drivers/pwm/pwm-mediatek.c
+index 01dfa0fab80a44..7eaab58314995c 100644
+--- a/drivers/pwm/pwm-mediatek.c
++++ b/drivers/pwm/pwm-mediatek.c
+@@ -121,21 +121,25 @@ static int pwm_mediatek_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ 	struct pwm_mediatek_chip *pc = to_pwm_mediatek_chip(chip);
+ 	u32 clkdiv = 0, cnt_period, cnt_duty, reg_width = PWMDWIDTH,
+ 	    reg_thres = PWMTHRES;
++	unsigned long clk_rate;
+ 	u64 resolution;
+ 	int ret;
+ 
+ 	ret = pwm_mediatek_clk_enable(chip, pwm);
+-
+ 	if (ret < 0)
+ 		return ret;
+ 
++	clk_rate = clk_get_rate(pc->clk_pwms[pwm->hwpwm]);
++	if (!clk_rate)
++		return -EINVAL;
++
+ 	/* Make sure we use the bus clock and not the 26MHz clock */
+ 	if (pc->soc->has_ck_26m_sel)
+ 		writel(0, pc->regs + PWM_CK_26M_SEL);
+ 
+ 	/* Using resolution in picosecond gets accuracy higher */
+ 	resolution = (u64)NSEC_PER_SEC * 1000;
+-	do_div(resolution, clk_get_rate(pc->clk_pwms[pwm->hwpwm]));
++	do_div(resolution, clk_rate);
+ 
+ 	cnt_period = DIV_ROUND_CLOSEST_ULL((u64)period_ns * 1000, resolution);
+ 	while (cnt_period > 8191) {
+diff --git a/drivers/pwm/pwm-rcar.c b/drivers/pwm/pwm-rcar.c
+index 2261789cc27dae..578dbdd2d5a721 100644
+--- a/drivers/pwm/pwm-rcar.c
++++ b/drivers/pwm/pwm-rcar.c
+@@ -8,6 +8,7 @@
+  * - The hardware cannot generate a 0% duty cycle.
+  */
+ 
++#include <linux/bitfield.h>
+ #include <linux/clk.h>
+ #include <linux/err.h>
+ #include <linux/io.h>
+@@ -102,23 +103,24 @@ static void rcar_pwm_set_clock_control(struct rcar_pwm_chip *rp,
+ 	rcar_pwm_write(rp, value, RCAR_PWMCR);
+ }
+ 
+-static int rcar_pwm_set_counter(struct rcar_pwm_chip *rp, int div, int duty_ns,
+-				int period_ns)
++static int rcar_pwm_set_counter(struct rcar_pwm_chip *rp, int div, u64 duty_ns,
++				u64 period_ns)
+ {
+-	unsigned long long one_cycle, tmp;	/* 0.01 nanoseconds */
++	unsigned long long tmp;
+ 	unsigned long clk_rate = clk_get_rate(rp->clk);
+ 	u32 cyc, ph;
+ 
+-	one_cycle = NSEC_PER_SEC * 100ULL << div;
+-	do_div(one_cycle, clk_rate);
++	/* div <= 24 == RCAR_PWM_MAX_DIVISION, so the shift doesn't overflow. */
++	tmp = mul_u64_u64_div_u64(period_ns, clk_rate, (u64)NSEC_PER_SEC << div);
++	if (tmp > FIELD_MAX(RCAR_PWMCNT_CYC0_MASK))
++		tmp = FIELD_MAX(RCAR_PWMCNT_CYC0_MASK);
+ 
+-	tmp = period_ns * 100ULL;
+-	do_div(tmp, one_cycle);
+-	cyc = (tmp << RCAR_PWMCNT_CYC0_SHIFT) & RCAR_PWMCNT_CYC0_MASK;
++	cyc = FIELD_PREP(RCAR_PWMCNT_CYC0_MASK, tmp);
+ 
+-	tmp = duty_ns * 100ULL;
+-	do_div(tmp, one_cycle);
+-	ph = tmp & RCAR_PWMCNT_PH0_MASK;
++	tmp = mul_u64_u64_div_u64(duty_ns, clk_rate, (u64)NSEC_PER_SEC << div);
++	if (tmp > FIELD_MAX(RCAR_PWMCNT_PH0_MASK))
++		tmp = FIELD_MAX(RCAR_PWMCNT_PH0_MASK);
++	ph = FIELD_PREP(RCAR_PWMCNT_PH0_MASK, tmp);
+ 
+ 	/* Avoid prohibited setting */
+ 	if (cyc == 0 || ph == 0)
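+
+The pwm-rcar rewrite above swaps the 0.01 ns fixed-point arithmetic for a wide multiply-then-divide plus an explicit clamp to the register field's maximum, so out-of-range periods saturate instead of wrapping into neighboring bits. A sketch of the clamp-and-pack step; unsigned __int128 stands in for the kernel's mul_u64_u64_div_u64(), and the mask values are invented rather than the real RCAR_PWMCNT layout:
+
+	#include <stdint.h>
+	#include <stdio.h>
+
+	#define CYC_MASK  0x03ff0000u /* example 10-bit field at bits 16..25 */
+	#define CYC_SHIFT 16
+	#define CYC_MAX   (CYC_MASK >> CYC_SHIFT)
+
+	static uint32_t pack_cycles(uint64_t period_ns, uint64_t clk_rate,
+				    unsigned int div)
+	{
+		unsigned __int128 tmp;
+
+		/* wide multiply, then divide by NSEC_PER_SEC << div */
+		tmp = (unsigned __int128)period_ns * clk_rate /
+		      ((uint64_t)1000000000ULL << div);
+		if (tmp > CYC_MAX)
+			tmp = CYC_MAX; /* saturate instead of wrapping into the field */
+
+		return (uint32_t)tmp << CYC_SHIFT;
+	}
+
+	int main(void)
+	{
+		printf("0x%08x\n", pack_cycles(1000, 100000000, 0)); /* 100 cycles */
+		printf("0x%08x\n", pack_cycles(1, 1000, 0)); /* 0: prohibited, caller rejects */
+		return 0;
+	}
+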
+diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
+index 21fa7ac849e5c3..4904b831c0a75f 100644
+--- a/drivers/s390/virtio/virtio_ccw.c
++++ b/drivers/s390/virtio/virtio_ccw.c
+@@ -302,11 +302,17 @@ static struct airq_info *new_airq_info(int index)
+ static unsigned long *get_airq_indicator(struct virtqueue *vqs[], int nvqs,
+ 					 u64 *first, void **airq_info)
+ {
+-	int i, j;
++	int i, j, queue_idx, highest_queue_idx = -1;
+ 	struct airq_info *info;
+ 	unsigned long *indicator_addr = NULL;
+ 	unsigned long bit, flags;
+ 
++	/* Array entries without an actual queue pointer must be ignored. */
++	for (i = 0; i < nvqs; i++) {
++		if (vqs[i])
++			highest_queue_idx++;
++	}
++
+ 	for (i = 0; i < MAX_AIRQ_AREAS && !indicator_addr; i++) {
+ 		mutex_lock(&airq_areas_lock);
+ 		if (!airq_areas[i])
+@@ -316,7 +322,7 @@ static unsigned long *get_airq_indicator(struct virtqueue *vqs[], int nvqs,
+ 		if (!info)
+ 			return NULL;
+ 		write_lock_irqsave(&info->lock, flags);
+-		bit = airq_iv_alloc(info->aiv, nvqs);
++		bit = airq_iv_alloc(info->aiv, highest_queue_idx + 1);
+ 		if (bit == -1UL) {
+ 			/* Not enough vacancies. */
+ 			write_unlock_irqrestore(&info->lock, flags);
+@@ -325,8 +331,10 @@ static unsigned long *get_airq_indicator(struct virtqueue *vqs[], int nvqs,
+ 		*first = bit;
+ 		*airq_info = info;
+ 		indicator_addr = info->aiv->vector;
+-		for (j = 0; j < nvqs; j++) {
+-			airq_iv_set_ptr(info->aiv, bit + j,
++		for (j = 0, queue_idx = 0; j < nvqs; j++) {
++			if (!vqs[j])
++				continue;
++			airq_iv_set_ptr(info->aiv, bit + queue_idx++,
+ 					(unsigned long)vqs[j]);
+ 		}
+ 		write_unlock_irqrestore(&info->lock, flags);
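+
+The get_airq_indicator() fix above stops assuming the vqs[] array is dense: indicator bits are now counted and assigned only for non-NULL entries, so holes in the array no longer waste bits or misalign the queue-to-bit mapping. The shape of that compaction in a few lines of plain C:
+
+	#include <stdio.h>
+
+	#define NVQS 5
+
+	int main(void)
+	{
+		const char *vqs[NVQS] = { "rx", NULL, "tx", NULL, "ctrl" };
+		int nbits = 0, bit = 0;
+
+		/* Array entries without an actual queue must be ignored. */
+		for (int i = 0; i < NVQS; i++)
+			if (vqs[i])
+				nbits++;
+
+		printf("allocate %d bits (not %d)\n", nbits, NVQS);
+
+		for (int i = 0; i < NVQS; i++) {
+			if (!vqs[i])
+				continue;
+			printf("queue %s -> indicator bit %d\n", vqs[i], bit++);
+		}
+		return 0;
+	}
+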
+diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h
+index 1e715fd65a7d4b..ee5a75a4b3bb80 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr.h
++++ b/drivers/scsi/mpi3mr/mpi3mr.h
+@@ -81,13 +81,14 @@ extern atomic64_t event_counter;
+ 
+ /* Admin queue management definitions */
+ #define MPI3MR_ADMIN_REQ_Q_SIZE		(2 * MPI3MR_PAGE_SIZE_4K)
+-#define MPI3MR_ADMIN_REPLY_Q_SIZE	(4 * MPI3MR_PAGE_SIZE_4K)
++#define MPI3MR_ADMIN_REPLY_Q_SIZE	(8 * MPI3MR_PAGE_SIZE_4K)
+ #define MPI3MR_ADMIN_REQ_FRAME_SZ	128
+ #define MPI3MR_ADMIN_REPLY_FRAME_SZ	16
+ 
+ /* Operational queue management definitions */
+ #define MPI3MR_OP_REQ_Q_QD		512
+ #define MPI3MR_OP_REP_Q_QD		1024
++#define MPI3MR_OP_REP_Q_QD2K		2048
+ #define MPI3MR_OP_REP_Q_QD4K		4096
+ #define MPI3MR_OP_REQ_Q_SEG_SIZE	4096
+ #define MPI3MR_OP_REP_Q_SEG_SIZE	4096
+@@ -329,6 +330,7 @@ enum mpi3mr_reset_reason {
+ #define MPI3MR_RESET_REASON_OSTYPE_SHIFT	28
+ #define MPI3MR_RESET_REASON_IOCNUM_SHIFT	20
+ 
++
+ /* Queue type definitions */
+ enum queue_type {
+ 	MPI3MR_DEFAULT_QUEUE = 0,
+@@ -388,6 +390,7 @@ struct mpi3mr_ioc_facts {
+ 	u16 max_msix_vectors;
+ 	u8 personality;
+ 	u8 dma_mask;
++	bool max_req_limit;
+ 	u8 protocol_flags;
+ 	u8 sge_mod_mask;
+ 	u8 sge_mod_value;
+@@ -457,6 +460,8 @@ struct op_req_qinfo {
+  * @enable_irq_poll: Flag to indicate polling is enabled
+  * @in_use: Queue is handled by poll/ISR
+  * @qtype: Type of queue (types defined in enum queue_type)
++ * @qfull_watermark: Pending-reply watermark at which the reply queue
++ *                    is treated as nearly full
+  */
+ struct op_reply_qinfo {
+ 	u16 ci;
+@@ -472,6 +477,7 @@ struct op_reply_qinfo {
+ 	bool enable_irq_poll;
+ 	atomic_t in_use;
+ 	enum queue_type qtype;
++	u16 qfull_watermark;
+ };
+ 
+ /**
+@@ -1091,6 +1097,7 @@ struct scmd_priv {
+  * @ts_update_interval: Timestamp update interval
+  * @reset_in_progress: Reset in progress flag
+  * @unrecoverable: Controller unrecoverable flag
++ * @io_admin_reset_sync: Flag to stop I/O and admin reply processing during a reset
+  * @prev_reset_result: Result of previous reset
+  * @reset_mutex: Controller reset mutex
+  * @reset_waitq: Controller reset  wait queue
+@@ -1154,6 +1161,8 @@ struct scmd_priv {
+  * @snapdump_trigger_active: Snapdump trigger active flag
+  * @pci_err_recovery: PCI error recovery in progress
+  * @block_on_pci_err: Block IO during PCI error recovery
++ * @reply_qfull_count: Occurrences of reply queue full avoidance kicking in
++ * @prevent_reply_qfull: Enable reply queue full prevention
+  */
+ struct mpi3mr_ioc {
+ 	struct list_head list;
+@@ -1277,6 +1286,7 @@ struct mpi3mr_ioc {
+ 	u16 ts_update_interval;
+ 	u8 reset_in_progress;
+ 	u8 unrecoverable;
++	u8 io_admin_reset_sync;
+ 	int prev_reset_result;
+ 	struct mutex reset_mutex;
+ 	wait_queue_head_t reset_waitq;
+@@ -1352,6 +1362,8 @@ struct mpi3mr_ioc {
+ 	bool fw_release_trigger_active;
+ 	bool pci_err_recovery;
+ 	bool block_on_pci_err;
++	atomic_t reply_qfull_count;
++	bool prevent_reply_qfull;
+ };
+ 
+ /**
+diff --git a/drivers/scsi/mpi3mr/mpi3mr_app.c b/drivers/scsi/mpi3mr/mpi3mr_app.c
+index 7589f48aebc80f..1532436f0f3af1 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr_app.c
++++ b/drivers/scsi/mpi3mr/mpi3mr_app.c
+@@ -3060,6 +3060,29 @@ reply_queue_count_show(struct device *dev, struct device_attribute *attr,
+ 
+ static DEVICE_ATTR_RO(reply_queue_count);
+ 
++/**
++ * reply_qfull_count_show - Show reply qfull count
++ * @dev: class device
++ * @attr: Device attributes
++ * @buf: Buffer to copy
++ *
++ * Retrieves the current value of the reply_qfull_count from the mrioc structure and
++ * formats it as a string for display.
++ *
++ * Return: sysfs_emit() return
++ */
++static ssize_t
++reply_qfull_count_show(struct device *dev, struct device_attribute *attr,
++			char *buf)
++{
++	struct Scsi_Host *shost = class_to_shost(dev);
++	struct mpi3mr_ioc *mrioc = shost_priv(shost);
++
++	return sysfs_emit(buf, "%u\n", atomic_read(&mrioc->reply_qfull_count));
++}
++
++static DEVICE_ATTR_RO(reply_qfull_count);
++
+ /**
+  * logging_level_show - Show controller debug level
+  * @dev: class device
+@@ -3152,6 +3175,7 @@ static struct attribute *mpi3mr_host_attrs[] = {
+ 	&dev_attr_fw_queue_depth.attr,
+ 	&dev_attr_op_req_q_count.attr,
+ 	&dev_attr_reply_queue_count.attr,
++	&dev_attr_reply_qfull_count.attr,
+ 	&dev_attr_logging_level.attr,
+ 	&dev_attr_adp_state.attr,
+ 	NULL,
+diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c
+index 5ed31fe57474a3..ec5b1ab2871776 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr_fw.c
++++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c
+@@ -17,7 +17,7 @@ static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
+ 	struct mpi3_ioc_facts_data *facts_data);
+ static void mpi3mr_pel_wait_complete(struct mpi3mr_ioc *mrioc,
+ 	struct mpi3mr_drv_cmd *drv_cmd);
+-
++static int mpi3mr_check_op_admin_proc(struct mpi3mr_ioc *mrioc);
+ static int poll_queues;
+ module_param(poll_queues, int, 0444);
+ MODULE_PARM_DESC(poll_queues, "Number of queues for io_uring poll mode. (Range 1 - 126)");
+@@ -459,7 +459,7 @@ int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
+ 	}
+ 
+ 	do {
+-		if (mrioc->unrecoverable)
++		if (mrioc->unrecoverable || mrioc->io_admin_reset_sync)
+ 			break;
+ 
+ 		mrioc->admin_req_ci = le16_to_cpu(reply_desc->request_queue_ci);
+@@ -554,7 +554,7 @@ int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
+ 	}
+ 
+ 	do {
+-		if (mrioc->unrecoverable)
++		if (mrioc->unrecoverable || mrioc->io_admin_reset_sync)
+ 			break;
+ 
+ 		req_q_idx = le16_to_cpu(reply_desc->request_queue_id) - 1;
+@@ -2104,15 +2104,22 @@ static int mpi3mr_create_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
+ 	}
+ 
+ 	reply_qid = qidx + 1;
+-	op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD;
+-	if ((mrioc->pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) &&
+-		!mrioc->pdev->revision)
+-		op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD4K;
++
++	if (mrioc->pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) {
++		if (mrioc->pdev->revision)
++			op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD;
++		else
++			op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD4K;
++	} else
++		op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD2K;
++
+ 	op_reply_q->ci = 0;
+ 	op_reply_q->ephase = 1;
+ 	atomic_set(&op_reply_q->pend_ios, 0);
+ 	atomic_set(&op_reply_q->in_use, 0);
+ 	op_reply_q->enable_irq_poll = false;
++	op_reply_q->qfull_watermark =
++		op_reply_q->num_replies - (MPI3MR_THRESHOLD_REPLY_COUNT * 2);
+ 
+ 	if (!op_reply_q->q_segments) {
+ 		retval = mpi3mr_alloc_op_reply_q_segments(mrioc, qidx);
+@@ -2416,8 +2423,10 @@ int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc,
+ 	void *segment_base_addr;
+ 	u16 req_sz = mrioc->facts.op_req_sz;
+ 	struct segments *segments = op_req_q->q_segments;
++	struct op_reply_qinfo *op_reply_q = NULL;
+ 
+ 	reply_qidx = op_req_q->reply_qid - 1;
++	op_reply_q = mrioc->op_reply_qinfo + reply_qidx;
+ 
+ 	if (mrioc->unrecoverable)
+ 		return -EFAULT;
+@@ -2448,6 +2457,15 @@ int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc,
+ 		goto out;
+ 	}
+ 
++	/* Reply queue is nearly full, push back IOs to SML */
++	if ((mrioc->prevent_reply_qfull == true) &&
++		(atomic_read(&op_reply_q->pend_ios) >
++	     (op_reply_q->qfull_watermark))) {
++		atomic_inc(&mrioc->reply_qfull_count);
++		retval = -EAGAIN;
++		goto out;
++	}
++
+ 	segment_base_addr = segments[pi / op_req_q->segment_qd].segment;
+ 	req_entry = (u8 *)segment_base_addr +
+ 	    ((pi % op_req_q->segment_qd) * req_sz);
+@@ -3091,6 +3109,9 @@ static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
+ 	mrioc->facts.dma_mask = (facts_flags &
+ 	    MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK) >>
+ 	    MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT;
++	mrioc->facts.dma_mask = (facts_flags &
++	    MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK) >>
++	    MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT;
+ 	mrioc->facts.protocol_flags = facts_data->protocol_flags;
+ 	mrioc->facts.mpi_version = le32_to_cpu(facts_data->mpi_version.word);
+ 	mrioc->facts.max_reqs = le16_to_cpu(facts_data->max_outstanding_requests);
+@@ -4214,6 +4235,9 @@ int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc)
+ 		mrioc->shost->transportt = mpi3mr_transport_template;
+ 	}
+ 
++	if (mrioc->facts.max_req_limit)
++		mrioc->prevent_reply_qfull = true;
++
+ 	mrioc->reply_sz = mrioc->facts.reply_sz;
+ 
+ 	retval = mpi3mr_check_reset_dma_mask(mrioc);
+@@ -4370,6 +4394,7 @@ int mpi3mr_reinit_ioc(struct mpi3mr_ioc *mrioc, u8 is_resume)
+ 		goto out_failed_noretry;
+ 	}
+ 
++	mrioc->io_admin_reset_sync = 0;
+ 	if (is_resume || mrioc->block_on_pci_err) {
+ 		dprint_reset(mrioc, "setting up single ISR\n");
+ 		retval = mpi3mr_setup_isr(mrioc, 1);
+@@ -5228,6 +5253,55 @@ void mpi3mr_pel_get_seqnum_complete(struct mpi3mr_ioc *mrioc,
+ 	drv_cmd->retry_count = 0;
+ }
+ 
++/**
++ * mpi3mr_check_op_admin_proc - Check if reply queues are in active use
++ * @mrioc: Adapter instance reference
++ *
++ * Check if any of the operational reply queues
++ * or the admin reply queue are currently in use.
++ * If any queue is in use, this function waits for
++ * a maximum of 10 seconds for them to become available.
++ *
++ * Return: 0 on success, non-zero on failure.
++ */
++static int mpi3mr_check_op_admin_proc(struct mpi3mr_ioc *mrioc)
++{
++
++	u16 timeout = 10 * 10;
++	u16 elapsed_time = 0;
++	bool op_admin_in_use = false;
++
++	do {
++		op_admin_in_use = false;
++
++		/* Check admin_reply queue first to exit early */
++		if (atomic_read(&mrioc->admin_reply_q_in_use) == 1)
++			op_admin_in_use = true;
++		else {
++			/* Check op_reply queues */
++			int i;
++
++			for (i = 0; i < mrioc->num_queues; i++) {
++				if (atomic_read(&mrioc->op_reply_qinfo[i].in_use) == 1) {
++					op_admin_in_use = true;
++					break;
++				}
++			}
++		}
++
++		if (!op_admin_in_use)
++			break;
++
++		msleep(100);
++
++	} while (++elapsed_time < timeout);
++
++	if (op_admin_in_use)
++		return 1;
++
++	return 0;
++}
++
+ /**
+  * mpi3mr_soft_reset_handler - Reset the controller
+  * @mrioc: Adapter instance reference
+@@ -5308,6 +5382,7 @@ int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
+ 	mpi3mr_wait_for_host_io(mrioc, MPI3MR_RESET_HOST_IOWAIT_TIMEOUT);
+ 
+ 	mpi3mr_ioc_disable_intr(mrioc);
++	mrioc->io_admin_reset_sync = 1;
+ 
+ 	if (snapdump) {
+ 		mpi3mr_set_diagsave(mrioc);
+@@ -5335,6 +5410,16 @@ int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
+ 		ioc_err(mrioc, "Failed to issue soft reset to the ioc\n");
+ 		goto out;
+ 	}
++
++	retval = mpi3mr_check_op_admin_proc(mrioc);
++	if (retval) {
++		ioc_err(mrioc, "Soft reset failed due to an Admin or I/O queue polling\n"
++				"thread still processing replies even after a 10 second\n"
++				"timeout. Marking the controller as unrecoverable!\n");
++
++		goto out;
++	}
++
+ 	if (mrioc->num_io_throttle_group !=
+ 	    mrioc->facts.max_io_throttle_group) {
+ 		ioc_err(mrioc,
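+
+The watermark logic spread across the mpi3mr hunks above is plain back-pressure: with an operational reply queue depth of 2048 (MPI3MR_OP_REP_Q_QD2K), submissions start bouncing with -EAGAIN once pending completions climb past depth minus twice a threshold, and a counter records each push-back for the new sysfs attribute. A userspace sketch with illustrative numbers (THRESHOLD below is a stand-in, not the real MPI3MR_THRESHOLD_REPLY_COUNT value):
+
+	#include <errno.h>
+	#include <stdio.h>
+
+	#define Q_DEPTH    2048
+	#define THRESHOLD  100 /* invented threshold for the sketch */
+
+	static int pend_ios;
+	static int qfull_count;
+
+	static int submit_request(void)
+	{
+		int watermark = Q_DEPTH - THRESHOLD * 2;
+
+		if (pend_ios > watermark) {
+			qfull_count++;
+			return -EAGAIN; /* push the request back to the submitter */
+		}
+		pend_ios++; /* the completion path would decrement this */
+		return 0;
+	}
+
+	int main(void)
+	{
+		int ret = 0;
+
+		for (int i = 0; i < Q_DEPTH && !ret; i++)
+			ret = submit_request();
+		printf("stopped at %d pending, qfull_count=%d\n",
+		       pend_ios, qfull_count);
+		return 0;
+	}
+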
+diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
+index 0dc37fc6f23678..a17441635ff3ab 100644
+--- a/drivers/scsi/st.c
++++ b/drivers/scsi/st.c
+@@ -4119,7 +4119,7 @@ static void validate_options(void)
+  */
+ static int __init st_setup(char *str)
+ {
+-	int i, len, ints[5];
++	int i, len, ints[ARRAY_SIZE(parms) + 1];
+ 	char *stp;
+ 
+ 	stp = get_options(str, ARRAY_SIZE(ints), ints);
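+
+The st_setup() fix above ties the scratch array's size to the parameter table instead of a hard-coded 5, so adding a module parameter can no longer overflow it; get_options() stores the number of parsed integers in slot 0, hence the +1. A sketch of the sizing (parameter names modeled loosely on the driver's table, parsing faked for brevity):
+
+	#include <stdio.h>
+
+	#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+
+	static const char *parms[] = {
+		"buffer_kbs", "max_sg_segs", "try_direct_io", "debug_flag"
+	};
+
+	int main(void)
+	{
+		int ints[ARRAY_SIZE(parms) + 1]; /* slot 0 holds the parsed count */
+
+		/* Pretend get_options() parsed "st=32,256,1": */
+		ints[0] = 3; ints[1] = 32; ints[2] = 256; ints[3] = 1;
+
+		for (int i = 1; i <= ints[0]; i++)
+			printf("%s=%d\n", parms[i - 1], ints[i]);
+		return 0;
+	}
+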
+diff --git a/drivers/soc/samsung/exynos-chipid.c b/drivers/soc/samsung/exynos-chipid.c
+index b1118d37779e46..bba8d86ae1bb06 100644
+--- a/drivers/soc/samsung/exynos-chipid.c
++++ b/drivers/soc/samsung/exynos-chipid.c
+@@ -131,6 +131,8 @@ static int exynos_chipid_probe(struct platform_device *pdev)
+ 
+ 	soc_dev_attr->revision = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+ 						"%x", soc_info.revision);
++	if (!soc_dev_attr->revision)
++		return -ENOMEM;
+ 	soc_dev_attr->soc_id = product_id_to_soc_id(soc_info.product_id);
+ 	if (!soc_dev_attr->soc_id) {
+ 		pr_err("Unknown SoC\n");
+diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
+index 73b1edd0531b43..f9463f263fba16 100644
+--- a/drivers/spi/spi-cadence-quadspi.c
++++ b/drivers/spi/spi-cadence-quadspi.c
+@@ -1634,6 +1634,12 @@ static int cqspi_request_mmap_dma(struct cqspi_st *cqspi)
+ 		int ret = PTR_ERR(cqspi->rx_chan);
+ 
+ 		cqspi->rx_chan = NULL;
++		if (ret == -ENODEV) {
++			/* DMA support is not mandatory */
++			dev_info(&cqspi->pdev->dev, "No Rx DMA available\n");
++			return 0;
++		}
++
+ 		return dev_err_probe(&cqspi->pdev->dev, ret, "No Rx DMA available\n");
+ 	}
+ 	init_completion(&cqspi->rx_dma_complete);
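+
+The cadence-quadspi hunk above makes the Rx DMA channel optional: -ENODEV now downgrades to an informational message and PIO operation, while any other error still fails the probe. The decision tree as a standalone sketch:
+
+	#include <errno.h>
+	#include <stdio.h>
+
+	static int request_dma_chan(void) { return -ENODEV; /* none wired up */ }
+
+	static int setup_dma(void)
+	{
+		int ret = request_dma_chan();
+
+		if (ret < 0) {
+			if (ret == -ENODEV) {
+				puts("No Rx DMA available, using PIO");
+				return 0; /* DMA support is not mandatory */
+			}
+			return ret; /* a real failure: propagate it */
+		}
+		puts("DMA ready");
+		return 0;
+	}
+
+	int main(void)
+	{
+		printf("setup_dma: %d\n", setup_dma());
+		return 0;
+	}
+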
+diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
+index ea14a38356814d..61c065702350e0 100644
+--- a/drivers/target/target_core_spc.c
++++ b/drivers/target/target_core_spc.c
+@@ -2243,7 +2243,7 @@ spc_emulate_report_supp_op_codes(struct se_cmd *cmd)
+ 			response_length += spc_rsoc_encode_command_descriptor(
+ 					&buf[response_length], rctd, descr);
+ 		}
+-		put_unaligned_be32(response_length - 3, buf);
++		put_unaligned_be32(response_length - 4, buf);
+ 	} else {
+ 		response_length = spc_rsoc_encode_one_command_descriptor(
+ 				&buf[response_length], rctd, descr,
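+
+The one-byte fix above concerns the REPORT SUPPORTED OPERATION CODES header: the leading 4-byte COMMAND DATA LENGTH field counts only the bytes that follow it, so the stored value must be the total response length minus 4, not minus 3. A sketch of the encoding:
+
+	#include <stdint.h>
+	#include <stdio.h>
+	#include <string.h>
+
+	static void put_be32(uint32_t v, uint8_t *p)
+	{
+		p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8; p[3] = v;
+	}
+
+	int main(void)
+	{
+		uint8_t buf[64];
+		unsigned int response_length = 4; /* the header itself */
+
+		memset(buf, 0, sizeof(buf));
+		response_length += 20; /* pretend one 20-byte descriptor follows */
+
+		put_be32(response_length - 4, buf); /* descriptor bytes only: 20 */
+		printf("COMMAND DATA LENGTH = %u\n",
+		       (unsigned)((buf[0] << 24) | (buf[1] << 16) |
+				  (buf[2] << 8) | buf[3]));
+		return 0;
+	}
+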
+diff --git a/drivers/thermal/mediatek/lvts_thermal.c b/drivers/thermal/mediatek/lvts_thermal.c
+index 1997e91bb3be94..4b3225377e8f8f 100644
+--- a/drivers/thermal/mediatek/lvts_thermal.c
++++ b/drivers/thermal/mediatek/lvts_thermal.c
+@@ -65,7 +65,7 @@
+ #define LVTS_HW_FILTER				0x0
+ #define LVTS_TSSEL_CONF				0x13121110
+ #define LVTS_CALSCALE_CONF			0x300
+-#define LVTS_MONINT_CONF			0x8300318C
++#define LVTS_MONINT_CONF			0x0300318C
+ 
+ #define LVTS_MONINT_OFFSET_SENSOR0		0xC
+ #define LVTS_MONINT_OFFSET_SENSOR1		0x180
+@@ -91,8 +91,6 @@
+ #define LVTS_MSR_READ_TIMEOUT_US	400
+ #define LVTS_MSR_READ_WAIT_US		(LVTS_MSR_READ_TIMEOUT_US / 2)
+ 
+-#define LVTS_HW_TSHUT_TEMP		105000
+-
+ #define LVTS_MINIMUM_THRESHOLD		20000
+ 
+ static int golden_temp = LVTS_GOLDEN_TEMP_DEFAULT;
+@@ -145,7 +143,6 @@ struct lvts_ctrl {
+ 	struct lvts_sensor sensors[LVTS_SENSOR_MAX];
+ 	const struct lvts_data *lvts_data;
+ 	u32 calibration[LVTS_SENSOR_MAX];
+-	u32 hw_tshut_raw_temp;
+ 	u8 valid_sensor_mask;
+ 	int mode;
+ 	void __iomem *base;
+@@ -837,14 +834,6 @@ static int lvts_ctrl_init(struct device *dev, struct lvts_domain *lvts_td,
+ 		 */
+ 		lvts_ctrl[i].mode = lvts_data->lvts_ctrl[i].mode;
+ 
+-		/*
+-		 * The temperature to raw temperature must be done
+-		 * after initializing the calibration.
+-		 */
+-		lvts_ctrl[i].hw_tshut_raw_temp =
+-			lvts_temp_to_raw(LVTS_HW_TSHUT_TEMP,
+-					 lvts_data->temp_factor);
+-
+ 		lvts_ctrl[i].low_thresh = INT_MIN;
+ 		lvts_ctrl[i].high_thresh = INT_MIN;
+ 	}
+@@ -860,6 +849,32 @@ static int lvts_ctrl_init(struct device *dev, struct lvts_domain *lvts_td,
+ 	return 0;
+ }
+ 
++static void lvts_ctrl_monitor_enable(struct device *dev, struct lvts_ctrl *lvts_ctrl, bool enable)
++{
++	/*
++	 * Bitmaps to enable each sensor in filtered mode in the MONCTL0
++	 * register.
++	 */
++	static const u8 sensor_filt_bitmap[] = { BIT(0), BIT(1), BIT(2), BIT(3) };
++	u32 sensor_map = 0;
++	int i;
++
++	if (lvts_ctrl->mode != LVTS_MSR_FILTERED_MODE)
++		return;
++
++	if (enable) {
++		lvts_for_each_valid_sensor(i, lvts_ctrl)
++			sensor_map |= sensor_filt_bitmap[i];
++	}
++
++	/*
++	 * Bits:
++	 *      9: Single point access flow
++	 *    0-3: Enable sensing point 0-3
++	 */
++	writel(sensor_map | BIT(9), LVTS_MONCTL0(lvts_ctrl->base));
++}
++
+ /*
+  * At this point the configuration register is the only place in the
+  * driver where we write multiple values. Per hardware constraint,
+@@ -893,7 +908,6 @@ static int lvts_irq_init(struct lvts_ctrl *lvts_ctrl)
+ 	 *         10 : Selected sensor with bits 19-18
+ 	 *         11 : Reserved
+ 	 */
+-	writel(BIT(16), LVTS_PROTCTL(lvts_ctrl->base));
+ 
+ 	/*
+ 	 * LVTS_PROTTA : Stage 1 temperature threshold
+@@ -906,8 +920,8 @@ static int lvts_irq_init(struct lvts_ctrl *lvts_ctrl)
+ 	 *
+ 	 * writel(0x0, LVTS_PROTTA(lvts_ctrl->base));
+ 	 * writel(0x0, LVTS_PROTTB(lvts_ctrl->base));
++	 * writel(0x0, LVTS_PROTTC(lvts_ctrl->base));
+ 	 */
+-	writel(lvts_ctrl->hw_tshut_raw_temp, LVTS_PROTTC(lvts_ctrl->base));
+ 
+ 	/*
+ 	 * LVTS_MONINT : Interrupt configuration register
+@@ -1381,8 +1395,11 @@ static int lvts_suspend(struct device *dev)
+ 
+ 	lvts_td = dev_get_drvdata(dev);
+ 
+-	for (i = 0; i < lvts_td->num_lvts_ctrl; i++)
++	for (i = 0; i < lvts_td->num_lvts_ctrl; i++) {
++		lvts_ctrl_monitor_enable(dev, &lvts_td->lvts_ctrl[i], false);
++		usleep_range(100, 200);
+ 		lvts_ctrl_set_enable(&lvts_td->lvts_ctrl[i], false);
++	}
+ 
+ 	clk_disable_unprepare(lvts_td->clk);
+ 
+@@ -1400,8 +1417,11 @@ static int lvts_resume(struct device *dev)
+ 	if (ret)
+ 		return ret;
+ 
+-	for (i = 0; i < lvts_td->num_lvts_ctrl; i++)
++	for (i = 0; i < lvts_td->num_lvts_ctrl; i++) {
+ 		lvts_ctrl_set_enable(&lvts_td->lvts_ctrl[i], true);
++		usleep_range(100, 200);
++		lvts_ctrl_monitor_enable(dev, &lvts_td->lvts_ctrl[i], true);
++	}
+ 
+ 	return 0;
+ }
+diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c
+index 086ed42dd16cd4..a84f48a752d159 100644
+--- a/drivers/thermal/rockchip_thermal.c
++++ b/drivers/thermal/rockchip_thermal.c
+@@ -386,6 +386,7 @@ static const struct tsadc_table rk3328_code_table[] = {
+ 	{296, -40000},
+ 	{304, -35000},
+ 	{313, -30000},
++	{322, -25000},
+ 	{331, -20000},
+ 	{340, -15000},
+ 	{349, -10000},
+diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c
+index 8455f08f5d4060..61424342c09641 100644
+--- a/drivers/vdpa/mlx5/core/mr.c
++++ b/drivers/vdpa/mlx5/core/mr.c
+@@ -190,9 +190,12 @@ static void fill_indir(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mkey, v
+ 			klm->bcount = cpu_to_be32(klm_bcount(dmr->end - dmr->start));
+ 			preve = dmr->end;
+ 		} else {
++			u64 bcount = min_t(u64, dmr->start - preve, MAX_KLM_SIZE);
++
+ 			klm->key = cpu_to_be32(mvdev->res.null_mkey);
+-			klm->bcount = cpu_to_be32(klm_bcount(dmr->start - preve));
+-			preve = dmr->start;
++			klm->bcount = cpu_to_be32(klm_bcount(bcount));
++			preve += bcount;
++
+ 			goto again;
+ 		}
+ 	}
+diff --git a/drivers/video/backlight/led_bl.c b/drivers/video/backlight/led_bl.c
+index c7aefcd6e4e3e3..78260060184575 100644
+--- a/drivers/video/backlight/led_bl.c
++++ b/drivers/video/backlight/led_bl.c
+@@ -229,8 +229,11 @@ static void led_bl_remove(struct platform_device *pdev)
+ 	backlight_device_unregister(bl);
+ 
+ 	led_bl_power_off(priv);
+-	for (i = 0; i < priv->nb_leds; i++)
++	for (i = 0; i < priv->nb_leds; i++) {
++		mutex_lock(&priv->leds[i]->led_access);
+ 		led_sysfs_enable(priv->leds[i]);
++		mutex_unlock(&priv->leds[i]->led_access);
++	}
+ }
+ 
+ static const struct of_device_id led_bl_of_match[] = {
+diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dispc.c b/drivers/video/fbdev/omap2/omapfb/dss/dispc.c
+index 5832485ab998c4..c29b6236952b31 100644
+--- a/drivers/video/fbdev/omap2/omapfb/dss/dispc.c
++++ b/drivers/video/fbdev/omap2/omapfb/dss/dispc.c
+@@ -2749,9 +2749,13 @@ int dispc_ovl_setup(enum omap_plane plane, const struct omap_overlay_info *oi,
+ 		bool mem_to_mem)
+ {
+ 	int r;
+-	enum omap_overlay_caps caps = dss_feat_get_overlay_caps(plane);
++	enum omap_overlay_caps caps;
+ 	enum omap_channel channel;
+ 
++	if (plane == OMAP_DSS_WB)
++		return -EINVAL;
++
++	caps = dss_feat_get_overlay_caps(plane);
+ 	channel = dispc_ovl_get_channel_out(plane);
+ 
+ 	DSSDBG("dispc_ovl_setup %d, pa %pad, pa_uv %pad, sw %d, %d,%d, %dx%d ->"
+diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
+index 528395133b4f8e..4bd31242bd773c 100644
+--- a/drivers/xen/balloon.c
++++ b/drivers/xen/balloon.c
+@@ -675,7 +675,7 @@ void xen_free_ballooned_pages(unsigned int nr_pages, struct page **pages)
+ }
+ EXPORT_SYMBOL(xen_free_ballooned_pages);
+ 
+-static void __init balloon_add_regions(void)
++static int __init balloon_add_regions(void)
+ {
+ 	unsigned long start_pfn, pages;
+ 	unsigned long pfn, extra_pfn_end;
+@@ -698,26 +698,38 @@ static void __init balloon_add_regions(void)
+ 		for (pfn = start_pfn; pfn < extra_pfn_end; pfn++)
+ 			balloon_append(pfn_to_page(pfn));
+ 
+-		balloon_stats.total_pages += extra_pfn_end - start_pfn;
++		/*
++		 * Extra regions are accounted for in the physmap, but they
++		 * must be subtracted from current_pages to balloon down the
++		 * initial allocation, because they are already included in
++		 * total_pages.
++		 */
++		if (extra_pfn_end - start_pfn >= balloon_stats.current_pages) {
++			WARN(1, "Extra pages underflow current target");
++			return -ERANGE;
++		}
++		balloon_stats.current_pages -= extra_pfn_end - start_pfn;
+ 	}
++
++	return 0;
+ }
+ 
+ static int __init balloon_init(void)
+ {
+ 	struct task_struct *task;
++	int rc;
+ 
+ 	if (!xen_domain())
+ 		return -ENODEV;
+ 
+ 	pr_info("Initialising balloon driver\n");
+ 
+-#ifdef CONFIG_XEN_PV
+-	balloon_stats.current_pages = xen_pv_domain()
+-		? min(xen_start_info->nr_pages - xen_released_pages, max_pfn)
+-		: get_num_physpages();
+-#else
+-	balloon_stats.current_pages = get_num_physpages();
+-#endif
++	if (xen_released_pages >= get_num_physpages()) {
++		WARN(1, "Released pages underflow current target");
++		return -ERANGE;
++	}
++
++	balloon_stats.current_pages = get_num_physpages() - xen_released_pages;
+ 	balloon_stats.target_pages  = balloon_stats.current_pages;
+ 	balloon_stats.balloon_low   = 0;
+ 	balloon_stats.balloon_high  = 0;
+@@ -734,7 +746,9 @@ static int __init balloon_init(void)
+ 	register_sysctl_init("xen/balloon", balloon_table);
+ #endif
+ 
+-	balloon_add_regions();
++	rc = balloon_add_regions();
++	if (rc)
++		return rc;
+ 
+ 	task = kthread_run(balloon_thread, NULL, "xen-balloon");
+ 	if (IS_ERR(task)) {
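+
+Both WARN() paths added above guard the same unsigned arithmetic: current_pages is derived by subtraction, and if the subtrahend is not strictly smaller the result would wrap to a huge page count, so initialization bails out with -ERANGE instead. A compact sketch:
+
+	#include <errno.h>
+	#include <stdio.h>
+
+	static long init_current_pages(unsigned long physpages,
+				       unsigned long released)
+	{
+		if (released >= physpages) {
+			fprintf(stderr, "Released pages underflow current target\n");
+			return -ERANGE; /* refuse the wrapping subtraction */
+		}
+		return (long)(physpages - released);
+	}
+
+	int main(void)
+	{
+		printf("%ld\n", init_current_pages(1UL << 20, 4096)); /* 1044480 */
+		printf("%ld\n", init_current_pages(4096, 1UL << 20)); /* -ERANGE */
+		return 0;
+	}
+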
+diff --git a/drivers/xen/xenfs/xensyms.c b/drivers/xen/xenfs/xensyms.c
+index b799bc759c15f4..088b7f02c35866 100644
+--- a/drivers/xen/xenfs/xensyms.c
++++ b/drivers/xen/xenfs/xensyms.c
+@@ -48,7 +48,7 @@ static int xensyms_next_sym(struct xensyms *xs)
+ 			return -ENOMEM;
+ 
+ 		set_xen_guest_handle(symdata->name, xs->name);
+-		symdata->symnum--; /* Rewind */
++		symdata->symnum = symnum; /* Rewind */
+ 
+ 		ret = HYPERVISOR_platform_op(&xs->op);
+ 		if (ret < 0)
+@@ -78,7 +78,7 @@ static void *xensyms_next(struct seq_file *m, void *p, loff_t *pos)
+ {
+ 	struct xensyms *xs = m->private;
+ 
+-	xs->op.u.symdata.symnum = ++(*pos);
++	*pos = xs->op.u.symdata.symnum;
+ 
+ 	if (xensyms_next_sym(xs))
+ 		return NULL;
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 563f106774e592..19e5f8eaae772d 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -4274,6 +4274,18 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
+ 	 */
+ 	btrfs_flush_workqueue(fs_info->delalloc_workers);
+ 
++	/*
++	 * When finishing a compressed write bio we schedule a work queue item
++	 * to finish an ordered extent - btrfs_finish_compressed_write_work()
++	 * calls btrfs_finish_ordered_extent(), which in turn calls
++	 * btrfs_queue_ordered_fn(), and that queues the ordered extent
++	 * completion either in the endio_write_workers work queue or in the
++	 * fs_info->endio_freespace_worker work queue. We flush those queues
++	 * below, so before we flush them we must flush this queue for the
++	 * workers of compressed writes.
++	 */
++	flush_workqueue(fs_info->compressed_write_workers);
++
+ 	/*
+ 	 * After we parked the cleaner kthread, ordered extents may have
+ 	 * completed and created new delayed iputs. If one of the async reclaim
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index f3e93ba7ec97fa..4ceffbef32987b 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -2897,7 +2897,15 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans)
+ 						   block_group->length,
+ 						   &trimmed);
+ 
++		/*
++		 * Not strictly necessary to lock, as the block_group should be
++		 * read-only from btrfs_delete_unused_bgs().
++		 */
++		ASSERT(block_group->ro);
++		spin_lock(&fs_info->unused_bgs_lock);
+ 		list_del_init(&block_group->bg_list);
++		spin_unlock(&fs_info->unused_bgs_lock);
++
+ 		btrfs_unfreeze_block_group(block_group);
+ 		btrfs_put_block_group(block_group);
+ 
+diff --git a/fs/btrfs/tests/extent-map-tests.c b/fs/btrfs/tests/extent-map-tests.c
+index 56e61ac1cc64c8..609bb6c9c0873f 100644
+--- a/fs/btrfs/tests/extent-map-tests.c
++++ b/fs/btrfs/tests/extent-map-tests.c
+@@ -1045,6 +1045,7 @@ static int test_rmap_block(struct btrfs_fs_info *fs_info,
+ 	ret = btrfs_add_chunk_map(fs_info, map);
+ 	if (ret) {
+ 		test_err("error adding chunk map to mapping tree");
++		btrfs_free_chunk_map(map);
+ 		goto out_free;
+ 	}
+ 
+diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
+index 82dd9ee89fbc5b..24806e19c7c410 100644
+--- a/fs/btrfs/transaction.c
++++ b/fs/btrfs/transaction.c
+@@ -161,7 +161,13 @@ void btrfs_put_transaction(struct btrfs_transaction *transaction)
+ 			cache = list_first_entry(&transaction->deleted_bgs,
+ 						 struct btrfs_block_group,
+ 						 bg_list);
++			/*
++			 * Not strictly necessary to lock, as no other task will be using a
++			 * block_group on the deleted_bgs list during a transaction abort.
++			 */
++			spin_lock(&transaction->fs_info->unused_bgs_lock);
+ 			list_del_init(&cache->bg_list);
++			spin_unlock(&transaction->fs_info->unused_bgs_lock);
+ 			btrfs_unfreeze_block_group(cache);
+ 			btrfs_put_block_group(cache);
+ 		}
+@@ -2099,7 +2105,13 @@ static void btrfs_cleanup_pending_block_groups(struct btrfs_trans_handle *trans)
+ 
+        list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
+                btrfs_dec_delayed_refs_rsv_bg_inserts(fs_info);
++		/*
++		* Not strictly necessary to lock, as no other task will be using a
++		* block_group on the new_bgs list during a transaction abort.
++		*/
++	       spin_lock(&fs_info->unused_bgs_lock);
+                list_del_init(&block_group->bg_list);
++	       spin_unlock(&fs_info->unused_bgs_lock);
+        }
+ }
+ 
+diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
+index 69d03feea4e0ec..2bb7e32ad94588 100644
+--- a/fs/btrfs/zoned.c
++++ b/fs/btrfs/zoned.c
+@@ -2107,6 +2107,9 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
+ 		physical = map->stripes[i].physical;
+ 		zinfo = device->zone_info;
+ 
++		if (!device->bdev)
++			continue;
++
+ 		if (zinfo->max_active_zones == 0)
+ 			continue;
+ 
+@@ -2268,6 +2271,9 @@ static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_writ
+ 		struct btrfs_zoned_device_info *zinfo = device->zone_info;
+ 		unsigned int nofs_flags;
+ 
++		if (!device->bdev)
++			continue;
++
+ 		if (zinfo->max_active_zones == 0)
+ 			continue;
+ 
+diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
+index 0c01e4423ee2a8..0ad496ceb638d2 100644
+--- a/fs/dlm/lock.c
++++ b/fs/dlm/lock.c
+@@ -741,6 +741,7 @@ static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len,
+ 	read_lock_bh(&ls->ls_rsbtbl_lock);
+ 	if (!rsb_flag(r, RSB_HASHED)) {
+ 		read_unlock_bh(&ls->ls_rsbtbl_lock);
++		error = -EBADR;
+ 		goto do_new;
+ 	}
+ 	
+@@ -784,6 +785,7 @@ static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len,
+ 		}
+ 	} else {
+ 		write_unlock_bh(&ls->ls_rsbtbl_lock);
++		error = -EBADR;
+ 		goto do_new;
+ 	}
+ 
+diff --git a/fs/erofs/fileio.c b/fs/erofs/fileio.c
+index 33f8539dda4aeb..17aed5f6c5490d 100644
+--- a/fs/erofs/fileio.c
++++ b/fs/erofs/fileio.c
+@@ -32,6 +32,8 @@ static void erofs_fileio_ki_complete(struct kiocb *iocb, long ret)
+ 		ret = 0;
+ 	}
+ 	if (rq->bio.bi_end_io) {
++		if (ret < 0 && !rq->bio.bi_status)
++			rq->bio.bi_status = errno_to_blk_status(ret);
+ 		rq->bio.bi_end_io(&rq->bio);
+ 	} else {
+ 		bio_for_each_folio_all(fi, &rq->bio) {
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 67a5b937f5a92d..ffa6aa55a1a7a8 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -4681,22 +4681,43 @@ static inline void ext4_inode_set_iversion_queried(struct inode *inode, u64 val)
+ 		inode_set_iversion_queried(inode, val);
+ }
+ 
+-static const char *check_igot_inode(struct inode *inode, ext4_iget_flags flags)
+-
++static int check_igot_inode(struct inode *inode, ext4_iget_flags flags,
++			    const char *function, unsigned int line)
+ {
++	const char *err_str;
++
+ 	if (flags & EXT4_IGET_EA_INODE) {
+-		if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
+-			return "missing EA_INODE flag";
++		if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) {
++			err_str = "missing EA_INODE flag";
++			goto error;
++		}
+ 		if (ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
+-		    EXT4_I(inode)->i_file_acl)
+-			return "ea_inode with extended attributes";
++		    EXT4_I(inode)->i_file_acl) {
++			err_str = "ea_inode with extended attributes";
++			goto error;
++		}
+ 	} else {
+-		if ((EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
+-			return "unexpected EA_INODE flag";
++		if ((EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) {
++			/*
++			 * open_by_handle_at() could provide an old inode number
++			 * that has since been reused for an ea_inode; this does
++			 * not indicate filesystem corruption
++			 */
++			if (flags & EXT4_IGET_HANDLE)
++				return -ESTALE;
++			err_str = "unexpected EA_INODE flag";
++			goto error;
++		}
++	}
++	if (is_bad_inode(inode) && !(flags & EXT4_IGET_BAD)) {
++		err_str = "unexpected bad inode w/o EXT4_IGET_BAD";
++		goto error;
+ 	}
+-	if (is_bad_inode(inode) && !(flags & EXT4_IGET_BAD))
+-		return "unexpected bad inode w/o EXT4_IGET_BAD";
+-	return NULL;
++	return 0;
++
++error:
++	ext4_error_inode(inode, function, line, 0, err_str);
++	return -EFSCORRUPTED;
+ }
+ 
+ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
+@@ -4708,7 +4729,6 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
+ 	struct ext4_inode_info *ei;
+ 	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
+ 	struct inode *inode;
+-	const char *err_str;
+ 	journal_t *journal = EXT4_SB(sb)->s_journal;
+ 	long ret;
+ 	loff_t size;
+@@ -4737,10 +4757,10 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
+ 	if (!inode)
+ 		return ERR_PTR(-ENOMEM);
+ 	if (!(inode->i_state & I_NEW)) {
+-		if ((err_str = check_igot_inode(inode, flags)) != NULL) {
+-			ext4_error_inode(inode, function, line, 0, err_str);
++		ret = check_igot_inode(inode, flags, function, line);
++		if (ret) {
+ 			iput(inode);
+-			return ERR_PTR(-EFSCORRUPTED);
++			return ERR_PTR(ret);
+ 		}
+ 		return inode;
+ 	}
+@@ -5012,13 +5032,21 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
+ 		ret = -EFSCORRUPTED;
+ 		goto bad_inode;
+ 	}
+-	if ((err_str = check_igot_inode(inode, flags)) != NULL) {
+-		ext4_error_inode(inode, function, line, 0, err_str);
+-		ret = -EFSCORRUPTED;
+-		goto bad_inode;
++	ret = check_igot_inode(inode, flags, function, line);
++	/*
++	 * -ESTALE here means there is nothing inherently wrong with the inode,
++	 * it's just not an inode we can return for an fhandle lookup.
++	 */
++	if (ret == -ESTALE) {
++		brelse(iloc.bh);
++		unlock_new_inode(inode);
++		iput(inode);
++		return ERR_PTR(-ESTALE);
+ 	}
+-
++	if (ret)
++		goto bad_inode;
+ 	brelse(iloc.bh);
++
+ 	unlock_new_inode(inode);
+ 	return inode;
+ 
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
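+
+The check_igot_inode() refactor above turns a string return into an error code precisely so one case can be told apart: an EA_INODE flag seen during an open_by_handle_at() lookup means the handle is stale, not that the filesystem is corrupt. A sketch of that discrimination (flag values invented; in the kernel -EFSCORRUPTED is -EUCLEAN):
+
+	#include <errno.h>
+	#include <stdio.h>
+
+	#define IGET_HANDLE 0x1 /* illustrative flag values */
+	#define EA_INODE_FL 0x2
+
+	static int check_inode(unsigned int inode_flags, unsigned int iget_flags)
+	{
+		if (inode_flags & EA_INODE_FL) {
+			if (iget_flags & IGET_HANDLE)
+				return -ESTALE; /* benign: the handle outlived the inode */
+			return -EUCLEAN; /* corruption, the kernel's -EFSCORRUPTED */
+		}
+		return 0;
+	}
+
+	int main(void)
+	{
+		printf("%d\n", check_inode(0, 0));                     /* 0 */
+		printf("%d\n", check_inode(EA_INODE_FL, IGET_HANDLE)); /* -ESTALE */
+		printf("%d\n", check_inode(EA_INODE_FL, 0));           /* -EUCLEAN */
+		return 0;
+	}
+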
+index 790db7eac6c2ad..286f8fcb74cc9d 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -1995,7 +1995,7 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
+ 	 * split it in half by count; each resulting block will have at least
+ 	 * half the space free.
+ 	 */
+-	if (i > 0)
++	if (i >= 0)
+ 		split = count - move;
+ 	else
+ 		split = count/2;
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index d3795c6c0a9d8e..4291ab3c20be67 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -6906,12 +6906,25 @@ static int ext4_release_dquot(struct dquot *dquot)
+ {
+ 	int ret, err;
+ 	handle_t *handle;
++	bool freeze_protected = false;
++
++	/*
++	 * Trying to sb_start_intwrite() in a running transaction
++	 * can result in a deadlock. Further, running transactions
++	 * are already protected from freezing.
++	 */
++	if (!ext4_journal_current_handle()) {
++		sb_start_intwrite(dquot->dq_sb);
++		freeze_protected = true;
++	}
+ 
+ 	handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA,
+ 				    EXT4_QUOTA_DEL_BLOCKS(dquot->dq_sb));
+ 	if (IS_ERR(handle)) {
+ 		/* Release dquot anyway to avoid endless cycle in dqput() */
+ 		dquot_release(dquot);
++		if (freeze_protected)
++			sb_end_intwrite(dquot->dq_sb);
+ 		return PTR_ERR(handle);
+ 	}
+ 	ret = dquot_release(dquot);
+@@ -6922,6 +6935,10 @@ static int ext4_release_dquot(struct dquot *dquot)
+ 	err = ext4_journal_stop(handle);
+ 	if (!ret)
+ 		ret = err;
++
++	if (freeze_protected)
++		sb_end_intwrite(dquot->dq_sb);
++
+ 	return ret;
+ }
+ 
+diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
+index 7647e9f6e1903a..6ff94cdf1515c5 100644
+--- a/fs/ext4/xattr.c
++++ b/fs/ext4/xattr.c
+@@ -1176,15 +1176,24 @@ ext4_xattr_inode_dec_ref_all(handle_t *handle, struct inode *parent,
+ {
+ 	struct inode *ea_inode;
+ 	struct ext4_xattr_entry *entry;
++	struct ext4_iloc iloc;
+ 	bool dirty = false;
+ 	unsigned int ea_ino;
+ 	int err;
+ 	int credits;
++	void *end;
++
++	if (block_csum)
++		end = (void *)bh->b_data + bh->b_size;
++	else {
++		ext4_get_inode_loc(parent, &iloc);
++		end = (void *)ext4_raw_inode(&iloc) + EXT4_SB(parent->i_sb)->s_inode_size;
++	}
+ 
+ 	/* One credit for dec ref on ea_inode, one for orphan list addition, */
+ 	credits = 2 + extra_credits;
+ 
+-	for (entry = first; !IS_LAST_ENTRY(entry);
++	for (entry = first; (void *)entry < end && !IS_LAST_ENTRY(entry);
+ 	     entry = EXT4_XATTR_NEXT(entry)) {
+ 		if (!entry->e_value_inum)
+ 			continue;
+diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
+index efda9a0229816b..86228f82f54d0c 100644
+--- a/fs/f2fs/checkpoint.c
++++ b/fs/f2fs/checkpoint.c
+@@ -1344,21 +1344,13 @@ static void update_ckpt_flags(struct f2fs_sb_info *sbi, struct cp_control *cpc)
+ 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
+ 	unsigned long flags;
+ 
+-	if (cpc->reason & CP_UMOUNT) {
+-		if (le32_to_cpu(ckpt->cp_pack_total_block_count) +
+-			NM_I(sbi)->nat_bits_blocks > BLKS_PER_SEG(sbi)) {
+-			clear_ckpt_flags(sbi, CP_NAT_BITS_FLAG);
+-			f2fs_notice(sbi, "Disable nat_bits due to no space");
+-		} else if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG) &&
+-						f2fs_nat_bitmap_enabled(sbi)) {
+-			f2fs_enable_nat_bits(sbi);
+-			set_ckpt_flags(sbi, CP_NAT_BITS_FLAG);
+-			f2fs_notice(sbi, "Rebuild and enable nat_bits");
+-		}
+-	}
+-
+ 	spin_lock_irqsave(&sbi->cp_lock, flags);
+ 
++	if ((cpc->reason & CP_UMOUNT) &&
++			le32_to_cpu(ckpt->cp_pack_total_block_count) >
++			sbi->blocks_per_seg - NM_I(sbi)->nat_bits_blocks)
++		disable_nat_bits(sbi, false);
++
+ 	if (cpc->reason & CP_TRIMMED)
+ 		__set_ckpt_flags(ckpt, CP_TRIMMED_FLAG);
+ 	else
+@@ -1541,8 +1533,7 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
+ 	start_blk = __start_cp_next_addr(sbi);
+ 
+ 	/* write nat bits */
+-	if ((cpc->reason & CP_UMOUNT) &&
+-			is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG)) {
++	if (enabled_nat_bits(sbi, cpc)) {
+ 		__u64 cp_ver = cur_cp_version(ckpt);
+ 		block_t blk;
+ 
+diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
+index b52df8aa95350e..1c783c2e4902ae 100644
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -2231,6 +2231,36 @@ static inline void f2fs_up_write(struct f2fs_rwsem *sem)
+ #endif
+ }
+ 
++static inline void disable_nat_bits(struct f2fs_sb_info *sbi, bool lock)
++{
++	unsigned long flags;
++	unsigned char *nat_bits;
++
++	/*
++	 * In order to re-enable nat_bits we would need to trigger fsck.f2fs
++	 * via set_sbi_flag(sbi, SBI_NEED_FSCK), but that would be costly,
++	 * so let's rely on a regular fsck or an unclean shutdown instead.
++	 */
++
++	if (lock)
++		spin_lock_irqsave(&sbi->cp_lock, flags);
++	__clear_ckpt_flags(F2FS_CKPT(sbi), CP_NAT_BITS_FLAG);
++	nat_bits = NM_I(sbi)->nat_bits;
++	NM_I(sbi)->nat_bits = NULL;
++	if (lock)
++		spin_unlock_irqrestore(&sbi->cp_lock, flags);
++
++	kvfree(nat_bits);
++}
++
++static inline bool enabled_nat_bits(struct f2fs_sb_info *sbi,
++					struct cp_control *cpc)
++{
++	bool set = is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG);
++
++	return (cpc) ? (cpc->reason & CP_UMOUNT) && set : set;
++}
++
+ static inline void f2fs_lock_op(struct f2fs_sb_info *sbi)
+ {
+ 	f2fs_down_read(&sbi->cp_rwsem);
+@@ -3671,7 +3701,6 @@ int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from);
+ int f2fs_truncate_xattr_node(struct inode *inode);
+ int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
+ 					unsigned int seq_id);
+-bool f2fs_nat_bitmap_enabled(struct f2fs_sb_info *sbi);
+ int f2fs_remove_inode_page(struct inode *inode);
+ struct page *f2fs_new_inode_page(struct inode *inode);
+ struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs);
+@@ -3696,7 +3725,6 @@ int f2fs_recover_xattr_data(struct inode *inode, struct page *page);
+ int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page);
+ int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
+ 			unsigned int segno, struct f2fs_summary_block *sum);
+-void f2fs_enable_nat_bits(struct f2fs_sb_info *sbi);
+ int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
+ int f2fs_build_node_manager(struct f2fs_sb_info *sbi);
+ void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi);
+diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
+index 10780e37fc7b68..a60db5e795a4c4 100644
+--- a/fs/f2fs/inode.c
++++ b/fs/f2fs/inode.c
+@@ -34,10 +34,8 @@ void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync)
+ 	if (f2fs_inode_dirtied(inode, sync))
+ 		return;
+ 
+-	if (f2fs_is_atomic_file(inode)) {
+-		set_inode_flag(inode, FI_ATOMIC_DIRTIED);
++	if (f2fs_is_atomic_file(inode))
+ 		return;
+-	}
+ 
+ 	mark_inode_dirty_sync(inode);
+ }
+@@ -751,8 +749,12 @@ void f2fs_update_inode_page(struct inode *inode)
+ 		if (err == -ENOENT)
+ 			return;
+ 
++		if (err == -EFSCORRUPTED)
++			goto stop_checkpoint;
++
+ 		if (err == -ENOMEM || ++count <= DEFAULT_RETRY_IO_COUNT)
+ 			goto retry;
++stop_checkpoint:
+ 		f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_UPDATE_INODE);
+ 		return;
+ 	}
+diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
+index 4d7b9fd6ef31ab..12c76e3d1cd49d 100644
+--- a/fs/f2fs/node.c
++++ b/fs/f2fs/node.c
+@@ -1134,7 +1134,14 @@ int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from)
+ 	trace_f2fs_truncate_inode_blocks_enter(inode, from);
+ 
+ 	level = get_node_path(inode, from, offset, noffset);
+-	if (level < 0) {
++	if (level <= 0) {
++		if (!level) {
++			level = -EFSCORRUPTED;
++			f2fs_err(sbi, "%s: inode ino=%lx has corrupted node block, from:%lu addrs:%u",
++					__func__, inode->i_ino,
++					from, ADDRS_PER_INODE(inode));
++			set_sbi_flag(sbi, SBI_NEED_FSCK);
++		}
+ 		trace_f2fs_truncate_inode_blocks_exit(inode, level);
+ 		return level;
+ 	}
+@@ -2270,24 +2277,6 @@ static void __move_free_nid(struct f2fs_sb_info *sbi, struct free_nid *i,
+ 	}
+ }
+ 
+-bool f2fs_nat_bitmap_enabled(struct f2fs_sb_info *sbi)
+-{
+-	struct f2fs_nm_info *nm_i = NM_I(sbi);
+-	unsigned int i;
+-	bool ret = true;
+-
+-	f2fs_down_read(&nm_i->nat_tree_lock);
+-	for (i = 0; i < nm_i->nat_blocks; i++) {
+-		if (!test_bit_le(i, nm_i->nat_block_bitmap)) {
+-			ret = false;
+-			break;
+-		}
+-	}
+-	f2fs_up_read(&nm_i->nat_tree_lock);
+-
+-	return ret;
+-}
+-
+ static void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid,
+ 							bool set, bool build)
+ {
+@@ -2966,23 +2955,7 @@ static void __adjust_nat_entry_set(struct nat_entry_set *nes,
+ 	list_add_tail(&nes->set_list, head);
+ }
+ 
+-static void __update_nat_bits(struct f2fs_nm_info *nm_i, unsigned int nat_ofs,
+-							unsigned int valid)
+-{
+-	if (valid == 0) {
+-		__set_bit_le(nat_ofs, nm_i->empty_nat_bits);
+-		__clear_bit_le(nat_ofs, nm_i->full_nat_bits);
+-		return;
+-	}
+-
+-	__clear_bit_le(nat_ofs, nm_i->empty_nat_bits);
+-	if (valid == NAT_ENTRY_PER_BLOCK)
+-		__set_bit_le(nat_ofs, nm_i->full_nat_bits);
+-	else
+-		__clear_bit_le(nat_ofs, nm_i->full_nat_bits);
+-}
+-
+-static void update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid,
++static void __update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid,
+ 						struct page *page)
+ {
+ 	struct f2fs_nm_info *nm_i = NM_I(sbi);
+@@ -2991,7 +2964,7 @@ static void update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid,
+ 	int valid = 0;
+ 	int i = 0;
+ 
+-	if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG))
++	if (!enabled_nat_bits(sbi, NULL))
+ 		return;
+ 
+ 	if (nat_index == 0) {
+@@ -3002,36 +2975,17 @@ static void update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid,
+ 		if (le32_to_cpu(nat_blk->entries[i].block_addr) != NULL_ADDR)
+ 			valid++;
+ 	}
+-
+-	__update_nat_bits(nm_i, nat_index, valid);
+-}
+-
+-void f2fs_enable_nat_bits(struct f2fs_sb_info *sbi)
+-{
+-	struct f2fs_nm_info *nm_i = NM_I(sbi);
+-	unsigned int nat_ofs;
+-
+-	f2fs_down_read(&nm_i->nat_tree_lock);
+-
+-	for (nat_ofs = 0; nat_ofs < nm_i->nat_blocks; nat_ofs++) {
+-		unsigned int valid = 0, nid_ofs = 0;
+-
+-		/* handle nid zero due to it should never be used */
+-		if (unlikely(nat_ofs == 0)) {
+-			valid = 1;
+-			nid_ofs = 1;
+-		}
+-
+-		for (; nid_ofs < NAT_ENTRY_PER_BLOCK; nid_ofs++) {
+-			if (!test_bit_le(nid_ofs,
+-					nm_i->free_nid_bitmap[nat_ofs]))
+-				valid++;
+-		}
+-
+-		__update_nat_bits(nm_i, nat_ofs, valid);
++	if (valid == 0) {
++		__set_bit_le(nat_index, nm_i->empty_nat_bits);
++		__clear_bit_le(nat_index, nm_i->full_nat_bits);
++		return;
+ 	}
+ 
+-	f2fs_up_read(&nm_i->nat_tree_lock);
++	__clear_bit_le(nat_index, nm_i->empty_nat_bits);
++	if (valid == NAT_ENTRY_PER_BLOCK)
++		__set_bit_le(nat_index, nm_i->full_nat_bits);
++	else
++		__clear_bit_le(nat_index, nm_i->full_nat_bits);
+ }
+ 
+ static int __flush_nat_entry_set(struct f2fs_sb_info *sbi,
+@@ -3050,7 +3004,7 @@ static int __flush_nat_entry_set(struct f2fs_sb_info *sbi,
+ 	 * #1, flush nat entries to journal in current hot data summary block.
+ 	 * #2, flush nat entries to nat page.
+ 	 */
+-	if ((cpc->reason & CP_UMOUNT) ||
++	if (enabled_nat_bits(sbi, cpc) ||
+ 		!__has_cursum_space(journal, set->entry_cnt, NAT_JOURNAL))
+ 		to_journal = false;
+ 
+@@ -3097,7 +3051,7 @@ static int __flush_nat_entry_set(struct f2fs_sb_info *sbi,
+ 	if (to_journal) {
+ 		up_write(&curseg->journal_rwsem);
+ 	} else {
+-		update_nat_bits(sbi, start_nid, page);
++		__update_nat_bits(sbi, start_nid, page);
+ 		f2fs_put_page(page, 1);
+ 	}
+ 
+@@ -3128,7 +3082,7 @@ int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
+ 	 * during unmount, let's flush nat_bits before checking
+ 	 * nat_cnt[DIRTY_NAT].
+ 	 */
+-	if (cpc->reason & CP_UMOUNT) {
++	if (enabled_nat_bits(sbi, cpc)) {
+ 		f2fs_down_write(&nm_i->nat_tree_lock);
+ 		remove_nats_in_journal(sbi);
+ 		f2fs_up_write(&nm_i->nat_tree_lock);
+@@ -3144,7 +3098,7 @@ int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
+ 	 * entries, remove all entries from journal and merge them
+ 	 * into nat entry set.
+ 	 */
+-	if (cpc->reason & CP_UMOUNT ||
++	if (enabled_nat_bits(sbi, cpc) ||
+ 		!__has_cursum_space(journal,
+ 			nm_i->nat_cnt[DIRTY_NAT], NAT_JOURNAL))
+ 		remove_nats_in_journal(sbi);
+@@ -3181,18 +3135,15 @@ static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
+ 	__u64 cp_ver = cur_cp_version(ckpt);
+ 	block_t nat_bits_addr;
+ 
++	if (!enabled_nat_bits(sbi, NULL))
++		return 0;
++
+ 	nm_i->nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8);
+ 	nm_i->nat_bits = f2fs_kvzalloc(sbi,
+ 			F2FS_BLK_TO_BYTES(nm_i->nat_bits_blocks), GFP_KERNEL);
+ 	if (!nm_i->nat_bits)
+ 		return -ENOMEM;
+ 
+-	nm_i->full_nat_bits = nm_i->nat_bits + 8;
+-	nm_i->empty_nat_bits = nm_i->full_nat_bits + nat_bits_bytes;
+-
+-	if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG))
+-		return 0;
+-
+ 	nat_bits_addr = __start_cp_addr(sbi) + BLKS_PER_SEG(sbi) -
+ 						nm_i->nat_bits_blocks;
+ 	for (i = 0; i < nm_i->nat_bits_blocks; i++) {
+@@ -3209,12 +3160,13 @@ static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
+ 
+ 	cp_ver |= (cur_cp_crc(ckpt) << 32);
+ 	if (cpu_to_le64(cp_ver) != *(__le64 *)nm_i->nat_bits) {
+-		clear_ckpt_flags(sbi, CP_NAT_BITS_FLAG);
+-		f2fs_notice(sbi, "Disable nat_bits due to incorrect cp_ver (%llu, %llu)",
+-			cp_ver, le64_to_cpu(*(__le64 *)nm_i->nat_bits));
++		disable_nat_bits(sbi, true);
+ 		return 0;
+ 	}
+ 
++	nm_i->full_nat_bits = nm_i->nat_bits + 8;
++	nm_i->empty_nat_bits = nm_i->full_nat_bits + nat_bits_bytes;
++
+ 	f2fs_notice(sbi, "Found nat_bits in checkpoint");
+ 	return 0;
+ }
+@@ -3225,7 +3177,7 @@ static inline void load_free_nid_bitmap(struct f2fs_sb_info *sbi)
+ 	unsigned int i = 0;
+ 	nid_t nid, last_nid;
+ 
+-	if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG))
++	if (!enabled_nat_bits(sbi, NULL))
+ 		return;
+ 
+ 	for (i = 0; i < nm_i->nat_blocks; i++) {
+diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
+index a622056f27f3a2..573cc4725e2e88 100644
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -1515,6 +1515,10 @@ int f2fs_inode_dirtied(struct inode *inode, bool sync)
+ 		inc_page_count(sbi, F2FS_DIRTY_IMETA);
+ 	}
+ 	spin_unlock(&sbi->inode_lock[DIRTY_META]);
++
++	if (!ret && f2fs_is_atomic_file(inode))
++		set_inode_flag(inode, FI_ATOMIC_DIRTIED);
++
+ 	return ret;
+ }
+ 
+diff --git a/fs/file.c b/fs/file.c
+index 4cb952541dd036..b6fb6d18ac3b9b 100644
+--- a/fs/file.c
++++ b/fs/file.c
+@@ -367,17 +367,25 @@ struct files_struct *dup_fd(struct files_struct *oldf, struct fd_range *punch_ho
+ 	old_fds = old_fdt->fd;
+ 	new_fds = new_fdt->fd;
+ 
++	/*
++	 * We may be racing against fd allocation from other threads using this
++	 * files_struct, despite holding ->file_lock.
++	 *
++	 * alloc_fd() might have already claimed a slot, while fd_install()
++	 * did not populate it yet. Note the latter operates locklessly, so
++	 * the file can show up as we are walking the array below.
++	 *
++	 * At the same time we know no files will disappear as all other
++	 * operations take the lock.
++	 *
++	 * Instead of trying to placate userspace racing with itself, we
++	 * ref the file if we see it and mark the fd slot as unused otherwise.
++	 */
+ 	for (i = open_files; i != 0; i--) {
+-		struct file *f = *old_fds++;
++		struct file *f = rcu_dereference_raw(*old_fds++);
+ 		if (f) {
+ 			get_file(f);
+ 		} else {
+-			/*
+-			 * The fd may be claimed in the fd bitmap but not yet
+-			 * instantiated in the files array if a sibling thread
+-			 * is partway through open().  So make sure that this
+-			 * fd is available to the new process.
+-			 */
+ 			__clear_open_fd(open_files - i, new_fdt);
+ 		}
+ 		rcu_assign_pointer(*new_fds++, f);
+@@ -637,7 +645,7 @@ struct file *file_close_fd_locked(struct files_struct *files, unsigned fd)
+ 		return NULL;
+ 
+ 	fd = array_index_nospec(fd, fdt->max_fds);
+-	file = fdt->fd[fd];
++	file = rcu_dereference_raw(fdt->fd[fd]);
+ 	if (file) {
+ 		rcu_assign_pointer(fdt->fd[fd], NULL);
+ 		__put_unused_fd(files, fd);
+@@ -1219,7 +1227,7 @@ __releases(&files->file_lock)
+ 	 */
+ 	fdt = files_fdtable(files);
+ 	fd = array_index_nospec(fd, fdt->max_fds);
+-	tofree = fdt->fd[fd];
++	tofree = rcu_dereference_raw(fdt->fd[fd]);
+ 	if (!tofree && fd_is_open(fd, fdt))
+ 		goto Ebusy;
+ 	get_file(file);
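The fs/file.c change swaps plain reads of fdt->fd[fd] for rcu_dereference_raw() because, as the new comment explains, fd_install() publishes entries locklessly even while the reader holds ->file_lock. In portable C terms, a slot that one thread stores without the lock must not be read with a plain load; a toy C11 analogue of the same discipline:

#include <stdatomic.h>
#include <stdio.h>

/* Toy analogue: the fd table is an array of atomically published pointers.
 * The installer stores without the lock, so even lock-holding readers must
 * use an atomic read (the rcu_dereference_raw() of the hunk above). */
static _Atomic(void *) fd_table[16];

static void install(int fd, void *file)
{
	atomic_store_explicit(&fd_table[fd], file, memory_order_release);
}

static void *lookup(int fd)
{
	return atomic_load_explicit(&fd_table[fd], memory_order_consume);
}

int main(void)
{
	int x = 42;

	install(3, &x);
	printf("slot 3 -> %p\n", lookup(3));
	return 0;
}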
+diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
+index 97f487c3d8fcf0..c073f5fb98594f 100644
+--- a/fs/jbd2/journal.c
++++ b/fs/jbd2/journal.c
+@@ -1884,7 +1884,6 @@ int jbd2_journal_update_sb_log_tail(journal_t *journal, tid_t tail_tid,
+ 
+ 	/* Log is no longer empty */
+ 	write_lock(&journal->j_state_lock);
+-	WARN_ON(!sb->s_sequence);
+ 	journal->j_flags &= ~JBD2_FLUSHED;
+ 	write_unlock(&journal->j_state_lock);
+ 
+diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
+index f9009e4f9ffd89..0e1019382cf519 100644
+--- a/fs/jfs/jfs_dmap.c
++++ b/fs/jfs/jfs_dmap.c
+@@ -204,6 +204,10 @@ int dbMount(struct inode *ipbmap)
+ 	bmp->db_aglevel = le32_to_cpu(dbmp_le->dn_aglevel);
+ 	bmp->db_agheight = le32_to_cpu(dbmp_le->dn_agheight);
+ 	bmp->db_agwidth = le32_to_cpu(dbmp_le->dn_agwidth);
++	if (!bmp->db_agwidth) {
++		err = -EINVAL;
++		goto err_release_metapage;
++	}
+ 	bmp->db_agstart = le32_to_cpu(dbmp_le->dn_agstart);
+ 	bmp->db_agl2size = le32_to_cpu(dbmp_le->dn_agl2size);
+ 	if (bmp->db_agl2size > L2MAXL2SIZE - L2MAXAG ||
+@@ -3403,7 +3407,7 @@ int dbExtendFS(struct inode *ipbmap, s64 blkno,	s64 nblocks)
+ 	oldl2agsize = bmp->db_agl2size;
+ 
+ 	bmp->db_agl2size = l2agsize;
+-	bmp->db_agsize = 1 << l2agsize;
++	bmp->db_agsize = (s64)1 << l2agsize;
+ 
+ 	/* compute new number of AG */
+ 	agno = bmp->db_numag;
+@@ -3666,8 +3670,8 @@ void dbFinalizeBmap(struct inode *ipbmap)
+ 	 * system size is not a multiple of the group size).
+ 	 */
+ 	inactfree = (inactags && ag_rem) ?
+-	    ((inactags - 1) << bmp->db_agl2size) + ag_rem
+-	    : inactags << bmp->db_agl2size;
++	    (((s64)inactags - 1) << bmp->db_agl2size) + ag_rem
++	    : ((s64)inactags << bmp->db_agl2size);
+ 
+ 	/* determine how many free blocks are in the active
+ 	 * allocation groups plus the average number of free blocks
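Both jfs changes above fix the same C pitfall: 1 << l2agsize and inactags << db_agl2size are evaluated in plain int, so the result overflows before it is widened for the 64-bit assignment. A standalone demonstration (the shift amount is just a plausible example):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int l2agsize = 34;  /* conceivable for a very large volume */

	/* int-width shift: undefined behaviour, typically truncated */
	int64_t bad  = 1 << l2agsize;
	/* widened first, as the (s64) casts in the hunks do */
	int64_t good = (int64_t)1 << l2agsize;

	printf("bad=%lld good=%lld\n", (long long)bad, (long long)good);
	return 0;
}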
+diff --git a/fs/jfs/jfs_imap.c b/fs/jfs/jfs_imap.c
+index a360b24ed320c0..8ddc14c56501ac 100644
+--- a/fs/jfs/jfs_imap.c
++++ b/fs/jfs/jfs_imap.c
+@@ -102,7 +102,7 @@ int diMount(struct inode *ipimap)
+ 	 * allocate/initialize the in-memory inode map control structure
+ 	 */
+ 	/* allocate the in-memory inode map control structure. */
+-	imap = kmalloc(sizeof(struct inomap), GFP_KERNEL);
++	imap = kzalloc(sizeof(struct inomap), GFP_KERNEL);
+ 	if (imap == NULL)
+ 		return -ENOMEM;
+ 
+@@ -456,7 +456,7 @@ struct inode *diReadSpecial(struct super_block *sb, ino_t inum, int secondary)
+ 	dp += inum % 8;		/* 8 inodes per 4K page */
+ 
+ 	/* copy on-disk inode to in-memory inode */
+-	if ((copy_from_dinode(dp, ip)) != 0) {
++	if ((copy_from_dinode(dp, ip) != 0) || (ip->i_nlink == 0)) {
+ 		/* handle bad return by returning NULL for ip */
+ 		set_nlink(ip, 1);	/* Don't want iput() deleting it */
+ 		iput(ip);
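Two separate hardenings above: diMount() now zero-allocates its control structure, and diReadSpecial() rejects on-disk inodes with a zero link count. The first matters because error paths can tear down a map that was never fully initialized; the userspace analogue is malloc() versus calloc(), with a made-up struct standing in for struct inomap:

#include <stdio.h>
#include <stdlib.h>

struct inomap_like {   /* hypothetical stand-in for struct inomap */
	int   im_nfree;
	void *im_metadata;
};

int main(void)
{
	/* malloc() leaves the fields as garbage; an early-exit path that
	 * inspects or frees them would act on uninitialized memory. */
	struct inomap_like *m1 = malloc(sizeof(*m1));

	/* calloc(), like kzalloc(), guarantees zeroed memory, so partially
	 * initialized error paths see NULL/0 instead of junk. */
	struct inomap_like *m2 = calloc(1, sizeof(*m2));

	if (m2)
		printf("im_metadata starts as %p\n", m2->im_metadata);
	free(m1);
	free(m2);
	return 0;
}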
+diff --git a/fs/namespace.c b/fs/namespace.c
+index 73da51ac5a0349..f898de3a6f7056 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -1986,6 +1986,7 @@ static void warn_mandlock(void)
+ static int can_umount(const struct path *path, int flags)
+ {
+ 	struct mount *mnt = real_mount(path->mnt);
++	struct super_block *sb = path->dentry->d_sb;
+ 
+ 	if (!may_mount())
+ 		return -EPERM;
+@@ -1995,7 +1996,7 @@ static int can_umount(const struct path *path, int flags)
+ 		return -EINVAL;
+ 	if (mnt->mnt.mnt_flags & MNT_LOCKED) /* Check optimistically */
+ 		return -EINVAL;
+-	if (flags & MNT_FORCE && !capable(CAP_SYS_ADMIN))
++	if (flags & MNT_FORCE && !ns_capable(sb->s_user_ns, CAP_SYS_ADMIN))
+ 		return -EPERM;
+ 	return 0;
+ }
+diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
+index 88c03e18257323..127626aba7a234 100644
+--- a/fs/nfsd/nfs4callback.c
++++ b/fs/nfsd/nfs4callback.c
+@@ -605,7 +605,7 @@ static int nfs4_xdr_dec_cb_getattr(struct rpc_rqst *rqstp,
+ 		return status;
+ 
+ 	status = decode_cb_op_status(xdr, OP_CB_GETATTR, &cb->cb_status);
+-	if (status)
++	if (unlikely(status || cb->cb_status))
+ 		return status;
+ 	if (xdr_stream_decode_uint32_array(xdr, bitmap, 3) < 0)
+ 		return -NFSERR_BAD_XDR;
+diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
+index e83629f396044b..2e835e7c107ee0 100644
+--- a/fs/nfsd/nfsctl.c
++++ b/fs/nfsd/nfsctl.c
+@@ -2244,8 +2244,14 @@ static __net_init int nfsd_net_init(struct net *net)
+ 					  NFSD_STATS_COUNTERS_NUM);
+ 	if (retval)
+ 		goto out_repcache_error;
++
+ 	memset(&nn->nfsd_svcstats, 0, sizeof(nn->nfsd_svcstats));
+ 	nn->nfsd_svcstats.program = &nfsd_programs[0];
++	if (!nfsd_proc_stat_init(net)) {
++		retval = -ENOMEM;
++		goto out_proc_error;
++	}
++
+ 	for (i = 0; i < sizeof(nn->nfsd_versions); i++)
+ 		nn->nfsd_versions[i] = nfsd_support_version(i);
+ 	for (i = 0; i < sizeof(nn->nfsd4_minorversions); i++)
+@@ -2255,12 +2261,13 @@ static __net_init int nfsd_net_init(struct net *net)
+ 	nfsd4_init_leases_net(nn);
+ 	get_random_bytes(&nn->siphash_key, sizeof(nn->siphash_key));
+ 	seqlock_init(&nn->writeverf_lock);
+-	nfsd_proc_stat_init(net);
+ #if IS_ENABLED(CONFIG_NFS_LOCALIO)
+ 	INIT_LIST_HEAD(&nn->local_clients);
+ #endif
+ 	return 0;
+ 
++out_proc_error:
++	percpu_counter_destroy_many(nn->counter, NFSD_STATS_COUNTERS_NUM);
+ out_repcache_error:
+ 	nfsd_idmap_shutdown(net);
+ out_idmap_error:
+diff --git a/fs/nfsd/stats.c b/fs/nfsd/stats.c
+index bb22893f1157e4..f7eaf95e20fc87 100644
+--- a/fs/nfsd/stats.c
++++ b/fs/nfsd/stats.c
+@@ -73,11 +73,11 @@ static int nfsd_show(struct seq_file *seq, void *v)
+ 
+ DEFINE_PROC_SHOW_ATTRIBUTE(nfsd);
+ 
+-void nfsd_proc_stat_init(struct net *net)
++struct proc_dir_entry *nfsd_proc_stat_init(struct net *net)
+ {
+ 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ 
+-	svc_proc_register(net, &nn->nfsd_svcstats, &nfsd_proc_ops);
++	return svc_proc_register(net, &nn->nfsd_svcstats, &nfsd_proc_ops);
+ }
+ 
+ void nfsd_proc_stat_shutdown(struct net *net)
+diff --git a/fs/nfsd/stats.h b/fs/nfsd/stats.h
+index 04aacb6c36e257..e4efb0e4e56d46 100644
+--- a/fs/nfsd/stats.h
++++ b/fs/nfsd/stats.h
+@@ -10,7 +10,7 @@
+ #include <uapi/linux/nfsd/stats.h>
+ #include <linux/percpu_counter.h>
+ 
+-void nfsd_proc_stat_init(struct net *net);
++struct proc_dir_entry *nfsd_proc_stat_init(struct net *net);
+ void nfsd_proc_stat_shutdown(struct net *net);
+ 
+ static inline void nfsd_stats_rc_hits_inc(struct nfsd_net *nn)
+diff --git a/fs/smb/client/cifsencrypt.c b/fs/smb/client/cifsencrypt.c
+index 7a43daacc81595..7c61c1e944c7ae 100644
+--- a/fs/smb/client/cifsencrypt.c
++++ b/fs/smb/client/cifsencrypt.c
+@@ -702,18 +702,12 @@ cifs_crypto_secmech_release(struct TCP_Server_Info *server)
+ 	cifs_free_hash(&server->secmech.md5);
+ 	cifs_free_hash(&server->secmech.sha512);
+ 
+-	if (!SERVER_IS_CHAN(server)) {
+-		if (server->secmech.enc) {
+-			crypto_free_aead(server->secmech.enc);
+-			server->secmech.enc = NULL;
+-		}
+-
+-		if (server->secmech.dec) {
+-			crypto_free_aead(server->secmech.dec);
+-			server->secmech.dec = NULL;
+-		}
+-	} else {
++	if (server->secmech.enc) {
++		crypto_free_aead(server->secmech.enc);
+ 		server->secmech.enc = NULL;
++	}
++	if (server->secmech.dec) {
++		crypto_free_aead(server->secmech.dec);
+ 		server->secmech.dec = NULL;
+ 	}
+ }
+diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
+index 8b8475b4e26277..3aaf5cdce1b720 100644
+--- a/fs/smb/client/connect.c
++++ b/fs/smb/client/connect.c
+@@ -1722,6 +1722,7 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx,
+ 	/* Grab netns reference for this server. */
+ 	cifs_set_net_ns(tcp_ses, get_net(current->nsproxy->net_ns));
+ 
++	tcp_ses->sign = ctx->sign;
+ 	tcp_ses->conn_id = atomic_inc_return(&tcpSesNextId);
+ 	tcp_ses->noblockcnt = ctx->rootfs;
+ 	tcp_ses->noblocksnd = ctx->noblocksnd || ctx->rootfs;
+@@ -2474,6 +2475,8 @@ static int match_tcon(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
+ 		return 0;
+ 	if (tcon->nodelete != ctx->nodelete)
+ 		return 0;
++	if (tcon->posix_extensions != ctx->linux_ext)
++		return 0;
+ 	return 1;
+ }
+ 
+diff --git a/fs/smb/client/fs_context.c b/fs/smb/client/fs_context.c
+index f8bc1da3003781..1f1f4586673a7a 100644
+--- a/fs/smb/client/fs_context.c
++++ b/fs/smb/client/fs_context.c
+@@ -1287,6 +1287,11 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
+ 		ctx->closetimeo = HZ * result.uint_32;
+ 		break;
+ 	case Opt_echo_interval:
++		if (result.uint_32 < SMB_ECHO_INTERVAL_MIN ||
++		    result.uint_32 > SMB_ECHO_INTERVAL_MAX) {
++			cifs_errorf(fc, "echo interval is out of bounds\n");
++			goto cifs_parse_mount_err;
++		}
+ 		ctx->echo_interval = result.uint_32;
+ 		break;
+ 	case Opt_snapshot:
+diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c
+index 97151715d1a413..31fce0a1b57191 100644
+--- a/fs/smb/client/inode.c
++++ b/fs/smb/client/inode.c
+@@ -1206,6 +1206,16 @@ static int reparse_info_to_fattr(struct cifs_open_info_data *data,
+ 				cifs_create_junction_fattr(fattr, sb);
+ 				goto out;
+ 			}
++			/*
++			 * If the reparse point is unsupported by the Linux SMB
++			 * client, let the SMB server process it: mask the
++			 * -EOPNOTSUPP error code so that the Linux SMB client
++			 * still sends the SMB OPEN request to the server. If the
++			 * server does not support this reparse point either, it
++			 * will return an error when the path is opened.
++			 */
++			if (rc == -EOPNOTSUPP)
++				rc = 0;
+ 		}
+ 		break;
+ 	}
+diff --git a/fs/smb/client/reparse.c b/fs/smb/client/reparse.c
+index bb246ef0458fb5..b6556fe3dfa11a 100644
+--- a/fs/smb/client/reparse.c
++++ b/fs/smb/client/reparse.c
+@@ -633,8 +633,6 @@ int parse_reparse_point(struct reparse_data_buffer *buf,
+ 			const char *full_path,
+ 			bool unicode, struct cifs_open_info_data *data)
+ {
+-	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
+-
+ 	data->reparse.buf = buf;
+ 
+ 	/* See MS-FSCC 2.1.2 */
+@@ -658,8 +656,6 @@ int parse_reparse_point(struct reparse_data_buffer *buf,
+ 		}
+ 		return 0;
+ 	default:
+-		cifs_tcon_dbg(VFS | ONCE, "unhandled reparse tag: 0x%08x\n",
+-			      le32_to_cpu(buf->ReparseTag));
+ 		return -EOPNOTSUPP;
+ 	}
+ }
+diff --git a/fs/smb/client/sess.c b/fs/smb/client/sess.c
+index 95e14977baeab0..2426fa7405173c 100644
+--- a/fs/smb/client/sess.c
++++ b/fs/smb/client/sess.c
+@@ -550,6 +550,13 @@ cifs_ses_add_channel(struct cifs_ses *ses,
+ 	ctx->sockopt_tcp_nodelay = ses->server->tcp_nodelay;
+ 	ctx->echo_interval = ses->server->echo_interval / HZ;
+ 	ctx->max_credits = ses->server->max_credits;
++	ctx->min_offload = ses->server->min_offload;
++	ctx->compress = ses->server->compression.requested;
++	ctx->dfs_conn = ses->server->dfs_conn;
++	ctx->ignore_signature = ses->server->ignore_signature;
++	ctx->leaf_fullpath = ses->server->leaf_fullpath;
++	ctx->rootfs = ses->server->noblockcnt;
++	ctx->retrans = ses->server->retrans;
+ 
+ 	/*
+ 	 * This will be used for encoding/decoding user/domain/pw
+diff --git a/fs/smb/client/smb2misc.c b/fs/smb/client/smb2misc.c
+index f3c4b70b77b94f..cddf273c14aed7 100644
+--- a/fs/smb/client/smb2misc.c
++++ b/fs/smb/client/smb2misc.c
+@@ -816,11 +816,12 @@ smb2_handle_cancelled_close(struct cifs_tcon *tcon, __u64 persistent_fid,
+ 		WARN_ONCE(tcon->tc_count < 0, "tcon refcount is negative");
+ 		spin_unlock(&cifs_tcp_ses_lock);
+ 
+-		if (tcon->ses)
++		if (tcon->ses) {
+ 			server = tcon->ses->server;
+-
+-		cifs_server_dbg(FYI, "tid=0x%x: tcon is closing, skipping async close retry of fid %llu %llu\n",
+-				tcon->tid, persistent_fid, volatile_fid);
++			cifs_server_dbg(FYI,
++					"tid=0x%x: tcon is closing, skipping async close retry of fid %llu %llu\n",
++					tcon->tid, persistent_fid, volatile_fid);
++		}
+ 
+ 		return 0;
+ 	}
+diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
+index 516be8c0b2a9b4..590b70d71694be 100644
+--- a/fs/smb/client/smb2ops.c
++++ b/fs/smb/client/smb2ops.c
+@@ -4576,9 +4576,9 @@ decrypt_raw_data(struct TCP_Server_Info *server, char *buf,
+ 			return rc;
+ 		}
+ 	} else {
+-		if (unlikely(!server->secmech.dec))
+-			return -EIO;
+-
++		rc = smb3_crypto_aead_allocate(server);
++		if (unlikely(rc))
++			return rc;
+ 		tfm = server->secmech.dec;
+ 	}
+ 
+diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
+index 75b13175a2e781..1a7b82664255ab 100644
+--- a/fs/smb/client/smb2pdu.c
++++ b/fs/smb/client/smb2pdu.c
+@@ -1269,15 +1269,8 @@ SMB2_negotiate(const unsigned int xid,
+ 			cifs_server_dbg(VFS, "Missing expected negotiate contexts\n");
+ 	}
+ 
+-	if (server->cipher_type && !rc) {
+-		if (!SERVER_IS_CHAN(server)) {
+-			rc = smb3_crypto_aead_allocate(server);
+-		} else {
+-			/* For channels, just reuse the primary server crypto secmech. */
+-			server->secmech.enc = server->primary_server->secmech.enc;
+-			server->secmech.dec = server->primary_server->secmech.dec;
+-		}
+-	}
++	if (server->cipher_type && !rc)
++		rc = smb3_crypto_aead_allocate(server);
+ neg_exit:
+ 	free_rsp_buf(resp_buftype, rsp);
+ 	return rc;
+diff --git a/fs/udf/inode.c b/fs/udf/inode.c
+index 70c907fe8af9eb..4386dd845e4009 100644
+--- a/fs/udf/inode.c
++++ b/fs/udf/inode.c
+@@ -810,6 +810,7 @@ static int inode_getblk(struct inode *inode, struct udf_map_rq *map)
+ 		}
+ 		map->oflags = UDF_BLK_MAPPED;
+ 		map->pblk = udf_get_lb_pblock(inode->i_sb, &eloc, offset);
++		ret = 0;
+ 		goto out_free;
+ 	}
+ 
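The one-line udf fix above (ret = 0 before the jump to out_free) closes a common bug class: a success path reaches the shared exit while ret still holds a leftover value from an earlier helper. A minimal compilable illustration with invented names:

#include <stdio.h>

/* Returns a positive "more work" code, or a negative error. */
static int helper(int key, int *value)
{
	if (key < 0)
		return -22;
	*value = key * 2;
	return 1;
}

static int map_block_buggy(int key, int *out)
{
	int ret = helper(key, out);

	if (ret < 0)
		return ret;
	/* Success, but we forgot "ret = 0;" here, so the helper's
	 * positive code leaks to the caller as if something failed. */
	return ret;
}

int main(void)
{
	int v = 0;

	printf("map_block_buggy() -> %d (caller expected 0)\n",
	       map_block_buggy(3, &v));
	return 0;
}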
+diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
+index 7c0bd0b55f8800..199ec6d10b62af 100644
+--- a/fs/userfaultfd.c
++++ b/fs/userfaultfd.c
+@@ -395,32 +395,6 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
+ 	if (!(vmf->flags & FAULT_FLAG_USER) && (ctx->flags & UFFD_USER_MODE_ONLY))
+ 		goto out;
+ 
+-	/*
+-	 * If it's already released don't get it. This avoids to loop
+-	 * in __get_user_pages if userfaultfd_release waits on the
+-	 * caller of handle_userfault to release the mmap_lock.
+-	 */
+-	if (unlikely(READ_ONCE(ctx->released))) {
+-		/*
+-		 * Don't return VM_FAULT_SIGBUS in this case, so a non
+-		 * cooperative manager can close the uffd after the
+-		 * last UFFDIO_COPY, without risking to trigger an
+-		 * involuntary SIGBUS if the process was starting the
+-		 * userfaultfd while the userfaultfd was still armed
+-		 * (but after the last UFFDIO_COPY). If the uffd
+-		 * wasn't already closed when the userfault reached
+-		 * this point, that would normally be solved by
+-		 * userfaultfd_must_wait returning 'false'.
+-		 *
+-		 * If we were to return VM_FAULT_SIGBUS here, the non
+-		 * cooperative manager would be instead forced to
+-		 * always call UFFDIO_UNREGISTER before it can safely
+-		 * close the uffd.
+-		 */
+-		ret = VM_FAULT_NOPAGE;
+-		goto out;
+-	}
+-
+ 	/*
+ 	 * Check that we can return VM_FAULT_RETRY.
+ 	 *
+@@ -457,6 +431,31 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
+ 	if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
+ 		goto out;
+ 
++	if (unlikely(READ_ONCE(ctx->released))) {
++		/*
++		 * If a concurrent release is detected, do not return
++		 * VM_FAULT_SIGBUS or VM_FAULT_NOPAGE, but instead always
++		 * return VM_FAULT_RETRY with lock released proactively.
++		 *
++		 * If we were to return VM_FAULT_SIGBUS here, the non
++		 * cooperative manager would be instead forced to
++		 * always call UFFDIO_UNREGISTER before it can safely
++		 * close the uffd, to avoid an involuntary SIGBUS being triggered.
++		 *
++		 * If we were to return VM_FAULT_NOPAGE, it would work for
++		 * the fault path, in which the lock will be released
++		 * later.  However for GUP, faultin_page() does nothing
++		 * special on NOPAGE, so GUP would spin retrying without
++		 * releasing the mmap read lock, causing possible livelock.
++		 *
++		 * Here only VM_FAULT_RETRY would make sure the mmap lock
++		 * be released immediately, so that the thread concurrently
++		 * releasing the userfault would always make progress.
++		 */
++		release_fault_lock(vmf);
++		goto out;
++	}
++
+ 	/* take the reference before dropping the mmap_lock */
+ 	userfaultfd_ctx_get(ctx);
+ 
+diff --git a/include/drm/drm_kunit_helpers.h b/include/drm/drm_kunit_helpers.h
+index afdd46ef04f70d..c835f113055dc4 100644
+--- a/include/drm/drm_kunit_helpers.h
++++ b/include/drm/drm_kunit_helpers.h
+@@ -120,6 +120,9 @@ drm_kunit_helper_create_crtc(struct kunit *test,
+ 			     const struct drm_crtc_funcs *funcs,
+ 			     const struct drm_crtc_helper_funcs *helper_funcs);
+ 
++int drm_kunit_add_mode_destroy_action(struct kunit *test,
++				      struct drm_display_mode *mode);
++
+ struct drm_display_mode *
+ drm_kunit_display_mode_from_cea_vic(struct kunit *test, struct drm_device *dev,
+ 				    u8 video_code);
+diff --git a/include/drm/intel/i915_pciids.h b/include/drm/intel/i915_pciids.h
+index f35534522d3338..dacea289acaf5a 100644
+--- a/include/drm/intel/i915_pciids.h
++++ b/include/drm/intel/i915_pciids.h
+@@ -809,6 +809,9 @@
+ 	MACRO__(0xE20B, ## __VA_ARGS__), \
+ 	MACRO__(0xE20C, ## __VA_ARGS__), \
+ 	MACRO__(0xE20D, ## __VA_ARGS__), \
+-	MACRO__(0xE212, ## __VA_ARGS__)
++	MACRO__(0xE210, ## __VA_ARGS__), \
++	MACRO__(0xE212, ## __VA_ARGS__), \
++	MACRO__(0xE215, ## __VA_ARGS__), \
++	MACRO__(0xE216, ## __VA_ARGS__)
+ 
+ #endif /* _I915_PCIIDS_H */
+diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
+index 38b2af336e4a01..252eed781a6e94 100644
+--- a/include/linux/cgroup-defs.h
++++ b/include/linux/cgroup-defs.h
+@@ -711,6 +711,7 @@ struct cgroup_subsys {
+ 	void (*css_released)(struct cgroup_subsys_state *css);
+ 	void (*css_free)(struct cgroup_subsys_state *css);
+ 	void (*css_reset)(struct cgroup_subsys_state *css);
++	void (*css_killed)(struct cgroup_subsys_state *css);
+ 	void (*css_rstat_flush)(struct cgroup_subsys_state *css, int cpu);
+ 	int (*css_extra_stat_show)(struct seq_file *seq,
+ 				   struct cgroup_subsys_state *css);
+diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
+index f8ef47f8a634df..fc1324ed597d6b 100644
+--- a/include/linux/cgroup.h
++++ b/include/linux/cgroup.h
+@@ -343,7 +343,7 @@ static inline u64 cgroup_id(const struct cgroup *cgrp)
+  */
+ static inline bool css_is_dying(struct cgroup_subsys_state *css)
+ {
+-	return !(css->flags & CSS_NO_REF) && percpu_ref_is_dying(&css->refcnt);
++	return css->flags & CSS_DYING;
+ }
+ 
+ static inline void cgroup_get(struct cgroup *cgrp)
+diff --git a/include/linux/hid.h b/include/linux/hid.h
+index dd33423012538d..018de72505b073 100644
+--- a/include/linux/hid.h
++++ b/include/linux/hid.h
+@@ -1221,12 +1221,6 @@ unsigned long hid_lookup_quirk(const struct hid_device *hdev);
+ int hid_quirks_init(char **quirks_param, __u16 bus, int count);
+ void hid_quirks_exit(__u16 bus);
+ 
+-#ifdef CONFIG_HID_PID
+-int hid_pidff_init(struct hid_device *hid);
+-#else
+-#define hid_pidff_init NULL
+-#endif
+-
+ #define dbg_hid(fmt, ...) pr_debug("%s: " fmt, __FILE__, ##__VA_ARGS__)
+ 
+ #define hid_err(hid, fmt, ...)				\
+diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
+index 4b9ba523978d20..5ce332fc6ff507 100644
+--- a/include/linux/io_uring_types.h
++++ b/include/linux/io_uring_types.h
+@@ -457,6 +457,7 @@ enum {
+ 	REQ_F_SKIP_LINK_CQES_BIT,
+ 	REQ_F_SINGLE_POLL_BIT,
+ 	REQ_F_DOUBLE_POLL_BIT,
++	REQ_F_MULTISHOT_BIT,
+ 	REQ_F_APOLL_MULTISHOT_BIT,
+ 	REQ_F_CLEAR_POLLIN_BIT,
+ 	REQ_F_HASH_LOCKED_BIT,
+@@ -530,6 +531,8 @@ enum {
+ 	REQ_F_SINGLE_POLL	= IO_REQ_FLAG(REQ_F_SINGLE_POLL_BIT),
+ 	/* double poll may active */
+ 	REQ_F_DOUBLE_POLL	= IO_REQ_FLAG(REQ_F_DOUBLE_POLL_BIT),
++	/* request posts multiple completions, should be set at prep time */
++	REQ_F_MULTISHOT		= IO_REQ_FLAG(REQ_F_MULTISHOT_BIT),
+ 	/* fast poll multishot mode */
+ 	REQ_F_APOLL_MULTISHOT	= IO_REQ_FLAG(REQ_F_APOLL_MULTISHOT_BIT),
+ 	/* recvmsg special flag, clear EPOLLIN */
+diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
+index 2c66ca21801c17..15206450929d5e 100644
+--- a/include/linux/kvm_host.h
++++ b/include/linux/kvm_host.h
+@@ -2330,7 +2330,7 @@ static inline bool kvm_is_visible_memslot(struct kvm_memory_slot *memslot)
+ struct kvm_vcpu *kvm_get_running_vcpu(void);
+ struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
+ 
+-#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
++#if IS_ENABLED(CONFIG_HAVE_KVM_IRQ_BYPASS)
+ bool kvm_arch_has_irq_bypass(void);
+ int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *,
+ 			   struct irq_bypass_producer *);
+diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
+index 48c66b84668281..a9244291f5067a 100644
+--- a/include/linux/page-flags.h
++++ b/include/linux/page-flags.h
+@@ -1111,6 +1111,12 @@ static inline bool is_page_hwpoison(const struct page *page)
+ 	return folio_test_hugetlb(folio) && PageHWPoison(&folio->page);
+ }
+ 
++static inline bool folio_contain_hwpoisoned_page(struct folio *folio)
++{
++	return folio_test_hwpoison(folio) ||
++	    (folio_test_large(folio) && folio_test_has_hwpoisoned(folio));
++}
++
+ bool is_free_buddy_page(const struct page *page);
+ 
+ PAGEFLAG(Isolated, isolated, PF_ANY);
+diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
+index c9dc15355f1bac..c395b3c5c05cfb 100644
+--- a/include/linux/pci_ids.h
++++ b/include/linux/pci_ids.h
+@@ -2605,6 +2605,8 @@
+ 
+ #define PCI_VENDOR_ID_ZHAOXIN		0x1d17
+ 
++#define PCI_VENDOR_ID_ROCKCHIP		0x1d87
++
+ #define PCI_VENDOR_ID_HYGON		0x1d94
+ 
+ #define PCI_VENDOR_ID_META		0x1d9b
+diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
+index 347901525a46ae..0997077bcc52ad 100644
+--- a/include/linux/perf_event.h
++++ b/include/linux/perf_event.h
+@@ -170,6 +170,12 @@ struct hw_perf_event {
+ 		};
+ 		struct { /* aux / Intel-PT */
+ 			u64		aux_config;
++			/*
++			 * For AUX area events, aux_paused cannot be a state
++			 * flag because it can be updated asynchronously to
++			 * state.
++			 */
++			unsigned int	aux_paused;
+ 		};
+ 		struct { /* software */
+ 			struct hrtimer	hrtimer;
+@@ -294,6 +300,7 @@ struct perf_event_pmu_context;
+ #define PERF_PMU_CAP_NO_EXCLUDE			0x0040
+ #define PERF_PMU_CAP_AUX_OUTPUT			0x0080
+ #define PERF_PMU_CAP_EXTENDED_HW_TYPE		0x0100
++#define PERF_PMU_CAP_AUX_PAUSE			0x0200
+ 
+ /**
+  * pmu::scope
+@@ -384,6 +391,8 @@ struct pmu {
+ #define PERF_EF_START	0x01		/* start the counter when adding    */
+ #define PERF_EF_RELOAD	0x02		/* reload the counter when starting */
+ #define PERF_EF_UPDATE	0x04		/* update the counter when stopping */
++#define PERF_EF_PAUSE	0x08		/* AUX area event, pause tracing */
++#define PERF_EF_RESUME	0x10		/* AUX area event, resume tracing */
+ 
+ 	/*
+ 	 * Adds/Removes a counter to/from the PMU, can be done inside a
+@@ -423,6 +432,18 @@ struct pmu {
+ 	 *
+ 	 * ->start() with PERF_EF_RELOAD will reprogram the counter
+ 	 *  value, must be preceded by a ->stop() with PERF_EF_UPDATE.
++	 *
++	 * ->stop() with PERF_EF_PAUSE will stop as simply as possible. Will not
++	 * overlap another ->stop() with PERF_EF_PAUSE nor ->start() with
++	 * PERF_EF_RESUME.
++	 *
++	 * ->start() with PERF_EF_RESUME will start as simply as possible but
++	 * only if the counter is not otherwise stopped. Will not overlap
++	 * another ->start() with PERF_EF_RESUME nor ->stop() with
++	 * PERF_EF_PAUSE.
++	 *
++	 * Notably, PERF_EF_PAUSE/PERF_EF_RESUME *can* be concurrent with other
++	 * ->stop()/->start() invocations, just not itself.
+ 	 */
+ 	void (*start)			(struct perf_event *event, int flags);
+ 	void (*stop)			(struct perf_event *event, int flags);
+@@ -652,13 +673,15 @@ struct swevent_hlist {
+ 	struct rcu_head			rcu_head;
+ };
+ 
+-#define PERF_ATTACH_CONTEXT	0x01
+-#define PERF_ATTACH_GROUP	0x02
+-#define PERF_ATTACH_TASK	0x04
+-#define PERF_ATTACH_TASK_DATA	0x08
+-#define PERF_ATTACH_ITRACE	0x10
+-#define PERF_ATTACH_SCHED_CB	0x20
+-#define PERF_ATTACH_CHILD	0x40
++#define PERF_ATTACH_CONTEXT	0x0001
++#define PERF_ATTACH_GROUP	0x0002
++#define PERF_ATTACH_TASK	0x0004
++#define PERF_ATTACH_TASK_DATA	0x0008
++#define PERF_ATTACH_ITRACE	0x0010
++#define PERF_ATTACH_SCHED_CB	0x0020
++#define PERF_ATTACH_CHILD	0x0040
++#define PERF_ATTACH_EXCLUSIVE	0x0080
++#define PERF_ATTACH_CALLCHAIN	0x0100
+ 
+ struct bpf_prog;
+ struct perf_cgroup;
+@@ -810,7 +833,6 @@ struct perf_event {
+ 	struct irq_work			pending_disable_irq;
+ 	struct callback_head		pending_task;
+ 	unsigned int			pending_work;
+-	struct rcuwait			pending_work_wait;
+ 
+ 	atomic_t			event_limit;
+ 
+@@ -1685,6 +1707,13 @@ static inline bool has_aux(struct perf_event *event)
+ 	return event->pmu->setup_aux;
+ }
+ 
++static inline bool has_aux_action(struct perf_event *event)
++{
++	return event->attr.aux_sample_size ||
++	       event->attr.aux_pause ||
++	       event->attr.aux_resume;
++}
++
+ static inline bool is_write_backward(struct perf_event *event)
+ {
+ 	return !!event->attr.write_backward;
+diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
+index 8df030ebd86286..be6ca84db4d85c 100644
+--- a/include/linux/pgtable.h
++++ b/include/linux/pgtable.h
+@@ -201,10 +201,14 @@ static inline int pmd_dirty(pmd_t pmd)
+  * hazard could result in the direct mode hypervisor case, since the actual
+  * write to the page tables may not yet have taken place, so reads though
+  * a raw PTE pointer after it has been modified are not guaranteed to be
+- * up to date.  This mode can only be entered and left under the protection of
+- * the page table locks for all page tables which may be modified.  In the UP
+- * case, this is required so that preemption is disabled, and in the SMP case,
+- * it must synchronize the delayed page table writes properly on other CPUs.
++ * up to date.
++ *
++ * In the general case, no lock is guaranteed to be held between entry and exit
++ * of the lazy mode. So the implementation must assume preemption may be enabled
++ * and cpu migration is possible; it must take steps to be robust against this.
++ * (In practice, for user PTE updates, the appropriate page table lock(s) are
++ * held, but for kernel PTE updates, no lock is held). Nesting is not permitted
++ * and the mode cannot be used in interrupt context.
+  */
+ #ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
+ #define arch_enter_lazy_mmu_mode()	do {} while (0)
+@@ -266,7 +270,6 @@ static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
+ {
+ 	page_table_check_ptes_set(mm, ptep, pte, nr);
+ 
+-	arch_enter_lazy_mmu_mode();
+ 	for (;;) {
+ 		set_pte(ptep, pte);
+ 		if (--nr == 0)
+@@ -274,7 +277,6 @@ static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
+ 		ptep++;
+ 		pte = pte_next_pfn(pte);
+ 	}
+-	arch_leave_lazy_mmu_mode();
+ }
+ #endif
+ #define set_pte_at(mm, addr, ptep, pte) set_ptes(mm, addr, ptep, pte, 1)
+diff --git a/include/linux/printk.h b/include/linux/printk.h
+index eca9bb2ee637b6..0cb647ecd77f54 100644
+--- a/include/linux/printk.h
++++ b/include/linux/printk.h
+@@ -204,6 +204,7 @@ void printk_legacy_allow_panic_sync(void);
+ extern bool nbcon_device_try_acquire(struct console *con);
+ extern void nbcon_device_release(struct console *con);
+ void nbcon_atomic_flush_unsafe(void);
++bool pr_flush(int timeout_ms, bool reset_on_progress);
+ #else
+ static inline __printf(1, 0)
+ int vprintk(const char *s, va_list args)
+@@ -304,6 +305,11 @@ static inline void nbcon_atomic_flush_unsafe(void)
+ {
+ }
+ 
++static inline bool pr_flush(int timeout_ms, bool reset_on_progress)
++{
++	return true;
++}
++
+ #endif
+ 
+ bool this_cpu_in_panic(void);
+diff --git a/include/linux/tpm.h b/include/linux/tpm.h
+index 20a40ade803086..6c3125300c009a 100644
+--- a/include/linux/tpm.h
++++ b/include/linux/tpm.h
+@@ -335,6 +335,7 @@ enum tpm2_cc_attrs {
+ #define TPM_VID_WINBOND  0x1050
+ #define TPM_VID_STM      0x104A
+ #define TPM_VID_ATML     0x1114
++#define TPM_VID_IFX      0x15D1
+ 
+ enum tpm_chip_flags {
+ 	TPM_CHIP_FLAG_BOOTSTRAPPED		= BIT(0),
+diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
+index dd10e02bfc746e..71d24328764065 100644
+--- a/include/net/bluetooth/hci.h
++++ b/include/net/bluetooth/hci.h
+@@ -353,6 +353,22 @@ enum {
+ 	 * during the hdev->setup vendor callback.
+ 	 */
+ 	HCI_QUIRK_FIXUP_LE_EXT_ADV_REPORT_PHY,
++
++	/* When this quirk is set, the HCI_OP_READ_VOICE_SETTING command is
++	 * skipped. This is required for a subset of the CSR controller clones
++	 * which erroneously claim to support it.
++	 *
++	 * This quirk must be set before hci_register_dev is called.
++	 */
++	HCI_QUIRK_BROKEN_READ_VOICE_SETTING,
++
++	/* When this quirk is set, the HCI_OP_READ_PAGE_SCAN_TYPE command is
++	 * skipped. This is required for a subset of the CSR controller clones
++	 * which erroneously claim to support it.
++	 *
++	 * This quirk must be set before hci_register_dev is called.
++	 */
++	HCI_QUIRK_BROKEN_READ_PAGE_SCAN_TYPE,
+ };
+ 
+ /* HCI device flags */
+diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
+index c95f7e6ba25514..4245910ffc4a2d 100644
+--- a/include/net/bluetooth/hci_core.h
++++ b/include/net/bluetooth/hci_core.h
+@@ -1921,6 +1921,10 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
+ 	((dev)->commands[20] & 0x10 && \
+ 	 !test_bit(HCI_QUIRK_BROKEN_READ_ENC_KEY_SIZE, &hdev->quirks))
+ 
++#define read_voice_setting_capable(dev) \
++	((dev)->commands[9] & 0x04 && \
++	 !test_bit(HCI_QUIRK_BROKEN_READ_VOICE_SETTING, &(dev)->quirks))
++
+ /* Use enhanced synchronous connection if command is supported and its quirk
+  * has not been set.
+  */
+diff --git a/include/net/mac80211.h b/include/net/mac80211.h
+index 5b712582f9a9ce..3b964f8834e719 100644
+--- a/include/net/mac80211.h
++++ b/include/net/mac80211.h
+@@ -2826,6 +2826,11 @@ struct ieee80211_txq {
+  *	implements MLO, so operation can continue on other links when one
+  *	link is switching.
+  *
++ * @IEEE80211_HW_STRICT: strictly enforce certain things mandated by the spec
++ *	but otherwise ignored/worked around for interoperability. This is a
++ *	HW flag so drivers can opt in according to their own control, e.g. in
++ *	testing.
++ *
+  * @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays
+  */
+ enum ieee80211_hw_flags {
+@@ -2885,6 +2890,7 @@ enum ieee80211_hw_flags {
+ 	IEEE80211_HW_DISALLOW_PUNCTURING,
+ 	IEEE80211_HW_DISALLOW_PUNCTURING_5GHZ,
+ 	IEEE80211_HW_HANDLES_QUIET_CSA,
++	IEEE80211_HW_STRICT,
+ 
+ 	/* keep last, obviously */
+ 	NUM_IEEE80211_HW_FLAGS
+diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
+index 31248cfdfb235f..dcd288fa1bb6fb 100644
+--- a/include/net/sctp/structs.h
++++ b/include/net/sctp/structs.h
+@@ -775,6 +775,7 @@ struct sctp_transport {
+ 
+ 	/* Reference counting. */
+ 	refcount_t refcnt;
++	__u32	dead:1,
+ 		/* RTO-Pending : A flag used to track if one of the DATA
+ 		 *		chunks sent to this address is currently being
+ 		 *		used to compute a RTT. If this flag is 0,
+@@ -784,7 +785,7 @@ struct sctp_transport {
+ 		 *		calculation completes (i.e. the DATA chunk
+ 		 *		is SACK'd) clear this flag.
+ 		 */
+-	__u32	rto_pending:1,
++		rto_pending:1,
+ 
+ 		/*
+ 		 * hb_sent : a flag that signals that we have a pending
+diff --git a/include/net/sock.h b/include/net/sock.h
+index fa055cf1785efd..fa9b9dadbe1709 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -338,6 +338,8 @@ struct sk_filter;
+   *	@sk_txtime_unused: unused txtime flags
+   *	@ns_tracker: tracker for netns reference
+   *	@sk_user_frags: xarray of pages the user is holding a reference on.
++  *	@sk_owner: reference to the real owner of the socket that calls
++  *		   sock_lock_init_class_and_name().
+   */
+ struct sock {
+ 	/*
+@@ -544,6 +546,10 @@ struct sock {
+ 	struct rcu_head		sk_rcu;
+ 	netns_tracker		ns_tracker;
+ 	struct xarray		sk_user_frags;
++
++#if IS_ENABLED(CONFIG_PROVE_LOCKING) && IS_ENABLED(CONFIG_MODULES)
++	struct module		*sk_owner;
++#endif
+ };
+ 
+ struct sock_bh_locked {
+@@ -1585,6 +1591,35 @@ static inline void sk_mem_uncharge(struct sock *sk, int size)
+ 	sk_mem_reclaim(sk);
+ }
+ 
++#if IS_ENABLED(CONFIG_PROVE_LOCKING) && IS_ENABLED(CONFIG_MODULES)
++static inline void sk_owner_set(struct sock *sk, struct module *owner)
++{
++	__module_get(owner);
++	sk->sk_owner = owner;
++}
++
++static inline void sk_owner_clear(struct sock *sk)
++{
++	sk->sk_owner = NULL;
++}
++
++static inline void sk_owner_put(struct sock *sk)
++{
++	module_put(sk->sk_owner);
++}
++#else
++static inline void sk_owner_set(struct sock *sk, struct module *owner)
++{
++}
++
++static inline void sk_owner_clear(struct sock *sk)
++{
++}
++
++static inline void sk_owner_put(struct sock *sk)
++{
++}
++#endif
+ /*
+  * Macro so as to not evaluate some arguments when
+  * lockdep is not enabled.
+@@ -1594,13 +1629,14 @@ static inline void sk_mem_uncharge(struct sock *sk, int size)
+  */
+ #define sock_lock_init_class_and_name(sk, sname, skey, name, key)	\
+ do {									\
++	sk_owner_set(sk, THIS_MODULE);					\
+ 	sk->sk_lock.owned = 0;						\
+ 	init_waitqueue_head(&sk->sk_lock.wq);				\
+ 	spin_lock_init(&(sk)->sk_lock.slock);				\
+ 	debug_check_no_locks_freed((void *)&(sk)->sk_lock,		\
+-			sizeof((sk)->sk_lock));				\
++				   sizeof((sk)->sk_lock));		\
+ 	lockdep_set_class_and_name(&(sk)->sk_lock.slock,		\
+-				(skey), (sname));				\
++				   (skey), (sname));			\
+ 	lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0);	\
+ } while (0)
+ 
+diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h
+index 717307d6b5b74c..3e1c11d9d9808f 100644
+--- a/include/uapi/linux/kfd_ioctl.h
++++ b/include/uapi/linux/kfd_ioctl.h
+@@ -62,6 +62,8 @@ struct kfd_ioctl_get_version_args {
+ #define KFD_MAX_QUEUE_PERCENTAGE	100
+ #define KFD_MAX_QUEUE_PRIORITY		15
+ 
++#define KFD_MIN_QUEUE_RING_SIZE		1024
++
+ struct kfd_ioctl_create_queue_args {
+ 	__u64 ring_base_address;	/* to KFD */
+ 	__u64 write_pointer_address;	/* from KFD */
+diff --git a/include/uapi/linux/landlock.h b/include/uapi/linux/landlock.h
+index 33745642f7875a..c223572f82296b 100644
+--- a/include/uapi/linux/landlock.h
++++ b/include/uapi/linux/landlock.h
+@@ -57,9 +57,11 @@ struct landlock_ruleset_attr {
+  *
+  * - %LANDLOCK_CREATE_RULESET_VERSION: Get the highest supported Landlock ABI
+  *   version.
++ * - %LANDLOCK_CREATE_RULESET_ERRATA: Get a bitmask of fixed issues.
+  */
+ /* clang-format off */
+ #define LANDLOCK_CREATE_RULESET_VERSION			(1U << 0)
++#define LANDLOCK_CREATE_RULESET_ERRATA			(1U << 1)
+ /* clang-format on */
+ 
+ /**
+diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
+index 4842c36fdf8019..0524d541d4e3d5 100644
+--- a/include/uapi/linux/perf_event.h
++++ b/include/uapi/linux/perf_event.h
+@@ -511,7 +511,16 @@ struct perf_event_attr {
+ 	__u16	sample_max_stack;
+ 	__u16	__reserved_2;
+ 	__u32	aux_sample_size;
+-	__u32	__reserved_3;
++
++	union {
++		__u32	aux_action;
++		struct {
++			__u32	aux_start_paused :  1, /* start AUX area tracing paused */
++				aux_pause        :  1, /* on overflow, pause AUX area tracing */
++				aux_resume       :  1, /* on overflow, resume AUX area tracing */
++				__reserved_3     : 29;
++		};
++	};
+ 
+ 	/*
+ 	 * User provided data if sigtrap=1, passed back to user via
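The aux_action union added above is UAPI, so userspace can request the pause/resume behaviour directly in perf_event_attr. A sketch of how a consumer might fill it in, assuming headers that already carry this change; wiring up the companion AUX area event (for example Intel PT) is elided:

#include <linux/perf_event.h>
#include <string.h>

static void setup_pause_on_overflow(struct perf_event_attr *attr)
{
	memset(attr, 0, sizeof(*attr));
	attr->size = sizeof(*attr);
	/* On overflow of this event, pause AUX area tracing. */
	attr->aux_pause = 1;
	/* Alternatives from the same bitfield:
	 *   attr->aux_resume = 1;        resume tracing on overflow
	 *   attr->aux_start_paused = 1;  start the AUX area event paused */
}

int main(void)
{
	struct perf_event_attr attr;

	setup_pause_on_overflow(&attr);
	return 0;
}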
+diff --git a/include/uapi/linux/psp-sev.h b/include/uapi/linux/psp-sev.h
+index 832c15d9155bdb..eeb20dfb1fdaa4 100644
+--- a/include/uapi/linux/psp-sev.h
++++ b/include/uapi/linux/psp-sev.h
+@@ -73,13 +73,20 @@ typedef enum {
+ 	SEV_RET_INVALID_PARAM,
+ 	SEV_RET_RESOURCE_LIMIT,
+ 	SEV_RET_SECURE_DATA_INVALID,
+-	SEV_RET_INVALID_KEY = 0x27,
+-	SEV_RET_INVALID_PAGE_SIZE,
+-	SEV_RET_INVALID_PAGE_STATE,
+-	SEV_RET_INVALID_MDATA_ENTRY,
+-	SEV_RET_INVALID_PAGE_OWNER,
+-	SEV_RET_INVALID_PAGE_AEAD_OFLOW,
+-	SEV_RET_RMP_INIT_REQUIRED,
++	SEV_RET_INVALID_PAGE_SIZE          = 0x0019,
++	SEV_RET_INVALID_PAGE_STATE         = 0x001A,
++	SEV_RET_INVALID_MDATA_ENTRY        = 0x001B,
++	SEV_RET_INVALID_PAGE_OWNER         = 0x001C,
++	SEV_RET_AEAD_OFLOW                 = 0x001D,
++	SEV_RET_EXIT_RING_BUFFER           = 0x001F,
++	SEV_RET_RMP_INIT_REQUIRED          = 0x0020,
++	SEV_RET_BAD_SVN                    = 0x0021,
++	SEV_RET_BAD_VERSION                = 0x0022,
++	SEV_RET_SHUTDOWN_REQUIRED          = 0x0023,
++	SEV_RET_UPDATE_FAILED              = 0x0024,
++	SEV_RET_RESTORE_REQUIRED           = 0x0025,
++	SEV_RET_RMP_INITIALIZATION_FAILED  = 0x0026,
++	SEV_RET_INVALID_KEY                = 0x0027,
+ 	SEV_RET_MAX,
+ } sev_ret_code;
+ 
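The psp-sev hunk pins every enumerator to an explicit value, which is defensive for a UAPI error-code table: with implicit numbering, skipping a reserved code (note that 0x1E is absent above) or inserting a new one silently renumbers everything after it. A tiny demonstration of the difference:

#include <stdio.h>

enum status_implicit { A_OK, A_BUSY, A_FAIL };             /* 0, 1, 2 */
enum status_explicit { B_OK = 0, B_BUSY = 1, B_FAIL = 3 }; /* 2 reserved */

int main(void)
{
	/* Adding a value between A_OK and A_BUSY would silently change
	 * A_FAIL; the explicit form keeps its ABI meaning stable. */
	printf("implicit A_FAIL=%d, explicit B_FAIL=%d\n", A_FAIL, B_FAIL);
	return 0;
}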
+diff --git a/include/uapi/linux/rkisp1-config.h b/include/uapi/linux/rkisp1-config.h
+index 430daceafac705..2d995f3c1ca378 100644
+--- a/include/uapi/linux/rkisp1-config.h
++++ b/include/uapi/linux/rkisp1-config.h
+@@ -1528,7 +1528,7 @@ enum rksip1_ext_param_buffer_version {
+  * The expected memory layout of the parameters buffer is::
+  *
+  *	+-------------------- struct rkisp1_ext_params_cfg -------------------+
+- *	| version = RKISP_EXT_PARAMS_BUFFER_V1;                               |
++ *	| version = RKISP1_EXT_PARAM_BUFFER_V1;                               |
+  *	| data_size = sizeof(struct rkisp1_ext_params_bls_config)             |
+  *	|           + sizeof(struct rkisp1_ext_params_dpcc_config);           |
+  *	| +------------------------- data  ---------------------------------+ |
+diff --git a/include/xen/interface/xen-mca.h b/include/xen/interface/xen-mca.h
+index 464aa6b3a5f928..1c9afbe8cc2600 100644
+--- a/include/xen/interface/xen-mca.h
++++ b/include/xen/interface/xen-mca.h
+@@ -372,7 +372,7 @@ struct xen_mce {
+ #define XEN_MCE_LOG_LEN 32
+ 
+ struct xen_mce_log {
+-	char signature[12]; /* "MACHINECHECK" */
++	char signature[12] __nonstring; /* "MACHINECHECK" */
+ 	unsigned len;	    /* = XEN_MCE_LOG_LEN */
+ 	unsigned next;
+ 	unsigned flags;
+diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
+index cf28d29fffbf0e..19de7129ae0b35 100644
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -1821,7 +1821,7 @@ void io_wq_submit_work(struct io_wq_work *work)
+ 	 * Don't allow any multishot execution from io-wq. It's more restrictive
+ 	 * than necessary and also cleaner.
+ 	 */
+-	if (req->flags & REQ_F_APOLL_MULTISHOT) {
++	if (req->flags & (REQ_F_MULTISHOT|REQ_F_APOLL_MULTISHOT)) {
+ 		err = -EBADFD;
+ 		if (!io_file_can_poll(req))
+ 			goto fail;
+@@ -1832,7 +1832,7 @@ void io_wq_submit_work(struct io_wq_work *work)
+ 				goto fail;
+ 			return;
+ 		} else {
+-			req->flags &= ~REQ_F_APOLL_MULTISHOT;
++			req->flags &= ~(REQ_F_APOLL_MULTISHOT|REQ_F_MULTISHOT);
+ 		}
+ 	}
+ 
+diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
+index e1895952066eeb..7a8c3a004800ed 100644
+--- a/io_uring/kbuf.c
++++ b/io_uring/kbuf.c
+@@ -484,6 +484,8 @@ int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
+ 	p->nbufs = tmp;
+ 	p->addr = READ_ONCE(sqe->addr);
+ 	p->len = READ_ONCE(sqe->len);
++	if (!p->len)
++		return -EINVAL;
+ 
+ 	if (check_mul_overflow((unsigned long)p->len, (unsigned long)p->nbufs,
+ 				&size))
+diff --git a/io_uring/net.c b/io_uring/net.c
+index 7ea99e082e97e7..384915d931b72c 100644
+--- a/io_uring/net.c
++++ b/io_uring/net.c
+@@ -435,6 +435,7 @@ int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+ 		sr->msg_flags |= MSG_WAITALL;
+ 		sr->buf_group = req->buf_index;
+ 		req->buf_list = NULL;
++		req->flags |= REQ_F_MULTISHOT;
+ 	}
+ 
+ #ifdef CONFIG_COMPAT
+@@ -1616,6 +1617,8 @@ int io_accept(struct io_kiocb *req, unsigned int issue_flags)
+ 	}
+ 
+ 	io_req_set_res(req, ret, cflags);
++	if (!(issue_flags & IO_URING_F_MULTISHOT))
++		return IOU_OK;
+ 	return IOU_STOP_MULTISHOT;
+ }
+ 
+diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
+index 216535e055e112..4378f3eff25d25 100644
+--- a/kernel/cgroup/cgroup.c
++++ b/kernel/cgroup/cgroup.c
+@@ -5909,6 +5909,12 @@ static void kill_css(struct cgroup_subsys_state *css)
+ 	if (css->flags & CSS_DYING)
+ 		return;
+ 
++	/*
++	 * Call css_killed(), if defined, before setting the CSS_DYING flag
++	 */
++	if (css->ss->css_killed)
++		css->ss->css_killed(css);
++
+ 	css->flags |= CSS_DYING;
+ 
+ 	/*
+diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
+index 24ece85fd3b126..839f88ba17f7d3 100644
+--- a/kernel/cgroup/cpuset.c
++++ b/kernel/cgroup/cpuset.c
+@@ -84,9 +84,19 @@ static bool		have_boot_isolcpus;
+ static struct list_head remote_children;
+ 
+ /*
+- * A flag to force sched domain rebuild at the end of an operation while
+- * inhibiting it in the intermediate stages when set. Currently it is only
+- * set in hotplug code.
++ * A flag to force sched domain rebuild at the end of an operation.
++ * It can be set in
++ *  - update_partition_sd_lb()
++ *  - remote_partition_check()
++ *  - update_cpumasks_hier()
++ *  - cpuset_update_flag()
++ *  - cpuset_hotplug_update_tasks()
++ *  - cpuset_handle_hotplug()
++ *
++ * Protected by cpuset_mutex (with cpus_read_lock held) or cpus_write_lock.
++ *
++ * Note that update_relax_domain_level() in cpuset-v1.c can still call
++ * rebuild_sched_domains_locked() directly without using this flag.
+  */
+ static bool force_sd_rebuild;
+ 
+@@ -283,6 +293,12 @@ static inline void dec_attach_in_progress(struct cpuset *cs)
+ 	mutex_unlock(&cpuset_mutex);
+ }
+ 
++static inline bool cpuset_v2(void)
++{
++	return !IS_ENABLED(CONFIG_CPUSETS_V1) ||
++		cgroup_subsys_on_dfl(cpuset_cgrp_subsys);
++}
++
+ /*
+  * Cgroup v2 behavior is used on the "cpus" and "mems" control files when
+  * on default hierarchy or when the cpuset_v2_mode flag is set by mounting
+@@ -293,7 +309,7 @@ static inline void dec_attach_in_progress(struct cpuset *cs)
+  */
+ static inline bool is_in_v2_mode(void)
+ {
+-	return cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||
++	return cpuset_v2() ||
+ 	      (cpuset_cgrp_subsys.root->flags & CGRP_ROOT_CPUSET_V2_MODE);
+ }
+ 
+@@ -728,7 +744,7 @@ static int generate_sched_domains(cpumask_var_t **domains,
+ 	int nslot;		/* next empty doms[] struct cpumask slot */
+ 	struct cgroup_subsys_state *pos_css;
+ 	bool root_load_balance = is_sched_load_balance(&top_cpuset);
+-	bool cgrpv2 = cgroup_subsys_on_dfl(cpuset_cgrp_subsys);
++	bool cgrpv2 = cpuset_v2();
+ 	int nslot_update;
+ 
+ 	doms = NULL;
+@@ -998,6 +1014,7 @@ void rebuild_sched_domains_locked(void)
+ 
+ 	lockdep_assert_cpus_held();
+ 	lockdep_assert_held(&cpuset_mutex);
++	force_sd_rebuild = false;
+ 
+ 	/*
+ 	 * If we have raced with CPU hotplug, return early to avoid
+@@ -1172,8 +1189,8 @@ static void update_partition_sd_lb(struct cpuset *cs, int old_prs)
+ 			clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
+ 	}
+ 
+-	if (rebuild_domains && !force_sd_rebuild)
+-		rebuild_sched_domains_locked();
++	if (rebuild_domains)
++		cpuset_force_rebuild();
+ }
+ 
+ /*
+@@ -1195,7 +1212,7 @@ static void reset_partition_data(struct cpuset *cs)
+ {
+ 	struct cpuset *parent = parent_cs(cs);
+ 
+-	if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys))
++	if (!cpuset_v2())
+ 		return;
+ 
+ 	lockdep_assert_held(&callback_lock);
+@@ -1383,6 +1400,7 @@ static int remote_partition_enable(struct cpuset *cs, int new_prs,
+ 	list_add(&cs->remote_sibling, &remote_children);
+ 	spin_unlock_irq(&callback_lock);
+ 	update_unbound_workqueue_cpumask(isolcpus_updated);
++	cs->prs_err = 0;
+ 
+ 	/*
+ 	 * Propagate changes in top_cpuset's effective_cpus down the hierarchy.
+@@ -1413,9 +1431,11 @@ static void remote_partition_disable(struct cpuset *cs, struct tmpmasks *tmp)
+ 	list_del_init(&cs->remote_sibling);
+ 	isolcpus_updated = partition_xcpus_del(cs->partition_root_state,
+ 					       NULL, tmp->new_cpus);
+-	cs->partition_root_state = -cs->partition_root_state;
+-	if (!cs->prs_err)
+-		cs->prs_err = PERR_INVCPUS;
++	if (cs->prs_err)
++		cs->partition_root_state = -cs->partition_root_state;
++	else
++		cs->partition_root_state = PRS_MEMBER;
++
+ 	reset_partition_data(cs);
+ 	spin_unlock_irq(&callback_lock);
+ 	update_unbound_workqueue_cpumask(isolcpus_updated);
+@@ -1448,8 +1468,10 @@ static void remote_cpus_update(struct cpuset *cs, struct cpumask *newmask,
+ 
+ 	WARN_ON_ONCE(!cpumask_subset(cs->effective_xcpus, subpartitions_cpus));
+ 
+-	if (cpumask_empty(newmask))
++	if (cpumask_empty(newmask)) {
++		cs->prs_err = PERR_CPUSEMPTY;
+ 		goto invalidate;
++	}
+ 
+ 	adding   = cpumask_andnot(tmp->addmask, newmask, cs->effective_xcpus);
+ 	deleting = cpumask_andnot(tmp->delmask, cs->effective_xcpus, newmask);
+@@ -1459,10 +1481,15 @@ static void remote_cpus_update(struct cpuset *cs, struct cpumask *newmask,
+ 	 * not allocated to other partitions and there are effective_cpus
+ 	 * left in the top cpuset.
+ 	 */
+-	if (adding && (!capable(CAP_SYS_ADMIN) ||
+-		       cpumask_intersects(tmp->addmask, subpartitions_cpus) ||
+-		       cpumask_subset(top_cpuset.effective_cpus, tmp->addmask)))
+-		goto invalidate;
++	if (adding) {
++		if (!capable(CAP_SYS_ADMIN))
++			cs->prs_err = PERR_ACCESS;
++		else if (cpumask_intersects(tmp->addmask, subpartitions_cpus) ||
++			 cpumask_subset(top_cpuset.effective_cpus, tmp->addmask))
++			cs->prs_err = PERR_NOCPUS;
++		if (cs->prs_err)
++			goto invalidate;
++	}
+ 
+ 	spin_lock_irq(&callback_lock);
+ 	if (adding)
+@@ -1520,8 +1547,8 @@ static void remote_partition_check(struct cpuset *cs, struct cpumask *newmask,
+ 			remote_partition_disable(child, tmp);
+ 			disable_cnt++;
+ 		}
+-	if (disable_cnt && !force_sd_rebuild)
+-		rebuild_sched_domains_locked();
++	if (disable_cnt)
++		cpuset_force_rebuild();
+ }
+ 
+ /*
+@@ -1578,7 +1605,7 @@ static bool prstate_housekeeping_conflict(int prstate, struct cpumask *new_cpus)
+  * The partcmd_update command is used by update_cpumasks_hier() with newmask
+  * NULL and update_cpumask() with newmask set. The partcmd_invalidate is used
+  * by update_cpumask() with NULL newmask. In both cases, the callers won't
+- * check for error and so partition_root_state and prs_error will be updated
++ * check for error and so partition_root_state and prs_err will be updated
+  * directly.
+  */
+ static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
+@@ -1656,9 +1683,9 @@ static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
+ 		if (nocpu)
+ 			return PERR_NOCPUS;
+ 
+-		cpumask_copy(tmp->delmask, xcpus);
+-		deleting = true;
+-		subparts_delta++;
++		deleting = cpumask_and(tmp->delmask, xcpus, parent->effective_xcpus);
++		if (deleting)
++			subparts_delta++;
+ 		new_prs = (cmd == partcmd_enable) ? PRS_ROOT : PRS_ISOLATED;
+ 	} else if (cmd == partcmd_disable) {
+ 		/*
+@@ -1930,12 +1957,6 @@ static void compute_partition_effective_cpumask(struct cpuset *cs,
+ 	rcu_read_unlock();
+ }
+ 
+-/*
+- * update_cpumasks_hier() flags
+- */
+-#define HIER_CHECKALL		0x01	/* Check all cpusets with no skipping */
+-#define HIER_NO_SD_REBUILD	0x02	/* Don't rebuild sched domains */
+-
+ /*
+  * update_cpumasks_hier - Update effective cpumasks and tasks in the subtree
+  * @cs:  the cpuset to consider
+@@ -1950,7 +1971,7 @@ static void compute_partition_effective_cpumask(struct cpuset *cs,
+  * Called with cpuset_mutex held
+  */
+ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
+-				 int flags)
++				 bool force)
+ {
+ 	struct cpuset *cp;
+ 	struct cgroup_subsys_state *pos_css;
+@@ -2015,12 +2036,12 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
+ 		 * Skip the whole subtree if
+ 		 * 1) the cpumask remains the same,
+ 		 * 2) has no partition root state,
+-		 * 3) HIER_CHECKALL flag not set, and
++		 * 3) force flag not set, and
+ 		 * 4) for v2 load balance state same as its parent.
+ 		 */
+-		if (!cp->partition_root_state && !(flags & HIER_CHECKALL) &&
++		if (!cp->partition_root_state && !force &&
+ 		    cpumask_equal(tmp->new_cpus, cp->effective_cpus) &&
+-		    (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||
++		    (!cpuset_v2() ||
+ 		    (is_sched_load_balance(parent) == is_sched_load_balance(cp)))) {
+ 			pos_css = css_rightmost_descendant(pos_css);
+ 			continue;
+@@ -2094,8 +2115,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
+ 		 * from parent if current cpuset isn't a valid partition root
+ 		 * and their load balance states differ.
+ 		 */
+-		if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
+-		    !is_partition_valid(cp) &&
++		if (cpuset_v2() && !is_partition_valid(cp) &&
+ 		    (is_sched_load_balance(parent) != is_sched_load_balance(cp))) {
+ 			if (is_sched_load_balance(parent))
+ 				set_bit(CS_SCHED_LOAD_BALANCE, &cp->flags);
+@@ -2111,8 +2131,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
+ 		 */
+ 		if (!cpumask_empty(cp->cpus_allowed) &&
+ 		    is_sched_load_balance(cp) &&
+-		   (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||
+-		    is_partition_valid(cp)))
++		   (!cpuset_v2() || is_partition_valid(cp)))
+ 			need_rebuild_sched_domains = true;
+ 
+ 		rcu_read_lock();
+@@ -2120,9 +2139,8 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
+ 	}
+ 	rcu_read_unlock();
+ 
+-	if (need_rebuild_sched_domains && !(flags & HIER_NO_SD_REBUILD) &&
+-	    !force_sd_rebuild)
+-		rebuild_sched_domains_locked();
++	if (need_rebuild_sched_domains)
++		cpuset_force_rebuild();
+ }
+ 
+ /**
+@@ -2149,9 +2167,7 @@ static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
+ 	 * directly.
+ 	 *
+ 	 * The update_cpumasks_hier() function may sleep. So we have to
+-	 * release the RCU read lock before calling it. HIER_NO_SD_REBUILD
+-	 * flag is used to suppress rebuild of sched domains as the callers
+-	 * will take care of that.
++	 * release the RCU read lock before calling it.
+ 	 */
+ 	rcu_read_lock();
+ 	cpuset_for_each_child(sibling, pos_css, parent) {
+@@ -2167,7 +2183,7 @@ static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
+ 			continue;
+ 
+ 		rcu_read_unlock();
+-		update_cpumasks_hier(sibling, tmp, HIER_NO_SD_REBUILD);
++		update_cpumasks_hier(sibling, tmp, false);
+ 		rcu_read_lock();
+ 		css_put(&sibling->css);
+ 	}
+@@ -2187,7 +2203,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
+ 	struct tmpmasks tmp;
+ 	struct cpuset *parent = parent_cs(cs);
+ 	bool invalidate = false;
+-	int hier_flags = 0;
++	bool force = false;
+ 	int old_prs = cs->partition_root_state;
+ 
+ 	/* top_cpuset.cpus_allowed tracks cpu_online_mask; it's read-only */
+@@ -2248,12 +2264,11 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
+ 	 * Check all the descendants in update_cpumasks_hier() if
+ 	 * effective_xcpus is to be changed.
+ 	 */
+-	if (!cpumask_equal(cs->effective_xcpus, trialcs->effective_xcpus))
+-		hier_flags = HIER_CHECKALL;
++	force = !cpumask_equal(cs->effective_xcpus, trialcs->effective_xcpus);
+ 
+ 	retval = validate_change(cs, trialcs);
+ 
+-	if ((retval == -EINVAL) && cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) {
++	if ((retval == -EINVAL) && cpuset_v2()) {
+ 		struct cgroup_subsys_state *css;
+ 		struct cpuset *cp;
+ 
+@@ -2317,7 +2332,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
+ 	spin_unlock_irq(&callback_lock);
+ 
+ 	/* effective_cpus/effective_xcpus will be updated here */
+-	update_cpumasks_hier(cs, &tmp, hier_flags);
++	update_cpumasks_hier(cs, &tmp, force);
+ 
+ 	/* Update CS_SCHED_LOAD_BALANCE and/or sched_domains, if necessary */
+ 	if (cs->partition_root_state)
+@@ -2342,7 +2357,7 @@ static int update_exclusive_cpumask(struct cpuset *cs, struct cpuset *trialcs,
+ 	struct tmpmasks tmp;
+ 	struct cpuset *parent = parent_cs(cs);
+ 	bool invalidate = false;
+-	int hier_flags = 0;
++	bool force = false;
+ 	int old_prs = cs->partition_root_state;
+ 
+ 	if (!*buf) {
+@@ -2365,8 +2380,7 @@ static int update_exclusive_cpumask(struct cpuset *cs, struct cpuset *trialcs,
+ 	 * Check all the descendants in update_cpumasks_hier() if
+ 	 * effective_xcpus is to be changed.
+ 	 */
+-	if (!cpumask_equal(cs->effective_xcpus, trialcs->effective_xcpus))
+-		hier_flags = HIER_CHECKALL;
++	force = !cpumask_equal(cs->effective_xcpus, trialcs->effective_xcpus);
+ 
+ 	retval = validate_change(cs, trialcs);
+ 	if (retval)
+@@ -2419,8 +2433,8 @@ static int update_exclusive_cpumask(struct cpuset *cs, struct cpuset *trialcs,
+ 	 * of the subtree when it is a valid partition root or effective_xcpus
+ 	 * is updated.
+ 	 */
+-	if (is_partition_valid(cs) || hier_flags)
+-		update_cpumasks_hier(cs, &tmp, hier_flags);
++	if (is_partition_valid(cs) || force)
++		update_cpumasks_hier(cs, &tmp, force);
+ 
+ 	/* Update CS_SCHED_LOAD_BALANCE and/or sched_domains, if necessary */
+ 	if (cs->partition_root_state)
+@@ -2745,9 +2759,12 @@ int cpuset_update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
+ 	cs->flags = trialcs->flags;
+ 	spin_unlock_irq(&callback_lock);
+ 
+-	if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed &&
+-	    !force_sd_rebuild)
+-		rebuild_sched_domains_locked();
++	if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed) {
++		if (cpuset_v2())
++			cpuset_force_rebuild();
++		else
++			rebuild_sched_domains_locked();
++	}
+ 
+ 	if (spread_flag_changed)
+ 		cpuset1_update_tasks_flags(cs);
+@@ -2861,12 +2878,14 @@ static int update_prstate(struct cpuset *cs, int new_prs)
+ 	update_unbound_workqueue_cpumask(new_xcpus_state);
+ 
+ 	/* Force update if switching back to member */
+-	update_cpumasks_hier(cs, &tmpmask, !new_prs ? HIER_CHECKALL : 0);
++	update_cpumasks_hier(cs, &tmpmask, !new_prs);
+ 
+ 	/* Update sched domains and load balance flag */
+ 	update_partition_sd_lb(cs, old_prs);
+ 
+ 	notify_partition_change(cs, old_prs);
++	if (force_sd_rebuild)
++		rebuild_sched_domains_locked();
+ 	free_cpumasks(NULL, &tmpmask);
+ 	return 0;
+ }
+@@ -2927,8 +2946,7 @@ static int cpuset_can_attach(struct cgroup_taskset *tset)
+ 		 * migration permission derives from hierarchy ownership in
+ 		 * cgroup_procs_write_permission()).
+ 		 */
+-		if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||
+-		    (cpus_updated || mems_updated)) {
++		if (!cpuset_v2() || (cpus_updated || mems_updated)) {
+ 			ret = security_task_setscheduler(task);
+ 			if (ret)
+ 				goto out_unlock;
+@@ -3042,8 +3060,7 @@ static void cpuset_attach(struct cgroup_taskset *tset)
+ 	 * in effective cpus and mems. In that case, we can optimize out
+ 	 * by skipping the task iteration and update.
+ 	 */
+-	if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
+-	    !cpus_updated && !mems_updated) {
++	if (cpuset_v2() && !cpus_updated && !mems_updated) {
+ 		cpuset_attach_nodemask_to = cs->effective_mems;
+ 		goto out;
+ 	}
+@@ -3137,6 +3154,8 @@ ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
+ 	}
+ 
+ 	free_cpuset(trialcs);
++	if (force_sd_rebuild)
++		rebuild_sched_domains_locked();
+ out_unlock:
+ 	mutex_unlock(&cpuset_mutex);
+ 	cpus_read_unlock();
+@@ -3366,7 +3385,7 @@ cpuset_css_alloc(struct cgroup_subsys_state *parent_css)
+ 	INIT_LIST_HEAD(&cs->remote_sibling);
+ 
+ 	/* Set CS_MEMORY_MIGRATE for default hierarchy */
+-	if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys))
++	if (cpuset_v2())
+ 		__set_bit(CS_MEMORY_MIGRATE, &cs->flags);
+ 
+ 	return &cs->css;
+@@ -3393,8 +3412,7 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
+ 	/*
+ 	 * For v2, clear CS_SCHED_LOAD_BALANCE if parent is isolated
+ 	 */
+-	if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
+-	    !is_sched_load_balance(parent))
++	if (cpuset_v2() && !is_sched_load_balance(parent))
+ 		clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
+ 
+ 	cpuset_inc();
+@@ -3461,11 +3479,7 @@ static void cpuset_css_offline(struct cgroup_subsys_state *css)
+ 	cpus_read_lock();
+ 	mutex_lock(&cpuset_mutex);
+ 
+-	if (is_partition_valid(cs))
+-		update_prstate(cs, 0);
+-
+-	if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
+-	    is_sched_load_balance(cs))
++	if (!cpuset_v2() && is_sched_load_balance(cs))
+ 		cpuset_update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
+ 
+ 	cpuset_dec();
+@@ -3475,6 +3489,22 @@ static void cpuset_css_offline(struct cgroup_subsys_state *css)
+ 	cpus_read_unlock();
+ }
+ 
++static void cpuset_css_killed(struct cgroup_subsys_state *css)
++{
++	struct cpuset *cs = css_cs(css);
++
++	cpus_read_lock();
++	mutex_lock(&cpuset_mutex);
++
++	/* Reset valid partition back to member */
++	if (is_partition_valid(cs))
++		update_prstate(cs, PRS_MEMBER);
++
++	mutex_unlock(&cpuset_mutex);
++	cpus_read_unlock();
++}
++
+ static void cpuset_css_free(struct cgroup_subsys_state *css)
+ {
+ 	struct cpuset *cs = css_cs(css);
+@@ -3596,6 +3626,7 @@ struct cgroup_subsys cpuset_cgrp_subsys = {
+ 	.css_alloc	= cpuset_css_alloc,
+ 	.css_online	= cpuset_css_online,
+ 	.css_offline	= cpuset_css_offline,
++	.css_killed	= cpuset_css_killed,
+ 	.css_free	= cpuset_css_free,
+ 	.can_attach	= cpuset_can_attach,
+ 	.cancel_attach	= cpuset_cancel_attach,
+@@ -3726,6 +3757,7 @@ static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp)
+ 
+ 	if (remote && cpumask_empty(&new_cpus) &&
+ 	    partition_is_populated(cs, NULL)) {
++		cs->prs_err = PERR_HOTPLUG;
+ 		remote_partition_disable(cs, tmp);
+ 		compute_effective_cpumask(&new_cpus, cs, parent);
+ 		remote = false;
+@@ -3879,11 +3911,9 @@ static void cpuset_handle_hotplug(void)
+ 		rcu_read_unlock();
+ 	}
+ 
+-	/* rebuild sched domains if cpus_allowed has changed */
+-	if (force_sd_rebuild) {
+-		force_sd_rebuild = false;
++	/* rebuild sched domains if necessary */
++	if (force_sd_rebuild)
+ 		rebuild_sched_domains_cpuslocked();
+-	}
+ 
+ 	free_cpumasks(NULL, ptmp);
+ }
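
The cpuset hunks above convert scattered rebuild_sched_domains_locked() calls into
cpuset_force_rebuild(), which only sets force_sd_rebuild; the actual rebuild then runs
once, at the end of update_prstate(), cpuset_write_resmask() or the hotplug path. A
minimal stand-alone sketch of this defer-and-coalesce pattern, using illustrative
names rather than the kernel API:

#include <stdbool.h>
#include <stdio.h>

static bool rebuild_needed;		/* stand-in for force_sd_rebuild */

static void request_rebuild(void)	/* stand-in for cpuset_force_rebuild() */
{
	rebuild_needed = true;
}

static void do_rebuild(void)		/* stand-in for rebuild_sched_domains_locked() */
{
	rebuild_needed = false;		/* cleared up front, as in the patch */
	puts("rebuilding sched domains once");
}

static void write_handler(void)
{
	/* several intermediate updates may each request a rebuild ... */
	request_rebuild();
	request_rebuild();
	/* ... but only one rebuild runs at the flush point */
	if (rebuild_needed)
		do_rebuild();
}

int main(void) { write_handler(); return 0; }
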
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index b5ccf52bb71baa..97af53c43608e4 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -2146,7 +2146,7 @@ static void perf_put_aux_event(struct perf_event *event)
+ 
+ static bool perf_need_aux_event(struct perf_event *event)
+ {
+-	return !!event->attr.aux_output || !!event->attr.aux_sample_size;
++	return event->attr.aux_output || has_aux_action(event);
+ }
+ 
+ static int perf_get_aux_event(struct perf_event *event,
+@@ -2171,6 +2171,10 @@ static int perf_get_aux_event(struct perf_event *event,
+ 	    !perf_aux_output_match(event, group_leader))
+ 		return 0;
+ 
++	if ((event->attr.aux_pause || event->attr.aux_resume) &&
++	    !(group_leader->pmu->capabilities & PERF_PMU_CAP_AUX_PAUSE))
++		return 0;
++
+ 	if (event->attr.aux_sample_size && !group_leader->pmu->snapshot_aux)
+ 		return 0;
+ 
+@@ -5258,6 +5262,8 @@ static int exclusive_event_init(struct perf_event *event)
+ 			return -EBUSY;
+ 	}
+ 
++	event->attach_state |= PERF_ATTACH_EXCLUSIVE;
++
+ 	return 0;
+ }
+ 
+@@ -5265,14 +5271,13 @@ static void exclusive_event_destroy(struct perf_event *event)
+ {
+ 	struct pmu *pmu = event->pmu;
+ 
+-	if (!is_exclusive_pmu(pmu))
+-		return;
+-
+ 	/* see comment in exclusive_event_init() */
+ 	if (event->attach_state & PERF_ATTACH_TASK)
+ 		atomic_dec(&pmu->exclusive_cnt);
+ 	else
+ 		atomic_inc(&pmu->exclusive_cnt);
++
++	event->attach_state &= ~PERF_ATTACH_EXCLUSIVE;
+ }
+ 
+ static bool exclusive_event_match(struct perf_event *e1, struct perf_event *e2)
+@@ -5307,35 +5312,58 @@ static bool exclusive_event_installable(struct perf_event *event,
+ static void perf_addr_filters_splice(struct perf_event *event,
+ 				       struct list_head *head);
+ 
+-static void perf_pending_task_sync(struct perf_event *event)
++/* vs perf_event_alloc() error */
++static void __free_event(struct perf_event *event)
+ {
+-	struct callback_head *head = &event->pending_task;
++	if (event->attach_state & PERF_ATTACH_CALLCHAIN)
++		put_callchain_buffers();
++
++	kfree(event->addr_filter_ranges);
++
++	if (event->attach_state & PERF_ATTACH_EXCLUSIVE)
++		exclusive_event_destroy(event);
++
++	if (is_cgroup_event(event))
++		perf_detach_cgroup(event);
++
++	if (event->destroy)
++		event->destroy(event);
+ 
+-	if (!event->pending_work)
+-		return;
+ 	/*
+-	 * If the task is queued to the current task's queue, we
+-	 * obviously can't wait for it to complete. Simply cancel it.
++	 * Must be after ->destroy(), due to uprobe_perf_close() using
++	 * hw.target.
+ 	 */
+-	if (task_work_cancel(current, head)) {
+-		event->pending_work = 0;
+-		local_dec(&event->ctx->nr_no_switch_fast);
+-		return;
++	if (event->hw.target)
++		put_task_struct(event->hw.target);
++
++	if (event->pmu_ctx) {
++		/*
++		 * put_pmu_ctx() needs an event->ctx reference, because of
++		 * epc->ctx.
++		 */
++		WARN_ON_ONCE(!event->ctx);
++		WARN_ON_ONCE(event->pmu_ctx->ctx != event->ctx);
++		put_pmu_ctx(event->pmu_ctx);
+ 	}
+ 
+ 	/*
+-	 * All accesses related to the event are within the same RCU section in
+-	 * perf_pending_task(). The RCU grace period before the event is freed
+-	 * will make sure all those accesses are complete by then.
++	 * perf_event_free_task() relies on put_ctx() being 'last', in
++	 * particular all task references must be cleaned up.
+ 	 */
+-	rcuwait_wait_event(&event->pending_work_wait, !event->pending_work, TASK_UNINTERRUPTIBLE);
++	if (event->ctx)
++		put_ctx(event->ctx);
++
++	if (event->pmu)
++		module_put(event->pmu->module);
++
++	call_rcu(&event->rcu_head, free_event_rcu);
+ }
+ 
++/* vs perf_event_alloc() success */
+ static void _free_event(struct perf_event *event)
+ {
+ 	irq_work_sync(&event->pending_irq);
+ 	irq_work_sync(&event->pending_disable_irq);
+-	perf_pending_task_sync(event);
+ 
+ 	unaccount_event(event);
+ 
+@@ -5353,42 +5381,10 @@ static void _free_event(struct perf_event *event)
+ 		mutex_unlock(&event->mmap_mutex);
+ 	}
+ 
+-	if (is_cgroup_event(event))
+-		perf_detach_cgroup(event);
+-
+-	if (!event->parent) {
+-		if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
+-			put_callchain_buffers();
+-	}
+-
+ 	perf_event_free_bpf_prog(event);
+ 	perf_addr_filters_splice(event, NULL);
+-	kfree(event->addr_filter_ranges);
+-
+-	if (event->destroy)
+-		event->destroy(event);
+-
+-	/*
+-	 * Must be after ->destroy(), due to uprobe_perf_close() using
+-	 * hw.target.
+-	 */
+-	if (event->hw.target)
+-		put_task_struct(event->hw.target);
+ 
+-	if (event->pmu_ctx)
+-		put_pmu_ctx(event->pmu_ctx);
+-
+-	/*
+-	 * perf_event_free_task() relies on put_ctx() being 'last', in particular
+-	 * all task references must be cleaned up.
+-	 */
+-	if (event->ctx)
+-		put_ctx(event->ctx);
+-
+-	exclusive_event_destroy(event);
+-	module_put(event->pmu->module);
+-
+-	call_rcu(&event->rcu_head, free_event_rcu);
++	__free_event(event);
+ }
+ 
+ /*
+@@ -5460,10 +5456,17 @@ static void perf_remove_from_owner(struct perf_event *event)
+ 
+ static void put_event(struct perf_event *event)
+ {
++	struct perf_event *parent;
++
+ 	if (!atomic_long_dec_and_test(&event->refcount))
+ 		return;
+ 
++	parent = event->parent;
+ 	_free_event(event);
++
++	/* Matches the refcount bump in inherit_event() */
++	if (parent)
++		put_event(parent);
+ }
+ 
+ /*
+@@ -5547,11 +5550,6 @@ int perf_event_release_kernel(struct perf_event *event)
+ 		if (tmp == child) {
+ 			perf_remove_from_context(child, DETACH_GROUP);
+ 			list_move(&child->child_list, &free_list);
+-			/*
+-			 * This matches the refcount bump in inherit_event();
+-			 * this can't be the last reference.
+-			 */
+-			put_event(event);
+ 		} else {
+ 			var = &ctx->refcount;
+ 		}
+@@ -5577,7 +5575,8 @@ int perf_event_release_kernel(struct perf_event *event)
+ 		void *var = &child->ctx->refcount;
+ 
+ 		list_del(&child->child_list);
+-		free_event(child);
++		/* Last reference unless ->pending_task work is pending */
++		put_event(child);
+ 
+ 		/*
+ 		 * Wake any perf_event_free_task() waiting for this event to be
+@@ -5588,7 +5587,11 @@ int perf_event_release_kernel(struct perf_event *event)
+ 	}
+ 
+ no_ctx:
+-	put_event(event); /* Must be the 'last' reference */
++	/*
++	 * Last reference unless ->pending_task work is pending on this event
++	 * or any of its children.
++	 */
++	put_event(event);
+ 	return 0;
+ }
+ EXPORT_SYMBOL_GPL(perf_event_release_kernel);
+@@ -6973,12 +6976,6 @@ static void perf_pending_task(struct callback_head *head)
+ 	struct perf_event *event = container_of(head, struct perf_event, pending_task);
+ 	int rctx;
+ 
+-	/*
+-	 * All accesses to the event must belong to the same implicit RCU read-side
+-	 * critical section as the ->pending_work reset. See comment in
+-	 * perf_pending_task_sync().
+-	 */
+-	rcu_read_lock();
+ 	/*
+ 	 * If we 'fail' here, that's OK, it means recursion is already disabled
+ 	 * and we won't recurse 'further'.
+@@ -6989,9 +6986,8 @@ static void perf_pending_task(struct callback_head *head)
+ 		event->pending_work = 0;
+ 		perf_sigtrap(event);
+ 		local_dec(&event->ctx->nr_no_switch_fast);
+-		rcuwait_wake_up(&event->pending_work_wait);
+ 	}
+-	rcu_read_unlock();
++	put_event(event);
+ 
+ 	if (rctx >= 0)
+ 		perf_swevent_put_recursion_context(rctx);
+@@ -8029,6 +8025,49 @@ void perf_prepare_header(struct perf_event_header *header,
+ 	WARN_ON_ONCE(header->size & 7);
+ }
+ 
++static void __perf_event_aux_pause(struct perf_event *event, bool pause)
++{
++	if (pause) {
++		if (!event->hw.aux_paused) {
++			event->hw.aux_paused = 1;
++			event->pmu->stop(event, PERF_EF_PAUSE);
++		}
++	} else {
++		if (event->hw.aux_paused) {
++			event->hw.aux_paused = 0;
++			event->pmu->start(event, PERF_EF_RESUME);
++		}
++	}
++}
++
++static void perf_event_aux_pause(struct perf_event *event, bool pause)
++{
++	struct perf_buffer *rb;
++
++	if (WARN_ON_ONCE(!event))
++		return;
++
++	rb = ring_buffer_get(event);
++	if (!rb)
++		return;
++
++	scoped_guard (irqsave) {
++		/*
++		 * Guard against self-recursion here. Another event could trip
++		 * this same path from NMI context.
++		 */
++		if (READ_ONCE(rb->aux_in_pause_resume))
++			break;
++
++		WRITE_ONCE(rb->aux_in_pause_resume, 1);
++		barrier();
++		__perf_event_aux_pause(event, pause);
++		barrier();
++		WRITE_ONCE(rb->aux_in_pause_resume, 0);
++	}
++	ring_buffer_put(rb);
++}
++
+ static __always_inline int
+ __perf_event_output(struct perf_event *event,
+ 		    struct perf_sample_data *data,
+@@ -9832,9 +9871,12 @@ static int __perf_event_overflow(struct perf_event *event,
+ 
+ 	ret = __perf_event_account_interrupt(event, throttle);
+ 
++	if (event->attr.aux_pause)
++		perf_event_aux_pause(event->aux_event, true);
++
+ 	if (event->prog && event->prog->type == BPF_PROG_TYPE_PERF_EVENT &&
+ 	    !bpf_overflow_handler(event, data, regs))
+-		return ret;
++		goto out;
+ 
+ 	/*
+ 	 * XXX event_limit might not quite work as expected on inherited
+@@ -9868,6 +9910,7 @@ static int __perf_event_overflow(struct perf_event *event,
+ 		    !task_work_add(current, &event->pending_task, notify_mode)) {
+ 			event->pending_work = pending_id;
+ 			local_inc(&event->ctx->nr_no_switch_fast);
++			WARN_ON_ONCE(!atomic_long_inc_not_zero(&event->refcount));
+ 
+ 			event->pending_addr = 0;
+ 			if (valid_sample && (data->sample_flags & PERF_SAMPLE_ADDR))
+@@ -9896,6 +9939,9 @@ static int __perf_event_overflow(struct perf_event *event,
+ 		event->pending_wakeup = 1;
+ 		irq_work_queue(&event->pending_irq);
+ 	}
++out:
++	if (event->attr.aux_resume)
++		perf_event_aux_pause(event->aux_event, false);
+ 
+ 	return ret;
+ }
+@@ -11961,8 +12007,10 @@ static int perf_try_init_event(struct pmu *pmu, struct perf_event *event)
+ 			event->destroy(event);
+ 	}
+ 
+-	if (ret)
++	if (ret) {
++		event->pmu = NULL;
+ 		module_put(pmu->module);
++	}
+ 
+ 	return ret;
+ }
+@@ -12211,7 +12259,6 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
+ 	init_irq_work(&event->pending_irq, perf_pending_irq);
+ 	event->pending_disable_irq = IRQ_WORK_INIT_HARD(perf_pending_disable);
+ 	init_task_work(&event->pending_task, perf_pending_task);
+-	rcuwait_init(&event->pending_work_wait);
+ 
+ 	mutex_init(&event->mmap_mutex);
+ 	raw_spin_lock_init(&event->addr_filters.lock);
+@@ -12290,7 +12337,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
+ 	 * See perf_output_read().
+ 	 */
+ 	if (has_inherit_and_sample_read(attr) && !(attr->sample_type & PERF_SAMPLE_TID))
+-		goto err_ns;
++		goto err;
+ 
+ 	if (!has_branch_stack(event))
+ 		event->attr.branch_sample_type = 0;
+@@ -12298,7 +12345,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
+ 	pmu = perf_init_event(event);
+ 	if (IS_ERR(pmu)) {
+ 		err = PTR_ERR(pmu);
+-		goto err_ns;
++		goto err;
+ 	}
+ 
+ 	/*
+@@ -12308,24 +12355,38 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
+ 	 */
+ 	if (pmu->task_ctx_nr == perf_invalid_context && (task || cgroup_fd != -1)) {
+ 		err = -EINVAL;
+-		goto err_pmu;
++		goto err;
+ 	}
+ 
+ 	if (event->attr.aux_output &&
+-	    !(pmu->capabilities & PERF_PMU_CAP_AUX_OUTPUT)) {
++	    (!(pmu->capabilities & PERF_PMU_CAP_AUX_OUTPUT) ||
++	     event->attr.aux_pause || event->attr.aux_resume)) {
+ 		err = -EOPNOTSUPP;
+-		goto err_pmu;
++		goto err;
++	}
++
++	if (event->attr.aux_pause && event->attr.aux_resume) {
++		err = -EINVAL;
++		goto err;
++	}
++
++	if (event->attr.aux_start_paused) {
++		if (!(pmu->capabilities & PERF_PMU_CAP_AUX_PAUSE)) {
++			err = -EOPNOTSUPP;
++			goto err;
++		}
++		event->hw.aux_paused = 1;
+ 	}
+ 
+ 	if (cgroup_fd != -1) {
+ 		err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader);
+ 		if (err)
+-			goto err_pmu;
++			goto err;
+ 	}
+ 
+ 	err = exclusive_event_init(event);
+ 	if (err)
+-		goto err_pmu;
++		goto err;
+ 
+ 	if (has_addr_filter(event)) {
+ 		event->addr_filter_ranges = kcalloc(pmu->nr_addr_filters,
+@@ -12333,7 +12394,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
+ 						    GFP_KERNEL);
+ 		if (!event->addr_filter_ranges) {
+ 			err = -ENOMEM;
+-			goto err_per_task;
++			goto err;
+ 		}
+ 
+ 		/*
+@@ -12358,41 +12419,22 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
+ 		if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
+ 			err = get_callchain_buffers(attr->sample_max_stack);
+ 			if (err)
+-				goto err_addr_filters;
++				goto err;
++			event->attach_state |= PERF_ATTACH_CALLCHAIN;
+ 		}
+ 	}
+ 
+ 	err = security_perf_event_alloc(event);
+ 	if (err)
+-		goto err_callchain_buffer;
++		goto err;
+ 
+ 	/* symmetric to unaccount_event() in _free_event() */
+ 	account_event(event);
+ 
+ 	return event;
+ 
+-err_callchain_buffer:
+-	if (!event->parent) {
+-		if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
+-			put_callchain_buffers();
+-	}
+-err_addr_filters:
+-	kfree(event->addr_filter_ranges);
+-
+-err_per_task:
+-	exclusive_event_destroy(event);
+-
+-err_pmu:
+-	if (is_cgroup_event(event))
+-		perf_detach_cgroup(event);
+-	if (event->destroy)
+-		event->destroy(event);
+-	module_put(pmu->module);
+-err_ns:
+-	if (event->hw.target)
+-		put_task_struct(event->hw.target);
+-	call_rcu(&event->rcu_head, free_event_rcu);
+-
++err:
++	__free_event(event);
+ 	return ERR_PTR(err);
+ }
+ 
+@@ -13112,7 +13154,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
+ 	 * Grouping is not supported for kernel events, neither is 'AUX',
+ 	 * make sure the caller's intentions are adjusted.
+ 	 */
+-	if (attr->aux_output)
++	if (attr->aux_output || attr->aux_action)
+ 		return ERR_PTR(-EINVAL);
+ 
+ 	event = perf_event_alloc(attr, cpu, task, NULL, NULL,
+@@ -13359,8 +13401,7 @@ perf_event_exit_event(struct perf_event *event, struct perf_event_context *ctx)
+ 		 * Kick perf_poll() for is_event_hup();
+ 		 */
+ 		perf_event_wakeup(parent_event);
+-		free_event(event);
+-		put_event(parent_event);
++		put_event(event);
+ 		return;
+ 	}
+ 
+@@ -13478,13 +13519,11 @@ static void perf_free_event(struct perf_event *event,
+ 	list_del_init(&event->child_list);
+ 	mutex_unlock(&parent->child_mutex);
+ 
+-	put_event(parent);
+-
+ 	raw_spin_lock_irq(&ctx->lock);
+ 	perf_group_detach(event);
+ 	list_del_event(event, ctx);
+ 	raw_spin_unlock_irq(&ctx->lock);
+-	free_event(event);
++	put_event(event);
+ }
+ 
+ /*
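
The core.c hunks above replace the err_callchain_buffer/err_addr_filters/err_per_task/
err_pmu/err_ns unwind ladder with a single __free_event() that checks attach_state bits
such as PERF_ATTACH_CALLCHAIN and PERF_ATTACH_EXCLUSIVE, so teardown only undoes the
steps that actually completed. A hedged, self-contained sketch of the pattern (all
names here are stand-ins, not the perf API):

#include <errno.h>
#include <stdlib.h>

#define ATTACH_CALLCHAIN 0x01		/* stand-in for PERF_ATTACH_CALLCHAIN */
#define ATTACH_EXCLUSIVE 0x02		/* stand-in for PERF_ATTACH_EXCLUSIVE */

struct event {
	unsigned int attach_state;
	void *buffers;
};

static int setup_buffers(struct event *e)
{
	e->buffers = malloc(64);
	return e->buffers ? 0 : -ENOMEM;
}

/* One teardown routine serves both the allocation error path and normal
 * free: attach_state records which setup steps actually completed. */
static void free_event(struct event *e)
{
	if (e->attach_state & ATTACH_EXCLUSIVE)
		e->attach_state &= ~ATTACH_EXCLUSIVE;	/* undo exclusivity */
	if (e->attach_state & ATTACH_CALLCHAIN)
		free(e->buffers);			/* undo buffer setup */
}

static int alloc_event(struct event *e)
{
	int err = setup_buffers(e);

	if (err)
		goto out_err;
	e->attach_state |= ATTACH_CALLCHAIN;

	e->attach_state |= ATTACH_EXCLUSIVE;		/* second setup step */
	return 0;
out_err:
	free_event(e);	/* safe: only undoes what attach_state records */
	return err;
}

int main(void)
{
	struct event e = { 0, 0 };

	if (!alloc_event(&e))
		free_event(&e);
	return 0;
}
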
+diff --git a/kernel/events/internal.h b/kernel/events/internal.h
+index e072d995d670f7..249288d82b8dcf 100644
+--- a/kernel/events/internal.h
++++ b/kernel/events/internal.h
+@@ -52,6 +52,7 @@ struct perf_buffer {
+ 	void				(*free_aux)(void *);
+ 	refcount_t			aux_refcount;
+ 	int				aux_in_sampling;
++	int				aux_in_pause_resume;
+ 	void				**aux_pages;
+ 	void				*aux_priv;
+ 
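
The aux_in_pause_resume field added above backs the recursion guard in
perf_event_aux_pause(): the flag is tested and set with interrupts disabled, so a
pause/resume request that interrupts another one (for example from NMI context) backs
off instead of nesting. Roughly, and with stand-in names:

#include <stdbool.h>

struct buf {
	bool in_pause_resume;		/* stand-in for rb->aux_in_pause_resume */
};

static void aux_pause(struct buf *rb, bool pause)
{
	/* the kernel runs this with interrupts off (scoped_guard (irqsave)) */
	if (rb->in_pause_resume)	/* READ_ONCE() upstream */
		return;			/* an NMI-context caller beat us here */

	rb->in_pause_resume = true;	/* WRITE_ONCE() + barrier() upstream */
	/* ... pmu->stop(PERF_EF_PAUSE) or pmu->start(PERF_EF_RESUME) ... */
	(void)pause;
	rb->in_pause_resume = false;
}

int main(void)
{
	struct buf rb = { false };

	aux_pause(&rb, true);
	aux_pause(&rb, false);
	return 0;
}
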
+diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
+index 536bd471557f5b..53c76dc71f3f57 100644
+--- a/kernel/locking/lockdep.c
++++ b/kernel/locking/lockdep.c
+@@ -6223,6 +6223,9 @@ static void zap_class(struct pending_free *pf, struct lock_class *class)
+ 		hlist_del_rcu(&class->hash_entry);
+ 		WRITE_ONCE(class->key, NULL);
+ 		WRITE_ONCE(class->name, NULL);
++		/* Class allocated but not used, -1 in nr_unused_locks */
++		if (class->usage_mask == 0)
++			debug_atomic_dec(nr_unused_locks);
+ 		nr_lock_classes--;
+ 		__clear_bit(class - lock_classes, lock_classes_in_use);
+ 		if (class - lock_classes == max_lock_class_idx)
+diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
+index b483fcea811b1a..d8bad1eeedd3e5 100644
+--- a/kernel/power/hibernate.c
++++ b/kernel/power/hibernate.c
+@@ -1443,10 +1443,10 @@ static const char * const comp_alg_enabled[] = {
+ static int hibernate_compressor_param_set(const char *compressor,
+ 		const struct kernel_param *kp)
+ {
+-	unsigned int sleep_flags;
+ 	int index, ret;
+ 
+-	sleep_flags = lock_system_sleep();
++	if (!mutex_trylock(&system_transition_mutex))
++		return -EBUSY;
+ 
+ 	index = sysfs_match_string(comp_alg_enabled, compressor);
+ 	if (index >= 0) {
+@@ -1458,7 +1458,7 @@ static int hibernate_compressor_param_set(const char *compressor,
+ 		ret = index;
+ 	}
+ 
+-	unlock_system_sleep(sleep_flags);
++	mutex_unlock(&system_transition_mutex);
+ 
+ 	if (ret)
+ 		pr_debug("Cannot set specified compressor %s\n",
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index 3b75f6e8410b9d..881a26e18c658b 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -2436,7 +2436,6 @@ asmlinkage __visible int _printk(const char *fmt, ...)
+ }
+ EXPORT_SYMBOL(_printk);
+ 
+-static bool pr_flush(int timeout_ms, bool reset_on_progress);
+ static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress);
+ 
+ #else /* CONFIG_PRINTK */
+@@ -2449,7 +2448,6 @@ static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progre
+ 
+ static u64 syslog_seq;
+ 
+-static bool pr_flush(int timeout_ms, bool reset_on_progress) { return true; }
+ static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress) { return true; }
+ 
+ #endif /* CONFIG_PRINTK */
+@@ -4436,7 +4434,7 @@ static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progre
+  * Context: Process context. May sleep while acquiring console lock.
+  * Return: true if all usable printers are caught up.
+  */
+-static bool pr_flush(int timeout_ms, bool reset_on_progress)
++bool pr_flush(int timeout_ms, bool reset_on_progress)
+ {
+ 	return __pr_flush(NULL, timeout_ms, reset_on_progress);
+ }
+diff --git a/kernel/reboot.c b/kernel/reboot.c
+index f05dbde2c93fe7..d6ee090eda943c 100644
+--- a/kernel/reboot.c
++++ b/kernel/reboot.c
+@@ -697,6 +697,7 @@ void kernel_power_off(void)
+ 	migrate_to_reboot_cpu();
+ 	syscore_shutdown();
+ 	pr_emerg("Power down\n");
++	pr_flush(1000, true);
+ 	kmsg_dump(KMSG_DUMP_SHUTDOWN);
+ 	machine_power_off();
+ }
+diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
+index e5cab54dfdd142..fcf968490308b9 100644
+--- a/kernel/sched/ext.c
++++ b/kernel/sched/ext.c
+@@ -4160,8 +4160,8 @@ static struct scx_dispatch_q *create_dsq(u64 dsq_id, int node)
+ 
+ 	init_dsq(dsq, dsq_id);
+ 
+-	ret = rhashtable_insert_fast(&dsq_hash, &dsq->hash_node,
+-				     dsq_hash_params);
++	ret = rhashtable_lookup_insert_fast(&dsq_hash, &dsq->hash_node,
++					    dsq_hash_params);
+ 	if (ret) {
+ 		kfree(dsq);
+ 		return ERR_PTR(ret);
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index dbd375f28ee098..90b59c627bb8e7 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -3523,16 +3523,16 @@ int ftrace_startup_subops(struct ftrace_ops *ops, struct ftrace_ops *subops, int
+ 	    ftrace_hash_empty(subops->func_hash->notrace_hash)) {
+ 		notrace_hash = EMPTY_HASH;
+ 	} else {
+-		size_bits = max(ops->func_hash->filter_hash->size_bits,
+-				subops->func_hash->filter_hash->size_bits);
++		size_bits = max(ops->func_hash->notrace_hash->size_bits,
++				subops->func_hash->notrace_hash->size_bits);
+ 		notrace_hash = alloc_ftrace_hash(size_bits);
+ 		if (!notrace_hash) {
+ 			free_ftrace_hash(filter_hash);
+ 			return -ENOMEM;
+ 		}
+ 
+-		ret = intersect_hash(&notrace_hash, ops->func_hash->filter_hash,
+-				     subops->func_hash->filter_hash);
++		ret = intersect_hash(&notrace_hash, ops->func_hash->notrace_hash,
++				     subops->func_hash->notrace_hash);
+ 		if (ret < 0) {
+ 			free_ftrace_hash(filter_hash);
+ 			free_ftrace_hash(notrace_hash);
+@@ -6848,6 +6848,7 @@ ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer)
+ 				}
+ 			}
+ 		}
++		cond_resched();
+ 	} while_for_each_ftrace_rec();
+ out:
+ 	mutex_unlock(&ftrace_lock);
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 3e252ba16d5c6e..e1ffbed8cc5eb5 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -5994,7 +5994,7 @@ static void rb_update_meta_page(struct ring_buffer_per_cpu *cpu_buffer)
+ 	meta->read = cpu_buffer->read;
+ 
+ 	/* Some archs do not have data cache coherency between kernel and user-space */
+-	flush_dcache_folio(virt_to_folio(cpu_buffer->meta_page));
++	flush_kernel_vmap_range(cpu_buffer->meta_page, PAGE_SIZE);
+ }
+ 
+ static void
+@@ -7309,7 +7309,8 @@ int ring_buffer_map_get_reader(struct trace_buffer *buffer, int cpu)
+ 
+ out:
+ 	/* Some archs do not have data cache coherency between kernel and user-space */
+-	flush_dcache_folio(virt_to_folio(cpu_buffer->reader_page->page));
++	flush_kernel_vmap_range(cpu_buffer->reader_page->page,
++				buffer->subbuf_size + BUF_PAGE_HDR_SIZE);
+ 
+ 	rb_update_meta_page(cpu_buffer);
+ 
+diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
+index 29eba68e07859d..11dea25ef880a5 100644
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -790,7 +790,9 @@ static int __ftrace_event_enable_disable(struct trace_event_file *file,
+ 				clear_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, &file->flags);
+ 			}
+ 
+-			call->class->reg(call, TRACE_REG_UNREGISTER, file);
++			ret = call->class->reg(call, TRACE_REG_UNREGISTER, file);
++
++			WARN_ON_ONCE(ret);
+ 		}
+ 		/* If in SOFT_MODE, just set the SOFT_DISABLE_BIT, else clear it */
+ 		if (file->flags & EVENT_FILE_FL_SOFT_MODE)
+diff --git a/kernel/trace/trace_events_synth.c b/kernel/trace/trace_events_synth.c
+index 24c9962c40db1a..1b9e32f6442fb5 100644
+--- a/kernel/trace/trace_events_synth.c
++++ b/kernel/trace/trace_events_synth.c
+@@ -377,7 +377,6 @@ static enum print_line_t print_synth_event(struct trace_iterator *iter,
+ 				union trace_synth_field *data = &entry->fields[n_u64];
+ 
+ 				trace_seq_printf(s, print_fmt, se->fields[i]->name,
+-						 STR_VAR_LEN_MAX,
+ 						 (char *)entry + data->as_dynamic.offset,
+ 						 i == se->n_fields - 1 ? "" : " ");
+ 				n_u64++;
+diff --git a/kernel/trace/trace_fprobe.c b/kernel/trace/trace_fprobe.c
+index 4acdab16579390..af7d6e2060d9d9 100644
+--- a/kernel/trace/trace_fprobe.c
++++ b/kernel/trace/trace_fprobe.c
+@@ -888,9 +888,15 @@ static void __find_tracepoint_module_cb(struct tracepoint *tp, struct module *mo
+ 	struct __find_tracepoint_cb_data *data = priv;
+ 
+ 	if (!data->tpoint && !strcmp(data->tp_name, tp->name)) {
+-		data->tpoint = tp;
+-		if (!data->mod)
++		/* If module is not specified, try getting module refcount. */
++		if (!data->mod && mod) {
++			/* If failed to get refcount, ignore this tracepoint. */
++			if (!try_module_get(mod))
++				return;
++
+ 			data->mod = mod;
++		}
++		data->tpoint = tp;
+ 	}
+ }
+ 
+@@ -902,7 +908,11 @@ static void __find_tracepoint_cb(struct tracepoint *tp, void *priv)
+ 		data->tpoint = tp;
+ }
+ 
+-/* Find a tracepoint from kernel and module. */
++/*
++ * Find a tracepoint from kernel and module. If the tracepoint is on the module,
++ * the module's refcount is incremented and returned as *@tp_mod. Thus, if it is
++ * not NULL, the caller must call module_put(*tp_mod) after using the tracepoint.
++ */
+ static struct tracepoint *find_tracepoint(const char *tp_name,
+ 					  struct module **tp_mod)
+ {
+@@ -931,7 +941,10 @@ static void reenable_trace_fprobe(struct trace_fprobe *tf)
+ 	}
+ }
+ 
+-/* Find a tracepoint from specified module. */
++/*
++ * Find a tracepoint from the specified module. This does not take a reference
++ * on the module; the caller must ensure the module is not freed.
++ */
+ static struct tracepoint *find_tracepoint_in_module(struct module *mod,
+ 						    const char *tp_name)
+ {
+@@ -1167,11 +1180,6 @@ static int __trace_fprobe_create(int argc, const char *argv[])
+ 	if (is_tracepoint) {
+ 		ctx.flags |= TPARG_FL_TPOINT;
+ 		tpoint = find_tracepoint(symbol, &tp_mod);
+-		/* lock module until register this tprobe. */
+-		if (tp_mod && !try_module_get(tp_mod)) {
+-			tpoint = NULL;
+-			tp_mod = NULL;
+-		}
+ 		if (tpoint) {
+ 			ctx.funcname = kallsyms_lookup(
+ 				(unsigned long)tpoint->probestub,
+diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
+index 16a5e368e7b77c..578919962e5dff 100644
+--- a/kernel/trace/trace_probe.c
++++ b/kernel/trace/trace_probe.c
+@@ -770,6 +770,10 @@ static int check_prepare_btf_string_fetch(char *typename,
+ 
+ #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
+ 
++/*
++ * Add the entry code to store the 'argnum'th parameter and return the offset
++ * in the entry data buffer where the data will be stored.
++ */
+ static int __store_entry_arg(struct trace_probe *tp, int argnum)
+ {
+ 	struct probe_entry_arg *earg = tp->entry_arg;
+@@ -793,6 +797,20 @@ static int __store_entry_arg(struct trace_probe *tp, int argnum)
+ 		tp->entry_arg = earg;
+ 	}
+ 
++	/*
++	 * The entry code array repeats pairs of
++	 * [FETCH_OP_ARG(argnum)][FETCH_OP_ST_EDATA(offset in entry data buffer)]
++	 * and the remaining entries are filled with [FETCH_OP_END].
++	 *
++	 * To avoid fetching the same function parameter twice, we scan the entry
++	 * code array for a FETCH_OP_ARG which already fetches the 'argnum'
++	 * parameter. Entries that don't match update 'offset' so that it tracks
++	 * the last offset used.
++	 * If we reach FETCH_OP_END without a matching FETCH_OP_ARG entry, we
++	 * save a new entry with FETCH_OP_ARG and FETCH_OP_ST_EDATA, and
++	 * return its data offset so that the caller can find the data in the
++	 * entry data buffer.
++	 */
+ 	offset = 0;
+ 	for (i = 0; i < earg->size - 1; i++) {
+ 		switch (earg->code[i].op) {
+@@ -826,6 +844,16 @@ int traceprobe_get_entry_data_size(struct trace_probe *tp)
+ 	if (!earg)
+ 		return 0;
+ 
++	/*
++	 * The earg->code[] array holds an operation sequence which is run in
++	 * the entry handler.
++	 * The sequence is terminated by FETCH_OP_END, and each piece of data
++	 * is stored in the entry data buffer by FETCH_OP_ST_EDATA, which
++	 * writes it at the data buffer + its offset; all data are
++	 * "unsigned long" sized. The offset must be increased each time a
++	 * piece of data is stored. Thus we need to find the last
++	 * FETCH_OP_ST_EDATA in the code array.
++	 */
+ 	for (i = 0; i < earg->size; i++) {
+ 		switch (earg->code[i].op) {
+ 		case FETCH_OP_END:
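
To make the comments above concrete, here is a toy, pair-aligned version of the scan
they describe: reuse an existing [FETCH_OP_ARG][FETCH_OP_ST_EDATA] pair when the
parameter was already fetched, otherwise append a new pair at the first FETCH_OP_END
and hand back the next data offset. The layout and names are simplifications, not the
kernel structures:

#include <stdio.h>

enum op { OP_END, OP_ARG, OP_ST_EDATA };	/* toy FETCH_OP_* */
struct code { enum op op; int param; };

static int store_entry_arg(struct code *c, int size, int argnum)
{
	int offset = 0, i;

	for (i = 0; i < size - 1; i += 2) {
		if (c[i].op == OP_END) {	/* free slot: append a pair */
			c[i].op = OP_ARG;          c[i].param = argnum;
			c[i + 1].op = OP_ST_EDATA; c[i + 1].param = offset;
			return offset;
		}
		if (c[i].op == OP_ARG && c[i].param == argnum)
			return c[i + 1].param;	/* parameter already fetched */
		/* no match: track the last used offset */
		offset = c[i + 1].param + (int)sizeof(long);
	}
	return -1;				/* array full */
}

int main(void)
{
	struct code code[7] = { { OP_END, 0 } };

	printf("%d\n", store_entry_arg(code, 7, 2)); /* 0: new pair */
	printf("%d\n", store_entry_arg(code, 7, 5)); /* 8: appended next */
	printf("%d\n", store_entry_arg(code, 7, 2)); /* 0: reused */
	return 0;
}
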
+diff --git a/lib/sg_split.c b/lib/sg_split.c
+index 60a0babebf2efc..0f89aab5c6715b 100644
+--- a/lib/sg_split.c
++++ b/lib/sg_split.c
+@@ -88,8 +88,6 @@ static void sg_split_phys(struct sg_splitter *splitters, const int nb_splits)
+ 			if (!j) {
+ 				out_sg->offset += split->skip_sg0;
+ 				out_sg->length -= split->skip_sg0;
+-			} else {
+-				out_sg->offset = 0;
+ 			}
+ 			sg_dma_address(out_sg) = 0;
+ 			sg_dma_len(out_sg) = 0;
+diff --git a/lib/zstd/common/portability_macros.h b/lib/zstd/common/portability_macros.h
+index 0e3b2c0a527db7..0dde8bf56595ea 100644
+--- a/lib/zstd/common/portability_macros.h
++++ b/lib/zstd/common/portability_macros.h
+@@ -55,7 +55,7 @@
+ #ifndef DYNAMIC_BMI2
+   #if ((defined(__clang__) && __has_attribute(__target__)) \
+       || (defined(__GNUC__) \
+-          && (__GNUC__ >= 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)))) \
++          && (__GNUC__ >= 11))) \
+       && (defined(__x86_64__) || defined(_M_X64)) \
+       && !defined(__BMI2__)
+   #  define DYNAMIC_BMI2 1
+diff --git a/mm/damon/ops-common.c b/mm/damon/ops-common.c
+index d25d99cb5f2bb9..d511be201c4c9e 100644
+--- a/mm/damon/ops-common.c
++++ b/mm/damon/ops-common.c
+@@ -24,7 +24,7 @@ struct folio *damon_get_folio(unsigned long pfn)
+ 	struct page *page = pfn_to_online_page(pfn);
+ 	struct folio *folio;
+ 
+-	if (!page || PageTail(page))
++	if (!page)
+ 		return NULL;
+ 
+ 	folio = page_folio(page);
+diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c
+index a9ff35341d65d2..8813038abc6fb3 100644
+--- a/mm/damon/paddr.c
++++ b/mm/damon/paddr.c
+@@ -264,11 +264,14 @@ static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s)
+ 		damos_add_filter(s, filter);
+ 	}
+ 
+-	for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
++	addr = r->ar.start;
++	while (addr < r->ar.end) {
+ 		struct folio *folio = damon_get_folio(PHYS_PFN(addr));
+ 
+-		if (!folio)
++		if (!folio) {
++			addr += PAGE_SIZE;
+ 			continue;
++		}
+ 
+ 		if (damos_pa_filter_out(s, folio))
+ 			goto put_folio;
+@@ -282,6 +285,7 @@ static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s)
+ 		else
+ 			list_add(&folio->lru, &folio_list);
+ put_folio:
++		addr += folio_size(folio);
+ 		folio_put(folio);
+ 	}
+ 	if (install_young_filter)
+@@ -296,11 +300,14 @@ static inline unsigned long damon_pa_mark_accessed_or_deactivate(
+ {
+ 	unsigned long addr, applied = 0;
+ 
+-	for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
++	addr = r->ar.start;
++	while (addr < r->ar.end) {
+ 		struct folio *folio = damon_get_folio(PHYS_PFN(addr));
+ 
+-		if (!folio)
++		if (!folio) {
++			addr += PAGE_SIZE;
+ 			continue;
++		}
+ 
+ 		if (damos_pa_filter_out(s, folio))
+ 			goto put_folio;
+@@ -311,6 +318,7 @@ static inline unsigned long damon_pa_mark_accessed_or_deactivate(
+ 			folio_deactivate(folio);
+ 		applied += folio_nr_pages(folio);
+ put_folio:
++		addr += folio_size(folio);
+ 		folio_put(folio);
+ 	}
+ 	return applied * PAGE_SIZE;
+@@ -454,11 +462,14 @@ static unsigned long damon_pa_migrate(struct damon_region *r, struct damos *s)
+ 	unsigned long addr, applied;
+ 	LIST_HEAD(folio_list);
+ 
+-	for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
++	addr = r->ar.start;
++	while (addr < r->ar.end) {
+ 		struct folio *folio = damon_get_folio(PHYS_PFN(addr));
+ 
+-		if (!folio)
++		if (!folio) {
++			addr += PAGE_SIZE;
+ 			continue;
++		}
+ 
+ 		if (damos_pa_filter_out(s, folio))
+ 			goto put_folio;
+@@ -467,6 +478,7 @@ static unsigned long damon_pa_migrate(struct damon_region *r, struct damos *s)
+ 			goto put_folio;
+ 		list_add(&folio->lru, &folio_list);
+ put_folio:
++		addr += folio_size(folio);
+ 		folio_put(folio);
+ 	}
+ 	applied = damon_pa_migrate_pages(&folio_list, s->target_nid);
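
The three damon/paddr.c loops above share one fix: the cursor now advances by
folio_size() of the folio actually found instead of a fixed PAGE_SIZE, so a large
folio is visited once rather than once per base page. A toy model of the corrected
iteration:

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* toy region: one 4-page folio at 0, then single-page folios */
static unsigned long folio_size_at(unsigned long addr)
{
	return addr == 0 ? 4 * PAGE_SIZE : PAGE_SIZE;
}

int main(void)
{
	unsigned long addr = 0;
	const unsigned long end = 8 * PAGE_SIZE;

	while (addr < end) {
		unsigned long sz = folio_size_at(addr);

		printf("visit %#lx (%lu bytes)\n", addr, sz);
		addr += sz;		/* folio_size(folio) in the patch */
	}
	return 0;
}
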
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index e28e820fdb7756..ad646fe6688a49 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -4863,7 +4863,7 @@ static struct ctl_table hugetlb_table[] = {
+ 	},
+ };
+ 
+-static void hugetlb_sysctl_init(void)
++static void __init hugetlb_sysctl_init(void)
+ {
+ 	register_sysctl_init("vm", hugetlb_table);
+ }
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index fa25a022e64d71..ec1c71abe88dfd 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -879,12 +879,17 @@ static int kill_accessing_process(struct task_struct *p, unsigned long pfn,
+ 	mmap_read_lock(p->mm);
+ 	ret = walk_page_range(p->mm, 0, TASK_SIZE, &hwpoison_walk_ops,
+ 			      (void *)&priv);
++	/*
++	 * ret = 1 when CMCI wins, regardless of whether try_to_unmap()
++	 * succeeds or fails, then kill the process with SIGBUS.
++	 * ret = 0 when poison page is a clean page and it's dropped, no
++	 * SIGBUS is needed.
++	 */
+ 	if (ret == 1 && priv.tk.addr)
+ 		kill_proc(&priv.tk, pfn, flags);
+-	else
+-		ret = 0;
+ 	mmap_read_unlock(p->mm);
+-	return ret > 0 ? -EHWPOISON : -EFAULT;
++
++	return ret > 0 ? -EHWPOISON : 0;
+ }
+ 
+ /*
+diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
+index 619445096ef4a6..0a42e9a8caba2a 100644
+--- a/mm/memory_hotplug.c
++++ b/mm/memory_hotplug.c
+@@ -1801,8 +1801,7 @@ static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
+ 		if (unlikely(page_folio(page) != folio))
+ 			goto put_folio;
+ 
+-		if (folio_test_hwpoison(folio) ||
+-		    (folio_test_large(folio) && folio_test_has_hwpoisoned(folio))) {
++		if (folio_contain_hwpoisoned_page(folio)) {
+ 			if (WARN_ON(folio_test_lru(folio)))
+ 				folio_isolate_lru(folio);
+ 			if (folio_mapped(folio)) {
+diff --git a/mm/mremap.c b/mm/mremap.c
+index 1b2edd65c2a172..12af89b4342a7b 100644
+--- a/mm/mremap.c
++++ b/mm/mremap.c
+@@ -696,8 +696,8 @@ static unsigned long move_vma(struct vm_area_struct *vma,
+ 	unsigned long vm_flags = vma->vm_flags;
+ 	unsigned long new_pgoff;
+ 	unsigned long moved_len;
+-	unsigned long account_start = 0;
+-	unsigned long account_end = 0;
++	bool account_start = false;
++	bool account_end = false;
+ 	unsigned long hiwater_vm;
+ 	int err = 0;
+ 	bool need_rmap_locks;
+@@ -781,9 +781,9 @@ static unsigned long move_vma(struct vm_area_struct *vma,
+ 	if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP)) {
+ 		vm_flags_clear(vma, VM_ACCOUNT);
+ 		if (vma->vm_start < old_addr)
+-			account_start = vma->vm_start;
++			account_start = true;
+ 		if (vma->vm_end > old_addr + old_len)
+-			account_end = vma->vm_end;
++			account_end = true;
+ 	}
+ 
+ 	/*
+@@ -823,7 +823,7 @@ static unsigned long move_vma(struct vm_area_struct *vma,
+ 		/* OOM: unable to split vma, just get accounts right */
+ 		if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP))
+ 			vm_acct_memory(old_len >> PAGE_SHIFT);
+-		account_start = account_end = 0;
++		account_start = account_end = false;
+ 	}
+ 
+ 	if (vm_flags & VM_LOCKED) {
+diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
+index ae5cc42aa2087a..585a53f7b06f08 100644
+--- a/mm/page_vma_mapped.c
++++ b/mm/page_vma_mapped.c
+@@ -77,6 +77,7 @@ static bool map_pte(struct page_vma_mapped_walk *pvmw, spinlock_t **ptlp)
+  * mapped at the @pvmw->pte
+  * @pvmw: page_vma_mapped_walk struct, includes a pair pte and pfn range
+  * for checking
++ * @pte_nr: the number of small pages described by @pvmw->pte.
+  *
+  * page_vma_mapped_walk() found a place where pfn range is *potentially*
+  * mapped. check_pte() has to validate this.
+@@ -93,7 +94,7 @@ static bool map_pte(struct page_vma_mapped_walk *pvmw, spinlock_t **ptlp)
+  * Otherwise, return false.
+  *
+  */
+-static bool check_pte(struct page_vma_mapped_walk *pvmw)
++static bool check_pte(struct page_vma_mapped_walk *pvmw, unsigned long pte_nr)
+ {
+ 	unsigned long pfn;
+ 	pte_t ptent = ptep_get(pvmw->pte);
+@@ -126,7 +127,11 @@ static bool check_pte(struct page_vma_mapped_walk *pvmw)
+ 		pfn = pte_pfn(ptent);
+ 	}
+ 
+-	return (pfn - pvmw->pfn) < pvmw->nr_pages;
++	if ((pfn + pte_nr - 1) < pvmw->pfn)
++		return false;
++	if (pfn > (pvmw->pfn + pvmw->nr_pages - 1))
++		return false;
++	return true;
+ }
+ 
+ /* Returns true if the two ranges overlap.  Careful to not overflow. */
+@@ -201,7 +206,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
+ 			return false;
+ 
+ 		pvmw->ptl = huge_pte_lock(hstate, mm, pvmw->pte);
+-		if (!check_pte(pvmw))
++		if (!check_pte(pvmw, pages_per_huge_page(hstate)))
+ 			return not_found(pvmw);
+ 		return true;
+ 	}
+@@ -284,7 +289,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
+ 			goto next_pte;
+ 		}
+ this_pte:
+-		if (check_pte(pvmw))
++		if (check_pte(pvmw, 1))
+ 			return true;
+ next_pte:
+ 		do {
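
The reworked check_pte() above is an overflow-safe closed-interval overlap test
between the pfn range described by the PTE, [pfn, pfn + pte_nr - 1], and the range
being walked, [pvmw->pfn, pvmw->pfn + nr_pages - 1]; the old single comparison missed
hugetlb entries that cover the target range without starting inside it. The same test
lifted into a stand-alone helper:

#include <stdbool.h>

/* true iff [a, a + a_nr - 1] and [b, b + b_nr - 1] intersect */
bool pfn_ranges_overlap(unsigned long a, unsigned long a_nr,
			unsigned long b, unsigned long b_nr)
{
	if (a + a_nr - 1 < b)	/* first range ends before second starts */
		return false;
	if (a > b + b_nr - 1)	/* first range starts after second ends */
		return false;
	return true;
}
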
+diff --git a/mm/rmap.c b/mm/rmap.c
+index 73d5998677d40f..674362de029d2a 100644
+--- a/mm/rmap.c
++++ b/mm/rmap.c
+@@ -2488,7 +2488,7 @@ static bool folio_make_device_exclusive(struct folio *folio,
+ 	 * Restrict to anonymous folios for now to avoid potential writeback
+ 	 * issues.
+ 	 */
+-	if (!folio_test_anon(folio))
++	if (!folio_test_anon(folio) || folio_test_hugetlb(folio))
+ 		return false;
+ 
+ 	rmap_walk(folio, &rwc);
+diff --git a/mm/shmem.c b/mm/shmem.c
+index 5960e5035f9835..88fd6e2a2dcf8a 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -3042,8 +3042,7 @@ shmem_write_begin(struct file *file, struct address_space *mapping,
+ 	if (ret)
+ 		return ret;
+ 
+-	if (folio_test_hwpoison(folio) ||
+-	    (folio_test_large(folio) && folio_test_has_hwpoisoned(folio))) {
++	if (folio_contain_hwpoisoned_page(folio)) {
+ 		folio_unlock(folio);
+ 		folio_put(folio);
+ 		return -EIO;
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index 77d015d5db0c5b..39b3c7f35ea85c 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -7557,7 +7557,7 @@ int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
+ 		return NODE_RECLAIM_NOSCAN;
+ 
+ 	ret = __node_reclaim(pgdat, gfp_mask, order);
+-	clear_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags);
++	clear_bit_unlock(PGDAT_RECLAIM_LOCKED, &pgdat->flags);
+ 
+ 	if (ret)
+ 		count_vm_event(PGSCAN_ZONE_RECLAIM_SUCCESS);
+diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
+index 458040e8a0e0be..9184cf7eb12864 100644
+--- a/net/8021q/vlan_dev.c
++++ b/net/8021q/vlan_dev.c
+@@ -273,17 +273,6 @@ static int vlan_dev_open(struct net_device *dev)
+ 			goto out;
+ 	}
+ 
+-	if (dev->flags & IFF_ALLMULTI) {
+-		err = dev_set_allmulti(real_dev, 1);
+-		if (err < 0)
+-			goto del_unicast;
+-	}
+-	if (dev->flags & IFF_PROMISC) {
+-		err = dev_set_promiscuity(real_dev, 1);
+-		if (err < 0)
+-			goto clear_allmulti;
+-	}
+-
+ 	ether_addr_copy(vlan->real_dev_addr, real_dev->dev_addr);
+ 
+ 	if (vlan->flags & VLAN_FLAG_GVRP)
+@@ -297,12 +286,6 @@ static int vlan_dev_open(struct net_device *dev)
+ 		netif_carrier_on(dev);
+ 	return 0;
+ 
+-clear_allmulti:
+-	if (dev->flags & IFF_ALLMULTI)
+-		dev_set_allmulti(real_dev, -1);
+-del_unicast:
+-	if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr))
+-		dev_uc_del(real_dev, dev->dev_addr);
+ out:
+ 	netif_carrier_off(dev);
+ 	return err;
+@@ -315,10 +298,6 @@ static int vlan_dev_stop(struct net_device *dev)
+ 
+ 	dev_mc_unsync(real_dev, dev);
+ 	dev_uc_unsync(real_dev, dev);
+-	if (dev->flags & IFF_ALLMULTI)
+-		dev_set_allmulti(real_dev, -1);
+-	if (dev->flags & IFF_PROMISC)
+-		dev_set_promiscuity(real_dev, -1);
+ 
+ 	if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr))
+ 		dev_uc_del(real_dev, dev->dev_addr);
+@@ -490,12 +469,10 @@ static void vlan_dev_change_rx_flags(struct net_device *dev, int change)
+ {
+ 	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+ 
+-	if (dev->flags & IFF_UP) {
+-		if (change & IFF_ALLMULTI)
+-			dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1);
+-		if (change & IFF_PROMISC)
+-			dev_set_promiscuity(real_dev, dev->flags & IFF_PROMISC ? 1 : -1);
+-	}
++	if (change & IFF_ALLMULTI)
++		dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1);
++	if (change & IFF_PROMISC)
++		dev_set_promiscuity(real_dev, dev->flags & IFF_PROMISC ? 1 : -1);
+ }
+ 
+ static void vlan_dev_set_rx_mode(struct net_device *vlan_dev)
+diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
+index 7b2b04d6b85630..cb4d47ae129e8b 100644
+--- a/net/bluetooth/hci_sync.c
++++ b/net/bluetooth/hci_sync.c
+@@ -3720,6 +3720,9 @@ static int hci_read_local_name_sync(struct hci_dev *hdev)
+ /* Read Voice Setting */
+ static int hci_read_voice_setting_sync(struct hci_dev *hdev)
+ {
++	if (!read_voice_setting_capable(hdev))
++		return 0;
++
+ 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_VOICE_SETTING,
+ 				     0, NULL, HCI_CMD_TIMEOUT);
+ }
+@@ -4153,7 +4156,8 @@ static int hci_read_page_scan_type_sync(struct hci_dev *hdev)
+ 	 * support the Read Page Scan Type command. Check support for
+ 	 * this command in the bit mask of supported commands.
+ 	 */
+-	if (!(hdev->commands[13] & 0x01))
++	if (!(hdev->commands[13] & 0x01) ||
++	    test_bit(HCI_QUIRK_BROKEN_READ_PAGE_SCAN_TYPE, &hdev->quirks))
+ 		return 0;
+ 
+ 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_PAGE_SCAN_TYPE,
+diff --git a/net/core/filter.c b/net/core/filter.c
+index a2f990bf51e5e1..790345c2546b7b 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -218,24 +218,36 @@ BPF_CALL_3(bpf_skb_get_nlattr_nest, struct sk_buff *, skb, u32, a, u32, x)
+ 	return 0;
+ }
+ 
++static int bpf_skb_load_helper_convert_offset(const struct sk_buff *skb, int offset)
++{
++	if (likely(offset >= 0))
++		return offset;
++
++	if (offset >= SKF_NET_OFF)
++		return offset - SKF_NET_OFF + skb_network_offset(skb);
++
++	if (offset >= SKF_LL_OFF && skb_mac_header_was_set(skb))
++		return offset - SKF_LL_OFF + skb_mac_offset(skb);
++
++	return INT_MIN;
++}
++
+ BPF_CALL_4(bpf_skb_load_helper_8, const struct sk_buff *, skb, const void *,
+ 	   data, int, headlen, int, offset)
+ {
+-	u8 tmp, *ptr;
++	u8 tmp;
+ 	const int len = sizeof(tmp);
+ 
+-	if (offset >= 0) {
+-		if (headlen - offset >= len)
+-			return *(u8 *)(data + offset);
+-		if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
+-			return tmp;
+-	} else {
+-		ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len);
+-		if (likely(ptr))
+-			return *(u8 *)ptr;
+-	}
++	offset = bpf_skb_load_helper_convert_offset(skb, offset);
++	if (offset == INT_MIN)
++		return -EFAULT;
+ 
+-	return -EFAULT;
++	if (headlen - offset >= len)
++		return *(u8 *)(data + offset);
++	if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
++		return tmp;
++	else
++		return -EFAULT;
+ }
+ 
+ BPF_CALL_2(bpf_skb_load_helper_8_no_cache, const struct sk_buff *, skb,
+@@ -248,21 +260,19 @@ BPF_CALL_2(bpf_skb_load_helper_8_no_cache, const struct sk_buff *, skb,
+ BPF_CALL_4(bpf_skb_load_helper_16, const struct sk_buff *, skb, const void *,
+ 	   data, int, headlen, int, offset)
+ {
+-	__be16 tmp, *ptr;
++	__be16 tmp;
+ 	const int len = sizeof(tmp);
+ 
+-	if (offset >= 0) {
+-		if (headlen - offset >= len)
+-			return get_unaligned_be16(data + offset);
+-		if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
+-			return be16_to_cpu(tmp);
+-	} else {
+-		ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len);
+-		if (likely(ptr))
+-			return get_unaligned_be16(ptr);
+-	}
++	offset = bpf_skb_load_helper_convert_offset(skb, offset);
++	if (offset == INT_MIN)
++		return -EFAULT;
+ 
+-	return -EFAULT;
++	if (headlen - offset >= len)
++		return get_unaligned_be16(data + offset);
++	if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
++		return be16_to_cpu(tmp);
++	else
++		return -EFAULT;
+ }
+ 
+ BPF_CALL_2(bpf_skb_load_helper_16_no_cache, const struct sk_buff *, skb,
+@@ -275,21 +285,19 @@ BPF_CALL_2(bpf_skb_load_helper_16_no_cache, const struct sk_buff *, skb,
+ BPF_CALL_4(bpf_skb_load_helper_32, const struct sk_buff *, skb, const void *,
+ 	   data, int, headlen, int, offset)
+ {
+-	__be32 tmp, *ptr;
++	__be32 tmp;
+ 	const int len = sizeof(tmp);
+ 
+-	if (likely(offset >= 0)) {
+-		if (headlen - offset >= len)
+-			return get_unaligned_be32(data + offset);
+-		if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
+-			return be32_to_cpu(tmp);
+-	} else {
+-		ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len);
+-		if (likely(ptr))
+-			return get_unaligned_be32(ptr);
+-	}
++	offset = bpf_skb_load_helper_convert_offset(skb, offset);
++	if (offset == INT_MIN)
++		return -EFAULT;
+ 
+-	return -EFAULT;
++	if (headlen - offset >= len)
++		return get_unaligned_be32(data + offset);
++	if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
++		return be32_to_cpu(tmp);
++	else
++		return -EFAULT;
+ }
+ 
+ BPF_CALL_2(bpf_skb_load_helper_32_no_cache, const struct sk_buff *, skb,
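
bpf_skb_load_helper_convert_offset() above centralizes the classic-BPF magic-offset
handling that the three load helpers used to open-code via
bpf_internal_load_pointer_neg_helper(): negative offsets at or above SKF_NET_OFF (or
SKF_LL_OFF) are rebased onto the network (or link-layer) header, and anything
unresolvable becomes INT_MIN, which the callers map to -EFAULT. A stand-alone model
using the real uapi constants but stand-in header offsets:

#include <limits.h>
#include <stdio.h>

#define SKF_NET_OFF (-0x100000)	/* from uapi/linux/filter.h */
#define SKF_LL_OFF  (-0x200000)

static int convert_offset(int offset, int net_off, int mac_off, int has_mac)
{
	if (offset >= 0)
		return offset;
	if (offset >= SKF_NET_OFF)
		return offset - SKF_NET_OFF + net_off;
	if (offset >= SKF_LL_OFF && has_mac)
		return offset - SKF_LL_OFF + mac_off;
	return INT_MIN;		/* callers turn this into -EFAULT */
}

int main(void)
{
	/* byte 9 of the IP header (protocol) with the net header at 14 */
	printf("%d\n", convert_offset(SKF_NET_OFF + 9, 14, 0, 1));		/* 23 */
	/* link-layer access without a MAC header set fails */
	printf("%d\n", convert_offset(SKF_LL_OFF + 6, 14, 0, 0) == INT_MIN);	/* 1 */
	return 0;
}
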
+diff --git a/net/core/page_pool.c b/net/core/page_pool.c
+index a813d30d213536..7b20f6fcb82c02 100644
+--- a/net/core/page_pool.c
++++ b/net/core/page_pool.c
+@@ -1066,7 +1066,13 @@ static void page_pool_release_retry(struct work_struct *wq)
+ 	int inflight;
+ 
+ 	inflight = page_pool_release(pool);
+-	if (!inflight)
++	/* In rare cases, a driver bug may cause inflight to go negative.
++	 * Don't reschedule release if inflight is 0 or negative.
++	 * - If 0, the page_pool has been destroyed.
++	 * - If negative, we will never recover.
++	 * In both cases no reschedule is necessary.
++	 */
++	if (inflight <= 0)
+ 		return;
+ 
+ 	/* Periodic warning for page pools the user can't see */
+diff --git a/net/core/page_pool_user.c b/net/core/page_pool_user.c
+index 48335766c1bfd6..8d31c71bea1a39 100644
+--- a/net/core/page_pool_user.c
++++ b/net/core/page_pool_user.c
+@@ -353,7 +353,7 @@ void page_pool_unlist(struct page_pool *pool)
+ int page_pool_check_memory_provider(struct net_device *dev,
+ 				    struct netdev_rx_queue *rxq)
+ {
+-	struct net_devmem_dmabuf_binding *binding = rxq->mp_params.mp_priv;
++	void *binding = rxq->mp_params.mp_priv;
+ 	struct page_pool *pool;
+ 	struct hlist_node *n;
+ 
+diff --git a/net/core/sock.c b/net/core/sock.c
+index a83f64a1d96a29..0842dc9189bf80 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -2107,6 +2107,8 @@ int sk_getsockopt(struct sock *sk, int level, int optname,
+  */
+ static inline void sock_lock_init(struct sock *sk)
+ {
++	sk_owner_clear(sk);
++
+ 	if (sk->sk_kern_sock)
+ 		sock_lock_init_class_and_name(
+ 			sk,
+@@ -2203,6 +2205,9 @@ static void sk_prot_free(struct proto *prot, struct sock *sk)
+ 	cgroup_sk_free(&sk->sk_cgrp_data);
+ 	mem_cgroup_sk_free(sk);
+ 	security_sk_free(sk);
++
++	sk_owner_put(sk);
++
+ 	if (slab != NULL)
+ 		kmem_cache_free(slab, sk);
+ 	else
+diff --git a/net/ethtool/netlink.c b/net/ethtool/netlink.c
+index e233dfc8ca4bec..a52be67139d0ac 100644
+--- a/net/ethtool/netlink.c
++++ b/net/ethtool/netlink.c
+@@ -490,7 +490,7 @@ static int ethnl_default_doit(struct sk_buff *skb, struct genl_info *info)
+ 	ret = ops->prepare_data(req_info, reply_data, info);
+ 	rtnl_unlock();
+ 	if (ret < 0)
+-		goto err_cleanup;
++		goto err_dev;
+ 	ret = ops->reply_size(req_info, reply_data);
+ 	if (ret < 0)
+ 		goto err_cleanup;
+@@ -548,7 +548,7 @@ static int ethnl_default_dump_one(struct sk_buff *skb, struct net_device *dev,
+ 	ret = ctx->ops->prepare_data(ctx->req_info, ctx->reply_data, info);
+ 	rtnl_unlock();
+ 	if (ret < 0)
+-		goto out;
++		goto out_cancel;
+ 	ret = ethnl_fill_reply_header(skb, dev, ctx->ops->hdr_attr);
+ 	if (ret < 0)
+ 		goto out;
+@@ -557,6 +557,7 @@ static int ethnl_default_dump_one(struct sk_buff *skb, struct net_device *dev,
+ out:
+ 	if (ctx->ops->cleanup_data)
+ 		ctx->ops->cleanup_data(ctx->reply_data);
++out_cancel:
+ 	ctx->reply_data->dev = NULL;
+ 	if (ret < 0)
+ 		genlmsg_cancel(skb, ehdr);
+@@ -760,7 +761,7 @@ static void ethnl_default_notify(struct net_device *dev, unsigned int cmd,
+ 	ethnl_init_reply_data(reply_data, ops, dev);
+ 	ret = ops->prepare_data(req_info, reply_data, &info);
+ 	if (ret < 0)
+-		goto err_cleanup;
++		goto err_rep;
+ 	ret = ops->reply_size(req_info, reply_data);
+ 	if (ret < 0)
+ 		goto err_cleanup;
+@@ -795,6 +796,7 @@ static void ethnl_default_notify(struct net_device *dev, unsigned int cmd,
+ err_cleanup:
+ 	if (ops->cleanup_data)
+ 		ops->cleanup_data(reply_data);
++err_rep:
+ 	kfree(reply_data);
+ 	kfree(req_info);
+ 	return;
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 987492dcb07ca8..bae8ece3e881e0 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -470,10 +470,10 @@ void fib6_select_path(const struct net *net, struct fib6_result *res,
+ 		goto out;
+ 
+ 	hash = fl6->mp_hash;
+-	if (hash <= atomic_read(&first->fib6_nh->fib_nh_upper_bound) &&
+-	    rt6_score_route(first->fib6_nh, first->fib6_flags, oif,
+-			    strict) >= 0) {
+-		match = first;
++	if (hash <= atomic_read(&first->fib6_nh->fib_nh_upper_bound)) {
++		if (rt6_score_route(first->fib6_nh, first->fib6_flags, oif,
++				    strict) >= 0)
++			match = first;
+ 		goto out;
+ 	}
+ 
+diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
+index 02b5476a4376c0..a0710ae0e7a499 100644
+--- a/net/mac80211/debugfs.c
++++ b/net/mac80211/debugfs.c
+@@ -499,6 +499,7 @@ static const char *hw_flag_names[] = {
+ 	FLAG(DISALLOW_PUNCTURING),
+ 	FLAG(DISALLOW_PUNCTURING_5GHZ),
+ 	FLAG(HANDLES_QUIET_CSA),
++	FLAG(STRICT),
+ #undef FLAG
+ };
+ 
+@@ -531,6 +532,46 @@ static ssize_t hwflags_read(struct file *file, char __user *user_buf,
+ 	return rv;
+ }
+ 
++static ssize_t hwflags_write(struct file *file, const char __user *user_buf,
++			     size_t count, loff_t *ppos)
++{
++	struct ieee80211_local *local = file->private_data;
++	char buf[100];
++	int val;
++
++	if (count >= sizeof(buf))
++		return -EINVAL;
++
++	if (copy_from_user(buf, user_buf, count))
++		return -EFAULT;
++
++	if (count && buf[count - 1] == '\n')
++		buf[count - 1] = '\0';
++	else
++		buf[count] = '\0';
++
++	if (sscanf(buf, "strict=%d", &val) == 1) {
++		switch (val) {
++		case 0:
++			ieee80211_hw_set(&local->hw, STRICT);
++			return count;
++		case 1:
++			__clear_bit(IEEE80211_HW_STRICT, local->hw.flags);
++			return count;
++		default:
++			return -EINVAL;
++		}
++	}
++
++	return -EINVAL;
++}
++
++static const struct file_operations hwflags_ops = {
++	.open = simple_open,
++	.read = hwflags_read,
++	.write = hwflags_write,
++};
++
+ static ssize_t misc_read(struct file *file, char __user *user_buf,
+ 			 size_t count, loff_t *ppos)
+ {
+@@ -581,7 +622,6 @@ static ssize_t queues_read(struct file *file, char __user *user_buf,
+ 	return simple_read_from_buffer(user_buf, count, ppos, buf, res);
+ }
+ 
+-DEBUGFS_READONLY_FILE_OPS(hwflags);
+ DEBUGFS_READONLY_FILE_OPS(queues);
+ DEBUGFS_READONLY_FILE_OPS(misc);
+ 
+@@ -659,7 +699,7 @@ void debugfs_hw_add(struct ieee80211_local *local)
+ #ifdef CONFIG_PM
+ 	DEBUGFS_ADD_MODE(reset, 0200);
+ #endif
+-	DEBUGFS_ADD(hwflags);
++	DEBUGFS_ADD_MODE(hwflags, 0600);
+ 	DEBUGFS_ADD(user_power);
+ 	DEBUGFS_ADD(power);
+ 	DEBUGFS_ADD(hw_conf);
+diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
+index 8bbfa45e1796df..dbcd75c5d778e6 100644
+--- a/net/mac80211/iface.c
++++ b/net/mac80211/iface.c
+@@ -8,7 +8,7 @@
+  * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
+  * Copyright 2013-2014  Intel Mobile Communications GmbH
+  * Copyright (c) 2016        Intel Deutschland GmbH
+- * Copyright (C) 2018-2024 Intel Corporation
++ * Copyright (C) 2018-2025 Intel Corporation
+  */
+ #include <linux/slab.h>
+ #include <linux/kernel.h>
+@@ -812,6 +812,9 @@ static void ieee80211_set_multicast_list(struct net_device *dev)
+  */
+ static void ieee80211_teardown_sdata(struct ieee80211_sub_if_data *sdata)
+ {
++	if (WARN_ON(!list_empty(&sdata->work.entry)))
++		wiphy_work_cancel(sdata->local->hw.wiphy, &sdata->work);
++
+ 	/* free extra data */
+ 	ieee80211_free_keys(sdata, false);
+ 
+diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
+index 579d0f24ac9d61..2922a9fec950dd 100644
+--- a/net/mac80211/mesh_hwmp.c
++++ b/net/mac80211/mesh_hwmp.c
+@@ -367,6 +367,12 @@ u32 airtime_link_metric_get(struct ieee80211_local *local,
+ 	return (u32)result;
+ }
+ 
++/* Check that the first metric is at least 10% better than the second one */
++static bool is_metric_better(u32 x, u32 y)
++{
++	return (x < y) && (x < (y - x / 10));
++}
++
+ /**
+  * hwmp_route_info_get - Update routing info to originator and transmitter
+  *
+@@ -458,8 +464,8 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
+ 				    (mpath->sn == orig_sn &&
+ 				     (rcu_access_pointer(mpath->next_hop) !=
+ 						      sta ?
+-					      mult_frac(new_metric, 10, 9) :
+-					      new_metric) >= mpath->metric)) {
++					      !is_metric_better(new_metric, mpath->metric) :
++					      new_metric >= mpath->metric))) {
+ 					process = false;
+ 					fresh_info = false;
+ 				}
+@@ -533,8 +539,8 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
+ 			if ((mpath->flags & MESH_PATH_FIXED) ||
+ 			    ((mpath->flags & MESH_PATH_ACTIVE) &&
+ 			     ((rcu_access_pointer(mpath->next_hop) != sta ?
+-				       mult_frac(last_hop_metric, 10, 9) :
+-				       last_hop_metric) > mpath->metric)))
++				      !is_metric_better(last_hop_metric, mpath->metric) :
++				       last_hop_metric > mpath->metric))))
+ 				fresh_info = false;
+ 		} else {
+ 			mpath = mesh_path_add(sdata, ta);
+diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
+index 88751b0eb317a3..ad0d040569dcd3 100644
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -166,6 +166,9 @@ ieee80211_determine_ap_chan(struct ieee80211_sub_if_data *sdata,
+ 	bool no_vht = false;
+ 	u32 ht_cfreq;
+ 
++	if (ieee80211_hw_check(&sdata->local->hw, STRICT))
++		ignore_ht_channel_mismatch = false;
++
+ 	*chandef = (struct cfg80211_chan_def) {
+ 		.chan = channel,
+ 		.width = NL80211_CHAN_WIDTH_20_NOHT,
+@@ -385,7 +388,7 @@ ieee80211_verify_peer_he_mcs_support(struct ieee80211_sub_if_data *sdata,
+ 	 * zeroes, which is nonsense, and completely inconsistent with itself
+ 	 * (it doesn't have 8 streams). Accept the settings in this case anyway.
+ 	 */
+-	if (!ap_min_req_set)
++	if (!ieee80211_hw_check(&sdata->local->hw, STRICT) && !ap_min_req_set)
+ 		return true;
+ 
+ 	/* make sure the AP is consistent with itself
+@@ -445,7 +448,7 @@ ieee80211_verify_sta_he_mcs_support(struct ieee80211_sub_if_data *sdata,
+ 	 * zeroes, which is nonsense, and completely inconsistent with itself
+ 	 * (it doesn't have 8 streams). Accept the settings in this case anyway.
+ 	 */
+-	if (!ap_min_req_set)
++	if (!ieee80211_hw_check(&sdata->local->hw, STRICT) && !ap_min_req_set)
+ 		return true;
+ 
+ 	/* Need to go over for 80MHz, 160MHz and for 80+80 */
+@@ -1212,13 +1215,15 @@ static bool ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata,
+ 	 * Some APs apparently get confused if our capabilities are better
+ 	 * than theirs, so restrict what we advertise in the assoc request.
+ 	 */
+-	if (!(ap_vht_cap->vht_cap_info &
+-			cpu_to_le32(IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)))
+-		cap &= ~(IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
+-			 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE);
+-	else if (!(ap_vht_cap->vht_cap_info &
+-			cpu_to_le32(IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)))
+-		cap &= ~IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;
++	if (!ieee80211_hw_check(&local->hw, STRICT)) {
++		if (!(ap_vht_cap->vht_cap_info &
++				cpu_to_le32(IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)))
++			cap &= ~(IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
++				 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE);
++		else if (!(ap_vht_cap->vht_cap_info &
++				cpu_to_le32(IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)))
++			cap &= ~IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;
++	}
+ 
+ 	/*
+ 	 * If some other vif is using the MU-MIMO capability we cannot associate
+@@ -1260,14 +1265,16 @@ static bool ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata,
+ 	return mu_mimo_owner;
+ }
+ 
+-static void ieee80211_assoc_add_rates(struct sk_buff *skb,
++static void ieee80211_assoc_add_rates(struct ieee80211_local *local,
++				      struct sk_buff *skb,
+ 				      enum nl80211_chan_width width,
+ 				      struct ieee80211_supported_band *sband,
+ 				      struct ieee80211_mgd_assoc_data *assoc_data)
+ {
+ 	u32 rates;
+ 
+-	if (assoc_data->supp_rates_len) {
++	if (assoc_data->supp_rates_len &&
++	    !ieee80211_hw_check(&local->hw, STRICT)) {
+ 		/*
+ 		 * Get all rates supported by the device and the AP as
+ 		 * some APs don't like getting a superset of their rates
+@@ -1481,7 +1488,7 @@ static size_t ieee80211_assoc_link_elems(struct ieee80211_sub_if_data *sdata,
+ 		*capab |= WLAN_CAPABILITY_SPECTRUM_MGMT;
+ 
+ 	if (sband->band != NL80211_BAND_S1GHZ)
+-		ieee80211_assoc_add_rates(skb, width, sband, assoc_data);
++		ieee80211_assoc_add_rates(local, skb, width, sband, assoc_data);
+ 
+ 	if (*capab & WLAN_CAPABILITY_SPECTRUM_MGMT ||
+ 	    *capab & WLAN_CAPABILITY_RADIO_MEASURE) {
+@@ -1925,7 +1932,8 @@ static int ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
+ 	 * for some reason check it and want it to be set, set the bit for all
+ 	 * pre-EHT connections as we used to do.
+ 	 */
+-	if (link->u.mgd.conn.mode < IEEE80211_CONN_MODE_EHT)
++	if (link->u.mgd.conn.mode < IEEE80211_CONN_MODE_EHT &&
++	    !ieee80211_hw_check(&local->hw, STRICT))
+ 		capab |= WLAN_CAPABILITY_ESS;
+ 
+ 	/* add the elements for the assoc (main) link */
+@@ -4710,7 +4718,7 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link,
+ 	 * 2G/3G/4G wifi routers, reported models include the "Onda PN51T",
+ 	 * "Vodafone PocketWiFi 2", "ZTE MF60" and a similar T-Mobile device.
+ 	 */
+-	if (!is_6ghz &&
++	if (!ieee80211_hw_check(&local->hw, STRICT) && !is_6ghz &&
+ 	    ((assoc_data->wmm && !elems->wmm_param) ||
+ 	     (link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_HT &&
+ 	      (!elems->ht_cap_elem || !elems->ht_operation)) ||
+@@ -4846,6 +4854,15 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link,
+ 				bss_vht_cap = (const void *)elem->data;
+ 		}
+ 
++		if (ieee80211_hw_check(&local->hw, STRICT) &&
++		    (!bss_vht_cap || memcmp(bss_vht_cap, elems->vht_cap_elem,
++					    sizeof(*bss_vht_cap)))) {
++			rcu_read_unlock();
++			ret = false;
++			link_info(link, "VHT capabilities mismatch\n");
++			goto out;
++		}
++
+ 		ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband,
+ 						    elems->vht_cap_elem,
+ 						    bss_vht_cap, link_sta);
+diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c
+index 505445a9598faf..3caa0a9d3b3885 100644
+--- a/net/mptcp/sockopt.c
++++ b/net/mptcp/sockopt.c
+@@ -1419,6 +1419,12 @@ static int mptcp_getsockopt_v4(struct mptcp_sock *msk, int optname,
+ 	switch (optname) {
+ 	case IP_TOS:
+ 		return mptcp_put_int_option(msk, optval, optlen, READ_ONCE(inet_sk(sk)->tos));
++	case IP_FREEBIND:
++		return mptcp_put_int_option(msk, optval, optlen,
++				inet_test_bit(FREEBIND, sk));
++	case IP_TRANSPARENT:
++		return mptcp_put_int_option(msk, optval, optlen,
++				inet_test_bit(TRANSPARENT, sk));
+ 	case IP_BIND_ADDRESS_NO_PORT:
+ 		return mptcp_put_int_option(msk, optval, optlen,
+ 				inet_test_bit(BIND_ADDRESS_NO_PORT, sk));
+@@ -1430,6 +1436,26 @@ static int mptcp_getsockopt_v4(struct mptcp_sock *msk, int optname,
+ 	return -EOPNOTSUPP;
+ }
+ 
++static int mptcp_getsockopt_v6(struct mptcp_sock *msk, int optname,
++			       char __user *optval, int __user *optlen)
++{
++	struct sock *sk = (void *)msk;
++
++	switch (optname) {
++	case IPV6_V6ONLY:
++		return mptcp_put_int_option(msk, optval, optlen,
++					    sk->sk_ipv6only);
++	case IPV6_TRANSPARENT:
++		return mptcp_put_int_option(msk, optval, optlen,
++					    inet_test_bit(TRANSPARENT, sk));
++	case IPV6_FREEBIND:
++		return mptcp_put_int_option(msk, optval, optlen,
++					    inet_test_bit(FREEBIND, sk));
++	}
++
++	return -EOPNOTSUPP;
++}
++
+ static int mptcp_getsockopt_sol_mptcp(struct mptcp_sock *msk, int optname,
+ 				      char __user *optval, int __user *optlen)
+ {
+@@ -1469,6 +1495,8 @@ int mptcp_getsockopt(struct sock *sk, int level, int optname,
+ 
+ 	if (level == SOL_IP)
+ 		return mptcp_getsockopt_v4(msk, optname, optval, option);
++	if (level == SOL_IPV6)
++		return mptcp_getsockopt_v6(msk, optname, optval, option);
+ 	if (level == SOL_TCP)
+ 		return mptcp_getsockopt_sol_tcp(msk, optname, optval, option);
+ 	if (level == SOL_MPTCP)
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index b56bbee7312c48..4c2aa45c466d93 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -754,8 +754,6 @@ static bool subflow_hmac_valid(const struct request_sock *req,
+ 
+ 	subflow_req = mptcp_subflow_rsk(req);
+ 	msk = subflow_req->msk;
+-	if (!msk)
+-		return false;
+ 
+ 	subflow_generate_hmac(READ_ONCE(msk->remote_key),
+ 			      READ_ONCE(msk->local_key),
+@@ -853,12 +851,8 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
+ 
+ 	} else if (subflow_req->mp_join) {
+ 		mptcp_get_options(skb, &mp_opt);
+-		if (!(mp_opt.suboptions & OPTION_MPTCP_MPJ_ACK) ||
+-		    !subflow_hmac_valid(req, &mp_opt) ||
+-		    !mptcp_can_accept_new_subflow(subflow_req->msk)) {
+-			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC);
++		if (!(mp_opt.suboptions & OPTION_MPTCP_MPJ_ACK))
+ 			fallback = true;
+-		}
+ 	}
+ 
+ create_child:
+@@ -908,6 +902,17 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
+ 				goto dispose_child;
+ 			}
+ 
++			if (!subflow_hmac_valid(req, &mp_opt)) {
++				SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC);
++				subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT);
++				goto dispose_child;
++			}
++
++			if (!mptcp_can_accept_new_subflow(owner)) {
++				subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT);
++				goto dispose_child;
++			}
++
+ 			/* move the msk reference ownership to the subflow */
+ 			subflow_req->msk = NULL;
+ 			ctx->conn = (struct sock *)owner;
+diff --git a/net/netfilter/nft_set_pipapo_avx2.c b/net/netfilter/nft_set_pipapo_avx2.c
+index b8d3c3213efee5..c15db28c5ebc43 100644
+--- a/net/netfilter/nft_set_pipapo_avx2.c
++++ b/net/netfilter/nft_set_pipapo_avx2.c
+@@ -994,8 +994,9 @@ static int nft_pipapo_avx2_lookup_8b_16(unsigned long *map, unsigned long *fill,
+ 		NFT_PIPAPO_AVX2_BUCKET_LOAD8(5, lt,  8,  pkt[8], bsize);
+ 
+ 		NFT_PIPAPO_AVX2_AND(6, 2, 3);
++		NFT_PIPAPO_AVX2_AND(3, 4, 7);
+ 		NFT_PIPAPO_AVX2_BUCKET_LOAD8(7, lt,  9,  pkt[9], bsize);
+-		NFT_PIPAPO_AVX2_AND(0, 4, 5);
++		NFT_PIPAPO_AVX2_AND(0, 3, 5);
+ 		NFT_PIPAPO_AVX2_BUCKET_LOAD8(1, lt, 10, pkt[10], bsize);
+ 		NFT_PIPAPO_AVX2_AND(2, 6, 7);
+ 		NFT_PIPAPO_AVX2_BUCKET_LOAD8(3, lt, 11, pkt[11], bsize);
+diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
+index 998ea3b5badfce..a3bab5e27e71bb 100644
+--- a/net/sched/cls_api.c
++++ b/net/sched/cls_api.c
+@@ -2051,6 +2051,7 @@ static int tcf_fill_node(struct net *net, struct sk_buff *skb,
+ 	struct tcmsg *tcm;
+ 	struct nlmsghdr  *nlh;
+ 	unsigned char *b = skb_tail_pointer(skb);
++	int ret = -EMSGSIZE;
+ 
+ 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
+ 	if (!nlh)
+@@ -2095,11 +2096,45 @@ static int tcf_fill_node(struct net *net, struct sk_buff *skb,
+ 
+ 	return skb->len;
+ 
++cls_op_not_supp:
++	ret = -EOPNOTSUPP;
+ out_nlmsg_trim:
+ nla_put_failure:
+-cls_op_not_supp:
+ 	nlmsg_trim(skb, b);
+-	return -1;
++	return ret;
++}
++
++static struct sk_buff *tfilter_notify_prep(struct net *net,
++					   struct sk_buff *oskb,
++					   struct nlmsghdr *n,
++					   struct tcf_proto *tp,
++					   struct tcf_block *block,
++					   struct Qdisc *q, u32 parent,
++					   void *fh, int event,
++					   u32 portid, bool rtnl_held,
++					   struct netlink_ext_ack *extack)
++{
++	unsigned int size = oskb ? max(NLMSG_GOODSIZE, oskb->len) : NLMSG_GOODSIZE;
++	struct sk_buff *skb;
++	int ret;
++
++retry:
++	skb = alloc_skb(size, GFP_KERNEL);
++	if (!skb)
++		return ERR_PTR(-ENOBUFS);
++
++	ret = tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
++			    n->nlmsg_seq, n->nlmsg_flags, event, false,
++			    rtnl_held, extack);
++	if (ret <= 0) {
++		kfree_skb(skb);
++		if (ret == -EMSGSIZE) {
++			size += NLMSG_GOODSIZE;
++			goto retry;
++		}
++		return ERR_PTR(-EINVAL);
++	}
++	return skb;
+ }
+ 
+ static int tfilter_notify(struct net *net, struct sk_buff *oskb,
+@@ -2115,16 +2150,10 @@ static int tfilter_notify(struct net *net, struct sk_buff *oskb,
+ 	if (!unicast && !rtnl_notify_needed(net, n->nlmsg_flags, RTNLGRP_TC))
+ 		return 0;
+ 
+-	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
+-	if (!skb)
+-		return -ENOBUFS;
+-
+-	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
+-			  n->nlmsg_seq, n->nlmsg_flags, event,
+-			  false, rtnl_held, extack) <= 0) {
+-		kfree_skb(skb);
+-		return -EINVAL;
+-	}
++	skb = tfilter_notify_prep(net, oskb, n, tp, block, q, parent, fh, event,
++				  portid, rtnl_held, extack);
++	if (IS_ERR(skb))
++		return PTR_ERR(skb);
+ 
+ 	if (unicast)
+ 		err = rtnl_unicast(skb, net, portid);
+@@ -2147,16 +2176,11 @@ static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
+ 	if (!rtnl_notify_needed(net, n->nlmsg_flags, RTNLGRP_TC))
+ 		return tp->ops->delete(tp, fh, last, rtnl_held, extack);
+ 
+-	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
+-	if (!skb)
+-		return -ENOBUFS;
+-
+-	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
+-			  n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER,
+-			  false, rtnl_held, extack) <= 0) {
++	skb = tfilter_notify_prep(net, oskb, n, tp, block, q, parent, fh,
++				  RTM_DELTFILTER, portid, rtnl_held, extack);
++	if (IS_ERR(skb)) {
+ 		NL_SET_ERR_MSG(extack, "Failed to build del event notification");
+-		kfree_skb(skb);
+-		return -EINVAL;
++		return PTR_ERR(skb);
+ 	}
+ 
+ 	err = tp->ops->delete(tp, fh, last, rtnl_held, extack);
+diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c
+index 3e8d4fe4d91e3e..e1f6e7618debd4 100644
+--- a/net/sched/sch_codel.c
++++ b/net/sched/sch_codel.c
+@@ -65,10 +65,7 @@ static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch)
+ 			    &q->stats, qdisc_pkt_len, codel_get_enqueue_time,
+ 			    drop_func, dequeue_func);
+ 
+-	/* We cant call qdisc_tree_reduce_backlog() if our qlen is 0,
+-	 * or HTB crashes. Defer it for next round.
+-	 */
+-	if (q->stats.drop_count && sch->q.qlen) {
++	if (q->stats.drop_count) {
+ 		qdisc_tree_reduce_backlog(sch, q->stats.drop_count, q->stats.drop_len);
+ 		q->stats.drop_count = 0;
+ 		q->stats.drop_len = 0;
+diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
+index 4f908c11ba9528..778f6e5966be80 100644
+--- a/net/sched/sch_fq_codel.c
++++ b/net/sched/sch_fq_codel.c
+@@ -314,10 +314,8 @@ static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
+ 	}
+ 	qdisc_bstats_update(sch, skb);
+ 	flow->deficit -= qdisc_pkt_len(skb);
+-	/* We cant call qdisc_tree_reduce_backlog() if our qlen is 0,
+-	 * or HTB crashes. Defer it for next round.
+-	 */
+-	if (q->cstats.drop_count && sch->q.qlen) {
++
++	if (q->cstats.drop_count) {
+ 		qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
+ 					  q->cstats.drop_len);
+ 		q->cstats.drop_count = 0;
+diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
+index 65d5b59da58303..58b42dcf8f2013 100644
+--- a/net/sched/sch_sfq.c
++++ b/net/sched/sch_sfq.c
+@@ -631,6 +631,15 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt,
+ 	struct red_parms *p = NULL;
+ 	struct sk_buff *to_free = NULL;
+ 	struct sk_buff *tail = NULL;
++	unsigned int maxflows;
++	unsigned int quantum;
++	unsigned int divisor;
++	int perturb_period;
++	u8 headdrop;
++	u8 maxdepth;
++	int limit;
++	u8 flags;
++
+ 
+ 	if (opt->nla_len < nla_attr_size(sizeof(*ctl)))
+ 		return -EINVAL;
+@@ -652,39 +661,64 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt,
+ 		if (!p)
+ 			return -ENOMEM;
+ 	}
+-	if (ctl->limit == 1) {
+-		NL_SET_ERR_MSG_MOD(extack, "invalid limit");
+-		return -EINVAL;
+-	}
++
+ 	sch_tree_lock(sch);
++
++	limit = q->limit;
++	divisor = q->divisor;
++	headdrop = q->headdrop;
++	maxdepth = q->maxdepth;
++	maxflows = q->maxflows;
++	perturb_period = q->perturb_period;
++	quantum = q->quantum;
++	flags = q->flags;
++
++	/* update and validate configuration */
+ 	if (ctl->quantum)
+-		q->quantum = ctl->quantum;
+-	WRITE_ONCE(q->perturb_period, ctl->perturb_period * HZ);
++		quantum = ctl->quantum;
++	perturb_period = ctl->perturb_period * HZ;
+ 	if (ctl->flows)
+-		q->maxflows = min_t(u32, ctl->flows, SFQ_MAX_FLOWS);
++		maxflows = min_t(u32, ctl->flows, SFQ_MAX_FLOWS);
+ 	if (ctl->divisor) {
+-		q->divisor = ctl->divisor;
+-		q->maxflows = min_t(u32, q->maxflows, q->divisor);
++		divisor = ctl->divisor;
++		maxflows = min_t(u32, maxflows, divisor);
+ 	}
+ 	if (ctl_v1) {
+ 		if (ctl_v1->depth)
+-			q->maxdepth = min_t(u32, ctl_v1->depth, SFQ_MAX_DEPTH);
++			maxdepth = min_t(u32, ctl_v1->depth, SFQ_MAX_DEPTH);
+ 		if (p) {
+-			swap(q->red_parms, p);
+-			red_set_parms(q->red_parms,
++			red_set_parms(p,
+ 				      ctl_v1->qth_min, ctl_v1->qth_max,
+ 				      ctl_v1->Wlog,
+ 				      ctl_v1->Plog, ctl_v1->Scell_log,
+ 				      NULL,
+ 				      ctl_v1->max_P);
+ 		}
+-		q->flags = ctl_v1->flags;
+-		q->headdrop = ctl_v1->headdrop;
++		flags = ctl_v1->flags;
++		headdrop = ctl_v1->headdrop;
+ 	}
+ 	if (ctl->limit) {
+-		q->limit = min_t(u32, ctl->limit, q->maxdepth * q->maxflows);
+-		q->maxflows = min_t(u32, q->maxflows, q->limit);
++		limit = min_t(u32, ctl->limit, maxdepth * maxflows);
++		maxflows = min_t(u32, maxflows, limit);
+ 	}
++	if (limit == 1) {
++		sch_tree_unlock(sch);
++		kfree(p);
++		NL_SET_ERR_MSG_MOD(extack, "invalid limit");
++		return -EINVAL;
++	}
++
++	/* commit configuration */
++	q->limit = limit;
++	q->divisor = divisor;
++	q->headdrop = headdrop;
++	q->maxdepth = maxdepth;
++	q->maxflows = maxflows;
++	WRITE_ONCE(q->perturb_period, perturb_period);
++	q->quantum = quantum;
++	q->flags = flags;
++	if (p)
++		swap(q->red_parms, p);
+ 
+ 	qlen = sch->q.qlen;
+ 	while (sch->q.qlen > q->limit) {
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index 36ee34f483d703..53725ee7ba06d7 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -72,8 +72,9 @@
+ /* Forward declarations for internal helper functions. */
+ static bool sctp_writeable(const struct sock *sk);
+ static void sctp_wfree(struct sk_buff *skb);
+-static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
+-				size_t msg_len);
++static int sctp_wait_for_sndbuf(struct sctp_association *asoc,
++				struct sctp_transport *transport,
++				long *timeo_p, size_t msg_len);
+ static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p);
+ static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p);
+ static int sctp_wait_for_accept(struct sock *sk, long timeo);
+@@ -1828,7 +1829,7 @@ static int sctp_sendmsg_to_asoc(struct sctp_association *asoc,
+ 
+ 	if (sctp_wspace(asoc) <= 0 || !sk_wmem_schedule(sk, msg_len)) {
+ 		timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
+-		err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len);
++		err = sctp_wait_for_sndbuf(asoc, transport, &timeo, msg_len);
+ 		if (err)
+ 			goto err;
+ 		if (unlikely(sinfo->sinfo_stream >= asoc->stream.outcnt)) {
+@@ -9214,8 +9215,9 @@ void sctp_sock_rfree(struct sk_buff *skb)
+ 
+ 
+ /* Helper function to wait for space in the sndbuf.  */
+-static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
+-				size_t msg_len)
++static int sctp_wait_for_sndbuf(struct sctp_association *asoc,
++				struct sctp_transport *transport,
++				long *timeo_p, size_t msg_len)
+ {
+ 	struct sock *sk = asoc->base.sk;
+ 	long current_timeo = *timeo_p;
+@@ -9225,7 +9227,9 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
+ 	pr_debug("%s: asoc:%p, timeo:%ld, msg_len:%zu\n", __func__, asoc,
+ 		 *timeo_p, msg_len);
+ 
+-	/* Increment the association's refcnt.  */
++	/* Increment the transport and association's refcnt. */
++	if (transport)
++		sctp_transport_hold(transport);
+ 	sctp_association_hold(asoc);
+ 
+ 	/* Wait on the association specific sndbuf space. */
+@@ -9234,7 +9238,7 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
+ 					  TASK_INTERRUPTIBLE);
+ 		if (asoc->base.dead)
+ 			goto do_dead;
+-		if (!*timeo_p)
++		if ((!*timeo_p) || (transport && transport->dead))
+ 			goto do_nonblock;
+ 		if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING)
+ 			goto do_error;
+@@ -9259,7 +9263,9 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
+ out:
+ 	finish_wait(&asoc->wait, &wait);
+ 
+-	/* Release the association's refcnt.  */
++	/* Release the transport and association's refcnt. */
++	if (transport)
++		sctp_transport_put(transport);
+ 	sctp_association_put(asoc);
+ 
+ 	return err;
+diff --git a/net/sctp/transport.c b/net/sctp/transport.c
+index 2abe45af98e7c6..31eca29b6cfbfb 100644
+--- a/net/sctp/transport.c
++++ b/net/sctp/transport.c
+@@ -117,6 +117,8 @@ struct sctp_transport *sctp_transport_new(struct net *net,
+  */
+ void sctp_transport_free(struct sctp_transport *transport)
+ {
++	transport->dead = 1;
++
+ 	/* Try to delete the heartbeat timer.  */
+ 	if (del_timer(&transport->hb_timer))
+ 		sctp_transport_put(transport);
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
+index c3fbf0779d4ab6..aca8bdf65d729f 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
+@@ -621,7 +621,8 @@ static void __svc_rdma_free(struct work_struct *work)
+ 	/* Destroy the CM ID */
+ 	rdma_destroy_id(rdma->sc_cm_id);
+ 
+-	rpcrdma_rn_unregister(device, &rdma->sc_rn);
++	if (!test_bit(XPT_LISTENER, &rdma->sc_xprt.xpt_flags))
++		rpcrdma_rn_unregister(device, &rdma->sc_rn);
+ 	kfree(rdma);
+ }
+ 
+diff --git a/net/tipc/link.c b/net/tipc/link.c
+index 5c2088a469cea1..5689e1f4854797 100644
+--- a/net/tipc/link.c
++++ b/net/tipc/link.c
+@@ -1046,6 +1046,7 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
+ 	if (unlikely(l->backlog[imp].len >= l->backlog[imp].limit)) {
+ 		if (imp == TIPC_SYSTEM_IMPORTANCE) {
+ 			pr_warn("%s<%s>, link overflow", link_rst_msg, l->name);
++			__skb_queue_purge(list);
+ 			return -ENOBUFS;
+ 		}
+ 		rc = link_schedule_user(l, hdr);
+diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
+index 6b4b9f2749a6fd..0acf313deb01ff 100644
+--- a/net/tls/tls_main.c
++++ b/net/tls/tls_main.c
+@@ -809,6 +809,11 @@ static int tls_setsockopt(struct sock *sk, int level, int optname,
+ 	return do_tls_setsockopt(sk, optname, optval, optlen);
+ }
+ 
++static int tls_disconnect(struct sock *sk, int flags)
++{
++	return -EOPNOTSUPP;
++}
++
+ struct tls_context *tls_ctx_create(struct sock *sk)
+ {
+ 	struct inet_connection_sock *icsk = inet_csk(sk);
+@@ -904,6 +909,7 @@ static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
+ 	prot[TLS_BASE][TLS_BASE] = *base;
+ 	prot[TLS_BASE][TLS_BASE].setsockopt	= tls_setsockopt;
+ 	prot[TLS_BASE][TLS_BASE].getsockopt	= tls_getsockopt;
++	prot[TLS_BASE][TLS_BASE].disconnect	= tls_disconnect;
+ 	prot[TLS_BASE][TLS_BASE].close		= tls_sk_proto_close;
+ 
+ 	prot[TLS_SW][TLS_BASE] = prot[TLS_BASE][TLS_BASE];
+diff --git a/scripts/generate_builtin_ranges.awk b/scripts/generate_builtin_ranges.awk
+index b9ec761b3befc4..d4bd5c2b998ca2 100755
+--- a/scripts/generate_builtin_ranges.awk
++++ b/scripts/generate_builtin_ranges.awk
+@@ -282,6 +282,11 @@ ARGIND == 2 && !anchor && NF == 2 && $1 ~ /^0x/ && $2 !~ /^0x/ {
+ # section.
+ #
+ ARGIND == 2 && sect && NF == 4 && /^ [^ \*]/ && !($1 in sect_addend) {
++	# There are a few sections with constant data (without symbols) that
++	# can get resized during linking, so it is best to ignore them.
++	if ($1 ~ /^\.rodata\.(cst|str)[0-9]/)
++		next;
++
+ 	if (!($1 in sect_base)) {
+ 		sect_base[$1] = base;
+ 
+diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
+index abfdb4905ca2ac..56bf2f55d9387d 100644
+--- a/security/integrity/ima/ima.h
++++ b/security/integrity/ima/ima.h
+@@ -181,7 +181,8 @@ struct ima_kexec_hdr {
+ #define IMA_UPDATE_XATTR	1
+ #define IMA_CHANGE_ATTR		2
+ #define IMA_DIGSIG		3
+-#define IMA_MUST_MEASURE	4
++#define IMA_MAY_EMIT_TOMTOU	4
++#define IMA_EMITTED_OPENWRITERS	5
+ 
+ /* IMA integrity metadata associated with an inode */
+ struct ima_iint_cache {
+diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
+index 4b213de8dcb40c..a9aab10bebcaa1 100644
+--- a/security/integrity/ima/ima_main.c
++++ b/security/integrity/ima/ima_main.c
+@@ -129,16 +129,22 @@ static void ima_rdwr_violation_check(struct file *file,
+ 		if (atomic_read(&inode->i_readcount) && IS_IMA(inode)) {
+ 			if (!iint)
+ 				iint = ima_iint_find(inode);
++
+ 			/* IMA_MEASURE is set from reader side */
+-			if (iint && test_bit(IMA_MUST_MEASURE,
+-						&iint->atomic_flags))
++			if (iint && test_and_clear_bit(IMA_MAY_EMIT_TOMTOU,
++						       &iint->atomic_flags))
+ 				send_tomtou = true;
+ 		}
+ 	} else {
+ 		if (must_measure)
+-			set_bit(IMA_MUST_MEASURE, &iint->atomic_flags);
+-		if (inode_is_open_for_write(inode) && must_measure)
+-			send_writers = true;
++			set_bit(IMA_MAY_EMIT_TOMTOU, &iint->atomic_flags);
++
++		/* Limit number of open_writers violations */
++		if (inode_is_open_for_write(inode) && must_measure) {
++			if (!test_and_set_bit(IMA_EMITTED_OPENWRITERS,
++					      &iint->atomic_flags))
++				send_writers = true;
++		}
+ 	}
+ 
+ 	if (!send_tomtou && !send_writers)
+@@ -167,6 +173,8 @@ static void ima_check_last_writer(struct ima_iint_cache *iint,
+ 	if (atomic_read(&inode->i_writecount) == 1) {
+ 		struct kstat stat;
+ 
++		clear_bit(IMA_EMITTED_OPENWRITERS, &iint->atomic_flags);
++
+ 		update = test_and_clear_bit(IMA_UPDATE_XATTR,
+ 					    &iint->atomic_flags);
+ 		if ((iint->flags & IMA_NEW_FILE) ||
+diff --git a/security/landlock/errata.h b/security/landlock/errata.h
+new file mode 100644
+index 00000000000000..8e626accac1011
+--- /dev/null
++++ b/security/landlock/errata.h
+@@ -0,0 +1,99 @@
++/* SPDX-License-Identifier: GPL-2.0-only */
++/*
++ * Landlock - Errata information
++ *
++ * Copyright © 2025 Microsoft Corporation
++ */
++
++#ifndef _SECURITY_LANDLOCK_ERRATA_H
++#define _SECURITY_LANDLOCK_ERRATA_H
++
++#include <linux/init.h>
++
++struct landlock_erratum {
++	const int abi;
++	const u8 number;
++};
++
++/* clang-format off */
++#define LANDLOCK_ERRATUM(NUMBER) \
++	{ \
++		.abi = LANDLOCK_ERRATA_ABI, \
++		.number = NUMBER, \
++	},
++/* clang-format on */
++
++/*
++ * Some fixes may require user space to check if they are applied on the running
++ * kernel before using a specific feature.  For instance, this applies when a
++ * restriction was previously too restrictive and is now getting relaxed (for
++ * compatibility or semantic reasons).  However, non-visible changes for
++ * legitimate use (e.g. security fixes) do not require an erratum.
++ */
++static const struct landlock_erratum landlock_errata_init[] __initconst = {
++
++/*
++ * Only Sparse may not implement __has_include.  If a compiler does not
++ * implement __has_include, a warning will be printed at boot time (see
++ * setup.c).
++ */
++#ifdef __has_include
++
++#define LANDLOCK_ERRATA_ABI 1
++#if __has_include("errata/abi-1.h")
++#include "errata/abi-1.h"
++#endif
++#undef LANDLOCK_ERRATA_ABI
++
++#define LANDLOCK_ERRATA_ABI 2
++#if __has_include("errata/abi-2.h")
++#include "errata/abi-2.h"
++#endif
++#undef LANDLOCK_ERRATA_ABI
++
++#define LANDLOCK_ERRATA_ABI 3
++#if __has_include("errata/abi-3.h")
++#include "errata/abi-3.h"
++#endif
++#undef LANDLOCK_ERRATA_ABI
++
++#define LANDLOCK_ERRATA_ABI 4
++#if __has_include("errata/abi-4.h")
++#include "errata/abi-4.h"
++#endif
++#undef LANDLOCK_ERRATA_ABI
++
++#define LANDLOCK_ERRATA_ABI 5
++#if __has_include("errata/abi-5.h")
++#include "errata/abi-5.h"
++#endif
++#undef LANDLOCK_ERRATA_ABI
++
++#define LANDLOCK_ERRATA_ABI 6
++#if __has_include("errata/abi-6.h")
++#include "errata/abi-6.h"
++#endif
++#undef LANDLOCK_ERRATA_ABI
++
++/*
++ * For each new erratum, we need to include all the ABI files up to the impacted
++ * ABI to make all potential future intermediate errata easy to backport.
++ *
++ * If such change involves more than one ABI addition, then it must be in a
++ * dedicated commit with the same Fixes tag as used for the actual fix.
++ *
++ * Each commit creating a new security/landlock/errata/abi-*.h file must have a
++ * Depends-on tag to reference the commit that previously added the line to
++ * include this new file, except if the original Fixes tag is enough.
++ *
++ * Each erratum must be documented in its related ABI file, and a dedicated
++ * commit must update Documentation/userspace-api/landlock.rst to include this
++ * erratum.  This commit will not be backported.
++ */
++
++#endif
++
++	{}
++};
++
++#endif /* _SECURITY_LANDLOCK_ERRATA_H */
+diff --git a/security/landlock/errata/abi-4.h b/security/landlock/errata/abi-4.h
+new file mode 100644
+index 00000000000000..c052ee54f89f60
+--- /dev/null
++++ b/security/landlock/errata/abi-4.h
+@@ -0,0 +1,15 @@
++/* SPDX-License-Identifier: GPL-2.0-only */
++
++/**
++ * DOC: erratum_1
++ *
++ * Erratum 1: TCP socket identification
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This fix addresses an issue where IPv4 and IPv6 stream sockets (e.g., SMC,
++ * MPTCP, or SCTP) were incorrectly restricted by TCP access rights during
++ * :manpage:`bind(2)` and :manpage:`connect(2)` operations. This change ensures
++ * that only TCP sockets are subject to TCP access rights, allowing other
++ * protocols to operate without unnecessary restrictions.
++ */
++LANDLOCK_ERRATUM(1)
+diff --git a/security/landlock/errata/abi-6.h b/security/landlock/errata/abi-6.h
+new file mode 100644
+index 00000000000000..df7bc0e1fdf472
+--- /dev/null
++++ b/security/landlock/errata/abi-6.h
+@@ -0,0 +1,19 @@
++/* SPDX-License-Identifier: GPL-2.0-only */
++
++/**
++ * DOC: erratum_2
++ *
++ * Erratum 2: Scoped signal handling
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This fix addresses an issue where signal scoping was overly restrictive,
++ * preventing sandboxed threads from signaling other threads within the same
++ * process if they belonged to different domains.  Because threads are not
++ * security boundaries, user space might assume that any thread within the same
++ * process can send signals between themselves (see :manpage:`nptl(7)` and
++ * :manpage:`libpsx(3)`).  Consistent with :manpage:`ptrace(2)` behavior, direct
++ * interaction between threads of the same process should always be allowed.
++ * This change ensures that any thread is allowed to send signals to any other
++ * thread within the same process, regardless of their domain.
++ */
++LANDLOCK_ERRATUM(2)
+diff --git a/security/landlock/fs.c b/security/landlock/fs.c
+index 7adb25150488fc..511e6ae8b79c9e 100644
+--- a/security/landlock/fs.c
++++ b/security/landlock/fs.c
+@@ -27,7 +27,9 @@
+ #include <linux/mount.h>
+ #include <linux/namei.h>
+ #include <linux/path.h>
++#include <linux/pid.h>
+ #include <linux/rcupdate.h>
++#include <linux/sched/signal.h>
+ #include <linux/spinlock.h>
+ #include <linux/stat.h>
+ #include <linux/types.h>
+@@ -1623,21 +1625,46 @@ static int hook_file_ioctl_compat(struct file *file, unsigned int cmd,
+ 	return -EACCES;
+ }
+ 
+-static void hook_file_set_fowner(struct file *file)
++/*
++ * Always allow sending signals between threads of the same process.  This
++ * ensures consistency with hook_task_kill().
++ */
++static bool control_current_fowner(struct fown_struct *const fown)
+ {
+-	struct landlock_ruleset *new_dom, *prev_dom;
++	struct task_struct *p;
+ 
+ 	/*
+ 	 * Lock already held by __f_setown(), see commit 26f204380a3c ("fs: Fix
+ 	 * file_set_fowner LSM hook inconsistencies").
+ 	 */
+-	lockdep_assert_held(&file_f_owner(file)->lock);
+-	new_dom = landlock_get_current_domain();
+-	landlock_get_ruleset(new_dom);
++	lockdep_assert_held(&fown->lock);
++
++	/*
++	 * Some callers (e.g. fcntl_dirnotify) may not be in an RCU read-side
++	 * critical section.
++	 */
++	guard(rcu)();
++	p = pid_task(fown->pid, fown->pid_type);
++	if (!p)
++		return true;
++
++	return !same_thread_group(p, current);
++}
++
++static void hook_file_set_fowner(struct file *file)
++{
++	struct landlock_ruleset *prev_dom;
++	struct landlock_ruleset *new_dom = NULL;
++
++	if (control_current_fowner(file_f_owner(file))) {
++		new_dom = landlock_get_current_domain();
++		landlock_get_ruleset(new_dom);
++	}
++
+ 	prev_dom = landlock_file(file)->fown_domain;
+ 	landlock_file(file)->fown_domain = new_dom;
+ 
+-	/* Called in an RCU read-side critical section. */
++	/* May be called in an RCU read-side critical section. */
+ 	landlock_put_ruleset_deferred(prev_dom);
+ }
+ 
+diff --git a/security/landlock/setup.c b/security/landlock/setup.c
+index 28519a45b11ffb..0c85ea27e40990 100644
+--- a/security/landlock/setup.c
++++ b/security/landlock/setup.c
+@@ -6,12 +6,14 @@
+  * Copyright © 2018-2020 ANSSI
+  */
+ 
++#include <linux/bits.h>
+ #include <linux/init.h>
+ #include <linux/lsm_hooks.h>
+ #include <uapi/linux/lsm.h>
+ 
+ #include "common.h"
+ #include "cred.h"
++#include "errata.h"
+ #include "fs.h"
+ #include "net.h"
+ #include "setup.h"
+@@ -19,6 +21,11 @@
+ 
+ bool landlock_initialized __ro_after_init = false;
+ 
++const struct lsm_id landlock_lsmid = {
++	.name = LANDLOCK_NAME,
++	.id = LSM_ID_LANDLOCK,
++};
++
+ struct lsm_blob_sizes landlock_blob_sizes __ro_after_init = {
+ 	.lbs_cred = sizeof(struct landlock_cred_security),
+ 	.lbs_file = sizeof(struct landlock_file_security),
+@@ -26,13 +33,36 @@ struct lsm_blob_sizes landlock_blob_sizes __ro_after_init = {
+ 	.lbs_superblock = sizeof(struct landlock_superblock_security),
+ };
+ 
+-const struct lsm_id landlock_lsmid = {
+-	.name = LANDLOCK_NAME,
+-	.id = LSM_ID_LANDLOCK,
+-};
++int landlock_errata __ro_after_init;
++
++static void __init compute_errata(void)
++{
++	size_t i;
++
++#ifndef __has_include
++	/*
++	 * This is a safeguard to make sure the compiler implements
++	 * __has_include (see errata.h).
++	 */
++	WARN_ON_ONCE(1);
++	return;
++#endif
++
++	for (i = 0; landlock_errata_init[i].number; i++) {
++		const int prev_errata = landlock_errata;
++
++		if (WARN_ON_ONCE(landlock_errata_init[i].abi >
++				 landlock_abi_version))
++			continue;
++
++		landlock_errata |= BIT(landlock_errata_init[i].number - 1);
++		WARN_ON_ONCE(prev_errata == landlock_errata);
++	}
++}
+ 
+ static int __init landlock_init(void)
+ {
++	compute_errata();
+ 	landlock_add_cred_hooks();
+ 	landlock_add_task_hooks();
+ 	landlock_add_fs_hooks();
+diff --git a/security/landlock/setup.h b/security/landlock/setup.h
+index c4252d46d49d48..fca307c35fee5d 100644
+--- a/security/landlock/setup.h
++++ b/security/landlock/setup.h
+@@ -11,7 +11,10 @@
+ 
+ #include <linux/lsm_hooks.h>
+ 
++extern const int landlock_abi_version;
++
+ extern bool landlock_initialized;
++extern int landlock_errata;
+ 
+ extern struct lsm_blob_sizes landlock_blob_sizes;
+ extern const struct lsm_id landlock_lsmid;
+diff --git a/security/landlock/syscalls.c b/security/landlock/syscalls.c
+index c097d356fa4535..4fa2d09f657aee 100644
+--- a/security/landlock/syscalls.c
++++ b/security/landlock/syscalls.c
+@@ -159,7 +159,9 @@ static const struct file_operations ruleset_fops = {
+  *        the new ruleset.
+  * @size: Size of the pointed &struct landlock_ruleset_attr (needed for
+  *        backward and forward compatibility).
+- * @flags: Supported value: %LANDLOCK_CREATE_RULESET_VERSION.
++ * @flags: Supported value:
++ *         - %LANDLOCK_CREATE_RULESET_VERSION
++ *         - %LANDLOCK_CREATE_RULESET_ERRATA
+  *
+  * This system call enables to create a new Landlock ruleset, and returns the
+  * related file descriptor on success.
+@@ -168,6 +170,10 @@ static const struct file_operations ruleset_fops = {
+  * 0, then the returned value is the highest supported Landlock ABI version
+  * (starting at 1).
+  *
++ * If @flags is %LANDLOCK_CREATE_RULESET_ERRATA and @attr is NULL and @size is
++ * 0, then the returned value is a bitmask of fixed issues for the current
++ * Landlock ABI version.
++ *
+  * Possible returned errors are:
+  *
+  * - %EOPNOTSUPP: Landlock is supported by the kernel but disabled at boot time;
+@@ -191,9 +197,15 @@ SYSCALL_DEFINE3(landlock_create_ruleset,
+ 		return -EOPNOTSUPP;
+ 
+ 	if (flags) {
+-		if ((flags == LANDLOCK_CREATE_RULESET_VERSION) && !attr &&
+-		    !size)
+-			return LANDLOCK_ABI_VERSION;
++		if (attr || size)
++			return -EINVAL;
++
++		if (flags == LANDLOCK_CREATE_RULESET_VERSION)
++			return landlock_abi_version;
++
++		if (flags == LANDLOCK_CREATE_RULESET_ERRATA)
++			return landlock_errata;
++
+ 		return -EINVAL;
+ 	}
+ 
+@@ -234,6 +246,8 @@ SYSCALL_DEFINE3(landlock_create_ruleset,
+ 	return ruleset_fd;
+ }
+ 
++const int landlock_abi_version = LANDLOCK_ABI_VERSION;
++
+ /*
+  * Returns an owned ruleset from a FD. It is thus needed to call
+  * landlock_put_ruleset() on the return value.
+diff --git a/security/landlock/task.c b/security/landlock/task.c
+index dc7dab78392edc..4578ce6e319d83 100644
+--- a/security/landlock/task.c
++++ b/security/landlock/task.c
+@@ -13,6 +13,7 @@
+ #include <linux/lsm_hooks.h>
+ #include <linux/rcupdate.h>
+ #include <linux/sched.h>
++#include <linux/sched/signal.h>
+ #include <net/af_unix.h>
+ #include <net/sock.h>
+ 
+@@ -264,6 +265,17 @@ static int hook_task_kill(struct task_struct *const p,
+ 		/* Dealing with USB IO. */
+ 		dom = landlock_cred(cred)->domain;
+ 	} else {
++		/*
++		 * Always allow sending signals between threads of the same process.
++		 * This is required for process credential changes by the Native POSIX
++		 * Threads Library and implemented by the set*id(2) wrappers and
++		 * libcap(3) with tgkill(2).  See nptl(7) and libpsx(3).
++		 *
++		 * This exception is similar to the __ptrace_may_access() one.
++		 */
++		if (same_thread_group(p, current))
++			return 0;
++
+ 		dom = landlock_get_current_domain();
+ 	}
+ 	dom = landlock_get_applicable_domain(dom, signal_scope);
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index cb9925948175f9..25b1984898ab21 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -37,6 +37,7 @@
+ #include <linux/completion.h>
+ #include <linux/acpi.h>
+ #include <linux/pgtable.h>
++#include <linux/dmi.h>
+ 
+ #ifdef CONFIG_X86
+ /* for snoop control */
+@@ -1360,8 +1361,21 @@ static void azx_free(struct azx *chip)
+ 	if (use_vga_switcheroo(hda)) {
+ 		if (chip->disabled && hda->probe_continued)
+ 			snd_hda_unlock_devices(&chip->bus);
+-		if (hda->vga_switcheroo_registered)
++		if (hda->vga_switcheroo_registered) {
+ 			vga_switcheroo_unregister_client(chip->pci);
++
++			/* Some GPUs don't have sound, and azx_first_init fails,
++			 * leaving the device probed but non-functional. As long
++			 * as it's probed, the PCI subsystem keeps its runtime
++			 * PM status as active. Force it to suspended (as we
++			 * actually stop the chip) to allow GPU to suspend via
++			 * vga_switcheroo, and print a warning.
++			 */
++			dev_warn(&pci->dev, "GPU sound probed, but not operational: please add a quirk to driver_denylist\n");
++			pm_runtime_disable(&pci->dev);
++			pm_runtime_set_suspended(&pci->dev);
++			pm_runtime_enable(&pci->dev);
++		}
+ 	}
+ 
+ 	if (bus->chip_init) {
+@@ -2071,6 +2085,27 @@ static const struct pci_device_id driver_denylist[] = {
+ 	{}
+ };
+ 
++static struct pci_device_id driver_denylist_ideapad_z570[] = {
++	{ PCI_DEVICE_SUB(0x10de, 0x0bea, 0x0000, 0x0000) }, /* NVIDIA GF108 HDA */
++	{}
++};
++
++/* DMI-based denylist, to be used when:
++ *  - PCI subsystem IDs are zero, impossible to distinguish from valid sound cards.
++ *  - Different modifications of the same laptop use different GPU models.
++ */
++static const struct dmi_system_id driver_denylist_dmi[] = {
++	{
++		/* No HDA in NVIDIA DGPU. BIOS disables it, but quirk_nvidia_hda() reenables. */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_VERSION, "Ideapad Z570"),
++		},
++		.driver_data = &driver_denylist_ideapad_z570,
++	},
++	{}
++};
++
+ static const struct hda_controller_ops pci_hda_ops = {
+ 	.disable_msi_reset_irq = disable_msi_reset_irq,
+ 	.position_check = azx_position_check,
+@@ -2081,6 +2116,7 @@ static DECLARE_BITMAP(probed_devs, SNDRV_CARDS);
+ static int azx_probe(struct pci_dev *pci,
+ 		     const struct pci_device_id *pci_id)
+ {
++	const struct dmi_system_id *dmi;
+ 	struct snd_card *card;
+ 	struct hda_intel *hda;
+ 	struct azx *chip;
+@@ -2093,6 +2129,12 @@ static int azx_probe(struct pci_dev *pci,
+ 		return -ENODEV;
+ 	}
+ 
++	dmi = dmi_first_match(driver_denylist_dmi);
++	if (dmi && pci_match_id(dmi->driver_data, pci)) {
++		dev_info(&pci->dev, "Skipping the device on the DMI denylist\n");
++		return -ENODEV;
++	}
++
+ 	dev = find_first_zero_bit(probed_devs, SNDRV_CARDS);
+ 	if (dev >= SNDRV_CARDS)
+ 		return -ENODEV;
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 59e59fdc38f2c4..0bf833c9602155 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -4744,6 +4744,22 @@ static void alc245_fixup_hp_mute_led_coefbit(struct hda_codec *codec,
+ 	}
+ }
+ 
++static void alc245_fixup_hp_mute_led_v1_coefbit(struct hda_codec *codec,
++					  const struct hda_fixup *fix,
++					  int action)
++{
++	struct alc_spec *spec = codec->spec;
++
++	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
++		spec->mute_led_polarity = 0;
++		spec->mute_led_coef.idx = 0x0b;
++		spec->mute_led_coef.mask = 1 << 3;
++		spec->mute_led_coef.on = 1 << 3;
++		spec->mute_led_coef.off = 0;
++		snd_hda_gen_add_mute_led_cdev(codec, coef_mute_led_set);
++	}
++}
++
+ /* turn on/off mic-mute LED per capture hook by coef bit */
+ static int coef_micmute_led_set(struct led_classdev *led_cdev,
+ 				enum led_brightness brightness)
+@@ -7851,6 +7867,7 @@ enum {
+ 	ALC287_FIXUP_TAS2781_I2C,
+ 	ALC287_FIXUP_YOGA7_14ARB7_I2C,
+ 	ALC245_FIXUP_HP_MUTE_LED_COEFBIT,
++	ALC245_FIXUP_HP_MUTE_LED_V1_COEFBIT,
+ 	ALC245_FIXUP_HP_X360_MUTE_LEDS,
+ 	ALC287_FIXUP_THINKPAD_I2S_SPK,
+ 	ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD,
+@@ -10084,6 +10101,10 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = alc245_fixup_hp_mute_led_coefbit,
+ 	},
++	[ALC245_FIXUP_HP_MUTE_LED_V1_COEFBIT] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc245_fixup_hp_mute_led_v1_coefbit,
++	},
+ 	[ALC245_FIXUP_HP_X360_MUTE_LEDS] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = alc245_fixup_hp_mute_led_coefbit,
+@@ -10569,6 +10590,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8b97, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ 	SND_PCI_QUIRK(0x103c, 0x8bb3, "HP Slim OMEN", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x103c, 0x8bb4, "HP Slim OMEN", ALC287_FIXUP_CS35L41_I2C_2),
++	SND_PCI_QUIRK(0x103c, 0x8bcd, "HP Omen 16-xd0xxx", ALC245_FIXUP_HP_MUTE_LED_V1_COEFBIT),
+ 	SND_PCI_QUIRK(0x103c, 0x8bdd, "HP Envy 17", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x103c, 0x8bde, "HP Envy 17", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x103c, 0x8bdf, "HP Envy 15", ALC287_FIXUP_CS35L41_I2C_2),
+diff --git a/sound/soc/amd/ps/acp63.h b/sound/soc/amd/ps/acp63.h
+index 39208305dd6c3c..f9759c9342cf38 100644
+--- a/sound/soc/amd/ps/acp63.h
++++ b/sound/soc/amd/ps/acp63.h
+@@ -11,6 +11,7 @@
+ #define ACP_DEVICE_ID 0x15E2
+ #define ACP63_REG_START		0x1240000
+ #define ACP63_REG_END		0x125C000
++#define ACP63_PCI_REV		0x63
+ 
+ #define ACP_SOFT_RESET_SOFTRESET_AUDDONE_MASK	0x00010001
+ #define ACP_PGFSM_CNTL_POWER_ON_MASK	1
+diff --git a/sound/soc/amd/ps/pci-ps.c b/sound/soc/amd/ps/pci-ps.c
+index 5c4a0be7a78892..aec3150ecf5812 100644
+--- a/sound/soc/amd/ps/pci-ps.c
++++ b/sound/soc/amd/ps/pci-ps.c
+@@ -559,7 +559,7 @@ static int snd_acp63_probe(struct pci_dev *pci,
+ 
+ 	/* Pink Sardine device check */
+ 	switch (pci->revision) {
+-	case 0x63:
++	case ACP63_PCI_REV:
+ 		break;
+ 	default:
+ 		dev_dbg(&pci->dev, "acp63 pci device not found\n");
+diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
+index a7637056972aab..e632f16c910250 100644
+--- a/sound/soc/amd/yc/acp6x-mach.c
++++ b/sound/soc/amd/yc/acp6x-mach.c
+@@ -339,6 +339,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "83Q3"),
+ 		}
+ 	},
++	{
++		.driver_data = &acp6x_card,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "83J2"),
++		}
++	},
+ 	{
+ 		.driver_data = &acp6x_card,
+ 		.matches = {
+@@ -584,6 +591,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_VERSION, "pang13"),
+ 		}
+ 	},
++	{
++		.driver_data = &acp6x_card,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "Micro-Star International Co., Ltd."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Bravo 15 C7UCX"),
++		}
++	},
+ 	{}
+ };
+ 
+diff --git a/sound/soc/codecs/wcd937x.c b/sound/soc/codecs/wcd937x.c
+index 08fb13a334a4cc..9c1997a42334d6 100644
+--- a/sound/soc/codecs/wcd937x.c
++++ b/sound/soc/codecs/wcd937x.c
+@@ -2564,6 +2564,7 @@ static int wcd937x_soc_codec_probe(struct snd_soc_component *component)
+ 						ARRAY_SIZE(wcd9375_dapm_widgets));
+ 		if (ret < 0) {
+ 			dev_err(component->dev, "Failed to add snd_ctls\n");
++			wcd_clsh_ctrl_free(wcd937x->clsh_info);
+ 			return ret;
+ 		}
+ 
+@@ -2571,6 +2572,7 @@ static int wcd937x_soc_codec_probe(struct snd_soc_component *component)
+ 					      ARRAY_SIZE(wcd9375_audio_map));
+ 		if (ret < 0) {
+ 			dev_err(component->dev, "Failed to add routes\n");
++			wcd_clsh_ctrl_free(wcd937x->clsh_info);
+ 			return ret;
+ 		}
+ 	}
+diff --git a/sound/soc/fsl/fsl_audmix.c b/sound/soc/fsl/fsl_audmix.c
+index 3cd9a66b70a157..7981d598ba139b 100644
+--- a/sound/soc/fsl/fsl_audmix.c
++++ b/sound/soc/fsl/fsl_audmix.c
+@@ -488,11 +488,17 @@ static int fsl_audmix_probe(struct platform_device *pdev)
+ 		goto err_disable_pm;
+ 	}
+ 
+-	priv->pdev = platform_device_register_data(dev, "imx-audmix", 0, NULL, 0);
+-	if (IS_ERR(priv->pdev)) {
+-		ret = PTR_ERR(priv->pdev);
+-		dev_err(dev, "failed to register platform: %d\n", ret);
+-		goto err_disable_pm;
++	/*
++	 * If the dais property exists, register the imx-audmix card driver.
++	 * Otherwise, it should be linked by the audio graph card.
++	 */
++	if (of_find_property(pdev->dev.of_node, "dais", NULL)) {
++		priv->pdev = platform_device_register_data(dev, "imx-audmix", 0, NULL, 0);
++		if (IS_ERR(priv->pdev)) {
++			ret = PTR_ERR(priv->pdev);
++			dev_err(dev, "failed to register platform: %d\n", ret);
++			goto err_disable_pm;
++		}
+ 	}
+ 
+ 	return 0;
+diff --git a/sound/soc/intel/common/soc-acpi-intel-adl-match.c b/sound/soc/intel/common/soc-acpi-intel-adl-match.c
+index bb1324fb588e97..a68efbe98948f4 100644
+--- a/sound/soc/intel/common/soc-acpi-intel-adl-match.c
++++ b/sound/soc/intel/common/soc-acpi-intel-adl-match.c
+@@ -214,6 +214,15 @@ static const struct snd_soc_acpi_adr_device rt1316_1_group2_adr[] = {
+ 	}
+ };
+ 
++static const struct snd_soc_acpi_adr_device rt1316_2_group2_adr[] = {
++	{
++		.adr = 0x000232025D131601ull,
++		.num_endpoints = 1,
++		.endpoints = &spk_r_endpoint,
++		.name_prefix = "rt1316-2"
++	}
++};
++
+ static const struct snd_soc_acpi_adr_device rt1316_1_single_adr[] = {
+ 	{
+ 		.adr = 0x000130025D131601ull,
+@@ -547,6 +556,20 @@ static const struct snd_soc_acpi_link_adr adl_chromebook_base[] = {
+ 	{}
+ };
+ 
++static const struct snd_soc_acpi_link_adr adl_sdw_rt1316_link02[] = {
++	{
++		.mask = BIT(0),
++		.num_adr = ARRAY_SIZE(rt1316_0_group2_adr),
++		.adr_d = rt1316_0_group2_adr,
++	},
++	{
++		.mask = BIT(2),
++		.num_adr = ARRAY_SIZE(rt1316_2_group2_adr),
++		.adr_d = rt1316_2_group2_adr,
++	},
++	{}
++};
++
+ static const struct snd_soc_acpi_codecs adl_max98357a_amp = {
+ 	.num_codecs = 1,
+ 	.codecs = {"MX98357A"}
+@@ -749,6 +772,12 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_adl_sdw_machines[] = {
+ 		.drv_name = "sof_sdw",
+ 		.sof_tplg_filename = "sof-adl-sdw-max98373-rt5682.tplg",
+ 	},
++	{
++		.link_mask = BIT(0) | BIT(2),
++		.links = adl_sdw_rt1316_link02,
++		.drv_name = "sof_sdw",
++		.sof_tplg_filename = "sof-adl-rt1316-l02.tplg",
++	},
+ 	{},
+ };
+ EXPORT_SYMBOL_GPL(snd_soc_acpi_intel_adl_sdw_machines);
+diff --git a/sound/soc/qcom/qdsp6/q6apm-dai.c b/sound/soc/qcom/qdsp6/q6apm-dai.c
+index c9404b5934c7e6..2cd522108221a2 100644
+--- a/sound/soc/qcom/qdsp6/q6apm-dai.c
++++ b/sound/soc/qcom/qdsp6/q6apm-dai.c
+@@ -24,8 +24,8 @@
+ #define PLAYBACK_MIN_PERIOD_SIZE	128
+ #define CAPTURE_MIN_NUM_PERIODS		2
+ #define CAPTURE_MAX_NUM_PERIODS		8
+-#define CAPTURE_MAX_PERIOD_SIZE		4096
+-#define CAPTURE_MIN_PERIOD_SIZE		320
++#define CAPTURE_MAX_PERIOD_SIZE		65536
++#define CAPTURE_MIN_PERIOD_SIZE		6144
+ #define BUFFER_BYTES_MAX (PLAYBACK_MAX_NUM_PERIODS * PLAYBACK_MAX_PERIOD_SIZE)
+ #define BUFFER_BYTES_MIN (PLAYBACK_MIN_NUM_PERIODS * PLAYBACK_MIN_PERIOD_SIZE)
+ #define COMPR_PLAYBACK_MAX_FRAGMENT_SIZE (128 * 1024)
+@@ -64,12 +64,12 @@ struct q6apm_dai_rtd {
+ 	phys_addr_t phys;
+ 	unsigned int pcm_size;
+ 	unsigned int pcm_count;
+-	unsigned int pos;       /* Buffer position */
+ 	unsigned int periods;
+ 	unsigned int bytes_sent;
+ 	unsigned int bytes_received;
+ 	unsigned int copied_total;
+ 	uint16_t bits_per_sample;
++	snd_pcm_uframes_t queue_ptr;
+ 	bool next_track;
+ 	enum stream_state state;
+ 	struct q6apm_graph *graph;
+@@ -123,25 +123,16 @@ static void event_handler(uint32_t opcode, uint32_t token, void *payload, void *
+ {
+ 	struct q6apm_dai_rtd *prtd = priv;
+ 	struct snd_pcm_substream *substream = prtd->substream;
+-	unsigned long flags;
+ 
+ 	switch (opcode) {
+ 	case APM_CLIENT_EVENT_CMD_EOS_DONE:
+ 		prtd->state = Q6APM_STREAM_STOPPED;
+ 		break;
+ 	case APM_CLIENT_EVENT_DATA_WRITE_DONE:
+-		spin_lock_irqsave(&prtd->lock, flags);
+-		prtd->pos += prtd->pcm_count;
+-		spin_unlock_irqrestore(&prtd->lock, flags);
+ 		snd_pcm_period_elapsed(substream);
+-		if (prtd->state == Q6APM_STREAM_RUNNING)
+-			q6apm_write_async(prtd->graph, prtd->pcm_count, 0, 0, 0);
+ 
+ 		break;
+ 	case APM_CLIENT_EVENT_DATA_READ_DONE:
+-		spin_lock_irqsave(&prtd->lock, flags);
+-		prtd->pos += prtd->pcm_count;
+-		spin_unlock_irqrestore(&prtd->lock, flags);
+ 		snd_pcm_period_elapsed(substream);
+ 		if (prtd->state == Q6APM_STREAM_RUNNING)
+ 			q6apm_read(prtd->graph);
+@@ -248,7 +239,6 @@ static int q6apm_dai_prepare(struct snd_soc_component *component,
+ 	}
+ 
+ 	prtd->pcm_count = snd_pcm_lib_period_bytes(substream);
+-	prtd->pos = 0;
+ 	/* rate and channels are sent to audio driver */
+ 	ret = q6apm_graph_media_format_shmem(prtd->graph, &cfg);
+ 	if (ret < 0) {
+@@ -294,6 +284,27 @@ static int q6apm_dai_prepare(struct snd_soc_component *component,
+ 	return 0;
+ }
+ 
++static int q6apm_dai_ack(struct snd_soc_component *component, struct snd_pcm_substream *substream)
++{
++	struct snd_pcm_runtime *runtime = substream->runtime;
++	struct q6apm_dai_rtd *prtd = runtime->private_data;
++	int i, ret = 0, avail_periods;
++
++	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
++		avail_periods = (runtime->control->appl_ptr - prtd->queue_ptr)/runtime->period_size;
++		for (i = 0; i < avail_periods; i++) {
++			ret = q6apm_write_async(prtd->graph, prtd->pcm_count, 0, 0, NO_TIMESTAMP);
++			if (ret < 0) {
++				dev_err(component->dev, "Error queuing playback buffer %d\n", ret);
++				return ret;
++			}
++			prtd->queue_ptr += runtime->period_size;
++		}
++	}
++
++	return ret;
++}
++
+ static int q6apm_dai_trigger(struct snd_soc_component *component,
+ 			     struct snd_pcm_substream *substream, int cmd)
+ {
+@@ -305,9 +316,6 @@ static int q6apm_dai_trigger(struct snd_soc_component *component,
+ 	case SNDRV_PCM_TRIGGER_START:
+ 	case SNDRV_PCM_TRIGGER_RESUME:
+ 	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+-		 /* start writing buffers for playback only as we already queued capture buffers */
+-		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+-			ret = q6apm_write_async(prtd->graph, prtd->pcm_count, 0, 0, 0);
+ 		break;
+ 	case SNDRV_PCM_TRIGGER_STOP:
+ 		/* TODO support be handled via SoftPause Module */
+@@ -377,13 +385,14 @@ static int q6apm_dai_open(struct snd_soc_component *component,
+ 		}
+ 	}
+ 
+-	ret = snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 32);
++	/* setup 10ms latency to accommodate DSP restrictions */
++	ret = snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, 480);
+ 	if (ret < 0) {
+ 		dev_err(dev, "constraint for period bytes step ret = %d\n", ret);
+ 		goto err;
+ 	}
+ 
+-	ret = snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 32);
++	ret = snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, 480);
+ 	if (ret < 0) {
+ 		dev_err(dev, "constraint for buffer bytes step ret = %d\n", ret);
+ 		goto err;
+@@ -428,16 +437,12 @@ static snd_pcm_uframes_t q6apm_dai_pointer(struct snd_soc_component *component,
+ 	struct snd_pcm_runtime *runtime = substream->runtime;
+ 	struct q6apm_dai_rtd *prtd = runtime->private_data;
+ 	snd_pcm_uframes_t ptr;
+-	unsigned long flags;
+ 
+-	spin_lock_irqsave(&prtd->lock, flags);
+-	if (prtd->pos == prtd->pcm_size)
+-		prtd->pos = 0;
+-
+-	ptr =  bytes_to_frames(runtime, prtd->pos);
+-	spin_unlock_irqrestore(&prtd->lock, flags);
++	ptr = q6apm_get_hw_pointer(prtd->graph, substream->stream) * runtime->period_size;
++	if (ptr)
++		return ptr - 1;
+ 
+-	return ptr;
++	return 0;
+ }
+ 
+ static int q6apm_dai_hw_params(struct snd_soc_component *component,
+@@ -652,8 +657,6 @@ static int q6apm_dai_compr_set_params(struct snd_soc_component *component,
+ 	prtd->pcm_size = runtime->fragments * runtime->fragment_size;
+ 	prtd->bits_per_sample = 16;
+ 
+-	prtd->pos = 0;
+-
+ 	if (prtd->next_track != true) {
+ 		memcpy(&prtd->codec, codec, sizeof(*codec));
+ 
+@@ -836,6 +839,7 @@ static const struct snd_soc_component_driver q6apm_fe_dai_component = {
+ 	.hw_params	= q6apm_dai_hw_params,
+ 	.pointer	= q6apm_dai_pointer,
+ 	.trigger	= q6apm_dai_trigger,
++	.ack		= q6apm_dai_ack,
+ 	.compress_ops	= &q6apm_dai_compress_ops,
+ 	.use_dai_pcm_id = true,
+ };
+diff --git a/sound/soc/qcom/qdsp6/q6apm.c b/sound/soc/qcom/qdsp6/q6apm.c
+index 2a2a5bd98110bc..ca57413cb7847a 100644
+--- a/sound/soc/qcom/qdsp6/q6apm.c
++++ b/sound/soc/qcom/qdsp6/q6apm.c
+@@ -494,6 +494,19 @@ int q6apm_read(struct q6apm_graph *graph)
+ }
+ EXPORT_SYMBOL_GPL(q6apm_read);
+ 
++int q6apm_get_hw_pointer(struct q6apm_graph *graph, int dir)
++{
++	struct audioreach_graph_data *data;
++
++	if (dir == SNDRV_PCM_STREAM_PLAYBACK)
++		data = &graph->rx_data;
++	else
++		data = &graph->tx_data;
++
++	return (int)atomic_read(&data->hw_ptr);
++}
++EXPORT_SYMBOL_GPL(q6apm_get_hw_pointer);
++
+ static int graph_callback(struct gpr_resp_pkt *data, void *priv, int op)
+ {
+ 	struct data_cmd_rsp_rd_sh_mem_ep_data_buffer_done_v2 *rd_done;
+@@ -520,7 +533,8 @@ static int graph_callback(struct gpr_resp_pkt *data, void *priv, int op)
+ 		done = data->payload;
+ 		phys = graph->rx_data.buf[token].phys;
+ 		mutex_unlock(&graph->lock);
+-
++		/* token numbering starts at 0 */
++		atomic_set(&graph->rx_data.hw_ptr, token + 1);
+ 		if (lower_32_bits(phys) == done->buf_addr_lsw &&
+ 		    upper_32_bits(phys) == done->buf_addr_msw) {
+ 			graph->result.opcode = hdr->opcode;
+@@ -553,6 +567,8 @@ static int graph_callback(struct gpr_resp_pkt *data, void *priv, int op)
+ 		rd_done = data->payload;
+ 		phys = graph->tx_data.buf[hdr->token].phys;
+ 		mutex_unlock(&graph->lock);
++		/* token numbering starts at 0 */
++		atomic_set(&graph->tx_data.hw_ptr, hdr->token + 1);
+ 
+ 		if (upper_32_bits(phys) == rd_done->buf_addr_msw &&
+ 		    lower_32_bits(phys) == rd_done->buf_addr_lsw) {
+diff --git a/sound/soc/qcom/qdsp6/q6apm.h b/sound/soc/qcom/qdsp6/q6apm.h
+index c248c8d2b1ab7f..7ce08b401e3102 100644
+--- a/sound/soc/qcom/qdsp6/q6apm.h
++++ b/sound/soc/qcom/qdsp6/q6apm.h
+@@ -2,6 +2,7 @@
+ #ifndef __Q6APM_H__
+ #define __Q6APM_H__
+ #include <linux/types.h>
++#include <linux/atomic.h>
+ #include <linux/slab.h>
+ #include <linux/wait.h>
+ #include <linux/kernel.h>
+@@ -77,6 +78,7 @@ struct audioreach_graph_data {
+ 	uint32_t num_periods;
+ 	uint32_t dsp_buf;
+ 	uint32_t mem_map_handle;
++	atomic_t hw_ptr;
+ };
+ 
+ struct audioreach_graph {
+@@ -150,4 +152,5 @@ int q6apm_enable_compress_module(struct device *dev, struct q6apm_graph *graph,
+ int q6apm_remove_initial_silence(struct device *dev, struct q6apm_graph *graph, uint32_t samples);
+ int q6apm_remove_trailing_silence(struct device *dev, struct q6apm_graph *graph, uint32_t samples);
+ int q6apm_set_real_module_id(struct device *dev, struct q6apm_graph *graph, uint32_t codec_id);
++int q6apm_get_hw_pointer(struct q6apm_graph *graph, int dir);
+ #endif /* __APM_GRAPH_ */
+diff --git a/sound/soc/qcom/qdsp6/q6asm-dai.c b/sound/soc/qcom/qdsp6/q6asm-dai.c
+index 045100c9435271..a400c9a31fead5 100644
+--- a/sound/soc/qcom/qdsp6/q6asm-dai.c
++++ b/sound/soc/qcom/qdsp6/q6asm-dai.c
+@@ -892,9 +892,7 @@ static int q6asm_dai_compr_set_params(struct snd_soc_component *component,
+ 
+ 		if (ret < 0) {
+ 			dev_err(dev, "q6asm_open_write failed\n");
+-			q6asm_audio_client_free(prtd->audio_client);
+-			prtd->audio_client = NULL;
+-			return ret;
++			goto open_err;
+ 		}
+ 	}
+ 
+@@ -903,7 +901,7 @@ static int q6asm_dai_compr_set_params(struct snd_soc_component *component,
+ 			      prtd->session_id, dir);
+ 	if (ret) {
+ 		dev_err(dev, "Stream reg failed ret:%d\n", ret);
+-		return ret;
++		goto q6_err;
+ 	}
+ 
+ 	ret = __q6asm_dai_compr_set_codec_params(component, stream,
+@@ -911,7 +909,7 @@ static int q6asm_dai_compr_set_params(struct snd_soc_component *component,
+ 						 prtd->stream_id);
+ 	if (ret) {
+ 		dev_err(dev, "codec param setup failed ret:%d\n", ret);
+-		return ret;
++		goto q6_err;
+ 	}
+ 
+ 	ret = q6asm_map_memory_regions(dir, prtd->audio_client, prtd->phys,
+@@ -920,12 +918,21 @@ static int q6asm_dai_compr_set_params(struct snd_soc_component *component,
+ 
+ 	if (ret < 0) {
+ 		dev_err(dev, "Buffer Mapping failed ret:%d\n", ret);
+-		return -ENOMEM;
++		ret = -ENOMEM;
++		goto q6_err;
+ 	}
+ 
+ 	prtd->state = Q6ASM_STREAM_RUNNING;
+ 
+ 	return 0;
++
++q6_err:
++	q6asm_cmd(prtd->audio_client, prtd->stream_id, CMD_CLOSE);
++
++open_err:
++	q6asm_audio_client_free(prtd->audio_client);
++	prtd->audio_client = NULL;
++	return ret;
+ }
+ 
+ static int q6asm_dai_compr_set_metadata(struct snd_soc_component *component,
+diff --git a/sound/soc/sof/topology.c b/sound/soc/sof/topology.c
+index b3fca5fd87d68c..37ca15cc5728ca 100644
+--- a/sound/soc/sof/topology.c
++++ b/sound/soc/sof/topology.c
+@@ -1269,8 +1269,8 @@ static int sof_widget_parse_tokens(struct snd_soc_component *scomp, struct snd_s
+ 			struct snd_sof_tuple *new_tuples;
+ 
+ 			num_tuples += token_list[object_token_list[i]].count * (num_sets - 1);
+-			new_tuples = krealloc(swidget->tuples,
+-					      sizeof(*new_tuples) * num_tuples, GFP_KERNEL);
++			new_tuples = krealloc_array(swidget->tuples,
++						    num_tuples, sizeof(*new_tuples), GFP_KERNEL);
+ 			if (!new_tuples) {
+ 				ret = -ENOMEM;
+ 				goto err;
+diff --git a/sound/usb/midi.c b/sound/usb/midi.c
+index 779d97d31f170e..826ac870f24690 100644
+--- a/sound/usb/midi.c
++++ b/sound/usb/midi.c
+@@ -489,16 +489,84 @@ static void ch345_broken_sysex_input(struct snd_usb_midi_in_endpoint *ep,
+ 
+ /*
+  * CME protocol: like the standard protocol, but SysEx commands are sent as a
+- * single USB packet preceded by a 0x0F byte.
++ * single USB packet preceded by a 0x0F byte, as are system realtime
++ * messages and MIDI Active Sensing.
++ * Also, multiple messages can be sent in the same packet.
+  */
+ static void snd_usbmidi_cme_input(struct snd_usb_midi_in_endpoint *ep,
+ 				  uint8_t *buffer, int buffer_length)
+ {
+-	if (buffer_length < 2 || (buffer[0] & 0x0f) != 0x0f)
+-		snd_usbmidi_standard_input(ep, buffer, buffer_length);
+-	else
+-		snd_usbmidi_input_data(ep, buffer[0] >> 4,
+-				       &buffer[1], buffer_length - 1);
++	int remaining = buffer_length;
++
++	/*
++	 * CME devices send sysex, song position pointer, system realtime
++	 * and active sensing using CIN 0x0f, which in the standard
++	 * is only intended for single byte unparsed data.
++	 * So we need to interpret these here before sending them on.
++	 * By default, we assume single byte data, which is true
++	 * for system realtime (midi clock, start, stop and continue)
++	 * and active sensing, and handle the other (known) cases
++	 * separately.
++	 * In contrast to the standard, CME does not split sysex
++	 * into multiple 4-byte packets, but lumps everything together
++	 * into one. In addition, CME can string multiple messages
++	 * together in the same packet; pressing the Record button
++	 * on an UF6 sends a sysex message directly followed
++	 * by a song position pointer in the same packet.
++	 * For it to have any reasonable meaning, a sysex message
++	 * needs to be at least 3 bytes in length (0xf0, id, 0xf7),
++	 * corresponding to a packet size of 4 bytes, and the ones sent
++	 * by CME devices are 6 or 7 bytes, making the packet fragments
++	 * 7 or 8 bytes long (six or seven bytes plus preceding CN+CIN byte).
++	 * For the other types, the packet size is always 4 bytes,
++	 * as per the standard, with the data size being 3 for SPP
++	 * and 1 for the others.
++	 * Thus all packet fragments are at least 4 bytes long, so we can
++	 * skip anything that is shorter; this also conveniently skips
++	 * packets with size 0, which CME devices continuously send when
++	 * they have nothing better to do.
++	 * Another quirk is that sometimes multiple messages are sent
++	 * in the same packet. This has been observed for midi clock
++	 * and active sensing i.e. 0x0f 0xf8 0x00 0x00 0x0f 0xfe 0x00 0x00,
++	 * but also multiple note ons/offs, and control change together
++	 * with MIDI clock. Similarly, some sysex messages are followed by
++	 * the song position pointer in the same packet, and occasionally
++	 * additionally by a midi clock or active sensing.
++	 * We handle this by looping over all data and parsing it along the way.
++	 */
++	while (remaining >= 4) {
++		int source_length = 4; /* default */
++
++		if ((buffer[0] & 0x0f) == 0x0f) {
++			int data_length = 1; /* default */
++
++			if (buffer[1] == 0xf0) {
++				/* Sysex: Find EOX and send on whole message. */
++				/* To kick off the search, skip the first
++				 * two bytes (CN+CIN and SYSEX (0xf0)).
++				 */
++				uint8_t *tmp_buf = buffer + 2;
++				int tmp_length = remaining - 2;
++
++				while (tmp_length > 1 && *tmp_buf != 0xf7) {
++					tmp_buf++;
++					tmp_length--;
++				}
++				data_length = tmp_buf - buffer;
++				source_length = data_length + 1;
++			} else if (buffer[1] == 0xf2) {
++				/* Three byte song position pointer */
++				data_length = 3;
++			}
++			snd_usbmidi_input_data(ep, buffer[0] >> 4,
++					       &buffer[1], data_length);
++		} else {
++			/* normal channel events */
++			snd_usbmidi_standard_input(ep, buffer, source_length);
++		}
++		buffer += source_length;
++		remaining -= source_length;
++	}
+ }
+ 
+ /*
+diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c
+index 0a7327541c17f1..46cce18c830864 100644
+--- a/tools/lib/bpf/btf_dump.c
++++ b/tools/lib/bpf/btf_dump.c
+@@ -867,8 +867,8 @@ static void btf_dump_emit_bit_padding(const struct btf_dump *d,
+ 	} pads[] = {
+ 		{"long", d->ptr_sz * 8}, {"int", 32}, {"short", 16}, {"char", 8}
+ 	};
+-	int new_off, pad_bits, bits, i;
+-	const char *pad_type;
++	int new_off = 0, pad_bits = 0, bits, i;
++	const char *pad_type = NULL;
+ 
+ 	if (cur_off >= next_off)
+ 		return; /* no gap */
+diff --git a/tools/objtool/check.c b/tools/objtool/check.c
+index 286a2c0af02aa8..127862fa05c619 100644
+--- a/tools/objtool/check.c
++++ b/tools/objtool/check.c
+@@ -3990,6 +3990,11 @@ static int validate_unret(struct objtool_file *file, struct instruction *insn)
+ 			WARN_INSN(insn, "RET before UNTRAIN");
+ 			return 1;
+ 
++		case INSN_CONTEXT_SWITCH:
++			if (insn_func(insn))
++				break;
++			return 0;
++
+ 		case INSN_NOP:
+ 			if (insn->retpoline_safe)
+ 				return 0;
+diff --git a/tools/power/cpupower/bench/parse.c b/tools/power/cpupower/bench/parse.c
+index e63dc11fa3a533..48e25be6e16356 100644
+--- a/tools/power/cpupower/bench/parse.c
++++ b/tools/power/cpupower/bench/parse.c
+@@ -120,6 +120,10 @@ FILE *prepare_output(const char *dirname)
+ struct config *prepare_default_config()
+ {
+ 	struct config *config = malloc(sizeof(struct config));
++	if (!config) {
++		perror("malloc");
++		return NULL;
++	}
+ 
+ 	dprintf("loading defaults\n");
+ 
+diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl
+index c76ad0be54e2ed..7e524601e01ada 100755
+--- a/tools/testing/ktest/ktest.pl
++++ b/tools/testing/ktest/ktest.pl
+@@ -4303,6 +4303,14 @@ if (defined($opt{"LOG_FILE"})) {
+     if ($opt{"CLEAR_LOG"}) {
+ 	unlink $opt{"LOG_FILE"};
+     }
++
++    if (! -e $opt{"LOG_FILE"} && $opt{"LOG_FILE"} =~ m,^(.*/),) {
++        my $dir = $1;
++        if (! -d $dir) {
++            mkpath($dir) or die "Failed to create directories '$dir': $!";
++            print "\nThe log directory $dir did not exist, so it was created.\n";
++        }
++    }
+     open(LOG, ">> $opt{LOG_FILE}") or die "Can't write to $opt{LOG_FILE}";
+     LOG->autoflush(1);
+ }
+diff --git a/tools/testing/selftests/futex/functional/futex_wait_wouldblock.c b/tools/testing/selftests/futex/functional/futex_wait_wouldblock.c
+index 7d7a6a06cdb75b..2d8230da906429 100644
+--- a/tools/testing/selftests/futex/functional/futex_wait_wouldblock.c
++++ b/tools/testing/selftests/futex/functional/futex_wait_wouldblock.c
+@@ -98,7 +98,7 @@ int main(int argc, char *argv[])
+ 	info("Calling futex_waitv on f1: %u @ %p with val=%u\n", f1, &f1, f1+1);
+ 	res = futex_waitv(&waitv, 1, 0, &to, CLOCK_MONOTONIC);
+ 	if (!res || errno != EWOULDBLOCK) {
+-		ksft_test_result_pass("futex_waitv returned: %d %s\n",
++		ksft_test_result_fail("futex_waitv returned: %d %s\n",
+ 				      res ? errno : res,
+ 				      res ? strerror(errno) : "");
+ 		ret = RET_FAIL;
+diff --git a/tools/testing/selftests/landlock/base_test.c b/tools/testing/selftests/landlock/base_test.c
+index 1bc16fde2e8aea..4766f8fec9f605 100644
+--- a/tools/testing/selftests/landlock/base_test.c
++++ b/tools/testing/selftests/landlock/base_test.c
+@@ -98,10 +98,54 @@ TEST(abi_version)
+ 	ASSERT_EQ(EINVAL, errno);
+ }
+ 
++/*
++ * Old source trees might not have the set of Kselftest fixes related to kernel
++ * UAPI headers.
++ */
++#ifndef LANDLOCK_CREATE_RULESET_ERRATA
++#define LANDLOCK_CREATE_RULESET_ERRATA (1U << 1)
++#endif
++
++TEST(errata)
++{
++	const struct landlock_ruleset_attr ruleset_attr = {
++		.handled_access_fs = LANDLOCK_ACCESS_FS_READ_FILE,
++	};
++	int errata;
++
++	errata = landlock_create_ruleset(NULL, 0,
++					 LANDLOCK_CREATE_RULESET_ERRATA);
++	/* The errata bitmask will not be backported to tests. */
++	ASSERT_LE(0, errata);
++	TH_LOG("errata: 0x%x", errata);
++
++	ASSERT_EQ(-1, landlock_create_ruleset(&ruleset_attr, 0,
++					      LANDLOCK_CREATE_RULESET_ERRATA));
++	ASSERT_EQ(EINVAL, errno);
++
++	ASSERT_EQ(-1, landlock_create_ruleset(NULL, sizeof(ruleset_attr),
++					      LANDLOCK_CREATE_RULESET_ERRATA));
++	ASSERT_EQ(EINVAL, errno);
++
++	ASSERT_EQ(-1,
++		  landlock_create_ruleset(&ruleset_attr, sizeof(ruleset_attr),
++					  LANDLOCK_CREATE_RULESET_ERRATA));
++	ASSERT_EQ(EINVAL, errno);
++
++	ASSERT_EQ(-1, landlock_create_ruleset(
++			      NULL, 0,
++			      LANDLOCK_CREATE_RULESET_VERSION |
++				      LANDLOCK_CREATE_RULESET_ERRATA));
++	ASSERT_EQ(-1, landlock_create_ruleset(NULL, 0,
++					      LANDLOCK_CREATE_RULESET_ERRATA |
++						      1 << 31));
++	ASSERT_EQ(EINVAL, errno);
++}
++
+ /* Tests ordering of syscall argument checks. */
+ TEST(create_ruleset_checks_ordering)
+ {
+-	const int last_flag = LANDLOCK_CREATE_RULESET_VERSION;
++	const int last_flag = LANDLOCK_CREATE_RULESET_ERRATA;
+ 	const int invalid_flag = last_flag << 1;
+ 	int ruleset_fd;
+ 	const struct landlock_ruleset_attr ruleset_attr = {
+diff --git a/tools/testing/selftests/landlock/common.h b/tools/testing/selftests/landlock/common.h
+index 40a2def50b837e..60afc1ce11bcd7 100644
+--- a/tools/testing/selftests/landlock/common.h
++++ b/tools/testing/selftests/landlock/common.h
+@@ -68,6 +68,7 @@ static void _init_caps(struct __test_metadata *const _metadata, bool drop_all)
+ 		CAP_MKNOD,
+ 		CAP_NET_ADMIN,
+ 		CAP_NET_BIND_SERVICE,
++		CAP_SETUID,
+ 		CAP_SYS_ADMIN,
+ 		CAP_SYS_CHROOT,
+ 		/* clang-format on */
+diff --git a/tools/testing/selftests/landlock/scoped_signal_test.c b/tools/testing/selftests/landlock/scoped_signal_test.c
+index 475ee62a832d6d..d8bf33417619f6 100644
+--- a/tools/testing/selftests/landlock/scoped_signal_test.c
++++ b/tools/testing/selftests/landlock/scoped_signal_test.c
+@@ -249,47 +249,67 @@ TEST_F(scoped_domains, check_access_signal)
+ 		_metadata->exit_code = KSFT_FAIL;
+ }
+ 
+-static int thread_pipe[2];
+-
+ enum thread_return {
+ 	THREAD_INVALID = 0,
+ 	THREAD_SUCCESS = 1,
+ 	THREAD_ERROR = 2,
++	THREAD_TEST_FAILED = 3,
+ };
+ 
+-void *thread_func(void *arg)
++static void *thread_sync(void *arg)
+ {
++	const int pipe_read = *(int *)arg;
+ 	char buf;
+ 
+-	if (read(thread_pipe[0], &buf, 1) != 1)
++	if (read(pipe_read, &buf, 1) != 1)
+ 		return (void *)THREAD_ERROR;
+ 
+ 	return (void *)THREAD_SUCCESS;
+ }
+ 
+-TEST(signal_scoping_threads)
++TEST(signal_scoping_thread_before)
+ {
+-	pthread_t no_sandbox_thread, scoped_thread;
++	pthread_t no_sandbox_thread;
+ 	enum thread_return ret = THREAD_INVALID;
++	int thread_pipe[2];
+ 
+ 	drop_caps(_metadata);
+ 	ASSERT_EQ(0, pipe2(thread_pipe, O_CLOEXEC));
+ 
+-	ASSERT_EQ(0,
+-		  pthread_create(&no_sandbox_thread, NULL, thread_func, NULL));
++	ASSERT_EQ(0, pthread_create(&no_sandbox_thread, NULL, thread_sync,
++				    &thread_pipe[0]));
+ 
+-	/* Restricts the domain after creating the first thread. */
++	/* Enforces restriction after creating the thread. */
+ 	create_scoped_domain(_metadata, LANDLOCK_SCOPE_SIGNAL);
+ 
+-	ASSERT_EQ(EPERM, pthread_kill(no_sandbox_thread, 0));
+-	ASSERT_EQ(1, write(thread_pipe[1], ".", 1));
+-
+-	ASSERT_EQ(0, pthread_create(&scoped_thread, NULL, thread_func, NULL));
+-	ASSERT_EQ(0, pthread_kill(scoped_thread, 0));
+-	ASSERT_EQ(1, write(thread_pipe[1], ".", 1));
++	EXPECT_EQ(0, pthread_kill(no_sandbox_thread, 0));
++	EXPECT_EQ(1, write(thread_pipe[1], ".", 1));
+ 
+ 	EXPECT_EQ(0, pthread_join(no_sandbox_thread, (void **)&ret));
+ 	EXPECT_EQ(THREAD_SUCCESS, ret);
++
++	EXPECT_EQ(0, close(thread_pipe[0]));
++	EXPECT_EQ(0, close(thread_pipe[1]));
++}
++
++TEST(signal_scoping_thread_after)
++{
++	pthread_t scoped_thread;
++	enum thread_return ret = THREAD_INVALID;
++	int thread_pipe[2];
++
++	drop_caps(_metadata);
++	ASSERT_EQ(0, pipe2(thread_pipe, O_CLOEXEC));
++
++	/* Enforces restriction before creating the thread. */
++	create_scoped_domain(_metadata, LANDLOCK_SCOPE_SIGNAL);
++
++	ASSERT_EQ(0, pthread_create(&scoped_thread, NULL, thread_sync,
++				    &thread_pipe[0]));
++
++	EXPECT_EQ(0, pthread_kill(scoped_thread, 0));
++	EXPECT_EQ(1, write(thread_pipe[1], ".", 1));
++
+ 	EXPECT_EQ(0, pthread_join(scoped_thread, (void **)&ret));
+ 	EXPECT_EQ(THREAD_SUCCESS, ret);
+ 
+@@ -297,6 +317,64 @@ TEST(signal_scoping_threads)
+ 	EXPECT_EQ(0, close(thread_pipe[1]));
+ }
+ 
++struct thread_setuid_args {
++	int pipe_read, new_uid;
++};
++
++void *thread_setuid(void *ptr)
++{
++	const struct thread_setuid_args *arg = ptr;
++	char buf;
++
++	if (read(arg->pipe_read, &buf, 1) != 1)
++		return (void *)THREAD_ERROR;
++
++	/* libc's setuid() should update all threads' credentials. */
++	if (getuid() != arg->new_uid)
++		return (void *)THREAD_TEST_FAILED;
++
++	return (void *)THREAD_SUCCESS;
++}
++
++TEST(signal_scoping_thread_setuid)
++{
++	struct thread_setuid_args arg;
++	pthread_t no_sandbox_thread;
++	enum thread_return ret = THREAD_INVALID;
++	int pipe_parent[2];
++	int prev_uid;
++
++	disable_caps(_metadata);
++
++	/* This test does not need to be run as root. */
++	prev_uid = getuid();
++	arg.new_uid = prev_uid + 1;
++	EXPECT_LT(0, arg.new_uid);
++
++	ASSERT_EQ(0, pipe2(pipe_parent, O_CLOEXEC));
++	arg.pipe_read = pipe_parent[0];
++
++	/* Capabilities must be set before creating a new thread. */
++	set_cap(_metadata, CAP_SETUID);
++	ASSERT_EQ(0, pthread_create(&no_sandbox_thread, NULL, thread_setuid,
++				    &arg));
++
++	/* Enforces restriction after creating the thread. */
++	create_scoped_domain(_metadata, LANDLOCK_SCOPE_SIGNAL);
++
++	EXPECT_NE(arg.new_uid, getuid());
++	EXPECT_EQ(0, setuid(arg.new_uid));
++	EXPECT_EQ(arg.new_uid, getuid());
++	EXPECT_EQ(1, write(pipe_parent[1], ".", 1));
++
++	EXPECT_EQ(0, pthread_join(no_sandbox_thread, (void **)&ret));
++	EXPECT_EQ(THREAD_SUCCESS, ret);
++
++	clear_cap(_metadata, CAP_SETUID);
++	EXPECT_EQ(0, close(pipe_parent[0]));
++	EXPECT_EQ(0, close(pipe_parent[1]));
++}
++
+ const short backlog = 10;
+ 
+ static volatile sig_atomic_t signal_received;
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.c b/tools/testing/selftests/net/mptcp/mptcp_connect.c
+index d240d02fa443a1..c83a8b47bbdfa5 100644
+--- a/tools/testing/selftests/net/mptcp/mptcp_connect.c
++++ b/tools/testing/selftests/net/mptcp/mptcp_connect.c
+@@ -1270,7 +1270,7 @@ int main_loop(void)
+ 
+ 	if (cfg_input && cfg_sockopt_types.mptfo) {
+ 		fd_in = open(cfg_input, O_RDONLY);
+-		if (fd < 0)
++		if (fd_in < 0)
+ 			xerror("can't open %s:%d", cfg_input, errno);
+ 	}
+ 
+@@ -1293,13 +1293,13 @@ int main_loop(void)
+ 
+ 	if (cfg_input && !cfg_sockopt_types.mptfo) {
+ 		fd_in = open(cfg_input, O_RDONLY);
+-		if (fd < 0)
++		if (fd_in < 0)
+ 			xerror("can't open %s:%d", cfg_input, errno);
+ 	}
+ 
+ 	ret = copyfd_io(fd_in, fd, 1, 0, &winfo);
+ 	if (ret)
+-		return ret;
++		goto out;
+ 
+ 	if (cfg_truncate > 0) {
+ 		shutdown(fd, SHUT_WR);
+@@ -1320,7 +1320,10 @@ int main_loop(void)
+ 		close(fd);
+ 	}
+ 
+-	return 0;
++out:
++	if (cfg_input)
++		close(fd_in);
++	return ret;
+ }
+ 
+ int parse_proto(const char *proto)
+diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig
+index fd6a3010afa833..1f51a4d906b877 100644
+--- a/virt/kvm/Kconfig
++++ b/virt/kvm/Kconfig
+@@ -75,7 +75,7 @@ config KVM_COMPAT
+        depends on KVM && COMPAT && !(S390 || ARM64 || RISCV)
+ 
+ config HAVE_KVM_IRQ_BYPASS
+-       bool
++       tristate
+        select IRQ_BYPASS_MANAGER
+ 
+ config HAVE_KVM_VCPU_ASYNC_IOCTL
+diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
+index 6b390b622b728e..929c7980fda6a4 100644
+--- a/virt/kvm/eventfd.c
++++ b/virt/kvm/eventfd.c
+@@ -149,7 +149,7 @@ irqfd_shutdown(struct work_struct *work)
+ 	/*
+ 	 * It is now safe to release the object's resources
+ 	 */
+-#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
++#if IS_ENABLED(CONFIG_HAVE_KVM_IRQ_BYPASS)
+ 	irq_bypass_unregister_consumer(&irqfd->consumer);
+ #endif
+ 	eventfd_ctx_put(irqfd->eventfd);
+@@ -274,7 +274,7 @@ static void irqfd_update(struct kvm *kvm, struct kvm_kernel_irqfd *irqfd)
+ 	write_seqcount_end(&irqfd->irq_entry_sc);
+ }
+ 
+-#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
++#if IS_ENABLED(CONFIG_HAVE_KVM_IRQ_BYPASS)
+ void __attribute__((weak)) kvm_arch_irq_bypass_stop(
+ 				struct irq_bypass_consumer *cons)
+ {
+@@ -425,7 +425,7 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
+ 	if (events & EPOLLIN)
+ 		schedule_work(&irqfd->inject);
+ 
+-#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
++#if IS_ENABLED(CONFIG_HAVE_KVM_IRQ_BYPASS)
+ 	if (kvm_arch_has_irq_bypass()) {
+ 		irqfd->consumer.token = (void *)irqfd->eventfd;
+ 		irqfd->consumer.add_producer = kvm_arch_irq_bypass_add_producer;
+@@ -618,14 +618,14 @@ void kvm_irq_routing_update(struct kvm *kvm)
+ 	spin_lock_irq(&kvm->irqfds.lock);
+ 
+ 	list_for_each_entry(irqfd, &kvm->irqfds.items, list) {
+-#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
++#if IS_ENABLED(CONFIG_HAVE_KVM_IRQ_BYPASS)
+ 		/* Under irqfds.lock, so can read irq_entry safely */
+ 		struct kvm_kernel_irq_routing_entry old = irqfd->irq_entry;
+ #endif
+ 
+ 		irqfd_update(kvm, irqfd);
+ 
+-#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
++#if IS_ENABLED(CONFIG_HAVE_KVM_IRQ_BYPASS)
+ 		if (irqfd->producer &&
+ 		    kvm_arch_irqfd_route_changed(&old, &irqfd->irq_entry)) {
+ 			int ret = kvm_arch_update_irqfd_routing(


^ permalink raw reply related	[flat|nested] 82+ messages in thread
* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2025-04-10 13:50 Mike Pagano
  0 siblings, 0 replies; 82+ messages in thread
From: Mike Pagano @ 2025-04-10 13:50 UTC (permalink / raw
  To: gentoo-commits

commit:     39b5396d38f81c7ab29618580608f8a8cfce9d74
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Apr 10 13:50:02 2025 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Apr 10 13:50:02 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=39b5396d

Remove redundant patch

Removed:
2400_wifi-mt76-mt7921-null-ptr-deref-fix.patch

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                    |  4 --
 2400_wifi-mt76-mt7921-null-ptr-deref-fix.patch | 74 --------------------------
 2 files changed, 78 deletions(-)

diff --git a/0000_README b/0000_README
index 26583822..7e2e4141 100644
--- a/0000_README
+++ b/0000_README
@@ -155,10 +155,6 @@ Patch:  2000_BT-Check-key-sizes-only-if-Secure-Simple-Pairing-enabled.patch
 From:   https://lore.kernel.org/linux-bluetooth/20190522070540.48895-1-marcel@holtmann.org/raw
 Desc:   Bluetooth: Check key sizes only when Secure Simple Pairing is enabled. See bug #686758
 
-Patch:  2400_wifi-mt76-mt7921-null-ptr-deref-fix.patch
-From:   https://github.com/nbd168/wireless/commit/adc3fd2a2277b7cc0b61692463771bf9bd298036
-Desc:   wifi: mt76: mt7921: fix kernel panic due to null pointer dereference
-
 Patch:  2910_bfp-mark-get-entry-ip-as--maybe-unused.patch
 From:   https://www.spinics.net/lists/stable/msg604665.html
 Desc:   bpf: mark get_entry_ip as __maybe_unused

diff --git a/2400_wifi-mt76-mt7921-null-ptr-deref-fix.patch b/2400_wifi-mt76-mt7921-null-ptr-deref-fix.patch
deleted file mode 100644
index 1cc1dbf3..00000000
--- a/2400_wifi-mt76-mt7921-null-ptr-deref-fix.patch
+++ /dev/null
@@ -1,74 +0,0 @@
-From adc3fd2a2277b7cc0b61692463771bf9bd298036 Mon Sep 17 00:00:00 2001
-From: Ming Yen Hsieh <mingyen.hsieh@mediatek.com>
-Date: Tue, 18 Feb 2025 11:33:42 +0800
-Subject: [PATCH] wifi: mt76: mt7921: fix kernel panic due to null pointer
- dereference
-
-Address a kernel panic caused by a null pointer dereference in the
-`mt792x_rx_get_wcid` function. The issue arises because the `deflink` structure
-is not properly initialized with the `sta` context. This patch ensures that the
-`deflink` structure is correctly linked to the `sta` context, preventing the
-null pointer dereference.
-
- BUG: kernel NULL pointer dereference, address: 0000000000000400
- #PF: supervisor read access in kernel mode
- #PF: error_code(0x0000) - not-present page
- PGD 0 P4D 0
- Oops: Oops: 0000 [#1] PREEMPT SMP NOPTI
- CPU: 0 UID: 0 PID: 470 Comm: mt76-usb-rx phy Not tainted 6.12.13-gentoo-dist #1
- Hardware name:  /AMD HUDSON-M1, BIOS 4.6.4 11/15/2011
- RIP: 0010:mt792x_rx_get_wcid+0x48/0x140 [mt792x_lib]
- RSP: 0018:ffffa147c055fd98 EFLAGS: 00010202
- RAX: 0000000000000000 RBX: ffff8e9ecb652000 RCX: 0000000000000000
- RDX: 0000000000000000 RSI: 0000000000000001 RDI: ffff8e9ecb652000
- RBP: 0000000000000685 R08: ffff8e9ec6570000 R09: 0000000000000000
- R10: ffff8e9ecd2ca000 R11: ffff8e9f22a217c0 R12: 0000000038010119
- R13: 0000000080843801 R14: ffff8e9ec6570000 R15: ffff8e9ecb652000
- FS:  0000000000000000(0000) GS:ffff8e9f22a00000(0000) knlGS:0000000000000000
- CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
- CR2: 0000000000000400 CR3: 000000000d2ea000 CR4: 00000000000006f0
- Call Trace:
-  <TASK>
-  ? __die_body.cold+0x19/0x27
-  ? page_fault_oops+0x15a/0x2f0
-  ? search_module_extables+0x19/0x60
-  ? search_bpf_extables+0x5f/0x80
-  ? exc_page_fault+0x7e/0x180
-  ? asm_exc_page_fault+0x26/0x30
-  ? mt792x_rx_get_wcid+0x48/0x140 [mt792x_lib]
-  mt7921_queue_rx_skb+0x1c6/0xaa0 [mt7921_common]
-  mt76u_alloc_queues+0x784/0x810 [mt76_usb]
-  ? __pfx___mt76_worker_fn+0x10/0x10 [mt76]
-  __mt76_worker_fn+0x4f/0x80 [mt76]
-  kthread+0xd2/0x100
-  ? __pfx_kthread+0x10/0x10
-  ret_from_fork+0x34/0x50
-  ? __pfx_kthread+0x10/0x10
-  ret_from_fork_asm+0x1a/0x30
-  </TASK>
- ---[ end trace 0000000000000000 ]---
-
-Reported-by: Nick Morrow <usbwifi2024@gmail.com>
-Closes: https://github.com/morrownr/USB-WiFi/issues/577
-Cc: stable@vger.kernel.org
-Fixes: 90c10286b176 ("wifi: mt76: mt7925: Update mt792x_rx_get_wcid for per-link STA")
-Signed-off-by: Ming Yen Hsieh <mingyen.hsieh@mediatek.com>
-Tested-by: Salah Coronya <salah.coronya@gmail.com>
-Link: https://patch.msgid.link/20250218033343.1999648-1-mingyen.hsieh@mediatek.com
-Signed-off-by: Felix Fietkau <nbd@nbd.name>
----
- drivers/net/wireless/mediatek/mt76/mt7921/main.c | 1 +
- 1 file changed, 1 insertion(+)
-
-diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
-index 13e58c328aff..78b77a54d195 100644
---- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
-+++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
-@@ -811,6 +811,7 @@ int mt7921_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
- 	msta->deflink.wcid.phy_idx = mvif->bss_conf.mt76.band_idx;
- 	msta->deflink.wcid.tx_info |= MT_WCID_TX_INFO_SET;
- 	msta->deflink.last_txs = jiffies;
-+	msta->deflink.sta = msta;
- 
- 	ret = mt76_connac_pm_wake(&dev->mphy, &dev->pm);
- 	if (ret)


^ permalink raw reply related	[flat|nested] 82+ messages in thread
* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2025-04-10 13:29 Mike Pagano
  0 siblings, 0 replies; 82+ messages in thread
From: Mike Pagano @ 2025-04-10 13:29 UTC (permalink / raw
  To: gentoo-commits

commit:     ccc6a3ab6573c6ffd2c26c881967e7a52dfd73c0
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Apr 10 13:29:16 2025 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Apr 10 13:29:16 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=ccc6a3ab

Linux patch 6.12.23

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |     4 +
 1022_linux-6.12.23.patch | 15453 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 15457 insertions(+)

diff --git a/0000_README b/0000_README
index 696eb7c9..26583822 100644
--- a/0000_README
+++ b/0000_README
@@ -131,6 +131,10 @@ Patch:  1021_linux-6.12.22.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.12.22
 
+Patch:  1022_linux-6.12.23.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.12.23
+
 Patch:  1500_fortify-copy-size-value-range-tracking-fix.patch
 From:   https://git.kernel.org/
 Desc:   fortify: Hide run-time copy size from value range tracking

diff --git a/1022_linux-6.12.23.patch b/1022_linux-6.12.23.patch
new file mode 100644
index 00000000..932e4ca5
--- /dev/null
+++ b/1022_linux-6.12.23.patch
@@ -0,0 +1,15453 @@
+diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml
+index fbfce9b4ae6b8e..71a1a399e1e1fe 100644
+--- a/Documentation/devicetree/bindings/vendor-prefixes.yaml
++++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml
+@@ -581,6 +581,8 @@ patternProperties:
+     description: GlobalTop Technology, Inc.
+   "^gmt,.*":
+     description: Global Mixed-mode Technology, Inc.
++  "^gocontroll,.*":
++    description: GOcontroll Modular Embedded Electronics B.V.
+   "^goldelico,.*":
+     description: Golden Delicious Computers GmbH & Co. KG
+   "^goodix,.*":
+diff --git a/Makefile b/Makefile
+index f380005d1600ad..6a2a60eb67a3e7 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 12
+-SUBLEVEL = 22
++SUBLEVEL = 23
+ EXTRAVERSION =
+ NAME = Baby Opossum Posse
+ 
+diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
+index 202397be76d803..d0040fb67c36f3 100644
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -118,7 +118,7 @@ config ARM
+ 	select HAVE_KERNEL_XZ
+ 	select HAVE_KPROBES if !XIP_KERNEL && !CPU_ENDIAN_BE32 && !CPU_V7M
+ 	select HAVE_KRETPROBES if HAVE_KPROBES
+-	select HAVE_LD_DEAD_CODE_DATA_ELIMINATION if (LD_VERSION >= 23600 || LD_IS_LLD)
++	select HAVE_LD_DEAD_CODE_DATA_ELIMINATION if (LD_VERSION >= 23600 || LD_CAN_USE_KEEP_IN_OVERLAY)
+ 	select HAVE_MOD_ARCH_SPECIFIC
+ 	select HAVE_NMI
+ 	select HAVE_OPTPROBES if !THUMB2_KERNEL
+diff --git a/arch/arm/include/asm/vmlinux.lds.h b/arch/arm/include/asm/vmlinux.lds.h
+index d60f6e83a9f700..14811b4f48ec8a 100644
+--- a/arch/arm/include/asm/vmlinux.lds.h
++++ b/arch/arm/include/asm/vmlinux.lds.h
+@@ -34,6 +34,12 @@
+ #define NOCROSSREFS
+ #endif
+ 
++#ifdef CONFIG_LD_CAN_USE_KEEP_IN_OVERLAY
++#define OVERLAY_KEEP(x)		KEEP(x)
++#else
++#define OVERLAY_KEEP(x)		x
++#endif
++
+ /* Set start/end symbol names to the LMA for the section */
+ #define ARM_LMA(sym, section)						\
+ 	sym##_start = LOADADDR(section);				\
+@@ -125,13 +131,13 @@
+ 	__vectors_lma = .;						\
+ 	OVERLAY 0xffff0000 : NOCROSSREFS AT(__vectors_lma) {		\
+ 		.vectors {						\
+-			*(.vectors)					\
++			OVERLAY_KEEP(*(.vectors))			\
+ 		}							\
+ 		.vectors.bhb.loop8 {					\
+-			*(.vectors.bhb.loop8)				\
++			OVERLAY_KEEP(*(.vectors.bhb.loop8))		\
+ 		}							\
+ 		.vectors.bhb.bpiall {					\
+-			*(.vectors.bhb.bpiall)				\
++			OVERLAY_KEEP(*(.vectors.bhb.bpiall))		\
+ 		}							\
+ 	}								\
+ 	ARM_LMA(__vectors, .vectors);					\
+diff --git a/arch/arm64/kernel/compat_alignment.c b/arch/arm64/kernel/compat_alignment.c
+index deff21bfa6800c..b68e1d328d4cb9 100644
+--- a/arch/arm64/kernel/compat_alignment.c
++++ b/arch/arm64/kernel/compat_alignment.c
+@@ -368,6 +368,8 @@ int do_compat_alignment_fixup(unsigned long addr, struct pt_regs *regs)
+ 		return 1;
+ 	}
+ 
++	if (!handler)
++		return 1;
+ 	type = handler(addr, instr, regs);
+ 
+ 	if (type == TYPE_ERROR || type == TYPE_FAULT)
+diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
+index d9fce0fd475a04..fe9f895138dba5 100644
+--- a/arch/loongarch/Kconfig
++++ b/arch/loongarch/Kconfig
+@@ -375,8 +375,8 @@ config CMDLINE_BOOTLOADER
+ config CMDLINE_EXTEND
+ 	bool "Use built-in to extend bootloader kernel arguments"
+ 	help
+-	  The command-line arguments provided during boot will be
+-	  appended to the built-in command line. This is useful in
++	  The built-in command line will be appended to the command-
++	  line arguments provided during boot. This is useful in
+ 	  cases where the provided arguments are insufficient and
+ 	  you don't want to or cannot modify them.
+ 
+diff --git a/arch/loongarch/include/asm/cache.h b/arch/loongarch/include/asm/cache.h
+index 1b6d0961719989..aa622c75441442 100644
+--- a/arch/loongarch/include/asm/cache.h
++++ b/arch/loongarch/include/asm/cache.h
+@@ -8,6 +8,8 @@
+ #define L1_CACHE_SHIFT		CONFIG_L1_CACHE_SHIFT
+ #define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
+ 
++#define ARCH_DMA_MINALIGN	(16)
++
+ #define __read_mostly __section(".data..read_mostly")
+ 
+ #endif /* _ASM_CACHE_H */
+diff --git a/arch/loongarch/include/asm/irq.h b/arch/loongarch/include/asm/irq.h
+index 9c2ca785faa9bd..b2915fd5386209 100644
+--- a/arch/loongarch/include/asm/irq.h
++++ b/arch/loongarch/include/asm/irq.h
+@@ -53,7 +53,7 @@ void spurious_interrupt(void);
+ #define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
+ void arch_trigger_cpumask_backtrace(const struct cpumask *mask, int exclude_cpu);
+ 
+-#define MAX_IO_PICS 2
++#define MAX_IO_PICS 8
+ #define NR_IRQS	(64 + NR_VECTORS * (NR_CPUS + MAX_IO_PICS))
+ 
+ struct acpi_vector_group {
+diff --git a/arch/loongarch/include/asm/stacktrace.h b/arch/loongarch/include/asm/stacktrace.h
+index f23adb15f418fb..fc8b64773794a9 100644
+--- a/arch/loongarch/include/asm/stacktrace.h
++++ b/arch/loongarch/include/asm/stacktrace.h
+@@ -8,6 +8,7 @@
+ #include <asm/asm.h>
+ #include <asm/ptrace.h>
+ #include <asm/loongarch.h>
++#include <asm/unwind_hints.h>
+ #include <linux/stringify.h>
+ 
+ enum stack_type {
+@@ -43,6 +44,7 @@ int get_stack_info(unsigned long stack, struct task_struct *task, struct stack_i
+ static __always_inline void prepare_frametrace(struct pt_regs *regs)
+ {
+ 	__asm__ __volatile__(
++		UNWIND_HINT_SAVE
+ 		/* Save $ra */
+ 		STORE_ONE_REG(1)
+ 		/* Use $ra to save PC */
+@@ -80,6 +82,7 @@ static __always_inline void prepare_frametrace(struct pt_regs *regs)
+ 		STORE_ONE_REG(29)
+ 		STORE_ONE_REG(30)
+ 		STORE_ONE_REG(31)
++		UNWIND_HINT_RESTORE
+ 		: "=m" (regs->csr_era)
+ 		: "r" (regs->regs)
+ 		: "memory");
+diff --git a/arch/loongarch/include/asm/unwind_hints.h b/arch/loongarch/include/asm/unwind_hints.h
+index a01086ad9ddea4..2c68bc72736c95 100644
+--- a/arch/loongarch/include/asm/unwind_hints.h
++++ b/arch/loongarch/include/asm/unwind_hints.h
+@@ -23,6 +23,14 @@
+ 	UNWIND_HINT sp_reg=ORC_REG_SP type=UNWIND_HINT_TYPE_CALL
+ .endm
+ 
+-#endif /* __ASSEMBLY__ */
++#else /* !__ASSEMBLY__ */
++
++#define UNWIND_HINT_SAVE \
++	UNWIND_HINT(UNWIND_HINT_TYPE_SAVE, 0, 0, 0)
++
++#define UNWIND_HINT_RESTORE \
++	UNWIND_HINT(UNWIND_HINT_TYPE_RESTORE, 0, 0, 0)
++
++#endif /* !__ASSEMBLY__ */
+ 
+ #endif /* _ASM_LOONGARCH_UNWIND_HINTS_H */
+diff --git a/arch/loongarch/kernel/env.c b/arch/loongarch/kernel/env.c
+index 2f1f5b08638f81..27144de5c5fe4f 100644
+--- a/arch/loongarch/kernel/env.c
++++ b/arch/loongarch/kernel/env.c
+@@ -68,6 +68,8 @@ static int __init fdt_cpu_clk_init(void)
+ 		return -ENODEV;
+ 
+ 	clk = of_clk_get(np, 0);
++	of_node_put(np);
++
+ 	if (IS_ERR(clk))
+ 		return -ENODEV;
+ 
+diff --git a/arch/loongarch/kernel/kgdb.c b/arch/loongarch/kernel/kgdb.c
+index 445c452d72a79c..7be5b4c0c90020 100644
+--- a/arch/loongarch/kernel/kgdb.c
++++ b/arch/loongarch/kernel/kgdb.c
+@@ -8,6 +8,7 @@
+ #include <linux/hw_breakpoint.h>
+ #include <linux/kdebug.h>
+ #include <linux/kgdb.h>
++#include <linux/objtool.h>
+ #include <linux/processor.h>
+ #include <linux/ptrace.h>
+ #include <linux/sched.h>
+@@ -224,13 +225,13 @@ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
+ 	regs->csr_era = pc;
+ }
+ 
+-void arch_kgdb_breakpoint(void)
++noinline void arch_kgdb_breakpoint(void)
+ {
+ 	__asm__ __volatile__ (			\
+ 		".globl kgdb_breakinst\n\t"	\
+-		"nop\n"				\
+ 		"kgdb_breakinst:\tbreak 2\n\t"); /* BRK_KDB = 2 */
+ }
++STACK_FRAME_NON_STANDARD(arch_kgdb_breakpoint);
+ 
+ /*
+  * Calls linux_debug_hook before the kernel dies. If KGDB is enabled,
+diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c
+index ea357a3edc0943..fa1500d4aa3e3a 100644
+--- a/arch/loongarch/net/bpf_jit.c
++++ b/arch/loongarch/net/bpf_jit.c
+@@ -142,6 +142,8 @@ static void build_prologue(struct jit_ctx *ctx)
+ 	 */
+ 	if (seen_tail_call(ctx) && seen_call(ctx))
+ 		move_reg(ctx, TCC_SAVED, REG_TCC);
++	else
++		emit_insn(ctx, nop);
+ 
+ 	ctx->stack_size = stack_adjust;
+ }
+@@ -905,7 +907,10 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
+ 
+ 		move_addr(ctx, t1, func_addr);
+ 		emit_insn(ctx, jirl, LOONGARCH_GPR_RA, t1, 0);
+-		move_reg(ctx, regmap[BPF_REG_0], LOONGARCH_GPR_A0);
++
++		if (insn->src_reg != BPF_PSEUDO_CALL)
++			move_reg(ctx, regmap[BPF_REG_0], LOONGARCH_GPR_A0);
++
+ 		break;
+ 
+ 	/* tail call */
+@@ -930,7 +935,10 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
+ 	{
+ 		const u64 imm64 = (u64)(insn + 1)->imm << 32 | (u32)insn->imm;
+ 
+-		move_imm(ctx, dst, imm64, is32);
++		if (bpf_pseudo_func(insn))
++			move_addr(ctx, dst, imm64);
++		else
++			move_imm(ctx, dst, imm64, is32);
+ 		return 1;
+ 	}
+ 
+diff --git a/arch/loongarch/net/bpf_jit.h b/arch/loongarch/net/bpf_jit.h
+index 68586338ecf859..f9c569f5394914 100644
+--- a/arch/loongarch/net/bpf_jit.h
++++ b/arch/loongarch/net/bpf_jit.h
+@@ -27,6 +27,11 @@ struct jit_data {
+ 	struct jit_ctx ctx;
+ };
+ 
++static inline void emit_nop(union loongarch_instruction *insn)
++{
++	insn->word = INSN_NOP;
++}
++
+ #define emit_insn(ctx, func, ...)						\
+ do {										\
+ 	if (ctx->image != NULL) {						\
+diff --git a/arch/powerpc/configs/mpc885_ads_defconfig b/arch/powerpc/configs/mpc885_ads_defconfig
+index 77306be62e9ee8..129355f87f80fc 100644
+--- a/arch/powerpc/configs/mpc885_ads_defconfig
++++ b/arch/powerpc/configs/mpc885_ads_defconfig
+@@ -78,4 +78,4 @@ CONFIG_DEBUG_VM_PGTABLE=y
+ CONFIG_DETECT_HUNG_TASK=y
+ CONFIG_BDI_SWITCH=y
+ CONFIG_PPC_EARLY_DEBUG=y
+-CONFIG_GENERIC_PTDUMP=y
++CONFIG_PTDUMP_DEBUGFS=y
+diff --git a/arch/powerpc/crypto/Makefile b/arch/powerpc/crypto/Makefile
+index 59808592f0a1b5..1e52b02d8943b3 100644
+--- a/arch/powerpc/crypto/Makefile
++++ b/arch/powerpc/crypto/Makefile
+@@ -56,3 +56,4 @@ $(obj)/aesp8-ppc.S $(obj)/ghashp8-ppc.S: $(obj)/%.S: $(src)/%.pl FORCE
+ OBJECT_FILES_NON_STANDARD_aesp10-ppc.o := y
+ OBJECT_FILES_NON_STANDARD_ghashp10-ppc.o := y
+ OBJECT_FILES_NON_STANDARD_aesp8-ppc.o := y
++OBJECT_FILES_NON_STANDARD_ghashp8-ppc.o := y
+diff --git a/arch/powerpc/kexec/relocate_32.S b/arch/powerpc/kexec/relocate_32.S
+index 104c9911f40611..dd86e338307d3f 100644
+--- a/arch/powerpc/kexec/relocate_32.S
++++ b/arch/powerpc/kexec/relocate_32.S
+@@ -348,16 +348,13 @@ write_utlb:
+ 	rlwinm	r10, r24, 0, 22, 27
+ 
+ 	cmpwi	r10, PPC47x_TLB0_4K
+-	bne	0f
+ 	li	r10, 0x1000			/* r10 = 4k */
+-	ANNOTATE_INTRA_FUNCTION_CALL
+-	bl	1f
++	beq	0f
+ 
+-0:
+ 	/* Defaults to 256M */
+ 	lis	r10, 0x1000
+ 
+-	bcl	20,31,$+4
++0:	bcl	20,31,$+4
+ 1:	mflr	r4
+ 	addi	r4, r4, (2f-1b)			/* virtual address  of 2f */
+ 
+diff --git a/arch/powerpc/platforms/cell/spufs/gang.c b/arch/powerpc/platforms/cell/spufs/gang.c
+index 827d338deaf4c6..2c2999de6bfa25 100644
+--- a/arch/powerpc/platforms/cell/spufs/gang.c
++++ b/arch/powerpc/platforms/cell/spufs/gang.c
+@@ -25,6 +25,7 @@ struct spu_gang *alloc_spu_gang(void)
+ 	mutex_init(&gang->aff_mutex);
+ 	INIT_LIST_HEAD(&gang->list);
+ 	INIT_LIST_HEAD(&gang->aff_list_head);
++	gang->alive = 1;
+ 
+ out:
+ 	return gang;
+diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
+index 70236d1df3d3e0..9f9e4b87162782 100644
+--- a/arch/powerpc/platforms/cell/spufs/inode.c
++++ b/arch/powerpc/platforms/cell/spufs/inode.c
+@@ -192,13 +192,32 @@ static int spufs_fill_dir(struct dentry *dir,
+ 			return -ENOMEM;
+ 		ret = spufs_new_file(dir->d_sb, dentry, files->ops,
+ 					files->mode & mode, files->size, ctx);
+-		if (ret)
++		if (ret) {
++			dput(dentry);
+ 			return ret;
++		}
+ 		files++;
+ 	}
+ 	return 0;
+ }
+ 
++static void unuse_gang(struct dentry *dir)
++{
++	struct inode *inode = dir->d_inode;
++	struct spu_gang *gang = SPUFS_I(inode)->i_gang;
++
++	if (gang) {
++		bool dead;
++
++		inode_lock(inode); // exclusion with spufs_create_context()
++		dead = !--gang->alive;
++		inode_unlock(inode);
++
++		if (dead)
++			simple_recursive_removal(dir, NULL);
++	}
++}
++
+ static int spufs_dir_close(struct inode *inode, struct file *file)
+ {
+ 	struct inode *parent;
+@@ -213,6 +232,7 @@ static int spufs_dir_close(struct inode *inode, struct file *file)
+ 	inode_unlock(parent);
+ 	WARN_ON(ret);
+ 
++	unuse_gang(dir->d_parent);
+ 	return dcache_dir_close(inode, file);
+ }
+ 
+@@ -405,7 +425,7 @@ spufs_create_context(struct inode *inode, struct dentry *dentry,
+ {
+ 	int ret;
+ 	int affinity;
+-	struct spu_gang *gang;
++	struct spu_gang *gang = SPUFS_I(inode)->i_gang;
+ 	struct spu_context *neighbor;
+ 	struct path path = {.mnt = mnt, .dentry = dentry};
+ 
+@@ -420,11 +440,15 @@ spufs_create_context(struct inode *inode, struct dentry *dentry,
+ 	if ((flags & SPU_CREATE_ISOLATE) && !isolated_loader)
+ 		return -ENODEV;
+ 
+-	gang = NULL;
++	if (gang) {
++		if (!gang->alive)
++			return -ENOENT;
++		gang->alive++;
++	}
++
+ 	neighbor = NULL;
+ 	affinity = flags & (SPU_CREATE_AFFINITY_MEM | SPU_CREATE_AFFINITY_SPU);
+ 	if (affinity) {
+-		gang = SPUFS_I(inode)->i_gang;
+ 		if (!gang)
+ 			return -EINVAL;
+ 		mutex_lock(&gang->aff_mutex);
+@@ -436,8 +460,11 @@ spufs_create_context(struct inode *inode, struct dentry *dentry,
+ 	}
+ 
+ 	ret = spufs_mkdir(inode, dentry, flags, mode & 0777);
+-	if (ret)
++	if (ret) {
++		if (neighbor)
++			put_spu_context(neighbor);
+ 		goto out_aff_unlock;
++	}
+ 
+ 	if (affinity) {
+ 		spufs_set_affinity(flags, SPUFS_I(d_inode(dentry))->i_ctx,
+@@ -453,6 +480,8 @@ spufs_create_context(struct inode *inode, struct dentry *dentry,
+ out_aff_unlock:
+ 	if (affinity)
+ 		mutex_unlock(&gang->aff_mutex);
++	if (ret && gang)
++		gang->alive--; // can't reach 0
+ 	return ret;
+ }
+ 
+@@ -482,6 +511,7 @@ spufs_mkgang(struct inode *dir, struct dentry *dentry, umode_t mode)
+ 	inode->i_fop = &simple_dir_operations;
+ 
+ 	d_instantiate(dentry, inode);
++	dget(dentry);
+ 	inc_nlink(dir);
+ 	inc_nlink(d_inode(dentry));
+ 	return ret;
+@@ -492,6 +522,21 @@ spufs_mkgang(struct inode *dir, struct dentry *dentry, umode_t mode)
+ 	return ret;
+ }
+ 
++static int spufs_gang_close(struct inode *inode, struct file *file)
++{
++	unuse_gang(file->f_path.dentry);
++	return dcache_dir_close(inode, file);
++}
++
++static const struct file_operations spufs_gang_fops = {
++	.open		= dcache_dir_open,
++	.release	= spufs_gang_close,
++	.llseek		= dcache_dir_lseek,
++	.read		= generic_read_dir,
++	.iterate_shared	= dcache_readdir,
++	.fsync		= noop_fsync,
++};
++
+ static int spufs_gang_open(const struct path *path)
+ {
+ 	int ret;
+@@ -511,7 +556,7 @@ static int spufs_gang_open(const struct path *path)
+ 		return PTR_ERR(filp);
+ 	}
+ 
+-	filp->f_op = &simple_dir_operations;
++	filp->f_op = &spufs_gang_fops;
+ 	fd_install(ret, filp);
+ 	return ret;
+ }
+@@ -526,10 +571,8 @@ static int spufs_create_gang(struct inode *inode,
+ 	ret = spufs_mkgang(inode, dentry, mode & 0777);
+ 	if (!ret) {
+ 		ret = spufs_gang_open(&path);
+-		if (ret < 0) {
+-			int err = simple_rmdir(inode, dentry);
+-			WARN_ON(err);
+-		}
++		if (ret < 0)
++			unuse_gang(dentry);
+ 	}
+ 	return ret;
+ }
+diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
+index 84958487f696a4..d33787c57c39a2 100644
+--- a/arch/powerpc/platforms/cell/spufs/spufs.h
++++ b/arch/powerpc/platforms/cell/spufs/spufs.h
+@@ -151,6 +151,8 @@ struct spu_gang {
+ 	int aff_flags;
+ 	struct spu *aff_ref_spu;
+ 	atomic_t aff_sched_count;
++
++	int alive;
+ };
+ 
+ /* Flag bits for spu_gang aff_flags */
+diff --git a/arch/riscv/errata/Makefile b/arch/riscv/errata/Makefile
+index f0da9d7b39c374..bc6c77ba837d2d 100644
+--- a/arch/riscv/errata/Makefile
++++ b/arch/riscv/errata/Makefile
+@@ -1,5 +1,9 @@
+ ifdef CONFIG_RELOCATABLE
+-KBUILD_CFLAGS += -fno-pie
++# We can't use PIC/PIE when handling early-boot errata parsing, as the kernel
++# doesn't have a GOT setup at that point.  So instead just use medany: it's
++# usually position-independent, so it should be good enough for the errata
++# handling.
++KBUILD_CFLAGS += -fno-pie -mcmodel=medany
+ endif
+ 
+ ifdef CONFIG_RISCV_ALTERNATIVE_EARLY
+diff --git a/arch/riscv/include/asm/ftrace.h b/arch/riscv/include/asm/ftrace.h
+index 2cddd79ff21b1e..f253c8dae878ef 100644
+--- a/arch/riscv/include/asm/ftrace.h
++++ b/arch/riscv/include/asm/ftrace.h
+@@ -92,7 +92,7 @@ struct dyn_arch_ftrace {
+ #define make_call_t0(caller, callee, call)				\
+ do {									\
+ 	unsigned int offset =						\
+-		(unsigned long) callee - (unsigned long) caller;	\
++		(unsigned long) (callee) - (unsigned long) (caller);	\
+ 	call[0] = to_auipc_t0(offset);					\
+ 	call[1] = to_jalr_t0(offset);					\
+ } while (0)
+@@ -108,7 +108,7 @@ do {									\
+ #define make_call_ra(caller, callee, call)				\
+ do {									\
+ 	unsigned int offset =						\
+-		(unsigned long) callee - (unsigned long) caller;	\
++		(unsigned long) (callee) - (unsigned long) (caller);	\
+ 	call[0] = to_auipc_ra(offset);					\
+ 	call[1] = to_jalr_ra(offset);					\
+ } while (0)
+diff --git a/arch/riscv/kernel/elf_kexec.c b/arch/riscv/kernel/elf_kexec.c
+index 3c37661801f95d..e783a72d051f43 100644
+--- a/arch/riscv/kernel/elf_kexec.c
++++ b/arch/riscv/kernel/elf_kexec.c
+@@ -468,6 +468,9 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
+ 		case R_RISCV_ALIGN:
+ 		case R_RISCV_RELAX:
+ 			break;
++		case R_RISCV_64:
++			*(u64 *)loc = val;
++			break;
+ 		default:
+ 			pr_err("Unknown rela relocation: %d\n", r_type);
+ 			return -ENOEXEC;
+diff --git a/arch/riscv/kvm/vcpu_pmu.c b/arch/riscv/kvm/vcpu_pmu.c
+index 2707a51b082ca7..78ac3216a54ddb 100644
+--- a/arch/riscv/kvm/vcpu_pmu.c
++++ b/arch/riscv/kvm/vcpu_pmu.c
+@@ -666,6 +666,7 @@ int kvm_riscv_vcpu_pmu_ctr_cfg_match(struct kvm_vcpu *vcpu, unsigned long ctr_ba
+ 		.type = etype,
+ 		.size = sizeof(struct perf_event_attr),
+ 		.pinned = true,
++		.disabled = true,
+ 		/*
+ 		 * It should never reach here if the platform doesn't support the sscofpmf
+ 		 * extension as mode filtering won't work without it.
+diff --git a/arch/riscv/mm/hugetlbpage.c b/arch/riscv/mm/hugetlbpage.c
+index b4a78a4b35cff5..375dd96bb4a0d2 100644
+--- a/arch/riscv/mm/hugetlbpage.c
++++ b/arch/riscv/mm/hugetlbpage.c
+@@ -148,22 +148,25 @@ unsigned long hugetlb_mask_last_page(struct hstate *h)
+ static pte_t get_clear_contig(struct mm_struct *mm,
+ 			      unsigned long addr,
+ 			      pte_t *ptep,
+-			      unsigned long pte_num)
++			      unsigned long ncontig)
+ {
+-	pte_t orig_pte = ptep_get(ptep);
+-	unsigned long i;
+-
+-	for (i = 0; i < pte_num; i++, addr += PAGE_SIZE, ptep++) {
+-		pte_t pte = ptep_get_and_clear(mm, addr, ptep);
+-
+-		if (pte_dirty(pte))
+-			orig_pte = pte_mkdirty(orig_pte);
+-
+-		if (pte_young(pte))
+-			orig_pte = pte_mkyoung(orig_pte);
++	pte_t pte, tmp_pte;
++	bool present;
++
++	pte = ptep_get_and_clear(mm, addr, ptep);
++	present = pte_present(pte);
++	while (--ncontig) {
++		ptep++;
++		addr += PAGE_SIZE;
++		tmp_pte = ptep_get_and_clear(mm, addr, ptep);
++		if (present) {
++			if (pte_dirty(tmp_pte))
++				pte = pte_mkdirty(pte);
++			if (pte_young(tmp_pte))
++				pte = pte_mkyoung(pte);
++		}
+ 	}
+-
+-	return orig_pte;
++	return pte;
+ }
+ 
+ static pte_t get_clear_contig_flush(struct mm_struct *mm,
+@@ -212,6 +215,26 @@ static void clear_flush(struct mm_struct *mm,
+ 	flush_tlb_range(&vma, saddr, addr);
+ }
+ 
++static int num_contig_ptes_from_size(unsigned long sz, size_t *pgsize)
++{
++	unsigned long hugepage_shift;
++
++	if (sz >= PGDIR_SIZE)
++		hugepage_shift = PGDIR_SHIFT;
++	else if (sz >= P4D_SIZE)
++		hugepage_shift = P4D_SHIFT;
++	else if (sz >= PUD_SIZE)
++		hugepage_shift = PUD_SHIFT;
++	else if (sz >= PMD_SIZE)
++		hugepage_shift = PMD_SHIFT;
++	else
++		hugepage_shift = PAGE_SHIFT;
++
++	*pgsize = 1 << hugepage_shift;
++
++	return sz >> hugepage_shift;
++}
++
+ /*
+  * When dealing with NAPOT mappings, the privileged specification indicates that
+  * "if an update needs to be made, the OS generally should first mark all of the
+@@ -226,22 +249,10 @@ void set_huge_pte_at(struct mm_struct *mm,
+ 		     pte_t pte,
+ 		     unsigned long sz)
+ {
+-	unsigned long hugepage_shift, pgsize;
++	size_t pgsize;
+ 	int i, pte_num;
+ 
+-	if (sz >= PGDIR_SIZE)
+-		hugepage_shift = PGDIR_SHIFT;
+-	else if (sz >= P4D_SIZE)
+-		hugepage_shift = P4D_SHIFT;
+-	else if (sz >= PUD_SIZE)
+-		hugepage_shift = PUD_SHIFT;
+-	else if (sz >= PMD_SIZE)
+-		hugepage_shift = PMD_SHIFT;
+-	else
+-		hugepage_shift = PAGE_SHIFT;
+-
+-	pte_num = sz >> hugepage_shift;
+-	pgsize = 1 << hugepage_shift;
++	pte_num = num_contig_ptes_from_size(sz, &pgsize);
+ 
+ 	if (!pte_present(pte)) {
+ 		for (i = 0; i < pte_num; i++, ptep++, addr += pgsize)
+@@ -295,13 +306,14 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+ 			      unsigned long addr,
+ 			      pte_t *ptep, unsigned long sz)
+ {
++	size_t pgsize;
+ 	pte_t orig_pte = ptep_get(ptep);
+ 	int pte_num;
+ 
+ 	if (!pte_napot(orig_pte))
+ 		return ptep_get_and_clear(mm, addr, ptep);
+ 
+-	pte_num = napot_pte_num(napot_cont_order(orig_pte));
++	pte_num = num_contig_ptes_from_size(sz, &pgsize);
+ 
+ 	return get_clear_contig(mm, addr, ptep, pte_num);
+ }
+@@ -351,6 +363,7 @@ void huge_pte_clear(struct mm_struct *mm,
+ 		    pte_t *ptep,
+ 		    unsigned long sz)
+ {
++	size_t pgsize;
+ 	pte_t pte = ptep_get(ptep);
+ 	int i, pte_num;
+ 
+@@ -359,8 +372,9 @@ void huge_pte_clear(struct mm_struct *mm,
+ 		return;
+ 	}
+ 
+-	pte_num = napot_pte_num(napot_cont_order(pte));
+-	for (i = 0; i < pte_num; i++, addr += PAGE_SIZE, ptep++)
++	pte_num = num_contig_ptes_from_size(sz, &pgsize);
++
++	for (i = 0; i < pte_num; i++, addr += pgsize, ptep++)
+ 		pte_clear(mm, addr, ptep);
+ }
+ 
+diff --git a/arch/riscv/purgatory/entry.S b/arch/riscv/purgatory/entry.S
+index 0e6ca6d5ae4b41..c5db2f072c341a 100644
+--- a/arch/riscv/purgatory/entry.S
++++ b/arch/riscv/purgatory/entry.S
+@@ -12,6 +12,7 @@
+ 
+ .text
+ 
++.align	2
+ SYM_CODE_START(purgatory_start)
+ 
+ 	lla	sp, .Lstack
+diff --git a/arch/s390/include/asm/io.h b/arch/s390/include/asm/io.h
+index fc9933a743d692..251e0372ccbd0a 100644
+--- a/arch/s390/include/asm/io.h
++++ b/arch/s390/include/asm/io.h
+@@ -34,8 +34,6 @@ void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr);
+ 
+ #define ioremap_wc(addr, size)  \
+ 	ioremap_prot((addr), (size), pgprot_val(pgprot_writecombine(PAGE_KERNEL)))
+-#define ioremap_wt(addr, size)  \
+-	ioremap_prot((addr), (size), pgprot_val(pgprot_writethrough(PAGE_KERNEL)))
+ 
+ static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
+ {
+diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
+index 0ffbaf7419558b..5ee73f245a0c0b 100644
+--- a/arch/s390/include/asm/pgtable.h
++++ b/arch/s390/include/asm/pgtable.h
+@@ -1365,9 +1365,6 @@ void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr);
+ #define pgprot_writecombine	pgprot_writecombine
+ pgprot_t pgprot_writecombine(pgprot_t prot);
+ 
+-#define pgprot_writethrough	pgprot_writethrough
+-pgprot_t pgprot_writethrough(pgprot_t prot);
+-
+ #define PFN_PTE_SHIFT		PAGE_SHIFT
+ 
+ /*
+diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
+index 594da4cba707a6..a7de838f803189 100644
+--- a/arch/s390/kernel/entry.S
++++ b/arch/s390/kernel/entry.S
+@@ -501,7 +501,7 @@ SYM_CODE_START(mcck_int_handler)
+ 	clgrjl	%r9,%r14, 4f
+ 	larl	%r14,.Lsie_leave
+ 	clgrjhe	%r9,%r14, 4f
+-	lg	%r10,__LC_PCPU
++	lg	%r10,__LC_PCPU(%r13)
+ 	oi	__PCPU_FLAGS+7(%r10), _CIF_MCCK_GUEST
+ 4:	BPENTER	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
+ 	SIEEXIT __SF_SIE_CONTROL(%r15),%r13
+diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
+index 2c944bafb0309c..b03c665d72426a 100644
+--- a/arch/s390/mm/pgtable.c
++++ b/arch/s390/mm/pgtable.c
+@@ -34,16 +34,6 @@ pgprot_t pgprot_writecombine(pgprot_t prot)
+ }
+ EXPORT_SYMBOL_GPL(pgprot_writecombine);
+ 
+-pgprot_t pgprot_writethrough(pgprot_t prot)
+-{
+-	/*
+-	 * mio_wb_bit_mask may be set on a different CPU, but it is only set
+-	 * once at init and only read afterwards.
+-	 */
+-	return __pgprot(pgprot_val(prot) & ~mio_wb_bit_mask);
+-}
+-EXPORT_SYMBOL_GPL(pgprot_writethrough);
+-
+ static inline void ptep_ipte_local(struct mm_struct *mm, unsigned long addr,
+ 				   pte_t *ptep, int nodat)
+ {
+diff --git a/arch/um/include/shared/os.h b/arch/um/include/shared/os.h
+index 9a039d6f1f7483..77a8593f219a13 100644
+--- a/arch/um/include/shared/os.h
++++ b/arch/um/include/shared/os.h
+@@ -218,7 +218,6 @@ extern int os_protect_memory(void *addr, unsigned long len,
+ extern int os_unmap_memory(void *addr, int len);
+ extern int os_drop_memory(void *addr, int length);
+ extern int can_drop_memory(void);
+-extern int os_mincore(void *addr, unsigned long len);
+ 
+ /* execvp.c */
+ extern int execvp_noalloc(char *buf, const char *file, char *const argv[]);
+diff --git a/arch/um/kernel/Makefile b/arch/um/kernel/Makefile
+index f8567b933ffaa9..4df1cd0d20179e 100644
+--- a/arch/um/kernel/Makefile
++++ b/arch/um/kernel/Makefile
+@@ -17,7 +17,7 @@ extra-y := vmlinux.lds
+ obj-y = config.o exec.o exitcode.o irq.o ksyms.o mem.o \
+ 	physmem.o process.o ptrace.o reboot.o sigio.o \
+ 	signal.o sysrq.o time.o tlb.o trap.o \
+-	um_arch.o umid.o maccess.o kmsg_dump.o capflags.o skas/
++	um_arch.o umid.o kmsg_dump.o capflags.o skas/
+ obj-y += load_file.o
+ 
+ obj-$(CONFIG_BLK_DEV_INITRD) += initrd.o
+diff --git a/arch/um/kernel/maccess.c b/arch/um/kernel/maccess.c
+deleted file mode 100644
+index 8ccd56813f684f..00000000000000
+--- a/arch/um/kernel/maccess.c
++++ /dev/null
+@@ -1,19 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-only
+-/*
+- * Copyright (C) 2013 Richard Weinberger <richrd@nod.at>
+- */
+-
+-#include <linux/uaccess.h>
+-#include <linux/kernel.h>
+-#include <os.h>
+-
+-bool copy_from_kernel_nofault_allowed(const void *src, size_t size)
+-{
+-	void *psrc = (void *)rounddown((unsigned long)src, PAGE_SIZE);
+-
+-	if ((unsigned long)src < PAGE_SIZE || size <= 0)
+-		return false;
+-	if (os_mincore(psrc, size + src - psrc) <= 0)
+-		return false;
+-	return true;
+-}
+diff --git a/arch/um/os-Linux/process.c b/arch/um/os-Linux/process.c
+index e52dd37ddadccc..2686120ab2325a 100644
+--- a/arch/um/os-Linux/process.c
++++ b/arch/um/os-Linux/process.c
+@@ -223,57 +223,6 @@ int __init can_drop_memory(void)
+ 	return ok;
+ }
+ 
+-static int os_page_mincore(void *addr)
+-{
+-	char vec[2];
+-	int ret;
+-
+-	ret = mincore(addr, UM_KERN_PAGE_SIZE, vec);
+-	if (ret < 0) {
+-		if (errno == ENOMEM || errno == EINVAL)
+-			return 0;
+-		else
+-			return -errno;
+-	}
+-
+-	return vec[0] & 1;
+-}
+-
+-int os_mincore(void *addr, unsigned long len)
+-{
+-	char *vec;
+-	int ret, i;
+-
+-	if (len <= UM_KERN_PAGE_SIZE)
+-		return os_page_mincore(addr);
+-
+-	vec = calloc(1, (len + UM_KERN_PAGE_SIZE - 1) / UM_KERN_PAGE_SIZE);
+-	if (!vec)
+-		return -ENOMEM;
+-
+-	ret = mincore(addr, UM_KERN_PAGE_SIZE, vec);
+-	if (ret < 0) {
+-		if (errno == ENOMEM || errno == EINVAL)
+-			ret = 0;
+-		else
+-			ret = -errno;
+-
+-		goto out;
+-	}
+-
+-	for (i = 0; i < ((len + UM_KERN_PAGE_SIZE - 1) / UM_KERN_PAGE_SIZE); i++) {
+-		if (!(vec[i] & 1)) {
+-			ret = 0;
+-			goto out;
+-		}
+-	}
+-
+-	ret = 1;
+-out:
+-	free(vec);
+-	return ret;
+-}
+-
+ void init_new_thread_signals(void)
+ {
+ 	set_handler(SIGSEGV);
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index 6f8e9af827e0c9..db38d2b9b78868 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -226,7 +226,7 @@ config X86
+ 	select HAVE_SAMPLE_FTRACE_DIRECT_MULTI	if X86_64
+ 	select HAVE_EBPF_JIT
+ 	select HAVE_EFFICIENT_UNALIGNED_ACCESS
+-	select HAVE_EISA
++	select HAVE_EISA			if X86_32
+ 	select HAVE_EXIT_THREAD
+ 	select HAVE_GUP_FAST
+ 	select HAVE_FENTRY			if X86_64 || DYNAMIC_FTRACE
+@@ -894,6 +894,7 @@ config INTEL_TDX_GUEST
+ 	depends on X86_64 && CPU_SUP_INTEL
+ 	depends on X86_X2APIC
+ 	depends on EFI_STUB
++	depends on PARAVIRT
+ 	select ARCH_HAS_CC_PLATFORM
+ 	select X86_MEM_ENCRYPT
+ 	select X86_MCE
+diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
+index 2a7279d80460a8..42e6a40876ea4c 100644
+--- a/arch/x86/Kconfig.cpu
++++ b/arch/x86/Kconfig.cpu
+@@ -368,7 +368,7 @@ config X86_HAVE_PAE
+ 
+ config X86_CMPXCHG64
+ 	def_bool y
+-	depends on X86_HAVE_PAE || M586TSC || M586MMX || MK6 || MK7
++	depends on X86_HAVE_PAE || M586TSC || M586MMX || MK6 || MK7 || MGEODEGX1 || MGEODE_LX
+ 
+ # this should be set for all -march=.. options where the compiler
+ # generates cmov.
+diff --git a/arch/x86/Makefile.um b/arch/x86/Makefile.um
+index a46b1397ad01c2..c86cbd9cbba38f 100644
+--- a/arch/x86/Makefile.um
++++ b/arch/x86/Makefile.um
+@@ -7,12 +7,13 @@ core-y += arch/x86/crypto/
+ # GCC versions < 11. See:
+ # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=99652
+ #
+-ifeq ($(CONFIG_CC_IS_CLANG),y)
+-KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow -mno-avx
+-KBUILD_RUSTFLAGS += --target=$(objtree)/scripts/target.json
++ifeq ($(call gcc-min-version, 110000)$(CONFIG_CC_IS_CLANG),y)
++KBUILD_CFLAGS +=  -mno-sse -mno-mmx -mno-sse2 -mno-3dnow -mno-avx
+ KBUILD_RUSTFLAGS += -Ctarget-feature=-sse,-sse2,-sse3,-ssse3,-sse4.1,-sse4.2,-avx,-avx2
+ endif
+ 
++KBUILD_RUSTFLAGS += --target=$(objtree)/scripts/target.json
++
+ ifeq ($(CONFIG_X86_32),y)
+ START := 0x8048000
+ 
+diff --git a/arch/x86/coco/tdx/tdx.c b/arch/x86/coco/tdx/tdx.c
+index 2f85ed005c42f1..b8aeb3ac7d28b7 100644
+--- a/arch/x86/coco/tdx/tdx.c
++++ b/arch/x86/coco/tdx/tdx.c
+@@ -14,6 +14,7 @@
+ #include <asm/ia32.h>
+ #include <asm/insn.h>
+ #include <asm/insn-eval.h>
++#include <asm/paravirt_types.h>
+ #include <asm/pgtable.h>
+ #include <asm/set_memory.h>
+ #include <asm/traps.h>
+@@ -359,7 +360,7 @@ static int handle_halt(struct ve_info *ve)
+ 	return ve_instr_len(ve);
+ }
+ 
+-void __cpuidle tdx_safe_halt(void)
++void __cpuidle tdx_halt(void)
+ {
+ 	const bool irq_disabled = false;
+ 
+@@ -370,6 +371,16 @@ void __cpuidle tdx_safe_halt(void)
+ 		WARN_ONCE(1, "HLT instruction emulation failed\n");
+ }
+ 
++static void __cpuidle tdx_safe_halt(void)
++{
++	tdx_halt();
++	/*
++	 * "__cpuidle" section doesn't support instrumentation, so stick
++	 * with raw_* variant that avoids tracing hooks.
++	 */
++	raw_local_irq_enable();
++}
++
+ static int read_msr(struct pt_regs *regs, struct ve_info *ve)
+ {
+ 	struct tdx_module_args args = {
+@@ -1056,6 +1067,19 @@ void __init tdx_early_init(void)
+ 	x86_platform.guest.enc_kexec_begin	     = tdx_kexec_begin;
+ 	x86_platform.guest.enc_kexec_finish	     = tdx_kexec_finish;
+ 
++	/*
++	 * Avoid "sti;hlt" execution in TDX guests as HLT induces a #VE that
++	 * will enable interrupts before HLT TDCALL invocation if executed
++	 * in STI-shadow, possibly resulting in missed wakeup events.
++	 *
++	 * Modify all possible HLT execution paths to use TDX-specific routines
++	 * that directly execute TDCALL and toggle the interrupt state as
++	 * needed after TDCALL completion. This also reduces HLT-related #VEs
++	 * in addition to making the halt logic execution reliable.
++	 */
++	pv_ops.irq.safe_halt = tdx_safe_halt;
++	pv_ops.irq.halt = tdx_halt;
++
+ 	/*
+ 	 * TDX intercepts the RDMSR to read the X2APIC ID in the parallel
+ 	 * bringup low level code. That raises #VE which cannot be handled
+diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
+index ea81770629eea6..626a81c6015bda 100644
+--- a/arch/x86/entry/calling.h
++++ b/arch/x86/entry/calling.h
+@@ -70,6 +70,8 @@ For 32-bit we have the following conventions - kernel is built with
+ 	pushq	%rsi		/* pt_regs->si */
+ 	movq	8(%rsp), %rsi	/* temporarily store the return address in %rsi */
+ 	movq	%rdi, 8(%rsp)	/* pt_regs->di (overwriting original return address) */
++	/* We just clobbered the return address - use the IRET frame for unwinding: */
++	UNWIND_HINT_IRET_REGS offset=3*8
+ 	.else
+ 	pushq   %rdi		/* pt_regs->di */
+ 	pushq   %rsi		/* pt_regs->si */
+diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
+index 94941c5a10ac10..51efd2da4d7fdd 100644
+--- a/arch/x86/entry/common.c
++++ b/arch/x86/entry/common.c
+@@ -142,7 +142,7 @@ static __always_inline int syscall_32_enter(struct pt_regs *regs)
+ #ifdef CONFIG_IA32_EMULATION
+ bool __ia32_enabled __ro_after_init = !IS_ENABLED(CONFIG_IA32_EMULATION_DEFAULT_DISABLED);
+ 
+-static int ia32_emulation_override_cmdline(char *arg)
++static int __init ia32_emulation_override_cmdline(char *arg)
+ {
+ 	return kstrtobool(arg, &__ia32_enabled);
+ }
+diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
+index 3a68b3e0b7a358..f86e47afd56099 100644
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -2779,28 +2779,33 @@ static u64 icl_update_topdown_event(struct perf_event *event)
+ 
+ DEFINE_STATIC_CALL(intel_pmu_update_topdown_event, x86_perf_event_update);
+ 
+-static void intel_pmu_read_topdown_event(struct perf_event *event)
++static void intel_pmu_read_event(struct perf_event *event)
+ {
+-	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
++	if (event->hw.flags & (PERF_X86_EVENT_AUTO_RELOAD | PERF_X86_EVENT_TOPDOWN)) {
++		struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
++		bool pmu_enabled = cpuc->enabled;
+ 
+-	/* Only need to call update_topdown_event() once for group read. */
+-	if ((cpuc->txn_flags & PERF_PMU_TXN_READ) &&
+-	    !is_slots_event(event))
+-		return;
++		/* Only need to call update_topdown_event() once for group read. */
++		if (is_metric_event(event) && (cpuc->txn_flags & PERF_PMU_TXN_READ))
++			return;
+ 
+-	perf_pmu_disable(event->pmu);
+-	static_call(intel_pmu_update_topdown_event)(event);
+-	perf_pmu_enable(event->pmu);
+-}
++		cpuc->enabled = 0;
++		if (pmu_enabled)
++			intel_pmu_disable_all();
+ 
+-static void intel_pmu_read_event(struct perf_event *event)
+-{
+-	if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
+-		intel_pmu_auto_reload_read(event);
+-	else if (is_topdown_count(event))
+-		intel_pmu_read_topdown_event(event);
+-	else
+-		x86_perf_event_update(event);
++		if (is_topdown_event(event))
++			static_call(intel_pmu_update_topdown_event)(event);
++		else
++			intel_pmu_drain_pebs_buffer();
++
++		cpuc->enabled = pmu_enabled;
++		if (pmu_enabled)
++			intel_pmu_enable_all(0);
++
++		return;
++	}
++
++	x86_perf_event_update(event);
+ }
+ 
+ static void intel_pmu_enable_fixed(struct perf_event *event)
+@@ -3067,7 +3072,7 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
+ 
+ 		handled++;
+ 		x86_pmu_handle_guest_pebs(regs, &data);
+-		x86_pmu.drain_pebs(regs, &data);
++		static_call(x86_pmu_drain_pebs)(regs, &data);
+ 		status &= intel_ctrl | GLOBAL_STATUS_TRACE_TOPAPMI;
+ 
+ 		/*
+diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
+index c07ca43e67e7f1..1617aa3efd68b1 100644
+--- a/arch/x86/events/intel/ds.c
++++ b/arch/x86/events/intel/ds.c
+@@ -932,11 +932,11 @@ int intel_pmu_drain_bts_buffer(void)
+ 	return 1;
+ }
+ 
+-static inline void intel_pmu_drain_pebs_buffer(void)
++void intel_pmu_drain_pebs_buffer(void)
+ {
+ 	struct perf_sample_data data;
+ 
+-	x86_pmu.drain_pebs(NULL, &data);
++	static_call(x86_pmu_drain_pebs)(NULL, &data);
+ }
+ 
+ /*
+@@ -2079,15 +2079,6 @@ get_next_pebs_record_by_bit(void *base, void *top, int bit)
+ 	return NULL;
+ }
+ 
+-void intel_pmu_auto_reload_read(struct perf_event *event)
+-{
+-	WARN_ON(!(event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD));
+-
+-	perf_pmu_disable(event->pmu);
+-	intel_pmu_drain_pebs_buffer();
+-	perf_pmu_enable(event->pmu);
+-}
+-
+ /*
+  * Special variant of intel_pmu_save_and_restart() for auto-reload.
+  */
+diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
+index ac1182141bf67f..8c616656391ec4 100644
+--- a/arch/x86/events/perf_event.h
++++ b/arch/x86/events/perf_event.h
+@@ -1092,6 +1092,7 @@ extern struct x86_pmu x86_pmu __read_mostly;
+ 
+ DECLARE_STATIC_CALL(x86_pmu_set_period, *x86_pmu.set_period);
+ DECLARE_STATIC_CALL(x86_pmu_update,     *x86_pmu.update);
++DECLARE_STATIC_CALL(x86_pmu_drain_pebs,	*x86_pmu.drain_pebs);
+ 
+ static __always_inline struct x86_perf_task_context_opt *task_context_opt(void *ctx)
+ {
+@@ -1626,7 +1627,7 @@ void intel_pmu_pebs_disable_all(void);
+ 
+ void intel_pmu_pebs_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in);
+ 
+-void intel_pmu_auto_reload_read(struct perf_event *event);
++void intel_pmu_drain_pebs_buffer(void);
+ 
+ void intel_pmu_store_pebs_lbrs(struct lbr_entry *lbr);
+ 
+diff --git a/arch/x86/hyperv/hv_vtl.c b/arch/x86/hyperv/hv_vtl.c
+index 04775346369c59..d04ccd4b3b4af0 100644
+--- a/arch/x86/hyperv/hv_vtl.c
++++ b/arch/x86/hyperv/hv_vtl.c
+@@ -30,6 +30,7 @@ void __init hv_vtl_init_platform(void)
+ 	x86_platform.realmode_init = x86_init_noop;
+ 	x86_init.irqs.pre_vector_init = x86_init_noop;
+ 	x86_init.timers.timer_init = x86_init_noop;
++	x86_init.resources.probe_roms = x86_init_noop;
+ 
+ 	/* Avoid searching for BIOS MP tables */
+ 	x86_init.mpparse.find_mptable = x86_init_noop;
+diff --git a/arch/x86/hyperv/ivm.c b/arch/x86/hyperv/ivm.c
+index 60fc3ed728304c..4065f5ef3ae08e 100644
+--- a/arch/x86/hyperv/ivm.c
++++ b/arch/x86/hyperv/ivm.c
+@@ -339,7 +339,7 @@ int hv_snp_boot_ap(u32 cpu, unsigned long start_ip)
+ 	vmsa->sev_features = sev_status >> 2;
+ 
+ 	ret = snp_set_vmsa(vmsa, true);
+-	if (!ret) {
++	if (ret) {
+ 		pr_err("RMPADJUST(%llx) failed: %llx\n", (u64)vmsa, ret);
+ 		free_page((u64)vmsa);
+ 		return ret;
+@@ -465,7 +465,6 @@ static int hv_mark_gpa_visibility(u16 count, const u64 pfn[],
+ 			   enum hv_mem_host_visibility visibility)
+ {
+ 	struct hv_gpa_range_for_visibility *input;
+-	u16 pages_processed;
+ 	u64 hv_status;
+ 	unsigned long flags;
+ 
+@@ -494,7 +493,7 @@ static int hv_mark_gpa_visibility(u16 count, const u64 pfn[],
+ 	memcpy((void *)input->gpa_page_list, pfn, count * sizeof(*pfn));
+ 	hv_status = hv_do_rep_hypercall(
+ 			HVCALL_MODIFY_SPARSE_GPA_PAGE_HOST_VISIBILITY, count,
+-			0, input, &pages_processed);
++			0, input, NULL);
+ 	local_irq_restore(flags);
+ 
+ 	if (hv_result_success(hv_status))
+diff --git a/arch/x86/include/asm/tdx.h b/arch/x86/include/asm/tdx.h
+index eba178996d8459..b5b63329406137 100644
+--- a/arch/x86/include/asm/tdx.h
++++ b/arch/x86/include/asm/tdx.h
+@@ -58,7 +58,7 @@ void tdx_get_ve_info(struct ve_info *ve);
+ 
+ bool tdx_handle_virt_exception(struct pt_regs *regs, struct ve_info *ve);
+ 
+-void tdx_safe_halt(void);
++void tdx_halt(void);
+ 
+ bool tdx_early_handle_ve(struct pt_regs *regs);
+ 
+@@ -69,7 +69,7 @@ u64 tdx_hcall_get_quote(u8 *buf, size_t size);
+ #else
+ 
+ static inline void tdx_early_init(void) { };
+-static inline void tdx_safe_halt(void) { };
++static inline void tdx_halt(void) { };
+ 
+ static inline bool tdx_early_handle_ve(struct pt_regs *regs) { return false; }
+ 
+diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
+index 02fc2aa06e9e0e..3da64513974853 100644
+--- a/arch/x86/include/asm/tlbflush.h
++++ b/arch/x86/include/asm/tlbflush.h
+@@ -242,7 +242,7 @@ void flush_tlb_multi(const struct cpumask *cpumask,
+ 	flush_tlb_mm_range((vma)->vm_mm, start, end,			\
+ 			   ((vma)->vm_flags & VM_HUGETLB)		\
+ 				? huge_page_shift(hstate_vma(vma))	\
+-				: PAGE_SHIFT, false)
++				: PAGE_SHIFT, true)
+ 
+ extern void flush_tlb_all(void);
+ extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
+diff --git a/arch/x86/kernel/cpu/mce/severity.c b/arch/x86/kernel/cpu/mce/severity.c
+index dac4d64dfb2a8e..2235a74774360d 100644
+--- a/arch/x86/kernel/cpu/mce/severity.c
++++ b/arch/x86/kernel/cpu/mce/severity.c
+@@ -300,13 +300,12 @@ static noinstr int error_context(struct mce *m, struct pt_regs *regs)
+ 	copy_user  = is_copy_from_user(regs);
+ 	instrumentation_end();
+ 
+-	switch (fixup_type) {
+-	case EX_TYPE_UACCESS:
+-		if (!copy_user)
+-			return IN_KERNEL;
+-		m->kflags |= MCE_IN_KERNEL_COPYIN;
+-		fallthrough;
++	if (copy_user) {
++		m->kflags |= MCE_IN_KERNEL_COPYIN | MCE_IN_KERNEL_RECOV;
++		return IN_KERNEL_RECOV;
++	}
+ 
++	switch (fixup_type) {
+ 	case EX_TYPE_FAULT_MCE_SAFE:
+ 	case EX_TYPE_DEFAULT_MCE_SAFE:
+ 		m->kflags |= MCE_IN_KERNEL_RECOV;
+diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
+index 07fc145f353103..5cd735728fa028 100644
+--- a/arch/x86/kernel/cpu/microcode/amd.c
++++ b/arch/x86/kernel/cpu/microcode/amd.c
+@@ -600,7 +600,7 @@ static bool __apply_microcode_amd(struct microcode_amd *mc, u32 *cur_rev,
+ 	unsigned long p_addr = (unsigned long)&mc->hdr.data_code;
+ 
+ 	if (!verify_sha256_digest(mc->hdr.patch_id, *cur_rev, (const u8 *)p_addr, psize))
+-		return -1;
++		return false;
+ 
+ 	native_wrmsrl(MSR_AMD64_PATCH_LOADER, p_addr);
+ 
+diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+index d7163b764c6268..2d48db66fca857 100644
+--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
++++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+@@ -148,7 +148,8 @@ static int closid_alloc(void)
+ 
+ 	lockdep_assert_held(&rdtgroup_mutex);
+ 
+-	if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) {
++	if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID) &&
++	    is_llc_occupancy_enabled()) {
+ 		cleanest_closid = resctrl_find_cleanest_closid();
+ 		if (cleanest_closid < 0)
+ 			return cleanest_closid;
+diff --git a/arch/x86/kernel/cpu/sgx/driver.c b/arch/x86/kernel/cpu/sgx/driver.c
+index 22b65a5f5ec6c4..7f8d1e11dbee24 100644
+--- a/arch/x86/kernel/cpu/sgx/driver.c
++++ b/arch/x86/kernel/cpu/sgx/driver.c
+@@ -150,13 +150,15 @@ int __init sgx_drv_init(void)
+ 	u64 xfrm_mask;
+ 	int ret;
+ 
+-	if (!cpu_feature_enabled(X86_FEATURE_SGX_LC))
++	if (!cpu_feature_enabled(X86_FEATURE_SGX_LC)) {
++		pr_info("SGX disabled: SGX launch control CPU feature is not available, /dev/sgx_enclave disabled.\n");
+ 		return -ENODEV;
++	}
+ 
+ 	cpuid_count(SGX_CPUID, 0, &eax, &ebx, &ecx, &edx);
+ 
+ 	if (!(eax & 1))  {
+-		pr_err("SGX disabled: SGX1 instruction support not available.\n");
++		pr_info("SGX disabled: SGX1 instruction support not available, /dev/sgx_enclave disabled.\n");
+ 		return -ENODEV;
+ 	}
+ 
+@@ -173,8 +175,10 @@ int __init sgx_drv_init(void)
+ 	}
+ 
+ 	ret = misc_register(&sgx_dev_enclave);
+-	if (ret)
++	if (ret) {
++		pr_info("SGX disabled: Unable to register the /dev/sgx_enclave driver (%d).\n", ret);
+ 		return ret;
++	}
+ 
+ 	return 0;
+ }
+diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
+index a7d562697e50e4..b2b118a8c09be9 100644
+--- a/arch/x86/kernel/dumpstack.c
++++ b/arch/x86/kernel/dumpstack.c
+@@ -195,6 +195,7 @@ static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
+ 	printk("%sCall Trace:\n", log_lvl);
+ 
+ 	unwind_start(&state, task, regs, stack);
++	stack = stack ?: get_stack_pointer(task, regs);
+ 	regs = unwind_get_entry_regs(&state, &partial);
+ 
+ 	/*
+@@ -213,9 +214,7 @@ static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
+ 	 * - hardirq stack
+ 	 * - entry stack
+ 	 */
+-	for (stack = stack ?: get_stack_pointer(task, regs);
+-	     stack;
+-	     stack = stack_info.next_sp) {
++	for (; stack; stack = stack_info.next_sp) {
+ 		const char *stack_name;
+ 
+ 		stack = PTR_ALIGN(stack, sizeof(long));
+diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
+index 1209c7aebb211f..dcac3c058fb761 100644
+--- a/arch/x86/kernel/fpu/core.c
++++ b/arch/x86/kernel/fpu/core.c
+@@ -220,7 +220,7 @@ bool fpu_alloc_guest_fpstate(struct fpu_guest *gfpu)
+ 	struct fpstate *fpstate;
+ 	unsigned int size;
+ 
+-	size = fpu_user_cfg.default_size + ALIGN(offsetof(struct fpstate, regs), 64);
++	size = fpu_kernel_cfg.default_size + ALIGN(offsetof(struct fpstate, regs), 64);
+ 	fpstate = vzalloc(size);
+ 	if (!fpstate)
+ 		return false;
+@@ -232,8 +232,8 @@ bool fpu_alloc_guest_fpstate(struct fpu_guest *gfpu)
+ 	fpstate->is_guest	= true;
+ 
+ 	gfpu->fpstate		= fpstate;
+-	gfpu->xfeatures		= fpu_user_cfg.default_features;
+-	gfpu->perm		= fpu_user_cfg.default_features;
++	gfpu->xfeatures		= fpu_kernel_cfg.default_features;
++	gfpu->perm		= fpu_kernel_cfg.default_features;
+ 
+ 	/*
+ 	 * KVM sets the FP+SSE bits in the XSAVE header when copying FPU state
+diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
+index 15507e739c255b..c7ce3655b70780 100644
+--- a/arch/x86/kernel/process.c
++++ b/arch/x86/kernel/process.c
+@@ -92,7 +92,12 @@ EXPORT_PER_CPU_SYMBOL_GPL(__tss_limit_invalid);
+  */
+ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
+ {
+-	memcpy(dst, src, arch_task_struct_size);
++	/* init_task is not dynamically sized (incomplete FPU state) */
++	if (unlikely(src == &init_task))
++		memcpy_and_pad(dst, arch_task_struct_size, src, sizeof(init_task), 0);
++	else
++		memcpy(dst, src, arch_task_struct_size);
++
+ #ifdef CONFIG_VM86
+ 	dst->thread.vm86 = NULL;
+ #endif
+@@ -933,7 +938,7 @@ void __init select_idle_routine(void)
+ 		static_call_update(x86_idle, mwait_idle);
+ 	} else if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST)) {
+ 		pr_info("using TDX aware idle routine\n");
+-		static_call_update(x86_idle, tdx_safe_halt);
++		static_call_update(x86_idle, tdx_halt);
+ 	} else {
+ 		static_call_update(x86_idle, default_idle);
+ 	}
+diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
+index 2dbadf347b5f4f..5e3e036e6e537f 100644
+--- a/arch/x86/kernel/traps.c
++++ b/arch/x86/kernel/traps.c
+@@ -379,6 +379,21 @@ __visible void __noreturn handle_stack_overflow(struct pt_regs *regs,
+ }
+ #endif
+ 
++/*
++ * Prevent the compiler and/or objtool from marking the !CONFIG_X86_ESPFIX64
++ * version of exc_double_fault() as noreturn.  Otherwise the noreturn mismatch
++ * between configs triggers objtool warnings.
++ *
++ * This is a temporary hack until we have compiler or plugin support for
++ * annotating noreturns.
++ */
++#ifdef CONFIG_X86_ESPFIX64
++#define always_true() true
++#else
++bool always_true(void);
++bool __weak always_true(void) { return true; }
++#endif
++
+ /*
+  * Runs on an IST stack for x86_64 and on a special task stack for x86_32.
+  *
+@@ -514,7 +529,8 @@ DEFINE_IDTENTRY_DF(exc_double_fault)
+ 
+ 	pr_emerg("PANIC: double fault, error_code: 0x%lx\n", error_code);
+ 	die("double fault", regs, error_code);
+-	panic("Machine halted.");
++	if (always_true())
++		panic("Machine halted.");
+ 	instrumentation_end();
+ }
+ 
+diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
+index dfe6847fd99e5e..310d8cdf7ca3a9 100644
+--- a/arch/x86/kernel/tsc.c
++++ b/arch/x86/kernel/tsc.c
+@@ -956,7 +956,7 @@ static unsigned long long cyc2ns_suspend;
+ 
+ void tsc_save_sched_clock_state(void)
+ {
+-	if (!sched_clock_stable())
++	if (!static_branch_likely(&__use_tsc) && !sched_clock_stable())
+ 		return;
+ 
+ 	cyc2ns_suspend = sched_clock();
+@@ -976,7 +976,7 @@ void tsc_restore_sched_clock_state(void)
+ 	unsigned long flags;
+ 	int cpu;
+ 
+-	if (!sched_clock_stable())
++	if (!static_branch_likely(&__use_tsc) && !sched_clock_stable())
+ 		return;
+ 
+ 	local_irq_save(flags);
+diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
+index 5a952c5ea66bc6..9194695662b26f 100644
+--- a/arch/x86/kernel/uprobes.c
++++ b/arch/x86/kernel/uprobes.c
+@@ -357,19 +357,23 @@ void *arch_uprobe_trampoline(unsigned long *psize)
+ 	return &insn;
+ }
+ 
+-static unsigned long trampoline_check_ip(void)
++static unsigned long trampoline_check_ip(unsigned long tramp)
+ {
+-	unsigned long tramp = uprobe_get_trampoline_vaddr();
+-
+ 	return tramp + (uretprobe_syscall_check - uretprobe_trampoline_entry);
+ }
+ 
+ SYSCALL_DEFINE0(uretprobe)
+ {
+ 	struct pt_regs *regs = task_pt_regs(current);
+-	unsigned long err, ip, sp, r11_cx_ax[3];
++	unsigned long err, ip, sp, r11_cx_ax[3], tramp;
++
++	/* If there's no trampoline, we are called from the wrong place. */
++	tramp = uprobe_get_trampoline_vaddr();
++	if (unlikely(tramp == UPROBE_NO_TRAMPOLINE_VADDR))
++		goto sigill;
+ 
+-	if (regs->ip != trampoline_check_ip())
++	/* Make sure the ip matches the only allowed sys_uretprobe caller. */
++	if (unlikely(regs->ip != trampoline_check_ip(tramp)))
+ 		goto sigill;
+ 
+ 	err = copy_from_user(r11_cx_ax, (void __user *)regs->sp, sizeof(r11_cx_ax));
+diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
+index 3ec56bf76ef164..6154cb450b448b 100644
+--- a/arch/x86/kvm/svm/sev.c
++++ b/arch/x86/kvm/svm/sev.c
+@@ -3957,16 +3957,12 @@ static int sev_snp_ap_creation(struct vcpu_svm *svm)
+ 
+ 	/*
+ 	 * The target vCPU is valid, so the vCPU will be kicked unless the
+-	 * request is for CREATE_ON_INIT. For any errors at this stage, the
+-	 * kick will place the vCPU in an non-runnable state.
++	 * request is for CREATE_ON_INIT.
+ 	 */
+ 	kick = true;
+ 
+ 	mutex_lock(&target_svm->sev_es.snp_vmsa_mutex);
+ 
+-	target_svm->sev_es.snp_vmsa_gpa = INVALID_PAGE;
+-	target_svm->sev_es.snp_ap_waiting_for_reset = true;
+-
+ 	/* Interrupt injection mode shouldn't change for AP creation */
+ 	if (request < SVM_VMGEXIT_AP_DESTROY) {
+ 		u64 sev_features;
+@@ -4012,20 +4008,23 @@ static int sev_snp_ap_creation(struct vcpu_svm *svm)
+ 		target_svm->sev_es.snp_vmsa_gpa = svm->vmcb->control.exit_info_2;
+ 		break;
+ 	case SVM_VMGEXIT_AP_DESTROY:
++		target_svm->sev_es.snp_vmsa_gpa = INVALID_PAGE;
+ 		break;
+ 	default:
+ 		vcpu_unimpl(vcpu, "vmgexit: invalid AP creation request [%#x] from guest\n",
+ 			    request);
+ 		ret = -EINVAL;
+-		break;
++		goto out;
+ 	}
+ 
+-out:
++	target_svm->sev_es.snp_ap_waiting_for_reset = true;
++
+ 	if (kick) {
+ 		kvm_make_request(KVM_REQ_UPDATE_PROTECTED_GUEST_STATE, target_vcpu);
+ 		kvm_vcpu_kick(target_vcpu);
+ 	}
+ 
++out:
+ 	mutex_unlock(&target_svm->sev_es.snp_vmsa_mutex);
+ 
+ 	return ret;
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 8794c0a8a2e447..45337a3fc03cd7 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -4590,6 +4590,11 @@ static bool kvm_is_vm_type_supported(unsigned long type)
+ 	return type < 32 && (kvm_caps.supported_vm_types & BIT(type));
+ }
+ 
++static inline u32 kvm_sync_valid_fields(struct kvm *kvm)
++{
++	return kvm && kvm->arch.has_protected_state ? 0 : KVM_SYNC_X86_VALID_FIELDS;
++}
++
+ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
+ {
+ 	int r = 0;
+@@ -4698,7 +4703,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
+ 		break;
+ #endif
+ 	case KVM_CAP_SYNC_REGS:
+-		r = KVM_SYNC_X86_VALID_FIELDS;
++		r = kvm_sync_valid_fields(kvm);
+ 		break;
+ 	case KVM_CAP_ADJUST_CLOCK:
+ 		r = KVM_CLOCK_VALID_FLAGS;
+@@ -11470,6 +11475,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
+ {
+ 	struct kvm_queued_exception *ex = &vcpu->arch.exception;
+ 	struct kvm_run *kvm_run = vcpu->run;
++	u32 sync_valid_fields;
+ 	int r;
+ 
+ 	r = kvm_mmu_post_init_vm(vcpu->kvm);
+@@ -11515,8 +11521,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
+ 		goto out;
+ 	}
+ 
+-	if ((kvm_run->kvm_valid_regs & ~KVM_SYNC_X86_VALID_FIELDS) ||
+-	    (kvm_run->kvm_dirty_regs & ~KVM_SYNC_X86_VALID_FIELDS)) {
++	sync_valid_fields = kvm_sync_valid_fields(vcpu->kvm);
++	if ((kvm_run->kvm_valid_regs & ~sync_valid_fields) ||
++	    (kvm_run->kvm_dirty_regs & ~sync_valid_fields)) {
+ 		r = -EINVAL;
+ 		goto out;
+ 	}
+@@ -11574,7 +11581,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
+ 
+ out:
+ 	kvm_put_guest_fpu(vcpu);
+-	if (kvm_run->kvm_valid_regs)
++	if (kvm_run->kvm_valid_regs && likely(!vcpu->arch.guest_state_protected))
+ 		store_regs(vcpu);
+ 	post_kvm_run_save(vcpu);
+ 	kvm_vcpu_srcu_read_unlock(vcpu);
+diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
+index fc9fb5d0617443..b8f74d80f35c61 100644
+--- a/arch/x86/lib/copy_user_64.S
++++ b/arch/x86/lib/copy_user_64.S
+@@ -74,6 +74,24 @@ SYM_FUNC_START(rep_movs_alternative)
+ 	_ASM_EXTABLE_UA( 0b, 1b)
+ 
+ .Llarge_movsq:
++	/* Do the first possibly unaligned word */
++0:	movq (%rsi),%rax
++1:	movq %rax,(%rdi)
++
++	_ASM_EXTABLE_UA( 0b, .Lcopy_user_tail)
++	_ASM_EXTABLE_UA( 1b, .Lcopy_user_tail)
++
++	/* What would be the offset to the aligned destination? */
++	leaq 8(%rdi),%rax
++	andq $-8,%rax
++	subq %rdi,%rax
++
++	/* .. and update pointers and count to match */
++	addq %rax,%rdi
++	addq %rax,%rsi
++	subq %rax,%rcx
++
++	/* make %rcx contain the number of words, %rax the remainder */
+ 	movq %rcx,%rax
+ 	shrq $3,%rcx
+ 	andl $7,%eax
+diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c
+index ac33b2263a434d..b922b9fea6b648 100644
+--- a/arch/x86/mm/mem_encrypt_identity.c
++++ b/arch/x86/mm/mem_encrypt_identity.c
+@@ -562,7 +562,7 @@ void __head sme_enable(struct boot_params *bp)
+ 	}
+ 
+ 	RIP_REL_REF(sme_me_mask) = me_mask;
+-	physical_mask &= ~me_mask;
+-	cc_vendor = CC_VENDOR_AMD;
++	RIP_REL_REF(physical_mask) &= ~me_mask;
++	RIP_REL_REF(cc_vendor) = CC_VENDOR_AMD;
+ 	cc_set_mask(me_mask);
+ }
+diff --git a/arch/x86/mm/pat/cpa-test.c b/arch/x86/mm/pat/cpa-test.c
+index 3d2f7f0a6ed142..ad3c1feec990db 100644
+--- a/arch/x86/mm/pat/cpa-test.c
++++ b/arch/x86/mm/pat/cpa-test.c
+@@ -183,7 +183,7 @@ static int pageattr_test(void)
+ 			break;
+ 
+ 		case 1:
+-			err = change_page_attr_set(addrs, len[1], PAGE_CPA_TEST, 1);
++			err = change_page_attr_set(addrs, len[i], PAGE_CPA_TEST, 1);
+ 			break;
+ 
+ 		case 2:
+diff --git a/arch/x86/mm/pat/memtype.c b/arch/x86/mm/pat/memtype.c
+index feb8cc6a12bf23..d721cc19addbd6 100644
+--- a/arch/x86/mm/pat/memtype.c
++++ b/arch/x86/mm/pat/memtype.c
+@@ -984,29 +984,42 @@ static int get_pat_info(struct vm_area_struct *vma, resource_size_t *paddr,
+ 	return -EINVAL;
+ }
+ 
+-/*
+- * track_pfn_copy is called when vma that is covering the pfnmap gets
+- * copied through copy_page_range().
+- *
+- * If the vma has a linear pfn mapping for the entire range, we get the prot
+- * from pte and reserve the entire vma range with single reserve_pfn_range call.
+- */
+-int track_pfn_copy(struct vm_area_struct *vma)
++int track_pfn_copy(struct vm_area_struct *dst_vma,
++		struct vm_area_struct *src_vma, unsigned long *pfn)
+ {
++	const unsigned long vma_size = src_vma->vm_end - src_vma->vm_start;
+ 	resource_size_t paddr;
+-	unsigned long vma_size = vma->vm_end - vma->vm_start;
+ 	pgprot_t pgprot;
++	int rc;
+ 
+-	if (vma->vm_flags & VM_PAT) {
+-		if (get_pat_info(vma, &paddr, &pgprot))
+-			return -EINVAL;
+-		/* reserve the whole chunk covered by vma. */
+-		return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
+-	}
++	if (!(src_vma->vm_flags & VM_PAT))
++		return 0;
++
++	/*
++	 * Duplicate the PAT information for the dst VMA based on the src
++	 * VMA.
++	 */
++	if (get_pat_info(src_vma, &paddr, &pgprot))
++		return -EINVAL;
++	rc = reserve_pfn_range(paddr, vma_size, &pgprot, 1);
++	if (rc)
++		return rc;
+ 
++	/* Reservation for the destination VMA succeeded. */
++	vm_flags_set(dst_vma, VM_PAT);
++	*pfn = PHYS_PFN(paddr);
+ 	return 0;
+ }
+ 
++void untrack_pfn_copy(struct vm_area_struct *dst_vma, unsigned long pfn)
++{
++	untrack_pfn(dst_vma, pfn, dst_vma->vm_end - dst_vma->vm_start, true);
++	/*
++	 * The reservation was freed; any copied page tables will get cleaned
++	 * up later, but without getting PAT involved again.
++	 */
++}
++
+ /*
+  * prot is passed in as a parameter for the new mapping. If the vma has
+  * a linear pfn mapping for the entire range, or no vma is provided,
+@@ -1095,15 +1108,6 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
+ 	}
+ }
+ 
+-/*
+- * untrack_pfn_clear is called if the following situation fits:
+- *
+- * 1) while mremapping a pfnmap for a new region,  with the old vma after
+- * its pfnmap page table has been removed.  The new vma has a new pfnmap
+- * to the same pfn & cache type with VM_PAT set.
+- * 2) while duplicating vm area, the new vma fails to copy the pgtable from
+- * old vma.
+- */
+ void untrack_pfn_clear(struct vm_area_struct *vma)
+ {
+ 	vm_flags_clear(vma, VM_PAT);
+diff --git a/crypto/api.c b/crypto/api.c
+index bfd177a4313a01..c2c4eb14ef955f 100644
+--- a/crypto/api.c
++++ b/crypto/api.c
+@@ -36,7 +36,8 @@ EXPORT_SYMBOL_GPL(crypto_chain);
+ DEFINE_STATIC_KEY_FALSE(__crypto_boot_test_finished);
+ #endif
+ 
+-static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg);
++static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg,
++					     u32 type, u32 mask);
+ static struct crypto_alg *crypto_alg_lookup(const char *name, u32 type,
+ 					    u32 mask);
+ 
+@@ -145,7 +146,7 @@ static struct crypto_alg *crypto_larval_add(const char *name, u32 type,
+ 	if (alg != &larval->alg) {
+ 		kfree(larval);
+ 		if (crypto_is_larval(alg))
+-			alg = crypto_larval_wait(alg);
++			alg = crypto_larval_wait(alg, type, mask);
+ 	}
+ 
+ 	return alg;
+@@ -197,7 +198,8 @@ static void crypto_start_test(struct crypto_larval *larval)
+ 	crypto_schedule_test(larval);
+ }
+ 
+-static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg)
++static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg,
++					     u32 type, u32 mask)
+ {
+ 	struct crypto_larval *larval;
+ 	long time_left;
+@@ -219,12 +221,7 @@ static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg)
+ 			crypto_larval_kill(larval);
+ 		alg = ERR_PTR(-ETIMEDOUT);
+ 	} else if (!alg) {
+-		u32 type;
+-		u32 mask;
+-
+ 		alg = &larval->alg;
+-		type = alg->cra_flags & ~(CRYPTO_ALG_LARVAL | CRYPTO_ALG_DEAD);
+-		mask = larval->mask;
+ 		alg = crypto_alg_lookup(alg->cra_name, type, mask) ?:
+ 		      ERR_PTR(-EAGAIN);
+ 	} else if (IS_ERR(alg))
+@@ -304,7 +301,7 @@ static struct crypto_alg *crypto_larval_lookup(const char *name, u32 type,
+ 	}
+ 
+ 	if (!IS_ERR_OR_NULL(alg) && crypto_is_larval(alg))
+-		alg = crypto_larval_wait(alg);
++		alg = crypto_larval_wait(alg, type, mask);
+ 	else if (alg)
+ 		;
+ 	else if (!(mask & CRYPTO_ALG_TESTED))
+@@ -352,7 +349,7 @@ struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask)
+ 	ok = crypto_probing_notify(CRYPTO_MSG_ALG_REQUEST, larval);
+ 
+ 	if (ok == NOTIFY_STOP)
+-		alg = crypto_larval_wait(larval);
++		alg = crypto_larval_wait(larval, type, mask);
+ 	else {
+ 		crypto_mod_put(larval);
+ 		alg = ERR_PTR(-ENOENT);
+diff --git a/crypto/bpf_crypto_skcipher.c b/crypto/bpf_crypto_skcipher.c
+index b5e657415770a3..a88798d3e8c872 100644
+--- a/crypto/bpf_crypto_skcipher.c
++++ b/crypto/bpf_crypto_skcipher.c
+@@ -80,3 +80,4 @@ static void __exit bpf_crypto_skcipher_exit(void)
+ module_init(bpf_crypto_skcipher_init);
+ module_exit(bpf_crypto_skcipher_exit);
+ MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("Symmetric key cipher support for BPF");
+diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
+index a5d47819b3a4e2..ae035b93da0878 100644
+--- a/drivers/acpi/nfit/core.c
++++ b/drivers/acpi/nfit/core.c
+@@ -485,7 +485,7 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
+ 		cmd_mask = nd_desc->cmd_mask;
+ 		if (cmd == ND_CMD_CALL && call_pkg->nd_family) {
+ 			family = call_pkg->nd_family;
+-			if (family > NVDIMM_BUS_FAMILY_MAX ||
++			if (call_pkg->nd_family > NVDIMM_BUS_FAMILY_MAX ||
+ 			    !test_bit(family, &nd_desc->bus_family_mask))
+ 				return -EINVAL;
+ 			family = array_index_nospec(family,
+diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
+index 831fa4a1215985..0888e4d618d53a 100644
+--- a/drivers/acpi/processor_idle.c
++++ b/drivers/acpi/processor_idle.c
+@@ -268,6 +268,10 @@ static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
+ 			 ACPI_CX_DESC_LEN, "ACPI P_LVL3 IOPORT 0x%x",
+ 			 pr->power.states[ACPI_STATE_C3].address);
+ 
++	if (!pr->power.states[ACPI_STATE_C2].address &&
++	    !pr->power.states[ACPI_STATE_C3].address)
++		return -ENODEV;
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
+index b4cd14e7fa76cc..14c7bac4100b46 100644
+--- a/drivers/acpi/resource.c
++++ b/drivers/acpi/resource.c
+@@ -440,6 +440,13 @@ static const struct dmi_system_id irq1_level_low_skip_override[] = {
+ 			DMI_MATCH(DMI_BOARD_NAME, "S5602ZA"),
+ 		},
+ 	},
++	{
++		/* Asus Vivobook X1404VAP */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++			DMI_MATCH(DMI_BOARD_NAME, "X1404VAP"),
++		},
++	},
+ 	{
+ 		/* Asus Vivobook X1504VAP */
+ 		.matches = {
+diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c
+index 068c1612660bc0..4ee30c2897a2b9 100644
+--- a/drivers/acpi/x86/utils.c
++++ b/drivers/acpi/x86/utils.c
+@@ -374,7 +374,8 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
+ 			DMI_MATCH(DMI_PRODUCT_VERSION, "Blade3-10A-001"),
+ 		},
+ 		.driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
+-					ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
++					ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
++					ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
+ 	},
+ 	{
+ 		/* Medion Lifetab S10346 */
+diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig
+index 21545ffba0658f..2a9bb31633a71e 100644
+--- a/drivers/auxdisplay/Kconfig
++++ b/drivers/auxdisplay/Kconfig
+@@ -503,6 +503,7 @@ config HT16K33
+ config MAX6959
+ 	tristate "Maxim MAX6958/6959 7-segment LED controller"
+ 	depends on I2C
++	select BITREVERSE
+ 	select REGMAP_I2C
+ 	select LINEDISP
+ 	help
+diff --git a/drivers/auxdisplay/panel.c b/drivers/auxdisplay/panel.c
+index a731f28455b45f..6dc8798d01f98c 100644
+--- a/drivers/auxdisplay/panel.c
++++ b/drivers/auxdisplay/panel.c
+@@ -1664,7 +1664,7 @@ static void panel_attach(struct parport *port)
+ 	if (lcd.enabled)
+ 		charlcd_unregister(lcd.charlcd);
+ err_unreg_device:
+-	kfree(lcd.charlcd);
++	charlcd_free(lcd.charlcd);
+ 	lcd.charlcd = NULL;
+ 	parport_unregister_device(pprt);
+ 	pprt = NULL;
+@@ -1692,7 +1692,7 @@ static void panel_detach(struct parport *port)
+ 		charlcd_unregister(lcd.charlcd);
+ 		lcd.initialized = false;
+ 		kfree(lcd.charlcd->drvdata);
+-		kfree(lcd.charlcd);
++		charlcd_free(lcd.charlcd);
+ 		lcd.charlcd = NULL;
+ 	}
+ 
+diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
+index 4a67e83300e164..1abe61f11525d9 100644
+--- a/drivers/base/power/main.c
++++ b/drivers/base/power/main.c
+@@ -913,6 +913,9 @@ static void device_resume(struct device *dev, pm_message_t state, bool async)
+ 	if (dev->power.syscore)
+ 		goto Complete;
+ 
++	if (!dev->power.is_suspended)
++		goto Complete;
++
+ 	if (dev->power.direct_complete) {
+ 		/* Match the pm_runtime_disable() in __device_suspend(). */
+ 		pm_runtime_enable(dev);
+@@ -931,9 +934,6 @@ static void device_resume(struct device *dev, pm_message_t state, bool async)
+ 	 */
+ 	dev->power.is_prepared = false;
+ 
+-	if (!dev->power.is_suspended)
+-		goto Unlock;
+-
+ 	if (dev->pm_domain) {
+ 		info = "power domain ";
+ 		callback = pm_op(&dev->pm_domain->ops, state);
+@@ -973,7 +973,6 @@ static void device_resume(struct device *dev, pm_message_t state, bool async)
+ 	error = dpm_run_callback(callback, dev, state, info);
+ 	dev->power.is_suspended = false;
+ 
+- Unlock:
+ 	device_unlock(dev);
+ 	dpm_watchdog_clear(&wd);
+ 
+@@ -1254,14 +1253,13 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state, bool asy
+ 	dev->power.is_noirq_suspended = true;
+ 
+ 	/*
+-	 * Skipping the resume of devices that were in use right before the
+-	 * system suspend (as indicated by their PM-runtime usage counters)
+-	 * would be suboptimal.  Also resume them if doing that is not allowed
+-	 * to be skipped.
++	 * Devices must be resumed unless they are explicitly allowed to be left
++	 * in suspend, but even in that case skipping the resume of devices that
++	 * were in use right before the system suspend (as indicated by their
++	 * runtime PM usage counters and child counters) would be suboptimal.
+ 	 */
+-	if (atomic_read(&dev->power.usage_count) > 1 ||
+-	    !(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
+-	      dev->power.may_skip_resume))
++	if (!(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
++	      dev->power.may_skip_resume) || !pm_runtime_need_not_resume(dev))
+ 		dev->power.must_resume = true;
+ 
+ 	if (dev->power.must_resume)
+@@ -1628,6 +1626,7 @@ static int device_suspend(struct device *dev, pm_message_t state, bool async)
+ 			pm_runtime_disable(dev);
+ 			if (pm_runtime_status_suspended(dev)) {
+ 				pm_dev_dbg(dev, state, "direct-complete ");
++				dev->power.is_suspended = true;
+ 				goto Complete;
+ 			}
+ 
+diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
+index 2ee45841486bc7..04113adb092b52 100644
+--- a/drivers/base/power/runtime.c
++++ b/drivers/base/power/runtime.c
+@@ -1874,7 +1874,7 @@ void pm_runtime_drop_link(struct device_link *link)
+ 	pm_request_idle(link->supplier);
+ }
+ 
+-static bool pm_runtime_need_not_resume(struct device *dev)
++bool pm_runtime_need_not_resume(struct device *dev)
+ {
+ 	return atomic_read(&dev->power.usage_count) <= 1 &&
+ 		(atomic_read(&dev->power.child_count) == 0 ||
+diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
+index c7d728d686e5a5..79b7bd8bfd4584 100644
+--- a/drivers/block/ublk_drv.c
++++ b/drivers/block/ublk_drv.c
+@@ -1416,17 +1416,27 @@ static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq)
+ 	}
+ }
+ 
++/* Must be called when the queue is frozen */
++static bool ublk_mark_queue_canceling(struct ublk_queue *ubq)
++{
++	bool canceled;
++
++	spin_lock(&ubq->cancel_lock);
++	canceled = ubq->canceling;
++	if (!canceled)
++		ubq->canceling = true;
++	spin_unlock(&ubq->cancel_lock);
++
++	return canceled;
++}
++
+ static bool ublk_abort_requests(struct ublk_device *ub, struct ublk_queue *ubq)
+ {
++	bool was_canceled = ubq->canceling;
+ 	struct gendisk *disk;
+ 
+-	spin_lock(&ubq->cancel_lock);
+-	if (ubq->canceling) {
+-		spin_unlock(&ubq->cancel_lock);
++	if (was_canceled)
+ 		return false;
+-	}
+-	ubq->canceling = true;
+-	spin_unlock(&ubq->cancel_lock);
+ 
+ 	spin_lock(&ub->lock);
+ 	disk = ub->ub_disk;
+@@ -1438,14 +1448,23 @@ static bool ublk_abort_requests(struct ublk_device *ub, struct ublk_queue *ubq)
+ 	if (!disk)
+ 		return false;
+ 
+-	/* Now we are serialized with ublk_queue_rq() */
++	/*
++	 * Now we are serialized with ublk_queue_rq()
++	 *
++	 * Make sure that ubq->canceling is set when the queue is frozen,
++	 * because ublk_queue_rq() has to rely on this flag to avoid
++	 * touching a completed uring_cmd.
++	 */
+ 	blk_mq_quiesce_queue(disk->queue);
+-	/* abort queue is for making forward progress */
+-	ublk_abort_queue(ub, ubq);
++	was_canceled = ublk_mark_queue_canceling(ubq);
++	if (!was_canceled) {
++		/* abort queue is for making forward progress */
++		ublk_abort_queue(ub, ubq);
++	}
+ 	blk_mq_unquiesce_queue(disk->queue);
+ 	put_device(disk_to_dev(disk));
+ 
+-	return true;
++	return !was_canceled;
+ }
+ 
+ static void ublk_cancel_cmd(struct ublk_queue *ubq, struct ublk_io *io,
+diff --git a/drivers/clk/imx/clk-imx8mp-audiomix.c b/drivers/clk/imx/clk-imx8mp-audiomix.c
+index c409fc7e061869..775f62dddb11d8 100644
+--- a/drivers/clk/imx/clk-imx8mp-audiomix.c
++++ b/drivers/clk/imx/clk-imx8mp-audiomix.c
+@@ -180,14 +180,14 @@ static struct clk_imx8mp_audiomix_sel sels[] = {
+ 	CLK_GATE("asrc", ASRC_IPG),
+ 	CLK_GATE("pdm", PDM_IPG),
+ 	CLK_GATE("earc", EARC_IPG),
+-	CLK_GATE("ocrama", OCRAMA_IPG),
++	CLK_GATE_PARENT("ocrama", OCRAMA_IPG, "axi"),
+ 	CLK_GATE("aud2htx", AUD2HTX_IPG),
+ 	CLK_GATE_PARENT("earc_phy", EARC_PHY, "sai_pll_out_div2"),
+ 	CLK_GATE("sdma2", SDMA2_ROOT),
+ 	CLK_GATE("sdma3", SDMA3_ROOT),
+ 	CLK_GATE("spba2", SPBA2_ROOT),
+-	CLK_GATE("dsp", DSP_ROOT),
+-	CLK_GATE("dspdbg", DSPDBG_ROOT),
++	CLK_GATE_PARENT("dsp", DSP_ROOT, "axi"),
++	CLK_GATE_PARENT("dspdbg", DSPDBG_ROOT, "axi"),
+ 	CLK_GATE("edma", EDMA_ROOT),
+ 	CLK_GATE_PARENT("audpll", AUDPLL_ROOT, "osc_24m"),
+ 	CLK_GATE("mu2", MU2_ROOT),
+diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c
+index 02dda57105b10e..4f92b83965d5a9 100644
+--- a/drivers/clk/meson/g12a.c
++++ b/drivers/clk/meson/g12a.c
+@@ -1139,8 +1139,18 @@ static struct clk_regmap g12a_cpu_clk_div16_en = {
+ 	.hw.init = &(struct clk_init_data) {
+ 		.name = "cpu_clk_div16_en",
+ 		.ops = &clk_regmap_gate_ro_ops,
+-		.parent_hws = (const struct clk_hw *[]) {
+-			&g12a_cpu_clk.hw
++		.parent_data = &(const struct clk_parent_data) {
++			/*
++			 * Note:
++			 * G12A and G12B have different cpu clocks (with
++			 * different struct clk_hw). We fall back to the global
++			 * naming string mechanism so this clock picks
++			 * up the appropriate one. The same goes for the other
++			 * clocks using the cpu cluster A clock output and
++			 * present on both G12 variants.
++			 */
++			.name = "cpu_clk",
++			.index = -1,
+ 		},
+ 		.num_parents = 1,
+ 		/*
+@@ -1205,7 +1215,10 @@ static struct clk_regmap g12a_cpu_clk_apb_div = {
+ 	.hw.init = &(struct clk_init_data){
+ 		.name = "cpu_clk_apb_div",
+ 		.ops = &clk_regmap_divider_ro_ops,
+-		.parent_hws = (const struct clk_hw *[]) { &g12a_cpu_clk.hw },
++		.parent_data = &(const struct clk_parent_data) {
++			.name = "cpu_clk",
++			.index = -1,
++		},
+ 		.num_parents = 1,
+ 	},
+ };
+@@ -1239,7 +1252,10 @@ static struct clk_regmap g12a_cpu_clk_atb_div = {
+ 	.hw.init = &(struct clk_init_data){
+ 		.name = "cpu_clk_atb_div",
+ 		.ops = &clk_regmap_divider_ro_ops,
+-		.parent_hws = (const struct clk_hw *[]) { &g12a_cpu_clk.hw },
++		.parent_data = &(const struct clk_parent_data) {
++			.name = "cpu_clk",
++			.index = -1,
++		},
+ 		.num_parents = 1,
+ 	},
+ };
+@@ -1273,7 +1289,10 @@ static struct clk_regmap g12a_cpu_clk_axi_div = {
+ 	.hw.init = &(struct clk_init_data){
+ 		.name = "cpu_clk_axi_div",
+ 		.ops = &clk_regmap_divider_ro_ops,
+-		.parent_hws = (const struct clk_hw *[]) { &g12a_cpu_clk.hw },
++		.parent_data = &(const struct clk_parent_data) {
++			.name = "cpu_clk",
++			.index = -1,
++		},
+ 		.num_parents = 1,
+ 	},
+ };
+@@ -1308,13 +1327,6 @@ static struct clk_regmap g12a_cpu_clk_trace_div = {
+ 		.name = "cpu_clk_trace_div",
+ 		.ops = &clk_regmap_divider_ro_ops,
+ 		.parent_data = &(const struct clk_parent_data) {
+-			/*
+-			 * Note:
+-			 * G12A and G12B have different cpu_clks (with
+-			 * different struct clk_hw). We fallback to the global
+-			 * naming string mechanism so cpu_clk_trace_div picks
+-			 * up the appropriate one.
+-			 */
+ 			.name = "cpu_clk",
+ 			.index = -1,
+ 		},
+@@ -4317,7 +4329,7 @@ static MESON_GATE(g12a_spicc_1,			HHI_GCLK_MPEG0,	14);
+ static MESON_GATE(g12a_hiu_reg,			HHI_GCLK_MPEG0,	19);
+ static MESON_GATE(g12a_mipi_dsi_phy,		HHI_GCLK_MPEG0,	20);
+ static MESON_GATE(g12a_assist_misc,		HHI_GCLK_MPEG0,	23);
+-static MESON_GATE(g12a_emmc_a,			HHI_GCLK_MPEG0,	4);
++static MESON_GATE(g12a_emmc_a,			HHI_GCLK_MPEG0,	24);
+ static MESON_GATE(g12a_emmc_b,			HHI_GCLK_MPEG0,	25);
+ static MESON_GATE(g12a_emmc_c,			HHI_GCLK_MPEG0,	26);
+ static MESON_GATE(g12a_audio_codec,		HHI_GCLK_MPEG0,	28);
+diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c
+index f071faad1ebb70..d9529de200ae44 100644
+--- a/drivers/clk/meson/gxbb.c
++++ b/drivers/clk/meson/gxbb.c
+@@ -1272,14 +1272,13 @@ static struct clk_regmap gxbb_cts_i958 = {
+ 	},
+ };
+ 
++/*
++ * This table skips a clock named 'cts_slow_oscin' in the documentation.
++ * This clock does not yet exist in this controller or the AO one.
++ */
++static u32 gxbb_32k_clk_parents_val_table[] = { 0, 2, 3 };
+ static const struct clk_parent_data gxbb_32k_clk_parent_data[] = {
+ 	{ .fw_name = "xtal", },
+-	/*
+-	 * FIXME: This clock is provided by the ao clock controller but the
+-	 * clock is not yet part of the binding of this controller, so string
+-	 * name must be use to set this parent.
+-	 */
+-	{ .name = "cts_slow_oscin", .index = -1 },
+ 	{ .hw = &gxbb_fclk_div3.hw },
+ 	{ .hw = &gxbb_fclk_div5.hw },
+ };
+@@ -1289,6 +1288,7 @@ static struct clk_regmap gxbb_32k_clk_sel = {
+ 		.offset = HHI_32K_CLK_CNTL,
+ 		.mask = 0x3,
+ 		.shift = 16,
++		.table = gxbb_32k_clk_parents_val_table,
+ 		},
+ 	.hw.init = &(struct clk_init_data){
+ 		.name = "32k_clk_sel",
+@@ -1312,7 +1312,7 @@ static struct clk_regmap gxbb_32k_clk_div = {
+ 			&gxbb_32k_clk_sel.hw
+ 		},
+ 		.num_parents = 1,
+-		.flags = CLK_SET_RATE_PARENT | CLK_DIVIDER_ROUND_CLOSEST,
++		.flags = CLK_SET_RATE_PARENT,
+ 	},
+ };
+ 
+diff --git a/drivers/clk/qcom/gcc-msm8953.c b/drivers/clk/qcom/gcc-msm8953.c
+index 855a61966f3ef5..8f29ecc74c50bf 100644
+--- a/drivers/clk/qcom/gcc-msm8953.c
++++ b/drivers/clk/qcom/gcc-msm8953.c
+@@ -3770,7 +3770,7 @@ static struct clk_branch gcc_venus0_axi_clk = {
+ 
+ static struct clk_branch gcc_venus0_core0_vcodec0_clk = {
+ 	.halt_reg = 0x4c02c,
+-	.halt_check = BRANCH_HALT,
++	.halt_check = BRANCH_HALT_SKIP,
+ 	.clkr = {
+ 		.enable_reg = 0x4c02c,
+ 		.enable_mask = BIT(0),
+diff --git a/drivers/clk/qcom/gcc-sm8650.c b/drivers/clk/qcom/gcc-sm8650.c
+index 9dd5c48f33bed5..fa1672c4e7d814 100644
+--- a/drivers/clk/qcom/gcc-sm8650.c
++++ b/drivers/clk/qcom/gcc-sm8650.c
+@@ -3497,7 +3497,7 @@ static struct gdsc usb30_prim_gdsc = {
+ 	.pd = {
+ 		.name = "usb30_prim_gdsc",
+ 	},
+-	.pwrsts = PWRSTS_OFF_ON,
++	.pwrsts = PWRSTS_RET_ON,
+ 	.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+ };
+ 
+@@ -3506,7 +3506,7 @@ static struct gdsc usb3_phy_gdsc = {
+ 	.pd = {
+ 		.name = "usb3_phy_gdsc",
+ 	},
+-	.pwrsts = PWRSTS_OFF_ON,
++	.pwrsts = PWRSTS_RET_ON,
+ 	.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+ };
+ 
+diff --git a/drivers/clk/qcom/gcc-x1e80100.c b/drivers/clk/qcom/gcc-x1e80100.c
+index 7288af845434d8..009f39139b6440 100644
+--- a/drivers/clk/qcom/gcc-x1e80100.c
++++ b/drivers/clk/qcom/gcc-x1e80100.c
+@@ -2564,19 +2564,6 @@ static struct clk_branch gcc_disp_hf_axi_clk = {
+ 	},
+ };
+ 
+-static struct clk_branch gcc_disp_xo_clk = {
+-	.halt_reg = 0x27018,
+-	.halt_check = BRANCH_HALT,
+-	.clkr = {
+-		.enable_reg = 0x27018,
+-		.enable_mask = BIT(0),
+-		.hw.init = &(const struct clk_init_data) {
+-			.name = "gcc_disp_xo_clk",
+-			.ops = &clk_branch2_ops,
+-		},
+-	},
+-};
+-
+ static struct clk_branch gcc_gp1_clk = {
+ 	.halt_reg = 0x64000,
+ 	.halt_check = BRANCH_HALT,
+@@ -2631,21 +2618,6 @@ static struct clk_branch gcc_gp3_clk = {
+ 	},
+ };
+ 
+-static struct clk_branch gcc_gpu_cfg_ahb_clk = {
+-	.halt_reg = 0x71004,
+-	.halt_check = BRANCH_HALT_VOTED,
+-	.hwcg_reg = 0x71004,
+-	.hwcg_bit = 1,
+-	.clkr = {
+-		.enable_reg = 0x71004,
+-		.enable_mask = BIT(0),
+-		.hw.init = &(const struct clk_init_data) {
+-			.name = "gcc_gpu_cfg_ahb_clk",
+-			.ops = &clk_branch2_ops,
+-		},
+-	},
+-};
+-
+ static struct clk_branch gcc_gpu_gpll0_cph_clk_src = {
+ 	.halt_check = BRANCH_HALT_DELAY,
+ 	.clkr = {
+@@ -6268,7 +6240,6 @@ static struct clk_regmap *gcc_x1e80100_clocks[] = {
+ 	[GCC_CNOC_PCIE_TUNNEL_CLK] = &gcc_cnoc_pcie_tunnel_clk.clkr,
+ 	[GCC_DDRSS_GPU_AXI_CLK] = &gcc_ddrss_gpu_axi_clk.clkr,
+ 	[GCC_DISP_HF_AXI_CLK] = &gcc_disp_hf_axi_clk.clkr,
+-	[GCC_DISP_XO_CLK] = &gcc_disp_xo_clk.clkr,
+ 	[GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ 	[GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
+ 	[GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+@@ -6281,7 +6252,6 @@ static struct clk_regmap *gcc_x1e80100_clocks[] = {
+ 	[GCC_GPLL7] = &gcc_gpll7.clkr,
+ 	[GCC_GPLL8] = &gcc_gpll8.clkr,
+ 	[GCC_GPLL9] = &gcc_gpll9.clkr,
+-	[GCC_GPU_CFG_AHB_CLK] = &gcc_gpu_cfg_ahb_clk.clkr,
+ 	[GCC_GPU_GPLL0_CPH_CLK_SRC] = &gcc_gpu_gpll0_cph_clk_src.clkr,
+ 	[GCC_GPU_GPLL0_DIV_CPH_CLK_SRC] = &gcc_gpu_gpll0_div_cph_clk_src.clkr,
+ 	[GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr,
+diff --git a/drivers/clk/qcom/mmcc-sdm660.c b/drivers/clk/qcom/mmcc-sdm660.c
+index 98ba5b4518fb3b..b9f02d91004e8b 100644
+--- a/drivers/clk/qcom/mmcc-sdm660.c
++++ b/drivers/clk/qcom/mmcc-sdm660.c
+@@ -2544,7 +2544,7 @@ static struct clk_branch video_core_clk = {
+ 
+ static struct clk_branch video_subcore0_clk = {
+ 	.halt_reg = 0x1048,
+-	.halt_check = BRANCH_HALT,
++	.halt_check = BRANCH_HALT_SKIP,
+ 	.clkr = {
+ 		.enable_reg = 0x1048,
+ 		.enable_mask = BIT(0),
+diff --git a/drivers/clk/renesas/r9a08g045-cpg.c b/drivers/clk/renesas/r9a08g045-cpg.c
+index 1ce40fb51f13bd..a1f961d5b85691 100644
+--- a/drivers/clk/renesas/r9a08g045-cpg.c
++++ b/drivers/clk/renesas/r9a08g045-cpg.c
+@@ -50,7 +50,7 @@
+ #define G3S_SEL_SDHI2		SEL_PLL_PACK(G3S_CPG_SDHI_DSEL, 8, 2)
+ 
+ /* PLL 1/4/6 configuration registers macro. */
+-#define G3S_PLL146_CONF(clk1, clk2)	((clk1) << 22 | (clk2) << 12)
++#define G3S_PLL146_CONF(clk1, clk2, setting)	((clk1) << 22 | (clk2) << 12 | (setting))
+ 
+ #define DEF_G3S_MUX(_name, _id, _conf, _parent_names, _mux_flags, _clk_flags) \
+ 	DEF_TYPE(_name, _id, CLK_TYPE_MUX, .conf = (_conf), \
+@@ -133,7 +133,8 @@ static const struct cpg_core_clk r9a08g045_core_clks[] __initconst = {
+ 
+ 	/* Internal Core Clocks */
+ 	DEF_FIXED(".osc_div1000", CLK_OSC_DIV1000, CLK_EXTAL, 1, 1000),
+-	DEF_G3S_PLL(".pll1", CLK_PLL1, CLK_EXTAL, G3S_PLL146_CONF(0x4, 0x8)),
++	DEF_G3S_PLL(".pll1", CLK_PLL1, CLK_EXTAL, G3S_PLL146_CONF(0x4, 0x8, 0x100),
++		    1100000000UL),
+ 	DEF_FIXED(".pll2", CLK_PLL2, CLK_EXTAL, 200, 3),
+ 	DEF_FIXED(".pll3", CLK_PLL3, CLK_EXTAL, 200, 3),
+ 	DEF_FIXED(".pll4", CLK_PLL4, CLK_EXTAL, 100, 3),
+diff --git a/drivers/clk/renesas/rzg2l-cpg.c b/drivers/clk/renesas/rzg2l-cpg.c
+index b43b763dfe186a..229f4540b219e3 100644
+--- a/drivers/clk/renesas/rzg2l-cpg.c
++++ b/drivers/clk/renesas/rzg2l-cpg.c
+@@ -51,6 +51,7 @@
+ #define RZG3S_DIV_M		GENMASK(25, 22)
+ #define RZG3S_DIV_NI		GENMASK(21, 13)
+ #define RZG3S_DIV_NF		GENMASK(12, 1)
++#define RZG3S_SEL_PLL		BIT(0)
+ 
+ #define CLK_ON_R(reg)		(reg)
+ #define CLK_MON_R(reg)		(0x180 + (reg))
+@@ -60,6 +61,7 @@
+ #define GET_REG_OFFSET(val)		((val >> 20) & 0xfff)
+ #define GET_REG_SAMPLL_CLK1(val)	((val >> 22) & 0xfff)
+ #define GET_REG_SAMPLL_CLK2(val)	((val >> 12) & 0xfff)
++#define GET_REG_SAMPLL_SETTING(val)	((val) & 0xfff)
+ 
+ #define CPG_WEN_BIT		BIT(16)
+ 
+@@ -943,6 +945,7 @@ rzg2l_cpg_sipll5_register(const struct cpg_core_clk *core,
+ 
+ struct pll_clk {
+ 	struct clk_hw hw;
++	unsigned long default_rate;
+ 	unsigned int conf;
+ 	unsigned int type;
+ 	void __iomem *base;
+@@ -980,12 +983,19 @@ static unsigned long rzg3s_cpg_pll_clk_recalc_rate(struct clk_hw *hw,
+ {
+ 	struct pll_clk *pll_clk = to_pll(hw);
+ 	struct rzg2l_cpg_priv *priv = pll_clk->priv;
+-	u32 nir, nfr, mr, pr, val;
++	u32 nir, nfr, mr, pr, val, setting;
+ 	u64 rate;
+ 
+ 	if (pll_clk->type != CLK_TYPE_G3S_PLL)
+ 		return parent_rate;
+ 
++	setting = GET_REG_SAMPLL_SETTING(pll_clk->conf);
++	if (setting) {
++		val = readl(priv->base + setting);
++		if (val & RZG3S_SEL_PLL)
++			return pll_clk->default_rate;
++	}
++
+ 	val = readl(priv->base + GET_REG_SAMPLL_CLK1(pll_clk->conf));
+ 
+ 	pr = 1 << FIELD_GET(RZG3S_DIV_P, val);
+@@ -1038,6 +1048,7 @@ rzg2l_cpg_pll_clk_register(const struct cpg_core_clk *core,
+ 	pll_clk->base = priv->base;
+ 	pll_clk->priv = priv;
+ 	pll_clk->type = core->type;
++	pll_clk->default_rate = core->default_rate;
+ 
+ 	ret = devm_clk_hw_register(dev, &pll_clk->hw);
+ 	if (ret)
+diff --git a/drivers/clk/renesas/rzg2l-cpg.h b/drivers/clk/renesas/rzg2l-cpg.h
+index ecfe7e7ea8a177..019efe00ffd9f2 100644
+--- a/drivers/clk/renesas/rzg2l-cpg.h
++++ b/drivers/clk/renesas/rzg2l-cpg.h
+@@ -102,7 +102,10 @@ struct cpg_core_clk {
+ 	const struct clk_div_table *dtable;
+ 	const u32 *mtable;
+ 	const unsigned long invalid_rate;
+-	const unsigned long max_rate;
++	union {
++		const unsigned long max_rate;
++		const unsigned long default_rate;
++	};
+ 	const char * const *parent_names;
+ 	notifier_fn_t notifier;
+ 	u32 flag;
+@@ -144,8 +147,9 @@ enum clk_types {
+ 	DEF_TYPE(_name, _id, _type, .parent = _parent)
+ #define DEF_SAMPLL(_name, _id, _parent, _conf) \
+ 	DEF_TYPE(_name, _id, CLK_TYPE_SAM_PLL, .parent = _parent, .conf = _conf)
+-#define DEF_G3S_PLL(_name, _id, _parent, _conf) \
+-	DEF_TYPE(_name, _id, CLK_TYPE_G3S_PLL, .parent = _parent, .conf = _conf)
++#define DEF_G3S_PLL(_name, _id, _parent, _conf, _default_rate) \
++	DEF_TYPE(_name, _id, CLK_TYPE_G3S_PLL, .parent = _parent, .conf = _conf, \
++		 .default_rate = _default_rate)
+ #define DEF_INPUT(_name, _id) \
+ 	DEF_TYPE(_name, _id, CLK_TYPE_IN)
+ #define DEF_FIXED(_name, _id, _parent, _mult, _div) \
+diff --git a/drivers/clk/rockchip/clk-rk3328.c b/drivers/clk/rockchip/clk-rk3328.c
+index 3bb87b27b662da..cf60fcf2fa5cde 100644
+--- a/drivers/clk/rockchip/clk-rk3328.c
++++ b/drivers/clk/rockchip/clk-rk3328.c
+@@ -201,7 +201,7 @@ PNAME(mux_aclk_peri_pre_p)	= { "cpll_peri",
+ 				    "gpll_peri",
+ 				    "hdmiphy_peri" };
+ PNAME(mux_ref_usb3otg_src_p)	= { "xin24m",
+-				    "clk_usb3otg_ref" };
++				    "clk_ref_usb3otg_src" };
+ PNAME(mux_xin24m_32k_p)		= { "xin24m",
+ 				    "clk_rtc32k" };
+ PNAME(mux_mac2io_src_p)		= { "clk_mac2io_src",
+diff --git a/drivers/clk/samsung/clk.c b/drivers/clk/samsung/clk.c
+index afa5760ed3a11b..e6533513f8ac29 100644
+--- a/drivers/clk/samsung/clk.c
++++ b/drivers/clk/samsung/clk.c
+@@ -74,12 +74,12 @@ struct samsung_clk_provider * __init samsung_clk_init(struct device *dev,
+ 	if (!ctx)
+ 		panic("could not allocate clock provider context.\n");
+ 
++	ctx->clk_data.num = nr_clks;
+ 	for (i = 0; i < nr_clks; ++i)
+ 		ctx->clk_data.hws[i] = ERR_PTR(-ENOENT);
+ 
+ 	ctx->dev = dev;
+ 	ctx->reg_base = base;
+-	ctx->clk_data.num = nr_clks;
+ 	spin_lock_init(&ctx->lock);
+ 
+ 	return ctx;
+diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
+index 5f7e13e60c8023..e67b2326671c9c 100644
+--- a/drivers/cpufreq/Kconfig.arm
++++ b/drivers/cpufreq/Kconfig.arm
+@@ -245,7 +245,7 @@ config ARM_TEGRA186_CPUFREQ
+ 
+ config ARM_TEGRA194_CPUFREQ
+ 	tristate "Tegra194 CPUFreq support"
+-	depends on ARCH_TEGRA_194_SOC || (64BIT && COMPILE_TEST)
++	depends on ARCH_TEGRA_194_SOC || ARCH_TEGRA_234_SOC || (64BIT && COMPILE_TEST)
+ 	depends on TEGRA_BPMP
+ 	default y
+ 	help
+diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
+index af44ee6a64304f..1a7fcaf39cc9b5 100644
+--- a/drivers/cpufreq/cpufreq_governor.c
++++ b/drivers/cpufreq/cpufreq_governor.c
+@@ -145,7 +145,23 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
+ 		time_elapsed = update_time - j_cdbs->prev_update_time;
+ 		j_cdbs->prev_update_time = update_time;
+ 
+-		idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
++		/*
++		 * cur_idle_time could be smaller than j_cdbs->prev_cpu_idle if
++		 * it's obtained from get_cpu_idle_time_jiffy() when NOHZ is
++		 * off, where idle_time is calculated as the difference between
++		 * the time elapsed in jiffies and the "busy time" obtained from
++		 * CPU statistics.  If a CPU is 100% busy, the elapsed and busy
++		 * times should grow by the same amount in two consecutive
++		 * samples, but in practice there can be a tiny difference,
++		 * making the accumulated idle time decrease sometimes.  Hence,
++		 * in this case, idle_time should be regarded as 0 in order to
++		 * keep the subsequent processing correct.
++		 */
++		if (cur_idle_time > j_cdbs->prev_cpu_idle)
++			idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
++		else
++			idle_time = 0;
++
+ 		j_cdbs->prev_cpu_idle = cur_idle_time;
+ 
+ 		if (ignore_nice) {
+@@ -162,7 +178,7 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
+ 			 * calls, so the previous load value can be used then.
+ 			 */
+ 			load = j_cdbs->prev_load;
+-		} else if (unlikely((int)idle_time > 2 * sampling_rate &&
++		} else if (unlikely(idle_time > 2 * sampling_rate &&
+ 				    j_cdbs->prev_load)) {
+ 			/*
+ 			 * If the CPU had gone completely idle and a task has
+@@ -189,30 +205,15 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
+ 			load = j_cdbs->prev_load;
+ 			j_cdbs->prev_load = 0;
+ 		} else {
+-			if (time_elapsed >= idle_time) {
++			if (time_elapsed > idle_time)
+ 				load = 100 * (time_elapsed - idle_time) / time_elapsed;
+-			} else {
+-				/*
+-				 * That can happen if idle_time is returned by
+-				 * get_cpu_idle_time_jiffy().  In that case
+-				 * idle_time is roughly equal to the difference
+-				 * between time_elapsed and "busy time" obtained
+-				 * from CPU statistics.  Then, the "busy time"
+-				 * can end up being greater than time_elapsed
+-				 * (for example, if jiffies_64 and the CPU
+-				 * statistics are updated by different CPUs),
+-				 * so idle_time may in fact be negative.  That
+-				 * means, though, that the CPU was busy all
+-				 * the time (on the rough average) during the
+-				 * last sampling interval and 100 can be
+-				 * returned as the load.
+-				 */
+-				load = (int)idle_time < 0 ? 100 : 0;
+-			}
++			else
++				load = 0;
++
+ 			j_cdbs->prev_load = load;
+ 		}
+ 
+-		if (unlikely((int)idle_time > 2 * sampling_rate)) {
++		if (unlikely(idle_time > 2 * sampling_rate)) {
+ 			unsigned int periods = idle_time / sampling_rate;
+ 
+ 			if (periods < idle_periods)
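[Aside, not part of the patch: the clamping that this dbs_update() hunk introduces can be seen in isolation with a minimal user-space C sketch. All names below are hypothetical stand-ins for the kernel fields; the point is that an unsigned subtraction would wrap to a huge value whenever cur_idle_time briefly lags prev_cpu_idle.

#include <stdio.h>

/* Hypothetical stand-in for the j_cdbs bookkeeping in dbs_update(). */
static unsigned int idle_delta(unsigned long long cur_idle_time,
                               unsigned long long prev_cpu_idle)
{
	/* Clamp to 0 instead of letting unsigned subtraction wrap. */
	if (cur_idle_time > prev_cpu_idle)
		return (unsigned int)(cur_idle_time - prev_cpu_idle);
	return 0;
}

int main(void)
{
	/* A 100% busy CPU can report "busy time" slightly ahead of jiffies. */
	printf("%u\n", idle_delta(1000, 1003)); /* 0, not a wrapped huge value */
	printf("%u\n", idle_delta(1010, 1000)); /* 10 */
	return 0;
}

With the clamp in place, the later comparisons against 2 * sampling_rate no longer need the (int) casts the hunk removes.]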
+diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
+index 8d73e6e8be2a58..f2d913a91be9e0 100644
+--- a/drivers/cpufreq/scpi-cpufreq.c
++++ b/drivers/cpufreq/scpi-cpufreq.c
+@@ -39,8 +39,9 @@ static unsigned int scpi_cpufreq_get_rate(unsigned int cpu)
+ static int
+ scpi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index)
+ {
+-	u64 rate = policy->freq_table[index].frequency * 1000;
++	unsigned long freq_khz = policy->freq_table[index].frequency;
+ 	struct scpi_data *priv = policy->driver_data;
++	unsigned long rate = freq_khz * 1000;
+ 	int ret;
+ 
+ 	ret = clk_set_rate(priv->clk, rate);
+@@ -48,7 +49,7 @@ scpi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index)
+ 	if (ret)
+ 		return ret;
+ 
+-	if (clk_get_rate(priv->clk) != rate)
++	if (clk_get_rate(priv->clk) / 1000 != freq_khz)
+ 		return -EIO;
+ 
+ 	return 0;
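[Aside, not part of the patch: the scpi hunk above compares in kHz rather than Hz because clk_set_rate() may land on a rounded rate, so an exact Hz comparison can fail spuriously. A minimal sketch of the idea, where clk_set_get() is a hypothetical stub standing in for the clock framework, not a real API:

#include <stdio.h>

/* Hypothetical stub: a PLL whose output granularity is 3 Hz, rounding up. */
static unsigned long clk_set_get(unsigned long rate_hz)
{
	return (rate_hz + 2) / 3 * 3;
}

int main(void)
{
	unsigned long freq_khz = 1000000;        /* from the freq table */
	unsigned long rate = freq_khz * 1000;    /* requested, in Hz */
	unsigned long got = clk_set_get(rate);   /* what the clock delivers */

	printf("exact Hz match: %d\n", got == rate);            /* 0 */
	printf("kHz match:      %d\n", got / 1000 == freq_khz); /* 1 */
	return 0;
}

Dividing both sides by 1000 tolerates sub-kHz rounding while still catching a genuinely wrong rate.]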
+diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
+index 30c2b1a64695c0..2fc04e210bc4f6 100644
+--- a/drivers/crypto/hisilicon/sec2/sec.h
++++ b/drivers/crypto/hisilicon/sec2/sec.h
+@@ -37,7 +37,6 @@ struct sec_aead_req {
+ 	u8 *a_ivin;
+ 	dma_addr_t a_ivin_dma;
+ 	struct aead_request *aead_req;
+-	bool fallback;
+ };
+ 
+ /* SEC request of Crypto */
+diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
+index a9b1b9b0b03bf7..8605cb3cae92cd 100644
+--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
++++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
+@@ -57,7 +57,6 @@
+ #define SEC_TYPE_MASK		0x0F
+ #define SEC_DONE_MASK		0x0001
+ #define SEC_ICV_MASK		0x000E
+-#define SEC_SQE_LEN_RATE_MASK	0x3
+ 
+ #define SEC_TOTAL_IV_SZ(depth)	(SEC_IV_SIZE * (depth))
+ #define SEC_SGL_SGE_NR		128
+@@ -80,16 +79,16 @@
+ #define SEC_TOTAL_PBUF_SZ(depth)	(PAGE_SIZE * SEC_PBUF_PAGE_NUM(depth) +	\
+ 				SEC_PBUF_LEFT_SZ(depth))
+ 
+-#define SEC_SQE_LEN_RATE	4
+ #define SEC_SQE_CFLAG		2
+ #define SEC_SQE_AEAD_FLAG	3
+ #define SEC_SQE_DONE		0x1
+ #define SEC_ICV_ERR		0x2
+-#define MIN_MAC_LEN		4
+ #define MAC_LEN_MASK		0x1U
+ #define MAX_INPUT_DATA_LEN	0xFFFE00
+ #define BITS_MASK		0xFF
++#define WORD_MASK		0x3
+ #define BYTE_BITS		0x8
++#define BYTES_TO_WORDS(bcount)	((bcount) >> 2)
+ #define SEC_XTS_NAME_SZ		0x3
+ #define IV_CM_CAL_NUM		2
+ #define IV_CL_MASK		0x7
+@@ -691,14 +690,10 @@ static int sec_skcipher_fbtfm_init(struct crypto_skcipher *tfm)
+ 
+ 	c_ctx->fallback = false;
+ 
+-	/* Currently, only XTS mode need fallback tfm when using 192bit key */
+-	if (likely(strncmp(alg, "xts", SEC_XTS_NAME_SZ)))
+-		return 0;
+-
+ 	c_ctx->fbtfm = crypto_alloc_sync_skcipher(alg, 0,
+ 						  CRYPTO_ALG_NEED_FALLBACK);
+ 	if (IS_ERR(c_ctx->fbtfm)) {
+-		pr_err("failed to alloc xts mode fallback tfm!\n");
++		pr_err("failed to alloc fallback tfm for %s!\n", alg);
+ 		return PTR_ERR(c_ctx->fbtfm);
+ 	}
+ 
+@@ -858,7 +853,7 @@ static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ 	}
+ 
+ 	memcpy(c_ctx->c_key, key, keylen);
+-	if (c_ctx->fallback && c_ctx->fbtfm) {
++	if (c_ctx->fbtfm) {
+ 		ret = crypto_sync_skcipher_setkey(c_ctx->fbtfm, key, keylen);
+ 		if (ret) {
+ 			dev_err(dev, "failed to set fallback skcipher key!\n");
+@@ -1090,11 +1085,6 @@ static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx,
+ 	struct crypto_shash *hash_tfm = ctx->hash_tfm;
+ 	int blocksize, digestsize, ret;
+ 
+-	if (!keys->authkeylen) {
+-		pr_err("hisi_sec2: aead auth key error!\n");
+-		return -EINVAL;
+-	}
+-
+ 	blocksize = crypto_shash_blocksize(hash_tfm);
+ 	digestsize = crypto_shash_digestsize(hash_tfm);
+ 	if (keys->authkeylen > blocksize) {
+@@ -1106,7 +1096,8 @@ static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx,
+ 		}
+ 		ctx->a_key_len = digestsize;
+ 	} else {
+-		memcpy(ctx->a_key, keys->authkey, keys->authkeylen);
++		if (keys->authkeylen)
++			memcpy(ctx->a_key, keys->authkey, keys->authkeylen);
+ 		ctx->a_key_len = keys->authkeylen;
+ 	}
+ 
+@@ -1160,8 +1151,10 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
+ 	}
+ 
+ 	ret = crypto_authenc_extractkeys(&keys, key, keylen);
+-	if (ret)
++	if (ret) {
++		dev_err(dev, "sec extract aead keys err!\n");
+ 		goto bad_key;
++	}
+ 
+ 	ret = sec_aead_aes_set_key(c_ctx, &keys);
+ 	if (ret) {
+@@ -1175,12 +1168,6 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
+ 		goto bad_key;
+ 	}
+ 
+-	if (ctx->a_ctx.a_key_len & SEC_SQE_LEN_RATE_MASK) {
+-		ret = -EINVAL;
+-		dev_err(dev, "AUTH key length error!\n");
+-		goto bad_key;
+-	}
+-
+ 	ret = sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);
+ 	if (ret) {
+ 		dev_err(dev, "set sec fallback key err!\n");
+@@ -1583,11 +1570,10 @@ static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir,
+ 
+ 	sec_sqe->type2.a_key_addr = cpu_to_le64(ctx->a_key_dma);
+ 
+-	sec_sqe->type2.mac_key_alg = cpu_to_le32(authsize / SEC_SQE_LEN_RATE);
++	sec_sqe->type2.mac_key_alg = cpu_to_le32(BYTES_TO_WORDS(authsize));
+ 
+ 	sec_sqe->type2.mac_key_alg |=
+-			cpu_to_le32((u32)((ctx->a_key_len) /
+-			SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET);
++			cpu_to_le32((u32)BYTES_TO_WORDS(ctx->a_key_len) << SEC_AKEY_OFFSET);
+ 
+ 	sec_sqe->type2.mac_key_alg |=
+ 			cpu_to_le32((u32)(ctx->a_alg) << SEC_AEAD_ALG_OFFSET);
+@@ -1639,12 +1625,10 @@ static void sec_auth_bd_fill_ex_v3(struct sec_auth_ctx *ctx, int dir,
+ 	sqe3->a_key_addr = cpu_to_le64(ctx->a_key_dma);
+ 
+ 	sqe3->auth_mac_key |=
+-			cpu_to_le32((u32)(authsize /
+-			SEC_SQE_LEN_RATE) << SEC_MAC_OFFSET_V3);
++			cpu_to_le32(BYTES_TO_WORDS(authsize) << SEC_MAC_OFFSET_V3);
+ 
+ 	sqe3->auth_mac_key |=
+-			cpu_to_le32((u32)(ctx->a_key_len /
+-			SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET_V3);
++			cpu_to_le32((u32)BYTES_TO_WORDS(ctx->a_key_len) << SEC_AKEY_OFFSET_V3);
+ 
+ 	sqe3->auth_mac_key |=
+ 			cpu_to_le32((u32)(ctx->a_alg) << SEC_AUTH_ALG_OFFSET_V3);
+@@ -2003,8 +1987,7 @@ static int sec_aead_sha512_ctx_init(struct crypto_aead *tfm)
+ 	return sec_aead_ctx_init(tfm, "sha512");
+ }
+ 
+-static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx,
+-	struct sec_req *sreq)
++static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx, struct sec_req *sreq)
+ {
+ 	u32 cryptlen = sreq->c_req.sk_req->cryptlen;
+ 	struct device *dev = ctx->dev;
+@@ -2026,10 +2009,6 @@ static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx,
+ 		}
+ 		break;
+ 	case SEC_CMODE_CTR:
+-		if (unlikely(ctx->sec->qm.ver < QM_HW_V3)) {
+-			dev_err(dev, "skcipher HW version error!\n");
+-			ret = -EINVAL;
+-		}
+ 		break;
+ 	default:
+ 		ret = -EINVAL;
+@@ -2038,17 +2017,21 @@ static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx,
+ 	return ret;
+ }
+ 
+-static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
++static int sec_skcipher_param_check(struct sec_ctx *ctx,
++				    struct sec_req *sreq, bool *need_fallback)
+ {
+ 	struct skcipher_request *sk_req = sreq->c_req.sk_req;
+ 	struct device *dev = ctx->dev;
+ 	u8 c_alg = ctx->c_ctx.c_alg;
+ 
+-	if (unlikely(!sk_req->src || !sk_req->dst ||
+-		     sk_req->cryptlen > MAX_INPUT_DATA_LEN)) {
++	if (unlikely(!sk_req->src || !sk_req->dst)) {
+ 		dev_err(dev, "skcipher input param error!\n");
+ 		return -EINVAL;
+ 	}
++
++	if (sk_req->cryptlen > MAX_INPUT_DATA_LEN)
++		*need_fallback = true;
++
+ 	sreq->c_req.c_len = sk_req->cryptlen;
+ 
+ 	if (ctx->pbuf_supported && sk_req->cryptlen <= SEC_PBUF_SZ)
+@@ -2106,6 +2089,7 @@ static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
+ 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(sk_req);
+ 	struct sec_req *req = skcipher_request_ctx(sk_req);
+ 	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
++	bool need_fallback = false;
+ 	int ret;
+ 
+ 	if (!sk_req->cryptlen) {
+@@ -2119,11 +2103,11 @@ static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
+ 	req->c_req.encrypt = encrypt;
+ 	req->ctx = ctx;
+ 
+-	ret = sec_skcipher_param_check(ctx, req);
++	ret = sec_skcipher_param_check(ctx, req, &need_fallback);
+ 	if (unlikely(ret))
+ 		return -EINVAL;
+ 
+-	if (unlikely(ctx->c_ctx.fallback))
++	if (unlikely(ctx->c_ctx.fallback || need_fallback))
+ 		return sec_skcipher_soft_crypto(ctx, sk_req, encrypt);
+ 
+ 	return ctx->req_op->process(ctx, req);
+@@ -2231,52 +2215,35 @@ static int sec_aead_spec_check(struct sec_ctx *ctx, struct sec_req *sreq)
+ 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ 	size_t sz = crypto_aead_authsize(tfm);
+ 	u8 c_mode = ctx->c_ctx.c_mode;
+-	struct device *dev = ctx->dev;
+ 	int ret;
+ 
+-	/* Hardware does not handle cases where authsize is less than 4 bytes */
+-	if (unlikely(sz < MIN_MAC_LEN)) {
+-		sreq->aead_req.fallback = true;
++	if (unlikely(ctx->sec->qm.ver == QM_HW_V2 && !sreq->c_req.c_len))
+ 		return -EINVAL;
+-	}
+ 
+ 	if (unlikely(req->cryptlen + req->assoclen > MAX_INPUT_DATA_LEN ||
+-	    req->assoclen > SEC_MAX_AAD_LEN)) {
+-		dev_err(dev, "aead input spec error!\n");
++		     req->assoclen > SEC_MAX_AAD_LEN))
+ 		return -EINVAL;
+-	}
+ 
+ 	if (c_mode == SEC_CMODE_CCM) {
+-		if (unlikely(req->assoclen > SEC_MAX_CCM_AAD_LEN)) {
+-			dev_err_ratelimited(dev, "CCM input aad parameter is too long!\n");
++		if (unlikely(req->assoclen > SEC_MAX_CCM_AAD_LEN))
+ 			return -EINVAL;
+-		}
+-		ret = aead_iv_demension_check(req);
+-		if (ret) {
+-			dev_err(dev, "aead input iv param error!\n");
+-			return ret;
+-		}
+-	}
+ 
+-	if (sreq->c_req.encrypt)
+-		sreq->c_req.c_len = req->cryptlen;
+-	else
+-		sreq->c_req.c_len = req->cryptlen - sz;
+-	if (c_mode == SEC_CMODE_CBC) {
+-		if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
+-			dev_err(dev, "aead crypto length error!\n");
++		ret = aead_iv_demension_check(req);
++		if (unlikely(ret))
++			return -EINVAL;
++	} else if (c_mode == SEC_CMODE_CBC) {
++		if (unlikely(sz & WORD_MASK))
++			return -EINVAL;
++		if (unlikely(ctx->a_ctx.a_key_len & WORD_MASK))
+ 			return -EINVAL;
+-		}
+ 	}
+ 
+ 	return 0;
+ }
+ 
+-static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
++static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq, bool *need_fallback)
+ {
+ 	struct aead_request *req = sreq->aead_req.aead_req;
+-	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+-	size_t authsize = crypto_aead_authsize(tfm);
+ 	struct device *dev = ctx->dev;
+ 	u8 c_alg = ctx->c_ctx.c_alg;
+ 
+@@ -2285,12 +2252,10 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
+ 		return -EINVAL;
+ 	}
+ 
+-	if (ctx->sec->qm.ver == QM_HW_V2) {
+-		if (unlikely(!req->cryptlen || (!sreq->c_req.encrypt &&
+-			     req->cryptlen <= authsize))) {
+-			sreq->aead_req.fallback = true;
+-			return -EINVAL;
+-		}
++	if (unlikely(ctx->c_ctx.c_mode == SEC_CMODE_CBC &&
++		     sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
++		dev_err(dev, "aead cbc mode input data length error!\n");
++		return -EINVAL;
+ 	}
+ 
+ 	/* Support AES or SM4 */
+@@ -2299,8 +2264,10 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
+ 		return -EINVAL;
+ 	}
+ 
+-	if (unlikely(sec_aead_spec_check(ctx, sreq)))
++	if (unlikely(sec_aead_spec_check(ctx, sreq))) {
++		*need_fallback = true;
+ 		return -EINVAL;
++	}
+ 
+ 	if (ctx->pbuf_supported && (req->cryptlen + req->assoclen) <=
+ 		SEC_PBUF_SZ)
+@@ -2344,17 +2311,19 @@ static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
+ 	struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
+ 	struct sec_req *req = aead_request_ctx(a_req);
+ 	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
++	size_t sz = crypto_aead_authsize(tfm);
++	bool need_fallback = false;
+ 	int ret;
+ 
+ 	req->flag = a_req->base.flags;
+ 	req->aead_req.aead_req = a_req;
+ 	req->c_req.encrypt = encrypt;
+ 	req->ctx = ctx;
+-	req->aead_req.fallback = false;
++	req->c_req.c_len = a_req->cryptlen - (req->c_req.encrypt ? 0 : sz);
+ 
+-	ret = sec_aead_param_check(ctx, req);
++	ret = sec_aead_param_check(ctx, req, &need_fallback);
+ 	if (unlikely(ret)) {
+-		if (req->aead_req.fallback)
++		if (need_fallback)
+ 			return sec_aead_soft_crypto(ctx, a_req, encrypt);
+ 		return -EINVAL;
+ 	}
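[Aside, not part of the patch: the BYTES_TO_WORDS() change above replaces division by the removed SEC_SQE_LEN_RATE constant with an equivalent shift, since the hardware counts MAC and key lengths in 32-bit words. A quick standalone check — the macro body is copied from the hunk, the rest is illustrative:

#include <assert.h>
#include <stdio.h>

#define BYTES_TO_WORDS(bcount)	((bcount) >> 2)	/* from the hunk above */
#define WORD_MASK		0x3		/* from the hunk above */

int main(void)
{
	/* A 32-byte SHA-256 MAC is 8 words; a 20-byte SHA-1 MAC is 5. */
	assert(BYTES_TO_WORDS(32) == 8);
	assert(BYTES_TO_WORDS(20) == 5);
	/* Only word-aligned lengths pass the new WORD_MASK checks. */
	assert((20 & WORD_MASK) == 0 && (22 & WORD_MASK) != 0);
	printf("ok\n");
	return 0;
}

Lengths that fail the alignment check are now routed to the software fallback instead of being rejected with a driver error.]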
+diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c
+index d2f07e34f3142d..e1f60f0f507c96 100644
+--- a/drivers/crypto/intel/iaa/iaa_crypto_main.c
++++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c
+@@ -1527,7 +1527,7 @@ static int iaa_comp_acompress(struct acomp_req *req)
+ 	iaa_wq = idxd_wq_get_private(wq);
+ 
+ 	if (!req->dst) {
+-		gfp_t flags = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
++		gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
+ 
+ 		/* incompressible data will always be < 2 * slen */
+ 		req->dlen = 2 * req->slen;
+@@ -1609,7 +1609,7 @@ static int iaa_comp_acompress(struct acomp_req *req)
+ 
+ static int iaa_comp_adecompress_alloc_dest(struct acomp_req *req)
+ {
+-	gfp_t flags = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
++	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
+ 		GFP_KERNEL : GFP_ATOMIC;
+ 	struct crypto_tfm *tfm = req->base.tfm;
+ 	dma_addr_t src_addr, dst_addr;
+diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
+index 9faef33e54bd32..a17adc4beda2e3 100644
+--- a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
++++ b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
+@@ -420,6 +420,7 @@ static void adf_gen4_set_err_mask(struct adf_dev_err_mask *dev_err_mask)
+ 	dev_err_mask->parerr_cpr_xlt_mask = ADF_420XX_PARITYERRORMASK_CPR_XLT_MASK;
+ 	dev_err_mask->parerr_dcpr_ucs_mask = ADF_420XX_PARITYERRORMASK_DCPR_UCS_MASK;
+ 	dev_err_mask->parerr_pke_mask = ADF_420XX_PARITYERRORMASK_PKE_MASK;
++	dev_err_mask->parerr_wat_wcp_mask = ADF_420XX_PARITYERRORMASK_WAT_WCP_MASK;
+ 	dev_err_mask->ssmfeatren_mask = ADF_420XX_SSMFEATREN_MASK;
+ }
+ 
+diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c
+index 2dd3772bf58a6c..0f7f00a19e7dc6 100644
+--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c
++++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c
+@@ -695,7 +695,7 @@ static bool adf_handle_slice_hang_error(struct adf_accel_dev *accel_dev,
+ 	if (err_mask->parerr_wat_wcp_mask)
+ 		adf_poll_slicehang_csr(accel_dev, csr,
+ 				       ADF_GEN4_SLICEHANGSTATUS_WAT_WCP,
+-				       "ath_cph");
++				       "wat_wcp");
+ 
+ 	return false;
+ }
+@@ -1043,63 +1043,16 @@ static bool adf_handle_ssmcpppar_err(struct adf_accel_dev *accel_dev,
+ 	return reset_required;
+ }
+ 
+-static bool adf_handle_rf_parr_err(struct adf_accel_dev *accel_dev,
++static void adf_handle_rf_parr_err(struct adf_accel_dev *accel_dev,
+ 				   void __iomem *csr, u32 iastatssm)
+ {
+-	struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev);
+-	u32 reg;
+-
+ 	if (!(iastatssm & ADF_GEN4_IAINTSTATSSM_SSMSOFTERRORPARITY_BIT))
+-		return false;
+-
+-	reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_SRC);
+-	reg &= ADF_GEN4_SSMSOFTERRORPARITY_SRC_BIT;
+-	if (reg) {
+-		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+-		ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_SRC, reg);
+-	}
+-
+-	reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_ATH_CPH);
+-	reg &= err_mask->parerr_ath_cph_mask;
+-	if (reg) {
+-		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+-		ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_ATH_CPH, reg);
+-	}
+-
+-	reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_CPR_XLT);
+-	reg &= err_mask->parerr_cpr_xlt_mask;
+-	if (reg) {
+-		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+-		ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_CPR_XLT, reg);
+-	}
+-
+-	reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_DCPR_UCS);
+-	reg &= err_mask->parerr_dcpr_ucs_mask;
+-	if (reg) {
+-		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+-		ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_DCPR_UCS, reg);
+-	}
+-
+-	reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_PKE);
+-	reg &= err_mask->parerr_pke_mask;
+-	if (reg) {
+-		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+-		ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_PKE, reg);
+-	}
+-
+-	if (err_mask->parerr_wat_wcp_mask) {
+-		reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_WAT_WCP);
+-		reg &= err_mask->parerr_wat_wcp_mask;
+-		if (reg) {
+-			ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+-			ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_WAT_WCP,
+-				   reg);
+-		}
+-	}
++		return;
+ 
++	ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+ 	dev_err(&GET_DEV(accel_dev), "Slice ssm soft parity error reported");
+ 
+-	return false;
++	return;
+ }
+ 
+ static bool adf_handle_ser_err_ssmsh(struct adf_accel_dev *accel_dev,
+@@ -1171,8 +1124,8 @@ static bool adf_handle_iaintstatssm(struct adf_accel_dev *accel_dev,
+ 	reset_required |= adf_handle_slice_hang_error(accel_dev, csr, iastatssm);
+ 	reset_required |= adf_handle_spppar_err(accel_dev, csr, iastatssm);
+ 	reset_required |= adf_handle_ssmcpppar_err(accel_dev, csr, iastatssm);
+-	reset_required |= adf_handle_rf_parr_err(accel_dev, csr, iastatssm);
+ 	reset_required |= adf_handle_ser_err_ssmsh(accel_dev, csr, iastatssm);
++	adf_handle_rf_parr_err(accel_dev, csr, iastatssm);
+ 
+ 	ADF_CSR_WR(csr, ADF_GEN4_IAINTSTATSSM, iastatssm);
+ 
+diff --git a/drivers/crypto/nx/nx-common-pseries.c b/drivers/crypto/nx/nx-common-pseries.c
+index 35f2d0d8507ed7..7e98f174f69b99 100644
+--- a/drivers/crypto/nx/nx-common-pseries.c
++++ b/drivers/crypto/nx/nx-common-pseries.c
+@@ -1144,6 +1144,7 @@ static void __init nxcop_get_capabilities(void)
+ {
+ 	struct hv_vas_all_caps *hv_caps;
+ 	struct hv_nx_cop_caps *hv_nxc;
++	u64 feat;
+ 	int rc;
+ 
+ 	hv_caps = kmalloc(sizeof(*hv_caps), GFP_KERNEL);
+@@ -1154,27 +1155,26 @@ static void __init nxcop_get_capabilities(void)
+ 	 */
+ 	rc = h_query_vas_capabilities(H_QUERY_NX_CAPABILITIES, 0,
+ 					  (u64)virt_to_phys(hv_caps));
++	if (!rc)
++		feat = be64_to_cpu(hv_caps->feat_type);
++	kfree(hv_caps);
+ 	if (rc)
+-		goto out;
++		return;
++	if (!(feat & VAS_NX_GZIP_FEAT_BIT))
++		return;
+ 
+-	caps_feat = be64_to_cpu(hv_caps->feat_type);
+ 	/*
+ 	 * NX-GZIP feature available
+ 	 */
+-	if (caps_feat & VAS_NX_GZIP_FEAT_BIT) {
+-		hv_nxc = kmalloc(sizeof(*hv_nxc), GFP_KERNEL);
+-		if (!hv_nxc)
+-			goto out;
+-		/*
+-		 * Get capabilities for NX-GZIP feature
+-		 */
+-		rc = h_query_vas_capabilities(H_QUERY_NX_CAPABILITIES,
+-						  VAS_NX_GZIP_FEAT,
+-						  (u64)virt_to_phys(hv_nxc));
+-	} else {
+-		pr_err("NX-GZIP feature is not available\n");
+-		rc = -EINVAL;
+-	}
++	hv_nxc = kmalloc(sizeof(*hv_nxc), GFP_KERNEL);
++	if (!hv_nxc)
++		return;
++	/*
++	 * Get capabilities for NX-GZIP feature
++	 */
++	rc = h_query_vas_capabilities(H_QUERY_NX_CAPABILITIES,
++					  VAS_NX_GZIP_FEAT,
++					  (u64)virt_to_phys(hv_nxc));
+ 
+ 	if (!rc) {
+ 		nx_cop_caps.descriptor = be64_to_cpu(hv_nxc->descriptor);
+@@ -1184,13 +1184,10 @@ static void __init nxcop_get_capabilities(void)
+ 				be64_to_cpu(hv_nxc->min_compress_len);
+ 		nx_cop_caps.min_decompress_len =
+ 				be64_to_cpu(hv_nxc->min_decompress_len);
+-	} else {
+-		caps_feat = 0;
++		caps_feat = feat;
+ 	}
+ 
+ 	kfree(hv_nxc);
+-out:
+-	kfree(hv_caps);
+ }
+ 
+ static const struct vio_device_id nx842_vio_driver_ids[] = {
+diff --git a/drivers/crypto/tegra/tegra-se-aes.c b/drivers/crypto/tegra/tegra-se-aes.c
+index 3106fd1e84b91e..0ed0515e1ed54c 100644
+--- a/drivers/crypto/tegra/tegra-se-aes.c
++++ b/drivers/crypto/tegra/tegra-se-aes.c
+@@ -282,7 +282,7 @@ static int tegra_aes_do_one_req(struct crypto_engine *engine, void *areq)
+ 
+ 	/* Prepare the command and submit for execution */
+ 	cmdlen = tegra_aes_prep_cmd(ctx, rctx);
+-	ret = tegra_se_host1x_submit(se, cmdlen);
++	ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
+ 
+ 	/* Copy the result */
+ 	tegra_aes_update_iv(req, ctx);
+@@ -443,6 +443,9 @@ static int tegra_aes_crypt(struct skcipher_request *req, bool encrypt)
+ 	if (!req->cryptlen)
+ 		return 0;
+ 
++	if (ctx->alg == SE_ALG_ECB)
++		req->iv = NULL;
++
+ 	rctx->encrypt = encrypt;
+ 	rctx->config = tegra234_aes_cfg(ctx->alg, encrypt);
+ 	rctx->crypto_config = tegra234_aes_crypto_cfg(ctx->alg, encrypt);
+@@ -719,7 +722,7 @@ static int tegra_gcm_do_gmac(struct tegra_aead_ctx *ctx, struct tegra_aead_reqct
+ 
+ 	cmdlen = tegra_gmac_prep_cmd(ctx, rctx);
+ 
+-	return tegra_se_host1x_submit(se, cmdlen);
++	return tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
+ }
+ 
+ static int tegra_gcm_do_crypt(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
+@@ -736,7 +739,7 @@ static int tegra_gcm_do_crypt(struct tegra_aead_ctx *ctx, struct tegra_aead_reqc
+ 
+ 	/* Prepare command and submit */
+ 	cmdlen = tegra_gcm_crypt_prep_cmd(ctx, rctx);
+-	ret = tegra_se_host1x_submit(se, cmdlen);
++	ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -759,7 +762,7 @@ static int tegra_gcm_do_final(struct tegra_aead_ctx *ctx, struct tegra_aead_reqc
+ 
+ 	/* Prepare command and submit */
+ 	cmdlen = tegra_gcm_prep_final_cmd(se, cpuvaddr, rctx);
+-	ret = tegra_se_host1x_submit(se, cmdlen);
++	ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -891,7 +894,7 @@ static int tegra_ccm_do_cbcmac(struct tegra_aead_ctx *ctx, struct tegra_aead_req
+ 	/* Prepare command and submit */
+ 	cmdlen = tegra_cbcmac_prep_cmd(ctx, rctx);
+ 
+-	return tegra_se_host1x_submit(se, cmdlen);
++	return tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
+ }
+ 
+ static int tegra_ccm_set_msg_len(u8 *block, unsigned int msglen, int csize)
+@@ -1098,7 +1101,7 @@ static int tegra_ccm_do_ctr(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx
+ 
+ 	/* Prepare command and submit */
+ 	cmdlen = tegra_ctr_prep_cmd(ctx, rctx);
+-	ret = tegra_se_host1x_submit(se, cmdlen);
++	ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -1513,23 +1516,16 @@ static int tegra_cmac_do_update(struct ahash_request *req)
+ 	rctx->residue.size = nresidue;
+ 
+ 	/*
+-	 * If this is not the first 'update' call, paste the previous copied
++	 * If this is not the first task, paste the previous copied
+ 	 * intermediate results to the registers so that it gets picked up.
+-	 * This is to support the import/export functionality.
+ 	 */
+ 	if (!(rctx->task & SHA_FIRST))
+ 		tegra_cmac_paste_result(ctx->se, rctx);
+ 
+ 	cmdlen = tegra_cmac_prep_cmd(ctx, rctx);
++	ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
+ 
+-	ret = tegra_se_host1x_submit(se, cmdlen);
+-	/*
+-	 * If this is not the final update, copy the intermediate results
+-	 * from the registers so that it can be used in the next 'update'
+-	 * call. This is to support the import/export functionality.
+-	 */
+-	if (!(rctx->task & SHA_FINAL))
+-		tegra_cmac_copy_result(ctx->se, rctx);
++	tegra_cmac_copy_result(ctx->se, rctx);
+ 
+ 	return ret;
+ }
+@@ -1553,9 +1549,16 @@ static int tegra_cmac_do_final(struct ahash_request *req)
+ 	rctx->total_len += rctx->residue.size;
+ 	rctx->config = tegra234_aes_cfg(SE_ALG_CMAC, 0);
+ 
++	/*
++	 * If this is not the first task, paste the previous copied
++	 * intermediate results to the registers so that it gets picked up.
++	 */
++	if (!(rctx->task & SHA_FIRST))
++		tegra_cmac_paste_result(ctx->se, rctx);
++
+ 	/* Prepare command and submit */
+ 	cmdlen = tegra_cmac_prep_cmd(ctx, rctx);
+-	ret = tegra_se_host1x_submit(se, cmdlen);
++	ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
+ 	if (ret)
+ 		goto out;
+ 
+@@ -1581,18 +1584,24 @@ static int tegra_cmac_do_one_req(struct crypto_engine *engine, void *areq)
+ 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ 	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
+ 	struct tegra_se *se = ctx->se;
+-	int ret;
++	int ret = 0;
+ 
+ 	if (rctx->task & SHA_UPDATE) {
+ 		ret = tegra_cmac_do_update(req);
++		if (ret)
++			goto out;
++
+ 		rctx->task &= ~SHA_UPDATE;
+ 	}
+ 
+ 	if (rctx->task & SHA_FINAL) {
+ 		ret = tegra_cmac_do_final(req);
++		if (ret)
++			goto out;
++
+ 		rctx->task &= ~SHA_FINAL;
+ 	}
+-
++out:
+ 	crypto_finalize_hash_request(se->engine, req, ret);
+ 
+ 	return 0;
+diff --git a/drivers/crypto/tegra/tegra-se-hash.c b/drivers/crypto/tegra/tegra-se-hash.c
+index 0b5cdd5676b17e..726e30c0e63ebb 100644
+--- a/drivers/crypto/tegra/tegra-se-hash.c
++++ b/drivers/crypto/tegra/tegra-se-hash.c
+@@ -300,8 +300,9 @@ static int tegra_sha_do_update(struct ahash_request *req)
+ {
+ 	struct tegra_sha_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
+ 	struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
++	struct tegra_se *se = ctx->se;
+ 	unsigned int nblks, nresidue, size, ret;
+-	u32 *cpuvaddr = ctx->se->cmdbuf->addr;
++	u32 *cpuvaddr = se->cmdbuf->addr;
+ 
+ 	nresidue = (req->nbytes + rctx->residue.size) % rctx->blk_size;
+ 	nblks = (req->nbytes + rctx->residue.size) / rctx->blk_size;
+@@ -353,11 +354,11 @@ static int tegra_sha_do_update(struct ahash_request *req)
+ 	 * This is to support the import/export functionality.
+ 	 */
+ 	if (!(rctx->task & SHA_FIRST))
+-		tegra_sha_paste_hash_result(ctx->se, rctx);
++		tegra_sha_paste_hash_result(se, rctx);
+ 
+-	size = tegra_sha_prep_cmd(ctx->se, cpuvaddr, rctx);
++	size = tegra_sha_prep_cmd(se, cpuvaddr, rctx);
+ 
+-	ret = tegra_se_host1x_submit(ctx->se, size);
++	ret = tegra_se_host1x_submit(se, se->cmdbuf, size);
+ 
+ 	/*
+ 	 * If this is not the final update, copy the intermediate results
+@@ -365,7 +366,7 @@ static int tegra_sha_do_update(struct ahash_request *req)
+ 	 * call. This is to support the import/export functionality.
+ 	 */
+ 	if (!(rctx->task & SHA_FINAL))
+-		tegra_sha_copy_hash_result(ctx->se, rctx);
++		tegra_sha_copy_hash_result(se, rctx);
+ 
+ 	return ret;
+ }
+@@ -388,7 +389,7 @@ static int tegra_sha_do_final(struct ahash_request *req)
+ 
+ 	size = tegra_sha_prep_cmd(se, cpuvaddr, rctx);
+ 
+-	ret = tegra_se_host1x_submit(se, size);
++	ret = tegra_se_host1x_submit(se, se->cmdbuf, size);
+ 	if (ret)
+ 		goto out;
+ 
+@@ -416,14 +417,21 @@ static int tegra_sha_do_one_req(struct crypto_engine *engine, void *areq)
+ 
+ 	if (rctx->task & SHA_UPDATE) {
+ 		ret = tegra_sha_do_update(req);
++		if (ret)
++			goto out;
++
+ 		rctx->task &= ~SHA_UPDATE;
+ 	}
+ 
+ 	if (rctx->task & SHA_FINAL) {
+ 		ret = tegra_sha_do_final(req);
++		if (ret)
++			goto out;
++
+ 		rctx->task &= ~SHA_FINAL;
+ 	}
+ 
++out:
+ 	crypto_finalize_hash_request(se->engine, req, ret);
+ 
+ 	return 0;
+@@ -559,13 +567,18 @@ static int tegra_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
+ 			     unsigned int keylen)
+ {
+ 	struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
++	int ret;
+ 
+ 	if (aes_check_keylen(keylen))
+ 		return tegra_hmac_fallback_setkey(ctx, key, keylen);
+ 
++	ret = tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
++	if (ret)
++		return tegra_hmac_fallback_setkey(ctx, key, keylen);
++
+ 	ctx->fallback = false;
+ 
+-	return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
++	return 0;
+ }
+ 
+ static int tegra_sha_update(struct ahash_request *req)
+diff --git a/drivers/crypto/tegra/tegra-se-key.c b/drivers/crypto/tegra/tegra-se-key.c
+index ac14678dbd30d5..276b261fb6df1f 100644
+--- a/drivers/crypto/tegra/tegra-se-key.c
++++ b/drivers/crypto/tegra/tegra-se-key.c
+@@ -115,11 +115,17 @@ static int tegra_key_insert(struct tegra_se *se, const u8 *key,
+ 			    u32 keylen, u16 slot, u32 alg)
+ {
+ 	const u32 *keyval = (u32 *)key;
+-	u32 *addr = se->cmdbuf->addr, size;
++	u32 *addr = se->keybuf->addr, size;
++	int ret;
++
++	mutex_lock(&kslt_lock);
+ 
+ 	size = tegra_key_prep_ins_cmd(se, addr, keyval, keylen, slot, alg);
++	ret = tegra_se_host1x_submit(se, se->keybuf, size);
+ 
+-	return tegra_se_host1x_submit(se, size);
++	mutex_unlock(&kslt_lock);
++
++	return ret;
+ }
+ 
+ void tegra_key_invalidate(struct tegra_se *se, u32 keyid, u32 alg)
+diff --git a/drivers/crypto/tegra/tegra-se-main.c b/drivers/crypto/tegra/tegra-se-main.c
+index f94c0331b148cc..55690b044e4174 100644
+--- a/drivers/crypto/tegra/tegra-se-main.c
++++ b/drivers/crypto/tegra/tegra-se-main.c
+@@ -141,7 +141,7 @@ static struct tegra_se_cmdbuf *tegra_se_host1x_bo_alloc(struct tegra_se *se, ssi
+ 	return cmdbuf;
+ }
+ 
+-int tegra_se_host1x_submit(struct tegra_se *se, u32 size)
++int tegra_se_host1x_submit(struct tegra_se *se, struct tegra_se_cmdbuf *cmdbuf, u32 size)
+ {
+ 	struct host1x_job *job;
+ 	int ret;
+@@ -160,9 +160,9 @@ int tegra_se_host1x_submit(struct tegra_se *se, u32 size)
+ 	job->engine_fallback_streamid = se->stream_id;
+ 	job->engine_streamid_offset = SE_STREAM_ID;
+ 
+-	se->cmdbuf->words = size;
++	cmdbuf->words = size;
+ 
+-	host1x_job_add_gather(job, &se->cmdbuf->bo, size, 0);
++	host1x_job_add_gather(job, &cmdbuf->bo, size, 0);
+ 
+ 	ret = host1x_job_pin(job, se->dev);
+ 	if (ret) {
+@@ -220,14 +220,22 @@ static int tegra_se_client_init(struct host1x_client *client)
+ 		goto syncpt_put;
+ 	}
+ 
++	se->keybuf = tegra_se_host1x_bo_alloc(se, SZ_4K);
++	if (!se->keybuf) {
++		ret = -ENOMEM;
++		goto cmdbuf_put;
++	}
++
+ 	ret = se->hw->init_alg(se);
+ 	if (ret) {
+ 		dev_err(se->dev, "failed to register algorithms\n");
+-		goto cmdbuf_put;
++		goto keybuf_put;
+ 	}
+ 
+ 	return 0;
+ 
++keybuf_put:
++	tegra_se_cmdbuf_put(&se->keybuf->bo);
+ cmdbuf_put:
+ 	tegra_se_cmdbuf_put(&se->cmdbuf->bo);
+ syncpt_put:
+diff --git a/drivers/crypto/tegra/tegra-se.h b/drivers/crypto/tegra/tegra-se.h
+index b9dd7ceb8783c9..b54aefe717a174 100644
+--- a/drivers/crypto/tegra/tegra-se.h
++++ b/drivers/crypto/tegra/tegra-se.h
+@@ -420,6 +420,7 @@ struct tegra_se {
+ 	struct host1x_client client;
+ 	struct host1x_channel *channel;
+ 	struct tegra_se_cmdbuf *cmdbuf;
++	struct tegra_se_cmdbuf *keybuf;
+ 	struct crypto_engine *engine;
+ 	struct host1x_syncpt *syncpt;
+ 	struct device *dev;
+@@ -502,7 +503,7 @@ void tegra_deinit_hash(struct tegra_se *se);
+ int tegra_key_submit(struct tegra_se *se, const u8 *key,
+ 		     u32 keylen, u32 alg, u32 *keyid);
+ void tegra_key_invalidate(struct tegra_se *se, u32 keyid, u32 alg);
+-int tegra_se_host1x_submit(struct tegra_se *se, u32 size);
++int tegra_se_host1x_submit(struct tegra_se *se, struct tegra_se_cmdbuf *cmdbuf, u32 size);
+ 
+ /* HOST1x OPCODES */
+ static inline u32 host1x_opcode_setpayload(unsigned int payload)
+diff --git a/drivers/dma/fsl-edma-main.c b/drivers/dma/fsl-edma-main.c
+index 70cb7fda757a94..27645606f900b8 100644
+--- a/drivers/dma/fsl-edma-main.c
++++ b/drivers/dma/fsl-edma-main.c
+@@ -303,6 +303,7 @@ fsl_edma2_irq_init(struct platform_device *pdev,
+ 
+ 		/* The last IRQ is for eDMA err */
+ 		if (i == count - 1) {
++			fsl_edma->errirq = irq;
+ 			ret = devm_request_irq(&pdev->dev, irq,
+ 						fsl_edma_err_handler,
+ 						0, "eDMA2-ERR", fsl_edma);
+@@ -322,10 +323,13 @@ static void fsl_edma_irq_exit(
+ 		struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
+ {
+ 	if (fsl_edma->txirq == fsl_edma->errirq) {
+-		devm_free_irq(&pdev->dev, fsl_edma->txirq, fsl_edma);
++		if (fsl_edma->txirq >= 0)
++			devm_free_irq(&pdev->dev, fsl_edma->txirq, fsl_edma);
+ 	} else {
+-		devm_free_irq(&pdev->dev, fsl_edma->txirq, fsl_edma);
+-		devm_free_irq(&pdev->dev, fsl_edma->errirq, fsl_edma);
++		if (fsl_edma->txirq >= 0)
++			devm_free_irq(&pdev->dev, fsl_edma->txirq, fsl_edma);
++		if (fsl_edma->errirq >= 0)
++			devm_free_irq(&pdev->dev, fsl_edma->errirq, fsl_edma);
+ 	}
+ }
+ 
+@@ -513,6 +517,8 @@ static int fsl_edma_probe(struct platform_device *pdev)
+ 	if (!fsl_edma)
+ 		return -ENOMEM;
+ 
++	fsl_edma->errirq = -EINVAL;
++	fsl_edma->txirq = -EINVAL;
+ 	fsl_edma->drvdata = drvdata;
+ 	fsl_edma->n_chans = chans;
+ 	mutex_init(&fsl_edma->fsl_edma_mutex);
+@@ -699,9 +705,9 @@ static void fsl_edma_remove(struct platform_device *pdev)
+ 	struct fsl_edma_engine *fsl_edma = platform_get_drvdata(pdev);
+ 
+ 	fsl_edma_irq_exit(pdev, fsl_edma);
+-	fsl_edma_cleanup_vchan(&fsl_edma->dma_dev);
+ 	of_dma_controller_free(np);
+ 	dma_async_device_unregister(&fsl_edma->dma_dev);
++	fsl_edma_cleanup_vchan(&fsl_edma->dma_dev);
+ 	fsl_disable_clocks(fsl_edma, fsl_edma->drvdata->dmamuxs);
+ }
+ 
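[Aside, not part of the patch: the fsl-edma hunks above pre-set txirq and errirq to -EINVAL so the exit path can tell "never requested" apart from a valid IRQ number. The guard pattern in isolation, with hypothetical names:

#include <stdio.h>

/* Hypothetical stand-in for the fsl_edma_engine IRQ fields. */
struct edma {
	int txirq;
	int errirq;
};

static void free_irq_checked(int irq)
{
	if (irq >= 0)
		printf("freeing irq %d\n", irq);
	else
		printf("skipping never-requested irq\n");
}

int main(void)
{
	/* Probe failed before requesting txirq: it still holds -EINVAL. */
	struct edma e = { .txirq = -22, .errirq = 45 };

	free_irq_checked(e.txirq);
	free_irq_checked(e.errirq);
	return 0;
}

Without the initialization, a probe failure before fsl_edma2_irq_init() would leave the fields zeroed and the exit path would free IRQ 0 by mistake.]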
+diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c
+index 51556c72a96746..fbdf005bed3a49 100644
+--- a/drivers/edac/i10nm_base.c
++++ b/drivers/edac/i10nm_base.c
+@@ -751,6 +751,8 @@ static int i10nm_get_ddr_munits(void)
+ 				continue;
+ 			} else {
+ 				d->imc[lmc].mdev = mdev;
++				if (res_cfg->type == SPR)
++					skx_set_mc_mapping(d, i, lmc);
+ 				lmc++;
+ 			}
+ 		}
+diff --git a/drivers/edac/ie31200_edac.c b/drivers/edac/ie31200_edac.c
+index 9ef13570f2e540..56be8ef40f376b 100644
+--- a/drivers/edac/ie31200_edac.c
++++ b/drivers/edac/ie31200_edac.c
+@@ -91,8 +91,6 @@
+ 	 (((did) & PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_MASK) ==                 \
+ 	  PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_MASK))
+ 
+-#define IE31200_DIMMS			4
+-#define IE31200_RANKS			8
+ #define IE31200_RANKS_PER_CHANNEL	4
+ #define IE31200_DIMMS_PER_CHANNEL	2
+ #define IE31200_CHANNELS		2
+@@ -164,6 +162,7 @@
+ #define IE31200_MAD_DIMM_0_OFFSET		0x5004
+ #define IE31200_MAD_DIMM_0_OFFSET_SKL		0x500C
+ #define IE31200_MAD_DIMM_SIZE			GENMASK_ULL(7, 0)
++#define IE31200_MAD_DIMM_SIZE_SKL		GENMASK_ULL(5, 0)
+ #define IE31200_MAD_DIMM_A_RANK			BIT(17)
+ #define IE31200_MAD_DIMM_A_RANK_SHIFT		17
+ #define IE31200_MAD_DIMM_A_RANK_SKL		BIT(10)
+@@ -377,7 +376,7 @@ static void __iomem *ie31200_map_mchbar(struct pci_dev *pdev)
+ static void __skl_populate_dimm_info(struct dimm_data *dd, u32 addr_decode,
+ 				     int chan)
+ {
+-	dd->size = (addr_decode >> (chan << 4)) & IE31200_MAD_DIMM_SIZE;
++	dd->size = (addr_decode >> (chan << 4)) & IE31200_MAD_DIMM_SIZE_SKL;
+ 	dd->dual_rank = (addr_decode & (IE31200_MAD_DIMM_A_RANK_SKL << (chan << 4))) ? 1 : 0;
+ 	dd->x16_width = ((addr_decode & (IE31200_MAD_DIMM_A_WIDTH_SKL << (chan << 4))) >>
+ 				(IE31200_MAD_DIMM_A_WIDTH_SKL_SHIFT + (chan << 4)));
+@@ -426,7 +425,7 @@ static int ie31200_probe1(struct pci_dev *pdev, int dev_idx)
+ 
+ 	nr_channels = how_many_channels(pdev);
+ 	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+-	layers[0].size = IE31200_DIMMS;
++	layers[0].size = IE31200_RANKS_PER_CHANNEL;
+ 	layers[0].is_virt_csrow = true;
+ 	layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ 	layers[1].size = nr_channels;
+@@ -618,7 +617,7 @@ static int __init ie31200_init(void)
+ 
+ 	pci_rc = pci_register_driver(&ie31200_driver);
+ 	if (pci_rc < 0)
+-		goto fail0;
++		return pci_rc;
+ 
+ 	if (!mci_pdev) {
+ 		ie31200_registered = 0;
+@@ -629,11 +628,13 @@ static int __init ie31200_init(void)
+ 			if (mci_pdev)
+ 				break;
+ 		}
++
+ 		if (!mci_pdev) {
+ 			edac_dbg(0, "ie31200 pci_get_device fail\n");
+ 			pci_rc = -ENODEV;
+-			goto fail1;
++			goto fail0;
+ 		}
++
+ 		pci_rc = ie31200_init_one(mci_pdev, &ie31200_pci_tbl[i]);
+ 		if (pci_rc < 0) {
+ 			edac_dbg(0, "ie31200 init fail\n");
+@@ -641,12 +642,12 @@ static int __init ie31200_init(void)
+ 			goto fail1;
+ 		}
+ 	}
+-	return 0;
+ 
++	return 0;
+ fail1:
+-	pci_unregister_driver(&ie31200_driver);
+-fail0:
+ 	pci_dev_put(mci_pdev);
++fail0:
++	pci_unregister_driver(&ie31200_driver);
+ 
+ 	return pci_rc;
+ }
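[Aside, not part of the patch: the relabelled ie31200_init() error path above restores the usual unwind rule — undo in reverse order of setup, so the PCI driver registered first is unregistered last. A generic sketch of the pattern, with hypothetical names:

#include <stdio.h>

static int setup_a(void) { puts("setup a"); return 0; }
static int setup_b(void) { puts("setup b"); return -1; /* fails */ }
static void undo_a(void) { puts("undo a"); }

int main(void)
{
	int ret = setup_a();
	if (ret)
		return ret;

	ret = setup_b();
	if (ret)
		goto fail_b;

	return 0;

fail_b:
	/* Nothing from b to undo; unwind the earlier step. */
	undo_a();
	return ret;
}

The original code had the fail0/fail1 labels inverted, so a failure after pci_register_driver() put the reference but left the driver registered.]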
+diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c
+index 6cf17af7d9112b..85ec3196664d30 100644
+--- a/drivers/edac/skx_common.c
++++ b/drivers/edac/skx_common.c
+@@ -120,6 +120,35 @@ void skx_adxl_put(void)
+ }
+ EXPORT_SYMBOL_GPL(skx_adxl_put);
+ 
++static void skx_init_mc_mapping(struct skx_dev *d)
++{
++	/*
++	 * By default, the BIOS presents all memory controllers within each
++	 * socket to the EDAC driver. The physical indices are the same as
++	 * the logical indices of the memory controllers enumerated by the
++	 * EDAC driver.
++	 */
++	for (int i = 0; i < NUM_IMC; i++)
++		d->mc_mapping[i] = i;
++}
++
++void skx_set_mc_mapping(struct skx_dev *d, u8 pmc, u8 lmc)
++{
++	edac_dbg(0, "Set the mapping of mc phy idx to logical idx: %02d -> %02d\n",
++		 pmc, lmc);
++
++	d->mc_mapping[pmc] = lmc;
++}
++EXPORT_SYMBOL_GPL(skx_set_mc_mapping);
++
++static u8 skx_get_mc_mapping(struct skx_dev *d, u8 pmc)
++{
++	edac_dbg(0, "Get the mapping of mc phy idx to logical idx: %02d -> %02d\n",
++		 pmc, d->mc_mapping[pmc]);
++
++	return d->mc_mapping[pmc];
++}
++
+ static bool skx_adxl_decode(struct decoded_addr *res, enum error_source err_src)
+ {
+ 	struct skx_dev *d;
+@@ -187,6 +216,8 @@ static bool skx_adxl_decode(struct decoded_addr *res, enum error_source err_src)
+ 		return false;
+ 	}
+ 
++	res->imc = skx_get_mc_mapping(d, res->imc);
++
+ 	for (i = 0; i < adxl_component_count; i++) {
+ 		if (adxl_values[i] == ~0x0ull)
+ 			continue;
+@@ -307,6 +338,8 @@ int skx_get_all_bus_mappings(struct res_config *cfg, struct list_head **list)
+ 			 d->bus[0], d->bus[1], d->bus[2], d->bus[3]);
+ 		list_add_tail(&d->list, &dev_edac_list);
+ 		prev = pdev;
++
++		skx_init_mc_mapping(d);
+ 	}
+ 
+ 	if (list)
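[Aside, not part of the patch: the mc_mapping table added above translates the physical memory-controller index reported by ADXL into the logical index the EDAC driver enumerated, which differ once the BIOS hides a controller. A compact standalone illustration — NUM_IMC and the hidden-controller layout here are assumed for the example:

#include <stdio.h>

#define NUM_IMC 4	/* assumed controller count for this sketch */

int main(void)
{
	unsigned char mc_mapping[NUM_IMC];

	/* Default: physical index == logical index (skx_init_mc_mapping). */
	for (int i = 0; i < NUM_IMC; i++)
		mc_mapping[i] = i;

	/* Suppose the BIOS hides physical controller 1: physical 2 and 3
	 * then enumerate as logical 1 and 2 (skx_set_mc_mapping). */
	mc_mapping[2] = 1;
	mc_mapping[3] = 2;

	/* Error decode converts the ADXL index before using it. */
	printf("phys 3 -> logical %d\n", mc_mapping[3]);
	return 0;
}

This is the conversion skx_adxl_decode() now applies to res->imc before walking the per-controller structures.]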
+diff --git a/drivers/edac/skx_common.h b/drivers/edac/skx_common.h
+index 54bba8a62f727c..849198fd14da69 100644
+--- a/drivers/edac/skx_common.h
++++ b/drivers/edac/skx_common.h
+@@ -93,6 +93,16 @@ struct skx_dev {
+ 	struct pci_dev *uracu; /* for i10nm CPU */
+ 	struct pci_dev *pcu_cr3; /* for HBM memory detection */
+ 	u32 mcroute;
++	/*
++	 * Some server BIOS may hide certain memory controllers, and the
++	 * EDAC driver skips those hidden memory controllers. However, the
++	 * ADXL still decodes memory error address using physical memory
++	 * controller indices. The mapping table is used to convert the
++	 * physical indices (reported by ADXL) to the logical indices
++	 * (used the EDAC driver) of present memory controllers during the
++	 * error handling process.
++	 */
++	u8 mc_mapping[NUM_IMC];
+ 	struct skx_imc {
+ 		struct mem_ctl_info *mci;
+ 		struct pci_dev *mdev; /* for i10nm CPU */
+@@ -242,6 +252,7 @@ void skx_adxl_put(void);
+ void skx_set_decode(skx_decode_f decode, skx_show_retry_log_f show_retry_log);
+ void skx_set_mem_cfg(bool mem_cfg_2lm);
+ void skx_set_res_cfg(struct res_config *cfg);
++void skx_set_mc_mapping(struct skx_dev *d, u8 pmc, u8 lmc);
+ 
+ int skx_get_src_id(struct skx_dev *d, int off, u8 *id);
+ int skx_get_node_id(struct skx_dev *d, u8 *id);
+diff --git a/drivers/firmware/cirrus/cs_dsp.c b/drivers/firmware/cirrus/cs_dsp.c
+index bd1ea99c3b4751..ea452f19085427 100644
+--- a/drivers/firmware/cirrus/cs_dsp.c
++++ b/drivers/firmware/cirrus/cs_dsp.c
+@@ -1631,6 +1631,7 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware,
+ 
+ 	cs_dsp_debugfs_save_wmfwname(dsp, file);
+ 
++	ret = 0;
+ out_fw:
+ 	cs_dsp_buf_free(&buf_list);
+ 
+@@ -2338,6 +2339,7 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware
+ 
+ 	cs_dsp_debugfs_save_binname(dsp, file);
+ 
++	ret = 0;
+ out_fw:
+ 	cs_dsp_buf_free(&buf_list);
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index 32afcf9485245e..7978d5189c37d4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -2633,7 +2633,6 @@ static int amdgpu_pmops_freeze(struct device *dev)
+ 
+ 	adev->in_s4 = true;
+ 	r = amdgpu_device_suspend(drm_dev, true);
+-	adev->in_s4 = false;
+ 	if (r)
+ 		return r;
+ 
+@@ -2645,8 +2644,13 @@ static int amdgpu_pmops_freeze(struct device *dev)
+ static int amdgpu_pmops_thaw(struct device *dev)
+ {
+ 	struct drm_device *drm_dev = dev_get_drvdata(dev);
++	struct amdgpu_device *adev = drm_to_adev(drm_dev);
++	int r;
+ 
+-	return amdgpu_device_resume(drm_dev, true);
++	r = amdgpu_device_resume(drm_dev, true);
++	adev->in_s4 = false;
++
++	return r;
+ }
+ 
+ static int amdgpu_pmops_poweroff(struct device *dev)
+@@ -2659,6 +2663,9 @@ static int amdgpu_pmops_poweroff(struct device *dev)
+ static int amdgpu_pmops_restore(struct device *dev)
+ {
+ 	struct drm_device *drm_dev = dev_get_drvdata(dev);
++	struct amdgpu_device *adev = drm_to_adev(drm_dev);
++
++	adev->in_s4 = false;
+ 
+ 	return amdgpu_device_resume(drm_dev, true);
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
+index 6162582d0aa272..d5125523bfa7be 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
+@@ -584,7 +584,7 @@ int amdgpu_umsch_mm_init_microcode(struct amdgpu_umsch_mm *umsch)
+ 		fw_name = "amdgpu/umsch_mm_4_0_0.bin";
+ 		break;
+ 	default:
+-		break;
++		return -EINVAL;
+ 	}
+ 
+ 	r = amdgpu_ucode_request(adev, &adev->umsch_mm.fw, "%s", fw_name);
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+index d3e8be82a1727a..84cf5fd297b7f6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+@@ -1549,7 +1549,7 @@ static int gfx_v11_0_sw_init(void *handle)
+ 		adev->gfx.me.num_me = 1;
+ 		adev->gfx.me.num_pipe_per_me = 1;
+ 		adev->gfx.me.num_queue_per_pipe = 1;
+-		adev->gfx.mec.num_mec = 2;
++		adev->gfx.mec.num_mec = 1;
+ 		adev->gfx.mec.num_pipe_per_mec = 4;
+ 		adev->gfx.mec.num_queue_per_pipe = 4;
+ 		break;
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
+index d3798a333d1f88..b259e217930c75 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
+@@ -1332,7 +1332,7 @@ static int gfx_v12_0_sw_init(void *handle)
+ 		adev->gfx.me.num_me = 1;
+ 		adev->gfx.me.num_pipe_per_me = 1;
+ 		adev->gfx.me.num_queue_per_pipe = 1;
+-		adev->gfx.mec.num_mec = 2;
++		adev->gfx.mec.num_mec = 1;
+ 		adev->gfx.mec.num_pipe_per_mec = 2;
+ 		adev->gfx.mec.num_queue_per_pipe = 4;
+ 		break;
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index 05d1ae2ef84b4e..114653a0b57013 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -1269,6 +1269,7 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
+ 	adev->gfx.mec_fw_write_wait = false;
+ 
+ 	if ((amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 1)) &&
++	    (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 2)) &&
+ 	    ((adev->gfx.mec_fw_version < 0x000001a5) ||
+ 	     (adev->gfx.mec_feature_version < 46) ||
+ 	     (adev->gfx.pfp_fw_version < 0x000000b7) ||
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+index dffe2a86f383ef..951b87e7e3f68e 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+@@ -205,21 +205,6 @@ static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q,
+ 	if (!down_read_trylock(&adev->reset_domain->sem))
+ 		return -EIO;
+ 
+-	if (!pdd->proc_ctx_cpu_ptr) {
+-		r = amdgpu_amdkfd_alloc_gtt_mem(adev,
+-				AMDGPU_MES_PROC_CTX_SIZE,
+-				&pdd->proc_ctx_bo,
+-				&pdd->proc_ctx_gpu_addr,
+-				&pdd->proc_ctx_cpu_ptr,
+-				false);
+-		if (r) {
+-			dev_err(adev->dev,
+-				"failed to allocate process context bo\n");
+-			return r;
+-		}
+-		memset(pdd->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);
+-	}
+-
+ 	memset(&queue_input, 0x0, sizeof(struct mes_add_queue_input));
+ 	queue_input.process_id = qpd->pqm->process->pasid;
+ 	queue_input.page_table_base_addr =  qpd->page_table_base;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+index 42fd7669ac7d37..ac777244ee0a18 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+@@ -361,10 +361,26 @@ int pqm_create_queue(struct process_queue_manager *pqm,
+ 	if (retval != 0)
+ 		return retval;
+ 
++	/* Register process if this is the first queue */
+ 	if (list_empty(&pdd->qpd.queues_list) &&
+ 	    list_empty(&pdd->qpd.priv_queue_list))
+ 		dev->dqm->ops.register_process(dev->dqm, &pdd->qpd);
+ 
++	/* Allocate proc_ctx_bo only if MES is enabled and this is the first queue */
++	if (!pdd->proc_ctx_cpu_ptr && dev->kfd->shared_resources.enable_mes) {
++		retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev,
++						     AMDGPU_MES_PROC_CTX_SIZE,
++						     &pdd->proc_ctx_bo,
++						     &pdd->proc_ctx_gpu_addr,
++						     &pdd->proc_ctx_cpu_ptr,
++						     false);
++		if (retval) {
++			dev_err(dev->adev->dev, "failed to allocate process context bo\n");
++			return retval;
++		}
++		memset(pdd->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);
++	}
++
+ 	pqn = kzalloc(sizeof(*pqn), GFP_KERNEL);
+ 	if (!pqn) {
+ 		retval = -ENOMEM;
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index c4c6538eabae6d..260b6b8d29fd6c 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -3306,6 +3306,11 @@ static int dm_resume(void *handle)
+ 
+ 		return 0;
+ 	}
++
++	/* leave display off for S4 sequence */
++	if (adev->in_s4)
++		return 0;
++
+ 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
+ 	dc_state_release(dm_state->context);
+ 	dm_state->context = dc_state_create(dm->dc, NULL);
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
+index 6e2fce329d7382..d37ecfdde4f1bc 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
+@@ -63,6 +63,10 @@ void dmub_hw_lock_mgr_inbox0_cmd(struct dc_dmub_srv *dmub_srv,
+ 
+ bool should_use_dmub_lock(struct dc_link *link)
+ {
++	/* ASIC doesn't support DMUB */
++	if (!link->ctx->dmub_srv)
++		return false;
++
+ 	if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1)
+ 		return true;
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
+index 1c10ba4dcddea4..abe51cf3aab29e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
+@@ -281,10 +281,10 @@ static void CalculateDynamicMetadataParameters(
+ 		double DISPCLK,
+ 		double DCFClkDeepSleep,
+ 		double PixelClock,
+-		long HTotal,
+-		long VBlank,
+-		long DynamicMetadataTransmittedBytes,
+-		long DynamicMetadataLinesBeforeActiveRequired,
++		unsigned int HTotal,
++		unsigned int VBlank,
++		unsigned int DynamicMetadataTransmittedBytes,
++		int DynamicMetadataLinesBeforeActiveRequired,
+ 		int InterlaceEnable,
+ 		bool ProgressiveToInterlaceUnitInOPP,
+ 		double *Tsetup,
+@@ -3277,8 +3277,8 @@ static double CalculateWriteBackDelay(
+ 
+ 
+ static void CalculateDynamicMetadataParameters(int MaxInterDCNTileRepeaters, double DPPCLK, double DISPCLK,
+-		double DCFClkDeepSleep, double PixelClock, long HTotal, long VBlank, long DynamicMetadataTransmittedBytes,
+-		long DynamicMetadataLinesBeforeActiveRequired, int InterlaceEnable, bool ProgressiveToInterlaceUnitInOPP,
++		double DCFClkDeepSleep, double PixelClock, unsigned int HTotal, unsigned int VBlank, unsigned int DynamicMetadataTransmittedBytes,
++		int DynamicMetadataLinesBeforeActiveRequired, int InterlaceEnable, bool ProgressiveToInterlaceUnitInOPP,
+ 		double *Tsetup, double *Tdmbf, double *Tdmec, double *Tdmsks)
+ {
+ 	double TotalRepeaterDelayTime = 0;
+diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c
+index 0aa4e4d343b04e..2c1316d1b6eb85 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c
++++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c
+@@ -139,9 +139,8 @@ bool core_dcn4_initialize(struct dml2_core_initialize_in_out *in_out)
+ 		core->clean_me_up.mode_lib.ip.subvp_fw_processing_delay_us = core_dcn4_ip_caps_base.subvp_pstate_allow_width_us;
+ 		core->clean_me_up.mode_lib.ip.subvp_swath_height_margin_lines = core_dcn4_ip_caps_base.subvp_swath_height_margin_lines;
+ 	} else {
+-			memcpy(&core->clean_me_up.mode_lib.ip, &core_dcn4_ip_caps_base, sizeof(struct dml2_core_ip_params));
++		memcpy(&core->clean_me_up.mode_lib.ip, &core_dcn4_ip_caps_base, sizeof(struct dml2_core_ip_params));
+ 		patch_ip_params_with_ip_caps(&core->clean_me_up.mode_lib.ip, in_out->ip_caps);
+-
+ 		core->clean_me_up.mode_lib.ip.imall_supported = false;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+index 0d71db7be325da..0ce1766c859f5c 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+@@ -459,8 +459,7 @@ int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
+ 	}
+ 	if (read_arg) {
+ 		smu_cmn_read_arg(smu, read_arg);
+-		dev_dbg(adev->dev, "smu send message: %s(%d) param: 0x%08x, resp: 0x%08x,\
+-			readval: 0x%08x\n",
++		dev_dbg(adev->dev, "smu send message: %s(%d) param: 0x%08x, resp: 0x%08x, readval: 0x%08x\n",
+ 			smu_get_message_name(smu, msg), index, param, reg, *read_arg);
+ 	} else {
+ 		dev_dbg(adev->dev, "smu send message: %s(%d) param: 0x%08x, resp: 0x%08x\n",
+diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
+index 41f72d458487fb..9ba2a667a1f3a1 100644
+--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
++++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
+@@ -2463,9 +2463,9 @@ static int cdns_mhdp_probe(struct platform_device *pdev)
+ 	if (!mhdp)
+ 		return -ENOMEM;
+ 
+-	clk = devm_clk_get(dev, NULL);
++	clk = devm_clk_get_enabled(dev, NULL);
+ 	if (IS_ERR(clk)) {
+-		dev_err(dev, "couldn't get clk: %ld\n", PTR_ERR(clk));
++		dev_err(dev, "couldn't get and enable clk: %ld\n", PTR_ERR(clk));
+ 		return PTR_ERR(clk);
+ 	}
+ 
+@@ -2504,14 +2504,12 @@ static int cdns_mhdp_probe(struct platform_device *pdev)
+ 
+ 	mhdp->info = of_device_get_match_data(dev);
+ 
+-	clk_prepare_enable(clk);
+-
+ 	pm_runtime_enable(dev);
+ 	ret = pm_runtime_resume_and_get(dev);
+ 	if (ret < 0) {
+ 		dev_err(dev, "pm_runtime_resume_and_get failed\n");
+ 		pm_runtime_disable(dev);
+-		goto clk_disable;
++		return ret;
+ 	}
+ 
+ 	if (mhdp->info && mhdp->info->ops && mhdp->info->ops->init) {
+@@ -2590,8 +2588,6 @@ static int cdns_mhdp_probe(struct platform_device *pdev)
+ runtime_put:
+ 	pm_runtime_put_sync(dev);
+ 	pm_runtime_disable(dev);
+-clk_disable:
+-	clk_disable_unprepare(mhdp->clk);
+ 
+ 	return ret;
+ }
+@@ -2632,8 +2628,6 @@ static void cdns_mhdp_remove(struct platform_device *pdev)
+ 	cancel_work_sync(&mhdp->modeset_retry_work);
+ 	flush_work(&mhdp->hpd_work);
+ 	/* Ignoring mhdp->hdcp.check_work and mhdp->hdcp.prop_work here. */
+-
+-	clk_disable_unprepare(mhdp->clk);
+ }
+ 
+ static const struct of_device_id mhdp_ids[] = {
+diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c
+index faee8e2e82a053..967aa24b7c5377 100644
+--- a/drivers/gpu/drm/bridge/ite-it6505.c
++++ b/drivers/gpu/drm/bridge/ite-it6505.c
+@@ -2042,12 +2042,13 @@ static bool it6505_hdcp_part2_ksvlist_check(struct it6505 *it6505)
+ 			continue;
+ 		}
+ 
+-		for (i = 0; i < 5; i++) {
++		for (i = 0; i < 5; i++)
+ 			if (bv[i][3] != av[i][0] || bv[i][2] != av[i][1] ||
+-			    av[i][1] != av[i][2] || bv[i][0] != av[i][3])
++			    bv[i][1] != av[i][2] || bv[i][0] != av[i][3])
+ 				break;
+ 
+-			DRM_DEV_DEBUG_DRIVER(dev, "V' all match!! %d, %d", retry, i);
++		if (i == 5) {
++			DRM_DEV_DEBUG_DRIVER(dev, "V' all match!! %d", retry);
+ 			return true;
+ 		}
+ 	}
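[Aside, not part of the patch: besides fixing the av/bv comparison typo, the it6505 hunk above moves the success test outside the loop — the loop breaks on the first mismatch, and "all five matched" is detected by the index having reached 5. The idiom in isolation, with made-up data:

#include <stdio.h>

int main(void)
{
	int a[5] = { 1, 2, 3, 4, 5 };
	int b[5] = { 1, 2, 3, 4, 5 };
	int i;

	for (i = 0; i < 5; i++)
		if (a[i] != b[i])
			break;

	if (i == 5)
		puts("V' all match!!");
	else
		printf("mismatch at %d\n", i);
	return 0;
}

In the original code the success message and return sat inside the loop body, so a single matching entry was treated as full verification.]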
+diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+index 582cf4f73a74c7..95ce50ed53acf6 100644
+--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
++++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+@@ -480,6 +480,7 @@ static int ti_sn65dsi86_add_aux_device(struct ti_sn65dsi86 *pdata,
+ 				       const char *name)
+ {
+ 	struct device *dev = pdata->dev;
++	const struct i2c_client *client = to_i2c_client(dev);
+ 	struct auxiliary_device *aux;
+ 	int ret;
+ 
+@@ -488,6 +489,7 @@ static int ti_sn65dsi86_add_aux_device(struct ti_sn65dsi86 *pdata,
+ 		return -ENOMEM;
+ 
+ 	aux->name = name;
++	aux->id = (client->adapter->nr << 10) | client->addr;
+ 	aux->dev.parent = dev;
+ 	aux->dev.release = ti_sn65dsi86_aux_device_release;
+ 	device_set_of_node_from_dev(&aux->dev, dev);
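[Aside, not part of the patch: the id computed above packs the I2C adapter number and client address into one value so two SN65DSI86 chips on different buses, or at different addresses, never register auxiliary devices with colliding ids. A quick check of the packing — values here are examples only:

#include <assert.h>
#include <stdio.h>

static int aux_id(int adapter_nr, int addr)
{
	/* A 7-bit I2C address fits below bit 10, so the fields can't overlap. */
	return (adapter_nr << 10) | addr;
}

int main(void)
{
	assert(aux_id(0, 0x2c) != aux_id(1, 0x2c)); /* same addr, other bus */
	assert(aux_id(0, 0x2c) != aux_id(0, 0x2d)); /* same bus, other addr */
	printf("0x%x\n", aux_id(1, 0x2c));          /* 0x42c */
	return 0;
}
]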
+diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
+index da6ff36623d30f..3e5f721d754005 100644
+--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
+@@ -179,13 +179,13 @@ static int
+ drm_dp_mst_rad_to_str(const u8 rad[8], u8 lct, char *out, size_t len)
+ {
+ 	int i;
+-	u8 unpacked_rad[16];
++	u8 unpacked_rad[16] = {};
+ 
+-	for (i = 0; i < lct; i++) {
++	for (i = 1; i < lct; i++) {
+ 		if (i % 2)
+-			unpacked_rad[i] = rad[i / 2] >> 4;
++			unpacked_rad[i] = rad[(i - 1) / 2] >> 4;
+ 		else
+-			unpacked_rad[i] = rad[i / 2] & BIT_MASK(4);
++			unpacked_rad[i] = rad[(i - 1) / 2] & 0xF;
+ 	}
+ 
+ 	/* TODO: Eventually add something to printk so we can format the rad
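[Aside, not part of the patch: the corrected unpacking above starts at i = 1 because the first hop of an MST relative address is not stored in the byte array — nibble k of rad[] describes hop k+1, high nibble first. A standalone version with a made-up RAD:

#include <stdio.h>

int main(void)
{
	unsigned char rad[8] = { 0x21, 0x43, 0, 0, 0, 0, 0, 0 };
	unsigned char unpacked[16] = { 0 };
	int lct = 5;	/* link count total; hops 1..4 carry port numbers */

	for (int i = 1; i < lct; i++) {
		if (i % 2)
			unpacked[i] = rad[(i - 1) / 2] >> 4;
		else
			unpacked[i] = rad[(i - 1) / 2] & 0xF;
	}

	for (int i = 1; i < lct; i++)
		printf("hop %d: port %d\n", i, unpacked[i]);
	return 0;
}

Zero-initializing unpacked[] also matters: the old code indexed the array by lct without initializing every slot, so stale stack bytes could leak into the printed path.]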
+diff --git a/drivers/gpu/drm/mediatek/mtk_crtc.c b/drivers/gpu/drm/mediatek/mtk_crtc.c
+index 5674f5707cca83..8f6fba4217ece5 100644
+--- a/drivers/gpu/drm/mediatek/mtk_crtc.c
++++ b/drivers/gpu/drm/mediatek/mtk_crtc.c
+@@ -620,13 +620,16 @@ static void mtk_crtc_update_config(struct mtk_crtc *mtk_crtc, bool needs_vblank)
+ 
+ 		mbox_send_message(mtk_crtc->cmdq_client.chan, cmdq_handle);
+ 		mbox_client_txdone(mtk_crtc->cmdq_client.chan, 0);
++		goto update_config_out;
+ 	}
+-#else
++#endif
+ 	spin_lock_irqsave(&mtk_crtc->config_lock, flags);
+ 	mtk_crtc->config_updating = false;
+ 	spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);
+-#endif
+ 
++#if IS_REACHABLE(CONFIG_MTK_CMDQ)
++update_config_out:
++#endif
+ 	mutex_unlock(&mtk_crtc->hw_lock);
+ }
+ 
+diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c
+index cad65ea851edc7..4979d49ae25a61 100644
+--- a/drivers/gpu/drm/mediatek/mtk_dp.c
++++ b/drivers/gpu/drm/mediatek/mtk_dp.c
+@@ -1746,7 +1746,7 @@ static int mtk_dp_parse_capabilities(struct mtk_dp *mtk_dp)
+ 
+ 	ret = drm_dp_dpcd_readb(&mtk_dp->aux, DP_MSTM_CAP, &val);
+ 	if (ret < 1) {
+-		drm_err(mtk_dp->drm_dev, "Read mstm cap failed\n");
++		dev_err(mtk_dp->dev, "Read mstm cap failed: %zd\n", ret);
+ 		return ret == 0 ? -EIO : ret;
+ 	}
+ 
+@@ -1756,7 +1756,7 @@ static int mtk_dp_parse_capabilities(struct mtk_dp *mtk_dp)
+ 					DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0,
+ 					&val);
+ 		if (ret < 1) {
+-			drm_err(mtk_dp->drm_dev, "Read irq vector failed\n");
++			dev_err(mtk_dp->dev, "Read irq vector failed: %zd\n", ret);
+ 			return ret == 0 ? -EIO : ret;
+ 		}
+ 
+@@ -2039,7 +2039,7 @@ static int mtk_dp_wait_hpd_asserted(struct drm_dp_aux *mtk_aux, unsigned long wa
+ 
+ 	ret = mtk_dp_parse_capabilities(mtk_dp);
+ 	if (ret) {
+-		drm_err(mtk_dp->drm_dev, "Can't parse capabilities\n");
++		dev_err(mtk_dp->dev, "Can't parse capabilities: %d\n", ret);
+ 		return ret;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
+index b9b7fd08b7d7e9..88f3dfeb4731d3 100644
+--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
++++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
+@@ -1108,12 +1108,12 @@ static ssize_t mtk_dsi_host_transfer(struct mipi_dsi_host *host,
+ 				     const struct mipi_dsi_msg *msg)
+ {
+ 	struct mtk_dsi *dsi = host_to_dsi(host);
+-	u32 recv_cnt, i;
++	ssize_t recv_cnt;
+ 	u8 read_data[16];
+ 	void *src_addr;
+ 	u8 irq_flag = CMD_DONE_INT_FLAG;
+ 	u32 dsi_mode;
+-	int ret;
++	int ret, i;
+ 
+ 	dsi_mode = readl(dsi->regs + DSI_MODE_CTRL);
+ 	if (dsi_mode & MODE) {
+@@ -1162,7 +1162,7 @@ static ssize_t mtk_dsi_host_transfer(struct mipi_dsi_host *host,
+ 	if (recv_cnt)
+ 		memcpy(msg->rx_buf, src_addr, recv_cnt);
+ 
+-	DRM_INFO("dsi get %d byte data from the panel address(0x%x)\n",
++	DRM_INFO("dsi get %zd byte data from the panel address(0x%x)\n",
+ 		 recv_cnt, *((u8 *)(msg->tx_buf)));
+ 
+ restore_dsi_mode:
+diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
+index 7687f673964ec7..1aad8e6cf52e75 100644
+--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
++++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
+@@ -137,7 +137,7 @@ enum hdmi_aud_channel_swap_type {
+ 
+ struct hdmi_audio_param {
+ 	enum hdmi_audio_coding_type aud_codec;
+-	enum hdmi_audio_sample_size aud_sampe_size;
++	enum hdmi_audio_sample_size aud_sample_size;
+ 	enum hdmi_aud_input_type aud_input_type;
+ 	enum hdmi_aud_i2s_fmt aud_i2s_fmt;
+ 	enum hdmi_aud_mclk aud_mclk;
+@@ -173,6 +173,7 @@ struct mtk_hdmi {
+ 	unsigned int sys_offset;
+ 	void __iomem *regs;
+ 	enum hdmi_colorspace csp;
++	struct platform_device *audio_pdev;
+ 	struct hdmi_audio_param aud_param;
+ 	bool audio_enable;
+ 	bool powered;
+@@ -1074,7 +1075,7 @@ static int mtk_hdmi_output_init(struct mtk_hdmi *hdmi)
+ 
+ 	hdmi->csp = HDMI_COLORSPACE_RGB;
+ 	aud_param->aud_codec = HDMI_AUDIO_CODING_TYPE_PCM;
+-	aud_param->aud_sampe_size = HDMI_AUDIO_SAMPLE_SIZE_16;
++	aud_param->aud_sample_size = HDMI_AUDIO_SAMPLE_SIZE_16;
+ 	aud_param->aud_input_type = HDMI_AUD_INPUT_I2S;
+ 	aud_param->aud_i2s_fmt = HDMI_I2S_MODE_I2S_24BIT;
+ 	aud_param->aud_mclk = HDMI_AUD_MCLK_128FS;
+@@ -1572,14 +1573,14 @@ static int mtk_hdmi_audio_hw_params(struct device *dev, void *data,
+ 	switch (daifmt->fmt) {
+ 	case HDMI_I2S:
+ 		hdmi_params.aud_codec = HDMI_AUDIO_CODING_TYPE_PCM;
+-		hdmi_params.aud_sampe_size = HDMI_AUDIO_SAMPLE_SIZE_16;
++		hdmi_params.aud_sample_size = HDMI_AUDIO_SAMPLE_SIZE_16;
+ 		hdmi_params.aud_input_type = HDMI_AUD_INPUT_I2S;
+ 		hdmi_params.aud_i2s_fmt = HDMI_I2S_MODE_I2S_24BIT;
+ 		hdmi_params.aud_mclk = HDMI_AUD_MCLK_128FS;
+ 		break;
+ 	case HDMI_SPDIF:
+ 		hdmi_params.aud_codec = HDMI_AUDIO_CODING_TYPE_PCM;
+-		hdmi_params.aud_sampe_size = HDMI_AUDIO_SAMPLE_SIZE_16;
++		hdmi_params.aud_sample_size = HDMI_AUDIO_SAMPLE_SIZE_16;
+ 		hdmi_params.aud_input_type = HDMI_AUD_INPUT_SPDIF;
+ 		break;
+ 	default:
+@@ -1663,6 +1664,11 @@ static const struct hdmi_codec_ops mtk_hdmi_audio_codec_ops = {
+ 	.no_capture_mute = 1,
+ };
+ 
++static void mtk_hdmi_unregister_audio_driver(void *data)
++{
++	platform_device_unregister(data);
++}
++
+ static int mtk_hdmi_register_audio_driver(struct device *dev)
+ {
+ 	struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
+@@ -1672,13 +1678,20 @@ static int mtk_hdmi_register_audio_driver(struct device *dev)
+ 		.i2s = 1,
+ 		.data = hdmi,
+ 	};
+-	struct platform_device *pdev;
++	int ret;
+ 
+-	pdev = platform_device_register_data(dev, HDMI_CODEC_DRV_NAME,
+-					     PLATFORM_DEVID_AUTO, &codec_data,
+-					     sizeof(codec_data));
+-	if (IS_ERR(pdev))
+-		return PTR_ERR(pdev);
++	hdmi->audio_pdev = platform_device_register_data(dev,
++							 HDMI_CODEC_DRV_NAME,
++							 PLATFORM_DEVID_AUTO,
++							 &codec_data,
++							 sizeof(codec_data));
++	if (IS_ERR(hdmi->audio_pdev))
++		return PTR_ERR(hdmi->audio_pdev);
++
++	ret = devm_add_action_or_reset(dev, mtk_hdmi_unregister_audio_driver,
++				       hdmi->audio_pdev);
++	if (ret)
++		return ret;
+ 
+ 	DRM_INFO("%s driver bound to HDMI\n", HDMI_CODEC_DRV_NAME);
+ 	return 0;
+diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
+index 0fcae53c0b140b..159665cb6b14f9 100644
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
+@@ -1507,6 +1507,8 @@ static void a6xx_get_indexed_registers(struct msm_gpu *gpu,
+ 
+ 	/* Restore the size in the hardware */
+ 	gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, mempool_size);
++
++	a6xx_state->nr_indexed_regs = count;
+ }
+ 
+ static void a7xx_get_indexed_registers(struct msm_gpu *gpu,
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+index db6c57900781d9..ecd595215a6bea 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+@@ -1191,10 +1191,6 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
+ 
+ 	DRM_DEBUG_ATOMIC("%s: check\n", dpu_crtc->name);
+ 
+-	/* force a full mode set if active state changed */
+-	if (crtc_state->active_changed)
+-		crtc_state->mode_changed = true;
+-
+ 	if (cstate->num_mixers) {
+ 		rc = _dpu_crtc_check_and_setup_lm_bounds(crtc, crtc_state);
+ 		if (rc)
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+index 2cf8150adf81ff..47b514c89ce667 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+@@ -718,12 +718,11 @@ static int dpu_encoder_virt_atomic_check(
+ 		crtc_state->mode_changed = true;
+ 	/*
+ 	 * Release and Allocate resources on every modeset
+-	 * Dont allocate when active is false.
+ 	 */
+ 	if (drm_atomic_crtc_needs_modeset(crtc_state)) {
+ 		dpu_rm_release(global_state, drm_enc);
+ 
+-		if (!crtc_state->active_changed || crtc_state->enable)
++		if (crtc_state->enable)
+ 			ret = dpu_rm_reserve(&dpu_kms->rm, global_state,
+ 					drm_enc, crtc_state, topology);
+ 		if (!ret)
+diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
+index a98d24b7cb00b4..7459fb8c517746 100644
+--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
++++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
+@@ -846,7 +846,7 @@ static void dsi_ctrl_enable(struct msm_dsi_host *msm_host,
+ 		dsi_write(msm_host, REG_DSI_CPHY_MODE_CTRL, BIT(0));
+ }
+ 
+-static void dsi_update_dsc_timing(struct msm_dsi_host *msm_host, bool is_cmd_mode, u32 hdisplay)
++static void dsi_update_dsc_timing(struct msm_dsi_host *msm_host, bool is_cmd_mode)
+ {
+ 	struct drm_dsc_config *dsc = msm_host->dsc;
+ 	u32 reg, reg_ctrl, reg_ctrl2;
+@@ -858,7 +858,7 @@ static void dsi_update_dsc_timing(struct msm_dsi_host *msm_host, bool is_cmd_mod
+ 	/* first calculate dsc parameters and then program
+ 	 * compress mode registers
+ 	 */
+-	slice_per_intf = msm_dsc_get_slices_per_intf(dsc, hdisplay);
++	slice_per_intf = dsc->slice_count;
+ 
+ 	total_bytes_per_intf = dsc->slice_chunk_size * slice_per_intf;
+ 	bytes_per_pkt = dsc->slice_chunk_size; /* * slice_per_pkt; */
+@@ -991,7 +991,7 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
+ 
+ 	if (msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) {
+ 		if (msm_host->dsc)
+-			dsi_update_dsc_timing(msm_host, false, mode->hdisplay);
++			dsi_update_dsc_timing(msm_host, false);
+ 
+ 		dsi_write(msm_host, REG_DSI_ACTIVE_H,
+ 			DSI_ACTIVE_H_START(ha_start) |
+@@ -1012,7 +1012,7 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
+ 			DSI_ACTIVE_VSYNC_VPOS_END(vs_end));
+ 	} else {		/* command mode */
+ 		if (msm_host->dsc)
+-			dsi_update_dsc_timing(msm_host, true, mode->hdisplay);
++			dsi_update_dsc_timing(msm_host, true);
+ 
+ 		/* image data and 1 byte write_memory_start cmd */
+ 		if (!msm_host->dsc)
+diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
+index a210b7c9e5ca28..4fabb01345aa2a 100644
+--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
++++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
+@@ -74,17 +74,35 @@ static int dsi_mgr_setup_components(int id)
+ 	int ret;
+ 
+ 	if (!IS_BONDED_DSI()) {
++		/*
++		 * Set the usecase before calling msm_dsi_host_register(), which would
++		 * otherwise program the PLL source mux based on a default usecase.
++		 */
++		msm_dsi_phy_set_usecase(msm_dsi->phy, MSM_DSI_PHY_STANDALONE);
++		msm_dsi_host_set_phy_mode(msm_dsi->host, msm_dsi->phy);
++
+ 		ret = msm_dsi_host_register(msm_dsi->host);
+ 		if (ret)
+ 			return ret;
+-
+-		msm_dsi_phy_set_usecase(msm_dsi->phy, MSM_DSI_PHY_STANDALONE);
+-		msm_dsi_host_set_phy_mode(msm_dsi->host, msm_dsi->phy);
+ 	} else if (other_dsi) {
+ 		struct msm_dsi *master_link_dsi = IS_MASTER_DSI_LINK(id) ?
+ 							msm_dsi : other_dsi;
+ 		struct msm_dsi *slave_link_dsi = IS_MASTER_DSI_LINK(id) ?
+ 							other_dsi : msm_dsi;
++
++		/*
++		 * PLL0 is to drive both DSI link clocks in bonded DSI mode.
++		 *
++		 * Set the usecase before calling msm_dsi_host_register(), which would
++		 * otherwise program the PLL source mux based on a default usecase.
++		 */
++		msm_dsi_phy_set_usecase(clk_master_dsi->phy,
++					MSM_DSI_PHY_MASTER);
++		msm_dsi_phy_set_usecase(clk_slave_dsi->phy,
++					MSM_DSI_PHY_SLAVE);
++		msm_dsi_host_set_phy_mode(msm_dsi->host, msm_dsi->phy);
++		msm_dsi_host_set_phy_mode(other_dsi->host, other_dsi->phy);
++
+ 		/* Register slave host first, so that slave DSI device
+ 		 * has a chance to probe, and do not block the master
+ 		 * DSI device's probe.
+@@ -98,14 +116,6 @@ static int dsi_mgr_setup_components(int id)
+ 		ret = msm_dsi_host_register(master_link_dsi->host);
+ 		if (ret)
+ 			return ret;
+-
+-		/* PLL0 is to drive both 2 DSI link clocks in bonded DSI mode. */
+-		msm_dsi_phy_set_usecase(clk_master_dsi->phy,
+-					MSM_DSI_PHY_MASTER);
+-		msm_dsi_phy_set_usecase(clk_slave_dsi->phy,
+-					MSM_DSI_PHY_SLAVE);
+-		msm_dsi_host_set_phy_mode(msm_dsi->host, msm_dsi->phy);
+-		msm_dsi_host_set_phy_mode(other_dsi->host, other_dsi->phy);
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
+index 798168180c1ab6..a2c87c84aa05b8 100644
+--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
++++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
+@@ -305,7 +305,7 @@ static void dsi_pll_commit(struct dsi_pll_7nm *pll, struct dsi_pll_config *confi
+ 	writel(pll->phy->cphy_mode ? 0x00 : 0x10,
+ 	       base + REG_DSI_7nm_PHY_PLL_CMODE_1);
+ 	writel(config->pll_clock_inverters,
+-	       base + REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS);
++	       base + REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS_1);
+ }
+ 
+ static int dsi_pll_7nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
+diff --git a/drivers/gpu/drm/msm/msm_dsc_helper.h b/drivers/gpu/drm/msm/msm_dsc_helper.h
+index b9049fe1e27907..63f95523b2cbb4 100644
+--- a/drivers/gpu/drm/msm/msm_dsc_helper.h
++++ b/drivers/gpu/drm/msm/msm_dsc_helper.h
+@@ -12,17 +12,6 @@
+ #include <linux/math.h>
+ #include <drm/display/drm_dsc_helper.h>
+ 
+-/**
+- * msm_dsc_get_slices_per_intf() - calculate number of slices per interface
+- * @dsc: Pointer to drm dsc config struct
+- * @intf_width: interface width in pixels
+- * Returns: Integer representing the number of slices for the given interface
+- */
+-static inline u32 msm_dsc_get_slices_per_intf(const struct drm_dsc_config *dsc, u32 intf_width)
+-{
+-	return DIV_ROUND_UP(intf_width, dsc->slice_width);
+-}
+-
+ /**
+  * msm_dsc_get_bytes_per_line() - calculate bytes per line
+  * @dsc: Pointer to drm dsc config struct
+diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9882t.c b/drivers/gpu/drm/panel/panel-ilitek-ili9882t.c
+index 266a087fe14c13..3c24a63b6be8c7 100644
+--- a/drivers/gpu/drm/panel/panel-ilitek-ili9882t.c
++++ b/drivers/gpu/drm/panel/panel-ilitek-ili9882t.c
+@@ -607,7 +607,7 @@ static int ili9882t_add(struct ili9882t *ili)
+ 
+ 	ili->enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
+ 	if (IS_ERR(ili->enable_gpio)) {
+-		dev_err(dev, "cannot get reset-gpios %ld\n",
++		dev_err(dev, "cannot get enable-gpios %ld\n",
+ 			PTR_ERR(ili->enable_gpio));
+ 		return PTR_ERR(ili->enable_gpio);
+ 	}
+diff --git a/drivers/gpu/drm/panthor/panthor_fw.h b/drivers/gpu/drm/panthor/panthor_fw.h
+index 22448abde99232..6598d96c6d2aab 100644
+--- a/drivers/gpu/drm/panthor/panthor_fw.h
++++ b/drivers/gpu/drm/panthor/panthor_fw.h
+@@ -102,9 +102,9 @@ struct panthor_fw_cs_output_iface {
+ #define CS_STATUS_BLOCKED_REASON_SB_WAIT	1
+ #define CS_STATUS_BLOCKED_REASON_PROGRESS_WAIT	2
+ #define CS_STATUS_BLOCKED_REASON_SYNC_WAIT	3
+-#define CS_STATUS_BLOCKED_REASON_DEFERRED	5
+-#define CS_STATUS_BLOCKED_REASON_RES		6
+-#define CS_STATUS_BLOCKED_REASON_FLUSH		7
++#define CS_STATUS_BLOCKED_REASON_DEFERRED	4
++#define CS_STATUS_BLOCKED_REASON_RESOURCE	5
++#define CS_STATUS_BLOCKED_REASON_FLUSH		6
+ #define CS_STATUS_BLOCKED_REASON_MASK		GENMASK(3, 0)
+ 	u32 status_blocked_reason;
+ 	u32 status_wait_sync_value_hi;
+diff --git a/drivers/gpu/drm/solomon/ssd130x-spi.c b/drivers/gpu/drm/solomon/ssd130x-spi.c
+index 84bfde31d1724a..fd1b858dcb788e 100644
+--- a/drivers/gpu/drm/solomon/ssd130x-spi.c
++++ b/drivers/gpu/drm/solomon/ssd130x-spi.c
+@@ -151,7 +151,6 @@ static const struct of_device_id ssd130x_of_match[] = {
+ };
+ MODULE_DEVICE_TABLE(of, ssd130x_of_match);
+ 
+-#if IS_MODULE(CONFIG_DRM_SSD130X_SPI)
+ /*
+  * The SPI core always reports a MODALIAS uevent of the form "spi:<dev>", even
+  * if the device was registered via OF. This means that the module will not be
+@@ -160,7 +159,7 @@ MODULE_DEVICE_TABLE(of, ssd130x_of_match);
+  * To workaround this issue, add a SPI device ID table. Even when this should
+  * not be needed for this driver to match the registered SPI devices.
+  */
+-static const struct spi_device_id ssd130x_spi_table[] = {
++static const struct spi_device_id ssd130x_spi_id[] = {
+ 	/* ssd130x family */
+ 	{ "sh1106",  SH1106_ID },
+ 	{ "ssd1305", SSD1305_ID },
+@@ -175,14 +174,14 @@ static const struct spi_device_id ssd130x_spi_table[] = {
+ 	{ "ssd1331", SSD1331_ID },
+ 	{ /* sentinel */ }
+ };
+-MODULE_DEVICE_TABLE(spi, ssd130x_spi_table);
+-#endif
++MODULE_DEVICE_TABLE(spi, ssd130x_spi_id);
+ 
+ static struct spi_driver ssd130x_spi_driver = {
+ 	.driver = {
+ 		.name = DRIVER_NAME,
+ 		.of_match_table = ssd130x_of_match,
+ 	},
++	.id_table = ssd130x_spi_id,
+ 	.probe = ssd130x_spi_probe,
+ 	.remove = ssd130x_spi_remove,
+ 	.shutdown = ssd130x_spi_shutdown,
+diff --git a/drivers/gpu/drm/solomon/ssd130x.c b/drivers/gpu/drm/solomon/ssd130x.c
+index 6f51bcf774e27c..06f5057690bd87 100644
+--- a/drivers/gpu/drm/solomon/ssd130x.c
++++ b/drivers/gpu/drm/solomon/ssd130x.c
+@@ -880,7 +880,7 @@ static int ssd132x_update_rect(struct ssd130x_device *ssd130x,
+ 			u8 n1 = buf[i * width + j];
+ 			u8 n2 = buf[i * width + j + 1];
+ 
+-			data_array[array_idx++] = (n2 << 4) | n1;
++			data_array[array_idx++] = (n2 & 0xf0) | (n1 >> 4);
+ 		}
+ 	}
+ 
+@@ -1037,7 +1037,7 @@ static int ssd132x_fb_blit_rect(struct drm_framebuffer *fb,
+ 				struct drm_format_conv_state *fmtcnv_state)
+ {
+ 	struct ssd130x_device *ssd130x = drm_to_ssd130x(fb->dev);
+-	unsigned int dst_pitch = drm_rect_width(rect);
++	unsigned int dst_pitch;
+ 	struct iosys_map dst;
+ 	int ret = 0;
+ 
+@@ -1046,6 +1046,8 @@ static int ssd132x_fb_blit_rect(struct drm_framebuffer *fb,
+ 	rect->x2 = min_t(unsigned int, round_up(rect->x2, SSD132X_SEGMENT_WIDTH),
+ 			 ssd130x->width);
+ 
++	dst_pitch = drm_rect_width(rect);
++
+ 	ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
+ 	if (ret)
+ 		return ret;
+diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
+index 0c1a713b7b7b3b..be642ee739c4fb 100644
+--- a/drivers/gpu/drm/vkms/vkms_drv.c
++++ b/drivers/gpu/drm/vkms/vkms_drv.c
+@@ -245,17 +245,19 @@ static int __init vkms_init(void)
+ 	if (!config)
+ 		return -ENOMEM;
+ 
+-	default_config = config;
+-
+ 	config->cursor = enable_cursor;
+ 	config->writeback = enable_writeback;
+ 	config->overlay = enable_overlay;
+ 
+ 	ret = vkms_create(config);
+-	if (ret)
++	if (ret) {
+ 		kfree(config);
++		return ret;
++	}
+ 
+-	return ret;
++	default_config = config;
++
++	return 0;
+ }
+ 
+ static void vkms_destroy(struct vkms_config *config)
+@@ -279,9 +281,10 @@ static void vkms_destroy(struct vkms_config *config)
+ 
+ static void __exit vkms_exit(void)
+ {
+-	if (default_config->dev)
+-		vkms_destroy(default_config);
++	if (!default_config)
++		return;
+ 
++	vkms_destroy(default_config);
+ 	kfree(default_config);
+ }
+ 
+diff --git a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
+index f5781939de9c35..a25b22238e3d2f 100644
+--- a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
++++ b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
+@@ -231,6 +231,8 @@ static int zynqmp_dpsub_probe(struct platform_device *pdev)
+ 	if (ret)
+ 		return ret;
+ 
++	dma_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
++
+ 	/* Try the reserved memory. Proceed if there's none. */
+ 	of_reserved_mem_device_init(&pdev->dev);
+ 
+diff --git a/drivers/greybus/gb-beagleplay.c b/drivers/greybus/gb-beagleplay.c
+index 473ac3f2d38219..da31f1131afcab 100644
+--- a/drivers/greybus/gb-beagleplay.c
++++ b/drivers/greybus/gb-beagleplay.c
+@@ -912,7 +912,9 @@ static enum fw_upload_err cc1352_prepare(struct fw_upload *fw_upload,
+ 		cc1352_bootloader_reset(bg);
+ 		WRITE_ONCE(bg->flashing_mode, false);
+ 		msleep(200);
+-		gb_greybus_init(bg);
++		if (gb_greybus_init(bg) < 0)
++			return dev_err_probe(&bg->sd->dev, FW_UPLOAD_ERR_RW_ERROR,
++					     "Failed to initialize greybus");
+ 		gb_beagleplay_start_svc(bg);
+ 		return FW_UPLOAD_ERR_FW_INVALID;
+ 	}
+diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
+index 496dab54c73a82..f2900ee2ef8582 100644
+--- a/drivers/hid/Makefile
++++ b/drivers/hid/Makefile
+@@ -165,7 +165,6 @@ obj-$(CONFIG_USB_KBD)		+= usbhid/
+ obj-$(CONFIG_I2C_HID_CORE)	+= i2c-hid/
+ 
+ obj-$(CONFIG_INTEL_ISH_HID)	+= intel-ish-hid/
+-obj-$(INTEL_ISH_FIRMWARE_DOWNLOADER)	+= intel-ish-hid/
+ 
+ obj-$(CONFIG_AMD_SFH_HID)       += amd-sfh-hid/
+ 
+diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
+index 4e87380d3edd6b..bcca89ef73606d 100644
+--- a/drivers/hid/i2c-hid/i2c-hid-core.c
++++ b/drivers/hid/i2c-hid/i2c-hid-core.c
+@@ -284,7 +284,7 @@ static int i2c_hid_get_report(struct i2c_hid *ihid,
+ 			     ihid->rawbuf, recv_len + sizeof(__le16));
+ 	if (error) {
+ 		dev_err(&ihid->client->dev,
+-			"failed to set a report to device: %d\n", error);
++			"failed to get a report from device: %d\n", error);
+ 		return error;
+ 	}
+ 
+diff --git a/drivers/hwmon/nct6775-core.c b/drivers/hwmon/nct6775-core.c
+index fa3351351825b7..79bc67ffb9986f 100644
+--- a/drivers/hwmon/nct6775-core.c
++++ b/drivers/hwmon/nct6775-core.c
+@@ -273,8 +273,8 @@ static const s8 NCT6776_BEEP_BITS[NUM_BEEP_BITS] = {
+ static const u16 NCT6776_REG_TOLERANCE_H[] = {
+ 	0x10c, 0x20c, 0x30c, 0x80c, 0x90c, 0xa0c, 0xb0c };
+ 
+-static const u8 NCT6776_REG_PWM_MODE[] = { 0x04, 0, 0, 0, 0, 0 };
+-static const u8 NCT6776_PWM_MODE_MASK[] = { 0x01, 0, 0, 0, 0, 0 };
++static const u8 NCT6776_REG_PWM_MODE[] = { 0x04, 0, 0, 0, 0, 0, 0 };
++static const u8 NCT6776_PWM_MODE_MASK[] = { 0x01, 0, 0, 0, 0, 0, 0 };
+ 
+ static const u16 NCT6776_REG_FAN_MIN[] = {
+ 	0x63a, 0x63c, 0x63e, 0x640, 0x642, 0x64a, 0x64c };
+diff --git a/drivers/hwtracing/coresight/coresight-catu.c b/drivers/hwtracing/coresight/coresight-catu.c
+index bfea880d6dfbf1..d8ad64ea81f119 100644
+--- a/drivers/hwtracing/coresight/coresight-catu.c
++++ b/drivers/hwtracing/coresight/coresight-catu.c
+@@ -269,7 +269,7 @@ catu_init_sg_table(struct device *catu_dev, int node,
+ 	 * Each table can address upto 1MB and we can have
+ 	 * CATU_PAGES_PER_SYSPAGE tables in a system page.
+ 	 */
+-	nr_tpages = DIV_ROUND_UP(size, SZ_1M) / CATU_PAGES_PER_SYSPAGE;
++	nr_tpages = DIV_ROUND_UP(size, CATU_PAGES_PER_SYSPAGE * SZ_1M);
+ 	catu_table = tmc_alloc_sg_table(catu_dev, node, nr_tpages,
+ 					size >> PAGE_SHIFT, pages);
+ 	if (IS_ERR(catu_table))
+diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c
+index ea38ecf26fcbfb..c42aa9fddab9b7 100644
+--- a/drivers/hwtracing/coresight/coresight-core.c
++++ b/drivers/hwtracing/coresight/coresight-core.c
+@@ -1017,18 +1017,20 @@ static void coresight_remove_conns(struct coresight_device *csdev)
+ }
+ 
+ /**
+- * coresight_timeout - loop until a bit has changed to a specific register
+- *			state.
++ * coresight_timeout_action - loop until a bit has changed to a specific register
++ *                  state, with a callback after every trial.
+  * @csa: coresight device access for the device
+  * @offset: Offset of the register from the base of the device.
+  * @position: the position of the bit of interest.
+  * @value: the value the bit should have.
++ * @cb: Callback invoked after each trial.
+  *
+  * Return: 0 as soon as the bit has taken the desired state or -EAGAIN if
+  * TIMEOUT_US has elapsed, which ever happens first.
+  */
+-int coresight_timeout(struct csdev_access *csa, u32 offset,
+-		      int position, int value)
++int coresight_timeout_action(struct csdev_access *csa, u32 offset,
++			     int position, int value,
++			     coresight_timeout_cb_t cb)
+ {
+ 	int i;
+ 	u32 val;
+@@ -1044,7 +1046,8 @@ int coresight_timeout(struct csdev_access *csa, u32 offset,
+ 			if (!(val & BIT(position)))
+ 				return 0;
+ 		}
+-
++		if (cb)
++			cb(csa, offset, position, value);
+ 		/*
+ 		 * Delay is arbitrary - the specification doesn't say how long
+ 		 * we are expected to wait.  Extra check required to make sure
+@@ -1056,6 +1059,13 @@ int coresight_timeout(struct csdev_access *csa, u32 offset,
+ 
+ 	return -EAGAIN;
+ }
++EXPORT_SYMBOL_GPL(coresight_timeout_action);
++
++int coresight_timeout(struct csdev_access *csa, u32 offset,
++		      int position, int value)
++{
++	return coresight_timeout_action(csa, offset, position, value, NULL);
++}
+ EXPORT_SYMBOL_GPL(coresight_timeout);
+ 
+ u32 coresight_relaxed_read32(struct coresight_device *csdev, u32 offset)
+diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c
+index 66d44a404ad0cd..be8b46f26ddc83 100644
+--- a/drivers/hwtracing/coresight/coresight-etm4x-core.c
++++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c
+@@ -399,6 +399,29 @@ static void etm4_check_arch_features(struct etmv4_drvdata *drvdata,
+ }
+ #endif /* CONFIG_ETM4X_IMPDEF_FEATURE */
+ 
++static void etm4x_sys_ins_barrier(struct csdev_access *csa, u32 offset, int pos, int val)
++{
++	if (!csa->io_mem)
++		isb();
++}
++
++/*
++ * etm4x_wait_status: Poll for TRCSTATR.<pos> == <val>. While using system
++ * instructions to access the trace unit, each access must be separated by a
++ * synchronization barrier. See ARM IHI0064H.b section "4.3.7 Synchronization of
++ * register updates", system instructions section, under "Notes":
++ *
++ *   "In particular, whenever disabling or enabling the trace unit, a poll of
++ *    TRCSTATR needs explicit synchronization between each read of TRCSTATR"
++ */
++static int etm4x_wait_status(struct csdev_access *csa, int pos, int val)
++{
++	if (!csa->io_mem)
++		return coresight_timeout_action(csa, TRCSTATR, pos, val,
++						etm4x_sys_ins_barrier);
++	return coresight_timeout(csa, TRCSTATR, pos, val);
++}
++
+ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
+ {
+ 	int i, rc;
+@@ -430,7 +453,7 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
+ 		isb();
+ 
+ 	/* wait for TRCSTATR.IDLE to go up */
+-	if (coresight_timeout(csa, TRCSTATR, TRCSTATR_IDLE_BIT, 1))
++	if (etm4x_wait_status(csa, TRCSTATR_IDLE_BIT, 1))
+ 		dev_err(etm_dev,
+ 			"timeout while waiting for Idle Trace Status\n");
+ 	if (drvdata->nr_pe)
+@@ -523,7 +546,7 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
+ 		isb();
+ 
+ 	/* wait for TRCSTATR.IDLE to go back down to '0' */
+-	if (coresight_timeout(csa, TRCSTATR, TRCSTATR_IDLE_BIT, 0))
++	if (etm4x_wait_status(csa, TRCSTATR_IDLE_BIT, 0))
+ 		dev_err(etm_dev,
+ 			"timeout while waiting for Idle Trace Status\n");
+ 
+@@ -906,10 +929,25 @@ static void etm4_disable_hw(void *info)
+ 	tsb_csync();
+ 	etm4x_relaxed_write32(csa, control, TRCPRGCTLR);
+ 
++	/*
++	 * As recommended by section 4.3.7 ("Synchronization when using system
++	 * instructions to program the trace unit") of ARM IHI 0064H.b, the
++	 * self-hosted trace analyzer must perform a Context synchronization
++	 * event between writing to the TRCPRGCTLR and reading the TRCSTATR.
++	 */
++	if (!csa->io_mem)
++		isb();
++
+ 	/* wait for TRCSTATR.PMSTABLE to go to '1' */
+-	if (coresight_timeout(csa, TRCSTATR, TRCSTATR_PMSTABLE_BIT, 1))
++	if (etm4x_wait_status(csa, TRCSTATR_PMSTABLE_BIT, 1))
+ 		dev_err(etm_dev,
+ 			"timeout while waiting for PM stable Trace Status\n");
++	/*
++	 * As recommended by section 4.3.7 (Synchronization of register updates)
++	 * of ARM IHI 0064H.b.
++	 */
++	isb();
++
+ 	/* read the status of the single shot comparators */
+ 	for (i = 0; i < drvdata->nr_ss_cmp; i++) {
+ 		config->ss_status[i] =
+@@ -1711,7 +1749,7 @@ static int __etm4_cpu_save(struct etmv4_drvdata *drvdata)
+ 	etm4_os_lock(drvdata);
+ 
+ 	/* wait for TRCSTATR.PMSTABLE to go up */
+-	if (coresight_timeout(csa, TRCSTATR, TRCSTATR_PMSTABLE_BIT, 1)) {
++	if (etm4x_wait_status(csa, TRCSTATR_PMSTABLE_BIT, 1)) {
+ 		dev_err(etm_dev,
+ 			"timeout while waiting for PM Stable Status\n");
+ 		etm4_os_unlock(drvdata);
+@@ -1802,7 +1840,7 @@ static int __etm4_cpu_save(struct etmv4_drvdata *drvdata)
+ 		state->trcpdcr = etm4x_read32(csa, TRCPDCR);
+ 
+ 	/* wait for TRCSTATR.IDLE to go up */
+-	if (coresight_timeout(csa, TRCSTATR, TRCSTATR_IDLE_BIT, 1)) {
++	if (etm4x_wait_status(csa, TRCSTATR_IDLE_BIT, 1)) {
+ 		dev_err(etm_dev,
+ 			"timeout while waiting for Idle Trace Status\n");
+ 		etm4_os_unlock(drvdata);
+diff --git a/drivers/i3c/master/svc-i3c-master.c b/drivers/i3c/master/svc-i3c-master.c
+index 565af3759813bd..87f98fa8afd582 100644
+--- a/drivers/i3c/master/svc-i3c-master.c
++++ b/drivers/i3c/master/svc-i3c-master.c
+@@ -990,7 +990,7 @@ static int svc_i3c_update_ibirules(struct svc_i3c_master *master)
+ 
+ 	/* Create the IBIRULES register for both cases */
+ 	i3c_bus_for_each_i3cdev(&master->base.bus, dev) {
+-		if (I3C_BCR_DEVICE_ROLE(dev->info.bcr) == I3C_BCR_I3C_MASTER)
++		if (!(dev->info.bcr & I3C_BCR_IBI_REQ_CAP))
+ 			continue;
+ 
+ 		if (dev->info.bcr & I3C_BCR_IBI_PAYLOAD) {
+diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
+index 62e6369e22696c..de207526babee2 100644
+--- a/drivers/iio/accel/mma8452.c
++++ b/drivers/iio/accel/mma8452.c
+@@ -711,7 +711,7 @@ static int mma8452_write_raw(struct iio_dev *indio_dev,
+ 			     int val, int val2, long mask)
+ {
+ 	struct mma8452_data *data = iio_priv(indio_dev);
+-	int i, ret;
++	int i, j, ret;
+ 
+ 	ret = iio_device_claim_direct_mode(indio_dev);
+ 	if (ret)
+@@ -771,14 +771,18 @@ static int mma8452_write_raw(struct iio_dev *indio_dev,
+ 		break;
+ 
+ 	case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+-		ret = mma8452_get_odr_index(data);
++		j = mma8452_get_odr_index(data);
+ 
+ 		for (i = 0; i < ARRAY_SIZE(mma8452_os_ratio); i++) {
+-			if (mma8452_os_ratio[i][ret] == val) {
++			if (mma8452_os_ratio[i][j] == val) {
+ 				ret = mma8452_set_power_mode(data, i);
+ 				break;
+ 			}
+ 		}
++		if (i == ARRAY_SIZE(mma8452_os_ratio)) {
++			ret = -EINVAL;
++			break;
++		}
+ 		break;
+ 	default:
+ 		ret = -EINVAL;
+diff --git a/drivers/iio/accel/msa311.c b/drivers/iio/accel/msa311.c
+index 57025354c7cd58..f484be27058d98 100644
+--- a/drivers/iio/accel/msa311.c
++++ b/drivers/iio/accel/msa311.c
+@@ -593,23 +593,25 @@ static int msa311_read_raw_data(struct iio_dev *indio_dev,
+ 	__le16 axis;
+ 	int err;
+ 
+-	err = pm_runtime_resume_and_get(dev);
++	err = iio_device_claim_direct_mode(indio_dev);
+ 	if (err)
+ 		return err;
+ 
+-	err = iio_device_claim_direct_mode(indio_dev);
+-	if (err)
++	err = pm_runtime_resume_and_get(dev);
++	if (err) {
++		iio_device_release_direct_mode(indio_dev);
+ 		return err;
++	}
+ 
+ 	mutex_lock(&msa311->lock);
+ 	err = msa311_get_axis(msa311, chan, &axis);
+ 	mutex_unlock(&msa311->lock);
+ 
+-	iio_device_release_direct_mode(indio_dev);
+-
+ 	pm_runtime_mark_last_busy(dev);
+ 	pm_runtime_put_autosuspend(dev);
+ 
++	iio_device_release_direct_mode(indio_dev);
++
+ 	if (err) {
+ 		dev_err(dev, "can't get axis %s (%pe)\n",
+ 			chan->datasheet_name, ERR_PTR(err));
+@@ -755,10 +757,6 @@ static int msa311_write_samp_freq(struct iio_dev *indio_dev, int val, int val2)
+ 	unsigned int odr;
+ 	int err;
+ 
+-	err = pm_runtime_resume_and_get(dev);
+-	if (err)
+-		return err;
+-
+ 	/*
+ 	 * Sampling frequency changing is prohibited when buffer mode is
+ 	 * enabled, because sometimes MSA311 chip returns outliers during
+@@ -768,6 +766,12 @@ static int msa311_write_samp_freq(struct iio_dev *indio_dev, int val, int val2)
+ 	if (err)
+ 		return err;
+ 
++	err = pm_runtime_resume_and_get(dev);
++	if (err) {
++		iio_device_release_direct_mode(indio_dev);
++		return err;
++	}
++
+ 	err = -EINVAL;
+ 	for (odr = 0; odr < ARRAY_SIZE(msa311_odr_table); odr++)
+ 		if (val == msa311_odr_table[odr].integral &&
+@@ -778,11 +782,11 @@ static int msa311_write_samp_freq(struct iio_dev *indio_dev, int val, int val2)
+ 			break;
+ 		}
+ 
+-	iio_device_release_direct_mode(indio_dev);
+-
+ 	pm_runtime_mark_last_busy(dev);
+ 	pm_runtime_put_autosuspend(dev);
+ 
++	iio_device_release_direct_mode(indio_dev);
++
+ 	if (err)
+ 		dev_err(dev, "can't update frequency (%pe)\n", ERR_PTR(err));
+ 
+diff --git a/drivers/iio/adc/ad4130.c b/drivers/iio/adc/ad4130.c
+index de32cc9d18c5ef..712f95f53c9ecd 100644
+--- a/drivers/iio/adc/ad4130.c
++++ b/drivers/iio/adc/ad4130.c
+@@ -223,6 +223,10 @@ enum ad4130_pin_function {
+ 	AD4130_PIN_FN_VBIAS = BIT(3),
+ };
+ 
++/*
++ * If you make adaptations to this struct, you most likely have to adapt
++ * ad4130_setup_info_eq(), too.
++ */
+ struct ad4130_setup_info {
+ 	unsigned int			iout0_val;
+ 	unsigned int			iout1_val;
+@@ -591,6 +595,40 @@ static irqreturn_t ad4130_irq_handler(int irq, void *private)
+ 	return IRQ_HANDLED;
+ }
+ 
++static bool ad4130_setup_info_eq(struct ad4130_setup_info *a,
++				 struct ad4130_setup_info *b)
++{
++	/*
++	 * This is just to make sure that the comparison is adapted after
++	 * struct ad4130_setup_info was changed.
++	 */
++	static_assert(sizeof(*a) ==
++		      sizeof(struct {
++				     unsigned int iout0_val;
++				     unsigned int iout1_val;
++				     unsigned int burnout;
++				     unsigned int pga;
++				     unsigned int fs;
++				     u32 ref_sel;
++				     enum ad4130_filter_mode filter_mode;
++				     bool ref_bufp;
++				     bool ref_bufm;
++			     }));
++
++	if (a->iout0_val != b->iout0_val ||
++	    a->iout1_val != b->iout1_val ||
++	    a->burnout != b->burnout ||
++	    a->pga != b->pga ||
++	    a->fs != b->fs ||
++	    a->ref_sel != b->ref_sel ||
++	    a->filter_mode != b->filter_mode ||
++	    a->ref_bufp != b->ref_bufp ||
++	    a->ref_bufm != b->ref_bufm)
++		return false;
++
++	return true;
++}
++
+ static int ad4130_find_slot(struct ad4130_state *st,
+ 			    struct ad4130_setup_info *target_setup_info,
+ 			    unsigned int *slot, bool *overwrite)
+@@ -604,8 +642,7 @@ static int ad4130_find_slot(struct ad4130_state *st,
+ 		struct ad4130_slot_info *slot_info = &st->slots_info[i];
+ 
+ 		/* Immediately accept a matching setup info. */
+-		if (!memcmp(target_setup_info, &slot_info->setup,
+-			    sizeof(*target_setup_info))) {
++		if (ad4130_setup_info_eq(target_setup_info, &slot_info->setup)) {
+ 			*slot = i;
+ 			return 0;
+ 		}
+diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c
+index 8d94bc2b1cac35..30a7392c4f8b95 100644
+--- a/drivers/iio/adc/ad7124.c
++++ b/drivers/iio/adc/ad7124.c
+@@ -147,7 +147,11 @@ struct ad7124_chip_info {
+ struct ad7124_channel_config {
+ 	bool live;
+ 	unsigned int cfg_slot;
+-	/* Following fields are used to compare equality. */
++	/*
++	 * Following fields are used to compare for equality. If you
++	 * make adaptations in it, you most likely also have to adapt
++	 * ad7124_find_similar_live_cfg(), too.
++	 */
+ 	struct_group(config_props,
+ 		enum ad7124_ref_sel refsel;
+ 		bool bipolar;
+@@ -334,15 +338,38 @@ static struct ad7124_channel_config *ad7124_find_similar_live_cfg(struct ad7124_
+ 								  struct ad7124_channel_config *cfg)
+ {
+ 	struct ad7124_channel_config *cfg_aux;
+-	ptrdiff_t cmp_size;
+ 	int i;
+ 
+-	cmp_size = sizeof_field(struct ad7124_channel_config, config_props);
++	/*
++	 * This is just to make sure that the comparison is adapted after
++	 * struct ad7124_channel_config was changed.
++	 */
++	static_assert(sizeof_field(struct ad7124_channel_config, config_props) ==
++		      sizeof(struct {
++				     enum ad7124_ref_sel refsel;
++				     bool bipolar;
++				     bool buf_positive;
++				     bool buf_negative;
++				     unsigned int vref_mv;
++				     unsigned int pga_bits;
++				     unsigned int odr;
++				     unsigned int odr_sel_bits;
++				     unsigned int filter_type;
++			     }));
++
+ 	for (i = 0; i < st->num_channels; i++) {
+ 		cfg_aux = &st->channels[i].cfg;
+ 
+ 		if (cfg_aux->live &&
+-		    !memcmp(&cfg->config_props, &cfg_aux->config_props, cmp_size))
++		    cfg->refsel == cfg_aux->refsel &&
++		    cfg->bipolar == cfg_aux->bipolar &&
++		    cfg->buf_positive == cfg_aux->buf_positive &&
++		    cfg->buf_negative == cfg_aux->buf_negative &&
++		    cfg->vref_mv == cfg_aux->vref_mv &&
++		    cfg->pga_bits == cfg_aux->pga_bits &&
++		    cfg->odr == cfg_aux->odr &&
++		    cfg->odr_sel_bits == cfg_aux->odr_sel_bits &&
++		    cfg->filter_type == cfg_aux->filter_type)
+ 			return cfg_aux;
+ 	}
+ 
+diff --git a/drivers/iio/adc/ad7173.c b/drivers/iio/adc/ad7173.c
+index 5a65be00dd190f..2eebc6f761a632 100644
+--- a/drivers/iio/adc/ad7173.c
++++ b/drivers/iio/adc/ad7173.c
+@@ -181,7 +181,11 @@ struct ad7173_channel_config {
+ 	u8 cfg_slot;
+ 	bool live;
+ 
+-	/* Following fields are used to compare equality. */
++	/*
++	 * The following fields are used to compare for equality. If you
++	 * make adaptations to them, you most likely have to adapt
++	 * ad7173_find_live_config(), too.
++	 */
+ 	struct_group(config_props,
+ 		bool bipolar;
+ 		bool input_buf;
+@@ -582,15 +586,28 @@ static struct ad7173_channel_config *
+ ad7173_find_live_config(struct ad7173_state *st, struct ad7173_channel_config *cfg)
+ {
+ 	struct ad7173_channel_config *cfg_aux;
+-	ptrdiff_t cmp_size;
+ 	int i;
+ 
+-	cmp_size = sizeof_field(struct ad7173_channel_config, config_props);
++	/*
++	 * This is just to make sure that the comparison is adapted after
++	 * struct ad7173_channel_config was changed.
++	 */
++	static_assert(sizeof_field(struct ad7173_channel_config, config_props) ==
++		      sizeof(struct {
++				     bool bipolar;
++				     bool input_buf;
++				     u8 odr;
++				     u8 ref_sel;
++			     }));
++
+ 	for (i = 0; i < st->num_channels; i++) {
+ 		cfg_aux = &st->channels[i].cfg;
+ 
+ 		if (cfg_aux->live &&
+-		    !memcmp(&cfg->config_props, &cfg_aux->config_props, cmp_size))
++		    cfg->bipolar == cfg_aux->bipolar &&
++		    cfg->input_buf == cfg_aux->input_buf &&
++		    cfg->odr == cfg_aux->odr &&
++		    cfg->ref_sel == cfg_aux->ref_sel)
+ 			return cfg_aux;
+ 	}
+ 	return NULL;
+diff --git a/drivers/iio/adc/ad7768-1.c b/drivers/iio/adc/ad7768-1.c
+index 113703fb724544..6f8816483f1a02 100644
+--- a/drivers/iio/adc/ad7768-1.c
++++ b/drivers/iio/adc/ad7768-1.c
+@@ -574,6 +574,21 @@ static int ad7768_probe(struct spi_device *spi)
+ 		return -ENOMEM;
+ 
+ 	st = iio_priv(indio_dev);
++	/*
++	 * The datasheet recommends keeping the SDI line high when data is not
++	 * being clocked out of the controller and the SPI clock is free
++	 * running, to prevent an accidental reset.
++	 * Since many controllers do not yet support the SPI_MOSI_IDLE_HIGH
++	 * flag, only request the MOSI idle state when the controller supports
++	 * it.
++	 */
++	if (spi->controller->mode_bits & SPI_MOSI_IDLE_HIGH) {
++		spi->mode |= SPI_MOSI_IDLE_HIGH;
++		ret = spi_setup(spi);
++		if (ret < 0)
++			return ret;
++	}
++
+ 	st->spi = spi;
+ 
+ 	st->vref = devm_regulator_get(&spi->dev, "vref");
+diff --git a/drivers/iio/industrialio-backend.c b/drivers/iio/industrialio-backend.c
+index fb34a8e4d04e74..42e0ee683ef6b2 100644
+--- a/drivers/iio/industrialio-backend.c
++++ b/drivers/iio/industrialio-backend.c
+@@ -155,10 +155,12 @@ static ssize_t iio_backend_debugfs_write_reg(struct file *file,
+ 	ssize_t rc;
+ 	int ret;
+ 
+-	rc = simple_write_to_buffer(buf, sizeof(buf), ppos, userbuf, count);
++	rc = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, userbuf, count);
+ 	if (rc < 0)
+ 		return rc;
+ 
++	buf[count] = '\0';
++
+ 	ret = sscanf(buf, "%i %i", &back->cached_reg_addr, &val);
+ 
+ 	switch (ret) {
+diff --git a/drivers/iio/light/veml6075.c b/drivers/iio/light/veml6075.c
+index 05d4c0e9015d6e..859891e8f11521 100644
+--- a/drivers/iio/light/veml6075.c
++++ b/drivers/iio/light/veml6075.c
+@@ -195,13 +195,17 @@ static int veml6075_read_uv_direct(struct veml6075_data *data, int chan,
+ 
+ static int veml6075_read_int_time_index(struct veml6075_data *data)
+ {
+-	int ret, conf;
++	int ret, conf, int_index;
+ 
+ 	ret = regmap_read(data->regmap, VEML6075_CMD_CONF, &conf);
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	return FIELD_GET(VEML6075_CONF_IT, conf);
++	int_index = FIELD_GET(VEML6075_CONF_IT, conf);
++	if (int_index >= ARRAY_SIZE(veml6075_it_ms))
++		return -EINVAL;
++
++	return int_index;
+ }
+ 
+ static int veml6075_read_int_time_ms(struct veml6075_data *data, int *val)
+diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
+index e029401b56805f..46102f179955ba 100644
+--- a/drivers/infiniband/core/device.c
++++ b/drivers/infiniband/core/device.c
+@@ -544,6 +544,8 @@ static struct class ib_class = {
+ static void rdma_init_coredev(struct ib_core_device *coredev,
+ 			      struct ib_device *dev, struct net *net)
+ {
++	bool is_full_dev = &dev->coredev == coredev;
++
+ 	/* This BUILD_BUG_ON is intended to catch layout change
+ 	 * of union of ib_core_device and device.
+ 	 * dev must be the first element as ib_core and providers
+@@ -555,6 +557,13 @@ static void rdma_init_coredev(struct ib_core_device *coredev,
+ 
+ 	coredev->dev.class = &ib_class;
+ 	coredev->dev.groups = dev->groups;
++
++	/*
++	 * Don't expose hw counters outside of the init namespace.
++	 */
++	if (!is_full_dev && dev->hw_stats_attr_index)
++		coredev->dev.groups[dev->hw_stats_attr_index] = NULL;
++
+ 	device_initialize(&coredev->dev);
+ 	coredev->owner = dev;
+ 	INIT_LIST_HEAD(&coredev->port_list);
+@@ -1357,9 +1366,11 @@ static void ib_device_notify_register(struct ib_device *device)
+ 	u32 port;
+ 	int ret;
+ 
++	down_read(&devices_rwsem);
++
+ 	ret = rdma_nl_notify_event(device, 0, RDMA_REGISTER_EVENT);
+ 	if (ret)
+-		return;
++		goto out;
+ 
+ 	rdma_for_each_port(device, port) {
+ 		netdev = ib_device_get_netdev(device, port);
+@@ -1370,8 +1381,11 @@ static void ib_device_notify_register(struct ib_device *device)
+ 					   RDMA_NETDEV_ATTACH_EVENT);
+ 		dev_put(netdev);
+ 		if (ret)
+-			return;
++			goto out;
+ 	}
++
++out:
++	up_read(&devices_rwsem);
+ }
+ 
+ /**
+diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
+index 1fd54d5c4dd8b7..73f3a0b9a54b5f 100644
+--- a/drivers/infiniband/core/mad.c
++++ b/drivers/infiniband/core/mad.c
+@@ -2671,11 +2671,11 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
+ 				    struct ib_mad_private *mad)
+ {
+ 	unsigned long flags;
+-	int post, ret;
+ 	struct ib_mad_private *mad_priv;
+ 	struct ib_sge sg_list;
+ 	struct ib_recv_wr recv_wr;
+ 	struct ib_mad_queue *recv_queue = &qp_info->recv_queue;
++	int ret = 0;
+ 
+ 	/* Initialize common scatter list fields */
+ 	sg_list.lkey = qp_info->port_priv->pd->local_dma_lkey;
+@@ -2685,7 +2685,7 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
+ 	recv_wr.sg_list = &sg_list;
+ 	recv_wr.num_sge = 1;
+ 
+-	do {
++	while (true) {
+ 		/* Allocate and map receive buffer */
+ 		if (mad) {
+ 			mad_priv = mad;
+@@ -2693,10 +2693,8 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
+ 		} else {
+ 			mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv),
+ 						     GFP_ATOMIC);
+-			if (!mad_priv) {
+-				ret = -ENOMEM;
+-				break;
+-			}
++			if (!mad_priv)
++				return -ENOMEM;
+ 		}
+ 		sg_list.length = mad_priv_dma_size(mad_priv);
+ 		sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
+@@ -2705,37 +2703,41 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
+ 						 DMA_FROM_DEVICE);
+ 		if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
+ 						  sg_list.addr))) {
+-			kfree(mad_priv);
+ 			ret = -ENOMEM;
+-			break;
++			goto free_mad_priv;
+ 		}
+ 		mad_priv->header.mapping = sg_list.addr;
+ 		mad_priv->header.mad_list.mad_queue = recv_queue;
+ 		mad_priv->header.mad_list.cqe.done = ib_mad_recv_done;
+ 		recv_wr.wr_cqe = &mad_priv->header.mad_list.cqe;
+-
+-		/* Post receive WR */
+ 		spin_lock_irqsave(&recv_queue->lock, flags);
+-		post = (++recv_queue->count < recv_queue->max_active);
+-		list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
++		if (recv_queue->count >= recv_queue->max_active) {
++			/* Fully populated the receive queue */
++			spin_unlock_irqrestore(&recv_queue->lock, flags);
++			break;
++		}
++		recv_queue->count++;
++		list_add_tail(&mad_priv->header.mad_list.list,
++			      &recv_queue->list);
+ 		spin_unlock_irqrestore(&recv_queue->lock, flags);
++
+ 		ret = ib_post_recv(qp_info->qp, &recv_wr, NULL);
+ 		if (ret) {
+ 			spin_lock_irqsave(&recv_queue->lock, flags);
+ 			list_del(&mad_priv->header.mad_list.list);
+ 			recv_queue->count--;
+ 			spin_unlock_irqrestore(&recv_queue->lock, flags);
+-			ib_dma_unmap_single(qp_info->port_priv->device,
+-					    mad_priv->header.mapping,
+-					    mad_priv_dma_size(mad_priv),
+-					    DMA_FROM_DEVICE);
+-			kfree(mad_priv);
+ 			dev_err(&qp_info->port_priv->device->dev,
+ 				"ib_post_recv failed: %d\n", ret);
+ 			break;
+ 		}
+-	} while (post);
++	}
+ 
++	ib_dma_unmap_single(qp_info->port_priv->device,
++			    mad_priv->header.mapping,
++			    mad_priv_dma_size(mad_priv), DMA_FROM_DEVICE);
++free_mad_priv:
++	kfree(mad_priv);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
+index 9f97bef0214975..210092b9bf17d2 100644
+--- a/drivers/infiniband/core/sysfs.c
++++ b/drivers/infiniband/core/sysfs.c
+@@ -988,6 +988,7 @@ int ib_setup_device_attrs(struct ib_device *ibdev)
+ 	for (i = 0; i != ARRAY_SIZE(ibdev->groups); i++)
+ 		if (!ibdev->groups[i]) {
+ 			ibdev->groups[i] = &data->group;
++			ibdev->hw_stats_attr_index = i;
+ 			return 0;
+ 		}
+ 	WARN(true, "struct ib_device->groups is too small");
+diff --git a/drivers/infiniband/hw/erdma/erdma_cm.c b/drivers/infiniband/hw/erdma/erdma_cm.c
+index 771059a8eb7d7f..e349e8d2fb50a8 100644
+--- a/drivers/infiniband/hw/erdma/erdma_cm.c
++++ b/drivers/infiniband/hw/erdma/erdma_cm.c
+@@ -705,7 +705,6 @@ static void erdma_accept_newconn(struct erdma_cep *cep)
+ 		erdma_cancel_mpatimer(new_cep);
+ 
+ 		erdma_cep_put(new_cep);
+-		new_cep->sock = NULL;
+ 	}
+ 
+ 	if (new_s) {
+diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c
+index 457cea6d990958..f6bf289041bfe3 100644
+--- a/drivers/infiniband/hw/mana/main.c
++++ b/drivers/infiniband/hw/mana/main.c
+@@ -358,7 +358,7 @@ static int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem
+ 	unsigned int tail = 0;
+ 	u64 *page_addr_list;
+ 	void *request_buf;
+-	int err;
++	int err = 0;
+ 
+ 	gc = mdev_to_gc(dev);
+ 	hwc = gc->hwc.driver_data;
+diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
+index 4c54dc57806901..1aa5311b03e9f5 100644
+--- a/drivers/infiniband/hw/mlx5/cq.c
++++ b/drivers/infiniband/hw/mlx5/cq.c
+@@ -490,7 +490,7 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq,
+ 	}
+ 
+ 	qpn = ntohl(cqe64->sop_drop_qpn) & 0xffffff;
+-	if (!*cur_qp || (qpn != (*cur_qp)->ibqp.qp_num)) {
++	if (!*cur_qp || (qpn != (*cur_qp)->trans_qp.base.mqp.qpn)) {
+ 		/* We do not have to take the QP table lock here,
+ 		 * because CQs will be locked while QPs are removed
+ 		 * from the table.
+diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
+index 753faa9ad06a88..068eac3bdb50ba 100644
+--- a/drivers/infiniband/hw/mlx5/mr.c
++++ b/drivers/infiniband/hw/mlx5/mr.c
+@@ -56,7 +56,7 @@ static void
+ create_mkey_callback(int status, struct mlx5_async_work *context);
+ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
+ 				     u64 iova, int access_flags,
+-				     unsigned int page_size, bool populate,
++				     unsigned long page_size, bool populate,
+ 				     int access_mode);
+ static int __mlx5_ib_dereg_mr(struct ib_mr *ibmr);
+ 
+@@ -919,6 +919,25 @@ mlx5r_cache_create_ent_locked(struct mlx5_ib_dev *dev,
+ 	return ERR_PTR(ret);
+ }
+ 
++static void mlx5r_destroy_cache_entries(struct mlx5_ib_dev *dev)
++{
++	struct rb_root *root = &dev->cache.rb_root;
++	struct mlx5_cache_ent *ent;
++	struct rb_node *node;
++
++	mutex_lock(&dev->cache.rb_lock);
++	node = rb_first(root);
++	while (node) {
++		ent = rb_entry(node, struct mlx5_cache_ent, node);
++		node = rb_next(node);
++		clean_keys(dev, ent);
++		rb_erase(&ent->node, root);
++		mlx5r_mkeys_uninit(ent);
++		kfree(ent);
++	}
++	mutex_unlock(&dev->cache.rb_lock);
++}
++
+ int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev)
+ {
+ 	struct mlx5_mkey_cache *cache = &dev->cache;
+@@ -970,6 +989,8 @@ int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev)
+ err:
+ 	mutex_unlock(&cache->rb_lock);
+ 	mlx5_mkey_cache_debugfs_cleanup(dev);
++	mlx5r_destroy_cache_entries(dev);
++	destroy_workqueue(cache->wq);
+ 	mlx5_ib_warn(dev, "failed to create mkey cache entry\n");
+ 	return ret;
+ }
+@@ -1003,17 +1024,7 @@ void mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev)
+ 	mlx5_cmd_cleanup_async_ctx(&dev->async_ctx);
+ 
+ 	/* At this point all entries are disabled and have no concurrent work. */
+-	mutex_lock(&dev->cache.rb_lock);
+-	node = rb_first(root);
+-	while (node) {
+-		ent = rb_entry(node, struct mlx5_cache_ent, node);
+-		node = rb_next(node);
+-		clean_keys(dev, ent);
+-		rb_erase(&ent->node, root);
+-		mlx5r_mkeys_uninit(ent);
+-		kfree(ent);
+-	}
+-	mutex_unlock(&dev->cache.rb_lock);
++	mlx5r_destroy_cache_entries(dev);
+ 
+ 	destroy_workqueue(dev->cache.wq);
+ 	del_timer_sync(&dev->delay_timer);
+@@ -1115,7 +1126,7 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
+ 	struct mlx5r_cache_rb_key rb_key = {};
+ 	struct mlx5_cache_ent *ent;
+ 	struct mlx5_ib_mr *mr;
+-	unsigned int page_size;
++	unsigned long page_size;
+ 
+ 	if (umem->is_dmabuf)
+ 		page_size = mlx5_umem_dmabuf_default_pgsz(umem, iova);
+@@ -1219,7 +1230,7 @@ reg_create_crossing_vhca_mr(struct ib_pd *pd, u64 iova, u64 length, int access_f
+  */
+ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
+ 				     u64 iova, int access_flags,
+-				     unsigned int page_size, bool populate,
++				     unsigned long page_size, bool populate,
+ 				     int access_mode)
+ {
+ 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
+@@ -1425,7 +1436,7 @@ static struct ib_mr *create_real_mr(struct ib_pd *pd, struct ib_umem *umem,
+ 		mr = alloc_cacheable_mr(pd, umem, iova, access_flags,
+ 					MLX5_MKC_ACCESS_MODE_MTT);
+ 	} else {
+-		unsigned int page_size =
++		unsigned long page_size =
+ 			mlx5_umem_mkc_find_best_pgsz(dev, umem, iova);
+ 
+ 		mutex_lock(&dev->slow_path_mutex);
+diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
+index b4e2a6f9cb9c3d..e158d5b1ab17b1 100644
+--- a/drivers/infiniband/hw/mlx5/odp.c
++++ b/drivers/infiniband/hw/mlx5/odp.c
+@@ -309,9 +309,6 @@ static bool mlx5_ib_invalidate_range(struct mmu_interval_notifier *mni,
+ 				blk_start_idx = idx;
+ 				in_block = 1;
+ 			}
+-
+-			/* Count page invalidations */
+-			invalidations += idx - blk_start_idx + 1;
+ 		} else {
+ 			u64 umr_offset = idx & umr_block_mask;
+ 
+@@ -321,14 +318,19 @@ static bool mlx5_ib_invalidate_range(struct mmu_interval_notifier *mni,
+ 						     MLX5_IB_UPD_XLT_ZAP |
+ 						     MLX5_IB_UPD_XLT_ATOMIC);
+ 				in_block = 0;
++				/* Count page invalidations */
++				invalidations += idx - blk_start_idx + 1;
+ 			}
+ 		}
+ 	}
+-	if (in_block)
++	if (in_block) {
+ 		mlx5r_umr_update_xlt(mr, blk_start_idx,
+ 				     idx - blk_start_idx + 1, 0,
+ 				     MLX5_IB_UPD_XLT_ZAP |
+ 				     MLX5_IB_UPD_XLT_ATOMIC);
++		/* Count page invalidations */
++		invalidations += idx - blk_start_idx + 1;
++	}
+ 
+ 	mlx5_update_odp_stats(mr, invalidations, invalidations);
+ 
+diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c
+index 001c290bc07b7d..cda0995b167988 100644
+--- a/drivers/leds/led-core.c
++++ b/drivers/leds/led-core.c
+@@ -159,8 +159,19 @@ static void set_brightness_delayed(struct work_struct *ws)
+ 	 * before this work item runs once. To make sure this works properly
+ 	 * handle LED_SET_BRIGHTNESS_OFF first.
+ 	 */
+-	if (test_and_clear_bit(LED_SET_BRIGHTNESS_OFF, &led_cdev->work_flags))
++	if (test_and_clear_bit(LED_SET_BRIGHTNESS_OFF, &led_cdev->work_flags)) {
+ 		set_brightness_delayed_set_brightness(led_cdev, LED_OFF);
++		/*
++		 * Consecutive led_set_brightness(LED_OFF),
++		 * led_set_brightness(LED_FULL) calls could have been executed
++		 * out of order (LED_FULL first) if work_flags was set between
++		 * the LED_SET_BRIGHTNESS_OFF and LED_SET_BRIGHTNESS steps of
++		 * this work. To avoid ending up with the LED turned off, turn
++		 * the LED on again.
++		 */
++		if (led_cdev->delayed_set_value != LED_OFF)
++			set_bit(LED_SET_BRIGHTNESS, &led_cdev->work_flags);
++	}
+ 
+ 	if (test_and_clear_bit(LED_SET_BRIGHTNESS, &led_cdev->work_flags))
+ 		set_brightness_delayed_set_brightness(led_cdev, led_cdev->delayed_set_value);
+@@ -331,10 +342,13 @@ void led_set_brightness_nopm(struct led_classdev *led_cdev, unsigned int value)
+ 	 * change is done immediately afterwards (before the work runs),
+ 	 * it uses a separate work_flag.
+ 	 */
+-	if (value) {
+-		led_cdev->delayed_set_value = value;
++	led_cdev->delayed_set_value = value;
++	/* Ensure delayed_set_value is seen before work_flags modification */
++	smp_mb__before_atomic();
++
++	if (value)
+ 		set_bit(LED_SET_BRIGHTNESS, &led_cdev->work_flags);
+-	} else {
++	else {
+ 		clear_bit(LED_SET_BRIGHTNESS, &led_cdev->work_flags);
+ 		clear_bit(LED_SET_BLINK, &led_cdev->work_flags);
+ 		set_bit(LED_SET_BRIGHTNESS_OFF, &led_cdev->work_flags);
+diff --git a/drivers/media/dvb-frontends/dib8000.c b/drivers/media/dvb-frontends/dib8000.c
+index 2f5165918163df..cfe59c3255f706 100644
+--- a/drivers/media/dvb-frontends/dib8000.c
++++ b/drivers/media/dvb-frontends/dib8000.c
+@@ -2701,8 +2701,11 @@ static void dib8000_set_dds(struct dib8000_state *state, s32 offset_khz)
+ 	u8 ratio;
+ 
+ 	if (state->revision == 0x8090) {
++		u32 internal = dib8000_read32(state, 23) / 1000;
++
+ 		ratio = 4;
+-		unit_khz_dds_val = (1<<26) / (dib8000_read32(state, 23) / 1000);
++
++		unit_khz_dds_val = (1<<26) / (internal ?: 1);
+ 		if (offset_khz < 0)
+ 			dds = (1 << 26) - (abs_offset_khz * unit_khz_dds_val);
+ 		else
+diff --git a/drivers/media/platform/allegro-dvt/allegro-core.c b/drivers/media/platform/allegro-dvt/allegro-core.c
+index 88c36eb6174ad6..9ca4e2f94647b0 100644
+--- a/drivers/media/platform/allegro-dvt/allegro-core.c
++++ b/drivers/media/platform/allegro-dvt/allegro-core.c
+@@ -3914,6 +3914,7 @@ static int allegro_probe(struct platform_device *pdev)
+ 	if (ret < 0) {
+ 		v4l2_err(&dev->v4l2_dev,
+ 			 "failed to request firmware: %d\n", ret);
++		v4l2_device_unregister(&dev->v4l2_dev);
+ 		return ret;
+ 	}
+ 
+diff --git a/drivers/media/platform/ti/omap3isp/isp.c b/drivers/media/platform/ti/omap3isp/isp.c
+index 91101ba88ef01f..b2210841a320f4 100644
+--- a/drivers/media/platform/ti/omap3isp/isp.c
++++ b/drivers/media/platform/ti/omap3isp/isp.c
+@@ -1961,6 +1961,13 @@ static int isp_attach_iommu(struct isp_device *isp)
+ 	struct dma_iommu_mapping *mapping;
+ 	int ret;
+ 
++	/* We always want to replace any default mapping from the arch code */
++	mapping = to_dma_iommu_mapping(isp->dev);
++	if (mapping) {
++		arm_iommu_detach_device(isp->dev);
++		arm_iommu_release_mapping(mapping);
++	}
++
+ 	/*
+ 	 * Create the ARM mapping, used by the ARM DMA mapping core to allocate
+ 	 * VAs. This will allocate a corresponding IOMMU domain.
+diff --git a/drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c b/drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c
+index 85a44143b3786b..0e212198dd65b1 100644
+--- a/drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c
++++ b/drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c
+@@ -518,6 +518,7 @@ static void set_buffers(struct hantro_ctx *ctx)
+ 	hantro_reg_write(vpu, &g2_stream_len, src_len);
+ 	hantro_reg_write(vpu, &g2_strm_buffer_len, src_buf_len);
+ 	hantro_reg_write(vpu, &g2_strm_start_offset, 0);
++	hantro_reg_write(vpu, &g2_start_bit, 0);
+ 	hantro_reg_write(vpu, &g2_write_mvs_e, 1);
+ 
+ 	hantro_write_addr(vpu, G2_TILE_SIZES_ADDR, ctx->hevc_dec.tile_sizes.dma);
+diff --git a/drivers/media/rc/streamzap.c b/drivers/media/rc/streamzap.c
+index 9b209e687f256d..2ce62fe5d60f5a 100644
+--- a/drivers/media/rc/streamzap.c
++++ b/drivers/media/rc/streamzap.c
+@@ -385,8 +385,8 @@ static void streamzap_disconnect(struct usb_interface *interface)
+ 	if (!sz)
+ 		return;
+ 
+-	rc_unregister_device(sz->rdev);
+ 	usb_kill_urb(sz->urb_in);
++	rc_unregister_device(sz->rdev);
+ 	usb_free_urb(sz->urb_in);
+ 	usb_free_coherent(usbdev, sz->buf_in_len, sz->buf_in, sz->dma_in);
+ 
+diff --git a/drivers/media/test-drivers/vimc/vimc-streamer.c b/drivers/media/test-drivers/vimc/vimc-streamer.c
+index 807551a5143b78..15d863f97cbf96 100644
+--- a/drivers/media/test-drivers/vimc/vimc-streamer.c
++++ b/drivers/media/test-drivers/vimc/vimc-streamer.c
+@@ -59,6 +59,12 @@ static void vimc_streamer_pipeline_terminate(struct vimc_stream *stream)
+ 			continue;
+ 
+ 		sd = media_entity_to_v4l2_subdev(ved->ent);
++		/*
++		 * Do not call .s_stream() to stop an already
++		 * stopped/unstarted subdev.
++		 */
++		if (!v4l2_subdev_is_streaming(sd))
++			continue;
+ 		v4l2_subdev_call(sd, video, s_stream, 0);
+ 	}
+ }
+diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
+index c8a0d82f9c27df..719225c09a4d60 100644
+--- a/drivers/memory/omap-gpmc.c
++++ b/drivers/memory/omap-gpmc.c
+@@ -2245,26 +2245,6 @@ static int gpmc_probe_generic_child(struct platform_device *pdev,
+ 		goto err;
+ 	}
+ 
+-	if (of_node_name_eq(child, "nand")) {
+-		/* Warn about older DT blobs with no compatible property */
+-		if (!of_property_read_bool(child, "compatible")) {
+-			dev_warn(&pdev->dev,
+-				 "Incompatible NAND node: missing compatible");
+-			ret = -EINVAL;
+-			goto err;
+-		}
+-	}
+-
+-	if (of_node_name_eq(child, "onenand")) {
+-		/* Warn about older DT blobs with no compatible property */
+-		if (!of_property_read_bool(child, "compatible")) {
+-			dev_warn(&pdev->dev,
+-				 "Incompatible OneNAND node: missing compatible");
+-			ret = -EINVAL;
+-			goto err;
+-		}
+-	}
+-
+ 	if (of_match_node(omap_nand_ids, child)) {
+ 		/* NAND specific setup */
+ 		val = 8;
+diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
+index b3592982a83b55..5b6dc1cb9bfc36 100644
+--- a/drivers/mfd/sm501.c
++++ b/drivers/mfd/sm501.c
+@@ -920,7 +920,7 @@ static void sm501_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+ {
+ 	struct sm501_gpio_chip *smchip = gpiochip_get_data(chip);
+ 	struct sm501_gpio *smgpio = smchip->ourgpio;
+-	unsigned long bit = 1 << offset;
++	unsigned long bit = BIT(offset);
+ 	void __iomem *regs = smchip->regbase;
+ 	unsigned long save;
+ 	unsigned long val;
+@@ -946,7 +946,7 @@ static int sm501_gpio_input(struct gpio_chip *chip, unsigned offset)
+ 	struct sm501_gpio_chip *smchip = gpiochip_get_data(chip);
+ 	struct sm501_gpio *smgpio = smchip->ourgpio;
+ 	void __iomem *regs = smchip->regbase;
+-	unsigned long bit = 1 << offset;
++	unsigned long bit = BIT(offset);
+ 	unsigned long save;
+ 	unsigned long ddr;
+ 
+@@ -971,7 +971,7 @@ static int sm501_gpio_output(struct gpio_chip *chip,
+ {
+ 	struct sm501_gpio_chip *smchip = gpiochip_get_data(chip);
+ 	struct sm501_gpio *smgpio = smchip->ourgpio;
+-	unsigned long bit = 1 << offset;
++	unsigned long bit = BIT(offset);
+ 	void __iomem *regs = smchip->regbase;
+ 	unsigned long save;
+ 	unsigned long val;
+diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
+index 335350a4e99aba..ee0940d96febf6 100644
+--- a/drivers/mmc/host/omap.c
++++ b/drivers/mmc/host/omap.c
+@@ -1272,19 +1272,25 @@ static int mmc_omap_new_slot(struct mmc_omap_host *host, int id)
+ 	/* Check for some optional GPIO controls */
+ 	slot->vsd = devm_gpiod_get_index_optional(host->dev, "vsd",
+ 						  id, GPIOD_OUT_LOW);
+-	if (IS_ERR(slot->vsd))
+-		return dev_err_probe(host->dev, PTR_ERR(slot->vsd),
++	if (IS_ERR(slot->vsd)) {
++		r = dev_err_probe(host->dev, PTR_ERR(slot->vsd),
+ 				     "error looking up VSD GPIO\n");
++		goto err_free_host;
++	}
+ 	slot->vio = devm_gpiod_get_index_optional(host->dev, "vio",
+ 						  id, GPIOD_OUT_LOW);
+-	if (IS_ERR(slot->vio))
+-		return dev_err_probe(host->dev, PTR_ERR(slot->vio),
++	if (IS_ERR(slot->vio)) {
++		r = dev_err_probe(host->dev, PTR_ERR(slot->vio),
+ 				     "error looking up VIO GPIO\n");
++		goto err_free_host;
++	}
+ 	slot->cover = devm_gpiod_get_index_optional(host->dev, "cover",
+ 						    id, GPIOD_IN);
+-	if (IS_ERR(slot->cover))
+-		return dev_err_probe(host->dev, PTR_ERR(slot->cover),
++	if (IS_ERR(slot->cover)) {
++		r = dev_err_probe(host->dev, PTR_ERR(slot->cover),
+ 				     "error looking up cover switch GPIO\n");
++		goto err_free_host;
++	}
+ 
+ 	host->slots[id] = slot;
+ 
+@@ -1344,6 +1350,7 @@ static int mmc_omap_new_slot(struct mmc_omap_host *host, int id)
+ 		device_remove_file(&mmc->class_dev, &dev_attr_slot_name);
+ err_remove_host:
+ 	mmc_remove_host(mmc);
++err_free_host:
+ 	mmc_free_host(mmc);
+ 	return r;
+ }
+diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c
+index 5841a9afeb9f50..ea4a801c9ace5c 100644
+--- a/drivers/mmc/host/sdhci-omap.c
++++ b/drivers/mmc/host/sdhci-omap.c
+@@ -1339,8 +1339,8 @@ static int sdhci_omap_probe(struct platform_device *pdev)
+ 	/* R1B responses is required to properly manage HW busy detection. */
+ 	mmc->caps |= MMC_CAP_NEED_RSP_BUSY;
+ 
+-	/* Allow card power off and runtime PM for eMMC/SD card devices */
+-	mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_AGGRESSIVE_PM;
++	/*  Enable SDIO card power off. */
++	mmc->caps |= MMC_CAP_POWER_OFF_CARD;
+ 
+ 	ret = sdhci_setup_host(host);
+ 	if (ret)
+diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
+index 3af43ac0582552..376fd927ae7386 100644
+--- a/drivers/mmc/host/sdhci-pxav3.c
++++ b/drivers/mmc/host/sdhci-pxav3.c
+@@ -399,6 +399,7 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
+ 	if (!IS_ERR(pxa->clk_core))
+ 		clk_prepare_enable(pxa->clk_core);
+ 
++	host->mmc->caps |= MMC_CAP_NEED_RSP_BUSY;
+ 	/* enable 1/8V DDR capable */
+ 	host->mmc->caps |= MMC_CAP_1_8V_DDR;
+ 
+diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
+index c5e571ec94c990..0472bcdff13072 100644
+--- a/drivers/net/arcnet/com20020-pci.c
++++ b/drivers/net/arcnet/com20020-pci.c
+@@ -251,18 +251,33 @@ static int com20020pci_probe(struct pci_dev *pdev,
+ 			card->tx_led.default_trigger = devm_kasprintf(&pdev->dev,
+ 							GFP_KERNEL, "arc%d-%d-tx",
+ 							dev->dev_id, i);
++			if (!card->tx_led.default_trigger) {
++				ret = -ENOMEM;
++				goto err_free_arcdev;
++			}
+ 			card->tx_led.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+ 							"pci:green:tx:%d-%d",
+ 							dev->dev_id, i);
+-
++			if (!card->tx_led.name) {
++				ret = -ENOMEM;
++				goto err_free_arcdev;
++			}
+ 			card->tx_led.dev = &dev->dev;
+ 			card->recon_led.brightness_set = led_recon_set;
+ 			card->recon_led.default_trigger = devm_kasprintf(&pdev->dev,
+ 							GFP_KERNEL, "arc%d-%d-recon",
+ 							dev->dev_id, i);
++			if (!card->recon_led.default_trigger) {
++				ret = -ENOMEM;
++				goto err_free_arcdev;
++			}
+ 			card->recon_led.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+ 							"pci:red:recon:%d-%d",
+ 							dev->dev_id, i);
++			if (!card->recon_led.name) {
++				ret = -ENOMEM;
++				goto err_free_arcdev;
++			}
+ 			card->recon_led.dev = &dev->dev;
+ 
+ 			ret = devm_led_classdev_register(&pdev->dev, &card->tx_led);
+diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
+index 5aeecfab96306c..5935100e7d65f8 100644
+--- a/drivers/net/dsa/mv88e6xxx/chip.c
++++ b/drivers/net/dsa/mv88e6xxx/chip.c
+@@ -7301,13 +7301,13 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev)
+ 	err = mv88e6xxx_switch_reset(chip);
+ 	mv88e6xxx_reg_unlock(chip);
+ 	if (err)
+-		goto out;
++		goto out_phy;
+ 
+ 	if (np) {
+ 		chip->irq = of_irq_get(np, 0);
+ 		if (chip->irq == -EPROBE_DEFER) {
+ 			err = chip->irq;
+-			goto out;
++			goto out_phy;
+ 		}
+ 	}
+ 
+@@ -7326,7 +7326,7 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev)
+ 	mv88e6xxx_reg_unlock(chip);
+ 
+ 	if (err)
+-		goto out;
++		goto out_phy;
+ 
+ 	if (chip->info->g2_irqs > 0) {
+ 		err = mv88e6xxx_g2_irq_setup(chip);
+@@ -7360,6 +7360,8 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev)
+ 		mv88e6xxx_g1_irq_free(chip);
+ 	else
+ 		mv88e6xxx_irq_poll_free(chip);
++out_phy:
++	mv88e6xxx_phy_destroy(chip);
+ out:
+ 	if (pdata)
+ 		dev_put(pdata->netdev);
+@@ -7382,7 +7384,6 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev)
+ 		mv88e6xxx_ptp_free(chip);
+ 	}
+ 
+-	mv88e6xxx_phy_destroy(chip);
+ 	mv88e6xxx_unregister_switch(chip);
+ 
+ 	mv88e6xxx_g1_vtu_prob_irq_free(chip);
+@@ -7395,6 +7396,8 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev)
+ 		mv88e6xxx_g1_irq_free(chip);
+ 	else
+ 		mv88e6xxx_irq_poll_free(chip);
++
++	mv88e6xxx_phy_destroy(chip);
+ }
+ 
+ static void mv88e6xxx_shutdown(struct mdio_device *mdiodev)
+diff --git a/drivers/net/dsa/mv88e6xxx/phy.c b/drivers/net/dsa/mv88e6xxx/phy.c
+index 8bb88b3d900db3..ee9e5d7e527709 100644
+--- a/drivers/net/dsa/mv88e6xxx/phy.c
++++ b/drivers/net/dsa/mv88e6xxx/phy.c
+@@ -229,7 +229,10 @@ static void mv88e6xxx_phy_ppu_state_init(struct mv88e6xxx_chip *chip)
+ 
+ static void mv88e6xxx_phy_ppu_state_destroy(struct mv88e6xxx_chip *chip)
+ {
++	mutex_lock(&chip->ppu_mutex);
+ 	del_timer_sync(&chip->ppu_timer);
++	cancel_work_sync(&chip->ppu_work);
++	mutex_unlock(&chip->ppu_mutex);
+ }
+ 
+ int mv88e6185_phy_ppu_read(struct mv88e6xxx_chip *chip, struct mii_bus *bus,
+diff --git a/drivers/net/dsa/realtek/Kconfig b/drivers/net/dsa/realtek/Kconfig
+index 10687722d14c08..d6eb6713e5f6ba 100644
+--- a/drivers/net/dsa/realtek/Kconfig
++++ b/drivers/net/dsa/realtek/Kconfig
+@@ -44,7 +44,7 @@ config NET_DSA_REALTEK_RTL8366RB
+ 	  Select to enable support for Realtek RTL8366RB.
+ 
+ config NET_DSA_REALTEK_RTL8366RB_LEDS
+-	bool "Support RTL8366RB LED control"
++	bool
+ 	depends on (LEDS_CLASS=y || LEDS_CLASS=NET_DSA_REALTEK_RTL8366RB)
+ 	depends on NET_DSA_REALTEK_RTL8366RB
+ 	default NET_DSA_REALTEK_RTL8366RB
+diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
+index b619a3ec245b24..04192190bebabb 100644
+--- a/drivers/net/ethernet/ibm/ibmveth.c
++++ b/drivers/net/ethernet/ibm/ibmveth.c
+@@ -1802,18 +1802,22 @@ static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
+ 	long value = simple_strtol(buf, NULL, 10);
+ 	long rc;
+ 
++	rtnl_lock();
++
+ 	if (attr == &veth_active_attr) {
+ 		if (value && !pool->active) {
+ 			if (netif_running(netdev)) {
+ 				if (ibmveth_alloc_buffer_pool(pool)) {
+ 					netdev_err(netdev,
+ 						   "unable to alloc pool\n");
+-					return -ENOMEM;
++					rc = -ENOMEM;
++					goto unlock_err;
+ 				}
+ 				pool->active = 1;
+ 				ibmveth_close(netdev);
+-				if ((rc = ibmveth_open(netdev)))
+-					return rc;
++				rc = ibmveth_open(netdev);
++				if (rc)
++					goto unlock_err;
+ 			} else {
+ 				pool->active = 1;
+ 			}
+@@ -1833,48 +1837,59 @@ static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
+ 
+ 			if (i == IBMVETH_NUM_BUFF_POOLS) {
+ 				netdev_err(netdev, "no active pool >= MTU\n");
+-				return -EPERM;
++				rc = -EPERM;
++				goto unlock_err;
+ 			}
+ 
+ 			if (netif_running(netdev)) {
+ 				ibmveth_close(netdev);
+ 				pool->active = 0;
+-				if ((rc = ibmveth_open(netdev)))
+-					return rc;
++				rc = ibmveth_open(netdev);
++				if (rc)
++					goto unlock_err;
+ 			}
+ 			pool->active = 0;
+ 		}
+ 	} else if (attr == &veth_num_attr) {
+ 		if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT) {
+-			return -EINVAL;
++			rc = -EINVAL;
++			goto unlock_err;
+ 		} else {
+ 			if (netif_running(netdev)) {
+ 				ibmveth_close(netdev);
+ 				pool->size = value;
+-				if ((rc = ibmveth_open(netdev)))
+-					return rc;
++				rc = ibmveth_open(netdev);
++				if (rc)
++					goto unlock_err;
+ 			} else {
+ 				pool->size = value;
+ 			}
+ 		}
+ 	} else if (attr == &veth_size_attr) {
+ 		if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE) {
+-			return -EINVAL;
++			rc = -EINVAL;
++			goto unlock_err;
+ 		} else {
+ 			if (netif_running(netdev)) {
+ 				ibmveth_close(netdev);
+ 				pool->buff_size = value;
+-				if ((rc = ibmveth_open(netdev)))
+-					return rc;
++				rc = ibmveth_open(netdev);
++				if (rc)
++					goto unlock_err;
+ 			} else {
+ 				pool->buff_size = value;
+ 			}
+ 		}
+ 	}
++	rtnl_unlock();
+ 
+ 	/* kick the interrupt handler to allocate/deallocate pools */
+ 	ibmveth_interrupt(netdev->irq, netdev);
+ 	return count;
++
++unlock_err:
++	rtnl_unlock();
++	return rc;
+ }
+ 
+ 
+diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
+index 5e2cfa73f8891c..8294a7c4f122c3 100644
+--- a/drivers/net/ethernet/intel/e1000e/defines.h
++++ b/drivers/net/ethernet/intel/e1000e/defines.h
+@@ -803,4 +803,7 @@
+ /* SerDes Control */
+ #define E1000_GEN_POLL_TIMEOUT          640
+ 
++#define E1000_FEXTNVM12_PHYPD_CTRL_MASK	0x00C00000
++#define E1000_FEXTNVM12_PHYPD_CTRL_P1	0x00800000
++
+ #endif /* _E1000_DEFINES_H_ */
+diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
+index 2f9655cf5dd9ee..364378133526a1 100644
+--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
++++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
+@@ -285,6 +285,45 @@ static void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw)
+ 	}
+ }
+ 
++/**
++ * e1000_reconfigure_k1_exit_timeout - reconfigure K1 exit timeout to
++ * align to MTP and later platform requirements.
++ * @hw: pointer to the HW structure
++ *
++ * Context: PHY semaphore must be held by caller.
++ * Return: 0 on success, negative on failure
++ */
++static s32 e1000_reconfigure_k1_exit_timeout(struct e1000_hw *hw)
++{
++	u16 phy_timeout;
++	u32 fextnvm12;
++	s32 ret_val;
++
++	if (hw->mac.type < e1000_pch_mtp)
++		return 0;
++
++	/* Change Kumeran K1 power down state from P0s to P1 */
++	fextnvm12 = er32(FEXTNVM12);
++	fextnvm12 &= ~E1000_FEXTNVM12_PHYPD_CTRL_MASK;
++	fextnvm12 |= E1000_FEXTNVM12_PHYPD_CTRL_P1;
++	ew32(FEXTNVM12, fextnvm12);
++
++	/* Wait for the interface to settle */
++	usleep_range(1000, 1100);
++
++	/* Change K1 exit timeout */
++	ret_val = e1e_rphy_locked(hw, I217_PHY_TIMEOUTS_REG,
++				  &phy_timeout);
++	if (ret_val)
++		return ret_val;
++
++	phy_timeout &= ~I217_PHY_TIMEOUTS_K1_EXIT_TO_MASK;
++	phy_timeout |= 0xF00;
++
++	return e1e_wphy_locked(hw, I217_PHY_TIMEOUTS_REG,
++				  phy_timeout);
++}
++
+ /**
+  *  e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
+  *  @hw: pointer to the HW structure
+@@ -327,15 +366,22 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
+ 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
+ 	 */
+ 	switch (hw->mac.type) {
++	case e1000_pch_mtp:
++	case e1000_pch_lnp:
++	case e1000_pch_ptp:
++	case e1000_pch_nvp:
++		/* At this point the PHY might be inaccessible, so don't
++		 * propagate the failure
++		 */
++		if (e1000_reconfigure_k1_exit_timeout(hw))
++			e_dbg("Failed to reconfigure K1 exit timeout\n");
++
++		fallthrough;
+ 	case e1000_pch_lpt:
+ 	case e1000_pch_spt:
+ 	case e1000_pch_cnp:
+ 	case e1000_pch_tgp:
+ 	case e1000_pch_adp:
+-	case e1000_pch_mtp:
+-	case e1000_pch_lnp:
+-	case e1000_pch_ptp:
+-	case e1000_pch_nvp:
+ 		if (e1000_phy_is_accessible_pchlan(hw))
+ 			break;
+ 
+@@ -419,8 +465,20 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
+ 		 *  the PHY is in.
+ 		 */
+ 		ret_val = hw->phy.ops.check_reset_block(hw);
+-		if (ret_val)
++		if (ret_val) {
+ 			e_err("ME blocked access to PHY after reset\n");
++			goto out;
++		}
++
++		if (hw->mac.type >= e1000_pch_mtp) {
++			ret_val = hw->phy.ops.acquire(hw);
++			if (ret_val) {
++				e_err("Failed to reconfigure K1 exit timeout\n");
++				goto out;
++			}
++			ret_val = e1000_reconfigure_k1_exit_timeout(hw);
++			hw->phy.ops.release(hw);
++		}
+ 	}
+ 
+ out:
+@@ -4888,6 +4946,18 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
+ 	u16 i;
+ 
+ 	e1000_initialize_hw_bits_ich8lan(hw);
++	if (hw->mac.type >= e1000_pch_mtp) {
++		ret_val = hw->phy.ops.acquire(hw);
++		if (ret_val)
++			return ret_val;
++
++		ret_val = e1000_reconfigure_k1_exit_timeout(hw);
++		hw->phy.ops.release(hw);
++		if (ret_val) {
++			e_dbg("Error failed to reconfigure K1 exit timeout\n");
++			return ret_val;
++		}
++	}
+ 
+ 	/* Initialize identification LED */
+ 	ret_val = mac->ops.id_led_init(hw);
+diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
+index 2504b11c3169fa..5feb589a9b5ff2 100644
+--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
++++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
+@@ -219,6 +219,10 @@
+ #define I217_PLL_CLOCK_GATE_REG	PHY_REG(772, 28)
+ #define I217_PLL_CLOCK_GATE_MASK	0x07FF
+ 
++/* PHY Timeouts */
++#define I217_PHY_TIMEOUTS_REG                   PHY_REG(770, 21)
++#define I217_PHY_TIMEOUTS_K1_EXIT_TO_MASK       0x0FC0
++
+ #define SW_FLAG_TIMEOUT		1000	/* SW Semaphore flag timeout in ms */
+ 
+ /* Inband Control */
+diff --git a/drivers/net/ethernet/intel/idpf/idpf_main.c b/drivers/net/ethernet/intel/idpf/idpf_main.c
+index dfd56fc5ff6550..7557bb6694c090 100644
+--- a/drivers/net/ethernet/intel/idpf/idpf_main.c
++++ b/drivers/net/ethernet/intel/idpf/idpf_main.c
+@@ -87,7 +87,11 @@ static void idpf_remove(struct pci_dev *pdev)
+  */
+ static void idpf_shutdown(struct pci_dev *pdev)
+ {
+-	idpf_remove(pdev);
++	struct idpf_adapter *adapter = pci_get_drvdata(pdev);
++
++	cancel_delayed_work_sync(&adapter->vc_event_task);
++	idpf_vc_core_deinit(adapter);
++	idpf_deinit_dflt_mbx(adapter);
+ 
+ 	if (system_state == SYSTEM_POWER_OFF)
+ 		pci_set_power_state(pdev, PCI_D3hot);
+diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+index f0537826f8403f..9c1fe84108ed2e 100644
+--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
++++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+@@ -438,7 +438,8 @@ struct idpf_q_vector {
+ 	__cacheline_group_end_aligned(cold);
+ };
+ libeth_cacheline_set_assert(struct idpf_q_vector, 112,
+-			    424 + 2 * sizeof(struct dim),
++			    24 + sizeof(struct napi_struct) +
++			    2 * sizeof(struct dim),
+ 			    8 + sizeof(cpumask_var_t));
+ 
+ struct idpf_rx_queue_stats {
+diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
+index 9e02e4367bec81..9bd3d76b5fe2ac 100644
+--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
+@@ -1108,6 +1108,9 @@ struct mvpp2 {
+ 
+ 	/* Spinlocks for CM3 shared memory configuration */
+ 	spinlock_t mss_spinlock;
++
++	/* Spinlock for shared PRS parser memory and shadow table */
++	spinlock_t prs_spinlock;
+ };
+ 
+ struct mvpp2_pcpu_stats {
+diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+index 3880dcc0418b2d..66b5a80c9c28aa 100644
+--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+@@ -7640,8 +7640,9 @@ static int mvpp2_probe(struct platform_device *pdev)
+ 	if (mvpp2_read(priv, MVPP2_VER_ID_REG) == MVPP2_VER_PP23)
+ 		priv->hw_version = MVPP23;
+ 
+-	/* Init mss lock */
++	/* Init locks for shared packet processor resources */
+ 	spin_lock_init(&priv->mss_spinlock);
++	spin_lock_init(&priv->prs_spinlock);
+ 
+ 	/* Initialize network controller */
+ 	err = mvpp2_init(pdev, priv);
+diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
+index 9af22f497a40f5..93e978bdf303c4 100644
+--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
+@@ -23,6 +23,8 @@ static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
+ {
+ 	int i;
+ 
++	lockdep_assert_held(&priv->prs_spinlock);
++
+ 	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
+ 		return -EINVAL;
+ 
+@@ -43,11 +45,13 @@ static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
+ }
+ 
+ /* Initialize tcam entry from hw */
+-int mvpp2_prs_init_from_hw(struct mvpp2 *priv, struct mvpp2_prs_entry *pe,
+-			   int tid)
++static int __mvpp2_prs_init_from_hw(struct mvpp2 *priv,
++				    struct mvpp2_prs_entry *pe, int tid)
+ {
+ 	int i;
+ 
++	lockdep_assert_held(&priv->prs_spinlock);
++
+ 	if (tid > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
+ 		return -EINVAL;
+ 
+@@ -73,6 +77,18 @@ int mvpp2_prs_init_from_hw(struct mvpp2 *priv, struct mvpp2_prs_entry *pe,
+ 	return 0;
+ }
+ 
++int mvpp2_prs_init_from_hw(struct mvpp2 *priv, struct mvpp2_prs_entry *pe,
++			   int tid)
++{
++	int err;
++
++	spin_lock_bh(&priv->prs_spinlock);
++	err = __mvpp2_prs_init_from_hw(priv, pe, tid);
++	spin_unlock_bh(&priv->prs_spinlock);
++
++	return err;
++}
++
+ /* Invalidate tcam hw entry */
+ static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
+ {
+@@ -374,7 +390,7 @@ static int mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
+ 		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
+ 			continue;
+ 
+-		mvpp2_prs_init_from_hw(priv, &pe, tid);
++		__mvpp2_prs_init_from_hw(priv, &pe, tid);
+ 		bits = mvpp2_prs_sram_ai_get(&pe);
+ 
+ 		/* Sram store classification lookup ID in AI bits [5:0] */
+@@ -441,7 +457,7 @@ static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
+ 
+ 	if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
+ 		/* Entry exist - update port only */
+-		mvpp2_prs_init_from_hw(priv, &pe, MVPP2_PE_DROP_ALL);
++		__mvpp2_prs_init_from_hw(priv, &pe, MVPP2_PE_DROP_ALL);
+ 	} else {
+ 		/* Entry doesn't exist - create new */
+ 		memset(&pe, 0, sizeof(pe));
+@@ -469,14 +485,17 @@ static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
+ }
+ 
+ /* Set port to unicast or multicast promiscuous mode */
+-void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port,
+-			       enum mvpp2_prs_l2_cast l2_cast, bool add)
++static void __mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port,
++					enum mvpp2_prs_l2_cast l2_cast,
++					bool add)
+ {
+ 	struct mvpp2_prs_entry pe;
+ 	unsigned char cast_match;
+ 	unsigned int ri;
+ 	int tid;
+ 
++	lockdep_assert_held(&priv->prs_spinlock);
++
+ 	if (l2_cast == MVPP2_PRS_L2_UNI_CAST) {
+ 		cast_match = MVPP2_PRS_UCAST_VAL;
+ 		tid = MVPP2_PE_MAC_UC_PROMISCUOUS;
+@@ -489,7 +508,7 @@ void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port,
+ 
+ 	/* promiscuous mode - Accept unknown unicast or multicast packets */
+ 	if (priv->prs_shadow[tid].valid) {
+-		mvpp2_prs_init_from_hw(priv, &pe, tid);
++		__mvpp2_prs_init_from_hw(priv, &pe, tid);
+ 	} else {
+ 		memset(&pe, 0, sizeof(pe));
+ 		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
+@@ -522,6 +541,14 @@ void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port,
+ 	mvpp2_prs_hw_write(priv, &pe);
+ }
+ 
++void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port,
++			       enum mvpp2_prs_l2_cast l2_cast, bool add)
++{
++	spin_lock_bh(&priv->prs_spinlock);
++	__mvpp2_prs_mac_promisc_set(priv, port, l2_cast, add);
++	spin_unlock_bh(&priv->prs_spinlock);
++}
++
+ /* Set entry for dsa packets */
+ static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
+ 				  bool tagged, bool extend)
+@@ -539,7 +566,7 @@ static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
+ 
+ 	if (priv->prs_shadow[tid].valid) {
+ 		/* Entry exist - update port only */
+-		mvpp2_prs_init_from_hw(priv, &pe, tid);
++		__mvpp2_prs_init_from_hw(priv, &pe, tid);
+ 	} else {
+ 		/* Entry doesn't exist - create new */
+ 		memset(&pe, 0, sizeof(pe));
+@@ -610,7 +637,7 @@ static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port,
+ 
+ 	if (priv->prs_shadow[tid].valid) {
+ 		/* Entry exist - update port only */
+-		mvpp2_prs_init_from_hw(priv, &pe, tid);
++		__mvpp2_prs_init_from_hw(priv, &pe, tid);
+ 	} else {
+ 		/* Entry doesn't exist - create new */
+ 		memset(&pe, 0, sizeof(pe));
+@@ -673,7 +700,7 @@ static int mvpp2_prs_vlan_find(struct mvpp2 *priv, unsigned short tpid, int ai)
+ 		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
+ 			continue;
+ 
+-		mvpp2_prs_init_from_hw(priv, &pe, tid);
++		__mvpp2_prs_init_from_hw(priv, &pe, tid);
+ 		match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid);
+ 		if (!match)
+ 			continue;
+@@ -726,7 +753,7 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
+ 			    priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
+ 				continue;
+ 
+-			mvpp2_prs_init_from_hw(priv, &pe, tid_aux);
++			__mvpp2_prs_init_from_hw(priv, &pe, tid_aux);
+ 			ri_bits = mvpp2_prs_sram_ri_get(&pe);
+ 			if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
+ 			    MVPP2_PRS_RI_VLAN_DOUBLE)
+@@ -760,7 +787,7 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
+ 
+ 		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
+ 	} else {
+-		mvpp2_prs_init_from_hw(priv, &pe, tid);
++		__mvpp2_prs_init_from_hw(priv, &pe, tid);
+ 	}
+ 	/* Update ports' mask */
+ 	mvpp2_prs_tcam_port_map_set(&pe, port_map);
+@@ -800,7 +827,7 @@ static int mvpp2_prs_double_vlan_find(struct mvpp2 *priv, unsigned short tpid1,
+ 		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
+ 			continue;
+ 
+-		mvpp2_prs_init_from_hw(priv, &pe, tid);
++		__mvpp2_prs_init_from_hw(priv, &pe, tid);
+ 
+ 		match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid1) &&
+ 			mvpp2_prs_tcam_data_cmp(&pe, 4, tpid2);
+@@ -849,7 +876,7 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
+ 			    priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
+ 				continue;
+ 
+-			mvpp2_prs_init_from_hw(priv, &pe, tid_aux);
++			__mvpp2_prs_init_from_hw(priv, &pe, tid_aux);
+ 			ri_bits = mvpp2_prs_sram_ri_get(&pe);
+ 			ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
+ 			if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
+@@ -880,7 +907,7 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
+ 
+ 		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
+ 	} else {
+-		mvpp2_prs_init_from_hw(priv, &pe, tid);
++		__mvpp2_prs_init_from_hw(priv, &pe, tid);
+ 	}
+ 
+ 	/* Update ports' mask */
+@@ -1213,8 +1240,8 @@ static void mvpp2_prs_mac_init(struct mvpp2 *priv)
+ 	/* Create dummy entries for drop all and promiscuous modes */
+ 	mvpp2_prs_drop_fc(priv);
+ 	mvpp2_prs_mac_drop_all_set(priv, 0, false);
+-	mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false);
+-	mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false);
++	__mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false);
++	__mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false);
+ }
+ 
+ /* Set default entries for various types of dsa packets */
+@@ -1533,12 +1560,6 @@ static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
+ 	struct mvpp2_prs_entry pe;
+ 	int err;
+ 
+-	priv->prs_double_vlans = devm_kcalloc(&pdev->dev, sizeof(bool),
+-					      MVPP2_PRS_DBL_VLANS_MAX,
+-					      GFP_KERNEL);
+-	if (!priv->prs_double_vlans)
+-		return -ENOMEM;
+-
+ 	/* Double VLAN: 0x88A8, 0x8100 */
+ 	err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021AD, ETH_P_8021Q,
+ 					MVPP2_PRS_PORT_MASK);
+@@ -1941,7 +1962,7 @@ static int mvpp2_prs_vid_range_find(struct mvpp2_port *port, u16 vid, u16 mask)
+ 		    port->priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID)
+ 			continue;
+ 
+-		mvpp2_prs_init_from_hw(port->priv, &pe, tid);
++		__mvpp2_prs_init_from_hw(port->priv, &pe, tid);
+ 
+ 		mvpp2_prs_tcam_data_byte_get(&pe, 2, &byte[0], &enable[0]);
+ 		mvpp2_prs_tcam_data_byte_get(&pe, 3, &byte[1], &enable[1]);
+@@ -1970,6 +1991,8 @@ int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid)
+ 
+ 	memset(&pe, 0, sizeof(pe));
+ 
++	spin_lock_bh(&priv->prs_spinlock);
++
+ 	/* Scan TCAM and see if entry with this <vid,port> already exist */
+ 	tid = mvpp2_prs_vid_range_find(port, vid, mask);
+ 
+@@ -1988,8 +2011,10 @@ int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid)
+ 						MVPP2_PRS_VLAN_FILT_MAX_ENTRY);
+ 
+ 		/* There isn't room for a new VID filter */
+-		if (tid < 0)
++		if (tid < 0) {
++			spin_unlock_bh(&priv->prs_spinlock);
+ 			return tid;
++		}
+ 
+ 		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
+ 		pe.index = tid;
+@@ -1997,7 +2022,7 @@ int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid)
+ 		/* Mask all ports */
+ 		mvpp2_prs_tcam_port_map_set(&pe, 0);
+ 	} else {
+-		mvpp2_prs_init_from_hw(priv, &pe, tid);
++		__mvpp2_prs_init_from_hw(priv, &pe, tid);
+ 	}
+ 
+ 	/* Enable the current port */
+@@ -2019,6 +2044,7 @@ int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid)
+ 	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
+ 	mvpp2_prs_hw_write(priv, &pe);
+ 
++	spin_unlock_bh(&priv->prs_spinlock);
+ 	return 0;
+ }
+ 
+@@ -2028,15 +2054,16 @@ void mvpp2_prs_vid_entry_remove(struct mvpp2_port *port, u16 vid)
+ 	struct mvpp2 *priv = port->priv;
+ 	int tid;
+ 
+-	/* Scan TCAM and see if entry with this <vid,port> already exist */
+-	tid = mvpp2_prs_vid_range_find(port, vid, 0xfff);
++	spin_lock_bh(&priv->prs_spinlock);
+ 
+-	/* No such entry */
+-	if (tid < 0)
+-		return;
++	/* Invalidate TCAM entry with this <vid,port>, if it exists */
++	tid = mvpp2_prs_vid_range_find(port, vid, 0xfff);
++	if (tid >= 0) {
++		mvpp2_prs_hw_inv(priv, tid);
++		priv->prs_shadow[tid].valid = false;
++	}
+ 
+-	mvpp2_prs_hw_inv(priv, tid);
+-	priv->prs_shadow[tid].valid = false;
++	spin_unlock_bh(&priv->prs_spinlock);
+ }
+ 
+ /* Remove all existing VID filters on this port */
+@@ -2045,6 +2072,8 @@ void mvpp2_prs_vid_remove_all(struct mvpp2_port *port)
+ 	struct mvpp2 *priv = port->priv;
+ 	int tid;
+ 
++	spin_lock_bh(&priv->prs_spinlock);
++
+ 	for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id);
+ 	     tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) {
+ 		if (priv->prs_shadow[tid].valid) {
+@@ -2052,6 +2081,8 @@ void mvpp2_prs_vid_remove_all(struct mvpp2_port *port)
+ 			priv->prs_shadow[tid].valid = false;
+ 		}
+ 	}
++
++	spin_unlock_bh(&priv->prs_spinlock);
+ }
+ 
+ /* Remove VID filering entry for this port */
+@@ -2060,10 +2091,14 @@ void mvpp2_prs_vid_disable_filtering(struct mvpp2_port *port)
+ 	unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id);
+ 	struct mvpp2 *priv = port->priv;
+ 
++	spin_lock_bh(&priv->prs_spinlock);
++
+ 	/* Invalidate the guard entry */
+ 	mvpp2_prs_hw_inv(priv, tid);
+ 
+ 	priv->prs_shadow[tid].valid = false;
++
++	spin_unlock_bh(&priv->prs_spinlock);
+ }
+ 
+ /* Add guard entry that drops packets when no VID is matched on this port */
+@@ -2079,6 +2114,8 @@ void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port)
+ 
+ 	memset(&pe, 0, sizeof(pe));
+ 
++	spin_lock_bh(&priv->prs_spinlock);
++
+ 	pe.index = tid;
+ 
+ 	reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id));
+@@ -2111,6 +2148,8 @@ void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port)
+ 	/* Update shadow table */
+ 	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
+ 	mvpp2_prs_hw_write(priv, &pe);
++
++	spin_unlock_bh(&priv->prs_spinlock);
+ }
+ 
+ /* Parser default initialization */
+@@ -2118,6 +2157,20 @@ int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv)
+ {
+ 	int err, index, i;
+ 
++	priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE,
++					sizeof(*priv->prs_shadow),
++					GFP_KERNEL);
++	if (!priv->prs_shadow)
++		return -ENOMEM;
++
++	priv->prs_double_vlans = devm_kcalloc(&pdev->dev, sizeof(bool),
++					      MVPP2_PRS_DBL_VLANS_MAX,
++					      GFP_KERNEL);
++	if (!priv->prs_double_vlans)
++		return -ENOMEM;
++
++	spin_lock_bh(&priv->prs_spinlock);
++
+ 	/* Enable tcam table */
+ 	mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);
+ 
+@@ -2136,12 +2189,6 @@ int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv)
+ 	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
+ 		mvpp2_prs_hw_inv(priv, index);
+ 
+-	priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE,
+-					sizeof(*priv->prs_shadow),
+-					GFP_KERNEL);
+-	if (!priv->prs_shadow)
+-		return -ENOMEM;
+-
+ 	/* Always start from lookup = 0 */
+ 	for (index = 0; index < MVPP2_MAX_PORTS; index++)
+ 		mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
+@@ -2158,26 +2205,13 @@ int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv)
+ 	mvpp2_prs_vid_init(priv);
+ 
+ 	err = mvpp2_prs_etype_init(priv);
+-	if (err)
+-		return err;
+-
+-	err = mvpp2_prs_vlan_init(pdev, priv);
+-	if (err)
+-		return err;
+-
+-	err = mvpp2_prs_pppoe_init(priv);
+-	if (err)
+-		return err;
+-
+-	err = mvpp2_prs_ip6_init(priv);
+-	if (err)
+-		return err;
+-
+-	err = mvpp2_prs_ip4_init(priv);
+-	if (err)
+-		return err;
++	err = err ? : mvpp2_prs_vlan_init(pdev, priv);
++	err = err ? : mvpp2_prs_pppoe_init(priv);
++	err = err ? : mvpp2_prs_ip6_init(priv);
++	err = err ? : mvpp2_prs_ip4_init(priv);
+ 
+-	return 0;
++	spin_unlock_bh(&priv->prs_spinlock);
++	return err;
+ }
+ 
+ /* Compare MAC DA with tcam entry data */
+@@ -2217,7 +2251,7 @@ mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
+ 		    (priv->prs_shadow[tid].udf != udf_type))
+ 			continue;
+ 
+-		mvpp2_prs_init_from_hw(priv, &pe, tid);
++		__mvpp2_prs_init_from_hw(priv, &pe, tid);
+ 		entry_pmap = mvpp2_prs_tcam_port_map_get(&pe);
+ 
+ 		if (mvpp2_prs_mac_range_equals(&pe, da, mask) &&
+@@ -2229,7 +2263,8 @@ mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
+ }
+ 
+ /* Update parser's mac da entry */
+-int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add)
++static int __mvpp2_prs_mac_da_accept(struct mvpp2_port *port,
++				     const u8 *da, bool add)
+ {
+ 	unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+ 	struct mvpp2 *priv = port->priv;
+@@ -2261,7 +2296,7 @@ int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add)
+ 		/* Mask all ports */
+ 		mvpp2_prs_tcam_port_map_set(&pe, 0);
+ 	} else {
+-		mvpp2_prs_init_from_hw(priv, &pe, tid);
++		__mvpp2_prs_init_from_hw(priv, &pe, tid);
+ 	}
+ 
+ 	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
+@@ -2317,6 +2352,17 @@ int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add)
+ 	return 0;
+ }
+ 
++int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add)
++{
++	int err;
++
++	spin_lock_bh(&port->priv->prs_spinlock);
++	err = __mvpp2_prs_mac_da_accept(port, da, add);
++	spin_unlock_bh(&port->priv->prs_spinlock);
++
++	return err;
++}
++
+ int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da)
+ {
+ 	struct mvpp2_port *port = netdev_priv(dev);
+@@ -2345,6 +2391,8 @@ void mvpp2_prs_mac_del_all(struct mvpp2_port *port)
+ 	unsigned long pmap;
+ 	int index, tid;
+ 
++	spin_lock_bh(&priv->prs_spinlock);
++
+ 	for (tid = MVPP2_PE_MAC_RANGE_START;
+ 	     tid <= MVPP2_PE_MAC_RANGE_END; tid++) {
+ 		unsigned char da[ETH_ALEN], da_mask[ETH_ALEN];
+@@ -2354,7 +2402,7 @@ void mvpp2_prs_mac_del_all(struct mvpp2_port *port)
+ 		    (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF))
+ 			continue;
+ 
+-		mvpp2_prs_init_from_hw(priv, &pe, tid);
++		__mvpp2_prs_init_from_hw(priv, &pe, tid);
+ 
+ 		pmap = mvpp2_prs_tcam_port_map_get(&pe);
+ 
+@@ -2375,14 +2423,17 @@ void mvpp2_prs_mac_del_all(struct mvpp2_port *port)
+ 			continue;
+ 
+ 		/* Remove entry from TCAM */
+-		mvpp2_prs_mac_da_accept(port, da, false);
++		__mvpp2_prs_mac_da_accept(port, da, false);
+ 	}
++
++	spin_unlock_bh(&priv->prs_spinlock);
+ }
+ 
+ int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
+ {
+ 	switch (type) {
+ 	case MVPP2_TAG_TYPE_EDSA:
++		spin_lock_bh(&priv->prs_spinlock);
+ 		/* Add port to EDSA entries */
+ 		mvpp2_prs_dsa_tag_set(priv, port, true,
+ 				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
+@@ -2393,9 +2444,11 @@ int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
+ 				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
+ 		mvpp2_prs_dsa_tag_set(priv, port, false,
+ 				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
++		spin_unlock_bh(&priv->prs_spinlock);
+ 		break;
+ 
+ 	case MVPP2_TAG_TYPE_DSA:
++		spin_lock_bh(&priv->prs_spinlock);
+ 		/* Add port to DSA entries */
+ 		mvpp2_prs_dsa_tag_set(priv, port, true,
+ 				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
+@@ -2406,10 +2459,12 @@ int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
+ 				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
+ 		mvpp2_prs_dsa_tag_set(priv, port, false,
+ 				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
++		spin_unlock_bh(&priv->prs_spinlock);
+ 		break;
+ 
+ 	case MVPP2_TAG_TYPE_MH:
+ 	case MVPP2_TAG_TYPE_NONE:
++		spin_lock_bh(&priv->prs_spinlock);
+ 		/* Remove port form EDSA and DSA entries */
+ 		mvpp2_prs_dsa_tag_set(priv, port, false,
+ 				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
+@@ -2419,6 +2474,7 @@ int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
+ 				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
+ 		mvpp2_prs_dsa_tag_set(priv, port, false,
+ 				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
++		spin_unlock_bh(&priv->prs_spinlock);
+ 		break;
+ 
+ 	default:
+@@ -2437,11 +2493,15 @@ int mvpp2_prs_add_flow(struct mvpp2 *priv, int flow, u32 ri, u32 ri_mask)
+ 
+ 	memset(&pe, 0, sizeof(pe));
+ 
++	spin_lock_bh(&priv->prs_spinlock);
++
+ 	tid = mvpp2_prs_tcam_first_free(priv,
+ 					MVPP2_PE_LAST_FREE_TID,
+ 					MVPP2_PE_FIRST_FREE_TID);
+-	if (tid < 0)
++	if (tid < 0) {
++		spin_unlock_bh(&priv->prs_spinlock);
+ 		return tid;
++	}
+ 
+ 	pe.index = tid;
+ 
+@@ -2461,6 +2521,7 @@ int mvpp2_prs_add_flow(struct mvpp2 *priv, int flow, u32 ri, u32 ri_mask)
+ 	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+ 	mvpp2_prs_hw_write(priv, &pe);
+ 
++	spin_unlock_bh(&priv->prs_spinlock);
+ 	return 0;
+ }
+ 
+@@ -2472,6 +2533,8 @@ int mvpp2_prs_def_flow(struct mvpp2_port *port)
+ 
+ 	memset(&pe, 0, sizeof(pe));
+ 
++	spin_lock_bh(&port->priv->prs_spinlock);
++
+ 	tid = mvpp2_prs_flow_find(port->priv, port->id);
+ 
+ 	/* Such entry not exist */
+@@ -2480,8 +2543,10 @@ int mvpp2_prs_def_flow(struct mvpp2_port *port)
+ 		tid = mvpp2_prs_tcam_first_free(port->priv,
+ 						MVPP2_PE_LAST_FREE_TID,
+ 					       MVPP2_PE_FIRST_FREE_TID);
+-		if (tid < 0)
++		if (tid < 0) {
++			spin_unlock_bh(&port->priv->prs_spinlock);
+ 			return tid;
++		}
+ 
+ 		pe.index = tid;
+ 
+@@ -2492,13 +2557,14 @@ int mvpp2_prs_def_flow(struct mvpp2_port *port)
+ 		/* Update shadow table */
+ 		mvpp2_prs_shadow_set(port->priv, pe.index, MVPP2_PRS_LU_FLOWS);
+ 	} else {
+-		mvpp2_prs_init_from_hw(port->priv, &pe, tid);
++		__mvpp2_prs_init_from_hw(port->priv, &pe, tid);
+ 	}
+ 
+ 	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+ 	mvpp2_prs_tcam_port_map_set(&pe, (1 << port->id));
+ 	mvpp2_prs_hw_write(port->priv, &pe);
+ 
++	spin_unlock_bh(&port->priv->prs_spinlock);
+ 	return 0;
+ }
+ 
+@@ -2509,11 +2575,14 @@ int mvpp2_prs_hits(struct mvpp2 *priv, int index)
+ 	if (index > MVPP2_PRS_TCAM_SRAM_SIZE)
+ 		return -EINVAL;
+ 
++	spin_lock_bh(&priv->prs_spinlock);
++
+ 	mvpp2_write(priv, MVPP2_PRS_TCAM_HIT_IDX_REG, index);
+ 
+ 	val = mvpp2_read(priv, MVPP2_PRS_TCAM_HIT_CNT_REG);
+ 
+ 	val &= MVPP2_PRS_TCAM_HIT_CNT_MASK;
+ 
++	spin_unlock_bh(&priv->prs_spinlock);
+ 	return val;
+ }
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+index cd0d7b7774f1af..6575c422635b76 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+@@ -2634,7 +2634,7 @@ static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
+ 		rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), intr);
+ 
+ 		rvu_queue_work(&rvu->afvf_wq_info, 64, vfs, intr);
+-		vfs -= 64;
++		vfs = 64;
+ 	}
+ 
+ 	intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(0));
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
+index 7498ab429963d4..06f778baaeef2d 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
+@@ -207,7 +207,7 @@ static void rvu_nix_unregister_interrupts(struct rvu *rvu)
+ 		rvu->irq_allocated[offs + NIX_AF_INT_VEC_RVU] = false;
+ 	}
+ 
+-	for (i = NIX_AF_INT_VEC_AF_ERR; i < NIX_AF_INT_VEC_CNT; i++)
++	for (i = NIX_AF_INT_VEC_GEN; i < NIX_AF_INT_VEC_CNT; i++)
+ 		if (rvu->irq_allocated[offs + i]) {
+ 			free_irq(pci_irq_vector(rvu->pdev, offs + i), rvu_dl);
+ 			rvu->irq_allocated[offs + i] = false;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+index 64b62ed17b07a7..31eb99f09c63c1 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+@@ -423,7 +423,7 @@ u8 mlx5e_shampo_get_log_pkt_per_rsrv(struct mlx5_core_dev *mdev,
+ 				     struct mlx5e_params *params)
+ {
+ 	u32 resrv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) *
+-			 PAGE_SIZE;
++			 MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE;
+ 
+ 	return order_base_2(DIV_ROUND_UP(resrv_size, params->sw_mtu));
+ }
+@@ -827,7 +827,8 @@ static u32 mlx5e_shampo_get_log_cq_size(struct mlx5_core_dev *mdev,
+ 					struct mlx5e_params *params,
+ 					struct mlx5e_xsk_param *xsk)
+ {
+-	int rsrv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) * PAGE_SIZE;
++	int rsrv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) *
++		MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE;
+ 	u16 num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk));
+ 	int pkt_per_rsrv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params));
+ 	u8 log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
+@@ -1036,7 +1037,8 @@ u32 mlx5e_shampo_hd_per_wqe(struct mlx5_core_dev *mdev,
+ 			    struct mlx5e_params *params,
+ 			    struct mlx5e_rq_param *rq_param)
+ {
+-	int resv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) * PAGE_SIZE;
++	int resv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) *
++		MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE;
+ 	u16 num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, NULL));
+ 	int pkt_per_resv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params));
+ 	u8 log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, NULL);
+diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
+index ddded162c44c13..d2a9cf3fde5ace 100644
+--- a/drivers/net/phy/broadcom.c
++++ b/drivers/net/phy/broadcom.c
+@@ -859,7 +859,7 @@ static int brcm_fet_config_init(struct phy_device *phydev)
+ 		return reg;
+ 
+ 	/* Unmask events we are interested in and mask interrupts globally. */
+-	if (phydev->phy_id == PHY_ID_BCM5221)
++	if (phydev->drv->phy_id == PHY_ID_BCM5221)
+ 		reg = MII_BRCM_FET_IR_ENABLE |
+ 		      MII_BRCM_FET_IR_MASK;
+ 	else
+@@ -888,7 +888,7 @@ static int brcm_fet_config_init(struct phy_device *phydev)
+ 		return err;
+ 	}
+ 
+-	if (phydev->phy_id != PHY_ID_BCM5221) {
++	if (phydev->drv->phy_id != PHY_ID_BCM5221) {
+ 		/* Set the LED mode */
+ 		reg = __phy_read(phydev, MII_BRCM_FET_SHDW_AUXMODE4);
+ 		if (reg < 0) {
+@@ -1009,7 +1009,7 @@ static int brcm_fet_suspend(struct phy_device *phydev)
+ 		return err;
+ 	}
+ 
+-	if (phydev->phy_id == PHY_ID_BCM5221)
++	if (phydev->drv->phy_id == PHY_ID_BCM5221)
+ 		/* Force Low Power Mode with clock enabled */
+ 		reg = BCM5221_SHDW_AM4_EN_CLK_LPM | BCM5221_SHDW_AM4_FORCE_LPM;
+ 	else
+diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
+index 7b3739b29c8f72..bb0bf141587274 100644
+--- a/drivers/net/usb/rndis_host.c
++++ b/drivers/net/usb/rndis_host.c
+@@ -630,6 +630,16 @@ static const struct driver_info	zte_rndis_info = {
+ 	.tx_fixup =	rndis_tx_fixup,
+ };
+ 
++static const struct driver_info	wwan_rndis_info = {
++	.description =	"Mobile Broadband RNDIS device",
++	.flags =	FLAG_WWAN | FLAG_POINTTOPOINT | FLAG_FRAMING_RN | FLAG_NO_SETINT,
++	.bind =		rndis_bind,
++	.unbind =	rndis_unbind,
++	.status =	rndis_status,
++	.rx_fixup =	rndis_rx_fixup,
++	.tx_fixup =	rndis_tx_fixup,
++};
++
+ /*-------------------------------------------------------------------------*/
+ 
+ static const struct usb_device_id	products [] = {
+@@ -666,9 +676,11 @@ static const struct usb_device_id	products [] = {
+ 	USB_INTERFACE_INFO(USB_CLASS_WIRELESS_CONTROLLER, 1, 3),
+ 	.driver_info = (unsigned long) &rndis_info,
+ }, {
+-	/* Novatel Verizon USB730L */
++	/* Mobile Broadband Modem, seen in Novatel Verizon USB730L and
++	 * Telit FN990A (RNDIS)
++	 */
+ 	USB_INTERFACE_INFO(USB_CLASS_MISC, 4, 1),
+-	.driver_info = (unsigned long) &rndis_info,
++	.driver_info = (unsigned long)&wwan_rndis_info,
+ },
+ 	{ },		// END
+ };
+diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
+index aeab2308b15008..724b93aa4f7eb3 100644
+--- a/drivers/net/usb/usbnet.c
++++ b/drivers/net/usb/usbnet.c
+@@ -530,7 +530,8 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
+ 	    netif_device_present (dev->net) &&
+ 	    test_bit(EVENT_DEV_OPEN, &dev->flags) &&
+ 	    !test_bit (EVENT_RX_HALT, &dev->flags) &&
+-	    !test_bit (EVENT_DEV_ASLEEP, &dev->flags)) {
++	    !test_bit (EVENT_DEV_ASLEEP, &dev->flags) &&
++	    !usbnet_going_away(dev)) {
+ 		switch (retval = usb_submit_urb (urb, GFP_ATOMIC)) {
+ 		case -EPIPE:
+ 			usbnet_defer_kevent (dev, EVENT_RX_HALT);
+@@ -551,8 +552,7 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
+ 			tasklet_schedule (&dev->bh);
+ 			break;
+ 		case 0:
+-			if (!usbnet_going_away(dev))
+-				__usbnet_queue_skb(&dev->rxq, skb, rx_start);
++			__usbnet_queue_skb(&dev->rxq, skb, rx_start);
+ 		}
+ 	} else {
+ 		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+index 8a1e3376424487..cfdd92564060af 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+@@ -1167,6 +1167,7 @@ static int brcmf_ops_sdio_suspend(struct device *dev)
+ 	struct brcmf_bus *bus_if;
+ 	struct brcmf_sdio_dev *sdiodev;
+ 	mmc_pm_flag_t sdio_flags;
++	bool cap_power_off;
+ 	int ret = 0;
+ 
+ 	func = container_of(dev, struct sdio_func, dev);
+@@ -1174,19 +1175,23 @@ static int brcmf_ops_sdio_suspend(struct device *dev)
+ 	if (func->num != 1)
+ 		return 0;
+ 
++	cap_power_off = !!(func->card->host->caps & MMC_CAP_POWER_OFF_CARD);
+ 
+ 	bus_if = dev_get_drvdata(dev);
+ 	sdiodev = bus_if->bus_priv.sdio;
+ 
+-	if (sdiodev->wowl_enabled) {
++	if (sdiodev->wowl_enabled || !cap_power_off) {
+ 		brcmf_sdiod_freezer_on(sdiodev);
+ 		brcmf_sdio_wd_timer(sdiodev->bus, 0);
+ 
+ 		sdio_flags = MMC_PM_KEEP_POWER;
+-		if (sdiodev->settings->bus.sdio.oob_irq_supported)
+-			enable_irq_wake(sdiodev->settings->bus.sdio.oob_irq_nr);
+-		else
+-			sdio_flags |= MMC_PM_WAKE_SDIO_IRQ;
++
++		if (sdiodev->wowl_enabled) {
++			if (sdiodev->settings->bus.sdio.oob_irq_supported)
++				enable_irq_wake(sdiodev->settings->bus.sdio.oob_irq_nr);
++			else
++				sdio_flags |= MMC_PM_WAKE_SDIO_IRQ;
++		}
+ 
+ 		if (sdio_set_host_pm_flags(sdiodev->func1, sdio_flags))
+ 			brcmf_err("Failed to set pm_flags %x\n", sdio_flags);
+@@ -1208,18 +1213,19 @@ static int brcmf_ops_sdio_resume(struct device *dev)
+ 	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
+ 	struct sdio_func *func = container_of(dev, struct sdio_func, dev);
+ 	int ret = 0;
++	bool cap_power_off = !!(func->card->host->caps & MMC_CAP_POWER_OFF_CARD);
+ 
+ 	brcmf_dbg(SDIO, "Enter: F%d\n", func->num);
+ 	if (func->num != 2)
+ 		return 0;
+ 
+-	if (!sdiodev->wowl_enabled) {
++	if (!sdiodev->wowl_enabled && cap_power_off) {
+ 		/* bus was powered off and device removed, probe again */
+ 		ret = brcmf_sdiod_probe(sdiodev);
+ 		if (ret)
+ 			brcmf_err("Failed to probe device on resume\n");
+ 	} else {
+-		if (sdiodev->settings->bus.sdio.oob_irq_supported)
++		if (sdiodev->wowl_enabled && sdiodev->settings->bus.sdio.oob_irq_supported)
+ 			disable_irq_wake(sdiodev->settings->bus.sdio.oob_irq_nr);
+ 
+ 		brcmf_sdiod_freezer_off(sdiodev);
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+index fb2ea38e89acab..6594216f873c47 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
++++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+@@ -558,41 +558,71 @@ static void iwl_dump_prph(struct iwl_fw_runtime *fwrt,
+ }
+ 
+ /*
+- * alloc_sgtable - allocates scallerlist table in the given size,
+- * fills it with pages and returns it
++ * alloc_sgtable - allocates (chained) scatterlist in the given size,
++ *	fills it with pages and returns it
+  * @size: the size (in bytes) of the table
+-*/
+-static struct scatterlist *alloc_sgtable(int size)
++ */
++static struct scatterlist *alloc_sgtable(ssize_t size)
+ {
+-	int alloc_size, nents, i;
+-	struct page *new_page;
+-	struct scatterlist *iter;
+-	struct scatterlist *table;
++	struct scatterlist *result = NULL, *prev;
++	int nents, i, n_prev;
+ 
+ 	nents = DIV_ROUND_UP(size, PAGE_SIZE);
+-	table = kcalloc(nents, sizeof(*table), GFP_KERNEL);
+-	if (!table)
+-		return NULL;
+-	sg_init_table(table, nents);
+-	iter = table;
+-	for_each_sg(table, iter, sg_nents(table), i) {
+-		new_page = alloc_page(GFP_KERNEL);
+-		if (!new_page) {
+-			/* release all previous allocated pages in the table */
+-			iter = table;
+-			for_each_sg(table, iter, sg_nents(table), i) {
+-				new_page = sg_page(iter);
+-				if (new_page)
+-					__free_page(new_page);
+-			}
+-			kfree(table);
++
++#define N_ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(*result))
++	/*
++	 * We need an additional entry for table chaining,
++	 * this ensures the loop can finish i.e. we can
++	 * fit at least two entries per page (obviously,
++	 * many more really fit.)
++	 */
++	BUILD_BUG_ON(N_ENTRIES_PER_PAGE < 2);
++
++	while (nents > 0) {
++		struct scatterlist *new, *iter;
++		int n_fill, n_alloc;
++
++		if (nents <= N_ENTRIES_PER_PAGE) {
++			/* last needed table */
++			n_fill = nents;
++			n_alloc = nents;
++			nents = 0;
++		} else {
++			/* fill a page with entries */
++			n_alloc = N_ENTRIES_PER_PAGE;
++			/* reserve one for chaining */
++			n_fill = n_alloc - 1;
++			nents -= n_fill;
++		}
++
++		new = kcalloc(n_alloc, sizeof(*new), GFP_KERNEL);
++		if (!new) {
++			if (result)
++				_devcd_free_sgtable(result);
+ 			return NULL;
+ 		}
+-		alloc_size = min_t(int, size, PAGE_SIZE);
+-		size -= PAGE_SIZE;
+-		sg_set_page(iter, new_page, alloc_size, 0);
++		sg_init_table(new, n_alloc);
++
++		if (!result)
++			result = new;
++		else
++			sg_chain(prev, n_prev, new);
++		prev = new;
++		n_prev = n_alloc;
++
++		for_each_sg(new, iter, n_fill, i) {
++			struct page *new_page = alloc_page(GFP_KERNEL);
++
++			if (!new_page) {
++				_devcd_free_sgtable(result);
++				return NULL;
++			}
++
++			sg_set_page(iter, new_page, PAGE_SIZE, 0);
++		}
+ 	}
+-	return table;
++
++	return result;
+ }
+ 
+ static void iwl_fw_get_prph_len(struct iwl_fw_runtime *fwrt,
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+index 65f8933c34b420..0b52d77f578375 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+@@ -995,7 +995,7 @@ iwl_mvm_decode_he_phy_ru_alloc(struct iwl_mvm_rx_phy_data *phy_data,
+ 	 */
+ 	u8 ru = le32_get_bits(phy_data->d1, IWL_RX_PHY_DATA1_HE_RU_ALLOC_MASK);
+ 	u32 rate_n_flags = phy_data->rate_n_flags;
+-	u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK_V1;
++	u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK;
+ 	u8 offs = 0;
+ 
+ 	rx_status->bw = RATE_INFO_BW_HE_RU;
+@@ -1050,13 +1050,13 @@ iwl_mvm_decode_he_phy_ru_alloc(struct iwl_mvm_rx_phy_data *phy_data,
+ 
+ 	if (he_mu)
+ 		he_mu->flags2 |=
+-			le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK_V1,
++			le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK,
+ 						   rate_n_flags),
+ 					 IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW);
+-	else if (he_type == RATE_MCS_HE_TYPE_TRIG_V1)
++	else if (he_type == RATE_MCS_HE_TYPE_TRIG)
+ 		he->data6 |=
+ 			cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW_KNOWN) |
+-			le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK_V1,
++			le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK,
+ 						   rate_n_flags),
+ 					 IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW);
+ }
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+index e2dfd3670c4c93..6a3629f71caaa7 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+@@ -805,6 +805,7 @@ int mt7921_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ 	msta->deflink.wcid.phy_idx = mvif->bss_conf.mt76.band_idx;
+ 	msta->deflink.wcid.tx_info |= MT_WCID_TX_INFO_SET;
+ 	msta->deflink.last_txs = jiffies;
++	msta->deflink.sta = msta;
+ 
+ 	ret = mt76_connac_pm_wake(&dev->mphy, &dev->pm);
+ 	if (ret)
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
+index ce3d8197b026a6..c7eba60897d276 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
+@@ -3117,7 +3117,6 @@ __mt7925_mcu_set_clc(struct mt792x_dev *dev, u8 *alpha2,
+ 
+ 		.idx = idx,
+ 		.env = env_cap,
+-		.acpi_conf = mt792x_acpi_get_flags(&dev->phy),
+ 	};
+ 	int ret, valid_cnt = 0;
+ 	u8 i, *pos;
+diff --git a/drivers/ntb/hw/intel/ntb_hw_gen3.c b/drivers/ntb/hw/intel/ntb_hw_gen3.c
+index ffcfc3e02c3532..a5aa96a31f4a64 100644
+--- a/drivers/ntb/hw/intel/ntb_hw_gen3.c
++++ b/drivers/ntb/hw/intel/ntb_hw_gen3.c
+@@ -215,6 +215,9 @@ static int gen3_init_ntb(struct intel_ntb_dev *ndev)
+ 	}
+ 
+ 	ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;
++	/* Make sure we are not using DB's used for link status */
++	if (ndev->hwerr_flags & NTB_HWERR_MSIX_VECTOR32_BAD)
++		ndev->db_valid_mask &= ~ndev->db_link_mask;
+ 
+ 	ndev->reg->db_iowrite(ndev->db_valid_mask,
+ 			      ndev->self_mmio +
+diff --git a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
+index ad1786be2554b3..f851397b65d6e5 100644
+--- a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
++++ b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
+@@ -288,7 +288,7 @@ static int switchtec_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int widx,
+ 	if (size != 0 && xlate_pos < 12)
+ 		return -EINVAL;
+ 
+-	if (!IS_ALIGNED(addr, BIT_ULL(xlate_pos))) {
++	if (xlate_pos >= 0 && !IS_ALIGNED(addr, BIT_ULL(xlate_pos))) {
+ 		/*
+ 		 * In certain circumstances we can get a buffer that is
+ 		 * not aligned to its size. (Most of the time
+diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c
+index 72bc1d017a46ee..dfd175f79e8f08 100644
+--- a/drivers/ntb/test/ntb_perf.c
++++ b/drivers/ntb/test/ntb_perf.c
+@@ -839,10 +839,8 @@ static int perf_copy_chunk(struct perf_thread *pthr,
+ 	dma_set_unmap(tx, unmap);
+ 
+ 	ret = dma_submit_error(dmaengine_submit(tx));
+-	if (ret) {
+-		dmaengine_unmap_put(unmap);
++	if (ret)
+ 		goto err_free_resource;
+-	}
+ 
+ 	dmaengine_unmap_put(unmap);
+ 
+diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
+index e4daac9c244015..a1b3c538a4bd2e 100644
+--- a/drivers/nvme/host/ioctl.c
++++ b/drivers/nvme/host/ioctl.c
+@@ -141,7 +141,7 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
+ 		struct iov_iter iter;
+ 
+ 		/* fixedbufs is only for non-vectored io */
+-		if (WARN_ON_ONCE(flags & NVME_IOCTL_VEC)) {
++		if (flags & NVME_IOCTL_VEC) {
+ 			ret = -EINVAL;
+ 			goto out;
+ 		}
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index 1d3205f08af847..af45a1b865ee10 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -1413,9 +1413,20 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
+ 	struct nvme_dev *dev = nvmeq->dev;
+ 	struct request *abort_req;
+ 	struct nvme_command cmd = { };
++	struct pci_dev *pdev = to_pci_dev(dev->dev);
+ 	u32 csts = readl(dev->bar + NVME_REG_CSTS);
+ 	u8 opcode;
+ 
++	/*
++	 * Shutdown the device immediately if we see it is disconnected. This
++	 * unblocks PCIe error handling if the nvme driver is waiting in
++	 * error_resume for a device that has been removed. We can't unbind the
++	 * driver while the driver's error callback is waiting to complete, so
++	 * we're relying on a timeout to break that deadlock if a removal
++	 * occurs while reset work is running.
++	 */
++	if (pci_dev_is_disconnected(pdev))
++		nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
+ 	if (nvme_state_terminal(&dev->ctrl))
+ 		goto disable;
+ 
+@@ -1423,7 +1434,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
+ 	 * the recovery mechanism will surely fail.
+ 	 */
+ 	mb();
+-	if (pci_channel_offline(to_pci_dev(dev->dev)))
++	if (pci_channel_offline(pdev))
+ 		return BLK_EH_RESET_TIMER;
+ 
+ 	/*
+@@ -1984,6 +1995,18 @@ static void nvme_map_cmb(struct nvme_dev *dev)
+ 	if (offset > bar_size)
+ 		return;
+ 
++	/*
++	 * Controllers may support a CMB size larger than their BAR, for
++	 * example, due to being behind a bridge. Reduce the CMB to the
++	 * reported size of the BAR
++	 */
++	size = min(size, bar_size - offset);
++
++	if (!IS_ALIGNED(size, memremap_compat_align()) ||
++	    !IS_ALIGNED(pci_resource_start(pdev, bar),
++			memremap_compat_align()))
++		return;
++
+ 	/*
+ 	 * Tell the controller about the host side address mapping the CMB,
+ 	 * and enable CMB decoding for the NVMe 1.4+ scheme:
+@@ -1994,17 +2017,10 @@ static void nvme_map_cmb(struct nvme_dev *dev)
+ 			     dev->bar + NVME_REG_CMBMSC);
+ 	}
+ 
+-	/*
+-	 * Controllers may support a CMB size larger than their BAR,
+-	 * for example, due to being behind a bridge. Reduce the CMB to
+-	 * the reported size of the BAR
+-	 */
+-	if (size > bar_size - offset)
+-		size = bar_size - offset;
+-
+ 	if (pci_p2pdma_add_resource(pdev, bar, size, offset)) {
+ 		dev_warn(dev->ctrl.device,
+ 			 "failed to register the CMB\n");
++		hi_lo_writeq(0, dev->bar + NVME_REG_CMBMSC);
+ 		return;
+ 	}
+ 
+diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
+index eeb05b7bc0fd01..854aa6a070ca87 100644
+--- a/drivers/nvme/host/tcp.c
++++ b/drivers/nvme/host/tcp.c
+@@ -2736,6 +2736,7 @@ static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
+ {
+ 	struct nvme_tcp_queue *queue = hctx->driver_data;
+ 	struct sock *sk = queue->sock->sk;
++	int ret;
+ 
+ 	if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
+ 		return 0;
+@@ -2743,9 +2744,9 @@ static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
+ 	set_bit(NVME_TCP_Q_POLLING, &queue->flags);
+ 	if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue))
+ 		sk_busy_loop(sk, true);
+-	nvme_tcp_try_recv(queue);
++	ret = nvme_tcp_try_recv(queue);
+ 	clear_bit(NVME_TCP_Q_POLLING, &queue->flags);
+-	return queue->nr_cqe;
++	return ret < 0 ? ret : queue->nr_cqe;
+ }
+ 
+ static int nvme_tcp_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
+diff --git a/drivers/nvme/target/debugfs.c b/drivers/nvme/target/debugfs.c
+index 220c7391fc19ad..c6571fbd35e30e 100644
+--- a/drivers/nvme/target/debugfs.c
++++ b/drivers/nvme/target/debugfs.c
+@@ -78,7 +78,7 @@ static int nvmet_ctrl_state_show(struct seq_file *m, void *p)
+ 	bool sep = false;
+ 	int i;
+ 
+-	for (i = 0; i < 7; i++) {
++	for (i = 0; i < ARRAY_SIZE(csts_state_names); i++) {
+ 		int state = BIT(i);
+ 
+ 		if (!(ctrl->csts & state))
+diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c
+index e0cc4560dfde7f..0bf4cde34f5171 100644
+--- a/drivers/pci/controller/cadence/pcie-cadence-ep.c
++++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c
+@@ -352,8 +352,7 @@ static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn, u8 intx,
+ 	spin_unlock_irqrestore(&ep->lock, flags);
+ 
+ 	offset = CDNS_PCIE_NORMAL_MSG_ROUTING(MSG_ROUTING_LOCAL) |
+-		 CDNS_PCIE_NORMAL_MSG_CODE(msg_code) |
+-		 CDNS_PCIE_MSG_NO_DATA;
++		 CDNS_PCIE_NORMAL_MSG_CODE(msg_code);
+ 	writel(0, ep->irq_cpu_addr + offset);
+ }
+ 
+diff --git a/drivers/pci/controller/cadence/pcie-cadence.h b/drivers/pci/controller/cadence/pcie-cadence.h
+index f5eeff834ec192..39ee9945c903ec 100644
+--- a/drivers/pci/controller/cadence/pcie-cadence.h
++++ b/drivers/pci/controller/cadence/pcie-cadence.h
+@@ -246,7 +246,7 @@ struct cdns_pcie_rp_ib_bar {
+ #define CDNS_PCIE_NORMAL_MSG_CODE_MASK		GENMASK(15, 8)
+ #define CDNS_PCIE_NORMAL_MSG_CODE(code) \
+ 	(((code) << 8) & CDNS_PCIE_NORMAL_MSG_CODE_MASK)
+-#define CDNS_PCIE_MSG_NO_DATA			BIT(16)
++#define CDNS_PCIE_MSG_DATA			BIT(16)
+ 
+ struct cdns_pcie;
+ 
+diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
+index b58e89ea566b8d..dea19250598a66 100644
+--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
++++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
+@@ -755,6 +755,7 @@ int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep)
+ 	if (ret)
+ 		return ret;
+ 
++	ret = -ENOMEM;
+ 	if (!ep->ib_window_map) {
+ 		ep->ib_window_map = devm_bitmap_zalloc(dev, pci->num_ib_windows,
+ 						       GFP_KERNEL);
+diff --git a/drivers/pci/controller/dwc/pcie-histb.c b/drivers/pci/controller/dwc/pcie-histb.c
+index 7a11c618b9d9c4..5538e5bf99fb68 100644
+--- a/drivers/pci/controller/dwc/pcie-histb.c
++++ b/drivers/pci/controller/dwc/pcie-histb.c
+@@ -409,16 +409,21 @@ static int histb_pcie_probe(struct platform_device *pdev)
+ 	ret = histb_pcie_host_enable(pp);
+ 	if (ret) {
+ 		dev_err(dev, "failed to enable host\n");
+-		return ret;
++		goto err_exit_phy;
+ 	}
+ 
+ 	ret = dw_pcie_host_init(pp);
+ 	if (ret) {
+ 		dev_err(dev, "failed to initialize host\n");
+-		return ret;
++		goto err_exit_phy;
+ 	}
+ 
+ 	return 0;
++
++err_exit_phy:
++	phy_exit(hipcie->phy);
++
++	return ret;
+ }
+ 
+ static void histb_pcie_remove(struct platform_device *pdev)
+@@ -427,8 +432,7 @@ static void histb_pcie_remove(struct platform_device *pdev)
+ 
+ 	histb_pcie_host_disable(hipcie);
+ 
+-	if (hipcie->phy)
+-		phy_exit(hipcie->phy);
++	phy_exit(hipcie->phy);
+ }
+ 
+ static const struct of_device_id histb_pcie_of_match[] = {
+diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c
+index 9321280f6edbab..582fa110708781 100644
+--- a/drivers/pci/controller/pcie-brcmstb.c
++++ b/drivers/pci/controller/pcie-brcmstb.c
+@@ -403,10 +403,10 @@ static int brcm_pcie_set_ssc(struct brcm_pcie *pcie)
+ static void brcm_pcie_set_gen(struct brcm_pcie *pcie, int gen)
+ {
+ 	u16 lnkctl2 = readw(pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCTL2);
+-	u32 lnkcap = readl(pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCAP);
++	u32 lnkcap = readl(pcie->base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
+ 
+ 	lnkcap = (lnkcap & ~PCI_EXP_LNKCAP_SLS) | gen;
+-	writel(lnkcap, pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCAP);
++	writel(lnkcap, pcie->base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
+ 
+ 	lnkctl2 = (lnkctl2 & ~0xf) | gen;
+ 	writew(lnkctl2, pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCTL2);
+@@ -1276,6 +1276,10 @@ static int brcm_pcie_start_link(struct brcm_pcie *pcie)
+ 	bool ssc_good = false;
+ 	int ret, i;
+ 
++	/* Limit the generation if specified */
++	if (pcie->gen)
++		brcm_pcie_set_gen(pcie, pcie->gen);
++
+ 	/* Unassert the fundamental reset */
+ 	ret = pcie->perst_set(pcie, 0);
+ 	if (ret)
+@@ -1302,9 +1306,6 @@ static int brcm_pcie_start_link(struct brcm_pcie *pcie)
+ 
+ 	brcm_config_clkreq(pcie);
+ 
+-	if (pcie->gen)
+-		brcm_pcie_set_gen(pcie, pcie->gen);
+-
+ 	if (pcie->ssc) {
+ 		ret = brcm_pcie_set_ssc(pcie);
+ 		if (ret == 0)
+@@ -1367,7 +1368,8 @@ static int brcm_pcie_add_bus(struct pci_bus *bus)
+ 
+ 		ret = regulator_bulk_get(dev, sr->num_supplies, sr->supplies);
+ 		if (ret) {
+-			dev_info(dev, "No regulators for downstream device\n");
++			dev_info(dev, "Did not get regulators, err=%d\n", ret);
++			pcie->sr = NULL;
+ 			goto no_regulators;
+ 		}
+ 
+@@ -1390,7 +1392,7 @@ static void brcm_pcie_remove_bus(struct pci_bus *bus)
+ 	struct subdev_regulators *sr = pcie->sr;
+ 	struct device *dev = &bus->dev;
+ 
+-	if (!sr)
++	if (!sr || !bus->parent || !pci_is_root_bus(bus->parent))
+ 		return;
+ 
+ 	if (regulator_bulk_disable(sr->num_supplies, sr->supplies))
+diff --git a/drivers/pci/controller/pcie-xilinx-cpm.c b/drivers/pci/controller/pcie-xilinx-cpm.c
+index a0f5e1d67b04c6..1594d9e9e637af 100644
+--- a/drivers/pci/controller/pcie-xilinx-cpm.c
++++ b/drivers/pci/controller/pcie-xilinx-cpm.c
+@@ -570,15 +570,17 @@ static int xilinx_cpm_pcie_probe(struct platform_device *pdev)
+ 		return err;
+ 
+ 	bus = resource_list_first_type(&bridge->windows, IORESOURCE_BUS);
+-	if (!bus)
+-		return -ENODEV;
++	if (!bus) {
++		err = -ENODEV;
++		goto err_free_irq_domains;
++	}
+ 
+ 	port->variant = of_device_get_match_data(dev);
+ 
+ 	err = xilinx_cpm_pcie_parse_dt(port, bus->res);
+ 	if (err) {
+ 		dev_err(dev, "Parsing DT failed\n");
+-		goto err_parse_dt;
++		goto err_free_irq_domains;
+ 	}
+ 
+ 	xilinx_cpm_pcie_init_port(port);
+@@ -602,7 +604,7 @@ static int xilinx_cpm_pcie_probe(struct platform_device *pdev)
+ 	xilinx_cpm_free_interrupts(port);
+ err_setup_irq:
+ 	pci_ecam_free(port->cfg);
+-err_parse_dt:
++err_free_irq_domains:
+ 	xilinx_cpm_free_irq_domains(port);
+ 	return err;
+ }
+diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
+index 736ad8baa2a555..8f3e4c7de961f6 100644
+--- a/drivers/pci/hotplug/pciehp_hpc.c
++++ b/drivers/pci/hotplug/pciehp_hpc.c
+@@ -842,7 +842,9 @@ void pcie_enable_interrupt(struct controller *ctrl)
+ {
+ 	u16 mask;
+ 
+-	mask = PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_DLLSCE;
++	mask = PCI_EXP_SLTCTL_DLLSCE;
++	if (!pciehp_poll_mode)
++		mask |= PCI_EXP_SLTCTL_HPIE;
+ 	pcie_write_cmd(ctrl, mask, mask);
+ }
+ 
+diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
+index 3e5a117f5b5d60..5af4a804a4f896 100644
+--- a/drivers/pci/pci-sysfs.c
++++ b/drivers/pci/pci-sysfs.c
+@@ -1444,7 +1444,7 @@ static ssize_t __resource_resize_store(struct device *dev, int n,
+ 		return -EINVAL;
+ 
+ 	device_lock(dev);
+-	if (dev->driver) {
++	if (dev->driver || pci_num_vf(pdev)) {
+ 		ret = -EBUSY;
+ 		goto unlock;
+ 	}
+@@ -1466,7 +1466,7 @@ static ssize_t __resource_resize_store(struct device *dev, int n,
+ 
+ 	pci_remove_resource_files(pdev);
+ 
+-	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
++	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) {
+ 		if (pci_resource_len(pdev, i) &&
+ 		    pci_resource_flags(pdev, i) == flags)
+ 			pci_release_resource(pdev, i);
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 1aa5d6f98ebda2..169aa8fd74a11f 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -955,8 +955,10 @@ struct pci_acs {
+ };
+ 
+ static void __pci_config_acs(struct pci_dev *dev, struct pci_acs *caps,
+-			     const char *p, u16 mask, u16 flags)
++			     const char *p, const u16 acs_mask, const u16 acs_flags)
+ {
++	u16 flags = acs_flags;
++	u16 mask = acs_mask;
+ 	char *delimit;
+ 	int ret = 0;
+ 
+@@ -964,7 +966,7 @@ static void __pci_config_acs(struct pci_dev *dev, struct pci_acs *caps,
+ 		return;
+ 
+ 	while (*p) {
+-		if (!mask) {
++		if (!acs_mask) {
+ 			/* Check for ACS flags */
+ 			delimit = strstr(p, "@");
+ 			if (delimit) {
+@@ -972,6 +974,8 @@ static void __pci_config_acs(struct pci_dev *dev, struct pci_acs *caps,
+ 				u32 shift = 0;
+ 
+ 				end = delimit - p - 1;
++				mask = 0;
++				flags = 0;
+ 
+ 				while (end > -1) {
+ 					if (*(p + end) == '0') {
+@@ -1028,10 +1032,14 @@ static void __pci_config_acs(struct pci_dev *dev, struct pci_acs *caps,
+ 
+ 	pci_dbg(dev, "ACS mask  = %#06x\n", mask);
+ 	pci_dbg(dev, "ACS flags = %#06x\n", flags);
++	pci_dbg(dev, "ACS control = %#06x\n", caps->ctrl);
++	pci_dbg(dev, "ACS fw_ctrl = %#06x\n", caps->fw_ctrl);
+ 
+-	/* If mask is 0 then we copy the bit from the firmware setting. */
+-	caps->ctrl = (caps->ctrl & ~mask) | (caps->fw_ctrl & mask);
+-	caps->ctrl |= flags;
++	/*
++	 * For mask bits that are 0, copy them from the firmware setting
++	 * and apply flags for all the mask bits that are 1.
++	 */
++	caps->ctrl = (caps->fw_ctrl & ~mask) | (flags & mask);
+ 
+ 	pci_info(dev, "Configured ACS to %#06x\n", caps->ctrl);
+ }
+@@ -5520,6 +5528,8 @@ static bool pci_bus_resettable(struct pci_bus *bus)
+ 		return false;
+ 
+ 	list_for_each_entry(dev, &bus->devices, bus_list) {
++		if (!pci_reset_supported(dev))
++			return false;
+ 		if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
+ 		    (dev->subordinate && !pci_bus_resettable(dev->subordinate)))
+ 			return false;
+@@ -5596,6 +5606,8 @@ static bool pci_slot_resettable(struct pci_slot *slot)
+ 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
+ 		if (!dev->slot || dev->slot != slot)
+ 			continue;
++		if (!pci_reset_supported(dev))
++			return false;
+ 		if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
+ 		    (dev->subordinate && !pci_bus_resettable(dev->subordinate)))
+ 			return false;
+diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
+index cee2365e54b8b2..62650a2f00ccc1 100644
+--- a/drivers/pci/pcie/aspm.c
++++ b/drivers/pci/pcie/aspm.c
+@@ -1242,16 +1242,16 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev)
+ 	parent_link = link->parent;
+ 
+ 	/*
+-	 * link->downstream is a pointer to the pci_dev of function 0.  If
+-	 * we remove that function, the pci_dev is about to be deallocated,
+-	 * so we can't use link->downstream again.  Free the link state to
+-	 * avoid this.
++	 * Free the parent link state, no later than function 0 (i.e.
++	 * link->downstream) being removed.
+ 	 *
+-	 * If we're removing a non-0 function, it's possible we could
+-	 * retain the link state, but PCIe r6.0, sec 7.5.3.7, recommends
+-	 * programming the same ASPM Control value for all functions of
+-	 * multi-function devices, so disable ASPM for all of them.
++	 * Do not free the link state any earlier. If function 0 is a
++	 * switch upstream port, this link state is parent_link to all
++	 * subordinate ones.
+ 	 */
++	if (pdev != link->downstream)
++		goto out;
++
+ 	pcie_config_aspm_link(link, 0);
+ 	list_del(&link->sibling);
+ 	free_link_state(link);
+@@ -1262,6 +1262,7 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev)
+ 		pcie_config_aspm_path(parent_link);
+ 	}
+ 
++ out:
+ 	mutex_unlock(&aspm_lock);
+ 	up_read(&pci_bus_sem);
+ }
+diff --git a/drivers/pci/pcie/portdrv.c b/drivers/pci/pcie/portdrv.c
+index 6af5e042587285..604c055f607867 100644
+--- a/drivers/pci/pcie/portdrv.c
++++ b/drivers/pci/pcie/portdrv.c
+@@ -228,10 +228,12 @@ static int get_port_device_capability(struct pci_dev *dev)
+ 
+ 		/*
+ 		 * Disable hot-plug interrupts in case they have been enabled
+-		 * by the BIOS and the hot-plug service driver is not loaded.
++		 * by the BIOS and the hot-plug service driver won't be loaded
++		 * to handle them.
+ 		 */
+-		pcie_capability_clear_word(dev, PCI_EXP_SLTCTL,
+-			  PCI_EXP_SLTCTL_CCIE | PCI_EXP_SLTCTL_HPIE);
++		if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
++			pcie_capability_clear_word(dev, PCI_EXP_SLTCTL,
++				PCI_EXP_SLTCTL_CCIE | PCI_EXP_SLTCTL_HPIE);
+ 	}
+ 
+ #ifdef CONFIG_PCIEAER
+diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
+index ebb0c1d5cae255..0e757b23a09f0f 100644
+--- a/drivers/pci/probe.c
++++ b/drivers/pci/probe.c
+@@ -950,10 +950,9 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
+ 	/* Temporarily move resources off the list */
+ 	list_splice_init(&bridge->windows, &resources);
+ 	err = device_add(&bridge->dev);
+-	if (err) {
+-		put_device(&bridge->dev);
++	if (err)
+ 		goto free;
+-	}
++
+ 	bus->bridge = get_device(&bridge->dev);
+ 	device_enable_async_suspend(bus->bridge);
+ 	pci_set_bus_of_node(bus);
+diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
+index 23082bc0ca37ae..f16c7ce3bf3fc8 100644
+--- a/drivers/pci/setup-bus.c
++++ b/drivers/pci/setup-bus.c
+@@ -1150,7 +1150,6 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
+ 		min_align = 1ULL << (max_order + __ffs(SZ_1M));
+ 		min_align = max(min_align, win_align);
+ 		size0 = calculate_memsize(size, min_size, 0, 0, resource_size(b_res), win_align);
+-		add_align = win_align;
+ 		pci_info(bus->self, "bridge window %pR to %pR requires relaxed alignment rules\n",
+ 			 b_res, &bus->busn_res);
+ 	}
+@@ -2105,8 +2104,7 @@ pci_root_bus_distribute_available_resources(struct pci_bus *bus,
+ 		 * in case of root bus.
+ 		 */
+ 		if (bridge && pci_bridge_resources_not_assigned(dev))
+-			pci_bridge_distribute_available_resources(bridge,
+-								  add_list);
++			pci_bridge_distribute_available_resources(dev, add_list);
+ 		else
+ 			pci_root_bus_distribute_available_resources(b, add_list);
+ 	}
+diff --git a/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c b/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c
+index 69c3ec0938f74f..be6f1ca9095aaa 100644
+--- a/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c
++++ b/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c
+@@ -266,11 +266,22 @@ enum rk_hdptx_reset {
+ 	RST_MAX
+ };
+ 
++#define MAX_HDPTX_PHY_NUM	2
++
++struct rk_hdptx_phy_cfg {
++	unsigned int num_phys;
++	unsigned int phy_ids[MAX_HDPTX_PHY_NUM];
++};
++
+ struct rk_hdptx_phy {
+ 	struct device *dev;
+ 	struct regmap *regmap;
+ 	struct regmap *grf;
+ 
++	/* PHY const config */
++	const struct rk_hdptx_phy_cfg *cfgs;
++	int phy_id;
++
+ 	struct phy *phy;
+ 	struct phy_config *phy_cfg;
+ 	struct clk_bulk_data *clks;
+@@ -1019,15 +1030,14 @@ static int rk_hdptx_phy_clk_register(struct rk_hdptx_phy *hdptx)
+ 	struct device *dev = hdptx->dev;
+ 	const char *name, *pname;
+ 	struct clk *refclk;
+-	int ret, id;
++	int ret;
+ 
+ 	refclk = devm_clk_get(dev, "ref");
+ 	if (IS_ERR(refclk))
+ 		return dev_err_probe(dev, PTR_ERR(refclk),
+ 				     "Failed to get ref clock\n");
+ 
+-	id = of_alias_get_id(dev->of_node, "hdptxphy");
+-	name = id > 0 ? "clk_hdmiphy_pixel1" : "clk_hdmiphy_pixel0";
++	name = hdptx->phy_id > 0 ? "clk_hdmiphy_pixel1" : "clk_hdmiphy_pixel0";
+ 	pname = __clk_get_name(refclk);
+ 
+ 	hdptx->hw.init = CLK_HW_INIT(name, pname, &hdptx_phy_clk_ops,
+@@ -1070,8 +1080,9 @@ static int rk_hdptx_phy_probe(struct platform_device *pdev)
+ 	struct phy_provider *phy_provider;
+ 	struct device *dev = &pdev->dev;
+ 	struct rk_hdptx_phy *hdptx;
++	struct resource *res;
+ 	void __iomem *regs;
+-	int ret;
++	int ret, id;
+ 
+ 	hdptx = devm_kzalloc(dev, sizeof(*hdptx), GFP_KERNEL);
+ 	if (!hdptx)
+@@ -1079,11 +1090,27 @@ static int rk_hdptx_phy_probe(struct platform_device *pdev)
+ 
+ 	hdptx->dev = dev;
+ 
+-	regs = devm_platform_ioremap_resource(pdev, 0);
++	regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ 	if (IS_ERR(regs))
+ 		return dev_err_probe(dev, PTR_ERR(regs),
+ 				     "Failed to ioremap resource\n");
+ 
++	hdptx->cfgs = device_get_match_data(dev);
++	if (!hdptx->cfgs)
++		return dev_err_probe(dev, -EINVAL, "missing match data\n");
++
++	/* find the phy-id from the io address */
++	hdptx->phy_id = -ENODEV;
++	for (id = 0; id < hdptx->cfgs->num_phys; id++) {
++		if (res->start == hdptx->cfgs->phy_ids[id]) {
++			hdptx->phy_id = id;
++			break;
++		}
++	}
++
++	if (hdptx->phy_id < 0)
++		return dev_err_probe(dev, -ENODEV, "no matching device found\n");
++
+ 	ret = devm_clk_bulk_get_all(dev, &hdptx->clks);
+ 	if (ret < 0)
+ 		return dev_err_probe(dev, ret, "Failed to get clocks\n");
+@@ -1147,8 +1174,19 @@ static const struct dev_pm_ops rk_hdptx_phy_pm_ops = {
+ 		       rk_hdptx_phy_runtime_resume, NULL)
+ };
+ 
++static const struct rk_hdptx_phy_cfg rk3588_hdptx_phy_cfgs = {
++	.num_phys = 2,
++	.phy_ids = {
++		0xfed60000,
++		0xfed70000,
++	},
++};
++
+ static const struct of_device_id rk_hdptx_phy_of_match[] = {
+-	{ .compatible = "rockchip,rk3588-hdptx-phy", },
++	{
++		.compatible = "rockchip,rk3588-hdptx-phy",
++		.data = &rk3588_hdptx_phy_cfgs
++	},
+ 	{}
+ };
+ MODULE_DEVICE_TABLE(of, rk_hdptx_phy_of_match);
+diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
+index 928607a21d36db..f8abc69a39d162 100644
+--- a/drivers/pinctrl/intel/pinctrl-intel.c
++++ b/drivers/pinctrl/intel/pinctrl-intel.c
+@@ -1531,7 +1531,6 @@ static int intel_pinctrl_probe_pwm(struct intel_pinctrl *pctrl,
+ 		.clk_rate = 19200000,
+ 		.npwm = 1,
+ 		.base_unit_bits = 22,
+-		.bypass = true,
+ 	};
+ 	struct pwm_chip *chip;
+ 
+diff --git a/drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c b/drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c
+index d09a5e9b2eca53..f6a1e684a3864e 100644
+--- a/drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c
++++ b/drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c
+@@ -1290,12 +1290,14 @@ static struct npcm8xx_func npcm8xx_funcs[] = {
+ };
+ 
+ #define NPCM8XX_PINCFG(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q) \
+-	[a] { .fn0 = fn_ ## b, .reg0 = NPCM8XX_GCR_ ## c, .bit0 = d, \
++	[a] = {								  \
++			.flag = q,					  \
++			.fn0 = fn_ ## b, .reg0 = NPCM8XX_GCR_ ## c, .bit0 = d, \
+ 			.fn1 = fn_ ## e, .reg1 = NPCM8XX_GCR_ ## f, .bit1 = g, \
+ 			.fn2 = fn_ ## h, .reg2 = NPCM8XX_GCR_ ## i, .bit2 = j, \
+ 			.fn3 = fn_ ## k, .reg3 = NPCM8XX_GCR_ ## l, .bit3 = m, \
+ 			.fn4 = fn_ ## n, .reg4 = NPCM8XX_GCR_ ## o, .bit4 = p, \
+-			.flag = q }
++	}
+ 
+ /* Drive strength controlled by NPCM8XX_GP_N_ODSC */
+ #define DRIVE_STRENGTH_LO_SHIFT		8
+@@ -2361,8 +2363,8 @@ static int npcm8xx_gpio_fw(struct npcm8xx_pinctrl *pctrl)
+ 			return dev_err_probe(dev, ret, "gpio-ranges fail for GPIO bank %u\n", id);
+ 
+ 		ret = fwnode_irq_get(child, 0);
+-		if (!ret)
+-			return dev_err_probe(dev, ret, "No IRQ for GPIO bank %u\n", id);
++		if (ret < 0)
++			return dev_err_probe(dev, ret, "Failed to retrieve IRQ for bank %u\n", id);
+ 
+ 		pctrl->gpio_bank[id].irq = ret;
+ 		pctrl->gpio_bank[id].irq_chip = npcmgpio_irqchip;
+diff --git a/drivers/pinctrl/renesas/pinctrl-rza2.c b/drivers/pinctrl/renesas/pinctrl-rza2.c
+index af689d7c117f35..773eaf508565b0 100644
+--- a/drivers/pinctrl/renesas/pinctrl-rza2.c
++++ b/drivers/pinctrl/renesas/pinctrl-rza2.c
+@@ -253,6 +253,8 @@ static int rza2_gpio_register(struct rza2_pinctrl_priv *priv)
+ 		return ret;
+ 	}
+ 
++	of_node_put(of_args.np);
++
+ 	if ((of_args.args[0] != 0) ||
+ 	    (of_args.args[1] != 0) ||
+ 	    (of_args.args[2] != priv->npins)) {
+diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+index 5081c7d8064fae..d90685cfe2e1a4 100644
+--- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c
++++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+@@ -2583,6 +2583,8 @@ static int rzg2l_gpio_register(struct rzg2l_pinctrl *pctrl)
+ 	if (ret)
+ 		return dev_err_probe(pctrl->dev, ret, "Unable to parse gpio-ranges\n");
+ 
++	of_node_put(of_args.np);
++
+ 	if (of_args.args[0] != 0 || of_args.args[1] != 0 ||
+ 	    of_args.args[2] != pctrl->data->n_port_pins)
+ 		return dev_err_probe(pctrl->dev, -EINVAL,
+@@ -3180,6 +3182,7 @@ static struct platform_driver rzg2l_pinctrl_driver = {
+ 		.name = DRV_NAME,
+ 		.of_match_table = of_match_ptr(rzg2l_pinctrl_of_table),
+ 		.pm = pm_sleep_ptr(&rzg2l_pinctrl_pm_ops),
++		.suppress_bind_attrs = true,
+ 	},
+ 	.probe = rzg2l_pinctrl_probe,
+ };
+diff --git a/drivers/pinctrl/renesas/pinctrl-rzv2m.c b/drivers/pinctrl/renesas/pinctrl-rzv2m.c
+index 4062c56619f595..8c7169db4fcce6 100644
+--- a/drivers/pinctrl/renesas/pinctrl-rzv2m.c
++++ b/drivers/pinctrl/renesas/pinctrl-rzv2m.c
+@@ -940,6 +940,8 @@ static int rzv2m_gpio_register(struct rzv2m_pinctrl *pctrl)
+ 		return ret;
+ 	}
+ 
++	of_node_put(of_args.np);
++
+ 	if (of_args.args[0] != 0 || of_args.args[1] != 0 ||
+ 	    of_args.args[2] != pctrl->data->n_port_pins) {
+ 		dev_err(pctrl->dev, "gpio-ranges does not match selected SOC\n");
+diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.c b/drivers/pinctrl/tegra/pinctrl-tegra.c
+index c83e5a65e6801c..3b046450bd3ff8 100644
+--- a/drivers/pinctrl/tegra/pinctrl-tegra.c
++++ b/drivers/pinctrl/tegra/pinctrl-tegra.c
+@@ -270,6 +270,9 @@ static int tegra_pinctrl_set_mux(struct pinctrl_dev *pctldev,
+ 	val = pmx_readl(pmx, g->mux_bank, g->mux_reg);
+ 	val &= ~(0x3 << g->mux_bit);
+ 	val |= i << g->mux_bit;
++	/* Set the SFIO/GPIO selection to SFIO when under pinmux control */
++	if (pmx->soc->sfsel_in_mux)
++		val |= (1 << g->sfsel_bit);
+ 	pmx_writel(pmx, val, g->mux_bank, g->mux_reg);
+ 
+ 	return 0;
+diff --git a/drivers/platform/x86/amd/pmf/pmf.h b/drivers/platform/x86/amd/pmf/pmf.h
+index 8ce8816da9c168..43ba1b9aa1811a 100644
+--- a/drivers/platform/x86/amd/pmf/pmf.h
++++ b/drivers/platform/x86/amd/pmf/pmf.h
+@@ -105,9 +105,12 @@ struct cookie_header {
+ #define PMF_TA_IF_VERSION_MAJOR				1
+ #define TA_PMF_ACTION_MAX					32
+ #define TA_PMF_UNDO_MAX						8
+-#define TA_OUTPUT_RESERVED_MEM				906
++#define TA_OUTPUT_RESERVED_MEM				922
+ #define MAX_OPERATION_PARAMS					4
+ 
++#define TA_ERROR_CRYPTO_INVALID_PARAM				0x20002
++#define TA_ERROR_CRYPTO_BIN_TOO_LARGE				0x2000d
++
+ #define PMF_IF_V1		1
+ #define PMF_IF_V2		2
+ 
+diff --git a/drivers/platform/x86/amd/pmf/tee-if.c b/drivers/platform/x86/amd/pmf/tee-if.c
+index 19c27b6e46663c..09131507d7a925 100644
+--- a/drivers/platform/x86/amd/pmf/tee-if.c
++++ b/drivers/platform/x86/amd/pmf/tee-if.c
+@@ -27,8 +27,11 @@ module_param(pb_side_load, bool, 0444);
+ MODULE_PARM_DESC(pb_side_load, "Sideload policy binaries debug policy failures");
+ #endif
+ 
+-static const uuid_t amd_pmf_ta_uuid = UUID_INIT(0x6fd93b77, 0x3fb8, 0x524d,
+-						0xb1, 0x2d, 0xc5, 0x29, 0xb1, 0x3d, 0x85, 0x43);
++static const uuid_t amd_pmf_ta_uuid[] = { UUID_INIT(0xd9b39bf2, 0x66bd, 0x4154, 0xaf, 0xb8, 0x8a,
++						    0xcc, 0x2b, 0x2b, 0x60, 0xd6),
++					  UUID_INIT(0x6fd93b77, 0x3fb8, 0x524d, 0xb1, 0x2d, 0xc5,
++						    0x29, 0xb1, 0x3d, 0x85, 0x43),
++					};
+ 
+ static const char *amd_pmf_uevent_as_str(unsigned int state)
+ {
+@@ -321,9 +324,9 @@ static int amd_pmf_start_policy_engine(struct amd_pmf_dev *dev)
+ 		 */
+ 		schedule_delayed_work(&dev->pb_work, msecs_to_jiffies(pb_actions_ms * 3));
+ 	} else {
+-		dev_err(dev->dev, "ta invoke cmd init failed err: %x\n", res);
++		dev_dbg(dev->dev, "ta invoke cmd init failed err: %x\n", res);
+ 		dev->smart_pc_enabled = false;
+-		return -EIO;
++		return res;
+ 	}
+ 
+ 	return 0;
+@@ -390,12 +393,12 @@ static int amd_pmf_amdtee_ta_match(struct tee_ioctl_version_data *ver, const voi
+ 	return ver->impl_id == TEE_IMPL_ID_AMDTEE;
+ }
+ 
+-static int amd_pmf_ta_open_session(struct tee_context *ctx, u32 *id)
++static int amd_pmf_ta_open_session(struct tee_context *ctx, u32 *id, const uuid_t *uuid)
+ {
+ 	struct tee_ioctl_open_session_arg sess_arg = {};
+ 	int rc;
+ 
+-	export_uuid(sess_arg.uuid, &amd_pmf_ta_uuid);
++	export_uuid(sess_arg.uuid, uuid);
+ 	sess_arg.clnt_login = TEE_IOCTL_LOGIN_PUBLIC;
+ 	sess_arg.num_params = 0;
+ 
+@@ -434,7 +437,7 @@ static int amd_pmf_register_input_device(struct amd_pmf_dev *dev)
+ 	return 0;
+ }
+ 
+-static int amd_pmf_tee_init(struct amd_pmf_dev *dev)
++static int amd_pmf_tee_init(struct amd_pmf_dev *dev, const uuid_t *uuid)
+ {
+ 	u32 size;
+ 	int ret;
+@@ -445,7 +448,7 @@ static int amd_pmf_tee_init(struct amd_pmf_dev *dev)
+ 		return PTR_ERR(dev->tee_ctx);
+ 	}
+ 
+-	ret = amd_pmf_ta_open_session(dev->tee_ctx, &dev->session_id);
++	ret = amd_pmf_ta_open_session(dev->tee_ctx, &dev->session_id, uuid);
+ 	if (ret) {
+ 		dev_err(dev->dev, "Failed to open TA session (%d)\n", ret);
+ 		ret = -EINVAL;
+@@ -489,7 +492,8 @@ static void amd_pmf_tee_deinit(struct amd_pmf_dev *dev)
+ 
+ int amd_pmf_init_smart_pc(struct amd_pmf_dev *dev)
+ {
+-	int ret;
++	bool status;
++	int ret, i;
+ 
+ 	ret = apmf_check_smart_pc(dev);
+ 	if (ret) {
+@@ -502,26 +506,22 @@ int amd_pmf_init_smart_pc(struct amd_pmf_dev *dev)
+ 		return -ENODEV;
+ 	}
+ 
+-	ret = amd_pmf_tee_init(dev);
+-	if (ret)
+-		return ret;
+-
+ 	INIT_DELAYED_WORK(&dev->pb_work, amd_pmf_invoke_cmd);
+ 
+ 	ret = amd_pmf_set_dram_addr(dev, true);
+ 	if (ret)
+-		goto error;
++		goto err_cancel_work;
+ 
+ 	dev->policy_base = devm_ioremap(dev->dev, dev->policy_addr, dev->policy_sz);
+ 	if (!dev->policy_base) {
+ 		ret = -ENOMEM;
+-		goto error;
++		goto err_free_dram_buf;
+ 	}
+ 
+ 	dev->policy_buf = kzalloc(dev->policy_sz, GFP_KERNEL);
+ 	if (!dev->policy_buf) {
+ 		ret = -ENOMEM;
+-		goto error;
++		goto err_free_dram_buf;
+ 	}
+ 
+ 	memcpy_fromio(dev->policy_buf, dev->policy_base, dev->policy_sz);
+@@ -531,24 +531,60 @@ int amd_pmf_init_smart_pc(struct amd_pmf_dev *dev)
+ 	dev->prev_data = kzalloc(sizeof(*dev->prev_data), GFP_KERNEL);
+ 	if (!dev->prev_data) {
+ 		ret = -ENOMEM;
+-		goto error;
++		goto err_free_policy;
+ 	}
+ 
+-	ret = amd_pmf_start_policy_engine(dev);
+-	if (ret)
+-		goto error;
++	for (i = 0; i < ARRAY_SIZE(amd_pmf_ta_uuid); i++) {
++		ret = amd_pmf_tee_init(dev, &amd_pmf_ta_uuid[i]);
++		if (ret)
++			goto err_free_prev_data;
++
++		ret = amd_pmf_start_policy_engine(dev);
++		switch (ret) {
++		case TA_PMF_TYPE_SUCCESS:
++			status = true;
++			break;
++		case TA_ERROR_CRYPTO_INVALID_PARAM:
++		case TA_ERROR_CRYPTO_BIN_TOO_LARGE:
++			amd_pmf_tee_deinit(dev);
++			status = false;
++			break;
++		default:
++			ret = -EINVAL;
++			amd_pmf_tee_deinit(dev);
++			goto err_free_prev_data;
++		}
++
++		if (status)
++			break;
++	}
++
++	if (!status && !pb_side_load) {
++		ret = -EINVAL;
++		goto err_free_prev_data;
++	}
+ 
+ 	if (pb_side_load)
+ 		amd_pmf_open_pb(dev, dev->dbgfs_dir);
+ 
+ 	ret = amd_pmf_register_input_device(dev);
+ 	if (ret)
+-		goto error;
++		goto err_pmf_remove_pb;
+ 
+ 	return 0;
+ 
+-error:
+-	amd_pmf_deinit_smart_pc(dev);
++err_pmf_remove_pb:
++	if (pb_side_load && dev->esbin)
++		amd_pmf_remove_pb(dev);
++	amd_pmf_tee_deinit(dev);
++err_free_prev_data:
++	kfree(dev->prev_data);
++err_free_policy:
++	kfree(dev->policy_buf);
++err_free_dram_buf:
++	kfree(dev->buf);
++err_cancel_work:
++	cancel_delayed_work_sync(&dev->pb_work);
+ 
+ 	return ret;
+ }
+diff --git a/drivers/platform/x86/dell/dell-uart-backlight.c b/drivers/platform/x86/dell/dell-uart-backlight.c
+index c45bc332af7a02..e4868584cde2db 100644
+--- a/drivers/platform/x86/dell/dell-uart-backlight.c
++++ b/drivers/platform/x86/dell/dell-uart-backlight.c
+@@ -325,7 +325,7 @@ static int dell_uart_bl_serdev_probe(struct serdev_device *serdev)
+ 	return PTR_ERR_OR_ZERO(dell_bl->bl);
+ }
+ 
+-struct serdev_device_driver dell_uart_bl_serdev_driver = {
++static struct serdev_device_driver dell_uart_bl_serdev_driver = {
+ 	.probe = dell_uart_bl_serdev_probe,
+ 	.driver = {
+ 		.name = KBUILD_MODNAME,
+diff --git a/drivers/platform/x86/dell/dell-wmi-ddv.c b/drivers/platform/x86/dell/dell-wmi-ddv.c
+index e75cd6e1efe6ac..ab5f7d3ab82497 100644
+--- a/drivers/platform/x86/dell/dell-wmi-ddv.c
++++ b/drivers/platform/x86/dell/dell-wmi-ddv.c
+@@ -665,8 +665,10 @@ static ssize_t temp_show(struct device *dev, struct device_attribute *attr, char
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	/* Use 2731 instead of 2731.5 to avoid unnecessary rounding */
+-	return sysfs_emit(buf, "%d\n", value - 2731);
++	/* Use 2732 instead of 2731.5 to avoid unnecessary rounding and to emulate
++	 * the behaviour of the OEM application which seems to round down the result.
++	 */
++	return sysfs_emit(buf, "%d\n", value - 2732);
+ }
+ 
+ static ssize_t eppid_show(struct device *dev, struct device_attribute *attr, char *buf)
+diff --git a/drivers/platform/x86/intel/hid.c b/drivers/platform/x86/intel/hid.c
+index 445e7a59beb414..9a609358956f3a 100644
+--- a/drivers/platform/x86/intel/hid.c
++++ b/drivers/platform/x86/intel/hid.c
+@@ -132,6 +132,13 @@ static const struct dmi_system_id button_array_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "Surface Go 3"),
+ 		},
+ 	},
++	{
++		.ident = "Microsoft Surface Go 4",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Surface Go 4"),
++		},
++	},
+ 	{ }
+ };
+ 
+diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
+index dbcd3087aaa4b0..31239a93dd71bd 100644
+--- a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
++++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
+@@ -84,7 +84,7 @@ static DECLARE_HASHTABLE(isst_hash, 8);
+ static DEFINE_MUTEX(isst_hash_lock);
+ 
+ static int isst_store_new_cmd(int cmd, u32 cpu, int mbox_cmd_type, u32 param,
+-			      u32 data)
++			      u64 data)
+ {
+ 	struct isst_cmd *sst_cmd;
+ 
+diff --git a/drivers/platform/x86/intel/vsec.c b/drivers/platform/x86/intel/vsec.c
+index 7b5cc9993974ef..55dd2286f3f354 100644
+--- a/drivers/platform/x86/intel/vsec.c
++++ b/drivers/platform/x86/intel/vsec.c
+@@ -410,6 +410,11 @@ static const struct intel_vsec_platform_info oobmsm_info = {
+ 	.caps = VSEC_CAP_TELEMETRY | VSEC_CAP_SDSI | VSEC_CAP_TPMI,
+ };
+ 
++/* DMR OOBMSM info */
++static const struct intel_vsec_platform_info dmr_oobmsm_info = {
++	.caps = VSEC_CAP_TELEMETRY | VSEC_CAP_TPMI,
++};
++
+ /* TGL info */
+ static const struct intel_vsec_platform_info tgl_info = {
+ 	.caps = VSEC_CAP_TELEMETRY,
+@@ -426,6 +431,7 @@ static const struct intel_vsec_platform_info lnl_info = {
+ #define PCI_DEVICE_ID_INTEL_VSEC_MTL_M		0x7d0d
+ #define PCI_DEVICE_ID_INTEL_VSEC_MTL_S		0xad0d
+ #define PCI_DEVICE_ID_INTEL_VSEC_OOBMSM		0x09a7
++#define PCI_DEVICE_ID_INTEL_VSEC_OOBMSM_DMR	0x09a1
+ #define PCI_DEVICE_ID_INTEL_VSEC_RPL		0xa77d
+ #define PCI_DEVICE_ID_INTEL_VSEC_TGL		0x9a0d
+ #define PCI_DEVICE_ID_INTEL_VSEC_LNL_M		0x647d
+@@ -435,6 +441,7 @@ static const struct pci_device_id intel_vsec_pci_ids[] = {
+ 	{ PCI_DEVICE_DATA(INTEL, VSEC_MTL_M, &mtl_info) },
+ 	{ PCI_DEVICE_DATA(INTEL, VSEC_MTL_S, &mtl_info) },
+ 	{ PCI_DEVICE_DATA(INTEL, VSEC_OOBMSM, &oobmsm_info) },
++	{ PCI_DEVICE_DATA(INTEL, VSEC_OOBMSM_DMR, &dmr_oobmsm_info) },
+ 	{ PCI_DEVICE_DATA(INTEL, VSEC_RPL, &tgl_info) },
+ 	{ PCI_DEVICE_DATA(INTEL, VSEC_TGL, &tgl_info) },
+ 	{ PCI_DEVICE_DATA(INTEL, VSEC_LNL_M, &lnl_info) },
+diff --git a/drivers/platform/x86/lenovo-yoga-tab2-pro-1380-fastcharger.c b/drivers/platform/x86/lenovo-yoga-tab2-pro-1380-fastcharger.c
+index 32d9b6009c4229..21de7c3a1ee3db 100644
+--- a/drivers/platform/x86/lenovo-yoga-tab2-pro-1380-fastcharger.c
++++ b/drivers/platform/x86/lenovo-yoga-tab2-pro-1380-fastcharger.c
+@@ -219,7 +219,7 @@ static int yt2_1380_fc_serdev_probe(struct serdev_device *serdev)
+ 	return 0;
+ }
+ 
+-struct serdev_device_driver yt2_1380_fc_serdev_driver = {
++static struct serdev_device_driver yt2_1380_fc_serdev_driver = {
+ 	.probe = yt2_1380_fc_serdev_probe,
+ 	.driver = {
+ 		.name = KBUILD_MODNAME,
+diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
+index a3c73abb00f21e..dea40da867552f 100644
+--- a/drivers/platform/x86/thinkpad_acpi.c
++++ b/drivers/platform/x86/thinkpad_acpi.c
+@@ -8795,6 +8795,7 @@ static const struct attribute_group fan_driver_attr_group = {
+ #define TPACPI_FAN_NS		0x0010		/* For EC with non-Standard register addresses */
+ #define TPACPI_FAN_DECRPM	0x0020		/* For ECFW's with RPM in register as decimal */
+ #define TPACPI_FAN_TPR		0x0040		/* Fan speed is in Ticks Per Revolution */
++#define TPACPI_FAN_NOACPI	0x0080		/* Don't use ACPI methods even if detected */
+ 
+ static const struct tpacpi_quirk fan_quirk_table[] __initconst = {
+ 	TPACPI_QEC_IBM('1', 'Y', TPACPI_FAN_Q1),
+@@ -8825,6 +8826,9 @@ static const struct tpacpi_quirk fan_quirk_table[] __initconst = {
+ 	TPACPI_Q_LNV3('N', '1', 'O', TPACPI_FAN_NOFAN),	/* X1 Tablet (2nd gen) */
+ 	TPACPI_Q_LNV3('R', '0', 'Q', TPACPI_FAN_DECRPM),/* L480 */
+ 	TPACPI_Q_LNV('8', 'F', TPACPI_FAN_TPR),		/* ThinkPad x120e */
++	TPACPI_Q_LNV3('R', '0', '0', TPACPI_FAN_NOACPI),/* E560 */
++	TPACPI_Q_LNV3('R', '1', '2', TPACPI_FAN_NOACPI),/* T495 */
++	TPACPI_Q_LNV3('R', '1', '3', TPACPI_FAN_NOACPI),/* T495s */
+ };
+ 
+ static int __init fan_init(struct ibm_init_struct *iibm)
+@@ -8876,6 +8880,13 @@ static int __init fan_init(struct ibm_init_struct *iibm)
+ 		tp_features.fan_ctrl_status_undef = 1;
+ 	}
+ 
++	if (quirks & TPACPI_FAN_NOACPI) {
++		/* E560, T495, T495s */
++		pr_info("Ignoring buggy ACPI fan access method\n");
++		fang_handle = NULL;
++		fanw_handle = NULL;
++	}
++
+ 	if (gfan_handle) {
+ 		/* 570, 600e/x, 770e, 770x */
+ 		fan_status_access_mode = TPACPI_FAN_RD_ACPI_GFAN;
+diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c
+index 51fb88aca0f9fd..1a20c775489c72 100644
+--- a/drivers/power/supply/bq27xxx_battery.c
++++ b/drivers/power/supply/bq27xxx_battery.c
+@@ -1913,7 +1913,6 @@ static void bq27xxx_battery_update_unlocked(struct bq27xxx_device_info *di)
+ 		cache.flags = -1; /* read error */
+ 	if (cache.flags >= 0) {
+ 		cache.capacity = bq27xxx_battery_read_soc(di);
+-		di->cache.flags = cache.flags;
+ 
+ 		/*
+ 		 * On gauges with signed current reporting the current must be
+diff --git a/drivers/power/supply/max77693_charger.c b/drivers/power/supply/max77693_charger.c
+index 4caac142c4285a..b32d881111850b 100644
+--- a/drivers/power/supply/max77693_charger.c
++++ b/drivers/power/supply/max77693_charger.c
+@@ -608,7 +608,7 @@ static int max77693_set_charge_input_threshold_volt(struct max77693_charger *chg
+ 	case 4700000:
+ 	case 4800000:
+ 	case 4900000:
+-		data = (uvolt - 4700000) / 100000;
++		data = ((uvolt - 4700000) / 100000) + 1;
+ 		break;
+ 	default:
+ 		dev_err(chg->dev, "Wrong value for charge input voltage regulation threshold\n");
+diff --git a/drivers/regulator/pca9450-regulator.c b/drivers/regulator/pca9450-regulator.c
+index 9714afe347dcc0..1ffa145319f239 100644
+--- a/drivers/regulator/pca9450-regulator.c
++++ b/drivers/regulator/pca9450-regulator.c
+@@ -454,7 +454,7 @@ static const struct pca9450_regulator_desc pca9450a_regulators[] = {
+ 			.n_linear_ranges = ARRAY_SIZE(pca9450_ldo5_volts),
+ 			.vsel_reg = PCA9450_REG_LDO5CTRL_H,
+ 			.vsel_mask = LDO5HOUT_MASK,
+-			.enable_reg = PCA9450_REG_LDO5CTRL_H,
++			.enable_reg = PCA9450_REG_LDO5CTRL_L,
+ 			.enable_mask = LDO5H_EN_MASK,
+ 			.owner = THIS_MODULE,
+ 		},
+@@ -663,7 +663,7 @@ static const struct pca9450_regulator_desc pca9450bc_regulators[] = {
+ 			.n_linear_ranges = ARRAY_SIZE(pca9450_ldo5_volts),
+ 			.vsel_reg = PCA9450_REG_LDO5CTRL_H,
+ 			.vsel_mask = LDO5HOUT_MASK,
+-			.enable_reg = PCA9450_REG_LDO5CTRL_H,
++			.enable_reg = PCA9450_REG_LDO5CTRL_L,
+ 			.enable_mask = LDO5H_EN_MASK,
+ 			.owner = THIS_MODULE,
+ 		},
+@@ -835,7 +835,7 @@ static const struct pca9450_regulator_desc pca9451a_regulators[] = {
+ 			.n_linear_ranges = ARRAY_SIZE(pca9450_ldo5_volts),
+ 			.vsel_reg = PCA9450_REG_LDO5CTRL_H,
+ 			.vsel_mask = LDO5HOUT_MASK,
+-			.enable_reg = PCA9450_REG_LDO5CTRL_H,
++			.enable_reg = PCA9450_REG_LDO5CTRL_L,
+ 			.enable_mask = LDO5H_EN_MASK,
+ 			.owner = THIS_MODULE,
+ 		},
+diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c
+index 32c3531b20c70a..e19081d530226b 100644
+--- a/drivers/remoteproc/qcom_q6v5_mss.c
++++ b/drivers/remoteproc/qcom_q6v5_mss.c
+@@ -1839,6 +1839,13 @@ static int q6v5_pds_attach(struct device *dev, struct device **devs,
+ 	while (pd_names[num_pds])
+ 		num_pds++;
+ 
++	/* Handle single power domain */
++	if (num_pds == 1 && dev->pm_domain) {
++		devs[0] = dev;
++		pm_runtime_enable(dev);
++		return 1;
++	}
++
+ 	for (i = 0; i < num_pds; i++) {
+ 		devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]);
+ 		if (IS_ERR_OR_NULL(devs[i])) {
+@@ -1859,8 +1866,15 @@ static int q6v5_pds_attach(struct device *dev, struct device **devs,
+ static void q6v5_pds_detach(struct q6v5 *qproc, struct device **pds,
+ 			    size_t pd_count)
+ {
++	struct device *dev = qproc->dev;
+ 	int i;
+ 
++	/* Handle single power domain */
++	if (pd_count == 1 && dev->pm_domain) {
++		pm_runtime_disable(dev);
++		return;
++	}
++
+ 	for (i = 0; i < pd_count; i++)
+ 		dev_pm_domain_detach(pds[i], false);
+ }
+@@ -2469,13 +2483,13 @@ static const struct rproc_hexagon_res msm8974_mss = {
+ 			.supply = "pll",
+ 			.uA = 100000,
+ 		},
+-		{}
+-	},
+-	.fallback_proxy_supply = (struct qcom_mss_reg_res[]) {
+ 		{
+ 			.supply = "mx",
+ 			.uV = 1050000,
+ 		},
++		{}
++	},
++	.fallback_proxy_supply = (struct qcom_mss_reg_res[]) {
+ 		{
+ 			.supply = "cx",
+ 			.uA = 100000,
+@@ -2501,7 +2515,6 @@ static const struct rproc_hexagon_res msm8974_mss = {
+ 		NULL
+ 	},
+ 	.proxy_pd_names = (char*[]){
+-		"mx",
+ 		"cx",
+ 		NULL
+ 	},
+diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c
+index 1a2d08ec9de9ef..ea4a91f37b506d 100644
+--- a/drivers/remoteproc/qcom_q6v5_pas.c
++++ b/drivers/remoteproc/qcom_q6v5_pas.c
+@@ -509,16 +509,16 @@ static int adsp_pds_attach(struct device *dev, struct device **devs,
+ 	if (!pd_names)
+ 		return 0;
+ 
++	while (pd_names[num_pds])
++		num_pds++;
++
+ 	/* Handle single power domain */
+-	if (dev->pm_domain) {
++	if (num_pds == 1 && dev->pm_domain) {
+ 		devs[0] = dev;
+ 		pm_runtime_enable(dev);
+ 		return 1;
+ 	}
+ 
+-	while (pd_names[num_pds])
+-		num_pds++;
+-
+ 	for (i = 0; i < num_pds; i++) {
+ 		devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]);
+ 		if (IS_ERR_OR_NULL(devs[i])) {
+@@ -543,7 +543,7 @@ static void adsp_pds_detach(struct qcom_adsp *adsp, struct device **pds,
+ 	int i;
+ 
+ 	/* Handle single power domain */
+-	if (dev->pm_domain && pd_count) {
++	if (pd_count == 1 && dev->pm_domain) {
+ 		pm_runtime_disable(dev);
+ 		return;
+ 	}
+@@ -1356,6 +1356,7 @@ static const struct adsp_data sc7280_wpss_resource = {
+ 	.crash_reason_smem = 626,
+ 	.firmware_name = "wpss.mdt",
+ 	.pas_id = 6,
++	.minidump_id = 4,
+ 	.auto_boot = false,
+ 	.proxy_pd_names = (char*[]){
+ 		"cx",
+@@ -1418,7 +1419,7 @@ static const struct adsp_data sm8650_mpss_resource = {
+ };
+ 
+ static const struct of_device_id adsp_of_match[] = {
+-	{ .compatible = "qcom,msm8226-adsp-pil", .data = &adsp_resource_init},
++	{ .compatible = "qcom,msm8226-adsp-pil", .data = &msm8996_adsp_resource},
+ 	{ .compatible = "qcom,msm8953-adsp-pil", .data = &msm8996_adsp_resource},
+ 	{ .compatible = "qcom,msm8974-adsp-pil", .data = &adsp_resource_init},
+ 	{ .compatible = "qcom,msm8996-adsp-pil", .data = &msm8996_adsp_resource},
+diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
+index ef6febe3563307..d2308c2f97eb94 100644
+--- a/drivers/remoteproc/remoteproc_core.c
++++ b/drivers/remoteproc/remoteproc_core.c
+@@ -2025,6 +2025,7 @@ int rproc_shutdown(struct rproc *rproc)
+ 	kfree(rproc->cached_table);
+ 	rproc->cached_table = NULL;
+ 	rproc->table_ptr = NULL;
++	rproc->table_sz = 0;
+ out:
+ 	mutex_unlock(&rproc->lock);
+ 	return ret;
+diff --git a/drivers/soundwire/slave.c b/drivers/soundwire/slave.c
+index f1a4df6cfebd9c..2bcb733de4de49 100644
+--- a/drivers/soundwire/slave.c
++++ b/drivers/soundwire/slave.c
+@@ -12,6 +12,7 @@ static void sdw_slave_release(struct device *dev)
+ {
+ 	struct sdw_slave *slave = dev_to_sdw_dev(dev);
+ 
++	of_node_put(slave->dev.of_node);
+ 	mutex_destroy(&slave->sdw_dev_lock);
+ 	kfree(slave);
+ }
+diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
+index e1b9b12357877f..5926e004d9a659 100644
+--- a/drivers/spi/spi-bcm2835.c
++++ b/drivers/spi/spi-bcm2835.c
+@@ -1162,7 +1162,8 @@ static void bcm2835_spi_cleanup(struct spi_device *spi)
+ 				 sizeof(u32),
+ 				 DMA_TO_DEVICE);
+ 
+-	gpiod_put(bs->cs_gpio);
++	if (!IS_ERR(bs->cs_gpio))
++		gpiod_put(bs->cs_gpio);
+ 	spi_set_csgpiod(spi, 0, NULL);
+ 
+ 	kfree(target);
+@@ -1225,7 +1226,12 @@ static int bcm2835_spi_setup(struct spi_device *spi)
+ 	struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
+ 	struct bcm2835_spidev *target = spi_get_ctldata(spi);
+ 	struct gpiod_lookup_table *lookup __free(kfree) = NULL;
+-	int ret;
++	const char *pinctrl_compats[] = {
++		"brcm,bcm2835-gpio",
++		"brcm,bcm2711-gpio",
++		"brcm,bcm7211-gpio",
++	};
++	int ret, i;
+ 	u32 cs;
+ 
+ 	if (!target) {
+@@ -1290,6 +1296,14 @@ static int bcm2835_spi_setup(struct spi_device *spi)
+ 		goto err_cleanup;
+ 	}
+ 
++	for (i = 0; i < ARRAY_SIZE(pinctrl_compats); i++) {
++		if (of_find_compatible_node(NULL, NULL, pinctrl_compats[i]))
++			break;
++	}
++
++	if (i == ARRAY_SIZE(pinctrl_compats))
++		return 0;
++
+ 	/*
+ 	 * TODO: The code below is a slightly better alternative to the utter
+ 	 * abuse of the GPIO API that I found here before. It creates a
+diff --git a/drivers/spi/spi-cadence-xspi.c b/drivers/spi/spi-cadence-xspi.c
+index aed98ab1433467..6dcba0e0ddaa3e 100644
+--- a/drivers/spi/spi-cadence-xspi.c
++++ b/drivers/spi/spi-cadence-xspi.c
+@@ -432,7 +432,7 @@ static bool cdns_mrvl_xspi_setup_clock(struct cdns_xspi_dev *cdns_xspi,
+ 	u32 clk_reg;
+ 	bool update_clk = false;
+ 
+-	while (i < ARRAY_SIZE(cdns_mrvl_xspi_clk_div_list)) {
++	while (i < (ARRAY_SIZE(cdns_mrvl_xspi_clk_div_list) - 1)) {
+ 		clk_val = MRVL_XSPI_CLOCK_DIVIDED(
+ 				cdns_mrvl_xspi_clk_div_list[i]);
+ 		if (clk_val <= requested_clk)
+diff --git a/drivers/staging/rtl8723bs/Kconfig b/drivers/staging/rtl8723bs/Kconfig
+index 8d48c61961a6b7..353e6ee2c14508 100644
+--- a/drivers/staging/rtl8723bs/Kconfig
++++ b/drivers/staging/rtl8723bs/Kconfig
+@@ -4,6 +4,7 @@ config RTL8723BS
+ 	depends on WLAN && MMC && CFG80211
+ 	depends on m
+ 	select CRYPTO
++	select CRYPTO_LIB_AES
+ 	select CRYPTO_LIB_ARC4
+ 	help
+ 	This option enables support for RTL8723BS SDIO drivers, such as
+diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+index 5fab33adf58ed0..97787002080a18 100644
+--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
++++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+@@ -1745,8 +1745,6 @@ static int vchiq_probe(struct platform_device *pdev)
+ 	if (ret)
+ 		goto failed_platform_init;
+ 
+-	vchiq_debugfs_init(&mgmt->state);
+-
+ 	dev_dbg(&pdev->dev, "arm: platform initialised - version %d (min %d)\n",
+ 		VCHIQ_VERSION, VCHIQ_VERSION_MIN);
+ 
+@@ -1760,6 +1758,8 @@ static int vchiq_probe(struct platform_device *pdev)
+ 		goto error_exit;
+ 	}
+ 
++	vchiq_debugfs_init(&mgmt->state);
++
+ 	bcm2835_audio = vchiq_device_register(&pdev->dev, "bcm2835-audio");
+ 	bcm2835_camera = vchiq_device_register(&pdev->dev, "bcm2835-camera");
+ 
+@@ -1786,7 +1786,8 @@ static void vchiq_remove(struct platform_device *pdev)
+ 	kthread_stop(mgmt->state.slot_handler_thread);
+ 
+ 	arm_state = vchiq_platform_get_arm_state(&mgmt->state);
+-	kthread_stop(arm_state->ka_thread);
++	if (!IS_ERR_OR_NULL(arm_state->ka_thread))
++		kthread_stop(arm_state->ka_thread);
+ }
+ 
+ static struct platform_driver vchiq_driver = {
+diff --git a/drivers/thermal/intel/int340x_thermal/int3402_thermal.c b/drivers/thermal/intel/int340x_thermal/int3402_thermal.c
+index ab8bfb5a3946bc..40ab6b2d4fb05f 100644
+--- a/drivers/thermal/intel/int340x_thermal/int3402_thermal.c
++++ b/drivers/thermal/intel/int340x_thermal/int3402_thermal.c
+@@ -45,6 +45,9 @@ static int int3402_thermal_probe(struct platform_device *pdev)
+ 	struct int3402_thermal_data *d;
+ 	int ret;
+ 
++	if (!adev)
++		return -ENODEV;
++
+ 	if (!acpi_has_method(adev->handle, "_TMP"))
+ 		return -ENODEV;
+ 
+diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
+index 5e9ca4376d686e..94fa981081fdb5 100644
+--- a/drivers/tty/n_tty.c
++++ b/drivers/tty/n_tty.c
+@@ -486,7 +486,8 @@ static int do_output_char(u8 c, struct tty_struct *tty, int space)
+ static int process_output(u8 c, struct tty_struct *tty)
+ {
+ 	struct n_tty_data *ldata = tty->disc_data;
+-	int	space, retval;
++	unsigned int space;
++	int retval;
+ 
+ 	mutex_lock(&ldata->output_lock);
+ 
+@@ -522,16 +523,16 @@ static ssize_t process_output_block(struct tty_struct *tty,
+ 				    const u8 *buf, unsigned int nr)
+ {
+ 	struct n_tty_data *ldata = tty->disc_data;
+-	int	space;
+-	int	i;
++	unsigned int space;
++	int i;
+ 	const u8 *cp;
+ 
+ 	mutex_lock(&ldata->output_lock);
+ 
+ 	space = tty_write_room(tty);
+-	if (space <= 0) {
++	if (space == 0) {
+ 		mutex_unlock(&ldata->output_lock);
+-		return space;
++		return 0;
+ 	}
+ 	if (nr > space)
+ 		nr = space;
+@@ -696,7 +697,7 @@ static int n_tty_process_echo_ops(struct tty_struct *tty, size_t *tail,
+ static size_t __process_echoes(struct tty_struct *tty)
+ {
+ 	struct n_tty_data *ldata = tty->disc_data;
+-	int	space, old_space;
++	unsigned int space, old_space;
+ 	size_t tail;
+ 	u8 c;
+ 
+diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
+index 9f9fc733eb2c1f..951c3cdac3b947 100644
+--- a/drivers/tty/serial/fsl_lpuart.c
++++ b/drivers/tty/serial/fsl_lpuart.c
+@@ -440,7 +440,7 @@ static unsigned int lpuart_get_baud_clk_rate(struct lpuart_port *sport)
+ 
+ static void lpuart_stop_tx(struct uart_port *port)
+ {
+-	unsigned char temp;
++	u8 temp;
+ 
+ 	temp = readb(port->membase + UARTCR2);
+ 	temp &= ~(UARTCR2_TIE | UARTCR2_TCIE);
+@@ -449,7 +449,7 @@ static void lpuart_stop_tx(struct uart_port *port)
+ 
+ static void lpuart32_stop_tx(struct uart_port *port)
+ {
+-	unsigned long temp;
++	u32 temp;
+ 
+ 	temp = lpuart32_read(port, UARTCTRL);
+ 	temp &= ~(UARTCTRL_TIE | UARTCTRL_TCIE);
+@@ -458,7 +458,7 @@ static void lpuart32_stop_tx(struct uart_port *port)
+ 
+ static void lpuart_stop_rx(struct uart_port *port)
+ {
+-	unsigned char temp;
++	u8 temp;
+ 
+ 	temp = readb(port->membase + UARTCR2);
+ 	writeb(temp & ~UARTCR2_RE, port->membase + UARTCR2);
+@@ -466,7 +466,7 @@ static void lpuart_stop_rx(struct uart_port *port)
+ 
+ static void lpuart32_stop_rx(struct uart_port *port)
+ {
+-	unsigned long temp;
++	u32 temp;
+ 
+ 	temp = lpuart32_read(port, UARTCTRL);
+ 	lpuart32_write(port, temp & ~UARTCTRL_RE, UARTCTRL);
+@@ -580,7 +580,7 @@ static int lpuart_dma_tx_request(struct uart_port *port)
+ 	ret = dmaengine_slave_config(sport->dma_tx_chan, &dma_tx_sconfig);
+ 
+ 	if (ret) {
+-		dev_err(sport->port.dev,
++		dev_err(port->dev,
+ 				"DMA slave config failed, err = %d\n", ret);
+ 		return ret;
+ 	}
+@@ -610,13 +610,13 @@ static void lpuart_flush_buffer(struct uart_port *port)
+ 	}
+ 
+ 	if (lpuart_is_32(sport)) {
+-		val = lpuart32_read(&sport->port, UARTFIFO);
++		val = lpuart32_read(port, UARTFIFO);
+ 		val |= UARTFIFO_TXFLUSH | UARTFIFO_RXFLUSH;
+-		lpuart32_write(&sport->port, val, UARTFIFO);
++		lpuart32_write(port, val, UARTFIFO);
+ 	} else {
+-		val = readb(sport->port.membase + UARTCFIFO);
++		val = readb(port->membase + UARTCFIFO);
+ 		val |= UARTCFIFO_TXFLUSH | UARTCFIFO_RXFLUSH;
+-		writeb(val, sport->port.membase + UARTCFIFO);
++		writeb(val, port->membase + UARTCFIFO);
+ 	}
+ }
+ 
+@@ -638,38 +638,36 @@ static void lpuart32_wait_bit_set(struct uart_port *port, unsigned int offset,
+ 
+ static int lpuart_poll_init(struct uart_port *port)
+ {
+-	struct lpuart_port *sport = container_of(port,
+-					struct lpuart_port, port);
+ 	unsigned long flags;
+-	unsigned char temp;
++	u8 temp;
+ 
+-	sport->port.fifosize = 0;
++	port->fifosize = 0;
+ 
+-	uart_port_lock_irqsave(&sport->port, &flags);
++	uart_port_lock_irqsave(port, &flags);
+ 	/* Disable Rx & Tx */
+-	writeb(0, sport->port.membase + UARTCR2);
++	writeb(0, port->membase + UARTCR2);
+ 
+-	temp = readb(sport->port.membase + UARTPFIFO);
++	temp = readb(port->membase + UARTPFIFO);
+ 	/* Enable Rx and Tx FIFO */
+ 	writeb(temp | UARTPFIFO_RXFE | UARTPFIFO_TXFE,
+-			sport->port.membase + UARTPFIFO);
++			port->membase + UARTPFIFO);
+ 
+ 	/* flush Tx and Rx FIFO */
+ 	writeb(UARTCFIFO_TXFLUSH | UARTCFIFO_RXFLUSH,
+-			sport->port.membase + UARTCFIFO);
++			port->membase + UARTCFIFO);
+ 
+ 	/* explicitly clear RDRF */
+-	if (readb(sport->port.membase + UARTSR1) & UARTSR1_RDRF) {
+-		readb(sport->port.membase + UARTDR);
+-		writeb(UARTSFIFO_RXUF, sport->port.membase + UARTSFIFO);
++	if (readb(port->membase + UARTSR1) & UARTSR1_RDRF) {
++		readb(port->membase + UARTDR);
++		writeb(UARTSFIFO_RXUF, port->membase + UARTSFIFO);
+ 	}
+ 
+-	writeb(0, sport->port.membase + UARTTWFIFO);
+-	writeb(1, sport->port.membase + UARTRWFIFO);
++	writeb(0, port->membase + UARTTWFIFO);
++	writeb(1, port->membase + UARTRWFIFO);
+ 
+ 	/* Enable Rx and Tx */
+-	writeb(UARTCR2_RE | UARTCR2_TE, sport->port.membase + UARTCR2);
+-	uart_port_unlock_irqrestore(&sport->port, flags);
++	writeb(UARTCR2_RE | UARTCR2_TE, port->membase + UARTCR2);
++	uart_port_unlock_irqrestore(port, flags);
+ 
+ 	return 0;
+ }
+@@ -692,33 +690,32 @@ static int lpuart_poll_get_char(struct uart_port *port)
+ static int lpuart32_poll_init(struct uart_port *port)
+ {
+ 	unsigned long flags;
+-	struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
+ 	u32 temp;
+ 
+-	sport->port.fifosize = 0;
++	port->fifosize = 0;
+ 
+-	uart_port_lock_irqsave(&sport->port, &flags);
++	uart_port_lock_irqsave(port, &flags);
+ 
+ 	/* Disable Rx & Tx */
+-	lpuart32_write(&sport->port, 0, UARTCTRL);
++	lpuart32_write(port, 0, UARTCTRL);
+ 
+-	temp = lpuart32_read(&sport->port, UARTFIFO);
++	temp = lpuart32_read(port, UARTFIFO);
+ 
+ 	/* Enable Rx and Tx FIFO */
+-	lpuart32_write(&sport->port, temp | UARTFIFO_RXFE | UARTFIFO_TXFE, UARTFIFO);
++	lpuart32_write(port, temp | UARTFIFO_RXFE | UARTFIFO_TXFE, UARTFIFO);
+ 
+ 	/* flush Tx and Rx FIFO */
+-	lpuart32_write(&sport->port, UARTFIFO_TXFLUSH | UARTFIFO_RXFLUSH, UARTFIFO);
++	lpuart32_write(port, UARTFIFO_TXFLUSH | UARTFIFO_RXFLUSH, UARTFIFO);
+ 
+ 	/* explicitly clear RDRF */
+-	if (lpuart32_read(&sport->port, UARTSTAT) & UARTSTAT_RDRF) {
+-		lpuart32_read(&sport->port, UARTDATA);
+-		lpuart32_write(&sport->port, UARTFIFO_RXUF, UARTFIFO);
++	if (lpuart32_read(port, UARTSTAT) & UARTSTAT_RDRF) {
++		lpuart32_read(port, UARTDATA);
++		lpuart32_write(port, UARTFIFO_RXUF, UARTFIFO);
+ 	}
+ 
+ 	/* Enable Rx and Tx */
+-	lpuart32_write(&sport->port, UARTCTRL_RE | UARTCTRL_TE, UARTCTRL);
+-	uart_port_unlock_irqrestore(&sport->port, flags);
++	lpuart32_write(port, UARTCTRL_RE | UARTCTRL_TE, UARTCTRL);
++	uart_port_unlock_irqrestore(port, flags);
+ 
+ 	return 0;
+ }
+@@ -751,7 +748,7 @@ static inline void lpuart_transmit_buffer(struct lpuart_port *sport)
+ static inline void lpuart32_transmit_buffer(struct lpuart_port *sport)
+ {
+ 	struct tty_port *tport = &sport->port.state->port;
+-	unsigned long txcnt;
++	u32 txcnt;
+ 	unsigned char c;
+ 
+ 	if (sport->port.x_char) {
+@@ -788,7 +785,7 @@ static void lpuart_start_tx(struct uart_port *port)
+ {
+ 	struct lpuart_port *sport = container_of(port,
+ 			struct lpuart_port, port);
+-	unsigned char temp;
++	u8 temp;
+ 
+ 	temp = readb(port->membase + UARTCR2);
+ 	writeb(temp | UARTCR2_TIE, port->membase + UARTCR2);
+@@ -805,7 +802,7 @@ static void lpuart_start_tx(struct uart_port *port)
+ static void lpuart32_start_tx(struct uart_port *port)
+ {
+ 	struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
+-	unsigned long temp;
++	u32 temp;
+ 
+ 	if (sport->lpuart_dma_tx_use) {
+ 		if (!lpuart_stopped_or_empty(port))
+@@ -838,8 +835,8 @@ static unsigned int lpuart_tx_empty(struct uart_port *port)
+ {
+ 	struct lpuart_port *sport = container_of(port,
+ 			struct lpuart_port, port);
+-	unsigned char sr1 = readb(port->membase + UARTSR1);
+-	unsigned char sfifo = readb(port->membase + UARTSFIFO);
++	u8 sr1 = readb(port->membase + UARTSR1);
++	u8 sfifo = readb(port->membase + UARTSFIFO);
+ 
+ 	if (sport->dma_tx_in_progress)
+ 		return 0;
+@@ -854,9 +851,9 @@ static unsigned int lpuart32_tx_empty(struct uart_port *port)
+ {
+ 	struct lpuart_port *sport = container_of(port,
+ 			struct lpuart_port, port);
+-	unsigned long stat = lpuart32_read(port, UARTSTAT);
+-	unsigned long sfifo = lpuart32_read(port, UARTFIFO);
+-	unsigned long ctrl = lpuart32_read(port, UARTCTRL);
++	u32 stat = lpuart32_read(port, UARTSTAT);
++	u32 sfifo = lpuart32_read(port, UARTFIFO);
++	u32 ctrl = lpuart32_read(port, UARTCTRL);
+ 
+ 	if (sport->dma_tx_in_progress)
+ 		return 0;
+@@ -883,7 +880,7 @@ static void lpuart_rxint(struct lpuart_port *sport)
+ {
+ 	unsigned int flg, ignored = 0, overrun = 0;
+ 	struct tty_port *port = &sport->port.state->port;
+-	unsigned char rx, sr;
++	u8 rx, sr;
+ 
+ 	uart_port_lock(&sport->port);
+ 
+@@ -960,7 +957,7 @@ static void lpuart32_rxint(struct lpuart_port *sport)
+ {
+ 	unsigned int flg, ignored = 0;
+ 	struct tty_port *port = &sport->port.state->port;
+-	unsigned long rx, sr;
++	u32 rx, sr;
+ 	bool is_break;
+ 
+ 	uart_port_lock(&sport->port);
+@@ -1038,7 +1035,7 @@ static void lpuart32_rxint(struct lpuart_port *sport)
+ static irqreturn_t lpuart_int(int irq, void *dev_id)
+ {
+ 	struct lpuart_port *sport = dev_id;
+-	unsigned char sts;
++	u8 sts;
+ 
+ 	sts = readb(sport->port.membase + UARTSR1);
+ 
+@@ -1112,7 +1109,7 @@ static void lpuart_copy_rx_to_tty(struct lpuart_port *sport)
+ 	int count, copied;
+ 
+ 	if (lpuart_is_32(sport)) {
+-		unsigned long sr = lpuart32_read(&sport->port, UARTSTAT);
++		u32 sr = lpuart32_read(&sport->port, UARTSTAT);
+ 
+ 		if (sr & (UARTSTAT_PE | UARTSTAT_FE)) {
+ 			/* Clear the error flags */
+@@ -1124,10 +1121,10 @@ static void lpuart_copy_rx_to_tty(struct lpuart_port *sport)
+ 				sport->port.icount.frame++;
+ 		}
+ 	} else {
+-		unsigned char sr = readb(sport->port.membase + UARTSR1);
++		u8 sr = readb(sport->port.membase + UARTSR1);
+ 
+ 		if (sr & (UARTSR1_PE | UARTSR1_FE)) {
+-			unsigned char cr2;
++			u8 cr2;
+ 
+ 			/* Disable receiver during this operation... */
+ 			cr2 = readb(sport->port.membase + UARTCR2);
+@@ -1278,7 +1275,7 @@ static void lpuart32_dma_idleint(struct lpuart_port *sport)
+ static irqreturn_t lpuart32_int(int irq, void *dev_id)
+ {
+ 	struct lpuart_port *sport = dev_id;
+-	unsigned long sts, rxcount;
++	u32 sts, rxcount;
+ 
+ 	sts = lpuart32_read(&sport->port, UARTSTAT);
+ 	rxcount = lpuart32_read(&sport->port, UARTWATER);
+@@ -1410,12 +1407,12 @@ static inline int lpuart_start_rx_dma(struct lpuart_port *sport)
+ 	dma_async_issue_pending(chan);
+ 
+ 	if (lpuart_is_32(sport)) {
+-		unsigned long temp = lpuart32_read(&sport->port, UARTBAUD);
++		u32 temp = lpuart32_read(&sport->port, UARTBAUD);
+ 
+ 		lpuart32_write(&sport->port, temp | UARTBAUD_RDMAE, UARTBAUD);
+ 
+ 		if (sport->dma_idle_int) {
+-			unsigned long ctrl = lpuart32_read(&sport->port, UARTCTRL);
++			u32 ctrl = lpuart32_read(&sport->port, UARTCTRL);
+ 
+ 			lpuart32_write(&sport->port, ctrl | UARTCTRL_ILIE, UARTCTRL);
+ 		}
+@@ -1448,12 +1445,9 @@ static void lpuart_dma_rx_free(struct uart_port *port)
+ static int lpuart_config_rs485(struct uart_port *port, struct ktermios *termios,
+ 			struct serial_rs485 *rs485)
+ {
+-	struct lpuart_port *sport = container_of(port,
+-			struct lpuart_port, port);
+-
+-	u8 modem = readb(sport->port.membase + UARTMODEM) &
++	u8 modem = readb(port->membase + UARTMODEM) &
+ 		~(UARTMODEM_TXRTSPOL | UARTMODEM_TXRTSE);
+-	writeb(modem, sport->port.membase + UARTMODEM);
++	writeb(modem, port->membase + UARTMODEM);
+ 
+ 	if (rs485->flags & SER_RS485_ENABLED) {
+ 		/* Enable auto RS-485 RTS mode */
+@@ -1471,32 +1465,29 @@ static int lpuart_config_rs485(struct uart_port *port, struct ktermios *termios,
+ 			modem &= ~UARTMODEM_TXRTSPOL;
+ 	}
+ 
+-	writeb(modem, sport->port.membase + UARTMODEM);
++	writeb(modem, port->membase + UARTMODEM);
+ 	return 0;
+ }
+ 
+ static int lpuart32_config_rs485(struct uart_port *port, struct ktermios *termios,
+ 			struct serial_rs485 *rs485)
+ {
+-	struct lpuart_port *sport = container_of(port,
+-			struct lpuart_port, port);
+-
+-	unsigned long modem = lpuart32_read(&sport->port, UARTMODIR)
++	u32 modem = lpuart32_read(port, UARTMODIR)
+ 				& ~(UARTMODIR_TXRTSPOL | UARTMODIR_TXRTSE);
+ 	u32 ctrl;
+ 
+ 	/* TXRTSE and TXRTSPOL only can be changed when transmitter is disabled. */
+-	ctrl = lpuart32_read(&sport->port, UARTCTRL);
++	ctrl = lpuart32_read(port, UARTCTRL);
+ 	if (ctrl & UARTCTRL_TE) {
+ 		/* wait for the transmit engine to complete */
+-		lpuart32_wait_bit_set(&sport->port, UARTSTAT, UARTSTAT_TC);
+-		lpuart32_write(&sport->port, ctrl & ~UARTCTRL_TE, UARTCTRL);
++		lpuart32_wait_bit_set(port, UARTSTAT, UARTSTAT_TC);
++		lpuart32_write(port, ctrl & ~UARTCTRL_TE, UARTCTRL);
+ 
+-		while (lpuart32_read(&sport->port, UARTCTRL) & UARTCTRL_TE)
++		while (lpuart32_read(port, UARTCTRL) & UARTCTRL_TE)
+ 			cpu_relax();
+ 	}
+ 
+-	lpuart32_write(&sport->port, modem, UARTMODIR);
++	lpuart32_write(port, modem, UARTMODIR);
+ 
+ 	if (rs485->flags & SER_RS485_ENABLED) {
+ 		/* Enable auto RS-485 RTS mode */
+@@ -1514,10 +1505,10 @@ static int lpuart32_config_rs485(struct uart_port *port, struct ktermios *termio
+ 			modem &= ~UARTMODIR_TXRTSPOL;
+ 	}
+ 
+-	lpuart32_write(&sport->port, modem, UARTMODIR);
++	lpuart32_write(port, modem, UARTMODIR);
+ 
+ 	if (ctrl & UARTCTRL_TE)
+-		lpuart32_write(&sport->port, ctrl, UARTCTRL);
++		lpuart32_write(port, ctrl, UARTCTRL);
+ 
+ 	return 0;
+ }
+@@ -1576,7 +1567,7 @@ static void lpuart32_set_mctrl(struct uart_port *port, unsigned int mctrl)
+ 
+ static void lpuart_break_ctl(struct uart_port *port, int break_state)
+ {
+-	unsigned char temp;
++	u8 temp;
+ 
+ 	temp = readb(port->membase + UARTCR2) & ~UARTCR2_SBK;
+ 
+@@ -1588,7 +1579,7 @@ static void lpuart_break_ctl(struct uart_port *port, int break_state)
+ 
+ static void lpuart32_break_ctl(struct uart_port *port, int break_state)
+ {
+-	unsigned long temp;
++	u32 temp;
+ 
+ 	temp = lpuart32_read(port, UARTCTRL);
+ 
+@@ -1622,8 +1613,7 @@ static void lpuart32_break_ctl(struct uart_port *port, int break_state)
+ 
+ static void lpuart_setup_watermark(struct lpuart_port *sport)
+ {
+-	unsigned char val, cr2;
+-	unsigned char cr2_saved;
++	u8 val, cr2, cr2_saved;
+ 
+ 	cr2 = readb(sport->port.membase + UARTCR2);
+ 	cr2_saved = cr2;
+@@ -1656,7 +1646,7 @@ static void lpuart_setup_watermark(struct lpuart_port *sport)
+ 
+ static void lpuart_setup_watermark_enable(struct lpuart_port *sport)
+ {
+-	unsigned char cr2;
++	u8 cr2;
+ 
+ 	lpuart_setup_watermark(sport);
+ 
+@@ -1667,8 +1657,7 @@ static void lpuart_setup_watermark_enable(struct lpuart_port *sport)
+ 
+ static void lpuart32_setup_watermark(struct lpuart_port *sport)
+ {
+-	unsigned long val, ctrl;
+-	unsigned long ctrl_saved;
++	u32 val, ctrl, ctrl_saved;
+ 
+ 	ctrl = lpuart32_read(&sport->port, UARTCTRL);
+ 	ctrl_saved = ctrl;
+@@ -1777,7 +1766,7 @@ static void lpuart_tx_dma_startup(struct lpuart_port *sport)
+ static void lpuart_rx_dma_startup(struct lpuart_port *sport)
+ {
+ 	int ret;
+-	unsigned char cr3;
++	u8 cr3;
+ 
+ 	if (uart_console(&sport->port))
+ 		goto err;
+@@ -1827,14 +1816,14 @@ static void lpuart_hw_setup(struct lpuart_port *sport)
+ static int lpuart_startup(struct uart_port *port)
+ {
+ 	struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
+-	unsigned char temp;
++	u8 temp;
+ 
+ 	/* determine FIFO size and enable FIFO mode */
+-	temp = readb(sport->port.membase + UARTPFIFO);
++	temp = readb(port->membase + UARTPFIFO);
+ 
+ 	sport->txfifo_size = UARTFIFO_DEPTH((temp >> UARTPFIFO_TXSIZE_OFF) &
+ 					    UARTPFIFO_FIFOSIZE_MASK);
+-	sport->port.fifosize = sport->txfifo_size;
++	port->fifosize = sport->txfifo_size;
+ 
+ 	sport->rxfifo_size = UARTFIFO_DEPTH((temp >> UARTPFIFO_RXSIZE_OFF) &
+ 					    UARTPFIFO_FIFOSIZE_MASK);
+@@ -1847,7 +1836,7 @@ static int lpuart_startup(struct uart_port *port)
+ 
+ static void lpuart32_hw_disable(struct lpuart_port *sport)
+ {
+-	unsigned long temp;
++	u32 temp;
+ 
+ 	temp = lpuart32_read(&sport->port, UARTCTRL);
+ 	temp &= ~(UARTCTRL_RIE | UARTCTRL_ILIE | UARTCTRL_RE |
+@@ -1857,7 +1846,7 @@ static void lpuart32_hw_disable(struct lpuart_port *sport)
+ 
+ static void lpuart32_configure(struct lpuart_port *sport)
+ {
+-	unsigned long temp;
++	u32 temp;
+ 
+ 	temp = lpuart32_read(&sport->port, UARTCTRL);
+ 	if (!sport->lpuart_dma_rx_use)
+@@ -1887,14 +1876,14 @@ static void lpuart32_hw_setup(struct lpuart_port *sport)
+ static int lpuart32_startup(struct uart_port *port)
+ {
+ 	struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
+-	unsigned long temp;
++	u32 temp;
+ 
+ 	/* determine FIFO size */
+-	temp = lpuart32_read(&sport->port, UARTFIFO);
++	temp = lpuart32_read(port, UARTFIFO);
+ 
+ 	sport->txfifo_size = UARTFIFO_DEPTH((temp >> UARTFIFO_TXSIZE_OFF) &
+ 					    UARTFIFO_FIFOSIZE_MASK);
+-	sport->port.fifosize = sport->txfifo_size;
++	port->fifosize = sport->txfifo_size;
+ 
+ 	sport->rxfifo_size = UARTFIFO_DEPTH((temp >> UARTFIFO_RXSIZE_OFF) &
+ 					    UARTFIFO_FIFOSIZE_MASK);
+@@ -1907,7 +1896,7 @@ static int lpuart32_startup(struct uart_port *port)
+ 	if (is_layerscape_lpuart(sport)) {
+ 		sport->rxfifo_size = 16;
+ 		sport->txfifo_size = 16;
+-		sport->port.fifosize = sport->txfifo_size;
++		port->fifosize = sport->txfifo_size;
+ 	}
+ 
+ 	lpuart_request_dma(sport);
+@@ -1941,7 +1930,7 @@ static void lpuart_dma_shutdown(struct lpuart_port *sport)
+ static void lpuart_shutdown(struct uart_port *port)
+ {
+ 	struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
+-	unsigned char temp;
++	u8 temp;
+ 	unsigned long flags;
+ 
+ 	uart_port_lock_irqsave(port, &flags);
+@@ -1961,14 +1950,14 @@ static void lpuart32_shutdown(struct uart_port *port)
+ {
+ 	struct lpuart_port *sport =
+ 		container_of(port, struct lpuart_port, port);
+-	unsigned long temp;
++	u32 temp;
+ 	unsigned long flags;
+ 
+ 	uart_port_lock_irqsave(port, &flags);
+ 
+ 	/* clear status */
+-	temp = lpuart32_read(&sport->port, UARTSTAT);
+-	lpuart32_write(&sport->port, temp, UARTSTAT);
++	temp = lpuart32_read(port, UARTSTAT);
++	lpuart32_write(port, temp, UARTSTAT);
+ 
+ 	/* disable Rx/Tx DMA */
+ 	temp = lpuart32_read(port, UARTBAUD);
+@@ -1992,17 +1981,17 @@ lpuart_set_termios(struct uart_port *port, struct ktermios *termios,
+ {
+ 	struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
+ 	unsigned long flags;
+-	unsigned char cr1, old_cr1, old_cr2, cr3, cr4, bdh, modem;
++	u8 cr1, old_cr1, old_cr2, cr3, cr4, bdh, modem;
+ 	unsigned int  baud;
+ 	unsigned int old_csize = old ? old->c_cflag & CSIZE : CS8;
+ 	unsigned int sbr, brfa;
+ 
+-	cr1 = old_cr1 = readb(sport->port.membase + UARTCR1);
+-	old_cr2 = readb(sport->port.membase + UARTCR2);
+-	cr3 = readb(sport->port.membase + UARTCR3);
+-	cr4 = readb(sport->port.membase + UARTCR4);
+-	bdh = readb(sport->port.membase + UARTBDH);
+-	modem = readb(sport->port.membase + UARTMODEM);
++	cr1 = old_cr1 = readb(port->membase + UARTCR1);
++	old_cr2 = readb(port->membase + UARTCR2);
++	cr3 = readb(port->membase + UARTCR3);
++	cr4 = readb(port->membase + UARTCR4);
++	bdh = readb(port->membase + UARTBDH);
++	modem = readb(port->membase + UARTMODEM);
+ 	/*
+ 	 * only support CS8 and CS7, and for CS7 must enable PE.
+ 	 * supported mode:
+@@ -2034,7 +2023,7 @@ lpuart_set_termios(struct uart_port *port, struct ktermios *termios,
+ 	 * When auto RS-485 RTS mode is enabled,
+ 	 * hardware flow control need to be disabled.
+ 	 */
+-	if (sport->port.rs485.flags & SER_RS485_ENABLED)
++	if (port->rs485.flags & SER_RS485_ENABLED)
+ 		termios->c_cflag &= ~CRTSCTS;
+ 
+ 	if (termios->c_cflag & CRTSCTS)
+@@ -2075,59 +2064,59 @@ lpuart_set_termios(struct uart_port *port, struct ktermios *termios,
+ 	 * Need to update the Ring buffer length according to the selected
+ 	 * baud rate and restart Rx DMA path.
+ 	 *
+-	 * Since timer function acqures sport->port.lock, need to stop before
++	 * Since timer function acquires port->lock, need to stop before
+ 	 * acquring same lock because otherwise del_timer_sync() can deadlock.
+ 	 */
+ 	if (old && sport->lpuart_dma_rx_use)
+-		lpuart_dma_rx_free(&sport->port);
++		lpuart_dma_rx_free(port);
+ 
+-	uart_port_lock_irqsave(&sport->port, &flags);
++	uart_port_lock_irqsave(port, &flags);
+ 
+-	sport->port.read_status_mask = 0;
++	port->read_status_mask = 0;
+ 	if (termios->c_iflag & INPCK)
+-		sport->port.read_status_mask |= UARTSR1_FE | UARTSR1_PE;
++		port->read_status_mask |= UARTSR1_FE | UARTSR1_PE;
+ 	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+-		sport->port.read_status_mask |= UARTSR1_FE;
++		port->read_status_mask |= UARTSR1_FE;
+ 
+ 	/* characters to ignore */
+-	sport->port.ignore_status_mask = 0;
++	port->ignore_status_mask = 0;
+ 	if (termios->c_iflag & IGNPAR)
+-		sport->port.ignore_status_mask |= UARTSR1_PE;
++		port->ignore_status_mask |= UARTSR1_PE;
+ 	if (termios->c_iflag & IGNBRK) {
+-		sport->port.ignore_status_mask |= UARTSR1_FE;
++		port->ignore_status_mask |= UARTSR1_FE;
+ 		/*
+ 		 * if we're ignoring parity and break indicators,
+ 		 * ignore overruns too (for real raw support).
+ 		 */
+ 		if (termios->c_iflag & IGNPAR)
+-			sport->port.ignore_status_mask |= UARTSR1_OR;
++			port->ignore_status_mask |= UARTSR1_OR;
+ 	}
+ 
+ 	/* update the per-port timeout */
+ 	uart_update_timeout(port, termios->c_cflag, baud);
+ 
+ 	/* wait transmit engin complete */
+-	lpuart_wait_bit_set(&sport->port, UARTSR1, UARTSR1_TC);
++	lpuart_wait_bit_set(port, UARTSR1, UARTSR1_TC);
+ 
+ 	/* disable transmit and receive */
+ 	writeb(old_cr2 & ~(UARTCR2_TE | UARTCR2_RE),
+-			sport->port.membase + UARTCR2);
++			port->membase + UARTCR2);
+ 
+-	sbr = sport->port.uartclk / (16 * baud);
+-	brfa = ((sport->port.uartclk - (16 * sbr * baud)) * 2) / baud;
++	sbr = port->uartclk / (16 * baud);
++	brfa = ((port->uartclk - (16 * sbr * baud)) * 2) / baud;
+ 	bdh &= ~UARTBDH_SBR_MASK;
+ 	bdh |= (sbr >> 8) & 0x1F;
+ 	cr4 &= ~UARTCR4_BRFA_MASK;
+ 	brfa &= UARTCR4_BRFA_MASK;
+-	writeb(cr4 | brfa, sport->port.membase + UARTCR4);
+-	writeb(bdh, sport->port.membase + UARTBDH);
+-	writeb(sbr & 0xFF, sport->port.membase + UARTBDL);
+-	writeb(cr3, sport->port.membase + UARTCR3);
+-	writeb(cr1, sport->port.membase + UARTCR1);
+-	writeb(modem, sport->port.membase + UARTMODEM);
++	writeb(cr4 | brfa, port->membase + UARTCR4);
++	writeb(bdh, port->membase + UARTBDH);
++	writeb(sbr & 0xFF, port->membase + UARTBDL);
++	writeb(cr3, port->membase + UARTCR3);
++	writeb(cr1, port->membase + UARTCR1);
++	writeb(modem, port->membase + UARTMODEM);
+ 
+ 	/* restore control register */
+-	writeb(old_cr2, sport->port.membase + UARTCR2);
++	writeb(old_cr2, port->membase + UARTCR2);
+ 
+ 	if (old && sport->lpuart_dma_rx_use) {
+ 		if (!lpuart_start_rx_dma(sport))
+@@ -2136,7 +2125,7 @@ lpuart_set_termios(struct uart_port *port, struct ktermios *termios,
+ 			sport->lpuart_dma_rx_use = false;
+ 	}
+ 
+-	uart_port_unlock_irqrestore(&sport->port, flags);
++	uart_port_unlock_irqrestore(port, flags);
+ }
+ 
+ static void __lpuart32_serial_setbrg(struct uart_port *port,
+@@ -2230,13 +2219,13 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
+ {
+ 	struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
+ 	unsigned long flags;
+-	unsigned long ctrl, old_ctrl, bd, modem;
++	u32 ctrl, old_ctrl, bd, modem;
+ 	unsigned int  baud;
+ 	unsigned int old_csize = old ? old->c_cflag & CSIZE : CS8;
+ 
+-	ctrl = old_ctrl = lpuart32_read(&sport->port, UARTCTRL);
+-	bd = lpuart32_read(&sport->port, UARTBAUD);
+-	modem = lpuart32_read(&sport->port, UARTMODIR);
++	ctrl = old_ctrl = lpuart32_read(port, UARTCTRL);
++	bd = lpuart32_read(port, UARTBAUD);
++	modem = lpuart32_read(port, UARTMODIR);
+ 	sport->is_cs7 = false;
+ 	/*
+ 	 * only support CS8 and CS7, and for CS7 must enable PE.
+@@ -2269,7 +2258,7 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
+ 	 * When auto RS-485 RTS mode is enabled,
+ 	 * hardware flow control need to be disabled.
+ 	 */
+-	if (sport->port.rs485.flags & SER_RS485_ENABLED)
++	if (port->rs485.flags & SER_RS485_ENABLED)
+ 		termios->c_cflag &= ~CRTSCTS;
+ 
+ 	if (termios->c_cflag & CRTSCTS)
+@@ -2310,59 +2299,61 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
+ 	 * Need to update the Ring buffer length according to the selected
+ 	 * baud rate and restart Rx DMA path.
+ 	 *
+-	 * Since timer function acqures sport->port.lock, need to stop before
++	 * Since timer function acquires port->lock, need to stop before
+ 	 * acquring same lock because otherwise del_timer_sync() can deadlock.
+ 	 */
+ 	if (old && sport->lpuart_dma_rx_use)
+-		lpuart_dma_rx_free(&sport->port);
++		lpuart_dma_rx_free(port);
+ 
+-	uart_port_lock_irqsave(&sport->port, &flags);
++	uart_port_lock_irqsave(port, &flags);
+ 
+-	sport->port.read_status_mask = 0;
++	port->read_status_mask = 0;
+ 	if (termios->c_iflag & INPCK)
+-		sport->port.read_status_mask |= UARTSTAT_FE | UARTSTAT_PE;
++		port->read_status_mask |= UARTSTAT_FE | UARTSTAT_PE;
+ 	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+-		sport->port.read_status_mask |= UARTSTAT_FE;
++		port->read_status_mask |= UARTSTAT_FE;
+ 
+ 	/* characters to ignore */
+-	sport->port.ignore_status_mask = 0;
++	port->ignore_status_mask = 0;
+ 	if (termios->c_iflag & IGNPAR)
+-		sport->port.ignore_status_mask |= UARTSTAT_PE;
++		port->ignore_status_mask |= UARTSTAT_PE;
+ 	if (termios->c_iflag & IGNBRK) {
+-		sport->port.ignore_status_mask |= UARTSTAT_FE;
++		port->ignore_status_mask |= UARTSTAT_FE;
+ 		/*
+ 		 * if we're ignoring parity and break indicators,
+ 		 * ignore overruns too (for real raw support).
+ 		 */
+ 		if (termios->c_iflag & IGNPAR)
+-			sport->port.ignore_status_mask |= UARTSTAT_OR;
++			port->ignore_status_mask |= UARTSTAT_OR;
+ 	}
+ 
+ 	/* update the per-port timeout */
+ 	uart_update_timeout(port, termios->c_cflag, baud);
+ 
++	/*
++	 * disable CTS to ensure the transmit engine is not blocked by the flow
++	 * control when there is dirty data in TX FIFO
++	 */
++	lpuart32_write(port, modem & ~UARTMODIR_TXCTSE, UARTMODIR);
++
+ 	/*
+ 	 * LPUART Transmission Complete Flag may never be set while queuing a break
+ 	 * character, so skip waiting for transmission complete when UARTCTRL_SBK is
+ 	 * asserted.
+ 	 */
+-	if (!(old_ctrl & UARTCTRL_SBK)) {
+-		lpuart32_write(&sport->port, 0, UARTMODIR);
+-		lpuart32_wait_bit_set(&sport->port, UARTSTAT, UARTSTAT_TC);
+-	}
++	if (!(old_ctrl & UARTCTRL_SBK))
++		lpuart32_wait_bit_set(port, UARTSTAT, UARTSTAT_TC);
+ 
+ 	/* disable transmit and receive */
+-	lpuart32_write(&sport->port, old_ctrl & ~(UARTCTRL_TE | UARTCTRL_RE),
++	lpuart32_write(port, old_ctrl & ~(UARTCTRL_TE | UARTCTRL_RE),
+ 		       UARTCTRL);
+ 
+-	lpuart32_write(&sport->port, bd, UARTBAUD);
++	lpuart32_write(port, bd, UARTBAUD);
+ 	lpuart32_serial_setbrg(sport, baud);
+-	/* disable CTS before enabling UARTCTRL_TE to avoid pending idle preamble */
+-	lpuart32_write(&sport->port, modem & ~UARTMODIR_TXCTSE, UARTMODIR);
+ 	/* restore control register */
+-	lpuart32_write(&sport->port, ctrl, UARTCTRL);
++	lpuart32_write(port, ctrl, UARTCTRL);
+ 	/* re-enable the CTS if needed */
+-	lpuart32_write(&sport->port, modem, UARTMODIR);
++	lpuart32_write(port, modem, UARTMODIR);
+ 
+ 	if ((ctrl & (UARTCTRL_PE | UARTCTRL_M)) == UARTCTRL_PE)
+ 		sport->is_cs7 = true;
+@@ -2374,7 +2365,7 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
+ 			sport->lpuart_dma_rx_use = false;
+ 	}
+ 
+-	uart_port_unlock_irqrestore(&sport->port, flags);
++	uart_port_unlock_irqrestore(port, flags);
+ }
+ 
+ static const char *lpuart_type(struct uart_port *port)
+@@ -2487,7 +2478,7 @@ static void
+ lpuart_console_write(struct console *co, const char *s, unsigned int count)
+ {
+ 	struct lpuart_port *sport = lpuart_ports[co->index];
+-	unsigned char  old_cr2, cr2;
++	u8  old_cr2, cr2;
+ 	unsigned long flags;
+ 	int locked = 1;
+ 
+@@ -2517,7 +2508,7 @@ static void
+ lpuart32_console_write(struct console *co, const char *s, unsigned int count)
+ {
+ 	struct lpuart_port *sport = lpuart_ports[co->index];
+-	unsigned long  old_cr, cr;
++	u32 old_cr, cr;
+ 	unsigned long flags;
+ 	int locked = 1;
+ 
+@@ -2551,7 +2542,7 @@ static void __init
+ lpuart_console_get_options(struct lpuart_port *sport, int *baud,
+ 			   int *parity, int *bits)
+ {
+-	unsigned char cr, bdh, bdl, brfa;
++	u8 cr, bdh, bdl, brfa;
+ 	unsigned int sbr, uartclk, baud_raw;
+ 
+ 	cr = readb(sport->port.membase + UARTCR2);
+@@ -2600,7 +2591,7 @@ static void __init
+ lpuart32_console_get_options(struct lpuart_port *sport, int *baud,
+ 			   int *parity, int *bits)
+ {
+-	unsigned long cr, bd;
++	u32 cr, bd;
+ 	unsigned int sbr, uartclk, baud_raw;
+ 
+ 	cr = lpuart32_read(&sport->port, UARTCTRL);
+@@ -2806,13 +2797,13 @@ static int lpuart_global_reset(struct lpuart_port *sport)
+ {
+ 	struct uart_port *port = &sport->port;
+ 	void __iomem *global_addr;
+-	unsigned long ctrl, bd;
++	u32 ctrl, bd;
+ 	unsigned int val = 0;
+ 	int ret;
+ 
+ 	ret = clk_prepare_enable(sport->ipg_clk);
+ 	if (ret) {
+-		dev_err(sport->port.dev, "failed to enable uart ipg clk: %d\n", ret);
++		dev_err(port->dev, "failed to enable uart ipg clk: %d\n", ret);
+ 		return ret;
+ 	}
+ 
+@@ -2823,10 +2814,10 @@ static int lpuart_global_reset(struct lpuart_port *sport)
+ 		 */
+ 		ctrl = lpuart32_read(port, UARTCTRL);
+ 		if (ctrl & UARTCTRL_TE) {
+-			bd = lpuart32_read(&sport->port, UARTBAUD);
++			bd = lpuart32_read(port, UARTBAUD);
+ 			if (read_poll_timeout(lpuart32_tx_empty, val, val, 1, 100000, false,
+ 					      port)) {
+-				dev_warn(sport->port.dev,
++				dev_warn(port->dev,
+ 					 "timeout waiting for transmit engine to complete\n");
+ 				clk_disable_unprepare(sport->ipg_clk);
+ 				return 0;
+@@ -3012,7 +3003,7 @@ static int lpuart_runtime_resume(struct device *dev)
+ 
+ static void serial_lpuart_enable_wakeup(struct lpuart_port *sport, bool on)
+ {
+-	unsigned int val, baud;
++	u32 val, baud;
+ 
+ 	if (lpuart_is_32(sport)) {
+ 		val = lpuart32_read(&sport->port, UARTCTRL);
+@@ -3077,7 +3068,7 @@ static int lpuart_suspend_noirq(struct device *dev)
+ static int lpuart_resume_noirq(struct device *dev)
+ {
+ 	struct lpuart_port *sport = dev_get_drvdata(dev);
+-	unsigned int val;
++	u32 val;
+ 
+ 	pinctrl_pm_select_default_state(dev);
+ 
+@@ -3097,7 +3088,8 @@ static int lpuart_resume_noirq(struct device *dev)
+ static int lpuart_suspend(struct device *dev)
+ {
+ 	struct lpuart_port *sport = dev_get_drvdata(dev);
+-	unsigned long temp, flags;
++	u32 temp;
++	unsigned long flags;
+ 
+ 	uart_suspend_port(&lpuart_reg, &sport->port);
+ 
+@@ -3177,7 +3169,7 @@ static void lpuart_console_fixup(struct lpuart_port *sport)
+ 	 * in VLLS mode, or restore console setting here.
+ 	 */
+ 	if (is_imx7ulp_lpuart(sport) && lpuart_uport_is_active(sport) &&
+-	    console_suspend_enabled && uart_console(&sport->port)) {
++	    console_suspend_enabled && uart_console(uport)) {
+ 
+ 		mutex_lock(&port->mutex);
+ 		memset(&termios, 0, sizeof(struct ktermios));
+diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
+index 32c8693b438b07..8c26275696df99 100644
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -2397,10 +2397,10 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
+ 	page_size = readl(&xhci->op_regs->page_size);
+ 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ 			"Supported page size register = 0x%x", page_size);
+-	i = ffs(page_size);
+-	if (i < 16)
++	val = ffs(page_size) - 1;
++	if (val < 16)
+ 		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+-			"Supported page size of %iK", (1 << (i+12)) / 1024);
++			"Supported page size of %iK", (1 << (val + 12)) / 1024);
+ 	else
+ 		xhci_warn(xhci, "WARN: no supported page size\n");
+ 	/* Use 4K pages, since that's common and the minimum the HC supports */
+diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c
+index 4b1668733a4bec..511dd1b224ae51 100644
+--- a/drivers/usb/typec/ucsi/ucsi_ccg.c
++++ b/drivers/usb/typec/ucsi/ucsi_ccg.c
+@@ -1433,11 +1433,10 @@ static int ucsi_ccg_probe(struct i2c_client *client)
+ 			uc->fw_build = CCG_FW_BUILD_NVIDIA_TEGRA;
+ 		else if (!strcmp(fw_name, "nvidia,gpu"))
+ 			uc->fw_build = CCG_FW_BUILD_NVIDIA;
++		if (!uc->fw_build)
++			dev_err(uc->dev, "failed to get FW build information\n");
+ 	}
+ 
+-	if (!uc->fw_build)
+-		dev_err(uc->dev, "failed to get FW build information\n");
+-
+ 	/* reset ccg device and initialize ucsi */
+ 	status = ucsi_ccg_init(uc);
+ 	if (status < 0) {
+diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
+index 718fa4e0b31ec2..7aeff435c1d873 100644
+--- a/drivers/vhost/scsi.c
++++ b/drivers/vhost/scsi.c
+@@ -1699,14 +1699,19 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
+ 		}
+ 	}
+ 
++	if (vs->vs_tpg) {
++		pr_err("vhost-scsi endpoint already set for %s.\n",
++		       vs->vs_vhost_wwpn);
++		ret = -EEXIST;
++		goto out;
++	}
++
+ 	len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
+ 	vs_tpg = kzalloc(len, GFP_KERNEL);
+ 	if (!vs_tpg) {
+ 		ret = -ENOMEM;
+ 		goto out;
+ 	}
+-	if (vs->vs_tpg)
+-		memcpy(vs_tpg, vs->vs_tpg, len);
+ 
+ 	mutex_lock(&vhost_scsi_mutex);
+ 	list_for_each_entry(tpg, &vhost_scsi_list, tv_tpg_list) {
+@@ -1722,12 +1727,6 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
+ 		tv_tport = tpg->tport;
+ 
+ 		if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
+-			if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) {
+-				mutex_unlock(&tpg->tv_tpg_mutex);
+-				mutex_unlock(&vhost_scsi_mutex);
+-				ret = -EEXIST;
+-				goto undepend;
+-			}
+ 			/*
+ 			 * In order to ensure individual vhost-scsi configfs
+ 			 * groups cannot be removed while in use by vhost ioctl,
+@@ -1774,15 +1773,15 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
+ 		}
+ 		ret = 0;
+ 	} else {
+-		ret = -EEXIST;
++		ret = -ENODEV;
++		goto free_tpg;
+ 	}
+ 
+ 	/*
+-	 * Act as synchronize_rcu to make sure access to
+-	 * old vs->vs_tpg is finished.
++	 * Act as synchronize_rcu to make sure requests after this point
++	 * see a fully setup device.
+ 	 */
+ 	vhost_scsi_flush(vs);
+-	kfree(vs->vs_tpg);
+ 	vs->vs_tpg = vs_tpg;
+ 	goto out;
+ 
+@@ -1802,6 +1801,7 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
+ 			target_undepend_item(&tpg->se_tpg.tpg_group.cg_item);
+ 		}
+ 	}
++free_tpg:
+ 	kfree(vs_tpg);
+ out:
+ 	mutex_unlock(&vs->dev.mutex);
+@@ -1904,6 +1904,7 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
+ 	vhost_scsi_flush(vs);
+ 	kfree(vs->vs_tpg);
+ 	vs->vs_tpg = NULL;
++	memset(vs->vs_vhost_wwpn, 0, sizeof(vs->vs_vhost_wwpn));
+ 	WARN_ON(vs->vs_events_nr);
+ 	mutex_unlock(&vs->dev.mutex);
+ 	return 0;
+diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
+index bc31db6ef7d262..3e9f2bda67027e 100644
+--- a/drivers/video/console/Kconfig
++++ b/drivers/video/console/Kconfig
+@@ -24,7 +24,7 @@ config VGA_CONSOLE
+ 	  Say Y.
+ 
+ config MDA_CONSOLE
+-	depends on !M68K && !PARISC && ISA
++	depends on VGA_CONSOLE && ISA
+ 	tristate "MDA text console (dual-headed)"
+ 	help
+ 	  Say Y here if you have an old MDA or monochrome Hercules graphics
+@@ -52,7 +52,7 @@ config DUMMY_CONSOLE
+ 
+ config DUMMY_CONSOLE_COLUMNS
+ 	int "Initial number of console screen columns"
+-	depends on DUMMY_CONSOLE && !ARCH_FOOTBRIDGE
++	depends on DUMMY_CONSOLE && !(ARCH_FOOTBRIDGE && VGA_CONSOLE)
+ 	default 160 if PARISC
+ 	default 80
+ 	help
+@@ -62,7 +62,7 @@ config DUMMY_CONSOLE_COLUMNS
+ 
+ config DUMMY_CONSOLE_ROWS
+ 	int "Initial number of console screen rows"
+-	depends on DUMMY_CONSOLE && !ARCH_FOOTBRIDGE
++	depends on DUMMY_CONSOLE && !(ARCH_FOOTBRIDGE && VGA_CONSOLE)
+ 	default 64 if PARISC
+ 	default 30 if ARM
+ 	default 25
+diff --git a/drivers/video/fbdev/au1100fb.c b/drivers/video/fbdev/au1100fb.c
+index 840f221607635b..6251a6b07b3a11 100644
+--- a/drivers/video/fbdev/au1100fb.c
++++ b/drivers/video/fbdev/au1100fb.c
+@@ -137,13 +137,15 @@ static int au1100fb_fb_blank(int blank_mode, struct fb_info *fbi)
+ 	 */
+ int au1100fb_setmode(struct au1100fb_device *fbdev)
+ {
+-	struct fb_info *info = &fbdev->info;
++	struct fb_info *info;
+ 	u32 words;
+ 	int index;
+ 
+ 	if (!fbdev)
+ 		return -EINVAL;
+ 
++	info = &fbdev->info;
++
+ 	/* Update var-dependent FB info */
+ 	if (panel_is_active(fbdev->panel) || panel_is_color(fbdev->panel)) {
+ 		if (info->var.bits_per_pixel <= 8) {
+diff --git a/drivers/video/fbdev/sm501fb.c b/drivers/video/fbdev/sm501fb.c
+index 86ecbb2d86db8d..2eb27ebf822e80 100644
+--- a/drivers/video/fbdev/sm501fb.c
++++ b/drivers/video/fbdev/sm501fb.c
+@@ -326,6 +326,13 @@ static int sm501fb_check_var(struct fb_var_screeninfo *var,
+ 	if (var->xres_virtual > 4096 || var->yres_virtual > 2048)
+ 		return -EINVAL;
+ 
++	/* geometry sanity checks */
++	if (var->xres + var->xoffset > var->xres_virtual)
++		return -EINVAL;
++
++	if (var->yres + var->yoffset > var->yres_virtual)
++		return -EINVAL;
++
+ 	/* can cope with 8,16 or 32bpp */
+ 
+ 	if (var->bits_per_pixel <= 8)
+diff --git a/drivers/w1/masters/w1-uart.c b/drivers/w1/masters/w1-uart.c
+index a31782e56ba75a..c87eea34780678 100644
+--- a/drivers/w1/masters/w1-uart.c
++++ b/drivers/w1/masters/w1-uart.c
+@@ -372,11 +372,11 @@ static int w1_uart_probe(struct serdev_device *serdev)
+ 	init_completion(&w1dev->rx_byte_received);
+ 	mutex_init(&w1dev->rx_mutex);
+ 
++	serdev_device_set_drvdata(serdev, w1dev);
++	serdev_device_set_client_ops(serdev, &w1_uart_serdev_ops);
+ 	ret = w1_uart_serdev_open(w1dev);
+ 	if (ret < 0)
+ 		return ret;
+-	serdev_device_set_drvdata(serdev, w1dev);
+-	serdev_device_set_client_ops(serdev, &w1_uart_serdev_ops);
+ 
+ 	return w1_add_master_device(&w1dev->bus);
+ }
+diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
+index 143ac03b7425c0..3397939fd2d5af 100644
+--- a/fs/9p/vfs_inode_dotl.c
++++ b/fs/9p/vfs_inode_dotl.c
+@@ -407,8 +407,8 @@ static int v9fs_vfs_mkdir_dotl(struct mnt_idmap *idmap,
+ 			 err);
+ 		goto error;
+ 	}
+-	v9fs_fid_add(dentry, &fid);
+ 	v9fs_set_create_acl(inode, fid, dacl, pacl);
++	v9fs_fid_add(dentry, &fid);
+ 	d_instantiate(dentry, inode);
+ 	err = 0;
+ 	inc_nlink(dir);
+diff --git a/fs/affs/file.c b/fs/affs/file.c
+index a5a861dd522301..7a71018e3f6758 100644
+--- a/fs/affs/file.c
++++ b/fs/affs/file.c
+@@ -596,7 +596,7 @@ affs_extent_file_ofs(struct inode *inode, u32 newsize)
+ 		BUG_ON(tmp > bsize);
+ 		AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
+ 		AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
+-		AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
++		AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx + 1);
+ 		AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
+ 		affs_fix_checksum(sb, bh);
+ 		bh->b_state &= ~(1UL << BH_New);
+@@ -724,7 +724,8 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
+ 		tmp = min(bsize - boff, to - from);
+ 		BUG_ON(boff + tmp > bsize || tmp > bsize);
+ 		memcpy(AFFS_DATA(bh) + boff, data + from, tmp);
+-		be32_add_cpu(&AFFS_DATA_HEAD(bh)->size, tmp);
++		AFFS_DATA_HEAD(bh)->size = cpu_to_be32(
++			max(boff + tmp, be32_to_cpu(AFFS_DATA_HEAD(bh)->size)));
+ 		affs_fix_checksum(sb, bh);
+ 		mark_buffer_dirty_inode(bh, inode);
+ 		written += tmp;
+@@ -746,7 +747,7 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
+ 		if (buffer_new(bh)) {
+ 			AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
+ 			AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
+-			AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
++			AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx + 1);
+ 			AFFS_DATA_HEAD(bh)->size = cpu_to_be32(bsize);
+ 			AFFS_DATA_HEAD(bh)->next = 0;
+ 			bh->b_state &= ~(1UL << BH_New);
+@@ -780,7 +781,7 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
+ 		if (buffer_new(bh)) {
+ 			AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
+ 			AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
+-			AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
++			AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx + 1);
+ 			AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
+ 			AFFS_DATA_HEAD(bh)->next = 0;
+ 			bh->b_state &= ~(1UL << BH_New);
+diff --git a/fs/exec.c b/fs/exec.c
+index 67513bd606c249..d6079437296383 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -1246,13 +1246,12 @@ int begin_new_exec(struct linux_binprm * bprm)
+ 	 */
+ 	bprm->point_of_no_return = true;
+ 
+-	/*
+-	 * Make this the only thread in the thread group.
+-	 */
++	/* Make this the only thread in the thread group */
+ 	retval = de_thread(me);
+ 	if (retval)
+ 		goto out;
+-
++	/* see the comment in check_unsafe_exec() */
++	current->fs->in_exec = 0;
+ 	/*
+ 	 * Cancel any io_uring activity across execve
+ 	 */
+@@ -1514,6 +1513,8 @@ static void free_bprm(struct linux_binprm *bprm)
+ 	}
+ 	free_arg_pages(bprm);
+ 	if (bprm->cred) {
++		/* in case exec fails before de_thread() succeeds */
++		current->fs->in_exec = 0;
+ 		mutex_unlock(&current->signal->cred_guard_mutex);
+ 		abort_creds(bprm->cred);
+ 	}
+@@ -1620,6 +1621,10 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
+ 	 * suid exec because the differently privileged task
+ 	 * will be able to manipulate the current directory, etc.
+ 	 * It would be nice to force an unshare instead...
++	 *
++	 * Otherwise we set fs->in_exec = 1 to deny clone(CLONE_FS)
++	 * from another sub-thread until de_thread() succeeds; this
++	 * state is protected by the cred_guard_mutex we hold.
+ 	 */
+ 	n_fs = 1;
+ 	spin_lock(&p->fs->lock);
+@@ -1878,7 +1883,6 @@ static int bprm_execve(struct linux_binprm *bprm)
+ 
+ 	sched_mm_cid_after_execve(current);
+ 	/* execve succeeded */
+-	current->fs->in_exec = 0;
+ 	current->in_execve = 0;
+ 	rseq_execve(current);
+ 	user_events_execve(current);
+@@ -1897,7 +1901,6 @@ static int bprm_execve(struct linux_binprm *bprm)
+ 		force_fatal_sig(SIGSEGV);
+ 
+ 	sched_mm_cid_after_execve(current);
+-	current->fs->in_exec = 0;
+ 	current->in_execve = 0;
+ 
+ 	return retval;
+diff --git a/fs/exfat/fatent.c b/fs/exfat/fatent.c
+index 6f3651c6ca91ef..8df5ad6ebb10cb 100644
+--- a/fs/exfat/fatent.c
++++ b/fs/exfat/fatent.c
+@@ -265,7 +265,7 @@ int exfat_find_last_cluster(struct super_block *sb, struct exfat_chain *p_chain,
+ 		clu = next;
+ 		if (exfat_ent_get(sb, clu, &next))
+ 			return -EIO;
+-	} while (next != EXFAT_EOF_CLUSTER);
++	} while (next != EXFAT_EOF_CLUSTER && count <= p_chain->size);
+ 
+ 	if (p_chain->size != count) {
+ 		exfat_fs_error(sb,
+diff --git a/fs/exfat/file.c b/fs/exfat/file.c
+index 807349d8ea0501..841a5b18e3dfdb 100644
+--- a/fs/exfat/file.c
++++ b/fs/exfat/file.c
+@@ -582,6 +582,9 @@ static ssize_t exfat_file_write_iter(struct kiocb *iocb, struct iov_iter *iter)
+ 	loff_t pos = iocb->ki_pos;
+ 	loff_t valid_size;
+ 
++	if (unlikely(exfat_forced_shutdown(inode->i_sb)))
++		return -EIO;
++
+ 	inode_lock(inode);
+ 
+ 	valid_size = ei->valid_size;
+@@ -635,6 +638,16 @@ static ssize_t exfat_file_write_iter(struct kiocb *iocb, struct iov_iter *iter)
+ 	return ret;
+ }
+ 
++static ssize_t exfat_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
++{
++	struct inode *inode = file_inode(iocb->ki_filp);
++
++	if (unlikely(exfat_forced_shutdown(inode->i_sb)))
++		return -EIO;
++
++	return generic_file_read_iter(iocb, iter);
++}
++
+ static vm_fault_t exfat_page_mkwrite(struct vm_fault *vmf)
+ {
+ 	int err;
+@@ -672,14 +685,26 @@ static const struct vm_operations_struct exfat_file_vm_ops = {
+ 
+ static int exfat_file_mmap(struct file *file, struct vm_area_struct *vma)
+ {
++	if (unlikely(exfat_forced_shutdown(file_inode(file)->i_sb)))
++		return -EIO;
++
+ 	file_accessed(file);
+ 	vma->vm_ops = &exfat_file_vm_ops;
+ 	return 0;
+ }
+ 
++static ssize_t exfat_splice_read(struct file *in, loff_t *ppos,
++		struct pipe_inode_info *pipe, size_t len, unsigned int flags)
++{
++	if (unlikely(exfat_forced_shutdown(file_inode(in)->i_sb)))
++		return -EIO;
++
++	return filemap_splice_read(in, ppos, pipe, len, flags);
++}
++
+ const struct file_operations exfat_file_operations = {
+ 	.llseek		= generic_file_llseek,
+-	.read_iter	= generic_file_read_iter,
++	.read_iter	= exfat_file_read_iter,
+ 	.write_iter	= exfat_file_write_iter,
+ 	.unlocked_ioctl = exfat_ioctl,
+ #ifdef CONFIG_COMPAT
+@@ -687,7 +712,7 @@ const struct file_operations exfat_file_operations = {
+ #endif
+ 	.mmap		= exfat_file_mmap,
+ 	.fsync		= exfat_file_fsync,
+-	.splice_read	= filemap_splice_read,
++	.splice_read	= exfat_splice_read,
+ 	.splice_write	= iter_file_splice_write,
+ };
+ 
+diff --git a/fs/exfat/inode.c b/fs/exfat/inode.c
+index d724de8f57bf92..3801516ac50716 100644
+--- a/fs/exfat/inode.c
++++ b/fs/exfat/inode.c
+@@ -344,7 +344,8 @@ static int exfat_get_block(struct inode *inode, sector_t iblock,
+ 			 * The block has been partially written,
+ 			 * zero the unwritten part and map the block.
+ 			 */
+-			loff_t size, off, pos;
++			loff_t size, pos;
++			void *addr;
+ 
+ 			max_blocks = 1;
+ 
+@@ -355,17 +356,43 @@ static int exfat_get_block(struct inode *inode, sector_t iblock,
+ 			if (!bh_result->b_folio)
+ 				goto done;
+ 
++			/*
++			 * No buffer_head is allocated.
++			 * (1) bmap: It's enough to fill bh_result without I/O.
++			 * (2) read: The unwritten part should be filled with 0.
++			 *           If a folio does not have any buffers,
++			 *           let's return -EAGAIN to fall back to
++			 *           per-bh IO like block_read_full_folio().
++			 */
++			if (!folio_buffers(bh_result->b_folio)) {
++				err = -EAGAIN;
++				goto done;
++			}
++
+ 			pos = EXFAT_BLK_TO_B(iblock, sb);
+ 			size = ei->valid_size - pos;
+-			off = pos & (PAGE_SIZE - 1);
++			addr = folio_address(bh_result->b_folio) +
++			       offset_in_folio(bh_result->b_folio, pos);
++
++			/* Check if bh->b_data points to proper addr in folio */
++			if (bh_result->b_data != addr) {
++				exfat_fs_error_ratelimit(sb,
++					"b_data(%p) != folio_addr(%p)",
++					bh_result->b_data, addr);
++				err = -EINVAL;
++				goto done;
++			}
+ 
+-			folio_set_bh(bh_result, bh_result->b_folio, off);
++			/* Read a block */
+ 			err = bh_read(bh_result, 0);
+ 			if (err < 0)
+-				goto unlock_ret;
++				goto done;
++
++			/* Zero unwritten part of a block */
++			memset(bh_result->b_data + size, 0,
++			       bh_result->b_size - size);
+ 
+-			folio_zero_segment(bh_result->b_folio, off + size,
+-					off + sb->s_blocksize);
++			err = 0;
+ 		} else {
+ 			/*
+ 			 * The range has not been written, clear the mapped flag
+@@ -376,6 +403,8 @@ static int exfat_get_block(struct inode *inode, sector_t iblock,
+ 	}
+ done:
+ 	bh_result->b_size = EXFAT_BLK_TO_B(max_blocks, sb);
++	if (err < 0)
++		clear_buffer_mapped(bh_result);
+ unlock_ret:
+ 	mutex_unlock(&sbi->s_lock);
+ 	return err;
+diff --git a/fs/exfat/namei.c b/fs/exfat/namei.c
+index e47a5ddfc79b3d..7b3951951f8af1 100644
+--- a/fs/exfat/namei.c
++++ b/fs/exfat/namei.c
+@@ -639,6 +639,11 @@ static int exfat_find(struct inode *dir, struct qstr *qname,
+ 	info->valid_size = le64_to_cpu(ep2->dentry.stream.valid_size);
+ 	info->size = le64_to_cpu(ep2->dentry.stream.size);
+ 
++	if (unlikely(EXFAT_B_TO_CLU_ROUND_UP(info->size, sbi) > sbi->used_clusters)) {
++		exfat_fs_error(sb, "data size is invalid(%lld)", info->size);
++		return -EIO;
++	}
++
+ 	info->start_clu = le32_to_cpu(ep2->dentry.stream.start_clu);
+ 	if (!is_valid_cluster(sbi, info->start_clu) && info->size) {
+ 		exfat_warn(sb, "start_clu is invalid cluster(0x%x)",
+diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
+index ef6a3c8f3a9a06..b278b5703c1977 100644
+--- a/fs/ext4/dir.c
++++ b/fs/ext4/dir.c
+@@ -104,6 +104,9 @@ int __ext4_check_dir_entry(const char *function, unsigned int line,
+ 	else if (unlikely(le32_to_cpu(de->inode) >
+ 			le32_to_cpu(EXT4_SB(dir->i_sb)->s_es->s_inodes_count)))
+ 		error_msg = "inode out of bounds";
++	else if (unlikely(next_offset == size && de->name_len == 1 &&
++			  de->name[0] == '.'))
++		error_msg = "'.' directory cannot be the last in data block";
+ 	else
+ 		return 0;
+ 
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 940ac1a49b729e..d3795c6c0a9d8e 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -6781,22 +6781,29 @@ static int ext4_statfs_project(struct super_block *sb,
+ 			     dquot->dq_dqb.dqb_bhardlimit);
+ 	limit >>= sb->s_blocksize_bits;
+ 
+-	if (limit && buf->f_blocks > limit) {
++	if (limit) {
++		uint64_t	remaining = 0;
++
+ 		curblock = (dquot->dq_dqb.dqb_curspace +
+ 			    dquot->dq_dqb.dqb_rsvspace) >> sb->s_blocksize_bits;
+-		buf->f_blocks = limit;
+-		buf->f_bfree = buf->f_bavail =
+-			(buf->f_blocks > curblock) ?
+-			 (buf->f_blocks - curblock) : 0;
++		if (limit > curblock)
++			remaining = limit - curblock;
++
++		buf->f_blocks = min(buf->f_blocks, limit);
++		buf->f_bfree = min(buf->f_bfree, remaining);
++		buf->f_bavail = min(buf->f_bavail, remaining);
+ 	}
+ 
+ 	limit = min_not_zero(dquot->dq_dqb.dqb_isoftlimit,
+ 			     dquot->dq_dqb.dqb_ihardlimit);
+-	if (limit && buf->f_files > limit) {
+-		buf->f_files = limit;
+-		buf->f_ffree =
+-			(buf->f_files > dquot->dq_dqb.dqb_curinodes) ?
+-			 (buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0;
++	if (limit) {
++		uint64_t	remaining = 0;
++
++		if (limit > dquot->dq_dqb.dqb_curinodes)
++			remaining = limit - dquot->dq_dqb.dqb_curinodes;
++
++		buf->f_files = min(buf->f_files, limit);
++		buf->f_ffree = min(buf->f_ffree, remaining);
+ 	}
+ 
+ 	spin_unlock(&dquot->dq_dqb_lock);
+diff --git a/fs/fuse/dax.c b/fs/fuse/dax.c
+index 12ef91d170bb30..7faf1af59d5d84 100644
+--- a/fs/fuse/dax.c
++++ b/fs/fuse/dax.c
+@@ -681,7 +681,6 @@ static int __fuse_dax_break_layouts(struct inode *inode, bool *retry,
+ 			0, 0, fuse_wait_dax_page(inode));
+ }
+ 
+-/* dmap_end == 0 leads to unmapping of whole file */
+ int fuse_dax_break_layouts(struct inode *inode, u64 dmap_start,
+ 				  u64 dmap_end)
+ {
+diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
+index bd6e675023c622..a1e86ec07c38b5 100644
+--- a/fs/fuse/dir.c
++++ b/fs/fuse/dir.c
+@@ -1936,7 +1936,7 @@ int fuse_do_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
+ 	if (FUSE_IS_DAX(inode) && is_truncate) {
+ 		filemap_invalidate_lock(mapping);
+ 		fault_blocked = true;
+-		err = fuse_dax_break_layouts(inode, 0, 0);
++		err = fuse_dax_break_layouts(inode, 0, -1);
+ 		if (err) {
+ 			filemap_invalidate_unlock(mapping);
+ 			return err;
+diff --git a/fs/fuse/file.c b/fs/fuse/file.c
+index e20d91d0ae558c..f597f7e68e5014 100644
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -253,7 +253,7 @@ static int fuse_open(struct inode *inode, struct file *file)
+ 
+ 	if (dax_truncate) {
+ 		filemap_invalidate_lock(inode->i_mapping);
+-		err = fuse_dax_break_layouts(inode, 0, 0);
++		err = fuse_dax_break_layouts(inode, 0, -1);
+ 		if (err)
+ 			goto out_inode_unlock;
+ 	}
+@@ -3146,7 +3146,7 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
+ 	inode_lock(inode);
+ 	if (block_faults) {
+ 		filemap_invalidate_lock(inode->i_mapping);
+-		err = fuse_dax_break_layouts(inode, 0, 0);
++		err = fuse_dax_break_layouts(inode, 0, -1);
+ 		if (err)
+ 			goto out;
+ 	}
+diff --git a/fs/hostfs/hostfs.h b/fs/hostfs/hostfs.h
+index 8b39c15c408ccd..15b2f094d36ef8 100644
+--- a/fs/hostfs/hostfs.h
++++ b/fs/hostfs/hostfs.h
+@@ -60,7 +60,7 @@ struct hostfs_stat {
+ 	unsigned int uid;
+ 	unsigned int gid;
+ 	unsigned long long size;
+-	struct hostfs_timespec atime, mtime, ctime;
++	struct hostfs_timespec atime, mtime, ctime, btime;
+ 	unsigned int blksize;
+ 	unsigned long long blocks;
+ 	struct {
+diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
+index 94f3cc42c74035..a16a7df0766cd1 100644
+--- a/fs/hostfs/hostfs_kern.c
++++ b/fs/hostfs/hostfs_kern.c
+@@ -33,6 +33,7 @@ struct hostfs_inode_info {
+ 	struct inode vfs_inode;
+ 	struct mutex open_mutex;
+ 	dev_t dev;
++	struct hostfs_timespec btime;
+ };
+ 
+ static inline struct hostfs_inode_info *HOSTFS_I(struct inode *inode)
+@@ -550,6 +551,7 @@ static int hostfs_inode_set(struct inode *ino, void *data)
+ 	}
+ 
+ 	HOSTFS_I(ino)->dev = dev;
++	HOSTFS_I(ino)->btime = st->btime;
+ 	ino->i_ino = st->ino;
+ 	ino->i_mode = st->mode;
+ 	return hostfs_inode_update(ino, st);
+@@ -560,7 +562,10 @@ static int hostfs_inode_test(struct inode *inode, void *data)
+ 	const struct hostfs_stat *st = data;
+ 	dev_t dev = MKDEV(st->dev.maj, st->dev.min);
+ 
+-	return inode->i_ino == st->ino && HOSTFS_I(inode)->dev == dev;
++	return inode->i_ino == st->ino && HOSTFS_I(inode)->dev == dev &&
++	       (inode->i_mode & S_IFMT) == (st->mode & S_IFMT) &&
++	       HOSTFS_I(inode)->btime.tv_sec == st->btime.tv_sec &&
++	       HOSTFS_I(inode)->btime.tv_nsec == st->btime.tv_nsec;
+ }
+ 
+ static struct inode *hostfs_iget(struct super_block *sb, char *name)
+diff --git a/fs/hostfs/hostfs_user.c b/fs/hostfs/hostfs_user.c
+index 97e9c40a944883..3bcd9f35e70b22 100644
+--- a/fs/hostfs/hostfs_user.c
++++ b/fs/hostfs/hostfs_user.c
+@@ -18,39 +18,48 @@
+ #include "hostfs.h"
+ #include <utime.h>
+ 
+-static void stat64_to_hostfs(const struct stat64 *buf, struct hostfs_stat *p)
++static void statx_to_hostfs(const struct statx *buf, struct hostfs_stat *p)
+ {
+-	p->ino = buf->st_ino;
+-	p->mode = buf->st_mode;
+-	p->nlink = buf->st_nlink;
+-	p->uid = buf->st_uid;
+-	p->gid = buf->st_gid;
+-	p->size = buf->st_size;
+-	p->atime.tv_sec = buf->st_atime;
+-	p->atime.tv_nsec = 0;
+-	p->ctime.tv_sec = buf->st_ctime;
+-	p->ctime.tv_nsec = 0;
+-	p->mtime.tv_sec = buf->st_mtime;
+-	p->mtime.tv_nsec = 0;
+-	p->blksize = buf->st_blksize;
+-	p->blocks = buf->st_blocks;
+-	p->rdev.maj = os_major(buf->st_rdev);
+-	p->rdev.min = os_minor(buf->st_rdev);
+-	p->dev.maj = os_major(buf->st_dev);
+-	p->dev.min = os_minor(buf->st_dev);
++	p->ino = buf->stx_ino;
++	p->mode = buf->stx_mode;
++	p->nlink = buf->stx_nlink;
++	p->uid = buf->stx_uid;
++	p->gid = buf->stx_gid;
++	p->size = buf->stx_size;
++	p->atime.tv_sec = buf->stx_atime.tv_sec;
++	p->atime.tv_nsec = buf->stx_atime.tv_nsec;
++	p->ctime.tv_sec = buf->stx_ctime.tv_sec;
++	p->ctime.tv_nsec = buf->stx_ctime.tv_nsec;
++	p->mtime.tv_sec = buf->stx_mtime.tv_sec;
++	p->mtime.tv_nsec = buf->stx_mtime.tv_nsec;
++	if (buf->stx_mask & STATX_BTIME) {
++		p->btime.tv_sec = buf->stx_btime.tv_sec;
++		p->btime.tv_nsec = buf->stx_btime.tv_nsec;
++	} else {
++		memset(&p->btime, 0, sizeof(p->btime));
++	}
++	p->blksize = buf->stx_blksize;
++	p->blocks = buf->stx_blocks;
++	p->rdev.maj = buf->stx_rdev_major;
++	p->rdev.min = buf->stx_rdev_minor;
++	p->dev.maj = buf->stx_dev_major;
++	p->dev.min = buf->stx_dev_minor;
+ }
+ 
+ int stat_file(const char *path, struct hostfs_stat *p, int fd)
+ {
+-	struct stat64 buf;
++	struct statx buf;
++	int flags = AT_SYMLINK_NOFOLLOW;
+ 
+ 	if (fd >= 0) {
+-		if (fstat64(fd, &buf) < 0)
+-			return -errno;
+-	} else if (lstat64(path, &buf) < 0) {
+-		return -errno;
++		flags |= AT_EMPTY_PATH;
++		path = "";
+ 	}
+-	stat64_to_hostfs(&buf, p);
++
++	if ((statx(fd, path, flags, STATX_BASIC_STATS | STATX_BTIME, &buf)) < 0)
++		return -errno;
++
++	statx_to_hostfs(&buf, p);
+ 	return 0;
+ }
+ 
+diff --git a/fs/isofs/dir.c b/fs/isofs/dir.c
+index eb2f8273e6f15e..09df40b612fbf2 100644
+--- a/fs/isofs/dir.c
++++ b/fs/isofs/dir.c
+@@ -147,7 +147,8 @@ static int do_isofs_readdir(struct inode *inode, struct file *file,
+ 			de = tmpde;
+ 		}
+ 		/* Basic sanity check, whether name doesn't exceed dir entry */
+-		if (de_len < de->name_len[0] +
++		if (de_len < sizeof(struct iso_directory_record) ||
++		    de_len < de->name_len[0] +
+ 					sizeof(struct iso_directory_record)) {
+ 			printk(KERN_NOTICE "iso9660: Corrupted directory entry"
+ 			       " in block %lu of inode %lu\n", block,
+diff --git a/fs/jfs/jfs_dtree.c b/fs/jfs/jfs_dtree.c
+index 8f85177f284b5a..93db6eec446556 100644
+--- a/fs/jfs/jfs_dtree.c
++++ b/fs/jfs/jfs_dtree.c
+@@ -117,7 +117,8 @@ do {									\
+ 	if (!(RC)) {							\
+ 		if (((P)->header.nextindex >				\
+ 		     (((BN) == 0) ? DTROOTMAXSLOT : (P)->header.maxslot)) || \
+-		    ((BN) && ((P)->header.maxslot > DTPAGEMAXSLOT))) {	\
++		    ((BN) && (((P)->header.maxslot > DTPAGEMAXSLOT) ||	\
++		    ((P)->header.stblindex >= DTPAGEMAXSLOT)))) {	\
+ 			BT_PUTPAGE(MP);					\
+ 			jfs_error((IP)->i_sb,				\
+ 				  "DT_GETPAGE: dtree page corrupt\n");	\
+diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c
+index 24afbae87225a7..11d7f74d207be0 100644
+--- a/fs/jfs/xattr.c
++++ b/fs/jfs/xattr.c
+@@ -559,11 +559,16 @@ static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size)
+ 
+       size_check:
+ 	if (EALIST_SIZE(ea_buf->xattr) != ea_size) {
+-		int size = clamp_t(int, ea_size, 0, EALIST_SIZE(ea_buf->xattr));
+-
+-		printk(KERN_ERR "ea_get: invalid extended attribute\n");
+-		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1,
+-				     ea_buf->xattr, size, 1);
++		if (unlikely(EALIST_SIZE(ea_buf->xattr) > INT_MAX)) {
++			printk(KERN_ERR "ea_get: extended attribute size too large: %u > INT_MAX\n",
++			       EALIST_SIZE(ea_buf->xattr));
++		} else {
++			int size = clamp_t(int, ea_size, 0, EALIST_SIZE(ea_buf->xattr));
++
++			printk(KERN_ERR "ea_get: invalid extended attribute\n");
++			print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1,
++				       ea_buf->xattr, size, 1);
++		}
+ 		ea_release(inode, ea_buf);
+ 		rc = -EIO;
+ 		goto clean_up;
+diff --git a/fs/netfs/direct_read.c b/fs/netfs/direct_read.c
+index b1a66a6e6bc2d6..917b7edc34ef57 100644
+--- a/fs/netfs/direct_read.c
++++ b/fs/netfs/direct_read.c
+@@ -108,9 +108,9 @@ static int netfs_dispatch_unbuffered_reads(struct netfs_io_request *rreq)
+  * Perform a read to an application buffer, bypassing the pagecache and the
+  * local disk cache.
+  */
+-static int netfs_unbuffered_read(struct netfs_io_request *rreq, bool sync)
++static ssize_t netfs_unbuffered_read(struct netfs_io_request *rreq, bool sync)
+ {
+-	int ret;
++	ssize_t ret;
+ 
+ 	_enter("R=%x %llx-%llx",
+ 	       rreq->debug_id, rreq->start, rreq->start + rreq->len - 1);
+@@ -149,7 +149,7 @@ static int netfs_unbuffered_read(struct netfs_io_request *rreq, bool sync)
+ 	}
+ 
+ out:
+-	_leave(" = %d", ret);
++	_leave(" = %zd", ret);
+ 	return ret;
+ }
+ 
+diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
+index 4db912f5623055..325ba0663a6de2 100644
+--- a/fs/nfs/delegation.c
++++ b/fs/nfs/delegation.c
+@@ -79,6 +79,7 @@ static void nfs_mark_return_delegation(struct nfs_server *server,
+ 				       struct nfs_delegation *delegation)
+ {
+ 	set_bit(NFS_DELEGATION_RETURN, &delegation->flags);
++	set_bit(NFS4SERV_DELEGRETURN, &server->delegation_flags);
+ 	set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state);
+ }
+ 
+@@ -330,14 +331,16 @@ nfs_start_delegation_return(struct nfs_inode *nfsi)
+ }
+ 
+ static void nfs_abort_delegation_return(struct nfs_delegation *delegation,
+-					struct nfs_client *clp, int err)
++					struct nfs_server *server, int err)
+ {
+-
+ 	spin_lock(&delegation->lock);
+ 	clear_bit(NFS_DELEGATION_RETURNING, &delegation->flags);
+ 	if (err == -EAGAIN) {
+ 		set_bit(NFS_DELEGATION_RETURN_DELAYED, &delegation->flags);
+-		set_bit(NFS4CLNT_DELEGRETURN_DELAYED, &clp->cl_state);
++		set_bit(NFS4SERV_DELEGRETURN_DELAYED,
++			&server->delegation_flags);
++		set_bit(NFS4CLNT_DELEGRETURN_DELAYED,
++			&server->nfs_client->cl_state);
+ 	}
+ 	spin_unlock(&delegation->lock);
+ }
+@@ -547,7 +550,7 @@ int nfs_inode_set_delegation(struct inode *inode, const struct cred *cred,
+  */
+ static int nfs_end_delegation_return(struct inode *inode, struct nfs_delegation *delegation, int issync)
+ {
+-	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
++	struct nfs_server *server = NFS_SERVER(inode);
+ 	unsigned int mode = O_WRONLY | O_RDWR;
+ 	int err = 0;
+ 
+@@ -569,11 +572,11 @@ static int nfs_end_delegation_return(struct inode *inode, struct nfs_delegation
+ 		/*
+ 		 * Guard against state recovery
+ 		 */
+-		err = nfs4_wait_clnt_recover(clp);
++		err = nfs4_wait_clnt_recover(server->nfs_client);
+ 	}
+ 
+ 	if (err) {
+-		nfs_abort_delegation_return(delegation, clp, err);
++		nfs_abort_delegation_return(delegation, server, err);
+ 		goto out;
+ 	}
+ 
+@@ -590,17 +593,6 @@ static bool nfs_delegation_need_return(struct nfs_delegation *delegation)
+ 
+ 	if (test_and_clear_bit(NFS_DELEGATION_RETURN, &delegation->flags))
+ 		ret = true;
+-	else if (test_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags)) {
+-		struct inode *inode;
+-
+-		spin_lock(&delegation->lock);
+-		inode = delegation->inode;
+-		if (inode && list_empty(&NFS_I(inode)->open_files))
+-			ret = true;
+-		spin_unlock(&delegation->lock);
+-	}
+-	if (ret)
+-		clear_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags);
+ 	if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags) ||
+ 	    test_bit(NFS_DELEGATION_RETURN_DELAYED, &delegation->flags) ||
+ 	    test_bit(NFS_DELEGATION_REVOKED, &delegation->flags))
+@@ -619,6 +611,9 @@ static int nfs_server_return_marked_delegations(struct nfs_server *server,
+ 	struct nfs_delegation *place_holder_deleg = NULL;
+ 	int err = 0;
+ 
++	if (!test_and_clear_bit(NFS4SERV_DELEGRETURN,
++				&server->delegation_flags))
++		return 0;
+ restart:
+ 	/*
+ 	 * To avoid quadratic looping we hold a reference
+@@ -670,6 +665,7 @@ static int nfs_server_return_marked_delegations(struct nfs_server *server,
+ 		cond_resched();
+ 		if (!err)
+ 			goto restart;
++		set_bit(NFS4SERV_DELEGRETURN, &server->delegation_flags);
+ 		set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state);
+ 		goto out;
+ 	}
+@@ -684,6 +680,9 @@ static bool nfs_server_clear_delayed_delegations(struct nfs_server *server)
+ 	struct nfs_delegation *d;
+ 	bool ret = false;
+ 
++	if (!test_and_clear_bit(NFS4SERV_DELEGRETURN_DELAYED,
++				&server->delegation_flags))
++		goto out;
+ 	list_for_each_entry_rcu (d, &server->delegations, super_list) {
+ 		if (!test_bit(NFS_DELEGATION_RETURN_DELAYED, &d->flags))
+ 			continue;
+@@ -691,6 +690,7 @@ static bool nfs_server_clear_delayed_delegations(struct nfs_server *server)
+ 		clear_bit(NFS_DELEGATION_RETURN_DELAYED, &d->flags);
+ 		ret = true;
+ 	}
++out:
+ 	return ret;
+ }
+ 
+@@ -878,11 +878,25 @@ int nfs4_inode_make_writeable(struct inode *inode)
+ 	return nfs4_inode_return_delegation(inode);
+ }
+ 
+-static void nfs_mark_return_if_closed_delegation(struct nfs_server *server,
+-		struct nfs_delegation *delegation)
++static void
++nfs_mark_return_if_closed_delegation(struct nfs_server *server,
++				     struct nfs_delegation *delegation)
+ {
+-	set_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags);
+-	set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state);
++	struct inode *inode;
++
++	if (test_bit(NFS_DELEGATION_RETURN, &delegation->flags) ||
++	    test_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags))
++		return;
++	spin_lock(&delegation->lock);
++	inode = delegation->inode;
++	if (!inode)
++		goto out;
++	if (list_empty(&NFS_I(inode)->open_files))
++		nfs_mark_return_delegation(server, delegation);
++	else
++		set_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags);
++out:
++	spin_unlock(&delegation->lock);
+ }
+ 
+ static bool nfs_server_mark_return_all_delegations(struct nfs_server *server)
+@@ -1276,6 +1290,7 @@ static void nfs_mark_test_expired_delegation(struct nfs_server *server,
+ 		return;
+ 	clear_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags);
+ 	set_bit(NFS_DELEGATION_TEST_EXPIRED, &delegation->flags);
++	set_bit(NFS4SERV_DELEGATION_EXPIRED, &server->delegation_flags);
+ 	set_bit(NFS4CLNT_DELEGATION_EXPIRED, &server->nfs_client->cl_state);
+ }
+ 
+@@ -1354,6 +1369,9 @@ static int nfs_server_reap_expired_delegations(struct nfs_server *server,
+ 	nfs4_stateid stateid;
+ 	unsigned long gen = ++server->delegation_gen;
+ 
++	if (!test_and_clear_bit(NFS4SERV_DELEGATION_EXPIRED,
++				&server->delegation_flags))
++		return 0;
+ restart:
+ 	rcu_read_lock();
+ 	list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
+@@ -1383,6 +1401,9 @@ static int nfs_server_reap_expired_delegations(struct nfs_server *server,
+ 			goto restart;
+ 		}
+ 		nfs_inode_mark_test_expired_delegation(server,inode);
++		set_bit(NFS4SERV_DELEGATION_EXPIRED, &server->delegation_flags);
++		set_bit(NFS4CLNT_DELEGATION_EXPIRED,
++			&server->nfs_client->cl_state);
+ 		iput(inode);
+ 		return -EAGAIN;
+ 	}
+diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
+index e8ac3f615f932e..71f45cc0ca74d1 100644
+--- a/fs/nfs/nfs4xdr.c
++++ b/fs/nfs/nfs4xdr.c
+@@ -82,9 +82,8 @@ static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req,
+  * we currently use size 2 (u64) out of (NFS4_OPAQUE_LIMIT  >> 2)
+  */
+ #define pagepad_maxsz		(1)
+-#define open_owner_id_maxsz	(1 + 2 + 1 + 1 + 2)
+-#define lock_owner_id_maxsz	(1 + 1 + 4)
+-#define decode_lockowner_maxsz	(1 + XDR_QUADLEN(IDMAP_NAMESZ))
++#define open_owner_id_maxsz	(2 + 1 + 2 + 2)
++#define lock_owner_id_maxsz	(2 + 1 + 2)
+ #define compound_encode_hdr_maxsz	(3 + (NFS4_MAXTAGLEN >> 2))
+ #define compound_decode_hdr_maxsz	(3 + (NFS4_MAXTAGLEN >> 2))
+ #define op_encode_hdr_maxsz	(1)
+@@ -185,7 +184,7 @@ static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req,
+ #define encode_claim_null_maxsz	(1 + nfs4_name_maxsz)
+ #define encode_open_maxsz	(op_encode_hdr_maxsz + \
+ 				2 + encode_share_access_maxsz + 2 + \
+-				open_owner_id_maxsz + \
++				1 + open_owner_id_maxsz + \
+ 				encode_opentype_maxsz + \
+ 				encode_claim_null_maxsz)
+ #define decode_space_limit_maxsz	(3)
+@@ -255,13 +254,14 @@ static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req,
+ #define encode_link_maxsz	(op_encode_hdr_maxsz + \
+ 				nfs4_name_maxsz)
+ #define decode_link_maxsz	(op_decode_hdr_maxsz + decode_change_info_maxsz)
+-#define encode_lockowner_maxsz	(7)
++#define encode_lockowner_maxsz	(2 + 1 + lock_owner_id_maxsz)
++
+ #define encode_lock_maxsz	(op_encode_hdr_maxsz + \
+ 				 7 + \
+ 				 1 + encode_stateid_maxsz + 1 + \
+ 				 encode_lockowner_maxsz)
+ #define decode_lock_denied_maxsz \
+-				(8 + decode_lockowner_maxsz)
++				(2 + 2 + 1 + 2 + 1 + lock_owner_id_maxsz)
+ #define decode_lock_maxsz	(op_decode_hdr_maxsz + \
+ 				 decode_lock_denied_maxsz)
+ #define encode_lockt_maxsz	(op_encode_hdr_maxsz + 5 + \
+@@ -617,7 +617,7 @@ static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req,
+ 				 encode_lockowner_maxsz)
+ #define NFS4_dec_release_lockowner_sz \
+ 				(compound_decode_hdr_maxsz + \
+-				 decode_lockowner_maxsz)
++				 decode_release_lockowner_maxsz)
+ #define NFS4_enc_access_sz	(compound_encode_hdr_maxsz + \
+ 				encode_sequence_maxsz + \
+ 				encode_putfh_maxsz + \
+@@ -1412,7 +1412,7 @@ static inline void encode_openhdr(struct xdr_stream *xdr, const struct nfs_opena
+ 	__be32 *p;
+  /*
+  * opcode 4, seqid 4, share_access 4, share_deny 4, clientid 8, ownerlen 4,
+- * owner 4 = 32
++ * owner 28
+  */
+ 	encode_nfs4_seqid(xdr, arg->seqid);
+ 	encode_share_access(xdr, arg->share_access);
+@@ -5077,7 +5077,7 @@ static int decode_link(struct xdr_stream *xdr, struct nfs4_change_info *cinfo)
+ /*
+  * We create the owner, so we know a proper owner.id length is 4.
+  */
+-static int decode_lock_denied (struct xdr_stream *xdr, struct file_lock *fl)
++static int decode_lock_denied(struct xdr_stream *xdr, struct file_lock *fl)
+ {
+ 	uint64_t offset, length, clientid;
+ 	__be32 *p;
+diff --git a/fs/nfs/sysfs.c b/fs/nfs/sysfs.c
+index 7b59a40d40c061..784f7c1d003bfc 100644
+--- a/fs/nfs/sysfs.c
++++ b/fs/nfs/sysfs.c
+@@ -14,6 +14,7 @@
+ #include <linux/rcupdate.h>
+ #include <linux/lockd/lockd.h>
+ 
++#include "internal.h"
+ #include "nfs4_fs.h"
+ #include "netns.h"
+ #include "sysfs.h"
+@@ -228,6 +229,25 @@ static void shutdown_client(struct rpc_clnt *clnt)
+ 	rpc_cancel_tasks(clnt, -EIO, shutdown_match_client, NULL);
+ }
+ 
++/*
++ * Shut down the nfs_client only once all the superblocks
++ * have been shut down.
++ */
++static void shutdown_nfs_client(struct nfs_client *clp)
++{
++	struct nfs_server *server;
++	rcu_read_lock();
++	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
++		if (!(server->flags & NFS_MOUNT_SHUTDOWN)) {
++			rcu_read_unlock();
++			return;
++		}
++	}
++	rcu_read_unlock();
++	nfs_mark_client_ready(clp, -EIO);
++	shutdown_client(clp->cl_rpcclient);
++}
++
+ static ssize_t
+ shutdown_show(struct kobject *kobj, struct kobj_attribute *attr,
+ 				char *buf)
+@@ -259,7 +279,6 @@ shutdown_store(struct kobject *kobj, struct kobj_attribute *attr,
+ 
+ 	server->flags |= NFS_MOUNT_SHUTDOWN;
+ 	shutdown_client(server->client);
+-	shutdown_client(server->nfs_client->cl_rpcclient);
+ 
+ 	if (!IS_ERR(server->client_acl))
+ 		shutdown_client(server->client_acl);
+@@ -267,6 +286,7 @@ shutdown_store(struct kobject *kobj, struct kobj_attribute *attr,
+ 	if (server->nlm_host)
+ 		shutdown_client(server->nlm_host->h_rpcclnt);
+ out:
++	shutdown_nfs_client(server->nfs_client);
+ 	return count;
+ }
+ 
+diff --git a/fs/nfs/write.c b/fs/nfs/write.c
+index 82ae2b85d393cb..8ff8db09a1e066 100644
+--- a/fs/nfs/write.c
++++ b/fs/nfs/write.c
+@@ -579,8 +579,10 @@ static struct nfs_page *nfs_lock_and_join_requests(struct folio *folio)
+ 
+ 	while (!nfs_lock_request(head)) {
+ 		ret = nfs_wait_on_request(head);
+-		if (ret < 0)
++		if (ret < 0) {
++			nfs_release_request(head);
+ 			return ERR_PTR(ret);
++		}
+ 	}
+ 
+ 	/* Ensure that nobody removed the request before we locked it */
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 57f8818aa47c5f..5e81c819c3846a 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -1057,6 +1057,12 @@ static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp)
+ 	return openlockstateid(stid);
+ }
+ 
++/*
++ * As the sc_free callback of a delegation, this may be called by nfs4_put_stid
++ * in nfsd_break_one_deleg.
++ * Considering nfsd_break_one_deleg is called with the flc->flc_lock held,
++ * this function mustn't ever sleep.
++ */
+ static void nfs4_free_deleg(struct nfs4_stid *stid)
+ {
+ 	struct nfs4_delegation *dp = delegstateid(stid);
+@@ -5269,6 +5275,7 @@ static const struct nfsd4_callback_ops nfsd4_cb_recall_ops = {
+ 
+ static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
+ {
++	bool queued;
+ 	/*
+ 	 * We're assuming the state code never drops its reference
+ 	 * without first removing the lease.  Since we're in this lease
+@@ -5277,7 +5284,10 @@ static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
+ 	 * we know it's safe to take a reference.
+ 	 */
+ 	refcount_inc(&dp->dl_stid.sc_count);
+-	WARN_ON_ONCE(!nfsd4_run_cb(&dp->dl_recall));
++	queued = nfsd4_run_cb(&dp->dl_recall);
++	WARN_ON_ONCE(!queued);
++	if (!queued)
++		nfs4_put_stid(&dp->dl_stid);
+ }
+ 
+ /* Called from break_lease() with flc_lock held. */
+@@ -6689,14 +6699,19 @@ deleg_reaper(struct nfsd_net *nn)
+ 	spin_lock(&nn->client_lock);
+ 	list_for_each_safe(pos, next, &nn->client_lru) {
+ 		clp = list_entry(pos, struct nfs4_client, cl_lru);
+-		if (clp->cl_state != NFSD4_ACTIVE ||
+-			list_empty(&clp->cl_delegations) ||
+-			atomic_read(&clp->cl_delegs_in_recall) ||
+-			test_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags) ||
+-			(ktime_get_boottime_seconds() -
+-				clp->cl_ra_time < 5)) {
++
++		if (clp->cl_state != NFSD4_ACTIVE)
++			continue;
++		if (list_empty(&clp->cl_delegations))
++			continue;
++		if (atomic_read(&clp->cl_delegs_in_recall))
++			continue;
++		if (test_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags))
++			continue;
++		if (ktime_get_boottime_seconds() - clp->cl_ra_time < 5)
++			continue;
++		if (clp->cl_cb_state != NFSD4_CB_UP)
+ 			continue;
+-		}
+ 		list_add(&clp->cl_ra_cblist, &cblist);
+ 
+ 		/* release in nfsd4_cb_recall_any_release */
+@@ -6880,7 +6895,7 @@ nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
+ 		 */
+ 		statusmask |= SC_STATUS_REVOKED;
+ 
+-	statusmask |= SC_STATUS_ADMIN_REVOKED;
++	statusmask |= SC_STATUS_ADMIN_REVOKED | SC_STATUS_FREEABLE;
+ 
+ 	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
+ 		CLOSE_STATEID(stateid))
+@@ -7535,9 +7550,7 @@ nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ 	if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
+ 		return status;
+ 
+-	status = nfsd4_lookup_stateid(cstate, stateid, SC_TYPE_DELEG,
+-				      SC_STATUS_REVOKED | SC_STATUS_FREEABLE,
+-				      &s, nn);
++	status = nfsd4_lookup_stateid(cstate, stateid, SC_TYPE_DELEG, SC_STATUS_REVOKED, &s, nn);
+ 	if (status)
+ 		goto out;
+ 	dp = delegstateid(s);
+diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
+index 3adbc05ebaac4c..e83629f396044b 100644
+--- a/fs/nfsd/nfsctl.c
++++ b/fs/nfsd/nfsctl.c
+@@ -1959,6 +1959,7 @@ int nfsd_nl_listener_set_doit(struct sk_buff *skb, struct genl_info *info)
+ 	struct svc_serv *serv;
+ 	LIST_HEAD(permsocks);
+ 	struct nfsd_net *nn;
++	bool delete = false;
+ 	int err, rem;
+ 
+ 	mutex_lock(&nfsd_mutex);
+@@ -2019,34 +2020,28 @@ int nfsd_nl_listener_set_doit(struct sk_buff *skb, struct genl_info *info)
+ 		}
+ 	}
+ 
+-	/* For now, no removing old sockets while server is running */
+-	if (serv->sv_nrthreads && !list_empty(&permsocks)) {
++	/*
++	 * If there are listener transports remaining on the permsocks list,
++	 * it means we were asked to remove a listener.
++	 */
++	if (!list_empty(&permsocks)) {
+ 		list_splice_init(&permsocks, &serv->sv_permsocks);
+-		spin_unlock_bh(&serv->sv_lock);
+-		err = -EBUSY;
+-		goto out_unlock_mtx;
++		delete = true;
+ 	}
++	spin_unlock_bh(&serv->sv_lock);
+ 
+-	/* Close the remaining sockets on the permsocks list */
+-	while (!list_empty(&permsocks)) {
+-		xprt = list_first_entry(&permsocks, struct svc_xprt, xpt_list);
+-		list_move(&xprt->xpt_list, &serv->sv_permsocks);
+-
+-		/*
+-		 * Newly-created sockets are born with the BUSY bit set. Clear
+-		 * it if there are no threads, since nothing can pick it up
+-		 * in that case.
+-		 */
+-		if (!serv->sv_nrthreads)
+-			clear_bit(XPT_BUSY, &xprt->xpt_flags);
+-
+-		set_bit(XPT_CLOSE, &xprt->xpt_flags);
+-		spin_unlock_bh(&serv->sv_lock);
+-		svc_xprt_close(xprt);
+-		spin_lock_bh(&serv->sv_lock);
++	/* Do not remove listeners while there are active threads. */
++	if (serv->sv_nrthreads) {
++		err = -EBUSY;
++		goto out_unlock_mtx;
+ 	}
+ 
+-	spin_unlock_bh(&serv->sv_lock);
++	/*
++	 * Since we can't delete an arbitrary llist entry, destroy the
++	 * remaining listeners and recreate the list.
++	 */
++	if (delete)
++		svc_xprt_destroy_all(serv, net);
+ 
+ 	/* walk list of addrs again, open any that still don't exist */
+ 	nlmsg_for_each_attr(attr, info->nlhdr, GENL_HDRLEN, rem) {
+@@ -2073,6 +2068,9 @@ int nfsd_nl_listener_set_doit(struct sk_buff *skb, struct genl_info *info)
+ 
+ 		xprt = svc_find_listener(serv, xcl_name, net, sa);
+ 		if (xprt) {
++			if (delete)
++				WARN_ONCE(1, "Transport type=%s already exists\n",
++					  xcl_name);
+ 			svc_xprt_put(xprt);
+ 			continue;
+ 		}
+diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
+index d6d4f2a0e89826..ca29a5e1600fd9 100644
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -1935,9 +1935,17 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen,
+ 	return err;
+ }
+ 
+-/*
+- * Unlink a file or directory
+- * N.B. After this call fhp needs an fh_put
++/**
++ * nfsd_unlink - remove a directory entry
++ * @rqstp: RPC transaction context
++ * @fhp: the file handle of the parent directory to be modified
++ * @type: enforced file type of the object to be removed
++ * @fname: the name of the directory entry to be removed
++ * @flen: length of @fname in octets
++ *
++ * After this call fhp needs an fh_put.
++ *
++ * Returns a generic NFS status code in network byte-order.
+  */
+ __be32
+ nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
+@@ -2011,15 +2019,17 @@ nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
+ 	fh_drop_write(fhp);
+ out_nfserr:
+ 	if (host_err == -EBUSY) {
+-		/* name is mounted-on. There is no perfect
+-		 * error status.
++		/*
++		 * See RFC 8881 Section 18.25.4 para 4: NFSv4 REMOVE
++		 * wants a status unique to the object type.
+ 		 */
+-		err = nfserr_file_open;
+-	} else {
+-		err = nfserrno(host_err);
++		if (type != S_IFDIR)
++			err = nfserr_file_open;
++		else
++			err = nfserr_acces;
+ 	}
+ out:
+-	return err;
++	return err != nfs_ok ? err : nfserrno(host_err);
+ out_unlock:
+ 	inode_unlock(dirp);
+ 	goto out_drop_write;
+diff --git a/fs/ntfs3/attrib.c b/fs/ntfs3/attrib.c
+index da1a9312e61a0e..dd459316529e8e 100644
+--- a/fs/ntfs3/attrib.c
++++ b/fs/ntfs3/attrib.c
+@@ -2663,8 +2663,9 @@ int attr_set_compress(struct ntfs_inode *ni, bool compr)
+ 		attr->nres.run_off = cpu_to_le16(run_off);
+ 	}
+ 
+-	/* Update data attribute flags. */
++	/* Update attribute flags. */
+ 	if (compr) {
++		attr->flags &= ~ATTR_FLAG_SPARSED;
+ 		attr->flags |= ATTR_FLAG_COMPRESSED;
+ 		attr->nres.c_unit = NTFS_LZNT_CUNIT;
+ 	} else {
+diff --git a/fs/ntfs3/file.c b/fs/ntfs3/file.c
+index f704ceef953948..7976ac4611c8d0 100644
+--- a/fs/ntfs3/file.c
++++ b/fs/ntfs3/file.c
+@@ -101,8 +101,26 @@ int ntfs_fileattr_set(struct mnt_idmap *idmap, struct dentry *dentry,
+ 	/* Allowed to change compression for empty files and for directories only. */
+ 	if (!is_dedup(ni) && !is_encrypted(ni) &&
+ 	    (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) {
+-		/* Change compress state. */
+-		int err = ni_set_compress(inode, flags & FS_COMPR_FL);
++		int err = 0;
++		struct address_space *mapping = inode->i_mapping;
++
++		/* Write out all data and wait. */
++		filemap_invalidate_lock(mapping);
++		err = filemap_write_and_wait(mapping);
++
++		if (err >= 0) {
++			/* Change compress state. */
++			bool compr = flags & FS_COMPR_FL;
++			err = ni_set_compress(inode, compr);
++
++			/* For files change a_ops too. */
++			if (!err)
++				mapping->a_ops = compr ? &ntfs_aops_cmpr :
++							 &ntfs_aops;
++		}
++
++		filemap_invalidate_unlock(mapping);
++
+ 		if (err)
+ 			return err;
+ 	}
+diff --git a/fs/ntfs3/frecord.c b/fs/ntfs3/frecord.c
+index 175662acd5eaf0..608634361a302f 100644
+--- a/fs/ntfs3/frecord.c
++++ b/fs/ntfs3/frecord.c
+@@ -3431,10 +3431,12 @@ int ni_set_compress(struct inode *inode, bool compr)
+ 	}
+ 
+ 	ni->std_fa = std->fa;
+-	if (compr)
++	if (compr) {
++		std->fa &= ~FILE_ATTRIBUTE_SPARSE_FILE;
+ 		std->fa |= FILE_ATTRIBUTE_COMPRESSED;
+-	else
++	} else {
+ 		std->fa &= ~FILE_ATTRIBUTE_COMPRESSED;
++	}
+ 
+ 	if (ni->std_fa != std->fa) {
+ 		ni->std_fa = std->fa;
+diff --git a/fs/ntfs3/index.c b/fs/ntfs3/index.c
+index 7eb9fae22f8da6..78d20e4baa2c9a 100644
+--- a/fs/ntfs3/index.c
++++ b/fs/ntfs3/index.c
+@@ -618,7 +618,7 @@ static bool index_hdr_check(const struct INDEX_HDR *hdr, u32 bytes)
+ 	u32 off = le32_to_cpu(hdr->de_off);
+ 
+ 	if (!IS_ALIGNED(off, 8) || tot > bytes || end > tot ||
+-	    off + sizeof(struct NTFS_DE) > end) {
++	    size_add(off, sizeof(struct NTFS_DE)) > end) {
+ 		/* incorrect index buffer. */
+ 		return false;
+ 	}
+@@ -736,7 +736,7 @@ static struct NTFS_DE *hdr_find_e(const struct ntfs_index *indx,
+ 	if (end > total)
+ 		return NULL;
+ 
+-	if (off + sizeof(struct NTFS_DE) > end)
++	if (size_add(off, sizeof(struct NTFS_DE)) > end)
+ 		return NULL;
+ 
+ 	e = Add2Ptr(hdr, off);
+diff --git a/fs/ntfs3/ntfs.h b/fs/ntfs3/ntfs.h
+index 241f2ffdd9201a..1ff13b6f961326 100644
+--- a/fs/ntfs3/ntfs.h
++++ b/fs/ntfs3/ntfs.h
+@@ -717,7 +717,7 @@ static inline struct NTFS_DE *hdr_first_de(const struct INDEX_HDR *hdr)
+ 	struct NTFS_DE *e;
+ 	u16 esize;
+ 
+-	if (de_off >= used || de_off + sizeof(struct NTFS_DE) > used )
++	if (de_off >= used || size_add(de_off, sizeof(struct NTFS_DE)) > used)
+ 		return NULL;
+ 
+ 	e = Add2Ptr(hdr, de_off);
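Both ntfs3 hunks replace the open-coded `off + sizeof(struct NTFS_DE)` with size_add() because the offsets come from on-disk index headers: a 32-bit offset near its maximum wraps the addition and slips past the bound. A userspace sketch of the failure mode (size_add() itself widens/saturates, so the second form below approximates what it guarantees):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t de_off = UINT32_MAX - 4;	/* hostile on-disk offset */
            uint32_t entry = 16;			/* entry header size, say */
            uint32_t used = 64;

            /* Wrapping check: (2^32 - 5) + 16 wraps to 11, and 11 <= 64 passes. */
            if (de_off + entry <= used)
                    puts("bogus offset accepted");

            /* Widened check, as size_add() effectively provides: rejected. */
            if ((uint64_t)de_off + entry > used)
                    puts("bogus offset rejected");
            return 0;
    }
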
+diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
+index ea9127ba320844..5d9388b44e5be7 100644
+--- a/fs/ocfs2/alloc.c
++++ b/fs/ocfs2/alloc.c
+@@ -1803,6 +1803,14 @@ static int __ocfs2_find_path(struct ocfs2_caching_info *ci,
+ 
+ 	el = root_el;
+ 	while (el->l_tree_depth) {
++		if (unlikely(le16_to_cpu(el->l_tree_depth) >= OCFS2_MAX_PATH_DEPTH)) {
++			ocfs2_error(ocfs2_metadata_cache_get_super(ci),
++				    "Owner %llu has invalid tree depth %u in extent list\n",
++				    (unsigned long long)ocfs2_metadata_cache_owner(ci),
++				    le16_to_cpu(el->l_tree_depth));
++			ret = -EROFS;
++			goto out;
++		}
+ 		if (le16_to_cpu(el->l_next_free_rec) == 0) {
+ 			ocfs2_error(ocfs2_metadata_cache_get_super(ci),
+ 				    "Owner %llu has empty extent list at depth %u\n",
+diff --git a/fs/proc/base.c b/fs/proc/base.c
+index b31283d81c52ea..a2541f5204af06 100644
+--- a/fs/proc/base.c
++++ b/fs/proc/base.c
+@@ -417,7 +417,7 @@ static const struct file_operations proc_pid_cmdline_ops = {
+ #ifdef CONFIG_KALLSYMS
+ /*
+  * Provides a wchan file via kallsyms in a proper one-value-per-file format.
+- * Returns the resolved symbol.  If that fails, simply return the address.
++ * Returns the resolved symbol to user space.
+  */
+ static int proc_pid_wchan(struct seq_file *m, struct pid_namespace *ns,
+ 			  struct pid *pid, struct task_struct *task)
+diff --git a/fs/smb/client/cifsacl.c b/fs/smb/client/cifsacl.c
+index ebe9a7d7c70e86..e36f0e2d7d21e2 100644
+--- a/fs/smb/client/cifsacl.c
++++ b/fs/smb/client/cifsacl.c
+@@ -763,7 +763,7 @@ static void parse_dacl(struct smb_acl *pdacl, char *end_of_acl,
+ 		       struct cifs_fattr *fattr, bool mode_from_special_sid)
+ {
+ 	int i;
+-	int num_aces = 0;
++	u16 num_aces = 0;
+ 	int acl_size;
+ 	char *acl_base;
+ 	struct smb_ace **ppace;
+@@ -778,14 +778,15 @@ static void parse_dacl(struct smb_acl *pdacl, char *end_of_acl,
+ 	}
+ 
+ 	/* validate that we do not go past end of acl */
+-	if (end_of_acl < (char *)pdacl + le16_to_cpu(pdacl->size)) {
++	if (end_of_acl < (char *)pdacl + sizeof(struct smb_acl) ||
++	    end_of_acl < (char *)pdacl + le16_to_cpu(pdacl->size)) {
+ 		cifs_dbg(VFS, "ACL too small to parse DACL\n");
+ 		return;
+ 	}
+ 
+ 	cifs_dbg(NOISY, "DACL revision %d size %d num aces %d\n",
+ 		 le16_to_cpu(pdacl->revision), le16_to_cpu(pdacl->size),
+-		 le32_to_cpu(pdacl->num_aces));
++		 le16_to_cpu(pdacl->num_aces));
+ 
+ 	/* reset rwx permissions for user/group/other.
+ 	   Also, if num_aces is 0 i.e. DACL has no ACEs,
+@@ -795,12 +796,15 @@ static void parse_dacl(struct smb_acl *pdacl, char *end_of_acl,
+ 	acl_base = (char *)pdacl;
+ 	acl_size = sizeof(struct smb_acl);
+ 
+-	num_aces = le32_to_cpu(pdacl->num_aces);
++	num_aces = le16_to_cpu(pdacl->num_aces);
+ 	if (num_aces > 0) {
+ 		umode_t denied_mode = 0;
+ 
+-		if (num_aces > ULONG_MAX / sizeof(struct smb_ace *))
++		if (num_aces > (le16_to_cpu(pdacl->size) - sizeof(struct smb_acl)) /
++				(offsetof(struct smb_ace, sid) +
++				 offsetof(struct smb_sid, sub_auth) + sizeof(__le16)))
+ 			return;
++
+ 		ppace = kmalloc_array(num_aces, sizeof(struct smb_ace *),
+ 				      GFP_KERNEL);
+ 		if (!ppace)
+@@ -937,12 +941,12 @@ unsigned int setup_special_user_owner_ACE(struct smb_ace *pntace)
+ static void populate_new_aces(char *nacl_base,
+ 		struct smb_sid *pownersid,
+ 		struct smb_sid *pgrpsid,
+-		__u64 *pnmode, u32 *pnum_aces, u16 *pnsize,
++		__u64 *pnmode, u16 *pnum_aces, u16 *pnsize,
+ 		bool modefromsid,
+ 		bool posix)
+ {
+ 	__u64 nmode;
+-	u32 num_aces = 0;
++	u16 num_aces = 0;
+ 	u16 nsize = 0;
+ 	__u64 user_mode;
+ 	__u64 group_mode;
+@@ -1050,7 +1054,7 @@ static __u16 replace_sids_and_copy_aces(struct smb_acl *pdacl, struct smb_acl *p
+ 	u16 size = 0;
+ 	struct smb_ace *pntace = NULL;
+ 	char *acl_base = NULL;
+-	u32 src_num_aces = 0;
++	u16 src_num_aces = 0;
+ 	u16 nsize = 0;
+ 	struct smb_ace *pnntace = NULL;
+ 	char *nacl_base = NULL;
+@@ -1058,7 +1062,7 @@ static __u16 replace_sids_and_copy_aces(struct smb_acl *pdacl, struct smb_acl *p
+ 
+ 	acl_base = (char *)pdacl;
+ 	size = sizeof(struct smb_acl);
+-	src_num_aces = le32_to_cpu(pdacl->num_aces);
++	src_num_aces = le16_to_cpu(pdacl->num_aces);
+ 
+ 	nacl_base = (char *)pndacl;
+ 	nsize = sizeof(struct smb_acl);
+@@ -1090,11 +1094,11 @@ static int set_chmod_dacl(struct smb_acl *pdacl, struct smb_acl *pndacl,
+ 	u16 size = 0;
+ 	struct smb_ace *pntace = NULL;
+ 	char *acl_base = NULL;
+-	u32 src_num_aces = 0;
++	u16 src_num_aces = 0;
+ 	u16 nsize = 0;
+ 	struct smb_ace *pnntace = NULL;
+ 	char *nacl_base = NULL;
+-	u32 num_aces = 0;
++	u16 num_aces = 0;
+ 	bool new_aces_set = false;
+ 
+ 	/* Assuming that pndacl and pnmode are never NULL */
+@@ -1112,7 +1116,7 @@ static int set_chmod_dacl(struct smb_acl *pdacl, struct smb_acl *pndacl,
+ 
+ 	acl_base = (char *)pdacl;
+ 	size = sizeof(struct smb_acl);
+-	src_num_aces = le32_to_cpu(pdacl->num_aces);
++	src_num_aces = le16_to_cpu(pdacl->num_aces);
+ 
+ 	/* Retain old ACEs which we can retain */
+ 	for (i = 0; i < src_num_aces; ++i) {
+@@ -1158,7 +1162,7 @@ static int set_chmod_dacl(struct smb_acl *pdacl, struct smb_acl *pndacl,
+ 	}
+ 
+ finalize_dacl:
+-	pndacl->num_aces = cpu_to_le32(num_aces);
++	pndacl->num_aces = cpu_to_le16(num_aces);
+ 	pndacl->size = cpu_to_le16(nsize);
+ 
+ 	return 0;
+@@ -1293,7 +1297,7 @@ static int build_sec_desc(struct smb_ntsd *pntsd, struct smb_ntsd *pnntsd,
+ 			dacloffset ? dacl_ptr->revision : cpu_to_le16(ACL_REVISION);
+ 
+ 		ndacl_ptr->size = cpu_to_le16(0);
+-		ndacl_ptr->num_aces = cpu_to_le32(0);
++		ndacl_ptr->num_aces = cpu_to_le16(0);
+ 
+ 		rc = set_chmod_dacl(dacl_ptr, ndacl_ptr, owner_sid_ptr, group_sid_ptr,
+ 				    pnmode, mode_from_sid, posix);
+@@ -1651,7 +1655,7 @@ id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64 *pnmode,
+ 			dacl_ptr = (struct smb_acl *)((char *)pntsd + dacloffset);
+ 			if (mode_from_sid)
+ 				nsecdesclen +=
+-					le32_to_cpu(dacl_ptr->num_aces) * sizeof(struct smb_ace);
++					le16_to_cpu(dacl_ptr->num_aces) * sizeof(struct smb_ace);
+ 			else /* cifsacl */
+ 				nsecdesclen += le16_to_cpu(dacl_ptr->size);
+ 		}
+diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
+index d327f31b317db9..8b8475b4e26277 100644
+--- a/fs/smb/client/connect.c
++++ b/fs/smb/client/connect.c
+@@ -316,6 +316,7 @@ cifs_abort_connection(struct TCP_Server_Info *server)
+ 			 server->ssocket->flags);
+ 		sock_release(server->ssocket);
+ 		server->ssocket = NULL;
++		put_net(cifs_net_ns(server));
+ 	}
+ 	server->sequence_number = 0;
+ 	server->session_estab = false;
+@@ -3138,8 +3139,12 @@ generic_ip_connect(struct TCP_Server_Info *server)
+ 		/*
+ 		 * Grab netns reference for the socket.
+ 		 *
+-		 * It'll be released here, on error, or in clean_demultiplex_info() upon server
+-		 * teardown.
++		 * This reference will be released in several situations:
++		 * - In the failure path before the cifsd thread is started.
++		 * - In every place where server->ssocket is released, it is
++		 *   also set to NULL.
++		 * - Ultimately in clean_demultiplex_info(), during the final
++		 *   teardown.
+ 		 */
+ 		get_net(net);
+ 
+@@ -3155,10 +3160,8 @@ generic_ip_connect(struct TCP_Server_Info *server)
+ 	}
+ 
+ 	rc = bind_socket(server);
+-	if (rc < 0) {
+-		put_net(cifs_net_ns(server));
++	if (rc < 0)
+ 		return rc;
+-	}
+ 
+ 	/*
+ 	 * Eventually check for other socket options to change from
+@@ -3204,9 +3207,6 @@ generic_ip_connect(struct TCP_Server_Info *server)
+ 	if (sport == htons(RFC1001_PORT))
+ 		rc = ip_rfc1001_connect(server);
+ 
+-	if (rc < 0)
+-		put_net(cifs_net_ns(server));
+-
+ 	return rc;
+ }
+ 
+diff --git a/fs/smb/common/smbacl.h b/fs/smb/common/smbacl.h
+index 6a60698fc6f0f4..a624ec9e4a1443 100644
+--- a/fs/smb/common/smbacl.h
++++ b/fs/smb/common/smbacl.h
+@@ -107,7 +107,8 @@ struct smb_sid {
+ struct smb_acl {
+ 	__le16 revision; /* revision level */
+ 	__le16 size;
+-	__le32 num_aces;
++	__le16 num_aces;
++	__le16 reserved;
+ } __attribute__((packed));
+ 
+ struct smb_ace {
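Shrinking num_aces to __le16 (plus explicit padding) matches the wire format — [MS-DTYP] defines the ACL header's AceCount as 16 bits followed by 16 reserved bits — and makes the count consistent with the 16-bit Size field, which the parsers on both the client and ksmbd sides now cross-check. A reduced sketch of that sanity check (the header mirrors the struct above; the minimum ACE size is an assumption for illustration):

    #include <stdbool.h>
    #include <stdint.h>

    struct acl_hdr {
            uint16_t revision;
            uint16_t size;		/* total ACL bytes, header included */
            uint16_t num_aces;
            uint16_t reserved;
    };

    #define MIN_ACE_SIZE 8		/* smallest ACE this sketch accepts */

    /* Reject counts that cannot fit in the space the ACL claims to occupy. */
    static bool acl_count_sane(const struct acl_hdr *acl)
    {
            if (acl->size < sizeof(*acl))
                    return false;
            return acl->num_aces <= (acl->size - sizeof(*acl)) / MIN_ACE_SIZE;
    }
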
+diff --git a/fs/smb/server/auth.c b/fs/smb/server/auth.c
+index 8892177e500f19..95449751368314 100644
+--- a/fs/smb/server/auth.c
++++ b/fs/smb/server/auth.c
+@@ -1016,9 +1016,9 @@ static int ksmbd_get_encryption_key(struct ksmbd_work *work, __u64 ses_id,
+ 
+ 	ses_enc_key = enc ? sess->smb3encryptionkey :
+ 		sess->smb3decryptionkey;
+-	if (enc)
+-		ksmbd_user_session_get(sess);
+ 	memcpy(key, ses_enc_key, SMB3_ENC_DEC_KEY_SIZE);
++	if (!enc)
++		ksmbd_user_session_put(sess);
+ 
+ 	return 0;
+ }
+@@ -1217,7 +1217,7 @@ int ksmbd_crypt_message(struct ksmbd_work *work, struct kvec *iov,
+ free_sg:
+ 	kfree(sg);
+ free_req:
+-	kfree(req);
++	aead_request_free(req);
+ free_ctx:
+ 	ksmbd_release_crypto_ctx(ctx);
+ 	return rc;
+diff --git a/fs/smb/server/connection.h b/fs/smb/server/connection.h
+index 91c2318639e766..14620e147dda57 100644
+--- a/fs/smb/server/connection.h
++++ b/fs/smb/server/connection.h
+@@ -27,6 +27,7 @@ enum {
+ 	KSMBD_SESS_EXITING,
+ 	KSMBD_SESS_NEED_RECONNECT,
+ 	KSMBD_SESS_NEED_NEGOTIATE,
++	KSMBD_SESS_NEED_SETUP,
+ 	KSMBD_SESS_RELEASING
+ };
+ 
+@@ -187,6 +188,11 @@ static inline bool ksmbd_conn_need_negotiate(struct ksmbd_conn *conn)
+ 	return READ_ONCE(conn->status) == KSMBD_SESS_NEED_NEGOTIATE;
+ }
+ 
++static inline bool ksmbd_conn_need_setup(struct ksmbd_conn *conn)
++{
++	return READ_ONCE(conn->status) == KSMBD_SESS_NEED_SETUP;
++}
++
+ static inline bool ksmbd_conn_need_reconnect(struct ksmbd_conn *conn)
+ {
+ 	return READ_ONCE(conn->status) == KSMBD_SESS_NEED_RECONNECT;
+@@ -217,6 +223,11 @@ static inline void ksmbd_conn_set_need_negotiate(struct ksmbd_conn *conn)
+ 	WRITE_ONCE(conn->status, KSMBD_SESS_NEED_NEGOTIATE);
+ }
+ 
++static inline void ksmbd_conn_set_need_setup(struct ksmbd_conn *conn)
++{
++	WRITE_ONCE(conn->status, KSMBD_SESS_NEED_SETUP);
++}
++
+ static inline void ksmbd_conn_set_need_reconnect(struct ksmbd_conn *conn)
+ {
+ 	WRITE_ONCE(conn->status, KSMBD_SESS_NEED_RECONNECT);
+diff --git a/fs/smb/server/mgmt/user_session.c b/fs/smb/server/mgmt/user_session.c
+index d960ddcbba1657..f83daf72f877e2 100644
+--- a/fs/smb/server/mgmt/user_session.c
++++ b/fs/smb/server/mgmt/user_session.c
+@@ -181,7 +181,7 @@ static void ksmbd_expire_session(struct ksmbd_conn *conn)
+ 	down_write(&sessions_table_lock);
+ 	down_write(&conn->session_lock);
+ 	xa_for_each(&conn->sessions, id, sess) {
+-		if (atomic_read(&sess->refcnt) == 0 &&
++		if (atomic_read(&sess->refcnt) <= 1 &&
+ 		    (sess->state != SMB2_SESSION_VALID ||
+ 		     time_after(jiffies,
+ 			       sess->last_active + SMB2_SESSION_TIMEOUT))) {
+@@ -230,7 +230,11 @@ void ksmbd_sessions_deregister(struct ksmbd_conn *conn)
+ 			if (!ksmbd_chann_del(conn, sess) &&
+ 			    xa_empty(&sess->ksmbd_chann_list)) {
+ 				hash_del(&sess->hlist);
+-				ksmbd_session_destroy(sess);
++				down_write(&conn->session_lock);
++				xa_erase(&conn->sessions, sess->id);
++				up_write(&conn->session_lock);
++				if (atomic_dec_and_test(&sess->refcnt))
++					ksmbd_session_destroy(sess);
+ 			}
+ 		}
+ 	}
+@@ -249,13 +253,30 @@ void ksmbd_sessions_deregister(struct ksmbd_conn *conn)
+ 		if (xa_empty(&sess->ksmbd_chann_list)) {
+ 			xa_erase(&conn->sessions, sess->id);
+ 			hash_del(&sess->hlist);
+-			ksmbd_session_destroy(sess);
++			if (atomic_dec_and_test(&sess->refcnt))
++				ksmbd_session_destroy(sess);
+ 		}
+ 	}
+ 	up_write(&conn->session_lock);
+ 	up_write(&sessions_table_lock);
+ }
+ 
++bool is_ksmbd_session_in_connection(struct ksmbd_conn *conn,
++				   unsigned long long id)
++{
++	struct ksmbd_session *sess;
++
++	down_read(&conn->session_lock);
++	sess = xa_load(&conn->sessions, id);
++	if (sess) {
++		up_read(&conn->session_lock);
++		return true;
++	}
++	up_read(&conn->session_lock);
++
++	return false;
++}
++
+ struct ksmbd_session *ksmbd_session_lookup(struct ksmbd_conn *conn,
+ 					   unsigned long long id)
+ {
+@@ -309,8 +330,8 @@ void ksmbd_user_session_put(struct ksmbd_session *sess)
+ 
+ 	if (atomic_read(&sess->refcnt) <= 0)
+ 		WARN_ON(1);
+-	else
+-		atomic_dec(&sess->refcnt);
++	else if (atomic_dec_and_test(&sess->refcnt))
++		ksmbd_session_destroy(sess);
+ }
+ 
+ struct preauth_session *ksmbd_preauth_session_alloc(struct ksmbd_conn *conn,
+@@ -353,13 +374,13 @@ void destroy_previous_session(struct ksmbd_conn *conn,
+ 	ksmbd_all_conn_set_status(id, KSMBD_SESS_NEED_RECONNECT);
+ 	err = ksmbd_conn_wait_idle_sess_id(conn, id);
+ 	if (err) {
+-		ksmbd_all_conn_set_status(id, KSMBD_SESS_NEED_NEGOTIATE);
++		ksmbd_all_conn_set_status(id, KSMBD_SESS_NEED_SETUP);
+ 		goto out;
+ 	}
+ 
+ 	ksmbd_destroy_file_table(&prev_sess->file_table);
+ 	prev_sess->state = SMB2_SESSION_EXPIRED;
+-	ksmbd_all_conn_set_status(id, KSMBD_SESS_NEED_NEGOTIATE);
++	ksmbd_all_conn_set_status(id, KSMBD_SESS_NEED_SETUP);
+ 	ksmbd_launch_ksmbd_durable_scavenger();
+ out:
+ 	up_write(&conn->session_lock);
+@@ -417,7 +438,7 @@ static struct ksmbd_session *__session_create(int protocol)
+ 	xa_init(&sess->rpc_handle_list);
+ 	sess->sequence_number = 1;
+ 	rwlock_init(&sess->tree_conns_lock);
+-	atomic_set(&sess->refcnt, 1);
++	atomic_set(&sess->refcnt, 2);
+ 
+ 	ret = __init_smb2_session(sess);
+ 	if (ret)
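Starting the session refcount at 2 gives it two logical owners — the sessions table and the creating path — and every teardown site becomes a plain put, with whoever drops the last reference doing the destroy. The core of that pattern as a standalone sketch, using C11 atomics rather than the kernel's atomic_t:

    #include <stdatomic.h>
    #include <stdlib.h>

    struct session {
            atomic_int refcnt;
            /* ... payload ... */
    };

    static struct session *session_create(void)
    {
            struct session *s = calloc(1, sizeof(*s));

            if (s)	/* one reference for the table, one for the caller */
                    atomic_init(&s->refcnt, 2);
            return s;
    }

    static void session_put(struct session *s)
    {
            /* Every owner puts exactly once; the last put frees. */
            if (atomic_fetch_sub(&s->refcnt, 1) == 1)
                    free(s);
    }
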
+diff --git a/fs/smb/server/mgmt/user_session.h b/fs/smb/server/mgmt/user_session.h
+index c1c4b20bd5c6cf..f21348381d5984 100644
+--- a/fs/smb/server/mgmt/user_session.h
++++ b/fs/smb/server/mgmt/user_session.h
+@@ -87,6 +87,8 @@ void ksmbd_session_destroy(struct ksmbd_session *sess);
+ struct ksmbd_session *ksmbd_session_lookup_slowpath(unsigned long long id);
+ struct ksmbd_session *ksmbd_session_lookup(struct ksmbd_conn *conn,
+ 					   unsigned long long id);
++bool is_ksmbd_session_in_connection(struct ksmbd_conn *conn,
++				     unsigned long long id);
+ int ksmbd_session_register(struct ksmbd_conn *conn,
+ 			   struct ksmbd_session *sess);
+ void ksmbd_sessions_deregister(struct ksmbd_conn *conn);
+diff --git a/fs/smb/server/oplock.c b/fs/smb/server/oplock.c
+index 592fe665973a87..deacf78b4400cc 100644
+--- a/fs/smb/server/oplock.c
++++ b/fs/smb/server/oplock.c
+@@ -724,8 +724,8 @@ static int smb2_oplock_break_noti(struct oplock_info *opinfo)
+ 	work->conn = conn;
+ 	work->sess = opinfo->sess;
+ 
++	ksmbd_conn_r_count_inc(conn);
+ 	if (opinfo->op_state == OPLOCK_ACK_WAIT) {
+-		ksmbd_conn_r_count_inc(conn);
+ 		INIT_WORK(&work->work, __smb2_oplock_break_noti);
+ 		ksmbd_queue_work(work);
+ 
+@@ -833,8 +833,8 @@ static int smb2_lease_break_noti(struct oplock_info *opinfo)
+ 	work->conn = conn;
+ 	work->sess = opinfo->sess;
+ 
++	ksmbd_conn_r_count_inc(conn);
+ 	if (opinfo->op_state == OPLOCK_ACK_WAIT) {
+-		ksmbd_conn_r_count_inc(conn);
+ 		INIT_WORK(&work->work, __smb2_lease_break_noti);
+ 		ksmbd_queue_work(work);
+ 		wait_for_break_ack(opinfo);
+@@ -1505,6 +1505,10 @@ struct lease_ctx_info *parse_lease_state(void *open_req)
+ 	if (sizeof(struct lease_context_v2) == le32_to_cpu(cc->DataLength)) {
+ 		struct create_lease_v2 *lc = (struct create_lease_v2 *)cc;
+ 
++		if (le16_to_cpu(cc->DataOffset) + le32_to_cpu(cc->DataLength) <
++		    sizeof(struct create_lease_v2) - 4)
++			return NULL;
++
+ 		memcpy(lreq->lease_key, lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE);
+ 		lreq->req_state = lc->lcontext.LeaseState;
+ 		lreq->flags = lc->lcontext.LeaseFlags;
+@@ -1517,6 +1521,10 @@ struct lease_ctx_info *parse_lease_state(void *open_req)
+ 	} else {
+ 		struct create_lease *lc = (struct create_lease *)cc;
+ 
++		if (le16_to_cpu(cc->DataOffset) + le32_to_cpu(cc->DataLength) <
++		    sizeof(struct create_lease))
++			return NULL;
++
+ 		memcpy(lreq->lease_key, lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE);
+ 		lreq->req_state = lc->lcontext.LeaseState;
+ 		lreq->flags = lc->lcontext.LeaseFlags;
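The lease checks here and the durable-handle checks later in smb2pdu.c share one shape: a client-controlled (DataOffset, DataLength) pair must cover at least the structure the code is about to cast to. That predicate, isolated (a hypothetical helper, not ksmbd API):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stddef.h>

    /* True if a create context payload can hold a structure of want bytes. */
    static bool ctx_payload_fits(uint16_t data_off, uint32_t data_len,
                                 size_t want)
    {
            /* The 64-bit sum cannot wrap for 16-bit + 32-bit inputs. */
            return (uint64_t)data_off + data_len >= want;
    }
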
+diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
+index 8464261d763876..7fea86edc71763 100644
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -1247,7 +1247,7 @@ int smb2_handle_negotiate(struct ksmbd_work *work)
+ 	}
+ 
+ 	conn->srv_sec_mode = le16_to_cpu(rsp->SecurityMode);
+-	ksmbd_conn_set_need_negotiate(conn);
++	ksmbd_conn_set_need_setup(conn);
+ 
+ err_out:
+ 	if (rc)
+@@ -1268,6 +1268,9 @@ static int alloc_preauth_hash(struct ksmbd_session *sess,
+ 	if (sess->Preauth_HashValue)
+ 		return 0;
+ 
++	if (!conn->preauth_info)
++		return -ENOMEM;
++
+ 	sess->Preauth_HashValue = kmemdup(conn->preauth_info->Preauth_HashValue,
+ 					  PREAUTH_HASHVALUE_SIZE, GFP_KERNEL);
+ 	if (!sess->Preauth_HashValue)
+@@ -1671,6 +1674,11 @@ int smb2_sess_setup(struct ksmbd_work *work)
+ 
+ 	ksmbd_debug(SMB, "Received request for session setup\n");
+ 
++	if (!ksmbd_conn_need_setup(conn) && !ksmbd_conn_good(conn)) {
++		work->send_no_response = 1;
++		return rc;
++	}
++
+ 	WORK_BUFFERS(work, req, rsp);
+ 
+ 	rsp->StructureSize = cpu_to_le16(9);
+@@ -1704,44 +1712,38 @@ int smb2_sess_setup(struct ksmbd_work *work)
+ 
+ 		if (conn->dialect != sess->dialect) {
+ 			rc = -EINVAL;
+-			ksmbd_user_session_put(sess);
+ 			goto out_err;
+ 		}
+ 
+ 		if (!(req->hdr.Flags & SMB2_FLAGS_SIGNED)) {
+ 			rc = -EINVAL;
+-			ksmbd_user_session_put(sess);
+ 			goto out_err;
+ 		}
+ 
+ 		if (strncmp(conn->ClientGUID, sess->ClientGUID,
+ 			    SMB2_CLIENT_GUID_SIZE)) {
+ 			rc = -ENOENT;
+-			ksmbd_user_session_put(sess);
+ 			goto out_err;
+ 		}
+ 
+ 		if (sess->state == SMB2_SESSION_IN_PROGRESS) {
+ 			rc = -EACCES;
+-			ksmbd_user_session_put(sess);
+ 			goto out_err;
+ 		}
+ 
+ 		if (sess->state == SMB2_SESSION_EXPIRED) {
+ 			rc = -EFAULT;
+-			ksmbd_user_session_put(sess);
+ 			goto out_err;
+ 		}
+-		ksmbd_user_session_put(sess);
+ 
+ 		if (ksmbd_conn_need_reconnect(conn)) {
+ 			rc = -EFAULT;
++			ksmbd_user_session_put(sess);
+ 			sess = NULL;
+ 			goto out_err;
+ 		}
+ 
+-		sess = ksmbd_session_lookup(conn, sess_id);
+-		if (!sess) {
++		if (is_ksmbd_session_in_connection(conn, sess_id)) {
+ 			rc = -EACCES;
+ 			goto out_err;
+ 		}
+@@ -1907,10 +1909,12 @@ int smb2_sess_setup(struct ksmbd_work *work)
+ 
+ 			sess->last_active = jiffies;
+ 			sess->state = SMB2_SESSION_EXPIRED;
++			ksmbd_user_session_put(sess);
++			work->sess = NULL;
+ 			if (try_delay) {
+ 				ksmbd_conn_set_need_reconnect(conn);
+ 				ssleep(5);
+-				ksmbd_conn_set_need_negotiate(conn);
++				ksmbd_conn_set_need_setup(conn);
+ 			}
+ 		}
+ 		smb2_set_err_rsp(work);
+@@ -2234,14 +2238,15 @@ int smb2_session_logoff(struct ksmbd_work *work)
+ 		return -ENOENT;
+ 	}
+ 
+-	ksmbd_destroy_file_table(&sess->file_table);
+ 	down_write(&conn->session_lock);
+ 	sess->state = SMB2_SESSION_EXPIRED;
+ 	up_write(&conn->session_lock);
+ 
+-	ksmbd_free_user(sess->user);
+-	sess->user = NULL;
+-	ksmbd_all_conn_set_status(sess_id, KSMBD_SESS_NEED_NEGOTIATE);
++	if (sess->user) {
++		ksmbd_free_user(sess->user);
++		sess->user = NULL;
++	}
++	ksmbd_all_conn_set_status(sess_id, KSMBD_SESS_NEED_SETUP);
+ 
+ 	rsp->StructureSize = cpu_to_le16(4);
+ 	err = ksmbd_iov_pin_rsp(work, rsp, sizeof(struct smb2_logoff_rsp));
+@@ -2703,6 +2708,13 @@ static int parse_durable_handle_context(struct ksmbd_work *work,
+ 				goto out;
+ 			}
+ 
++			if (le16_to_cpu(context->DataOffset) +
++				le32_to_cpu(context->DataLength) <
++			    sizeof(struct create_durable_reconn_v2_req)) {
++				err = -EINVAL;
++				goto out;
++			}
++
+ 			recon_v2 = (struct create_durable_reconn_v2_req *)context;
+ 			persistent_id = recon_v2->Fid.PersistentFileId;
+ 			dh_info->fp = ksmbd_lookup_durable_fd(persistent_id);
+@@ -2736,6 +2748,13 @@ static int parse_durable_handle_context(struct ksmbd_work *work,
+ 				goto out;
+ 			}
+ 
++			if (le16_to_cpu(context->DataOffset) +
++				le32_to_cpu(context->DataLength) <
++			    sizeof(struct create_durable_reconn_req)) {
++				err = -EINVAL;
++				goto out;
++			}
++
+ 			recon = (struct create_durable_reconn_req *)context;
+ 			persistent_id = recon->Data.Fid.PersistentFileId;
+ 			dh_info->fp = ksmbd_lookup_durable_fd(persistent_id);
+@@ -2761,6 +2780,13 @@ static int parse_durable_handle_context(struct ksmbd_work *work,
+ 				goto out;
+ 			}
+ 
++			if (le16_to_cpu(context->DataOffset) +
++				le32_to_cpu(context->DataLength) <
++			    sizeof(struct create_durable_req_v2)) {
++				err = -EINVAL;
++				goto out;
++			}
++
+ 			durable_v2_blob =
+ 				(struct create_durable_req_v2 *)context;
+ 			ksmbd_debug(SMB, "Request for durable v2 open\n");
+diff --git a/fs/smb/server/smbacl.c b/fs/smb/server/smbacl.c
+index 109036e2227ca1..376ae68144afa0 100644
+--- a/fs/smb/server/smbacl.c
++++ b/fs/smb/server/smbacl.c
+@@ -270,6 +270,11 @@ static int sid_to_id(struct mnt_idmap *idmap,
+ 		return -EIO;
+ 	}
+ 
++	if (psid->num_subauth == 0) {
++		pr_err("%s: zero subauthorities!\n", __func__);
++		return -EIO;
++	}
++
+ 	if (sidtype == SIDOWNER) {
+ 		kuid_t uid;
+ 		uid_t id;
+@@ -333,7 +338,7 @@ void posix_state_to_acl(struct posix_acl_state *state,
+ 	pace->e_perm = state->other.allow;
+ }
+ 
+-int init_acl_state(struct posix_acl_state *state, int cnt)
++int init_acl_state(struct posix_acl_state *state, u16 cnt)
+ {
+ 	int alloc;
+ 
+@@ -368,7 +373,7 @@ static void parse_dacl(struct mnt_idmap *idmap,
+ 		       struct smb_fattr *fattr)
+ {
+ 	int i, ret;
+-	int num_aces = 0;
++	u16 num_aces = 0;
+ 	unsigned int acl_size;
+ 	char *acl_base;
+ 	struct smb_ace **ppace;
+@@ -389,12 +394,12 @@ static void parse_dacl(struct mnt_idmap *idmap,
+ 
+ 	ksmbd_debug(SMB, "DACL revision %d size %d num aces %d\n",
+ 		    le16_to_cpu(pdacl->revision), le16_to_cpu(pdacl->size),
+-		    le32_to_cpu(pdacl->num_aces));
++		    le16_to_cpu(pdacl->num_aces));
+ 
+ 	acl_base = (char *)pdacl;
+ 	acl_size = sizeof(struct smb_acl);
+ 
+-	num_aces = le32_to_cpu(pdacl->num_aces);
++	num_aces = le16_to_cpu(pdacl->num_aces);
+ 	if (num_aces <= 0)
+ 		return;
+ 
+@@ -583,7 +588,7 @@ static void parse_dacl(struct mnt_idmap *idmap,
+ 
+ static void set_posix_acl_entries_dacl(struct mnt_idmap *idmap,
+ 				       struct smb_ace *pndace,
+-				       struct smb_fattr *fattr, u32 *num_aces,
++				       struct smb_fattr *fattr, u16 *num_aces,
+ 				       u16 *size, u32 nt_aces_num)
+ {
+ 	struct posix_acl_entry *pace;
+@@ -704,7 +709,7 @@ static void set_ntacl_dacl(struct mnt_idmap *idmap,
+ 			   struct smb_fattr *fattr)
+ {
+ 	struct smb_ace *ntace, *pndace;
+-	int nt_num_aces = le32_to_cpu(nt_dacl->num_aces), num_aces = 0;
++	u16 nt_num_aces = le16_to_cpu(nt_dacl->num_aces), num_aces = 0;
+ 	unsigned short size = 0;
+ 	int i;
+ 
+@@ -731,7 +736,7 @@ static void set_ntacl_dacl(struct mnt_idmap *idmap,
+ 
+ 	set_posix_acl_entries_dacl(idmap, pndace, fattr,
+ 				   &num_aces, &size, nt_num_aces);
+-	pndacl->num_aces = cpu_to_le32(num_aces);
++	pndacl->num_aces = cpu_to_le16(num_aces);
+ 	pndacl->size = cpu_to_le16(le16_to_cpu(pndacl->size) + size);
+ }
+ 
+@@ -739,7 +744,7 @@ static void set_mode_dacl(struct mnt_idmap *idmap,
+ 			  struct smb_acl *pndacl, struct smb_fattr *fattr)
+ {
+ 	struct smb_ace *pace, *pndace;
+-	u32 num_aces = 0;
++	u16 num_aces = 0;
+ 	u16 size = 0, ace_size = 0;
+ 	uid_t uid;
+ 	const struct smb_sid *sid;
+@@ -795,7 +800,7 @@ static void set_mode_dacl(struct mnt_idmap *idmap,
+ 				 fattr->cf_mode, 0007);
+ 
+ out:
+-	pndacl->num_aces = cpu_to_le32(num_aces);
++	pndacl->num_aces = cpu_to_le16(num_aces);
+ 	pndacl->size = cpu_to_le16(le16_to_cpu(pndacl->size) + size);
+ }
+ 
+@@ -1025,8 +1030,11 @@ int smb_inherit_dacl(struct ksmbd_conn *conn,
+ 	struct smb_sid owner_sid, group_sid;
+ 	struct dentry *parent = path->dentry->d_parent;
+ 	struct mnt_idmap *idmap = mnt_idmap(path->mnt);
+-	int inherited_flags = 0, flags = 0, i, ace_cnt = 0, nt_size = 0, pdacl_size;
+-	int rc = 0, num_aces, dacloffset, pntsd_type, pntsd_size, acl_len, aces_size;
++	int inherited_flags = 0, flags = 0, i, nt_size = 0, pdacl_size;
++	int rc = 0, pntsd_type, pntsd_size, acl_len, aces_size;
++	unsigned int dacloffset;
++	size_t dacl_struct_end;
++	u16 num_aces, ace_cnt = 0;
+ 	char *aces_base;
+ 	bool is_dir = S_ISDIR(d_inode(path->dentry)->i_mode);
+ 
+@@ -1034,15 +1042,18 @@ int smb_inherit_dacl(struct ksmbd_conn *conn,
+ 					    parent, &parent_pntsd);
+ 	if (pntsd_size <= 0)
+ 		return -ENOENT;
++
+ 	dacloffset = le32_to_cpu(parent_pntsd->dacloffset);
+-	if (!dacloffset || (dacloffset + sizeof(struct smb_acl) > pntsd_size)) {
++	if (!dacloffset ||
++	    check_add_overflow(dacloffset, sizeof(struct smb_acl), &dacl_struct_end) ||
++	    dacl_struct_end > (size_t)pntsd_size) {
+ 		rc = -EINVAL;
+ 		goto free_parent_pntsd;
+ 	}
+ 
+ 	parent_pdacl = (struct smb_acl *)((char *)parent_pntsd + dacloffset);
+ 	acl_len = pntsd_size - dacloffset;
+-	num_aces = le32_to_cpu(parent_pdacl->num_aces);
++	num_aces = le16_to_cpu(parent_pdacl->num_aces);
+ 	pntsd_type = le16_to_cpu(parent_pntsd->type);
+ 	pdacl_size = le16_to_cpu(parent_pdacl->size);
+ 
+@@ -1201,7 +1212,7 @@ int smb_inherit_dacl(struct ksmbd_conn *conn,
+ 			pdacl = (struct smb_acl *)((char *)pntsd + le32_to_cpu(pntsd->dacloffset));
+ 			pdacl->revision = cpu_to_le16(2);
+ 			pdacl->size = cpu_to_le16(sizeof(struct smb_acl) + nt_size);
+-			pdacl->num_aces = cpu_to_le32(ace_cnt);
++			pdacl->num_aces = cpu_to_le16(ace_cnt);
+ 			pace = (struct smb_ace *)((char *)pdacl + sizeof(struct smb_acl));
+ 			memcpy(pace, aces_base, nt_size);
+ 			pntsd_size += sizeof(struct smb_acl) + nt_size;
+@@ -1238,7 +1249,9 @@ int smb_check_perm_dacl(struct ksmbd_conn *conn, const struct path *path,
+ 	struct smb_ntsd *pntsd = NULL;
+ 	struct smb_acl *pdacl;
+ 	struct posix_acl *posix_acls;
+-	int rc = 0, pntsd_size, acl_size, aces_size, pdacl_size, dacl_offset;
++	int rc = 0, pntsd_size, acl_size, aces_size, pdacl_size;
++	unsigned int dacl_offset;
++	size_t dacl_struct_end;
+ 	struct smb_sid sid;
+ 	int granted = le32_to_cpu(*pdaccess & ~FILE_MAXIMAL_ACCESS_LE);
+ 	struct smb_ace *ace;
+@@ -1257,7 +1270,8 @@ int smb_check_perm_dacl(struct ksmbd_conn *conn, const struct path *path,
+ 
+ 	dacl_offset = le32_to_cpu(pntsd->dacloffset);
+ 	if (!dacl_offset ||
+-	    (dacl_offset + sizeof(struct smb_acl) > pntsd_size))
++	    check_add_overflow(dacl_offset, sizeof(struct smb_acl), &dacl_struct_end) ||
++	    dacl_struct_end > (size_t)pntsd_size)
+ 		goto err_out;
+ 
+ 	pdacl = (struct smb_acl *)((char *)pntsd + le32_to_cpu(pntsd->dacloffset));
+@@ -1282,7 +1296,7 @@ int smb_check_perm_dacl(struct ksmbd_conn *conn, const struct path *path,
+ 
+ 		ace = (struct smb_ace *)((char *)pdacl + sizeof(struct smb_acl));
+ 		aces_size = acl_size - sizeof(struct smb_acl);
+-		for (i = 0; i < le32_to_cpu(pdacl->num_aces); i++) {
++		for (i = 0; i < le16_to_cpu(pdacl->num_aces); i++) {
+ 			if (offsetof(struct smb_ace, access_req) > aces_size)
+ 				break;
+ 			ace_size = le16_to_cpu(ace->size);
+@@ -1303,7 +1317,7 @@ int smb_check_perm_dacl(struct ksmbd_conn *conn, const struct path *path,
+ 
+ 	ace = (struct smb_ace *)((char *)pdacl + sizeof(struct smb_acl));
+ 	aces_size = acl_size - sizeof(struct smb_acl);
+-	for (i = 0; i < le32_to_cpu(pdacl->num_aces); i++) {
++	for (i = 0; i < le16_to_cpu(pdacl->num_aces); i++) {
+ 		if (offsetof(struct smb_ace, access_req) > aces_size)
+ 			break;
+ 		ace_size = le16_to_cpu(ace->size);
+diff --git a/fs/smb/server/smbacl.h b/fs/smb/server/smbacl.h
+index 24ce576fc2924b..355adaee39b871 100644
+--- a/fs/smb/server/smbacl.h
++++ b/fs/smb/server/smbacl.h
+@@ -86,7 +86,7 @@ int parse_sec_desc(struct mnt_idmap *idmap, struct smb_ntsd *pntsd,
+ int build_sec_desc(struct mnt_idmap *idmap, struct smb_ntsd *pntsd,
+ 		   struct smb_ntsd *ppntsd, int ppntsd_size, int addition_info,
+ 		   __u32 *secdesclen, struct smb_fattr *fattr);
+-int init_acl_state(struct posix_acl_state *state, int cnt);
++int init_acl_state(struct posix_acl_state *state, u16 cnt);
+ void free_acl_state(struct posix_acl_state *state);
+ void posix_state_to_acl(struct posix_acl_state *state,
+ 			struct posix_acl_entry *pace);
+diff --git a/include/drm/display/drm_dp_mst_helper.h b/include/drm/display/drm_dp_mst_helper.h
+index a80ba457a858f3..6398a6b50bd1b7 100644
+--- a/include/drm/display/drm_dp_mst_helper.h
++++ b/include/drm/display/drm_dp_mst_helper.h
+@@ -222,6 +222,13 @@ struct drm_dp_mst_branch {
+ 	 */
+ 	struct list_head destroy_next;
+ 
++	/**
++	 * @rad: Relative Address of the MST branch.
++	 * For &drm_dp_mst_topology_mgr.mst_primary, the rad[] bytes are all 0,
++	 * unset and unused. For MST branches connected after mst_primary,
++	 * in each element of rad[] the nibbles are ordered by the most
++	 * significant 4 bits first and the least significant 4 bits second.
++	 */
+ 	u8 rad[8];
+ 	u8 lct;
+ 	int num_ports;
+diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
+index a32eebcd23da47..38b2af336e4a01 100644
+--- a/include/linux/cgroup-defs.h
++++ b/include/linux/cgroup-defs.h
+@@ -324,6 +324,7 @@ struct cgroup_base_stat {
+ #ifdef CONFIG_SCHED_CORE
+ 	u64 forceidle_sum;
+ #endif
++	u64 ntime;
+ };
+ 
+ /*
+diff --git a/include/linux/context_tracking_irq.h b/include/linux/context_tracking_irq.h
+index c50b5670c4a52f..197916ee91a4bd 100644
+--- a/include/linux/context_tracking_irq.h
++++ b/include/linux/context_tracking_irq.h
+@@ -10,12 +10,12 @@ void ct_irq_exit_irqson(void);
+ void ct_nmi_enter(void);
+ void ct_nmi_exit(void);
+ #else
+-static inline void ct_irq_enter(void) { }
+-static inline void ct_irq_exit(void) { }
++static __always_inline void ct_irq_enter(void) { }
++static __always_inline void ct_irq_exit(void) { }
+ static inline void ct_irq_enter_irqson(void) { }
+ static inline void ct_irq_exit_irqson(void) { }
+-static inline void ct_nmi_enter(void) { }
+-static inline void ct_nmi_exit(void) { }
++static __always_inline void ct_nmi_enter(void) { }
++static __always_inline void ct_nmi_exit(void) { }
+ #endif
+ 
+ #endif
+diff --git a/include/linux/coresight.h b/include/linux/coresight.h
+index c1334259427850..f106b102511189 100644
+--- a/include/linux/coresight.h
++++ b/include/linux/coresight.h
+@@ -639,6 +639,10 @@ extern int coresight_enable_sysfs(struct coresight_device *csdev);
+ extern void coresight_disable_sysfs(struct coresight_device *csdev);
+ extern int coresight_timeout(struct csdev_access *csa, u32 offset,
+ 			     int position, int value);
++typedef void (*coresight_timeout_cb_t)(struct csdev_access *, u32, int, int);
++extern int coresight_timeout_action(struct csdev_access *csa, u32 offset,
++					int position, int value,
++					coresight_timeout_cb_t cb);
+ 
+ extern int coresight_claim_device(struct coresight_device *csdev);
+ extern int coresight_claim_device_unlocked(struct coresight_device *csdev);
+diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h
+index 0d79070c5a70f2..487d4bd9b0c999 100644
+--- a/include/linux/fwnode.h
++++ b/include/linux/fwnode.h
+@@ -91,7 +91,7 @@ struct fwnode_endpoint {
+ #define SWNODE_GRAPH_PORT_NAME_FMT		"port@%u"
+ #define SWNODE_GRAPH_ENDPOINT_NAME_FMT		"endpoint@%u"
+ 
+-#define NR_FWNODE_REFERENCE_ARGS	8
++#define NR_FWNODE_REFERENCE_ARGS	16
+ 
+ /**
+  * struct fwnode_reference_args - Fwnode reference with additional arguments
+diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
+index 457151f9f263d9..b378fbf885ce37 100644
+--- a/include/linux/interrupt.h
++++ b/include/linux/interrupt.h
+@@ -448,7 +448,7 @@ irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
+ static inline void disable_irq_nosync_lockdep(unsigned int irq)
+ {
+ 	disable_irq_nosync(irq);
+-#ifdef CONFIG_LOCKDEP
++#if defined(CONFIG_LOCKDEP) && !defined(CONFIG_PREEMPT_RT)
+ 	local_irq_disable();
+ #endif
+ }
+@@ -456,7 +456,7 @@ static inline void disable_irq_nosync_lockdep(unsigned int irq)
+ static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
+ {
+ 	disable_irq_nosync(irq);
+-#ifdef CONFIG_LOCKDEP
++#if defined(CONFIG_LOCKDEP) && !defined(CONFIG_PREEMPT_RT)
+ 	local_irq_save(*flags);
+ #endif
+ }
+@@ -471,7 +471,7 @@ static inline void disable_irq_lockdep(unsigned int irq)
+ 
+ static inline void enable_irq_lockdep(unsigned int irq)
+ {
+-#ifdef CONFIG_LOCKDEP
++#if defined(CONFIG_LOCKDEP) && !defined(CONFIG_PREEMPT_RT)
+ 	local_irq_enable();
+ #endif
+ 	enable_irq(irq);
+@@ -479,7 +479,7 @@ static inline void enable_irq_lockdep(unsigned int irq)
+ 
+ static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
+ {
+-#ifdef CONFIG_LOCKDEP
++#if defined(CONFIG_LOCKDEP) && !defined(CONFIG_PREEMPT_RT)
+ 	local_irq_restore(*flags);
+ #endif
+ 	enable_irq(irq);
+diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
+index b804346a974195..81ab18658d72dc 100644
+--- a/include/linux/nfs_fs_sb.h
++++ b/include/linux/nfs_fs_sb.h
+@@ -251,6 +251,10 @@ struct nfs_server {
+ 	struct list_head	ss_copies;
+ 	struct list_head	ss_src_copies;
+ 
++	unsigned long		delegation_flags;
++#define NFS4SERV_DELEGRETURN		(1)
++#define NFS4SERV_DELEGATION_EXPIRED	(2)
++#define NFS4SERV_DELEGRETURN_DELAYED	(3)
+ 	unsigned long		delegation_gen;
+ 	unsigned long		mig_gen;
+ 	unsigned long		mig_status;
+diff --git a/include/linux/nmi.h b/include/linux/nmi.h
+index a8dfb38c9bb6f1..e78fa535f61dd8 100644
+--- a/include/linux/nmi.h
++++ b/include/linux/nmi.h
+@@ -17,7 +17,6 @@
+ void lockup_detector_init(void);
+ void lockup_detector_retry_init(void);
+ void lockup_detector_soft_poweroff(void);
+-void lockup_detector_cleanup(void);
+ 
+ extern int watchdog_user_enabled;
+ extern int watchdog_thresh;
+@@ -37,7 +36,6 @@ extern int sysctl_hardlockup_all_cpu_backtrace;
+ static inline void lockup_detector_init(void) { }
+ static inline void lockup_detector_retry_init(void) { }
+ static inline void lockup_detector_soft_poweroff(void) { }
+-static inline void lockup_detector_cleanup(void) { }
+ #endif /* !CONFIG_LOCKUP_DETECTOR */
+ 
+ #ifdef CONFIG_SOFTLOCKUP_DETECTOR
+@@ -104,12 +102,10 @@ void watchdog_hardlockup_check(unsigned int cpu, struct pt_regs *regs);
+ #if defined(CONFIG_HARDLOCKUP_DETECTOR_PERF)
+ extern void hardlockup_detector_perf_stop(void);
+ extern void hardlockup_detector_perf_restart(void);
+-extern void hardlockup_detector_perf_cleanup(void);
+ extern void hardlockup_config_perf_event(const char *str);
+ #else
+ static inline void hardlockup_detector_perf_stop(void) { }
+ static inline void hardlockup_detector_perf_restart(void) { }
+-static inline void hardlockup_detector_perf_cleanup(void) { }
+ static inline void hardlockup_config_perf_event(const char *str) { }
+ #endif
+ 
+diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
+index e8b2ac6bd2ae3b..8df030ebd86286 100644
+--- a/include/linux/pgtable.h
++++ b/include/linux/pgtable.h
+@@ -1518,14 +1518,25 @@ static inline void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
+ }
+ 
+ /*
+- * track_pfn_copy is called when vma that is covering the pfnmap gets
+- * copied through copy_page_range().
++ * track_pfn_copy is called when a VM_PFNMAP VMA is about to get the page
++ * tables copied during copy_page_range(). On success, stores the pfn to be
++ * passed to untrack_pfn_copy().
+  */
+-static inline int track_pfn_copy(struct vm_area_struct *vma)
++static inline int track_pfn_copy(struct vm_area_struct *dst_vma,
++		struct vm_area_struct *src_vma, unsigned long *pfn)
+ {
+ 	return 0;
+ }
+ 
++/*
++ * untrack_pfn_copy is called when a VM_PFNMAP VMA failed to copy during
++ * copy_page_range(), but after track_pfn_copy() was already called.
++ */
++static inline void untrack_pfn_copy(struct vm_area_struct *dst_vma,
++		unsigned long pfn)
++{
++}
++
+ /*
+  * untrack_pfn is called while unmapping a pfnmap for a region.
+  * untrack can be called for a specific region indicated by pfn and size or
+@@ -1538,8 +1549,10 @@ static inline void untrack_pfn(struct vm_area_struct *vma,
+ }
+ 
+ /*
+- * untrack_pfn_clear is called while mremapping a pfnmap for a new region
+- * or fails to copy pgtable during duplicate vm area.
++ * untrack_pfn_clear is called in the following cases on a VM_PFNMAP VMA:
++ *
++ * 1) During mremap() on the src VMA after the page tables were moved.
++ * 2) During fork() on the dst VMA, immediately after duplicating the src VMA.
+  */
+ static inline void untrack_pfn_clear(struct vm_area_struct *vma)
+ {
+@@ -1550,7 +1563,10 @@ extern int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
+ 			   unsigned long size);
+ extern void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
+ 			     pfn_t pfn);
+-extern int track_pfn_copy(struct vm_area_struct *vma);
++extern int track_pfn_copy(struct vm_area_struct *dst_vma,
++		struct vm_area_struct *src_vma, unsigned long *pfn);
++extern void untrack_pfn_copy(struct vm_area_struct *dst_vma,
++		unsigned long pfn);
+ extern void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
+ 			unsigned long size, bool mm_wr_locked);
+ extern void untrack_pfn_clear(struct vm_area_struct *vma);
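With the reworked signatures, a copy_page_range() caller is expected to bracket the copy: track first, stash the returned pfn, and untrack with that pfn only if the copy fails. Roughly (a sketch of the expected caller shape; copy_pgtables_sketch() is a stand-in, not the real mm code):

    static int copy_pfnmap_sketch(struct vm_area_struct *dst_vma,
                                  struct vm_area_struct *src_vma)
    {
            unsigned long pfn;
            int ret;

            ret = track_pfn_copy(dst_vma, src_vma, &pfn);
            if (ret)
                    return ret;

            ret = copy_pgtables_sketch(dst_vma, src_vma);
            if (ret)
                    /* Copy failed after tracking: undo with the saved pfn. */
                    untrack_pfn_copy(dst_vma, pfn);
            return ret;
    }
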
+diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
+index d39dc863f612fe..d0b29cd1fd204e 100644
+--- a/include/linux/pm_runtime.h
++++ b/include/linux/pm_runtime.h
+@@ -66,6 +66,7 @@ static inline bool queue_pm_work(struct work_struct *work)
+ 
+ extern int pm_generic_runtime_suspend(struct device *dev);
+ extern int pm_generic_runtime_resume(struct device *dev);
++extern bool pm_runtime_need_not_resume(struct device *dev);
+ extern int pm_runtime_force_suspend(struct device *dev);
+ extern int pm_runtime_force_resume(struct device *dev);
+ 
+@@ -241,6 +242,7 @@ static inline bool queue_pm_work(struct work_struct *work) { return false; }
+ 
+ static inline int pm_generic_runtime_suspend(struct device *dev) { return 0; }
+ static inline int pm_generic_runtime_resume(struct device *dev) { return 0; }
++static inline bool pm_runtime_need_not_resume(struct device *dev) { return true; }
+ static inline int pm_runtime_force_suspend(struct device *dev) { return 0; }
+ static inline int pm_runtime_force_resume(struct device *dev) { return 0; }
+ 
+diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
+index 48e5c03df1dd83..bd69ddc102fbc5 100644
+--- a/include/linux/rcupdate.h
++++ b/include/linux/rcupdate.h
+@@ -138,7 +138,7 @@ static inline void rcu_sysrq_end(void) { }
+ #if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK))
+ void rcu_irq_work_resched(void);
+ #else
+-static inline void rcu_irq_work_resched(void) { }
++static __always_inline void rcu_irq_work_resched(void) { }
+ #endif
+ 
+ #ifdef CONFIG_RCU_NOCB_CPU
+diff --git a/include/linux/sched/smt.h b/include/linux/sched/smt.h
+index fb1e295e7e63e2..166b19af956f8b 100644
+--- a/include/linux/sched/smt.h
++++ b/include/linux/sched/smt.h
+@@ -12,7 +12,7 @@ static __always_inline bool sched_smt_active(void)
+ 	return static_branch_likely(&sched_smt_present);
+ }
+ #else
+-static inline bool sched_smt_active(void) { return false; }
++static __always_inline bool sched_smt_active(void) { return false; }
+ #endif
+ 
+ void arch_smt_update(void);
+diff --git a/include/linux/thermal.h b/include/linux/thermal.h
+index 25ea8fe2313e6d..0da2c257e32cf9 100644
+--- a/include/linux/thermal.h
++++ b/include/linux/thermal.h
+@@ -83,8 +83,6 @@ struct thermal_trip {
+ #define THERMAL_TRIP_PRIV_TO_INT(_val_)	(uintptr_t)(_val_)
+ #define THERMAL_INT_TO_TRIP_PRIV(_val_)	(void *)(uintptr_t)(_val_)
+ 
+-struct thermal_zone_device;
+-
+ struct cooling_spec {
+ 	unsigned long upper;	/* Highest cooling state  */
+ 	unsigned long lower;	/* Lowest cooling state  */
+diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
+index 77769ff5054441..fcf5a64d5cfe2d 100644
+--- a/include/linux/trace_events.h
++++ b/include/linux/trace_events.h
+@@ -689,6 +689,20 @@ struct trace_event_file {
+ 	atomic_t		tm_ref;	/* trigger-mode reference counter */
+ };
+ 
++#ifdef CONFIG_HIST_TRIGGERS
++extern struct irq_work hist_poll_work;
++extern wait_queue_head_t hist_poll_wq;
++
++static inline void hist_poll_wakeup(void)
++{
++	if (wq_has_sleeper(&hist_poll_wq))
++		irq_work_queue(&hist_poll_work);
++}
++
++#define hist_poll_wait(file, wait)	\
++	poll_wait(file, &hist_poll_wq, wait)
++#endif
++
+ #define __TRACE_EVENT_FLAGS(name, value)				\
+ 	static int __init trace_init_flags_##name(void)			\
+ 	{								\
+diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
+index 2b294bf1881fef..d0cb0e02cd6ae0 100644
+--- a/include/linux/uprobes.h
++++ b/include/linux/uprobes.h
+@@ -28,6 +28,8 @@ struct page;
+ 
+ #define MAX_URETPROBE_DEPTH		64
+ 
++#define UPROBE_NO_TRAMPOLINE_VADDR	(~0UL)
++
+ struct uprobe_consumer {
+ 	/*
+ 	 * handler() can return UPROBE_HANDLER_REMOVE to signal the need to
+diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
+index 67551133b5228e..c2b5de75daf252 100644
+--- a/include/rdma/ib_verbs.h
++++ b/include/rdma/ib_verbs.h
+@@ -2737,6 +2737,7 @@ struct ib_device {
+ 	 * It is a NULL terminated array.
+ 	 */
+ 	const struct attribute_group	*groups[4];
++	u8				hw_stats_attr_index;
+ 
+ 	u64			     uverbs_cmd_mask;
+ 
+diff --git a/init/Kconfig b/init/Kconfig
+index 293c565c62168e..243d0087f94458 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -129,6 +129,11 @@ config CC_HAS_COUNTED_BY
+ 	# https://github.com/llvm/llvm-project/pull/112636
+ 	depends on !(CC_IS_CLANG && CLANG_VERSION < 190103)
+ 
++config LD_CAN_USE_KEEP_IN_OVERLAY
++	# ld.lld prior to 21.0.0 did not support KEEP within an overlay description
++	# https://github.com/llvm/llvm-project/pull/130661
++	def_bool LD_IS_BFD || LLD_VERSION >= 210000
++
+ config PAHOLE_VERSION
+ 	int
+ 	default $(shell,$(srctree)/scripts/pahole-version.sh $(PAHOLE))
+diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
+index 2b9c8c168a0ba3..a60a6a2ce0d7f4 100644
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -2290,17 +2290,18 @@ void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
+ 	insn->code = BPF_JMP | BPF_CALL_ARGS;
+ }
+ #endif
+-#else
++#endif
++
+ static unsigned int __bpf_prog_ret0_warn(const void *ctx,
+ 					 const struct bpf_insn *insn)
+ {
+ 	/* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
+-	 * is not working properly, so warn about it!
++	 * is not working properly, or the interpreter is being used when
++	 * prog->jit_requested is not 0, so warn about it!
+ 	 */
+ 	WARN_ON_ONCE(1);
+ 	return 0;
+ }
+-#endif
+ 
+ bool bpf_prog_map_compatible(struct bpf_map *map,
+ 			     const struct bpf_prog *fp)
+@@ -2380,8 +2381,18 @@ static void bpf_prog_select_func(struct bpf_prog *fp)
+ {
+ #ifndef CONFIG_BPF_JIT_ALWAYS_ON
+ 	u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
++	u32 idx = (round_up(stack_depth, 32) / 32) - 1;
+ 
+-	fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
++	/* may_goto may cause stack size > 512, leading to idx out-of-bounds.
++	 * But for non-JITed programs, we don't need bpf_func, so no bounds
++	 * check needed.
++	 */
++	if (!fp->jit_requested &&
++	    !WARN_ON_ONCE(idx >= ARRAY_SIZE(interpreters))) {
++		fp->bpf_func = interpreters[idx];
++	} else {
++		fp->bpf_func = __bpf_prog_ret0_warn;
++	}
+ #else
+ 	fp->bpf_func = __bpf_prog_ret0_warn;
+ #endif
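The interpreter table has one entry per 32 bytes of stack up to MAX_BPF_STACK, so a may_goto-inflated depth above 512 computes an index past the end; the new guard routes such programs to the warning stub instead. The index arithmetic in isolation (userspace sketch):

    #include <stdio.h>

    #define MAX_BPF_STACK 512
    #define NR_SLOTS (MAX_BPF_STACK / 32)	/* 16 interpreter slots */

    static unsigned int interp_idx(unsigned int stack_depth)
    {
            if (stack_depth < 1)
                    stack_depth = 1;
            /* Round up to a 32-byte step, then convert to a 0-based slot. */
            return (stack_depth + 31) / 32 - 1;
    }

    int main(void)
    {
            printf("%u\n", interp_idx(512));	/* 15: last valid slot */
            printf("%u\n", interp_idx(544));	/* 16: out of range */
            return 0;
    }
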
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index a0cab0d0252fab..9000806ee3bae8 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -21276,6 +21276,13 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
+ 		if (subprogs[cur_subprog + 1].start == i + delta + 1) {
+ 			subprogs[cur_subprog].stack_depth += stack_depth_extra;
+ 			subprogs[cur_subprog].stack_extra = stack_depth_extra;
++
++			stack_depth = subprogs[cur_subprog].stack_depth;
++			if (stack_depth > MAX_BPF_STACK && !prog->jit_requested) {
++				verbose(env, "stack size %d(extra %d) is too large\n",
++					stack_depth, stack_depth_extra);
++				return -EINVAL;
++			}
+ 			cur_subprog++;
+ 			stack_depth = subprogs[cur_subprog].stack_depth;
+ 			stack_depth_extra = 0;
+diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c
+index ce295b73c0a366..3e01781aeb7bd0 100644
+--- a/kernel/cgroup/rstat.c
++++ b/kernel/cgroup/rstat.c
+@@ -444,6 +444,7 @@ static void cgroup_base_stat_add(struct cgroup_base_stat *dst_bstat,
+ #ifdef CONFIG_SCHED_CORE
+ 	dst_bstat->forceidle_sum += src_bstat->forceidle_sum;
+ #endif
++	dst_bstat->ntime += src_bstat->ntime;
+ }
+ 
+ static void cgroup_base_stat_sub(struct cgroup_base_stat *dst_bstat,
+@@ -455,6 +456,7 @@ static void cgroup_base_stat_sub(struct cgroup_base_stat *dst_bstat,
+ #ifdef CONFIG_SCHED_CORE
+ 	dst_bstat->forceidle_sum -= src_bstat->forceidle_sum;
+ #endif
++	dst_bstat->ntime -= src_bstat->ntime;
+ }
+ 
+ static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu)
+@@ -534,8 +536,10 @@ void __cgroup_account_cputime_field(struct cgroup *cgrp,
+ 	rstatc = cgroup_base_stat_cputime_account_begin(cgrp, &flags);
+ 
+ 	switch (index) {
+-	case CPUTIME_USER:
+ 	case CPUTIME_NICE:
++		rstatc->bstat.ntime += delta_exec;
++		fallthrough;
++	case CPUTIME_USER:
+ 		rstatc->bstat.cputime.utime += delta_exec;
+ 		break;
+ 	case CPUTIME_SYSTEM:
+@@ -590,6 +594,7 @@ static void root_cgroup_cputime(struct cgroup_base_stat *bstat)
+ #ifdef CONFIG_SCHED_CORE
+ 		bstat->forceidle_sum += cpustat[CPUTIME_FORCEIDLE];
+ #endif
++		bstat->ntime += cpustat[CPUTIME_NICE];
+ 	}
+ }
+ 
+@@ -607,32 +612,33 @@ static void cgroup_force_idle_show(struct seq_file *seq, struct cgroup_base_stat
+ void cgroup_base_stat_cputime_show(struct seq_file *seq)
+ {
+ 	struct cgroup *cgrp = seq_css(seq)->cgroup;
+-	u64 usage, utime, stime;
++	struct cgroup_base_stat bstat;
+ 
+ 	if (cgroup_parent(cgrp)) {
+ 		cgroup_rstat_flush_hold(cgrp);
+-		usage = cgrp->bstat.cputime.sum_exec_runtime;
++		bstat = cgrp->bstat;
+ 		cputime_adjust(&cgrp->bstat.cputime, &cgrp->prev_cputime,
+-			       &utime, &stime);
++			       &bstat.cputime.utime, &bstat.cputime.stime);
+ 		cgroup_rstat_flush_release(cgrp);
+ 	} else {
+-		/* cgrp->bstat of root is not actually used, reuse it */
+-		root_cgroup_cputime(&cgrp->bstat);
+-		usage = cgrp->bstat.cputime.sum_exec_runtime;
+-		utime = cgrp->bstat.cputime.utime;
+-		stime = cgrp->bstat.cputime.stime;
++		root_cgroup_cputime(&bstat);
+ 	}
+ 
+-	do_div(usage, NSEC_PER_USEC);
+-	do_div(utime, NSEC_PER_USEC);
+-	do_div(stime, NSEC_PER_USEC);
++	do_div(bstat.cputime.sum_exec_runtime, NSEC_PER_USEC);
++	do_div(bstat.cputime.utime, NSEC_PER_USEC);
++	do_div(bstat.cputime.stime, NSEC_PER_USEC);
++	do_div(bstat.ntime, NSEC_PER_USEC);
+ 
+ 	seq_printf(seq, "usage_usec %llu\n"
+-		   "user_usec %llu\n"
+-		   "system_usec %llu\n",
+-		   usage, utime, stime);
+-
+-	cgroup_force_idle_show(seq, &cgrp->bstat);
++			"user_usec %llu\n"
++			"system_usec %llu\n"
++			"nice_usec %llu\n",
++			bstat.cputime.sum_exec_runtime,
++			bstat.cputime.utime,
++			bstat.cputime.stime,
++			bstat.ntime);
++
++	cgroup_force_idle_show(seq, &bstat);
+ }
+ 
+ /* Add bpf kfuncs for cgroup_rstat_updated() and cgroup_rstat_flush() */
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index 9ee6c9145b1df9..cf02a629f99023 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -1452,11 +1452,6 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
+ 
+ out:
+ 	cpus_write_unlock();
+-	/*
+-	 * Do post unplug cleanup. This is still protected against
+-	 * concurrent CPU hotplug via cpu_add_remove_lock.
+-	 */
+-	lockup_detector_cleanup();
+ 	arch_smt_update();
+ 	return ret;
+ }
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 5fff74c736063c..b5ccf52bb71baa 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -2407,6 +2407,7 @@ ctx_time_update_event(struct perf_event_context *ctx, struct perf_event *event)
+ #define DETACH_GROUP	0x01UL
+ #define DETACH_CHILD	0x02UL
+ #define DETACH_DEAD	0x04UL
++#define DETACH_EXIT	0x08UL
+ 
+ /*
+  * Cross CPU call to remove a performance event
+@@ -2421,6 +2422,7 @@ __perf_remove_from_context(struct perf_event *event,
+ 			   void *info)
+ {
+ 	struct perf_event_pmu_context *pmu_ctx = event->pmu_ctx;
++	enum perf_event_state state = PERF_EVENT_STATE_OFF;
+ 	unsigned long flags = (unsigned long)info;
+ 
+ 	ctx_time_update(cpuctx, ctx);
+@@ -2429,16 +2431,19 @@ __perf_remove_from_context(struct perf_event *event,
+ 	 * Ensure event_sched_out() switches to OFF, at the very least
+ 	 * this avoids raising perf_pending_task() at this time.
+ 	 */
+-	if (flags & DETACH_DEAD)
++	if (flags & DETACH_EXIT)
++		state = PERF_EVENT_STATE_EXIT;
++	if (flags & DETACH_DEAD) {
+ 		event->pending_disable = 1;
++		state = PERF_EVENT_STATE_DEAD;
++	}
+ 	event_sched_out(event, ctx);
++	perf_event_set_state(event, min(event->state, state));
+ 	if (flags & DETACH_GROUP)
+ 		perf_group_detach(event);
+ 	if (flags & DETACH_CHILD)
+ 		perf_child_detach(event);
+ 	list_del_event(event, ctx);
+-	if (flags & DETACH_DEAD)
+-		event->state = PERF_EVENT_STATE_DEAD;
+ 
+ 	if (!pmu_ctx->nr_events) {
+ 		pmu_ctx->rotate_necessary = 0;
+@@ -11737,6 +11742,21 @@ static int pmu_dev_alloc(struct pmu *pmu)
+ static struct lock_class_key cpuctx_mutex;
+ static struct lock_class_key cpuctx_lock;
+ 
++static bool idr_cmpxchg(struct idr *idr, unsigned long id, void *old, void *new)
++{
++	void *tmp, *val = idr_find(idr, id);
++
++	if (val != old)
++		return false;
++
++	tmp = idr_replace(idr, new, id);
++	if (IS_ERR(tmp))
++		return false;
++
++	WARN_ON_ONCE(tmp != val);
++	return true;
++}
++
+ int perf_pmu_register(struct pmu *pmu, const char *name, int type)
+ {
+ 	int cpu, ret, max = PERF_TYPE_MAX;
+@@ -11763,7 +11783,7 @@ int perf_pmu_register(struct pmu *pmu, const char *name, int type)
+ 	if (type >= 0)
+ 		max = type;
+ 
+-	ret = idr_alloc(&pmu_idr, pmu, max, 0, GFP_KERNEL);
++	ret = idr_alloc(&pmu_idr, NULL, max, 0, GFP_KERNEL);
+ 	if (ret < 0)
+ 		goto free_pdc;
+ 
+@@ -11771,6 +11791,7 @@ int perf_pmu_register(struct pmu *pmu, const char *name, int type)
+ 
+ 	type = ret;
+ 	pmu->type = type;
++	atomic_set(&pmu->exclusive_cnt, 0);
+ 
+ 	if (pmu_bus_running && !pmu->dev) {
+ 		ret = pmu_dev_alloc(pmu);
+@@ -11819,14 +11840,22 @@ int perf_pmu_register(struct pmu *pmu, const char *name, int type)
+ 	if (!pmu->event_idx)
+ 		pmu->event_idx = perf_event_idx_default;
+ 
++	/*
++	 * Now that the PMU is complete, make it visible to perf_try_init_event().
++	 */
++	if (!idr_cmpxchg(&pmu_idr, pmu->type, NULL, pmu))
++		goto free_context;
+ 	list_add_rcu(&pmu->entry, &pmus);
+-	atomic_set(&pmu->exclusive_cnt, 0);
++
+ 	ret = 0;
+ unlock:
+ 	mutex_unlock(&pmus_lock);
+ 
+ 	return ret;
+ 
++free_context:
++	free_percpu(pmu->cpu_pmu_context);
++
+ free_dev:
+ 	if (pmu->dev && pmu->dev != PMU_NULL_DEV) {
+ 		device_del(pmu->dev);
+@@ -13319,12 +13348,7 @@ perf_event_exit_event(struct perf_event *event, struct perf_event_context *ctx)
+ 		mutex_lock(&parent_event->child_mutex);
+ 	}
+ 
+-	perf_remove_from_context(event, detach_flags);
+-
+-	raw_spin_lock_irq(&ctx->lock);
+-	if (event->state > PERF_EVENT_STATE_EXIT)
+-		perf_event_set_state(event, PERF_EVENT_STATE_EXIT);
+-	raw_spin_unlock_irq(&ctx->lock);
++	perf_remove_from_context(event, detach_flags | DETACH_EXIT);
+ 
+ 	/*
+ 	 * Child events can be freed.
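The perf_pmu_register() change above follows a reserve-then-publish pattern: idr_alloc() takes the type id with a NULL entry, and idr_cmpxchg() installs the pmu pointer only after the PMU is fully constructed, so a concurrent perf_try_init_event() can never observe a half-initialized PMU. A hedged userspace analogue with C11 atomics (plain array in place of the idr, names hypothetical):

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

#define MAX_IDS 16
static _Atomic(void *) slots[MAX_IDS];
static _Atomic bool    used[MAX_IDS];

/* Phase 1: reserve an id; the slot stays NULL, so lookups skip it. */
static int reserve_id(void)
{
	for (int i = 0; i < MAX_IDS; i++) {
		bool expect = false;
		if (atomic_compare_exchange_strong(&used[i], &expect, true))
			return i;
	}
	return -1;
}

/* Phase 2: publish the fully built object, analogous to idr_cmpxchg(). */
static bool publish(int id, void *obj)
{
	void *expect = NULL;
	return atomic_compare_exchange_strong(&slots[id], &expect, obj);
}

/* Lookup: NULL means "reserved but not ready yet". */
static void *lookup(int id)
{
	return (id >= 0 && id < MAX_IDS) ? atomic_load(&slots[id]) : NULL;
}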
+diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
+index 4f46f688d0d490..bbfa22c0a1597a 100644
+--- a/kernel/events/ring_buffer.c
++++ b/kernel/events/ring_buffer.c
+@@ -19,7 +19,7 @@
+ 
+ static void perf_output_wakeup(struct perf_output_handle *handle)
+ {
+-	atomic_set(&handle->rb->poll, EPOLLIN);
++	atomic_set(&handle->rb->poll, EPOLLIN | EPOLLRDNORM);
+ 
+ 	handle->event->pending_wakeup = 1;
+ 
+diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
+index 4fdc08ca0f3cbd..e60f5e71e35df7 100644
+--- a/kernel/events/uprobes.c
++++ b/kernel/events/uprobes.c
+@@ -167,6 +167,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
+ 	DEFINE_FOLIO_VMA_WALK(pvmw, old_folio, vma, addr, 0);
+ 	int err;
+ 	struct mmu_notifier_range range;
++	pte_t pte;
+ 
+ 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, addr,
+ 				addr + PAGE_SIZE);
+@@ -186,6 +187,16 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
+ 	if (!page_vma_mapped_walk(&pvmw))
+ 		goto unlock;
+ 	VM_BUG_ON_PAGE(addr != pvmw.address, old_page);
++	pte = ptep_get(pvmw.pte);
++
++	/*
++	 * Handle PFN swap PTEs, such as device-exclusive ones, that actually
++	 * map pages: simply trigger GUP again to fix it up.
++	 */
++	if (unlikely(!pte_present(pte))) {
++		page_vma_mapped_walk_done(&pvmw);
++		goto unlock;
++	}
+ 
+ 	if (new_page) {
+ 		folio_get(new_folio);
+@@ -200,7 +211,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
+ 		inc_mm_counter(mm, MM_ANONPAGES);
+ 	}
+ 
+-	flush_cache_page(vma, addr, pte_pfn(ptep_get(pvmw.pte)));
++	flush_cache_page(vma, addr, pte_pfn(pte));
+ 	ptep_clear_flush(vma, addr, pvmw.pte);
+ 	if (new_page)
+ 		set_pte_at(mm, addr, pvmw.pte,
+@@ -1887,8 +1898,8 @@ void uprobe_copy_process(struct task_struct *t, unsigned long flags)
+  */
+ unsigned long uprobe_get_trampoline_vaddr(void)
+ {
++	unsigned long trampoline_vaddr = UPROBE_NO_TRAMPOLINE_VADDR;
+ 	struct xol_area *area;
+-	unsigned long trampoline_vaddr = -1;
+ 
+ 	/* Pairs with xol_add_vma() smp_store_release() */
+ 	area = READ_ONCE(current->mm->uprobes_state.xol_area); /* ^^^ */
+diff --git a/kernel/fork.c b/kernel/fork.c
+index e192bdbc9adebb..12decadff468f5 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -505,6 +505,10 @@ struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
+ 	vma_numab_state_init(new);
+ 	dup_anon_vma_name(orig, new);
+ 
++	/* track_pfn_copy() will later take care of copying internal state. */
++	if (unlikely(new->vm_flags & VM_PFNMAP))
++		untrack_pfn_clear(new);
++
+ 	return new;
+ }
+ 
+diff --git a/kernel/kexec_elf.c b/kernel/kexec_elf.c
+index d3689632e8b90f..3a5c25b2adc94d 100644
+--- a/kernel/kexec_elf.c
++++ b/kernel/kexec_elf.c
+@@ -390,7 +390,7 @@ int kexec_elf_load(struct kimage *image, struct elfhdr *ehdr,
+ 			 struct kexec_buf *kbuf,
+ 			 unsigned long *lowest_load_addr)
+ {
+-	unsigned long lowest_addr = UINT_MAX;
++	unsigned long lowest_addr = ULONG_MAX;
+ 	int ret;
+ 	size_t i;
+ 
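The kexec_elf change is a sentinel-type bug: lowest_addr is an unsigned long used for minimum tracking, so seeding it with UINT_MAX silently loses any segment loaded above 4 GiB on 64-bit. A small demonstration (only meaningful where unsigned long is 64-bit):

/* Why the sentinel must be ULONG_MAX: min-tracking starts from the
 * largest representable value of the variable's own type.
 */
#include <limits.h>
#include <stdio.h>

int main(void)
{
	unsigned long addrs[] = { 0x200000000UL, 0x100000000UL }; /* > 4 GiB */
	unsigned long bad  = UINT_MAX;  /* 0xffffffff: below both addresses */
	unsigned long good = ULONG_MAX;

	for (unsigned i = 0; i < 2; i++) {
		if (addrs[i] < bad)  bad  = addrs[i];
		if (addrs[i] < good) good = addrs[i];
	}
	/* bad stays 0xffffffff; good correctly becomes 0x100000000 */
	printf("bad=%#lx good=%#lx\n", bad, good);
	return 0;
}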
+diff --git a/kernel/locking/semaphore.c b/kernel/locking/semaphore.c
+index 34bfae72f29526..de9117c0e671e9 100644
+--- a/kernel/locking/semaphore.c
++++ b/kernel/locking/semaphore.c
+@@ -29,6 +29,7 @@
+ #include <linux/export.h>
+ #include <linux/sched.h>
+ #include <linux/sched/debug.h>
++#include <linux/sched/wake_q.h>
+ #include <linux/semaphore.h>
+ #include <linux/spinlock.h>
+ #include <linux/ftrace.h>
+@@ -38,7 +39,7 @@ static noinline void __down(struct semaphore *sem);
+ static noinline int __down_interruptible(struct semaphore *sem);
+ static noinline int __down_killable(struct semaphore *sem);
+ static noinline int __down_timeout(struct semaphore *sem, long timeout);
+-static noinline void __up(struct semaphore *sem);
++static noinline void __up(struct semaphore *sem, struct wake_q_head *wake_q);
+ 
+ /**
+  * down - acquire the semaphore
+@@ -183,13 +184,16 @@ EXPORT_SYMBOL(down_timeout);
+ void __sched up(struct semaphore *sem)
+ {
+ 	unsigned long flags;
++	DEFINE_WAKE_Q(wake_q);
+ 
+ 	raw_spin_lock_irqsave(&sem->lock, flags);
+ 	if (likely(list_empty(&sem->wait_list)))
+ 		sem->count++;
+ 	else
+-		__up(sem);
++		__up(sem, &wake_q);
+ 	raw_spin_unlock_irqrestore(&sem->lock, flags);
++	if (!wake_q_empty(&wake_q))
++		wake_up_q(&wake_q);
+ }
+ EXPORT_SYMBOL(up);
+ 
+@@ -269,11 +273,12 @@ static noinline int __sched __down_timeout(struct semaphore *sem, long timeout)
+ 	return __down_common(sem, TASK_UNINTERRUPTIBLE, timeout);
+ }
+ 
+-static noinline void __sched __up(struct semaphore *sem)
++static noinline void __sched __up(struct semaphore *sem,
++				  struct wake_q_head *wake_q)
+ {
+ 	struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list,
+ 						struct semaphore_waiter, list);
+ 	list_del(&waiter->list);
+ 	waiter->up = true;
+-	wake_up_process(waiter->task);
++	wake_q_add(wake_q, waiter->task);
+ }
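The semaphore conversion above collects the task to wake while holding sem->lock and only calls wake_up_q() after the lock is released, so the woken task does not immediately block on the raw spinlock the waker still holds. A hedged pthread analogue of the same defer-the-wakeup shape (not the kernel API; the down path is omitted):

#include <pthread.h>

struct waiter { pthread_cond_t cond; struct waiter *next; };

struct sem {
	pthread_mutex_t lock;
	int count;
	struct waiter *wait_list;
};

static void sem_up(struct sem *s)
{
	struct waiter *to_wake = NULL;

	pthread_mutex_lock(&s->lock);
	if (!s->wait_list) {
		s->count++;
	} else {
		to_wake = s->wait_list;          /* dequeue under the lock */
		s->wait_list = to_wake->next;
	}
	pthread_mutex_unlock(&s->lock);

	if (to_wake)                             /* wake after unlocking */
		pthread_cond_signal(&to_wake->cond);
}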
+diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
+index a17c23b53049cc..5e7ae404c8d2a4 100644
+--- a/kernel/sched/deadline.c
++++ b/kernel/sched/deadline.c
+@@ -3179,7 +3179,7 @@ int sched_dl_global_validate(void)
+ 	 * value smaller than the currently allocated bandwidth in
+ 	 * any of the root_domains.
+ 	 */
+-	for_each_possible_cpu(cpu) {
++	for_each_online_cpu(cpu) {
+ 		rcu_read_lock_sched();
+ 
+ 		if (dl_bw_visited(cpu, gen))
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 58ba14ed8fbcb9..ceb023629d48dd 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -885,6 +885,26 @@ struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
+ 	return __node_2_se(left);
+ }
+ 
++/*
++ * HACK, stash a copy of deadline at the point of pick in vlag,
++ * which isn't used until dequeue.
++ */
++static inline void set_protect_slice(struct sched_entity *se)
++{
++	se->vlag = se->deadline;
++}
++
++static inline bool protect_slice(struct sched_entity *se)
++{
++	return se->vlag == se->deadline;
++}
++
++static inline void cancel_protect_slice(struct sched_entity *se)
++{
++	if (protect_slice(se))
++		se->vlag = se->deadline + 1;
++}
++
+ /*
+  * Earliest Eligible Virtual Deadline First
+  *
+@@ -921,11 +941,7 @@ static struct sched_entity *pick_eevdf(struct cfs_rq *cfs_rq)
+ 	if (curr && (!curr->on_rq || !entity_eligible(cfs_rq, curr)))
+ 		curr = NULL;
+ 
+-	/*
+-	 * Once selected, run a task until it either becomes non-eligible or
+-	 * until it gets a new slice. See the HACK in set_next_entity().
+-	 */
+-	if (sched_feat(RUN_TO_PARITY) && curr && curr->vlag == curr->deadline)
++	if (sched_feat(RUN_TO_PARITY) && curr && protect_slice(curr))
+ 		return curr;
+ 
+ 	/* Pick the leftmost entity if it's eligible */
+@@ -5626,11 +5642,8 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ 		update_stats_wait_end_fair(cfs_rq, se);
+ 		__dequeue_entity(cfs_rq, se);
+ 		update_load_avg(cfs_rq, se, UPDATE_TG);
+-		/*
+-		 * HACK, stash a copy of deadline at the point of pick in vlag,
+-		 * which isn't used until dequeue.
+-		 */
+-		se->vlag = se->deadline;
++
++		set_protect_slice(se);
+ 	}
+ 
+ 	update_stats_curr_start(cfs_rq, se);
+@@ -7090,6 +7103,8 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+ 		update_cfs_group(se);
+ 
+ 		se->slice = slice;
++		if (se != cfs_rq->curr)
++			min_vruntime_cb_propagate(&se->run_node, NULL);
+ 		slice = cfs_rq_min_slice(cfs_rq);
+ 
+ 		cfs_rq->h_nr_running++;
+@@ -7219,6 +7234,8 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
+ 		update_cfs_group(se);
+ 
+ 		se->slice = slice;
++		if (se != cfs_rq->curr)
++			min_vruntime_cb_propagate(&se->run_node, NULL);
+ 		slice = cfs_rq_min_slice(cfs_rq);
+ 
+ 		cfs_rq->h_nr_running -= h_nr_running;
+@@ -8882,8 +8899,15 @@ static void check_preempt_wakeup_fair(struct rq *rq, struct task_struct *p, int
+ 	 * Preempt an idle entity in favor of a non-idle entity (and don't preempt
+ 	 * in the inverse case).
+ 	 */
+-	if (cse_is_idle && !pse_is_idle)
++	if (cse_is_idle && !pse_is_idle) {
++		/*
++		 * When a non-idle entity preempts an idle entity,
++		 * don't give the idle entity slice protection.
++		 */
++		cancel_protect_slice(se);
+ 		goto preempt;
++	}
++
+ 	if (cse_is_idle != pse_is_idle)
+ 		return;
+ 
+@@ -8902,8 +8926,8 @@ static void check_preempt_wakeup_fair(struct rq *rq, struct task_struct *p, int
+ 	 * Note that even if @p does not turn out to be the most eligible
+ 	 * task at this moment, current's slice protection will be lost.
+ 	 */
+-	if (do_preempt_short(cfs_rq, pse, se) && se->vlag == se->deadline)
+-		se->vlag = se->deadline + 1;
++	if (do_preempt_short(cfs_rq, pse, se))
++		cancel_protect_slice(se);
+ 
+ 	/*
+ 	 * If @p has become the most eligible task, force preemption.
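The fair.c change wraps an existing trick in named helpers: a pick stashes the deadline in vlag (unused between pick and dequeue), equality later means the slice is still protected, and bumping vlag past the deadline cancels the protection. A standalone mock of that encoding, with simplified field types:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

struct sched_entity { int64_t vlag, deadline; };

/* vlag doubles as a "slice protected" flag between pick and dequeue:
 * equal to deadline => protected, anything else => not.
 */
static void set_protect_slice(struct sched_entity *se)
{
	se->vlag = se->deadline;
}

static bool protect_slice(const struct sched_entity *se)
{
	return se->vlag == se->deadline;
}

static void cancel_protect_slice(struct sched_entity *se)
{
	if (protect_slice(se))
		se->vlag = se->deadline + 1;
}

int main(void)
{
	struct sched_entity se = { .deadline = 100 };

	set_protect_slice(&se);
	assert(protect_slice(&se));
	cancel_protect_slice(&se);
	assert(!protect_slice(&se));
	return 0;
}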
+diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
+index 449efaaa387a68..55f279ddfd63d5 100644
+--- a/kernel/trace/bpf_trace.c
++++ b/kernel/trace/bpf_trace.c
+@@ -833,7 +833,7 @@ static int bpf_send_signal_common(u32 sig, enum pid_type type)
+ 	if (unlikely(is_global_init(current)))
+ 		return -EPERM;
+ 
+-	if (!preemptible()) {
++	if (preempt_count() != 0 || irqs_disabled()) {
+ 		/* Do an early check on signal validity. Otherwise,
+ 		 * the error is lost in deferred irq_work.
+ 		 */
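Context for the bpf_trace.c hunk: on CONFIG_PREEMPT=n kernels, preemptible() compiles to a constant 0, so the old test always claimed a non-preemptible context and the early signal-validity check never ran. Testing preempt_count() and irqs_disabled() directly is independent of the preemption model. A simplified model with userspace stubs (hypothetical stand-ins for the kernel macros):

/* Stubs standing in for the kernel's preempt_count()/irqs_disabled(). */
static int preempt_count(void) { return 0; }
static int irqs_disabled(void) { return 0; }

#ifdef CONFIG_PREEMPT
# define my_preemptible() (preempt_count() == 0 && !irqs_disabled())
#else
# define my_preemptible() 0   /* CONFIG_PREEMPT=n: constant false */
#endif

/* The fix tests the underlying conditions directly, so it behaves
 * the same regardless of the preemption model:
 */
static int in_task_context(void)
{
	return preempt_count() == 0 && !irqs_disabled();
}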
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index ea8ad5480e286d..3e252ba16d5c6e 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -7442,9 +7442,9 @@ static __init int rb_write_something(struct rb_test_data *data, bool nested)
+ 		/* Ignore dropped events before test starts. */
+ 		if (started) {
+ 			if (nested)
+-				data->bytes_dropped += len;
+-			else
+ 				data->bytes_dropped_nested += len;
++			else
++				data->bytes_dropped += len;
+ 		}
+ 		return len;
+ 	}
+diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
+index ea9b44847ce6b7..29eba68e07859d 100644
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -3111,6 +3111,20 @@ static bool event_in_systems(struct trace_event_call *call,
+ 	return !*p || isspace(*p) || *p == ',';
+ }
+ 
++#ifdef CONFIG_HIST_TRIGGERS
++/*
++ * Wake up waiters on the hist_poll_wq from irq_work because the hist trigger
++ * may happen in any context.
++ */
++static void hist_poll_event_irq_work(struct irq_work *work)
++{
++	wake_up_all(&hist_poll_wq);
++}
++
++DEFINE_IRQ_WORK(hist_poll_work, hist_poll_event_irq_work);
++DECLARE_WAIT_QUEUE_HEAD(hist_poll_wq);
++#endif
++
+ static struct trace_event_file *
+ trace_create_new_event(struct trace_event_call *call,
+ 		       struct trace_array *tr)
+diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
+index 31f5ad322fab0a..4ebafc655223a8 100644
+--- a/kernel/trace/trace_events_hist.c
++++ b/kernel/trace/trace_events_hist.c
+@@ -5314,6 +5314,8 @@ static void event_hist_trigger(struct event_trigger_data *data,
+ 
+ 	if (resolve_var_refs(hist_data, key, var_ref_vals, true))
+ 		hist_trigger_actions(hist_data, elt, buffer, rec, rbe, key, var_ref_vals);
++
++	hist_poll_wakeup();
+ }
+ 
+ static void hist_trigger_stacktrace_print(struct seq_file *m,
+@@ -5593,49 +5595,137 @@ static void hist_trigger_show(struct seq_file *m,
+ 		   n_entries, (u64)atomic64_read(&hist_data->map->drops));
+ }
+ 
++struct hist_file_data {
++	struct file *file;
++	u64 last_read;
++	u64 last_act;
++};
++
++static u64 get_hist_hit_count(struct trace_event_file *event_file)
++{
++	struct hist_trigger_data *hist_data;
++	struct event_trigger_data *data;
++	u64 ret = 0;
++
++	list_for_each_entry(data, &event_file->triggers, list) {
++		if (data->cmd_ops->trigger_type == ETT_EVENT_HIST) {
++			hist_data = data->private_data;
++			ret += atomic64_read(&hist_data->map->hits);
++		}
++	}
++	return ret;
++}
++
+ static int hist_show(struct seq_file *m, void *v)
+ {
++	struct hist_file_data *hist_file = m->private;
+ 	struct event_trigger_data *data;
+ 	struct trace_event_file *event_file;
+-	int n = 0, ret = 0;
++	int n = 0;
+ 
+-	mutex_lock(&event_mutex);
++	guard(mutex)(&event_mutex);
+ 
+-	event_file = event_file_file(m->private);
+-	if (unlikely(!event_file)) {
+-		ret = -ENODEV;
+-		goto out_unlock;
+-	}
++	event_file = event_file_file(hist_file->file);
++	if (unlikely(!event_file))
++		return -ENODEV;
+ 
+ 	list_for_each_entry(data, &event_file->triggers, list) {
+ 		if (data->cmd_ops->trigger_type == ETT_EVENT_HIST)
+ 			hist_trigger_show(m, data, n++);
+ 	}
++	hist_file->last_read = get_hist_hit_count(event_file);
++	/*
++	 * Update last_act too so that poll()/POLLPRI can wait for the next
++	 * event after any syscall on hist file.
++	 */
++	hist_file->last_act = hist_file->last_read;
+ 
+- out_unlock:
+-	mutex_unlock(&event_mutex);
++	return 0;
++}
++
++static __poll_t event_hist_poll(struct file *file, struct poll_table_struct *wait)
++{
++	struct trace_event_file *event_file;
++	struct seq_file *m = file->private_data;
++	struct hist_file_data *hist_file = m->private;
++	__poll_t ret = 0;
++	u64 cnt;
++
++	guard(mutex)(&event_mutex);
++
++	event_file = event_file_data(file);
++	if (!event_file)
++		return EPOLLERR;
++
++	hist_poll_wait(file, wait);
++
++	cnt = get_hist_hit_count(event_file);
++	if (hist_file->last_read != cnt)
++		ret |= EPOLLIN | EPOLLRDNORM;
++	if (hist_file->last_act != cnt) {
++		hist_file->last_act = cnt;
++		ret |= EPOLLPRI;
++	}
+ 
+ 	return ret;
+ }
+ 
++static int event_hist_release(struct inode *inode, struct file *file)
++{
++	struct seq_file *m = file->private_data;
++	struct hist_file_data *hist_file = m->private;
++
++	kfree(hist_file);
++	return tracing_single_release_file_tr(inode, file);
++}
++
+ static int event_hist_open(struct inode *inode, struct file *file)
+ {
++	struct trace_event_file *event_file;
++	struct hist_file_data *hist_file;
+ 	int ret;
+ 
+ 	ret = tracing_open_file_tr(inode, file);
+ 	if (ret)
+ 		return ret;
+ 
++	guard(mutex)(&event_mutex);
++
++	event_file = event_file_data(file);
++	if (!event_file) {
++		ret = -ENODEV;
++		goto err;
++	}
++
++	hist_file = kzalloc(sizeof(*hist_file), GFP_KERNEL);
++	if (!hist_file) {
++		ret = -ENOMEM;
++		goto err;
++	}
++
++	hist_file->file = file;
++	hist_file->last_act = get_hist_hit_count(event_file);
++
+ 	/* Clear private_data to avoid warning in single_open() */
+ 	file->private_data = NULL;
+-	return single_open(file, hist_show, file);
++	ret = single_open(file, hist_show, hist_file);
++	if (ret) {
++		kfree(hist_file);
++		goto err;
++	}
++
++	return 0;
++err:
++	tracing_release_file_tr(inode, file);
++	return ret;
+ }
+ 
+ const struct file_operations event_hist_fops = {
+ 	.open = event_hist_open,
+ 	.read = seq_read,
+ 	.llseek = seq_lseek,
+-	.release = tracing_single_release_file_tr,
++	.release = event_hist_release,
++	.poll = event_hist_poll,
+ };
+ 
+ #ifdef CONFIG_HIST_TRIGGERS_DEBUG
+@@ -5876,25 +5966,19 @@ static int hist_debug_show(struct seq_file *m, void *v)
+ {
+ 	struct event_trigger_data *data;
+ 	struct trace_event_file *event_file;
+-	int n = 0, ret = 0;
++	int n = 0;
+ 
+-	mutex_lock(&event_mutex);
++	guard(mutex)(&event_mutex);
+ 
+ 	event_file = event_file_file(m->private);
+-	if (unlikely(!event_file)) {
+-		ret = -ENODEV;
+-		goto out_unlock;
+-	}
++	if (unlikely(!event_file))
++		return -ENODEV;
+ 
+ 	list_for_each_entry(data, &event_file->triggers, list) {
+ 		if (data->cmd_ops->trigger_type == ETT_EVENT_HIST)
+ 			hist_trigger_debug_show(m, data, n++);
+ 	}
+-
+- out_unlock:
+-	mutex_unlock(&event_mutex);
+-
+-	return ret;
++	return 0;
+ }
+ 
+ static int event_hist_debug_open(struct inode *inode, struct file *file)
+@@ -5907,7 +5991,10 @@ static int event_hist_debug_open(struct inode *inode, struct file *file)
+ 
+ 	/* Clear private_data to avoid warning in single_open() */
+ 	file->private_data = NULL;
+-	return single_open(file, hist_debug_show, file);
++	ret = single_open(file, hist_debug_show, file);
++	if (ret)
++		tracing_release_file_tr(inode, file);
++	return ret;
+ }
+ 
+ const struct file_operations event_hist_debug_fops = {
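With the new .poll handler on the tracefs hist file, a consumer can block until a histogram records new hits instead of polling by re-reading. A hedged userspace example, assuming the semantics the patch defines (EPOLLIN for unread hits, EPOLLPRI for any new event); the event path below is only an example:

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path =
	    "/sys/kernel/tracing/events/sched/sched_switch/hist";
	int fd = open(path, O_RDONLY);
	if (fd < 0) { perror("open"); return 1; }

	/* Block until the histogram accumulates new hits. */
	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLPRI };
	if (poll(&pfd, 1, -1) < 0) { perror("poll"); return 1; }

	/* Dump the current histogram contents. */
	char buf[4096];
	ssize_t n;
	lseek(fd, 0, SEEK_SET);
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, (size_t)n, stdout);
	close(fd);
	return 0;
}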
+diff --git a/kernel/trace/trace_events_synth.c b/kernel/trace/trace_events_synth.c
+index c82b401a294d96..24c9962c40db1a 100644
+--- a/kernel/trace/trace_events_synth.c
++++ b/kernel/trace/trace_events_synth.c
+@@ -312,7 +312,7 @@ static const char *synth_field_fmt(char *type)
+ 	else if (strcmp(type, "gfp_t") == 0)
+ 		fmt = "%x";
+ 	else if (synth_field_is_string(type))
+-		fmt = "%.*s";
++		fmt = "%s";
+ 	else if (synth_field_is_stack(type))
+ 		fmt = "%s";
+ 
+@@ -859,6 +859,38 @@ static struct trace_event_fields synth_event_fields_array[] = {
+ 	{}
+ };
+ 
++static int synth_event_reg(struct trace_event_call *call,
++		    enum trace_reg type, void *data)
++{
++	struct synth_event *event = container_of(call, struct synth_event, call);
++
++	switch (type) {
++#ifdef CONFIG_PERF_EVENTS
++	case TRACE_REG_PERF_REGISTER:
++#endif
++	case TRACE_REG_REGISTER:
++		if (!try_module_get(event->mod))
++			return -EBUSY;
++		break;
++	default:
++		break;
++	}
++
++	int ret = trace_event_reg(call, type, data);
++
++	switch (type) {
++#ifdef CONFIG_PERF_EVENTS
++	case TRACE_REG_PERF_UNREGISTER:
++#endif
++	case TRACE_REG_UNREGISTER:
++		module_put(event->mod);
++		break;
++	default:
++		break;
++	}
++	return ret;
++}
++
+ static int register_synth_event(struct synth_event *event)
+ {
+ 	struct trace_event_call *call = &event->call;
+@@ -888,7 +920,7 @@ static int register_synth_event(struct synth_event *event)
+ 		goto out;
+ 	}
+ 	call->flags = TRACE_EVENT_FL_TRACEPOINT;
+-	call->class->reg = trace_event_reg;
++	call->class->reg = synth_event_reg;
+ 	call->class->probe = trace_event_raw_event_synth;
+ 	call->data = event;
+ 	call->tp = event->tp;
+diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
+index ebb61ddca749d8..655246a9bec348 100644
+--- a/kernel/trace/trace_functions_graph.c
++++ b/kernel/trace/trace_functions_graph.c
+@@ -1353,6 +1353,7 @@ void graph_trace_close(struct trace_iterator *iter)
+ 	if (data) {
+ 		free_percpu(data->cpu_data);
+ 		kfree(data);
++		iter->private = NULL;
+ 	}
+ }
+ 
+diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
+index fce064e205706f..10ea6d0a35e85d 100644
+--- a/kernel/trace/trace_irqsoff.c
++++ b/kernel/trace/trace_irqsoff.c
+@@ -233,8 +233,6 @@ static void irqsoff_trace_open(struct trace_iterator *iter)
+ {
+ 	if (is_graph(iter->tr))
+ 		graph_trace_open(iter);
+-	else
+-		iter->private = NULL;
+ }
+ 
+ static void irqsoff_trace_close(struct trace_iterator *iter)
+diff --git a/kernel/trace/trace_osnoise.c b/kernel/trace/trace_osnoise.c
+index 032fdeba37d350..a94790f5cda727 100644
+--- a/kernel/trace/trace_osnoise.c
++++ b/kernel/trace/trace_osnoise.c
+@@ -2038,7 +2038,6 @@ static int start_kthread(unsigned int cpu)
+ 
+ 	if (IS_ERR(kthread)) {
+ 		pr_err(BANNER "could not start sampling thread\n");
+-		stop_per_cpu_kthreads();
+ 		return -ENOMEM;
+ 	}
+ 
+diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
+index ae2ace5e515af5..039382576bc165 100644
+--- a/kernel/trace/trace_sched_wakeup.c
++++ b/kernel/trace/trace_sched_wakeup.c
+@@ -170,8 +170,6 @@ static void wakeup_trace_open(struct trace_iterator *iter)
+ {
+ 	if (is_graph(iter->tr))
+ 		graph_trace_open(iter);
+-	else
+-		iter->private = NULL;
+ }
+ 
+ static void wakeup_trace_close(struct trace_iterator *iter)
+diff --git a/kernel/watch_queue.c b/kernel/watch_queue.c
+index d36242fd493644..e55f9810b91add 100644
+--- a/kernel/watch_queue.c
++++ b/kernel/watch_queue.c
+@@ -269,6 +269,15 @@ long watch_queue_set_size(struct pipe_inode_info *pipe, unsigned int nr_notes)
+ 	if (ret < 0)
+ 		goto error;
+ 
++	/*
++	 * pipe_resize_ring() does not update nr_accounted for watch_queue
++	 * pipes, because the above vastly overprovisions. Set nr_accounted
++	 * and max_usage on this pipe to the number that was actually charged to
++	 * the user above via account_pipe_buffers.
++	 */
++	pipe->max_usage = nr_pages;
++	pipe->nr_accounted = nr_pages;
++
+ 	ret = -ENOMEM;
+ 	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
+ 	if (!pages)
+diff --git a/kernel/watchdog.c b/kernel/watchdog.c
+index 262691ba62b7ad..4dc72540c3b0fb 100644
+--- a/kernel/watchdog.c
++++ b/kernel/watchdog.c
+@@ -347,8 +347,6 @@ static int __init watchdog_thresh_setup(char *str)
+ }
+ __setup("watchdog_thresh=", watchdog_thresh_setup);
+ 
+-static void __lockup_detector_cleanup(void);
+-
+ #ifdef CONFIG_SOFTLOCKUP_DETECTOR_INTR_STORM
+ enum stats_per_group {
+ 	STATS_SYSTEM,
+@@ -878,11 +876,6 @@ static void __lockup_detector_reconfigure(void)
+ 
+ 	watchdog_hardlockup_start();
+ 	cpus_read_unlock();
+-	/*
+-	 * Must be called outside the cpus locked section to prevent
+-	 * recursive locking in the perf code.
+-	 */
+-	__lockup_detector_cleanup();
+ }
+ 
+ void lockup_detector_reconfigure(void)
+@@ -932,24 +925,6 @@ static inline void lockup_detector_setup(void)
+ }
+ #endif /* !CONFIG_SOFTLOCKUP_DETECTOR */
+ 
+-static void __lockup_detector_cleanup(void)
+-{
+-	lockdep_assert_held(&watchdog_mutex);
+-	hardlockup_detector_perf_cleanup();
+-}
+-
+-/**
+- * lockup_detector_cleanup - Cleanup after cpu hotplug or sysctl changes
+- *
+- * Caller must not hold the cpu hotplug rwsem.
+- */
+-void lockup_detector_cleanup(void)
+-{
+-	mutex_lock(&watchdog_mutex);
+-	__lockup_detector_cleanup();
+-	mutex_unlock(&watchdog_mutex);
+-}
+-
+ /**
+  * lockup_detector_soft_poweroff - Interface to stop lockup detector(s)
+  *
+diff --git a/kernel/watchdog_perf.c b/kernel/watchdog_perf.c
+index 59c1d86a73a248..2fdb96eaf49336 100644
+--- a/kernel/watchdog_perf.c
++++ b/kernel/watchdog_perf.c
+@@ -21,8 +21,6 @@
+ #include <linux/perf_event.h>
+ 
+ static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
+-static DEFINE_PER_CPU(struct perf_event *, dead_event);
+-static struct cpumask dead_events_mask;
+ 
+ static atomic_t watchdog_cpus = ATOMIC_INIT(0);
+ 
+@@ -181,36 +179,12 @@ void watchdog_hardlockup_disable(unsigned int cpu)
+ 
+ 	if (event) {
+ 		perf_event_disable(event);
++		perf_event_release_kernel(event);
+ 		this_cpu_write(watchdog_ev, NULL);
+-		this_cpu_write(dead_event, event);
+-		cpumask_set_cpu(smp_processor_id(), &dead_events_mask);
+ 		atomic_dec(&watchdog_cpus);
+ 	}
+ }
+ 
+-/**
+- * hardlockup_detector_perf_cleanup - Cleanup disabled events and destroy them
+- *
+- * Called from lockup_detector_cleanup(). Serialized by the caller.
+- */
+-void hardlockup_detector_perf_cleanup(void)
+-{
+-	int cpu;
+-
+-	for_each_cpu(cpu, &dead_events_mask) {
+-		struct perf_event *event = per_cpu(dead_event, cpu);
+-
+-		/*
+-		 * Required because for_each_cpu() reports  unconditionally
+-		 * CPU0 as set on UP kernels. Sigh.
+-		 */
+-		if (event)
+-			perf_event_release_kernel(event);
+-		per_cpu(dead_event, cpu) = NULL;
+-	}
+-	cpumask_clear(&dead_events_mask);
+-}
+-
+ /**
+  * hardlockup_detector_perf_stop - Globally stop watchdog events
+  *
+diff --git a/lib/842/842_compress.c b/lib/842/842_compress.c
+index c02baa4168e168..055356508d97c5 100644
+--- a/lib/842/842_compress.c
++++ b/lib/842/842_compress.c
+@@ -532,6 +532,8 @@ int sw842_compress(const u8 *in, unsigned int ilen,
+ 		}
+ 		if (repeat_count) {
+ 			ret = add_repeat_template(p, repeat_count);
++			if (ret)
++				return ret;
+ 			repeat_count = 0;
+ 			if (next == last) /* reached max repeat bits */
+ 				goto repeat;
+diff --git a/lib/stackinit_kunit.c b/lib/stackinit_kunit.c
+index c40818ec9c1801..49d32e43d06ef8 100644
+--- a/lib/stackinit_kunit.c
++++ b/lib/stackinit_kunit.c
+@@ -146,6 +146,15 @@ static bool stackinit_range_contains(char *haystack_start, size_t haystack_size,
+ #define INIT_STRUCT_assigned_copy(var_type)				\
+ 					; var = *(arg)
+ 
++/*
++ * The "did we actually fill the stack?" check value needs
++ * to be neither 0 nor any of the "pattern" bytes. The
++ * pattern bytes are compiler, architecture, and type based,
++ * so we have to pick a value that never appears for those
++ * combinations. Use 0x99 which is not 0xFF, 0xFE, nor 0xAA.
++ */
++#define FILL_BYTE	0x99
++
+ /*
+  * @name: unique string name for the test
+  * @var_type: type to be tested for zeroing initialization
+@@ -168,12 +177,12 @@ static noinline void test_ ## name (struct kunit *test)		\
+ 	ZERO_CLONE_ ## which(zero);				\
+ 	/* Clear entire check buffer for 0xFF overlap test. */	\
+ 	memset(check_buf, 0x00, sizeof(check_buf));		\
+-	/* Fill stack with 0xFF. */				\
++	/* Fill stack with FILL_BYTE. */			\
+ 	ignored = leaf_ ##name((unsigned long)&ignored, 1,	\
+ 				FETCH_ARG_ ## which(zero));	\
+-	/* Verify all bytes overwritten with 0xFF. */		\
++	/* Verify all bytes overwritten with FILL_BYTE. */	\
+ 	for (sum = 0, i = 0; i < target_size; i++)		\
+-		sum += (check_buf[i] != 0xFF);			\
++		sum += (check_buf[i] != FILL_BYTE);		\
+ 	/* Clear entire check buffer for later bit tests. */	\
+ 	memset(check_buf, 0x00, sizeof(check_buf));		\
+ 	/* Extract stack-defined variable contents. */		\
+@@ -184,7 +193,8 @@ static noinline void test_ ## name (struct kunit *test)		\
+ 	 * possible between the two leaf function calls.	\
+ 	 */							\
+ 	KUNIT_ASSERT_EQ_MSG(test, sum, 0,			\
+-			    "leaf fill was not 0xFF!?\n");	\
++			    "leaf fill was not 0x%02X!?\n",	\
++			    FILL_BYTE);				\
+ 								\
+ 	/* Validate that compiler lined up fill and target. */	\
+ 	KUNIT_ASSERT_TRUE_MSG(test,				\
+@@ -196,9 +206,9 @@ static noinline void test_ ## name (struct kunit *test)		\
+ 		(int)((ssize_t)(uintptr_t)fill_start -		\
+ 		      (ssize_t)(uintptr_t)target_start));	\
+ 								\
+-	/* Look for any bytes still 0xFF in check region. */	\
++	/* Validate check region has no FILL_BYTE bytes. */	\
+ 	for (sum = 0, i = 0; i < target_size; i++)		\
+-		sum += (check_buf[i] == 0xFF);			\
++		sum += (check_buf[i] == FILL_BYTE);		\
+ 								\
+ 	if (sum != 0 && xfail)					\
+ 		kunit_skip(test,				\
+@@ -233,12 +243,12 @@ static noinline int leaf_ ## name(unsigned long sp, bool fill,	\
+ 	 * stack frame of SOME kind...				\
+ 	 */							\
+ 	memset(buf, (char)(sp & 0xff), sizeof(buf));		\
+-	/* Fill variable with 0xFF. */				\
++	/* Fill variable with FILL_BYTE. */			\
+ 	if (fill) {						\
+ 		fill_start = &var;				\
+ 		fill_size = sizeof(var);			\
+ 		memset(fill_start,				\
+-		       (char)((sp & 0xff) | forced_mask),	\
++		       FILL_BYTE & forced_mask,			\
+ 		       fill_size);				\
+ 	}							\
+ 								\
+@@ -380,7 +390,7 @@ static int noinline __leaf_switch_none(int path, bool fill)
+ 			fill_start = &var;
+ 			fill_size = sizeof(var);
+ 
+-			memset(fill_start, forced_mask | 0x55, fill_size);
++			memset(fill_start, (forced_mask | 0x55) & FILL_BYTE, fill_size);
+ 		}
+ 		memcpy(check_buf, target_start, target_size);
+ 		break;
+@@ -391,7 +401,7 @@ static int noinline __leaf_switch_none(int path, bool fill)
+ 			fill_start = &var;
+ 			fill_size = sizeof(var);
+ 
+-			memset(fill_start, forced_mask | 0xaa, fill_size);
++			memset(fill_start, (forced_mask | 0xaa) & FILL_BYTE, fill_size);
+ 		}
+ 		memcpy(check_buf, target_start, target_size);
+ 		break;
+diff --git a/lib/vsprintf.c b/lib/vsprintf.c
+index c5e2ec9303c5d3..a69e71a1ca55e6 100644
+--- a/lib/vsprintf.c
++++ b/lib/vsprintf.c
+@@ -2255,7 +2255,7 @@ int __init no_hash_pointers_enable(char *str)
+ early_param("no_hash_pointers", no_hash_pointers_enable);
+ 
+ /* Used for Rust formatting ('%pA'). */
+-char *rust_fmt_argument(char *buf, char *end, void *ptr);
++char *rust_fmt_argument(char *buf, char *end, const void *ptr);
+ 
+ /*
+  * Show a '%p' thing.  A kernel extension is that the '%p' is followed
+diff --git a/mm/gup.c b/mm/gup.c
+index 44c536904a83bb..d27e7c9e2596ce 100644
+--- a/mm/gup.c
++++ b/mm/gup.c
+@@ -1283,6 +1283,9 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
+ 	if ((gup_flags & FOLL_LONGTERM) && vma_is_fsdax(vma))
+ 		return -EOPNOTSUPP;
+ 
++	if ((gup_flags & FOLL_SPLIT_PMD) && is_vm_hugetlb_page(vma))
++		return -EOPNOTSUPP;
++
+ 	if (vma_is_secretmem(vma))
+ 		return -EFAULT;
+ 
+diff --git a/mm/memory.c b/mm/memory.c
+index 525f96ad65b8d7..99dceaf6a10579 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -1356,12 +1356,12 @@ int
+ copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
+ {
+ 	pgd_t *src_pgd, *dst_pgd;
+-	unsigned long next;
+ 	unsigned long addr = src_vma->vm_start;
+ 	unsigned long end = src_vma->vm_end;
+ 	struct mm_struct *dst_mm = dst_vma->vm_mm;
+ 	struct mm_struct *src_mm = src_vma->vm_mm;
+ 	struct mmu_notifier_range range;
++	unsigned long next, pfn;
+ 	bool is_cow;
+ 	int ret;
+ 
+@@ -1372,11 +1372,7 @@ copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
+ 		return copy_hugetlb_page_range(dst_mm, src_mm, dst_vma, src_vma);
+ 
+ 	if (unlikely(src_vma->vm_flags & VM_PFNMAP)) {
+-		/*
+-		 * We do not free on error cases below as remove_vma
+-		 * gets called on error from higher level routine
+-		 */
+-		ret = track_pfn_copy(src_vma);
++		ret = track_pfn_copy(dst_vma, src_vma, &pfn);
+ 		if (ret)
+ 			return ret;
+ 	}
+@@ -1413,7 +1409,6 @@ copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
+ 			continue;
+ 		if (unlikely(copy_p4d_range(dst_vma, src_vma, dst_pgd, src_pgd,
+ 					    addr, next))) {
+-			untrack_pfn_clear(dst_vma);
+ 			ret = -ENOMEM;
+ 			break;
+ 		}
+@@ -1423,6 +1418,8 @@ copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
+ 		raw_write_seqcount_end(&src_mm->write_protect_seq);
+ 		mmu_notifier_invalidate_range_end(&range);
+ 	}
++	if (ret && unlikely(src_vma->vm_flags & VM_PFNMAP))
++		untrack_pfn_copy(dst_vma, pfn);
+ 	return ret;
+ }
+ 
+@@ -6718,10 +6715,8 @@ void __might_fault(const char *file, int line)
+ 	if (pagefault_disabled())
+ 		return;
+ 	__might_sleep(file, line);
+-#if defined(CONFIG_DEBUG_ATOMIC_SLEEP)
+ 	if (current->mm)
+ 		might_lock_read(&current->mm->mmap_lock);
+-#endif
+ }
+ EXPORT_SYMBOL(__might_fault);
+ #endif
+diff --git a/mm/zswap.c b/mm/zswap.c
+index 7fefb2eb3fcd80..00d51d01375746 100644
+--- a/mm/zswap.c
++++ b/mm/zswap.c
+@@ -876,18 +876,32 @@ static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
+ {
+ 	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
+ 	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
++	struct acomp_req *req;
++	struct crypto_acomp *acomp;
++	u8 *buffer;
++
++	if (IS_ERR_OR_NULL(acomp_ctx))
++		return 0;
+ 
+ 	mutex_lock(&acomp_ctx->mutex);
+-	if (!IS_ERR_OR_NULL(acomp_ctx)) {
+-		if (!IS_ERR_OR_NULL(acomp_ctx->req))
+-			acomp_request_free(acomp_ctx->req);
+-		acomp_ctx->req = NULL;
+-		if (!IS_ERR_OR_NULL(acomp_ctx->acomp))
+-			crypto_free_acomp(acomp_ctx->acomp);
+-		kfree(acomp_ctx->buffer);
+-	}
++	req = acomp_ctx->req;
++	acomp = acomp_ctx->acomp;
++	buffer = acomp_ctx->buffer;
++	acomp_ctx->req = NULL;
++	acomp_ctx->acomp = NULL;
++	acomp_ctx->buffer = NULL;
+ 	mutex_unlock(&acomp_ctx->mutex);
+ 
++	/*
++	 * Do the actual freeing after releasing the mutex to avoid subtle
++	 * locking dependencies causing deadlocks.
++	 */
++	if (!IS_ERR_OR_NULL(req))
++		acomp_request_free(req);
++	if (!IS_ERR_OR_NULL(acomp))
++		crypto_free_acomp(acomp);
++	kfree(buffer);
++
+ 	return 0;
+ }
+ 
+diff --git a/net/can/af_can.c b/net/can/af_can.c
+index 01f3fbb3b67dc6..65230e81fa08c9 100644
+--- a/net/can/af_can.c
++++ b/net/can/af_can.c
+@@ -287,8 +287,8 @@ int can_send(struct sk_buff *skb, int loop)
+ 		netif_rx(newskb);
+ 
+ 	/* update statistics */
+-	pkg_stats->tx_frames++;
+-	pkg_stats->tx_frames_delta++;
++	atomic_long_inc(&pkg_stats->tx_frames);
++	atomic_long_inc(&pkg_stats->tx_frames_delta);
+ 
+ 	return 0;
+ 
+@@ -647,8 +647,8 @@ static void can_receive(struct sk_buff *skb, struct net_device *dev)
+ 	int matches;
+ 
+ 	/* update statistics */
+-	pkg_stats->rx_frames++;
+-	pkg_stats->rx_frames_delta++;
++	atomic_long_inc(&pkg_stats->rx_frames);
++	atomic_long_inc(&pkg_stats->rx_frames_delta);
+ 
+ 	/* create non-zero unique skb identifier together with *skb */
+ 	while (!(can_skb_prv(skb)->skbcnt))
+@@ -669,8 +669,8 @@ static void can_receive(struct sk_buff *skb, struct net_device *dev)
+ 	consume_skb(skb);
+ 
+ 	if (matches > 0) {
+-		pkg_stats->matches++;
+-		pkg_stats->matches_delta++;
++		atomic_long_inc(&pkg_stats->matches);
++		atomic_long_inc(&pkg_stats->matches_delta);
+ 	}
+ }
+ 
+diff --git a/net/can/af_can.h b/net/can/af_can.h
+index 7c2d9161e22457..22f3352c77fece 100644
+--- a/net/can/af_can.h
++++ b/net/can/af_can.h
+@@ -66,9 +66,9 @@ struct receiver {
+ struct can_pkg_stats {
+ 	unsigned long jiffies_init;
+ 
+-	unsigned long rx_frames;
+-	unsigned long tx_frames;
+-	unsigned long matches;
++	atomic_long_t rx_frames;
++	atomic_long_t tx_frames;
++	atomic_long_t matches;
+ 
+ 	unsigned long total_rx_rate;
+ 	unsigned long total_tx_rate;
+@@ -82,9 +82,9 @@ struct can_pkg_stats {
+ 	unsigned long max_tx_rate;
+ 	unsigned long max_rx_match_ratio;
+ 
+-	unsigned long rx_frames_delta;
+-	unsigned long tx_frames_delta;
+-	unsigned long matches_delta;
++	atomic_long_t rx_frames_delta;
++	atomic_long_t tx_frames_delta;
++	atomic_long_t matches_delta;
+ };
+ 
+ /* persistent statistics */
+diff --git a/net/can/proc.c b/net/can/proc.c
+index bbce97825f13fb..25fdf060e30d0d 100644
+--- a/net/can/proc.c
++++ b/net/can/proc.c
+@@ -118,6 +118,13 @@ void can_stat_update(struct timer_list *t)
+ 	struct can_pkg_stats *pkg_stats = net->can.pkg_stats;
+ 	unsigned long j = jiffies; /* snapshot */
+ 
++	long rx_frames = atomic_long_read(&pkg_stats->rx_frames);
++	long tx_frames = atomic_long_read(&pkg_stats->tx_frames);
++	long matches = atomic_long_read(&pkg_stats->matches);
++	long rx_frames_delta = atomic_long_read(&pkg_stats->rx_frames_delta);
++	long tx_frames_delta = atomic_long_read(&pkg_stats->tx_frames_delta);
++	long matches_delta = atomic_long_read(&pkg_stats->matches_delta);
++
+ 	/* restart counting in timer context on user request */
+ 	if (user_reset)
+ 		can_init_stats(net);
+@@ -127,35 +134,33 @@ void can_stat_update(struct timer_list *t)
+ 		can_init_stats(net);
+ 
+ 	/* prevent overflow in calc_rate() */
+-	if (pkg_stats->rx_frames > (ULONG_MAX / HZ))
++	if (rx_frames > (LONG_MAX / HZ))
+ 		can_init_stats(net);
+ 
+ 	/* prevent overflow in calc_rate() */
+-	if (pkg_stats->tx_frames > (ULONG_MAX / HZ))
++	if (tx_frames > (LONG_MAX / HZ))
+ 		can_init_stats(net);
+ 
+ 	/* matches overflow - very improbable */
+-	if (pkg_stats->matches > (ULONG_MAX / 100))
++	if (matches > (LONG_MAX / 100))
+ 		can_init_stats(net);
+ 
+ 	/* calc total values */
+-	if (pkg_stats->rx_frames)
+-		pkg_stats->total_rx_match_ratio = (pkg_stats->matches * 100) /
+-			pkg_stats->rx_frames;
++	if (rx_frames)
++		pkg_stats->total_rx_match_ratio = (matches * 100) / rx_frames;
+ 
+ 	pkg_stats->total_tx_rate = calc_rate(pkg_stats->jiffies_init, j,
+-					    pkg_stats->tx_frames);
++					    tx_frames);
+ 	pkg_stats->total_rx_rate = calc_rate(pkg_stats->jiffies_init, j,
+-					    pkg_stats->rx_frames);
++					    rx_frames);
+ 
+ 	/* calc current values */
+-	if (pkg_stats->rx_frames_delta)
++	if (rx_frames_delta)
+ 		pkg_stats->current_rx_match_ratio =
+-			(pkg_stats->matches_delta * 100) /
+-			pkg_stats->rx_frames_delta;
++			(matches_delta * 100) / rx_frames_delta;
+ 
+-	pkg_stats->current_tx_rate = calc_rate(0, HZ, pkg_stats->tx_frames_delta);
+-	pkg_stats->current_rx_rate = calc_rate(0, HZ, pkg_stats->rx_frames_delta);
++	pkg_stats->current_tx_rate = calc_rate(0, HZ, tx_frames_delta);
++	pkg_stats->current_rx_rate = calc_rate(0, HZ, rx_frames_delta);
+ 
+ 	/* check / update maximum values */
+ 	if (pkg_stats->max_tx_rate < pkg_stats->current_tx_rate)
+@@ -168,9 +173,9 @@ void can_stat_update(struct timer_list *t)
+ 		pkg_stats->max_rx_match_ratio = pkg_stats->current_rx_match_ratio;
+ 
+ 	/* clear values for 'current rate' calculation */
+-	pkg_stats->tx_frames_delta = 0;
+-	pkg_stats->rx_frames_delta = 0;
+-	pkg_stats->matches_delta   = 0;
++	atomic_long_set(&pkg_stats->tx_frames_delta, 0);
++	atomic_long_set(&pkg_stats->rx_frames_delta, 0);
++	atomic_long_set(&pkg_stats->matches_delta, 0);
+ 
+ 	/* restart timer (one second) */
+ 	mod_timer(&net->can.stattimer, round_jiffies(jiffies + HZ));
+@@ -214,9 +219,12 @@ static int can_stats_proc_show(struct seq_file *m, void *v)
+ 	struct can_rcv_lists_stats *rcv_lists_stats = net->can.rcv_lists_stats;
+ 
+ 	seq_putc(m, '\n');
+-	seq_printf(m, " %8ld transmitted frames (TXF)\n", pkg_stats->tx_frames);
+-	seq_printf(m, " %8ld received frames (RXF)\n", pkg_stats->rx_frames);
+-	seq_printf(m, " %8ld matched frames (RXMF)\n", pkg_stats->matches);
++	seq_printf(m, " %8ld transmitted frames (TXF)\n",
++		   atomic_long_read(&pkg_stats->tx_frames));
++	seq_printf(m, " %8ld received frames (RXF)\n",
++		   atomic_long_read(&pkg_stats->rx_frames));
++	seq_printf(m, " %8ld matched frames (RXMF)\n",
++		   atomic_long_read(&pkg_stats->matches));
+ 
+ 	seq_putc(m, '\n');
+ 
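Across af_can.c/af_can.h/proc.c above, plain unsigned long counters become atomic_long_t: increments from concurrent RX/TX paths are otherwise read-modify-write races that can lose counts, and the stats timer now computes rates from one coherent snapshot. A hedged C11 sketch of the conversion:

#include <stdatomic.h>
#include <stdio.h>

struct pkg_stats {
	atomic_long rx_frames;
	atomic_long tx_frames;
};

static struct pkg_stats stats;

/* hot paths: one indivisible increment each, no lock */
static void on_rx(void) { atomic_fetch_add(&stats.rx_frames, 1); }
static void on_tx(void) { atomic_fetch_add(&stats.tx_frames, 1); }

/* timer path: read a snapshot once, derive everything from it */
static void stats_update(void)
{
	long rx = atomic_load(&stats.rx_frames);
	long tx = atomic_load(&stats.tx_frames);

	printf("rx=%ld tx=%ld\n", rx, tx);
}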
+diff --git a/net/core/devmem.c b/net/core/devmem.c
+index 11b91c12ee1135..17f8a83a5ee74a 100644
+--- a/net/core/devmem.c
++++ b/net/core/devmem.c
+@@ -108,6 +108,7 @@ void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding)
+ 	struct netdev_rx_queue *rxq;
+ 	unsigned long xa_idx;
+ 	unsigned int rxq_idx;
++	int err;
+ 
+ 	if (binding->list.next)
+ 		list_del(&binding->list);
+@@ -119,7 +120,8 @@ void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding)
+ 
+ 		rxq_idx = get_netdev_rx_queue_index(rxq);
+ 
+-		WARN_ON(netdev_rx_queue_restart(binding->dev, rxq_idx));
++		err = netdev_rx_queue_restart(binding->dev, rxq_idx);
++		WARN_ON(err && err != -ENETDOWN);
+ 	}
+ 
+ 	xa_erase(&net_devmem_dmabuf_bindings, binding->id);
+diff --git a/net/core/dst.c b/net/core/dst.c
+index 9552a90d4772dc..6d76b799ce645d 100644
+--- a/net/core/dst.c
++++ b/net/core/dst.c
+@@ -165,6 +165,14 @@ static void dst_count_dec(struct dst_entry *dst)
+ void dst_release(struct dst_entry *dst)
+ {
+ 	if (dst && rcuref_put(&dst->__rcuref)) {
++#ifdef CONFIG_DST_CACHE
++		if (dst->flags & DST_METADATA) {
++			struct metadata_dst *md_dst = (struct metadata_dst *)dst;
++
++			if (md_dst->type == METADATA_IP_TUNNEL)
++				dst_cache_reset_now(&md_dst->u.tun_info.dst_cache);
++		}
++#endif
+ 		dst_count_dec(dst);
+ 		call_rcu_hurry(&dst->rcu_head, dst_destroy_rcu);
+ 	}
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 2ba5cd965d3fae..4d0ee1c9002aac 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -1005,6 +1005,9 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev,
+ 				 /* IFLA_VF_STATS_TX_DROPPED */
+ 				 nla_total_size_64bit(sizeof(__u64)));
+ 		}
++		if (dev->netdev_ops->ndo_get_vf_guid)
++			size += num_vfs * 2 *
++				nla_total_size(sizeof(struct ifla_vf_guid));
+ 		return size;
+ 	} else
+ 		return 0;
+diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
+index a3676155be78b9..f65d2f7273813b 100644
+--- a/net/ipv4/ip_tunnel_core.c
++++ b/net/ipv4/ip_tunnel_core.c
+@@ -416,7 +416,7 @@ int skb_tunnel_check_pmtu(struct sk_buff *skb, struct dst_entry *encap_dst,
+ 
+ 	skb_dst_update_pmtu_no_confirm(skb, mtu);
+ 
+-	if (!reply || skb->pkt_type == PACKET_HOST)
++	if (!reply)
+ 		return 0;
+ 
+ 	if (skb->protocol == htons(ETH_P_IP))
+@@ -451,7 +451,7 @@ static const struct nla_policy
+ geneve_opt_policy[LWTUNNEL_IP_OPT_GENEVE_MAX + 1] = {
+ 	[LWTUNNEL_IP_OPT_GENEVE_CLASS]	= { .type = NLA_U16 },
+ 	[LWTUNNEL_IP_OPT_GENEVE_TYPE]	= { .type = NLA_U8 },
+-	[LWTUNNEL_IP_OPT_GENEVE_DATA]	= { .type = NLA_BINARY, .len = 128 },
++	[LWTUNNEL_IP_OPT_GENEVE_DATA]	= { .type = NLA_BINARY, .len = 127 },
+ };
+ 
+ static const struct nla_policy
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index 8da74dc63061c0..f4e24fc878fabe 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -1470,12 +1470,12 @@ static bool udp_skb_has_head_state(struct sk_buff *skb)
+ }
+ 
+ /* fully reclaim rmem/fwd memory allocated for skb */
+-static void udp_rmem_release(struct sock *sk, int size, int partial,
+-			     bool rx_queue_lock_held)
++static void udp_rmem_release(struct sock *sk, unsigned int size,
++			     int partial, bool rx_queue_lock_held)
+ {
+ 	struct udp_sock *up = udp_sk(sk);
+ 	struct sk_buff_head *sk_queue;
+-	int amt;
++	unsigned int amt;
+ 
+ 	if (likely(partial)) {
+ 		up->forward_deficit += size;
+@@ -1495,10 +1495,8 @@ static void udp_rmem_release(struct sock *sk, int size, int partial,
+ 	if (!rx_queue_lock_held)
+ 		spin_lock(&sk_queue->lock);
+ 
+-
+-	sk_forward_alloc_add(sk, size);
+-	amt = (sk->sk_forward_alloc - partial) & ~(PAGE_SIZE - 1);
+-	sk_forward_alloc_add(sk, -amt);
++	amt = (size + sk->sk_forward_alloc - partial) & ~(PAGE_SIZE - 1);
++	sk_forward_alloc_add(sk, size - amt);
+ 
+ 	if (amt)
+ 		__sk_mem_reduce_allocated(sk, amt >> PAGE_SHIFT);
+@@ -1570,17 +1568,25 @@ static int udp_rmem_schedule(struct sock *sk, int size)
+ int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
+ {
+ 	struct sk_buff_head *list = &sk->sk_receive_queue;
+-	int rmem, err = -ENOMEM;
++	unsigned int rmem, rcvbuf;
+ 	spinlock_t *busy = NULL;
+-	int size, rcvbuf;
++	int size, err = -ENOMEM;
+ 
+-	/* Immediately drop when the receive queue is full.
+-	 * Always allow at least one packet.
+-	 */
+ 	rmem = atomic_read(&sk->sk_rmem_alloc);
+ 	rcvbuf = READ_ONCE(sk->sk_rcvbuf);
+-	if (rmem > rcvbuf)
+-		goto drop;
++	size = skb->truesize;
++
++	/* Immediately drop when the receive queue is full.
++	 * Cast to unsigned int performs the boundary check for INT_MAX.
++	 */
++	if (rmem + size > rcvbuf) {
++		if (rcvbuf > INT_MAX >> 1)
++			goto drop;
++
++		/* Always allow at least one packet for small buffer. */
++		if (rmem > rcvbuf)
++			goto drop;
++	}
+ 
+ 	/* Under mem pressure, it might be helpful to help udp_recvmsg()
+ 	 * having linear skbs :
+@@ -1590,10 +1596,10 @@ int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
+ 	 */
+ 	if (rmem > (rcvbuf >> 1)) {
+ 		skb_condense(skb);
+-
++		size = skb->truesize;
+ 		busy = busylock_acquire(sk);
+ 	}
+-	size = skb->truesize;
++
+ 	udp_set_dev_scratch(skb);
+ 
+ 	atomic_add(size, &sk->sk_rmem_alloc);
+@@ -1680,7 +1686,7 @@ EXPORT_SYMBOL_GPL(skb_consume_udp);
+ 
+ static struct sk_buff *__first_packet_length(struct sock *sk,
+ 					     struct sk_buff_head *rcvq,
+-					     int *total)
++					     unsigned int *total)
+ {
+ 	struct sk_buff *skb;
+ 
+@@ -1713,8 +1719,8 @@ static int first_packet_length(struct sock *sk)
+ {
+ 	struct sk_buff_head *rcvq = &udp_sk(sk)->reader_queue;
+ 	struct sk_buff_head *sk_queue = &sk->sk_receive_queue;
++	unsigned int total = 0;
+ 	struct sk_buff *skb;
+-	int total = 0;
+ 	int res;
+ 
+ 	spin_lock_bh(&rcvq->lock);
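The udp.c enqueue check now computes rmem + size in unsigned int on purpose: each term fits in an int, but the sum may not, and unsigned arithmetic keeps the comparison well defined while an oversized sum still compares greater than any rcvbuf bounded by INT_MAX. A small demonstration (values hypothetical):

/* With signed ints, rmem + size can overflow (undefined behavior)
 * before the comparison; in unsigned arithmetic the oversized sum
 * simply compares greater than any valid rcvbuf (<= INT_MAX).
 */
#include <limits.h>
#include <stdio.h>

int main(void)
{
	unsigned int rmem   = INT_MAX - 100; /* nearly full queue */
	unsigned int size   = 4096;          /* incoming skb truesize */
	unsigned int rcvbuf = INT_MAX;

	if (rmem + size > rcvbuf)            /* no UB, no silent wrap */
		puts("drop");
	else
		puts("enqueue");
	return 0;
}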
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index f7c17388ff6aaf..f5d49162f79834 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -5807,6 +5807,27 @@ static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype,
+ 	}
+ }
+ 
++static int inet6_fill_ifla6_stats_attrs(struct sk_buff *skb,
++					struct inet6_dev *idev)
++{
++	struct nlattr *nla;
++
++	nla = nla_reserve(skb, IFLA_INET6_STATS, IPSTATS_MIB_MAX * sizeof(u64));
++	if (!nla)
++		goto nla_put_failure;
++	snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_STATS, nla_len(nla));
++
++	nla = nla_reserve(skb, IFLA_INET6_ICMP6STATS, ICMP6_MIB_MAX * sizeof(u64));
++	if (!nla)
++		goto nla_put_failure;
++	snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_ICMP6STATS, nla_len(nla));
++
++	return 0;
++
++nla_put_failure:
++	return -EMSGSIZE;
++}
++
+ static int inet6_fill_ifla6_attrs(struct sk_buff *skb, struct inet6_dev *idev,
+ 				  u32 ext_filter_mask)
+ {
+@@ -5829,18 +5850,10 @@ static int inet6_fill_ifla6_attrs(struct sk_buff *skb, struct inet6_dev *idev,
+ 
+ 	/* XXX - MC not implemented */
+ 
+-	if (ext_filter_mask & RTEXT_FILTER_SKIP_STATS)
+-		return 0;
+-
+-	nla = nla_reserve(skb, IFLA_INET6_STATS, IPSTATS_MIB_MAX * sizeof(u64));
+-	if (!nla)
+-		goto nla_put_failure;
+-	snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_STATS, nla_len(nla));
+-
+-	nla = nla_reserve(skb, IFLA_INET6_ICMP6STATS, ICMP6_MIB_MAX * sizeof(u64));
+-	if (!nla)
+-		goto nla_put_failure;
+-	snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_ICMP6STATS, nla_len(nla));
++	if (!(ext_filter_mask & RTEXT_FILTER_SKIP_STATS)) {
++		if (inet6_fill_ifla6_stats_attrs(skb, idev) < 0)
++			goto nla_put_failure;
++	}
+ 
+ 	nla = nla_reserve(skb, IFLA_INET6_TOKEN, sizeof(struct in6_addr));
+ 	if (!nla)
+diff --git a/net/ipv6/calipso.c b/net/ipv6/calipso.c
+index dbcea9fee6262d..62618a058b8fad 100644
+--- a/net/ipv6/calipso.c
++++ b/net/ipv6/calipso.c
+@@ -1072,8 +1072,13 @@ static int calipso_sock_getattr(struct sock *sk,
+ 	struct ipv6_opt_hdr *hop;
+ 	int opt_len, len, ret_val = -ENOMSG, offset;
+ 	unsigned char *opt;
+-	struct ipv6_txoptions *txopts = txopt_get(inet6_sk(sk));
++	struct ipv6_pinfo *pinfo = inet6_sk(sk);
++	struct ipv6_txoptions *txopts;
++
++	if (!pinfo)
++		return -EAFNOSUPPORT;
+ 
++	txopts = txopt_get(pinfo);
+ 	if (!txopts || !txopts->hopopt)
+ 		goto done;
+ 
+@@ -1125,8 +1130,13 @@ static int calipso_sock_setattr(struct sock *sk,
+ {
+ 	int ret_val;
+ 	struct ipv6_opt_hdr *old, *new;
+-	struct ipv6_txoptions *txopts = txopt_get(inet6_sk(sk));
++	struct ipv6_pinfo *pinfo = inet6_sk(sk);
++	struct ipv6_txoptions *txopts;
++
++	if (!pinfo)
++		return -EAFNOSUPPORT;
+ 
++	txopts = txopt_get(pinfo);
+ 	old = NULL;
+ 	if (txopts)
+ 		old = txopts->hopopt;
+@@ -1153,8 +1163,13 @@ static int calipso_sock_setattr(struct sock *sk,
+ static void calipso_sock_delattr(struct sock *sk)
+ {
+ 	struct ipv6_opt_hdr *new_hop;
+-	struct ipv6_txoptions *txopts = txopt_get(inet6_sk(sk));
++	struct ipv6_pinfo *pinfo = inet6_sk(sk);
++	struct ipv6_txoptions *txopts;
++
++	if (!pinfo)
++		return;
+ 
++	txopts = txopt_get(pinfo);
+ 	if (!txopts || !txopts->hopopt)
+ 		goto done;
+ 
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index b393c37d24245c..987492dcb07ca8 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -412,12 +412,37 @@ static bool rt6_check_expired(const struct rt6_info *rt)
+ 	return false;
+ }
+ 
++static struct fib6_info *
++rt6_multipath_first_sibling_rcu(const struct fib6_info *rt)
++{
++	struct fib6_info *iter;
++	struct fib6_node *fn;
++
++	fn = rcu_dereference(rt->fib6_node);
++	if (!fn)
++		goto out;
++	iter = rcu_dereference(fn->leaf);
++	if (!iter)
++		goto out;
++
++	while (iter) {
++		if (iter->fib6_metric == rt->fib6_metric &&
++		    rt6_qualify_for_ecmp(iter))
++			return iter;
++		iter = rcu_dereference(iter->fib6_next);
++	}
++
++out:
++	return NULL;
++}
++
+ void fib6_select_path(const struct net *net, struct fib6_result *res,
+ 		      struct flowi6 *fl6, int oif, bool have_oif_match,
+ 		      const struct sk_buff *skb, int strict)
+ {
+-	struct fib6_info *match = res->f6i;
++	struct fib6_info *first, *match = res->f6i;
+ 	struct fib6_info *sibling;
++	int hash;
+ 
+ 	if (!match->nh && (!match->fib6_nsiblings || have_oif_match))
+ 		goto out;
+@@ -440,16 +465,25 @@ void fib6_select_path(const struct net *net, struct fib6_result *res,
+ 		return;
+ 	}
+ 
+-	if (fl6->mp_hash <= atomic_read(&match->fib6_nh->fib_nh_upper_bound))
++	first = rt6_multipath_first_sibling_rcu(match);
++	if (!first)
+ 		goto out;
+ 
+-	list_for_each_entry_rcu(sibling, &match->fib6_siblings,
++	hash = fl6->mp_hash;
++	if (hash <= atomic_read(&first->fib6_nh->fib_nh_upper_bound) &&
++	    rt6_score_route(first->fib6_nh, first->fib6_flags, oif,
++			    strict) >= 0) {
++		match = first;
++		goto out;
++	}
++
++	list_for_each_entry_rcu(sibling, &first->fib6_siblings,
+ 				fib6_siblings) {
+ 		const struct fib6_nh *nh = sibling->fib6_nh;
+ 		int nh_upper_bound;
+ 
+ 		nh_upper_bound = atomic_read(&nh->fib_nh_upper_bound);
+-		if (fl6->mp_hash > nh_upper_bound)
++		if (hash > nh_upper_bound)
+ 			continue;
+ 		if (rt6_score_route(nh, sibling->fib6_flags, oif, strict) < 0)
+ 			break;
+diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c
+index fe868b52162201..4243f8ee5ab6b6 100644
+--- a/net/mac80211/driver-ops.c
++++ b/net/mac80211/driver-ops.c
+@@ -115,8 +115,14 @@ void drv_remove_interface(struct ieee80211_local *local,
+ 
+ 	sdata->flags &= ~IEEE80211_SDATA_IN_DRIVER;
+ 
+-	/* Remove driver debugfs entries */
+-	ieee80211_debugfs_recreate_netdev(sdata, sdata->vif.valid_links);
++	/*
++	 * Remove driver debugfs entries.
++	 * The virtual monitor interface doesn't get a debugfs
++	 * entry, so it's exempt here.
++	 */
++	if (sdata != rcu_access_pointer(local->monitor_sdata))
++		ieee80211_debugfs_recreate_netdev(sdata,
++						  sdata->vif.valid_links);
+ 
+ 	trace_drv_remove_interface(local, sdata);
+ 	local->ops->remove_interface(&local->hw, &sdata->vif);
+diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
+index af9055252e6dfa..8bbfa45e1796df 100644
+--- a/net/mac80211/iface.c
++++ b/net/mac80211/iface.c
+@@ -1205,16 +1205,17 @@ void ieee80211_del_virtual_monitor(struct ieee80211_local *local)
+ 		return;
+ 	}
+ 
+-	RCU_INIT_POINTER(local->monitor_sdata, NULL);
+-	mutex_unlock(&local->iflist_mtx);
+-
+-	synchronize_net();
+-
++	clear_bit(SDATA_STATE_RUNNING, &sdata->state);
+ 	ieee80211_link_release_channel(&sdata->deflink);
+ 
+ 	if (ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF))
+ 		drv_remove_interface(local, sdata);
+ 
++	RCU_INIT_POINTER(local->monitor_sdata, NULL);
++	mutex_unlock(&local->iflist_mtx);
++
++	synchronize_net();
++
+ 	kfree(sdata);
+ }
+ 
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index 6f3a86040cfcd8..8e1fbdd3bff10b 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -6,7 +6,7 @@
+  * Copyright 2007-2010	Johannes Berg <johannes@sipsolutions.net>
+  * Copyright 2013-2014  Intel Mobile Communications GmbH
+  * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+- * Copyright (C) 2018-2024 Intel Corporation
++ * Copyright (C) 2018-2025 Intel Corporation
+  */
+ 
+ #include <linux/jiffies.h>
+@@ -3323,8 +3323,8 @@ static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
+ 		return;
+ 	}
+ 
+-	if (!ether_addr_equal(mgmt->sa, sdata->deflink.u.mgd.bssid) ||
+-	    !ether_addr_equal(mgmt->bssid, sdata->deflink.u.mgd.bssid)) {
++	if (!ether_addr_equal(mgmt->sa, sdata->vif.cfg.ap_addr) ||
++	    !ether_addr_equal(mgmt->bssid, sdata->vif.cfg.ap_addr)) {
+ 		/* Not from the current AP or not associated yet. */
+ 		return;
+ 	}
+@@ -3340,9 +3340,9 @@ static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
+ 
+ 	skb_reserve(skb, local->hw.extra_tx_headroom);
+ 	resp = skb_put_zero(skb, 24);
+-	memcpy(resp->da, mgmt->sa, ETH_ALEN);
++	memcpy(resp->da, sdata->vif.cfg.ap_addr, ETH_ALEN);
+ 	memcpy(resp->sa, sdata->vif.addr, ETH_ALEN);
+-	memcpy(resp->bssid, sdata->deflink.u.mgd.bssid, ETH_ALEN);
++	memcpy(resp->bssid, sdata->vif.cfg.ap_addr, ETH_ALEN);
+ 	resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+ 					  IEEE80211_STYPE_ACTION);
+ 	skb_put(skb, 1 + sizeof(resp->u.action.u.sa_query));
+diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
+index aa22f09e6d145f..49095f19a0f221 100644
+--- a/net/mac80211/sta_info.c
++++ b/net/mac80211/sta_info.c
+@@ -4,7 +4,7 @@
+  * Copyright 2006-2007	Jiri Benc <jbenc@suse.cz>
+  * Copyright 2013-2014  Intel Mobile Communications GmbH
+  * Copyright (C) 2015 - 2017 Intel Deutschland GmbH
+- * Copyright (C) 2018-2023 Intel Corporation
++ * Copyright (C) 2018-2024 Intel Corporation
+  */
+ 
+ #include <linux/module.h>
+@@ -1317,9 +1317,13 @@ static int _sta_info_move_state(struct sta_info *sta,
+ 		sta->sta.addr, new_state);
+ 
+ 	/* notify the driver before the actual changes so it can
+-	 * fail the transition
++	 * fail the transition if the state is increasing.
++	 * The driver is required not to fail when the transition
++	 * decreases the state, so do all the preparation work first
++	 * and only then notify the driver.
+ 	 */
+-	if (test_sta_flag(sta, WLAN_STA_INSERTED)) {
++	if (new_state > sta->sta_state &&
++	    test_sta_flag(sta, WLAN_STA_INSERTED)) {
+ 		int err = drv_sta_state(sta->local, sta->sdata, sta,
+ 					sta->sta_state, new_state);
+ 		if (err)
+@@ -1395,6 +1399,16 @@ static int _sta_info_move_state(struct sta_info *sta,
+ 		break;
+ 	}
+ 
++	if (new_state < sta->sta_state &&
++	    test_sta_flag(sta, WLAN_STA_INSERTED)) {
++		int err = drv_sta_state(sta->local, sta->sdata, sta,
++					sta->sta_state, new_state);
++
++		WARN_ONCE(err,
++			  "Driver is not allowed to fail if the sta_state is transitioning down the list: %d\n",
++			  err);
++	}
++
+ 	sta->sta_state = new_state;
+ 
+ 	return 0;
+diff --git a/net/mac80211/util.c b/net/mac80211/util.c
+index 2b6e8e7307ee5e..a98ae563613c04 100644
+--- a/net/mac80211/util.c
++++ b/net/mac80211/util.c
+@@ -685,7 +685,7 @@ void __ieee80211_flush_queues(struct ieee80211_local *local,
+ 			      struct ieee80211_sub_if_data *sdata,
+ 			      unsigned int queues, bool drop)
+ {
+-	if (!local->ops->flush)
++	if (!local->ops->flush && !drop)
+ 		return;
+ 
+ 	/*
+@@ -712,7 +712,8 @@ void __ieee80211_flush_queues(struct ieee80211_local *local,
+ 		}
+ 	}
+ 
+-	drv_flush(local, sdata, queues, drop);
++	if (local->ops->flush)
++		drv_flush(local, sdata, queues, drop);
+ 
+ 	ieee80211_wake_queues_by_reason(&local->hw, queues,
+ 					IEEE80211_QUEUE_STOP_REASON_FLUSH,
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index eb3a6f96b094db..bdee187bc5dd45 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -2732,11 +2732,11 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
+ 			err = nft_netdev_register_hooks(ctx->net, &hook.list);
+ 			if (err < 0)
+ 				goto err_hooks;
++
++			unregister = true;
+ 		}
+ 	}
+ 
+-	unregister = true;
+-
+ 	if (nla[NFTA_CHAIN_COUNTERS]) {
+ 		if (!nft_is_base_chain(chain)) {
+ 			err = -EOPNOTSUPP;
+diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c
+index b93f046ac7d1e1..4b3452dff2ec08 100644
+--- a/net/netfilter/nft_set_hash.c
++++ b/net/netfilter/nft_set_hash.c
+@@ -309,7 +309,8 @@ static bool nft_rhash_expr_needs_gc_run(const struct nft_set *set,
+ 
+ 	nft_setelem_expr_foreach(expr, elem_expr, size) {
+ 		if (expr->ops->gc &&
+-		    expr->ops->gc(read_pnet(&set->net), expr))
++		    expr->ops->gc(read_pnet(&set->net), expr) &&
++		    set->flags & NFT_SET_EVAL)
+ 			return true;
+ 	}
+ 
+diff --git a/net/netfilter/nft_tunnel.c b/net/netfilter/nft_tunnel.c
+index 5c6ed68cc6e058..0d99786c322e88 100644
+--- a/net/netfilter/nft_tunnel.c
++++ b/net/netfilter/nft_tunnel.c
+@@ -335,13 +335,13 @@ static int nft_tunnel_obj_erspan_init(const struct nlattr *attr,
+ static const struct nla_policy nft_tunnel_opts_geneve_policy[NFTA_TUNNEL_KEY_GENEVE_MAX + 1] = {
+ 	[NFTA_TUNNEL_KEY_GENEVE_CLASS]	= { .type = NLA_U16 },
+ 	[NFTA_TUNNEL_KEY_GENEVE_TYPE]	= { .type = NLA_U8 },
+-	[NFTA_TUNNEL_KEY_GENEVE_DATA]	= { .type = NLA_BINARY, .len = 128 },
++	[NFTA_TUNNEL_KEY_GENEVE_DATA]	= { .type = NLA_BINARY, .len = 127 },
+ };
+ 
+ static int nft_tunnel_obj_geneve_init(const struct nlattr *attr,
+ 				      struct nft_tunnel_opts *opts)
+ {
+-	struct geneve_opt *opt = (struct geneve_opt *)opts->u.data + opts->len;
++	struct geneve_opt *opt = (struct geneve_opt *)(opts->u.data + opts->len);
+ 	struct nlattr *tb[NFTA_TUNNEL_KEY_GENEVE_MAX + 1];
+ 	int err, data_len;
+ 
+@@ -628,7 +628,7 @@ static int nft_tunnel_opts_dump(struct sk_buff *skb,
+ 		if (!inner)
+ 			goto failure;
+ 		while (opts->len > offset) {
+-			opt = (struct geneve_opt *)opts->u.data + offset;
++			opt = (struct geneve_opt *)(opts->u.data + offset);
+ 			if (nla_put_be16(skb, NFTA_TUNNEL_KEY_GENEVE_CLASS,
+ 					 opt->opt_class) ||
+ 			    nla_put_u8(skb, NFTA_TUNNEL_KEY_GENEVE_TYPE,
+diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
+index 704c858cf2093b..61fea7baae5d5c 100644
+--- a/net/openvswitch/actions.c
++++ b/net/openvswitch/actions.c
+@@ -947,12 +947,6 @@ static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
+ 				pskb_trim(skb, ovs_mac_header_len(key));
+ 		}
+ 
+-		/* Need to set the pkt_type to involve the routing layer.  The
+-		 * packet movement through the OVS datapath doesn't generally
+-		 * use routing, but this is needed for tunnel cases.
+-		 */
+-		skb->pkt_type = PACKET_OUTGOING;
+-
+ 		if (likely(!mru ||
+ 		           (skb->len <= mru + vport->dev->hard_header_len))) {
+ 			ovs_vport_send(vport, skb, ovs_key_mac_proto(key));
+diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
+index af7c9984594880..e296714803dc02 100644
+--- a/net/sched/act_tunnel_key.c
++++ b/net/sched/act_tunnel_key.c
+@@ -68,7 +68,7 @@ geneve_opt_policy[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX + 1] = {
+ 	[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS]	   = { .type = NLA_U16 },
+ 	[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE]	   = { .type = NLA_U8 },
+ 	[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA]	   = { .type = NLA_BINARY,
+-						       .len = 128 },
++						       .len = 127 },
+ };
+ 
+ static const struct nla_policy
+diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
+index 03505673d5234d..099ff6a3e1f516 100644
+--- a/net/sched/cls_flower.c
++++ b/net/sched/cls_flower.c
+@@ -766,7 +766,7 @@ geneve_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1] = {
+ 	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]      = { .type = NLA_U16 },
+ 	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]       = { .type = NLA_U8 },
+ 	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]       = { .type = NLA_BINARY,
+-						       .len = 128 },
++						       .len = 127 },
+ };
+ 
+ static const struct nla_policy
+diff --git a/net/sched/sch_skbprio.c b/net/sched/sch_skbprio.c
+index 20ff7386b74bd8..f485f62ab721ab 100644
+--- a/net/sched/sch_skbprio.c
++++ b/net/sched/sch_skbprio.c
+@@ -123,8 +123,6 @@ static int skbprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ 	/* Check to update highest and lowest priorities. */
+ 	if (skb_queue_empty(lp_qdisc)) {
+ 		if (q->lowest_prio == q->highest_prio) {
+-			/* The incoming packet is the only packet in queue. */
+-			BUG_ON(sch->q.qlen != 1);
+ 			q->lowest_prio = prio;
+ 			q->highest_prio = prio;
+ 		} else {
+@@ -156,7 +154,6 @@ static struct sk_buff *skbprio_dequeue(struct Qdisc *sch)
+ 	/* Update highest priority field. */
+ 	if (skb_queue_empty(hpq)) {
+ 		if (q->lowest_prio == q->highest_prio) {
+-			BUG_ON(sch->q.qlen);
+ 			q->highest_prio = 0;
+ 			q->lowest_prio = SKBPRIO_MAX_PRIORITY - 1;
+ 		} else {
+diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
+index 8e1e97be4df79f..ee3eac338a9dee 100644
+--- a/net/sctp/sysctl.c
++++ b/net/sctp/sysctl.c
+@@ -525,6 +525,8 @@ static int proc_sctp_do_auth(const struct ctl_table *ctl, int write,
+ 	return ret;
+ }
+ 
++static DEFINE_MUTEX(sctp_sysctl_mutex);
++
+ static int proc_sctp_do_udp_port(const struct ctl_table *ctl, int write,
+ 				 void *buffer, size_t *lenp, loff_t *ppos)
+ {
+@@ -549,6 +551,7 @@ static int proc_sctp_do_udp_port(const struct ctl_table *ctl, int write,
+ 		if (new_value > max || new_value < min)
+ 			return -EINVAL;
+ 
++		mutex_lock(&sctp_sysctl_mutex);
+ 		net->sctp.udp_port = new_value;
+ 		sctp_udp_sock_stop(net);
+ 		if (new_value) {
+@@ -561,6 +564,7 @@ static int proc_sctp_do_udp_port(const struct ctl_table *ctl, int write,
+ 		lock_sock(sk);
+ 		sctp_sk(sk)->udp_port = htons(net->sctp.udp_port);
+ 		release_sock(sk);
++		mutex_unlock(&sctp_sysctl_mutex);
+ 	}
+ 
+ 	return ret;
+diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
+index eb6ea26b390ee8..d08f205b33dccf 100644
+--- a/net/vmw_vsock/af_vsock.c
++++ b/net/vmw_vsock/af_vsock.c
+@@ -1551,7 +1551,11 @@ static int vsock_connect(struct socket *sock, struct sockaddr *addr,
+ 	timeout = vsk->connect_timeout;
+ 	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
+ 
+-	while (sk->sk_state != TCP_ESTABLISHED && sk->sk_err == 0) {
++	/* If the socket is already closing or it is in an error state, there
++	 * is no point in waiting.
++	 */
++	while (sk->sk_state != TCP_ESTABLISHED &&
++	       sk->sk_state != TCP_CLOSING && sk->sk_err == 0) {
+ 		if (flags & O_NONBLOCK) {
+ 			/* If we're not going to block, we schedule a timeout
+ 			 * function to generate a timeout on the connection
+diff --git a/rust/Makefile b/rust/Makefile
+index 09521fc449dca2..1b00e16951eeb8 100644
+--- a/rust/Makefile
++++ b/rust/Makefile
+@@ -227,7 +227,8 @@ bindgen_skip_c_flags := -mno-fp-ret-in-387 -mpreferred-stack-boundary=% \
+ 	-mfunction-return=thunk-extern -mrecord-mcount -mabi=lp64 \
+ 	-mindirect-branch-cs-prefix -mstack-protector-guard% -mtraceback=no \
+ 	-mno-pointers-to-nested-functions -mno-string \
+-	-mno-strict-align -mstrict-align \
++	-mno-strict-align -mstrict-align -mdirect-extern-access \
++	-mexplicit-relocs -mno-check-zero-division \
+ 	-fconserve-stack -falign-jumps=% -falign-loops=% \
+ 	-femit-struct-debug-baseonly -fno-ipa-cp-clone -fno-ipa-sra \
+ 	-fno-partial-inlining -fplugin-arg-arm_ssp_per_task_plugin-% \
+@@ -241,6 +242,7 @@ bindgen_skip_c_flags := -mno-fp-ret-in-387 -mpreferred-stack-boundary=% \
+ # Derived from `scripts/Makefile.clang`.
+ BINDGEN_TARGET_x86	:= x86_64-linux-gnu
+ BINDGEN_TARGET_arm64	:= aarch64-linux-gnu
++BINDGEN_TARGET_loongarch	:= loongarch64-linux-gnusf
+ BINDGEN_TARGET		:= $(BINDGEN_TARGET_$(SRCARCH))
+ 
+ # All warnings are inhibited since GCC builds are very experimental,
+diff --git a/rust/kernel/print.rs b/rust/kernel/print.rs
+index a28077a7cb3011..e52cd64333bccc 100644
+--- a/rust/kernel/print.rs
++++ b/rust/kernel/print.rs
+@@ -6,12 +6,11 @@
+ //!
+ //! Reference: <https://docs.kernel.org/core-api/printk-basics.html>
+ 
+-use core::{
++use crate::{
+     ffi::{c_char, c_void},
+-    fmt,
++    str::RawFormatter,
+ };
+-
+-use crate::str::RawFormatter;
++use core::fmt;
+ 
+ // Called from `vsprintf` with format specifier `%pA`.
+ #[expect(clippy::missing_safety_doc)]
+diff --git a/scripts/package/debian/rules b/scripts/package/debian/rules
+index ca07243bd5cdf6..2b3f9a0bd6c40f 100755
+--- a/scripts/package/debian/rules
++++ b/scripts/package/debian/rules
+@@ -21,9 +21,11 @@ ifeq ($(origin KBUILD_VERBOSE),undefined)
+     endif
+ endif
+ 
+-revision = $(lastword $(subst -, ,$(shell dpkg-parsechangelog -S Version)))
++revision = $(shell dpkg-parsechangelog -S Version | sed -n 's/.*-//p')
+ CROSS_COMPILE ?= $(filter-out $(DEB_BUILD_GNU_TYPE)-, $(DEB_HOST_GNU_TYPE)-)
+-make-opts = ARCH=$(ARCH) KERNELRELEASE=$(KERNELRELEASE) KBUILD_BUILD_VERSION=$(revision) $(addprefix CROSS_COMPILE=,$(CROSS_COMPILE))
++make-opts = ARCH=$(ARCH) KERNELRELEASE=$(KERNELRELEASE) \
++    $(addprefix KBUILD_BUILD_VERSION=,$(revision)) \
++    $(addprefix CROSS_COMPILE=,$(CROSS_COMPILE))
+ 
+ binary-targets := $(addprefix binary-, image image-dbg headers libc-dev)
+ 
+diff --git a/scripts/selinux/install_policy.sh b/scripts/selinux/install_policy.sh
+index 24086793b0d8d4..db40237e60ce7e 100755
+--- a/scripts/selinux/install_policy.sh
++++ b/scripts/selinux/install_policy.sh
+@@ -6,27 +6,24 @@ if [ `id -u` -ne 0 ]; then
+ 	exit 1
+ fi
+ 
+-SF=`which setfiles`
+-if [ $? -eq 1 ]; then
++SF=`which setfiles` || {
+ 	echo "Could not find setfiles"
+ 	echo "Do you have policycoreutils installed?"
+ 	exit 1
+-fi
++}
+ 
+-CP=`which checkpolicy`
+-if [ $? -eq 1 ]; then
++CP=`which checkpolicy` || {
+ 	echo "Could not find checkpolicy"
+ 	echo "Do you have checkpolicy installed?"
+ 	exit 1
+-fi
++}
+ VERS=`$CP -V | awk '{print $1}'`
+ 
+-ENABLED=`which selinuxenabled`
+-if [ $? -eq 1 ]; then
++ENABLED=`which selinuxenabled` || {
+ 	echo "Could not find selinuxenabled"
+ 	echo "Do you have libselinux-utils installed?"
+ 	exit 1
+-fi
++}
+ 
+ if selinuxenabled; then
+     echo "SELinux is already enabled"
+diff --git a/security/smack/smack.h b/security/smack/smack.h
+index dbf8d7226eb56a..1c3656b5e3b91b 100644
+--- a/security/smack/smack.h
++++ b/security/smack/smack.h
+@@ -152,6 +152,7 @@ struct smk_net4addr {
+ 	struct smack_known	*smk_label;	/* label */
+ };
+ 
++#if IS_ENABLED(CONFIG_IPV6)
+ /*
+  * An entry in the table identifying IPv6 hosts.
+  */
+@@ -162,7 +163,9 @@ struct smk_net6addr {
+ 	int			smk_masks;	/* mask size */
+ 	struct smack_known	*smk_label;	/* label */
+ };
++#endif /* CONFIG_IPV6 */
+ 
++#ifdef SMACK_IPV6_PORT_LABELING
+ /*
+  * An entry in the table identifying ports.
+  */
+@@ -175,6 +178,7 @@ struct smk_port_label {
+ 	short			smk_sock_type;	/* Socket type */
+ 	short			smk_can_reuse;
+ };
++#endif /* SMACK_IPV6_PORT_LABELING */
+ 
+ struct smack_known_list_elem {
+ 	struct list_head	list;
+@@ -314,7 +318,9 @@ extern struct smack_known smack_known_web;
+ extern struct mutex	smack_known_lock;
+ extern struct list_head smack_known_list;
+ extern struct list_head smk_net4addr_list;
++#if IS_ENABLED(CONFIG_IPV6)
+ extern struct list_head smk_net6addr_list;
++#endif /* CONFIG_IPV6 */
+ 
+ extern struct mutex     smack_onlycap_lock;
+ extern struct list_head smack_onlycap_list;
+diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
+index 370fd594da1252..9e13fd39206300 100644
+--- a/security/smack/smack_lsm.c
++++ b/security/smack/smack_lsm.c
+@@ -2498,6 +2498,7 @@ static struct smack_known *smack_ipv4host_label(struct sockaddr_in *sip)
+ 	return NULL;
+ }
+ 
++#if IS_ENABLED(CONFIG_IPV6)
+ /*
+  * smk_ipv6_localhost - Check for local ipv6 host address
+  * @sip: the address
+@@ -2565,6 +2566,7 @@ static struct smack_known *smack_ipv6host_label(struct sockaddr_in6 *sip)
+ 
+ 	return NULL;
+ }
++#endif /* CONFIG_IPV6 */
+ 
+ /**
+  * smack_netlbl_add - Set the secattr on a socket
+@@ -2669,6 +2671,7 @@ static int smk_ipv4_check(struct sock *sk, struct sockaddr_in *sap)
+ 	return rc;
+ }
+ 
++#if IS_ENABLED(CONFIG_IPV6)
+ /**
+  * smk_ipv6_check - check Smack access
+  * @subject: subject Smack label
+@@ -2701,6 +2704,7 @@ static int smk_ipv6_check(struct smack_known *subject,
+ 	rc = smk_bu_note("IPv6 check", subject, object, MAY_WRITE, rc);
+ 	return rc;
+ }
++#endif /* CONFIG_IPV6 */
+ 
+ #ifdef SMACK_IPV6_PORT_LABELING
+ /**
+@@ -3033,7 +3037,9 @@ static int smack_socket_connect(struct socket *sock, struct sockaddr *sap,
+ 		return 0;
+ 	if (addrlen < offsetofend(struct sockaddr, sa_family))
+ 		return 0;
+-	if (IS_ENABLED(CONFIG_IPV6) && sap->sa_family == AF_INET6) {
++
++#if IS_ENABLED(CONFIG_IPV6)
++	if (sap->sa_family == AF_INET6) {
+ 		struct sockaddr_in6 *sip = (struct sockaddr_in6 *)sap;
+ 		struct smack_known *rsp = NULL;
+ 
+@@ -3053,6 +3059,8 @@ static int smack_socket_connect(struct socket *sock, struct sockaddr *sap,
+ 
+ 		return rc;
+ 	}
++#endif /* CONFIG_IPV6 */
++
+ 	if (sap->sa_family != AF_INET || addrlen < sizeof(struct sockaddr_in))
+ 		return 0;
+ 	rc = smk_ipv4_check(sock->sk, (struct sockaddr_in *)sap);
+@@ -4349,29 +4357,6 @@ static int smack_socket_getpeersec_dgram(struct socket *sock,
+ 	return 0;
+ }
+ 
+-/**
+- * smack_sock_graft - Initialize a newly created socket with an existing sock
+- * @sk: child sock
+- * @parent: parent socket
+- *
+- * Set the smk_{in,out} state of an existing sock based on the process that
+- * is creating the new socket.
+- */
+-static void smack_sock_graft(struct sock *sk, struct socket *parent)
+-{
+-	struct socket_smack *ssp;
+-	struct smack_known *skp = smk_of_current();
+-
+-	if (sk == NULL ||
+-	    (sk->sk_family != PF_INET && sk->sk_family != PF_INET6))
+-		return;
+-
+-	ssp = smack_sock(sk);
+-	ssp->smk_in = skp;
+-	ssp->smk_out = skp;
+-	/* cssp->smk_packet is already set in smack_inet_csk_clone() */
+-}
+-
+ /**
+  * smack_inet_conn_request - Smack access check on connect
+  * @sk: socket involved
+@@ -5160,7 +5145,6 @@ static struct security_hook_list smack_hooks[] __ro_after_init = {
+ 	LSM_HOOK_INIT(sk_free_security, smack_sk_free_security),
+ #endif
+ 	LSM_HOOK_INIT(sk_clone_security, smack_sk_clone_security),
+-	LSM_HOOK_INIT(sock_graft, smack_sock_graft),
+ 	LSM_HOOK_INIT(inet_conn_request, smack_inet_conn_request),
+ 	LSM_HOOK_INIT(inet_csk_clone, smack_inet_csk_clone),
+ 
+diff --git a/sound/core/timer.c b/sound/core/timer.c
+index fbada79380f9ea..d774b9b71ce238 100644
+--- a/sound/core/timer.c
++++ b/sound/core/timer.c
+@@ -1515,91 +1515,97 @@ static void snd_timer_user_copy_id(struct snd_timer_id *id, struct snd_timer *ti
+ 	id->subdevice = timer->tmr_subdevice;
+ }
+ 
+-static int snd_timer_user_next_device(struct snd_timer_id __user *_tid)
++static void get_next_device(struct snd_timer_id *id)
+ {
+-	struct snd_timer_id id;
+ 	struct snd_timer *timer;
+ 	struct list_head *p;
+ 
+-	if (copy_from_user(&id, _tid, sizeof(id)))
+-		return -EFAULT;
+-	guard(mutex)(&register_mutex);
+-	if (id.dev_class < 0) {		/* first item */
++	if (id->dev_class < 0) {		/* first item */
+ 		if (list_empty(&snd_timer_list))
+-			snd_timer_user_zero_id(&id);
++			snd_timer_user_zero_id(id);
+ 		else {
+ 			timer = list_entry(snd_timer_list.next,
+ 					   struct snd_timer, device_list);
+-			snd_timer_user_copy_id(&id, timer);
++			snd_timer_user_copy_id(id, timer);
+ 		}
+ 	} else {
+-		switch (id.dev_class) {
++		switch (id->dev_class) {
+ 		case SNDRV_TIMER_CLASS_GLOBAL:
+-			id.device = id.device < 0 ? 0 : id.device + 1;
++			id->device = id->device < 0 ? 0 : id->device + 1;
+ 			list_for_each(p, &snd_timer_list) {
+ 				timer = list_entry(p, struct snd_timer, device_list);
+ 				if (timer->tmr_class > SNDRV_TIMER_CLASS_GLOBAL) {
+-					snd_timer_user_copy_id(&id, timer);
++					snd_timer_user_copy_id(id, timer);
+ 					break;
+ 				}
+-				if (timer->tmr_device >= id.device) {
+-					snd_timer_user_copy_id(&id, timer);
++				if (timer->tmr_device >= id->device) {
++					snd_timer_user_copy_id(id, timer);
+ 					break;
+ 				}
+ 			}
+ 			if (p == &snd_timer_list)
+-				snd_timer_user_zero_id(&id);
++				snd_timer_user_zero_id(id);
+ 			break;
+ 		case SNDRV_TIMER_CLASS_CARD:
+ 		case SNDRV_TIMER_CLASS_PCM:
+-			if (id.card < 0) {
+-				id.card = 0;
++			if (id->card < 0) {
++				id->card = 0;
+ 			} else {
+-				if (id.device < 0) {
+-					id.device = 0;
++				if (id->device < 0) {
++					id->device = 0;
+ 				} else {
+-					if (id.subdevice < 0)
+-						id.subdevice = 0;
+-					else if (id.subdevice < INT_MAX)
+-						id.subdevice++;
++					if (id->subdevice < 0)
++						id->subdevice = 0;
++					else if (id->subdevice < INT_MAX)
++						id->subdevice++;
+ 				}
+ 			}
+ 			list_for_each(p, &snd_timer_list) {
+ 				timer = list_entry(p, struct snd_timer, device_list);
+-				if (timer->tmr_class > id.dev_class) {
+-					snd_timer_user_copy_id(&id, timer);
++				if (timer->tmr_class > id->dev_class) {
++					snd_timer_user_copy_id(id, timer);
+ 					break;
+ 				}
+-				if (timer->tmr_class < id.dev_class)
++				if (timer->tmr_class < id->dev_class)
+ 					continue;
+-				if (timer->card->number > id.card) {
+-					snd_timer_user_copy_id(&id, timer);
++				if (timer->card->number > id->card) {
++					snd_timer_user_copy_id(id, timer);
+ 					break;
+ 				}
+-				if (timer->card->number < id.card)
++				if (timer->card->number < id->card)
+ 					continue;
+-				if (timer->tmr_device > id.device) {
+-					snd_timer_user_copy_id(&id, timer);
++				if (timer->tmr_device > id->device) {
++					snd_timer_user_copy_id(id, timer);
+ 					break;
+ 				}
+-				if (timer->tmr_device < id.device)
++				if (timer->tmr_device < id->device)
+ 					continue;
+-				if (timer->tmr_subdevice > id.subdevice) {
+-					snd_timer_user_copy_id(&id, timer);
++				if (timer->tmr_subdevice > id->subdevice) {
++					snd_timer_user_copy_id(id, timer);
+ 					break;
+ 				}
+-				if (timer->tmr_subdevice < id.subdevice)
++				if (timer->tmr_subdevice < id->subdevice)
+ 					continue;
+-				snd_timer_user_copy_id(&id, timer);
++				snd_timer_user_copy_id(id, timer);
+ 				break;
+ 			}
+ 			if (p == &snd_timer_list)
+-				snd_timer_user_zero_id(&id);
++				snd_timer_user_zero_id(id);
+ 			break;
+ 		default:
+-			snd_timer_user_zero_id(&id);
++			snd_timer_user_zero_id(id);
+ 		}
+ 	}
++}
++
++static int snd_timer_user_next_device(struct snd_timer_id __user *_tid)
++{
++	struct snd_timer_id id;
++
++	if (copy_from_user(&id, _tid, sizeof(id)))
++		return -EFAULT;
++	scoped_guard(mutex, &register_mutex)
++		get_next_device(&id);
+ 	if (copy_to_user(_tid, &id, sizeof(*_tid)))
+ 		return -EFAULT;
+ 	return 0;
+@@ -1620,23 +1626,24 @@ static int snd_timer_user_ginfo(struct file *file,
+ 	tid = ginfo->tid;
+ 	memset(ginfo, 0, sizeof(*ginfo));
+ 	ginfo->tid = tid;
+-	guard(mutex)(&register_mutex);
+-	t = snd_timer_find(&tid);
+-	if (!t)
+-		return -ENODEV;
+-	ginfo->card = t->card ? t->card->number : -1;
+-	if (t->hw.flags & SNDRV_TIMER_HW_SLAVE)
+-		ginfo->flags |= SNDRV_TIMER_FLG_SLAVE;
+-	strscpy(ginfo->id, t->id, sizeof(ginfo->id));
+-	strscpy(ginfo->name, t->name, sizeof(ginfo->name));
+-	scoped_guard(spinlock_irq, &t->lock)
+-		ginfo->resolution = snd_timer_hw_resolution(t);
+-	if (t->hw.resolution_min > 0) {
+-		ginfo->resolution_min = t->hw.resolution_min;
+-		ginfo->resolution_max = t->hw.resolution_max;
+-	}
+-	list_for_each(p, &t->open_list_head) {
+-		ginfo->clients++;
++	scoped_guard(mutex, &register_mutex) {
++		t = snd_timer_find(&tid);
++		if (!t)
++			return -ENODEV;
++		ginfo->card = t->card ? t->card->number : -1;
++		if (t->hw.flags & SNDRV_TIMER_HW_SLAVE)
++			ginfo->flags |= SNDRV_TIMER_FLG_SLAVE;
++		strscpy(ginfo->id, t->id, sizeof(ginfo->id));
++		strscpy(ginfo->name, t->name, sizeof(ginfo->name));
++		scoped_guard(spinlock_irq, &t->lock)
++			ginfo->resolution = snd_timer_hw_resolution(t);
++		if (t->hw.resolution_min > 0) {
++			ginfo->resolution_min = t->hw.resolution_min;
++			ginfo->resolution_max = t->hw.resolution_max;
++		}
++		list_for_each(p, &t->open_list_head) {
++			ginfo->clients++;
++		}
+ 	}
+ 	if (copy_to_user(_ginfo, ginfo, sizeof(*ginfo)))
+ 		return -EFAULT;
+@@ -1674,31 +1681,31 @@ static int snd_timer_user_gstatus(struct file *file,
+ 	struct snd_timer_gstatus gstatus;
+ 	struct snd_timer_id tid;
+ 	struct snd_timer *t;
+-	int err = 0;
+ 
+ 	if (copy_from_user(&gstatus, _gstatus, sizeof(gstatus)))
+ 		return -EFAULT;
+ 	tid = gstatus.tid;
+ 	memset(&gstatus, 0, sizeof(gstatus));
+ 	gstatus.tid = tid;
+-	guard(mutex)(&register_mutex);
+-	t = snd_timer_find(&tid);
+-	if (t != NULL) {
+-		guard(spinlock_irq)(&t->lock);
+-		gstatus.resolution = snd_timer_hw_resolution(t);
+-		if (t->hw.precise_resolution) {
+-			t->hw.precise_resolution(t, &gstatus.resolution_num,
+-						 &gstatus.resolution_den);
++	scoped_guard(mutex, &register_mutex) {
++		t = snd_timer_find(&tid);
++		if (t != NULL) {
++			guard(spinlock_irq)(&t->lock);
++			gstatus.resolution = snd_timer_hw_resolution(t);
++			if (t->hw.precise_resolution) {
++				t->hw.precise_resolution(t, &gstatus.resolution_num,
++							 &gstatus.resolution_den);
++			} else {
++				gstatus.resolution_num = gstatus.resolution;
++				gstatus.resolution_den = 1000000000uL;
++			}
+ 		} else {
+-			gstatus.resolution_num = gstatus.resolution;
+-			gstatus.resolution_den = 1000000000uL;
++			return -ENODEV;
+ 		}
+-	} else {
+-		err = -ENODEV;
+ 	}
+-	if (err >= 0 && copy_to_user(_gstatus, &gstatus, sizeof(gstatus)))
+-		err = -EFAULT;
+-	return err;
++	if (copy_to_user(_gstatus, &gstatus, sizeof(gstatus)))
++		return -EFAULT;
++	return 0;
+ }
+ 
+ static int snd_timer_user_tselect(struct file *file,
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 8c7da13a804c04..59e59fdc38f2c4 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -586,6 +586,9 @@ static void alc_shutup_pins(struct hda_codec *codec)
+ {
+ 	struct alc_spec *spec = codec->spec;
+ 
++	if (spec->no_shutup_pins)
++		return;
++
+ 	switch (codec->core.vendor_id) {
+ 	case 0x10ec0236:
+ 	case 0x10ec0256:
+@@ -601,8 +604,7 @@ static void alc_shutup_pins(struct hda_codec *codec)
+ 		alc_headset_mic_no_shutup(codec);
+ 		break;
+ 	default:
+-		if (!spec->no_shutup_pins)
+-			snd_hda_shutup_pins(codec);
++		snd_hda_shutup_pins(codec);
+ 		break;
+ 	}
+ }
+@@ -4792,6 +4794,21 @@ static void alc236_fixup_hp_coef_micmute_led(struct hda_codec *codec,
+ 	}
+ }
+ 
++static void alc295_fixup_hp_mute_led_coefbit11(struct hda_codec *codec,
++				const struct hda_fixup *fix, int action)
++{
++	struct alc_spec *spec = codec->spec;
++
++	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
++		spec->mute_led_polarity = 0;
++		spec->mute_led_coef.idx = 0xb;
++		spec->mute_led_coef.mask = 3 << 3;
++		spec->mute_led_coef.on = 1 << 3;
++		spec->mute_led_coef.off = 1 << 4;
++		snd_hda_gen_add_mute_led_cdev(codec, coef_mute_led_set);
++	}
++}
++
+ static void alc285_fixup_hp_mute_led(struct hda_codec *codec,
+ 				const struct hda_fixup *fix, int action)
+ {
+@@ -7624,6 +7641,7 @@ enum {
+ 	ALC290_FIXUP_MONO_SPEAKERS_HSJACK,
+ 	ALC290_FIXUP_SUBWOOFER,
+ 	ALC290_FIXUP_SUBWOOFER_HSJACK,
++	ALC295_FIXUP_HP_MUTE_LED_COEFBIT11,
+ 	ALC269_FIXUP_THINKPAD_ACPI,
+ 	ALC269_FIXUP_DMIC_THINKPAD_ACPI,
+ 	ALC269VB_FIXUP_INFINIX_ZERO_BOOK_13,
+@@ -9359,6 +9377,10 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.chained = true,
+ 		.chain_id = ALC283_FIXUP_INT_MIC,
+ 	},
++	[ALC295_FIXUP_HP_MUTE_LED_COEFBIT11] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc295_fixup_hp_mute_led_coefbit11,
++	},
+ 	[ALC298_FIXUP_SAMSUNG_AMP] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = alc298_fixup_samsung_amp,
+@@ -10394,6 +10416,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+ 	SND_PCI_QUIRK(0x103c, 0x8519, "HP Spectre x360 15-df0xxx", ALC285_FIXUP_HP_SPECTRE_X360),
+ 	SND_PCI_QUIRK(0x103c, 0x8537, "HP ProBook 440 G6", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
++	SND_PCI_QUIRK(0x103c, 0x85c6, "HP Pavilion x360 Convertible 14-dy1xxx", ALC295_FIXUP_HP_MUTE_LED_COEFBIT11),
+ 	SND_PCI_QUIRK(0x103c, 0x85de, "HP Envy x360 13-ar0xxx", ALC285_FIXUP_HP_ENVY_X360),
+ 	SND_PCI_QUIRK(0x103c, 0x860f, "HP ZBook 15 G6", ALC285_FIXUP_HP_GPIO_AMP_INIT),
+ 	SND_PCI_QUIRK(0x103c, 0x861f, "HP Elite Dragonfly G1", ALC285_FIXUP_HP_GPIO_AMP_INIT),
+@@ -10618,7 +10641,9 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8e1a, "HP ZBook Firefly 14 G12A", ALC285_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
++	SND_PCI_QUIRK(0x1043, 0x1054, "ASUS G614FH/FM/FP", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
++	SND_PCI_QUIRK(0x1043, 0x1074, "ASUS G614PH/PM/PP", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x1043, 0x10a1, "ASUS UX391UA", ALC294_FIXUP_ASUS_SPK),
+ 	SND_PCI_QUIRK(0x1043, 0x10a4, "ASUS TP3407SA", ALC287_FIXUP_TAS2781_I2C),
+ 	SND_PCI_QUIRK(0x1043, 0x10c0, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
+@@ -10626,15 +10651,18 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1043, 0x10d3, "ASUS K6500ZC", ALC294_FIXUP_ASUS_SPK),
+ 	SND_PCI_QUIRK(0x1043, 0x1154, "ASUS TP3607SH", ALC287_FIXUP_TAS2781_I2C),
+ 	SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
++	SND_PCI_QUIRK(0x1043, 0x1194, "ASUS UM3406KA", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x1043, 0x11c0, "ASUS X556UR", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1043, 0x1204, "ASUS Strix G615JHR_JMR_JPR", ALC287_FIXUP_TAS2781_I2C),
+ 	SND_PCI_QUIRK(0x1043, 0x1214, "ASUS Strix G615LH_LM_LP", ALC287_FIXUP_TAS2781_I2C),
+ 	SND_PCI_QUIRK(0x1043, 0x125e, "ASUS Q524UQK", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1043, 0x1271, "ASUS X430UN", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1043, 0x1290, "ASUS X441SA", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1043, 0x1294, "ASUS B3405CVA", ALC245_FIXUP_CS35L41_SPI_2),
+ 	SND_PCI_QUIRK(0x1043, 0x12a0, "ASUS X441UV", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1043, 0x12a3, "Asus N7691ZM", ALC269_FIXUP_ASUS_N7601ZM),
+ 	SND_PCI_QUIRK(0x1043, 0x12af, "ASUS UX582ZS", ALC245_FIXUP_CS35L41_SPI_2),
++	SND_PCI_QUIRK(0x1043, 0x12b4, "ASUS B3405CCA / P3405CCA", ALC245_FIXUP_CS35L41_SPI_2),
+ 	SND_PCI_QUIRK(0x1043, 0x12e0, "ASUS X541SA", ALC256_FIXUP_ASUS_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x12f0, "ASUS X541UV", ALC256_FIXUP_ASUS_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x1313, "Asus K42JZ", ALC269VB_FIXUP_ASUS_MIC_NO_PRESENCE),
+@@ -10648,6 +10676,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1043, 0x1493, "ASUS GV601VV/VU/VJ/VQ/VI", ALC285_FIXUP_ASUS_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x14d3, "ASUS G614JY/JZ/JG", ALC245_FIXUP_CS35L41_SPI_2),
+ 	SND_PCI_QUIRK(0x1043, 0x14e3, "ASUS G513PI/PU/PV", ALC287_FIXUP_CS35L41_I2C_2),
++	SND_PCI_QUIRK(0x1043, 0x14f2, "ASUS VivoBook X515JA", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1043, 0x1503, "ASUS G733PY/PZ/PZV/PYV", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
+ 	SND_PCI_QUIRK(0x1043, 0x1533, "ASUS GV302XA/XJ/XQ/XU/XV/XI", ALC287_FIXUP_CS35L41_I2C_2),
+@@ -10687,6 +10716,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1043, 0x1c43, "ASUS UX8406MA", ALC245_FIXUP_CS35L41_SPI_2),
+ 	SND_PCI_QUIRK(0x1043, 0x1c62, "ASUS GU603", ALC289_FIXUP_ASUS_GA401),
+ 	SND_PCI_QUIRK(0x1043, 0x1c63, "ASUS GU605M", ALC285_FIXUP_ASUS_GU605_SPI_SPEAKER2_TO_DAC1),
++	SND_PCI_QUIRK(0x1043, 0x1c80, "ASUS VivoBook TP401", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1043, 0x1c92, "ASUS ROG Strix G15", ALC285_FIXUP_ASUS_G533Z_PINS),
+ 	SND_PCI_QUIRK(0x1043, 0x1c9f, "ASUS G614JU/JV/JI", ALC285_FIXUP_ASUS_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x1caf, "ASUS G634JY/JZ/JI/JG", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS),
+@@ -10715,14 +10745,28 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1043, 0x1f12, "ASUS UM5302", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x1043, 0x1f1f, "ASUS H7604JI/JV/J3D", ALC245_FIXUP_CS35L41_SPI_2),
+ 	SND_PCI_QUIRK(0x1043, 0x1f62, "ASUS UX7602ZM", ALC245_FIXUP_CS35L41_SPI_2),
++	SND_PCI_QUIRK(0x1043, 0x1f63, "ASUS P5405CSA", ALC245_FIXUP_CS35L41_SPI_2),
+ 	SND_PCI_QUIRK(0x1043, 0x1f92, "ASUS ROG Flow X16", ALC289_FIXUP_ASUS_GA401),
++	SND_PCI_QUIRK(0x1043, 0x1fb3, "ASUS ROG Flow Z13 GZ302EA", ALC287_FIXUP_CS35L41_I2C_2),
++	SND_PCI_QUIRK(0x1043, 0x3011, "ASUS B5605CVA", ALC245_FIXUP_CS35L41_SPI_2),
+ 	SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2),
++	SND_PCI_QUIRK(0x1043, 0x3061, "ASUS B3405CCA", ALC245_FIXUP_CS35L41_SPI_2),
++	SND_PCI_QUIRK(0x1043, 0x3071, "ASUS B5405CCA", ALC245_FIXUP_CS35L41_SPI_2),
++	SND_PCI_QUIRK(0x1043, 0x30c1, "ASUS B3605CCA / P3605CCA", ALC245_FIXUP_CS35L41_SPI_2),
++	SND_PCI_QUIRK(0x1043, 0x30d1, "ASUS B5405CCA", ALC245_FIXUP_CS35L41_SPI_2),
++	SND_PCI_QUIRK(0x1043, 0x30e1, "ASUS B5605CCA", ALC245_FIXUP_CS35L41_SPI_2),
+ 	SND_PCI_QUIRK(0x1043, 0x31d0, "ASUS Zen AIO 27 Z272SD_A272SD", ALC274_FIXUP_ASUS_ZEN_AIO_27),
++	SND_PCI_QUIRK(0x1043, 0x31e1, "ASUS B5605CCA", ALC245_FIXUP_CS35L41_SPI_2),
++	SND_PCI_QUIRK(0x1043, 0x31f1, "ASUS B3605CCA", ALC245_FIXUP_CS35L41_SPI_2),
+ 	SND_PCI_QUIRK(0x1043, 0x3a20, "ASUS G614JZR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS),
+ 	SND_PCI_QUIRK(0x1043, 0x3a30, "ASUS G814JVR/JIR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS),
+ 	SND_PCI_QUIRK(0x1043, 0x3a40, "ASUS G814JZR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS),
+ 	SND_PCI_QUIRK(0x1043, 0x3a50, "ASUS G834JYR/JZR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS),
+ 	SND_PCI_QUIRK(0x1043, 0x3a60, "ASUS G634JYR/JZR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS),
++	SND_PCI_QUIRK(0x1043, 0x3d78, "ASUS GA603KH", ALC287_FIXUP_CS35L41_I2C_2),
++	SND_PCI_QUIRK(0x1043, 0x3d88, "ASUS GA603KM", ALC287_FIXUP_CS35L41_I2C_2),
++	SND_PCI_QUIRK(0x1043, 0x3e00, "ASUS G814FH/FM/FP", ALC287_FIXUP_CS35L41_I2C_2),
++	SND_PCI_QUIRK(0x1043, 0x3e20, "ASUS G814PH/PM/PP", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x1043, 0x3e30, "ASUS TP3607SA", ALC287_FIXUP_TAS2781_I2C),
+ 	SND_PCI_QUIRK(0x1043, 0x3ee0, "ASUS Strix G815_JHR_JMR_JPR", ALC287_FIXUP_TAS2781_I2C),
+ 	SND_PCI_QUIRK(0x1043, 0x3ef0, "ASUS Strix G635LR_LW_LX", ALC287_FIXUP_TAS2781_I2C),
+@@ -10730,6 +10774,8 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1043, 0x3f10, "ASUS Strix G835LR_LW_LX", ALC287_FIXUP_TAS2781_I2C),
+ 	SND_PCI_QUIRK(0x1043, 0x3f20, "ASUS Strix G615LR_LW", ALC287_FIXUP_TAS2781_I2C),
+ 	SND_PCI_QUIRK(0x1043, 0x3f30, "ASUS Strix G815LR_LW", ALC287_FIXUP_TAS2781_I2C),
++	SND_PCI_QUIRK(0x1043, 0x3fd0, "ASUS B3605CVA", ALC245_FIXUP_CS35L41_SPI_2),
++	SND_PCI_QUIRK(0x1043, 0x3ff0, "ASUS B5405CVA", ALC245_FIXUP_CS35L41_SPI_2),
+ 	SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC),
+ 	SND_PCI_QUIRK(0x1043, 0x834a, "ASUS S101", ALC269_FIXUP_STEREO_DMIC),
+ 	SND_PCI_QUIRK(0x1043, 0x8398, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC),
+diff --git a/sound/soc/amd/acp/acp-legacy-common.c b/sound/soc/amd/acp/acp-legacy-common.c
+index be01b178172e86..e4af2640feeb14 100644
+--- a/sound/soc/amd/acp/acp-legacy-common.c
++++ b/sound/soc/amd/acp/acp-legacy-common.c
+@@ -13,6 +13,7 @@
+  */
+ 
+ #include "amd.h"
++#include <linux/acpi.h>
+ #include <linux/pci.h>
+ #include <linux/export.h>
+ 
+@@ -445,7 +446,9 @@ void check_acp_config(struct pci_dev *pci, struct acp_chip_info *chip)
+ {
+ 	struct acpi_device *pdm_dev;
+ 	const union acpi_object *obj;
+-	u32 pdm_addr;
++	acpi_handle handle;
++	acpi_integer dmic_status;
++	u32 pdm_addr, ret;
+ 
+ 	switch (chip->acp_rev) {
+ 	case ACP3X_DEV:
+@@ -477,6 +480,11 @@ void check_acp_config(struct pci_dev *pci, struct acp_chip_info *chip)
+ 						   obj->integer.value == pdm_addr)
+ 				chip->is_pdm_dev = true;
+ 		}
++
++		handle = ACPI_HANDLE(&pci->dev);
++		ret = acpi_evaluate_integer(handle, "_WOV", NULL, &dmic_status);
++		if (!ACPI_FAILURE(ret))
++			chip->is_pdm_dev = dmic_status;
+ 	}
+ }
+ EXPORT_SYMBOL_NS_GPL(check_acp_config, SND_SOC_ACP_COMMON);
+diff --git a/sound/soc/codecs/cs35l41-spi.c b/sound/soc/codecs/cs35l41-spi.c
+index a6db44520c060b..f9b6bf7bea9c97 100644
+--- a/sound/soc/codecs/cs35l41-spi.c
++++ b/sound/soc/codecs/cs35l41-spi.c
+@@ -32,13 +32,16 @@ static int cs35l41_spi_probe(struct spi_device *spi)
+ 	const struct regmap_config *regmap_config = &cs35l41_regmap_spi;
+ 	struct cs35l41_hw_cfg *hw_cfg = dev_get_platdata(&spi->dev);
+ 	struct cs35l41_private *cs35l41;
++	int ret;
+ 
+ 	cs35l41 = devm_kzalloc(&spi->dev, sizeof(struct cs35l41_private), GFP_KERNEL);
+ 	if (!cs35l41)
+ 		return -ENOMEM;
+ 
+ 	spi->max_speed_hz = CS35L41_SPI_MAX_FREQ;
+-	spi_setup(spi);
++	ret = spi_setup(spi);
++	if (ret < 0)
++		return ret;
+ 
+ 	spi_set_drvdata(spi, cs35l41);
+ 	cs35l41->regmap = devm_regmap_init_spi(spi, regmap_config);
+diff --git a/sound/soc/codecs/rt1320-sdw.c b/sound/soc/codecs/rt1320-sdw.c
+index f4e1ea29c26513..f2d194e76a947f 100644
+--- a/sound/soc/codecs/rt1320-sdw.c
++++ b/sound/soc/codecs/rt1320-sdw.c
+@@ -3705,6 +3705,9 @@ static int rt1320_read_prop(struct sdw_slave *slave)
+ 	/* set the timeout values */
+ 	prop->clk_stop_timeout = 64;
+ 
++	/* BIOS may set wake_capable. Make sure it is 0 as wake events are disabled. */
++	prop->wake_capable = 0;
++
+ 	return 0;
+ }
+ 
+diff --git a/sound/soc/codecs/rt5665.c b/sound/soc/codecs/rt5665.c
+index 47df14ba52784b..4f0236b34a2d9b 100644
+--- a/sound/soc/codecs/rt5665.c
++++ b/sound/soc/codecs/rt5665.c
+@@ -31,9 +31,7 @@
+ #include "rl6231.h"
+ #include "rt5665.h"
+ 
+-#define RT5665_NUM_SUPPLIES 3
+-
+-static const char *rt5665_supply_names[RT5665_NUM_SUPPLIES] = {
++static const char * const rt5665_supply_names[] = {
+ 	"AVDD",
+ 	"MICVDD",
+ 	"VBAT",
+@@ -46,7 +44,6 @@ struct rt5665_priv {
+ 	struct gpio_desc *gpiod_ldo1_en;
+ 	struct gpio_desc *gpiod_reset;
+ 	struct snd_soc_jack *hs_jack;
+-	struct regulator_bulk_data supplies[RT5665_NUM_SUPPLIES];
+ 	struct delayed_work jack_detect_work;
+ 	struct delayed_work calibrate_work;
+ 	struct delayed_work jd_check_work;
+@@ -4471,8 +4468,6 @@ static void rt5665_remove(struct snd_soc_component *component)
+ 	struct rt5665_priv *rt5665 = snd_soc_component_get_drvdata(component);
+ 
+ 	regmap_write(rt5665->regmap, RT5665_RESET, 0);
+-
+-	regulator_bulk_disable(ARRAY_SIZE(rt5665->supplies), rt5665->supplies);
+ }
+ 
+ #ifdef CONFIG_PM
+@@ -4758,7 +4753,7 @@ static int rt5665_i2c_probe(struct i2c_client *i2c)
+ {
+ 	struct rt5665_platform_data *pdata = dev_get_platdata(&i2c->dev);
+ 	struct rt5665_priv *rt5665;
+-	int i, ret;
++	int ret;
+ 	unsigned int val;
+ 
+ 	rt5665 = devm_kzalloc(&i2c->dev, sizeof(struct rt5665_priv),
+@@ -4774,24 +4769,13 @@ static int rt5665_i2c_probe(struct i2c_client *i2c)
+ 	else
+ 		rt5665_parse_dt(rt5665, &i2c->dev);
+ 
+-	for (i = 0; i < ARRAY_SIZE(rt5665->supplies); i++)
+-		rt5665->supplies[i].supply = rt5665_supply_names[i];
+-
+-	ret = devm_regulator_bulk_get(&i2c->dev, ARRAY_SIZE(rt5665->supplies),
+-				      rt5665->supplies);
++	ret = devm_regulator_bulk_get_enable(&i2c->dev, ARRAY_SIZE(rt5665_supply_names),
++					     rt5665_supply_names);
+ 	if (ret != 0) {
+ 		dev_err(&i2c->dev, "Failed to request supplies: %d\n", ret);
+ 		return ret;
+ 	}
+ 
+-	ret = regulator_bulk_enable(ARRAY_SIZE(rt5665->supplies),
+-				    rt5665->supplies);
+-	if (ret != 0) {
+-		dev_err(&i2c->dev, "Failed to enable supplies: %d\n", ret);
+-		return ret;
+-	}
+-
+-
+ 	rt5665->gpiod_ldo1_en = devm_gpiod_get_optional(&i2c->dev,
+ 							"realtek,ldo1-en",
+ 							GPIOD_OUT_HIGH);
+diff --git a/sound/soc/codecs/wsa884x.c b/sound/soc/codecs/wsa884x.c
+index 86df5152c547bc..560a2c04b69553 100644
+--- a/sound/soc/codecs/wsa884x.c
++++ b/sound/soc/codecs/wsa884x.c
+@@ -1875,7 +1875,7 @@ static int wsa884x_get_temp(struct wsa884x_priv *wsa884x, long *temp)
+ 		 * Reading temperature is possible only when Power Amplifier is
+ 		 * off. Report last cached data.
+ 		 */
+-		*temp = wsa884x->temperature;
++		*temp = wsa884x->temperature * 1000;
+ 		return 0;
+ 	}
+ 
+@@ -1934,7 +1934,7 @@ static int wsa884x_get_temp(struct wsa884x_priv *wsa884x, long *temp)
+ 	if ((val > WSA884X_LOW_TEMP_THRESHOLD) &&
+ 	    (val < WSA884X_HIGH_TEMP_THRESHOLD)) {
+ 		wsa884x->temperature = val;
+-		*temp = val;
++		*temp = val * 1000;
+ 		ret = 0;
+ 	} else {
+ 		ret = -EAGAIN;
+diff --git a/sound/soc/fsl/imx-card.c b/sound/soc/fsl/imx-card.c
+index a7215bad648457..93dbe40008c009 100644
+--- a/sound/soc/fsl/imx-card.c
++++ b/sound/soc/fsl/imx-card.c
+@@ -738,6 +738,8 @@ static int imx_card_probe(struct platform_device *pdev)
+ 				data->dapm_routes[i].sink =
+ 					devm_kasprintf(&pdev->dev, GFP_KERNEL, "%d %s",
+ 						       i + 1, "Playback");
++				if (!data->dapm_routes[i].sink)
++					return -ENOMEM;
+ 				data->dapm_routes[i].source = "CPU-Playback";
+ 			}
+ 		}
+@@ -755,6 +757,8 @@ static int imx_card_probe(struct platform_device *pdev)
+ 				data->dapm_routes[i].source =
+ 					devm_kasprintf(&pdev->dev, GFP_KERNEL, "%d %s",
+ 						       i + 1, "Capture");
++				if (!data->dapm_routes[i].source)
++					return -ENOMEM;
+ 				data->dapm_routes[i].sink = "CPU-Capture";
+ 			}
+ 		}
+diff --git a/sound/soc/ti/j721e-evm.c b/sound/soc/ti/j721e-evm.c
+index d9d1e021f5b2ee..0f96cc45578d8c 100644
+--- a/sound/soc/ti/j721e-evm.c
++++ b/sound/soc/ti/j721e-evm.c
+@@ -182,6 +182,8 @@ static int j721e_configure_refclk(struct j721e_priv *priv,
+ 		clk_id = J721E_CLK_PARENT_48000;
+ 	else if (!(rate % 11025) && priv->pll_rates[J721E_CLK_PARENT_44100])
+ 		clk_id = J721E_CLK_PARENT_44100;
++	else if (!(rate % 11025) && priv->pll_rates[J721E_CLK_PARENT_48000])
++		clk_id = J721E_CLK_PARENT_48000;
+ 	else
+ 		return ret;
+ 
+diff --git a/tools/arch/x86/lib/insn.c b/tools/arch/x86/lib/insn.c
+index ab5cdc3337dacb..e91d4c4e1c1621 100644
+--- a/tools/arch/x86/lib/insn.c
++++ b/tools/arch/x86/lib/insn.c
+@@ -13,7 +13,7 @@
+ #endif
+ #include "../include/asm/inat.h" /* __ignore_sync_check__ */
+ #include "../include/asm/insn.h" /* __ignore_sync_check__ */
+-#include "../include/linux/unaligned.h" /* __ignore_sync_check__ */
++#include <linux/unaligned.h> /* __ignore_sync_check__ */
+ 
+ #include <linux/errno.h>
+ #include <linux/kconfig.h>
+diff --git a/tools/lib/bpf/linker.c b/tools/lib/bpf/linker.c
+index 777600822d8e45..179f6b31cbd6fa 100644
+--- a/tools/lib/bpf/linker.c
++++ b/tools/lib/bpf/linker.c
+@@ -2007,7 +2007,7 @@ static int linker_append_elf_sym(struct bpf_linker *linker, struct src_obj *obj,
+ 
+ 	obj->sym_map[src_sym_idx] = dst_sym_idx;
+ 
+-	if (sym_type == STT_SECTION && dst_sym) {
++	if (sym_type == STT_SECTION && dst_sec) {
+ 		dst_sec->sec_sym_idx = dst_sym_idx;
+ 		dst_sym->st_value = 0;
+ 	}
+diff --git a/tools/objtool/check.c b/tools/objtool/check.c
+index 3c3e5760e81b83..286a2c0af02aa8 100644
+--- a/tools/objtool/check.c
++++ b/tools/objtool/check.c
+@@ -4153,7 +4153,7 @@ static bool ignore_unreachable_insn(struct objtool_file *file, struct instructio
+ 	 * It may also insert a UD2 after calling a __noreturn function.
+ 	 */
+ 	prev_insn = prev_insn_same_sec(file, insn);
+-	if (prev_insn->dead_end &&
++	if (prev_insn && prev_insn->dead_end &&
+ 	    (insn->type == INSN_BUG ||
+ 	     (insn->type == INSN_JUMP_UNCONDITIONAL &&
+ 	      insn->jump_dest && insn->jump_dest->type == INSN_BUG)))
+@@ -4575,35 +4575,6 @@ static int validate_sls(struct objtool_file *file)
+ 	return warnings;
+ }
+ 
+-static bool ignore_noreturn_call(struct instruction *insn)
+-{
+-	struct symbol *call_dest = insn_call_dest(insn);
+-
+-	/*
+-	 * FIXME: hack, we need a real noreturn solution
+-	 *
+-	 * Problem is, exc_double_fault() may or may not return, depending on
+-	 * whether CONFIG_X86_ESPFIX64 is set.  But objtool has no visibility
+-	 * to the kernel config.
+-	 *
+-	 * Other potential ways to fix it:
+-	 *
+-	 *   - have compiler communicate __noreturn functions somehow
+-	 *   - remove CONFIG_X86_ESPFIX64
+-	 *   - read the .config file
+-	 *   - add a cmdline option
+-	 *   - create a generic objtool annotation format (vs a bunch of custom
+-	 *     formats) and annotate it
+-	 */
+-	if (!strcmp(call_dest->name, "exc_double_fault")) {
+-		/* prevent further unreachable warnings for the caller */
+-		insn->sym->warned = 1;
+-		return true;
+-	}
+-
+-	return false;
+-}
+-
+ static int validate_reachable_instructions(struct objtool_file *file)
+ {
+ 	struct instruction *insn, *prev_insn;
+@@ -4620,7 +4591,7 @@ static int validate_reachable_instructions(struct objtool_file *file)
+ 		prev_insn = prev_insn_same_sec(file, insn);
+ 		if (prev_insn && prev_insn->dead_end) {
+ 			call_dest = insn_call_dest(prev_insn);
+-			if (call_dest && !ignore_noreturn_call(prev_insn)) {
++			if (call_dest) {
+ 				WARN_INSN(insn, "%s() is missing a __noreturn annotation",
+ 					  call_dest->name);
+ 				warnings++;
+@@ -4643,6 +4614,8 @@ static int disas_funcs(const char *funcs)
+ 	char *cmd;
+ 
+ 	cross_compile = getenv("CROSS_COMPILE");
++	if (!cross_compile)
++		cross_compile = "";
+ 
+ 	objdump_str = "%sobjdump -wdr %s | gawk -M -v _funcs='%s' '"
+ 			"BEGIN { split(_funcs, funcs); }"
+diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
+index 2ce71d2e5fae05..b102a4c525e4b0 100644
+--- a/tools/perf/Makefile.config
++++ b/tools/perf/Makefile.config
+@@ -513,13 +513,14 @@ ifeq ($(feature-setns), 1)
+   $(call detected,CONFIG_SETNS)
+ endif
+ 
++ifeq ($(feature-reallocarray), 0)
++  CFLAGS += -DCOMPAT_NEED_REALLOCARRAY
++endif
++
+ ifdef CORESIGHT
+   $(call feature_check,libopencsd)
+   ifeq ($(feature-libopencsd), 1)
+     CFLAGS += -DHAVE_CSTRACE_SUPPORT $(LIBOPENCSD_CFLAGS)
+-    ifeq ($(feature-reallocarray), 0)
+-      CFLAGS += -DCOMPAT_NEED_REALLOCARRAY
+-    endif
+     LDFLAGS += $(LIBOPENCSD_LDFLAGS)
+     EXTLIBS += $(OPENCSDLIBS)
+     $(call detected,CONFIG_LIBOPENCSD)
+@@ -1135,9 +1136,6 @@ ifndef NO_AUXTRACE
+   ifndef NO_AUXTRACE
+     $(call detected,CONFIG_AUXTRACE)
+     CFLAGS += -DHAVE_AUXTRACE_SUPPORT
+-    ifeq ($(feature-reallocarray), 0)
+-      CFLAGS += -DCOMPAT_NEED_REALLOCARRAY
+-    endif
+   endif
+ endif
+ 
+diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
+index 9dd2e8d3f3c9b7..8ee59ecb14110f 100644
+--- a/tools/perf/Makefile.perf
++++ b/tools/perf/Makefile.perf
+@@ -164,7 +164,7 @@ ifneq ($(OUTPUT),)
+ VPATH += $(OUTPUT)
+ export VPATH
+ # create symlink to the original source
+-SOURCE := $(shell ln -sf $(srctree)/tools/perf $(OUTPUT)/source)
++SOURCE := $(shell ln -sfn $(srctree)/tools/perf $(OUTPUT)/source)
+ endif
+ 
+ ifeq ($(V),1)
+diff --git a/tools/perf/bench/syscall.c b/tools/perf/bench/syscall.c
+index ea4dfc07cbd6b8..e7dc216f717f5a 100644
+--- a/tools/perf/bench/syscall.c
++++ b/tools/perf/bench/syscall.c
+@@ -22,8 +22,7 @@
+ #define __NR_fork -1
+ #endif
+ 
+-#define LOOPS_DEFAULT 10000000
+-static	int loops = LOOPS_DEFAULT;
++static	int loops;
+ 
+ static const struct option options[] = {
+ 	OPT_INTEGER('l', "loop",	&loops,		"Specify number of loops"),
+@@ -80,6 +79,18 @@ static int bench_syscall_common(int argc, const char **argv, int syscall)
+ 	const char *name = NULL;
+ 	int i;
+ 
++	switch (syscall) {
++	case __NR_fork:
++	case __NR_execve:
++		/* Limit default loop to 10000 times to save time */
++		loops = 10000;
++		break;
++	default:
++		loops = 10000000;
++		break;
++	}
++
++	/* Options -l and --loops override default above */
+ 	argc = parse_options(argc, argv, options, bench_syscall_usage, 0);
+ 
+ 	gettimeofday(&start, NULL);
+@@ -94,16 +105,9 @@ static int bench_syscall_common(int argc, const char **argv, int syscall)
+ 			break;
+ 		case __NR_fork:
+ 			test_fork();
+-			/* Only loop 10000 times to save time */
+-			if (i == 10000)
+-				loops = 10000;
+ 			break;
+ 		case __NR_execve:
+ 			test_execve();
+-			/* Only loop 10000 times to save time */
+-			if (i == 10000)
+-				loops = 10000;
+-			break;
+ 		default:
+ 			break;
+ 		}
+diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
+index 645deec294c842..8700c39680662a 100644
+--- a/tools/perf/builtin-report.c
++++ b/tools/perf/builtin-report.c
+@@ -1551,12 +1551,12 @@ int cmd_report(int argc, const char **argv)
+ 			input_name = "perf.data";
+ 	}
+ 
++repeat:
+ 	data.path  = input_name;
+ 	data.force = symbol_conf.force;
+ 
+ 	symbol_conf.skip_empty = report.skip_empty;
+ 
+-repeat:
+ 	perf_tool__init(&report.tool, ordered_events);
+ 	report.tool.sample		 = process_sample_event;
+ 	report.tool.mmap		 = perf_event__process_mmap;
+diff --git a/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/metrics.json b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/metrics.json
+index c5d1d22bd034b1..5228f94a793f95 100644
+--- a/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/metrics.json
++++ b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/metrics.json
+@@ -229,19 +229,19 @@
+     },
+     {
+         "MetricName": "slots_lost_misspeculation_fraction",
+-        "MetricExpr": "(OP_SPEC - OP_RETIRED) / (CPU_CYCLES * #slots)",
++        "MetricExpr": "100 * (OP_SPEC - OP_RETIRED) / (CPU_CYCLES * #slots)",
+         "BriefDescription": "Fraction of slots lost due to misspeculation",
+         "DefaultMetricgroupName": "TopdownL1",
+         "MetricGroup": "Default;TopdownL1",
+-        "ScaleUnit": "100percent of slots"
++        "ScaleUnit": "1percent of slots"
+     },
+     {
+         "MetricName": "retired_fraction",
+-        "MetricExpr": "OP_RETIRED / (CPU_CYCLES * #slots)",
++        "MetricExpr": "100 * OP_RETIRED / (CPU_CYCLES * #slots)",
+         "BriefDescription": "Fraction of slots retiring, useful work",
+         "DefaultMetricgroupName": "TopdownL1",
+         "MetricGroup": "Default;TopdownL1",
+-        "ScaleUnit": "100percent of slots"
++        "ScaleUnit": "1percent of slots"
+     },
+     {
+         "MetricName": "backend_core",
+@@ -266,7 +266,7 @@
+     },
+     {
+         "MetricName": "frontend_bandwidth",
+-        "MetricExpr": "frontend_bound - frontend_latency",
++        "MetricExpr": "frontend_bound - 100 * frontend_latency",
+         "BriefDescription": "Fraction of slots the CPU did not dispatch at full bandwidth - able to dispatch partial slots only (1, 2, or 3 uops)",
+         "MetricGroup": "TopdownL2",
+         "ScaleUnit": "1percent of slots"
+diff --git a/tools/perf/tests/shell/coresight/asm_pure_loop/asm_pure_loop.S b/tools/perf/tests/shell/coresight/asm_pure_loop/asm_pure_loop.S
+index 75cf084a927d3d..5777600467723f 100644
+--- a/tools/perf/tests/shell/coresight/asm_pure_loop/asm_pure_loop.S
++++ b/tools/perf/tests/shell/coresight/asm_pure_loop/asm_pure_loop.S
+@@ -26,3 +26,5 @@ skip:
+ 	mov	x0, #0
+ 	mov	x8, #93 // __NR_exit syscall
+ 	svc	#0
++
++.section .note.GNU-stack, "", @progbits
+diff --git a/tools/perf/tests/shell/record_bpf_filter.sh b/tools/perf/tests/shell/record_bpf_filter.sh
+index 1b58ccc1fd882d..4d6c3c1b7fb925 100755
+--- a/tools/perf/tests/shell/record_bpf_filter.sh
++++ b/tools/perf/tests/shell/record_bpf_filter.sh
+@@ -89,7 +89,7 @@ test_bpf_filter_fail() {
+ test_bpf_filter_group() {
+   echo "Group bpf-filter test"
+ 
+-  if ! perf record -e task-clock --filter 'period > 1000 || ip > 0' \
++  if ! perf record -e task-clock --filter 'period > 1000, ip > 0' \
+ 	  -o /dev/null true 2>/dev/null
+   then
+     echo "Group bpf-filter test [Failed should succeed]"
+@@ -97,7 +97,7 @@ test_bpf_filter_group() {
+     return
+   fi
+ 
+-  if ! perf record -e task-clock --filter 'cpu > 0 || ip > 0' \
++  if ! perf record -e task-clock --filter 'period > 1000 , cpu > 0 || ip > 0' \
+ 	  -o /dev/null true 2>&1 | grep -q PERF_SAMPLE_CPU
+   then
+     echo "Group bpf-filter test [Failed forbidden CPU]"
+diff --git a/tools/perf/util/arm-spe.c b/tools/perf/util/arm-spe.c
+index 138ffc71b32dd7..2c06f2a85400e1 100644
+--- a/tools/perf/util/arm-spe.c
++++ b/tools/perf/util/arm-spe.c
+@@ -37,6 +37,8 @@
+ #include "../../arch/arm64/include/asm/cputype.h"
+ #define MAX_TIMESTAMP (~0ULL)
+ 
++#define is_ldst_op(op)		(!!((op) & ARM_SPE_OP_LDST))
++
+ struct arm_spe {
+ 	struct auxtrace			auxtrace;
+ 	struct auxtrace_queues		queues;
+@@ -520,6 +522,10 @@ static u64 arm_spe__synth_data_source(const struct arm_spe_record *record, u64 m
+ 	union perf_mem_data_src	data_src = { .mem_op = PERF_MEM_OP_NA };
+ 	bool is_neoverse = is_midr_in_range_list(midr, neoverse_spe);
+ 
++	/* Only synthesize data source for LDST operations */
++	if (!is_ldst_op(record->op))
++		return 0;
++
+ 	if (record->op & ARM_SPE_OP_LD)
+ 		data_src.mem_op = PERF_MEM_OP_LOAD;
+ 	else if (record->op & ARM_SPE_OP_ST)
+@@ -619,7 +625,7 @@ static int arm_spe_sample(struct arm_spe_queue *speq)
+ 	 * When data_src is zero it means the record is not a memory operation,
+ 	 * skip to synthesize memory sample for this case.
+ 	 */
+-	if (spe->sample_memory && data_src) {
++	if (spe->sample_memory && is_ldst_op(record->op)) {
+ 		err = arm_spe__synth_mem_sample(speq, spe->memory_id, data_src);
+ 		if (err)
+ 			return err;
+diff --git a/tools/perf/util/bpf-filter.l b/tools/perf/util/bpf-filter.l
+index f313404f95a90d..6aa65ade33851b 100644
+--- a/tools/perf/util/bpf-filter.l
++++ b/tools/perf/util/bpf-filter.l
+@@ -76,7 +76,7 @@ static int path_or_error(void)
+ num_dec		[0-9]+
+ num_hex		0[Xx][0-9a-fA-F]+
+ space		[ \t]+
+-path		[^ \t\n]+
++path		[^ \t\n,]+
+ ident		[_a-zA-Z][_a-zA-Z0-9]+
+ 
+ %%
+diff --git a/tools/perf/util/comm.c b/tools/perf/util/comm.c
+index 49b79cf0c5cc51..8aa456d7c2cd2d 100644
+--- a/tools/perf/util/comm.c
++++ b/tools/perf/util/comm.c
+@@ -5,6 +5,8 @@
+ #include <internal/rc_check.h>
+ #include <linux/refcount.h>
+ #include <linux/zalloc.h>
++#include <tools/libc_compat.h> // reallocarray
++
+ #include "rwsem.h"
+ 
+ DECLARE_RC_STRUCT(comm_str) {
+diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c
+index d633d15329fa09..e56330c85fe7e1 100644
+--- a/tools/perf/util/debug.c
++++ b/tools/perf/util/debug.c
+@@ -46,8 +46,8 @@ int debug_type_profile;
+ FILE *debug_file(void)
+ {
+ 	if (!_debug_file) {
+-		pr_warning_once("debug_file not set");
+ 		debug_set_file(stderr);
++		pr_warning_once("debug_file not set");
+ 	}
+ 	return _debug_file;
+ }
+diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h
+index bb8e8f444054d8..c0472a41147c3c 100644
+--- a/tools/perf/util/dso.h
++++ b/tools/perf/util/dso.h
+@@ -808,7 +808,9 @@ static inline bool dso__is_kcore(const struct dso *dso)
+ 
+ static inline bool dso__is_kallsyms(const struct dso *dso)
+ {
+-	return RC_CHK_ACCESS(dso)->kernel && RC_CHK_ACCESS(dso)->long_name[0] != '/';
++	enum dso_binary_type bt = dso__binary_type(dso);
++
++	return bt == DSO_BINARY_TYPE__KALLSYMS || bt == DSO_BINARY_TYPE__GUEST_KALLSYMS;
+ }
+ 
+ bool dso__is_object_file(const struct dso *dso);
+diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
+index a9df84692d4a88..dac87dccaaaa5d 100644
+--- a/tools/perf/util/evlist.c
++++ b/tools/perf/util/evlist.c
+@@ -1434,19 +1434,18 @@ static int evlist__create_syswide_maps(struct evlist *evlist)
+ 	 */
+ 	cpus = perf_cpu_map__new_online_cpus();
+ 	if (!cpus)
+-		goto out;
++		return -ENOMEM;
+ 
+ 	threads = perf_thread_map__new_dummy();
+-	if (!threads)
+-		goto out_put;
++	if (!threads) {
++		perf_cpu_map__put(cpus);
++		return -ENOMEM;
++	}
+ 
+ 	perf_evlist__set_maps(&evlist->core, cpus, threads);
+-
+ 	perf_thread_map__put(threads);
+-out_put:
+ 	perf_cpu_map__put(cpus);
+-out:
+-	return -ENOMEM;
++	return 0;
+ }
+ 
+ int evlist__open(struct evlist *evlist)
+diff --git a/tools/perf/util/intel-tpebs.c b/tools/perf/util/intel-tpebs.c
+index 50a3c3e0716065..2c421b475b3b8b 100644
+--- a/tools/perf/util/intel-tpebs.c
++++ b/tools/perf/util/intel-tpebs.c
+@@ -254,7 +254,7 @@ int tpebs_start(struct evlist *evsel_list)
+ 		new = zalloc(sizeof(*new));
+ 		if (!new) {
+ 			ret = -1;
+-			zfree(name);
++			zfree(&name);
+ 			goto err;
+ 		}
+ 		new->name = name;
+diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
+index ed893c3c6ad938..8b4e346808b4c2 100644
+--- a/tools/perf/util/pmu.c
++++ b/tools/perf/util/pmu.c
+@@ -593,7 +593,7 @@ static int perf_pmu__new_alias(struct perf_pmu *pmu, const char *name,
+ 			};
+ 			if (pmu_events_table__find_event(pmu->events_table, pmu, name,
+ 							 update_alias, &data) == 0)
+-				pmu->cpu_json_aliases++;
++				pmu->cpu_common_json_aliases++;
+ 		}
+ 		pmu->sysfs_aliases++;
+ 		break;
+@@ -1807,9 +1807,10 @@ size_t perf_pmu__num_events(struct perf_pmu *pmu)
+ 	if (pmu->cpu_aliases_added)
+ 		 nr += pmu->cpu_json_aliases;
+ 	else if (pmu->events_table)
+-		nr += pmu_events_table__num_events(pmu->events_table, pmu) - pmu->cpu_json_aliases;
++		nr += pmu_events_table__num_events(pmu->events_table, pmu) -
++			pmu->cpu_common_json_aliases;
+ 	else
+-		assert(pmu->cpu_json_aliases == 0);
++		assert(pmu->cpu_json_aliases == 0 && pmu->cpu_common_json_aliases == 0);
+ 
+ 	return pmu->selectable ? nr + 1 : nr;
+ }
+diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h
+index 4397c48ad569a3..bcd278b9b546fb 100644
+--- a/tools/perf/util/pmu.h
++++ b/tools/perf/util/pmu.h
+@@ -131,6 +131,11 @@ struct perf_pmu {
+ 	uint32_t cpu_json_aliases;
+ 	/** @sys_json_aliases: Number of json event aliases loaded matching the PMU's identifier. */
+ 	uint32_t sys_json_aliases;
++	/**
++	 * @cpu_common_json_aliases: Number of json events that overlapped with sysfs when
++	 * loading all sysfs events.
++	 */
++	uint32_t cpu_common_json_aliases;
+ 	/** @sysfs_aliases_loaded: Are sysfs aliases loaded from disk? */
+ 	bool sysfs_aliases_loaded;
+ 	/**
+diff --git a/tools/perf/util/pmus.c b/tools/perf/util/pmus.c
+index d7d67e09d759bb..362596ed272945 100644
+--- a/tools/perf/util/pmus.c
++++ b/tools/perf/util/pmus.c
+@@ -701,11 +701,25 @@ char *perf_pmus__default_pmu_name(void)
+ struct perf_pmu *evsel__find_pmu(const struct evsel *evsel)
+ {
+ 	struct perf_pmu *pmu = evsel->pmu;
++	bool legacy_core_type;
+ 
+-	if (!pmu) {
+-		pmu = perf_pmus__find_by_type(evsel->core.attr.type);
+-		((struct evsel *)evsel)->pmu = pmu;
++	if (pmu)
++		return pmu;
++
++	pmu = perf_pmus__find_by_type(evsel->core.attr.type);
++	legacy_core_type =
++		evsel->core.attr.type == PERF_TYPE_HARDWARE ||
++		evsel->core.attr.type == PERF_TYPE_HW_CACHE;
++	if (!pmu && legacy_core_type) {
++		if (perf_pmus__supports_extended_type()) {
++			u32 type = evsel->core.attr.config >> PERF_PMU_TYPE_SHIFT;
++
++			pmu = perf_pmus__find_by_type(type);
++		} else {
++			pmu = perf_pmus__find_core_pmu();
++		}
+ 	}
++	((struct evsel *)evsel)->pmu = pmu;
+ 	return pmu;
+ }
+ 
+diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
+index ee3d43a7ba4570..e7f36ea9e2fa12 100644
+--- a/tools/perf/util/python.c
++++ b/tools/perf/util/python.c
+@@ -79,7 +79,7 @@ struct pyrf_event {
+ };
+ 
+ #define sample_members \
+-	sample_member_def(sample_ip, ip, T_ULONGLONG, "event type"),			 \
++	sample_member_def(sample_ip, ip, T_ULONGLONG, "event ip"),			 \
+ 	sample_member_def(sample_pid, pid, T_INT, "event pid"),			 \
+ 	sample_member_def(sample_tid, tid, T_INT, "event tid"),			 \
+ 	sample_member_def(sample_time, time, T_ULONGLONG, "event timestamp"),		 \
+@@ -512,6 +512,11 @@ static PyObject *pyrf_event__new(union perf_event *event)
+ 	      event->header.type == PERF_RECORD_SWITCH_CPU_WIDE))
+ 		return NULL;
+ 
++	// FIXME this better be dynamic or we need to parse everything
++	// before calling perf_mmap__consume(), including tracepoint fields.
++	if (sizeof(pevent->event) < event->header.size)
++		return NULL;
++
+ 	ptype = pyrf_event__type[event->header.type];
+ 	pevent = PyObject_New(struct pyrf_event, ptype);
+ 	if (pevent != NULL)
+@@ -1011,20 +1016,22 @@ static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
+ 
+ 		evsel = evlist__event2evsel(evlist, event);
+ 		if (!evsel) {
++			Py_DECREF(pyevent);
+ 			Py_INCREF(Py_None);
+ 			return Py_None;
+ 		}
+ 
+ 		pevent->evsel = evsel;
+ 
+-		err = evsel__parse_sample(evsel, event, &pevent->sample);
+-
+-		/* Consume the even only after we parsed it out. */
+ 		perf_mmap__consume(&md->core);
+ 
+-		if (err)
++		err = evsel__parse_sample(evsel, &pevent->event, &pevent->sample);
++		if (err) {
++			Py_DECREF(pyevent);
+ 			return PyErr_Format(PyExc_OSError,
+ 					    "perf: can't parse sample, err=%d", err);
++		}
++
+ 		return pyevent;
+ 	}
+ end:
+diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
+index 99376c12dd8ec9..7c49997fab3a3a 100644
+--- a/tools/perf/util/stat-shadow.c
++++ b/tools/perf/util/stat-shadow.c
+@@ -154,6 +154,7 @@ static double find_stat(const struct evsel *evsel, int aggr_idx, enum stat_type
+ {
+ 	const struct evsel *cur;
+ 	int evsel_ctx = evsel_context(evsel);
++	struct perf_pmu *evsel_pmu = evsel__find_pmu(evsel);
+ 
+ 	evlist__for_each_entry(evsel->evlist, cur) {
+ 		struct perf_stat_aggr *aggr;
+@@ -180,7 +181,7 @@ static double find_stat(const struct evsel *evsel, int aggr_idx, enum stat_type
+ 		 * Except the SW CLOCK events,
+ 		 * ignore if not the PMU we're looking for.
+ 		 */
+-		if ((type != STAT_NSECS) && (evsel->pmu != cur->pmu))
++		if ((type != STAT_NSECS) && (evsel_pmu != evsel__find_pmu(cur)))
+ 			continue;
+ 
+ 		aggr = &cur->stats->aggr[aggr_idx];
+diff --git a/tools/perf/util/units.c b/tools/perf/util/units.c
+index 32c39cfe209b3b..4c6a86e1cb54b2 100644
+--- a/tools/perf/util/units.c
++++ b/tools/perf/util/units.c
+@@ -64,7 +64,7 @@ unsigned long convert_unit(unsigned long value, char *unit)
+ 
+ int unit_number__scnprintf(char *buf, size_t size, u64 n)
+ {
+-	char unit[4] = "BKMG";
++	char unit[] = "BKMG";
+ 	int i = 0;
+ 
+ 	while (((n / 1024) > 1) && (i < 3)) {
+diff --git a/tools/power/x86/turbostat/turbostat.8 b/tools/power/x86/turbostat/turbostat.8
+index 56c7ff6efcdabc..a3cf1d17163ae7 100644
+--- a/tools/power/x86/turbostat/turbostat.8
++++ b/tools/power/x86/turbostat/turbostat.8
+@@ -168,6 +168,8 @@ The system configuration dump (if --quiet is not used) is followed by statistics
+ .PP
+ \fBPkgTmp\fP Degrees Celsius reported by the per-package Package Thermal Monitor.
+ .PP
++\fBCoreThr\fP Core Thermal Throttling events during the measurement interval.  Note that events since boot can be found in /sys/devices/system/cpu/cpu*/thermal_throttle/*
++.PP
+ \fBGFX%rc6\fP The percentage of time the GPU is in the "render C6" state, rc6, during the measurement interval. From /sys/class/drm/card0/power/rc6_residency_ms or /sys/class/drm/card0/gt/gt0/rc6_residency_ms or /sys/class/drm/card0/device/tile0/gtN/gtidle/idle_residency_ms depending on the graphics driver being used.
+ .PP
+ \fBGFXMHz\fP Instantaneous snapshot of what sysfs presents at the end of the measurement interval. From /sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz or /sys/class/drm/card0/gt_cur_freq_mhz or /sys/class/drm/card0/gt/gt0/rps_cur_freq_mhz or /sys/class/drm/card0/device/tile0/gtN/freq0/cur_freq depending on the graphics driver being used.
+diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
+index 235e82fe7d0a56..77ef60980ee581 100644
+--- a/tools/power/x86/turbostat/turbostat.c
++++ b/tools/power/x86/turbostat/turbostat.c
+@@ -3242,7 +3242,7 @@ void delta_core(struct core_data *new, struct core_data *old)
+ 	old->c6 = new->c6 - old->c6;
+ 	old->c7 = new->c7 - old->c7;
+ 	old->core_temp_c = new->core_temp_c;
+-	old->core_throt_cnt = new->core_throt_cnt;
++	old->core_throt_cnt = new->core_throt_cnt - old->core_throt_cnt;
+ 	old->mc6_us = new->mc6_us - old->mc6_us;
+ 
+ 	DELTA_WRAP32(new->core_energy.raw_value, old->core_energy.raw_value);
+diff --git a/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c b/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c
+index cc184e4420f6e3..67557cda220835 100644
+--- a/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c
++++ b/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c
+@@ -6,6 +6,10 @@
+ #include <test_progs.h>
+ #include "bloom_filter_map.skel.h"
+ 
++#ifndef NUMA_NO_NODE
++#define NUMA_NO_NODE	(-1)
++#endif
++
+ static void test_fail_cases(void)
+ {
+ 	LIBBPF_OPTS(bpf_map_create_opts, opts);
+@@ -69,6 +73,7 @@ static void test_success_cases(void)
+ 
+ 	/* Create a map */
+ 	opts.map_flags = BPF_F_ZERO_SEED | BPF_F_NUMA_NODE;
++	opts.numa_node = NUMA_NO_NODE;
+ 	fd = bpf_map_create(BPF_MAP_TYPE_BLOOM_FILTER, NULL, 0, sizeof(value), 100, &opts);
+ 	if (!ASSERT_GE(fd, 0, "bpf_map_create bloom filter success case"))
+ 		return;
+diff --git a/tools/testing/selftests/bpf/prog_tests/tailcalls.c b/tools/testing/selftests/bpf/prog_tests/tailcalls.c
+index 40f22454cf05b0..1f0977742741f3 100644
+--- a/tools/testing/selftests/bpf/prog_tests/tailcalls.c
++++ b/tools/testing/selftests/bpf/prog_tests/tailcalls.c
+@@ -1599,6 +1599,7 @@ static void test_tailcall_bpf2bpf_freplace(void)
+ 		goto out;
+ 
+ 	err = bpf_link__destroy(freplace_link);
++	freplace_link = NULL;
+ 	if (!ASSERT_OK(err, "destroy link"))
+ 		goto out;
+ 
+diff --git a/tools/testing/selftests/bpf/progs/strncmp_bench.c b/tools/testing/selftests/bpf/progs/strncmp_bench.c
+index 18373a7df76e6c..f47bf88f8d2a73 100644
+--- a/tools/testing/selftests/bpf/progs/strncmp_bench.c
++++ b/tools/testing/selftests/bpf/progs/strncmp_bench.c
+@@ -35,7 +35,10 @@ static __always_inline int local_strncmp(const char *s1, unsigned int sz,
+ SEC("tp/syscalls/sys_enter_getpgid")
+ int strncmp_no_helper(void *ctx)
+ {
+-	if (local_strncmp(str, cmp_str_len + 1, target) < 0)
++	const char *target_str = target;
++
++	barrier_var(target_str);
++	if (local_strncmp(str, cmp_str_len + 1, target_str) < 0)
+ 		__sync_add_and_fetch(&hits, 1);
+ 	return 0;
+ }
+diff --git a/tools/testing/selftests/mm/cow.c b/tools/testing/selftests/mm/cow.c
+index 1238e1c5aae150..d87c5b1763ff15 100644
+--- a/tools/testing/selftests/mm/cow.c
++++ b/tools/testing/selftests/mm/cow.c
+@@ -876,7 +876,7 @@ static void do_run_with_thp(test_fn fn, enum thp_run thp_run, size_t thpsize)
+ 		mremap_size = thpsize / 2;
+ 		mremap_mem = mmap(NULL, mremap_size, PROT_NONE,
+ 				  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+-		if (mem == MAP_FAILED) {
++		if (mremap_mem == MAP_FAILED) {
+ 			ksft_test_result_fail("mmap() failed\n");
+ 			goto munmap;
+ 		}
+diff --git a/tools/testing/selftests/net/netfilter/br_netfilter.sh b/tools/testing/selftests/net/netfilter/br_netfilter.sh
+index c28379a965d838..1559ba275105ed 100755
+--- a/tools/testing/selftests/net/netfilter/br_netfilter.sh
++++ b/tools/testing/selftests/net/netfilter/br_netfilter.sh
+@@ -13,6 +13,12 @@ source lib.sh
+ 
+ checktool "nft --version" "run test without nft tool"
+ 
++read t < /proc/sys/kernel/tainted
++if [ "$t" -ne 0 ];then
++	echo SKIP: kernel is tainted
++	exit $ksft_skip
++fi
++
+ cleanup() {
+ 	cleanup_all_ns
+ }
+@@ -165,6 +171,7 @@ if [ "$t" -eq 0 ];then
+ 	echo PASS: kernel not tainted
+ else
+ 	echo ERROR: kernel is tainted
++	dmesg
+ 	ret=1
+ fi
+ 
+diff --git a/tools/testing/selftests/net/netfilter/br_netfilter_queue.sh b/tools/testing/selftests/net/netfilter/br_netfilter_queue.sh
+index 6a764d70ab06f9..4788641717d935 100755
+--- a/tools/testing/selftests/net/netfilter/br_netfilter_queue.sh
++++ b/tools/testing/selftests/net/netfilter/br_netfilter_queue.sh
+@@ -4,6 +4,12 @@ source lib.sh
+ 
+ checktool "nft --version" "run test without nft tool"
+ 
++read t < /proc/sys/kernel/tainted
++if [ "$t" -ne 0 ];then
++	echo SKIP: kernel is tainted
++	exit $ksft_skip
++fi
++
+ cleanup() {
+ 	cleanup_all_ns
+ }
+@@ -72,6 +78,7 @@ if [ "$t" -eq 0 ];then
+ 	echo PASS: kernel not tainted
+ else
+ 	echo ERROR: kernel is tainted
++	dmesg
+ 	exit 1
+ fi
+ 
+diff --git a/tools/testing/selftests/net/netfilter/nft_queue.sh b/tools/testing/selftests/net/netfilter/nft_queue.sh
+index a9d109fcc15c25..00fe1a6c1f30c4 100755
+--- a/tools/testing/selftests/net/netfilter/nft_queue.sh
++++ b/tools/testing/selftests/net/netfilter/nft_queue.sh
+@@ -593,6 +593,7 @@ EOF
+ 		echo "PASS: queue program exiting while packets queued"
+ 	else
+ 		echo "TAINT: queue program exiting while packets queued"
++		dmesg
+ 		ret=1
+ 	fi
+ }

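A side note on the evsel__find_pmu() hunk above: on hybrid systems the
legacy PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE encodings carry the PMU
type in the upper 32 bits of attr.config, which is what the
PERF_PMU_TYPE_SHIFT right shift extracts before falling back to the core
PMU. A minimal stand-alone sketch of that decoding step, assuming
PERF_PMU_TYPE_SHIFT is 32 as in the perf UAPI and using a made-up PMU
type of 8 purely for illustration:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PERF_PMU_TYPE_SHIFT 32	/* assumption: matches the perf UAPI value */

int main(void)
{
	/* hypothetical attr.config: PMU type 8 in bits 63:32, event 0x3c in bits 31:0 */
	uint64_t config = ((uint64_t)8 << PERF_PMU_TYPE_SHIFT) | 0x3c;
	uint32_t pmu_type = (uint32_t)(config >> PERF_PMU_TYPE_SHIFT);
	uint32_t event = (uint32_t)config;

	printf("pmu type %" PRIu32 ", event 0x%" PRIx32 "\n", pmu_type, event);
	return 0;
}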

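The one-line units.c change is also worth a second look: char unit[4] =
"BKMG" is legal C but leaves no room for the terminating NUL, so the
array is not a string and newer compilers warn about the unterminated
initializer; dropping the explicit bound sizes the array to five bytes.
A small sketch of the difference (illustration only, not the perf code):

#include <stdio.h>

int main(void)
{
	char bad[4] = "BKMG";	/* exactly four chars, no trailing '\0' */
	char good[] = "BKMG";	/* sized to five, NUL terminator included */

	printf("sizeof(bad)=%zu sizeof(good)=%zu\n", sizeof(bad), sizeof(good));
	/* indexing bad[i] as a char is fine; printing it with "%s" would
	 * read past the end of the array */
	return 0;
}
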
* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2025-04-07 10:30 Mike Pagano
  0 siblings, 0 replies; 82+ messages in thread
From: Mike Pagano @ 2025-04-07 10:30 UTC (permalink / raw
  To: gentoo-commits

commit:     557a0c081003662fedb1e21ff88bad89f2a69a13
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Apr  7 10:30:01 2025 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Apr  7 10:30:01 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=557a0c08

Linux patch 6.12.22

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |   4 +
 1021_linux-6.12.22.patch | 916 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 920 insertions(+)

diff --git a/0000_README b/0000_README
index 6196ef75..696eb7c9 100644
--- a/0000_README
+++ b/0000_README
@@ -127,6 +127,10 @@ Patch:  1020_linux-6.12.21.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.12.21
 
+Patch:  1021_linux-6.12.22.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.12.22
+
 Patch:  1500_fortify-copy-size-value-range-tracking-fix.patch
 From:   https://git.kernel.org/
 Desc:   fortify: Hide run-time copy size from value range tracking

diff --git a/1021_linux-6.12.22.patch b/1021_linux-6.12.22.patch
new file mode 100644
index 00000000..dd1e3934
--- /dev/null
+++ b/1021_linux-6.12.22.patch
@@ -0,0 +1,916 @@
+diff --git a/Makefile b/Makefile
+index a646151342b832..f380005d1600ad 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 12
+-SUBLEVEL = 21
++SUBLEVEL = 22
+ EXTRAVERSION =
+ NAME = Baby Opossum Posse
+ 
+diff --git a/drivers/counter/microchip-tcb-capture.c b/drivers/counter/microchip-tcb-capture.c
+index b3e615cbd2caa6..461f57f66631c3 100644
+--- a/drivers/counter/microchip-tcb-capture.c
++++ b/drivers/counter/microchip-tcb-capture.c
+@@ -368,6 +368,25 @@ static int mchp_tc_probe(struct platform_device *pdev)
+ 			channel);
+ 	}
+ 
++	/* Disable Quadrature Decoder and position measure */
++	ret = regmap_update_bits(regmap, ATMEL_TC_BMR, ATMEL_TC_QDEN | ATMEL_TC_POSEN, 0);
++	if (ret)
++		return ret;
++
++	/* Setup the period capture mode */
++	ret = regmap_update_bits(regmap, ATMEL_TC_REG(priv->channel[0], CMR),
++				 ATMEL_TC_WAVE | ATMEL_TC_ABETRG | ATMEL_TC_CMR_MASK |
++				 ATMEL_TC_TCCLKS,
++				 ATMEL_TC_CMR_MASK);
++	if (ret)
++		return ret;
++
++	/* Enable clock and trigger counter */
++	ret = regmap_write(regmap, ATMEL_TC_REG(priv->channel[0], CCR),
++			   ATMEL_TC_CLKEN | ATMEL_TC_SWTRG);
++	if (ret)
++		return ret;
++
+ 	priv->tc_cfg = tcb_config;
+ 	priv->regmap = regmap;
+ 	counter->name = dev_name(&pdev->dev);
+diff --git a/drivers/counter/stm32-lptimer-cnt.c b/drivers/counter/stm32-lptimer-cnt.c
+index 8439755559b219..537fe9b669f352 100644
+--- a/drivers/counter/stm32-lptimer-cnt.c
++++ b/drivers/counter/stm32-lptimer-cnt.c
+@@ -58,37 +58,43 @@ static int stm32_lptim_set_enable_state(struct stm32_lptim_cnt *priv,
+ 		return 0;
+ 	}
+ 
++	ret = clk_enable(priv->clk);
++	if (ret)
++		goto disable_cnt;
++
+ 	/* LP timer must be enabled before writing CMP & ARR */
+ 	ret = regmap_write(priv->regmap, STM32_LPTIM_ARR, priv->ceiling);
+ 	if (ret)
+-		return ret;
++		goto disable_clk;
+ 
+ 	ret = regmap_write(priv->regmap, STM32_LPTIM_CMP, 0);
+ 	if (ret)
+-		return ret;
++		goto disable_clk;
+ 
+ 	/* ensure CMP & ARR registers are properly written */
+ 	ret = regmap_read_poll_timeout(priv->regmap, STM32_LPTIM_ISR, val,
+ 				       (val & STM32_LPTIM_CMPOK_ARROK) == STM32_LPTIM_CMPOK_ARROK,
+ 				       100, 1000);
+ 	if (ret)
+-		return ret;
++		goto disable_clk;
+ 
+ 	ret = regmap_write(priv->regmap, STM32_LPTIM_ICR,
+ 			   STM32_LPTIM_CMPOKCF_ARROKCF);
+ 	if (ret)
+-		return ret;
++		goto disable_clk;
+ 
+-	ret = clk_enable(priv->clk);
+-	if (ret) {
+-		regmap_write(priv->regmap, STM32_LPTIM_CR, 0);
+-		return ret;
+-	}
+ 	priv->enabled = true;
+ 
+ 	/* Start LP timer in continuous mode */
+ 	return regmap_update_bits(priv->regmap, STM32_LPTIM_CR,
+ 				  STM32_LPTIM_CNTSTRT, STM32_LPTIM_CNTSTRT);
++
++disable_clk:
++	clk_disable(priv->clk);
++disable_cnt:
++	regmap_write(priv->regmap, STM32_LPTIM_CR, 0);
++
++	return ret;
+ }
+ 
+ static int stm32_lptim_setup(struct stm32_lptim_cnt *priv, int enable)
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index d9a3917d207e93..c4c6538eabae6d 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -3231,8 +3231,7 @@ static int dm_resume(void *handle)
+ 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
+ 	enum dc_connection_type new_connection_type = dc_connection_none;
+ 	struct dc_state *dc_state;
+-	int i, r, j, ret;
+-	bool need_hotplug = false;
++	int i, r, j;
+ 	struct dc_commit_streams_params commit_params = {};
+ 
+ 	if (dm->dc->caps.ips_support) {
+@@ -3427,23 +3426,16 @@ static int dm_resume(void *handle)
+ 		    aconnector->mst_root)
+ 			continue;
+ 
+-		ret = drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr, true);
+-
+-		if (ret < 0) {
+-			dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
+-					aconnector->dc_link);
+-			need_hotplug = true;
+-		}
++		drm_dp_mst_topology_queue_probe(&aconnector->mst_mgr);
+ 	}
+ 	drm_connector_list_iter_end(&iter);
+ 
+-	if (need_hotplug)
+-		drm_kms_helper_hotplug_event(ddev);
+-
+ 	amdgpu_dm_irq_resume_late(adev);
+ 
+ 	amdgpu_dm_smu_write_watermarks_table(adev);
+ 
++	drm_kms_helper_hotplug_event(ddev);
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/hid/hid-plantronics.c b/drivers/hid/hid-plantronics.c
+index 25cfd964dc25d9..acb9eb18f7ccfe 100644
+--- a/drivers/hid/hid-plantronics.c
++++ b/drivers/hid/hid-plantronics.c
+@@ -6,9 +6,6 @@
+  *  Copyright (c) 2015-2018 Terry Junge <terry.junge@plantronics.com>
+  */
+ 
+-/*
+- */
+-
+ #include "hid-ids.h"
+ 
+ #include <linux/hid.h>
+@@ -23,30 +20,28 @@
+ 
+ #define PLT_VOL_UP		0x00b1
+ #define PLT_VOL_DOWN		0x00b2
++#define PLT_MIC_MUTE		0x00b5
+ 
+ #define PLT1_VOL_UP		(PLT_HID_1_0_PAGE | PLT_VOL_UP)
+ #define PLT1_VOL_DOWN		(PLT_HID_1_0_PAGE | PLT_VOL_DOWN)
++#define PLT1_MIC_MUTE		(PLT_HID_1_0_PAGE | PLT_MIC_MUTE)
+ #define PLT2_VOL_UP		(PLT_HID_2_0_PAGE | PLT_VOL_UP)
+ #define PLT2_VOL_DOWN		(PLT_HID_2_0_PAGE | PLT_VOL_DOWN)
++#define PLT2_MIC_MUTE		(PLT_HID_2_0_PAGE | PLT_MIC_MUTE)
++#define HID_TELEPHONY_MUTE	(HID_UP_TELEPHONY | 0x2f)
++#define HID_CONSUMER_MUTE	(HID_UP_CONSUMER | 0xe2)
+ 
+ #define PLT_DA60		0xda60
+ #define PLT_BT300_MIN		0x0413
+ #define PLT_BT300_MAX		0x0418
+ 
+-
+-#define PLT_ALLOW_CONSUMER (field->application == HID_CP_CONSUMERCONTROL && \
+-			    (usage->hid & HID_USAGE_PAGE) == HID_UP_CONSUMER)
+-
+-#define PLT_QUIRK_DOUBLE_VOLUME_KEYS BIT(0)
+-#define PLT_QUIRK_FOLLOWED_OPPOSITE_VOLUME_KEYS BIT(1)
+-
+ #define PLT_DOUBLE_KEY_TIMEOUT 5 /* ms */
+-#define PLT_FOLLOWED_OPPOSITE_KEY_TIMEOUT 220 /* ms */
+ 
+ struct plt_drv_data {
+ 	unsigned long device_type;
+-	unsigned long last_volume_key_ts;
+-	u32 quirks;
++	unsigned long last_key_ts;
++	unsigned long double_key_to;
++	__u16 last_key;
+ };
+ 
+ static int plantronics_input_mapping(struct hid_device *hdev,
+@@ -58,34 +53,43 @@ static int plantronics_input_mapping(struct hid_device *hdev,
+ 	unsigned short mapped_key;
+ 	struct plt_drv_data *drv_data = hid_get_drvdata(hdev);
+ 	unsigned long plt_type = drv_data->device_type;
++	int allow_mute = usage->hid == HID_TELEPHONY_MUTE;
++	int allow_consumer = field->application == HID_CP_CONSUMERCONTROL &&
++			(usage->hid & HID_USAGE_PAGE) == HID_UP_CONSUMER &&
++			usage->hid != HID_CONSUMER_MUTE;
+ 
+ 	/* special case for PTT products */
+ 	if (field->application == HID_GD_JOYSTICK)
+ 		goto defaulted;
+ 
+-	/* handle volume up/down mapping */
+ 	/* non-standard types or multi-HID interfaces - plt_type is PID */
+ 	if (!(plt_type & HID_USAGE_PAGE)) {
+ 		switch (plt_type) {
+ 		case PLT_DA60:
+-			if (PLT_ALLOW_CONSUMER)
++			if (allow_consumer)
+ 				goto defaulted;
+-			goto ignored;
++			if (usage->hid == HID_CONSUMER_MUTE) {
++				mapped_key = KEY_MICMUTE;
++				goto mapped;
++			}
++			break;
+ 		default:
+-			if (PLT_ALLOW_CONSUMER)
++			if (allow_consumer || allow_mute)
+ 				goto defaulted;
+ 		}
++		goto ignored;
+ 	}
+-	/* handle standard types - plt_type is 0xffa0uuuu or 0xffa2uuuu */
+-	/* 'basic telephony compliant' - allow default consumer page map */
+-	else if ((plt_type & HID_USAGE) >= PLT_BASIC_TELEPHONY &&
+-		 (plt_type & HID_USAGE) != PLT_BASIC_EXCEPTION) {
+-		if (PLT_ALLOW_CONSUMER)
+-			goto defaulted;
+-	}
+-	/* not 'basic telephony' - apply legacy mapping */
+-	/* only map if the field is in the device's primary vendor page */
+-	else if (!((field->application ^ plt_type) & HID_USAGE_PAGE)) {
++
++	/* handle standard consumer control mapping */
++	/* and standard telephony mic mute mapping */
++	if (allow_consumer || allow_mute)
++		goto defaulted;
++
++	/* handle vendor unique types - plt_type is 0xffa0uuuu or 0xffa2uuuu */
++	/* if not 'basic telephony compliant' - map vendor unique controls */
++	if (!((plt_type & HID_USAGE) >= PLT_BASIC_TELEPHONY &&
++	      (plt_type & HID_USAGE) != PLT_BASIC_EXCEPTION) &&
++	      !((field->application ^ plt_type) & HID_USAGE_PAGE))
+ 		switch (usage->hid) {
+ 		case PLT1_VOL_UP:
+ 		case PLT2_VOL_UP:
+@@ -95,8 +99,11 @@ static int plantronics_input_mapping(struct hid_device *hdev,
+ 		case PLT2_VOL_DOWN:
+ 			mapped_key = KEY_VOLUMEDOWN;
+ 			goto mapped;
++		case PLT1_MIC_MUTE:
++		case PLT2_MIC_MUTE:
++			mapped_key = KEY_MICMUTE;
++			goto mapped;
+ 		}
+-	}
+ 
+ /*
+  * Future mapping of call control or other usages,
+@@ -105,6 +112,8 @@ static int plantronics_input_mapping(struct hid_device *hdev,
+  */
+ 
+ ignored:
++	hid_dbg(hdev, "usage: %08x (appl: %08x) - ignored\n",
++		usage->hid, field->application);
+ 	return -1;
+ 
+ defaulted:
+@@ -123,38 +132,26 @@ static int plantronics_event(struct hid_device *hdev, struct hid_field *field,
+ 			     struct hid_usage *usage, __s32 value)
+ {
+ 	struct plt_drv_data *drv_data = hid_get_drvdata(hdev);
++	unsigned long prev_tsto, cur_ts;
++	__u16 prev_key, cur_key;
+ 
+-	if (drv_data->quirks & PLT_QUIRK_DOUBLE_VOLUME_KEYS) {
+-		unsigned long prev_ts, cur_ts;
++	/* Usages are filtered in plantronics_usages. */
+ 
+-		/* Usages are filtered in plantronics_usages. */
++	/* HZ too low for ms resolution - double key detection disabled */
++	/* or it is a key release - handle key presses only. */
++	if (!drv_data->double_key_to || !value)
++		return 0;
+ 
+-		if (!value) /* Handle key presses only. */
+-			return 0;
++	prev_tsto = drv_data->last_key_ts + drv_data->double_key_to;
++	cur_ts = drv_data->last_key_ts = jiffies;
++	prev_key = drv_data->last_key;
++	cur_key = drv_data->last_key = usage->code;
+ 
+-		prev_ts = drv_data->last_volume_key_ts;
+-		cur_ts = jiffies;
+-		if (jiffies_to_msecs(cur_ts - prev_ts) <= PLT_DOUBLE_KEY_TIMEOUT)
+-			return 1; /* Ignore the repeated key. */
+-
+-		drv_data->last_volume_key_ts = cur_ts;
++	/* If the same key occurs in <= double_key_to -- ignore it */
++	if (prev_key == cur_key && time_before_eq(cur_ts, prev_tsto)) {
++		hid_dbg(hdev, "double key %d ignored\n", cur_key);
++		return 1; /* Ignore the repeated key. */
+ 	}
+-	if (drv_data->quirks & PLT_QUIRK_FOLLOWED_OPPOSITE_VOLUME_KEYS) {
+-		unsigned long prev_ts, cur_ts;
+-
+-		/* Usages are filtered in plantronics_usages. */
+-
+-		if (!value) /* Handle key presses only. */
+-			return 0;
+-
+-		prev_ts = drv_data->last_volume_key_ts;
+-		cur_ts = jiffies;
+-		if (jiffies_to_msecs(cur_ts - prev_ts) <= PLT_FOLLOWED_OPPOSITE_KEY_TIMEOUT)
+-			return 1; /* Ignore the followed opposite volume key. */
+-
+-		drv_data->last_volume_key_ts = cur_ts;
+-	}
+-
+ 	return 0;
+ }
+ 
+@@ -196,12 +193,16 @@ static int plantronics_probe(struct hid_device *hdev,
+ 	ret = hid_parse(hdev);
+ 	if (ret) {
+ 		hid_err(hdev, "parse failed\n");
+-		goto err;
++		return ret;
+ 	}
+ 
+ 	drv_data->device_type = plantronics_device_type(hdev);
+-	drv_data->quirks = id->driver_data;
+-	drv_data->last_volume_key_ts = jiffies - msecs_to_jiffies(PLT_DOUBLE_KEY_TIMEOUT);
++	drv_data->double_key_to = msecs_to_jiffies(PLT_DOUBLE_KEY_TIMEOUT);
++	drv_data->last_key_ts = jiffies - drv_data->double_key_to;
++
++	/* if HZ does not allow ms resolution - disable double key detection */
++	if (drv_data->double_key_to < PLT_DOUBLE_KEY_TIMEOUT)
++		drv_data->double_key_to = 0;
+ 
+ 	hid_set_drvdata(hdev, drv_data);
+ 
+@@ -210,29 +211,10 @@ static int plantronics_probe(struct hid_device *hdev,
+ 	if (ret)
+ 		hid_err(hdev, "hw start failed\n");
+ 
+-err:
+ 	return ret;
+ }
+ 
+ static const struct hid_device_id plantronics_devices[] = {
+-	{ HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS,
+-					 USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3210_SERIES),
+-		.driver_data = PLT_QUIRK_DOUBLE_VOLUME_KEYS },
+-	{ HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS,
+-					 USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3220_SERIES),
+-		.driver_data = PLT_QUIRK_DOUBLE_VOLUME_KEYS },
+-	{ HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS,
+-					 USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3215_SERIES),
+-		.driver_data = PLT_QUIRK_DOUBLE_VOLUME_KEYS },
+-	{ HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS,
+-					 USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3225_SERIES),
+-		.driver_data = PLT_QUIRK_DOUBLE_VOLUME_KEYS },
+-	{ HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS,
+-					 USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3325_SERIES),
+-		.driver_data = PLT_QUIRK_FOLLOWED_OPPOSITE_VOLUME_KEYS },
+-	{ HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS,
+-					 USB_DEVICE_ID_PLANTRONICS_ENCOREPRO_500_SERIES),
+-		.driver_data = PLT_QUIRK_FOLLOWED_OPPOSITE_VOLUME_KEYS },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, HID_ANY_ID) },
+ 	{ }
+ };
+@@ -241,6 +223,14 @@ MODULE_DEVICE_TABLE(hid, plantronics_devices);
+ static const struct hid_usage_id plantronics_usages[] = {
+ 	{ HID_CP_VOLUMEUP, EV_KEY, HID_ANY_ID },
+ 	{ HID_CP_VOLUMEDOWN, EV_KEY, HID_ANY_ID },
++	{ HID_TELEPHONY_MUTE, EV_KEY, HID_ANY_ID },
++	{ HID_CONSUMER_MUTE, EV_KEY, HID_ANY_ID },
++	{ PLT2_VOL_UP, EV_KEY, HID_ANY_ID },
++	{ PLT2_VOL_DOWN, EV_KEY, HID_ANY_ID },
++	{ PLT2_MIC_MUTE, EV_KEY, HID_ANY_ID },
++	{ PLT1_VOL_UP, EV_KEY, HID_ANY_ID },
++	{ PLT1_VOL_DOWN, EV_KEY, HID_ANY_ID },
++	{ PLT1_MIC_MUTE, EV_KEY, HID_ANY_ID },
+ 	{ HID_TERMINATOR, HID_TERMINATOR, HID_TERMINATOR }
+ };
+ 
+diff --git a/drivers/memstick/host/rtsx_usb_ms.c b/drivers/memstick/host/rtsx_usb_ms.c
+index ffdd8de9ec5d79..d99f8922d4ad04 100644
+--- a/drivers/memstick/host/rtsx_usb_ms.c
++++ b/drivers/memstick/host/rtsx_usb_ms.c
+@@ -813,6 +813,7 @@ static void rtsx_usb_ms_drv_remove(struct platform_device *pdev)
+ 
+ 	host->eject = true;
+ 	cancel_work_sync(&host->handle_req);
++	cancel_delayed_work_sync(&host->poll_card);
+ 
+ 	mutex_lock(&host->host_mutex);
+ 	if (host->req) {
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 9fe7f704a2f7b8..944a33361dae59 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1365,9 +1365,11 @@ static const struct usb_device_id products[] = {
+ 	{QMI_QUIRK_SET_DTR(0x1bc7, 0x10a0, 0)}, /* Telit FN920C04 */
+ 	{QMI_QUIRK_SET_DTR(0x1bc7, 0x10a4, 0)}, /* Telit FN920C04 */
+ 	{QMI_QUIRK_SET_DTR(0x1bc7, 0x10a9, 0)}, /* Telit FN920C04 */
++	{QMI_QUIRK_SET_DTR(0x1bc7, 0x10b0, 0)}, /* Telit FE990B */
+ 	{QMI_QUIRK_SET_DTR(0x1bc7, 0x10c0, 0)}, /* Telit FE910C04 */
+ 	{QMI_QUIRK_SET_DTR(0x1bc7, 0x10c4, 0)}, /* Telit FE910C04 */
+ 	{QMI_QUIRK_SET_DTR(0x1bc7, 0x10c8, 0)}, /* Telit FE910C04 */
++	{QMI_QUIRK_SET_DTR(0x1bc7, 0x10d0, 0)}, /* Telit FN990B */
+ 	{QMI_FIXED_INTF(0x1bc7, 0x1100, 3)},	/* Telit ME910 */
+ 	{QMI_FIXED_INTF(0x1bc7, 0x1101, 3)},	/* Telit ME910 dual modem */
+ 	{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)},	/* Telit LE920 */
+diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
+index 44179f4e807fc3..aeab2308b15008 100644
+--- a/drivers/net/usb/usbnet.c
++++ b/drivers/net/usb/usbnet.c
+@@ -178,6 +178,17 @@ int usbnet_get_ethernet_addr(struct usbnet *dev, int iMACAddress)
+ }
+ EXPORT_SYMBOL_GPL(usbnet_get_ethernet_addr);
+ 
++static bool usbnet_needs_usb_name_format(struct usbnet *dev, struct net_device *net)
++{
++	/* Point to point devices which don't have a real MAC address
++	 * (or report a fake local one) have historically used the usb%d
++	 * naming. Preserve this..
++	 */
++	return (dev->driver_info->flags & FLAG_POINTTOPOINT) != 0 &&
++		(is_zero_ether_addr(net->dev_addr) ||
++		 is_local_ether_addr(net->dev_addr));
++}
++
+ static void intr_complete (struct urb *urb)
+ {
+ 	struct usbnet	*dev = urb->context;
+@@ -1762,13 +1773,11 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
+ 		if (status < 0)
+ 			goto out1;
+ 
+-		// heuristic:  "usb%d" for links we know are two-host,
+-		// else "eth%d" when there's reasonable doubt.  userspace
+-		// can rename the link if it knows better.
++		/* heuristic: rename to "eth%d" if we are not sure this link
++		 * is two-host (these links keep "usb%d")
++		 */
+ 		if ((dev->driver_info->flags & FLAG_ETHER) != 0 &&
+-		    ((dev->driver_info->flags & FLAG_POINTTOPOINT) == 0 ||
+-		     /* somebody touched it*/
+-		     !is_zero_ether_addr(net->dev_addr)))
++		    !usbnet_needs_usb_name_format(dev, net))
+ 			strscpy(net->name, "eth%d", sizeof(net->name));
+ 		/* WLAN devices should always be named "wlan%d" */
+ 		if ((dev->driver_info->flags & FLAG_WLAN) != 0)
+diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c
+index f245a84f4a508d..bdd26c9f34bdf2 100644
+--- a/drivers/tty/serial/8250/8250_dma.c
++++ b/drivers/tty/serial/8250/8250_dma.c
+@@ -162,7 +162,7 @@ void serial8250_tx_dma_flush(struct uart_8250_port *p)
+ 	 */
+ 	dma->tx_size = 0;
+ 
+-	dmaengine_terminate_async(dma->rxchan);
++	dmaengine_terminate_async(dma->txchan);
+ }
+ 
+ int serial8250_rx_dma(struct uart_8250_port *p)
+diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
+index de6d90bf0d70a2..b3c19ba777c68d 100644
+--- a/drivers/tty/serial/8250/8250_pci.c
++++ b/drivers/tty/serial/8250/8250_pci.c
+@@ -2687,6 +2687,22 @@ static struct pci_serial_quirk pci_serial_quirks[] = {
+ 		.init		= pci_oxsemi_tornado_init,
+ 		.setup		= pci_oxsemi_tornado_setup,
+ 	},
++	{
++		.vendor		= PCI_VENDOR_ID_INTASHIELD,
++		.device		= 0x4026,
++		.subvendor	= PCI_ANY_ID,
++		.subdevice	= PCI_ANY_ID,
++		.init		= pci_oxsemi_tornado_init,
++		.setup		= pci_oxsemi_tornado_setup,
++	},
++	{
++		.vendor		= PCI_VENDOR_ID_INTASHIELD,
++		.device		= 0x4021,
++		.subvendor	= PCI_ANY_ID,
++		.subdevice	= PCI_ANY_ID,
++		.init		= pci_oxsemi_tornado_init,
++		.setup		= pci_oxsemi_tornado_setup,
++	},
+ 	{
+ 		.vendor         = PCI_VENDOR_ID_INTEL,
+ 		.device         = 0x8811,
+@@ -5213,6 +5229,14 @@ static const struct pci_device_id serial_pci_tbl[] = {
+ 		PCI_ANY_ID, PCI_ANY_ID,
+ 		0, 0,
+ 		pbn_b2_2_115200 },
++	{       PCI_VENDOR_ID_INTASHIELD, 0x0BA2,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0,
++		pbn_b2_2_115200 },
++	{       PCI_VENDOR_ID_INTASHIELD, 0x0BA3,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0,
++		pbn_b2_2_115200 },
+ 	/*
+ 	 * Brainboxes UC-235/246
+ 	 */
+@@ -5333,6 +5357,14 @@ static const struct pci_device_id serial_pci_tbl[] = {
+ 		PCI_ANY_ID, PCI_ANY_ID,
+ 		0, 0,
+ 		pbn_b2_4_115200 },
++	{	PCI_VENDOR_ID_INTASHIELD, 0x0C42,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0,
++		pbn_b2_4_115200 },
++	{	PCI_VENDOR_ID_INTASHIELD, 0x0C43,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0,
++		pbn_b2_4_115200 },
+ 	/*
+ 	 * Brainboxes UC-420
+ 	 */
+@@ -5559,6 +5591,20 @@ static const struct pci_device_id serial_pci_tbl[] = {
+ 		PCI_ANY_ID, PCI_ANY_ID,
+ 		0, 0,
+ 		pbn_oxsemi_1_15625000 },
++	/*
++	 * Brainboxes XC-235
++	 */
++	{	PCI_VENDOR_ID_INTASHIELD, 0x4026,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0,
++		pbn_oxsemi_1_15625000 },
++	/*
++	 * Brainboxes XC-475
++	 */
++	{	PCI_VENDOR_ID_INTASHIELD, 0x4021,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0,
++		pbn_oxsemi_1_15625000 },
+ 
+ 	/*
+ 	 * Perle PCI-RAS cards
+diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
+index 77efa7ee6eda29..9f9fc733eb2c1f 100644
+--- a/drivers/tty/serial/fsl_lpuart.c
++++ b/drivers/tty/serial/fsl_lpuart.c
+@@ -1483,6 +1483,19 @@ static int lpuart32_config_rs485(struct uart_port *port, struct ktermios *termio
+ 
+ 	unsigned long modem = lpuart32_read(&sport->port, UARTMODIR)
+ 				& ~(UARTMODIR_TXRTSPOL | UARTMODIR_TXRTSE);
++	u32 ctrl;
++
++	/* TXRTSE and TXRTSPOL only can be changed when transmitter is disabled. */
++	ctrl = lpuart32_read(&sport->port, UARTCTRL);
++	if (ctrl & UARTCTRL_TE) {
++		/* wait for the transmit engine to complete */
++		lpuart32_wait_bit_set(&sport->port, UARTSTAT, UARTSTAT_TC);
++		lpuart32_write(&sport->port, ctrl & ~UARTCTRL_TE, UARTCTRL);
++
++		while (lpuart32_read(&sport->port, UARTCTRL) & UARTCTRL_TE)
++			cpu_relax();
++	}
++
+ 	lpuart32_write(&sport->port, modem, UARTMODIR);
+ 
+ 	if (rs485->flags & SER_RS485_ENABLED) {
+@@ -1502,6 +1515,10 @@ static int lpuart32_config_rs485(struct uart_port *port, struct ktermios *termio
+ 	}
+ 
+ 	lpuart32_write(&sport->port, modem, UARTMODIR);
++
++	if (ctrl & UARTCTRL_TE)
++		lpuart32_write(&sport->port, ctrl, UARTCTRL);
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
+index f5199fdecff278..9b9981352b1e1a 100644
+--- a/drivers/tty/serial/stm32-usart.c
++++ b/drivers/tty/serial/stm32-usart.c
+@@ -965,10 +965,8 @@ static void stm32_usart_start_tx(struct uart_port *port)
+ {
+ 	struct tty_port *tport = &port->state->port;
+ 
+-	if (kfifo_is_empty(&tport->xmit_fifo) && !port->x_char) {
+-		stm32_usart_rs485_rts_disable(port);
++	if (kfifo_is_empty(&tport->xmit_fifo) && !port->x_char)
+ 		return;
+-	}
+ 
+ 	stm32_usart_rs485_rts_enable(port);
+ 
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 4384b86ea7b66c..2fad9563dca40b 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -2866,6 +2866,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
+ 		if (!ep_seg) {
+ 
+ 			if (ep->skip && usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
++				/* this event is unlikely to match any TD, don't skip them all */
++				if (trb_comp_code == COMP_STOPPED_LENGTH_INVALID)
++					return 0;
++
+ 				skip_isoc_td(xhci, td, ep, status);
+ 				if (!list_empty(&ep_ring->td_list))
+ 					continue;
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index 439767d242fa9c..71588e4db0e34b 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -1748,11 +1748,20 @@ static inline void xhci_write_64(struct xhci_hcd *xhci,
+ }
+ 
+ 
+-/* Link TRB chain should always be set on 0.95 hosts, and AMD 0.96 ISOC rings */
++/*
++ * Reportedly, some chapters of v0.95 spec said that Link TRB always has its chain bit set.
++ * Other chapters and later specs say that it should only be set if the link is inside a TD
++ * which continues from the end of one segment to the next segment.
++ *
++ * Some 0.95 hardware was found to misbehave if any link TRB doesn't have the chain bit set.
++ *
++ * 0.96 hardware from AMD and NEC was found to ignore unchained isochronous link TRBs when
++ * "resynchronizing the pipe" after a Missed Service Error.
++ */
+ static inline bool xhci_link_chain_quirk(struct xhci_hcd *xhci, enum xhci_ring_type type)
+ {
+ 	return (xhci->quirks & XHCI_LINK_TRB_QUIRK) ||
+-	       (type == TYPE_ISOC && (xhci->quirks & XHCI_AMD_0x96_HOST));
++	       (type == TYPE_ISOC && (xhci->quirks & (XHCI_AMD_0x96_HOST | XHCI_NEC_HOST)));
+ }
+ 
+ /* xHCI debugging */
+diff --git a/fs/bcachefs/fs-ioctl.c b/fs/bcachefs/fs-ioctl.c
+index 405cf08bda3473..e599d5ac6e4d2a 100644
+--- a/fs/bcachefs/fs-ioctl.c
++++ b/fs/bcachefs/fs-ioctl.c
+@@ -520,10 +520,12 @@ static long bch2_ioctl_subvolume_destroy(struct bch_fs *c, struct file *filp,
+ 		ret = -ENOENT;
+ 		goto err;
+ 	}
+-	ret = __bch2_unlink(dir, victim, true);
++
++	ret =   inode_permission(file_mnt_idmap(filp), d_inode(victim), MAY_WRITE) ?:
++		__bch2_unlink(dir, victim, true);
+ 	if (!ret) {
+ 		fsnotify_rmdir(dir, victim);
+-		d_delete(victim);
++		d_invalidate(victim);
+ 	}
+ err:
+ 	inode_unlock(dir);
+diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
+index 4a765555bf8459..1c8fcb04b3cdeb 100644
+--- a/fs/nfsd/nfs4recover.c
++++ b/fs/nfsd/nfs4recover.c
+@@ -2052,7 +2052,6 @@ static inline int check_for_legacy_methods(int status, struct net *net)
+ 		path_put(&path);
+ 		if (status)
+ 			return -ENOTDIR;
+-		status = nn->client_tracking_ops->init(net);
+ 	}
+ 	return status;
+ }
+diff --git a/net/atm/mpc.c b/net/atm/mpc.c
+index 324e3ab96bb393..12da0269275c54 100644
+--- a/net/atm/mpc.c
++++ b/net/atm/mpc.c
+@@ -1314,6 +1314,8 @@ static void MPOA_cache_impos_rcvd(struct k_message *msg,
+ 	holding_time = msg->content.eg_info.holding_time;
+ 	dprintk("(%s) entry = %p, holding_time = %u\n",
+ 		mpc->dev->name, entry, holding_time);
++	if (entry == NULL && !holding_time)
++		return;
+ 	if (entry == NULL && holding_time) {
+ 		entry = mpc->eg_ops->add_entry(msg, mpc);
+ 		mpc->eg_ops->put(entry);
+diff --git a/net/ipv6/netfilter/nf_socket_ipv6.c b/net/ipv6/netfilter/nf_socket_ipv6.c
+index a7690ec6232596..9ea5ef56cb2704 100644
+--- a/net/ipv6/netfilter/nf_socket_ipv6.c
++++ b/net/ipv6/netfilter/nf_socket_ipv6.c
+@@ -103,6 +103,10 @@ struct sock *nf_sk_lookup_slow_v6(struct net *net, const struct sk_buff *skb,
+ 	struct sk_buff *data_skb = NULL;
+ 	int doff = 0;
+ 	int thoff = 0, tproto;
++#if IS_ENABLED(CONFIG_NF_CONNTRACK)
++	enum ip_conntrack_info ctinfo;
++	struct nf_conn const *ct;
++#endif
+ 
+ 	tproto = ipv6_find_hdr(skb, &thoff, -1, NULL, NULL);
+ 	if (tproto < 0) {
+@@ -136,6 +140,25 @@ struct sock *nf_sk_lookup_slow_v6(struct net *net, const struct sk_buff *skb,
+ 		return NULL;
+ 	}
+ 
++#if IS_ENABLED(CONFIG_NF_CONNTRACK)
++	/* Do the lookup with the original socket address in
++	 * case this is a reply packet of an established
++	 * SNAT-ted connection.
++	 */
++	ct = nf_ct_get(skb, &ctinfo);
++	if (ct &&
++	    ((tproto != IPPROTO_ICMPV6 &&
++	      ctinfo == IP_CT_ESTABLISHED_REPLY) ||
++	     (tproto == IPPROTO_ICMPV6 &&
++	      ctinfo == IP_CT_RELATED_REPLY)) &&
++	    (ct->status & IPS_SRC_NAT_DONE)) {
++		daddr = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.in6;
++		dport = (tproto == IPPROTO_TCP) ?
++			ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.tcp.port :
++			ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.udp.port;
++	}
++#endif
++
+ 	return nf_socket_get_sock_v6(net, data_skb, doff, tproto, saddr, daddr,
+ 				     sport, dport, indev);
+ }
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 3949e2614a6638..8c7da13a804c04 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -10441,6 +10441,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8811, "HP Spectre x360 15-eb1xxx", ALC285_FIXUP_HP_SPECTRE_X360_EB1),
+ 	SND_PCI_QUIRK(0x103c, 0x8812, "HP Spectre x360 15-eb1xxx", ALC285_FIXUP_HP_SPECTRE_X360_EB1),
+ 	SND_PCI_QUIRK(0x103c, 0x881d, "HP 250 G8 Notebook PC", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
++	SND_PCI_QUIRK(0x103c, 0x881e, "HP Laptop 15s-du3xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
+ 	SND_PCI_QUIRK(0x103c, 0x8846, "HP EliteBook 850 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8847, "HP EliteBook x360 830 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x884b, "HP EliteBook 840 Aero G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
+diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
+index a95ebcf4e46e76..1e7192cb4693c0 100644
+--- a/sound/usb/mixer_quirks.c
++++ b/sound/usb/mixer_quirks.c
+@@ -4156,6 +4156,52 @@ static void snd_dragonfly_quirk_db_scale(struct usb_mixer_interface *mixer,
+ 	}
+ }
+ 
++/*
++ * Some Plantronics headsets have control names that don't meet ALSA naming
++ * standards. This function fixes nonstandard source names. By the time
++ * this function is called the control name should look like one of these:
++ * "source names Playback Volume"
++ * "source names Playback Switch"
++ * "source names Capture Volume"
++ * "source names Capture Switch"
++ * If any of the trigger words are found in the name then the name will
++ * be changed to:
++ * "Headset Playback Volume"
++ * "Headset Playback Switch"
++ * "Headset Capture Volume"
++ * "Headset Capture Switch"
++ * depending on the current suffix.
++ */
++static void snd_fix_plt_name(struct snd_usb_audio *chip,
++			     struct snd_ctl_elem_id *id)
++{
++	/* no variant of "Sidetone" should be added to this list */
++	static const char * const trigger[] = {
++		"Earphone", "Microphone", "Receive", "Transmit"
++	};
++	static const char * const suffix[] = {
++		" Playback Volume", " Playback Switch",
++		" Capture Volume", " Capture Switch"
++	};
++	int i;
++
++	for (i = 0; i < ARRAY_SIZE(trigger); i++)
++		if (strstr(id->name, trigger[i]))
++			goto triggered;
++	usb_audio_dbg(chip, "no change in %s\n", id->name);
++	return;
++
++triggered:
++	for (i = 0; i < ARRAY_SIZE(suffix); i++)
++		if (strstr(id->name, suffix[i])) {
++			usb_audio_dbg(chip, "fixing kctl name %s\n", id->name);
++			snprintf(id->name, sizeof(id->name), "Headset%s",
++				 suffix[i]);
++			return;
++		}
++	usb_audio_dbg(chip, "something wrong in kctl name %s\n", id->name);
++}
++
+ void snd_usb_mixer_fu_apply_quirk(struct usb_mixer_interface *mixer,
+ 				  struct usb_mixer_elem_info *cval, int unitid,
+ 				  struct snd_kcontrol *kctl)
+@@ -4173,5 +4219,10 @@ void snd_usb_mixer_fu_apply_quirk(struct usb_mixer_interface *mixer,
+ 			cval->min_mute = 1;
+ 		break;
+ 	}
++
++	/* ALSA-ify some Plantronics headset control names */
++	if (USB_ID_VENDOR(mixer->chip->usb_id) == 0x047f &&
++	    (cval->control == UAC_FU_MUTE || cval->control == UAC_FU_VOLUME))
++		snd_fix_plt_name(mixer->chip, &kctl->id);
+ }
+ 
+diff --git a/tools/perf/Documentation/intel-hybrid.txt b/tools/perf/Documentation/intel-hybrid.txt
+index e7a776ad25d719..0379903673a4ac 100644
+--- a/tools/perf/Documentation/intel-hybrid.txt
++++ b/tools/perf/Documentation/intel-hybrid.txt
+@@ -8,15 +8,15 @@ Part of events are available on core cpu, part of events are available
+ on atom cpu and even part of events are available on both.
+ 
+ Kernel exports two new cpu pmus via sysfs:
+-/sys/devices/cpu_core
+-/sys/devices/cpu_atom
++/sys/bus/event_source/devices/cpu_core
++/sys/bus/event_source/devices/cpu_atom
+ 
+ The 'cpus' files are created under the directories. For example,
+ 
+-cat /sys/devices/cpu_core/cpus
++cat /sys/bus/event_source/devices/cpu_core/cpus
+ 0-15
+ 
+-cat /sys/devices/cpu_atom/cpus
++cat /sys/bus/event_source/devices/cpu_atom/cpus
+ 16-23
+ 
+ It indicates cpu0-cpu15 are core cpus and cpu16-cpu23 are atom cpus.
+@@ -60,8 +60,8 @@ can't carry pmu information. So now this type is extended to be PMU aware
+ type. The PMU type ID is stored at attr.config[63:32].
+ 
+ PMU type ID is retrieved from sysfs.
+-/sys/devices/cpu_atom/type
+-/sys/devices/cpu_core/type
++/sys/bus/event_source/devices/cpu_atom/type
++/sys/bus/event_source/devices/cpu_core/type
+ 
+ The new attr.config layout for PERF_TYPE_HARDWARE:
+ 
+diff --git a/tools/perf/Documentation/perf-list.txt b/tools/perf/Documentation/perf-list.txt
+index dea005410ec02f..ee5d333e2ca964 100644
+--- a/tools/perf/Documentation/perf-list.txt
++++ b/tools/perf/Documentation/perf-list.txt
+@@ -188,7 +188,7 @@ in the CPU vendor specific documentation.
+ 
+ The available PMUs and their raw parameters can be listed with
+ 
+-  ls /sys/devices/*/format
++  ls /sys/bus/event_source/devices/*/format
+ 
+ For example the raw event "LSD.UOPS" core pmu event above could
+ be specified as
+diff --git a/tools/perf/arch/x86/util/iostat.c b/tools/perf/arch/x86/util/iostat.c
+index df7b5dfcc26a51..7ea882ef293a18 100644
+--- a/tools/perf/arch/x86/util/iostat.c
++++ b/tools/perf/arch/x86/util/iostat.c
+@@ -32,7 +32,7 @@
+ #define MAX_PATH 1024
+ #endif
+ 
+-#define UNCORE_IIO_PMU_PATH	"devices/uncore_iio_%d"
++#define UNCORE_IIO_PMU_PATH	"bus/event_source/devices/uncore_iio_%d"
+ #define SYSFS_UNCORE_PMU_PATH	"%s/"UNCORE_IIO_PMU_PATH
+ #define PLATFORM_MAPPING_PATH	UNCORE_IIO_PMU_PATH"/die%d"
+ 
+diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
+index 4933efdfee76fb..628c61397d2d38 100644
+--- a/tools/perf/builtin-stat.c
++++ b/tools/perf/builtin-stat.c
+@@ -96,7 +96,7 @@
+ #include <internal/threadmap.h>
+ 
+ #define DEFAULT_SEPARATOR	" "
+-#define FREEZE_ON_SMI_PATH	"devices/cpu/freeze_on_smi"
++#define FREEZE_ON_SMI_PATH	"bus/event_source/devices/cpu/freeze_on_smi"
+ 
+ static void print_counters(struct timespec *ts, int argc, const char **argv);
+ 
+diff --git a/tools/perf/util/mem-events.c b/tools/perf/util/mem-events.c
+index bf5090f5220bbd..9c4adfb45f62ba 100644
+--- a/tools/perf/util/mem-events.c
++++ b/tools/perf/util/mem-events.c
+@@ -189,7 +189,7 @@ static bool perf_pmu__mem_events_supported(const char *mnt, struct perf_pmu *pmu
+ 	if (!e->event_name)
+ 		return true;
+ 
+-	scnprintf(path, PATH_MAX, "%s/devices/%s/events/%s", mnt, pmu->name, e->event_name);
++	scnprintf(path, PATH_MAX, "%s/bus/event_source/devices/%s/events/%s", mnt, pmu->name, e->event_name);
+ 
+ 	return !stat(path, &st);
+ }
+diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
+index 61bdda01a05aca..ed893c3c6ad938 100644
+--- a/tools/perf/util/pmu.c
++++ b/tools/perf/util/pmu.c
+@@ -33,12 +33,12 @@
+ #define UNIT_MAX_LEN	31 /* max length for event unit name */
+ 
+ enum event_source {
+-	/* An event loaded from /sys/devices/<pmu>/events. */
++	/* An event loaded from /sys/bus/event_source/devices/<pmu>/events. */
+ 	EVENT_SRC_SYSFS,
+ 	/* An event loaded from a CPUID matched json file. */
+ 	EVENT_SRC_CPU_JSON,
+ 	/*
+-	 * An event loaded from a /sys/devices/<pmu>/identifier matched json
++	 * An event loaded from a /sys/bus/event_source/devices/<pmu>/identifier matched json
+ 	 * file.
+ 	 */
+ 	EVENT_SRC_SYS_JSON,

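One detail in the hid-plantronics rework above: the double-key check
moves from jiffies_to_msecs() arithmetic to time_before_eq(), which
compares free-running jiffies values through a signed difference so the
test stays correct across counter wrap. A user-space sketch of the same
idiom (the kernel macro is the authoritative version; this only
illustrates the wrap handling):

#include <stdint.h>
#include <stdio.h>

/* wrap-safe "a is before or at b" for a free-running 32-bit tick counter,
 * mirroring the kernel's time_before_eq() idiom */
static int tick_before_eq(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) <= 0;	/* signed difference handles wrap */
}

int main(void)
{
	uint32_t prev = 0xfffffff0u;	/* shortly before the counter wraps */
	uint32_t cur = prev + 0x20;	/* has wrapped past zero */

	printf("naive cur <= prev: %d\n", cur <= prev);	      /* 1: wrong */
	printf("wrap-safe: %d\n", tick_before_eq(cur, prev)); /* 0: cur is after prev */
	return 0;
}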

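The bcachefs hunk chains the permission check and the unlink with GCC's
a ?: b extension: the right-hand side is evaluated only when the
left-hand side is zero, so __bch2_unlink() runs only if
inode_permission() succeeded. A tiny sketch of the idiom (GNU C
extension, hypothetical helpers):

#include <stdio.h>

static int step1(void) { return 0; }	/* succeeds, returns 0 */
static int step2(void) { return -1; }	/* reports an error */

int main(void)
{
	/* GNU ?: yields the left value if non-zero, otherwise evaluates
	 * the right: a compact "first error wins" call chain */
	int ret = step1() ?: step2();

	printf("ret=%d\n", ret);	/* -1: step1 succeeded, so step2 ran */
	return 0;
}
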
* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2025-03-29 10:59 Mike Pagano
  0 siblings, 0 replies; 82+ messages in thread
From: Mike Pagano @ 2025-03-29 10:59 UTC (permalink / raw
  To: gentoo-commits

commit:     05f176184ad749c5e3d3208b8f6e1165855dd482
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Mar 29 10:59:27 2025 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Mar 29 10:59:27 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=05f17618

Remove redundant patch

Removed:
2901_tools-lib-subcmd-compile-fix.patch

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                             |  4 ---
 2901_tools-lib-subcmd-compile-fix.patch | 54 ---------------------------------
 2 files changed, 58 deletions(-)

diff --git a/0000_README b/0000_README
index accea09e..6196ef75 100644
--- a/0000_README
+++ b/0000_README
@@ -151,10 +151,6 @@ Patch:  2400_wifi-mt76-mt7921-null-ptr-deref-fix.patch
 From:   https://github.com/nbd168/wireless/commit/adc3fd2a2277b7cc0b61692463771bf9bd298036
 Desc:   wifi: mt76: mt7921: fix kernel panic due to null pointer dereference
 
-Patch:  2901_tools-lib-subcmd-compile-fix.patch
-From:   https://lore.kernel.org/all/20240731085217.94928-1-michael.weiss@aisec.fraunhofer.de/
-Desc:   tools lib subcmd: Fixed uninitialized use of variable in parse-options
-
 Patch:  2910_bfp-mark-get-entry-ip-as--maybe-unused.patch
 From:   https://www.spinics.net/lists/stable/msg604665.html
 Desc:   bpf: mark get_entry_ip as __maybe_unused

diff --git a/2901_tools-lib-subcmd-compile-fix.patch b/2901_tools-lib-subcmd-compile-fix.patch
deleted file mode 100644
index bb1f7ffd..00000000
--- a/2901_tools-lib-subcmd-compile-fix.patch
+++ /dev/null
@@ -1,54 +0,0 @@
-From git@z Thu Jan  1 00:00:00 1970
-Subject: [PATCH] tools lib subcmd: Fixed uninitialized use of variable in
- parse-options
-From: Michael Weiß <michael.weiss@aisec.fraunhofer.de>
-Date: Wed, 31 Jul 2024 10:52:17 +0200
-Message-Id: <20240731085217.94928-1-michael.weiss@aisec.fraunhofer.de>
-MIME-Version: 1.0
-Content-Type: text/plain; charset="utf-8"
-Content-Transfer-Encoding: 8bit
-
-Since commit ea558c86248b ("tools lib subcmd: Show parent options in
-help"), our debug images fail to build.
-
-For our Yocto-based GyroidOS, we build debug images with debugging enabled
-for all binaries including the kernel. Yocto passes the corresponding gcc
-option "-Og" also to the kernel HOSTCFLAGS. This results in the following
-build error:
-
-  parse-options.c: In function ‘options__order’:
-  parse-options.c:834:9: error: ‘o’ may be used uninitialized [-Werror=maybe-uninitialized]
-    834 |         memcpy(&ordered[nr_opts], o, sizeof(*o));
-        |         ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-  parse-options.c:812:30: note: ‘o’ was declared here
-    812 |         const struct option *o, *p = opts;
-        |                              ^
-  ..
-
-Fix it by initializing 'o' instead of 'p' in the above failing line 812.
-'p' is initialized afterwards in the following for-loop anyway.
-I think that was the intention of the commit ea558c86248b ("tools lib
-subcmd: Show parent options in help") in the first place.
-
-Fixes: ea558c86248b ("tools lib subcmd: Show parent options in help")
-Signed-off-by: Michael Weiß <michael.weiss@aisec.fraunhofer.de>
----
- tools/lib/subcmd/parse-options.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/tools/lib/subcmd/parse-options.c b/tools/lib/subcmd/parse-options.c
-index 4b60ec03b0bb..2a3b51a690c7 100644
---- a/tools/lib/subcmd/parse-options.c
-+++ b/tools/lib/subcmd/parse-options.c
-@@ -809,7 +809,7 @@ static int option__cmp(const void *va, const void *vb)
- static struct option *options__order(const struct option *opts)
- {
- 	int nr_opts = 0, nr_group = 0, nr_parent = 0, len;
--	const struct option *o, *p = opts;
-+	const struct option *o = opts, *p;
- 	struct option *opt, *ordered = NULL, *group;
- 
- 	/* flatten the options that have parents */
--- 
-2.39.2
-

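For context on the dropped patch: the construct it fixed declares two
pointers in one statement but initializes only the second, so under -Og
GCC cannot prove 'o' is assigned before the memcpy() and emits
-Wmaybe-uninitialized. A reduced illustration of the pattern and the fix
(hypothetical types, not the perf code itself):

#include <string.h>

struct option { int id; };

static void order(const struct option *opts, struct option *out)
{
	/* the fix: initialize 'o' at declaration; with
	 * 'const struct option *o, *p = opts;' a path where the loop body
	 * never assigns 'o' reaches the memcpy() with it uninitialized */
	const struct option *o = opts, *p;

	for (p = opts; p->id; p++)
		o = p;

	memcpy(out, o, sizeof(*o));
}

int main(void)
{
	struct option opts[] = { { 1 }, { 2 }, { 0 } };
	struct option last;

	order(opts, &last);
	return last.id != 2;	/* copies the last real option */
}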

* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2025-03-29 10:47 Mike Pagano
  0 siblings, 0 replies; 82+ messages in thread
From: Mike Pagano @ 2025-03-29 10:47 UTC (permalink / raw
  To: gentoo-commits

commit:     1a34b289a2641ba3e4ee1a58335f9185b4ef765a
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Mar 29 10:46:47 2025 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Mar 29 10:46:47 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=1a34b289

Linux patch 6.12.21

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1020_linux-6.12.21.patch | 4384 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 4388 insertions(+)

diff --git a/0000_README b/0000_README
index ecb0495f..accea09e 100644
--- a/0000_README
+++ b/0000_README
@@ -123,6 +123,10 @@ Patch:  1019_linux-6.12.20.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.12.20
 
+Patch:  1020_linux-6.12.21.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.12.21
+
 Patch:  1500_fortify-copy-size-value-range-tracking-fix.patch
 From:   https://git.kernel.org/
 Desc:   fortify: Hide run-time copy size from value range tracking

diff --git a/1020_linux-6.12.21.patch b/1020_linux-6.12.21.patch
new file mode 100644
index 00000000..5eae937b
--- /dev/null
+++ b/1020_linux-6.12.21.patch
@@ -0,0 +1,4384 @@
+diff --git a/Documentation/devicetree/bindings/net/can/renesas,rcar-canfd.yaml b/Documentation/devicetree/bindings/net/can/renesas,rcar-canfd.yaml
+index 7c5ac5d2e880bb..f6884f6e59e743 100644
+--- a/Documentation/devicetree/bindings/net/can/renesas,rcar-canfd.yaml
++++ b/Documentation/devicetree/bindings/net/can/renesas,rcar-canfd.yaml
+@@ -170,7 +170,7 @@ allOf:
+             const: renesas,r8a779h0-canfd
+     then:
+       patternProperties:
+-        "^channel[5-7]$": false
++        "^channel[4-7]$": false
+     else:
+       if:
+         not:
+diff --git a/Makefile b/Makefile
+index ca000bd227be66..a646151342b832 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 12
+-SUBLEVEL = 20
++SUBLEVEL = 21
+ EXTRAVERSION =
+ NAME = Baby Opossum Posse
+ 
+diff --git a/arch/arm/boot/dts/broadcom/bcm2711-rpi.dtsi b/arch/arm/boot/dts/broadcom/bcm2711-rpi.dtsi
+index 6bf4241fe3b737..c78ed064d1667d 100644
+--- a/arch/arm/boot/dts/broadcom/bcm2711-rpi.dtsi
++++ b/arch/arm/boot/dts/broadcom/bcm2711-rpi.dtsi
+@@ -1,7 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0
+ #include "bcm2835-rpi.dtsi"
+ 
+-#include <dt-bindings/power/raspberrypi-power.h>
+ #include <dt-bindings/reset/raspberrypi,firmware-reset.h>
+ 
+ / {
+@@ -101,7 +100,3 @@ &v3d {
+ &vchiq {
+ 	interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
+ };
+-
+-&xhci {
+-	power-domains = <&power RPI_POWER_DOMAIN_USB>;
+-};
+diff --git a/arch/arm/boot/dts/broadcom/bcm2711.dtsi b/arch/arm/boot/dts/broadcom/bcm2711.dtsi
+index e4e42af21ef3a4..c06d9f5e53c804 100644
+--- a/arch/arm/boot/dts/broadcom/bcm2711.dtsi
++++ b/arch/arm/boot/dts/broadcom/bcm2711.dtsi
+@@ -134,7 +134,7 @@ uart2: serial@7e201400 {
+ 			clocks = <&clocks BCM2835_CLOCK_UART>,
+ 				 <&clocks BCM2835_CLOCK_VPU>;
+ 			clock-names = "uartclk", "apb_pclk";
+-			arm,primecell-periphid = <0x00241011>;
++			arm,primecell-periphid = <0x00341011>;
+ 			status = "disabled";
+ 		};
+ 
+@@ -145,7 +145,7 @@ uart3: serial@7e201600 {
+ 			clocks = <&clocks BCM2835_CLOCK_UART>,
+ 				 <&clocks BCM2835_CLOCK_VPU>;
+ 			clock-names = "uartclk", "apb_pclk";
+-			arm,primecell-periphid = <0x00241011>;
++			arm,primecell-periphid = <0x00341011>;
+ 			status = "disabled";
+ 		};
+ 
+@@ -156,7 +156,7 @@ uart4: serial@7e201800 {
+ 			clocks = <&clocks BCM2835_CLOCK_UART>,
+ 				 <&clocks BCM2835_CLOCK_VPU>;
+ 			clock-names = "uartclk", "apb_pclk";
+-			arm,primecell-periphid = <0x00241011>;
++			arm,primecell-periphid = <0x00341011>;
+ 			status = "disabled";
+ 		};
+ 
+@@ -167,7 +167,7 @@ uart5: serial@7e201a00 {
+ 			clocks = <&clocks BCM2835_CLOCK_UART>,
+ 				 <&clocks BCM2835_CLOCK_VPU>;
+ 			clock-names = "uartclk", "apb_pclk";
+-			arm,primecell-periphid = <0x00241011>;
++			arm,primecell-periphid = <0x00341011>;
+ 			status = "disabled";
+ 		};
+ 
+@@ -451,8 +451,6 @@ IRQ_TYPE_LEVEL_LOW)>,
+ 					  IRQ_TYPE_LEVEL_LOW)>,
+ 			     <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) |
+ 					  IRQ_TYPE_LEVEL_LOW)>;
+-		/* This only applies to the ARMv7 stub */
+-		arm,cpu-registers-not-fw-configured;
+ 	};
+ 
+ 	cpus: cpus {
+@@ -610,6 +608,7 @@ xhci: usb@7e9c0000 {
+ 			#address-cells = <1>;
+ 			#size-cells = <0>;
+ 			interrupts = <GIC_SPI 176 IRQ_TYPE_LEVEL_HIGH>;
++			power-domains = <&pm BCM2835_POWER_DOMAIN_USB>;
+ 			/* DWC2 and this IP block share the same USB PHY,
+ 			 * enabling both at the same time results in lockups.
+ 			 * So keep this node disabled and let the bootloader
+@@ -1177,6 +1176,7 @@ &txp {
+ };
+ 
+ &uart0 {
++	arm,primecell-periphid = <0x00341011>;
+ 	interrupts = <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>;
+ };
+ 
+diff --git a/arch/arm/boot/dts/broadcom/bcm4709-asus-rt-ac3200.dts b/arch/arm/boot/dts/broadcom/bcm4709-asus-rt-ac3200.dts
+index 53cb0c58f6d057..3da2daee0c849d 100644
+--- a/arch/arm/boot/dts/broadcom/bcm4709-asus-rt-ac3200.dts
++++ b/arch/arm/boot/dts/broadcom/bcm4709-asus-rt-ac3200.dts
+@@ -124,19 +124,19 @@ port@0 {
+ 		};
+ 
+ 		port@1 {
+-			label = "lan1";
++			label = "lan4";
+ 		};
+ 
+ 		port@2 {
+-			label = "lan2";
++			label = "lan3";
+ 		};
+ 
+ 		port@3 {
+-			label = "lan3";
++			label = "lan2";
+ 		};
+ 
+ 		port@4 {
+-			label = "lan4";
++			label = "lan1";
+ 		};
+ 	};
+ };
+diff --git a/arch/arm/boot/dts/broadcom/bcm47094-asus-rt-ac5300.dts b/arch/arm/boot/dts/broadcom/bcm47094-asus-rt-ac5300.dts
+index 6c666dc7ad23ef..01ec8c03686a66 100644
+--- a/arch/arm/boot/dts/broadcom/bcm47094-asus-rt-ac5300.dts
++++ b/arch/arm/boot/dts/broadcom/bcm47094-asus-rt-ac5300.dts
+@@ -126,11 +126,11 @@ &srab {
+ 
+ 	ports {
+ 		port@0 {
+-			label = "lan4";
++			label = "wan";
+ 		};
+ 
+ 		port@1 {
+-			label = "lan3";
++			label = "lan1";
+ 		};
+ 
+ 		port@2 {
+@@ -138,11 +138,11 @@ port@2 {
+ 		};
+ 
+ 		port@3 {
+-			label = "lan1";
++			label = "lan3";
+ 		};
+ 
+ 		port@4 {
+-			label = "wan";
++			label = "lan4";
+ 		};
+ 	};
+ };
+diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-apalis.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-apalis.dtsi
+index edf55760a5c1a2..1a63a6add43988 100644
+--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-apalis.dtsi
++++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-apalis.dtsi
+@@ -108,6 +108,11 @@ lvds_panel_in: endpoint {
+ 		};
+ 	};
+ 
++	poweroff {
++		compatible = "regulator-poweroff";
++		cpu-supply = <&vgen2_reg>;
++	};
++
+ 	reg_module_3v3: regulator-module-3v3 {
+ 		compatible = "regulator-fixed";
+ 		regulator-always-on;
+@@ -236,10 +241,6 @@ &can2 {
+ 	status = "disabled";
+ };
+ 
+-&clks {
+-	fsl,pmic-stby-poweroff;
+-};
+-
+ /* Apalis SPI1 */
+ &ecspi1 {
+ 	cs-gpios = <&gpio5 25 GPIO_ACTIVE_LOW>;
+@@ -527,7 +528,6 @@ &i2c2 {
+ 
+ 	pmic: pmic@8 {
+ 		compatible = "fsl,pfuze100";
+-		fsl,pmic-stby-poweroff;
+ 		reg = <0x08>;
+ 
+ 		regulators {
+diff --git a/arch/arm/mach-davinci/Kconfig b/arch/arm/mach-davinci/Kconfig
+index 2a8a9fe46586d2..3fa15f3422409a 100644
+--- a/arch/arm/mach-davinci/Kconfig
++++ b/arch/arm/mach-davinci/Kconfig
+@@ -27,6 +27,7 @@ config ARCH_DAVINCI_DA830
+ 
+ config ARCH_DAVINCI_DA850
+ 	bool "DA850/OMAP-L138/AM18x based system"
++	select ARCH_DAVINCI_DA8XX
+ 	select DAVINCI_CP_INTC
+ 
+ config ARCH_DAVINCI_DA8XX
+diff --git a/arch/arm/mach-omap1/Kconfig b/arch/arm/mach-omap1/Kconfig
+index a643b71e30a355..08ec6bd84ada56 100644
+--- a/arch/arm/mach-omap1/Kconfig
++++ b/arch/arm/mach-omap1/Kconfig
+@@ -8,6 +8,7 @@ menuconfig ARCH_OMAP1
+ 	select ARCH_OMAP
+ 	select CLKSRC_MMIO
+ 	select FORCE_PCI if PCCARD
++	select GENERIC_IRQ_CHIP
+ 	select GPIOLIB
+ 	help
+ 	  Support for older TI OMAP1 (omap7xx, omap15xx or omap16xx)
+diff --git a/arch/arm/mach-shmobile/headsmp.S b/arch/arm/mach-shmobile/headsmp.S
+index a956b489b6ea12..2bc7e73a8582d2 100644
+--- a/arch/arm/mach-shmobile/headsmp.S
++++ b/arch/arm/mach-shmobile/headsmp.S
+@@ -136,6 +136,7 @@ ENDPROC(shmobile_smp_sleep)
+ 	.long	shmobile_smp_arg - 1b
+ 
+ 	.bss
++	.align	2
+ 	.globl	shmobile_smp_mpidr
+ shmobile_smp_mpidr:
+ 	.space	NR_CPUS * 4
+diff --git a/arch/arm64/boot/dts/broadcom/bcm2712.dtsi b/arch/arm64/boot/dts/broadcom/bcm2712.dtsi
+index 26a29e5e5078d5..447bfa060918ca 100644
+--- a/arch/arm64/boot/dts/broadcom/bcm2712.dtsi
++++ b/arch/arm64/boot/dts/broadcom/bcm2712.dtsi
+@@ -232,7 +232,7 @@ uart10: serial@7d001000 {
+ 			interrupts = <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>;
+ 			clocks = <&clk_uart>, <&clk_vpu>;
+ 			clock-names = "uartclk", "apb_pclk";
+-			arm,primecell-periphid = <0x00241011>;
++			arm,primecell-periphid = <0x00341011>;
+ 			status = "disabled";
+ 		};
+ 
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm-verdin-dahlia.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-verdin-dahlia.dtsi
+index ce20de25980545..3d0b1496813104 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm-verdin-dahlia.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mm-verdin-dahlia.dtsi
+@@ -16,10 +16,10 @@ sound_card: sound-card {
+ 			"Headphone Jack", "HPOUTR",
+ 			"IN2L", "Line In Jack",
+ 			"IN2R", "Line In Jack",
+-			"Headphone Jack", "MICBIAS",
+-			"IN1L", "Headphone Jack";
++			"Microphone Jack", "MICBIAS",
++			"IN1L", "Microphone Jack";
+ 		simple-audio-card,widgets =
+-			"Microphone", "Headphone Jack",
++			"Microphone", "Microphone Jack",
+ 			"Headphone", "Headphone Jack",
+ 			"Line", "Line In Jack";
+ 
+diff --git a/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql.dtsi
+index 336785a9fba896..3ddc5aaa7c5f0c 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql.dtsi
+@@ -1,7 +1,8 @@
+ // SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+ /*
+- * Copyright 2021-2022 TQ-Systems GmbH
+- * Author: Alexander Stein <alexander.stein@tq-group.com>
++ * Copyright 2021-2025 TQ-Systems GmbH <linux@ew.tq-group.com>,
++ * D-82229 Seefeld, Germany.
++ * Author: Alexander Stein
+  */
+ 
+ #include "imx8mp.dtsi"
+@@ -23,15 +24,6 @@ reg_vcc3v3: regulator-vcc3v3 {
+ 		regulator-max-microvolt = <3300000>;
+ 		regulator-always-on;
+ 	};
+-
+-	/* e-MMC IO, needed for HS modes */
+-	reg_vcc1v8: regulator-vcc1v8 {
+-		compatible = "regulator-fixed";
+-		regulator-name = "VCC1V8";
+-		regulator-min-microvolt = <1800000>;
+-		regulator-max-microvolt = <1800000>;
+-		regulator-always-on;
+-	};
+ };
+ 
+ &A53_0 {
+@@ -197,7 +189,7 @@ &usdhc3 {
+ 	no-sd;
+ 	no-sdio;
+ 	vmmc-supply = <&reg_vcc3v3>;
+-	vqmmc-supply = <&reg_vcc1v8>;
++	vqmmc-supply = <&buck5_reg>;
+ 	status = "okay";
+ };
+ 
+diff --git a/arch/arm64/boot/dts/freescale/imx8mp-verdin-dahlia.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-verdin-dahlia.dtsi
+index da8902c5f7e5b2..1493319aa748d0 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mp-verdin-dahlia.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mp-verdin-dahlia.dtsi
+@@ -28,10 +28,10 @@ sound {
+ 			"Headphone Jack", "HPOUTR",
+ 			"IN2L", "Line In Jack",
+ 			"IN2R", "Line In Jack",
+-			"Headphone Jack", "MICBIAS",
+-			"IN1L", "Headphone Jack";
++			"Microphone Jack", "MICBIAS",
++			"IN1L", "Microphone Jack";
+ 		simple-audio-card,widgets =
+-			"Microphone", "Headphone Jack",
++			"Microphone", "Microphone Jack",
+ 			"Headphone", "Headphone Jack",
+ 			"Line", "Line In Jack";
+ 
+diff --git a/arch/arm64/boot/dts/rockchip/px30-ringneck-haikou.dts b/arch/arm64/boot/dts/rockchip/px30-ringneck-haikou.dts
+index 0905668cbe1f4e..3d5e81a0afdc57 100644
+--- a/arch/arm64/boot/dts/rockchip/px30-ringneck-haikou.dts
++++ b/arch/arm64/boot/dts/rockchip/px30-ringneck-haikou.dts
+@@ -194,6 +194,13 @@ sd_card_led_pin: sd-card-led-pin {
+ 			  <3 RK_PB3 RK_FUNC_GPIO &pcfg_pull_none>;
+ 		};
+ 	};
++
++	uart {
++		uart5_rts_pin: uart5-rts-pin {
++			rockchip,pins =
++			  <0 RK_PB5 RK_FUNC_GPIO &pcfg_pull_none>;
++		};
++	};
+ };
+ 
+ &pwm0 {
+@@ -222,10 +229,15 @@ &u2phy_otg {
+ };
+ 
+ &uart0 {
++	pinctrl-names = "default";
++	pinctrl-0 = <&uart0_xfer>;
+ 	status = "okay";
+ };
+ 
+ &uart5 {
++	/* Add pinmux for rts-gpios (uart5_rts_pin) */
++	pinctrl-names = "default";
++	pinctrl-0 = <&uart5_xfer &uart5_rts_pin>;
+ 	rts-gpios = <&gpio0 RK_PB5 GPIO_ACTIVE_HIGH>;
+ 	status = "okay";
+ };
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399-nanopi-r4s.dts b/arch/arm64/boot/dts/rockchip/rk3399-nanopi-r4s.dts
+index fe5b526100107a..6a6b36c36ce215 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399-nanopi-r4s.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3399-nanopi-r4s.dts
+@@ -117,7 +117,7 @@ &u2phy0_host {
+ };
+ 
+ &u2phy1_host {
+-	status = "disabled";
++	phy-supply = <&vdd_5v>;
+ };
+ 
+ &uart0 {
+diff --git a/arch/arm64/boot/dts/rockchip/rk3566-lubancat-1.dts b/arch/arm64/boot/dts/rockchip/rk3566-lubancat-1.dts
+index 9a2f59a351dee5..48ccdd6b471182 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3566-lubancat-1.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3566-lubancat-1.dts
+@@ -512,7 +512,6 @@ &sdhci {
+ 
+ &sdmmc0 {
+ 	max-frequency = <150000000>;
+-	supports-sd;
+ 	bus-width = <4>;
+ 	cap-mmc-highspeed;
+ 	cap-sd-highspeed;
+diff --git a/arch/arm64/boot/dts/rockchip/rk3588-jaguar.dts b/arch/arm64/boot/dts/rockchip/rk3588-jaguar.dts
+index 31d2f8994f8513..e61c5731fb99f0 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3588-jaguar.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3588-jaguar.dts
+@@ -455,7 +455,6 @@ &sdhci {
+ 	non-removable;
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&emmc_bus8 &emmc_cmd &emmc_clk &emmc_data_strobe>;
+-	supports-cqe;
+ 	vmmc-supply = <&vcc_3v3_s3>;
+ 	vqmmc-supply = <&vcc_1v8_s3>;
+ 	status = "okay";
+diff --git a/arch/arm64/boot/dts/rockchip/rk3588-tiger.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-tiger.dtsi
+index 615094bb8ba380..a82fe75bda55c8 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3588-tiger.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3588-tiger.dtsi
+@@ -367,7 +367,6 @@ &sdhci {
+ 	non-removable;
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&emmc_bus8 &emmc_cmd &emmc_clk &emmc_data_strobe>;
+-	supports-cqe;
+ 	vmmc-supply = <&vcc_3v3_s3>;
+ 	vqmmc-supply = <&vcc_1v8_s3>;
+ 	status = "okay";
+diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
+index 1bf70fa1045dcd..122a1e12582c05 100644
+--- a/arch/arm64/include/asm/kvm_host.h
++++ b/arch/arm64/include/asm/kvm_host.h
+@@ -602,23 +602,13 @@ struct kvm_host_data {
+ 	struct kvm_cpu_context host_ctxt;
+ 
+ 	/*
+-	 * All pointers in this union are hyp VA.
++	 * Hyp VA.
+ 	 * sve_state is only used in pKVM and if system_supports_sve().
+ 	 */
+-	union {
+-		struct user_fpsimd_state *fpsimd_state;
+-		struct cpu_sve_state *sve_state;
+-	};
+-
+-	union {
+-		/* HYP VA pointer to the host storage for FPMR */
+-		u64	*fpmr_ptr;
+-		/*
+-		 * Used by pKVM only, as it needs to provide storage
+-		 * for the host
+-		 */
+-		u64	fpmr;
+-	};
++	struct cpu_sve_state *sve_state;
++
++	/* Used by pKVM only. */
++	u64	fpmr;
+ 
+ 	/* Ownership of the FP regs */
+ 	enum {
+@@ -697,7 +687,6 @@ struct kvm_vcpu_arch {
+ 	u64 hcr_el2;
+ 	u64 hcrx_el2;
+ 	u64 mdcr_el2;
+-	u64 cptr_el2;
+ 
+ 	/* Exception Information */
+ 	struct kvm_vcpu_fault_info fault;
+@@ -902,10 +891,6 @@ struct kvm_vcpu_arch {
+ /* Save TRBE context if active  */
+ #define DEBUG_STATE_SAVE_TRBE	__vcpu_single_flag(iflags, BIT(6))
+ 
+-/* SVE enabled for host EL0 */
+-#define HOST_SVE_ENABLED	__vcpu_single_flag(sflags, BIT(0))
+-/* SME enabled for EL0 */
+-#define HOST_SME_ENABLED	__vcpu_single_flag(sflags, BIT(1))
+ /* Physical CPU not in supported_cpus */
+ #define ON_UNSUPPORTED_CPU	__vcpu_single_flag(sflags, BIT(2))
+ /* WFIT instruction trapped */
+diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
+index 6d21971ae5594f..f38d22dac140f1 100644
+--- a/arch/arm64/kernel/fpsimd.c
++++ b/arch/arm64/kernel/fpsimd.c
+@@ -1694,31 +1694,6 @@ void fpsimd_signal_preserve_current_state(void)
+ 		sve_to_fpsimd(current);
+ }
+ 
+-/*
+- * Called by KVM when entering the guest.
+- */
+-void fpsimd_kvm_prepare(void)
+-{
+-	if (!system_supports_sve())
+-		return;
+-
+-	/*
+-	 * KVM does not save host SVE state since we can only enter
+-	 * the guest from a syscall so the ABI means that only the
+-	 * non-saved SVE state needs to be saved.  If we have left
+-	 * SVE enabled for performance reasons then update the task
+-	 * state to be FPSIMD only.
+-	 */
+-	get_cpu_fpsimd_context();
+-
+-	if (test_and_clear_thread_flag(TIF_SVE)) {
+-		sve_to_fpsimd(current);
+-		current->thread.fp_type = FP_STATE_FPSIMD;
+-	}
+-
+-	put_cpu_fpsimd_context();
+-}
+-
+ /*
+  * Associate current's FPSIMD context with this cpu
+  * The caller must have ownership of the cpu FPSIMD context before calling
+diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
+index 3cf65daa75a51f..634d3f62481827 100644
+--- a/arch/arm64/kvm/arm.c
++++ b/arch/arm64/kvm/arm.c
+@@ -1577,7 +1577,6 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
+ 	}
+ 
+ 	vcpu_reset_hcr(vcpu);
+-	vcpu->arch.cptr_el2 = kvm_get_reset_cptr_el2(vcpu);
+ 
+ 	/*
+ 	 * Handle the "start in power-off" case.
+@@ -2477,14 +2476,6 @@ static void finalize_init_hyp_mode(void)
+ 			per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state =
+ 				kern_hyp_va(sve_state);
+ 		}
+-	} else {
+-		for_each_possible_cpu(cpu) {
+-			struct user_fpsimd_state *fpsimd_state;
+-
+-			fpsimd_state = &per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->host_ctxt.fp_regs;
+-			per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->fpsimd_state =
+-				kern_hyp_va(fpsimd_state);
+-		}
+ 	}
+ }
+ 
+diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c
+index ea5484ce1f3ba3..3cbb999419af7b 100644
+--- a/arch/arm64/kvm/fpsimd.c
++++ b/arch/arm64/kvm/fpsimd.c
+@@ -54,43 +54,16 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
+ 	if (!system_supports_fpsimd())
+ 		return;
+ 
+-	fpsimd_kvm_prepare();
+-
+ 	/*
+-	 * We will check TIF_FOREIGN_FPSTATE just before entering the
+-	 * guest in kvm_arch_vcpu_ctxflush_fp() and override this to
+-	 * FP_STATE_FREE if the flag set.
++	 * Ensure that any host FPSIMD/SVE/SME state is saved and unbound such
++	 * that the host kernel is responsible for restoring this state upon
++	 * return to userspace, and the hyp code doesn't need to save anything.
++	 *
++	 * When the host may use SME, fpsimd_save_and_flush_cpu_state() ensures
++	 * that PSTATE.{SM,ZA} == {0,0}.
+ 	 */
+-	*host_data_ptr(fp_owner) = FP_STATE_HOST_OWNED;
+-	*host_data_ptr(fpsimd_state) = kern_hyp_va(&current->thread.uw.fpsimd_state);
+-	*host_data_ptr(fpmr_ptr) = kern_hyp_va(&current->thread.uw.fpmr);
+-
+-	vcpu_clear_flag(vcpu, HOST_SVE_ENABLED);
+-	if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN)
+-		vcpu_set_flag(vcpu, HOST_SVE_ENABLED);
+-
+-	if (system_supports_sme()) {
+-		vcpu_clear_flag(vcpu, HOST_SME_ENABLED);
+-		if (read_sysreg(cpacr_el1) & CPACR_EL1_SMEN_EL0EN)
+-			vcpu_set_flag(vcpu, HOST_SME_ENABLED);
+-
+-		/*
+-		 * If PSTATE.SM is enabled then save any pending FP
+-		 * state and disable PSTATE.SM. If we leave PSTATE.SM
+-		 * enabled and the guest does not enable SME via
+-		 * CPACR_EL1.SMEN then operations that should be valid
+-		 * may generate SME traps from EL1 to EL1 which we
+-		 * can't intercept and which would confuse the guest.
+-		 *
+-		 * Do the same for PSTATE.ZA in the case where there
+-		 * is state in the registers which has not already
+-		 * been saved, this is very unlikely to happen.
+-		 */
+-		if (read_sysreg_s(SYS_SVCR) & (SVCR_SM_MASK | SVCR_ZA_MASK)) {
+-			*host_data_ptr(fp_owner) = FP_STATE_FREE;
+-			fpsimd_save_and_flush_cpu_state();
+-		}
+-	}
++	fpsimd_save_and_flush_cpu_state();
++	*host_data_ptr(fp_owner) = FP_STATE_FREE;
+ 
+ 	/*
+ 	 * If normal guests gain SME support, maintain this behavior for pKVM
+@@ -162,52 +135,7 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
+ 
+ 	local_irq_save(flags);
+ 
+-	/*
+-	 * If we have VHE then the Hyp code will reset CPACR_EL1 to
+-	 * the default value and we need to reenable SME.
+-	 */
+-	if (has_vhe() && system_supports_sme()) {
+-		/* Also restore EL0 state seen on entry */
+-		if (vcpu_get_flag(vcpu, HOST_SME_ENABLED))
+-			sysreg_clear_set(CPACR_EL1, 0, CPACR_ELx_SMEN);
+-		else
+-			sysreg_clear_set(CPACR_EL1,
+-					 CPACR_EL1_SMEN_EL0EN,
+-					 CPACR_EL1_SMEN_EL1EN);
+-		isb();
+-	}
+-
+ 	if (guest_owns_fp_regs()) {
+-		if (vcpu_has_sve(vcpu)) {
+-			u64 zcr = read_sysreg_el1(SYS_ZCR);
+-
+-			/*
+-			 * If the vCPU is in the hyp context then ZCR_EL1 is
+-			 * loaded with its vEL2 counterpart.
+-			 */
+-			__vcpu_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu)) = zcr;
+-
+-			/*
+-			 * Restore the VL that was saved when bound to the CPU,
+-			 * which is the maximum VL for the guest. Because the
+-			 * layout of the data when saving the sve state depends
+-			 * on the VL, we need to use a consistent (i.e., the
+-			 * maximum) VL.
+-			 * Note that this means that at guest exit ZCR_EL1 is
+-			 * not necessarily the same as on guest entry.
+-			 *
+-			 * ZCR_EL2 holds the guest hypervisor's VL when running
+-			 * a nested guest, which could be smaller than the
+-			 * max for the vCPU. Similar to above, we first need to
+-			 * switch to a VL consistent with the layout of the
+-			 * vCPU's SVE state. KVM support for NV implies VHE, so
+-			 * using the ZCR_EL1 alias is safe.
+-			 */
+-			if (!has_vhe() || (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)))
+-				sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1,
+-						       SYS_ZCR_EL1);
+-		}
+-
+ 		/*
+ 		 * Flush (save and invalidate) the fpsimd/sve state so that if
+ 		 * the host tries to use fpsimd/sve, it's not using stale data
+@@ -219,18 +147,6 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
+ 		 * when needed.
+ 		 */
+ 		fpsimd_save_and_flush_cpu_state();
+-	} else if (has_vhe() && system_supports_sve()) {
+-		/*
+-		 * The FPSIMD/SVE state in the CPU has not been touched, and we
+-		 * have SVE (and VHE): CPACR_EL1 (alias CPTR_EL2) has been
+-		 * reset by kvm_reset_cptr_el2() in the Hyp code, disabling SVE
+-		 * for EL0.  To avoid spurious traps, restore the trap state
+-		 * seen by kvm_arch_vcpu_load_fp():
+-		 */
+-		if (vcpu_get_flag(vcpu, HOST_SVE_ENABLED))
+-			sysreg_clear_set(CPACR_EL1, 0, CPACR_EL1_ZEN_EL0EN);
+-		else
+-			sysreg_clear_set(CPACR_EL1, CPACR_EL1_ZEN_EL0EN, 0);
+ 	}
+ 
+ 	local_irq_restore(flags);
+diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
+index 4433a234aa9ba2..9f4e8d68ab505c 100644
+--- a/arch/arm64/kvm/hyp/entry.S
++++ b/arch/arm64/kvm/hyp/entry.S
+@@ -44,6 +44,11 @@ alternative_if ARM64_HAS_RAS_EXTN
+ alternative_else_nop_endif
+ 	mrs	x1, isr_el1
+ 	cbz	x1,  1f
++
++	// Ensure that __guest_enter() always provides a context
++	// synchronization event so that callers don't need ISBs for anything
++	// that would usually be synchronized by the ERET.
++	isb
+ 	mov	x0, #ARM_EXCEPTION_IRQ
+ 	ret
+ 
+diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
+index 5310fe1da6165b..cc9cb63959463a 100644
+--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
++++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
+@@ -295,7 +295,7 @@ static inline bool __populate_fault_info(struct kvm_vcpu *vcpu)
+ 	return __get_fault_info(vcpu->arch.fault.esr_el2, &vcpu->arch.fault);
+ }
+ 
+-static bool kvm_hyp_handle_mops(struct kvm_vcpu *vcpu, u64 *exit_code)
++static inline bool kvm_hyp_handle_mops(struct kvm_vcpu *vcpu, u64 *exit_code)
+ {
+ 	*vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
+ 	arm64_mops_reset_regs(vcpu_gp_regs(vcpu), vcpu->arch.fault.esr_el2);
+@@ -344,7 +344,87 @@ static inline void __hyp_sve_save_host(void)
+ 			 true);
+ }
+ 
+-static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu);
++static inline void fpsimd_lazy_switch_to_guest(struct kvm_vcpu *vcpu)
++{
++	u64 zcr_el1, zcr_el2;
++
++	if (!guest_owns_fp_regs())
++		return;
++
++	if (vcpu_has_sve(vcpu)) {
++		/* A guest hypervisor may restrict the effective max VL. */
++		if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu))
++			zcr_el2 = __vcpu_sys_reg(vcpu, ZCR_EL2);
++		else
++			zcr_el2 = vcpu_sve_max_vq(vcpu) - 1;
++
++		write_sysreg_el2(zcr_el2, SYS_ZCR);
++
++		zcr_el1 = __vcpu_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu));
++		write_sysreg_el1(zcr_el1, SYS_ZCR);
++	}
++}
++
++static inline void fpsimd_lazy_switch_to_host(struct kvm_vcpu *vcpu)
++{
++	u64 zcr_el1, zcr_el2;
++
++	if (!guest_owns_fp_regs())
++		return;
++
++	/*
++	 * When the guest owns the FP regs, we know that guest+hyp traps for
++	 * any FPSIMD/SVE/SME features exposed to the guest have been disabled
++	 * by either fpsimd_lazy_switch_to_guest() or kvm_hyp_handle_fpsimd()
++	 * prior to __guest_enter(). As __guest_enter() guarantees a context
++	 * synchronization event, we don't need an ISB here to avoid taking
++	 * traps for anything that was exposed to the guest.
++	 */
++	if (vcpu_has_sve(vcpu)) {
++		zcr_el1 = read_sysreg_el1(SYS_ZCR);
++		__vcpu_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu)) = zcr_el1;
++
++		/*
++		 * The guest's state is always saved using the guest's max VL.
++		 * Ensure that the host has the guest's max VL active such that
++		 * the host can save the guest's state lazily, but don't
++		 * artificially restrict the host to the guest's max VL.
++		 */
++		if (has_vhe()) {
++			zcr_el2 = vcpu_sve_max_vq(vcpu) - 1;
++			write_sysreg_el2(zcr_el2, SYS_ZCR);
++		} else {
++			zcr_el2 = sve_vq_from_vl(kvm_host_sve_max_vl) - 1;
++			write_sysreg_el2(zcr_el2, SYS_ZCR);
++
++			zcr_el1 = vcpu_sve_max_vq(vcpu) - 1;
++			write_sysreg_el1(zcr_el1, SYS_ZCR);
++		}
++	}
++}
++
++static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
++{
++	/*
++	 * Non-protected kvm relies on the host restoring its sve state.
++	 * Protected kvm restores the host's sve state so as not to reveal that
++	 * fpsimd was used by a guest nor leak upper sve bits.
++	 */
++	if (system_supports_sve()) {
++		__hyp_sve_save_host();
++
++		/* Re-enable SVE traps if not supported for the guest vcpu. */
++		if (!vcpu_has_sve(vcpu))
++			cpacr_clear_set(CPACR_ELx_ZEN, 0);
++
++	} else {
++		__fpsimd_save_state(host_data_ptr(host_ctxt.fp_regs));
++	}
++
++	if (kvm_has_fpmr(kern_hyp_va(vcpu->kvm)))
++		*host_data_ptr(fpmr) = read_sysreg_s(SYS_FPMR);
++}
++
+ 
+ /*
+  * We trap the first access to the FP/SIMD to save the host context and
+@@ -352,7 +432,7 @@ static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu);
+  * If FP/SIMD is not implemented, handle the trap and inject an undefined
+  * instruction exception to the guest. Similarly for trapped SVE accesses.
+  */
+-static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
++static inline bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
+ {
+ 	bool sve_guest;
+ 	u8 esr_ec;
+@@ -394,7 +474,7 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
+ 	isb();
+ 
+ 	/* Write out the host state if it's in the registers */
+-	if (host_owns_fp_regs())
++	if (is_protected_kvm_enabled() && host_owns_fp_regs())
+ 		kvm_hyp_save_fpsimd_host(vcpu);
+ 
+ 	/* Restore the guest state */
+@@ -543,7 +623,7 @@ static bool handle_ampere1_tcr(struct kvm_vcpu *vcpu)
+ 	return true;
+ }
+ 
+-static bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
++static inline bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
+ {
+ 	if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
+ 	    handle_tx2_tvm(vcpu))
+@@ -563,7 +643,7 @@ static bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
+ 	return false;
+ }
+ 
+-static bool kvm_hyp_handle_cp15_32(struct kvm_vcpu *vcpu, u64 *exit_code)
++static inline bool kvm_hyp_handle_cp15_32(struct kvm_vcpu *vcpu, u64 *exit_code)
+ {
+ 	if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
+ 	    __vgic_v3_perform_cpuif_access(vcpu) == 1)
+@@ -572,19 +652,18 @@ static bool kvm_hyp_handle_cp15_32(struct kvm_vcpu *vcpu, u64 *exit_code)
+ 	return false;
+ }
+ 
+-static bool kvm_hyp_handle_memory_fault(struct kvm_vcpu *vcpu, u64 *exit_code)
++static inline bool kvm_hyp_handle_memory_fault(struct kvm_vcpu *vcpu,
++					       u64 *exit_code)
+ {
+ 	if (!__populate_fault_info(vcpu))
+ 		return true;
+ 
+ 	return false;
+ }
+-static bool kvm_hyp_handle_iabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
+-	__alias(kvm_hyp_handle_memory_fault);
+-static bool kvm_hyp_handle_watchpt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
+-	__alias(kvm_hyp_handle_memory_fault);
++#define kvm_hyp_handle_iabt_low		kvm_hyp_handle_memory_fault
++#define kvm_hyp_handle_watchpt_low	kvm_hyp_handle_memory_fault
+ 
+-static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
++static inline bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
+ {
+ 	if (kvm_hyp_handle_memory_fault(vcpu, exit_code))
+ 		return true;
+@@ -614,23 +693,16 @@ static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
+ 
+ typedef bool (*exit_handler_fn)(struct kvm_vcpu *, u64 *);
+ 
+-static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu);
+-
+-static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code);
+-
+ /*
+  * Allow the hypervisor to handle the exit with an exit handler if it has one.
+  *
+  * Returns true if the hypervisor handled the exit, and control should go back
+  * to the guest, or false if it hasn't.
+  */
+-static inline bool kvm_hyp_handle_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
++static inline bool kvm_hyp_handle_exit(struct kvm_vcpu *vcpu, u64 *exit_code,
++				       const exit_handler_fn *handlers)
+ {
+-	const exit_handler_fn *handlers = kvm_get_exit_handler_array(vcpu);
+-	exit_handler_fn fn;
+-
+-	fn = handlers[kvm_vcpu_trap_get_class(vcpu)];
+-
++	exit_handler_fn fn = handlers[kvm_vcpu_trap_get_class(vcpu)];
+ 	if (fn)
+ 		return fn(vcpu, exit_code);
+ 
+@@ -660,20 +732,9 @@ static inline void synchronize_vcpu_pstate(struct kvm_vcpu *vcpu, u64 *exit_code
+  * the guest, false when we should restore the host state and return to the
+  * main run loop.
+  */
+-static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
++static inline bool __fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code,
++				      const exit_handler_fn *handlers)
+ {
+-	/*
+-	 * Save PSTATE early so that we can evaluate the vcpu mode
+-	 * early on.
+-	 */
+-	synchronize_vcpu_pstate(vcpu, exit_code);
+-
+-	/*
+-	 * Check whether we want to repaint the state one way or
+-	 * another.
+-	 */
+-	early_exit_filter(vcpu, exit_code);
+-
+ 	if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
+ 		vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR);
+ 
+@@ -703,7 +764,7 @@ static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
+ 		goto exit;
+ 
+ 	/* Check if there's an exit handler and allow it to handle the exit. */
+-	if (kvm_hyp_handle_exit(vcpu, exit_code))
++	if (kvm_hyp_handle_exit(vcpu, exit_code, handlers))
+ 		goto guest;
+ exit:
+ 	/* Return to the host kernel and handle the exit */
+diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+index fefc89209f9e41..75f7e386de75bc 100644
+--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
++++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+@@ -5,6 +5,7 @@
+  */
+ 
+ #include <hyp/adjust_pc.h>
++#include <hyp/switch.h>
+ 
+ #include <asm/pgtable-types.h>
+ #include <asm/kvm_asm.h>
+@@ -83,7 +84,7 @@ static void fpsimd_sve_sync(struct kvm_vcpu *vcpu)
+ 	if (system_supports_sve())
+ 		__hyp_sve_restore_host();
+ 	else
+-		__fpsimd_restore_state(*host_data_ptr(fpsimd_state));
++		__fpsimd_restore_state(host_data_ptr(host_ctxt.fp_regs));
+ 
+ 	if (has_fpmr)
+ 		write_sysreg_s(*host_data_ptr(fpmr), SYS_FPMR);
+@@ -177,7 +178,9 @@ static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
+ 		pkvm_put_hyp_vcpu(hyp_vcpu);
+ 	} else {
+ 		/* The host is fully trusted, run its vCPU directly. */
++		fpsimd_lazy_switch_to_guest(host_vcpu);
+ 		ret = __kvm_vcpu_run(host_vcpu);
++		fpsimd_lazy_switch_to_host(host_vcpu);
+ 	}
+ 
+ out:
+@@ -486,12 +489,6 @@ void handle_trap(struct kvm_cpu_context *host_ctxt)
+ 	case ESR_ELx_EC_SMC64:
+ 		handle_host_smc(host_ctxt);
+ 		break;
+-	case ESR_ELx_EC_SVE:
+-		cpacr_clear_set(0, CPACR_ELx_ZEN);
+-		isb();
+-		sve_cond_update_zcr_vq(sve_vq_from_vl(kvm_host_sve_max_vl) - 1,
+-				       SYS_ZCR_EL2);
+-		break;
+ 	case ESR_ELx_EC_IABT_LOW:
+ 	case ESR_ELx_EC_DABT_LOW:
+ 		handle_host_mem_abort(host_ctxt);
+diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c
+index 077d4098548d2c..7c464340bcd078 100644
+--- a/arch/arm64/kvm/hyp/nvhe/pkvm.c
++++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c
+@@ -28,8 +28,6 @@ static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu)
+ 	const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64PFR0_EL1);
+ 	u64 hcr_set = HCR_RW;
+ 	u64 hcr_clear = 0;
+-	u64 cptr_set = 0;
+-	u64 cptr_clear = 0;
+ 
+ 	/* Protected KVM does not support AArch32 guests. */
+ 	BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0),
+@@ -59,21 +57,10 @@ static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu)
+ 	/* Trap AMU */
+ 	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU), feature_ids)) {
+ 		hcr_clear |= HCR_AMVOFFEN;
+-		cptr_set |= CPTR_EL2_TAM;
+-	}
+-
+-	/* Trap SVE */
+-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), feature_ids)) {
+-		if (has_hvhe())
+-			cptr_clear |= CPACR_ELx_ZEN;
+-		else
+-			cptr_set |= CPTR_EL2_TZ;
+ 	}
+ 
+ 	vcpu->arch.hcr_el2 |= hcr_set;
+ 	vcpu->arch.hcr_el2 &= ~hcr_clear;
+-	vcpu->arch.cptr_el2 |= cptr_set;
+-	vcpu->arch.cptr_el2 &= ~cptr_clear;
+ }
+ 
+ /*
+@@ -103,7 +90,6 @@ static void pvm_init_traps_aa64dfr0(struct kvm_vcpu *vcpu)
+ 	const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64DFR0_EL1);
+ 	u64 mdcr_set = 0;
+ 	u64 mdcr_clear = 0;
+-	u64 cptr_set = 0;
+ 
+ 	/* Trap/constrain PMU */
+ 	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), feature_ids)) {
+@@ -130,21 +116,12 @@ static void pvm_init_traps_aa64dfr0(struct kvm_vcpu *vcpu)
+ 	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceFilt), feature_ids))
+ 		mdcr_set |= MDCR_EL2_TTRF;
+ 
+-	/* Trap Trace */
+-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceVer), feature_ids)) {
+-		if (has_hvhe())
+-			cptr_set |= CPACR_EL1_TTA;
+-		else
+-			cptr_set |= CPTR_EL2_TTA;
+-	}
+-
+ 	/* Trap External Trace */
+ 	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_ExtTrcBuff), feature_ids))
+ 		mdcr_clear |= MDCR_EL2_E2TB_MASK << MDCR_EL2_E2TB_SHIFT;
+ 
+ 	vcpu->arch.mdcr_el2 |= mdcr_set;
+ 	vcpu->arch.mdcr_el2 &= ~mdcr_clear;
+-	vcpu->arch.cptr_el2 |= cptr_set;
+ }
+ 
+ /*
+@@ -195,10 +172,6 @@ static void pvm_init_trap_regs(struct kvm_vcpu *vcpu)
+ 	/* Clear res0 and set res1 bits to trap potential new features. */
+ 	vcpu->arch.hcr_el2 &= ~(HCR_RES0);
+ 	vcpu->arch.mdcr_el2 &= ~(MDCR_EL2_RES0);
+-	if (!has_hvhe()) {
+-		vcpu->arch.cptr_el2 |= CPTR_NVHE_EL2_RES1;
+-		vcpu->arch.cptr_el2 &= ~(CPTR_NVHE_EL2_RES0);
+-	}
+ }
+ 
+ /*
+@@ -579,8 +552,6 @@ int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu,
+ 		return ret;
+ 	}
+ 
+-	hyp_vcpu->vcpu.arch.cptr_el2 = kvm_get_reset_cptr_el2(&hyp_vcpu->vcpu);
+-
+ 	return 0;
+ }
+ 
+diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
+index cc69106734ca73..a1245fa8383195 100644
+--- a/arch/arm64/kvm/hyp/nvhe/switch.c
++++ b/arch/arm64/kvm/hyp/nvhe/switch.c
+@@ -36,33 +36,71 @@ DEFINE_PER_CPU(unsigned long, kvm_hyp_vector);
+ 
+ extern void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc);
+ 
+-static void __activate_traps(struct kvm_vcpu *vcpu)
++static void __activate_cptr_traps(struct kvm_vcpu *vcpu)
+ {
+-	u64 val;
++	u64 val = CPTR_EL2_TAM;	/* Same bit irrespective of E2H */
+ 
+-	___activate_traps(vcpu, vcpu->arch.hcr_el2);
+-	__activate_traps_common(vcpu);
++	if (!guest_owns_fp_regs())
++		__activate_traps_fpsimd32(vcpu);
+ 
+-	val = vcpu->arch.cptr_el2;
+-	val |= CPTR_EL2_TAM;	/* Same bit irrespective of E2H */
+-	val |= has_hvhe() ? CPACR_EL1_TTA : CPTR_EL2_TTA;
+-	if (cpus_have_final_cap(ARM64_SME)) {
+-		if (has_hvhe())
+-			val &= ~CPACR_ELx_SMEN;
+-		else
+-			val |= CPTR_EL2_TSM;
++	if (has_hvhe()) {
++		val |= CPACR_ELx_TTA;
++
++		if (guest_owns_fp_regs()) {
++			val |= CPACR_ELx_FPEN;
++			if (vcpu_has_sve(vcpu))
++				val |= CPACR_ELx_ZEN;
++		}
++
++		write_sysreg(val, cpacr_el1);
++	} else {
++		val |= CPTR_EL2_TTA | CPTR_NVHE_EL2_RES1;
++
++		/*
++		 * Always trap SME since it's not supported in KVM.
++		 * TSM is RES1 if SME isn't implemented.
++		 */
++		val |= CPTR_EL2_TSM;
++
++		if (!vcpu_has_sve(vcpu) || !guest_owns_fp_regs())
++			val |= CPTR_EL2_TZ;
++
++		if (!guest_owns_fp_regs())
++			val |= CPTR_EL2_TFP;
++
++		write_sysreg(val, cptr_el2);
+ 	}
++}
+ 
+-	if (!guest_owns_fp_regs()) {
+-		if (has_hvhe())
+-			val &= ~(CPACR_ELx_FPEN | CPACR_ELx_ZEN);
+-		else
+-			val |= CPTR_EL2_TFP | CPTR_EL2_TZ;
++static void __deactivate_cptr_traps(struct kvm_vcpu *vcpu)
++{
++	if (has_hvhe()) {
++		u64 val = CPACR_ELx_FPEN;
+ 
+-		__activate_traps_fpsimd32(vcpu);
++		if (cpus_have_final_cap(ARM64_SVE))
++			val |= CPACR_ELx_ZEN;
++		if (cpus_have_final_cap(ARM64_SME))
++			val |= CPACR_ELx_SMEN;
++
++		write_sysreg(val, cpacr_el1);
++	} else {
++		u64 val = CPTR_NVHE_EL2_RES1;
++
++		if (!cpus_have_final_cap(ARM64_SVE))
++			val |= CPTR_EL2_TZ;
++		if (!cpus_have_final_cap(ARM64_SME))
++			val |= CPTR_EL2_TSM;
++
++		write_sysreg(val, cptr_el2);
+ 	}
++}
++
++static void __activate_traps(struct kvm_vcpu *vcpu)
++{
++	___activate_traps(vcpu, vcpu->arch.hcr_el2);
++	__activate_traps_common(vcpu);
++	__activate_cptr_traps(vcpu);
+ 
+-	kvm_write_cptr_el2(val);
+ 	write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el2);
+ 
+ 	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
+@@ -107,7 +145,7 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
+ 
+ 	write_sysreg(this_cpu_ptr(&kvm_init_params)->hcr_el2, hcr_el2);
+ 
+-	kvm_reset_cptr_el2(vcpu);
++	__deactivate_cptr_traps(vcpu);
+ 	write_sysreg(__kvm_hyp_host_vector, vbar_el2);
+ }
+ 
+@@ -180,34 +218,6 @@ static bool kvm_handle_pvm_sys64(struct kvm_vcpu *vcpu, u64 *exit_code)
+ 		kvm_handle_pvm_sysreg(vcpu, exit_code));
+ }
+ 
+-static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
+-{
+-	/*
+-	 * Non-protected kvm relies on the host restoring its sve state.
+-	 * Protected kvm restores the host's sve state as not to reveal that
+-	 * fpsimd was used by a guest nor leak upper sve bits.
+-	 */
+-	if (unlikely(is_protected_kvm_enabled() && system_supports_sve())) {
+-		__hyp_sve_save_host();
+-
+-		/* Re-enable SVE traps if not supported for the guest vcpu. */
+-		if (!vcpu_has_sve(vcpu))
+-			cpacr_clear_set(CPACR_ELx_ZEN, 0);
+-
+-	} else {
+-		__fpsimd_save_state(*host_data_ptr(fpsimd_state));
+-	}
+-
+-	if (kvm_has_fpmr(kern_hyp_va(vcpu->kvm))) {
+-		u64 val = read_sysreg_s(SYS_FPMR);
+-
+-		if (unlikely(is_protected_kvm_enabled()))
+-			*host_data_ptr(fpmr) = val;
+-		else
+-			**host_data_ptr(fpmr_ptr) = val;
+-	}
+-}
+-
+ static const exit_handler_fn hyp_exit_handlers[] = {
+ 	[0 ... ESR_ELx_EC_MAX]		= NULL,
+ 	[ESR_ELx_EC_CP15_32]		= kvm_hyp_handle_cp15_32,
+@@ -239,19 +249,21 @@ static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
+ 	return hyp_exit_handlers;
+ }
+ 
+-/*
+- * Some guests (e.g., protected VMs) are not be allowed to run in AArch32.
+- * The ARMv8 architecture does not give the hypervisor a mechanism to prevent a
+- * guest from dropping to AArch32 EL0 if implemented by the CPU. If the
+- * hypervisor spots a guest in such a state ensure it is handled, and don't
+- * trust the host to spot or fix it.  The check below is based on the one in
+- * kvm_arch_vcpu_ioctl_run().
+- *
+- * Returns false if the guest ran in AArch32 when it shouldn't have, and
+- * thus should exit to the host, or true if a the guest run loop can continue.
+- */
+-static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
++static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
+ {
++	const exit_handler_fn *handlers = kvm_get_exit_handler_array(vcpu);
++
++	synchronize_vcpu_pstate(vcpu, exit_code);
++
++	/*
++	 * Some guests (e.g., protected VMs) are not allowed to run in
++	 * AArch32.  The ARMv8 architecture does not give the hypervisor a
++	 * mechanism to prevent a guest from dropping to AArch32 EL0 if
++	 * implemented by the CPU. If the hypervisor spots a guest in such a
++	 * state ensure it is handled, and don't trust the host to spot or fix
++	 * it.  The check below is based on the one in
++	 * kvm_arch_vcpu_ioctl_run().
++	 */
+ 	if (unlikely(vcpu_is_protected(vcpu) && vcpu_mode_is_32bit(vcpu))) {
+ 		/*
+ 		 * As we have caught the guest red-handed, decide that it isn't
+@@ -264,6 +276,8 @@ static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
+ 		*exit_code &= BIT(ARM_EXIT_WITH_SERROR_BIT);
+ 		*exit_code |= ARM_EXCEPTION_IL;
+ 	}
++
++	return __fixup_guest_exit(vcpu, exit_code, handlers);
+ }
+ 
+ /* Switch to the guest for legacy non-VHE systems */
+diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
+index 80581b1c399595..496abfd3646b98 100644
+--- a/arch/arm64/kvm/hyp/vhe/switch.c
++++ b/arch/arm64/kvm/hyp/vhe/switch.c
+@@ -309,14 +309,6 @@ static bool kvm_hyp_handle_eret(struct kvm_vcpu *vcpu, u64 *exit_code)
+ 	return true;
+ }
+ 
+-static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
+-{
+-	__fpsimd_save_state(*host_data_ptr(fpsimd_state));
+-
+-	if (kvm_has_fpmr(vcpu->kvm))
+-		**host_data_ptr(fpmr_ptr) = read_sysreg_s(SYS_FPMR);
+-}
+-
+ static bool kvm_hyp_handle_tlbi_el2(struct kvm_vcpu *vcpu, u64 *exit_code)
+ {
+ 	int ret = -EINVAL;
+@@ -431,13 +423,10 @@ static const exit_handler_fn hyp_exit_handlers[] = {
+ 	[ESR_ELx_EC_MOPS]		= kvm_hyp_handle_mops,
+ };
+ 
+-static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
++static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
+ {
+-	return hyp_exit_handlers;
+-}
++	synchronize_vcpu_pstate(vcpu, exit_code);
+ 
+-static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
+-{
+ 	/*
+ 	 * If we were in HYP context on entry, adjust the PSTATE view
+ 	 * so that the usual helpers work correctly.
+@@ -457,6 +446,8 @@ static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
+ 		*vcpu_cpsr(vcpu) &= ~(PSR_MODE_MASK | PSR_MODE32_BIT);
+ 		*vcpu_cpsr(vcpu) |= mode;
+ 	}
++
++	return __fixup_guest_exit(vcpu, exit_code, hyp_exit_handlers);
+ }
+ 
+ /* Switch to the guest for VHE systems running in EL2 */
+@@ -471,6 +462,8 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
+ 
+ 	sysreg_save_host_state_vhe(host_ctxt);
+ 
++	fpsimd_lazy_switch_to_guest(vcpu);
++
+ 	/*
+ 	 * Note that ARM erratum 1165522 requires us to configure both stage 1
+ 	 * and stage 2 translation for the guest context before we clear
+@@ -495,6 +488,8 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
+ 
+ 	__deactivate_traps(vcpu);
+ 
++	fpsimd_lazy_switch_to_host(vcpu);
++
+ 	sysreg_restore_host_state_vhe(host_ctxt);
+ 
+ 	if (guest_owns_fp_regs())
+diff --git a/arch/riscv/boot/dts/starfive/jh7110-pinfunc.h b/arch/riscv/boot/dts/starfive/jh7110-pinfunc.h
+index 256de17f526113..ae49c908e7fb3f 100644
+--- a/arch/riscv/boot/dts/starfive/jh7110-pinfunc.h
++++ b/arch/riscv/boot/dts/starfive/jh7110-pinfunc.h
+@@ -89,7 +89,7 @@
+ #define GPOUT_SYS_SDIO1_DATA1			59
+ #define GPOUT_SYS_SDIO1_DATA2			60
+ #define GPOUT_SYS_SDIO1_DATA3			61
+-#define GPOUT_SYS_SDIO1_DATA4			63
++#define GPOUT_SYS_SDIO1_DATA4			62
+ #define GPOUT_SYS_SDIO1_DATA5			63
+ #define GPOUT_SYS_SDIO1_DATA6			64
+ #define GPOUT_SYS_SDIO1_DATA7			65
+diff --git a/drivers/accel/qaic/qaic_data.c b/drivers/accel/qaic/qaic_data.c
+index c20eb63750f517..43aba57b48f05f 100644
+--- a/drivers/accel/qaic/qaic_data.c
++++ b/drivers/accel/qaic/qaic_data.c
+@@ -172,9 +172,10 @@ static void free_slice(struct kref *kref)
+ static int clone_range_of_sgt_for_slice(struct qaic_device *qdev, struct sg_table **sgt_out,
+ 					struct sg_table *sgt_in, u64 size, u64 offset)
+ {
+-	int total_len, len, nents, offf = 0, offl = 0;
+ 	struct scatterlist *sg, *sgn, *sgf, *sgl;
++	unsigned int len, nents, offf, offl;
+ 	struct sg_table *sgt;
++	size_t total_len;
+ 	int ret, j;
+ 
+ 	/* find out number of relevant nents needed for this mem */
+@@ -182,6 +183,8 @@ static int clone_range_of_sgt_for_slice(struct qaic_device *qdev, struct sg_tabl
+ 	sgf = NULL;
+ 	sgl = NULL;
+ 	nents = 0;
++	offf = 0;
++	offl = 0;
+ 
+ 	size = size ? size : PAGE_SIZE;
+ 	for_each_sgtable_dma_sg(sgt_in, sg, j) {
+@@ -554,6 +557,7 @@ static bool invalid_sem(struct qaic_sem *sem)
+ static int qaic_validate_req(struct qaic_device *qdev, struct qaic_attach_slice_entry *slice_ent,
+ 			     u32 count, u64 total_size)
+ {
++	u64 total;
+ 	int i;
+ 
+ 	for (i = 0; i < count; i++) {
+@@ -563,7 +567,8 @@ static int qaic_validate_req(struct qaic_device *qdev, struct qaic_attach_slice_
+ 		      invalid_sem(&slice_ent[i].sem2) || invalid_sem(&slice_ent[i].sem3))
+ 			return -EINVAL;
+ 
+-		if (slice_ent[i].offset + slice_ent[i].size > total_size)
++		if (check_add_overflow(slice_ent[i].offset, slice_ent[i].size, &total) ||
++		    total > total_size)
+ 			return -EINVAL;
+ 	}
+ 
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index c085dd81ebe7f6..d956735e2a7645 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -2845,6 +2845,10 @@ int ata_dev_configure(struct ata_device *dev)
+ 	    (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
+ 		dev->quirks |= ATA_QUIRK_NOLPM;
+ 
++	if (dev->quirks & ATA_QUIRK_NO_LPM_ON_ATI &&
++	    ata_dev_check_adapter(dev, PCI_VENDOR_ID_ATI))
++		dev->quirks |= ATA_QUIRK_NOLPM;
++
+ 	if (ap->flags & ATA_FLAG_NO_LPM)
+ 		dev->quirks |= ATA_QUIRK_NOLPM;
+ 
+@@ -3897,6 +3901,7 @@ static const char * const ata_quirk_names[] = {
+ 	[__ATA_QUIRK_MAX_SEC_1024]	= "maxsec1024",
+ 	[__ATA_QUIRK_MAX_TRIM_128M]	= "maxtrim128m",
+ 	[__ATA_QUIRK_NO_NCQ_ON_ATI]	= "noncqonati",
++	[__ATA_QUIRK_NO_LPM_ON_ATI]	= "nolpmonati",
+ 	[__ATA_QUIRK_NO_ID_DEV_LOG]	= "noiddevlog",
+ 	[__ATA_QUIRK_NO_LOG_DIR]	= "nologdir",
+ 	[__ATA_QUIRK_NO_FUA]		= "nofua",
+@@ -4142,13 +4147,16 @@ static const struct ata_dev_quirks_entry __ata_dev_quirks[] = {
+ 						ATA_QUIRK_ZERO_AFTER_TRIM },
+ 	{ "Samsung SSD 860*",		NULL,	ATA_QUIRK_NO_NCQ_TRIM |
+ 						ATA_QUIRK_ZERO_AFTER_TRIM |
+-						ATA_QUIRK_NO_NCQ_ON_ATI },
++						ATA_QUIRK_NO_NCQ_ON_ATI |
++						ATA_QUIRK_NO_LPM_ON_ATI },
+ 	{ "Samsung SSD 870*",		NULL,	ATA_QUIRK_NO_NCQ_TRIM |
+ 						ATA_QUIRK_ZERO_AFTER_TRIM |
+-						ATA_QUIRK_NO_NCQ_ON_ATI },
++						ATA_QUIRK_NO_NCQ_ON_ATI |
++						ATA_QUIRK_NO_LPM_ON_ATI },
+ 	{ "SAMSUNG*MZ7LH*",		NULL,	ATA_QUIRK_NO_NCQ_TRIM |
+ 						ATA_QUIRK_ZERO_AFTER_TRIM |
+-						ATA_QUIRK_NO_NCQ_ON_ATI, },
++						ATA_QUIRK_NO_NCQ_ON_ATI |
++						ATA_QUIRK_NO_LPM_ON_ATI },
+ 	{ "FCCT*M500*",			NULL,	ATA_QUIRK_NO_NCQ_TRIM |
+ 						ATA_QUIRK_ZERO_AFTER_TRIM },
+ 
+diff --git a/drivers/dpll/dpll_core.c b/drivers/dpll/dpll_core.c
+index 32019dc33cca7e..1877201d1aa9fe 100644
+--- a/drivers/dpll/dpll_core.c
++++ b/drivers/dpll/dpll_core.c
+@@ -505,7 +505,7 @@ dpll_pin_alloc(u64 clock_id, u32 pin_idx, struct module *module,
+ 	xa_init_flags(&pin->parent_refs, XA_FLAGS_ALLOC);
+ 	ret = xa_alloc_cyclic(&dpll_pin_xa, &pin->id, pin, xa_limit_32b,
+ 			      &dpll_pin_xa_id, GFP_KERNEL);
+-	if (ret)
++	if (ret < 0)
+ 		goto err_xa_alloc;
+ 	return pin;
+ err_xa_alloc:
+diff --git a/drivers/firmware/efi/libstub/randomalloc.c b/drivers/firmware/efi/libstub/randomalloc.c
+index 8ad3efb9b1ff16..593e98e3b993ea 100644
+--- a/drivers/firmware/efi/libstub/randomalloc.c
++++ b/drivers/firmware/efi/libstub/randomalloc.c
+@@ -75,6 +75,10 @@ efi_status_t efi_random_alloc(unsigned long size,
+ 	if (align < EFI_ALLOC_ALIGN)
+ 		align = EFI_ALLOC_ALIGN;
+ 
++	/* Avoid address 0x0, as it can be mistaken for NULL */
++	if (alloc_min == 0)
++		alloc_min = align;
++
+ 	size = round_up(size, EFI_ALLOC_ALIGN);
+ 
+ 	/* count the suitable slots in each memory map entry */
+diff --git a/drivers/firmware/imx/imx-scu.c b/drivers/firmware/imx/imx-scu.c
+index 1dd4362ef9a3fc..8c28e25ddc8a65 100644
+--- a/drivers/firmware/imx/imx-scu.c
++++ b/drivers/firmware/imx/imx-scu.c
+@@ -280,6 +280,7 @@ static int imx_scu_probe(struct platform_device *pdev)
+ 		return ret;
+ 
+ 	sc_ipc->fast_ipc = of_device_is_compatible(args.np, "fsl,imx8-mu-scu");
++	of_node_put(args.np);
+ 
+ 	num_channel = sc_ipc->fast_ipc ? 2 : SCU_MU_CHAN_NUM;
+ 	for (i = 0; i < num_channel; i++) {
+diff --git a/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c b/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c
+index 447246bd04be3f..98a463e9774bf0 100644
+--- a/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c
++++ b/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c
+@@ -814,15 +814,6 @@ static int qcom_uefisecapp_probe(struct auxiliary_device *aux_dev,
+ 
+ 	qcuefi->client = container_of(aux_dev, struct qseecom_client, aux_dev);
+ 
+-	auxiliary_set_drvdata(aux_dev, qcuefi);
+-	status = qcuefi_set_reference(qcuefi);
+-	if (status)
+-		return status;
+-
+-	status = efivars_register(&qcuefi->efivars, &qcom_efivar_ops);
+-	if (status)
+-		qcuefi_set_reference(NULL);
+-
+ 	memset(&pool_config, 0, sizeof(pool_config));
+ 	pool_config.initial_size = SZ_4K;
+ 	pool_config.policy = QCOM_TZMEM_POLICY_MULTIPLIER;
+@@ -833,6 +824,15 @@ static int qcom_uefisecapp_probe(struct auxiliary_device *aux_dev,
+ 	if (IS_ERR(qcuefi->mempool))
+ 		return PTR_ERR(qcuefi->mempool);
+ 
++	auxiliary_set_drvdata(aux_dev, qcuefi);
++	status = qcuefi_set_reference(qcuefi);
++	if (status)
++		return status;
++
++	status = efivars_register(&qcuefi->efivars, &qcom_efivar_ops);
++	if (status)
++		qcuefi_set_reference(NULL);
++
+ 	return status;
+ }
+ 
+diff --git a/drivers/firmware/qcom/qcom_scm.c b/drivers/firmware/qcom/qcom_scm.c
+index 2e093c39b610ae..23aefbf6fca588 100644
+--- a/drivers/firmware/qcom/qcom_scm.c
++++ b/drivers/firmware/qcom/qcom_scm.c
+@@ -2054,8 +2054,8 @@ static int qcom_scm_probe(struct platform_device *pdev)
+ 
+ 	__scm->mempool = devm_qcom_tzmem_pool_new(__scm->dev, &pool_config);
+ 	if (IS_ERR(__scm->mempool)) {
+-		dev_err_probe(__scm->dev, PTR_ERR(__scm->mempool),
+-			      "Failed to create the SCM memory pool\n");
++		ret = dev_err_probe(__scm->dev, PTR_ERR(__scm->mempool),
++				    "Failed to create the SCM memory pool\n");
+ 		goto err;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
+index ca130880edfd42..d3798a333d1f88 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
+@@ -2395,7 +2395,7 @@ static int gfx_v12_0_cp_gfx_load_me_microcode_rs64(struct amdgpu_device *adev)
+ 				      (void **)&adev->gfx.me.me_fw_data_ptr);
+ 	if (r) {
+ 		dev_err(adev->dev, "(%d) failed to create me data bo\n", r);
+-		gfx_v12_0_pfp_fini(adev);
++		gfx_v12_0_me_fini(adev);
+ 		return r;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
+index 9c6824e1c15660..60acf676000b34 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
+@@ -498,9 +498,6 @@ static void gmc_v12_0_get_vm_pte(struct amdgpu_device *adev,
+ 				 uint64_t *flags)
+ {
+ 	struct amdgpu_bo *bo = mapping->bo_va->base.bo;
+-	struct amdgpu_device *bo_adev;
+-	bool coherent, is_system;
+-
+ 
+ 	*flags &= ~AMDGPU_PTE_EXECUTABLE;
+ 	*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
+@@ -516,26 +513,11 @@ static void gmc_v12_0_get_vm_pte(struct amdgpu_device *adev,
+ 		*flags &= ~AMDGPU_PTE_VALID;
+ 	}
+ 
+-	if (!bo)
+-		return;
+-
+-	if (bo->flags & (AMDGPU_GEM_CREATE_COHERENT |
+-			       AMDGPU_GEM_CREATE_UNCACHED))
+-		*flags = AMDGPU_PTE_MTYPE_GFX12(*flags, MTYPE_UC);
+-
+-	bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
+-	coherent = bo->flags & AMDGPU_GEM_CREATE_COHERENT;
+-	is_system = bo->tbo.resource &&
+-		(bo->tbo.resource->mem_type == TTM_PL_TT ||
+-		 bo->tbo.resource->mem_type == AMDGPU_PL_PREEMPT);
+-
+ 	if (bo && bo->flags & AMDGPU_GEM_CREATE_GFX12_DCC)
+ 		*flags |= AMDGPU_PTE_DCC;
+ 
+-	/* WA for HW bug */
+-	if (is_system || ((bo_adev != adev) && coherent))
+-		*flags = AMDGPU_PTE_MTYPE_GFX12(*flags, MTYPE_NC);
+-
++	if (bo && bo->flags & AMDGPU_GEM_CREATE_UNCACHED)
++		*flags = AMDGPU_PTE_MTYPE_GFX12(*flags, MTYPE_UC);
+ }
+ 
+ static unsigned gmc_v12_0_get_vbios_fb_size(struct amdgpu_device *adev)
+diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
+index 73065a85e0d264..4f94a119d62754 100644
+--- a/drivers/gpu/drm/amd/amdgpu/nv.c
++++ b/drivers/gpu/drm/amd/amdgpu/nv.c
+@@ -78,12 +78,12 @@ static const struct amdgpu_video_codecs nv_video_codecs_encode = {
+ 
+ /* Navi1x */
+ static const struct amdgpu_video_codec_info nv_video_codecs_decode_array[] = {
+-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
+-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 1920, 1088, 3)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 1920, 1088, 5)},
+ 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
+-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 1920, 1088, 4)},
+ 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
+-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 8192, 8192, 0)},
+ 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
+ };
+ 
+@@ -104,10 +104,10 @@ static const struct amdgpu_video_codecs sc_video_codecs_encode = {
+ };
+ 
+ static const struct amdgpu_video_codec_info sc_video_codecs_decode_array_vcn0[] = {
+-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
+-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 1920, 1088, 3)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 1920, 1088, 5)},
+ 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
+-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 1920, 1088, 4)},
+ 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
+ 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)},
+ 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
+@@ -115,10 +115,10 @@ static const struct amdgpu_video_codec_info sc_video_codecs_decode_array_vcn0[]
+ };
+ 
+ static const struct amdgpu_video_codec_info sc_video_codecs_decode_array_vcn1[] = {
+-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
+-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 1920, 1088, 3)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 1920, 1088, 5)},
+ 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
+-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 1920, 1088, 4)},
+ 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
+ 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)},
+ 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
+index 307185c0e1b8f2..4cbe0da100d8f3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
+@@ -103,12 +103,11 @@ static const struct amdgpu_video_codecs vega_video_codecs_encode =
+ /* Vega */
+ static const struct amdgpu_video_codec_info vega_video_codecs_decode_array[] =
+ {
+-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
+-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 1920, 1088, 3)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 1920, 1088, 5)},
+ 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
+-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 1920, 1088, 4)},
+ 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 186)},
+-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ };
+ 
+ static const struct amdgpu_video_codecs vega_video_codecs_decode =
+@@ -120,12 +119,12 @@ static const struct amdgpu_video_codecs vega_video_codecs_decode =
+ /* Raven */
+ static const struct amdgpu_video_codec_info rv_video_codecs_decode_array[] =
+ {
+-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
+-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 1920, 1088, 3)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 1920, 1088, 5)},
+ 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
+-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 1920, 1088, 4)},
+ 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 186)},
+-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 8192, 8192, 0)},
+ 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 4096, 4096, 0)},
+ };
+ 
+@@ -138,10 +137,10 @@ static const struct amdgpu_video_codecs rv_video_codecs_decode =
+ /* Renoir, Arcturus */
+ static const struct amdgpu_video_codec_info rn_video_codecs_decode_array[] =
+ {
+-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
+-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 1920, 1088, 3)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 1920, 1088, 5)},
+ 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
+-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 1920, 1088, 4)},
+ 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
+ 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)},
+ 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
+diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
+index 792b2eb6bbacea..48ab93e715c823 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vi.c
++++ b/drivers/gpu/drm/amd/amdgpu/vi.c
+@@ -167,16 +167,16 @@ static const struct amdgpu_video_codec_info tonga_video_codecs_decode_array[] =
+ {
+ 	{
+ 		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
+-		.max_width = 4096,
+-		.max_height = 4096,
+-		.max_pixels_per_frame = 4096 * 4096,
++		.max_width = 1920,
++		.max_height = 1088,
++		.max_pixels_per_frame = 1920 * 1088,
+ 		.max_level = 3,
+ 	},
+ 	{
+ 		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
+-		.max_width = 4096,
+-		.max_height = 4096,
+-		.max_pixels_per_frame = 4096 * 4096,
++		.max_width = 1920,
++		.max_height = 1088,
++		.max_pixels_per_frame = 1920 * 1088,
+ 		.max_level = 5,
+ 	},
+ 	{
+@@ -188,9 +188,9 @@ static const struct amdgpu_video_codec_info tonga_video_codecs_decode_array[] =
+ 	},
+ 	{
+ 		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
+-		.max_width = 4096,
+-		.max_height = 4096,
+-		.max_pixels_per_frame = 4096 * 4096,
++		.max_width = 1920,
++		.max_height = 1088,
++		.max_pixels_per_frame = 1920 * 1088,
+ 		.max_level = 4,
+ 	},
+ };
+@@ -206,16 +206,16 @@ static const struct amdgpu_video_codec_info cz_video_codecs_decode_array[] =
+ {
+ 	{
+ 		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
+-		.max_width = 4096,
+-		.max_height = 4096,
+-		.max_pixels_per_frame = 4096 * 4096,
++		.max_width = 1920,
++		.max_height = 1088,
++		.max_pixels_per_frame = 1920 * 1088,
+ 		.max_level = 3,
+ 	},
+ 	{
+ 		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
+-		.max_width = 4096,
+-		.max_height = 4096,
+-		.max_pixels_per_frame = 4096 * 4096,
++		.max_width = 1920,
++		.max_height = 1088,
++		.max_pixels_per_frame = 1920 * 1088,
+ 		.max_level = 5,
+ 	},
+ 	{
+@@ -227,9 +227,9 @@ static const struct amdgpu_video_codec_info cz_video_codecs_decode_array[] =
+ 	},
+ 	{
+ 		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
+-		.max_width = 4096,
+-		.max_height = 4096,
+-		.max_pixels_per_frame = 4096 * 4096,
++		.max_width = 1920,
++		.max_height = 1088,
++		.max_pixels_per_frame = 1920 * 1088,
+ 		.max_level = 4,
+ 	},
+ 	{
+@@ -239,13 +239,6 @@ static const struct amdgpu_video_codec_info cz_video_codecs_decode_array[] =
+ 		.max_pixels_per_frame = 4096 * 4096,
+ 		.max_level = 186,
+ 	},
+-	{
+-		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
+-		.max_width = 4096,
+-		.max_height = 4096,
+-		.max_pixels_per_frame = 4096 * 4096,
+-		.max_level = 0,
+-	},
+ };
+ 
+ static const struct amdgpu_video_codecs cz_video_codecs_decode =
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
+index 80c85b6cc478a9..29d7cb4cfe69ae 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
+@@ -233,6 +233,7 @@ void kfd_queue_buffer_put(struct amdgpu_bo **bo)
+ int kfd_queue_acquire_buffers(struct kfd_process_device *pdd, struct queue_properties *properties)
+ {
+ 	struct kfd_topology_device *topo_dev;
++	u64 expected_queue_size;
+ 	struct amdgpu_vm *vm;
+ 	u32 total_cwsr_size;
+ 	int err;
+@@ -241,6 +242,15 @@ int kfd_queue_acquire_buffers(struct kfd_process_device *pdd, struct queue_prope
+ 	if (!topo_dev)
+ 		return -EINVAL;
+ 
++	/* AQL queues on GFX7 and GFX8 appear to be twice their actual size */
++	if (properties->type == KFD_QUEUE_TYPE_COMPUTE &&
++	    properties->format == KFD_QUEUE_FORMAT_AQL &&
++	    topo_dev->node_props.gfx_target_version >= 70000 &&
++	    topo_dev->node_props.gfx_target_version < 90000)
++		expected_queue_size = properties->queue_size / 2;
++	else
++		expected_queue_size = properties->queue_size;
++
+ 	vm = drm_priv_to_vm(pdd->drm_priv);
+ 	err = amdgpu_bo_reserve(vm->root.bo, false);
+ 	if (err)
+@@ -255,7 +265,7 @@ int kfd_queue_acquire_buffers(struct kfd_process_device *pdd, struct queue_prope
+ 		goto out_err_unreserve;
+ 
+ 	err = kfd_queue_buffer_get(vm, (void *)properties->queue_address,
+-				   &properties->ring_bo, properties->queue_size);
++				   &properties->ring_bo, expected_queue_size);
+ 	if (err)
+ 		goto out_err_unreserve;
+ 
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+index 1893c27746a523..8c61dee5ca0db1 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+@@ -1276,13 +1276,7 @@ svm_range_get_pte_flags(struct kfd_node *node,
+ 		break;
+ 	case IP_VERSION(12, 0, 0):
+ 	case IP_VERSION(12, 0, 1):
+-		if (domain == SVM_RANGE_VRAM_DOMAIN) {
+-			if (bo_node != node)
+-				mapping_flags |= AMDGPU_VM_MTYPE_NC;
+-		} else {
+-			mapping_flags |= coherent ?
+-				AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
+-		}
++		mapping_flags |= AMDGPU_VM_MTYPE_NC;
+ 		break;
+ 	default:
+ 		mapping_flags |= coherent ?
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 0688a428ee4f7d..d9a3917d207e93 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -1720,7 +1720,7 @@ static void retrieve_dmi_info(struct amdgpu_display_manager *dm, struct dc_init_
+ 	}
+ 	if (quirk_entries.support_edp0_on_dp1) {
+ 		init_data->flags.support_edp0_on_dp1 = true;
+-		drm_info(dev, "aux_hpd_discon_quirk attached\n");
++		drm_info(dev, "support_edp0_on_dp1 attached\n");
+ 	}
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
+index bf636b28e3e16e..6e2fce329d7382 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
+@@ -69,5 +69,16 @@ bool should_use_dmub_lock(struct dc_link *link)
+ 	if (link->replay_settings.replay_feature_enabled)
+ 		return true;
+ 
++	/* only use HW lock for PSR1 on single eDP */
++	if (link->psr_settings.psr_version == DC_PSR_VERSION_1) {
++		struct dc_link *edp_links[MAX_NUM_EDP];
++		int edp_num;
++
++		dc_get_edp_links(link->dc, edp_links, &edp_num);
++
++		if (edp_num == 1)
++			return true;
++	}
++
+ 	return false;
+ }
+diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+index 0fa6fbee197899..bfdfba676025e7 100644
+--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
++++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+@@ -2493,6 +2493,8 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
+ 		case IP_VERSION(11, 0, 1):
+ 		case IP_VERSION(11, 0, 2):
+ 		case IP_VERSION(11, 0, 3):
++		case IP_VERSION(12, 0, 0):
++		case IP_VERSION(12, 0, 1):
+ 			*states = ATTR_STATE_SUPPORTED;
+ 			break;
+ 		default:
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
+index 9ec53431f2c32d..e98a6a2f3e6acc 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
+@@ -1206,16 +1206,9 @@ static int smu_v14_0_2_print_clk_levels(struct smu_context *smu,
+ 							 PP_OD_FEATURE_GFXCLK_BIT))
+ 			break;
+ 
+-		PPTable_t *pptable = smu->smu_table.driver_pptable;
+-		const OverDriveLimits_t * const overdrive_upperlimits =
+-					&pptable->SkuTable.OverDriveLimitsBasicMax;
+-		const OverDriveLimits_t * const overdrive_lowerlimits =
+-					&pptable->SkuTable.OverDriveLimitsBasicMin;
+-
+ 		size += sysfs_emit_at(buf, size, "OD_SCLK_OFFSET:\n");
+-		size += sysfs_emit_at(buf, size, "0: %dMhz\n1: %uMhz\n",
+-					overdrive_lowerlimits->GfxclkFoffset,
+-					overdrive_upperlimits->GfxclkFoffset);
++		size += sysfs_emit_at(buf, size, "%dMhz\n",
++					od_table->OverDriveTable.GfxclkFoffset);
+ 		break;
+ 
+ 	case SMU_OD_MCLK:
+@@ -1349,13 +1342,9 @@ static int smu_v14_0_2_print_clk_levels(struct smu_context *smu,
+ 		size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
+ 
+ 		if (smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT)) {
+-			smu_v14_0_2_get_od_setting_limits(smu,
+-							  PP_OD_FEATURE_GFXCLK_FMIN,
+-							  &min_value,
+-							  NULL);
+ 			smu_v14_0_2_get_od_setting_limits(smu,
+ 							  PP_OD_FEATURE_GFXCLK_FMAX,
+-							  NULL,
++							  &min_value,
+ 							  &max_value);
+ 			size += sysfs_emit_at(buf, size, "SCLK_OFFSET: %7dMhz %10uMhz\n",
+ 					      min_value, max_value);
+@@ -1639,6 +1628,39 @@ static void smu_v14_0_2_get_unique_id(struct smu_context *smu)
+ 	adev->unique_id = ((uint64_t)upper32 << 32) | lower32;
+ }
+ 
++static int smu_v14_0_2_get_fan_speed_pwm(struct smu_context *smu,
++					 uint32_t *speed)
++{
++	int ret;
++
++	if (!speed)
++		return -EINVAL;
++
++	ret = smu_v14_0_2_get_smu_metrics_data(smu,
++					       METRICS_CURR_FANPWM,
++					       speed);
++	if (ret) {
++		dev_err(smu->adev->dev, "Failed to get fan speed(PWM)!");
++		return ret;
++	}
++
++	/* Convert the PMFW output which is in percent to pwm(255) based */
++	*speed = min(*speed * 255 / 100, (uint32_t)255);
++
++	return 0;
++}
++
++static int smu_v14_0_2_get_fan_speed_rpm(struct smu_context *smu,
++					 uint32_t *speed)
++{
++	if (!speed)
++		return -EINVAL;
++
++	return smu_v14_0_2_get_smu_metrics_data(smu,
++						METRICS_CURR_FANSPEED,
++						speed);
++}
++
+ static int smu_v14_0_2_get_power_limit(struct smu_context *smu,
+ 				       uint32_t *current_power_limit,
+ 				       uint32_t *default_power_limit,
+@@ -2429,36 +2451,24 @@ static int smu_v14_0_2_od_edit_dpm_table(struct smu_context *smu,
+ 			return -ENOTSUPP;
+ 		}
+ 
+-		for (i = 0; i < size; i += 2) {
+-			if (i + 2 > size) {
+-				dev_info(adev->dev, "invalid number of input parameters %d\n", size);
+-				return -EINVAL;
+-			}
+-
+-			switch (input[i]) {
+-			case 1:
+-				smu_v14_0_2_get_od_setting_limits(smu,
+-								  PP_OD_FEATURE_GFXCLK_FMAX,
+-								  &minimum,
+-								  &maximum);
+-				if (input[i + 1] < minimum ||
+-				    input[i + 1] > maximum) {
+-					dev_info(adev->dev, "GfxclkFmax (%ld) must be within [%u, %u]!\n",
+-						input[i + 1], minimum, maximum);
+-					return -EINVAL;
+-				}
+-
+-				od_table->OverDriveTable.GfxclkFoffset = input[i + 1];
+-				od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFXCLK_BIT;
+-				break;
++		if (size != 1) {
++			dev_info(adev->dev, "invalid number of input parameters %d\n", size);
++			return -EINVAL;
++		}
+ 
+-			default:
+-				dev_info(adev->dev, "Invalid SCLK_VDDC_TABLE index: %ld\n", input[i]);
+-				dev_info(adev->dev, "Supported indices: [0:min,1:max]\n");
+-				return -EINVAL;
+-			}
++		smu_v14_0_2_get_od_setting_limits(smu,
++						  PP_OD_FEATURE_GFXCLK_FMAX,
++						  &minimum,
++						  &maximum);
++		if (input[0] < minimum ||
++		    input[0] > maximum) {
++			dev_info(adev->dev, "GfxclkFoffset must be within [%d, %u]!\n",
++				 minimum, maximum);
++			return -EINVAL;
+ 		}
+ 
++		od_table->OverDriveTable.GfxclkFoffset = input[0];
++		od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFXCLK_BIT;
+ 		break;
+ 
+ 	case PP_OD_EDIT_MCLK_VDDC_TABLE:
+@@ -2817,6 +2827,8 @@ static const struct pptable_funcs smu_v14_0_2_ppt_funcs = {
+ 	.set_performance_level = smu_v14_0_set_performance_level,
+ 	.gfx_off_control = smu_v14_0_gfx_off_control,
+ 	.get_unique_id = smu_v14_0_2_get_unique_id,
++	.get_fan_speed_pwm = smu_v14_0_2_get_fan_speed_pwm,
++	.get_fan_speed_rpm = smu_v14_0_2_get_fan_speed_rpm,
+ 	.get_power_limit = smu_v14_0_2_get_power_limit,
+ 	.set_power_limit = smu_v14_0_2_set_power_limit,
+ 	.get_power_profile_mode = smu_v14_0_2_get_power_profile_mode,
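
The new fan-speed callback above scales the firmware's percent reading onto the 0-255 scale that hwmon expects, clamping the result. A minimal user-space sketch of the same arithmetic (the function name and test values are illustrative, not from the driver):

#include <stdio.h>

/* Map a 0-100 percent reading onto the 0-255 PWM scale, clamping
 * out-of-range input, mirroring min(*speed * 255 / 100, 255) above. */
static unsigned int pwm_from_percent(unsigned int percent)
{
	unsigned int pwm = percent * 255 / 100;

	return pwm > 255 ? 255 : pwm;
}

int main(void)
{
	printf("%u %u %u\n", pwm_from_percent(0),
	       pwm_from_percent(50), pwm_from_percent(100));
	/* prints: 0 127 255 */
	return 0;
}
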
+diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c
+index d1871af967d4af..2355a78e1b69d6 100644
+--- a/drivers/gpu/drm/radeon/radeon_vce.c
++++ b/drivers/gpu/drm/radeon/radeon_vce.c
+@@ -557,7 +557,7 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
+ {
+ 	int session_idx = -1;
+ 	bool destroyed = false, created = false, allocated = false;
+-	uint32_t tmp, handle = 0;
++	uint32_t tmp = 0, handle = 0;
+ 	uint32_t *size = &tmp;
+ 	int i, r = 0;
+ 
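
The radeon change initializes tmp because size aliases it (uint32_t *size = &tmp;) and later parser paths can read *size before any command packet assigns it. A compact sketch of why the aliased local must start defined (values are arbitrary):

#include <stdio.h>

int main(void)
{
	unsigned int tmp = 0;		/* the fix: defined before first use */
	unsigned int *size = &tmp;	/* aliases tmp, as in the CS parser */

	/* parser paths may consult *size before any packet sets it */
	if (*size == 0)
		puts("no session buffer sized yet");

	*size = 4096;			/* a later packet fills it in */
	printf("size now %u\n", *size);
	return 0;
}
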
+diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
+index a75eede8bf8dab..002057be0d84a2 100644
+--- a/drivers/gpu/drm/scheduler/sched_entity.c
++++ b/drivers/gpu/drm/scheduler/sched_entity.c
+@@ -259,9 +259,16 @@ static void drm_sched_entity_kill(struct drm_sched_entity *entity)
+ 		struct drm_sched_fence *s_fence = job->s_fence;
+ 
+ 		dma_fence_get(&s_fence->finished);
+-		if (!prev || dma_fence_add_callback(prev, &job->finish_cb,
+-					   drm_sched_entity_kill_jobs_cb))
++		if (!prev ||
++		    dma_fence_add_callback(prev, &job->finish_cb,
++					   drm_sched_entity_kill_jobs_cb)) {
++			/*
++			 * Adding callback above failed.
++			 * dma_fence_put() checks for NULL.
++			 */
++			dma_fence_put(prev);
+ 			drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
++		}
+ 
+ 		prev = &s_fence->finished;
+ 	}
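
The scheduler fix above plugs a reference leak: prev carries a reference taken on the previous iteration's fence, and when dma_fence_add_callback() fails (or prev is NULL) nothing consumed it. The pattern, reduced to a plain refcount in user space (all names here are hypothetical stand-ins):

#include <stdio.h>
#include <stdlib.h>

struct obj { int refs; };

static struct obj *obj_new(void)
{
	struct obj *o = malloc(sizeof(*o));

	o->refs = 1;
	return o;
}

static void obj_put(struct obj *o)	/* tolerates NULL, like dma_fence_put() */
{
	if (o && --o->refs == 0)
		free(o);
}

/* Stand-in for dma_fence_add_callback(): on success the callback runs later
 * and consumes the reference (simulated by dropping it immediately);
 * returns nonzero when there is nothing to chain onto. */
static int add_callback(struct obj *o)
{
	if (!o)
		return -1;
	obj_put(o);
	return 0;
}

int main(void)
{
	struct obj *prev = NULL;

	for (int i = 0; i < 3; i++) {
		struct obj *cur = obj_new();

		if (add_callback(prev))
			obj_put(prev);	/* the fix: failure path drops the ref itself */
		prev = cur;
	}
	obj_put(prev);
	puts("all references balanced");
	return 0;
}
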
+diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
+index 4f935f1d50a943..3066cfdb054cc0 100644
+--- a/drivers/gpu/drm/v3d/v3d_sched.c
++++ b/drivers/gpu/drm/v3d/v3d_sched.c
+@@ -319,11 +319,15 @@ v3d_tfu_job_run(struct drm_sched_job *sched_job)
+ 	struct drm_device *dev = &v3d->drm;
+ 	struct dma_fence *fence;
+ 
++	if (unlikely(job->base.base.s_fence->finished.error))
++		return NULL;
++
++	v3d->tfu_job = job;
++
+ 	fence = v3d_fence_create(v3d, V3D_TFU);
+ 	if (IS_ERR(fence))
+ 		return NULL;
+ 
+-	v3d->tfu_job = job;
+ 	if (job->base.irq_fence)
+ 		dma_fence_put(job->base.irq_fence);
+ 	job->base.irq_fence = dma_fence_get(fence);
+@@ -361,6 +365,9 @@ v3d_csd_job_run(struct drm_sched_job *sched_job)
+ 	struct dma_fence *fence;
+ 	int i, csd_cfg0_reg;
+ 
++	if (unlikely(job->base.base.s_fence->finished.error))
++		return NULL;
++
+ 	v3d->csd_job = job;
+ 
+ 	v3d_invalidate_caches(v3d);
+diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h
+index 6e4be52306dfc9..d22269a230aa19 100644
+--- a/drivers/gpu/drm/xe/xe_bo.h
++++ b/drivers/gpu/drm/xe/xe_bo.h
+@@ -314,7 +314,6 @@ static inline unsigned int xe_sg_segment_size(struct device *dev)
+ 
+ #define i915_gem_object_flush_if_display(obj)		((void)(obj))
+ 
+-#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
+ /**
+  * xe_bo_is_mem_type - Whether the bo currently resides in the given
+  * TTM memory type
+@@ -329,4 +328,3 @@ static inline bool xe_bo_is_mem_type(struct xe_bo *bo, u32 mem_type)
+ 	return bo->ttm.resource->mem_type == mem_type;
+ }
+ #endif
+-#endif
+diff --git a/drivers/gpu/drm/xe/xe_dma_buf.c b/drivers/gpu/drm/xe/xe_dma_buf.c
+index 68f309f5e98153..f3bf7d3157b479 100644
+--- a/drivers/gpu/drm/xe/xe_dma_buf.c
++++ b/drivers/gpu/drm/xe/xe_dma_buf.c
+@@ -58,7 +58,7 @@ static int xe_dma_buf_pin(struct dma_buf_attachment *attach)
+ 	 * 1) Avoid pinning in a placement not accessible to some importers.
+ 	 * 2) Pinning in VRAM requires PIN accounting which is a to-do.
+ 	 */
+-	if (xe_bo_is_pinned(bo) && bo->ttm.resource->placement != XE_PL_TT) {
++	if (xe_bo_is_pinned(bo) && !xe_bo_is_mem_type(bo, XE_PL_TT)) {
+ 		drm_dbg(&xe->drm, "Can't migrate pinned bo for dma-buf pin.\n");
+ 		return -EINVAL;
+ 	}
+diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
+index 710674ef40a973..3f23a7d91519fa 100644
+--- a/drivers/gpu/host1x/dev.c
++++ b/drivers/gpu/host1x/dev.c
+@@ -367,6 +367,10 @@ static bool host1x_wants_iommu(struct host1x *host1x)
+ 	return true;
+ }
+ 
++/*
++ * Returns ERR_PTR on failure, NULL if the translation is IDENTITY, otherwise a
++ * valid paging domain.
++ */
+ static struct iommu_domain *host1x_iommu_attach(struct host1x *host)
+ {
+ 	struct iommu_domain *domain = iommu_get_domain_for_dev(host->dev);
+@@ -391,6 +395,8 @@ static struct iommu_domain *host1x_iommu_attach(struct host1x *host)
+ 	 * Similarly, if host1x is already attached to an IOMMU (via the DMA
+ 	 * API), don't try to attach again.
+ 	 */
++	if (domain && domain->type == IOMMU_DOMAIN_IDENTITY)
++		domain = NULL;
+ 	if (!host1x_wants_iommu(host) || domain)
+ 		return domain;
+ 
+diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
+index 1d9ad25c89ae55..8c9cf08ad45e22 100644
+--- a/drivers/i2c/busses/i2c-omap.c
++++ b/drivers/i2c/busses/i2c-omap.c
+@@ -1048,23 +1048,6 @@ static int omap_i2c_transmit_data(struct omap_i2c_dev *omap, u8 num_bytes,
+ 	return 0;
+ }
+ 
+-static irqreturn_t
+-omap_i2c_isr(int irq, void *dev_id)
+-{
+-	struct omap_i2c_dev *omap = dev_id;
+-	irqreturn_t ret = IRQ_HANDLED;
+-	u16 mask;
+-	u16 stat;
+-
+-	stat = omap_i2c_read_reg(omap, OMAP_I2C_STAT_REG);
+-	mask = omap_i2c_read_reg(omap, OMAP_I2C_IE_REG) & ~OMAP_I2C_STAT_NACK;
+-
+-	if (stat & mask)
+-		ret = IRQ_WAKE_THREAD;
+-
+-	return ret;
+-}
+-
+ static int omap_i2c_xfer_data(struct omap_i2c_dev *omap)
+ {
+ 	u16 bits;
+@@ -1095,8 +1078,13 @@ static int omap_i2c_xfer_data(struct omap_i2c_dev *omap)
+ 		}
+ 
+ 		if (stat & OMAP_I2C_STAT_NACK) {
+-			err |= OMAP_I2C_STAT_NACK;
++			omap->cmd_err |= OMAP_I2C_STAT_NACK;
+ 			omap_i2c_ack_stat(omap, OMAP_I2C_STAT_NACK);
++
++			if (!(stat & ~OMAP_I2C_STAT_NACK)) {
++				err = -EAGAIN;
++				break;
++			}
+ 		}
+ 
+ 		if (stat & OMAP_I2C_STAT_AL) {
+@@ -1472,7 +1460,7 @@ omap_i2c_probe(struct platform_device *pdev)
+ 				IRQF_NO_SUSPEND, pdev->name, omap);
+ 	else
+ 		r = devm_request_threaded_irq(&pdev->dev, omap->irq,
+-				omap_i2c_isr, omap_i2c_isr_thread,
++				NULL, omap_i2c_isr_thread,
+ 				IRQF_NO_SUSPEND | IRQF_ONESHOT,
+ 				pdev->name, omap);
+ 
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+index 613b5fc70e13ea..7436ce55157972 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+@@ -1216,8 +1216,6 @@ static void __modify_flags_from_init_state(struct bnxt_qplib_qp *qp)
+ 			qp->path_mtu =
+ 				CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
+ 		}
+-		qp->modify_flags &=
+-			~CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
+ 		/* Bono FW require the max_dest_rd_atomic to be >= 1 */
+ 		if (qp->max_dest_rd_atomic < 1)
+ 			qp->max_dest_rd_atomic = 1;
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+index 07779aeb75759d..a4deb45ec849fa 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
++++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+@@ -283,9 +283,10 @@ int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw);
+ int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
+ 			 struct bnxt_qplib_ctx *ctx, int is_virtfn);
+ void bnxt_qplib_mark_qp_error(void *qp_handle);
++
+ static inline u32 map_qp_id_to_tbl_indx(u32 qid, struct bnxt_qplib_rcfw *rcfw)
+ {
+ 	/* Last index of the qp_tbl is for QP1 ie. qp_tbl_size - 1*/
+-	return (qid == 1) ? rcfw->qp_tbl_size - 1 : qid % rcfw->qp_tbl_size - 2;
++	return (qid == 1) ? rcfw->qp_tbl_size - 1 : (qid % (rcfw->qp_tbl_size - 2));
+ }
+ #endif /* __BNXT_QPLIB_RCFW_H__ */
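
The one-line bnxt_re change is an operator-precedence fix: qid % qp_tbl_size - 2 parses as (qid % qp_tbl_size) - 2, which underflows for small qid, while the intent is to wrap within the table minus its two reserved slots. The real code special-cases qid == 1; the values below only illustrate the parse difference:

#include <stdio.h>

int main(void)
{
	unsigned int tbl_size = 128;		/* illustrative table size */
	unsigned int qids[] = { 0, 127 };

	for (int i = 0; i < 2; i++) {
		unsigned int qid = qids[i];

		/* old parse underflows for qid < 2; fixed form wraps */
		printf("qid %3u  old %10u  new %3u\n", qid,
		       qid % tbl_size - 2, qid % (tbl_size - 2));
	}
	return 0;
}
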
+diff --git a/drivers/infiniband/hw/hns/hns_roce_alloc.c b/drivers/infiniband/hw/hns/hns_roce_alloc.c
+index 950c133d4220e7..6ee911f6885b54 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_alloc.c
++++ b/drivers/infiniband/hw/hns/hns_roce_alloc.c
+@@ -175,8 +175,10 @@ void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev)
+ 	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)
+ 		ida_destroy(&hr_dev->xrcd_ida.ida);
+ 
+-	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
++	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
+ 		ida_destroy(&hr_dev->srq_table.srq_ida.ida);
++		xa_destroy(&hr_dev->srq_table.xa);
++	}
+ 	hns_roce_cleanup_qp_table(hr_dev);
+ 	hns_roce_cleanup_cq_table(hr_dev);
+ 	ida_destroy(&hr_dev->mr_table.mtpt_ida.ida);
+diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
+index 4106423a1b399d..3a5c93c9fb3e66 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
++++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
+@@ -537,5 +537,6 @@ void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev)
+ 
+ 	for (i = 0; i < HNS_ROCE_CQ_BANK_NUM; i++)
+ 		ida_destroy(&hr_dev->cq_table.bank[i].ida);
++	xa_destroy(&hr_dev->cq_table.array);
+ 	mutex_destroy(&hr_dev->cq_table.bank_mutex);
+ }
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
+index 605562122ecce2..ca0798224e565c 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
+@@ -1361,6 +1361,11 @@ static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev,
+ 	return ret;
+ }
+ 
++/* This is the number of bottom-level BT pages for a 100G MR on a 4K-page
++ * OS, assuming the BT page size is not expanded by cal_best_bt_pg_sz()
++ */
++#define RESCHED_LOOP_CNT_THRESHOLD_ON_4K 12800
++
+ /* construct the base address table and link them by address hop config */
+ int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev,
+ 			      struct hns_roce_hem_list *hem_list,
+@@ -1369,6 +1374,7 @@ int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev,
+ {
+ 	const struct hns_roce_buf_region *r;
+ 	int ofs, end;
++	int loop;
+ 	int unit;
+ 	int ret;
+ 	int i;
+@@ -1386,7 +1392,10 @@ int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev,
+ 			continue;
+ 
+ 		end = r->offset + r->count;
+-		for (ofs = r->offset; ofs < end; ofs += unit) {
++		for (ofs = r->offset, loop = 1; ofs < end; ofs += unit, loop++) {
++			if (!(loop % RESCHED_LOOP_CNT_THRESHOLD_ON_4K))
++				cond_resched();
++
+ 			ret = hem_list_alloc_mid_bt(hr_dev, r, unit, ofs,
+ 						    hem_list->mid_bt[i],
+ 						    &hem_list->btm_bt);
+@@ -1443,9 +1452,14 @@ void *hns_roce_hem_list_find_mtt(struct hns_roce_dev *hr_dev,
+ 	struct list_head *head = &hem_list->btm_bt;
+ 	struct hns_roce_hem_item *hem, *temp_hem;
+ 	void *cpu_base = NULL;
++	int loop = 1;
+ 	int nr = 0;
+ 
+ 	list_for_each_entry_safe(hem, temp_hem, head, sibling) {
++		if (!(loop % RESCHED_LOOP_CNT_THRESHOLD_ON_4K))
++			cond_resched();
++		loop++;
++
+ 		if (hem_list_page_is_in_range(hem, offset)) {
+ 			nr = offset - hem->start;
+ 			cpu_base = hem->addr + nr * BA_BYTE_LEN;
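
Both hem hunks apply the same pattern: a loop that can run for tens of thousands of iterations (the BT page count of a 100G MR) periodically yields the CPU so it cannot stall the scheduler. A user-space analogue of the throttled-yield idiom (the threshold constant is copied from the patch; everything else is illustrative):

#include <sched.h>
#include <stdio.h>

#define RESCHED_LOOP_CNT_THRESHOLD 12800	/* as in the patch */

int main(void)
{
	long work = 0;

	for (long loop = 1; loop <= 100000; loop++) {
		work += loop;			/* stand-in for real work */
		if (!(loop % RESCHED_LOOP_CNT_THRESHOLD))
			sched_yield();		/* cond_resched() analogue */
	}
	printf("%ld\n", work);
	return 0;
}
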
+diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
+index ae24c81c9812d9..cf89a8db4f64cd 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_main.c
++++ b/drivers/infiniband/hw/hns/hns_roce_main.c
+@@ -183,7 +183,7 @@ static int hns_roce_query_device(struct ib_device *ib_dev,
+ 				  IB_DEVICE_RC_RNR_NAK_GEN;
+ 	props->max_send_sge = hr_dev->caps.max_sq_sg;
+ 	props->max_recv_sge = hr_dev->caps.max_rq_sg;
+-	props->max_sge_rd = 1;
++	props->max_sge_rd = hr_dev->caps.max_sq_sg;
+ 	props->max_cq = hr_dev->caps.num_cqs;
+ 	props->max_cqe = hr_dev->caps.max_cqes;
+ 	props->max_mr = hr_dev->caps.num_mtpts;
+diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
+index 9e2e76c5940636..8901c142c1b652 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
++++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
+@@ -868,12 +868,14 @@ static int alloc_user_qp_db(struct hns_roce_dev *hr_dev,
+ 			    struct hns_roce_ib_create_qp *ucmd,
+ 			    struct hns_roce_ib_create_qp_resp *resp)
+ {
++	bool has_sdb = user_qp_has_sdb(hr_dev, init_attr, udata, resp, ucmd);
+ 	struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(udata,
+ 		struct hns_roce_ucontext, ibucontext);
++	bool has_rdb = user_qp_has_rdb(hr_dev, init_attr, udata, resp);
+ 	struct ib_device *ibdev = &hr_dev->ib_dev;
+ 	int ret;
+ 
+-	if (user_qp_has_sdb(hr_dev, init_attr, udata, resp, ucmd)) {
++	if (has_sdb) {
+ 		ret = hns_roce_db_map_user(uctx, ucmd->sdb_addr, &hr_qp->sdb);
+ 		if (ret) {
+ 			ibdev_err(ibdev,
+@@ -884,7 +886,7 @@ static int alloc_user_qp_db(struct hns_roce_dev *hr_dev,
+ 		hr_qp->en_flags |= HNS_ROCE_QP_CAP_SQ_RECORD_DB;
+ 	}
+ 
+-	if (user_qp_has_rdb(hr_dev, init_attr, udata, resp)) {
++	if (has_rdb) {
+ 		ret = hns_roce_db_map_user(uctx, ucmd->db_addr, &hr_qp->rdb);
+ 		if (ret) {
+ 			ibdev_err(ibdev,
+@@ -898,7 +900,7 @@ static int alloc_user_qp_db(struct hns_roce_dev *hr_dev,
+ 	return 0;
+ 
+ err_sdb:
+-	if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB)
++	if (has_sdb)
+ 		hns_roce_db_unmap_user(uctx, &hr_qp->sdb);
+ err_out:
+ 	return ret;
+@@ -1119,24 +1121,23 @@ static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
+ 						 ibucontext);
+ 		hr_qp->config = uctx->config;
+ 		ret = set_user_sq_size(hr_dev, &init_attr->cap, hr_qp, ucmd);
+-		if (ret)
++		if (ret) {
+ 			ibdev_err(ibdev,
+ 				  "failed to set user SQ size, ret = %d.\n",
+ 				  ret);
++			return ret;
++		}
+ 
+ 		ret = set_congest_param(hr_dev, hr_qp, ucmd);
+-		if (ret)
+-			return ret;
+ 	} else {
+ 		if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
+ 			hr_qp->config = HNS_ROCE_EXSGE_FLAGS;
++		default_congest_type(hr_dev, hr_qp);
+ 		ret = set_kernel_sq_size(hr_dev, &init_attr->cap, hr_qp);
+ 		if (ret)
+ 			ibdev_err(ibdev,
+ 				  "failed to set kernel SQ size, ret = %d.\n",
+ 				  ret);
+-
+-		default_congest_type(hr_dev, hr_qp);
+ 	}
+ 
+ 	return ret;
+@@ -1219,7 +1220,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
+ 				       min(udata->outlen, sizeof(resp)));
+ 		if (ret) {
+ 			ibdev_err(ibdev, "copy qp resp failed!\n");
+-			goto err_store;
++			goto err_flow_ctrl;
+ 		}
+ 	}
+ 
+@@ -1602,6 +1603,7 @@ void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev)
+ 	for (i = 0; i < HNS_ROCE_QP_BANK_NUM; i++)
+ 		ida_destroy(&hr_dev->qp_table.bank[i].ida);
+ 	xa_destroy(&hr_dev->qp_table.dip_xa);
++	xa_destroy(&hr_dev->qp_table_xa);
+ 	mutex_destroy(&hr_dev->qp_table.bank_mutex);
+ 	mutex_destroy(&hr_dev->qp_table.scc_mutex);
+ }
+diff --git a/drivers/infiniband/hw/mlx5/ah.c b/drivers/infiniband/hw/mlx5/ah.c
+index 99036afb3aef0b..531a57f9ee7e8b 100644
+--- a/drivers/infiniband/hw/mlx5/ah.c
++++ b/drivers/infiniband/hw/mlx5/ah.c
+@@ -50,11 +50,12 @@ static __be16 mlx5_ah_get_udp_sport(const struct mlx5_ib_dev *dev,
+ 	return sport;
+ }
+ 
+-static void create_ib_ah(struct mlx5_ib_dev *dev, struct mlx5_ib_ah *ah,
++static int create_ib_ah(struct mlx5_ib_dev *dev, struct mlx5_ib_ah *ah,
+ 			 struct rdma_ah_init_attr *init_attr)
+ {
+ 	struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
+ 	enum ib_gid_type gid_type;
++	int rate_val;
+ 
+ 	if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
+ 		const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
+@@ -67,8 +68,10 @@ static void create_ib_ah(struct mlx5_ib_dev *dev, struct mlx5_ib_ah *ah,
+ 		ah->av.tclass = grh->traffic_class;
+ 	}
+ 
+-	ah->av.stat_rate_sl =
+-		(mlx5r_ib_rate(dev, rdma_ah_get_static_rate(ah_attr)) << 4);
++	rate_val = mlx5r_ib_rate(dev, rdma_ah_get_static_rate(ah_attr));
++	if (rate_val < 0)
++		return rate_val;
++	ah->av.stat_rate_sl = rate_val << 4;
+ 
+ 	if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
+ 		if (init_attr->xmit_slave)
+@@ -89,6 +92,8 @@ static void create_ib_ah(struct mlx5_ib_dev *dev, struct mlx5_ib_ah *ah,
+ 		ah->av.fl_mlid = rdma_ah_get_path_bits(ah_attr) & 0x7f;
+ 		ah->av.stat_rate_sl |= (rdma_ah_get_sl(ah_attr) & 0xf);
+ 	}
++
++	return 0;
+ }
+ 
+ int mlx5_ib_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
+@@ -121,8 +126,7 @@ int mlx5_ib_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
+ 			return err;
+ 	}
+ 
+-	create_ib_ah(dev, ah, init_attr);
+-	return 0;
++	return create_ib_ah(dev, ah, init_attr);
+ }
+ 
+ int mlx5_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
+diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
+index 1ba4a0c8726aed..e27478fe9456c9 100644
+--- a/drivers/infiniband/sw/rxe/rxe.c
++++ b/drivers/infiniband/sw/rxe/rxe.c
+@@ -38,10 +38,8 @@ void rxe_dealloc(struct ib_device *ib_dev)
+ }
+ 
+ /* initialize rxe device parameters */
+-static void rxe_init_device_param(struct rxe_dev *rxe)
++static void rxe_init_device_param(struct rxe_dev *rxe, struct net_device *ndev)
+ {
+-	struct net_device *ndev;
+-
+ 	rxe->max_inline_data			= RXE_MAX_INLINE_DATA;
+ 
+ 	rxe->attr.vendor_id			= RXE_VENDOR_ID;
+@@ -74,15 +72,9 @@ static void rxe_init_device_param(struct rxe_dev *rxe)
+ 	rxe->attr.max_pkeys			= RXE_MAX_PKEYS;
+ 	rxe->attr.local_ca_ack_delay		= RXE_LOCAL_CA_ACK_DELAY;
+ 
+-	ndev = rxe_ib_device_get_netdev(&rxe->ib_dev);
+-	if (!ndev)
+-		return;
+-
+ 	addrconf_addr_eui48((unsigned char *)&rxe->attr.sys_image_guid,
+ 			ndev->dev_addr);
+ 
+-	dev_put(ndev);
+-
+ 	rxe->max_ucontext			= RXE_MAX_UCONTEXT;
+ }
+ 
+@@ -115,18 +107,13 @@ static void rxe_init_port_param(struct rxe_port *port)
+ /* initialize port state, note IB convention that HCA ports are always
+  * numbered from 1
+  */
+-static void rxe_init_ports(struct rxe_dev *rxe)
++static void rxe_init_ports(struct rxe_dev *rxe, struct net_device *ndev)
+ {
+ 	struct rxe_port *port = &rxe->port;
+-	struct net_device *ndev;
+ 
+ 	rxe_init_port_param(port);
+-	ndev = rxe_ib_device_get_netdev(&rxe->ib_dev);
+-	if (!ndev)
+-		return;
+ 	addrconf_addr_eui48((unsigned char *)&port->port_guid,
+ 			    ndev->dev_addr);
+-	dev_put(ndev);
+ 	spin_lock_init(&port->port_lock);
+ }
+ 
+@@ -144,12 +131,12 @@ static void rxe_init_pools(struct rxe_dev *rxe)
+ }
+ 
+ /* initialize rxe device state */
+-static void rxe_init(struct rxe_dev *rxe)
++static void rxe_init(struct rxe_dev *rxe, struct net_device *ndev)
+ {
+ 	/* init default device parameters */
+-	rxe_init_device_param(rxe);
++	rxe_init_device_param(rxe, ndev);
+ 
+-	rxe_init_ports(rxe);
++	rxe_init_ports(rxe, ndev);
+ 	rxe_init_pools(rxe);
+ 
+ 	/* init pending mmap list */
+@@ -184,7 +171,7 @@ void rxe_set_mtu(struct rxe_dev *rxe, unsigned int ndev_mtu)
+ int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name,
+ 			struct net_device *ndev)
+ {
+-	rxe_init(rxe);
++	rxe_init(rxe, ndev);
+ 	rxe_set_mtu(rxe, mtu);
+ 
+ 	return rxe_register_device(rxe, ibdev_name, ndev);
+diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
+index cdbd2edf4b2e7c..8c0853dca8b923 100644
+--- a/drivers/mmc/host/atmel-mci.c
++++ b/drivers/mmc/host/atmel-mci.c
+@@ -2499,8 +2499,10 @@ static int atmci_probe(struct platform_device *pdev)
+ 	/* Get MCI capabilities and set operations according to it */
+ 	atmci_get_cap(host);
+ 	ret = atmci_configure_dma(host);
+-	if (ret == -EPROBE_DEFER)
++	if (ret == -EPROBE_DEFER) {
++		clk_disable_unprepare(host->mck);
+ 		goto err_dma_probe_defer;
++	}
+ 	if (ret == 0) {
+ 		host->prepare_data = &atmci_prepare_data_dma;
+ 		host->submit_data = &atmci_submit_data_dma;
+diff --git a/drivers/mmc/host/sdhci-brcmstb.c b/drivers/mmc/host/sdhci-brcmstb.c
+index 031a4b514d16bd..9d9ddc2f6f70c1 100644
+--- a/drivers/mmc/host/sdhci-brcmstb.c
++++ b/drivers/mmc/host/sdhci-brcmstb.c
+@@ -503,8 +503,15 @@ static int sdhci_brcmstb_suspend(struct device *dev)
+ 	struct sdhci_host *host = dev_get_drvdata(dev);
+ 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ 	struct sdhci_brcmstb_priv *priv = sdhci_pltfm_priv(pltfm_host);
++	int ret;
+ 
+ 	clk_disable_unprepare(priv->base_clk);
++	if (host->mmc->caps2 & MMC_CAP2_CQE) {
++		ret = cqhci_suspend(host->mmc);
++		if (ret)
++			return ret;
++	}
++
+ 	return sdhci_pltfm_suspend(dev);
+ }
+ 
+@@ -529,6 +536,9 @@ static int sdhci_brcmstb_resume(struct device *dev)
+ 			ret = clk_set_rate(priv->base_clk, priv->base_freq_hz);
+ 	}
+ 
++	if (host->mmc->caps2 & MMC_CAP2_CQE)
++		ret = cqhci_resume(host->mmc);
++
+ 	return ret;
+ }
+ #endif
+diff --git a/drivers/net/can/flexcan/flexcan-core.c b/drivers/net/can/flexcan/flexcan-core.c
+index ac1a860986df69..b080740bcb104f 100644
+--- a/drivers/net/can/flexcan/flexcan-core.c
++++ b/drivers/net/can/flexcan/flexcan-core.c
+@@ -2260,14 +2260,19 @@ static int __maybe_unused flexcan_suspend(struct device *device)
+ 
+ 			flexcan_chip_interrupts_disable(dev);
+ 
++			err = flexcan_transceiver_disable(priv);
++			if (err)
++				return err;
++
+ 			err = pinctrl_pm_select_sleep_state(device);
+ 			if (err)
+ 				return err;
+ 		}
+ 		netif_stop_queue(dev);
+ 		netif_device_detach(dev);
++
++		priv->can.state = CAN_STATE_SLEEPING;
+ 	}
+-	priv->can.state = CAN_STATE_SLEEPING;
+ 
+ 	return 0;
+ }
+@@ -2278,7 +2283,6 @@ static int __maybe_unused flexcan_resume(struct device *device)
+ 	struct flexcan_priv *priv = netdev_priv(dev);
+ 	int err;
+ 
+-	priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ 	if (netif_running(dev)) {
+ 		netif_device_attach(dev);
+ 		netif_start_queue(dev);
+@@ -2292,12 +2296,20 @@ static int __maybe_unused flexcan_resume(struct device *device)
+ 			if (err)
+ 				return err;
+ 
+-			err = flexcan_chip_start(dev);
++			err = flexcan_transceiver_enable(priv);
+ 			if (err)
+ 				return err;
+ 
++			err = flexcan_chip_start(dev);
++			if (err) {
++				flexcan_transceiver_disable(priv);
++				return err;
++			}
++
+ 			flexcan_chip_interrupts_enable(dev);
+ 		}
++
++		priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
+index df1a5d0b37b226..aa3df0d05b853b 100644
+--- a/drivers/net/can/rcar/rcar_canfd.c
++++ b/drivers/net/can/rcar/rcar_canfd.c
+@@ -787,22 +787,14 @@ static void rcar_canfd_configure_controller(struct rcar_canfd_global *gpriv)
+ }
+ 
+ static void rcar_canfd_configure_afl_rules(struct rcar_canfd_global *gpriv,
+-					   u32 ch)
++					   u32 ch, u32 rule_entry)
+ {
+-	u32 cfg;
+-	int offset, start, page, num_rules = RCANFD_CHANNEL_NUMRULES;
++	int offset, page, num_rules = RCANFD_CHANNEL_NUMRULES;
++	u32 rule_entry_index = rule_entry % 16;
+ 	u32 ridx = ch + RCANFD_RFFIFO_IDX;
+ 
+-	if (ch == 0) {
+-		start = 0; /* Channel 0 always starts from 0th rule */
+-	} else {
+-		/* Get number of Channel 0 rules and adjust */
+-		cfg = rcar_canfd_read(gpriv->base, RCANFD_GAFLCFG(ch));
+-		start = RCANFD_GAFLCFG_GETRNC(gpriv, 0, cfg);
+-	}
+-
+ 	/* Enable write access to entry */
+-	page = RCANFD_GAFL_PAGENUM(start);
++	page = RCANFD_GAFL_PAGENUM(rule_entry);
+ 	rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLECTR,
+ 			   (RCANFD_GAFLECTR_AFLPN(gpriv, page) |
+ 			    RCANFD_GAFLECTR_AFLDAE));
+@@ -818,13 +810,13 @@ static void rcar_canfd_configure_afl_rules(struct rcar_canfd_global *gpriv,
+ 		offset = RCANFD_C_GAFL_OFFSET;
+ 
+ 	/* Accept all IDs */
+-	rcar_canfd_write(gpriv->base, RCANFD_GAFLID(offset, start), 0);
++	rcar_canfd_write(gpriv->base, RCANFD_GAFLID(offset, rule_entry_index), 0);
+ 	/* IDE or RTR is not considered for matching */
+-	rcar_canfd_write(gpriv->base, RCANFD_GAFLM(offset, start), 0);
++	rcar_canfd_write(gpriv->base, RCANFD_GAFLM(offset, rule_entry_index), 0);
+ 	/* Any data length accepted */
+-	rcar_canfd_write(gpriv->base, RCANFD_GAFLP0(offset, start), 0);
++	rcar_canfd_write(gpriv->base, RCANFD_GAFLP0(offset, rule_entry_index), 0);
+ 	/* Place the msg in corresponding Rx FIFO entry */
+-	rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLP1(offset, start),
++	rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLP1(offset, rule_entry_index),
+ 			   RCANFD_GAFLP1_GAFLFDP(ridx));
+ 
+ 	/* Disable write access to page */
+@@ -1851,6 +1843,7 @@ static int rcar_canfd_probe(struct platform_device *pdev)
+ 	unsigned long channels_mask = 0;
+ 	int err, ch_irq, g_irq;
+ 	int g_err_irq, g_recc_irq;
++	u32 rule_entry = 0;
+ 	bool fdmode = true;			/* CAN FD only mode - default */
+ 	char name[9] = "channelX";
+ 	int i;
+@@ -2023,7 +2016,8 @@ static int rcar_canfd_probe(struct platform_device *pdev)
+ 		rcar_canfd_configure_tx(gpriv, ch);
+ 
+ 		/* Configure receive rules */
+-		rcar_canfd_configure_afl_rules(gpriv, ch);
++		rcar_canfd_configure_afl_rules(gpriv, ch, rule_entry);
++		rule_entry += RCANFD_CHANNEL_NUMRULES;
+ 	}
+ 
+ 	/* Configure common interrupts */
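
The AFL rework replaces the read-back of channel 0's rule count with a running rule_entry counter passed in by the caller; the % 16 in the patch implies 16 rule entries per page, with the page selected via RCANFD_GAFL_PAGENUM. The page/index split in isolation (the per-channel rule count here is a stand-in, not the driver's value):

#include <stdio.h>

#define NUMRULES_PER_CH 4	/* stand-in for RCANFD_CHANNEL_NUMRULES */

int main(void)
{
	unsigned int rule_entry = 0;

	for (int ch = 0; ch < 8; ch++) {
		/* first rule of each channel: page and in-page index */
		printf("ch%d: entry %2u -> page %u, index %2u\n",
		       ch, rule_entry, rule_entry / 16, rule_entry % 16);
		rule_entry += NUMRULES_PER_CH;	/* as in the probe loop */
	}
	return 0;
}
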
+diff --git a/drivers/net/can/usb/ucan.c b/drivers/net/can/usb/ucan.c
+index 39a63b7313a46d..07406daf7c88ed 100644
+--- a/drivers/net/can/usb/ucan.c
++++ b/drivers/net/can/usb/ucan.c
+@@ -186,7 +186,7 @@ union ucan_ctl_payload {
+ 	 */
+ 	struct ucan_ctl_cmd_get_protocol_version cmd_get_protocol_version;
+ 
+-	u8 raw[128];
++	u8 fw_str[128];
+ } __packed;
+ 
+ enum {
+@@ -424,18 +424,20 @@ static int ucan_ctrl_command_out(struct ucan_priv *up,
+ 			       UCAN_USB_CTL_PIPE_TIMEOUT);
+ }
+ 
+-static int ucan_device_request_in(struct ucan_priv *up,
+-				  u8 cmd, u16 subcmd, u16 datalen)
++static void ucan_get_fw_str(struct ucan_priv *up, char *fw_str, size_t size)
+ {
+-	return usb_control_msg(up->udev,
+-			       usb_rcvctrlpipe(up->udev, 0),
+-			       cmd,
+-			       USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+-			       subcmd,
+-			       0,
+-			       up->ctl_msg_buffer,
+-			       datalen,
+-			       UCAN_USB_CTL_PIPE_TIMEOUT);
++	int ret;
++
++	ret = usb_control_msg(up->udev, usb_rcvctrlpipe(up->udev, 0),
++			      UCAN_DEVICE_GET_FW_STRING,
++			      USB_DIR_IN | USB_TYPE_VENDOR |
++			      USB_RECIP_DEVICE,
++			      0, 0, fw_str, size - 1,
++			      UCAN_USB_CTL_PIPE_TIMEOUT);
++	if (ret > 0)
++		fw_str[ret] = '\0';
++	else
++		strscpy(fw_str, "unknown", size);
+ }
+ 
+ /* Parse the device information structure reported by the device and
+@@ -1314,7 +1316,6 @@ static int ucan_probe(struct usb_interface *intf,
+ 	u8 in_ep_addr;
+ 	u8 out_ep_addr;
+ 	union ucan_ctl_payload *ctl_msg_buffer;
+-	char firmware_str[sizeof(union ucan_ctl_payload) + 1];
+ 
+ 	udev = interface_to_usbdev(intf);
+ 
+@@ -1527,17 +1528,6 @@ static int ucan_probe(struct usb_interface *intf,
+ 	 */
+ 	ucan_parse_device_info(up, &ctl_msg_buffer->cmd_get_device_info);
+ 
+-	/* just print some device information - if available */
+-	ret = ucan_device_request_in(up, UCAN_DEVICE_GET_FW_STRING, 0,
+-				     sizeof(union ucan_ctl_payload));
+-	if (ret > 0) {
+-		/* copy string while ensuring zero termination */
+-		strscpy(firmware_str, up->ctl_msg_buffer->raw,
+-			sizeof(union ucan_ctl_payload) + 1);
+-	} else {
+-		strcpy(firmware_str, "unknown");
+-	}
+-
+ 	/* device is compatible, reset it */
+ 	ret = ucan_ctrl_command_out(up, UCAN_COMMAND_RESET, 0, 0);
+ 	if (ret < 0)
+@@ -1555,7 +1545,10 @@ static int ucan_probe(struct usb_interface *intf,
+ 
+ 	/* initialisation complete, log device info */
+ 	netdev_info(up->netdev, "registered device\n");
+-	netdev_info(up->netdev, "firmware string: %s\n", firmware_str);
++	ucan_get_fw_str(up, up->ctl_msg_buffer->fw_str,
++			sizeof(up->ctl_msg_buffer->fw_str));
++	netdev_info(up->netdev, "firmware string: %s\n",
++		    up->ctl_msg_buffer->fw_str);
+ 
+ 	/* success */
+ 	return 0;
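
The ucan rework moves the firmware-string query into a helper that guarantees NUL termination: a positive return from usb_control_msg() is the number of bytes read, so requesting at most size - 1 bytes and terminating at ret is always safe, and any error falls back to "unknown". The same termination logic standalone (read_fw() is a made-up stand-in for the USB transfer):

#include <stdio.h>
#include <string.h>

/* Made-up stand-in for usb_control_msg(): returns bytes read, or <0. */
static int read_fw(char *buf, size_t len)
{
	const char fw[] = "ucan-fw 1.2.3";
	size_t n = sizeof(fw) - 1 < len ? sizeof(fw) - 1 : len;

	memcpy(buf, fw, n);
	return (int)n;
}

static void get_fw_str(char *fw_str, size_t size)
{
	int ret = read_fw(fw_str, size - 1);	/* leave room for the NUL */

	if (ret > 0)
		fw_str[ret] = '\0';
	else
		snprintf(fw_str, size, "unknown");	/* strscpy() analogue */
}

int main(void)
{
	char buf[128];

	get_fw_str(buf, sizeof(buf));
	printf("firmware string: %s\n", buf);
	return 0;
}
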
+diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
+index 0c2ba2fa88c466..36802e0a8b570f 100644
+--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
++++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
+@@ -131,9 +131,10 @@ static int mana_gd_detect_devices(struct pci_dev *pdev)
+ 	struct gdma_list_devices_resp resp = {};
+ 	struct gdma_general_req req = {};
+ 	struct gdma_dev_id dev;
+-	u32 i, max_num_devs;
++	int found_dev = 0;
+ 	u16 dev_type;
+ 	int err;
++	u32 i;
+ 
+ 	mana_gd_init_req_hdr(&req.hdr, GDMA_LIST_DEVICES, sizeof(req),
+ 			     sizeof(resp));
+@@ -145,12 +146,17 @@ static int mana_gd_detect_devices(struct pci_dev *pdev)
+ 		return err ? err : -EPROTO;
+ 	}
+ 
+-	max_num_devs = min_t(u32, MAX_NUM_GDMA_DEVICES, resp.num_of_devs);
+-
+-	for (i = 0; i < max_num_devs; i++) {
++	for (i = 0; i < GDMA_DEV_LIST_SIZE &&
++	     found_dev < resp.num_of_devs; i++) {
+ 		dev = resp.devs[i];
+ 		dev_type = dev.type;
+ 
++		/* Skip empty devices */
++		if (dev.as_uint32 == 0)
++			continue;
++
++		found_dev++;
++
+ 		/* HWC is already detected in mana_hwc_create_channel(). */
+ 		if (dev_type == GDMA_DEVICE_HWC)
+ 			continue;
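
The detection loop now walks the full fixed-size list and counts only non-empty slots, so it tolerates holes (zeroed entries) that the hardware may leave; the matching capability bit is advertised in the gdma.h hunk further down. Reduced to a plain array scan (the device values are invented):

#include <stdio.h>

#define DEV_LIST_SIZE 8		/* 64 in the driver; shrunk for the demo */

int main(void)
{
	unsigned int devs[DEV_LIST_SIZE] = { 3, 0, 0, 7, 0, 9, 0, 0 };
	unsigned int num_of_devs = 3;	/* what the response header claims */
	int found = 0;

	for (int i = 0; i < DEV_LIST_SIZE && found < num_of_devs; i++) {
		if (devs[i] == 0)
			continue;	/* skip holes in the list */
		found++;
		printf("device %u at slot %d\n", devs[i], i);
	}
	return 0;
}
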
+diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+index 3e090f87f97ebd..308a2b72a65de3 100644
+--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
++++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+@@ -2225,14 +2225,18 @@ static void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common)
+ static int am65_cpsw_nuss_ndev_add_tx_napi(struct am65_cpsw_common *common)
+ {
+ 	struct device *dev = common->dev;
++	struct am65_cpsw_tx_chn *tx_chn;
+ 	int i, ret = 0;
+ 
+ 	for (i = 0; i < common->tx_ch_num; i++) {
+-		struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
++		tx_chn = &common->tx_chns[i];
+ 
+ 		hrtimer_init(&tx_chn->tx_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
+ 		tx_chn->tx_hrtimer.function = &am65_cpsw_nuss_tx_timer_callback;
+ 
++		netif_napi_add_tx(common->dma_ndev, &tx_chn->napi_tx,
++				  am65_cpsw_nuss_tx_poll);
++
+ 		ret = devm_request_irq(dev, tx_chn->irq,
+ 				       am65_cpsw_nuss_tx_irq,
+ 				       IRQF_TRIGGER_HIGH,
+@@ -2242,19 +2246,16 @@ static int am65_cpsw_nuss_ndev_add_tx_napi(struct am65_cpsw_common *common)
+ 				tx_chn->id, tx_chn->irq, ret);
+ 			goto err;
+ 		}
+-
+-		netif_napi_add_tx(common->dma_ndev, &tx_chn->napi_tx,
+-				  am65_cpsw_nuss_tx_poll);
+ 	}
+ 
+ 	return 0;
+ 
+ err:
+-	for (--i ; i >= 0 ; i--) {
+-		struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
+-
+-		netif_napi_del(&tx_chn->napi_tx);
++	netif_napi_del(&tx_chn->napi_tx);
++	for (--i; i >= 0; i--) {
++		tx_chn = &common->tx_chns[i];
+ 		devm_free_irq(dev, tx_chn->irq, tx_chn);
++		netif_napi_del(&tx_chn->napi_tx);
+ 	}
+ 
+ 	return ret;
+@@ -2488,6 +2489,9 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
+ 			     HRTIMER_MODE_REL_PINNED);
+ 		flow->rx_hrtimer.function = &am65_cpsw_nuss_rx_timer_callback;
+ 
++		netif_napi_add(common->dma_ndev, &flow->napi_rx,
++			       am65_cpsw_nuss_rx_poll);
++
+ 		ret = devm_request_irq(dev, flow->irq,
+ 				       am65_cpsw_nuss_rx_irq,
+ 				       IRQF_TRIGGER_HIGH,
+@@ -2496,11 +2500,8 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
+ 			dev_err(dev, "failure requesting rx %d irq %u, %d\n",
+ 				i, flow->irq, ret);
+ 			flow->irq = -EINVAL;
+-			goto err_flow;
++			goto err_request_irq;
+ 		}
+-
+-		netif_napi_add(common->dma_ndev, &flow->napi_rx,
+-			       am65_cpsw_nuss_rx_poll);
+ 	}
+ 
+ 	/* setup classifier to route priorities to flows */
+@@ -2508,11 +2509,14 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
+ 
+ 	return 0;
+ 
++err_request_irq:
++	netif_napi_del(&flow->napi_rx);
++
+ err_flow:
+-	for (--i; i >= 0 ; i--) {
++	for (--i; i >= 0; i--) {
+ 		flow = &rx_chn->flows[i];
+-		netif_napi_del(&flow->napi_rx);
+ 		devm_free_irq(dev, flow->irq, flow);
++		netif_napi_del(&flow->napi_rx);
+ 	}
+ 
+ err:
+diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+index cb11635a8d1209..6f0700d156e710 100644
+--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
++++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+@@ -1555,6 +1555,7 @@ static int prueth_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	spin_lock_init(&prueth->vtbl_lock);
++	spin_lock_init(&prueth->stats_lock);
+ 	/* setup netdev interfaces */
+ 	if (eth0_node) {
+ 		ret = prueth_netdev_init(prueth, eth0_node);
+diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.h b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
+index 5473315ea20406..e456a11c5d4e38 100644
+--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.h
++++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
+@@ -297,6 +297,8 @@ struct prueth {
+ 	int default_vlan;
+ 	/** @vtbl_lock: Lock for vtbl in shared memory */
+ 	spinlock_t vtbl_lock;
++	/** @stats_lock: Lock for reading icssg stats */
++	spinlock_t stats_lock;
+ };
+ 
+ struct emac_tx_ts_response {
+diff --git a/drivers/net/ethernet/ti/icssg/icssg_stats.c b/drivers/net/ethernet/ti/icssg/icssg_stats.c
+index 8800bd3a8d074c..6f0edae38ea242 100644
+--- a/drivers/net/ethernet/ti/icssg/icssg_stats.c
++++ b/drivers/net/ethernet/ti/icssg/icssg_stats.c
+@@ -26,6 +26,8 @@ void emac_update_hardware_stats(struct prueth_emac *emac)
+ 	u32 val, reg;
+ 	int i;
+ 
++	spin_lock(&prueth->stats_lock);
++
+ 	for (i = 0; i < ARRAY_SIZE(icssg_all_miig_stats); i++) {
+ 		regmap_read(prueth->miig_rt,
+ 			    base + icssg_all_miig_stats[i].offset,
+@@ -51,6 +53,8 @@ void emac_update_hardware_stats(struct prueth_emac *emac)
+ 			emac->pa_stats[i] += val;
+ 		}
+ 	}
++
++	spin_unlock(&prueth->stats_lock);
+ }
+ 
+ void icssg_stats_work_handler(struct work_struct *work)
+diff --git a/drivers/net/phy/phy_link_topology.c b/drivers/net/phy/phy_link_topology.c
+index 4a5d73002a1a85..0e9e987f37dd84 100644
+--- a/drivers/net/phy/phy_link_topology.c
++++ b/drivers/net/phy/phy_link_topology.c
+@@ -73,7 +73,7 @@ int phy_link_topo_add_phy(struct net_device *dev,
+ 				      xa_limit_32b, &topo->next_phy_index,
+ 				      GFP_KERNEL);
+ 
+-	if (ret)
++	if (ret < 0)
+ 		goto err;
+ 
+ 	return 0;
+diff --git a/drivers/pmdomain/amlogic/meson-secure-pwrc.c b/drivers/pmdomain/amlogic/meson-secure-pwrc.c
+index 42ce41a2fe3a0c..ff76ea36835e53 100644
+--- a/drivers/pmdomain/amlogic/meson-secure-pwrc.c
++++ b/drivers/pmdomain/amlogic/meson-secure-pwrc.c
+@@ -221,7 +221,7 @@ static const struct meson_secure_pwrc_domain_desc t7_pwrc_domains[] = {
+ 	SEC_PD(T7_VI_CLK2,	0),
+ 	/* ETH is for ethernet online wakeup, and should be always on */
+ 	SEC_PD(T7_ETH,		GENPD_FLAG_ALWAYS_ON),
+-	SEC_PD(T7_ISP,		0),
++	TOP_PD(T7_ISP,		0, PWRC_T7_MIPI_ISP_ID),
+ 	SEC_PD(T7_MIPI_ISP,	0),
+ 	TOP_PD(T7_GDC,		0, PWRC_T7_NIC3_ID),
+ 	TOP_PD(T7_DEWARP,	0, PWRC_T7_NIC3_ID),
+diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
+index 4bb2652740d001..1f4698d724bb78 100644
+--- a/drivers/regulator/core.c
++++ b/drivers/regulator/core.c
+@@ -2024,6 +2024,10 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
+ 
+ 		if (have_full_constraints()) {
+ 			r = dummy_regulator_rdev;
++			if (!r) {
++				ret = -EPROBE_DEFER;
++				goto out;
++			}
+ 			get_device(&r->dev);
+ 		} else {
+ 			dev_err(dev, "Failed to resolve %s-supply for %s\n",
+@@ -2041,6 +2045,10 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
+ 			goto out;
+ 		}
+ 		r = dummy_regulator_rdev;
++		if (!r) {
++			ret = -EPROBE_DEFER;
++			goto out;
++		}
+ 		get_device(&r->dev);
+ 	}
+ 
+@@ -2166,8 +2174,10 @@ struct regulator *_regulator_get_common(struct regulator_dev *rdev, struct devic
+ 			 * enabled, even if it isn't hooked up, and just
+ 			 * provide a dummy.
+ 			 */
+-			dev_warn(dev, "supply %s not found, using dummy regulator\n", id);
+ 			rdev = dummy_regulator_rdev;
++			if (!rdev)
++				return ERR_PTR(-EPROBE_DEFER);
++			dev_warn(dev, "supply %s not found, using dummy regulator\n", id);
+ 			get_device(&rdev->dev);
+ 			break;
+ 
+diff --git a/drivers/regulator/dummy.c b/drivers/regulator/dummy.c
+index 5b9b9e4e762d52..9f59889129abec 100644
+--- a/drivers/regulator/dummy.c
++++ b/drivers/regulator/dummy.c
+@@ -60,7 +60,7 @@ static struct platform_driver dummy_regulator_driver = {
+ 	.probe		= dummy_regulator_probe,
+ 	.driver		= {
+ 		.name		= "reg-dummy",
+-		.probe_type	= PROBE_PREFER_ASYNCHRONOUS,
++		.probe_type	= PROBE_FORCE_SYNCHRONOUS,
+ 	},
+ };
+ 
+diff --git a/drivers/soc/imx/soc-imx8m.c b/drivers/soc/imx/soc-imx8m.c
+index 5ea8887828c064..3ed8161d7d28ba 100644
+--- a/drivers/soc/imx/soc-imx8m.c
++++ b/drivers/soc/imx/soc-imx8m.c
+@@ -30,11 +30,9 @@
+ 
+ struct imx8_soc_data {
+ 	char *name;
+-	int (*soc_revision)(u32 *socrev);
++	int (*soc_revision)(u32 *socrev, u64 *socuid);
+ };
+ 
+-static u64 soc_uid;
+-
+ #ifdef CONFIG_HAVE_ARM_SMCCC
+ static u32 imx8mq_soc_revision_from_atf(void)
+ {
+@@ -51,24 +49,22 @@ static u32 imx8mq_soc_revision_from_atf(void)
+ static inline u32 imx8mq_soc_revision_from_atf(void) { return 0; };
+ #endif
+ 
+-static int imx8mq_soc_revision(u32 *socrev)
++static int imx8mq_soc_revision(u32 *socrev, u64 *socuid)
+ {
+-	struct device_node *np;
++	struct device_node *np __free(device_node) =
++		of_find_compatible_node(NULL, NULL, "fsl,imx8mq-ocotp");
+ 	void __iomem *ocotp_base;
+ 	u32 magic;
+ 	u32 rev;
+ 	struct clk *clk;
+ 	int ret;
+ 
+-	np = of_find_compatible_node(NULL, NULL, "fsl,imx8mq-ocotp");
+ 	if (!np)
+ 		return -EINVAL;
+ 
+ 	ocotp_base = of_iomap(np, 0);
+-	if (!ocotp_base) {
+-		ret = -EINVAL;
+-		goto err_iomap;
+-	}
++	if (!ocotp_base)
++		return -EINVAL;
+ 
+ 	clk = of_clk_get_by_name(np, NULL);
+ 	if (IS_ERR(clk)) {
+@@ -89,44 +85,39 @@ static int imx8mq_soc_revision(u32 *socrev)
+ 			rev = REV_B1;
+ 	}
+ 
+-	soc_uid = readl_relaxed(ocotp_base + OCOTP_UID_HIGH);
+-	soc_uid <<= 32;
+-	soc_uid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW);
++	*socuid = readl_relaxed(ocotp_base + OCOTP_UID_HIGH);
++	*socuid <<= 32;
++	*socuid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW);
+ 
+ 	*socrev = rev;
+ 
+ 	clk_disable_unprepare(clk);
+ 	clk_put(clk);
+ 	iounmap(ocotp_base);
+-	of_node_put(np);
+ 
+ 	return 0;
+ 
+ err_clk:
+ 	iounmap(ocotp_base);
+-err_iomap:
+-	of_node_put(np);
+ 	return ret;
+ }
+ 
+-static int imx8mm_soc_uid(void)
++static int imx8mm_soc_uid(u64 *socuid)
+ {
++	struct device_node *np __free(device_node) =
++		of_find_compatible_node(NULL, NULL, "fsl,imx8mm-ocotp");
+ 	void __iomem *ocotp_base;
+-	struct device_node *np;
+ 	struct clk *clk;
+ 	int ret = 0;
+ 	u32 offset = of_machine_is_compatible("fsl,imx8mp") ?
+ 		     IMX8MP_OCOTP_UID_OFFSET : 0;
+ 
+-	np = of_find_compatible_node(NULL, NULL, "fsl,imx8mm-ocotp");
+ 	if (!np)
+ 		return -EINVAL;
+ 
+ 	ocotp_base = of_iomap(np, 0);
+-	if (!ocotp_base) {
+-		ret = -EINVAL;
+-		goto err_iomap;
+-	}
++	if (!ocotp_base)
++		return -EINVAL;
+ 
+ 	clk = of_clk_get_by_name(np, NULL);
+ 	if (IS_ERR(clk)) {
+@@ -136,47 +127,36 @@ static int imx8mm_soc_uid(void)
+ 
+ 	clk_prepare_enable(clk);
+ 
+-	soc_uid = readl_relaxed(ocotp_base + OCOTP_UID_HIGH + offset);
+-	soc_uid <<= 32;
+-	soc_uid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW + offset);
++	*socuid = readl_relaxed(ocotp_base + OCOTP_UID_HIGH + offset);
++	*socuid <<= 32;
++	*socuid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW + offset);
+ 
+ 	clk_disable_unprepare(clk);
+ 	clk_put(clk);
+ 
+ err_clk:
+ 	iounmap(ocotp_base);
+-err_iomap:
+-	of_node_put(np);
+-
+ 	return ret;
+ }
+ 
+-static int imx8mm_soc_revision(u32 *socrev)
++static int imx8mm_soc_revision(u32 *socrev, u64 *socuid)
+ {
+-	struct device_node *np;
++	struct device_node *np __free(device_node) =
++		of_find_compatible_node(NULL, NULL, "fsl,imx8mm-anatop");
+ 	void __iomem *anatop_base;
+-	int ret;
+ 
+-	np = of_find_compatible_node(NULL, NULL, "fsl,imx8mm-anatop");
+ 	if (!np)
+ 		return -EINVAL;
+ 
+ 	anatop_base = of_iomap(np, 0);
+-	if (!anatop_base) {
+-		ret = -EINVAL;
+-		goto err_iomap;
+-	}
++	if (!anatop_base)
++		return -EINVAL;
+ 
+ 	*socrev = readl_relaxed(anatop_base + ANADIG_DIGPROG_IMX8MM);
+ 
+ 	iounmap(anatop_base);
+-	of_node_put(np);
+ 
+-	return imx8mm_soc_uid();
+-
+-err_iomap:
+-	of_node_put(np);
+-	return ret;
++	return imx8mm_soc_uid(socuid);
+ }
+ 
+ static const struct imx8_soc_data imx8mq_soc_data = {
+@@ -207,21 +187,34 @@ static __maybe_unused const struct of_device_id imx8_soc_match[] = {
+ 	{ }
+ };
+ 
+-#define imx8_revision(soc_rev) \
+-	soc_rev ? \
+-	kasprintf(GFP_KERNEL, "%d.%d", (soc_rev >> 4) & 0xf,  soc_rev & 0xf) : \
++#define imx8_revision(dev, soc_rev) \
++	(soc_rev) ? \
++	devm_kasprintf((dev), GFP_KERNEL, "%d.%d", ((soc_rev) >> 4) & 0xf, (soc_rev) & 0xf) : \
+ 	"unknown"
+ 
++static void imx8m_unregister_soc(void *data)
++{
++	soc_device_unregister(data);
++}
++
++static void imx8m_unregister_cpufreq(void *data)
++{
++	platform_device_unregister(data);
++}
++
+ static int imx8m_soc_probe(struct platform_device *pdev)
+ {
+ 	struct soc_device_attribute *soc_dev_attr;
+-	struct soc_device *soc_dev;
++	struct platform_device *cpufreq_dev;
++	const struct imx8_soc_data *data;
++	struct device *dev = &pdev->dev;
+ 	const struct of_device_id *id;
++	struct soc_device *soc_dev;
+ 	u32 soc_rev = 0;
+-	const struct imx8_soc_data *data;
++	u64 soc_uid = 0;
+ 	int ret;
+ 
+-	soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
++	soc_dev_attr = devm_kzalloc(dev, sizeof(*soc_dev_attr), GFP_KERNEL);
+ 	if (!soc_dev_attr)
+ 		return -ENOMEM;
+ 
+@@ -229,58 +222,52 @@ static int imx8m_soc_probe(struct platform_device *pdev)
+ 
+ 	ret = of_property_read_string(of_root, "model", &soc_dev_attr->machine);
+ 	if (ret)
+-		goto free_soc;
++		return ret;
+ 
+ 	id = of_match_node(imx8_soc_match, of_root);
+-	if (!id) {
+-		ret = -ENODEV;
+-		goto free_soc;
+-	}
++	if (!id)
++		return -ENODEV;
+ 
+ 	data = id->data;
+ 	if (data) {
+ 		soc_dev_attr->soc_id = data->name;
+ 		if (data->soc_revision) {
+-			ret = data->soc_revision(&soc_rev);
++			ret = data->soc_revision(&soc_rev, &soc_uid);
+ 			if (ret)
+-				goto free_soc;
++				return ret;
+ 		}
+ 	}
+ 
+-	soc_dev_attr->revision = imx8_revision(soc_rev);
+-	if (!soc_dev_attr->revision) {
+-		ret = -ENOMEM;
+-		goto free_soc;
+-	}
++	soc_dev_attr->revision = imx8_revision(dev, soc_rev);
++	if (!soc_dev_attr->revision)
++		return -ENOMEM;
+ 
+-	soc_dev_attr->serial_number = kasprintf(GFP_KERNEL, "%016llX", soc_uid);
+-	if (!soc_dev_attr->serial_number) {
+-		ret = -ENOMEM;
+-		goto free_rev;
+-	}
++	soc_dev_attr->serial_number = devm_kasprintf(dev, GFP_KERNEL, "%016llX", soc_uid);
++	if (!soc_dev_attr->serial_number)
++		return -ENOMEM;
+ 
+ 	soc_dev = soc_device_register(soc_dev_attr);
+-	if (IS_ERR(soc_dev)) {
+-		ret = PTR_ERR(soc_dev);
+-		goto free_serial_number;
+-	}
++	if (IS_ERR(soc_dev))
++		return PTR_ERR(soc_dev);
++
++	ret = devm_add_action(dev, imx8m_unregister_soc, soc_dev);
++	if (ret)
++		return ret;
+ 
+ 	pr_info("SoC: %s revision %s\n", soc_dev_attr->soc_id,
+ 		soc_dev_attr->revision);
+ 
+-	if (IS_ENABLED(CONFIG_ARM_IMX_CPUFREQ_DT))
+-		platform_device_register_simple("imx-cpufreq-dt", -1, NULL, 0);
++	if (IS_ENABLED(CONFIG_ARM_IMX_CPUFREQ_DT)) {
++		cpufreq_dev = platform_device_register_simple("imx-cpufreq-dt", -1, NULL, 0);
++		if (IS_ERR(cpufreq_dev))
++			return dev_err_probe(dev, PTR_ERR(cpufreq_dev),
++					     "Failed to register imx-cpufreq-dev device\n");
++		ret = devm_add_action(dev, imx8m_unregister_cpufreq, cpufreq_dev);
++		if (ret)
++			return ret;
++	}
+ 
+ 	return 0;
+-
+-free_serial_number:
+-	kfree(soc_dev_attr->serial_number);
+-free_rev:
+-	if (strcmp(soc_dev_attr->revision, "unknown"))
+-		kfree(soc_dev_attr->revision);
+-free_soc:
+-	kfree(soc_dev_attr);
+-	return ret;
+ }
+ 
+ static struct platform_driver imx8m_soc_driver = {
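
The probe conversion above trades a chain of manual kfree()/goto labels for device-managed allocations plus devm_add_action() callbacks, which the driver core runs in reverse registration order on unbind or probe failure. A tiny user-space model of that reverse-order unwinding (the fixed action table is the illustrative part):

#include <stdio.h>

typedef void (*action_fn)(void *);

static action_fn actions[8];
static void *action_data[8];
static int nr_actions;

/* devm_add_action() analogue: remember a cleanup to run later. */
static int add_action(action_fn fn, void *data)
{
	actions[nr_actions] = fn;
	action_data[nr_actions] = data;
	nr_actions++;
	return 0;
}

/* Run cleanups newest-first, as the devm core does on release. */
static void release_all(void)
{
	while (nr_actions--)
		actions[nr_actions](action_data[nr_actions]);
}

static void unregister(void *what)
{
	printf("unregister %s\n", (char *)what);
}

int main(void)
{
	add_action(unregister, "soc device");
	add_action(unregister, "cpufreq device");
	release_all();	/* prints cpufreq device first, then soc device */
	return 0;
}
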
+diff --git a/drivers/soc/qcom/pdr_interface.c b/drivers/soc/qcom/pdr_interface.c
+index 328b6153b2be6c..71be378d2e43a5 100644
+--- a/drivers/soc/qcom/pdr_interface.c
++++ b/drivers/soc/qcom/pdr_interface.c
+@@ -75,7 +75,6 @@ static int pdr_locator_new_server(struct qmi_handle *qmi,
+ {
+ 	struct pdr_handle *pdr = container_of(qmi, struct pdr_handle,
+ 					      locator_hdl);
+-	struct pdr_service *pds;
+ 
+ 	mutex_lock(&pdr->lock);
+ 	/* Create a local client port for QMI communication */
+@@ -87,12 +86,7 @@ static int pdr_locator_new_server(struct qmi_handle *qmi,
+ 	mutex_unlock(&pdr->lock);
+ 
+ 	/* Service pending lookup requests */
+-	mutex_lock(&pdr->list_lock);
+-	list_for_each_entry(pds, &pdr->lookups, node) {
+-		if (pds->need_locator_lookup)
+-			schedule_work(&pdr->locator_work);
+-	}
+-	mutex_unlock(&pdr->list_lock);
++	schedule_work(&pdr->locator_work);
+ 
+ 	return 0;
+ }
+diff --git a/fs/libfs.c b/fs/libfs.c
+index b0f262223b5351..3cb49463a84969 100644
+--- a/fs/libfs.c
++++ b/fs/libfs.c
+@@ -492,7 +492,7 @@ offset_dir_lookup(struct dentry *parent, loff_t offset)
+ 		found = find_positive_dentry(parent, NULL, false);
+ 	else {
+ 		rcu_read_lock();
+-		child = mas_find(&mas, DIR_OFFSET_MAX);
++		child = mas_find_rev(&mas, DIR_OFFSET_MIN);
+ 		found = find_positive_dentry(parent, child, false);
+ 		rcu_read_unlock();
+ 	}
+diff --git a/fs/netfs/write_collect.c b/fs/netfs/write_collect.c
+index 82290c92ba7a29..412d4da7422701 100644
+--- a/fs/netfs/write_collect.c
++++ b/fs/netfs/write_collect.c
+@@ -576,7 +576,8 @@ void netfs_write_collection_worker(struct work_struct *work)
+ 	trace_netfs_rreq(wreq, netfs_rreq_trace_write_done);
+ 
+ 	if (wreq->io_streams[1].active &&
+-	    wreq->io_streams[1].failed) {
++	    wreq->io_streams[1].failed &&
++	    ictx->ops->invalidate_cache) {
+ 		/* Cache write failure doesn't prevent writeback completion
+ 		 * unless we're in disconnected mode.
+ 		 */
+diff --git a/fs/proc/generic.c b/fs/proc/generic.c
+index dbe82cf23ee49c..3431b083f7d05c 100644
+--- a/fs/proc/generic.c
++++ b/fs/proc/generic.c
+@@ -557,10 +557,16 @@ struct proc_dir_entry *proc_create_reg(const char *name, umode_t mode,
+ 	return p;
+ }
+ 
+-static inline void pde_set_flags(struct proc_dir_entry *pde)
++static void pde_set_flags(struct proc_dir_entry *pde)
+ {
+ 	if (pde->proc_ops->proc_flags & PROC_ENTRY_PERMANENT)
+ 		pde->flags |= PROC_ENTRY_PERMANENT;
++	if (pde->proc_ops->proc_read_iter)
++		pde->flags |= PROC_ENTRY_proc_read_iter;
++#ifdef CONFIG_COMPAT
++	if (pde->proc_ops->proc_compat_ioctl)
++		pde->flags |= PROC_ENTRY_proc_compat_ioctl;
++#endif
+ }
+ 
+ struct proc_dir_entry *proc_create_data(const char *name, umode_t mode,
+@@ -624,6 +630,7 @@ struct proc_dir_entry *proc_create_seq_private(const char *name, umode_t mode,
+ 	p->proc_ops = &proc_seq_ops;
+ 	p->seq_ops = ops;
+ 	p->state_size = state_size;
++	pde_set_flags(p);
+ 	return proc_register(parent, p);
+ }
+ EXPORT_SYMBOL(proc_create_seq_private);
+@@ -654,6 +661,7 @@ struct proc_dir_entry *proc_create_single_data(const char *name, umode_t mode,
+ 		return NULL;
+ 	p->proc_ops = &proc_single_ops;
+ 	p->single_show = show;
++	pde_set_flags(p);
+ 	return proc_register(parent, p);
+ }
+ EXPORT_SYMBOL(proc_create_single_data);
+diff --git a/fs/proc/inode.c b/fs/proc/inode.c
+index 626ad7bd94f244..a3eb3b740f7664 100644
+--- a/fs/proc/inode.c
++++ b/fs/proc/inode.c
+@@ -656,13 +656,13 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
+ 
+ 	if (S_ISREG(inode->i_mode)) {
+ 		inode->i_op = de->proc_iops;
+-		if (de->proc_ops->proc_read_iter)
++		if (pde_has_proc_read_iter(de))
+ 			inode->i_fop = &proc_iter_file_ops;
+ 		else
+ 			inode->i_fop = &proc_reg_file_ops;
+ #ifdef CONFIG_COMPAT
+-		if (de->proc_ops->proc_compat_ioctl) {
+-			if (de->proc_ops->proc_read_iter)
++		if (pde_has_proc_compat_ioctl(de)) {
++			if (pde_has_proc_read_iter(de))
+ 				inode->i_fop = &proc_iter_file_ops_compat;
+ 			else
+ 				inode->i_fop = &proc_reg_file_ops_compat;
+diff --git a/fs/proc/internal.h b/fs/proc/internal.h
+index 87e4d628202520..4e0c5b57ffdbb8 100644
+--- a/fs/proc/internal.h
++++ b/fs/proc/internal.h
+@@ -85,6 +85,20 @@ static inline void pde_make_permanent(struct proc_dir_entry *pde)
+ 	pde->flags |= PROC_ENTRY_PERMANENT;
+ }
+ 
++static inline bool pde_has_proc_read_iter(const struct proc_dir_entry *pde)
++{
++	return pde->flags & PROC_ENTRY_proc_read_iter;
++}
++
++static inline bool pde_has_proc_compat_ioctl(const struct proc_dir_entry *pde)
++{
++#ifdef CONFIG_COMPAT
++	return pde->flags & PROC_ENTRY_proc_compat_ioctl;
++#else
++	return false;
++#endif
++}
++
+ extern struct kmem_cache *proc_dir_entry_cache;
+ void pde_free(struct proc_dir_entry *pde);
+ 
+diff --git a/fs/smb/server/smbacl.c b/fs/smb/server/smbacl.c
+index da8ed72f335d99..109036e2227ca1 100644
+--- a/fs/smb/server/smbacl.c
++++ b/fs/smb/server/smbacl.c
+@@ -398,7 +398,9 @@ static void parse_dacl(struct mnt_idmap *idmap,
+ 	if (num_aces <= 0)
+ 		return;
+ 
+-	if (num_aces > ULONG_MAX / sizeof(struct smb_ace *))
++	if (num_aces > (le16_to_cpu(pdacl->size) - sizeof(struct smb_acl)) /
++			(offsetof(struct smb_ace, sid) +
++			 offsetof(struct smb_sid, sub_auth) + sizeof(__le16)))
+ 		return;
+ 
+ 	ret = init_acl_state(&acl_state, num_aces);
+@@ -432,6 +434,7 @@ static void parse_dacl(struct mnt_idmap *idmap,
+ 			offsetof(struct smb_sid, sub_auth);
+ 
+ 		if (end_of_acl - acl_base < acl_size ||
++		    ppace[i]->sid.num_subauth == 0 ||
+ 		    ppace[i]->sid.num_subauth > SID_MAX_SUB_AUTHORITIES ||
+ 		    (end_of_acl - acl_base <
+ 		     acl_size + sizeof(__le32) * ppace[i]->sid.num_subauth) ||
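
The tightened bound above derives the maximum plausible ACE count from the DACL's own size: the space after the struct smb_acl header divided by the smallest encodable ACE, so a claimed num_aces larger than that cannot possibly fit and is rejected before allocation. The arithmetic in a worked sketch (the struct sizes are invented for illustration):

#include <stdio.h>

int main(void)
{
	/* invented sizes standing in for the SMB wire structures */
	unsigned int dacl_size = 200, acl_hdr = 8, min_ace = 12;
	unsigned int max_aces = (dacl_size - acl_hdr) / min_ace;
	unsigned int num_aces = 1000;	/* attacker-claimed count */

	printf("at most %u ACEs fit in %u bytes\n", max_aces, dacl_size);
	if (num_aces > max_aces)
		puts("reject: claimed count exceeds what the buffer can hold");
	return 0;
}
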
+diff --git a/include/linux/key.h b/include/linux/key.h
+index 074dca3222b967..ba05de8579ecc5 100644
+--- a/include/linux/key.h
++++ b/include/linux/key.h
+@@ -236,6 +236,7 @@ struct key {
+ #define KEY_FLAG_ROOT_CAN_INVAL	7	/* set if key can be invalidated by root without permission */
+ #define KEY_FLAG_KEEP		8	/* set if key should not be removed */
+ #define KEY_FLAG_UID_KEYRING	9	/* set if key is a user or user session keyring */
++#define KEY_FLAG_FINAL_PUT	10	/* set if final put has happened on key */
+ 
+ 	/* the key type and key description string
+ 	 * - the desc is used to match a key against search criteria
+diff --git a/include/linux/libata.h b/include/linux/libata.h
+index 9b4a6ff03235bc..79974a99265fc2 100644
+--- a/include/linux/libata.h
++++ b/include/linux/libata.h
+@@ -88,6 +88,7 @@ enum ata_quirks {
+ 	__ATA_QUIRK_MAX_SEC_1024,	/* Limit max sects to 1024 */
+ 	__ATA_QUIRK_MAX_TRIM_128M,	/* Limit max trim size to 128M */
+ 	__ATA_QUIRK_NO_NCQ_ON_ATI,	/* Disable NCQ on ATI chipset */
++	__ATA_QUIRK_NO_LPM_ON_ATI,	/* Disable LPM on ATI chipset */
+ 	__ATA_QUIRK_NO_ID_DEV_LOG,	/* Identify device log missing */
+ 	__ATA_QUIRK_NO_LOG_DIR,		/* Do not read log directory */
+ 	__ATA_QUIRK_NO_FUA,		/* Do not use FUA */
+@@ -434,6 +435,7 @@ enum {
+ 	ATA_QUIRK_MAX_SEC_1024		= (1U << __ATA_QUIRK_MAX_SEC_1024),
+ 	ATA_QUIRK_MAX_TRIM_128M		= (1U << __ATA_QUIRK_MAX_TRIM_128M),
+ 	ATA_QUIRK_NO_NCQ_ON_ATI		= (1U << __ATA_QUIRK_NO_NCQ_ON_ATI),
++	ATA_QUIRK_NO_LPM_ON_ATI		= (1U << __ATA_QUIRK_NO_LPM_ON_ATI),
+ 	ATA_QUIRK_NO_ID_DEV_LOG		= (1U << __ATA_QUIRK_NO_ID_DEV_LOG),
+ 	ATA_QUIRK_NO_LOG_DIR		= (1U << __ATA_QUIRK_NO_LOG_DIR),
+ 	ATA_QUIRK_NO_FUA		= (1U << __ATA_QUIRK_NO_FUA),
+diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
+index 0b2a8985444097..ea62201c74c402 100644
+--- a/include/linux/proc_fs.h
++++ b/include/linux/proc_fs.h
+@@ -20,10 +20,13 @@ enum {
+ 	 * If in doubt, ignore this flag.
+ 	 */
+ #ifdef MODULE
+-	PROC_ENTRY_PERMANENT = 0U,
++	PROC_ENTRY_PERMANENT		= 0U,
+ #else
+-	PROC_ENTRY_PERMANENT = 1U << 0,
++	PROC_ENTRY_PERMANENT		= 1U << 0,
+ #endif
++
++	PROC_ENTRY_proc_read_iter	= 1U << 1,
++	PROC_ENTRY_proc_compat_ioctl	= 1U << 2,
+ };
+ 
+ struct proc_ops {
+diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
+index 5bb4eaa52e14cf..dd10e02bfc746e 100644
+--- a/include/net/bluetooth/hci.h
++++ b/include/net/bluetooth/hci.h
+@@ -683,7 +683,7 @@ enum {
+ #define HCI_ERROR_REMOTE_POWER_OFF	0x15
+ #define HCI_ERROR_LOCAL_HOST_TERM	0x16
+ #define HCI_ERROR_PAIRING_NOT_ALLOWED	0x18
+-#define HCI_ERROR_UNSUPPORTED_REMOTE_FEATURE	0x1e
++#define HCI_ERROR_UNSUPPORTED_REMOTE_FEATURE	0x1a
+ #define HCI_ERROR_INVALID_LL_PARAMS	0x1e
+ #define HCI_ERROR_UNSPECIFIED		0x1f
+ #define HCI_ERROR_ADVERTISING_TIMEOUT	0x3c
+diff --git a/include/net/mana/gdma.h b/include/net/mana/gdma.h
+index de47fa533b1504..6a0e83ac0fdb41 100644
+--- a/include/net/mana/gdma.h
++++ b/include/net/mana/gdma.h
+@@ -406,8 +406,6 @@ struct gdma_context {
+ 	struct gdma_dev		mana_ib;
+ };
+ 
+-#define MAX_NUM_GDMA_DEVICES	4
+-
+ static inline bool mana_gd_is_mana(struct gdma_dev *gd)
+ {
+ 	return gd->dev_id.type == GDMA_DEVICE_MANA;
+@@ -554,11 +552,15 @@ enum {
+ #define GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG BIT(3)
+ #define GDMA_DRV_CAP_FLAG_1_VARIABLE_INDIRECTION_TABLE_SUPPORT BIT(5)
+ 
++/* Driver can handle holes (zeros) in the device list */
++#define GDMA_DRV_CAP_FLAG_1_DEV_LIST_HOLES_SUP BIT(11)
++
+ #define GDMA_DRV_CAP_FLAGS1 \
+ 	(GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT | \
+ 	 GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX | \
+ 	 GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG | \
+-	 GDMA_DRV_CAP_FLAG_1_VARIABLE_INDIRECTION_TABLE_SUPPORT)
++	 GDMA_DRV_CAP_FLAG_1_VARIABLE_INDIRECTION_TABLE_SUPPORT | \
++	 GDMA_DRV_CAP_FLAG_1_DEV_LIST_HOLES_SUP)
+ 
+ #define GDMA_DRV_CAP_FLAGS2 0
+ 
+@@ -619,11 +621,12 @@ struct gdma_query_max_resources_resp {
+ }; /* HW DATA */
+ 
+ /* GDMA_LIST_DEVICES */
++#define GDMA_DEV_LIST_SIZE 64
+ struct gdma_list_devices_resp {
+ 	struct gdma_resp_hdr hdr;
+ 	u32 num_of_devs;
+ 	u32 reserved;
+-	struct gdma_dev_id devs[64];
++	struct gdma_dev_id devs[GDMA_DEV_LIST_SIZE];
+ }; /* HW DATA */
+ 
+ /* GDMA_REGISTER_DEVICE */
+diff --git a/io_uring/net.c b/io_uring/net.c
+index f32311f6411338..7ea99e082e97e7 100644
+--- a/io_uring/net.c
++++ b/io_uring/net.c
+@@ -152,7 +152,7 @@ static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
+ 		if (iov)
+ 			kasan_mempool_poison_object(iov);
+ 		req->async_data = NULL;
+-		req->flags &= ~REQ_F_ASYNC_DATA;
++		req->flags &= ~(REQ_F_ASYNC_DATA|REQ_F_NEED_CLEANUP);
+ 	}
+ }
+ 
+@@ -447,7 +447,6 @@ int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+ static void io_req_msg_cleanup(struct io_kiocb *req,
+ 			       unsigned int issue_flags)
+ {
+-	req->flags &= ~REQ_F_NEED_CLEANUP;
+ 	io_netmsg_recycle(req, issue_flags);
+ }
+ 
+@@ -1428,6 +1427,7 @@ int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
+ 	 */
+ 	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
+ 		io_notif_flush(zc->notif);
++		zc->notif = NULL;
+ 		io_req_msg_cleanup(req, 0);
+ 	}
+ 	io_req_set_res(req, ret, IORING_CQE_F_MORE);
+@@ -1488,6 +1488,7 @@ int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
+ 	 */
+ 	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
+ 		io_notif_flush(sr->notif);
++		sr->notif = NULL;
+ 		io_req_msg_cleanup(req, 0);
+ 	}
+ 	io_req_set_res(req, ret, IORING_CQE_F_MORE);
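
Clearing zc->notif and sr->notif right after io_notif_flush() makes the flush single-shot: any later cleanup path that checks the pointer sees NULL and cannot flush the notification a second time. The consume-and-clear idiom in miniature (flush_once() is hypothetical):

#include <stdio.h>
#include <stdlib.h>

struct notif { int posted; };

static void flush_once(struct notif **np)
{
	struct notif *n = *np;

	if (!n)
		return;		/* already consumed */
	n->posted = 1;
	puts("flushed");
	free(n);
	*np = NULL;		/* the fix: clear after consuming */
}

int main(void)
{
	struct notif *n = calloc(1, sizeof(*n));

	flush_once(&n);		/* flushes */
	flush_once(&n);		/* no-op instead of a double free */
	return 0;
}
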
+diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
+index 5b4e6d3bf7bcca..b8fe0b3d0ffb69 100644
+--- a/kernel/dma/direct.c
++++ b/kernel/dma/direct.c
+@@ -584,6 +584,22 @@ int dma_direct_supported(struct device *dev, u64 mask)
+ 	return mask >= phys_to_dma_unencrypted(dev, min_mask);
+ }
+ 
++static const struct bus_dma_region *dma_find_range(struct device *dev,
++						   unsigned long start_pfn)
++{
++	const struct bus_dma_region *m;
++
++	for (m = dev->dma_range_map; PFN_DOWN(m->size); m++) {
++		unsigned long cpu_start_pfn = PFN_DOWN(m->cpu_start);
++
++		if (start_pfn >= cpu_start_pfn &&
++		    start_pfn - cpu_start_pfn < PFN_DOWN(m->size))
++			return m;
++	}
++
++	return NULL;
++}
++
+ /*
+  * To check whether all ram resource ranges are covered by dma range map
+  * Returns 0 when further check is needed
+@@ -593,20 +609,12 @@ static int check_ram_in_range_map(unsigned long start_pfn,
+ 				  unsigned long nr_pages, void *data)
+ {
+ 	unsigned long end_pfn = start_pfn + nr_pages;
+-	const struct bus_dma_region *bdr = NULL;
+-	const struct bus_dma_region *m;
+ 	struct device *dev = data;
+ 
+ 	while (start_pfn < end_pfn) {
+-		for (m = dev->dma_range_map; PFN_DOWN(m->size); m++) {
+-			unsigned long cpu_start_pfn = PFN_DOWN(m->cpu_start);
++		const struct bus_dma_region *bdr;
+ 
+-			if (start_pfn >= cpu_start_pfn &&
+-			    start_pfn - cpu_start_pfn < PFN_DOWN(m->size)) {
+-				bdr = m;
+-				break;
+-			}
+-		}
++		bdr = dma_find_range(dev, start_pfn);
+ 		if (!bdr)
+ 			return 1;
+ 
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 1f817d0c5d2d0e..e9bb1b4c58421f 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -8919,7 +8919,7 @@ void sched_release_group(struct task_group *tg)
+ 	spin_unlock_irqrestore(&task_group_lock, flags);
+ }
+ 
+-static struct task_group *sched_get_task_group(struct task_struct *tsk)
++static void sched_change_group(struct task_struct *tsk)
+ {
+ 	struct task_group *tg;
+ 
+@@ -8931,13 +8931,7 @@ static struct task_group *sched_get_task_group(struct task_struct *tsk)
+ 	tg = container_of(task_css_check(tsk, cpu_cgrp_id, true),
+ 			  struct task_group, css);
+ 	tg = autogroup_task_group(tsk, tg);
+-
+-	return tg;
+-}
+-
+-static void sched_change_group(struct task_struct *tsk, struct task_group *group)
+-{
+-	tsk->sched_task_group = group;
++	tsk->sched_task_group = tg;
+ 
+ #ifdef CONFIG_FAIR_GROUP_SCHED
+ 	if (tsk->sched_class->task_change_group)
+@@ -8958,20 +8952,11 @@ void sched_move_task(struct task_struct *tsk, bool for_autogroup)
+ {
+ 	int queued, running, queue_flags =
+ 		DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
+-	struct task_group *group;
+ 	struct rq *rq;
+ 
+ 	CLASS(task_rq_lock, rq_guard)(tsk);
+ 	rq = rq_guard.rq;
+ 
+-	/*
+-	 * Esp. with SCHED_AUTOGROUP enabled it is possible to get superfluous
+-	 * group changes.
+-	 */
+-	group = sched_get_task_group(tsk);
+-	if (group == tsk->sched_task_group)
+-		return;
+-
+ 	update_rq_clock(rq);
+ 
+ 	running = task_current(rq, tsk);
+@@ -8982,7 +8967,7 @@ void sched_move_task(struct task_struct *tsk, bool for_autogroup)
+ 	if (running)
+ 		put_prev_task(rq, tsk);
+ 
+-	sched_change_group(tsk, group);
++	sched_change_group(tsk);
+ 	if (!for_autogroup)
+ 		scx_cgroup_move_task(tsk);
+ 
+diff --git a/kernel/trace/trace_fprobe.c b/kernel/trace/trace_fprobe.c
+index 99048c33038223..4acdab16579390 100644
+--- a/kernel/trace/trace_fprobe.c
++++ b/kernel/trace/trace_fprobe.c
+@@ -889,13 +889,8 @@ static void __find_tracepoint_module_cb(struct tracepoint *tp, struct module *mo
+ 
+ 	if (!data->tpoint && !strcmp(data->tp_name, tp->name)) {
+ 		data->tpoint = tp;
+-		if (!data->mod) {
++		if (!data->mod)
+ 			data->mod = mod;
+-			if (!try_module_get(data->mod)) {
+-				data->tpoint = NULL;
+-				data->mod = NULL;
+-			}
+-		}
+ 	}
+ }
+ 
+@@ -907,13 +902,7 @@ static void __find_tracepoint_cb(struct tracepoint *tp, void *priv)
+ 		data->tpoint = tp;
+ }
+ 
+-/*
+- * Find a tracepoint from kernel and module. If the tracepoint is in a module,
+- * this increments the module refcount to prevent unloading until the
+- * trace_fprobe is registered to the list. After registering the trace_fprobe
+- * on the trace_fprobe list, the module refcount is decremented because
+- * tracepoint_probe_module_cb will handle it.
+- */
++/* Find a tracepoint in the kernel or a module. */
+ static struct tracepoint *find_tracepoint(const char *tp_name,
+ 					  struct module **tp_mod)
+ {
+@@ -942,6 +931,7 @@ static void reenable_trace_fprobe(struct trace_fprobe *tf)
+ 	}
+ }
+ 
++/* Find a tracepoint in the specified module. */
+ static struct tracepoint *find_tracepoint_in_module(struct module *mod,
+ 						    const char *tp_name)
+ {
+@@ -977,10 +967,13 @@ static int __tracepoint_probe_module_cb(struct notifier_block *self,
+ 					reenable_trace_fprobe(tf);
+ 			}
+ 		} else if (val == MODULE_STATE_GOING && tp_mod->mod == tf->mod) {
+-			tracepoint_probe_unregister(tf->tpoint,
++			unregister_fprobe(&tf->fp);
++			if (trace_fprobe_is_tracepoint(tf)) {
++				tracepoint_probe_unregister(tf->tpoint,
+ 					tf->tpoint->probestub, NULL);
+-			tf->tpoint = NULL;
+-			tf->mod = NULL;
++				tf->tpoint = TRACEPOINT_STUB;
++				tf->mod = NULL;
++			}
+ 		}
+ 	}
+ 	mutex_unlock(&event_mutex);
+@@ -1174,6 +1167,11 @@ static int __trace_fprobe_create(int argc, const char *argv[])
+ 	if (is_tracepoint) {
+ 		ctx.flags |= TPARG_FL_TPOINT;
+ 		tpoint = find_tracepoint(symbol, &tp_mod);
++		/* lock the module until this tprobe is registered. */
++		if (tp_mod && !try_module_get(tp_mod)) {
++			tpoint = NULL;
++			tp_mod = NULL;
++		}
+ 		if (tpoint) {
+ 			ctx.funcname = kallsyms_lookup(
+ 				(unsigned long)tpoint->probestub,
+diff --git a/mm/filemap.c b/mm/filemap.c
+index 05adf0392625da..3c37ad6c598bbb 100644
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -1966,8 +1966,19 @@ struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
+ 
+ 		if (err == -EEXIST)
+ 			goto repeat;
+-		if (err)
++		if (err) {
++			/*
++			 * When NOWAIT I/O fails to allocate folios this could
++			 * be due to a nonblocking memory allocation and not
++			 * because the system actually is out of memory.
++			 * Return -EAGAIN so that the caller retries in a
++			 * blocking fashion instead of propagating -ENOMEM
++			 * to the application.
++			 */
++			if ((fgp_flags & FGP_NOWAIT) && err == -ENOMEM)
++				err = -EAGAIN;
+ 			return ERR_PTR(err);
++		}
+ 		/*
+ 		 * filemap_add_folio locks the page, and for mmap
+ 		 * we expect an unlocked page.
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index f127b61f04a825..40ac11e294231e 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -3224,7 +3224,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
+ 				folio_account_cleaned(tail,
+ 					inode_to_wb(folio->mapping->host));
+ 			__filemap_remove_folio(tail, NULL);
+-			folio_put(tail);
++			folio_put_refs(tail, folio_nr_pages(tail));
+ 		} else if (!PageAnon(page)) {
+ 			__xa_store(&folio->mapping->i_pages, head[i].index,
+ 					head + i, 0);
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index ae1d184d035a4d..2d1e402f06f22a 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -1882,9 +1882,18 @@ void drain_all_stock(struct mem_cgroup *root_memcg)
+ static int memcg_hotplug_cpu_dead(unsigned int cpu)
+ {
+ 	struct memcg_stock_pcp *stock;
++	struct obj_cgroup *old;
++	unsigned long flags;
+ 
+ 	stock = &per_cpu(memcg_stock, cpu);
++
++	/* drain_obj_stock requires stock_lock */
++	local_lock_irqsave(&memcg_stock.stock_lock, flags);
++	old = drain_obj_stock(stock);
++	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
++
+ 	drain_stock(stock);
++	obj_cgroup_put(old);
+ 
+ 	return 0;
+ }
+diff --git a/mm/migrate.c b/mm/migrate.c
+index dfa24e41e8f956..25e7438af968a4 100644
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -526,15 +526,13 @@ static int __folio_migrate_mapping(struct address_space *mapping,
+ 	if (folio_test_anon(folio) && folio_test_large(folio))
+ 		mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1);
+ 	folio_ref_add(newfolio, nr); /* add cache reference */
+-	if (folio_test_swapbacked(folio)) {
++	if (folio_test_swapbacked(folio))
+ 		__folio_set_swapbacked(newfolio);
+-		if (folio_test_swapcache(folio)) {
+-			folio_set_swapcache(newfolio);
+-			newfolio->private = folio_get_private(folio);
+-		}
++	if (folio_test_swapcache(folio)) {
++		folio_set_swapcache(newfolio);
++		newfolio->private = folio_get_private(folio);
+ 		entries = nr;
+ 	} else {
+-		VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
+ 		entries = 1;
+ 	}
+ 
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index e0a77fe1b6300d..fd4e0e1cd65e43 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -7094,7 +7094,7 @@ static inline bool has_unaccepted_memory(void)
+ 
+ static bool cond_accept_memory(struct zone *zone, unsigned int order)
+ {
+-	long to_accept;
++	long to_accept, wmark;
+ 	bool ret = false;
+ 
+ 	if (!has_unaccepted_memory())
+@@ -7103,8 +7103,18 @@ static bool cond_accept_memory(struct zone *zone, unsigned int order)
+ 	if (list_empty(&zone->unaccepted_pages))
+ 		return false;
+ 
++	wmark = promo_wmark_pages(zone);
++
++	/*
++	 * Watermarks have not been initialized yet.
++	 *
++	 * Accept one MAX_ORDER page to ensure progress.
++	 */
++	if (!wmark)
++		return try_to_accept_memory_one(zone);
++
+ 	/* How much to accept to get to promo watermark? */
+-	to_accept = promo_wmark_pages(zone) -
++	to_accept = wmark -
+ 		    (zone_page_state(zone, NR_FREE_PAGES) -
+ 		    __zone_watermark_unusable_free(zone, order, 0) -
+ 		    zone_page_state(zone, NR_UNACCEPTED));
+diff --git a/net/atm/lec.c b/net/atm/lec.c
+index ffef658862db15..a948dd47c3f347 100644
+--- a/net/atm/lec.c
++++ b/net/atm/lec.c
+@@ -181,6 +181,7 @@ static void
+ lec_send(struct atm_vcc *vcc, struct sk_buff *skb)
+ {
+ 	struct net_device *dev = skb->dev;
++	unsigned int len = skb->len;
+ 
+ 	ATM_SKB(skb)->vcc = vcc;
+ 	atm_account_tx(vcc, skb);
+@@ -191,7 +192,7 @@ lec_send(struct atm_vcc *vcc, struct sk_buff *skb)
+ 	}
+ 
+ 	dev->stats.tx_packets++;
+-	dev->stats.tx_bytes += skb->len;
++	dev->stats.tx_bytes += len;
+ }
+ 
+ static void lec_tx_timeout(struct net_device *dev, unsigned int txqueue)
+diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
+index 74b49c35ddc14d..209180b4c26817 100644
+--- a/net/batman-adv/bat_iv_ogm.c
++++ b/net/batman-adv/bat_iv_ogm.c
+@@ -324,8 +324,7 @@ batadv_iv_ogm_aggr_packet(int buff_pos, int packet_len,
+ 	/* check if there is enough space for the optional TVLV */
+ 	next_buff_pos += ntohs(ogm_packet->tvlv_len);
+ 
+-	return (next_buff_pos <= packet_len) &&
+-	       (next_buff_pos <= BATADV_MAX_AGGREGATION_BYTES);
++	return next_buff_pos <= packet_len;
+ }
+ 
+ /* send a batman ogm to a given interface */
+diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c
+index e503ee0d896bd5..8f89ffe6020ced 100644
+--- a/net/batman-adv/bat_v_ogm.c
++++ b/net/batman-adv/bat_v_ogm.c
+@@ -839,8 +839,7 @@ batadv_v_ogm_aggr_packet(int buff_pos, int packet_len,
+ 	/* check if there is enough space for the optional TVLV */
+ 	next_buff_pos += ntohs(ogm2_packet->tvlv_len);
+ 
+-	return (next_buff_pos <= packet_len) &&
+-	       (next_buff_pos <= BATADV_MAX_AGGREGATION_BYTES);
++	return next_buff_pos <= packet_len;
+ }
+ 
+ /**
+diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
+index 50cfec8ccac4f7..3c29778171c581 100644
+--- a/net/bluetooth/6lowpan.c
++++ b/net/bluetooth/6lowpan.c
+@@ -825,11 +825,16 @@ static struct sk_buff *chan_alloc_skb_cb(struct l2cap_chan *chan,
+ 					 unsigned long hdr_len,
+ 					 unsigned long len, int nb)
+ {
++	struct sk_buff *skb;
++
+ 	/* Note that we must allocate using GFP_ATOMIC here as
+ 	 * this function is called originally from netdev hard xmit
+ 	 * function in atomic context.
+ 	 */
+-	return bt_skb_alloc(hdr_len + len, GFP_ATOMIC);
++	skb = bt_skb_alloc(hdr_len + len, GFP_ATOMIC);
++	if (!skb)
++		return ERR_PTR(-ENOMEM);
++	return skb;
+ }
+ 
+ static void chan_suspend_cb(struct l2cap_chan *chan)
+diff --git a/net/core/lwtunnel.c b/net/core/lwtunnel.c
+index 711cd3b4347a79..4417a18b3e951a 100644
+--- a/net/core/lwtunnel.c
++++ b/net/core/lwtunnel.c
+@@ -23,6 +23,8 @@
+ #include <net/ip6_fib.h>
+ #include <net/rtnh.h>
+ 
++#include "dev.h"
++
+ DEFINE_STATIC_KEY_FALSE(nf_hooks_lwtunnel_enabled);
+ EXPORT_SYMBOL_GPL(nf_hooks_lwtunnel_enabled);
+ 
+@@ -325,13 +327,23 @@ EXPORT_SYMBOL_GPL(lwtunnel_cmp_encap);
+ 
+ int lwtunnel_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+ {
+-	struct dst_entry *dst = skb_dst(skb);
+ 	const struct lwtunnel_encap_ops *ops;
+ 	struct lwtunnel_state *lwtstate;
+-	int ret = -EINVAL;
++	struct dst_entry *dst;
++	int ret;
++
++	if (dev_xmit_recursion()) {
++		net_crit_ratelimited("%s(): recursion limit reached on datapath\n",
++				     __func__);
++		ret = -ENETDOWN;
++		goto drop;
++	}
+ 
+-	if (!dst)
++	dst = skb_dst(skb);
++	if (!dst) {
++		ret = -EINVAL;
+ 		goto drop;
++	}
+ 	lwtstate = dst->lwtstate;
+ 
+ 	if (lwtstate->type == LWTUNNEL_ENCAP_NONE ||
+@@ -341,8 +353,11 @@ int lwtunnel_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+ 	ret = -EOPNOTSUPP;
+ 	rcu_read_lock();
+ 	ops = rcu_dereference(lwtun_encaps[lwtstate->type]);
+-	if (likely(ops && ops->output))
++	if (likely(ops && ops->output)) {
++		dev_xmit_recursion_inc();
+ 		ret = ops->output(net, sk, skb);
++		dev_xmit_recursion_dec();
++	}
+ 	rcu_read_unlock();
+ 
+ 	if (ret == -EOPNOTSUPP)
+@@ -359,13 +374,23 @@ EXPORT_SYMBOL_GPL(lwtunnel_output);
+ 
+ int lwtunnel_xmit(struct sk_buff *skb)
+ {
+-	struct dst_entry *dst = skb_dst(skb);
+ 	const struct lwtunnel_encap_ops *ops;
+ 	struct lwtunnel_state *lwtstate;
+-	int ret = -EINVAL;
++	struct dst_entry *dst;
++	int ret;
++
++	if (dev_xmit_recursion()) {
++		net_crit_ratelimited("%s(): recursion limit reached on datapath\n",
++				     __func__);
++		ret = -ENETDOWN;
++		goto drop;
++	}
+ 
+-	if (!dst)
++	dst = skb_dst(skb);
++	if (!dst) {
++		ret = -EINVAL;
+ 		goto drop;
++	}
+ 
+ 	lwtstate = dst->lwtstate;
+ 
+@@ -376,8 +401,11 @@ int lwtunnel_xmit(struct sk_buff *skb)
+ 	ret = -EOPNOTSUPP;
+ 	rcu_read_lock();
+ 	ops = rcu_dereference(lwtun_encaps[lwtstate->type]);
+-	if (likely(ops && ops->xmit))
++	if (likely(ops && ops->xmit)) {
++		dev_xmit_recursion_inc();
+ 		ret = ops->xmit(skb);
++		dev_xmit_recursion_dec();
++	}
+ 	rcu_read_unlock();
+ 
+ 	if (ret == -EOPNOTSUPP)
+@@ -394,13 +422,23 @@ EXPORT_SYMBOL_GPL(lwtunnel_xmit);
+ 
+ int lwtunnel_input(struct sk_buff *skb)
+ {
+-	struct dst_entry *dst = skb_dst(skb);
+ 	const struct lwtunnel_encap_ops *ops;
+ 	struct lwtunnel_state *lwtstate;
+-	int ret = -EINVAL;
++	struct dst_entry *dst;
++	int ret;
+ 
+-	if (!dst)
++	if (dev_xmit_recursion()) {
++		net_crit_ratelimited("%s(): recursion limit reached on datapath\n",
++				     __func__);
++		ret = -ENETDOWN;
+ 		goto drop;
++	}
++
++	dst = skb_dst(skb);
++	if (!dst) {
++		ret = -EINVAL;
++		goto drop;
++	}
+ 	lwtstate = dst->lwtstate;
+ 
+ 	if (lwtstate->type == LWTUNNEL_ENCAP_NONE ||
+@@ -410,8 +448,11 @@ int lwtunnel_input(struct sk_buff *skb)
+ 	ret = -EOPNOTSUPP;
+ 	rcu_read_lock();
+ 	ops = rcu_dereference(lwtun_encaps[lwtstate->type]);
+-	if (likely(ops && ops->input))
++	if (likely(ops && ops->input)) {
++		dev_xmit_recursion_inc();
+ 		ret = ops->input(skb);
++		dev_xmit_recursion_dec();
++	}
+ 	rcu_read_unlock();
+ 
+ 	if (ret == -EOPNOTSUPP)
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c
+index c7f7ea61b524a2..8082cc6be4fc1b 100644
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -2301,6 +2301,7 @@ static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
+ static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
+ 	[NDTPA_IFINDEX]			= { .type = NLA_U32 },
+ 	[NDTPA_QUEUE_LEN]		= { .type = NLA_U32 },
++	[NDTPA_QUEUE_LENBYTES]		= { .type = NLA_U32 },
+ 	[NDTPA_PROXY_QLEN]		= { .type = NLA_U32 },
+ 	[NDTPA_APP_PROBES]		= { .type = NLA_U32 },
+ 	[NDTPA_UCAST_PROBES]		= { .type = NLA_U32 },
+diff --git a/net/devlink/core.c b/net/devlink/core.c
+index f49cd83f1955f5..7203c39532fcc3 100644
+--- a/net/devlink/core.c
++++ b/net/devlink/core.c
+@@ -117,7 +117,7 @@ static struct devlink_rel *devlink_rel_alloc(void)
+ 
+ 	err = xa_alloc_cyclic(&devlink_rels, &rel->index, rel,
+ 			      xa_limit_32b, &next, GFP_KERNEL);
+-	if (err) {
++	if (err < 0) {
+ 		kfree(rel);
+ 		return ERR_PTR(err);
+ 	}
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index 26cdb665747573..f7c17388ff6aaf 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -3237,13 +3237,16 @@ static void add_v4_addrs(struct inet6_dev *idev)
+ 	struct in6_addr addr;
+ 	struct net_device *dev;
+ 	struct net *net = dev_net(idev->dev);
+-	int scope, plen;
++	int scope, plen, offset = 0;
+ 	u32 pflags = 0;
+ 
+ 	ASSERT_RTNL();
+ 
+ 	memset(&addr, 0, sizeof(struct in6_addr));
+-	memcpy(&addr.s6_addr32[3], idev->dev->dev_addr, 4);
++	/* in case of IP6GRE the dev_addr is an IPv6 address, so use only the last 4 bytes */
++	if (idev->dev->addr_len == sizeof(struct in6_addr))
++		offset = sizeof(struct in6_addr) - 4;
++	memcpy(&addr.s6_addr32[3], idev->dev->dev_addr + offset, 4);
+ 
+ 	if (!(idev->dev->flags & IFF_POINTOPOINT) && idev->dev->type == ARPHRD_SIT) {
+ 		scope = IPV6_ADDR_COMPATv4;
+@@ -3554,13 +3557,7 @@ static void addrconf_gre_config(struct net_device *dev)
+ 		return;
+ 	}
+ 
+-	/* Generate the IPv6 link-local address using addrconf_addr_gen(),
+-	 * unless we have an IPv4 GRE device not bound to an IP address and
+-	 * which is in EUI64 mode (as __ipv6_isatap_ifid() would fail in this
+-	 * case). Such devices fall back to add_v4_addrs() instead.
+-	 */
+-	if (!(dev->type == ARPHRD_IPGRE && *(__be32 *)dev->dev_addr == 0 &&
+-	      idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_EUI64)) {
++	if (dev->type == ARPHRD_ETHER) {
+ 		addrconf_addr_gen(idev, true);
+ 		return;
+ 	}
+diff --git a/net/ipv6/ioam6_iptunnel.c b/net/ipv6/ioam6_iptunnel.c
+index 4215cebe7d85a9..647dd8417c6cf9 100644
+--- a/net/ipv6/ioam6_iptunnel.c
++++ b/net/ipv6/ioam6_iptunnel.c
+@@ -339,7 +339,6 @@ static int ioam6_do_encap(struct net *net, struct sk_buff *skb,
+ static int ioam6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+ {
+ 	struct dst_entry *dst = skb_dst(skb), *cache_dst = NULL;
+-	struct in6_addr orig_daddr;
+ 	struct ioam6_lwt *ilwt;
+ 	int err = -EINVAL;
+ 	u32 pkt_cnt;
+@@ -354,8 +353,6 @@ static int ioam6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+ 	if (pkt_cnt % ilwt->freq.n >= ilwt->freq.k)
+ 		goto out;
+ 
+-	orig_daddr = ipv6_hdr(skb)->daddr;
+-
+ 	local_bh_disable();
+ 	cache_dst = dst_cache_get(&ilwt->cache);
+ 	local_bh_enable();
+@@ -424,7 +421,10 @@ static int ioam6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+ 			goto drop;
+ 	}
+ 
+-	if (!ipv6_addr_equal(&orig_daddr, &ipv6_hdr(skb)->daddr)) {
++	/* avoid lwtunnel_output() reentry loop when destination is the same
++	 * after transformation (e.g., with the inline mode)
++	 */
++	if (dst->lwtstate != cache_dst->lwtstate) {
+ 		skb_dst_drop(skb);
+ 		skb_dst_set(skb, cache_dst);
+ 		return dst_output(net, sk, skb);
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 2736dea77575b5..b393c37d24245c 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -3644,7 +3644,8 @@ int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh,
+ 		in6_dev_put(idev);
+ 
+ 	if (err) {
+-		lwtstate_put(fib6_nh->fib_nh_lws);
++		fib_nh_common_release(&fib6_nh->nh_common);
++		fib6_nh->nh_common.nhc_pcpu_rth_output = NULL;
+ 		fib6_nh->fib_nh_lws = NULL;
+ 		netdev_put(dev, dev_tracker);
+ 	}
+@@ -3802,10 +3803,12 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
+ 	if (nh) {
+ 		if (rt->fib6_src.plen) {
+ 			NL_SET_ERR_MSG(extack, "Nexthops can not be used with source routing");
++			err = -EINVAL;
+ 			goto out_free;
+ 		}
+ 		if (!nexthop_get(nh)) {
+ 			NL_SET_ERR_MSG(extack, "Nexthop has been deleted");
++			err = -ENOENT;
+ 			goto out_free;
+ 		}
+ 		rt->nh = nh;
+diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c
+index a45bf17cb2a172..ae2da28f9dfb1c 100644
+--- a/net/ipv6/tcpv6_offload.c
++++ b/net/ipv6/tcpv6_offload.c
+@@ -94,14 +94,23 @@ INDIRECT_CALLABLE_SCOPE int tcp6_gro_complete(struct sk_buff *skb, int thoff)
+ }
+ 
+ static void __tcpv6_gso_segment_csum(struct sk_buff *seg,
++				     struct in6_addr *oldip,
++				     const struct in6_addr *newip,
+ 				     __be16 *oldport, __be16 newport)
+ {
+-	struct tcphdr *th;
++	struct tcphdr *th = tcp_hdr(seg);
++
++	if (!ipv6_addr_equal(oldip, newip)) {
++		inet_proto_csum_replace16(&th->check, seg,
++					  oldip->s6_addr32,
++					  newip->s6_addr32,
++					  true);
++		*oldip = *newip;
++	}
+ 
+ 	if (*oldport == newport)
+ 		return;
+ 
+-	th = tcp_hdr(seg);
+ 	inet_proto_csum_replace2(&th->check, seg, *oldport, newport, false);
+ 	*oldport = newport;
+ }
+@@ -129,10 +138,10 @@ static struct sk_buff *__tcpv6_gso_segment_list_csum(struct sk_buff *segs)
+ 		th2 = tcp_hdr(seg);
+ 		iph2 = ipv6_hdr(seg);
+ 
+-		iph2->saddr = iph->saddr;
+-		iph2->daddr = iph->daddr;
+-		__tcpv6_gso_segment_csum(seg, &th2->source, th->source);
+-		__tcpv6_gso_segment_csum(seg, &th2->dest, th->dest);
++		__tcpv6_gso_segment_csum(seg, &iph2->saddr, &iph->saddr,
++					 &th2->source, th->source);
++		__tcpv6_gso_segment_csum(seg, &iph2->daddr, &iph->daddr,
++					 &th2->dest, th->dest);
+ 	}
+ 
+ 	return segs;
+diff --git a/net/mptcp/options.c b/net/mptcp/options.c
+index fd2de185bc939f..23949ae2a3a8db 100644
+--- a/net/mptcp/options.c
++++ b/net/mptcp/options.c
+@@ -651,6 +651,7 @@ static bool mptcp_established_options_add_addr(struct sock *sk, struct sk_buff *
+ 	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
+ 	bool drop_other_suboptions = false;
+ 	unsigned int opt_size = *size;
++	struct mptcp_addr_info addr;
+ 	bool echo;
+ 	int len;
+ 
+@@ -659,7 +660,7 @@ static bool mptcp_established_options_add_addr(struct sock *sk, struct sk_buff *
+ 	 */
+ 	if (!mptcp_pm_should_add_signal(msk) ||
+ 	    (opts->suboptions & (OPTION_MPTCP_MPJ_ACK | OPTION_MPTCP_MPC_ACK)) ||
+-	    !mptcp_pm_add_addr_signal(msk, skb, opt_size, remaining, &opts->addr,
++	    !mptcp_pm_add_addr_signal(msk, skb, opt_size, remaining, &addr,
+ 		    &echo, &drop_other_suboptions))
+ 		return false;
+ 
+@@ -672,7 +673,7 @@ static bool mptcp_established_options_add_addr(struct sock *sk, struct sk_buff *
+ 	else if (opts->suboptions & OPTION_MPTCP_DSS)
+ 		return false;
+ 
+-	len = mptcp_add_addr_len(opts->addr.family, echo, !!opts->addr.port);
++	len = mptcp_add_addr_len(addr.family, echo, !!addr.port);
+ 	if (remaining < len)
+ 		return false;
+ 
+@@ -689,6 +690,7 @@ static bool mptcp_established_options_add_addr(struct sock *sk, struct sk_buff *
+ 		opts->ahmac = 0;
+ 		*size -= opt_size;
+ 	}
++	opts->addr = addr;
+ 	opts->suboptions |= OPTION_MPTCP_ADD_ADDR;
+ 	if (!echo) {
+ 		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_ADDADDRTX);
+diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
+index 0662d34b09ee78..87e865b9b83af9 100644
+--- a/net/xdp/xsk_buff_pool.c
++++ b/net/xdp/xsk_buff_pool.c
+@@ -106,7 +106,7 @@ struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
+ 		if (pool->unaligned)
+ 			pool->free_heads[i] = xskb;
+ 		else
+-			xp_init_xskb_addr(xskb, pool, i * pool->chunk_size);
++			xp_init_xskb_addr(xskb, pool, (u64)i * pool->chunk_size);
+ 	}
+ 
+ 	return pool;
+diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
+index e5722c95b8bb38..a30538a980cc7f 100644
+--- a/net/xfrm/xfrm_output.c
++++ b/net/xfrm/xfrm_output.c
+@@ -610,6 +610,40 @@ int xfrm_output_resume(struct sock *sk, struct sk_buff *skb, int err)
+ }
+ EXPORT_SYMBOL_GPL(xfrm_output_resume);
+ 
++static int xfrm_dev_direct_output(struct sock *sk, struct xfrm_state *x,
++				  struct sk_buff *skb)
++{
++	struct dst_entry *dst = skb_dst(skb);
++	struct net *net = xs_net(x);
++	int err;
++
++	dst = skb_dst_pop(skb);
++	if (!dst) {
++		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
++		kfree_skb(skb);
++		return -EHOSTUNREACH;
++	}
++	skb_dst_set(skb, dst);
++	nf_reset_ct(skb);
++
++	err = skb_dst(skb)->ops->local_out(net, sk, skb);
++	if (unlikely(err != 1)) {
++		kfree_skb(skb);
++		return err;
++	}
++
++	/* In transport mode, the network destination is
++	 * directly reachable, while in tunnel mode the
++	 * inner packet's network may not be. With packet
++	 * offload, the HW is responsible for hard header
++	 * mangling, so xmit the skb directly to the
++	 * netdevice.
++	 */
++	skb->dev = x->xso.dev;
++	__skb_push(skb, skb->dev->hard_header_len);
++	return dev_queue_xmit(skb);
++}
++
+ static int xfrm_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
+ {
+ 	return xfrm_output_resume(sk, skb, 1);
+@@ -729,6 +763,13 @@ int xfrm_output(struct sock *sk, struct sk_buff *skb)
+ 			return -EHOSTUNREACH;
+ 		}
+ 
++		/* Exclusive direct xmit for tunnel mode, as
++		 * some filtering or matching rules may apply
++		 * in transport mode.
++		 */
++		if (x->props.mode == XFRM_MODE_TUNNEL)
++			return xfrm_dev_direct_output(sk, x, skb);
++
+ 		return xfrm_output_resume(sk, skb, 0);
+ 	}
+ 
+@@ -752,7 +793,7 @@ int xfrm_output(struct sock *sk, struct sk_buff *skb)
+ 		skb->encapsulation = 1;
+ 
+ 		if (skb_is_gso(skb)) {
+-			if (skb->inner_protocol)
++			if (skb->inner_protocol && x->props.mode == XFRM_MODE_TUNNEL)
+ 				return xfrm_output_gso(net, sk, skb);
+ 
+ 			skb_shinfo(skb)->gso_type |= SKB_GSO_ESP;
+diff --git a/security/keys/gc.c b/security/keys/gc.c
+index 7d687b0962b146..f27223ea4578f1 100644
+--- a/security/keys/gc.c
++++ b/security/keys/gc.c
+@@ -218,8 +218,10 @@ static void key_garbage_collector(struct work_struct *work)
+ 		key = rb_entry(cursor, struct key, serial_node);
+ 		cursor = rb_next(cursor);
+ 
+-		if (refcount_read(&key->usage) == 0)
++		if (test_bit(KEY_FLAG_FINAL_PUT, &key->flags)) {
++			smp_mb(); /* Clobber key->user after FINAL_PUT seen. */
+ 			goto found_unreferenced_key;
++		}
+ 
+ 		if (unlikely(gc_state & KEY_GC_REAPING_DEAD_1)) {
+ 			if (key->type == key_gc_dead_keytype) {
+diff --git a/security/keys/key.c b/security/keys/key.c
+index 3d7d185019d30a..7198cd2ac3a3a5 100644
+--- a/security/keys/key.c
++++ b/security/keys/key.c
+@@ -658,6 +658,8 @@ void key_put(struct key *key)
+ 				key->user->qnbytes -= key->quotalen;
+ 				spin_unlock_irqrestore(&key->user->lock, flags);
+ 			}
++			smp_mb(); /* key->user before FINAL_PUT set. */
++			set_bit(KEY_FLAG_FINAL_PUT, &key->flags);
+ 			schedule_work(&key_gc_work);
+ 		}
+ 	}
+diff --git a/tools/lib/subcmd/parse-options.c b/tools/lib/subcmd/parse-options.c
+index eb896d30545b63..555d617c1f502a 100644
+--- a/tools/lib/subcmd/parse-options.c
++++ b/tools/lib/subcmd/parse-options.c
+@@ -807,7 +807,7 @@ static int option__cmp(const void *va, const void *vb)
+ static struct option *options__order(const struct option *opts)
+ {
+ 	int nr_opts = 0, nr_group = 0, nr_parent = 0, len;
+-	const struct option *o, *p = opts;
++	const struct option *o = NULL, *p = opts;
+ 	struct option *opt, *ordered = NULL, *group;
+ 
+ 	/* flatten the options that have parents */
+diff --git a/tools/testing/selftests/mm/run_vmtests.sh b/tools/testing/selftests/mm/run_vmtests.sh
+index c5797ad1d37b68..d86ca1554d6d0a 100755
+--- a/tools/testing/selftests/mm/run_vmtests.sh
++++ b/tools/testing/selftests/mm/run_vmtests.sh
+@@ -300,7 +300,9 @@ uffd_stress_bin=./uffd-stress
+ CATEGORY="userfaultfd" run_test ${uffd_stress_bin} anon 20 16
+ # Hugetlb tests require source and destination huge pages. Pass in half
+ # the size of the free pages we have, which is used for *each*.
+-half_ufd_size_MB=$((freepgs / 2))
++# uffd-stress expects a region expressed in MiB, so we adjust
++# half_ufd_size_MB accordingly.
++half_ufd_size_MB=$(((freepgs * hpgsize_KB) / 1024 / 2))
+ CATEGORY="userfaultfd" run_test ${uffd_stress_bin} hugetlb "$half_ufd_size_MB" 32
+ CATEGORY="userfaultfd" run_test ${uffd_stress_bin} hugetlb-private "$half_ufd_size_MB" 32
+ CATEGORY="userfaultfd" run_test ${uffd_stress_bin} shmem 20 16


^ permalink raw reply related	[flat|nested] 82+ messages in thread
* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2025-03-23 11:31 Mike Pagano
  0 siblings, 0 replies; 82+ messages in thread
From: Mike Pagano @ 2025-03-23 11:31 UTC (permalink / raw
  To: gentoo-commits

commit:     977bc8af5b6f751986ac594ef727aa6423f3dc58
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Mar 23 11:30:56 2025 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Mar 23 11:30:56 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=977bc8af

Linux patch 6.12.20

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    28 +
 1019_linux-6.12.20.patch | 10202 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 10230 insertions(+)

diff --git a/0000_README b/0000_README
index c53357bf..ecb0495f 100644
--- a/0000_README
+++ b/0000_README
@@ -95,6 +95,34 @@ Patch:  1012_linux-6.12.13.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.12.13
 
+Patch:  1013_linux-6.12.14.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.12.14
+
+Patch:  1014_linux-6.12.15.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.12.15
+
+Patch:  1015_linux-6.12.16.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.12.16
+
+Patch:  1016_linux-6.12.17.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.12.17
+
+Patch:  1017_linux-6.12.18.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.12.18
+
+Patch:  1018_linux-6.12.19.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.12.19
+
+Patch:  1019_linux-6.12.20.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.12.20
+
 Patch:  1500_fortify-copy-size-value-range-tracking-fix.patch
 From:   https://git.kernel.org/
 Desc:   fortify: Hide run-time copy size from value range tracking

diff --git a/1019_linux-6.12.20.patch b/1019_linux-6.12.20.patch
new file mode 100644
index 00000000..aad096ee
--- /dev/null
+++ b/1019_linux-6.12.20.patch
@@ -0,0 +1,10202 @@
+diff --git a/Documentation/rust/quick-start.rst b/Documentation/rust/quick-start.rst
+index 2d107982c87bbe..ded0d0836aee0d 100644
+--- a/Documentation/rust/quick-start.rst
++++ b/Documentation/rust/quick-start.rst
+@@ -128,7 +128,7 @@ Rust standard library source
+ ****************************
+ 
+ The Rust standard library source is required because the build system will
+-cross-compile ``core`` and ``alloc``.
++cross-compile ``core``.
+ 
+ If ``rustup`` is being used, run::
+ 
+diff --git a/Makefile b/Makefile
+index 343c9f25433c7c..ca000bd227be66 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 12
+-SUBLEVEL = 19
++SUBLEVEL = 20
+ EXTRAVERSION =
+ NAME = Baby Opossum Posse
+ 
+diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
+index 4d7c46f50382e3..50c82187e60ec9 100644
+--- a/arch/alpha/include/asm/elf.h
++++ b/arch/alpha/include/asm/elf.h
+@@ -74,7 +74,7 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+ /*
+  * This is used to ensure we don't load something for the wrong architecture.
+  */
+-#define elf_check_arch(x) ((x)->e_machine == EM_ALPHA)
++#define elf_check_arch(x) (((x)->e_machine == EM_ALPHA) && !((x)->e_flags & EF_ALPHA_32BIT))
+ 
+ /*
+  * These are used to set parameters in the core dumps.
+@@ -137,10 +137,6 @@ extern int dump_elf_task(elf_greg_t *dest, struct task_struct *task);
+ 	: amask (AMASK_CIX) ? "ev6" : "ev67");	\
+ })
+ 
+-#define SET_PERSONALITY(EX)					\
+-	set_personality(((EX).e_flags & EF_ALPHA_32BIT)		\
+-	   ? PER_LINUX_32BIT : PER_LINUX)
+-
+ extern int alpha_l1i_cacheshape;
+ extern int alpha_l1d_cacheshape;
+ extern int alpha_l2_cacheshape;
+diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
+index 635f0a5f5bbdeb..02e8817a89212c 100644
+--- a/arch/alpha/include/asm/pgtable.h
++++ b/arch/alpha/include/asm/pgtable.h
+@@ -360,7 +360,7 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
+ 
+ extern void paging_init(void);
+ 
+-/* We have our own get_unmapped_area to cope with ADDR_LIMIT_32BIT.  */
++/* We have our own get_unmapped_area */
+ #define HAVE_ARCH_UNMAPPED_AREA
+ 
+ #endif /* _ALPHA_PGTABLE_H */
+diff --git a/arch/alpha/include/asm/processor.h b/arch/alpha/include/asm/processor.h
+index 55bb1c09fd39d5..5dce5518a21119 100644
+--- a/arch/alpha/include/asm/processor.h
++++ b/arch/alpha/include/asm/processor.h
+@@ -8,23 +8,19 @@
+ #ifndef __ASM_ALPHA_PROCESSOR_H
+ #define __ASM_ALPHA_PROCESSOR_H
+ 
+-#include <linux/personality.h>	/* for ADDR_LIMIT_32BIT */
+-
+ /*
+  * We have a 42-bit user address space: 4TB user VM...
+  */
+ #define TASK_SIZE (0x40000000000UL)
+ 
+-#define STACK_TOP \
+-  (current->personality & ADDR_LIMIT_32BIT ? 0x80000000 : 0x00120000000UL)
++#define STACK_TOP (0x00120000000UL)
+ 
+ #define STACK_TOP_MAX	0x00120000000UL
+ 
+ /* This decides where the kernel will search for a free chunk of vm
+  * space during mmap's.
+  */
+-#define TASK_UNMAPPED_BASE \
+-  ((current->personality & ADDR_LIMIT_32BIT) ? 0x40000000 : TASK_SIZE / 2)
++#define TASK_UNMAPPED_BASE (TASK_SIZE / 2)
+ 
+ /* This is dead.  Everything has been moved to thread_info.  */
+ struct thread_struct { };
+diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
+index c0424de9e7cda2..077a1407be6d73 100644
+--- a/arch/alpha/kernel/osf_sys.c
++++ b/arch/alpha/kernel/osf_sys.c
+@@ -1211,8 +1211,7 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
+ 	return ret;
+ }
+ 
+-/* Get an address range which is currently unmapped.  Similar to the
+-   generic version except that we know how to honor ADDR_LIMIT_32BIT.  */
++/* Get an address range which is currently unmapped. */
+ 
+ static unsigned long
+ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
+@@ -1231,13 +1230,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ 		       unsigned long len, unsigned long pgoff,
+ 		       unsigned long flags, vm_flags_t vm_flags)
+ {
+-	unsigned long limit;
+-
+-	/* "32 bit" actually means 31 bit, since pointers sign extend.  */
+-	if (current->personality & ADDR_LIMIT_32BIT)
+-		limit = 0x80000000;
+-	else
+-		limit = TASK_SIZE;
++	unsigned long limit = TASK_SIZE;
+ 
+ 	if (len > limit)
+ 		return -ENOMEM;
+diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
+index 95fbc8c0560798..9edbd871c31bf0 100644
+--- a/arch/arm64/include/asm/tlbflush.h
++++ b/arch/arm64/include/asm/tlbflush.h
+@@ -396,33 +396,35 @@ static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
+ #define __flush_tlb_range_op(op, start, pages, stride,			\
+ 				asid, tlb_level, tlbi_user, lpa2)	\
+ do {									\
++	typeof(start) __flush_start = start;				\
++	typeof(pages) __flush_pages = pages;				\
+ 	int num = 0;							\
+ 	int scale = 3;							\
+ 	int shift = lpa2 ? 16 : PAGE_SHIFT;				\
+ 	unsigned long addr;						\
+ 									\
+-	while (pages > 0) {						\
++	while (__flush_pages > 0) {					\
+ 		if (!system_supports_tlb_range() ||			\
+-		    pages == 1 ||					\
+-		    (lpa2 && start != ALIGN(start, SZ_64K))) {		\
+-			addr = __TLBI_VADDR(start, asid);		\
++		    __flush_pages == 1 ||				\
++		    (lpa2 && __flush_start != ALIGN(__flush_start, SZ_64K))) {	\
++			addr = __TLBI_VADDR(__flush_start, asid);	\
+ 			__tlbi_level(op, addr, tlb_level);		\
+ 			if (tlbi_user)					\
+ 				__tlbi_user_level(op, addr, tlb_level);	\
+-			start += stride;				\
+-			pages -= stride >> PAGE_SHIFT;			\
++			__flush_start += stride;			\
++			__flush_pages -= stride >> PAGE_SHIFT;		\
+ 			continue;					\
+ 		}							\
+ 									\
+-		num = __TLBI_RANGE_NUM(pages, scale);			\
++		num = __TLBI_RANGE_NUM(__flush_pages, scale);		\
+ 		if (num >= 0) {						\
+-			addr = __TLBI_VADDR_RANGE(start >> shift, asid, \
++			addr = __TLBI_VADDR_RANGE(__flush_start >> shift, asid, \
+ 						scale, num, tlb_level);	\
+ 			__tlbi(r##op, addr);				\
+ 			if (tlbi_user)					\
+ 				__tlbi_user(r##op, addr);		\
+-			start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT; \
+-			pages -= __TLBI_RANGE_PAGES(num, scale);	\
++			__flush_start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT; \
++			__flush_pages -= __TLBI_RANGE_PAGES(num, scale);\
+ 		}							\
+ 		scale--;						\
+ 	}								\
+diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
+index 1a2c72f3e7f80e..cb180684d10d5b 100644
+--- a/arch/arm64/kernel/topology.c
++++ b/arch/arm64/kernel/topology.c
+@@ -194,12 +194,19 @@ static void amu_fie_setup(const struct cpumask *cpus)
+ 	int cpu;
+ 
+ 	/* We are already set since the last insmod of cpufreq driver */
+-	if (unlikely(cpumask_subset(cpus, amu_fie_cpus)))
++	if (cpumask_available(amu_fie_cpus) &&
++	    unlikely(cpumask_subset(cpus, amu_fie_cpus)))
+ 		return;
+ 
+-	for_each_cpu(cpu, cpus) {
++	for_each_cpu(cpu, cpus)
+ 		if (!freq_counters_valid(cpu))
+ 			return;
++
++	if (!cpumask_available(amu_fie_cpus) &&
++	    !zalloc_cpumask_var(&amu_fie_cpus, GFP_KERNEL)) {
++		WARN_ONCE(1, "Failed to allocate FIE cpumask for CPUs[%*pbl]\n",
++			  cpumask_pr_args(cpus));
++		return;
+ 	}
+ 
+ 	cpumask_or(amu_fie_cpus, amu_fie_cpus, cpus);
+@@ -237,17 +244,8 @@ static struct notifier_block init_amu_fie_notifier = {
+ 
+ static int __init init_amu_fie(void)
+ {
+-	int ret;
+-
+-	if (!zalloc_cpumask_var(&amu_fie_cpus, GFP_KERNEL))
+-		return -ENOMEM;
+-
+-	ret = cpufreq_register_notifier(&init_amu_fie_notifier,
++	return cpufreq_register_notifier(&init_amu_fie_notifier,
+ 					CPUFREQ_POLICY_NOTIFIER);
+-	if (ret)
+-		free_cpumask_var(amu_fie_cpus);
+-
+-	return ret;
+ }
+ core_initcall(init_amu_fie);
+ 
+diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
+index e55b02fbddc8f3..e59c628c93f20d 100644
+--- a/arch/arm64/mm/mmu.c
++++ b/arch/arm64/mm/mmu.c
+@@ -1176,8 +1176,11 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
+ 		struct vmem_altmap *altmap)
+ {
+ 	WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));
++	/* [start, end] should be within one section */
++	WARN_ON_ONCE(end - start > PAGES_PER_SECTION * sizeof(struct page));
+ 
+-	if (!IS_ENABLED(CONFIG_ARM64_4K_PAGES))
++	if (!IS_ENABLED(CONFIG_ARM64_4K_PAGES) ||
++	    (end - start < PAGES_PER_SECTION * sizeof(struct page)))
+ 		return vmemmap_populate_basepages(start, end, node, altmap);
+ 	else
+ 		return vmemmap_populate_hugepages(start, end, node, altmap);
+diff --git a/arch/loongarch/kvm/switch.S b/arch/loongarch/kvm/switch.S
+index 0c292f81849277..1be185e9480723 100644
+--- a/arch/loongarch/kvm/switch.S
++++ b/arch/loongarch/kvm/switch.S
+@@ -85,7 +85,7 @@
+ 	 * Guest CRMD comes from separate GCSR_CRMD register
+ 	 */
+ 	ori	t0, zero, CSR_PRMD_PIE
+-	csrxchg	t0, t0,   LOONGARCH_CSR_PRMD
++	csrwr	t0, LOONGARCH_CSR_PRMD
+ 
+ 	/* Set PVM bit to setup ertn to guest context */
+ 	ori	t0, zero, CSR_GSTAT_PVM
+diff --git a/arch/loongarch/mm/pageattr.c b/arch/loongarch/mm/pageattr.c
+index ffd8d76021d470..aca4e86d2d888b 100644
+--- a/arch/loongarch/mm/pageattr.c
++++ b/arch/loongarch/mm/pageattr.c
+@@ -3,6 +3,7 @@
+  * Copyright (C) 2024 Loongson Technology Corporation Limited
+  */
+ 
++#include <linux/memblock.h>
+ #include <linux/pagewalk.h>
+ #include <linux/pgtable.h>
+ #include <asm/set_memory.h>
+@@ -167,7 +168,7 @@ bool kernel_page_present(struct page *page)
+ 	unsigned long addr = (unsigned long)page_address(page);
+ 
+ 	if (addr < vm_map_base)
+-		return true;
++		return memblock_is_memory(__pa(addr));
+ 
+ 	pgd = pgd_offset_k(addr);
+ 	if (pgd_none(pgdp_get(pgd)))
+diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
+index 9ec3170c18f925..3a68b3e0b7a358 100644
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -3949,6 +3949,85 @@ static inline bool intel_pmu_has_cap(struct perf_event *event, int idx)
+ 	return test_bit(idx, (unsigned long *)&intel_cap->capabilities);
+ }
+ 
++static u64 intel_pmu_freq_start_period(struct perf_event *event)
++{
++	int type = event->attr.type;
++	u64 config, factor;
++	s64 start;
++
++	/*
++	 * 127 is the lowest possible recommended SAV (sample after value)
++	 * for a 4000 Hz freq (the default), according to the event list JSON file.
++	 * Also, assume the workload is idle 50% of the time.
++	 */
++	factor = 64 * 4000;
++	if (type != PERF_TYPE_HARDWARE && type != PERF_TYPE_HW_CACHE)
++		goto end;
++
++	/*
++	 * The estimation of the start period in the freq mode is
++	 * based on the below assumption.
++	 *
++	 * For a cycles or instructions event, assume a 1 GHz
++	 * platform and 1 IPC, with the workload idle 50% of the time.
++	 * The start period = 1,000,000,000 * 1 / freq / 2.
++	 *		    = 500,000,000 / freq
++	 *
++	 * Usually, branch-related events occur less often than
++	 * instruction events. According to the Intel event list JSON
++	 * file, the SAV (sample after value) of a branch-related event
++	 * is usually 1/4 of an instruction event.
++	 * The start period of branch-related events = 125,000,000 / freq.
++	 *
++	 * Cache-related events occur even less often. The SAV is usually
++	 * 1/20 of an instruction event.
++	 * The start period of cache-related events = 25,000,000 / freq.
++	 */
++	config = event->attr.config & PERF_HW_EVENT_MASK;
++	if (type == PERF_TYPE_HARDWARE) {
++		switch (config) {
++		case PERF_COUNT_HW_CPU_CYCLES:
++		case PERF_COUNT_HW_INSTRUCTIONS:
++		case PERF_COUNT_HW_BUS_CYCLES:
++		case PERF_COUNT_HW_STALLED_CYCLES_FRONTEND:
++		case PERF_COUNT_HW_STALLED_CYCLES_BACKEND:
++		case PERF_COUNT_HW_REF_CPU_CYCLES:
++			factor = 500000000;
++			break;
++		case PERF_COUNT_HW_BRANCH_INSTRUCTIONS:
++		case PERF_COUNT_HW_BRANCH_MISSES:
++			factor = 125000000;
++			break;
++		case PERF_COUNT_HW_CACHE_REFERENCES:
++		case PERF_COUNT_HW_CACHE_MISSES:
++			factor = 25000000;
++			break;
++		default:
++			goto end;
++		}
++	}
++
++	if (type == PERF_TYPE_HW_CACHE)
++		factor = 25000000;
++end:
++	/*
++	 * Usually, a prime or a number with few factors (close to prime)
++	 * is chosen as an SAV, which makes it less likely that the sampling
++	 * period synchronizes with some periodic event in the workload.
++	 * Subtract 1 to at least avoid values near powers of two
++	 * for the default freq.
++	 */
++	start = DIV_ROUND_UP_ULL(factor, event->attr.sample_freq) - 1;
++
++	if (start > x86_pmu.max_period)
++		start = x86_pmu.max_period;
++
++	if (x86_pmu.limit_period)
++		x86_pmu.limit_period(event, &start);
++
++	return start;
++}
++
+ static int intel_pmu_hw_config(struct perf_event *event)
+ {
+ 	int ret = x86_pmu_hw_config(event);
+@@ -3960,6 +4039,12 @@ static int intel_pmu_hw_config(struct perf_event *event)
+ 	if (ret)
+ 		return ret;
+ 
++	if (event->attr.freq && event->attr.sample_freq) {
++		event->hw.sample_period = intel_pmu_freq_start_period(event);
++		event->hw.last_period = event->hw.sample_period;
++		local64_set(&event->hw.period_left, event->hw.sample_period);
++	}
++
+ 	if (event->attr.precise_ip) {
+ 		if ((event->attr.config & INTEL_ARCH_EVENT_MASK) == INTEL_FIXED_VLBR_EVENT)
+ 			return -EINVAL;
+diff --git a/arch/x86/events/rapl.c b/arch/x86/events/rapl.c
+index a481a939862e54..fc06b216aacdb7 100644
+--- a/arch/x86/events/rapl.c
++++ b/arch/x86/events/rapl.c
+@@ -846,6 +846,7 @@ static const struct x86_cpu_id rapl_model_match[] __initconst = {
+ 	X86_MATCH_VFM(INTEL_METEORLAKE_L,	&model_skl),
+ 	X86_MATCH_VFM(INTEL_ARROWLAKE_H,	&model_skl),
+ 	X86_MATCH_VFM(INTEL_ARROWLAKE,		&model_skl),
++	X86_MATCH_VFM(INTEL_ARROWLAKE_U,	&model_skl),
+ 	X86_MATCH_VFM(INTEL_LUNARLAKE_M,	&model_skl),
+ 	{},
+ };
+diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
+index def6a2854a4b7c..07fc145f353103 100644
+--- a/arch/x86/kernel/cpu/microcode/amd.c
++++ b/arch/x86/kernel/cpu/microcode/amd.c
+@@ -1075,7 +1075,7 @@ static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t siz
+ 	if (ret != UCODE_OK)
+ 		return ret;
+ 
+-	for_each_node(nid) {
++	for_each_node_with_cpus(nid) {
+ 		cpu = cpumask_first(cpumask_of_node(nid));
+ 		c = &cpu_data(cpu);
+ 
+diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
+index 00189cdeb775f0..cb3f900c46fcc1 100644
+--- a/arch/x86/kernel/cpu/vmware.c
++++ b/arch/x86/kernel/cpu/vmware.c
+@@ -26,6 +26,7 @@
+ #include <linux/export.h>
+ #include <linux/clocksource.h>
+ #include <linux/cpu.h>
++#include <linux/efi.h>
+ #include <linux/reboot.h>
+ #include <linux/static_call.h>
+ #include <asm/div64.h>
+@@ -429,6 +430,9 @@ static void __init vmware_platform_setup(void)
+ 		pr_warn("Failed to get TSC freq from the hypervisor\n");
+ 	}
+ 
++	if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP) && !efi_enabled(EFI_BOOT))
++		x86_init.mpparse.find_mptable = mpparse_find_mptable;
++
+ 	vmware_paravirt_ops_setup();
+ 
+ #ifdef CONFIG_X86_IO_APIC
+diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c
+index 59d23cdf4ed0fa..dd8748c45529a8 100644
+--- a/arch/x86/kernel/devicetree.c
++++ b/arch/x86/kernel/devicetree.c
+@@ -2,6 +2,7 @@
+ /*
+  * Architecture specific OF callbacks.
+  */
++#include <linux/acpi.h>
+ #include <linux/export.h>
+ #include <linux/io.h>
+ #include <linux/interrupt.h>
+@@ -313,6 +314,6 @@ void __init x86_flattree_get_config(void)
+ 	if (initial_dtb)
+ 		early_memunmap(dt, map_len);
+ #endif
+-	if (of_have_populated_dt())
++	if (acpi_disabled && of_have_populated_dt())
+ 		x86_init.mpparse.parse_smp_cfg = x86_dtb_parse_smp_config;
+ }
+diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
+index 385e3a5fc30458..feca4f20b06aaa 100644
+--- a/arch/x86/kernel/irq.c
++++ b/arch/x86/kernel/irq.c
+@@ -25,8 +25,10 @@
+ #include <asm/posted_intr.h>
+ #include <asm/irq_remapping.h>
+ 
++#if defined(CONFIG_X86_LOCAL_APIC) || defined(CONFIG_X86_THERMAL_VECTOR)
+ #define CREATE_TRACE_POINTS
+ #include <asm/trace/irq_vectors.h>
++#endif
+ 
+ DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
+ EXPORT_PER_CPU_SYMBOL(irq_stat);
+diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
+index 19c96278ba755d..9242c0649adf1b 100644
+--- a/arch/x86/kvm/mmu/mmu.c
++++ b/arch/x86/kvm/mmu/mmu.c
+@@ -7589,7 +7589,7 @@ static void kvm_mmu_start_lpage_recovery(struct once *once)
+ 				      kvm_nx_huge_page_recovery_worker_kill,
+ 				      kvm, "kvm-nx-lpage-recovery");
+ 
+-	if (!nx_thread)
++	if (IS_ERR(nx_thread))
+ 		return;
+ 
+ 	vhost_task_start(nx_thread);
+diff --git a/block/bio.c b/block/bio.c
+index ac4d77c889322d..43d4ae26f47587 100644
+--- a/block/bio.c
++++ b/block/bio.c
+@@ -77,7 +77,7 @@ struct bio_slab {
+ 	struct kmem_cache *slab;
+ 	unsigned int slab_ref;
+ 	unsigned int slab_size;
+-	char name[8];
++	char name[12];
+ };
+ static DEFINE_MUTEX(bio_slab_lock);
+ static DEFINE_XARRAY(bio_slabs);
+diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
+index 90aaec923889cf..b4cd14e7fa76cc 100644
+--- a/drivers/acpi/resource.c
++++ b/drivers/acpi/resource.c
+@@ -563,6 +563,12 @@ static const struct dmi_system_id irq1_edge_low_force_override[] = {
+ 			DMI_MATCH(DMI_BOARD_NAME, "RP-15"),
+ 		},
+ 	},
++	{
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Eluktronics Inc."),
++			DMI_MATCH(DMI_BOARD_NAME, "MECH-17"),
++		},
++	},
+ 	{
+ 		/* TongFang GM6XGxX/TUXEDO Stellaris 16 Gen5 AMD */
+ 		.matches = {
+diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
+index 2f0431e42c494d..c479348ce8ff69 100644
+--- a/drivers/block/null_blk/main.c
++++ b/drivers/block/null_blk/main.c
+@@ -1541,8 +1541,8 @@ static int null_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
+ 		cmd = blk_mq_rq_to_pdu(req);
+ 		cmd->error = null_process_cmd(cmd, req_op(req), blk_rq_pos(req),
+ 						blk_rq_sectors(req));
+-		if (!blk_mq_add_to_batch(req, iob, (__force int) cmd->error,
+-					blk_mq_end_request_batch))
++		if (!blk_mq_add_to_batch(req, iob, cmd->error != BLK_STS_OK,
++					 blk_mq_end_request_batch))
+ 			blk_mq_end_request(req, cmd->error);
+ 		nr++;
+ 	}
+diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
+index 0e50b65e1dbf5a..44a6937a4b65cc 100644
+--- a/drivers/block/virtio_blk.c
++++ b/drivers/block/virtio_blk.c
+@@ -1210,11 +1210,12 @@ static int virtblk_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
+ 
+ 	while ((vbr = virtqueue_get_buf(vq->vq, &len)) != NULL) {
+ 		struct request *req = blk_mq_rq_from_pdu(vbr);
++		u8 status = virtblk_vbr_status(vbr);
+ 
+ 		found++;
+ 		if (!blk_mq_complete_request_remote(req) &&
+-		    !blk_mq_add_to_batch(req, iob, virtblk_vbr_status(vbr),
+-						virtblk_complete_batch))
++		    !blk_mq_add_to_batch(req, iob, status != VIRTIO_BLK_S_OK,
++					 virtblk_complete_batch))
+ 			virtblk_request_done(req);
+ 	}
+ 
+diff --git a/drivers/clk/samsung/clk-gs101.c b/drivers/clk/samsung/clk-gs101.c
+index 85098c61c15e6f..4d4363bc8b28db 100644
+--- a/drivers/clk/samsung/clk-gs101.c
++++ b/drivers/clk/samsung/clk-gs101.c
+@@ -382,17 +382,9 @@ static const unsigned long cmu_top_clk_regs[] __initconst = {
+ 	EARLY_WAKEUP_DPU_DEST,
+ 	EARLY_WAKEUP_CSIS_DEST,
+ 	EARLY_WAKEUP_SW_TRIG_APM,
+-	EARLY_WAKEUP_SW_TRIG_APM_SET,
+-	EARLY_WAKEUP_SW_TRIG_APM_CLEAR,
+ 	EARLY_WAKEUP_SW_TRIG_CLUSTER0,
+-	EARLY_WAKEUP_SW_TRIG_CLUSTER0_SET,
+-	EARLY_WAKEUP_SW_TRIG_CLUSTER0_CLEAR,
+ 	EARLY_WAKEUP_SW_TRIG_DPU,
+-	EARLY_WAKEUP_SW_TRIG_DPU_SET,
+-	EARLY_WAKEUP_SW_TRIG_DPU_CLEAR,
+ 	EARLY_WAKEUP_SW_TRIG_CSIS,
+-	EARLY_WAKEUP_SW_TRIG_CSIS_SET,
+-	EARLY_WAKEUP_SW_TRIG_CSIS_CLEAR,
+ 	CLK_CON_MUX_MUX_CLKCMU_BO_BUS,
+ 	CLK_CON_MUX_MUX_CLKCMU_BUS0_BUS,
+ 	CLK_CON_MUX_MUX_CLKCMU_BUS1_BUS,
+diff --git a/drivers/clk/samsung/clk-pll.c b/drivers/clk/samsung/clk-pll.c
+index cca3e630922c14..68a72f5fd9a5a6 100644
+--- a/drivers/clk/samsung/clk-pll.c
++++ b/drivers/clk/samsung/clk-pll.c
+@@ -206,6 +206,7 @@ static const struct clk_ops samsung_pll3000_clk_ops = {
+  */
+ /* Maximum lock time can be 270 * PDIV cycles */
+ #define PLL35XX_LOCK_FACTOR	(270)
++#define PLL142XX_LOCK_FACTOR	(150)
+ 
+ #define PLL35XX_MDIV_MASK       (0x3FF)
+ #define PLL35XX_PDIV_MASK       (0x3F)
+@@ -272,7 +273,11 @@ static int samsung_pll35xx_set_rate(struct clk_hw *hw, unsigned long drate,
+ 	}
+ 
+ 	/* Set PLL lock time. */
+-	writel_relaxed(rate->pdiv * PLL35XX_LOCK_FACTOR,
++	if (pll->type == pll_142xx)
++		writel_relaxed(rate->pdiv * PLL142XX_LOCK_FACTOR,
++			pll->lock_reg);
++	else
++		writel_relaxed(rate->pdiv * PLL35XX_LOCK_FACTOR,
+ 			pll->lock_reg);
+ 
+ 	/* Change PLL PMS values */
+diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
+index 6e9788324fea55..371f24569b3b22 100644
+--- a/drivers/firmware/iscsi_ibft.c
++++ b/drivers/firmware/iscsi_ibft.c
+@@ -310,7 +310,10 @@ static ssize_t ibft_attr_show_nic(void *data, int type, char *buf)
+ 		str += sprintf_ipaddr(str, nic->ip_addr);
+ 		break;
+ 	case ISCSI_BOOT_ETH_SUBNET_MASK:
+-		val = cpu_to_be32(~((1 << (32-nic->subnet_mask_prefix))-1));
++		if (nic->subnet_mask_prefix > 32)
++			val = cpu_to_be32(~0);
++		else
++			val = cpu_to_be32(~((1 << (32-nic->subnet_mask_prefix))-1));
+ 		str += sprintf(str, "%pI4", &val);
+ 		break;
+ 	case ISCSI_BOOT_ETH_PREFIX_LEN:
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
+index edcb5351f8cca7..9c6824e1c15660 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
+@@ -525,8 +525,9 @@ static void gmc_v12_0_get_vm_pte(struct amdgpu_device *adev,
+ 
+ 	bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ 	coherent = bo->flags & AMDGPU_GEM_CREATE_COHERENT;
+-	is_system = (bo->tbo.resource->mem_type == TTM_PL_TT) ||
+-		(bo->tbo.resource->mem_type == AMDGPU_PL_PREEMPT);
++	is_system = bo->tbo.resource &&
++		(bo->tbo.resource->mem_type == TTM_PL_TT ||
++		 bo->tbo.resource->mem_type == AMDGPU_PL_PREEMPT);
+ 
+ 	if (bo && bo->flags & AMDGPU_GEM_CREATE_GFX12_DCC)
+ 		*flags |= AMDGPU_PTE_DCC;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+index 3cfb4a38d17c7f..dffe2a86f383ef 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+@@ -1199,11 +1199,13 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
+ 		decrement_queue_count(dqm, qpd, q);
+ 
+ 		if (dqm->dev->kfd->shared_resources.enable_mes) {
+-			retval = remove_queue_mes(dqm, q, qpd);
+-			if (retval) {
++			int err;
++
++			err = remove_queue_mes(dqm, q, qpd);
++			if (err) {
+ 				dev_err(dev, "Failed to evict queue %d\n",
+ 					q->properties.queue_id);
+-				goto out;
++				retval = err;
+ 			}
+ 		}
+ 	}
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 5df26f8937cc81..0688a428ee4f7d 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -243,6 +243,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
+ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
+ static void handle_hpd_rx_irq(void *param);
+ 
++static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
++					 int bl_idx,
++					 u32 user_brightness);
++
+ static bool
+ is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
+ 				 struct drm_crtc_state *new_crtc_state);
+@@ -3295,6 +3299,12 @@ static int dm_resume(void *handle)
+ 
+ 		mutex_unlock(&dm->dc_lock);
+ 
++		/* set the backlight after a reset */
++		for (i = 0; i < dm->num_of_edps; i++) {
++			if (dm->backlight_dev[i])
++				amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
++		}
++
+ 		return 0;
+ 	}
+ 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
+@@ -4822,6 +4832,7 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
+ 	dm->backlight_dev[aconnector->bl_idx] =
+ 		backlight_device_register(bl_name, aconnector->base.kdev, dm,
+ 					  &amdgpu_dm_backlight_ops, &props);
++	dm->brightness[aconnector->bl_idx] = props.brightness;
+ 
+ 	if (IS_ERR(dm->backlight_dev[aconnector->bl_idx])) {
+ 		DRM_ERROR("DM: Backlight registration failed!\n");
+@@ -4889,7 +4900,6 @@ static void setup_backlight_device(struct amdgpu_display_manager *dm,
+ 	aconnector->bl_idx = bl_idx;
+ 
+ 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
+-	dm->brightness[bl_idx] = AMDGPU_MAX_BL_LEVEL;
+ 	dm->backlight_link[bl_idx] = link;
+ 	dm->num_of_edps++;
+ 
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+index e339c7a8d541c9..c0dc2324404908 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+@@ -455,6 +455,7 @@ void hdcp_destroy(struct kobject *kobj, struct hdcp_workqueue *hdcp_work)
+ 	for (i = 0; i < hdcp_work->max_link; i++) {
+ 		cancel_delayed_work_sync(&hdcp_work[i].callback_dwork);
+ 		cancel_delayed_work_sync(&hdcp_work[i].watchdog_timer_dwork);
++		cancel_delayed_work_sync(&hdcp_work[i].property_validate_dwork);
+ 	}
+ 
+ 	sysfs_remove_bin_file(kobj, &hdcp_work[0].attr);
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+index c4a7fd453e5fc0..a215234151ac31 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+@@ -894,8 +894,16 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
+ 	struct drm_device *dev = adev_to_drm(adev);
+ 	struct drm_connector *connector;
+ 	struct drm_connector_list_iter iter;
++	int irq_type;
+ 	int i;
+ 
++	/* First, clear all hpd and hpdrx interrupts */
++	for (i = DC_IRQ_SOURCE_HPD1; i <= DC_IRQ_SOURCE_HPD6RX; i++) {
++		if (!dc_interrupt_set(adev->dm.dc, i, false))
++			drm_err(dev, "Failed to clear hpd(rx) source=%d on init\n",
++				i);
++	}
++
+ 	drm_connector_list_iter_begin(dev, &iter);
+ 	drm_for_each_connector_iter(connector, &iter) {
+ 		struct amdgpu_dm_connector *amdgpu_dm_connector;
+@@ -908,10 +916,31 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
+ 
+ 		dc_link = amdgpu_dm_connector->dc_link;
+ 
++		/*
++		 * Get a base driver irq reference for hpd ints for the lifetime
++		 * of dm. Note that only hpd interrupt types are registered with
++		 * base driver; hpd_rx types aren't. IOW, amdgpu_irq_get/put on
++		 * hpd_rx isn't available. DM currently controls hpd_rx
++		 * explicitly with dc_interrupt_set().
++		 */
+ 		if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
+-			dc_interrupt_set(adev->dm.dc,
+-					dc_link->irq_source_hpd,
+-					true);
++			irq_type = dc_link->irq_source_hpd - DC_IRQ_SOURCE_HPD1;
++			/*
++			 * TODO: There's a mismatch between mode_info.num_hpd
++			 * and what bios reports as the # of connectors with hpd
++			 * sources. Since the # of hpd source types registered
++			 * with base driver == mode_info.num_hpd, we have to
++			 * fall back to dc_interrupt_set for the remaining types.
++			 */
++			if (irq_type < adev->mode_info.num_hpd) {
++				if (amdgpu_irq_get(adev, &adev->hpd_irq, irq_type))
++					drm_err(dev, "DM_IRQ: Failed get HPD for source=%d)!\n",
++						dc_link->irq_source_hpd);
++			} else {
++				dc_interrupt_set(adev->dm.dc,
++						 dc_link->irq_source_hpd,
++						 true);
++			}
+ 		}
+ 
+ 		if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
+@@ -921,12 +950,6 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
+ 		}
+ 	}
+ 	drm_connector_list_iter_end(&iter);
+-
+-	/* Update reference counts for HPDs */
+-	for (i = DC_IRQ_SOURCE_HPD1; i <= adev->mode_info.num_hpd; i++) {
+-		if (amdgpu_irq_get(adev, &adev->hpd_irq, i - DC_IRQ_SOURCE_HPD1))
+-			drm_err(dev, "DM_IRQ: Failed get HPD for source=%d)!\n", i);
+-	}
+ }
+ 
+ /**
+@@ -942,7 +965,7 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
+ 	struct drm_device *dev = adev_to_drm(adev);
+ 	struct drm_connector *connector;
+ 	struct drm_connector_list_iter iter;
+-	int i;
++	int irq_type;
+ 
+ 	drm_connector_list_iter_begin(dev, &iter);
+ 	drm_for_each_connector_iter(connector, &iter) {
+@@ -956,9 +979,18 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
+ 		dc_link = amdgpu_dm_connector->dc_link;
+ 
+ 		if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
+-			dc_interrupt_set(adev->dm.dc,
+-					dc_link->irq_source_hpd,
+-					false);
++			irq_type = dc_link->irq_source_hpd - DC_IRQ_SOURCE_HPD1;
++
++			/* TODO: See same TODO in amdgpu_dm_hpd_init() */
++			if (irq_type < adev->mode_info.num_hpd) {
++				if (amdgpu_irq_put(adev, &adev->hpd_irq, irq_type))
++					drm_err(dev, "DM_IRQ: Failed put HPD for source=%d!\n",
++						dc_link->irq_source_hpd);
++			} else {
++				dc_interrupt_set(adev->dm.dc,
++						 dc_link->irq_source_hpd,
++						 false);
++			}
+ 		}
+ 
+ 		if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
+@@ -968,10 +1000,4 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
+ 		}
+ 	}
+ 	drm_connector_list_iter_end(&iter);
+-
+-	/* Update reference counts for HPDs */
+-	for (i = DC_IRQ_SOURCE_HPD1; i <= adev->mode_info.num_hpd; i++) {
+-		if (amdgpu_irq_put(adev, &adev->hpd_irq, i - DC_IRQ_SOURCE_HPD1))
+-			drm_err(dev, "DM_IRQ: Failed put HPD for source=%d!\n", i);
+-	}
+ }
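Taken together, the amdgpu_dm_irq.c hunks drop the blanket enable/disable loops in favour of per-connector handling: HPD sources registered with the base driver go through amdgpu_irq_get()/amdgpu_irq_put() so the reference counts stay balanced between init and fini, while sources beyond mode_info.num_hpd (the BIOS mismatch called out in the TODO) keep using dc_interrupt_set() directly. Condensed, init and fini are mirror images of one branch; a sketch using the patch's own identifiers, with error logging elided, not a drop-in replacement:

static void dm_hpd_set(struct amdgpu_device *adev, struct dc_link *link,
		       bool enable)
{
	int irq_type = link->irq_source_hpd - DC_IRQ_SOURCE_HPD1;

	if (irq_type < adev->mode_info.num_hpd) {
		/* Registered with the base driver: refcounted path. */
		if (enable)
			amdgpu_irq_get(adev, &adev->hpd_irq, irq_type);
		else
			amdgpu_irq_put(adev, &adev->hpd_irq, irq_type);
	} else {
		/* Beyond num_hpd: drive the DC interrupt source directly. */
		dc_interrupt_set(adev->dm.dc, link->irq_source_hpd, enable);
	}
}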
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+index 83c7c8853edeca..62e30942f735d4 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+@@ -275,8 +275,11 @@ static int amdgpu_dm_plane_validate_dcc(struct amdgpu_device *adev,
+ 	if (!dcc->enable)
+ 		return 0;
+ 
+-	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
+-	    !dc->cap_funcs.get_dcc_compression_cap)
++	if (adev->family < AMDGPU_FAMILY_GC_12_0_0 &&
++	    format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
++		return -EINVAL;
++
++	if (!dc->cap_funcs.get_dcc_compression_cap)
+ 		return -EINVAL;
+ 
+ 	input.format = format;
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+index f0eda0ba015600..bfcbbea377298f 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+@@ -3388,10 +3388,13 @@ static int get_norm_pix_clk(const struct dc_crtc_timing *timing)
+ 			break;
+ 		case COLOR_DEPTH_121212:
+ 			normalized_pix_clk = (pix_clk * 36) / 24;
+-		break;
++			break;
++		case COLOR_DEPTH_141414:
++			normalized_pix_clk = (pix_clk * 42) / 24;
++			break;
+ 		case COLOR_DEPTH_161616:
+ 			normalized_pix_clk = (pix_clk * 48) / 24;
+-		break;
++			break;
+ 		default:
+ 			ASSERT(0);
+ 		break;
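get_norm_pix_clk() scales the pixel clock by total bits per pixel relative to the 24-bit (8 bpc RGB) baseline; the hunk adds the missing 14 bpc case and straightens the misindented break statements. For d bits per component across three components the factor is 3d/24, hence 42/24 for 14 bpc. As a sketch:

/* 12 bpc -> *36/24, 14 bpc -> *42/24, 16 bpc -> *48/24 */
static int norm_pix_clk(int pix_clk, int bits_per_component)
{
	return pix_clk * (3 * bits_per_component) / 24;
}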
+diff --git a/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c
+index e5fb0e8333e43f..e691a1cf33567d 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c
++++ b/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c
+@@ -239,6 +239,7 @@ static const struct timing_generator_funcs dce60_tg_funcs = {
+ 				dce60_timing_generator_enable_advanced_request,
+ 		.configure_crc = dce60_configure_crc,
+ 		.get_crc = dce110_get_crc,
++		.is_two_pixels_per_container = dce110_is_two_pixels_per_container,
+ };
+ 
+ void dce60_timing_generator_construct(
+diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
+index 8dee0d397e0322..55014c15211674 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
++++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
+@@ -994,7 +994,7 @@ bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_s
+ 		if (disp_cfg_stream_location < 0)
+ 			disp_cfg_stream_location = dml_dispcfg->num_streams++;
+ 
+-		ASSERT(disp_cfg_stream_location >= 0 && disp_cfg_stream_location <= __DML2_WRAPPER_MAX_STREAMS_PLANES__);
++		ASSERT(disp_cfg_stream_location >= 0 && disp_cfg_stream_location < __DML2_WRAPPER_MAX_STREAMS_PLANES__);
+ 		populate_dml21_timing_config_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].timing, context->streams[stream_index], dml_ctx);
+ 		populate_dml21_output_config_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].output, context->streams[stream_index], &context->res_ctx.pipe_ctx[stream_index]);
+ 		populate_dml21_stream_overrides_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location], context->streams[stream_index]);
+@@ -1018,7 +1018,7 @@ bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_s
+ 				if (disp_cfg_plane_location < 0)
+ 					disp_cfg_plane_location = dml_dispcfg->num_planes++;
+ 
+-				ASSERT(disp_cfg_plane_location >= 0 && disp_cfg_plane_location <= __DML2_WRAPPER_MAX_STREAMS_PLANES__);
++				ASSERT(disp_cfg_plane_location >= 0 && disp_cfg_plane_location < __DML2_WRAPPER_MAX_STREAMS_PLANES__);
+ 
+ 				populate_dml21_surface_config_from_plane_state(in_dc, &dml_dispcfg->plane_descriptors[disp_cfg_plane_location].surface, context->stream_status[stream_index].plane_states[plane_index]);
+ 				populate_dml21_plane_config_from_plane_state(dml_ctx, &dml_dispcfg->plane_descriptors[disp_cfg_plane_location], context->stream_status[stream_index].plane_states[plane_index], context, stream_index);
+diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
+index bde4250853b10c..81ba8809a3b4c5 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
++++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
+@@ -746,7 +746,7 @@ static void populate_dml_output_cfg_from_stream_state(struct dml_output_cfg_st *
+ 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ 	case SIGNAL_TYPE_DISPLAY_PORT:
+ 		out->OutputEncoder[location] = dml_dp;
+-		if (dml2->v20.scratch.hpo_stream_to_link_encoder_mapping[location] != -1)
++		if (location < MAX_HPO_DP2_ENCODERS && dml2->v20.scratch.hpo_stream_to_link_encoder_mapping[location] != -1)
+ 			out->OutputEncoder[dml2->v20.scratch.hpo_stream_to_link_encoder_mapping[location]] = dml_dp2p0;
+ 		break;
+ 	case SIGNAL_TYPE_EDP:
+@@ -1303,7 +1303,7 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat
+ 		if (disp_cfg_stream_location < 0)
+ 			disp_cfg_stream_location = dml_dispcfg->num_timings++;
+ 
+-		ASSERT(disp_cfg_stream_location >= 0 && disp_cfg_stream_location <= __DML2_WRAPPER_MAX_STREAMS_PLANES__);
++		ASSERT(disp_cfg_stream_location >= 0 && disp_cfg_stream_location < __DML2_WRAPPER_MAX_STREAMS_PLANES__);
+ 
+ 		populate_dml_timing_cfg_from_stream_state(&dml_dispcfg->timing, disp_cfg_stream_location, context->streams[i]);
+ 		populate_dml_output_cfg_from_stream_state(&dml_dispcfg->output, disp_cfg_stream_location, context->streams[i], current_pipe_context, dml2);
+@@ -1343,7 +1343,7 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat
+ 				if (disp_cfg_plane_location < 0)
+ 					disp_cfg_plane_location = dml_dispcfg->num_surfaces++;
+ 
+-				ASSERT(disp_cfg_plane_location >= 0 && disp_cfg_plane_location <= __DML2_WRAPPER_MAX_STREAMS_PLANES__);
++				ASSERT(disp_cfg_plane_location >= 0 && disp_cfg_plane_location < __DML2_WRAPPER_MAX_STREAMS_PLANES__);
+ 
+ 				populate_dml_surface_cfg_from_plane_state(dml2->v20.dml_core_ctx.project, &dml_dispcfg->surface, disp_cfg_plane_location, context->stream_status[i].plane_states[j]);
+ 				populate_dml_plane_cfg_from_plane_state(
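All four ASSERT changes, here and in the dml21 helper above, fix the same off-by-one: an index into an array of __DML2_WRAPPER_MAX_STREAMS_PLANES__ entries is valid only while strictly less than the size, so the old <= accepted one slot past the end. The general rule, as a sketch:

#include <assert.h>

#define N_SLOTS 8
static int slots[N_SLOTS];

static void use_slot(int i)
{
	/* valid indexes are 0 .. N_SLOTS - 1: the bound must be strict */
	assert(i >= 0 && i < N_SLOTS);		/* not i <= N_SLOTS */
	slots[i] = 1;
}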
+diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
+index f0c6d50d8c3345..da6ff36623d30f 100644
+--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
+@@ -4034,6 +4034,22 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
+ 	return 0;
+ }
+ 
++static bool primary_mstb_probing_is_done(struct drm_dp_mst_topology_mgr *mgr)
++{
++	bool probing_done = false;
++
++	mutex_lock(&mgr->lock);
++
++	if (mgr->mst_primary && drm_dp_mst_topology_try_get_mstb(mgr->mst_primary)) {
++		probing_done = mgr->mst_primary->link_address_sent;
++		drm_dp_mst_topology_put_mstb(mgr->mst_primary);
++	}
++
++	mutex_unlock(&mgr->lock);
++
++	return probing_done;
++}
++
+ static inline bool
+ drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr,
+ 			  struct drm_dp_pending_up_req *up_req)
+@@ -4064,8 +4080,12 @@ drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr,
+ 
+ 	/* TODO: Add missing handler for DP_RESOURCE_STATUS_NOTIFY events */
+ 	if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) {
+-		dowork = drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat);
+-		hotplug = true;
++		if (!primary_mstb_probing_is_done(mgr)) {
++			drm_dbg_kms(mgr->dev, "Got CSN before finish topology probing. Skip it.\n");
++		} else {
++			dowork = drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat);
++			hotplug = true;
++		}
+ 	}
+ 
+ 	drm_dp_mst_topology_put_mstb(mstb);
+@@ -4144,10 +4164,11 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
+ 	drm_dp_send_up_ack_reply(mgr, mst_primary, up_req->msg.req_type,
+ 				 false);
+ 
++	drm_dp_mst_topology_put_mstb(mst_primary);
++
+ 	if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
+ 		const struct drm_dp_connection_status_notify *conn_stat =
+ 			&up_req->msg.u.conn_stat;
+-		bool handle_csn;
+ 
+ 		drm_dbg_kms(mgr->dev, "Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n",
+ 			    conn_stat->port_number,
+@@ -4156,16 +4177,6 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
+ 			    conn_stat->message_capability_status,
+ 			    conn_stat->input_port,
+ 			    conn_stat->peer_device_type);
+-
+-		mutex_lock(&mgr->probe_lock);
+-		handle_csn = mst_primary->link_address_sent;
+-		mutex_unlock(&mgr->probe_lock);
+-
+-		if (!handle_csn) {
+-			drm_dbg_kms(mgr->dev, "Got CSN before finish topology probing. Skip it.");
+-			kfree(up_req);
+-			goto out_put_primary;
+-		}
+ 	} else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
+ 		const struct drm_dp_resource_status_notify *res_stat =
+ 			&up_req->msg.u.resource_stat;
+@@ -4180,9 +4191,6 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
+ 	list_add_tail(&up_req->next, &mgr->up_req_list);
+ 	mutex_unlock(&mgr->up_req_lock);
+ 	queue_work(system_long_wq, &mgr->up_req_work);
+-
+-out_put_primary:
+-	drm_dp_mst_topology_put_mstb(mst_primary);
+ out_clear_reply:
+ 	memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
+ 	return 0;
+diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
+index 370dc676e3aa54..fd36b8fd54e9e1 100644
+--- a/drivers/gpu/drm/drm_atomic_uapi.c
++++ b/drivers/gpu/drm/drm_atomic_uapi.c
+@@ -956,6 +956,10 @@ int drm_atomic_connector_commit_dpms(struct drm_atomic_state *state,
+ 
+ 	if (mode != DRM_MODE_DPMS_ON)
+ 		mode = DRM_MODE_DPMS_OFF;
++
++	if (connector->dpms == mode)
++		goto out;
++
+ 	connector->dpms = mode;
+ 
+ 	crtc = connector->state->crtc;
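The drm_atomic_uapi.c change short-circuits a DPMS property write whose value already matches the connector state, so userspace that re-asserts DPMS no longer triggers a full atomic commit each time (the behaviour the drm_connector.c documentation hunk below spells out). The shape is a plain idempotence guard; a sketch with hypothetical names:

enum { MODE_OFF, MODE_ON };
struct conn_state { int mode; };
static int apply_power_mode(struct conn_state *st, int mode);	/* hypothetical */

static int set_power_mode(struct conn_state *st, int mode)
{
	if (mode != MODE_ON)
		mode = MODE_OFF;		/* normalize, as the patch does */

	if (st->mode == mode)
		return 0;			/* no-op: skip the full commit */

	st->mode = mode;
	return apply_power_mode(st, mode);
}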
+diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
+index 0e6021235a9304..994afa5a0ffb52 100644
+--- a/drivers/gpu/drm/drm_connector.c
++++ b/drivers/gpu/drm/drm_connector.c
+@@ -1308,6 +1308,10 @@ EXPORT_SYMBOL(drm_hdmi_connector_get_output_format_name);
+  * 	callback. For atomic drivers the remapping to the "ACTIVE" property is
+  * 	implemented in the DRM core.
+  *
++ * 	On atomic drivers any DPMS setproperty ioctl where the value does not
++ * 	change is completely skipped, otherwise a full atomic commit will occur.
++ * 	On legacy drivers the exact behavior is driver specific.
++ *
+  * 	Note that this property cannot be set through the MODE_ATOMIC ioctl,
+  * 	userspace must use "ACTIVE" on the CRTC instead.
+  *
+diff --git a/drivers/gpu/drm/drm_panic_qr.rs b/drivers/gpu/drm/drm_panic_qr.rs
+index bcf248f69252c2..6903e2010cb98b 100644
+--- a/drivers/gpu/drm/drm_panic_qr.rs
++++ b/drivers/gpu/drm/drm_panic_qr.rs
+@@ -545,7 +545,7 @@ fn add_segments(&mut self, segments: &[&Segment<'_>]) {
+         }
+         self.push(&mut offset, (MODE_STOP, 4));
+ 
+-        let pad_offset = (offset + 7) / 8;
++        let pad_offset = offset.div_ceil(8);
+         for i in pad_offset..self.version.max_data() {
+             self.data[i] = PADDING[(i & 1) ^ (pad_offset & 1)];
+         }
+@@ -659,7 +659,7 @@ struct QrImage<'a> {
+ impl QrImage<'_> {
+     fn new<'a, 'b>(em: &'b EncodedMsg<'b>, qrdata: &'a mut [u8]) -> QrImage<'a> {
+         let width = em.version.width();
+-        let stride = (width + 7) / 8;
++        let stride = width.div_ceil(8);
+         let data = qrdata;
+ 
+         let mut qr_image = QrImage {
+@@ -911,16 +911,16 @@ fn draw_all(&mut self, data: impl Iterator<Item = u8>) {
+ ///
+ /// * `url`: The base URL of the QR code. It will be encoded as Binary segment.
+ /// * `data`: A pointer to the binary data, to be encoded. if URL is NULL, it
+-///    will be encoded as binary segment, otherwise it will be encoded
+-///    efficiently as a numeric segment, and appended to the URL.
++///   will be encoded as binary segment, otherwise it will be encoded
++///   efficiently as a numeric segment, and appended to the URL.
+ /// * `data_len`: Length of the data, that needs to be encoded, must be less
+-///    than data_size.
++///   than data_size.
+ /// * `data_size`: Size of data buffer, it should be at least 4071 bytes to hold
+-///    a V40 QR code. It will then be overwritten with the QR code image.
++///   a V40 QR code. It will then be overwritten with the QR code image.
+ /// * `tmp`: A temporary buffer that the QR code encoder will use, to write the
+-///    segments and ECC.
++///   segments and ECC.
+ /// * `tmp_size`: Size of the temporary buffer, it must be at least 3706 bytes
+-///    long for V40.
++///   long for V40.
+ ///
+ /// # Safety
+ ///
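The drm_panic_qr.rs changes are behaviour-preserving: for non-negative x, (x + 7) / 8 and x.div_ceil(8) compute the same ceiling division, and the doc-list reindent is cosmetic. The kernel's C spelling of the same ceiling division is DIV_ROUND_UP():

#include <linux/math.h>		/* DIV_ROUND_UP() */

static size_t bytes_for_bits(size_t width_bits)
{
	return DIV_ROUND_UP(width_bits, 8);	/* == (width_bits + 7) / 8 */
}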
+diff --git a/drivers/gpu/drm/gma500/mid_bios.c b/drivers/gpu/drm/gma500/mid_bios.c
+index 7e76790c6a81fa..cba97d7db131d8 100644
+--- a/drivers/gpu/drm/gma500/mid_bios.c
++++ b/drivers/gpu/drm/gma500/mid_bios.c
+@@ -279,6 +279,11 @@ static void mid_get_vbt_data(struct drm_psb_private *dev_priv)
+ 					    0, PCI_DEVFN(2, 0));
+ 	int ret = -1;
+ 
++	if (pci_gfx_root == NULL) {
++		WARN_ON(1);
++		return;
++	}
++
+ 	/* Get the address of the platform config vbt */
+ 	pci_read_config_dword(pci_gfx_root, 0xFC, &addr);
+ 	pci_dev_put(pci_gfx_root);
+diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
+index ff93e08d5036df..5f02a5a39ab4a2 100644
+--- a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
++++ b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
+@@ -154,6 +154,7 @@ static int hyperv_vmbus_probe(struct hv_device *hdev,
+ 	return 0;
+ 
+ err_free_mmio:
++	iounmap(hv->vram);
+ 	vmbus_free_mmio(hv->mem->start, hv->fb_size);
+ err_vmbus_close:
+ 	vmbus_close(hdev->channel);
+@@ -172,6 +173,7 @@ static void hyperv_vmbus_remove(struct hv_device *hdev)
+ 	vmbus_close(hdev->channel);
+ 	hv_set_drvdata(hdev, NULL);
+ 
++	iounmap(hv->vram);
+ 	vmbus_free_mmio(hv->mem->start, hv->fb_size);
+ }
+ 
+diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
+index 3039ee03e1c7a8..d5eb8de645a9a3 100644
+--- a/drivers/gpu/drm/i915/display/intel_display.c
++++ b/drivers/gpu/drm/i915/display/intel_display.c
+@@ -7438,9 +7438,6 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
+ 	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
+ 	dev_priv->display.funcs.display->commit_modeset_enables(state);
+ 
+-	if (state->modeset)
+-		intel_set_cdclk_post_plane_update(state);
+-
+ 	intel_wait_for_vblank_workers(state);
+ 
+ 	/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
+@@ -7521,6 +7518,8 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
+ 		intel_verify_planes(state);
+ 
+ 	intel_sagv_post_plane_update(state);
++	if (state->modeset)
++		intel_set_cdclk_post_plane_update(state);
+ 	intel_pmdemand_post_plane_update(state);
+ 
+ 	drm_atomic_helper_commit_hw_done(&state->base);
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+index 21274aa9bdddc1..c3dabb85796052 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+@@ -164,6 +164,9 @@ static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
+  * 4 - Support multiple fault handlers per object depending on object's
+  *     backing storage (a.k.a. MMAP_OFFSET).
+  *
++ * 5 - Support multiple partial mmaps (mmap part of a BO + unmap an offset,
++ *     multiple times with different sizes and offsets).

++ *
+  * Restrictions:
+  *
+  *  * snoopable objects cannot be accessed via the GTT. It can cause machine
+@@ -191,7 +194,7 @@ static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
+  */
+ int i915_gem_mmap_gtt_version(void)
+ {
+-	return 4;
++	return 5;
+ }
+ 
+ static inline struct i915_gtt_view
+diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
+index b06aa473102b30..5ab4201c981e47 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
++++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
+@@ -776,7 +776,6 @@ nouveau_connector_force(struct drm_connector *connector)
+ 	if (!nv_encoder) {
+ 		NV_ERROR(drm, "can't find encoder to force %s on!\n",
+ 			 connector->name);
+-		connector->status = connector_status_disconnected;
+ 		return;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c b/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
+index cbd9584af32995..383fbe128348ea 100644
+--- a/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
++++ b/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
+@@ -258,15 +258,16 @@ static void drm_test_check_broadcast_rgb_crtc_mode_changed(struct kunit *test)
+ 						     8);
+ 	KUNIT_ASSERT_NOT_NULL(test, priv);
+ 
+-	ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+-	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+-
++	drm = &priv->drm;
++	crtc = priv->crtc;
+ 	conn = &priv->connector;
++
+ 	preferred = find_preferred_mode(conn);
+ 	KUNIT_ASSERT_NOT_NULL(test, preferred);
+ 
+-	drm = &priv->drm;
+-	crtc = priv->crtc;
++	ctx = drm_kunit_helper_acquire_ctx_alloc(test);
++	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
++
+ 	ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
+ 	KUNIT_ASSERT_EQ(test, ret, 0);
+ 
+@@ -321,15 +322,16 @@ static void drm_test_check_broadcast_rgb_crtc_mode_not_changed(struct kunit *tes
+ 						     8);
+ 	KUNIT_ASSERT_NOT_NULL(test, priv);
+ 
+-	ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+-	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+-
++	drm = &priv->drm;
++	crtc = priv->crtc;
+ 	conn = &priv->connector;
++
+ 	preferred = find_preferred_mode(conn);
+ 	KUNIT_ASSERT_NOT_NULL(test, preferred);
+ 
+-	drm = &priv->drm;
+-	crtc = priv->crtc;
++	ctx = drm_kunit_helper_acquire_ctx_alloc(test);
++	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
++
+ 	ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
+ 	KUNIT_ASSERT_EQ(test, ret, 0);
+ 
+@@ -384,18 +386,18 @@ static void drm_test_check_broadcast_rgb_auto_cea_mode(struct kunit *test)
+ 						     8);
+ 	KUNIT_ASSERT_NOT_NULL(test, priv);
+ 
++	drm = &priv->drm;
++	crtc = priv->crtc;
+ 	conn = &priv->connector;
+ 	KUNIT_ASSERT_TRUE(test, conn->display_info.is_hdmi);
+ 
+-	ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+-	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+-
+ 	preferred = find_preferred_mode(conn);
+ 	KUNIT_ASSERT_NOT_NULL(test, preferred);
+ 	KUNIT_ASSERT_NE(test, drm_match_cea_mode(preferred), 1);
+ 
+-	drm = &priv->drm;
+-	crtc = priv->crtc;
++	ctx = drm_kunit_helper_acquire_ctx_alloc(test);
++	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
++
+ 	ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
+ 	KUNIT_ASSERT_EQ(test, ret, 0);
+ 
+@@ -450,7 +452,6 @@ static void drm_test_check_broadcast_rgb_auto_cea_mode_vic_1(struct kunit *test)
+ 	mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1);
+ 	KUNIT_ASSERT_NOT_NULL(test, mode);
+ 
+-	drm = &priv->drm;
+ 	crtc = priv->crtc;
+ 	ret = light_up_connector(test, drm, crtc, conn, mode, ctx);
+ 	KUNIT_ASSERT_EQ(test, ret, 0);
+@@ -496,18 +497,18 @@ static void drm_test_check_broadcast_rgb_full_cea_mode(struct kunit *test)
+ 						     8);
+ 	KUNIT_ASSERT_NOT_NULL(test, priv);
+ 
++	drm = &priv->drm;
++	crtc = priv->crtc;
+ 	conn = &priv->connector;
+ 	KUNIT_ASSERT_TRUE(test, conn->display_info.is_hdmi);
+ 
+-	ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+-	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+-
+ 	preferred = find_preferred_mode(conn);
+ 	KUNIT_ASSERT_NOT_NULL(test, preferred);
+ 	KUNIT_ASSERT_NE(test, drm_match_cea_mode(preferred), 1);
+ 
+-	drm = &priv->drm;
+-	crtc = priv->crtc;
++	ctx = drm_kunit_helper_acquire_ctx_alloc(test);
++	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
++
+ 	ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
+ 	KUNIT_ASSERT_EQ(test, ret, 0);
+ 
+@@ -564,7 +565,6 @@ static void drm_test_check_broadcast_rgb_full_cea_mode_vic_1(struct kunit *test)
+ 	mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1);
+ 	KUNIT_ASSERT_NOT_NULL(test, mode);
+ 
+-	drm = &priv->drm;
+ 	crtc = priv->crtc;
+ 	ret = light_up_connector(test, drm, crtc, conn, mode, ctx);
+ 	KUNIT_ASSERT_EQ(test, ret, 0);
+@@ -612,18 +612,18 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode(struct kunit *test)
+ 						     8);
+ 	KUNIT_ASSERT_NOT_NULL(test, priv);
+ 
++	drm = &priv->drm;
++	crtc = priv->crtc;
+ 	conn = &priv->connector;
+ 	KUNIT_ASSERT_TRUE(test, conn->display_info.is_hdmi);
+ 
+-	ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+-	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+-
+ 	preferred = find_preferred_mode(conn);
+ 	KUNIT_ASSERT_NOT_NULL(test, preferred);
+ 	KUNIT_ASSERT_NE(test, drm_match_cea_mode(preferred), 1);
+ 
+-	drm = &priv->drm;
+-	crtc = priv->crtc;
++	ctx = drm_kunit_helper_acquire_ctx_alloc(test);
++	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
++
+ 	ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
+ 	KUNIT_ASSERT_EQ(test, ret, 0);
+ 
+@@ -680,7 +680,6 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode_vic_1(struct kunit *te
+ 	mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1);
+ 	KUNIT_ASSERT_NOT_NULL(test, mode);
+ 
+-	drm = &priv->drm;
+ 	crtc = priv->crtc;
+ 	ret = light_up_connector(test, drm, crtc, conn, mode, ctx);
+ 	KUNIT_ASSERT_EQ(test, ret, 0);
+@@ -730,20 +729,20 @@ static void drm_test_check_output_bpc_crtc_mode_changed(struct kunit *test)
+ 						     10);
+ 	KUNIT_ASSERT_NOT_NULL(test, priv);
+ 
++	drm = &priv->drm;
++	crtc = priv->crtc;
+ 	conn = &priv->connector;
+ 	ret = set_connector_edid(test, conn,
+ 				 test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz,
+ 				 ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz));
+ 	KUNIT_ASSERT_GT(test, ret, 0);
+ 
+-	ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+-	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+-
+ 	preferred = find_preferred_mode(conn);
+ 	KUNIT_ASSERT_NOT_NULL(test, preferred);
+ 
+-	drm = &priv->drm;
+-	crtc = priv->crtc;
++	ctx = drm_kunit_helper_acquire_ctx_alloc(test);
++	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
++
+ 	ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
+ 	KUNIT_ASSERT_EQ(test, ret, 0);
+ 
+@@ -804,20 +803,20 @@ static void drm_test_check_output_bpc_crtc_mode_not_changed(struct kunit *test)
+ 						     10);
+ 	KUNIT_ASSERT_NOT_NULL(test, priv);
+ 
++	drm = &priv->drm;
++	crtc = priv->crtc;
+ 	conn = &priv->connector;
+ 	ret = set_connector_edid(test, conn,
+ 				 test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz,
+ 				 ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz));
+ 	KUNIT_ASSERT_GT(test, ret, 0);
+ 
+-	ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+-	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+-
+ 	preferred = find_preferred_mode(conn);
+ 	KUNIT_ASSERT_NOT_NULL(test, preferred);
+ 
+-	drm = &priv->drm;
+-	crtc = priv->crtc;
++	ctx = drm_kunit_helper_acquire_ctx_alloc(test);
++	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
++
+ 	ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
+ 	KUNIT_ASSERT_EQ(test, ret, 0);
+ 
+@@ -875,6 +874,8 @@ static void drm_test_check_output_bpc_dvi(struct kunit *test)
+ 						     12);
+ 	KUNIT_ASSERT_NOT_NULL(test, priv);
+ 
++	drm = &priv->drm;
++	crtc = priv->crtc;
+ 	conn = &priv->connector;
+ 	ret = set_connector_edid(test, conn,
+ 				 test_edid_dvi_1080p,
+@@ -884,14 +885,12 @@ static void drm_test_check_output_bpc_dvi(struct kunit *test)
+ 	info = &conn->display_info;
+ 	KUNIT_ASSERT_FALSE(test, info->is_hdmi);
+ 
+-	ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+-	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+-
+ 	preferred = find_preferred_mode(conn);
+ 	KUNIT_ASSERT_NOT_NULL(test, preferred);
+ 
+-	drm = &priv->drm;
+-	crtc = priv->crtc;
++	ctx = drm_kunit_helper_acquire_ctx_alloc(test);
++	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
++
+ 	ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
+ 	KUNIT_ASSERT_EQ(test, ret, 0);
+ 
+@@ -922,21 +921,21 @@ static void drm_test_check_tmds_char_rate_rgb_8bpc(struct kunit *test)
+ 						     8);
+ 	KUNIT_ASSERT_NOT_NULL(test, priv);
+ 
++	drm = &priv->drm;
++	crtc = priv->crtc;
+ 	conn = &priv->connector;
+ 	ret = set_connector_edid(test, conn,
+ 				 test_edid_hdmi_1080p_rgb_max_200mhz,
+ 				 ARRAY_SIZE(test_edid_hdmi_1080p_rgb_max_200mhz));
+ 	KUNIT_ASSERT_GT(test, ret, 0);
+ 
+-	ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+-	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+-
+ 	preferred = find_preferred_mode(conn);
+ 	KUNIT_ASSERT_NOT_NULL(test, preferred);
+ 	KUNIT_ASSERT_FALSE(test, preferred->flags & DRM_MODE_FLAG_DBLCLK);
+ 
+-	drm = &priv->drm;
+-	crtc = priv->crtc;
++	ctx = drm_kunit_helper_acquire_ctx_alloc(test);
++	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
++
+ 	ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
+ 	KUNIT_ASSERT_EQ(test, ret, 0);
+ 
+@@ -969,21 +968,21 @@ static void drm_test_check_tmds_char_rate_rgb_10bpc(struct kunit *test)
+ 						     10);
+ 	KUNIT_ASSERT_NOT_NULL(test, priv);
+ 
++	drm = &priv->drm;
++	crtc = priv->crtc;
+ 	conn = &priv->connector;
+ 	ret = set_connector_edid(test, conn,
+ 				 test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz,
+ 				 ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz));
+ 	KUNIT_ASSERT_GT(test, ret, 0);
+ 
+-	ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+-	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+-
+ 	preferred = find_preferred_mode(conn);
+ 	KUNIT_ASSERT_NOT_NULL(test, preferred);
+ 	KUNIT_ASSERT_FALSE(test, preferred->flags & DRM_MODE_FLAG_DBLCLK);
+ 
+-	drm = &priv->drm;
+-	crtc = priv->crtc;
++	ctx = drm_kunit_helper_acquire_ctx_alloc(test);
++	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
++
+ 	ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
+ 	KUNIT_ASSERT_EQ(test, ret, 0);
+ 
+@@ -1016,21 +1015,21 @@ static void drm_test_check_tmds_char_rate_rgb_12bpc(struct kunit *test)
+ 						     12);
+ 	KUNIT_ASSERT_NOT_NULL(test, priv);
+ 
++	drm = &priv->drm;
++	crtc = priv->crtc;
+ 	conn = &priv->connector;
+ 	ret = set_connector_edid(test, conn,
+ 				 test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz,
+ 				 ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz));
+ 	KUNIT_ASSERT_GT(test, ret, 0);
+ 
+-	ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+-	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+-
+ 	preferred = find_preferred_mode(conn);
+ 	KUNIT_ASSERT_NOT_NULL(test, preferred);
+ 	KUNIT_ASSERT_FALSE(test, preferred->flags & DRM_MODE_FLAG_DBLCLK);
+ 
+-	drm = &priv->drm;
+-	crtc = priv->crtc;
++	ctx = drm_kunit_helper_acquire_ctx_alloc(test);
++	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
++
+ 	ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
+ 	KUNIT_ASSERT_EQ(test, ret, 0);
+ 
+@@ -1067,15 +1066,16 @@ static void drm_test_check_hdmi_funcs_reject_rate(struct kunit *test)
+ 						     8);
+ 	KUNIT_ASSERT_NOT_NULL(test, priv);
+ 
+-	ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+-	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+-
++	drm = &priv->drm;
++	crtc = priv->crtc;
+ 	conn = &priv->connector;
++
+ 	preferred = find_preferred_mode(conn);
+ 	KUNIT_ASSERT_NOT_NULL(test, preferred);
+ 
+-	drm = &priv->drm;
+-	crtc = priv->crtc;
++	ctx = drm_kunit_helper_acquire_ctx_alloc(test);
++	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
++
+ 	ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
+ 	KUNIT_ASSERT_EQ(test, ret, 0);
+ 
+@@ -1123,6 +1123,8 @@ static void drm_test_check_max_tmds_rate_bpc_fallback(struct kunit *test)
+ 						     12);
+ 	KUNIT_ASSERT_NOT_NULL(test, priv);
+ 
++	drm = &priv->drm;
++	crtc = priv->crtc;
+ 	conn = &priv->connector;
+ 	ret = set_connector_edid(test, conn,
+ 				 test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz,
+@@ -1133,9 +1135,6 @@ static void drm_test_check_max_tmds_rate_bpc_fallback(struct kunit *test)
+ 	KUNIT_ASSERT_TRUE(test, info->is_hdmi);
+ 	KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0);
+ 
+-	ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+-	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+-
+ 	preferred = find_preferred_mode(conn);
+ 	KUNIT_ASSERT_NOT_NULL(test, preferred);
+ 	KUNIT_ASSERT_FALSE(test, preferred->flags & DRM_MODE_FLAG_DBLCLK);
+@@ -1146,8 +1145,9 @@ static void drm_test_check_max_tmds_rate_bpc_fallback(struct kunit *test)
+ 	rate = drm_hdmi_compute_mode_clock(preferred, 10, HDMI_COLORSPACE_RGB);
+ 	KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000);
+ 
+-	drm = &priv->drm;
+-	crtc = priv->crtc;
++	ctx = drm_kunit_helper_acquire_ctx_alloc(test);
++	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
++
+ 	ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
+ 	KUNIT_EXPECT_EQ(test, ret, 0);
+ 
+@@ -1192,6 +1192,8 @@ static void drm_test_check_max_tmds_rate_format_fallback(struct kunit *test)
+ 						     12);
+ 	KUNIT_ASSERT_NOT_NULL(test, priv);
+ 
++	drm = &priv->drm;
++	crtc = priv->crtc;
+ 	conn = &priv->connector;
+ 	ret = set_connector_edid(test, conn,
+ 				 test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz,
+@@ -1202,9 +1204,6 @@ static void drm_test_check_max_tmds_rate_format_fallback(struct kunit *test)
+ 	KUNIT_ASSERT_TRUE(test, info->is_hdmi);
+ 	KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0);
+ 
+-	ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+-	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+-
+ 	preferred = find_preferred_mode(conn);
+ 	KUNIT_ASSERT_NOT_NULL(test, preferred);
+ 	KUNIT_ASSERT_FALSE(test, preferred->flags & DRM_MODE_FLAG_DBLCLK);
+@@ -1218,8 +1217,9 @@ static void drm_test_check_max_tmds_rate_format_fallback(struct kunit *test)
+ 	rate = drm_hdmi_compute_mode_clock(preferred, 12, HDMI_COLORSPACE_YUV422);
+ 	KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000);
+ 
+-	drm = &priv->drm;
+-	crtc = priv->crtc;
++	ctx = drm_kunit_helper_acquire_ctx_alloc(test);
++	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
++
+ 	ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
+ 	KUNIT_EXPECT_EQ(test, ret, 0);
+ 
+@@ -1266,9 +1266,6 @@ static void drm_test_check_output_bpc_format_vic_1(struct kunit *test)
+ 	KUNIT_ASSERT_TRUE(test, info->is_hdmi);
+ 	KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0);
+ 
+-	ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+-	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+-
+ 	mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1);
+ 	KUNIT_ASSERT_NOT_NULL(test, mode);
+ 
+@@ -1282,7 +1279,9 @@ static void drm_test_check_output_bpc_format_vic_1(struct kunit *test)
+ 	rate = mode->clock * 1500;
+ 	KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000);
+ 
+-	drm = &priv->drm;
++	ctx = drm_kunit_helper_acquire_ctx_alloc(test);
++	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
++
+ 	crtc = priv->crtc;
+ 	ret = light_up_connector(test, drm, crtc, conn, mode, ctx);
+ 	KUNIT_EXPECT_EQ(test, ret, 0);
+@@ -1316,6 +1315,8 @@ static void drm_test_check_output_bpc_format_driver_rgb_only(struct kunit *test)
+ 						     12);
+ 	KUNIT_ASSERT_NOT_NULL(test, priv);
+ 
++	drm = &priv->drm;
++	crtc = priv->crtc;
+ 	conn = &priv->connector;
+ 	ret = set_connector_edid(test, conn,
+ 				 test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz,
+@@ -1326,9 +1327,6 @@ static void drm_test_check_output_bpc_format_driver_rgb_only(struct kunit *test)
+ 	KUNIT_ASSERT_TRUE(test, info->is_hdmi);
+ 	KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0);
+ 
+-	ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+-	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+-
+ 	preferred = find_preferred_mode(conn);
+ 	KUNIT_ASSERT_NOT_NULL(test, preferred);
+ 
+@@ -1347,8 +1345,9 @@ static void drm_test_check_output_bpc_format_driver_rgb_only(struct kunit *test)
+ 	rate = drm_hdmi_compute_mode_clock(preferred, 12, HDMI_COLORSPACE_YUV422);
+ 	KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000);
+ 
+-	drm = &priv->drm;
+-	crtc = priv->crtc;
++	ctx = drm_kunit_helper_acquire_ctx_alloc(test);
++	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
++
+ 	ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
+ 	KUNIT_EXPECT_EQ(test, ret, 0);
+ 
+@@ -1383,6 +1382,8 @@ static void drm_test_check_output_bpc_format_display_rgb_only(struct kunit *test
+ 						     12);
+ 	KUNIT_ASSERT_NOT_NULL(test, priv);
+ 
++	drm = &priv->drm;
++	crtc = priv->crtc;
+ 	conn = &priv->connector;
+ 	ret = set_connector_edid(test, conn,
+ 				 test_edid_hdmi_1080p_rgb_max_200mhz,
+@@ -1393,9 +1394,6 @@ static void drm_test_check_output_bpc_format_display_rgb_only(struct kunit *test
+ 	KUNIT_ASSERT_TRUE(test, info->is_hdmi);
+ 	KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0);
+ 
+-	ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+-	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+-
+ 	preferred = find_preferred_mode(conn);
+ 	KUNIT_ASSERT_NOT_NULL(test, preferred);
+ 
+@@ -1414,8 +1412,9 @@ static void drm_test_check_output_bpc_format_display_rgb_only(struct kunit *test
+ 	rate = drm_hdmi_compute_mode_clock(preferred, 12, HDMI_COLORSPACE_YUV422);
+ 	KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000);
+ 
+-	drm = &priv->drm;
+-	crtc = priv->crtc;
++	ctx = drm_kunit_helper_acquire_ctx_alloc(test);
++	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
++
+ 	ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
+ 	KUNIT_EXPECT_EQ(test, ret, 0);
+ 
+@@ -1449,6 +1448,8 @@ static void drm_test_check_output_bpc_format_driver_8bpc_only(struct kunit *test
+ 						     8);
+ 	KUNIT_ASSERT_NOT_NULL(test, priv);
+ 
++	drm = &priv->drm;
++	crtc = priv->crtc;
+ 	conn = &priv->connector;
+ 	ret = set_connector_edid(test, conn,
+ 				 test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz,
+@@ -1459,9 +1460,6 @@ static void drm_test_check_output_bpc_format_driver_8bpc_only(struct kunit *test
+ 	KUNIT_ASSERT_TRUE(test, info->is_hdmi);
+ 	KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0);
+ 
+-	ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+-	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+-
+ 	preferred = find_preferred_mode(conn);
+ 	KUNIT_ASSERT_NOT_NULL(test, preferred);
+ 
+@@ -1472,8 +1470,9 @@ static void drm_test_check_output_bpc_format_driver_8bpc_only(struct kunit *test
+ 	rate = drm_hdmi_compute_mode_clock(preferred, 12, HDMI_COLORSPACE_RGB);
+ 	KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000);
+ 
+-	drm = &priv->drm;
+-	crtc = priv->crtc;
++	ctx = drm_kunit_helper_acquire_ctx_alloc(test);
++	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
++
+ 	ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
+ 	KUNIT_EXPECT_EQ(test, ret, 0);
+ 
+@@ -1509,6 +1508,8 @@ static void drm_test_check_output_bpc_format_display_8bpc_only(struct kunit *tes
+ 						     12);
+ 	KUNIT_ASSERT_NOT_NULL(test, priv);
+ 
++	drm = &priv->drm;
++	crtc = priv->crtc;
+ 	conn = &priv->connector;
+ 	ret = set_connector_edid(test, conn,
+ 				 test_edid_hdmi_1080p_rgb_max_340mhz,
+@@ -1519,9 +1520,6 @@ static void drm_test_check_output_bpc_format_display_8bpc_only(struct kunit *tes
+ 	KUNIT_ASSERT_TRUE(test, info->is_hdmi);
+ 	KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0);
+ 
+-	ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+-	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+-
+ 	preferred = find_preferred_mode(conn);
+ 	KUNIT_ASSERT_NOT_NULL(test, preferred);
+ 
+@@ -1532,8 +1530,9 @@ static void drm_test_check_output_bpc_format_display_8bpc_only(struct kunit *tes
+ 	rate = drm_hdmi_compute_mode_clock(preferred, 12, HDMI_COLORSPACE_RGB);
+ 	KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000);
+ 
+-	drm = &priv->drm;
+-	crtc = priv->crtc;
++	ctx = drm_kunit_helper_acquire_ctx_alloc(test);
++	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
++
+ 	ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
+ 	KUNIT_EXPECT_EQ(test, ret, 0);
+ 
+diff --git a/drivers/gpu/drm/vkms/vkms_composer.c b/drivers/gpu/drm/vkms/vkms_composer.c
+index e7441b227b3cea..3d6785d081f2cd 100644
+--- a/drivers/gpu/drm/vkms/vkms_composer.c
++++ b/drivers/gpu/drm/vkms/vkms_composer.c
+@@ -98,7 +98,7 @@ static u16 lerp_u16(u16 a, u16 b, s64 t)
+ 
+ 	s64 delta = drm_fixp_mul(b_fp - a_fp,  t);
+ 
+-	return drm_fixp2int(a_fp + delta);
++	return drm_fixp2int_round(a_fp + delta);
+ }
+ 
+ static s64 get_lut_index(const struct vkms_color_lut *lut, u16 channel_value)
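The vkms change swaps truncation for round-to-nearest when converting the interpolated fixed-point LUT value back to an integer; with drm_fixp2int() the lerp always rounded down, biasing channels dark by up to one LSB. In the 32.32 fixed-point format of drm_fixed.h the two conversions differ only by the half-unit added before the shift (sketch):

#include <stdint.h>

static int64_t fixp2int_trunc(int64_t a)
{
	return a >> 32;				/* rounds toward -inf */
}

static int64_t fixp2int_round(int64_t a)
{
	return (a + (INT64_C(1) << 31)) >> 32;	/* rounds to nearest */
}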
+diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
+index fed23304e4da58..20d05efdd406e6 100644
+--- a/drivers/gpu/drm/xe/xe_guc_submit.c
++++ b/drivers/gpu/drm/xe/xe_guc_submit.c
+@@ -1213,9 +1213,11 @@ static void __guc_exec_queue_fini_async(struct work_struct *w)
+ 	xe_pm_runtime_get(guc_to_xe(guc));
+ 	trace_xe_exec_queue_destroy(q);
+ 
++	release_guc_id(guc, q);
+ 	if (xe_exec_queue_is_lr(q))
+ 		cancel_work_sync(&ge->lr_tdr);
+-	release_guc_id(guc, q);
++	/* Confirm no work left behind accessing device structures */
++	cancel_delayed_work_sync(&ge->sched.base.work_tdr);
+ 	xe_sched_entity_fini(&ge->entity);
+ 	xe_sched_fini(&ge->sched);
+ 
+diff --git a/drivers/gpu/drm/xe/xe_hmm.c b/drivers/gpu/drm/xe/xe_hmm.c
+index d7a9408b3a97c8..f6bc4f29d7538e 100644
+--- a/drivers/gpu/drm/xe/xe_hmm.c
++++ b/drivers/gpu/drm/xe/xe_hmm.c
+@@ -138,13 +138,17 @@ static int xe_build_sg(struct xe_device *xe, struct hmm_range *range,
+ 		i += size;
+ 
+ 		if (unlikely(j == st->nents - 1)) {
++			xe_assert(xe, i >= npages);
+ 			if (i > npages)
+ 				size -= (i - npages);
++
+ 			sg_mark_end(sgl);
++		} else {
++			xe_assert(xe, i < npages);
+ 		}
++
+ 		sg_set_page(sgl, page, size << PAGE_SHIFT, 0);
+ 	}
+-	xe_assert(xe, i == npages);
+ 
+ 	return dma_map_sgtable(dev, st, write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE,
+ 			       DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_NO_KERNEL_MAPPING);
+diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c
+index 33eb039053e4f5..06f50aa313267a 100644
+--- a/drivers/gpu/drm/xe/xe_pm.c
++++ b/drivers/gpu/drm/xe/xe_pm.c
+@@ -264,6 +264,15 @@ int xe_pm_init_early(struct xe_device *xe)
+ 	return 0;
+ }
+ 
++static u32 vram_threshold_value(struct xe_device *xe)
++{
++	/* FIXME: D3Cold temporarily disabled by default on BMG */
++	if (xe->info.platform == XE_BATTLEMAGE)
++		return 0;
++
++	return DEFAULT_VRAM_THRESHOLD;
++}
++
+ /**
+  * xe_pm_init - Initialize Xe Power Management
+  * @xe: xe device instance
+@@ -274,6 +283,7 @@ int xe_pm_init_early(struct xe_device *xe)
+  */
+ int xe_pm_init(struct xe_device *xe)
+ {
++	u32 vram_threshold;
+ 	int err;
+ 
+ 	/* For now suspend/resume is only allowed with GuC */
+@@ -287,7 +297,8 @@ int xe_pm_init(struct xe_device *xe)
+ 		if (err)
+ 			return err;
+ 
+-		err = xe_pm_set_vram_threshold(xe, DEFAULT_VRAM_THRESHOLD);
++		vram_threshold = vram_threshold_value(xe);
++		err = xe_pm_set_vram_threshold(xe, vram_threshold);
+ 		if (err)
+ 			return err;
+ 	}
+diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
+index f8a56d6312425a..4500d7653b05ee 100644
+--- a/drivers/hid/Kconfig
++++ b/drivers/hid/Kconfig
+@@ -1154,7 +1154,8 @@ config HID_TOPRE
+ 	tristate "Topre REALFORCE keyboards"
+ 	depends on HID
+ 	help
+-	  Say Y for N-key rollover support on Topre REALFORCE R2 108/87 key keyboards.
++	  Say Y for N-key rollover support on Topre REALFORCE R2 108/87 key and
++	  Topre REALFORCE R3S 87 key keyboards.
+ 
+ config HID_THINGM
+ 	tristate "ThingM blink(1) USB RGB LED"
+diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
+index 7e1ae2a2bcc247..d900dd05c335c3 100644
+--- a/drivers/hid/hid-apple.c
++++ b/drivers/hid/hid-apple.c
+@@ -378,6 +378,12 @@ static bool apple_is_non_apple_keyboard(struct hid_device *hdev)
+ 	return false;
+ }
+ 
++static bool apple_is_omoton_kb066(struct hid_device *hdev)
++{
++	return hdev->product == USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI &&
++		strcmp(hdev->name, "Bluetooth Keyboard") == 0;
++}
++
+ static inline void apple_setup_key_translation(struct input_dev *input,
+ 		const struct apple_key_translation *table)
+ {
+@@ -474,6 +480,7 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
+ 			 hid->product == USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_2015)
+ 			table = magic_keyboard_2015_fn_keys;
+ 		else if (hid->product == USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2021 ||
++			 hid->product == USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2024 ||
+ 			 hid->product == USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_FINGERPRINT_2021 ||
+ 			 hid->product == USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_2021)
+ 			table = apple2021_fn_keys;
+@@ -724,7 +731,7 @@ static int apple_input_configured(struct hid_device *hdev,
+ {
+ 	struct apple_sc *asc = hid_get_drvdata(hdev);
+ 
+-	if ((asc->quirks & APPLE_HAS_FN) && !asc->fn_found) {
++	if (((asc->quirks & APPLE_HAS_FN) && !asc->fn_found) || apple_is_omoton_kb066(hdev)) {
+ 		hid_info(hdev, "Fn key not found (Apple Wireless Keyboard clone?), disabling Fn key handling\n");
+ 		asc->quirks &= ~APPLE_HAS_FN;
+ 	}
+@@ -1150,6 +1157,10 @@ static const struct hid_device_id apple_devices[] = {
+ 		.driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK | APPLE_RDESC_BATTERY },
+ 	{ HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2021),
+ 		.driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2024),
++		.driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK | APPLE_RDESC_BATTERY },
++	{ HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2024),
++		.driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_FINGERPRINT_2021),
+ 		.driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK | APPLE_RDESC_BATTERY },
+ 	{ HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_FINGERPRINT_2021),
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index ceb3b1a72e235c..c6ae7c4268b84c 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -184,6 +184,7 @@
+ #define USB_DEVICE_ID_APPLE_IRCONTROL4	0x8242
+ #define USB_DEVICE_ID_APPLE_IRCONTROL5	0x8243
+ #define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2021   0x029c
++#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2024   0x0320
+ #define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_FINGERPRINT_2021   0x029a
+ #define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_2021   0x029f
+ #define USB_DEVICE_ID_APPLE_TOUCHBAR_BACKLIGHT 0x8102
+@@ -1089,6 +1090,7 @@
+ #define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001		0x3001
+ #define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3003		0x3003
+ #define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008		0x3008
++#define USB_DEVICE_ID_QUANTA_HP_5MP_CAMERA_5473		0x5473
+ 
+ #define I2C_VENDOR_ID_RAYDIUM		0x2386
+ #define I2C_PRODUCT_ID_RAYDIUM_4B33	0x4b33
+@@ -1295,6 +1297,7 @@
+ #define USB_VENDOR_ID_TOPRE			0x0853
+ #define USB_DEVICE_ID_TOPRE_REALFORCE_R2_108			0x0148
+ #define USB_DEVICE_ID_TOPRE_REALFORCE_R2_87			0x0146
++#define USB_DEVICE_ID_TOPRE_REALFORCE_R3S_87			0x0313
+ 
+ #define USB_VENDOR_ID_TOPSEED		0x0766
+ #define USB_DEVICE_ID_TOPSEED_CYBERLINK	0x0204
+diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
+index e0bbf0c6345d68..5d7a418ccdbecf 100644
+--- a/drivers/hid/hid-quirks.c
++++ b/drivers/hid/hid-quirks.c
+@@ -891,6 +891,7 @@ static const struct hid_device_id hid_ignore_list[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_DPAD) },
+ #endif
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_YEALINK, USB_DEVICE_ID_YEALINK_P1K_P4K_B2K) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_HP_5MP_CAMERA_5473) },
+ 	{ }
+ };
+ 
+diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c
+index 19b7bb0c3d7f99..9de875f27c246a 100644
+--- a/drivers/hid/hid-steam.c
++++ b/drivers/hid/hid-steam.c
+@@ -1051,10 +1051,10 @@ static void steam_mode_switch_cb(struct work_struct *work)
+ 							struct steam_device, mode_switch);
+ 	unsigned long flags;
+ 	bool client_opened;
+-	steam->gamepad_mode = !steam->gamepad_mode;
+ 	if (!lizard_mode)
+ 		return;
+ 
++	steam->gamepad_mode = !steam->gamepad_mode;
+ 	if (steam->gamepad_mode)
+ 		steam_set_lizard_mode(steam, false);
+ 	else {
+@@ -1623,7 +1623,7 @@ static void steam_do_deck_input_event(struct steam_device *steam,
+ 		schedule_delayed_work(&steam->mode_switch, 45 * HZ / 100);
+ 	}
+ 
+-	if (!steam->gamepad_mode)
++	if (!steam->gamepad_mode && lizard_mode)
+ 		return;
+ 
+ 	lpad_touched = b10 & BIT(3);
+@@ -1693,7 +1693,7 @@ static void steam_do_deck_sensors_event(struct steam_device *steam,
+ 	 */
+ 	steam->sensor_timestamp_us += 4000;
+ 
+-	if (!steam->gamepad_mode)
++	if (!steam->gamepad_mode && lizard_mode)
+ 		return;
+ 
+ 	input_event(sensors, EV_MSC, MSC_TIMESTAMP, steam->sensor_timestamp_us);
+diff --git a/drivers/hid/hid-topre.c b/drivers/hid/hid-topre.c
+index 848361f6225df1..ccedf8721722ec 100644
+--- a/drivers/hid/hid-topre.c
++++ b/drivers/hid/hid-topre.c
+@@ -29,6 +29,11 @@ static const __u8 *topre_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ 		hid_info(hdev,
+ 			"fixing up Topre REALFORCE keyboard report descriptor\n");
+ 		rdesc[72] = 0x02;
++	} else if (*rsize >= 106 && rdesc[28] == 0x29 && rdesc[29] == 0xe7 &&
++				    rdesc[30] == 0x81 && rdesc[31] == 0x00) {
++		hid_info(hdev,
++			"fixing up Topre REALFORCE keyboard report descriptor\n");
++		rdesc[31] = 0x02;
+ 	}
+ 	return rdesc;
+ }
+@@ -38,6 +43,8 @@ static const struct hid_device_id topre_id_table[] = {
+ 			 USB_DEVICE_ID_TOPRE_REALFORCE_R2_108) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_TOPRE,
+ 			 USB_DEVICE_ID_TOPRE_REALFORCE_R2_87) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_TOPRE,
++			 USB_DEVICE_ID_TOPRE_REALFORCE_R3S_87) },
+ 	{ }
+ };
+ MODULE_DEVICE_TABLE(hid, topre_id_table);
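Both branches of topre_report_fixup() flip a HID Input item from 0x00 (Data,Array) to 0x02 (Data,Var,Abs) so the keyboard's full key bitmap is reported and N-key rollover works; the R3S simply carries the item at a different offset (byte 31 rather than 72), keyed off the preceding Usage Maximum (0x29 0xe7) bytes. A sketch of the byte patch, assuming the caller has already located the right offset:

#include <linux/types.h>

/* 0x81 is the one-byte Input item prefix; its data byte selects
 * Array (0x00) vs Variable,Absolute (0x02). */
static void fixup_input_item(u8 *rdesc, size_t rsize, size_t off)
{
	if (off + 1 < rsize && rdesc[off] == 0x81 && rdesc[off + 1] == 0x00)
		rdesc[off + 1] = 0x02;
}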
+diff --git a/drivers/hid/intel-ish-hid/ipc/hw-ish.h b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
+index cdd80c653918b2..07e90d51f073cc 100644
+--- a/drivers/hid/intel-ish-hid/ipc/hw-ish.h
++++ b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
+@@ -36,6 +36,8 @@
+ #define PCI_DEVICE_ID_INTEL_ISH_ARL_H		0x7745
+ #define PCI_DEVICE_ID_INTEL_ISH_ARL_S		0x7F78
+ #define PCI_DEVICE_ID_INTEL_ISH_LNL_M		0xA845
++#define PCI_DEVICE_ID_INTEL_ISH_PTL_H		0xE345
++#define PCI_DEVICE_ID_INTEL_ISH_PTL_P		0xE445
+ 
+ #define	REVISION_ID_CHT_A0	0x6
+ #define	REVISION_ID_CHT_Ax_SI	0x0
+diff --git a/drivers/hid/intel-ish-hid/ipc/ipc.c b/drivers/hid/intel-ish-hid/ipc/ipc.c
+index 3cd53fc80634a6..4c861119e97aa0 100644
+--- a/drivers/hid/intel-ish-hid/ipc/ipc.c
++++ b/drivers/hid/intel-ish-hid/ipc/ipc.c
+@@ -517,6 +517,10 @@ static int ish_fw_reset_handler(struct ishtp_device *dev)
+ 	/* ISH FW is dead */
+ 	if (!ish_is_input_ready(dev))
+ 		return	-EPIPE;
++
++	/* Send clock sync at once after reset */
++	ishtp_dev->prev_sync = 0;
++
+ 	/*
+ 	 * Set HOST2ISH.ILUP. Apparently we need this BEFORE sending
+ 	 * RESET_NOTIFY_ACK - FW will be checking for it
+@@ -577,15 +581,14 @@ static void fw_reset_work_fn(struct work_struct *work)
+  */
+ static void _ish_sync_fw_clock(struct ishtp_device *dev)
+ {
+-	static unsigned long	prev_sync;
+-	uint64_t	usec;
++	struct ipc_time_update_msg time = {};
+ 
+-	if (prev_sync && time_before(jiffies, prev_sync + 20 * HZ))
++	if (dev->prev_sync && time_before(jiffies, dev->prev_sync + 20 * HZ))
+ 		return;
+ 
+-	prev_sync = jiffies;
+-	usec = ktime_to_us(ktime_get_boottime());
+-	ipc_send_mng_msg(dev, MNG_SYNC_FW_CLOCK, &usec, sizeof(uint64_t));
++	dev->prev_sync = jiffies;
++	/* The fields of time would be updated while sending message */
++	ipc_send_mng_msg(dev, MNG_SYNC_FW_CLOCK, &time, sizeof(time));
+ }
+ 
+ /**
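Besides switching the sync payload from a bare uint64_t to a zero-initialized ipc_time_update_msg (filled in when the message is sent), the ISH change moves the rate-limit timestamp out of a function-static into struct ishtp_device, and the reset handler now zeroes it to force an immediate resync. Function-statics silently share one copy across every device instance; the sketch below contrasts the two (struct dev and send_sync() are hypothetical):

#include <linux/jiffies.h>

struct dev { unsigned long prev_sync; };
static void send_sync(struct dev *d);		/* hypothetical */

/* Anti-pattern: one timestamp shared by all devices. */
static void sync_clock_buggy(struct dev *d)
{
	static unsigned long prev;		/* shared across every 'd'! */

	if (prev && time_before(jiffies, prev + 20 * HZ))
		return;
	prev = jiffies;
	send_sync(d);
}

/* Fix: per-device timestamp; zeroing it forces the next sync. */
static void sync_clock(struct dev *d)
{
	if (d->prev_sync && time_before(jiffies, d->prev_sync + 20 * HZ))
		return;
	d->prev_sync = jiffies;
	send_sync(d);
}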
+diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+index aae0d965b47b5e..1894743e880288 100644
+--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
++++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+@@ -26,9 +26,11 @@
+ enum ishtp_driver_data_index {
+ 	ISHTP_DRIVER_DATA_NONE,
+ 	ISHTP_DRIVER_DATA_LNL_M,
++	ISHTP_DRIVER_DATA_PTL,
+ };
+ 
+ #define ISH_FW_GEN_LNL_M "lnlm"
++#define ISH_FW_GEN_PTL "ptl"
+ 
+ #define ISH_FIRMWARE_PATH(gen) "intel/ish/ish_" gen ".bin"
+ #define ISH_FIRMWARE_PATH_ALL "intel/ish/ish_*.bin"
+@@ -37,6 +39,9 @@ static struct ishtp_driver_data ishtp_driver_data[] = {
+ 	[ISHTP_DRIVER_DATA_LNL_M] = {
+ 		.fw_generation = ISH_FW_GEN_LNL_M,
+ 	},
++	[ISHTP_DRIVER_DATA_PTL] = {
++		.fw_generation = ISH_FW_GEN_PTL,
++	},
+ };
+ 
+ static const struct pci_device_id ish_pci_tbl[] = {
+@@ -63,6 +68,8 @@ static const struct pci_device_id ish_pci_tbl[] = {
+ 	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_ARL_H)},
+ 	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_ARL_S)},
+ 	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_LNL_M), .driver_data = ISHTP_DRIVER_DATA_LNL_M},
++	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_PTL_H), .driver_data = ISHTP_DRIVER_DATA_PTL},
++	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_PTL_P), .driver_data = ISHTP_DRIVER_DATA_PTL},
+ 	{}
+ };
+ MODULE_DEVICE_TABLE(pci, ish_pci_tbl);
+diff --git a/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h b/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
+index cdacce0a4c9d7d..b35afefd036d40 100644
+--- a/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
++++ b/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
+@@ -242,6 +242,8 @@ struct ishtp_device {
+ 	unsigned int	ipc_tx_cnt;
+ 	unsigned long long	ipc_tx_bytes_cnt;
+ 
++	/* Time of the last clock sync */
++	unsigned long prev_sync;
+ 	const struct ishtp_hw_ops *ops;
+ 	size_t	mtu;
+ 	uint32_t	ishtp_msg_hdr;
+diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
+index 9b15f7daf50597..2b6749c9712ef2 100644
+--- a/drivers/hv/vmbus_drv.c
++++ b/drivers/hv/vmbus_drv.c
+@@ -2262,12 +2262,25 @@ void vmbus_free_mmio(resource_size_t start, resource_size_t size)
+ 	struct resource *iter;
+ 
+ 	mutex_lock(&hyperv_mmio_lock);
++
++	/*
++	 * If all bytes of the MMIO range to be released are within the
++	 * special case fb_mmio shadow region, skip releasing the shadow
++	 * region since no corresponding __request_region() was done
++	 * in vmbus_allocate_mmio().
++	 */
++	if (fb_mmio && start >= fb_mmio->start &&
++	    (start + size - 1 <= fb_mmio->end))
++		goto skip_shadow_release;
++
+ 	for (iter = hyperv_mmio; iter; iter = iter->sibling) {
+ 		if ((iter->start >= start + size) || (iter->end <= start))
+ 			continue;
+ 
+ 		__release_region(iter, start, size);
+ 	}
++
++skip_shadow_release:
+ 	release_mem_region(start, size);
+ 	mutex_unlock(&hyperv_mmio_lock);
+ 
+diff --git a/drivers/i2c/busses/i2c-ali1535.c b/drivers/i2c/busses/i2c-ali1535.c
+index 544c94e86b8967..1eac3583804058 100644
+--- a/drivers/i2c/busses/i2c-ali1535.c
++++ b/drivers/i2c/busses/i2c-ali1535.c
+@@ -485,6 +485,8 @@ MODULE_DEVICE_TABLE(pci, ali1535_ids);
+ 
+ static int ali1535_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ {
++	int ret;
++
+ 	if (ali1535_setup(dev)) {
+ 		dev_warn(&dev->dev,
+ 			"ALI1535 not detected, module not inserted.\n");
+@@ -496,7 +498,15 @@ static int ali1535_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ 
+ 	snprintf(ali1535_adapter.name, sizeof(ali1535_adapter.name),
+ 		"SMBus ALI1535 adapter at %04x", ali1535_offset);
+-	return i2c_add_adapter(&ali1535_adapter);
++	ret = i2c_add_adapter(&ali1535_adapter);
++	if (ret)
++		goto release_region;
++
++	return 0;
++
++release_region:
++	release_region(ali1535_smba, ALI1535_SMB_IOSIZE);
++	return ret;
+ }
+ 
+ static void ali1535_remove(struct pci_dev *dev)
+diff --git a/drivers/i2c/busses/i2c-ali15x3.c b/drivers/i2c/busses/i2c-ali15x3.c
+index 4761c720810227..418d11266671e3 100644
+--- a/drivers/i2c/busses/i2c-ali15x3.c
++++ b/drivers/i2c/busses/i2c-ali15x3.c
+@@ -472,6 +472,8 @@ MODULE_DEVICE_TABLE (pci, ali15x3_ids);
+ 
+ static int ali15x3_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ {
++	int ret;
++
+ 	if (ali15x3_setup(dev)) {
+ 		dev_err(&dev->dev,
+ 			"ALI15X3 not detected, module not inserted.\n");
+@@ -483,7 +485,15 @@ static int ali15x3_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ 
+ 	snprintf(ali15x3_adapter.name, sizeof(ali15x3_adapter.name),
+ 		"SMBus ALI15X3 adapter at %04x", ali15x3_smba);
+-	return i2c_add_adapter(&ali15x3_adapter);
++	ret = i2c_add_adapter(&ali15x3_adapter);
++	if (ret)
++		goto release_region;
++
++	return 0;
++
++release_region:
++	release_region(ali15x3_smba, ALI15X3_SMB_IOSIZE);
++	return ret;
+ }
+ 
+ static void ali15x3_remove(struct pci_dev *dev)
+diff --git a/drivers/i2c/busses/i2c-sis630.c b/drivers/i2c/busses/i2c-sis630.c
+index 3505cf29cedda3..a19c3d251804d5 100644
+--- a/drivers/i2c/busses/i2c-sis630.c
++++ b/drivers/i2c/busses/i2c-sis630.c
+@@ -509,6 +509,8 @@ MODULE_DEVICE_TABLE(pci, sis630_ids);
+ 
+ static int sis630_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ {
++	int ret;
++
+ 	if (sis630_setup(dev)) {
+ 		dev_err(&dev->dev,
+ 			"SIS630 compatible bus not detected, "
+@@ -522,7 +524,15 @@ static int sis630_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ 	snprintf(sis630_adapter.name, sizeof(sis630_adapter.name),
+ 		 "SMBus SIS630 adapter at %04x", smbus_base + SMB_STS);
+ 
+-	return i2c_add_adapter(&sis630_adapter);
++	ret = i2c_add_adapter(&sis630_adapter);
++	if (ret)
++		goto release_region;
++
++	return 0;
++
++release_region:
++	release_region(smbus_base + SMB_STS, SIS630_SMB_IOREGION);
++	return ret;
+ }
+ 
+ static void sis630_remove(struct pci_dev *dev)
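All three SMBus drivers in this batch (ali1535, ali15x3, sis630) leaked the I/O region claimed in their *_setup() helpers whenever i2c_add_adapter() failed, leaving the ports reserved until reboot. The fix is the standard goto unwind; as a sketch (smba, IOSIZE, and adapter stand in for each driver's specifics):

#include <linux/ioport.h>
#include <linux/i2c.h>

#define IOSIZE 16			/* placeholder region size */

static int smbus_probe(unsigned long smba, struct i2c_adapter *adapter)
{
	int ret;

	if (!request_region(smba, IOSIZE, "smbus"))
		return -EBUSY;

	ret = i2c_add_adapter(adapter);
	if (ret)
		goto release;		/* the unwind the fixes add */

	return 0;

release:
	release_region(smba, IOSIZE);
	return ret;
}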
+diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
+index 77fddab9d9502e..a6c7951011308c 100644
+--- a/drivers/input/joystick/xpad.c
++++ b/drivers/input/joystick/xpad.c
+@@ -140,6 +140,7 @@ static const struct xpad_device {
+ 	{ 0x044f, 0x0f00, "Thrustmaster Wheel", 0, XTYPE_XBOX },
+ 	{ 0x044f, 0x0f03, "Thrustmaster Wheel", 0, XTYPE_XBOX },
+ 	{ 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX },
++	{ 0x044f, 0xd01e, "ThrustMaster, Inc. ESWAP X 2 ELDEN RING EDITION", 0, XTYPE_XBOXONE },
+ 	{ 0x044f, 0x0f10, "Thrustmaster Modena GT Wheel", 0, XTYPE_XBOX },
+ 	{ 0x044f, 0xb326, "Thrustmaster Gamepad GP XID", 0, XTYPE_XBOX360 },
+ 	{ 0x045e, 0x0202, "Microsoft X-Box pad v1 (US)", 0, XTYPE_XBOX },
+@@ -177,6 +178,7 @@ static const struct xpad_device {
+ 	{ 0x06a3, 0x0200, "Saitek Racing Wheel", 0, XTYPE_XBOX },
+ 	{ 0x06a3, 0x0201, "Saitek Adrenalin", 0, XTYPE_XBOX },
+ 	{ 0x06a3, 0xf51a, "Saitek P3600", 0, XTYPE_XBOX360 },
++	{ 0x0738, 0x4503, "Mad Catz Racing Wheel", 0, XTYPE_XBOXONE },
+ 	{ 0x0738, 0x4506, "Mad Catz 4506 Wireless Controller", 0, XTYPE_XBOX },
+ 	{ 0x0738, 0x4516, "Mad Catz Control Pad", 0, XTYPE_XBOX },
+ 	{ 0x0738, 0x4520, "Mad Catz Control Pad Pro", 0, XTYPE_XBOX },
+@@ -238,6 +240,7 @@ static const struct xpad_device {
+ 	{ 0x0e6f, 0x0146, "Rock Candy Wired Controller for Xbox One", 0, XTYPE_XBOXONE },
+ 	{ 0x0e6f, 0x0147, "PDP Marvel Xbox One Controller", 0, XTYPE_XBOXONE },
+ 	{ 0x0e6f, 0x015c, "PDP Xbox One Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
++	{ 0x0e6f, 0x015d, "PDP Mirror's Edge Official Wired Controller for Xbox One", 0, XTYPE_XBOXONE },
+ 	{ 0x0e6f, 0x0161, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
+ 	{ 0x0e6f, 0x0162, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
+ 	{ 0x0e6f, 0x0163, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
+@@ -276,12 +279,15 @@ static const struct xpad_device {
+ 	{ 0x0f0d, 0x0078, "Hori Real Arcade Pro V Kai Xbox One", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
+ 	{ 0x0f0d, 0x00c5, "Hori Fighting Commander ONE", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
+ 	{ 0x0f0d, 0x00dc, "HORIPAD FPS for Nintendo Switch", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
++	{ 0x0f0d, 0x0151, "Hori Racing Wheel Overdrive for Xbox Series X", 0, XTYPE_XBOXONE },
++	{ 0x0f0d, 0x0152, "Hori Racing Wheel Overdrive for Xbox Series X", 0, XTYPE_XBOXONE },
+ 	{ 0x0f30, 0x010b, "Philips Recoil", 0, XTYPE_XBOX },
+ 	{ 0x0f30, 0x0202, "Joytech Advanced Controller", 0, XTYPE_XBOX },
+ 	{ 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX },
+ 	{ 0x102c, 0xff0c, "Joytech Wireless Advanced Controller", 0, XTYPE_XBOX },
+ 	{ 0x1038, 0x1430, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
+ 	{ 0x1038, 0x1431, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
++	{ 0x10f5, 0x7005, "Turtle Beach Recon Controller", 0, XTYPE_XBOXONE },
+ 	{ 0x11c9, 0x55f0, "Nacon GC-100XF", 0, XTYPE_XBOX360 },
+ 	{ 0x11ff, 0x0511, "PXN V900", 0, XTYPE_XBOX360 },
+ 	{ 0x1209, 0x2882, "Ardwiino Controller", 0, XTYPE_XBOX360 },
+@@ -306,7 +312,7 @@ static const struct xpad_device {
+ 	{ 0x1689, 0xfe00, "Razer Sabertooth", 0, XTYPE_XBOX360 },
+ 	{ 0x17ef, 0x6182, "Lenovo Legion Controller for Windows", 0, XTYPE_XBOX360 },
+ 	{ 0x1949, 0x041a, "Amazon Game Controller", 0, XTYPE_XBOX360 },
+-	{ 0x1a86, 0xe310, "QH Electronics Controller", 0, XTYPE_XBOX360 },
++	{ 0x1a86, 0xe310, "Legion Go S", 0, XTYPE_XBOX360 },
+ 	{ 0x1bad, 0x0002, "Harmonix Rock Band Guitar", 0, XTYPE_XBOX360 },
+ 	{ 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
+ 	{ 0x1bad, 0x0130, "Ion Drum Rocker", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
+@@ -343,6 +349,7 @@ static const struct xpad_device {
+ 	{ 0x1bad, 0xfa01, "MadCatz GamePad", 0, XTYPE_XBOX360 },
+ 	{ 0x1bad, 0xfd00, "Razer Onza TE", 0, XTYPE_XBOX360 },
+ 	{ 0x1bad, 0xfd01, "Razer Onza", 0, XTYPE_XBOX360 },
++	{ 0x1ee9, 0x1590, "ZOTAC Gaming Zone", 0, XTYPE_XBOX360 },
+ 	{ 0x20d6, 0x2001, "BDA Xbox Series X Wired Controller", 0, XTYPE_XBOXONE },
+ 	{ 0x20d6, 0x2009, "PowerA Enhanced Wired Controller for Xbox Series X|S", 0, XTYPE_XBOXONE },
+ 	{ 0x20d6, 0x281f, "PowerA Wired Controller For Xbox 360", 0, XTYPE_XBOX360 },
+@@ -366,6 +373,7 @@ static const struct xpad_device {
+ 	{ 0x24c6, 0x5510, "Hori Fighting Commander ONE (Xbox 360/PC Mode)", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+ 	{ 0x24c6, 0x551a, "PowerA FUSION Pro Controller", 0, XTYPE_XBOXONE },
+ 	{ 0x24c6, 0x561a, "PowerA FUSION Controller", 0, XTYPE_XBOXONE },
++	{ 0x24c6, 0x581a, "ThrustMaster XB1 Classic Controller", 0, XTYPE_XBOXONE },
+ 	{ 0x24c6, 0x5b00, "ThrustMaster Ferrari 458 Racing Wheel", 0, XTYPE_XBOX360 },
+ 	{ 0x24c6, 0x5b02, "Thrustmaster, Inc. GPX Controller", 0, XTYPE_XBOX360 },
+ 	{ 0x24c6, 0x5b03, "Thrustmaster Ferrari 458 Racing Wheel", 0, XTYPE_XBOX360 },
+@@ -374,10 +382,15 @@ static const struct xpad_device {
+ 	{ 0x2563, 0x058d, "OneXPlayer Gamepad", 0, XTYPE_XBOX360 },
+ 	{ 0x294b, 0x3303, "Snakebyte GAMEPAD BASE X", 0, XTYPE_XBOXONE },
+ 	{ 0x294b, 0x3404, "Snakebyte GAMEPAD RGB X", 0, XTYPE_XBOXONE },
++	{ 0x2993, 0x2001, "TECNO Pocket Go", 0, XTYPE_XBOX360 },
+ 	{ 0x2dc8, 0x2000, "8BitDo Pro 2 Wired Controller fox Xbox", 0, XTYPE_XBOXONE },
+ 	{ 0x2dc8, 0x3106, "8BitDo Ultimate Wireless / Pro 2 Wired Controller", 0, XTYPE_XBOX360 },
++	{ 0x2dc8, 0x3109, "8BitDo Ultimate Wireless Bluetooth", 0, XTYPE_XBOX360 },
+ 	{ 0x2dc8, 0x310a, "8BitDo Ultimate 2C Wireless Controller", 0, XTYPE_XBOX360 },
++	{ 0x2dc8, 0x6001, "8BitDo SN30 Pro", 0, XTYPE_XBOX360 },
+ 	{ 0x2e24, 0x0652, "Hyperkin Duke X-Box One pad", 0, XTYPE_XBOXONE },
++	{ 0x2e24, 0x1688, "Hyperkin X91 X-Box One pad", 0, XTYPE_XBOXONE },
++	{ 0x2e95, 0x0504, "SCUF Gaming Controller", MAP_SELECT_BUTTON, XTYPE_XBOXONE },
+ 	{ 0x31e3, 0x1100, "Wooting One", 0, XTYPE_XBOX360 },
+ 	{ 0x31e3, 0x1200, "Wooting Two", 0, XTYPE_XBOX360 },
+ 	{ 0x31e3, 0x1210, "Wooting Lekker", 0, XTYPE_XBOX360 },
+@@ -385,11 +398,16 @@ static const struct xpad_device {
+ 	{ 0x31e3, 0x1230, "Wooting Two HE (ARM)", 0, XTYPE_XBOX360 },
+ 	{ 0x31e3, 0x1300, "Wooting 60HE (AVR)", 0, XTYPE_XBOX360 },
+ 	{ 0x31e3, 0x1310, "Wooting 60HE (ARM)", 0, XTYPE_XBOX360 },
++	{ 0x3285, 0x0603, "Nacon Pro Compact controller for Xbox", 0, XTYPE_XBOXONE },
+ 	{ 0x3285, 0x0607, "Nacon GC-100", 0, XTYPE_XBOX360 },
++	{ 0x3285, 0x0614, "Nacon Pro Compact", 0, XTYPE_XBOXONE },
+ 	{ 0x3285, 0x0646, "Nacon Pro Compact", 0, XTYPE_XBOXONE },
++	{ 0x3285, 0x0662, "Nacon Revolution5 Pro", 0, XTYPE_XBOX360 },
+ 	{ 0x3285, 0x0663, "Nacon Evol-X", 0, XTYPE_XBOXONE },
+ 	{ 0x3537, 0x1004, "GameSir T4 Kaleid", 0, XTYPE_XBOX360 },
++	{ 0x3537, 0x1010, "GameSir G7 SE", 0, XTYPE_XBOXONE },
+ 	{ 0x3767, 0x0101, "Fanatec Speedster 3 Forceshock Wheel", 0, XTYPE_XBOX },
++	{ 0x413d, 0x2104, "Black Shark Green Ghost Gamepad", 0, XTYPE_XBOX360 },
+ 	{ 0xffff, 0xffff, "Chinese-made Xbox Controller", 0, XTYPE_XBOX },
+ 	{ 0x0000, 0x0000, "Generic X-Box pad", 0, XTYPE_UNKNOWN }
+ };
+@@ -488,6 +506,7 @@ static const struct usb_device_id xpad_table[] = {
+ 	XPAD_XBOX360_VENDOR(0x03f0),		/* HP HyperX Xbox 360 controllers */
+ 	XPAD_XBOXONE_VENDOR(0x03f0),		/* HP HyperX Xbox One controllers */
+ 	XPAD_XBOX360_VENDOR(0x044f),		/* Thrustmaster Xbox 360 controllers */
++	XPAD_XBOXONE_VENDOR(0x044f),		/* Thrustmaster Xbox One controllers */
+ 	XPAD_XBOX360_VENDOR(0x045e),		/* Microsoft Xbox 360 controllers */
+ 	XPAD_XBOXONE_VENDOR(0x045e),		/* Microsoft Xbox One controllers */
+ 	XPAD_XBOX360_VENDOR(0x046d),		/* Logitech Xbox 360-style controllers */
+@@ -519,8 +538,9 @@ static const struct usb_device_id xpad_table[] = {
+ 	XPAD_XBOX360_VENDOR(0x1689),		/* Razer Onza */
+ 	XPAD_XBOX360_VENDOR(0x17ef),		/* Lenovo */
+ 	XPAD_XBOX360_VENDOR(0x1949),		/* Amazon controllers */
+-	XPAD_XBOX360_VENDOR(0x1a86),		/* QH Electronics */
++	XPAD_XBOX360_VENDOR(0x1a86),		/* Nanjing Qinheng Microelectronics (WCH) */
+ 	XPAD_XBOX360_VENDOR(0x1bad),		/* Harmonix Rock Band guitar and drums */
++	XPAD_XBOX360_VENDOR(0x1ee9),		/* ZOTAC Technology Limited */
+ 	XPAD_XBOX360_VENDOR(0x20d6),		/* PowerA controllers */
+ 	XPAD_XBOXONE_VENDOR(0x20d6),		/* PowerA controllers */
+ 	XPAD_XBOX360_VENDOR(0x2345),		/* Machenike Controllers */
+@@ -528,17 +548,20 @@ static const struct usb_device_id xpad_table[] = {
+ 	XPAD_XBOXONE_VENDOR(0x24c6),		/* PowerA controllers */
+ 	XPAD_XBOX360_VENDOR(0x2563),		/* OneXPlayer Gamepad */
+ 	XPAD_XBOX360_VENDOR(0x260d),		/* Dareu H101 */
+-       XPAD_XBOXONE_VENDOR(0x294b),            /* Snakebyte */
++	XPAD_XBOXONE_VENDOR(0x294b),		/* Snakebyte */
++	XPAD_XBOX360_VENDOR(0x2993),		/* TECNO Mobile */
+ 	XPAD_XBOX360_VENDOR(0x2c22),		/* Qanba Controllers */
+-	XPAD_XBOX360_VENDOR(0x2dc8),            /* 8BitDo Pro 2 Wired Controller */
+-	XPAD_XBOXONE_VENDOR(0x2dc8),		/* 8BitDo Pro 2 Wired Controller for Xbox */
+-	XPAD_XBOXONE_VENDOR(0x2e24),		/* Hyperkin Duke Xbox One pad */
+-	XPAD_XBOX360_VENDOR(0x2f24),		/* GameSir controllers */
++	XPAD_XBOX360_VENDOR(0x2dc8),		/* 8BitDo Controllers */
++	XPAD_XBOXONE_VENDOR(0x2dc8),		/* 8BitDo Controllers */
++	XPAD_XBOXONE_VENDOR(0x2e24),		/* Hyperkin Controllers */
++	XPAD_XBOX360_VENDOR(0x2f24),		/* GameSir Controllers */
++	XPAD_XBOXONE_VENDOR(0x2e95),		/* SCUF Gaming Controller */
+ 	XPAD_XBOX360_VENDOR(0x31e3),		/* Wooting Keyboards */
+ 	XPAD_XBOX360_VENDOR(0x3285),		/* Nacon GC-100 */
+ 	XPAD_XBOXONE_VENDOR(0x3285),		/* Nacon Evol-X */
+ 	XPAD_XBOX360_VENDOR(0x3537),		/* GameSir Controllers */
+ 	XPAD_XBOXONE_VENDOR(0x3537),		/* GameSir Controllers */
++	XPAD_XBOX360_VENDOR(0x413d),		/* Black Shark Green Ghost Controller */
+ 	{ }
+ };
+ 
+@@ -691,7 +714,9 @@ static const struct xboxone_init_packet xboxone_init_packets[] = {
+ 	XBOXONE_INIT_PKT(0x045e, 0x0b00, xboxone_s_init),
+ 	XBOXONE_INIT_PKT(0x045e, 0x0b00, extra_input_packet_init),
+ 	XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_led_on),
++	XBOXONE_INIT_PKT(0x20d6, 0xa01a, xboxone_pdp_led_on),
+ 	XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_auth),
++	XBOXONE_INIT_PKT(0x20d6, 0xa01a, xboxone_pdp_auth),
+ 	XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init),
+ 	XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumblebegin_init),
+ 	XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumblebegin_init),
+diff --git a/drivers/input/misc/iqs7222.c b/drivers/input/misc/iqs7222.c
+index be80a31de9f8f4..01c4009fd53e7b 100644
+--- a/drivers/input/misc/iqs7222.c
++++ b/drivers/input/misc/iqs7222.c
+@@ -100,11 +100,11 @@ enum iqs7222_reg_key_id {
+ 
+ enum iqs7222_reg_grp_id {
+ 	IQS7222_REG_GRP_STAT,
+-	IQS7222_REG_GRP_FILT,
+ 	IQS7222_REG_GRP_CYCLE,
+ 	IQS7222_REG_GRP_GLBL,
+ 	IQS7222_REG_GRP_BTN,
+ 	IQS7222_REG_GRP_CHAN,
++	IQS7222_REG_GRP_FILT,
+ 	IQS7222_REG_GRP_SLDR,
+ 	IQS7222_REG_GRP_TPAD,
+ 	IQS7222_REG_GRP_GPIO,
+@@ -286,6 +286,7 @@ static const struct iqs7222_event_desc iqs7222_tp_events[] = {
+ 
+ struct iqs7222_reg_grp_desc {
+ 	u16 base;
++	u16 val_len;
+ 	int num_row;
+ 	int num_col;
+ };
+@@ -342,6 +343,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = {
+ 			},
+ 			[IQS7222_REG_GRP_FILT] = {
+ 				.base = 0xAC00,
++				.val_len = 3,
+ 				.num_row = 1,
+ 				.num_col = 2,
+ 			},
+@@ -400,6 +402,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = {
+ 			},
+ 			[IQS7222_REG_GRP_FILT] = {
+ 				.base = 0xAC00,
++				.val_len = 3,
+ 				.num_row = 1,
+ 				.num_col = 2,
+ 			},
+@@ -454,6 +457,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = {
+ 			},
+ 			[IQS7222_REG_GRP_FILT] = {
+ 				.base = 0xC400,
++				.val_len = 3,
+ 				.num_row = 1,
+ 				.num_col = 2,
+ 			},
+@@ -496,6 +500,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = {
+ 			},
+ 			[IQS7222_REG_GRP_FILT] = {
+ 				.base = 0xC400,
++				.val_len = 3,
+ 				.num_row = 1,
+ 				.num_col = 2,
+ 			},
+@@ -543,6 +548,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = {
+ 			},
+ 			[IQS7222_REG_GRP_FILT] = {
+ 				.base = 0xAA00,
++				.val_len = 3,
+ 				.num_row = 1,
+ 				.num_col = 2,
+ 			},
+@@ -600,6 +606,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = {
+ 			},
+ 			[IQS7222_REG_GRP_FILT] = {
+ 				.base = 0xAA00,
++				.val_len = 3,
+ 				.num_row = 1,
+ 				.num_col = 2,
+ 			},
+@@ -656,6 +663,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = {
+ 			},
+ 			[IQS7222_REG_GRP_FILT] = {
+ 				.base = 0xAE00,
++				.val_len = 3,
+ 				.num_row = 1,
+ 				.num_col = 2,
+ 			},
+@@ -712,6 +720,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = {
+ 			},
+ 			[IQS7222_REG_GRP_FILT] = {
+ 				.base = 0xAE00,
++				.val_len = 3,
+ 				.num_row = 1,
+ 				.num_col = 2,
+ 			},
+@@ -768,6 +777,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = {
+ 			},
+ 			[IQS7222_REG_GRP_FILT] = {
+ 				.base = 0xAE00,
++				.val_len = 3,
+ 				.num_row = 1,
+ 				.num_col = 2,
+ 			},
+@@ -1604,7 +1614,7 @@ static int iqs7222_force_comms(struct iqs7222_private *iqs7222)
+ }
+ 
+ static int iqs7222_read_burst(struct iqs7222_private *iqs7222,
+-			      u16 reg, void *val, u16 num_val)
++			      u16 reg, void *val, u16 val_len)
+ {
+ 	u8 reg_buf[sizeof(__be16)];
+ 	int ret, i;
+@@ -1619,7 +1629,7 @@ static int iqs7222_read_burst(struct iqs7222_private *iqs7222,
+ 		{
+ 			.addr = client->addr,
+ 			.flags = I2C_M_RD,
+-			.len = num_val * sizeof(__le16),
++			.len = val_len,
+ 			.buf = (u8 *)val,
+ 		},
+ 	};
+@@ -1675,7 +1685,7 @@ static int iqs7222_read_word(struct iqs7222_private *iqs7222, u16 reg, u16 *val)
+ 	__le16 val_buf;
+ 	int error;
+ 
+-	error = iqs7222_read_burst(iqs7222, reg, &val_buf, 1);
++	error = iqs7222_read_burst(iqs7222, reg, &val_buf, sizeof(val_buf));
+ 	if (error)
+ 		return error;
+ 
+@@ -1685,10 +1695,9 @@ static int iqs7222_read_word(struct iqs7222_private *iqs7222, u16 reg, u16 *val)
+ }
+ 
+ static int iqs7222_write_burst(struct iqs7222_private *iqs7222,
+-			       u16 reg, const void *val, u16 num_val)
++			       u16 reg, const void *val, u16 val_len)
+ {
+ 	int reg_len = reg > U8_MAX ? sizeof(reg) : sizeof(u8);
+-	int val_len = num_val * sizeof(__le16);
+ 	int msg_len = reg_len + val_len;
+ 	int ret, i;
+ 	struct i2c_client *client = iqs7222->client;
+@@ -1747,7 +1756,7 @@ static int iqs7222_write_word(struct iqs7222_private *iqs7222, u16 reg, u16 val)
+ {
+ 	__le16 val_buf = cpu_to_le16(val);
+ 
+-	return iqs7222_write_burst(iqs7222, reg, &val_buf, 1);
++	return iqs7222_write_burst(iqs7222, reg, &val_buf, sizeof(val_buf));
+ }
+ 
+ static int iqs7222_ati_trigger(struct iqs7222_private *iqs7222)
+@@ -1831,30 +1840,14 @@ static int iqs7222_dev_init(struct iqs7222_private *iqs7222, int dir)
+ 
+ 	/*
+ 	 * Acknowledge reset before writing any registers in case the device
+-	 * suffers a spurious reset during initialization. Because this step
+-	 * may change the reserved fields of the second filter beta register,
+-	 * its cache must be updated.
+-	 *
+-	 * Writing the second filter beta register, in turn, may clobber the
+-	 * system status register. As such, the filter beta register pair is
+-	 * written first to protect against this hazard.
++	 * suffers a spurious reset during initialization.
+ 	 */
+ 	if (dir == WRITE) {
+-		u16 reg = dev_desc->reg_grps[IQS7222_REG_GRP_FILT].base + 1;
+-		u16 filt_setup;
+-
+ 		error = iqs7222_write_word(iqs7222, IQS7222_SYS_SETUP,
+ 					   iqs7222->sys_setup[0] |
+ 					   IQS7222_SYS_SETUP_ACK_RESET);
+ 		if (error)
+ 			return error;
+-
+-		error = iqs7222_read_word(iqs7222, reg, &filt_setup);
+-		if (error)
+-			return error;
+-
+-		iqs7222->filt_setup[1] &= GENMASK(7, 0);
+-		iqs7222->filt_setup[1] |= (filt_setup & ~GENMASK(7, 0));
+ 	}
+ 
+ 	/*
+@@ -1883,6 +1876,7 @@ static int iqs7222_dev_init(struct iqs7222_private *iqs7222, int dir)
+ 		int num_col = dev_desc->reg_grps[i].num_col;
+ 		u16 reg = dev_desc->reg_grps[i].base;
+ 		__le16 *val_buf;
++		u16 val_len = dev_desc->reg_grps[i].val_len ? : num_col * sizeof(*val_buf);
+ 		u16 *val;
+ 
+ 		if (!num_col)
+@@ -1900,7 +1894,7 @@ static int iqs7222_dev_init(struct iqs7222_private *iqs7222, int dir)
+ 			switch (dir) {
+ 			case READ:
+ 				error = iqs7222_read_burst(iqs7222, reg,
+-							   val_buf, num_col);
++							   val_buf, val_len);
+ 				for (k = 0; k < num_col; k++)
+ 					val[k] = le16_to_cpu(val_buf[k]);
+ 				break;
+@@ -1909,7 +1903,7 @@ static int iqs7222_dev_init(struct iqs7222_private *iqs7222, int dir)
+ 				for (k = 0; k < num_col; k++)
+ 					val_buf[k] = cpu_to_le16(val[k]);
+ 				error = iqs7222_write_burst(iqs7222, reg,
+-							    val_buf, num_col);
++							    val_buf, val_len);
+ 				break;
+ 
+ 			default:
+@@ -1962,7 +1956,7 @@ static int iqs7222_dev_info(struct iqs7222_private *iqs7222)
+ 	int error, i;
+ 
+ 	error = iqs7222_read_burst(iqs7222, IQS7222_PROD_NUM, dev_id,
+-				   ARRAY_SIZE(dev_id));
++				   sizeof(dev_id));
+ 	if (error)
+ 		return error;
+ 
+@@ -2917,7 +2911,7 @@ static int iqs7222_report(struct iqs7222_private *iqs7222)
+ 	__le16 status[IQS7222_MAX_COLS_STAT];
+ 
+ 	error = iqs7222_read_burst(iqs7222, IQS7222_SYS_STATUS, status,
+-				   num_stat);
++				   num_stat * sizeof(*status));
+ 	if (error)
+ 		return error;
+ 
+diff --git a/drivers/input/serio/i8042-acpipnpio.h b/drivers/input/serio/i8042-acpipnpio.h
+index 34d1f07ea4c304..8813db7eec3978 100644
+--- a/drivers/input/serio/i8042-acpipnpio.h
++++ b/drivers/input/serio/i8042-acpipnpio.h
+@@ -1080,16 +1080,14 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
+ 			DMI_MATCH(DMI_BOARD_VENDOR, "TUXEDO"),
+ 			DMI_MATCH(DMI_BOARD_NAME, "AURA1501"),
+ 		},
+-		.driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+-					SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
++		.driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
+ 	},
+ 	{
+ 		.matches = {
+ 			DMI_MATCH(DMI_BOARD_VENDOR, "TUXEDO"),
+ 			DMI_MATCH(DMI_BOARD_NAME, "EDUBOOK1502"),
+ 		},
+-		.driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+-					SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
++		.driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
+ 	},
+ 	{
+ 		/* Mivvy M310 */
+@@ -1159,9 +1157,7 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
+ 	},
+ 	/*
+ 	 * A lot of modern Clevo barebones have touchpad and/or keyboard issues
+-	 * after suspend fixable with nomux + reset + noloop + nopnp. Luckily,
+-	 * none of them have an external PS/2 port so this can safely be set for
+-	 * all of them.
++	 * after suspend fixable with the forcenorestore quirk.
+ 	 * Clevo barebones come with board_vendor and/or system_vendor set to
+ 	 * either the very generic string "Notebook" and/or a different value
+ 	 * for each individual reseller. The only somewhat universal way to
+@@ -1171,29 +1167,25 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
+ 		.matches = {
+ 			DMI_MATCH(DMI_BOARD_NAME, "LAPQC71A"),
+ 		},
+-		.driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+-					SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
++		.driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
+ 	},
+ 	{
+ 		.matches = {
+ 			DMI_MATCH(DMI_BOARD_NAME, "LAPQC71B"),
+ 		},
+-		.driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+-					SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
++		.driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
+ 	},
+ 	{
+ 		.matches = {
+ 			DMI_MATCH(DMI_BOARD_NAME, "N140CU"),
+ 		},
+-		.driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+-					SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
++		.driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
+ 	},
+ 	{
+ 		.matches = {
+ 			DMI_MATCH(DMI_BOARD_NAME, "N141CU"),
+ 		},
+-		.driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+-					SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
++		.driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
+ 	},
+ 	{
+ 		.matches = {
+@@ -1205,29 +1197,19 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
+ 		.matches = {
+ 			DMI_MATCH(DMI_BOARD_NAME, "NH5xAx"),
+ 		},
+-		.driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+-					SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
++		.driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
+ 	},
+ 	{
+-		/*
+-		 * Setting SERIO_QUIRK_NOMUX or SERIO_QUIRK_RESET_ALWAYS makes
+-		 * the keyboard very laggy for ~5 seconds after boot and
+-		 * sometimes also after resume.
+-		 * However both are required for the keyboard to not fail
+-		 * completely sometimes after boot or resume.
+-		 */
+ 		.matches = {
+ 			DMI_MATCH(DMI_BOARD_NAME, "NHxxRZQ"),
+ 		},
+-		.driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+-					SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
++		.driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
+ 	},
+ 	{
+ 		.matches = {
+ 			DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"),
+ 		},
+-		.driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+-					SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
++		.driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
+ 	},
+ 	/*
+ 	 * At least one modern Clevo barebone has the touchpad connected both
+@@ -1243,17 +1225,15 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
+ 		.matches = {
+ 			DMI_MATCH(DMI_BOARD_NAME, "NS50MU"),
+ 		},
+-		.driver_data = (void *)(SERIO_QUIRK_NOAUX | SERIO_QUIRK_NOMUX |
+-					SERIO_QUIRK_RESET_ALWAYS | SERIO_QUIRK_NOLOOP |
+-					SERIO_QUIRK_NOPNP)
++		.driver_data = (void *)(SERIO_QUIRK_NOAUX |
++					SERIO_QUIRK_FORCENORESTORE)
+ 	},
+ 	{
+ 		.matches = {
+ 			DMI_MATCH(DMI_BOARD_NAME, "NS50_70MU"),
+ 		},
+-		.driver_data = (void *)(SERIO_QUIRK_NOAUX | SERIO_QUIRK_NOMUX |
+-					SERIO_QUIRK_RESET_ALWAYS | SERIO_QUIRK_NOLOOP |
+-					SERIO_QUIRK_NOPNP)
++		.driver_data = (void *)(SERIO_QUIRK_NOAUX |
++					SERIO_QUIRK_FORCENORESTORE)
+ 	},
+ 	{
+ 		.matches = {
+@@ -1265,8 +1245,13 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
+ 		.matches = {
+ 			DMI_MATCH(DMI_BOARD_NAME, "NJ50_70CU"),
+ 		},
+-		.driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+-					SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
++		.driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
++	},
++	{
++		.matches = {
++			DMI_MATCH(DMI_BOARD_NAME, "P640RE"),
++		},
++		.driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
+ 	},
+ 	{
+ 		/*
+@@ -1277,16 +1262,14 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
+ 		.matches = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "P65xH"),
+ 		},
+-		.driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+-					SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
++		.driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
+ 	},
+ 	{
+ 		/* Clevo P650RS, 650RP6, Sager NP8152-S, and others */
+ 		.matches = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "P65xRP"),
+ 		},
+-		.driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+-					SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
++		.driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
+ 	},
+ 	{
+ 		/*
+@@ -1297,8 +1280,7 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
+ 		.matches = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "P65_P67H"),
+ 		},
+-		.driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+-					SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
++		.driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
+ 	},
+ 	{
+ 		/*
+@@ -1309,8 +1291,7 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
+ 		.matches = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "P65_67RP"),
+ 		},
+-		.driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+-					SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
++		.driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
+ 	},
+ 	{
+ 		/*
+@@ -1321,8 +1302,7 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
+ 		.matches = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "P65_67RS"),
+ 		},
+-		.driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+-					SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
++		.driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
+ 	},
+ 	{
+ 		/*
+@@ -1333,22 +1313,43 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
+ 		.matches = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "P67xRP"),
+ 		},
+-		.driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+-					SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
++		.driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
+ 	},
+ 	{
+ 		.matches = {
+ 			DMI_MATCH(DMI_BOARD_NAME, "PB50_70DFx,DDx"),
+ 		},
+-		.driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+-					SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
++		.driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
++	},
++	{
++		.matches = {
++			DMI_MATCH(DMI_BOARD_NAME, "PB51RF"),
++		},
++		.driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
++	},
++	{
++		.matches = {
++			DMI_MATCH(DMI_BOARD_NAME, "PB71RD"),
++		},
++		.driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
++	},
++	{
++		.matches = {
++			DMI_MATCH(DMI_BOARD_NAME, "PC70DR"),
++		},
++		.driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
+ 	},
+ 	{
+ 		.matches = {
+ 			DMI_MATCH(DMI_BOARD_NAME, "PCX0DX"),
+ 		},
+-		.driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+-					SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
++		.driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
++	},
++	{
++		.matches = {
++			DMI_MATCH(DMI_BOARD_NAME, "PCX0DX_GN20"),
++		},
++		.driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
+ 	},
+ 	/* See comment on TUXEDO InfinityBook S17 Gen6 / Clevo NS70MU above */
+ 	{
+@@ -1361,15 +1362,13 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
+ 		.matches = {
+ 			DMI_MATCH(DMI_BOARD_NAME, "X170SM"),
+ 		},
+-		.driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+-					SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
++		.driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
+ 	},
+ 	{
+ 		.matches = {
+ 			DMI_MATCH(DMI_BOARD_NAME, "X170KM-G"),
+ 		},
+-		.driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+-					SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
++		.driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
+ 	},
+ 	{
+ 		/*
+diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
+index 607f18af70104d..212dafa0bba2d1 100644
+--- a/drivers/input/touchscreen/ads7846.c
++++ b/drivers/input/touchscreen/ads7846.c
+@@ -1011,7 +1011,7 @@ static int ads7846_setup_pendown(struct spi_device *spi,
+ 	if (pdata->get_pendown_state) {
+ 		ts->get_pendown_state = pdata->get_pendown_state;
+ 	} else {
+-		ts->gpio_pendown = gpiod_get(&spi->dev, "pendown", GPIOD_IN);
++		ts->gpio_pendown = devm_gpiod_get(&spi->dev, "pendown", GPIOD_IN);
+ 		if (IS_ERR(ts->gpio_pendown)) {
+ 			dev_err(&spi->dev, "failed to request pendown GPIO\n");
+ 			return PTR_ERR(ts->gpio_pendown);
+diff --git a/drivers/input/touchscreen/goodix_berlin_core.c b/drivers/input/touchscreen/goodix_berlin_core.c
+index 3fc03cf0ca23fd..141c64675997db 100644
+--- a/drivers/input/touchscreen/goodix_berlin_core.c
++++ b/drivers/input/touchscreen/goodix_berlin_core.c
+@@ -165,7 +165,7 @@ struct goodix_berlin_core {
+ 	struct device *dev;
+ 	struct regmap *regmap;
+ 	struct regulator *avdd;
+-	struct regulator *iovdd;
++	struct regulator *vddio;
+ 	struct gpio_desc *reset_gpio;
+ 	struct touchscreen_properties props;
+ 	struct goodix_berlin_fw_version fw_version;
+@@ -248,19 +248,19 @@ static int goodix_berlin_power_on(struct goodix_berlin_core *cd)
+ {
+ 	int error;
+ 
+-	error = regulator_enable(cd->iovdd);
++	error = regulator_enable(cd->vddio);
+ 	if (error) {
+-		dev_err(cd->dev, "Failed to enable iovdd: %d\n", error);
++		dev_err(cd->dev, "Failed to enable vddio: %d\n", error);
+ 		return error;
+ 	}
+ 
+-	/* Vendor waits 3ms for IOVDD to settle */
++	/* Vendor waits 3ms for VDDIO to settle */
+ 	usleep_range(3000, 3100);
+ 
+ 	error = regulator_enable(cd->avdd);
+ 	if (error) {
+ 		dev_err(cd->dev, "Failed to enable avdd: %d\n", error);
+-		goto err_iovdd_disable;
++		goto err_vddio_disable;
+ 	}
+ 
+ 	/* Vendor waits 15ms for IOVDD to settle */
+@@ -283,8 +283,8 @@ static int goodix_berlin_power_on(struct goodix_berlin_core *cd)
+ err_dev_reset:
+ 	gpiod_set_value_cansleep(cd->reset_gpio, 1);
+ 	regulator_disable(cd->avdd);
+-err_iovdd_disable:
+-	regulator_disable(cd->iovdd);
++err_vddio_disable:
++	regulator_disable(cd->vddio);
+ 	return error;
+ }
+ 
+@@ -292,7 +292,7 @@ static void goodix_berlin_power_off(struct goodix_berlin_core *cd)
+ {
+ 	gpiod_set_value_cansleep(cd->reset_gpio, 1);
+ 	regulator_disable(cd->avdd);
+-	regulator_disable(cd->iovdd);
++	regulator_disable(cd->vddio);
+ }
+ 
+ static int goodix_berlin_read_version(struct goodix_berlin_core *cd)
+@@ -744,10 +744,10 @@ int goodix_berlin_probe(struct device *dev, int irq, const struct input_id *id,
+ 		return dev_err_probe(dev, PTR_ERR(cd->avdd),
+ 				     "Failed to request avdd regulator\n");
+ 
+-	cd->iovdd = devm_regulator_get(dev, "iovdd");
+-	if (IS_ERR(cd->iovdd))
+-		return dev_err_probe(dev, PTR_ERR(cd->iovdd),
+-				     "Failed to request iovdd regulator\n");
++	cd->vddio = devm_regulator_get(dev, "vddio");
++	if (IS_ERR(cd->vddio))
++		return dev_err_probe(dev, PTR_ERR(cd->vddio),
++				     "Failed to request vddio regulator\n");
+ 
+ 	error = goodix_berlin_power_on(cd);
+ 	if (error) {
+diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
+index 731467d4ed101c..b690905ab89ffb 100644
+--- a/drivers/md/dm-flakey.c
++++ b/drivers/md/dm-flakey.c
+@@ -426,7 +426,7 @@ static struct bio *clone_bio(struct dm_target *ti, struct flakey_c *fc, struct b
+ 	if (!clone)
+ 		return NULL;
+ 
+-	bio_init(clone, fc->dev->bdev, bio->bi_inline_vecs, nr_iovecs, bio->bi_opf);
++	bio_init(clone, fc->dev->bdev, clone->bi_inline_vecs, nr_iovecs, bio->bi_opf);
+ 
+ 	clone->bi_iter.bi_sector = flakey_map_sector(ti, bio->bi_iter.bi_sector);
+ 	clone->bi_private = bio;
+diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
+index 327b6ecdc77e00..d1b095af253bdc 100644
+--- a/drivers/net/bonding/bond_options.c
++++ b/drivers/net/bonding/bond_options.c
+@@ -1242,10 +1242,28 @@ static bool slave_can_set_ns_maddr(const struct bonding *bond, struct slave *sla
+ 	       slave->dev->flags & IFF_MULTICAST;
+ }
+ 
++/**
++ * slave_set_ns_maddrs - add/del all NS mac addresses for slave
++ * @bond: bond device
++ * @slave: slave device
++ * @add: add or remove all the NS mac addresses
++ *
++ * This function tries to add or delete all the NS mac addresses on the slave
++ *
++ * Note, the IPv6 NS target address is the unicast address in Neighbor
++ * Solicitation (NS) message. The dest address of NS message should be
++ * solicited-node multicast address of the target. The dest mac of NS message
++ * is converted from the solicited-node multicast address.
++ *
++ * This function is called when
++ *   * arp_validate changes
++ *   * enslaving, releasing new slaves
++ */
+ static void slave_set_ns_maddrs(struct bonding *bond, struct slave *slave, bool add)
+ {
+ 	struct in6_addr *targets = bond->params.ns_targets;
+ 	char slot_maddr[MAX_ADDR_LEN];
++	struct in6_addr mcaddr;
+ 	int i;
+ 
+ 	if (!slave_can_set_ns_maddr(bond, slave))
+@@ -1255,7 +1273,8 @@ static void slave_set_ns_maddrs(struct bonding *bond, struct slave *slave, bool
+ 		if (ipv6_addr_any(&targets[i]))
+ 			break;
+ 
+-		if (!ndisc_mc_map(&targets[i], slot_maddr, slave->dev, 0)) {
++		addrconf_addr_solict_mult(&targets[i], &mcaddr);
++		if (!ndisc_mc_map(&mcaddr, slot_maddr, slave->dev, 0)) {
+ 			if (add)
+ 				dev_mc_add(slave->dev, slot_maddr);
+ 			else
+@@ -1278,23 +1297,43 @@ void bond_slave_ns_maddrs_del(struct bonding *bond, struct slave *slave)
+ 	slave_set_ns_maddrs(bond, slave, false);
+ }
+ 
++/**
++ * slave_set_ns_maddr - set new NS mac address for slave
++ * @bond: bond device
++ * @slave: slave device
++ * @target: the new IPv6 target
++ * @slot: the old IPv6 target in the slot
++ *
++ * This function tries to replace the old mac address to new one on the slave.
++ *
++ * Note, the target/slot IPv6 address is the unicast address in Neighbor
++ * Solicitation (NS) message. The dest address of NS message should be
++ * solicited-node multicast address of the target. The dest mac of NS message
++ * is converted from the solicited-node multicast address.
++ *
++ * This function is called when
++ *   * An IPv6 NS target is added or removed.
++ */
+ static void slave_set_ns_maddr(struct bonding *bond, struct slave *slave,
+ 			       struct in6_addr *target, struct in6_addr *slot)
+ {
+-	char target_maddr[MAX_ADDR_LEN], slot_maddr[MAX_ADDR_LEN];
++	char mac_addr[MAX_ADDR_LEN];
++	struct in6_addr mcast_addr;
+ 
+ 	if (!bond->params.arp_validate || !slave_can_set_ns_maddr(bond, slave))
+ 		return;
+ 
+-	/* remove the previous maddr from slave */
++	/* remove the previous mac addr from slave */
++	addrconf_addr_solict_mult(slot, &mcast_addr);
+ 	if (!ipv6_addr_any(slot) &&
+-	    !ndisc_mc_map(slot, slot_maddr, slave->dev, 0))
+-		dev_mc_del(slave->dev, slot_maddr);
++	    !ndisc_mc_map(&mcast_addr, mac_addr, slave->dev, 0))
++		dev_mc_del(slave->dev, mac_addr);
+ 
+-	/* add new maddr on slave if target is set */
++	/* add new mac addr on slave if target is set */
++	addrconf_addr_solict_mult(target, &mcast_addr);
+ 	if (!ipv6_addr_any(target) &&
+-	    !ndisc_mc_map(target, target_maddr, slave->dev, 0))
+-		dev_mc_add(slave->dev, target_maddr);
++	    !ndisc_mc_map(&mcast_addr, mac_addr, slave->dev, 0))
++		dev_mc_add(slave->dev, mac_addr);
+ }
+ 
+ static void _bond_options_ns_ip6_target_set(struct bonding *bond, int slot,
+diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
+index 284270a4ade1c1..5aeecfab96306c 100644
+--- a/drivers/net/dsa/mv88e6xxx/chip.c
++++ b/drivers/net/dsa/mv88e6xxx/chip.c
+@@ -2261,13 +2261,11 @@ mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port,
+ 	return err;
+ }
+ 
+-static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port,
+-					const unsigned char *addr, u16 vid,
+-					u8 state)
++static int mv88e6xxx_port_db_get(struct mv88e6xxx_chip *chip,
++				 const unsigned char *addr, u16 vid,
++				 u16 *fid, struct mv88e6xxx_atu_entry *entry)
+ {
+-	struct mv88e6xxx_atu_entry entry;
+ 	struct mv88e6xxx_vtu_entry vlan;
+-	u16 fid;
+ 	int err;
+ 
+ 	/* Ports have two private address databases: one for when the port is
+@@ -2278,7 +2276,7 @@ static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port,
+ 	 * VLAN ID into the port's database used for VLAN-unaware bridging.
+ 	 */
+ 	if (vid == 0) {
+-		fid = MV88E6XXX_FID_BRIDGED;
++		*fid = MV88E6XXX_FID_BRIDGED;
+ 	} else {
+ 		err = mv88e6xxx_vtu_get(chip, vid, &vlan);
+ 		if (err)
+@@ -2288,14 +2286,39 @@ static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port,
+ 		if (!vlan.valid)
+ 			return -EOPNOTSUPP;
+ 
+-		fid = vlan.fid;
++		*fid = vlan.fid;
+ 	}
+ 
+-	entry.state = 0;
+-	ether_addr_copy(entry.mac, addr);
+-	eth_addr_dec(entry.mac);
++	entry->state = 0;
++	ether_addr_copy(entry->mac, addr);
++	eth_addr_dec(entry->mac);
++
++	return mv88e6xxx_g1_atu_getnext(chip, *fid, entry);
++}
++
++static bool mv88e6xxx_port_db_find(struct mv88e6xxx_chip *chip,
++				   const unsigned char *addr, u16 vid)
++{
++	struct mv88e6xxx_atu_entry entry;
++	u16 fid;
++	int err;
+ 
+-	err = mv88e6xxx_g1_atu_getnext(chip, fid, &entry);
++	err = mv88e6xxx_port_db_get(chip, addr, vid, &fid, &entry);
++	if (err)
++		return false;
++
++	return entry.state && ether_addr_equal(entry.mac, addr);
++}
++
++static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port,
++					const unsigned char *addr, u16 vid,
++					u8 state)
++{
++	struct mv88e6xxx_atu_entry entry;
++	u16 fid;
++	int err;
++
++	err = mv88e6xxx_port_db_get(chip, addr, vid, &fid, &entry);
+ 	if (err)
+ 		return err;
+ 
+@@ -2893,6 +2916,13 @@ static int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
+ 	mv88e6xxx_reg_lock(chip);
+ 	err = mv88e6xxx_port_db_load_purge(chip, port, addr, vid,
+ 					   MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC);
++	if (err)
++		goto out;
++
++	if (!mv88e6xxx_port_db_find(chip, addr, vid))
++		err = -ENOSPC;
++
++out:
+ 	mv88e6xxx_reg_unlock(chip);
+ 
+ 	return err;
+@@ -6593,6 +6623,13 @@ static int mv88e6xxx_port_mdb_add(struct dsa_switch *ds, int port,
+ 	mv88e6xxx_reg_lock(chip);
+ 	err = mv88e6xxx_port_db_load_purge(chip, port, mdb->addr, mdb->vid,
+ 					   MV88E6XXX_G1_ATU_DATA_STATE_MC_STATIC);
++	if (err)
++		goto out;
++
++	if (!mv88e6xxx_port_db_find(chip, mdb->addr, mdb->vid))
++		err = -ENOSPC;
++
++out:
+ 	mv88e6xxx_reg_unlock(chip);
+ 
+ 	return err;
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 603e9c968c44bd..e7580df13229a6 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -864,6 +864,11 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
+ 		bnapi->events &= ~BNXT_TX_CMP_EVENT;
+ }
+ 
++static bool bnxt_separate_head_pool(void)
++{
++	return PAGE_SIZE > BNXT_RX_PAGE_SIZE;
++}
++
+ static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
+ 					 struct bnxt_rx_ring_info *rxr,
+ 					 unsigned int *offset,
+@@ -886,27 +891,19 @@ static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
+ }
+ 
+ static inline u8 *__bnxt_alloc_rx_frag(struct bnxt *bp, dma_addr_t *mapping,
++				       struct bnxt_rx_ring_info *rxr,
+ 				       gfp_t gfp)
+ {
+-	u8 *data;
+-	struct pci_dev *pdev = bp->pdev;
++	unsigned int offset;
++	struct page *page;
+ 
+-	if (gfp == GFP_ATOMIC)
+-		data = napi_alloc_frag(bp->rx_buf_size);
+-	else
+-		data = netdev_alloc_frag(bp->rx_buf_size);
+-	if (!data)
++	page = page_pool_alloc_frag(rxr->head_pool, &offset,
++				    bp->rx_buf_size, gfp);
++	if (!page)
+ 		return NULL;
+ 
+-	*mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
+-					bp->rx_buf_use_size, bp->rx_dir,
+-					DMA_ATTR_WEAK_ORDERING);
+-
+-	if (dma_mapping_error(&pdev->dev, *mapping)) {
+-		skb_free_frag(data);
+-		data = NULL;
+-	}
+-	return data;
++	*mapping = page_pool_get_dma_addr(page) + bp->rx_dma_offset + offset;
++	return page_address(page) + offset;
+ }
+ 
+ int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
+@@ -928,7 +925,7 @@ int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
+ 		rx_buf->data = page;
+ 		rx_buf->data_ptr = page_address(page) + offset + bp->rx_offset;
+ 	} else {
+-		u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, gfp);
++		u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, rxr, gfp);
+ 
+ 		if (!data)
+ 			return -ENOMEM;
+@@ -1179,13 +1176,14 @@ static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
+ 	}
+ 
+ 	skb = napi_build_skb(data, bp->rx_buf_size);
+-	dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
+-			       bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
++	dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
++				bp->rx_dir);
+ 	if (!skb) {
+-		skb_free_frag(data);
++		page_pool_free_va(rxr->head_pool, data, true);
+ 		return NULL;
+ 	}
+ 
++	skb_mark_for_recycle(skb);
+ 	skb_reserve(skb, bp->rx_offset);
+ 	skb_put(skb, offset_and_len & 0xffff);
+ 	return skb;
+@@ -1840,7 +1838,8 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
+ 		u8 *new_data;
+ 		dma_addr_t new_mapping;
+ 
+-		new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, GFP_ATOMIC);
++		new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, rxr,
++						GFP_ATOMIC);
+ 		if (!new_data) {
+ 			bnxt_abort_tpa(cpr, idx, agg_bufs);
+ 			cpr->sw_stats->rx.rx_oom_discards += 1;
+@@ -1852,16 +1851,16 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
+ 		tpa_info->mapping = new_mapping;
+ 
+ 		skb = napi_build_skb(data, bp->rx_buf_size);
+-		dma_unmap_single_attrs(&bp->pdev->dev, mapping,
+-				       bp->rx_buf_use_size, bp->rx_dir,
+-				       DMA_ATTR_WEAK_ORDERING);
++		dma_sync_single_for_cpu(&bp->pdev->dev, mapping,
++					bp->rx_buf_use_size, bp->rx_dir);
+ 
+ 		if (!skb) {
+-			skb_free_frag(data);
++			page_pool_free_va(rxr->head_pool, data, true);
+ 			bnxt_abort_tpa(cpr, idx, agg_bufs);
+ 			cpr->sw_stats->rx.rx_oom_discards += 1;
+ 			return NULL;
+ 		}
++		skb_mark_for_recycle(skb);
+ 		skb_reserve(skb, bp->rx_offset);
+ 		skb_put(skb, len);
+ 	}
+@@ -2025,6 +2024,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+ 	struct rx_cmp_ext *rxcmp1;
+ 	u32 tmp_raw_cons = *raw_cons;
+ 	u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
++	struct skb_shared_info *sinfo;
+ 	struct bnxt_sw_rx_bd *rx_buf;
+ 	unsigned int len;
+ 	u8 *data_ptr, agg_bufs, cmp_type;
+@@ -2151,6 +2151,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+ 							     false);
+ 			if (!frag_len)
+ 				goto oom_next_rx;
++
+ 		}
+ 		xdp_active = true;
+ 	}
+@@ -2160,6 +2161,12 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+ 			rc = 1;
+ 			goto next_rx;
+ 		}
++		if (xdp_buff_has_frags(&xdp)) {
++			sinfo = xdp_get_shared_info_from_buff(&xdp);
++			agg_bufs = sinfo->nr_frags;
++		} else {
++			agg_bufs = 0;
++		}
+ 	}
+ 
+ 	if (len <= bp->rx_copy_thresh) {
+@@ -2197,7 +2204,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+ 			if (!skb)
+ 				goto oom_next_rx;
+ 		} else {
+-			skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, rxr->page_pool, &xdp, rxcmp1);
++			skb = bnxt_xdp_build_skb(bp, skb, agg_bufs,
++						 rxr->page_pool, &xdp);
+ 			if (!skb) {
+ 				/* we should be able to free the old skb here */
+ 				bnxt_xdp_buff_frags_free(rxr, &xdp);
+@@ -3316,28 +3324,22 @@ static void bnxt_free_tx_skbs(struct bnxt *bp)
+ 
+ static void bnxt_free_one_rx_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
+ {
+-	struct pci_dev *pdev = bp->pdev;
+ 	int i, max_idx;
+ 
+ 	max_idx = bp->rx_nr_pages * RX_DESC_CNT;
+ 
+ 	for (i = 0; i < max_idx; i++) {
+ 		struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
+-		dma_addr_t mapping = rx_buf->mapping;
+ 		void *data = rx_buf->data;
+ 
+ 		if (!data)
+ 			continue;
+ 
+ 		rx_buf->data = NULL;
+-		if (BNXT_RX_PAGE_MODE(bp)) {
++		if (BNXT_RX_PAGE_MODE(bp))
+ 			page_pool_recycle_direct(rxr->page_pool, data);
+-		} else {
+-			dma_unmap_single_attrs(&pdev->dev, mapping,
+-					       bp->rx_buf_use_size, bp->rx_dir,
+-					       DMA_ATTR_WEAK_ORDERING);
+-			skb_free_frag(data);
+-		}
++		else
++			page_pool_free_va(rxr->head_pool, data, true);
+ 	}
+ }
+ 
+@@ -3361,16 +3363,11 @@ static void bnxt_free_one_rx_agg_ring(struct bnxt *bp, struct bnxt_rx_ring_info
+ 	}
+ }
+ 
+-static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
++static void bnxt_free_one_tpa_info_data(struct bnxt *bp,
++					struct bnxt_rx_ring_info *rxr)
+ {
+-	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
+-	struct pci_dev *pdev = bp->pdev;
+-	struct bnxt_tpa_idx_map *map;
+ 	int i;
+ 
+-	if (!rxr->rx_tpa)
+-		goto skip_rx_tpa_free;
+-
+ 	for (i = 0; i < bp->max_tpa; i++) {
+ 		struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
+ 		u8 *data = tpa_info->data;
+@@ -3378,14 +3375,20 @@ static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
+ 		if (!data)
+ 			continue;
+ 
+-		dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping,
+-				       bp->rx_buf_use_size, bp->rx_dir,
+-				       DMA_ATTR_WEAK_ORDERING);
+-
+ 		tpa_info->data = NULL;
+-
+-		skb_free_frag(data);
++		page_pool_free_va(rxr->head_pool, data, false);
+ 	}
++}
++
++static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp,
++				       struct bnxt_rx_ring_info *rxr)
++{
++	struct bnxt_tpa_idx_map *map;
++
++	if (!rxr->rx_tpa)
++		goto skip_rx_tpa_free;
++
++	bnxt_free_one_tpa_info_data(bp, rxr);
+ 
+ skip_rx_tpa_free:
+ 	if (!rxr->rx_buf_ring)
+@@ -3413,7 +3416,7 @@ static void bnxt_free_rx_skbs(struct bnxt *bp)
+ 		return;
+ 
+ 	for (i = 0; i < bp->rx_nr_rings; i++)
+-		bnxt_free_one_rx_ring_skbs(bp, i);
++		bnxt_free_one_rx_ring_skbs(bp, &bp->rx_ring[i]);
+ }
+ 
+ static void bnxt_free_skbs(struct bnxt *bp)
+@@ -3525,29 +3528,64 @@ static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
+ 	return 0;
+ }
+ 
++static void bnxt_free_one_tpa_info(struct bnxt *bp,
++				   struct bnxt_rx_ring_info *rxr)
++{
++	int i;
++
++	kfree(rxr->rx_tpa_idx_map);
++	rxr->rx_tpa_idx_map = NULL;
++	if (rxr->rx_tpa) {
++		for (i = 0; i < bp->max_tpa; i++) {
++			kfree(rxr->rx_tpa[i].agg_arr);
++			rxr->rx_tpa[i].agg_arr = NULL;
++		}
++	}
++	kfree(rxr->rx_tpa);
++	rxr->rx_tpa = NULL;
++}
++
+ static void bnxt_free_tpa_info(struct bnxt *bp)
+ {
+-	int i, j;
++	int i;
+ 
+ 	for (i = 0; i < bp->rx_nr_rings; i++) {
+ 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
+ 
+-		kfree(rxr->rx_tpa_idx_map);
+-		rxr->rx_tpa_idx_map = NULL;
+-		if (rxr->rx_tpa) {
+-			for (j = 0; j < bp->max_tpa; j++) {
+-				kfree(rxr->rx_tpa[j].agg_arr);
+-				rxr->rx_tpa[j].agg_arr = NULL;
+-			}
+-		}
+-		kfree(rxr->rx_tpa);
+-		rxr->rx_tpa = NULL;
++		bnxt_free_one_tpa_info(bp, rxr);
+ 	}
+ }
+ 
++static int bnxt_alloc_one_tpa_info(struct bnxt *bp,
++				   struct bnxt_rx_ring_info *rxr)
++{
++	struct rx_agg_cmp *agg;
++	int i;
++
++	rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
++			      GFP_KERNEL);
++	if (!rxr->rx_tpa)
++		return -ENOMEM;
++
++	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
++		return 0;
++	for (i = 0; i < bp->max_tpa; i++) {
++		agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL);
++		if (!agg)
++			return -ENOMEM;
++		rxr->rx_tpa[i].agg_arr = agg;
++	}
++	rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
++				      GFP_KERNEL);
++	if (!rxr->rx_tpa_idx_map)
++		return -ENOMEM;
++
++	return 0;
++}
++
+ static int bnxt_alloc_tpa_info(struct bnxt *bp)
+ {
+-	int i, j;
++	int i, rc;
+ 
+ 	bp->max_tpa = MAX_TPA;
+ 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
+@@ -3558,25 +3596,10 @@ static int bnxt_alloc_tpa_info(struct bnxt *bp)
+ 
+ 	for (i = 0; i < bp->rx_nr_rings; i++) {
+ 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
+-		struct rx_agg_cmp *agg;
+-
+-		rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
+-				      GFP_KERNEL);
+-		if (!rxr->rx_tpa)
+-			return -ENOMEM;
+ 
+-		if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
+-			continue;
+-		for (j = 0; j < bp->max_tpa; j++) {
+-			agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL);
+-			if (!agg)
+-				return -ENOMEM;
+-			rxr->rx_tpa[j].agg_arr = agg;
+-		}
+-		rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
+-					      GFP_KERNEL);
+-		if (!rxr->rx_tpa_idx_map)
+-			return -ENOMEM;
++		rc = bnxt_alloc_one_tpa_info(bp, rxr);
++		if (rc)
++			return rc;
+ 	}
+ 	return 0;
+ }
+@@ -3600,7 +3623,9 @@ static void bnxt_free_rx_rings(struct bnxt *bp)
+ 			xdp_rxq_info_unreg(&rxr->xdp_rxq);
+ 
+ 		page_pool_destroy(rxr->page_pool);
+-		rxr->page_pool = NULL;
++		if (bnxt_separate_head_pool())
++			page_pool_destroy(rxr->head_pool);
++		rxr->page_pool = rxr->head_pool = NULL;
+ 
+ 		kfree(rxr->rx_agg_bmap);
+ 		rxr->rx_agg_bmap = NULL;
+@@ -3618,6 +3643,7 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
+ 				   int numa_node)
+ {
+ 	struct page_pool_params pp = { 0 };
++	struct page_pool *pool;
+ 
+ 	pp.pool_size = bp->rx_agg_ring_size;
+ 	if (BNXT_RX_PAGE_MODE(bp))
+@@ -3630,14 +3656,25 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
+ 	pp.max_len = PAGE_SIZE;
+ 	pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+ 
+-	rxr->page_pool = page_pool_create(&pp);
+-	if (IS_ERR(rxr->page_pool)) {
+-		int err = PTR_ERR(rxr->page_pool);
++	pool = page_pool_create(&pp);
++	if (IS_ERR(pool))
++		return PTR_ERR(pool);
++	rxr->page_pool = pool;
+ 
+-		rxr->page_pool = NULL;
+-		return err;
++	if (bnxt_separate_head_pool()) {
++		pp.pool_size = max(bp->rx_ring_size, 1024);
++		pool = page_pool_create(&pp);
++		if (IS_ERR(pool))
++			goto err_destroy_pp;
+ 	}
++	rxr->head_pool = pool;
++
+ 	return 0;
++
++err_destroy_pp:
++	page_pool_destroy(rxr->page_pool);
++	rxr->page_pool = NULL;
++	return PTR_ERR(pool);
+ }
+ 
+ static int bnxt_alloc_rx_rings(struct bnxt *bp)
+@@ -4171,10 +4208,31 @@ static void bnxt_alloc_one_rx_ring_page(struct bnxt *bp,
+ 	rxr->rx_agg_prod = prod;
+ }
+ 
++static int bnxt_alloc_one_tpa_info_data(struct bnxt *bp,
++					struct bnxt_rx_ring_info *rxr)
++{
++	dma_addr_t mapping;
++	u8 *data;
++	int i;
++
++	for (i = 0; i < bp->max_tpa; i++) {
++		data = __bnxt_alloc_rx_frag(bp, &mapping, rxr,
++					    GFP_KERNEL);
++		if (!data)
++			return -ENOMEM;
++
++		rxr->rx_tpa[i].data = data;
++		rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
++		rxr->rx_tpa[i].mapping = mapping;
++	}
++
++	return 0;
++}
++
+ static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
+ {
+ 	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
+-	int i;
++	int rc;
+ 
+ 	bnxt_alloc_one_rx_ring_skb(bp, rxr, ring_nr);
+ 
+@@ -4184,18 +4242,9 @@ static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
+ 	bnxt_alloc_one_rx_ring_page(bp, rxr, ring_nr);
+ 
+ 	if (rxr->rx_tpa) {
+-		dma_addr_t mapping;
+-		u8 *data;
+-
+-		for (i = 0; i < bp->max_tpa; i++) {
+-			data = __bnxt_alloc_rx_frag(bp, &mapping, GFP_KERNEL);
+-			if (!data)
+-				return -ENOMEM;
+-
+-			rxr->rx_tpa[i].data = data;
+-			rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
+-			rxr->rx_tpa[i].mapping = mapping;
+-		}
++		rc = bnxt_alloc_one_tpa_info_data(bp, rxr);
++		if (rc)
++			return rc;
+ 	}
+ 	return 0;
+ }
+@@ -13452,7 +13501,7 @@ static void bnxt_rx_ring_reset(struct bnxt *bp)
+ 			bnxt_reset_task(bp, true);
+ 			break;
+ 		}
+-		bnxt_free_one_rx_ring_skbs(bp, i);
++		bnxt_free_one_rx_ring_skbs(bp, rxr);
+ 		rxr->rx_prod = 0;
+ 		rxr->rx_agg_prod = 0;
+ 		rxr->rx_sw_agg_prod = 0;
+@@ -15023,6 +15072,9 @@ static void bnxt_get_queue_stats_rx(struct net_device *dev, int i,
+ 	struct bnxt_cp_ring_info *cpr;
+ 	u64 *sw;
+ 
++	if (!bp->bnapi)
++		return;
++
+ 	cpr = &bp->bnapi[i]->cp_ring;
+ 	sw = cpr->stats.sw_stats;
+ 
+@@ -15046,6 +15098,9 @@ static void bnxt_get_queue_stats_tx(struct net_device *dev, int i,
+ 	struct bnxt_napi *bnapi;
+ 	u64 *sw;
+ 
++	if (!bp->tx_ring)
++		return;
++
+ 	bnapi = bp->tx_ring[bp->tx_ring_map[i]].bnapi;
+ 	sw = bnapi->cp_ring.stats.sw_stats;
+ 
+@@ -15100,6 +15155,9 @@ static int bnxt_queue_mem_alloc(struct net_device *dev, void *qmem, int idx)
+ 	struct bnxt_ring_struct *ring;
+ 	int rc;
+ 
++	if (!bp->rx_ring)
++		return -ENETDOWN;
++
+ 	rxr = &bp->rx_ring[idx];
+ 	clone = qmem;
+ 	memcpy(clone, rxr, sizeof(*rxr));
+@@ -15141,15 +15199,25 @@ static int bnxt_queue_mem_alloc(struct net_device *dev, void *qmem, int idx)
+ 			goto err_free_rx_agg_ring;
+ 	}
+ 
++	if (bp->flags & BNXT_FLAG_TPA) {
++		rc = bnxt_alloc_one_tpa_info(bp, clone);
++		if (rc)
++			goto err_free_tpa_info;
++	}
++
+ 	bnxt_init_one_rx_ring_rxbd(bp, clone);
+ 	bnxt_init_one_rx_agg_ring_rxbd(bp, clone);
+ 
+ 	bnxt_alloc_one_rx_ring_skb(bp, clone, idx);
+ 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ 		bnxt_alloc_one_rx_ring_page(bp, clone, idx);
++	if (bp->flags & BNXT_FLAG_TPA)
++		bnxt_alloc_one_tpa_info_data(bp, clone);
+ 
+ 	return 0;
+ 
++err_free_tpa_info:
++	bnxt_free_one_tpa_info(bp, clone);
+ err_free_rx_agg_ring:
+ 	bnxt_free_ring(bp, &clone->rx_agg_ring_struct.ring_mem);
+ err_free_rx_ring:
+@@ -15157,9 +15225,11 @@ static int bnxt_queue_mem_alloc(struct net_device *dev, void *qmem, int idx)
+ err_rxq_info_unreg:
+ 	xdp_rxq_info_unreg(&clone->xdp_rxq);
+ err_page_pool_destroy:
+-	clone->page_pool->p.napi = NULL;
+ 	page_pool_destroy(clone->page_pool);
++	if (bnxt_separate_head_pool())
++		page_pool_destroy(clone->head_pool);
+ 	clone->page_pool = NULL;
++	clone->head_pool = NULL;
+ 	return rc;
+ }
+ 
+@@ -15169,13 +15239,16 @@ static void bnxt_queue_mem_free(struct net_device *dev, void *qmem)
+ 	struct bnxt *bp = netdev_priv(dev);
+ 	struct bnxt_ring_struct *ring;
+ 
+-	bnxt_free_one_rx_ring(bp, rxr);
+-	bnxt_free_one_rx_agg_ring(bp, rxr);
++	bnxt_free_one_rx_ring_skbs(bp, rxr);
++	bnxt_free_one_tpa_info(bp, rxr);
+ 
+ 	xdp_rxq_info_unreg(&rxr->xdp_rxq);
+ 
+ 	page_pool_destroy(rxr->page_pool);
++	if (bnxt_separate_head_pool())
++		page_pool_destroy(rxr->head_pool);
+ 	rxr->page_pool = NULL;
++	rxr->head_pool = NULL;
+ 
+ 	ring = &rxr->rx_ring_struct;
+ 	bnxt_free_ring(bp, &ring->ring_mem);
+@@ -15257,7 +15330,10 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
+ 	rxr->rx_agg_prod = clone->rx_agg_prod;
+ 	rxr->rx_sw_agg_prod = clone->rx_sw_agg_prod;
+ 	rxr->rx_next_cons = clone->rx_next_cons;
++	rxr->rx_tpa = clone->rx_tpa;
++	rxr->rx_tpa_idx_map = clone->rx_tpa_idx_map;
+ 	rxr->page_pool = clone->page_pool;
++	rxr->head_pool = clone->head_pool;
+ 	rxr->xdp_rxq = clone->xdp_rxq;
+ 
+ 	bnxt_copy_rx_ring(bp, rxr, clone);
+@@ -15276,7 +15352,7 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
+ 	cpr = &rxr->bnapi->cp_ring;
+ 	cpr->sw_stats->rx.rx_resets++;
+ 
+-	for (i = 0; i <= BNXT_VNIC_NTUPLE; i++) {
++	for (i = 0; i <= bp->nr_vnics; i++) {
+ 		vnic = &bp->vnic_info[i];
+ 
+ 		rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true);
+@@ -15304,7 +15380,7 @@ static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
+ 	struct bnxt_vnic_info *vnic;
+ 	int i;
+ 
+-	for (i = 0; i <= BNXT_VNIC_NTUPLE; i++) {
++	for (i = 0; i <= bp->nr_vnics; i++) {
+ 		vnic = &bp->vnic_info[i];
+ 		vnic->mru = 0;
+ 		bnxt_hwrm_vnic_update(bp, vnic,
+@@ -15318,6 +15394,8 @@ static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
+ 	bnxt_hwrm_rx_agg_ring_free(bp, rxr, false);
+ 	rxr->rx_next_cons = 0;
+ 	page_pool_disable_direct_recycling(rxr->page_pool);
++	if (bnxt_separate_head_pool())
++		page_pool_disable_direct_recycling(rxr->head_pool);
+ 
+ 	memcpy(qmem, rxr, sizeof(*rxr));
+ 	bnxt_init_rx_ring_struct(bp, qmem);
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+index bee645f58d0bde..1758edcd1db42a 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+@@ -1108,6 +1108,7 @@ struct bnxt_rx_ring_info {
+ 	struct bnxt_ring_struct	rx_agg_ring_struct;
+ 	struct xdp_rxq_info	xdp_rxq;
+ 	struct page_pool	*page_pool;
++	struct page_pool	*head_pool;
+ };
+ 
+ struct bnxt_rx_sw_stats {
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+index dc51dce209d5f0..8726657f5cb9e0 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+@@ -456,23 +456,16 @@ int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+ 
+ struct sk_buff *
+ bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb, u8 num_frags,
+-		   struct page_pool *pool, struct xdp_buff *xdp,
+-		   struct rx_cmp_ext *rxcmp1)
++		   struct page_pool *pool, struct xdp_buff *xdp)
+ {
+ 	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
+ 
+ 	if (!skb)
+ 		return NULL;
+-	skb_checksum_none_assert(skb);
+-	if (RX_CMP_L4_CS_OK(rxcmp1)) {
+-		if (bp->dev->features & NETIF_F_RXCSUM) {
+-			skb->ip_summed = CHECKSUM_UNNECESSARY;
+-			skb->csum_level = RX_CMP_ENCAP(rxcmp1);
+-		}
+-	}
++
+ 	xdp_update_skb_shared_info(skb, num_frags,
+ 				   sinfo->xdp_frags_size,
+-				   BNXT_RX_PAGE_SIZE * sinfo->nr_frags,
++				   BNXT_RX_PAGE_SIZE * num_frags,
+ 				   xdp_buff_is_frag_pfmemalloc(xdp));
+ 	return skb;
+ }
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
+index 0122782400b8a2..220285e190fcd1 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
+@@ -33,6 +33,5 @@ void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr,
+ 			      struct xdp_buff *xdp);
+ struct sk_buff *bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb,
+ 				   u8 num_frags, struct page_pool *pool,
+-				   struct xdp_buff *xdp,
+-				   struct rx_cmp_ext *rxcmp1);
++				   struct xdp_buff *xdp);
+ #endif
+diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.c b/drivers/net/ethernet/intel/ice/ice_arfs.c
+index 7cee365cc7d167..405ddd17de1bff 100644
+--- a/drivers/net/ethernet/intel/ice/ice_arfs.c
++++ b/drivers/net/ethernet/intel/ice/ice_arfs.c
+@@ -511,7 +511,7 @@ void ice_init_arfs(struct ice_vsi *vsi)
+ 	struct hlist_head *arfs_fltr_list;
+ 	unsigned int i;
+ 
+-	if (!vsi || vsi->type != ICE_VSI_PF)
++	if (!vsi || vsi->type != ICE_VSI_PF || ice_is_arfs_active(vsi))
+ 		return;
+ 
+ 	arfs_fltr_list = kcalloc(ICE_MAX_ARFS_LIST, sizeof(*arfs_fltr_list),
+diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
+index d649c197cf673f..ed21d7f55ac11b 100644
+--- a/drivers/net/ethernet/intel/ice/ice_eswitch.c
++++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
+@@ -49,9 +49,6 @@ static int ice_eswitch_setup_env(struct ice_pf *pf)
+ 	if (vlan_ops->dis_rx_filtering(uplink_vsi))
+ 		goto err_vlan_filtering;
+ 
+-	if (ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_set_allow_override))
+-		goto err_override_uplink;
+-
+ 	if (ice_vsi_update_local_lb(uplink_vsi, true))
+ 		goto err_override_local_lb;
+ 
+@@ -63,8 +60,6 @@ static int ice_eswitch_setup_env(struct ice_pf *pf)
+ err_up:
+ 	ice_vsi_update_local_lb(uplink_vsi, false);
+ err_override_local_lb:
+-	ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
+-err_override_uplink:
+ 	vlan_ops->ena_rx_filtering(uplink_vsi);
+ err_vlan_filtering:
+ 	ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, false,
+@@ -275,7 +270,6 @@ static void ice_eswitch_release_env(struct ice_pf *pf)
+ 	vlan_ops = ice_get_compat_vsi_vlan_ops(uplink_vsi);
+ 
+ 	ice_vsi_update_local_lb(uplink_vsi, false);
+-	ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
+ 	vlan_ops->ena_rx_filtering(uplink_vsi);
+ 	ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, false,
+ 			 ICE_FLTR_TX);
+diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c
+index 1ccb572ce285df..22371011c24928 100644
+--- a/drivers/net/ethernet/intel/ice/ice_lag.c
++++ b/drivers/net/ethernet/intel/ice/ice_lag.c
+@@ -1000,6 +1000,28 @@ static void ice_lag_link(struct ice_lag *lag)
+ 	netdev_info(lag->netdev, "Shared SR-IOV resources in bond are active\n");
+ }
+ 
++/**
++ * ice_lag_config_eswitch - configure eswitch to work with LAG
++ * @lag: lag info struct
++ * @netdev: active network interface device struct
++ *
++ * Updates all port representors in eswitch to use @netdev for Tx.
++ *
++ * Configures the netdev to keep dst metadata (also used in representor Tx).
++ * This is required for an uplink without switchdev mode configured.
++ */
++static void ice_lag_config_eswitch(struct ice_lag *lag,
++				   struct net_device *netdev)
++{
++	struct ice_repr *repr;
++	unsigned long id;
++
++	xa_for_each(&lag->pf->eswitch.reprs, id, repr)
++		repr->dst->u.port_info.lower_dev = netdev;
++
++	netif_keep_dst(netdev);
++}
++
+ /**
+  * ice_lag_unlink - handle unlink event
+  * @lag: LAG info struct
+@@ -1021,6 +1043,9 @@ static void ice_lag_unlink(struct ice_lag *lag)
+ 			ice_lag_move_vf_nodes(lag, act_port, pri_port);
+ 		lag->primary = false;
+ 		lag->active_port = ICE_LAG_INVALID_PORT;
++
++		/* Config primary's eswitch back to normal operation. */
++		ice_lag_config_eswitch(lag, lag->netdev);
+ 	} else {
+ 		struct ice_lag *primary_lag;
+ 
+@@ -1419,6 +1444,7 @@ static void ice_lag_monitor_active(struct ice_lag *lag, void *ptr)
+ 				ice_lag_move_vf_nodes(lag, prim_port,
+ 						      event_port);
+ 			lag->active_port = event_port;
++			ice_lag_config_eswitch(lag, event_netdev);
+ 			return;
+ 		}
+ 
+@@ -1428,6 +1454,7 @@ static void ice_lag_monitor_active(struct ice_lag *lag, void *ptr)
+ 		/* new active port */
+ 		ice_lag_move_vf_nodes(lag, lag->active_port, event_port);
+ 		lag->active_port = event_port;
++		ice_lag_config_eswitch(lag, event_netdev);
+ 	} else {
+ 		/* port not set as currently active (e.g. new active port
+ 		 * has already claimed the nodes and filters
+diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
+index d4e74f96a8ad5d..121a5ad5c8e10b 100644
+--- a/drivers/net/ethernet/intel/ice/ice_lib.c
++++ b/drivers/net/ethernet/intel/ice/ice_lib.c
+@@ -3928,24 +3928,6 @@ void ice_vsi_ctx_clear_antispoof(struct ice_vsi_ctx *ctx)
+ 				 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
+ }
+ 
+-/**
+- * ice_vsi_ctx_set_allow_override - allow destination override on VSI
+- * @ctx: pointer to VSI ctx structure
+- */
+-void ice_vsi_ctx_set_allow_override(struct ice_vsi_ctx *ctx)
+-{
+-	ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
+-}
+-
+-/**
+- * ice_vsi_ctx_clear_allow_override - turn off destination override on VSI
+- * @ctx: pointer to VSI ctx structure
+- */
+-void ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx)
+-{
+-	ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
+-}
+-
+ /**
+  * ice_vsi_update_local_lb - update sw block in VSI with local loopback bit
+  * @vsi: pointer to VSI structure
+diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
+index 1a6cfc8693ce47..2b27998fd1be36 100644
+--- a/drivers/net/ethernet/intel/ice/ice_lib.h
++++ b/drivers/net/ethernet/intel/ice/ice_lib.h
+@@ -106,10 +106,6 @@ ice_vsi_update_security(struct ice_vsi *vsi, void (*fill)(struct ice_vsi_ctx *))
+ void ice_vsi_ctx_set_antispoof(struct ice_vsi_ctx *ctx);
+ 
+ void ice_vsi_ctx_clear_antispoof(struct ice_vsi_ctx *ctx);
+-
+-void ice_vsi_ctx_set_allow_override(struct ice_vsi_ctx *ctx);
+-
+-void ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx);
+ int ice_vsi_update_local_lb(struct ice_vsi *vsi, bool set);
+ int ice_vsi_add_vlan_zero(struct ice_vsi *vsi);
+ int ice_vsi_del_vlan_zero(struct ice_vsi *vsi);
+diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
+index f12fb3a2b6ad94..f522dd42093a9f 100644
+--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
++++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
+@@ -2424,7 +2424,9 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
+ 					ICE_TXD_CTX_QW1_CMD_S);
+ 
+ 	ice_tstamp(tx_ring, skb, first, &offload);
+-	if (ice_is_switchdev_running(vsi->back) && vsi->type != ICE_VSI_SF)
++	if ((ice_is_switchdev_running(vsi->back) ||
++	     ice_lag_is_switchdev_running(vsi->back)) &&
++	    vsi->type != ICE_VSI_SF)
+ 		ice_eswitch_set_target_vsi(skb, &offload);
+ 
+ 	if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) {
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+index 98d4306929f3ed..a2cf3e79693dd8 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+@@ -46,6 +46,9 @@ mlx5_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
+ 	u32 running_fw, stored_fw;
+ 	int err;
+ 
++	if (!mlx5_core_is_pf(dev))
++		return 0;
++
+ 	err = devlink_info_version_fixed_put(req, "fw.psid", dev->board_id);
+ 	if (err)
+ 		return err;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
+index 5d128c5b4529af..0f5d7ea8956f72 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
+@@ -48,15 +48,10 @@ mlx5_esw_bridge_lag_rep_get(struct net_device *dev, struct mlx5_eswitch *esw)
+ 	struct list_head *iter;
+ 
+ 	netdev_for_each_lower_dev(dev, lower, iter) {
+-		struct mlx5_core_dev *mdev;
+-		struct mlx5e_priv *priv;
+-
+ 		if (!mlx5e_eswitch_rep(lower))
+ 			continue;
+ 
+-		priv = netdev_priv(lower);
+-		mdev = priv->mdev;
+-		if (mlx5_lag_is_shared_fdb(mdev) && mlx5_esw_bridge_dev_same_esw(lower, esw))
++		if (mlx5_esw_bridge_dev_same_esw(lower, esw))
+ 			return lower;
+ 	}
+ 
+@@ -125,7 +120,7 @@ static bool mlx5_esw_bridge_is_local(struct net_device *dev, struct net_device *
+ 	priv = netdev_priv(rep);
+ 	mdev = priv->mdev;
+ 	if (netif_is_lag_master(dev))
+-		return mlx5_lag_is_shared_fdb(mdev) && mlx5_lag_is_master(mdev);
++		return mlx5_lag_is_master(mdev);
+ 	return true;
+ }
+ 
+@@ -455,6 +450,9 @@ static int mlx5_esw_bridge_switchdev_event(struct notifier_block *nb,
+ 	if (!rep)
+ 		return NOTIFY_DONE;
+ 
++	if (netif_is_lag_master(dev) && !mlx5_lag_is_shared_fdb(esw->dev))
++		return NOTIFY_DONE;
++
+ 	switch (event) {
+ 	case SWITCHDEV_FDB_ADD_TO_BRIDGE:
+ 		fdb_info = container_of(info,
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 62b8a7c1c6b54a..1c087fa1ca269b 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -5099,11 +5099,9 @@ static int mlx5e_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+ 	struct mlx5e_priv *priv = netdev_priv(dev);
+ 	struct mlx5_core_dev *mdev = priv->mdev;
+ 	u8 mode, setting;
+-	int err;
+ 
+-	err = mlx5_eswitch_get_vepa(mdev->priv.eswitch, &setting);
+-	if (err)
+-		return err;
++	if (mlx5_eswitch_get_vepa(mdev->priv.eswitch, &setting))
++		return -EOPNOTSUPP;
+ 	mode = setting ? BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB;
+ 	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
+ 				       mode,
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+index 68cb86b37e561f..4241cf07a0306b 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+@@ -887,8 +887,8 @@ static void comp_irq_release_sf(struct mlx5_core_dev *dev, u16 vecidx)
+ 
+ static int comp_irq_request_sf(struct mlx5_core_dev *dev, u16 vecidx)
+ {
++	struct mlx5_irq_pool *pool = mlx5_irq_table_get_comp_irq_pool(dev);
+ 	struct mlx5_eq_table *table = dev->priv.eq_table;
+-	struct mlx5_irq_pool *pool = mlx5_irq_pool_get(dev);
+ 	struct irq_affinity_desc af_desc = {};
+ 	struct mlx5_irq *irq;
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
+index 1477db7f5307e0..2691d88cdee1f7 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
+@@ -175,7 +175,7 @@ mlx5_irq_affinity_request(struct mlx5_core_dev *dev, struct mlx5_irq_pool *pool,
+ 
+ void mlx5_irq_affinity_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *irq)
+ {
+-	struct mlx5_irq_pool *pool = mlx5_irq_pool_get(dev);
++	struct mlx5_irq_pool *pool = mlx5_irq_get_pool(irq);
+ 	int cpu;
+ 
+ 	cpu = cpumask_first(mlx5_irq_get_affinity_mask(irq));
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+index 7f68468c2e7598..4b3da7ebd6310e 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+@@ -859,7 +859,7 @@ void mlx5_disable_lag(struct mlx5_lag *ldev)
+ 				mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
+ }
+ 
+-static bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev)
++bool mlx5_lag_shared_fdb_supported(struct mlx5_lag *ldev)
+ {
+ 	struct mlx5_core_dev *dev;
+ 	int i;
+@@ -937,7 +937,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
+ 	}
+ 
+ 	if (do_bond && !__mlx5_lag_is_active(ldev)) {
+-		bool shared_fdb = mlx5_shared_fdb_supported(ldev);
++		bool shared_fdb = mlx5_lag_shared_fdb_supported(ldev);
+ 
+ 		roce_lag = mlx5_lag_is_roce_lag(ldev);
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
+index 50fcb1eee57483..48a5f3e7b91a85 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
+@@ -92,6 +92,7 @@ mlx5_lag_is_ready(struct mlx5_lag *ldev)
+ 	return test_bit(MLX5_LAG_FLAG_NDEVS_READY, &ldev->state_flags);
+ }
+ 
++bool mlx5_lag_shared_fdb_supported(struct mlx5_lag *ldev);
+ bool mlx5_lag_check_prereq(struct mlx5_lag *ldev);
+ void mlx5_modify_lag(struct mlx5_lag *ldev,
+ 		     struct lag_tracker *tracker);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
+index 571ea26edd0cab..2381a0eec19006 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
+@@ -81,7 +81,8 @@ static int enable_mpesw(struct mlx5_lag *ldev)
+ 	if (mlx5_eswitch_mode(dev0) != MLX5_ESWITCH_OFFLOADS ||
+ 	    !MLX5_CAP_PORT_SELECTION(dev0, port_select_flow_table) ||
+ 	    !MLX5_CAP_GEN(dev0, create_lag_when_not_master_up) ||
+-	    !mlx5_lag_check_prereq(ldev))
++	    !mlx5_lag_check_prereq(ldev) ||
++	    !mlx5_lag_shared_fdb_supported(ldev))
+ 		return -EOPNOTSUPP;
+ 
+ 	err = mlx5_mpesw_metadata_set(ldev);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
+index a80ecb672f33dd..711d14dea2485f 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
+@@ -196,6 +196,11 @@ mlx5_chains_create_table(struct mlx5_fs_chains *chains,
+ 		ns = mlx5_get_flow_namespace(chains->dev, chains->ns);
+ 	}
+ 
++	if (!ns) {
++		mlx5_core_warn(chains->dev, "Failed to get flow namespace\n");
++		return ERR_PTR(-EOPNOTSUPP);
++	}
++
+ 	ft_attr.autogroup.num_reserved_entries = 2;
+ 	ft_attr.autogroup.max_num_groups = chains->group_num;
+ 	ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
+index 0881e961d8b177..586688da9940ee 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
+@@ -10,12 +10,15 @@
+ 
+ struct mlx5_irq;
+ struct cpu_rmap;
++struct mlx5_irq_pool;
+ 
+ int mlx5_irq_table_init(struct mlx5_core_dev *dev);
+ void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev);
+ int mlx5_irq_table_create(struct mlx5_core_dev *dev);
+ void mlx5_irq_table_destroy(struct mlx5_core_dev *dev);
+ void mlx5_irq_table_free_irqs(struct mlx5_core_dev *dev);
++struct mlx5_irq_pool *
++mlx5_irq_table_get_comp_irq_pool(struct mlx5_core_dev *dev);
+ int mlx5_irq_table_get_num_comp(struct mlx5_irq_table *table);
+ int mlx5_irq_table_get_sfs_vec(struct mlx5_irq_table *table);
+ struct mlx5_irq_table *mlx5_irq_table_get(struct mlx5_core_dev *dev);
+@@ -38,7 +41,6 @@ struct cpumask *mlx5_irq_get_affinity_mask(struct mlx5_irq *irq);
+ int mlx5_irq_get_index(struct mlx5_irq *irq);
+ int mlx5_irq_get_irq(const struct mlx5_irq *irq);
+ 
+-struct mlx5_irq_pool;
+ #ifdef CONFIG_MLX5_SF
+ struct mlx5_irq *mlx5_irq_affinity_irq_request_auto(struct mlx5_core_dev *dev,
+ 						    struct cpumask *used_cpus, u16 vecidx);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+index d9362eabc6a1ca..2c5f850c31f683 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+@@ -378,6 +378,11 @@ int mlx5_irq_get_index(struct mlx5_irq *irq)
+ 	return irq->map.index;
+ }
+ 
++struct mlx5_irq_pool *mlx5_irq_get_pool(struct mlx5_irq *irq)
++{
++	return irq->pool;
++}
++
+ /* irq_pool API */
+ 
+ /* requesting an irq from a given pool according to given index */
+@@ -405,18 +410,20 @@ static struct mlx5_irq_pool *sf_ctrl_irq_pool_get(struct mlx5_irq_table *irq_tab
+ 	return irq_table->sf_ctrl_pool;
+ }
+ 
+-static struct mlx5_irq_pool *sf_irq_pool_get(struct mlx5_irq_table *irq_table)
++static struct mlx5_irq_pool *
++sf_comp_irq_pool_get(struct mlx5_irq_table *irq_table)
+ {
+ 	return irq_table->sf_comp_pool;
+ }
+ 
+-struct mlx5_irq_pool *mlx5_irq_pool_get(struct mlx5_core_dev *dev)
++struct mlx5_irq_pool *
++mlx5_irq_table_get_comp_irq_pool(struct mlx5_core_dev *dev)
+ {
+ 	struct mlx5_irq_table *irq_table = mlx5_irq_table_get(dev);
+ 	struct mlx5_irq_pool *pool = NULL;
+ 
+ 	if (mlx5_core_is_sf(dev))
+-		pool = sf_irq_pool_get(irq_table);
++		pool = sf_comp_irq_pool_get(irq_table);
+ 
+ 	/* In some configs, there won't be a pool of SFs IRQs. Hence, returning
+ 	 * the PF IRQs pool in case the SF pool doesn't exist.
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h
+index c4d377f8df3089..cc064425fe1608 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h
+@@ -28,7 +28,6 @@ struct mlx5_irq_pool {
+ 	struct mlx5_core_dev *dev;
+ };
+ 
+-struct mlx5_irq_pool *mlx5_irq_pool_get(struct mlx5_core_dev *dev);
+ static inline bool mlx5_irq_pool_is_sf_pool(struct mlx5_irq_pool *pool)
+ {
+ 	return !strncmp("mlx5_sf", pool->name, strlen("mlx5_sf"));
+@@ -40,5 +39,6 @@ struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i,
+ int mlx5_irq_get_locked(struct mlx5_irq *irq);
+ int mlx5_irq_read_locked(struct mlx5_irq *irq);
+ int mlx5_irq_put(struct mlx5_irq *irq);
++struct mlx5_irq_pool *mlx5_irq_get_pool(struct mlx5_irq *irq);
+ 
+ #endif /* __PCI_IRQ_H__ */
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.h
+index 4fe8c32d8fbe86..681fb73f00bbf3 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.h
+@@ -16,8 +16,8 @@ struct mlx5hws_bwc_matcher {
+ 	struct mlx5hws_matcher *matcher;
+ 	struct mlx5hws_match_template *mt;
+ 	struct mlx5hws_action_template *at[MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM];
++	u32 priority;
+ 	u8 num_of_at;
+-	u16 priority;
+ 	u8 size_log;
+ 	u32 num_of_rules; /* atomically accessed */
+ 	struct list_head *rules;
+diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+index f9dd50152b1e3e..28d24d59efb84f 100644
+--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+@@ -454,8 +454,10 @@ static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter,
+ 
+ 	num_vlans = sriov->num_allowed_vlans;
+ 	sriov->allowed_vlans = kcalloc(num_vlans, sizeof(u16), GFP_KERNEL);
+-	if (!sriov->allowed_vlans)
++	if (!sriov->allowed_vlans) {
++		qlcnic_sriov_free_vlans(adapter);
+ 		return -ENOMEM;
++	}
+ 
+ 	vlans = (u16 *)&cmd->rsp.arg[3];
+ 	for (i = 0; i < num_vlans; i++)
+@@ -2167,8 +2169,10 @@ int qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *adapter)
+ 		vf = &sriov->vf_info[i];
+ 		vf->sriov_vlans = kcalloc(sriov->num_allowed_vlans,
+ 					  sizeof(*vf->sriov_vlans), GFP_KERNEL);
+-		if (!vf->sriov_vlans)
++		if (!vf->sriov_vlans) {
++			qlcnic_sriov_free_vlans(adapter);
+ 			return -ENOMEM;
++		}
+ 	}
+ 
+ 	return 0;
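Both qlcnic hunks above make the same fix: when one allocation inside a loop fails, unwind everything allocated so far instead of returning with a half-built array for the caller to trip over. A self-contained sketch of the pattern (plain C with hypothetical types, not the driver's structs):

#include <stdlib.h>

struct vf_info { unsigned short *vlans; };

/* Free every per-VF array; entries never allocated are NULL (the
 * caller zero-initializes), and free(NULL) is a no-op.  This mirrors
 * qlcnic_sriov_free_vlans() walking all VFs. */
static void free_all(struct vf_info *vfs, int n)
{
	for (int i = 0; i < n; i++) {
		free(vfs[i].vlans);
		vfs[i].vlans = NULL;
	}
}

/* Allocate a VLAN array per VF; on any failure, unwind like the fix
 * above so no partially-built state escapes. */
static int alloc_all(struct vf_info *vfs, int n, int vlans_per_vf)
{
	for (int i = 0; i < n; i++) {
		vfs[i].vlans = calloc(vlans_per_vf, sizeof(*vfs[i].vlans));
		if (!vfs[i].vlans) {
			free_all(vfs, n);
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	struct vf_info vfs[4] = { 0 };

	if (alloc_all(vfs, 4, 8))
		return 1;
	free_all(vfs, 4);
	return 0;
}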
+diff --git a/drivers/net/ethernet/realtek/rtase/rtase_main.c b/drivers/net/ethernet/realtek/rtase/rtase_main.c
+index 14ffd45e9a25a7..86dd034fdddc52 100644
+--- a/drivers/net/ethernet/realtek/rtase/rtase_main.c
++++ b/drivers/net/ethernet/realtek/rtase/rtase_main.c
+@@ -1501,7 +1501,10 @@ static void rtase_wait_for_quiescence(const struct net_device *dev)
+ static void rtase_sw_reset(struct net_device *dev)
+ {
+ 	struct rtase_private *tp = netdev_priv(dev);
++	struct rtase_ring *ring, *tmp;
++	struct rtase_int_vector *ivec;
+ 	int ret;
++	u32 i;
+ 
+ 	netif_stop_queue(dev);
+ 	netif_carrier_off(dev);
+@@ -1512,6 +1515,13 @@ static void rtase_sw_reset(struct net_device *dev)
+ 	rtase_tx_clear(tp);
+ 	rtase_rx_clear(tp);
+ 
++	for (i = 0; i < tp->int_nums; i++) {
++		ivec = &tp->int_vector[i];
++		list_for_each_entry_safe(ring, tmp, &ivec->ring_list,
++					 ring_entry)
++			list_del(&ring->ring_entry);
++	}
++
+ 	ret = rtase_init_ring(dev);
+ 	if (ret) {
+ 		netdev_err(dev, "unable to init ring\n");
+diff --git a/drivers/net/mctp/mctp-i2c.c b/drivers/net/mctp/mctp-i2c.c
+index e70fb66879941f..6622de48fc9e76 100644
+--- a/drivers/net/mctp/mctp-i2c.c
++++ b/drivers/net/mctp/mctp-i2c.c
+@@ -584,6 +584,7 @@ static int mctp_i2c_header_create(struct sk_buff *skb, struct net_device *dev,
+ 	struct mctp_i2c_hdr *hdr;
+ 	struct mctp_hdr *mhdr;
+ 	u8 lldst, llsrc;
++	int rc;
+ 
+ 	if (len > MCTP_I2C_MAXMTU)
+ 		return -EMSGSIZE;
+@@ -594,6 +595,10 @@ static int mctp_i2c_header_create(struct sk_buff *skb, struct net_device *dev,
+ 	lldst = *((u8 *)daddr);
+ 	llsrc = *((u8 *)saddr);
+ 
++	rc = skb_cow_head(skb, sizeof(struct mctp_i2c_hdr));
++	if (rc)
++		return rc;
++
+ 	skb_push(skb, sizeof(struct mctp_i2c_hdr));
+ 	skb_reset_mac_header(skb);
+ 	hdr = (void *)skb_mac_header(skb);
+diff --git a/drivers/net/mctp/mctp-i3c.c b/drivers/net/mctp/mctp-i3c.c
+index a2b15cddf46e6b..47513ebbc68079 100644
+--- a/drivers/net/mctp/mctp-i3c.c
++++ b/drivers/net/mctp/mctp-i3c.c
+@@ -506,10 +506,15 @@ static int mctp_i3c_header_create(struct sk_buff *skb, struct net_device *dev,
+ 	   const void *saddr, unsigned int len)
+ {
+ 	struct mctp_i3c_internal_hdr *ihdr;
++	int rc;
+ 
+ 	if (!daddr || !saddr)
+ 		return -EINVAL;
+ 
++	rc = skb_cow_head(skb, sizeof(struct mctp_i3c_internal_hdr));
++	if (rc)
++		return rc;
++
+ 	skb_push(skb, sizeof(struct mctp_i3c_internal_hdr));
+ 	skb_reset_mac_header(skb);
+ 	ihdr = (void *)skb_mac_header(skb);
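Both MCTP hunks make the same change: call skb_cow_head() to guarantee headroom before skb_push() prepends the link-layer header, since a cloned or tightly-allocated skb may not have room. As a rough userspace analogy (plain C with made-up names, not the kernel skb API):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct pktbuf {
	unsigned char *mem;   /* start of allocation */
	size_t headroom;      /* free bytes before payload */
	size_t len;           /* payload bytes */
};

/* Analogue of skb_cow_head(): ensure at least 'need' bytes of headroom,
 * reallocating and shifting the payload if there are not. */
static int buf_cow_head(struct pktbuf *b, size_t need)
{
	unsigned char *bigger;

	if (b->headroom >= need)
		return 0;

	bigger = malloc(need + b->len);
	if (!bigger)
		return -1;
	memcpy(bigger + need, b->mem + b->headroom, b->len);
	free(b->mem);
	b->mem = bigger;
	b->headroom = need;
	return 0;
}

/* Analogue of skb_push(): move the payload start back by 'hdrlen'.
 * Only safe once buf_cow_head() has guaranteed the room. */
static unsigned char *buf_push(struct pktbuf *b, size_t hdrlen)
{
	b->headroom -= hdrlen;
	b->len += hdrlen;
	return b->mem + b->headroom;
}

int main(void)
{
	struct pktbuf b = { .mem = malloc(4), .headroom = 0, .len = 4 };
	unsigned char *hdr;

	if (!b.mem)
		return 1;
	memcpy(b.mem, "data", 4);
	if (buf_cow_head(&b, 2))  /* without this, buf_push() would underflow */
		return 1;
	hdr = buf_push(&b, 2);
	hdr[0] = 0xAA;            /* pretend link-layer header */
	hdr[1] = 0x55;
	printf("len=%zu first=%02x\n", b.len, hdr[0]);
	free(b.mem);
	return 0;
}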
+diff --git a/drivers/net/phy/nxp-c45-tja11xx.c b/drivers/net/phy/nxp-c45-tja11xx.c
+index ae43103c76cbd8..9788b820c6be72 100644
+--- a/drivers/net/phy/nxp-c45-tja11xx.c
++++ b/drivers/net/phy/nxp-c45-tja11xx.c
+@@ -21,6 +21,11 @@
+ #define PHY_ID_TJA_1103			0x001BB010
+ #define PHY_ID_TJA_1120			0x001BB031
+ 
++#define VEND1_DEVICE_ID3		0x0004
++#define TJA1120_DEV_ID3_SILICON_VERSION	GENMASK(15, 12)
++#define TJA1120_DEV_ID3_SAMPLE_TYPE	GENMASK(11, 8)
++#define DEVICE_ID3_SAMPLE_TYPE_R	0x9
++
+ #define VEND1_DEVICE_CONTROL		0x0040
+ #define DEVICE_CONTROL_RESET		BIT(15)
+ #define DEVICE_CONTROL_CONFIG_GLOBAL_EN	BIT(14)
+@@ -108,6 +113,9 @@
+ #define MII_BASIC_CONFIG_RMII		0x5
+ #define MII_BASIC_CONFIG_MII		0x4
+ 
++#define VEND1_SGMII_BASIC_CONTROL	0xB000
++#define SGMII_LPM			BIT(11)
++
+ #define VEND1_SYMBOL_ERROR_CNT_XTD	0x8351
+ #define EXTENDED_CNT_EN			BIT(15)
+ #define VEND1_MONITOR_STATUS		0xAC80
+@@ -1583,6 +1591,63 @@ static int nxp_c45_set_phy_mode(struct phy_device *phydev)
+ 	return 0;
+ }
+ 
++/* Errata: ES_TJA1120 and ES_TJA1121 Rev. 1.0 — 28 November 2024 Section 3.1 & 3.2 */
++static void nxp_c45_tja1120_errata(struct phy_device *phydev)
++{
++	bool macsec_ability, sgmii_ability;
++	int silicon_version, sample_type;
++	int phy_abilities;
++	int ret = 0;
++
++	ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_ID3);
++	if (ret < 0)
++		return;
++
++	sample_type = FIELD_GET(TJA1120_DEV_ID3_SAMPLE_TYPE, ret);
++	if (sample_type != DEVICE_ID3_SAMPLE_TYPE_R)
++		return;
++
++	silicon_version = FIELD_GET(TJA1120_DEV_ID3_SILICON_VERSION, ret);
++
++	phy_abilities = phy_read_mmd(phydev, MDIO_MMD_VEND1,
++				     VEND1_PORT_ABILITIES);
++	macsec_ability = !!(phy_abilities & MACSEC_ABILITY);
++	sgmii_ability = !!(phy_abilities & SGMII_ABILITY);
++	if ((!macsec_ability && silicon_version == 2) ||
++	    (macsec_ability && silicon_version == 1)) {
++		/* TJA1120/TJA1121 PHY configuration errata workaround.
++		 * Apply the PHY write sequence before link up.
++		 */
++		if (!macsec_ability) {
++			phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 0x4b95);
++			phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 0xf3cd);
++		} else {
++			phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 0x89c7);
++			phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 0x0893);
++		}
++
++		phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x0476, 0x58a0);
++
++		phy_write_mmd(phydev, MDIO_MMD_PMAPMD, 0x8921, 0xa3a);
++		phy_write_mmd(phydev, MDIO_MMD_PMAPMD, 0x89F1, 0x16c1);
++
++		phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 0x0);
++		phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 0x0);
++
++		if (sgmii_ability) {
++			/* TJA1120B/TJA1121B SGMII PCS restart errata workaround.
++			 * Put SGMII PCS into power down mode and back up.
++			 */
++			phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
++					 VEND1_SGMII_BASIC_CONTROL,
++					 SGMII_LPM);
++			phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
++					   VEND1_SGMII_BASIC_CONTROL,
++					   SGMII_LPM);
++		}
++	}
++}
++
+ static int nxp_c45_config_init(struct phy_device *phydev)
+ {
+ 	int ret;
+@@ -1599,6 +1664,9 @@ static int nxp_c45_config_init(struct phy_device *phydev)
+ 	phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 1);
+ 	phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 2);
+ 
++	if (phy_id_compare(phydev->phy_id, PHY_ID_TJA_1120, GENMASK(31, 4)))
++		nxp_c45_tja1120_errata(phydev);
++
+ 	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONFIG,
+ 			 PHY_CONFIG_AUTO);
+ 
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+index f30b0fc8eca97d..2b9a684cf61d57 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+ /*
+- * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
++ * Copyright (C) 2012-2014, 2018-2025 Intel Corporation
+  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
+  * Copyright (C) 2016-2017 Intel Deutschland GmbH
+  */
+@@ -422,6 +422,8 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
+ 	/* if reached this point, Alive notification was received */
+ 	iwl_mei_alive_notif(true);
+ 
++	iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);
++
+ 	ret = iwl_pnvm_load(mvm->trans, &mvm->notif_wait,
+ 			    &mvm->fw->ucode_capa);
+ 	if (ret) {
+@@ -430,8 +432,6 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
+ 		return ret;
+ 	}
+ 
+-	iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);
+-
+ 	/*
+ 	 * Note: all the queues are enabled as part of the interface
+ 	 * initialization, but in firmware restart scenarios they
+diff --git a/drivers/net/wwan/mhi_wwan_mbim.c b/drivers/net/wwan/mhi_wwan_mbim.c
+index d5a9360323d29d..8755c5e6a65b30 100644
+--- a/drivers/net/wwan/mhi_wwan_mbim.c
++++ b/drivers/net/wwan/mhi_wwan_mbim.c
+@@ -220,7 +220,7 @@ static int mbim_rx_verify_nth16(struct mhi_mbim_context *mbim, struct sk_buff *s
+ 	if (mbim->rx_seq + 1 != le16_to_cpu(nth16->wSequence) &&
+ 	    (mbim->rx_seq || le16_to_cpu(nth16->wSequence)) &&
+ 	    !(mbim->rx_seq == 0xffff && !le16_to_cpu(nth16->wSequence))) {
+-		net_err_ratelimited("sequence number glitch prev=%d curr=%d\n",
++		net_dbg_ratelimited("sequence number glitch prev=%d curr=%d\n",
+ 				    mbim->rx_seq, le16_to_cpu(nth16->wSequence));
+ 	}
+ 	mbim->rx_seq = le16_to_cpu(nth16->wSequence);
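For reference, the check being downgraded from net_err_ratelimited() to net_dbg_ratelimited() flags a glitch only when the sequence is non-consecutive, exempting both the all-zero restart case and the legitimate 16-bit wrap from 0xffff to 0. A standalone sketch of the same predicate:

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the wSequence check in mbim_rx_verify_nth16().  Note that
 * prev + 1 is computed as int, so prev == 0xffff gives 0x10000, which
 * is why the wrap case needs its own explicit exemption. */
static bool seq_glitch(unsigned int prev, unsigned int curr)
{
	return prev + 1 != curr &&
	       (prev || curr) &&
	       !(prev == 0xffff && curr == 0);
}

int main(void)
{
	printf("%d\n", seq_glitch(5, 6));      /* 0: consecutive       */
	printf("%d\n", seq_glitch(5, 9));      /* 1: jump -> glitch    */
	printf("%d\n", seq_glitch(0xffff, 0)); /* 0: legitimate wrap   */
	printf("%d\n", seq_glitch(0, 0));      /* 0: restart special   */
	return 0;
}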
+diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c
+index b1387dc459a323..e79a0adf13950b 100644
+--- a/drivers/nvme/host/apple.c
++++ b/drivers/nvme/host/apple.c
+@@ -599,7 +599,8 @@ static inline void apple_nvme_handle_cqe(struct apple_nvme_queue *q,
+ 	}
+ 
+ 	if (!nvme_try_complete_req(req, cqe->status, cqe->result) &&
+-	    !blk_mq_add_to_batch(req, iob, nvme_req(req)->status,
++	    !blk_mq_add_to_batch(req, iob,
++				 nvme_req(req)->status != NVME_SC_SUCCESS,
+ 				 apple_nvme_complete_batch))
+ 		apple_nvme_complete_rq(req);
+ }
+@@ -1518,6 +1519,7 @@ static struct apple_nvme *apple_nvme_alloc(struct platform_device *pdev)
+ 
+ 	return anv;
+ put_dev:
++	apple_nvme_detach_genpd(anv);
+ 	put_device(anv->dev);
+ 	return ERR_PTR(ret);
+ }
+@@ -1551,6 +1553,7 @@ static int apple_nvme_probe(struct platform_device *pdev)
+ 	nvme_uninit_ctrl(&anv->ctrl);
+ out_put_ctrl:
+ 	nvme_put_ctrl(&anv->ctrl);
++	apple_nvme_detach_genpd(anv);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 8da50df56b0795..9bdf6fc53697c0 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -429,6 +429,12 @@ static inline void nvme_end_req_zoned(struct request *req)
+ 
+ static inline void __nvme_end_req(struct request *req)
+ {
++	if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET))) {
++		if (blk_rq_is_passthrough(req))
++			nvme_log_err_passthru(req);
++		else
++			nvme_log_error(req);
++	}
+ 	nvme_end_req_zoned(req);
+ 	nvme_trace_bio_complete(req);
+ 	if (req->cmd_flags & REQ_NVME_MPATH)
+@@ -439,12 +445,6 @@ void nvme_end_req(struct request *req)
+ {
+ 	blk_status_t status = nvme_error_status(nvme_req(req)->status);
+ 
+-	if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET))) {
+-		if (blk_rq_is_passthrough(req))
+-			nvme_log_err_passthru(req);
+-		else
+-			nvme_log_error(req);
+-	}
+ 	__nvme_end_req(req);
+ 	blk_mq_end_request(req, status);
+ }
+@@ -562,8 +562,6 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
+ 	switch (new_state) {
+ 	case NVME_CTRL_LIVE:
+ 		switch (old_state) {
+-		case NVME_CTRL_NEW:
+-		case NVME_CTRL_RESETTING:
+ 		case NVME_CTRL_CONNECTING:
+ 			changed = true;
+ 			fallthrough;
+diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
+index 682234da2fabe0..7c13a400071e65 100644
+--- a/drivers/nvme/host/fc.c
++++ b/drivers/nvme/host/fc.c
+@@ -786,49 +786,8 @@ nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl)
+ 		"NVME-FC{%d}: controller connectivity lost. Awaiting "
+ 		"Reconnect", ctrl->cnum);
+ 
+-	switch (nvme_ctrl_state(&ctrl->ctrl)) {
+-	case NVME_CTRL_NEW:
+-	case NVME_CTRL_LIVE:
+-		/*
+-		 * Schedule a controller reset. The reset will terminate the
+-		 * association and schedule the reconnect timer.  Reconnects
+-		 * will be attempted until either the ctlr_loss_tmo
+-		 * (max_retries * connect_delay) expires or the remoteport's
+-		 * dev_loss_tmo expires.
+-		 */
+-		if (nvme_reset_ctrl(&ctrl->ctrl)) {
+-			dev_warn(ctrl->ctrl.device,
+-				"NVME-FC{%d}: Couldn't schedule reset.\n",
+-				ctrl->cnum);
+-			nvme_delete_ctrl(&ctrl->ctrl);
+-		}
+-		break;
+-
+-	case NVME_CTRL_CONNECTING:
+-		/*
+-		 * The association has already been terminated and the
+-		 * controller is attempting reconnects.  No need to do anything
+-		 * further.  Reconnects will be attempted until either the
+-		 * ctlr_loss_tmo (max_retries * connect_delay) expires or the
+-		 * remoteport's dev_loss_tmo expires.
+-		 */
+-		break;
+-
+-	case NVME_CTRL_RESETTING:
+-		/*
+-		 * Controller is already in the process of terminating the
+-		 * association.  No need to do anything further. The reconnect
+-		 * step will kick in naturally after the association is
+-		 * terminated.
+-		 */
+-		break;
+-
+-	case NVME_CTRL_DELETING:
+-	case NVME_CTRL_DELETING_NOIO:
+-	default:
+-		/* no action to take - let it delete */
+-		break;
+-	}
++	set_bit(ASSOC_FAILED, &ctrl->flags);
++	nvme_reset_ctrl(&ctrl->ctrl);
+ }
+ 
+ /**
+@@ -2546,7 +2505,6 @@ nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
+ 	 */
+ 	if (state == NVME_CTRL_CONNECTING) {
+ 		__nvme_fc_abort_outstanding_ios(ctrl, true);
+-		set_bit(ASSOC_FAILED, &ctrl->flags);
+ 		dev_warn(ctrl->ctrl.device,
+ 			"NVME-FC{%d}: transport error during (re)connect\n",
+ 			ctrl->cnum);
+@@ -3065,7 +3023,6 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
+ 	struct nvmefc_ls_rcv_op *disls = NULL;
+ 	unsigned long flags;
+ 	int ret;
+-	bool changed;
+ 
+ 	++ctrl->ctrl.nr_reconnects;
+ 
+@@ -3176,12 +3133,13 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
+ 	if (ret)
+ 		goto out_term_aen_ops;
+ 
+-	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
++	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE)) {
++		ret = -EIO;
++		goto out_term_aen_ops;
++	}
+ 
+ 	ctrl->ctrl.nr_reconnects = 0;
+-
+-	if (changed)
+-		nvme_start_ctrl(&ctrl->ctrl);
++	nvme_start_ctrl(&ctrl->ctrl);
+ 
+ 	return 0;	/* Success */
+ 
+@@ -3582,8 +3540,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
+ 	list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list);
+ 	spin_unlock_irqrestore(&rport->lock, flags);
+ 
+-	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING) ||
+-	    !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
++	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
+ 		dev_err(ctrl->ctrl.device,
+ 			"NVME-FC{%d}: failed to init ctrl state\n", ctrl->cnum);
+ 		goto fail_ctrl;
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index e1329d4974fd6f..1d3205f08af847 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -1131,8 +1131,9 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq,
+ 
+ 	trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail);
+ 	if (!nvme_try_complete_req(req, cqe->status, cqe->result) &&
+-	    !blk_mq_add_to_batch(req, iob, nvme_req(req)->status,
+-					nvme_pci_complete_batch))
++	    !blk_mq_add_to_batch(req, iob,
++				 nvme_req(req)->status != NVME_SC_SUCCESS,
++				 nvme_pci_complete_batch))
+ 		nvme_pci_complete_rq(req);
+ }
+ 
+@@ -3669,6 +3670,8 @@ static const struct pci_device_id nvme_id_table[] = {
+ 		.driver_data = NVME_QUIRK_BOGUS_NID, },
+ 	{ PCI_DEVICE(0x1cc1, 0x5350),   /* ADATA XPG GAMMIX S50 */
+ 		.driver_data = NVME_QUIRK_BOGUS_NID, },
++	{ PCI_DEVICE(0x1dbe, 0x5216),   /* Acer/INNOGRIT FA100/5216 NVMe SSD */
++		.driver_data = NVME_QUIRK_BOGUS_NID, },
+ 	{ PCI_DEVICE(0x1dbe, 0x5236),   /* ADATA XPG GAMMIX S70 */
+ 		.driver_data = NVME_QUIRK_BOGUS_NID, },
+ 	{ PCI_DEVICE(0x1e49, 0x0021),   /* ZHITAI TiPro5000 NVMe SSD */
+diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
+index 1afd93026f9bf0..2a4536ef618487 100644
+--- a/drivers/nvme/target/rdma.c
++++ b/drivers/nvme/target/rdma.c
+@@ -996,6 +996,27 @@ static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
+ 	nvmet_req_complete(&cmd->req, status);
+ }
+ 
++static bool nvmet_rdma_recv_not_live(struct nvmet_rdma_queue *queue,
++		struct nvmet_rdma_rsp *rsp)
++{
++	unsigned long flags;
++	bool ret = true;
++
++	spin_lock_irqsave(&queue->state_lock, flags);
++	/*
++	 * recheck queue state is not live to prevent a race condition
++	 * with RDMA_CM_EVENT_ESTABLISHED handler.
++	 */
++	if (queue->state == NVMET_RDMA_Q_LIVE)
++		ret = false;
++	else if (queue->state == NVMET_RDMA_Q_CONNECTING)
++		list_add_tail(&rsp->wait_list, &queue->rsp_wait_list);
++	else
++		nvmet_rdma_put_rsp(rsp);
++	spin_unlock_irqrestore(&queue->state_lock, flags);
++	return ret;
++}
++
+ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
+ {
+ 	struct nvmet_rdma_cmd *cmd =
+@@ -1038,17 +1059,9 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
+ 	rsp->n_rdma = 0;
+ 	rsp->invalidate_rkey = 0;
+ 
+-	if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) {
+-		unsigned long flags;
+-
+-		spin_lock_irqsave(&queue->state_lock, flags);
+-		if (queue->state == NVMET_RDMA_Q_CONNECTING)
+-			list_add_tail(&rsp->wait_list, &queue->rsp_wait_list);
+-		else
+-			nvmet_rdma_put_rsp(rsp);
+-		spin_unlock_irqrestore(&queue->state_lock, flags);
++	if (unlikely(queue->state != NVMET_RDMA_Q_LIVE) &&
++	    nvmet_rdma_recv_not_live(queue, rsp))
+ 		return;
+-	}
+ 
+ 	nvmet_rdma_handle_command(queue, rsp);
+ }
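The new helper is a double-check under the lock: the fast path peeks at queue->state without locking, and nvmet_rdma_recv_not_live() rechecks it with state_lock held because the RDMA_CM_EVENT_ESTABLISHED handler may flip the queue to LIVE in between. A minimal pthread sketch of the shape (toy names, not the nvmet types):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

enum q_state { Q_CONNECTING, Q_LIVE, Q_DEAD };

static enum q_state state = Q_CONNECTING;
static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;

/* Mirrors the recv path: recheck under the lock, because another
 * thread (the CM event handler) may have set LIVE since the unlocked
 * peek.  Returns true if the caller should defer instead of handling. */
static bool defer_if_not_live(void)
{
	bool defer = true;

	pthread_mutex_lock(&state_lock);
	if (state == Q_LIVE)
		defer = false;          /* raced with ESTABLISHED: process now */
	else if (state == Q_CONNECTING)
		printf("queued on wait list\n");
	else
		printf("dropped\n");
	pthread_mutex_unlock(&state_lock);
	return defer;
}

int main(void)
{
	if (state != Q_LIVE && defer_if_not_live())
		return 0;               /* deferred or dropped */
	printf("handle command\n");
	return 0;
}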
+diff --git a/drivers/phy/ti/phy-gmii-sel.c b/drivers/phy/ti/phy-gmii-sel.c
+index 103b266fec7717..2c2256fe5a3b6f 100644
+--- a/drivers/phy/ti/phy-gmii-sel.c
++++ b/drivers/phy/ti/phy-gmii-sel.c
+@@ -423,6 +423,12 @@ static int phy_gmii_sel_init_ports(struct phy_gmii_sel_priv *priv)
+ 	return 0;
+ }
+ 
++static const struct regmap_config phy_gmii_sel_regmap_cfg = {
++	.reg_bits = 32,
++	.val_bits = 32,
++	.reg_stride = 4,
++};
++
+ static int phy_gmii_sel_probe(struct platform_device *pdev)
+ {
+ 	struct device *dev = &pdev->dev;
+@@ -467,7 +473,14 @@ static int phy_gmii_sel_probe(struct platform_device *pdev)
+ 
+ 	priv->regmap = syscon_node_to_regmap(node->parent);
+ 	if (IS_ERR(priv->regmap)) {
+-		priv->regmap = device_node_to_regmap(node);
++		void __iomem *base;
++
++		base = devm_platform_ioremap_resource(pdev, 0);
++		if (IS_ERR(base))
++			return dev_err_probe(dev, PTR_ERR(base),
++					     "failed to get base memory resource\n");
++
++		priv->regmap = regmap_init_mmio(dev, base, &phy_gmii_sel_regmap_cfg);
+ 		if (IS_ERR(priv->regmap))
+ 			return dev_err_probe(dev, PTR_ERR(priv->regmap),
+ 					     "Failed to get syscon\n");
+diff --git a/drivers/pinctrl/bcm/pinctrl-bcm281xx.c b/drivers/pinctrl/bcm/pinctrl-bcm281xx.c
+index 73dbf29c002f39..cf6efa9c0364a1 100644
+--- a/drivers/pinctrl/bcm/pinctrl-bcm281xx.c
++++ b/drivers/pinctrl/bcm/pinctrl-bcm281xx.c
+@@ -974,7 +974,7 @@ static const struct regmap_config bcm281xx_pinctrl_regmap_config = {
+ 	.reg_bits = 32,
+ 	.reg_stride = 4,
+ 	.val_bits = 32,
+-	.max_register = BCM281XX_PIN_VC_CAM3_SDA,
++	.max_register = BCM281XX_PIN_VC_CAM3_SDA * 4,
+ };
+ 
+ static int bcm281xx_pinctrl_get_groups_count(struct pinctrl_dev *pctldev)
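The max_register fix above is an index-versus-byte-address confusion: with .reg_stride = 4, regmap addresses registers by byte offset, so the highest valid address is the pin index multiplied by the stride, not the raw index. A trivial check of the arithmetic (the index value below is hypothetical and stands in for BCM281XX_PIN_VC_CAM3_SDA):

#include <stdio.h>

/* With 32-bit registers laid out contiguously, register i lives at
 * byte offset i * stride; passing a raw index as .max_register cuts
 * the accessible range to a quarter of the block. */
int main(void)
{
	const unsigned int stride = 4;
	const unsigned int last_pin_index = 0x1b6; /* hypothetical */

	printf("wrong max_register: 0x%x\n", last_pin_index);
	printf("right max_register: 0x%x\n", last_pin_index * stride);
	return 0;
}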
+diff --git a/drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c b/drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c
+index 471f644c5eef2c..d09a5e9b2eca53 100644
+--- a/drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c
++++ b/drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c
+@@ -2374,6 +2374,9 @@ static int npcm8xx_gpio_fw(struct npcm8xx_pinctrl *pctrl)
+ 		pctrl->gpio_bank[id].gc.parent = dev;
+ 		pctrl->gpio_bank[id].gc.fwnode = child;
+ 		pctrl->gpio_bank[id].gc.label = devm_kasprintf(dev, GFP_KERNEL, "%pfw", child);
++		if (pctrl->gpio_bank[id].gc.label == NULL)
++			return -ENOMEM;
++
+ 		pctrl->gpio_bank[id].gc.dbg_show = npcmgpio_dbg_show;
+ 		pctrl->gpio_bank[id].direction_input = pctrl->gpio_bank[id].gc.direction_input;
+ 		pctrl->gpio_bank[id].gc.direction_input = npcmgpio_direction_input;
+diff --git a/drivers/platform/x86/intel/int3472/discrete.c b/drivers/platform/x86/intel/int3472/discrete.c
+index 15678508ee5019..9e69ac9cfb92ce 100644
+--- a/drivers/platform/x86/intel/int3472/discrete.c
++++ b/drivers/platform/x86/intel/int3472/discrete.c
+@@ -2,6 +2,7 @@
+ /* Author: Dan Scally <djrscally@gmail.com> */
+ 
+ #include <linux/acpi.h>
++#include <linux/array_size.h>
+ #include <linux/bitfield.h>
+ #include <linux/device.h>
+ #include <linux/gpio/consumer.h>
+@@ -55,7 +56,7 @@ static void skl_int3472_log_sensor_module_name(struct int3472_discrete_device *i
+ 
+ static int skl_int3472_fill_gpiod_lookup(struct gpiod_lookup *table_entry,
+ 					 struct acpi_resource_gpio *agpio,
+-					 const char *func, u32 polarity)
++					 const char *func, unsigned long gpio_flags)
+ {
+ 	char *path = agpio->resource_source.string_ptr;
+ 	struct acpi_device *adev;
+@@ -70,14 +71,14 @@ static int skl_int3472_fill_gpiod_lookup(struct gpiod_lookup *table_entry,
+ 	if (!adev)
+ 		return -ENODEV;
+ 
+-	*table_entry = GPIO_LOOKUP(acpi_dev_name(adev), agpio->pin_table[0], func, polarity);
++	*table_entry = GPIO_LOOKUP(acpi_dev_name(adev), agpio->pin_table[0], func, gpio_flags);
+ 
+ 	return 0;
+ }
+ 
+ static int skl_int3472_map_gpio_to_sensor(struct int3472_discrete_device *int3472,
+ 					  struct acpi_resource_gpio *agpio,
+-					  const char *func, u32 polarity)
++					  const char *func, unsigned long gpio_flags)
+ {
+ 	int ret;
+ 
+@@ -87,7 +88,7 @@ static int skl_int3472_map_gpio_to_sensor(struct int3472_discrete_device *int347
+ 	}
+ 
+ 	ret = skl_int3472_fill_gpiod_lookup(&int3472->gpios.table[int3472->n_sensor_gpios],
+-					    agpio, func, polarity);
++					    agpio, func, gpio_flags);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -100,7 +101,7 @@ static int skl_int3472_map_gpio_to_sensor(struct int3472_discrete_device *int347
+ static struct gpio_desc *
+ skl_int3472_gpiod_get_from_temp_lookup(struct int3472_discrete_device *int3472,
+ 				       struct acpi_resource_gpio *agpio,
+-				       const char *func, u32 polarity)
++				       const char *func, unsigned long gpio_flags)
+ {
+ 	struct gpio_desc *desc;
+ 	int ret;
+@@ -111,7 +112,7 @@ skl_int3472_gpiod_get_from_temp_lookup(struct int3472_discrete_device *int3472,
+ 		return ERR_PTR(-ENOMEM);
+ 
+ 	lookup->dev_id = dev_name(int3472->dev);
+-	ret = skl_int3472_fill_gpiod_lookup(&lookup->table[0], agpio, func, polarity);
++	ret = skl_int3472_fill_gpiod_lookup(&lookup->table[0], agpio, func, gpio_flags);
+ 	if (ret)
+ 		return ERR_PTR(ret);
+ 
+@@ -122,32 +123,76 @@ skl_int3472_gpiod_get_from_temp_lookup(struct int3472_discrete_device *int3472,
+ 	return desc;
+ }
+ 
+-static void int3472_get_func_and_polarity(u8 type, const char **func, u32 *polarity)
++/**
++ * struct int3472_gpio_map - Map GPIOs to whatever is expected by the
++ * sensor driver (as in DT bindings)
++ * @hid: The ACPI HID of the device without the instance number e.g. INT347E
++ * @type_from: The GPIO type from ACPI ?SDT
++ * @type_to: The assigned GPIO type, typically same as @type_from
++ * @func: The function, e.g. "enable"
++ * @polarity_low: use GPIO_ACTIVE_LOW if true,
++ * GPIO_ACTIVE_HIGH otherwise
++ */
++struct int3472_gpio_map {
++	const char *hid;
++	u8 type_from;
++	u8 type_to;
++	bool polarity_low;
++	const char *func;
++};
++
++static const struct int3472_gpio_map int3472_gpio_map[] = {
++	{ "INT347E", INT3472_GPIO_TYPE_RESET, INT3472_GPIO_TYPE_RESET, false, "enable" },
++};
++
++static void int3472_get_func_and_polarity(struct acpi_device *adev, u8 *type,
++					  const char **func, unsigned long *gpio_flags)
+ {
+-	switch (type) {
++	unsigned int i;
++
++	for (i = 0; i < ARRAY_SIZE(int3472_gpio_map); i++) {
++		/*
++		 * Map the firmware-provided GPIO to whatever a driver expects
++		 * (as in DT bindings). First check if the type matches with the
++		 * GPIO map, then further check that the device _HID matches.
++		 */
++		if (*type != int3472_gpio_map[i].type_from)
++			continue;
++
++		if (!acpi_dev_hid_uid_match(adev, int3472_gpio_map[i].hid, NULL))
++			continue;
++
++		*type = int3472_gpio_map[i].type_to;
++		*gpio_flags = int3472_gpio_map[i].polarity_low ?
++			      GPIO_ACTIVE_LOW : GPIO_ACTIVE_HIGH;
++		*func = int3472_gpio_map[i].func;
++		return;
++	}
++
++	switch (*type) {
+ 	case INT3472_GPIO_TYPE_RESET:
+ 		*func = "reset";
+-		*polarity = GPIO_ACTIVE_LOW;
++		*gpio_flags = GPIO_ACTIVE_LOW;
+ 		break;
+ 	case INT3472_GPIO_TYPE_POWERDOWN:
+ 		*func = "powerdown";
+-		*polarity = GPIO_ACTIVE_LOW;
++		*gpio_flags = GPIO_ACTIVE_LOW;
+ 		break;
+ 	case INT3472_GPIO_TYPE_CLK_ENABLE:
+ 		*func = "clk-enable";
+-		*polarity = GPIO_ACTIVE_HIGH;
++		*gpio_flags = GPIO_ACTIVE_HIGH;
+ 		break;
+ 	case INT3472_GPIO_TYPE_PRIVACY_LED:
+ 		*func = "privacy-led";
+-		*polarity = GPIO_ACTIVE_HIGH;
++		*gpio_flags = GPIO_ACTIVE_HIGH;
+ 		break;
+ 	case INT3472_GPIO_TYPE_POWER_ENABLE:
+ 		*func = "power-enable";
+-		*polarity = GPIO_ACTIVE_HIGH;
++		*gpio_flags = GPIO_ACTIVE_HIGH;
+ 		break;
+ 	default:
+ 		*func = "unknown";
+-		*polarity = GPIO_ACTIVE_HIGH;
++		*gpio_flags = GPIO_ACTIVE_HIGH;
+ 		break;
+ 	}
+ }
+@@ -194,7 +239,7 @@ static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares,
+ 	struct gpio_desc *gpio;
+ 	const char *err_msg;
+ 	const char *func;
+-	u32 polarity;
++	unsigned long gpio_flags;
+ 	int ret;
+ 
+ 	if (!acpi_gpio_get_io_resource(ares, &agpio))
+@@ -217,7 +262,7 @@ static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares,
+ 
+ 	type = FIELD_GET(INT3472_GPIO_DSM_TYPE, obj->integer.value);
+ 
+-	int3472_get_func_and_polarity(type, &func, &polarity);
++	int3472_get_func_and_polarity(int3472->sensor, &type, &func, &gpio_flags);
+ 
+ 	pin = FIELD_GET(INT3472_GPIO_DSM_PIN, obj->integer.value);
+ 	if (pin != agpio->pin_table[0])
+@@ -227,16 +272,16 @@ static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares,
+ 
+ 	active_value = FIELD_GET(INT3472_GPIO_DSM_SENSOR_ON_VAL, obj->integer.value);
+ 	if (!active_value)
+-		polarity ^= GPIO_ACTIVE_LOW;
++		gpio_flags ^= GPIO_ACTIVE_LOW;
+ 
+ 	dev_dbg(int3472->dev, "%s %s pin %d active-%s\n", func,
+ 		agpio->resource_source.string_ptr, agpio->pin_table[0],
+-		str_high_low(polarity == GPIO_ACTIVE_HIGH));
++		str_high_low(gpio_flags == GPIO_ACTIVE_HIGH));
+ 
+ 	switch (type) {
+ 	case INT3472_GPIO_TYPE_RESET:
+ 	case INT3472_GPIO_TYPE_POWERDOWN:
+-		ret = skl_int3472_map_gpio_to_sensor(int3472, agpio, func, polarity);
++		ret = skl_int3472_map_gpio_to_sensor(int3472, agpio, func, gpio_flags);
+ 		if (ret)
+ 			err_msg = "Failed to map GPIO pin to sensor\n";
+ 
+@@ -244,7 +289,7 @@ static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares,
+ 	case INT3472_GPIO_TYPE_CLK_ENABLE:
+ 	case INT3472_GPIO_TYPE_PRIVACY_LED:
+ 	case INT3472_GPIO_TYPE_POWER_ENABLE:
+-		gpio = skl_int3472_gpiod_get_from_temp_lookup(int3472, agpio, func, polarity);
++		gpio = skl_int3472_gpiod_get_from_temp_lookup(int3472, agpio, func, gpio_flags);
+ 		if (IS_ERR(gpio)) {
+ 			ret = PTR_ERR(gpio);
+ 			err_msg = "Failed to get GPIO\n";
+diff --git a/drivers/platform/x86/intel/pmc/core.c b/drivers/platform/x86/intel/pmc/core.c
+index 4e9c8c96c8ccee..257c03c59fd958 100644
+--- a/drivers/platform/x86/intel/pmc/core.c
++++ b/drivers/platform/x86/intel/pmc/core.c
+@@ -625,8 +625,8 @@ static u32 convert_ltr_scale(u32 val)
+ static int pmc_core_ltr_show(struct seq_file *s, void *unused)
+ {
+ 	struct pmc_dev *pmcdev = s->private;
+-	u64 decoded_snoop_ltr, decoded_non_snoop_ltr;
+-	u32 ltr_raw_data, scale, val;
++	u64 decoded_snoop_ltr, decoded_non_snoop_ltr, val;
++	u32 ltr_raw_data, scale;
+ 	u16 snoop_ltr, nonsnoop_ltr;
+ 	unsigned int i, index, ltr_index = 0;
+ 
+diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
+index 84dcd7da7319e3..a3c73abb00f21e 100644
+--- a/drivers/platform/x86/thinkpad_acpi.c
++++ b/drivers/platform/x86/thinkpad_acpi.c
+@@ -7883,6 +7883,7 @@ static struct ibm_struct volume_driver_data = {
+ 
+ #define FAN_NS_CTRL_STATUS	BIT(2)		/* Bit which determines control is enabled or not */
+ #define FAN_NS_CTRL		BIT(4)		/* Bit which determines control is by host or EC */
++#define FAN_CLOCK_TPM		(22500*60)	/* Ticks per minute for a 22.5 kHz clock */
+ 
+ enum {					/* Fan control constants */
+ 	fan_status_offset = 0x2f,	/* EC register 0x2f */
+@@ -7938,6 +7939,7 @@ static int fan_watchdog_maxinterval;
+ 
+ static bool fan_with_ns_addr;
+ static bool ecfw_with_fan_dec_rpm;
++static bool fan_speed_in_tpr;
+ 
+ static struct mutex fan_mutex;
+ 
+@@ -8140,8 +8142,11 @@ static int fan_get_speed(unsigned int *speed)
+ 			     !acpi_ec_read(fan_rpm_offset + 1, &hi)))
+ 			return -EIO;
+ 
+-		if (likely(speed))
++		if (likely(speed)) {
+ 			*speed = (hi << 8) | lo;
++			if (fan_speed_in_tpr && *speed != 0)
++				*speed = FAN_CLOCK_TPM / *speed;
++		}
+ 		break;
+ 	case TPACPI_FAN_RD_TPEC_NS:
+ 		if (!acpi_ec_read(fan_rpm_status_ns, &lo))
+@@ -8174,8 +8179,11 @@ static int fan2_get_speed(unsigned int *speed)
+ 		if (rc)
+ 			return -EIO;
+ 
+-		if (likely(speed))
++		if (likely(speed)) {
+ 			*speed = (hi << 8) | lo;
++			if (fan_speed_in_tpr && *speed != 0)
++				*speed = FAN_CLOCK_TPM / *speed;
++		}
+ 		break;
+ 
+ 	case TPACPI_FAN_RD_TPEC_NS:
+@@ -8786,6 +8794,7 @@ static const struct attribute_group fan_driver_attr_group = {
+ #define TPACPI_FAN_NOFAN	0x0008		/* no fan available */
+ #define TPACPI_FAN_NS		0x0010		/* For EC with non-Standard register addresses */
+ #define TPACPI_FAN_DECRPM	0x0020		/* For ECFW's with RPM in register as decimal */
++#define TPACPI_FAN_TPR		0x0040		/* Fan speed is in Ticks Per Revolution */
+ 
+ static const struct tpacpi_quirk fan_quirk_table[] __initconst = {
+ 	TPACPI_QEC_IBM('1', 'Y', TPACPI_FAN_Q1),
+@@ -8815,6 +8824,7 @@ static const struct tpacpi_quirk fan_quirk_table[] __initconst = {
+ 	TPACPI_Q_LNV3('R', '0', 'V', TPACPI_FAN_NS),	/* 11e Gen5 KL-Y */
+ 	TPACPI_Q_LNV3('N', '1', 'O', TPACPI_FAN_NOFAN),	/* X1 Tablet (2nd gen) */
+ 	TPACPI_Q_LNV3('R', '0', 'Q', TPACPI_FAN_DECRPM),/* L480 */
++	TPACPI_Q_LNV('8', 'F', TPACPI_FAN_TPR),		/* ThinkPad x120e */
+ };
+ 
+ static int __init fan_init(struct ibm_init_struct *iibm)
+@@ -8885,6 +8895,8 @@ static int __init fan_init(struct ibm_init_struct *iibm)
+ 
+ 			if (quirks & TPACPI_FAN_Q1)
+ 				fan_quirk1_setup();
++			if (quirks & TPACPI_FAN_TPR)
++				fan_speed_in_tpr = true;
+ 			/* Try and probe the 2nd fan */
+ 			tp_features.second_fan = 1; /* needed for get_speed to work */
+ 			res = fan2_get_speed(&speed);
+@@ -10318,6 +10330,10 @@ static struct ibm_struct proxsensor_driver_data = {
+ #define DYTC_MODE_PSC_BALANCE  5  /* Default mode aka balanced */
+ #define DYTC_MODE_PSC_PERFORM  7  /* High power mode aka performance */
+ 
++#define DYTC_MODE_PSCV9_LOWPOWER 1  /* Low power mode */
++#define DYTC_MODE_PSCV9_BALANCE  3  /* Default mode aka balanced */
++#define DYTC_MODE_PSCV9_PERFORM  4  /* High power mode aka performance */
++
+ #define DYTC_ERR_MASK       0xF  /* Bits 0-3 in cmd result are the error result */
+ #define DYTC_ERR_SUCCESS      1  /* CMD completed successful */
+ 
+@@ -10338,6 +10354,10 @@ static int dytc_capabilities;
+ static bool dytc_mmc_get_available;
+ static int profile_force;
+ 
++static int platform_psc_profile_lowpower = DYTC_MODE_PSC_LOWPOWER;
++static int platform_psc_profile_balanced = DYTC_MODE_PSC_BALANCE;
++static int platform_psc_profile_performance = DYTC_MODE_PSC_PERFORM;
++
+ static int convert_dytc_to_profile(int funcmode, int dytcmode,
+ 		enum platform_profile_option *profile)
+ {
+@@ -10359,19 +10379,15 @@ static int convert_dytc_to_profile(int funcmode, int dytcmode,
+ 		}
+ 		return 0;
+ 	case DYTC_FUNCTION_PSC:
+-		switch (dytcmode) {
+-		case DYTC_MODE_PSC_LOWPOWER:
++		if (dytcmode == platform_psc_profile_lowpower)
+ 			*profile = PLATFORM_PROFILE_LOW_POWER;
+-			break;
+-		case DYTC_MODE_PSC_BALANCE:
++		else if (dytcmode == platform_psc_profile_balanced)
+ 			*profile =  PLATFORM_PROFILE_BALANCED;
+-			break;
+-		case DYTC_MODE_PSC_PERFORM:
++		else if (dytcmode == platform_psc_profile_performance)
+ 			*profile =  PLATFORM_PROFILE_PERFORMANCE;
+-			break;
+-		default: /* Unknown mode */
++		else
+ 			return -EINVAL;
+-		}
++
+ 		return 0;
+ 	case DYTC_FUNCTION_AMT:
+ 		/* For now return balanced. It's the closest we have to 'auto' */
+@@ -10392,19 +10408,19 @@ static int convert_profile_to_dytc(enum platform_profile_option profile, int *pe
+ 		if (dytc_capabilities & BIT(DYTC_FC_MMC))
+ 			*perfmode = DYTC_MODE_MMC_LOWPOWER;
+ 		else if (dytc_capabilities & BIT(DYTC_FC_PSC))
+-			*perfmode = DYTC_MODE_PSC_LOWPOWER;
++			*perfmode = platform_psc_profile_lowpower;
+ 		break;
+ 	case PLATFORM_PROFILE_BALANCED:
+ 		if (dytc_capabilities & BIT(DYTC_FC_MMC))
+ 			*perfmode = DYTC_MODE_MMC_BALANCE;
+ 		else if (dytc_capabilities & BIT(DYTC_FC_PSC))
+-			*perfmode = DYTC_MODE_PSC_BALANCE;
++			*perfmode = platform_psc_profile_balanced;
+ 		break;
+ 	case PLATFORM_PROFILE_PERFORMANCE:
+ 		if (dytc_capabilities & BIT(DYTC_FC_MMC))
+ 			*perfmode = DYTC_MODE_MMC_PERFORM;
+ 		else if (dytc_capabilities & BIT(DYTC_FC_PSC))
+-			*perfmode = DYTC_MODE_PSC_PERFORM;
++			*perfmode = platform_psc_profile_performance;
+ 		break;
+ 	default: /* Unknown profile */
+ 		return -EOPNOTSUPP;
+@@ -10593,6 +10609,7 @@ static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm)
+ 	if (output & BIT(DYTC_QUERY_ENABLE_BIT))
+ 		dytc_version = (output >> DYTC_QUERY_REV_BIT) & 0xF;
+ 
++	dbg_printk(TPACPI_DBG_INIT, "DYTC version %d\n", dytc_version);
+ 	/* Check DYTC is enabled and supports mode setting */
+ 	if (dytc_version < 5)
+ 		return -ENODEV;
+@@ -10631,6 +10648,11 @@ static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm)
+ 		}
+ 	} else if (dytc_capabilities & BIT(DYTC_FC_PSC)) { /* PSC MODE */
+ 		pr_debug("PSC is supported\n");
++		if (dytc_version >= 9) { /* update profiles for DYTC 9 and up */
++			platform_psc_profile_lowpower = DYTC_MODE_PSCV9_LOWPOWER;
++			platform_psc_profile_balanced = DYTC_MODE_PSCV9_BALANCE;
++			platform_psc_profile_performance = DYTC_MODE_PSCV9_PERFORM;
++		}
+ 	} else {
+ 		dbg_printk(TPACPI_DBG_INIT, "No DYTC support available\n");
+ 		return -ENODEV;
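To spell out the new TPACPI_FAN_TPR quirk: on the affected EC firmware the fan registers hold ticks of a 22.5 kHz clock per revolution rather than RPM, so the driver divides the reading into FAN_CLOCK_TPM (22500 * 60 ticks per minute), guarding against a zero reading. A standalone check of the conversion (the 450-tick reading is made up):

#include <stdio.h>

#define FAN_CLOCK_TPM (22500 * 60)  /* ticks per minute of the 22.5 kHz clock */

/* Convert an EC reading in ticks-per-revolution to RPM, guarding the
 * divide the same way fan_get_speed() does (0 means the fan is stopped). */
static unsigned int tpr_to_rpm(unsigned int ticks)
{
	return ticks ? FAN_CLOCK_TPM / ticks : 0;
}

int main(void)
{
	printf("%u\n", tpr_to_rpm(450)); /* 450 ticks/rev -> 3000 RPM */
	printf("%u\n", tpr_to_rpm(0));   /* stopped fan -> 0 RPM      */
	return 0;
}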
+diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
+index 52c32dcbf7d846..4112a009733826 100644
+--- a/drivers/powercap/powercap_sys.c
++++ b/drivers/powercap/powercap_sys.c
+@@ -627,8 +627,7 @@ struct powercap_control_type *powercap_register_control_type(
+ 	dev_set_name(&control_type->dev, "%s", name);
+ 	result = device_register(&control_type->dev);
+ 	if (result) {
+-		if (control_type->allocated)
+-			kfree(control_type);
++		put_device(&control_type->dev);
+ 		return ERR_PTR(result);
+ 	}
+ 	idr_init(&control_type->idr);
+diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
+index a07bbecba61cd4..0c5bda060249e1 100644
+--- a/drivers/s390/cio/chp.c
++++ b/drivers/s390/cio/chp.c
+@@ -682,7 +682,8 @@ static int info_update(void)
+ 	if (time_after(jiffies, chp_info_expires)) {
+ 		/* Data is too old, update. */
+ 		rc = sclp_chp_read_info(&chp_info);
+-		chp_info_expires = jiffies + CHP_INFO_UPDATE_INTERVAL ;
++		if (!rc)
++			chp_info_expires = jiffies + CHP_INFO_UPDATE_INTERVAL;
+ 	}
+ 	mutex_unlock(&info_lock);
+ 
+diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
+index 8958547ac111ac..fed07b1460702a 100644
+--- a/drivers/scsi/qla1280.c
++++ b/drivers/scsi/qla1280.c
+@@ -2867,7 +2867,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
+ 			dprintk(3, "S/G Segment phys_addr=%x %x, len=0x%x\n",
+ 				cpu_to_le32(upper_32_bits(dma_handle)),
+ 				cpu_to_le32(lower_32_bits(dma_handle)),
+-				cpu_to_le32(sg_dma_len(sg_next(s))));
++				cpu_to_le32(sg_dma_len(s)));
+ 			remseg--;
+ 		}
+ 		dprintk(5, "qla1280_64bit_start_scsi: Scatter/gather "
+diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
+index 042329b74c6e68..fe08af4dcb67cf 100644
+--- a/drivers/scsi/scsi_scan.c
++++ b/drivers/scsi/scsi_scan.c
+@@ -245,7 +245,7 @@ static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev,
+ 	}
+ 	ret = sbitmap_init_node(&sdev->budget_map,
+ 				scsi_device_max_queue_depth(sdev),
+-				new_shift, GFP_KERNEL,
++				new_shift, GFP_NOIO,
+ 				sdev->request_queue->node, false, true);
+ 	if (!ret)
+ 		sbitmap_resize(&sdev->budget_map, depth);
+diff --git a/drivers/spi/spi-microchip-core.c b/drivers/spi/spi-microchip-core.c
+index 7c1a9a9853733e..92b63a7f20415c 100644
+--- a/drivers/spi/spi-microchip-core.c
++++ b/drivers/spi/spi-microchip-core.c
+@@ -70,8 +70,7 @@
+ #define INT_RX_CHANNEL_OVERFLOW		BIT(2)
+ #define INT_TX_CHANNEL_UNDERRUN		BIT(3)
+ 
+-#define INT_ENABLE_MASK (CONTROL_RX_DATA_INT | CONTROL_TX_DATA_INT | \
+-			 CONTROL_RX_OVER_INT | CONTROL_TX_UNDER_INT)
++#define INT_ENABLE_MASK (CONTROL_RX_OVER_INT | CONTROL_TX_UNDER_INT)
+ 
+ #define REG_CONTROL		(0x00)
+ #define REG_FRAME_SIZE		(0x04)
+@@ -133,10 +132,15 @@ static inline void mchp_corespi_disable(struct mchp_corespi *spi)
+ 	mchp_corespi_write(spi, REG_CONTROL, control);
+ }
+ 
+-static inline void mchp_corespi_read_fifo(struct mchp_corespi *spi)
++static inline void mchp_corespi_read_fifo(struct mchp_corespi *spi, int fifo_max)
+ {
+-	while (spi->rx_len >= spi->n_bytes && !(mchp_corespi_read(spi, REG_STATUS) & STATUS_RXFIFO_EMPTY)) {
+-		u32 data = mchp_corespi_read(spi, REG_RX_DATA);
++	for (int i = 0; i < fifo_max; i++) {
++		u32 data;
++
++		while (mchp_corespi_read(spi, REG_STATUS) & STATUS_RXFIFO_EMPTY)
++			;
++
++		data = mchp_corespi_read(spi, REG_RX_DATA);
+ 
+ 		spi->rx_len -= spi->n_bytes;
+ 
+@@ -211,11 +215,10 @@ static inline void mchp_corespi_set_xfer_size(struct mchp_corespi *spi, int len)
+ 	mchp_corespi_write(spi, REG_FRAMESUP, len);
+ }
+ 
+-static inline void mchp_corespi_write_fifo(struct mchp_corespi *spi)
++static inline void mchp_corespi_write_fifo(struct mchp_corespi *spi, int fifo_max)
+ {
+-	int fifo_max, i = 0;
++	int i = 0;
+ 
+-	fifo_max = DIV_ROUND_UP(min(spi->tx_len, FIFO_DEPTH), spi->n_bytes);
+ 	mchp_corespi_set_xfer_size(spi, fifo_max);
+ 
+ 	while ((i < fifo_max) && !(mchp_corespi_read(spi, REG_STATUS) & STATUS_TXFIFO_FULL)) {
+@@ -413,19 +416,6 @@ static irqreturn_t mchp_corespi_interrupt(int irq, void *dev_id)
+ 	if (intfield == 0)
+ 		return IRQ_NONE;
+ 
+-	if (intfield & INT_TXDONE)
+-		mchp_corespi_write(spi, REG_INT_CLEAR, INT_TXDONE);
+-
+-	if (intfield & INT_RXRDY) {
+-		mchp_corespi_write(spi, REG_INT_CLEAR, INT_RXRDY);
+-
+-		if (spi->rx_len)
+-			mchp_corespi_read_fifo(spi);
+-	}
+-
+-	if (!spi->rx_len && !spi->tx_len)
+-		finalise = true;
+-
+ 	if (intfield & INT_RX_CHANNEL_OVERFLOW) {
+ 		mchp_corespi_write(spi, REG_INT_CLEAR, INT_RX_CHANNEL_OVERFLOW);
+ 		finalise = true;
+@@ -512,9 +502,14 @@ static int mchp_corespi_transfer_one(struct spi_controller *host,
+ 
+ 	mchp_corespi_write(spi, REG_SLAVE_SELECT, spi->pending_slave_select);
+ 
+-	while (spi->tx_len)
+-		mchp_corespi_write_fifo(spi);
++	while (spi->tx_len) {
++		int fifo_max = DIV_ROUND_UP(min(spi->tx_len, FIFO_DEPTH), spi->n_bytes);
++
++		mchp_corespi_write_fifo(spi, fifo_max);
++		mchp_corespi_read_fifo(spi, fifo_max);
++	}
+ 
++	spi_finalize_current_transfer(host);
+ 	return 1;
+ }
+ 
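The reworked transfer loop pairs every FIFO fill with a drain of the same size: write at most fifo_max frames, then read exactly that many back before refilling, so the RX FIFO can never overflow while the data path no longer depends on RX/TX interrupts. A toy loopback sketch of the chunking (the FIFO_DEPTH value is illustrative):

#include <stdio.h>

#define FIFO_DEPTH 8

/* Toy loopback "FIFO": every frame written can be read back.  The
 * point is the pairing -- push at most FIFO_DEPTH frames, then pop the
 * same count before pushing more, as transfer_one() now does. */
int main(void)
{
	int tx_len = 19, rx_len = 19;

	while (tx_len) {
		int chunk = tx_len < FIFO_DEPTH ? tx_len : FIFO_DEPTH;

		for (int i = 0; i < chunk; i++, tx_len--)
			;                              /* write_fifo() */
		for (int i = 0; i < chunk; i++, rx_len--)
			;                              /* read_fifo() drains the same count */
		printf("chunk=%d tx_left=%d rx_left=%d\n", chunk, tx_len, rx_len);
	}
	return 0;
}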
+diff --git a/drivers/thermal/cpufreq_cooling.c b/drivers/thermal/cpufreq_cooling.c
+index 280071be30b157..6b7ab1814c12df 100644
+--- a/drivers/thermal/cpufreq_cooling.c
++++ b/drivers/thermal/cpufreq_cooling.c
+@@ -57,8 +57,6 @@ struct time_in_idle {
+  * @max_level: maximum cooling level. One less than total number of valid
+  *	cpufreq frequencies.
+  * @em: Reference on the Energy Model of the device
+- * @cdev: thermal_cooling_device pointer to keep track of the
+- *	registered cooling device.
+  * @policy: cpufreq policy.
+  * @cooling_ops: cpufreq callbacks to thermal cooling device ops
+  * @idle_time: idle time stats
+diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
+index a3e95ef5eda82e..89fc0b5662919b 100644
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -3138,8 +3138,13 @@ ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+ 	case UPIU_TRANSACTION_QUERY_RSP: {
+ 		u8 response = lrbp->ucd_rsp_ptr->header.response;
+ 
+-		if (response == 0)
++		if (response == 0) {
+ 			err = ufshcd_copy_query_response(hba, lrbp);
++		} else {
++			err = -EINVAL;
++			dev_err(hba->dev, "%s: unexpected response in Query RSP: %x\n",
++					__func__, response);
++		}
+ 		break;
+ 	}
+ 	case UPIU_TRANSACTION_REJECT_UPIU:
+diff --git a/drivers/usb/phy/phy-generic.c b/drivers/usb/phy/phy-generic.c
+index e7d50e0a161238..aadf98f65c6084 100644
+--- a/drivers/usb/phy/phy-generic.c
++++ b/drivers/usb/phy/phy-generic.c
+@@ -212,7 +212,7 @@ int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_generic *nop)
+ 		if (of_property_read_u32(node, "clock-frequency", &clk_rate))
+ 			clk_rate = 0;
+ 
+-		needs_clk = of_property_read_bool(node, "clocks");
++		needs_clk = of_property_present(node, "clocks");
+ 	}
+ 	nop->gpiod_reset = devm_gpiod_get_optional(dev, "reset",
+ 						   GPIOD_ASIS);
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index c6f17d732b9581..236205ce350030 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -1079,6 +1079,20 @@ static const struct usb_device_id id_table_combined[] = {
+ 		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ 	/* GMC devices */
+ 	{ USB_DEVICE(GMC_VID, GMC_Z216C_PID) },
++	/* Altera USB Blaster 3 */
++	{ USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_6022_PID, 1) },
++	{ USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_6025_PID, 2) },
++	{ USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_6026_PID, 2) },
++	{ USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_6026_PID, 3) },
++	{ USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_6029_PID, 2) },
++	{ USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602A_PID, 2) },
++	{ USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602A_PID, 3) },
++	{ USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602C_PID, 1) },
++	{ USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602D_PID, 1) },
++	{ USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602D_PID, 2) },
++	{ USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602E_PID, 1) },
++	{ USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602E_PID, 2) },
++	{ USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602E_PID, 3) },
+ 	{ }					/* Terminating entry */
+ };
+ 
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 5ee60ba2a73cdb..52be47d684ea66 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -1612,3 +1612,16 @@
+  */
+ #define GMC_VID				0x1cd7
+ #define GMC_Z216C_PID			0x0217 /* GMC Z216C Adapter IR-USB */
++
++/*
++ *  Altera USB Blaster 3 (http://www.altera.com).
++ */
++#define ALTERA_VID			0x09fb
++#define ALTERA_UB3_6022_PID		0x6022
++#define ALTERA_UB3_6025_PID		0x6025
++#define ALTERA_UB3_6026_PID		0x6026
++#define ALTERA_UB3_6029_PID		0x6029
++#define ALTERA_UB3_602A_PID		0x602a
++#define ALTERA_UB3_602C_PID		0x602c
++#define ALTERA_UB3_602D_PID		0x602d
++#define ALTERA_UB3_602E_PID		0x602e
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 58bd54e8c483a2..5cd26dac2069fa 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -1368,13 +1368,13 @@ static const struct usb_device_id option_ids[] = {
+ 	  .driver_info = NCTRL(0) | RSVD(1) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1075, 0xff),	/* Telit FN990A (PCIe) */
+ 	  .driver_info = RSVD(0) },
+-	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1080, 0xff),	/* Telit FE990 (rmnet) */
++	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1080, 0xff),	/* Telit FE990A (rmnet) */
+ 	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
+-	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1081, 0xff),	/* Telit FE990 (MBIM) */
++	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1081, 0xff),	/* Telit FE990A (MBIM) */
+ 	  .driver_info = NCTRL(0) | RSVD(1) },
+-	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1082, 0xff),	/* Telit FE990 (RNDIS) */
++	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1082, 0xff),	/* Telit FE990A (RNDIS) */
+ 	  .driver_info = NCTRL(2) | RSVD(3) },
+-	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1083, 0xff),	/* Telit FE990 (ECM) */
++	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1083, 0xff),	/* Telit FE990A (ECM) */
+ 	  .driver_info = NCTRL(0) | RSVD(1) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a0, 0xff),	/* Telit FN20C04 (rmnet) */
+ 	  .driver_info = RSVD(0) | NCTRL(3) },
+@@ -1388,28 +1388,44 @@ static const struct usb_device_id option_ids[] = {
+ 	  .driver_info = RSVD(0) | NCTRL(2) | RSVD(3) | RSVD(4) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10aa, 0xff),	/* Telit FN920C04 (MBIM) */
+ 	  .driver_info = NCTRL(3) | RSVD(4) | RSVD(5) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b0, 0xff, 0xff, 0x30),	/* Telit FE990B (rmnet) */
++	  .driver_info = NCTRL(5) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b0, 0xff, 0xff, 0x40) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b0, 0xff, 0xff, 0x60) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b1, 0xff, 0xff, 0x30),	/* Telit FE990B (MBIM) */
++	  .driver_info = NCTRL(6) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b1, 0xff, 0xff, 0x40) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b1, 0xff, 0xff, 0x60) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b2, 0xff, 0xff, 0x30),	/* Telit FE990B (RNDIS) */
++	  .driver_info = NCTRL(6) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b2, 0xff, 0xff, 0x40) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b2, 0xff, 0xff, 0x60) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b3, 0xff, 0xff, 0x30),	/* Telit FE990B (ECM) */
++	  .driver_info = NCTRL(6) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b3, 0xff, 0xff, 0x40) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b3, 0xff, 0xff, 0x60) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10c0, 0xff),	/* Telit FE910C04 (rmnet) */
+ 	  .driver_info = RSVD(0) | NCTRL(3) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10c4, 0xff),	/* Telit FE910C04 (rmnet) */
+ 	  .driver_info = RSVD(0) | NCTRL(3) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10c8, 0xff),	/* Telit FE910C04 (rmnet) */
+ 	  .driver_info = RSVD(0) | NCTRL(2) | RSVD(3) | RSVD(4) },
+-	{ USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d0, 0x60) },	/* Telit FN990B (rmnet) */
+-	{ USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d0, 0x40) },
+-	{ USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d0, 0x30),
++	{ USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d0, 0xff, 0xff, 0x30),	/* Telit FN990B (rmnet) */
+ 	  .driver_info = NCTRL(5) },
+-	{ USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d1, 0x60) },	/* Telit FN990B (MBIM) */
+-	{ USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d1, 0x40) },
+-	{ USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d1, 0x30),
++	{ USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d0, 0xff, 0xff, 0x40) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d0, 0xff, 0xff, 0x60) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d1, 0xff, 0xff, 0x30),	/* Telit FN990B (MBIM) */
+ 	  .driver_info = NCTRL(6) },
+-	{ USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d2, 0x60) },	/* Telit FN990B (RNDIS) */
+-	{ USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d2, 0x40) },
+-	{ USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d2, 0x30),
++	{ USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d1, 0xff, 0xff, 0x40) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d1, 0xff, 0xff, 0x60) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d2, 0xff, 0xff, 0x30),	/* Telit FN990B (RNDIS) */
+ 	  .driver_info = NCTRL(6) },
+-	{ USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d3, 0x60) },	/* Telit FN990B (ECM) */
+-	{ USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d3, 0x40) },
+-	{ USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d3, 0x30),
++	{ USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d2, 0xff, 0xff, 0x40) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d2, 0xff, 0xff, 0x60) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d3, 0xff, 0xff, 0x30),	/* Telit FN990B (ECM) */
+ 	  .driver_info = NCTRL(6) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d3, 0xff, 0xff, 0x40) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d3, 0xff, 0xff, 0x60) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
+ 	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
+diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
+index 9ac25d08f473e8..63612faeab7271 100644
+--- a/drivers/vhost/vhost.c
++++ b/drivers/vhost/vhost.c
+@@ -666,7 +666,7 @@ static struct vhost_worker *vhost_worker_create(struct vhost_dev *dev)
+ 
+ 	vtsk = vhost_task_create(vhost_run_work_list, vhost_worker_killed,
+ 				 worker, name);
+-	if (!vtsk)
++	if (IS_ERR(vtsk))
+ 		goto free_worker;
+ 
+ 	mutex_init(&worker->mutex);
+diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
+index 7fdb5edd7e2e8d..75338ffc703fb5 100644
+--- a/drivers/video/fbdev/hyperv_fb.c
++++ b/drivers/video/fbdev/hyperv_fb.c
+@@ -282,6 +282,8 @@ static uint screen_depth;
+ static uint screen_fb_size;
+ static uint dio_fb_size; /* FB size for deferred IO */
+ 
++static void hvfb_putmem(struct fb_info *info);
++
+ /* Send message to Hyper-V host */
+ static inline int synthvid_send(struct hv_device *hdev,
+ 				struct synthvid_msg *msg)
+@@ -862,6 +864,17 @@ static void hvfb_ops_damage_area(struct fb_info *info, u32 x, u32 y, u32 width,
+ 		hvfb_ondemand_refresh_throttle(par, x, y, width, height);
+ }
+ 
++/*
++ * fb_ops.fb_destroy is called by the last put_fb_info() call at the end
++ * of unregister_framebuffer() or fb_release(). Do any cleanup related to
++ * the framebuffer here.
++ */
++static void hvfb_destroy(struct fb_info *info)
++{
++	hvfb_putmem(info);
++	framebuffer_release(info);
++}
++
+ /*
+  * TODO: GEN1 codepaths allocate from system or DMA-able memory. Fix the
+  *       driver to use the _SYSMEM_ or _DMAMEM_ helpers in these cases.
+@@ -877,6 +890,7 @@ static const struct fb_ops hvfb_ops = {
+ 	.fb_set_par = hvfb_set_par,
+ 	.fb_setcolreg = hvfb_setcolreg,
+ 	.fb_blank = hvfb_blank,
++	.fb_destroy	= hvfb_destroy,
+ };
+ 
+ /* Get options from kernel parameter "video=" */
+@@ -952,7 +966,7 @@ static phys_addr_t hvfb_get_phymem(struct hv_device *hdev,
+ }
+ 
+ /* Release contiguous physical memory */
+-static void hvfb_release_phymem(struct hv_device *hdev,
++static void hvfb_release_phymem(struct device *device,
+ 				phys_addr_t paddr, unsigned int size)
+ {
+ 	unsigned int order = get_order(size);
+@@ -960,7 +974,7 @@ static void hvfb_release_phymem(struct hv_device *hdev,
+ 	if (order <= MAX_PAGE_ORDER)
+ 		__free_pages(pfn_to_page(paddr >> PAGE_SHIFT), order);
+ 	else
+-		dma_free_coherent(&hdev->device,
++		dma_free_coherent(device,
+ 				  round_up(size, PAGE_SIZE),
+ 				  phys_to_virt(paddr),
+ 				  paddr);
+@@ -989,6 +1003,7 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
+ 
+ 		base = pci_resource_start(pdev, 0);
+ 		size = pci_resource_len(pdev, 0);
++		aperture_remove_conflicting_devices(base, size, KBUILD_MODNAME);
+ 
+ 		/*
+ 		 * For Gen 1 VM, we can directly use the contiguous memory
+@@ -1010,11 +1025,21 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
+ 			goto getmem_done;
+ 		}
+ 		pr_info("Unable to allocate enough contiguous physical memory on Gen 1 VM. Using MMIO instead.\n");
++	} else {
++		aperture_remove_all_conflicting_devices(KBUILD_MODNAME);
+ 	}
+ 
+ 	/*
+-	 * Cannot use the contiguous physical memory.
+-	 * Allocate mmio space for framebuffer.
++	 * Cannot use contiguous physical memory, so allocate MMIO space for
++	 * the framebuffer. At this point in the function, conflicting devices
++	 * that might have claimed the framebuffer MMIO space based on
++	 * screen_info.lfb_base must have already been removed so that
++	 * vmbus_allocate_mmio() does not allocate different MMIO space. If the
++	 * kdump image were to be loaded using kexec_file_load(), the
++	 * framebuffer location in the kdump image would be set from
++	 * screen_info.lfb_base at the time that kdump is enabled. If the
++	 * framebuffer has moved elsewhere, this could be the wrong location,
++	 * causing kdump to hang when efifb (for example) loads.
+ 	 */
+ 	dio_fb_size =
+ 		screen_width * screen_height * screen_depth / 8;
+@@ -1051,11 +1076,6 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
+ 	info->screen_size = dio_fb_size;
+ 
+ getmem_done:
+-	if (base && size)
+-		aperture_remove_conflicting_devices(base, size, KBUILD_MODNAME);
+-	else
+-		aperture_remove_all_conflicting_devices(KBUILD_MODNAME);
+-
+ 	if (!gen2vm)
+ 		pci_dev_put(pdev);
+ 
+@@ -1074,16 +1094,16 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
+ }
+ 
+ /* Release the framebuffer */
+-static void hvfb_putmem(struct hv_device *hdev, struct fb_info *info)
++static void hvfb_putmem(struct fb_info *info)
+ {
+ 	struct hvfb_par *par = info->par;
+ 
+ 	if (par->need_docopy) {
+ 		vfree(par->dio_vp);
+-		iounmap(info->screen_base);
++		iounmap(par->mmio_vp);
+ 		vmbus_free_mmio(par->mem->start, screen_fb_size);
+ 	} else {
+-		hvfb_release_phymem(hdev, info->fix.smem_start,
++		hvfb_release_phymem(info->device, info->fix.smem_start,
+ 				    screen_fb_size);
+ 	}
+ 
+@@ -1172,7 +1192,7 @@ static int hvfb_probe(struct hv_device *hdev,
+ 	if (ret)
+ 		goto error;
+ 
+-	ret = register_framebuffer(info);
++	ret = devm_register_framebuffer(&hdev->device, info);
+ 	if (ret) {
+ 		pr_err("Unable to register framebuffer\n");
+ 		goto error;
+@@ -1197,7 +1217,7 @@ static int hvfb_probe(struct hv_device *hdev,
+ 
+ error:
+ 	fb_deferred_io_cleanup(info);
+-	hvfb_putmem(hdev, info);
++	hvfb_putmem(info);
+ error2:
+ 	vmbus_close(hdev->channel);
+ error1:
+@@ -1220,14 +1240,10 @@ static void hvfb_remove(struct hv_device *hdev)
+ 
+ 	fb_deferred_io_cleanup(info);
+ 
+-	unregister_framebuffer(info);
+ 	cancel_delayed_work_sync(&par->dwork);
+ 
+ 	vmbus_close(hdev->channel);
+ 	hv_set_drvdata(hdev, NULL);
+-
+-	hvfb_putmem(hdev, info);
+-	framebuffer_release(info);
+ }
+ 
+ static int hvfb_suspend(struct hv_device *hdev)
+diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
+index 26c62e0d34e98b..1f65795cf5d7a2 100644
+--- a/drivers/xen/swiotlb-xen.c
++++ b/drivers/xen/swiotlb-xen.c
+@@ -113,7 +113,7 @@ static struct io_tlb_pool *xen_swiotlb_find_pool(struct device *dev,
+ }
+ 
+ #ifdef CONFIG_X86
+-int xen_swiotlb_fixup(void *buf, unsigned long nslabs)
++int __init xen_swiotlb_fixup(void *buf, unsigned long nslabs)
+ {
+ 	int rc;
+ 	unsigned int order = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT);
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index 660a5b9c08e9e4..6551fb003eed25 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -526,8 +526,6 @@ static void end_bbio_data_read(struct btrfs_bio *bbio)
+ 		u64 end;
+ 		u32 len;
+ 
+-		/* For now only order 0 folios are supported for data. */
+-		ASSERT(folio_order(folio) == 0);
+ 		btrfs_debug(fs_info,
+ 			"%s: bi_sector=%llu, err=%d, mirror=%u",
+ 			__func__, bio->bi_iter.bi_sector, bio->bi_status,
+@@ -555,7 +553,6 @@ static void end_bbio_data_read(struct btrfs_bio *bbio)
+ 
+ 		if (likely(uptodate)) {
+ 			loff_t i_size = i_size_read(inode);
+-			pgoff_t end_index = i_size >> folio_shift(folio);
+ 
+ 			/*
+ 			 * Zero out the remaining part if this range straddles
+@@ -564,9 +561,11 @@ static void end_bbio_data_read(struct btrfs_bio *bbio)
+ 			 * Here we should only zero the range inside the folio,
+ 			 * not touch anything else.
+ 			 *
+-			 * NOTE: i_size is exclusive while end is inclusive.
++			 * NOTE: i_size is exclusive while end is inclusive and
++			 * folio_contains() takes PAGE_SIZE units.
+ 			 */
+-			if (folio_index(folio) == end_index && i_size <= end) {
++			if (folio_contains(folio, i_size >> PAGE_SHIFT) &&
++			    i_size <= end) {
+ 				u32 zero_start = max(offset_in_folio(folio, i_size),
+ 						     offset_in_folio(folio, start));
+ 				u32 zero_len = offset_in_folio(folio, end) + 1 -
+@@ -960,7 +959,7 @@ static int btrfs_do_readpage(struct folio *folio, struct extent_map **em_cached,
+ 		return ret;
+ 	}
+ 
+-	if (folio->index == last_byte >> folio_shift(folio)) {
++	if (folio_contains(folio, last_byte >> PAGE_SHIFT)) {
+ 		size_t zero_offset = offset_in_folio(folio, last_byte);
+ 
+ 		if (zero_offset) {
+diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
+index fa9025c05d4e29..e9f58cdeeb5f3c 100644
+--- a/fs/btrfs/qgroup.c
++++ b/fs/btrfs/qgroup.c
+@@ -1899,11 +1899,7 @@ int btrfs_qgroup_cleanup_dropped_subvolume(struct btrfs_fs_info *fs_info, u64 su
+ 	 * Commit current transaction to make sure all the rfer/excl numbers
+ 	 * get updated.
+ 	 */
+-	trans = btrfs_start_transaction(fs_info->quota_root, 0);
+-	if (IS_ERR(trans))
+-		return PTR_ERR(trans);
+-
+-	ret = btrfs_commit_transaction(trans);
++	ret = btrfs_commit_current_transaction(fs_info->quota_root);
+ 	if (ret < 0)
+ 		return ret;
+ 
+diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
+index 2e62e62c07f836..bd6e675023c622 100644
+--- a/fs/fuse/dir.c
++++ b/fs/fuse/dir.c
+@@ -1632,7 +1632,7 @@ static const char *fuse_get_link(struct dentry *dentry, struct inode *inode,
+ 		goto out_err;
+ 
+ 	if (fc->cache_symlinks)
+-		return page_get_link(dentry, inode, callback);
++		return page_get_link_raw(dentry, inode, callback);
+ 
+ 	err = -ECHILD;
+ 	if (!dentry)
+diff --git a/fs/namei.c b/fs/namei.c
+index 4a4a22a08ac20d..6795600c5738a5 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -5300,10 +5300,9 @@ const char *vfs_get_link(struct dentry *dentry, struct delayed_call *done)
+ EXPORT_SYMBOL(vfs_get_link);
+ 
+ /* get the link contents into pagecache */
+-const char *page_get_link(struct dentry *dentry, struct inode *inode,
+-			  struct delayed_call *callback)
++static char *__page_get_link(struct dentry *dentry, struct inode *inode,
++			     struct delayed_call *callback)
+ {
+-	char *kaddr;
+ 	struct page *page;
+ 	struct address_space *mapping = inode->i_mapping;
+ 
+@@ -5322,8 +5321,23 @@ const char *page_get_link(struct dentry *dentry, struct inode *inode,
+ 	}
+ 	set_delayed_call(callback, page_put_link, page);
+ 	BUG_ON(mapping_gfp_mask(mapping) & __GFP_HIGHMEM);
+-	kaddr = page_address(page);
+-	nd_terminate_link(kaddr, inode->i_size, PAGE_SIZE - 1);
++	return page_address(page);
++}
++
++const char *page_get_link_raw(struct dentry *dentry, struct inode *inode,
++			      struct delayed_call *callback)
++{
++	return __page_get_link(dentry, inode, callback);
++}
++EXPORT_SYMBOL_GPL(page_get_link_raw);
++
++const char *page_get_link(struct dentry *dentry, struct inode *inode,
++					struct delayed_call *callback)
++{
++	char *kaddr = __page_get_link(dentry, inode, callback);
++
++	if (!IS_ERR(kaddr))
++		nd_terminate_link(kaddr, inode->i_size, PAGE_SIZE - 1);
+ 	return kaddr;
+ }
+ 
+diff --git a/fs/netfs/read_collect.c b/fs/netfs/read_collect.c
+index 3b9461f5e712e5..eae415efae24a0 100644
+--- a/fs/netfs/read_collect.c
++++ b/fs/netfs/read_collect.c
+@@ -284,7 +284,7 @@ static bool netfs_consume_read_data(struct netfs_io_subrequest *subreq, bool was
+ 				   netfs_trace_donate_to_deferred_next);
+ 	} else {
+ 		next = list_next_entry(subreq, rreq_link);
+-		WRITE_ONCE(next->prev_donated, excess);
++		WRITE_ONCE(next->prev_donated, next->prev_donated + excess);
+ 		trace_netfs_donate(rreq, subreq, next, excess,
+ 				   netfs_trace_donate_to_next);
+ 	}
+diff --git a/fs/smb/client/asn1.c b/fs/smb/client/asn1.c
+index b5724ef9f182f4..214a44509e7b99 100644
+--- a/fs/smb/client/asn1.c
++++ b/fs/smb/client/asn1.c
+@@ -52,6 +52,8 @@ int cifs_neg_token_init_mech_type(void *context, size_t hdrlen,
+ 		server->sec_kerberos = true;
+ 	else if (oid == OID_ntlmssp)
+ 		server->sec_ntlmssp = true;
++	else if (oid == OID_IAKerb)
++		server->sec_iakerb = true;
+ 	else {
+ 		char buf[50];
+ 
+diff --git a/fs/smb/client/cifs_spnego.c b/fs/smb/client/cifs_spnego.c
+index af7849e5974ff3..2ad067886ec3fa 100644
+--- a/fs/smb/client/cifs_spnego.c
++++ b/fs/smb/client/cifs_spnego.c
+@@ -130,11 +130,13 @@ cifs_get_spnego_key(struct cifs_ses *sesInfo,
+ 
+ 	dp = description + strlen(description);
+ 
+-	/* for now, only sec=krb5 and sec=mskrb5 are valid */
++	/* for now, only sec=krb5, sec=mskrb5 and sec=iakerb are valid */
+ 	if (server->sec_kerberos)
+ 		sprintf(dp, ";sec=krb5");
+ 	else if (server->sec_mskerberos)
+ 		sprintf(dp, ";sec=mskrb5");
++	else if (server->sec_iakerb)
++		sprintf(dp, ";sec=iakerb");
+ 	else {
+ 		cifs_dbg(VFS, "unknown or missing server auth type, use krb5\n");
+ 		sprintf(dp, ";sec=krb5");
+diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
+index b630beb757a44a..a8484af7a2fbc4 100644
+--- a/fs/smb/client/cifsglob.h
++++ b/fs/smb/client/cifsglob.h
+@@ -151,6 +151,7 @@ enum securityEnum {
+ 	NTLMv2,			/* Legacy NTLM auth with NTLMv2 hash */
+ 	RawNTLMSSP,		/* NTLMSSP without SPNEGO, NTLMv2 hash */
+ 	Kerberos,		/* Kerberos via SPNEGO */
++	IAKerb,			/* Kerberos proxy */
+ };
+ 
+ enum cifs_reparse_type {
+@@ -743,6 +744,7 @@ struct TCP_Server_Info {
+ 	bool	sec_kerberosu2u;	/* supports U2U Kerberos */
+ 	bool	sec_kerberos;		/* supports plain Kerberos */
+ 	bool	sec_mskerberos;		/* supports legacy MS Kerberos */
++	bool	sec_iakerb;		/* supports pass-through auth for Kerberos (krb5 proxy) */
+ 	bool	large_buf;		/* is current buffer large? */
+ 	/* use SMBD connection instead of socket */
+ 	bool	rdma;
+@@ -2115,6 +2117,8 @@ static inline char *get_security_type_str(enum securityEnum sectype)
+ 		return "Kerberos";
+ 	case NTLMv2:
+ 		return "NTLMv2";
++	case IAKerb:
++		return "IAKerb";
+ 	default:
+ 		return "Unknown";
+ 	}
+diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
+index fb51cdf5520617..d327f31b317db9 100644
+--- a/fs/smb/client/connect.c
++++ b/fs/smb/client/connect.c
+@@ -1873,9 +1873,8 @@ static int match_session(struct cifs_ses *ses,
+ 			 struct smb3_fs_context *ctx,
+ 			 bool match_super)
+ {
+-	if (ctx->sectype != Unspecified &&
+-	    ctx->sectype != ses->sectype)
+-		return 0;
++	struct TCP_Server_Info *server = ses->server;
++	enum securityEnum ctx_sec, ses_sec;
+ 
+ 	if (!match_super && ctx->dfs_root_ses != ses->dfs_root_ses)
+ 		return 0;
+@@ -1887,11 +1886,20 @@ static int match_session(struct cifs_ses *ses,
+ 	if (ses->chan_max < ctx->max_channels)
+ 		return 0;
+ 
+-	switch (ses->sectype) {
++	ctx_sec = server->ops->select_sectype(server, ctx->sectype);
++	ses_sec = server->ops->select_sectype(server, ses->sectype);
++
++	if (ctx_sec != ses_sec)
++		return 0;
++
++	switch (ctx_sec) {
++	case IAKerb:
+ 	case Kerberos:
+ 		if (!uid_eq(ctx->cred_uid, ses->cred_uid))
+ 			return 0;
+ 		break;
++	case NTLMv2:
++	case RawNTLMSSP:
+ 	default:
+ 		/* NULL username means anonymous session */
+ 		if (ses->user_name == NULL) {
+diff --git a/fs/smb/client/fs_context.c b/fs/smb/client/fs_context.c
+index 48606e2ddffdcd..f8bc1da3003781 100644
+--- a/fs/smb/client/fs_context.c
++++ b/fs/smb/client/fs_context.c
+@@ -164,6 +164,7 @@ const struct fs_parameter_spec smb3_fs_parameters[] = {
+ 	fsparam_string("username", Opt_user),
+ 	fsparam_string("pass", Opt_pass),
+ 	fsparam_string("password", Opt_pass),
++	fsparam_string("pass2", Opt_pass2),
+ 	fsparam_string("password2", Opt_pass2),
+ 	fsparam_string("ip", Opt_ip),
+ 	fsparam_string("addr", Opt_ip),
+@@ -1041,6 +1042,9 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
+ 		} else if (!strcmp("user", param->key) || !strcmp("username", param->key)) {
+ 			skip_parsing = true;
+ 			opt = Opt_user;
++		} else if (!strcmp("pass2", param->key) || !strcmp("password2", param->key)) {
++			skip_parsing = true;
++			opt = Opt_pass2;
+ 		}
+ 	}
+ 
+@@ -1250,21 +1254,21 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
+ 		}
+ 		break;
+ 	case Opt_acregmax:
+-		ctx->acregmax = HZ * result.uint_32;
+-		if (ctx->acregmax > CIFS_MAX_ACTIMEO) {
++		if (result.uint_32 > CIFS_MAX_ACTIMEO / HZ) {
+ 			cifs_errorf(fc, "acregmax too large\n");
+ 			goto cifs_parse_mount_err;
+ 		}
++		ctx->acregmax = HZ * result.uint_32;
+ 		break;
+ 	case Opt_acdirmax:
+-		ctx->acdirmax = HZ * result.uint_32;
+-		if (ctx->acdirmax > CIFS_MAX_ACTIMEO) {
++		if (result.uint_32 > CIFS_MAX_ACTIMEO / HZ) {
+ 			cifs_errorf(fc, "acdirmax too large\n");
+ 			goto cifs_parse_mount_err;
+ 		}
++		ctx->acdirmax = HZ * result.uint_32;
+ 		break;
+ 	case Opt_actimeo:
+-		if (HZ * result.uint_32 > CIFS_MAX_ACTIMEO) {
++		if (result.uint_32 > CIFS_MAX_ACTIMEO / HZ) {
+ 			cifs_errorf(fc, "timeout too large\n");
+ 			goto cifs_parse_mount_err;
+ 		}
+@@ -1276,11 +1280,11 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
+ 		ctx->acdirmax = ctx->acregmax = HZ * result.uint_32;
+ 		break;
+ 	case Opt_closetimeo:
+-		ctx->closetimeo = HZ * result.uint_32;
+-		if (ctx->closetimeo > SMB3_MAX_DCLOSETIMEO) {
++		if (result.uint_32 > SMB3_MAX_DCLOSETIMEO / HZ) {
+ 			cifs_errorf(fc, "closetimeo too large\n");
+ 			goto cifs_parse_mount_err;
+ 		}
++		ctx->closetimeo = HZ * result.uint_32;
+ 		break;
+ 	case Opt_echo_interval:
+ 		ctx->echo_interval = result.uint_32;
+diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c
+index a3f0835e12be31..97151715d1a413 100644
+--- a/fs/smb/client/inode.c
++++ b/fs/smb/client/inode.c
+@@ -1193,6 +1193,19 @@ static int reparse_info_to_fattr(struct cifs_open_info_data *data,
+ 			rc = server->ops->parse_reparse_point(cifs_sb,
+ 							      full_path,
+ 							      iov, data);
++			/*
++			 * If the reparse point was not handled but it is the
++			 * name surrogate which points to a directory, then treat
++			 * it as a new mount point. A name surrogate reparse point
++			 * represents another named entity in the system.
++			 */
++			if (rc == -EOPNOTSUPP &&
++			    IS_REPARSE_TAG_NAME_SURROGATE(data->reparse.tag) &&
++			    (le32_to_cpu(data->fi.Attributes) & ATTR_DIRECTORY)) {
++				rc = 0;
++				cifs_create_junction_fattr(fattr, sb);
++				goto out;
++			}
+ 		}
+ 		break;
+ 	}
+diff --git a/fs/smb/client/reparse.c b/fs/smb/client/reparse.c
+index e56a8df23fec9a..bb246ef0458fb5 100644
+--- a/fs/smb/client/reparse.c
++++ b/fs/smb/client/reparse.c
+@@ -651,13 +651,17 @@ int parse_reparse_point(struct reparse_data_buffer *buf,
+ 	case IO_REPARSE_TAG_LX_FIFO:
+ 	case IO_REPARSE_TAG_LX_CHR:
+ 	case IO_REPARSE_TAG_LX_BLK:
+-		break;
++		if (le16_to_cpu(buf->ReparseDataLength) != 0) {
++			cifs_dbg(VFS, "srv returned malformed buffer for reparse point: 0x%08x\n",
++				 le32_to_cpu(buf->ReparseTag));
++			return -EIO;
++		}
++		return 0;
+ 	default:
+ 		cifs_tcon_dbg(VFS | ONCE, "unhandled reparse tag: 0x%08x\n",
+ 			      le32_to_cpu(buf->ReparseTag));
+-		break;
++		return -EOPNOTSUPP;
+ 	}
+-	return 0;
+ }
+ 
+ int smb2_parse_reparse_point(struct cifs_sb_info *cifs_sb,
+diff --git a/fs/smb/client/sess.c b/fs/smb/client/sess.c
+index c88e9657f47a8d..95e14977baeab0 100644
+--- a/fs/smb/client/sess.c
++++ b/fs/smb/client/sess.c
+@@ -1263,12 +1263,13 @@ cifs_select_sectype(struct TCP_Server_Info *server, enum securityEnum requested)
+ 		switch (requested) {
+ 		case Kerberos:
+ 		case RawNTLMSSP:
++		case IAKerb:
+ 			return requested;
+ 		case Unspecified:
+ 			if (server->sec_ntlmssp &&
+ 			    (global_secflags & CIFSSEC_MAY_NTLMSSP))
+ 				return RawNTLMSSP;
+-			if ((server->sec_kerberos || server->sec_mskerberos) &&
++			if ((server->sec_kerberos || server->sec_mskerberos || server->sec_iakerb) &&
+ 			    (global_secflags & CIFSSEC_MAY_KRB5))
+ 				return Kerberos;
+ 			fallthrough;
+diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
+index 2e3f78fe9210ff..75b13175a2e781 100644
+--- a/fs/smb/client/smb2pdu.c
++++ b/fs/smb/client/smb2pdu.c
+@@ -1435,7 +1435,7 @@ smb2_select_sectype(struct TCP_Server_Info *server, enum securityEnum requested)
+ 		if (server->sec_ntlmssp &&
+ 			(global_secflags & CIFSSEC_MAY_NTLMSSP))
+ 			return RawNTLMSSP;
+-		if ((server->sec_kerberos || server->sec_mskerberos) &&
++		if ((server->sec_kerberos || server->sec_mskerberos || server->sec_iakerb) &&
+ 			(global_secflags & CIFSSEC_MAY_KRB5))
+ 			return Kerberos;
+ 		fallthrough;
+@@ -2175,7 +2175,7 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
+ 
+ tcon_error_exit:
+ 	if (rsp && rsp->hdr.Status == STATUS_BAD_NETWORK_NAME)
+-		cifs_tcon_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree);
++		cifs_dbg(VFS | ONCE, "BAD_NETWORK_NAME: %s\n", tree);
+ 	goto tcon_exit;
+ }
+ 
+diff --git a/fs/smb/common/smbfsctl.h b/fs/smb/common/smbfsctl.h
+index 4b379e84c46b94..3253a18ecb5cbc 100644
+--- a/fs/smb/common/smbfsctl.h
++++ b/fs/smb/common/smbfsctl.h
+@@ -159,6 +159,9 @@
+ #define IO_REPARSE_TAG_LX_CHR	     0x80000025
+ #define IO_REPARSE_TAG_LX_BLK	     0x80000026
+ 
++/* If Name Surrogate Bit is set, the file or directory represents another named entity in the system. */
++#define IS_REPARSE_TAG_NAME_SURROGATE(tag) (!!((tag) & 0x20000000))
++
+ /* fsctl flags */
+ /* If Flags is set to this value, the request is an FSCTL not ioctl request */
+ #define SMB2_0_IOCTL_IS_FSCTL		0x00000001
+diff --git a/fs/smb/server/connection.c b/fs/smb/server/connection.c
+index bf45822db5d589..ab11246ccd8a09 100644
+--- a/fs/smb/server/connection.c
++++ b/fs/smb/server/connection.c
+@@ -432,6 +432,26 @@ void ksmbd_conn_init_server_callbacks(struct ksmbd_conn_ops *ops)
+ 	default_conn_ops.terminate_fn = ops->terminate_fn;
+ }
+ 
++void ksmbd_conn_r_count_inc(struct ksmbd_conn *conn)
++{
++	atomic_inc(&conn->r_count);
++}
++
++void ksmbd_conn_r_count_dec(struct ksmbd_conn *conn)
++{
++	/*
++	 * Check the waitqueue to drop pending requests on
++	 * disconnection. waitqueue_active() is safe because it
++	 * uses an atomic operation for the condition.
++	 */
++	atomic_inc(&conn->refcnt);
++	if (!atomic_dec_return(&conn->r_count) && waitqueue_active(&conn->r_count_q))
++		wake_up(&conn->r_count_q);
++
++	if (atomic_dec_and_test(&conn->refcnt))
++		kfree(conn);
++}
++
+ int ksmbd_conn_transport_init(void)
+ {
+ 	int ret;
+diff --git a/fs/smb/server/connection.h b/fs/smb/server/connection.h
+index b379ae4fdcdffa..91c2318639e766 100644
+--- a/fs/smb/server/connection.h
++++ b/fs/smb/server/connection.h
+@@ -168,6 +168,8 @@ int ksmbd_conn_transport_init(void);
+ void ksmbd_conn_transport_destroy(void);
+ void ksmbd_conn_lock(struct ksmbd_conn *conn);
+ void ksmbd_conn_unlock(struct ksmbd_conn *conn);
++void ksmbd_conn_r_count_inc(struct ksmbd_conn *conn);
++void ksmbd_conn_r_count_dec(struct ksmbd_conn *conn);
+ 
+ /*
+  * WARNING
+diff --git a/fs/smb/server/ksmbd_work.c b/fs/smb/server/ksmbd_work.c
+index d7c676c151e209..544d8ccd29b0a0 100644
+--- a/fs/smb/server/ksmbd_work.c
++++ b/fs/smb/server/ksmbd_work.c
+@@ -26,7 +26,6 @@ struct ksmbd_work *ksmbd_alloc_work_struct(void)
+ 		INIT_LIST_HEAD(&work->request_entry);
+ 		INIT_LIST_HEAD(&work->async_request_entry);
+ 		INIT_LIST_HEAD(&work->fp_entry);
+-		INIT_LIST_HEAD(&work->interim_entry);
+ 		INIT_LIST_HEAD(&work->aux_read_list);
+ 		work->iov_alloc_cnt = 4;
+ 		work->iov = kcalloc(work->iov_alloc_cnt, sizeof(struct kvec),
+@@ -56,8 +55,6 @@ void ksmbd_free_work_struct(struct ksmbd_work *work)
+ 	kfree(work->tr_buf);
+ 	kvfree(work->request_buf);
+ 	kfree(work->iov);
+-	if (!list_empty(&work->interim_entry))
+-		list_del(&work->interim_entry);
+ 
+ 	if (work->async_id)
+ 		ksmbd_release_id(&work->conn->async_ida, work->async_id);
+diff --git a/fs/smb/server/ksmbd_work.h b/fs/smb/server/ksmbd_work.h
+index 8ca2c813246e61..d36393ff8310cd 100644
+--- a/fs/smb/server/ksmbd_work.h
++++ b/fs/smb/server/ksmbd_work.h
+@@ -89,7 +89,6 @@ struct ksmbd_work {
+ 	/* List head at conn->async_requests */
+ 	struct list_head                async_request_entry;
+ 	struct list_head                fp_entry;
+-	struct list_head                interim_entry;
+ };
+ 
+ /**
+diff --git a/fs/smb/server/oplock.c b/fs/smb/server/oplock.c
+index 4142c7ad5fa910..592fe665973a87 100644
+--- a/fs/smb/server/oplock.c
++++ b/fs/smb/server/oplock.c
+@@ -46,7 +46,6 @@ static struct oplock_info *alloc_opinfo(struct ksmbd_work *work,
+ 	opinfo->fid = id;
+ 	opinfo->Tid = Tid;
+ 	INIT_LIST_HEAD(&opinfo->op_entry);
+-	INIT_LIST_HEAD(&opinfo->interim_list);
+ 	init_waitqueue_head(&opinfo->oplock_q);
+ 	init_waitqueue_head(&opinfo->oplock_brk);
+ 	atomic_set(&opinfo->refcount, 1);
+@@ -635,6 +634,7 @@ static void __smb2_oplock_break_noti(struct work_struct *wk)
+ {
+ 	struct smb2_oplock_break *rsp = NULL;
+ 	struct ksmbd_work *work = container_of(wk, struct ksmbd_work, work);
++	struct ksmbd_conn *conn = work->conn;
+ 	struct oplock_break_info *br_info = work->request_buf;
+ 	struct smb2_hdr *rsp_hdr;
+ 	struct ksmbd_file *fp;
+@@ -690,6 +690,7 @@ static void __smb2_oplock_break_noti(struct work_struct *wk)
+ 
+ out:
+ 	ksmbd_free_work_struct(work);
++	ksmbd_conn_r_count_dec(conn);
+ }
+ 
+ /**
+@@ -724,6 +725,7 @@ static int smb2_oplock_break_noti(struct oplock_info *opinfo)
+ 	work->sess = opinfo->sess;
+ 
+ 	if (opinfo->op_state == OPLOCK_ACK_WAIT) {
++		ksmbd_conn_r_count_inc(conn);
+ 		INIT_WORK(&work->work, __smb2_oplock_break_noti);
+ 		ksmbd_queue_work(work);
+ 
+@@ -745,6 +747,7 @@ static void __smb2_lease_break_noti(struct work_struct *wk)
+ {
+ 	struct smb2_lease_break *rsp = NULL;
+ 	struct ksmbd_work *work = container_of(wk, struct ksmbd_work, work);
++	struct ksmbd_conn *conn = work->conn;
+ 	struct lease_break_info *br_info = work->request_buf;
+ 	struct smb2_hdr *rsp_hdr;
+ 
+@@ -791,6 +794,7 @@ static void __smb2_lease_break_noti(struct work_struct *wk)
+ 
+ out:
+ 	ksmbd_free_work_struct(work);
++	ksmbd_conn_r_count_dec(conn);
+ }
+ 
+ /**
+@@ -803,7 +807,6 @@ static void __smb2_lease_break_noti(struct work_struct *wk)
+ static int smb2_lease_break_noti(struct oplock_info *opinfo)
+ {
+ 	struct ksmbd_conn *conn = opinfo->conn;
+-	struct list_head *tmp, *t;
+ 	struct ksmbd_work *work;
+ 	struct lease_break_info *br_info;
+ 	struct lease *lease = opinfo->o_lease;
+@@ -831,16 +834,7 @@ static int smb2_lease_break_noti(struct oplock_info *opinfo)
+ 	work->sess = opinfo->sess;
+ 
+ 	if (opinfo->op_state == OPLOCK_ACK_WAIT) {
+-		list_for_each_safe(tmp, t, &opinfo->interim_list) {
+-			struct ksmbd_work *in_work;
+-
+-			in_work = list_entry(tmp, struct ksmbd_work,
+-					     interim_entry);
+-			setup_async_work(in_work, NULL, NULL);
+-			smb2_send_interim_resp(in_work, STATUS_PENDING);
+-			list_del_init(&in_work->interim_entry);
+-			release_async_work(in_work);
+-		}
++		ksmbd_conn_r_count_inc(conn);
+ 		INIT_WORK(&work->work, __smb2_lease_break_noti);
+ 		ksmbd_queue_work(work);
+ 		wait_for_break_ack(opinfo);
+@@ -871,7 +865,8 @@ static void wait_lease_breaking(struct oplock_info *opinfo)
+ 	}
+ }
+ 
+-static int oplock_break(struct oplock_info *brk_opinfo, int req_op_level)
++static int oplock_break(struct oplock_info *brk_opinfo, int req_op_level,
++			struct ksmbd_work *in_work)
+ {
+ 	int err = 0;
+ 
+@@ -914,9 +909,15 @@ static int oplock_break(struct oplock_info *brk_opinfo, int req_op_level)
+ 		}
+ 
+ 		if (lease->state & (SMB2_LEASE_WRITE_CACHING_LE |
+-				SMB2_LEASE_HANDLE_CACHING_LE))
++				SMB2_LEASE_HANDLE_CACHING_LE)) {
++			if (in_work) {
++				setup_async_work(in_work, NULL, NULL);
++				smb2_send_interim_resp(in_work, STATUS_PENDING);
++				release_async_work(in_work);
++			}
++
+ 			brk_opinfo->op_state = OPLOCK_ACK_WAIT;
+-		else
++		} else
+ 			atomic_dec(&brk_opinfo->breaking_cnt);
+ 	} else {
+ 		err = oplock_break_pending(brk_opinfo, req_op_level);
+@@ -1116,7 +1117,7 @@ void smb_send_parent_lease_break_noti(struct ksmbd_file *fp,
+ 			if (ksmbd_conn_releasing(opinfo->conn))
+ 				continue;
+ 
+-			oplock_break(opinfo, SMB2_OPLOCK_LEVEL_NONE);
++			oplock_break(opinfo, SMB2_OPLOCK_LEVEL_NONE, NULL);
+ 			opinfo_put(opinfo);
+ 		}
+ 	}
+@@ -1152,7 +1153,7 @@ void smb_lazy_parent_lease_break_close(struct ksmbd_file *fp)
+ 
+ 			if (ksmbd_conn_releasing(opinfo->conn))
+ 				continue;
+-			oplock_break(opinfo, SMB2_OPLOCK_LEVEL_NONE);
++			oplock_break(opinfo, SMB2_OPLOCK_LEVEL_NONE, NULL);
+ 			opinfo_put(opinfo);
+ 		}
+ 	}
+@@ -1252,8 +1253,7 @@ int smb_grant_oplock(struct ksmbd_work *work, int req_op_level, u64 pid,
+ 		goto op_break_not_needed;
+ 	}
+ 
+-	list_add(&work->interim_entry, &prev_opinfo->interim_list);
+-	err = oplock_break(prev_opinfo, SMB2_OPLOCK_LEVEL_II);
++	err = oplock_break(prev_opinfo, SMB2_OPLOCK_LEVEL_II, work);
+ 	opinfo_put(prev_opinfo);
+ 	if (err == -ENOENT)
+ 		goto set_lev;
+@@ -1322,8 +1322,7 @@ static void smb_break_all_write_oplock(struct ksmbd_work *work,
+ 	}
+ 
+ 	brk_opinfo->open_trunc = is_trunc;
+-	list_add(&work->interim_entry, &brk_opinfo->interim_list);
+-	oplock_break(brk_opinfo, SMB2_OPLOCK_LEVEL_II);
++	oplock_break(brk_opinfo, SMB2_OPLOCK_LEVEL_II, work);
+ 	opinfo_put(brk_opinfo);
+ }
+ 
+@@ -1386,7 +1385,7 @@ void smb_break_all_levII_oplock(struct ksmbd_work *work, struct ksmbd_file *fp,
+ 			    SMB2_LEASE_KEY_SIZE))
+ 			goto next;
+ 		brk_op->open_trunc = is_trunc;
+-		oplock_break(brk_op, SMB2_OPLOCK_LEVEL_NONE);
++		oplock_break(brk_op, SMB2_OPLOCK_LEVEL_NONE, NULL);
+ next:
+ 		opinfo_put(brk_op);
+ 		rcu_read_lock();
+diff --git a/fs/smb/server/oplock.h b/fs/smb/server/oplock.h
+index 72bc88a63a4082..3f64f07872638e 100644
+--- a/fs/smb/server/oplock.h
++++ b/fs/smb/server/oplock.h
+@@ -67,7 +67,6 @@ struct oplock_info {
+ 	bool			is_lease;
+ 	bool			open_trunc;	/* truncate on open */
+ 	struct lease		*o_lease;
+-	struct list_head        interim_list;
+ 	struct list_head        op_entry;
+ 	struct list_head        lease_entry;
+ 	wait_queue_head_t oplock_q; /* Other server threads */
+diff --git a/fs/smb/server/server.c b/fs/smb/server/server.c
+index d146b0e7c3a9dd..d523b860236ab3 100644
+--- a/fs/smb/server/server.c
++++ b/fs/smb/server/server.c
+@@ -270,17 +270,7 @@ static void handle_ksmbd_work(struct work_struct *wk)
+ 
+ 	ksmbd_conn_try_dequeue_request(work);
+ 	ksmbd_free_work_struct(work);
+-	/*
+-	 * Checking waitqueue to dropping pending requests on
+-	 * disconnection. waitqueue_active is safe because it
+-	 * uses atomic operation for condition.
+-	 */
+-	atomic_inc(&conn->refcnt);
+-	if (!atomic_dec_return(&conn->r_count) && waitqueue_active(&conn->r_count_q))
+-		wake_up(&conn->r_count_q);
+-
+-	if (atomic_dec_and_test(&conn->refcnt))
+-		kfree(conn);
++	ksmbd_conn_r_count_dec(conn);
+ }
+ 
+ /**
+@@ -310,7 +300,7 @@ static int queue_ksmbd_work(struct ksmbd_conn *conn)
+ 	conn->request_buf = NULL;
+ 
+ 	ksmbd_conn_enqueue_request(work);
+-	atomic_inc(&conn->r_count);
++	ksmbd_conn_r_count_inc(conn);
+ 	/* update activity on connection */
+ 	conn->last_active = jiffies;
+ 	INIT_WORK(&work->work, handle_ksmbd_work);
+diff --git a/fs/vboxsf/super.c b/fs/vboxsf/super.c
+index e95b8a48d8a02d..1d94bb7841081d 100644
+--- a/fs/vboxsf/super.c
++++ b/fs/vboxsf/super.c
+@@ -21,7 +21,8 @@
+ 
+ #define VBOXSF_SUPER_MAGIC 0x786f4256 /* 'VBox' little endian */
+ 
+-static const unsigned char VBSF_MOUNT_SIGNATURE[4] = "\000\377\376\375";
++static const unsigned char VBSF_MOUNT_SIGNATURE[4] = { '\000', '\377', '\376',
++						       '\375' };
+ 
+ static int follow_symlinks;
+ module_param(follow_symlinks, int, 0444);
+diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
+index a53cbe25691043..7b5e5388c3801a 100644
+--- a/include/linux/blk-mq.h
++++ b/include/linux/blk-mq.h
+@@ -871,12 +871,20 @@ static inline bool blk_mq_is_reserved_rq(struct request *rq)
+ 	return rq->rq_flags & RQF_RESV;
+ }
+ 
+-/*
++/**
++ * blk_mq_add_to_batch() - add a request to the completion batch
++ * @req: The request to add to the batch
++ * @iob: The batch to add the request to
++ * @is_error: Specify true if the request failed with an error
++ * @complete: The completion handler for the request
++ *
+  * Batched completions only work when there is no I/O error and no special
+  * ->end_io handler.
++ *
++ * Return: true when the request was added to the batch, otherwise false
+  */
+ static inline bool blk_mq_add_to_batch(struct request *req,
+-				       struct io_comp_batch *iob, int ioerror,
++				       struct io_comp_batch *iob, bool is_error,
+ 				       void (*complete)(struct io_comp_batch *))
+ {
+ 	/*
+@@ -884,7 +892,7 @@ static inline bool blk_mq_add_to_batch(struct request *req,
+ 	 * 1) No batch container
+ 	 * 2) Has scheduler data attached
+ 	 * 3) Not a passthrough request and end_io set
+-	 * 4) Not a passthrough request and an ioerror
++	 * 4) Not a passthrough request and failed with an error
+ 	 */
+ 	if (!iob)
+ 		return false;
+@@ -893,7 +901,7 @@ static inline bool blk_mq_add_to_batch(struct request *req,
+ 	if (!blk_rq_is_passthrough(req)) {
+ 		if (req->end_io)
+ 			return false;
+-		if (ioerror < 0)
++		if (is_error)
+ 			return false;
+ 	}
+ 
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index fc3de42d9d764f..b98f128c9afa78 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -3320,6 +3320,8 @@ extern const struct file_operations generic_ro_fops;
+ 
+ extern int readlink_copy(char __user *, int, const char *);
+ extern int page_readlink(struct dentry *, char __user *, int);
++extern const char *page_get_link_raw(struct dentry *, struct inode *,
++				     struct delayed_call *);
+ extern const char *page_get_link(struct dentry *, struct inode *,
+ 				 struct delayed_call *);
+ extern void page_put_link(void *);
+diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
+index 25a7b13574c28b..12f7a7b9c06e9b 100644
+--- a/include/linux/hugetlb.h
++++ b/include/linux/hugetlb.h
+@@ -687,6 +687,7 @@ struct huge_bootmem_page {
+ };
+ 
+ int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list);
++void wait_for_freed_hugetlb_folios(void);
+ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
+ 				unsigned long addr, int avoid_reserve);
+ struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
+@@ -1057,6 +1058,10 @@ static inline int isolate_or_dissolve_huge_page(struct page *page,
+ 	return -ENOMEM;
+ }
+ 
++static inline void wait_for_freed_hugetlb_folios(void)
++{
++}
++
+ static inline struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
+ 					   unsigned long addr,
+ 					   int avoid_reserve)
+diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
+index 22f6b018cff8de..c9dc15355f1bac 100644
+--- a/include/linux/pci_ids.h
++++ b/include/linux/pci_ids.h
+@@ -3133,6 +3133,7 @@
+ #define PCI_DEVICE_ID_INTEL_HDA_LNL_P	0xa828
+ #define PCI_DEVICE_ID_INTEL_S21152BB	0xb152
+ #define PCI_DEVICE_ID_INTEL_HDA_BMG	0xe2f7
++#define PCI_DEVICE_ID_INTEL_HDA_PTL_H	0xe328
+ #define PCI_DEVICE_ID_INTEL_HDA_PTL	0xe428
+ #define PCI_DEVICE_ID_INTEL_HDA_CML_R	0xf0c8
+ #define PCI_DEVICE_ID_INTEL_HDA_RKL_S	0xf1c8
+diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
+index ba7b52584770d7..c95f7e6ba25514 100644
+--- a/include/net/bluetooth/hci_core.h
++++ b/include/net/bluetooth/hci_core.h
+@@ -804,6 +804,7 @@ struct hci_conn_params {
+ extern struct list_head hci_dev_list;
+ extern struct list_head hci_cb_list;
+ extern rwlock_t hci_dev_list_lock;
++extern struct mutex hci_cb_list_lock;
+ 
+ #define hci_dev_set_flag(hdev, nr)             set_bit((nr), (hdev)->dev_flags)
+ #define hci_dev_clear_flag(hdev, nr)           clear_bit((nr), (hdev)->dev_flags)
+@@ -2006,47 +2007,24 @@ struct hci_cb {
+ 
+ 	char *name;
+ 
+-	bool (*match)		(struct hci_conn *conn);
+ 	void (*connect_cfm)	(struct hci_conn *conn, __u8 status);
+ 	void (*disconn_cfm)	(struct hci_conn *conn, __u8 status);
+ 	void (*security_cfm)	(struct hci_conn *conn, __u8 status,
+-				 __u8 encrypt);
++								__u8 encrypt);
+ 	void (*key_change_cfm)	(struct hci_conn *conn, __u8 status);
+ 	void (*role_switch_cfm)	(struct hci_conn *conn, __u8 status, __u8 role);
+ };
+ 
+-static inline void hci_cb_lookup(struct hci_conn *conn, struct list_head *list)
+-{
+-	struct hci_cb *cb, *cpy;
+-
+-	rcu_read_lock();
+-	list_for_each_entry_rcu(cb, &hci_cb_list, list) {
+-		if (cb->match && cb->match(conn)) {
+-			cpy = kmalloc(sizeof(*cpy), GFP_ATOMIC);
+-			if (!cpy)
+-				break;
+-
+-			*cpy = *cb;
+-			INIT_LIST_HEAD(&cpy->list);
+-			list_add_rcu(&cpy->list, list);
+-		}
+-	}
+-	rcu_read_unlock();
+-}
+-
+ static inline void hci_connect_cfm(struct hci_conn *conn, __u8 status)
+ {
+-	struct list_head list;
+-	struct hci_cb *cb, *tmp;
+-
+-	INIT_LIST_HEAD(&list);
+-	hci_cb_lookup(conn, &list);
++	struct hci_cb *cb;
+ 
+-	list_for_each_entry_safe(cb, tmp, &list, list) {
++	mutex_lock(&hci_cb_list_lock);
++	list_for_each_entry(cb, &hci_cb_list, list) {
+ 		if (cb->connect_cfm)
+ 			cb->connect_cfm(conn, status);
+-		kfree(cb);
+ 	}
++	mutex_unlock(&hci_cb_list_lock);
+ 
+ 	if (conn->connect_cfm_cb)
+ 		conn->connect_cfm_cb(conn, status);
+@@ -2054,43 +2032,22 @@ static inline void hci_connect_cfm(struct hci_conn *conn, __u8 status)
+ 
+ static inline void hci_disconn_cfm(struct hci_conn *conn, __u8 reason)
+ {
+-	struct list_head list;
+-	struct hci_cb *cb, *tmp;
+-
+-	INIT_LIST_HEAD(&list);
+-	hci_cb_lookup(conn, &list);
++	struct hci_cb *cb;
+ 
+-	list_for_each_entry_safe(cb, tmp, &list, list) {
++	mutex_lock(&hci_cb_list_lock);
++	list_for_each_entry(cb, &hci_cb_list, list) {
+ 		if (cb->disconn_cfm)
+ 			cb->disconn_cfm(conn, reason);
+-		kfree(cb);
+ 	}
++	mutex_unlock(&hci_cb_list_lock);
+ 
+ 	if (conn->disconn_cfm_cb)
+ 		conn->disconn_cfm_cb(conn, reason);
+ }
+ 
+-static inline void hci_security_cfm(struct hci_conn *conn, __u8 status,
+-				    __u8 encrypt)
+-{
+-	struct list_head list;
+-	struct hci_cb *cb, *tmp;
+-
+-	INIT_LIST_HEAD(&list);
+-	hci_cb_lookup(conn, &list);
+-
+-	list_for_each_entry_safe(cb, tmp, &list, list) {
+-		if (cb->security_cfm)
+-			cb->security_cfm(conn, status, encrypt);
+-		kfree(cb);
+-	}
+-
+-	if (conn->security_cfm_cb)
+-		conn->security_cfm_cb(conn, status);
+-}
+-
+ static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status)
+ {
++	struct hci_cb *cb;
+ 	__u8 encrypt;
+ 
+ 	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
+@@ -2098,11 +2055,20 @@ static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status)
+ 
+ 	encrypt = test_bit(HCI_CONN_ENCRYPT, &conn->flags) ? 0x01 : 0x00;
+ 
+-	hci_security_cfm(conn, status, encrypt);
++	mutex_lock(&hci_cb_list_lock);
++	list_for_each_entry(cb, &hci_cb_list, list) {
++		if (cb->security_cfm)
++			cb->security_cfm(conn, status, encrypt);
++	}
++	mutex_unlock(&hci_cb_list_lock);
++
++	if (conn->security_cfm_cb)
++		conn->security_cfm_cb(conn, status);
+ }
+ 
+ static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status)
+ {
++	struct hci_cb *cb;
+ 	__u8 encrypt;
+ 
+ 	if (conn->state == BT_CONFIG) {
+@@ -2129,38 +2095,40 @@ static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status)
+ 			conn->sec_level = conn->pending_sec_level;
+ 	}
+ 
+-	hci_security_cfm(conn, status, encrypt);
++	mutex_lock(&hci_cb_list_lock);
++	list_for_each_entry(cb, &hci_cb_list, list) {
++		if (cb->security_cfm)
++			cb->security_cfm(conn, status, encrypt);
++	}
++	mutex_unlock(&hci_cb_list_lock);
++
++	if (conn->security_cfm_cb)
++		conn->security_cfm_cb(conn, status);
+ }
+ 
+ static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status)
+ {
+-	struct list_head list;
+-	struct hci_cb *cb, *tmp;
+-
+-	INIT_LIST_HEAD(&list);
+-	hci_cb_lookup(conn, &list);
++	struct hci_cb *cb;
+ 
+-	list_for_each_entry_safe(cb, tmp, &list, list) {
++	mutex_lock(&hci_cb_list_lock);
++	list_for_each_entry(cb, &hci_cb_list, list) {
+ 		if (cb->key_change_cfm)
+ 			cb->key_change_cfm(conn, status);
+-		kfree(cb);
+ 	}
++	mutex_unlock(&hci_cb_list_lock);
+ }
+ 
+ static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status,
+ 								__u8 role)
+ {
+-	struct list_head list;
+-	struct hci_cb *cb, *tmp;
+-
+-	INIT_LIST_HEAD(&list);
+-	hci_cb_lookup(conn, &list);
++	struct hci_cb *cb;
+ 
+-	list_for_each_entry_safe(cb, tmp, &list, list) {
++	mutex_lock(&hci_cb_list_lock);
++	list_for_each_entry(cb, &hci_cb_list, list) {
+ 		if (cb->role_switch_cfm)
+ 			cb->role_switch_cfm(conn, status, role);
+-		kfree(cb);
+ 	}
++	mutex_unlock(&hci_cb_list_lock);
+ }
+ 
+ static inline bool hci_bdaddr_is_rpa(bdaddr_t *bdaddr, u8 addr_type)
+diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
+index d9c767cf773de9..9189354c568f44 100644
+--- a/include/net/bluetooth/l2cap.h
++++ b/include/net/bluetooth/l2cap.h
+@@ -668,7 +668,7 @@ struct l2cap_conn {
+ 	struct l2cap_chan	*smp;
+ 
+ 	struct list_head	chan_l;
+-	struct mutex		chan_lock;
++	struct mutex		lock;
+ 	struct kref		ref;
+ 	struct list_head	users;
+ };
+@@ -970,6 +970,7 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err);
+ void l2cap_send_conn_req(struct l2cap_chan *chan);
+ 
+ struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn);
++struct l2cap_conn *l2cap_conn_hold_unless_zero(struct l2cap_conn *conn);
+ void l2cap_conn_put(struct l2cap_conn *conn);
+ 
+ int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user);
+diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
+index 788513cc384b7f..757abcb54d117d 100644
+--- a/include/net/netfilter/nf_tables.h
++++ b/include/net/netfilter/nf_tables.h
+@@ -1889,7 +1889,7 @@ void nft_chain_filter_fini(void);
+ void __init nft_chain_route_init(void);
+ void nft_chain_route_fini(void);
+ 
+-void nf_tables_trans_destroy_flush_work(void);
++void nf_tables_trans_destroy_flush_work(struct net *net);
+ 
+ int nf_msecs_to_jiffies64(const struct nlattr *nla, u64 *result);
+ __be64 nf_jiffies64_to_msecs(u64 input);
+@@ -1903,6 +1903,7 @@ static inline int nft_request_module(struct net *net, const char *fmt, ...) { re
+ struct nftables_pernet {
+ 	struct list_head	tables;
+ 	struct list_head	commit_list;
++	struct list_head	destroy_list;
+ 	struct list_head	commit_set_list;
+ 	struct list_head	binding_list;
+ 	struct list_head	module_list;
+@@ -1913,6 +1914,7 @@ struct nftables_pernet {
+ 	unsigned int		base_seq;
+ 	unsigned int		gc_seq;
+ 	u8			validate_state;
++	struct work_struct	destroy_work;
+ };
+ 
+ extern unsigned int nf_tables_net_id;
+diff --git a/include/sound/soc.h b/include/sound/soc.h
+index e6e359c1a2ac4d..db3b464a91c7b7 100644
+--- a/include/sound/soc.h
++++ b/include/sound/soc.h
+@@ -1251,7 +1251,10 @@ void snd_soc_close_delayed_work(struct snd_soc_pcm_runtime *rtd);
+ 
+ /* mixer control */
+ struct soc_mixer_control {
+-	int min, max, platform_max;
++	/* Minimum and maximum specified as written to the hardware */
++	int min, max;
++	/* Limited maximum value specified as presented through the control */
++	int platform_max;
+ 	int reg, rreg;
+ 	unsigned int shift, rshift;
+ 	unsigned int sign_bit;
+diff --git a/init/Kconfig b/init/Kconfig
+index 7256fa127530ff..293c565c62168e 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -1958,7 +1958,7 @@ config RUST
+ 	depends on !MODVERSIONS
+ 	depends on !GCC_PLUGIN_RANDSTRUCT
+ 	depends on !RANDSTRUCT
+-	depends on !DEBUG_INFO_BTF || PAHOLE_HAS_LANG_EXCLUDE
++	depends on !DEBUG_INFO_BTF || (PAHOLE_HAS_LANG_EXCLUDE && !LTO)
+ 	depends on !CFI_CLANG || HAVE_CFI_ICALL_NORMALIZE_INTEGERS_RUSTC
+ 	select CFI_ICALL_NORMALIZE_INTEGERS if CFI_CLANG
+ 	depends on !CALL_PADDING || RUSTC_VERSION >= 108100
+diff --git a/io_uring/futex.c b/io_uring/futex.c
+index 914848f46beb21..01f044f89f8fa9 100644
+--- a/io_uring/futex.c
++++ b/io_uring/futex.c
+@@ -349,7 +349,7 @@ int io_futex_wait(struct io_kiocb *req, unsigned int issue_flags)
+ 		hlist_add_head(&req->hash_node, &ctx->futex_list);
+ 		io_ring_submit_unlock(ctx, issue_flags);
+ 
+-		futex_queue(&ifd->q, hb);
++		futex_queue(&ifd->q, hb, NULL);
+ 		return IOU_ISSUE_SKIP_COMPLETE;
+ 	}
+ 
+diff --git a/io_uring/io-wq.c b/io_uring/io-wq.c
+index a38f36b6806041..a2d577b099308e 100644
+--- a/io_uring/io-wq.c
++++ b/io_uring/io-wq.c
+@@ -64,7 +64,7 @@ struct io_worker {
+ 
+ 	union {
+ 		struct rcu_head rcu;
+-		struct work_struct work;
++		struct delayed_work work;
+ 	};
+ };
+ 
+@@ -770,6 +770,18 @@ static inline bool io_should_retry_thread(struct io_worker *worker, long err)
+ 	}
+ }
+ 
++static void queue_create_worker_retry(struct io_worker *worker)
++{
++	/*
++	 * We only bother retrying because there's a chance that the
++	 * failure to create a worker is due to some temporary condition
++	 * in the forking task (e.g. outstanding signal); give the task
++	 * some time to clear that condition.
++	 */
++	schedule_delayed_work(&worker->work,
++			      msecs_to_jiffies(worker->init_retries * 5));
++}
++
+ static void create_worker_cont(struct callback_head *cb)
+ {
+ 	struct io_worker *worker;
+@@ -809,12 +821,13 @@ static void create_worker_cont(struct callback_head *cb)
+ 
+ 	/* re-create attempts grab a new worker ref, drop the existing one */
+ 	io_worker_release(worker);
+-	schedule_work(&worker->work);
++	queue_create_worker_retry(worker);
+ }
+ 
+ static void io_workqueue_create(struct work_struct *work)
+ {
+-	struct io_worker *worker = container_of(work, struct io_worker, work);
++	struct io_worker *worker = container_of(work, struct io_worker,
++						work.work);
+ 	struct io_wq_acct *acct = io_wq_get_acct(worker);
+ 
+ 	if (!io_queue_worker_create(worker, acct, create_worker_cont))
+@@ -855,8 +868,8 @@ static bool create_io_worker(struct io_wq *wq, int index)
+ 		kfree(worker);
+ 		goto fail;
+ 	} else {
+-		INIT_WORK(&worker->work, io_workqueue_create);
+-		schedule_work(&worker->work);
++		INIT_DELAYED_WORK(&worker->work, io_workqueue_create);
++		queue_create_worker_retry(worker);
+ 	}
+ 
+ 	return true;
+diff --git a/kernel/futex/core.c b/kernel/futex/core.c
+index 136768ae26375f..010607a9919498 100644
+--- a/kernel/futex/core.c
++++ b/kernel/futex/core.c
+@@ -554,7 +554,8 @@ void futex_q_unlock(struct futex_hash_bucket *hb)
+ 	futex_hb_waiters_dec(hb);
+ }
+ 
+-void __futex_queue(struct futex_q *q, struct futex_hash_bucket *hb)
++void __futex_queue(struct futex_q *q, struct futex_hash_bucket *hb,
++		   struct task_struct *task)
+ {
+ 	int prio;
+ 
+@@ -570,7 +571,7 @@ void __futex_queue(struct futex_q *q, struct futex_hash_bucket *hb)
+ 
+ 	plist_node_init(&q->list, prio);
+ 	plist_add(&q->list, &hb->chain);
+-	q->task = current;
++	q->task = task;
+ }
+ 
+ /**
+diff --git a/kernel/futex/futex.h b/kernel/futex/futex.h
+index 8b195d06f4e8ed..12e47386232ed6 100644
+--- a/kernel/futex/futex.h
++++ b/kernel/futex/futex.h
+@@ -230,13 +230,15 @@ extern int futex_get_value_locked(u32 *dest, u32 __user *from);
+ extern struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb, union futex_key *key);
+ 
+ extern void __futex_unqueue(struct futex_q *q);
+-extern void __futex_queue(struct futex_q *q, struct futex_hash_bucket *hb);
++extern void __futex_queue(struct futex_q *q, struct futex_hash_bucket *hb,
++				struct task_struct *task);
+ extern int futex_unqueue(struct futex_q *q);
+ 
+ /**
+  * futex_queue() - Enqueue the futex_q on the futex_hash_bucket
+  * @q:	The futex_q to enqueue
+  * @hb:	The destination hash bucket
++ * @task: Task queueing this futex
+  *
+  * The hb->lock must be held by the caller, and is released here. A call to
+  * futex_queue() is typically paired with exactly one call to futex_unqueue().  The
+@@ -244,11 +246,14 @@ extern int futex_unqueue(struct futex_q *q);
+  * or nothing if the unqueue is done as part of the wake process and the unqueue
+  * state is implicit in the state of woken task (see futex_wait_requeue_pi() for
+  * an example).
++ *
++ * Note that @task may be NULL, for async usage of futexes.
+  */
+-static inline void futex_queue(struct futex_q *q, struct futex_hash_bucket *hb)
++static inline void futex_queue(struct futex_q *q, struct futex_hash_bucket *hb,
++			       struct task_struct *task)
+ 	__releases(&hb->lock)
+ {
+-	__futex_queue(q, hb);
++	__futex_queue(q, hb, task);
+ 	spin_unlock(&hb->lock);
+ }
+ 
+diff --git a/kernel/futex/pi.c b/kernel/futex/pi.c
+index 5722467f273794..8ec12f1aff83be 100644
+--- a/kernel/futex/pi.c
++++ b/kernel/futex/pi.c
+@@ -981,7 +981,7 @@ int futex_lock_pi(u32 __user *uaddr, unsigned int flags, ktime_t *time, int tryl
+ 	/*
+ 	 * Only actually queue now that the atomic ops are done:
+ 	 */
+-	__futex_queue(&q, hb);
++	__futex_queue(&q, hb, current);
+ 
+ 	if (trylock) {
+ 		ret = rt_mutex_futex_trylock(&q.pi_state->pi_mutex);
+diff --git a/kernel/futex/waitwake.c b/kernel/futex/waitwake.c
+index 3a10375d952186..a9056acb75eef9 100644
+--- a/kernel/futex/waitwake.c
++++ b/kernel/futex/waitwake.c
+@@ -350,7 +350,7 @@ void futex_wait_queue(struct futex_hash_bucket *hb, struct futex_q *q,
+ 	 * access to the hash list and forcing another memory barrier.
+ 	 */
+ 	set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
+-	futex_queue(q, hb);
++	futex_queue(q, hb, current);
+ 
+ 	/* Arm the timer */
+ 	if (timeout)
+@@ -461,7 +461,7 @@ int futex_wait_multiple_setup(struct futex_vector *vs, int count, int *woken)
+ 			 * next futex. Queue each futex at this moment so hb can
+ 			 * be unlocked.
+ 			 */
+-			futex_queue(q, hb);
++			futex_queue(q, hb, current);
+ 			continue;
+ 		}
+ 
+diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
+index 3e486ccaa4ca34..8e52c1dd06284c 100644
+--- a/kernel/rcu/tree.c
++++ b/kernel/rcu/tree.c
+@@ -3191,6 +3191,8 @@ void call_rcu(struct rcu_head *head, rcu_callback_t func)
+ }
+ EXPORT_SYMBOL_GPL(call_rcu);
+ 
++static struct workqueue_struct *rcu_reclaim_wq;
++
+ /* Maximum number of jiffies to wait before draining a batch. */
+ #define KFREE_DRAIN_JIFFIES (5 * HZ)
+ #define KFREE_N_BATCHES 2
+@@ -3519,10 +3521,10 @@ __schedule_delayed_monitor_work(struct kfree_rcu_cpu *krcp)
+ 	if (delayed_work_pending(&krcp->monitor_work)) {
+ 		delay_left = krcp->monitor_work.timer.expires - jiffies;
+ 		if (delay < delay_left)
+-			mod_delayed_work(system_unbound_wq, &krcp->monitor_work, delay);
++			mod_delayed_work(rcu_reclaim_wq, &krcp->monitor_work, delay);
+ 		return;
+ 	}
+-	queue_delayed_work(system_unbound_wq, &krcp->monitor_work, delay);
++	queue_delayed_work(rcu_reclaim_wq, &krcp->monitor_work, delay);
+ }
+ 
+ static void
+@@ -3620,7 +3622,7 @@ kvfree_rcu_queue_batch(struct kfree_rcu_cpu *krcp)
+ 			// "free channels", the batch can handle. Break
+ 			// the loop since it is done with this CPU thus
+ 			// queuing an RCU work is _always_ success here.
+-			queued = queue_rcu_work(system_unbound_wq, &krwp->rcu_work);
++			queued = queue_rcu_work(rcu_reclaim_wq, &krwp->rcu_work);
+ 			WARN_ON_ONCE(!queued);
+ 			break;
+ 		}
+@@ -3708,7 +3710,7 @@ run_page_cache_worker(struct kfree_rcu_cpu *krcp)
+ 	if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
+ 			!atomic_xchg(&krcp->work_in_progress, 1)) {
+ 		if (atomic_read(&krcp->backoff_page_cache_fill)) {
+-			queue_delayed_work(system_unbound_wq,
++			queue_delayed_work(rcu_reclaim_wq,
+ 				&krcp->page_cache_work,
+ 					msecs_to_jiffies(rcu_delay_page_cache_fill_msec));
+ 		} else {
+@@ -5662,6 +5664,10 @@ static void __init kfree_rcu_batch_init(void)
+ 	int i, j;
+ 	struct shrinker *kfree_rcu_shrinker;
+ 
++	rcu_reclaim_wq = alloc_workqueue("kvfree_rcu_reclaim",
++			WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
++	WARN_ON(!rcu_reclaim_wq);
++
+ 	/* Clamp it to [0:100] seconds interval. */
+ 	if (rcu_delay_page_cache_fill_msec < 0 ||
+ 		rcu_delay_page_cache_fill_msec > 100 * MSEC_PER_SEC) {
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 9803f10a082a7b..1f817d0c5d2d0e 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -1058,9 +1058,10 @@ void wake_up_q(struct wake_q_head *head)
+ 		struct task_struct *task;
+ 
+ 		task = container_of(node, struct task_struct, wake_q);
+-		/* Task can safely be re-inserted now: */
+ 		node = node->next;
+-		task->wake_q.next = NULL;
++		/* pairs with cmpxchg_relaxed() in __wake_q_add() */
++		WRITE_ONCE(task->wake_q.next, NULL);
++		/* Task can safely be re-inserted now. */
+ 
+ 		/*
+ 		 * wake_up_process() executes a full barrier, which pairs with
+diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
+index 82b165bf48c423..1e3bc0774efd51 100644
+--- a/kernel/sched/debug.c
++++ b/kernel/sched/debug.c
+@@ -1264,6 +1264,8 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
+ 	if (task_has_dl_policy(p)) {
+ 		P(dl.runtime);
+ 		P(dl.deadline);
++	} else if (fair_policy(p->policy)) {
++		P(se.slice);
+ 	}
+ #ifdef CONFIG_SCHED_CLASS_EXT
+ 	__PS("ext.enabled", task_on_scx(p));
+diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
+index 325fd5b9d47152..e5cab54dfdd142 100644
+--- a/kernel/sched/ext.c
++++ b/kernel/sched/ext.c
+@@ -6052,6 +6052,9 @@ __bpf_kfunc_start_defs();
+ __bpf_kfunc s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu,
+ 				       u64 wake_flags, bool *is_idle)
+ {
++	if (!ops_cpu_valid(prev_cpu, NULL))
++		goto prev_cpu;
++
+ 	if (!static_branch_likely(&scx_builtin_idle_enabled)) {
+ 		scx_ops_error("built-in idle tracking is disabled");
+ 		goto prev_cpu;
+diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
+index d116c28564f26c..db9c06bb23116a 100644
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -156,11 +156,6 @@ static struct hrtimer_cpu_base migration_cpu_base = {
+ 
+ #define migration_base	migration_cpu_base.clock_base[0]
+ 
+-static inline bool is_migration_base(struct hrtimer_clock_base *base)
+-{
+-	return base == &migration_base;
+-}
+-
+ /*
+  * We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock
+  * means that all timers which are tied to this base via timer->base are
+@@ -312,11 +307,6 @@ switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
+ 
+ #else /* CONFIG_SMP */
+ 
+-static inline bool is_migration_base(struct hrtimer_clock_base *base)
+-{
+-	return false;
+-}
+-
+ static inline struct hrtimer_clock_base *
+ lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
+ 	__acquires(&timer->base->cpu_base->lock)
+@@ -1441,6 +1431,18 @@ static void hrtimer_sync_wait_running(struct hrtimer_cpu_base *cpu_base,
+ 	}
+ }
+ 
++#ifdef CONFIG_SMP
++static __always_inline bool is_migration_base(struct hrtimer_clock_base *base)
++{
++	return base == &migration_base;
++}
++#else
++static __always_inline bool is_migration_base(struct hrtimer_clock_base *base)
++{
++	return false;
++}
++#endif
++
+ /*
+  * This function is called on PREEMPT_RT kernels when the fast path
+  * deletion of a timer failed because the timer callback function was
+diff --git a/kernel/vhost_task.c b/kernel/vhost_task.c
+index 8800f5acc00717..2ef2e1b8009165 100644
+--- a/kernel/vhost_task.c
++++ b/kernel/vhost_task.c
+@@ -133,7 +133,7 @@ struct vhost_task *vhost_task_create(bool (*fn)(void *),
+ 
+ 	vtsk = kzalloc(sizeof(*vtsk), GFP_KERNEL);
+ 	if (!vtsk)
+-		return NULL;
++		return ERR_PTR(-ENOMEM);
+ 	init_completion(&vtsk->exited);
+ 	mutex_init(&vtsk->exit_mutex);
+ 	vtsk->data = arg;
+@@ -145,7 +145,7 @@ struct vhost_task *vhost_task_create(bool (*fn)(void *),
+ 	tsk = copy_process(NULL, 0, NUMA_NO_NODE, &args);
+ 	if (IS_ERR(tsk)) {
+ 		kfree(vtsk);
+-		return NULL;
++		return ERR_PTR(PTR_ERR(tsk));
+ 	}
+ 
+ 	vtsk->task = tsk;
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 1e9aa6de4e21ea..e28e820fdb7756 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -2955,6 +2955,14 @@ int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list)
+ 	return ret;
+ }
+ 
++void wait_for_freed_hugetlb_folios(void)
++{
++	if (llist_empty(&hpage_freelist))
++		return;
++
++	flush_work(&free_hpage_work);
++}
++
+ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
+ 				    unsigned long addr, int avoid_reserve)
+ {
+diff --git a/mm/page_isolation.c b/mm/page_isolation.c
+index 7e04047977cfea..6989c5ffd47417 100644
+--- a/mm/page_isolation.c
++++ b/mm/page_isolation.c
+@@ -611,6 +611,16 @@ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
+ 	struct zone *zone;
+ 	int ret;
+ 
++	/*
++	 * Due to the deferred freeing of hugetlb folios, the hugepage folios may
++	 * not be released to the buddy system immediately. This can cause PageBuddy()
++	 * to fail in __test_page_isolated_in_pageblock(). To ensure that the
++	 * hugetlb folios are properly released back to the buddy system, we
++	 * invoke the wait_for_freed_hugetlb_folios() function to wait for the
++	 * release to complete.
++	 */
++	wait_for_freed_hugetlb_folios();
++
+ 	/*
+ 	 * Note: pageblock_nr_pages != MAX_PAGE_ORDER. Then, chunks of free
+ 	 * pages are not aligned to pageblock_nr_pages.
+diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
+index 66011831d7983d..080a00d916f6b6 100644
+--- a/mm/userfaultfd.c
++++ b/mm/userfaultfd.c
+@@ -18,6 +18,7 @@
+ #include <asm/tlbflush.h>
+ #include <asm/tlb.h>
+ #include "internal.h"
++#include "swap.h"
+ 
+ static __always_inline
+ bool validate_dst_vma(struct vm_area_struct *dst_vma, unsigned long dst_end)
+@@ -1067,15 +1068,13 @@ static int move_present_pte(struct mm_struct *mm,
+ 	return err;
+ }
+ 
+-static int move_swap_pte(struct mm_struct *mm,
++static int move_swap_pte(struct mm_struct *mm, struct vm_area_struct *dst_vma,
+ 			 unsigned long dst_addr, unsigned long src_addr,
+ 			 pte_t *dst_pte, pte_t *src_pte,
+ 			 pte_t orig_dst_pte, pte_t orig_src_pte,
+-			 spinlock_t *dst_ptl, spinlock_t *src_ptl)
++			 spinlock_t *dst_ptl, spinlock_t *src_ptl,
++			 struct folio *src_folio)
+ {
+-	if (!pte_swp_exclusive(orig_src_pte))
+-		return -EBUSY;
+-
+ 	double_pt_lock(dst_ptl, src_ptl);
+ 
+ 	if (!pte_same(ptep_get(src_pte), orig_src_pte) ||
+@@ -1084,6 +1083,16 @@ static int move_swap_pte(struct mm_struct *mm,
+ 		return -EAGAIN;
+ 	}
+ 
++	/*
++	 * The src_folio resides in the swapcache, requiring an update to its
++	 * index and mapping to align with the dst_vma, where a swap-in may
++	 * occur and hit the swapcache after moving the PTE.
++	 */
++	if (src_folio) {
++		folio_move_anon_rmap(src_folio, dst_vma);
++		src_folio->index = linear_page_index(dst_vma, dst_addr);
++	}
++
+ 	orig_src_pte = ptep_get_and_clear(mm, src_addr, src_pte);
+ 	set_pte_at(mm, dst_addr, dst_pte, orig_src_pte);
+ 	double_pt_unlock(dst_ptl, src_ptl);
+@@ -1130,6 +1139,7 @@ static int move_pages_pte(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd,
+ 			  __u64 mode)
+ {
+ 	swp_entry_t entry;
++	struct swap_info_struct *si = NULL;
+ 	pte_t orig_src_pte, orig_dst_pte;
+ 	pte_t src_folio_pte;
+ 	spinlock_t *src_ptl, *dst_ptl;
+@@ -1255,8 +1265,8 @@ static int move_pages_pte(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd,
+ 			spin_unlock(src_ptl);
+ 
+ 			if (!locked) {
+-				pte_unmap(&orig_src_pte);
+-				pte_unmap(&orig_dst_pte);
++				pte_unmap(src_pte);
++				pte_unmap(dst_pte);
+ 				src_pte = dst_pte = NULL;
+ 				/* now we can block and wait */
+ 				folio_lock(src_folio);
+@@ -1272,8 +1282,8 @@ static int move_pages_pte(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd,
+ 		/* at this point we have src_folio locked */
+ 		if (folio_test_large(src_folio)) {
+ 			/* split_folio() can block */
+-			pte_unmap(&orig_src_pte);
+-			pte_unmap(&orig_dst_pte);
++			pte_unmap(src_pte);
++			pte_unmap(dst_pte);
+ 			src_pte = dst_pte = NULL;
+ 			err = split_folio(src_folio);
+ 			if (err)
+@@ -1298,8 +1308,8 @@ static int move_pages_pte(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd,
+ 				goto out;
+ 			}
+ 			if (!anon_vma_trylock_write(src_anon_vma)) {
+-				pte_unmap(&orig_src_pte);
+-				pte_unmap(&orig_dst_pte);
++				pte_unmap(src_pte);
++				pte_unmap(dst_pte);
+ 				src_pte = dst_pte = NULL;
+ 				/* now we can block and wait */
+ 				anon_vma_lock_write(src_anon_vma);
+@@ -1312,11 +1322,13 @@ static int move_pages_pte(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd,
+ 				       orig_dst_pte, orig_src_pte,
+ 				       dst_ptl, src_ptl, src_folio);
+ 	} else {
++		struct folio *folio = NULL;
++
+ 		entry = pte_to_swp_entry(orig_src_pte);
+ 		if (non_swap_entry(entry)) {
+ 			if (is_migration_entry(entry)) {
+-				pte_unmap(&orig_src_pte);
+-				pte_unmap(&orig_dst_pte);
++				pte_unmap(src_pte);
++				pte_unmap(dst_pte);
+ 				src_pte = dst_pte = NULL;
+ 				migration_entry_wait(mm, src_pmd, src_addr);
+ 				err = -EAGAIN;
+@@ -1325,10 +1337,53 @@ static int move_pages_pte(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd,
+ 			goto out;
+ 		}
+ 
+-		err = move_swap_pte(mm, dst_addr, src_addr,
+-				    dst_pte, src_pte,
+-				    orig_dst_pte, orig_src_pte,
+-				    dst_ptl, src_ptl);
++		if (!pte_swp_exclusive(orig_src_pte)) {
++			err = -EBUSY;
++			goto out;
++		}
++
++		si = get_swap_device(entry);
++		if (unlikely(!si)) {
++			err = -EAGAIN;
++			goto out;
++		}
++		/*
++		 * Verify the existence of the swapcache. If present, the folio's
++		 * index and mapping must be updated even when the PTE is a swap
++		 * entry. The anon_vma lock is not taken during this process since
++		 * the folio has already been unmapped, and the swap entry is
++		 * exclusive, preventing rmap walks.
++		 *
++		 * For large folios, return -EBUSY immediately, as split_folio()
++		 * also returns -EBUSY when attempting to split unmapped large
++		 * folios in the swapcache. This issue needs to be resolved
++		 * separately to allow proper handling.
++		 */
++		if (!src_folio)
++			folio = filemap_get_folio(swap_address_space(entry),
++					swap_cache_index(entry));
++		if (!IS_ERR_OR_NULL(folio)) {
++			if (folio_test_large(folio)) {
++				err = -EBUSY;
++				folio_put(folio);
++				goto out;
++			}
++			src_folio = folio;
++			src_folio_pte = orig_src_pte;
++			if (!folio_trylock(src_folio)) {
++				pte_unmap(src_pte);
++				pte_unmap(dst_pte);
++				src_pte = dst_pte = NULL;
++				put_swap_device(si);
++				si = NULL;
++				/* now we can block and wait */
++				folio_lock(src_folio);
++				goto retry;
++			}
++		}
++		err = move_swap_pte(mm, dst_vma, dst_addr, src_addr, dst_pte, src_pte,
++				orig_dst_pte, orig_src_pte,
++				dst_ptl, src_ptl, src_folio);
+ 	}
+ 
+ out:
+@@ -1345,6 +1400,8 @@ static int move_pages_pte(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd,
+ 	if (src_pte)
+ 		pte_unmap(src_pte);
+ 	mmu_notifier_invalidate_range_end(&range);
++	if (si)
++		put_swap_device(si);
+ 
+ 	return err;
+ }
+diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
+index b5553c08e73162..72439764186ed2 100644
+--- a/net/bluetooth/hci_core.c
++++ b/net/bluetooth/hci_core.c
+@@ -57,6 +57,7 @@ DEFINE_RWLOCK(hci_dev_list_lock);
+ 
+ /* HCI callback list */
+ LIST_HEAD(hci_cb_list);
++DEFINE_MUTEX(hci_cb_list_lock);
+ 
+ /* HCI ID Numbering */
+ static DEFINE_IDA(hci_index_ida);
+@@ -2992,7 +2993,9 @@ int hci_register_cb(struct hci_cb *cb)
+ {
+ 	BT_DBG("%p name %s", cb, cb->name);
+ 
+-	list_add_tail_rcu(&cb->list, &hci_cb_list);
++	mutex_lock(&hci_cb_list_lock);
++	list_add_tail(&cb->list, &hci_cb_list);
++	mutex_unlock(&hci_cb_list_lock);
+ 
+ 	return 0;
+ }
+@@ -3002,8 +3005,9 @@ int hci_unregister_cb(struct hci_cb *cb)
+ {
+ 	BT_DBG("%p name %s", cb, cb->name);
+ 
+-	list_del_rcu(&cb->list);
+-	synchronize_rcu();
++	mutex_lock(&hci_cb_list_lock);
++	list_del(&cb->list);
++	mutex_unlock(&hci_cb_list_lock);
+ 
+ 	return 0;
+ }
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index 388d46c6a043d4..d64117be62cc44 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -3393,23 +3393,30 @@ static void hci_disconn_complete_evt(struct hci_dev *hdev, void *data,
+ 		hci_update_scan(hdev);
+ 	}
+ 
+-	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
+-	if (params) {
+-		switch (params->auto_connect) {
+-		case HCI_AUTO_CONN_LINK_LOSS:
+-			if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
++	/* Re-enable passive scanning if the disconnected device is marked
++	 * as auto-connectable.
++	 */
++	if (conn->type == LE_LINK) {
++		params = hci_conn_params_lookup(hdev, &conn->dst,
++						conn->dst_type);
++		if (params) {
++			switch (params->auto_connect) {
++			case HCI_AUTO_CONN_LINK_LOSS:
++				if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
++					break;
++				fallthrough;
++
++			case HCI_AUTO_CONN_DIRECT:
++			case HCI_AUTO_CONN_ALWAYS:
++				hci_pend_le_list_del_init(params);
++				hci_pend_le_list_add(params,
++						     &hdev->pend_le_conns);
++				hci_update_passive_scan(hdev);
+ 				break;
+-			fallthrough;
+ 
+-		case HCI_AUTO_CONN_DIRECT:
+-		case HCI_AUTO_CONN_ALWAYS:
+-			hci_pend_le_list_del_init(params);
+-			hci_pend_le_list_add(params, &hdev->pend_le_conns);
+-			hci_update_passive_scan(hdev);
+-			break;
+-
+-		default:
+-			break;
++			default:
++				break;
++			}
+ 		}
+ 	}
+ 
+diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c
+index bda2f2da7d7311..644b606743e212 100644
+--- a/net/bluetooth/iso.c
++++ b/net/bluetooth/iso.c
+@@ -2137,11 +2137,6 @@ int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags)
+ 	return HCI_LM_ACCEPT;
+ }
+ 
+-static bool iso_match(struct hci_conn *hcon)
+-{
+-	return hcon->type == ISO_LINK || hcon->type == LE_LINK;
+-}
+-
+ static void iso_connect_cfm(struct hci_conn *hcon, __u8 status)
+ {
+ 	if (hcon->type != ISO_LINK) {
+@@ -2323,7 +2318,6 @@ void iso_recv(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
+ 
+ static struct hci_cb iso_cb = {
+ 	.name		= "ISO",
+-	.match		= iso_match,
+ 	.connect_cfm	= iso_connect_cfm,
+ 	.disconn_cfm	= iso_disconn_cfm,
+ };
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index 728a5ce9b50587..c27ea70f71e1e1 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -119,7 +119,6 @@ static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
+ {
+ 	struct l2cap_chan *c;
+ 
+-	mutex_lock(&conn->chan_lock);
+ 	c = __l2cap_get_chan_by_scid(conn, cid);
+ 	if (c) {
+ 		/* Only lock if chan reference is not 0 */
+@@ -127,7 +126,6 @@ static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
+ 		if (c)
+ 			l2cap_chan_lock(c);
+ 	}
+-	mutex_unlock(&conn->chan_lock);
+ 
+ 	return c;
+ }
+@@ -140,7 +138,6 @@ static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
+ {
+ 	struct l2cap_chan *c;
+ 
+-	mutex_lock(&conn->chan_lock);
+ 	c = __l2cap_get_chan_by_dcid(conn, cid);
+ 	if (c) {
+ 		/* Only lock if chan reference is not 0 */
+@@ -148,7 +145,6 @@ static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
+ 		if (c)
+ 			l2cap_chan_lock(c);
+ 	}
+-	mutex_unlock(&conn->chan_lock);
+ 
+ 	return c;
+ }
+@@ -418,7 +414,7 @@ static void l2cap_chan_timeout(struct work_struct *work)
+ 	if (!conn)
+ 		return;
+ 
+-	mutex_lock(&conn->chan_lock);
++	mutex_lock(&conn->lock);
+ 	/* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
+ 	 * this work. No need to call l2cap_chan_hold(chan) here again.
+ 	 */
+@@ -439,7 +435,7 @@ static void l2cap_chan_timeout(struct work_struct *work)
+ 	l2cap_chan_unlock(chan);
+ 	l2cap_chan_put(chan);
+ 
+-	mutex_unlock(&conn->chan_lock);
++	mutex_unlock(&conn->lock);
+ }
+ 
+ struct l2cap_chan *l2cap_chan_create(void)
+@@ -642,9 +638,9 @@ void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
+ 
+ void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
+ {
+-	mutex_lock(&conn->chan_lock);
++	mutex_lock(&conn->lock);
+ 	__l2cap_chan_add(conn, chan);
+-	mutex_unlock(&conn->chan_lock);
++	mutex_unlock(&conn->lock);
+ }
+ 
+ void l2cap_chan_del(struct l2cap_chan *chan, int err)
+@@ -732,9 +728,9 @@ void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
+ 	if (!conn)
+ 		return;
+ 
+-	mutex_lock(&conn->chan_lock);
++	mutex_lock(&conn->lock);
+ 	__l2cap_chan_list(conn, func, data);
+-	mutex_unlock(&conn->chan_lock);
++	mutex_unlock(&conn->lock);
+ }
+ 
+ EXPORT_SYMBOL_GPL(l2cap_chan_list);
+@@ -746,7 +742,7 @@ static void l2cap_conn_update_id_addr(struct work_struct *work)
+ 	struct hci_conn *hcon = conn->hcon;
+ 	struct l2cap_chan *chan;
+ 
+-	mutex_lock(&conn->chan_lock);
++	mutex_lock(&conn->lock);
+ 
+ 	list_for_each_entry(chan, &conn->chan_l, list) {
+ 		l2cap_chan_lock(chan);
+@@ -755,7 +751,7 @@ static void l2cap_conn_update_id_addr(struct work_struct *work)
+ 		l2cap_chan_unlock(chan);
+ 	}
+ 
+-	mutex_unlock(&conn->chan_lock);
++	mutex_unlock(&conn->lock);
+ }
+ 
+ static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
+@@ -949,6 +945,16 @@ static u8 l2cap_get_ident(struct l2cap_conn *conn)
+ 	return id;
+ }
+ 
++static void l2cap_send_acl(struct l2cap_conn *conn, struct sk_buff *skb,
++			   u8 flags)
++{
++	/* Check if the hcon is still valid before attempting to send */
++	if (hci_conn_valid(conn->hcon->hdev, conn->hcon))
++		hci_send_acl(conn->hchan, skb, flags);
++	else
++		kfree_skb(skb);
++}
++
+ static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
+ 			   void *data)
+ {
+@@ -971,7 +977,7 @@ static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
+ 	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
+ 	skb->priority = HCI_PRIO_MAX;
+ 
+-	hci_send_acl(conn->hchan, skb, flags);
++	l2cap_send_acl(conn, skb, flags);
+ }
+ 
+ static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
+@@ -1498,8 +1504,6 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
+ 
+ 	BT_DBG("conn %p", conn);
+ 
+-	mutex_lock(&conn->chan_lock);
+-
+ 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
+ 		l2cap_chan_lock(chan);
+ 
+@@ -1568,8 +1572,6 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
+ 
+ 		l2cap_chan_unlock(chan);
+ 	}
+-
+-	mutex_unlock(&conn->chan_lock);
+ }
+ 
+ static void l2cap_le_conn_ready(struct l2cap_conn *conn)
+@@ -1615,7 +1617,7 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
+ 	if (hcon->type == ACL_LINK)
+ 		l2cap_request_info(conn);
+ 
+-	mutex_lock(&conn->chan_lock);
++	mutex_lock(&conn->lock);
+ 
+ 	list_for_each_entry(chan, &conn->chan_l, list) {
+ 
+@@ -1633,7 +1635,7 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
+ 		l2cap_chan_unlock(chan);
+ 	}
+ 
+-	mutex_unlock(&conn->chan_lock);
++	mutex_unlock(&conn->lock);
+ 
+ 	if (hcon->type == LE_LINK)
+ 		l2cap_le_conn_ready(conn);
+@@ -1648,14 +1650,10 @@ static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
+ 
+ 	BT_DBG("conn %p", conn);
+ 
+-	mutex_lock(&conn->chan_lock);
+-
+ 	list_for_each_entry(chan, &conn->chan_l, list) {
+ 		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
+ 			l2cap_chan_set_err(chan, err);
+ 	}
+-
+-	mutex_unlock(&conn->chan_lock);
+ }
+ 
+ static void l2cap_info_timeout(struct work_struct *work)
+@@ -1666,7 +1664,9 @@ static void l2cap_info_timeout(struct work_struct *work)
+ 	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
+ 	conn->info_ident = 0;
+ 
++	mutex_lock(&conn->lock);
+ 	l2cap_conn_start(conn);
++	mutex_unlock(&conn->lock);
+ }
+ 
+ /*
+@@ -1758,6 +1758,8 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
+ 
+ 	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
+ 
++	mutex_lock(&conn->lock);
++
+ 	kfree_skb(conn->rx_skb);
+ 
+ 	skb_queue_purge(&conn->pending_rx);
+@@ -1776,8 +1778,6 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
+ 	/* Force the connection to be immediately dropped */
+ 	hcon->disc_timeout = 0;
+ 
+-	mutex_lock(&conn->chan_lock);
+-
+ 	/* Kill channels */
+ 	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
+ 		l2cap_chan_hold(chan);
+@@ -1791,15 +1791,14 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
+ 		l2cap_chan_put(chan);
+ 	}
+ 
+-	mutex_unlock(&conn->chan_lock);
+-
+-	hci_chan_del(conn->hchan);
+-
+ 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
+ 		cancel_delayed_work_sync(&conn->info_timer);
+ 
+-	hcon->l2cap_data = NULL;
++	hci_chan_del(conn->hchan);
+ 	conn->hchan = NULL;
++
++	hcon->l2cap_data = NULL;
++	mutex_unlock(&conn->lock);
+ 	l2cap_conn_put(conn);
+ }
+ 
+@@ -2917,8 +2916,6 @@ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
+ 
+ 	BT_DBG("conn %p", conn);
+ 
+-	mutex_lock(&conn->chan_lock);
+-
+ 	list_for_each_entry(chan, &conn->chan_l, list) {
+ 		if (chan->chan_type != L2CAP_CHAN_RAW)
+ 			continue;
+@@ -2933,8 +2930,6 @@ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
+ 		if (chan->ops->recv(chan, nskb))
+ 			kfree_skb(nskb);
+ 	}
+-
+-	mutex_unlock(&conn->chan_lock);
+ }
+ 
+ /* ---- L2CAP signalling commands ---- */
+@@ -3957,7 +3952,6 @@ static void l2cap_connect(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd,
+ 		goto response;
+ 	}
+ 
+-	mutex_lock(&conn->chan_lock);
+ 	l2cap_chan_lock(pchan);
+ 
+ 	/* Check if the ACL is secure enough (if not SDP) */
+@@ -4064,7 +4058,6 @@ static void l2cap_connect(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd,
+ 	}
+ 
+ 	l2cap_chan_unlock(pchan);
+-	mutex_unlock(&conn->chan_lock);
+ 	l2cap_chan_put(pchan);
+ }
+ 
+@@ -4103,27 +4096,19 @@ static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
+ 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
+ 	       dcid, scid, result, status);
+ 
+-	mutex_lock(&conn->chan_lock);
+-
+ 	if (scid) {
+ 		chan = __l2cap_get_chan_by_scid(conn, scid);
+-		if (!chan) {
+-			err = -EBADSLT;
+-			goto unlock;
+-		}
++		if (!chan)
++			return -EBADSLT;
+ 	} else {
+ 		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
+-		if (!chan) {
+-			err = -EBADSLT;
+-			goto unlock;
+-		}
++		if (!chan)
++			return -EBADSLT;
+ 	}
+ 
+ 	chan = l2cap_chan_hold_unless_zero(chan);
+-	if (!chan) {
+-		err = -EBADSLT;
+-		goto unlock;
+-	}
++	if (!chan)
++		return -EBADSLT;
+ 
+ 	err = 0;
+ 
+@@ -4161,9 +4146,6 @@ static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
+ 	l2cap_chan_unlock(chan);
+ 	l2cap_chan_put(chan);
+ 
+-unlock:
+-	mutex_unlock(&conn->chan_lock);
+-
+ 	return err;
+ }
+ 
+@@ -4451,11 +4433,7 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
+ 
+ 	chan->ops->set_shutdown(chan);
+ 
+-	l2cap_chan_unlock(chan);
+-	mutex_lock(&conn->chan_lock);
+-	l2cap_chan_lock(chan);
+ 	l2cap_chan_del(chan, ECONNRESET);
+-	mutex_unlock(&conn->chan_lock);
+ 
+ 	chan->ops->close(chan);
+ 
+@@ -4492,11 +4470,7 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
+ 		return 0;
+ 	}
+ 
+-	l2cap_chan_unlock(chan);
+-	mutex_lock(&conn->chan_lock);
+-	l2cap_chan_lock(chan);
+ 	l2cap_chan_del(chan, 0);
+-	mutex_unlock(&conn->chan_lock);
+ 
+ 	chan->ops->close(chan);
+ 
+@@ -4694,13 +4668,9 @@ static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
+ 	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
+ 	       dcid, mtu, mps, credits, result);
+ 
+-	mutex_lock(&conn->chan_lock);
+-
+ 	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
+-	if (!chan) {
+-		err = -EBADSLT;
+-		goto unlock;
+-	}
++	if (!chan)
++		return -EBADSLT;
+ 
+ 	err = 0;
+ 
+@@ -4748,9 +4718,6 @@ static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
+ 
+ 	l2cap_chan_unlock(chan);
+ 
+-unlock:
+-	mutex_unlock(&conn->chan_lock);
+-
+ 	return err;
+ }
+ 
+@@ -4862,7 +4829,6 @@ static int l2cap_le_connect_req(struct l2cap_conn *conn,
+ 		goto response;
+ 	}
+ 
+-	mutex_lock(&conn->chan_lock);
+ 	l2cap_chan_lock(pchan);
+ 
+ 	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
+@@ -4928,7 +4894,6 @@ static int l2cap_le_connect_req(struct l2cap_conn *conn,
+ 
+ response_unlock:
+ 	l2cap_chan_unlock(pchan);
+-	mutex_unlock(&conn->chan_lock);
+ 	l2cap_chan_put(pchan);
+ 
+ 	if (result == L2CAP_CR_PEND)
+@@ -5062,7 +5027,6 @@ static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
+ 		goto response;
+ 	}
+ 
+-	mutex_lock(&conn->chan_lock);
+ 	l2cap_chan_lock(pchan);
+ 
+ 	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
+@@ -5137,7 +5101,6 @@ static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
+ 
+ unlock:
+ 	l2cap_chan_unlock(pchan);
+-	mutex_unlock(&conn->chan_lock);
+ 	l2cap_chan_put(pchan);
+ 
+ response:
+@@ -5174,8 +5137,6 @@ static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn,
+ 	BT_DBG("mtu %u mps %u credits %u result 0x%4.4x", mtu, mps, credits,
+ 	       result);
+ 
+-	mutex_lock(&conn->chan_lock);
+-
+ 	cmd_len -= sizeof(*rsp);
+ 
+ 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
+@@ -5261,8 +5222,6 @@ static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn,
+ 		l2cap_chan_unlock(chan);
+ 	}
+ 
+-	mutex_unlock(&conn->chan_lock);
+-
+ 	return err;
+ }
+ 
+@@ -5375,8 +5334,6 @@ static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
+ 	if (cmd_len < sizeof(*rej))
+ 		return -EPROTO;
+ 
+-	mutex_lock(&conn->chan_lock);
+-
+ 	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
+ 	if (!chan)
+ 		goto done;
+@@ -5391,7 +5348,6 @@ static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
+ 	l2cap_chan_put(chan);
+ 
+ done:
+-	mutex_unlock(&conn->chan_lock);
+ 	return 0;
+ }
+ 
+@@ -6846,8 +6802,12 @@ static void process_pending_rx(struct work_struct *work)
+ 
+ 	BT_DBG("");
+ 
++	mutex_lock(&conn->lock);
++
+ 	while ((skb = skb_dequeue(&conn->pending_rx)))
+ 		l2cap_recv_frame(conn, skb);
++
++	mutex_unlock(&conn->lock);
+ }
+ 
+ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
+@@ -6886,7 +6846,7 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
+ 		conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;
+ 
+ 	mutex_init(&conn->ident_lock);
+-	mutex_init(&conn->chan_lock);
++	mutex_init(&conn->lock);
+ 
+ 	INIT_LIST_HEAD(&conn->chan_l);
+ 	INIT_LIST_HEAD(&conn->users);
+@@ -7077,7 +7037,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
+ 		}
+ 	}
+ 
+-	mutex_lock(&conn->chan_lock);
++	mutex_lock(&conn->lock);
+ 	l2cap_chan_lock(chan);
+ 
+ 	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
+@@ -7118,7 +7078,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
+ 
+ chan_unlock:
+ 	l2cap_chan_unlock(chan);
+-	mutex_unlock(&conn->chan_lock);
++	mutex_unlock(&conn->lock);
+ done:
+ 	hci_dev_unlock(hdev);
+ 	hci_dev_put(hdev);
+@@ -7222,11 +7182,6 @@ static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
+ 	return NULL;
+ }
+ 
+-static bool l2cap_match(struct hci_conn *hcon)
+-{
+-	return hcon->type == ACL_LINK || hcon->type == LE_LINK;
+-}
+-
+ static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
+ {
+ 	struct hci_dev *hdev = hcon->hdev;
+@@ -7234,6 +7189,9 @@ static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
+ 	struct l2cap_chan *pchan;
+ 	u8 dst_type;
+ 
++	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
++		return;
++
+ 	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
+ 
+ 	if (status) {
+@@ -7298,6 +7256,9 @@ int l2cap_disconn_ind(struct hci_conn *hcon)
+ 
+ static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
+ {
++	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
++		return;
++
+ 	BT_DBG("hcon %p reason %d", hcon, reason);
+ 
+ 	l2cap_conn_del(hcon, bt_to_errno(reason));
+@@ -7330,7 +7291,7 @@ static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
+ 
+ 	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
+ 
+-	mutex_lock(&conn->chan_lock);
++	mutex_lock(&conn->lock);
+ 
+ 	list_for_each_entry(chan, &conn->chan_l, list) {
+ 		l2cap_chan_lock(chan);
+@@ -7404,7 +7365,7 @@ static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
+ 		l2cap_chan_unlock(chan);
+ 	}
+ 
+-	mutex_unlock(&conn->chan_lock);
++	mutex_unlock(&conn->lock);
+ }
+ 
+ /* Append fragment into frame respecting the maximum len of rx_skb */
+@@ -7471,19 +7432,45 @@ static void l2cap_recv_reset(struct l2cap_conn *conn)
+ 	conn->rx_len = 0;
+ }
+ 
++struct l2cap_conn *l2cap_conn_hold_unless_zero(struct l2cap_conn *c)
++{
++	if (!c)
++		return NULL;
++
++	BT_DBG("conn %p orig refcnt %u", c, kref_read(&c->ref));
++
++	if (!kref_get_unless_zero(&c->ref))
++		return NULL;
++
++	return c;
++}
++
+ void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
+ {
+-	struct l2cap_conn *conn = hcon->l2cap_data;
++	struct l2cap_conn *conn;
+ 	int len;
+ 
++	/* Lock hdev to access l2cap_data to avoid race with l2cap_conn_del */
++	hci_dev_lock(hcon->hdev);
++
++	conn = hcon->l2cap_data;
++
+ 	if (!conn)
+ 		conn = l2cap_conn_add(hcon);
+ 
+-	if (!conn)
+-		goto drop;
++	conn = l2cap_conn_hold_unless_zero(conn);
++
++	hci_dev_unlock(hcon->hdev);
++
++	if (!conn) {
++		kfree_skb(skb);
++		return;
++	}
+ 
+ 	BT_DBG("conn %p len %u flags 0x%x", conn, skb->len, flags);
+ 
++	mutex_lock(&conn->lock);
++
+ 	switch (flags) {
+ 	case ACL_START:
+ 	case ACL_START_NO_FLUSH:
+@@ -7508,7 +7495,7 @@ void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
+ 		if (len == skb->len) {
+ 			/* Complete frame received */
+ 			l2cap_recv_frame(conn, skb);
+-			return;
++			goto unlock;
+ 		}
+ 
+ 		BT_DBG("Start: total len %d, frag len %u", len, skb->len);
+@@ -7572,11 +7559,13 @@ void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
+ 
+ drop:
+ 	kfree_skb(skb);
++unlock:
++	mutex_unlock(&conn->lock);
++	l2cap_conn_put(conn);
+ }
+ 
+ static struct hci_cb l2cap_cb = {
+ 	.name		= "L2CAP",
+-	.match		= l2cap_match,
+ 	.connect_cfm	= l2cap_connect_cfm,
+ 	.disconn_cfm	= l2cap_disconn_cfm,
+ 	.security_cfm	= l2cap_security_cfm,
+diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
+index 46ea0bee2259f8..acd11b268b98ad 100644
+--- a/net/bluetooth/l2cap_sock.c
++++ b/net/bluetooth/l2cap_sock.c
+@@ -1326,9 +1326,10 @@ static int l2cap_sock_shutdown(struct socket *sock, int how)
+ 	/* prevent sk structure from being freed whilst unlocked */
+ 	sock_hold(sk);
+ 
+-	chan = l2cap_pi(sk)->chan;
+ 	/* prevent chan structure from being freed whilst unlocked */
+-	l2cap_chan_hold(chan);
++	chan = l2cap_chan_hold_unless_zero(l2cap_pi(sk)->chan);
++	if (!chan)
++		goto shutdown_already;
+ 
+ 	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
+ 
+@@ -1358,22 +1359,20 @@ static int l2cap_sock_shutdown(struct socket *sock, int how)
+ 	release_sock(sk);
+ 
+ 	l2cap_chan_lock(chan);
+-	conn = chan->conn;
+-	if (conn)
+-		/* prevent conn structure from being freed */
+-		l2cap_conn_get(conn);
++	/* prevent conn structure from being freed */
++	conn = l2cap_conn_hold_unless_zero(chan->conn);
+ 	l2cap_chan_unlock(chan);
+ 
+ 	if (conn)
+ 		/* mutex lock must be taken before l2cap_chan_lock() */
+-		mutex_lock(&conn->chan_lock);
++		mutex_lock(&conn->lock);
+ 
+ 	l2cap_chan_lock(chan);
+ 	l2cap_chan_close(chan, 0);
+ 	l2cap_chan_unlock(chan);
+ 
+ 	if (conn) {
+-		mutex_unlock(&conn->chan_lock);
++		mutex_unlock(&conn->lock);
+ 		l2cap_conn_put(conn);
+ 	}
+ 
+diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
+index 4c56ca5a216c6f..ad5177e3a69b77 100644
+--- a/net/bluetooth/rfcomm/core.c
++++ b/net/bluetooth/rfcomm/core.c
+@@ -2134,11 +2134,6 @@ static int rfcomm_run(void *unused)
+ 	return 0;
+ }
+ 
+-static bool rfcomm_match(struct hci_conn *hcon)
+-{
+-	return hcon->type == ACL_LINK;
+-}
+-
+ static void rfcomm_security_cfm(struct hci_conn *conn, u8 status, u8 encrypt)
+ {
+ 	struct rfcomm_session *s;
+@@ -2185,7 +2180,6 @@ static void rfcomm_security_cfm(struct hci_conn *conn, u8 status, u8 encrypt)
+ 
+ static struct hci_cb rfcomm_cb = {
+ 	.name		= "RFCOMM",
+-	.match		= rfcomm_match,
+ 	.security_cfm	= rfcomm_security_cfm
+ };
+ 
+diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
+index 071c404c790af9..b872a2ca3ff38b 100644
+--- a/net/bluetooth/sco.c
++++ b/net/bluetooth/sco.c
+@@ -1355,13 +1355,11 @@ int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags)
+ 	return lm;
+ }
+ 
+-static bool sco_match(struct hci_conn *hcon)
+-{
+-	return hcon->type == SCO_LINK || hcon->type == ESCO_LINK;
+-}
+-
+ static void sco_connect_cfm(struct hci_conn *hcon, __u8 status)
+ {
++	if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK)
++		return;
++
+ 	BT_DBG("hcon %p bdaddr %pMR status %u", hcon, &hcon->dst, status);
+ 
+ 	if (!status) {
+@@ -1376,6 +1374,9 @@ static void sco_connect_cfm(struct hci_conn *hcon, __u8 status)
+ 
+ static void sco_disconn_cfm(struct hci_conn *hcon, __u8 reason)
+ {
++	if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK)
++		return;
++
+ 	BT_DBG("hcon %p reason %d", hcon, reason);
+ 
+ 	sco_conn_del(hcon, bt_to_errno(reason));
+@@ -1401,7 +1402,6 @@ void sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb)
+ 
+ static struct hci_cb sco_cb = {
+ 	.name		= "SCO",
+-	.match		= sco_match,
+ 	.connect_cfm	= sco_connect_cfm,
+ 	.disconn_cfm	= sco_disconn_cfm,
+ };
+diff --git a/net/core/dev.c b/net/core/dev.c
+index c761f862bc5a2d..7b7b36c43c82cc 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3723,6 +3723,9 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device
+ {
+ 	netdev_features_t features;
+ 
++	if (!skb_frags_readable(skb))
++		goto out_kfree_skb;
++
+ 	features = netif_skb_features(skb);
+ 	skb = validate_xmit_vlan(skb, features);
+ 	if (unlikely(!skb))
+@@ -4608,7 +4611,7 @@ static inline void ____napi_schedule(struct softnet_data *sd,
+ 	 * we have to raise NET_RX_SOFTIRQ.
+ 	 */
+ 	if (!sd->in_net_rx_action)
+-		__raise_softirq_irqoff(NET_RX_SOFTIRQ);
++		raise_softirq_irqoff(NET_RX_SOFTIRQ);
+ }
+ 
+ #ifdef CONFIG_RPS
+diff --git a/net/core/netpoll.c b/net/core/netpoll.c
+index 45fb60bc480395..e95c2933756df9 100644
+--- a/net/core/netpoll.c
++++ b/net/core/netpoll.c
+@@ -319,6 +319,7 @@ static int netpoll_owner_active(struct net_device *dev)
+ static netdev_tx_t __netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
+ {
+ 	netdev_tx_t status = NETDEV_TX_BUSY;
++	netdev_tx_t ret = NET_XMIT_DROP;
+ 	struct net_device *dev;
+ 	unsigned long tries;
+ 	/* It is up to the caller to keep npinfo alive. */
+@@ -327,11 +328,12 @@ static netdev_tx_t __netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
+ 	lockdep_assert_irqs_disabled();
+ 
+ 	dev = np->dev;
++	rcu_read_lock();
+ 	npinfo = rcu_dereference_bh(dev->npinfo);
+ 
+ 	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
+ 		dev_kfree_skb_irq(skb);
+-		return NET_XMIT_DROP;
++		goto out;
+ 	}
+ 
+ 	/* don't get messages out of order, and no recursion */
+@@ -370,7 +372,10 @@ static netdev_tx_t __netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
+ 		skb_queue_tail(&npinfo->txq, skb);
+ 		schedule_delayed_work(&npinfo->tx_work,0);
+ 	}
+-	return NETDEV_TX_OK;
++	ret = NETDEV_TX_OK;
++out:
++	rcu_read_unlock();
++	return ret;
+ }
+ 
+ netdev_tx_t netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index f7c17388ff6aaf..26cdb665747573 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -3237,16 +3237,13 @@ static void add_v4_addrs(struct inet6_dev *idev)
+ 	struct in6_addr addr;
+ 	struct net_device *dev;
+ 	struct net *net = dev_net(idev->dev);
+-	int scope, plen, offset = 0;
++	int scope, plen;
+ 	u32 pflags = 0;
+ 
+ 	ASSERT_RTNL();
+ 
+ 	memset(&addr, 0, sizeof(struct in6_addr));
+-	/* in case of IP6GRE the dev_addr is an IPv6 and therefore we use only the last 4 bytes */
+-	if (idev->dev->addr_len == sizeof(struct in6_addr))
+-		offset = sizeof(struct in6_addr) - 4;
+-	memcpy(&addr.s6_addr32[3], idev->dev->dev_addr + offset, 4);
++	memcpy(&addr.s6_addr32[3], idev->dev->dev_addr, 4);
+ 
+ 	if (!(idev->dev->flags & IFF_POINTOPOINT) && idev->dev->type == ARPHRD_SIT) {
+ 		scope = IPV6_ADDR_COMPATv4;
+@@ -3557,7 +3554,13 @@ static void addrconf_gre_config(struct net_device *dev)
+ 		return;
+ 	}
+ 
+-	if (dev->type == ARPHRD_ETHER) {
++	/* Generate the IPv6 link-local address using addrconf_addr_gen(),
++	 * unless we have an IPv4 GRE device that is not bound to an IP address
++	 * and is in EUI64 mode (as __ipv6_isatap_ifid() would fail in this
++	 * case). Such devices fall back to add_v4_addrs() instead.
++	 */
++	if (!(dev->type == ARPHRD_IPGRE && *(__be32 *)dev->dev_addr == 0 &&
++	      idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_EUI64)) {
+ 		addrconf_addr_gen(idev, true);
+ 		return;
+ 	}
+diff --git a/net/mac80211/util.c b/net/mac80211/util.c
+index 38c30e4ddda98c..2b6e8e7307ee5e 100644
+--- a/net/mac80211/util.c
++++ b/net/mac80211/util.c
+@@ -6,7 +6,7 @@
+  * Copyright 2007	Johannes Berg <johannes@sipsolutions.net>
+  * Copyright 2013-2014  Intel Mobile Communications GmbH
+  * Copyright (C) 2015-2017	Intel Deutschland GmbH
+- * Copyright (C) 2018-2024 Intel Corporation
++ * Copyright (C) 2018-2025 Intel Corporation
+  *
+  * utilities for mac80211
+  */
+@@ -2184,8 +2184,10 @@ int ieee80211_reconfig(struct ieee80211_local *local)
+ 		ieee80211_reconfig_roc(local);
+ 
+ 		/* Requeue all works */
+-		list_for_each_entry(sdata, &local->interfaces, list)
+-			wiphy_work_queue(local->hw.wiphy, &sdata->work);
++		list_for_each_entry(sdata, &local->interfaces, list) {
++			if (ieee80211_sdata_running(sdata))
++				wiphy_work_queue(local->hw.wiphy, &sdata->work);
++		}
+ 	}
+ 
+ 	ieee80211_wake_queues_by_reason(hw, IEEE80211_MAX_QUEUE_MAP,
+diff --git a/net/mctp/route.c b/net/mctp/route.c
+index 3f2bd65ff5e3c9..4c460160914f01 100644
+--- a/net/mctp/route.c
++++ b/net/mctp/route.c
+@@ -332,8 +332,14 @@ static int mctp_frag_queue(struct mctp_sk_key *key, struct sk_buff *skb)
+ 		& MCTP_HDR_SEQ_MASK;
+ 
+ 	if (!key->reasm_head) {
+-		key->reasm_head = skb;
+-		key->reasm_tailp = &(skb_shinfo(skb)->frag_list);
++		/* Since we're manipulating the shared frag_list, ensure it isn't
++		 * shared with any other SKBs.
++		 */
++		key->reasm_head = skb_unshare(skb, GFP_ATOMIC);
++		if (!key->reasm_head)
++			return -ENOMEM;
++
++		key->reasm_tailp = &(skb_shinfo(key->reasm_head)->frag_list);
+ 		key->last_seq = this_seq;
+ 		return 0;
+ 	}
+diff --git a/net/mctp/test/route-test.c b/net/mctp/test/route-test.c
+index 17165b86ce22d4..06c1897b685a8b 100644
+--- a/net/mctp/test/route-test.c
++++ b/net/mctp/test/route-test.c
+@@ -921,6 +921,114 @@ static void mctp_test_route_input_sk_fail_frag(struct kunit *test)
+ 	__mctp_route_test_fini(test, dev, rt, sock);
+ }
+ 
++/* Input route to socket, using a fragmented message created from clones.
++ */
++static void mctp_test_route_input_cloned_frag(struct kunit *test)
++{
++	/* 5 packet fragments, forming 2 complete messages */
++	const struct mctp_hdr hdrs[5] = {
++		RX_FRAG(FL_S, 0),
++		RX_FRAG(0, 1),
++		RX_FRAG(FL_E, 2),
++		RX_FRAG(FL_S, 0),
++		RX_FRAG(FL_E, 1),
++	};
++	struct mctp_test_route *rt;
++	struct mctp_test_dev *dev;
++	struct sk_buff *skb[5];
++	struct sk_buff *rx_skb;
++	struct socket *sock;
++	size_t data_len;
++	u8 compare[100];
++	u8 flat[100];
++	size_t total;
++	void *p;
++	int rc;
++
++	/* Arbitrary length */
++	data_len = 3;
++	total = data_len + sizeof(struct mctp_hdr);
++
++	__mctp_route_test_init(test, &dev, &rt, &sock, MCTP_NET_ANY);
++
++	/* Create a single skb initially with concatenated packets */
++	skb[0] = mctp_test_create_skb(&hdrs[0], 5 * total);
++	mctp_test_skb_set_dev(skb[0], dev);
++	memset(skb[0]->data, 0 * 0x11, skb[0]->len);
++	memcpy(skb[0]->data, &hdrs[0], sizeof(struct mctp_hdr));
++
++	/* Extract and populate packets */
++	for (int i = 1; i < 5; i++) {
++		skb[i] = skb_clone(skb[i - 1], GFP_ATOMIC);
++		KUNIT_ASSERT_TRUE(test, skb[i]);
++		p = skb_pull(skb[i], total);
++		KUNIT_ASSERT_TRUE(test, p);
++		skb_reset_network_header(skb[i]);
++		memcpy(skb[i]->data, &hdrs[i], sizeof(struct mctp_hdr));
++		memset(&skb[i]->data[sizeof(struct mctp_hdr)], i * 0x11, data_len);
++	}
++	for (int i = 0; i < 5; i++)
++		skb_trim(skb[i], total);
++
++	/* SOM packets have a type byte to match the socket */
++	skb[0]->data[4] = 0;
++	skb[3]->data[4] = 0;
++
++	skb_dump("pkt1 ", skb[0], false);
++	skb_dump("pkt2 ", skb[1], false);
++	skb_dump("pkt3 ", skb[2], false);
++	skb_dump("pkt4 ", skb[3], false);
++	skb_dump("pkt5 ", skb[4], false);
++
++	for (int i = 0; i < 5; i++) {
++		KUNIT_EXPECT_EQ(test, refcount_read(&skb[i]->users), 1);
++		/* Take a reference so we can check refcounts at the end */
++		skb_get(skb[i]);
++	}
++
++	/* Feed the fragments into MCTP core */
++	for (int i = 0; i < 5; i++) {
++		rc = mctp_route_input(&rt->rt, skb[i]);
++		KUNIT_EXPECT_EQ(test, rc, 0);
++	}
++
++	/* Receive first reassembled message */
++	rx_skb = skb_recv_datagram(sock->sk, MSG_DONTWAIT, &rc);
++	KUNIT_EXPECT_EQ(test, rc, 0);
++	KUNIT_EXPECT_EQ(test, rx_skb->len, 3 * data_len);
++	rc = skb_copy_bits(rx_skb, 0, flat, rx_skb->len);
++	for (int i = 0; i < rx_skb->len; i++)
++		compare[i] = (i / data_len) * 0x11;
++	/* Set type byte */
++	compare[0] = 0;
++
++	KUNIT_EXPECT_MEMEQ(test, flat, compare, rx_skb->len);
++	KUNIT_EXPECT_EQ(test, refcount_read(&rx_skb->users), 1);
++	kfree_skb(rx_skb);
++
++	/* Receive second reassembled message */
++	rx_skb = skb_recv_datagram(sock->sk, MSG_DONTWAIT, &rc);
++	KUNIT_EXPECT_EQ(test, rc, 0);
++	KUNIT_EXPECT_EQ(test, rx_skb->len, 2 * data_len);
++	rc = skb_copy_bits(rx_skb, 0, flat, rx_skb->len);
++	for (int i = 0; i < rx_skb->len; i++)
++		compare[i] = (i / data_len + 3) * 0x11;
++	/* Set type byte */
++	compare[0] = 0;
++
++	KUNIT_EXPECT_MEMEQ(test, flat, compare, rx_skb->len);
++	KUNIT_EXPECT_EQ(test, refcount_read(&rx_skb->users), 1);
++	kfree_skb(rx_skb);
++
++	/* Check input skb refcounts */
++	for (int i = 0; i < 5; i++) {
++		KUNIT_EXPECT_EQ(test, refcount_read(&skb[i]->users), 1);
++		kfree_skb(skb[i]);
++	}
++
++	__mctp_route_test_fini(test, dev, rt, sock);
++}
++
+ #if IS_ENABLED(CONFIG_MCTP_FLOWS)
+ 
+ static void mctp_test_flow_init(struct kunit *test,
+@@ -1144,6 +1252,7 @@ static struct kunit_case mctp_test_cases[] = {
+ 	KUNIT_CASE(mctp_test_packet_flow),
+ 	KUNIT_CASE(mctp_test_fragment_flow),
+ 	KUNIT_CASE(mctp_test_route_output_key_create),
++	KUNIT_CASE(mctp_test_route_input_cloned_frag),
+ 	{}
+ };
+ 
+diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
+index b70a303e082878..7e2f70f22b05b6 100644
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -1194,6 +1194,8 @@ static inline void __mptcp_do_fallback(struct mptcp_sock *msk)
+ 		pr_debug("TCP fallback already done (msk=%p)\n", msk);
+ 		return;
+ 	}
++	if (WARN_ON_ONCE(!READ_ONCE(msk->allow_infinite_fallback)))
++		return;
+ 	set_bit(MPTCP_FALLBACK_DONE, &msk->flags);
+ }
+ 
+diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
+index dc6ddc4abbe213..3224f6e17e7361 100644
+--- a/net/netfilter/ipvs/ip_vs_ctl.c
++++ b/net/netfilter/ipvs/ip_vs_ctl.c
+@@ -3091,12 +3091,12 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
+ 	case IP_VS_SO_GET_SERVICES:
+ 	{
+ 		struct ip_vs_get_services *get;
+-		int size;
++		size_t size;
+ 
+ 		get = (struct ip_vs_get_services *)arg;
+ 		size = struct_size(get, entrytable, get->num_services);
+ 		if (*len != size) {
+-			pr_err("length: %u != %u\n", *len, size);
++			pr_err("length: %u != %zu\n", *len, size);
+ 			ret = -EINVAL;
+ 			goto out;
+ 		}
+@@ -3132,12 +3132,12 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
+ 	case IP_VS_SO_GET_DESTS:
+ 	{
+ 		struct ip_vs_get_dests *get;
+-		int size;
++		size_t size;
+ 
+ 		get = (struct ip_vs_get_dests *)arg;
+ 		size = struct_size(get, entrytable, get->num_dests);
+ 		if (*len != size) {
+-			pr_err("length: %u != %u\n", *len, size);
++			pr_err("length: %u != %zu\n", *len, size);
+ 			ret = -EINVAL;
+ 			goto out;
+ 		}
+diff --git a/net/netfilter/nf_conncount.c b/net/netfilter/nf_conncount.c
+index 4890af4dc263fd..913ede2f57f9a9 100644
+--- a/net/netfilter/nf_conncount.c
++++ b/net/netfilter/nf_conncount.c
+@@ -132,7 +132,7 @@ static int __nf_conncount_add(struct net *net,
+ 	struct nf_conn *found_ct;
+ 	unsigned int collect = 0;
+ 
+-	if (time_is_after_eq_jiffies((unsigned long)list->last_gc))
++	if ((u32)jiffies == list->last_gc)
+ 		goto add_new_node;
+ 
+ 	/* check the saved connections */
+@@ -234,7 +234,7 @@ bool nf_conncount_gc_list(struct net *net,
+ 	bool ret = false;
+ 
+ 	/* don't bother if we just did GC */
+-	if (time_is_after_eq_jiffies((unsigned long)READ_ONCE(list->last_gc)))
++	if ((u32)jiffies == READ_ONCE(list->last_gc))
+ 		return false;
+ 
+ 	/* don't bother if other cpu is already doing GC */
+@@ -377,6 +377,8 @@ insert_tree(struct net *net,
+ 
+ 	conn->tuple = *tuple;
+ 	conn->zone = *zone;
++	conn->cpu = raw_smp_processor_id();
++	conn->jiffies32 = (u32)jiffies;
+ 	memcpy(rbconn->key, key, sizeof(u32) * data->keylen);
+ 
+ 	nf_conncount_list_init(&rbconn->list);
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 939510247ef5a6..eb3a6f96b094db 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -31,7 +31,6 @@ unsigned int nf_tables_net_id __read_mostly;
+ static LIST_HEAD(nf_tables_expressions);
+ static LIST_HEAD(nf_tables_objects);
+ static LIST_HEAD(nf_tables_flowtables);
+-static LIST_HEAD(nf_tables_destroy_list);
+ static LIST_HEAD(nf_tables_gc_list);
+ static DEFINE_SPINLOCK(nf_tables_destroy_list_lock);
+ static DEFINE_SPINLOCK(nf_tables_gc_list_lock);
+@@ -122,7 +121,6 @@ static void nft_validate_state_update(struct nft_table *table, u8 new_validate_s
+ 	table->validate_state = new_validate_state;
+ }
+ static void nf_tables_trans_destroy_work(struct work_struct *w);
+-static DECLARE_WORK(trans_destroy_work, nf_tables_trans_destroy_work);
+ 
+ static void nft_trans_gc_work(struct work_struct *work);
+ static DECLARE_WORK(trans_gc_work, nft_trans_gc_work);
+@@ -9748,11 +9746,12 @@ static void nft_commit_release(struct nft_trans *trans)
+ 
+ static void nf_tables_trans_destroy_work(struct work_struct *w)
+ {
++	struct nftables_pernet *nft_net = container_of(w, struct nftables_pernet, destroy_work);
+ 	struct nft_trans *trans, *next;
+ 	LIST_HEAD(head);
+ 
+ 	spin_lock(&nf_tables_destroy_list_lock);
+-	list_splice_init(&nf_tables_destroy_list, &head);
++	list_splice_init(&nft_net->destroy_list, &head);
+ 	spin_unlock(&nf_tables_destroy_list_lock);
+ 
+ 	if (list_empty(&head))
+@@ -9766,9 +9765,11 @@ static void nf_tables_trans_destroy_work(struct work_struct *w)
+ 	}
+ }
+ 
+-void nf_tables_trans_destroy_flush_work(void)
++void nf_tables_trans_destroy_flush_work(struct net *net)
+ {
+-	flush_work(&trans_destroy_work);
++	struct nftables_pernet *nft_net = nft_pernet(net);
++
++	flush_work(&nft_net->destroy_work);
+ }
+ EXPORT_SYMBOL_GPL(nf_tables_trans_destroy_flush_work);
+ 
+@@ -10226,11 +10227,11 @@ static void nf_tables_commit_release(struct net *net)
+ 
+ 	trans->put_net = true;
+ 	spin_lock(&nf_tables_destroy_list_lock);
+-	list_splice_tail_init(&nft_net->commit_list, &nf_tables_destroy_list);
++	list_splice_tail_init(&nft_net->commit_list, &nft_net->destroy_list);
+ 	spin_unlock(&nf_tables_destroy_list_lock);
+ 
+ 	nf_tables_module_autoload_cleanup(net);
+-	schedule_work(&trans_destroy_work);
++	schedule_work(&nft_net->destroy_work);
+ 
+ 	mutex_unlock(&nft_net->commit_mutex);
+ }
+@@ -11653,7 +11654,7 @@ static int nft_rcv_nl_event(struct notifier_block *this, unsigned long event,
+ 
+ 	gc_seq = nft_gc_seq_begin(nft_net);
+ 
+-	nf_tables_trans_destroy_flush_work();
++	nf_tables_trans_destroy_flush_work(net);
+ again:
+ 	list_for_each_entry(table, &nft_net->tables, list) {
+ 		if (nft_table_has_owner(table) &&
+@@ -11695,6 +11696,7 @@ static int __net_init nf_tables_init_net(struct net *net)
+ 
+ 	INIT_LIST_HEAD(&nft_net->tables);
+ 	INIT_LIST_HEAD(&nft_net->commit_list);
++	INIT_LIST_HEAD(&nft_net->destroy_list);
+ 	INIT_LIST_HEAD(&nft_net->commit_set_list);
+ 	INIT_LIST_HEAD(&nft_net->binding_list);
+ 	INIT_LIST_HEAD(&nft_net->module_list);
+@@ -11703,6 +11705,7 @@ static int __net_init nf_tables_init_net(struct net *net)
+ 	nft_net->base_seq = 1;
+ 	nft_net->gc_seq = 0;
+ 	nft_net->validate_state = NFT_VALIDATE_SKIP;
++	INIT_WORK(&nft_net->destroy_work, nf_tables_trans_destroy_work);
+ 
+ 	return 0;
+ }
+@@ -11731,14 +11734,17 @@ static void __net_exit nf_tables_exit_net(struct net *net)
+ 	if (!list_empty(&nft_net->module_list))
+ 		nf_tables_module_autoload_cleanup(net);
+ 
++	cancel_work_sync(&nft_net->destroy_work);
+ 	__nft_release_tables(net);
+ 
+ 	nft_gc_seq_end(nft_net, gc_seq);
+ 
+ 	mutex_unlock(&nft_net->commit_mutex);
++
+ 	WARN_ON_ONCE(!list_empty(&nft_net->tables));
+ 	WARN_ON_ONCE(!list_empty(&nft_net->module_list));
+ 	WARN_ON_ONCE(!list_empty(&nft_net->notify_list));
++	WARN_ON_ONCE(!list_empty(&nft_net->destroy_list));
+ }
+ 
+ static void nf_tables_exit_batch(struct list_head *net_exit_list)
+@@ -11829,10 +11835,8 @@ static void __exit nf_tables_module_exit(void)
+ 	unregister_netdevice_notifier(&nf_tables_flowtable_notifier);
+ 	nft_chain_filter_fini();
+ 	nft_chain_route_fini();
+-	nf_tables_trans_destroy_flush_work();
+ 	unregister_pernet_subsys(&nf_tables_net_ops);
+ 	cancel_work_sync(&trans_gc_work);
+-	cancel_work_sync(&trans_destroy_work);
+ 	rcu_barrier();
+ 	rhltable_destroy(&nft_objname_ht);
+ 	nf_tables_core_module_exit();
+diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
+index 7ca4f0d21fe2a2..72711d62fddfa4 100644
+--- a/net/netfilter/nft_compat.c
++++ b/net/netfilter/nft_compat.c
+@@ -228,7 +228,7 @@ static int nft_parse_compat(const struct nlattr *attr, u16 *proto, bool *inv)
+ 	return 0;
+ }
+ 
+-static void nft_compat_wait_for_destructors(void)
++static void nft_compat_wait_for_destructors(struct net *net)
+ {
+ 	/* xtables matches or targets can have side effects, e.g.
+ 	 * creation/destruction of /proc files.
+@@ -236,7 +236,7 @@ static void nft_compat_wait_for_destructors(void)
+ 	 * work queue.  If we have pending invocations we thus
+ 	 * need to wait for those to finish.
+ 	 */
+-	nf_tables_trans_destroy_flush_work();
++	nf_tables_trans_destroy_flush_work(net);
+ }
+ 
+ static int
+@@ -262,7 +262,7 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
+ 
+ 	nft_target_set_tgchk_param(&par, ctx, target, info, &e, proto, inv);
+ 
+-	nft_compat_wait_for_destructors();
++	nft_compat_wait_for_destructors(ctx->net);
+ 
+ 	ret = xt_check_target(&par, size, proto, inv);
+ 	if (ret < 0) {
+@@ -515,7 +515,7 @@ __nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
+ 
+ 	nft_match_set_mtchk_param(&par, ctx, match, info, &e, proto, inv);
+ 
+-	nft_compat_wait_for_destructors();
++	nft_compat_wait_for_destructors(ctx->net);
+ 
+ 	return xt_check_match(&par, size, proto, inv);
+ }
+diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
+index 67a41cd2baaff0..a1b373b99f7b84 100644
+--- a/net/netfilter/nft_ct.c
++++ b/net/netfilter/nft_ct.c
+@@ -230,6 +230,7 @@ static void nft_ct_set_zone_eval(const struct nft_expr *expr,
+ 	enum ip_conntrack_info ctinfo;
+ 	u16 value = nft_reg_load16(&regs->data[priv->sreg]);
+ 	struct nf_conn *ct;
++	int oldcnt;
+ 
+ 	ct = nf_ct_get(skb, &ctinfo);
+ 	if (ct) /* already tracked */
+@@ -250,10 +251,11 @@ static void nft_ct_set_zone_eval(const struct nft_expr *expr,
+ 
+ 	ct = this_cpu_read(nft_ct_pcpu_template);
+ 
+-	if (likely(refcount_read(&ct->ct_general.use) == 1)) {
+-		refcount_inc(&ct->ct_general.use);
++	__refcount_inc(&ct->ct_general.use, &oldcnt);
++	if (likely(oldcnt == 1)) {
+ 		nf_ct_zone_add(ct, &zone);
+ 	} else {
++		refcount_dec(&ct->ct_general.use);
+ 		/* previous skb got queued to userspace, allocate temporary
+ 		 * one until percpu template can be reused.
+ 		 */
+diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c
+index b8d03364566c1f..c74012c9912554 100644
+--- a/net/netfilter/nft_exthdr.c
++++ b/net/netfilter/nft_exthdr.c
+@@ -85,7 +85,6 @@ static int ipv4_find_option(struct net *net, struct sk_buff *skb,
+ 	unsigned char optbuf[sizeof(struct ip_options) + 40];
+ 	struct ip_options *opt = (struct ip_options *)optbuf;
+ 	struct iphdr *iph, _iph;
+-	unsigned int start;
+ 	bool found = false;
+ 	__be32 info;
+ 	int optlen;
+@@ -93,7 +92,6 @@ static int ipv4_find_option(struct net *net, struct sk_buff *skb,
+ 	iph = skb_header_pointer(skb, 0, sizeof(_iph), &_iph);
+ 	if (!iph)
+ 		return -EBADMSG;
+-	start = sizeof(struct iphdr);
+ 
+ 	optlen = iph->ihl * 4 - (int)sizeof(struct iphdr);
+ 	if (optlen <= 0)
+@@ -103,7 +101,7 @@ static int ipv4_find_option(struct net *net, struct sk_buff *skb,
+ 	/* Copy the options since __ip_options_compile() modifies
+ 	 * the options.
+ 	 */
+-	if (skb_copy_bits(skb, start, opt->__data, optlen))
++	if (skb_copy_bits(skb, sizeof(struct iphdr), opt->__data, optlen))
+ 		return -EBADMSG;
+ 	opt->optlen = optlen;
+ 
+@@ -118,18 +116,18 @@ static int ipv4_find_option(struct net *net, struct sk_buff *skb,
+ 		found = target == IPOPT_SSRR ? opt->is_strictroute :
+ 					       !opt->is_strictroute;
+ 		if (found)
+-			*offset = opt->srr + start;
++			*offset = opt->srr;
+ 		break;
+ 	case IPOPT_RR:
+ 		if (!opt->rr)
+ 			break;
+-		*offset = opt->rr + start;
++		*offset = opt->rr;
+ 		found = true;
+ 		break;
+ 	case IPOPT_RA:
+ 		if (!opt->router_alert)
+ 			break;
+-		*offset = opt->router_alert + start;
++		*offset = opt->router_alert;
+ 		found = true;
+ 		break;
+ 	default:
+diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
+index 3bb4810234aac2..e573e92213029c 100644
+--- a/net/openvswitch/conntrack.c
++++ b/net/openvswitch/conntrack.c
+@@ -1368,8 +1368,11 @@ bool ovs_ct_verify(struct net *net, enum ovs_key_attr attr)
+ 	    attr == OVS_KEY_ATTR_CT_MARK)
+ 		return true;
+ 	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
+-	    attr == OVS_KEY_ATTR_CT_LABELS)
+-		return true;
++	    attr == OVS_KEY_ATTR_CT_LABELS) {
++		struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
++
++		return ovs_net->xt_label;
++	}
+ 
+ 	return false;
+ }
+@@ -1378,7 +1381,6 @@ int ovs_ct_copy_action(struct net *net, const struct nlattr *attr,
+ 		       const struct sw_flow_key *key,
+ 		       struct sw_flow_actions **sfa,  bool log)
+ {
+-	unsigned int n_bits = sizeof(struct ovs_key_ct_labels) * BITS_PER_BYTE;
+ 	struct ovs_conntrack_info ct_info;
+ 	const char *helper = NULL;
+ 	u16 family;
+@@ -1407,12 +1409,6 @@ int ovs_ct_copy_action(struct net *net, const struct nlattr *attr,
+ 		return -ENOMEM;
+ 	}
+ 
+-	if (nf_connlabels_get(net, n_bits - 1)) {
+-		nf_ct_tmpl_free(ct_info.ct);
+-		OVS_NLERR(log, "Failed to set connlabel length");
+-		return -EOPNOTSUPP;
+-	}
+-
+ 	if (ct_info.timeout[0]) {
+ 		if (nf_ct_set_timeout(net, ct_info.ct, family, key->ip.proto,
+ 				      ct_info.timeout))
+@@ -1581,7 +1577,6 @@ static void __ovs_ct_free_action(struct ovs_conntrack_info *ct_info)
+ 	if (ct_info->ct) {
+ 		if (ct_info->timeout[0])
+ 			nf_ct_destroy_timeout(ct_info->ct);
+-		nf_connlabels_put(nf_ct_net(ct_info->ct));
+ 		nf_ct_tmpl_free(ct_info->ct);
+ 	}
+ }
+@@ -2006,9 +2001,17 @@ struct genl_family dp_ct_limit_genl_family __ro_after_init = {
+ 
+ int ovs_ct_init(struct net *net)
+ {
+-#if	IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
++	unsigned int n_bits = sizeof(struct ovs_key_ct_labels) * BITS_PER_BYTE;
+ 	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
+ 
++	if (nf_connlabels_get(net, n_bits - 1)) {
++		ovs_net->xt_label = false;
++		OVS_NLERR(true, "Failed to set connlabel length");
++	} else {
++		ovs_net->xt_label = true;
++	}
++
++#if	IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
+ 	return ovs_ct_limit_init(net, ovs_net);
+ #else
+ 	return 0;
+@@ -2017,9 +2020,12 @@ int ovs_ct_init(struct net *net)
+ 
+ void ovs_ct_exit(struct net *net)
+ {
+-#if	IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
+ 	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
+ 
++#if	IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
+ 	ovs_ct_limit_exit(net, ovs_net);
+ #endif
++
++	if (ovs_net->xt_label)
++		nf_connlabels_put(net);
+ }
+diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h
+index 365b9bb7f546e8..9ca6231ea64703 100644
+--- a/net/openvswitch/datapath.h
++++ b/net/openvswitch/datapath.h
+@@ -160,6 +160,9 @@ struct ovs_net {
+ #if	IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
+ 	struct ovs_ct_limit_info *ct_limit_info;
+ #endif
++
++	/* Module reference for configuring conntrack. */
++	bool xt_label;
+ };
+ 
+ /**
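
The new xt_label flag records whether the per-netns connlabel reference was
obtained, since the acquire/release pair moves from per-flow action parsing
into netns init/exit. A standalone sketch of that ownership pattern
(labels_get()/labels_put() are stand-ins for nf_connlabels_get()/put()):

#include <stdbool.h>
#include <stdio.h>

static int labels_get(void) { return 0; }	/* stand-in: 0 on success */
static void labels_put(void) { }		/* stand-in for release */

struct example_net { bool xt_label; };

static void example_init(struct example_net *en)
{
	/* acquire once per namespace; on failure the label feature is
	 * reported unavailable instead of failing every flow */
	en->xt_label = (labels_get() == 0);
}

static void example_exit(struct example_net *en)
{
	if (en->xt_label)
		labels_put();	/* release only what init obtained */
}

int main(void)
{
	struct example_net en;

	example_init(&en);
	printf("ct labels available: %d\n", en.xt_label);
	example_exit(&en);
	return 0;
}
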
+diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
+index 729ef582a3a8b8..0df89240b73361 100644
+--- a/net/openvswitch/flow_netlink.c
++++ b/net/openvswitch/flow_netlink.c
+@@ -2317,14 +2317,10 @@ int ovs_nla_put_mask(const struct sw_flow *flow, struct sk_buff *skb)
+ 				OVS_FLOW_ATTR_MASK, true, skb);
+ }
+ 
+-#define MAX_ACTIONS_BUFSIZE	(32 * 1024)
+-
+ static struct sw_flow_actions *nla_alloc_flow_actions(int size)
+ {
+ 	struct sw_flow_actions *sfa;
+ 
+-	WARN_ON_ONCE(size > MAX_ACTIONS_BUFSIZE);
+-
+ 	sfa = kmalloc(kmalloc_size_roundup(sizeof(*sfa) + size), GFP_KERNEL);
+ 	if (!sfa)
+ 		return ERR_PTR(-ENOMEM);
+@@ -2480,15 +2476,6 @@ static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa,
+ 
+ 	new_acts_size = max(next_offset + req_size, ksize(*sfa) * 2);
+ 
+-	if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
+-		if ((next_offset + req_size) > MAX_ACTIONS_BUFSIZE) {
+-			OVS_NLERR(log, "Flow action size exceeds max %u",
+-				  MAX_ACTIONS_BUFSIZE);
+-			return ERR_PTR(-EMSGSIZE);
+-		}
+-		new_acts_size = MAX_ACTIONS_BUFSIZE;
+-	}
+-
+ 	acts = nla_alloc_flow_actions(new_acts_size);
+ 	if (IS_ERR(acts))
+ 		return ERR_CAST(acts);
+@@ -3545,7 +3532,7 @@ int ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
+ 	int err;
+ 	u32 mpls_label_count = 0;
+ 
+-	*sfa = nla_alloc_flow_actions(min(nla_len(attr), MAX_ACTIONS_BUFSIZE));
++	*sfa = nla_alloc_flow_actions(nla_len(attr));
+ 	if (IS_ERR(*sfa))
+ 		return PTR_ERR(*sfa);
+ 
+diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
+index d26ac6bd9b1080..518f52f65a49d7 100644
+--- a/net/sched/sch_api.c
++++ b/net/sched/sch_api.c
+@@ -2254,6 +2254,12 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
+ 		return -EOPNOTSUPP;
+ 	}
+ 
++	/* Prevent creation of traffic classes with classid TC_H_ROOT */
++	if (clid == TC_H_ROOT) {
++		NL_SET_ERR_MSG(extack, "Cannot create traffic class with classid TC_H_ROOT");
++		return -EINVAL;
++	}
++
+ 	new_cl = cl;
+ 	err = -EOPNOTSUPP;
+ 	if (cops->change)
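
TC_H_ROOT is the all-ones sentinel handle meaning "the root", not a real
major:minor pair, so a class created with that classid could never be
addressed again. The constants below mirror include/uapi/linux/pkt_sched.h:

#include <stdio.h>

#define TC_H_ROOT	0xFFFFFFFFU
#define TC_H_MAJ(h)	((h) & 0xFFFF0000U)
#define TC_H_MIN(h)	((h) & 0x0000FFFFU)

int main(void)
{
	unsigned int clid = TC_H_ROOT;

	/* both halves come out as the reserved all-ones pattern, which
	 * is why the sentinel must be rejected as a class id */
	printf("maj=%#x min=%#x\n", TC_H_MAJ(clid) >> 16, TC_H_MIN(clid));
	return 0;
}
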
+diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
+index 79ba9dc702541e..43b0343a7cd0ca 100644
+--- a/net/sched/sch_gred.c
++++ b/net/sched/sch_gred.c
+@@ -913,7 +913,8 @@ static void gred_destroy(struct Qdisc *sch)
+ 	for (i = 0; i < table->DPs; i++)
+ 		gred_destroy_vq(table->tab[i]);
+ 
+-	gred_offload(sch, TC_GRED_DESTROY);
++	if (table->opt)
++		gred_offload(sch, TC_GRED_DESTROY);
+ 	kfree(table->opt);
+ }
+ 
+diff --git a/net/sctp/stream.c b/net/sctp/stream.c
+index c241cc552e8d58..bfcff6d6a43866 100644
+--- a/net/sctp/stream.c
++++ b/net/sctp/stream.c
+@@ -735,7 +735,7 @@ struct sctp_chunk *sctp_process_strreset_tsnreq(
+ 	 *     value SHOULD be the smallest TSN not acknowledged by the
+ 	 *     receiver of the request plus 2^31.
+ 	 */
+-	init_tsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map) + (1 << 31);
++	init_tsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map) + (1U << 31);
+ 	sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
+ 			 init_tsn, GFP_ATOMIC);
+ 
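
The one-character change matters because 1 << 31 shifts a set bit into the
sign bit of an int, which is undefined behaviour in C; the unsigned literal
makes both the shift and the later wrap-around well defined:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t ctsn = 0x7fffffffu;		/* example cumulative TSN */

	/* 1U << 31 is a fully defined unsigned shift (0x80000000);
	 * the addition then wraps mod 2^32, also defined */
	uint32_t init_tsn = ctsn + (1U << 31);

	printf("init_tsn = 0x%08x\n", init_tsn);
	return 0;
}
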
+diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
+index 6488ead9e46459..4d5fbacef496fd 100644
+--- a/net/switchdev/switchdev.c
++++ b/net/switchdev/switchdev.c
+@@ -472,7 +472,7 @@ bool switchdev_port_obj_act_is_deferred(struct net_device *dev,
+ EXPORT_SYMBOL_GPL(switchdev_port_obj_act_is_deferred);
+ 
+ static ATOMIC_NOTIFIER_HEAD(switchdev_notif_chain);
+-static BLOCKING_NOTIFIER_HEAD(switchdev_blocking_notif_chain);
++static RAW_NOTIFIER_HEAD(switchdev_blocking_notif_chain);
+ 
+ /**
+  *	register_switchdev_notifier - Register notifier
+@@ -518,17 +518,27 @@ EXPORT_SYMBOL_GPL(call_switchdev_notifiers);
+ 
+ int register_switchdev_blocking_notifier(struct notifier_block *nb)
+ {
+-	struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;
++	struct raw_notifier_head *chain = &switchdev_blocking_notif_chain;
++	int err;
++
++	rtnl_lock();
++	err = raw_notifier_chain_register(chain, nb);
++	rtnl_unlock();
+ 
+-	return blocking_notifier_chain_register(chain, nb);
++	return err;
+ }
+ EXPORT_SYMBOL_GPL(register_switchdev_blocking_notifier);
+ 
+ int unregister_switchdev_blocking_notifier(struct notifier_block *nb)
+ {
+-	struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;
++	struct raw_notifier_head *chain = &switchdev_blocking_notif_chain;
++	int err;
+ 
+-	return blocking_notifier_chain_unregister(chain, nb);
++	rtnl_lock();
++	err = raw_notifier_chain_unregister(chain, nb);
++	rtnl_unlock();
++
++	return err;
+ }
+ EXPORT_SYMBOL_GPL(unregister_switchdev_blocking_notifier);
+ 
+@@ -536,10 +546,11 @@ int call_switchdev_blocking_notifiers(unsigned long val, struct net_device *dev,
+ 				      struct switchdev_notifier_info *info,
+ 				      struct netlink_ext_ack *extack)
+ {
++	ASSERT_RTNL();
+ 	info->dev = dev;
+ 	info->extack = extack;
+-	return blocking_notifier_call_chain(&switchdev_blocking_notif_chain,
+-					    val, info);
++	return raw_notifier_call_chain(&switchdev_blocking_notif_chain,
++				       val, info);
+ }
+ EXPORT_SYMBOL_GPL(call_switchdev_blocking_notifiers);
+ 
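
A raw notifier chain has no internal locking, so the conversion above makes
every mutation and invocation rely on one external lock, the RTNL, with
ASSERT_RTNL() documenting the contract for callers. A kernel-side sketch of
the same pattern (uses only the APIs the hunk itself uses; not runnable
outside the kernel):

static RAW_NOTIFIER_HEAD(example_chain);

int example_register(struct notifier_block *nb)
{
	int err;

	rtnl_lock();		/* the chain itself is unlocked */
	err = raw_notifier_chain_register(&example_chain, nb);
	rtnl_unlock();

	return err;
}

int example_notify(unsigned long val, void *info)
{
	ASSERT_RTNL();		/* callers must already hold the RTNL */
	return raw_notifier_call_chain(&example_chain, val, info);
}
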
+diff --git a/net/wireless/core.c b/net/wireless/core.c
+index 7d313fb66d76ba..1ce8fff2a28a4e 100644
+--- a/net/wireless/core.c
++++ b/net/wireless/core.c
+@@ -1198,6 +1198,13 @@ void cfg80211_dev_free(struct cfg80211_registered_device *rdev)
+ {
+ 	struct cfg80211_internal_bss *scan, *tmp;
+ 	struct cfg80211_beacon_registration *reg, *treg;
++	unsigned long flags;
++
++	spin_lock_irqsave(&rdev->wiphy_work_lock, flags);
++	WARN_ON(!list_empty(&rdev->wiphy_work_list));
++	spin_unlock_irqrestore(&rdev->wiphy_work_lock, flags);
++	cancel_work_sync(&rdev->wiphy_work);
++
+ 	rfkill_destroy(rdev->wiphy.rfkill);
+ 	list_for_each_entry_safe(reg, treg, &rdev->beacon_registrations, list) {
+ 		list_del(&reg->list);
+diff --git a/rust/kernel/alloc/allocator_test.rs b/rust/kernel/alloc/allocator_test.rs
+index e3240d16040bd9..c37d4c0c64e9f9 100644
+--- a/rust/kernel/alloc/allocator_test.rs
++++ b/rust/kernel/alloc/allocator_test.rs
+@@ -62,6 +62,24 @@ unsafe fn realloc(
+             ));
+         }
+ 
++        // ISO C (ISO/IEC 9899:2011) defines `aligned_alloc`:
++        //
++        // > The value of alignment shall be a valid alignment supported by the implementation
++        // [...].
++        //
++        // As an example of the "supported by the implementation" requirement, POSIX.1-2001 (IEEE
++        // 1003.1-2001) defines `posix_memalign`:
++        //
++        // > The value of alignment shall be a power of two multiple of sizeof (void *).
++        //
++        // and POSIX-based implementations of `aligned_alloc` inherit this requirement. At the time
++        // of writing, this is known to be the case on macOS (but not in glibc).
++        //
++        // Satisfy the stricter requirement to avoid spurious test failures on some platforms.
++        let min_align = core::mem::size_of::<*const crate::ffi::c_void>();
++        let layout = layout.align_to(min_align).map_err(|_| AllocError)?;
++        let layout = layout.pad_to_align();
++
+         // SAFETY: Returns either NULL or a pointer to a memory allocation that satisfies or
+         // exceeds the given size and alignment requirements.
+         let dst = unsafe { libc_aligned_alloc(layout.align(), layout.size()) } as *mut u8;
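
In C terms, the two layout adjustments above do the following: round the
alignment up to sizeof(void *), the posix_memalign() floor that POSIX-derived
aligned_alloc() implementations inherit, then pad the size to a multiple of
the alignment, the classic ISO C precondition. A userspace sketch:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	size_t want_align = 2, want_size = 10;

	/* floor the alignment at sizeof(void *) */
	size_t align = want_align > sizeof(void *) ? want_align
						   : sizeof(void *);
	/* pad the size to a multiple of the alignment */
	size_t size = (want_size + align - 1) / align * align;

	void *p = aligned_alloc(align, size);	/* C11 */

	printf("align=%zu size=%zu p=%p\n", align, size, p);
	free(p);
	return 0;
}
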
+diff --git a/rust/kernel/error.rs b/rust/kernel/error.rs
+index 5fece574ec023b..4911b294bfe662 100644
+--- a/rust/kernel/error.rs
++++ b/rust/kernel/error.rs
+@@ -104,7 +104,7 @@ pub fn from_errno(errno: crate::ffi::c_int) -> Error {
+         if errno < -(bindings::MAX_ERRNO as i32) || errno >= 0 {
+             // TODO: Make it a `WARN_ONCE` once available.
+             crate::pr_warn!(
+-                "attempted to create `Error` with out of range `errno`: {}",
++                "attempted to create `Error` with out of range `errno`: {}\n",
+                 errno
+             );
+             return code::EINVAL;
+diff --git a/rust/kernel/init.rs b/rust/kernel/init.rs
+index c962029f96e1f1..90bfb5cb26cd7a 100644
+--- a/rust/kernel/init.rs
++++ b/rust/kernel/init.rs
+@@ -259,7 +259,7 @@
+ ///     },
+ /// }));
+ /// let foo: Pin<&mut Foo> = foo;
+-/// pr_info!("a: {}", &*foo.a.lock());
++/// pr_info!("a: {}\n", &*foo.a.lock());
+ /// ```
+ ///
+ /// # Syntax
+@@ -311,7 +311,7 @@ macro_rules! stack_pin_init {
+ ///     }, GFP_KERNEL)?,
+ /// }));
+ /// let foo = foo.unwrap();
+-/// pr_info!("a: {}", &*foo.a.lock());
++/// pr_info!("a: {}\n", &*foo.a.lock());
+ /// ```
+ ///
+ /// ```rust,ignore
+@@ -336,7 +336,7 @@ macro_rules! stack_pin_init {
+ ///         x: 64,
+ ///     }, GFP_KERNEL)?,
+ /// }));
+-/// pr_info!("a: {}", &*foo.a.lock());
++/// pr_info!("a: {}\n", &*foo.a.lock());
+ /// # Ok::<_, AllocError>(())
+ /// ```
+ ///
+@@ -866,7 +866,7 @@ pub unsafe trait PinInit<T: ?Sized, E = Infallible>: Sized {
+     ///
+     /// impl Foo {
+     ///     fn setup(self: Pin<&mut Self>) {
+-    ///         pr_info!("Setting up foo");
++    ///         pr_info!("Setting up foo\n");
+     ///     }
+     /// }
+     ///
+@@ -970,7 +970,7 @@ pub unsafe trait Init<T: ?Sized, E = Infallible>: PinInit<T, E> {
+     ///
+     /// impl Foo {
+     ///     fn setup(&mut self) {
+-    ///         pr_info!("Setting up foo");
++    ///         pr_info!("Setting up foo\n");
+     ///     }
+     /// }
+     ///
+@@ -1318,7 +1318,7 @@ fn write_pin_init<E>(mut self, init: impl PinInit<T, E>) -> Result<Pin<Self::Ini
+ /// #[pinned_drop]
+ /// impl PinnedDrop for Foo {
+ ///     fn drop(self: Pin<&mut Self>) {
+-///         pr_info!("Foo is being dropped!");
++///         pr_info!("Foo is being dropped!\n");
+ ///     }
+ /// }
+ /// ```
+@@ -1400,17 +1400,14 @@ macro_rules! impl_zeroable {
+     // SAFETY: `T: Zeroable` and `UnsafeCell` is `repr(transparent)`.
+     {<T: ?Sized + Zeroable>} UnsafeCell<T>,
+ 
+-    // SAFETY: All zeros is equivalent to `None` (option layout optimization guarantee).
++    // SAFETY: All zeros is equivalent to `None` (option layout optimization guarantee:
++    // https://doc.rust-lang.org/stable/std/option/index.html#representation).
+     Option<NonZeroU8>, Option<NonZeroU16>, Option<NonZeroU32>, Option<NonZeroU64>,
+     Option<NonZeroU128>, Option<NonZeroUsize>,
+     Option<NonZeroI8>, Option<NonZeroI16>, Option<NonZeroI32>, Option<NonZeroI64>,
+     Option<NonZeroI128>, Option<NonZeroIsize>,
+-
+-    // SAFETY: All zeros is equivalent to `None` (option layout optimization guarantee).
+-    //
+-    // In this case we are allowed to use `T: ?Sized`, since all zeros is the `None` variant.
+-    {<T: ?Sized>} Option<NonNull<T>>,
+-    {<T: ?Sized>} Option<KBox<T>>,
++    {<T>} Option<NonNull<T>>,
++    {<T>} Option<KBox<T>>,
+ 
+     // SAFETY: `null` pointer is valid.
+     //
+diff --git a/rust/kernel/init/macros.rs b/rust/kernel/init/macros.rs
+index 1fd146a8324165..b7213962a6a5ac 100644
+--- a/rust/kernel/init/macros.rs
++++ b/rust/kernel/init/macros.rs
+@@ -45,7 +45,7 @@
+ //! #[pinned_drop]
+ //! impl PinnedDrop for Foo {
+ //!     fn drop(self: Pin<&mut Self>) {
+-//!         pr_info!("{self:p} is getting dropped.");
++//!         pr_info!("{self:p} is getting dropped.\n");
+ //!     }
+ //! }
+ //!
+@@ -412,7 +412,7 @@
+ //! #[pinned_drop]
+ //! impl PinnedDrop for Foo {
+ //!     fn drop(self: Pin<&mut Self>) {
+-//!         pr_info!("{self:p} is getting dropped.");
++//!         pr_info!("{self:p} is getting dropped.\n");
+ //!     }
+ //! }
+ //! ```
+@@ -423,7 +423,7 @@
+ //! // `unsafe`, full path and the token parameter are added, everything else stays the same.
+ //! unsafe impl ::kernel::init::PinnedDrop for Foo {
+ //!     fn drop(self: Pin<&mut Self>, _: ::kernel::init::__internal::OnlyCallFromDrop) {
+-//!         pr_info!("{self:p} is getting dropped.");
++//!         pr_info!("{self:p} is getting dropped.\n");
+ //!     }
+ //! }
+ //! ```
+diff --git a/rust/kernel/lib.rs b/rust/kernel/lib.rs
+index d764cb7ff5d785..904d241604db91 100644
+--- a/rust/kernel/lib.rs
++++ b/rust/kernel/lib.rs
+@@ -6,7 +6,7 @@
+ //! usage by Rust code in the kernel and is shared by all of them.
+ //!
+ //! In other words, all the rest of the Rust code in the kernel (e.g. kernel
+-//! modules written in Rust) depends on [`core`], [`alloc`] and this crate.
++//! modules written in Rust) depends on [`core`] and this crate.
+ //!
+ //! If you need a kernel C API that is not ported or wrapped yet here, then
+ //! do so first instead of bypassing this crate.
+diff --git a/rust/kernel/sync.rs b/rust/kernel/sync.rs
+index 0ab20975a3b5db..697649ddef72e4 100644
+--- a/rust/kernel/sync.rs
++++ b/rust/kernel/sync.rs
+@@ -27,28 +27,20 @@
+ unsafe impl Sync for LockClassKey {}
+ 
+ impl LockClassKey {
+-    /// Creates a new lock class key.
+-    pub const fn new() -> Self {
+-        Self(Opaque::uninit())
+-    }
+-
+     pub(crate) fn as_ptr(&self) -> *mut bindings::lock_class_key {
+         self.0.get()
+     }
+ }
+ 
+-impl Default for LockClassKey {
+-    fn default() -> Self {
+-        Self::new()
+-    }
+-}
+-
+ /// Defines a new static lock class and returns a pointer to it.
+ #[doc(hidden)]
+ #[macro_export]
+ macro_rules! static_lock_class {
+     () => {{
+-        static CLASS: $crate::sync::LockClassKey = $crate::sync::LockClassKey::new();
++        static CLASS: $crate::sync::LockClassKey =
++            // SAFETY: lockdep expects uninitialized memory when it's handed a statically allocated
++            // lock_class_key
++            unsafe { ::core::mem::MaybeUninit::uninit().assume_init() };
+         &CLASS
+     }};
+ }
+diff --git a/scripts/generate_rust_analyzer.py b/scripts/generate_rust_analyzer.py
+index 09e1d166d8d236..d1f5adbf33f91c 100755
+--- a/scripts/generate_rust_analyzer.py
++++ b/scripts/generate_rust_analyzer.py
+@@ -49,14 +49,26 @@ def generate_crates(srctree, objtree, sysroot_src, external_src, cfgs):
+             }
+         })
+ 
+-    # First, the ones in `rust/` since they are a bit special.
+-    append_crate(
+-        "core",
+-        sysroot_src / "core" / "src" / "lib.rs",
+-        [],
+-        cfg=crates_cfgs.get("core", []),
+-        is_workspace_member=False,
+-    )
++    def append_sysroot_crate(
++        display_name,
++        deps,
++        cfg=[],
++    ):
++        append_crate(
++            display_name,
++            sysroot_src / display_name / "src" / "lib.rs",
++            deps,
++            cfg,
++            is_workspace_member=False,
++        )
++
++    # NB: sysroot crates reexport items from one another so setting up our transitive dependencies
++    # here is important for ensuring that rust-analyzer can resolve symbols. The sources of truth
++    # for this dependency graph are `(sysroot_src / crate / "Cargo.toml" for crate in crates)`.
++    append_sysroot_crate("core", [], cfg=crates_cfgs.get("core", []))
++    append_sysroot_crate("alloc", ["core"])
++    append_sysroot_crate("std", ["alloc", "core"])
++    append_sysroot_crate("proc_macro", ["core", "std"])
+ 
+     append_crate(
+         "compiler_builtins",
+@@ -67,7 +79,7 @@ def generate_crates(srctree, objtree, sysroot_src, external_src, cfgs):
+     append_crate(
+         "macros",
+         srctree / "rust" / "macros" / "lib.rs",
+-        [],
++        ["std", "proc_macro"],
+         is_proc_macro=True,
+     )
+     crates[-1]["proc_macro_dylib_path"] = f"{objtree}/rust/libmacros.so"
+@@ -78,27 +90,28 @@ def generate_crates(srctree, objtree, sysroot_src, external_src, cfgs):
+         ["core", "compiler_builtins"],
+     )
+ 
+-    append_crate(
+-        "bindings",
+-        srctree / "rust"/ "bindings" / "lib.rs",
+-        ["core"],
+-        cfg=cfg,
+-    )
+-    crates[-1]["env"]["OBJTREE"] = str(objtree.resolve(True))
+-
+-    append_crate(
+-        "kernel",
+-        srctree / "rust" / "kernel" / "lib.rs",
+-        ["core", "macros", "build_error", "bindings"],
+-        cfg=cfg,
+-    )
+-    crates[-1]["source"] = {
+-        "include_dirs": [
+-            str(srctree / "rust" / "kernel"),
+-            str(objtree / "rust")
+-        ],
+-        "exclude_dirs": [],
+-    }
++    def append_crate_with_generated(
++        display_name,
++        deps,
++    ):
++        append_crate(
++            display_name,
++            srctree / "rust"/ display_name / "lib.rs",
++            deps,
++            cfg=cfg,
++        )
++        crates[-1]["env"]["OBJTREE"] = str(objtree.resolve(True))
++        crates[-1]["source"] = {
++            "include_dirs": [
++                str(srctree / "rust" / display_name),
++                str(objtree / "rust")
++            ],
++            "exclude_dirs": [],
++        }
++
++    append_crate_with_generated("bindings", ["core"])
++    append_crate_with_generated("uapi", ["core"])
++    append_crate_with_generated("kernel", ["core", "macros", "build_error", "bindings", "uapi"])
+ 
+     def is_root_crate(build_file, target):
+         try:
+diff --git a/scripts/rustdoc_test_gen.rs b/scripts/rustdoc_test_gen.rs
+index 5ebd42ae4a3fd1..76aaa8329413d8 100644
+--- a/scripts/rustdoc_test_gen.rs
++++ b/scripts/rustdoc_test_gen.rs
+@@ -15,8 +15,8 @@
+ //!   - Test code should be able to define functions and call them, without having to carry
+ //!     the context.
+ //!
+-//!   - Later on, we may want to be able to test non-kernel code (e.g. `core`, `alloc` or
+-//!     third-party crates) which likely use the standard library `assert*!` macros.
++//!   - Later on, we may want to be able to test non-kernel code (e.g. `core` or third-party
++//!     crates) which likely use the standard library `assert*!` macros.
+ //!
+ //! For this reason, instead of the passed context, `kunit_get_current_test()` is used instead
+ //! (i.e. `current->kunit_test`).
+diff --git a/sound/hda/intel-dsp-config.c b/sound/hda/intel-dsp-config.c
+index 9f849e05ce79f8..34825b2f3b1083 100644
+--- a/sound/hda/intel-dsp-config.c
++++ b/sound/hda/intel-dsp-config.c
+@@ -539,6 +539,11 @@ static const struct config_entry config_table[] = {
+ 		.flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
+ 		.device = PCI_DEVICE_ID_INTEL_HDA_PTL,
+ 	},
++	{
++		.flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
++		.device = PCI_DEVICE_ID_INTEL_HDA_PTL_H,
++	},
++
+ #endif
+ 
+ };
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index ea52bc7370a58d..cb9925948175f9 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -2508,6 +2508,8 @@ static const struct pci_device_id azx_ids[] = {
+ 	{ PCI_DEVICE_DATA(INTEL, HDA_ARL, AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE) },
+ 	/* Panther Lake */
+ 	{ PCI_DEVICE_DATA(INTEL, HDA_PTL, AZX_DRIVER_SKL | AZX_DCAPS_INTEL_LNL) },
++	/* Panther Lake-H */
++	{ PCI_DEVICE_DATA(INTEL, HDA_PTL_H, AZX_DRIVER_SKL | AZX_DCAPS_INTEL_LNL) },
+ 	/* Apollolake (Broxton-P) */
+ 	{ PCI_DEVICE_DATA(INTEL, HDA_APL, AZX_DRIVER_SKL | AZX_DCAPS_INTEL_BROXTON) },
+ 	/* Gemini-Lake */
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index b559f0d4e34885..3949e2614a6638 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -11064,6 +11064,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1d72, 0x1945, "Redmi G", ALC256_FIXUP_ASUS_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1d72, 0x1947, "RedmiBook Air", ALC255_FIXUP_XIAOMI_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1f66, 0x0105, "Ayaneo Portable Game Player", ALC287_FIXUP_CS35L41_I2C_2),
++	SND_PCI_QUIRK(0x2014, 0x800a, "Positivo ARN50", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ 	SND_PCI_QUIRK(0x2782, 0x0214, "VAIO VJFE-CL", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ 	SND_PCI_QUIRK(0x2782, 0x0228, "Infinix ZERO BOOK 13", ALC269VB_FIXUP_INFINIX_ZERO_BOOK_13),
+ 	SND_PCI_QUIRK(0x2782, 0x0232, "CHUWI CoreBook XPro", ALC269VB_FIXUP_CHUWI_COREBOOK_XPRO),
+diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
+index b16587d8f97a89..a7637056972aab 100644
+--- a/sound/soc/amd/yc/acp6x-mach.c
++++ b/sound/soc/amd/yc/acp6x-mach.c
+@@ -248,6 +248,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "21M5"),
+ 		}
+ 	},
++	{
++		.driver_data = &acp6x_card,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "21M6"),
++		}
++	},
+ 	{
+ 		.driver_data = &acp6x_card,
+ 		.matches = {
+diff --git a/sound/soc/codecs/arizona.c b/sound/soc/codecs/arizona.c
+index 402b9a2ff02406..68cdb1027d0c05 100644
+--- a/sound/soc/codecs/arizona.c
++++ b/sound/soc/codecs/arizona.c
+@@ -967,7 +967,7 @@ int arizona_out_ev(struct snd_soc_dapm_widget *w,
+ 		case ARIZONA_OUT3L_ENA_SHIFT:
+ 		case ARIZONA_OUT3R_ENA_SHIFT:
+ 			priv->out_up_pending++;
+-			priv->out_up_delay += 17;
++			priv->out_up_delay += 17000;
+ 			break;
+ 		case ARIZONA_OUT4L_ENA_SHIFT:
+ 		case ARIZONA_OUT4R_ENA_SHIFT:
+@@ -977,7 +977,7 @@ int arizona_out_ev(struct snd_soc_dapm_widget *w,
+ 			case WM8997:
+ 				break;
+ 			default:
+-				priv->out_up_delay += 10;
++				priv->out_up_delay += 10000;
+ 				break;
+ 			}
+ 			break;
+@@ -999,7 +999,7 @@ int arizona_out_ev(struct snd_soc_dapm_widget *w,
+ 			if (!priv->out_up_pending && priv->out_up_delay) {
+ 				dev_dbg(component->dev, "Power up delay: %d\n",
+ 					priv->out_up_delay);
+-				msleep(priv->out_up_delay);
++				fsleep(priv->out_up_delay);
+ 				priv->out_up_delay = 0;
+ 			}
+ 			break;
+@@ -1017,7 +1017,7 @@ int arizona_out_ev(struct snd_soc_dapm_widget *w,
+ 		case ARIZONA_OUT3L_ENA_SHIFT:
+ 		case ARIZONA_OUT3R_ENA_SHIFT:
+ 			priv->out_down_pending++;
+-			priv->out_down_delay++;
++			priv->out_down_delay += 1000;
+ 			break;
+ 		case ARIZONA_OUT4L_ENA_SHIFT:
+ 		case ARIZONA_OUT4R_ENA_SHIFT:
+@@ -1028,10 +1028,10 @@ int arizona_out_ev(struct snd_soc_dapm_widget *w,
+ 				break;
+ 			case WM8998:
+ 			case WM1814:
+-				priv->out_down_delay += 5;
++				priv->out_down_delay += 5000;
+ 				break;
+ 			default:
+-				priv->out_down_delay++;
++				priv->out_down_delay += 1000;
+ 				break;
+ 			}
+ 			break;
+@@ -1053,7 +1053,7 @@ int arizona_out_ev(struct snd_soc_dapm_widget *w,
+ 			if (!priv->out_down_pending && priv->out_down_delay) {
+ 				dev_dbg(component->dev, "Power down delay: %d\n",
+ 					priv->out_down_delay);
+-				msleep(priv->out_down_delay);
++				fsleep(priv->out_down_delay);
+ 				priv->out_down_delay = 0;
+ 			}
+ 			break;
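
The delay counters switch from milliseconds to microseconds because fsleep()
takes usecs and picks the sleep primitive itself: roughly, udelay() for very
short waits, usleep_range() for mid-range ones and msleep() beyond that,
which is more accurate than msleep() for short delays. A kernel-side sketch
of the accumulate-then-sleep shape (not runnable outside the kernel):

#include <linux/delay.h>

static unsigned int out_up_delay_us;	/* now accumulated in usecs */

static void example_account(void)
{
	out_up_delay_us += 17000;	/* was "+= 17" when the unit was ms */
}

static void example_apply(void)
{
	if (out_up_delay_us) {
		fsleep(out_up_delay_us);
		out_up_delay_us = 0;
	}
}
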
+diff --git a/sound/soc/codecs/cs42l43.c b/sound/soc/codecs/cs42l43.c
+index 8ec4083cd3b807..2c43e4a6751b10 100644
+--- a/sound/soc/codecs/cs42l43.c
++++ b/sound/soc/codecs/cs42l43.c
+@@ -1146,7 +1146,7 @@ static const struct snd_kcontrol_new cs42l43_controls[] = {
+ 
+ 	SOC_DOUBLE_R_SX_TLV("ADC Volume", CS42L43_ADC_B_CTRL1, CS42L43_ADC_B_CTRL2,
+ 			    CS42L43_ADC_PGA_GAIN_SHIFT,
+-			    0xF, 5, cs42l43_adc_tlv),
++			    0xF, 4, cs42l43_adc_tlv),
+ 
+ 	SOC_DOUBLE("PDM1 Invert Switch", CS42L43_DMIC_PDM_CTRL,
+ 		   CS42L43_PDM1L_INV_SHIFT, CS42L43_PDM1R_INV_SHIFT, 1, 0),
+diff --git a/sound/soc/codecs/madera.c b/sound/soc/codecs/madera.c
+index b24d6472ad5fc9..fbfd7fb7f1685c 100644
+--- a/sound/soc/codecs/madera.c
++++ b/sound/soc/codecs/madera.c
+@@ -2322,10 +2322,10 @@ int madera_out_ev(struct snd_soc_dapm_widget *w,
+ 	case CS42L92:
+ 	case CS47L92:
+ 	case CS47L93:
+-		out_up_delay = 6;
++		out_up_delay = 6000;
+ 		break;
+ 	default:
+-		out_up_delay = 17;
++		out_up_delay = 17000;
+ 		break;
+ 	}
+ 
+@@ -2356,7 +2356,7 @@ int madera_out_ev(struct snd_soc_dapm_widget *w,
+ 		case MADERA_OUT3R_ENA_SHIFT:
+ 			priv->out_up_pending--;
+ 			if (!priv->out_up_pending) {
+-				msleep(priv->out_up_delay);
++				fsleep(priv->out_up_delay);
+ 				priv->out_up_delay = 0;
+ 			}
+ 			break;
+@@ -2375,7 +2375,7 @@ int madera_out_ev(struct snd_soc_dapm_widget *w,
+ 		case MADERA_OUT3L_ENA_SHIFT:
+ 		case MADERA_OUT3R_ENA_SHIFT:
+ 			priv->out_down_pending++;
+-			priv->out_down_delay++;
++			priv->out_down_delay += 1000;
+ 			break;
+ 		default:
+ 			break;
+@@ -2392,7 +2392,7 @@ int madera_out_ev(struct snd_soc_dapm_widget *w,
+ 		case MADERA_OUT3R_ENA_SHIFT:
+ 			priv->out_down_pending--;
+ 			if (!priv->out_down_pending) {
+-				msleep(priv->out_down_delay);
++				fsleep(priv->out_down_delay);
+ 				priv->out_down_delay = 0;
+ 			}
+ 			break;
+diff --git a/sound/soc/codecs/rt722-sdca-sdw.c b/sound/soc/codecs/rt722-sdca-sdw.c
+index d5c985ff5ac553..5449d6b5cf3d11 100644
+--- a/sound/soc/codecs/rt722-sdca-sdw.c
++++ b/sound/soc/codecs/rt722-sdca-sdw.c
+@@ -86,6 +86,10 @@ static bool rt722_sdca_mbq_readable_register(struct device *dev, unsigned int re
+ 	case 0x6100067:
+ 	case 0x6100070 ... 0x610007c:
+ 	case 0x6100080:
++	case SDW_SDCA_CTL(FUNC_NUM_MIC_ARRAY, RT722_SDCA_ENT_FU15, RT722_SDCA_CTL_FU_CH_GAIN,
++			  CH_01) ...
++	     SDW_SDCA_CTL(FUNC_NUM_MIC_ARRAY, RT722_SDCA_ENT_FU15, RT722_SDCA_CTL_FU_CH_GAIN,
++			  CH_04):
+ 	case SDW_SDCA_CTL(FUNC_NUM_MIC_ARRAY, RT722_SDCA_ENT_USER_FU1E, RT722_SDCA_CTL_FU_VOLUME,
+ 			CH_01):
+ 	case SDW_SDCA_CTL(FUNC_NUM_MIC_ARRAY, RT722_SDCA_ENT_USER_FU1E, RT722_SDCA_CTL_FU_VOLUME,
+diff --git a/sound/soc/codecs/tas2764.c b/sound/soc/codecs/tas2764.c
+index d482cd194c08c5..58315eab492a16 100644
+--- a/sound/soc/codecs/tas2764.c
++++ b/sound/soc/codecs/tas2764.c
+@@ -365,7 +365,7 @@ static int tas2764_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+ {
+ 	struct snd_soc_component *component = dai->component;
+ 	struct tas2764_priv *tas2764 = snd_soc_component_get_drvdata(component);
+-	u8 tdm_rx_start_slot = 0, asi_cfg_0 = 0, asi_cfg_1 = 0;
++	u8 tdm_rx_start_slot = 0, asi_cfg_0 = 0, asi_cfg_1 = 0, asi_cfg_4 = 0;
+ 	int ret;
+ 
+ 	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+@@ -374,12 +374,14 @@ static int tas2764_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+ 		fallthrough;
+ 	case SND_SOC_DAIFMT_NB_NF:
+ 		asi_cfg_1 = TAS2764_TDM_CFG1_RX_RISING;
++		asi_cfg_4 = TAS2764_TDM_CFG4_TX_FALLING;
+ 		break;
+ 	case SND_SOC_DAIFMT_IB_IF:
+ 		asi_cfg_0 ^= TAS2764_TDM_CFG0_FRAME_START;
+ 		fallthrough;
+ 	case SND_SOC_DAIFMT_IB_NF:
+ 		asi_cfg_1 = TAS2764_TDM_CFG1_RX_FALLING;
++		asi_cfg_4 = TAS2764_TDM_CFG4_TX_RISING;
+ 		break;
+ 	}
+ 
+@@ -389,6 +391,12 @@ static int tas2764_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+ 	if (ret < 0)
+ 		return ret;
+ 
++	ret = snd_soc_component_update_bits(component, TAS2764_TDM_CFG4,
++					    TAS2764_TDM_CFG4_TX_MASK,
++					    asi_cfg_4);
++	if (ret < 0)
++		return ret;
++
+ 	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ 	case SND_SOC_DAIFMT_I2S:
+ 		asi_cfg_0 ^= TAS2764_TDM_CFG0_FRAME_START;
+diff --git a/sound/soc/codecs/tas2764.h b/sound/soc/codecs/tas2764.h
+index 168af772a898ff..9490f2686e3891 100644
+--- a/sound/soc/codecs/tas2764.h
++++ b/sound/soc/codecs/tas2764.h
+@@ -25,7 +25,7 @@
+ 
+ /* Power Control */
+ #define TAS2764_PWR_CTRL		TAS2764_REG(0X0, 0x02)
+-#define TAS2764_PWR_CTRL_MASK		GENMASK(1, 0)
++#define TAS2764_PWR_CTRL_MASK		GENMASK(2, 0)
+ #define TAS2764_PWR_CTRL_ACTIVE		0x0
+ #define TAS2764_PWR_CTRL_MUTE		BIT(0)
+ #define TAS2764_PWR_CTRL_SHUTDOWN	BIT(1)
+@@ -79,6 +79,12 @@
+ #define TAS2764_TDM_CFG3_RXS_SHIFT	0x4
+ #define TAS2764_TDM_CFG3_MASK		GENMASK(3, 0)
+ 
++/* TDM Configuration Reg4 */
++#define TAS2764_TDM_CFG4		TAS2764_REG(0X0, 0x0d)
++#define TAS2764_TDM_CFG4_TX_MASK	BIT(0)
++#define TAS2764_TDM_CFG4_TX_RISING	0x0
++#define TAS2764_TDM_CFG4_TX_FALLING	BIT(0)
++
+ /* TDM Configuration Reg5 */
+ #define TAS2764_TDM_CFG5		TAS2764_REG(0X0, 0x0e)
+ #define TAS2764_TDM_CFG5_VSNS_MASK	BIT(6)
+diff --git a/sound/soc/codecs/tas2770.c b/sound/soc/codecs/tas2770.c
+index 9f93b230652a5d..863c3f672ba98d 100644
+--- a/sound/soc/codecs/tas2770.c
++++ b/sound/soc/codecs/tas2770.c
+@@ -506,7 +506,7 @@ static int tas2770_codec_probe(struct snd_soc_component *component)
+ }
+ 
+ static DECLARE_TLV_DB_SCALE(tas2770_digital_tlv, 1100, 50, 0);
+-static DECLARE_TLV_DB_SCALE(tas2770_playback_volume, -12750, 50, 0);
++static DECLARE_TLV_DB_SCALE(tas2770_playback_volume, -10050, 50, 0);
+ 
+ static const struct snd_kcontrol_new tas2770_snd_controls[] = {
+ 	SOC_SINGLE_TLV("Speaker Playback Volume", TAS2770_PLAY_CFG_REG2,
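
DECLARE_TLV_DB_SCALE() encodes the minimum and the step in 0.01 dB units, so
the fix moves the advertised floor from -127.50 dB to -100.50 dB without
touching any register value. A sketch of how a dB range falls out of the
triple (the 201-step register span is an assumption for illustration):

#include <stdio.h>

int main(void)
{
	int min_cdb = -10050;	/* -100.50 dB, in 0.01 dB units */
	int step_cdb = 50;	/* 0.50 dB per register step */
	int steps = 201;	/* assumed register span */

	int max_cdb = min_cdb + steps * step_cdb;

	printf("range: %.2f dB .. %.2f dB\n",
	       min_cdb / 100.0, max_cdb / 100.0);
	return 0;
}
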
+diff --git a/sound/soc/codecs/wm0010.c b/sound/soc/codecs/wm0010.c
+index edd2cb185c42cf..9e67fbfc2ccaf8 100644
+--- a/sound/soc/codecs/wm0010.c
++++ b/sound/soc/codecs/wm0010.c
+@@ -920,7 +920,7 @@ static int wm0010_spi_probe(struct spi_device *spi)
+ 	if (ret) {
+ 		dev_err(wm0010->dev, "Failed to set IRQ %d as wake source: %d\n",
+ 			irq, ret);
+-		return ret;
++		goto free_irq;
+ 	}
+ 
+ 	if (spi->max_speed_hz)
+@@ -932,9 +932,18 @@ static int wm0010_spi_probe(struct spi_device *spi)
+ 				     &soc_component_dev_wm0010, wm0010_dai,
+ 				     ARRAY_SIZE(wm0010_dai));
+ 	if (ret < 0)
+-		return ret;
++		goto disable_irq_wake;
+ 
+ 	return 0;
++
++disable_irq_wake:
++	irq_set_irq_wake(wm0010->irq, 0);
++
++free_irq:
++	if (wm0010->irq)
++		free_irq(wm0010->irq, wm0010);
++
++	return ret;
+ }
+ 
+ static void wm0010_spi_remove(struct spi_device *spi)
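
The fix restores the canonical goto-unwind shape: every resource taken before
a failure is released in reverse order of acquisition. A standalone skeleton
with stand-in helpers (the real code frees the IRQ and clears its wake flag):

#include <stdio.h>

static int take_irq(void)        { return 0; }
static int set_irq_wake(void)    { return 0; }
static int register_comp(void)   { return -1; }	/* force the unwind */
static void clear_irq_wake(void) { puts("irq wake cleared"); }
static void release_irq(void)    { puts("irq freed"); }

static int example_probe(void)
{
	int ret;

	ret = take_irq();
	if (ret)
		return ret;

	ret = set_irq_wake();
	if (ret)
		goto free_irq;

	ret = register_comp();
	if (ret)
		goto disable_irq_wake;

	return 0;

disable_irq_wake:
	clear_irq_wake();	/* undo in reverse order */
free_irq:
	release_irq();
	return ret;
}

int main(void) { return example_probe() ? 1 : 0; }
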
+diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c
+index 502196253d42a9..64eee0d2347da1 100644
+--- a/sound/soc/codecs/wm5110.c
++++ b/sound/soc/codecs/wm5110.c
+@@ -302,7 +302,7 @@ static int wm5110_hp_pre_enable(struct snd_soc_dapm_widget *w)
+ 		} else {
+ 			wseq = wm5110_no_dre_left_enable;
+ 			nregs = ARRAY_SIZE(wm5110_no_dre_left_enable);
+-			priv->out_up_delay += 10;
++			priv->out_up_delay += 10000;
+ 		}
+ 		break;
+ 	case ARIZONA_OUT1R_ENA_SHIFT:
+@@ -312,7 +312,7 @@ static int wm5110_hp_pre_enable(struct snd_soc_dapm_widget *w)
+ 		} else {
+ 			wseq = wm5110_no_dre_right_enable;
+ 			nregs = ARRAY_SIZE(wm5110_no_dre_right_enable);
+-			priv->out_up_delay += 10;
++			priv->out_up_delay += 10000;
+ 		}
+ 		break;
+ 	default:
+@@ -338,7 +338,7 @@ static int wm5110_hp_pre_disable(struct snd_soc_dapm_widget *w)
+ 			snd_soc_component_update_bits(component,
+ 						      ARIZONA_SPARE_TRIGGERS,
+ 						      ARIZONA_WS_TRG1, 0);
+-			priv->out_down_delay += 27;
++			priv->out_down_delay += 27000;
+ 		}
+ 		break;
+ 	case ARIZONA_OUT1R_ENA_SHIFT:
+@@ -350,7 +350,7 @@ static int wm5110_hp_pre_disable(struct snd_soc_dapm_widget *w)
+ 			snd_soc_component_update_bits(component,
+ 						      ARIZONA_SPARE_TRIGGERS,
+ 						      ARIZONA_WS_TRG2, 0);
+-			priv->out_down_delay += 27;
++			priv->out_down_delay += 27000;
+ 		}
+ 		break;
+ 	default:
+diff --git a/sound/soc/generic/simple-card-utils.c b/sound/soc/generic/simple-card-utils.c
+index fedae7f6f70cc5..975ffd2cad292c 100644
+--- a/sound/soc/generic/simple-card-utils.c
++++ b/sound/soc/generic/simple-card-utils.c
+@@ -1097,6 +1097,7 @@ int graph_util_parse_dai(struct device *dev, struct device_node *ep,
+ 	args.np = ep;
+ 	dai = snd_soc_get_dai_via_args(&args);
+ 	if (dai) {
++		dlc->of_node  = node;
+ 		dlc->dai_name = snd_soc_dai_name_get(dai);
+ 		dlc->dai_args = snd_soc_copy_dai_args(dev, &args);
+ 		if (!dlc->dai_args)
+diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
+index 84fc35d88b9267..380fc3be8c932e 100644
+--- a/sound/soc/intel/boards/sof_sdw.c
++++ b/sound/soc/intel/boards/sof_sdw.c
+@@ -13,6 +13,7 @@
+ #include <linux/soundwire/sdw.h>
+ #include <linux/soundwire/sdw_type.h>
+ #include <linux/soundwire/sdw_intel.h>
++#include <sound/core.h>
+ #include <sound/soc-acpi.h>
+ #include "sof_sdw_common.h"
+ #include "../../codecs/rt711.h"
+@@ -685,6 +686,23 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
+ 	{}
+ };
+ 
++static const struct snd_pci_quirk sof_sdw_ssid_quirk_table[] = {
++	SND_PCI_QUIRK(0x1043, 0x1e13, "ASUS Zenbook S14", SOC_SDW_CODEC_MIC),
++	{}
++};
++
++static void sof_sdw_check_ssid_quirk(const struct snd_soc_acpi_mach *mach)
++{
++	const struct snd_pci_quirk *quirk_entry;
++
++	quirk_entry = snd_pci_quirk_lookup_id(mach->mach_params.subsystem_vendor,
++					      mach->mach_params.subsystem_device,
++					      sof_sdw_ssid_quirk_table);
++
++	if (quirk_entry)
++		sof_sdw_quirk = quirk_entry->value;
++}
++
+ static struct snd_soc_dai_link_component platform_component[] = {
+ 	{
+ 		/* name might be overridden during probe */
+@@ -853,7 +871,7 @@ static int create_sdw_dailinks(struct snd_soc_card *card,
+ 
+ 	/* generate DAI links by each sdw link */
+ 	while (sof_dais->initialised) {
+-		int current_be_id;
++		int current_be_id = 0;
+ 
+ 		ret = create_sdw_dailink(card, sof_dais, dai_links,
+ 					 &current_be_id, codec_conf);
+@@ -1212,6 +1230,13 @@ static int mc_probe(struct platform_device *pdev)
+ 
+ 	snd_soc_card_set_drvdata(card, ctx);
+ 
++	if (mach->mach_params.subsystem_id_set) {
++		snd_soc_card_set_pci_ssid(card,
++					  mach->mach_params.subsystem_vendor,
++					  mach->mach_params.subsystem_device);
++		sof_sdw_check_ssid_quirk(mach);
++	}
++
+ 	dmi_check_system(sof_sdw_quirk_table);
+ 
+ 	if (quirk_override != -1) {
+@@ -1227,12 +1252,6 @@ static int mc_probe(struct platform_device *pdev)
+ 	for (i = 0; i < ctx->codec_info_list_count; i++)
+ 		codec_info_list[i].amp_num = 0;
+ 
+-	if (mach->mach_params.subsystem_id_set) {
+-		snd_soc_card_set_pci_ssid(card,
+-					  mach->mach_params.subsystem_vendor,
+-					  mach->mach_params.subsystem_device);
+-	}
+-
+ 	ret = sof_card_dai_links_create(card);
+ 	if (ret < 0)
+ 		return ret;
+diff --git a/sound/soc/intel/common/soc-acpi-intel-mtl-match.c b/sound/soc/intel/common/soc-acpi-intel-mtl-match.c
+index fd02c864e25ef9..a3f79176563719 100644
+--- a/sound/soc/intel/common/soc-acpi-intel-mtl-match.c
++++ b/sound/soc/intel/common/soc-acpi-intel-mtl-match.c
+@@ -297,7 +297,7 @@ static const struct snd_soc_acpi_adr_device rt1316_3_single_adr[] = {
+ 
+ static const struct snd_soc_acpi_adr_device rt1318_1_single_adr[] = {
+ 	{
+-		.adr = 0x000130025D131801,
++		.adr = 0x000130025D131801ull,
+ 		.num_endpoints = 1,
+ 		.endpoints = &single_endpoint,
+ 		.name_prefix = "rt1318-1"
+diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
+index eca5ce096e5457..e3ef9104b411c1 100644
+--- a/sound/soc/sh/rcar/core.c
++++ b/sound/soc/sh/rcar/core.c
+@@ -1758,20 +1758,6 @@ int rsnd_kctrl_accept_anytime(struct rsnd_dai_stream *io)
+ 	return 1;
+ }
+ 
+-int rsnd_kctrl_accept_runtime(struct rsnd_dai_stream *io)
+-{
+-	struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
+-	struct rsnd_priv *priv = rsnd_io_to_priv(io);
+-	struct device *dev = rsnd_priv_to_dev(priv);
+-
+-	if (!runtime) {
+-		dev_warn(dev, "Can't update kctrl when idle\n");
+-		return 0;
+-	}
+-
+-	return 1;
+-}
+-
+ struct rsnd_kctrl_cfg *rsnd_kctrl_init_m(struct rsnd_kctrl_cfg_m *cfg)
+ {
+ 	cfg->cfg.val = cfg->val;
+diff --git a/sound/soc/sh/rcar/rsnd.h b/sound/soc/sh/rcar/rsnd.h
+index 3c164d8e3b16bf..3f1100b98cdd33 100644
+--- a/sound/soc/sh/rcar/rsnd.h
++++ b/sound/soc/sh/rcar/rsnd.h
+@@ -742,7 +742,6 @@ struct rsnd_kctrl_cfg_s {
+ #define rsnd_kctrl_vals(x)	((x).val)	/* = (x).cfg.val[0] */
+ 
+ int rsnd_kctrl_accept_anytime(struct rsnd_dai_stream *io);
+-int rsnd_kctrl_accept_runtime(struct rsnd_dai_stream *io);
+ struct rsnd_kctrl_cfg *rsnd_kctrl_init_m(struct rsnd_kctrl_cfg_m *cfg);
+ struct rsnd_kctrl_cfg *rsnd_kctrl_init_s(struct rsnd_kctrl_cfg_s *cfg);
+ int rsnd_kctrl_new(struct rsnd_mod *mod,
+diff --git a/sound/soc/sh/rcar/src.c b/sound/soc/sh/rcar/src.c
+index e7f86db0d94c3c..7d73b183bda685 100644
+--- a/sound/soc/sh/rcar/src.c
++++ b/sound/soc/sh/rcar/src.c
+@@ -35,6 +35,7 @@ struct rsnd_src {
+ 	struct rsnd_mod *dma;
+ 	struct rsnd_kctrl_cfg_s sen;  /* sync convert enable */
+ 	struct rsnd_kctrl_cfg_s sync; /* sync convert */
++	u32 current_sync_rate;
+ 	int irq;
+ };
+ 
+@@ -100,7 +101,7 @@ static u32 rsnd_src_convert_rate(struct rsnd_dai_stream *io,
+ 	if (!rsnd_src_sync_is_enabled(mod))
+ 		return rsnd_io_converted_rate(io);
+ 
+-	convert_rate = src->sync.val;
++	convert_rate = src->current_sync_rate;
+ 
+ 	if (!convert_rate)
+ 		convert_rate = rsnd_io_converted_rate(io);
+@@ -201,13 +202,73 @@ static const u32 chan222222[] = {
+ static void rsnd_src_set_convert_rate(struct rsnd_dai_stream *io,
+ 				      struct rsnd_mod *mod)
+ {
++	struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
+ 	struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
+-	struct device *dev = rsnd_priv_to_dev(priv);
++	struct rsnd_src *src = rsnd_mod_to_src(mod);
++	u32 fin, fout, new_rate;
++	int inc, cnt, rate;
++	u64 base, val;
++
++	if (!runtime)
++		return;
++
++	if (!rsnd_src_sync_is_enabled(mod))
++		return;
++
++	fin	= rsnd_src_get_in_rate(priv, io);
++	fout	= rsnd_src_get_out_rate(priv, io);
++
++	new_rate = src->sync.val;
++
++	if (!new_rate)
++		new_rate = fout;
++
++	/* Do nothing if no diff */
++	if (new_rate == src->current_sync_rate)
++		return;
++
++	/*
++	 * SRCm_IFSVR::INTIFS can change within 1%
++	 * see
++	 *	SRCm_IFSVR::INTIFS Note
++	 */
++	inc = fout / 100;
++	cnt = abs(new_rate - fout) / inc;
++	if (fout > new_rate)
++		inc *= -1;
++
++	/*
++	 * Once the SRC is running, only SRC_IFSVR can be updated
++	 * in Synchronous Mode
++	 */
++	base = (u64)0x0400000 * fin;
++	rate  = fout;
++	for (int i = 0; i < cnt; i++) {
++		val   = base;
++		rate += inc;
++		do_div(val, rate);
++
++		rsnd_mod_write(mod, SRC_IFSVR, val);
++	}
++	val   = base;
++	do_div(val, new_rate);
++
++	rsnd_mod_write(mod, SRC_IFSVR, val);
++
++	/* update current_sync_rate */
++	src->current_sync_rate = new_rate;
++}
++
++static void rsnd_src_init_convert_rate(struct rsnd_dai_stream *io,
++				       struct rsnd_mod *mod)
++{
+ 	struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
++	struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
++	struct device *dev = rsnd_priv_to_dev(priv);
+ 	int is_play = rsnd_io_is_play(io);
+ 	int use_src = 0;
+ 	u32 fin, fout;
+-	u32 ifscr, fsrate, adinr;
++	u32 ifscr, adinr;
+ 	u32 cr, route;
+ 	u32 i_busif, o_busif, tmp;
+ 	const u32 *bsdsr_table;
+@@ -245,26 +306,15 @@ static void rsnd_src_set_convert_rate(struct rsnd_dai_stream *io,
+ 	adinr = rsnd_get_adinr_bit(mod, io) | chan;
+ 
+ 	/*
+-	 * SRC_IFSCR / SRC_IFSVR
+-	 */
+-	ifscr = 0;
+-	fsrate = 0;
+-	if (use_src) {
+-		u64 n;
+-
+-		ifscr = 1;
+-		n = (u64)0x0400000 * fin;
+-		do_div(n, fout);
+-		fsrate = n;
+-	}
+-
+-	/*
++	 * SRC_IFSCR
+ 	 * SRC_SRCCR / SRC_ROUTE_MODE0
+ 	 */
++	ifscr	= 0;
+ 	cr	= 0x00011110;
+ 	route	= 0x0;
+ 	if (use_src) {
+ 		route	= 0x1;
++		ifscr	= 0x1;
+ 
+ 		if (rsnd_src_sync_is_enabled(mod)) {
+ 			cr |= 0x1;
+@@ -335,7 +385,6 @@ static void rsnd_src_set_convert_rate(struct rsnd_dai_stream *io,
+ 	rsnd_mod_write(mod, SRC_SRCIR, 1);	/* initialize */
+ 	rsnd_mod_write(mod, SRC_ADINR, adinr);
+ 	rsnd_mod_write(mod, SRC_IFSCR, ifscr);
+-	rsnd_mod_write(mod, SRC_IFSVR, fsrate);
+ 	rsnd_mod_write(mod, SRC_SRCCR, cr);
+ 	rsnd_mod_write(mod, SRC_BSDSR, bsdsr_table[idx]);
+ 	rsnd_mod_write(mod, SRC_BSISR, bsisr_table[idx]);
+@@ -348,6 +397,9 @@ static void rsnd_src_set_convert_rate(struct rsnd_dai_stream *io,
+ 
+ 	rsnd_adg_set_src_timesel_gen2(mod, io, fin, fout);
+ 
++	/* update SRC_IFSVR */
++	rsnd_src_set_convert_rate(io, mod);
++
+ 	return;
+ 
+ convert_rate_err:
+@@ -467,7 +519,8 @@ static int rsnd_src_init(struct rsnd_mod *mod,
+ 	int ret;
+ 
+ 	/* reset sync convert_rate */
+-	src->sync.val = 0;
++	src->sync.val		=
++	src->current_sync_rate	= 0;
+ 
+ 	ret = rsnd_mod_power_on(mod);
+ 	if (ret < 0)
+@@ -475,7 +528,7 @@ static int rsnd_src_init(struct rsnd_mod *mod,
+ 
+ 	rsnd_src_activation(mod);
+ 
+-	rsnd_src_set_convert_rate(io, mod);
++	rsnd_src_init_convert_rate(io, mod);
+ 
+ 	rsnd_src_status_clear(mod);
+ 
+@@ -493,7 +546,8 @@ static int rsnd_src_quit(struct rsnd_mod *mod,
+ 	rsnd_mod_power_off(mod);
+ 
+ 	/* reset sync convert_rate */
+-	src->sync.val = 0;
++	src->sync.val		=
++	src->current_sync_rate	= 0;
+ 
+ 	return 0;
+ }
+@@ -531,6 +585,22 @@ static irqreturn_t rsnd_src_interrupt(int irq, void *data)
+ 	return IRQ_HANDLED;
+ }
+ 
++static int rsnd_src_kctrl_accept_runtime(struct rsnd_dai_stream *io)
++{
++	struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
++
++	if (!runtime) {
++		struct rsnd_priv *priv = rsnd_io_to_priv(io);
++		struct device *dev = rsnd_priv_to_dev(priv);
++
++		dev_warn(dev, "\"SRC Out Rate\" can use during running\n");
++
++		return 0;
++	}
++
++	return 1;
++}
++
+ static int rsnd_src_probe_(struct rsnd_mod *mod,
+ 			   struct rsnd_dai_stream *io,
+ 			   struct rsnd_priv *priv)
+@@ -585,7 +655,7 @@ static int rsnd_src_pcm_new(struct rsnd_mod *mod,
+ 			       "SRC Out Rate Switch" :
+ 			       "SRC In Rate Switch",
+ 			       rsnd_kctrl_accept_anytime,
+-			       rsnd_src_set_convert_rate,
++			       rsnd_src_init_convert_rate,
+ 			       &src->sen, 1);
+ 	if (ret < 0)
+ 		return ret;
+@@ -594,7 +664,7 @@ static int rsnd_src_pcm_new(struct rsnd_mod *mod,
+ 			       rsnd_io_is_play(io) ?
+ 			       "SRC Out Rate" :
+ 			       "SRC In Rate",
+-			       rsnd_kctrl_accept_runtime,
++			       rsnd_src_kctrl_accept_runtime,
+ 			       rsnd_src_set_convert_rate,
+ 			       &src->sync, 192000);
+ 
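
The datasheet constraint cited in the hunk (INTIFS may only move about 1% at
a time) is why the update walks SRC_IFSVR toward the target in 1% steps of
the nominal rate before writing the exact divider, val = 0x0400000 * fin /
rate. A standalone sketch of that arithmetic:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

int main(void)
{
	uint32_t fin = 48000, fout = 48000;	/* nominal in/out rates */
	uint32_t new_rate = 49440;		/* target: fout + 3% */
	uint64_t base = (uint64_t)0x0400000 * fin;

	int inc = fout / 100;			/* one 1% step */
	int cnt = abs((int)new_rate - (int)fout) / inc;
	int rate = fout;

	if (fout > new_rate)
		inc = -inc;

	for (int i = 0; i < cnt; i++) {		/* walk toward the target */
		rate += inc;
		printf("IFSVR step  : %llu\n",
		       (unsigned long long)(base / rate));
	}
	printf("IFSVR final : %llu\n",
	       (unsigned long long)(base / new_rate));
	return 0;
}
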
+diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
+index b3d4e8ae07eff8..0c6424a1fcac04 100644
+--- a/sound/soc/sh/rcar/ssi.c
++++ b/sound/soc/sh/rcar/ssi.c
+@@ -336,7 +336,8 @@ static int rsnd_ssi_master_clk_start(struct rsnd_mod *mod,
+ 	return 0;
+ 
+ rate_err:
+-	dev_err(dev, "unsupported clock rate\n");
++	dev_err(dev, "unsupported clock rate (%d)\n", rate);
++
+ 	return ret;
+ }
+ 
+diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c
+index 19928f098d8dcb..b0e4e4168f38d5 100644
+--- a/sound/soc/soc-ops.c
++++ b/sound/soc/soc-ops.c
+@@ -337,7 +337,7 @@ int snd_soc_put_volsw(struct snd_kcontrol *kcontrol,
+ 	if (ucontrol->value.integer.value[0] < 0)
+ 		return -EINVAL;
+ 	val = ucontrol->value.integer.value[0];
+-	if (mc->platform_max && ((int)val + min) > mc->platform_max)
++	if (mc->platform_max && val > mc->platform_max)
+ 		return -EINVAL;
+ 	if (val > max - min)
+ 		return -EINVAL;
+@@ -350,7 +350,7 @@ int snd_soc_put_volsw(struct snd_kcontrol *kcontrol,
+ 		if (ucontrol->value.integer.value[1] < 0)
+ 			return -EINVAL;
+ 		val2 = ucontrol->value.integer.value[1];
+-		if (mc->platform_max && ((int)val2 + min) > mc->platform_max)
++		if (mc->platform_max && val2 > mc->platform_max)
+ 			return -EINVAL;
+ 		if (val2 > max - min)
+ 			return -EINVAL;
+@@ -503,17 +503,16 @@ int snd_soc_info_volsw_range(struct snd_kcontrol *kcontrol,
+ {
+ 	struct soc_mixer_control *mc =
+ 		(struct soc_mixer_control *)kcontrol->private_value;
+-	int platform_max;
+-	int min = mc->min;
++	int max;
+ 
+-	if (!mc->platform_max)
+-		mc->platform_max = mc->max;
+-	platform_max = mc->platform_max;
++	max = mc->max - mc->min;
++	if (mc->platform_max && mc->platform_max < max)
++		max = mc->platform_max;
+ 
+ 	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ 	uinfo->count = snd_soc_volsw_is_stereo(mc) ? 2 : 1;
+ 	uinfo->value.integer.min = 0;
+-	uinfo->value.integer.max = platform_max - min;
++	uinfo->value.integer.max = max;
+ 
+ 	return 0;
+ }
+diff --git a/sound/soc/sof/amd/acp-ipc.c b/sound/soc/sof/amd/acp-ipc.c
+index b44b1b1adb6ed9..cf3994a705f946 100644
+--- a/sound/soc/sof/amd/acp-ipc.c
++++ b/sound/soc/sof/amd/acp-ipc.c
+@@ -167,6 +167,7 @@ irqreturn_t acp_sof_ipc_irq_thread(int irq, void *context)
+ 
+ 	if (sdev->first_boot && sdev->fw_state != SOF_FW_BOOT_COMPLETE) {
+ 		acp_mailbox_read(sdev, sdev->dsp_box.offset, &status, sizeof(status));
++
+ 		if ((status & SOF_IPC_PANIC_MAGIC_MASK) == SOF_IPC_PANIC_MAGIC) {
+ 			snd_sof_dsp_panic(sdev, sdev->dsp_box.offset + sizeof(status),
+ 					  true);
+@@ -188,13 +189,21 @@ irqreturn_t acp_sof_ipc_irq_thread(int irq, void *context)
+ 
+ 	dsp_ack = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_SCRATCH_REG_0 + dsp_ack_write);
+ 	if (dsp_ack) {
+-		spin_lock_irq(&sdev->ipc_lock);
+-		/* handle immediate reply from DSP core */
+-		acp_dsp_ipc_get_reply(sdev);
+-		snd_sof_ipc_reply(sdev, 0);
+-		/* set the done bit */
+-		acp_dsp_ipc_dsp_done(sdev);
+-		spin_unlock_irq(&sdev->ipc_lock);
++		if (likely(sdev->fw_state == SOF_FW_BOOT_COMPLETE)) {
++			spin_lock_irq(&sdev->ipc_lock);
++
++			/* handle immediate reply from DSP core */
++			acp_dsp_ipc_get_reply(sdev);
++			snd_sof_ipc_reply(sdev, 0);
++			/* set the done bit */
++			acp_dsp_ipc_dsp_done(sdev);
++
++			spin_unlock_irq(&sdev->ipc_lock);
++		} else {
++			dev_dbg_ratelimited(sdev->dev, "IPC reply before FW_BOOT_COMPLETE: %#x\n",
++					    dsp_ack);
++		}
++
+ 		ipc_irq = true;
+ 	}
+ 
+diff --git a/sound/soc/sof/amd/acp.c b/sound/soc/sof/amd/acp.c
+index 95d4762c9d9390..35eb23d2a056d8 100644
+--- a/sound/soc/sof/amd/acp.c
++++ b/sound/soc/sof/amd/acp.c
+@@ -27,6 +27,7 @@ MODULE_PARM_DESC(enable_fw_debug, "Enable Firmware debug");
+ static struct acp_quirk_entry quirk_valve_galileo = {
+ 	.signed_fw_image = true,
+ 	.skip_iram_dram_size_mod = true,
++	.post_fw_run_delay = true,
+ };
+ 
+ const struct dmi_system_id acp_sof_quirk_table[] = {
+diff --git a/sound/soc/sof/amd/acp.h b/sound/soc/sof/amd/acp.h
+index 800594440f7391..2a19d82d620022 100644
+--- a/sound/soc/sof/amd/acp.h
++++ b/sound/soc/sof/amd/acp.h
+@@ -220,6 +220,7 @@ struct sof_amd_acp_desc {
+ struct acp_quirk_entry {
+ 	bool signed_fw_image;
+ 	bool skip_iram_dram_size_mod;
++	bool post_fw_run_delay;
+ };
+ 
+ /* Common device data struct for ACP devices */
+diff --git a/sound/soc/sof/amd/vangogh.c b/sound/soc/sof/amd/vangogh.c
+index 61372958c09dc8..436f58be3a9f94 100644
+--- a/sound/soc/sof/amd/vangogh.c
++++ b/sound/soc/sof/amd/vangogh.c
+@@ -11,6 +11,7 @@
+  * Hardware interface for Audio DSP on Vangogh platform
+  */
+ 
++#include <linux/delay.h>
+ #include <linux/platform_device.h>
+ #include <linux/module.h>
+ 
+@@ -136,6 +137,20 @@ static struct snd_soc_dai_driver vangogh_sof_dai[] = {
+ 	},
+ };
+ 
++static int sof_vangogh_post_fw_run_delay(struct snd_sof_dev *sdev)
++{
++	/*
++	 * Resuming from suspend may in some cases cause the DSP firmware
++	 * to enter an unrecoverable faulty state.  Briefly delaying any
++	 * host-to-DSP transmission right after firmware boot completion
++	 * seems to resolve the issue.
++	 */
++	if (!sdev->first_boot)
++		usleep_range(100, 150);
++
++	return 0;
++}
++
+ /* Vangogh ops */
+ struct snd_sof_dsp_ops sof_vangogh_ops;
+ EXPORT_SYMBOL_NS(sof_vangogh_ops, SND_SOC_SOF_AMD_COMMON);
+@@ -157,6 +172,9 @@ int sof_vangogh_ops_init(struct snd_sof_dev *sdev)
+ 
+ 		if (quirks->signed_fw_image)
+ 			sof_vangogh_ops.load_firmware = acp_sof_load_signed_firmware;
++
++		if (quirks->post_fw_run_delay)
++			sof_vangogh_ops.post_fw_run = sof_vangogh_post_fw_run_delay;
+ 	}
+ 
+ 	return 0;
+diff --git a/sound/soc/sof/intel/hda-codec.c b/sound/soc/sof/intel/hda-codec.c
+index dc46888faa0dc9..c0c58b42971556 100644
+--- a/sound/soc/sof/intel/hda-codec.c
++++ b/sound/soc/sof/intel/hda-codec.c
+@@ -454,6 +454,7 @@ int hda_codec_i915_exit(struct snd_sof_dev *sdev)
+ }
+ EXPORT_SYMBOL_NS_GPL(hda_codec_i915_exit, SND_SOC_SOF_HDA_AUDIO_CODEC_I915);
+ 
++MODULE_SOFTDEP("pre: snd-hda-codec-hdmi");
+ #endif
+ 
+ MODULE_LICENSE("Dual BSD/GPL");
+diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c
+index f10ed4d1025016..c924a998d6f90d 100644
+--- a/sound/soc/sof/intel/hda.c
++++ b/sound/soc/sof/intel/hda.c
+@@ -1305,22 +1305,8 @@ struct snd_soc_acpi_mach *hda_machine_select(struct snd_sof_dev *sdev)
+ 		/* report to machine driver if any DMICs are found */
+ 		mach->mach_params.dmic_num = check_dmic_num(sdev);
+ 
+-		if (sdw_mach_found) {
+-			/*
+-			 * DMICs use up to 4 pins and are typically pin-muxed with SoundWire
+-			 * link 2 and 3, or link 1 and 2, thus we only try to enable dmics
+-			 * if all conditions are true:
+-			 * a) 2 or fewer links are used by SoundWire
+-			 * b) the NHLT table reports the presence of microphones
+-			 */
+-			if (hweight_long(mach->link_mask) <= 2)
+-				dmic_fixup = true;
+-			else
+-				mach->mach_params.dmic_num = 0;
+-		} else {
+-			if (mach->tplg_quirk_mask & SND_SOC_ACPI_TPLG_INTEL_DMIC_NUMBER)
+-				dmic_fixup = true;
+-		}
++		if (sdw_mach_found || mach->tplg_quirk_mask & SND_SOC_ACPI_TPLG_INTEL_DMIC_NUMBER)
++			dmic_fixup = true;
+ 
+ 		if (tplg_fixup &&
+ 		    dmic_fixup &&
+diff --git a/sound/soc/sof/intel/pci-ptl.c b/sound/soc/sof/intel/pci-ptl.c
+index 69195b5e7b1a92..f54d098d616f67 100644
+--- a/sound/soc/sof/intel/pci-ptl.c
++++ b/sound/soc/sof/intel/pci-ptl.c
+@@ -50,6 +50,7 @@ static const struct sof_dev_desc ptl_desc = {
+ /* PCI IDs */
+ static const struct pci_device_id sof_pci_ids[] = {
+ 	{ PCI_DEVICE_DATA(INTEL, HDA_PTL, &ptl_desc) }, /* PTL */
++	{ PCI_DEVICE_DATA(INTEL, HDA_PTL_H, &ptl_desc) }, /* PTL-H */
+ 	{ 0, }
+ };
+ MODULE_DEVICE_TABLE(pci, sof_pci_ids);
+diff --git a/tools/objtool/check.c b/tools/objtool/check.c
+index 1691aa6e6ce32d..3c3e5760e81b83 100644
+--- a/tools/objtool/check.c
++++ b/tools/objtool/check.c
+@@ -2061,6 +2061,14 @@ static int add_jump_table(struct objtool_file *file, struct instruction *insn,
+ 		    reloc_addend(reloc) == pfunc->offset)
+ 			break;
+ 
++		/*
++		 * Clang sometimes leaves dangling unused jump table entries
++		 * which point to the end of the function.  Ignore them.
++		 */
++		if (reloc->sym->sec == pfunc->sec &&
++		    reloc_addend(reloc) == pfunc->offset + pfunc->len)
++			goto next;
++
+ 		dest_insn = find_insn(file, reloc->sym->sec, reloc_addend(reloc));
+ 		if (!dest_insn)
+ 			break;
+@@ -2078,6 +2086,7 @@ static int add_jump_table(struct objtool_file *file, struct instruction *insn,
+ 		alt->insn = dest_insn;
+ 		alt->next = insn->alts;
+ 		insn->alts = alt;
++next:
+ 		prev_offset = reloc_offset(reloc);
+ 	}
+ 
+diff --git a/tools/sched_ext/include/scx/common.bpf.h b/tools/sched_ext/include/scx/common.bpf.h
+index f7206374a73dd8..75d6d61279d664 100644
+--- a/tools/sched_ext/include/scx/common.bpf.h
++++ b/tools/sched_ext/include/scx/common.bpf.h
+@@ -333,6 +333,17 @@ static __always_inline const struct cpumask *cast_mask(struct bpf_cpumask *mask)
+ 	return (const struct cpumask *)mask;
+ }
+ 
++/*
++ * Return true if task @p cannot migrate to a different CPU, false
++ * otherwise.
++ */
++static inline bool is_migration_disabled(const struct task_struct *p)
++{
++	if (bpf_core_field_exists(p->migration_disabled))
++		return p->migration_disabled;
++	return false;
++}
++
+ /* rcu */
+ void bpf_rcu_read_lock(void) __ksym;
+ void bpf_rcu_read_unlock(void) __ksym;
+diff --git a/tools/sound/dapm-graph b/tools/sound/dapm-graph
+index f14bdfedee8f11..b6196ee5065a4e 100755
+--- a/tools/sound/dapm-graph
++++ b/tools/sound/dapm-graph
+@@ -10,7 +10,7 @@ set -eu
+ 
+ STYLE_COMPONENT_ON="color=dodgerblue;style=bold"
+ STYLE_COMPONENT_OFF="color=gray40;style=filled;fillcolor=gray90"
+-STYLE_NODE_ON="shape=box,style=bold,color=green4"
++STYLE_NODE_ON="shape=box,style=bold,color=green4,fillcolor=white"
+ STYLE_NODE_OFF="shape=box,style=filled,color=gray30,fillcolor=gray95"
+ 
+ # Print usage and exit
+diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
+index 82bfb266741cfa..fb08c565d6aada 100644
+--- a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
++++ b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
+@@ -492,8 +492,8 @@ static void test_sockmap_skb_verdict_shutdown(void)
+ 	if (!ASSERT_EQ(err, 1, "epoll_wait(fd)"))
+ 		goto out_close;
+ 
+-	n = recv(c1, &b, 1, SOCK_NONBLOCK);
+-	ASSERT_EQ(n, 0, "recv_timeout(fin)");
++	n = recv(c1, &b, 1, MSG_DONTWAIT);
++	ASSERT_EQ(n, 0, "recv(fin)");
+ out_close:
+ 	close(c1);
+ 	close(p1);
+@@ -546,7 +546,7 @@ static void test_sockmap_skb_verdict_fionread(bool pass_prog)
+ 	ASSERT_EQ(avail, expected, "ioctl(FIONREAD)");
+ 	/* On DROP test there will be no data to read */
+ 	if (pass_prog) {
+-		recvd = recv_timeout(c1, &buf, sizeof(buf), SOCK_NONBLOCK, IO_TIMEOUT_SEC);
++		recvd = recv_timeout(c1, &buf, sizeof(buf), MSG_DONTWAIT, IO_TIMEOUT_SEC);
+ 		ASSERT_EQ(recvd, sizeof(buf), "recv_timeout(c0)");
+ 	}
+ 
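
The bug being fixed: SOCK_NONBLOCK is a socket(2)/accept4(2) type flag, not a
recv(2) flag, so passing it to recv() is silently read as unrelated MSG_*
bits (on Linux its value coincides with MSG_CONFIRM) rather than meaning
"don't block". Printing the constants makes the mixup visible:

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/socket.h>

int main(void)
{
	printf("SOCK_NONBLOCK = %#x\n", SOCK_NONBLOCK);	/* 0x800 on Linux */
	printf("MSG_DONTWAIT  = %#x\n", MSG_DONTWAIT);	/* 0x40 */
	return 0;
}
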
+diff --git a/tools/testing/selftests/cgroup/test_cpuset_v1_hp.sh b/tools/testing/selftests/cgroup/test_cpuset_v1_hp.sh
+index 3f45512fb512eb..7406c24be1ac99 100755
+--- a/tools/testing/selftests/cgroup/test_cpuset_v1_hp.sh
++++ b/tools/testing/selftests/cgroup/test_cpuset_v1_hp.sh
+@@ -1,4 +1,4 @@
+-#!/bin/sh
++#!/bin/bash
+ # SPDX-License-Identifier: GPL-2.0
+ #
+ # Test the special cpuset v1 hotplug case where a cpuset become empty of
+diff --git a/tools/testing/selftests/drivers/net/bonding/bond_options.sh b/tools/testing/selftests/drivers/net/bonding/bond_options.sh
+index edc56e2cc60690..7bc148889ca729 100755
+--- a/tools/testing/selftests/drivers/net/bonding/bond_options.sh
++++ b/tools/testing/selftests/drivers/net/bonding/bond_options.sh
+@@ -11,8 +11,8 @@ ALL_TESTS="
+ 
+ lib_dir=$(dirname "$0")
+ source ${lib_dir}/bond_topo_3d1c.sh
+-c_maddr="33:33:00:00:00:10"
+-g_maddr="33:33:00:00:02:54"
++c_maddr="33:33:ff:00:00:10"
++g_maddr="33:33:ff:00:02:54"
+ 
+ skip_prio()
+ {
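
The corrected addresses are solicited-node multicast MACs: IPv6 maps a
multicast group to 33:33 plus the group's low 32 bits, and a solicited-node
group is ff02::1:ff plus the target's low 24 bits, hence the ff byte. A quick
standalone check for an address ending in ::10, which reproduces c_maddr:

#include <stdio.h>
#include <arpa/inet.h>

int main(void)
{
	struct in6_addr a;

	inet_pton(AF_INET6, "fe80::10", &a);	/* sample target address */

	/* group ff02::1:ffXX:YYZZ -> MAC 33:33:ff:XX:YY:ZZ */
	printf("33:33:ff:%02x:%02x:%02x\n",
	       a.s6_addr[13], a.s6_addr[14], a.s6_addr[15]);
	return 0;
}
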
+diff --git a/tools/testing/selftests/filesystems/statmount/statmount_test.c b/tools/testing/selftests/filesystems/statmount/statmount_test.c
+index c773334bbcc95b..550e5d762c23f4 100644
+--- a/tools/testing/selftests/filesystems/statmount/statmount_test.c
++++ b/tools/testing/selftests/filesystems/statmount/statmount_test.c
+@@ -383,6 +383,10 @@ static void test_statmount_mnt_point(void)
+ 		return;
+ 	}
+ 
++	if (!(sm->mask & STATMOUNT_MNT_POINT)) {
++		ksft_test_result_fail("missing STATMOUNT_MNT_POINT in mask\n");
++		return;
++	}
+ 	if (strcmp(sm->str + sm->mnt_point, "/") != 0) {
+ 		ksft_test_result_fail("unexpected mount point: '%s' != '/'\n",
+ 				      sm->str + sm->mnt_point);
+@@ -408,6 +412,10 @@ static void test_statmount_mnt_root(void)
+ 				      strerror(errno));
+ 		return;
+ 	}
++	if (!(sm->mask & STATMOUNT_MNT_ROOT)) {
++		ksft_test_result_fail("missing STATMOUNT_MNT_ROOT in mask\n");
++		return;
++	}
+ 	mnt_root = sm->str + sm->mnt_root;
+ 	last_root = strrchr(mnt_root, '/');
+ 	if (last_root)
+@@ -437,6 +445,10 @@ static void test_statmount_fs_type(void)
+ 				      strerror(errno));
+ 		return;
+ 	}
++	if (!(sm->mask & STATMOUNT_FS_TYPE)) {
++		ksft_test_result_fail("missing STATMOUNT_FS_TYPE in mask\n");
++		return;
++	}
+ 	fs_type = sm->str + sm->fs_type;
+ 	for (s = known_fs; s != NULL; s++) {
+ 		if (strcmp(fs_type, *s) == 0)
+@@ -464,6 +476,11 @@ static void test_statmount_mnt_opts(void)
+ 		return;
+ 	}
+ 
++	if (!(sm->mask & STATMOUNT_MNT_BASIC)) {
++		ksft_test_result_fail("missing STATMOUNT_MNT_BASIC in mask\n");
++		return;
++	}
++
+ 	while (getline(&line, &len, f_mountinfo) != -1) {
+ 		int i;
+ 		char *p, *p2;
+@@ -514,7 +531,10 @@ static void test_statmount_mnt_opts(void)
+ 		if (p2)
+ 			*p2 = '\0';
+ 
+-		statmount_opts = sm->str + sm->mnt_opts;
++		if (sm->mask & STATMOUNT_MNT_OPTS)
++			statmount_opts = sm->str + sm->mnt_opts;
++		else
++			statmount_opts = "";
+ 		if (strcmp(statmount_opts, p) != 0)
+ 			ksft_test_result_fail(
+ 				"unexpected mount options: '%s' != '%s'\n",
+diff --git a/tools/testing/selftests/sched_ext/dsp_local_on.bpf.c b/tools/testing/selftests/sched_ext/dsp_local_on.bpf.c
+index c9a2da0575a0fa..6dcf7e6104afb0 100644
+--- a/tools/testing/selftests/sched_ext/dsp_local_on.bpf.c
++++ b/tools/testing/selftests/sched_ext/dsp_local_on.bpf.c
+@@ -43,7 +43,7 @@ void BPF_STRUCT_OPS(dsp_local_on_dispatch, s32 cpu, struct task_struct *prev)
+ 	if (!p)
+ 		return;
+ 
+-	if (p->nr_cpus_allowed == nr_cpus)
++	if (p->nr_cpus_allowed == nr_cpus && !is_migration_disabled(p))
+ 		target = bpf_get_prandom_u32() % nr_cpus;
+ 	else
+ 		target = scx_bpf_task_cpu(p);


^ permalink raw reply related	[flat|nested] 82+ messages in thread
* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2025-03-20 22:39 Mike Pagano
  0 siblings, 0 replies; 82+ messages in thread
From: Mike Pagano @ 2025-03-20 22:39 UTC (permalink / raw
  To: gentoo-commits

commit:     18ea66dfadb2f6fded8b475ebf3396a1e7cb622d
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Mar 20 22:39:25 2025 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Mar 20 22:39:25 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=18ea66df

wifi: mt76: mt7921: fix kernel panic due to null pointer dereference

Bug: https://bugs.gentoo.org/950243

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                    | 34 +++---------
 2400_wifi-mt76-mt7921-null-ptr-deref-fix.patch | 74 ++++++++++++++++++++++++++
 2 files changed, 81 insertions(+), 27 deletions(-)

diff --git a/0000_README b/0000_README
index a2f75d4a..c53357bf 100644
--- a/0000_README
+++ b/0000_README
@@ -95,30 +95,6 @@ Patch:  1012_linux-6.12.13.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.12.13
 
-Patch:  1013_linux-6.12.14.patch
-From:   https://www.kernel.org
-Desc:   Linux 6.12.14
-
-Patch:  1014_linux-6.12.15.patch
-From:   https://www.kernel.org
-Desc:   Linux 6.12.15
-
-Patch:  1015_linux-6.12.16.patch
-From:   https://www.kernel.org
-Desc:   Linux 6.12.16
-
-Patch:  1016_linux-6.12.17.patch
-From:   https://www.kernel.org
-Desc:   Linux 6.12.17
-
-Patch:  1017_linux-6.12.18.patch
-From:   https://www.kernel.org
-Desc:   Linux 6.12.18
-
-Patch:  1018_linux-6.12.19.patch
-From:   https://www.kernel.org
-Desc:   Linux 6.12.19
-
 Patch:  1500_fortify-copy-size-value-range-tracking-fix.patch
 From:   https://git.kernel.org/
 Desc:   fortify: Hide run-time copy size from value range tracking
@@ -139,6 +115,10 @@ Patch:  2000_BT-Check-key-sizes-only-if-Secure-Simple-Pairing-enabled.patch
 From:   https://lore.kernel.org/linux-bluetooth/20190522070540.48895-1-marcel@holtmann.org/raw
 Desc:   Bluetooth: Check key sizes only when Secure Simple Pairing is enabled. See bug #686758
 
+Patch:  2400_wifi-mt76-mt7921-null-ptr-deref-fix.patch
+From:   https://github.com/nbd168/wireless/commit/adc3fd2a2277b7cc0b61692463771bf9bd298036
+Desc:   wifi: mt76: mt7921: fix kernel panic due to null pointer dereference
+
 Patch:  2901_tools-lib-subcmd-compile-fix.patch
 From:   https://lore.kernel.org/all/20240731085217.94928-1-michael.weiss@aisec.fraunhofer.de/
 Desc:   tools lib subcmd: Fixed uninitialized use of variable in parse-options
@@ -151,9 +131,9 @@ Patch:  2920_sign-file-patch-for-libressl.patch
 From:   https://bugs.gentoo.org/717166
 Desc:   sign-file: full functionality with modern LibreSSL
 
-Patch:  2980_kbuild-gcc15-gnu23-to-gnu11-fix.patch
-From:   https://github.com/hhoffstaette/kernel-patches/
-Desc:   gcc 15 kbuild fixes
+Patch:  2980_GCC15-gnu23-to-gnu11-fix.patch
+From:   https://lore.kernel.org/linux-kbuild/20241119044724.GA2246422@thelio-3990X/
+Desc:   GCC 15 defaults to -std=gnu23. Hack in CSTD_FLAG to pass -std=gnu11 everywhere.
 
 Patch:  2990_libbpf-v2-workaround-Wmaybe-uninitialized-false-pos.patch
 From:   https://lore.kernel.org/bpf/

diff --git a/2400_wifi-mt76-mt7921-null-ptr-deref-fix.patch b/2400_wifi-mt76-mt7921-null-ptr-deref-fix.patch
new file mode 100644
index 00000000..1cc1dbf3
--- /dev/null
+++ b/2400_wifi-mt76-mt7921-null-ptr-deref-fix.patch
@@ -0,0 +1,74 @@
+From adc3fd2a2277b7cc0b61692463771bf9bd298036 Mon Sep 17 00:00:00 2001
+From: Ming Yen Hsieh <mingyen.hsieh@mediatek.com>
+Date: Tue, 18 Feb 2025 11:33:42 +0800
+Subject: [PATCH] wifi: mt76: mt7921: fix kernel panic due to null pointer
+ dereference
+
+Address a kernel panic caused by a null pointer dereference in the
+`mt792x_rx_get_wcid` function. The issue arises because the `deflink` structure
+is not properly initialized with the `sta` context. This patch ensures that the
+`deflink` structure is correctly linked to the `sta` context, preventing the
+null pointer dereference.
+
+ BUG: kernel NULL pointer dereference, address: 0000000000000400
+ #PF: supervisor read access in kernel mode
+ #PF: error_code(0x0000) - not-present page
+ PGD 0 P4D 0
+ Oops: Oops: 0000 [#1] PREEMPT SMP NOPTI
+ CPU: 0 UID: 0 PID: 470 Comm: mt76-usb-rx phy Not tainted 6.12.13-gentoo-dist #1
+ Hardware name:  /AMD HUDSON-M1, BIOS 4.6.4 11/15/2011
+ RIP: 0010:mt792x_rx_get_wcid+0x48/0x140 [mt792x_lib]
+ RSP: 0018:ffffa147c055fd98 EFLAGS: 00010202
+ RAX: 0000000000000000 RBX: ffff8e9ecb652000 RCX: 0000000000000000
+ RDX: 0000000000000000 RSI: 0000000000000001 RDI: ffff8e9ecb652000
+ RBP: 0000000000000685 R08: ffff8e9ec6570000 R09: 0000000000000000
+ R10: ffff8e9ecd2ca000 R11: ffff8e9f22a217c0 R12: 0000000038010119
+ R13: 0000000080843801 R14: ffff8e9ec6570000 R15: ffff8e9ecb652000
+ FS:  0000000000000000(0000) GS:ffff8e9f22a00000(0000) knlGS:0000000000000000
+ CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ CR2: 0000000000000400 CR3: 000000000d2ea000 CR4: 00000000000006f0
+ Call Trace:
+  <TASK>
+  ? __die_body.cold+0x19/0x27
+  ? page_fault_oops+0x15a/0x2f0
+  ? search_module_extables+0x19/0x60
+  ? search_bpf_extables+0x5f/0x80
+  ? exc_page_fault+0x7e/0x180
+  ? asm_exc_page_fault+0x26/0x30
+  ? mt792x_rx_get_wcid+0x48/0x140 [mt792x_lib]
+  mt7921_queue_rx_skb+0x1c6/0xaa0 [mt7921_common]
+  mt76u_alloc_queues+0x784/0x810 [mt76_usb]
+  ? __pfx___mt76_worker_fn+0x10/0x10 [mt76]
+  __mt76_worker_fn+0x4f/0x80 [mt76]
+  kthread+0xd2/0x100
+  ? __pfx_kthread+0x10/0x10
+  ret_from_fork+0x34/0x50
+  ? __pfx_kthread+0x10/0x10
+  ret_from_fork_asm+0x1a/0x30
+  </TASK>
+ ---[ end trace 0000000000000000 ]---
+
+Reported-by: Nick Morrow <usbwifi2024@gmail.com>
+Closes: https://github.com/morrownr/USB-WiFi/issues/577
+Cc: stable@vger.kernel.org
+Fixes: 90c10286b176 ("wifi: mt76: mt7925: Update mt792x_rx_get_wcid for per-link STA")
+Signed-off-by: Ming Yen Hsieh <mingyen.hsieh@mediatek.com>
+Tested-by: Salah Coronya <salah.coronya@gmail.com>
+Link: https://patch.msgid.link/20250218033343.1999648-1-mingyen.hsieh@mediatek.com
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ drivers/net/wireless/mediatek/mt76/mt7921/main.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+index 13e58c328aff..78b77a54d195 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+@@ -811,6 +811,7 @@ int mt7921_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ 	msta->deflink.wcid.phy_idx = mvif->bss_conf.mt76.band_idx;
+ 	msta->deflink.wcid.tx_info |= MT_WCID_TX_INFO_SET;
+ 	msta->deflink.last_txs = jiffies;
++	msta->deflink.sta = msta;
+ 
+ 	ret = mt76_connac_pm_wake(&dev->mphy, &dev->pm);
+ 	if (ret)

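The one-line fix ties each station's deflink back to the station itself. Per the commit message, mt792x_rx_get_wcid() relies on that back-pointer when classifying received frames, so leaving it unset produced the NULL dereference in the trace above. A minimal C sketch of the bug pattern, using simplified hypothetical types rather than the real mt76 structures:

#include <stdio.h>

/* Simplified hypothetical stand-ins for the mt76 station structures. */
struct sta;

struct sta_link {
	struct sta *sta;	/* back-pointer to the owning station */
	unsigned int wcid_idx;
};

struct sta {
	struct sta_link deflink;
	int aid;
};

/* rx path: walk from the link entry back to the station. */
static int rx_get_aid(const struct sta_link *link)
{
	return link->sta->aid;	/* NULL dereference if sta was never set */
}

static void sta_add(struct sta *msta)
{
	msta->deflink.wcid_idx = 1;
	msta->deflink.sta = msta;	/* the assignment the patch adds */
}

int main(void)
{
	struct sta s = { .aid = 42 };

	sta_add(&s);
	printf("aid=%d\n", rx_get_aid(&s.deflink));
	return 0;
}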

^ permalink raw reply related	[flat|nested] 82+ messages in thread
* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2025-03-13 12:54 Mike Pagano
  0 siblings, 0 replies; 82+ messages in thread
From: Mike Pagano @ 2025-03-13 12:54 UTC (permalink / raw
  To: gentoo-commits

commit:     eaf25fbd983469e888a0f8a6a6c0e1f7cf5f60c2
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Mar 13 12:54:46 2025 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Mar 13 12:54:46 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=eaf25fbd

Linux patch 6.12.19

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |     4 +
 1018_linux-6.12.19.patch | 18520 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 18524 insertions(+)

diff --git a/0000_README b/0000_README
index 85e743e9..a2f75d4a 100644
--- a/0000_README
+++ b/0000_README
@@ -115,6 +115,10 @@ Patch:  1017_linux-6.12.18.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.12.18
 
+Patch:  1018_linux-6.12.19.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.12.19
+
 Patch:  1500_fortify-copy-size-value-range-tracking-fix.patch
 From:   https://git.kernel.org/
 Desc:   fortify: Hide run-time copy size from value range tracking

diff --git a/1018_linux-6.12.19.patch b/1018_linux-6.12.19.patch
new file mode 100644
index 00000000..8440f974
--- /dev/null
+++ b/1018_linux-6.12.19.patch
@@ -0,0 +1,18520 @@
+diff --git a/.clippy.toml b/.clippy.toml
+new file mode 100644
+index 00000000000000..e4c4eef10b28c1
+--- /dev/null
++++ b/.clippy.toml
+@@ -0,0 +1,9 @@
++# SPDX-License-Identifier: GPL-2.0
++
++check-private-items = true
++
++disallowed-macros = [
++    # The `clippy::dbg_macro` lint only works with `std::dbg!`, thus we simulate
++    # it here, see: https://github.com/rust-lang/rust-clippy/issues/11303.
++    { path = "kernel::dbg", reason = "the `dbg!` macro is intended as a debugging tool" },
++]
+diff --git a/.gitignore b/.gitignore
+index 56972adb5031af..a61e4778d011cf 100644
+--- a/.gitignore
++++ b/.gitignore
+@@ -103,6 +103,7 @@ modules.order
+ # We don't want to ignore the following even if they are dot-files
+ #
+ !.clang-format
++!.clippy.toml
+ !.cocciconfig
+ !.editorconfig
+ !.get_maintainer.ignore
+diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst
+index f8bc1630eba056..fa21cdd610b21a 100644
+--- a/Documentation/admin-guide/sysctl/kernel.rst
++++ b/Documentation/admin-guide/sysctl/kernel.rst
+@@ -212,6 +212,17 @@ pid>/``).
+ This value defaults to 0.
+ 
+ 
++core_sort_vma
++=============
++
++The default coredump writes VMAs in address order. Setting
++``core_sort_vma`` to 1 causes VMAs to be written from smallest
++size to largest size. This is known to break at least elfutils, but
++can be handy when dealing with very large (and truncated)
++coredumps where the more useful debugging details are included
++in the smaller VMAs.
++
++
+ core_uses_pid
+ =============
+ 
+diff --git a/Documentation/rust/coding-guidelines.rst b/Documentation/rust/coding-guidelines.rst
+index 329b070a1d4736..a2e326b42410f8 100644
+--- a/Documentation/rust/coding-guidelines.rst
++++ b/Documentation/rust/coding-guidelines.rst
+@@ -227,3 +227,149 @@ The equivalent in Rust may look like (ignoring documentation):
+ That is, the equivalent of ``GPIO_LINE_DIRECTION_IN`` would be referred to as
+ ``gpio::LineDirection::In``. In particular, it should not be named
+ ``gpio::gpio_line_direction::GPIO_LINE_DIRECTION_IN``.
++
++
++Lints
++-----
++
++In Rust, it is possible to ``allow`` particular warnings (diagnostics, lints)
++locally, making the compiler ignore instances of a given warning within a given
++function, module, block, etc.
++
++It is similar to ``#pragma GCC diagnostic push`` + ``ignored`` + ``pop`` in C
++[#]_:
++
++.. code-block:: c
++
++	#pragma GCC diagnostic push
++	#pragma GCC diagnostic ignored "-Wunused-function"
++	static void f(void) {}
++	#pragma GCC diagnostic pop
++
++.. [#] In this particular case, the kernel's ``__{always,maybe}_unused``
++       attributes (C23's ``[[maybe_unused]]``) may be used; however, the example
++       is meant to reflect the equivalent lint in Rust discussed afterwards.
++
++But way less verbose:
++
++.. code-block:: rust
++
++	#[allow(dead_code)]
++	fn f() {}
++
++By that virtue, it makes it possible to comfortably enable more diagnostics by
++default (i.e. outside ``W=`` levels). In particular, those that may have some
++false positives but that are otherwise quite useful to keep enabled to catch
++potential mistakes.
++
++On top of that, Rust provides the ``expect`` attribute which takes this further.
++It makes the compiler warn if the warning was not produced. For instance, the
++following will ensure that, when ``f()`` is called somewhere, we will have to
++remove the attribute:
++
++.. code-block:: rust
++
++	#[expect(dead_code)]
++	fn f() {}
++
++If we do not, we get a warning from the compiler::
++
++	warning: this lint expectation is unfulfilled
++	 --> x.rs:3:10
++	  |
++	3 | #[expect(dead_code)]
++	  |          ^^^^^^^^^
++	  |
++	  = note: `#[warn(unfulfilled_lint_expectations)]` on by default
++
++This means that ``expect``\ s do not get forgotten when they are not needed, which
++may happen in several situations, e.g.:
++
++- Temporary attributes added while developing.
++
++- Improvements in lints in the compiler, Clippy or custom tools which may
++  remove a false positive.
++
++- When the lint is not needed anymore because it was expected that it would be
++  removed at some point, such as the ``dead_code`` example above.
++
++It also increases the visibility of the remaining ``allow``\ s and reduces the
++chance of misapplying one.
++
++Thus prefer ``expect`` over ``allow`` unless:
++
++- Conditional compilation triggers the warning in some cases but not others.
++
++  If there are only a few cases where the warning triggers (or does not
++  trigger) compared to the total number of cases, then one may consider using
++  a conditional ``expect`` (i.e. ``cfg_attr(..., expect(...))``). Otherwise,
++  it is likely simpler to just use ``allow``.
++
++- Inside macros, when the different invocations may create expanded code that
++  triggers the warning in some cases but not in others.
++
++- When code may trigger a warning for some architectures but not others, such
++  as an ``as`` cast to a C FFI type.
++
++As a more developed example, consider for instance this program:
++
++.. code-block:: rust
++
++	fn g() {}
++
++	fn main() {
++	    #[cfg(CONFIG_X)]
++	    g();
++	}
++
++Here, function ``g()`` is dead code if ``CONFIG_X`` is not set. Can we use
++``expect`` here?
++
++.. code-block:: rust
++
++	#[expect(dead_code)]
++	fn g() {}
++
++	fn main() {
++	    #[cfg(CONFIG_X)]
++	    g();
++	}
++
++This would emit a lint if ``CONFIG_X`` is set, since it is not dead code in that
++configuration. Therefore, in cases like this, we cannot use ``expect`` as-is.
++
++A simple possibility is using ``allow``:
++
++.. code-block:: rust
++
++	#[allow(dead_code)]
++	fn g() {}
++
++	fn main() {
++	    #[cfg(CONFIG_X)]
++	    g();
++	}
++
++An alternative would be using a conditional ``expect``:
++
++.. code-block:: rust
++
++	#[cfg_attr(not(CONFIG_X), expect(dead_code))]
++	fn g() {}
++
++	fn main() {
++	    #[cfg(CONFIG_X)]
++	    g();
++	}
++
++This would ensure that, if someone introduces another call to ``g()`` somewhere
++(e.g. unconditionally), then it would be spotted that it is not dead code
++anymore. However, the ``cfg_attr`` is more complex than a simple ``allow``.
++
++Therefore, it is likely that it is not worth using conditional ``expect``\ s when
++more than one or two configurations are involved or when the lint may be
++triggered due to non-local changes (such as ``dead_code``).
++
++For more information about diagnostics in Rust, please see:
++
++	https://doc.rust-lang.org/stable/reference/attributes/diagnostics.html
+diff --git a/MAINTAINERS b/MAINTAINERS
+index 6bb4ec0c162a53..de04c7ba8571bd 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -20175,6 +20175,7 @@ B:	https://github.com/Rust-for-Linux/linux/issues
+ C:	zulip://rust-for-linux.zulipchat.com
+ P:	https://rust-for-linux.com/contributing
+ T:	git https://github.com/Rust-for-Linux/linux.git rust-next
++F:	.clippy.toml
+ F:	Documentation/rust/
+ F:	rust/
+ F:	samples/rust/
+@@ -20182,6 +20183,13 @@ F:	scripts/*rust*
+ F:	tools/testing/selftests/rust/
+ K:	\b(?i:rust)\b
+ 
++RUST [ALLOC]
++M:	Danilo Krummrich <dakr@kernel.org>
++L:	rust-for-linux@vger.kernel.org
++S:	Maintained
++F:	rust/kernel/alloc.rs
++F:	rust/kernel/alloc/
++
+ RXRPC SOCKETS (AF_RXRPC)
+ M:	David Howells <dhowells@redhat.com>
+ M:	Marc Dionne <marc.dionne@auristor.com>
+diff --git a/Makefile b/Makefile
+index 17dfe0a8ca8fa9..343c9f25433c7c 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 12
+-SUBLEVEL = 18
++SUBLEVEL = 19
+ EXTRAVERSION =
+ NAME = Baby Opossum Posse
+ 
+@@ -446,19 +446,23 @@ KBUILD_USERLDFLAGS := $(USERLDFLAGS)
+ export rust_common_flags := --edition=2021 \
+ 			    -Zbinary_dep_depinfo=y \
+ 			    -Astable_features \
+-			    -Dunsafe_op_in_unsafe_fn \
+ 			    -Dnon_ascii_idents \
++			    -Dunsafe_op_in_unsafe_fn \
++			    -Wmissing_docs \
+ 			    -Wrust_2018_idioms \
+ 			    -Wunreachable_pub \
+-			    -Wmissing_docs \
+-			    -Wrustdoc::missing_crate_level_docs \
+ 			    -Wclippy::all \
++			    -Wclippy::ignored_unit_patterns \
+ 			    -Wclippy::mut_mut \
+ 			    -Wclippy::needless_bitwise_bool \
+ 			    -Wclippy::needless_continue \
+ 			    -Aclippy::needless_lifetimes \
+ 			    -Wclippy::no_mangle_with_rust_abi \
+-			    -Wclippy::dbg_macro
++			    -Wclippy::undocumented_unsafe_blocks \
++			    -Wclippy::unnecessary_safety_comment \
++			    -Wclippy::unnecessary_safety_doc \
++			    -Wrustdoc::missing_crate_level_docs \
++			    -Wrustdoc::unescaped_backticks
+ 
+ KBUILD_HOSTCFLAGS   := $(KBUILD_USERHOSTCFLAGS) $(HOST_LFS_CFLAGS) \
+ 		       $(HOSTCFLAGS) -I $(srctree)/scripts/include
+@@ -583,6 +587,9 @@ endif
+ # Allows the usage of unstable features in stable compilers.
+ export RUSTC_BOOTSTRAP := 1
+ 
++# Allows finding `.clippy.toml` in out-of-srctree builds.
++export CLIPPY_CONF_DIR := $(srctree)
++
+ export ARCH SRCARCH CONFIG_SHELL BASH HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE LD CC HOSTPKG_CONFIG
+ export RUSTC RUSTDOC RUSTFMT RUSTC_OR_CLIPPY_QUIET RUSTC_OR_CLIPPY BINDGEN
+ export HOSTRUSTC KBUILD_HOSTRUSTFLAGS
+@@ -1060,6 +1067,11 @@ endif
+ KBUILD_USERCFLAGS  += $(filter -m32 -m64 --target=%, $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS))
+ KBUILD_USERLDFLAGS += $(filter -m32 -m64 --target=%, $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS))
+ 
++# userspace programs are linked via the compiler, use the correct linker
++ifeq ($(CONFIG_CC_IS_CLANG)$(CONFIG_LD_IS_LLD),yy)
++KBUILD_USERLDFLAGS += --ld-path=$(LD)
++endif
++
+ # make the checker run with the right architecture
+ CHECKFLAGS += --arch=$(ARCH)
+ 
+diff --git a/arch/arm64/include/asm/hugetlb.h b/arch/arm64/include/asm/hugetlb.h
+index 293f880865e8d0..f0304273eb3519 100644
+--- a/arch/arm64/include/asm/hugetlb.h
++++ b/arch/arm64/include/asm/hugetlb.h
+@@ -34,8 +34,8 @@ extern int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+ 				      unsigned long addr, pte_t *ptep,
+ 				      pte_t pte, int dirty);
+ #define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
+-extern pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+-				     unsigned long addr, pte_t *ptep);
++extern pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
++				     pte_t *ptep, unsigned long sz);
+ #define __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT
+ extern void huge_ptep_set_wrprotect(struct mm_struct *mm,
+ 				    unsigned long addr, pte_t *ptep);
+diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
+index 0a6956bbfb3269..fe167ce297a161 100644
+--- a/arch/arm64/mm/hugetlbpage.c
++++ b/arch/arm64/mm/hugetlbpage.c
+@@ -100,20 +100,11 @@ static int find_num_contig(struct mm_struct *mm, unsigned long addr,
+ 
+ static inline int num_contig_ptes(unsigned long size, size_t *pgsize)
+ {
+-	int contig_ptes = 0;
++	int contig_ptes = 1;
+ 
+ 	*pgsize = size;
+ 
+ 	switch (size) {
+-#ifndef __PAGETABLE_PMD_FOLDED
+-	case PUD_SIZE:
+-		if (pud_sect_supported())
+-			contig_ptes = 1;
+-		break;
+-#endif
+-	case PMD_SIZE:
+-		contig_ptes = 1;
+-		break;
+ 	case CONT_PMD_SIZE:
+ 		*pgsize = PMD_SIZE;
+ 		contig_ptes = CONT_PMDS;
+@@ -122,6 +113,8 @@ static inline int num_contig_ptes(unsigned long size, size_t *pgsize)
+ 		*pgsize = PAGE_SIZE;
+ 		contig_ptes = CONT_PTES;
+ 		break;
++	default:
++		WARN_ON(!__hugetlb_valid_size(size));
+ 	}
+ 
+ 	return contig_ptes;
+@@ -163,24 +156,23 @@ static pte_t get_clear_contig(struct mm_struct *mm,
+ 			     unsigned long pgsize,
+ 			     unsigned long ncontig)
+ {
+-	pte_t orig_pte = __ptep_get(ptep);
+-	unsigned long i;
+-
+-	for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) {
+-		pte_t pte = __ptep_get_and_clear(mm, addr, ptep);
+-
+-		/*
+-		 * If HW_AFDBM is enabled, then the HW could turn on
+-		 * the dirty or accessed bit for any page in the set,
+-		 * so check them all.
+-		 */
+-		if (pte_dirty(pte))
+-			orig_pte = pte_mkdirty(orig_pte);
+-
+-		if (pte_young(pte))
+-			orig_pte = pte_mkyoung(orig_pte);
++	pte_t pte, tmp_pte;
++	bool present;
++
++	pte = __ptep_get_and_clear(mm, addr, ptep);
++	present = pte_present(pte);
++	while (--ncontig) {
++		ptep++;
++		addr += pgsize;
++		tmp_pte = __ptep_get_and_clear(mm, addr, ptep);
++		if (present) {
++			if (pte_dirty(tmp_pte))
++				pte = pte_mkdirty(pte);
++			if (pte_young(tmp_pte))
++				pte = pte_mkyoung(pte);
++		}
+ 	}
+-	return orig_pte;
++	return pte;
+ }
+ 
+ static pte_t get_clear_contig_flush(struct mm_struct *mm,
+@@ -385,18 +377,13 @@ void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
+ 		__pte_clear(mm, addr, ptep);
+ }
+ 
+-pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+-			      unsigned long addr, pte_t *ptep)
++pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
++			      pte_t *ptep, unsigned long sz)
+ {
+ 	int ncontig;
+ 	size_t pgsize;
+-	pte_t orig_pte = __ptep_get(ptep);
+-
+-	if (!pte_cont(orig_pte))
+-		return __ptep_get_and_clear(mm, addr, ptep);
+-
+-	ncontig = find_num_contig(mm, addr, ptep, &pgsize);
+ 
++	ncontig = num_contig_ptes(sz, &pgsize);
+ 	return get_clear_contig(mm, addr, ptep, pgsize, ncontig);
+ }
+ 
+@@ -538,6 +525,8 @@ bool __init arch_hugetlb_valid_size(unsigned long size)
+ 
+ pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
+ {
++	unsigned long psize = huge_page_size(hstate_vma(vma));
++
+ 	if (alternative_has_cap_unlikely(ARM64_WORKAROUND_2645198)) {
+ 		/*
+ 		 * Break-before-make (BBM) is required for all user space mappings
+@@ -547,7 +536,7 @@ pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr
+ 		if (pte_user_exec(__ptep_get(ptep)))
+ 			return huge_ptep_clear_flush(vma, addr, ptep);
+ 	}
+-	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
++	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, psize);
+ }
+ 
+ void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
+diff --git a/arch/loongarch/include/asm/bug.h b/arch/loongarch/include/asm/bug.h
+index 08388876ade4ce..561ac1bf79e26c 100644
+--- a/arch/loongarch/include/asm/bug.h
++++ b/arch/loongarch/include/asm/bug.h
+@@ -4,6 +4,7 @@
+ 
+ #include <asm/break.h>
+ #include <linux/stringify.h>
++#include <linux/objtool.h>
+ 
+ #ifndef CONFIG_DEBUG_BUGVERBOSE
+ #define _BUGVERBOSE_LOCATION(file, line)
+@@ -33,25 +34,25 @@
+ 
+ #define ASM_BUG_FLAGS(flags)					\
+ 	__BUG_ENTRY(flags)					\
+-	break		BRK_BUG
++	break		BRK_BUG;
+ 
+ #define ASM_BUG()	ASM_BUG_FLAGS(0)
+ 
+-#define __BUG_FLAGS(flags)					\
+-	asm_inline volatile (__stringify(ASM_BUG_FLAGS(flags)));
++#define __BUG_FLAGS(flags, extra)					\
++	asm_inline volatile (__stringify(ASM_BUG_FLAGS(flags))		\
++			     extra);
+ 
+ #define __WARN_FLAGS(flags)					\
+ do {								\
+ 	instrumentation_begin();				\
+-	__BUG_FLAGS(BUGFLAG_WARNING|(flags));			\
+-	annotate_reachable();					\
++	__BUG_FLAGS(BUGFLAG_WARNING|(flags), ASM_REACHABLE);	\
+ 	instrumentation_end();					\
+ } while (0)
+ 
+ #define BUG()							\
+ do {								\
+ 	instrumentation_begin();				\
+-	__BUG_FLAGS(0);						\
++	__BUG_FLAGS(0, "");					\
+ 	unreachable();						\
+ } while (0)
+ 
+diff --git a/arch/loongarch/include/asm/hugetlb.h b/arch/loongarch/include/asm/hugetlb.h
+index 376c0708e2979b..6302e60fbaee1a 100644
+--- a/arch/loongarch/include/asm/hugetlb.h
++++ b/arch/loongarch/include/asm/hugetlb.h
+@@ -41,7 +41,8 @@ static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
+ 
+ #define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
+ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+-					    unsigned long addr, pte_t *ptep)
++					    unsigned long addr, pte_t *ptep,
++					    unsigned long sz)
+ {
+ 	pte_t clear;
+ 	pte_t pte = ptep_get(ptep);
+@@ -56,8 +57,9 @@ static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
+ 					  unsigned long addr, pte_t *ptep)
+ {
+ 	pte_t pte;
++	unsigned long sz = huge_page_size(hstate_vma(vma));
+ 
+-	pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
++	pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, sz);
+ 	flush_tlb_page(vma, addr);
+ 	return pte;
+ }
+diff --git a/arch/loongarch/kernel/machine_kexec.c b/arch/loongarch/kernel/machine_kexec.c
+index 8ae641dc53bb77..f9381800e291cc 100644
+--- a/arch/loongarch/kernel/machine_kexec.c
++++ b/arch/loongarch/kernel/machine_kexec.c
+@@ -126,14 +126,14 @@ void kexec_reboot(void)
+ 	/* All secondary cpus go to kexec_smp_wait */
+ 	if (smp_processor_id() > 0) {
+ 		relocated_kexec_smp_wait(NULL);
+-		unreachable();
++		BUG();
+ 	}
+ #endif
+ 
+ 	do_kexec = (void *)reboot_code_buffer;
+ 	do_kexec(efi_boot, cmdline_ptr, systable_ptr, start_addr, first_ind_entry);
+ 
+-	unreachable();
++	BUG();
+ }
+ 
+ 
+diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c
+index 56934fe58170e0..1fa6a604734ef2 100644
+--- a/arch/loongarch/kernel/setup.c
++++ b/arch/loongarch/kernel/setup.c
+@@ -387,6 +387,9 @@ static void __init check_kernel_sections_mem(void)
+  */
+ static void __init arch_mem_init(char **cmdline_p)
+ {
++	/* Recalculate max_low_pfn for "mem=xxx" */
++	max_pfn = max_low_pfn = PHYS_PFN(memblock_end_of_DRAM());
++
+ 	if (usermem)
+ 		pr_info("User-defined physical RAM map overwrite\n");
+ 
+diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c
+index 5d59e9ce2772d8..d96065dbe779be 100644
+--- a/arch/loongarch/kernel/smp.c
++++ b/arch/loongarch/kernel/smp.c
+@@ -19,6 +19,7 @@
+ #include <linux/smp.h>
+ #include <linux/threads.h>
+ #include <linux/export.h>
++#include <linux/suspend.h>
+ #include <linux/syscore_ops.h>
+ #include <linux/time.h>
+ #include <linux/tracepoint.h>
+@@ -423,7 +424,7 @@ void loongson_cpu_die(unsigned int cpu)
+ 	mb();
+ }
+ 
+-void __noreturn arch_cpu_idle_dead(void)
++static void __noreturn idle_play_dead(void)
+ {
+ 	register uint64_t addr;
+ 	register void (*init_fn)(void);
+@@ -447,6 +448,50 @@ void __noreturn arch_cpu_idle_dead(void)
+ 	BUG();
+ }
+ 
++#ifdef CONFIG_HIBERNATION
++static void __noreturn poll_play_dead(void)
++{
++	register uint64_t addr;
++	register void (*init_fn)(void);
++
++	idle_task_exit();
++	__this_cpu_write(cpu_state, CPU_DEAD);
++
++	__smp_mb();
++	do {
++		__asm__ __volatile__("nop\n\t");
++		addr = iocsr_read64(LOONGARCH_IOCSR_MBUF0);
++	} while (addr == 0);
++
++	init_fn = (void *)TO_CACHE(addr);
++	iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_CLEAR);
++
++	init_fn();
++	BUG();
++}
++#endif
++
++static void (*play_dead)(void) = idle_play_dead;
++
++void __noreturn arch_cpu_idle_dead(void)
++{
++	play_dead();
++	BUG(); /* play_dead() doesn't return */
++}
++
++#ifdef CONFIG_HIBERNATION
++int hibernate_resume_nonboot_cpu_disable(void)
++{
++	int ret;
++
++	play_dead = poll_play_dead;
++	ret = suspend_disable_secondary_cpus();
++	play_dead = idle_play_dead;
++
++	return ret;
++}
++#endif
++
+ #endif
+ 
+ /*
+diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c
+index 90894f70ff4a50..add52e927f1530 100644
+--- a/arch/loongarch/kvm/exit.c
++++ b/arch/loongarch/kvm/exit.c
+@@ -624,6 +624,12 @@ static int kvm_handle_rdwr_fault(struct kvm_vcpu *vcpu, bool write)
+ 	struct kvm_run *run = vcpu->run;
+ 	unsigned long badv = vcpu->arch.badv;
+ 
++	/* Inject ADE exception if exceed max GPA size */
++	if (unlikely(badv >= vcpu->kvm->arch.gpa_size)) {
++		kvm_queue_exception(vcpu, EXCCODE_ADE, EXSUBCODE_ADEM);
++		return RESUME_GUEST;
++	}
++
+ 	ret = kvm_handle_mm_fault(vcpu, badv, write);
+ 	if (ret) {
+ 		/* Treat as MMIO */
+diff --git a/arch/loongarch/kvm/main.c b/arch/loongarch/kvm/main.c
+index 7e8f5d6829ef0c..34fad2c29ee695 100644
+--- a/arch/loongarch/kvm/main.c
++++ b/arch/loongarch/kvm/main.c
+@@ -297,6 +297,13 @@ int kvm_arch_enable_virtualization_cpu(void)
+ 	kvm_debug("GCFG:%lx GSTAT:%lx GINTC:%lx GTLBC:%lx",
+ 		  read_csr_gcfg(), read_csr_gstat(), read_csr_gintc(), read_csr_gtlbc());
+ 
++	/*
++	 * HW Guest CSR registers are lost after CPU suspend and resume.
++	 * Clear last_vcpu so that Guest CSR registers are forced to
++	 * reload from vCPU SW state.
++	 */
++	this_cpu_ptr(vmcs)->last_vcpu = NULL;
++
+ 	return 0;
+ }
+ 
+diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c
+index 9d53eca66fcc70..e7a084de64f7bf 100644
+--- a/arch/loongarch/kvm/vcpu.c
++++ b/arch/loongarch/kvm/vcpu.c
+@@ -311,7 +311,7 @@ static int kvm_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
+ {
+ 	int ret = RESUME_GUEST;
+ 	unsigned long estat = vcpu->arch.host_estat;
+-	u32 intr = estat & 0x1fff; /* Ignore NMI */
++	u32 intr = estat & CSR_ESTAT_IS;
+ 	u32 ecode = (estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;
+ 
+ 	vcpu->mode = OUTSIDE_GUEST_MODE;
+diff --git a/arch/loongarch/kvm/vm.c b/arch/loongarch/kvm/vm.c
+index 4ba734aaef87a7..fe9e973912d440 100644
+--- a/arch/loongarch/kvm/vm.c
++++ b/arch/loongarch/kvm/vm.c
+@@ -46,7 +46,11 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
+ 	if (kvm_pvtime_supported())
+ 		kvm->arch.pv_features |= BIT(KVM_FEATURE_STEAL_TIME);
+ 
+-	kvm->arch.gpa_size = BIT(cpu_vabits - 1);
++	/*
++	 * cpu_vabits covers the user address space only (half of the total).
++	 * The GPA size of the VM equals the size of the user address space.
++	 */
++	kvm->arch.gpa_size = BIT(cpu_vabits);
+ 	kvm->arch.root_level = CONFIG_PGTABLE_LEVELS - 1;
+ 	kvm->arch.invalid_ptes[0] = 0;
+ 	kvm->arch.invalid_ptes[1] = (unsigned long)invalid_pte_table;
+diff --git a/arch/mips/include/asm/hugetlb.h b/arch/mips/include/asm/hugetlb.h
+index fd69c88085542e..00ee3c0366305c 100644
+--- a/arch/mips/include/asm/hugetlb.h
++++ b/arch/mips/include/asm/hugetlb.h
+@@ -32,7 +32,8 @@ static inline int prepare_hugepage_range(struct file *file,
+ 
+ #define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
+ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+-					    unsigned long addr, pte_t *ptep)
++					    unsigned long addr, pte_t *ptep,
++					    unsigned long sz)
+ {
+ 	pte_t clear;
+ 	pte_t pte = *ptep;
+@@ -47,13 +48,14 @@ static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
+ 					  unsigned long addr, pte_t *ptep)
+ {
+ 	pte_t pte;
++	unsigned long sz = huge_page_size(hstate_vma(vma));
+ 
+ 	/*
+ 	 * clear the huge pte entry firstly, so that the other smp threads will
+ 	 * not get old pte entry after finishing flush_tlb_page and before
+ 	 * setting new huge pte entry
+ 	 */
+-	pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
++	pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, sz);
+ 	flush_tlb_page(vma, addr);
+ 	return pte;
+ }
+diff --git a/arch/parisc/include/asm/hugetlb.h b/arch/parisc/include/asm/hugetlb.h
+index 72daacc472a0a3..f7a91411dcc955 100644
+--- a/arch/parisc/include/asm/hugetlb.h
++++ b/arch/parisc/include/asm/hugetlb.h
+@@ -10,7 +10,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+ 
+ #define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
+ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+-			      pte_t *ptep);
++			      pte_t *ptep, unsigned long sz);
+ 
+ /*
+  * If the arch doesn't supply something else, assume that hugepage
+diff --git a/arch/parisc/mm/hugetlbpage.c b/arch/parisc/mm/hugetlbpage.c
+index aa664f7ddb6398..cec2b9a581dd3e 100644
+--- a/arch/parisc/mm/hugetlbpage.c
++++ b/arch/parisc/mm/hugetlbpage.c
+@@ -147,7 +147,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+ 
+ 
+ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+-			      pte_t *ptep)
++			      pte_t *ptep, unsigned long sz)
+ {
+ 	pte_t entry;
+ 
+diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
+index dad2e7980f245b..86326587e58de8 100644
+--- a/arch/powerpc/include/asm/hugetlb.h
++++ b/arch/powerpc/include/asm/hugetlb.h
+@@ -45,7 +45,8 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
+ 
+ #define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
+ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+-					    unsigned long addr, pte_t *ptep)
++					    unsigned long addr, pte_t *ptep,
++					    unsigned long sz)
+ {
+ 	return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 1));
+ }
+@@ -55,8 +56,9 @@ static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
+ 					  unsigned long addr, pte_t *ptep)
+ {
+ 	pte_t pte;
++	unsigned long sz = huge_page_size(hstate_vma(vma));
+ 
+-	pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
++	pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, sz);
+ 	flush_hugetlb_page(vma, addr);
+ 	return pte;
+ }
+diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
+index 6824e8139801c2..3708fa48bee95d 100644
+--- a/arch/powerpc/kvm/e500_mmu_host.c
++++ b/arch/powerpc/kvm/e500_mmu_host.c
+@@ -242,7 +242,7 @@ static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe)
+ 	return tlbe->mas7_3 & (MAS3_SW|MAS3_UW);
+ }
+ 
+-static inline bool kvmppc_e500_ref_setup(struct tlbe_ref *ref,
++static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
+ 					 struct kvm_book3e_206_tlb_entry *gtlbe,
+ 					 kvm_pfn_t pfn, unsigned int wimg)
+ {
+@@ -252,7 +252,11 @@ static inline bool kvmppc_e500_ref_setup(struct tlbe_ref *ref,
+ 	/* Use guest supplied MAS2_G and MAS2_E */
+ 	ref->flags |= (gtlbe->mas2 & MAS2_ATTRIB_MASK) | wimg;
+ 
+-	return tlbe_is_writable(gtlbe);
++	/* Mark the page accessed */
++	kvm_set_pfn_accessed(pfn);
++
++	if (tlbe_is_writable(gtlbe))
++		kvm_set_pfn_dirty(pfn);
+ }
+ 
+ static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
+@@ -322,7 +326,6 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
+ {
+ 	struct kvm_memory_slot *slot;
+ 	unsigned long pfn = 0; /* silence GCC warning */
+-	struct page *page = NULL;
+ 	unsigned long hva;
+ 	int pfnmap = 0;
+ 	int tsize = BOOK3E_PAGESZ_4K;
+@@ -334,7 +337,6 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
+ 	unsigned int wimg = 0;
+ 	pgd_t *pgdir;
+ 	unsigned long flags;
+-	bool writable = false;
+ 
+ 	/* used to check for invalidations in progress */
+ 	mmu_seq = kvm->mmu_invalidate_seq;
+@@ -444,7 +446,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
+ 
+ 	if (likely(!pfnmap)) {
+ 		tsize_pages = 1UL << (tsize + 10 - PAGE_SHIFT);
+-		pfn = __kvm_faultin_pfn(slot, gfn, FOLL_WRITE, NULL, &page);
++		pfn = gfn_to_pfn_memslot(slot, gfn);
+ 		if (is_error_noslot_pfn(pfn)) {
+ 			if (printk_ratelimit())
+ 				pr_err("%s: real page not found for gfn %lx\n",
+@@ -489,7 +491,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
+ 	}
+ 	local_irq_restore(flags);
+ 
+-	writable = kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);
++	kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);
+ 	kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
+ 				ref, gvaddr, stlbe);
+ 
+@@ -497,8 +499,11 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
+ 	kvmppc_mmu_flush_icache(pfn);
+ 
+ out:
+-	kvm_release_faultin_page(kvm, page, !!ret, writable);
+ 	spin_unlock(&kvm->mmu_lock);
++
++	/* Drop refcount on page, so that mmu notifiers can clear it */
++	kvm_release_pfn_clean(pfn);
++
+ 	return ret;
+ }
+ 
+diff --git a/arch/riscv/include/asm/hugetlb.h b/arch/riscv/include/asm/hugetlb.h
+index faf3624d80577c..4461264977684b 100644
+--- a/arch/riscv/include/asm/hugetlb.h
++++ b/arch/riscv/include/asm/hugetlb.h
+@@ -28,7 +28,8 @@ void set_huge_pte_at(struct mm_struct *mm,
+ 
+ #define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
+ pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+-			      unsigned long addr, pte_t *ptep);
++			      unsigned long addr, pte_t *ptep,
++			      unsigned long sz);
+ 
+ #define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
+ pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
+diff --git a/arch/riscv/mm/hugetlbpage.c b/arch/riscv/mm/hugetlbpage.c
+index 42314f0939220a..b4a78a4b35cff5 100644
+--- a/arch/riscv/mm/hugetlbpage.c
++++ b/arch/riscv/mm/hugetlbpage.c
+@@ -293,7 +293,7 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+ 
+ pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+ 			      unsigned long addr,
+-			      pte_t *ptep)
++			      pte_t *ptep, unsigned long sz)
+ {
+ 	pte_t orig_pte = ptep_get(ptep);
+ 	int pte_num;
+diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h
+index cf1b5d6fb1a629..4731a51241ba86 100644
+--- a/arch/s390/include/asm/hugetlb.h
++++ b/arch/s390/include/asm/hugetlb.h
+@@ -20,8 +20,15 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+ void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+ 		     pte_t *ptep, pte_t pte);
+ pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
+-pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+-			      unsigned long addr, pte_t *ptep);
++pte_t __huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
++				pte_t *ptep);
++
++static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
++					    unsigned long addr, pte_t *ptep,
++					    unsigned long sz)
++{
++	return __huge_ptep_get_and_clear(mm, addr, ptep);
++}
+ 
+ /*
+  * If the arch doesn't supply something else, assume that hugepage
+@@ -57,7 +64,7 @@ static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
+ static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
+ 					  unsigned long address, pte_t *ptep)
+ {
+-	return huge_ptep_get_and_clear(vma->vm_mm, address, ptep);
++	return __huge_ptep_get_and_clear(vma->vm_mm, address, ptep);
+ }
+ 
+ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+@@ -66,7 +73,7 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+ {
+ 	int changed = !pte_same(huge_ptep_get(vma->vm_mm, addr, ptep), pte);
+ 	if (changed) {
+-		huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
++		__huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
+ 		__set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
+ 	}
+ 	return changed;
+@@ -75,7 +82,7 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+ 					   unsigned long addr, pte_t *ptep)
+ {
+-	pte_t pte = huge_ptep_get_and_clear(mm, addr, ptep);
++	pte_t pte = __huge_ptep_get_and_clear(mm, addr, ptep);
+ 	__set_huge_pte_at(mm, addr, ptep, pte_wrprotect(pte));
+ }
+ 
+diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
+index 160b2acba8db2b..908bae84984351 100644
+--- a/arch/s390/kernel/traps.c
++++ b/arch/s390/kernel/traps.c
+@@ -284,10 +284,10 @@ static void __init test_monitor_call(void)
+ 		return;
+ 	asm volatile(
+ 		"	mc	0,0\n"
+-		"0:	xgr	%0,%0\n"
++		"0:	lhi	%[val],0\n"
+ 		"1:\n"
+-		EX_TABLE(0b,1b)
+-		: "+d" (val));
++		EX_TABLE(0b, 1b)
++		: [val] "+d" (val));
+ 	if (!val)
+ 		panic("Monitor call doesn't work!\n");
+ }
+diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
+index ded0eff58a192a..9c1ba8c0cac61a 100644
+--- a/arch/s390/mm/hugetlbpage.c
++++ b/arch/s390/mm/hugetlbpage.c
+@@ -174,8 +174,8 @@ pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+ 	return __rste_to_pte(pte_val(*ptep));
+ }
+ 
+-pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+-			      unsigned long addr, pte_t *ptep)
++pte_t __huge_ptep_get_and_clear(struct mm_struct *mm,
++				unsigned long addr, pte_t *ptep)
+ {
+ 	pte_t pte = huge_ptep_get(mm, addr, ptep);
+ 	pmd_t *pmdp = (pmd_t *) ptep;
+diff --git a/arch/sparc/include/asm/hugetlb.h b/arch/sparc/include/asm/hugetlb.h
+index c714ca6a05aa04..e7a9cdd498dca6 100644
+--- a/arch/sparc/include/asm/hugetlb.h
++++ b/arch/sparc/include/asm/hugetlb.h
+@@ -20,7 +20,7 @@ void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+ 
+ #define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
+ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+-			      pte_t *ptep);
++			      pte_t *ptep, unsigned long sz);
+ 
+ #define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
+ static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
+diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
+index cc91ca7a1e182c..c276d70a747995 100644
+--- a/arch/sparc/mm/hugetlbpage.c
++++ b/arch/sparc/mm/hugetlbpage.c
+@@ -368,7 +368,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+ }
+ 
+ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+-			      pte_t *ptep)
++			      pte_t *ptep, unsigned long sz)
+ {
+ 	unsigned int i, nptes, orig_shift, shift;
+ 	unsigned long size;
+diff --git a/arch/x86/boot/compressed/pgtable_64.c b/arch/x86/boot/compressed/pgtable_64.c
+index c882e1f67af01c..d8c5de40669d36 100644
+--- a/arch/x86/boot/compressed/pgtable_64.c
++++ b/arch/x86/boot/compressed/pgtable_64.c
+@@ -1,6 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0
+ #include "misc.h"
+ #include <asm/bootparam.h>
++#include <asm/bootparam_utils.h>
+ #include <asm/e820/types.h>
+ #include <asm/processor.h>
+ #include "pgtable.h"
+@@ -107,6 +108,7 @@ asmlinkage void configure_5level_paging(struct boot_params *bp, void *pgtable)
+ 	bool l5_required = false;
+ 
+ 	/* Initialize boot_params. Required for cmdline_find_option_bool(). */
++	sanitize_boot_params(bp);
+ 	boot_params_ptr = bp;
+ 
+ 	/*
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 8499b9cb9c8263..e4dd840e0becd4 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -761,6 +761,7 @@ struct kvm_vcpu_arch {
+ 	u32 pkru;
+ 	u32 hflags;
+ 	u64 efer;
++	u64 host_debugctl;
+ 	u64 apic_base;
+ 	struct kvm_lapic *apic;    /* kernel irqchip context */
+ 	bool load_eoi_exitmap_pending;
+diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
+index 37b8244899d895..04712fd0c96497 100644
+--- a/arch/x86/kernel/amd_nb.c
++++ b/arch/x86/kernel/amd_nb.c
+@@ -405,7 +405,6 @@ bool __init early_is_amd_nb(u32 device)
+ 
+ struct resource *amd_get_mmconfig_range(struct resource *res)
+ {
+-	u32 address;
+ 	u64 base, msr;
+ 	unsigned int segn_busn_bits;
+ 
+@@ -413,13 +412,11 @@ struct resource *amd_get_mmconfig_range(struct resource *res)
+ 	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
+ 		return NULL;
+ 
+-	/* assume all cpus from fam10h have mmconfig */
+-	if (boot_cpu_data.x86 < 0x10)
++	/* Assume CPUs from Fam10h have mmconfig, although not all VMs do */
++	if (boot_cpu_data.x86 < 0x10 ||
++	    rdmsrl_safe(MSR_FAM10H_MMIO_CONF_BASE, &msr))
+ 		return NULL;
+ 
+-	address = MSR_FAM10H_MMIO_CONF_BASE;
+-	rdmsrl(address, msr);
+-
+ 	/* mmconfig is not enabled */
+ 	if (!(msr & FAM10H_MMIO_CONF_ENABLE))
+ 		return NULL;
+diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c
+index e6fa03ed9172c0..a6c6bccfa8b8d3 100644
+--- a/arch/x86/kernel/cpu/cacheinfo.c
++++ b/arch/x86/kernel/cpu/cacheinfo.c
+@@ -808,7 +808,7 @@ void init_intel_cacheinfo(struct cpuinfo_x86 *c)
+ 			cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);
+ 
+ 			/* If bit 31 is set, this is an unknown format */
+-			for (j = 0 ; j < 3 ; j++)
++			for (j = 0 ; j < 4 ; j++)
+ 				if (regs[j] & (1 << 31))
+ 					regs[j] = 0;
+ 
+diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
+index 4b5f3d0521517a..b93d88ec141759 100644
+--- a/arch/x86/kernel/cpu/intel.c
++++ b/arch/x86/kernel/cpu/intel.c
+@@ -672,26 +672,37 @@ static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
+ }
+ #endif
+ 
+-#define TLB_INST_4K	0x01
+-#define TLB_INST_4M	0x02
+-#define TLB_INST_2M_4M	0x03
++#define TLB_INST_4K		0x01
++#define TLB_INST_4M		0x02
++#define TLB_INST_2M_4M		0x03
+ 
+-#define TLB_INST_ALL	0x05
+-#define TLB_INST_1G	0x06
++#define TLB_INST_ALL		0x05
++#define TLB_INST_1G		0x06
+ 
+-#define TLB_DATA_4K	0x11
+-#define TLB_DATA_4M	0x12
+-#define TLB_DATA_2M_4M	0x13
+-#define TLB_DATA_4K_4M	0x14
++#define TLB_DATA_4K		0x11
++#define TLB_DATA_4M		0x12
++#define TLB_DATA_2M_4M		0x13
++#define TLB_DATA_4K_4M		0x14
+ 
+-#define TLB_DATA_1G	0x16
++#define TLB_DATA_1G		0x16
++#define TLB_DATA_1G_2M_4M	0x17
+ 
+-#define TLB_DATA0_4K	0x21
+-#define TLB_DATA0_4M	0x22
+-#define TLB_DATA0_2M_4M	0x23
++#define TLB_DATA0_4K		0x21
++#define TLB_DATA0_4M		0x22
++#define TLB_DATA0_2M_4M		0x23
+ 
+-#define STLB_4K		0x41
+-#define STLB_4K_2M	0x42
++#define STLB_4K			0x41
++#define STLB_4K_2M		0x42
++
++/*
++ * All of leaf 0x2's one-byte TLB descriptors implies the same number of
++ * entries for their respective TLB types.  The 0x63 descriptor is an
++ * exception: it implies 4 dTLB entries for 1GB pages 32 dTLB entries
++ * for 2MB or 4MB pages.  Encode descriptor 0x63 dTLB entry count for
++ * 2MB/4MB pages here, as its count for dTLB 1GB pages is already at the
++ * intel_tlb_table[] mapping.
++ */
++#define TLB_0x63_2M_4M_ENTRIES	32
+ 
+ static const struct _tlb_table intel_tlb_table[] = {
+ 	{ 0x01, TLB_INST_4K,		32,	" TLB_INST 4 KByte pages, 4-way set associative" },
+@@ -713,7 +724,8 @@ static const struct _tlb_table intel_tlb_table[] = {
+ 	{ 0x5c, TLB_DATA_4K_4M,		128,	" TLB_DATA 4 KByte and 4 MByte pages" },
+ 	{ 0x5d, TLB_DATA_4K_4M,		256,	" TLB_DATA 4 KByte and 4 MByte pages" },
+ 	{ 0x61, TLB_INST_4K,		48,	" TLB_INST 4 KByte pages, full associative" },
+-	{ 0x63, TLB_DATA_1G,		4,	" TLB_DATA 1 GByte pages, 4-way set associative" },
++	{ 0x63, TLB_DATA_1G_2M_4M,	4,	" TLB_DATA 1 GByte pages, 4-way set associative"
++						" (plus 32 entries TLB_DATA 2 MByte or 4 MByte pages, not encoded here)" },
+ 	{ 0x6b, TLB_DATA_4K,		256,	" TLB_DATA 4 KByte pages, 8-way associative" },
+ 	{ 0x6c, TLB_DATA_2M_4M,		128,	" TLB_DATA 2 MByte or 4 MByte pages, 8-way associative" },
+ 	{ 0x6d, TLB_DATA_1G,		16,	" TLB_DATA 1 GByte pages, fully associative" },
+@@ -813,6 +825,12 @@ static void intel_tlb_lookup(const unsigned char desc)
+ 		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
+ 			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
+ 		break;
++	case TLB_DATA_1G_2M_4M:
++		if (tlb_lld_2m[ENTRIES] < TLB_0x63_2M_4M_ENTRIES)
++			tlb_lld_2m[ENTRIES] = TLB_0x63_2M_4M_ENTRIES;
++		if (tlb_lld_4m[ENTRIES] < TLB_0x63_2M_4M_ENTRIES)
++			tlb_lld_4m[ENTRIES] = TLB_0x63_2M_4M_ENTRIES;
++		fallthrough;
+ 	case TLB_DATA_1G:
+ 		if (tlb_lld_1g[ENTRIES] < intel_tlb_table[k].entries)
+ 			tlb_lld_1g[ENTRIES] = intel_tlb_table[k].entries;
+@@ -836,7 +854,7 @@ static void intel_detect_tlb(struct cpuinfo_x86 *c)
+ 		cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);
+ 
+ 		/* If bit 31 is set, this is an unknown format */
+-		for (j = 0 ; j < 3 ; j++)
++		for (j = 0 ; j < 4 ; j++)
+ 			if (regs[j] & (1 << 31))
+ 				regs[j] = 0;
+ 
+diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
+index f5365b32582a5c..def6a2854a4b7c 100644
+--- a/arch/x86/kernel/cpu/microcode/amd.c
++++ b/arch/x86/kernel/cpu/microcode/amd.c
+@@ -175,23 +175,29 @@ static bool need_sha_check(u32 cur_rev)
+ {
+ 	switch (cur_rev >> 8) {
+ 	case 0x80012: return cur_rev <= 0x800126f; break;
++	case 0x80082: return cur_rev <= 0x800820f; break;
+ 	case 0x83010: return cur_rev <= 0x830107c; break;
+ 	case 0x86001: return cur_rev <= 0x860010e; break;
+ 	case 0x86081: return cur_rev <= 0x8608108; break;
+ 	case 0x87010: return cur_rev <= 0x8701034; break;
+ 	case 0x8a000: return cur_rev <= 0x8a0000a; break;
++	case 0xa0010: return cur_rev <= 0xa00107a; break;
+ 	case 0xa0011: return cur_rev <= 0xa0011da; break;
+ 	case 0xa0012: return cur_rev <= 0xa001243; break;
++	case 0xa0082: return cur_rev <= 0xa00820e; break;
+ 	case 0xa1011: return cur_rev <= 0xa101153; break;
+ 	case 0xa1012: return cur_rev <= 0xa10124e; break;
+ 	case 0xa1081: return cur_rev <= 0xa108109; break;
+ 	case 0xa2010: return cur_rev <= 0xa20102f; break;
+ 	case 0xa2012: return cur_rev <= 0xa201212; break;
++	case 0xa4041: return cur_rev <= 0xa404109; break;
++	case 0xa5000: return cur_rev <= 0xa500013; break;
+ 	case 0xa6012: return cur_rev <= 0xa60120a; break;
+ 	case 0xa7041: return cur_rev <= 0xa704109; break;
+ 	case 0xa7052: return cur_rev <= 0xa705208; break;
+ 	case 0xa7080: return cur_rev <= 0xa708009; break;
+ 	case 0xa70c0: return cur_rev <= 0xa70C009; break;
++	case 0xaa001: return cur_rev <= 0xaa00116; break;
+ 	case 0xaa002: return cur_rev <= 0xaa00218; break;
+ 	default: break;
+ 	}
+diff --git a/arch/x86/kernel/cpu/sgx/ioctl.c b/arch/x86/kernel/cpu/sgx/ioctl.c
+index b65ab214bdf57d..776a20172867ea 100644
+--- a/arch/x86/kernel/cpu/sgx/ioctl.c
++++ b/arch/x86/kernel/cpu/sgx/ioctl.c
+@@ -64,6 +64,13 @@ static int sgx_encl_create(struct sgx_encl *encl, struct sgx_secs *secs)
+ 	struct file *backing;
+ 	long ret;
+ 
++	/*
++	 * ECREATE would detect this too, but checking here also ensures
++	 * that the 'encl_size' calculations below can never overflow.
++	 */
++	if (!is_power_of_2(secs->size))
++		return -EINVAL;
++
+ 	va_page = sgx_encl_grow(encl, true);
+ 	if (IS_ERR(va_page))
+ 		return PTR_ERR(va_page);
+diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
+index 83bfecd1a6e40c..9157b4485dedce 100644
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -1387,7 +1387,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
+ 
+ 		entry->ecx = entry->edx = 0;
+ 		if (!enable_pmu || !kvm_cpu_cap_has(X86_FEATURE_PERFMON_V2)) {
+-			entry->eax = entry->ebx;
++			entry->eax = entry->ebx = 0;
+ 			break;
+ 		}
+ 
+diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
+index e9af87b1281407..3ec56bf76ef164 100644
+--- a/arch/x86/kvm/svm/sev.c
++++ b/arch/x86/kvm/svm/sev.c
+@@ -4579,6 +4579,8 @@ void sev_es_vcpu_reset(struct vcpu_svm *svm)
+ 
+ void sev_es_prepare_switch_to_guest(struct vcpu_svm *svm, struct sev_es_save_area *hostsa)
+ {
++	struct kvm *kvm = svm->vcpu.kvm;
++
+ 	/*
+ 	 * All host state for SEV-ES guests is categorized into three swap types
+ 	 * based on how it is handled by hardware during a world switch:
+@@ -4602,10 +4604,15 @@ void sev_es_prepare_switch_to_guest(struct vcpu_svm *svm, struct sev_es_save_are
+ 
+ 	/*
+ 	 * If DebugSwap is enabled, debug registers are loaded but NOT saved by
+-	 * the CPU (Type-B). If DebugSwap is disabled/unsupported, the CPU both
+-	 * saves and loads debug registers (Type-A).
++	 * the CPU (Type-B). If DebugSwap is disabled/unsupported, the CPU does
++	 * not save or load debug registers.  Sadly, on CPUs without
++	 * ALLOWED_SEV_FEATURES, KVM can't prevent SNP guests from enabling
++	 * DebugSwap on secondary vCPUs without KVM's knowledge via "AP Create".
++	 * Save all registers if DebugSwap is supported to prevent host state
++	 * from being clobbered by a misbehaving guest.
+ 	 */
+-	if (sev_vcpu_has_debug_swap(svm)) {
++	if (sev_vcpu_has_debug_swap(svm) ||
++	    (sev_snp_guest(kvm) && cpu_feature_enabled(X86_FEATURE_DEBUG_SWAP))) {
+ 		hostsa->dr0 = native_get_debugreg(0);
+ 		hostsa->dr1 = native_get_debugreg(1);
+ 		hostsa->dr2 = native_get_debugreg(2);
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
+index a7cb7c82b38e39..e39ab7c0be4e9c 100644
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -3167,6 +3167,27 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
+ 			kvm_pr_unimpl_wrmsr(vcpu, ecx, data);
+ 			break;
+ 		}
++
++		/*
++		 * AMD changed the architectural behavior of bits 5:2.  On CPUs
++		 * without BusLockTrap, bits 5:2 control "external pins", but
++		 * on CPUs that support BusLockDetect, bit 2 enables BusLockTrap
++		 * and bits 5:3 are reserved-to-zero.  Sadly, old KVM allowed
++		 * the guest to set bits 5:2 despite not actually virtualizing
++		 * Performance-Monitoring/Breakpoint external pins.  Drop bits
++		 * 5:2 for backwards compatibility.
++		 */
++		data &= ~GENMASK(5, 2);
++
++		/*
++		 * Suppress BTF as KVM doesn't virtualize BTF, but there's no
++		 * way to communicate lack of support to the guest.
++		 */
++		if (data & DEBUGCTLMSR_BTF) {
++			kvm_pr_unimpl_wrmsr(vcpu, MSR_IA32_DEBUGCTLMSR, data);
++			data &= ~DEBUGCTLMSR_BTF;
++		}
++
+ 		if (data & DEBUGCTL_RESERVED_BITS)
+ 			return 1;
+ 
+@@ -4176,6 +4197,18 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_in
+ 
+ 	guest_state_enter_irqoff();
+ 
++	/*
++	 * Set RFLAGS.IF prior to VMRUN, as the host's RFLAGS.IF at the time of
++	 * VMRUN controls whether or not physical IRQs are masked (KVM always
++	 * runs with V_INTR_MASKING_MASK).  Toggle RFLAGS.IF here to avoid the
++	 * temptation to do STI+VMRUN+CLI, as AMD CPUs bleed the STI shadow
++	 * into guest state if delivery of an event during VMRUN triggers a
++	 * #VMEXIT, and the guest_state transitions already tell lockdep that
++	 * IRQs are being enabled/disabled.  Note!  GIF=0 for the entirety of
++	 * this path, so IRQs aren't actually unmasked while running host code.
++	 */
++	raw_local_irq_enable();
++
+ 	amd_clear_divider();
+ 
+ 	if (sev_es_guest(vcpu->kvm))
+@@ -4184,6 +4217,8 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_in
+ 	else
+ 		__svm_vcpu_run(svm, spec_ctrl_intercepted);
+ 
++	raw_local_irq_disable();
++
+ 	guest_state_exit_irqoff();
+ }
+ 
+@@ -4240,6 +4275,16 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu,
+ 	clgi();
+ 	kvm_load_guest_xsave_state(vcpu);
+ 
++	/*
++	 * Hardware only context switches DEBUGCTL if LBR virtualization is
++	 * enabled.  Manually load DEBUGCTL if necessary (and restore it after
++	 * VM-Exit), as running with the host's DEBUGCTL can negatively affect
++	 * guest state and can even be fatal, e.g. due to Bus Lock Detect.
++	 */
++	if (!(svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK) &&
++	    vcpu->arch.host_debugctl != svm->vmcb->save.dbgctl)
++		update_debugctlmsr(svm->vmcb->save.dbgctl);
++
+ 	kvm_wait_lapic_expire(vcpu);
+ 
+ 	/*
+@@ -4267,6 +4312,10 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu,
+ 	if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
+ 		kvm_before_interrupt(vcpu, KVM_HANDLING_NMI);
+ 
++	if (!(svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK) &&
++	    vcpu->arch.host_debugctl != svm->vmcb->save.dbgctl)
++		update_debugctlmsr(vcpu->arch.host_debugctl);
++
+ 	kvm_load_host_xsave_state(vcpu);
+ 	stgi();
+ 
+diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
+index 43fa6a16eb1917..d114efac7af78d 100644
+--- a/arch/x86/kvm/svm/svm.h
++++ b/arch/x86/kvm/svm/svm.h
+@@ -591,7 +591,7 @@ static inline bool is_vnmi_enabled(struct vcpu_svm *svm)
+ /* svm.c */
+ #define MSR_INVALID				0xffffffffU
+ 
+-#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))
++#define DEBUGCTL_RESERVED_BITS (~DEBUGCTLMSR_LBR)
+ 
+ extern bool dump_invalid_vmcb;
+ 
+diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S
+index 2ed80aea3bb130..0c61153b275f64 100644
+--- a/arch/x86/kvm/svm/vmenter.S
++++ b/arch/x86/kvm/svm/vmenter.S
+@@ -170,12 +170,8 @@ SYM_FUNC_START(__svm_vcpu_run)
+ 	mov VCPU_RDI(%_ASM_DI), %_ASM_DI
+ 
+ 	/* Enter guest mode */
+-	sti
+-
+ 3:	vmrun %_ASM_AX
+ 4:
+-	cli
+-
+ 	/* Pop @svm to RAX while it's the only available register. */
+ 	pop %_ASM_AX
+ 
+@@ -340,12 +336,8 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run)
+ 	mov KVM_VMCB_pa(%rax), %rax
+ 
+ 	/* Enter guest mode */
+-	sti
+-
+ 1:	vmrun %rax
+-
+-2:	cli
+-
++2:
+ 	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
+ 	FILL_RETURN_BUFFER %rax, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_VMEXIT
+ 
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index 1af30e3472cdd9..a3d45b01dbadf3 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -1515,16 +1515,12 @@ void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
+  */
+ void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+ {
+-	struct vcpu_vmx *vmx = to_vmx(vcpu);
+-
+ 	if (vcpu->scheduled_out && !kvm_pause_in_guest(vcpu->kvm))
+ 		shrink_ple_window(vcpu);
+ 
+ 	vmx_vcpu_load_vmcs(vcpu, cpu, NULL);
+ 
+ 	vmx_vcpu_pi_load(vcpu, cpu);
+-
+-	vmx->host_debugctlmsr = get_debugctlmsr();
+ }
+ 
+ void vmx_vcpu_put(struct kvm_vcpu *vcpu)
+@@ -7454,8 +7450,8 @@ fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit)
+ 	}
+ 
+ 	/* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */
+-	if (vmx->host_debugctlmsr)
+-		update_debugctlmsr(vmx->host_debugctlmsr);
++	if (vcpu->arch.host_debugctl)
++		update_debugctlmsr(vcpu->arch.host_debugctl);
+ 
+ #ifndef CONFIG_X86_64
+ 	/*
+diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
+index 41bf59bbc6426c..cf57fbf12104f5 100644
+--- a/arch/x86/kvm/vmx/vmx.h
++++ b/arch/x86/kvm/vmx/vmx.h
+@@ -339,8 +339,6 @@ struct vcpu_vmx {
+ 	/* apic deadline value in host tsc */
+ 	u64 hv_deadline_tsc;
+ 
+-	unsigned long host_debugctlmsr;
+-
+ 	/*
+ 	 * Only bits masked by msr_ia32_feature_control_valid_bits can be set in
+ 	 * msr_ia32_feature_control. FEAT_CTL_LOCKED is always included
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index b67a2f46e40b05..8794c0a8a2e447 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -10964,6 +10964,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
+ 		set_debugreg(0, 7);
+ 	}
+ 
++	vcpu->arch.host_debugctl = get_debugctlmsr();
++
+ 	guest_timing_enter_irqoff();
+ 
+ 	for (;;) {
+diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
+index eb503f53c3195c..101725c149c429 100644
+--- a/arch/x86/mm/init.c
++++ b/arch/x86/mm/init.c
+@@ -263,28 +263,33 @@ static void __init probe_page_size_mask(void)
+ }
+ 
+ /*
+- * INVLPG may not properly flush Global entries
+- * on these CPUs when PCIDs are enabled.
++ * INVLPG may not properly flush Global entries on
++ * these CPUs.  New microcode fixes the issue.
+  */
+ static const struct x86_cpu_id invlpg_miss_ids[] = {
+-	X86_MATCH_VFM(INTEL_ALDERLAKE,	    0),
+-	X86_MATCH_VFM(INTEL_ALDERLAKE_L,    0),
+-	X86_MATCH_VFM(INTEL_ATOM_GRACEMONT, 0),
+-	X86_MATCH_VFM(INTEL_RAPTORLAKE,	    0),
+-	X86_MATCH_VFM(INTEL_RAPTORLAKE_P,   0),
+-	X86_MATCH_VFM(INTEL_RAPTORLAKE_S,   0),
++	X86_MATCH_VFM(INTEL_ALDERLAKE,	    0x2e),
++	X86_MATCH_VFM(INTEL_ALDERLAKE_L,    0x42c),
++	X86_MATCH_VFM(INTEL_ATOM_GRACEMONT, 0x11),
++	X86_MATCH_VFM(INTEL_RAPTORLAKE,	    0x118),
++	X86_MATCH_VFM(INTEL_RAPTORLAKE_P,   0x4117),
++	X86_MATCH_VFM(INTEL_RAPTORLAKE_S,   0x2e),
+ 	{}
+ };
+ 
+ static void setup_pcid(void)
+ {
++	const struct x86_cpu_id *invlpg_miss_match;
++
+ 	if (!IS_ENABLED(CONFIG_X86_64))
+ 		return;
+ 
+ 	if (!boot_cpu_has(X86_FEATURE_PCID))
+ 		return;
+ 
+-	if (x86_match_cpu(invlpg_miss_ids)) {
++	invlpg_miss_match = x86_match_cpu(invlpg_miss_ids);
++
++	if (invlpg_miss_match &&
++	    boot_cpu_data.microcode < invlpg_miss_match->driver_data) {
+ 		pr_info("Incomplete global flushes, disabling PCID");
+ 		setup_clear_cpu_cap(X86_FEATURE_PCID);
+ 		return;
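The init.c hunk repurposes the match table's driver_data slot to hold the first microcode revision that fixes the INVLPG erratum, so PCID is only disabled when the running microcode is older. A userspace sketch of that gate; the model strings and two-entry table are illustrative stand-ins for the real X86_MATCH_VFM machinery:

#include <stdio.h>
#include <string.h>

struct cpu_fix {
	const char *model;        /* stand-in for the X86_MATCH_VFM key */
	unsigned int fixed_ucode; /* driver_data: first fixed revision */
};

static const struct cpu_fix invlpg_miss[] = {
	{ "ALDERLAKE",  0x2e  },
	{ "RAPTORLAKE", 0x118 },
	{ NULL, 0 }
};

static int must_disable_pcid(const char *model, unsigned int ucode)
{
	const struct cpu_fix *f;

	for (f = invlpg_miss; f->model; f++)
		if (!strcmp(f->model, model))
			return ucode < f->fixed_ucode;
	return 0; /* unaffected CPU: keep PCID */
}

int main(void)
{
	printf("%d\n", must_disable_pcid("RAPTORLAKE", 0x110)); /* 1: too old  */
	printf("%d\n", must_disable_pcid("RAPTORLAKE", 0x118)); /* 0: fixed    */
	printf("%d\n", must_disable_pcid("SKYLAKE",    0x01));  /* 0: no match */
	return 0;
}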
+diff --git a/block/partitions/efi.c b/block/partitions/efi.c
+index 5e9be13a56a82a..7acba66eed481c 100644
+--- a/block/partitions/efi.c
++++ b/block/partitions/efi.c
+@@ -682,7 +682,7 @@ static void utf16_le_to_7bit(const __le16 *in, unsigned int size, u8 *out)
+ 	out[size] = 0;
+ 
+ 	while (i < size) {
+-		u8 c = le16_to_cpu(in[i]) & 0xff;
++		u8 c = le16_to_cpu(in[i]) & 0x7f;
+ 
+ 		if (c && !isprint(c))
+ 			c = '!';
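The efi.c one-liner makes the mask match the helper's contract: partition names are UTF-16LE on disk and the output is documented as 7-bit ASCII, so the value must be folded into 0x00-0x7f before the isprint() check instead of passing raw high-bit bytes through. A self-contained sketch of the fixed conversion, assuming a little-endian host in place of le16_to_cpu():

#include <ctype.h>
#include <stdint.h>
#include <stdio.h>

static void utf16_le_to_7bit(const uint16_t *in, unsigned int size, char *out)
{
	unsigned int i = 0;

	out[size] = 0;
	while (i < size) {
		char c = in[i] & 0x7f; /* was & 0xff before the fix */

		if (c && !isprint((unsigned char)c))
			c = '!';
		out[i] = c;
		i++;
	}
}

int main(void)
{
	const uint16_t name[] = { 'E', 'F', 'I', 0x00e9 /* e-acute */ };
	char buf[5];

	utf16_le_to_7bit(name, 4, buf);
	printf("%s\n", buf); /* "EFIi": 0xe9 & 0x7f == 0x69 ('i') */
	return 0;
}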
+diff --git a/drivers/base/core.c b/drivers/base/core.c
+index d922cefc1e6625..ec0ef6a0de9427 100644
+--- a/drivers/base/core.c
++++ b/drivers/base/core.c
+@@ -2079,6 +2079,7 @@ static bool __fw_devlink_relax_cycles(struct fwnode_handle *con_handle,
+ out:
+ 	sup_handle->flags &= ~FWNODE_FLAG_VISITED;
+ 	put_device(sup_dev);
++	put_device(con_dev);
+ 	put_device(par_dev);
+ 	return ret;
+ }
+diff --git a/drivers/block/rnull.rs b/drivers/block/rnull.rs
+index b0227cf9ddd387..5de7223beb4d5b 100644
+--- a/drivers/block/rnull.rs
++++ b/drivers/block/rnull.rs
+@@ -32,7 +32,7 @@
+ }
+ 
+ struct NullBlkModule {
+-    _disk: Pin<Box<Mutex<GenDisk<NullBlkDevice>>>>,
++    _disk: Pin<KBox<Mutex<GenDisk<NullBlkDevice>>>>,
+ }
+ 
+ impl kernel::Module for NullBlkModule {
+@@ -47,7 +47,7 @@ fn init(_module: &'static ThisModule) -> Result<Self> {
+             .rotational(false)
+             .build(format_args!("rnullb{}", 0), tagset)?;
+ 
+-        let disk = Box::pin_init(new_mutex!(disk, "nullb:disk"), flags::GFP_KERNEL)?;
++        let disk = KBox::pin_init(new_mutex!(disk, "nullb:disk"), flags::GFP_KERNEL)?;
+ 
+         Ok(Self { _disk: disk })
+     }
+diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
+index 458ac54e7b201e..c7d728d686e5a5 100644
+--- a/drivers/block/ublk_drv.c
++++ b/drivers/block/ublk_drv.c
+@@ -2665,9 +2665,12 @@ static int ublk_ctrl_set_params(struct ublk_device *ub,
+ 	if (ph.len > sizeof(struct ublk_params))
+ 		ph.len = sizeof(struct ublk_params);
+ 
+-	/* parameters can only be changed when device isn't live */
+ 	mutex_lock(&ub->mutex);
+-	if (ub->dev_info.state == UBLK_S_DEV_LIVE) {
++	if (test_bit(UB_STATE_USED, &ub->state)) {
++		/*
++		 * Parameters can only be changed when the device hasn't
++		 * been started yet
++		 */
+ 		ret = -EACCES;
+ 	} else if (copy_from_user(&ub->params, argp, ph.len)) {
+ 		ret = -EFAULT;
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 6bc6dd417adf64..3a0b9dc98707f5 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -3644,6 +3644,7 @@ static ssize_t force_poll_sync_write(struct file *file,
+ }
+ 
+ static const struct file_operations force_poll_sync_fops = {
++	.owner		= THIS_MODULE,
+ 	.open		= simple_open,
+ 	.read		= force_poll_sync_read,
+ 	.write		= force_poll_sync_write,
+diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c
+index 9938bb034c1cbc..acfd673834ed73 100644
+--- a/drivers/bus/mhi/host/pci_generic.c
++++ b/drivers/bus/mhi/host/pci_generic.c
+@@ -1040,8 +1040,9 @@ static void mhi_pci_recovery_work(struct work_struct *work)
+ err_unprepare:
+ 	mhi_unprepare_after_power_down(mhi_cntrl);
+ err_try_reset:
+-	if (pci_reset_function(pdev))
+-		dev_err(&pdev->dev, "Recovery failed\n");
++	err = pci_try_reset_function(pdev);
++	if (err)
++		dev_err(&pdev->dev, "Recovery failed: %d\n", err);
+ }
+ 
+ static void health_check(struct timer_list *t)
+diff --git a/drivers/cdx/cdx.c b/drivers/cdx/cdx.c
+index 07371cb653d356..4af1901c9d524a 100644
+--- a/drivers/cdx/cdx.c
++++ b/drivers/cdx/cdx.c
+@@ -470,8 +470,12 @@ static ssize_t driver_override_show(struct device *dev,
+ 				    struct device_attribute *attr, char *buf)
+ {
+ 	struct cdx_device *cdx_dev = to_cdx_device(dev);
++	ssize_t len;
+ 
+-	return sysfs_emit(buf, "%s\n", cdx_dev->driver_override);
++	device_lock(dev);
++	len = sysfs_emit(buf, "%s\n", cdx_dev->driver_override);
++	device_unlock(dev);
++	return len;
+ }
+ static DEVICE_ATTR_RW(driver_override);
+ 
+diff --git a/drivers/char/misc.c b/drivers/char/misc.c
+index 2cf595d2e10b85..f7dd455dd0dd3c 100644
+--- a/drivers/char/misc.c
++++ b/drivers/char/misc.c
+@@ -264,8 +264,8 @@ int misc_register(struct miscdevice *misc)
+ 		device_create_with_groups(&misc_class, misc->parent, dev,
+ 					  misc, misc->groups, "%s", misc->name);
+ 	if (IS_ERR(misc->this_device)) {
++		misc_minor_free(misc->minor);
+ 		if (is_dynamic) {
+-			misc_minor_free(misc->minor);
+ 			misc->minor = MISC_DYNAMIC_MINOR;
+ 		}
+ 		err = PTR_ERR(misc->this_device);
+diff --git a/drivers/gpio/gpio-aggregator.c b/drivers/gpio/gpio-aggregator.c
+index 38e0fff9afe722..cc6ee4334602aa 100644
+--- a/drivers/gpio/gpio-aggregator.c
++++ b/drivers/gpio/gpio-aggregator.c
+@@ -121,10 +121,15 @@ static ssize_t new_device_store(struct device_driver *driver, const char *buf,
+ 	struct platform_device *pdev;
+ 	int res, id;
+ 
++	if (!try_module_get(THIS_MODULE))
++		return -ENOENT;
++
+ 	/* kernfs guarantees string termination, so count + 1 is safe */
+ 	aggr = kzalloc(sizeof(*aggr) + count + 1, GFP_KERNEL);
+-	if (!aggr)
+-		return -ENOMEM;
++	if (!aggr) {
++		res = -ENOMEM;
++		goto put_module;
++	}
+ 
+ 	memcpy(aggr->args, buf, count + 1);
+ 
+@@ -163,6 +168,7 @@ static ssize_t new_device_store(struct device_driver *driver, const char *buf,
+ 	}
+ 
+ 	aggr->pdev = pdev;
++	module_put(THIS_MODULE);
+ 	return count;
+ 
+ remove_table:
+@@ -177,6 +183,8 @@ static ssize_t new_device_store(struct device_driver *driver, const char *buf,
+ 	kfree(aggr->lookups);
+ free_ga:
+ 	kfree(aggr);
++put_module:
++	module_put(THIS_MODULE);
+ 	return res;
+ }
+ 
+@@ -205,13 +213,19 @@ static ssize_t delete_device_store(struct device_driver *driver,
+ 	if (error)
+ 		return error;
+ 
++	if (!try_module_get(THIS_MODULE))
++		return -ENOENT;
++
+ 	mutex_lock(&gpio_aggregator_lock);
+ 	aggr = idr_remove(&gpio_aggregator_idr, id);
+ 	mutex_unlock(&gpio_aggregator_lock);
+-	if (!aggr)
++	if (!aggr) {
++		module_put(THIS_MODULE);
+ 		return -ENOENT;
++	}
+ 
+ 	gpio_aggregator_free(aggr);
++	module_put(THIS_MODULE);
+ 	return count;
+ }
+ static DRIVER_ATTR_WO(delete_device);
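Both store hunks above follow the same refcount discipline: pin the module with try_module_get() before doing any work, then release the reference on every exit path, success included, so a sysfs write can never outlive an unloading module. A toy model of that balance (the counter is illustrative, not the real module refcount):

#include <stdio.h>

static int refs; /* models the module reference count */

static int try_get(void) { refs++; return 1; }
static void put(void)    { refs--; }

/* Mirrors new_device_store(): take a reference up front, drop it on
 * every exit path, error and success alike. */
static int store(int fail_alloc)
{
	if (!try_get())
		return -2;  /* -ENOENT: module already unloading */
	if (fail_alloc) {
		put();      /* error path, like the goto put_module */
		return -12; /* -ENOMEM */
	}
	put();              /* success path drops the reference too */
	return 0;
}

int main(void)
{
	store(0);
	store(1);
	printf("refs after both paths: %d\n", refs); /* 0: balanced */
	return 0;
}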
+diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
+index 6159fda38d5da1..6641ed5cd8e1c6 100644
+--- a/drivers/gpio/gpio-rcar.c
++++ b/drivers/gpio/gpio-rcar.c
+@@ -40,7 +40,7 @@ struct gpio_rcar_info {
+ 
+ struct gpio_rcar_priv {
+ 	void __iomem *base;
+-	spinlock_t lock;
++	raw_spinlock_t lock;
+ 	struct device *dev;
+ 	struct gpio_chip gpio_chip;
+ 	unsigned int irq_parent;
+@@ -123,7 +123,7 @@ static void gpio_rcar_config_interrupt_input_mode(struct gpio_rcar_priv *p,
+ 	 * "Setting Level-Sensitive Interrupt Input Mode"
+ 	 */
+ 
+-	spin_lock_irqsave(&p->lock, flags);
++	raw_spin_lock_irqsave(&p->lock, flags);
+ 
+ 	/* Configure positive or negative logic in POSNEG */
+ 	gpio_rcar_modify_bit(p, POSNEG, hwirq, !active_high_rising_edge);
+@@ -142,7 +142,7 @@ static void gpio_rcar_config_interrupt_input_mode(struct gpio_rcar_priv *p,
+ 	if (!level_trigger)
+ 		gpio_rcar_write(p, INTCLR, BIT(hwirq));
+ 
+-	spin_unlock_irqrestore(&p->lock, flags);
++	raw_spin_unlock_irqrestore(&p->lock, flags);
+ }
+ 
+ static int gpio_rcar_irq_set_type(struct irq_data *d, unsigned int type)
+@@ -246,7 +246,7 @@ static void gpio_rcar_config_general_input_output_mode(struct gpio_chip *chip,
+ 	 * "Setting General Input Mode"
+ 	 */
+ 
+-	spin_lock_irqsave(&p->lock, flags);
++	raw_spin_lock_irqsave(&p->lock, flags);
+ 
+ 	/* Configure positive logic in POSNEG */
+ 	gpio_rcar_modify_bit(p, POSNEG, gpio, false);
+@@ -261,7 +261,7 @@ static void gpio_rcar_config_general_input_output_mode(struct gpio_chip *chip,
+ 	if (p->info.has_outdtsel && output)
+ 		gpio_rcar_modify_bit(p, OUTDTSEL, gpio, false);
+ 
+-	spin_unlock_irqrestore(&p->lock, flags);
++	raw_spin_unlock_irqrestore(&p->lock, flags);
+ }
+ 
+ static int gpio_rcar_request(struct gpio_chip *chip, unsigned offset)
+@@ -347,7 +347,7 @@ static int gpio_rcar_get_multiple(struct gpio_chip *chip, unsigned long *mask,
+ 		return 0;
+ 	}
+ 
+-	spin_lock_irqsave(&p->lock, flags);
++	raw_spin_lock_irqsave(&p->lock, flags);
+ 	outputs = gpio_rcar_read(p, INOUTSEL);
+ 	m = outputs & bankmask;
+ 	if (m)
+@@ -356,7 +356,7 @@ static int gpio_rcar_get_multiple(struct gpio_chip *chip, unsigned long *mask,
+ 	m = ~outputs & bankmask;
+ 	if (m)
+ 		val |= gpio_rcar_read(p, INDT) & m;
+-	spin_unlock_irqrestore(&p->lock, flags);
++	raw_spin_unlock_irqrestore(&p->lock, flags);
+ 
+ 	bits[0] = val;
+ 	return 0;
+@@ -367,9 +367,9 @@ static void gpio_rcar_set(struct gpio_chip *chip, unsigned offset, int value)
+ 	struct gpio_rcar_priv *p = gpiochip_get_data(chip);
+ 	unsigned long flags;
+ 
+-	spin_lock_irqsave(&p->lock, flags);
++	raw_spin_lock_irqsave(&p->lock, flags);
+ 	gpio_rcar_modify_bit(p, OUTDT, offset, value);
+-	spin_unlock_irqrestore(&p->lock, flags);
++	raw_spin_unlock_irqrestore(&p->lock, flags);
+ }
+ 
+ static void gpio_rcar_set_multiple(struct gpio_chip *chip, unsigned long *mask,
+@@ -386,12 +386,12 @@ static void gpio_rcar_set_multiple(struct gpio_chip *chip, unsigned long *mask,
+ 	if (!bankmask)
+ 		return;
+ 
+-	spin_lock_irqsave(&p->lock, flags);
++	raw_spin_lock_irqsave(&p->lock, flags);
+ 	val = gpio_rcar_read(p, OUTDT);
+ 	val &= ~bankmask;
+ 	val |= (bankmask & bits[0]);
+ 	gpio_rcar_write(p, OUTDT, val);
+-	spin_unlock_irqrestore(&p->lock, flags);
++	raw_spin_unlock_irqrestore(&p->lock, flags);
+ }
+ 
+ static int gpio_rcar_direction_output(struct gpio_chip *chip, unsigned offset,
+@@ -468,7 +468,12 @@ static int gpio_rcar_parse_dt(struct gpio_rcar_priv *p, unsigned int *npins)
+ 	p->info = *info;
+ 
+ 	ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, 0, &args);
+-	*npins = ret == 0 ? args.args[2] : RCAR_MAX_GPIO_PER_BANK;
++	if (ret) {
++		*npins = RCAR_MAX_GPIO_PER_BANK;
++	} else {
++		*npins = args.args[2];
++		of_node_put(args.np);
++	}
+ 
+ 	if (*npins == 0 || *npins > RCAR_MAX_GPIO_PER_BANK) {
+ 		dev_warn(p->dev, "Invalid number of gpio lines %u, using %u\n",
+@@ -505,7 +510,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
+ 		return -ENOMEM;
+ 
+ 	p->dev = dev;
+-	spin_lock_init(&p->lock);
++	raw_spin_lock_init(&p->lock);
+ 
+ 	/* Get device configuration from DT node */
+ 	ret = gpio_rcar_parse_dt(p, &npins);
+diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
+index 27eff741fe9a2a..c36a9dbccd4dd5 100644
+--- a/drivers/gpio/gpio-vf610.c
++++ b/drivers/gpio/gpio-vf610.c
+@@ -15,10 +15,9 @@
+ #include <linux/io.h>
+ #include <linux/ioport.h>
+ #include <linux/irq.h>
+-#include <linux/platform_device.h>
+-#include <linux/of.h>
+-#include <linux/of_irq.h>
+ #include <linux/pinctrl/consumer.h>
++#include <linux/platform_device.h>
++#include <linux/property.h>
+ 
+ #define VF610_GPIO_PER_PORT		32
+ 
+@@ -37,6 +36,7 @@ struct vf610_gpio_port {
+ 	struct clk *clk_port;
+ 	struct clk *clk_gpio;
+ 	int irq;
++	spinlock_t lock; /* protect gpio direction registers */
+ };
+ 
+ #define GPIO_PDOR		0x00
+@@ -125,6 +125,7 @@ static int vf610_gpio_direction_input(struct gpio_chip *chip, unsigned int gpio)
+ 	u32 val;
+ 
+ 	if (port->sdata->have_paddr) {
++		guard(spinlock_irqsave)(&port->lock);
+ 		val = vf610_gpio_readl(port->gpio_base + GPIO_PDDR);
+ 		val &= ~mask;
+ 		vf610_gpio_writel(val, port->gpio_base + GPIO_PDDR);
+@@ -143,6 +144,7 @@ static int vf610_gpio_direction_output(struct gpio_chip *chip, unsigned int gpio
+ 	vf610_gpio_set(chip, gpio, value);
+ 
+ 	if (port->sdata->have_paddr) {
++		guard(spinlock_irqsave)(&port->lock);
+ 		val = vf610_gpio_readl(port->gpio_base + GPIO_PDDR);
+ 		val |= mask;
+ 		vf610_gpio_writel(val, port->gpio_base + GPIO_PDDR);
+@@ -297,7 +299,8 @@ static int vf610_gpio_probe(struct platform_device *pdev)
+ 	if (!port)
+ 		return -ENOMEM;
+ 
+-	port->sdata = of_device_get_match_data(dev);
++	port->sdata = device_get_match_data(dev);
++	spin_lock_init(&port->lock);
+ 
+ 	dual_base = port->sdata->have_dual_base;
+ 
+diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
+index 7408ea8caacc3c..ae53f26da945f8 100644
+--- a/drivers/gpu/drm/Kconfig
++++ b/drivers/gpu/drm/Kconfig
+@@ -211,6 +211,18 @@ config DRM_DEBUG_MODESET_LOCK
+ 
+ 	  If in doubt, say "N".
+ 
++config DRM_CLIENT_SELECTION
++	bool
++	depends on DRM
++	select DRM_CLIENT_SETUP if DRM_FBDEV_EMULATION
++	help
++	  Drivers that support in-kernel DRM clients have to select this
++	  option.
++
++config DRM_CLIENT_SETUP
++	bool
++	depends on DRM_CLIENT_SELECTION
++
+ config DRM_FBDEV_EMULATION
+ 	bool "Enable legacy fbdev support for your modesetting driver"
+ 	depends on DRM
+diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
+index 84746054c721a3..1ec44529447a76 100644
+--- a/drivers/gpu/drm/Makefile
++++ b/drivers/gpu/drm/Makefile
+@@ -144,8 +144,12 @@ drm_kms_helper-y := \
+ 	drm_rect.o \
+ 	drm_self_refresh_helper.o \
+ 	drm_simple_kms_helper.o
++drm_kms_helper-$(CONFIG_DRM_CLIENT_SETUP) += \
++	drm_client_setup.o
+ drm_kms_helper-$(CONFIG_DRM_PANEL_BRIDGE) += bridge/panel.o
+-drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
++drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += \
++	drm_fbdev_client.o \
++	drm_fb_helper.o
+ obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
+ 
+ #
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
+index ad29634f8b44ca..80c85b6cc478a9 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
+@@ -266,8 +266,8 @@ int kfd_queue_acquire_buffers(struct kfd_process_device *pdd, struct queue_prope
+ 	/* EOP buffer is not required for all ASICs */
+ 	if (properties->eop_ring_buffer_address) {
+ 		if (properties->eop_ring_buffer_size != topo_dev->node_props.eop_buffer_size) {
+-			pr_debug("queue eop bo size 0x%lx not equal to node eop buf size 0x%x\n",
+-				properties->eop_buf_bo->tbo.base.size,
++			pr_debug("queue eop bo size 0x%x not equal to node eop buf size 0x%x\n",
++				properties->eop_ring_buffer_size,
+ 				topo_dev->node_props.eop_buffer_size);
+ 			err = -EINVAL;
+ 			goto out_err_unreserve;
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+index d915020a429582..f0eda0ba015600 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+@@ -1455,7 +1455,8 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
+ 	DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
+ 
+ 	/* Invalid input */
+-	if (!plane_state->dst_rect.width ||
++	if (!plane_state ||
++			!plane_state->dst_rect.width ||
+ 			!plane_state->dst_rect.height ||
+ 			!plane_state->src_rect.width ||
+ 			!plane_state->src_rect.height) {
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
+index 452589adaf0468..e5f619c979d80e 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
+@@ -1883,16 +1883,6 @@ static int smu_v14_0_allow_ih_interrupt(struct smu_context *smu)
+ 				    NULL);
+ }
+ 
+-static int smu_v14_0_process_pending_interrupt(struct smu_context *smu)
+-{
+-	int ret = 0;
+-
+-	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_ACDC_BIT))
+-		ret = smu_v14_0_allow_ih_interrupt(smu);
+-
+-	return ret;
+-}
+-
+ int smu_v14_0_enable_thermal_alert(struct smu_context *smu)
+ {
+ 	int ret = 0;
+@@ -1904,7 +1894,7 @@ int smu_v14_0_enable_thermal_alert(struct smu_context *smu)
+ 	if (ret)
+ 		return ret;
+ 
+-	return smu_v14_0_process_pending_interrupt(smu);
++	return smu_v14_0_allow_ih_interrupt(smu);
+ }
+ 
+ int smu_v14_0_disable_thermal_alert(struct smu_context *smu)
+diff --git a/drivers/gpu/drm/drm_client_setup.c b/drivers/gpu/drm/drm_client_setup.c
+new file mode 100644
+index 00000000000000..5969c4ffe31ba4
+--- /dev/null
++++ b/drivers/gpu/drm/drm_client_setup.c
+@@ -0,0 +1,66 @@
++// SPDX-License-Identifier: MIT
++
++#include <drm/drm_client_setup.h>
++#include <drm/drm_device.h>
++#include <drm/drm_fbdev_client.h>
++#include <drm/drm_fourcc.h>
++#include <drm/drm_print.h>
++
++/**
++ * drm_client_setup() - Setup in-kernel DRM clients
++ * @dev: DRM device
++ * @format: Preferred pixel format for the device. Use NULL, unless
++ *          there is clearly a driver-preferred format.
++ *
++ * This function sets up the in-kernel DRM clients. Restore, hotplug
++ * events and teardown are all taken care of.
++ *
++ * Drivers should call drm_client_setup() after registering the new
++ * DRM device with drm_dev_register(). This function is safe to call
++ * even when there are no connectors present. Setup will be retried
++ * on the next hotplug event.
++ *
++ * The clients are destroyed by drm_dev_unregister().
++ */
++void drm_client_setup(struct drm_device *dev, const struct drm_format_info *format)
++{
++	int ret;
++
++	ret = drm_fbdev_client_setup(dev, format);
++	if (ret)
++		drm_warn(dev, "Failed to set up DRM client; error %d\n", ret);
++}
++EXPORT_SYMBOL(drm_client_setup);
++
++/**
++ * drm_client_setup_with_fourcc() - Setup in-kernel DRM clients for color mode
++ * @dev: DRM device
++ * @fourcc: Preferred pixel format as 4CC code for the device
++ *
++ * This function sets up the in-kernel DRM clients. It is equivalent
++ * to drm_client_setup(), but expects a 4CC code as second argument.
++ */
++void drm_client_setup_with_fourcc(struct drm_device *dev, u32 fourcc)
++{
++	drm_client_setup(dev, drm_format_info(fourcc));
++}
++EXPORT_SYMBOL(drm_client_setup_with_fourcc);
++
++/**
++ * drm_client_setup_with_color_mode() - Setup in-kernel DRM clients for color mode
++ * @dev: DRM device
++ * @color_mode: Preferred color mode for the device
++ *
++ * This function sets up the in-kernel DRM clients. It is equivalent
++ * to drm_client_setup(), but expects a color mode as second argument.
++ *
++ * Do not use this function in new drivers. Prefer drm_client_setup() with a
++ * format of NULL.
++ */
++void drm_client_setup_with_color_mode(struct drm_device *dev, unsigned int color_mode)
++{
++	u32 fourcc = drm_driver_color_mode_format(dev, color_mode);
++
++	drm_client_setup_with_fourcc(dev, fourcc);
++}
++EXPORT_SYMBOL(drm_client_setup_with_color_mode);
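Per the kernel-doc above, a driver adopting the new helper registers its device first and then hands client setup to the core. A hedged sketch of that call order; everything prefixed my_ is hypothetical, only drm_dev_register() and drm_client_setup() come from the DRM core and this patch, and the fragment builds in-tree rather than standalone:

static int my_driver_probe(struct platform_device *pdev)
{
	struct my_device *mdev = my_device_create(pdev);	/* hypothetical */
	int ret;

	if (IS_ERR(mdev))
		return PTR_ERR(mdev);

	ret = drm_dev_register(&mdev->drm, 0);
	if (ret)
		return ret;

	/* NULL: no driver-preferred format; safe even with no connectors,
	 * since setup is retried on the next hotplug event. */
	drm_client_setup(&mdev->drm, NULL);

	return 0;
}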
+diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
+index eaac2e5726e750..b15ddbd65e7b5d 100644
+--- a/drivers/gpu/drm/drm_fb_helper.c
++++ b/drivers/gpu/drm/drm_fb_helper.c
+@@ -492,8 +492,8 @@ EXPORT_SYMBOL(drm_fb_helper_init);
+  * @fb_helper: driver-allocated fbdev helper
+  *
+  * A helper to alloc fb_info and the member cmap. Called by the driver
+- * within the fb_probe fb_helper callback function. Drivers do not
+- * need to release the allocated fb_info structure themselves, this is
++ * within the struct &drm_driver.fbdev_probe callback function. Drivers do
++ * not need to release the allocated fb_info structure themselves, this is
+  * automatically done when calling drm_fb_helper_fini().
+  *
+  * RETURNS:
+@@ -1443,67 +1443,27 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
+ EXPORT_SYMBOL(drm_fb_helper_pan_display);
+ 
+ static uint32_t drm_fb_helper_find_format(struct drm_fb_helper *fb_helper, const uint32_t *formats,
+-					  size_t format_count, uint32_t bpp, uint32_t depth)
++					  size_t format_count, unsigned int color_mode)
+ {
+ 	struct drm_device *dev = fb_helper->dev;
+ 	uint32_t format;
+ 	size_t i;
+ 
+-	/*
+-	 * Do not consider YUV or other complicated formats
+-	 * for framebuffers. This means only legacy formats
+-	 * are supported (fmt->depth is a legacy field), but
+-	 * the framebuffer emulation can only deal with such
+-	 * formats, specifically RGB/BGA formats.
+-	 */
+-	format = drm_mode_legacy_fb_format(bpp, depth);
+-	if (!format)
+-		goto err;
++	format = drm_driver_color_mode_format(dev, color_mode);
++	if (!format) {
++		drm_info(dev, "unsupported color mode of %d\n", color_mode);
++		return DRM_FORMAT_INVALID;
++	}
+ 
+ 	for (i = 0; i < format_count; ++i) {
+ 		if (formats[i] == format)
+ 			return format;
+ 	}
+-
+-err:
+-	/* We found nothing. */
+-	drm_warn(dev, "bpp/depth value of %u/%u not supported\n", bpp, depth);
++	drm_warn(dev, "format %p4cc not supported\n", &format);
+ 
+ 	return DRM_FORMAT_INVALID;
+ }
+ 
+-static uint32_t drm_fb_helper_find_color_mode_format(struct drm_fb_helper *fb_helper,
+-						     const uint32_t *formats, size_t format_count,
+-						     unsigned int color_mode)
+-{
+-	struct drm_device *dev = fb_helper->dev;
+-	uint32_t bpp, depth;
+-
+-	switch (color_mode) {
+-	case 1:
+-	case 2:
+-	case 4:
+-	case 8:
+-	case 16:
+-	case 24:
+-		bpp = depth = color_mode;
+-		break;
+-	case 15:
+-		bpp = 16;
+-		depth = 15;
+-		break;
+-	case 32:
+-		bpp = 32;
+-		depth = 24;
+-		break;
+-	default:
+-		drm_info(dev, "unsupported color mode of %d\n", color_mode);
+-		return DRM_FORMAT_INVALID;
+-	}
+-
+-	return drm_fb_helper_find_format(fb_helper, formats, format_count, bpp, depth);
+-}
+-
+ static int __drm_fb_helper_find_sizes(struct drm_fb_helper *fb_helper,
+ 				      struct drm_fb_helper_surface_size *sizes)
+ {
+@@ -1533,10 +1493,10 @@ static int __drm_fb_helper_find_sizes(struct drm_fb_helper *fb_helper,
+ 			if (!cmdline_mode->bpp_specified)
+ 				continue;
+ 
+-			surface_format = drm_fb_helper_find_color_mode_format(fb_helper,
+-									      plane->format_types,
+-									      plane->format_count,
+-									      cmdline_mode->bpp);
++			surface_format = drm_fb_helper_find_format(fb_helper,
++								   plane->format_types,
++								   plane->format_count,
++								   cmdline_mode->bpp);
+ 			if (surface_format != DRM_FORMAT_INVALID)
+ 				break; /* found supported format */
+ 		}
+@@ -1546,10 +1506,10 @@ static int __drm_fb_helper_find_sizes(struct drm_fb_helper *fb_helper,
+ 			break; /* found supported format */
+ 
+ 		/* try preferred color mode */
+-		surface_format = drm_fb_helper_find_color_mode_format(fb_helper,
+-								      plane->format_types,
+-								      plane->format_count,
+-								      fb_helper->preferred_bpp);
++		surface_format = drm_fb_helper_find_format(fb_helper,
++							   plane->format_types,
++							   plane->format_count,
++							   fb_helper->preferred_bpp);
+ 		if (surface_format != DRM_FORMAT_INVALID)
+ 			break; /* found supported format */
+ 	}
+@@ -1650,7 +1610,7 @@ static int drm_fb_helper_find_sizes(struct drm_fb_helper *fb_helper,
+ 
+ /*
+  * Allocates the backing storage and sets up the fbdev info structure through
+- * the ->fb_probe callback.
++ * the ->fbdev_probe callback.
+  */
+ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper)
+ {
+@@ -1668,7 +1628,10 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper)
+ 	}
+ 
+ 	/* push down into drivers */
+-	ret = (*fb_helper->funcs->fb_probe)(fb_helper, &sizes);
++	if (dev->driver->fbdev_probe)
++		ret = dev->driver->fbdev_probe(fb_helper, &sizes);
++	else if (fb_helper->funcs)
++		ret = fb_helper->funcs->fb_probe(fb_helper, &sizes);
+ 	if (ret < 0)
+ 		return ret;
+ 
+@@ -1740,7 +1703,7 @@ static void drm_fb_helper_fill_var(struct fb_info *info,
+  * instance and the drm framebuffer allocated in &drm_fb_helper.fb.
+  *
+  * Drivers should call this (or their equivalent setup code) from their
+- * &drm_fb_helper_funcs.fb_probe callback after having allocated the fbdev
++ * &drm_driver.fbdev_probe callback after having allocated the fbdev
+  * backing storage framebuffer.
+  */
+ void drm_fb_helper_fill_info(struct fb_info *info,
+@@ -1896,7 +1859,7 @@ __drm_fb_helper_initial_config_and_unlock(struct drm_fb_helper *fb_helper)
+  * Note that this also registers the fbdev and so allows userspace to call into
+  * the driver through the fbdev interfaces.
+  *
+- * This function will call down into the &drm_fb_helper_funcs.fb_probe callback
++ * This function will call down into the &drm_driver.fbdev_probe callback
+  * to let the driver allocate and initialize the fbdev info structure and the
+  * drm framebuffer used to back the fbdev. drm_fb_helper_fill_info() is provided
+  * as a helper to setup simple default values for the fbdev info structure.
+diff --git a/drivers/gpu/drm/drm_fbdev_client.c b/drivers/gpu/drm/drm_fbdev_client.c
+new file mode 100644
+index 00000000000000..a09382afe2fb6f
+--- /dev/null
++++ b/drivers/gpu/drm/drm_fbdev_client.c
+@@ -0,0 +1,141 @@
++// SPDX-License-Identifier: MIT
++
++#include <drm/drm_client.h>
++#include <drm/drm_crtc_helper.h>
++#include <drm/drm_drv.h>
++#include <drm/drm_fbdev_client.h>
++#include <drm/drm_fb_helper.h>
++#include <drm/drm_fourcc.h>
++#include <drm/drm_print.h>
++
++/*
++ * struct drm_client_funcs
++ */
++
++static void drm_fbdev_client_unregister(struct drm_client_dev *client)
++{
++	struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
++
++	if (fb_helper->info) {
++		drm_fb_helper_unregister_info(fb_helper);
++	} else {
++		drm_client_release(&fb_helper->client);
++		drm_fb_helper_unprepare(fb_helper);
++		kfree(fb_helper);
++	}
++}
++
++static int drm_fbdev_client_restore(struct drm_client_dev *client)
++{
++	drm_fb_helper_lastclose(client->dev);
++
++	return 0;
++}
++
++static int drm_fbdev_client_hotplug(struct drm_client_dev *client)
++{
++	struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
++	struct drm_device *dev = client->dev;
++	int ret;
++
++	if (dev->fb_helper)
++		return drm_fb_helper_hotplug_event(dev->fb_helper);
++
++	ret = drm_fb_helper_init(dev, fb_helper);
++	if (ret)
++		goto err_drm_err;
++
++	if (!drm_drv_uses_atomic_modeset(dev))
++		drm_helper_disable_unused_functions(dev);
++
++	ret = drm_fb_helper_initial_config(fb_helper);
++	if (ret)
++		goto err_drm_fb_helper_fini;
++
++	return 0;
++
++err_drm_fb_helper_fini:
++	drm_fb_helper_fini(fb_helper);
++err_drm_err:
++	drm_err(dev, "fbdev: Failed to setup emulation (ret=%d)\n", ret);
++	return ret;
++}
++
++static const struct drm_client_funcs drm_fbdev_client_funcs = {
++	.owner		= THIS_MODULE,
++	.unregister	= drm_fbdev_client_unregister,
++	.restore	= drm_fbdev_client_restore,
++	.hotplug	= drm_fbdev_client_hotplug,
++};
++
++/**
++ * drm_fbdev_client_setup() - Setup fbdev emulation
++ * @dev: DRM device
++ * @format: Preferred color format for the device. DRM_FORMAT_XRGB8888
++ *          is used if this is zero.
++ *
++ * This function sets up fbdev emulation. Restore, hotplug events and
++ * teardown are all taken care of. Drivers that do suspend/resume need
++ * to call drm_fb_helper_set_suspend_unlocked() themselves. Simple
++ * drivers might use drm_mode_config_helper_suspend().
++ *
++ * This function is safe to call even when there are no connectors present.
++ * Setup will be retried on the next hotplug event.
++ *
++ * The fbdev client is destroyed by drm_dev_unregister().
++ *
++ * Returns:
++ * 0 on success, or a negative errno code otherwise.
++ */
++int drm_fbdev_client_setup(struct drm_device *dev, const struct drm_format_info *format)
++{
++	struct drm_fb_helper *fb_helper;
++	unsigned int color_mode;
++	int ret;
++
++	/* TODO: Use format info throughout DRM */
++	if (format) {
++		unsigned int bpp = drm_format_info_bpp(format, 0);
++
++		switch (bpp) {
++		case 16:
++			color_mode = format->depth; // could also be 15
++			break;
++		default:
++			color_mode = bpp;
++		}
++	} else {
++		switch (dev->mode_config.preferred_depth) {
++		case 0:
++		case 24:
++			color_mode = 32;
++			break;
++		default:
++			color_mode = dev->mode_config.preferred_depth;
++		}
++	}
++
++	drm_WARN(dev, !dev->registered, "Device has not been registered.\n");
++	drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n");
++
++	fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL);
++	if (!fb_helper)
++		return -ENOMEM;
++	drm_fb_helper_prepare(dev, fb_helper, color_mode, NULL);
++
++	ret = drm_client_init(dev, &fb_helper->client, "fbdev", &drm_fbdev_client_funcs);
++	if (ret) {
++		drm_err(dev, "Failed to register client: %d\n", ret);
++		goto err_drm_client_init;
++	}
++
++	drm_client_register(&fb_helper->client);
++
++	return 0;
++
++err_drm_client_init:
++	drm_fb_helper_unprepare(fb_helper);
++	kfree(fb_helper);
++	return ret;
++}
++EXPORT_SYMBOL(drm_fbdev_client_setup);
+diff --git a/drivers/gpu/drm/drm_fbdev_ttm.c b/drivers/gpu/drm/drm_fbdev_ttm.c
+index 119ffb28aaf952..d799cbe944cd34 100644
+--- a/drivers/gpu/drm/drm_fbdev_ttm.c
++++ b/drivers/gpu/drm/drm_fbdev_ttm.c
+@@ -71,71 +71,7 @@ static const struct fb_ops drm_fbdev_ttm_fb_ops = {
+ static int drm_fbdev_ttm_helper_fb_probe(struct drm_fb_helper *fb_helper,
+ 					     struct drm_fb_helper_surface_size *sizes)
+ {
+-	struct drm_client_dev *client = &fb_helper->client;
+-	struct drm_device *dev = fb_helper->dev;
+-	struct drm_client_buffer *buffer;
+-	struct fb_info *info;
+-	size_t screen_size;
+-	void *screen_buffer;
+-	u32 format;
+-	int ret;
+-
+-	drm_dbg_kms(dev, "surface width(%d), height(%d) and bpp(%d)\n",
+-		    sizes->surface_width, sizes->surface_height,
+-		    sizes->surface_bpp);
+-
+-	format = drm_driver_legacy_fb_format(dev, sizes->surface_bpp,
+-					     sizes->surface_depth);
+-	buffer = drm_client_framebuffer_create(client, sizes->surface_width,
+-					       sizes->surface_height, format);
+-	if (IS_ERR(buffer))
+-		return PTR_ERR(buffer);
+-
+-	fb_helper->buffer = buffer;
+-	fb_helper->fb = buffer->fb;
+-
+-	screen_size = buffer->gem->size;
+-	screen_buffer = vzalloc(screen_size);
+-	if (!screen_buffer) {
+-		ret = -ENOMEM;
+-		goto err_drm_client_framebuffer_delete;
+-	}
+-
+-	info = drm_fb_helper_alloc_info(fb_helper);
+-	if (IS_ERR(info)) {
+-		ret = PTR_ERR(info);
+-		goto err_vfree;
+-	}
+-
+-	drm_fb_helper_fill_info(info, fb_helper, sizes);
+-
+-	info->fbops = &drm_fbdev_ttm_fb_ops;
+-
+-	/* screen */
+-	info->flags |= FBINFO_VIRTFB | FBINFO_READS_FAST;
+-	info->screen_buffer = screen_buffer;
+-	info->fix.smem_len = screen_size;
+-
+-	/* deferred I/O */
+-	fb_helper->fbdefio.delay = HZ / 20;
+-	fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;
+-
+-	info->fbdefio = &fb_helper->fbdefio;
+-	ret = fb_deferred_io_init(info);
+-	if (ret)
+-		goto err_drm_fb_helper_release_info;
+-
+-	return 0;
+-
+-err_drm_fb_helper_release_info:
+-	drm_fb_helper_release_info(fb_helper);
+-err_vfree:
+-	vfree(screen_buffer);
+-err_drm_client_framebuffer_delete:
+-	fb_helper->fb = NULL;
+-	fb_helper->buffer = NULL;
+-	drm_client_framebuffer_delete(buffer);
+-	return ret;
++	return drm_fbdev_ttm_driver_fbdev_probe(fb_helper, sizes);
+ }
+ 
+ static void drm_fbdev_ttm_damage_blit_real(struct drm_fb_helper *fb_helper,
+@@ -240,6 +176,82 @@ static const struct drm_fb_helper_funcs drm_fbdev_ttm_helper_funcs = {
+ 	.fb_dirty = drm_fbdev_ttm_helper_fb_dirty,
+ };
+ 
++/*
++ * struct drm_driver
++ */
++
++int drm_fbdev_ttm_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
++				     struct drm_fb_helper_surface_size *sizes)
++{
++	struct drm_client_dev *client = &fb_helper->client;
++	struct drm_device *dev = fb_helper->dev;
++	struct drm_client_buffer *buffer;
++	struct fb_info *info;
++	size_t screen_size;
++	void *screen_buffer;
++	u32 format;
++	int ret;
++
++	drm_dbg_kms(dev, "surface width(%d), height(%d) and bpp(%d)\n",
++		    sizes->surface_width, sizes->surface_height,
++		    sizes->surface_bpp);
++
++	format = drm_driver_legacy_fb_format(dev, sizes->surface_bpp,
++					     sizes->surface_depth);
++	buffer = drm_client_framebuffer_create(client, sizes->surface_width,
++					       sizes->surface_height, format);
++	if (IS_ERR(buffer))
++		return PTR_ERR(buffer);
++
++	fb_helper->funcs = &drm_fbdev_ttm_helper_funcs;
++	fb_helper->buffer = buffer;
++	fb_helper->fb = buffer->fb;
++
++	screen_size = buffer->gem->size;
++	screen_buffer = vzalloc(screen_size);
++	if (!screen_buffer) {
++		ret = -ENOMEM;
++		goto err_drm_client_framebuffer_delete;
++	}
++
++	info = drm_fb_helper_alloc_info(fb_helper);
++	if (IS_ERR(info)) {
++		ret = PTR_ERR(info);
++		goto err_vfree;
++	}
++
++	drm_fb_helper_fill_info(info, fb_helper, sizes);
++
++	info->fbops = &drm_fbdev_ttm_fb_ops;
++
++	/* screen */
++	info->flags |= FBINFO_VIRTFB | FBINFO_READS_FAST;
++	info->screen_buffer = screen_buffer;
++	info->fix.smem_len = screen_size;
++
++	/* deferred I/O */
++	fb_helper->fbdefio.delay = HZ / 20;
++	fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;
++
++	info->fbdefio = &fb_helper->fbdefio;
++	ret = fb_deferred_io_init(info);
++	if (ret)
++		goto err_drm_fb_helper_release_info;
++
++	return 0;
++
++err_drm_fb_helper_release_info:
++	drm_fb_helper_release_info(fb_helper);
++err_vfree:
++	vfree(screen_buffer);
++err_drm_client_framebuffer_delete:
++	fb_helper->fb = NULL;
++	fb_helper->buffer = NULL;
++	drm_client_framebuffer_delete(buffer);
++	return ret;
++}
++EXPORT_SYMBOL(drm_fbdev_ttm_driver_fbdev_probe);
++
+ static void drm_fbdev_ttm_client_unregister(struct drm_client_dev *client)
+ {
+ 	struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
+diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c
+index 193cf8ed791283..3a94ca211f9ce9 100644
+--- a/drivers/gpu/drm/drm_fourcc.c
++++ b/drivers/gpu/drm/drm_fourcc.c
+@@ -36,7 +36,6 @@
+  * @depth: bit depth per pixel
+  *
+  * Computes a drm fourcc pixel format code for the given @bpp/@depth values.
+- * Useful in fbdev emulation code, since that deals in those values.
+  */
+ uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth)
+ {
+@@ -140,6 +139,35 @@ uint32_t drm_driver_legacy_fb_format(struct drm_device *dev,
+ }
+ EXPORT_SYMBOL(drm_driver_legacy_fb_format);
+ 
++/**
++ * drm_driver_color_mode_format - Compute DRM 4CC code from color mode
++ * @dev: DRM device
++ * @color_mode: command-line color mode
++ *
++ * Computes a DRM 4CC pixel format code for the given color mode using
++ * drm_driver_color_mode(). The color mode is in the format used on the
++ * kernel command line. It specifies the number of bits per pixel
++ * and color depth in a single value.
++ *
++ * Useful in fbdev emulation code, since that deals in those values. The
++ * helper does not consider YUV or other complicated formats. This means
++ * only legacy formats are supported (fmt->depth is a legacy field), but
++ * the framebuffer emulation can only deal with such formats, specifically
++ * RGB/BGR formats.
++ */
++uint32_t drm_driver_color_mode_format(struct drm_device *dev, unsigned int color_mode)
++{
++	switch (color_mode) {
++	case 15:
++		return drm_driver_legacy_fb_format(dev, 16, 15);
++	case 32:
++		return drm_driver_legacy_fb_format(dev, 32, 24);
++	default:
++		return drm_driver_legacy_fb_format(dev, color_mode, color_mode);
++	}
++}
++EXPORT_SYMBOL(drm_driver_color_mode_format);
++
+ /*
+  * Internal function to query information for a given format. See
+  * drm_format_info() for the public API.
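drm_driver_color_mode_format() folds the switch formerly open-coded in drm_fb_helper.c into one place; 15 and 32 are the only color modes whose bpp and depth diverge. A quick userspace sketch of that mapping:

#include <stdio.h>

/* Mirrors the new helper's switch: color mode -> (bpp, depth) before
 * delegating to the legacy fourcc lookup. */
static void color_mode_to_bpp_depth(unsigned int mode,
				    unsigned int *bpp, unsigned int *depth)
{
	switch (mode) {
	case 15:			/* XRGB1555: 16 bpp, depth 15 */
		*bpp = 16;
		*depth = 15;
		break;
	case 32:			/* XRGB8888: 32 bpp, depth 24 */
		*bpp = 32;
		*depth = 24;
		break;
	default:			/* 8, 16, 24, ...: bpp == depth */
		*bpp = *depth = mode;
		break;
	}
}

int main(void)
{
	static const unsigned int modes[] = { 15, 16, 24, 32 };
	unsigned int bpp, depth, i;

	for (i = 0; i < sizeof(modes) / sizeof(modes[0]); i++) {
		color_mode_to_bpp_depth(modes[i], &bpp, &depth);
		printf("mode %2u -> bpp %2u, depth %2u\n", modes[i], bpp, depth);
	}
	return 0;
}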
+diff --git a/drivers/gpu/drm/drm_panic_qr.rs b/drivers/gpu/drm/drm_panic_qr.rs
+index 447740d79d3d2e..bcf248f69252c2 100644
+--- a/drivers/gpu/drm/drm_panic_qr.rs
++++ b/drivers/gpu/drm/drm_panic_qr.rs
+@@ -209,12 +209,9 @@
+ impl Version {
+     /// Returns the smallest QR version that can hold these segments.
+     fn from_segments(segments: &[&Segment<'_>]) -> Option<Version> {
+-        for v in (1..=40).map(|k| Version(k)) {
+-            if v.max_data() * 8 >= segments.iter().map(|s| s.total_size_bits(v)).sum() {
+-                return Some(v);
+-            }
+-        }
+-        None
++        (1..=40)
++            .map(Version)
++            .find(|&v| v.max_data() * 8 >= segments.iter().map(|s| s.total_size_bits(v)).sum())
+     }
+ 
+     fn width(&self) -> u8 {
+@@ -242,7 +239,7 @@ fn g1_blk_size(&self) -> usize {
+     }
+ 
+     fn alignment_pattern(&self) -> &'static [u8] {
+-        &ALIGNMENT_PATTERNS[self.0 - 1]
++        ALIGNMENT_PATTERNS[self.0 - 1]
+     }
+ 
+     fn poly(&self) -> &'static [u8] {
+@@ -479,7 +476,7 @@ struct EncodedMsg<'a> {
+ /// Data to be put in the QR code, with correct segment encoding, padding, and
+ /// Error Code Correction.
+ impl EncodedMsg<'_> {
+-    fn new<'a, 'b>(segments: &[&Segment<'b>], data: &'a mut [u8]) -> Option<EncodedMsg<'a>> {
++    fn new<'a>(segments: &[&Segment<'_>], data: &'a mut [u8]) -> Option<EncodedMsg<'a>> {
+         let version = Version::from_segments(segments)?;
+         let ec_size = version.ec_size();
+         let g1_blocks = version.g1_blocks();
+@@ -492,7 +489,7 @@ fn new<'a, 'b>(segments: &[&Segment<'b>], data: &'a mut [u8]) -> Option<EncodedM
+         data.fill(0);
+ 
+         let mut em = EncodedMsg {
+-            data: data,
++            data,
+             ec_size,
+             g1_blocks,
+             g2_blocks,
+@@ -722,7 +719,10 @@ fn draw_finders(&mut self) {
+ 
+     fn is_finder(&self, x: u8, y: u8) -> bool {
+         let end = self.width - 8;
+-        (x < 8 && y < 8) || (x < 8 && y >= end) || (x >= end && y < 8)
++        #[expect(clippy::nonminimal_bool)]
++        {
++            (x < 8 && y < 8) || (x < 8 && y >= end) || (x >= end && y < 8)
++        }
+     }
+ 
+     // Alignment pattern: 5x5 squares in a grid.
+@@ -931,7 +931,7 @@ fn draw_all(&mut self, data: impl Iterator<Item = u8>) {
+ /// They must remain valid for the duration of the function call.
+ #[no_mangle]
+ pub unsafe extern "C" fn drm_panic_qr_generate(
+-    url: *const i8,
++    url: *const kernel::ffi::c_char,
+     data: *mut u8,
+     data_len: usize,
+     data_size: usize,
+@@ -978,10 +978,11 @@ fn draw_all(&mut self, data: impl Iterator<Item = u8>) {
+ /// * `url_len`: Length of the URL.
+ ///
+ /// * If `url_len` > 0, remove the 2 segments header/length and also count the
+-/// conversion to numeric segments.
++///   conversion to numeric segments.
+ /// * If `url_len` = 0, only removes 3 bytes for 1 binary segment.
+ #[no_mangle]
+ pub extern "C" fn drm_panic_qr_max_data_size(version: u8, url_len: usize) -> usize {
++    #[expect(clippy::manual_range_contains)]
+     if version < 1 || version > 40 {
+         return 0;
+     }
+diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.c b/drivers/gpu/drm/i915/display/i9xx_plane.c
+index 9447f7229b6084..17a1e3801a85c0 100644
+--- a/drivers/gpu/drm/i915/display/i9xx_plane.c
++++ b/drivers/gpu/drm/i915/display/i9xx_plane.c
+@@ -416,7 +416,8 @@ static int i9xx_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
+ 	return DIV_ROUND_UP(pixel_rate * num, den);
+ }
+ 
+-static void i9xx_plane_update_noarm(struct intel_plane *plane,
++static void i9xx_plane_update_noarm(struct intel_dsb *dsb,
++				    struct intel_plane *plane,
+ 				    const struct intel_crtc_state *crtc_state,
+ 				    const struct intel_plane_state *plane_state)
+ {
+@@ -444,7 +445,8 @@ static void i9xx_plane_update_noarm(struct intel_plane *plane,
+ 	}
+ }
+ 
+-static void i9xx_plane_update_arm(struct intel_plane *plane,
++static void i9xx_plane_update_arm(struct intel_dsb *dsb,
++				  struct intel_plane *plane,
+ 				  const struct intel_crtc_state *crtc_state,
+ 				  const struct intel_plane_state *plane_state)
+ {
+@@ -507,7 +509,8 @@ static void i9xx_plane_update_arm(struct intel_plane *plane,
+ 				  intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
+ }
+ 
+-static void i830_plane_update_arm(struct intel_plane *plane,
++static void i830_plane_update_arm(struct intel_dsb *dsb,
++				  struct intel_plane *plane,
+ 				  const struct intel_crtc_state *crtc_state,
+ 				  const struct intel_plane_state *plane_state)
+ {
+@@ -517,11 +520,12 @@ static void i830_plane_update_arm(struct intel_plane *plane,
+ 	 * Additional breakage on i830 causes register reads to return
+ 	 * the last latched value instead of the last written value [ALM026].
+ 	 */
+-	i9xx_plane_update_noarm(plane, crtc_state, plane_state);
+-	i9xx_plane_update_arm(plane, crtc_state, plane_state);
++	i9xx_plane_update_noarm(dsb, plane, crtc_state, plane_state);
++	i9xx_plane_update_arm(dsb, plane, crtc_state, plane_state);
+ }
+ 
+-static void i9xx_plane_disable_arm(struct intel_plane *plane,
++static void i9xx_plane_disable_arm(struct intel_dsb *dsb,
++				   struct intel_plane *plane,
+ 				   const struct intel_crtc_state *crtc_state)
+ {
+ 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+@@ -549,7 +553,8 @@ static void i9xx_plane_disable_arm(struct intel_plane *plane,
+ }
+ 
+ static void
+-g4x_primary_async_flip(struct intel_plane *plane,
++g4x_primary_async_flip(struct intel_dsb *dsb,
++		       struct intel_plane *plane,
+ 		       const struct intel_crtc_state *crtc_state,
+ 		       const struct intel_plane_state *plane_state,
+ 		       bool async_flip)
+@@ -569,7 +574,8 @@ g4x_primary_async_flip(struct intel_plane *plane,
+ }
+ 
+ static void
+-vlv_primary_async_flip(struct intel_plane *plane,
++vlv_primary_async_flip(struct intel_dsb *dsb,
++		       struct intel_plane *plane,
+ 		       const struct intel_crtc_state *crtc_state,
+ 		       const struct intel_plane_state *plane_state,
+ 		       bool async_flip)
+diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
+index 293efc1f841dff..4e95b8eda23f74 100644
+--- a/drivers/gpu/drm/i915/display/icl_dsi.c
++++ b/drivers/gpu/drm/i915/display/icl_dsi.c
+@@ -50,38 +50,38 @@
+ #include "skl_scaler.h"
+ #include "skl_universal_plane.h"
+ 
+-static int header_credits_available(struct drm_i915_private *dev_priv,
++static int header_credits_available(struct intel_display *display,
+ 				    enum transcoder dsi_trans)
+ {
+-	return (intel_de_read(dev_priv, DSI_CMD_TXCTL(dsi_trans)) & FREE_HEADER_CREDIT_MASK)
++	return (intel_de_read(display, DSI_CMD_TXCTL(dsi_trans)) & FREE_HEADER_CREDIT_MASK)
+ 		>> FREE_HEADER_CREDIT_SHIFT;
+ }
+ 
+-static int payload_credits_available(struct drm_i915_private *dev_priv,
++static int payload_credits_available(struct intel_display *display,
+ 				     enum transcoder dsi_trans)
+ {
+-	return (intel_de_read(dev_priv, DSI_CMD_TXCTL(dsi_trans)) & FREE_PLOAD_CREDIT_MASK)
++	return (intel_de_read(display, DSI_CMD_TXCTL(dsi_trans)) & FREE_PLOAD_CREDIT_MASK)
+ 		>> FREE_PLOAD_CREDIT_SHIFT;
+ }
+ 
+-static bool wait_for_header_credits(struct drm_i915_private *dev_priv,
++static bool wait_for_header_credits(struct intel_display *display,
+ 				    enum transcoder dsi_trans, int hdr_credit)
+ {
+-	if (wait_for_us(header_credits_available(dev_priv, dsi_trans) >=
++	if (wait_for_us(header_credits_available(display, dsi_trans) >=
+ 			hdr_credit, 100)) {
+-		drm_err(&dev_priv->drm, "DSI header credits not released\n");
++		drm_err(display->drm, "DSI header credits not released\n");
+ 		return false;
+ 	}
+ 
+ 	return true;
+ }
+ 
+-static bool wait_for_payload_credits(struct drm_i915_private *dev_priv,
++static bool wait_for_payload_credits(struct intel_display *display,
+ 				     enum transcoder dsi_trans, int payld_credit)
+ {
+-	if (wait_for_us(payload_credits_available(dev_priv, dsi_trans) >=
++	if (wait_for_us(payload_credits_available(display, dsi_trans) >=
+ 			payld_credit, 100)) {
+-		drm_err(&dev_priv->drm, "DSI payload credits not released\n");
++		drm_err(display->drm, "DSI payload credits not released\n");
+ 		return false;
+ 	}
+ 
+@@ -98,7 +98,7 @@ static enum transcoder dsi_port_to_transcoder(enum port port)
+ 
+ static void wait_for_cmds_dispatched_to_panel(struct intel_encoder *encoder)
+ {
+-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
++	struct intel_display *display = to_intel_display(encoder);
+ 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ 	struct mipi_dsi_device *dsi;
+ 	enum port port;
+@@ -108,8 +108,8 @@ static void wait_for_cmds_dispatched_to_panel(struct intel_encoder *encoder)
+ 	/* wait for header/payload credits to be released */
+ 	for_each_dsi_port(port, intel_dsi->ports) {
+ 		dsi_trans = dsi_port_to_transcoder(port);
+-		wait_for_header_credits(dev_priv, dsi_trans, MAX_HEADER_CREDIT);
+-		wait_for_payload_credits(dev_priv, dsi_trans, MAX_PLOAD_CREDIT);
++		wait_for_header_credits(display, dsi_trans, MAX_HEADER_CREDIT);
++		wait_for_payload_credits(display, dsi_trans, MAX_PLOAD_CREDIT);
+ 	}
+ 
+ 	/* send nop DCS command */
+@@ -119,22 +119,22 @@ static void wait_for_cmds_dispatched_to_panel(struct intel_encoder *encoder)
+ 		dsi->channel = 0;
+ 		ret = mipi_dsi_dcs_nop(dsi);
+ 		if (ret < 0)
+-			drm_err(&dev_priv->drm,
++			drm_err(display->drm,
+ 				"error sending DCS NOP command\n");
+ 	}
+ 
+ 	/* wait for header credits to be released */
+ 	for_each_dsi_port(port, intel_dsi->ports) {
+ 		dsi_trans = dsi_port_to_transcoder(port);
+-		wait_for_header_credits(dev_priv, dsi_trans, MAX_HEADER_CREDIT);
++		wait_for_header_credits(display, dsi_trans, MAX_HEADER_CREDIT);
+ 	}
+ 
+ 	/* wait for LP TX in progress bit to be cleared */
+ 	for_each_dsi_port(port, intel_dsi->ports) {
+ 		dsi_trans = dsi_port_to_transcoder(port);
+-		if (wait_for_us(!(intel_de_read(dev_priv, DSI_LP_MSG(dsi_trans)) &
++		if (wait_for_us(!(intel_de_read(display, DSI_LP_MSG(dsi_trans)) &
+ 				  LPTX_IN_PROGRESS), 20))
+-			drm_err(&dev_priv->drm, "LPTX bit not cleared\n");
++			drm_err(display->drm, "LPTX bit not cleared\n");
+ 	}
+ }
+ 
+@@ -142,7 +142,7 @@ static int dsi_send_pkt_payld(struct intel_dsi_host *host,
+ 			      const struct mipi_dsi_packet *packet)
+ {
+ 	struct intel_dsi *intel_dsi = host->intel_dsi;
+-	struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
++	struct intel_display *display = to_intel_display(&intel_dsi->base);
+ 	enum transcoder dsi_trans = dsi_port_to_transcoder(host->port);
+ 	const u8 *data = packet->payload;
+ 	u32 len = packet->payload_length;
+@@ -150,20 +150,20 @@ static int dsi_send_pkt_payld(struct intel_dsi_host *host,
+ 
+ 	/* payload queue can accept *256 bytes*, check limit */
+ 	if (len > MAX_PLOAD_CREDIT * 4) {
+-		drm_err(&i915->drm, "payload size exceeds max queue limit\n");
++		drm_err(display->drm, "payload size exceeds max queue limit\n");
+ 		return -EINVAL;
+ 	}
+ 
+ 	for (i = 0; i < len; i += 4) {
+ 		u32 tmp = 0;
+ 
+-		if (!wait_for_payload_credits(i915, dsi_trans, 1))
++		if (!wait_for_payload_credits(display, dsi_trans, 1))
+ 			return -EBUSY;
+ 
+ 		for (j = 0; j < min_t(u32, len - i, 4); j++)
+ 			tmp |= *data++ << 8 * j;
+ 
+-		intel_de_write(i915, DSI_CMD_TXPYLD(dsi_trans), tmp);
++		intel_de_write(display, DSI_CMD_TXPYLD(dsi_trans), tmp);
+ 	}
+ 
+ 	return 0;
+@@ -174,14 +174,14 @@ static int dsi_send_pkt_hdr(struct intel_dsi_host *host,
+ 			    bool enable_lpdt)
+ {
+ 	struct intel_dsi *intel_dsi = host->intel_dsi;
+-	struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
++	struct intel_display *display = to_intel_display(&intel_dsi->base);
+ 	enum transcoder dsi_trans = dsi_port_to_transcoder(host->port);
+ 	u32 tmp;
+ 
+-	if (!wait_for_header_credits(dev_priv, dsi_trans, 1))
++	if (!wait_for_header_credits(display, dsi_trans, 1))
+ 		return -EBUSY;
+ 
+-	tmp = intel_de_read(dev_priv, DSI_CMD_TXHDR(dsi_trans));
++	tmp = intel_de_read(display, DSI_CMD_TXHDR(dsi_trans));
+ 
+ 	if (packet->payload)
+ 		tmp |= PAYLOAD_PRESENT;
+@@ -200,15 +200,14 @@ static int dsi_send_pkt_hdr(struct intel_dsi_host *host,
+ 	tmp |= ((packet->header[0] & DT_MASK) << DT_SHIFT);
+ 	tmp |= (packet->header[1] << PARAM_WC_LOWER_SHIFT);
+ 	tmp |= (packet->header[2] << PARAM_WC_UPPER_SHIFT);
+-	intel_de_write(dev_priv, DSI_CMD_TXHDR(dsi_trans), tmp);
++	intel_de_write(display, DSI_CMD_TXHDR(dsi_trans), tmp);
+ 
+ 	return 0;
+ }
+ 
+ void icl_dsi_frame_update(struct intel_crtc_state *crtc_state)
+ {
+-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
++	struct intel_display *display = to_intel_display(crtc_state);
+ 	u32 mode_flags;
+ 	enum port port;
+ 
+@@ -226,12 +225,13 @@ void icl_dsi_frame_update(struct intel_crtc_state *crtc_state)
+ 	else
+ 		return;
+ 
+-	intel_de_rmw(dev_priv, DSI_CMD_FRMCTL(port), 0, DSI_FRAME_UPDATE_REQUEST);
++	intel_de_rmw(display, DSI_CMD_FRMCTL(port), 0,
++		     DSI_FRAME_UPDATE_REQUEST);
+ }
+ 
+ static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder)
+ {
+-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
++	struct intel_display *display = to_intel_display(encoder);
+ 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ 	enum phy phy;
+ 	u32 tmp, mask, val;
+@@ -245,31 +245,31 @@ static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder)
+ 		mask = SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK;
+ 		val = SCALING_MODE_SEL(0x2) | TAP2_DISABLE | TAP3_DISABLE |
+ 		      RTERM_SELECT(0x6);
+-		tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy));
++		tmp = intel_de_read(display, ICL_PORT_TX_DW5_LN(0, phy));
+ 		tmp &= ~mask;
+ 		tmp |= val;
+-		intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), tmp);
+-		intel_de_rmw(dev_priv, ICL_PORT_TX_DW5_AUX(phy), mask, val);
++		intel_de_write(display, ICL_PORT_TX_DW5_GRP(phy), tmp);
++		intel_de_rmw(display, ICL_PORT_TX_DW5_AUX(phy), mask, val);
+ 
+ 		mask = SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
+ 		       RCOMP_SCALAR_MASK;
+ 		val = SWING_SEL_UPPER(0x2) | SWING_SEL_LOWER(0x2) |
+ 		      RCOMP_SCALAR(0x98);
+-		tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN(0, phy));
++		tmp = intel_de_read(display, ICL_PORT_TX_DW2_LN(0, phy));
+ 		tmp &= ~mask;
+ 		tmp |= val;
+-		intel_de_write(dev_priv, ICL_PORT_TX_DW2_GRP(phy), tmp);
+-		intel_de_rmw(dev_priv, ICL_PORT_TX_DW2_AUX(phy), mask, val);
++		intel_de_write(display, ICL_PORT_TX_DW2_GRP(phy), tmp);
++		intel_de_rmw(display, ICL_PORT_TX_DW2_AUX(phy), mask, val);
+ 
+ 		mask = POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
+ 		       CURSOR_COEFF_MASK;
+ 		val = POST_CURSOR_1(0x0) | POST_CURSOR_2(0x0) |
+ 		      CURSOR_COEFF(0x3f);
+-		intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_AUX(phy), mask, val);
++		intel_de_rmw(display, ICL_PORT_TX_DW4_AUX(phy), mask, val);
+ 
+ 		/* Bspec: must not use GRP register for write */
+ 		for (lane = 0; lane <= 3; lane++)
+-			intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_LN(lane, phy),
++			intel_de_rmw(display, ICL_PORT_TX_DW4_LN(lane, phy),
+ 				     mask, val);
+ 	}
+ }
+@@ -277,13 +277,13 @@ static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder)
+ static void configure_dual_link_mode(struct intel_encoder *encoder,
+ 				     const struct intel_crtc_state *pipe_config)
+ {
+-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
++	struct intel_display *display = to_intel_display(encoder);
+ 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ 	i915_reg_t dss_ctl1_reg, dss_ctl2_reg;
+ 	u32 dss_ctl1;
+ 
+ 	/* FIXME: Move all DSS handling to intel_vdsc.c */
+-	if (DISPLAY_VER(dev_priv) >= 12) {
++	if (DISPLAY_VER(display) >= 12) {
+ 		struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ 
+ 		dss_ctl1_reg = ICL_PIPE_DSS_CTL1(crtc->pipe);
+@@ -293,7 +293,7 @@ static void configure_dual_link_mode(struct intel_encoder *encoder,
+ 		dss_ctl2_reg = DSS_CTL2;
+ 	}
+ 
+-	dss_ctl1 = intel_de_read(dev_priv, dss_ctl1_reg);
++	dss_ctl1 = intel_de_read(display, dss_ctl1_reg);
+ 	dss_ctl1 |= SPLITTER_ENABLE;
+ 	dss_ctl1 &= ~OVERLAP_PIXELS_MASK;
+ 	dss_ctl1 |= OVERLAP_PIXELS(intel_dsi->pixel_overlap);
+@@ -308,19 +308,19 @@ static void configure_dual_link_mode(struct intel_encoder *encoder,
+ 		dl_buffer_depth = hactive / 2 + intel_dsi->pixel_overlap;
+ 
+ 		if (dl_buffer_depth > MAX_DL_BUFFER_TARGET_DEPTH)
+-			drm_err(&dev_priv->drm,
++			drm_err(display->drm,
+ 				"DL buffer depth exceed max value\n");
+ 
+ 		dss_ctl1 &= ~LEFT_DL_BUF_TARGET_DEPTH_MASK;
+ 		dss_ctl1 |= LEFT_DL_BUF_TARGET_DEPTH(dl_buffer_depth);
+-		intel_de_rmw(dev_priv, dss_ctl2_reg, RIGHT_DL_BUF_TARGET_DEPTH_MASK,
++		intel_de_rmw(display, dss_ctl2_reg, RIGHT_DL_BUF_TARGET_DEPTH_MASK,
+ 			     RIGHT_DL_BUF_TARGET_DEPTH(dl_buffer_depth));
+ 	} else {
+ 		/* Interleave */
+ 		dss_ctl1 |= DUAL_LINK_MODE_INTERLEAVE;
+ 	}
+ 
+-	intel_de_write(dev_priv, dss_ctl1_reg, dss_ctl1);
++	intel_de_write(display, dss_ctl1_reg, dss_ctl1);
+ }
+ 
+ /* aka DSI 8X clock */
+@@ -341,6 +341,7 @@ static int afe_clk(struct intel_encoder *encoder,
+ static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder,
+ 					  const struct intel_crtc_state *crtc_state)
+ {
++	struct intel_display *display = to_intel_display(encoder);
+ 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ 	enum port port;
+@@ -360,33 +361,34 @@ static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder,
+ 	}
+ 
+ 	for_each_dsi_port(port, intel_dsi->ports) {
+-		intel_de_write(dev_priv, ICL_DSI_ESC_CLK_DIV(port),
++		intel_de_write(display, ICL_DSI_ESC_CLK_DIV(port),
+ 			       esc_clk_div_m & ICL_ESC_CLK_DIV_MASK);
+-		intel_de_posting_read(dev_priv, ICL_DSI_ESC_CLK_DIV(port));
++		intel_de_posting_read(display, ICL_DSI_ESC_CLK_DIV(port));
+ 	}
+ 
+ 	for_each_dsi_port(port, intel_dsi->ports) {
+-		intel_de_write(dev_priv, ICL_DPHY_ESC_CLK_DIV(port),
++		intel_de_write(display, ICL_DPHY_ESC_CLK_DIV(port),
+ 			       esc_clk_div_m & ICL_ESC_CLK_DIV_MASK);
+-		intel_de_posting_read(dev_priv, ICL_DPHY_ESC_CLK_DIV(port));
++		intel_de_posting_read(display, ICL_DPHY_ESC_CLK_DIV(port));
+ 	}
+ 
+ 	if (IS_ALDERLAKE_S(dev_priv) || IS_ALDERLAKE_P(dev_priv)) {
+ 		for_each_dsi_port(port, intel_dsi->ports) {
+-			intel_de_write(dev_priv, ADL_MIPIO_DW(port, 8),
++			intel_de_write(display, ADL_MIPIO_DW(port, 8),
+ 				       esc_clk_div_m_phy & TX_ESC_CLK_DIV_PHY);
+-			intel_de_posting_read(dev_priv, ADL_MIPIO_DW(port, 8));
++			intel_de_posting_read(display, ADL_MIPIO_DW(port, 8));
+ 		}
+ 	}
+ }
+ 
+-static void get_dsi_io_power_domains(struct drm_i915_private *dev_priv,
+-				     struct intel_dsi *intel_dsi)
++static void get_dsi_io_power_domains(struct intel_dsi *intel_dsi)
+ {
++	struct intel_display *display = to_intel_display(&intel_dsi->base);
++	struct drm_i915_private *dev_priv = to_i915(display->drm);
+ 	enum port port;
+ 
+ 	for_each_dsi_port(port, intel_dsi->ports) {
+-		drm_WARN_ON(&dev_priv->drm, intel_dsi->io_wakeref[port]);
++		drm_WARN_ON(display->drm, intel_dsi->io_wakeref[port]);
+ 		intel_dsi->io_wakeref[port] =
+ 			intel_display_power_get(dev_priv,
+ 						port == PORT_A ?
+@@ -397,15 +399,15 @@ static void get_dsi_io_power_domains(struct drm_i915_private *dev_priv,
+ 
+ static void gen11_dsi_enable_io_power(struct intel_encoder *encoder)
+ {
+-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
++	struct intel_display *display = to_intel_display(encoder);
+ 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ 	enum port port;
+ 
+ 	for_each_dsi_port(port, intel_dsi->ports)
+-		intel_de_rmw(dev_priv, ICL_DSI_IO_MODECTL(port),
++		intel_de_rmw(display, ICL_DSI_IO_MODECTL(port),
+ 			     0, COMBO_PHY_MODE_DSI);
+ 
+-	get_dsi_io_power_domains(dev_priv, intel_dsi);
++	get_dsi_io_power_domains(intel_dsi);
+ }
+ 
+ static void gen11_dsi_power_up_lanes(struct intel_encoder *encoder)
+@@ -421,6 +423,7 @@ static void gen11_dsi_power_up_lanes(struct intel_encoder *encoder)
+ 
+ static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder)
+ {
++	struct intel_display *display = to_intel_display(encoder);
+ 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ 	enum phy phy;
+@@ -429,32 +432,33 @@ static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder)
+ 
+ 	/* Step 4b(i) set loadgen select for transmit and aux lanes */
+ 	for_each_dsi_phy(phy, intel_dsi->phys) {
+-		intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_AUX(phy), LOADGEN_SELECT, 0);
++		intel_de_rmw(display, ICL_PORT_TX_DW4_AUX(phy),
++			     LOADGEN_SELECT, 0);
+ 		for (lane = 0; lane <= 3; lane++)
+-			intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_LN(lane, phy),
++			intel_de_rmw(display, ICL_PORT_TX_DW4_LN(lane, phy),
+ 				     LOADGEN_SELECT, lane != 2 ? LOADGEN_SELECT : 0);
+ 	}
+ 
+ 	/* Step 4b(ii) set latency optimization for transmit and aux lanes */
+ 	for_each_dsi_phy(phy, intel_dsi->phys) {
+-		intel_de_rmw(dev_priv, ICL_PORT_TX_DW2_AUX(phy),
++		intel_de_rmw(display, ICL_PORT_TX_DW2_AUX(phy),
+ 			     FRC_LATENCY_OPTIM_MASK, FRC_LATENCY_OPTIM_VAL(0x5));
+-		tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN(0, phy));
++		tmp = intel_de_read(display, ICL_PORT_TX_DW2_LN(0, phy));
+ 		tmp &= ~FRC_LATENCY_OPTIM_MASK;
+ 		tmp |= FRC_LATENCY_OPTIM_VAL(0x5);
+-		intel_de_write(dev_priv, ICL_PORT_TX_DW2_GRP(phy), tmp);
++		intel_de_write(display, ICL_PORT_TX_DW2_GRP(phy), tmp);
+ 
+ 		/* For EHL, TGL, set latency optimization for PCS_DW1 lanes */
+ 		if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv) ||
+-		    (DISPLAY_VER(dev_priv) >= 12)) {
+-			intel_de_rmw(dev_priv, ICL_PORT_PCS_DW1_AUX(phy),
++		    (DISPLAY_VER(display) >= 12)) {
++			intel_de_rmw(display, ICL_PORT_PCS_DW1_AUX(phy),
+ 				     LATENCY_OPTIM_MASK, LATENCY_OPTIM_VAL(0));
+ 
+-			tmp = intel_de_read(dev_priv,
++			tmp = intel_de_read(display,
+ 					    ICL_PORT_PCS_DW1_LN(0, phy));
+ 			tmp &= ~LATENCY_OPTIM_MASK;
+ 			tmp |= LATENCY_OPTIM_VAL(0x1);
+-			intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy),
++			intel_de_write(display, ICL_PORT_PCS_DW1_GRP(phy),
+ 				       tmp);
+ 		}
+ 	}
+@@ -463,17 +467,17 @@ static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder)
+ 
+ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder)
+ {
+-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
++	struct intel_display *display = to_intel_display(encoder);
+ 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ 	u32 tmp;
+ 	enum phy phy;
+ 
+ 	/* clear common keeper enable bit */
+ 	for_each_dsi_phy(phy, intel_dsi->phys) {
+-		tmp = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN(0, phy));
++		tmp = intel_de_read(display, ICL_PORT_PCS_DW1_LN(0, phy));
+ 		tmp &= ~COMMON_KEEPER_EN;
+-		intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy), tmp);
+-		intel_de_rmw(dev_priv, ICL_PORT_PCS_DW1_AUX(phy), COMMON_KEEPER_EN, 0);
++		intel_de_write(display, ICL_PORT_PCS_DW1_GRP(phy), tmp);
++		intel_de_rmw(display, ICL_PORT_PCS_DW1_AUX(phy), COMMON_KEEPER_EN, 0);
+ 	}
+ 
+ 	/*
+@@ -482,14 +486,15 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder)
+ 	 * as part of lane phy sequence configuration
+ 	 */
+ 	for_each_dsi_phy(phy, intel_dsi->phys)
+-		intel_de_rmw(dev_priv, ICL_PORT_CL_DW5(phy), 0, SUS_CLOCK_CONFIG);
++		intel_de_rmw(display, ICL_PORT_CL_DW5(phy), 0,
++			     SUS_CLOCK_CONFIG);
+ 
+ 	/* Clear training enable to change swing values */
+ 	for_each_dsi_phy(phy, intel_dsi->phys) {
+-		tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy));
++		tmp = intel_de_read(display, ICL_PORT_TX_DW5_LN(0, phy));
+ 		tmp &= ~TX_TRAINING_EN;
+-		intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), tmp);
+-		intel_de_rmw(dev_priv, ICL_PORT_TX_DW5_AUX(phy), TX_TRAINING_EN, 0);
++		intel_de_write(display, ICL_PORT_TX_DW5_GRP(phy), tmp);
++		intel_de_rmw(display, ICL_PORT_TX_DW5_AUX(phy), TX_TRAINING_EN, 0);
+ 	}
+ 
+ 	/* Program swing and de-emphasis */
+@@ -497,26 +502,26 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder)
+ 
+ 	/* Set training enable to trigger update */
+ 	for_each_dsi_phy(phy, intel_dsi->phys) {
+-		tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy));
++		tmp = intel_de_read(display, ICL_PORT_TX_DW5_LN(0, phy));
+ 		tmp |= TX_TRAINING_EN;
+-		intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), tmp);
+-		intel_de_rmw(dev_priv, ICL_PORT_TX_DW5_AUX(phy), 0, TX_TRAINING_EN);
++		intel_de_write(display, ICL_PORT_TX_DW5_GRP(phy), tmp);
++		intel_de_rmw(display, ICL_PORT_TX_DW5_AUX(phy), 0, TX_TRAINING_EN);
+ 	}
+ }
+ 
+ static void gen11_dsi_enable_ddi_buffer(struct intel_encoder *encoder)
+ {
+-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
++	struct intel_display *display = to_intel_display(encoder);
+ 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ 	enum port port;
+ 
+ 	for_each_dsi_port(port, intel_dsi->ports) {
+-		intel_de_rmw(dev_priv, DDI_BUF_CTL(port), 0, DDI_BUF_CTL_ENABLE);
++		intel_de_rmw(display, DDI_BUF_CTL(port), 0, DDI_BUF_CTL_ENABLE);
+ 
+-		if (wait_for_us(!(intel_de_read(dev_priv, DDI_BUF_CTL(port)) &
++		if (wait_for_us(!(intel_de_read(display, DDI_BUF_CTL(port)) &
+ 				  DDI_BUF_IS_IDLE),
+ 				  500))
+-			drm_err(&dev_priv->drm, "DDI port:%c buffer idle\n",
++			drm_err(display->drm, "DDI port:%c buffer idle\n",
+ 				port_name(port));
+ 	}
+ }
+@@ -525,6 +530,7 @@ static void
+ gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder,
+ 			     const struct intel_crtc_state *crtc_state)
+ {
++	struct intel_display *display = to_intel_display(encoder);
+ 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ 	enum port port;
+@@ -532,12 +538,12 @@ gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder,
+ 
+ 	/* Program DPHY clock lanes timings */
+ 	for_each_dsi_port(port, intel_dsi->ports)
+-		intel_de_write(dev_priv, DPHY_CLK_TIMING_PARAM(port),
++		intel_de_write(display, DPHY_CLK_TIMING_PARAM(port),
+ 			       intel_dsi->dphy_reg);
+ 
+ 	/* Program DPHY data lanes timings */
+ 	for_each_dsi_port(port, intel_dsi->ports)
+-		intel_de_write(dev_priv, DPHY_DATA_TIMING_PARAM(port),
++		intel_de_write(display, DPHY_DATA_TIMING_PARAM(port),
+ 			       intel_dsi->dphy_data_lane_reg);
+ 
+ 	/*
+@@ -546,10 +552,10 @@ gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder,
+ 	 * a value '0' inside TA_PARAM_REGISTERS otherwise
+ 	 * leave all fields at HW default values.
+ 	 */
+-	if (DISPLAY_VER(dev_priv) == 11) {
++	if (DISPLAY_VER(display) == 11) {
+ 		if (afe_clk(encoder, crtc_state) <= 800000) {
+ 			for_each_dsi_port(port, intel_dsi->ports)
+-				intel_de_rmw(dev_priv, DPHY_TA_TIMING_PARAM(port),
++				intel_de_rmw(display, DPHY_TA_TIMING_PARAM(port),
+ 					     TA_SURE_MASK,
+ 					     TA_SURE_OVERRIDE | TA_SURE(0));
+ 		}
+@@ -557,7 +563,7 @@ gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder,
+ 
+ 	if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
+ 		for_each_dsi_phy(phy, intel_dsi->phys)
+-			intel_de_rmw(dev_priv, ICL_DPHY_CHKN(phy),
++			intel_de_rmw(display, ICL_DPHY_CHKN(phy),
+ 				     0, ICL_DPHY_CHKN_AFE_OVER_PPI_STRAP);
+ 	}
+ }
+@@ -566,30 +572,30 @@ static void
+ gen11_dsi_setup_timings(struct intel_encoder *encoder,
+ 			const struct intel_crtc_state *crtc_state)
+ {
+-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
++	struct intel_display *display = to_intel_display(encoder);
+ 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ 	enum port port;
+ 
+ 	/* Program T-INIT master registers */
+ 	for_each_dsi_port(port, intel_dsi->ports)
+-		intel_de_rmw(dev_priv, ICL_DSI_T_INIT_MASTER(port),
++		intel_de_rmw(display, ICL_DSI_T_INIT_MASTER(port),
+ 			     DSI_T_INIT_MASTER_MASK, intel_dsi->init_count);
+ 
+ 	/* shadow register inside display core */
+ 	for_each_dsi_port(port, intel_dsi->ports)
+-		intel_de_write(dev_priv, DSI_CLK_TIMING_PARAM(port),
++		intel_de_write(display, DSI_CLK_TIMING_PARAM(port),
+ 			       intel_dsi->dphy_reg);
+ 
+ 	/* shadow register inside display core */
+ 	for_each_dsi_port(port, intel_dsi->ports)
+-		intel_de_write(dev_priv, DSI_DATA_TIMING_PARAM(port),
++		intel_de_write(display, DSI_DATA_TIMING_PARAM(port),
+ 			       intel_dsi->dphy_data_lane_reg);
+ 
+ 	/* shadow register inside display core */
+-	if (DISPLAY_VER(dev_priv) == 11) {
++	if (DISPLAY_VER(display) == 11) {
+ 		if (afe_clk(encoder, crtc_state) <= 800000) {
+ 			for_each_dsi_port(port, intel_dsi->ports) {
+-				intel_de_rmw(dev_priv, DSI_TA_TIMING_PARAM(port),
++				intel_de_rmw(display, DSI_TA_TIMING_PARAM(port),
+ 					     TA_SURE_MASK,
+ 					     TA_SURE_OVERRIDE | TA_SURE(0));
+ 			}
+@@ -599,45 +605,45 @@ gen11_dsi_setup_timings(struct intel_encoder *encoder,
+ 
+ static void gen11_dsi_gate_clocks(struct intel_encoder *encoder)
+ {
+-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
++	struct intel_display *display = to_intel_display(encoder);
+ 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ 	u32 tmp;
+ 	enum phy phy;
+ 
+-	mutex_lock(&dev_priv->display.dpll.lock);
+-	tmp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0);
++	mutex_lock(&display->dpll.lock);
++	tmp = intel_de_read(display, ICL_DPCLKA_CFGCR0);
+ 	for_each_dsi_phy(phy, intel_dsi->phys)
+ 		tmp |= ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
+ 
+-	intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, tmp);
+-	mutex_unlock(&dev_priv->display.dpll.lock);
++	intel_de_write(display, ICL_DPCLKA_CFGCR0, tmp);
++	mutex_unlock(&display->dpll.lock);
+ }
+ 
+ static void gen11_dsi_ungate_clocks(struct intel_encoder *encoder)
+ {
+-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
++	struct intel_display *display = to_intel_display(encoder);
+ 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ 	u32 tmp;
+ 	enum phy phy;
+ 
+-	mutex_lock(&dev_priv->display.dpll.lock);
+-	tmp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0);
++	mutex_lock(&display->dpll.lock);
++	tmp = intel_de_read(display, ICL_DPCLKA_CFGCR0);
+ 	for_each_dsi_phy(phy, intel_dsi->phys)
+ 		tmp &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
+ 
+-	intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, tmp);
+-	mutex_unlock(&dev_priv->display.dpll.lock);
++	intel_de_write(display, ICL_DPCLKA_CFGCR0, tmp);
++	mutex_unlock(&display->dpll.lock);
+ }
+ 
+ static bool gen11_dsi_is_clock_enabled(struct intel_encoder *encoder)
+ {
+-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
++	struct intel_display *display = to_intel_display(encoder);
+ 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ 	bool clock_enabled = false;
+ 	enum phy phy;
+ 	u32 tmp;
+ 
+-	tmp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0);
++	tmp = intel_de_read(display, ICL_DPCLKA_CFGCR0);
+ 
+ 	for_each_dsi_phy(phy, intel_dsi->phys) {
+ 		if (!(tmp & ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy)))
+@@ -650,36 +656,36 @@ static bool gen11_dsi_is_clock_enabled(struct intel_encoder *encoder)
+ static void gen11_dsi_map_pll(struct intel_encoder *encoder,
+ 			      const struct intel_crtc_state *crtc_state)
+ {
+-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
++	struct intel_display *display = to_intel_display(encoder);
+ 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ 	enum phy phy;
+ 	u32 val;
+ 
+-	mutex_lock(&dev_priv->display.dpll.lock);
++	mutex_lock(&display->dpll.lock);
+ 
+-	val = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0);
++	val = intel_de_read(display, ICL_DPCLKA_CFGCR0);
+ 	for_each_dsi_phy(phy, intel_dsi->phys) {
+ 		val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
+ 		val |= ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy);
+ 	}
+-	intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val);
++	intel_de_write(display, ICL_DPCLKA_CFGCR0, val);
+ 
+ 	for_each_dsi_phy(phy, intel_dsi->phys) {
+ 		val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
+ 	}
+-	intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val);
++	intel_de_write(display, ICL_DPCLKA_CFGCR0, val);
+ 
+-	intel_de_posting_read(dev_priv, ICL_DPCLKA_CFGCR0);
++	intel_de_posting_read(display, ICL_DPCLKA_CFGCR0);
+ 
+-	mutex_unlock(&dev_priv->display.dpll.lock);
++	mutex_unlock(&display->dpll.lock);
+ }
+ 
+ static void
+ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
+ 			       const struct intel_crtc_state *pipe_config)
+ {
+-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
++	struct intel_display *display = to_intel_display(encoder);
+ 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ 	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ 	enum pipe pipe = crtc->pipe;
+@@ -689,7 +695,7 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
+ 
+ 	for_each_dsi_port(port, intel_dsi->ports) {
+ 		dsi_trans = dsi_port_to_transcoder(port);
+-		tmp = intel_de_read(dev_priv, DSI_TRANS_FUNC_CONF(dsi_trans));
++		tmp = intel_de_read(display, DSI_TRANS_FUNC_CONF(dsi_trans));
+ 
+ 		if (intel_dsi->eotp_pkt)
+ 			tmp &= ~EOTP_DISABLED;
+@@ -745,7 +751,7 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
+ 			}
+ 		}
+ 
+-		if (DISPLAY_VER(dev_priv) >= 12) {
++		if (DISPLAY_VER(display) >= 12) {
+ 			if (is_vid_mode(intel_dsi))
+ 				tmp |= BLANKING_PACKET_ENABLE;
+ 		}
+@@ -778,15 +784,15 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
+ 			tmp |= TE_SOURCE_GPIO;
+ 		}
+ 
+-		intel_de_write(dev_priv, DSI_TRANS_FUNC_CONF(dsi_trans), tmp);
++		intel_de_write(display, DSI_TRANS_FUNC_CONF(dsi_trans), tmp);
+ 	}
+ 
+ 	/* enable port sync mode if dual link */
+ 	if (intel_dsi->dual_link) {
+ 		for_each_dsi_port(port, intel_dsi->ports) {
+ 			dsi_trans = dsi_port_to_transcoder(port);
+-			intel_de_rmw(dev_priv,
+-				     TRANS_DDI_FUNC_CTL2(dev_priv, dsi_trans),
++			intel_de_rmw(display,
++				     TRANS_DDI_FUNC_CTL2(display, dsi_trans),
+ 				     0, PORT_SYNC_MODE_ENABLE);
+ 		}
+ 
+@@ -798,10 +804,10 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
+ 		dsi_trans = dsi_port_to_transcoder(port);
+ 
+ 		/* select data lane width */
+-		tmp = intel_de_read(dev_priv,
+-				    TRANS_DDI_FUNC_CTL(dev_priv, dsi_trans));
+-		tmp &= ~DDI_PORT_WIDTH_MASK;
+-		tmp |= DDI_PORT_WIDTH(intel_dsi->lane_count);
++		tmp = intel_de_read(display,
++				    TRANS_DDI_FUNC_CTL(display, dsi_trans));
++		tmp &= ~TRANS_DDI_PORT_WIDTH_MASK;
++		tmp |= TRANS_DDI_PORT_WIDTH(intel_dsi->lane_count);
+ 
+ 		/* select input pipe */
+ 		tmp &= ~TRANS_DDI_EDP_INPUT_MASK;
+@@ -825,16 +831,16 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
+ 
+ 		/* enable DDI buffer */
+ 		tmp |= TRANS_DDI_FUNC_ENABLE;
+-		intel_de_write(dev_priv,
+-			       TRANS_DDI_FUNC_CTL(dev_priv, dsi_trans), tmp);
++		intel_de_write(display,
++			       TRANS_DDI_FUNC_CTL(display, dsi_trans), tmp);
+ 	}
+ 
+ 	/* wait for link ready */
+ 	for_each_dsi_port(port, intel_dsi->ports) {
+ 		dsi_trans = dsi_port_to_transcoder(port);
+-		if (wait_for_us((intel_de_read(dev_priv, DSI_TRANS_FUNC_CONF(dsi_trans)) &
++		if (wait_for_us((intel_de_read(display, DSI_TRANS_FUNC_CONF(dsi_trans)) &
+ 				 LINK_READY), 2500))
+-			drm_err(&dev_priv->drm, "DSI link not ready\n");
++			drm_err(display->drm, "DSI link not ready\n");
+ 	}
+ }
+ 
+@@ -842,7 +848,7 @@ static void
+ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
+ 				 const struct intel_crtc_state *crtc_state)
+ {
+-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
++	struct intel_display *display = to_intel_display(encoder);
+ 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ 	const struct drm_display_mode *adjusted_mode =
+ 		&crtc_state->hw.adjusted_mode;
+@@ -909,17 +915,17 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
+ 
+ 	/* minimum hactive as per bspec: 256 pixels */
+ 	if (adjusted_mode->crtc_hdisplay < 256)
+-		drm_err(&dev_priv->drm, "hactive is less then 256 pixels\n");
++		drm_err(display->drm, "hactive is less then 256 pixels\n");
+ 
+ 	/* if RGB666 format, then hactive must be multiple of 4 pixels */
+ 	if (intel_dsi->pixel_format == MIPI_DSI_FMT_RGB666 && hactive % 4 != 0)
+-		drm_err(&dev_priv->drm,
++		drm_err(display->drm,
+ 			"hactive pixels are not multiple of 4\n");
+ 
+ 	/* program TRANS_HTOTAL register */
+ 	for_each_dsi_port(port, intel_dsi->ports) {
+ 		dsi_trans = dsi_port_to_transcoder(port);
+-		intel_de_write(dev_priv, TRANS_HTOTAL(dev_priv, dsi_trans),
++		intel_de_write(display, TRANS_HTOTAL(display, dsi_trans),
+ 			       HACTIVE(hactive - 1) | HTOTAL(htotal - 1));
+ 	}
+ 
+@@ -928,12 +934,12 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
+ 		if (intel_dsi->video_mode == NON_BURST_SYNC_PULSE) {
+ 			/* BSPEC: hsync size should be atleast 16 pixels */
+ 			if (hsync_size < 16)
+-				drm_err(&dev_priv->drm,
++				drm_err(display->drm,
+ 					"hsync size < 16 pixels\n");
+ 		}
+ 
+ 		if (hback_porch < 16)
+-			drm_err(&dev_priv->drm, "hback porch < 16 pixels\n");
++			drm_err(display->drm, "hback porch < 16 pixels\n");
+ 
+ 		if (intel_dsi->dual_link) {
+ 			hsync_start /= 2;
+@@ -942,8 +948,8 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
+ 
+ 		for_each_dsi_port(port, intel_dsi->ports) {
+ 			dsi_trans = dsi_port_to_transcoder(port);
+-			intel_de_write(dev_priv,
+-				       TRANS_HSYNC(dev_priv, dsi_trans),
++			intel_de_write(display,
++				       TRANS_HSYNC(display, dsi_trans),
+ 				       HSYNC_START(hsync_start - 1) | HSYNC_END(hsync_end - 1));
+ 		}
+ 	}
+@@ -957,22 +963,22 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
+ 		 * struct drm_display_mode.
+ 		 * For interlace mode: program required pixel minus 2
+ 		 */
+-		intel_de_write(dev_priv, TRANS_VTOTAL(dev_priv, dsi_trans),
++		intel_de_write(display, TRANS_VTOTAL(display, dsi_trans),
+ 			       VACTIVE(vactive - 1) | VTOTAL(vtotal - 1));
+ 	}
+ 
+ 	if (vsync_end < vsync_start || vsync_end > vtotal)
+-		drm_err(&dev_priv->drm, "Invalid vsync_end value\n");
++		drm_err(display->drm, "Invalid vsync_end value\n");
+ 
+ 	if (vsync_start < vactive)
+-		drm_err(&dev_priv->drm, "vsync_start less than vactive\n");
++		drm_err(display->drm, "vsync_start less than vactive\n");
+ 
+ 	/* program TRANS_VSYNC register for video mode only */
+ 	if (is_vid_mode(intel_dsi)) {
+ 		for_each_dsi_port(port, intel_dsi->ports) {
+ 			dsi_trans = dsi_port_to_transcoder(port);
+-			intel_de_write(dev_priv,
+-				       TRANS_VSYNC(dev_priv, dsi_trans),
++			intel_de_write(display,
++				       TRANS_VSYNC(display, dsi_trans),
+ 				       VSYNC_START(vsync_start - 1) | VSYNC_END(vsync_end - 1));
+ 		}
+ 	}
+@@ -986,8 +992,8 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
+ 	if (is_vid_mode(intel_dsi)) {
+ 		for_each_dsi_port(port, intel_dsi->ports) {
+ 			dsi_trans = dsi_port_to_transcoder(port);
+-			intel_de_write(dev_priv,
+-				       TRANS_VSYNCSHIFT(dev_priv, dsi_trans),
++			intel_de_write(display,
++				       TRANS_VSYNCSHIFT(display, dsi_trans),
+ 				       vsync_shift);
+ 		}
+ 	}
+@@ -998,11 +1004,11 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
+ 	 * FIXME get rid of these local hacks and do it right,
+ 	 * this will not handle eg. delayed vblank correctly.
+ 	 */
+-	if (DISPLAY_VER(dev_priv) >= 12) {
++	if (DISPLAY_VER(display) >= 12) {
+ 		for_each_dsi_port(port, intel_dsi->ports) {
+ 			dsi_trans = dsi_port_to_transcoder(port);
+-			intel_de_write(dev_priv,
+-				       TRANS_VBLANK(dev_priv, dsi_trans),
++			intel_de_write(display,
++				       TRANS_VBLANK(display, dsi_trans),
+ 				       VBLANK_START(vactive - 1) | VBLANK_END(vtotal - 1));
+ 		}
+ 	}
+@@ -1010,20 +1016,20 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
+ 
+ static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder)
+ {
+-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
++	struct intel_display *display = to_intel_display(encoder);
+ 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ 	enum port port;
+ 	enum transcoder dsi_trans;
+ 
+ 	for_each_dsi_port(port, intel_dsi->ports) {
+ 		dsi_trans = dsi_port_to_transcoder(port);
+-		intel_de_rmw(dev_priv, TRANSCONF(dev_priv, dsi_trans), 0,
++		intel_de_rmw(display, TRANSCONF(display, dsi_trans), 0,
+ 			     TRANSCONF_ENABLE);
+ 
+ 		/* wait for transcoder to be enabled */
+-		if (intel_de_wait_for_set(dev_priv, TRANSCONF(dev_priv, dsi_trans),
++		if (intel_de_wait_for_set(display, TRANSCONF(display, dsi_trans),
+ 					  TRANSCONF_STATE_ENABLE, 10))
+-			drm_err(&dev_priv->drm,
++			drm_err(display->drm,
+ 				"DSI transcoder not enabled\n");
+ 	}
+ }
+@@ -1031,7 +1037,7 @@ static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder)
+ static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder,
+ 				     const struct intel_crtc_state *crtc_state)
+ {
+-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
++	struct intel_display *display = to_intel_display(encoder);
+ 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ 	enum port port;
+ 	enum transcoder dsi_trans;
+@@ -1055,21 +1061,21 @@ static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder,
+ 		dsi_trans = dsi_port_to_transcoder(port);
+ 
+ 		/* program hst_tx_timeout */
+-		intel_de_rmw(dev_priv, DSI_HSTX_TO(dsi_trans),
++		intel_de_rmw(display, DSI_HSTX_TO(dsi_trans),
+ 			     HSTX_TIMEOUT_VALUE_MASK,
+ 			     HSTX_TIMEOUT_VALUE(hs_tx_timeout));
+ 
+ 		/* FIXME: DSI_CALIB_TO */
+ 
+ 		/* program lp_rx_host timeout */
+-		intel_de_rmw(dev_priv, DSI_LPRX_HOST_TO(dsi_trans),
++		intel_de_rmw(display, DSI_LPRX_HOST_TO(dsi_trans),
+ 			     LPRX_TIMEOUT_VALUE_MASK,
+ 			     LPRX_TIMEOUT_VALUE(lp_rx_timeout));
+ 
+ 		/* FIXME: DSI_PWAIT_TO */
+ 
+ 		/* program turn around timeout */
+-		intel_de_rmw(dev_priv, DSI_TA_TO(dsi_trans),
++		intel_de_rmw(display, DSI_TA_TO(dsi_trans),
+ 			     TA_TIMEOUT_VALUE_MASK,
+ 			     TA_TIMEOUT_VALUE(ta_timeout));
+ 	}
+@@ -1078,7 +1084,7 @@ static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder,
+ static void gen11_dsi_config_util_pin(struct intel_encoder *encoder,
+ 				      bool enable)
+ {
+-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
++	struct intel_display *display = to_intel_display(encoder);
+ 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ 	u32 tmp;
+ 
+@@ -1090,7 +1096,7 @@ static void gen11_dsi_config_util_pin(struct intel_encoder *encoder,
+ 	if (is_vid_mode(intel_dsi) || (intel_dsi->ports & BIT(PORT_B)))
+ 		return;
+ 
+-	tmp = intel_de_read(dev_priv, UTIL_PIN_CTL);
++	tmp = intel_de_read(display, UTIL_PIN_CTL);
+ 
+ 	if (enable) {
+ 		tmp |= UTIL_PIN_DIRECTION_INPUT;
+@@ -1098,7 +1104,7 @@ static void gen11_dsi_config_util_pin(struct intel_encoder *encoder,
+ 	} else {
+ 		tmp &= ~UTIL_PIN_ENABLE;
+ 	}
+-	intel_de_write(dev_priv, UTIL_PIN_CTL, tmp);
++	intel_de_write(display, UTIL_PIN_CTL, tmp);
+ }
+ 
+ static void
+@@ -1136,7 +1142,7 @@ gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder,
+ 
+ static void gen11_dsi_powerup_panel(struct intel_encoder *encoder)
+ {
+-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
++	struct intel_display *display = to_intel_display(encoder);
+ 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ 	struct mipi_dsi_device *dsi;
+ 	enum port port;
+@@ -1152,14 +1158,14 @@ static void gen11_dsi_powerup_panel(struct intel_encoder *encoder)
+ 		 * FIXME: This uses the number of DW's currently in the payload
+ 		 * receive queue. This is probably not what we want here.
+ 		 */
+-		tmp = intel_de_read(dev_priv, DSI_CMD_RXCTL(dsi_trans));
++		tmp = intel_de_read(display, DSI_CMD_RXCTL(dsi_trans));
+ 		tmp &= NUMBER_RX_PLOAD_DW_MASK;
+ 		/* multiply "Number Rx Payload DW" by 4 to get max value */
+ 		tmp = tmp * 4;
+ 		dsi = intel_dsi->dsi_hosts[port]->device;
+ 		ret = mipi_dsi_set_maximum_return_packet_size(dsi, tmp);
+ 		if (ret < 0)
+-			drm_err(&dev_priv->drm,
++			drm_err(display->drm,
+ 				"error setting max return pkt size%d\n", tmp);
+ 	}
+ 
+@@ -1219,10 +1225,10 @@ static void gen11_dsi_pre_enable(struct intel_atomic_state *state,
+ static void icl_apply_kvmr_pipe_a_wa(struct intel_encoder *encoder,
+ 				     enum pipe pipe, bool enable)
+ {
+-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
++	struct intel_display *display = to_intel_display(encoder);
+ 
+-	if (DISPLAY_VER(dev_priv) == 11 && pipe == PIPE_B)
+-		intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
++	if (DISPLAY_VER(display) == 11 && pipe == PIPE_B)
++		intel_de_rmw(display, CHICKEN_PAR1_1,
+ 			     IGNORE_KVMR_PIPE_A,
+ 			     enable ? IGNORE_KVMR_PIPE_A : 0);
+ }
+@@ -1235,13 +1241,13 @@ static void icl_apply_kvmr_pipe_a_wa(struct intel_encoder *encoder,
+  */
+ static void adlp_set_lp_hs_wakeup_gb(struct intel_encoder *encoder)
+ {
+-	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
++	struct intel_display *display = to_intel_display(encoder);
+ 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ 	enum port port;
+ 
+-	if (DISPLAY_VER(i915) == 13) {
++	if (DISPLAY_VER(display) == 13) {
+ 		for_each_dsi_port(port, intel_dsi->ports)
+-			intel_de_rmw(i915, TGL_DSI_CHKN_REG(port),
++			intel_de_rmw(display, TGL_DSI_CHKN_REG(port),
+ 				     TGL_DSI_CHKN_LSHS_GB_MASK,
+ 				     TGL_DSI_CHKN_LSHS_GB(4));
+ 	}
+@@ -1275,7 +1281,7 @@ static void gen11_dsi_enable(struct intel_atomic_state *state,
+ 
+ static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder)
+ {
+-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
++	struct intel_display *display = to_intel_display(encoder);
+ 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ 	enum port port;
+ 	enum transcoder dsi_trans;
+@@ -1284,13 +1290,13 @@ static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder)
+ 		dsi_trans = dsi_port_to_transcoder(port);
+ 
+ 		/* disable transcoder */
+-		intel_de_rmw(dev_priv, TRANSCONF(dev_priv, dsi_trans),
++		intel_de_rmw(display, TRANSCONF(display, dsi_trans),
+ 			     TRANSCONF_ENABLE, 0);
+ 
+ 		/* wait for transcoder to be disabled */
+-		if (intel_de_wait_for_clear(dev_priv, TRANSCONF(dev_priv, dsi_trans),
++		if (intel_de_wait_for_clear(display, TRANSCONF(display, dsi_trans),
+ 					    TRANSCONF_STATE_ENABLE, 50))
+-			drm_err(&dev_priv->drm,
++			drm_err(display->drm,
+ 				"DSI trancoder not disabled\n");
+ 	}
+ }
+@@ -1307,7 +1313,7 @@ static void gen11_dsi_powerdown_panel(struct intel_encoder *encoder)
+ 
+ static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder)
+ {
+-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
++	struct intel_display *display = to_intel_display(encoder);
+ 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ 	enum port port;
+ 	enum transcoder dsi_trans;
+@@ -1316,29 +1322,29 @@ static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder)
+ 	/* disable periodic update mode */
+ 	if (is_cmd_mode(intel_dsi)) {
+ 		for_each_dsi_port(port, intel_dsi->ports)
+-			intel_de_rmw(dev_priv, DSI_CMD_FRMCTL(port),
++			intel_de_rmw(display, DSI_CMD_FRMCTL(port),
+ 				     DSI_PERIODIC_FRAME_UPDATE_ENABLE, 0);
+ 	}
+ 
+ 	/* put dsi link in ULPS */
+ 	for_each_dsi_port(port, intel_dsi->ports) {
+ 		dsi_trans = dsi_port_to_transcoder(port);
+-		tmp = intel_de_read(dev_priv, DSI_LP_MSG(dsi_trans));
++		tmp = intel_de_read(display, DSI_LP_MSG(dsi_trans));
+ 		tmp |= LINK_ENTER_ULPS;
+ 		tmp &= ~LINK_ULPS_TYPE_LP11;
+-		intel_de_write(dev_priv, DSI_LP_MSG(dsi_trans), tmp);
++		intel_de_write(display, DSI_LP_MSG(dsi_trans), tmp);
+ 
+-		if (wait_for_us((intel_de_read(dev_priv, DSI_LP_MSG(dsi_trans)) &
++		if (wait_for_us((intel_de_read(display, DSI_LP_MSG(dsi_trans)) &
+ 				 LINK_IN_ULPS),
+ 				10))
+-			drm_err(&dev_priv->drm, "DSI link not in ULPS\n");
++			drm_err(display->drm, "DSI link not in ULPS\n");
+ 	}
+ 
+ 	/* disable ddi function */
+ 	for_each_dsi_port(port, intel_dsi->ports) {
+ 		dsi_trans = dsi_port_to_transcoder(port);
+-		intel_de_rmw(dev_priv,
+-			     TRANS_DDI_FUNC_CTL(dev_priv, dsi_trans),
++		intel_de_rmw(display,
++			     TRANS_DDI_FUNC_CTL(display, dsi_trans),
+ 			     TRANS_DDI_FUNC_ENABLE, 0);
+ 	}
+ 
+@@ -1346,8 +1352,8 @@ static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder)
+ 	if (intel_dsi->dual_link) {
+ 		for_each_dsi_port(port, intel_dsi->ports) {
+ 			dsi_trans = dsi_port_to_transcoder(port);
+-			intel_de_rmw(dev_priv,
+-				     TRANS_DDI_FUNC_CTL2(dev_priv, dsi_trans),
++			intel_de_rmw(display,
++				     TRANS_DDI_FUNC_CTL2(display, dsi_trans),
+ 				     PORT_SYNC_MODE_ENABLE, 0);
+ 		}
+ 	}
+@@ -1355,18 +1361,18 @@ static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder)
+ 
+ static void gen11_dsi_disable_port(struct intel_encoder *encoder)
+ {
+-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
++	struct intel_display *display = to_intel_display(encoder);
+ 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ 	enum port port;
+ 
+ 	gen11_dsi_ungate_clocks(encoder);
+ 	for_each_dsi_port(port, intel_dsi->ports) {
+-		intel_de_rmw(dev_priv, DDI_BUF_CTL(port), DDI_BUF_CTL_ENABLE, 0);
++		intel_de_rmw(display, DDI_BUF_CTL(port), DDI_BUF_CTL_ENABLE, 0);
+ 
+-		if (wait_for_us((intel_de_read(dev_priv, DDI_BUF_CTL(port)) &
++		if (wait_for_us((intel_de_read(display, DDI_BUF_CTL(port)) &
+ 				 DDI_BUF_IS_IDLE),
+ 				 8))
+-			drm_err(&dev_priv->drm,
++			drm_err(display->drm,
+ 				"DDI port:%c buffer not idle\n",
+ 				port_name(port));
+ 	}
+@@ -1375,6 +1381,7 @@ static void gen11_dsi_disable_port(struct intel_encoder *encoder)
+ 
+ static void gen11_dsi_disable_io_power(struct intel_encoder *encoder)
+ {
++	struct intel_display *display = to_intel_display(encoder);
+ 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ 	enum port port;
+@@ -1392,7 +1399,7 @@ static void gen11_dsi_disable_io_power(struct intel_encoder *encoder)
+ 
+ 	/* set mode to DDI */
+ 	for_each_dsi_port(port, intel_dsi->ports)
+-		intel_de_rmw(dev_priv, ICL_DSI_IO_MODECTL(port),
++		intel_de_rmw(display, ICL_DSI_IO_MODECTL(port),
+ 			     COMBO_PHY_MODE_DSI, 0);
+ }
+ 
+@@ -1504,8 +1511,7 @@ static void gen11_dsi_get_timings(struct intel_encoder *encoder,
+ 
+ static bool gen11_dsi_is_periodic_cmd_mode(struct intel_dsi *intel_dsi)
+ {
+-	struct drm_device *dev = intel_dsi->base.base.dev;
+-	struct drm_i915_private *dev_priv = to_i915(dev);
++	struct intel_display *display = to_intel_display(&intel_dsi->base);
+ 	enum transcoder dsi_trans;
+ 	u32 val;
+ 
+@@ -1514,7 +1520,7 @@ static bool gen11_dsi_is_periodic_cmd_mode(struct intel_dsi *intel_dsi)
+ 	else
+ 		dsi_trans = TRANSCODER_DSI_0;
+ 
+-	val = intel_de_read(dev_priv, DSI_TRANS_FUNC_CONF(dsi_trans));
++	val = intel_de_read(display, DSI_TRANS_FUNC_CONF(dsi_trans));
+ 	return (val & DSI_PERIODIC_FRAME_UPDATE_ENABLE);
+ }
+ 
+@@ -1557,7 +1563,7 @@ static void gen11_dsi_get_config(struct intel_encoder *encoder,
+ static void gen11_dsi_sync_state(struct intel_encoder *encoder,
+ 				 const struct intel_crtc_state *crtc_state)
+ {
+-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
++	struct intel_display *display = to_intel_display(encoder);
+ 	struct intel_crtc *intel_crtc;
+ 	enum pipe pipe;
+ 
+@@ -1568,9 +1574,9 @@ static void gen11_dsi_sync_state(struct intel_encoder *encoder,
+ 	pipe = intel_crtc->pipe;
+ 
+ 	/* wa verify 1409054076:icl,jsl,ehl */
+-	if (DISPLAY_VER(dev_priv) == 11 && pipe == PIPE_B &&
+-	    !(intel_de_read(dev_priv, CHICKEN_PAR1_1) & IGNORE_KVMR_PIPE_A))
+-		drm_dbg_kms(&dev_priv->drm,
++	if (DISPLAY_VER(display) == 11 && pipe == PIPE_B &&
++	    !(intel_de_read(display, CHICKEN_PAR1_1) & IGNORE_KVMR_PIPE_A))
++		drm_dbg_kms(display->drm,
+ 			    "[ENCODER:%d:%s] BIOS left IGNORE_KVMR_PIPE_A cleared with pipe B enabled\n",
+ 			    encoder->base.base.id,
+ 			    encoder->base.name);
+@@ -1579,9 +1585,9 @@ static void gen11_dsi_sync_state(struct intel_encoder *encoder,
+ static int gen11_dsi_dsc_compute_config(struct intel_encoder *encoder,
+ 					struct intel_crtc_state *crtc_state)
+ {
+-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
++	struct intel_display *display = to_intel_display(encoder);
+ 	struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
+-	int dsc_max_bpc = DISPLAY_VER(dev_priv) >= 12 ? 12 : 10;
++	int dsc_max_bpc = DISPLAY_VER(display) >= 12 ? 12 : 10;
+ 	bool use_dsc;
+ 	int ret;
+ 
+@@ -1606,12 +1612,12 @@ static int gen11_dsi_dsc_compute_config(struct intel_encoder *encoder,
+ 		return ret;
+ 
+ 	/* DSI specific sanity checks on the common code */
+-	drm_WARN_ON(&dev_priv->drm, vdsc_cfg->vbr_enable);
+-	drm_WARN_ON(&dev_priv->drm, vdsc_cfg->simple_422);
+-	drm_WARN_ON(&dev_priv->drm,
++	drm_WARN_ON(display->drm, vdsc_cfg->vbr_enable);
++	drm_WARN_ON(display->drm, vdsc_cfg->simple_422);
++	drm_WARN_ON(display->drm,
+ 		    vdsc_cfg->pic_width % vdsc_cfg->slice_width);
+-	drm_WARN_ON(&dev_priv->drm, vdsc_cfg->slice_height < 8);
+-	drm_WARN_ON(&dev_priv->drm,
++	drm_WARN_ON(display->drm, vdsc_cfg->slice_height < 8);
++	drm_WARN_ON(display->drm,
+ 		    vdsc_cfg->pic_height % vdsc_cfg->slice_height);
+ 
+ 	ret = drm_dsc_compute_rc_parameters(vdsc_cfg);
+@@ -1627,7 +1633,7 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder,
+ 				    struct intel_crtc_state *pipe_config,
+ 				    struct drm_connector_state *conn_state)
+ {
+-	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
++	struct intel_display *display = to_intel_display(encoder);
+ 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ 	struct intel_connector *intel_connector = intel_dsi->attached_connector;
+ 	struct drm_display_mode *adjusted_mode =
+@@ -1661,7 +1667,7 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder,
+ 	pipe_config->clock_set = true;
+ 
+ 	if (gen11_dsi_dsc_compute_config(encoder, pipe_config))
+-		drm_dbg_kms(&i915->drm, "Attempting to use DSC failed\n");
++		drm_dbg_kms(display->drm, "Attempting to use DSC failed\n");
+ 
+ 	pipe_config->port_clock = afe_clk(encoder, pipe_config) / 5;
+ 
+@@ -1679,15 +1685,13 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder,
+ static void gen11_dsi_get_power_domains(struct intel_encoder *encoder,
+ 					struct intel_crtc_state *crtc_state)
+ {
+-	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+-
+-	get_dsi_io_power_domains(i915,
+-				 enc_to_intel_dsi(encoder));
++	get_dsi_io_power_domains(enc_to_intel_dsi(encoder));
+ }
+ 
+ static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
+ 				   enum pipe *pipe)
+ {
++	struct intel_display *display = to_intel_display(encoder);
+ 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ 	enum transcoder dsi_trans;
+@@ -1703,8 +1707,8 @@ static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
+ 
+ 	for_each_dsi_port(port, intel_dsi->ports) {
+ 		dsi_trans = dsi_port_to_transcoder(port);
+-		tmp = intel_de_read(dev_priv,
+-				    TRANS_DDI_FUNC_CTL(dev_priv, dsi_trans));
++		tmp = intel_de_read(display,
++				    TRANS_DDI_FUNC_CTL(display, dsi_trans));
+ 		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
+ 		case TRANS_DDI_EDP_INPUT_A_ON:
+ 			*pipe = PIPE_A;
+@@ -1719,11 +1723,11 @@ static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
+ 			*pipe = PIPE_D;
+ 			break;
+ 		default:
+-			drm_err(&dev_priv->drm, "Invalid PIPE input\n");
++			drm_err(display->drm, "Invalid PIPE input\n");
+ 			goto out;
+ 		}
+ 
+-		tmp = intel_de_read(dev_priv, TRANSCONF(dev_priv, dsi_trans));
++		tmp = intel_de_read(display, TRANSCONF(display, dsi_trans));
+ 		ret = tmp & TRANSCONF_ENABLE;
+ 	}
+ out:
+@@ -1833,8 +1837,7 @@ static const struct mipi_dsi_host_ops gen11_dsi_host_ops = {
+ 
+ static void icl_dphy_param_init(struct intel_dsi *intel_dsi)
+ {
+-	struct drm_device *dev = intel_dsi->base.base.dev;
+-	struct drm_i915_private *dev_priv = to_i915(dev);
++	struct intel_display *display = to_intel_display(&intel_dsi->base);
+ 	struct intel_connector *connector = intel_dsi->attached_connector;
+ 	struct mipi_config *mipi_config = connector->panel.vbt.dsi.config;
+ 	u32 tlpx_ns;
+@@ -1858,7 +1861,7 @@ static void icl_dphy_param_init(struct intel_dsi *intel_dsi)
+ 	 */
+ 	prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * 4, tlpx_ns);
+ 	if (prepare_cnt > ICL_PREPARE_CNT_MAX) {
+-		drm_dbg_kms(&dev_priv->drm, "prepare_cnt out of range (%d)\n",
++		drm_dbg_kms(display->drm, "prepare_cnt out of range (%d)\n",
+ 			    prepare_cnt);
+ 		prepare_cnt = ICL_PREPARE_CNT_MAX;
+ 	}
+@@ -1867,7 +1870,7 @@ static void icl_dphy_param_init(struct intel_dsi *intel_dsi)
+ 	clk_zero_cnt = DIV_ROUND_UP(mipi_config->tclk_prepare_clkzero -
+ 				    ths_prepare_ns, tlpx_ns);
+ 	if (clk_zero_cnt > ICL_CLK_ZERO_CNT_MAX) {
+-		drm_dbg_kms(&dev_priv->drm,
++		drm_dbg_kms(display->drm,
+ 			    "clk_zero_cnt out of range (%d)\n", clk_zero_cnt);
+ 		clk_zero_cnt = ICL_CLK_ZERO_CNT_MAX;
+ 	}
+@@ -1875,7 +1878,7 @@ static void icl_dphy_param_init(struct intel_dsi *intel_dsi)
+ 	/* trail cnt in escape clocks*/
+ 	trail_cnt = DIV_ROUND_UP(tclk_trail_ns, tlpx_ns);
+ 	if (trail_cnt > ICL_TRAIL_CNT_MAX) {
+-		drm_dbg_kms(&dev_priv->drm, "trail_cnt out of range (%d)\n",
++		drm_dbg_kms(display->drm, "trail_cnt out of range (%d)\n",
+ 			    trail_cnt);
+ 		trail_cnt = ICL_TRAIL_CNT_MAX;
+ 	}
+@@ -1883,7 +1886,7 @@ static void icl_dphy_param_init(struct intel_dsi *intel_dsi)
+ 	/* tclk pre count in escape clocks */
+ 	tclk_pre_cnt = DIV_ROUND_UP(mipi_config->tclk_pre, tlpx_ns);
+ 	if (tclk_pre_cnt > ICL_TCLK_PRE_CNT_MAX) {
+-		drm_dbg_kms(&dev_priv->drm,
++		drm_dbg_kms(display->drm,
+ 			    "tclk_pre_cnt out of range (%d)\n", tclk_pre_cnt);
+ 		tclk_pre_cnt = ICL_TCLK_PRE_CNT_MAX;
+ 	}
+@@ -1892,7 +1895,7 @@ static void icl_dphy_param_init(struct intel_dsi *intel_dsi)
+ 	hs_zero_cnt = DIV_ROUND_UP(mipi_config->ths_prepare_hszero -
+ 				   ths_prepare_ns, tlpx_ns);
+ 	if (hs_zero_cnt > ICL_HS_ZERO_CNT_MAX) {
+-		drm_dbg_kms(&dev_priv->drm, "hs_zero_cnt out of range (%d)\n",
++		drm_dbg_kms(display->drm, "hs_zero_cnt out of range (%d)\n",
+ 			    hs_zero_cnt);
+ 		hs_zero_cnt = ICL_HS_ZERO_CNT_MAX;
+ 	}
+@@ -1900,7 +1903,7 @@ static void icl_dphy_param_init(struct intel_dsi *intel_dsi)
+ 	/* hs exit zero cnt in escape clocks */
+ 	exit_zero_cnt = DIV_ROUND_UP(mipi_config->ths_exit, tlpx_ns);
+ 	if (exit_zero_cnt > ICL_EXIT_ZERO_CNT_MAX) {
+-		drm_dbg_kms(&dev_priv->drm,
++		drm_dbg_kms(display->drm,
+ 			    "exit_zero_cnt out of range (%d)\n",
+ 			    exit_zero_cnt);
+ 		exit_zero_cnt = ICL_EXIT_ZERO_CNT_MAX;
+@@ -1942,10 +1945,9 @@ static void icl_dsi_add_properties(struct intel_connector *connector)
+ 						       fixed_mode->vdisplay);
+ }
+ 
+-void icl_dsi_init(struct drm_i915_private *dev_priv,
++void icl_dsi_init(struct intel_display *display,
+ 		  const struct intel_bios_encoder_data *devdata)
+ {
+-	struct intel_display *display = &dev_priv->display;
+ 	struct intel_dsi *intel_dsi;
+ 	struct intel_encoder *encoder;
+ 	struct intel_connector *intel_connector;
+@@ -1973,7 +1975,8 @@ void icl_dsi_init(struct drm_i915_private *dev_priv,
+ 	encoder->devdata = devdata;
+ 
+ 	/* register DSI encoder with DRM subsystem */
+-	drm_encoder_init(&dev_priv->drm, &encoder->base, &gen11_dsi_encoder_funcs,
++	drm_encoder_init(display->drm, &encoder->base,
++			 &gen11_dsi_encoder_funcs,
+ 			 DRM_MODE_ENCODER_DSI, "DSI %c", port_name(port));
+ 
+ 	encoder->pre_pll_enable = gen11_dsi_pre_pll_enable;
+@@ -1998,7 +2001,8 @@ void icl_dsi_init(struct drm_i915_private *dev_priv,
+ 	encoder->shutdown = intel_dsi_shutdown;
+ 
+ 	/* register DSI connector with DRM subsystem */
+-	drm_connector_init(&dev_priv->drm, connector, &gen11_dsi_connector_funcs,
++	drm_connector_init(display->drm, connector,
++			   &gen11_dsi_connector_funcs,
+ 			   DRM_MODE_CONNECTOR_DSI);
+ 	drm_connector_helper_add(connector, &gen11_dsi_connector_helper_funcs);
+ 	connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+@@ -2011,12 +2015,12 @@ void icl_dsi_init(struct drm_i915_private *dev_priv,
+ 
+ 	intel_bios_init_panel_late(display, &intel_connector->panel, encoder->devdata, NULL);
+ 
+-	mutex_lock(&dev_priv->drm.mode_config.mutex);
++	mutex_lock(&display->drm->mode_config.mutex);
+ 	intel_panel_add_vbt_lfp_fixed_mode(intel_connector);
+-	mutex_unlock(&dev_priv->drm.mode_config.mutex);
++	mutex_unlock(&display->drm->mode_config.mutex);
+ 
+ 	if (!intel_panel_preferred_fixed_mode(intel_connector)) {
+-		drm_err(&dev_priv->drm, "DSI fixed mode info missing\n");
++		drm_err(display->drm, "DSI fixed mode info missing\n");
+ 		goto err;
+ 	}
+ 
+@@ -2029,10 +2033,10 @@ void icl_dsi_init(struct drm_i915_private *dev_priv,
+ 	else
+ 		intel_dsi->ports = BIT(port);
+ 
+-	if (drm_WARN_ON(&dev_priv->drm, intel_connector->panel.vbt.dsi.bl_ports & ~intel_dsi->ports))
++	if (drm_WARN_ON(display->drm, intel_connector->panel.vbt.dsi.bl_ports & ~intel_dsi->ports))
+ 		intel_connector->panel.vbt.dsi.bl_ports &= intel_dsi->ports;
+ 
+-	if (drm_WARN_ON(&dev_priv->drm, intel_connector->panel.vbt.dsi.cabc_ports & ~intel_dsi->ports))
++	if (drm_WARN_ON(display->drm, intel_connector->panel.vbt.dsi.cabc_ports & ~intel_dsi->ports))
+ 		intel_connector->panel.vbt.dsi.cabc_ports &= intel_dsi->ports;
+ 
+ 	for_each_dsi_port(port, intel_dsi->ports) {
+@@ -2046,7 +2050,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv,
+ 	}
+ 
+ 	if (!intel_dsi_vbt_init(intel_dsi, MIPI_DSI_GENERIC_PANEL_ID)) {
+-		drm_dbg_kms(&dev_priv->drm, "no device found\n");
++		drm_dbg_kms(display->drm, "no device found\n");
+ 		goto err;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/i915/display/icl_dsi.h b/drivers/gpu/drm/i915/display/icl_dsi.h
+index 43fa7d72eeb180..099fc50e35b415 100644
+--- a/drivers/gpu/drm/i915/display/icl_dsi.h
++++ b/drivers/gpu/drm/i915/display/icl_dsi.h
+@@ -6,11 +6,11 @@
+ #ifndef __ICL_DSI_H__
+ #define __ICL_DSI_H__
+ 
+-struct drm_i915_private;
+ struct intel_bios_encoder_data;
+ struct intel_crtc_state;
++struct intel_display;
+ 
+-void icl_dsi_init(struct drm_i915_private *dev_priv,
++void icl_dsi_init(struct intel_display *display,
+ 		  const struct intel_bios_encoder_data *devdata);
+ void icl_dsi_frame_update(struct intel_crtc_state *crtc_state);
+ 
+diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+index e979786aa5cf3d..5c2a7987cccb44 100644
+--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
++++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+@@ -790,7 +790,8 @@ skl_next_plane_to_commit(struct intel_atomic_state *state,
+ 	return NULL;
+ }
+ 
+-void intel_plane_update_noarm(struct intel_plane *plane,
++void intel_plane_update_noarm(struct intel_dsb *dsb,
++			      struct intel_plane *plane,
+ 			      const struct intel_crtc_state *crtc_state,
+ 			      const struct intel_plane_state *plane_state)
+ {
+@@ -799,10 +800,11 @@ void intel_plane_update_noarm(struct intel_plane *plane,
+ 	trace_intel_plane_update_noarm(plane, crtc);
+ 
+ 	if (plane->update_noarm)
+-		plane->update_noarm(plane, crtc_state, plane_state);
++		plane->update_noarm(dsb, plane, crtc_state, plane_state);
+ }
+ 
+-void intel_plane_async_flip(struct intel_plane *plane,
++void intel_plane_async_flip(struct intel_dsb *dsb,
++			    struct intel_plane *plane,
+ 			    const struct intel_crtc_state *crtc_state,
+ 			    const struct intel_plane_state *plane_state,
+ 			    bool async_flip)
+@@ -810,34 +812,37 @@ void intel_plane_async_flip(struct intel_plane *plane,
+ 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ 
+ 	trace_intel_plane_async_flip(plane, crtc, async_flip);
+-	plane->async_flip(plane, crtc_state, plane_state, async_flip);
++	plane->async_flip(dsb, plane, crtc_state, plane_state, async_flip);
+ }
+ 
+-void intel_plane_update_arm(struct intel_plane *plane,
++void intel_plane_update_arm(struct intel_dsb *dsb,
++			    struct intel_plane *plane,
+ 			    const struct intel_crtc_state *crtc_state,
+ 			    const struct intel_plane_state *plane_state)
+ {
+ 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ 
+ 	if (crtc_state->do_async_flip && plane->async_flip) {
+-		intel_plane_async_flip(plane, crtc_state, plane_state, true);
++		intel_plane_async_flip(dsb, plane, crtc_state, plane_state, true);
+ 		return;
+ 	}
+ 
+ 	trace_intel_plane_update_arm(plane, crtc);
+-	plane->update_arm(plane, crtc_state, plane_state);
++	plane->update_arm(dsb, plane, crtc_state, plane_state);
+ }
+ 
+-void intel_plane_disable_arm(struct intel_plane *plane,
++void intel_plane_disable_arm(struct intel_dsb *dsb,
++			     struct intel_plane *plane,
+ 			     const struct intel_crtc_state *crtc_state)
+ {
+ 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ 
+ 	trace_intel_plane_disable_arm(plane, crtc);
+-	plane->disable_arm(plane, crtc_state);
++	plane->disable_arm(dsb, plane, crtc_state);
+ }
+ 
+-void intel_crtc_planes_update_noarm(struct intel_atomic_state *state,
++void intel_crtc_planes_update_noarm(struct intel_dsb *dsb,
++				    struct intel_atomic_state *state,
+ 				    struct intel_crtc *crtc)
+ {
+ 	struct intel_crtc_state *new_crtc_state =
+@@ -862,11 +867,13 @@ void intel_crtc_planes_update_noarm(struct intel_atomic_state *state,
+ 		/* TODO: for mailbox updates this should be skipped */
+ 		if (new_plane_state->uapi.visible ||
+ 		    new_plane_state->planar_slave)
+-			intel_plane_update_noarm(plane, new_crtc_state, new_plane_state);
++			intel_plane_update_noarm(dsb, plane,
++						 new_crtc_state, new_plane_state);
+ 	}
+ }
+ 
+-static void skl_crtc_planes_update_arm(struct intel_atomic_state *state,
++static void skl_crtc_planes_update_arm(struct intel_dsb *dsb,
++				       struct intel_atomic_state *state,
+ 				       struct intel_crtc *crtc)
+ {
+ 	struct intel_crtc_state *old_crtc_state =
+@@ -893,13 +900,14 @@ static void skl_crtc_planes_update_arm(struct intel_atomic_state *state,
+ 		 */
+ 		if (new_plane_state->uapi.visible ||
+ 		    new_plane_state->planar_slave)
+-			intel_plane_update_arm(plane, new_crtc_state, new_plane_state);
++			intel_plane_update_arm(dsb, plane, new_crtc_state, new_plane_state);
+ 		else
+-			intel_plane_disable_arm(plane, new_crtc_state);
++			intel_plane_disable_arm(dsb, plane, new_crtc_state);
+ 	}
+ }
+ 
+-static void i9xx_crtc_planes_update_arm(struct intel_atomic_state *state,
++static void i9xx_crtc_planes_update_arm(struct intel_dsb *dsb,
++					struct intel_atomic_state *state,
+ 					struct intel_crtc *crtc)
+ {
+ 	struct intel_crtc_state *new_crtc_state =
+@@ -919,21 +927,22 @@ static void i9xx_crtc_planes_update_arm(struct intel_atomic_state *state,
+ 		 * would have to be called here as well.
+ 		 */
+ 		if (new_plane_state->uapi.visible)
+-			intel_plane_update_arm(plane, new_crtc_state, new_plane_state);
++			intel_plane_update_arm(dsb, plane, new_crtc_state, new_plane_state);
+ 		else
+-			intel_plane_disable_arm(plane, new_crtc_state);
++			intel_plane_disable_arm(dsb, plane, new_crtc_state);
+ 	}
+ }
+ 
+-void intel_crtc_planes_update_arm(struct intel_atomic_state *state,
++void intel_crtc_planes_update_arm(struct intel_dsb *dsb,
++				  struct intel_atomic_state *state,
+ 				  struct intel_crtc *crtc)
+ {
+ 	struct drm_i915_private *i915 = to_i915(state->base.dev);
+ 
+ 	if (DISPLAY_VER(i915) >= 9)
+-		skl_crtc_planes_update_arm(state, crtc);
++		skl_crtc_planes_update_arm(dsb, state, crtc);
+ 	else
+-		i9xx_crtc_planes_update_arm(state, crtc);
++		i9xx_crtc_planes_update_arm(dsb, state, crtc);
+ }
+ 
+ int intel_atomic_plane_check_clipping(struct intel_plane_state *plane_state,
+diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.h b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
+index 6c4fe359646504..0f982f452ff391 100644
+--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.h
++++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
+@@ -14,6 +14,7 @@ struct drm_rect;
+ struct intel_atomic_state;
+ struct intel_crtc;
+ struct intel_crtc_state;
++struct intel_dsb;
+ struct intel_plane;
+ struct intel_plane_state;
+ enum plane_id;
+@@ -32,26 +33,32 @@ void intel_plane_copy_uapi_to_hw_state(struct intel_plane_state *plane_state,
+ 				       struct intel_crtc *crtc);
+ void intel_plane_copy_hw_state(struct intel_plane_state *plane_state,
+ 			       const struct intel_plane_state *from_plane_state);
+-void intel_plane_async_flip(struct intel_plane *plane,
++void intel_plane_async_flip(struct intel_dsb *dsb,
++			    struct intel_plane *plane,
+ 			    const struct intel_crtc_state *crtc_state,
+ 			    const struct intel_plane_state *plane_state,
+ 			    bool async_flip);
+-void intel_plane_update_noarm(struct intel_plane *plane,
++void intel_plane_update_noarm(struct intel_dsb *dsb,
++			      struct intel_plane *plane,
+ 			      const struct intel_crtc_state *crtc_state,
+ 			      const struct intel_plane_state *plane_state);
+-void intel_plane_update_arm(struct intel_plane *plane,
++void intel_plane_update_arm(struct intel_dsb *dsb,
++			    struct intel_plane *plane,
+ 			    const struct intel_crtc_state *crtc_state,
+ 			    const struct intel_plane_state *plane_state);
+-void intel_plane_disable_arm(struct intel_plane *plane,
++void intel_plane_disable_arm(struct intel_dsb *dsb,
++			     struct intel_plane *plane,
+ 			     const struct intel_crtc_state *crtc_state);
+ struct intel_plane *intel_plane_alloc(void);
+ void intel_plane_free(struct intel_plane *plane);
+ struct drm_plane_state *intel_plane_duplicate_state(struct drm_plane *plane);
+ void intel_plane_destroy_state(struct drm_plane *plane,
+ 			       struct drm_plane_state *state);
+-void intel_crtc_planes_update_noarm(struct intel_atomic_state *state,
++void intel_crtc_planes_update_noarm(struct intel_dsb *dsb,
++				    struct intel_atomic_state *state,
+ 				    struct intel_crtc *crtc);
+-void intel_crtc_planes_update_arm(struct intel_atomic_state *state,
++void intel_crtc_planes_update_arm(struct intel_dsb *dsbx,
++				  struct intel_atomic_state *state,
+ 				  struct intel_crtc *crtc);
+ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state,
+ 					struct intel_crtc_state *crtc_state,
+diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
+index ec55cb651d4498..1fbe3cd452c13a 100644
+--- a/drivers/gpu/drm/i915/display/intel_color.c
++++ b/drivers/gpu/drm/i915/display/intel_color.c
+@@ -1912,6 +1912,23 @@ void intel_color_post_update(const struct intel_crtc_state *crtc_state)
+ 		i915->display.funcs.color->color_post_update(crtc_state);
+ }
+ 
++void intel_color_modeset(const struct intel_crtc_state *crtc_state)
++{
++	struct intel_display *display = to_intel_display(crtc_state);
++
++	intel_color_load_luts(crtc_state);
++	intel_color_commit_noarm(crtc_state);
++	intel_color_commit_arm(crtc_state);
++
++	if (DISPLAY_VER(display) < 9) {
++		struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
++		struct intel_plane *plane = to_intel_plane(crtc->base.primary);
++
++		/* update DSPCNTR to configure gamma/csc for pipe bottom color */
++		plane->disable_arm(NULL, plane, crtc_state);
++	}
++}
++
+ void intel_color_prepare_commit(struct intel_atomic_state *state,
+ 				struct intel_crtc *crtc)
+ {
+diff --git a/drivers/gpu/drm/i915/display/intel_color.h b/drivers/gpu/drm/i915/display/intel_color.h
+index 79f230a1709ad1..ab3aaec06a2ac8 100644
+--- a/drivers/gpu/drm/i915/display/intel_color.h
++++ b/drivers/gpu/drm/i915/display/intel_color.h
+@@ -28,6 +28,7 @@ void intel_color_commit_noarm(const struct intel_crtc_state *crtc_state);
+ void intel_color_commit_arm(const struct intel_crtc_state *crtc_state);
+ void intel_color_post_update(const struct intel_crtc_state *crtc_state);
+ void intel_color_load_luts(const struct intel_crtc_state *crtc_state);
++void intel_color_modeset(const struct intel_crtc_state *crtc_state);
+ void intel_color_get_config(struct intel_crtc_state *crtc_state);
+ bool intel_color_lut_equal(const struct intel_crtc_state *crtc_state,
+ 			   const struct drm_property_blob *blob1,
+diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c
+index 9ad53e1cbbd063..aeadb834d33286 100644
+--- a/drivers/gpu/drm/i915/display/intel_cursor.c
++++ b/drivers/gpu/drm/i915/display/intel_cursor.c
+@@ -275,7 +275,8 @@ static int i845_check_cursor(struct intel_crtc_state *crtc_state,
+ }
+ 
+ /* TODO: split into noarm+arm pair */
+-static void i845_cursor_update_arm(struct intel_plane *plane,
++static void i845_cursor_update_arm(struct intel_dsb *dsb,
++				   struct intel_plane *plane,
+ 				   const struct intel_crtc_state *crtc_state,
+ 				   const struct intel_plane_state *plane_state)
+ {
+@@ -315,10 +316,11 @@ static void i845_cursor_update_arm(struct intel_plane *plane,
+ 	}
+ }
+ 
+-static void i845_cursor_disable_arm(struct intel_plane *plane,
++static void i845_cursor_disable_arm(struct intel_dsb *dsb,
++				    struct intel_plane *plane,
+ 				    const struct intel_crtc_state *crtc_state)
+ {
+-	i845_cursor_update_arm(plane, crtc_state, NULL);
++	i845_cursor_update_arm(dsb, plane, crtc_state, NULL);
+ }
+ 
+ static bool i845_cursor_get_hw_state(struct intel_plane *plane,
+@@ -527,22 +529,25 @@ static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
+ 	return 0;
+ }
+ 
+-static void i9xx_cursor_disable_sel_fetch_arm(struct intel_plane *plane,
++static void i9xx_cursor_disable_sel_fetch_arm(struct intel_dsb *dsb,
++					      struct intel_plane *plane,
+ 					      const struct intel_crtc_state *crtc_state)
+ {
+-	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
++	struct intel_display *display = to_intel_display(plane->base.dev);
+ 	enum pipe pipe = plane->pipe;
+ 
+ 	if (!crtc_state->enable_psr2_sel_fetch)
+ 		return;
+ 
+-	intel_de_write_fw(dev_priv, SEL_FETCH_CUR_CTL(pipe), 0);
++	intel_de_write_dsb(display, dsb, SEL_FETCH_CUR_CTL(pipe), 0);
+ }
+ 
+-static void wa_16021440873(struct intel_plane *plane,
++static void wa_16021440873(struct intel_dsb *dsb,
++			   struct intel_plane *plane,
+ 			   const struct intel_crtc_state *crtc_state,
+ 			   const struct intel_plane_state *plane_state)
+ {
++	struct intel_display *display = to_intel_display(plane->base.dev);
+ 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ 	u32 ctl = plane_state->ctl;
+ 	int et_y_position = drm_rect_height(&crtc_state->pipe_src) + 1;
+@@ -551,16 +556,18 @@ static void wa_16021440873(struct intel_plane *plane,
+ 	ctl &= ~MCURSOR_MODE_MASK;
+ 	ctl |= MCURSOR_MODE_64_2B;
+ 
+-	intel_de_write_fw(dev_priv, SEL_FETCH_CUR_CTL(pipe), ctl);
++	intel_de_write_dsb(display, dsb, SEL_FETCH_CUR_CTL(pipe), ctl);
+ 
+-	intel_de_write(dev_priv, CURPOS_ERLY_TPT(dev_priv, pipe),
+-		       CURSOR_POS_Y(et_y_position));
++	intel_de_write_dsb(display, dsb, CURPOS_ERLY_TPT(dev_priv, pipe),
++			   CURSOR_POS_Y(et_y_position));
+ }
+ 
+-static void i9xx_cursor_update_sel_fetch_arm(struct intel_plane *plane,
++static void i9xx_cursor_update_sel_fetch_arm(struct intel_dsb *dsb,
++					     struct intel_plane *plane,
+ 					     const struct intel_crtc_state *crtc_state,
+ 					     const struct intel_plane_state *plane_state)
+ {
++	struct intel_display *display = to_intel_display(plane->base.dev);
+ 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ 	enum pipe pipe = plane->pipe;
+ 
+@@ -571,19 +578,17 @@ static void i9xx_cursor_update_sel_fetch_arm(struct intel_plane *plane,
+ 		if (crtc_state->enable_psr2_su_region_et) {
+ 			u32 val = intel_cursor_position(crtc_state, plane_state,
+ 				true);
+-			intel_de_write_fw(dev_priv,
+-					  CURPOS_ERLY_TPT(dev_priv, pipe),
+-					  val);
++
++			intel_de_write_dsb(display, dsb, CURPOS_ERLY_TPT(dev_priv, pipe), val);
+ 		}
+ 
+-		intel_de_write_fw(dev_priv, SEL_FETCH_CUR_CTL(pipe),
+-				  plane_state->ctl);
++		intel_de_write_dsb(display, dsb, SEL_FETCH_CUR_CTL(pipe), plane_state->ctl);
+ 	} else {
+ 		/* Wa_16021440873 */
+ 		if (crtc_state->enable_psr2_su_region_et)
+-			wa_16021440873(plane, crtc_state, plane_state);
++			wa_16021440873(dsb, plane, crtc_state, plane_state);
+ 		else
+-			i9xx_cursor_disable_sel_fetch_arm(plane, crtc_state);
++			i9xx_cursor_disable_sel_fetch_arm(dsb, plane, crtc_state);
+ 	}
+ }
+ 
+@@ -610,9 +615,11 @@ static u32 skl_cursor_wm_reg_val(const struct skl_wm_level *level)
+ 	return val;
+ }
+ 
+-static void skl_write_cursor_wm(struct intel_plane *plane,
++static void skl_write_cursor_wm(struct intel_dsb *dsb,
++				struct intel_plane *plane,
+ 				const struct intel_crtc_state *crtc_state)
+ {
++	struct intel_display *display = to_intel_display(plane->base.dev);
+ 	struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ 	enum plane_id plane_id = plane->id;
+ 	enum pipe pipe = plane->pipe;
+@@ -622,30 +629,32 @@ static void skl_write_cursor_wm(struct intel_plane *plane,
+ 	int level;
+ 
+ 	for (level = 0; level < i915->display.wm.num_levels; level++)
+-		intel_de_write_fw(i915, CUR_WM(pipe, level),
+-				  skl_cursor_wm_reg_val(skl_plane_wm_level(pipe_wm, plane_id, level)));
++		intel_de_write_dsb(display, dsb, CUR_WM(pipe, level),
++				   skl_cursor_wm_reg_val(skl_plane_wm_level(pipe_wm, plane_id, level)));
+ 
+-	intel_de_write_fw(i915, CUR_WM_TRANS(pipe),
+-			  skl_cursor_wm_reg_val(skl_plane_trans_wm(pipe_wm, plane_id)));
++	intel_de_write_dsb(display, dsb, CUR_WM_TRANS(pipe),
++			   skl_cursor_wm_reg_val(skl_plane_trans_wm(pipe_wm, plane_id)));
+ 
+ 	if (HAS_HW_SAGV_WM(i915)) {
+ 		const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
+ 
+-		intel_de_write_fw(i915, CUR_WM_SAGV(pipe),
+-				  skl_cursor_wm_reg_val(&wm->sagv.wm0));
+-		intel_de_write_fw(i915, CUR_WM_SAGV_TRANS(pipe),
+-				  skl_cursor_wm_reg_val(&wm->sagv.trans_wm));
++		intel_de_write_dsb(display, dsb, CUR_WM_SAGV(pipe),
++				   skl_cursor_wm_reg_val(&wm->sagv.wm0));
++		intel_de_write_dsb(display, dsb, CUR_WM_SAGV_TRANS(pipe),
++				   skl_cursor_wm_reg_val(&wm->sagv.trans_wm));
+ 	}
+ 
+-	intel_de_write_fw(i915, CUR_BUF_CFG(pipe),
+-			  skl_cursor_ddb_reg_val(ddb));
++	intel_de_write_dsb(display, dsb, CUR_BUF_CFG(pipe),
++			   skl_cursor_ddb_reg_val(ddb));
+ }
+ 
+ /* TODO: split into noarm+arm pair */
+-static void i9xx_cursor_update_arm(struct intel_plane *plane,
++static void i9xx_cursor_update_arm(struct intel_dsb *dsb,
++				   struct intel_plane *plane,
+ 				   const struct intel_crtc_state *crtc_state,
+ 				   const struct intel_plane_state *plane_state)
+ {
++	struct intel_display *display = to_intel_display(plane->base.dev);
+ 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ 	enum pipe pipe = plane->pipe;
+ 	u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
+@@ -685,38 +694,36 @@ static void i9xx_cursor_update_arm(struct intel_plane *plane,
+ 	 */
+ 
+ 	if (DISPLAY_VER(dev_priv) >= 9)
+-		skl_write_cursor_wm(plane, crtc_state);
++		skl_write_cursor_wm(dsb, plane, crtc_state);
+ 
+ 	if (plane_state)
+-		i9xx_cursor_update_sel_fetch_arm(plane, crtc_state,
+-						 plane_state);
++		i9xx_cursor_update_sel_fetch_arm(dsb, plane, crtc_state, plane_state);
+ 	else
+-		i9xx_cursor_disable_sel_fetch_arm(plane, crtc_state);
++		i9xx_cursor_disable_sel_fetch_arm(dsb, plane, crtc_state);
+ 
+ 	if (plane->cursor.base != base ||
+ 	    plane->cursor.size != fbc_ctl ||
+ 	    plane->cursor.cntl != cntl) {
+ 		if (HAS_CUR_FBC(dev_priv))
+-			intel_de_write_fw(dev_priv,
+-					  CUR_FBC_CTL(dev_priv, pipe),
+-					  fbc_ctl);
+-		intel_de_write_fw(dev_priv, CURCNTR(dev_priv, pipe), cntl);
+-		intel_de_write_fw(dev_priv, CURPOS(dev_priv, pipe), pos);
+-		intel_de_write_fw(dev_priv, CURBASE(dev_priv, pipe), base);
++			intel_de_write_dsb(display, dsb, CUR_FBC_CTL(dev_priv, pipe), fbc_ctl);
++		intel_de_write_dsb(display, dsb, CURCNTR(dev_priv, pipe), cntl);
++		intel_de_write_dsb(display, dsb, CURPOS(dev_priv, pipe), pos);
++		intel_de_write_dsb(display, dsb, CURBASE(dev_priv, pipe), base);
+ 
+ 		plane->cursor.base = base;
+ 		plane->cursor.size = fbc_ctl;
+ 		plane->cursor.cntl = cntl;
+ 	} else {
+-		intel_de_write_fw(dev_priv, CURPOS(dev_priv, pipe), pos);
+-		intel_de_write_fw(dev_priv, CURBASE(dev_priv, pipe), base);
++		intel_de_write_dsb(display, dsb, CURPOS(dev_priv, pipe), pos);
++		intel_de_write_dsb(display, dsb, CURBASE(dev_priv, pipe), base);
+ 	}
+ }
+ 
+-static void i9xx_cursor_disable_arm(struct intel_plane *plane,
++static void i9xx_cursor_disable_arm(struct intel_dsb *dsb,
++				    struct intel_plane *plane,
+ 				    const struct intel_crtc_state *crtc_state)
+ {
+-	i9xx_cursor_update_arm(plane, crtc_state, NULL);
++	i9xx_cursor_update_arm(dsb, plane, crtc_state, NULL);
+ }
+ 
+ static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
+@@ -905,10 +912,10 @@ intel_legacy_cursor_update(struct drm_plane *_plane,
+ 	}
+ 
+ 	if (new_plane_state->uapi.visible) {
+-		intel_plane_update_noarm(plane, crtc_state, new_plane_state);
+-		intel_plane_update_arm(plane, crtc_state, new_plane_state);
++		intel_plane_update_noarm(NULL, plane, crtc_state, new_plane_state);
++		intel_plane_update_arm(NULL, plane, crtc_state, new_plane_state);
+ 	} else {
+-		intel_plane_disable_arm(plane, crtc_state);
++		intel_plane_disable_arm(NULL, plane, crtc_state);
+ 	}
+ 
+ 	local_irq_enable();
+diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
+index 2f1d9ce87ceb01..34dee523f0b612 100644
+--- a/drivers/gpu/drm/i915/display/intel_ddi.c
++++ b/drivers/gpu/drm/i915/display/intel_ddi.c
+@@ -4888,7 +4888,7 @@ void intel_ddi_init(struct intel_display *display,
+ 		if (!assert_has_icl_dsi(dev_priv))
+ 			return;
+ 
+-		icl_dsi_init(dev_priv, devdata);
++		icl_dsi_init(display, devdata);
+ 		return;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/i915/display/intel_de.h b/drivers/gpu/drm/i915/display/intel_de.h
+index e881bfeafb47d7..e017cd4a81685a 100644
+--- a/drivers/gpu/drm/i915/display/intel_de.h
++++ b/drivers/gpu/drm/i915/display/intel_de.h
+@@ -8,6 +8,7 @@
+ 
+ #include "i915_drv.h"
+ #include "i915_trace.h"
++#include "intel_dsb.h"
+ #include "intel_uncore.h"
+ 
+ static inline struct intel_uncore *__to_uncore(struct intel_display *display)
+@@ -233,4 +234,14 @@ __intel_de_write_notrace(struct intel_display *display, i915_reg_t reg,
+ }
+ #define intel_de_write_notrace(p,...) __intel_de_write_notrace(__to_intel_display(p), __VA_ARGS__)
+ 
++static __always_inline void
++intel_de_write_dsb(struct intel_display *display, struct intel_dsb *dsb,
++		   i915_reg_t reg, u32 val)
++{
++	if (dsb)
++		intel_dsb_reg_write(dsb, reg, val);
++	else
++		intel_de_write_fw(display, reg, val);
++}
++
+ #endif /* __INTEL_DE_H__ */
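
An aside on the hunk above: intel_de_write_dsb() is the pivot of this backport. Every converted call site routes its register write through it, queuing the write into a DSB (Display State Buffer) when one is supplied and falling back to an immediate MMIO write otherwise. Below is a minimal standalone sketch of that NULL-context dispatch pattern; reg_t and struct fake_dsb are hypothetical stand-ins for i915_reg_t and struct intel_dsb, not the real i915 types.

#include <stdint.h>
#include <stdio.h>

typedef struct { uint32_t offset; } reg_t;

struct fake_dsb {
	uint32_t buf[64];	/* queued (register, value) pairs */
	unsigned int len;
};

static void dsb_reg_write(struct fake_dsb *dsb, reg_t reg, uint32_t val)
{
	dsb->buf[dsb->len++] = reg.offset;
	dsb->buf[dsb->len++] = val;
}

static void mmio_write_fw(reg_t reg, uint32_t val)
{
	printf("MMIO 0x%05x <- 0x%08x\n", (unsigned)reg.offset, (unsigned)val);
}

/* Same shape as intel_de_write_dsb(): queue through the DSB when one
 * is provided, otherwise perform an immediate MMIO write. */
static void de_write_dsb(struct fake_dsb *dsb, reg_t reg, uint32_t val)
{
	if (dsb)
		dsb_reg_write(dsb, reg, val);
	else
		mmio_write_fw(reg, val);
}

int main(void)
{
	struct fake_dsb dsb = { .len = 0 };
	reg_t plane_surf = { 0x7019c };	/* made-up register offset */

	de_write_dsb(NULL, plane_surf, 0x100000);	/* immediate path */
	de_write_dsb(&dsb, plane_surf, 0x100000);	/* queued for HW replay */
	printf("queued words: %u\n", dsb.len);
	return 0;
}

The call sites visible in this patch all pass a NULL dsb, so behaviour is unchanged; the extra parameter only creates the seam through which a later series can hand in a real DSB.
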
+diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
+index 2c6d0da8a16f8c..3039ee03e1c7a8 100644
+--- a/drivers/gpu/drm/i915/display/intel_display.c
++++ b/drivers/gpu/drm/i915/display/intel_display.c
+@@ -135,7 +135,8 @@
+ static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
+ static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
+ static void hsw_set_transconf(const struct intel_crtc_state *crtc_state);
+-static void bdw_set_pipe_misc(const struct intel_crtc_state *crtc_state);
++static void bdw_set_pipe_misc(struct intel_dsb *dsb,
++			      const struct intel_crtc_state *crtc_state);
+ 
+ /* returns HPLL frequency in kHz */
+ int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
+@@ -715,7 +716,7 @@ void intel_plane_disable_noatomic(struct intel_crtc *crtc,
+ 	if (DISPLAY_VER(dev_priv) == 2 && !crtc_state->active_planes)
+ 		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
+ 
+-	intel_plane_disable_arm(plane, crtc_state);
++	intel_plane_disable_arm(NULL, plane, crtc_state);
+ 	intel_crtc_wait_for_next_vblank(crtc);
+ }
+ 
+@@ -1172,8 +1173,8 @@ static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state,
+ 			 * Apart from the async flip bit we want to
+ 			 * preserve the old state for the plane.
+ 			 */
+-			intel_plane_async_flip(plane, old_crtc_state,
+-					       old_plane_state, false);
++			intel_plane_async_flip(NULL, plane,
++					       old_crtc_state, old_plane_state, false);
+ 			need_vbl_wait = true;
+ 		}
+ 	}
+@@ -1315,7 +1316,7 @@ static void intel_crtc_disable_planes(struct intel_atomic_state *state,
+ 		    !(update_mask & BIT(plane->id)))
+ 			continue;
+ 
+-		intel_plane_disable_arm(plane, new_crtc_state);
++		intel_plane_disable_arm(NULL, plane, new_crtc_state);
+ 
+ 		if (old_plane_state->uapi.visible)
+ 			fb_bits |= plane->frontbuffer_bit;
+@@ -1502,14 +1503,6 @@ static void intel_encoders_update_pipe(struct intel_atomic_state *state,
+ 	}
+ }
+ 
+-static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
+-{
+-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+-	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
+-
+-	plane->disable_arm(plane, crtc_state);
+-}
+-
+ static void ilk_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
+ {
+ 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+@@ -1575,11 +1568,7 @@ static void ilk_crtc_enable(struct intel_atomic_state *state,
+ 	 * On ILK+ LUT must be loaded before the pipe is running but with
+ 	 * clocks enabled
+ 	 */
+-	intel_color_load_luts(new_crtc_state);
+-	intel_color_commit_noarm(new_crtc_state);
+-	intel_color_commit_arm(new_crtc_state);
+-	/* update DSPCNTR to configure gamma for pipe bottom color */
+-	intel_disable_primary_plane(new_crtc_state);
++	intel_color_modeset(new_crtc_state);
+ 
+ 	intel_initial_watermarks(state, crtc);
+ 	intel_enable_transcoder(new_crtc_state);
+@@ -1716,7 +1705,7 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
+ 		intel_set_pipe_src_size(pipe_crtc_state);
+ 
+ 		if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
+-			bdw_set_pipe_misc(pipe_crtc_state);
++			bdw_set_pipe_misc(NULL, pipe_crtc_state);
+ 	}
+ 
+ 	if (!transcoder_is_dsi(cpu_transcoder))
+@@ -1741,12 +1730,7 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
+ 		 * On ILK+ LUT must be loaded before the pipe is running but with
+ 		 * clocks enabled
+ 		 */
+-		intel_color_load_luts(pipe_crtc_state);
+-		intel_color_commit_noarm(pipe_crtc_state);
+-		intel_color_commit_arm(pipe_crtc_state);
+-		/* update DSPCNTR to configure gamma/csc for pipe bottom color */
+-		if (DISPLAY_VER(dev_priv) < 9)
+-			intel_disable_primary_plane(pipe_crtc_state);
++		intel_color_modeset(pipe_crtc_state);
+ 
+ 		hsw_set_linetime_wm(pipe_crtc_state);
+ 
+@@ -2147,11 +2131,7 @@ static void valleyview_crtc_enable(struct intel_atomic_state *state,
+ 
+ 	i9xx_pfit_enable(new_crtc_state);
+ 
+-	intel_color_load_luts(new_crtc_state);
+-	intel_color_commit_noarm(new_crtc_state);
+-	intel_color_commit_arm(new_crtc_state);
+-	/* update DSPCNTR to configure gamma for pipe bottom color */
+-	intel_disable_primary_plane(new_crtc_state);
++	intel_color_modeset(new_crtc_state);
+ 
+ 	intel_initial_watermarks(state, crtc);
+ 	intel_enable_transcoder(new_crtc_state);
+@@ -2187,11 +2167,7 @@ static void i9xx_crtc_enable(struct intel_atomic_state *state,
+ 
+ 	i9xx_pfit_enable(new_crtc_state);
+ 
+-	intel_color_load_luts(new_crtc_state);
+-	intel_color_commit_noarm(new_crtc_state);
+-	intel_color_commit_arm(new_crtc_state);
+-	/* update DSPCNTR to configure gamma for pipe bottom color */
+-	intel_disable_primary_plane(new_crtc_state);
++	intel_color_modeset(new_crtc_state);
+ 
+ 	if (!intel_initial_watermarks(state, crtc))
+ 		intel_update_watermarks(dev_priv);
+@@ -3246,9 +3222,11 @@ static void hsw_set_transconf(const struct intel_crtc_state *crtc_state)
+ 	intel_de_posting_read(dev_priv, TRANSCONF(dev_priv, cpu_transcoder));
+ }
+ 
+-static void bdw_set_pipe_misc(const struct intel_crtc_state *crtc_state)
++static void bdw_set_pipe_misc(struct intel_dsb *dsb,
++			      const struct intel_crtc_state *crtc_state)
+ {
+ 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
++	struct intel_display *display = to_intel_display(crtc->base.dev);
+ 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ 	u32 val = 0;
+ 
+@@ -3293,7 +3271,7 @@ static void bdw_set_pipe_misc(const struct intel_crtc_state *crtc_state)
+ 	if (IS_BROADWELL(dev_priv))
+ 		val |= PIPE_MISC_PSR_MASK_SPRITE_ENABLE;
+ 
+-	intel_de_write(dev_priv, PIPE_MISC(crtc->pipe), val);
++	intel_de_write_dsb(display, dsb, PIPE_MISC(crtc->pipe), val);
+ }
+ 
+ int bdw_get_pipe_misc_bpp(struct intel_crtc *crtc)
+@@ -6846,7 +6824,7 @@ static void commit_pipe_pre_planes(struct intel_atomic_state *state,
+ 			intel_color_commit_arm(new_crtc_state);
+ 
+ 		if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
+-			bdw_set_pipe_misc(new_crtc_state);
++			bdw_set_pipe_misc(NULL, new_crtc_state);
+ 
+ 		if (intel_crtc_needs_fastset(new_crtc_state))
+ 			intel_pipe_fastset(old_crtc_state, new_crtc_state);
+@@ -6946,7 +6924,7 @@ static void intel_pre_update_crtc(struct intel_atomic_state *state,
+ 	    intel_crtc_needs_color_update(new_crtc_state))
+ 		intel_color_commit_noarm(new_crtc_state);
+ 
+-	intel_crtc_planes_update_noarm(state, crtc);
++	intel_crtc_planes_update_noarm(NULL, state, crtc);
+ }
+ 
+ static void intel_update_crtc(struct intel_atomic_state *state,
+@@ -6962,7 +6940,7 @@ static void intel_update_crtc(struct intel_atomic_state *state,
+ 
+ 	commit_pipe_pre_planes(state, crtc);
+ 
+-	intel_crtc_planes_update_arm(state, crtc);
++	intel_crtc_planes_update_arm(NULL, state, crtc);
+ 
+ 	commit_pipe_post_planes(state, crtc);
+ 
+diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
+index f29e5dc3db910c..3e24d2e90d3cfb 100644
+--- a/drivers/gpu/drm/i915/display/intel_display_types.h
++++ b/drivers/gpu/drm/i915/display/intel_display_types.h
+@@ -1036,6 +1036,10 @@ struct intel_csc_matrix {
+ 	u16 postoff[3];
+ };
+ 
++void intel_io_mmio_fw_write(void *ctx, i915_reg_t reg, u32 val);
++
++typedef void (*intel_io_reg_write)(void *ctx, i915_reg_t reg, u32 val);
++
+ struct intel_crtc_state {
+ 	/*
+ 	 * uapi (drm) state. This is the software state shown to userspace.
+@@ -1578,22 +1582,26 @@ struct intel_plane {
+ 				   u32 pixel_format, u64 modifier,
+ 				   unsigned int rotation);
+ 	/* Write all non-self arming plane registers */
+-	void (*update_noarm)(struct intel_plane *plane,
++	void (*update_noarm)(struct intel_dsb *dsb,
++			     struct intel_plane *plane,
+ 			     const struct intel_crtc_state *crtc_state,
+ 			     const struct intel_plane_state *plane_state);
+ 	/* Write all self-arming plane registers */
+-	void (*update_arm)(struct intel_plane *plane,
++	void (*update_arm)(struct intel_dsb *dsb,
++			   struct intel_plane *plane,
+ 			   const struct intel_crtc_state *crtc_state,
+ 			   const struct intel_plane_state *plane_state);
+ 	/* Disable the plane, must arm */
+-	void (*disable_arm)(struct intel_plane *plane,
++	void (*disable_arm)(struct intel_dsb *dsb,
++			    struct intel_plane *plane,
+ 			    const struct intel_crtc_state *crtc_state);
+ 	bool (*get_hw_state)(struct intel_plane *plane, enum pipe *pipe);
+ 	int (*check_plane)(struct intel_crtc_state *crtc_state,
+ 			   struct intel_plane_state *plane_state);
+ 	int (*min_cdclk)(const struct intel_crtc_state *crtc_state,
+ 			 const struct intel_plane_state *plane_state);
+-	void (*async_flip)(struct intel_plane *plane,
++	void (*async_flip)(struct intel_dsb *dsb,
++			   struct intel_plane *plane,
+ 			   const struct intel_crtc_state *crtc_state,
+ 			   const struct intel_plane_state *plane_state,
+ 			   bool async_flip);
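
The intel_plane vtable change above threads the DSB through the plane hooks (update_noarm/update_arm/disable_arm/async_flip) as a new first argument. A short standalone sketch of the same context-first callback-table pattern; struct fake_dsb, struct fake_plane and the ops names are stand-ins, not the real i915 structures.

#include <stdio.h>

struct fake_dsb;	/* opaque write-queue context */
struct fake_plane;

struct plane_ops {
	/* Each hook takes the (possibly NULL) DSB first, mirroring
	 * the reworked hooks in the hunk above. */
	void (*update_arm)(struct fake_dsb *dsb, struct fake_plane *plane);
	void (*disable_arm)(struct fake_dsb *dsb, struct fake_plane *plane);
};

struct fake_plane {
	const struct plane_ops *ops;
	const char *name;
};

static void cursor_update_arm(struct fake_dsb *dsb, struct fake_plane *plane)
{
	printf("%s: update_arm via %s\n", plane->name, dsb ? "DSB" : "MMIO");
}

static void cursor_disable_arm(struct fake_dsb *dsb, struct fake_plane *plane)
{
	printf("%s: disable_arm via %s\n", plane->name, dsb ? "DSB" : "MMIO");
}

static const struct plane_ops cursor_ops = {
	.update_arm = cursor_update_arm,
	.disable_arm = cursor_disable_arm,
};

int main(void)
{
	struct fake_plane cursor = { &cursor_ops, "cursor" };

	cursor.ops->update_arm(NULL, &cursor);	/* immediate/legacy path */
	cursor.ops->disable_arm(NULL, &cursor);
	return 0;
}
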
+diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
+index e657b09ede999b..e6fadcef58e06c 100644
+--- a/drivers/gpu/drm/i915/display/intel_sprite.c
++++ b/drivers/gpu/drm/i915/display/intel_sprite.c
+@@ -378,7 +378,8 @@ static void vlv_sprite_update_gamma(const struct intel_plane_state *plane_state)
+ }
+ 
+ static void
+-vlv_sprite_update_noarm(struct intel_plane *plane,
++vlv_sprite_update_noarm(struct intel_dsb *dsb,
++			struct intel_plane *plane,
+ 			const struct intel_crtc_state *crtc_state,
+ 			const struct intel_plane_state *plane_state)
+ {
+@@ -399,7 +400,8 @@ vlv_sprite_update_noarm(struct intel_plane *plane,
+ }
+ 
+ static void
+-vlv_sprite_update_arm(struct intel_plane *plane,
++vlv_sprite_update_arm(struct intel_dsb *dsb,
++		      struct intel_plane *plane,
+ 		      const struct intel_crtc_state *crtc_state,
+ 		      const struct intel_plane_state *plane_state)
+ {
+@@ -449,7 +451,8 @@ vlv_sprite_update_arm(struct intel_plane *plane,
+ }
+ 
+ static void
+-vlv_sprite_disable_arm(struct intel_plane *plane,
++vlv_sprite_disable_arm(struct intel_dsb *dsb,
++		       struct intel_plane *plane,
+ 		       const struct intel_crtc_state *crtc_state)
+ {
+ 	struct intel_display *display = to_intel_display(plane->base.dev);
+@@ -795,7 +798,8 @@ static void ivb_sprite_update_gamma(const struct intel_plane_state *plane_state)
+ }
+ 
+ static void
+-ivb_sprite_update_noarm(struct intel_plane *plane,
++ivb_sprite_update_noarm(struct intel_dsb *dsb,
++			struct intel_plane *plane,
+ 			const struct intel_crtc_state *crtc_state,
+ 			const struct intel_plane_state *plane_state)
+ {
+@@ -826,7 +830,8 @@ ivb_sprite_update_noarm(struct intel_plane *plane,
+ }
+ 
+ static void
+-ivb_sprite_update_arm(struct intel_plane *plane,
++ivb_sprite_update_arm(struct intel_dsb *dsb,
++		      struct intel_plane *plane,
+ 		      const struct intel_crtc_state *crtc_state,
+ 		      const struct intel_plane_state *plane_state)
+ {
+@@ -874,7 +879,8 @@ ivb_sprite_update_arm(struct intel_plane *plane,
+ }
+ 
+ static void
+-ivb_sprite_disable_arm(struct intel_plane *plane,
++ivb_sprite_disable_arm(struct intel_dsb *dsb,
++		       struct intel_plane *plane,
+ 		       const struct intel_crtc_state *crtc_state)
+ {
+ 	struct intel_display *display = to_intel_display(plane->base.dev);
+@@ -1133,7 +1139,8 @@ static void ilk_sprite_update_gamma(const struct intel_plane_state *plane_state)
+ }
+ 
+ static void
+-g4x_sprite_update_noarm(struct intel_plane *plane,
++g4x_sprite_update_noarm(struct intel_dsb *dsb,
++			struct intel_plane *plane,
+ 			const struct intel_crtc_state *crtc_state,
+ 			const struct intel_plane_state *plane_state)
+ {
+@@ -1162,7 +1169,8 @@ g4x_sprite_update_noarm(struct intel_plane *plane,
+ }
+ 
+ static void
+-g4x_sprite_update_arm(struct intel_plane *plane,
++g4x_sprite_update_arm(struct intel_dsb *dsb,
++		      struct intel_plane *plane,
+ 		      const struct intel_crtc_state *crtc_state,
+ 		      const struct intel_plane_state *plane_state)
+ {
+@@ -1206,7 +1214,8 @@ g4x_sprite_update_arm(struct intel_plane *plane,
+ }
+ 
+ static void
+-g4x_sprite_disable_arm(struct intel_plane *plane,
++g4x_sprite_disable_arm(struct intel_dsb *dsb,
++		       struct intel_plane *plane,
+ 		       const struct intel_crtc_state *crtc_state)
+ {
+ 	struct intel_display *display = to_intel_display(plane->base.dev);
+diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c
+index 62a5287ea1d9c4..7f77a76309bd55 100644
+--- a/drivers/gpu/drm/i915/display/skl_universal_plane.c
++++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c
+@@ -589,11 +589,11 @@ static u32 skl_plane_min_alignment(struct intel_plane *plane,
+  * in full-range YCbCr.
+  */
+ static void
+-icl_program_input_csc(struct intel_plane *plane,
+-		      const struct intel_crtc_state *crtc_state,
++icl_program_input_csc(struct intel_dsb *dsb,
++		      struct intel_plane *plane,
+ 		      const struct intel_plane_state *plane_state)
+ {
+-	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
++	struct intel_display *display = to_intel_display(plane->base.dev);
+ 	enum pipe pipe = plane->pipe;
+ 	enum plane_id plane_id = plane->id;
+ 
+@@ -637,31 +637,31 @@ icl_program_input_csc(struct intel_plane *plane,
+ 	};
+ 	const u16 *csc = input_csc_matrix[plane_state->hw.color_encoding];
+ 
+-	intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 0),
+-			  ROFF(csc[0]) | GOFF(csc[1]));
+-	intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 1),
+-			  BOFF(csc[2]));
+-	intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 2),
+-			  ROFF(csc[3]) | GOFF(csc[4]));
+-	intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 3),
+-			  BOFF(csc[5]));
+-	intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 4),
+-			  ROFF(csc[6]) | GOFF(csc[7]));
+-	intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 5),
+-			  BOFF(csc[8]));
+-
+-	intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 0),
+-			  PREOFF_YUV_TO_RGB_HI);
+-	intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
+-			  PREOFF_YUV_TO_RGB_ME);
+-	intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 2),
+-			  PREOFF_YUV_TO_RGB_LO);
+-	intel_de_write_fw(dev_priv,
+-			  PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 0), 0x0);
+-	intel_de_write_fw(dev_priv,
+-			  PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 1), 0x0);
+-	intel_de_write_fw(dev_priv,
+-			  PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 2), 0x0);
++	intel_de_write_dsb(display, dsb, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 0),
++			   ROFF(csc[0]) | GOFF(csc[1]));
++	intel_de_write_dsb(display, dsb, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 1),
++			   BOFF(csc[2]));
++	intel_de_write_dsb(display, dsb, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 2),
++			   ROFF(csc[3]) | GOFF(csc[4]));
++	intel_de_write_dsb(display, dsb, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 3),
++			   BOFF(csc[5]));
++	intel_de_write_dsb(display, dsb, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 4),
++			   ROFF(csc[6]) | GOFF(csc[7]));
++	intel_de_write_dsb(display, dsb, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 5),
++			   BOFF(csc[8]));
++
++	intel_de_write_dsb(display, dsb, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 0),
++			   PREOFF_YUV_TO_RGB_HI);
++	intel_de_write_dsb(display, dsb, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
++			   PREOFF_YUV_TO_RGB_ME);
++	intel_de_write_dsb(display, dsb, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 2),
++			   PREOFF_YUV_TO_RGB_LO);
++	intel_de_write_dsb(display, dsb,
++			   PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 0), 0x0);
++	intel_de_write_dsb(display, dsb,
++			   PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 1), 0x0);
++	intel_de_write_dsb(display, dsb,
++			   PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 2), 0x0);
+ }
+ 
+ static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb,
+@@ -715,9 +715,11 @@ static u32 skl_plane_wm_reg_val(const struct skl_wm_level *level)
+ 	return val;
+ }
+ 
+-static void skl_write_plane_wm(struct intel_plane *plane,
++static void skl_write_plane_wm(struct intel_dsb *dsb,
++			       struct intel_plane *plane,
+ 			       const struct intel_crtc_state *crtc_state)
+ {
++	struct intel_display *display = to_intel_display(plane->base.dev);
+ 	struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ 	enum plane_id plane_id = plane->id;
+ 	enum pipe pipe = plane->pipe;
+@@ -729,71 +731,75 @@ static void skl_write_plane_wm(struct intel_plane *plane,
+ 	int level;
+ 
+ 	for (level = 0; level < i915->display.wm.num_levels; level++)
+-		intel_de_write_fw(i915, PLANE_WM(pipe, plane_id, level),
+-				  skl_plane_wm_reg_val(skl_plane_wm_level(pipe_wm, plane_id, level)));
++		intel_de_write_dsb(display, dsb, PLANE_WM(pipe, plane_id, level),
++				   skl_plane_wm_reg_val(skl_plane_wm_level(pipe_wm, plane_id, level)));
+ 
+-	intel_de_write_fw(i915, PLANE_WM_TRANS(pipe, plane_id),
+-			  skl_plane_wm_reg_val(skl_plane_trans_wm(pipe_wm, plane_id)));
++	intel_de_write_dsb(display, dsb, PLANE_WM_TRANS(pipe, plane_id),
++			   skl_plane_wm_reg_val(skl_plane_trans_wm(pipe_wm, plane_id)));
+ 
+ 	if (HAS_HW_SAGV_WM(i915)) {
+ 		const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
+ 
+-		intel_de_write_fw(i915, PLANE_WM_SAGV(pipe, plane_id),
+-				  skl_plane_wm_reg_val(&wm->sagv.wm0));
+-		intel_de_write_fw(i915, PLANE_WM_SAGV_TRANS(pipe, plane_id),
+-				  skl_plane_wm_reg_val(&wm->sagv.trans_wm));
++		intel_de_write_dsb(display, dsb, PLANE_WM_SAGV(pipe, plane_id),
++				   skl_plane_wm_reg_val(&wm->sagv.wm0));
++		intel_de_write_dsb(display, dsb, PLANE_WM_SAGV_TRANS(pipe, plane_id),
++				   skl_plane_wm_reg_val(&wm->sagv.trans_wm));
+ 	}
+ 
+-	intel_de_write_fw(i915, PLANE_BUF_CFG(pipe, plane_id),
+-			  skl_plane_ddb_reg_val(ddb));
++	intel_de_write_dsb(display, dsb, PLANE_BUF_CFG(pipe, plane_id),
++			   skl_plane_ddb_reg_val(ddb));
+ 
+ 	if (DISPLAY_VER(i915) < 11)
+-		intel_de_write_fw(i915, PLANE_NV12_BUF_CFG(pipe, plane_id),
+-				  skl_plane_ddb_reg_val(ddb_y));
++		intel_de_write_dsb(display, dsb, PLANE_NV12_BUF_CFG(pipe, plane_id),
++				   skl_plane_ddb_reg_val(ddb_y));
+ }
+ 
+ static void
+-skl_plane_disable_arm(struct intel_plane *plane,
++skl_plane_disable_arm(struct intel_dsb *dsb,
++		      struct intel_plane *plane,
+ 		      const struct intel_crtc_state *crtc_state)
+ {
+-	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
++	struct intel_display *display = to_intel_display(plane->base.dev);
+ 	enum plane_id plane_id = plane->id;
+ 	enum pipe pipe = plane->pipe;
+ 
+-	skl_write_plane_wm(plane, crtc_state);
++	skl_write_plane_wm(dsb, plane, crtc_state);
+ 
+-	intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), 0);
+-	intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), 0);
++	intel_de_write_dsb(display, dsb, PLANE_CTL(pipe, plane_id), 0);
++	intel_de_write_dsb(display, dsb, PLANE_SURF(pipe, plane_id), 0);
+ }
+ 
+-static void icl_plane_disable_sel_fetch_arm(struct intel_plane *plane,
++static void icl_plane_disable_sel_fetch_arm(struct intel_dsb *dsb,
++					    struct intel_plane *plane,
+ 					    const struct intel_crtc_state *crtc_state)
+ {
+-	struct drm_i915_private *i915 = to_i915(plane->base.dev);
++	struct intel_display *display = to_intel_display(plane->base.dev);
+ 	enum pipe pipe = plane->pipe;
+ 
+ 	if (!crtc_state->enable_psr2_sel_fetch)
+ 		return;
+ 
+-	intel_de_write_fw(i915, SEL_FETCH_PLANE_CTL(pipe, plane->id), 0);
++	intel_de_write_dsb(display, dsb, SEL_FETCH_PLANE_CTL(pipe, plane->id), 0);
+ }
+ 
+ static void
+-icl_plane_disable_arm(struct intel_plane *plane,
++icl_plane_disable_arm(struct intel_dsb *dsb,
++		      struct intel_plane *plane,
+ 		      const struct intel_crtc_state *crtc_state)
+ {
++	struct intel_display *display = to_intel_display(plane->base.dev);
+ 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ 	enum plane_id plane_id = plane->id;
+ 	enum pipe pipe = plane->pipe;
+ 
+ 	if (icl_is_hdr_plane(dev_priv, plane_id))
+-		intel_de_write_fw(dev_priv, PLANE_CUS_CTL(pipe, plane_id), 0);
++		intel_de_write_dsb(display, dsb, PLANE_CUS_CTL(pipe, plane_id), 0);
+ 
+-	skl_write_plane_wm(plane, crtc_state);
++	skl_write_plane_wm(dsb, plane, crtc_state);
+ 
+-	icl_plane_disable_sel_fetch_arm(plane, crtc_state);
+-	intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), 0);
+-	intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), 0);
++	icl_plane_disable_sel_fetch_arm(dsb, plane, crtc_state);
++	intel_de_write_dsb(display, dsb, PLANE_CTL(pipe, plane_id), 0);
++	intel_de_write_dsb(display, dsb, PLANE_SURF(pipe, plane_id), 0);
+ }
+ 
+ static bool
+@@ -1230,28 +1236,30 @@ static u32 skl_plane_keymsk(const struct intel_plane_state *plane_state)
+ 	return keymsk;
+ }
+ 
+-static void icl_plane_csc_load_black(struct intel_plane *plane)
++static void icl_plane_csc_load_black(struct intel_dsb *dsb,
++				     struct intel_plane *plane,
++				     const struct intel_crtc_state *crtc_state)
+ {
+-	struct drm_i915_private *i915 = to_i915(plane->base.dev);
++	struct intel_display *display = to_intel_display(plane->base.dev);
+ 	enum plane_id plane_id = plane->id;
+ 	enum pipe pipe = plane->pipe;
+ 
+-	intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 0), 0);
+-	intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 1), 0);
++	intel_de_write_dsb(display, dsb, PLANE_CSC_COEFF(pipe, plane_id, 0), 0);
++	intel_de_write_dsb(display, dsb, PLANE_CSC_COEFF(pipe, plane_id, 1), 0);
+ 
+-	intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 2), 0);
+-	intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 3), 0);
++	intel_de_write_dsb(display, dsb, PLANE_CSC_COEFF(pipe, plane_id, 2), 0);
++	intel_de_write_dsb(display, dsb, PLANE_CSC_COEFF(pipe, plane_id, 3), 0);
+ 
+-	intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 4), 0);
+-	intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 5), 0);
++	intel_de_write_dsb(display, dsb, PLANE_CSC_COEFF(pipe, plane_id, 4), 0);
++	intel_de_write_dsb(display, dsb, PLANE_CSC_COEFF(pipe, plane_id, 5), 0);
+ 
+-	intel_de_write_fw(i915, PLANE_CSC_PREOFF(pipe, plane_id, 0), 0);
+-	intel_de_write_fw(i915, PLANE_CSC_PREOFF(pipe, plane_id, 1), 0);
+-	intel_de_write_fw(i915, PLANE_CSC_PREOFF(pipe, plane_id, 2), 0);
++	intel_de_write_dsb(display, dsb, PLANE_CSC_PREOFF(pipe, plane_id, 0), 0);
++	intel_de_write_dsb(display, dsb, PLANE_CSC_PREOFF(pipe, plane_id, 1), 0);
++	intel_de_write_dsb(display, dsb, PLANE_CSC_PREOFF(pipe, plane_id, 2), 0);
+ 
+-	intel_de_write_fw(i915, PLANE_CSC_POSTOFF(pipe, plane_id, 0), 0);
+-	intel_de_write_fw(i915, PLANE_CSC_POSTOFF(pipe, plane_id, 1), 0);
+-	intel_de_write_fw(i915, PLANE_CSC_POSTOFF(pipe, plane_id, 2), 0);
++	intel_de_write_dsb(display, dsb, PLANE_CSC_POSTOFF(pipe, plane_id, 0), 0);
++	intel_de_write_dsb(display, dsb, PLANE_CSC_POSTOFF(pipe, plane_id, 1), 0);
++	intel_de_write_dsb(display, dsb, PLANE_CSC_POSTOFF(pipe, plane_id, 2), 0);
+ }
+ 
+ static int icl_plane_color_plane(const struct intel_plane_state *plane_state)
+@@ -1264,11 +1272,12 @@ static int icl_plane_color_plane(const struct intel_plane_state *plane_state)
+ }
+ 
+ static void
+-skl_plane_update_noarm(struct intel_plane *plane,
++skl_plane_update_noarm(struct intel_dsb *dsb,
++		       struct intel_plane *plane,
+ 		       const struct intel_crtc_state *crtc_state,
+ 		       const struct intel_plane_state *plane_state)
+ {
+-	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
++	struct intel_display *display = to_intel_display(plane->base.dev);
+ 	enum plane_id plane_id = plane->id;
+ 	enum pipe pipe = plane->pipe;
+ 	u32 stride = skl_plane_stride(plane_state, 0);
+@@ -1283,21 +1292,23 @@ skl_plane_update_noarm(struct intel_plane *plane,
+ 		crtc_y = 0;
+ 	}
+ 
+-	intel_de_write_fw(dev_priv, PLANE_STRIDE(pipe, plane_id),
+-			  PLANE_STRIDE_(stride));
+-	intel_de_write_fw(dev_priv, PLANE_POS(pipe, plane_id),
+-			  PLANE_POS_Y(crtc_y) | PLANE_POS_X(crtc_x));
+-	intel_de_write_fw(dev_priv, PLANE_SIZE(pipe, plane_id),
+-			  PLANE_HEIGHT(src_h - 1) | PLANE_WIDTH(src_w - 1));
++	intel_de_write_dsb(display, dsb, PLANE_STRIDE(pipe, plane_id),
++			   PLANE_STRIDE_(stride));
++	intel_de_write_dsb(display, dsb, PLANE_POS(pipe, plane_id),
++			   PLANE_POS_Y(crtc_y) | PLANE_POS_X(crtc_x));
++	intel_de_write_dsb(display, dsb, PLANE_SIZE(pipe, plane_id),
++			   PLANE_HEIGHT(src_h - 1) | PLANE_WIDTH(src_w - 1));
+ 
+-	skl_write_plane_wm(plane, crtc_state);
++	skl_write_plane_wm(dsb, plane, crtc_state);
+ }
+ 
+ static void
+-skl_plane_update_arm(struct intel_plane *plane,
++skl_plane_update_arm(struct intel_dsb *dsb,
++		     struct intel_plane *plane,
+ 		     const struct intel_crtc_state *crtc_state,
+ 		     const struct intel_plane_state *plane_state)
+ {
++	struct intel_display *display = to_intel_display(plane->base.dev);
+ 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ 	enum plane_id plane_id = plane->id;
+ 	enum pipe pipe = plane->pipe;
+@@ -1317,22 +1328,26 @@ skl_plane_update_arm(struct intel_plane *plane,
+ 		plane_color_ctl = plane_state->color_ctl |
+ 			glk_plane_color_ctl_crtc(crtc_state);
+ 
+-	intel_de_write_fw(dev_priv, PLANE_KEYVAL(pipe, plane_id), skl_plane_keyval(plane_state));
+-	intel_de_write_fw(dev_priv, PLANE_KEYMSK(pipe, plane_id), skl_plane_keymsk(plane_state));
+-	intel_de_write_fw(dev_priv, PLANE_KEYMAX(pipe, plane_id), skl_plane_keymax(plane_state));
++	intel_de_write_dsb(display, dsb, PLANE_KEYVAL(pipe, plane_id),
++			   skl_plane_keyval(plane_state));
++	intel_de_write_dsb(display, dsb, PLANE_KEYMSK(pipe, plane_id),
++			   skl_plane_keymsk(plane_state));
++	intel_de_write_dsb(display, dsb, PLANE_KEYMAX(pipe, plane_id),
++			   skl_plane_keymax(plane_state));
+ 
+-	intel_de_write_fw(dev_priv, PLANE_OFFSET(pipe, plane_id),
+-			  PLANE_OFFSET_Y(y) | PLANE_OFFSET_X(x));
++	intel_de_write_dsb(display, dsb, PLANE_OFFSET(pipe, plane_id),
++			   PLANE_OFFSET_Y(y) | PLANE_OFFSET_X(x));
+ 
+-	intel_de_write_fw(dev_priv, PLANE_AUX_DIST(pipe, plane_id),
+-			  skl_plane_aux_dist(plane_state, 0));
++	intel_de_write_dsb(display, dsb, PLANE_AUX_DIST(pipe, plane_id),
++			   skl_plane_aux_dist(plane_state, 0));
+ 
+-	intel_de_write_fw(dev_priv, PLANE_AUX_OFFSET(pipe, plane_id),
+-			  PLANE_OFFSET_Y(plane_state->view.color_plane[1].y) |
+-			  PLANE_OFFSET_X(plane_state->view.color_plane[1].x));
++	intel_de_write_dsb(display, dsb, PLANE_AUX_OFFSET(pipe, plane_id),
++			   PLANE_OFFSET_Y(plane_state->view.color_plane[1].y) |
++			   PLANE_OFFSET_X(plane_state->view.color_plane[1].x));
+ 
+ 	if (DISPLAY_VER(dev_priv) >= 10)
+-		intel_de_write_fw(dev_priv, PLANE_COLOR_CTL(pipe, plane_id), plane_color_ctl);
++		intel_de_write_dsb(display, dsb, PLANE_COLOR_CTL(pipe, plane_id),
++				   plane_color_ctl);
+ 
+ 	/*
+ 	 * Enable the scaler before the plane so that we don't
+@@ -1349,17 +1364,19 @@ skl_plane_update_arm(struct intel_plane *plane,
+ 	 * disabled. Try to make the plane enable atomic by writing
+ 	 * the control register just before the surface register.
+ 	 */
+-	intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl);
+-	intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id),
+-			  skl_plane_surf(plane_state, 0));
++	intel_de_write_dsb(display, dsb, PLANE_CTL(pipe, plane_id),
++			   plane_ctl);
++	intel_de_write_dsb(display, dsb, PLANE_SURF(pipe, plane_id),
++			   skl_plane_surf(plane_state, 0));
+ }
+ 
+-static void icl_plane_update_sel_fetch_noarm(struct intel_plane *plane,
++static void icl_plane_update_sel_fetch_noarm(struct intel_dsb *dsb,
++					     struct intel_plane *plane,
+ 					     const struct intel_crtc_state *crtc_state,
+ 					     const struct intel_plane_state *plane_state,
+ 					     int color_plane)
+ {
+-	struct drm_i915_private *i915 = to_i915(plane->base.dev);
++	struct intel_display *display = to_intel_display(plane->base.dev);
+ 	enum pipe pipe = plane->pipe;
+ 	const struct drm_rect *clip;
+ 	u32 val;
+@@ -1376,7 +1393,7 @@ static void icl_plane_update_sel_fetch_noarm(struct intel_plane *plane,
+ 		y = (clip->y1 + plane_state->uapi.dst.y1);
+ 	val = y << 16;
+ 	val |= plane_state->uapi.dst.x1;
+-	intel_de_write_fw(i915, SEL_FETCH_PLANE_POS(pipe, plane->id), val);
++	intel_de_write_dsb(display, dsb, SEL_FETCH_PLANE_POS(pipe, plane->id), val);
+ 
+ 	x = plane_state->view.color_plane[color_plane].x;
+ 
+@@ -1391,20 +1408,21 @@ static void icl_plane_update_sel_fetch_noarm(struct intel_plane *plane,
+ 
+ 	val = y << 16 | x;
+ 
+-	intel_de_write_fw(i915, SEL_FETCH_PLANE_OFFSET(pipe, plane->id),
+-			  val);
++	intel_de_write_dsb(display, dsb, SEL_FETCH_PLANE_OFFSET(pipe, plane->id), val);
+ 
+ 	/* Sizes are 0 based */
+ 	val = (drm_rect_height(clip) - 1) << 16;
+ 	val |= (drm_rect_width(&plane_state->uapi.src) >> 16) - 1;
+-	intel_de_write_fw(i915, SEL_FETCH_PLANE_SIZE(pipe, plane->id), val);
++	intel_de_write_dsb(display, dsb, SEL_FETCH_PLANE_SIZE(pipe, plane->id), val);
+ }
+ 
+ static void
+-icl_plane_update_noarm(struct intel_plane *plane,
++icl_plane_update_noarm(struct intel_dsb *dsb,
++		       struct intel_plane *plane,
+ 		       const struct intel_crtc_state *crtc_state,
+ 		       const struct intel_plane_state *plane_state)
+ {
++	struct intel_display *display = to_intel_display(plane->base.dev);
+ 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ 	enum plane_id plane_id = plane->id;
+ 	enum pipe pipe = plane->pipe;
+@@ -1428,76 +1446,82 @@ icl_plane_update_noarm(struct intel_plane *plane,
+ 		crtc_y = 0;
+ 	}
+ 
+-	intel_de_write_fw(dev_priv, PLANE_STRIDE(pipe, plane_id),
+-			  PLANE_STRIDE_(stride));
+-	intel_de_write_fw(dev_priv, PLANE_POS(pipe, plane_id),
+-			  PLANE_POS_Y(crtc_y) | PLANE_POS_X(crtc_x));
+-	intel_de_write_fw(dev_priv, PLANE_SIZE(pipe, plane_id),
+-			  PLANE_HEIGHT(src_h - 1) | PLANE_WIDTH(src_w - 1));
++	intel_de_write_dsb(display, dsb, PLANE_STRIDE(pipe, plane_id),
++			   PLANE_STRIDE_(stride));
++	intel_de_write_dsb(display, dsb, PLANE_POS(pipe, plane_id),
++			   PLANE_POS_Y(crtc_y) | PLANE_POS_X(crtc_x));
++	intel_de_write_dsb(display, dsb, PLANE_SIZE(pipe, plane_id),
++			   PLANE_HEIGHT(src_h - 1) | PLANE_WIDTH(src_w - 1));
+ 
+-	intel_de_write_fw(dev_priv, PLANE_KEYVAL(pipe, plane_id), skl_plane_keyval(plane_state));
+-	intel_de_write_fw(dev_priv, PLANE_KEYMSK(pipe, plane_id), skl_plane_keymsk(plane_state));
+-	intel_de_write_fw(dev_priv, PLANE_KEYMAX(pipe, plane_id), skl_plane_keymax(plane_state));
++	intel_de_write_dsb(display, dsb, PLANE_KEYVAL(pipe, plane_id),
++			   skl_plane_keyval(plane_state));
++	intel_de_write_dsb(display, dsb, PLANE_KEYMSK(pipe, plane_id),
++			   skl_plane_keymsk(plane_state));
++	intel_de_write_dsb(display, dsb, PLANE_KEYMAX(pipe, plane_id),
++			   skl_plane_keymax(plane_state));
+ 
+-	intel_de_write_fw(dev_priv, PLANE_OFFSET(pipe, plane_id),
+-			  PLANE_OFFSET_Y(y) | PLANE_OFFSET_X(x));
++	intel_de_write_dsb(display, dsb, PLANE_OFFSET(pipe, plane_id),
++			   PLANE_OFFSET_Y(y) | PLANE_OFFSET_X(x));
+ 
+ 	if (intel_fb_is_rc_ccs_cc_modifier(fb->modifier)) {
+-		intel_de_write_fw(dev_priv, PLANE_CC_VAL(pipe, plane_id, 0),
+-				  lower_32_bits(plane_state->ccval));
+-		intel_de_write_fw(dev_priv, PLANE_CC_VAL(pipe, plane_id, 1),
+-				  upper_32_bits(plane_state->ccval));
++		intel_de_write_dsb(display, dsb, PLANE_CC_VAL(pipe, plane_id, 0),
++				   lower_32_bits(plane_state->ccval));
++		intel_de_write_dsb(display, dsb, PLANE_CC_VAL(pipe, plane_id, 1),
++				   upper_32_bits(plane_state->ccval));
+ 	}
+ 
+ 	/* FLAT CCS doesn't need to program AUX_DIST */
+ 	if (!HAS_FLAT_CCS(dev_priv) && DISPLAY_VER(dev_priv) < 20)
+-		intel_de_write_fw(dev_priv, PLANE_AUX_DIST(pipe, plane_id),
+-				  skl_plane_aux_dist(plane_state, color_plane));
++		intel_de_write_dsb(display, dsb, PLANE_AUX_DIST(pipe, plane_id),
++				   skl_plane_aux_dist(plane_state, color_plane));
+ 
+ 	if (icl_is_hdr_plane(dev_priv, plane_id))
+-		intel_de_write_fw(dev_priv, PLANE_CUS_CTL(pipe, plane_id),
+-				  plane_state->cus_ctl);
++		intel_de_write_dsb(display, dsb, PLANE_CUS_CTL(pipe, plane_id),
++				   plane_state->cus_ctl);
+ 
+-	intel_de_write_fw(dev_priv, PLANE_COLOR_CTL(pipe, plane_id), plane_color_ctl);
++	intel_de_write_dsb(display, dsb, PLANE_COLOR_CTL(pipe, plane_id),
++			   plane_color_ctl);
+ 
+ 	if (fb->format->is_yuv && icl_is_hdr_plane(dev_priv, plane_id))
+-		icl_program_input_csc(plane, crtc_state, plane_state);
++		icl_program_input_csc(dsb, plane, plane_state);
+ 
+-	skl_write_plane_wm(plane, crtc_state);
++	skl_write_plane_wm(dsb, plane, crtc_state);
+ 
+ 	/*
+ 	 * FIXME: pxp session invalidation can hit any time even at time of commit
+ 	 * or after the commit, display content will be garbage.
+ 	 */
+ 	if (plane_state->force_black)
+-		icl_plane_csc_load_black(plane);
++		icl_plane_csc_load_black(dsb, plane, crtc_state);
+ 
+-	icl_plane_update_sel_fetch_noarm(plane, crtc_state, plane_state, color_plane);
++	icl_plane_update_sel_fetch_noarm(dsb, plane, crtc_state, plane_state, color_plane);
+ }
+ 
+-static void icl_plane_update_sel_fetch_arm(struct intel_plane *plane,
++static void icl_plane_update_sel_fetch_arm(struct intel_dsb *dsb,
++					   struct intel_plane *plane,
+ 					   const struct intel_crtc_state *crtc_state,
+ 					   const struct intel_plane_state *plane_state)
+ {
+-	struct drm_i915_private *i915 = to_i915(plane->base.dev);
++	struct intel_display *display = to_intel_display(plane->base.dev);
+ 	enum pipe pipe = plane->pipe;
+ 
+ 	if (!crtc_state->enable_psr2_sel_fetch)
+ 		return;
+ 
+ 	if (drm_rect_height(&plane_state->psr2_sel_fetch_area) > 0)
+-		intel_de_write_fw(i915, SEL_FETCH_PLANE_CTL(pipe, plane->id),
+-				  SEL_FETCH_PLANE_CTL_ENABLE);
++		intel_de_write_dsb(display, dsb, SEL_FETCH_PLANE_CTL(pipe, plane->id),
++				   SEL_FETCH_PLANE_CTL_ENABLE);
+ 	else
+-		icl_plane_disable_sel_fetch_arm(plane, crtc_state);
++		icl_plane_disable_sel_fetch_arm(dsb, plane, crtc_state);
+ }
+ 
+ static void
+-icl_plane_update_arm(struct intel_plane *plane,
++icl_plane_update_arm(struct intel_dsb *dsb,
++		     struct intel_plane *plane,
+ 		     const struct intel_crtc_state *crtc_state,
+ 		     const struct intel_plane_state *plane_state)
+ {
+-	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
++	struct intel_display *display = to_intel_display(plane->base.dev);
+ 	enum plane_id plane_id = plane->id;
+ 	enum pipe pipe = plane->pipe;
+ 	int color_plane = icl_plane_color_plane(plane_state);
+@@ -1516,25 +1540,27 @@ icl_plane_update_arm(struct intel_plane *plane,
+ 	if (plane_state->scaler_id >= 0)
+ 		skl_program_plane_scaler(plane, crtc_state, plane_state);
+ 
+-	icl_plane_update_sel_fetch_arm(plane, crtc_state, plane_state);
++	icl_plane_update_sel_fetch_arm(dsb, plane, crtc_state, plane_state);
+ 
+ 	/*
+ 	 * The control register self-arms if the plane was previously
+ 	 * disabled. Try to make the plane enable atomic by writing
+ 	 * the control register just before the surface register.
+ 	 */
+-	intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl);
+-	intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id),
+-			  skl_plane_surf(plane_state, color_plane));
++	intel_de_write_dsb(display, dsb, PLANE_CTL(pipe, plane_id),
++			   plane_ctl);
++	intel_de_write_dsb(display, dsb, PLANE_SURF(pipe, plane_id),
++			   skl_plane_surf(plane_state, color_plane));
+ }
+ 
+ static void
+-skl_plane_async_flip(struct intel_plane *plane,
++skl_plane_async_flip(struct intel_dsb *dsb,
++		     struct intel_plane *plane,
+ 		     const struct intel_crtc_state *crtc_state,
+ 		     const struct intel_plane_state *plane_state,
+ 		     bool async_flip)
+ {
+-	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
++	struct intel_display *display = to_intel_display(plane->base.dev);
+ 	enum plane_id plane_id = plane->id;
+ 	enum pipe pipe = plane->pipe;
+ 	u32 plane_ctl = plane_state->ctl;
+@@ -1544,9 +1570,10 @@ skl_plane_async_flip(struct intel_plane *plane,
+ 	if (async_flip)
+ 		plane_ctl |= PLANE_CTL_ASYNC_FLIP;
+ 
+-	intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl);
+-	intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id),
+-			  skl_plane_surf(plane_state, 0));
++	intel_de_write_dsb(display, dsb, PLANE_CTL(pipe, plane_id),
++			   plane_ctl);
++	intel_de_write_dsb(display, dsb, PLANE_SURF(pipe, plane_id),
++			   skl_plane_surf(plane_state, 0));
+ }
+ 
+ static bool intel_format_is_p01x(u32 format)
+diff --git a/drivers/gpu/drm/imagination/pvr_fw_meta.c b/drivers/gpu/drm/imagination/pvr_fw_meta.c
+index c39beb70c3173e..6d13864851fc2e 100644
+--- a/drivers/gpu/drm/imagination/pvr_fw_meta.c
++++ b/drivers/gpu/drm/imagination/pvr_fw_meta.c
+@@ -527,8 +527,10 @@ pvr_meta_vm_map(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj)
+ static void
+ pvr_meta_vm_unmap(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj)
+ {
+-	pvr_vm_unmap(pvr_dev->kernel_vm_ctx, fw_obj->fw_mm_node.start,
+-		     fw_obj->fw_mm_node.size);
++	struct pvr_gem_object *pvr_obj = fw_obj->gem;
++
++	pvr_vm_unmap_obj(pvr_dev->kernel_vm_ctx, pvr_obj,
++			 fw_obj->fw_mm_node.start, fw_obj->fw_mm_node.size);
+ }
+ 
+ static bool
+diff --git a/drivers/gpu/drm/imagination/pvr_fw_trace.c b/drivers/gpu/drm/imagination/pvr_fw_trace.c
+index 73707daa4e52d1..5dbb636d7d4ffe 100644
+--- a/drivers/gpu/drm/imagination/pvr_fw_trace.c
++++ b/drivers/gpu/drm/imagination/pvr_fw_trace.c
+@@ -333,8 +333,8 @@ static int fw_trace_seq_show(struct seq_file *s, void *v)
+ 	if (sf_id == ROGUE_FW_SF_LAST)
+ 		return -EINVAL;
+ 
+-	timestamp = read_fw_trace(trace_seq_data, 1) |
+-		((u64)read_fw_trace(trace_seq_data, 2) << 32);
++	timestamp = ((u64)read_fw_trace(trace_seq_data, 1) << 32) |
++		read_fw_trace(trace_seq_data, 2);
+ 	timestamp = (timestamp & ~ROGUE_FWT_TIMESTAMP_TIME_CLRMSK) >>
+ 		ROGUE_FWT_TIMESTAMP_TIME_SHIFT;
+ 
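
The one-line pvr_fw_trace fix above swaps which trace word supplies the high half of the 64-bit timestamp: per the corrected code, word 1 carries bits 63:32 and word 2 bits 31:0. A trivial sketch of the corrected assembly; read_word() is a hypothetical stand-in for read_fw_trace().

#include <stdint.h>

/* Hypothetical stand-in for read_fw_trace(trace_seq_data, offset). */
static uint32_t read_word(const uint32_t *trace, int off)
{
	return trace[off];
}

static uint64_t fw_trace_timestamp(const uint32_t *trace)
{
	/* Word 1 is the high half, word 2 the low half. */
	return ((uint64_t)read_word(trace, 1) << 32) | read_word(trace, 2);
}
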
+diff --git a/drivers/gpu/drm/imagination/pvr_queue.c b/drivers/gpu/drm/imagination/pvr_queue.c
+index 20cb4601208214..87780cc7c0c322 100644
+--- a/drivers/gpu/drm/imagination/pvr_queue.c
++++ b/drivers/gpu/drm/imagination/pvr_queue.c
+@@ -109,12 +109,20 @@ pvr_queue_fence_get_driver_name(struct dma_fence *f)
+ 	return PVR_DRIVER_NAME;
+ }
+ 
++static void pvr_queue_fence_release_work(struct work_struct *w)
++{
++	struct pvr_queue_fence *fence = container_of(w, struct pvr_queue_fence, release_work);
++
++	pvr_context_put(fence->queue->ctx);
++	dma_fence_free(&fence->base);
++}
++
+ static void pvr_queue_fence_release(struct dma_fence *f)
+ {
+ 	struct pvr_queue_fence *fence = container_of(f, struct pvr_queue_fence, base);
++	struct pvr_device *pvr_dev = fence->queue->ctx->pvr_dev;
+ 
+-	pvr_context_put(fence->queue->ctx);
+-	dma_fence_free(f);
++	queue_work(pvr_dev->sched_wq, &fence->release_work);
+ }
+ 
+ static const char *
+@@ -268,6 +276,7 @@ pvr_queue_fence_init(struct dma_fence *f,
+ 
+ 	pvr_context_get(queue->ctx);
+ 	fence->queue = queue;
++	INIT_WORK(&fence->release_work, pvr_queue_fence_release_work);
+ 	dma_fence_init(&fence->base, fence_ops,
+ 		       &fence_ctx->lock, fence_ctx->id,
+ 		       atomic_inc_return(&fence_ctx->seqno));
+@@ -304,8 +313,9 @@ pvr_queue_cccb_fence_init(struct dma_fence *fence, struct pvr_queue *queue)
+ static void
+ pvr_queue_job_fence_init(struct dma_fence *fence, struct pvr_queue *queue)
+ {
+-	pvr_queue_fence_init(fence, queue, &pvr_queue_job_fence_ops,
+-			     &queue->job_fence_ctx);
++	if (!fence->ops)
++		pvr_queue_fence_init(fence, queue, &pvr_queue_job_fence_ops,
++				     &queue->job_fence_ctx);
+ }
+ 
+ /**
+diff --git a/drivers/gpu/drm/imagination/pvr_queue.h b/drivers/gpu/drm/imagination/pvr_queue.h
+index e06ced69302fca..93fe9ac9f58ccc 100644
+--- a/drivers/gpu/drm/imagination/pvr_queue.h
++++ b/drivers/gpu/drm/imagination/pvr_queue.h
+@@ -5,6 +5,7 @@
+ #define PVR_QUEUE_H
+ 
+ #include <drm/gpu_scheduler.h>
++#include <linux/workqueue.h>
+ 
+ #include "pvr_cccb.h"
+ #include "pvr_device.h"
+@@ -63,6 +64,9 @@ struct pvr_queue_fence {
+ 
+ 	/** @queue: Queue that created this fence. */
+ 	struct pvr_queue *queue;
++
++	/** @release_work: Fence release work structure. */
++	struct work_struct release_work;
+ };
+ 
+ /**
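
The pvr_queue change above stops tearing the fence down directly inside the dma_fence release callback, which can run in contexts where dropping the context reference immediately is not safe, and instead queues the final put and free onto the device's scheduler workqueue through the new release_work member. A self-contained sketch of that defer-the-release idiom; the work machinery is stubbed to run synchronously so the flow stays visible, and struct fake_fence stands in for pvr_queue_fence.

#include <stdio.h>
#include <stddef.h>

struct work { void (*fn)(struct work *); };

/* Stand-in for queue_work(): the real call only schedules the item;
 * here it runs synchronously to keep the sketch self-contained. */
static void queue_work_stub(struct work *w)
{
	w->fn(w);
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_fence {
	struct work release_work;
};

static void fence_release_work(struct work *w)
{
	struct fake_fence *f = container_of(w, struct fake_fence, release_work);

	/* Potentially sleeping cleanup (context put, final free) is
	 * safe here, in process context. */
	printf("releasing fence %p from work item\n", (void *)f);
}

/* Called on last put: do no real work, just punt to the workqueue,
 * mirroring the reworked pvr_queue_fence_release(). */
static void fence_release(struct fake_fence *f)
{
	queue_work_stub(&f->release_work);
}

int main(void)
{
	struct fake_fence f = { .release_work = { fence_release_work } };

	fence_release(&f);
	return 0;
}
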
+diff --git a/drivers/gpu/drm/imagination/pvr_vm.c b/drivers/gpu/drm/imagination/pvr_vm.c
+index 363f885a709826..2896fa7501b1cc 100644
+--- a/drivers/gpu/drm/imagination/pvr_vm.c
++++ b/drivers/gpu/drm/imagination/pvr_vm.c
+@@ -293,8 +293,9 @@ pvr_vm_bind_op_map_init(struct pvr_vm_bind_op *bind_op,
+ 
+ static int
+ pvr_vm_bind_op_unmap_init(struct pvr_vm_bind_op *bind_op,
+-			  struct pvr_vm_context *vm_ctx, u64 device_addr,
+-			  u64 size)
++			  struct pvr_vm_context *vm_ctx,
++			  struct pvr_gem_object *pvr_obj,
++			  u64 device_addr, u64 size)
+ {
+ 	int err;
+ 
+@@ -318,6 +319,7 @@ pvr_vm_bind_op_unmap_init(struct pvr_vm_bind_op *bind_op,
+ 		goto err_bind_op_fini;
+ 	}
+ 
++	bind_op->pvr_obj = pvr_obj;
+ 	bind_op->vm_ctx = vm_ctx;
+ 	bind_op->device_addr = device_addr;
+ 	bind_op->size = size;
+@@ -597,20 +599,6 @@ pvr_vm_create_context(struct pvr_device *pvr_dev, bool is_userspace_context)
+ 	return ERR_PTR(err);
+ }
+ 
+-/**
+- * pvr_vm_unmap_all() - Unmap all mappings associated with a VM context.
+- * @vm_ctx: Target VM context.
+- *
+- * This function ensures that no mappings are left dangling by unmapping them
+- * all in order of ascending device-virtual address.
+- */
+-void
+-pvr_vm_unmap_all(struct pvr_vm_context *vm_ctx)
+-{
+-	WARN_ON(pvr_vm_unmap(vm_ctx, vm_ctx->gpuvm_mgr.mm_start,
+-			     vm_ctx->gpuvm_mgr.mm_range));
+-}
+-
+ /**
+  * pvr_vm_context_release() - Teardown a VM context.
+  * @ref_count: Pointer to reference counter of the VM context.
+@@ -703,11 +691,7 @@ pvr_vm_lock_extra(struct drm_gpuvm_exec *vm_exec)
+ 	struct pvr_vm_bind_op *bind_op = vm_exec->extra.priv;
+ 	struct pvr_gem_object *pvr_obj = bind_op->pvr_obj;
+ 
+-	/* Unmap operations don't have an object to lock. */
+-	if (!pvr_obj)
+-		return 0;
+-
+-	/* Acquire lock on the GEM being mapped. */
++	/* Acquire lock on the GEM object being mapped/unmapped. */
+ 	return drm_exec_lock_obj(&vm_exec->exec, gem_from_pvr_gem(pvr_obj));
+ }
+ 
+@@ -772,8 +756,10 @@ pvr_vm_map(struct pvr_vm_context *vm_ctx, struct pvr_gem_object *pvr_obj,
+ }
+ 
+ /**
+- * pvr_vm_unmap() - Unmap an already mapped section of device-virtual memory.
++ * pvr_vm_unmap_obj_locked() - Unmap an already mapped section of device-virtual
++ * memory.
+  * @vm_ctx: Target VM context.
++ * @pvr_obj: Target PowerVR memory object.
+  * @device_addr: Virtual device address at the start of the target mapping.
+  * @size: Size of the target mapping.
+  *
+@@ -784,9 +770,13 @@ pvr_vm_map(struct pvr_vm_context *vm_ctx, struct pvr_gem_object *pvr_obj,
+  *  * Any error encountered while performing internal operations required to
+  *    destroy the mapping (returned from pvr_vm_gpuva_unmap or
+  *    pvr_vm_gpuva_remap).
++ *
++ * The vm_ctx->lock must be held when calling this function.
+  */
+-int
+-pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size)
++static int
++pvr_vm_unmap_obj_locked(struct pvr_vm_context *vm_ctx,
++			struct pvr_gem_object *pvr_obj,
++			u64 device_addr, u64 size)
+ {
+ 	struct pvr_vm_bind_op bind_op = {0};
+ 	struct drm_gpuvm_exec vm_exec = {
+@@ -799,11 +789,13 @@ pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size)
+ 		},
+ 	};
+ 
+-	int err = pvr_vm_bind_op_unmap_init(&bind_op, vm_ctx, device_addr,
+-					    size);
++	int err = pvr_vm_bind_op_unmap_init(&bind_op, vm_ctx, pvr_obj,
++					    device_addr, size);
+ 	if (err)
+ 		return err;
+ 
++	pvr_gem_object_get(pvr_obj);
++
+ 	err = drm_gpuvm_exec_lock(&vm_exec);
+ 	if (err)
+ 		goto err_cleanup;
+@@ -818,6 +810,96 @@ pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size)
+ 	return err;
+ }
+ 
++/**
++ * pvr_vm_unmap_obj() - Unmap an already mapped section of device-virtual
++ * memory.
++ * @vm_ctx: Target VM context.
++ * @pvr_obj: Target PowerVR memory object.
++ * @device_addr: Virtual device address at the start of the target mapping.
++ * @size: Size of the target mapping.
++ *
++ * Return:
++ *  * 0 on success,
++ *  * Any error encountered by pvr_vm_unmap_obj_locked.
++ */
++int
++pvr_vm_unmap_obj(struct pvr_vm_context *vm_ctx, struct pvr_gem_object *pvr_obj,
++		 u64 device_addr, u64 size)
++{
++	int err;
++
++	mutex_lock(&vm_ctx->lock);
++	err = pvr_vm_unmap_obj_locked(vm_ctx, pvr_obj, device_addr, size);
++	mutex_unlock(&vm_ctx->lock);
++
++	return err;
++}
++
++/**
++ * pvr_vm_unmap() - Unmap an already mapped section of device-virtual memory.
++ * @vm_ctx: Target VM context.
++ * @device_addr: Virtual device address at the start of the target mapping.
++ * @size: Size of the target mapping.
++ *
++ * Return:
++ *  * 0 on success,
++ *  * Any error encountered by drm_gpuva_find,
++ *  * Any error encountered by pvr_vm_unmap_obj_locked.
++ */
++int
++pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size)
++{
++	struct pvr_gem_object *pvr_obj;
++	struct drm_gpuva *va;
++	int err;
++
++	mutex_lock(&vm_ctx->lock);
++
++	va = drm_gpuva_find(&vm_ctx->gpuvm_mgr, device_addr, size);
++	if (va) {
++		pvr_obj = gem_to_pvr_gem(va->gem.obj);
++		err = pvr_vm_unmap_obj_locked(vm_ctx, pvr_obj,
++					      va->va.addr, va->va.range);
++	} else {
++		err = -ENOENT;
++	}
++
++	mutex_unlock(&vm_ctx->lock);
++
++	return err;
++}
++
++/**
++ * pvr_vm_unmap_all() - Unmap all mappings associated with a VM context.
++ * @vm_ctx: Target VM context.
++ *
++ * This function ensures that no mappings are left dangling by unmapping them
++ * all in order of ascending device-virtual address.
++ */
++void
++pvr_vm_unmap_all(struct pvr_vm_context *vm_ctx)
++{
++	mutex_lock(&vm_ctx->lock);
++
++	for (;;) {
++		struct pvr_gem_object *pvr_obj;
++		struct drm_gpuva *va;
++
++		va = drm_gpuva_find_first(&vm_ctx->gpuvm_mgr,
++					  vm_ctx->gpuvm_mgr.mm_start,
++					  vm_ctx->gpuvm_mgr.mm_range);
++		if (!va)
++			break;
++
++		pvr_obj = gem_to_pvr_gem(va->gem.obj);
++
++		WARN_ON(pvr_vm_unmap_obj_locked(vm_ctx, pvr_obj,
++						va->va.addr, va->va.range));
++	}
++
++	mutex_unlock(&vm_ctx->lock);
++}
++
+ /* Static data areas are determined by firmware. */
+ static const struct drm_pvr_static_data_area static_data_areas[] = {
+ 	{
+diff --git a/drivers/gpu/drm/imagination/pvr_vm.h b/drivers/gpu/drm/imagination/pvr_vm.h
+index 79406243617c1f..b0528dffa7f1ba 100644
+--- a/drivers/gpu/drm/imagination/pvr_vm.h
++++ b/drivers/gpu/drm/imagination/pvr_vm.h
+@@ -38,6 +38,9 @@ struct pvr_vm_context *pvr_vm_create_context(struct pvr_device *pvr_dev,
+ int pvr_vm_map(struct pvr_vm_context *vm_ctx,
+ 	       struct pvr_gem_object *pvr_obj, u64 pvr_obj_offset,
+ 	       u64 device_addr, u64 size);
++int pvr_vm_unmap_obj(struct pvr_vm_context *vm_ctx,
++		     struct pvr_gem_object *pvr_obj,
++		     u64 device_addr, u64 size);
+ int pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size);
+ void pvr_vm_unmap_all(struct pvr_vm_context *vm_ctx);
+ 
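
The pvr_vm rework above makes every unmap path carry the backing GEM object: pvr_vm_unmap() now looks the mapping up with drm_gpuva_find() under the VM lock, returns -ENOENT when nothing is mapped there, and then unmaps by object so the exec-lock step always has a GEM object to lock. A simplified standalone sketch of that find-then-unmap shape; struct map_entry and find_mapping() are hypothetical stand-ins for drm_gpuva and drm_gpuva_find(), and the VM mutex is elided.

#include <stdint.h>
#include <stddef.h>
#include <errno.h>

struct map_entry {
	uint64_t addr, size;
	void *obj;		/* backing object; NULL = slot unused */
};

static struct map_entry *find_mapping(struct map_entry *tbl, int n,
				      uint64_t addr, uint64_t size)
{
	for (int i = 0; i < n; i++)
		if (tbl[i].obj && tbl[i].addr == addr && tbl[i].size == size)
			return &tbl[i];
	return NULL;
}

/* Unmap with the object in hand, as pvr_vm_unmap_obj_locked() does. */
static int unmap_obj_locked(struct map_entry *e)
{
	e->obj = NULL;
	return 0;
}

static int vm_unmap(struct map_entry *tbl, int n, uint64_t addr, uint64_t size)
{
	struct map_entry *e = find_mapping(tbl, n, addr, size);

	return e ? unmap_obj_locked(e) : -ENOENT;	/* as in the patch */
}

int main(void)
{
	int dummy_obj;
	struct map_entry tbl[1] = { { 0x1000, 0x1000, &dummy_obj } };

	return vm_unmap(tbl, 1, 0x1000, 0x1000);	/* 0 on success */
}
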
+diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
+index ceef470c9fbfcf..1050a4617fc15c 100644
+--- a/drivers/gpu/drm/nouveau/Kconfig
++++ b/drivers/gpu/drm/nouveau/Kconfig
+@@ -4,6 +4,8 @@ config DRM_NOUVEAU
+ 	depends on DRM && PCI && MMU
+ 	select IOMMU_API
+ 	select FW_LOADER
++	select FW_CACHE if PM_SLEEP
++	select DRM_CLIENT_SELECTION
+ 	select DRM_DISPLAY_DP_HELPER
+ 	select DRM_DISPLAY_HDMI_HELPER
+ 	select DRM_DISPLAY_HELPER
+diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
+index 34985771b2a285..6e5adab034713f 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
++++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
+@@ -31,6 +31,7 @@
+ #include <linux/dynamic_debug.h>
+ 
+ #include <drm/drm_aperture.h>
++#include <drm/drm_client_setup.h>
+ #include <drm/drm_drv.h>
+ #include <drm/drm_fbdev_ttm.h>
+ #include <drm/drm_gem_ttm_helper.h>
+@@ -836,6 +837,7 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
+ {
+ 	struct nvkm_device *device;
+ 	struct nouveau_drm *drm;
++	const struct drm_format_info *format;
+ 	int ret;
+ 
+ 	if (vga_switcheroo_client_probe_defer(pdev))
+@@ -873,9 +875,11 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
+ 		goto fail_pci;
+ 
+ 	if (drm->client.device.info.ram_size <= 32 * 1024 * 1024)
+-		drm_fbdev_ttm_setup(drm->dev, 8);
++		format = drm_format_info(DRM_FORMAT_C8);
+ 	else
+-		drm_fbdev_ttm_setup(drm->dev, 32);
++		format = NULL;
++
++	drm_client_setup(drm->dev, format);
+ 
+ 	quirk_broken_nv_runpm(pdev);
+ 	return 0;
+@@ -1318,6 +1322,8 @@ driver_stub = {
+ 	.dumb_create = nouveau_display_dumb_create,
+ 	.dumb_map_offset = drm_gem_ttm_dumb_map_offset,
+ 
++	DRM_FBDEV_TTM_DRIVER_OPS,
++
+ 	.name = DRIVER_NAME,
+ 	.desc = DRIVER_DESC,
+ #ifdef GIT_REVISION
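
The nouveau probe change above replaces the two bpp-specific drm_fbdev_ttm_setup() calls with a single drm_client_setup(), passing DRM_FORMAT_C8 for cards with at most 32 MiB of VRAM and NULL (the driver default) otherwise; the fbdev hooks themselves move into the driver struct via DRM_FBDEV_TTM_DRIVER_OPS. A tiny sketch of the same threshold; pick_fbdev_format() and the FMT_* names are made up for illustration.

#include <stdint.h>

enum fb_format { FMT_DEFAULT, FMT_C8 };

static enum fb_format pick_fbdev_format(uint64_t ram_size)
{
	/* An 8bpp palette format keeps the fbdev footprint small on
	 * cards with very little VRAM. */
	if (ram_size <= 32ULL * 1024 * 1024)
		return FMT_C8;
	return FMT_DEFAULT;	/* NULL format in the real call */
}
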
+diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
+index 05c13102a8cb8f..d22889fbfa9c83 100644
+--- a/drivers/gpu/drm/radeon/r300.c
++++ b/drivers/gpu/drm/radeon/r300.c
+@@ -359,7 +359,8 @@ int r300_mc_wait_for_idle(struct radeon_device *rdev)
+ 	return -1;
+ }
+ 
+-static void r300_gpu_init(struct radeon_device *rdev)
++/* rs400_gpu_init also calls this! */
++void r300_gpu_init(struct radeon_device *rdev)
+ {
+ 	uint32_t gb_tile_config, tmp;
+ 
+diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
+index 1e00f6b99f94b6..8f5e07834fcc60 100644
+--- a/drivers/gpu/drm/radeon/radeon_asic.h
++++ b/drivers/gpu/drm/radeon/radeon_asic.h
+@@ -165,6 +165,7 @@ void r200_set_safe_registers(struct radeon_device *rdev);
+  */
+ extern int r300_init(struct radeon_device *rdev);
+ extern void r300_fini(struct radeon_device *rdev);
++extern void r300_gpu_init(struct radeon_device *rdev);
+ extern int r300_suspend(struct radeon_device *rdev);
+ extern int r300_resume(struct radeon_device *rdev);
+ extern int r300_asic_reset(struct radeon_device *rdev, bool hard);
+diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
+index d6c18fd740ec6a..13cd0a688a65cb 100644
+--- a/drivers/gpu/drm/radeon/rs400.c
++++ b/drivers/gpu/drm/radeon/rs400.c
+@@ -256,8 +256,22 @@ int rs400_mc_wait_for_idle(struct radeon_device *rdev)
+ 
+ static void rs400_gpu_init(struct radeon_device *rdev)
+ {
+-	/* FIXME: is this correct ? */
+-	r420_pipes_init(rdev);
++	/* Earlier code called r420_pipes_init() and then
++	 * rs400_mc_wait_for_idle(rdev). The problem is that on
++	 * at least one Mobility Radeon Xpress 200M RC410 card
++	 * that takes this code path, the r420 pipe initialization
++	 * method ends up with num_gb_pipes == 3 while the card
++	 * appears to have only one pipe.
++	 *
++	 * The problem showed up as HyperZ glitches, see:
++	 * https://bugs.freedesktop.org/show_bug.cgi?id=110897
++	 *
++	 * Delegating initialization to the r300 code works and
++	 * results in correct pipe numbers. The rs400 cards are
++	 * said to be r300-class rather than r400-class cards.
++	 */
++	r300_gpu_init(rdev);
++
+ 	if (rs400_mc_wait_for_idle(rdev)) {
+ 		pr_warn("rs400: Failed to wait MC idle while programming pipes. Bad things might happen. %08x\n",
+ 			RREG32(RADEON_MC_STATUS));
+diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
+index c75302ca3427ce..f56e77e7f6d022 100644
+--- a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
++++ b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
+@@ -21,7 +21,7 @@
+  *
+  */
+ 
+-#if !defined(_GPU_SCHED_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
++#if !defined(_GPU_SCHED_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+ #define _GPU_SCHED_TRACE_H_
+ 
+ #include <linux/stringify.h>
+@@ -106,7 +106,7 @@ TRACE_EVENT(drm_sched_job_wait_dep,
+ 		      __entry->seqno)
+ );
+ 
+-#endif
++#endif /* _GPU_SCHED_TRACE_H_ */
+ 
+ /* This part must be outside protection */
+ #undef TRACE_INCLUDE_PATH
+diff --git a/drivers/gpu/drm/xe/display/xe_plane_initial.c b/drivers/gpu/drm/xe/display/xe_plane_initial.c
+index a50ab9eae40ae4..f99d38cc5d8e91 100644
+--- a/drivers/gpu/drm/xe/display/xe_plane_initial.c
++++ b/drivers/gpu/drm/xe/display/xe_plane_initial.c
+@@ -194,8 +194,6 @@ intel_find_initial_plane_obj(struct intel_crtc *crtc,
+ 		to_intel_plane(crtc->base.primary);
+ 	struct intel_plane_state *plane_state =
+ 		to_intel_plane_state(plane->base.state);
+-	struct intel_crtc_state *crtc_state =
+-		to_intel_crtc_state(crtc->base.state);
+ 	struct drm_framebuffer *fb;
+ 	struct i915_vma *vma;
+ 
+@@ -241,14 +239,6 @@ intel_find_initial_plane_obj(struct intel_crtc *crtc,
+ 	atomic_or(plane->frontbuffer_bit, &to_intel_frontbuffer(fb)->bits);
+ 
+ 	plane_config->vma = vma;
+-
+-	/*
+-	 * Flip to the newly created mapping ASAP, so we can re-use the
+-	 * first part of GGTT for WOPCM, prevent flickering, and prevent
+-	 * the lookup of sysmem scratch pages.
+-	 */
+-	plane->check_plane(crtc_state, plane_state);
+-	plane->async_flip(plane, crtc_state, plane_state, true);
+ 	return;
+ 
+ nofb:
+diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
+index b940688c361356..98fe8573e054e9 100644
+--- a/drivers/gpu/drm/xe/xe_gt.c
++++ b/drivers/gpu/drm/xe/xe_gt.c
+@@ -379,9 +379,7 @@ int xe_gt_init_early(struct xe_gt *gt)
+ 	if (err)
+ 		return err;
+ 
+-	xe_wa_process_gt(gt);
+ 	xe_wa_process_oob(gt);
+-	xe_tuning_process_gt(gt);
+ 
+ 	xe_force_wake_init_gt(gt, gt_to_fw(gt));
+ 	spin_lock_init(&gt->global_invl_lock);
+@@ -469,6 +467,8 @@ static int all_fw_domain_init(struct xe_gt *gt)
+ 		goto err_hw_fence_irq;
+ 
+ 	xe_gt_mcr_set_implicit_defaults(gt);
++	xe_wa_process_gt(gt);
++	xe_tuning_process_gt(gt);
+ 	xe_reg_sr_apply_mmio(&gt->reg_sr, gt);
+ 
+ 	err = xe_gt_clock_init(gt);
+diff --git a/drivers/gpu/drm/xe/xe_hmm.c b/drivers/gpu/drm/xe/xe_hmm.c
+index 2c32dc46f7d482..d7a9408b3a97c8 100644
+--- a/drivers/gpu/drm/xe/xe_hmm.c
++++ b/drivers/gpu/drm/xe/xe_hmm.c
+@@ -19,11 +19,10 @@ static u64 xe_npages_in_range(unsigned long start, unsigned long end)
+ 	return (end - start) >> PAGE_SHIFT;
+ }
+ 
+-/*
++/**
+  * xe_mark_range_accessed() - mark a range as accessed, so the core mm
+  * has this information for memory eviction or write back to
+  * hard disk
+- *
+  * @range: the range to mark
+  * @write: if we write to this range, we mark pages in this range
+  * as dirty
+@@ -43,15 +42,51 @@ static void xe_mark_range_accessed(struct hmm_range *range, bool write)
+ 	}
+ }
+ 
+-/*
++static int xe_alloc_sg(struct xe_device *xe, struct sg_table *st,
++		       struct hmm_range *range, struct rw_semaphore *notifier_sem)
++{
++	unsigned long i, npages, hmm_pfn;
++	unsigned long num_chunks = 0;
++	int ret;
++
++	/* HMM docs say this is needed. */
++	ret = down_read_interruptible(notifier_sem);
++	if (ret)
++		return ret;
++
++	if (mmu_interval_read_retry(range->notifier, range->notifier_seq)) {
++		up_read(notifier_sem);
++		return -EAGAIN;
++	}
++
++	npages = xe_npages_in_range(range->start, range->end);
++	for (i = 0; i < npages;) {
++		unsigned long len;
++
++		hmm_pfn = range->hmm_pfns[i];
++		xe_assert(xe, hmm_pfn & HMM_PFN_VALID);
++
++		len = 1UL << hmm_pfn_to_map_order(hmm_pfn);
++
++		/* If order > 0 the page may extend beyond range->start */
++		len -= (hmm_pfn & ~HMM_PFN_FLAGS) & (len - 1);
++		i += len;
++		num_chunks++;
++	}
++	up_read(notifier_sem);
++
++	return sg_alloc_table(st, num_chunks, GFP_KERNEL);
++}
++
++/**
+  * xe_build_sg() - build a scatter gather table for all the physical pages/pfn
+  * in an hmm_range, dma-mapping pages if necessary. The dma-address is saved
+  * in the sg table and will be used to program the GPU page table later.
+- *
+  * @xe: the xe device that will access the dma-address in the sg table
+  * @range: the hmm range that we build the sg table from. range->hmm_pfns[]
+  * has the pfn numbers of pages that back up this hmm address range.
+  * @st: pointer to the sg table.
++ * @notifier_sem: The xe notifier lock.
+  * @write: whether we write to this range. This decides dma map direction
+  * for system pages. If write, we map it bi-directional; otherwise
+  * DMA_TO_DEVICE
+@@ -78,43 +113,84 @@ static void xe_mark_range_accessed(struct hmm_range *range, bool write)
+  * Returns 0 if successful; -ENOMEM if fails to allocate memory
+  */
+ static int xe_build_sg(struct xe_device *xe, struct hmm_range *range,
+-		       struct sg_table *st, bool write)
++		       struct sg_table *st,
++		       struct rw_semaphore *notifier_sem,
++		       bool write)
+ {
++	unsigned long npages = xe_npages_in_range(range->start, range->end);
+ 	struct device *dev = xe->drm.dev;
+-	struct page **pages;
+-	u64 i, npages;
+-	int ret;
++	struct scatterlist *sgl;
++	struct page *page;
++	unsigned long i, j;
+ 
+-	npages = xe_npages_in_range(range->start, range->end);
+-	pages = kvmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
+-	if (!pages)
+-		return -ENOMEM;
++	lockdep_assert_held(notifier_sem);
+ 
+-	for (i = 0; i < npages; i++) {
+-		pages[i] = hmm_pfn_to_page(range->hmm_pfns[i]);
+-		xe_assert(xe, !is_device_private_page(pages[i]));
++	i = 0;
++	for_each_sg(st->sgl, sgl, st->nents, j) {
++		unsigned long hmm_pfn, size;
++
++		hmm_pfn = range->hmm_pfns[i];
++		page = hmm_pfn_to_page(hmm_pfn);
++		xe_assert(xe, !is_device_private_page(page));
++
++		size = 1UL << hmm_pfn_to_map_order(hmm_pfn);
++		size -= page_to_pfn(page) & (size - 1);
++		i += size;
++
++		if (unlikely(j == st->nents - 1)) {
++			if (i > npages)
++				size -= (i - npages);
++			sg_mark_end(sgl);
++		}
++		sg_set_page(sgl, page, size << PAGE_SHIFT, 0);
+ 	}
++	xe_assert(xe, i == npages);
+ 
+-	ret = sg_alloc_table_from_pages_segment(st, pages, npages, 0, npages << PAGE_SHIFT,
+-						xe_sg_segment_size(dev), GFP_KERNEL);
+-	if (ret)
+-		goto free_pages;
++	return dma_map_sgtable(dev, st, write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE,
++			       DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_NO_KERNEL_MAPPING);
++}
++
++static void xe_hmm_userptr_set_mapped(struct xe_userptr_vma *uvma)
++{
++	struct xe_userptr *userptr = &uvma->userptr;
++	struct xe_vm *vm = xe_vma_vm(&uvma->vma);
++
++	lockdep_assert_held_write(&vm->lock);
++	lockdep_assert_held(&vm->userptr.notifier_lock);
++
++	mutex_lock(&userptr->unmap_mutex);
++	xe_assert(vm->xe, !userptr->mapped);
++	userptr->mapped = true;
++	mutex_unlock(&userptr->unmap_mutex);
++}
++
++void xe_hmm_userptr_unmap(struct xe_userptr_vma *uvma)
++{
++	struct xe_userptr *userptr = &uvma->userptr;
++	struct xe_vma *vma = &uvma->vma;
++	bool write = !xe_vma_read_only(vma);
++	struct xe_vm *vm = xe_vma_vm(vma);
++	struct xe_device *xe = vm->xe;
+ 
+-	ret = dma_map_sgtable(dev, st, write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE,
+-			      DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_NO_KERNEL_MAPPING);
+-	if (ret) {
+-		sg_free_table(st);
+-		st = NULL;
++	if (!lockdep_is_held_type(&vm->userptr.notifier_lock, 0) &&
++	    !lockdep_is_held_type(&vm->lock, 0) &&
++	    !(vma->gpuva.flags & XE_VMA_DESTROYED)) {
++		/* Don't unmap in exec critical section. */
++		xe_vm_assert_held(vm);
++		/* Don't unmap while mapping the sg. */
++		lockdep_assert_held(&vm->lock);
+ 	}
+ 
+-free_pages:
+-	kvfree(pages);
+-	return ret;
++	mutex_lock(&userptr->unmap_mutex);
++	if (userptr->sg && userptr->mapped)
++		dma_unmap_sgtable(xe->drm.dev, userptr->sg,
++				  write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE, 0);
++	userptr->mapped = false;
++	mutex_unlock(&userptr->unmap_mutex);
+ }
+ 
+-/*
++/**
+  * xe_hmm_userptr_free_sg() - Free the scatter gather table of userptr
+- *
+  * @uvma: the userptr vma which hold the scatter gather table
+  *
+  * With function xe_userptr_populate_range, we allocate storage of
+@@ -124,16 +200,9 @@ static int xe_build_sg(struct xe_device *xe, struct hmm_range *range,
+ void xe_hmm_userptr_free_sg(struct xe_userptr_vma *uvma)
+ {
+ 	struct xe_userptr *userptr = &uvma->userptr;
+-	struct xe_vma *vma = &uvma->vma;
+-	bool write = !xe_vma_read_only(vma);
+-	struct xe_vm *vm = xe_vma_vm(vma);
+-	struct xe_device *xe = vm->xe;
+-	struct device *dev = xe->drm.dev;
+-
+-	xe_assert(xe, userptr->sg);
+-	dma_unmap_sgtable(dev, userptr->sg,
+-			  write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE, 0);
+ 
++	xe_assert(xe_vma_vm(&uvma->vma)->xe, userptr->sg);
++	xe_hmm_userptr_unmap(uvma);
+ 	sg_free_table(userptr->sg);
+ 	userptr->sg = NULL;
+ }
+@@ -166,13 +235,20 @@ int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma,
+ {
+ 	unsigned long timeout =
+ 		jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
+-	unsigned long *pfns, flags = HMM_PFN_REQ_FAULT;
++	unsigned long *pfns;
+ 	struct xe_userptr *userptr;
+ 	struct xe_vma *vma = &uvma->vma;
+ 	u64 userptr_start = xe_vma_userptr(vma);
+ 	u64 userptr_end = userptr_start + xe_vma_size(vma);
+ 	struct xe_vm *vm = xe_vma_vm(vma);
+-	struct hmm_range hmm_range;
++	struct hmm_range hmm_range = {
++		.pfn_flags_mask = 0, /* ignore pfns */
++		.default_flags = HMM_PFN_REQ_FAULT,
++		.start = userptr_start,
++		.end = userptr_end,
++		.notifier = &uvma->userptr.notifier,
++		.dev_private_owner = vm->xe,
++	};
+ 	bool write = !xe_vma_read_only(vma);
+ 	unsigned long notifier_seq;
+ 	u64 npages;
+@@ -199,19 +275,14 @@ int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma,
+ 		return -ENOMEM;
+ 
+ 	if (write)
+-		flags |= HMM_PFN_REQ_WRITE;
++		hmm_range.default_flags |= HMM_PFN_REQ_WRITE;
+ 
+ 	if (!mmget_not_zero(userptr->notifier.mm)) {
+ 		ret = -EFAULT;
+ 		goto free_pfns;
+ 	}
+ 
+-	hmm_range.default_flags = flags;
+ 	hmm_range.hmm_pfns = pfns;
+-	hmm_range.notifier = &userptr->notifier;
+-	hmm_range.start = userptr_start;
+-	hmm_range.end = userptr_end;
+-	hmm_range.dev_private_owner = vm->xe;
+ 
+ 	while (true) {
+ 		hmm_range.notifier_seq = mmu_interval_read_begin(&userptr->notifier);
+@@ -238,16 +309,37 @@ int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma,
+ 	if (ret)
+ 		goto free_pfns;
+ 
+-	ret = xe_build_sg(vm->xe, &hmm_range, &userptr->sgt, write);
++	ret = xe_alloc_sg(vm->xe, &userptr->sgt, &hmm_range, &vm->userptr.notifier_lock);
+ 	if (ret)
+ 		goto free_pfns;
+ 
++	ret = down_read_interruptible(&vm->userptr.notifier_lock);
++	if (ret)
++		goto free_st;
++
++	if (mmu_interval_read_retry(hmm_range.notifier, hmm_range.notifier_seq)) {
++		ret = -EAGAIN;
++		goto out_unlock;
++	}
++
++	ret = xe_build_sg(vm->xe, &hmm_range, &userptr->sgt,
++			  &vm->userptr.notifier_lock, write);
++	if (ret)
++		goto out_unlock;
++
+ 	xe_mark_range_accessed(&hmm_range, write);
+ 	userptr->sg = &userptr->sgt;
++	xe_hmm_userptr_set_mapped(uvma);
+ 	userptr->notifier_seq = hmm_range.notifier_seq;
++	up_read(&vm->userptr.notifier_lock);
++	kvfree(pfns);
++	return 0;
+ 
++out_unlock:
++	up_read(&vm->userptr.notifier_lock);
++free_st:
++	sg_free_table(&userptr->sgt);
+ free_pfns:
+ 	kvfree(pfns);
+ 	return ret;
+ }
+-
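
The reworked populate path above snapshots the notifier sequence,
builds the sg table, and re-validates with mmu_interval_read_retry()
under the notifier lock, returning -EAGAIN so the caller can retry
after a concurrent invalidation. A minimal userspace model of that
begin/work/retry idiom (hypothetical names, not the kernel API):

  #include <stdatomic.h>
  #include <stdbool.h>

  static atomic_ulong seq;		/* bumped by the invalidation side */

  static unsigned long read_begin(void)
  {
  	return atomic_load(&seq);
  }

  static bool read_retry(unsigned long snap)
  {
  	return atomic_load(&seq) != snap;	/* true: state changed, redo */
  }

  static void invalidate(void)
  {
  	atomic_fetch_add(&seq, 1);	/* what the notifier side does */
  }

  static int populate(void)
  {
  	unsigned long snap;

  	do {
  		snap = read_begin();
  		/* fault pages and build the sg table here */
  	} while (read_retry(snap));	/* raced with invalidate() */
  	return 0;
  }
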
+diff --git a/drivers/gpu/drm/xe/xe_hmm.h b/drivers/gpu/drm/xe/xe_hmm.h
+index 909dc2bdcd97ee..0ea98d8e7bbc76 100644
+--- a/drivers/gpu/drm/xe/xe_hmm.h
++++ b/drivers/gpu/drm/xe/xe_hmm.h
+@@ -3,9 +3,16 @@
+  * Copyright © 2024 Intel Corporation
+  */
+ 
++#ifndef _XE_HMM_H_
++#define _XE_HMM_H_
++
+ #include <linux/types.h>
+ 
+ struct xe_userptr_vma;
+ 
+ int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma, bool is_mm_mmap_locked);
++
+ void xe_hmm_userptr_free_sg(struct xe_userptr_vma *uvma);
++
++void xe_hmm_userptr_unmap(struct xe_userptr_vma *uvma);
++#endif
+diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
+index 797576690356f2..230cf47fb9c5ee 100644
+--- a/drivers/gpu/drm/xe/xe_pt.c
++++ b/drivers/gpu/drm/xe/xe_pt.c
+@@ -28,6 +28,8 @@ struct xe_pt_dir {
+ 	struct xe_pt pt;
+ 	/** @children: Array of page-table child nodes */
+ 	struct xe_ptw *children[XE_PDES];
++	/** @staging: Array of page-table staging nodes */
++	struct xe_ptw *staging[XE_PDES];
+ };
+ 
+ #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
+@@ -48,9 +50,10 @@ static struct xe_pt_dir *as_xe_pt_dir(struct xe_pt *pt)
+ 	return container_of(pt, struct xe_pt_dir, pt);
+ }
+ 
+-static struct xe_pt *xe_pt_entry(struct xe_pt_dir *pt_dir, unsigned int index)
++static struct xe_pt *
++xe_pt_entry_staging(struct xe_pt_dir *pt_dir, unsigned int index)
+ {
+-	return container_of(pt_dir->children[index], struct xe_pt, base);
++	return container_of(pt_dir->staging[index], struct xe_pt, base);
+ }
+ 
+ static u64 __xe_pt_empty_pte(struct xe_tile *tile, struct xe_vm *vm,
+@@ -125,6 +128,7 @@ struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile,
+ 	}
+ 	pt->bo = bo;
+ 	pt->base.children = level ? as_xe_pt_dir(pt)->children : NULL;
++	pt->base.staging = level ? as_xe_pt_dir(pt)->staging : NULL;
+ 
+ 	if (vm->xef)
+ 		xe_drm_client_add_bo(vm->xef->client, pt->bo);
+@@ -205,8 +209,8 @@ void xe_pt_destroy(struct xe_pt *pt, u32 flags, struct llist_head *deferred)
+ 		struct xe_pt_dir *pt_dir = as_xe_pt_dir(pt);
+ 
+ 		for (i = 0; i < XE_PDES; i++) {
+-			if (xe_pt_entry(pt_dir, i))
+-				xe_pt_destroy(xe_pt_entry(pt_dir, i), flags,
++			if (xe_pt_entry_staging(pt_dir, i))
++				xe_pt_destroy(xe_pt_entry_staging(pt_dir, i), flags,
+ 					      deferred);
+ 		}
+ 	}
+@@ -375,8 +379,10 @@ xe_pt_insert_entry(struct xe_pt_stage_bind_walk *xe_walk, struct xe_pt *parent,
+ 		/* Continue building a non-connected subtree. */
+ 		struct iosys_map *map = &parent->bo->vmap;
+ 
+-		if (unlikely(xe_child))
++		if (unlikely(xe_child)) {
+ 			parent->base.children[offset] = &xe_child->base;
++			parent->base.staging[offset] = &xe_child->base;
++		}
+ 
+ 		xe_pt_write(xe_walk->vm->xe, map, offset, pte);
+ 		parent->num_live++;
+@@ -613,6 +619,7 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
+ 			.ops = &xe_pt_stage_bind_ops,
+ 			.shifts = xe_normal_pt_shifts,
+ 			.max_level = XE_PT_HIGHEST_LEVEL,
++			.staging = true,
+ 		},
+ 		.vm = xe_vma_vm(vma),
+ 		.tile = tile,
+@@ -872,7 +879,7 @@ static void xe_pt_cancel_bind(struct xe_vma *vma,
+ 	}
+ }
+ 
+-static void xe_pt_commit_locks_assert(struct xe_vma *vma)
++static void xe_pt_commit_prepare_locks_assert(struct xe_vma *vma)
+ {
+ 	struct xe_vm *vm = xe_vma_vm(vma);
+ 
+@@ -884,6 +891,16 @@ static void xe_pt_commit_locks_assert(struct xe_vma *vma)
+ 	xe_vm_assert_held(vm);
+ }
+ 
++static void xe_pt_commit_locks_assert(struct xe_vma *vma)
++{
++	struct xe_vm *vm = xe_vma_vm(vma);
++
++	xe_pt_commit_prepare_locks_assert(vma);
++
++	if (xe_vma_is_userptr(vma))
++		lockdep_assert_held_read(&vm->userptr.notifier_lock);
++}
++
+ static void xe_pt_commit(struct xe_vma *vma,
+ 			 struct xe_vm_pgtable_update *entries,
+ 			 u32 num_entries, struct llist_head *deferred)
+@@ -894,13 +911,17 @@ static void xe_pt_commit(struct xe_vma *vma,
+ 
+ 	for (i = 0; i < num_entries; i++) {
+ 		struct xe_pt *pt = entries[i].pt;
++		struct xe_pt_dir *pt_dir;
+ 
+ 		if (!pt->level)
+ 			continue;
+ 
++		pt_dir = as_xe_pt_dir(pt);
+ 		for (j = 0; j < entries[i].qwords; j++) {
+ 			struct xe_pt *oldpte = entries[i].pt_entries[j].pt;
++			int j_ = j + entries[i].ofs;
+ 
++			pt_dir->children[j_] = pt_dir->staging[j_];
+ 			xe_pt_destroy(oldpte, xe_vma_vm(vma)->flags, deferred);
+ 		}
+ 	}
+@@ -912,7 +933,7 @@ static void xe_pt_abort_bind(struct xe_vma *vma,
+ {
+ 	int i, j;
+ 
+-	xe_pt_commit_locks_assert(vma);
++	xe_pt_commit_prepare_locks_assert(vma);
+ 
+ 	for (i = num_entries - 1; i >= 0; --i) {
+ 		struct xe_pt *pt = entries[i].pt;
+@@ -927,10 +948,10 @@ static void xe_pt_abort_bind(struct xe_vma *vma,
+ 		pt_dir = as_xe_pt_dir(pt);
+ 		for (j = 0; j < entries[i].qwords; j++) {
+ 			u32 j_ = j + entries[i].ofs;
+-			struct xe_pt *newpte = xe_pt_entry(pt_dir, j_);
++			struct xe_pt *newpte = xe_pt_entry_staging(pt_dir, j_);
+ 			struct xe_pt *oldpte = entries[i].pt_entries[j].pt;
+ 
+-			pt_dir->children[j_] = oldpte ? &oldpte->base : 0;
++			pt_dir->staging[j_] = oldpte ? &oldpte->base : 0;
+ 			xe_pt_destroy(newpte, xe_vma_vm(vma)->flags, NULL);
+ 		}
+ 	}
+@@ -942,7 +963,7 @@ static void xe_pt_commit_prepare_bind(struct xe_vma *vma,
+ {
+ 	u32 i, j;
+ 
+-	xe_pt_commit_locks_assert(vma);
++	xe_pt_commit_prepare_locks_assert(vma);
+ 
+ 	for (i = 0; i < num_entries; i++) {
+ 		struct xe_pt *pt = entries[i].pt;
+@@ -960,10 +981,10 @@ static void xe_pt_commit_prepare_bind(struct xe_vma *vma,
+ 			struct xe_pt *newpte = entries[i].pt_entries[j].pt;
+ 			struct xe_pt *oldpte = NULL;
+ 
+-			if (xe_pt_entry(pt_dir, j_))
+-				oldpte = xe_pt_entry(pt_dir, j_);
++			if (xe_pt_entry_staging(pt_dir, j_))
++				oldpte = xe_pt_entry_staging(pt_dir, j_);
+ 
+-			pt_dir->children[j_] = &newpte->base;
++			pt_dir->staging[j_] = &newpte->base;
+ 			entries[i].pt_entries[j].pt = oldpte;
+ 		}
+ 	}
+@@ -1212,42 +1233,22 @@ static int vma_check_userptr(struct xe_vm *vm, struct xe_vma *vma,
+ 		return 0;
+ 
+ 	uvma = to_userptr_vma(vma);
+-	notifier_seq = uvma->userptr.notifier_seq;
++	if (xe_pt_userptr_inject_eagain(uvma))
++		xe_vma_userptr_force_invalidate(uvma);
+ 
+-	if (uvma->userptr.initial_bind && !xe_vm_in_fault_mode(vm))
+-		return 0;
++	notifier_seq = uvma->userptr.notifier_seq;
+ 
+ 	if (!mmu_interval_read_retry(&uvma->userptr.notifier,
+-				     notifier_seq) &&
+-	    !xe_pt_userptr_inject_eagain(uvma))
++				     notifier_seq))
+ 		return 0;
+ 
+-	if (xe_vm_in_fault_mode(vm)) {
++	if (xe_vm_in_fault_mode(vm))
+ 		return -EAGAIN;
+-	} else {
+-		spin_lock(&vm->userptr.invalidated_lock);
+-		list_move_tail(&uvma->userptr.invalidate_link,
+-			       &vm->userptr.invalidated);
+-		spin_unlock(&vm->userptr.invalidated_lock);
+-
+-		if (xe_vm_in_preempt_fence_mode(vm)) {
+-			struct dma_resv_iter cursor;
+-			struct dma_fence *fence;
+-			long err;
+-
+-			dma_resv_iter_begin(&cursor, xe_vm_resv(vm),
+-					    DMA_RESV_USAGE_BOOKKEEP);
+-			dma_resv_for_each_fence_unlocked(&cursor, fence)
+-				dma_fence_enable_sw_signaling(fence);
+-			dma_resv_iter_end(&cursor);
+-
+-			err = dma_resv_wait_timeout(xe_vm_resv(vm),
+-						    DMA_RESV_USAGE_BOOKKEEP,
+-						    false, MAX_SCHEDULE_TIMEOUT);
+-			XE_WARN_ON(err <= 0);
+-		}
+-	}
+ 
++	/*
++	 * Just continue the operation since exec or rebind worker
++	 * will take care of rebinding.
++	 */
+ 	return 0;
+ }
+ 
+@@ -1513,6 +1514,7 @@ static unsigned int xe_pt_stage_unbind(struct xe_tile *tile, struct xe_vma *vma,
+ 			.ops = &xe_pt_stage_unbind_ops,
+ 			.shifts = xe_normal_pt_shifts,
+ 			.max_level = XE_PT_HIGHEST_LEVEL,
++			.staging = true,
+ 		},
+ 		.tile = tile,
+ 		.modified_start = xe_vma_start(vma),
+@@ -1554,7 +1556,7 @@ static void xe_pt_abort_unbind(struct xe_vma *vma,
+ {
+ 	int i, j;
+ 
+-	xe_pt_commit_locks_assert(vma);
++	xe_pt_commit_prepare_locks_assert(vma);
+ 
+ 	for (i = num_entries - 1; i >= 0; --i) {
+ 		struct xe_vm_pgtable_update *entry = &entries[i];
+@@ -1567,7 +1569,7 @@ static void xe_pt_abort_unbind(struct xe_vma *vma,
+ 			continue;
+ 
+ 		for (j = entry->ofs; j < entry->ofs + entry->qwords; j++)
+-			pt_dir->children[j] =
++			pt_dir->staging[j] =
+ 				entries[i].pt_entries[j - entry->ofs].pt ?
+ 				&entries[i].pt_entries[j - entry->ofs].pt->base : NULL;
+ 	}
+@@ -1580,7 +1582,7 @@ xe_pt_commit_prepare_unbind(struct xe_vma *vma,
+ {
+ 	int i, j;
+ 
+-	xe_pt_commit_locks_assert(vma);
++	xe_pt_commit_prepare_locks_assert(vma);
+ 
+ 	for (i = 0; i < num_entries; ++i) {
+ 		struct xe_vm_pgtable_update *entry = &entries[i];
+@@ -1594,8 +1596,8 @@ xe_pt_commit_prepare_unbind(struct xe_vma *vma,
+ 		pt_dir = as_xe_pt_dir(pt);
+ 		for (j = entry->ofs; j < entry->ofs + entry->qwords; j++) {
+ 			entry->pt_entries[j - entry->ofs].pt =
+-				xe_pt_entry(pt_dir, j);
+-			pt_dir->children[j] = NULL;
++				xe_pt_entry_staging(pt_dir, j);
++			pt_dir->staging[j] = NULL;
+ 		}
+ 	}
+ }
+diff --git a/drivers/gpu/drm/xe/xe_pt_walk.c b/drivers/gpu/drm/xe/xe_pt_walk.c
+index b8b3d2aea4923d..be602a763ff32b 100644
+--- a/drivers/gpu/drm/xe/xe_pt_walk.c
++++ b/drivers/gpu/drm/xe/xe_pt_walk.c
+@@ -74,7 +74,8 @@ int xe_pt_walk_range(struct xe_ptw *parent, unsigned int level,
+ 		     u64 addr, u64 end, struct xe_pt_walk *walk)
+ {
+ 	pgoff_t offset = xe_pt_offset(addr, level, walk);
+-	struct xe_ptw **entries = parent->children ? parent->children : NULL;
++	struct xe_ptw **entries = walk->staging ? (parent->staging ?: NULL) :
++		(parent->children ?: NULL);
+ 	const struct xe_pt_walk_ops *ops = walk->ops;
+ 	enum page_walk_action action;
+ 	struct xe_ptw *child;
+diff --git a/drivers/gpu/drm/xe/xe_pt_walk.h b/drivers/gpu/drm/xe/xe_pt_walk.h
+index 5ecc4d2f0f6536..5c02c244f7de35 100644
+--- a/drivers/gpu/drm/xe/xe_pt_walk.h
++++ b/drivers/gpu/drm/xe/xe_pt_walk.h
+@@ -11,12 +11,14 @@
+ /**
+  * struct xe_ptw - base class for driver pagetable subclassing.
+  * @children: Pointer to an array of children if any.
++ * @staging: Pointer to an array of staging entries, if any.
+  *
+  * Drivers could subclass this, and if it's a page-directory, typically
+  * embed an array of xe_ptw pointers.
+  */
+ struct xe_ptw {
+ 	struct xe_ptw **children;
++	struct xe_ptw **staging;
+ };
+ 
+ /**
+@@ -41,6 +43,8 @@ struct xe_pt_walk {
+ 	 * as shared pagetables.
+ 	 */
+ 	bool shared_pt_mode;
++	/** @staging: Walk staging PT structure */
++	bool staging;
+ };
+ 
+ /**
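
The staging array added above lets binds be prepared against a shadow
copy of the page-table tree and only published to the children
pointers once the operation commits, so an aborted bind never exposes
a half-built subtree. A rough standalone model of the
prepare/commit/abort split (hypothetical names, not driver code):

  #include <stdbool.h>

  #define NENTRIES 512			/* stand-in for XE_PDES */

  struct node {
  	struct node *children[NENTRIES];	/* live tree */
  	struct node *staging[NENTRIES];		/* pending updates */
  };

  /* The walker picks one view, mirroring walk->staging above. */
  static struct node **entries(struct node *n, bool staging)
  {
  	return staging ? n->staging : n->children;
  }

  static void prepare(struct node *n, unsigned int i, struct node *child)
  {
  	n->staging[i] = child;		/* invisible to the live tree */
  }

  static void commit(struct node *n, unsigned int i)
  {
  	n->children[i] = n->staging[i];	/* publish on success */
  }

  static void abort_prepare(struct node *n, unsigned int i)
  {
  	n->staging[i] = n->children[i];	/* roll back on failure */
  }
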
+diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
+index 5693b337f5dffe..872de052d670f5 100644
+--- a/drivers/gpu/drm/xe/xe_vm.c
++++ b/drivers/gpu/drm/xe/xe_vm.c
+@@ -580,51 +580,26 @@ static void preempt_rebind_work_func(struct work_struct *w)
+ 	trace_xe_vm_rebind_worker_exit(vm);
+ }
+ 
+-static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
+-				   const struct mmu_notifier_range *range,
+-				   unsigned long cur_seq)
++static void __vma_userptr_invalidate(struct xe_vm *vm, struct xe_userptr_vma *uvma)
+ {
+-	struct xe_userptr *userptr = container_of(mni, typeof(*userptr), notifier);
+-	struct xe_userptr_vma *uvma = container_of(userptr, typeof(*uvma), userptr);
++	struct xe_userptr *userptr = &uvma->userptr;
+ 	struct xe_vma *vma = &uvma->vma;
+-	struct xe_vm *vm = xe_vma_vm(vma);
+ 	struct dma_resv_iter cursor;
+ 	struct dma_fence *fence;
+ 	long err;
+ 
+-	xe_assert(vm->xe, xe_vma_is_userptr(vma));
+-	trace_xe_vma_userptr_invalidate(vma);
+-
+-	if (!mmu_notifier_range_blockable(range))
+-		return false;
+-
+-	vm_dbg(&xe_vma_vm(vma)->xe->drm,
+-	       "NOTIFIER: addr=0x%016llx, range=0x%016llx",
+-		xe_vma_start(vma), xe_vma_size(vma));
+-
+-	down_write(&vm->userptr.notifier_lock);
+-	mmu_interval_set_seq(mni, cur_seq);
+-
+-	/* No need to stop gpu access if the userptr is not yet bound. */
+-	if (!userptr->initial_bind) {
+-		up_write(&vm->userptr.notifier_lock);
+-		return true;
+-	}
+-
+ 	/*
+ 	 * Tell exec and rebind worker they need to repin and rebind this
+ 	 * userptr.
+ 	 */
+ 	if (!xe_vm_in_fault_mode(vm) &&
+-	    !(vma->gpuva.flags & XE_VMA_DESTROYED) && vma->tile_present) {
++	    !(vma->gpuva.flags & XE_VMA_DESTROYED)) {
+ 		spin_lock(&vm->userptr.invalidated_lock);
+ 		list_move_tail(&userptr->invalidate_link,
+ 			       &vm->userptr.invalidated);
+ 		spin_unlock(&vm->userptr.invalidated_lock);
+ 	}
+ 
+-	up_write(&vm->userptr.notifier_lock);
+-
+ 	/*
+ 	 * Preempt fences turn into schedule disables, pipeline these.
+ 	 * Note that even in fault mode, we need to wait for binds and
+@@ -642,11 +617,37 @@ static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
+ 				    false, MAX_SCHEDULE_TIMEOUT);
+ 	XE_WARN_ON(err <= 0);
+ 
+-	if (xe_vm_in_fault_mode(vm)) {
++	if (xe_vm_in_fault_mode(vm) && userptr->initial_bind) {
+ 		err = xe_vm_invalidate_vma(vma);
+ 		XE_WARN_ON(err);
+ 	}
+ 
++	xe_hmm_userptr_unmap(uvma);
++}
++
++static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
++				   const struct mmu_notifier_range *range,
++				   unsigned long cur_seq)
++{
++	struct xe_userptr_vma *uvma = container_of(mni, typeof(*uvma), userptr.notifier);
++	struct xe_vma *vma = &uvma->vma;
++	struct xe_vm *vm = xe_vma_vm(vma);
++
++	xe_assert(vm->xe, xe_vma_is_userptr(vma));
++	trace_xe_vma_userptr_invalidate(vma);
++
++	if (!mmu_notifier_range_blockable(range))
++		return false;
++
++	vm_dbg(&xe_vma_vm(vma)->xe->drm,
++	       "NOTIFIER: addr=0x%016llx, range=0x%016llx",
++		xe_vma_start(vma), xe_vma_size(vma));
++
++	down_write(&vm->userptr.notifier_lock);
++	mmu_interval_set_seq(mni, cur_seq);
++
++	__vma_userptr_invalidate(vm, uvma);
++	up_write(&vm->userptr.notifier_lock);
+ 	trace_xe_vma_userptr_invalidate_complete(vma);
+ 
+ 	return true;
+@@ -656,6 +657,34 @@ static const struct mmu_interval_notifier_ops vma_userptr_notifier_ops = {
+ 	.invalidate = vma_userptr_invalidate,
+ };
+ 
++#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
++/**
++ * xe_vma_userptr_force_invalidate() - force invalidate a userptr
++ * @uvma: The userptr vma to invalidate
++ *
++ * Perform a forced userptr invalidation for testing purposes.
++ */
++void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma)
++{
++	struct xe_vm *vm = xe_vma_vm(&uvma->vma);
++
++	/* Protect against concurrent userptr pinning */
++	lockdep_assert_held(&vm->lock);
++	/* Protect against concurrent notifiers */
++	lockdep_assert_held(&vm->userptr.notifier_lock);
++	/*
++	 * Protect against concurrent instances of this function and
++	 * the critical exec sections
++	 */
++	xe_vm_assert_held(vm);
++
++	if (!mmu_interval_read_retry(&uvma->userptr.notifier,
++				     uvma->userptr.notifier_seq))
++		uvma->userptr.notifier_seq -= 2;
++	__vma_userptr_invalidate(vm, uvma);
++}
++#endif
++
+ int xe_vm_userptr_pin(struct xe_vm *vm)
+ {
+ 	struct xe_userptr_vma *uvma, *next;
+@@ -1012,6 +1041,7 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
+ 			INIT_LIST_HEAD(&userptr->invalidate_link);
+ 			INIT_LIST_HEAD(&userptr->repin_link);
+ 			vma->gpuva.gem.offset = bo_offset_or_userptr;
++			mutex_init(&userptr->unmap_mutex);
+ 
+ 			err = mmu_interval_notifier_insert(&userptr->notifier,
+ 							   current->mm,
+@@ -1053,6 +1083,7 @@ static void xe_vma_destroy_late(struct xe_vma *vma)
+ 		 * them anymore
+ 		 */
+ 		mmu_interval_notifier_remove(&userptr->notifier);
++		mutex_destroy(&userptr->unmap_mutex);
+ 		xe_vm_put(vm);
+ 	} else if (xe_vma_is_null(vma)) {
+ 		xe_vm_put(vm);
+@@ -2284,8 +2315,17 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
+ 			break;
+ 		}
+ 		case DRM_GPUVA_OP_UNMAP:
++			xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
++			break;
+ 		case DRM_GPUVA_OP_PREFETCH:
+-			/* FIXME: Need to skip some prefetch ops */
++			vma = gpuva_to_vma(op->base.prefetch.va);
++
++			if (xe_vma_is_userptr(vma)) {
++				err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
++				if (err)
++					return err;
++			}
++
+ 			xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
+ 			break;
+ 		default:
+diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
+index c864dba35e1d5c..d2406532fcc500 100644
+--- a/drivers/gpu/drm/xe/xe_vm.h
++++ b/drivers/gpu/drm/xe/xe_vm.h
+@@ -275,9 +275,17 @@ static inline void vm_dbg(const struct drm_device *dev,
+ 			  const char *format, ...)
+ { /* noop */ }
+ #endif
+-#endif
+ 
+ struct xe_vm_snapshot *xe_vm_snapshot_capture(struct xe_vm *vm);
+ void xe_vm_snapshot_capture_delayed(struct xe_vm_snapshot *snap);
+ void xe_vm_snapshot_print(struct xe_vm_snapshot *snap, struct drm_printer *p);
+ void xe_vm_snapshot_free(struct xe_vm_snapshot *snap);
++
++#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
++void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma);
++#else
++static inline void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma)
++{
++}
++#endif
++#endif
+diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
+index 7f9a303e51d896..a4b4091cfd0dab 100644
+--- a/drivers/gpu/drm/xe/xe_vm_types.h
++++ b/drivers/gpu/drm/xe/xe_vm_types.h
+@@ -59,12 +59,16 @@ struct xe_userptr {
+ 	struct sg_table *sg;
+ 	/** @notifier_seq: notifier sequence number */
+ 	unsigned long notifier_seq;
++	/** @unmap_mutex: Mutex protecting dma-unmapping */
++	struct mutex unmap_mutex;
+ 	/**
+ 	 * @initial_bind: user pointer has been bound at least once.
+ 	 * write: vm->userptr.notifier_lock in read mode and vm->resv held.
+ 	 * read: vm->userptr.notifier_lock in write mode or vm->resv held.
+ 	 */
+ 	bool initial_bind;
++	/** @mapped: Whether the @sgt sg-table is dma-mapped. Protected by @unmap_mutex. */
++	bool mapped;
+ #if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
+ 	u32 divisor;
+ #endif
+@@ -227,8 +231,8 @@ struct xe_vm {
+ 		 * up for revalidation. Protected from access with the
+ 		 * @invalidated_lock. Removing items from the list
+ 		 * additionally requires @lock in write mode, and adding
+-		 * items to the list requires the @userptr.notifer_lock in
+-		 * write mode.
++		 * items to the list requires either the @userptr.notifier_lock in
++		 * write mode, OR @lock in write mode.
+ 		 */
+ 		struct list_head invalidated;
+ 	} userptr;
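
The unmap_mutex/mapped pair documented above makes the dma-unmap
idempotent: both the invalidation notifier and the sg-free path may
call it, and whichever arrives second sees mapped == false and does
nothing. A minimal pthread model of that guard (hypothetical names,
not driver code):

  #include <pthread.h>
  #include <stdbool.h>

  struct userptr {
  	pthread_mutex_t unmap_mutex;
  	bool mapped;			/* protected by unmap_mutex */
  };

  static void dma_unmap(struct userptr *u)
  {
  	(void)u;			/* stand-in for dma_unmap_sgtable() */
  }

  void userptr_unmap(struct userptr *u)
  {
  	pthread_mutex_lock(&u->unmap_mutex);
  	if (u->mapped) {
  		dma_unmap(u);		/* runs at most once */
  		u->mapped = false;
  	}
  	pthread_mutex_unlock(&u->unmap_mutex);
  }
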
+diff --git a/drivers/hid/hid-appleir.c b/drivers/hid/hid-appleir.c
+index 8deded1857254a..c45e5aa569d25f 100644
+--- a/drivers/hid/hid-appleir.c
++++ b/drivers/hid/hid-appleir.c
+@@ -188,7 +188,7 @@ static int appleir_raw_event(struct hid_device *hid, struct hid_report *report,
+ 	static const u8 flatbattery[] = { 0x25, 0x87, 0xe0 };
+ 	unsigned long flags;
+ 
+-	if (len != 5)
++	if (len != 5 || !(hid->claimed & HID_CLAIMED_INPUT))
+ 		goto out;
+ 
+ 	if (!memcmp(data, keydown, sizeof(keydown))) {
+diff --git a/drivers/hid/hid-google-hammer.c b/drivers/hid/hid-google-hammer.c
+index 22683ec819aaca..646ba5b92e0b2a 100644
+--- a/drivers/hid/hid-google-hammer.c
++++ b/drivers/hid/hid-google-hammer.c
+@@ -268,11 +268,13 @@ static void cbas_ec_remove(struct platform_device *pdev)
+ 	mutex_unlock(&cbas_ec_reglock);
+ }
+ 
++#ifdef CONFIG_ACPI
+ static const struct acpi_device_id cbas_ec_acpi_ids[] = {
+ 	{ "GOOG000B", 0 },
+ 	{ }
+ };
+ MODULE_DEVICE_TABLE(acpi, cbas_ec_acpi_ids);
++#endif
+ 
+ #ifdef CONFIG_OF
+ static const struct of_device_id cbas_ec_of_match[] = {
+diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c
+index 7b359668987854..19b7bb0c3d7f99 100644
+--- a/drivers/hid/hid-steam.c
++++ b/drivers/hid/hid-steam.c
+@@ -1327,11 +1327,11 @@ static void steam_remove(struct hid_device *hdev)
+ 		return;
+ 	}
+ 
++	hid_destroy_device(steam->client_hdev);
+ 	cancel_delayed_work_sync(&steam->mode_switch);
+ 	cancel_work_sync(&steam->work_connect);
+ 	cancel_work_sync(&steam->rumble_work);
+ 	cancel_work_sync(&steam->unregister_work);
+-	hid_destroy_device(steam->client_hdev);
+ 	steam->client_hdev = NULL;
+ 	steam->client_opened = 0;
+ 	if (steam->quirks & STEAM_QUIRK_WIRELESS) {
+diff --git a/drivers/hid/intel-ish-hid/ishtp-hid-client.c b/drivers/hid/intel-ish-hid/ishtp-hid-client.c
+index fbd4f8ea1951b8..af6a5afc1a93e9 100644
+--- a/drivers/hid/intel-ish-hid/ishtp-hid-client.c
++++ b/drivers/hid/intel-ish-hid/ishtp-hid-client.c
+@@ -833,9 +833,9 @@ static void hid_ishtp_cl_remove(struct ishtp_cl_device *cl_device)
+ 			hid_ishtp_cl);
+ 
+ 	dev_dbg(ishtp_device(cl_device), "%s\n", __func__);
+-	hid_ishtp_cl_deinit(hid_ishtp_cl);
+ 	ishtp_put_device(cl_device);
+ 	ishtp_hid_remove(client_data);
++	hid_ishtp_cl_deinit(hid_ishtp_cl);
+ 
+ 	hid_ishtp_cl = NULL;
+ 
+diff --git a/drivers/hid/intel-ish-hid/ishtp-hid.c b/drivers/hid/intel-ish-hid/ishtp-hid.c
+index 00c6f0ebf35633..be2c62fc8251d7 100644
+--- a/drivers/hid/intel-ish-hid/ishtp-hid.c
++++ b/drivers/hid/intel-ish-hid/ishtp-hid.c
+@@ -261,12 +261,14 @@ int ishtp_hid_probe(unsigned int cur_hid_dev,
+  */
+ void ishtp_hid_remove(struct ishtp_cl_data *client_data)
+ {
++	void *data;
+ 	int i;
+ 
+ 	for (i = 0; i < client_data->num_hid_devices; ++i) {
+ 		if (client_data->hid_sensor_hubs[i]) {
+-			kfree(client_data->hid_sensor_hubs[i]->driver_data);
++			data = client_data->hid_sensor_hubs[i]->driver_data;
+ 			hid_destroy_device(client_data->hid_sensor_hubs[i]);
++			kfree(data);
+ 			client_data->hid_sensor_hubs[i] = NULL;
+ 		}
+ 	}
+diff --git a/drivers/hwmon/ad7314.c b/drivers/hwmon/ad7314.c
+index 7802bbf5f9587f..59424103f6348a 100644
+--- a/drivers/hwmon/ad7314.c
++++ b/drivers/hwmon/ad7314.c
+@@ -22,11 +22,13 @@
+  */
+ #define AD7314_TEMP_MASK		0x7FE0
+ #define AD7314_TEMP_SHIFT		5
++#define AD7314_LEADING_ZEROS_MASK	BIT(15)
+ 
+ /*
+  * ADT7301 and ADT7302 temperature masks
+  */
+ #define ADT7301_TEMP_MASK		0x3FFF
++#define ADT7301_LEADING_ZEROS_MASK	(BIT(15) | BIT(14))
+ 
+ enum ad7314_variant {
+ 	adt7301,
+@@ -65,12 +67,20 @@ static ssize_t ad7314_temperature_show(struct device *dev,
+ 		return ret;
+ 	switch (spi_get_device_id(chip->spi_dev)->driver_data) {
+ 	case ad7314:
++		if (ret & AD7314_LEADING_ZEROS_MASK) {
++			/* Invalid read-out, leading zero part is missing */
++			return -EIO;
++		}
+ 		data = (ret & AD7314_TEMP_MASK) >> AD7314_TEMP_SHIFT;
+ 		data = sign_extend32(data, 9);
+ 
+ 		return sprintf(buf, "%d\n", 250 * data);
+ 	case adt7301:
+ 	case adt7302:
++		if (ret & ADT7301_LEADING_ZEROS_MASK) {
++			/* Invalid read-out, leading zero part is missing */
++			return -EIO;
++		}
+ 		/*
+ 		 * Documented as a 13 bit twos complement register
+ 		 * with a sign bit - which is a 14 bit 2's complement
+diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
+index b5352900463fb9..0d29c8f97ba7c2 100644
+--- a/drivers/hwmon/ntc_thermistor.c
++++ b/drivers/hwmon/ntc_thermistor.c
+@@ -181,40 +181,40 @@ static const struct ntc_compensation ncpXXwf104[] = {
+ };
+ 
+ static const struct ntc_compensation ncpXXxh103[] = {
+-	{ .temp_c	= -40, .ohm	= 247565 },
+-	{ .temp_c	= -35, .ohm	= 181742 },
+-	{ .temp_c	= -30, .ohm	= 135128 },
+-	{ .temp_c	= -25, .ohm	= 101678 },
+-	{ .temp_c	= -20, .ohm	= 77373 },
+-	{ .temp_c	= -15, .ohm	= 59504 },
+-	{ .temp_c	= -10, .ohm	= 46222 },
+-	{ .temp_c	= -5, .ohm	= 36244 },
+-	{ .temp_c	= 0, .ohm	= 28674 },
+-	{ .temp_c	= 5, .ohm	= 22878 },
+-	{ .temp_c	= 10, .ohm	= 18399 },
+-	{ .temp_c	= 15, .ohm	= 14910 },
+-	{ .temp_c	= 20, .ohm	= 12169 },
++	{ .temp_c	= -40, .ohm	= 195652 },
++	{ .temp_c	= -35, .ohm	= 148171 },
++	{ .temp_c	= -30, .ohm	= 113347 },
++	{ .temp_c	= -25, .ohm	= 87559 },
++	{ .temp_c	= -20, .ohm	= 68237 },
++	{ .temp_c	= -15, .ohm	= 53650 },
++	{ .temp_c	= -10, .ohm	= 42506 },
++	{ .temp_c	= -5, .ohm	= 33892 },
++	{ .temp_c	= 0, .ohm	= 27219 },
++	{ .temp_c	= 5, .ohm	= 22021 },
++	{ .temp_c	= 10, .ohm	= 17926 },
++	{ .temp_c	= 15, .ohm	= 14674 },
++	{ .temp_c	= 20, .ohm	= 12081 },
+ 	{ .temp_c	= 25, .ohm	= 10000 },
+-	{ .temp_c	= 30, .ohm	= 8271 },
+-	{ .temp_c	= 35, .ohm	= 6883 },
+-	{ .temp_c	= 40, .ohm	= 5762 },
+-	{ .temp_c	= 45, .ohm	= 4851 },
+-	{ .temp_c	= 50, .ohm	= 4105 },
+-	{ .temp_c	= 55, .ohm	= 3492 },
+-	{ .temp_c	= 60, .ohm	= 2985 },
+-	{ .temp_c	= 65, .ohm	= 2563 },
+-	{ .temp_c	= 70, .ohm	= 2211 },
+-	{ .temp_c	= 75, .ohm	= 1915 },
+-	{ .temp_c	= 80, .ohm	= 1666 },
+-	{ .temp_c	= 85, .ohm	= 1454 },
+-	{ .temp_c	= 90, .ohm	= 1275 },
+-	{ .temp_c	= 95, .ohm	= 1121 },
+-	{ .temp_c	= 100, .ohm	= 990 },
+-	{ .temp_c	= 105, .ohm	= 876 },
+-	{ .temp_c	= 110, .ohm	= 779 },
+-	{ .temp_c	= 115, .ohm	= 694 },
+-	{ .temp_c	= 120, .ohm	= 620 },
+-	{ .temp_c	= 125, .ohm	= 556 },
++	{ .temp_c	= 30, .ohm	= 8315 },
++	{ .temp_c	= 35, .ohm	= 6948 },
++	{ .temp_c	= 40, .ohm	= 5834 },
++	{ .temp_c	= 45, .ohm	= 4917 },
++	{ .temp_c	= 50, .ohm	= 4161 },
++	{ .temp_c	= 55, .ohm	= 3535 },
++	{ .temp_c	= 60, .ohm	= 3014 },
++	{ .temp_c	= 65, .ohm	= 2586 },
++	{ .temp_c	= 70, .ohm	= 2228 },
++	{ .temp_c	= 75, .ohm	= 1925 },
++	{ .temp_c	= 80, .ohm	= 1669 },
++	{ .temp_c	= 85, .ohm	= 1452 },
++	{ .temp_c	= 90, .ohm	= 1268 },
++	{ .temp_c	= 95, .ohm	= 1110 },
++	{ .temp_c	= 100, .ohm	= 974 },
++	{ .temp_c	= 105, .ohm	= 858 },
++	{ .temp_c	= 110, .ohm	= 758 },
++	{ .temp_c	= 115, .ohm	= 672 },
++	{ .temp_c	= 120, .ohm	= 596 },
++	{ .temp_c	= 125, .ohm	= 531 },
+ };
+ 
+ /*
+diff --git a/drivers/hwmon/peci/dimmtemp.c b/drivers/hwmon/peci/dimmtemp.c
+index 4a72e9712408e2..b7b09780c7b0a6 100644
+--- a/drivers/hwmon/peci/dimmtemp.c
++++ b/drivers/hwmon/peci/dimmtemp.c
+@@ -127,8 +127,6 @@ static int update_thresholds(struct peci_dimmtemp *priv, int dimm_no)
+ 		return 0;
+ 
+ 	ret = priv->gen_info->read_thresholds(priv, dimm_order, chan_rank, &data);
+-	if (ret == -ENODATA) /* Use default or previous value */
+-		return 0;
+ 	if (ret)
+ 		return ret;
+ 
+@@ -509,11 +507,11 @@ read_thresholds_icx(struct peci_dimmtemp *priv, int dimm_order, int chan_rank, u
+ 
+ 	ret = peci_ep_pci_local_read(priv->peci_dev, 0, 13, 0, 2, 0xd4, &reg_val);
+ 	if (ret || !(reg_val & BIT(31)))
+-		return -ENODATA; /* Use default or previous value */
++		return -ENODATA;
+ 
+ 	ret = peci_ep_pci_local_read(priv->peci_dev, 0, 13, 0, 2, 0xd0, &reg_val);
+ 	if (ret)
+-		return -ENODATA; /* Use default or previous value */
++		return -ENODATA;
+ 
+ 	/*
+ 	 * Device 26, Offset 224e0: IMC 0 channel 0 -> rank 0
+@@ -546,11 +544,11 @@ read_thresholds_spr(struct peci_dimmtemp *priv, int dimm_order, int chan_rank, u
+ 
+ 	ret = peci_ep_pci_local_read(priv->peci_dev, 0, 30, 0, 2, 0xd4, &reg_val);
+ 	if (ret || !(reg_val & BIT(31)))
+-		return -ENODATA; /* Use default or previous value */
++		return -ENODATA;
+ 
+ 	ret = peci_ep_pci_local_read(priv->peci_dev, 0, 30, 0, 2, 0xd0, &reg_val);
+ 	if (ret)
+-		return -ENODATA; /* Use default or previous value */
++		return -ENODATA;
+ 
+ 	/*
+ 	 * Device 26, Offset 219a8: IMC 0 channel 0 -> rank 0
+diff --git a/drivers/hwmon/pmbus/pmbus.c b/drivers/hwmon/pmbus/pmbus.c
+index ec40c5c599543a..59424dc518c8f9 100644
+--- a/drivers/hwmon/pmbus/pmbus.c
++++ b/drivers/hwmon/pmbus/pmbus.c
+@@ -103,6 +103,8 @@ static int pmbus_identify(struct i2c_client *client,
+ 		if (pmbus_check_byte_register(client, 0, PMBUS_PAGE)) {
+ 			int page;
+ 
++			info->pages = PMBUS_PAGES;
++
+ 			for (page = 1; page < PMBUS_PAGES; page++) {
+ 				if (pmbus_set_page(client, page, 0xff) < 0)
+ 					break;
+diff --git a/drivers/hwmon/xgene-hwmon.c b/drivers/hwmon/xgene-hwmon.c
+index 5e0759a70f6d51..92d82faf237fcf 100644
+--- a/drivers/hwmon/xgene-hwmon.c
++++ b/drivers/hwmon/xgene-hwmon.c
+@@ -706,7 +706,7 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
+ 			goto out;
+ 		}
+ 
+-		if (!ctx->pcc_comm_addr) {
++		if (IS_ERR_OR_NULL(ctx->pcc_comm_addr)) {
+ 			dev_err(&pdev->dev,
+ 				"Failed to ioremap PCC comm region\n");
+ 			rc = -ENOMEM;
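
The xgene-hwmon fix works because the PCC mapping helper can fail in
two shapes: a plain NULL on one path and an ERR_PTR-encoded negative
errno on another, so the old bare NULL test missed half the failures.
A small userspace model of the kernel's pointer-encoded error
convention (the real helpers live in include/linux/err.h):

  #include <stdbool.h>

  #define MAX_ERRNO 4095

  /* The kernel packs -errno values into the last page of the address
   * space; anything at or above (unsigned long)-MAX_ERRNO is an error
   * pointer, so a validity check must reject both that and NULL. */
  static inline bool is_err(const void *p)
  {
  	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
  }

  static inline bool is_err_or_null(const void *p)
  {
  	return !p || is_err(p);
  }
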
+diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
+index 0d7b9839e5b663..6bb6af0f96fa5c 100644
+--- a/drivers/hwtracing/intel_th/pci.c
++++ b/drivers/hwtracing/intel_th/pci.c
+@@ -329,6 +329,21 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
+ 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa824),
+ 		.driver_data = (kernel_ulong_t)&intel_th_2x,
+ 	},
++	{
++		/* Arrow Lake */
++		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7724),
++		.driver_data = (kernel_ulong_t)&intel_th_2x,
++	},
++	{
++		/* Panther Lake-H */
++		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe324),
++		.driver_data = (kernel_ulong_t)&intel_th_2x,
++	},
++	{
++		/* Panther Lake-P/U */
++		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe424),
++		.driver_data = (kernel_ulong_t)&intel_th_2x,
++	},
+ 	{
+ 		/* Alder Lake CPU */
+ 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x466f),
+diff --git a/drivers/iio/adc/ad7192.c b/drivers/iio/adc/ad7192.c
+index 955e9eff0099e5..6fe32f866765bf 100644
+--- a/drivers/iio/adc/ad7192.c
++++ b/drivers/iio/adc/ad7192.c
+@@ -1082,7 +1082,7 @@ static int ad7192_update_scan_mode(struct iio_dev *indio_dev, const unsigned lon
+ 
+ 	conf &= ~AD7192_CONF_CHAN_MASK;
+ 	for_each_set_bit(i, scan_mask, 8)
+-		conf |= FIELD_PREP(AD7192_CONF_CHAN_MASK, i);
++		conf |= FIELD_PREP(AD7192_CONF_CHAN_MASK, BIT(i));
+ 
+ 	ret = ad_sd_write_reg(&st->sd, AD7192_REG_CONF, 3, conf);
+ 	if (ret < 0)
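
The ad7192 fix matters because AD7192_CONF_CHAN_MASK selects channels
one-hot: each set bit enables one channel, so the value ORed into the
field must be BIT(i), not the index i (index 3 would set bits 0 and 1
instead of bit 3). A small demo of the difference, using a
hypothetical field position and a local FIELD_PREP stand-in:

  #include <stdio.h>

  #define BIT(n)			(1UL << (n))
  #define CHAN_MASK		(0xffUL << 8)	/* hypothetical 8-bit field at bit 8 */
  #define FIELD_PREP(mask, val)	(((unsigned long)(val) << __builtin_ctzl(mask)) & (mask))

  int main(void)
  {
  	int i = 3;	/* channel 3 set in the scan mask */

  	printf("index:  %#lx\n", FIELD_PREP(CHAN_MASK, i));	 /* 0x300: channels 0+1 */
  	printf("BIT(i): %#lx\n", FIELD_PREP(CHAN_MASK, BIT(i))); /* 0x800: channel 3 */
  	return 0;
  }
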
+diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c
+index d7fd21e7c6e2a6..3618e769b10654 100644
+--- a/drivers/iio/adc/at91-sama5d2_adc.c
++++ b/drivers/iio/adc/at91-sama5d2_adc.c
+@@ -329,7 +329,7 @@ static const struct at91_adc_reg_layout sama7g5_layout = {
+ #define AT91_HWFIFO_MAX_SIZE_STR	"128"
+ #define AT91_HWFIFO_MAX_SIZE		128
+ 
+-#define AT91_SAMA5D2_CHAN_SINGLE(index, num, addr)			\
++#define AT91_SAMA_CHAN_SINGLE(index, num, addr, rbits)			\
+ 	{								\
+ 		.type = IIO_VOLTAGE,					\
+ 		.channel = num,						\
+@@ -337,7 +337,7 @@ static const struct at91_adc_reg_layout sama7g5_layout = {
+ 		.scan_index = index,					\
+ 		.scan_type = {						\
+ 			.sign = 'u',					\
+-			.realbits = 14,					\
++			.realbits = rbits,				\
+ 			.storagebits = 16,				\
+ 		},							\
+ 		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),		\
+@@ -350,7 +350,13 @@ static const struct at91_adc_reg_layout sama7g5_layout = {
+ 		.indexed = 1,						\
+ 	}
+ 
+-#define AT91_SAMA5D2_CHAN_DIFF(index, num, num2, addr)			\
++#define AT91_SAMA5D2_CHAN_SINGLE(index, num, addr)			\
++	AT91_SAMA_CHAN_SINGLE(index, num, addr, 14)
++
++#define AT91_SAMA7G5_CHAN_SINGLE(index, num, addr)			\
++	AT91_SAMA_CHAN_SINGLE(index, num, addr, 16)
++
++#define AT91_SAMA_CHAN_DIFF(index, num, num2, addr, rbits)		\
+ 	{								\
+ 		.type = IIO_VOLTAGE,					\
+ 		.differential = 1,					\
+@@ -360,7 +366,7 @@ static const struct at91_adc_reg_layout sama7g5_layout = {
+ 		.scan_index = index,					\
+ 		.scan_type = {						\
+ 			.sign = 's',					\
+-			.realbits = 14,					\
++			.realbits = rbits,				\
+ 			.storagebits = 16,				\
+ 		},							\
+ 		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),		\
+@@ -373,6 +379,12 @@ static const struct at91_adc_reg_layout sama7g5_layout = {
+ 		.indexed = 1,						\
+ 	}
+ 
++#define AT91_SAMA5D2_CHAN_DIFF(index, num, num2, addr)			\
++	AT91_SAMA_CHAN_DIFF(index, num, num2, addr, 14)
++
++#define AT91_SAMA7G5_CHAN_DIFF(index, num, num2, addr)			\
++	AT91_SAMA_CHAN_DIFF(index, num, num2, addr, 16)
++
+ #define AT91_SAMA5D2_CHAN_TOUCH(num, name, mod)				\
+ 	{								\
+ 		.type = IIO_POSITIONRELATIVE,				\
+@@ -666,30 +678,30 @@ static const struct iio_chan_spec at91_sama5d2_adc_channels[] = {
+ };
+ 
+ static const struct iio_chan_spec at91_sama7g5_adc_channels[] = {
+-	AT91_SAMA5D2_CHAN_SINGLE(0, 0, 0x60),
+-	AT91_SAMA5D2_CHAN_SINGLE(1, 1, 0x64),
+-	AT91_SAMA5D2_CHAN_SINGLE(2, 2, 0x68),
+-	AT91_SAMA5D2_CHAN_SINGLE(3, 3, 0x6c),
+-	AT91_SAMA5D2_CHAN_SINGLE(4, 4, 0x70),
+-	AT91_SAMA5D2_CHAN_SINGLE(5, 5, 0x74),
+-	AT91_SAMA5D2_CHAN_SINGLE(6, 6, 0x78),
+-	AT91_SAMA5D2_CHAN_SINGLE(7, 7, 0x7c),
+-	AT91_SAMA5D2_CHAN_SINGLE(8, 8, 0x80),
+-	AT91_SAMA5D2_CHAN_SINGLE(9, 9, 0x84),
+-	AT91_SAMA5D2_CHAN_SINGLE(10, 10, 0x88),
+-	AT91_SAMA5D2_CHAN_SINGLE(11, 11, 0x8c),
+-	AT91_SAMA5D2_CHAN_SINGLE(12, 12, 0x90),
+-	AT91_SAMA5D2_CHAN_SINGLE(13, 13, 0x94),
+-	AT91_SAMA5D2_CHAN_SINGLE(14, 14, 0x98),
+-	AT91_SAMA5D2_CHAN_SINGLE(15, 15, 0x9c),
+-	AT91_SAMA5D2_CHAN_DIFF(16, 0, 1, 0x60),
+-	AT91_SAMA5D2_CHAN_DIFF(17, 2, 3, 0x68),
+-	AT91_SAMA5D2_CHAN_DIFF(18, 4, 5, 0x70),
+-	AT91_SAMA5D2_CHAN_DIFF(19, 6, 7, 0x78),
+-	AT91_SAMA5D2_CHAN_DIFF(20, 8, 9, 0x80),
+-	AT91_SAMA5D2_CHAN_DIFF(21, 10, 11, 0x88),
+-	AT91_SAMA5D2_CHAN_DIFF(22, 12, 13, 0x90),
+-	AT91_SAMA5D2_CHAN_DIFF(23, 14, 15, 0x98),
++	AT91_SAMA7G5_CHAN_SINGLE(0, 0, 0x60),
++	AT91_SAMA7G5_CHAN_SINGLE(1, 1, 0x64),
++	AT91_SAMA7G5_CHAN_SINGLE(2, 2, 0x68),
++	AT91_SAMA7G5_CHAN_SINGLE(3, 3, 0x6c),
++	AT91_SAMA7G5_CHAN_SINGLE(4, 4, 0x70),
++	AT91_SAMA7G5_CHAN_SINGLE(5, 5, 0x74),
++	AT91_SAMA7G5_CHAN_SINGLE(6, 6, 0x78),
++	AT91_SAMA7G5_CHAN_SINGLE(7, 7, 0x7c),
++	AT91_SAMA7G5_CHAN_SINGLE(8, 8, 0x80),
++	AT91_SAMA7G5_CHAN_SINGLE(9, 9, 0x84),
++	AT91_SAMA7G5_CHAN_SINGLE(10, 10, 0x88),
++	AT91_SAMA7G5_CHAN_SINGLE(11, 11, 0x8c),
++	AT91_SAMA7G5_CHAN_SINGLE(12, 12, 0x90),
++	AT91_SAMA7G5_CHAN_SINGLE(13, 13, 0x94),
++	AT91_SAMA7G5_CHAN_SINGLE(14, 14, 0x98),
++	AT91_SAMA7G5_CHAN_SINGLE(15, 15, 0x9c),
++	AT91_SAMA7G5_CHAN_DIFF(16, 0, 1, 0x60),
++	AT91_SAMA7G5_CHAN_DIFF(17, 2, 3, 0x68),
++	AT91_SAMA7G5_CHAN_DIFF(18, 4, 5, 0x70),
++	AT91_SAMA7G5_CHAN_DIFF(19, 6, 7, 0x78),
++	AT91_SAMA7G5_CHAN_DIFF(20, 8, 9, 0x80),
++	AT91_SAMA7G5_CHAN_DIFF(21, 10, 11, 0x88),
++	AT91_SAMA7G5_CHAN_DIFF(22, 12, 13, 0x90),
++	AT91_SAMA7G5_CHAN_DIFF(23, 14, 15, 0x98),
+ 	IIO_CHAN_SOFT_TIMESTAMP(24),
+ 	AT91_SAMA5D2_CHAN_TEMP(AT91_SAMA7G5_ADC_TEMP_CHANNEL, "temp", 0xdc),
+ };
+diff --git a/drivers/iio/dac/ad3552r.c b/drivers/iio/dac/ad3552r.c
+index 7d61b2fe662436..390d3fab21478f 100644
+--- a/drivers/iio/dac/ad3552r.c
++++ b/drivers/iio/dac/ad3552r.c
+@@ -714,6 +714,12 @@ static int ad3552r_reset(struct ad3552r_desc *dac)
+ 		return ret;
+ 	}
+ 
++	/* Clear reset error flag, see ad3552r manual, rev B table 38. */
++	ret = ad3552r_write_reg(dac, AD3552R_REG_ADDR_ERR_STATUS,
++				AD3552R_MASK_RESET_STATUS);
++	if (ret)
++		return ret;
++
+ 	return ad3552r_update_reg_field(dac,
+ 					addr_mask_map[AD3552R_ADDR_ASCENSION][0],
+ 					addr_mask_map[AD3552R_ADDR_ASCENSION][1],
+diff --git a/drivers/iio/filter/admv8818.c b/drivers/iio/filter/admv8818.c
+index 848baa6e3bbf5d..d85b7d3de86604 100644
+--- a/drivers/iio/filter/admv8818.c
++++ b/drivers/iio/filter/admv8818.c
+@@ -574,21 +574,15 @@ static int admv8818_init(struct admv8818_state *st)
+ 	struct spi_device *spi = st->spi;
+ 	unsigned int chip_id;
+ 
+-	ret = regmap_update_bits(st->regmap, ADMV8818_REG_SPI_CONFIG_A,
+-				 ADMV8818_SOFTRESET_N_MSK |
+-				 ADMV8818_SOFTRESET_MSK,
+-				 FIELD_PREP(ADMV8818_SOFTRESET_N_MSK, 1) |
+-				 FIELD_PREP(ADMV8818_SOFTRESET_MSK, 1));
++	ret = regmap_write(st->regmap, ADMV8818_REG_SPI_CONFIG_A,
++			   ADMV8818_SOFTRESET_N_MSK | ADMV8818_SOFTRESET_MSK);
+ 	if (ret) {
+ 		dev_err(&spi->dev, "ADMV8818 Soft Reset failed.\n");
+ 		return ret;
+ 	}
+ 
+-	ret = regmap_update_bits(st->regmap, ADMV8818_REG_SPI_CONFIG_A,
+-				 ADMV8818_SDOACTIVE_N_MSK |
+-				 ADMV8818_SDOACTIVE_MSK,
+-				 FIELD_PREP(ADMV8818_SDOACTIVE_N_MSK, 1) |
+-				 FIELD_PREP(ADMV8818_SDOACTIVE_MSK, 1));
++	ret = regmap_write(st->regmap, ADMV8818_REG_SPI_CONFIG_A,
++			   ADMV8818_SDOACTIVE_N_MSK | ADMV8818_SDOACTIVE_MSK);
+ 	if (ret) {
+ 		dev_err(&spi->dev, "ADMV8818 SDO Enable failed.\n");
+ 		return ret;
+diff --git a/drivers/iio/light/apds9306.c b/drivers/iio/light/apds9306.c
+index 079e02be100521..7f9d6cac8adb72 100644
+--- a/drivers/iio/light/apds9306.c
++++ b/drivers/iio/light/apds9306.c
+@@ -108,11 +108,11 @@ static const struct part_id_gts_multiplier apds9306_gts_mul[] = {
+ 	{
+ 		.part_id = 0xB1,
+ 		.max_scale_int = 16,
+-		.max_scale_nano = 3264320,
++		.max_scale_nano = 326432000,
+ 	}, {
+ 		.part_id = 0xB3,
+ 		.max_scale_int = 14,
+-		.max_scale_nano = 9712000,
++		.max_scale_nano = 97120000,
+ 	},
+ };
+ 
+diff --git a/drivers/misc/cardreader/rtsx_usb.c b/drivers/misc/cardreader/rtsx_usb.c
+index 285a748748d701..f150d8769f1986 100644
+--- a/drivers/misc/cardreader/rtsx_usb.c
++++ b/drivers/misc/cardreader/rtsx_usb.c
+@@ -286,7 +286,6 @@ static int rtsx_usb_get_status_with_bulk(struct rtsx_ucr *ucr, u16 *status)
+ int rtsx_usb_get_card_status(struct rtsx_ucr *ucr, u16 *status)
+ {
+ 	int ret;
+-	u8 interrupt_val = 0;
+ 	u16 *buf;
+ 
+ 	if (!status)
+@@ -309,20 +308,6 @@ int rtsx_usb_get_card_status(struct rtsx_ucr *ucr, u16 *status)
+ 		ret = rtsx_usb_get_status_with_bulk(ucr, status);
+ 	}
+ 
+-	rtsx_usb_read_register(ucr, CARD_INT_PEND, &interrupt_val);
+-	/* Cross check presence with interrupts */
+-	if (*status & XD_CD)
+-		if (!(interrupt_val & XD_INT))
+-			*status &= ~XD_CD;
+-
+-	if (*status & SD_CD)
+-		if (!(interrupt_val & SD_INT))
+-			*status &= ~SD_CD;
+-
+-	if (*status & MS_CD)
+-		if (!(interrupt_val & MS_INT))
+-			*status &= ~MS_CD;
+-
+ 	/* usb_control_msg may return positive on success */
+ 	if (ret < 0)
+ 		return ret;
+diff --git a/drivers/misc/eeprom/digsy_mtc_eeprom.c b/drivers/misc/eeprom/digsy_mtc_eeprom.c
+index 88888485e6f8eb..ee58f7ce5bfa98 100644
+--- a/drivers/misc/eeprom/digsy_mtc_eeprom.c
++++ b/drivers/misc/eeprom/digsy_mtc_eeprom.c
+@@ -50,7 +50,7 @@ static struct platform_device digsy_mtc_eeprom = {
+ };
+ 
+ static struct gpiod_lookup_table eeprom_spi_gpiod_table = {
+-	.dev_id         = "spi_gpio",
++	.dev_id         = "spi_gpio.1",
+ 	.table          = {
+ 		GPIO_LOOKUP("gpio@b00", GPIO_EEPROM_CLK,
+ 			    "sck", GPIO_ACTIVE_HIGH),
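
The digsy_mtc_eeprom one-liner above is a device-name match fix:
gpiod lookup tables are matched against dev_name(), and the kernel
names a platform device "<name>.<id>" whenever it registers with a
non-negative id (here presumably id 1 for the spi_gpio master), so
"spi_gpio" never matched while "spi_gpio.1" does. A tiny model of the
naming rule (assuming the platform-device id convention):

  #include <stdio.h>

  static void print_dev_name(const char *name, int id)
  {
  	if (id == -1)				/* PLATFORM_DEVID_NONE */
  		printf("%s\n", name);		/* e.g. "spi_gpio" */
  	else
  		printf("%s.%d\n", name, id);	/* e.g. "spi_gpio.1" */
  }

  int main(void)
  {
  	print_dev_name("spi_gpio", -1);
  	print_dev_name("spi_gpio", 1);
  	return 0;
  }
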
+diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
+index c3a6657dcd4a29..a5f88ec97df753 100644
+--- a/drivers/misc/mei/hw-me-regs.h
++++ b/drivers/misc/mei/hw-me-regs.h
+@@ -117,6 +117,8 @@
+ 
+ #define MEI_DEV_ID_LNL_M      0xA870  /* Lunar Lake Point M */
+ 
++#define MEI_DEV_ID_PTL_P      0xE470  /* Panther Lake P */
++
+ /*
+  * MEI HW Section
+  */
+diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
+index 6589635f8ba32b..d6ff9d82ae94b3 100644
+--- a/drivers/misc/mei/pci-me.c
++++ b/drivers/misc/mei/pci-me.c
+@@ -124,6 +124,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
+ 
+ 	{MEI_PCI_DEVICE(MEI_DEV_ID_LNL_M, MEI_ME_PCH15_CFG)},
+ 
++	{MEI_PCI_DEVICE(MEI_DEV_ID_PTL_P, MEI_ME_PCH15_CFG)},
++
+ 	/* required last entry */
+ 	{0, }
+ };
+diff --git a/drivers/misc/mei/vsc-tp.c b/drivers/misc/mei/vsc-tp.c
+index 1618cca9a7317f..ef0a9f423c8f8d 100644
+--- a/drivers/misc/mei/vsc-tp.c
++++ b/drivers/misc/mei/vsc-tp.c
+@@ -504,7 +504,7 @@ static int vsc_tp_probe(struct spi_device *spi)
+ 	if (ret)
+ 		return ret;
+ 
+-	tp->wakeuphost = devm_gpiod_get(dev, "wakeuphost", GPIOD_IN);
++	tp->wakeuphost = devm_gpiod_get(dev, "wakeuphostint", GPIOD_IN);
+ 	if (IS_ERR(tp->wakeuphost))
+ 		return PTR_ERR(tp->wakeuphost);
+ 
+diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c
+index 7fea00c7ca8a6a..c60386bf2d1a4a 100644
+--- a/drivers/net/caif/caif_virtio.c
++++ b/drivers/net/caif/caif_virtio.c
+@@ -745,7 +745,7 @@ static int cfv_probe(struct virtio_device *vdev)
+ 
+ 	if (cfv->vr_rx)
+ 		vdev->vringh_config->del_vrhs(cfv->vdev);
+-	if (cfv->vdev)
++	if (cfv->vq_tx)
+ 		vdev->config->del_vqs(cfv->vdev);
+ 	free_netdev(netdev);
+ 	return err;
+diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
+index d84ee1b419a614..abc979fbb45d18 100644
+--- a/drivers/net/dsa/mt7530.c
++++ b/drivers/net/dsa/mt7530.c
+@@ -2590,7 +2590,8 @@ mt7531_setup_common(struct dsa_switch *ds)
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	return 0;
++	/* Setup VLAN ID 0 for VLAN-unaware bridges */
++	return mt7530_setup_vlan0(priv);
+ }
+ 
+ static int
+@@ -2686,11 +2687,6 @@ mt7531_setup(struct dsa_switch *ds)
+ 	if (ret)
+ 		return ret;
+ 
+-	/* Setup VLAN ID 0 for VLAN-unaware bridges */
+-	ret = mt7530_setup_vlan0(priv);
+-	if (ret)
+-		return ret;
+-
+ 	ds->assisted_learning_on_cpu_port = true;
+ 	ds->mtu_enforcement_ingress = true;
+ 
+diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
+index e48b861e4ce15d..270ff9aab3352b 100644
+--- a/drivers/net/ethernet/emulex/benet/be.h
++++ b/drivers/net/ethernet/emulex/benet/be.h
+@@ -562,7 +562,7 @@ struct be_adapter {
+ 	struct be_dma_mem mbox_mem_alloced;
+ 
+ 	struct be_mcc_obj mcc_obj;
+-	struct mutex mcc_lock;	/* For serializing mcc cmds to BE card */
++	spinlock_t mcc_lock;	/* For serializing mcc cmds to BE card */
+ 	spinlock_t mcc_cq_lock;
+ 
+ 	u16 cfg_num_rx_irqs;		/* configured via set-channels */
+diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
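
The be.h hunk above converts mcc_lock from a mutex to a spinlock taken
with spin_lock_bh(), so code holding it may no longer sleep; that is
why the be_cmds.c hunks below replace usleep_range() with udelay() in
the completion poll and scale the iteration count from 12000 to 120000
to keep the same ~12 s budget. A trivial check of that arithmetic:

  #include <stdio.h>

  int main(void)
  {
  	/* old loop: up to ~1000 us of sleep per iteration */
  	printf("old: %d s\n", 12000 * 1000 / 1000000);	/* 12 s */
  	/* new loop: a 100 us busy-wait per iteration */
  	printf("new: %d s\n", 120000 * 100 / 1000000);	/* 12 s */
  	return 0;
  }
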
+index 61adcebeef0107..51b8377edd1d04 100644
+--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
++++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
+@@ -575,7 +575,7 @@ int be_process_mcc(struct be_adapter *adapter)
+ /* Wait till no more pending mcc requests are present */
+ static int be_mcc_wait_compl(struct be_adapter *adapter)
+ {
+-#define mcc_timeout		12000 /* 12s timeout */
++#define mcc_timeout		120000 /* 12s timeout */
+ 	int i, status = 0;
+ 	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
+ 
+@@ -589,7 +589,7 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
+ 
+ 		if (atomic_read(&mcc_obj->q.used) == 0)
+ 			break;
+-		usleep_range(500, 1000);
++		udelay(100);
+ 	}
+ 	if (i == mcc_timeout) {
+ 		dev_err(&adapter->pdev->dev, "FW not responding\n");
+@@ -866,7 +866,7 @@ static bool use_mcc(struct be_adapter *adapter)
+ static int be_cmd_lock(struct be_adapter *adapter)
+ {
+ 	if (use_mcc(adapter)) {
+-		mutex_lock(&adapter->mcc_lock);
++		spin_lock_bh(&adapter->mcc_lock);
+ 		return 0;
+ 	} else {
+ 		return mutex_lock_interruptible(&adapter->mbox_lock);
+@@ -877,7 +877,7 @@ static int be_cmd_lock(struct be_adapter *adapter)
+ static void be_cmd_unlock(struct be_adapter *adapter)
+ {
+ 	if (use_mcc(adapter))
+-		return mutex_unlock(&adapter->mcc_lock);
++		return spin_unlock_bh(&adapter->mcc_lock);
+ 	else
+ 		return mutex_unlock(&adapter->mbox_lock);
+ }
+@@ -1047,7 +1047,7 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
+ 	struct be_cmd_req_mac_query *req;
+ 	int status;
+ 
+-	mutex_lock(&adapter->mcc_lock);
++	spin_lock_bh(&adapter->mcc_lock);
+ 
+ 	wrb = wrb_from_mccq(adapter);
+ 	if (!wrb) {
+@@ -1076,7 +1076,7 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
+ 	}
+ 
+ err:
+-	mutex_unlock(&adapter->mcc_lock);
++	spin_unlock_bh(&adapter->mcc_lock);
+ 	return status;
+ }
+ 
+@@ -1088,7 +1088,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, const u8 *mac_addr,
+ 	struct be_cmd_req_pmac_add *req;
+ 	int status;
+ 
+-	mutex_lock(&adapter->mcc_lock);
++	spin_lock_bh(&adapter->mcc_lock);
+ 
+ 	wrb = wrb_from_mccq(adapter);
+ 	if (!wrb) {
+@@ -1113,7 +1113,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, const u8 *mac_addr,
+ 	}
+ 
+ err:
+-	mutex_unlock(&adapter->mcc_lock);
++	spin_unlock_bh(&adapter->mcc_lock);
+ 
+ 	if (base_status(status) == MCC_STATUS_UNAUTHORIZED_REQUEST)
+ 		status = -EPERM;
+@@ -1131,7 +1131,7 @@ int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
+ 	if (pmac_id == -1)
+ 		return 0;
+ 
+-	mutex_lock(&adapter->mcc_lock);
++	spin_lock_bh(&adapter->mcc_lock);
+ 
+ 	wrb = wrb_from_mccq(adapter);
+ 	if (!wrb) {
+@@ -1151,7 +1151,7 @@ int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
+ 	status = be_mcc_notify_wait(adapter);
+ 
+ err:
+-	mutex_unlock(&adapter->mcc_lock);
++	spin_unlock_bh(&adapter->mcc_lock);
+ 	return status;
+ }
+ 
+@@ -1414,7 +1414,7 @@ int be_cmd_rxq_create(struct be_adapter *adapter,
+ 	struct be_dma_mem *q_mem = &rxq->dma_mem;
+ 	int status;
+ 
+-	mutex_lock(&adapter->mcc_lock);
++	spin_lock_bh(&adapter->mcc_lock);
+ 
+ 	wrb = wrb_from_mccq(adapter);
+ 	if (!wrb) {
+@@ -1444,7 +1444,7 @@ int be_cmd_rxq_create(struct be_adapter *adapter,
+ 	}
+ 
+ err:
+-	mutex_unlock(&adapter->mcc_lock);
++	spin_unlock_bh(&adapter->mcc_lock);
+ 	return status;
+ }
+ 
+@@ -1508,7 +1508,7 @@ int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
+ 	struct be_cmd_req_q_destroy *req;
+ 	int status;
+ 
+-	mutex_lock(&adapter->mcc_lock);
++	spin_lock_bh(&adapter->mcc_lock);
+ 
+ 	wrb = wrb_from_mccq(adapter);
+ 	if (!wrb) {
+@@ -1525,7 +1525,7 @@ int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
+ 	q->created = false;
+ 
+ err:
+-	mutex_unlock(&adapter->mcc_lock);
++	spin_unlock_bh(&adapter->mcc_lock);
+ 	return status;
+ }
+ 
+@@ -1593,7 +1593,7 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
+ 	struct be_cmd_req_hdr *hdr;
+ 	int status = 0;
+ 
+-	mutex_lock(&adapter->mcc_lock);
++	spin_lock_bh(&adapter->mcc_lock);
+ 
+ 	wrb = wrb_from_mccq(adapter);
+ 	if (!wrb) {
+@@ -1621,7 +1621,7 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
+ 	adapter->stats_cmd_sent = true;
+ 
+ err:
+-	mutex_unlock(&adapter->mcc_lock);
++	spin_unlock_bh(&adapter->mcc_lock);
+ 	return status;
+ }
+ 
+@@ -1637,7 +1637,7 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
+ 			    CMD_SUBSYSTEM_ETH))
+ 		return -EPERM;
+ 
+-	mutex_lock(&adapter->mcc_lock);
++	spin_lock_bh(&adapter->mcc_lock);
+ 
+ 	wrb = wrb_from_mccq(adapter);
+ 	if (!wrb) {
+@@ -1660,7 +1660,7 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
+ 	adapter->stats_cmd_sent = true;
+ 
+ err:
+-	mutex_unlock(&adapter->mcc_lock);
++	spin_unlock_bh(&adapter->mcc_lock);
+ 	return status;
+ }
+ 
+@@ -1697,7 +1697,7 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
+ 	struct be_cmd_req_link_status *req;
+ 	int status;
+ 
+-	mutex_lock(&adapter->mcc_lock);
++	spin_lock_bh(&adapter->mcc_lock);
+ 
+ 	if (link_status)
+ 		*link_status = LINK_DOWN;
+@@ -1736,7 +1736,7 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
+ 	}
+ 
+ err:
+-	mutex_unlock(&adapter->mcc_lock);
++	spin_unlock_bh(&adapter->mcc_lock);
+ 	return status;
+ }
+ 
+@@ -1747,7 +1747,7 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter)
+ 	struct be_cmd_req_get_cntl_addnl_attribs *req;
+ 	int status = 0;
+ 
+-	mutex_lock(&adapter->mcc_lock);
++	spin_lock_bh(&adapter->mcc_lock);
+ 
+ 	wrb = wrb_from_mccq(adapter);
+ 	if (!wrb) {
+@@ -1762,7 +1762,7 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter)
+ 
+ 	status = be_mcc_notify(adapter);
+ err:
+-	mutex_unlock(&adapter->mcc_lock);
++	spin_unlock_bh(&adapter->mcc_lock);
+ 	return status;
+ }
+ 
+@@ -1811,7 +1811,7 @@ int be_cmd_get_fat_dump(struct be_adapter *adapter, u32 buf_len, void *buf)
+ 	if (!get_fat_cmd.va)
+ 		return -ENOMEM;
+ 
+-	mutex_lock(&adapter->mcc_lock);
++	spin_lock_bh(&adapter->mcc_lock);
+ 
+ 	while (total_size) {
+ 		buf_size = min(total_size, (u32)60 * 1024);
+@@ -1849,9 +1849,9 @@ int be_cmd_get_fat_dump(struct be_adapter *adapter, u32 buf_len, void *buf)
+ 		log_offset += buf_size;
+ 	}
+ err:
++	spin_unlock_bh(&adapter->mcc_lock);
+ 	dma_free_coherent(&adapter->pdev->dev, get_fat_cmd.size,
+ 			  get_fat_cmd.va, get_fat_cmd.dma);
+-	mutex_unlock(&adapter->mcc_lock);
+ 	return status;
+ }
+ 
+@@ -1862,7 +1862,7 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter)
+ 	struct be_cmd_req_get_fw_version *req;
+ 	int status;
+ 
+-	mutex_lock(&adapter->mcc_lock);
++	spin_lock_bh(&adapter->mcc_lock);
+ 
+ 	wrb = wrb_from_mccq(adapter);
+ 	if (!wrb) {
+@@ -1885,7 +1885,7 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter)
+ 			sizeof(adapter->fw_on_flash));
+ 	}
+ err:
+-	mutex_unlock(&adapter->mcc_lock);
++	spin_unlock_bh(&adapter->mcc_lock);
+ 	return status;
+ }
+ 
+@@ -1899,7 +1899,7 @@ static int __be_cmd_modify_eqd(struct be_adapter *adapter,
+ 	struct be_cmd_req_modify_eq_delay *req;
+ 	int status = 0, i;
+ 
+-	mutex_lock(&adapter->mcc_lock);
++	spin_lock_bh(&adapter->mcc_lock);
+ 
+ 	wrb = wrb_from_mccq(adapter);
+ 	if (!wrb) {
+@@ -1922,7 +1922,7 @@ static int __be_cmd_modify_eqd(struct be_adapter *adapter,
+ 
+ 	status = be_mcc_notify(adapter);
+ err:
+-	mutex_unlock(&adapter->mcc_lock);
++	spin_unlock_bh(&adapter->mcc_lock);
+ 	return status;
+ }
+ 
+@@ -1949,7 +1949,7 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
+ 	struct be_cmd_req_vlan_config *req;
+ 	int status;
+ 
+-	mutex_lock(&adapter->mcc_lock);
++	spin_lock_bh(&adapter->mcc_lock);
+ 
+ 	wrb = wrb_from_mccq(adapter);
+ 	if (!wrb) {
+@@ -1971,7 +1971,7 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
+ 
+ 	status = be_mcc_notify_wait(adapter);
+ err:
+-	mutex_unlock(&adapter->mcc_lock);
++	spin_unlock_bh(&adapter->mcc_lock);
+ 	return status;
+ }
+ 
+@@ -1982,7 +1982,7 @@ static int __be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
+ 	struct be_cmd_req_rx_filter *req = mem->va;
+ 	int status;
+ 
+-	mutex_lock(&adapter->mcc_lock);
++	spin_lock_bh(&adapter->mcc_lock);
+ 
+ 	wrb = wrb_from_mccq(adapter);
+ 	if (!wrb) {
+@@ -2015,7 +2015,7 @@ static int __be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
+ 
+ 	status = be_mcc_notify_wait(adapter);
+ err:
+-	mutex_unlock(&adapter->mcc_lock);
++	spin_unlock_bh(&adapter->mcc_lock);
+ 	return status;
+ }
+ 
+@@ -2046,7 +2046,7 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
+ 			    CMD_SUBSYSTEM_COMMON))
+ 		return -EPERM;
+ 
+-	mutex_lock(&adapter->mcc_lock);
++	spin_lock_bh(&adapter->mcc_lock);
+ 
+ 	wrb = wrb_from_mccq(adapter);
+ 	if (!wrb) {
+@@ -2066,7 +2066,7 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
+ 	status = be_mcc_notify_wait(adapter);
+ 
+ err:
+-	mutex_unlock(&adapter->mcc_lock);
++	spin_unlock_bh(&adapter->mcc_lock);
+ 
+ 	if (base_status(status) == MCC_STATUS_FEATURE_NOT_SUPPORTED)
+ 		return  -EOPNOTSUPP;
+@@ -2085,7 +2085,7 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
+ 			    CMD_SUBSYSTEM_COMMON))
+ 		return -EPERM;
+ 
+-	mutex_lock(&adapter->mcc_lock);
++	spin_lock_bh(&adapter->mcc_lock);
+ 
+ 	wrb = wrb_from_mccq(adapter);
+ 	if (!wrb) {
+@@ -2108,7 +2108,7 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
+ 	}
+ 
+ err:
+-	mutex_unlock(&adapter->mcc_lock);
++	spin_unlock_bh(&adapter->mcc_lock);
+ 	return status;
+ }
+ 
+@@ -2189,7 +2189,7 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
+ 	if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
+ 		return 0;
+ 
+-	mutex_lock(&adapter->mcc_lock);
++	spin_lock_bh(&adapter->mcc_lock);
+ 
+ 	wrb = wrb_from_mccq(adapter);
+ 	if (!wrb) {
+@@ -2214,7 +2214,7 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
+ 
+ 	status = be_mcc_notify_wait(adapter);
+ err:
+-	mutex_unlock(&adapter->mcc_lock);
++	spin_unlock_bh(&adapter->mcc_lock);
+ 	return status;
+ }
+ 
+@@ -2226,7 +2226,7 @@ int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
+ 	struct be_cmd_req_enable_disable_beacon *req;
+ 	int status;
+ 
+-	mutex_lock(&adapter->mcc_lock);
++	spin_lock_bh(&adapter->mcc_lock);
+ 
+ 	wrb = wrb_from_mccq(adapter);
+ 	if (!wrb) {
+@@ -2247,7 +2247,7 @@ int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
+ 	status = be_mcc_notify_wait(adapter);
+ 
+ err:
+-	mutex_unlock(&adapter->mcc_lock);
++	spin_unlock_bh(&adapter->mcc_lock);
+ 	return status;
+ }
+ 
+@@ -2258,7 +2258,7 @@ int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
+ 	struct be_cmd_req_get_beacon_state *req;
+ 	int status;
+ 
+-	mutex_lock(&adapter->mcc_lock);
++	spin_lock_bh(&adapter->mcc_lock);
+ 
+ 	wrb = wrb_from_mccq(adapter);
+ 	if (!wrb) {
+@@ -2282,7 +2282,7 @@ int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
+ 	}
+ 
+ err:
+-	mutex_unlock(&adapter->mcc_lock);
++	spin_unlock_bh(&adapter->mcc_lock);
+ 	return status;
+ }
+ 
+@@ -2306,7 +2306,7 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
+ 		return -ENOMEM;
+ 	}
+ 
+-	mutex_lock(&adapter->mcc_lock);
++	spin_lock_bh(&adapter->mcc_lock);
+ 
+ 	wrb = wrb_from_mccq(adapter);
+ 	if (!wrb) {
+@@ -2328,7 +2328,7 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
+ 		memcpy(data, resp->page_data + off, len);
+ 	}
+ err:
+-	mutex_unlock(&adapter->mcc_lock);
++	spin_unlock_bh(&adapter->mcc_lock);
+ 	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
+ 	return status;
+ }
+@@ -2345,7 +2345,7 @@ static int lancer_cmd_write_object(struct be_adapter *adapter,
+ 	void *ctxt = NULL;
+ 	int status;
+ 
+-	mutex_lock(&adapter->mcc_lock);
++	spin_lock_bh(&adapter->mcc_lock);
+ 	adapter->flash_status = 0;
+ 
+ 	wrb = wrb_from_mccq(adapter);
+@@ -2387,7 +2387,7 @@ static int lancer_cmd_write_object(struct be_adapter *adapter,
+ 	if (status)
+ 		goto err_unlock;
+ 
+-	mutex_unlock(&adapter->mcc_lock);
++	spin_unlock_bh(&adapter->mcc_lock);
+ 
+ 	if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
+ 					 msecs_to_jiffies(60000)))
+@@ -2406,7 +2406,7 @@ static int lancer_cmd_write_object(struct be_adapter *adapter,
+ 	return status;
+ 
+ err_unlock:
+-	mutex_unlock(&adapter->mcc_lock);
++	spin_unlock_bh(&adapter->mcc_lock);
+ 	return status;
+ }
+ 
+@@ -2460,7 +2460,7 @@ static int lancer_cmd_delete_object(struct be_adapter *adapter,
+ 	struct be_mcc_wrb *wrb;
+ 	int status;
+ 
+-	mutex_lock(&adapter->mcc_lock);
++	spin_lock_bh(&adapter->mcc_lock);
+ 
+ 	wrb = wrb_from_mccq(adapter);
+ 	if (!wrb) {
+@@ -2478,7 +2478,7 @@ static int lancer_cmd_delete_object(struct be_adapter *adapter,
+ 
+ 	status = be_mcc_notify_wait(adapter);
+ err:
+-	mutex_unlock(&adapter->mcc_lock);
++	spin_unlock_bh(&adapter->mcc_lock);
+ 	return status;
+ }
+ 
+@@ -2491,7 +2491,7 @@ int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
+ 	struct lancer_cmd_resp_read_object *resp;
+ 	int status;
+ 
+-	mutex_lock(&adapter->mcc_lock);
++	spin_lock_bh(&adapter->mcc_lock);
+ 
+ 	wrb = wrb_from_mccq(adapter);
+ 	if (!wrb) {
+@@ -2525,7 +2525,7 @@ int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
+ 	}
+ 
+ err_unlock:
+-	mutex_unlock(&adapter->mcc_lock);
++	spin_unlock_bh(&adapter->mcc_lock);
+ 	return status;
+ }
+ 
+@@ -2537,7 +2537,7 @@ static int be_cmd_write_flashrom(struct be_adapter *adapter,
+ 	struct be_cmd_write_flashrom *req;
+ 	int status;
+ 
+-	mutex_lock(&adapter->mcc_lock);
++	spin_lock_bh(&adapter->mcc_lock);
+ 	adapter->flash_status = 0;
+ 
+ 	wrb = wrb_from_mccq(adapter);
+@@ -2562,7 +2562,7 @@ static int be_cmd_write_flashrom(struct be_adapter *adapter,
+ 	if (status)
+ 		goto err_unlock;
+ 
+-	mutex_unlock(&adapter->mcc_lock);
++	spin_unlock_bh(&adapter->mcc_lock);
+ 
+ 	if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
+ 					 msecs_to_jiffies(40000)))
+@@ -2573,7 +2573,7 @@ static int be_cmd_write_flashrom(struct be_adapter *adapter,
+ 	return status;
+ 
+ err_unlock:
+-	mutex_unlock(&adapter->mcc_lock);
++	spin_unlock_bh(&adapter->mcc_lock);
+ 	return status;
+ }
+ 
+@@ -2584,7 +2584,7 @@ static int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
+ 	struct be_mcc_wrb *wrb;
+ 	int status;
+ 
+-	mutex_lock(&adapter->mcc_lock);
++	spin_lock_bh(&adapter->mcc_lock);
+ 
+ 	wrb = wrb_from_mccq(adapter);
+ 	if (!wrb) {
+@@ -2611,7 +2611,7 @@ static int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
+ 		memcpy(flashed_crc, req->crc, 4);
+ 
+ err:
+-	mutex_unlock(&adapter->mcc_lock);
++	spin_unlock_bh(&adapter->mcc_lock);
+ 	return status;
+ }
+ 
+@@ -3217,7 +3217,7 @@ int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
+ 	struct be_cmd_req_acpi_wol_magic_config *req;
+ 	int status;
+ 
+-	mutex_lock(&adapter->mcc_lock);
++	spin_lock_bh(&adapter->mcc_lock);
+ 
+ 	wrb = wrb_from_mccq(adapter);
+ 	if (!wrb) {
+@@ -3234,7 +3234,7 @@ int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
+ 	status = be_mcc_notify_wait(adapter);
+ 
+ err:
+-	mutex_unlock(&adapter->mcc_lock);
++	spin_unlock_bh(&adapter->mcc_lock);
+ 	return status;
+ }
+ 
+@@ -3249,7 +3249,7 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
+ 			    CMD_SUBSYSTEM_LOWLEVEL))
+ 		return -EPERM;
+ 
+-	mutex_lock(&adapter->mcc_lock);
++	spin_lock_bh(&adapter->mcc_lock);
+ 
+ 	wrb = wrb_from_mccq(adapter);
+ 	if (!wrb) {
+@@ -3272,7 +3272,7 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
+ 	if (status)
+ 		goto err_unlock;
+ 
+-	mutex_unlock(&adapter->mcc_lock);
++	spin_unlock_bh(&adapter->mcc_lock);
+ 
+ 	if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
+ 					 msecs_to_jiffies(SET_LB_MODE_TIMEOUT)))
+@@ -3281,7 +3281,7 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
+ 	return status;
+ 
+ err_unlock:
+-	mutex_unlock(&adapter->mcc_lock);
++	spin_unlock_bh(&adapter->mcc_lock);
+ 	return status;
+ }
+ 
+@@ -3298,7 +3298,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
+ 			    CMD_SUBSYSTEM_LOWLEVEL))
+ 		return -EPERM;
+ 
+-	mutex_lock(&adapter->mcc_lock);
++	spin_lock_bh(&adapter->mcc_lock);
+ 
+ 	wrb = wrb_from_mccq(adapter);
+ 	if (!wrb) {
+@@ -3324,7 +3324,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
+ 	if (status)
+ 		goto err;
+ 
+-	mutex_unlock(&adapter->mcc_lock);
++	spin_unlock_bh(&adapter->mcc_lock);
+ 
+ 	wait_for_completion(&adapter->et_cmd_compl);
+ 	resp = embedded_payload(wrb);
+@@ -3332,7 +3332,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
+ 
+ 	return status;
+ err:
+-	mutex_unlock(&adapter->mcc_lock);
++	spin_unlock_bh(&adapter->mcc_lock);
+ 	return status;
+ }
+ 
+@@ -3348,7 +3348,7 @@ int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
+ 			    CMD_SUBSYSTEM_LOWLEVEL))
+ 		return -EPERM;
+ 
+-	mutex_lock(&adapter->mcc_lock);
++	spin_lock_bh(&adapter->mcc_lock);
+ 
+ 	wrb = wrb_from_mccq(adapter);
+ 	if (!wrb) {
+@@ -3382,7 +3382,7 @@ int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
+ 	}
+ 
+ err:
+-	mutex_unlock(&adapter->mcc_lock);
++	spin_unlock_bh(&adapter->mcc_lock);
+ 	return status;
+ }
+ 
+@@ -3393,7 +3393,7 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter,
+ 	struct be_cmd_req_seeprom_read *req;
+ 	int status;
+ 
+-	mutex_lock(&adapter->mcc_lock);
++	spin_lock_bh(&adapter->mcc_lock);
+ 
+ 	wrb = wrb_from_mccq(adapter);
+ 	if (!wrb) {
+@@ -3409,7 +3409,7 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter,
+ 	status = be_mcc_notify_wait(adapter);
+ 
+ err:
+-	mutex_unlock(&adapter->mcc_lock);
++	spin_unlock_bh(&adapter->mcc_lock);
+ 	return status;
+ }
+ 
+@@ -3424,7 +3424,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
+ 			    CMD_SUBSYSTEM_COMMON))
+ 		return -EPERM;
+ 
+-	mutex_lock(&adapter->mcc_lock);
++	spin_lock_bh(&adapter->mcc_lock);
+ 
+ 	wrb = wrb_from_mccq(adapter);
+ 	if (!wrb) {
+@@ -3469,7 +3469,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
+ 	}
+ 	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
+ err:
+-	mutex_unlock(&adapter->mcc_lock);
++	spin_unlock_bh(&adapter->mcc_lock);
+ 	return status;
+ }
+ 
+@@ -3479,7 +3479,7 @@ static int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
+ 	struct be_cmd_req_set_qos *req;
+ 	int status;
+ 
+-	mutex_lock(&adapter->mcc_lock);
++	spin_lock_bh(&adapter->mcc_lock);
+ 
+ 	wrb = wrb_from_mccq(adapter);
+ 	if (!wrb) {
+@@ -3499,7 +3499,7 @@ static int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
+ 	status = be_mcc_notify_wait(adapter);
+ 
+ err:
+-	mutex_unlock(&adapter->mcc_lock);
++	spin_unlock_bh(&adapter->mcc_lock);
+ 	return status;
+ }
+ 
+@@ -3611,7 +3611,7 @@ int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
+ 	struct be_cmd_req_get_fn_privileges *req;
+ 	int status;
+ 
+-	mutex_lock(&adapter->mcc_lock);
++	spin_lock_bh(&adapter->mcc_lock);
+ 
+ 	wrb = wrb_from_mccq(adapter);
+ 	if (!wrb) {
+@@ -3643,7 +3643,7 @@ int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
+ 	}
+ 
+ err:
+-	mutex_unlock(&adapter->mcc_lock);
++	spin_unlock_bh(&adapter->mcc_lock);
+ 	return status;
+ }
+ 
+@@ -3655,7 +3655,7 @@ int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges,
+ 	struct be_cmd_req_set_fn_privileges *req;
+ 	int status;
+ 
+-	mutex_lock(&adapter->mcc_lock);
++	spin_lock_bh(&adapter->mcc_lock);
+ 
+ 	wrb = wrb_from_mccq(adapter);
+ 	if (!wrb) {
+@@ -3675,7 +3675,7 @@ int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges,
+ 
+ 	status = be_mcc_notify_wait(adapter);
+ err:
+-	mutex_unlock(&adapter->mcc_lock);
++	spin_unlock_bh(&adapter->mcc_lock);
+ 	return status;
+ }
+ 
+@@ -3707,7 +3707,7 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
+ 		return -ENOMEM;
+ 	}
+ 
+-	mutex_lock(&adapter->mcc_lock);
++	spin_lock_bh(&adapter->mcc_lock);
+ 
+ 	wrb = wrb_from_mccq(adapter);
+ 	if (!wrb) {
+@@ -3771,7 +3771,7 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
+ 	}
+ 
+ out:
+-	mutex_unlock(&adapter->mcc_lock);
++	spin_unlock_bh(&adapter->mcc_lock);
+ 	dma_free_coherent(&adapter->pdev->dev, get_mac_list_cmd.size,
+ 			  get_mac_list_cmd.va, get_mac_list_cmd.dma);
+ 	return status;
+@@ -3831,7 +3831,7 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
+ 	if (!cmd.va)
+ 		return -ENOMEM;
+ 
+-	mutex_lock(&adapter->mcc_lock);
++	spin_lock_bh(&adapter->mcc_lock);
+ 
+ 	wrb = wrb_from_mccq(adapter);
+ 	if (!wrb) {
+@@ -3853,7 +3853,7 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
+ 
+ err:
+ 	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
+-	mutex_unlock(&adapter->mcc_lock);
++	spin_unlock_bh(&adapter->mcc_lock);
+ 	return status;
+ }
+ 
+@@ -3889,7 +3889,7 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
+ 			    CMD_SUBSYSTEM_COMMON))
+ 		return -EPERM;
+ 
+-	mutex_lock(&adapter->mcc_lock);
++	spin_lock_bh(&adapter->mcc_lock);
+ 
+ 	wrb = wrb_from_mccq(adapter);
+ 	if (!wrb) {
+@@ -3930,7 +3930,7 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
+ 	status = be_mcc_notify_wait(adapter);
+ 
+ err:
+-	mutex_unlock(&adapter->mcc_lock);
++	spin_unlock_bh(&adapter->mcc_lock);
+ 	return status;
+ }
+ 
+@@ -3944,7 +3944,7 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
+ 	int status;
+ 	u16 vid;
+ 
+-	mutex_lock(&adapter->mcc_lock);
++	spin_lock_bh(&adapter->mcc_lock);
+ 
+ 	wrb = wrb_from_mccq(adapter);
+ 	if (!wrb) {
+@@ -3991,7 +3991,7 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
+ 	}
+ 
+ err:
+-	mutex_unlock(&adapter->mcc_lock);
++	spin_unlock_bh(&adapter->mcc_lock);
+ 	return status;
+ }
+ 
+@@ -4190,7 +4190,7 @@ int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
+ 	struct be_cmd_req_set_ext_fat_caps *req;
+ 	int status;
+ 
+-	mutex_lock(&adapter->mcc_lock);
++	spin_lock_bh(&adapter->mcc_lock);
+ 
+ 	wrb = wrb_from_mccq(adapter);
+ 	if (!wrb) {
+@@ -4206,7 +4206,7 @@ int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
+ 
+ 	status = be_mcc_notify_wait(adapter);
+ err:
+-	mutex_unlock(&adapter->mcc_lock);
++	spin_unlock_bh(&adapter->mcc_lock);
+ 	return status;
+ }
+ 
+@@ -4684,7 +4684,7 @@ int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op)
+ 	if (iface == 0xFFFFFFFF)
+ 		return -1;
+ 
+-	mutex_lock(&adapter->mcc_lock);
++	spin_lock_bh(&adapter->mcc_lock);
+ 
+ 	wrb = wrb_from_mccq(adapter);
+ 	if (!wrb) {
+@@ -4701,7 +4701,7 @@ int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op)
+ 
+ 	status = be_mcc_notify_wait(adapter);
+ err:
+-	mutex_unlock(&adapter->mcc_lock);
++	spin_unlock_bh(&adapter->mcc_lock);
+ 	return status;
+ }
+ 
+@@ -4735,7 +4735,7 @@ int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
+ 	struct be_cmd_resp_get_iface_list *resp;
+ 	int status;
+ 
+-	mutex_lock(&adapter->mcc_lock);
++	spin_lock_bh(&adapter->mcc_lock);
+ 
+ 	wrb = wrb_from_mccq(adapter);
+ 	if (!wrb) {
+@@ -4756,7 +4756,7 @@ int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
+ 	}
+ 
+ err:
+-	mutex_unlock(&adapter->mcc_lock);
++	spin_unlock_bh(&adapter->mcc_lock);
+ 	return status;
+ }
+ 
+@@ -4850,7 +4850,7 @@ int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
+ 	if (BEx_chip(adapter))
+ 		return 0;
+ 
+-	mutex_lock(&adapter->mcc_lock);
++	spin_lock_bh(&adapter->mcc_lock);
+ 
+ 	wrb = wrb_from_mccq(adapter);
+ 	if (!wrb) {
+@@ -4868,7 +4868,7 @@ int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
+ 	req->enable = 1;
+ 	status = be_mcc_notify_wait(adapter);
+ err:
+-	mutex_unlock(&adapter->mcc_lock);
++	spin_unlock_bh(&adapter->mcc_lock);
+ 	return status;
+ }
+ 
+@@ -4941,7 +4941,7 @@ __be_cmd_set_logical_link_config(struct be_adapter *adapter,
+ 	u32 link_config = 0;
+ 	int status;
+ 
+-	mutex_lock(&adapter->mcc_lock);
++	spin_lock_bh(&adapter->mcc_lock);
+ 
+ 	wrb = wrb_from_mccq(adapter);
+ 	if (!wrb) {
+@@ -4969,7 +4969,7 @@ __be_cmd_set_logical_link_config(struct be_adapter *adapter,
+ 
+ 	status = be_mcc_notify_wait(adapter);
+ err:
+-	mutex_unlock(&adapter->mcc_lock);
++	spin_unlock_bh(&adapter->mcc_lock);
+ 	return status;
+ }
+ 
+@@ -5000,8 +5000,7 @@ int be_cmd_set_features(struct be_adapter *adapter)
+ 	struct be_mcc_wrb *wrb;
+ 	int status;
+ 
+-	if (mutex_lock_interruptible(&adapter->mcc_lock))
+-		return -1;
++	spin_lock_bh(&adapter->mcc_lock);
+ 
+ 	wrb = wrb_from_mccq(adapter);
+ 	if (!wrb) {
+@@ -5039,7 +5038,7 @@ int be_cmd_set_features(struct be_adapter *adapter)
+ 		dev_info(&adapter->pdev->dev,
+ 			 "Adapter does not support HW error recovery\n");
+ 
+-	mutex_unlock(&adapter->mcc_lock);
++	spin_unlock_bh(&adapter->mcc_lock);
+ 	return status;
+ }
+ 
+@@ -5053,7 +5052,7 @@ int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
+ 	struct be_cmd_resp_hdr *resp;
+ 	int status;
+ 
+-	mutex_lock(&adapter->mcc_lock);
++	spin_lock_bh(&adapter->mcc_lock);
+ 
+ 	wrb = wrb_from_mccq(adapter);
+ 	if (!wrb) {
+@@ -5076,7 +5075,7 @@ int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
+ 	memcpy(wrb_payload, resp, sizeof(*resp) + resp->response_length);
+ 	be_dws_le_to_cpu(wrb_payload, sizeof(*resp) + resp->response_length);
+ err:
+-	mutex_unlock(&adapter->mcc_lock);
++	spin_unlock_bh(&adapter->mcc_lock);
+ 	return status;
+ }
+ EXPORT_SYMBOL(be_roce_mcc_cmd);
+diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
+index 875fe379eea213..3d2e2159211917 100644
+--- a/drivers/net/ethernet/emulex/benet/be_main.c
++++ b/drivers/net/ethernet/emulex/benet/be_main.c
+@@ -5667,8 +5667,8 @@ static int be_drv_init(struct be_adapter *adapter)
+ 	}
+ 
+ 	mutex_init(&adapter->mbox_lock);
+-	mutex_init(&adapter->mcc_lock);
+ 	mutex_init(&adapter->rx_filter_lock);
++	spin_lock_init(&adapter->mcc_lock);
+ 	spin_lock_init(&adapter->mcc_cq_lock);
+ 	init_completion(&adapter->et_cmd_compl);
+ 
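
The be_cmds.c/be_main.c hunks above convert adapter->mcc_lock from a mutex
into a spinlock taken with the _bh variants, so the MCC command path no
longer sleeps while still excluding softirq-context completion processing.
Note also that be_cmd_get_fat_dump() now drops the lock before
dma_free_coherent(), which may sleep and therefore must not run with bottom
halves disabled. A minimal sketch of the conversion pattern (struct and
command body abridged; the demo_* names are illustrative):

	#include <linux/spinlock.h>

	struct be_adapter_demo {
		spinlock_t mcc_lock;	/* was: struct mutex mcc_lock */
	};

	static void demo_drv_init(struct be_adapter_demo *adapter)
	{
		spin_lock_init(&adapter->mcc_lock);	/* was: mutex_init() */
	}

	static int demo_issue_mcc_cmd(struct be_adapter_demo *adapter)
	{
		int status = 0;

		/* the _bh variant also disables softirqs, preserving the
		 * exclusion the mutex gave against completion processing,
		 * but without sleeping
		 */
		spin_lock_bh(&adapter->mcc_lock);
		/* ... build the WRB and ring the MCC doorbell ... */
		spin_unlock_bh(&adapter->mcc_lock);
		return status;
	}
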
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
+index bab16c2191b2f0..181af419b878d5 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
+@@ -483,7 +483,7 @@ int hclge_ptp_init(struct hclge_dev *hdev)
+ 
+ 		ret = hclge_ptp_get_cycle(hdev);
+ 		if (ret)
+-			return ret;
++			goto out;
+ 	}
+ 
+ 	ret = hclge_ptp_int_en(hdev, true);
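
The hclge_ptp fix reroutes an early failure through the function's common
cleanup label instead of returning directly, so state set up earlier in
hclge_ptp_init() gets unwound. A generic sketch of the goto-unwind idiom it
restores (the demo_* names are placeholders, not the hclge functions):

	struct demo_dev;
	int demo_acquire(struct demo_dev *hdev);
	int demo_configure(struct demo_dev *hdev);
	void demo_release(struct demo_dev *hdev);

	static int demo_init(struct demo_dev *hdev)
	{
		int ret;

		ret = demo_acquire(hdev);
		if (ret)
			return ret;	/* nothing to undo yet */

		ret = demo_configure(hdev);
		if (ret)
			goto out;	/* must undo demo_acquire() */

		return 0;
	out:
		demo_release(hdev);
		return ret;
	}
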
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
+index f5acfb7d4ff655..ab7c2750c10425 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
+@@ -11,6 +11,8 @@
+ #include "dwmac_dma.h"
+ #include "dwmac1000.h"
+ 
++#define DRIVER_NAME "dwmac-loongson-pci"
++
+ /* Normal Loongson Tx Summary */
+ #define DMA_INTR_ENA_NIE_TX_LOONGSON	0x00040000
+ /* Normal Loongson Rx Summary */
+@@ -568,7 +570,7 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
+ 	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
+ 		if (pci_resource_len(pdev, i) == 0)
+ 			continue;
+-		ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
++		ret = pcim_iomap_regions(pdev, BIT(0), DRIVER_NAME);
+ 		if (ret)
+ 			goto err_disable_device;
+ 		break;
+@@ -687,7 +689,7 @@ static const struct pci_device_id loongson_dwmac_id_table[] = {
+ MODULE_DEVICE_TABLE(pci, loongson_dwmac_id_table);
+ 
+ static struct pci_driver loongson_dwmac_driver = {
+-	.name = "dwmac-loongson-pci",
++	.name = DRIVER_NAME,
+ 	.id_table = loongson_dwmac_id_table,
+ 	.probe = loongson_dwmac_probe,
+ 	.remove = loongson_dwmac_remove,
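
The dwmac-loongson change introduces a single DRIVER_NAME macro so the
string recorded as the owner of the iomapped BAR matches the name the
pci_driver registers under; previously pci_name(pdev) was passed, which
yields the device's bus address (e.g. "0000:00:03.0") rather than the
driver name. A sketch of the pattern (demo names are illustrative, and the
usual pci_driver fields are omitted):

	#define DRIVER_NAME "demo-pci"

	static int demo_probe(struct pci_dev *pdev,
			      const struct pci_device_id *id)
	{
		/* this string becomes the owner shown in /proc/iomem */
		return pcim_iomap_regions(pdev, BIT(0), DRIVER_NAME);
	}

	static struct pci_driver demo_driver = {
		.name  = DRIVER_NAME,	/* same string, one definition */
		.probe = demo_probe,
		/* .id_table, .remove etc. omitted */
	};
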
+diff --git a/drivers/net/ipa/data/ipa_data-v4.7.c b/drivers/net/ipa/data/ipa_data-v4.7.c
+index c8c23d9be961b1..41f212209993f1 100644
+--- a/drivers/net/ipa/data/ipa_data-v4.7.c
++++ b/drivers/net/ipa/data/ipa_data-v4.7.c
+@@ -28,20 +28,18 @@ enum ipa_resource_type {
+ enum ipa_rsrc_group_id {
+ 	/* Source resource group identifiers */
+ 	IPA_RSRC_GROUP_SRC_UL_DL			= 0,
+-	IPA_RSRC_GROUP_SRC_UC_RX_Q,
+ 	IPA_RSRC_GROUP_SRC_COUNT,	/* Last in set; not a source group */
+ 
+ 	/* Destination resource group identifiers */
+-	IPA_RSRC_GROUP_DST_UL_DL_DPL			= 0,
+-	IPA_RSRC_GROUP_DST_UNUSED_1,
++	IPA_RSRC_GROUP_DST_UL_DL			= 0,
+ 	IPA_RSRC_GROUP_DST_COUNT,	/* Last; not a destination group */
+ };
+ 
+ /* QSB configuration data for an SoC having IPA v4.7 */
+ static const struct ipa_qsb_data ipa_qsb_data[] = {
+ 	[IPA_QSB_MASTER_DDR] = {
+-		.max_writes		= 8,
+-		.max_reads		= 0,	/* no limit (hardware max) */
++		.max_writes		= 12,
++		.max_reads		= 13,
+ 		.max_reads_beats	= 120,
+ 	},
+ };
+@@ -81,7 +79,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
+ 		},
+ 		.endpoint = {
+ 			.config = {
+-				.resource_group	= IPA_RSRC_GROUP_DST_UL_DL_DPL,
++				.resource_group	= IPA_RSRC_GROUP_DST_UL_DL,
+ 				.aggregation	= true,
+ 				.status_enable	= true,
+ 				.rx = {
+@@ -106,6 +104,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
+ 			.filter_support	= true,
+ 			.config = {
+ 				.resource_group	= IPA_RSRC_GROUP_SRC_UL_DL,
++				.checksum       = true,
+ 				.qmap		= true,
+ 				.status_enable	= true,
+ 				.tx = {
+@@ -128,7 +127,8 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
+ 		},
+ 		.endpoint = {
+ 			.config = {
+-				.resource_group	= IPA_RSRC_GROUP_DST_UL_DL_DPL,
++				.resource_group	= IPA_RSRC_GROUP_DST_UL_DL,
++				.checksum       = true,
+ 				.qmap		= true,
+ 				.aggregation	= true,
+ 				.rx = {
+@@ -197,12 +197,12 @@ static const struct ipa_resource ipa_resource_src[] = {
+ /* Destination resource configuration data for an SoC having IPA v4.7 */
+ static const struct ipa_resource ipa_resource_dst[] = {
+ 	[IPA_RESOURCE_TYPE_DST_DATA_SECTORS] = {
+-		.limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = {
++		.limits[IPA_RSRC_GROUP_DST_UL_DL] = {
+ 			.min = 7,	.max = 7,
+ 		},
+ 	},
+ 	[IPA_RESOURCE_TYPE_DST_DPS_DMARS] = {
+-		.limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = {
++		.limits[IPA_RSRC_GROUP_DST_UL_DL] = {
+ 			.min = 2,	.max = 2,
+ 		},
+ 	},
+diff --git a/drivers/net/mctp/mctp-i3c.c b/drivers/net/mctp/mctp-i3c.c
+index ee9d562f0817cf..a2b15cddf46e6b 100644
+--- a/drivers/net/mctp/mctp-i3c.c
++++ b/drivers/net/mctp/mctp-i3c.c
+@@ -507,6 +507,9 @@ static int mctp_i3c_header_create(struct sk_buff *skb, struct net_device *dev,
+ {
+ 	struct mctp_i3c_internal_hdr *ihdr;
+ 
++	if (!daddr || !saddr)
++		return -EINVAL;
++
+ 	skb_push(skb, sizeof(struct mctp_i3c_internal_hdr));
+ 	skb_reset_mac_header(skb);
+ 	ihdr = (void *)skb_mac_header(skb);
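
The mctp-i3c fix makes the header_create handler reject NULL hardware
addresses before dereferencing them, since the core may invoke
header_ops->create with either address unset. A sketch of the defensive
shape (struct demo_hdr and the 6-byte address width are assumptions for
illustration):

	struct demo_hdr {
		u8 dest[6];
		u8 src[6];
	};

	static int demo_header_create(struct sk_buff *skb,
				      struct net_device *dev,
				      unsigned short type, const void *daddr,
				      const void *saddr, unsigned int len)
	{
		struct demo_hdr *hdr;

		if (!daddr || !saddr)
			return -EINVAL;	/* caller may pass NULL */

		skb_push(skb, sizeof(*hdr));
		skb_reset_mac_header(skb);
		hdr = (void *)skb_mac_header(skb);
		memcpy(hdr->dest, daddr, sizeof(hdr->dest));
		memcpy(hdr->src, saddr, sizeof(hdr->src));
		return 0;
	}
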
+diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
+index 4f3e742907cb62..c9cfdc33fc5f17 100644
+--- a/drivers/net/phy/phy.c
++++ b/drivers/net/phy/phy.c
+@@ -615,6 +615,49 @@ int phy_ethtool_get_stats(struct phy_device *phydev,
+ }
+ EXPORT_SYMBOL(phy_ethtool_get_stats);
+ 
++/**
++ * __phy_ethtool_get_phy_stats - Retrieve standardized PHY statistics
++ * @phydev: Pointer to the PHY device
++ * @phy_stats: Pointer to ethtool_eth_phy_stats structure
++ * @phydev_stats: Pointer to ethtool_phy_stats structure
++ *
++ * Fetches PHY statistics using a kernel-defined interface for consistent
++ * diagnostics. Unlike phy_ethtool_get_stats(), which allows custom stats,
++ * this function enforces a standardized format for better interoperability.
++ */
++void __phy_ethtool_get_phy_stats(struct phy_device *phydev,
++				 struct ethtool_eth_phy_stats *phy_stats,
++				 struct ethtool_phy_stats *phydev_stats)
++{
++	if (!phydev->drv || !phydev->drv->get_phy_stats)
++		return;
++
++	mutex_lock(&phydev->lock);
++	phydev->drv->get_phy_stats(phydev, phy_stats, phydev_stats);
++	mutex_unlock(&phydev->lock);
++}
++
++/**
++ * __phy_ethtool_get_link_ext_stats - Retrieve extended link statistics for a PHY
++ * @phydev: Pointer to the PHY device
++ * @link_stats: Pointer to the structure to store extended link statistics
++ *
++ * Populates the ethtool_link_ext_stats structure with link down event counts
++ * and additional driver-specific link statistics, if available.
++ */
++void __phy_ethtool_get_link_ext_stats(struct phy_device *phydev,
++				      struct ethtool_link_ext_stats *link_stats)
++{
++	link_stats->link_down_events = READ_ONCE(phydev->link_down_events);
++
++	if (!phydev->drv || !phydev->drv->get_link_stats)
++		return;
++
++	mutex_lock(&phydev->lock);
++	phydev->drv->get_link_stats(phydev, link_stats);
++	mutex_unlock(&phydev->lock);
++}
++
+ /**
+  * phy_ethtool_get_plca_cfg - Get PLCA RS configuration
+  * @phydev: the phy_device struct
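
The two helpers added to phy.c give ethtool a standardized path into PHY
statistics: each checks for an optional driver callback and invokes it
under phydev->lock, and the phy_device.c hunk below wires them into the
phylib stub table. A sketch of the driver side (the callback name comes
from the hunk above; demo_read_counter() and the counter fields used are
assumptions for illustration):

	static void demo_get_phy_stats(struct phy_device *phydev,
				       struct ethtool_eth_phy_stats *eth_stats,
				       struct ethtool_phy_stats *stats)
	{
		/* phydev->lock is already held by the core helper */
		stats->rx_errors = demo_read_counter(phydev, DEMO_RX_ERR_REG);
		stats->tx_errors = demo_read_counter(phydev, DEMO_TX_ERR_REG);
	}

	static struct phy_driver demo_phy_driver = {
		.get_phy_stats = demo_get_phy_stats,
		/* ... usual phy_driver fields ... */
	};
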
+diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
+index 499797646580e3..119dfa2d6643a9 100644
+--- a/drivers/net/phy/phy_device.c
++++ b/drivers/net/phy/phy_device.c
+@@ -3776,6 +3776,8 @@ static const struct ethtool_phy_ops phy_ethtool_phy_ops = {
+ static const struct phylib_stubs __phylib_stubs = {
+ 	.hwtstamp_get = __phy_hwtstamp_get,
+ 	.hwtstamp_set = __phy_hwtstamp_set,
++	.get_phy_stats = __phy_ethtool_get_phy_stats,
++	.get_link_ext_stats = __phy_ethtool_get_link_ext_stats,
+ };
+ 
+ static void phylib_register_stubs(void)
+diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
+index 4583e15ad03a0b..1420c4efa48e68 100644
+--- a/drivers/net/ppp/ppp_generic.c
++++ b/drivers/net/ppp/ppp_generic.c
+@@ -72,6 +72,17 @@
+ #define PPP_PROTO_LEN	2
+ #define PPP_LCP_HDRLEN	4
+ 
++/* The filter instructions generated by libpcap are constructed
++ * assuming a four-byte PPP header on each packet, where the last
++ * 2 bytes are the protocol field defined in the RFC and the first
++ * byte of the first 2 bytes indicates the direction.
++ * The second byte is currently unused, but we still need to initialize
++ * it to prevent crafted BPF programs from reading them which would
++ * cause reading of uninitialized data.
++ */
++#define PPP_FILTER_OUTBOUND_TAG 0x0100
++#define PPP_FILTER_INBOUND_TAG  0x0000
++
+ /*
+  * An instance of /dev/ppp can be associated with either a ppp
+  * interface unit or a ppp channel.  In both cases, file->private_data
+@@ -1762,10 +1773,10 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
+ 
+ 	if (proto < 0x8000) {
+ #ifdef CONFIG_PPP_FILTER
+-		/* check if we should pass this packet */
+-		/* the filter instructions are constructed assuming
+-		   a four-byte PPP header on each packet */
+-		*(u8 *)skb_push(skb, 2) = 1;
++		/* check if the packet passes the pass and active filters.
++		 * See comment for PPP_FILTER_OUTBOUND_TAG above.
++		 */
++		*(__be16 *)skb_push(skb, 2) = htons(PPP_FILTER_OUTBOUND_TAG);
+ 		if (ppp->pass_filter &&
+ 		    bpf_prog_run(ppp->pass_filter, skb) == 0) {
+ 			if (ppp->debug & 1)
+@@ -2482,14 +2493,13 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
+ 		/* network protocol frame - give it to the kernel */
+ 
+ #ifdef CONFIG_PPP_FILTER
+-		/* check if the packet passes the pass and active filters */
+-		/* the filter instructions are constructed assuming
+-		   a four-byte PPP header on each packet */
+ 		if (ppp->pass_filter || ppp->active_filter) {
+ 			if (skb_unclone(skb, GFP_ATOMIC))
+ 				goto err;
+-
+-			*(u8 *)skb_push(skb, 2) = 0;
++			/* Check if the packet passes the pass and active filters.
++			 * See comment for PPP_FILTER_INBOUND_TAG above.
++			 */
++			*(__be16 *)skb_push(skb, 2) = htons(PPP_FILTER_INBOUND_TAG);
+ 			if (ppp->pass_filter &&
+ 			    bpf_prog_run(ppp->pass_filter, skb) == 0) {
+ 				if (ppp->debug & 1)
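
The ppp_generic change replaces a bare one-byte write with a 16-bit tag
written through htons(), so both bytes of the pseudo-header that
libpcap-generated filters expect are initialized; previously the second
byte was left holding whatever the skb headroom contained, and a crafted
BPF program could read that uninitialized data. A condensed sketch of the
tagging step (demo_tag_frame is illustrative; the tag values are the ones
defined in the hunk above):

	#define PPP_FILTER_OUTBOUND_TAG 0x0100
	#define PPP_FILTER_INBOUND_TAG  0x0000

	static void demo_tag_frame(struct sk_buff *skb, bool outbound)
	{
		u16 tag = outbound ? PPP_FILTER_OUTBOUND_TAG
				   : PPP_FILTER_INBOUND_TAG;

		/* write both pseudo-header bytes, not just the first */
		*(__be16 *)skb_push(skb, 2) = htons(tag);
	}
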
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+index c620911a11933a..754e01688900d3 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+@@ -1197,7 +1197,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
+ 
+ 			if (tlv_len != sizeof(*fseq_ver))
+ 				goto invalid_tlv_len;
+-			IWL_INFO(drv, "TLV_FW_FSEQ_VERSION: %s\n",
++			IWL_INFO(drv, "TLV_FW_FSEQ_VERSION: %.32s\n",
+ 				 fseq_ver->version);
+ 			}
+ 			break;
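
The iwl-drv fix adds a precision to the format string because the TLV's
version field is a fixed 32-byte array with no guaranteed NUL terminator;
"%.32s" bounds the read either way. A minimal sketch of the idiom
(demo_print_fseq and the copy from tlv_data are illustrative):

	static void demo_print_fseq(const u8 *tlv_data)
	{
		char version[32];	/* fixed-width field, may lack '\0' */

		memcpy(version, tlv_data, sizeof(version));
		/* precision caps the read even without a terminator */
		pr_info("TLV_FW_FSEQ_VERSION: %.32s\n", version);
	}
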
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+index 91ca830a7b6035..f4276fdee6beae 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+@@ -1518,6 +1518,13 @@ static ssize_t iwl_dbgfs_fw_dbg_clear_write(struct iwl_mvm *mvm,
+ 	if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_9000)
+ 		return -EOPNOTSUPP;
+ 
++	/*
++	 * If the firmware is not running, silently succeed since there is
++	 * no data to clear.
++	 */
++	if (!iwl_mvm_firmware_running(mvm))
++		return count;
++
+ 	mutex_lock(&mvm->mutex);
+ 	iwl_fw_dbg_clear_monitor_buf(&mvm->fwrt);
+ 	mutex_unlock(&mvm->mutex);
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+index 72fa7ac86516cd..17b8ccc275693b 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+@@ -1030,6 +1030,8 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
+ 		/* End TE, notify mac80211 */
+ 		mvmvif->time_event_data.id = SESSION_PROTECT_CONF_MAX_ID;
+ 		mvmvif->time_event_data.link_id = -1;
++		/* set the bit so the ROC cleanup will actually clean up */
++		set_bit(IWL_MVM_STATUS_ROC_P2P_RUNNING, &mvm->status);
+ 		iwl_mvm_roc_finished(mvm);
+ 		ieee80211_remain_on_channel_expired(mvm->hw);
+ 	} else if (le32_to_cpu(notif->start)) {
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+index 27a7e0b5b3d51e..ebe9b25cc53a99 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+@@ -1,6 +1,6 @@
+ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+ /*
+- * Copyright (C) 2003-2015, 2018-2024 Intel Corporation
++ * Copyright (C) 2003-2015, 2018-2025 Intel Corporation
+  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
+  * Copyright (C) 2016-2017 Intel Deutschland GmbH
+  */
+@@ -643,7 +643,8 @@ dma_addr_t iwl_pcie_get_sgt_tb_phys(struct sg_table *sgt, unsigned int offset,
+ 				    unsigned int len);
+ struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb,
+ 				   struct iwl_cmd_meta *cmd_meta,
+-				   u8 **hdr, unsigned int hdr_room);
++				   u8 **hdr, unsigned int hdr_room,
++				   unsigned int offset);
+ 
+ void iwl_pcie_free_tso_pages(struct iwl_trans *trans, struct sk_buff *skb,
+ 			     struct iwl_cmd_meta *cmd_meta);
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+index b1846abb99b78f..477a05cd1288b0 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+@@ -1,7 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+ /*
+  * Copyright (C) 2017 Intel Deutschland GmbH
+- * Copyright (C) 2018-2020, 2023-2024 Intel Corporation
++ * Copyright (C) 2018-2020, 2023-2025 Intel Corporation
+  */
+ #include <net/tso.h>
+ #include <linux/tcp.h>
+@@ -188,7 +188,8 @@ static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans,
+ 		(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr));
+ 
+ 	/* Our device supports 9 segments at most, it will fit in 1 page */
+-	sgt = iwl_pcie_prep_tso(trans, skb, out_meta, &start_hdr, hdr_room);
++	sgt = iwl_pcie_prep_tso(trans, skb, out_meta, &start_hdr, hdr_room,
++				snap_ip_tcp_hdrlen + hdr_len);
+ 	if (!sgt)
+ 		return -ENOMEM;
+ 
+@@ -347,6 +348,7 @@ iwl_tfh_tfd *iwl_txq_gen2_build_tx_amsdu(struct iwl_trans *trans,
+ 	return tfd;
+ 
+ out_err:
++	iwl_pcie_free_tso_pages(trans, skb, out_meta);
+ 	iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
+ 	return NULL;
+ }
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+index 9fe050f0ddc160..9fcdd06e126ae1 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+ /*
+- * Copyright (C) 2003-2014, 2018-2021, 2023-2024 Intel Corporation
++ * Copyright (C) 2003-2014, 2018-2021, 2023-2025 Intel Corporation
+  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
+  * Copyright (C) 2016-2017 Intel Deutschland GmbH
+  */
+@@ -1853,6 +1853,7 @@ dma_addr_t iwl_pcie_get_sgt_tb_phys(struct sg_table *sgt, unsigned int offset,
+  * @cmd_meta: command meta to store the scatter list information for unmapping
+  * @hdr: output argument for TSO headers
+  * @hdr_room: requested length for TSO headers
++ * @offset: offset into the data from which mapping should start
+  *
+  * Allocate space for a scatter gather list and TSO headers and map the SKB
+  * using the scatter gather list. The SKB is unmapped again when the page is
+@@ -1862,9 +1863,12 @@ dma_addr_t iwl_pcie_get_sgt_tb_phys(struct sg_table *sgt, unsigned int offset,
+  */
+ struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb,
+ 				   struct iwl_cmd_meta *cmd_meta,
+-				   u8 **hdr, unsigned int hdr_room)
++				   u8 **hdr, unsigned int hdr_room,
++				   unsigned int offset)
+ {
+ 	struct sg_table *sgt;
++	unsigned int n_segments = skb_shinfo(skb)->nr_frags + 1;
++	int orig_nents;
+ 
+ 	if (WARN_ON_ONCE(skb_has_frag_list(skb)))
+ 		return NULL;
+@@ -1872,8 +1876,7 @@ struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb,
+ 	*hdr = iwl_pcie_get_page_hdr(trans,
+ 				     hdr_room + __alignof__(struct sg_table) +
+ 				     sizeof(struct sg_table) +
+-				     (skb_shinfo(skb)->nr_frags + 1) *
+-				     sizeof(struct scatterlist),
++				     n_segments * sizeof(struct scatterlist),
+ 				     skb);
+ 	if (!*hdr)
+ 		return NULL;
+@@ -1881,14 +1884,15 @@ struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb,
+ 	sgt = (void *)PTR_ALIGN(*hdr + hdr_room, __alignof__(struct sg_table));
+ 	sgt->sgl = (void *)(sgt + 1);
+ 
+-	sg_init_table(sgt->sgl, skb_shinfo(skb)->nr_frags + 1);
++	sg_init_table(sgt->sgl, n_segments);
+ 
+ 	/* Only map the data, not the header (it is copied to the TSO page) */
+-	sgt->orig_nents = skb_to_sgvec(skb, sgt->sgl, skb_headlen(skb),
+-				       skb->data_len);
+-	if (WARN_ON_ONCE(sgt->orig_nents <= 0))
++	orig_nents = skb_to_sgvec(skb, sgt->sgl, offset, skb->len - offset);
++	if (WARN_ON_ONCE(orig_nents <= 0))
+ 		return NULL;
+ 
++	sgt->orig_nents = orig_nents;
++
+ 	/* And map the entire SKB */
+ 	if (dma_map_sgtable(trans->dev, sgt, DMA_TO_DEVICE, 0) < 0)
+ 		return NULL;
+@@ -1937,7 +1941,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
+ 		(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;
+ 
+ 	/* Our device supports 9 segments at most, it will fit in 1 page */
+-	sgt = iwl_pcie_prep_tso(trans, skb, out_meta, &start_hdr, hdr_room);
++	sgt = iwl_pcie_prep_tso(trans, skb, out_meta, &start_hdr, hdr_room,
++				snap_ip_tcp_hdrlen + hdr_len + iv_len);
+ 	if (!sgt)
+ 		return -ENOMEM;
+ 
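
The pcie/tx changes thread an offset into iwl_pcie_prep_tso() so the
scatterlist covers only the payload: with A-MSDU the protocol headers are
copied into a separate TSO page, and the previous call started at
skb_headlen(), which evidently left payload bytes living in the linear
area unmapped. The fixed call maps skb->len - offset bytes starting just
past the copied headers. A condensed sketch of the mapping step
(demo_map_payload is illustrative):

	static int demo_map_payload(struct sk_buff *skb,
				    struct scatterlist *sgl,
				    unsigned int offset)
	{
		int nents;

		sg_init_table(sgl, skb_shinfo(skb)->nr_frags + 1);
		/* map only the data past the copied headers */
		nents = skb_to_sgvec(skb, sgl, offset, skb->len - offset);
		if (nents <= 0)
			return -EIO;
		return nents;
	}
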
+diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
+index 61af1583356c27..e4daac9c244015 100644
+--- a/drivers/nvme/host/ioctl.c
++++ b/drivers/nvme/host/ioctl.c
+@@ -120,19 +120,31 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
+ 	struct nvme_ns *ns = q->queuedata;
+ 	struct block_device *bdev = ns ? ns->disk->part0 : NULL;
+ 	bool supports_metadata = bdev && blk_get_integrity(bdev->bd_disk);
++	struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
+ 	bool has_metadata = meta_buffer && meta_len;
+ 	struct bio *bio = NULL;
+ 	int ret;
+ 
+-	if (has_metadata && !supports_metadata)
+-		return -EINVAL;
++	if (!nvme_ctrl_sgl_supported(ctrl))
++		dev_warn_once(ctrl->device, "using unchecked data buffer\n");
++	if (has_metadata) {
++		if (!supports_metadata) {
++			ret = -EINVAL;
++			goto out;
++		}
++		if (!nvme_ctrl_meta_sgl_supported(ctrl))
++			dev_warn_once(ctrl->device,
++				      "using unchecked metadata buffer\n");
++	}
+ 
+ 	if (ioucmd && (ioucmd->flags & IORING_URING_CMD_FIXED)) {
+ 		struct iov_iter iter;
+ 
+ 		/* fixedbufs is only for non-vectored io */
+-		if (WARN_ON_ONCE(flags & NVME_IOCTL_VEC))
+-			return -EINVAL;
++		if (WARN_ON_ONCE(flags & NVME_IOCTL_VEC)) {
++			ret = -EINVAL;
++			goto out;
++		}
+ 		ret = io_uring_cmd_import_fixed(ubuffer, bufflen,
+ 				rq_data_dir(req), &iter, ioucmd);
+ 		if (ret < 0)
+diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
+index 61bba5513de05a..dcdce7d12e441a 100644
+--- a/drivers/nvme/host/nvme.h
++++ b/drivers/nvme/host/nvme.h
+@@ -1130,6 +1130,13 @@ static inline bool nvme_ctrl_sgl_supported(struct nvme_ctrl *ctrl)
+ 	return ctrl->sgls & ((1 << 0) | (1 << 1));
+ }
+ 
++static inline bool nvme_ctrl_meta_sgl_supported(struct nvme_ctrl *ctrl)
++{
++	if (ctrl->ops->flags & NVME_F_FABRICS)
++		return true;
++	return ctrl->sgls & NVME_CTRL_SGLS_MSDS;
++}
++
+ #ifdef CONFIG_NVME_HOST_AUTH
+ int __init nvme_init_auth(void);
+ void __exit nvme_exit_auth(void);
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index cc74682dc0d4e9..e1329d4974fd6f 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -43,6 +43,7 @@
+  */
+ #define NVME_MAX_KB_SZ	8192
+ #define NVME_MAX_SEGS	128
++#define NVME_MAX_META_SEGS 15
+ #define NVME_MAX_NR_ALLOCATIONS	5
+ 
+ static int use_threaded_interrupts;
+@@ -143,6 +144,7 @@ struct nvme_dev {
+ 	bool hmb;
+ 
+ 	mempool_t *iod_mempool;
++	mempool_t *iod_meta_mempool;
+ 
+ 	/* shadow doorbell buffer support: */
+ 	__le32 *dbbuf_dbs;
+@@ -238,6 +240,8 @@ struct nvme_iod {
+ 	dma_addr_t first_dma;
+ 	dma_addr_t meta_dma;
+ 	struct sg_table sgt;
++	struct sg_table meta_sgt;
++	union nvme_descriptor meta_list;
+ 	union nvme_descriptor list[NVME_MAX_NR_ALLOCATIONS];
+ };
+ 
+@@ -505,6 +509,15 @@ static void nvme_commit_rqs(struct blk_mq_hw_ctx *hctx)
+ 	spin_unlock(&nvmeq->sq_lock);
+ }
+ 
++static inline bool nvme_pci_metadata_use_sgls(struct nvme_dev *dev,
++					      struct request *req)
++{
++	if (!nvme_ctrl_meta_sgl_supported(&dev->ctrl))
++		return false;
++	return req->nr_integrity_segments > 1 ||
++		nvme_req(req)->flags & NVME_REQ_USERCMD;
++}
++
+ static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req,
+ 				     int nseg)
+ {
+@@ -517,8 +530,10 @@ static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req,
+ 		return false;
+ 	if (!nvmeq->qid)
+ 		return false;
++	if (nvme_pci_metadata_use_sgls(dev, req))
++		return true;
+ 	if (!sgl_threshold || avg_seg_size < sgl_threshold)
+-		return false;
++		return nvme_req(req)->flags & NVME_REQ_USERCMD;
+ 	return true;
+ }
+ 
+@@ -779,7 +794,8 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
+ 		struct bio_vec bv = req_bvec(req);
+ 
+ 		if (!is_pci_p2pdma_page(bv.bv_page)) {
+-			if ((bv.bv_offset & (NVME_CTRL_PAGE_SIZE - 1)) +
++			if (!nvme_pci_metadata_use_sgls(dev, req) &&
++			    (bv.bv_offset & (NVME_CTRL_PAGE_SIZE - 1)) +
+ 			     bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2)
+ 				return nvme_setup_prp_simple(dev, req,
+ 							     &cmnd->rw, &bv);
+@@ -823,11 +839,69 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
+ 	return ret;
+ }
+ 
+-static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req,
+-		struct nvme_command *cmnd)
++static blk_status_t nvme_pci_setup_meta_sgls(struct nvme_dev *dev,
++					     struct request *req)
++{
++	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
++	struct nvme_rw_command *cmnd = &iod->cmd.rw;
++	struct nvme_sgl_desc *sg_list;
++	struct scatterlist *sgl, *sg;
++	unsigned int entries;
++	dma_addr_t sgl_dma;
++	int rc, i;
++
++	iod->meta_sgt.sgl = mempool_alloc(dev->iod_meta_mempool, GFP_ATOMIC);
++	if (!iod->meta_sgt.sgl)
++		return BLK_STS_RESOURCE;
++
++	sg_init_table(iod->meta_sgt.sgl, req->nr_integrity_segments);
++	iod->meta_sgt.orig_nents = blk_rq_map_integrity_sg(req,
++							   iod->meta_sgt.sgl);
++	if (!iod->meta_sgt.orig_nents)
++		goto out_free_sg;
++
++	rc = dma_map_sgtable(dev->dev, &iod->meta_sgt, rq_dma_dir(req),
++			     DMA_ATTR_NO_WARN);
++	if (rc)
++		goto out_free_sg;
++
++	sg_list = dma_pool_alloc(dev->prp_small_pool, GFP_ATOMIC, &sgl_dma);
++	if (!sg_list)
++		goto out_unmap_sg;
++
++	entries = iod->meta_sgt.nents;
++	iod->meta_list.sg_list = sg_list;
++	iod->meta_dma = sgl_dma;
++
++	cmnd->flags = NVME_CMD_SGL_METASEG;
++	cmnd->metadata = cpu_to_le64(sgl_dma);
++
++	sgl = iod->meta_sgt.sgl;
++	if (entries == 1) {
++		nvme_pci_sgl_set_data(sg_list, sgl);
++		return BLK_STS_OK;
++	}
++
++	sgl_dma += sizeof(*sg_list);
++	nvme_pci_sgl_set_seg(sg_list, sgl_dma, entries);
++	for_each_sg(sgl, sg, entries, i)
++		nvme_pci_sgl_set_data(&sg_list[i + 1], sg);
++
++	return BLK_STS_OK;
++
++out_unmap_sg:
++	dma_unmap_sgtable(dev->dev, &iod->meta_sgt, rq_dma_dir(req), 0);
++out_free_sg:
++	mempool_free(iod->meta_sgt.sgl, dev->iod_meta_mempool);
++	return BLK_STS_RESOURCE;
++}
++
++static blk_status_t nvme_pci_setup_meta_mptr(struct nvme_dev *dev,
++					     struct request *req)
+ {
+ 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ 	struct bio_vec bv = rq_integrity_vec(req);
++	struct nvme_command *cmnd = &iod->cmd;
+ 
+ 	iod->meta_dma = dma_map_bvec(dev->dev, &bv, rq_dma_dir(req), 0);
+ 	if (dma_mapping_error(dev->dev, iod->meta_dma))
+@@ -836,6 +910,13 @@ static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req,
+ 	return BLK_STS_OK;
+ }
+ 
++static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req)
++{
++	if (nvme_pci_metadata_use_sgls(dev, req))
++		return nvme_pci_setup_meta_sgls(dev, req);
++	return nvme_pci_setup_meta_mptr(dev, req);
++}
++
+ static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req)
+ {
+ 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+@@ -844,6 +925,7 @@ static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req)
+ 	iod->aborted = false;
+ 	iod->nr_allocations = -1;
+ 	iod->sgt.nents = 0;
++	iod->meta_sgt.nents = 0;
+ 
+ 	ret = nvme_setup_cmd(req->q->queuedata, req);
+ 	if (ret)
+@@ -856,7 +938,7 @@ static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req)
+ 	}
+ 
+ 	if (blk_integrity_rq(req)) {
+-		ret = nvme_map_metadata(dev, req, &iod->cmd);
++		ret = nvme_map_metadata(dev, req);
+ 		if (ret)
+ 			goto out_unmap_data;
+ 	}
+@@ -955,17 +1037,31 @@ static void nvme_queue_rqs(struct request **rqlist)
+ 	*rqlist = requeue_list;
+ }
+ 
++static __always_inline void nvme_unmap_metadata(struct nvme_dev *dev,
++						struct request *req)
++{
++	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
++
++	if (!iod->meta_sgt.nents) {
++		dma_unmap_page(dev->dev, iod->meta_dma,
++			       rq_integrity_vec(req).bv_len,
++			       rq_dma_dir(req));
++		return;
++	}
++
++	dma_pool_free(dev->prp_small_pool, iod->meta_list.sg_list,
++		      iod->meta_dma);
++	dma_unmap_sgtable(dev->dev, &iod->meta_sgt, rq_dma_dir(req), 0);
++	mempool_free(iod->meta_sgt.sgl, dev->iod_meta_mempool);
++}
++
+ static __always_inline void nvme_pci_unmap_rq(struct request *req)
+ {
+ 	struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
+ 	struct nvme_dev *dev = nvmeq->dev;
+ 
+-	if (blk_integrity_rq(req)) {
+-	        struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+-
+-		dma_unmap_page(dev->dev, iod->meta_dma,
+-			       rq_integrity_vec(req).bv_len, rq_dma_dir(req));
+-	}
++	if (blk_integrity_rq(req))
++		nvme_unmap_metadata(dev, req);
+ 
+ 	if (blk_rq_nr_phys_segments(req))
+ 		nvme_unmap_data(dev, req);
+@@ -2719,6 +2815,7 @@ static void nvme_release_prp_pools(struct nvme_dev *dev)
+ 
+ static int nvme_pci_alloc_iod_mempool(struct nvme_dev *dev)
+ {
++	size_t meta_size = sizeof(struct scatterlist) * (NVME_MAX_META_SEGS + 1);
+ 	size_t alloc_size = sizeof(struct scatterlist) * NVME_MAX_SEGS;
+ 
+ 	dev->iod_mempool = mempool_create_node(1,
+@@ -2727,7 +2824,18 @@ static int nvme_pci_alloc_iod_mempool(struct nvme_dev *dev)
+ 			dev_to_node(dev->dev));
+ 	if (!dev->iod_mempool)
+ 		return -ENOMEM;
++
++	dev->iod_meta_mempool = mempool_create_node(1,
++			mempool_kmalloc, mempool_kfree,
++			(void *)meta_size, GFP_KERNEL,
++			dev_to_node(dev->dev));
++	if (!dev->iod_meta_mempool)
++		goto free;
++
+ 	return 0;
++free:
++	mempool_destroy(dev->iod_mempool);
++	return -ENOMEM;
+ }
+ 
+ static void nvme_free_tagset(struct nvme_dev *dev)
+@@ -2792,6 +2900,11 @@ static void nvme_reset_work(struct work_struct *work)
+ 	if (result)
+ 		goto out;
+ 
++	if (nvme_ctrl_meta_sgl_supported(&dev->ctrl))
++		dev->ctrl.max_integrity_segments = NVME_MAX_META_SEGS;
++	else
++		dev->ctrl.max_integrity_segments = 1;
++
+ 	nvme_dbbuf_dma_alloc(dev);
+ 
+ 	result = nvme_setup_host_mem(dev);
+@@ -3061,11 +3174,6 @@ static struct nvme_dev *nvme_pci_alloc_dev(struct pci_dev *pdev,
+ 	dev->ctrl.max_hw_sectors = min_t(u32,
+ 		NVME_MAX_KB_SZ << 1, dma_opt_mapping_size(&pdev->dev) >> 9);
+ 	dev->ctrl.max_segments = NVME_MAX_SEGS;
+-
+-	/*
+-	 * There is no support for SGLs for metadata (yet), so we are limited to
+-	 * a single integrity segment for the separate metadata pointer.
+-	 */
+ 	dev->ctrl.max_integrity_segments = 1;
+ 	return dev;
+ 
+@@ -3128,6 +3236,11 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	if (result)
+ 		goto out_disable;
+ 
++	if (nvme_ctrl_meta_sgl_supported(&dev->ctrl))
++		dev->ctrl.max_integrity_segments = NVME_MAX_META_SEGS;
++	else
++		dev->ctrl.max_integrity_segments = 1;
++
+ 	nvme_dbbuf_dma_alloc(dev);
+ 
+ 	result = nvme_setup_host_mem(dev);
+@@ -3170,6 +3283,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	nvme_free_queues(dev, 0);
+ out_release_iod_mempool:
+ 	mempool_destroy(dev->iod_mempool);
++	mempool_destroy(dev->iod_meta_mempool);
+ out_release_prp_pools:
+ 	nvme_release_prp_pools(dev);
+ out_dev_unmap:
+@@ -3235,6 +3349,7 @@ static void nvme_remove(struct pci_dev *pdev)
+ 	nvme_dbbuf_dma_free(dev);
+ 	nvme_free_queues(dev, 0);
+ 	mempool_destroy(dev->iod_mempool);
++	mempool_destroy(dev->iod_meta_mempool);
+ 	nvme_release_prp_pools(dev);
+ 	nvme_dev_unmap(dev);
+ 	nvme_uninit_ctrl(&dev->ctrl);
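
The nvme host series above lets the PCI driver describe metadata with an
SGL chain instead of the single contiguous metadata pointer, raising
max_integrity_segments from 1 to NVME_MAX_META_SEGS (15) whenever the
controller advertises metadata SGL support (the MSDS check wrapped by the
new nvme_ctrl_meta_sgl_supported() helper; fabrics controllers always
qualify). The descriptor block built by nvme_pci_setup_meta_sgls() lives
in one dma_pool allocation: with a single mapped segment, descriptor 0 is
itself the data descriptor; with more, descriptor 0 becomes a segment
descriptor pointing at the data descriptors that follow it. An abridged
mirror of that layout logic:

	static void demo_build_meta_sgl(struct nvme_sgl_desc *sg_list,
					struct scatterlist *sgl,
					unsigned int entries,
					dma_addr_t sgl_dma)
	{
		struct scatterlist *sg;
		int i;

		if (entries == 1) {
			/* descriptor 0 is the lone data descriptor */
			nvme_pci_sgl_set_data(sg_list, sgl);
			return;
		}

		/* descriptor 0 points at the array right behind it
		 * in the same allocation
		 */
		nvme_pci_sgl_set_seg(sg_list, sgl_dma + sizeof(*sg_list),
				     entries);
		for_each_sg(sgl, sg, entries, i)
			nvme_pci_sgl_set_data(&sg_list[i + 1], sg);
	}
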
+diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
+index 840ae475074d09..eeb05b7bc0fd01 100644
+--- a/drivers/nvme/host/tcp.c
++++ b/drivers/nvme/host/tcp.c
+@@ -217,6 +217,19 @@ static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
+ 	return queue - queue->ctrl->queues;
+ }
+ 
++static inline bool nvme_tcp_recv_pdu_supported(enum nvme_tcp_pdu_type type)
++{
++	switch (type) {
++	case nvme_tcp_c2h_term:
++	case nvme_tcp_c2h_data:
++	case nvme_tcp_r2t:
++	case nvme_tcp_rsp:
++		return true;
++	default:
++		return false;
++	}
++}
++
+ /*
+  * Check if the queue is TLS encrypted
+  */
+@@ -763,6 +776,40 @@ static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
+ 	return 0;
+ }
+ 
++static void nvme_tcp_handle_c2h_term(struct nvme_tcp_queue *queue,
++		struct nvme_tcp_term_pdu *pdu)
++{
++	u16 fes;
++	const char *msg;
++	u32 plen = le32_to_cpu(pdu->hdr.plen);
++
++	static const char * const msg_table[] = {
++		[NVME_TCP_FES_INVALID_PDU_HDR] = "Invalid PDU Header Field",
++		[NVME_TCP_FES_PDU_SEQ_ERR] = "PDU Sequence Error",
++		[NVME_TCP_FES_HDR_DIGEST_ERR] = "Header Digest Error",
++		[NVME_TCP_FES_DATA_OUT_OF_RANGE] = "Data Transfer Out Of Range",
++		[NVME_TCP_FES_DATA_LIMIT_EXCEEDED] = "Data Transfer Limit Exceeded",
++		[NVME_TCP_FES_UNSUPPORTED_PARAM] = "Unsupported Parameter",
++	};
++
++	if (plen < NVME_TCP_MIN_C2HTERM_PLEN ||
++	    plen > NVME_TCP_MAX_C2HTERM_PLEN) {
++		dev_err(queue->ctrl->ctrl.device,
++			"Received a malformed C2HTermReq PDU (plen = %u)\n",
++			plen);
++		return;
++	}
++
++	fes = le16_to_cpu(pdu->fes);
++	if (fes && fes < ARRAY_SIZE(msg_table))
++		msg = msg_table[fes];
++	else
++		msg = "Unknown";
++
++	dev_err(queue->ctrl->ctrl.device,
++		"Received C2HTermReq (FES = %s)\n", msg);
++}
++
+ static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
+ 		unsigned int *offset, size_t *len)
+ {
+@@ -784,6 +831,25 @@ static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
+ 		return 0;
+ 
+ 	hdr = queue->pdu;
++	if (unlikely(hdr->hlen != sizeof(struct nvme_tcp_rsp_pdu))) {
++		if (!nvme_tcp_recv_pdu_supported(hdr->type))
++			goto unsupported_pdu;
++
++		dev_err(queue->ctrl->ctrl.device,
++			"pdu type %d has unexpected header length (%d)\n",
++			hdr->type, hdr->hlen);
++		return -EPROTO;
++	}
++
++	if (unlikely(hdr->type == nvme_tcp_c2h_term)) {
++		/*
++		 * C2HTermReq never includes Header or Data digests.
++		 * Skip the checks.
++		 */
++		nvme_tcp_handle_c2h_term(queue, (void *)queue->pdu);
++		return -EINVAL;
++	}
++
+ 	if (queue->hdr_digest) {
+ 		ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen);
+ 		if (unlikely(ret))
+@@ -807,10 +873,13 @@ static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
+ 		nvme_tcp_init_recv_ctx(queue);
+ 		return nvme_tcp_handle_r2t(queue, (void *)queue->pdu);
+ 	default:
+-		dev_err(queue->ctrl->ctrl.device,
+-			"unsupported pdu type (%d)\n", hdr->type);
+-		return -EINVAL;
++		goto unsupported_pdu;
+ 	}
++
++unsupported_pdu:
++	dev_err(queue->ctrl->ctrl.device,
++		"unsupported pdu type (%d)\n", hdr->type);
++	return -EINVAL;
+ }
+ 
+ static inline void nvme_tcp_end_request(struct request *rq, u16 status)
+@@ -1452,11 +1521,11 @@ static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
+ 	msg.msg_flags = MSG_WAITALL;
+ 	ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
+ 			iov.iov_len, msg.msg_flags);
+-	if (ret < sizeof(*icresp)) {
++	if (ret >= 0 && ret < sizeof(*icresp))
++		ret = -ECONNRESET;
++	if (ret < 0) {
+ 		pr_warn("queue %d: failed to receive icresp, error %d\n",
+ 			nvme_tcp_queue_id(queue), ret);
+-		if (ret >= 0)
+-			ret = -ECONNRESET;
+ 		goto free_icresp;
+ 	}
+ 	ret = -ENOTCONN;
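
The host-side tcp.c changes harden PDU receive: the advertised header
length is checked against the expected size before any digest handling,
only known PDU types are accepted, a controller-to-host termination
request (C2HTermReq) is decoded and logged instead of being treated as an
unknown type, and a short icresp read is folded into the same error path
as a socket error by normalizing it to -ECONNRESET first. The FES
decoding uses a bounds-checked lookup table; a condensed sketch of that
idiom (demo_fes_name is illustrative, and it also guards the gaps that
designated initializers leave NULL):

	static const char *demo_fes_name(u16 fes)
	{
		static const char * const msg_table[] = {
			[NVME_TCP_FES_INVALID_PDU_HDR] = "Invalid PDU Header Field",
			[NVME_TCP_FES_PDU_SEQ_ERR] = "PDU Sequence Error",
		};

		/* never index past the table with a wire-provided value */
		if (fes < ARRAY_SIZE(msg_table) && msg_table[fes])
			return msg_table[fes];
		return "Unknown";
	}
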
+diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
+index 7c51c2a8c109a9..4f9cac8a5abe07 100644
+--- a/drivers/nvme/target/tcp.c
++++ b/drivers/nvme/target/tcp.c
+@@ -571,10 +571,16 @@ static void nvmet_tcp_queue_response(struct nvmet_req *req)
+ 	struct nvmet_tcp_cmd *cmd =
+ 		container_of(req, struct nvmet_tcp_cmd, req);
+ 	struct nvmet_tcp_queue	*queue = cmd->queue;
++	enum nvmet_tcp_recv_state queue_state;
++	struct nvmet_tcp_cmd *queue_cmd;
+ 	struct nvme_sgl_desc *sgl;
+ 	u32 len;
+ 
+-	if (unlikely(cmd == queue->cmd)) {
++	/* Pairs with store_release in nvmet_prepare_receive_pdu() */
++	queue_state = smp_load_acquire(&queue->rcv_state);
++	queue_cmd = READ_ONCE(queue->cmd);
++
++	if (unlikely(cmd == queue_cmd)) {
+ 		sgl = &cmd->req.cmd->common.dptr.sgl;
+ 		len = le32_to_cpu(sgl->length);
+ 
+@@ -583,7 +589,7 @@ static void nvmet_tcp_queue_response(struct nvmet_req *req)
+ 		 * Avoid using helpers, this might happen before
+ 		 * nvmet_req_init is completed.
+ 		 */
+-		if (queue->rcv_state == NVMET_TCP_RECV_PDU &&
++		if (queue_state == NVMET_TCP_RECV_PDU &&
+ 		    len && len <= cmd->req.port->inline_data_size &&
+ 		    nvme_is_write(cmd->req.cmd))
+ 			return;
+@@ -847,8 +853,9 @@ static void nvmet_prepare_receive_pdu(struct nvmet_tcp_queue *queue)
+ {
+ 	queue->offset = 0;
+ 	queue->left = sizeof(struct nvme_tcp_hdr);
+-	queue->cmd = NULL;
+-	queue->rcv_state = NVMET_TCP_RECV_PDU;
++	WRITE_ONCE(queue->cmd, NULL);
++	/* Ensure rcv_state is visible only after queue->cmd is set */
++	smp_store_release(&queue->rcv_state, NVMET_TCP_RECV_PDU);
+ }
+ 
+ static void nvmet_tcp_free_crypto(struct nvmet_tcp_queue *queue)
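
The target-side fix turns an unsynchronized pair of writes (queue->cmd,
then queue->rcv_state) into a release/acquire pairing: the writer
publishes rcv_state with smp_store_release() only after clearing
queue->cmd, and the response path reads rcv_state with smp_load_acquire()
before looking at queue->cmd, so it can never observe the new state
together with a stale command pointer. A self-contained sketch of the
pairing (the demo_* types abridge the real queue):

	struct demo_cmd;

	enum demo_recv_state { DEMO_RECV_PDU, DEMO_RECV_DATA };

	struct demo_queue {
		struct demo_cmd *cmd;
		enum demo_recv_state rcv_state;
	};

	static void demo_prepare_receive(struct demo_queue *queue)
	{
		WRITE_ONCE(queue->cmd, NULL);
		/* publish the state only after queue->cmd is written */
		smp_store_release(&queue->rcv_state, DEMO_RECV_PDU);
	}

	static bool demo_cmd_is_current(struct demo_queue *queue,
					struct demo_cmd *cmd)
	{
		/* pairs with the store_release above */
		enum demo_recv_state state =
			smp_load_acquire(&queue->rcv_state);
		struct demo_cmd *queue_cmd = READ_ONCE(queue->cmd);

		return state == DEMO_RECV_PDU && cmd == queue_cmd;
	}
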
+diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
+index e45d6d3a8dc678..45445a1600a968 100644
+--- a/drivers/of/of_reserved_mem.c
++++ b/drivers/of/of_reserved_mem.c
+@@ -360,12 +360,12 @@ static int __init __reserved_mem_alloc_size(unsigned long node, const char *unam
+ 
+ 	prop = of_get_flat_dt_prop(node, "alignment", &len);
+ 	if (prop) {
+-		if (len != dt_root_size_cells * sizeof(__be32)) {
++		if (len != dt_root_addr_cells * sizeof(__be32)) {
+ 			pr_err("invalid alignment property in '%s' node.\n",
+ 				uname);
+ 			return -EINVAL;
+ 		}
+-		align = dt_mem_next_cell(dt_root_size_cells, &prop);
++		align = dt_mem_next_cell(dt_root_addr_cells, &prop);
+ 	}
+ 
+ 	nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
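
The of_reserved_mem fix sizes the "alignment" property by
dt_root_addr_cells rather than dt_root_size_cells: an alignment is an
address-like quantity, so on a tree whose root declares different
address and size cell counts (say #address-cells = <2>,
#size-cells = <1>) the old check rejected or misparsed valid properties.
A condensed sketch of the corrected parse (demo_parse_alignment is
illustrative; the helpers are the ones used in the hunk):

	static int demo_parse_alignment(unsigned long node,
					phys_addr_t *align)
	{
		const __be32 *prop;
		int len;

		prop = of_get_flat_dt_prop(node, "alignment", &len);
		if (!prop)
			return 0;
		/* alignment spans address cells, not size cells */
		if (len != dt_root_addr_cells * sizeof(__be32))
			return -EINVAL;
		*align = dt_mem_next_cell(dt_root_addr_cells, &prop);
		return 0;
	}
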
+diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
+index 2cfb2ac3f465aa..84dcd7da7319e3 100644
+--- a/drivers/platform/x86/thinkpad_acpi.c
++++ b/drivers/platform/x86/thinkpad_acpi.c
+@@ -9958,6 +9958,7 @@ static const struct tpacpi_quirk battery_quirk_table[] __initconst = {
+ 	 * Individual addressing is broken on models that expose the
+ 	 * primary battery as BAT1.
+ 	 */
++	TPACPI_Q_LNV('G', '8', true),       /* ThinkPad X131e */
+ 	TPACPI_Q_LNV('8', 'F', true),       /* Thinkpad X120e */
+ 	TPACPI_Q_LNV('J', '7', true),       /* B5400 */
+ 	TPACPI_Q_LNV('J', 'I', true),       /* Thinkpad 11e */
+diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
+index 27afbb9d544b7c..cbf531d0ba6885 100644
+--- a/drivers/rapidio/devices/rio_mport_cdev.c
++++ b/drivers/rapidio/devices/rio_mport_cdev.c
+@@ -1742,7 +1742,8 @@ static int rio_mport_add_riodev(struct mport_cdev_priv *priv,
+ 		err = rio_add_net(net);
+ 		if (err) {
+ 			rmcd_debug(RDEV, "failed to register net, err=%d", err);
+-			kfree(net);
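++			/* net->dev is initialized; put_device() lets its release callback free net */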
++			put_device(&net->dev);
++			mport->net = NULL;
+ 			goto cleanup;
+ 		}
+ 	}
+diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c
+index fdcf742b2adbcb..c12941f71e2cba 100644
+--- a/drivers/rapidio/rio-scan.c
++++ b/drivers/rapidio/rio-scan.c
+@@ -871,7 +871,10 @@ static struct rio_net *rio_scan_alloc_net(struct rio_mport *mport,
+ 		dev_set_name(&net->dev, "rnet_%d", net->id);
+ 		net->dev.parent = &mport->dev;
+ 		net->dev.release = rio_scan_release_dev;
+-		rio_add_net(net);
++		if (rio_add_net(net)) {
++			put_device(&net->dev);
++			net = NULL;
++		}
+ 	}
+ 
+ 	return net;
+diff --git a/drivers/slimbus/messaging.c b/drivers/slimbus/messaging.c
+index 242570a5e5654b..455c1fd1490fd3 100644
+--- a/drivers/slimbus/messaging.c
++++ b/drivers/slimbus/messaging.c
+@@ -148,8 +148,9 @@ int slim_do_transfer(struct slim_controller *ctrl, struct slim_msg_txn *txn)
+ 	}
+ 
+ 	ret = ctrl->xfer_msg(ctrl, txn);
+-
+-	if (!ret && need_tid && !txn->msg->comp) {
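++	/* A timed-out transfer must free its TID so it is not leaked */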
++	if (ret == -ETIMEDOUT) {
++		slim_free_txn_tid(ctrl, txn);
++	} else if (!ret && need_tid && !txn->msg->comp) {
+ 		unsigned long ms = txn->rl + HZ;
+ 
+ 		time_left = wait_for_completion_timeout(txn->comp,
+diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
+index 0dd85d2635b998..47d06af33747d0 100644
+--- a/drivers/usb/atm/cxacru.c
++++ b/drivers/usb/atm/cxacru.c
+@@ -1131,7 +1131,10 @@ static int cxacru_bind(struct usbatm_data *usbatm_instance,
+ 	struct cxacru_data *instance;
+ 	struct usb_device *usb_dev = interface_to_usbdev(intf);
+ 	struct usb_host_endpoint *cmd_ep = usb_dev->ep_in[CXACRU_EP_CMD];
+-	struct usb_endpoint_descriptor *in, *out;
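++	/* Zero-terminated list of endpoint addresses expected on this interface */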
++	static const u8 ep_addrs[] = {
++		CXACRU_EP_CMD + USB_DIR_IN,
++		CXACRU_EP_CMD + USB_DIR_OUT,
++		0};
+ 	int ret;
+ 
+ 	/* instance init */
+@@ -1179,13 +1182,11 @@ static int cxacru_bind(struct usbatm_data *usbatm_instance,
+ 	}
+ 
+ 	if (usb_endpoint_xfer_int(&cmd_ep->desc))
+-		ret = usb_find_common_endpoints(intf->cur_altsetting,
+-						NULL, NULL, &in, &out);
++		ret = usb_check_int_endpoints(intf, ep_addrs);
+ 	else
+-		ret = usb_find_common_endpoints(intf->cur_altsetting,
+-						&in, &out, NULL, NULL);
++		ret = usb_check_bulk_endpoints(intf, ep_addrs);
+ 
+-	if (ret) {
++	if (!ret) {
+ 		usb_err(usbatm_instance, "cxacru_bind: interface has incorrect endpoints\n");
+ 		ret = -ENODEV;
+ 		goto fail;
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 906daf423cb02b..145787c424e0c8 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -6065,6 +6065,36 @@ void usb_hub_cleanup(void)
+ 	usb_deregister(&hub_driver);
+ } /* usb_hub_cleanup() */
+ 
++/**
++ * hub_hc_release_resources - clear resources used by host controller
++ * @udev: pointer to device being released
++ *
++ * Context: task context, might sleep
++ *
++ * This function releases the host controller resources in the correct order
++ * before any operation is performed on a resuming usb device. The host
++ * controller resources allocated for devices in the tree should be released
++ * starting from the last usb device in the tree toward the root hub. This
++ * function is used only when resuming a device that requires
++ * reinitialization, that is, when the flag udev->reset_resume is set.
++ *
++ * This call is synchronous, and may not be used in an interrupt context.
++ */
++static void hub_hc_release_resources(struct usb_device *udev)
++{
++	struct usb_hub *hub = usb_hub_to_struct_hub(udev);
++	struct usb_hcd *hcd = bus_to_hcd(udev->bus);
++	int i;
++
++	/* Release resources for all children before this device */
++	for (i = 0; i < udev->maxchild; i++)
++		if (hub->ports[i]->child)
++			hub_hc_release_resources(hub->ports[i]->child);
++
++	if (hcd->driver->reset_device)
++		hcd->driver->reset_device(hcd, udev);
++}
++
+ /**
+  * usb_reset_and_verify_device - perform a USB port reset to reinitialize a device
+  * @udev: device to reset (not in SUSPENDED or NOTATTACHED state)
+@@ -6129,6 +6159,9 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
+ 	bos = udev->bos;
+ 	udev->bos = NULL;
+ 
++	if (udev->reset_resume)
++		hub_hc_release_resources(udev);
++
+ 	mutex_lock(hcd->address0_mutex);
+ 
+ 	for (i = 0; i < PORT_INIT_TRIES; ++i) {
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 027479179f09e9..6926bd639ec6ff 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -341,6 +341,10 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	{ USB_DEVICE(0x0638, 0x0a13), .driver_info =
+ 	  USB_QUIRK_STRING_FETCH_255 },
+ 
++	/* Prolific Single-LUN Mass Storage Card Reader */
++	{ USB_DEVICE(0x067b, 0x2731), .driver_info = USB_QUIRK_DELAY_INIT |
++	  USB_QUIRK_NO_LPM },
++
+ 	/* Saitek Cyborg Gold Joystick */
+ 	{ USB_DEVICE(0x06a3, 0x0006), .driver_info =
+ 			USB_QUIRK_CONFIG_INTF_STRINGS },
+diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
+index 244e3e04e1ad74..7820d6815bedd5 100644
+--- a/drivers/usb/dwc3/core.c
++++ b/drivers/usb/dwc3/core.c
+@@ -131,11 +131,24 @@ void dwc3_enable_susphy(struct dwc3 *dwc, bool enable)
+ 	}
+ }
+ 
+-void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode)
++void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode, bool ignore_susphy)
+ {
++	unsigned int hw_mode;
+ 	u32 reg;
+ 
+ 	reg = dwc3_readl(dwc->regs, DWC3_GCTL);
++
++	/*
++	 * For DRD controllers, GUSB3PIPECTL.SUSPENDENABLE and
++	 * GUSB2PHYCFG.SUSPHY should be cleared during mode switching,
++	 * and they can be set after core initialization.
++	 */
++	hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);
++	if (hw_mode == DWC3_GHWPARAMS0_MODE_DRD && !ignore_susphy) {
++		if (DWC3_GCTL_PRTCAP(reg) != mode)
++			dwc3_enable_susphy(dwc, false);
++	}
++
+ 	reg &= ~(DWC3_GCTL_PRTCAPDIR(DWC3_GCTL_PRTCAP_OTG));
+ 	reg |= DWC3_GCTL_PRTCAPDIR(mode);
+ 	dwc3_writel(dwc->regs, DWC3_GCTL, reg);
+@@ -216,7 +229,7 @@ static void __dwc3_set_mode(struct work_struct *work)
+ 
+ 	spin_lock_irqsave(&dwc->lock, flags);
+ 
+-	dwc3_set_prtcap(dwc, desired_dr_role);
++	dwc3_set_prtcap(dwc, desired_dr_role, false);
+ 
+ 	spin_unlock_irqrestore(&dwc->lock, flags);
+ 
+@@ -658,16 +671,7 @@ static int dwc3_ss_phy_setup(struct dwc3 *dwc, int index)
+ 	 */
+ 	reg &= ~DWC3_GUSB3PIPECTL_UX_EXIT_PX;
+ 
+-	/*
+-	 * Above DWC_usb3.0 1.94a, it is recommended to set
+-	 * DWC3_GUSB3PIPECTL_SUSPHY to '0' during coreConsultant configuration.
+-	 * So default value will be '0' when the core is reset. Application
+-	 * needs to set it to '1' after the core initialization is completed.
+-	 *
+-	 * Similarly for DRD controllers, GUSB3PIPECTL.SUSPENDENABLE must be
+-	 * cleared after power-on reset, and it can be set after core
+-	 * initialization.
+-	 */
++	/* Ensure the GUSB3PIPECTL.SUSPENDENABLE is cleared prior to phy init. */
+ 	reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;
+ 
+ 	if (dwc->u2ss_inp3_quirk)
+@@ -747,15 +751,7 @@ static int dwc3_hs_phy_setup(struct dwc3 *dwc, int index)
+ 		break;
+ 	}
+ 
+-	/*
+-	 * Above DWC_usb3.0 1.94a, it is recommended to set
+-	 * DWC3_GUSB2PHYCFG_SUSPHY to '0' during coreConsultant configuration.
+-	 * So default value will be '0' when the core is reset. Application
+-	 * needs to set it to '1' after the core initialization is completed.
+-	 *
+-	 * Similarly for DRD controllers, GUSB2PHYCFG.SUSPHY must be cleared
+-	 * after power-on reset, and it can be set after core initialization.
+-	 */
++	/* Ensure the GUSB2PHYCFG.SUSPHY is cleared prior to phy init. */
+ 	reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
+ 
+ 	if (dwc->dis_enblslpm_quirk)
+@@ -830,6 +826,25 @@ static int dwc3_phy_init(struct dwc3 *dwc)
+ 			goto err_exit_usb3_phy;
+ 	}
+ 
++	/*
++	 * Above DWC_usb3.0 1.94a, it is recommended to set
++	 * DWC3_GUSB3PIPECTL_SUSPHY and DWC3_GUSB2PHYCFG_SUSPHY to '0' during
++	 * coreConsultant configuration. So default value will be '0' when the
++	 * core is reset. Application needs to set it to '1' after the core
++	 * initialization is completed.
++	 *
++	 * Certain phys must be in the P0 power state during initialization.
++	 * Make sure GUSB3PIPECTL.SUSPENDENABLE and GUSB2PHYCFG.SUSPHY are clear
++	 * prior to phy init to keep the phy in the P0 state.
++	 *
++	 * After phy initialization, some phy operations can only be executed
++	 * while in lower P states. Ensure GUSB3PIPECTL.SUSPENDENABLE and
++	 * GUSB2PHYCFG.SUSPHY are set soon after initialization to avoid
++	 * blocking phy ops.
++	 */
++	if (!DWC3_VER_IS_WITHIN(DWC3, ANY, 194A))
++		dwc3_enable_susphy(dwc, true);
++
+ 	return 0;
+ 
+ err_exit_usb3_phy:
+@@ -1564,7 +1579,7 @@ static int dwc3_core_init_mode(struct dwc3 *dwc)
+ 
+ 	switch (dwc->dr_mode) {
+ 	case USB_DR_MODE_PERIPHERAL:
+-		dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE);
++		dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE, false);
+ 
+ 		if (dwc->usb2_phy)
+ 			otg_set_vbus(dwc->usb2_phy->otg, false);
+@@ -1576,7 +1591,7 @@ static int dwc3_core_init_mode(struct dwc3 *dwc)
+ 			return dev_err_probe(dev, ret, "failed to initialize gadget\n");
+ 		break;
+ 	case USB_DR_MODE_HOST:
+-		dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST);
++		dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST, false);
+ 
+ 		if (dwc->usb2_phy)
+ 			otg_set_vbus(dwc->usb2_phy->otg, true);
+@@ -1621,7 +1636,7 @@ static void dwc3_core_exit_mode(struct dwc3 *dwc)
+ 	}
+ 
+ 	/* de-assert DRVVBUS for HOST and OTG mode */
+-	dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE);
++	dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE, true);
+ }
+ 
+ static void dwc3_get_software_properties(struct dwc3 *dwc)
+@@ -1811,8 +1826,6 @@ static void dwc3_get_properties(struct dwc3 *dwc)
+ 	dwc->tx_thr_num_pkt_prd = tx_thr_num_pkt_prd;
+ 	dwc->tx_max_burst_prd = tx_max_burst_prd;
+ 
+-	dwc->imod_interval = 0;
+-
+ 	dwc->tx_fifo_resize_max_num = tx_fifo_resize_max_num;
+ }
+ 
+@@ -1830,21 +1843,19 @@ static void dwc3_check_params(struct dwc3 *dwc)
+ 	unsigned int hwparam_gen =
+ 		DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3);
+ 
+-	/* Check for proper value of imod_interval */
+-	if (dwc->imod_interval && !dwc3_has_imod(dwc)) {
+-		dev_warn(dwc->dev, "Interrupt moderation not supported\n");
+-		dwc->imod_interval = 0;
+-	}
+-
+ 	/*
++	 * Enable IMOD for all controllers that support it.
++	 *
++	 * Particularly, DWC_usb3 v3.00a must enable this feature for
++	 * the following reason:
++	 *
+ 	 * Workaround for STAR 9000961433 which affects only version
+ 	 * 3.00a of the DWC_usb3 core. This prevents the controller
+ 	 * interrupt from being masked while handling events. IMOD
+ 	 * allows us to work around this issue. Enable it for the
+ 	 * affected version.
+ 	 */
+-	if (!dwc->imod_interval &&
+-	    DWC3_VER_IS(DWC3, 300A))
++	if (dwc3_has_imod(dwc))
+ 		dwc->imod_interval = 1;
+ 
+ 	/* Check the maximum_speed parameter */
+@@ -2433,7 +2444,7 @@ static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg)
+ 		if (ret)
+ 			return ret;
+ 
+-		dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE);
++		dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE, true);
+ 		dwc3_gadget_resume(dwc);
+ 		break;
+ 	case DWC3_GCTL_PRTCAP_HOST:
+@@ -2441,7 +2452,7 @@ static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg)
+ 			ret = dwc3_core_init_for_resume(dwc);
+ 			if (ret)
+ 				return ret;
+-			dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST);
++			dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST, true);
+ 			break;
+ 		}
+ 		/* Restore GUSB2PHYCFG bits that were modified in suspend */
+@@ -2470,7 +2481,7 @@ static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg)
+ 		if (ret)
+ 			return ret;
+ 
+-		dwc3_set_prtcap(dwc, dwc->current_dr_role);
++		dwc3_set_prtcap(dwc, dwc->current_dr_role, true);
+ 
+ 		dwc3_otg_init(dwc);
+ 		if (dwc->current_otg_role == DWC3_OTG_ROLE_HOST) {
+diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
+index 0e91a227507fff..f288d88cd10519 100644
+--- a/drivers/usb/dwc3/core.h
++++ b/drivers/usb/dwc3/core.h
+@@ -1562,7 +1562,7 @@ struct dwc3_gadget_ep_cmd_params {
+ #define DWC3_HAS_OTG			BIT(3)
+ 
+ /* prototypes */
+-void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode);
++void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode, bool ignore_susphy);
+ void dwc3_set_mode(struct dwc3 *dwc, u32 mode);
+ u32 dwc3_core_fifo_space(struct dwc3_ep *dep, u8 type);
+ 
+diff --git a/drivers/usb/dwc3/drd.c b/drivers/usb/dwc3/drd.c
+index d76ae676783cf3..7977860932b142 100644
+--- a/drivers/usb/dwc3/drd.c
++++ b/drivers/usb/dwc3/drd.c
+@@ -173,7 +173,7 @@ void dwc3_otg_init(struct dwc3 *dwc)
+ 	 * block "Initialize GCTL for OTG operation".
+ 	 */
+ 	/* GCTL.PrtCapDir=2'b11 */
+-	dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_OTG);
++	dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_OTG, true);
+ 	/* GUSB2PHYCFG0.SusPHY=0 */
+ 	reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
+ 	reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
+@@ -556,7 +556,7 @@ int dwc3_drd_init(struct dwc3 *dwc)
+ 
+ 		dwc3_drd_update(dwc);
+ 	} else {
+-		dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_OTG);
++		dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_OTG, true);
+ 
+ 		/* use OTG block to get ID event */
+ 		irq = dwc3_otg_get_irq(dwc);
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index 8c80bb4a467bff..309a871453bfad 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -4507,14 +4507,18 @@ static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt)
+ 	dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0),
+ 		    DWC3_GEVNTSIZ_SIZE(evt->length));
+ 
++	evt->flags &= ~DWC3_EVENT_PENDING;
++	/*
++	 * Add an explicit write memory barrier to make sure that the update of
++	 * clearing DWC3_EVENT_PENDING is observed in dwc3_check_event_buf()
++	 */
++	wmb();
++
+ 	if (dwc->imod_interval) {
+ 		dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), DWC3_GEVNTCOUNT_EHB);
+ 		dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval);
+ 	}
+ 
+-	/* Keep the clearing of DWC3_EVENT_PENDING at the end */
+-	evt->flags &= ~DWC3_EVENT_PENDING;
+-
+ 	return ret;
+ }
+ 
+diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
+index cec86c0c6369ca..8402a86176f48c 100644
+--- a/drivers/usb/gadget/composite.c
++++ b/drivers/usb/gadget/composite.c
+@@ -1050,10 +1050,11 @@ static int set_config(struct usb_composite_dev *cdev,
+ 	else
+ 		usb_gadget_set_remote_wakeup(gadget, 0);
+ done:
+-	if (power <= USB_SELF_POWER_VBUS_MAX_DRAW)
+-		usb_gadget_set_selfpowered(gadget);
+-	else
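++	/* Report self-powered only when the active config claims that attribute */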
++	if (power > USB_SELF_POWER_VBUS_MAX_DRAW ||
++	    (c && !(c->bmAttributes & USB_CONFIG_ATT_SELFPOWER)))
+ 		usb_gadget_clear_selfpowered(gadget);
++	else
++		usb_gadget_set_selfpowered(gadget);
+ 
+ 	usb_gadget_vbus_draw(gadget, power);
+ 	if (result >= 0 && cdev->delayed_status)
+@@ -2615,7 +2616,10 @@ void composite_suspend(struct usb_gadget *gadget)
+ 
+ 	cdev->suspended = 1;
+ 
+-	usb_gadget_set_selfpowered(gadget);
++	if (cdev->config &&
++	    cdev->config->bmAttributes & USB_CONFIG_ATT_SELFPOWER)
++		usb_gadget_set_selfpowered(gadget);
++
+ 	usb_gadget_vbus_draw(gadget, 2);
+ }
+ 
+@@ -2649,8 +2653,11 @@ void composite_resume(struct usb_gadget *gadget)
+ 		else
+ 			maxpower = min(maxpower, 900U);
+ 
+-		if (maxpower > USB_SELF_POWER_VBUS_MAX_DRAW)
++		if (maxpower > USB_SELF_POWER_VBUS_MAX_DRAW ||
++		    !(cdev->config->bmAttributes & USB_CONFIG_ATT_SELFPOWER))
+ 			usb_gadget_clear_selfpowered(gadget);
++		else
++			usb_gadget_set_selfpowered(gadget);
+ 
+ 		usb_gadget_vbus_draw(gadget, maxpower);
+ 	} else {
+diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
+index 09e2838917e294..f58590bf5e02f5 100644
+--- a/drivers/usb/gadget/function/u_ether.c
++++ b/drivers/usb/gadget/function/u_ether.c
+@@ -1052,8 +1052,8 @@ void gether_suspend(struct gether *link)
+ 		 * There is a transfer in progress. So we trigger a remote
+ 		 * wakeup to inform the host.
+ 		 */
+-		ether_wakeup_host(dev->port_usb);
+-		return;
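++		/* If the remote wakeup failed, fall through and mark the link suspended */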
++		if (!ether_wakeup_host(dev->port_usb))
++			return;
+ 	}
+ 	spin_lock_irqsave(&dev->lock, flags);
+ 	link->is_suspend = true;
+diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
+index 8d774f19271e6a..2fe3a92978fa29 100644
+--- a/drivers/usb/host/xhci-hub.c
++++ b/drivers/usb/host/xhci-hub.c
+@@ -12,6 +12,7 @@
+ #include <linux/slab.h>
+ #include <linux/unaligned.h>
+ #include <linux/bitfield.h>
++#include <linux/pci.h>
+ 
+ #include "xhci.h"
+ #include "xhci-trace.h"
+@@ -770,9 +771,16 @@ static int xhci_exit_test_mode(struct xhci_hcd *xhci)
+ enum usb_link_tunnel_mode xhci_port_is_tunneled(struct xhci_hcd *xhci,
+ 						struct xhci_port *port)
+ {
++	struct usb_hcd *hcd;
+ 	void __iomem *base;
+ 	u32 offset;
+ 
++	/* Don't try to probe this capability for non-Intel hosts */
++	hcd = xhci_to_hcd(xhci);
++	if (!dev_is_pci(hcd->self.controller) ||
++	    to_pci_dev(hcd->self.controller)->vendor != PCI_VENDOR_ID_INTEL)
++		return USB_LINK_UNKNOWN;
++
+ 	base = &xhci->cap_regs->hc_capbase;
+ 	offset = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_INTEL_SPR_SHADOW);
+ 
+diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
+index d2900197a49e74..32c8693b438b07 100644
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -2443,7 +2443,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
+ 	 * and our use of dma addresses in the trb_address_map radix tree needs
+ 	 * TRB_SEGMENT_SIZE alignment, so we pick the greater alignment need.
+ 	 */
+-	if (xhci->quirks & XHCI_ZHAOXIN_TRB_FETCH)
++	if (xhci->quirks & XHCI_TRB_OVERFETCH)
++		/* Buggy HC prefetches beyond segment bounds - allocate dummy space at the end */
+ 		xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
+ 				TRB_SEGMENT_SIZE * 2, TRB_SEGMENT_SIZE * 2, xhci->page_size * 2);
+ 	else
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index deb3c98c9beaf6..1b033c8ce188ef 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -28,8 +28,8 @@
+ #define SPARSE_CNTL_ENABLE	0xC12C
+ 
+ /* Device for a quirk */
+-#define PCI_VENDOR_ID_FRESCO_LOGIC	0x1b73
+-#define PCI_DEVICE_ID_FRESCO_LOGIC_PDK	0x1000
++#define PCI_VENDOR_ID_FRESCO_LOGIC		0x1b73
++#define PCI_DEVICE_ID_FRESCO_LOGIC_PDK		0x1000
+ #define PCI_DEVICE_ID_FRESCO_LOGIC_FL1009	0x1009
+ #define PCI_DEVICE_ID_FRESCO_LOGIC_FL1100	0x1100
+ #define PCI_DEVICE_ID_FRESCO_LOGIC_FL1400	0x1400
+@@ -38,8 +38,10 @@
+ #define PCI_DEVICE_ID_EJ168		0x7023
+ #define PCI_DEVICE_ID_EJ188		0x7052
+ 
+-#define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI	0x8c31
+-#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI	0x9c31
++#define PCI_DEVICE_ID_VIA_VL805			0x3483
++
++#define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI		0x8c31
++#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI		0x9c31
+ #define PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_XHCI	0x9cb1
+ #define PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI		0x22b5
+ #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI		0xa12f
+@@ -424,8 +426,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ 			pdev->device == 0x3432)
+ 		xhci->quirks |= XHCI_BROKEN_STREAMS;
+ 
+-	if (pdev->vendor == PCI_VENDOR_ID_VIA && pdev->device == 0x3483)
++	if (pdev->vendor == PCI_VENDOR_ID_VIA && pdev->device == PCI_DEVICE_ID_VIA_VL805) {
+ 		xhci->quirks |= XHCI_LPM_SUPPORT;
++		xhci->quirks |= XHCI_TRB_OVERFETCH;
++	}
+ 
+ 	if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
+ 		pdev->device == PCI_DEVICE_ID_ASMEDIA_1042_XHCI) {
+@@ -473,11 +477,11 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ 
+ 		if (pdev->device == 0x9202) {
+ 			xhci->quirks |= XHCI_RESET_ON_RESUME;
+-			xhci->quirks |= XHCI_ZHAOXIN_TRB_FETCH;
++			xhci->quirks |= XHCI_TRB_OVERFETCH;
+ 		}
+ 
+ 		if (pdev->device == 0x9203)
+-			xhci->quirks |= XHCI_ZHAOXIN_TRB_FETCH;
++			xhci->quirks |= XHCI_TRB_OVERFETCH;
+ 	}
+ 
+ 	if (pdev->vendor == PCI_DEVICE_ID_CADENCE &&
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index 673179047eb82e..439767d242fa9c 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -1621,7 +1621,7 @@ struct xhci_hcd {
+ #define XHCI_EP_CTX_BROKEN_DCS	BIT_ULL(42)
+ #define XHCI_SUSPEND_RESUME_CLKS	BIT_ULL(43)
+ #define XHCI_RESET_TO_DEFAULT	BIT_ULL(44)
+-#define XHCI_ZHAOXIN_TRB_FETCH	BIT_ULL(45)
++#define XHCI_TRB_OVERFETCH	BIT_ULL(45)
+ #define XHCI_ZHAOXIN_HOST	BIT_ULL(46)
+ #define XHCI_WRITE_64_HI_LO	BIT_ULL(47)
+ #define XHCI_CDNS_SCTX_QUIRK	BIT_ULL(48)
+diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
+index edc43f169d493c..7324de52d9505e 100644
+--- a/drivers/usb/renesas_usbhs/common.c
++++ b/drivers/usb/renesas_usbhs/common.c
+@@ -312,8 +312,10 @@ static int usbhsc_clk_get(struct device *dev, struct usbhs_priv *priv)
+ 	priv->clks[1] = of_clk_get(dev_of_node(dev), 1);
+ 	if (PTR_ERR(priv->clks[1]) == -ENOENT)
+ 		priv->clks[1] = NULL;
+-	else if (IS_ERR(priv->clks[1]))
++	else if (IS_ERR(priv->clks[1])) {
++		clk_put(priv->clks[0]);
+ 		return PTR_ERR(priv->clks[1]);
++	}
+ 
+ 	return 0;
+ }
+@@ -779,6 +781,8 @@ static void usbhs_remove(struct platform_device *pdev)
+ 
+ 	dev_dbg(&pdev->dev, "usb remove\n");
+ 
++	flush_delayed_work(&priv->notify_hotplug_work);
++
+ 	/* power off */
+ 	if (!usbhs_get_dparam(priv, runtime_pwctrl))
+ 		usbhsc_power_ctrl(priv, 0);
+diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
+index 105132ae87acbc..e8e5723f541226 100644
+--- a/drivers/usb/renesas_usbhs/mod_gadget.c
++++ b/drivers/usb/renesas_usbhs/mod_gadget.c
+@@ -1094,7 +1094,7 @@ int usbhs_mod_gadget_probe(struct usbhs_priv *priv)
+ 		goto usbhs_mod_gadget_probe_err_gpriv;
+ 	}
+ 
+-	gpriv->transceiver = usb_get_phy(USB_PHY_TYPE_UNDEFINED);
++	gpriv->transceiver = devm_usb_get_phy(dev, USB_PHY_TYPE_UNDEFINED);
+ 	dev_info(dev, "%stransceiver found\n",
+ 		 !IS_ERR(gpriv->transceiver) ? "" : "no ");
+ 
+diff --git a/drivers/usb/typec/tcpm/tcpci_rt1711h.c b/drivers/usb/typec/tcpm/tcpci_rt1711h.c
+index 64f6dd0dc66096..88c50b984e8a3f 100644
+--- a/drivers/usb/typec/tcpm/tcpci_rt1711h.c
++++ b/drivers/usb/typec/tcpm/tcpci_rt1711h.c
+@@ -334,6 +334,11 @@ static int rt1711h_probe(struct i2c_client *client)
+ {
+ 	int ret;
+ 	struct rt1711h_chip *chip;
++	const u16 alert_mask = TCPC_ALERT_TX_SUCCESS | TCPC_ALERT_TX_DISCARDED |
++			       TCPC_ALERT_TX_FAILED | TCPC_ALERT_RX_HARD_RST |
++			       TCPC_ALERT_RX_STATUS | TCPC_ALERT_POWER_STATUS |
++			       TCPC_ALERT_CC_STATUS | TCPC_ALERT_RX_BUF_OVF |
++			       TCPC_ALERT_FAULT;
+ 
+ 	chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
+ 	if (!chip)
+@@ -382,6 +387,12 @@ static int rt1711h_probe(struct i2c_client *client)
+ 					dev_name(chip->dev), chip);
+ 	if (ret < 0)
+ 		return ret;
++
++	/* Enable alert interrupts */
++	ret = rt1711h_write16(chip, TCPC_ALERT_MASK, alert_mask);
++	if (ret < 0)
++		return ret;
++
+ 	enable_irq_wake(client->irq);
+ 
+ 	return 0;
+diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
+index 7a3f0f5af38fdb..3f2bc13efa4865 100644
+--- a/drivers/usb/typec/ucsi/ucsi.c
++++ b/drivers/usb/typec/ucsi/ucsi.c
+@@ -25,7 +25,7 @@
+  * difficult to estimate the time it takes for the system to process the command
+  * before it is actually passed to the PPM.
+  */
+-#define UCSI_TIMEOUT_MS		5000
++#define UCSI_TIMEOUT_MS		10000
+ 
+ /*
+  * UCSI_SWAP_TIMEOUT_MS - Timeout for role swap requests
+@@ -1330,7 +1330,7 @@ static int ucsi_reset_ppm(struct ucsi *ucsi)
+ 
+ 	mutex_lock(&ucsi->ppm_lock);
+ 
+-	ret = ucsi->ops->read_cci(ucsi, &cci);
++	ret = ucsi->ops->poll_cci(ucsi, &cci);
+ 	if (ret < 0)
+ 		goto out;
+ 
+@@ -1348,7 +1348,7 @@ static int ucsi_reset_ppm(struct ucsi *ucsi)
+ 
+ 		tmo = jiffies + msecs_to_jiffies(UCSI_TIMEOUT_MS);
+ 		do {
+-			ret = ucsi->ops->read_cci(ucsi, &cci);
++			ret = ucsi->ops->poll_cci(ucsi, &cci);
+ 			if (ret < 0)
+ 				goto out;
+ 			if (cci & UCSI_CCI_COMMAND_COMPLETE)
+@@ -1377,7 +1377,7 @@ static int ucsi_reset_ppm(struct ucsi *ucsi)
+ 		/* Give the PPM time to process a reset before reading CCI */
+ 		msleep(20);
+ 
+-		ret = ucsi->ops->read_cci(ucsi, &cci);
++		ret = ucsi->ops->poll_cci(ucsi, &cci);
+ 		if (ret)
+ 			goto out;
+ 
+@@ -1809,11 +1809,11 @@ static int ucsi_init(struct ucsi *ucsi)
+ 
+ err_unregister:
+ 	for (con = connector; con->port; con++) {
++		if (con->wq)
++			destroy_workqueue(con->wq);
+ 		ucsi_unregister_partner(con);
+ 		ucsi_unregister_altmodes(con, UCSI_RECIPIENT_CON);
+ 		ucsi_unregister_port_psy(con);
+-		if (con->wq)
+-			destroy_workqueue(con->wq);
+ 
+ 		usb_power_delivery_unregister_capabilities(con->port_sink_caps);
+ 		con->port_sink_caps = NULL;
+@@ -1913,8 +1913,8 @@ struct ucsi *ucsi_create(struct device *dev, const struct ucsi_operations *ops)
+ 	struct ucsi *ucsi;
+ 
+ 	if (!ops ||
+-	    !ops->read_version || !ops->read_cci || !ops->read_message_in ||
+-	    !ops->sync_control || !ops->async_control)
++	    !ops->read_version || !ops->read_cci || !ops->poll_cci ||
++	    !ops->read_message_in || !ops->sync_control || !ops->async_control)
+ 		return ERR_PTR(-EINVAL);
+ 
+ 	ucsi = kzalloc(sizeof(*ucsi), GFP_KERNEL);
+@@ -1997,10 +1997,6 @@ void ucsi_unregister(struct ucsi *ucsi)
+ 
+ 	for (i = 0; i < ucsi->cap.num_connectors; i++) {
+ 		cancel_work_sync(&ucsi->connector[i].work);
+-		ucsi_unregister_partner(&ucsi->connector[i]);
+-		ucsi_unregister_altmodes(&ucsi->connector[i],
+-					 UCSI_RECIPIENT_CON);
+-		ucsi_unregister_port_psy(&ucsi->connector[i]);
+ 
+ 		if (ucsi->connector[i].wq) {
+ 			struct ucsi_work *uwork;
+@@ -2016,6 +2012,11 @@ void ucsi_unregister(struct ucsi *ucsi)
+ 			destroy_workqueue(ucsi->connector[i].wq);
+ 		}
+ 
++		ucsi_unregister_partner(&ucsi->connector[i]);
++		ucsi_unregister_altmodes(&ucsi->connector[i],
++					 UCSI_RECIPIENT_CON);
++		ucsi_unregister_port_psy(&ucsi->connector[i]);
++
+ 		usb_power_delivery_unregister_capabilities(ucsi->connector[i].port_sink_caps);
+ 		ucsi->connector[i].port_sink_caps = NULL;
+ 		usb_power_delivery_unregister_capabilities(ucsi->connector[i].port_source_caps);
+diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
+index 1cf5aad4c23a9e..a333006d3496a1 100644
+--- a/drivers/usb/typec/ucsi/ucsi.h
++++ b/drivers/usb/typec/ucsi/ucsi.h
+@@ -60,6 +60,7 @@ struct dentry;
+  * struct ucsi_operations - UCSI I/O operations
+  * @read_version: Read implemented UCSI version
+  * @read_cci: Read CCI register
++ * @poll_cci: Read CCI register while polling with notifications disabled
+  * @read_message_in: Read message data from UCSI
+  * @sync_control: Blocking control operation
+  * @async_control: Non-blocking control operation
+@@ -74,6 +75,7 @@ struct dentry;
+ struct ucsi_operations {
+ 	int (*read_version)(struct ucsi *ucsi, u16 *version);
+ 	int (*read_cci)(struct ucsi *ucsi, u32 *cci);
++	int (*poll_cci)(struct ucsi *ucsi, u32 *cci);
+ 	int (*read_message_in)(struct ucsi *ucsi, void *val, size_t val_len);
+ 	int (*sync_control)(struct ucsi *ucsi, u64 command);
+ 	int (*async_control)(struct ucsi *ucsi, u64 command);
+diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c
+index accf15ff1306a2..8de2961718cdd2 100644
+--- a/drivers/usb/typec/ucsi/ucsi_acpi.c
++++ b/drivers/usb/typec/ucsi/ucsi_acpi.c
+@@ -59,19 +59,24 @@ static int ucsi_acpi_read_version(struct ucsi *ucsi, u16 *version)
+ static int ucsi_acpi_read_cci(struct ucsi *ucsi, u32 *cci)
+ {
+ 	struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
+-	int ret;
+-
+-	if (UCSI_COMMAND(ua->cmd) == UCSI_PPM_RESET) {
+-		ret = ucsi_acpi_dsm(ua, UCSI_DSM_FUNC_READ);
+-		if (ret)
+-			return ret;
+-	}
+ 
+ 	memcpy(cci, ua->base + UCSI_CCI, sizeof(*cci));
+ 
+ 	return 0;
+ }
+ 
++static int ucsi_acpi_poll_cci(struct ucsi *ucsi, u32 *cci)
++{
++	struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
++	int ret;
++
++	ret = ucsi_acpi_dsm(ua, UCSI_DSM_FUNC_READ);
++	if (ret)
++		return ret;
++
++	return ucsi_acpi_read_cci(ucsi, cci);
++}
++
+ static int ucsi_acpi_read_message_in(struct ucsi *ucsi, void *val, size_t val_len)
+ {
+ 	struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
+@@ -94,6 +99,7 @@ static int ucsi_acpi_async_control(struct ucsi *ucsi, u64 command)
+ static const struct ucsi_operations ucsi_acpi_ops = {
+ 	.read_version = ucsi_acpi_read_version,
+ 	.read_cci = ucsi_acpi_read_cci,
++	.poll_cci = ucsi_acpi_poll_cci,
+ 	.read_message_in = ucsi_acpi_read_message_in,
+ 	.sync_control = ucsi_sync_control_common,
+ 	.async_control = ucsi_acpi_async_control
+@@ -145,6 +151,7 @@ static int ucsi_gram_sync_control(struct ucsi *ucsi, u64 command)
+ static const struct ucsi_operations ucsi_gram_ops = {
+ 	.read_version = ucsi_acpi_read_version,
+ 	.read_cci = ucsi_acpi_read_cci,
++	.poll_cci = ucsi_acpi_poll_cci,
+ 	.read_message_in = ucsi_gram_read_message_in,
+ 	.sync_control = ucsi_gram_sync_control,
+ 	.async_control = ucsi_acpi_async_control
+diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c
+index 740171f24ef9fa..4b1668733a4bec 100644
+--- a/drivers/usb/typec/ucsi/ucsi_ccg.c
++++ b/drivers/usb/typec/ucsi/ucsi_ccg.c
+@@ -664,6 +664,7 @@ static int ucsi_ccg_sync_control(struct ucsi *ucsi, u64 command)
+ static const struct ucsi_operations ucsi_ccg_ops = {
+ 	.read_version = ucsi_ccg_read_version,
+ 	.read_cci = ucsi_ccg_read_cci,
++	.poll_cci = ucsi_ccg_read_cci,
+ 	.read_message_in = ucsi_ccg_read_message_in,
+ 	.sync_control = ucsi_ccg_sync_control,
+ 	.async_control = ucsi_ccg_async_control,
+diff --git a/drivers/usb/typec/ucsi/ucsi_glink.c b/drivers/usb/typec/ucsi/ucsi_glink.c
+index 9b6cb76e632807..75c0e54c37fa0a 100644
+--- a/drivers/usb/typec/ucsi/ucsi_glink.c
++++ b/drivers/usb/typec/ucsi/ucsi_glink.c
+@@ -201,6 +201,7 @@ static void pmic_glink_ucsi_connector_status(struct ucsi_connector *con)
+ static const struct ucsi_operations pmic_glink_ucsi_ops = {
+ 	.read_version = pmic_glink_ucsi_read_version,
+ 	.read_cci = pmic_glink_ucsi_read_cci,
++	.poll_cci = pmic_glink_ucsi_read_cci,
+ 	.read_message_in = pmic_glink_ucsi_read_message_in,
+ 	.sync_control = ucsi_sync_control_common,
+ 	.async_control = pmic_glink_ucsi_async_control,
+diff --git a/drivers/usb/typec/ucsi/ucsi_stm32g0.c b/drivers/usb/typec/ucsi/ucsi_stm32g0.c
+index 6923fad31d7951..57ef7d83a41211 100644
+--- a/drivers/usb/typec/ucsi/ucsi_stm32g0.c
++++ b/drivers/usb/typec/ucsi/ucsi_stm32g0.c
+@@ -424,6 +424,7 @@ static irqreturn_t ucsi_stm32g0_irq_handler(int irq, void *data)
+ static const struct ucsi_operations ucsi_stm32g0_ops = {
+ 	.read_version = ucsi_stm32g0_read_version,
+ 	.read_cci = ucsi_stm32g0_read_cci,
++	.poll_cci = ucsi_stm32g0_read_cci,
+ 	.read_message_in = ucsi_stm32g0_read_message_in,
+ 	.sync_control = ucsi_sync_control_common,
+ 	.async_control = ucsi_stm32g0_async_control,
+diff --git a/drivers/usb/typec/ucsi/ucsi_yoga_c630.c b/drivers/usb/typec/ucsi/ucsi_yoga_c630.c
+index f3a5e24ea84d51..40e5da4fd2a454 100644
+--- a/drivers/usb/typec/ucsi/ucsi_yoga_c630.c
++++ b/drivers/usb/typec/ucsi/ucsi_yoga_c630.c
+@@ -74,6 +74,7 @@ static int yoga_c630_ucsi_async_control(struct ucsi *ucsi, u64 command)
+ const struct ucsi_operations yoga_c630_ucsi_ops = {
+ 	.read_version = yoga_c630_ucsi_read_version,
+ 	.read_cci = yoga_c630_ucsi_read_cci,
++	.poll_cci = yoga_c630_ucsi_read_cci,
+ 	.read_message_in = yoga_c630_ucsi_read_message_in,
+ 	.sync_control = ucsi_sync_control_common,
+ 	.async_control = yoga_c630_ucsi_async_control,
+diff --git a/drivers/virt/acrn/hsm.c b/drivers/virt/acrn/hsm.c
+index c24036c4e51ecd..e4e196abdaac94 100644
+--- a/drivers/virt/acrn/hsm.c
++++ b/drivers/virt/acrn/hsm.c
+@@ -49,7 +49,7 @@ static int pmcmd_ioctl(u64 cmd, void __user *uptr)
+ 	switch (cmd & PMCMD_TYPE_MASK) {
+ 	case ACRN_PMCMD_GET_PX_CNT:
+ 	case ACRN_PMCMD_GET_CX_CNT:
+-		pm_info = kmalloc(sizeof(u64), GFP_KERNEL);
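++		/* Zero the buffer so bytes not written by the hypervisor cannot leak to userspace */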
++		pm_info = kzalloc(sizeof(u64), GFP_KERNEL);
+ 		if (!pm_info)
+ 			return -ENOMEM;
+ 
+@@ -64,7 +64,7 @@ static int pmcmd_ioctl(u64 cmd, void __user *uptr)
+ 		kfree(pm_info);
+ 		break;
+ 	case ACRN_PMCMD_GET_PX_DATA:
+-		px_data = kmalloc(sizeof(*px_data), GFP_KERNEL);
++		px_data = kzalloc(sizeof(*px_data), GFP_KERNEL);
+ 		if (!px_data)
+ 			return -ENOMEM;
+ 
+@@ -79,7 +79,7 @@ static int pmcmd_ioctl(u64 cmd, void __user *uptr)
+ 		kfree(px_data);
+ 		break;
+ 	case ACRN_PMCMD_GET_CX_DATA:
+-		cx_data = kmalloc(sizeof(*cx_data), GFP_KERNEL);
++		cx_data = kzalloc(sizeof(*cx_data), GFP_KERNEL);
+ 		if (!cx_data)
+ 			return -ENOMEM;
+ 
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index 848cb2c3d9ddeb..78c4a3765002eb 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -1200,7 +1200,7 @@ ssize_t btrfs_buffered_write(struct kiocb *iocb, struct iov_iter *i)
+ 	ssize_t ret;
+ 	bool only_release_metadata = false;
+ 	bool force_page_uptodate = false;
+-	loff_t old_isize = i_size_read(inode);
++	loff_t old_isize;
+ 	unsigned int ilock_flags = 0;
+ 	const bool nowait = (iocb->ki_flags & IOCB_NOWAIT);
+ 	unsigned int bdp_flags = (nowait ? BDP_ASYNC : 0);
+@@ -1212,6 +1212,13 @@ ssize_t btrfs_buffered_write(struct kiocb *iocb, struct iov_iter *i)
+ 	if (ret < 0)
+ 		return ret;
+ 
++	/*
++	 * We can only trust the isize with the inode lock held; otherwise it
++	 * can race with other buffered writes and cause an incorrect call to
++	 * pagecache_isize_extended() that overwrites existing data.
++	 */
++	old_isize = i_size_read(inode);
++
+ 	ret = generic_write_checks(iocb, i);
+ 	if (ret <= 0)
+ 		goto out;
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index 395b8b880ce786..587ac07cd19410 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -7094,6 +7094,7 @@ static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf,
+ 		btrfs_err(fs_info,
+ 			  "failed to add chunk map, start=%llu len=%llu: %d",
+ 			  map->start, map->chunk_len, ret);
++		btrfs_free_chunk_map(map);
+ 	}
+ 
+ 	return ret;
+diff --git a/fs/coredump.c b/fs/coredump.c
+index 45737b43dda5c8..2b8c36c9660c5c 100644
+--- a/fs/coredump.c
++++ b/fs/coredump.c
+@@ -63,6 +63,7 @@ static void free_vma_snapshot(struct coredump_params *cprm);
+ 
+ static int core_uses_pid;
+ static unsigned int core_pipe_limit;
++static unsigned int core_sort_vma;
+ static char core_pattern[CORENAME_MAX_SIZE] = "core";
+ static int core_name_size = CORENAME_MAX_SIZE;
+ unsigned int core_file_note_size_limit = CORE_FILE_NOTE_SIZE_DEFAULT;
+@@ -1025,6 +1026,15 @@ static struct ctl_table coredump_sysctls[] = {
+ 		.extra1		= (unsigned int *)&core_file_note_size_min,
+ 		.extra2		= (unsigned int *)&core_file_note_size_max,
+ 	},
++	{
++		.procname	= "core_sort_vma",
++		.data		= &core_sort_vma,
++		.maxlen		= sizeof(int),
++		.mode		= 0644,
++		.proc_handler	= proc_douintvec_minmax,
++		.extra1		= SYSCTL_ZERO,
++		.extra2		= SYSCTL_ONE,
++	},
+ };
+ 
+ static int __init init_fs_coredump_sysctls(void)
+@@ -1255,8 +1265,9 @@ static bool dump_vma_snapshot(struct coredump_params *cprm)
+ 		cprm->vma_data_size += m->dump_size;
+ 	}
+ 
+-	sort(cprm->vma_meta, cprm->vma_count, sizeof(*cprm->vma_meta),
+-		cmp_vma_size, NULL);
++	if (core_sort_vma)
++		sort(cprm->vma_meta, cprm->vma_count, sizeof(*cprm->vma_meta),
++		     cmp_vma_size, NULL);
+ 
+ 	return true;
+ }
+diff --git a/fs/exfat/balloc.c b/fs/exfat/balloc.c
+index ce9be95c9172f6..9ff825f1502d5e 100644
+--- a/fs/exfat/balloc.c
++++ b/fs/exfat/balloc.c
+@@ -141,7 +141,7 @@ int exfat_set_bitmap(struct inode *inode, unsigned int clu, bool sync)
+ 	return 0;
+ }
+ 
+-void exfat_clear_bitmap(struct inode *inode, unsigned int clu, bool sync)
++int exfat_clear_bitmap(struct inode *inode, unsigned int clu, bool sync)
+ {
+ 	int i, b;
+ 	unsigned int ent_idx;
+@@ -150,13 +150,17 @@ void exfat_clear_bitmap(struct inode *inode, unsigned int clu, bool sync)
+ 	struct exfat_mount_options *opts = &sbi->options;
+ 
+ 	if (!is_valid_cluster(sbi, clu))
+-		return;
++		return -EIO;
+ 
+ 	ent_idx = CLUSTER_TO_BITMAP_ENT(clu);
+ 	i = BITMAP_OFFSET_SECTOR_INDEX(sb, ent_idx);
+ 	b = BITMAP_OFFSET_BIT_IN_SECTOR(sb, ent_idx);
+ 
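++	/* Clearing an already-clear bit means the bitmap is corrupted */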
++	if (!test_bit_le(b, sbi->vol_amap[i]->b_data))
++		return -EIO;
++
+ 	clear_bit_le(b, sbi->vol_amap[i]->b_data);
++
+ 	exfat_update_bh(sbi->vol_amap[i], sync);
+ 
+ 	if (opts->discard) {
+@@ -171,6 +175,8 @@ void exfat_clear_bitmap(struct inode *inode, unsigned int clu, bool sync)
+ 			opts->discard = 0;
+ 		}
+ 	}
++
++	return 0;
+ }
+ 
+ /*
+diff --git a/fs/exfat/exfat_fs.h b/fs/exfat/exfat_fs.h
+index 3cdc1de362a941..d2ba8e2d0c398a 100644
+--- a/fs/exfat/exfat_fs.h
++++ b/fs/exfat/exfat_fs.h
+@@ -452,7 +452,7 @@ int exfat_count_num_clusters(struct super_block *sb,
+ int exfat_load_bitmap(struct super_block *sb);
+ void exfat_free_bitmap(struct exfat_sb_info *sbi);
+ int exfat_set_bitmap(struct inode *inode, unsigned int clu, bool sync);
+-void exfat_clear_bitmap(struct inode *inode, unsigned int clu, bool sync);
++int exfat_clear_bitmap(struct inode *inode, unsigned int clu, bool sync);
+ unsigned int exfat_find_free_bitmap(struct super_block *sb, unsigned int clu);
+ int exfat_count_used_clusters(struct super_block *sb, unsigned int *ret_count);
+ int exfat_trim_fs(struct inode *inode, struct fstrim_range *range);
+diff --git a/fs/exfat/fatent.c b/fs/exfat/fatent.c
+index 9e5492ac409b07..6f3651c6ca91ef 100644
+--- a/fs/exfat/fatent.c
++++ b/fs/exfat/fatent.c
+@@ -175,6 +175,7 @@ static int __exfat_free_cluster(struct inode *inode, struct exfat_chain *p_chain
+ 		BITMAP_OFFSET_SECTOR_INDEX(sb, CLUSTER_TO_BITMAP_ENT(clu));
+ 
+ 	if (p_chain->flags == ALLOC_NO_FAT_CHAIN) {
++		int err;
+ 		unsigned int last_cluster = p_chain->dir + p_chain->size - 1;
+ 		do {
+ 			bool sync = false;
+@@ -189,7 +190,9 @@ static int __exfat_free_cluster(struct inode *inode, struct exfat_chain *p_chain
+ 				cur_cmap_i = next_cmap_i;
+ 			}
+ 
+-			exfat_clear_bitmap(inode, clu, (sync && IS_DIRSYNC(inode)));
++			err = exfat_clear_bitmap(inode, clu, (sync && IS_DIRSYNC(inode)));
++			if (err)
++				break;
+ 			clu++;
+ 			num_clusters++;
+ 		} while (num_clusters < p_chain->size);
+@@ -210,12 +213,13 @@ static int __exfat_free_cluster(struct inode *inode, struct exfat_chain *p_chain
+ 				cur_cmap_i = next_cmap_i;
+ 			}
+ 
+-			exfat_clear_bitmap(inode, clu, (sync && IS_DIRSYNC(inode)));
++			if (exfat_clear_bitmap(inode, clu, (sync && IS_DIRSYNC(inode))))
++				break;
+ 			clu = n_clu;
+ 			num_clusters++;
+ 
+ 			if (err)
+-				goto dec_used_clus;
++				break;
+ 
+ 			if (num_clusters >= sbi->num_clusters - EXFAT_FIRST_CLUSTER) {
+ 				/*
+@@ -229,7 +233,6 @@ static int __exfat_free_cluster(struct inode *inode, struct exfat_chain *p_chain
+ 		} while (clu != EXFAT_EOF_CLUSTER);
+ 	}
+ 
+-dec_used_clus:
+ 	sbi->used_clusters -= num_clusters;
+ 	return 0;
+ }
+diff --git a/fs/exfat/file.c b/fs/exfat/file.c
+index 05b51e7217838f..807349d8ea0501 100644
+--- a/fs/exfat/file.c
++++ b/fs/exfat/file.c
+@@ -587,7 +587,7 @@ static ssize_t exfat_file_write_iter(struct kiocb *iocb, struct iov_iter *iter)
+ 	valid_size = ei->valid_size;
+ 
+ 	ret = generic_write_checks(iocb, iter);
+-	if (ret < 0)
++	if (ret <= 0)
+ 		goto unlock;
+ 
+ 	if (iocb->ki_flags & IOCB_DIRECT) {
+diff --git a/fs/exfat/namei.c b/fs/exfat/namei.c
+index 337197ece59955..e47a5ddfc79b3d 100644
+--- a/fs/exfat/namei.c
++++ b/fs/exfat/namei.c
+@@ -237,7 +237,7 @@ static int exfat_search_empty_slot(struct super_block *sb,
+ 		dentry = 0;
+ 	}
+ 
+-	while (dentry + num_entries < total_entries &&
++	while (dentry + num_entries <= total_entries &&
+ 	       clu.dir != EXFAT_EOF_CLUSTER) {
+ 		i = dentry & (dentries_per_clu - 1);
+ 
+diff --git a/fs/netfs/read_collect.c b/fs/netfs/read_collect.c
+index a44132c986538b..3b9461f5e712e5 100644
+--- a/fs/netfs/read_collect.c
++++ b/fs/netfs/read_collect.c
+@@ -258,17 +258,18 @@ static bool netfs_consume_read_data(struct netfs_io_subrequest *subreq, bool was
+ 	 */
+ 	if (!subreq->consumed &&
+ 	    !prev_donated &&
+-	    !list_is_first(&subreq->rreq_link, &rreq->subrequests) &&
+-	    subreq->start == prev->start + prev->len) {
++	    !list_is_first(&subreq->rreq_link, &rreq->subrequests)) {
+ 		prev = list_prev_entry(subreq, rreq_link);
+-		WRITE_ONCE(prev->next_donated, prev->next_donated + subreq->len);
+-		subreq->start += subreq->len;
+-		subreq->len = 0;
+-		subreq->transferred = 0;
+-		trace_netfs_donate(rreq, subreq, prev, subreq->len,
+-				   netfs_trace_donate_to_prev);
+-		trace_netfs_sreq(subreq, netfs_sreq_trace_donate_to_prev);
+-		goto remove_subreq_locked;
++		if (subreq->start == prev->start + prev->len) {
++			WRITE_ONCE(prev->next_donated, prev->next_donated + subreq->len);
++			subreq->start += subreq->len;
++			subreq->len = 0;
++			subreq->transferred = 0;
++			trace_netfs_donate(rreq, subreq, prev, subreq->len,
++					   netfs_trace_donate_to_prev);
++			trace_netfs_sreq(subreq, netfs_sreq_trace_donate_to_prev);
++			goto remove_subreq_locked;
++		}
+ 	}
+ 
+ 	/* If we can't donate down the chain, donate up the chain instead. */
+diff --git a/fs/netfs/read_pgpriv2.c b/fs/netfs/read_pgpriv2.c
+index 54d5004fec1826..e72f5e67483422 100644
+--- a/fs/netfs/read_pgpriv2.c
++++ b/fs/netfs/read_pgpriv2.c
+@@ -181,16 +181,17 @@ void netfs_pgpriv2_write_to_the_cache(struct netfs_io_request *rreq)
+ 			break;
+ 
+ 		folioq_unmark3(folioq, slot);
+-		if (!folioq->marks3) {
++		while (!folioq->marks3) {
+ 			folioq = folioq->next;
+ 			if (!folioq)
+-				break;
++				goto end_of_queue;
+ 		}
+ 
+ 		slot = __ffs(folioq->marks3);
+ 		folio = folioq_folio(folioq, slot);
+ 	}
+ 
++end_of_queue:
+ 	netfs_issue_write(wreq, &wreq->io_streams[1]);
+ 	smp_wmb(); /* Write lists before ALL_QUEUED. */
+ 	set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags);
+diff --git a/fs/nfs/file.c b/fs/nfs/file.c
+index 6800ee92d742a8..153d25d4b810c5 100644
+--- a/fs/nfs/file.c
++++ b/fs/nfs/file.c
+@@ -29,6 +29,7 @@
+ #include <linux/pagemap.h>
+ #include <linux/gfp.h>
+ #include <linux/swap.h>
++#include <linux/compaction.h>
+ 
+ #include <linux/uaccess.h>
+ #include <linux/filelock.h>
+@@ -451,7 +452,7 @@ static bool nfs_release_folio(struct folio *folio, gfp_t gfp)
+ 	/* If the private flag is set, then the folio is not freeable */
+ 	if (folio_test_private(folio)) {
+ 		if ((current_gfp_context(gfp) & GFP_KERNEL) != GFP_KERNEL ||
+-		    current_is_kswapd())
++		    current_is_kswapd() || current_is_kcompactd())
+ 			return false;
+ 		if (nfs_wb_folio(folio->mapping->host, folio) < 0)
+ 			return false;
+diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
+index 05274121e46f04..b630beb757a44a 100644
+--- a/fs/smb/client/cifsglob.h
++++ b/fs/smb/client/cifsglob.h
+@@ -209,10 +209,8 @@ struct cifs_cred {
+ 
+ struct cifs_open_info_data {
+ 	bool adjust_tz;
+-	union {
+-		bool reparse_point;
+-		bool symlink;
+-	};
++	bool reparse_point;
++	bool contains_posix_file_info;
+ 	struct {
+ 		/* ioctl response buffer */
+ 		struct {
+diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c
+index e11e67af760f44..a3f0835e12be31 100644
+--- a/fs/smb/client/inode.c
++++ b/fs/smb/client/inode.c
+@@ -968,7 +968,7 @@ cifs_get_file_info(struct file *filp)
+ 		/* TODO: add support to query reparse tag */
+ 		data.adjust_tz = false;
+ 		if (data.symlink_target) {
+-			data.symlink = true;
++			data.reparse_point = true;
+ 			data.reparse.tag = IO_REPARSE_TAG_SYMLINK;
+ 		}
+ 		path = build_path_from_dentry(dentry, page);
+diff --git a/fs/smb/client/reparse.h b/fs/smb/client/reparse.h
+index ff05b0e75c9284..f080f92cb1e741 100644
+--- a/fs/smb/client/reparse.h
++++ b/fs/smb/client/reparse.h
+@@ -97,14 +97,30 @@ static inline bool reparse_inode_match(struct inode *inode,
+ 
+ static inline bool cifs_open_data_reparse(struct cifs_open_info_data *data)
+ {
+-	struct smb2_file_all_info *fi = &data->fi;
+-	u32 attrs = le32_to_cpu(fi->Attributes);
++	u32 attrs;
+ 	bool ret;
+ 
+-	ret = data->reparse_point || (attrs & ATTR_REPARSE);
+-	if (ret)
+-		attrs |= ATTR_REPARSE;
+-	fi->Attributes = cpu_to_le32(attrs);
++	if (data->contains_posix_file_info) {
++		struct smb311_posix_qinfo *fi = &data->posix_fi;
++
++		attrs = le32_to_cpu(fi->DosAttributes);
++		if (data->reparse_point) {
++			attrs |= ATTR_REPARSE;
++			fi->DosAttributes = cpu_to_le32(attrs);
++		}
++
++	} else {
++		struct smb2_file_all_info *fi = &data->fi;
++
++		attrs = le32_to_cpu(fi->Attributes);
++		if (data->reparse_point) {
++			attrs |= ATTR_REPARSE;
++			fi->Attributes = cpu_to_le32(attrs);
++		}
++	}
++
++	ret = attrs & ATTR_REPARSE;
++
+ 	return ret;
+ }
+ 
+diff --git a/fs/smb/client/smb1ops.c b/fs/smb/client/smb1ops.c
+index c70f4961c4eb78..bd791aa54681f6 100644
+--- a/fs/smb/client/smb1ops.c
++++ b/fs/smb/client/smb1ops.c
+@@ -551,7 +551,7 @@ static int cifs_query_path_info(const unsigned int xid,
+ 	int rc;
+ 	FILE_ALL_INFO fi = {};
+ 
+-	data->symlink = false;
++	data->reparse_point = false;
+ 	data->adjust_tz = false;
+ 
+ 	/* could do find first instead but this returns more info */
+@@ -592,7 +592,7 @@ static int cifs_query_path_info(const unsigned int xid,
+ 		/* Need to check if this is a symbolic link or not */
+ 		tmprc = CIFS_open(xid, &oparms, &oplock, NULL);
+ 		if (tmprc == -EOPNOTSUPP)
+-			data->symlink = true;
++			data->reparse_point = true;
+ 		else if (tmprc == 0)
+ 			CIFSSMBClose(xid, tcon, fid.netfid);
+ 	}
+diff --git a/fs/smb/client/smb2inode.c b/fs/smb/client/smb2inode.c
+index 7dfd3eb3847b33..6048b3fed3e787 100644
+--- a/fs/smb/client/smb2inode.c
++++ b/fs/smb/client/smb2inode.c
+@@ -648,6 +648,7 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ 		switch (cmds[i]) {
+ 		case SMB2_OP_QUERY_INFO:
+ 			idata = in_iov[i].iov_base;
++			idata->contains_posix_file_info = false;
+ 			if (rc == 0 && cfile && cfile->symlink_target) {
+ 				idata->symlink_target = kstrdup(cfile->symlink_target, GFP_KERNEL);
+ 				if (!idata->symlink_target)
+@@ -671,6 +672,7 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ 			break;
+ 		case SMB2_OP_POSIX_QUERY_INFO:
+ 			idata = in_iov[i].iov_base;
++			idata->contains_posix_file_info = true;
+ 			if (rc == 0 && cfile && cfile->symlink_target) {
+ 				idata->symlink_target = kstrdup(cfile->symlink_target, GFP_KERNEL);
+ 				if (!idata->symlink_target)
+@@ -768,6 +770,7 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ 				idata = in_iov[i].iov_base;
+ 				idata->reparse.io.iov = *iov;
+ 				idata->reparse.io.buftype = resp_buftype[i + 1];
++				idata->contains_posix_file_info = false; /* BB VERIFY */
+ 				rbuf = reparse_buf_ptr(iov);
+ 				if (IS_ERR(rbuf)) {
+ 					rc = PTR_ERR(rbuf);
+@@ -789,6 +792,7 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ 		case SMB2_OP_QUERY_WSL_EA:
+ 			if (!rc) {
+ 				idata = in_iov[i].iov_base;
++				idata->contains_posix_file_info = false;
+ 				qi_rsp = rsp_iov[i + 1].iov_base;
+ 				data[0] = (u8 *)qi_rsp + le16_to_cpu(qi_rsp->OutputBufferOffset);
+ 				size[0] = le32_to_cpu(qi_rsp->OutputBufferLength);
+diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
+index e8da63d29a28f1..516be8c0b2a9b4 100644
+--- a/fs/smb/client/smb2ops.c
++++ b/fs/smb/client/smb2ops.c
+@@ -1001,6 +1001,7 @@ static int smb2_query_file_info(const unsigned int xid, struct cifs_tcon *tcon,
+ 		if (!data->symlink_target)
+ 			return -ENOMEM;
+ 	}
++	data->contains_posix_file_info = false;
+ 	return SMB2_query_info(xid, tcon, fid->persistent_fid, fid->volatile_fid, &data->fi);
+ }
+ 
+@@ -5177,7 +5178,7 @@ int __cifs_sfu_make_node(unsigned int xid, struct inode *inode,
+ 			     FILE_CREATE, CREATE_NOT_DIR |
+ 			     CREATE_OPTION_SPECIAL, ACL_NO_MODE);
+ 	oparms.fid = &fid;
+-
++	idata.contains_posix_file_info = false;
+ 	rc = server->ops->open(xid, &oparms, &oplock, &idata);
+ 	if (rc)
+ 		goto out;
+diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
+index c763a2f7df6640..8464261d763876 100644
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -7441,17 +7441,17 @@ int smb2_lock(struct ksmbd_work *work)
+ 		}
+ 
+ no_check_cl:
++		flock = smb_lock->fl;
++		list_del(&smb_lock->llist);
++
+ 		if (smb_lock->zero_len) {
+ 			err = 0;
+ 			goto skip;
+ 		}
+-
+-		flock = smb_lock->fl;
+-		list_del(&smb_lock->llist);
+ retry:
+ 		rc = vfs_lock_file(filp, smb_lock->cmd, flock, NULL);
+ skip:
+-		if (flags & SMB2_LOCKFLAG_UNLOCK) {
++		if (smb_lock->flags & SMB2_LOCKFLAG_UNLOCK) {
+ 			if (!rc) {
+ 				ksmbd_debug(SMB, "File unlocked\n");
+ 			} else if (rc == -ENOENT) {
+diff --git a/fs/smb/server/smbacl.c b/fs/smb/server/smbacl.c
+index 1c9775f1efa56d..da8ed72f335d99 100644
+--- a/fs/smb/server/smbacl.c
++++ b/fs/smb/server/smbacl.c
+@@ -807,6 +807,13 @@ static int parse_sid(struct smb_sid *psid, char *end_of_acl)
+ 		return -EINVAL;
+ 	}
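++	/* An empty SID is valid; otherwise the sub-authority array must fit within the ACL */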
+ 
++	if (!psid->num_subauth)
++		return 0;
++
++	if (psid->num_subauth > SID_MAX_SUB_AUTHORITIES ||
++	    end_of_acl < (char *)psid + 8 + sizeof(__le32) * psid->num_subauth)
++		return -EINVAL;
++
+ 	return 0;
+ }
+ 
+@@ -848,6 +855,9 @@ int parse_sec_desc(struct mnt_idmap *idmap, struct smb_ntsd *pntsd,
+ 	pntsd->type = cpu_to_le16(DACL_PRESENT);
+ 
+ 	if (pntsd->osidoffset) {
++		if (le32_to_cpu(pntsd->osidoffset) < sizeof(struct smb_ntsd))
++			return -EINVAL;
++
+ 		rc = parse_sid(owner_sid_ptr, end_of_acl);
+ 		if (rc) {
+ 			pr_err("%s: Error %d parsing Owner SID\n", __func__, rc);
+@@ -863,6 +873,9 @@ int parse_sec_desc(struct mnt_idmap *idmap, struct smb_ntsd *pntsd,
+ 	}
+ 
+ 	if (pntsd->gsidoffset) {
++		if (le32_to_cpu(pntsd->gsidoffset) < sizeof(struct smb_ntsd))
++			return -EINVAL;
++
+ 		rc = parse_sid(group_sid_ptr, end_of_acl);
+ 		if (rc) {
+ 			pr_err("%s: Error %d mapping Owner SID to gid\n",
+@@ -884,6 +897,9 @@ int parse_sec_desc(struct mnt_idmap *idmap, struct smb_ntsd *pntsd,
+ 		pntsd->type |= cpu_to_le16(DACL_PROTECTED);
+ 
+ 	if (dacloffset) {
++		if (dacloffset < sizeof(struct smb_ntsd))
++			return -EINVAL;
++
+ 		parse_dacl(idmap, dacl_ptr, end_of_acl,
+ 			   owner_sid_ptr, group_sid_ptr, fattr);
+ 	}
+diff --git a/fs/smb/server/transport_ipc.c b/fs/smb/server/transport_ipc.c
+index 69bac122adbe06..87af57cf35a157 100644
+--- a/fs/smb/server/transport_ipc.c
++++ b/fs/smb/server/transport_ipc.c
+@@ -281,6 +281,7 @@ static int handle_response(int type, void *payload, size_t sz)
+ 		if (entry->type + 1 != type) {
+ 			pr_err("Waiting for IPC type %d, got %d. Ignore.\n",
+ 			       entry->type + 1, type);
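++			/* Keep looking for the entry that matches this response type */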
++			continue;
+ 		}
+ 
+ 		entry->response = kvzalloc(sz, GFP_KERNEL);
+diff --git a/include/asm-generic/hugetlb.h b/include/asm-generic/hugetlb.h
+index 594d5905f61512..215bf9f317cbfd 100644
+--- a/include/asm-generic/hugetlb.h
++++ b/include/asm-generic/hugetlb.h
+@@ -84,7 +84,7 @@ static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+ 
+ #ifndef __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
+ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+-		unsigned long addr, pte_t *ptep)
++		unsigned long addr, pte_t *ptep, unsigned long sz)
+ {
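++	/* The generic implementation ignores sz; architectures may use it */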
+ 	return ptep_get_and_clear(mm, addr, ptep);
+ }
+diff --git a/include/drm/drm_client_setup.h b/include/drm/drm_client_setup.h
+new file mode 100644
+index 00000000000000..46aab3fb46be54
+--- /dev/null
++++ b/include/drm/drm_client_setup.h
+@@ -0,0 +1,26 @@
++/* SPDX-License-Identifier: MIT */
++
++#ifndef DRM_CLIENT_SETUP_H
++#define DRM_CLIENT_SETUP_H
++
++#include <linux/types.h>
++
++struct drm_device;
++struct drm_format_info;
++
++#if defined(CONFIG_DRM_CLIENT_SETUP)
++void drm_client_setup(struct drm_device *dev, const struct drm_format_info *format);
++void drm_client_setup_with_fourcc(struct drm_device *dev, u32 fourcc);
++void drm_client_setup_with_color_mode(struct drm_device *dev, unsigned int color_mode);
++#else
++static inline void drm_client_setup(struct drm_device *dev,
++				    const struct drm_format_info *format)
++{ }
++static inline void drm_client_setup_with_fourcc(struct drm_device *dev, u32 fourcc)
++{ }
++static inline void drm_client_setup_with_color_mode(struct drm_device *dev,
++						    unsigned int color_mode)
++{ }
++#endif
++
++#endif
+diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
+index 02ea4e3248fdf9..36a606af4ba1d8 100644
+--- a/include/drm/drm_drv.h
++++ b/include/drm/drm_drv.h
+@@ -34,6 +34,8 @@
+ 
+ #include <drm/drm_device.h>
+ 
++struct drm_fb_helper;
++struct drm_fb_helper_surface_size;
+ struct drm_file;
+ struct drm_gem_object;
+ struct drm_master;
+@@ -366,6 +368,22 @@ struct drm_driver {
+ 			       struct drm_device *dev, uint32_t handle,
+ 			       uint64_t *offset);
+ 
++	/**
++	 * @fbdev_probe
++	 *
++	 * Allocates and initializes the fb_info structure for fbdev emulation.
++	 * Furthermore it also needs to allocate the DRM framebuffer used to
++	 * back the fbdev.
++	 *
++	 * This callback is mandatory for fbdev support.
++	 *
++	 * Returns:
++	 *
++	 * 0 on success or a negative error code otherwise.
++	 */
++	int (*fbdev_probe)(struct drm_fb_helper *fbdev_helper,
++			   struct drm_fb_helper_surface_size *sizes);
++
+ 	/**
+ 	 * @show_fdinfo:
+ 	 *
+diff --git a/include/drm/drm_fbdev_client.h b/include/drm/drm_fbdev_client.h
+new file mode 100644
+index 00000000000000..e11a5614f127c9
+--- /dev/null
++++ b/include/drm/drm_fbdev_client.h
+@@ -0,0 +1,19 @@
++/* SPDX-License-Identifier: MIT */
++
++#ifndef DRM_FBDEV_CLIENT_H
++#define DRM_FBDEV_CLIENT_H
++
++struct drm_device;
++struct drm_format_info;
++
++#ifdef CONFIG_DRM_FBDEV_EMULATION
++int drm_fbdev_client_setup(struct drm_device *dev, const struct drm_format_info *format);
++#else
++static inline int drm_fbdev_client_setup(struct drm_device *dev,
++					 const struct drm_format_info *format)
++{
++	return 0;
++}
++#endif
++
++#endif
+diff --git a/include/drm/drm_fbdev_ttm.h b/include/drm/drm_fbdev_ttm.h
+index 9e6c3bdf35376a..243685d02eb13a 100644
+--- a/include/drm/drm_fbdev_ttm.h
++++ b/include/drm/drm_fbdev_ttm.h
+@@ -3,11 +3,24 @@
+ #ifndef DRM_FBDEV_TTM_H
+ #define DRM_FBDEV_TTM_H
+ 
++#include <linux/stddef.h>
++
+ struct drm_device;
++struct drm_fb_helper;
++struct drm_fb_helper_surface_size;
+ 
+ #ifdef CONFIG_DRM_FBDEV_EMULATION
++int drm_fbdev_ttm_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
++				     struct drm_fb_helper_surface_size *sizes);
++
++#define DRM_FBDEV_TTM_DRIVER_OPS \
++	.fbdev_probe = drm_fbdev_ttm_driver_fbdev_probe
++
+ void drm_fbdev_ttm_setup(struct drm_device *dev, unsigned int preferred_bpp);
+ #else
++#define DRM_FBDEV_TTM_DRIVER_OPS \
++	.fbdev_probe = NULL
++
+ static inline void drm_fbdev_ttm_setup(struct drm_device *dev, unsigned int preferred_bpp)
+ { }
+ #endif
+diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h
+index ccf91daa430702..c3f4405d66629e 100644
+--- a/include/drm/drm_fourcc.h
++++ b/include/drm/drm_fourcc.h
+@@ -313,6 +313,7 @@ drm_get_format_info(struct drm_device *dev,
+ uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth);
+ uint32_t drm_driver_legacy_fb_format(struct drm_device *dev,
+ 				     uint32_t bpp, uint32_t depth);
++uint32_t drm_driver_color_mode_format(struct drm_device *dev, unsigned int color_mode);
+ unsigned int drm_format_info_block_width(const struct drm_format_info *info,
+ 					 int plane);
+ unsigned int drm_format_info_block_height(const struct drm_format_info *info,
+diff --git a/include/linux/compaction.h b/include/linux/compaction.h
+index e9477649604964..7bf0c521db6340 100644
+--- a/include/linux/compaction.h
++++ b/include/linux/compaction.h
+@@ -80,6 +80,11 @@ static inline unsigned long compact_gap(unsigned int order)
+ 	return 2UL << order;
+ }
+ 
++static inline int current_is_kcompactd(void)
++{
++	return current->flags & PF_KCOMPACTD;
++}
++
+ #ifdef CONFIG_COMPACTION
+ 
+ extern unsigned int extfrag_for_order(struct zone *zone, unsigned int order);
+diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
+index b8b935b526033f..b0ed740ca749bb 100644
+--- a/include/linux/ethtool.h
++++ b/include/linux/ethtool.h
+@@ -412,6 +412,29 @@ struct ethtool_eth_phy_stats {
+ 	);
+ };
+ 
++/**
++ * struct ethtool_phy_stats - PHY-level statistics counters
++ * @rx_packets: Total successfully received frames
++ * @rx_bytes: Total successfully received bytes
++ * @rx_errors: Total received frames with errors (e.g., CRC errors)
++ * @tx_packets: Total successfully transmitted frames
++ * @tx_bytes: Total successfully transmitted bytes
++ * @tx_errors: Total transmitted frames with errors
++ *
++ * This structure provides a standardized interface for reporting
++ * PHY-level statistics counters. It is designed to expose statistics
++ * commonly provided by PHYs but not explicitly defined in the IEEE
++ * 802.3 standard.
++ */
++struct ethtool_phy_stats {
++	u64 rx_packets;
++	u64 rx_bytes;
++	u64 rx_errors;
++	u64 tx_packets;
++	u64 tx_bytes;
++	u64 tx_errors;
++};
++
+ /* Basic IEEE 802.3 MAC Ctrl statistics (30.3.3.*), not otherwise exposed
+  * via a more targeted API.
+  */
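A PHY driver filling this structure is expected to touch only the counters it can actually read; everything else stays at ETHTOOL_STAT_NOT_SET and is skipped by the netlink code (see the get_phy_stats documentation in the phy.h hunk below). A hedged sketch, with the register names being made-up placeholders:

    #include <linux/ethtool.h>
    #include <linux/phy.h>

    #define EXAMPLE_RX_PKT_REG	0x12	/* hypothetical vendor register */
    #define EXAMPLE_RX_ERR_REG	0x13	/* hypothetical vendor register */

    static u64 example_read_counter(struct phy_device *phydev, u32 reg)
    {
    	int val = phy_read(phydev, reg);

    	return val < 0 ? 0 : val;
    }

    static void example_get_phy_stats(struct phy_device *phydev,
    				  struct ethtool_eth_phy_stats *eth_stats,
    				  struct ethtool_phy_stats *stats)
    {
    	/* Only rx counters exist on this hypothetical PHY; the tx_*
    	 * members keep their ETHTOOL_STAT_NOT_SET preset and are not
    	 * reported to userspace. */
    	stats->rx_packets = example_read_counter(phydev, EXAMPLE_RX_PKT_REG);
    	stats->rx_errors  = example_read_counter(phydev, EXAMPLE_RX_ERR_REG);
    }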
+diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
+index e4697539b665a2..25a7b13574c28b 100644
+--- a/include/linux/hugetlb.h
++++ b/include/linux/hugetlb.h
+@@ -1009,7 +1009,9 @@ static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
+ static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
+ 						unsigned long addr, pte_t *ptep)
+ {
+-	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
++	unsigned long psize = huge_page_size(hstate_vma(vma));
++
++	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, psize);
+ }
+ #endif
+ 
+diff --git a/include/linux/nvme-tcp.h b/include/linux/nvme-tcp.h
+index e07e8978d691b7..e435250fcb4d05 100644
+--- a/include/linux/nvme-tcp.h
++++ b/include/linux/nvme-tcp.h
+@@ -13,6 +13,8 @@
+ #define NVME_TCP_ADMIN_CCSZ	SZ_8K
+ #define NVME_TCP_DIGEST_LENGTH	4
+ #define NVME_TCP_MIN_MAXH2CDATA 4096
++#define NVME_TCP_MIN_C2HTERM_PLEN	24
++#define NVME_TCP_MAX_C2HTERM_PLEN	152
+ 
+ enum nvme_tcp_pfv {
+ 	NVME_TCP_PFV_1_0 = 0x0,
+diff --git a/include/linux/nvme.h b/include/linux/nvme.h
+index b58d9405d65e01..1c101f6fad2f31 100644
+--- a/include/linux/nvme.h
++++ b/include/linux/nvme.h
+@@ -388,6 +388,7 @@ enum {
+ 	NVME_CTRL_CTRATT_PREDICTABLE_LAT	= 1 << 5,
+ 	NVME_CTRL_CTRATT_NAMESPACE_GRANULARITY	= 1 << 7,
+ 	NVME_CTRL_CTRATT_UUID_LIST		= 1 << 9,
++	NVME_CTRL_SGLS_MSDS                     = 1 << 19,
+ };
+ 
+ struct nvme_lbaf {
+diff --git a/include/linux/phy.h b/include/linux/phy.h
+index a98bc91a0cde9c..945264f457d8aa 100644
+--- a/include/linux/phy.h
++++ b/include/linux/phy.h
+@@ -1090,6 +1090,35 @@ struct phy_driver {
+ 	int (*cable_test_get_status)(struct phy_device *dev, bool *finished);
+ 
+ 	/* Get statistics from the PHY using ethtool */
++	/**
++	 * @get_phy_stats: Retrieve PHY statistics.
++	 * @dev: The PHY device for which the statistics are retrieved.
++	 * @eth_stats: structure where Ethernet PHY stats will be stored.
++	 * @stats: structure where additional PHY-specific stats will be stored.
++	 *
++	 * Retrieves the supported PHY statistics and populates the provided
++	 * structures. The input structures are pre-initialized with
++	 * `ETHTOOL_STAT_NOT_SET`, and the driver must only modify members
++	 * corresponding to supported statistics. Unmodified members will remain
++	 * set to `ETHTOOL_STAT_NOT_SET` and will not be returned to userspace.
++	 */
++	void (*get_phy_stats)(struct phy_device *dev,
++			      struct ethtool_eth_phy_stats *eth_stats,
++			      struct ethtool_phy_stats *stats);
++
++	/**
++	 * @get_link_stats: Retrieve link statistics.
++	 * @dev: The PHY device for which the statistics are retrieved.
++	 * @link_stats: structure where link-specific stats will be stored.
++	 *
++	 * Retrieves link-related statistics for the given PHY device. The input
++	 * structure is pre-initialized with `ETHTOOL_STAT_NOT_SET`, and the
++	 * driver must only modify members corresponding to supported
++	 * statistics. Unmodified members will remain set to
++	 * `ETHTOOL_STAT_NOT_SET` and will not be returned to userspace.
++	 */
++	void (*get_link_stats)(struct phy_device *dev,
++			       struct ethtool_link_ext_stats *link_stats);
+ 	/** @get_sset_count: Number of statistic counters */
+ 	int (*get_sset_count)(struct phy_device *dev);
+ 	/** @get_strings: Names of the statistic counters */
+@@ -2055,6 +2084,13 @@ int phy_ethtool_get_strings(struct phy_device *phydev, u8 *data);
+ int phy_ethtool_get_sset_count(struct phy_device *phydev);
+ int phy_ethtool_get_stats(struct phy_device *phydev,
+ 			  struct ethtool_stats *stats, u64 *data);
++
++void __phy_ethtool_get_phy_stats(struct phy_device *phydev,
++			 struct ethtool_eth_phy_stats *phy_stats,
++			 struct ethtool_phy_stats *phydev_stats);
++void __phy_ethtool_get_link_ext_stats(struct phy_device *phydev,
++				      struct ethtool_link_ext_stats *link_stats);
++
+ int phy_ethtool_get_plca_cfg(struct phy_device *phydev,
+ 			     struct phy_plca_cfg *plca_cfg);
+ int phy_ethtool_set_plca_cfg(struct phy_device *phydev,
+diff --git a/include/linux/phylib_stubs.h b/include/linux/phylib_stubs.h
+index 1279f48c8a7077..9d2d6090c86d12 100644
+--- a/include/linux/phylib_stubs.h
++++ b/include/linux/phylib_stubs.h
+@@ -5,6 +5,9 @@
+ 
+ #include <linux/rtnetlink.h>
+ 
++struct ethtool_eth_phy_stats;
++struct ethtool_link_ext_stats;
++struct ethtool_phy_stats;
+ struct kernel_hwtstamp_config;
+ struct netlink_ext_ack;
+ struct phy_device;
+@@ -19,6 +22,11 @@ struct phylib_stubs {
+ 	int (*hwtstamp_set)(struct phy_device *phydev,
+ 			    struct kernel_hwtstamp_config *config,
+ 			    struct netlink_ext_ack *extack);
++	void (*get_phy_stats)(struct phy_device *phydev,
++			      struct ethtool_eth_phy_stats *phy_stats,
++			      struct ethtool_phy_stats *phydev_stats);
++	void (*get_link_ext_stats)(struct phy_device *phydev,
++				   struct ethtool_link_ext_stats *link_stats);
+ };
+ 
+ static inline int phy_hwtstamp_get(struct phy_device *phydev,
+@@ -50,6 +58,29 @@ static inline int phy_hwtstamp_set(struct phy_device *phydev,
+ 	return phylib_stubs->hwtstamp_set(phydev, config, extack);
+ }
+ 
++static inline void phy_ethtool_get_phy_stats(struct phy_device *phydev,
++					struct ethtool_eth_phy_stats *phy_stats,
++					struct ethtool_phy_stats *phydev_stats)
++{
++	ASSERT_RTNL();
++
++	if (!phylib_stubs)
++		return;
++
++	phylib_stubs->get_phy_stats(phydev, phy_stats, phydev_stats);
++}
++
++static inline void phy_ethtool_get_link_ext_stats(struct phy_device *phydev,
++				    struct ethtool_link_ext_stats *link_stats)
++{
++	ASSERT_RTNL();
++
++	if (!phylib_stubs)
++		return;
++
++	phylib_stubs->get_link_ext_stats(phydev, link_stats);
++}
++
+ #else
+ 
+ static inline int phy_hwtstamp_get(struct phy_device *phydev,
+@@ -65,4 +96,15 @@ static inline int phy_hwtstamp_set(struct phy_device *phydev,
+ 	return -EOPNOTSUPP;
+ }
+ 
++static inline void phy_ethtool_get_phy_stats(struct phy_device *phydev,
++					struct ethtool_eth_phy_stats *phy_stats,
++					struct ethtool_phy_stats *phydev_stats)
++{
++}
++
++static inline void phy_ethtool_get_link_ext_stats(struct phy_device *phydev,
++				    struct ethtool_link_ext_stats *link_stats)
++{
++}
++
+ #endif
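The stub indirection lets built-in ethtool code reach phylib even when phylib is built as a module: phylib fills a function table at load time, and the static inlines above bounce through it under RTNL. A sketch of the provider side, mirroring the existing hwtstamp stubs (the exact wiring inside phylib is an assumption):

    /* In phylib proper; phylib_stubs is the pointer the inlines test. */
    static const struct phylib_stubs __phylib_stubs = {
    	.hwtstamp_get       = __phy_hwtstamp_get,
    	.hwtstamp_set       = __phy_hwtstamp_set,
    	.get_phy_stats      = __phy_ethtool_get_phy_stats,
    	.get_link_ext_stats = __phy_ethtool_get_link_ext_stats,
    };

    static void phylib_register_stubs(void)
    {
    	phylib_stubs = &__phylib_stubs;	/* set under rtnl_lock() */
    }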
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 8982820dae2131..0d1d70aded38f6 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1682,7 +1682,7 @@ extern struct pid *cad_pid;
+ #define PF_USED_MATH		0x00002000	/* If unset the fpu must be initialized before use */
+ #define PF_USER_WORKER		0x00004000	/* Kernel thread cloned from userspace thread */
+ #define PF_NOFREEZE		0x00008000	/* This thread should not be frozen */
+-#define PF__HOLE__00010000	0x00010000
++#define PF_KCOMPACTD		0x00010000	/* I am kcompactd */
+ #define PF_KSWAPD		0x00020000	/* I am kswapd */
+ #define PF_MEMALLOC_NOFS	0x00040000	/* All allocations inherit GFP_NOFS. See memalloc_nfs_save() */
+ #define PF_MEMALLOC_NOIO	0x00080000	/* All allocations inherit GFP_NOIO. See memalloc_noio_save() */
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index a0e1d2124727e1..5fff74c736063c 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -11846,6 +11846,8 @@ void perf_pmu_unregister(struct pmu *pmu)
+ {
+ 	mutex_lock(&pmus_lock);
+ 	list_del_rcu(&pmu->entry);
++	idr_remove(&pmu_idr, pmu->type);
++	mutex_unlock(&pmus_lock);
+ 
+ 	/*
+ 	 * We dereference the pmu list under both SRCU and regular RCU, so
+@@ -11855,7 +11857,6 @@ void perf_pmu_unregister(struct pmu *pmu)
+ 	synchronize_rcu();
+ 
+ 	free_percpu(pmu->pmu_disable_count);
+-	idr_remove(&pmu_idr, pmu->type);
+ 	if (pmu_bus_running && pmu->dev && pmu->dev != PMU_NULL_DEV) {
+ 		if (pmu->nr_addr_filters)
+ 			device_remove_file(pmu->dev, &dev_attr_nr_addr_filters);
+@@ -11863,7 +11864,6 @@ void perf_pmu_unregister(struct pmu *pmu)
+ 		put_device(pmu->dev);
+ 	}
+ 	free_pmu_context(pmu);
+-	mutex_unlock(&pmus_lock);
+ }
+ EXPORT_SYMBOL_GPL(perf_pmu_unregister);
+ 
+diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
+index a0e0676f5d8bbe..4fdc08ca0f3cbd 100644
+--- a/kernel/events/uprobes.c
++++ b/kernel/events/uprobes.c
+@@ -1775,6 +1775,7 @@ void uprobe_free_utask(struct task_struct *t)
+ 	if (!utask)
+ 		return;
+ 
++	t->utask = NULL;
+ 	if (utask->active_uprobe)
+ 		put_uprobe(utask->active_uprobe);
+ 
+@@ -1784,7 +1785,6 @@ void uprobe_free_utask(struct task_struct *t)
+ 
+ 	xol_free_insn_slot(t);
+ 	kfree(utask);
+-	t->utask = NULL;
+ }
+ 
+ /*
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index ddc096d6b0c203..58ba14ed8fbcb9 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -4155,15 +4155,17 @@ static inline bool child_cfs_rq_on_list(struct cfs_rq *cfs_rq)
+ {
+ 	struct cfs_rq *prev_cfs_rq;
+ 	struct list_head *prev;
++	struct rq *rq = rq_of(cfs_rq);
+ 
+ 	if (cfs_rq->on_list) {
+ 		prev = cfs_rq->leaf_cfs_rq_list.prev;
+ 	} else {
+-		struct rq *rq = rq_of(cfs_rq);
+-
+ 		prev = rq->tmp_alone_branch;
+ 	}
+ 
++	if (prev == &rq->leaf_cfs_rq_list)
++		return false;
++
+ 	prev_cfs_rq = container_of(prev, struct cfs_rq, leaf_cfs_rq_list);
+ 
+ 	return (prev_cfs_rq->tg->parent == cfs_rq->tg);
+diff --git a/kernel/trace/trace_fprobe.c b/kernel/trace/trace_fprobe.c
+index c62d1629cffecd..99048c33038223 100644
+--- a/kernel/trace/trace_fprobe.c
++++ b/kernel/trace/trace_fprobe.c
+@@ -1018,6 +1018,19 @@ static int parse_symbol_and_return(int argc, const char *argv[],
+ 	if (*is_return)
+ 		return 0;
+ 
++	if (is_tracepoint) {
++		tmp = *symbol;
++		while (*tmp && (isalnum(*tmp) || *tmp == '_'))
++			tmp++;
++		if (*tmp) {
++			/* found an invalid character. */
++			trace_probe_log_err(tmp - *symbol, BAD_TP_NAME);
++			kfree(*symbol);
++			*symbol = NULL;
++			return -EINVAL;
++		}
++	}
++
+ 	/* If there is $retval, this should be a return fprobe. */
+ 	for (i = 2; i < argc; i++) {
+ 		tmp = strstr(argv[i], "$retval");
+@@ -1025,6 +1038,8 @@ static int parse_symbol_and_return(int argc, const char *argv[],
+ 			if (is_tracepoint) {
+ 				trace_probe_log_set_index(i);
+ 				trace_probe_log_err(tmp - argv[i], RETVAL_ON_PROBE);
++				kfree(*symbol);
++				*symbol = NULL;
+ 				return -EINVAL;
+ 			}
+ 			*is_return = true;
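The new check rejects any tracepoint name containing characters outside [A-Za-z0-9_] before the more expensive parsing runs. Restated as a standalone predicate for clarity (illustrative userspace C, not kernel code):

    #include <ctype.h>
    #include <stdbool.h>

    /* Mirrors the loop added above: a tracepoint name may consist only
     * of alphanumerics and underscores. */
    static bool tp_name_is_valid(const char *name)
    {
    	for (; *name; name++)
    		if (!isalnum((unsigned char)*name) && *name != '_')
    			return false;
    	return true;
    }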
+diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h
+index 5803e6a4157055..8a6797c2278d90 100644
+--- a/kernel/trace/trace_probe.h
++++ b/kernel/trace/trace_probe.h
+@@ -36,7 +36,6 @@
+ #define MAX_BTF_ARGS_LEN	128
+ #define MAX_DENTRY_ARGS_LEN	256
+ #define MAX_STRING_SIZE		PATH_MAX
+-#define MAX_ARG_BUF_LEN		(MAX_TRACE_ARGS * MAX_ARG_NAME_LEN)
+ 
+ /* Reserved field names */
+ #define FIELD_STRING_IP		"__probe_ip"
+@@ -481,6 +480,7 @@ extern int traceprobe_define_arg_fields(struct trace_event_call *event_call,
+ 	C(NON_UNIQ_SYMBOL,	"The symbol is not unique"),		\
+ 	C(BAD_RETPROBE,		"Retprobe address must be a function entry"), \
+ 	C(NO_TRACEPOINT,	"Tracepoint is not found"),		\
++	C(BAD_TP_NAME,		"Invalid character in tracepoint name"),\
+ 	C(BAD_ADDR_SUFFIX,	"Invalid probed address suffix"), \
+ 	C(NO_GROUP_NAME,	"Group name is not specified"),		\
+ 	C(GROUP_TOO_LONG,	"Group name is too long"),		\
+diff --git a/mm/compaction.c b/mm/compaction.c
+index 384e4672998e55..77dbb9022b47f0 100644
+--- a/mm/compaction.c
++++ b/mm/compaction.c
+@@ -3164,6 +3164,7 @@ static int kcompactd(void *p)
+ 	if (!cpumask_empty(cpumask))
+ 		set_cpus_allowed_ptr(tsk, cpumask);
+ 
++	current->flags |= PF_KCOMPACTD;
+ 	set_freezable();
+ 
+ 	pgdat->kcompactd_max_order = 0;
+@@ -3220,6 +3221,8 @@ static int kcompactd(void *p)
+ 			pgdat->proactive_compact_trigger = false;
+ 	}
+ 
++	current->flags &= ~PF_KCOMPACTD;
++
+ 	return 0;
+ }
+ 
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index bdee6d3ab0e7e3..1e9aa6de4e21ea 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -5395,7 +5395,7 @@ static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr,
+ 	if (src_ptl != dst_ptl)
+ 		spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
+ 
+-	pte = huge_ptep_get_and_clear(mm, old_addr, src_pte);
++	pte = huge_ptep_get_and_clear(mm, old_addr, src_pte, sz);
+ 
+ 	if (need_clear_uffd_wp && pte_marker_uffd_wp(pte))
+ 		huge_pte_clear(mm, new_addr, dst_pte, sz);
+@@ -5570,7 +5570,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
+ 			set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
+ 		}
+ 
+-		pte = huge_ptep_get_and_clear(mm, address, ptep);
++		pte = huge_ptep_get_and_clear(mm, address, ptep, sz);
+ 		tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
+ 		if (huge_pte_dirty(pte))
+ 			set_page_dirty(page);
+diff --git a/mm/internal.h b/mm/internal.h
+index 9bb098e78f1556..398633d6b6c9f0 100644
+--- a/mm/internal.h
++++ b/mm/internal.h
+@@ -1101,7 +1101,7 @@ static inline int find_next_best_node(int node, nodemask_t *used_node_mask)
+  * mm/memory-failure.c
+  */
+ #ifdef CONFIG_MEMORY_FAILURE
+-void unmap_poisoned_folio(struct folio *folio, enum ttu_flags ttu);
++int unmap_poisoned_folio(struct folio *folio, unsigned long pfn, bool must_kill);
+ void shake_folio(struct folio *folio);
+ extern int hwpoison_filter(struct page *p);
+ 
+@@ -1123,8 +1123,9 @@ void add_to_kill_ksm(struct task_struct *tsk, struct page *p,
+ unsigned long page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
+ 
+ #else
+-static inline void unmap_poisoned_folio(struct folio *folio, enum ttu_flags ttu)
++static inline int unmap_poisoned_folio(struct folio *folio, unsigned long pfn, bool must_kill)
+ {
++	return -EBUSY;
+ }
+ #endif
+ 
+diff --git a/mm/kasan/kasan_test_rust.rs b/mm/kasan/kasan_test_rust.rs
+index caa7175964ef64..5b34edf30e7244 100644
+--- a/mm/kasan/kasan_test_rust.rs
++++ b/mm/kasan/kasan_test_rust.rs
+@@ -11,11 +11,12 @@
+ /// drop the vector, and touch it.
+ #[no_mangle]
+ pub extern "C" fn kasan_test_rust_uaf() -> u8 {
+-    let mut v: Vec<u8> = Vec::new();
++    let mut v: KVec<u8> = KVec::new();
+     for _ in 0..4096 {
+         v.push(0x42, GFP_KERNEL).unwrap();
+     }
+     let ptr: *mut u8 = addr_of_mut!(v[2048]);
+     drop(v);
++    // SAFETY: Incorrect, on purpose.
+     unsafe { *ptr }
+ }
+diff --git a/mm/kmsan/hooks.c b/mm/kmsan/hooks.c
+index 3ea50f09311fd7..3df45c25c1f62f 100644
+--- a/mm/kmsan/hooks.c
++++ b/mm/kmsan/hooks.c
+@@ -357,6 +357,7 @@ void kmsan_handle_dma(struct page *page, size_t offset, size_t size,
+ 		size -= to_go;
+ 	}
+ }
++EXPORT_SYMBOL_GPL(kmsan_handle_dma);
+ 
+ void kmsan_handle_dma_sg(struct scatterlist *sg, int nents,
+ 			 enum dma_data_direction dir)
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index 96ce31e5a203be..fa25a022e64d71 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -1554,11 +1554,35 @@ static int get_hwpoison_page(struct page *p, unsigned long flags)
+ 	return ret;
+ }
+ 
+-void unmap_poisoned_folio(struct folio *folio, enum ttu_flags ttu)
++int unmap_poisoned_folio(struct folio *folio, unsigned long pfn, bool must_kill)
+ {
+-	if (folio_test_hugetlb(folio) && !folio_test_anon(folio)) {
+-		struct address_space *mapping;
++	enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_SYNC | TTU_HWPOISON;
++	struct address_space *mapping;
++
++	if (folio_test_swapcache(folio)) {
++		pr_err("%#lx: keeping poisoned page in swap cache\n", pfn);
++		ttu &= ~TTU_HWPOISON;
++	}
+ 
++	/*
++	 * Propagate the dirty bit from PTEs to struct page first, because we
++	 * need this to decide if we should kill or just drop the page.
++	 * XXX: the dirty test could be racy: set_page_dirty() may not always
++	 * be called inside page lock (it's recommended but not enforced).
++	 */
++	mapping = folio_mapping(folio);
++	if (!must_kill && !folio_test_dirty(folio) && mapping &&
++	    mapping_can_writeback(mapping)) {
++		if (folio_mkclean(folio)) {
++			folio_set_dirty(folio);
++		} else {
++			ttu &= ~TTU_HWPOISON;
++			pr_info("%#lx: corrupted page was clean: dropped without side effects\n",
++				pfn);
++		}
++	}
++
++	if (folio_test_hugetlb(folio) && !folio_test_anon(folio)) {
+ 		/*
+ 		 * For hugetlb folios in shared mappings, try_to_unmap
+ 		 * could potentially call huge_pmd_unshare.  Because of
+@@ -1570,7 +1594,7 @@ void unmap_poisoned_folio(struct folio *folio, enum ttu_flags ttu)
+ 		if (!mapping) {
+ 			pr_info("%#lx: could not lock mapping for mapped hugetlb folio\n",
+ 				folio_pfn(folio));
+-			return;
++			return -EBUSY;
+ 		}
+ 
+ 		try_to_unmap(folio, ttu|TTU_RMAP_LOCKED);
+@@ -1578,6 +1602,8 @@ void unmap_poisoned_folio(struct folio *folio, enum ttu_flags ttu)
+ 	} else {
+ 		try_to_unmap(folio, ttu);
+ 	}
++
++	return folio_mapped(folio) ? -EBUSY : 0;
+ }
+ 
+ /*
+@@ -1587,8 +1613,6 @@ void unmap_poisoned_folio(struct folio *folio, enum ttu_flags ttu)
+ static bool hwpoison_user_mappings(struct folio *folio, struct page *p,
+ 		unsigned long pfn, int flags)
+ {
+-	enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_SYNC | TTU_HWPOISON;
+-	struct address_space *mapping;
+ 	LIST_HEAD(tokill);
+ 	bool unmap_success;
+ 	int forcekill;
+@@ -1611,29 +1635,6 @@ static bool hwpoison_user_mappings(struct folio *folio, struct page *p,
+ 	if (!folio_mapped(folio))
+ 		return true;
+ 
+-	if (folio_test_swapcache(folio)) {
+-		pr_err("%#lx: keeping poisoned page in swap cache\n", pfn);
+-		ttu &= ~TTU_HWPOISON;
+-	}
+-
+-	/*
+-	 * Propagate the dirty bit from PTEs to struct page first, because we
+-	 * need this to decide if we should kill or just drop the page.
+-	 * XXX: the dirty test could be racy: set_page_dirty() may not always
+-	 * be called inside page lock (it's recommended but not enforced).
+-	 */
+-	mapping = folio_mapping(folio);
+-	if (!(flags & MF_MUST_KILL) && !folio_test_dirty(folio) && mapping &&
+-	    mapping_can_writeback(mapping)) {
+-		if (folio_mkclean(folio)) {
+-			folio_set_dirty(folio);
+-		} else {
+-			ttu &= ~TTU_HWPOISON;
+-			pr_info("%#lx: corrupted page was clean: dropped without side effects\n",
+-				pfn);
+-		}
+-	}
+-
+ 	/*
+ 	 * First collect all the processes that have the page
+ 	 * mapped in dirty form.  This has to be done before try_to_unmap,
+@@ -1641,9 +1642,7 @@ static bool hwpoison_user_mappings(struct folio *folio, struct page *p,
+ 	 */
+ 	collect_procs(folio, p, &tokill, flags & MF_ACTION_REQUIRED);
+ 
+-	unmap_poisoned_folio(folio, ttu);
+-
+-	unmap_success = !folio_mapped(folio);
++	unmap_success = !unmap_poisoned_folio(folio, pfn, flags & MF_MUST_KILL);
+ 	if (!unmap_success)
+ 		pr_err("%#lx: failed to unmap page (folio mapcount=%d)\n",
+ 		       pfn, folio_mapcount(folio));
+diff --git a/mm/memory.c b/mm/memory.c
+index d322ddfe679167..525f96ad65b8d7 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -2957,8 +2957,10 @@ static int __apply_to_page_range(struct mm_struct *mm, unsigned long addr,
+ 		next = pgd_addr_end(addr, end);
+ 		if (pgd_none(*pgd) && !create)
+ 			continue;
+-		if (WARN_ON_ONCE(pgd_leaf(*pgd)))
+-			return -EINVAL;
++		if (WARN_ON_ONCE(pgd_leaf(*pgd))) {
++			err = -EINVAL;
++			break;
++		}
+ 		if (!pgd_none(*pgd) && WARN_ON_ONCE(pgd_bad(*pgd))) {
+ 			if (!create)
+ 				continue;
+@@ -5077,7 +5079,11 @@ vm_fault_t finish_fault(struct vm_fault *vmf)
+ 	bool is_cow = (vmf->flags & FAULT_FLAG_WRITE) &&
+ 		      !(vma->vm_flags & VM_SHARED);
+ 	int type, nr_pages;
+-	unsigned long addr = vmf->address;
++	unsigned long addr;
++	bool needs_fallback = false;
++
++fallback:
++	addr = vmf->address;
+ 
+ 	/* Did we COW the page? */
+ 	if (is_cow)
+@@ -5116,7 +5122,8 @@ vm_fault_t finish_fault(struct vm_fault *vmf)
+ 	 * approach also applies to non-anonymous-shmem faults to avoid
+ 	 * inflating the RSS of the process.
+ 	 */
+-	if (!vma_is_anon_shmem(vma) || unlikely(userfaultfd_armed(vma))) {
++	if (!vma_is_anon_shmem(vma) || unlikely(userfaultfd_armed(vma)) ||
++	    unlikely(needs_fallback)) {
+ 		nr_pages = 1;
+ 	} else if (nr_pages > 1) {
+ 		pgoff_t idx = folio_page_idx(folio, page);
+@@ -5152,9 +5159,9 @@ vm_fault_t finish_fault(struct vm_fault *vmf)
+ 		ret = VM_FAULT_NOPAGE;
+ 		goto unlock;
+ 	} else if (nr_pages > 1 && !pte_range_none(vmf->pte, nr_pages)) {
+-		update_mmu_tlb_range(vma, addr, vmf->pte, nr_pages);
+-		ret = VM_FAULT_NOPAGE;
+-		goto unlock;
++		needs_fallback = true;
++		pte_unmap_unlock(vmf->pte, vmf->ptl);
++		goto fallback;
+ 	}
+ 
+ 	folio_ref_add(folio, nr_pages - 1);
+diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
+index 621ae1015106c5..619445096ef4a6 100644
+--- a/mm/memory_hotplug.c
++++ b/mm/memory_hotplug.c
+@@ -1795,26 +1795,24 @@ static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
+ 		if (folio_test_large(folio))
+ 			pfn = folio_pfn(folio) + folio_nr_pages(folio) - 1;
+ 
+-		/*
+-		 * HWPoison pages have elevated reference counts so the migration would
+-		 * fail on them. It also doesn't make any sense to migrate them in the
+-		 * first place. Still try to unmap such a page in case it is still mapped
+-		 * (keep the unmap as the catch all safety net).
+-		 */
++		if (!folio_try_get(folio))
++			continue;
++
++		if (unlikely(page_folio(page) != folio))
++			goto put_folio;
++
+ 		if (folio_test_hwpoison(folio) ||
+ 		    (folio_test_large(folio) && folio_test_has_hwpoisoned(folio))) {
+ 			if (WARN_ON(folio_test_lru(folio)))
+ 				folio_isolate_lru(folio);
+-			if (folio_mapped(folio))
+-				unmap_poisoned_folio(folio, TTU_IGNORE_MLOCK);
+-			continue;
+-		}
+-
+-		if (!folio_try_get(folio))
+-			continue;
++			if (folio_mapped(folio)) {
++				folio_lock(folio);
++				unmap_poisoned_folio(folio, pfn, false);
++				folio_unlock(folio);
++			}
+ 
+-		if (unlikely(page_folio(page) != folio))
+ 			goto put_folio;
++		}
+ 
+ 		if (!isolate_folio_to_list(folio, &source)) {
+ 			if (__ratelimit(&migrate_rs)) {
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index de65e8b4f75f21..e0a77fe1b6300d 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -4243,6 +4243,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
+ restart:
+ 	compaction_retries = 0;
+ 	no_progress_loops = 0;
++	compact_result = COMPACT_SKIPPED;
+ 	compact_priority = DEF_COMPACT_PRIORITY;
+ 	cpuset_mems_cookie = read_mems_allowed_begin();
+ 	zonelist_iter_cookie = zonelist_iter_begin();
+@@ -5991,11 +5992,10 @@ static void setup_per_zone_lowmem_reserve(void)
+ 
+ 			for (j = i + 1; j < MAX_NR_ZONES; j++) {
+ 				struct zone *upper_zone = &pgdat->node_zones[j];
+-				bool empty = !zone_managed_pages(upper_zone);
+ 
+ 				managed_pages += zone_managed_pages(upper_zone);
+ 
+-				if (clear || empty)
++				if (clear)
+ 					zone->lowmem_reserve[j] = 0;
+ 				else
+ 					zone->lowmem_reserve[j] = managed_pages / ratio;
+diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
+index ce13c40626472a..66011831d7983d 100644
+--- a/mm/userfaultfd.c
++++ b/mm/userfaultfd.c
+@@ -1215,6 +1215,7 @@ static int move_pages_pte(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd,
+ 		 */
+ 		if (!src_folio) {
+ 			struct folio *folio;
++			bool locked;
+ 
+ 			/*
+ 			 * Pin the page while holding the lock to be sure the
+@@ -1234,12 +1235,26 @@ static int move_pages_pte(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd,
+ 				goto out;
+ 			}
+ 
++			locked = folio_trylock(folio);
++			/*
++			 * We avoid waiting for folio lock with a raised
++			 * refcount for large folios because extra refcounts
++			 * will result in split_folio() failing later and
++			 * retrying.  If multiple tasks are trying to move a
++			 * large folio we can end up livelocking.
++			 */
++			if (!locked && folio_test_large(folio)) {
++				spin_unlock(src_ptl);
++				err = -EAGAIN;
++				goto out;
++			}
++
+ 			folio_get(folio);
+ 			src_folio = folio;
+ 			src_folio_pte = orig_src_pte;
+ 			spin_unlock(src_ptl);
+ 
+-			if (!folio_trylock(src_folio)) {
++			if (!locked) {
+ 				pte_unmap(&orig_src_pte);
+ 				pte_unmap(&orig_dst_pte);
+ 				src_pte = dst_pte = NULL;
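The rule the comment describes (never sleep on a large folio's lock while holding an extra reference) reduces to a trylock-first pattern. A simplified sketch; the real code additionally drops the PTL and PTE maps before waiting, which is elided here:

    #include <linux/pagemap.h>

    static int lock_folio_for_move(struct folio *folio)
    {
    	if (folio_trylock(folio))
    		return 0;
    	/* A raised refcount makes a concurrent split_folio() fail, so
    	 * waiting here could livelock multiple movers; back off and
    	 * let the caller retry the whole operation. */
    	if (folio_test_large(folio))
    		return -EAGAIN;
    	folio_lock(folio);
    	return 0;
    }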
+diff --git a/mm/vma.c b/mm/vma.c
+index 7621384d64cf5f..c9ddc06b672a52 100644
+--- a/mm/vma.c
++++ b/mm/vma.c
+@@ -1417,24 +1417,28 @@ int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
+ static struct vm_area_struct *vma_modify(struct vma_merge_struct *vmg)
+ {
+ 	struct vm_area_struct *vma = vmg->vma;
++	unsigned long start = vmg->start;
++	unsigned long end = vmg->end;
+ 	struct vm_area_struct *merged;
+ 
+ 	/* First, try to merge. */
+ 	merged = vma_merge_existing_range(vmg);
+ 	if (merged)
+ 		return merged;
++	if (vmg_nomem(vmg))
++		return ERR_PTR(-ENOMEM);
+ 
+ 	/* Split any preceding portion of the VMA. */
+-	if (vma->vm_start < vmg->start) {
+-		int err = split_vma(vmg->vmi, vma, vmg->start, 1);
++	if (vma->vm_start < start) {
++		int err = split_vma(vmg->vmi, vma, start, 1);
+ 
+ 		if (err)
+ 			return ERR_PTR(err);
+ 	}
+ 
+ 	/* Split any trailing portion of the VMA. */
+-	if (vma->vm_end > vmg->end) {
+-		int err = split_vma(vmg->vmi, vma, vmg->end, 0);
++	if (vma->vm_end > end) {
++		int err = split_vma(vmg->vmi, vma, end, 0);
+ 
+ 		if (err)
+ 			return ERR_PTR(err);
+diff --git a/mm/vmalloc.c b/mm/vmalloc.c
+index 3f9255dfacb0c1..fd70a7cd1c8fa8 100644
+--- a/mm/vmalloc.c
++++ b/mm/vmalloc.c
+@@ -586,13 +586,13 @@ static int vmap_small_pages_range_noflush(unsigned long addr, unsigned long end,
+ 			mask |= PGTBL_PGD_MODIFIED;
+ 		err = vmap_pages_p4d_range(pgd, addr, next, prot, pages, &nr, &mask);
+ 		if (err)
+-			return err;
++			break;
+ 	} while (pgd++, addr = next, addr != end);
+ 
+ 	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
+ 		arch_sync_kernel_mappings(start, end);
+ 
+-	return 0;
++	return err;
+ }
+ 
+ /*
+diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
+index e45187b8822069..41be38264493df 100644
+--- a/net/8021q/vlan.c
++++ b/net/8021q/vlan.c
+@@ -131,7 +131,8 @@ int vlan_check_real_dev(struct net_device *real_dev,
+ {
+ 	const char *name = real_dev->name;
+ 
+-	if (real_dev->features & NETIF_F_VLAN_CHALLENGED) {
++	if (real_dev->features & NETIF_F_VLAN_CHALLENGED ||
++	    real_dev->type != ARPHRD_ETHER) {
+ 		pr_info("VLANs not supported on %s\n", name);
+ 		NL_SET_ERR_MSG_MOD(extack, "VLANs not supported on device");
+ 		return -EOPNOTSUPP;
+diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
+index 90c21b3edcd80e..c019f69c593955 100644
+--- a/net/bluetooth/mgmt.c
++++ b/net/bluetooth/mgmt.c
+@@ -9731,6 +9731,9 @@ void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
+ 				     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
+ 				     eir_precalc_len(sizeof(conn->dev_class)));
+ 
++	if (!skb)
++		return;
++
+ 	ev = skb_put(skb, sizeof(*ev));
+ 	bacpy(&ev->addr.bdaddr, &conn->dst);
+ 	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
+@@ -10484,6 +10487,8 @@ void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+ 
+ 	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
+ 			     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));
++	if (!skb)
++		return;
+ 
+ 	ev = skb_put(skb, sizeof(*ev));
+ 	bacpy(&ev->addr.bdaddr, bdaddr);
+diff --git a/net/ethtool/cabletest.c b/net/ethtool/cabletest.c
+index f22051f33868ac..84096f6b0236e8 100644
+--- a/net/ethtool/cabletest.c
++++ b/net/ethtool/cabletest.c
+@@ -72,8 +72,8 @@ int ethnl_act_cable_test(struct sk_buff *skb, struct genl_info *info)
+ 	dev = req_info.dev;
+ 
+ 	rtnl_lock();
+-	phydev = ethnl_req_get_phydev(&req_info,
+-				      tb[ETHTOOL_A_CABLE_TEST_HEADER],
++	phydev = ethnl_req_get_phydev(&req_info, tb,
++				      ETHTOOL_A_CABLE_TEST_HEADER,
+ 				      info->extack);
+ 	if (IS_ERR_OR_NULL(phydev)) {
+ 		ret = -EOPNOTSUPP;
+@@ -339,8 +339,8 @@ int ethnl_act_cable_test_tdr(struct sk_buff *skb, struct genl_info *info)
+ 		goto out_dev_put;
+ 
+ 	rtnl_lock();
+-	phydev = ethnl_req_get_phydev(&req_info,
+-				      tb[ETHTOOL_A_CABLE_TEST_TDR_HEADER],
++	phydev = ethnl_req_get_phydev(&req_info, tb,
++				      ETHTOOL_A_CABLE_TEST_TDR_HEADER,
+ 				      info->extack);
+ 	if (IS_ERR_OR_NULL(phydev)) {
+ 		ret = -EOPNOTSUPP;
+diff --git a/net/ethtool/linkstate.c b/net/ethtool/linkstate.c
+index 34d76e87847d08..05a5f72c99fab1 100644
+--- a/net/ethtool/linkstate.c
++++ b/net/ethtool/linkstate.c
+@@ -3,6 +3,7 @@
+ #include "netlink.h"
+ #include "common.h"
+ #include <linux/phy.h>
++#include <linux/phylib_stubs.h>
+ 
+ struct linkstate_req_info {
+ 	struct ethnl_req_info		base;
+@@ -26,9 +27,8 @@ const struct nla_policy ethnl_linkstate_get_policy[] = {
+ 		NLA_POLICY_NESTED(ethnl_header_policy_stats),
+ };
+ 
+-static int linkstate_get_sqi(struct net_device *dev)
++static int linkstate_get_sqi(struct phy_device *phydev)
+ {
+-	struct phy_device *phydev = dev->phydev;
+ 	int ret;
+ 
+ 	if (!phydev)
+@@ -46,9 +46,8 @@ static int linkstate_get_sqi(struct net_device *dev)
+ 	return ret;
+ }
+ 
+-static int linkstate_get_sqi_max(struct net_device *dev)
++static int linkstate_get_sqi_max(struct phy_device *phydev)
+ {
+-	struct phy_device *phydev = dev->phydev;
+ 	int ret;
+ 
+ 	if (!phydev)
+@@ -100,19 +99,28 @@ static int linkstate_prepare_data(const struct ethnl_req_info *req_base,
+ {
+ 	struct linkstate_reply_data *data = LINKSTATE_REPDATA(reply_base);
+ 	struct net_device *dev = reply_base->dev;
++	struct nlattr **tb = info->attrs;
++	struct phy_device *phydev;
+ 	int ret;
+ 
++	phydev = ethnl_req_get_phydev(req_base, tb, ETHTOOL_A_LINKSTATE_HEADER,
++				      info->extack);
++	if (IS_ERR(phydev)) {
++		ret = PTR_ERR(phydev);
++		goto out;
++	}
++
+ 	ret = ethnl_ops_begin(dev);
+ 	if (ret < 0)
+ 		return ret;
+ 	data->link = __ethtool_get_link(dev);
+ 
+-	ret = linkstate_get_sqi(dev);
++	ret = linkstate_get_sqi(phydev);
+ 	if (linkstate_sqi_critical_error(ret))
+ 		goto out;
+ 	data->sqi = ret;
+ 
+-	ret = linkstate_get_sqi_max(dev);
++	ret = linkstate_get_sqi_max(phydev);
+ 	if (linkstate_sqi_critical_error(ret))
+ 		goto out;
+ 	data->sqi_max = ret;
+@@ -127,9 +135,9 @@ static int linkstate_prepare_data(const struct ethnl_req_info *req_base,
+ 			   sizeof(data->link_stats) / 8);
+ 
+ 	if (req_base->flags & ETHTOOL_FLAG_STATS) {
+-		if (dev->phydev)
+-			data->link_stats.link_down_events =
+-				READ_ONCE(dev->phydev->link_down_events);
++		if (phydev)
++			phy_ethtool_get_link_ext_stats(phydev,
++						       &data->link_stats);
+ 
+ 		if (dev->ethtool_ops->get_link_ext_stats)
+ 			dev->ethtool_ops->get_link_ext_stats(dev,
+diff --git a/net/ethtool/netlink.c b/net/ethtool/netlink.c
+index 4d18dc29b30438..e233dfc8ca4bec 100644
+--- a/net/ethtool/netlink.c
++++ b/net/ethtool/netlink.c
+@@ -210,7 +210,7 @@ int ethnl_parse_header_dev_get(struct ethnl_req_info *req_info,
+ }
+ 
+ struct phy_device *ethnl_req_get_phydev(const struct ethnl_req_info *req_info,
+-					const struct nlattr *header,
++					struct nlattr **tb, unsigned int header,
+ 					struct netlink_ext_ack *extack)
+ {
+ 	struct phy_device *phydev;
+@@ -224,8 +224,8 @@ struct phy_device *ethnl_req_get_phydev(const struct ethnl_req_info *req_info,
+ 		return req_info->dev->phydev;
+ 
+ 	phydev = phy_link_topo_get_phy(req_info->dev, req_info->phy_index);
+-	if (!phydev) {
+-		NL_SET_ERR_MSG_ATTR(extack, header,
++	if (!phydev && tb) {
++		NL_SET_ERR_MSG_ATTR(extack, tb[header],
+ 				    "no phy matching phyindex");
+ 		return ERR_PTR(-ENODEV);
+ 	}
+diff --git a/net/ethtool/netlink.h b/net/ethtool/netlink.h
+index 203b08eb6c6f60..5e176938d6d228 100644
+--- a/net/ethtool/netlink.h
++++ b/net/ethtool/netlink.h
+@@ -275,7 +275,8 @@ static inline void ethnl_parse_header_dev_put(struct ethnl_req_info *req_info)
+  * ethnl_req_get_phydev() - Gets the phy_device targeted by this request,
+  *			    if any. Must be called under rtnl_lock().
+  * @req_info:	The ethnl request to get the phy from.
+- * @header:	The netlink header, used for error reporting.
++ * @tb:		The netlink attributes array, for error reporting.
++ * @header:	The netlink header index, used for error reporting.
+  * @extack:	The netlink extended ACK, for error reporting.
+  *
+  * The caller must hold RTNL, until it's done interacting with the returned
+@@ -289,7 +290,7 @@ static inline void ethnl_parse_header_dev_put(struct ethnl_req_info *req_info)
+  *	   is returned.
+  */
+ struct phy_device *ethnl_req_get_phydev(const struct ethnl_req_info *req_info,
+-					const struct nlattr *header,
++					struct nlattr **tb, unsigned int header,
+ 					struct netlink_ext_ack *extack);
+ 
+ /**
+diff --git a/net/ethtool/phy.c b/net/ethtool/phy.c
+index ed8f690f6bac81..e067cc234419dc 100644
+--- a/net/ethtool/phy.c
++++ b/net/ethtool/phy.c
+@@ -125,7 +125,7 @@ static int ethnl_phy_parse_request(struct ethnl_req_info *req_base,
+ 	struct phy_req_info *req_info = PHY_REQINFO(req_base);
+ 	struct phy_device *phydev;
+ 
+-	phydev = ethnl_req_get_phydev(req_base, tb[ETHTOOL_A_PHY_HEADER],
++	phydev = ethnl_req_get_phydev(req_base, tb, ETHTOOL_A_PHY_HEADER,
+ 				      extack);
+ 	if (!phydev)
+ 		return 0;
+diff --git a/net/ethtool/plca.c b/net/ethtool/plca.c
+index d95d92f173a6d2..e1f7820a6158f4 100644
+--- a/net/ethtool/plca.c
++++ b/net/ethtool/plca.c
+@@ -62,7 +62,7 @@ static int plca_get_cfg_prepare_data(const struct ethnl_req_info *req_base,
+ 	struct phy_device *phydev;
+ 	int ret;
+ 
+-	phydev = ethnl_req_get_phydev(req_base, tb[ETHTOOL_A_PLCA_HEADER],
++	phydev = ethnl_req_get_phydev(req_base, tb, ETHTOOL_A_PLCA_HEADER,
+ 				      info->extack);
+ 	// check that the PHY device is available and connected
+ 	if (IS_ERR_OR_NULL(phydev)) {
+@@ -152,7 +152,7 @@ ethnl_set_plca(struct ethnl_req_info *req_info, struct genl_info *info)
+ 	bool mod = false;
+ 	int ret;
+ 
+-	phydev = ethnl_req_get_phydev(req_info, tb[ETHTOOL_A_PLCA_HEADER],
++	phydev = ethnl_req_get_phydev(req_info, tb, ETHTOOL_A_PLCA_HEADER,
+ 				      info->extack);
+ 	// check that the PHY device is available and connected
+ 	if (IS_ERR_OR_NULL(phydev))
+@@ -211,7 +211,7 @@ static int plca_get_status_prepare_data(const struct ethnl_req_info *req_base,
+ 	struct phy_device *phydev;
+ 	int ret;
+ 
+-	phydev = ethnl_req_get_phydev(req_base, tb[ETHTOOL_A_PLCA_HEADER],
++	phydev = ethnl_req_get_phydev(req_base, tb, ETHTOOL_A_PLCA_HEADER,
+ 				      info->extack);
+ 	// check that the PHY device is available and connected
+ 	if (IS_ERR_OR_NULL(phydev)) {
+diff --git a/net/ethtool/pse-pd.c b/net/ethtool/pse-pd.c
+index a0705edca22a1a..71843de832cca7 100644
+--- a/net/ethtool/pse-pd.c
++++ b/net/ethtool/pse-pd.c
+@@ -64,7 +64,7 @@ static int pse_prepare_data(const struct ethnl_req_info *req_base,
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	phydev = ethnl_req_get_phydev(req_base, tb[ETHTOOL_A_PSE_HEADER],
++	phydev = ethnl_req_get_phydev(req_base, tb, ETHTOOL_A_PSE_HEADER,
+ 				      info->extack);
+ 	if (IS_ERR(phydev))
+ 		return -ENODEV;
+@@ -261,7 +261,7 @@ ethnl_set_pse(struct ethnl_req_info *req_info, struct genl_info *info)
+ 	struct phy_device *phydev;
+ 	int ret;
+ 
+-	phydev = ethnl_req_get_phydev(req_info, tb[ETHTOOL_A_PSE_HEADER],
++	phydev = ethnl_req_get_phydev(req_info, tb, ETHTOOL_A_PSE_HEADER,
+ 				      info->extack);
+ 	ret = ethnl_set_pse_validate(phydev, info);
+ 	if (ret)
+diff --git a/net/ethtool/stats.c b/net/ethtool/stats.c
+index 912f0c4fff2fb9..273ae4ff343fe8 100644
+--- a/net/ethtool/stats.c
++++ b/net/ethtool/stats.c
+@@ -1,5 +1,8 @@
+ // SPDX-License-Identifier: GPL-2.0-only
+ 
++#include <linux/phy.h>
++#include <linux/phylib_stubs.h>
++
+ #include "netlink.h"
+ #include "common.h"
+ #include "bitset.h"
+@@ -20,6 +23,7 @@ struct stats_reply_data {
+ 		struct ethtool_eth_mac_stats	mac_stats;
+ 		struct ethtool_eth_ctrl_stats	ctrl_stats;
+ 		struct ethtool_rmon_stats	rmon_stats;
++		struct ethtool_phy_stats	phydev_stats;
+ 	);
+ 	const struct ethtool_rmon_hist_range	*rmon_ranges;
+ };
+@@ -120,8 +124,15 @@ static int stats_prepare_data(const struct ethnl_req_info *req_base,
+ 	struct stats_reply_data *data = STATS_REPDATA(reply_base);
+ 	enum ethtool_mac_stats_src src = req_info->src;
+ 	struct net_device *dev = reply_base->dev;
++	struct nlattr **tb = info->attrs;
++	struct phy_device *phydev;
+ 	int ret;
+ 
++	phydev = ethnl_req_get_phydev(req_base, tb, ETHTOOL_A_STATS_HEADER,
++				      info->extack);
++	if (IS_ERR(phydev))
++		return PTR_ERR(phydev);
++
+ 	ret = ethnl_ops_begin(dev);
+ 	if (ret < 0)
+ 		return ret;
+@@ -145,6 +156,13 @@ static int stats_prepare_data(const struct ethnl_req_info *req_base,
+ 	data->ctrl_stats.src = src;
+ 	data->rmon_stats.src = src;
+ 
++	if (test_bit(ETHTOOL_STATS_ETH_PHY, req_info->stat_mask) &&
++	    src == ETHTOOL_MAC_STATS_SRC_AGGREGATE) {
++		if (phydev)
++			phy_ethtool_get_phy_stats(phydev, &data->phy_stats,
++						  &data->phydev_stats);
++	}
++
+ 	if (test_bit(ETHTOOL_STATS_ETH_PHY, req_info->stat_mask) &&
+ 	    dev->ethtool_ops->get_eth_phy_stats)
+ 		dev->ethtool_ops->get_eth_phy_stats(dev, &data->phy_stats);
+diff --git a/net/ethtool/strset.c b/net/ethtool/strset.c
+index b3382b3cf325c5..b9400d18f01d58 100644
+--- a/net/ethtool/strset.c
++++ b/net/ethtool/strset.c
+@@ -299,7 +299,7 @@ static int strset_prepare_data(const struct ethnl_req_info *req_base,
+ 		return 0;
+ 	}
+ 
+-	phydev = ethnl_req_get_phydev(req_base, tb[ETHTOOL_A_HEADER_FLAGS],
++	phydev = ethnl_req_get_phydev(req_base, tb, ETHTOOL_A_HEADER_FLAGS,
+ 				      info->extack);
+ 
+ 	/* phydev can be NULL, check for errors only */
+diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
+index 2308665b51c538..2dfac79dc78b8b 100644
+--- a/net/ipv4/tcp_offload.c
++++ b/net/ipv4/tcp_offload.c
+@@ -13,12 +13,15 @@
+ #include <net/tcp.h>
+ #include <net/protocol.h>
+ 
+-static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq,
++static void tcp_gso_tstamp(struct sk_buff *skb, struct sk_buff *gso_skb,
+ 			   unsigned int seq, unsigned int mss)
+ {
++	u32 flags = skb_shinfo(gso_skb)->tx_flags & SKBTX_ANY_TSTAMP;
++	u32 ts_seq = skb_shinfo(gso_skb)->tskey;
++
+ 	while (skb) {
+ 		if (before(ts_seq, seq + mss)) {
+-			skb_shinfo(skb)->tx_flags |= SKBTX_SW_TSTAMP;
++			skb_shinfo(skb)->tx_flags |= flags;
+ 			skb_shinfo(skb)->tskey = ts_seq;
+ 			return;
+ 		}
+@@ -193,8 +196,8 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
+ 	th = tcp_hdr(skb);
+ 	seq = ntohl(th->seq);
+ 
+-	if (unlikely(skb_shinfo(gso_skb)->tx_flags & SKBTX_SW_TSTAMP))
+-		tcp_gso_tstamp(segs, skb_shinfo(gso_skb)->tskey, seq, mss);
++	if (unlikely(skb_shinfo(gso_skb)->tx_flags & SKBTX_ANY_TSTAMP))
++		tcp_gso_tstamp(segs, gso_skb, seq, mss);
+ 
+ 	newcheck = ~csum_fold(csum_add(csum_unfold(th->check), delta));
+ 
+diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
+index a5be6e4ed326fb..ecfca59f31f13e 100644
+--- a/net/ipv4/udp_offload.c
++++ b/net/ipv4/udp_offload.c
+@@ -321,13 +321,17 @@ struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
+ 
+ 	/* clear destructor to avoid skb_segment assigning it to tail */
+ 	copy_dtor = gso_skb->destructor == sock_wfree;
+-	if (copy_dtor)
++	if (copy_dtor) {
+ 		gso_skb->destructor = NULL;
++		gso_skb->sk = NULL;
++	}
+ 
+ 	segs = skb_segment(gso_skb, features);
+ 	if (IS_ERR_OR_NULL(segs)) {
+-		if (copy_dtor)
++		if (copy_dtor) {
+ 			gso_skb->destructor = sock_wfree;
++			gso_skb->sk = sk;
++		}
+ 		return segs;
+ 	}
+ 
+diff --git a/net/ipv6/ila/ila_lwt.c b/net/ipv6/ila/ila_lwt.c
+index ff7e734e335b06..7d574f5132e2fb 100644
+--- a/net/ipv6/ila/ila_lwt.c
++++ b/net/ipv6/ila/ila_lwt.c
+@@ -88,13 +88,15 @@ static int ila_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+ 			goto drop;
+ 		}
+ 
+-		if (ilwt->connected) {
++		/* cache only if we don't create a dst reference loop */
++		if (ilwt->connected && orig_dst->lwtstate != dst->lwtstate) {
+ 			local_bh_disable();
+ 			dst_cache_set_ip6(&ilwt->dst_cache, dst, &fl6.saddr);
+ 			local_bh_enable();
+ 		}
+ 	}
+ 
++	skb_dst_drop(skb);
+ 	skb_dst_set(skb, dst);
+ 	return dst_output(net, sk, skb);
+ 
+diff --git a/net/llc/llc_s_ac.c b/net/llc/llc_s_ac.c
+index 06fb8e6944b06a..7a0cae9a811148 100644
+--- a/net/llc/llc_s_ac.c
++++ b/net/llc/llc_s_ac.c
+@@ -24,7 +24,7 @@
+ #include <net/llc_s_ac.h>
+ #include <net/llc_s_ev.h>
+ #include <net/llc_sap.h>
+-
++#include <net/sock.h>
+ 
+ /**
+  *	llc_sap_action_unitdata_ind - forward UI PDU to network layer
+@@ -40,6 +40,26 @@ int llc_sap_action_unitdata_ind(struct llc_sap *sap, struct sk_buff *skb)
+ 	return 0;
+ }
+ 
++static int llc_prepare_and_xmit(struct sk_buff *skb)
++{
++	struct llc_sap_state_ev *ev = llc_sap_ev(skb);
++	struct sk_buff *nskb;
++	int rc;
++
++	rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac);
++	if (rc)
++		return rc;
++
++	nskb = skb_clone(skb, GFP_ATOMIC);
++	if (!nskb)
++		return -ENOMEM;
++
++	if (skb->sk)
++		skb_set_owner_w(nskb, skb->sk);
++
++	return dev_queue_xmit(nskb);
++}
++
+ /**
+  *	llc_sap_action_send_ui - sends UI PDU resp to UNITDATA REQ to MAC layer
+  *	@sap: SAP
+@@ -52,17 +72,12 @@ int llc_sap_action_unitdata_ind(struct llc_sap *sap, struct sk_buff *skb)
+ int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb)
+ {
+ 	struct llc_sap_state_ev *ev = llc_sap_ev(skb);
+-	int rc;
+ 
+ 	llc_pdu_header_init(skb, LLC_PDU_TYPE_U, ev->saddr.lsap,
+ 			    ev->daddr.lsap, LLC_PDU_CMD);
+ 	llc_pdu_init_as_ui_cmd(skb);
+-	rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac);
+-	if (likely(!rc)) {
+-		skb_get(skb);
+-		rc = dev_queue_xmit(skb);
+-	}
+-	return rc;
++
++	return llc_prepare_and_xmit(skb);
+ }
+ 
+ /**
+@@ -77,17 +92,12 @@ int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb)
+ int llc_sap_action_send_xid_c(struct llc_sap *sap, struct sk_buff *skb)
+ {
+ 	struct llc_sap_state_ev *ev = llc_sap_ev(skb);
+-	int rc;
+ 
+ 	llc_pdu_header_init(skb, LLC_PDU_TYPE_U_XID, ev->saddr.lsap,
+ 			    ev->daddr.lsap, LLC_PDU_CMD);
+ 	llc_pdu_init_as_xid_cmd(skb, LLC_XID_NULL_CLASS_2, 0);
+-	rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac);
+-	if (likely(!rc)) {
+-		skb_get(skb);
+-		rc = dev_queue_xmit(skb);
+-	}
+-	return rc;
++
++	return llc_prepare_and_xmit(skb);
+ }
+ 
+ /**
+@@ -133,17 +143,12 @@ int llc_sap_action_send_xid_r(struct llc_sap *sap, struct sk_buff *skb)
+ int llc_sap_action_send_test_c(struct llc_sap *sap, struct sk_buff *skb)
+ {
+ 	struct llc_sap_state_ev *ev = llc_sap_ev(skb);
+-	int rc;
+ 
+ 	llc_pdu_header_init(skb, LLC_PDU_TYPE_U, ev->saddr.lsap,
+ 			    ev->daddr.lsap, LLC_PDU_CMD);
+ 	llc_pdu_init_as_test_cmd(skb);
+-	rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac);
+-	if (likely(!rc)) {
+-		skb_get(skb);
+-		rc = dev_queue_xmit(skb);
+-	}
+-	return rc;
++
++	return llc_prepare_and_xmit(skb);
+ }
+ 
+ int llc_sap_action_send_test_r(struct llc_sap *sap, struct sk_buff *skb)
+diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
+index 7a0242e937d364..bfe0514efca37f 100644
+--- a/net/mac80211/ieee80211_i.h
++++ b/net/mac80211/ieee80211_i.h
+@@ -1751,6 +1751,7 @@ struct ieee802_11_elems {
+ 	const struct ieee80211_eht_operation *eht_operation;
+ 	const struct ieee80211_multi_link_elem *ml_basic;
+ 	const struct ieee80211_multi_link_elem *ml_reconf;
++	const struct ieee80211_multi_link_elem *ml_epcs;
+ 	const struct ieee80211_bandwidth_indication *bandwidth_indication;
+ 	const struct ieee80211_ttlm_elem *ttlm[IEEE80211_TTLM_MAX_CNT];
+ 
+@@ -1781,6 +1782,7 @@ struct ieee802_11_elems {
+ 	/* mult-link element can be de-fragmented and thus u8 is not sufficient */
+ 	size_t ml_basic_len;
+ 	size_t ml_reconf_len;
++	size_t ml_epcs_len;
+ 
+ 	u8 ttlm_num;
+ 
+diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
+index 111066928b963c..88751b0eb317a3 100644
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -4733,6 +4733,7 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link,
+ 		parse_params.start = bss_ies->data;
+ 		parse_params.len = bss_ies->len;
+ 		parse_params.bss = cbss;
++		parse_params.link_id = -1;
+ 		bss_elems = ieee802_11_parse_elems_full(&parse_params);
+ 		if (!bss_elems) {
+ 			ret = false;
+diff --git a/net/mac80211/parse.c b/net/mac80211/parse.c
+index 279c5143b3356d..6da39c864f45ba 100644
+--- a/net/mac80211/parse.c
++++ b/net/mac80211/parse.c
+@@ -44,6 +44,12 @@ struct ieee80211_elems_parse {
+ 	/* The reconfiguration Multi-Link element in the original elements */
+ 	const struct element *ml_reconf_elem;
+ 
++	/* The EPCS Multi-Link element in the original elements */
++	const struct element *ml_epcs_elem;
++
++	bool multi_link_inner;
++	bool skip_vendor;
++
+ 	/*
+ 	 * scratch buffer that can be used for various element parsing related
+ 	 * tasks, e.g., element de-fragmentation etc.
+@@ -149,16 +155,18 @@ ieee80211_parse_extension_element(u32 *crc,
+ 			switch (le16_get_bits(mle->control,
+ 					      IEEE80211_ML_CONTROL_TYPE)) {
+ 			case IEEE80211_ML_CONTROL_TYPE_BASIC:
+-				if (elems_parse->ml_basic_elem) {
++				if (elems_parse->multi_link_inner) {
+ 					elems->parse_error |=
+ 						IEEE80211_PARSE_ERR_DUP_NEST_ML_BASIC;
+ 					break;
+ 				}
+-				elems_parse->ml_basic_elem = elem;
+ 				break;
+ 			case IEEE80211_ML_CONTROL_TYPE_RECONF:
+ 				elems_parse->ml_reconf_elem = elem;
+ 				break;
++			case IEEE80211_ML_CONTROL_TYPE_PRIO_ACCESS:
++				elems_parse->ml_epcs_elem = elem;
++				break;
+ 			default:
+ 				break;
+ 			}
+@@ -393,6 +401,9 @@ _ieee802_11_parse_elems_full(struct ieee80211_elems_parse_params *params,
+ 					IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ 			break;
+ 		case WLAN_EID_VENDOR_SPECIFIC:
++			if (elems_parse->skip_vendor)
++				break;
++
+ 			if (elen >= 4 && pos[0] == 0x00 && pos[1] == 0x50 &&
+ 			    pos[2] == 0xf2) {
+ 				/* Microsoft OUI (00:50:F2) */
+@@ -860,21 +871,36 @@ ieee80211_mle_get_sta_prof(struct ieee80211_elems_parse *elems_parse,
+ 	}
+ }
+ 
+-static void ieee80211_mle_parse_link(struct ieee80211_elems_parse *elems_parse,
+-				     struct ieee80211_elems_parse_params *params)
++static const struct element *
++ieee80211_prep_mle_link_parse(struct ieee80211_elems_parse *elems_parse,
++			      struct ieee80211_elems_parse_params *params,
++			      struct ieee80211_elems_parse_params *sub)
+ {
+ 	struct ieee802_11_elems *elems = &elems_parse->elems;
+ 	struct ieee80211_mle_per_sta_profile *prof;
+-	struct ieee80211_elems_parse_params sub = {
+-		.mode = params->mode,
+-		.action = params->action,
+-		.from_ap = params->from_ap,
+-		.link_id = -1,
+-	};
+-	ssize_t ml_len = elems->ml_basic_len;
+-	const struct element *non_inherit = NULL;
++	const struct element *tmp;
++	ssize_t ml_len;
+ 	const u8 *end;
+ 
++	if (params->mode < IEEE80211_CONN_MODE_EHT)
++		return NULL;
++
++	for_each_element_extid(tmp, WLAN_EID_EXT_EHT_MULTI_LINK,
++			       elems->ie_start, elems->total_len) {
++		const struct ieee80211_multi_link_elem *mle =
++			(void *)tmp->data + 1;
++
++		if (!ieee80211_mle_size_ok(tmp->data + 1, tmp->datalen - 1))
++			continue;
++
++		if (le16_get_bits(mle->control, IEEE80211_ML_CONTROL_TYPE) !=
++		    IEEE80211_ML_CONTROL_TYPE_BASIC)
++			continue;
++
++		elems_parse->ml_basic_elem = tmp;
++		break;
++	}
++
+ 	ml_len = cfg80211_defragment_element(elems_parse->ml_basic_elem,
+ 					     elems->ie_start,
+ 					     elems->total_len,
+@@ -885,26 +911,26 @@ static void ieee80211_mle_parse_link(struct ieee80211_elems_parse *elems_parse,
+ 					     WLAN_EID_FRAGMENT);
+ 
+ 	if (ml_len < 0)
+-		return;
++		return NULL;
+ 
+ 	elems->ml_basic = (const void *)elems_parse->scratch_pos;
+ 	elems->ml_basic_len = ml_len;
+ 	elems_parse->scratch_pos += ml_len;
+ 
+ 	if (params->link_id == -1)
+-		return;
++		return NULL;
+ 
+ 	ieee80211_mle_get_sta_prof(elems_parse, params->link_id);
+ 	prof = elems->prof;
+ 
+ 	if (!prof)
+-		return;
++		return NULL;
+ 
+ 	/* check if we have the 4 bytes for the fixed part in assoc response */
+ 	if (elems->sta_prof_len < sizeof(*prof) + prof->sta_info_len - 1 + 4) {
+ 		elems->prof = NULL;
+ 		elems->sta_prof_len = 0;
+-		return;
++		return NULL;
+ 	}
+ 
+ 	/*
+@@ -913,13 +939,17 @@ static void ieee80211_mle_parse_link(struct ieee80211_elems_parse *elems_parse,
+ 	 * the -1 is because the 'sta_info_len' is accounted to as part of the
+ 	 * per-STA profile, but not part of the 'u8 variable[]' portion.
+ 	 */
+-	sub.start = prof->variable + prof->sta_info_len - 1 + 4;
++	sub->start = prof->variable + prof->sta_info_len - 1 + 4;
+ 	end = (const u8 *)prof + elems->sta_prof_len;
+-	sub.len = end - sub.start;
++	sub->len = end - sub->start;
++
++	sub->mode = params->mode;
++	sub->action = params->action;
++	sub->from_ap = params->from_ap;
++	sub->link_id = -1;
+ 
+-	non_inherit = cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE,
+-					     sub.start, sub.len);
+-	_ieee802_11_parse_elems_full(&sub, elems_parse, non_inherit);
++	return cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE,
++				      sub->start, sub->len);
+ }
+ 
+ static void
+@@ -943,18 +973,43 @@ ieee80211_mle_defrag_reconf(struct ieee80211_elems_parse *elems_parse)
+ 	elems_parse->scratch_pos += ml_len;
+ }
+ 
++static void
++ieee80211_mle_defrag_epcs(struct ieee80211_elems_parse *elems_parse)
++{
++	struct ieee802_11_elems *elems = &elems_parse->elems;
++	ssize_t ml_len;
++
++	ml_len = cfg80211_defragment_element(elems_parse->ml_epcs_elem,
++					     elems->ie_start,
++					     elems->total_len,
++					     elems_parse->scratch_pos,
++					     elems_parse->scratch +
++						elems_parse->scratch_len -
++						elems_parse->scratch_pos,
++					     WLAN_EID_FRAGMENT);
++	if (ml_len < 0)
++		return;
++	elems->ml_epcs = (void *)elems_parse->scratch_pos;
++	elems->ml_epcs_len = ml_len;
++	elems_parse->scratch_pos += ml_len;
++}
++
+ struct ieee802_11_elems *
+ ieee802_11_parse_elems_full(struct ieee80211_elems_parse_params *params)
+ {
++	struct ieee80211_elems_parse_params sub = {};
+ 	struct ieee80211_elems_parse *elems_parse;
+-	struct ieee802_11_elems *elems;
+ 	const struct element *non_inherit = NULL;
+-	u8 *nontransmitted_profile;
+-	int nontransmitted_profile_len = 0;
++	struct ieee802_11_elems *elems;
+ 	size_t scratch_len = 3 * params->len;
++	bool multi_link_inner = false;
+ 
+ 	BUILD_BUG_ON(offsetof(typeof(*elems_parse), elems) != 0);
+ 
++	/* cannot parse for both a specific link and non-transmitted BSS */
++	if (WARN_ON(params->link_id >= 0 && params->bss))
++		return NULL;
++
+ 	elems_parse = kzalloc(struct_size(elems_parse, scratch, scratch_len),
+ 			      GFP_ATOMIC);
+ 	if (!elems_parse)
+@@ -971,36 +1026,55 @@ ieee802_11_parse_elems_full(struct ieee80211_elems_parse_params *params)
+ 	ieee80211_clear_tpe(&elems->tpe);
+ 	ieee80211_clear_tpe(&elems->csa_tpe);
+ 
+-	nontransmitted_profile = elems_parse->scratch_pos;
+-	nontransmitted_profile_len =
+-		ieee802_11_find_bssid_profile(params->start, params->len,
+-					      elems, params->bss,
+-					      nontransmitted_profile);
+-	elems_parse->scratch_pos += nontransmitted_profile_len;
+-	non_inherit = cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE,
+-					     nontransmitted_profile,
+-					     nontransmitted_profile_len);
++	/*
++	 * If we're looking for a non-transmitted BSS then we cannot at
++	 * the same time be looking for a second link as the two can only
++	 * appear in the same frame carrying info for different BSSes.
++	 *
++	 * In any case, we only look for one at a time, as encoded by
++	 * the WARN_ON above.
++	 */
++	if (params->bss) {
++		int nontx_len =
++			ieee802_11_find_bssid_profile(params->start,
++						      params->len,
++						      elems, params->bss,
++						      elems_parse->scratch_pos);
++		sub.start = elems_parse->scratch_pos;
++		sub.mode = params->mode;
++		sub.len = nontx_len;
++		sub.action = params->action;
++		sub.link_id = params->link_id;
++
++		/* consume the space used for non-transmitted profile */
++		elems_parse->scratch_pos += nontx_len;
++
++		non_inherit = cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE,
++						     sub.start, nontx_len);
++	} else {
++		/* must always parse to get elems_parse->ml_basic_elem */
++		non_inherit = ieee80211_prep_mle_link_parse(elems_parse, params,
++							    &sub);
++		multi_link_inner = true;
++	}
+ 
++	elems_parse->skip_vendor =
++		cfg80211_find_elem(WLAN_EID_VENDOR_SPECIFIC,
++				   sub.start, sub.len);
+ 	elems->crc = _ieee802_11_parse_elems_full(params, elems_parse,
+ 						  non_inherit);
+ 
+-	/* Override with nontransmitted profile, if found */
+-	if (nontransmitted_profile_len) {
+-		struct ieee80211_elems_parse_params sub = {
+-			.mode = params->mode,
+-			.start = nontransmitted_profile,
+-			.len = nontransmitted_profile_len,
+-			.action = params->action,
+-			.link_id = params->link_id,
+-		};
+-
++	/* Override with nontransmitted/per-STA profile if found */
++	if (sub.len) {
++		elems_parse->multi_link_inner = multi_link_inner;
++		elems_parse->skip_vendor = false;
+ 		_ieee802_11_parse_elems_full(&sub, elems_parse, NULL);
+ 	}
+ 
+-	ieee80211_mle_parse_link(elems_parse, params);
+-
+ 	ieee80211_mle_defrag_reconf(elems_parse);
+ 
++	ieee80211_mle_defrag_epcs(elems_parse);
++
+ 	if (elems->tim && !elems->parse_error) {
+ 		const struct ieee80211_tim_ie *tim_ie = elems->tim;
+ 
+diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
+index b4ba2d9f041765..2a085ec5bfd097 100644
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -968,7 +968,7 @@ static void __mptcp_pm_release_addr_entry(struct mptcp_pm_addr_entry *entry)
+ 
+ static int mptcp_pm_nl_append_new_local_addr(struct pm_nl_pernet *pernet,
+ 					     struct mptcp_pm_addr_entry *entry,
+-					     bool needs_id)
++					     bool needs_id, bool replace)
+ {
+ 	struct mptcp_pm_addr_entry *cur, *del_entry = NULL;
+ 	unsigned int addr_max;
+@@ -1008,6 +1008,17 @@ static int mptcp_pm_nl_append_new_local_addr(struct pm_nl_pernet *pernet,
+ 			if (entry->addr.id)
+ 				goto out;
+ 
++			/* allow callers that only need to look up the local
++			 * addr's id to skip replacement. This allows them to
++			 * avoid calling synchronize_rcu in the packet recv
++			 * path.
++			 */
++			if (!replace) {
++				kfree(entry);
++				ret = cur->addr.id;
++				goto out;
++			}
++
+ 			pernet->addrs--;
+ 			entry->addr.id = cur->addr.id;
+ 			list_del_rcu(&cur->list);
+@@ -1160,7 +1171,7 @@ int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct mptcp_addr_info *skc
+ 	entry->ifindex = 0;
+ 	entry->flags = MPTCP_PM_ADDR_FLAG_IMPLICIT;
+ 	entry->lsk = NULL;
+-	ret = mptcp_pm_nl_append_new_local_addr(pernet, entry, true);
++	ret = mptcp_pm_nl_append_new_local_addr(pernet, entry, true, false);
+ 	if (ret < 0)
+ 		kfree(entry);
+ 
+@@ -1432,7 +1443,8 @@ int mptcp_pm_nl_add_addr_doit(struct sk_buff *skb, struct genl_info *info)
+ 		}
+ 	}
+ 	ret = mptcp_pm_nl_append_new_local_addr(pernet, entry,
+-						!mptcp_pm_has_addr_attr_id(attr, info));
++						!mptcp_pm_has_addr_attr_id(attr, info),
++						true);
+ 	if (ret < 0) {
+ 		GENL_SET_ERR_MSG_FMT(info, "too many addresses or duplicate one: %d", ret);
+ 		goto out_free;
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index 1e78f575fb5630..ecfceddce00fcc 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -4218,6 +4218,11 @@ static int parse_monitor_flags(struct nlattr *nla, u32 *mntrflags)
+ 		if (flags[flag])
+ 			*mntrflags |= (1<<flag);
+ 
++	/* cooked monitor mode is incompatible with other modes */
++	if (*mntrflags & MONITOR_FLAG_COOK_FRAMES &&
++	    *mntrflags != MONITOR_FLAG_COOK_FRAMES)
++		return -EOPNOTSUPP;
++
+ 	*mntrflags |= MONITOR_FLAG_CHANGED;
+ 
+ 	return 0;
+diff --git a/net/wireless/reg.c b/net/wireless/reg.c
+index 6489ba943a633d..2b626078739c52 100644
+--- a/net/wireless/reg.c
++++ b/net/wireless/reg.c
+@@ -407,7 +407,8 @@ static bool is_an_alpha2(const char *alpha2)
+ {
+ 	if (!alpha2)
+ 		return false;
+-	return isalpha(alpha2[0]) && isalpha(alpha2[1]);
++	return isascii(alpha2[0]) && isalpha(alpha2[0]) &&
++	       isascii(alpha2[1]) && isalpha(alpha2[1]);
+ }
+ 
+ static bool alpha2_equal(const char *alpha2_x, const char *alpha2_y)
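The added isascii() guards matter because the kernel's ctype tables also flag Latin-1 accented letters (bytes above 0x7f) as alphabetic, so a two-byte hint such as "\xc0\xc1" previously passed isalpha(). A minimal sketch of the combined predicate:

    #include <linux/ctype.h>
    #include <linux/types.h>

    /* isalpha() alone accepts e.g. 0xC0 ('À' in Latin-1) under the
     * kernel ctype tables; pairing it with isascii() restricts the
     * check to real A-Z/a-z. */
    static bool ascii_alpha(char c)
    {
    	return isascii(c) && isalpha(c);
    }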
+diff --git a/rust/Makefile b/rust/Makefile
+index 45779a064fa4f4..09521fc449dca2 100644
+--- a/rust/Makefile
++++ b/rust/Makefile
+@@ -3,7 +3,7 @@
+ # Where to place rustdoc generated documentation
+ rustdoc_output := $(objtree)/Documentation/output/rust/rustdoc
+ 
+-obj-$(CONFIG_RUST) += core.o compiler_builtins.o
++obj-$(CONFIG_RUST) += core.o compiler_builtins.o ffi.o
+ always-$(CONFIG_RUST) += exports_core_generated.h
+ 
+ # Missing prototypes are expected in the helpers since these are exported
+@@ -15,8 +15,8 @@ always-$(CONFIG_RUST) += libmacros.so
+ no-clean-files += libmacros.so
+ 
+ always-$(CONFIG_RUST) += bindings/bindings_generated.rs bindings/bindings_helpers_generated.rs
+-obj-$(CONFIG_RUST) += alloc.o bindings.o kernel.o
+-always-$(CONFIG_RUST) += exports_alloc_generated.h exports_helpers_generated.h \
++obj-$(CONFIG_RUST) += bindings.o kernel.o
++always-$(CONFIG_RUST) += exports_helpers_generated.h \
+     exports_bindings_generated.h exports_kernel_generated.h
+ 
+ always-$(CONFIG_RUST) += uapi/uapi_generated.rs
+@@ -53,15 +53,10 @@ endif
+ core-cfgs = \
+     --cfg no_fp_fmt_parse
+ 
+-alloc-cfgs = \
+-    --cfg no_global_oom_handling \
+-    --cfg no_rc \
+-    --cfg no_sync
+-
+ quiet_cmd_rustdoc = RUSTDOC $(if $(rustdoc_host),H, ) $<
+       cmd_rustdoc = \
+ 	OBJTREE=$(abspath $(objtree)) \
+-	$(RUSTDOC) $(if $(rustdoc_host),$(rust_common_flags),$(rust_flags)) \
++	$(RUSTDOC) $(filter-out $(skip_flags),$(if $(rustdoc_host),$(rust_common_flags),$(rust_flags))) \
+ 		$(rustc_target_flags) -L$(objtree)/$(obj) \
+ 		-Zunstable-options --generate-link-to-definition \
+ 		--output $(rustdoc_output) \
+@@ -81,7 +76,7 @@ quiet_cmd_rustdoc = RUSTDOC $(if $(rustdoc_host),H, ) $<
+ # command-like flags to solve the issue. Meanwhile, we use the non-custom case
+ # and then retouch the generated files.
+ rustdoc: rustdoc-core rustdoc-macros rustdoc-compiler_builtins \
+-    rustdoc-alloc rustdoc-kernel
++    rustdoc-kernel
+ 	$(Q)cp $(srctree)/Documentation/images/logo.svg $(rustdoc_output)/static.files/
+ 	$(Q)cp $(srctree)/Documentation/images/COPYING-logo $(rustdoc_output)/static.files/
+ 	$(Q)find $(rustdoc_output) -name '*.html' -type f -print0 | xargs -0 sed -Ei \
+@@ -98,6 +93,9 @@ rustdoc-macros: private rustc_target_flags = --crate-type proc-macro \
+ rustdoc-macros: $(src)/macros/lib.rs FORCE
+ 	+$(call if_changed,rustdoc)
+ 
++# Starting with Rust 1.82.0, skipping `-Wrustdoc::unescaped_backticks` should
++# not be needed -- see https://github.com/rust-lang/rust/pull/128307.
++rustdoc-core: private skip_flags = -Wrustdoc::unescaped_backticks
+ rustdoc-core: private rustc_target_flags = $(core-cfgs)
+ rustdoc-core: $(RUST_LIB_SRC)/core/src/lib.rs FORCE
+ 	+$(call if_changed,rustdoc)
+@@ -105,20 +103,14 @@ rustdoc-core: $(RUST_LIB_SRC)/core/src/lib.rs FORCE
+ rustdoc-compiler_builtins: $(src)/compiler_builtins.rs rustdoc-core FORCE
+ 	+$(call if_changed,rustdoc)
+ 
+-# We need to allow `rustdoc::broken_intra_doc_links` because some
+-# `no_global_oom_handling` functions refer to non-`no_global_oom_handling`
+-# functions. Ideally `rustdoc` would have a way to distinguish broken links
+-# due to things that are "configured out" vs. entirely non-existing ones.
+-rustdoc-alloc: private rustc_target_flags = $(alloc-cfgs) \
+-    -Arustdoc::broken_intra_doc_links
+-rustdoc-alloc: $(RUST_LIB_SRC)/alloc/src/lib.rs rustdoc-core rustdoc-compiler_builtins FORCE
++rustdoc-ffi: $(src)/ffi.rs rustdoc-core FORCE
+ 	+$(call if_changed,rustdoc)
+ 
+-rustdoc-kernel: private rustc_target_flags = --extern alloc \
++rustdoc-kernel: private rustc_target_flags = --extern ffi \
+     --extern build_error --extern macros=$(objtree)/$(obj)/libmacros.so \
+     --extern bindings --extern uapi
+-rustdoc-kernel: $(src)/kernel/lib.rs rustdoc-core rustdoc-macros \
+-    rustdoc-compiler_builtins rustdoc-alloc $(obj)/libmacros.so \
++rustdoc-kernel: $(src)/kernel/lib.rs rustdoc-core rustdoc-ffi rustdoc-macros \
++    rustdoc-compiler_builtins $(obj)/libmacros.so \
+     $(obj)/bindings.o FORCE
+ 	+$(call if_changed,rustdoc)
+ 
+@@ -135,15 +127,28 @@ quiet_cmd_rustc_test_library = RUSTC TL $<
+ rusttestlib-build_error: $(src)/build_error.rs FORCE
+ 	+$(call if_changed,rustc_test_library)
+ 
++rusttestlib-ffi: $(src)/ffi.rs FORCE
++	+$(call if_changed,rustc_test_library)
++
+ rusttestlib-macros: private rustc_target_flags = --extern proc_macro
+ rusttestlib-macros: private rustc_test_library_proc = yes
+ rusttestlib-macros: $(src)/macros/lib.rs FORCE
+ 	+$(call if_changed,rustc_test_library)
+ 
+-rusttestlib-bindings: $(src)/bindings/lib.rs FORCE
++rusttestlib-kernel: private rustc_target_flags = --extern ffi \
++    --extern build_error --extern macros \
++    --extern bindings --extern uapi
++rusttestlib-kernel: $(src)/kernel/lib.rs \
++    rusttestlib-bindings rusttestlib-uapi rusttestlib-build_error \
++    $(obj)/libmacros.so $(obj)/bindings.o FORCE
++	+$(call if_changed,rustc_test_library)
++
++rusttestlib-bindings: private rustc_target_flags = --extern ffi
++rusttestlib-bindings: $(src)/bindings/lib.rs rusttestlib-ffi FORCE
+ 	+$(call if_changed,rustc_test_library)
+ 
+-rusttestlib-uapi: $(src)/uapi/lib.rs FORCE
++rusttestlib-uapi: private rustc_target_flags = --extern ffi
++rusttestlib-uapi: $(src)/uapi/lib.rs rusttestlib-ffi FORCE
+ 	+$(call if_changed,rustc_test_library)
+ 
+ quiet_cmd_rustdoc_test = RUSTDOC T $<
+@@ -162,7 +167,7 @@ quiet_cmd_rustdoc_test_kernel = RUSTDOC TK $<
+ 	mkdir -p $(objtree)/$(obj)/test/doctests/kernel; \
+ 	OBJTREE=$(abspath $(objtree)) \
+ 	$(RUSTDOC) --test $(rust_flags) \
+-		-L$(objtree)/$(obj) --extern alloc --extern kernel \
++		-L$(objtree)/$(obj) --extern ffi --extern kernel \
+ 		--extern build_error --extern macros \
+ 		--extern bindings --extern uapi \
+ 		--no-run --crate-name kernel -Zunstable-options \
+@@ -192,19 +197,20 @@ quiet_cmd_rustc_test = RUSTC T  $<
+ 
+ rusttest: rusttest-macros rusttest-kernel
+ 
+-rusttest-macros: private rustc_target_flags = --extern proc_macro
++rusttest-macros: private rustc_target_flags = --extern proc_macro \
++	--extern macros --extern kernel
+ rusttest-macros: private rustdoc_test_target_flags = --crate-type proc-macro
+-rusttest-macros: $(src)/macros/lib.rs FORCE
++rusttest-macros: $(src)/macros/lib.rs \
++    rusttestlib-macros rusttestlib-kernel FORCE
+ 	+$(call if_changed,rustc_test)
+ 	+$(call if_changed,rustdoc_test)
+ 
+-rusttest-kernel: private rustc_target_flags = --extern alloc \
++rusttest-kernel: private rustc_target_flags = --extern ffi \
+     --extern build_error --extern macros --extern bindings --extern uapi
+-rusttest-kernel: $(src)/kernel/lib.rs \
++rusttest-kernel: $(src)/kernel/lib.rs rusttestlib-ffi rusttestlib-kernel \
+     rusttestlib-build_error rusttestlib-macros rusttestlib-bindings \
+     rusttestlib-uapi FORCE
+ 	+$(call if_changed,rustc_test)
+-	+$(call if_changed,rustc_test_library)
+ 
+ ifdef CONFIG_CC_IS_CLANG
+ bindgen_c_flags = $(c_flags)
+@@ -266,7 +272,11 @@ else
+ bindgen_c_flags_lto = $(bindgen_c_flags)
+ endif
+ 
+-bindgen_c_flags_final = $(bindgen_c_flags_lto) -D__BINDGEN__
++# `-fno-builtin` is passed to prevent `bindgen` from using `clang` builtin
++# prototypes for functions like `memcpy` -- if this flag is not passed,
++# `bindgen`-generated prototypes use `c_ulong` or `c_uint` depending on
++# architecture instead of generating `usize`.
++bindgen_c_flags_final = $(bindgen_c_flags_lto) -fno-builtin -D__BINDGEN__
+ 
+ # Each `bindgen` release may upgrade the list of Rust target versions. By
+ # default, the highest stable release in their list is used. Thus we need to set
+@@ -284,7 +294,7 @@ bindgen_c_flags_final = $(bindgen_c_flags_lto) -D__BINDGEN__
+ quiet_cmd_bindgen = BINDGEN $@
+       cmd_bindgen = \
+ 	$(BINDGEN) $< $(bindgen_target_flags) --rust-target 1.68 \
+-		--use-core --with-derive-default --ctypes-prefix core::ffi --no-layout-tests \
++		--use-core --with-derive-default --ctypes-prefix ffi --no-layout-tests \
+ 		--no-debug '.*' --enable-function-attribute-detection \
+ 		-o $@ -- $(bindgen_c_flags_final) -DMODULE \
+ 		$(bindgen_target_cflags) $(bindgen_target_extra)
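// Illustrative sketch, not part of the patch: with clang's builtin prototype
// for a function like memcpy, bindgen emits the platform C integer type for
// size_t rather than going through the (now blocklisted) __kernel_size_t
// typedef. Roughly, assuming a 64-bit target:
//
//     // without -fno-builtin (undesired, platform-dependent):
//     pub fn memcpy(dest: *mut c_void, src: *const c_void, n: c_ulong) -> *mut c_void;
//
//     // with -fno-builtin (desired, __kernel_size_t mapped to usize):
//     pub fn memcpy(dest: *mut c_void, src: *const c_void, n: usize) -> *mut c_void;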
+@@ -325,9 +335,6 @@ quiet_cmd_exports = EXPORTS $@
+ $(obj)/exports_core_generated.h: $(obj)/core.o FORCE
+ 	$(call if_changed,exports)
+ 
+-$(obj)/exports_alloc_generated.h: $(obj)/alloc.o FORCE
+-	$(call if_changed,exports)
+-
+ # Even though Rust kernel modules should never use the bindings directly,
+ # symbols from the `bindings` crate and the C helpers need to be exported
+ # because Rust generics and inlined functions may not get their code generated
+@@ -374,7 +381,7 @@ quiet_cmd_rustc_library = $(if $(skip_clippy),RUSTC,$(RUSTC_OR_CLIPPY_QUIET)) L
+ 
+ rust-analyzer:
+ 	$(Q)$(srctree)/scripts/generate_rust_analyzer.py \
+-		--cfgs='core=$(core-cfgs)' --cfgs='alloc=$(alloc-cfgs)' \
++		--cfgs='core=$(core-cfgs)' \
+ 		$(realpath $(srctree)) $(realpath $(objtree)) \
+ 		$(rustc_sysroot) $(RUST_LIB_SRC) $(KBUILD_EXTMOD) > \
+ 		$(if $(KBUILD_EXTMOD),$(extmod_prefix),$(objtree))/rust-project.json
+@@ -412,29 +419,28 @@ $(obj)/compiler_builtins.o: private rustc_objcopy = -w -W '__*'
+ $(obj)/compiler_builtins.o: $(src)/compiler_builtins.rs $(obj)/core.o FORCE
+ 	+$(call if_changed_rule,rustc_library)
+ 
+-$(obj)/alloc.o: private skip_clippy = 1
+-$(obj)/alloc.o: private skip_flags = -Wunreachable_pub
+-$(obj)/alloc.o: private rustc_target_flags = $(alloc-cfgs)
+-$(obj)/alloc.o: $(RUST_LIB_SRC)/alloc/src/lib.rs $(obj)/compiler_builtins.o FORCE
++$(obj)/build_error.o: $(src)/build_error.rs $(obj)/compiler_builtins.o FORCE
+ 	+$(call if_changed_rule,rustc_library)
+ 
+-$(obj)/build_error.o: $(src)/build_error.rs $(obj)/compiler_builtins.o FORCE
++$(obj)/ffi.o: $(src)/ffi.rs $(obj)/compiler_builtins.o FORCE
+ 	+$(call if_changed_rule,rustc_library)
+ 
++$(obj)/bindings.o: private rustc_target_flags = --extern ffi
+ $(obj)/bindings.o: $(src)/bindings/lib.rs \
+-    $(obj)/compiler_builtins.o \
++    $(obj)/ffi.o \
+     $(obj)/bindings/bindings_generated.rs \
+     $(obj)/bindings/bindings_helpers_generated.rs FORCE
+ 	+$(call if_changed_rule,rustc_library)
+ 
++$(obj)/uapi.o: private rustc_target_flags = --extern ffi
+ $(obj)/uapi.o: $(src)/uapi/lib.rs \
+-    $(obj)/compiler_builtins.o \
++    $(obj)/ffi.o \
+     $(obj)/uapi/uapi_generated.rs FORCE
+ 	+$(call if_changed_rule,rustc_library)
+ 
+-$(obj)/kernel.o: private rustc_target_flags = --extern alloc \
++$(obj)/kernel.o: private rustc_target_flags = --extern ffi \
+     --extern build_error --extern macros --extern bindings --extern uapi
+-$(obj)/kernel.o: $(src)/kernel/lib.rs $(obj)/alloc.o $(obj)/build_error.o \
++$(obj)/kernel.o: $(src)/kernel/lib.rs $(obj)/build_error.o \
+     $(obj)/libmacros.so $(obj)/bindings.o $(obj)/uapi.o FORCE
+ 	+$(call if_changed_rule,rustc_library)
+ 
+diff --git a/rust/bindgen_parameters b/rust/bindgen_parameters
+index b7c7483123b7ab..0f96af8b9a7fee 100644
+--- a/rust/bindgen_parameters
++++ b/rust/bindgen_parameters
+@@ -1,5 +1,10 @@
+ # SPDX-License-Identifier: GPL-2.0
+ 
++# We want to map these types to `isize`/`usize` manually, instead of
++# defining them as `int`/`long` depending on platform bitwidth.
++--blocklist-type __kernel_s?size_t
++--blocklist-type __kernel_ptrdiff_t
++
+ --opaque-type xregs_state
+ --opaque-type desc_struct
+ --opaque-type arch_lbr_state
+diff --git a/rust/bindings/bindings_helper.h b/rust/bindings/bindings_helper.h
+index ae82e9c941afa1..a80783fcbe042a 100644
+--- a/rust/bindings/bindings_helper.h
++++ b/rust/bindings/bindings_helper.h
+@@ -31,4 +31,5 @@ const gfp_t RUST_CONST_HELPER_GFP_KERNEL_ACCOUNT = GFP_KERNEL_ACCOUNT;
+ const gfp_t RUST_CONST_HELPER_GFP_NOWAIT = GFP_NOWAIT;
+ const gfp_t RUST_CONST_HELPER___GFP_ZERO = __GFP_ZERO;
+ const gfp_t RUST_CONST_HELPER___GFP_HIGHMEM = ___GFP_HIGHMEM;
++const gfp_t RUST_CONST_HELPER___GFP_NOWARN = ___GFP_NOWARN;
+ const blk_features_t RUST_CONST_HELPER_BLK_FEAT_ROTATIONAL = BLK_FEAT_ROTATIONAL;
+diff --git a/rust/bindings/lib.rs b/rust/bindings/lib.rs
+index 93a1a3fc97bc9b..014af0d1fc70cb 100644
+--- a/rust/bindings/lib.rs
++++ b/rust/bindings/lib.rs
+@@ -25,7 +25,13 @@
+ )]
+ 
+ #[allow(dead_code)]
++#[allow(clippy::undocumented_unsafe_blocks)]
+ mod bindings_raw {
++    // Manual definition for blocklisted types.
++    type __kernel_size_t = usize;
++    type __kernel_ssize_t = isize;
++    type __kernel_ptrdiff_t = isize;
++
+     // Use glob import here to expose all helpers.
+     // Symbols defined within the module will take precedence to the glob import.
+     pub use super::bindings_helper::*;
+diff --git a/rust/exports.c b/rust/exports.c
+index e5695f3b45b7aa..82a037381798d7 100644
+--- a/rust/exports.c
++++ b/rust/exports.c
+@@ -16,7 +16,6 @@
+ #define EXPORT_SYMBOL_RUST_GPL(sym) extern int sym; EXPORT_SYMBOL_GPL(sym)
+ 
+ #include "exports_core_generated.h"
+-#include "exports_alloc_generated.h"
+ #include "exports_helpers_generated.h"
+ #include "exports_bindings_generated.h"
+ #include "exports_kernel_generated.h"
+diff --git a/rust/ffi.rs b/rust/ffi.rs
+new file mode 100644
+index 00000000000000..584f75b49862b3
+--- /dev/null
++++ b/rust/ffi.rs
+@@ -0,0 +1,48 @@
++// SPDX-License-Identifier: GPL-2.0
++
++//! Foreign function interface (FFI) types.
++//!
++//! This crate provides mapping from C primitive types to Rust ones.
++//!
++//! The Rust [`core`] crate provides [`core::ffi`], which maps integer types to the platform default
++//! C ABI. The kernel does not use [`core::ffi`], so that the mapping can deviate from the platform
++//! default where the kernel requires it.
++
++#![no_std]
++
++macro_rules! alias {
++    ($($name:ident = $ty:ty;)*) => {$(
++        #[allow(non_camel_case_types, missing_docs)]
++        pub type $name = $ty;
++
++        // Check size compatibility with `core`.
++        const _: () = assert!(
++            core::mem::size_of::<$name>() == core::mem::size_of::<core::ffi::$name>()
++        );
++    )*}
++}
++
++alias! {
++    // `core::ffi::c_char` is either `i8` or `u8` depending on architecture. In the kernel, we use
++    // `-funsigned-char` so it's always mapped to `u8`.
++    c_char = u8;
++
++    c_schar = i8;
++    c_uchar = u8;
++
++    c_short = i16;
++    c_ushort = u16;
++
++    c_int = i32;
++    c_uint = u32;
++
++    // In the kernel, `intptr_t` is defined to be `long` on all platforms, so we can map the type to
++    // `isize`.
++    c_long = isize;
++    c_ulong = usize;
++
++    c_longlong = i64;
++    c_ulonglong = u64;
++}
++
++pub use core::ffi::c_void;
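// Illustrative sketch, not part of the patch: code built with `--extern ffi`
// (see the Makefile hunks above) can use the kernel's aliases in place of
// `core::ffi`. The extern function below is hypothetical:

use ffi::{c_char, c_int};

extern "C" {
    // `c_char` is always `u8` here because the kernel builds with
    // `-funsigned-char`; on the `core::ffi` side it would be `i8` on some
    // architectures.
    fn hypothetical_strlen(s: *const c_char) -> c_int;
}

// The `const _: () = assert!(...)` inside `alias!` turns any future size
// divergence from the corresponding `core::ffi` type into a build failure.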
+diff --git a/rust/helpers/helpers.c b/rust/helpers/helpers.c
+index 30f40149f3a969..20a0c69d5cc7b8 100644
+--- a/rust/helpers/helpers.c
++++ b/rust/helpers/helpers.c
+@@ -22,5 +22,6 @@
+ #include "spinlock.c"
+ #include "task.c"
+ #include "uaccess.c"
++#include "vmalloc.c"
+ #include "wait.c"
+ #include "workqueue.c"
+diff --git a/rust/helpers/slab.c b/rust/helpers/slab.c
+index f043e087f9d666..a842bfbddcba91 100644
+--- a/rust/helpers/slab.c
++++ b/rust/helpers/slab.c
+@@ -7,3 +7,9 @@ rust_helper_krealloc(const void *objp, size_t new_size, gfp_t flags)
+ {
+ 	return krealloc(objp, new_size, flags);
+ }
++
++void * __must_check __realloc_size(2)
++rust_helper_kvrealloc(const void *p, size_t size, gfp_t flags)
++{
++	return kvrealloc(p, size, flags);
++}
+diff --git a/rust/helpers/vmalloc.c b/rust/helpers/vmalloc.c
+new file mode 100644
+index 00000000000000..80d34501bbc010
+--- /dev/null
++++ b/rust/helpers/vmalloc.c
+@@ -0,0 +1,9 @@
++// SPDX-License-Identifier: GPL-2.0
++
++#include <linux/vmalloc.h>
++
++void * __must_check __realloc_size(2)
++rust_helper_vrealloc(const void *p, size_t size, gfp_t flags)
++{
++	return vrealloc(p, size, flags);
++}
+diff --git a/rust/kernel/alloc.rs b/rust/kernel/alloc.rs
+index 1966bd40701741..f2f7f3a53d298c 100644
+--- a/rust/kernel/alloc.rs
++++ b/rust/kernel/alloc.rs
+@@ -1,23 +1,41 @@
+ // SPDX-License-Identifier: GPL-2.0
+ 
+-//! Extensions to the [`alloc`] crate.
++//! Implementation of the kernel's memory allocation infrastructure.
+ 
+-#[cfg(not(test))]
+-#[cfg(not(testlib))]
+-mod allocator;
+-pub mod box_ext;
+-pub mod vec_ext;
++#[cfg(not(any(test, testlib)))]
++pub mod allocator;
++pub mod kbox;
++pub mod kvec;
++pub mod layout;
++
++#[cfg(any(test, testlib))]
++pub mod allocator_test;
++
++#[cfg(any(test, testlib))]
++pub use self::allocator_test as allocator;
++
++pub use self::kbox::Box;
++pub use self::kbox::KBox;
++pub use self::kbox::KVBox;
++pub use self::kbox::VBox;
++
++pub use self::kvec::IntoIter;
++pub use self::kvec::KVVec;
++pub use self::kvec::KVec;
++pub use self::kvec::VVec;
++pub use self::kvec::Vec;
+ 
+ /// Indicates an allocation error.
+ #[derive(Copy, Clone, PartialEq, Eq, Debug)]
+ pub struct AllocError;
++use core::{alloc::Layout, ptr::NonNull};
+ 
+ /// Flags to be used when allocating memory.
+ ///
+ /// They can be combined with the operators `|`, `&`, and `!`.
+ ///
+ /// Values can be used from the [`flags`] module.
+-#[derive(Clone, Copy)]
++#[derive(Clone, Copy, PartialEq)]
+ pub struct Flags(u32);
+ 
+ impl Flags {
+@@ -25,6 +43,11 @@ impl Flags {
+     pub(crate) fn as_raw(self) -> u32 {
+         self.0
+     }
++
++    /// Check whether `flags` is contained in `self`.
++    pub fn contains(self, flags: Flags) -> bool {
++        (self & flags) == flags
++    }
+ }
+ 
+ impl core::ops::BitOr for Flags {
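// Illustrative sketch, not part of the patch: `Flags` combine with `|` and the
// new `contains` answers subset queries, e.g.:
//
//     let flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN;
//     assert!(flags.contains(__GFP_ZERO));
//     assert!(flags.contains(GFP_KERNEL | __GFP_ZERO));
//     assert!(!GFP_KERNEL.contains(__GFP_NOWARN));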
+@@ -85,4 +108,117 @@ pub mod flags {
+     /// use any filesystem callback.  It is very likely to fail to allocate memory, even for very
+     /// small allocations.
+     pub const GFP_NOWAIT: Flags = Flags(bindings::GFP_NOWAIT);
++
++    /// Suppresses allocation failure reports.
++    ///
++    /// This is normally or'd with other flags.
++    pub const __GFP_NOWARN: Flags = Flags(bindings::__GFP_NOWARN);
++}
++
++/// The kernel's [`Allocator`] trait.
++///
++/// An implementation of [`Allocator`] can allocate, re-allocate and free memory buffers described
++/// via [`Layout`].
++///
++/// [`Allocator`] is designed to be implemented as a ZST; [`Allocator`] functions do not operate on
++/// an object instance.
++///
++/// In order to be able to support `#[derive(SmartPointer)]` later on, we need to avoid a design
++/// that requires an `Allocator` to be instantiated, hence its functions must not contain any kind
++/// of `self` parameter.
++///
++/// # Safety
++///
++/// - A memory allocation returned from an allocator must remain valid until it is explicitly freed.
++///
++/// - Any pointer to a valid memory allocation must be valid to be passed to any other [`Allocator`]
++///   function of the same type.
++///
++/// - Implementers must ensure that all trait functions abide by the guarantees documented in the
++///   `# Guarantees` sections.
++pub unsafe trait Allocator {
++    /// Allocate memory based on `layout` and `flags`.
++    ///
++    /// On success, returns a buffer represented as `NonNull<[u8]>` that satisfies the layout
++    /// constraints (i.e. minimum size and alignment as specified by `layout`).
++    ///
++    /// This function is equivalent to `realloc` when called with `None`.
++    ///
++    /// # Guarantees
++    ///
++    /// When the return value is `Ok(ptr)`, then `ptr` is
++    /// - valid for reads and writes for `layout.size()` bytes, until it is passed to
++    ///   [`Allocator::free`] or [`Allocator::realloc`],
++    /// - aligned to `layout.align()`,
++    ///
++    /// Additionally, `Flags` are honored as documented in
++    /// <https://docs.kernel.org/core-api/mm-api.html#mm-api-gfp-flags>.
++    fn alloc(layout: Layout, flags: Flags) -> Result<NonNull<[u8]>, AllocError> {
++        // SAFETY: Passing `None` to `realloc` is valid by its safety requirements and asks for a
++        // new memory allocation.
++        unsafe { Self::realloc(None, layout, Layout::new::<()>(), flags) }
++    }
++
++    /// Re-allocate an existing memory allocation to satisfy the requested `layout`.
++    ///
++    /// If the requested size is zero, `realloc` behaves equivalent to `free`.
++    ///
++    /// If the requested size is larger than the size of the existing allocation, a successful call
++    /// to `realloc` guarantees that the new or grown buffer has at least `Layout::size` bytes, but
++    /// may also be larger.
++    ///
++    /// If the requested size is smaller than the size of the existing allocation, `realloc` may or
++    /// may not shrink the buffer; this is implementation specific to the allocator.
++    ///
++    /// On allocation failure, the existing buffer, if any, remains valid.
++    ///
++    /// The buffer is represented as `NonNull<[u8]>`.
++    ///
++    /// # Safety
++    ///
++    /// - If `ptr == Some(p)`, then `p` must point to an existing and valid memory allocation
++    ///   created by this [`Allocator`]; if `old_layout` is zero-sized `p` does not need to be a
++    ///   pointer returned by this [`Allocator`].
++    /// - `ptr` is allowed to be `None`; in this case a new memory allocation is created and
++    ///   `old_layout` is ignored.
++    /// - `old_layout` must match the `Layout` the allocation has been created with.
++    ///
++    /// # Guarantees
++    ///
++    /// This function has the same guarantees as [`Allocator::alloc`]. When `ptr == Some(p)`, then
++    /// it additionally guarantees that:
++    /// - the contents of the memory pointed to by `p` are preserved up to the lesser of the new
++    ///   and old size, i.e. `ret_ptr[0..min(layout.size(), old_layout.size())] ==
++    ///   p[0..min(layout.size(), old_layout.size())]`.
++    /// - when the return value is `Err(AllocError)`, then `ptr` is still valid.
++    unsafe fn realloc(
++        ptr: Option<NonNull<u8>>,
++        layout: Layout,
++        old_layout: Layout,
++        flags: Flags,
++    ) -> Result<NonNull<[u8]>, AllocError>;
++
++    /// Free an existing memory allocation.
++    ///
++    /// # Safety
++    ///
++    /// - `ptr` must point to an existing and valid memory allocation created by this [`Allocator`];
++    ///   if `layout` is zero-sized `ptr` does not need to be a pointer returned by this
++    ///   [`Allocator`].
++    /// - `layout` must match the `Layout` the allocation has been created with.
++    /// - The memory allocation at `ptr` must never again be read from or written to.
++    unsafe fn free(ptr: NonNull<u8>, layout: Layout) {
++        // SAFETY: The caller guarantees that `ptr` points at a valid allocation created by this
++        // allocator. We are passing a `Layout` with the smallest possible alignment, so it is
++        // smaller than or equal to the alignment previously used with this allocation.
++        let _ = unsafe { Self::realloc(Some(ptr), Layout::new::<()>(), layout, Flags(0)) };
++    }
++}
++
++/// Returns a properly aligned dangling pointer from the given `layout`.
++pub(crate) fn dangling_from_layout(layout: Layout) -> NonNull<u8> {
++    let ptr = layout.align() as *mut u8;
++
++    // SAFETY: `layout.align()` (and hence `ptr`) is guaranteed to be non-zero.
++    unsafe { NonNull::new_unchecked(ptr) }
+ }
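// Illustrative sketch, not part of the patch: a typical allocation lifecycle
// through the new `Allocator` trait, with `Kmalloc` as the implementation.
// All names come from the hunks above; error handling is minimal.

fn lifecycle_sketch() -> Result<(), kernel::alloc::AllocError> {
    use core::alloc::Layout;
    use kernel::alloc::{allocator::Kmalloc, flags::GFP_KERNEL, Allocator};

    let layout = Layout::new::<[u64; 8]>();

    // `alloc` is a provided method: it forwards to `realloc(None, ...)`.
    let ptr = Kmalloc::alloc(layout, GFP_KERNEL)?;

    // Grow the buffer; contents up to the lesser of both sizes are preserved.
    let new_layout = Layout::new::<[u64; 16]>();
    // SAFETY: `ptr` was allocated with `Kmalloc` and `layout` matches the
    // layout it was allocated with.
    let ptr = unsafe { Kmalloc::realloc(Some(ptr.cast()), new_layout, layout, GFP_KERNEL)? };

    // SAFETY: `ptr` was allocated with `Kmalloc` and is never used again.
    unsafe { Kmalloc::free(ptr.cast(), new_layout) };
    Ok(())
}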
+diff --git a/rust/kernel/alloc/allocator.rs b/rust/kernel/alloc/allocator.rs
+index e6ea601f38c6d9..439985e29fbc0e 100644
+--- a/rust/kernel/alloc/allocator.rs
++++ b/rust/kernel/alloc/allocator.rs
+@@ -1,74 +1,188 @@
+ // SPDX-License-Identifier: GPL-2.0
+ 
+ //! Allocator support.
++//!
++//! Documentation for the kernel's memory allocators can be found in the "Memory Allocation Guide"
++//! linked below. For instance, this includes the concept of "get free page" (GFP) flags and the
++//! typical application of the different kernel allocators.
++//!
++//! Reference: <https://docs.kernel.org/core-api/memory-allocation.html>
+ 
+-use super::{flags::*, Flags};
+-use core::alloc::{GlobalAlloc, Layout};
++use super::Flags;
++use core::alloc::Layout;
+ use core::ptr;
++use core::ptr::NonNull;
+ 
+-struct KernelAllocator;
++use crate::alloc::{AllocError, Allocator};
++use crate::bindings;
++use crate::pr_warn;
+ 
+-/// Calls `krealloc` with a proper size to alloc a new object aligned to `new_layout`'s alignment.
++/// The contiguous kernel allocator.
+ ///
+-/// # Safety
++/// `Kmalloc` is typically used for physically contiguous allocations up to page size, but also
++/// supports larger allocations up to `bindings::KMALLOC_MAX_SIZE`, which is hardware specific.
+ ///
+-/// - `ptr` can be either null or a pointer which has been allocated by this allocator.
+-/// - `new_layout` must have a non-zero size.
+-pub(crate) unsafe fn krealloc_aligned(ptr: *mut u8, new_layout: Layout, flags: Flags) -> *mut u8 {
++/// For more details see [self].
++pub struct Kmalloc;
++
++/// The virtually contiguous kernel allocator.
++///
++/// `Vmalloc` allocates pages from the page level allocator and maps them into the contiguous kernel
++/// virtual space. It is typically used for large allocations. The memory allocated with this
++/// allocator is not physically contiguous.
++///
++/// For more details see [self].
++pub struct Vmalloc;
++
++/// The kvmalloc kernel allocator.
++///
++/// `KVmalloc` attempts to allocate memory with `Kmalloc` first, but falls back to `Vmalloc` upon
++/// failure. This allocator is typically used when the size for the requested allocation is not
++/// known and may exceed the capabilities of `Kmalloc`.
++///
++/// For more details see [self].
++pub struct KVmalloc;
++
++/// Returns a proper size to alloc a new object aligned to `new_layout`'s alignment.
++fn aligned_size(new_layout: Layout) -> usize {
+     // Customized layouts from `Layout::from_size_align()` can have size < align, so pad first.
+     let layout = new_layout.pad_to_align();
+ 
+     // Note that `layout.size()` (after padding) is guaranteed to be a multiple of `layout.align()`
+     // which together with the slab guarantees means the `krealloc` will return a properly aligned
+     // object (see comments in `kmalloc()` for more information).
+-    let size = layout.size();
+-
+-    // SAFETY:
+-    // - `ptr` is either null or a pointer returned from a previous `k{re}alloc()` by the
+-    //   function safety requirement.
+-    // - `size` is greater than 0 since it's from `layout.size()` (which cannot be zero according
+-    //   to the function safety requirement)
+-    unsafe { bindings::krealloc(ptr as *const core::ffi::c_void, size, flags.0) as *mut u8 }
++    layout.size()
+ }
+ 
+-unsafe impl GlobalAlloc for KernelAllocator {
+-    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
+-        // SAFETY: `ptr::null_mut()` is null and `layout` has a non-zero size by the function safety
+-        // requirement.
+-        unsafe { krealloc_aligned(ptr::null_mut(), layout, GFP_KERNEL) }
+-    }
++/// # Invariants
++///
++/// One of the following: `krealloc`, `vrealloc`, `kvrealloc`.
++struct ReallocFunc(
++    unsafe extern "C" fn(*const crate::ffi::c_void, usize, u32) -> *mut crate::ffi::c_void,
++);
+ 
+-    unsafe fn dealloc(&self, ptr: *mut u8, _layout: Layout) {
+-        unsafe {
+-            bindings::kfree(ptr as *const core::ffi::c_void);
+-        }
+-    }
++impl ReallocFunc {
++    // INVARIANT: `krealloc` satisfies the type invariants.
++    const KREALLOC: Self = Self(bindings::krealloc);
+ 
+-    unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
+-        // SAFETY:
+-        // - `new_size`, when rounded up to the nearest multiple of `layout.align()`, will not
+-        //   overflow `isize` by the function safety requirement.
+-        // - `layout.align()` is a proper alignment (i.e. not zero and must be a power of two).
+-        let layout = unsafe { Layout::from_size_align_unchecked(new_size, layout.align()) };
++    // INVARIANT: `vrealloc` satisfies the type invariants.
++    const VREALLOC: Self = Self(bindings::vrealloc);
++
++    // INVARIANT: `kvrealloc` satisfies the type invariants.
++    const KVREALLOC: Self = Self(bindings::kvrealloc);
++
++    /// # Safety
++    ///
++    /// This method has the same safety requirements as [`Allocator::realloc`].
++    ///
++    /// # Guarantees
++    ///
++    /// This method has the same guarantees as `Allocator::realloc`. Additionally
++    /// - it accepts any pointer to a valid memory allocation allocated by this function.
++    /// - memory allocated by this function remains valid until it is passed to this function.
++    unsafe fn call(
++        &self,
++        ptr: Option<NonNull<u8>>,
++        layout: Layout,
++        old_layout: Layout,
++        flags: Flags,
++    ) -> Result<NonNull<[u8]>, AllocError> {
++        let size = aligned_size(layout);
++        let ptr = match ptr {
++            Some(ptr) => {
++                if old_layout.size() == 0 {
++                    ptr::null()
++                } else {
++                    ptr.as_ptr()
++                }
++            }
++            None => ptr::null(),
++        };
+ 
+         // SAFETY:
+-        // - `ptr` is either null or a pointer allocated by this allocator by the function safety
+-        //   requirement.
+-        // - the size of `layout` is not zero because `new_size` is not zero by the function safety
+-        //   requirement.
+-        unsafe { krealloc_aligned(ptr, layout, GFP_KERNEL) }
++        // - `self.0` is one of `krealloc`, `vrealloc`, `kvrealloc` and thus only requires that
++        //   `ptr` is NULL or valid.
++        // - `ptr` is either NULL or valid by the safety requirements of this function.
++        //
++        // GUARANTEE:
++        // - `self.0` is one of `krealloc`, `vrealloc`, `kvrealloc`.
++        // - Those functions provide the guarantees of this function.
++        let raw_ptr = unsafe {
++            // If `size == 0` and `ptr != NULL` the memory behind the pointer is freed.
++            self.0(ptr.cast(), size, flags.0).cast()
++        };
++
++        let ptr = if size == 0 {
++            crate::alloc::dangling_from_layout(layout)
++        } else {
++            NonNull::new(raw_ptr).ok_or(AllocError)?
++        };
++
++        Ok(NonNull::slice_from_raw_parts(ptr, size))
++    }
++}
++
++// SAFETY: `realloc` delegates to `ReallocFunc::call`, which guarantees that
++// - memory remains valid until it is explicitly freed,
++// - passing a pointer to a valid memory allocation is OK,
++// - `realloc` satisfies the guarantees, since `ReallocFunc::call` has the same.
++unsafe impl Allocator for Kmalloc {
++    #[inline]
++    unsafe fn realloc(
++        ptr: Option<NonNull<u8>>,
++        layout: Layout,
++        old_layout: Layout,
++        flags: Flags,
++    ) -> Result<NonNull<[u8]>, AllocError> {
++        // SAFETY: `ReallocFunc::call` has the same safety requirements as `Allocator::realloc`.
++        unsafe { ReallocFunc::KREALLOC.call(ptr, layout, old_layout, flags) }
+     }
++}
++
++// SAFETY: `realloc` delegates to `ReallocFunc::call`, which guarantees that
++// - memory remains valid until it is explicitly freed,
++// - passing a pointer to a valid memory allocation is OK,
++// - `realloc` satisfies the guarantees, since `ReallocFunc::call` has the same.
++unsafe impl Allocator for Vmalloc {
++    #[inline]
++    unsafe fn realloc(
++        ptr: Option<NonNull<u8>>,
++        layout: Layout,
++        old_layout: Layout,
++        flags: Flags,
++    ) -> Result<NonNull<[u8]>, AllocError> {
++        // TODO: Support alignments larger than PAGE_SIZE.
++        if layout.align() > bindings::PAGE_SIZE {
++            pr_warn!("Vmalloc does not support alignments larger than PAGE_SIZE yet.\n");
++            return Err(AllocError);
++        }
+ 
+-    unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
+-        // SAFETY: `ptr::null_mut()` is null and `layout` has a non-zero size by the function safety
+-        // requirement.
+-        unsafe { krealloc_aligned(ptr::null_mut(), layout, GFP_KERNEL | __GFP_ZERO) }
++        // SAFETY: If not `None`, `ptr` is guaranteed to point to valid memory, which was previously
++        // allocated with this `Allocator`.
++        unsafe { ReallocFunc::VREALLOC.call(ptr, layout, old_layout, flags) }
+     }
+ }
+ 
+-#[global_allocator]
+-static ALLOCATOR: KernelAllocator = KernelAllocator;
++// SAFETY: `realloc` delegates to `ReallocFunc::call`, which guarantees that
++// - memory remains valid until it is explicitly freed,
++// - passing a pointer to a valid memory allocation is OK,
++// - `realloc` satisfies the guarantees, since `ReallocFunc::call` has the same.
++unsafe impl Allocator for KVmalloc {
++    #[inline]
++    unsafe fn realloc(
++        ptr: Option<NonNull<u8>>,
++        layout: Layout,
++        old_layout: Layout,
++        flags: Flags,
++    ) -> Result<NonNull<[u8]>, AllocError> {
++        // TODO: Support alignments larger than PAGE_SIZE.
++        if layout.align() > bindings::PAGE_SIZE {
++            pr_warn!("KVmalloc does not support alignments larger than PAGE_SIZE yet.\n");
++            return Err(AllocError);
++        }
+ 
+-// See <https://github.com/rust-lang/rust/pull/86844>.
+-#[no_mangle]
+-static __rust_no_alloc_shim_is_unstable: u8 = 0;
++        // SAFETY: If not `None`, `ptr` is guaranteed to point to valid memory, which was previously
++        // allocated with this `Allocator`.
++        unsafe { ReallocFunc::KVREALLOC.call(ptr, layout, old_layout, flags) }
++    }
++}
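// Illustrative sketch, not part of the patch: `ReallocFunc::call` folds the
// whole lifecycle into a single C entry point (krealloc/vrealloc/kvrealloc):
//
//     call(None,      layout,    _,          flags)  // fresh allocation
//     call(Some(ptr), layout,    old_layout, flags)  // grow or shrink
//     call(Some(ptr), ZERO_SIZE, old_layout, flags)  // free (size == 0)
//
// which is why `Allocator::alloc` and `Allocator::free` can both be provided
// methods that merely delegate to `realloc`.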
+diff --git a/rust/kernel/alloc/allocator_test.rs b/rust/kernel/alloc/allocator_test.rs
+new file mode 100644
+index 00000000000000..e3240d16040bd9
+--- /dev/null
++++ b/rust/kernel/alloc/allocator_test.rs
+@@ -0,0 +1,95 @@
++// SPDX-License-Identifier: GPL-2.0
++
++//! So far the kernel's `Box` and `Vec` types can't be used by userspace test cases, since all users
++//! of those types (e.g. `CString`) use kernel allocators for instantiation.
++//!
++//! In order to allow userspace test cases to make use of such types as well, implement the
++//! `Cmalloc` allocator within the allocator_test module and type alias all kernel allocators to
++//! `Cmalloc`. The `Cmalloc` allocator uses libc's `realloc()` function as allocator backend.
++
++#![allow(missing_docs)]
++
++use super::{flags::*, AllocError, Allocator, Flags};
++use core::alloc::Layout;
++use core::cmp;
++use core::ptr;
++use core::ptr::NonNull;
++
++/// The userspace allocator based on libc.
++pub struct Cmalloc;
++
++pub type Kmalloc = Cmalloc;
++pub type Vmalloc = Kmalloc;
++pub type KVmalloc = Kmalloc;
++
++extern "C" {
++    #[link_name = "aligned_alloc"]
++    fn libc_aligned_alloc(align: usize, size: usize) -> *mut crate::ffi::c_void;
++
++    #[link_name = "free"]
++    fn libc_free(ptr: *mut crate::ffi::c_void);
++}
++
++// SAFETY:
++// - memory remains valid until it is explicitly freed,
++// - passing a pointer to a valid memory allocation created by this `Allocator` is always OK,
++// - `realloc` provides the guarantees as provided in the `# Guarantees` section.
++unsafe impl Allocator for Cmalloc {
++    unsafe fn realloc(
++        ptr: Option<NonNull<u8>>,
++        layout: Layout,
++        old_layout: Layout,
++        flags: Flags,
++    ) -> Result<NonNull<[u8]>, AllocError> {
++        let src = match ptr {
++            Some(src) => {
++                if old_layout.size() == 0 {
++                    ptr::null_mut()
++                } else {
++                    src.as_ptr()
++                }
++            }
++            None => ptr::null_mut(),
++        };
++
++        if layout.size() == 0 {
++            // SAFETY: `src` is either NULL or was previously allocated with this `Allocator`
++            unsafe { libc_free(src.cast()) };
++
++            return Ok(NonNull::slice_from_raw_parts(
++                crate::alloc::dangling_from_layout(layout),
++                0,
++            ));
++        }
++
++        // SAFETY: Returns either NULL or a pointer to a memory allocation that satisfies or
++        // exceeds the given size and alignment requirements.
++        let dst = unsafe { libc_aligned_alloc(layout.align(), layout.size()) } as *mut u8;
++        let dst = NonNull::new(dst).ok_or(AllocError)?;
++
++        if flags.contains(__GFP_ZERO) {
++            // SAFETY: The preceding calls to `libc_aligned_alloc` and `NonNull::new`
++            // guarantee that `dst` points to memory of at least `layout.size()` bytes.
++            unsafe { dst.as_ptr().write_bytes(0, layout.size()) };
++        }
++
++        if !src.is_null() {
++            // SAFETY:
++            // - `src` has previously been allocated with this `Allocator`; `dst` has just been
++            //   newly allocated, hence the memory regions do not overlap.
++            // - both `src` and `dst` are properly aligned and valid for reads and writes
++            unsafe {
++                ptr::copy_nonoverlapping(
++                    src,
++                    dst.as_ptr(),
++                    cmp::min(layout.size(), old_layout.size()),
++                )
++            };
++        }
++
++        // SAFETY: `src` is either NULL or was previously allocated with this `Allocator`
++        unsafe { libc_free(src.cast()) };
++
++        Ok(NonNull::slice_from_raw_parts(dst, layout.size()))
++    }
++}
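// Illustrative note, not part of the patch: under `test`/`testlib` the alias
// chain above (Kmalloc = Vmalloc = KVmalloc = Cmalloc) lets doctests exercise
// kernel container types on top of libc, e.g.:
//
//     let b = KBox::new(7, GFP_KERNEL)?; // backed by aligned_alloc()/free()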
+diff --git a/rust/kernel/alloc/box_ext.rs b/rust/kernel/alloc/box_ext.rs
+deleted file mode 100644
+index 7009ad78d4e082..00000000000000
+--- a/rust/kernel/alloc/box_ext.rs
++++ /dev/null
+@@ -1,89 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-
+-//! Extensions to [`Box`] for fallible allocations.
+-
+-use super::{AllocError, Flags};
+-use alloc::boxed::Box;
+-use core::{mem::MaybeUninit, ptr, result::Result};
+-
+-/// Extensions to [`Box`].
+-pub trait BoxExt<T>: Sized {
+-    /// Allocates a new box.
+-    ///
+-    /// The allocation may fail, in which case an error is returned.
+-    fn new(x: T, flags: Flags) -> Result<Self, AllocError>;
+-
+-    /// Allocates a new uninitialised box.
+-    ///
+-    /// The allocation may fail, in which case an error is returned.
+-    fn new_uninit(flags: Flags) -> Result<Box<MaybeUninit<T>>, AllocError>;
+-
+-    /// Drops the contents, but keeps the allocation.
+-    ///
+-    /// # Examples
+-    ///
+-    /// ```
+-    /// use kernel::alloc::{flags, box_ext::BoxExt};
+-    /// let value = Box::new([0; 32], flags::GFP_KERNEL)?;
+-    /// assert_eq!(*value, [0; 32]);
+-    /// let mut value = Box::drop_contents(value);
+-    /// // Now we can re-use `value`:
+-    /// value.write([1; 32]);
+-    /// // SAFETY: We just wrote to it.
+-    /// let value = unsafe { value.assume_init() };
+-    /// assert_eq!(*value, [1; 32]);
+-    /// # Ok::<(), Error>(())
+-    /// ```
+-    fn drop_contents(this: Self) -> Box<MaybeUninit<T>>;
+-}
+-
+-impl<T> BoxExt<T> for Box<T> {
+-    fn new(x: T, flags: Flags) -> Result<Self, AllocError> {
+-        let mut b = <Self as BoxExt<_>>::new_uninit(flags)?;
+-        b.write(x);
+-        // SAFETY: We just wrote to it.
+-        Ok(unsafe { b.assume_init() })
+-    }
+-
+-    #[cfg(any(test, testlib))]
+-    fn new_uninit(_flags: Flags) -> Result<Box<MaybeUninit<T>>, AllocError> {
+-        Ok(Box::new_uninit())
+-    }
+-
+-    #[cfg(not(any(test, testlib)))]
+-    fn new_uninit(flags: Flags) -> Result<Box<MaybeUninit<T>>, AllocError> {
+-        let ptr = if core::mem::size_of::<MaybeUninit<T>>() == 0 {
+-            core::ptr::NonNull::<_>::dangling().as_ptr()
+-        } else {
+-            let layout = core::alloc::Layout::new::<MaybeUninit<T>>();
+-
+-            // SAFETY: Memory is being allocated (first arg is null). The only other source of
+-            // safety issues is sleeping on atomic context, which is addressed by klint. Lastly,
+-            // the type is not a SZT (checked above).
+-            let ptr =
+-                unsafe { super::allocator::krealloc_aligned(core::ptr::null_mut(), layout, flags) };
+-            if ptr.is_null() {
+-                return Err(AllocError);
+-            }
+-
+-            ptr.cast::<MaybeUninit<T>>()
+-        };
+-
+-        // SAFETY: For non-zero-sized types, we allocate above using the global allocator. For
+-        // zero-sized types, we use `NonNull::dangling`.
+-        Ok(unsafe { Box::from_raw(ptr) })
+-    }
+-
+-    fn drop_contents(this: Self) -> Box<MaybeUninit<T>> {
+-        let ptr = Box::into_raw(this);
+-        // SAFETY: `ptr` is valid, because it came from `Box::into_raw`.
+-        unsafe { ptr::drop_in_place(ptr) };
+-
+-        // CAST: `MaybeUninit<T>` is a transparent wrapper of `T`.
+-        let ptr = ptr.cast::<MaybeUninit<T>>();
+-
+-        // SAFETY: `ptr` is valid for writes, because it came from `Box::into_raw` and it is valid for
+-        // reads, since the pointer came from `Box::into_raw` and the type is `MaybeUninit<T>`.
+-        unsafe { Box::from_raw(ptr) }
+-    }
+-}
+diff --git a/rust/kernel/alloc/kbox.rs b/rust/kernel/alloc/kbox.rs
+new file mode 100644
+index 00000000000000..9ce414361c2c6d
+--- /dev/null
++++ b/rust/kernel/alloc/kbox.rs
+@@ -0,0 +1,456 @@
++// SPDX-License-Identifier: GPL-2.0
++
++//! Implementation of [`Box`].
++
++#[allow(unused_imports)] // Used in doc comments.
++use super::allocator::{KVmalloc, Kmalloc, Vmalloc};
++use super::{AllocError, Allocator, Flags};
++use core::alloc::Layout;
++use core::fmt;
++use core::marker::PhantomData;
++use core::mem::ManuallyDrop;
++use core::mem::MaybeUninit;
++use core::ops::{Deref, DerefMut};
++use core::pin::Pin;
++use core::ptr::NonNull;
++use core::result::Result;
++
++use crate::init::{InPlaceInit, InPlaceWrite, Init, PinInit};
++use crate::types::ForeignOwnable;
++
++/// The kernel's [`Box`] type -- a heap allocation for a single value of type `T`.
++///
++/// This is the kernel's version of the Rust stdlib's `Box`. There are several differences,
++/// for example no `noalias` attribute is emitted and partially moving out of a `Box` is not
++/// supported. There are also several API differences, e.g. `Box` always requires an [`Allocator`]
++/// implementation to be passed as generic, page [`Flags`] when allocating memory and all functions
++/// that may allocate memory are fallible.
++///
++/// `Box` works with any of the kernel's allocators, e.g. [`Kmalloc`], [`Vmalloc`] or [`KVmalloc`].
++/// There are aliases for `Box` with these allocators ([`KBox`], [`VBox`], [`KVBox`]).
++///
++/// When dropping a [`Box`], the value is also dropped and the heap memory is automatically freed.
++///
++/// # Examples
++///
++/// ```
++/// let b = KBox::<u64>::new(24_u64, GFP_KERNEL)?;
++///
++/// assert_eq!(*b, 24_u64);
++/// # Ok::<(), Error>(())
++/// ```
++///
++/// ```
++/// # use kernel::bindings;
++/// const SIZE: usize = bindings::KMALLOC_MAX_SIZE as usize + 1;
++/// struct Huge([u8; SIZE]);
++///
++/// assert!(KBox::<Huge>::new_uninit(GFP_KERNEL | __GFP_NOWARN).is_err());
++/// ```
++///
++/// ```
++/// # use kernel::bindings;
++/// const SIZE: usize = bindings::KMALLOC_MAX_SIZE as usize + 1;
++/// struct Huge([u8; SIZE]);
++///
++/// assert!(KVBox::<Huge>::new_uninit(GFP_KERNEL).is_ok());
++/// ```
++///
++/// # Invariants
++///
++/// `self.0` is always properly aligned and either points to memory allocated with `A` or, for
++/// zero-sized types, is a dangling, well aligned pointer.
++#[repr(transparent)]
++pub struct Box<T: ?Sized, A: Allocator>(NonNull<T>, PhantomData<A>);
++
++/// Type alias for [`Box`] with a [`Kmalloc`] allocator.
++///
++/// # Examples
++///
++/// ```
++/// let b = KBox::new(24_u64, GFP_KERNEL)?;
++///
++/// assert_eq!(*b, 24_u64);
++/// # Ok::<(), Error>(())
++/// ```
++pub type KBox<T> = Box<T, super::allocator::Kmalloc>;
++
++/// Type alias for [`Box`] with a [`Vmalloc`] allocator.
++///
++/// # Examples
++///
++/// ```
++/// let b = VBox::new(24_u64, GFP_KERNEL)?;
++///
++/// assert_eq!(*b, 24_u64);
++/// # Ok::<(), Error>(())
++/// ```
++pub type VBox<T> = Box<T, super::allocator::Vmalloc>;
++
++/// Type alias for [`Box`] with a [`KVmalloc`] allocator.
++///
++/// # Examples
++///
++/// ```
++/// let b = KVBox::new(24_u64, GFP_KERNEL)?;
++///
++/// assert_eq!(*b, 24_u64);
++/// # Ok::<(), Error>(())
++/// ```
++pub type KVBox<T> = Box<T, super::allocator::KVmalloc>;
++
++// SAFETY: `Box` is `Send` if `T` is `Send` because the `Box` owns a `T`.
++unsafe impl<T, A> Send for Box<T, A>
++where
++    T: Send + ?Sized,
++    A: Allocator,
++{
++}
++
++// SAFETY: `Box` is `Sync` if `T` is `Sync` because the `Box` owns a `T`.
++unsafe impl<T, A> Sync for Box<T, A>
++where
++    T: Sync + ?Sized,
++    A: Allocator,
++{
++}
++
++impl<T, A> Box<T, A>
++where
++    T: ?Sized,
++    A: Allocator,
++{
++    /// Creates a new `Box<T, A>` from a raw pointer.
++    ///
++    /// # Safety
++    ///
++    /// For non-ZSTs, `raw` must point at an allocation allocated with `A` that is sufficiently
++    /// aligned for and holds a valid `T`. The caller passes ownership of the allocation to the
++    /// `Box`.
++    ///
++    /// For ZSTs, `raw` must be a dangling, well aligned pointer.
++    #[inline]
++    pub const unsafe fn from_raw(raw: *mut T) -> Self {
++        // INVARIANT: Validity of `raw` is guaranteed by the safety preconditions of this function.
++        // SAFETY: By the safety preconditions of this function, `raw` is not a NULL pointer.
++        Self(unsafe { NonNull::new_unchecked(raw) }, PhantomData)
++    }
++
++    /// Consumes the `Box<T, A>` and returns a raw pointer.
++    ///
++    /// This will not run the destructor of `T` and for non-ZSTs the allocation will stay alive
++    /// indefinitely. Use [`Box::from_raw`] to recover the [`Box`], drop the value and free the
++    /// allocation, if any.
++    ///
++    /// # Examples
++    ///
++    /// ```
++    /// let x = KBox::new(24, GFP_KERNEL)?;
++    /// let ptr = KBox::into_raw(x);
++    /// // SAFETY: `ptr` comes from a previous call to `KBox::into_raw`.
++    /// let x = unsafe { KBox::from_raw(ptr) };
++    ///
++    /// assert_eq!(*x, 24);
++    /// # Ok::<(), Error>(())
++    /// ```
++    #[inline]
++    pub fn into_raw(b: Self) -> *mut T {
++        ManuallyDrop::new(b).0.as_ptr()
++    }
++
++    /// Consumes and leaks the `Box<T, A>` and returns a mutable reference.
++    ///
++    /// See [`Box::into_raw`] for more details.
++    #[inline]
++    pub fn leak<'a>(b: Self) -> &'a mut T {
++        // SAFETY: `Box::into_raw` always returns a properly aligned and dereferenceable pointer
++        // which points to an initialized instance of `T`.
++        unsafe { &mut *Box::into_raw(b) }
++    }
++}
++
++impl<T, A> Box<MaybeUninit<T>, A>
++where
++    A: Allocator,
++{
++    /// Converts a `Box<MaybeUninit<T>, A>` to a `Box<T, A>`.
++    ///
++    /// It is undefined behavior to call this function while the value inside of `self` is not yet
++    /// fully initialized.
++    ///
++    /// # Safety
++    ///
++    /// Callers must ensure that the value inside of `self` is in an initialized state.
++    pub unsafe fn assume_init(self) -> Box<T, A> {
++        let raw = Self::into_raw(self);
++
++        // SAFETY: `raw` comes from a previous call to `Box::into_raw`. By the safety requirements
++        // of this function, the value inside the `Box` is in an initialized state. Hence, it is
++        // safe to reconstruct the `Box` as `Box<T, A>`.
++        unsafe { Box::from_raw(raw.cast()) }
++    }
++
++    /// Writes the value and converts to `Box<T, A>`.
++    pub fn write(mut self, value: T) -> Box<T, A> {
++        (*self).write(value);
++
++        // SAFETY: We've just initialized `self`'s value.
++        unsafe { self.assume_init() }
++    }
++}
++
++impl<T, A> Box<T, A>
++where
++    A: Allocator,
++{
++    /// Creates a new `Box<T, A>` and initializes its contents with `x`.
++    ///
++    /// New memory is allocated with `A`. The allocation may fail, in which case an error is
++    /// returned. For ZSTs no memory is allocated.
++    pub fn new(x: T, flags: Flags) -> Result<Self, AllocError> {
++        let b = Self::new_uninit(flags)?;
++        Ok(Box::write(b, x))
++    }
++
++    /// Creates a new `Box<T, A>` with uninitialized contents.
++    ///
++    /// New memory is allocated with `A`. The allocation may fail, in which case an error is
++    /// returned. For ZSTs no memory is allocated.
++    ///
++    /// # Examples
++    ///
++    /// ```
++    /// let b = KBox::<u64>::new_uninit(GFP_KERNEL)?;
++    /// let b = KBox::write(b, 24);
++    ///
++    /// assert_eq!(*b, 24_u64);
++    /// # Ok::<(), Error>(())
++    /// ```
++    pub fn new_uninit(flags: Flags) -> Result<Box<MaybeUninit<T>, A>, AllocError> {
++        let layout = Layout::new::<MaybeUninit<T>>();
++        let ptr = A::alloc(layout, flags)?;
++
++        // INVARIANT: `ptr` is either a dangling pointer or points to memory allocated with `A`,
++        // which is sufficient in size and alignment for storing a `T`.
++        Ok(Box(ptr.cast(), PhantomData))
++    }
++
++    /// Constructs a new `Pin<Box<T, A>>`. If `T` does not implement [`Unpin`], then `x` will be
++    /// pinned in memory and can't be moved.
++    #[inline]
++    pub fn pin(x: T, flags: Flags) -> Result<Pin<Box<T, A>>, AllocError>
++    where
++        A: 'static,
++    {
++        Ok(Self::new(x, flags)?.into())
++    }
++
++    /// Forgets the contents (does not run the destructor), but keeps the allocation.
++    fn forget_contents(this: Self) -> Box<MaybeUninit<T>, A> {
++        let ptr = Self::into_raw(this);
++
++        // SAFETY: `ptr` is valid, because it came from `Box::into_raw`.
++        unsafe { Box::from_raw(ptr.cast()) }
++    }
++
++    /// Drops the contents, but keeps the allocation.
++    ///
++    /// # Examples
++    ///
++    /// ```
++    /// let value = KBox::new([0; 32], GFP_KERNEL)?;
++    /// assert_eq!(*value, [0; 32]);
++    /// let value = KBox::drop_contents(value);
++    /// // Now we can re-use `value`:
++    /// let value = KBox::write(value, [1; 32]);
++    /// assert_eq!(*value, [1; 32]);
++    /// # Ok::<(), Error>(())
++    /// ```
++    pub fn drop_contents(this: Self) -> Box<MaybeUninit<T>, A> {
++        let ptr = this.0.as_ptr();
++
++        // SAFETY: `ptr` is valid, because it came from `this`. After this call we never access the
++        // value stored in `this` again.
++        unsafe { core::ptr::drop_in_place(ptr) };
++
++        Self::forget_contents(this)
++    }
++
++    /// Moves the `Box`'s value out of the `Box` and consumes the `Box`.
++    pub fn into_inner(b: Self) -> T {
++        // SAFETY: By the type invariant `&*b` is valid for `read`.
++        let value = unsafe { core::ptr::read(&*b) };
++        let _ = Self::forget_contents(b);
++        value
++    }
++}
++
++impl<T, A> From<Box<T, A>> for Pin<Box<T, A>>
++where
++    T: ?Sized,
++    A: Allocator,
++{
++    /// Converts a `Box<T, A>` into a `Pin<Box<T, A>>`. If `T` does not implement [`Unpin`], then
++    /// `*b` will be pinned in memory and can't be moved.
++    ///
++    /// This moves `b` into `Pin` without moving `*b` or allocating and copying any memory.
++    fn from(b: Box<T, A>) -> Self {
++        // SAFETY: The value wrapped inside a `Pin<Box<T, A>>` cannot be moved or replaced as long
++        // as `T` does not implement `Unpin`.
++        unsafe { Pin::new_unchecked(b) }
++    }
++}
++
++impl<T, A> InPlaceWrite<T> for Box<MaybeUninit<T>, A>
++where
++    A: Allocator + 'static,
++{
++    type Initialized = Box<T, A>;
++
++    fn write_init<E>(mut self, init: impl Init<T, E>) -> Result<Self::Initialized, E> {
++        let slot = self.as_mut_ptr();
++        // SAFETY: When init errors/panics, slot will get deallocated but not dropped,
++        // slot is valid.
++        unsafe { init.__init(slot)? };
++        // SAFETY: All fields have been initialized.
++        Ok(unsafe { Box::assume_init(self) })
++    }
++
++    fn write_pin_init<E>(mut self, init: impl PinInit<T, E>) -> Result<Pin<Self::Initialized>, E> {
++        let slot = self.as_mut_ptr();
++        // SAFETY: When init errors/panics, slot will get deallocated but not dropped,
++        // slot is valid and will not be moved, because we pin it later.
++        unsafe { init.__pinned_init(slot)? };
++        // SAFETY: All fields have been initialized.
++        Ok(unsafe { Box::assume_init(self) }.into())
++    }
++}
++
++impl<T, A> InPlaceInit<T> for Box<T, A>
++where
++    A: Allocator + 'static,
++{
++    type PinnedSelf = Pin<Self>;
++
++    #[inline]
++    fn try_pin_init<E>(init: impl PinInit<T, E>, flags: Flags) -> Result<Pin<Self>, E>
++    where
++        E: From<AllocError>,
++    {
++        Box::<_, A>::new_uninit(flags)?.write_pin_init(init)
++    }
++
++    #[inline]
++    fn try_init<E>(init: impl Init<T, E>, flags: Flags) -> Result<Self, E>
++    where
++        E: From<AllocError>,
++    {
++        Box::<_, A>::new_uninit(flags)?.write_init(init)
++    }
++}
++
++impl<T: 'static, A> ForeignOwnable for Box<T, A>
++where
++    A: Allocator,
++{
++    type Borrowed<'a> = &'a T;
++
++    fn into_foreign(self) -> *const crate::ffi::c_void {
++        Box::into_raw(self) as _
++    }
++
++    unsafe fn from_foreign(ptr: *const crate::ffi::c_void) -> Self {
++        // SAFETY: The safety requirements of this function ensure that `ptr` comes from a previous
++        // call to `Self::into_foreign`.
++        unsafe { Box::from_raw(ptr as _) }
++    }
++
++    unsafe fn borrow<'a>(ptr: *const crate::ffi::c_void) -> &'a T {
++        // SAFETY: The safety requirements of this method ensure that the object remains alive and
++        // immutable for the duration of 'a.
++        unsafe { &*ptr.cast() }
++    }
++}
++
++impl<T: 'static, A> ForeignOwnable for Pin<Box<T, A>>
++where
++    A: Allocator,
++{
++    type Borrowed<'a> = Pin<&'a T>;
++
++    fn into_foreign(self) -> *const crate::ffi::c_void {
++        // SAFETY: We are still treating the box as pinned.
++        Box::into_raw(unsafe { Pin::into_inner_unchecked(self) }) as _
++    }
++
++    unsafe fn from_foreign(ptr: *const crate::ffi::c_void) -> Self {
++        // SAFETY: The safety requirements of this function ensure that `ptr` comes from a previous
++        // call to `Self::into_foreign`.
++        unsafe { Pin::new_unchecked(Box::from_raw(ptr as _)) }
++    }
++
++    unsafe fn borrow<'a>(ptr: *const crate::ffi::c_void) -> Pin<&'a T> {
++        // SAFETY: The safety requirements for this function ensure that the object is still alive,
++        // so it is safe to dereference the raw pointer.
++        // The safety requirements of `from_foreign` also ensure that the object remains alive for
++        // the lifetime of the returned value.
++        let r = unsafe { &*ptr.cast() };
++
++        // SAFETY: This pointer originates from a `Pin<Box<T>>`.
++        unsafe { Pin::new_unchecked(r) }
++    }
++}
++
++impl<T, A> Deref for Box<T, A>
++where
++    T: ?Sized,
++    A: Allocator,
++{
++    type Target = T;
++
++    fn deref(&self) -> &T {
++        // SAFETY: `self.0` is always properly aligned, dereferenceable and points to an initialized
++        // instance of `T`.
++        unsafe { self.0.as_ref() }
++    }
++}
++
++impl<T, A> DerefMut for Box<T, A>
++where
++    T: ?Sized,
++    A: Allocator,
++{
++    fn deref_mut(&mut self) -> &mut T {
++        // SAFETY: `self.0` is always properly aligned, dereferenceable and points to an initialized
++        // instance of `T`.
++        unsafe { self.0.as_mut() }
++    }
++}
++
++impl<T, A> fmt::Debug for Box<T, A>
++where
++    T: ?Sized + fmt::Debug,
++    A: Allocator,
++{
++    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
++        fmt::Debug::fmt(&**self, f)
++    }
++}
++
++impl<T, A> Drop for Box<T, A>
++where
++    T: ?Sized,
++    A: Allocator,
++{
++    fn drop(&mut self) {
++        let layout = Layout::for_value::<T>(self);
++
++        // SAFETY: The pointer in `self.0` is guaranteed to be valid by the type invariant.
++        unsafe { core::ptr::drop_in_place::<T>(self.deref_mut()) };
++
++        // SAFETY:
++        // - `self.0` was previously allocated with `A`.
++        // - `layout` is equal to the `Layout` `self.0` was allocated with.
++        unsafe { A::free(self.0.cast(), layout) };
++    }
++}
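// Illustrative sketch, not part of the patch: the `ForeignOwnable` impls above
// let a `KBox` cross a C boundary as a raw pointer and be reclaimed later:

fn foreign_round_trip() -> Result<(), kernel::alloc::AllocError> {
    use kernel::alloc::{flags::GFP_KERNEL, KBox};
    use kernel::types::ForeignOwnable;

    let b = KBox::new(42_u64, GFP_KERNEL)?;

    // Hand ownership to the C side as `*const c_void` (e.g. stashed in a
    // driver private field).
    let raw = b.into_foreign();

    // SAFETY: `raw` came from `into_foreign` above and is reclaimed once.
    let b: KBox<u64> = unsafe { KBox::from_foreign(raw) };
    assert_eq!(*b, 42);
    Ok(())
}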
+diff --git a/rust/kernel/alloc/kvec.rs b/rust/kernel/alloc/kvec.rs
+new file mode 100644
+index 00000000000000..ae9d072741cedb
+--- /dev/null
++++ b/rust/kernel/alloc/kvec.rs
+@@ -0,0 +1,913 @@
++// SPDX-License-Identifier: GPL-2.0
++
++//! Implementation of [`Vec`].
++
++use super::{
++    allocator::{KVmalloc, Kmalloc, Vmalloc},
++    layout::ArrayLayout,
++    AllocError, Allocator, Box, Flags,
++};
++use core::{
++    fmt,
++    marker::PhantomData,
++    mem::{ManuallyDrop, MaybeUninit},
++    ops::Deref,
++    ops::DerefMut,
++    ops::Index,
++    ops::IndexMut,
++    ptr,
++    ptr::NonNull,
++    slice,
++    slice::SliceIndex,
++};
++
++/// Create a [`KVec`] containing the arguments.
++///
++/// New memory is allocated with `GFP_KERNEL`.
++///
++/// # Examples
++///
++/// ```
++/// let mut v = kernel::kvec![];
++/// v.push(1, GFP_KERNEL)?;
++/// assert_eq!(v, [1]);
++///
++/// let mut v = kernel::kvec![1; 3]?;
++/// v.push(4, GFP_KERNEL)?;
++/// assert_eq!(v, [1, 1, 1, 4]);
++///
++/// let mut v = kernel::kvec![1, 2, 3]?;
++/// v.push(4, GFP_KERNEL)?;
++/// assert_eq!(v, [1, 2, 3, 4]);
++///
++/// # Ok::<(), Error>(())
++/// ```
++#[macro_export]
++macro_rules! kvec {
++    () => (
++        $crate::alloc::KVec::new()
++    );
++    ($elem:expr; $n:expr) => (
++        $crate::alloc::KVec::from_elem($elem, $n, GFP_KERNEL)
++    );
++    ($($x:expr),+ $(,)?) => (
++        match $crate::alloc::KBox::new_uninit(GFP_KERNEL) {
++            Ok(b) => Ok($crate::alloc::KVec::from($crate::alloc::KBox::write(b, [$($x),+]))),
++            Err(e) => Err(e),
++        }
++    );
++}
++
++/// The kernel's [`Vec`] type.
++///
++/// A contiguous growable array type with contents allocated with the kernel's allocators (e.g.
++/// [`Kmalloc`], [`Vmalloc`] or [`KVmalloc`]), written `Vec<T, A>`.
++///
++/// For non-zero-sized values, a [`Vec`] will use the given allocator `A` for its allocation. For
++/// the most common allocators the type aliases [`KVec`], [`VVec`] and [`KVVec`] exist.
++///
++/// For zero-sized types the [`Vec`]'s pointer must be `dangling_mut::<T>`; no memory is allocated.
++///
++/// Generally, [`Vec`] consists of a pointer that represents the vector's backing buffer, the
++/// capacity of the vector (the number of elements that currently fit into the vector), its length
++/// (the number of elements that are currently stored in the vector) and the `Allocator` type used
++/// to allocate (and free) the backing buffer.
++///
++/// A [`Vec`] can be deconstructed into and (re-)constructed from its previously named raw parts
++/// and manually modified.
++///
++/// If required, [`Vec`]'s backing buffer is automatically increased (re-allocated) when elements
++/// are added to the vector.
++///
++/// # Invariants
++///
++/// - `self.ptr` is always properly aligned and either points to memory allocated with `A` or, for
++///   zero-sized types, is a dangling, well aligned pointer.
++///
++/// - `self.len` always represents the exact number of elements stored in the vector.
++///
++/// - `self.layout` represents the absolute number of elements that can be stored within the vector
++///   without re-allocation. For ZSTs `self.layout`'s capacity is zero. However, it is legal for the
++///   backing buffer to be larger than `layout`.
++///
++/// - The `Allocator` type `A` of the vector is the exact same `Allocator` type the backing buffer
++///   was allocated with (and must be freed with).
++pub struct Vec<T, A: Allocator> {
++    ptr: NonNull<T>,
++    /// Represents the actual buffer size as `cap` times `size_of::<T>()` bytes.
++    ///
++    /// Note: This isn't quite the same as `Self::capacity`, which in contrast returns the number of
++    /// elements we can still store without reallocating.
++    layout: ArrayLayout<T>,
++    len: usize,
++    _p: PhantomData<A>,
++}
++
++/// Type alias for [`Vec`] with a [`Kmalloc`] allocator.
++///
++/// # Examples
++///
++/// ```
++/// let mut v = KVec::new();
++/// v.push(1, GFP_KERNEL)?;
++/// assert_eq!(&v, &[1]);
++///
++/// # Ok::<(), Error>(())
++/// ```
++pub type KVec<T> = Vec<T, Kmalloc>;
++
++/// Type alias for [`Vec`] with a [`Vmalloc`] allocator.
++///
++/// # Examples
++///
++/// ```
++/// let mut v = VVec::new();
++/// v.push(1, GFP_KERNEL)?;
++/// assert_eq!(&v, &[1]);
++///
++/// # Ok::<(), Error>(())
++/// ```
++pub type VVec<T> = Vec<T, Vmalloc>;
++
++/// Type alias for [`Vec`] with a [`KVmalloc`] allocator.
++///
++/// # Examples
++///
++/// ```
++/// let mut v = KVVec::new();
++/// v.push(1, GFP_KERNEL)?;
++/// assert_eq!(&v, &[1]);
++///
++/// # Ok::<(), Error>(())
++/// ```
++pub type KVVec<T> = Vec<T, KVmalloc>;
++
++// SAFETY: `Vec` is `Send` if `T` is `Send` because `Vec` owns its elements.
++unsafe impl<T, A> Send for Vec<T, A>
++where
++    T: Send,
++    A: Allocator,
++{
++}
++
++// SAFETY: `Vec` is `Sync` if `T` is `Sync` because `Vec` owns its elements.
++unsafe impl<T, A> Sync for Vec<T, A>
++where
++    T: Sync,
++    A: Allocator,
++{
++}
++
++impl<T, A> Vec<T, A>
++where
++    A: Allocator,
++{
++    #[inline]
++    const fn is_zst() -> bool {
++        core::mem::size_of::<T>() == 0
++    }
++
++    /// Returns the number of elements that can be stored within the vector without allocating
++    /// additional memory.
++    pub fn capacity(&self) -> usize {
++        if const { Self::is_zst() } {
++            usize::MAX
++        } else {
++            self.layout.len()
++        }
++    }
++
++    /// Returns the number of elements stored within the vector.
++    #[inline]
++    pub fn len(&self) -> usize {
++        self.len
++    }
++
++    /// Forcefully sets `self.len` to `new_len`.
++    ///
++    /// # Safety
++    ///
++    /// - `new_len` must be less than or equal to [`Self::capacity`].
++    /// - If `new_len` is greater than `self.len`, all elements within the interval
++    ///   [`self.len`, `new_len`) must be initialized.
++    #[inline]
++    pub unsafe fn set_len(&mut self, new_len: usize) {
++        debug_assert!(new_len <= self.capacity());
++        self.len = new_len;
++    }
++
++    /// Returns a slice of the entire vector.
++    #[inline]
++    pub fn as_slice(&self) -> &[T] {
++        self
++    }
++
++    /// Returns a mutable slice of the entire vector.
++    #[inline]
++    pub fn as_mut_slice(&mut self) -> &mut [T] {
++        self
++    }
++
++    /// Returns a mutable raw pointer to the vector's backing buffer, or, if `T` is a ZST, a
++    /// dangling raw pointer.
++    #[inline]
++    pub fn as_mut_ptr(&mut self) -> *mut T {
++        self.ptr.as_ptr()
++    }
++
++    /// Returns a raw pointer to the vector's backing buffer, or, if `T` is a ZST, a dangling raw
++    /// pointer.
++    #[inline]
++    pub fn as_ptr(&self) -> *const T {
++        self.ptr.as_ptr()
++    }
++
++    /// Returns `true` if the vector contains no elements, `false` otherwise.
++    ///
++    /// # Examples
++    ///
++    /// ```
++    /// let mut v = KVec::new();
++    /// assert!(v.is_empty());
++    ///
++    /// v.push(1, GFP_KERNEL);
++    /// assert!(!v.is_empty());
++    /// ```
++    #[inline]
++    pub fn is_empty(&self) -> bool {
++        self.len() == 0
++    }
++
++    /// Creates a new, empty `Vec<T, A>`.
++    ///
++    /// This method does not allocate by itself.
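++    ///
++    /// # Examples
++    ///
++    /// ```
++    /// // A new, empty vector; no allocation has happened yet.
++    /// let v = KVec::<u8>::new();
++    ///
++    /// assert!(v.is_empty());
++    /// assert_eq!(v.capacity(), 0);
++    /// ```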
++    #[inline]
++    pub const fn new() -> Self {
++        // INVARIANT: Since this is a new, empty `Vec` with no backing memory yet,
++        // - `ptr` is a properly aligned dangling pointer for type `T`,
++        // - `layout` is an empty `ArrayLayout` (zero capacity)
++        // - `len` is zero, since no elements can be or have been stored,
++        // - `A` is always valid.
++        Self {
++            ptr: NonNull::dangling(),
++            layout: ArrayLayout::empty(),
++            len: 0,
++            _p: PhantomData::<A>,
++        }
++    }
++
++    /// Returns a slice of `MaybeUninit<T>` for the remaining spare capacity of the vector.
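++    ///
++    /// # Examples
++    ///
++    /// ```
++    /// // A minimal sketch: initialize the spare capacity by hand, then commit the new length.
++    /// let mut v = KVec::with_capacity(2, GFP_KERNEL)?;
++    ///
++    /// v.spare_capacity_mut()[0].write(10);
++    /// v.spare_capacity_mut()[1].write(20);
++    ///
++    /// // SAFETY: The first two elements of the spare capacity were just initialized.
++    /// unsafe { v.set_len(2) };
++    ///
++    /// assert_eq!(&v, &[10, 20]);
++    /// # Ok::<(), Error>(())
++    /// ```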
++    pub fn spare_capacity_mut(&mut self) -> &mut [MaybeUninit<T>] {
++        // SAFETY:
++        // - `self.len` is smaller than `self.capacity` and hence, the resulting pointer is
++        //   guaranteed to be part of the same allocated object.
++        // - `self.len` can not overflow `isize`.
++        let ptr = unsafe { self.as_mut_ptr().add(self.len) } as *mut MaybeUninit<T>;
++
++        // SAFETY: The memory between `self.len` and `self.capacity` is guaranteed to be allocated
++        // and valid, but uninitialized.
++        unsafe { slice::from_raw_parts_mut(ptr, self.capacity() - self.len) }
++    }
++
++    /// Appends an element to the back of the [`Vec`] instance.
++    ///
++    /// # Examples
++    ///
++    /// ```
++    /// let mut v = KVec::new();
++    /// v.push(1, GFP_KERNEL)?;
++    /// assert_eq!(&v, &[1]);
++    ///
++    /// v.push(2, GFP_KERNEL)?;
++    /// assert_eq!(&v, &[1, 2]);
++    /// # Ok::<(), Error>(())
++    /// ```
++    pub fn push(&mut self, v: T, flags: Flags) -> Result<(), AllocError> {
++        self.reserve(1, flags)?;
++
++        // SAFETY:
++        // - `self.len` is smaller than `self.capacity` and hence, the resulting pointer is
++        //   guaranteed to be part of the same allocated object.
++        // - `self.len` can not overflow `isize`.
++        let ptr = unsafe { self.as_mut_ptr().add(self.len) };
++
++        // SAFETY:
++        // - `ptr` is properly aligned and valid for writes.
++        unsafe { core::ptr::write(ptr, v) };
++
++        // SAFETY: We just initialised the first spare entry, so it is safe to increase the length
++        // by 1. We also know that the new length is <= capacity because of the previous call to
++        // `reserve` above.
++        unsafe { self.set_len(self.len() + 1) };
++        Ok(())
++    }
++
++    /// Creates a new [`Vec`] instance with at least the given capacity.
++    ///
++    /// # Examples
++    ///
++    /// ```
++    /// let v = KVec::<u32>::with_capacity(20, GFP_KERNEL)?;
++    ///
++    /// assert!(v.capacity() >= 20);
++    /// # Ok::<(), Error>(())
++    /// ```
++    pub fn with_capacity(capacity: usize, flags: Flags) -> Result<Self, AllocError> {
++        let mut v = Vec::new();
++
++        v.reserve(capacity, flags)?;
++
++        Ok(v)
++    }
++
++    /// Creates a `Vec<T, A>` from a pointer, a length and a capacity using the allocator `A`.
++    ///
++    /// # Examples
++    ///
++    /// ```
++    /// let mut v = kernel::kvec![1, 2, 3]?;
++    /// v.reserve(1, GFP_KERNEL)?;
++    ///
++    /// let (mut ptr, mut len, cap) = v.into_raw_parts();
++    ///
++    /// // SAFETY: We've just reserved memory for another element.
++    /// unsafe { ptr.add(len).write(4) };
++    /// len += 1;
++    ///
++    /// // SAFETY: We only wrote an additional element at the end of the `KVec`'s buffer and
++    /// // correspondingly increased the length of the `KVec` by one. Otherwise, we construct it
++    /// // from the exact same raw parts.
++    /// let v = unsafe { KVec::from_raw_parts(ptr, len, cap) };
++    ///
++    /// assert_eq!(v, [1, 2, 3, 4]);
++    ///
++    /// # Ok::<(), Error>(())
++    /// ```
++    ///
++    /// # Safety
++    ///
++    /// If `T` is a ZST:
++    ///
++    /// - `ptr` must be a dangling, well aligned pointer.
++    ///
++    /// Otherwise:
++    ///
++    /// - `ptr` must have been allocated with the allocator `A`.
++    /// - `ptr` must satisfy or exceed the alignment requirements of `T`.
++    /// - `ptr` must point to memory with a size of at least `size_of::<T>() * capacity` bytes.
++    /// - The allocated size in bytes must not be larger than `isize::MAX`.
++    /// - `length` must be less than or equal to `capacity`.
++    /// - The first `length` elements must be initialized values of type `T`.
++    ///
++    /// It is also valid to create an empty `Vec` passing a dangling pointer for `ptr` and zero for
++    /// `cap` and `len`.
++    pub unsafe fn from_raw_parts(ptr: *mut T, length: usize, capacity: usize) -> Self {
++        let layout = if Self::is_zst() {
++            ArrayLayout::empty()
++        } else {
++            // SAFETY: By the safety requirements of this function, `capacity * size_of::<T>()` is
++            // smaller than `isize::MAX`.
++            unsafe { ArrayLayout::new_unchecked(capacity) }
++        };
++
++        // INVARIANT: For ZSTs, we store an empty `ArrayLayout`, all other type invariants are
++        // covered by the safety requirements of this function.
++        Self {
++            // SAFETY: By the safety requirements, `ptr` is either dangling or pointing to a valid
++            // memory allocation, allocated with `A`.
++            ptr: unsafe { NonNull::new_unchecked(ptr) },
++            layout,
++            len: length,
++            _p: PhantomData::<A>,
++        }
++    }
++
++    /// Consumes the `Vec<T, A>` and returns its raw components `pointer`, `length` and `capacity`.
++    ///
++    /// This will not run the destructor of the contained elements and for non-ZSTs the allocation
++    /// will stay alive indefinitely. Use [`Vec::from_raw_parts`] to recover the [`Vec`], drop the
++    /// elements and free the allocation, if any.
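++    ///
++    /// # Examples
++    ///
++    /// ```
++    /// // A round-trip sketch through the raw parts.
++    /// let v = kernel::kvec![1, 2, 3]?;
++    /// let (ptr, len, cap) = v.into_raw_parts();
++    ///
++    /// // SAFETY: The raw parts are passed back unmodified, hence the safety requirements of
++    /// // `from_raw_parts` are upheld.
++    /// let v = unsafe { KVec::from_raw_parts(ptr, len, cap) };
++    /// assert_eq!(v, [1, 2, 3]);
++    ///
++    /// # Ok::<(), Error>(())
++    /// ```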
++    pub fn into_raw_parts(self) -> (*mut T, usize, usize) {
++        let mut me = ManuallyDrop::new(self);
++        let len = me.len();
++        let capacity = me.capacity();
++        let ptr = me.as_mut_ptr();
++        (ptr, len, capacity)
++    }
++
++    /// Ensures that the capacity exceeds the length by at least `additional` elements.
++    ///
++    /// # Examples
++    ///
++    /// ```
++    /// let mut v = KVec::new();
++    /// v.push(1, GFP_KERNEL)?;
++    ///
++    /// v.reserve(10, GFP_KERNEL)?;
++    /// let cap = v.capacity();
++    /// assert!(cap >= 10);
++    ///
++    /// v.reserve(10, GFP_KERNEL)?;
++    /// let new_cap = v.capacity();
++    /// assert_eq!(new_cap, cap);
++    ///
++    /// # Ok::<(), Error>(())
++    /// ```
++    pub fn reserve(&mut self, additional: usize, flags: Flags) -> Result<(), AllocError> {
++        let len = self.len();
++        let cap = self.capacity();
++
++        if cap - len >= additional {
++            return Ok(());
++        }
++
++        if Self::is_zst() {
++            // The capacity is already `usize::MAX` for ZSTs, we can't go higher.
++            return Err(AllocError);
++        }
++
++        // We know that `cap <= isize::MAX` because of the type invariants of `Self`. So the
++        // multiplication by two won't overflow.
++        let new_cap = core::cmp::max(cap * 2, len.checked_add(additional).ok_or(AllocError)?);
++        let layout = ArrayLayout::new(new_cap).map_err(|_| AllocError)?;
++
++        // SAFETY:
++        // - `ptr` is valid because it's either `None` or comes from a previous call to
++        //   `A::realloc`.
++        // - `self.layout` matches the `ArrayLayout` of the preceding allocation.
++        let ptr = unsafe {
++            A::realloc(
++                Some(self.ptr.cast()),
++                layout.into(),
++                self.layout.into(),
++                flags,
++            )?
++        };
++
++        // INVARIANT:
++        // - `layout` is some `ArrayLayout::<T>`,
++        // - `ptr` has been created by `A::realloc` from `layout`.
++        self.ptr = ptr.cast();
++        self.layout = layout;
++
++        Ok(())
++    }
++}
++
++impl<T: Clone, A: Allocator> Vec<T, A> {
++    /// Extends the vector by `n` clones of `value`.
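++    ///
++    /// # Examples
++    ///
++    /// ```
++    /// let mut v = kernel::kvec![1]?;
++    /// v.extend_with(3, 10, GFP_KERNEL)?;
++    /// assert_eq!(&v, &[1, 10, 10, 10]);
++    /// # Ok::<(), Error>(())
++    /// ```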
++    pub fn extend_with(&mut self, n: usize, value: T, flags: Flags) -> Result<(), AllocError> {
++        if n == 0 {
++            return Ok(());
++        }
++
++        self.reserve(n, flags)?;
++
++        let spare = self.spare_capacity_mut();
++
++        for item in spare.iter_mut().take(n - 1) {
++            item.write(value.clone());
++        }
++
++        // We can write the last element directly without cloning needlessly.
++        spare[n - 1].write(value);
++
++        // SAFETY:
++        // - `self.len() + n <= self.capacity()` due to the call to `reserve` above,
++        // - the loop and the line above initialized the next `n` elements.
++        unsafe { self.set_len(self.len() + n) };
++
++        Ok(())
++    }
++
++    /// Pushes clones of the elements of the given slice into the [`Vec`] instance.
++    ///
++    /// # Examples
++    ///
++    /// ```
++    /// let mut v = KVec::new();
++    /// v.push(1, GFP_KERNEL)?;
++    ///
++    /// v.extend_from_slice(&[20, 30, 40], GFP_KERNEL)?;
++    /// assert_eq!(&v, &[1, 20, 30, 40]);
++    ///
++    /// v.extend_from_slice(&[50, 60], GFP_KERNEL)?;
++    /// assert_eq!(&v, &[1, 20, 30, 40, 50, 60]);
++    /// # Ok::<(), Error>(())
++    /// ```
++    pub fn extend_from_slice(&mut self, other: &[T], flags: Flags) -> Result<(), AllocError> {
++        self.reserve(other.len(), flags)?;
++        for (slot, item) in core::iter::zip(self.spare_capacity_mut(), other) {
++            slot.write(item.clone());
++        }
++
++        // SAFETY:
++        // - `other.len()` spare entries have just been initialized, so it is safe to increase
++        //   the length by the same number.
++        // - `self.len() + other.len() <= self.capacity()` is guaranteed by the preceding `reserve`
++        //   call.
++        unsafe { self.set_len(self.len() + other.len()) };
++        Ok(())
++    }
++
++    /// Creates a new `Vec<T, A>` and extends it by `n` clones of `value`.
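++    ///
++    /// # Examples
++    ///
++    /// ```
++    /// // Equivalent to `kernel::kvec![42; 3]`.
++    /// let v = KVec::from_elem(42, 3, GFP_KERNEL)?;
++    /// assert_eq!(&v, &[42, 42, 42]);
++    /// # Ok::<(), Error>(())
++    /// ```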
++    pub fn from_elem(value: T, n: usize, flags: Flags) -> Result<Self, AllocError> {
++        let mut v = Self::with_capacity(n, flags)?;
++
++        v.extend_with(n, value, flags)?;
++
++        Ok(v)
++    }
++}
++
++impl<T, A> Drop for Vec<T, A>
++where
++    A: Allocator,
++{
++    fn drop(&mut self) {
++        // SAFETY: `self.as_mut_ptr` is guaranteed to be valid by the type invariant.
++        unsafe {
++            ptr::drop_in_place(core::ptr::slice_from_raw_parts_mut(
++                self.as_mut_ptr(),
++                self.len,
++            ))
++        };
++
++        // SAFETY:
++        // - `self.ptr` was previously allocated with `A`.
++        // - `self.layout` matches the `ArrayLayout` of the preceding allocation.
++        unsafe { A::free(self.ptr.cast(), self.layout.into()) };
++    }
++}
++
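++/// Converting a boxed array into a [`Vec`] re-uses the box's allocation.
++///
++/// # Examples
++///
++/// ```
++/// // A short sketch; the `kvec!` macro relies on this conversion internally.
++/// let b = KBox::new([1, 2, 3], GFP_KERNEL)?;
++/// let v = KVec::from(b);
++///
++/// assert_eq!(v, [1, 2, 3]);
++/// # Ok::<(), Error>(())
++/// ```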
++impl<T, A, const N: usize> From<Box<[T; N], A>> for Vec<T, A>
++where
++    A: Allocator,
++{
++    fn from(b: Box<[T; N], A>) -> Vec<T, A> {
++        let len = b.len();
++        let ptr = Box::into_raw(b);
++
++        // SAFETY:
++        // - `b` has been allocated with `A`,
++        // - `ptr` fulfills the alignment requirements for `T`,
++        // - `ptr` points to memory with at least a size of `size_of::<T>() * len`,
++        // - all elements within `b` are initialized values of `T`,
++        // - `len` does not exceed `isize::MAX`.
++        unsafe { Vec::from_raw_parts(ptr as _, len, len) }
++    }
++}
++
++impl<T> Default for KVec<T> {
++    #[inline]
++    fn default() -> Self {
++        Self::new()
++    }
++}
++
++impl<T: fmt::Debug, A: Allocator> fmt::Debug for Vec<T, A> {
++    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
++        fmt::Debug::fmt(&**self, f)
++    }
++}
++
++impl<T, A> Deref for Vec<T, A>
++where
++    A: Allocator,
++{
++    type Target = [T];
++
++    #[inline]
++    fn deref(&self) -> &[T] {
++        // SAFETY: The memory behind `self.as_ptr()` is guaranteed to contain `self.len`
++        // initialized elements of type `T`.
++        unsafe { slice::from_raw_parts(self.as_ptr(), self.len) }
++    }
++}
++
++impl<T, A> DerefMut for Vec<T, A>
++where
++    A: Allocator,
++{
++    #[inline]
++    fn deref_mut(&mut self) -> &mut [T] {
++        // SAFETY: The memory behind `self.as_ptr()` is guaranteed to contain `self.len`
++        // initialized elements of type `T`.
++        unsafe { slice::from_raw_parts_mut(self.as_mut_ptr(), self.len) }
++    }
++}
++
++impl<T: Eq, A> Eq for Vec<T, A> where A: Allocator {}
++
++impl<T, I: SliceIndex<[T]>, A> Index<I> for Vec<T, A>
++where
++    A: Allocator,
++{
++    type Output = I::Output;
++
++    #[inline]
++    fn index(&self, index: I) -> &Self::Output {
++        Index::index(&**self, index)
++    }
++}
++
++impl<T, I: SliceIndex<[T]>, A> IndexMut<I> for Vec<T, A>
++where
++    A: Allocator,
++{
++    #[inline]
++    fn index_mut(&mut self, index: I) -> &mut Self::Output {
++        IndexMut::index_mut(&mut **self, index)
++    }
++}
++
++macro_rules! impl_slice_eq {
++    ($([$($vars:tt)*] $lhs:ty, $rhs:ty,)*) => {
++        $(
++            impl<T, U, $($vars)*> PartialEq<$rhs> for $lhs
++            where
++                T: PartialEq<U>,
++            {
++                #[inline]
++                fn eq(&self, other: &$rhs) -> bool { self[..] == other[..] }
++            }
++        )*
++    }
++}
++
++impl_slice_eq! {
++    [A1: Allocator, A2: Allocator] Vec<T, A1>, Vec<U, A2>,
++    [A: Allocator] Vec<T, A>, &[U],
++    [A: Allocator] Vec<T, A>, &mut [U],
++    [A: Allocator] &[T], Vec<U, A>,
++    [A: Allocator] &mut [T], Vec<U, A>,
++    [A: Allocator] Vec<T, A>, [U],
++    [A: Allocator] [T], Vec<U, A>,
++    [A: Allocator, const N: usize] Vec<T, A>, [U; N],
++    [A: Allocator, const N: usize] Vec<T, A>, &[U; N],
++}
++
++impl<'a, T, A> IntoIterator for &'a Vec<T, A>
++where
++    A: Allocator,
++{
++    type Item = &'a T;
++    type IntoIter = slice::Iter<'a, T>;
++
++    fn into_iter(self) -> Self::IntoIter {
++        self.iter()
++    }
++}
++
++impl<'a, T, A> IntoIterator for &'a mut Vec<T, A>
++where
++    A: Allocator,
++{
++    type Item = &'a mut T;
++    type IntoIter = slice::IterMut<'a, T>;
++
++    fn into_iter(self) -> Self::IntoIter {
++        self.iter_mut()
++    }
++}
++
++/// An [`Iterator`] implementation for [`Vec`] that moves elements out of a vector.
++///
++/// This structure is created by the [`Vec::into_iter`] method on [`Vec`] (provided by the
++/// [`IntoIterator`] trait).
++///
++/// # Examples
++///
++/// ```
++/// let v = kernel::kvec![0, 1, 2]?;
++/// let iter = v.into_iter();
++///
++/// # Ok::<(), Error>(())
++/// ```
++pub struct IntoIter<T, A: Allocator> {
++    ptr: *mut T,
++    buf: NonNull<T>,
++    len: usize,
++    layout: ArrayLayout<T>,
++    _p: PhantomData<A>,
++}
++
++impl<T, A> IntoIter<T, A>
++where
++    A: Allocator,
++{
++    fn into_raw_parts(self) -> (*mut T, NonNull<T>, usize, usize) {
++        let me = ManuallyDrop::new(self);
++        let ptr = me.ptr;
++        let buf = me.buf;
++        let len = me.len;
++        let cap = me.layout.len();
++        (ptr, buf, len, cap)
++    }
++
++    /// Same as `Iterator::collect` but specialized for `Vec`'s `IntoIter`.
++    ///
++    /// # Examples
++    ///
++    /// ```
++    /// let v = kernel::kvec![1, 2, 3]?;
++    /// let mut it = v.into_iter();
++    ///
++    /// assert_eq!(it.next(), Some(1));
++    ///
++    /// let v = it.collect(GFP_KERNEL);
++    /// assert_eq!(v, [2, 3]);
++    ///
++    /// # Ok::<(), Error>(())
++    /// ```
++    ///
++    /// # Implementation details
++    ///
++    /// Currently, we can't implement `FromIterator`. There are a couple of issues with this trait
++    /// in the kernel, namely:
++    ///
++    /// - Rust's specialization feature is unstable. This prevents us from optimizing for the
++    ///   special case where `I::IntoIter` equals `Vec`'s `IntoIter` type.
++    /// - We can't use `I::IntoIter`'s type ID to work around this either, since `FromIterator`
++    ///   doesn't require this type to be `'static`.
++    /// - `FromIterator::from_iter` returns `Self` instead of `Result<Self, AllocError>`, hence
++    ///   we can't properly handle allocation failures.
++    /// - Neither `Iterator::collect` nor `FromIterator::from_iter` can handle additional allocation
++    ///   flags.
++    ///
++    /// Instead, we provide `IntoIter::collect`, such that we can at least convert an `IntoIter`
++    /// back into a `Vec`.
++    ///
++    /// Note that `IntoIter::collect` doesn't require a new allocation, since it re-uses the
++    /// existing backing buffer; the given `Flags` are only used if the buffer is shrunk to the
++    /// actual number of elements.
++    pub fn collect(self, flags: Flags) -> Vec<T, A> {
++        let old_layout = self.layout;
++        let (mut ptr, buf, len, mut cap) = self.into_raw_parts();
++        let has_advanced = ptr != buf.as_ptr();
++
++        if has_advanced {
++            // Copy the contents we have advanced to at the beginning of the buffer.
++            //
++            // SAFETY:
++            // - `ptr` is valid for reads of `len * size_of::<T>()` bytes,
++            // - `buf.as_ptr()` is valid for writes of `len * size_of::<T>()` bytes,
++            // - `ptr` and `buf.as_ptr()` are not subject to aliasing restrictions relative to
++            //   each other,
++            // - both `ptr` and `buf.as_ptr()` are properly aligned.
++            unsafe { ptr::copy(ptr, buf.as_ptr(), len) };
++            ptr = buf.as_ptr();
++
++            // SAFETY: `len` is guaranteed to be smaller than `self.layout.len()`.
++            let layout = unsafe { ArrayLayout::<T>::new_unchecked(len) };
++
++            // SAFETY: `buf` points to the start of the backing buffer and `len` is guaranteed to be
++            // smaller than `cap`. Depending on the allocator, this operation may either shrink
++            // the buffer or leave it as it is.
++            ptr = match unsafe {
++                A::realloc(Some(buf.cast()), layout.into(), old_layout.into(), flags)
++            } {
++                // If we fail to shrink, which likely can't even happen, continue with the existing
++                // buffer.
++                Err(_) => ptr,
++                Ok(ptr) => {
++                    cap = len;
++                    ptr.as_ptr().cast()
++                }
++            };
++        }
++
++        // SAFETY: If the iterator has been advanced, the advanced elements have been copied to
++        // the beginning of the buffer and `len` has been adjusted accordingly.
++        //
++        // - `ptr` is guaranteed to point to the start of the backing buffer.
++        // - `cap` is either the original capacity or, after shrinking the buffer, equal to `len`.
++        // - `alloc` is guaranteed to be unchanged since `into_iter` has been called on the original
++        //   `Vec`.
++        unsafe { Vec::from_raw_parts(ptr, len, cap) }
++    }
++}
++
++impl<T, A> Iterator for IntoIter<T, A>
++where
++    A: Allocator,
++{
++    type Item = T;
++
++    /// # Examples
++    ///
++    /// ```
++    /// let v = kernel::kvec![1, 2, 3]?;
++    /// let mut it = v.into_iter();
++    ///
++    /// assert_eq!(it.next(), Some(1));
++    /// assert_eq!(it.next(), Some(2));
++    /// assert_eq!(it.next(), Some(3));
++    /// assert_eq!(it.next(), None);
++    ///
++    /// # Ok::<(), Error>(())
++    /// ```
++    fn next(&mut self) -> Option<T> {
++        if self.len == 0 {
++            return None;
++        }
++
++        let current = self.ptr;
++
++        // SAFETY: We can't overflow; decreasing `self.len` by one every time we advance `self.ptr`
++        // by one guarantees that.
++        unsafe { self.ptr = self.ptr.add(1) };
++
++        self.len -= 1;
++
++        // SAFETY: `current` is guaranteed to point at a valid element within the buffer.
++        Some(unsafe { current.read() })
++    }
++
++    /// # Examples
++    ///
++    /// ```
++    /// let v: KVec<u32> = kernel::kvec![1, 2, 3]?;
++    /// let mut iter = v.into_iter();
++    /// let size = iter.size_hint().0;
++    ///
++    /// iter.next();
++    /// assert_eq!(iter.size_hint().0, size - 1);
++    ///
++    /// iter.next();
++    /// assert_eq!(iter.size_hint().0, size - 2);
++    ///
++    /// iter.next();
++    /// assert_eq!(iter.size_hint().0, size - 3);
++    ///
++    /// # Ok::<(), Error>(())
++    /// ```
++    fn size_hint(&self) -> (usize, Option<usize>) {
++        (self.len, Some(self.len))
++    }
++}
++
++impl<T, A> Drop for IntoIter<T, A>
++where
++    A: Allocator,
++{
++    fn drop(&mut self) {
++        // SAFETY: `self.ptr` is guaranteed to be valid by the type invariant.
++        unsafe { ptr::drop_in_place(ptr::slice_from_raw_parts_mut(self.ptr, self.len)) };
++
++        // SAFETY:
++        // - `self.buf` was previously allocated with `A`.
++        // - `self.layout` matches the `ArrayLayout` of the preceding allocation.
++        unsafe { A::free(self.buf.cast(), self.layout.into()) };
++    }
++}
++
++impl<T, A> IntoIterator for Vec<T, A>
++where
++    A: Allocator,
++{
++    type Item = T;
++    type IntoIter = IntoIter<T, A>;
++
++    /// Consumes the `Vec<T, A>` and creates an `Iterator`, which moves each value out of the
++    /// vector (from start to end).
++    ///
++    /// # Examples
++    ///
++    /// ```
++    /// let v = kernel::kvec![1, 2]?;
++    /// let mut v_iter = v.into_iter();
++    ///
++    /// let first_element: Option<u32> = v_iter.next();
++    ///
++    /// assert_eq!(first_element, Some(1));
++    /// assert_eq!(v_iter.next(), Some(2));
++    /// assert_eq!(v_iter.next(), None);
++    ///
++    /// # Ok::<(), Error>(())
++    /// ```
++    ///
++    /// ```
++    /// let v = kernel::kvec![];
++    /// let mut v_iter = v.into_iter();
++    ///
++    /// let first_element: Option<u32> = v_iter.next();
++    ///
++    /// assert_eq!(first_element, None);
++    ///
++    /// # Ok::<(), Error>(())
++    /// ```
++    #[inline]
++    fn into_iter(self) -> Self::IntoIter {
++        let buf = self.ptr;
++        let layout = self.layout;
++        let (ptr, len, _) = self.into_raw_parts();
++
++        IntoIter {
++            ptr,
++            buf,
++            len,
++            layout,
++            _p: PhantomData::<A>,
++        }
++    }
++}
+diff --git a/rust/kernel/alloc/layout.rs b/rust/kernel/alloc/layout.rs
+new file mode 100644
+index 00000000000000..4b3cd7fdc816c1
+--- /dev/null
++++ b/rust/kernel/alloc/layout.rs
+@@ -0,0 +1,91 @@
++// SPDX-License-Identifier: GPL-2.0
++
++//! Memory layout.
++//!
++//! Custom layout types extending or improving [`Layout`].
++
++use core::{alloc::Layout, marker::PhantomData};
++
++/// Error when constructing an [`ArrayLayout`].
++pub struct LayoutError;
++
++/// A layout for an array `[T; n]`.
++///
++/// # Invariants
++///
++/// - `len * size_of::<T>() <= isize::MAX`.
++pub struct ArrayLayout<T> {
++    len: usize,
++    _phantom: PhantomData<fn() -> T>,
++}
++
++impl<T> Clone for ArrayLayout<T> {
++    fn clone(&self) -> Self {
++        *self
++    }
++}
++impl<T> Copy for ArrayLayout<T> {}
++
++const ISIZE_MAX: usize = isize::MAX as usize;
++
++impl<T> ArrayLayout<T> {
++    /// Creates a new layout for `[T; 0]`.
++    pub const fn empty() -> Self {
++        // INVARIANT: `0 * size_of::<T>() <= isize::MAX`.
++        Self {
++            len: 0,
++            _phantom: PhantomData,
++        }
++    }
++
++    /// Creates a new layout for `[T; len]`.
++    ///
++    /// # Errors
++    ///
++    /// When `len * size_of::<T>()` overflows or when `len * size_of::<T>() > isize::MAX`.
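++    ///
++    /// # Examples
++    ///
++    /// ```
++    /// // A short sketch, assuming this module is reachable as `kernel::alloc::layout`.
++    /// use kernel::alloc::layout::ArrayLayout;
++    ///
++    /// assert!(ArrayLayout::<u64>::new(10).is_ok());
++    ///
++    /// // A layout whose size in bytes would exceed `isize::MAX` is rejected.
++    /// assert!(ArrayLayout::<u64>::new(usize::MAX).is_err());
++    /// ```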
++    pub const fn new(len: usize) -> Result<Self, LayoutError> {
++        match len.checked_mul(core::mem::size_of::<T>()) {
++            Some(size) if size <= ISIZE_MAX => {
++                // INVARIANT: We checked above that `len * size_of::<T>() <= isize::MAX`.
++                Ok(Self {
++                    len,
++                    _phantom: PhantomData,
++                })
++            }
++            _ => Err(LayoutError),
++        }
++    }
++
++    /// Creates a new layout for `[T; len]`.
++    ///
++    /// # Safety
++    ///
++    /// `len` must be a value for which `len * size_of::<T>() <= isize::MAX` holds.
++    pub unsafe fn new_unchecked(len: usize) -> Self {
++        // INVARIANT: By the safety requirements of this function
++        // `len * size_of::<T>() <= isize::MAX`.
++        Self {
++            len,
++            _phantom: PhantomData,
++        }
++    }
++
++    /// Returns the number of array elements represented by this layout.
++    pub const fn len(&self) -> usize {
++        self.len
++    }
++
++    /// Returns `true` when no array elements are represented by this layout.
++    pub const fn is_empty(&self) -> bool {
++        self.len == 0
++    }
++}
++
++impl<T> From<ArrayLayout<T>> for Layout {
++    fn from(value: ArrayLayout<T>) -> Self {
++        let res = Layout::array::<T>(value.len);
++        // SAFETY: By the type invariant of `ArrayLayout` we have
++        // `len * size_of::<T>() <= isize::MAX` and thus the result must be `Ok`.
++        unsafe { res.unwrap_unchecked() }
++    }
++}
+diff --git a/rust/kernel/alloc/vec_ext.rs b/rust/kernel/alloc/vec_ext.rs
+deleted file mode 100644
+index 1297a4be32e8c4..00000000000000
+--- a/rust/kernel/alloc/vec_ext.rs
++++ /dev/null
+@@ -1,185 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-
+-//! Extensions to [`Vec`] for fallible allocations.
+-
+-use super::{AllocError, Flags};
+-use alloc::vec::Vec;
+-
+-/// Extensions to [`Vec`].
+-pub trait VecExt<T>: Sized {
+-    /// Creates a new [`Vec`] instance with at least the given capacity.
+-    ///
+-    /// # Examples
+-    ///
+-    /// ```
+-    /// let v = Vec::<u32>::with_capacity(20, GFP_KERNEL)?;
+-    ///
+-    /// assert!(v.capacity() >= 20);
+-    /// # Ok::<(), Error>(())
+-    /// ```
+-    fn with_capacity(capacity: usize, flags: Flags) -> Result<Self, AllocError>;
+-
+-    /// Appends an element to the back of the [`Vec`] instance.
+-    ///
+-    /// # Examples
+-    ///
+-    /// ```
+-    /// let mut v = Vec::new();
+-    /// v.push(1, GFP_KERNEL)?;
+-    /// assert_eq!(&v, &[1]);
+-    ///
+-    /// v.push(2, GFP_KERNEL)?;
+-    /// assert_eq!(&v, &[1, 2]);
+-    /// # Ok::<(), Error>(())
+-    /// ```
+-    fn push(&mut self, v: T, flags: Flags) -> Result<(), AllocError>;
+-
+-    /// Pushes clones of the elements of slice into the [`Vec`] instance.
+-    ///
+-    /// # Examples
+-    ///
+-    /// ```
+-    /// let mut v = Vec::new();
+-    /// v.push(1, GFP_KERNEL)?;
+-    ///
+-    /// v.extend_from_slice(&[20, 30, 40], GFP_KERNEL)?;
+-    /// assert_eq!(&v, &[1, 20, 30, 40]);
+-    ///
+-    /// v.extend_from_slice(&[50, 60], GFP_KERNEL)?;
+-    /// assert_eq!(&v, &[1, 20, 30, 40, 50, 60]);
+-    /// # Ok::<(), Error>(())
+-    /// ```
+-    fn extend_from_slice(&mut self, other: &[T], flags: Flags) -> Result<(), AllocError>
+-    where
+-        T: Clone;
+-
+-    /// Ensures that the capacity exceeds the length by at least `additional` elements.
+-    ///
+-    /// # Examples
+-    ///
+-    /// ```
+-    /// let mut v = Vec::new();
+-    /// v.push(1, GFP_KERNEL)?;
+-    ///
+-    /// v.reserve(10, GFP_KERNEL)?;
+-    /// let cap = v.capacity();
+-    /// assert!(cap >= 10);
+-    ///
+-    /// v.reserve(10, GFP_KERNEL)?;
+-    /// let new_cap = v.capacity();
+-    /// assert_eq!(new_cap, cap);
+-    ///
+-    /// # Ok::<(), Error>(())
+-    /// ```
+-    fn reserve(&mut self, additional: usize, flags: Flags) -> Result<(), AllocError>;
+-}
+-
+-impl<T> VecExt<T> for Vec<T> {
+-    fn with_capacity(capacity: usize, flags: Flags) -> Result<Self, AllocError> {
+-        let mut v = Vec::new();
+-        <Self as VecExt<_>>::reserve(&mut v, capacity, flags)?;
+-        Ok(v)
+-    }
+-
+-    fn push(&mut self, v: T, flags: Flags) -> Result<(), AllocError> {
+-        <Self as VecExt<_>>::reserve(self, 1, flags)?;
+-        let s = self.spare_capacity_mut();
+-        s[0].write(v);
+-
+-        // SAFETY: We just initialised the first spare entry, so it is safe to increase the length
+-        // by 1. We also know that the new length is <= capacity because of the previous call to
+-        // `reserve` above.
+-        unsafe { self.set_len(self.len() + 1) };
+-        Ok(())
+-    }
+-
+-    fn extend_from_slice(&mut self, other: &[T], flags: Flags) -> Result<(), AllocError>
+-    where
+-        T: Clone,
+-    {
+-        <Self as VecExt<_>>::reserve(self, other.len(), flags)?;
+-        for (slot, item) in core::iter::zip(self.spare_capacity_mut(), other) {
+-            slot.write(item.clone());
+-        }
+-
+-        // SAFETY: We just initialised the `other.len()` spare entries, so it is safe to increase
+-        // the length by the same amount. We also know that the new length is <= capacity because
+-        // of the previous call to `reserve` above.
+-        unsafe { self.set_len(self.len() + other.len()) };
+-        Ok(())
+-    }
+-
+-    #[cfg(any(test, testlib))]
+-    fn reserve(&mut self, additional: usize, _flags: Flags) -> Result<(), AllocError> {
+-        Vec::reserve(self, additional);
+-        Ok(())
+-    }
+-
+-    #[cfg(not(any(test, testlib)))]
+-    fn reserve(&mut self, additional: usize, flags: Flags) -> Result<(), AllocError> {
+-        let len = self.len();
+-        let cap = self.capacity();
+-
+-        if cap - len >= additional {
+-            return Ok(());
+-        }
+-
+-        if core::mem::size_of::<T>() == 0 {
+-            // The capacity is already `usize::MAX` for ZSTs, we can't go higher.
+-            return Err(AllocError);
+-        }
+-
+-        // We know cap is <= `isize::MAX` because `Layout::array` fails if the resulting byte size
+-        // is greater than `isize::MAX`. So the multiplication by two won't overflow.
+-        let new_cap = core::cmp::max(cap * 2, len.checked_add(additional).ok_or(AllocError)?);
+-        let layout = core::alloc::Layout::array::<T>(new_cap).map_err(|_| AllocError)?;
+-
+-        let (old_ptr, len, cap) = destructure(self);
+-
+-        // We need to make sure that `ptr` is either NULL or comes from a previous call to
+-        // `krealloc_aligned`. A `Vec<T>`'s `ptr` value is not guaranteed to be NULL and might be
+-        // dangling after being created with `Vec::new`. Instead, we can rely on `Vec<T>`'s capacity
+-        // to be zero if no memory has been allocated yet.
+-        let ptr = if cap == 0 {
+-            core::ptr::null_mut()
+-        } else {
+-            old_ptr
+-        };
+-
+-        // SAFETY: `ptr` is valid because it's either NULL or comes from a previous call to
+-        // `krealloc_aligned`. We also verified that the type is not a ZST.
+-        let new_ptr = unsafe { super::allocator::krealloc_aligned(ptr.cast(), layout, flags) };
+-        if new_ptr.is_null() {
+-            // SAFETY: We are just rebuilding the existing `Vec` with no changes.
+-            unsafe { rebuild(self, old_ptr, len, cap) };
+-            Err(AllocError)
+-        } else {
+-            // SAFETY: `ptr` has been reallocated with the layout for `new_cap` elements. New cap
+-            // is greater than `cap`, so it continues to be >= `len`.
+-            unsafe { rebuild(self, new_ptr.cast::<T>(), len, new_cap) };
+-            Ok(())
+-        }
+-    }
+-}
+-
+-#[cfg(not(any(test, testlib)))]
+-fn destructure<T>(v: &mut Vec<T>) -> (*mut T, usize, usize) {
+-    let mut tmp = Vec::new();
+-    core::mem::swap(&mut tmp, v);
+-    let mut tmp = core::mem::ManuallyDrop::new(tmp);
+-    let len = tmp.len();
+-    let cap = tmp.capacity();
+-    (tmp.as_mut_ptr(), len, cap)
+-}
+-
+-/// Rebuilds a `Vec` from a pointer, length, and capacity.
+-///
+-/// # Safety
+-///
+-/// The same as [`Vec::from_raw_parts`].
+-#[cfg(not(any(test, testlib)))]
+-unsafe fn rebuild<T>(v: &mut Vec<T>, ptr: *mut T, len: usize, cap: usize) {
+-    // SAFETY: The safety requirements from this function satisfy those of `from_raw_parts`.
+-    let mut tmp = unsafe { Vec::from_raw_parts(ptr, len, cap) };
+-    core::mem::swap(&mut tmp, v);
+-}
+diff --git a/rust/kernel/block/mq/gen_disk.rs b/rust/kernel/block/mq/gen_disk.rs
+index 708125dce96a93..c6df153ebb8860 100644
+--- a/rust/kernel/block/mq/gen_disk.rs
++++ b/rust/kernel/block/mq/gen_disk.rs
+@@ -174,9 +174,9 @@ pub fn build<T: Operations>(
+ ///
+ /// # Invariants
+ ///
+-///  - `gendisk` must always point to an initialized and valid `struct gendisk`.
+-///  - `gendisk` was added to the VFS through a call to
+-///     `bindings::device_add_disk`.
++/// - `gendisk` must always point to an initialized and valid `struct gendisk`.
++/// - `gendisk` was added to the VFS through a call to
++///   `bindings::device_add_disk`.
+ pub struct GenDisk<T: Operations> {
+     _tagset: Arc<TagSet<T>>,
+     gendisk: *mut bindings::gendisk,
+diff --git a/rust/kernel/block/mq/operations.rs b/rust/kernel/block/mq/operations.rs
+index 9ba7fdfeb4b22c..c8646d0d98669f 100644
+--- a/rust/kernel/block/mq/operations.rs
++++ b/rust/kernel/block/mq/operations.rs
+@@ -131,7 +131,7 @@ impl<T: Operations> OperationsVTable<T> {
+     unsafe extern "C" fn poll_callback(
+         _hctx: *mut bindings::blk_mq_hw_ctx,
+         _iob: *mut bindings::io_comp_batch,
+-    ) -> core::ffi::c_int {
++    ) -> crate::ffi::c_int {
+         T::poll().into()
+     }
+ 
+@@ -145,9 +145,9 @@ impl<T: Operations> OperationsVTable<T> {
+     /// for the same context.
+     unsafe extern "C" fn init_hctx_callback(
+         _hctx: *mut bindings::blk_mq_hw_ctx,
+-        _tagset_data: *mut core::ffi::c_void,
+-        _hctx_idx: core::ffi::c_uint,
+-    ) -> core::ffi::c_int {
++        _tagset_data: *mut crate::ffi::c_void,
++        _hctx_idx: crate::ffi::c_uint,
++    ) -> crate::ffi::c_int {
+         from_result(|| Ok(0))
+     }
+ 
+@@ -159,7 +159,7 @@ impl<T: Operations> OperationsVTable<T> {
+     /// This function may only be called by blk-mq C infrastructure.
+     unsafe extern "C" fn exit_hctx_callback(
+         _hctx: *mut bindings::blk_mq_hw_ctx,
+-        _hctx_idx: core::ffi::c_uint,
++        _hctx_idx: crate::ffi::c_uint,
+     ) {
+     }
+ 
+@@ -176,9 +176,9 @@ impl<T: Operations> OperationsVTable<T> {
+     unsafe extern "C" fn init_request_callback(
+         _set: *mut bindings::blk_mq_tag_set,
+         rq: *mut bindings::request,
+-        _hctx_idx: core::ffi::c_uint,
+-        _numa_node: core::ffi::c_uint,
+-    ) -> core::ffi::c_int {
++        _hctx_idx: crate::ffi::c_uint,
++        _numa_node: crate::ffi::c_uint,
++    ) -> crate::ffi::c_int {
+         from_result(|| {
+             // SAFETY: By the safety requirements of this function, `rq` points
+             // to a valid allocation.
+@@ -203,7 +203,7 @@ impl<T: Operations> OperationsVTable<T> {
+     unsafe extern "C" fn exit_request_callback(
+         _set: *mut bindings::blk_mq_tag_set,
+         rq: *mut bindings::request,
+-        _hctx_idx: core::ffi::c_uint,
++        _hctx_idx: crate::ffi::c_uint,
+     ) {
+         // SAFETY: The tagset invariants guarantee that all requests are allocated with extra memory
+         // for the request data.
+diff --git a/rust/kernel/block/mq/raw_writer.rs b/rust/kernel/block/mq/raw_writer.rs
+index 9222465d670bfe..7e2159e4f6a6f7 100644
+--- a/rust/kernel/block/mq/raw_writer.rs
++++ b/rust/kernel/block/mq/raw_writer.rs
+@@ -25,7 +25,7 @@ fn new(buffer: &'a mut [u8]) -> Result<RawWriter<'a>> {
+     }
+ 
+     pub(crate) fn from_array<const N: usize>(
+-        a: &'a mut [core::ffi::c_char; N],
++        a: &'a mut [crate::ffi::c_char; N],
+     ) -> Result<RawWriter<'a>> {
+         Self::new(
+             // SAFETY: the buffer of `a` is valid for read and write as `u8` for
+diff --git a/rust/kernel/block/mq/tag_set.rs b/rust/kernel/block/mq/tag_set.rs
+index f9a1ca655a35be..d7f175a05d992b 100644
+--- a/rust/kernel/block/mq/tag_set.rs
++++ b/rust/kernel/block/mq/tag_set.rs
+@@ -53,7 +53,7 @@ pub fn new(
+                     queue_depth: num_tags,
+                     cmd_size,
+                     flags: bindings::BLK_MQ_F_SHOULD_MERGE,
+-                    driver_data: core::ptr::null_mut::<core::ffi::c_void>(),
++                    driver_data: core::ptr::null_mut::<crate::ffi::c_void>(),
+                     nr_maps: num_maps,
+                     ..tag_set
+                 }
+diff --git a/rust/kernel/error.rs b/rust/kernel/error.rs
+index 6f1587a2524e8b..5fece574ec023b 100644
+--- a/rust/kernel/error.rs
++++ b/rust/kernel/error.rs
+@@ -6,9 +6,10 @@
+ 
+ use crate::{alloc::AllocError, str::CStr};
+ 
+-use alloc::alloc::LayoutError;
++use core::alloc::LayoutError;
+ 
+ use core::fmt;
++use core::num::NonZeroI32;
+ use core::num::TryFromIntError;
+ use core::str::Utf8Error;
+ 
+@@ -20,7 +21,11 @@ macro_rules! declare_err {
+             $(
+             #[doc = $doc]
+             )*
+-            pub const $err: super::Error = super::Error(-(crate::bindings::$err as i32));
++            pub const $err: super::Error =
++                match super::Error::try_from_errno(-(crate::bindings::$err as i32)) {
++                    Some(err) => err,
++                    None => panic!("Invalid errno in `declare_err!`"),
++                };
+         };
+     }
+ 
+@@ -88,14 +93,14 @@ macro_rules! declare_err {
+ ///
+ /// The value is a valid `errno` (i.e. `>= -MAX_ERRNO && < 0`).
+ #[derive(Clone, Copy, PartialEq, Eq)]
+-pub struct Error(core::ffi::c_int);
++pub struct Error(NonZeroI32);
+ 
+ impl Error {
+     /// Creates an [`Error`] from a kernel error code.
+     ///
+     /// It is a bug to pass an out-of-range `errno`. `EINVAL` would
+     /// be returned in such a case.
+-    pub(crate) fn from_errno(errno: core::ffi::c_int) -> Error {
++    pub fn from_errno(errno: crate::ffi::c_int) -> Error {
+         if errno < -(bindings::MAX_ERRNO as i32) || errno >= 0 {
+             // TODO: Make it a `WARN_ONCE` once available.
+             crate::pr_warn!(
+@@ -107,7 +112,20 @@ pub(crate) fn from_errno(errno: core::ffi::c_int) -> Error {
+ 
+         // INVARIANT: The check above ensures the type invariant
+         // will hold.
+-        Error(errno)
++        // SAFETY: `errno` is checked above to be in a valid range.
++        unsafe { Error::from_errno_unchecked(errno) }
++    }
++
++    /// Creates an [`Error`] from a kernel error code.
++    ///
++    /// Returns [`None`] if `errno` is out-of-range.
++    const fn try_from_errno(errno: crate::ffi::c_int) -> Option<Error> {
++        if errno < -(bindings::MAX_ERRNO as i32) || errno >= 0 {
++            return None;
++        }
++
++        // SAFETY: `errno` is checked above to be in a valid range.
++        Some(unsafe { Error::from_errno_unchecked(errno) })
+     }
+ 
+     /// Creates an [`Error`] from a kernel error code.
+@@ -115,38 +133,35 @@ pub(crate) fn from_errno(errno: core::ffi::c_int) -> Error {
+     /// # Safety
+     ///
+     /// `errno` must be within error code range (i.e. `>= -MAX_ERRNO && < 0`).
+-    unsafe fn from_errno_unchecked(errno: core::ffi::c_int) -> Error {
++    const unsafe fn from_errno_unchecked(errno: crate::ffi::c_int) -> Error {
+         // INVARIANT: The contract ensures the type invariant
+         // will hold.
+-        Error(errno)
++        // SAFETY: The caller guarantees `errno` is non-zero.
++        Error(unsafe { NonZeroI32::new_unchecked(errno) })
+     }
+ 
+     /// Returns the kernel error code.
+-    pub fn to_errno(self) -> core::ffi::c_int {
+-        self.0
++    pub fn to_errno(self) -> crate::ffi::c_int {
++        self.0.get()
+     }
+ 
+     #[cfg(CONFIG_BLOCK)]
+     pub(crate) fn to_blk_status(self) -> bindings::blk_status_t {
+         // SAFETY: `self.0` is a valid error due to its invariant.
+-        unsafe { bindings::errno_to_blk_status(self.0) }
++        unsafe { bindings::errno_to_blk_status(self.0.get()) }
+     }
+ 
+     /// Returns the error encoded as a pointer.
+-    #[allow(dead_code)]
+-    pub(crate) fn to_ptr<T>(self) -> *mut T {
+-        #[cfg_attr(target_pointer_width = "32", allow(clippy::useless_conversion))]
++    pub fn to_ptr<T>(self) -> *mut T {
+         // SAFETY: `self.0` is a valid error due to its invariant.
+-        unsafe {
+-            bindings::ERR_PTR(self.0.into()) as *mut _
+-        }
++        unsafe { bindings::ERR_PTR(self.0.get() as _) as *mut _ }
+     }
+ 
+     /// Returns a string representing the error, if one exists.
+-    #[cfg(not(testlib))]
++    #[cfg(not(any(test, testlib)))]
+     pub fn name(&self) -> Option<&'static CStr> {
+         // SAFETY: Just an FFI call, there are no extra safety requirements.
+-        let ptr = unsafe { bindings::errname(-self.0) };
++        let ptr = unsafe { bindings::errname(-self.0.get()) };
+         if ptr.is_null() {
+             None
+         } else {
+@@ -160,7 +175,7 @@ pub fn name(&self) -> Option<&'static CStr> {
+     /// When `testlib` is configured, this always returns `None` to avoid the dependency on a
+     /// kernel function so that tests that use this (e.g., by calling [`Result::unwrap`]) can still
+     /// run in userspace.
+-    #[cfg(testlib)]
++    #[cfg(any(test, testlib))]
+     pub fn name(&self) -> Option<&'static CStr> {
+         None
+     }
+@@ -171,9 +186,11 @@ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+         match self.name() {
+             // Print out number if no name can be found.
+             None => f.debug_tuple("Error").field(&-self.0).finish(),
+-            // SAFETY: These strings are ASCII-only.
+             Some(name) => f
+-                .debug_tuple(unsafe { core::str::from_utf8_unchecked(name) })
++                .debug_tuple(
++                    // SAFETY: These strings are ASCII-only.
++                    unsafe { core::str::from_utf8_unchecked(name) },
++                )
+                 .finish(),
+         }
+     }
+@@ -239,7 +256,7 @@ fn from(e: core::convert::Infallible) -> Error {
+ 
+ /// Converts an integer as returned by a C kernel function to an error if it's negative, and
+ /// `Ok(())` otherwise.
+-pub fn to_result(err: core::ffi::c_int) -> Result {
++pub fn to_result(err: crate::ffi::c_int) -> Result {
+     if err < 0 {
+         Err(Error::from_errno(err))
+     } else {
+@@ -262,21 +279,21 @@ pub fn to_result(err: core::ffi::c_int) -> Result {
+ /// fn devm_platform_ioremap_resource(
+ ///     pdev: &mut PlatformDevice,
+ ///     index: u32,
+-/// ) -> Result<*mut core::ffi::c_void> {
++/// ) -> Result<*mut kernel::ffi::c_void> {
+ ///     // SAFETY: `pdev` points to a valid platform device. There are no safety requirements
+ ///     // on `index`.
+ ///     from_err_ptr(unsafe { bindings::devm_platform_ioremap_resource(pdev.to_ptr(), index) })
+ /// }
+ /// ```
+-// TODO: Remove `dead_code` marker once an in-kernel client is available.
+-#[allow(dead_code)]
+-pub(crate) fn from_err_ptr<T>(ptr: *mut T) -> Result<*mut T> {
+-    // CAST: Casting a pointer to `*const core::ffi::c_void` is always valid.
+-    let const_ptr: *const core::ffi::c_void = ptr.cast();
++pub fn from_err_ptr<T>(ptr: *mut T) -> Result<*mut T> {
++    // CAST: Casting a pointer to `*const crate::ffi::c_void` is always valid.
++    let const_ptr: *const crate::ffi::c_void = ptr.cast();
+     // SAFETY: The FFI function does not deref the pointer.
+     if unsafe { bindings::IS_ERR(const_ptr) } {
+         // SAFETY: The FFI function does not deref the pointer.
+         let err = unsafe { bindings::PTR_ERR(const_ptr) };
++
++        #[allow(clippy::unnecessary_cast)]
+         // CAST: If `IS_ERR()` returns `true`,
+         // then `PTR_ERR()` is guaranteed to return a
+         // negative value greater-or-equal to `-bindings::MAX_ERRNO`,
+@@ -286,8 +303,7 @@ pub(crate) fn from_err_ptr<T>(ptr: *mut T) -> Result<*mut T> {
+         //
+         // SAFETY: `IS_ERR()` ensures `err` is a
+         // negative value greater-or-equal to `-bindings::MAX_ERRNO`.
+-        #[allow(clippy::unnecessary_cast)]
+-        return Err(unsafe { Error::from_errno_unchecked(err as core::ffi::c_int) });
++        return Err(unsafe { Error::from_errno_unchecked(err as crate::ffi::c_int) });
+     }
+     Ok(ptr)
+ }
+@@ -307,7 +323,7 @@ pub(crate) fn from_err_ptr<T>(ptr: *mut T) -> Result<*mut T> {
+ /// # use kernel::bindings;
+ /// unsafe extern "C" fn probe_callback(
+ ///     pdev: *mut bindings::platform_device,
+-/// ) -> core::ffi::c_int {
++/// ) -> kernel::ffi::c_int {
+ ///     from_result(|| {
+ ///         let ptr = devm_alloc(pdev)?;
+ ///         bindings::platform_set_drvdata(pdev, ptr);
+@@ -315,9 +331,7 @@ pub(crate) fn from_err_ptr<T>(ptr: *mut T) -> Result<*mut T> {
+ ///     })
+ /// }
+ /// ```
+-// TODO: Remove `dead_code` marker once an in-kernel client is available.
+-#[allow(dead_code)]
+-pub(crate) fn from_result<T, F>(f: F) -> T
++pub fn from_result<T, F>(f: F) -> T
+ where
+     T: From<i16>,
+     F: FnOnce() -> Result<T>,
+diff --git a/rust/kernel/firmware.rs b/rust/kernel/firmware.rs
+index 13a374a5cdb743..c5162fdc95ff05 100644
+--- a/rust/kernel/firmware.rs
++++ b/rust/kernel/firmware.rs
+@@ -12,7 +12,7 @@
+ /// One of the following: `bindings::request_firmware`, `bindings::firmware_request_nowarn`,
+ /// `bindings::firmware_request_platform`, `bindings::request_firmware_direct`.
+ struct FwFunc(
+-    unsafe extern "C" fn(*mut *const bindings::firmware, *const i8, *mut bindings::device) -> i32,
++    unsafe extern "C" fn(*mut *const bindings::firmware, *const u8, *mut bindings::device) -> i32,
+ );
+ 
+ impl FwFunc {
+diff --git a/rust/kernel/init.rs b/rust/kernel/init.rs
+index 789f80f71ca7e1..c962029f96e1f1 100644
+--- a/rust/kernel/init.rs
++++ b/rust/kernel/init.rs
+@@ -13,7 +13,7 @@
+ //! To initialize a `struct` with an in-place constructor you will need two things:
+ //! - an in-place constructor,
+ //! - a memory location that can hold your `struct` (this can be the [stack], an [`Arc<T>`],
+-//!   [`UniqueArc<T>`], [`Box<T>`] or any other smart pointer that implements [`InPlaceInit`]).
++//!   [`UniqueArc<T>`], [`KBox<T>`] or any other smart pointer that implements [`InPlaceInit`]).
+ //!
+ //! To get an in-place constructor there are generally three options:
+ //! - directly creating an in-place constructor using the [`pin_init!`] macro,
+@@ -35,7 +35,7 @@
+ //! that you need to write `<-` instead of `:` for fields that you want to initialize in-place.
+ //!
+ //! ```rust
+-//! # #![allow(clippy::disallowed_names)]
++//! # #![expect(clippy::disallowed_names)]
+ //! use kernel::sync::{new_mutex, Mutex};
+ //! # use core::pin::Pin;
+ //! #[pin_data]
+@@ -55,7 +55,7 @@
+ //! (or just the stack) to actually initialize a `Foo`:
+ //!
+ //! ```rust
+-//! # #![allow(clippy::disallowed_names)]
++//! # #![expect(clippy::disallowed_names)]
+ //! # use kernel::sync::{new_mutex, Mutex};
+ //! # use core::pin::Pin;
+ //! # #[pin_data]
+@@ -68,7 +68,7 @@
+ //! #     a <- new_mutex!(42, "Foo::a"),
+ //! #     b: 24,
+ //! # });
+-//! let foo: Result<Pin<Box<Foo>>> = Box::pin_init(foo, GFP_KERNEL);
++//! let foo: Result<Pin<KBox<Foo>>> = KBox::pin_init(foo, GFP_KERNEL);
+ //! ```
+ //!
+ //! For more information see the [`pin_init!`] macro.
+@@ -87,20 +87,19 @@
+ //! To declare an init macro/function you just return an [`impl PinInit<T, E>`]:
+ //!
+ //! ```rust
+-//! # #![allow(clippy::disallowed_names)]
+ //! # use kernel::{sync::Mutex, new_mutex, init::PinInit, try_pin_init};
+ //! #[pin_data]
+ //! struct DriverData {
+ //!     #[pin]
+ //!     status: Mutex<i32>,
+-//!     buffer: Box<[u8; 1_000_000]>,
++//!     buffer: KBox<[u8; 1_000_000]>,
+ //! }
+ //!
+ //! impl DriverData {
+ //!     fn new() -> impl PinInit<Self, Error> {
+ //!         try_pin_init!(Self {
+ //!             status <- new_mutex!(0, "DriverData::status"),
+-//!             buffer: Box::init(kernel::init::zeroed(), GFP_KERNEL)?,
++//!             buffer: KBox::init(kernel::init::zeroed(), GFP_KERNEL)?,
+ //!         })
+ //!     }
+ //! }
+@@ -121,11 +120,12 @@
+ //!   `slot` gets called.
+ //!
+ //! ```rust
+-//! # #![allow(unreachable_pub, clippy::disallowed_names)]
++//! # #![expect(unreachable_pub, clippy::disallowed_names)]
+ //! use kernel::{init, types::Opaque};
+ //! use core::{ptr::addr_of_mut, marker::PhantomPinned, pin::Pin};
+ //! # mod bindings {
+-//! #     #![allow(non_camel_case_types)]
++//! #     #![expect(non_camel_case_types)]
++//! #     #![expect(clippy::missing_safety_doc)]
+ //! #     pub struct foo;
+ //! #     pub unsafe fn init_foo(_ptr: *mut foo) {}
+ //! #     pub unsafe fn destroy_foo(_ptr: *mut foo) {}
+@@ -133,7 +133,7 @@
+ //! # }
+ //! # // `Error::from_errno` is `pub(crate)` in the `kernel` crate, thus provide a workaround.
+ //! # trait FromErrno {
+-//! #     fn from_errno(errno: core::ffi::c_int) -> Error {
++//! #     fn from_errno(errno: kernel::ffi::c_int) -> Error {
+ //! #         // Dummy error that can be constructed outside the `kernel` crate.
+ //! #         Error::from(core::fmt::Error)
+ //! #     }
+@@ -211,13 +211,12 @@
+ //! [`pin_init!`]: crate::pin_init!
+ 
+ use crate::{
+-    alloc::{box_ext::BoxExt, AllocError, Flags},
++    alloc::{AllocError, Flags, KBox},
+     error::{self, Error},
+     sync::Arc,
+     sync::UniqueArc,
+     types::{Opaque, ScopeGuard},
+ };
+-use alloc::boxed::Box;
+ use core::{
+     cell::UnsafeCell,
+     convert::Infallible,
+@@ -238,7 +237,7 @@
+ /// # Examples
+ ///
+ /// ```rust
+-/// # #![allow(clippy::disallowed_names)]
++/// # #![expect(clippy::disallowed_names)]
+ /// # use kernel::{init, macros::pin_data, pin_init, stack_pin_init, init::*, sync::Mutex, new_mutex};
+ /// # use core::pin::Pin;
+ /// #[pin_data]
+@@ -290,7 +289,7 @@ macro_rules! stack_pin_init {
+ /// # Examples
+ ///
+ /// ```rust,ignore
+-/// # #![allow(clippy::disallowed_names)]
++/// # #![expect(clippy::disallowed_names)]
+ /// # use kernel::{init, pin_init, stack_try_pin_init, init::*, sync::Mutex, new_mutex};
+ /// # use macros::pin_data;
+ /// # use core::{alloc::AllocError, pin::Pin};
+@@ -298,7 +297,7 @@ macro_rules! stack_pin_init {
+ /// struct Foo {
+ ///     #[pin]
+ ///     a: Mutex<usize>,
+-///     b: Box<Bar>,
++///     b: KBox<Bar>,
+ /// }
+ ///
+ /// struct Bar {
+@@ -307,7 +306,7 @@ macro_rules! stack_pin_init {
+ ///
+ /// stack_try_pin_init!(let foo: Result<Pin<&mut Foo>, AllocError> = pin_init!(Foo {
+ ///     a <- new_mutex!(42),
+-///     b: Box::new(Bar {
++///     b: KBox::new(Bar {
+ ///         x: 64,
+ ///     }, GFP_KERNEL)?,
+ /// }));
+@@ -316,7 +315,7 @@ macro_rules! stack_pin_init {
+ /// ```
+ ///
+ /// ```rust,ignore
+-/// # #![allow(clippy::disallowed_names)]
++/// # #![expect(clippy::disallowed_names)]
+ /// # use kernel::{init, pin_init, stack_try_pin_init, init::*, sync::Mutex, new_mutex};
+ /// # use macros::pin_data;
+ /// # use core::{alloc::AllocError, pin::Pin};
+@@ -324,7 +323,7 @@ macro_rules! stack_pin_init {
+ /// struct Foo {
+ ///     #[pin]
+ ///     a: Mutex<usize>,
+-///     b: Box<Bar>,
++///     b: KBox<Bar>,
+ /// }
+ ///
+ /// struct Bar {
+@@ -333,7 +332,7 @@ macro_rules! stack_pin_init {
+ ///
+ /// stack_try_pin_init!(let foo: Pin<&mut Foo> =? pin_init!(Foo {
+ ///     a <- new_mutex!(42),
+-///     b: Box::new(Bar {
++///     b: KBox::new(Bar {
+ ///         x: 64,
+ ///     }, GFP_KERNEL)?,
+ /// }));
+@@ -368,7 +367,6 @@ macro_rules! stack_try_pin_init {
+ /// The syntax is almost identical to that of a normal `struct` initializer:
+ ///
+ /// ```rust
+-/// # #![allow(clippy::disallowed_names)]
+ /// # use kernel::{init, pin_init, macros::pin_data, init::*};
+ /// # use core::pin::Pin;
+ /// #[pin_data]
+@@ -392,7 +390,7 @@ macro_rules! stack_try_pin_init {
+ ///     },
+ /// });
+ /// # initializer }
+-/// # Box::pin_init(demo(), GFP_KERNEL).unwrap();
++/// # KBox::pin_init(demo(), GFP_KERNEL).unwrap();
+ /// ```
+ ///
+ /// Arbitrary Rust expressions can be used to set the value of a variable.
+@@ -413,7 +411,6 @@ macro_rules! stack_try_pin_init {
+ /// To create an initializer function, simply declare it like this:
+ ///
+ /// ```rust
+-/// # #![allow(clippy::disallowed_names)]
+ /// # use kernel::{init, pin_init, init::*};
+ /// # use core::pin::Pin;
+ /// # #[pin_data]
+@@ -440,7 +437,7 @@ macro_rules! stack_try_pin_init {
+ /// Users of `Foo` can now create it like this:
+ ///
+ /// ```rust
+-/// # #![allow(clippy::disallowed_names)]
++/// # #![expect(clippy::disallowed_names)]
+ /// # use kernel::{init, pin_init, macros::pin_data, init::*};
+ /// # use core::pin::Pin;
+ /// # #[pin_data]
+@@ -462,13 +459,12 @@ macro_rules! stack_try_pin_init {
+ /// #         })
+ /// #     }
+ /// # }
+-/// let foo = Box::pin_init(Foo::new(), GFP_KERNEL);
++/// let foo = KBox::pin_init(Foo::new(), GFP_KERNEL);
+ /// ```
+ ///
+ /// They can also easily embed it into their own `struct`s:
+ ///
+ /// ```rust
+-/// # #![allow(clippy::disallowed_names)]
+ /// # use kernel::{init, pin_init, macros::pin_data, init::*};
+ /// # use core::pin::Pin;
+ /// # #[pin_data]
+@@ -541,6 +537,7 @@ macro_rules! stack_try_pin_init {
+ /// }
+ /// pin_init!(&this in Buf {
+ ///     buf: [0; 64],
++///     // SAFETY: TODO.
+ ///     ptr: unsafe { addr_of_mut!((*this.as_ptr()).buf).cast() },
+ ///     pin: PhantomPinned,
+ /// });
+@@ -590,11 +587,10 @@ macro_rules! pin_init {
+ /// # Examples
+ ///
+ /// ```rust
+-/// # #![feature(new_uninit)]
+ /// use kernel::{init::{self, PinInit}, error::Error};
+ /// #[pin_data]
+ /// struct BigBuf {
+-///     big: Box<[u8; 1024 * 1024 * 1024]>,
++///     big: KBox<[u8; 1024 * 1024 * 1024]>,
+ ///     small: [u8; 1024 * 1024],
+ ///     ptr: *mut u8,
+ /// }
+@@ -602,7 +598,7 @@ macro_rules! pin_init {
+ /// impl BigBuf {
+ ///     fn new() -> impl PinInit<Self, Error> {
+ ///         try_pin_init!(Self {
+-///             big: Box::init(init::zeroed(), GFP_KERNEL)?,
++///             big: KBox::init(init::zeroed(), GFP_KERNEL)?,
+ ///             small: [0; 1024 * 1024],
+ ///             ptr: core::ptr::null_mut(),
+ ///         }? Error)
+@@ -694,16 +690,16 @@ macro_rules! init {
+ /// # Examples
+ ///
+ /// ```rust
+-/// use kernel::{init::{PinInit, zeroed}, error::Error};
++/// use kernel::{alloc::KBox, init::{PinInit, zeroed}, error::Error};
+ /// struct BigBuf {
+-///     big: Box<[u8; 1024 * 1024 * 1024]>,
++///     big: KBox<[u8; 1024 * 1024 * 1024]>,
+ ///     small: [u8; 1024 * 1024],
+ /// }
+ ///
+ /// impl BigBuf {
+ ///     fn new() -> impl Init<Self, Error> {
+ ///         try_init!(Self {
+-///             big: Box::init(zeroed(), GFP_KERNEL)?,
++///             big: KBox::init(zeroed(), GFP_KERNEL)?,
+ ///             small: [0; 1024 * 1024],
+ ///         }? Error)
+ ///     }
+@@ -814,8 +810,8 @@ macro_rules! assert_pinned {
+ /// A pin-initializer for the type `T`.
+ ///
+ /// To use this initializer, you will need a suitable memory location that can hold a `T`. This can
+-/// be [`Box<T>`], [`Arc<T>`], [`UniqueArc<T>`] or even the stack (see [`stack_pin_init!`]). Use the
+-/// [`InPlaceInit::pin_init`] function of a smart pointer like [`Arc<T>`] on this.
++/// be [`KBox<T>`], [`Arc<T>`], [`UniqueArc<T>`] or even the stack (see [`stack_pin_init!`]). Use
++/// the [`InPlaceInit::pin_init`] function of a smart pointer like [`Arc<T>`] on this.
+ ///
+ /// Also see the [module description](self).
+ ///
+@@ -854,7 +850,7 @@ pub unsafe trait PinInit<T: ?Sized, E = Infallible>: Sized {
+     /// # Examples
+     ///
+     /// ```rust
+-    /// # #![allow(clippy::disallowed_names)]
++    /// # #![expect(clippy::disallowed_names)]
+     /// use kernel::{types::Opaque, init::pin_init_from_closure};
+     /// #[repr(C)]
+     /// struct RawFoo([u8; 16]);
+@@ -875,6 +871,7 @@ pub unsafe trait PinInit<T: ?Sized, E = Infallible>: Sized {
+     /// }
+     ///
+     /// let foo = pin_init!(Foo {
++    ///     // SAFETY: TODO.
+     ///     raw <- unsafe {
+     ///         Opaque::ffi_init(|s| {
+     ///             init_foo(s);
+@@ -894,7 +891,7 @@ fn pin_chain<F>(self, f: F) -> ChainPinInit<Self, F, T, E>
+ }
+ 
+ /// An initializer returned by [`PinInit::pin_chain`].
+-pub struct ChainPinInit<I, F, T: ?Sized, E>(I, F, __internal::Invariant<(E, Box<T>)>);
++pub struct ChainPinInit<I, F, T: ?Sized, E>(I, F, __internal::Invariant<(E, KBox<T>)>);
+ 
+ // SAFETY: The `__pinned_init` function is implemented such that it
+ // - returns `Ok(())` on successful initialization,
+@@ -920,8 +917,8 @@ unsafe fn __pinned_init(self, slot: *mut T) -> Result<(), E> {
+ /// An initializer for `T`.
+ ///
+ /// To use this initializer, you will need a suitable memory location that can hold a `T`. This can
+-/// be [`Box<T>`], [`Arc<T>`], [`UniqueArc<T>`] or even the stack (see [`stack_pin_init!`]). Use the
+-/// [`InPlaceInit::init`] function of a smart pointer like [`Arc<T>`] on this. Because
++/// be [`KBox<T>`], [`Arc<T>`], [`UniqueArc<T>`] or even the stack (see [`stack_pin_init!`]). Use
++/// the [`InPlaceInit::init`] function of a smart pointer like [`Arc<T>`] on this. Because
+ /// [`PinInit<T, E>`] is a super trait, you can use every function that takes it as well.
+ ///
+ /// Also see the [module description](self).
+@@ -965,7 +962,7 @@ pub unsafe trait Init<T: ?Sized, E = Infallible>: PinInit<T, E> {
+     /// # Examples
+     ///
+     /// ```rust
+-    /// # #![allow(clippy::disallowed_names)]
++    /// # #![expect(clippy::disallowed_names)]
+     /// use kernel::{types::Opaque, init::{self, init_from_closure}};
+     /// struct Foo {
+     ///     buf: [u8; 1_000_000],
+@@ -993,7 +990,7 @@ fn chain<F>(self, f: F) -> ChainInit<Self, F, T, E>
+ }
+ 
+ /// An initializer returned by [`Init::chain`].
+-pub struct ChainInit<I, F, T: ?Sized, E>(I, F, __internal::Invariant<(E, Box<T>)>);
++pub struct ChainInit<I, F, T: ?Sized, E>(I, F, __internal::Invariant<(E, KBox<T>)>);
+ 
+ // SAFETY: The `__init` function is implemented such that it
+ // - returns `Ok(())` on successful initialization,
+@@ -1077,8 +1074,9 @@ pub fn uninit<T, E>() -> impl Init<MaybeUninit<T>, E> {
+ /// # Examples
+ ///
+ /// ```rust
+-/// use kernel::{error::Error, init::init_array_from_fn};
+-/// let array: Box<[usize; 1_000]> = Box::init::<Error>(init_array_from_fn(|i| i), GFP_KERNEL).unwrap();
++/// use kernel::{alloc::KBox, error::Error, init::init_array_from_fn};
++/// let array: KBox<[usize; 1_000]> =
++///     KBox::init::<Error>(init_array_from_fn(|i| i), GFP_KERNEL).unwrap();
+ /// assert_eq!(array.len(), 1_000);
+ /// ```
+ pub fn init_array_from_fn<I, const N: usize, T, E>(
+@@ -1162,6 +1160,7 @@ pub fn pin_init_array_from_fn<I, const N: usize, T, E>(
+ // SAFETY: Every type can be initialized by-value.
+ unsafe impl<T, E> Init<T, E> for T {
+     unsafe fn __init(self, slot: *mut T) -> Result<(), E> {
++        // SAFETY: TODO.
+         unsafe { slot.write(self) };
+         Ok(())
+     }
+@@ -1170,6 +1169,7 @@ unsafe fn __init(self, slot: *mut T) -> Result<(), E> {
+ // SAFETY: Every type can be initialized by-value. `__pinned_init` calls `__init`.
+ unsafe impl<T, E> PinInit<T, E> for T {
+     unsafe fn __pinned_init(self, slot: *mut T) -> Result<(), E> {
++        // SAFETY: TODO.
+         unsafe { self.__init(slot) }
+     }
+ }
+@@ -1243,26 +1243,6 @@ fn try_init<E>(init: impl Init<T, E>, flags: Flags) -> Result<Self, E>
+     }
+ }
+ 
+-impl<T> InPlaceInit<T> for Box<T> {
+-    type PinnedSelf = Pin<Self>;
+-
+-    #[inline]
+-    fn try_pin_init<E>(init: impl PinInit<T, E>, flags: Flags) -> Result<Self::PinnedSelf, E>
+-    where
+-        E: From<AllocError>,
+-    {
+-        <Box<_> as BoxExt<_>>::new_uninit(flags)?.write_pin_init(init)
+-    }
+-
+-    #[inline]
+-    fn try_init<E>(init: impl Init<T, E>, flags: Flags) -> Result<Self, E>
+-    where
+-        E: From<AllocError>,
+-    {
+-        <Box<_> as BoxExt<_>>::new_uninit(flags)?.write_init(init)
+-    }
+-}
+-
+ impl<T> InPlaceInit<T> for UniqueArc<T> {
+     type PinnedSelf = Pin<Self>;
+ 
+@@ -1299,28 +1279,6 @@ pub trait InPlaceWrite<T> {
+     fn write_pin_init<E>(self, init: impl PinInit<T, E>) -> Result<Pin<Self::Initialized>, E>;
+ }
+ 
+-impl<T> InPlaceWrite<T> for Box<MaybeUninit<T>> {
+-    type Initialized = Box<T>;
+-
+-    fn write_init<E>(mut self, init: impl Init<T, E>) -> Result<Self::Initialized, E> {
+-        let slot = self.as_mut_ptr();
+-        // SAFETY: When init errors/panics, slot will get deallocated but not dropped,
+-        // slot is valid.
+-        unsafe { init.__init(slot)? };
+-        // SAFETY: All fields have been initialized.
+-        Ok(unsafe { self.assume_init() })
+-    }
+-
+-    fn write_pin_init<E>(mut self, init: impl PinInit<T, E>) -> Result<Pin<Self::Initialized>, E> {
+-        let slot = self.as_mut_ptr();
+-        // SAFETY: When init errors/panics, slot will get deallocated but not dropped,
+-        // slot is valid and will not be moved, because we pin it later.
+-        unsafe { init.__pinned_init(slot)? };
+-        // SAFETY: All fields have been initialized.
+-        Ok(unsafe { self.assume_init() }.into())
+-    }
+-}
+-
+ impl<T> InPlaceWrite<T> for UniqueArc<MaybeUninit<T>> {
+     type Initialized = UniqueArc<T>;
+ 
+@@ -1411,6 +1369,7 @@ pub fn zeroed<T: Zeroable>() -> impl Init<T> {
+ 
+ macro_rules! impl_zeroable {
+     ($($({$($generics:tt)*})? $t:ty, )*) => {
++        // SAFETY: Safety comments written in the macro invocation.
+         $(unsafe impl$($($generics)*)? Zeroable for $t {})*
+     };
+ }
+@@ -1451,7 +1410,7 @@ macro_rules! impl_zeroable {
+     //
+     // In this case we are allowed to use `T: ?Sized`, since all zeros is the `None` variant.
+     {<T: ?Sized>} Option<NonNull<T>>,
+-    {<T: ?Sized>} Option<Box<T>>,
++    {<T: ?Sized>} Option<KBox<T>>,
+ 
+     // SAFETY: `null` pointer is valid.
+     //
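Note on the init.rs hunks above: they switch the module from alloc::boxed::Box plus the BoxExt extension trait to the kernel's own KBox, and drop the InPlaceInit/InPlaceWrite impls for plain Box (the same functionality now lives with the kernel allocator types). A minimal userspace sketch of what the removed write_init impl did, with std's Box standing in for KBox and the error path omitted:

    use core::mem::MaybeUninit;

    // Stand-in for the removed `InPlaceWrite::write_init`: initialise the
    // allocation in place, then assert that it is initialised.
    fn write_init<T>(mut slot: Box<MaybeUninit<T>>, value: T) -> Box<T> {
        slot.write(value);
        // SAFETY: `write` above initialised every byte of `*slot`.
        unsafe { slot.assume_init() }
    }

    fn main() {
        let b = write_init(Box::new_uninit(), 42u32);
        assert_eq!(*b, 42);
    }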
+diff --git a/rust/kernel/init/__internal.rs b/rust/kernel/init/__internal.rs
+index 13cefd37512f8d..74329cc3262c05 100644
+--- a/rust/kernel/init/__internal.rs
++++ b/rust/kernel/init/__internal.rs
+@@ -15,9 +15,10 @@
+ /// [this table]: https://doc.rust-lang.org/nomicon/phantom-data.html#table-of-phantomdata-patterns
+ pub(super) type Invariant<T> = PhantomData<fn(*mut T) -> *mut T>;
+ 
+-/// This is the module-internal type implementing `PinInit` and `Init`. It is unsafe to create this
+-/// type, since the closure needs to fulfill the same safety requirement as the
+-/// `__pinned_init`/`__init` functions.
++/// Module-internal type implementing `PinInit` and `Init`.
++///
++/// It is unsafe to create this type, since the closure needs to fulfill the same safety
++/// requirement as the `__pinned_init`/`__init` functions.
+ pub(crate) struct InitClosure<F, T: ?Sized, E>(pub(crate) F, pub(crate) Invariant<(E, T)>);
+ 
+ // SAFETY: While constructing the `InitClosure`, the user promised that it upholds the
+@@ -53,6 +54,7 @@ unsafe fn __pinned_init(self, slot: *mut T) -> Result<(), E> {
+ pub unsafe trait HasPinData {
+     type PinData: PinData;
+ 
++    #[expect(clippy::missing_safety_doc)]
+     unsafe fn __pin_data() -> Self::PinData;
+ }
+ 
+@@ -82,6 +84,7 @@ fn make_closure<F, O, E>(self, f: F) -> F
+ pub unsafe trait HasInitData {
+     type InitData: InitData;
+ 
++    #[expect(clippy::missing_safety_doc)]
+     unsafe fn __init_data() -> Self::InitData;
+ }
+ 
+@@ -102,7 +105,7 @@ fn make_closure<F, O, E>(self, f: F) -> F
+     }
+ }
+ 
+-pub struct AllData<T: ?Sized>(PhantomData<fn(Box<T>) -> Box<T>>);
++pub struct AllData<T: ?Sized>(PhantomData<fn(KBox<T>) -> KBox<T>>);
+ 
+ impl<T: ?Sized> Clone for AllData<T> {
+     fn clone(&self) -> Self {
+@@ -112,10 +115,12 @@ fn clone(&self) -> Self {
+ 
+ impl<T: ?Sized> Copy for AllData<T> {}
+ 
++// SAFETY: TODO.
+ unsafe impl<T: ?Sized> InitData for AllData<T> {
+     type Datee = T;
+ }
+ 
++// SAFETY: TODO.
+ unsafe impl<T: ?Sized> HasInitData for T {
+     type InitData = AllData<T>;
+ 
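Several hunks in this file, and throughout the patch, replace #[allow(...)] with #[expect(...)]. The difference, shown in the standalone sketch below, is that an expectation also warns (via unfulfilled_lint_expectations) when the lint stops firing, so suppressions cannot go stale. The sketch assumes a compiler where #[expect] is available; the kernel enables it through the lint_reasons feature added in the lib.rs hunk further down.

    // `#[expect]` suppresses the lint like `#[allow]`, but additionally
    // emits `unfulfilled_lint_expectations` if the lint never fires.
    #[expect(unused_variables)]
    fn demo() {
        let unused = 1; // lint fires, expectation fulfilled, no warning
    }

    fn main() {
        demo();
    }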
+diff --git a/rust/kernel/init/macros.rs b/rust/kernel/init/macros.rs
+index 9a0c4650ef676d..1fd146a8324165 100644
+--- a/rust/kernel/init/macros.rs
++++ b/rust/kernel/init/macros.rs
+@@ -182,13 +182,13 @@
+ //!     // Normally `Drop` bounds do not have the correct semantics, but for this purpose they do
+ //!     // (normally people want to know if a type has any kind of drop glue at all, here we want
+ //!     // to know if it has any kind of custom drop glue, which is exactly what this bound does).
+-//!     #[allow(drop_bounds)]
++//!     #[expect(drop_bounds)]
+ //!     impl<T: ::core::ops::Drop> MustNotImplDrop for T {}
+ //!     impl<T> MustNotImplDrop for Bar<T> {}
+ //!     // Here comes a convenience check, if one implemented `PinnedDrop`, but forgot to add it to
+ //!     // `#[pin_data]`, then this will error with the same mechanic as above, this is not needed
+ //!     // for safety, but a good sanity check, since no normal code calls `PinnedDrop::drop`.
+-//!     #[allow(non_camel_case_types)]
++//!     #[expect(non_camel_case_types)]
+ //!     trait UselessPinnedDropImpl_you_need_to_specify_PinnedDrop {}
+ //!     impl<
+ //!         T: ::kernel::init::PinnedDrop,
+@@ -513,6 +513,7 @@ fn drop($($sig:tt)*) {
+             }
+         ),
+     ) => {
++        // SAFETY: TODO.
+         unsafe $($impl_sig)* {
+             // Inherit all attributes and the type/ident tokens for the signature.
+             $(#[$($attr)*])*
+@@ -872,6 +873,7 @@ unsafe fn __pin_data() -> Self::PinData {
+                 }
+             }
+ 
++            // SAFETY: TODO.
+             unsafe impl<$($impl_generics)*>
+                 $crate::init::__internal::PinData for __ThePinData<$($ty_generics)*>
+             where $($whr)*
+@@ -923,14 +925,14 @@ impl<'__pin, $($impl_generics)*> ::core::marker::Unpin for $name<$($ty_generics)
+         // `Drop`. Additionally we will implement this trait for the struct leading to a conflict,
+         // if it also implements `Drop`
+         trait MustNotImplDrop {}
+-        #[allow(drop_bounds)]
++        #[expect(drop_bounds)]
+         impl<T: ::core::ops::Drop> MustNotImplDrop for T {}
+         impl<$($impl_generics)*> MustNotImplDrop for $name<$($ty_generics)*>
+         where $($whr)* {}
+         // We also take care to prevent users from writing a useless `PinnedDrop` implementation.
+         // They might implement `PinnedDrop` correctly for the struct, but forget to give
+         // `PinnedDrop` as the parameter to `#[pin_data]`.
+-        #[allow(non_camel_case_types)]
++        #[expect(non_camel_case_types)]
+         trait UselessPinnedDropImpl_you_need_to_specify_PinnedDrop {}
+         impl<T: $crate::init::PinnedDrop>
+             UselessPinnedDropImpl_you_need_to_specify_PinnedDrop for T {}
+@@ -987,6 +989,7 @@ fn drop(&mut self) {
+         //
+         // The functions are `unsafe` to prevent accidentally calling them.
+         #[allow(dead_code)]
++        #[expect(clippy::missing_safety_doc)]
+         impl<$($impl_generics)*> $pin_data<$($ty_generics)*>
+         where $($whr)*
+         {
+@@ -997,6 +1000,7 @@ impl<$($impl_generics)*> $pin_data<$($ty_generics)*>
+                     slot: *mut $p_type,
+                     init: impl $crate::init::PinInit<$p_type, E>,
+                 ) -> ::core::result::Result<(), E> {
++                    // SAFETY: TODO.
+                     unsafe { $crate::init::PinInit::__pinned_init(init, slot) }
+                 }
+             )*
+@@ -1007,6 +1011,7 @@ impl<$($impl_generics)*> $pin_data<$($ty_generics)*>
+                     slot: *mut $type,
+                     init: impl $crate::init::Init<$type, E>,
+                 ) -> ::core::result::Result<(), E> {
++                    // SAFETY: TODO.
+                     unsafe { $crate::init::Init::__init(init, slot) }
+                 }
+             )*
+@@ -1121,6 +1126,8 @@ macro_rules! __init_internal {
+         // no possibility of returning without `unsafe`.
+         struct __InitOk;
+         // Get the data about fields from the supplied type.
++        //
++        // SAFETY: TODO.
+         let data = unsafe {
+             use $crate::init::__internal::$has_data;
+             // Here we abuse `paste!` to retokenize `$t`. Declarative macros have some internal
+@@ -1176,6 +1183,7 @@ fn assert_zeroable<T: $crate::init::Zeroable>(_: *mut T) {}
+         let init = move |slot| -> ::core::result::Result<(), $err> {
+             init(slot).map(|__InitOk| ())
+         };
++        // SAFETY: TODO.
+         let init = unsafe { $crate::init::$construct_closure::<_, $err>(init) };
+         init
+     }};
+@@ -1324,6 +1332,8 @@ fn assert_zeroable<T: $crate::init::Zeroable>(_: *mut T) {}
+         // Endpoint, nothing more to munch, create the initializer.
+         // Since we are in the closure that is never called, this will never get executed.
+         // We abuse `slot` to get the correct type inference here:
++        //
++        // SAFETY: TODO.
+         unsafe {
+             // Here we abuse `paste!` to retokenize `$t`. Declarative macros have some internal
+             // information that is associated to already parsed fragments, so a path fragment
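The #[expect(drop_bounds)] hunks above sit on the MustNotImplDrop device used by #[pin_data]. Reduced to a standalone sketch: a blanket impl over every Drop type plus a concrete impl for the user's type, so that adding a plain Drop impl creates an impl overlap and the build fails.

    trait MustNotImplDrop {}

    // `drop_bounds` normally warns that a `T: Drop` bound is useless; here
    // the bound is the whole point, so the lint is expected.
    #[expect(drop_bounds)]
    impl<T: Drop> MustNotImplDrop for T {}

    struct Bar;
    impl MustNotImplDrop for Bar {} // fine while `Bar` has no `Drop` impl

    // Uncommenting this makes the two impls overlap and compilation fails,
    // which is exactly the guarantee `#[pin_data]` wants:
    // impl Drop for Bar { fn drop(&mut self) {} }

    fn main() {}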
+diff --git a/rust/kernel/ioctl.rs b/rust/kernel/ioctl.rs
+index cfa7d080b53193..2fc7662339e54b 100644
+--- a/rust/kernel/ioctl.rs
++++ b/rust/kernel/ioctl.rs
+@@ -4,7 +4,7 @@
+ //!
+ //! C header: [`include/asm-generic/ioctl.h`](srctree/include/asm-generic/ioctl.h)
+ 
+-#![allow(non_snake_case)]
++#![expect(non_snake_case)]
+ 
+ use crate::build_assert;
+ 
+diff --git a/rust/kernel/lib.rs b/rust/kernel/lib.rs
+index e936254531fd0a..d764cb7ff5d785 100644
+--- a/rust/kernel/lib.rs
++++ b/rust/kernel/lib.rs
+@@ -15,7 +15,8 @@
+ #![feature(arbitrary_self_types)]
+ #![feature(coerce_unsized)]
+ #![feature(dispatch_from_dyn)]
+-#![feature(new_uninit)]
++#![feature(inline_const)]
++#![feature(lint_reasons)]
+ #![feature(unsize)]
+ 
+ // Ensure conditional compilation based on the kernel configuration works;
+@@ -26,6 +27,8 @@
+ // Allow proc-macros to refer to `::kernel` inside the `kernel` crate (this crate).
+ extern crate self as kernel;
+ 
++pub use ffi;
++
+ pub mod alloc;
+ #[cfg(CONFIG_BLOCK)]
+ pub mod block;
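The `pub use ffi;` line above is what lets the rest of the patch rewrite core::ffi::c_int and friends as crate::ffi::c_int (see the net/phy.rs, sync/lock, task.rs and time.rs hunks below): callers name one module, and the backing definitions can change in a single place. A standalone sketch of the indirection, with illustrative module contents:

    mod ffi {
        // The kernel maps these to its own integer types; plain `core::ffi`
        // is used here only to keep the sketch self-contained.
        pub use core::ffi::c_int;
    }

    fn add_one(x: ffi::c_int) -> ffi::c_int {
        x + 1
    }

    fn main() {
        assert_eq!(add_one(41), 42);
    }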
+diff --git a/rust/kernel/list.rs b/rust/kernel/list.rs
+index 5b4aec29eb6753..fb93330f4af48c 100644
+--- a/rust/kernel/list.rs
++++ b/rust/kernel/list.rs
+@@ -354,6 +354,7 @@ pub fn pop_front(&mut self) -> Option<ListArc<T, ID>> {
+     ///
+     /// `item` must not be in a different linked list (with the same id).
+     pub unsafe fn remove(&mut self, item: &T) -> Option<ListArc<T, ID>> {
++        // SAFETY: TODO.
+         let mut item = unsafe { ListLinks::fields(T::view_links(item)) };
+         // SAFETY: The user provided a reference, and reference are never dangling.
+         //
+diff --git a/rust/kernel/list/arc_field.rs b/rust/kernel/list/arc_field.rs
+index 2330f673427ab0..c4b9dd50398264 100644
+--- a/rust/kernel/list/arc_field.rs
++++ b/rust/kernel/list/arc_field.rs
+@@ -56,7 +56,7 @@ pub unsafe fn assert_ref(&self) -> &T {
+     ///
+     /// The caller must have mutable access to the `ListArc<ID>` containing the struct with this
+     /// field for the duration of the returned reference.
+-    #[allow(clippy::mut_from_ref)]
++    #[expect(clippy::mut_from_ref)]
+     pub unsafe fn assert_mut(&self) -> &mut T {
+         // SAFETY: The caller has exclusive access to the `ListArc`, so they also have exclusive
+         // access to this field.
+diff --git a/rust/kernel/net/phy.rs b/rust/kernel/net/phy.rs
+index 910ce867480a8a..beb62ec712c37a 100644
+--- a/rust/kernel/net/phy.rs
++++ b/rust/kernel/net/phy.rs
+@@ -314,7 +314,7 @@ impl<T: Driver> Adapter<T> {
+     /// `phydev` must be passed by the corresponding callback in `phy_driver`.
+     unsafe extern "C" fn soft_reset_callback(
+         phydev: *mut bindings::phy_device,
+-    ) -> core::ffi::c_int {
++    ) -> crate::ffi::c_int {
+         from_result(|| {
+             // SAFETY: This callback is called only in contexts
+             // where we hold `phy_device->lock`, so the accessors on
+@@ -328,7 +328,7 @@ impl<T: Driver> Adapter<T> {
+     /// # Safety
+     ///
+     /// `phydev` must be passed by the corresponding callback in `phy_driver`.
+-    unsafe extern "C" fn probe_callback(phydev: *mut bindings::phy_device) -> core::ffi::c_int {
++    unsafe extern "C" fn probe_callback(phydev: *mut bindings::phy_device) -> crate::ffi::c_int {
+         from_result(|| {
+             // SAFETY: This callback is called only in contexts
+             // where we can exclusively access `phy_device` because
+@@ -345,7 +345,7 @@ impl<T: Driver> Adapter<T> {
+     /// `phydev` must be passed by the corresponding callback in `phy_driver`.
+     unsafe extern "C" fn get_features_callback(
+         phydev: *mut bindings::phy_device,
+-    ) -> core::ffi::c_int {
++    ) -> crate::ffi::c_int {
+         from_result(|| {
+             // SAFETY: This callback is called only in contexts
+             // where we hold `phy_device->lock`, so the accessors on
+@@ -359,7 +359,7 @@ impl<T: Driver> Adapter<T> {
+     /// # Safety
+     ///
+     /// `phydev` must be passed by the corresponding callback in `phy_driver`.
+-    unsafe extern "C" fn suspend_callback(phydev: *mut bindings::phy_device) -> core::ffi::c_int {
++    unsafe extern "C" fn suspend_callback(phydev: *mut bindings::phy_device) -> crate::ffi::c_int {
+         from_result(|| {
+             // SAFETY: The C core code ensures that the accessors on
+             // `Device` are okay to call even though `phy_device->lock`
+@@ -373,7 +373,7 @@ impl<T: Driver> Adapter<T> {
+     /// # Safety
+     ///
+     /// `phydev` must be passed by the corresponding callback in `phy_driver`.
+-    unsafe extern "C" fn resume_callback(phydev: *mut bindings::phy_device) -> core::ffi::c_int {
++    unsafe extern "C" fn resume_callback(phydev: *mut bindings::phy_device) -> crate::ffi::c_int {
+         from_result(|| {
+             // SAFETY: The C core code ensures that the accessors on
+             // `Device` are okay to call even though `phy_device->lock`
+@@ -389,7 +389,7 @@ impl<T: Driver> Adapter<T> {
+     /// `phydev` must be passed by the corresponding callback in `phy_driver`.
+     unsafe extern "C" fn config_aneg_callback(
+         phydev: *mut bindings::phy_device,
+-    ) -> core::ffi::c_int {
++    ) -> crate::ffi::c_int {
+         from_result(|| {
+             // SAFETY: This callback is called only in contexts
+             // where we hold `phy_device->lock`, so the accessors on
+@@ -405,7 +405,7 @@ impl<T: Driver> Adapter<T> {
+     /// `phydev` must be passed by the corresponding callback in `phy_driver`.
+     unsafe extern "C" fn read_status_callback(
+         phydev: *mut bindings::phy_device,
+-    ) -> core::ffi::c_int {
++    ) -> crate::ffi::c_int {
+         from_result(|| {
+             // SAFETY: This callback is called only in contexts
+             // where we hold `phy_device->lock`, so the accessors on
+@@ -421,7 +421,7 @@ impl<T: Driver> Adapter<T> {
+     /// `phydev` must be passed by the corresponding callback in `phy_driver`.
+     unsafe extern "C" fn match_phy_device_callback(
+         phydev: *mut bindings::phy_device,
+-    ) -> core::ffi::c_int {
++    ) -> crate::ffi::c_int {
+         // SAFETY: This callback is called only in contexts
+         // where we hold `phy_device->lock`, so the accessors on
+         // `Device` are okay to call.
+diff --git a/rust/kernel/prelude.rs b/rust/kernel/prelude.rs
+index 4571daec0961bb..8bdab9aa0d16bf 100644
+--- a/rust/kernel/prelude.rs
++++ b/rust/kernel/prelude.rs
+@@ -14,10 +14,7 @@
+ #[doc(no_inline)]
+ pub use core::pin::Pin;
+ 
+-pub use crate::alloc::{box_ext::BoxExt, flags::*, vec_ext::VecExt};
+-
+-#[doc(no_inline)]
+-pub use alloc::{boxed::Box, vec::Vec};
++pub use crate::alloc::{flags::*, Box, KBox, KVBox, KVVec, KVec, VBox, VVec, Vec};
+ 
+ #[doc(no_inline)]
+ pub use macros::{module, pin_data, pinned_drop, vtable, Zeroable};
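With the prelude hunk above, downstream modules stop importing alloc::boxed::Box/alloc::vec::Vec plus extension traits and instead receive the kernel allocator family (KBox, KVec, KVBox, VBox, VVec, ...) from one glob import. A rough userspace sketch of the effect, with std types standing in for the kmalloc/vmalloc-backed kernel ones:

    mod prelude {
        // Stand-ins: in the kernel these are allocator-aware kernel types.
        pub type KBox<T> = Box<T>;
        pub type KVec<T> = Vec<T>;
    }

    use prelude::*;

    fn main() {
        let b: KBox<u32> = Box::new(7);
        let v: KVec<u32> = vec![1, 2, 3];
        assert_eq!(*b + v.len() as u32, 10);
    }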
+diff --git a/rust/kernel/print.rs b/rust/kernel/print.rs
+index 508b0221256c97..a28077a7cb3011 100644
+--- a/rust/kernel/print.rs
++++ b/rust/kernel/print.rs
+@@ -14,6 +14,7 @@
+ use crate::str::RawFormatter;
+ 
+ // Called from `vsprintf` with format specifier `%pA`.
++#[expect(clippy::missing_safety_doc)]
+ #[no_mangle]
+ unsafe extern "C" fn rust_fmt_argument(
+     buf: *mut c_char,
+@@ -23,6 +24,7 @@
+     use fmt::Write;
+     // SAFETY: The C contract guarantees that `buf` is valid if it's less than `end`.
+     let mut w = unsafe { RawFormatter::from_ptrs(buf.cast(), end.cast()) };
++    // SAFETY: TODO.
+     let _ = w.write_fmt(unsafe { *(ptr as *const fmt::Arguments<'_>) });
+     w.pos().cast()
+ }
+@@ -102,6 +104,7 @@ pub unsafe fn call_printk(
+ ) {
+     // `_printk` does not seem to fail in any path.
+     #[cfg(CONFIG_PRINTK)]
++    // SAFETY: TODO.
+     unsafe {
+         bindings::_printk(
+             format_string.as_ptr() as _,
+@@ -137,7 +140,7 @@ pub fn call_printk_cont(args: fmt::Arguments<'_>) {
+ #[doc(hidden)]
+ #[cfg(not(testlib))]
+ #[macro_export]
+-#[allow(clippy::crate_in_macro_def)]
++#[expect(clippy::crate_in_macro_def)]
+ macro_rules! print_macro (
+     // The non-continuation cases (most of them, e.g. `INFO`).
+     ($format_string:path, false, $($arg:tt)+) => (
+diff --git a/rust/kernel/rbtree.rs b/rust/kernel/rbtree.rs
+index 7543378d372927..571e27efe54489 100644
+--- a/rust/kernel/rbtree.rs
++++ b/rust/kernel/rbtree.rs
+@@ -7,7 +7,6 @@
+ //! Reference: <https://docs.kernel.org/core-api/rbtree.html>
+ 
+ use crate::{alloc::Flags, bindings, container_of, error::Result, prelude::*};
+-use alloc::boxed::Box;
+ use core::{
+     cmp::{Ord, Ordering},
+     marker::PhantomData,
+@@ -497,7 +496,7 @@ fn drop(&mut self) {
+             // but it is not observable. The loop invariant is still maintained.
+ 
+             // SAFETY: `this` is valid per the loop invariant.
+-            unsafe { drop(Box::from_raw(this.cast_mut())) };
++            unsafe { drop(KBox::from_raw(this.cast_mut())) };
+         }
+     }
+ }
+@@ -764,7 +763,7 @@ pub fn remove_current(self) -> (Option<Self>, RBTreeNode<K, V>) {
+         // point to the links field of `Node<K, V>` objects.
+         let this = unsafe { container_of!(self.current.as_ptr(), Node<K, V>, links) }.cast_mut();
+         // SAFETY: `this` is valid by the type invariants as described above.
+-        let node = unsafe { Box::from_raw(this) };
++        let node = unsafe { KBox::from_raw(this) };
+         let node = RBTreeNode { node };
+         // SAFETY: The reference to the tree used to create the cursor outlives the cursor, so
+         // the tree cannot change. By the tree invariant, all nodes are valid.
+@@ -809,7 +808,7 @@ fn remove_neighbor(&mut self, direction: Direction) -> Option<RBTreeNode<K, V>>
+             // point to the links field of `Node<K, V>` objects.
+             let this = unsafe { container_of!(neighbor, Node<K, V>, links) }.cast_mut();
+             // SAFETY: `this` is valid by the type invariants as described above.
+-            let node = unsafe { Box::from_raw(this) };
++            let node = unsafe { KBox::from_raw(this) };
+             return Some(RBTreeNode { node });
+         }
+         None
+@@ -1038,7 +1037,7 @@ fn next(&mut self) -> Option<Self::Item> {
+ /// It contains the memory needed to hold a node that can be inserted into a red-black tree. One
+ /// can be obtained by directly allocating it ([`RBTreeNodeReservation::new`]).
+ pub struct RBTreeNodeReservation<K, V> {
+-    node: Box<MaybeUninit<Node<K, V>>>,
++    node: KBox<MaybeUninit<Node<K, V>>>,
+ }
+ 
+ impl<K, V> RBTreeNodeReservation<K, V> {
+@@ -1046,7 +1045,7 @@ impl<K, V> RBTreeNodeReservation<K, V> {
+     /// call to [`RBTree::insert`].
+     pub fn new(flags: Flags) -> Result<RBTreeNodeReservation<K, V>> {
+         Ok(RBTreeNodeReservation {
+-            node: <Box<_> as BoxExt<_>>::new_uninit(flags)?,
++            node: KBox::new_uninit(flags)?,
+         })
+     }
+ }
+@@ -1062,14 +1061,15 @@ impl<K, V> RBTreeNodeReservation<K, V> {
+     /// Initialises a node reservation.
+     ///
+     /// It then becomes an [`RBTreeNode`] that can be inserted into a tree.
+-    pub fn into_node(mut self, key: K, value: V) -> RBTreeNode<K, V> {
+-        self.node.write(Node {
+-            key,
+-            value,
+-            links: bindings::rb_node::default(),
+-        });
+-        // SAFETY: We just wrote to it.
+-        let node = unsafe { self.node.assume_init() };
++    pub fn into_node(self, key: K, value: V) -> RBTreeNode<K, V> {
++        let node = KBox::write(
++            self.node,
++            Node {
++                key,
++                value,
++                links: bindings::rb_node::default(),
++            },
++        );
+         RBTreeNode { node }
+     }
+ }
+@@ -1079,7 +1079,7 @@ pub fn into_node(mut self, key: K, value: V) -> RBTreeNode<K, V> {
+ /// The node is fully initialised (with key and value) and can be inserted into a tree without any
+ /// extra allocations or failure paths.
+ pub struct RBTreeNode<K, V> {
+-    node: Box<Node<K, V>>,
++    node: KBox<Node<K, V>>,
+ }
+ 
+ impl<K, V> RBTreeNode<K, V> {
+@@ -1091,7 +1091,9 @@ pub fn new(key: K, value: V, flags: Flags) -> Result<RBTreeNode<K, V>> {
+ 
+     /// Get the key and value from inside the node.
+     pub fn to_key_value(self) -> (K, V) {
+-        (self.node.key, self.node.value)
++        let node = KBox::into_inner(self.node);
++
++        (node.key, node.value)
+     }
+ }
+ 
+@@ -1113,7 +1115,7 @@ impl<K, V> RBTreeNode<K, V> {
+     /// may be freed (but only for the key/value; memory for the node itself is kept for reuse).
+     pub fn into_reservation(self) -> RBTreeNodeReservation<K, V> {
+         RBTreeNodeReservation {
+-            node: Box::drop_contents(self.node),
++            node: KBox::drop_contents(self.node),
+         }
+     }
+ }
+@@ -1164,7 +1166,7 @@ impl<'a, K, V> RawVacantEntry<'a, K, V> {
+     /// The `node` must have a key such that inserting it here does not break the ordering of this
+     /// [`RBTree`].
+     fn insert(self, node: RBTreeNode<K, V>) -> &'a mut V {
+-        let node = Box::into_raw(node.node);
++        let node = KBox::into_raw(node.node);
+ 
+         // SAFETY: `node` is valid at least until we call `Box::from_raw`, which only happens when
+         // the node is removed or replaced.
+@@ -1238,21 +1240,24 @@ pub fn remove_node(self) -> RBTreeNode<K, V> {
+             // SAFETY: The node was a node in the tree, but we removed it, so we can convert it
+             // back into a box.
+             node: unsafe {
+-                Box::from_raw(container_of!(self.node_links, Node<K, V>, links).cast_mut())
++                KBox::from_raw(container_of!(self.node_links, Node<K, V>, links).cast_mut())
+             },
+         }
+     }
+ 
+     /// Takes the value of the entry out of the map, and returns it.
+     pub fn remove(self) -> V {
+-        self.remove_node().node.value
++        let rb_node = self.remove_node();
++        let node = KBox::into_inner(rb_node.node);
++
++        node.value
+     }
+ 
+     /// Swap the current node for the provided node.
+     ///
+     /// The key of both nodes must be equal.
+     fn replace(self, node: RBTreeNode<K, V>) -> RBTreeNode<K, V> {
+-        let node = Box::into_raw(node.node);
++        let node = KBox::into_raw(node.node);
+ 
+         // SAFETY: `node` is valid at least until we call `Box::from_raw`, which only happens when
+         // the node is removed or replaced.
+@@ -1268,7 +1273,7 @@ fn replace(self, node: RBTreeNode<K, V>) -> RBTreeNode<K, V> {
+         // - `self.node_ptr` produces a valid pointer to a node in the tree.
+         // - Now that we removed this entry from the tree, we can convert the node to a box.
+         let old_node =
+-            unsafe { Box::from_raw(container_of!(self.node_links, Node<K, V>, links).cast_mut()) };
++            unsafe { KBox::from_raw(container_of!(self.node_links, Node<K, V>, links).cast_mut()) };
+ 
+         RBTreeNode { node: old_node }
+     }
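The to_key_value and remove hunks above insert an explicit KBox::into_inner step. Std's Box allows moving fields out through the deref (as the removed `self.node.key` line did), but a user-defined box type has no deref-move support, so the value must first be taken out of the box by value. A userspace sketch of the difference, with a hypothetical two-field Node:

    struct Node {
        key: u32,
        value: u32,
    }

    fn to_key_value(node: Box<Node>) -> (u32, u32) {
        // With std's `Box` this could be `(node.key, node.value)` directly;
        // `*node` below plays the role of `KBox::into_inner(node)`.
        let node = *node;
        (node.key, node.value)
    }

    fn main() {
        assert_eq!(to_key_value(Box::new(Node { key: 1, value: 2 })), (1, 2));
    }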
+diff --git a/rust/kernel/std_vendor.rs b/rust/kernel/std_vendor.rs
+index 67bf9d37ddb557..8b4872b48e9775 100644
+--- a/rust/kernel/std_vendor.rs
++++ b/rust/kernel/std_vendor.rs
+@@ -1,5 +1,7 @@
+ // SPDX-License-Identifier: Apache-2.0 OR MIT
+ 
++//! Rust standard library vendored code.
++//!
+ //! The contents of this file come from the Rust standard library, hosted in
+ //! the <https://github.com/rust-lang/rust> repository, licensed under
+ //! "Apache-2.0 OR MIT" and adapted for kernel use. For copyright details,
+@@ -14,7 +16,7 @@
+ ///
+ /// ```rust
+ /// let a = 2;
+-/// # #[allow(clippy::dbg_macro)]
++/// # #[expect(clippy::disallowed_macros)]
+ /// let b = dbg!(a * 2) + 1;
+ /// //      ^-- prints: [src/main.rs:2] a * 2 = 4
+ /// assert_eq!(b, 5);
+@@ -52,7 +54,7 @@
+ /// With a method call:
+ ///
+ /// ```rust
+-/// # #[allow(clippy::dbg_macro)]
++/// # #[expect(clippy::disallowed_macros)]
+ /// fn foo(n: usize) {
+ ///     if dbg!(n.checked_sub(4)).is_some() {
+ ///         // ...
+@@ -71,7 +73,7 @@
+ /// Naive factorial implementation:
+ ///
+ /// ```rust
+-/// # #[allow(clippy::dbg_macro)]
++/// # #[expect(clippy::disallowed_macros)]
+ /// # {
+ /// fn factorial(n: u32) -> u32 {
+ ///     if dbg!(n <= 1) {
+@@ -118,7 +120,7 @@
+ /// a tuple (and return it, too):
+ ///
+ /// ```
+-/// # #[allow(clippy::dbg_macro)]
++/// # #![expect(clippy::disallowed_macros)]
+ /// assert_eq!(dbg!(1usize, 2u32), (1, 2));
+ /// ```
+ ///
+@@ -127,7 +129,7 @@
+ /// invocations. You can use a 1-tuple directly if you need one:
+ ///
+ /// ```
+-/// # #[allow(clippy::dbg_macro)]
++/// # #[expect(clippy::disallowed_macros)]
+ /// # {
+ /// assert_eq!(1, dbg!(1u32,)); // trailing comma ignored
+ /// assert_eq!((1,), dbg!((1u32,))); // 1-tuple
+diff --git a/rust/kernel/str.rs b/rust/kernel/str.rs
+index bb8d4f41475b59..d04c12a1426d1c 100644
+--- a/rust/kernel/str.rs
++++ b/rust/kernel/str.rs
+@@ -2,8 +2,7 @@
+ 
+ //! String representations.
+ 
+-use crate::alloc::{flags::*, vec_ext::VecExt, AllocError};
+-use alloc::vec::Vec;
++use crate::alloc::{flags::*, AllocError, KVec};
+ use core::fmt::{self, Write};
+ use core::ops::{self, Deref, DerefMut, Index};
+ 
+@@ -162,10 +161,10 @@ pub const fn len(&self) -> usize {
+     /// Returns the length of this string with `NUL`.
+     #[inline]
+     pub const fn len_with_nul(&self) -> usize {
+-        // SAFETY: This is one of the invariant of `CStr`.
+-        // We add a `unreachable_unchecked` here to hint the optimizer that
+-        // the value returned from this function is non-zero.
+         if self.0.is_empty() {
++            // SAFETY: This is one of the invariants of `CStr`.
++            // We add an `unreachable_unchecked` here to hint the optimizer that
++            // the value returned from this function is non-zero.
+             unsafe { core::hint::unreachable_unchecked() };
+         }
+         self.0.len()
+@@ -185,7 +184,7 @@ pub const fn is_empty(&self) -> bool {
+     /// last at least `'a`. When `CStr` is alive, the memory pointed by `ptr`
+     /// must not be mutated.
+     #[inline]
+-    pub unsafe fn from_char_ptr<'a>(ptr: *const core::ffi::c_char) -> &'a Self {
++    pub unsafe fn from_char_ptr<'a>(ptr: *const crate::ffi::c_char) -> &'a Self {
+         // SAFETY: The safety precondition guarantees `ptr` is a valid pointer
+         // to a `NUL`-terminated C string.
+         let len = unsafe { bindings::strlen(ptr) } + 1;
+@@ -248,7 +247,7 @@ pub unsafe fn from_bytes_with_nul_unchecked_mut(bytes: &mut [u8]) -> &mut CStr {
+ 
+     /// Returns a C pointer to the string.
+     #[inline]
+-    pub const fn as_char_ptr(&self) -> *const core::ffi::c_char {
++    pub const fn as_char_ptr(&self) -> *const crate::ffi::c_char {
+         self.0.as_ptr() as _
+     }
+ 
+@@ -301,6 +300,7 @@ pub fn to_str(&self) -> Result<&str, core::str::Utf8Error> {
+     /// ```
+     #[inline]
+     pub unsafe fn as_str_unchecked(&self) -> &str {
++        // SAFETY: TODO.
+         unsafe { core::str::from_utf8_unchecked(self.as_bytes()) }
+     }
+ 
+@@ -524,7 +524,28 @@ macro_rules! c_str {
+ #[cfg(test)]
+ mod tests {
+     use super::*;
+-    use alloc::format;
++
++    struct String(CString);
++
++    impl String {
++        fn from_fmt(args: fmt::Arguments<'_>) -> Self {
++            String(CString::try_from_fmt(args).unwrap())
++        }
++    }
++
++    impl Deref for String {
++        type Target = str;
++
++        fn deref(&self) -> &str {
++            self.0.to_str().unwrap()
++        }
++    }
++
++    macro_rules! format {
++        ($($f:tt)*) => ({
++            &*String::from_fmt(kernel::fmt!($($f)*))
++        })
++    }
+ 
+     const ALL_ASCII_CHARS: &'static str =
+         "\\x01\\x02\\x03\\x04\\x05\\x06\\x07\\x08\\x09\\x0a\\x0b\\x0c\\x0d\\x0e\\x0f\
+@@ -790,7 +811,7 @@ fn write_str(&mut self, s: &str) -> fmt::Result {
+ /// assert_eq!(s.is_ok(), false);
+ /// ```
+ pub struct CString {
+-    buf: Vec<u8>,
++    buf: KVec<u8>,
+ }
+ 
+ impl CString {
+@@ -803,7 +824,7 @@ pub fn try_from_fmt(args: fmt::Arguments<'_>) -> Result<Self, Error> {
+         let size = f.bytes_written();
+ 
+         // Allocate a vector with the required number of bytes, and write to it.
+-        let mut buf = <Vec<_> as VecExt<_>>::with_capacity(size, GFP_KERNEL)?;
++        let mut buf = KVec::with_capacity(size, GFP_KERNEL)?;
+         // SAFETY: The buffer stored in `buf` is at least of size `size` and is valid for writes.
+         let mut f = unsafe { Formatter::from_buffer(buf.as_mut_ptr(), size) };
+         f.write_fmt(args)?;
+@@ -850,10 +871,9 @@ impl<'a> TryFrom<&'a CStr> for CString {
+     type Error = AllocError;
+ 
+     fn try_from(cstr: &'a CStr) -> Result<CString, AllocError> {
+-        let mut buf = Vec::new();
++        let mut buf = KVec::new();
+ 
+-        <Vec<_> as VecExt<_>>::extend_from_slice(&mut buf, cstr.as_bytes_with_nul(), GFP_KERNEL)
+-            .map_err(|_| AllocError)?;
++        buf.extend_from_slice(cstr.as_bytes_with_nul(), GFP_KERNEL)?;
+ 
+         // INVARIANT: The `CStr` and `CString` types have the same invariants for
+         // the string data, and we copied it over without changes.
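The test hunk above replaces alloc::format! with a local format! macro built on CString::try_from_fmt, so the tests keep reading naturally while exercising the kernel's fallible formatting path. A simplified standalone sketch of the same shim; the CString below is a stand-in, not the kernel type:

    use std::fmt;

    struct CString(Vec<u8>); // stand-in for kernel::str::CString

    impl CString {
        fn try_from_fmt(args: fmt::Arguments<'_>) -> Result<Self, fmt::Error> {
            let mut buf = fmt::format(args).into_bytes();
            buf.push(0); // the kernel type guarantees NUL termination
            Ok(CString(buf))
        }

        fn to_str(&self) -> &str {
            std::str::from_utf8(&self.0[..self.0.len() - 1]).unwrap()
        }
    }

    macro_rules! format {
        ($($f:tt)*) => {
            CString::try_from_fmt(format_args!($($f)*)).unwrap().to_str().to_owned()
        };
    }

    fn main() {
        assert_eq!(format!("{}-{}", 1, 2), "1-2");
    }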
+diff --git a/rust/kernel/sync/arc.rs b/rust/kernel/sync/arc.rs
+index 28743a7c74a847..fa4509406ee909 100644
+--- a/rust/kernel/sync/arc.rs
++++ b/rust/kernel/sync/arc.rs
+@@ -17,13 +17,12 @@
+ //! [`Arc`]: https://doc.rust-lang.org/std/sync/struct.Arc.html
+ 
+ use crate::{
+-    alloc::{box_ext::BoxExt, AllocError, Flags},
++    alloc::{AllocError, Flags, KBox},
+     bindings,
+     init::{self, InPlaceInit, Init, PinInit},
+     try_init,
+     types::{ForeignOwnable, Opaque},
+ };
+-use alloc::boxed::Box;
+ use core::{
+     alloc::Layout,
+     fmt,
+@@ -201,11 +200,11 @@ pub fn new(contents: T, flags: Flags) -> Result<Self, AllocError> {
+             data: contents,
+         };
+ 
+-        let inner = <Box<_> as BoxExt<_>>::new(value, flags)?;
++        let inner = KBox::new(value, flags)?;
+ 
+         // SAFETY: We just created `inner` with a reference count of 1, which is owned by the new
+         // `Arc` object.
+-        Ok(unsafe { Self::from_inner(Box::leak(inner).into()) })
++        Ok(unsafe { Self::from_inner(KBox::leak(inner).into()) })
+     }
+ }
+ 
+@@ -333,12 +332,12 @@ pub fn into_unique_or_drop(self) -> Option<Pin<UniqueArc<T>>> {
+ impl<T: 'static> ForeignOwnable for Arc<T> {
+     type Borrowed<'a> = ArcBorrow<'a, T>;
+ 
+-    fn into_foreign(self) -> *const core::ffi::c_void {
++    fn into_foreign(self) -> *const crate::ffi::c_void {
+         ManuallyDrop::new(self).ptr.as_ptr() as _
+     }
+ 
+-    unsafe fn borrow<'a>(ptr: *const core::ffi::c_void) -> ArcBorrow<'a, T> {
+-        // SAFETY: By the safety requirement of this function, we know that `ptr` came from
++    unsafe fn borrow<'a>(ptr: *const crate::ffi::c_void) -> ArcBorrow<'a, T> {
++        // By the safety requirement of this function, we know that `ptr` came from
+         // a previous call to `Arc::into_foreign`.
+         let inner = NonNull::new(ptr as *mut ArcInner<T>).unwrap();
+ 
+@@ -347,7 +346,7 @@ unsafe fn borrow<'a>(ptr: *const core::ffi::c_void) -> ArcBorrow<'a, T> {
+         unsafe { ArcBorrow::new(inner) }
+     }
+ 
+-    unsafe fn from_foreign(ptr: *const core::ffi::c_void) -> Self {
++    unsafe fn from_foreign(ptr: *const crate::ffi::c_void) -> Self {
+         // SAFETY: By the safety requirement of this function, we know that `ptr` came from
+         // a previous call to `Arc::into_foreign`, which guarantees that `ptr` is valid and
+         // holds a reference count increment that is transferrable to us.
+@@ -398,8 +397,8 @@ fn drop(&mut self) {
+         if is_zero {
+             // The count reached zero, we must free the memory.
+             //
+-            // SAFETY: The pointer was initialised from the result of `Box::leak`.
+-            unsafe { drop(Box::from_raw(self.ptr.as_ptr())) };
++            // SAFETY: The pointer was initialised from the result of `KBox::leak`.
++            unsafe { drop(KBox::from_raw(self.ptr.as_ptr())) };
+         }
+     }
+ }
+@@ -641,7 +640,7 @@ pub fn new(value: T, flags: Flags) -> Result<Self, AllocError> {
+     /// Tries to allocate a new [`UniqueArc`] instance whose contents are not initialised yet.
+     pub fn new_uninit(flags: Flags) -> Result<UniqueArc<MaybeUninit<T>>, AllocError> {
+         // INVARIANT: The refcount is initialised to a non-zero value.
+-        let inner = Box::try_init::<AllocError>(
++        let inner = KBox::try_init::<AllocError>(
+             try_init!(ArcInner {
+                 // SAFETY: There are no safety requirements for this FFI call.
+                 refcount: Opaque::new(unsafe { bindings::REFCOUNT_INIT(1) }),
+@@ -651,8 +650,8 @@ pub fn new_uninit(flags: Flags) -> Result<UniqueArc<MaybeUninit<T>>, AllocError>
+         )?;
+         Ok(UniqueArc {
+             // INVARIANT: The newly-created object has a refcount of 1.
+-            // SAFETY: The pointer from the `Box` is valid.
+-            inner: unsafe { Arc::from_inner(Box::leak(inner).into()) },
++            // SAFETY: The pointer from the `KBox` is valid.
++            inner: unsafe { Arc::from_inner(KBox::leak(inner).into()) },
+         })
+     }
+ }
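The arc.rs hunks keep the existing construction scheme, just with KBox: allocate ArcInner in a box, leak it, and keep only the raw pointer until the refcount drops to zero, at which point from_raw reconstructs the box for freeing. A reduced userspace sketch (no real refcounting, std Box as the stand-in):

    use std::ptr::NonNull;

    struct ArcInner<T> {
        refcount: usize,
        data: T,
    }

    struct MyArc<T> {
        ptr: NonNull<ArcInner<T>>,
    }

    impl<T> MyArc<T> {
        fn new(data: T) -> Self {
            let inner = Box::new(ArcInner { refcount: 1, data });
            // `Box::leak` transfers ownership of the allocation to the raw
            // pointer; `Box::from_raw` would reclaim it once the count is 0.
            MyArc { ptr: NonNull::from(Box::leak(inner)) }
        }
    }

    fn main() {
        let a = MyArc::new(42u32);
        // SAFETY: `ptr` came from `Box::leak` above and is still live.
        let inner = unsafe { a.ptr.as_ref() };
        assert_eq!((inner.refcount, inner.data), (1, 42));
        // The sketch omits `Drop`, so this allocation leaks on exit.
    }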
+diff --git a/rust/kernel/sync/arc/std_vendor.rs b/rust/kernel/sync/arc/std_vendor.rs
+index a66a0c2831b3ed..11b3f4ecca5f79 100644
+--- a/rust/kernel/sync/arc/std_vendor.rs
++++ b/rust/kernel/sync/arc/std_vendor.rs
+@@ -1,5 +1,7 @@
+ // SPDX-License-Identifier: Apache-2.0 OR MIT
+ 
++//! Rust standard library vendored code.
++//!
+ //! The contents of this file come from the Rust standard library, hosted in
+ //! the <https://github.com/rust-lang/rust> repository, licensed under
+ //! "Apache-2.0 OR MIT" and adapted for kernel use. For copyright details,
+diff --git a/rust/kernel/sync/condvar.rs b/rust/kernel/sync/condvar.rs
+index 2b306afbe56d96..7df565038d7d0d 100644
+--- a/rust/kernel/sync/condvar.rs
++++ b/rust/kernel/sync/condvar.rs
+@@ -7,6 +7,7 @@
+ 
+ use super::{lock::Backend, lock::Guard, LockClassKey};
+ use crate::{
++    ffi::{c_int, c_long},
+     init::PinInit,
+     pin_init,
+     str::CStr,
+@@ -14,7 +15,6 @@
+     time::Jiffies,
+     types::Opaque,
+ };
+-use core::ffi::{c_int, c_long};
+ use core::marker::PhantomPinned;
+ use core::ptr;
+ use macros::pin_data;
+@@ -70,8 +70,8 @@ macro_rules! new_condvar {
+ /// }
+ ///
+ /// /// Allocates a new boxed `Example`.
+-/// fn new_example() -> Result<Pin<Box<Example>>> {
+-///     Box::pin_init(pin_init!(Example {
++/// fn new_example() -> Result<Pin<KBox<Example>>> {
++///     KBox::pin_init(pin_init!(Example {
+ ///         value <- new_mutex!(0),
+ ///         value_changed <- new_condvar!(),
+ ///     }), GFP_KERNEL)
+@@ -93,7 +93,6 @@ pub struct CondVar {
+ }
+ 
+ // SAFETY: `CondVar` only uses a `struct wait_queue_head`, which is safe to use on any thread.
+-#[allow(clippy::non_send_fields_in_send_ty)]
+ unsafe impl Send for CondVar {}
+ 
+ // SAFETY: `CondVar` only uses a `struct wait_queue_head`, which is safe to use on multiple threads
+diff --git a/rust/kernel/sync/lock.rs b/rust/kernel/sync/lock.rs
+index f6c34ca4d819f8..528eb690723120 100644
+--- a/rust/kernel/sync/lock.rs
++++ b/rust/kernel/sync/lock.rs
+@@ -46,7 +46,7 @@ pub unsafe trait Backend {
+     /// remain valid for read indefinitely.
+     unsafe fn init(
+         ptr: *mut Self::State,
+-        name: *const core::ffi::c_char,
++        name: *const crate::ffi::c_char,
+         key: *mut bindings::lock_class_key,
+     );
+ 
+@@ -150,9 +150,9 @@ pub(crate) fn do_unlocked<U>(&mut self, cb: impl FnOnce() -> U) -> U {
+         // SAFETY: The caller owns the lock, so it is safe to unlock it.
+         unsafe { B::unlock(self.lock.state.get(), &self.state) };
+ 
+-        // SAFETY: The lock was just unlocked above and is being relocked now.
+-        let _relock =
+-            ScopeGuard::new(|| unsafe { B::relock(self.lock.state.get(), &mut self.state) });
++        let _relock = ScopeGuard::new(||
++                // SAFETY: The lock was just unlocked above and is being relocked now.
++                unsafe { B::relock(self.lock.state.get(), &mut self.state) });
+ 
+         cb()
+     }
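The do_unlocked hunk above only moves the SAFETY comment onto the closure that actually contains the unsafe call, but the guard pattern it annotates is worth spelling out: the relock runs from a destructor, so it happens however the closure returns. A minimal standalone sketch with a toy ScopeGuard:

    struct ScopeGuard<F: FnMut()>(F);

    impl<F: FnMut()> Drop for ScopeGuard<F> {
        fn drop(&mut self) {
            (self.0)() // runs on every scope exit
        }
    }

    fn do_unlocked(cb: impl FnOnce()) {
        println!("unlock");
        let _relock = ScopeGuard(|| println!("relock"));
        cb(); // the guard relocks afterwards no matter how we leave
    }

    fn main() {
        do_unlocked(|| println!("working without the lock"));
    }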
+diff --git a/rust/kernel/sync/lock/mutex.rs b/rust/kernel/sync/lock/mutex.rs
+index 30632070ee6709..59a872cbcac64a 100644
+--- a/rust/kernel/sync/lock/mutex.rs
++++ b/rust/kernel/sync/lock/mutex.rs
+@@ -58,7 +58,7 @@ macro_rules! new_mutex {
+ /// }
+ ///
+ /// // Allocate a boxed `Example`.
+-/// let e = Box::pin_init(Example::new(), GFP_KERNEL)?;
++/// let e = KBox::pin_init(Example::new(), GFP_KERNEL)?;
+ /// assert_eq!(e.c, 10);
+ /// assert_eq!(e.d.lock().a, 20);
+ /// assert_eq!(e.d.lock().b, 30);
+@@ -96,7 +96,7 @@ unsafe impl super::Backend for MutexBackend {
+ 
+     unsafe fn init(
+         ptr: *mut Self::State,
+-        name: *const core::ffi::c_char,
++        name: *const crate::ffi::c_char,
+         key: *mut bindings::lock_class_key,
+     ) {
+         // SAFETY: The safety requirements ensure that `ptr` is valid for writes, and `name` and
+diff --git a/rust/kernel/sync/lock/spinlock.rs b/rust/kernel/sync/lock/spinlock.rs
+index ea5c5bc1ce12ed..b77eed1789ad0f 100644
+--- a/rust/kernel/sync/lock/spinlock.rs
++++ b/rust/kernel/sync/lock/spinlock.rs
+@@ -56,7 +56,7 @@ macro_rules! new_spinlock {
+ /// }
+ ///
+ /// // Allocate a boxed `Example`.
+-/// let e = Box::pin_init(Example::new(), GFP_KERNEL)?;
++/// let e = KBox::pin_init(Example::new(), GFP_KERNEL)?;
+ /// assert_eq!(e.c, 10);
+ /// assert_eq!(e.d.lock().a, 20);
+ /// assert_eq!(e.d.lock().b, 30);
+@@ -95,7 +95,7 @@ unsafe impl super::Backend for SpinLockBackend {
+ 
+     unsafe fn init(
+         ptr: *mut Self::State,
+-        name: *const core::ffi::c_char,
++        name: *const crate::ffi::c_char,
+         key: *mut bindings::lock_class_key,
+     ) {
+         // SAFETY: The safety requirements ensure that `ptr` is valid for writes, and `name` and
+diff --git a/rust/kernel/sync/locked_by.rs b/rust/kernel/sync/locked_by.rs
+index ce2ee8d8786587..a7b244675c2b96 100644
+--- a/rust/kernel/sync/locked_by.rs
++++ b/rust/kernel/sync/locked_by.rs
+@@ -43,7 +43,7 @@
+ /// struct InnerDirectory {
+ ///     /// The sum of the bytes used by all files.
+ ///     bytes_used: u64,
+-///     _files: Vec<File>,
++///     _files: KVec<File>,
+ /// }
+ ///
+ /// struct Directory {
+diff --git a/rust/kernel/task.rs b/rust/kernel/task.rs
+index 55dff7e088bf5f..5bce090a386977 100644
+--- a/rust/kernel/task.rs
++++ b/rust/kernel/task.rs
+@@ -4,13 +4,9 @@
+ //!
+ //! C header: [`include/linux/sched.h`](srctree/include/linux/sched.h).
+ 
++use crate::ffi::{c_int, c_long, c_uint};
+ use crate::types::Opaque;
+-use core::{
+-    ffi::{c_int, c_long, c_uint},
+-    marker::PhantomData,
+-    ops::Deref,
+-    ptr,
+-};
++use core::{marker::PhantomData, ops::Deref, ptr};
+ 
+ /// A sentinel value used for infinite timeouts.
+ pub const MAX_SCHEDULE_TIMEOUT: c_long = c_long::MAX;
+diff --git a/rust/kernel/time.rs b/rust/kernel/time.rs
+index e3bb5e89f88dac..379c0f5772e575 100644
+--- a/rust/kernel/time.rs
++++ b/rust/kernel/time.rs
+@@ -12,10 +12,10 @@
+ pub const NSEC_PER_MSEC: i64 = bindings::NSEC_PER_MSEC as i64;
+ 
+ /// The time unit of Linux kernel. One jiffy equals (1/HZ) second.
+-pub type Jiffies = core::ffi::c_ulong;
++pub type Jiffies = crate::ffi::c_ulong;
+ 
+ /// The millisecond time unit.
+-pub type Msecs = core::ffi::c_uint;
++pub type Msecs = crate::ffi::c_uint;
+ 
+ /// Converts milliseconds to jiffies.
+ #[inline]
+diff --git a/rust/kernel/types.rs b/rust/kernel/types.rs
+index 9e7ca066355cd5..7c8c531ef1909d 100644
+--- a/rust/kernel/types.rs
++++ b/rust/kernel/types.rs
+@@ -3,13 +3,11 @@
+ //! Kernel types.
+ 
+ use crate::init::{self, PinInit};
+-use alloc::boxed::Box;
+ use core::{
+     cell::UnsafeCell,
+     marker::{PhantomData, PhantomPinned},
+     mem::{ManuallyDrop, MaybeUninit},
+     ops::{Deref, DerefMut},
+-    pin::Pin,
+     ptr::NonNull,
+ };
+ 
+@@ -31,7 +29,7 @@ pub trait ForeignOwnable: Sized {
+     /// For example, it might be invalid, dangling or pointing to uninitialized memory. Using it in
+     /// any way except for [`ForeignOwnable::from_foreign`], [`ForeignOwnable::borrow`],
+     /// [`ForeignOwnable::try_from_foreign`] can result in undefined behavior.
+-    fn into_foreign(self) -> *const core::ffi::c_void;
++    fn into_foreign(self) -> *const crate::ffi::c_void;
+ 
+     /// Borrows a foreign-owned object.
+     ///
+@@ -39,7 +37,7 @@ pub trait ForeignOwnable: Sized {
+     ///
+     /// `ptr` must have been returned by a previous call to [`ForeignOwnable::into_foreign`] for
+     /// which a previous matching [`ForeignOwnable::from_foreign`] hasn't been called yet.
+-    unsafe fn borrow<'a>(ptr: *const core::ffi::c_void) -> Self::Borrowed<'a>;
++    unsafe fn borrow<'a>(ptr: *const crate::ffi::c_void) -> Self::Borrowed<'a>;
+ 
+     /// Converts a foreign-owned object back to a Rust-owned one.
+     ///
+@@ -49,7 +47,7 @@ pub trait ForeignOwnable: Sized {
+     /// which a previous matching [`ForeignOwnable::from_foreign`] hasn't been called yet.
+     /// Additionally, all instances (if any) of values returned by [`ForeignOwnable::borrow`] for
+     /// this object must have been dropped.
+-    unsafe fn from_foreign(ptr: *const core::ffi::c_void) -> Self;
++    unsafe fn from_foreign(ptr: *const crate::ffi::c_void) -> Self;
+ 
+     /// Tries to convert a foreign-owned object back to a Rust-owned one.
+     ///
+@@ -60,7 +58,7 @@ pub trait ForeignOwnable: Sized {
+     ///
+     /// `ptr` must either be null or satisfy the safety requirements for
+     /// [`ForeignOwnable::from_foreign`].
+-    unsafe fn try_from_foreign(ptr: *const core::ffi::c_void) -> Option<Self> {
++    unsafe fn try_from_foreign(ptr: *const crate::ffi::c_void) -> Option<Self> {
+         if ptr.is_null() {
+             None
+         } else {
+@@ -71,64 +69,16 @@ unsafe fn try_from_foreign(ptr: *const core::ffi::c_void) -> Option<Self> {
+     }
+ }
+ 
+-impl<T: 'static> ForeignOwnable for Box<T> {
+-    type Borrowed<'a> = &'a T;
+-
+-    fn into_foreign(self) -> *const core::ffi::c_void {
+-        Box::into_raw(self) as _
+-    }
+-
+-    unsafe fn borrow<'a>(ptr: *const core::ffi::c_void) -> &'a T {
+-        // SAFETY: The safety requirements for this function ensure that the object is still alive,
+-        // so it is safe to dereference the raw pointer.
+-        // The safety requirements of `from_foreign` also ensure that the object remains alive for
+-        // the lifetime of the returned value.
+-        unsafe { &*ptr.cast() }
+-    }
+-
+-    unsafe fn from_foreign(ptr: *const core::ffi::c_void) -> Self {
+-        // SAFETY: The safety requirements of this function ensure that `ptr` comes from a previous
+-        // call to `Self::into_foreign`.
+-        unsafe { Box::from_raw(ptr as _) }
+-    }
+-}
+-
+-impl<T: 'static> ForeignOwnable for Pin<Box<T>> {
+-    type Borrowed<'a> = Pin<&'a T>;
+-
+-    fn into_foreign(self) -> *const core::ffi::c_void {
+-        // SAFETY: We are still treating the box as pinned.
+-        Box::into_raw(unsafe { Pin::into_inner_unchecked(self) }) as _
+-    }
+-
+-    unsafe fn borrow<'a>(ptr: *const core::ffi::c_void) -> Pin<&'a T> {
+-        // SAFETY: The safety requirements for this function ensure that the object is still alive,
+-        // so it is safe to dereference the raw pointer.
+-        // The safety requirements of `from_foreign` also ensure that the object remains alive for
+-        // the lifetime of the returned value.
+-        let r = unsafe { &*ptr.cast() };
+-
+-        // SAFETY: This pointer originates from a `Pin<Box<T>>`.
+-        unsafe { Pin::new_unchecked(r) }
+-    }
+-
+-    unsafe fn from_foreign(ptr: *const core::ffi::c_void) -> Self {
+-        // SAFETY: The safety requirements of this function ensure that `ptr` comes from a previous
+-        // call to `Self::into_foreign`.
+-        unsafe { Pin::new_unchecked(Box::from_raw(ptr as _)) }
+-    }
+-}
+-
+ impl ForeignOwnable for () {
+     type Borrowed<'a> = ();
+ 
+-    fn into_foreign(self) -> *const core::ffi::c_void {
++    fn into_foreign(self) -> *const crate::ffi::c_void {
+         core::ptr::NonNull::dangling().as_ptr()
+     }
+ 
+-    unsafe fn borrow<'a>(_: *const core::ffi::c_void) -> Self::Borrowed<'a> {}
++    unsafe fn borrow<'a>(_: *const crate::ffi::c_void) -> Self::Borrowed<'a> {}
+ 
+-    unsafe fn from_foreign(_: *const core::ffi::c_void) -> Self {}
++    unsafe fn from_foreign(_: *const crate::ffi::c_void) -> Self {}
+ }
+ 
+ /// Runs a cleanup function/closure when dropped.
+@@ -185,7 +135,7 @@ unsafe fn from_foreign(_: *const core::ffi::c_void) -> Self {}
+ /// # use kernel::types::ScopeGuard;
+ /// fn example3(arg: bool) -> Result {
+ ///     let mut vec =
+-///         ScopeGuard::new_with_data(Vec::new(), |v| pr_info!("vec had {} elements\n", v.len()));
++///         ScopeGuard::new_with_data(KVec::new(), |v| pr_info!("vec had {} elements\n", v.len()));
+ ///
+ ///     vec.push(10u8, GFP_KERNEL)?;
+ ///     if arg {
+@@ -225,7 +175,7 @@ pub fn dismiss(mut self) -> T {
+ impl ScopeGuard<(), fn(())> {
+     /// Creates a new guarded object with the given cleanup function.
+     pub fn new(cleanup: impl FnOnce()) -> ScopeGuard<(), impl FnOnce(())> {
+-        ScopeGuard::new_with_data((), move |_| cleanup())
++        ScopeGuard::new_with_data((), move |()| cleanup())
+     }
+ }
+ 
+@@ -410,6 +360,7 @@ pub unsafe fn from_raw(ptr: NonNull<T>) -> Self {
+     ///
+     /// struct Empty {}
+     ///
++    /// # // SAFETY: TODO.
+     /// unsafe impl AlwaysRefCounted for Empty {
+     ///     fn inc_ref(&self) {}
+     ///     unsafe fn dec_ref(_obj: NonNull<Self>) {}
+@@ -417,6 +368,7 @@ pub unsafe fn from_raw(ptr: NonNull<T>) -> Self {
+     ///
+     /// let mut data = Empty {};
+     /// let ptr = NonNull::<Empty>::new(&mut data as *mut _).unwrap();
++    /// # // SAFETY: TODO.
+     /// let data_ref: ARef<Empty> = unsafe { ARef::from_raw(ptr) };
+     /// let raw_ptr: NonNull<Empty> = ARef::into_raw(data_ref);
+     ///
+@@ -481,21 +433,23 @@ pub enum Either<L, R> {
+ /// All bit-patterns must be valid for this type. This type must not have interior mutability.
+ pub unsafe trait FromBytes {}
+ 
+-// SAFETY: All bit patterns are acceptable values of the types below.
+-unsafe impl FromBytes for u8 {}
+-unsafe impl FromBytes for u16 {}
+-unsafe impl FromBytes for u32 {}
+-unsafe impl FromBytes for u64 {}
+-unsafe impl FromBytes for usize {}
+-unsafe impl FromBytes for i8 {}
+-unsafe impl FromBytes for i16 {}
+-unsafe impl FromBytes for i32 {}
+-unsafe impl FromBytes for i64 {}
+-unsafe impl FromBytes for isize {}
+-// SAFETY: If all bit patterns are acceptable for individual values in an array, then all bit
+-// patterns are also acceptable for arrays of that type.
+-unsafe impl<T: FromBytes> FromBytes for [T] {}
+-unsafe impl<T: FromBytes, const N: usize> FromBytes for [T; N] {}
++macro_rules! impl_frombytes {
++    ($($({$($generics:tt)*})? $t:ty, )*) => {
++        // SAFETY: Safety comments written in the macro invocation.
++        $(unsafe impl$($($generics)*)? FromBytes for $t {})*
++    };
++}
++
++impl_frombytes! {
++    // SAFETY: All bit patterns are acceptable values of the types below.
++    u8, u16, u32, u64, usize,
++    i8, i16, i32, i64, isize,
++
++    // SAFETY: If all bit patterns are acceptable for individual values in an array, then all bit
++    // patterns are also acceptable for arrays of that type.
++    {<T: FromBytes>} [T],
++    {<T: FromBytes, const N: usize>} [T; N],
++}
+ 
+ /// Types that can be viewed as an immutable slice of initialized bytes.
+ ///
+@@ -514,21 +468,23 @@ unsafe impl<T: FromBytes> FromBytes for [T] {}
+ /// mutability.
+ pub unsafe trait AsBytes {}
+ 
+-// SAFETY: Instances of the following types have no uninitialized portions.
+-unsafe impl AsBytes for u8 {}
+-unsafe impl AsBytes for u16 {}
+-unsafe impl AsBytes for u32 {}
+-unsafe impl AsBytes for u64 {}
+-unsafe impl AsBytes for usize {}
+-unsafe impl AsBytes for i8 {}
+-unsafe impl AsBytes for i16 {}
+-unsafe impl AsBytes for i32 {}
+-unsafe impl AsBytes for i64 {}
+-unsafe impl AsBytes for isize {}
+-unsafe impl AsBytes for bool {}
+-unsafe impl AsBytes for char {}
+-unsafe impl AsBytes for str {}
+-// SAFETY: If individual values in an array have no uninitialized portions, then the array itself
+-// does not have any uninitialized portions either.
+-unsafe impl<T: AsBytes> AsBytes for [T] {}
+-unsafe impl<T: AsBytes, const N: usize> AsBytes for [T; N] {}
++macro_rules! impl_asbytes {
++    ($($({$($generics:tt)*})? $t:ty, )*) => {
++        // SAFETY: Safety comments written in the macro invocation.
++        $(unsafe impl$($($generics)*)? AsBytes for $t {})*
++    };
++}
++
++impl_asbytes! {
++    // SAFETY: Instances of the following types have no uninitialized portions.
++    u8, u16, u32, u64, usize,
++    i8, i16, i32, i64, isize,
++    bool,
++    char,
++    str,
++
++    // SAFETY: If individual values in an array have no uninitialized portions, then the array
++    // itself does not have any uninitialized portions either.
++    {<T: AsBytes>} [T],
++    {<T: AsBytes, const N: usize>} [T; N],
++}
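
The `impl_frombytes!` and `impl_asbytes!` macros above merely fold the long runs of per-type `unsafe impl` blocks into single invocations; the contract is unchanged: `FromBytes` types accept every bit pattern, while `AsBytes` types carry no uninitialized (padding) bytes. A standalone userspace C sketch of the same idea, for illustration only:

  #include <inttypes.h>
  #include <stdint.h>
  #include <stdio.h>
  #include <string.h>

  int main(void)
  {
  	uint32_t v;
  	unsigned char raw[sizeof(v)] = { 0xde, 0xad, 0xbe, 0xef };

  	/* Fixed-width integers accept every bit pattern (the FromBytes
  	 * property), so memcpy() from arbitrary bytes yields a valid value. */
  	memcpy(&v, raw, sizeof(v));
  	printf("v=0x%08" PRIx32 "\n", v);

  	/* The AsBytes direction is stricter: a struct with padding holds
  	 * uninitialized bytes, so dumping it byte-for-byte would expose
  	 * indeterminate data; that is the "no uninitialized portions"
  	 * condition the macro invocation documents. */
  	return 0;
  }
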
+diff --git a/rust/kernel/uaccess.rs b/rust/kernel/uaccess.rs
+index e9347cff99ab20..5a3c2d4df65f86 100644
+--- a/rust/kernel/uaccess.rs
++++ b/rust/kernel/uaccess.rs
+@@ -8,11 +8,10 @@
+     alloc::Flags,
+     bindings,
+     error::Result,
++    ffi::c_void,
+     prelude::*,
+     types::{AsBytes, FromBytes},
+ };
+-use alloc::vec::Vec;
+-use core::ffi::{c_ulong, c_void};
+ use core::mem::{size_of, MaybeUninit};
+ 
+ /// The type used for userspace addresses.
+@@ -46,15 +45,14 @@
+ /// every byte in the region.
+ ///
+ /// ```no_run
+-/// use alloc::vec::Vec;
+-/// use core::ffi::c_void;
++/// use kernel::ffi::c_void;
+ /// use kernel::error::Result;
+ /// use kernel::uaccess::{UserPtr, UserSlice};
+ ///
+ /// fn bytes_add_one(uptr: UserPtr, len: usize) -> Result<()> {
+ ///     let (read, mut write) = UserSlice::new(uptr, len).reader_writer();
+ ///
+-///     let mut buf = Vec::new();
++///     let mut buf = KVec::new();
+ ///     read.read_all(&mut buf, GFP_KERNEL)?;
+ ///
+ ///     for b in &mut buf {
+@@ -69,8 +67,7 @@
+ /// Example illustrating a TOCTOU (time-of-check to time-of-use) bug.
+ ///
+ /// ```no_run
+-/// use alloc::vec::Vec;
+-/// use core::ffi::c_void;
++/// use kernel::ffi::c_void;
+ /// use kernel::error::{code::EINVAL, Result};
+ /// use kernel::uaccess::{UserPtr, UserSlice};
+ ///
+@@ -78,21 +75,21 @@
+ /// fn is_valid(uptr: UserPtr, len: usize) -> Result<bool> {
+ ///     let read = UserSlice::new(uptr, len).reader();
+ ///
+-///     let mut buf = Vec::new();
++///     let mut buf = KVec::new();
+ ///     read.read_all(&mut buf, GFP_KERNEL)?;
+ ///
+ ///     todo!()
+ /// }
+ ///
+ /// /// Returns the bytes behind this user pointer if they are valid.
+-/// fn get_bytes_if_valid(uptr: UserPtr, len: usize) -> Result<Vec<u8>> {
++/// fn get_bytes_if_valid(uptr: UserPtr, len: usize) -> Result<KVec<u8>> {
+ ///     if !is_valid(uptr, len)? {
+ ///         return Err(EINVAL);
+ ///     }
+ ///
+ ///     let read = UserSlice::new(uptr, len).reader();
+ ///
+-///     let mut buf = Vec::new();
++///     let mut buf = KVec::new();
+ ///     read.read_all(&mut buf, GFP_KERNEL)?;
+ ///
+ ///     // THIS IS A BUG! The bytes could have changed since we checked them.
+@@ -130,7 +127,7 @@ pub fn new(ptr: UserPtr, length: usize) -> Self {
+     /// Reads the entirety of the user slice, appending it to the end of the provided buffer.
+     ///
+     /// Fails with [`EFAULT`] if the read happens on a bad address.
+-    pub fn read_all(self, buf: &mut Vec<u8>, flags: Flags) -> Result {
++    pub fn read_all(self, buf: &mut KVec<u8>, flags: Flags) -> Result {
+         self.reader().read_all(buf, flags)
+     }
+ 
+@@ -227,13 +224,9 @@ pub fn read_raw(&mut self, out: &mut [MaybeUninit<u8>]) -> Result {
+         if len > self.length {
+             return Err(EFAULT);
+         }
+-        let Ok(len_ulong) = c_ulong::try_from(len) else {
+-            return Err(EFAULT);
+-        };
+-        // SAFETY: `out_ptr` points into a mutable slice of length `len_ulong`, so we may write
++        // SAFETY: `out_ptr` points into a mutable slice of length `len`, so we may write
+         // that many bytes to it.
+-        let res =
+-            unsafe { bindings::copy_from_user(out_ptr, self.ptr as *const c_void, len_ulong) };
++        let res = unsafe { bindings::copy_from_user(out_ptr, self.ptr as *const c_void, len) };
+         if res != 0 {
+             return Err(EFAULT);
+         }
+@@ -262,9 +255,6 @@ pub fn read<T: FromBytes>(&mut self) -> Result<T> {
+         if len > self.length {
+             return Err(EFAULT);
+         }
+-        let Ok(len_ulong) = c_ulong::try_from(len) else {
+-            return Err(EFAULT);
+-        };
+         let mut out: MaybeUninit<T> = MaybeUninit::uninit();
+         // SAFETY: The local variable `out` is valid for writing `size_of::<T>()` bytes.
+         //
+@@ -275,7 +265,7 @@ pub fn read<T: FromBytes>(&mut self) -> Result<T> {
+             bindings::_copy_from_user(
+                 out.as_mut_ptr().cast::<c_void>(),
+                 self.ptr as *const c_void,
+-                len_ulong,
++                len,
+             )
+         };
+         if res != 0 {
+@@ -291,9 +281,9 @@ pub fn read<T: FromBytes>(&mut self) -> Result<T> {
+     /// Reads the entirety of the user slice, appending it to the end of the provided buffer.
+     ///
+     /// Fails with [`EFAULT`] if the read happens on a bad address.
+-    pub fn read_all(mut self, buf: &mut Vec<u8>, flags: Flags) -> Result {
++    pub fn read_all(mut self, buf: &mut KVec<u8>, flags: Flags) -> Result {
+         let len = self.length;
+-        VecExt::<u8>::reserve(buf, len, flags)?;
++        buf.reserve(len, flags)?;
+ 
+         // The call to `try_reserve` was successful, so the spare capacity is at least `len` bytes
+         // long.
+@@ -338,12 +328,9 @@ pub fn write_slice(&mut self, data: &[u8]) -> Result {
+         if len > self.length {
+             return Err(EFAULT);
+         }
+-        let Ok(len_ulong) = c_ulong::try_from(len) else {
+-            return Err(EFAULT);
+-        };
+-        // SAFETY: `data_ptr` points into an immutable slice of length `len_ulong`, so we may read
++        // SAFETY: `data_ptr` points into an immutable slice of length `len`, so we may read
+         // that many bytes from it.
+-        let res = unsafe { bindings::copy_to_user(self.ptr as *mut c_void, data_ptr, len_ulong) };
++        let res = unsafe { bindings::copy_to_user(self.ptr as *mut c_void, data_ptr, len) };
+         if res != 0 {
+             return Err(EFAULT);
+         }
+@@ -362,9 +349,6 @@ pub fn write<T: AsBytes>(&mut self, value: &T) -> Result {
+         if len > self.length {
+             return Err(EFAULT);
+         }
+-        let Ok(len_ulong) = c_ulong::try_from(len) else {
+-            return Err(EFAULT);
+-        };
+         // SAFETY: The reference points to a value of type `T`, so it is valid for reading
+         // `size_of::<T>()` bytes.
+         //
+@@ -375,7 +359,7 @@ pub fn write<T: AsBytes>(&mut self, value: &T) -> Result {
+             bindings::_copy_to_user(
+                 self.ptr as *mut c_void,
+                 (value as *const T).cast::<c_void>(),
+-                len_ulong,
++                len,
+             )
+         };
+         if res != 0 {
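
The `len_ulong` conversions dropped throughout this file existed only to match the `unsigned long` length parameter of the raw C helpers; what remains is the essential bounds check before each copy. A userspace sketch of that shape (plain `memcpy()` standing in for `copy_from_user()`, with the same `EFAULT` failure mode):

  #include <errno.h>
  #include <stddef.h>
  #include <string.h>

  struct reader {
  	const unsigned char *ptr;	/* cursor into the source buffer */
  	size_t length;			/* bytes still readable */
  };

  /* Mirrors UserSliceReader::read_raw(): refuse reads past the end,
   * copy, then advance the cursor. */
  static int read_raw(struct reader *r, void *out, size_t len)
  {
  	if (len > r->length)
  		return -EFAULT;
  	memcpy(out, r->ptr, len);
  	r->ptr += len;
  	r->length -= len;
  	return 0;
  }

  int main(void)
  {
  	unsigned char src[4] = { 1, 2, 3, 4 }, dst[2];
  	struct reader r = { src, sizeof(src) };

  	return read_raw(&r, dst, sizeof(dst)) ? 1 : 0;
  }
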
+diff --git a/rust/kernel/workqueue.rs b/rust/kernel/workqueue.rs
+index 553a5cba2adcb5..4d1d2062f6eba5 100644
+--- a/rust/kernel/workqueue.rs
++++ b/rust/kernel/workqueue.rs
+@@ -216,7 +216,7 @@ pub fn try_spawn<T: 'static + Send + FnOnce()>(
+             func: Some(func),
+         });
+ 
+-        self.enqueue(Box::pin_init(init, flags).map_err(|_| AllocError)?);
++        self.enqueue(KBox::pin_init(init, flags).map_err(|_| AllocError)?);
+         Ok(())
+     }
+ }
+@@ -239,9 +239,9 @@ fn project(self: Pin<&mut Self>) -> &mut Option<T> {
+ }
+ 
+ impl<T: FnOnce()> WorkItem for ClosureWork<T> {
+-    type Pointer = Pin<Box<Self>>;
++    type Pointer = Pin<KBox<Self>>;
+ 
+-    fn run(mut this: Pin<Box<Self>>) {
++    fn run(mut this: Pin<KBox<Self>>) {
+         if let Some(func) = this.as_mut().project().take() {
+             (func)()
+         }
+@@ -297,7 +297,7 @@ unsafe fn __enqueue<F>(self, queue_work_on: F) -> Self::EnqueueOutput
+ 
+ /// Defines the method that should be called directly when a work item is executed.
+ ///
+-/// This trait is implemented by `Pin<Box<T>>` and [`Arc<T>`], and is mainly intended to be
++/// This trait is implemented by `Pin<KBox<T>>` and [`Arc<T>`], and is mainly intended to be
+ /// implemented for smart pointer types. For your own structs, you would implement [`WorkItem`]
+ /// instead. The [`run`] method on this trait will usually just perform the appropriate
+ /// `container_of` translation and then call into the [`run`][WorkItem::run] method from the
+@@ -329,7 +329,7 @@ pub unsafe trait WorkItemPointer<const ID: u64>: RawWorkItem<ID> {
+ /// This trait is used when the `work_struct` field is defined using the [`Work`] helper.
+ pub trait WorkItem<const ID: u64 = 0> {
+     /// The pointer type that this struct is wrapped in. This will typically be `Arc<Self>` or
+-    /// `Pin<Box<Self>>`.
++    /// `Pin<KBox<Self>>`.
+     type Pointer: WorkItemPointer<ID>;
+ 
+     /// The method that should be called when this work item is executed.
+@@ -366,7 +366,6 @@ unsafe impl<T: ?Sized, const ID: u64> Sync for Work<T, ID> {}
+ impl<T: ?Sized, const ID: u64> Work<T, ID> {
+     /// Creates a new instance of [`Work`].
+     #[inline]
+-    #[allow(clippy::new_ret_no_self)]
+     pub fn new(name: &'static CStr, key: &'static LockClassKey) -> impl PinInit<Self>
+     where
+         T: WorkItem<ID>,
+@@ -520,13 +519,14 @@ unsafe fn raw_get_work(ptr: *mut Self) -> *mut $crate::workqueue::Work<$work_typ
+     impl{T} HasWork<Self> for ClosureWork<T> { self.work }
+ }
+ 
++// SAFETY: TODO.
+ unsafe impl<T, const ID: u64> WorkItemPointer<ID> for Arc<T>
+ where
+     T: WorkItem<ID, Pointer = Self>,
+     T: HasWork<T, ID>,
+ {
+     unsafe extern "C" fn run(ptr: *mut bindings::work_struct) {
+-        // SAFETY: The `__enqueue` method always uses a `work_struct` stored in a `Work<T, ID>`.
++        // The `__enqueue` method always uses a `work_struct` stored in a `Work<T, ID>`.
+         let ptr = ptr as *mut Work<T, ID>;
+         // SAFETY: This computes the pointer that `__enqueue` got from `Arc::into_raw`.
+         let ptr = unsafe { T::work_container_of(ptr) };
+@@ -537,6 +537,7 @@ unsafe impl<T, const ID: u64> WorkItemPointer<ID> for Arc<T>
+     }
+ }
+ 
++// SAFETY: TODO.
+ unsafe impl<T, const ID: u64> RawWorkItem<ID> for Arc<T>
+ where
+     T: WorkItem<ID, Pointer = Self>,
+@@ -565,18 +566,19 @@ unsafe fn __enqueue<F>(self, queue_work_on: F) -> Self::EnqueueOutput
+     }
+ }
+ 
+-unsafe impl<T, const ID: u64> WorkItemPointer<ID> for Pin<Box<T>>
++// SAFETY: TODO.
++unsafe impl<T, const ID: u64> WorkItemPointer<ID> for Pin<KBox<T>>
+ where
+     T: WorkItem<ID, Pointer = Self>,
+     T: HasWork<T, ID>,
+ {
+     unsafe extern "C" fn run(ptr: *mut bindings::work_struct) {
+-        // SAFETY: The `__enqueue` method always uses a `work_struct` stored in a `Work<T, ID>`.
++        // The `__enqueue` method always uses a `work_struct` stored in a `Work<T, ID>`.
+         let ptr = ptr as *mut Work<T, ID>;
+         // SAFETY: This computes the pointer that `__enqueue` got from `Arc::into_raw`.
+         let ptr = unsafe { T::work_container_of(ptr) };
+         // SAFETY: This pointer comes from `Arc::into_raw` and we've been given back ownership.
+-        let boxed = unsafe { Box::from_raw(ptr) };
++        let boxed = unsafe { KBox::from_raw(ptr) };
+         // SAFETY: The box was already pinned when it was enqueued.
+         let pinned = unsafe { Pin::new_unchecked(boxed) };
+ 
+@@ -584,7 +586,8 @@ unsafe impl<T, const ID: u64> WorkItemPointer<ID> for Pin<Box<T>>
+     }
+ }
+ 
+-unsafe impl<T, const ID: u64> RawWorkItem<ID> for Pin<Box<T>>
++// SAFETY: TODO.
++unsafe impl<T, const ID: u64> RawWorkItem<ID> for Pin<KBox<T>>
+ where
+     T: WorkItem<ID, Pointer = Self>,
+     T: HasWork<T, ID>,
+@@ -598,9 +601,9 @@ unsafe fn __enqueue<F>(self, queue_work_on: F) -> Self::EnqueueOutput
+         // SAFETY: We're not going to move `self` or any of its fields, so its okay to temporarily
+         // remove the `Pin` wrapper.
+         let boxed = unsafe { Pin::into_inner_unchecked(self) };
+-        let ptr = Box::into_raw(boxed);
++        let ptr = KBox::into_raw(boxed);
+ 
+-        // SAFETY: Pointers into a `Box` point at a valid value.
++        // SAFETY: Pointers into a `KBox` point at a valid value.
+         let work_ptr = unsafe { T::raw_get_work(ptr) };
+         // SAFETY: `raw_get_work` returns a pointer to a valid value.
+         let work_ptr = unsafe { Work::raw_get(work_ptr) };
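
Both `run()` implementations above depend on the same `container_of` translation: the C workqueue hands the callback a pointer to the embedded `work_struct`, and the wrapper must recover the containing object before calling into the user's `run`. The underlying C idiom, reduced to a runnable sketch:

  #include <stddef.h>
  #include <stdio.h>

  #define container_of(ptr, type, member) \
  	((type *)((char *)(ptr) - offsetof(type, member)))

  struct work_struct { int pending; };

  struct my_item {
  	int payload;
  	struct work_struct work;	/* embedded, like Work<T, ID> */
  };

  static void run(struct work_struct *w)
  {
  	/* recover the containing struct, as work_container_of() does */
  	struct my_item *item = container_of(w, struct my_item, work);

  	printf("payload=%d\n", item->payload);
  }

  int main(void)
  {
  	struct my_item item = { .payload = 42, .work = { 0 } };

  	run(&item.work);
  	return 0;
  }
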
+diff --git a/rust/macros/lib.rs b/rust/macros/lib.rs
+index 90e2202ba4d5a0..b16402a16acd48 100644
+--- a/rust/macros/lib.rs
++++ b/rust/macros/lib.rs
+@@ -132,7 +132,7 @@ pub fn module(ts: TokenStream) -> TokenStream {
+ /// calls to this function at compile time:
+ ///
+ /// ```compile_fail
+-/// # use kernel::error::VTABLE_DEFAULT_ERROR;
++/// # // Intentionally missing `use`s to simplify `rusttest`.
+ /// kernel::build_error(VTABLE_DEFAULT_ERROR)
+ /// ```
+ ///
+@@ -242,8 +242,8 @@ pub fn concat_idents(ts: TokenStream) -> TokenStream {
+ /// #[pin_data]
+ /// struct DriverData {
+ ///     #[pin]
+-///     queue: Mutex<Vec<Command>>,
+-///     buf: Box<[u8; 1024 * 1024]>,
++///     queue: Mutex<KVec<Command>>,
++///     buf: KBox<[u8; 1024 * 1024]>,
+ /// }
+ /// ```
+ ///
+@@ -251,8 +251,8 @@ pub fn concat_idents(ts: TokenStream) -> TokenStream {
+ /// #[pin_data(PinnedDrop)]
+ /// struct DriverData {
+ ///     #[pin]
+-///     queue: Mutex<Vec<Command>>,
+-///     buf: Box<[u8; 1024 * 1024]>,
++///     queue: Mutex<KVec<Command>>,
++///     buf: KBox<[u8; 1024 * 1024]>,
+ ///     raw_info: *mut Info,
+ /// }
+ ///
+@@ -281,8 +281,8 @@ pub fn pin_data(inner: TokenStream, item: TokenStream) -> TokenStream {
+ /// #[pin_data(PinnedDrop)]
+ /// struct DriverData {
+ ///     #[pin]
+-///     queue: Mutex<Vec<Command>>,
+-///     buf: Box<[u8; 1024 * 1024]>,
++///     queue: Mutex<KVec<Command>>,
++///     buf: KBox<[u8; 1024 * 1024]>,
+ ///     raw_info: *mut Info,
+ /// }
+ ///
+diff --git a/rust/macros/module.rs b/rust/macros/module.rs
+index aef3b132f32b33..e7a087b7e88494 100644
+--- a/rust/macros/module.rs
++++ b/rust/macros/module.rs
+@@ -253,7 +253,7 @@ mod __module_init {{
+                     #[doc(hidden)]
+                     #[no_mangle]
+                     #[link_section = \".init.text\"]
+-                    pub unsafe extern \"C\" fn init_module() -> core::ffi::c_int {{
++                    pub unsafe extern \"C\" fn init_module() -> kernel::ffi::c_int {{
+                         // SAFETY: This function is inaccessible to the outside due to the double
+                         // module wrapping it. It is called exactly once by the C side via its
+                         // unique name.
+@@ -292,7 +292,7 @@ mod __module_init {{
+                     #[doc(hidden)]
+                     #[link_section = \"{initcall_section}\"]
+                     #[used]
+-                    pub static __{name}_initcall: extern \"C\" fn() -> core::ffi::c_int = __{name}_init;
++                    pub static __{name}_initcall: extern \"C\" fn() -> kernel::ffi::c_int = __{name}_init;
+ 
+                     #[cfg(not(MODULE))]
+                     #[cfg(CONFIG_HAVE_ARCH_PREL32_RELOCATIONS)]
+@@ -307,7 +307,7 @@ mod __module_init {{
+                     #[cfg(not(MODULE))]
+                     #[doc(hidden)]
+                     #[no_mangle]
+-                    pub extern \"C\" fn __{name}_init() -> core::ffi::c_int {{
++                    pub extern \"C\" fn __{name}_init() -> kernel::ffi::c_int {{
+                         // SAFETY: This function is inaccessible to the outside due to the double
+                         // module wrapping it. It is called exactly once by the C side via its
+                         // placement above in the initcall section.
+@@ -330,7 +330,7 @@ mod __module_init {{
+                     /// # Safety
+                     ///
+                     /// This function must only be called once.
+-                    unsafe fn __init() -> core::ffi::c_int {{
++                    unsafe fn __init() -> kernel::ffi::c_int {{
+                         match <{type_} as kernel::Module>::init(&super::super::THIS_MODULE) {{
+                             Ok(m) => {{
+                                 // SAFETY: No data race, since `__MOD` can only be accessed by this
+diff --git a/rust/uapi/lib.rs b/rust/uapi/lib.rs
+index 80a00260e3e7a1..13495910271faf 100644
+--- a/rust/uapi/lib.rs
++++ b/rust/uapi/lib.rs
+@@ -14,6 +14,7 @@
+ #![cfg_attr(test, allow(unsafe_op_in_unsafe_fn))]
+ #![allow(
+     clippy::all,
++    clippy::undocumented_unsafe_blocks,
+     dead_code,
+     missing_docs,
+     non_camel_case_types,
+@@ -24,4 +25,9 @@
+     unsafe_op_in_unsafe_fn
+ )]
+ 
++// Manual definition of blocklisted types.
++type __kernel_size_t = usize;
++type __kernel_ssize_t = isize;
++type __kernel_ptrdiff_t = isize;
++
+ include!(concat!(env!("OBJTREE"), "/rust/uapi/uapi_generated.rs"));
+diff --git a/samples/rust/rust_minimal.rs b/samples/rust/rust_minimal.rs
+index 2a9eaab62d1ca7..4aaf117bf8e3c0 100644
+--- a/samples/rust/rust_minimal.rs
++++ b/samples/rust/rust_minimal.rs
+@@ -13,7 +13,7 @@
+ }
+ 
+ struct RustMinimal {
+-    numbers: Vec<i32>,
++    numbers: KVec<i32>,
+ }
+ 
+ impl kernel::Module for RustMinimal {
+@@ -21,7 +21,7 @@ fn init(_module: &'static ThisModule) -> Result<Self> {
+         pr_info!("Rust minimal sample (init)\n");
+         pr_info!("Am I built-in? {}\n", !cfg!(MODULE));
+ 
+-        let mut numbers = Vec::new();
++        let mut numbers = KVec::new();
+         numbers.push(72, GFP_KERNEL)?;
+         numbers.push(108, GFP_KERNEL)?;
+         numbers.push(200, GFP_KERNEL)?;
+diff --git a/samples/rust/rust_print.rs b/samples/rust/rust_print.rs
+index 6eabb0d79ea3a7..ba1606bdbd7543 100644
+--- a/samples/rust/rust_print.rs
++++ b/samples/rust/rust_print.rs
+@@ -15,6 +15,7 @@
+ 
+ struct RustPrint;
+ 
++#[expect(clippy::disallowed_macros)]
+ fn arc_print() -> Result {
+     use kernel::sync::*;
+ 
+diff --git a/scripts/Makefile.build b/scripts/Makefile.build
+index 880785b52c04ad..2bba59e790b8a4 100644
+--- a/scripts/Makefile.build
++++ b/scripts/Makefile.build
+@@ -248,7 +248,7 @@ $(obj)/%.lst: $(obj)/%.c FORCE
+ # Compile Rust sources (.rs)
+ # ---------------------------------------------------------------------------
+ 
+-rust_allowed_features := arbitrary_self_types,new_uninit
++rust_allowed_features := arbitrary_self_types,lint_reasons
+ 
+ # `--out-dir` is required to avoid temporaries being created by `rustc` in the
+ # current working directory, which may be not accessible in the out-of-tree
+@@ -258,7 +258,7 @@ rust_common_cmd = \
+ 	-Zallow-features=$(rust_allowed_features) \
+ 	-Zcrate-attr=no_std \
+ 	-Zcrate-attr='feature($(rust_allowed_features))' \
+-	-Zunstable-options --extern force:alloc --extern kernel \
++	-Zunstable-options --extern kernel \
+ 	--crate-type rlib -L $(objtree)/rust/ \
+ 	--crate-name $(basename $(notdir $@)) \
+ 	--sysroot=/dev/null \
+diff --git a/scripts/generate_rust_analyzer.py b/scripts/generate_rust_analyzer.py
+index d2bc63cde8c6a3..09e1d166d8d236 100755
+--- a/scripts/generate_rust_analyzer.py
++++ b/scripts/generate_rust_analyzer.py
+@@ -64,13 +64,6 @@ def generate_crates(srctree, objtree, sysroot_src, external_src, cfgs):
+         [],
+     )
+ 
+-    append_crate(
+-        "alloc",
+-        sysroot_src / "alloc" / "src" / "lib.rs",
+-        ["core", "compiler_builtins"],
+-        cfg=crates_cfgs.get("alloc", []),
+-    )
+-
+     append_crate(
+         "macros",
+         srctree / "rust" / "macros" / "lib.rs",
+@@ -96,7 +89,7 @@ def generate_crates(srctree, objtree, sysroot_src, external_src, cfgs):
+     append_crate(
+         "kernel",
+         srctree / "rust" / "kernel" / "lib.rs",
+-        ["core", "alloc", "macros", "build_error", "bindings"],
++        ["core", "macros", "build_error", "bindings"],
+         cfg=cfg,
+     )
+     crates[-1]["source"] = {
+@@ -133,7 +126,7 @@ def generate_crates(srctree, objtree, sysroot_src, external_src, cfgs):
+             append_crate(
+                 name,
+                 path,
+-                ["core", "alloc", "kernel"],
++                ["core", "kernel"],
+                 cfg=cfg,
+             )
+ 
+diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
+index 9955c4d54e42a7..b30faf731da720 100644
+--- a/sound/core/seq/seq_clientmgr.c
++++ b/sound/core/seq/seq_clientmgr.c
+@@ -106,7 +106,7 @@ static struct snd_seq_client *clientptr(int clientid)
+ 	return clienttab[clientid];
+ }
+ 
+-struct snd_seq_client *snd_seq_client_use_ptr(int clientid)
++static struct snd_seq_client *client_use_ptr(int clientid, bool load_module)
+ {
+ 	unsigned long flags;
+ 	struct snd_seq_client *client;
+@@ -126,7 +126,7 @@ struct snd_seq_client *snd_seq_client_use_ptr(int clientid)
+ 	}
+ 	spin_unlock_irqrestore(&clients_lock, flags);
+ #ifdef CONFIG_MODULES
+-	if (!in_interrupt()) {
++	if (load_module) {
+ 		static DECLARE_BITMAP(client_requested, SNDRV_SEQ_GLOBAL_CLIENTS);
+ 		static DECLARE_BITMAP(card_requested, SNDRV_CARDS);
+ 
+@@ -168,6 +168,20 @@ struct snd_seq_client *snd_seq_client_use_ptr(int clientid)
+ 	return client;
+ }
+ 
++/* get snd_seq_client object for the given id quickly */
++struct snd_seq_client *snd_seq_client_use_ptr(int clientid)
++{
++	return client_use_ptr(clientid, false);
++}
++
++/* get snd_seq_client object for the given id;
++ * if not found, retry after loading the modules
++ */
++static struct snd_seq_client *client_load_and_use_ptr(int clientid)
++{
++	return client_use_ptr(clientid, IS_ENABLED(CONFIG_MODULES));
++}
++
+ /* Take refcount and perform ioctl_mutex lock on the given client;
+  * used only for OSS sequencer
+  * Unlock via snd_seq_client_ioctl_unlock() below
+@@ -176,7 +190,7 @@ bool snd_seq_client_ioctl_lock(int clientid)
+ {
+ 	struct snd_seq_client *client;
+ 
+-	client = snd_seq_client_use_ptr(clientid);
++	client = client_load_and_use_ptr(clientid);
+ 	if (!client)
+ 		return false;
+ 	mutex_lock(&client->ioctl_mutex);
+@@ -1195,7 +1209,7 @@ static int snd_seq_ioctl_running_mode(struct snd_seq_client *client, void  *arg)
+ 	int err = 0;
+ 
+ 	/* requested client number */
+-	cptr = snd_seq_client_use_ptr(info->client);
++	cptr = client_load_and_use_ptr(info->client);
+ 	if (cptr == NULL)
+ 		return -ENOENT;		/* don't change !!! */
+ 
+@@ -1257,7 +1271,7 @@ static int snd_seq_ioctl_get_client_info(struct snd_seq_client *client,
+ 	struct snd_seq_client *cptr;
+ 
+ 	/* requested client number */
+-	cptr = snd_seq_client_use_ptr(client_info->client);
++	cptr = client_load_and_use_ptr(client_info->client);
+ 	if (cptr == NULL)
+ 		return -ENOENT;		/* don't change !!! */
+ 
+@@ -1392,7 +1406,7 @@ static int snd_seq_ioctl_get_port_info(struct snd_seq_client *client, void *arg)
+ 	struct snd_seq_client *cptr;
+ 	struct snd_seq_client_port *port;
+ 
+-	cptr = snd_seq_client_use_ptr(info->addr.client);
++	cptr = client_load_and_use_ptr(info->addr.client);
+ 	if (cptr == NULL)
+ 		return -ENXIO;
+ 
+@@ -1496,10 +1510,10 @@ static int snd_seq_ioctl_subscribe_port(struct snd_seq_client *client,
+ 	struct snd_seq_client *receiver = NULL, *sender = NULL;
+ 	struct snd_seq_client_port *sport = NULL, *dport = NULL;
+ 
+-	receiver = snd_seq_client_use_ptr(subs->dest.client);
++	receiver = client_load_and_use_ptr(subs->dest.client);
+ 	if (!receiver)
+ 		goto __end;
+-	sender = snd_seq_client_use_ptr(subs->sender.client);
++	sender = client_load_and_use_ptr(subs->sender.client);
+ 	if (!sender)
+ 		goto __end;
+ 	sport = snd_seq_port_use_ptr(sender, subs->sender.port);
+@@ -1864,7 +1878,7 @@ static int snd_seq_ioctl_get_client_pool(struct snd_seq_client *client,
+ 	struct snd_seq_client_pool *info = arg;
+ 	struct snd_seq_client *cptr;
+ 
+-	cptr = snd_seq_client_use_ptr(info->client);
++	cptr = client_load_and_use_ptr(info->client);
+ 	if (cptr == NULL)
+ 		return -ENOENT;
+ 	memset(info, 0, sizeof(*info));
+@@ -1968,7 +1982,7 @@ static int snd_seq_ioctl_get_subscription(struct snd_seq_client *client,
+ 	struct snd_seq_client_port *sport = NULL;
+ 
+ 	result = -EINVAL;
+-	sender = snd_seq_client_use_ptr(subs->sender.client);
++	sender = client_load_and_use_ptr(subs->sender.client);
+ 	if (!sender)
+ 		goto __end;
+ 	sport = snd_seq_port_use_ptr(sender, subs->sender.port);
+@@ -1999,7 +2013,7 @@ static int snd_seq_ioctl_query_subs(struct snd_seq_client *client, void *arg)
+ 	struct list_head *p;
+ 	int i;
+ 
+-	cptr = snd_seq_client_use_ptr(subs->root.client);
++	cptr = client_load_and_use_ptr(subs->root.client);
+ 	if (!cptr)
+ 		goto __end;
+ 	port = snd_seq_port_use_ptr(cptr, subs->root.port);
+@@ -2066,7 +2080,7 @@ static int snd_seq_ioctl_query_next_client(struct snd_seq_client *client,
+ 	if (info->client < 0)
+ 		info->client = 0;
+ 	for (; info->client < SNDRV_SEQ_MAX_CLIENTS; info->client++) {
+-		cptr = snd_seq_client_use_ptr(info->client);
++		cptr = client_load_and_use_ptr(info->client);
+ 		if (cptr)
+ 			break; /* found */
+ 	}
+@@ -2089,7 +2103,7 @@ static int snd_seq_ioctl_query_next_port(struct snd_seq_client *client,
+ 	struct snd_seq_client *cptr;
+ 	struct snd_seq_client_port *port = NULL;
+ 
+-	cptr = snd_seq_client_use_ptr(info->addr.client);
++	cptr = client_load_and_use_ptr(info->addr.client);
+ 	if (cptr == NULL)
+ 		return -ENXIO;
+ 
+@@ -2186,7 +2200,7 @@ static int snd_seq_ioctl_client_ump_info(struct snd_seq_client *caller,
+ 		size = sizeof(struct snd_ump_endpoint_info);
+ 	else
+ 		size = sizeof(struct snd_ump_block_info);
+-	cptr = snd_seq_client_use_ptr(client);
++	cptr = client_load_and_use_ptr(client);
+ 	if (!cptr)
+ 		return -ENOENT;
+ 
+@@ -2458,7 +2472,7 @@ int snd_seq_kernel_client_enqueue(int client, struct snd_seq_event *ev,
+ 	if (check_event_type_and_length(ev))
+ 		return -EINVAL;
+ 
+-	cptr = snd_seq_client_use_ptr(client);
++	cptr = client_load_and_use_ptr(client);
+ 	if (cptr == NULL)
+ 		return -EINVAL;
+ 	
+@@ -2690,7 +2704,7 @@ void snd_seq_info_clients_read(struct snd_info_entry *entry,
+ 
+ 	/* list the client table */
+ 	for (c = 0; c < SNDRV_SEQ_MAX_CLIENTS; c++) {
+-		client = snd_seq_client_use_ptr(c);
++		client = client_load_and_use_ptr(c);
+ 		if (client == NULL)
+ 			continue;
+ 		if (client->type == NO_CLIENT) {
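
The fix replaces the `in_interrupt()` heuristic with an explicit `load_module` flag, so the sleeping `request_module()` path runs only for callers that opted in, while atomic-context callers keep the plain lookup. A toy userspace version of the wrapper split (a string table stands in for the client array, the assignment for `request_module()`):

  #include <stdbool.h>
  #include <stddef.h>
  #include <stdio.h>

  #define MAX_CLIENTS 8

  static const char *clients[MAX_CLIENTS];

  static const char *client_use_ptr(int id, bool load_module)
  {
  	if (id < 0 || id >= MAX_CLIENTS)
  		return NULL;
  	if (!clients[id] && load_module)
  		clients[id] = "loaded";	/* kernel: request_module() */
  	return clients[id];
  }

  /* fast path: safe in any context, never loads */
  static const char *use_ptr(int id)
  {
  	return client_use_ptr(id, false);
  }

  /* slow path: may sleep to load, like client_load_and_use_ptr() */
  static const char *load_and_use_ptr(int id)
  {
  	return client_use_ptr(id, true);
  }

  int main(void)
  {
  	printf("%s then %s\n", use_ptr(1) ? "hit" : "miss",
  	       load_and_use_ptr(1) ? "hit" : "miss");
  	return 0;
  }
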
+diff --git a/sound/pci/hda/Kconfig b/sound/pci/hda/Kconfig
+index 68f1eee9e5c938..dbf933c18a8219 100644
+--- a/sound/pci/hda/Kconfig
++++ b/sound/pci/hda/Kconfig
+@@ -208,6 +208,7 @@ comment "Set to Y if you want auto-loading the side codec driver"
+ 
+ config SND_HDA_CODEC_REALTEK
+ 	tristate "Build Realtek HD-audio codec support"
++	depends on INPUT
+ 	select SND_HDA_GENERIC
+ 	select SND_HDA_GENERIC_LEDS
+ 	select SND_HDA_SCODEC_COMPONENT
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index b4540c5cd2a6f9..ea52bc7370a58d 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -2242,6 +2242,8 @@ static const struct snd_pci_quirk power_save_denylist[] = {
+ 	SND_PCI_QUIRK(0x1631, 0xe017, "Packard Bell NEC IMEDIA 5204", 0),
+ 	/* KONTRON SinglePC may cause a stall at runtime resume */
+ 	SND_PCI_QUIRK(0x1734, 0x1232, "KONTRON SinglePC", 0),
++	/* Dell ALC3271 */
++	SND_PCI_QUIRK(0x1028, 0x0962, "Dell ALC3271", 0),
+ 	{}
+ };
+ 
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 4a3b4c6d4114b9..b559f0d4e34885 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -3845,6 +3845,79 @@ static void alc225_shutup(struct hda_codec *codec)
+ 	}
+ }
+ 
++static void alc222_init(struct hda_codec *codec)
++{
++	struct alc_spec *spec = codec->spec;
++	hda_nid_t hp_pin = alc_get_hp_pin(spec);
++	bool hp1_pin_sense, hp2_pin_sense;
++
++	if (!hp_pin)
++		return;
++
++	msleep(30);
++
++	hp1_pin_sense = snd_hda_jack_detect(codec, hp_pin);
++	hp2_pin_sense = snd_hda_jack_detect(codec, 0x14);
++
++	if (hp1_pin_sense || hp2_pin_sense) {
++		msleep(2);
++
++		if (hp1_pin_sense)
++			snd_hda_codec_write(codec, hp_pin, 0,
++				    AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
++		if (hp2_pin_sense)
++			snd_hda_codec_write(codec, 0x14, 0,
++				    AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
++		msleep(75);
++
++		if (hp1_pin_sense)
++			snd_hda_codec_write(codec, hp_pin, 0,
++				    AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE);
++		if (hp2_pin_sense)
++			snd_hda_codec_write(codec, 0x14, 0,
++				    AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE);
++
++		msleep(75);
++	}
++}
++
++static void alc222_shutup(struct hda_codec *codec)
++{
++	struct alc_spec *spec = codec->spec;
++	hda_nid_t hp_pin = alc_get_hp_pin(spec);
++	bool hp1_pin_sense, hp2_pin_sense;
++
++	if (!hp_pin)
++		hp_pin = 0x21;
++
++	hp1_pin_sense = snd_hda_jack_detect(codec, hp_pin);
++	hp2_pin_sense = snd_hda_jack_detect(codec, 0x14);
++
++	if (hp1_pin_sense || hp2_pin_sense) {
++		msleep(2);
++
++		if (hp1_pin_sense)
++			snd_hda_codec_write(codec, hp_pin, 0,
++				    AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
++		if (hp2_pin_sense)
++			snd_hda_codec_write(codec, 0x14, 0,
++				    AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
++
++		msleep(75);
++
++		if (hp1_pin_sense)
++			snd_hda_codec_write(codec, hp_pin, 0,
++				    AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
++		if (hp2_pin_sense)
++			snd_hda_codec_write(codec, 0x14, 0,
++				    AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
++
++		msleep(75);
++	}
++	alc_auto_setup_eapd(codec, false);
++	alc_shutup_pins(codec);
++}
++
+ static void alc_default_init(struct hda_codec *codec)
+ {
+ 	struct alc_spec *spec = codec->spec;
+@@ -4929,7 +5002,6 @@ static void alc298_fixup_samsung_amp_v2_4_amps(struct hda_codec *codec,
+ 		alc298_samsung_v2_init_amps(codec, 4);
+ }
+ 
+-#if IS_REACHABLE(CONFIG_INPUT)
+ static void gpio2_mic_hotkey_event(struct hda_codec *codec,
+ 				   struct hda_jack_callback *event)
+ {
+@@ -5038,10 +5110,6 @@ static void alc233_fixup_lenovo_line2_mic_hotkey(struct hda_codec *codec,
+ 		spec->kb_dev = NULL;
+ 	}
+ }
+-#else /* INPUT */
+-#define alc280_fixup_hp_gpio2_mic_hotkey	NULL
+-#define alc233_fixup_lenovo_line2_mic_hotkey	NULL
+-#endif /* INPUT */
+ 
+ static void alc269_fixup_hp_line1_mic1_led(struct hda_codec *codec,
+ 				const struct hda_fixup *fix, int action)
+@@ -5055,6 +5123,16 @@ static void alc269_fixup_hp_line1_mic1_led(struct hda_codec *codec,
+ 	}
+ }
+ 
++static void alc233_fixup_lenovo_low_en_micmute_led(struct hda_codec *codec,
++				const struct hda_fixup *fix, int action)
++{
++	struct alc_spec *spec = codec->spec;
++
++	if (action == HDA_FIXUP_ACT_PRE_PROBE)
++		spec->micmute_led_polarity = 1;
++	alc233_fixup_lenovo_line2_mic_hotkey(codec, fix, action);
++}
++
+ static void alc_hp_mute_disable(struct hda_codec *codec, unsigned int delay)
+ {
+ 	if (delay <= 0)
+@@ -7588,6 +7666,7 @@ enum {
+ 	ALC275_FIXUP_DELL_XPS,
+ 	ALC293_FIXUP_LENOVO_SPK_NOISE,
+ 	ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY,
++	ALC233_FIXUP_LENOVO_L2MH_LOW_ENLED,
+ 	ALC255_FIXUP_DELL_SPK_NOISE,
+ 	ALC225_FIXUP_DISABLE_MIC_VREF,
+ 	ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
+@@ -7657,7 +7736,6 @@ enum {
+ 	ALC285_FIXUP_THINKPAD_X1_GEN7,
+ 	ALC285_FIXUP_THINKPAD_HEADSET_JACK,
+ 	ALC294_FIXUP_ASUS_ALLY,
+-	ALC294_FIXUP_ASUS_ALLY_X,
+ 	ALC294_FIXUP_ASUS_ALLY_PINS,
+ 	ALC294_FIXUP_ASUS_ALLY_VERBS,
+ 	ALC294_FIXUP_ASUS_ALLY_SPEAKER,
+@@ -8574,6 +8652,10 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = alc233_fixup_lenovo_line2_mic_hotkey,
+ 	},
++	[ALC233_FIXUP_LENOVO_L2MH_LOW_ENLED] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc233_fixup_lenovo_low_en_micmute_led,
++	},
+ 	[ALC233_FIXUP_INTEL_NUC8_DMIC] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = alc_fixup_inv_dmic,
+@@ -9096,12 +9178,6 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.chained = true,
+ 		.chain_id = ALC294_FIXUP_ASUS_ALLY_PINS
+ 	},
+-	[ALC294_FIXUP_ASUS_ALLY_X] = {
+-		.type = HDA_FIXUP_FUNC,
+-		.v.func = tas2781_fixup_i2c,
+-		.chained = true,
+-		.chain_id = ALC294_FIXUP_ASUS_ALLY_PINS
+-	},
+ 	[ALC294_FIXUP_ASUS_ALLY_PINS] = {
+ 		.type = HDA_FIXUP_PINS,
+ 		.v.pins = (const struct hda_pintbl[]) {
+@@ -10586,7 +10662,6 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1043, 0x1740, "ASUS UX430UA", ALC295_FIXUP_ASUS_DACS),
+ 	SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_DUAL_SPK),
+ 	SND_PCI_QUIRK(0x1043, 0x17f3, "ROG Ally NR2301L/X", ALC294_FIXUP_ASUS_ALLY),
+-	SND_PCI_QUIRK(0x1043, 0x1eb3, "ROG Ally X RC72LA", ALC294_FIXUP_ASUS_ALLY_X),
+ 	SND_PCI_QUIRK(0x1043, 0x1863, "ASUS UX6404VI/VV", ALC245_FIXUP_CS35L41_SPI_2),
+ 	SND_PCI_QUIRK(0x1043, 0x1881, "ASUS Zephyrus S/M", ALC294_FIXUP_ASUS_GX502_PINS),
+ 	SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC),
+@@ -10852,6 +10927,9 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x3178, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x17aa, 0x31af, "ThinkCentre Station", ALC623_FIXUP_LENOVO_THINKSTATION_P340),
+ 	SND_PCI_QUIRK(0x17aa, 0x334b, "Lenovo ThinkCentre M70 Gen5", ALC283_FIXUP_HEADSET_MIC),
++	SND_PCI_QUIRK(0x17aa, 0x3384, "ThinkCentre M90a PRO", ALC233_FIXUP_LENOVO_L2MH_LOW_ENLED),
++	SND_PCI_QUIRK(0x17aa, 0x3386, "ThinkCentre M90a Gen6", ALC233_FIXUP_LENOVO_L2MH_LOW_ENLED),
++	SND_PCI_QUIRK(0x17aa, 0x3387, "ThinkCentre M70a Gen6", ALC233_FIXUP_LENOVO_L2MH_LOW_ENLED),
+ 	SND_PCI_QUIRK(0x17aa, 0x3801, "Lenovo Yoga9 14IAP7", ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN),
+ 	HDA_CODEC_QUIRK(0x17aa, 0x3802, "DuetITL 2021", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
+ 	SND_PCI_QUIRK(0x17aa, 0x3802, "Lenovo Yoga Pro 9 14IRP8", ALC287_FIXUP_TAS2781_I2C),
+@@ -11838,8 +11916,11 @@ static int patch_alc269(struct hda_codec *codec)
+ 		spec->codec_variant = ALC269_TYPE_ALC300;
+ 		spec->gen.mixer_nid = 0; /* no loopback on ALC300 */
+ 		break;
++	case 0x10ec0222:
+ 	case 0x10ec0623:
+ 		spec->codec_variant = ALC269_TYPE_ALC623;
++		spec->shutup = alc222_shutup;
++		spec->init_hook = alc222_init;
+ 		break;
+ 	case 0x10ec0700:
+ 	case 0x10ec0701:
+diff --git a/sound/usb/usx2y/usbusx2y.c b/sound/usb/usx2y/usbusx2y.c
+index 5f81c68fd42b68..5756ff3528a2d3 100644
+--- a/sound/usb/usx2y/usbusx2y.c
++++ b/sound/usb/usx2y/usbusx2y.c
+@@ -151,6 +151,12 @@ static int snd_usx2y_card_used[SNDRV_CARDS];
+ static void snd_usx2y_card_private_free(struct snd_card *card);
+ static void usx2y_unlinkseq(struct snd_usx2y_async_seq *s);
+ 
++#ifdef USX2Y_NRPACKS_VARIABLE
++int nrpacks = USX2Y_NRPACKS; /* number of packets per urb */
++module_param(nrpacks, int, 0444);
++MODULE_PARM_DESC(nrpacks, "Number of packets per URB.");
++#endif
++
+ /*
+  * pipe 4 is used for switching the lamps, setting samplerate, volumes ....
+  */
+@@ -432,6 +438,11 @@ static int snd_usx2y_probe(struct usb_interface *intf,
+ 	struct snd_card *card;
+ 	int err;
+ 
++#ifdef USX2Y_NRPACKS_VARIABLE
++	if (nrpacks < 0 || nrpacks > USX2Y_NRPACKS_MAX)
++		return -EINVAL;
++#endif
++
+ 	if (le16_to_cpu(device->descriptor.idVendor) != 0x1604 ||
+ 	    (le16_to_cpu(device->descriptor.idProduct) != USB_ID_US122 &&
+ 	     le16_to_cpu(device->descriptor.idProduct) != USB_ID_US224 &&
+diff --git a/sound/usb/usx2y/usbusx2y.h b/sound/usb/usx2y/usbusx2y.h
+index 391fd7b4ed5ef6..6a76d04bf1c7df 100644
+--- a/sound/usb/usx2y/usbusx2y.h
++++ b/sound/usb/usx2y/usbusx2y.h
+@@ -7,6 +7,32 @@
+ 
+ #define NRURBS	        2
+ 
++/* Default value used for nr of packs per urb.
++ * 1 to 4 have been tested ok on uhci.
++ * To use 3 on ohci, you'd need a patch:
++ * look for "0000425-linux-2.6.9-rc4-mm1_ohci-hcd.patch.gz" on
++ * "https://bugtrack.alsa-project.org/alsa-bug/bug_view_page.php?bug_id=0000425"
++ *
++ * 1, 2 and 4 work out of the box on ohci, if I recall correctly.
++ * Bigger is safer operation, smaller gives lower latencies.
++ */
++#define USX2Y_NRPACKS 4
++
++#define USX2Y_NRPACKS_MAX 1024
++
++/* If your system works ok with this module's parameter
++ * nrpacks set to 1, you might as well comment
++ * this define out, and thereby produce smaller, faster code.
++ * You'd also set USX2Y_NRPACKS to 1 then.
++ */
++#define USX2Y_NRPACKS_VARIABLE 1
++
++#ifdef USX2Y_NRPACKS_VARIABLE
++extern int nrpacks;
++#define nr_of_packs() nrpacks
++#else
++#define nr_of_packs() USX2Y_NRPACKS
++#endif
+ 
+ #define URBS_ASYNC_SEQ 10
+ #define URB_DATA_LEN_ASYNC_SEQ 32
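
Moving the `USX2Y_NRPACKS*` definitions into this header lets usbusx2y.c and usbusx2yaudio.c share one `nr_of_packs()` definition instead of the former file-local copy. The pattern, a compile-time constant that can optionally become a runtime knob, in a standalone sketch (a plain variable replaces `module_param()` here):

  #include <stdio.h>

  #define NRPACKS_DEFAULT 4
  #define NRPACKS_MAX 1024
  #define NRPACKS_VARIABLE 1	/* comment out for smaller, faster code */

  #ifdef NRPACKS_VARIABLE
  static int nrpacks = NRPACKS_DEFAULT;	/* in the driver: module_param() */
  #define nr_of_packs() nrpacks
  #else
  #define nr_of_packs() NRPACKS_DEFAULT
  #endif

  int main(void)
  {
  #ifdef NRPACKS_VARIABLE
  	/* range check at probe time, as added to snd_usx2y_probe() */
  	if (nrpacks < 0 || nrpacks > NRPACKS_MAX)
  		return 1;
  #endif
  	printf("packs per urb: %d\n", nr_of_packs());
  	return 0;
  }
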
+diff --git a/sound/usb/usx2y/usbusx2yaudio.c b/sound/usb/usx2y/usbusx2yaudio.c
+index f540f46a0b143b..acca8bead82e5b 100644
+--- a/sound/usb/usx2y/usbusx2yaudio.c
++++ b/sound/usb/usx2y/usbusx2yaudio.c
+@@ -28,33 +28,6 @@
+ #include "usx2y.h"
+ #include "usbusx2y.h"
+ 
+-/* Default value used for nr of packs per urb.
+- * 1 to 4 have been tested ok on uhci.
+- * To use 3 on ohci, you'd need a patch:
+- * look for "0000425-linux-2.6.9-rc4-mm1_ohci-hcd.patch.gz" on
+- * "https://bugtrack.alsa-project.org/alsa-bug/bug_view_page.php?bug_id=0000425"
+- *
+- * 1, 2 and 4 work out of the box on ohci, if I recall correctly.
+- * Bigger is safer operation, smaller gives lower latencies.
+- */
+-#define USX2Y_NRPACKS 4
+-
+-/* If your system works ok with this module's parameter
+- * nrpacks set to 1, you might as well comment
+- * this define out, and thereby produce smaller, faster code.
+- * You'd also set USX2Y_NRPACKS to 1 then.
+- */
+-#define USX2Y_NRPACKS_VARIABLE 1
+-
+-#ifdef USX2Y_NRPACKS_VARIABLE
+-static int nrpacks = USX2Y_NRPACKS; /* number of packets per urb */
+-#define  nr_of_packs() nrpacks
+-module_param(nrpacks, int, 0444);
+-MODULE_PARM_DESC(nrpacks, "Number of packets per URB.");
+-#else
+-#define nr_of_packs() USX2Y_NRPACKS
+-#endif
+-
+ static int usx2y_urb_capt_retire(struct snd_usx2y_substream *subs)
+ {
+ 	struct urb	*urb = subs->completed_urb;
+diff --git a/tools/testing/selftests/bpf/benchs/bench_trigger.c b/tools/testing/selftests/bpf/benchs/bench_trigger.c
+index 2ed0ef6f21eeec..32e9f194d4497e 100644
+--- a/tools/testing/selftests/bpf/benchs/bench_trigger.c
++++ b/tools/testing/selftests/bpf/benchs/bench_trigger.c
+@@ -4,6 +4,7 @@
+ #include <argp.h>
+ #include <unistd.h>
+ #include <stdint.h>
++#include "bpf_util.h"
+ #include "bench.h"
+ #include "trigger_bench.skel.h"
+ #include "trace_helpers.h"
+@@ -72,7 +73,7 @@ static __always_inline void inc_counter(struct counter *counters)
+ 	unsigned slot;
+ 
+ 	if (unlikely(tid == 0))
+-		tid = syscall(SYS_gettid);
++		tid = sys_gettid();
+ 
+ 	/* multiplicative hashing, it's fast */
+ 	slot = 2654435769U * tid;
+diff --git a/tools/testing/selftests/bpf/bpf_util.h b/tools/testing/selftests/bpf/bpf_util.h
+index 10587a29b9674f..feff92219e213f 100644
+--- a/tools/testing/selftests/bpf/bpf_util.h
++++ b/tools/testing/selftests/bpf/bpf_util.h
+@@ -6,6 +6,7 @@
+ #include <stdlib.h>
+ #include <string.h>
+ #include <errno.h>
++#include <syscall.h>
+ #include <bpf/libbpf.h> /* libbpf_num_possible_cpus */
+ 
+ static inline unsigned int bpf_num_possible_cpus(void)
+@@ -59,4 +60,12 @@ static inline void bpf_strlcpy(char *dst, const char *src, size_t sz)
+ 	(offsetof(TYPE, MEMBER)	+ sizeof_field(TYPE, MEMBER))
+ #endif
+ 
++/* Availability of gettid across glibc versions is hit-and-miss, therefore
++ * fallback to syscall in this macro and use it everywhere.
++ */
++#ifndef sys_gettid
++#define sys_gettid() syscall(SYS_gettid)
++#endif
++
++
+ #endif /* __BPF_UTIL__ */
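
Centralizing the macro in bpf_util.h gives every test the same fallback: glibc only gained a `gettid()` wrapper in version 2.30, so routing through `syscall(2)` works everywhere. A minimal Linux-only demonstration:

  #include <stdio.h>
  #include <sys/syscall.h>
  #include <unistd.h>

  /* same fallback as bpf_util.h */
  #ifndef sys_gettid
  #define sys_gettid() syscall(SYS_gettid)
  #endif

  int main(void)
  {
  	printf("tid=%ld\n", (long)sys_gettid());
  	return 0;
  }
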
+diff --git a/tools/testing/selftests/bpf/map_tests/task_storage_map.c b/tools/testing/selftests/bpf/map_tests/task_storage_map.c
+index 7d050364efca1a..62971dbf299615 100644
+--- a/tools/testing/selftests/bpf/map_tests/task_storage_map.c
++++ b/tools/testing/selftests/bpf/map_tests/task_storage_map.c
+@@ -12,6 +12,7 @@
+ #include <bpf/bpf.h>
+ #include <bpf/libbpf.h>
+ 
++#include "bpf_util.h"
+ #include "test_maps.h"
+ #include "task_local_storage_helpers.h"
+ #include "read_bpf_task_storage_busy.skel.h"
+@@ -115,7 +116,7 @@ void test_task_storage_map_stress_lookup(void)
+ 	CHECK(err, "attach", "error %d\n", err);
+ 
+ 	/* Trigger program */
+-	syscall(SYS_gettid);
++	sys_gettid();
+ 	skel->bss->pid = 0;
+ 
+ 	CHECK(skel->bss->busy != 0, "bad bpf_task_storage_busy", "got %d\n", skel->bss->busy);
+diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c b/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c
+index 070c52c312e5f6..6befa870434bcb 100644
+--- a/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c
++++ b/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c
+@@ -690,7 +690,7 @@ void test_bpf_cookie(void)
+ 	if (!ASSERT_OK_PTR(skel, "skel_open"))
+ 		return;
+ 
+-	skel->bss->my_tid = syscall(SYS_gettid);
++	skel->bss->my_tid = sys_gettid();
+ 
+ 	if (test__start_subtest("kprobe"))
+ 		kprobe_subtest(skel);
+diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
+index 9006549a12945f..b8e1224cfd190d 100644
+--- a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
++++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
+@@ -226,7 +226,7 @@ static void test_task_common_nocheck(struct bpf_iter_attach_opts *opts,
+ 	ASSERT_OK(pthread_create(&thread_id, NULL, &do_nothing_wait, NULL),
+ 		  "pthread_create");
+ 
+-	skel->bss->tid = syscall(SYS_gettid);
++	skel->bss->tid = sys_gettid();
+ 
+ 	do_dummy_read_opts(skel->progs.dump_task, opts);
+ 
+@@ -255,10 +255,10 @@ static void *run_test_task_tid(void *arg)
+ 	union bpf_iter_link_info linfo;
+ 	int num_unknown_tid, num_known_tid;
+ 
+-	ASSERT_NEQ(getpid(), syscall(SYS_gettid), "check_new_thread_id");
++	ASSERT_NEQ(getpid(), sys_gettid(), "check_new_thread_id");
+ 
+ 	memset(&linfo, 0, sizeof(linfo));
+-	linfo.task.tid = syscall(SYS_gettid);
++	linfo.task.tid = sys_gettid();
+ 	opts.link_info = &linfo;
+ 	opts.link_info_len = sizeof(linfo);
+ 	test_task_common(&opts, 0, 1);
+diff --git a/tools/testing/selftests/bpf/prog_tests/cgrp_local_storage.c b/tools/testing/selftests/bpf/prog_tests/cgrp_local_storage.c
+index 747761572098cd..9015e2c2ab1201 100644
+--- a/tools/testing/selftests/bpf/prog_tests/cgrp_local_storage.c
++++ b/tools/testing/selftests/bpf/prog_tests/cgrp_local_storage.c
+@@ -63,14 +63,14 @@ static void test_tp_btf(int cgroup_fd)
+ 	if (!ASSERT_OK(err, "map_delete_elem"))
+ 		goto out;
+ 
+-	skel->bss->target_pid = syscall(SYS_gettid);
++	skel->bss->target_pid = sys_gettid();
+ 
+ 	err = cgrp_ls_tp_btf__attach(skel);
+ 	if (!ASSERT_OK(err, "skel_attach"))
+ 		goto out;
+ 
+-	syscall(SYS_gettid);
+-	syscall(SYS_gettid);
++	sys_gettid();
++	sys_gettid();
+ 
+ 	skel->bss->target_pid = 0;
+ 
+@@ -154,7 +154,7 @@ static void test_recursion(int cgroup_fd)
+ 		goto out;
+ 
+ 	/* trigger sys_enter, make sure it does not cause deadlock */
+-	syscall(SYS_gettid);
++	sys_gettid();
+ 
+ out:
+ 	cgrp_ls_recursion__destroy(skel);
+@@ -224,7 +224,7 @@ static void test_yes_rcu_lock(__u64 cgroup_id)
+ 		return;
+ 
+ 	CGROUP_MODE_SET(skel);
+-	skel->bss->target_pid = syscall(SYS_gettid);
++	skel->bss->target_pid = sys_gettid();
+ 
+ 	bpf_program__set_autoload(skel->progs.yes_rcu_lock, true);
+ 	err = cgrp_ls_sleepable__load(skel);
+diff --git a/tools/testing/selftests/bpf/prog_tests/core_reloc.c b/tools/testing/selftests/bpf/prog_tests/core_reloc.c
+index 26019313e1fc20..1c682550e0e7ca 100644
+--- a/tools/testing/selftests/bpf/prog_tests/core_reloc.c
++++ b/tools/testing/selftests/bpf/prog_tests/core_reloc.c
+@@ -1010,7 +1010,7 @@ static void run_core_reloc_tests(bool use_btfgen)
+ 	struct data *data;
+ 	void *mmap_data = NULL;
+ 
+-	my_pid_tgid = getpid() | ((uint64_t)syscall(SYS_gettid) << 32);
++	my_pid_tgid = getpid() | ((uint64_t)sys_gettid() << 32);
+ 
+ 	for (i = 0; i < ARRAY_SIZE(test_cases); i++) {
+ 		char btf_file[] = "/tmp/core_reloc.btf.XXXXXX";
+diff --git a/tools/testing/selftests/bpf/prog_tests/linked_funcs.c b/tools/testing/selftests/bpf/prog_tests/linked_funcs.c
+index cad6645469129d..fa639b021f7ef8 100644
+--- a/tools/testing/selftests/bpf/prog_tests/linked_funcs.c
++++ b/tools/testing/selftests/bpf/prog_tests/linked_funcs.c
+@@ -20,7 +20,7 @@ void test_linked_funcs(void)
+ 	bpf_program__set_autoload(skel->progs.handler1, true);
+ 	bpf_program__set_autoload(skel->progs.handler2, true);
+ 
+-	skel->rodata->my_tid = syscall(SYS_gettid);
++	skel->rodata->my_tid = sys_gettid();
+ 	skel->bss->syscall_id = SYS_getpgid;
+ 
+ 	err = linked_funcs__load(skel);
+diff --git a/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c b/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c
+index c29787e092d66a..761ce24bce38fd 100644
+--- a/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c
++++ b/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c
+@@ -23,7 +23,7 @@ static int get_pid_tgid(pid_t *pid, pid_t *tgid,
+ 	struct stat st;
+ 	int err;
+ 
+-	*pid = syscall(SYS_gettid);
++	*pid = sys_gettid();
+ 	*tgid = getpid();
+ 
+ 	err = stat("/proc/self/ns/pid", &st);
+diff --git a/tools/testing/selftests/bpf/prog_tests/rcu_read_lock.c b/tools/testing/selftests/bpf/prog_tests/rcu_read_lock.c
+index a1f7e7378a64ce..ebe0c12b55363c 100644
+--- a/tools/testing/selftests/bpf/prog_tests/rcu_read_lock.c
++++ b/tools/testing/selftests/bpf/prog_tests/rcu_read_lock.c
+@@ -21,7 +21,7 @@ static void test_success(void)
+ 	if (!ASSERT_OK_PTR(skel, "skel_open"))
+ 		return;
+ 
+-	skel->bss->target_pid = syscall(SYS_gettid);
++	skel->bss->target_pid = sys_gettid();
+ 
+ 	bpf_program__set_autoload(skel->progs.get_cgroup_id, true);
+ 	bpf_program__set_autoload(skel->progs.task_succ, true);
+@@ -58,7 +58,7 @@ static void test_rcuptr_acquire(void)
+ 	if (!ASSERT_OK_PTR(skel, "skel_open"))
+ 		return;
+ 
+-	skel->bss->target_pid = syscall(SYS_gettid);
++	skel->bss->target_pid = sys_gettid();
+ 
+ 	bpf_program__set_autoload(skel->progs.task_acquire, true);
+ 	err = rcu_read_lock__load(skel);
+diff --git a/tools/testing/selftests/bpf/prog_tests/task_local_storage.c b/tools/testing/selftests/bpf/prog_tests/task_local_storage.c
+index c33c05161a9ea4..0d42ce00166f07 100644
+--- a/tools/testing/selftests/bpf/prog_tests/task_local_storage.c
++++ b/tools/testing/selftests/bpf/prog_tests/task_local_storage.c
+@@ -23,14 +23,14 @@ static void test_sys_enter_exit(void)
+ 	if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
+ 		return;
+ 
+-	skel->bss->target_pid = syscall(SYS_gettid);
++	skel->bss->target_pid = sys_gettid();
+ 
+ 	err = task_local_storage__attach(skel);
+ 	if (!ASSERT_OK(err, "skel_attach"))
+ 		goto out;
+ 
+-	syscall(SYS_gettid);
+-	syscall(SYS_gettid);
++	sys_gettid();
++	sys_gettid();
+ 
+ 	/* 3x syscalls: 1x attach and 2x gettid */
+ 	ASSERT_EQ(skel->bss->enter_cnt, 3, "enter_cnt");
+@@ -99,7 +99,7 @@ static void test_recursion(void)
+ 
+ 	/* trigger sys_enter, make sure it does not cause deadlock */
+ 	skel->bss->test_pid = getpid();
+-	syscall(SYS_gettid);
++	sys_gettid();
+ 	skel->bss->test_pid = 0;
+ 	task_ls_recursion__detach(skel);
+ 
+diff --git a/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c b/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c
+index c1ac813ff9bae3..02a484b22aa69b 100644
+--- a/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c
++++ b/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c
+@@ -125,7 +125,7 @@ static void *child_thread(void *ctx)
+ 	struct child *child = ctx;
+ 	int c = 0, err;
+ 
+-	child->tid = syscall(SYS_gettid);
++	child->tid = sys_gettid();
+ 
+ 	/* let parent know we are ready */
+ 	err = write(child->c2p[1], &c, 1);
+diff --git a/tools/testing/selftests/damon/damon_nr_regions.py b/tools/testing/selftests/damon/damon_nr_regions.py
+index 2e8a74aff54314..58f3291fed12a4 100755
+--- a/tools/testing/selftests/damon/damon_nr_regions.py
++++ b/tools/testing/selftests/damon/damon_nr_regions.py
+@@ -65,6 +65,7 @@ def test_nr_regions(real_nr_regions, min_nr_regions, max_nr_regions):
+ 
+     test_name = 'nr_regions test with %d/%d/%d real/min/max nr_regions' % (
+             real_nr_regions, min_nr_regions, max_nr_regions)
++    collected_nr_regions.sort()
+     if (collected_nr_regions[0] < min_nr_regions or
+         collected_nr_regions[-1] > max_nr_regions):
+         print('fail %s' % test_name)
+@@ -109,6 +110,7 @@ def main():
+     attrs = kdamonds.kdamonds[0].contexts[0].monitoring_attrs
+     attrs.min_nr_regions = 3
+     attrs.max_nr_regions = 7
++    attrs.update_us = 100000
+     err = kdamonds.kdamonds[0].commit()
+     if err is not None:
+         proc.terminate()
+diff --git a/tools/testing/selftests/damon/damos_quota.py b/tools/testing/selftests/damon/damos_quota.py
+index 7d4c6bb2e3cd27..57c4937aaed285 100755
+--- a/tools/testing/selftests/damon/damos_quota.py
++++ b/tools/testing/selftests/damon/damos_quota.py
+@@ -51,16 +51,19 @@ def main():
+         nr_quota_exceeds = scheme.stats.qt_exceeds
+ 
+     wss_collected.sort()
++    nr_expected_quota_exceeds = 0
+     for wss in wss_collected:
+         if wss > sz_quota:
+             print('quota is not kept: %s > %s' % (wss, sz_quota))
+             print('collected samples are as below')
+             print('\n'.join(['%d' % wss for wss in wss_collected]))
+             exit(1)
++        if wss == sz_quota:
++            nr_expected_quota_exceeds += 1
+ 
+-    if nr_quota_exceeds < len(wss_collected):
+-        print('quota is not always exceeded: %d > %d' %
+-              (len(wss_collected), nr_quota_exceeds))
++    if nr_quota_exceeds < nr_expected_quota_exceeds:
++        print('quota is exceeded less than expected: %d < %d' %
++              (nr_quota_exceeds, nr_expected_quota_exceeds))
+         exit(1)
+ 
+ if __name__ == '__main__':
+diff --git a/tools/testing/selftests/damon/damos_quota_goal.py b/tools/testing/selftests/damon/damos_quota_goal.py
+index 18246f3b62f7ee..f76e0412b564cb 100755
+--- a/tools/testing/selftests/damon/damos_quota_goal.py
++++ b/tools/testing/selftests/damon/damos_quota_goal.py
+@@ -63,6 +63,9 @@ def main():
+             if last_effective_bytes != 0 else -1.0))
+ 
+         if last_effective_bytes == goal.effective_bytes:
++            # effective quota was already minimum that cannot be more reduced
++            if expect_increase is False and last_effective_bytes == 1:
++                continue
+             print('efective bytes not changed: %d' % goal.effective_bytes)
+             exit(1)
+ 
+diff --git a/tools/testing/selftests/mm/hugepage-mremap.c b/tools/testing/selftests/mm/hugepage-mremap.c
+index ada9156cc497b3..c463d1c09c9b4a 100644
+--- a/tools/testing/selftests/mm/hugepage-mremap.c
++++ b/tools/testing/selftests/mm/hugepage-mremap.c
+@@ -15,7 +15,7 @@
+ #define _GNU_SOURCE
+ #include <stdlib.h>
+ #include <stdio.h>
+-#include <asm-generic/unistd.h>
++#include <unistd.h>
+ #include <sys/mman.h>
+ #include <errno.h>
+ #include <fcntl.h> /* Definition of O_* constants */
+diff --git a/tools/testing/selftests/mm/ksm_functional_tests.c b/tools/testing/selftests/mm/ksm_functional_tests.c
+index 66b4e111b5a273..b61803e36d1cf5 100644
+--- a/tools/testing/selftests/mm/ksm_functional_tests.c
++++ b/tools/testing/selftests/mm/ksm_functional_tests.c
+@@ -11,7 +11,7 @@
+ #include <string.h>
+ #include <stdbool.h>
+ #include <stdint.h>
+-#include <asm-generic/unistd.h>
++#include <unistd.h>
+ #include <errno.h>
+ #include <fcntl.h>
+ #include <sys/mman.h>
+@@ -369,6 +369,7 @@ static void test_unmerge_discarded(void)
+ 	munmap(map, size);
+ }
+ 
++#ifdef __NR_userfaultfd
+ static void test_unmerge_uffd_wp(void)
+ {
+ 	struct uffdio_writeprotect uffd_writeprotect;
+@@ -429,6 +430,7 @@ static void test_unmerge_uffd_wp(void)
+ unmap:
+ 	munmap(map, size);
+ }
++#endif
+ 
+ /* Verify that KSM can be enabled / queried with prctl. */
+ static void test_prctl(void)
+@@ -684,7 +686,9 @@ int main(int argc, char **argv)
+ 		exit(test_child_ksm());
+ 	}
+ 
++#ifdef __NR_userfaultfd
+ 	tests++;
++#endif
+ 
+ 	ksft_print_header();
+ 	ksft_set_plan(tests);
+@@ -696,7 +700,9 @@ int main(int argc, char **argv)
+ 	test_unmerge();
+ 	test_unmerge_zero_pages();
+ 	test_unmerge_discarded();
++#ifdef __NR_userfaultfd
+ 	test_unmerge_uffd_wp();
++#endif
+ 
+ 	test_prot_none();
+ 
+diff --git a/tools/testing/selftests/mm/memfd_secret.c b/tools/testing/selftests/mm/memfd_secret.c
+index 74c911aa3aea9f..9a0597310a7651 100644
+--- a/tools/testing/selftests/mm/memfd_secret.c
++++ b/tools/testing/selftests/mm/memfd_secret.c
+@@ -17,7 +17,7 @@
+ 
+ #include <stdlib.h>
+ #include <string.h>
+-#include <asm-generic/unistd.h>
++#include <unistd.h>
+ #include <errno.h>
+ #include <stdio.h>
+ #include <fcntl.h>
+@@ -28,6 +28,8 @@
+ #define pass(fmt, ...) ksft_test_result_pass(fmt, ##__VA_ARGS__)
+ #define skip(fmt, ...) ksft_test_result_skip(fmt, ##__VA_ARGS__)
+ 
++#ifdef __NR_memfd_secret
++
+ #define PATTERN	0x55
+ 
+ static const int prot = PROT_READ | PROT_WRITE;
+@@ -332,3 +334,13 @@ int main(int argc, char *argv[])
+ 
+ 	ksft_finished();
+ }
++
++#else /* __NR_memfd_secret */
++
++int main(int argc, char *argv[])
++{
++	printf("skip: skipping memfd_secret test (missing __NR_memfd_secret)\n");
++	return KSFT_SKIP;
++}
++
++#endif /* __NR_memfd_secret */
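
The same guard pattern recurs in the uffd changes below: when the libc headers do not define the syscall number, the build gets a stub `main()` that reports the kselftest skip code instead of a compile error. Its skeleton (KSFT_SKIP is 4 in kselftest.h):

  #include <stdio.h>
  #include <sys/syscall.h>	/* may or may not define __NR_memfd_secret */

  #define KSFT_SKIP 4

  #ifdef __NR_memfd_secret
  int main(void)
  {
  	/* the real test body runs only where the syscall number exists */
  	return 0;
  }
  #else
  int main(void)
  {
  	printf("skip: missing __NR_memfd_secret\n");
  	return KSFT_SKIP;
  }
  #endif
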
+diff --git a/tools/testing/selftests/mm/mkdirty.c b/tools/testing/selftests/mm/mkdirty.c
+index 1db134063c38c0..b8a7efe9204ea1 100644
+--- a/tools/testing/selftests/mm/mkdirty.c
++++ b/tools/testing/selftests/mm/mkdirty.c
+@@ -9,7 +9,7 @@
+  */
+ #include <fcntl.h>
+ #include <signal.h>
+-#include <asm-generic/unistd.h>
++#include <unistd.h>
+ #include <string.h>
+ #include <errno.h>
+ #include <stdlib.h>
+@@ -265,6 +265,7 @@ static void test_pte_mapped_thp(void)
+ 	munmap(mmap_mem, mmap_size);
+ }
+ 
++#ifdef __NR_userfaultfd
+ static void test_uffdio_copy(void)
+ {
+ 	struct uffdio_register uffdio_register;
+@@ -321,6 +322,7 @@ static void test_uffdio_copy(void)
+ 	munmap(dst, pagesize);
+ 	free(src);
+ }
++#endif /* __NR_userfaultfd */
+ 
+ int main(void)
+ {
+@@ -333,7 +335,9 @@ int main(void)
+ 			       thpsize / 1024);
+ 		tests += 3;
+ 	}
++#ifdef __NR_userfaultfd
+ 	tests += 1;
++#endif /* __NR_userfaultfd */
+ 
+ 	ksft_print_header();
+ 	ksft_set_plan(tests);
+@@ -363,7 +367,9 @@ int main(void)
+ 	if (thpsize)
+ 		test_pte_mapped_thp();
+ 	/* Placing a fresh page via userfaultfd may set the PTE dirty. */
++#ifdef __NR_userfaultfd
+ 	test_uffdio_copy();
++#endif /* __NR_userfaultfd */
+ 
+ 	err = ksft_get_fail_cnt();
+ 	if (err)
+diff --git a/tools/testing/selftests/mm/mlock2.h b/tools/testing/selftests/mm/mlock2.h
+index 1e5731bab499a3..4417eaa5cfb78b 100644
+--- a/tools/testing/selftests/mm/mlock2.h
++++ b/tools/testing/selftests/mm/mlock2.h
+@@ -3,7 +3,6 @@
+ #include <errno.h>
+ #include <stdio.h>
+ #include <stdlib.h>
+-#include <asm-generic/unistd.h>
+ 
+ static int mlock2_(void *start, size_t len, int flags)
+ {
+diff --git a/tools/testing/selftests/mm/protection_keys.c b/tools/testing/selftests/mm/protection_keys.c
+index 4990f7ab4cb729..4fcecfb7b189bb 100644
+--- a/tools/testing/selftests/mm/protection_keys.c
++++ b/tools/testing/selftests/mm/protection_keys.c
+@@ -42,7 +42,7 @@
+ #include <sys/wait.h>
+ #include <sys/stat.h>
+ #include <fcntl.h>
+-#include <asm-generic/unistd.h>
++#include <unistd.h>
+ #include <sys/ptrace.h>
+ #include <setjmp.h>
+ 
+diff --git a/tools/testing/selftests/mm/uffd-common.c b/tools/testing/selftests/mm/uffd-common.c
+index 717539eddf9875..7ad6ba660c7d6f 100644
+--- a/tools/testing/selftests/mm/uffd-common.c
++++ b/tools/testing/selftests/mm/uffd-common.c
+@@ -673,7 +673,11 @@ int uffd_open_dev(unsigned int flags)
+ 
+ int uffd_open_sys(unsigned int flags)
+ {
++#ifdef __NR_userfaultfd
+ 	return syscall(__NR_userfaultfd, flags);
++#else
++	return -1;
++#endif
+ }
+ 
+ int uffd_open(unsigned int flags)
+diff --git a/tools/testing/selftests/mm/uffd-stress.c b/tools/testing/selftests/mm/uffd-stress.c
+index a4b83280998ab7..944d559ade21f2 100644
+--- a/tools/testing/selftests/mm/uffd-stress.c
++++ b/tools/testing/selftests/mm/uffd-stress.c
+@@ -33,10 +33,11 @@
+  * pthread_mutex_lock will also verify the atomicity of the memory
+  * transfer (UFFDIO_COPY).
+  */
+-#include <asm-generic/unistd.h>
++
+ #include "uffd-common.h"
+ 
+ uint64_t features;
++#ifdef __NR_userfaultfd
+ 
+ #define BOUNCE_RANDOM		(1<<0)
+ #define BOUNCE_RACINGFAULTS	(1<<1)
+@@ -471,3 +472,15 @@ int main(int argc, char **argv)
+ 	       nr_pages, nr_pages_per_cpu);
+ 	return userfaultfd_stress();
+ }
++
++#else /* __NR_userfaultfd */
++
++#warning "missing __NR_userfaultfd definition"
++
++int main(void)
++{
++	printf("skip: Skipping userfaultfd test (missing __NR_userfaultfd)\n");
++	return KSFT_SKIP;
++}
++
++#endif /* __NR_userfaultfd */
+diff --git a/tools/testing/selftests/mm/uffd-unit-tests.c b/tools/testing/selftests/mm/uffd-unit-tests.c
+index a2e71b1636e7ca..3ddbb0a71b9c12 100644
+--- a/tools/testing/selftests/mm/uffd-unit-tests.c
++++ b/tools/testing/selftests/mm/uffd-unit-tests.c
+@@ -5,11 +5,12 @@
+  *  Copyright (C) 2015-2023  Red Hat, Inc.
+  */
+ 
+-#include <asm-generic/unistd.h>
+ #include "uffd-common.h"
+ 
+ #include "../../../../mm/gup_test.h"
+ 
++#ifdef __NR_userfaultfd
++
+ /* The unit test doesn't need a large or random size, make it 32MB for now */
+ #define  UFFD_TEST_MEM_SIZE               (32UL << 20)
+ 
+@@ -1558,3 +1559,14 @@ int main(int argc, char *argv[])
+ 	return ksft_get_fail_cnt() ? KSFT_FAIL : KSFT_PASS;
+ }
+ 
++#else /* __NR_userfaultfd */
++
++#warning "missing __NR_userfaultfd definition"
++
++int main(void)
++{
++	printf("Skipping %s (missing __NR_userfaultfd)\n", __file__);
++	return KSFT_SKIP;
++}
++
++#endif /* __NR_userfaultfd */
+diff --git a/usr/include/Makefile b/usr/include/Makefile
+index 771e32872b2ab1..58173cfe5ff179 100644
+--- a/usr/include/Makefile
++++ b/usr/include/Makefile
+@@ -10,7 +10,7 @@ UAPI_CFLAGS := -std=c90 -Wall -Werror=implicit-function-declaration
+ 
+ # In theory, we do not care -m32 or -m64 for header compile tests.
+ # It is here just because CONFIG_CC_CAN_LINK is tested with -m32 or -m64.
+-UAPI_CFLAGS += $(filter -m32 -m64 --target=%, $(KBUILD_CFLAGS))
++UAPI_CFLAGS += $(filter -m32 -m64 --target=%, $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS))
+ 
+ # USERCFLAGS might contain sysroot location for CC.
+ UAPI_CFLAGS += $(USERCFLAGS)


* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2025-03-07 18:22 Mike Pagano
  0 siblings, 0 replies; 82+ messages in thread
From: Mike Pagano @ 2025-03-07 18:22 UTC (permalink / raw
  To: gentoo-commits

commit:     d5a9b4d7acad17d938141574f2e0bd1e9d28bbf8
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Mar  7 18:22:16 2025 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Mar  7 18:22:16 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=d5a9b4d7

Linux patch 6.12.18

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1017_linux-6.12.18.patch | 8470 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 8474 insertions(+)

diff --git a/0000_README b/0000_README
index 8efc8938..85e743e9 100644
--- a/0000_README
+++ b/0000_README
@@ -111,6 +111,10 @@ Patch:  1016_linux-6.12.17.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.12.17
 
+Patch:  1017_linux-6.12.18.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.12.18
+
 Patch:  1500_fortify-copy-size-value-range-tracking-fix.patch
 From:   https://git.kernel.org/
 Desc:   fortify: Hide run-time copy size from value range tracking

diff --git a/1017_linux-6.12.18.patch b/1017_linux-6.12.18.patch
new file mode 100644
index 00000000..75258348
--- /dev/null
+++ b/1017_linux-6.12.18.patch
@@ -0,0 +1,8470 @@
+diff --git a/Makefile b/Makefile
+index e8b8c5b3840505..17dfe0a8ca8fa9 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 12
+-SUBLEVEL = 17
++SUBLEVEL = 18
+ EXTRAVERSION =
+ NAME = Baby Opossum Posse
+ 
+diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
+index c315bc1a4e9adf..1bf70fa1045dcd 100644
+--- a/arch/arm64/include/asm/kvm_host.h
++++ b/arch/arm64/include/asm/kvm_host.h
+@@ -1243,7 +1243,7 @@ int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
+ extern unsigned int __ro_after_init kvm_arm_vmid_bits;
+ int __init kvm_arm_vmid_alloc_init(void);
+ void __init kvm_arm_vmid_alloc_free(void);
+-bool kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid);
++void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid);
+ void kvm_arm_vmid_clear_active(void);
+ 
+ static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
+diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
+index 117702f033218d..3cf65daa75a51f 100644
+--- a/arch/arm64/kvm/arm.c
++++ b/arch/arm64/kvm/arm.c
+@@ -580,6 +580,16 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+ 	mmu = vcpu->arch.hw_mmu;
+ 	last_ran = this_cpu_ptr(mmu->last_vcpu_ran);
+ 
++	/*
++	 * Ensure a VMID is allocated for the MMU before programming VTTBR_EL2,
++	 * which happens eagerly in VHE.
++	 *
++	 * Also, the VMID allocator only preserves VMIDs that are active at the
++	 * time of rollover, so KVM might need to grab a new VMID for the MMU if
++	 * this is called from kvm_sched_in().
++	 */
++	kvm_arm_vmid_update(&mmu->vmid);
++
+ 	/*
+ 	 * We guarantee that both TLBs and I-cache are private to each
+ 	 * vcpu. If detecting that a vcpu from the same VM has
+@@ -1155,18 +1165,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
+ 		 */
+ 		preempt_disable();
+ 
+-		/*
+-		 * The VMID allocator only tracks active VMIDs per
+-		 * physical CPU, and therefore the VMID allocated may not be
+-		 * preserved on VMID roll-over if the task was preempted,
+-		 * making a thread's VMID inactive. So we need to call
+-		 * kvm_arm_vmid_update() in non-premptible context.
+-		 */
+-		if (kvm_arm_vmid_update(&vcpu->arch.hw_mmu->vmid) &&
+-		    has_vhe())
+-			__load_stage2(vcpu->arch.hw_mmu,
+-				      vcpu->arch.hw_mmu->arch);
+-
+ 		kvm_pmu_flush_hwstate(vcpu);
+ 
+ 		local_irq_disable();
+diff --git a/arch/arm64/kvm/vmid.c b/arch/arm64/kvm/vmid.c
+index 806223b7022afd..7fe8ba1a2851c5 100644
+--- a/arch/arm64/kvm/vmid.c
++++ b/arch/arm64/kvm/vmid.c
+@@ -135,11 +135,10 @@ void kvm_arm_vmid_clear_active(void)
+ 	atomic64_set(this_cpu_ptr(&active_vmids), VMID_ACTIVE_INVALID);
+ }
+ 
+-bool kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid)
++void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid)
+ {
+ 	unsigned long flags;
+ 	u64 vmid, old_active_vmid;
+-	bool updated = false;
+ 
+ 	vmid = atomic64_read(&kvm_vmid->id);
+ 
+@@ -157,21 +156,17 @@ bool kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid)
+ 	if (old_active_vmid != 0 && vmid_gen_match(vmid) &&
+ 	    0 != atomic64_cmpxchg_relaxed(this_cpu_ptr(&active_vmids),
+ 					  old_active_vmid, vmid))
+-		return false;
++		return;
+ 
+ 	raw_spin_lock_irqsave(&cpu_vmid_lock, flags);
+ 
+ 	/* Check that our VMID belongs to the current generation. */
+ 	vmid = atomic64_read(&kvm_vmid->id);
+-	if (!vmid_gen_match(vmid)) {
++	if (!vmid_gen_match(vmid))
+ 		vmid = new_vmid(kvm_vmid);
+-		updated = true;
+-	}
+ 
+ 	atomic64_set(this_cpu_ptr(&active_vmids), vmid);
+ 	raw_spin_unlock_irqrestore(&cpu_vmid_lock, flags);
+-
+-	return updated;
+ }
+ 
+ /*
+diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
+index ea71ef2e343c2c..93ba66de160ce4 100644
+--- a/arch/arm64/mm/init.c
++++ b/arch/arm64/mm/init.c
+@@ -278,12 +278,7 @@ void __init arm64_memblock_init(void)
+ 
+ 	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
+ 		extern u16 memstart_offset_seed;
+-
+-		/*
+-		 * Use the sanitised version of id_aa64mmfr0_el1 so that linear
+-		 * map randomization can be enabled by shrinking the IPA space.
+-		 */
+-		u64 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
++		u64 mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
+ 		int parange = cpuid_feature_extract_unsigned_field(
+ 					mmfr0, ID_AA64MMFR0_EL1_PARANGE_SHIFT);
+ 		s64 range = linear_region_size -
+diff --git a/arch/riscv/include/asm/futex.h b/arch/riscv/include/asm/futex.h
+index fc8130f995c1ee..6907c456ac8c05 100644
+--- a/arch/riscv/include/asm/futex.h
++++ b/arch/riscv/include/asm/futex.h
+@@ -93,7 +93,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ 		_ASM_EXTABLE_UACCESS_ERR(1b, 3b, %[r])	\
+ 		_ASM_EXTABLE_UACCESS_ERR(2b, 3b, %[r])	\
+ 	: [r] "+r" (ret), [v] "=&r" (val), [u] "+m" (*uaddr), [t] "=&r" (tmp)
+-	: [ov] "Jr" (oldval), [nv] "Jr" (newval)
++	: [ov] "Jr" ((long)(int)oldval), [nv] "Jr" (newval)
+ 	: "memory");
+ 	__disable_user_access();
+ 
+diff --git a/arch/riscv/kernel/cacheinfo.c b/arch/riscv/kernel/cacheinfo.c
+index 2d40736fc37cec..26b085dbdd073f 100644
+--- a/arch/riscv/kernel/cacheinfo.c
++++ b/arch/riscv/kernel/cacheinfo.c
+@@ -108,11 +108,11 @@ int populate_cache_leaves(unsigned int cpu)
+ 	if (!np)
+ 		return -ENOENT;
+ 
+-	if (of_property_read_bool(np, "cache-size"))
++	if (of_property_present(np, "cache-size"))
+ 		ci_leaf_init(this_leaf++, CACHE_TYPE_UNIFIED, level);
+-	if (of_property_read_bool(np, "i-cache-size"))
++	if (of_property_present(np, "i-cache-size"))
+ 		ci_leaf_init(this_leaf++, CACHE_TYPE_INST, level);
+-	if (of_property_read_bool(np, "d-cache-size"))
++	if (of_property_present(np, "d-cache-size"))
+ 		ci_leaf_init(this_leaf++, CACHE_TYPE_DATA, level);
+ 
+ 	prev = np;
+@@ -125,11 +125,11 @@ int populate_cache_leaves(unsigned int cpu)
+ 			break;
+ 		if (level <= levels)
+ 			break;
+-		if (of_property_read_bool(np, "cache-size"))
++		if (of_property_present(np, "cache-size"))
+ 			ci_leaf_init(this_leaf++, CACHE_TYPE_UNIFIED, level);
+-		if (of_property_read_bool(np, "i-cache-size"))
++		if (of_property_present(np, "i-cache-size"))
+ 			ci_leaf_init(this_leaf++, CACHE_TYPE_INST, level);
+-		if (of_property_read_bool(np, "d-cache-size"))
++		if (of_property_present(np, "d-cache-size"))
+ 			ci_leaf_init(this_leaf++, CACHE_TYPE_DATA, level);
+ 		levels = level;
+ 	}
+diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c
+index 3a8eeaa9310c32..308430af3e8f83 100644
+--- a/arch/riscv/kernel/cpufeature.c
++++ b/arch/riscv/kernel/cpufeature.c
+@@ -454,7 +454,7 @@ static void __init riscv_resolve_isa(unsigned long *source_isa,
+ 			if (bit < RISCV_ISA_EXT_BASE)
+ 				*this_hwcap |= isa2hwcap[bit];
+ 		}
+-	} while (loop && memcmp(prev_resolved_isa, resolved_isa, sizeof(prev_resolved_isa)));
++	} while (loop && !bitmap_equal(prev_resolved_isa, resolved_isa, RISCV_ISA_EXT_MAX));
+ }
+ 
+ static void __init match_isa_ext(const char *name, const char *name_end, unsigned long *bitmap)
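
bitmap_equal() compares exactly RISCV_ISA_EXT_MAX bits, whereas the memcmp() it replaces compared the bitmap's entire backing storage, so differences confined to the unused tail bits no longer count. A userspace sketch of the distinction, open-coding the masked tail compare that bitmap_equal() performs (assumes a 64-bit unsigned long; the names and sizes are illustrative):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    #define NBITS 100                       /* a bit count that is not a multiple of 64 */
    #define NWORDS ((NBITS + 63) / 64)

    /* Minimal stand-in for the kernel's bitmap_equal(): only the first
     * nbits participate in the comparison. */
    static bool bitmap_equal(const unsigned long *a, const unsigned long *b,
                             unsigned int nbits)
    {
        unsigned int full = nbits / 64, rem = nbits % 64;

        if (memcmp(a, b, full * sizeof(long)))
            return false;
        if (rem) {
            unsigned long mask = (1UL << rem) - 1;
            if ((a[full] ^ b[full]) & mask)
                return false;
        }
        return true;
    }

    int main(void)
    {
        unsigned long a[NWORDS] = { 0x1 }, b[NWORDS] = { 0x1 };

        b[NWORDS - 1] |= 1UL << 63;         /* garbage beyond bit NBITS - 1 */

        printf("memcmp sees a difference: %s\n",
               memcmp(a, b, sizeof(a)) ? "yes" : "no");
        printf("bitmap_equal over NBITS:  %s\n",
               bitmap_equal(a, b, NBITS) ? "equal" : "differ");
        return 0;
    }
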
+diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
+index 2b3c152d3c91f5..7934613a98c883 100644
+--- a/arch/riscv/kernel/setup.c
++++ b/arch/riscv/kernel/setup.c
+@@ -288,8 +288,8 @@ void __init setup_arch(char **cmdline_p)
+ 
+ 	riscv_init_cbo_blocksizes();
+ 	riscv_fill_hwcap();
+-	init_rt_signal_env();
+ 	apply_boot_alternatives();
++	init_rt_signal_env();
+ 
+ 	if (IS_ENABLED(CONFIG_RISCV_ISA_ZICBOM) &&
+ 	    riscv_isa_extension_available(NULL, ZICBOM))
+diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c
+index dcd28241945613..c3c517b9eee554 100644
+--- a/arch/riscv/kernel/signal.c
++++ b/arch/riscv/kernel/signal.c
+@@ -215,12 +215,6 @@ static size_t get_rt_frame_size(bool cal_all)
+ 		if (cal_all || riscv_v_vstate_query(task_pt_regs(current)))
+ 			total_context_size += riscv_v_sc_size;
+ 	}
+-	/*
+-	 * Preserved a __riscv_ctx_hdr for END signal context header if an
+-	 * extension uses __riscv_extra_ext_header
+-	 */
+-	if (total_context_size)
+-		total_context_size += sizeof(struct __riscv_ctx_hdr);
+ 
+ 	frame_size += total_context_size;
+ 
+diff --git a/arch/riscv/kvm/vcpu_sbi_hsm.c b/arch/riscv/kvm/vcpu_sbi_hsm.c
+index dce667f4b6ab08..3070bb31745de7 100644
+--- a/arch/riscv/kvm/vcpu_sbi_hsm.c
++++ b/arch/riscv/kvm/vcpu_sbi_hsm.c
+@@ -9,6 +9,7 @@
+ #include <linux/errno.h>
+ #include <linux/err.h>
+ #include <linux/kvm_host.h>
++#include <linux/wordpart.h>
+ #include <asm/sbi.h>
+ #include <asm/kvm_vcpu_sbi.h>
+ 
+@@ -79,12 +80,12 @@ static int kvm_sbi_hsm_vcpu_get_status(struct kvm_vcpu *vcpu)
+ 	target_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, target_vcpuid);
+ 	if (!target_vcpu)
+ 		return SBI_ERR_INVALID_PARAM;
+-	if (!kvm_riscv_vcpu_stopped(target_vcpu))
+-		return SBI_HSM_STATE_STARTED;
+-	else if (vcpu->stat.generic.blocking)
++	if (kvm_riscv_vcpu_stopped(target_vcpu))
++		return SBI_HSM_STATE_STOPPED;
++	else if (target_vcpu->stat.generic.blocking)
+ 		return SBI_HSM_STATE_SUSPENDED;
+ 	else
+-		return SBI_HSM_STATE_STOPPED;
++		return SBI_HSM_STATE_STARTED;
+ }
+ 
+ static int kvm_sbi_ext_hsm_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
+@@ -109,7 +110,7 @@ static int kvm_sbi_ext_hsm_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ 		}
+ 		return 0;
+ 	case SBI_EXT_HSM_HART_SUSPEND:
+-		switch (cp->a0) {
++		switch (lower_32_bits(cp->a0)) {
+ 		case SBI_HSM_SUSPEND_RET_DEFAULT:
+ 			kvm_riscv_vcpu_wfi(vcpu);
+ 			break;
+diff --git a/arch/riscv/kvm/vcpu_sbi_replace.c b/arch/riscv/kvm/vcpu_sbi_replace.c
+index 9c2ab3dfa93aa5..5fbf3f94f1e855 100644
+--- a/arch/riscv/kvm/vcpu_sbi_replace.c
++++ b/arch/riscv/kvm/vcpu_sbi_replace.c
+@@ -21,7 +21,7 @@ static int kvm_sbi_ext_time_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ 	u64 next_cycle;
+ 
+ 	if (cp->a6 != SBI_EXT_TIME_SET_TIMER) {
+-		retdata->err_val = SBI_ERR_INVALID_PARAM;
++		retdata->err_val = SBI_ERR_NOT_SUPPORTED;
+ 		return 0;
+ 	}
+ 
+@@ -51,9 +51,10 @@ static int kvm_sbi_ext_ipi_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ 	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
+ 	unsigned long hmask = cp->a0;
+ 	unsigned long hbase = cp->a1;
++	unsigned long hart_bit = 0, sentmask = 0;
+ 
+ 	if (cp->a6 != SBI_EXT_IPI_SEND_IPI) {
+-		retdata->err_val = SBI_ERR_INVALID_PARAM;
++		retdata->err_val = SBI_ERR_NOT_SUPPORTED;
+ 		return 0;
+ 	}
+ 
+@@ -62,15 +63,23 @@ static int kvm_sbi_ext_ipi_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ 		if (hbase != -1UL) {
+ 			if (tmp->vcpu_id < hbase)
+ 				continue;
+-			if (!(hmask & (1UL << (tmp->vcpu_id - hbase))))
++			hart_bit = tmp->vcpu_id - hbase;
++			if (hart_bit >= __riscv_xlen)
++				goto done;
++			if (!(hmask & (1UL << hart_bit)))
+ 				continue;
+ 		}
+ 		ret = kvm_riscv_vcpu_set_interrupt(tmp, IRQ_VS_SOFT);
+ 		if (ret < 0)
+ 			break;
++		sentmask |= 1UL << hart_bit;
+ 		kvm_riscv_vcpu_pmu_incr_fw(tmp, SBI_PMU_FW_IPI_RCVD);
+ 	}
+ 
++done:
++	if (hbase != -1UL && (hmask ^ sentmask))
++		retdata->err_val = SBI_ERR_INVALID_PARAM;
++
+ 	return ret;
+ }
+ 
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index 1b0c2397d65753..6f8e9af827e0c9 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -1333,6 +1333,7 @@ config X86_REBOOTFIXUPS
+ config MICROCODE
+ 	def_bool y
+ 	depends on CPU_SUP_AMD || CPU_SUP_INTEL
++	select CRYPTO_LIB_SHA256 if CPU_SUP_AMD
+ 
+ config MICROCODE_INITRD32
+ 	def_bool y
+diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
+index 65ab6460aed4d7..0d33c85da45355 100644
+--- a/arch/x86/events/core.c
++++ b/arch/x86/events/core.c
+@@ -628,7 +628,7 @@ int x86_pmu_hw_config(struct perf_event *event)
+ 	if (event->attr.type == event->pmu->type)
+ 		event->hw.config |= x86_pmu_get_event_config(event);
+ 
+-	if (event->attr.sample_period && x86_pmu.limit_period) {
++	if (!event->attr.freq && x86_pmu.limit_period) {
+ 		s64 left = event->attr.sample_period;
+ 		x86_pmu.limit_period(event, &left);
+ 		if (left > event->attr.sample_period)
+diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
+index 9651275aecd1bb..dfec2c61e3547d 100644
+--- a/arch/x86/kernel/cpu/cyrix.c
++++ b/arch/x86/kernel/cpu/cyrix.c
+@@ -153,8 +153,8 @@ static void geode_configure(void)
+ 	u8 ccr3;
+ 	local_irq_save(flags);
+ 
+-	/* Suspend on halt power saving and enable #SUSP pin */
+-	setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x88);
++	/* Suspend on halt power saving */
++	setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x08);
+ 
+ 	ccr3 = getCx86(CX86_CCR3);
+ 	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);	/* enable MAPEN */
+diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
+index fb5d0c67fbab17..f5365b32582a5c 100644
+--- a/arch/x86/kernel/cpu/microcode/amd.c
++++ b/arch/x86/kernel/cpu/microcode/amd.c
+@@ -23,14 +23,18 @@
+ 
+ #include <linux/earlycpio.h>
+ #include <linux/firmware.h>
++#include <linux/bsearch.h>
+ #include <linux/uaccess.h>
+ #include <linux/vmalloc.h>
+ #include <linux/initrd.h>
+ #include <linux/kernel.h>
+ #include <linux/pci.h>
+ 
++#include <crypto/sha2.h>
++
+ #include <asm/microcode.h>
+ #include <asm/processor.h>
++#include <asm/cmdline.h>
+ #include <asm/setup.h>
+ #include <asm/cpu.h>
+ #include <asm/msr.h>
+@@ -145,6 +149,107 @@ ucode_path[] __maybe_unused = "kernel/x86/microcode/AuthenticAMD.bin";
+  */
+ static u32 bsp_cpuid_1_eax __ro_after_init;
+ 
++static bool sha_check = true;
++
++struct patch_digest {
++	u32 patch_id;
++	u8 sha256[SHA256_DIGEST_SIZE];
++};
++
++#include "amd_shas.c"
++
++static int cmp_id(const void *key, const void *elem)
++{
++	struct patch_digest *pd = (struct patch_digest *)elem;
++	u32 patch_id = *(u32 *)key;
++
++	if (patch_id == pd->patch_id)
++		return 0;
++	else if (patch_id < pd->patch_id)
++		return -1;
++	else
++		return 1;
++}
++
++static bool need_sha_check(u32 cur_rev)
++{
++	switch (cur_rev >> 8) {
++	case 0x80012: return cur_rev <= 0x800126f; break;
++	case 0x83010: return cur_rev <= 0x830107c; break;
++	case 0x86001: return cur_rev <= 0x860010e; break;
++	case 0x86081: return cur_rev <= 0x8608108; break;
++	case 0x87010: return cur_rev <= 0x8701034; break;
++	case 0x8a000: return cur_rev <= 0x8a0000a; break;
++	case 0xa0011: return cur_rev <= 0xa0011da; break;
++	case 0xa0012: return cur_rev <= 0xa001243; break;
++	case 0xa1011: return cur_rev <= 0xa101153; break;
++	case 0xa1012: return cur_rev <= 0xa10124e; break;
++	case 0xa1081: return cur_rev <= 0xa108109; break;
++	case 0xa2010: return cur_rev <= 0xa20102f; break;
++	case 0xa2012: return cur_rev <= 0xa201212; break;
++	case 0xa6012: return cur_rev <= 0xa60120a; break;
++	case 0xa7041: return cur_rev <= 0xa704109; break;
++	case 0xa7052: return cur_rev <= 0xa705208; break;
++	case 0xa7080: return cur_rev <= 0xa708009; break;
++	case 0xa70c0: return cur_rev <= 0xa70c009; break;
++	case 0xaa002: return cur_rev <= 0xaa00218; break;
++	default: break;
++	}
++
++	pr_info("You should not be seeing this. Please send the following couple of lines to x86-<at>-kernel.org\n");
++	pr_info("CPUID(1).EAX: 0x%x, current revision: 0x%x\n", bsp_cpuid_1_eax, cur_rev);
++	return true;
++}
++
++static bool verify_sha256_digest(u32 patch_id, u32 cur_rev, const u8 *data, unsigned int len)
++{
++	struct patch_digest *pd = NULL;
++	u8 digest[SHA256_DIGEST_SIZE];
++	struct sha256_state s;
++	int i;
++
++	if (x86_family(bsp_cpuid_1_eax) < 0x17 ||
++	    x86_family(bsp_cpuid_1_eax) > 0x19)
++		return true;
++
++	if (!need_sha_check(cur_rev))
++		return true;
++
++	if (!sha_check)
++		return true;
++
++	pd = bsearch(&patch_id, phashes, ARRAY_SIZE(phashes), sizeof(struct patch_digest), cmp_id);
++	if (!pd) {
++		pr_err("No sha256 digest for patch ID: 0x%x found\n", patch_id);
++		return false;
++	}
++
++	sha256_init(&s);
++	sha256_update(&s, data, len);
++	sha256_final(&s, digest);
++
++	if (memcmp(digest, pd->sha256, sizeof(digest))) {
++		pr_err("Patch 0x%x SHA256 digest mismatch!\n", patch_id);
++
++		for (i = 0; i < SHA256_DIGEST_SIZE; i++)
++			pr_cont("0x%x ", digest[i]);
++		pr_info("\n");
++
++		return false;
++	}
++
++	return true;
++}
++
++static u32 get_patch_level(void)
++{
++	u32 rev, dummy __always_unused;
++
++	native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
++
++	return rev;
++}
++
+ static union cpuid_1_eax ucode_rev_to_cpuid(unsigned int val)
+ {
+ 	union zen_patch_rev p;
+@@ -246,8 +351,7 @@ static bool verify_equivalence_table(const u8 *buf, size_t buf_size)
+  * On success, @sh_psize returns the patch size according to the section header,
+  * to the caller.
+  */
+-static bool
+-__verify_patch_section(const u8 *buf, size_t buf_size, u32 *sh_psize)
++static bool __verify_patch_section(const u8 *buf, size_t buf_size, u32 *sh_psize)
+ {
+ 	u32 p_type, p_size;
+ 	const u32 *hdr;
+@@ -484,10 +588,13 @@ static void scan_containers(u8 *ucode, size_t size, struct cont_desc *desc)
+ 	}
+ }
+ 
+-static int __apply_microcode_amd(struct microcode_amd *mc, unsigned int psize)
++static bool __apply_microcode_amd(struct microcode_amd *mc, u32 *cur_rev,
++				  unsigned int psize)
+ {
+ 	unsigned long p_addr = (unsigned long)&mc->hdr.data_code;
+-	u32 rev, dummy;
++
++	if (!verify_sha256_digest(mc->hdr.patch_id, *cur_rev, (const u8 *)p_addr, psize))
++		return false;
+ 
+ 	native_wrmsrl(MSR_AMD64_PATCH_LOADER, p_addr);
+ 
+@@ -505,47 +612,13 @@ static int __apply_microcode_amd(struct microcode_amd *mc, unsigned int psize)
+ 	}
+ 
+ 	/* verify patch application was successful */
+-	native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
+-
+-	if (rev != mc->hdr.patch_id)
+-		return -1;
++	*cur_rev = get_patch_level();
++	if (*cur_rev != mc->hdr.patch_id)
++		return false;
+ 
+-	return 0;
++	return true;
+ }
+ 
+-/*
+- * Early load occurs before we can vmalloc(). So we look for the microcode
+- * patch container file in initrd, traverse equivalent cpu table, look for a
+- * matching microcode patch, and update, all in initrd memory in place.
+- * When vmalloc() is available for use later -- on 64-bit during first AP load,
+- * and on 32-bit during save_microcode_in_initrd_amd() -- we can call
+- * load_microcode_amd() to save equivalent cpu table and microcode patches in
+- * kernel heap memory.
+- *
+- * Returns true if container found (sets @desc), false otherwise.
+- */
+-static bool early_apply_microcode(u32 old_rev, void *ucode, size_t size)
+-{
+-	struct cont_desc desc = { 0 };
+-	struct microcode_amd *mc;
+-	bool ret = false;
+-
+-	scan_containers(ucode, size, &desc);
+-
+-	mc = desc.mc;
+-	if (!mc)
+-		return ret;
+-
+-	/*
+-	 * Allow application of the same revision to pick up SMT-specific
+-	 * changes even if the revision of the other SMT thread is already
+-	 * up-to-date.
+-	 */
+-	if (old_rev > mc->hdr.patch_id)
+-		return ret;
+-
+-	return !__apply_microcode_amd(mc, desc.psize);
+-}
+ 
+ static bool get_builtin_microcode(struct cpio_data *cp)
+ {
+@@ -569,64 +642,74 @@ static bool get_builtin_microcode(struct cpio_data *cp)
+ 	return false;
+ }
+ 
+-static void __init find_blobs_in_containers(struct cpio_data *ret)
++static bool __init find_blobs_in_containers(struct cpio_data *ret)
+ {
+ 	struct cpio_data cp;
++	bool found;
+ 
+ 	if (!get_builtin_microcode(&cp))
+ 		cp = find_microcode_in_initrd(ucode_path);
+ 
+-	*ret = cp;
++	found = cp.data && cp.size;
++	if (found)
++		*ret = cp;
++
++	return found;
+ }
+ 
++/*
++ * Early load occurs before we can vmalloc(). So we look for the microcode
++ * patch container file in initrd, traverse equivalent cpu table, look for a
++ * matching microcode patch, and update, all in initrd memory in place.
++ * When vmalloc() is available for use later -- on 64-bit during first AP load,
++ * and on 32-bit during save_microcode_in_initrd() -- we can call
++ * load_microcode_amd() to save equivalent cpu table and microcode patches in
++ * kernel heap memory.
++ */
+ void __init load_ucode_amd_bsp(struct early_load_data *ed, unsigned int cpuid_1_eax)
+ {
++	struct cont_desc desc = { };
++	struct microcode_amd *mc;
+ 	struct cpio_data cp = { };
+-	u32 dummy;
++	char buf[4];
++	u32 rev;
++
++	if (cmdline_find_option(boot_command_line, "microcode.amd_sha_check", buf, 4)) {
++		if (!strncmp(buf, "off", 3)) {
++			sha_check = false;
++			pr_warn_once("It is a very very bad idea to disable the blobs SHA check!\n");
++			add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
++		}
++	}
+ 
+ 	bsp_cpuid_1_eax = cpuid_1_eax;
+ 
+-	native_rdmsr(MSR_AMD64_PATCH_LEVEL, ed->old_rev, dummy);
++	rev = get_patch_level();
++	ed->old_rev = rev;
+ 
+ 	/* Needed in load_microcode_amd() */
+ 	ucode_cpu_info[0].cpu_sig.sig = cpuid_1_eax;
+ 
+-	find_blobs_in_containers(&cp);
+-	if (!(cp.data && cp.size))
++	if (!find_blobs_in_containers(&cp))
+ 		return;
+ 
+-	if (early_apply_microcode(ed->old_rev, cp.data, cp.size))
+-		native_rdmsr(MSR_AMD64_PATCH_LEVEL, ed->new_rev, dummy);
+-}
+-
+-static enum ucode_state _load_microcode_amd(u8 family, const u8 *data, size_t size);
+-
+-static int __init save_microcode_in_initrd(void)
+-{
+-	unsigned int cpuid_1_eax = native_cpuid_eax(1);
+-	struct cpuinfo_x86 *c = &boot_cpu_data;
+-	struct cont_desc desc = { 0 };
+-	enum ucode_state ret;
+-	struct cpio_data cp;
+-
+-	if (dis_ucode_ldr || c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10)
+-		return 0;
+-
+-	find_blobs_in_containers(&cp);
+-	if (!(cp.data && cp.size))
+-		return -EINVAL;
+-
+ 	scan_containers(cp.data, cp.size, &desc);
+-	if (!desc.mc)
+-		return -EINVAL;
+ 
+-	ret = _load_microcode_amd(x86_family(cpuid_1_eax), desc.data, desc.size);
+-	if (ret > UCODE_UPDATED)
+-		return -EINVAL;
++	mc = desc.mc;
++	if (!mc)
++		return;
+ 
+-	return 0;
++	/*
++	 * Allow application of the same revision to pick up SMT-specific
++	 * changes even if the revision of the other SMT thread is already
++	 * up-to-date.
++	 */
++	if (ed->old_rev > mc->hdr.patch_id)
++		return;
++
++	if (__apply_microcode_amd(mc, &rev, desc.psize))
++		ed->new_rev = rev;
+ }
+-early_initcall(save_microcode_in_initrd);
+ 
+ static inline bool patch_cpus_equivalent(struct ucode_patch *p,
+ 					 struct ucode_patch *n,
+@@ -727,14 +810,9 @@ static void free_cache(void)
+ static struct ucode_patch *find_patch(unsigned int cpu)
+ {
+ 	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
+-	u32 rev, dummy __always_unused;
+ 	u16 equiv_id = 0;
+ 
+-	/* fetch rev if not populated yet: */
+-	if (!uci->cpu_sig.rev) {
+-		rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
+-		uci->cpu_sig.rev = rev;
+-	}
++	uci->cpu_sig.rev = get_patch_level();
+ 
+ 	if (x86_family(bsp_cpuid_1_eax) < 0x17) {
+ 		equiv_id = find_equiv_id(&equiv_table, uci->cpu_sig.sig);
+@@ -757,22 +835,20 @@ void reload_ucode_amd(unsigned int cpu)
+ 
+ 	mc = p->data;
+ 
+-	rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
+-
++	rev = get_patch_level();
+ 	if (rev < mc->hdr.patch_id) {
+-		if (!__apply_microcode_amd(mc, p->size))
+-			pr_info_once("reload revision: 0x%08x\n", mc->hdr.patch_id);
++		if (__apply_microcode_amd(mc, &rev, p->size))
++			pr_info_once("reload revision: 0x%08x\n", rev);
+ 	}
+ }
+ 
+ static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
+ {
+-	struct cpuinfo_x86 *c = &cpu_data(cpu);
+ 	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
+ 	struct ucode_patch *p;
+ 
+ 	csig->sig = cpuid_eax(0x00000001);
+-	csig->rev = c->microcode;
++	csig->rev = get_patch_level();
+ 
+ 	/*
+ 	 * a patch could have been loaded early, set uci->mc so that
+@@ -813,7 +889,7 @@ static enum ucode_state apply_microcode_amd(int cpu)
+ 		goto out;
+ 	}
+ 
+-	if (__apply_microcode_amd(mc_amd, p->size)) {
++	if (!__apply_microcode_amd(mc_amd, &rev, p->size)) {
+ 		pr_err("CPU%d: update failed for patch_level=0x%08x\n",
+ 			cpu, mc_amd->hdr.patch_id);
+ 		return UCODE_ERROR;
+@@ -935,8 +1011,7 @@ static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover,
+ }
+ 
+ /* Scan the blob in @data and add microcode patches to the cache. */
+-static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
+-					     size_t size)
++static enum ucode_state __load_microcode_amd(u8 family, const u8 *data, size_t size)
+ {
+ 	u8 *fw = (u8 *)data;
+ 	size_t offset;
+@@ -1011,6 +1086,32 @@ static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t siz
+ 	return ret;
+ }
+ 
++static int __init save_microcode_in_initrd(void)
++{
++	unsigned int cpuid_1_eax = native_cpuid_eax(1);
++	struct cpuinfo_x86 *c = &boot_cpu_data;
++	struct cont_desc desc = { 0 };
++	enum ucode_state ret;
++	struct cpio_data cp;
++
++	if (dis_ucode_ldr || c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10)
++		return 0;
++
++	if (!find_blobs_in_containers(&cp))
++		return -EINVAL;
++
++	scan_containers(cp.data, cp.size, &desc);
++	if (!desc.mc)
++		return -EINVAL;
++
++	ret = _load_microcode_amd(x86_family(cpuid_1_eax), desc.data, desc.size);
++	if (ret > UCODE_UPDATED)
++		return -EINVAL;
++
++	return 0;
++}
++early_initcall(save_microcode_in_initrd);
++
+ /*
+  * AMD microcode firmware naming convention, up to family 15h they are in
+  * the legacy file:
+diff --git a/arch/x86/kernel/cpu/microcode/amd_shas.c b/arch/x86/kernel/cpu/microcode/amd_shas.c
+new file mode 100644
+index 00000000000000..2a1655b1fdd883
+--- /dev/null
++++ b/arch/x86/kernel/cpu/microcode/amd_shas.c
+@@ -0,0 +1,444 @@
++/* Keep 'em sorted. */
++static const struct patch_digest phashes[] = {
++ { 0x8001227, {
++		0x99,0xc0,0x9b,0x2b,0xcc,0x9f,0x52,0x1b,
++		0x1a,0x5f,0x1d,0x83,0xa1,0x6c,0xc4,0x46,
++		0xe2,0x6c,0xda,0x73,0xfb,0x2d,0x23,0xa8,
++		0x77,0xdc,0x15,0x31,0x33,0x4a,0x46,0x18,
++	}
++ },
++ { 0x8001250, {
++		0xc0,0x0b,0x6b,0x19,0xfd,0x5c,0x39,0x60,
++		0xd5,0xc3,0x57,0x46,0x54,0xe4,0xd1,0xaa,
++		0xa8,0xf7,0x1f,0xa8,0x6a,0x60,0x3e,0xe3,
++		0x27,0x39,0x8e,0x53,0x30,0xf8,0x49,0x19,
++	}
++ },
++ { 0x800126e, {
++		0xf3,0x8b,0x2b,0xb6,0x34,0xe3,0xc8,0x2c,
++		0xef,0xec,0x63,0x6d,0xc8,0x76,0x77,0xb3,
++		0x25,0x5a,0xb7,0x52,0x8c,0x83,0x26,0xe6,
++		0x4c,0xbe,0xbf,0xe9,0x7d,0x22,0x6a,0x43,
++	}
++ },
++ { 0x800126f, {
++		0x2b,0x5a,0xf2,0x9c,0xdd,0xd2,0x7f,0xec,
++		0xec,0x96,0x09,0x57,0xb0,0x96,0x29,0x8b,
++		0x2e,0x26,0x91,0xf0,0x49,0x33,0x42,0x18,
++		0xdd,0x4b,0x65,0x5a,0xd4,0x15,0x3d,0x33,
++	}
++ },
++ { 0x800820d, {
++		0x68,0x98,0x83,0xcd,0x22,0x0d,0xdd,0x59,
++		0x73,0x2c,0x5b,0x37,0x1f,0x84,0x0e,0x67,
++		0x96,0x43,0x83,0x0c,0x46,0x44,0xab,0x7c,
++		0x7b,0x65,0x9e,0x57,0xb5,0x90,0x4b,0x0e,
++	}
++ },
++ { 0x8301025, {
++		0xe4,0x7d,0xdb,0x1e,0x14,0xb4,0x5e,0x36,
++		0x8f,0x3e,0x48,0x88,0x3c,0x6d,0x76,0xa1,
++		0x59,0xc6,0xc0,0x72,0x42,0xdf,0x6c,0x30,
++		0x6f,0x0b,0x28,0x16,0x61,0xfc,0x79,0x77,
++	}
++ },
++ { 0x8301055, {
++		0x81,0x7b,0x99,0x1b,0xae,0x2d,0x4f,0x9a,
++		0xef,0x13,0xce,0xb5,0x10,0xaf,0x6a,0xea,
++		0xe5,0xb0,0x64,0x98,0x10,0x68,0x34,0x3b,
++		0x9d,0x7a,0xd6,0x22,0x77,0x5f,0xb3,0x5b,
++	}
++ },
++ { 0x8301072, {
++		0xcf,0x76,0xa7,0x1a,0x49,0xdf,0x2a,0x5e,
++		0x9e,0x40,0x70,0xe5,0xdd,0x8a,0xa8,0x28,
++		0x20,0xdc,0x91,0xd8,0x2c,0xa6,0xa0,0xb1,
++		0x2d,0x22,0x26,0x94,0x4b,0x40,0x85,0x30,
++	}
++ },
++ { 0x830107a, {
++		0x2a,0x65,0x8c,0x1a,0x5e,0x07,0x21,0x72,
++		0xdf,0x90,0xa6,0x51,0x37,0xd3,0x4b,0x34,
++		0xc4,0xda,0x03,0xe1,0x8a,0x6c,0xfb,0x20,
++		0x04,0xb2,0x81,0x05,0xd4,0x87,0xf4,0x0a,
++	}
++ },
++ { 0x830107b, {
++		0xb3,0x43,0x13,0x63,0x56,0xc1,0x39,0xad,
++		0x10,0xa6,0x2b,0xcc,0x02,0xe6,0x76,0x2a,
++		0x1e,0x39,0x58,0x3e,0x23,0x6e,0xa4,0x04,
++		0x95,0xea,0xf9,0x6d,0xc2,0x8a,0x13,0x19,
++	}
++ },
++ { 0x830107c, {
++		0x21,0x64,0xde,0xfb,0x9f,0x68,0x96,0x47,
++		0x70,0x5c,0xe2,0x8f,0x18,0x52,0x6a,0xac,
++		0xa4,0xd2,0x2e,0xe0,0xde,0x68,0x66,0xc3,
++		0xeb,0x1e,0xd3,0x3f,0xbc,0x51,0x1d,0x38,
++	}
++ },
++ { 0x860010d, {
++		0x86,0xb6,0x15,0x83,0xbc,0x3b,0x9c,0xe0,
++		0xb3,0xef,0x1d,0x99,0x84,0x35,0x15,0xf7,
++		0x7c,0x2a,0xc6,0x42,0xdb,0x73,0x07,0x5c,
++		0x7d,0xc3,0x02,0xb5,0x43,0x06,0x5e,0xf8,
++	}
++ },
++ { 0x8608108, {
++		0x14,0xfe,0x57,0x86,0x49,0xc8,0x68,0xe2,
++		0x11,0xa3,0xcb,0x6e,0xff,0x6e,0xd5,0x38,
++		0xfe,0x89,0x1a,0xe0,0x67,0xbf,0xc4,0xcc,
++		0x1b,0x9f,0x84,0x77,0x2b,0x9f,0xaa,0xbd,
++	}
++ },
++ { 0x8701034, {
++		0xc3,0x14,0x09,0xa8,0x9c,0x3f,0x8d,0x83,
++		0x9b,0x4c,0xa5,0xb7,0x64,0x8b,0x91,0x5d,
++		0x85,0x6a,0x39,0x26,0x1e,0x14,0x41,0xa8,
++		0x75,0xea,0xa6,0xf9,0xc9,0xd1,0xea,0x2b,
++	}
++ },
++ { 0x8a00008, {
++		0xd7,0x2a,0x93,0xdc,0x05,0x2f,0xa5,0x6e,
++		0x0c,0x61,0x2c,0x07,0x9f,0x38,0xe9,0x8e,
++		0xef,0x7d,0x2a,0x05,0x4d,0x56,0xaf,0x72,
++		0xe7,0x56,0x47,0x6e,0x60,0x27,0xd5,0x8c,
++	}
++ },
++ { 0x8a0000a, {
++		0x73,0x31,0x26,0x22,0xd4,0xf9,0xee,0x3c,
++		0x07,0x06,0xe7,0xb9,0xad,0xd8,0x72,0x44,
++		0x33,0x31,0xaa,0x7d,0xc3,0x67,0x0e,0xdb,
++		0x47,0xb5,0xaa,0xbc,0xf5,0xbb,0xd9,0x20,
++	}
++ },
++ { 0xa00104c, {
++		0x3c,0x8a,0xfe,0x04,0x62,0xd8,0x6d,0xbe,
++		0xa7,0x14,0x28,0x64,0x75,0xc0,0xa3,0x76,
++		0xb7,0x92,0x0b,0x97,0x0a,0x8e,0x9c,0x5b,
++		0x1b,0xc8,0x9d,0x3a,0x1e,0x81,0x3d,0x3b,
++	}
++ },
++ { 0xa00104e, {
++		0xc4,0x35,0x82,0x67,0xd2,0x86,0xe5,0xb2,
++		0xfd,0x69,0x12,0x38,0xc8,0x77,0xba,0xe0,
++		0x70,0xf9,0x77,0x89,0x10,0xa6,0x74,0x4e,
++		0x56,0x58,0x13,0xf5,0x84,0x70,0x28,0x0b,
++	}
++ },
++ { 0xa001053, {
++		0x92,0x0e,0xf4,0x69,0x10,0x3b,0xf9,0x9d,
++		0x31,0x1b,0xa6,0x99,0x08,0x7d,0xd7,0x25,
++		0x7e,0x1e,0x89,0xba,0x35,0x8d,0xac,0xcb,
++		0x3a,0xb4,0xdf,0x58,0x12,0xcf,0xc0,0xc3,
++	}
++ },
++ { 0xa001058, {
++		0x33,0x7d,0xa9,0xb5,0x4e,0x62,0x13,0x36,
++		0xef,0x66,0xc9,0xbd,0x0a,0xa6,0x3b,0x19,
++		0xcb,0xf5,0xc2,0xc3,0x55,0x47,0x20,0xec,
++		0x1f,0x7b,0xa1,0x44,0x0e,0x8e,0xa4,0xb2,
++	}
++ },
++ { 0xa001075, {
++		0x39,0x02,0x82,0xd0,0x7c,0x26,0x43,0xe9,
++		0x26,0xa3,0xd9,0x96,0xf7,0x30,0x13,0x0a,
++		0x8a,0x0e,0xac,0xe7,0x1d,0xdc,0xe2,0x0f,
++		0xcb,0x9e,0x8d,0xbc,0xd2,0xa2,0x44,0xe0,
++	}
++ },
++ { 0xa001078, {
++		0x2d,0x67,0xc7,0x35,0xca,0xef,0x2f,0x25,
++		0x4c,0x45,0x93,0x3f,0x36,0x01,0x8c,0xce,
++		0xa8,0x5b,0x07,0xd3,0xc1,0x35,0x3c,0x04,
++		0x20,0xa2,0xfc,0xdc,0xe6,0xce,0x26,0x3e,
++	}
++ },
++ { 0xa001079, {
++		0x43,0xe2,0x05,0x9c,0xfd,0xb7,0x5b,0xeb,
++		0x5b,0xe9,0xeb,0x3b,0x96,0xf4,0xe4,0x93,
++		0x73,0x45,0x3e,0xac,0x8d,0x3b,0xe4,0xdb,
++		0x10,0x31,0xc1,0xe4,0xa2,0xd0,0x5a,0x8a,
++	}
++ },
++ { 0xa00107a, {
++		0x5f,0x92,0xca,0xff,0xc3,0x59,0x22,0x5f,
++		0x02,0xa0,0x91,0x3b,0x4a,0x45,0x10,0xfd,
++		0x19,0xe1,0x8a,0x6d,0x9a,0x92,0xc1,0x3f,
++		0x75,0x78,0xac,0x78,0x03,0x1d,0xdb,0x18,
++	}
++ },
++ { 0xa001143, {
++		0x56,0xca,0xf7,0x43,0x8a,0x4c,0x46,0x80,
++		0xec,0xde,0xe5,0x9c,0x50,0x84,0x9a,0x42,
++		0x27,0xe5,0x51,0x84,0x8f,0x19,0xc0,0x8d,
++		0x0c,0x25,0xb4,0xb0,0x8f,0x10,0xf3,0xf8,
++	}
++ },
++ { 0xa001144, {
++		0x42,0xd5,0x9b,0xa7,0xd6,0x15,0x29,0x41,
++		0x61,0xc4,0x72,0x3f,0xf3,0x06,0x78,0x4b,
++		0x65,0xf3,0x0e,0xfa,0x9c,0x87,0xde,0x25,
++		0xbd,0xb3,0x9a,0xf4,0x75,0x13,0x53,0xdc,
++	}
++ },
++ { 0xa00115d, {
++		0xd4,0xc4,0x49,0x36,0x89,0x0b,0x47,0xdd,
++		0xfb,0x2f,0x88,0x3b,0x5f,0xf2,0x8e,0x75,
++		0xc6,0x6c,0x37,0x5a,0x90,0x25,0x94,0x3e,
++		0x36,0x9c,0xae,0x02,0x38,0x6c,0xf5,0x05,
++	}
++ },
++ { 0xa001173, {
++		0x28,0xbb,0x9b,0xd1,0xa0,0xa0,0x7e,0x3a,
++		0x59,0x20,0xc0,0xa9,0xb2,0x5c,0xc3,0x35,
++		0x53,0x89,0xe1,0x4c,0x93,0x2f,0x1d,0xc3,
++		0xe5,0xf7,0xf3,0xc8,0x9b,0x61,0xaa,0x9e,
++	}
++ },
++ { 0xa0011a8, {
++		0x97,0xc6,0x16,0x65,0x99,0xa4,0x85,0x3b,
++		0xf6,0xce,0xaa,0x49,0x4a,0x3a,0xc5,0xb6,
++		0x78,0x25,0xbc,0x53,0xaf,0x5d,0xcf,0xf4,
++		0x23,0x12,0xbb,0xb1,0xbc,0x8a,0x02,0x2e,
++	}
++ },
++ { 0xa0011ce, {
++		0xcf,0x1c,0x90,0xa3,0x85,0x0a,0xbf,0x71,
++		0x94,0x0e,0x80,0x86,0x85,0x4f,0xd7,0x86,
++		0xae,0x38,0x23,0x28,0x2b,0x35,0x9b,0x4e,
++		0xfe,0xb8,0xcd,0x3d,0x3d,0x39,0xc9,0x6a,
++	}
++ },
++ { 0xa0011d1, {
++		0xdf,0x0e,0xca,0xde,0xf6,0xce,0x5c,0x1e,
++		0x4c,0xec,0xd7,0x71,0x83,0xcc,0xa8,0x09,
++		0xc7,0xc5,0xfe,0xb2,0xf7,0x05,0xd2,0xc5,
++		0x12,0xdd,0xe4,0xf3,0x92,0x1c,0x3d,0xb8,
++	}
++ },
++ { 0xa0011d3, {
++		0x91,0xe6,0x10,0xd7,0x57,0xb0,0x95,0x0b,
++		0x9a,0x24,0xee,0xf7,0xcf,0x56,0xc1,0xa6,
++		0x4a,0x52,0x7d,0x5f,0x9f,0xdf,0xf6,0x00,
++		0x65,0xf7,0xea,0xe8,0x2a,0x88,0xe2,0x26,
++	}
++ },
++ { 0xa0011d5, {
++		0xed,0x69,0x89,0xf4,0xeb,0x64,0xc2,0x13,
++		0xe0,0x51,0x1f,0x03,0x26,0x52,0x7d,0xb7,
++		0x93,0x5d,0x65,0xca,0xb8,0x12,0x1d,0x62,
++		0x0d,0x5b,0x65,0x34,0x69,0xb2,0x62,0x21,
++	}
++ },
++ { 0xa001223, {
++		0xfb,0x32,0x5f,0xc6,0x83,0x4f,0x8c,0xb8,
++		0xa4,0x05,0xf9,0x71,0x53,0x01,0x16,0xc4,
++		0x83,0x75,0x94,0xdd,0xeb,0x7e,0xb7,0x15,
++		0x8e,0x3b,0x50,0x29,0x8a,0x9c,0xcc,0x45,
++	}
++ },
++ { 0xa001224, {
++		0x0e,0x0c,0xdf,0xb4,0x89,0xee,0x35,0x25,
++		0xdd,0x9e,0xdb,0xc0,0x69,0x83,0x0a,0xad,
++		0x26,0xa9,0xaa,0x9d,0xfc,0x3c,0xea,0xf9,
++		0x6c,0xdc,0xd5,0x6d,0x8b,0x6e,0x85,0x4a,
++	}
++ },
++ { 0xa001227, {
++		0xab,0xc6,0x00,0x69,0x4b,0x50,0x87,0xad,
++		0x5f,0x0e,0x8b,0xea,0x57,0x38,0xce,0x1d,
++		0x0f,0x75,0x26,0x02,0xf6,0xd6,0x96,0xe9,
++		0x87,0xb9,0xd6,0x20,0x27,0x7c,0xd2,0xe0,
++	}
++ },
++ { 0xa001229, {
++		0x7f,0x49,0x49,0x48,0x46,0xa5,0x50,0xa6,
++		0x28,0x89,0x98,0xe2,0x9e,0xb4,0x7f,0x75,
++		0x33,0xa7,0x04,0x02,0xe4,0x82,0xbf,0xb4,
++		0xa5,0x3a,0xba,0x24,0x8d,0x31,0x10,0x1d,
++	}
++ },
++ { 0xa00122e, {
++		0x56,0x94,0xa9,0x5d,0x06,0x68,0xfe,0xaf,
++		0xdf,0x7a,0xff,0x2d,0xdf,0x74,0x0f,0x15,
++		0x66,0xfb,0x00,0xb5,0x51,0x97,0x9b,0xfa,
++		0xcb,0x79,0x85,0x46,0x25,0xb4,0xd2,0x10,
++	}
++ },
++ { 0xa001231, {
++		0x0b,0x46,0xa5,0xfc,0x18,0x15,0xa0,0x9e,
++		0xa6,0xdc,0xb7,0xff,0x17,0xf7,0x30,0x64,
++		0xd4,0xda,0x9e,0x1b,0xc3,0xfc,0x02,0x3b,
++		0xe2,0xc6,0x0e,0x41,0x54,0xb5,0x18,0xdd,
++	}
++ },
++ { 0xa001234, {
++		0x88,0x8d,0xed,0xab,0xb5,0xbd,0x4e,0xf7,
++		0x7f,0xd4,0x0e,0x95,0x34,0x91,0xff,0xcc,
++		0xfb,0x2a,0xcd,0xf7,0xd5,0xdb,0x4c,0x9b,
++		0xd6,0x2e,0x73,0x50,0x8f,0x83,0x79,0x1a,
++	}
++ },
++ { 0xa001236, {
++		0x3d,0x30,0x00,0xb9,0x71,0xba,0x87,0x78,
++		0xa8,0x43,0x55,0xc4,0x26,0x59,0xcf,0x9d,
++		0x93,0xce,0x64,0x0e,0x8b,0x72,0x11,0x8b,
++		0xa3,0x8f,0x51,0xe9,0xca,0x98,0xaa,0x25,
++	}
++ },
++ { 0xa001238, {
++		0x72,0xf7,0x4b,0x0c,0x7d,0x58,0x65,0xcc,
++		0x00,0xcc,0x57,0x16,0x68,0x16,0xf8,0x2a,
++		0x1b,0xb3,0x8b,0xe1,0xb6,0x83,0x8c,0x7e,
++		0xc0,0xcd,0x33,0xf2,0x8d,0xf9,0xef,0x59,
++	}
++ },
++ { 0xa00820c, {
++		0xa8,0x0c,0x81,0xc0,0xa6,0x00,0xe7,0xf3,
++		0x5f,0x65,0xd3,0xb9,0x6f,0xea,0x93,0x63,
++		0xf1,0x8c,0x88,0x45,0xd7,0x82,0x80,0xd1,
++		0xe1,0x3b,0x8d,0xb2,0xf8,0x22,0x03,0xe2,
++	}
++ },
++ { 0xa10113e, {
++		0x05,0x3c,0x66,0xd7,0xa9,0x5a,0x33,0x10,
++		0x1b,0xf8,0x9c,0x8f,0xed,0xfc,0xa7,0xa0,
++		0x15,0xe3,0x3f,0x4b,0x1d,0x0d,0x0a,0xd5,
++		0xfa,0x90,0xc4,0xed,0x9d,0x90,0xaf,0x53,
++	}
++ },
++ { 0xa101144, {
++		0xb3,0x0b,0x26,0x9a,0xf8,0x7c,0x02,0x26,
++		0x35,0x84,0x53,0xa4,0xd3,0x2c,0x7c,0x09,
++		0x68,0x7b,0x96,0xb6,0x93,0xef,0xde,0xbc,
++		0xfd,0x4b,0x15,0xd2,0x81,0xd3,0x51,0x47,
++	}
++ },
++ { 0xa101148, {
++		0x20,0xd5,0x6f,0x40,0x4a,0xf6,0x48,0x90,
++		0xc2,0x93,0x9a,0xc2,0xfd,0xac,0xef,0x4f,
++		0xfa,0xc0,0x3d,0x92,0x3c,0x6d,0x01,0x08,
++		0xf1,0x5e,0xb0,0xde,0xb4,0x98,0xae,0xc4,
++	}
++ },
++ { 0xa10123e, {
++		0x03,0xb9,0x2c,0x76,0x48,0x93,0xc9,0x18,
++		0xfb,0x56,0xfd,0xf7,0xe2,0x1d,0xca,0x4d,
++		0x1d,0x13,0x53,0x63,0xfe,0x42,0x6f,0xfc,
++		0x19,0x0f,0xf1,0xfc,0xa7,0xdd,0x89,0x1b,
++	}
++ },
++ { 0xa101244, {
++		0x71,0x56,0xb5,0x9f,0x21,0xbf,0xb3,0x3c,
++		0x8c,0xd7,0x36,0xd0,0x34,0x52,0x1b,0xb1,
++		0x46,0x2f,0x04,0xf0,0x37,0xd8,0x1e,0x72,
++		0x24,0xa2,0x80,0x84,0x83,0x65,0x84,0xc0,
++	}
++ },
++ { 0xa101248, {
++		0xed,0x3b,0x95,0xa6,0x68,0xa7,0x77,0x3e,
++		0xfc,0x17,0x26,0xe2,0x7b,0xd5,0x56,0x22,
++		0x2c,0x1d,0xef,0xeb,0x56,0xdd,0xba,0x6e,
++		0x1b,0x7d,0x64,0x9d,0x4b,0x53,0x13,0x75,
++	}
++ },
++ { 0xa108108, {
++		0xed,0xc2,0xec,0xa1,0x15,0xc6,0x65,0xe9,
++		0xd0,0xef,0x39,0xaa,0x7f,0x55,0x06,0xc6,
++		0xf5,0xd4,0x3f,0x7b,0x14,0xd5,0x60,0x2c,
++		0x28,0x1e,0x9c,0x59,0x69,0x99,0x4d,0x16,
++	}
++ },
++ { 0xa20102d, {
++		0xf9,0x6e,0xf2,0x32,0xd3,0x0f,0x5f,0x11,
++		0x59,0xa1,0xfe,0xcc,0xcd,0x9b,0x42,0x89,
++		0x8b,0x89,0x2f,0xb5,0xbb,0x82,0xef,0x23,
++		0x8c,0xe9,0x19,0x3e,0xcc,0x3f,0x7b,0xb4,
++	}
++ },
++ { 0xa201210, {
++		0xe8,0x6d,0x51,0x6a,0x8e,0x72,0xf3,0xfe,
++		0x6e,0x16,0xbc,0x62,0x59,0x40,0x17,0xe9,
++		0x6d,0x3d,0x0e,0x6b,0xa7,0xac,0xe3,0x68,
++		0xf7,0x55,0xf0,0x13,0xbb,0x22,0xf6,0x41,
++	}
++ },
++ { 0xa404107, {
++		0xbb,0x04,0x4e,0x47,0xdd,0x5e,0x26,0x45,
++		0x1a,0xc9,0x56,0x24,0xa4,0x4c,0x82,0xb0,
++		0x8b,0x0d,0x9f,0xf9,0x3a,0xdf,0xc6,0x81,
++		0x13,0xbc,0xc5,0x25,0xe4,0xc5,0xc3,0x99,
++	}
++ },
++ { 0xa500011, {
++		0x23,0x3d,0x70,0x7d,0x03,0xc3,0xc4,0xf4,
++		0x2b,0x82,0xc6,0x05,0xda,0x80,0x0a,0xf1,
++		0xd7,0x5b,0x65,0x3a,0x7d,0xab,0xdf,0xa2,
++		0x11,0x5e,0x96,0x7e,0x71,0xe9,0xfc,0x74,
++	}
++ },
++ { 0xa601209, {
++		0x66,0x48,0xd4,0x09,0x05,0xcb,0x29,0x32,
++		0x66,0xb7,0x9a,0x76,0xcd,0x11,0xf3,0x30,
++		0x15,0x86,0xcc,0x5d,0x97,0x0f,0xc0,0x46,
++		0xe8,0x73,0xe2,0xd6,0xdb,0xd2,0x77,0x1d,
++	}
++ },
++ { 0xa704107, {
++		0xf3,0xc6,0x58,0x26,0xee,0xac,0x3f,0xd6,
++		0xce,0xa1,0x72,0x47,0x3b,0xba,0x2b,0x93,
++		0x2a,0xad,0x8e,0x6b,0xea,0x9b,0xb7,0xc2,
++		0x64,0x39,0x71,0x8c,0xce,0xe7,0x41,0x39,
++	}
++ },
++ { 0xa705206, {
++		0x8d,0xc0,0x76,0xbd,0x58,0x9f,0x8f,0xa4,
++		0x12,0x9d,0x21,0xfb,0x48,0x21,0xbc,0xe7,
++		0x67,0x6f,0x04,0x18,0xae,0x20,0x87,0x4b,
++		0x03,0x35,0xe9,0xbe,0xfb,0x06,0xdf,0xfc,
++	}
++ },
++ { 0xa708007, {
++		0x6b,0x76,0xcc,0x78,0xc5,0x8a,0xa3,0xe3,
++		0x32,0x2d,0x79,0xe4,0xc3,0x80,0xdb,0xb2,
++		0x07,0xaa,0x3a,0xe0,0x57,0x13,0x72,0x80,
++		0xdf,0x92,0x73,0x84,0x87,0x3c,0x73,0x93,
++	}
++ },
++ { 0xa70c005, {
++		0x88,0x5d,0xfb,0x79,0x64,0xd8,0x46,0x3b,
++		0x4a,0x83,0x8e,0x77,0x7e,0xcf,0xb3,0x0f,
++		0x1f,0x1f,0xf1,0x97,0xeb,0xfe,0x56,0x55,
++		0xee,0x49,0xac,0xe1,0x8b,0x13,0xc5,0x13,
++	}
++ },
++ { 0xaa00116, {
++		0xe8,0x4c,0x2c,0x88,0xa1,0xac,0x24,0x63,
++		0x65,0xe5,0xaa,0x2d,0x16,0xa9,0xc3,0xf5,
++		0xfe,0x1d,0x5e,0x65,0xc7,0xaa,0x92,0x4d,
++		0x91,0xee,0x76,0xbb,0x4c,0x66,0x78,0xc9,
++	}
++ },
++ { 0xaa00212, {
++		0xbd,0x57,0x5d,0x0a,0x0a,0x30,0xc1,0x75,
++		0x95,0x58,0x5e,0x93,0x02,0x28,0x43,0x71,
++		0xed,0x42,0x29,0xc8,0xec,0x34,0x2b,0xb2,
++		0x1a,0x65,0x4b,0xfe,0x07,0x0f,0x34,0xa1,
++	}
++ },
++ { 0xaa00213, {
++		0xed,0x58,0xb7,0x76,0x81,0x7f,0xd9,0x3a,
++		0x1a,0xff,0x8b,0x34,0xb8,0x4a,0x99,0x0f,
++		0x28,0x49,0x6c,0x56,0x2b,0xdc,0xb7,0xed,
++		0x96,0xd5,0x9d,0xc1,0x7a,0xd4,0x51,0x9b,
++	}
++ },
++ { 0xaa00215, {
++		0x55,0xd3,0x28,0xcb,0x87,0xa9,0x32,0xe9,
++		0x4e,0x85,0x4b,0x7c,0x6b,0xd5,0x7c,0xd4,
++		0x1b,0x51,0x71,0x3a,0x0e,0x0b,0xdc,0x9b,
++		0x68,0x2f,0x46,0xee,0xfe,0xc6,0x6d,0xef,
++	}
++ },
++};
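
The table stays sorted by patch_id because verify_sha256_digest() looks entries up with bsearch() and the cmp_id() comparator added earlier in this patch. A standalone sketch of that lookup against a toy three-entry table (digests truncated, values illustrative):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct patch_digest {
        uint32_t patch_id;
        uint8_t sha256[32];
    };

    /* Toy table, sorted by patch_id as the real amd_shas.c table must be. */
    static const struct patch_digest phashes[] = {
        { 0x8001227, { 0x99, 0xc0 /* ... */ } },
        { 0x8001250, { 0xc0, 0x0b /* ... */ } },
        { 0xa201210, { 0xe8, 0x6d /* ... */ } },
    };

    static int cmp_id(const void *key, const void *elem)
    {
        const struct patch_digest *pd = elem;
        uint32_t patch_id = *(const uint32_t *)key;

        if (patch_id == pd->patch_id)
            return 0;
        return patch_id < pd->patch_id ? -1 : 1;
    }

    int main(void)
    {
        uint32_t wanted = 0x8001250;
        const struct patch_digest *pd;

        pd = bsearch(&wanted, phashes,
                     sizeof(phashes) / sizeof(phashes[0]),
                     sizeof(phashes[0]), cmp_id);
        if (pd)
            printf("patch 0x%x: digest starts 0x%02x\n",
                   pd->patch_id, pd->sha256[0]);
        else
            printf("patch 0x%x: no digest pinned\n", wanted);
        return 0;
    }
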
+diff --git a/arch/x86/kernel/cpu/microcode/internal.h b/arch/x86/kernel/cpu/microcode/internal.h
+index 21776c529fa97a..5df621752fefac 100644
+--- a/arch/x86/kernel/cpu/microcode/internal.h
++++ b/arch/x86/kernel/cpu/microcode/internal.h
+@@ -100,14 +100,12 @@ extern bool force_minrev;
+ #ifdef CONFIG_CPU_SUP_AMD
+ void load_ucode_amd_bsp(struct early_load_data *ed, unsigned int family);
+ void load_ucode_amd_ap(unsigned int family);
+-int save_microcode_in_initrd_amd(unsigned int family);
+ void reload_ucode_amd(unsigned int cpu);
+ struct microcode_ops *init_amd_microcode(void);
+ void exit_amd_microcode(void);
+ #else /* CONFIG_CPU_SUP_AMD */
+ static inline void load_ucode_amd_bsp(struct early_load_data *ed, unsigned int family) { }
+ static inline void load_ucode_amd_ap(unsigned int family) { }
+-static inline int save_microcode_in_initrd_amd(unsigned int family) { return -EINVAL; }
+ static inline void reload_ucode_amd(unsigned int cpu) { }
+ static inline struct microcode_ops *init_amd_microcode(void) { return NULL; }
+ static inline void exit_amd_microcode(void) { }
+diff --git a/block/blk-zoned.c b/block/blk-zoned.c
+index 767bcbce74facb..c11db5be253248 100644
+--- a/block/blk-zoned.c
++++ b/block/blk-zoned.c
+@@ -427,13 +427,14 @@ static bool disk_insert_zone_wplug(struct gendisk *disk,
+ 		}
+ 	}
+ 	hlist_add_head_rcu(&zwplug->node, &disk->zone_wplugs_hash[idx]);
++	atomic_inc(&disk->nr_zone_wplugs);
+ 	spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags);
+ 
+ 	return true;
+ }
+ 
+-static struct blk_zone_wplug *disk_get_zone_wplug(struct gendisk *disk,
+-						  sector_t sector)
++static struct blk_zone_wplug *disk_get_hashed_zone_wplug(struct gendisk *disk,
++							 sector_t sector)
+ {
+ 	unsigned int zno = disk_zone_no(disk, sector);
+ 	unsigned int idx = hash_32(zno, disk->zone_wplugs_hash_bits);
+@@ -454,6 +455,15 @@ static struct blk_zone_wplug *disk_get_zone_wplug(struct gendisk *disk,
+ 	return NULL;
+ }
+ 
++static inline struct blk_zone_wplug *disk_get_zone_wplug(struct gendisk *disk,
++							 sector_t sector)
++{
++	if (!atomic_read(&disk->nr_zone_wplugs))
++		return NULL;
++
++	return disk_get_hashed_zone_wplug(disk, sector);
++}
++
+ static void disk_free_zone_wplug_rcu(struct rcu_head *rcu_head)
+ {
+ 	struct blk_zone_wplug *zwplug =
+@@ -518,6 +528,7 @@ static void disk_remove_zone_wplug(struct gendisk *disk,
+ 	zwplug->flags |= BLK_ZONE_WPLUG_UNHASHED;
+ 	spin_lock_irqsave(&disk->zone_wplugs_lock, flags);
+ 	hlist_del_init_rcu(&zwplug->node);
++	atomic_dec(&disk->nr_zone_wplugs);
+ 	spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags);
+ 	disk_put_zone_wplug(zwplug);
+ }
+@@ -607,6 +618,11 @@ static void disk_zone_wplug_abort(struct blk_zone_wplug *zwplug)
+ {
+ 	struct bio *bio;
+ 
++	if (bio_list_empty(&zwplug->bio_list))
++		return;
++
++	pr_warn_ratelimited("%s: zone %u: Aborting plugged BIOs\n",
++			    zwplug->disk->disk_name, zwplug->zone_no);
+ 	while ((bio = bio_list_pop(&zwplug->bio_list)))
+ 		blk_zone_wplug_bio_io_error(zwplug, bio);
+ }
+@@ -1055,6 +1071,47 @@ static bool blk_zone_wplug_handle_write(struct bio *bio, unsigned int nr_segs)
+ 	return true;
+ }
+ 
++static void blk_zone_wplug_handle_native_zone_append(struct bio *bio)
++{
++	struct gendisk *disk = bio->bi_bdev->bd_disk;
++	struct blk_zone_wplug *zwplug;
++	unsigned long flags;
++
++	/*
++	 * We have native support for zone append operations, so we are not
++	 * going to handle @bio through plugging. However, we may already have a
++	 * zone write plug for the target zone if that zone was previously
++	 * partially written using regular writes. In such a case, we risk leaving
++	 * the plug in the disk hash table if the zone is fully written using
++	 * zone append operations. Avoid this by removing the zone write plug.
++	 */
++	zwplug = disk_get_zone_wplug(disk, bio->bi_iter.bi_sector);
++	if (likely(!zwplug))
++		return;
++
++	spin_lock_irqsave(&zwplug->lock, flags);
++
++	/*
++	 * We are about to remove the zone write plug. But if the user
++	 * (mistakenly) has issued regular writes together with native zone
++	 * append, we must abort the writes as otherwise the plugged BIOs would
++	 * not be executed by the plug BIO work as disk_get_zone_wplug() will
++	 * return NULL after the plug is removed. Aborting the plugged write
++	 * BIOs is consistent with the fact that these writes will most likely
++	 * fail anyway as there are no ordering guarantees between zone append
++	 * operations and regular write operations.
++	 */
++	if (!bio_list_empty(&zwplug->bio_list)) {
++		pr_warn_ratelimited("%s: zone %u: Invalid mix of zone append and regular writes\n",
++				    disk->disk_name, zwplug->zone_no);
++		disk_zone_wplug_abort(zwplug);
++	}
++	disk_remove_zone_wplug(disk, zwplug);
++	spin_unlock_irqrestore(&zwplug->lock, flags);
++
++	disk_put_zone_wplug(zwplug);
++}
++
+ /**
+  * blk_zone_plug_bio - Handle a zone write BIO with zone write plugging
+  * @bio: The BIO being submitted
+@@ -1111,8 +1168,10 @@ bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs)
+ 	 */
+ 	switch (bio_op(bio)) {
+ 	case REQ_OP_ZONE_APPEND:
+-		if (!bdev_emulates_zone_append(bdev))
++		if (!bdev_emulates_zone_append(bdev)) {
++			blk_zone_wplug_handle_native_zone_append(bio);
+ 			return false;
++		}
+ 		fallthrough;
+ 	case REQ_OP_WRITE:
+ 	case REQ_OP_WRITE_ZEROES:
+@@ -1299,6 +1358,7 @@ static int disk_alloc_zone_resources(struct gendisk *disk,
+ {
+ 	unsigned int i;
+ 
++	atomic_set(&disk->nr_zone_wplugs, 0);
+ 	disk->zone_wplugs_hash_bits =
+ 		min(ilog2(pool_size) + 1, BLK_ZONE_WPLUG_MAX_HASH_BITS);
+ 
+@@ -1353,6 +1413,7 @@ static void disk_destroy_zone_wplugs_hash_table(struct gendisk *disk)
+ 		}
+ 	}
+ 
++	WARN_ON_ONCE(atomic_read(&disk->nr_zone_wplugs));
+ 	kfree(disk->zone_wplugs_hash);
+ 	disk->zone_wplugs_hash = NULL;
+ 	disk->zone_wplugs_hash_bits = 0;
+@@ -1570,11 +1631,12 @@ static int blk_revalidate_seq_zone(struct blk_zone *zone, unsigned int idx,
+ 	}
+ 
+ 	/*
+-	 * We need to track the write pointer of all zones that are not
+-	 * empty nor full. So make sure we have a zone write plug for
+-	 * such zone if the device has a zone write plug hash table.
++	 * If the device needs zone append emulation, we need to track the
++	 * write pointer of all zones that are neither empty nor full. So make
++	 * sure we have a zone write plug for such a zone if the device has a
++	 * zone write plug hash table.
+ 	 */
+-	if (!disk->zone_wplugs_hash)
++	if (!queue_emulates_zone_append(disk->queue) || !disk->zone_wplugs_hash)
+ 		return 0;
+ 
+ 	disk_zone_wplug_sync_wp_offset(disk, zone);
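
The new atomic nr_zone_wplugs counter exists so disk_get_zone_wplug() can answer the common no-plugs case with a single atomic read instead of a hashed RCU walk; a racing insertion is tolerated because callers already cope with a plug appearing or vanishing right after lookup. A minimal userspace sketch of that fast-path shape, with C11 atomics standing in for the kernel's atomic_t and the hash walk stubbed out:

    #include <stdatomic.h>
    #include <stddef.h>
    #include <stdio.h>

    struct zone_wplug { unsigned int zone_no; };

    static atomic_int nr_zone_wplugs;  /* incremented on insert, decremented on remove */

    /* Stand-in for the RCU hash walk; not the interesting part here. */
    static struct zone_wplug *hashed_lookup(unsigned int zno)
    {
        (void)zno;
        return NULL;
    }

    static struct zone_wplug *get_zone_wplug(unsigned int zno)
    {
        /* Fast path: if no plug exists anywhere, skip the hash walk.
         * A racing insert is fine -- the caller handles a NULL result
         * the same way it handles a plug removed just after lookup. */
        if (!atomic_load(&nr_zone_wplugs))
            return NULL;

        return hashed_lookup(zno);
    }

    int main(void)
    {
        printf("lookup with empty table: %p\n", (void *)get_zone_wplug(0));
        atomic_fetch_add(&nr_zone_wplugs, 1);
        printf("lookup with one plug:    %p\n", (void *)get_zone_wplug(0));
        return 0;
    }
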
+diff --git a/drivers/firmware/cirrus/cs_dsp.c b/drivers/firmware/cirrus/cs_dsp.c
+index 419220fa42fd7e..bd1ea99c3b4751 100644
+--- a/drivers/firmware/cirrus/cs_dsp.c
++++ b/drivers/firmware/cirrus/cs_dsp.c
+@@ -1609,8 +1609,8 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware,
+ 				goto out_fw;
+ 			}
+ 
+-			ret = regmap_raw_write_async(regmap, reg, buf->buf,
+-						     le32_to_cpu(region->len));
++			ret = regmap_raw_write(regmap, reg, buf->buf,
++					       le32_to_cpu(region->len));
+ 			if (ret != 0) {
+ 				cs_dsp_err(dsp,
+ 					   "%s.%d: Failed to write %d bytes at %d in %s: %d\n",
+@@ -1625,12 +1625,6 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware,
+ 		regions++;
+ 	}
+ 
+-	ret = regmap_async_complete(regmap);
+-	if (ret != 0) {
+-		cs_dsp_err(dsp, "Failed to complete async write: %d\n", ret);
+-		goto out_fw;
+-	}
+-
+ 	if (pos > firmware->size)
+ 		cs_dsp_warn(dsp, "%s.%d: %zu bytes at end of file\n",
+ 			    file, regions, pos - firmware->size);
+@@ -1638,7 +1632,6 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware,
+ 	cs_dsp_debugfs_save_wmfwname(dsp, file);
+ 
+ out_fw:
+-	regmap_async_complete(regmap);
+ 	cs_dsp_buf_free(&buf_list);
+ 
+ 	if (ret == -EOVERFLOW)
+@@ -2326,8 +2319,8 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware
+ 			cs_dsp_dbg(dsp, "%s.%d: Writing %d bytes at %x\n",
+ 				   file, blocks, le32_to_cpu(blk->len),
+ 				   reg);
+-			ret = regmap_raw_write_async(regmap, reg, buf->buf,
+-						     le32_to_cpu(blk->len));
++			ret = regmap_raw_write(regmap, reg, buf->buf,
++					       le32_to_cpu(blk->len));
+ 			if (ret != 0) {
+ 				cs_dsp_err(dsp,
+ 					   "%s.%d: Failed to write to %x in %s: %d\n",
+@@ -2339,10 +2332,6 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware
+ 		blocks++;
+ 	}
+ 
+-	ret = regmap_async_complete(regmap);
+-	if (ret != 0)
+-		cs_dsp_err(dsp, "Failed to complete async write: %d\n", ret);
+-
+ 	if (pos > firmware->size)
+ 		cs_dsp_warn(dsp, "%s.%d: %zu bytes at end of file\n",
+ 			    file, blocks, pos - firmware->size);
+@@ -2350,7 +2339,6 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware
+ 	cs_dsp_debugfs_save_binname(dsp, file);
+ 
+ out_fw:
+-	regmap_async_complete(regmap);
+ 	cs_dsp_buf_free(&buf_list);
+ 
+ 	if (ret == -EOVERFLOW)
+@@ -2561,8 +2549,8 @@ static int cs_dsp_adsp2_enable_core(struct cs_dsp *dsp)
+ {
+ 	int ret;
+ 
+-	ret = regmap_update_bits_async(dsp->regmap, dsp->base + ADSP2_CONTROL,
+-				       ADSP2_SYS_ENA, ADSP2_SYS_ENA);
++	ret = regmap_update_bits(dsp->regmap, dsp->base + ADSP2_CONTROL,
++				 ADSP2_SYS_ENA, ADSP2_SYS_ENA);
+ 	if (ret != 0)
+ 		return ret;
+ 
+diff --git a/drivers/firmware/efi/mokvar-table.c b/drivers/firmware/efi/mokvar-table.c
+index 5ed0602c2f75f0..4eb0dff4dfaf8b 100644
+--- a/drivers/firmware/efi/mokvar-table.c
++++ b/drivers/firmware/efi/mokvar-table.c
+@@ -103,9 +103,7 @@ void __init efi_mokvar_table_init(void)
+ 	void *va = NULL;
+ 	unsigned long cur_offset = 0;
+ 	unsigned long offset_limit;
+-	unsigned long map_size = 0;
+ 	unsigned long map_size_needed = 0;
+-	unsigned long size;
+ 	struct efi_mokvar_table_entry *mokvar_entry;
+ 	int err;
+ 
+@@ -134,48 +132,34 @@ void __init efi_mokvar_table_init(void)
+ 	 */
+ 	err = -EINVAL;
+ 	while (cur_offset + sizeof(*mokvar_entry) <= offset_limit) {
+-		mokvar_entry = va + cur_offset;
+-		map_size_needed = cur_offset + sizeof(*mokvar_entry);
+-		if (map_size_needed > map_size) {
+-			if (va)
+-				early_memunmap(va, map_size);
+-			/*
+-			 * Map a little more than the fixed size entry
+-			 * header, anticipating some data. It's safe to
+-			 * do so as long as we stay within current memory
+-			 * descriptor.
+-			 */
+-			map_size = min(map_size_needed + 2*EFI_PAGE_SIZE,
+-				       offset_limit);
+-			va = early_memremap(efi.mokvar_table, map_size);
+-			if (!va) {
+-				pr_err("Failed to map EFI MOKvar config table pa=0x%lx, size=%lu.\n",
+-				       efi.mokvar_table, map_size);
+-				return;
+-			}
+-			mokvar_entry = va + cur_offset;
++		if (va)
++			early_memunmap(va, sizeof(*mokvar_entry));
++		va = early_memremap(efi.mokvar_table + cur_offset, sizeof(*mokvar_entry));
++		if (!va) {
++			pr_err("Failed to map EFI MOKvar config table pa=0x%lx, size=%zu.\n",
++			       efi.mokvar_table + cur_offset, sizeof(*mokvar_entry));
++			return;
+ 		}
++		mokvar_entry = va;
+ 
+ 		/* Check for last sentinel entry */
+ 		if (mokvar_entry->name[0] == '\0') {
+ 			if (mokvar_entry->data_size != 0)
+ 				break;
+ 			err = 0;
++			map_size_needed = cur_offset + sizeof(*mokvar_entry);
+ 			break;
+ 		}
+ 
+-		/* Sanity check that the name is null terminated */
+-		size = strnlen(mokvar_entry->name,
+-			       sizeof(mokvar_entry->name));
+-		if (size >= sizeof(mokvar_entry->name))
+-			break;
++		/* Enforce that the name is NUL terminated */
++		mokvar_entry->name[sizeof(mokvar_entry->name) - 1] = '\0';
+ 
+ 		/* Advance to the next entry */
+-		cur_offset = map_size_needed + mokvar_entry->data_size;
++		cur_offset += sizeof(*mokvar_entry) + mokvar_entry->data_size;
+ 	}
+ 
+ 	if (va)
+-		early_memunmap(va, map_size);
++		early_memunmap(va, sizeof(*mokvar_entry));
+ 	if (err) {
+ 		pr_err("EFI MOKvar config table is not valid\n");
+ 		return;
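
Stripped of the early_memremap()/early_memunmap() plumbing, the reworked loop is the standard walk over variable-length records: map one fixed-size header, validate it, then advance by sizeof(*mokvar_entry) + data_size until the empty-name sentinel. A self-contained sketch of that traversal over an in-memory buffer (the record layout mimics, but is not, struct efi_mokvar_table_entry):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct entry {
        char name[8];           /* empty name marks the sentinel */
        uint64_t data_size;     /* bytes of payload following the header */
        unsigned char data[];
    };

    static int walk(const unsigned char *buf, size_t limit)
    {
        size_t off = 0;

        while (off + sizeof(struct entry) <= limit) {
            const struct entry *e = (const void *)(buf + off);

            if (e->name[0] == '\0')     /* sentinel ends the table */
                return e->data_size == 0 ? 0 : -1;

            /* The payload must also fit before stepping over it. */
            if (e->data_size > limit - off - sizeof(*e))
                return -1;

            printf("entry '%.8s', %llu payload bytes\n",
                   e->name, (unsigned long long)e->data_size);
            off += sizeof(*e) + e->data_size;
        }
        return -1;                      /* no sentinel found: invalid */
    }

    int main(void)
    {
        _Alignas(uint64_t) unsigned char buf[64] = { 0 };
        struct entry *e = (struct entry *)buf;

        memcpy(e->name, "MokList", 7);
        e->data_size = 8;               /* 8 payload bytes, then a zeroed sentinel */

        return walk(buf, sizeof(buf)) ? 1 : 0;
    }
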
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 45e28726e148e9..96845541b2d255 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -1542,6 +1542,13 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
+ 	if (amdgpu_sriov_vf(adev))
+ 		return 0;
+ 
++	/* resizing on Dell G5 SE platforms causes problems with runtime pm */
++	if ((amdgpu_runtime_pm != 0) &&
++	    adev->pdev->vendor == PCI_VENDOR_ID_ATI &&
++	    adev->pdev->device == 0x731f &&
++	    adev->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
++		return 0;
++
+ 	/* PCI_EXT_CAP_ID_VNDR extended capability is located at 0x100 */
+ 	if (!pci_find_ext_capability(adev->pdev, PCI_EXT_CAP_ID_VNDR))
+ 		DRM_WARN("System can't access extended configuration space, please check!!\n");
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+index 425073d994912f..1c8ac4cf08c5ac 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -2280,7 +2280,7 @@ int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo,
+ 	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
+ 	struct amdgpu_res_cursor cursor;
+ 	u64 addr;
+-	int r;
++	int r = 0;
+ 
+ 	if (!adev->mman.buffer_funcs_enabled)
+ 		return -EINVAL;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
+index 2eff37aaf8273b..1695dd78ede8e6 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
+@@ -107,6 +107,8 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
+ 	m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
+ 			0x53 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;
+ 
++	m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
++	m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
+ 	m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT;
+ 
+ 	m->cp_mqd_base_addr_lo        = lower_32_bits(addr);
+@@ -167,10 +169,10 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
+ 
+ 	m = get_mqd(mqd);
+ 
+-	m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
++	m->cp_hqd_pq_control &= ~CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK;
+ 	m->cp_hqd_pq_control |=
+ 			ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
+-	m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
++
+ 	pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
+ 
+ 	m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
+index 68dbc0399c87aa..3c0ae28c5923b5 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
+@@ -154,6 +154,8 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
+ 	m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
+ 			0x55 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;
+ 
++	m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
++	m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
+ 	m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT;
+ 
+ 	m->cp_mqd_base_addr_lo        = lower_32_bits(addr);
+@@ -221,10 +223,9 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
+ 
+ 	m = get_mqd(mqd);
+ 
+-	m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
++	m->cp_hqd_pq_control &= ~CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK;
+ 	m->cp_hqd_pq_control |=
+ 			ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
+-	m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
+ 	pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
+ 
+ 	m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
+index 2b72d5b4949b6c..565858b9044d46 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
+@@ -121,6 +121,8 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
+ 	m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
+ 			0x55 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;
+ 
++	m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
++	m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
+ 	m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT;
+ 
+ 	m->cp_mqd_base_addr_lo        = lower_32_bits(addr);
+@@ -184,10 +186,9 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
+ 
+ 	m = get_mqd(mqd);
+ 
+-	m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
++	m->cp_hqd_pq_control &= ~CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK;
+ 	m->cp_hqd_pq_control |=
+ 			ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
+-	m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
+ 	pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
+ 
+ 	m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+index 84e8ea3a8a0c94..217af36dc0976f 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+@@ -182,6 +182,9 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
+ 	m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
+ 			0x53 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;
+ 
++	m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
++	m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
++
+ 	m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT;
+ 
+ 	m->cp_mqd_base_addr_lo        = lower_32_bits(addr);
+@@ -244,7 +247,7 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
+ 
+ 	m = get_mqd(mqd);
+ 
+-	m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
++	m->cp_hqd_pq_control &= ~CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK;
+ 	m->cp_hqd_pq_control |= order_base_2(q->queue_size / 4) - 1;
+ 	pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
+ 
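All four kfd_mqd_manager hunks (v9, v10, v11, v12) make the same change: the static bits of cp_hqd_pq_control (RPTR block size, unordered dispatch) are programmed once in init_mqd(), and update_mqd() becomes a read-modify-write that touches only the queue-size field, so the bits set at init survive later updates. The pattern as a sketch (v9 encoding shown; the other generations use an ffs()-based equivalent):

#include <linux/log2.h>

/* Update only the QUEUE_SIZE field of cp_hqd_pq_control, preserving
 * everything init_mqd() programmed. 'reg' stands for m->cp_hqd_pq_control. */
static u32 set_queue_size(u32 reg, u32 queue_size_bytes)
{
	reg &= ~CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK;
	reg |= order_base_2(queue_size_bytes / 4) - 1;	/* log2(dwords) - 1 */
	return reg;
}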
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 85e58e0f6059a6..5df26f8937cc81 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -1593,75 +1593,130 @@ static bool dm_should_disable_stutter(struct pci_dev *pdev)
+ 	return false;
+ }
+ 
+-static const struct dmi_system_id hpd_disconnect_quirk_table[] = {
++struct amdgpu_dm_quirks {
++	bool aux_hpd_discon;
++	bool support_edp0_on_dp1;
++};
++
++static struct amdgpu_dm_quirks quirk_entries = {
++	.aux_hpd_discon = false,
++	.support_edp0_on_dp1 = false
++};
++
++static int edp0_on_dp1_callback(const struct dmi_system_id *id)
++{
++	quirk_entries.support_edp0_on_dp1 = true;
++	return 0;
++}
++
++static int aux_hpd_discon_callback(const struct dmi_system_id *id)
++{
++	quirk_entries.aux_hpd_discon = true;
++	return 0;
++}
++
++static const struct dmi_system_id dmi_quirk_table[] = {
+ 	{
++		.callback = aux_hpd_discon_callback,
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"),
+ 		},
+ 	},
+ 	{
++		.callback = aux_hpd_discon_callback,
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"),
+ 		},
+ 	},
+ 	{
++		.callback = aux_hpd_discon_callback,
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
+ 		},
+ 	},
+ 	{
++		.callback = aux_hpd_discon_callback,
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower Plus 7010"),
+ 		},
+ 	},
+ 	{
++		.callback = aux_hpd_discon_callback,
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower 7010"),
+ 		},
+ 	},
+ 	{
++		.callback = aux_hpd_discon_callback,
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF Plus 7010"),
+ 		},
+ 	},
+ 	{
++		.callback = aux_hpd_discon_callback,
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF 7010"),
+ 		},
+ 	},
+ 	{
++		.callback = aux_hpd_discon_callback,
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro Plus 7010"),
+ 		},
+ 	},
+ 	{
++		.callback = aux_hpd_discon_callback,
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro 7010"),
+ 		},
+ 	},
++	{
++		.callback = edp0_on_dp1_callback,
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "HP Elite mt645 G8 Mobile Thin Client"),
++		},
++	},
++	{
++		.callback = edp0_on_dp1_callback,
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 665 16 inch G11 Notebook PC"),
++		},
++	},
+ 	{}
+ 	/* TODO: refactor this from a fixed table to a dynamic option */
+ };
+ 
+-static void retrieve_dmi_info(struct amdgpu_display_manager *dm)
++static void retrieve_dmi_info(struct amdgpu_display_manager *dm, struct dc_init_data *init_data)
+ {
+-	const struct dmi_system_id *dmi_id;
++	int dmi_id;
++	struct drm_device *dev = dm->ddev;
+ 
+ 	dm->aux_hpd_discon_quirk = false;
++	init_data->flags.support_edp0_on_dp1 = false;
++
++	dmi_id = dmi_check_system(dmi_quirk_table);
+ 
+-	dmi_id = dmi_first_match(hpd_disconnect_quirk_table);
+-	if (dmi_id) {
++	if (!dmi_id)
++		return;
++
++	if (quirk_entries.aux_hpd_discon) {
+ 		dm->aux_hpd_discon_quirk = true;
+-		DRM_INFO("aux_hpd_discon_quirk attached\n");
++		drm_info(dev, "aux_hpd_discon_quirk attached\n");
++	}
++	if (quirk_entries.support_edp0_on_dp1) {
++		init_data->flags.support_edp0_on_dp1 = true;
++		drm_info(dev, "support_edp0_on_dp1 attached\n");
+ 	}
+ }
+ 
+@@ -1969,7 +2024,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
+ 	if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0))
+ 		init_data.num_virtual_links = 1;
+ 
+-	retrieve_dmi_info(&adev->dm);
++	retrieve_dmi_info(&adev->dm, &init_data);
+ 
+ 	if (adev->dm.bb_from_dmub)
+ 		init_data.bb_from_dmub = adev->dm.bb_from_dmub;
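The quirk-table rework above is needed because dmi_first_match() returns only the first matching entry, which cannot express two independent quirks on one table. dmi_check_system() instead runs every matching entry's .callback and returns the number of matches, so each callback latches its own flag. A reduced sketch of the idiom:

#include <linux/dmi.h>

static bool my_quirk;

static int my_quirk_cb(const struct dmi_system_id *id)
{
	my_quirk = true;
	return 0;	/* returning 0 lets the scan continue */
}

static const struct dmi_system_id my_table[] = {
	{
		.callback = my_quirk_cb,
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Some Vendor"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Some Product"),
		},
	},
	{}	/* terminator */
};

/* dmi_check_system(my_table) returns the number of matched entries. */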
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+index 3390f0d8420a05..c4a7fd453e5fc0 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+@@ -894,6 +894,7 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
+ 	struct drm_device *dev = adev_to_drm(adev);
+ 	struct drm_connector *connector;
+ 	struct drm_connector_list_iter iter;
++	int i;
+ 
+ 	drm_connector_list_iter_begin(dev, &iter);
+ 	drm_for_each_connector_iter(connector, &iter) {
+@@ -920,6 +921,12 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
+ 		}
+ 	}
+ 	drm_connector_list_iter_end(&iter);
++
++	/* Update reference counts for HPDs */
++	for (i = DC_IRQ_SOURCE_HPD1; i <= adev->mode_info.num_hpd; i++) {
++		if (amdgpu_irq_get(adev, &adev->hpd_irq, i - DC_IRQ_SOURCE_HPD1))
++			drm_err(dev, "DM_IRQ: Failed to get HPD for source=%d!\n", i);
++	}
+ }
+ 
+ /**
+@@ -935,6 +942,7 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
+ 	struct drm_device *dev = adev_to_drm(adev);
+ 	struct drm_connector *connector;
+ 	struct drm_connector_list_iter iter;
++	int i;
+ 
+ 	drm_connector_list_iter_begin(dev, &iter);
+ 	drm_for_each_connector_iter(connector, &iter) {
+@@ -960,4 +968,10 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
+ 		}
+ 	}
+ 	drm_connector_list_iter_end(&iter);
++
++	/* Update reference counts for HPDs */
++	for (i = DC_IRQ_SOURCE_HPD1; i <= adev->mode_info.num_hpd; i++) {
++		if (amdgpu_irq_put(adev, &adev->hpd_irq, i - DC_IRQ_SOURCE_HPD1))
++			drm_err(dev, "DM_IRQ: Failed to put HPD for source=%d!\n", i);
++	}
+ }
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+index 45858bf1523d8f..e140b7a04d7246 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+@@ -54,7 +54,8 @@ static bool link_supports_psrsu(struct dc_link *link)
+ 	if (amdgpu_dc_debug_mask & DC_DISABLE_PSR_SU)
+ 		return false;
+ 
+-	return dc_dmub_check_min_version(dc->ctx->dmub_srv->dmub);
++	/* Temporarily disable PSR-SU to avoid glitches */
++	return false;
+ }
+ 
+ /*
+diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
+index e8b6989a40f35a..6b34a33d788f29 100644
+--- a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
++++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
+@@ -3043,6 +3043,7 @@ static int kv_dpm_hw_init(void *handle)
+ 	if (!amdgpu_dpm)
+ 		return 0;
+ 
++	mutex_lock(&adev->pm.mutex);
+ 	kv_dpm_setup_asic(adev);
+ 	ret = kv_dpm_enable(adev);
+ 	if (ret)
+@@ -3050,6 +3051,8 @@ static int kv_dpm_hw_init(void *handle)
+ 	else
+ 		adev->pm.dpm_enabled = true;
+ 	amdgpu_legacy_dpm_compute_clocks(adev);
++	mutex_unlock(&adev->pm.mutex);
++
+ 	return ret;
+ }
+ 
+@@ -3067,32 +3070,42 @@ static int kv_dpm_suspend(void *handle)
+ {
+ 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ 
++	cancel_work_sync(&adev->pm.dpm.thermal.work);
++
+ 	if (adev->pm.dpm_enabled) {
++		mutex_lock(&adev->pm.mutex);
++		adev->pm.dpm_enabled = false;
+ 		/* disable dpm */
+ 		kv_dpm_disable(adev);
+ 		/* reset the power state */
+ 		adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
++		mutex_unlock(&adev->pm.mutex);
+ 	}
+ 	return 0;
+ }
+ 
+ static int kv_dpm_resume(void *handle)
+ {
+-	int ret;
++	int ret = 0;
+ 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ 
+-	if (adev->pm.dpm_enabled) {
++	if (!amdgpu_dpm)
++		return 0;
++
++	if (!adev->pm.dpm_enabled) {
++		mutex_lock(&adev->pm.mutex);
+ 		/* asic init will reset to the boot state */
+ 		kv_dpm_setup_asic(adev);
+ 		ret = kv_dpm_enable(adev);
+-		if (ret)
++		if (ret) {
+ 			adev->pm.dpm_enabled = false;
+-		else
++		} else {
+ 			adev->pm.dpm_enabled = true;
+-		if (adev->pm.dpm_enabled)
+ 			amdgpu_legacy_dpm_compute_clocks(adev);
++		}
++		mutex_unlock(&adev->pm.mutex);
+ 	}
+-	return 0;
++	return ret;
+ }
+ 
+ static bool kv_dpm_is_idle(void *handle)
+diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
+index e861355ebd75b9..c7518b13e78795 100644
+--- a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
++++ b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
+@@ -1009,9 +1009,12 @@ void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
+ 	enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
+ 	int temp, size = sizeof(temp);
+ 
+-	if (!adev->pm.dpm_enabled)
+-		return;
++	mutex_lock(&adev->pm.mutex);
+ 
++	if (!adev->pm.dpm_enabled) {
++		mutex_unlock(&adev->pm.mutex);
++		return;
++	}
+ 	if (!pp_funcs->read_sensor(adev->powerplay.pp_handle,
+ 				   AMDGPU_PP_SENSOR_GPU_TEMP,
+ 				   (void *)&temp,
+@@ -1033,4 +1036,5 @@ void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
+ 	adev->pm.dpm.state = dpm_state;
+ 
+ 	amdgpu_legacy_dpm_compute_clocks(adev->powerplay.pp_handle);
++	mutex_unlock(&adev->pm.mutex);
+ }
+diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
+index a1baa13ab2c263..a5ad1b60597e61 100644
+--- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
++++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
+@@ -7783,6 +7783,7 @@ static int si_dpm_hw_init(void *handle)
+ 	if (!amdgpu_dpm)
+ 		return 0;
+ 
++	mutex_lock(&adev->pm.mutex);
+ 	si_dpm_setup_asic(adev);
+ 	ret = si_dpm_enable(adev);
+ 	if (ret)
+@@ -7790,6 +7791,7 @@ static int si_dpm_hw_init(void *handle)
+ 	else
+ 		adev->pm.dpm_enabled = true;
+ 	amdgpu_legacy_dpm_compute_clocks(adev);
++	mutex_unlock(&adev->pm.mutex);
+ 	return ret;
+ }
+ 
+@@ -7807,32 +7809,44 @@ static int si_dpm_suspend(void *handle)
+ {
+ 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ 
++	cancel_work_sync(&adev->pm.dpm.thermal.work);
++
+ 	if (adev->pm.dpm_enabled) {
++		mutex_lock(&adev->pm.mutex);
++		adev->pm.dpm_enabled = false;
+ 		/* disable dpm */
+ 		si_dpm_disable(adev);
+ 		/* reset the power state */
+ 		adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
++		mutex_unlock(&adev->pm.mutex);
+ 	}
++
+ 	return 0;
+ }
+ 
+ static int si_dpm_resume(void *handle)
+ {
+-	int ret;
++	int ret = 0;
+ 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ 
+-	if (adev->pm.dpm_enabled) {
++	if (!amdgpu_dpm)
++		return 0;
++
++	if (!adev->pm.dpm_enabled) {
+ 		/* asic init will reset to the boot state */
++		mutex_lock(&adev->pm.mutex);
+ 		si_dpm_setup_asic(adev);
+ 		ret = si_dpm_enable(adev);
+-		if (ret)
++		if (ret) {
+ 			adev->pm.dpm_enabled = false;
+-		else
++		} else {
+ 			adev->pm.dpm_enabled = true;
+-		if (adev->pm.dpm_enabled)
+ 			amdgpu_legacy_dpm_compute_clocks(adev);
++		}
++		mutex_unlock(&adev->pm.mutex);
+ 	}
+-	return 0;
++
++	return ret;
+ }
+ 
+ static bool si_dpm_is_idle(void *handle)
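The kv_dpm and si_dpm hunks apply one locking discipline: suspend first cancels the thermal worker, then clears dpm_enabled and disables the hardware under adev->pm.mutex, while the worker (see the legacy_dpm.c hunk above) re-checks dpm_enabled under the same mutex. A sketch of the two sides of that handshake (not the driver's verbatim code):

/* Suspend side: stop the async worker, then flip state under the lock. */
static int dpm_suspend_sketch(struct amdgpu_device *adev)
{
	cancel_work_sync(&adev->pm.dpm.thermal.work);
	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		adev->pm.dpm_enabled = false;
		/* ... hardware disable, reset power state ... */
		mutex_unlock(&adev->pm.mutex);
	}
	return 0;
}

/* Worker side: take the same lock and re-check before doing work. */
static void thermal_work_sketch(struct amdgpu_device *adev)
{
	mutex_lock(&adev->pm.mutex);
	if (!adev->pm.dpm_enabled) {
		mutex_unlock(&adev->pm.mutex);
		return;
	}
	/* ... read temperature, recompute clocks ... */
	mutex_unlock(&adev->pm.mutex);
}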
+diff --git a/drivers/gpu/drm/xe/regs/xe_engine_regs.h b/drivers/gpu/drm/xe/regs/xe_engine_regs.h
+index 7c78496e6213cc..192e571348f6b3 100644
+--- a/drivers/gpu/drm/xe/regs/xe_engine_regs.h
++++ b/drivers/gpu/drm/xe/regs/xe_engine_regs.h
+@@ -53,7 +53,6 @@
+ 
+ #define RING_CTL(base)				XE_REG((base) + 0x3c)
+ #define   RING_CTL_SIZE(size)			((size) - PAGE_SIZE) /* in bytes -> pages */
+-#define   RING_CTL_SIZE(size)			((size) - PAGE_SIZE) /* in bytes -> pages */
+ 
+ #define RING_START_UDW(base)			XE_REG((base) + 0x48)
+ 
+diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
+index e6744422dee492..448766033690c7 100644
+--- a/drivers/gpu/drm/xe/xe_oa.c
++++ b/drivers/gpu/drm/xe/xe_oa.c
+@@ -47,6 +47,11 @@ enum xe_oa_submit_deps {
+ 	XE_OA_SUBMIT_ADD_DEPS,
+ };
+ 
++enum xe_oa_user_extn_from {
++	XE_OA_USER_EXTN_FROM_OPEN,
++	XE_OA_USER_EXTN_FROM_CONFIG,
++};
++
+ struct xe_oa_reg {
+ 	struct xe_reg addr;
+ 	u32 value;
+@@ -94,6 +99,17 @@ struct xe_oa_config_bo {
+ 	struct xe_bb *bb;
+ };
+ 
++struct xe_oa_fence {
++	/* @base: dma fence base */
++	struct dma_fence base;
++	/* @lock: lock for the fence */
++	spinlock_t lock;
++	/* @work: work to signal @base */
++	struct delayed_work work;
++	/* @cb: callback to schedule @work */
++	struct dma_fence_cb cb;
++};
++
+ #define DRM_FMT(x) DRM_XE_OA_FMT_TYPE_##x
+ 
+ static const struct xe_oa_format oa_formats[] = {
+@@ -166,10 +182,10 @@ static struct xe_oa_config *xe_oa_get_oa_config(struct xe_oa *oa, int metrics_se
+ 	return oa_config;
+ }
+ 
+-static void free_oa_config_bo(struct xe_oa_config_bo *oa_bo)
++static void free_oa_config_bo(struct xe_oa_config_bo *oa_bo, struct dma_fence *last_fence)
+ {
+ 	xe_oa_config_put(oa_bo->oa_config);
+-	xe_bb_free(oa_bo->bb, NULL);
++	xe_bb_free(oa_bo->bb, last_fence);
+ 	kfree(oa_bo);
+ }
+ 
+@@ -668,7 +684,8 @@ static void xe_oa_free_configs(struct xe_oa_stream *stream)
+ 
+ 	xe_oa_config_put(stream->oa_config);
+ 	llist_for_each_entry_safe(oa_bo, tmp, stream->oa_config_bos.first, node)
+-		free_oa_config_bo(oa_bo);
++		free_oa_config_bo(oa_bo, stream->last_fence);
++	dma_fence_put(stream->last_fence);
+ }
+ 
+ static int xe_oa_load_with_lri(struct xe_oa_stream *stream, struct xe_oa_reg *reg_lri, u32 count)
+@@ -832,6 +849,7 @@ static void xe_oa_stream_destroy(struct xe_oa_stream *stream)
+ 		xe_gt_WARN_ON(gt, xe_guc_pc_unset_gucrc_mode(&gt->uc.guc.pc));
+ 
+ 	xe_oa_free_configs(stream);
++	xe_file_put(stream->xef);
+ }
+ 
+ static int xe_oa_alloc_oa_buffer(struct xe_oa_stream *stream)
+@@ -902,40 +920,113 @@ xe_oa_alloc_config_buffer(struct xe_oa_stream *stream, struct xe_oa_config *oa_c
+ 	return oa_bo;
+ }
+ 
++static void xe_oa_update_last_fence(struct xe_oa_stream *stream, struct dma_fence *fence)
++{
++	dma_fence_put(stream->last_fence);
++	stream->last_fence = dma_fence_get(fence);
++}
++
++static void xe_oa_fence_work_fn(struct work_struct *w)
++{
++	struct xe_oa_fence *ofence = container_of(w, typeof(*ofence), work.work);
++
++	/* Signal fence to indicate new OA configuration is active */
++	dma_fence_signal(&ofence->base);
++	dma_fence_put(&ofence->base);
++}
++
++static void xe_oa_config_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
++{
++	/* Additional empirical delay needed for NOA programming after registers are written */
++#define NOA_PROGRAM_ADDITIONAL_DELAY_US 500
++
++	struct xe_oa_fence *ofence = container_of(cb, typeof(*ofence), cb);
++
++	INIT_DELAYED_WORK(&ofence->work, xe_oa_fence_work_fn);
++	queue_delayed_work(system_unbound_wq, &ofence->work,
++			   usecs_to_jiffies(NOA_PROGRAM_ADDITIONAL_DELAY_US));
++	dma_fence_put(fence);
++}
++
++static const char *xe_oa_get_driver_name(struct dma_fence *fence)
++{
++	return "xe_oa";
++}
++
++static const char *xe_oa_get_timeline_name(struct dma_fence *fence)
++{
++	return "unbound";
++}
++
++static const struct dma_fence_ops xe_oa_fence_ops = {
++	.get_driver_name = xe_oa_get_driver_name,
++	.get_timeline_name = xe_oa_get_timeline_name,
++};
++
+ static int xe_oa_emit_oa_config(struct xe_oa_stream *stream, struct xe_oa_config *config)
+ {
+ #define NOA_PROGRAM_ADDITIONAL_DELAY_US 500
+ 	struct xe_oa_config_bo *oa_bo;
+-	int err = 0, us = NOA_PROGRAM_ADDITIONAL_DELAY_US;
++	struct xe_oa_fence *ofence;
++	int i, err, num_signal = 0;
+ 	struct dma_fence *fence;
+-	long timeout;
+ 
+-	/* Emit OA configuration batch */
++	ofence = kzalloc(sizeof(*ofence), GFP_KERNEL);
++	if (!ofence) {
++		err = -ENOMEM;
++		goto exit;
++	}
++
+ 	oa_bo = xe_oa_alloc_config_buffer(stream, config);
+ 	if (IS_ERR(oa_bo)) {
+ 		err = PTR_ERR(oa_bo);
+ 		goto exit;
+ 	}
+ 
++	/* Emit OA configuration batch */
+ 	fence = xe_oa_submit_bb(stream, XE_OA_SUBMIT_ADD_DEPS, oa_bo->bb);
+ 	if (IS_ERR(fence)) {
+ 		err = PTR_ERR(fence);
+ 		goto exit;
+ 	}
+ 
+-	/* Wait till all previous batches have executed */
+-	timeout = dma_fence_wait_timeout(fence, false, 5 * HZ);
+-	dma_fence_put(fence);
+-	if (timeout < 0)
+-		err = timeout;
+-	else if (!timeout)
+-		err = -ETIME;
+-	if (err)
+-		drm_dbg(&stream->oa->xe->drm, "dma_fence_wait_timeout err %d\n", err);
++	/* Point of no return: initialize and set fence to signal */
++	spin_lock_init(&ofence->lock);
++	dma_fence_init(&ofence->base, &xe_oa_fence_ops, &ofence->lock, 0, 0);
+ 
+-	/* Additional empirical delay needed for NOA programming after registers are written */
+-	usleep_range(us, 2 * us);
++	for (i = 0; i < stream->num_syncs; i++) {
++		if (stream->syncs[i].flags & DRM_XE_SYNC_FLAG_SIGNAL)
++			num_signal++;
++		xe_sync_entry_signal(&stream->syncs[i], &ofence->base);
++	}
++
++	/* Additional dma_fence_get in case we dma_fence_wait */
++	if (!num_signal)
++		dma_fence_get(&ofence->base);
++
++	/* Update last fence too before adding callback */
++	xe_oa_update_last_fence(stream, fence);
++
++	/* Add job fence callback to schedule work to signal ofence->base */
++	err = dma_fence_add_callback(fence, &ofence->cb, xe_oa_config_cb);
++	xe_gt_assert(stream->gt, !err || err == -ENOENT);
++	if (err == -ENOENT)
++		xe_oa_config_cb(fence, &ofence->cb);
++
++	/* If nothing needs to be signaled we wait synchronously */
++	if (!num_signal) {
++		dma_fence_wait(&ofence->base, false);
++		dma_fence_put(&ofence->base);
++	}
++
++	/* Done with syncs */
++	for (i = 0; i < stream->num_syncs; i++)
++		xe_sync_entry_cleanup(&stream->syncs[i]);
++	kfree(stream->syncs);
++
++	return 0;
+ exit:
++	kfree(ofence);
+ 	return err;
+ }
+ 
+@@ -1006,6 +1097,262 @@ static int xe_oa_enable_metric_set(struct xe_oa_stream *stream)
+ 	return xe_oa_emit_oa_config(stream, stream->oa_config);
+ }
+ 
++static int decode_oa_format(struct xe_oa *oa, u64 fmt, enum xe_oa_format_name *name)
++{
++	u32 counter_size = FIELD_GET(DRM_XE_OA_FORMAT_MASK_COUNTER_SIZE, fmt);
++	u32 counter_sel = FIELD_GET(DRM_XE_OA_FORMAT_MASK_COUNTER_SEL, fmt);
++	u32 bc_report = FIELD_GET(DRM_XE_OA_FORMAT_MASK_BC_REPORT, fmt);
++	u32 type = FIELD_GET(DRM_XE_OA_FORMAT_MASK_FMT_TYPE, fmt);
++	int idx;
++
++	for_each_set_bit(idx, oa->format_mask, __XE_OA_FORMAT_MAX) {
++		const struct xe_oa_format *f = &oa->oa_formats[idx];
++
++		if (counter_size == f->counter_size && bc_report == f->bc_report &&
++		    type == f->type && counter_sel == f->counter_select) {
++			*name = idx;
++			return 0;
++		}
++	}
++
++	return -EINVAL;
++}
++
++static int xe_oa_set_prop_oa_unit_id(struct xe_oa *oa, u64 value,
++				     struct xe_oa_open_param *param)
++{
++	if (value >= oa->oa_unit_ids) {
++		drm_dbg(&oa->xe->drm, "OA unit ID out of range %lld\n", value);
++		return -EINVAL;
++	}
++	param->oa_unit_id = value;
++	return 0;
++}
++
++static int xe_oa_set_prop_sample_oa(struct xe_oa *oa, u64 value,
++				    struct xe_oa_open_param *param)
++{
++	param->sample = value;
++	return 0;
++}
++
++static int xe_oa_set_prop_metric_set(struct xe_oa *oa, u64 value,
++				     struct xe_oa_open_param *param)
++{
++	param->metric_set = value;
++	return 0;
++}
++
++static int xe_oa_set_prop_oa_format(struct xe_oa *oa, u64 value,
++				    struct xe_oa_open_param *param)
++{
++	int ret = decode_oa_format(oa, value, &param->oa_format);
++
++	if (ret) {
++		drm_dbg(&oa->xe->drm, "Unsupported OA report format %#llx\n", value);
++		return ret;
++	}
++	return 0;
++}
++
++static int xe_oa_set_prop_oa_exponent(struct xe_oa *oa, u64 value,
++				      struct xe_oa_open_param *param)
++{
++#define OA_EXPONENT_MAX 31
++
++	if (value > OA_EXPONENT_MAX) {
++		drm_dbg(&oa->xe->drm, "OA timer exponent too high (> %u)\n", OA_EXPONENT_MAX);
++		return -EINVAL;
++	}
++	param->period_exponent = value;
++	return 0;
++}
++
++static int xe_oa_set_prop_disabled(struct xe_oa *oa, u64 value,
++				   struct xe_oa_open_param *param)
++{
++	param->disabled = value;
++	return 0;
++}
++
++static int xe_oa_set_prop_exec_queue_id(struct xe_oa *oa, u64 value,
++					struct xe_oa_open_param *param)
++{
++	param->exec_queue_id = value;
++	return 0;
++}
++
++static int xe_oa_set_prop_engine_instance(struct xe_oa *oa, u64 value,
++					  struct xe_oa_open_param *param)
++{
++	param->engine_instance = value;
++	return 0;
++}
++
++static int xe_oa_set_no_preempt(struct xe_oa *oa, u64 value,
++				struct xe_oa_open_param *param)
++{
++	param->no_preempt = value;
++	return 0;
++}
++
++static int xe_oa_set_prop_num_syncs(struct xe_oa *oa, u64 value,
++				    struct xe_oa_open_param *param)
++{
++	param->num_syncs = value;
++	return 0;
++}
++
++static int xe_oa_set_prop_syncs_user(struct xe_oa *oa, u64 value,
++				     struct xe_oa_open_param *param)
++{
++	param->syncs_user = u64_to_user_ptr(value);
++	return 0;
++}
++
++static int xe_oa_set_prop_ret_inval(struct xe_oa *oa, u64 value,
++				    struct xe_oa_open_param *param)
++{
++	return -EINVAL;
++}
++
++typedef int (*xe_oa_set_property_fn)(struct xe_oa *oa, u64 value,
++				     struct xe_oa_open_param *param);
++static const xe_oa_set_property_fn xe_oa_set_property_funcs_open[] = {
++	[DRM_XE_OA_PROPERTY_OA_UNIT_ID] = xe_oa_set_prop_oa_unit_id,
++	[DRM_XE_OA_PROPERTY_SAMPLE_OA] = xe_oa_set_prop_sample_oa,
++	[DRM_XE_OA_PROPERTY_OA_METRIC_SET] = xe_oa_set_prop_metric_set,
++	[DRM_XE_OA_PROPERTY_OA_FORMAT] = xe_oa_set_prop_oa_format,
++	[DRM_XE_OA_PROPERTY_OA_PERIOD_EXPONENT] = xe_oa_set_prop_oa_exponent,
++	[DRM_XE_OA_PROPERTY_OA_DISABLED] = xe_oa_set_prop_disabled,
++	[DRM_XE_OA_PROPERTY_EXEC_QUEUE_ID] = xe_oa_set_prop_exec_queue_id,
++	[DRM_XE_OA_PROPERTY_OA_ENGINE_INSTANCE] = xe_oa_set_prop_engine_instance,
++	[DRM_XE_OA_PROPERTY_NO_PREEMPT] = xe_oa_set_no_preempt,
++	[DRM_XE_OA_PROPERTY_NUM_SYNCS] = xe_oa_set_prop_num_syncs,
++	[DRM_XE_OA_PROPERTY_SYNCS] = xe_oa_set_prop_syncs_user,
++};
++
++static const xe_oa_set_property_fn xe_oa_set_property_funcs_config[] = {
++	[DRM_XE_OA_PROPERTY_OA_UNIT_ID] = xe_oa_set_prop_ret_inval,
++	[DRM_XE_OA_PROPERTY_SAMPLE_OA] = xe_oa_set_prop_ret_inval,
++	[DRM_XE_OA_PROPERTY_OA_METRIC_SET] = xe_oa_set_prop_metric_set,
++	[DRM_XE_OA_PROPERTY_OA_FORMAT] = xe_oa_set_prop_ret_inval,
++	[DRM_XE_OA_PROPERTY_OA_PERIOD_EXPONENT] = xe_oa_set_prop_ret_inval,
++	[DRM_XE_OA_PROPERTY_OA_DISABLED] = xe_oa_set_prop_ret_inval,
++	[DRM_XE_OA_PROPERTY_EXEC_QUEUE_ID] = xe_oa_set_prop_ret_inval,
++	[DRM_XE_OA_PROPERTY_OA_ENGINE_INSTANCE] = xe_oa_set_prop_ret_inval,
++	[DRM_XE_OA_PROPERTY_NO_PREEMPT] = xe_oa_set_prop_ret_inval,
++	[DRM_XE_OA_PROPERTY_NUM_SYNCS] = xe_oa_set_prop_num_syncs,
++	[DRM_XE_OA_PROPERTY_SYNCS] = xe_oa_set_prop_syncs_user,
++};
++
++static int xe_oa_user_ext_set_property(struct xe_oa *oa, enum xe_oa_user_extn_from from,
++				       u64 extension, struct xe_oa_open_param *param)
++{
++	u64 __user *address = u64_to_user_ptr(extension);
++	struct drm_xe_ext_set_property ext;
++	int err;
++	u32 idx;
++
++	err = __copy_from_user(&ext, address, sizeof(ext));
++	if (XE_IOCTL_DBG(oa->xe, err))
++		return -EFAULT;
++
++	BUILD_BUG_ON(ARRAY_SIZE(xe_oa_set_property_funcs_open) !=
++		     ARRAY_SIZE(xe_oa_set_property_funcs_config));
++
++	if (XE_IOCTL_DBG(oa->xe, ext.property >= ARRAY_SIZE(xe_oa_set_property_funcs_open)) ||
++	    XE_IOCTL_DBG(oa->xe, ext.pad))
++		return -EINVAL;
++
++	idx = array_index_nospec(ext.property, ARRAY_SIZE(xe_oa_set_property_funcs_open));
++
++	if (from == XE_OA_USER_EXTN_FROM_CONFIG)
++		return xe_oa_set_property_funcs_config[idx](oa, ext.value, param);
++	else
++		return xe_oa_set_property_funcs_open[idx](oa, ext.value, param);
++}
++
++typedef int (*xe_oa_user_extension_fn)(struct xe_oa *oa,  enum xe_oa_user_extn_from from,
++				       u64 extension, struct xe_oa_open_param *param);
++static const xe_oa_user_extension_fn xe_oa_user_extension_funcs[] = {
++	[DRM_XE_OA_EXTENSION_SET_PROPERTY] = xe_oa_user_ext_set_property,
++};
++
++#define MAX_USER_EXTENSIONS	16
++static int xe_oa_user_extensions(struct xe_oa *oa, enum xe_oa_user_extn_from from, u64 extension,
++				 int ext_number, struct xe_oa_open_param *param)
++{
++	u64 __user *address = u64_to_user_ptr(extension);
++	struct drm_xe_user_extension ext;
++	int err;
++	u32 idx;
++
++	if (XE_IOCTL_DBG(oa->xe, ext_number >= MAX_USER_EXTENSIONS))
++		return -E2BIG;
++
++	err = __copy_from_user(&ext, address, sizeof(ext));
++	if (XE_IOCTL_DBG(oa->xe, err))
++		return -EFAULT;
++
++	if (XE_IOCTL_DBG(oa->xe, ext.pad) ||
++	    XE_IOCTL_DBG(oa->xe, ext.name >= ARRAY_SIZE(xe_oa_user_extension_funcs)))
++		return -EINVAL;
++
++	idx = array_index_nospec(ext.name, ARRAY_SIZE(xe_oa_user_extension_funcs));
++	err = xe_oa_user_extension_funcs[idx](oa, from, extension, param);
++	if (XE_IOCTL_DBG(oa->xe, err))
++		return err;
++
++	if (ext.next_extension)
++		return xe_oa_user_extensions(oa, from, ext.next_extension, ++ext_number, param);
++
++	return 0;
++}
++
++static int xe_oa_parse_syncs(struct xe_oa *oa, struct xe_oa_open_param *param)
++{
++	int ret, num_syncs, num_ufence = 0;
++
++	if (param->num_syncs && !param->syncs_user) {
++		drm_dbg(&oa->xe->drm, "num_syncs specified without sync array\n");
++		ret = -EINVAL;
++		goto exit;
++	}
++
++	if (param->num_syncs) {
++		param->syncs = kcalloc(param->num_syncs, sizeof(*param->syncs), GFP_KERNEL);
++		if (!param->syncs) {
++			ret = -ENOMEM;
++			goto exit;
++		}
++	}
++
++	for (num_syncs = 0; num_syncs < param->num_syncs; num_syncs++) {
++		ret = xe_sync_entry_parse(oa->xe, param->xef, &param->syncs[num_syncs],
++					  &param->syncs_user[num_syncs], 0);
++		if (ret)
++			goto err_syncs;
++
++		if (xe_sync_is_ufence(&param->syncs[num_syncs]))
++			num_ufence++;
++	}
++
++	if (XE_IOCTL_DBG(oa->xe, num_ufence > 1)) {
++		ret = -EINVAL;
++		goto err_syncs;
++	}
++
++	return 0;
++
++err_syncs:
++	while (num_syncs--)
++		xe_sync_entry_cleanup(&param->syncs[num_syncs]);
++	kfree(param->syncs);
++exit:
++	return ret;
++}
++
+ static void xe_oa_stream_enable(struct xe_oa_stream *stream)
+ {
+ 	stream->pollin = false;
+@@ -1099,36 +1446,38 @@ static int xe_oa_disable_locked(struct xe_oa_stream *stream)
+ 
+ static long xe_oa_config_locked(struct xe_oa_stream *stream, u64 arg)
+ {
+-	struct drm_xe_ext_set_property ext;
++	struct xe_oa_open_param param = {};
+ 	long ret = stream->oa_config->id;
+ 	struct xe_oa_config *config;
+ 	int err;
+ 
+-	err = __copy_from_user(&ext, u64_to_user_ptr(arg), sizeof(ext));
+-	if (XE_IOCTL_DBG(stream->oa->xe, err))
+-		return -EFAULT;
+-
+-	if (XE_IOCTL_DBG(stream->oa->xe, ext.pad) ||
+-	    XE_IOCTL_DBG(stream->oa->xe, ext.base.name != DRM_XE_OA_EXTENSION_SET_PROPERTY) ||
+-	    XE_IOCTL_DBG(stream->oa->xe, ext.base.next_extension) ||
+-	    XE_IOCTL_DBG(stream->oa->xe, ext.property != DRM_XE_OA_PROPERTY_OA_METRIC_SET))
+-		return -EINVAL;
++	err = xe_oa_user_extensions(stream->oa, XE_OA_USER_EXTN_FROM_CONFIG, arg, 0, &param);
++	if (err)
++		return err;
+ 
+-	config = xe_oa_get_oa_config(stream->oa, ext.value);
++	config = xe_oa_get_oa_config(stream->oa, param.metric_set);
+ 	if (!config)
+ 		return -ENODEV;
+ 
+-	if (config != stream->oa_config) {
+-		err = xe_oa_emit_oa_config(stream, config);
+-		if (!err)
+-			config = xchg(&stream->oa_config, config);
+-		else
+-			ret = err;
++	param.xef = stream->xef;
++	err = xe_oa_parse_syncs(stream->oa, &param);
++	if (err)
++		goto err_config_put;
++
++	stream->num_syncs = param.num_syncs;
++	stream->syncs = param.syncs;
++
++	err = xe_oa_emit_oa_config(stream, config);
++	if (!err) {
++		config = xchg(&stream->oa_config, config);
++		drm_dbg(&stream->oa->xe->drm, "changed to oa config uuid=%s\n",
++			stream->oa_config->uuid);
+ 	}
+ 
++err_config_put:
+ 	xe_oa_config_put(config);
+ 
+-	return ret;
++	return err ?: ret;
+ }
+ 
+ static long xe_oa_status_locked(struct xe_oa_stream *stream, unsigned long arg)
+@@ -1367,10 +1716,11 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream,
+ 	stream->oa_buffer.format = &stream->oa->oa_formats[param->oa_format];
+ 
+ 	stream->sample = param->sample;
+-	stream->periodic = param->period_exponent > 0;
++	stream->periodic = param->period_exponent >= 0;
+ 	stream->period_exponent = param->period_exponent;
+ 	stream->no_preempt = param->no_preempt;
+ 
++	stream->xef = xe_file_get(param->xef);
+ 	stream->num_syncs = param->num_syncs;
+ 	stream->syncs = param->syncs;
+ 
+@@ -1470,6 +1820,7 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream,
+ err_free_configs:
+ 	xe_oa_free_configs(stream);
+ exit:
++	xe_file_put(stream->xef);
+ 	return ret;
+ }
+ 
+@@ -1579,27 +1930,6 @@ static bool engine_supports_oa_format(const struct xe_hw_engine *hwe, int type)
+ 	}
+ }
+ 
+-static int decode_oa_format(struct xe_oa *oa, u64 fmt, enum xe_oa_format_name *name)
+-{
+-	u32 counter_size = FIELD_GET(DRM_XE_OA_FORMAT_MASK_COUNTER_SIZE, fmt);
+-	u32 counter_sel = FIELD_GET(DRM_XE_OA_FORMAT_MASK_COUNTER_SEL, fmt);
+-	u32 bc_report = FIELD_GET(DRM_XE_OA_FORMAT_MASK_BC_REPORT, fmt);
+-	u32 type = FIELD_GET(DRM_XE_OA_FORMAT_MASK_FMT_TYPE, fmt);
+-	int idx;
+-
+-	for_each_set_bit(idx, oa->format_mask, __XE_OA_FORMAT_MAX) {
+-		const struct xe_oa_format *f = &oa->oa_formats[idx];
+-
+-		if (counter_size == f->counter_size && bc_report == f->bc_report &&
+-		    type == f->type && counter_sel == f->counter_select) {
+-			*name = idx;
+-			return 0;
+-		}
+-	}
+-
+-	return -EINVAL;
+-}
+-
+ /**
+  * xe_oa_unit_id - Return OA unit ID for a hardware engine
+  * @hwe: @xe_hw_engine
+@@ -1646,214 +1976,6 @@ static int xe_oa_assign_hwe(struct xe_oa *oa, struct xe_oa_open_param *param)
+ 	return ret;
+ }
+ 
+-static int xe_oa_set_prop_oa_unit_id(struct xe_oa *oa, u64 value,
+-				     struct xe_oa_open_param *param)
+-{
+-	if (value >= oa->oa_unit_ids) {
+-		drm_dbg(&oa->xe->drm, "OA unit ID out of range %lld\n", value);
+-		return -EINVAL;
+-	}
+-	param->oa_unit_id = value;
+-	return 0;
+-}
+-
+-static int xe_oa_set_prop_sample_oa(struct xe_oa *oa, u64 value,
+-				    struct xe_oa_open_param *param)
+-{
+-	param->sample = value;
+-	return 0;
+-}
+-
+-static int xe_oa_set_prop_metric_set(struct xe_oa *oa, u64 value,
+-				     struct xe_oa_open_param *param)
+-{
+-	param->metric_set = value;
+-	return 0;
+-}
+-
+-static int xe_oa_set_prop_oa_format(struct xe_oa *oa, u64 value,
+-				    struct xe_oa_open_param *param)
+-{
+-	int ret = decode_oa_format(oa, value, &param->oa_format);
+-
+-	if (ret) {
+-		drm_dbg(&oa->xe->drm, "Unsupported OA report format %#llx\n", value);
+-		return ret;
+-	}
+-	return 0;
+-}
+-
+-static int xe_oa_set_prop_oa_exponent(struct xe_oa *oa, u64 value,
+-				      struct xe_oa_open_param *param)
+-{
+-#define OA_EXPONENT_MAX 31
+-
+-	if (value > OA_EXPONENT_MAX) {
+-		drm_dbg(&oa->xe->drm, "OA timer exponent too high (> %u)\n", OA_EXPONENT_MAX);
+-		return -EINVAL;
+-	}
+-	param->period_exponent = value;
+-	return 0;
+-}
+-
+-static int xe_oa_set_prop_disabled(struct xe_oa *oa, u64 value,
+-				   struct xe_oa_open_param *param)
+-{
+-	param->disabled = value;
+-	return 0;
+-}
+-
+-static int xe_oa_set_prop_exec_queue_id(struct xe_oa *oa, u64 value,
+-					struct xe_oa_open_param *param)
+-{
+-	param->exec_queue_id = value;
+-	return 0;
+-}
+-
+-static int xe_oa_set_prop_engine_instance(struct xe_oa *oa, u64 value,
+-					  struct xe_oa_open_param *param)
+-{
+-	param->engine_instance = value;
+-	return 0;
+-}
+-
+-static int xe_oa_set_no_preempt(struct xe_oa *oa, u64 value,
+-				struct xe_oa_open_param *param)
+-{
+-	param->no_preempt = value;
+-	return 0;
+-}
+-
+-static int xe_oa_set_prop_num_syncs(struct xe_oa *oa, u64 value,
+-				    struct xe_oa_open_param *param)
+-{
+-	param->num_syncs = value;
+-	return 0;
+-}
+-
+-static int xe_oa_set_prop_syncs_user(struct xe_oa *oa, u64 value,
+-				     struct xe_oa_open_param *param)
+-{
+-	param->syncs_user = u64_to_user_ptr(value);
+-	return 0;
+-}
+-
+-typedef int (*xe_oa_set_property_fn)(struct xe_oa *oa, u64 value,
+-				     struct xe_oa_open_param *param);
+-static const xe_oa_set_property_fn xe_oa_set_property_funcs[] = {
+-	[DRM_XE_OA_PROPERTY_OA_UNIT_ID] = xe_oa_set_prop_oa_unit_id,
+-	[DRM_XE_OA_PROPERTY_SAMPLE_OA] = xe_oa_set_prop_sample_oa,
+-	[DRM_XE_OA_PROPERTY_OA_METRIC_SET] = xe_oa_set_prop_metric_set,
+-	[DRM_XE_OA_PROPERTY_OA_FORMAT] = xe_oa_set_prop_oa_format,
+-	[DRM_XE_OA_PROPERTY_OA_PERIOD_EXPONENT] = xe_oa_set_prop_oa_exponent,
+-	[DRM_XE_OA_PROPERTY_OA_DISABLED] = xe_oa_set_prop_disabled,
+-	[DRM_XE_OA_PROPERTY_EXEC_QUEUE_ID] = xe_oa_set_prop_exec_queue_id,
+-	[DRM_XE_OA_PROPERTY_OA_ENGINE_INSTANCE] = xe_oa_set_prop_engine_instance,
+-	[DRM_XE_OA_PROPERTY_NO_PREEMPT] = xe_oa_set_no_preempt,
+-	[DRM_XE_OA_PROPERTY_NUM_SYNCS] = xe_oa_set_prop_num_syncs,
+-	[DRM_XE_OA_PROPERTY_SYNCS] = xe_oa_set_prop_syncs_user,
+-};
+-
+-static int xe_oa_user_ext_set_property(struct xe_oa *oa, u64 extension,
+-				       struct xe_oa_open_param *param)
+-{
+-	u64 __user *address = u64_to_user_ptr(extension);
+-	struct drm_xe_ext_set_property ext;
+-	int err;
+-	u32 idx;
+-
+-	err = __copy_from_user(&ext, address, sizeof(ext));
+-	if (XE_IOCTL_DBG(oa->xe, err))
+-		return -EFAULT;
+-
+-	if (XE_IOCTL_DBG(oa->xe, ext.property >= ARRAY_SIZE(xe_oa_set_property_funcs)) ||
+-	    XE_IOCTL_DBG(oa->xe, ext.pad))
+-		return -EINVAL;
+-
+-	idx = array_index_nospec(ext.property, ARRAY_SIZE(xe_oa_set_property_funcs));
+-	return xe_oa_set_property_funcs[idx](oa, ext.value, param);
+-}
+-
+-typedef int (*xe_oa_user_extension_fn)(struct xe_oa *oa, u64 extension,
+-				       struct xe_oa_open_param *param);
+-static const xe_oa_user_extension_fn xe_oa_user_extension_funcs[] = {
+-	[DRM_XE_OA_EXTENSION_SET_PROPERTY] = xe_oa_user_ext_set_property,
+-};
+-
+-#define MAX_USER_EXTENSIONS	16
+-static int xe_oa_user_extensions(struct xe_oa *oa, u64 extension, int ext_number,
+-				 struct xe_oa_open_param *param)
+-{
+-	u64 __user *address = u64_to_user_ptr(extension);
+-	struct drm_xe_user_extension ext;
+-	int err;
+-	u32 idx;
+-
+-	if (XE_IOCTL_DBG(oa->xe, ext_number >= MAX_USER_EXTENSIONS))
+-		return -E2BIG;
+-
+-	err = __copy_from_user(&ext, address, sizeof(ext));
+-	if (XE_IOCTL_DBG(oa->xe, err))
+-		return -EFAULT;
+-
+-	if (XE_IOCTL_DBG(oa->xe, ext.pad) ||
+-	    XE_IOCTL_DBG(oa->xe, ext.name >= ARRAY_SIZE(xe_oa_user_extension_funcs)))
+-		return -EINVAL;
+-
+-	idx = array_index_nospec(ext.name, ARRAY_SIZE(xe_oa_user_extension_funcs));
+-	err = xe_oa_user_extension_funcs[idx](oa, extension, param);
+-	if (XE_IOCTL_DBG(oa->xe, err))
+-		return err;
+-
+-	if (ext.next_extension)
+-		return xe_oa_user_extensions(oa, ext.next_extension, ++ext_number, param);
+-
+-	return 0;
+-}
+-
+-static int xe_oa_parse_syncs(struct xe_oa *oa, struct xe_oa_open_param *param)
+-{
+-	int ret, num_syncs, num_ufence = 0;
+-
+-	if (param->num_syncs && !param->syncs_user) {
+-		drm_dbg(&oa->xe->drm, "num_syncs specified without sync array\n");
+-		ret = -EINVAL;
+-		goto exit;
+-	}
+-
+-	if (param->num_syncs) {
+-		param->syncs = kcalloc(param->num_syncs, sizeof(*param->syncs), GFP_KERNEL);
+-		if (!param->syncs) {
+-			ret = -ENOMEM;
+-			goto exit;
+-		}
+-	}
+-
+-	for (num_syncs = 0; num_syncs < param->num_syncs; num_syncs++) {
+-		ret = xe_sync_entry_parse(oa->xe, param->xef, &param->syncs[num_syncs],
+-					  &param->syncs_user[num_syncs], 0);
+-		if (ret)
+-			goto err_syncs;
+-
+-		if (xe_sync_is_ufence(&param->syncs[num_syncs]))
+-			num_ufence++;
+-	}
+-
+-	if (XE_IOCTL_DBG(oa->xe, num_ufence > 1)) {
+-		ret = -EINVAL;
+-		goto err_syncs;
+-	}
+-
+-	return 0;
+-
+-err_syncs:
+-	while (num_syncs--)
+-		xe_sync_entry_cleanup(&param->syncs[num_syncs]);
+-	kfree(param->syncs);
+-exit:
+-	return ret;
+-}
+-
+ /**
+  * xe_oa_stream_open_ioctl - Opens an OA stream
+  * @dev: @drm_device
+@@ -1880,7 +2002,8 @@ int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *f
+ 	}
+ 
+ 	param.xef = xef;
+-	ret = xe_oa_user_extensions(oa, data, 0, &param);
++	param.period_exponent = -1;
++	ret = xe_oa_user_extensions(oa, XE_OA_USER_EXTN_FROM_OPEN, data, 0, &param);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -1934,7 +2057,7 @@ int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *f
+ 		goto err_exec_q;
+ 	}
+ 
+-	if (param.period_exponent > 0) {
++	if (param.period_exponent >= 0) {
+ 		u64 oa_period, oa_freq_hz;
+ 
+ 		/* Requesting samples from OAG buffer is a privileged operation */
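The xe_oa rework above replaces the blocking dma_fence_wait_timeout() + usleep_range() pair with an explicit OA fence: a callback on the config batch's fence schedules a delayed work that signals the OA fence after the empirical 500 us NOA settle delay, user syncs are attached to that fence, and the ioctl only waits synchronously when no sync asked for a signal. A condensed sketch of the callback chain; dma_fence_add_callback() returns -ENOENT if the fence has already signaled, in which case the callback is invoked by hand. Fields 'base', 'work', 'cb' are from the xe_oa_fence struct added above:

static void settle_work_fn(struct work_struct *w)
{
	struct xe_oa_fence *of = container_of(w, struct xe_oa_fence, work.work);

	dma_fence_signal(&of->base);	/* OA config is now live */
	dma_fence_put(&of->base);
}

static void batch_done_cb(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct xe_oa_fence *of = container_of(cb, struct xe_oa_fence, cb);

	INIT_DELAYED_WORK(&of->work, settle_work_fn);
	queue_delayed_work(system_unbound_wq, &of->work,
			   usecs_to_jiffies(500));	/* NOA settle delay */
	dma_fence_put(f);
}

/* After submitting the config batch and obtaining its fence:
 *
 *	err = dma_fence_add_callback(fence, &of->cb, batch_done_cb);
 *	if (err == -ENOENT)		// fence already signaled
 *		batch_done_cb(fence, &of->cb);
 */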
+diff --git a/drivers/gpu/drm/xe/xe_oa_types.h b/drivers/gpu/drm/xe/xe_oa_types.h
+index 99f4b2d4bdcf6a..fea9d981e414fa 100644
+--- a/drivers/gpu/drm/xe/xe_oa_types.h
++++ b/drivers/gpu/drm/xe/xe_oa_types.h
+@@ -239,6 +239,12 @@ struct xe_oa_stream {
+ 	/** @no_preempt: Whether preemption and timeslicing is disabled for stream exec_q */
+ 	u32 no_preempt;
+ 
++	/** @xef: xe_file with which the stream was opened */
++	struct xe_file *xef;
++
++	/** @last_fence: fence to use in stream destroy when needed */
++	struct dma_fence *last_fence;
++
+ 	/** @num_syncs: size of @syncs array */
+ 	u32 num_syncs;
+ 
+diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
+index c99380271de62f..5693b337f5dffe 100644
+--- a/drivers/gpu/drm/xe/xe_vm.c
++++ b/drivers/gpu/drm/xe/xe_vm.c
+@@ -667,20 +667,33 @@ int xe_vm_userptr_pin(struct xe_vm *vm)
+ 
+ 	/* Collect invalidated userptrs */
+ 	spin_lock(&vm->userptr.invalidated_lock);
++	xe_assert(vm->xe, list_empty(&vm->userptr.repin_list));
+ 	list_for_each_entry_safe(uvma, next, &vm->userptr.invalidated,
+ 				 userptr.invalidate_link) {
+ 		list_del_init(&uvma->userptr.invalidate_link);
+-		list_move_tail(&uvma->userptr.repin_link,
+-			       &vm->userptr.repin_list);
++		list_add_tail(&uvma->userptr.repin_link,
++			      &vm->userptr.repin_list);
+ 	}
+ 	spin_unlock(&vm->userptr.invalidated_lock);
+ 
+-	/* Pin and move to temporary list */
++	/* Pin and move to bind list */
+ 	list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
+ 				 userptr.repin_link) {
+ 		err = xe_vma_userptr_pin_pages(uvma);
+ 		if (err == -EFAULT) {
+ 			list_del_init(&uvma->userptr.repin_link);
++			/*
++			 * We might have already done the pin once, but then
++			 * had to retry before the re-bind happened, due to
++			 * some other condition in the caller. In the meantime
++			 * the userptr got dinged by the notifier, so we need
++			 * to revalidate here, but this time we hit the
++			 * -EFAULT. In such a case make sure we remove
++			 * ourselves from the rebind list to avoid going down
++			 * in flames.
++			 */
++			if (!list_empty(&uvma->vma.combined_links.rebind))
++				list_del_init(&uvma->vma.combined_links.rebind);
+ 
+ 			/* Wait for pending binds */
+ 			xe_vm_lock(vm, false);
+@@ -691,10 +704,10 @@ int xe_vm_userptr_pin(struct xe_vm *vm)
+ 			err = xe_vm_invalidate_vma(&uvma->vma);
+ 			xe_vm_unlock(vm);
+ 			if (err)
+-				return err;
++				break;
+ 		} else {
+-			if (err < 0)
+-				return err;
++			if (err)
++				break;
+ 
+ 			list_del_init(&uvma->userptr.repin_link);
+ 			list_move_tail(&uvma->vma.combined_links.rebind,
+@@ -702,7 +715,19 @@ int xe_vm_userptr_pin(struct xe_vm *vm)
+ 		}
+ 	}
+ 
+-	return 0;
++	if (err) {
++		down_write(&vm->userptr.notifier_lock);
++		spin_lock(&vm->userptr.invalidated_lock);
++		list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
++					 userptr.repin_link) {
++			list_del_init(&uvma->userptr.repin_link);
++			list_move_tail(&uvma->userptr.invalidate_link,
++				       &vm->userptr.invalidated);
++		}
++		spin_unlock(&vm->userptr.invalidated_lock);
++		up_write(&vm->userptr.notifier_lock);
++	}
++	return err;
+ }
+ 
+ /**
+@@ -1066,6 +1091,7 @@ static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
+ 		xe_assert(vm->xe, vma->gpuva.flags & XE_VMA_DESTROYED);
+ 
+ 		spin_lock(&vm->userptr.invalidated_lock);
++		xe_assert(vm->xe, list_empty(&to_userptr_vma(vma)->userptr.repin_link));
+ 		list_del(&to_userptr_vma(vma)->userptr.invalidate_link);
+ 		spin_unlock(&vm->userptr.invalidated_lock);
+ 	} else if (!xe_vma_is_null(vma)) {
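The xe_vm change converts the mid-loop returns into breaks so a single unwind runs on any error: everything still sitting on repin_list is handed back to the invalidated list, so the next xe_vm_userptr_pin() retries it. An annotated recap of that unwind, noting why both locks are taken:

/* Both locks are required: the notifier rwsem orders this against the
 * MMU notifier, while the invalidated_lock spinlock protects the list
 * heads themselves. */
if (err) {
	down_write(&vm->userptr.notifier_lock);
	spin_lock(&vm->userptr.invalidated_lock);
	list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
				 userptr.repin_link) {
		list_del_init(&uvma->userptr.repin_link);
		list_move_tail(&uvma->userptr.invalidate_link,
			       &vm->userptr.invalidated);
	}
	spin_unlock(&vm->userptr.invalidated_lock);
	up_write(&vm->userptr.notifier_lock);
}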
+diff --git a/drivers/i2c/busses/i2c-ls2x.c b/drivers/i2c/busses/i2c-ls2x.c
+index 8821cac3897b69..b475dd27b7af94 100644
+--- a/drivers/i2c/busses/i2c-ls2x.c
++++ b/drivers/i2c/busses/i2c-ls2x.c
+@@ -10,6 +10,7 @@
+  * Rewritten for mainline by Binbin Zhou <zhoubinbin@loongson.cn>
+  */
+ 
++#include <linux/bitfield.h>
+ #include <linux/bits.h>
+ #include <linux/completion.h>
+ #include <linux/device.h>
+@@ -26,7 +27,8 @@
+ #include <linux/units.h>
+ 
+ /* I2C Registers */
+-#define I2C_LS2X_PRER		0x0 /* Freq Division Register(16 bits) */
++#define I2C_LS2X_PRER_LO	0x0 /* Freq Division Low Byte Register */
++#define I2C_LS2X_PRER_HI	0x1 /* Freq Division High Byte Register */
+ #define I2C_LS2X_CTR		0x2 /* Control Register */
+ #define I2C_LS2X_TXR		0x3 /* Transport Data Register */
+ #define I2C_LS2X_RXR		0x3 /* Receive Data Register */
+@@ -93,6 +95,7 @@ static irqreturn_t ls2x_i2c_isr(int this_irq, void *dev_id)
+  */
+ static void ls2x_i2c_adjust_bus_speed(struct ls2x_i2c_priv *priv)
+ {
++	u16 val;
+ 	struct i2c_timings *t = &priv->i2c_t;
+ 	struct device *dev = priv->adapter.dev.parent;
+ 	u32 acpi_speed = i2c_acpi_find_bus_speed(dev);
+@@ -104,9 +107,14 @@ static void ls2x_i2c_adjust_bus_speed(struct ls2x_i2c_priv *priv)
+ 	else
+ 		t->bus_freq_hz = LS2X_I2C_FREQ_STD;
+ 
+-	/* Calculate and set i2c frequency. */
+-	writew(LS2X_I2C_PCLK_FREQ / (5 * t->bus_freq_hz) - 1,
+-	       priv->base + I2C_LS2X_PRER);
++	/*
++	 * According to the chip manual, we can only access the registers as bytes,
++	 * otherwise the high bits will be truncated.
++	 * So set the I2C frequency with two sequential writeb() calls instead of writew().
++	 */
++	val = LS2X_I2C_PCLK_FREQ / (5 * t->bus_freq_hz) - 1;
++	writeb(FIELD_GET(GENMASK(7, 0), val), priv->base + I2C_LS2X_PRER_LO);
++	writeb(FIELD_GET(GENMASK(15, 8), val), priv->base + I2C_LS2X_PRER_HI);
+ }
+ 
+ static void ls2x_i2c_init(struct ls2x_i2c_priv *priv)
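Since the LS2X divider can only be accessed byte-wise, the patch splits the 16-bit prescaler value across the two byte registers with FIELD_GET()/GENMASK() rather than one writew(). The helper shape, as a sketch:

#include <linux/bitfield.h>
#include <linux/io.h>

/* Program a 16-bit divider through two byte-wide registers, low byte
 * first, as the hunk above does. */
static void write_prescaler(void __iomem *base, u16 val)
{
	writeb(FIELD_GET(GENMASK(7, 0), val),  base + I2C_LS2X_PRER_LO);
	writeb(FIELD_GET(GENMASK(15, 8), val), base + I2C_LS2X_PRER_HI);
}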
+diff --git a/drivers/i2c/busses/i2c-npcm7xx.c b/drivers/i2c/busses/i2c-npcm7xx.c
+index bbcb4d6668ce63..a693ebb64edf41 100644
+--- a/drivers/i2c/busses/i2c-npcm7xx.c
++++ b/drivers/i2c/busses/i2c-npcm7xx.c
+@@ -2319,6 +2319,13 @@ static int npcm_i2c_probe_bus(struct platform_device *pdev)
+ 	if (irq < 0)
+ 		return irq;
+ 
++	/*
++	 * Disable the interrupt to keep the handler from being triggered by
++	 * stale asynchronous interrupt status, since the machine might have
++	 * gone through a warm reset during the last SMBus/I2C transfer session.
++	 */
++	npcm_i2c_int_enable(bus, false);
++
+ 	ret = devm_request_irq(bus->dev, irq, npcm_i2c_bus_irq, 0,
+ 			       dev_name(bus->dev), bus);
+ 	if (ret)
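The npcm7xx hunk encodes a common probe-ordering rule: quiesce the device's interrupt sources before devm_request_irq(), so stale status from a warm reset cannot fire the handler the instant it is registered. In outline (helper names hypothetical):

/* Probe-time ordering sketch; mydev_int_enable() stands in for the
 * driver's interrupt-enable helper. */
mydev_int_enable(bus, false);		/* silence stale status first */
ret = devm_request_irq(bus->dev, irq, mydev_irq_handler, 0,
		       dev_name(bus->dev), bus);
if (ret)
	return ret;
/* interrupts are re-enabled later, once the controller is initialized */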
+diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
+index 67aebfe0fed665..524ed143f875d3 100644
+--- a/drivers/idle/intel_idle.c
++++ b/drivers/idle/intel_idle.c
+@@ -55,6 +55,7 @@
+ #include <asm/intel-family.h>
+ #include <asm/mwait.h>
+ #include <asm/spec-ctrl.h>
++#include <asm/tsc.h>
+ #include <asm/fpu/api.h>
+ 
+ #define INTEL_IDLE_VERSION "0.5.1"
+@@ -1749,6 +1750,9 @@ static void __init intel_idle_init_cstates_acpi(struct cpuidle_driver *drv)
+ 		if (intel_idle_state_needs_timer_stop(state))
+ 			state->flags |= CPUIDLE_FLAG_TIMER_STOP;
+ 
++		if (cx->type > ACPI_STATE_C1 && !boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
++			mark_tsc_unstable("TSC halts in idle");
++
+ 		state->enter = intel_idle;
+ 		state->enter_s2idle = intel_idle_s2idle;
+ 	}
+diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+index e94518b12f86ee..a316afc0139c86 100644
+--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
++++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+@@ -154,6 +154,14 @@ struct bnxt_re_pacing {
+ 
+ #define BNXT_RE_GRC_FIFO_REG_BASE 0x2000
+ 
++#define BNXT_RE_MIN_MSIX		2
++#define BNXT_RE_MAX_MSIX		BNXT_MAX_ROCE_MSIX
++struct bnxt_re_nq_record {
++	struct bnxt_msix_entry	msix_entries[BNXT_RE_MAX_MSIX];
++	struct bnxt_qplib_nq	nq[BNXT_RE_MAX_MSIX];
++	int			num_msix;
++};
++
+ #define MAX_CQ_HASH_BITS		(16)
+ #define MAX_SRQ_HASH_BITS		(16)
+ struct bnxt_re_dev {
+@@ -174,24 +182,20 @@ struct bnxt_re_dev {
+ 	unsigned int			version, major, minor;
+ 	struct bnxt_qplib_chip_ctx	*chip_ctx;
+ 	struct bnxt_en_dev		*en_dev;
+-	int				num_msix;
+ 
+ 	int				id;
+ 
+ 	struct delayed_work		worker;
+ 	u8				cur_prio_map;
+ 
+-	/* FP Notification Queue (CQ & SRQ) */
+-	struct tasklet_struct		nq_task;
+-
+ 	/* RCFW Channel */
+ 	struct bnxt_qplib_rcfw		rcfw;
+ 
+-	/* NQ */
+-	struct bnxt_qplib_nq		nq[BNXT_MAX_ROCE_MSIX];
++	/* NQ record */
++	struct bnxt_re_nq_record	*nqr;
+ 
+ 	/* Device Resources */
+-	struct bnxt_qplib_dev_attr	dev_attr;
++	struct bnxt_qplib_dev_attr	*dev_attr;
+ 	struct bnxt_qplib_ctx		qplib_ctx;
+ 	struct bnxt_qplib_res		qplib_res;
+ 	struct bnxt_qplib_dpi		dpi_privileged;
+diff --git a/drivers/infiniband/hw/bnxt_re/hw_counters.c b/drivers/infiniband/hw/bnxt_re/hw_counters.c
+index 1e63f809174837..f51adb0a97e667 100644
+--- a/drivers/infiniband/hw/bnxt_re/hw_counters.c
++++ b/drivers/infiniband/hw/bnxt_re/hw_counters.c
+@@ -357,8 +357,8 @@ int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
+ 			goto done;
+ 		}
+ 		bnxt_re_copy_err_stats(rdev, stats, err_s);
+-		if (_is_ext_stats_supported(rdev->dev_attr.dev_cap_flags) &&
+-		    !rdev->is_virtfn) {
++		if (bnxt_ext_stats_supported(rdev->chip_ctx, rdev->dev_attr->dev_cap_flags,
++					     rdev->is_virtfn)) {
+ 			rc = bnxt_re_get_ext_stat(rdev, stats);
+ 			if (rc) {
+ 				clear_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS,
+diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+index a7067c3c067972..0b21d8b5d96296 100644
+--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
++++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+@@ -118,7 +118,7 @@ static enum ib_access_flags __to_ib_access_flags(int qflags)
+ static void bnxt_re_check_and_set_relaxed_ordering(struct bnxt_re_dev *rdev,
+ 						   struct bnxt_qplib_mrw *qplib_mr)
+ {
+-	if (_is_relaxed_ordering_supported(rdev->dev_attr.dev_cap_flags2) &&
++	if (_is_relaxed_ordering_supported(rdev->dev_attr->dev_cap_flags2) &&
+ 	    pcie_relaxed_ordering_enabled(rdev->en_dev->pdev))
+ 		qplib_mr->flags |= CMDQ_REGISTER_MR_FLAGS_ENABLE_RO;
+ }
+@@ -143,7 +143,7 @@ int bnxt_re_query_device(struct ib_device *ibdev,
+ 			 struct ib_udata *udata)
+ {
+ 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
+-	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
++	struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
+ 
+ 	memset(ib_attr, 0, sizeof(*ib_attr));
+ 	memcpy(&ib_attr->fw_ver, dev_attr->fw_ver,
+@@ -216,7 +216,7 @@ int bnxt_re_query_port(struct ib_device *ibdev, u32 port_num,
+ 		       struct ib_port_attr *port_attr)
+ {
+ 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
+-	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
++	struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
+ 	int rc;
+ 
+ 	memset(port_attr, 0, sizeof(*port_attr));
+@@ -274,8 +274,8 @@ void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str)
+ 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
+ 
+ 	snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d",
+-		 rdev->dev_attr.fw_ver[0], rdev->dev_attr.fw_ver[1],
+-		 rdev->dev_attr.fw_ver[2], rdev->dev_attr.fw_ver[3]);
++		 rdev->dev_attr->fw_ver[0], rdev->dev_attr->fw_ver[1],
++		 rdev->dev_attr->fw_ver[2], rdev->dev_attr->fw_ver[3]);
+ }
+ 
+ int bnxt_re_query_pkey(struct ib_device *ibdev, u32 port_num,
+@@ -526,7 +526,7 @@ static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
+ 	mr->qplib_mr.pd = &pd->qplib_pd;
+ 	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
+ 	mr->qplib_mr.access_flags = __from_ib_access_flags(mr_access_flags);
+-	if (!_is_alloc_mr_unified(rdev->dev_attr.dev_cap_flags)) {
++	if (!_is_alloc_mr_unified(rdev->dev_attr->dev_cap_flags)) {
+ 		rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
+ 		if (rc) {
+ 			ibdev_err(&rdev->ibdev, "Failed to alloc fence-HW-MR\n");
+@@ -1001,7 +1001,7 @@ static int bnxt_re_setup_swqe_size(struct bnxt_re_qp *qp,
+ 	rdev = qp->rdev;
+ 	qplqp = &qp->qplib_qp;
+ 	sq = &qplqp->sq;
+-	dev_attr = &rdev->dev_attr;
++	dev_attr = rdev->dev_attr;
+ 
+ 	align = sizeof(struct sq_send_hdr);
+ 	ilsize = ALIGN(init_attr->cap.max_inline_data, align);
+@@ -1221,7 +1221,7 @@ static int bnxt_re_init_rq_attr(struct bnxt_re_qp *qp,
+ 	rdev = qp->rdev;
+ 	qplqp = &qp->qplib_qp;
+ 	rq = &qplqp->rq;
+-	dev_attr = &rdev->dev_attr;
++	dev_attr = rdev->dev_attr;
+ 
+ 	if (init_attr->srq) {
+ 		struct bnxt_re_srq *srq;
+@@ -1258,7 +1258,7 @@ static void bnxt_re_adjust_gsi_rq_attr(struct bnxt_re_qp *qp)
+ 
+ 	rdev = qp->rdev;
+ 	qplqp = &qp->qplib_qp;
+-	dev_attr = &rdev->dev_attr;
++	dev_attr = rdev->dev_attr;
+ 
+ 	if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) {
+ 		qplqp->rq.max_sge = dev_attr->max_qp_sges;
+@@ -1284,7 +1284,7 @@ static int bnxt_re_init_sq_attr(struct bnxt_re_qp *qp,
+ 	rdev = qp->rdev;
+ 	qplqp = &qp->qplib_qp;
+ 	sq = &qplqp->sq;
+-	dev_attr = &rdev->dev_attr;
++	dev_attr = rdev->dev_attr;
+ 
+ 	sq->max_sge = init_attr->cap.max_send_sge;
+ 	entries = init_attr->cap.max_send_wr;
+@@ -1337,7 +1337,7 @@ static void bnxt_re_adjust_gsi_sq_attr(struct bnxt_re_qp *qp,
+ 
+ 	rdev = qp->rdev;
+ 	qplqp = &qp->qplib_qp;
+-	dev_attr = &rdev->dev_attr;
++	dev_attr = rdev->dev_attr;
+ 
+ 	if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) {
+ 		entries = bnxt_re_init_depth(init_attr->cap.max_send_wr + 1, uctx);
+@@ -1386,7 +1386,7 @@ static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
+ 
+ 	rdev = qp->rdev;
+ 	qplqp = &qp->qplib_qp;
+-	dev_attr = &rdev->dev_attr;
++	dev_attr = rdev->dev_attr;
+ 
+ 	/* Setup misc params */
+ 	ether_addr_copy(qplqp->smac, rdev->netdev->dev_addr);
+@@ -1556,7 +1556,7 @@ int bnxt_re_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *qp_init_attr,
+ 	ib_pd = ib_qp->pd;
+ 	pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
+ 	rdev = pd->rdev;
+-	dev_attr = &rdev->dev_attr;
++	dev_attr = rdev->dev_attr;
+ 	qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
+ 
+ 	uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
+@@ -1783,7 +1783,7 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq,
+ 	ib_pd = ib_srq->pd;
+ 	pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
+ 	rdev = pd->rdev;
+-	dev_attr = &rdev->dev_attr;
++	dev_attr = rdev->dev_attr;
+ 	srq = container_of(ib_srq, struct bnxt_re_srq, ib_srq);
+ 
+ 	if (srq_init_attr->attr.max_wr >= dev_attr->max_srq_wqes) {
+@@ -1814,8 +1814,10 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq,
+ 	srq->qplib_srq.wqe_size = bnxt_re_get_rwqe_size(dev_attr->max_srq_sges);
+ 	srq->qplib_srq.threshold = srq_init_attr->attr.srq_limit;
+ 	srq->srq_limit = srq_init_attr->attr.srq_limit;
+-	srq->qplib_srq.eventq_hw_ring_id = rdev->nq[0].ring_id;
+-	nq = &rdev->nq[0];
++	srq->qplib_srq.eventq_hw_ring_id = rdev->nqr->nq[0].ring_id;
++	srq->qplib_srq.sg_info.pgsize = PAGE_SIZE;
++	srq->qplib_srq.sg_info.pgshft = PAGE_SHIFT;
++	nq = &rdev->nqr->nq[0];
+ 
+ 	if (udata) {
+ 		rc = bnxt_re_init_user_srq(rdev, pd, srq, udata);
+@@ -1987,7 +1989,7 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
+ {
+ 	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
+ 	struct bnxt_re_dev *rdev = qp->rdev;
+-	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
++	struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
+ 	enum ib_qp_state curr_qp_state, new_qp_state;
+ 	int rc, entries;
+ 	unsigned int flags;
+@@ -3011,7 +3013,7 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ 	struct ib_udata *udata = &attrs->driver_udata;
+ 	struct bnxt_re_ucontext *uctx =
+ 		rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
+-	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
++	struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
+ 	struct bnxt_qplib_chip_ctx *cctx;
+ 	struct bnxt_qplib_nq *nq = NULL;
+ 	unsigned int nq_alloc_cnt;
+@@ -3070,7 +3072,7 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ 	 * used for getting the NQ index.
+ 	 */
+ 	nq_alloc_cnt = atomic_inc_return(&rdev->nq_alloc_cnt);
+-	nq = &rdev->nq[nq_alloc_cnt % (rdev->num_msix - 1)];
++	nq = &rdev->nqr->nq[nq_alloc_cnt % (rdev->nqr->num_msix - 1)];
+ 	cq->qplib_cq.max_wqe = entries;
+ 	cq->qplib_cq.cnq_hw_ring_id = nq->ring_id;
+ 	cq->qplib_cq.nq	= nq;
+@@ -3154,7 +3156,7 @@ int bnxt_re_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
+ 
+ 	cq =  container_of(ibcq, struct bnxt_re_cq, ib_cq);
+ 	rdev = cq->rdev;
+-	dev_attr = &rdev->dev_attr;
++	dev_attr = rdev->dev_attr;
+ 	if (!ibcq->uobject) {
+ 		ibdev_err(&rdev->ibdev, "Kernel CQ Resize not supported");
+ 		return -EOPNOTSUPP;
+@@ -4127,7 +4129,7 @@ static struct ib_mr *__bnxt_re_user_reg_mr(struct ib_pd *ib_pd, u64 length, u64
+ 	mr->qplib_mr.access_flags = __from_ib_access_flags(mr_access_flags);
+ 	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR;
+ 
+-	if (!_is_alloc_mr_unified(rdev->dev_attr.dev_cap_flags)) {
++	if (!_is_alloc_mr_unified(rdev->dev_attr->dev_cap_flags)) {
+ 		rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
+ 		if (rc) {
+ 			ibdev_err(&rdev->ibdev, "Failed to allocate MR rc = %d", rc);
+@@ -4219,7 +4221,7 @@ int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
+ 	struct bnxt_re_ucontext *uctx =
+ 		container_of(ctx, struct bnxt_re_ucontext, ib_uctx);
+ 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
+-	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
++	struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
+ 	struct bnxt_re_user_mmap_entry *entry;
+ 	struct bnxt_re_uctx_resp resp = {};
+ 	struct bnxt_re_uctx_req ureq = {};
+diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
+index 8abd1b723f8ff5..9bd837a5b8a1ad 100644
+--- a/drivers/infiniband/hw/bnxt_re/main.c
++++ b/drivers/infiniband/hw/bnxt_re/main.c
+@@ -152,6 +152,10 @@ static void bnxt_re_destroy_chip_ctx(struct bnxt_re_dev *rdev)
+ 
+ 	if (!rdev->chip_ctx)
+ 		return;
++
++	kfree(rdev->dev_attr);
++	rdev->dev_attr = NULL;
++
+ 	chip_ctx = rdev->chip_ctx;
+ 	rdev->chip_ctx = NULL;
+ 	rdev->rcfw.res = NULL;
+@@ -165,7 +169,7 @@ static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev)
+ {
+ 	struct bnxt_qplib_chip_ctx *chip_ctx;
+ 	struct bnxt_en_dev *en_dev;
+-	int rc;
++	int rc = -ENOMEM;
+ 
+ 	en_dev = rdev->en_dev;
+ 
+@@ -181,23 +185,30 @@ static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev)
+ 
+ 	rdev->qplib_res.cctx = rdev->chip_ctx;
+ 	rdev->rcfw.res = &rdev->qplib_res;
+-	rdev->qplib_res.dattr = &rdev->dev_attr;
++	rdev->dev_attr = kzalloc(sizeof(*rdev->dev_attr), GFP_KERNEL);
++	if (!rdev->dev_attr)
++		goto free_chip_ctx;
++	rdev->qplib_res.dattr = rdev->dev_attr;
+ 	rdev->qplib_res.is_vf = BNXT_EN_VF(en_dev);
+ 
+ 	bnxt_re_set_drv_mode(rdev);
+ 
+ 	bnxt_re_set_db_offset(rdev);
+ 	rc = bnxt_qplib_map_db_bar(&rdev->qplib_res);
+-	if (rc) {
+-		kfree(rdev->chip_ctx);
+-		rdev->chip_ctx = NULL;
+-		return rc;
+-	}
++	if (rc)
++		goto free_dev_attr;
+ 
+ 	if (bnxt_qplib_determine_atomics(en_dev->pdev))
+ 		ibdev_info(&rdev->ibdev,
+ 			   "platform doesn't support global atomics.");
+ 	return 0;
++free_dev_attr:
++	kfree(rdev->dev_attr);
++	rdev->dev_attr = NULL;
++free_chip_ctx:
++	kfree(rdev->chip_ctx);
++	rdev->chip_ctx = NULL;
++	return rc;
+ }
+ 
+ /* SR-IOV helper functions */
+@@ -219,7 +230,7 @@ static void bnxt_re_limit_pf_res(struct bnxt_re_dev *rdev)
+ 	struct bnxt_qplib_ctx *ctx;
+ 	int i;
+ 
+-	attr = &rdev->dev_attr;
++	attr = rdev->dev_attr;
+ 	ctx = &rdev->qplib_ctx;
+ 
+ 	ctx->qpc_count = min_t(u32, BNXT_RE_MAX_QPC_COUNT,
+@@ -233,7 +244,7 @@ static void bnxt_re_limit_pf_res(struct bnxt_re_dev *rdev)
+ 	if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
+ 		for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
+ 			rdev->qplib_ctx.tqm_ctx.qcount[i] =
+-			rdev->dev_attr.tqm_alloc_reqs[i];
++			rdev->dev_attr->tqm_alloc_reqs[i];
+ }
+ 
+ static void bnxt_re_limit_vf_res(struct bnxt_qplib_ctx *qplib_ctx, u32 num_vf)
+@@ -314,10 +325,12 @@ static void bnxt_re_stop_irq(void *handle)
+ 	int indx;
+ 
+ 	rdev = en_info->rdev;
++	if (!rdev)
++		return;
+ 	rcfw = &rdev->rcfw;
+ 
+-	for (indx = BNXT_RE_NQ_IDX; indx < rdev->num_msix; indx++) {
+-		nq = &rdev->nq[indx - 1];
++	for (indx = BNXT_RE_NQ_IDX; indx < rdev->nqr->num_msix; indx++) {
++		nq = &rdev->nqr->nq[indx - 1];
+ 		bnxt_qplib_nq_stop_irq(nq, false);
+ 	}
+ 
+@@ -334,7 +347,9 @@ static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
+ 	int indx, rc;
+ 
+ 	rdev = en_info->rdev;
+-	msix_ent = rdev->en_dev->msix_entries;
++	if (!rdev)
++		return;
++	msix_ent = rdev->nqr->msix_entries;
+ 	rcfw = &rdev->rcfw;
+ 	if (!ent) {
+ 		/* Not setting the f/w timeout bit in rcfw.
+@@ -349,8 +364,8 @@ static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
+ 	/* Vectors may change after restart, so update with new vectors
+ 	 * in device structure.
+ 	 */
+-	for (indx = 0; indx < rdev->num_msix; indx++)
+-		rdev->en_dev->msix_entries[indx].vector = ent[indx].vector;
++	for (indx = 0; indx < rdev->nqr->num_msix; indx++)
++		rdev->nqr->msix_entries[indx].vector = ent[indx].vector;
+ 
+ 	rc = bnxt_qplib_rcfw_start_irq(rcfw, msix_ent[BNXT_RE_AEQ_IDX].vector,
+ 				       false);
+@@ -358,8 +373,8 @@ static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
+ 		ibdev_warn(&rdev->ibdev, "Failed to reinit CREQ\n");
+ 		return;
+ 	}
+-	for (indx = BNXT_RE_NQ_IDX ; indx < rdev->num_msix; indx++) {
+-		nq = &rdev->nq[indx - 1];
++	for (indx = BNXT_RE_NQ_IDX ; indx < rdev->nqr->num_msix; indx++) {
++		nq = &rdev->nqr->nq[indx - 1];
+ 		rc = bnxt_qplib_nq_start_irq(nq, indx - 1,
+ 					     msix_ent[indx].vector, false);
+ 		if (rc) {
+@@ -943,7 +958,7 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
+ 
+ 	addrconf_addr_eui48((u8 *)&ibdev->node_guid, rdev->netdev->dev_addr);
+ 
+-	ibdev->num_comp_vectors	= rdev->num_msix - 1;
++	ibdev->num_comp_vectors	= rdev->nqr->num_msix - 1;
+ 	ibdev->dev.parent = &rdev->en_dev->pdev->dev;
+ 	ibdev->local_dma_lkey = BNXT_QPLIB_RSVD_LKEY;
+ 
+@@ -1276,8 +1291,8 @@ static void bnxt_re_cleanup_res(struct bnxt_re_dev *rdev)
+ {
+ 	int i;
+ 
+-	for (i = 1; i < rdev->num_msix; i++)
+-		bnxt_qplib_disable_nq(&rdev->nq[i - 1]);
++	for (i = 1; i < rdev->nqr->num_msix; i++)
++		bnxt_qplib_disable_nq(&rdev->nqr->nq[i - 1]);
+ 
+ 	if (rdev->qplib_res.rcfw)
+ 		bnxt_qplib_cleanup_res(&rdev->qplib_res);
+@@ -1291,10 +1306,10 @@ static int bnxt_re_init_res(struct bnxt_re_dev *rdev)
+ 
+ 	bnxt_qplib_init_res(&rdev->qplib_res);
+ 
+-	for (i = 1; i < rdev->num_msix ; i++) {
+-		db_offt = rdev->en_dev->msix_entries[i].db_offset;
+-		rc = bnxt_qplib_enable_nq(rdev->en_dev->pdev, &rdev->nq[i - 1],
+-					  i - 1, rdev->en_dev->msix_entries[i].vector,
++	for (i = 1; i < rdev->nqr->num_msix ; i++) {
++		db_offt = rdev->nqr->msix_entries[i].db_offset;
++		rc = bnxt_qplib_enable_nq(rdev->en_dev->pdev, &rdev->nqr->nq[i - 1],
++					  i - 1, rdev->nqr->msix_entries[i].vector,
+ 					  db_offt, &bnxt_re_cqn_handler,
+ 					  &bnxt_re_srqn_handler);
+ 		if (rc) {
+@@ -1307,20 +1322,22 @@ static int bnxt_re_init_res(struct bnxt_re_dev *rdev)
+ 	return 0;
+ fail:
+ 	for (i = num_vec_enabled; i >= 0; i--)
+-		bnxt_qplib_disable_nq(&rdev->nq[i]);
++		bnxt_qplib_disable_nq(&rdev->nqr->nq[i]);
+ 	return rc;
+ }
+ 
+ static void bnxt_re_free_nq_res(struct bnxt_re_dev *rdev)
+ {
++	struct bnxt_qplib_nq *nq;
+ 	u8 type;
+ 	int i;
+ 
+-	for (i = 0; i < rdev->num_msix - 1; i++) {
++	for (i = 0; i < rdev->nqr->num_msix - 1; i++) {
+ 		type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
+-		bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id, type);
+-		bnxt_qplib_free_nq(&rdev->nq[i]);
+-		rdev->nq[i].res = NULL;
++		nq = &rdev->nqr->nq[i];
++		bnxt_re_net_ring_free(rdev, nq->ring_id, type);
++		bnxt_qplib_free_nq(nq);
++		nq->res = NULL;
+ 	}
+ }
+ 
+@@ -1347,12 +1364,11 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
+ 
+ 	/* Configure and allocate resources for qplib */
+ 	rdev->qplib_res.rcfw = &rdev->rcfw;
+-	rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr);
++	rc = bnxt_qplib_get_dev_attr(&rdev->rcfw);
+ 	if (rc)
+ 		goto fail;
+ 
+-	rc = bnxt_qplib_alloc_res(&rdev->qplib_res, rdev->en_dev->pdev,
+-				  rdev->netdev, &rdev->dev_attr);
++	rc = bnxt_qplib_alloc_res(&rdev->qplib_res, rdev->netdev);
+ 	if (rc)
+ 		goto fail;
+ 
+@@ -1362,12 +1378,12 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
+ 	if (rc)
+ 		goto dealloc_res;
+ 
+-	for (i = 0; i < rdev->num_msix - 1; i++) {
++	for (i = 0; i < rdev->nqr->num_msix - 1; i++) {
+ 		struct bnxt_qplib_nq *nq;
+ 
+-		nq = &rdev->nq[i];
++		nq = &rdev->nqr->nq[i];
+ 		nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;
+-		rc = bnxt_qplib_alloc_nq(&rdev->qplib_res, &rdev->nq[i]);
++		rc = bnxt_qplib_alloc_nq(&rdev->qplib_res, nq);
+ 		if (rc) {
+ 			ibdev_err(&rdev->ibdev, "Alloc Failed NQ%d rc:%#x",
+ 				  i, rc);
+@@ -1375,17 +1391,17 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
+ 		}
+ 		type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
+ 		rattr.dma_arr = nq->hwq.pbl[PBL_LVL_0].pg_map_arr;
+-		rattr.pages = nq->hwq.pbl[rdev->nq[i].hwq.level].pg_count;
++		rattr.pages = nq->hwq.pbl[rdev->nqr->nq[i].hwq.level].pg_count;
+ 		rattr.type = type;
+ 		rattr.mode = RING_ALLOC_REQ_INT_MODE_MSIX;
+ 		rattr.depth = BNXT_QPLIB_NQE_MAX_CNT - 1;
+-		rattr.lrid = rdev->en_dev->msix_entries[i + 1].ring_idx;
++		rattr.lrid = rdev->nqr->msix_entries[i + 1].ring_idx;
+ 		rc = bnxt_re_net_ring_alloc(rdev, &rattr, &nq->ring_id);
+ 		if (rc) {
+ 			ibdev_err(&rdev->ibdev,
+ 				  "Failed to allocate NQ fw id with rc = 0x%x",
+ 				  rc);
+-			bnxt_qplib_free_nq(&rdev->nq[i]);
++			bnxt_qplib_free_nq(nq);
+ 			goto free_nq;
+ 		}
+ 		num_vec_created++;
+@@ -1394,8 +1410,8 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
+ free_nq:
+ 	for (i = num_vec_created - 1; i >= 0; i--) {
+ 		type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
+-		bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id, type);
+-		bnxt_qplib_free_nq(&rdev->nq[i]);
++		bnxt_re_net_ring_free(rdev, rdev->nqr->nq[i].ring_id, type);
++		bnxt_qplib_free_nq(&rdev->nqr->nq[i]);
+ 	}
+ 	bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
+ 			       &rdev->dpi_privileged);
+@@ -1584,6 +1600,21 @@ static int bnxt_re_ib_init(struct bnxt_re_dev *rdev)
+ 	return rc;
+ }
+ 
++static int bnxt_re_alloc_nqr_mem(struct bnxt_re_dev *rdev)
++{
++	rdev->nqr = kzalloc(sizeof(*rdev->nqr), GFP_KERNEL);
++	if (!rdev->nqr)
++		return -ENOMEM;
++
++	return 0;
++}
++
++static void bnxt_re_free_nqr_mem(struct bnxt_re_dev *rdev)
++{
++	kfree(rdev->nqr);
++	rdev->nqr = NULL;
++}
++
+ static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev, u8 op_type)
+ {
+ 	u8 type;
+@@ -1611,11 +1642,12 @@ static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev, u8 op_type)
+ 		bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
+ 	}
+ 
+-	rdev->num_msix = 0;
++	rdev->nqr->num_msix = 0;
+ 
+ 	if (rdev->pacing.dbr_pacing)
+ 		bnxt_re_deinitialize_dbr_pacing(rdev);
+ 
++	bnxt_re_free_nqr_mem(rdev);
+ 	bnxt_re_destroy_chip_ctx(rdev);
+ 	if (op_type == BNXT_RE_COMPLETE_REMOVE) {
+ 		if (test_and_clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags))
+@@ -1653,6 +1685,17 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type)
+ 	}
+ 	set_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);
+ 
++	if (rdev->en_dev->ulp_tbl->msix_requested < BNXT_RE_MIN_MSIX) {
++		ibdev_err(&rdev->ibdev,
++			  "RoCE requires minimum 2 MSI-X vectors, but only %d reserved\n",
++			  rdev->en_dev->ulp_tbl->msix_requested);
++		bnxt_unregister_dev(rdev->en_dev);
++		clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);
++		return -EINVAL;
++	}
++	ibdev_dbg(&rdev->ibdev, "Got %d MSI-X vectors\n",
++		  rdev->en_dev->ulp_tbl->msix_requested);
++
+ 	rc = bnxt_re_setup_chip_ctx(rdev);
+ 	if (rc) {
+ 		bnxt_unregister_dev(rdev->en_dev);
+@@ -1661,19 +1704,20 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type)
+ 		return -EINVAL;
+ 	}
+ 
++	rc = bnxt_re_alloc_nqr_mem(rdev);
++	if (rc) {
++		bnxt_re_destroy_chip_ctx(rdev);
++		bnxt_unregister_dev(rdev->en_dev);
++		clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);
++		return rc;
++	}
++	rdev->nqr->num_msix = rdev->en_dev->ulp_tbl->msix_requested;
++	memcpy(rdev->nqr->msix_entries, rdev->en_dev->msix_entries,
++	       sizeof(struct bnxt_msix_entry) * rdev->nqr->num_msix);
++
+ 	/* Check whether VF or PF */
+ 	bnxt_re_get_sriov_func_type(rdev);
+ 
+-	if (!rdev->en_dev->ulp_tbl->msix_requested) {
+-		ibdev_err(&rdev->ibdev,
+-			  "Failed to get MSI-X vectors: %#x\n", rc);
+-		rc = -EINVAL;
+-		goto fail;
+-	}
+-	ibdev_dbg(&rdev->ibdev, "Got %d MSI-X vectors\n",
+-		  rdev->en_dev->ulp_tbl->msix_requested);
+-	rdev->num_msix = rdev->en_dev->ulp_tbl->msix_requested;
+-
+ 	bnxt_re_query_hwrm_intf_version(rdev);
+ 
+ 	/* Establish RCFW Communication Channel to initialize the context
+@@ -1695,14 +1739,14 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type)
+ 	rattr.type = type;
+ 	rattr.mode = RING_ALLOC_REQ_INT_MODE_MSIX;
+ 	rattr.depth = BNXT_QPLIB_CREQE_MAX_CNT - 1;
+-	rattr.lrid = rdev->en_dev->msix_entries[BNXT_RE_AEQ_IDX].ring_idx;
++	rattr.lrid = rdev->nqr->msix_entries[BNXT_RE_AEQ_IDX].ring_idx;
+ 	rc = bnxt_re_net_ring_alloc(rdev, &rattr, &creq->ring_id);
+ 	if (rc) {
+ 		ibdev_err(&rdev->ibdev, "Failed to allocate CREQ: %#x\n", rc);
+ 		goto free_rcfw;
+ 	}
+-	db_offt = rdev->en_dev->msix_entries[BNXT_RE_AEQ_IDX].db_offset;
+-	vid = rdev->en_dev->msix_entries[BNXT_RE_AEQ_IDX].vector;
++	db_offt = rdev->nqr->msix_entries[BNXT_RE_AEQ_IDX].db_offset;
++	vid = rdev->nqr->msix_entries[BNXT_RE_AEQ_IDX].vector;
+ 	rc = bnxt_qplib_enable_rcfw_channel(&rdev->rcfw,
+ 					    vid, db_offt,
+ 					    &bnxt_re_aeq_handler);
+@@ -1722,7 +1766,7 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type)
+ 			rdev->pacing.dbr_pacing = false;
+ 		}
+ 	}
+-	rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr);
++	rc = bnxt_qplib_get_dev_attr(&rdev->rcfw);
+ 	if (rc)
+ 		goto disable_rcfw;
+ 
+@@ -2047,6 +2091,7 @@ static int bnxt_re_suspend(struct auxiliary_device *adev, pm_message_t state)
+ 	ibdev_info(&rdev->ibdev, "%s: L2 driver notified to stop en_state 0x%lx",
+ 		   __func__, en_dev->en_state);
+ 	bnxt_re_remove_device(rdev, BNXT_RE_PRE_RECOVERY_REMOVE, adev);
++	bnxt_re_update_en_info_rdev(NULL, en_info, adev);
+ 	mutex_unlock(&bnxt_re_mutex);
+ 
+ 	return 0;
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
+index 96ceec1e8199a6..02922a0987ad7a 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
+@@ -876,14 +876,13 @@ void bnxt_qplib_free_res(struct bnxt_qplib_res *res)
+ 	bnxt_qplib_free_dpi_tbl(res, &res->dpi_tbl);
+ }
+ 
+-int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct pci_dev *pdev,
+-			 struct net_device *netdev,
+-			 struct bnxt_qplib_dev_attr *dev_attr)
++int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct net_device *netdev)
+ {
++	struct bnxt_qplib_dev_attr *dev_attr;
+ 	int rc;
+ 
+-	res->pdev = pdev;
+ 	res->netdev = netdev;
++	dev_attr = res->dattr;
+ 
+ 	rc = bnxt_qplib_alloc_sgid_tbl(res, &res->sgid_tbl, dev_attr->max_sgid);
+ 	if (rc)
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
+index c2f710364e0ffe..b40cff8252bc4d 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
++++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
+@@ -421,9 +421,7 @@ int bnxt_qplib_dealloc_dpi(struct bnxt_qplib_res *res,
+ void bnxt_qplib_cleanup_res(struct bnxt_qplib_res *res);
+ int bnxt_qplib_init_res(struct bnxt_qplib_res *res);
+ void bnxt_qplib_free_res(struct bnxt_qplib_res *res);
+-int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct pci_dev *pdev,
+-			 struct net_device *netdev,
+-			 struct bnxt_qplib_dev_attr *dev_attr);
++int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct net_device *netdev);
+ void bnxt_qplib_free_ctx(struct bnxt_qplib_res *res,
+ 			 struct bnxt_qplib_ctx *ctx);
+ int bnxt_qplib_alloc_ctx(struct bnxt_qplib_res *res,
+@@ -546,6 +544,14 @@ static inline bool _is_ext_stats_supported(u16 dev_cap_flags)
+ 		CREQ_QUERY_FUNC_RESP_SB_EXT_STATS;
+ }
+ 
++static inline int bnxt_ext_stats_supported(struct bnxt_qplib_chip_ctx *ctx,
++					   u16 flags, bool virtfn)
++{
++	/* ext stats supported if cap flag is set AND is a PF OR a Thor2 VF */
++	return (_is_ext_stats_supported(flags) &&
++		((virtfn && bnxt_qplib_is_chip_gen_p7(ctx)) || (!virtfn)));
++}
++
+ static inline bool _is_hw_retx_supported(u16 dev_cap_flags)
+ {
+ 	return dev_cap_flags &
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+index 3cca7b1395f6a7..807439b1acb51f 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+@@ -88,9 +88,9 @@ static void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw,
+ 	fw_ver[3] = resp.fw_rsvd;
+ }
+ 
+-int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
+-			    struct bnxt_qplib_dev_attr *attr)
++int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw)
+ {
++	struct bnxt_qplib_dev_attr *attr = rcfw->res->dattr;
+ 	struct creq_query_func_resp resp = {};
+ 	struct bnxt_qplib_cmdqmsg msg = {};
+ 	struct creq_query_func_resp_sb *sb;
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+index ecf3f45fea74fe..de959b3c28e01f 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h
++++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+@@ -325,8 +325,7 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
+ int bnxt_qplib_update_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
+ 			   struct bnxt_qplib_gid *gid, u16 gid_idx,
+ 			   const u8 *smac);
+-int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
+-			    struct bnxt_qplib_dev_attr *attr);
++int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw);
+ int bnxt_qplib_set_func_resources(struct bnxt_qplib_res *res,
+ 				  struct bnxt_qplib_rcfw *rcfw,
+ 				  struct bnxt_qplib_ctx *ctx);
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+index 0144e7210d05a1..f5c3e560df58d7 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -1286,10 +1286,8 @@ static u32 hns_roce_cmdq_tx_timeout(u16 opcode, u32 tx_timeout)
+ 	return tx_timeout;
+ }
+ 
+-static void hns_roce_wait_csq_done(struct hns_roce_dev *hr_dev, u16 opcode)
++static void hns_roce_wait_csq_done(struct hns_roce_dev *hr_dev, u32 tx_timeout)
+ {
+-	struct hns_roce_v2_priv *priv = hr_dev->priv;
+-	u32 tx_timeout = hns_roce_cmdq_tx_timeout(opcode, priv->cmq.tx_timeout);
+ 	u32 timeout = 0;
+ 
+ 	do {
+@@ -1299,8 +1297,9 @@ static void hns_roce_wait_csq_done(struct hns_roce_dev *hr_dev, u16 opcode)
+ 	} while (++timeout < tx_timeout);
+ }
+ 
+-static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
+-			       struct hns_roce_cmq_desc *desc, int num)
++static int __hns_roce_cmq_send_one(struct hns_roce_dev *hr_dev,
++				   struct hns_roce_cmq_desc *desc,
++				   int num, u32 tx_timeout)
+ {
+ 	struct hns_roce_v2_priv *priv = hr_dev->priv;
+ 	struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
+@@ -1309,8 +1308,6 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
+ 	int ret;
+ 	int i;
+ 
+-	spin_lock_bh(&csq->lock);
+-
+ 	tail = csq->head;
+ 
+ 	for (i = 0; i < num; i++) {
+@@ -1324,22 +1321,17 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
+ 
+ 	atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_CMDS_CNT]);
+ 
+-	hns_roce_wait_csq_done(hr_dev, le16_to_cpu(desc->opcode));
++	hns_roce_wait_csq_done(hr_dev, tx_timeout);
+ 	if (hns_roce_cmq_csq_done(hr_dev)) {
+ 		ret = 0;
+ 		for (i = 0; i < num; i++) {
+ 			/* check the result of hardware write back */
+-			desc[i] = csq->desc[tail++];
++			desc_ret = le16_to_cpu(csq->desc[tail++].retval);
+ 			if (tail == csq->desc_num)
+ 				tail = 0;
+-
+-			desc_ret = le16_to_cpu(desc[i].retval);
+ 			if (likely(desc_ret == CMD_EXEC_SUCCESS))
+ 				continue;
+ 
+-			dev_err_ratelimited(hr_dev->dev,
+-					    "Cmdq IO error, opcode = 0x%x, return = 0x%x.\n",
+-					    desc->opcode, desc_ret);
+ 			ret = hns_roce_cmd_err_convert_errno(desc_ret);
+ 		}
+ 	} else {
+@@ -1354,14 +1346,54 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
+ 		ret = -EAGAIN;
+ 	}
+ 
+-	spin_unlock_bh(&csq->lock);
+-
+ 	if (ret)
+ 		atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_CMDS_ERR_CNT]);
+ 
+ 	return ret;
+ }
+ 
++static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
++			       struct hns_roce_cmq_desc *desc, int num)
++{
++	struct hns_roce_v2_priv *priv = hr_dev->priv;
++	struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
++	u16 opcode = le16_to_cpu(desc->opcode);
++	u32 tx_timeout = hns_roce_cmdq_tx_timeout(opcode, priv->cmq.tx_timeout);
++	u8 try_cnt = HNS_ROCE_OPC_POST_MB_TRY_CNT;
++	u32 rsv_tail;
++	int ret;
++	int i;
++
++	while (try_cnt) {
++		try_cnt--;
++
++		spin_lock_bh(&csq->lock);
++		rsv_tail = csq->head;
++		ret = __hns_roce_cmq_send_one(hr_dev, desc, num, tx_timeout);
++		if (opcode == HNS_ROCE_OPC_POST_MB && ret == -ETIME &&
++		    try_cnt) {
++			spin_unlock_bh(&csq->lock);
++			mdelay(HNS_ROCE_OPC_POST_MB_RETRY_GAP_MSEC);
++			continue;
++		}
++
++		for (i = 0; i < num; i++) {
++			desc[i] = csq->desc[rsv_tail++];
++			if (rsv_tail == csq->desc_num)
++				rsv_tail = 0;
++		}
++		spin_unlock_bh(&csq->lock);
++		break;
++	}
++
++	if (ret)
++		dev_err_ratelimited(hr_dev->dev,
++				    "Cmdq IO error, opcode = 0x%x, return = %d.\n",
++				    opcode, ret);
++
++	return ret;
++}
++
+ static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
+ 			     struct hns_roce_cmq_desc *desc, int num)
+ {
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+index cbdbc9edbce6ec..91a5665465ffba 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+@@ -230,6 +230,8 @@ enum hns_roce_opcode_type {
+ };
+ 
+ #define HNS_ROCE_OPC_POST_MB_TIMEOUT 35000
++#define HNS_ROCE_OPC_POST_MB_TRY_CNT 8
++#define HNS_ROCE_OPC_POST_MB_RETRY_GAP_MSEC 5
+ struct hns_roce_cmdq_tx_timeout_map {
+ 	u16 opcode;
+ 	u32 tx_timeout;
+diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c
+index 67c2d43135a8af..457cea6d990958 100644
+--- a/drivers/infiniband/hw/mana/main.c
++++ b/drivers/infiniband/hw/mana/main.c
+@@ -174,7 +174,7 @@ static int mana_gd_allocate_doorbell_page(struct gdma_context *gc,
+ 
+ 	req.resource_type = GDMA_RESOURCE_DOORBELL_PAGE;
+ 	req.num_resources = 1;
+-	req.alignment = 1;
++	req.alignment = PAGE_SIZE / MANA_PAGE_SIZE;
+ 
+ 	/* Have GDMA start searching from 0 */
+ 	req.allocated_resources = 0;
+diff --git a/drivers/infiniband/hw/mlx5/ah.c b/drivers/infiniband/hw/mlx5/ah.c
+index 505bc47fd575d5..99036afb3aef0b 100644
+--- a/drivers/infiniband/hw/mlx5/ah.c
++++ b/drivers/infiniband/hw/mlx5/ah.c
+@@ -67,7 +67,8 @@ static void create_ib_ah(struct mlx5_ib_dev *dev, struct mlx5_ib_ah *ah,
+ 		ah->av.tclass = grh->traffic_class;
+ 	}
+ 
+-	ah->av.stat_rate_sl = (rdma_ah_get_static_rate(ah_attr) << 4);
++	ah->av.stat_rate_sl =
++		(mlx5r_ib_rate(dev, rdma_ah_get_static_rate(ah_attr)) << 4);
+ 
+ 	if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
+ 		if (init_attr->xmit_slave)
+diff --git a/drivers/infiniband/hw/mlx5/counters.c b/drivers/infiniband/hw/mlx5/counters.c
+index 4f6c1968a2ee3c..81cfa74147a183 100644
+--- a/drivers/infiniband/hw/mlx5/counters.c
++++ b/drivers/infiniband/hw/mlx5/counters.c
+@@ -546,6 +546,7 @@ static int mlx5_ib_counter_bind_qp(struct rdma_counter *counter,
+ 				   struct ib_qp *qp)
+ {
+ 	struct mlx5_ib_dev *dev = to_mdev(qp->device);
++	bool new = false;
+ 	int err;
+ 
+ 	if (!counter->id) {
+@@ -560,6 +561,7 @@ static int mlx5_ib_counter_bind_qp(struct rdma_counter *counter,
+ 			return err;
+ 		counter->id =
+ 			MLX5_GET(alloc_q_counter_out, out, counter_set_id);
++		new = true;
+ 	}
+ 
+ 	err = mlx5_ib_qp_set_counter(qp, counter);
+@@ -569,8 +571,10 @@ static int mlx5_ib_counter_bind_qp(struct rdma_counter *counter,
+ 	return 0;
+ 
+ fail_set_counter:
+-	mlx5_ib_counter_dealloc(counter);
+-	counter->id = 0;
++	if (new) {
++		mlx5_ib_counter_dealloc(counter);
++		counter->id = 0;
++	}
+ 
+ 	return err;
+ }
+diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
+index bb02b6adbf2c21..753faa9ad06a88 100644
+--- a/drivers/infiniband/hw/mlx5/mr.c
++++ b/drivers/infiniband/hw/mlx5/mr.c
+@@ -1550,7 +1550,7 @@ static void mlx5_ib_dmabuf_invalidate_cb(struct dma_buf_attachment *attach)
+ 
+ 	dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);
+ 
+-	if (!umem_dmabuf->sgt)
++	if (!umem_dmabuf->sgt || !mr)
+ 		return;
+ 
+ 	mlx5r_umr_update_mr_pas(mr, MLX5_IB_UPD_XLT_ZAP);
+@@ -1935,7 +1935,8 @@ mlx5_alloc_priv_descs(struct ib_device *device,
+ static void
+ mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
+ {
+-	if (!mr->umem && !mr->data_direct && mr->descs) {
++	if (!mr->umem && !mr->data_direct &&
++	    mr->ibmr.type != IB_MR_TYPE_DM && mr->descs) {
+ 		struct ib_device *device = mr->ibmr.device;
+ 		int size = mr->max_descs * mr->desc_size;
+ 		struct mlx5_ib_dev *dev = to_mdev(device);
+@@ -2022,11 +2023,16 @@ static int mlx5_revoke_mr(struct mlx5_ib_mr *mr)
+ 	struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
+ 	struct mlx5_cache_ent *ent = mr->mmkey.cache_ent;
+ 	bool is_odp = is_odp_mr(mr);
++	bool is_odp_dma_buf = is_dmabuf_mr(mr) &&
++			!to_ib_umem_dmabuf(mr->umem)->pinned;
+ 	int ret = 0;
+ 
+ 	if (is_odp)
+ 		mutex_lock(&to_ib_umem_odp(mr->umem)->umem_mutex);
+ 
++	if (is_odp_dma_buf)
++		dma_resv_lock(to_ib_umem_dmabuf(mr->umem)->attach->dmabuf->resv, NULL);
++
+ 	if (mr->mmkey.cacheable && !mlx5r_umr_revoke_mr(mr) && !cache_ent_find_and_store(dev, mr)) {
+ 		ent = mr->mmkey.cache_ent;
+ 		/* upon storing to a clean temp entry - schedule its cleanup */
+@@ -2054,6 +2060,12 @@ static int mlx5_revoke_mr(struct mlx5_ib_mr *mr)
+ 		mutex_unlock(&to_ib_umem_odp(mr->umem)->umem_mutex);
+ 	}
+ 
++	if (is_odp_dma_buf) {
++		if (!ret)
++			to_ib_umem_dmabuf(mr->umem)->private = NULL;
++		dma_resv_unlock(to_ib_umem_dmabuf(mr->umem)->attach->dmabuf->resv);
++	}
++
+ 	return ret;
+ }
+ 
+diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
+index 1d3bf56157702d..b4e2a6f9cb9c3d 100644
+--- a/drivers/infiniband/hw/mlx5/odp.c
++++ b/drivers/infiniband/hw/mlx5/odp.c
+@@ -242,6 +242,7 @@ static void destroy_unused_implicit_child_mr(struct mlx5_ib_mr *mr)
+ 	if (__xa_cmpxchg(&imr->implicit_children, idx, mr, NULL, GFP_KERNEL) !=
+ 	    mr) {
+ 		xa_unlock(&imr->implicit_children);
++		mlx5r_deref_odp_mkey(&imr->mmkey);
+ 		return;
+ 	}
+ 
+diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
+index 10ce3b44f645f4..ded139b4e87aa4 100644
+--- a/drivers/infiniband/hw/mlx5/qp.c
++++ b/drivers/infiniband/hw/mlx5/qp.c
+@@ -3420,11 +3420,11 @@ static int ib_to_mlx5_rate_map(u8 rate)
+ 	return 0;
+ }
+ 
+-static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
++int mlx5r_ib_rate(struct mlx5_ib_dev *dev, u8 rate)
+ {
+ 	u32 stat_rate_support;
+ 
+-	if (rate == IB_RATE_PORT_CURRENT)
++	if (rate == IB_RATE_PORT_CURRENT || rate == IB_RATE_800_GBPS)
+ 		return 0;
+ 
+ 	if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_800_GBPS)
+@@ -3569,7 +3569,7 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
+ 		       sizeof(grh->dgid.raw));
+ 	}
+ 
+-	err = ib_rate_to_mlx5(dev, rdma_ah_get_static_rate(ah));
++	err = mlx5r_ib_rate(dev, rdma_ah_get_static_rate(ah));
+ 	if (err < 0)
+ 		return err;
+ 	MLX5_SET(ads, path, stat_rate, err);
+@@ -4547,6 +4547,8 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ 
+ 		set_id = mlx5_ib_get_counters_id(dev, attr->port_num - 1);
+ 		MLX5_SET(dctc, dctc, counter_set_id, set_id);
++
++		qp->port = attr->port_num;
+ 	} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
+ 		struct mlx5_ib_modify_qp_resp resp = {};
+ 		u32 out[MLX5_ST_SZ_DW(create_dct_out)] = {};
+@@ -5033,7 +5035,7 @@ static int mlx5_ib_dct_query_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *mqp,
+ 	}
+ 
+ 	if (qp_attr_mask & IB_QP_PORT)
+-		qp_attr->port_num = MLX5_GET(dctc, dctc, port);
++		qp_attr->port_num = mqp->port;
+ 	if (qp_attr_mask & IB_QP_MIN_RNR_TIMER)
+ 		qp_attr->min_rnr_timer = MLX5_GET(dctc, dctc, min_rnr_nak);
+ 	if (qp_attr_mask & IB_QP_AV) {
+diff --git a/drivers/infiniband/hw/mlx5/qp.h b/drivers/infiniband/hw/mlx5/qp.h
+index b6ee7c3ee1ca1b..2530e7730635f3 100644
+--- a/drivers/infiniband/hw/mlx5/qp.h
++++ b/drivers/infiniband/hw/mlx5/qp.h
+@@ -56,4 +56,5 @@ int mlx5_core_xrcd_dealloc(struct mlx5_ib_dev *dev, u32 xrcdn);
+ int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter);
+ int mlx5_ib_qp_event_init(void);
+ void mlx5_ib_qp_event_cleanup(void);
++int mlx5r_ib_rate(struct mlx5_ib_dev *dev, u8 rate);
+ #endif /* _MLX5_IB_QP_H */
+diff --git a/drivers/infiniband/hw/mlx5/umr.c b/drivers/infiniband/hw/mlx5/umr.c
+index 887fd6fa3ba930..793f3c5c4d0126 100644
+--- a/drivers/infiniband/hw/mlx5/umr.c
++++ b/drivers/infiniband/hw/mlx5/umr.c
+@@ -231,30 +231,6 @@ void mlx5r_umr_cleanup(struct mlx5_ib_dev *dev)
+ 	ib_dealloc_pd(dev->umrc.pd);
+ }
+ 
+-static int mlx5r_umr_recover(struct mlx5_ib_dev *dev)
+-{
+-	struct umr_common *umrc = &dev->umrc;
+-	struct ib_qp_attr attr;
+-	int err;
+-
+-	attr.qp_state = IB_QPS_RESET;
+-	err = ib_modify_qp(umrc->qp, &attr, IB_QP_STATE);
+-	if (err) {
+-		mlx5_ib_dbg(dev, "Couldn't modify UMR QP\n");
+-		goto err;
+-	}
+-
+-	err = mlx5r_umr_qp_rst2rts(dev, umrc->qp);
+-	if (err)
+-		goto err;
+-
+-	umrc->state = MLX5_UMR_STATE_ACTIVE;
+-	return 0;
+-
+-err:
+-	umrc->state = MLX5_UMR_STATE_ERR;
+-	return err;
+-}
+ 
+ static int mlx5r_umr_post_send(struct ib_qp *ibqp, u32 mkey, struct ib_cqe *cqe,
+ 			       struct mlx5r_umr_wqe *wqe, bool with_data)
+@@ -302,6 +278,61 @@ static int mlx5r_umr_post_send(struct ib_qp *ibqp, u32 mkey, struct ib_cqe *cqe,
+ 	return err;
+ }
+ 
++static int mlx5r_umr_recover(struct mlx5_ib_dev *dev, u32 mkey,
++			     struct mlx5r_umr_context *umr_context,
++			     struct mlx5r_umr_wqe *wqe, bool with_data)
++{
++	struct umr_common *umrc = &dev->umrc;
++	struct ib_qp_attr attr;
++	int err;
++
++	mutex_lock(&umrc->lock);
++	/* Preventing any further WRs to be sent now */
++	if (umrc->state != MLX5_UMR_STATE_RECOVER) {
++		mlx5_ib_warn(dev, "UMR recovery encountered an unexpected state=%d\n",
++			     umrc->state);
++		umrc->state = MLX5_UMR_STATE_RECOVER;
++	}
++	mutex_unlock(&umrc->lock);
++
++	/* Send a final/barrier WR (the failed one) and wait for its completion.
++	 * This will ensure that all the previous WRs got a completion before
++	 * we set the QP state to RESET.
++	 */
++	err = mlx5r_umr_post_send(umrc->qp, mkey, &umr_context->cqe, wqe,
++				  with_data);
++	if (err) {
++		mlx5_ib_warn(dev, "UMR recovery post send failed, err %d\n", err);
++		goto err;
++	}
++
++	/* Since the QP is in an error state, it will only receive
++	 * IB_WC_WR_FLUSH_ERR. However, as it serves only as a barrier
++	 * we don't care about its status.
++	 */
++	wait_for_completion(&umr_context->done);
++
++	attr.qp_state = IB_QPS_RESET;
++	err = ib_modify_qp(umrc->qp, &attr, IB_QP_STATE);
++	if (err) {
++		mlx5_ib_warn(dev, "Couldn't modify UMR QP to RESET, err=%d\n", err);
++		goto err;
++	}
++
++	err = mlx5r_umr_qp_rst2rts(dev, umrc->qp);
++	if (err) {
++		mlx5_ib_warn(dev, "Couldn't modify UMR QP to RTS, err=%d\n", err);
++		goto err;
++	}
++
++	umrc->state = MLX5_UMR_STATE_ACTIVE;
++	return 0;
++
++err:
++	umrc->state = MLX5_UMR_STATE_ERR;
++	return err;
++}
++
+ static void mlx5r_umr_done(struct ib_cq *cq, struct ib_wc *wc)
+ {
+ 	struct mlx5_ib_umr_context *context =
+@@ -366,9 +397,7 @@ static int mlx5r_umr_post_send_wait(struct mlx5_ib_dev *dev, u32 mkey,
+ 		mlx5_ib_warn(dev,
+ 			"reg umr failed (%u). Trying to recover and resubmit the flushed WQEs, mkey = %u\n",
+ 			umr_context.status, mkey);
+-		mutex_lock(&umrc->lock);
+-		err = mlx5r_umr_recover(dev);
+-		mutex_unlock(&umrc->lock);
++		err = mlx5r_umr_recover(dev, mkey, &umr_context, wqe, with_data);
+ 		if (err)
+ 			mlx5_ib_warn(dev, "couldn't recover UMR, err %d\n",
+ 				     err);
+diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
+index eaf862e8dea1a9..7f553f7aa3cb3b 100644
+--- a/drivers/iommu/intel/dmar.c
++++ b/drivers/iommu/intel/dmar.c
+@@ -2056,6 +2056,7 @@ int enable_drhd_fault_handling(unsigned int cpu)
+ 	/*
+ 	 * Enable fault control interrupt.
+ 	 */
++	guard(rwsem_read)(&dmar_global_lock);
+ 	for_each_iommu(iommu, drhd) {
+ 		u32 fault_status;
+ 		int ret;
+diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
+index cc23cfcdeb2d59..9c46a4cd384842 100644
+--- a/drivers/iommu/intel/iommu.c
++++ b/drivers/iommu/intel/iommu.c
+@@ -3307,7 +3307,14 @@ int __init intel_iommu_init(void)
+ 		iommu_device_sysfs_add(&iommu->iommu, NULL,
+ 				       intel_iommu_groups,
+ 				       "%s", iommu->name);
++		/*
++		 * The iommu device probe is protected by the iommu_probe_device_lock.
++		 * Release the dmar_global_lock before entering the device probe path
++		 * to avoid unnecessary lock order splat.
++		 */
++		up_read(&dmar_global_lock);
+ 		iommu_device_register(&iommu->iommu, &intel_iommu_ops, NULL);
++		down_read(&dmar_global_lock);
+ 
+ 		iommu_pmu_register(iommu);
+ 	}
+@@ -4547,9 +4554,6 @@ static int context_setup_pass_through_cb(struct pci_dev *pdev, u16 alias, void *
+ {
+ 	struct device *dev = data;
+ 
+-	if (dev != &pdev->dev)
+-		return 0;
+-
+ 	return context_setup_pass_through(dev, PCI_BUS_NUM(alias), alias & 0xff);
+ }
+ 
+diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
+index ee9f7cecd78e0e..555dc06b942287 100644
+--- a/drivers/md/dm-integrity.c
++++ b/drivers/md/dm-integrity.c
+@@ -3790,10 +3790,6 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
+ 		break;
+ 
+ 	case STATUSTYPE_TABLE: {
+-		__u64 watermark_percentage = (__u64)(ic->journal_entries - ic->free_sectors_threshold) * 100;
+-
+-		watermark_percentage += ic->journal_entries / 2;
+-		do_div(watermark_percentage, ic->journal_entries);
+ 		arg_count = 3;
+ 		arg_count += !!ic->meta_dev;
+ 		arg_count += ic->sectors_per_block != 1;
+@@ -3826,6 +3822,10 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
+ 		DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors);
+ 		DMEMIT(" buffer_sectors:%u", 1U << ic->log2_buffer_sectors);
+ 		if (ic->mode == 'J') {
++			__u64 watermark_percentage = (__u64)(ic->journal_entries - ic->free_sectors_threshold) * 100;
++
++			watermark_percentage += ic->journal_entries / 2;
++			do_div(watermark_percentage, ic->journal_entries);
+ 			DMEMIT(" journal_watermark:%u", (unsigned int)watermark_percentage);
+ 			DMEMIT(" commit_time:%u", ic->autocommit_msec);
+ 		}
+diff --git a/drivers/md/dm-vdo/dedupe.c b/drivers/md/dm-vdo/dedupe.c
+index 80628ae93fbacc..5a74b3a85ec435 100644
+--- a/drivers/md/dm-vdo/dedupe.c
++++ b/drivers/md/dm-vdo/dedupe.c
+@@ -2178,6 +2178,7 @@ static int initialize_index(struct vdo *vdo, struct hash_zones *zones)
+ 
+ 	vdo_set_dedupe_index_timeout_interval(vdo_dedupe_index_timeout_interval);
+ 	vdo_set_dedupe_index_min_timer_interval(vdo_dedupe_index_min_timer_interval);
++	spin_lock_init(&zones->lock);
+ 
+ 	/*
+ 	 * Since we will save up the timeouts that would have been reported but were ratelimited,
+diff --git a/drivers/net/dsa/realtek/Kconfig b/drivers/net/dsa/realtek/Kconfig
+index 6989972eebc306..10687722d14c08 100644
+--- a/drivers/net/dsa/realtek/Kconfig
++++ b/drivers/net/dsa/realtek/Kconfig
+@@ -43,4 +43,10 @@ config NET_DSA_REALTEK_RTL8366RB
+ 	help
+ 	  Select to enable support for Realtek RTL8366RB.
+ 
++config NET_DSA_REALTEK_RTL8366RB_LEDS
++	bool "Support RTL8366RB LED control"
++	depends on (LEDS_CLASS=y || LEDS_CLASS=NET_DSA_REALTEK_RTL8366RB)
++	depends on NET_DSA_REALTEK_RTL8366RB
++	default NET_DSA_REALTEK_RTL8366RB
++
+ endif
+diff --git a/drivers/net/dsa/realtek/Makefile b/drivers/net/dsa/realtek/Makefile
+index 35491dc20d6d6e..17367bcba496c1 100644
+--- a/drivers/net/dsa/realtek/Makefile
++++ b/drivers/net/dsa/realtek/Makefile
+@@ -12,4 +12,7 @@ endif
+ 
+ obj-$(CONFIG_NET_DSA_REALTEK_RTL8366RB) += rtl8366.o
+ rtl8366-objs 				:= rtl8366-core.o rtl8366rb.o
++ifdef CONFIG_NET_DSA_REALTEK_RTL8366RB_LEDS
++rtl8366-objs 				+= rtl8366rb-leds.o
++endif
+ obj-$(CONFIG_NET_DSA_REALTEK_RTL8365MB) += rtl8365mb.o
+diff --git a/drivers/net/dsa/realtek/rtl8366rb-leds.c b/drivers/net/dsa/realtek/rtl8366rb-leds.c
+new file mode 100644
+index 00000000000000..99c890681ae607
+--- /dev/null
++++ b/drivers/net/dsa/realtek/rtl8366rb-leds.c
+@@ -0,0 +1,177 @@
++// SPDX-License-Identifier: GPL-2.0
++
++#include <linux/bitops.h>
++#include <linux/regmap.h>
++#include <net/dsa.h>
++#include "rtl83xx.h"
++#include "rtl8366rb.h"
++
++static inline u32 rtl8366rb_led_group_port_mask(u8 led_group, u8 port)
++{
++	switch (led_group) {
++	case 0:
++		return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
++	case 1:
++		return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
++	case 2:
++		return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
++	case 3:
++		return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
++	default:
++		return 0;
++	}
++}
++
++static int rb8366rb_get_port_led(struct rtl8366rb_led *led)
++{
++	struct realtek_priv *priv = led->priv;
++	u8 led_group = led->led_group;
++	u8 port_num = led->port_num;
++	int ret;
++	u32 val;
++
++	ret = regmap_read(priv->map, RTL8366RB_LED_X_X_CTRL_REG(led_group),
++			  &val);
++	if (ret) {
++		dev_err(priv->dev, "error reading LED on port %d group %d\n",
++			led_group, port_num);
++		return ret;
++	}
++
++	return !!(val & rtl8366rb_led_group_port_mask(led_group, port_num));
++}
++
++static int rb8366rb_set_port_led(struct rtl8366rb_led *led, bool enable)
++{
++	struct realtek_priv *priv = led->priv;
++	u8 led_group = led->led_group;
++	u8 port_num = led->port_num;
++	int ret;
++
++	ret = regmap_update_bits(priv->map,
++				 RTL8366RB_LED_X_X_CTRL_REG(led_group),
++				 rtl8366rb_led_group_port_mask(led_group,
++							       port_num),
++				 enable ? 0xffff : 0);
++	if (ret) {
++		dev_err(priv->dev, "error updating LED on port %d group %d\n",
++			led_group, port_num);
++		return ret;
++	}
++
++	/* Change the LED group to manually controlled LEDs if required */
++	ret = rb8366rb_set_ledgroup_mode(priv, led_group,
++					 RTL8366RB_LEDGROUP_FORCE);
++
++	if (ret) {
++		dev_err(priv->dev, "error updating LED GROUP group %d\n",
++			led_group);
++		return ret;
++	}
++
++	return 0;
++}
++
++static int
++rtl8366rb_cled_brightness_set_blocking(struct led_classdev *ldev,
++				       enum led_brightness brightness)
++{
++	struct rtl8366rb_led *led = container_of(ldev, struct rtl8366rb_led,
++						 cdev);
++
++	return rb8366rb_set_port_led(led, brightness == LED_ON);
++}
++
++static int rtl8366rb_setup_led(struct realtek_priv *priv, struct dsa_port *dp,
++			       struct fwnode_handle *led_fwnode)
++{
++	struct rtl8366rb *rb = priv->chip_data;
++	struct led_init_data init_data = { };
++	enum led_default_state state;
++	struct rtl8366rb_led *led;
++	u32 led_group;
++	int ret;
++
++	ret = fwnode_property_read_u32(led_fwnode, "reg", &led_group);
++	if (ret)
++		return ret;
++
++	if (led_group >= RTL8366RB_NUM_LEDGROUPS) {
++		dev_warn(priv->dev, "Invalid LED reg %d defined for port %d",
++			 led_group, dp->index);
++		return -EINVAL;
++	}
++
++	led = &rb->leds[dp->index][led_group];
++	led->port_num = dp->index;
++	led->led_group = led_group;
++	led->priv = priv;
++
++	state = led_init_default_state_get(led_fwnode);
++	switch (state) {
++	case LEDS_DEFSTATE_ON:
++		led->cdev.brightness = 1;
++		rb8366rb_set_port_led(led, 1);
++		break;
++	case LEDS_DEFSTATE_KEEP:
++		led->cdev.brightness =
++			rb8366rb_get_port_led(led);
++		break;
++	case LEDS_DEFSTATE_OFF:
++	default:
++		led->cdev.brightness = 0;
++		rb8366rb_set_port_led(led, 0);
++	}
++
++	led->cdev.max_brightness = 1;
++	led->cdev.brightness_set_blocking =
++		rtl8366rb_cled_brightness_set_blocking;
++	init_data.fwnode = led_fwnode;
++	init_data.devname_mandatory = true;
++
++	init_data.devicename = kasprintf(GFP_KERNEL, "Realtek-%d:0%d:%d",
++					 dp->ds->index, dp->index, led_group);
++	if (!init_data.devicename)
++		return -ENOMEM;
++
++	ret = devm_led_classdev_register_ext(priv->dev, &led->cdev, &init_data);
++	if (ret) {
++		dev_warn(priv->dev, "Failed to init LED %d for port %d",
++			 led_group, dp->index);
++		return ret;
++	}
++
++	return 0;
++}
++
++int rtl8366rb_setup_leds(struct realtek_priv *priv)
++{
++	struct dsa_switch *ds = &priv->ds;
++	struct device_node *leds_np;
++	struct dsa_port *dp;
++	int ret = 0;
++
++	dsa_switch_for_each_port(dp, ds) {
++		if (!dp->dn)
++			continue;
++
++		leds_np = of_get_child_by_name(dp->dn, "leds");
++		if (!leds_np) {
++			dev_dbg(priv->dev, "No leds defined for port %d",
++				dp->index);
++			continue;
++		}
++
++		for_each_child_of_node_scoped(leds_np, led_np) {
++			ret = rtl8366rb_setup_led(priv, dp,
++						  of_fwnode_handle(led_np));
++			if (ret)
++				break;
++		}
++
++		of_node_put(leds_np);
++		if (ret)
++			return ret;
++	}
++	return 0;
++}
+diff --git a/drivers/net/dsa/realtek/rtl8366rb.c b/drivers/net/dsa/realtek/rtl8366rb.c
+index c7a8cd06058781..ae3d49fc22b809 100644
+--- a/drivers/net/dsa/realtek/rtl8366rb.c
++++ b/drivers/net/dsa/realtek/rtl8366rb.c
+@@ -26,11 +26,7 @@
+ #include "realtek-smi.h"
+ #include "realtek-mdio.h"
+ #include "rtl83xx.h"
+-
+-#define RTL8366RB_PORT_NUM_CPU		5
+-#define RTL8366RB_NUM_PORTS		6
+-#define RTL8366RB_PHY_NO_MAX		4
+-#define RTL8366RB_PHY_ADDR_MAX		31
++#include "rtl8366rb.h"
+ 
+ /* Switch Global Configuration register */
+ #define RTL8366RB_SGCR				0x0000
+@@ -175,39 +171,6 @@
+  */
+ #define RTL8366RB_VLAN_INGRESS_CTRL2_REG	0x037f
+ 
+-/* LED control registers */
+-/* The LED blink rate is global; it is used by all triggers in all groups. */
+-#define RTL8366RB_LED_BLINKRATE_REG		0x0430
+-#define RTL8366RB_LED_BLINKRATE_MASK		0x0007
+-#define RTL8366RB_LED_BLINKRATE_28MS		0x0000
+-#define RTL8366RB_LED_BLINKRATE_56MS		0x0001
+-#define RTL8366RB_LED_BLINKRATE_84MS		0x0002
+-#define RTL8366RB_LED_BLINKRATE_111MS		0x0003
+-#define RTL8366RB_LED_BLINKRATE_222MS		0x0004
+-#define RTL8366RB_LED_BLINKRATE_446MS		0x0005
+-
+-/* LED trigger event for each group */
+-#define RTL8366RB_LED_CTRL_REG			0x0431
+-#define RTL8366RB_LED_CTRL_OFFSET(led_group)	\
+-	(4 * (led_group))
+-#define RTL8366RB_LED_CTRL_MASK(led_group)	\
+-	(0xf << RTL8366RB_LED_CTRL_OFFSET(led_group))
+-
+-/* The RTL8366RB_LED_X_X registers are used to manually set the LED state only
+- * when the corresponding LED group in RTL8366RB_LED_CTRL_REG is
+- * RTL8366RB_LEDGROUP_FORCE. Otherwise, it is ignored.
+- */
+-#define RTL8366RB_LED_0_1_CTRL_REG		0x0432
+-#define RTL8366RB_LED_2_3_CTRL_REG		0x0433
+-#define RTL8366RB_LED_X_X_CTRL_REG(led_group)	\
+-	((led_group) <= 1 ? \
+-		RTL8366RB_LED_0_1_CTRL_REG : \
+-		RTL8366RB_LED_2_3_CTRL_REG)
+-#define RTL8366RB_LED_0_X_CTRL_MASK		GENMASK(5, 0)
+-#define RTL8366RB_LED_X_1_CTRL_MASK		GENMASK(11, 6)
+-#define RTL8366RB_LED_2_X_CTRL_MASK		GENMASK(5, 0)
+-#define RTL8366RB_LED_X_3_CTRL_MASK		GENMASK(11, 6)
+-
+ #define RTL8366RB_MIB_COUNT			33
+ #define RTL8366RB_GLOBAL_MIB_COUNT		1
+ #define RTL8366RB_MIB_COUNTER_PORT_OFFSET	0x0050
+@@ -243,7 +206,6 @@
+ #define RTL8366RB_PORT_STATUS_AN_MASK		0x0080
+ 
+ #define RTL8366RB_NUM_VLANS		16
+-#define RTL8366RB_NUM_LEDGROUPS		4
+ #define RTL8366RB_NUM_VIDS		4096
+ #define RTL8366RB_PRIORITYMAX		7
+ #define RTL8366RB_NUM_FIDS		8
+@@ -350,46 +312,6 @@
+ #define RTL8366RB_GREEN_FEATURE_TX	BIT(0)
+ #define RTL8366RB_GREEN_FEATURE_RX	BIT(2)
+ 
+-enum rtl8366_ledgroup_mode {
+-	RTL8366RB_LEDGROUP_OFF			= 0x0,
+-	RTL8366RB_LEDGROUP_DUP_COL		= 0x1,
+-	RTL8366RB_LEDGROUP_LINK_ACT		= 0x2,
+-	RTL8366RB_LEDGROUP_SPD1000		= 0x3,
+-	RTL8366RB_LEDGROUP_SPD100		= 0x4,
+-	RTL8366RB_LEDGROUP_SPD10		= 0x5,
+-	RTL8366RB_LEDGROUP_SPD1000_ACT		= 0x6,
+-	RTL8366RB_LEDGROUP_SPD100_ACT		= 0x7,
+-	RTL8366RB_LEDGROUP_SPD10_ACT		= 0x8,
+-	RTL8366RB_LEDGROUP_SPD100_10_ACT	= 0x9,
+-	RTL8366RB_LEDGROUP_FIBER		= 0xa,
+-	RTL8366RB_LEDGROUP_AN_FAULT		= 0xb,
+-	RTL8366RB_LEDGROUP_LINK_RX		= 0xc,
+-	RTL8366RB_LEDGROUP_LINK_TX		= 0xd,
+-	RTL8366RB_LEDGROUP_MASTER		= 0xe,
+-	RTL8366RB_LEDGROUP_FORCE		= 0xf,
+-
+-	__RTL8366RB_LEDGROUP_MODE_MAX
+-};
+-
+-struct rtl8366rb_led {
+-	u8 port_num;
+-	u8 led_group;
+-	struct realtek_priv *priv;
+-	struct led_classdev cdev;
+-};
+-
+-/**
+- * struct rtl8366rb - RTL8366RB-specific data
+- * @max_mtu: per-port max MTU setting
+- * @pvid_enabled: if PVID is set for respective port
+- * @leds: per-port and per-ledgroup led info
+- */
+-struct rtl8366rb {
+-	unsigned int max_mtu[RTL8366RB_NUM_PORTS];
+-	bool pvid_enabled[RTL8366RB_NUM_PORTS];
+-	struct rtl8366rb_led leds[RTL8366RB_NUM_PORTS][RTL8366RB_NUM_LEDGROUPS];
+-};
+-
+ static struct rtl8366_mib_counter rtl8366rb_mib_counters[] = {
+ 	{ 0,  0, 4, "IfInOctets"				},
+ 	{ 0,  4, 4, "EtherStatsOctets"				},
+@@ -830,9 +752,10 @@ static int rtl8366rb_jam_table(const struct rtl8366rb_jam_tbl_entry *jam_table,
+ 	return 0;
+ }
+ 
+-static int rb8366rb_set_ledgroup_mode(struct realtek_priv *priv,
+-				      u8 led_group,
+-				      enum rtl8366_ledgroup_mode mode)
++/* This code is also used with LEDs disabled */
++int rb8366rb_set_ledgroup_mode(struct realtek_priv *priv,
++			       u8 led_group,
++			       enum rtl8366_ledgroup_mode mode)
+ {
+ 	int ret;
+ 	u32 val;
+@@ -849,144 +772,7 @@ static int rb8366rb_set_ledgroup_mode(struct realtek_priv *priv,
+ 	return 0;
+ }
+ 
+-static inline u32 rtl8366rb_led_group_port_mask(u8 led_group, u8 port)
+-{
+-	switch (led_group) {
+-	case 0:
+-		return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
+-	case 1:
+-		return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
+-	case 2:
+-		return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
+-	case 3:
+-		return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
+-	default:
+-		return 0;
+-	}
+-}
+-
+-static int rb8366rb_get_port_led(struct rtl8366rb_led *led)
+-{
+-	struct realtek_priv *priv = led->priv;
+-	u8 led_group = led->led_group;
+-	u8 port_num = led->port_num;
+-	int ret;
+-	u32 val;
+-
+-	ret = regmap_read(priv->map, RTL8366RB_LED_X_X_CTRL_REG(led_group),
+-			  &val);
+-	if (ret) {
+-		dev_err(priv->dev, "error reading LED on port %d group %d\n",
+-			led_group, port_num);
+-		return ret;
+-	}
+-
+-	return !!(val & rtl8366rb_led_group_port_mask(led_group, port_num));
+-}
+-
+-static int rb8366rb_set_port_led(struct rtl8366rb_led *led, bool enable)
+-{
+-	struct realtek_priv *priv = led->priv;
+-	u8 led_group = led->led_group;
+-	u8 port_num = led->port_num;
+-	int ret;
+-
+-	ret = regmap_update_bits(priv->map,
+-				 RTL8366RB_LED_X_X_CTRL_REG(led_group),
+-				 rtl8366rb_led_group_port_mask(led_group,
+-							       port_num),
+-				 enable ? 0xffff : 0);
+-	if (ret) {
+-		dev_err(priv->dev, "error updating LED on port %d group %d\n",
+-			led_group, port_num);
+-		return ret;
+-	}
+-
+-	/* Change the LED group to manual controlled LEDs if required */
+-	ret = rb8366rb_set_ledgroup_mode(priv, led_group,
+-					 RTL8366RB_LEDGROUP_FORCE);
+-
+-	if (ret) {
+-		dev_err(priv->dev, "error updating LED GROUP group %d\n",
+-			led_group);
+-		return ret;
+-	}
+-
+-	return 0;
+-}
+-
+-static int
+-rtl8366rb_cled_brightness_set_blocking(struct led_classdev *ldev,
+-				       enum led_brightness brightness)
+-{
+-	struct rtl8366rb_led *led = container_of(ldev, struct rtl8366rb_led,
+-						 cdev);
+-
+-	return rb8366rb_set_port_led(led, brightness == LED_ON);
+-}
+-
+-static int rtl8366rb_setup_led(struct realtek_priv *priv, struct dsa_port *dp,
+-			       struct fwnode_handle *led_fwnode)
+-{
+-	struct rtl8366rb *rb = priv->chip_data;
+-	struct led_init_data init_data = { };
+-	enum led_default_state state;
+-	struct rtl8366rb_led *led;
+-	u32 led_group;
+-	int ret;
+-
+-	ret = fwnode_property_read_u32(led_fwnode, "reg", &led_group);
+-	if (ret)
+-		return ret;
+-
+-	if (led_group >= RTL8366RB_NUM_LEDGROUPS) {
+-		dev_warn(priv->dev, "Invalid LED reg %d defined for port %d",
+-			 led_group, dp->index);
+-		return -EINVAL;
+-	}
+-
+-	led = &rb->leds[dp->index][led_group];
+-	led->port_num = dp->index;
+-	led->led_group = led_group;
+-	led->priv = priv;
+-
+-	state = led_init_default_state_get(led_fwnode);
+-	switch (state) {
+-	case LEDS_DEFSTATE_ON:
+-		led->cdev.brightness = 1;
+-		rb8366rb_set_port_led(led, 1);
+-		break;
+-	case LEDS_DEFSTATE_KEEP:
+-		led->cdev.brightness =
+-			rb8366rb_get_port_led(led);
+-		break;
+-	case LEDS_DEFSTATE_OFF:
+-	default:
+-		led->cdev.brightness = 0;
+-		rb8366rb_set_port_led(led, 0);
+-	}
+-
+-	led->cdev.max_brightness = 1;
+-	led->cdev.brightness_set_blocking =
+-		rtl8366rb_cled_brightness_set_blocking;
+-	init_data.fwnode = led_fwnode;
+-	init_data.devname_mandatory = true;
+-
+-	init_data.devicename = kasprintf(GFP_KERNEL, "Realtek-%d:0%d:%d",
+-					 dp->ds->index, dp->index, led_group);
+-	if (!init_data.devicename)
+-		return -ENOMEM;
+-
+-	ret = devm_led_classdev_register_ext(priv->dev, &led->cdev, &init_data);
+-	if (ret) {
+-		dev_warn(priv->dev, "Failed to init LED %d for port %d",
+-			 led_group, dp->index);
+-		return ret;
+-	}
+-
+-	return 0;
+-}
+-
++/* This code is also used with LEDs disabled */
+ static int rtl8366rb_setup_all_leds_off(struct realtek_priv *priv)
+ {
+ 	int ret = 0;
+@@ -1007,38 +793,6 @@ static int rtl8366rb_setup_all_leds_off(struct realtek_priv *priv)
+ 	return ret;
+ }
+ 
+-static int rtl8366rb_setup_leds(struct realtek_priv *priv)
+-{
+-	struct dsa_switch *ds = &priv->ds;
+-	struct device_node *leds_np;
+-	struct dsa_port *dp;
+-	int ret = 0;
+-
+-	dsa_switch_for_each_port(dp, ds) {
+-		if (!dp->dn)
+-			continue;
+-
+-		leds_np = of_get_child_by_name(dp->dn, "leds");
+-		if (!leds_np) {
+-			dev_dbg(priv->dev, "No leds defined for port %d",
+-				dp->index);
+-			continue;
+-		}
+-
+-		for_each_child_of_node_scoped(leds_np, led_np) {
+-			ret = rtl8366rb_setup_led(priv, dp,
+-						  of_fwnode_handle(led_np));
+-			if (ret)
+-				break;
+-		}
+-
+-		of_node_put(leds_np);
+-		if (ret)
+-			return ret;
+-	}
+-	return 0;
+-}
+-
+ static int rtl8366rb_setup(struct dsa_switch *ds)
+ {
+ 	struct realtek_priv *priv = ds->priv;
+diff --git a/drivers/net/dsa/realtek/rtl8366rb.h b/drivers/net/dsa/realtek/rtl8366rb.h
+new file mode 100644
+index 00000000000000..685ff3275faa17
+--- /dev/null
++++ b/drivers/net/dsa/realtek/rtl8366rb.h
+@@ -0,0 +1,107 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
++
++#ifndef _RTL8366RB_H
++#define _RTL8366RB_H
++
++#include "realtek.h"
++
++#define RTL8366RB_PORT_NUM_CPU		5
++#define RTL8366RB_NUM_PORTS		6
++#define RTL8366RB_PHY_NO_MAX		4
++#define RTL8366RB_NUM_LEDGROUPS		4
++#define RTL8366RB_PHY_ADDR_MAX		31
++
++/* LED control registers */
++/* The LED blink rate is global; it is used by all triggers in all groups. */
++#define RTL8366RB_LED_BLINKRATE_REG		0x0430
++#define RTL8366RB_LED_BLINKRATE_MASK		0x0007
++#define RTL8366RB_LED_BLINKRATE_28MS		0x0000
++#define RTL8366RB_LED_BLINKRATE_56MS		0x0001
++#define RTL8366RB_LED_BLINKRATE_84MS		0x0002
++#define RTL8366RB_LED_BLINKRATE_111MS		0x0003
++#define RTL8366RB_LED_BLINKRATE_222MS		0x0004
++#define RTL8366RB_LED_BLINKRATE_446MS		0x0005
++
++/* LED trigger event for each group */
++#define RTL8366RB_LED_CTRL_REG			0x0431
++#define RTL8366RB_LED_CTRL_OFFSET(led_group)	\
++	(4 * (led_group))
++#define RTL8366RB_LED_CTRL_MASK(led_group)	\
++	(0xf << RTL8366RB_LED_CTRL_OFFSET(led_group))
++
++/* The RTL8366RB_LED_X_X registers are used to manually set the LED state only
++ * when the corresponding LED group in RTL8366RB_LED_CTRL_REG is
++ * RTL8366RB_LEDGROUP_FORCE. Otherwise, it is ignored.
++ */
++#define RTL8366RB_LED_0_1_CTRL_REG		0x0432
++#define RTL8366RB_LED_2_3_CTRL_REG		0x0433
++#define RTL8366RB_LED_X_X_CTRL_REG(led_group)	\
++	((led_group) <= 1 ? \
++		RTL8366RB_LED_0_1_CTRL_REG : \
++		RTL8366RB_LED_2_3_CTRL_REG)
++#define RTL8366RB_LED_0_X_CTRL_MASK		GENMASK(5, 0)
++#define RTL8366RB_LED_X_1_CTRL_MASK		GENMASK(11, 6)
++#define RTL8366RB_LED_2_X_CTRL_MASK		GENMASK(5, 0)
++#define RTL8366RB_LED_X_3_CTRL_MASK		GENMASK(11, 6)
++
++enum rtl8366_ledgroup_mode {
++	RTL8366RB_LEDGROUP_OFF			= 0x0,
++	RTL8366RB_LEDGROUP_DUP_COL		= 0x1,
++	RTL8366RB_LEDGROUP_LINK_ACT		= 0x2,
++	RTL8366RB_LEDGROUP_SPD1000		= 0x3,
++	RTL8366RB_LEDGROUP_SPD100		= 0x4,
++	RTL8366RB_LEDGROUP_SPD10		= 0x5,
++	RTL8366RB_LEDGROUP_SPD1000_ACT		= 0x6,
++	RTL8366RB_LEDGROUP_SPD100_ACT		= 0x7,
++	RTL8366RB_LEDGROUP_SPD10_ACT		= 0x8,
++	RTL8366RB_LEDGROUP_SPD100_10_ACT	= 0x9,
++	RTL8366RB_LEDGROUP_FIBER		= 0xa,
++	RTL8366RB_LEDGROUP_AN_FAULT		= 0xb,
++	RTL8366RB_LEDGROUP_LINK_RX		= 0xc,
++	RTL8366RB_LEDGROUP_LINK_TX		= 0xd,
++	RTL8366RB_LEDGROUP_MASTER		= 0xe,
++	RTL8366RB_LEDGROUP_FORCE		= 0xf,
++
++	__RTL8366RB_LEDGROUP_MODE_MAX
++};
++
++#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8366RB_LEDS)
++
++struct rtl8366rb_led {
++	u8 port_num;
++	u8 led_group;
++	struct realtek_priv *priv;
++	struct led_classdev cdev;
++};
++
++int rtl8366rb_setup_leds(struct realtek_priv *priv);
++
++#else
++
++static inline int rtl8366rb_setup_leds(struct realtek_priv *priv)
++{
++	return 0;
++}
++
++#endif /* IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8366RB_LEDS) */
++
++/**
++ * struct rtl8366rb - RTL8366RB-specific data
++ * @max_mtu: per-port max MTU setting
++ * @pvid_enabled: if PVID is set for respective port
++ * @leds: per-port and per-ledgroup led info
++ */
++struct rtl8366rb {
++	unsigned int max_mtu[RTL8366RB_NUM_PORTS];
++	bool pvid_enabled[RTL8366RB_NUM_PORTS];
++#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8366RB_LEDS)
++	struct rtl8366rb_led leds[RTL8366RB_NUM_PORTS][RTL8366RB_NUM_LEDGROUPS];
++#endif
++};
++
++/* This code is also used with LEDs disabled */
++int rb8366rb_set_ledgroup_mode(struct realtek_priv *priv,
++			       u8 led_group,
++			       enum rtl8366_ledgroup_mode mode);
++
++#endif /* _RTL8366RB_H */
+diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
+index 5740c98d8c9f03..2847278d9cd48e 100644
+--- a/drivers/net/ethernet/cadence/macb.h
++++ b/drivers/net/ethernet/cadence/macb.h
+@@ -1279,6 +1279,8 @@ struct macb {
+ 	struct clk		*rx_clk;
+ 	struct clk		*tsu_clk;
+ 	struct net_device	*dev;
++	/* Protects hw_stats and ethtool_stats */
++	spinlock_t		stats_lock;
+ 	union {
+ 		struct macb_stats	macb;
+ 		struct gem_stats	gem;
+diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
+index 56901280ba0472..60847cdb516eef 100644
+--- a/drivers/net/ethernet/cadence/macb_main.c
++++ b/drivers/net/ethernet/cadence/macb_main.c
+@@ -1992,10 +1992,12 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
+ 
+ 		if (status & MACB_BIT(ISR_ROVR)) {
+ 			/* We missed at least one packet */
++			spin_lock(&bp->stats_lock);
+ 			if (macb_is_gem(bp))
+ 				bp->hw_stats.gem.rx_overruns++;
+ 			else
+ 				bp->hw_stats.macb.rx_overruns++;
++			spin_unlock(&bp->stats_lock);
+ 
+ 			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+ 				queue_writel(queue, ISR, MACB_BIT(ISR_ROVR));
+@@ -3116,6 +3118,7 @@ static struct net_device_stats *gem_get_stats(struct macb *bp)
+ 	if (!netif_running(bp->dev))
+ 		return nstat;
+ 
++	spin_lock_irq(&bp->stats_lock);
+ 	gem_update_stats(bp);
+ 
+ 	nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
+@@ -3145,6 +3148,7 @@ static struct net_device_stats *gem_get_stats(struct macb *bp)
+ 	nstat->tx_aborted_errors = hwstat->tx_excessive_collisions;
+ 	nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors;
+ 	nstat->tx_fifo_errors = hwstat->tx_underrun;
++	spin_unlock_irq(&bp->stats_lock);
+ 
+ 	return nstat;
+ }
+@@ -3152,12 +3156,13 @@ static struct net_device_stats *gem_get_stats(struct macb *bp)
+ static void gem_get_ethtool_stats(struct net_device *dev,
+ 				  struct ethtool_stats *stats, u64 *data)
+ {
+-	struct macb *bp;
++	struct macb *bp = netdev_priv(dev);
+ 
+-	bp = netdev_priv(dev);
++	spin_lock_irq(&bp->stats_lock);
+ 	gem_update_stats(bp);
+ 	memcpy(data, &bp->ethtool_stats, sizeof(u64)
+ 			* (GEM_STATS_LEN + QUEUE_STATS_LEN * MACB_MAX_QUEUES));
++	spin_unlock_irq(&bp->stats_lock);
+ }
+ 
+ static int gem_get_sset_count(struct net_device *dev, int sset)
+@@ -3207,6 +3212,7 @@ static struct net_device_stats *macb_get_stats(struct net_device *dev)
+ 		return gem_get_stats(bp);
+ 
+ 	/* read stats from hardware */
++	spin_lock_irq(&bp->stats_lock);
+ 	macb_update_stats(bp);
+ 
+ 	/* Convert HW stats into netdevice stats */
+@@ -3240,6 +3246,7 @@ static struct net_device_stats *macb_get_stats(struct net_device *dev)
+ 	nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
+ 	nstat->tx_fifo_errors = hwstat->tx_underruns;
+ 	/* Don't know about heartbeat or window errors... */
++	spin_unlock_irq(&bp->stats_lock);
+ 
+ 	return nstat;
+ }
+@@ -5110,6 +5117,7 @@ static int macb_probe(struct platform_device *pdev)
+ 		}
+ 	}
+ 	spin_lock_init(&bp->lock);
++	spin_lock_init(&bp->stats_lock);
+ 
+ 	/* setup capabilities */
+ 	macb_configure_caps(bp, macb_config);
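The new stats_lock serializes the hard-IRQ path that bumps rx_overruns
against process-context readers that snapshot the whole counter block. A
condensed kernel-style sketch of the rule the patch follows (abbreviated
names, not the driver code): the handler already runs with local interrupts
off, so plain spin_lock() is enough there, while readers take
spin_lock_irq() so the interrupt cannot fire on the same CPU while the lock
is held, which would deadlock.

	struct foo {
		spinlock_t stats_lock;	/* protects the counters below */
		u64 rx_overruns;
	};

	static irqreturn_t foo_irq(int irq, void *data)
	{
		struct foo *bp = data;

		spin_lock(&bp->stats_lock);	/* hard-IRQ context */
		bp->rx_overruns++;
		spin_unlock(&bp->stats_lock);
		return IRQ_HANDLED;
	}

	static u64 foo_read_overruns(struct foo *bp)
	{
		u64 val;

		spin_lock_irq(&bp->stats_lock);	/* process context */
		val = bp->rx_overruns;
		spin_unlock_irq(&bp->stats_lock);
		return val;
	}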
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
+index 16a7908c79f703..f662a5d54986cf 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc.c
++++ b/drivers/net/ethernet/freescale/enetc/enetc.c
+@@ -145,6 +145,24 @@ static int enetc_ptp_parse(struct sk_buff *skb, u8 *udp,
+ 	return 0;
+ }
+ 
++/**
++ * enetc_unwind_tx_frame() - Unwind the DMA mappings of a multi-buffer Tx frame
++ * @tx_ring: Pointer to the Tx ring on which the buffer descriptors are located
++ * @count: Number of Tx buffer descriptors which need to be unmapped
++ * @i: Index of the last successfully mapped Tx buffer descriptor
++ */
++static void enetc_unwind_tx_frame(struct enetc_bdr *tx_ring, int count, int i)
++{
++	while (count--) {
++		struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i];
++
++		enetc_free_tx_frame(tx_ring, tx_swbd);
++		if (i == 0)
++			i = tx_ring->bd_count;
++		i--;
++	}
++}
++
+ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
+ {
+ 	bool do_vlan, do_onestep_tstamp = false, do_twostep_tstamp = false;
+@@ -235,9 +253,11 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
+ 		}
+ 
+ 		if (do_onestep_tstamp) {
+-			u32 lo, hi, val;
+-			u64 sec, nsec;
++			__be32 new_sec_l, new_nsec;
++			u32 lo, hi, nsec, val;
++			__be16 new_sec_h;
+ 			u8 *data;
++			u64 sec;
+ 
+ 			lo = enetc_rd_hot(hw, ENETC_SICTR0);
+ 			hi = enetc_rd_hot(hw, ENETC_SICTR1);
+@@ -251,13 +271,38 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
+ 			/* Update originTimestamp field of Sync packet
+ 			 * - 48 bits seconds field
+ 			 * - 32 bits nanoseconds field
++			 *
++			 * In addition, the UDP checksum needs to be updated
++			 * by software after updating originTimestamp field,
++			 * otherwise the hardware will calculate the wrong
++			 * checksum when updating the correction field and
++			 * update it to the packet.
+ 			 */
+ 			data = skb_mac_header(skb);
+-			*(__be16 *)(data + offset2) =
+-				htons((sec >> 32) & 0xffff);
+-			*(__be32 *)(data + offset2 + 2) =
+-				htonl(sec & 0xffffffff);
+-			*(__be32 *)(data + offset2 + 6) = htonl(nsec);
++			new_sec_h = htons((sec >> 32) & 0xffff);
++			new_sec_l = htonl(sec & 0xffffffff);
++			new_nsec = htonl(nsec);
++			if (udp) {
++				struct udphdr *uh = udp_hdr(skb);
++				__be32 old_sec_l, old_nsec;
++				__be16 old_sec_h;
++
++				old_sec_h = *(__be16 *)(data + offset2);
++				inet_proto_csum_replace2(&uh->check, skb, old_sec_h,
++							 new_sec_h, false);
++
++				old_sec_l = *(__be32 *)(data + offset2 + 2);
++				inet_proto_csum_replace4(&uh->check, skb, old_sec_l,
++							 new_sec_l, false);
++
++				old_nsec = *(__be32 *)(data + offset2 + 6);
++				inet_proto_csum_replace4(&uh->check, skb, old_nsec,
++							 new_nsec, false);
++			}
++
++			*(__be16 *)(data + offset2) = new_sec_h;
++			*(__be32 *)(data + offset2 + 2) = new_sec_l;
++			*(__be32 *)(data + offset2 + 6) = new_nsec;
+ 
+ 			/* Configure single-step register */
+ 			val = ENETC_PM0_SINGLE_STEP_EN;
+@@ -328,25 +373,20 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
+ dma_err:
+ 	dev_err(tx_ring->dev, "DMA map error");
+ 
+-	do {
+-		tx_swbd = &tx_ring->tx_swbd[i];
+-		enetc_free_tx_frame(tx_ring, tx_swbd);
+-		if (i == 0)
+-			i = tx_ring->bd_count;
+-		i--;
+-	} while (count--);
++	enetc_unwind_tx_frame(tx_ring, count, i);
+ 
+ 	return 0;
+ }
+ 
+-static void enetc_map_tx_tso_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb,
+-				 struct enetc_tx_swbd *tx_swbd,
+-				 union enetc_tx_bd *txbd, int *i, int hdr_len,
+-				 int data_len)
++static int enetc_map_tx_tso_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb,
++				struct enetc_tx_swbd *tx_swbd,
++				union enetc_tx_bd *txbd, int *i, int hdr_len,
++				int data_len)
+ {
+ 	union enetc_tx_bd txbd_tmp;
+ 	u8 flags = 0, e_flags = 0;
+ 	dma_addr_t addr;
++	int count = 1;
+ 
+ 	enetc_clear_tx_bd(&txbd_tmp);
+ 	addr = tx_ring->tso_headers_dma + *i * TSO_HEADER_SIZE;
+@@ -389,7 +429,10 @@ static void enetc_map_tx_tso_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb,
+ 		/* Write the BD */
+ 		txbd_tmp.ext.e_flags = e_flags;
+ 		*txbd = txbd_tmp;
++		count++;
+ 	}
++
++	return count;
+ }
+ 
+ static int enetc_map_tx_tso_data(struct enetc_bdr *tx_ring, struct sk_buff *skb,
+@@ -521,9 +564,9 @@ static int enetc_map_tx_tso_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb
+ 
+ 		/* compute the csum over the L4 header */
+ 		csum = enetc_tso_hdr_csum(&tso, skb, hdr, hdr_len, &pos);
+-		enetc_map_tx_tso_hdr(tx_ring, skb, tx_swbd, txbd, &i, hdr_len, data_len);
++		count += enetc_map_tx_tso_hdr(tx_ring, skb, tx_swbd, txbd,
++					      &i, hdr_len, data_len);
+ 		bd_data_num = 0;
+-		count++;
+ 
+ 		while (data_len > 0) {
+ 			int size;
+@@ -547,8 +590,13 @@ static int enetc_map_tx_tso_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb
+ 			err = enetc_map_tx_tso_data(tx_ring, skb, tx_swbd, txbd,
+ 						    tso.data, size,
+ 						    size == data_len);
+-			if (err)
++			if (err) {
++				if (i == 0)
++					i = tx_ring->bd_count;
++				i--;
++
+ 				goto err_map_data;
++			}
+ 
+ 			data_len -= size;
+ 			count++;
+@@ -577,13 +625,7 @@ static int enetc_map_tx_tso_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb
+ 	dev_err(tx_ring->dev, "DMA map error");
+ 
+ err_chained_bd:
+-	do {
+-		tx_swbd = &tx_ring->tx_swbd[i];
+-		enetc_free_tx_frame(tx_ring, tx_swbd);
+-		if (i == 0)
+-			i = tx_ring->bd_count;
+-		i--;
+-	} while (count--);
++	enetc_unwind_tx_frame(tx_ring, count, i);
+ 
+ 	return 0;
+ }
+@@ -1623,7 +1665,7 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
+ 				enetc_xdp_drop(rx_ring, orig_i, i);
+ 				tx_ring->stats.xdp_tx_drops++;
+ 			} else {
+-				tx_ring->stats.xdp_tx += xdp_tx_bd_cnt;
++				tx_ring->stats.xdp_tx++;
+ 				rx_ring->xdp.xdp_tx_in_flight += xdp_tx_bd_cnt;
+ 				xdp_tx_frm_cnt++;
+ 				/* The XDP_TX enqueue was successful, so we
+@@ -2929,6 +2971,9 @@ static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr)
+ 		new_offloads |= ENETC_F_TX_TSTAMP;
+ 		break;
+ 	case HWTSTAMP_TX_ONESTEP_SYNC:
++		if (!enetc_si_is_pf(priv->si))
++			return -EOPNOTSUPP;
++
+ 		new_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
+ 		new_offloads |= ENETC_F_TX_ONESTEP_SYNC_TSTAMP;
+ 		break;
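The UDP fix above relies on incremental checksum updates:
inet_proto_csum_replace2()/4() patch the 16-bit one's complement checksum
when a word in the packet is rewritten, without re-summing the payload. A
self-contained userspace illustration of the underlying RFC 1624 arithmetic
(example values are arbitrary; the kernel helpers also handle
CHECKSUM_COMPLETE bookkeeping that is omitted here):

	#include <stdint.h>
	#include <stdio.h>

	/* HC' = ~(~HC + ~m + m')  (RFC 1624, eq. 3), folded to 16 bits */
	static uint16_t csum_replace4(uint16_t check, uint32_t from, uint32_t to)
	{
		uint32_t nfrom = ~from;
		uint64_t sum = (uint16_t)~check;

		sum += (nfrom >> 16) + (nfrom & 0xffff);
		sum += (to >> 16) + (to & 0xffff);
		while (sum >> 16)			/* end-around carry */
			sum = (sum & 0xffff) + (sum >> 16);
		return (uint16_t)~sum;
	}

	int main(void)
	{
		uint16_t check = 0x1c46;	/* arbitrary starting checksum */

		printf("patched check: 0x%04x\n",
		       csum_replace4(check, 0x12345678, 0x9abcdef0));
		return 0;
	}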
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
+index 2563eb8ac7b63a..6a24324703bf49 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
++++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
+@@ -843,6 +843,7 @@ static int enetc_set_coalesce(struct net_device *ndev,
+ static int enetc_get_ts_info(struct net_device *ndev,
+ 			     struct kernel_ethtool_ts_info *info)
+ {
++	struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ 	int *phc_idx;
+ 
+ 	phc_idx = symbol_get(enetc_phc_index);
+@@ -863,8 +864,10 @@ static int enetc_get_ts_info(struct net_device *ndev,
+ 				SOF_TIMESTAMPING_TX_SOFTWARE;
+ 
+ 	info->tx_types = (1 << HWTSTAMP_TX_OFF) |
+-			 (1 << HWTSTAMP_TX_ON) |
+-			 (1 << HWTSTAMP_TX_ONESTEP_SYNC);
++			 (1 << HWTSTAMP_TX_ON);
++
++	if (enetc_si_is_pf(priv->si))
++		info->tx_types |= (1 << HWTSTAMP_TX_ONESTEP_SYNC);
+ 
+ 	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ 			   (1 << HWTSTAMP_FILTER_ALL);
+diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
+index 558cda577191d6..2960709f6b62ca 100644
+--- a/drivers/net/ethernet/intel/ice/ice.h
++++ b/drivers/net/ethernet/intel/ice/ice.h
+@@ -207,6 +207,7 @@ enum ice_feature {
+ 	ICE_F_GNSS,
+ 	ICE_F_ROCE_LAG,
+ 	ICE_F_SRIOV_LAG,
++	ICE_F_MBX_LIMIT,
+ 	ICE_F_MAX
+ };
+ 
+diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
+index fb527434b58b15..d649c197cf673f 100644
+--- a/drivers/net/ethernet/intel/ice/ice_eswitch.c
++++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
+@@ -38,8 +38,7 @@ static int ice_eswitch_setup_env(struct ice_pf *pf)
+ 	if (ice_vsi_add_vlan_zero(uplink_vsi))
+ 		goto err_vlan_zero;
+ 
+-	if (ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, true,
+-			     ICE_FLTR_RX))
++	if (ice_set_dflt_vsi(uplink_vsi))
+ 		goto err_def_rx;
+ 
+ 	if (ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, true,
+diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+index 91cbae1eec89a0..8d31bfe28cc884 100644
+--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
++++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+@@ -539,5 +539,8 @@
+ #define E830_PRTMAC_CL01_QNT_THR_CL0_M		GENMASK(15, 0)
+ #define VFINT_DYN_CTLN(_i)			(0x00003800 + ((_i) * 4))
+ #define VFINT_DYN_CTLN_CLEARPBA_M		BIT(1)
++#define E830_MBX_PF_IN_FLIGHT_VF_MSGS_THRESH	0x00234000
++#define E830_MBX_VF_DEC_TRIG(_VF)		(0x00233800 + (_VF) * 4)
++#define E830_MBX_VF_IN_FLIGHT_MSGS_AT_PF_CNT(_VF)	(0x00233000 + (_VF) * 4)
+ 
+ #endif /* _ICE_HW_AUTOGEN_H_ */
+diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
+index 06e712cdc3d9ed..d4e74f96a8ad5d 100644
+--- a/drivers/net/ethernet/intel/ice/ice_lib.c
++++ b/drivers/net/ethernet/intel/ice/ice_lib.c
+@@ -3880,6 +3880,9 @@ void ice_init_feature_support(struct ice_pf *pf)
+ 	default:
+ 		break;
+ 	}
++
++	if (pf->hw.mac_type == ICE_MAC_E830)
++		ice_set_feature_support(pf, ICE_F_MBX_LIMIT);
+ }
+ 
+ /**
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index 45eefe22fb5b73..ca707dfcb286ef 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -1546,12 +1546,20 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
+ 			ice_vf_lan_overflow_event(pf, &event);
+ 			break;
+ 		case ice_mbx_opc_send_msg_to_pf:
+-			data.num_msg_proc = i;
+-			data.num_pending_arq = pending;
+-			data.max_num_msgs_mbx = hw->mailboxq.num_rq_entries;
+-			data.async_watermark_val = ICE_MBX_OVERFLOW_WATERMARK;
++			if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT)) {
++				ice_vc_process_vf_msg(pf, &event, NULL);
++				ice_mbx_vf_dec_trig_e830(hw, &event);
++			} else {
++				u16 val = hw->mailboxq.num_rq_entries;
++
++				data.max_num_msgs_mbx = val;
++				val = ICE_MBX_OVERFLOW_WATERMARK;
++				data.async_watermark_val = val;
++				data.num_msg_proc = i;
++				data.num_pending_arq = pending;
+ 
+-			ice_vc_process_vf_msg(pf, &event, &data);
++				ice_vc_process_vf_msg(pf, &event, &data);
++			}
+ 			break;
+ 		case ice_aqc_opc_fw_logs_event:
+ 			ice_get_fwlog_data(pf, &event);
+@@ -4082,7 +4090,11 @@ static int ice_init_pf(struct ice_pf *pf)
+ 
+ 	mutex_init(&pf->vfs.table_lock);
+ 	hash_init(pf->vfs.table);
+-	ice_mbx_init_snapshot(&pf->hw);
++	if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT))
++		wr32(&pf->hw, E830_MBX_PF_IN_FLIGHT_VF_MSGS_THRESH,
++		     ICE_MBX_OVERFLOW_WATERMARK);
++	else
++		ice_mbx_init_snapshot(&pf->hw);
+ 
+ 	xa_init(&pf->dyn_ports);
+ 	xa_init(&pf->sf_nums);
+diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
+index 91cb393f616f2b..8aabf7749aa5e0 100644
+--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
++++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
+@@ -36,6 +36,7 @@ static void ice_free_vf_entries(struct ice_pf *pf)
+ 
+ 	hash_for_each_safe(vfs->table, bkt, tmp, vf, entry) {
+ 		hash_del_rcu(&vf->entry);
++		ice_deinitialize_vf_entry(vf);
+ 		ice_put_vf(vf);
+ 	}
+ }
+@@ -193,9 +194,6 @@ void ice_free_vfs(struct ice_pf *pf)
+ 			wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
+ 		}
+ 
+-		/* clear malicious info since the VF is getting released */
+-		list_del(&vf->mbx_info.list_entry);
+-
+ 		mutex_unlock(&vf->cfg_lock);
+ 	}
+ 
+diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+index 8c434689e3f78e..815ad0bfe8326b 100644
+--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
++++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+@@ -716,6 +716,23 @@ ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
+ 	return 0;
+ }
+ 
++/**
++ * ice_reset_vf_mbx_cnt - reset VF mailbox message count
++ * @vf: pointer to the VF structure
++ *
++ * This function clears the VF mailbox message count, and should be called on
++ * VF reset.
++ */
++static void ice_reset_vf_mbx_cnt(struct ice_vf *vf)
++{
++	struct ice_pf *pf = vf->pf;
++
++	if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT))
++		ice_mbx_vf_clear_cnt_e830(&pf->hw, vf->vf_id);
++	else
++		ice_mbx_clear_malvf(&vf->mbx_info);
++}
++
+ /**
+  * ice_reset_all_vfs - reset all allocated VFs in one go
+  * @pf: pointer to the PF structure
+@@ -742,7 +759,7 @@ void ice_reset_all_vfs(struct ice_pf *pf)
+ 
+ 	/* clear all malicious info if the VFs are getting reset */
+ 	ice_for_each_vf(pf, bkt, vf)
+-		ice_mbx_clear_malvf(&vf->mbx_info);
++		ice_reset_vf_mbx_cnt(vf);
+ 
+ 	/* If VFs have been disabled, there is no need to reset */
+ 	if (test_and_set_bit(ICE_VF_DIS, pf->state)) {
+@@ -958,7 +975,7 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
+ 	ice_eswitch_update_repr(&vf->repr_id, vsi);
+ 
+ 	/* if the VF has been reset allow it to come up again */
+-	ice_mbx_clear_malvf(&vf->mbx_info);
++	ice_reset_vf_mbx_cnt(vf);
+ 
+ out_unlock:
+ 	if (lag && lag->bonded && lag->primary &&
+@@ -1011,11 +1028,22 @@ void ice_initialize_vf_entry(struct ice_vf *vf)
+ 	ice_vf_fdir_init(vf);
+ 
+ 	/* Initialize mailbox info for this VF */
+-	ice_mbx_init_vf_info(&pf->hw, &vf->mbx_info);
++	if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT))
++		ice_mbx_vf_clear_cnt_e830(&pf->hw, vf->vf_id);
++	else
++		ice_mbx_init_vf_info(&pf->hw, &vf->mbx_info);
+ 
+ 	mutex_init(&vf->cfg_lock);
+ }
+ 
++void ice_deinitialize_vf_entry(struct ice_vf *vf)
++{
++	struct ice_pf *pf = vf->pf;
++
++	if (!ice_is_feature_supported(pf, ICE_F_MBX_LIMIT))
++		list_del(&vf->mbx_info.list_entry);
++}
++
+ /**
+  * ice_dis_vf_qs - Disable the VF queues
+  * @vf: pointer to the VF structure
+diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h b/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
+index 0c7e77c0a09fa6..5392b040498621 100644
+--- a/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
++++ b/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
+@@ -24,6 +24,7 @@
+ #endif
+ 
+ void ice_initialize_vf_entry(struct ice_vf *vf);
++void ice_deinitialize_vf_entry(struct ice_vf *vf);
+ void ice_dis_vf_qs(struct ice_vf *vf);
+ int ice_check_vf_init(struct ice_vf *vf);
+ enum virtchnl_status_code ice_err_to_virt_err(int err);
+diff --git a/drivers/net/ethernet/intel/ice/ice_vf_mbx.c b/drivers/net/ethernet/intel/ice/ice_vf_mbx.c
+index 40cb4ba0789ced..75c8113e58ee92 100644
+--- a/drivers/net/ethernet/intel/ice/ice_vf_mbx.c
++++ b/drivers/net/ethernet/intel/ice/ice_vf_mbx.c
+@@ -210,6 +210,38 @@ ice_mbx_detect_malvf(struct ice_hw *hw, struct ice_mbx_vf_info *vf_info,
+ 	return 0;
+ }
+ 
++/**
++ * ice_mbx_vf_dec_trig_e830 - Decrements the VF mailbox queue counter
++ * @hw: pointer to the HW struct
++ * @event: pointer to the control queue receive event
++ *
++ * This function triggers a decrement of the counter
++ * MBX_VF_IN_FLIGHT_MSGS_AT_PF_CNT when the driver replenishes
++ * the buffers at the PF mailbox queue.
++ */
++void ice_mbx_vf_dec_trig_e830(const struct ice_hw *hw,
++			      const struct ice_rq_event_info *event)
++{
++	u16 vfid = le16_to_cpu(event->desc.retval);
++
++	wr32(hw, E830_MBX_VF_DEC_TRIG(vfid), 1);
++}
++
++/**
++ * ice_mbx_vf_clear_cnt_e830 - Clear the VF mailbox queue count
++ * @hw: pointer to the HW struct
++ * @vf_id: VF ID in the PF space
++ *
++ * This function clears the counter MBX_VF_IN_FLIGHT_MSGS_AT_PF_CNT, and should
++ * be called when a VF is created and on VF reset.
++ */
++void ice_mbx_vf_clear_cnt_e830(const struct ice_hw *hw, u16 vf_id)
++{
++	u32 reg = rd32(hw, E830_MBX_VF_IN_FLIGHT_MSGS_AT_PF_CNT(vf_id));
++
++	wr32(hw, E830_MBX_VF_DEC_TRIG(vf_id), reg);
++}
++
+ /**
+  * ice_mbx_vf_state_handler - Handle states of the overflow algorithm
+  * @hw: pointer to the HW struct
+diff --git a/drivers/net/ethernet/intel/ice/ice_vf_mbx.h b/drivers/net/ethernet/intel/ice/ice_vf_mbx.h
+index 44bc030d17e07a..684de89e5c5ed7 100644
+--- a/drivers/net/ethernet/intel/ice/ice_vf_mbx.h
++++ b/drivers/net/ethernet/intel/ice/ice_vf_mbx.h
+@@ -19,6 +19,9 @@ ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
+ 		      u8 *msg, u16 msglen, struct ice_sq_cd *cd);
+ 
+ u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed);
++void ice_mbx_vf_dec_trig_e830(const struct ice_hw *hw,
++			      const struct ice_rq_event_info *event);
++void ice_mbx_vf_clear_cnt_e830(const struct ice_hw *hw, u16 vf_id);
+ int
+ ice_mbx_vf_state_handler(struct ice_hw *hw, struct ice_mbx_data *mbx_data,
+ 			 struct ice_mbx_vf_info *vf_info, bool *report_malvf);
+@@ -47,5 +50,11 @@ static inline void ice_mbx_init_snapshot(struct ice_hw *hw)
+ {
+ }
+ 
++static inline void
++ice_mbx_vf_dec_trig_e830(const struct ice_hw *hw,
++			 const struct ice_rq_event_info *event)
++{
++}
++
+ #endif /* CONFIG_PCI_IOV */
+ #endif /* _ICE_VF_MBX_H_ */
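Taken together, the two helpers above imply the E830 counter convention: the
per-VF in-flight count is read-only, but writing N to the decrement trigger
subtracts N, so clearing the counter is "read the count, write it back". A
toy userspace model of that convention (rd32/dec_trig are stand-ins for the
register accessors):

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t in_flight = 7;		/* models the HW counter */

	static uint32_t rd32(void) { return in_flight; }
	static void dec_trig(uint32_t n) { in_flight -= n; }

	int main(void)
	{
		dec_trig(1);			/* one buffer replenished */
		dec_trig(rd32());		/* clear on VF create/reset */
		printf("in-flight after clear: %u\n", in_flight);
		return 0;
	}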
+diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+index b6ec01f6fa73e0..c8c1d48ff793d7 100644
+--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
++++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+@@ -4008,8 +4008,10 @@ ice_is_malicious_vf(struct ice_vf *vf, struct ice_mbx_data *mbxdata)
+  * @event: pointer to the AQ event
+  * @mbxdata: information used to detect VF attempting mailbox overflow
+  *
+- * called from the common asq/arq handler to
+- * process request from VF
++ * Called from the common asq/arq handler to process request from VF. When this
++ * flow is used for devices with hardware VF to PF message queue overflow
++ * support (ICE_F_MBX_LIMIT), mbxdata is set to NULL and the ice_is_malicious_vf
++ * check is skipped.
+  */
+ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event,
+ 			   struct ice_mbx_data *mbxdata)
+@@ -4035,7 +4037,7 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event,
+ 	mutex_lock(&vf->cfg_lock);
+ 
+ 	/* Check if the VF is trying to overflow the mailbox */
+-	if (ice_is_malicious_vf(vf, mbxdata))
++	if (mbxdata && ice_is_malicious_vf(vf, mbxdata))
+ 		goto finish;
+ 
+ 	/* Check if VF is disabled. */
+diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+index 1e0d1f9b07fbcf..afc902ae4763e0 100644
+--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
++++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+@@ -3013,7 +3013,6 @@ static int idpf_rx_rsc(struct idpf_rx_queue *rxq, struct sk_buff *skb,
+ 	skb_shinfo(skb)->gso_size = rsc_seg_len;
+ 
+ 	skb_reset_network_header(skb);
+-	len = skb->len - skb_transport_offset(skb);
+ 
+ 	if (ipv4) {
+ 		struct iphdr *ipv4h = ip_hdr(skb);
+@@ -3022,6 +3021,7 @@ static int idpf_rx_rsc(struct idpf_rx_queue *rxq, struct sk_buff *skb,
+ 
+ 		/* Reset and set transport header offset in skb */
+ 		skb_set_transport_header(skb, sizeof(struct iphdr));
++		len = skb->len - skb_transport_offset(skb);
+ 
+ 		/* Compute the TCP pseudo header checksum */
+ 		tcp_hdr(skb)->check =
+@@ -3031,6 +3031,7 @@ static int idpf_rx_rsc(struct idpf_rx_queue *rxq, struct sk_buff *skb,
+ 
+ 		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+ 		skb_set_transport_header(skb, sizeof(struct ipv6hdr));
++		len = skb->len - skb_transport_offset(skb);
+ 		tcp_hdr(skb)->check =
+ 			~tcp_v6_check(len, &ipv6h->saddr, &ipv6h->daddr, 0);
+ 	}
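The idpf hunks fix an ordering bug: skb_transport_offset() is only
meaningful after skb_set_transport_header(), so the payload length fed into
the pseudo-header checksum has to be computed per address family, after the
header offset is set. A condensed sketch of the corrected IPv4 path (not the
full function):

	skb_reset_network_header(skb);
	/* old code computed len here, from a stale transport offset */
	skb_set_transport_header(skb, sizeof(struct iphdr));
	len = skb->len - skb_transport_offset(skb);	/* now valid */
	tcp_hdr(skb)->check =
		~tcp_v4_check(len, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, 0);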
+diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
+index 1641791a2d5b4e..8ed83fb9886243 100644
+--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
+@@ -324,7 +324,7 @@ static const struct mvpp2_cls_flow cls_flows[MVPP2_N_PRS_FLOWS] = {
+ 		       MVPP2_PRS_RI_VLAN_MASK),
+ 	/* Non IP flow, with vlan tag */
+ 	MVPP2_DEF_FLOW(MVPP22_FLOW_ETHERNET, MVPP2_FL_NON_IP_TAG,
+-		       MVPP22_CLS_HEK_OPT_VLAN,
++		       MVPP22_CLS_HEK_TAGGED,
+ 		       0, 0),
+ };
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+index 7db9cab9bedf69..d9362eabc6a1ca 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+@@ -572,7 +572,7 @@ irq_pool_alloc(struct mlx5_core_dev *dev, int start, int size, char *name,
+ 	pool->min_threshold = min_threshold * MLX5_EQ_REFS_PER_IRQ;
+ 	pool->max_threshold = max_threshold * MLX5_EQ_REFS_PER_IRQ;
+ 	mlx5_core_dbg(dev, "pool->name = %s, pool->size = %d, pool->start = %d",
+-		      name, size, start);
++		      name ? name : "mlx5_pcif_pool", size, start);
+ 	return pool;
+ }
+ 
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
+index bfe6e2d631bdf5..f5acfb7d4ff655 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
+@@ -516,6 +516,19 @@ static int loongson_dwmac_acpi_config(struct pci_dev *pdev,
+ 	return 0;
+ }
+ 
++/* Loongson's DWMAC device may take nearly two seconds to complete DMA reset */
++static int loongson_dwmac_fix_reset(void *priv, void __iomem *ioaddr)
++{
++	u32 value = readl(ioaddr + DMA_BUS_MODE);
++
++	value |= DMA_BUS_MODE_SFT_RESET;
++	writel(value, ioaddr + DMA_BUS_MODE);
++
++	return readl_poll_timeout(ioaddr + DMA_BUS_MODE, value,
++				  !(value & DMA_BUS_MODE_SFT_RESET),
++				  10000, 2000000);
++}
++
+ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ {
+ 	struct plat_stmmacenet_data *plat;
+@@ -566,6 +579,7 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
+ 
+ 	plat->bsp_priv = ld;
+ 	plat->setup = loongson_dwmac_setup;
++	plat->fix_soc_reset = loongson_dwmac_fix_reset;
+ 	ld->dev = &pdev->dev;
+ 	ld->loongson_id = readl(res.addr + GMAC_VERSION) & 0xff;
+ 
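readl_poll_timeout(addr, val, cond, sleep_us, timeout_us) from
linux/iopoll.h re-reads the register into val roughly every sleep_us
microseconds until cond becomes true, returning 0, or gives up after
timeout_us and returns -ETIMEDOUT. The general shape of the idiom, with
hypothetical REG/BUSY_BIT names:

	ret = readl_poll_timeout(ioaddr + REG, val,
				 !(val & BUSY_BIT),	/* exit condition */
				 10000,			/* poll every 10 ms */
				 2000000);		/* give up after 2 s */
	if (ret)
		dev_err(dev, "reset timed out\n");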
+diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
+index 0d5a862cd78a6c..3a13d60a947a81 100644
+--- a/drivers/net/ethernet/ti/Kconfig
++++ b/drivers/net/ethernet/ti/Kconfig
+@@ -99,6 +99,7 @@ config TI_K3_AM65_CPSW_NUSS
+ 	select NET_DEVLINK
+ 	select TI_DAVINCI_MDIO
+ 	select PHYLINK
++	select PAGE_POOL
+ 	select TI_K3_CPPI_DESC_POOL
+ 	imply PHY_TI_GMII_SEL
+ 	depends on TI_K3_AM65_CPTS || !TI_K3_AM65_CPTS
+diff --git a/drivers/net/ethernet/ti/icssg/icss_iep.c b/drivers/net/ethernet/ti/icssg/icss_iep.c
+index 768578c0d9587d..d59c1744840af2 100644
+--- a/drivers/net/ethernet/ti/icssg/icss_iep.c
++++ b/drivers/net/ethernet/ti/icssg/icss_iep.c
+@@ -474,26 +474,7 @@ static int icss_iep_perout_enable_hw(struct icss_iep *iep,
+ static int icss_iep_perout_enable(struct icss_iep *iep,
+ 				  struct ptp_perout_request *req, int on)
+ {
+-	int ret = 0;
+-
+-	mutex_lock(&iep->ptp_clk_mutex);
+-
+-	if (iep->pps_enabled) {
+-		ret = -EBUSY;
+-		goto exit;
+-	}
+-
+-	if (iep->perout_enabled == !!on)
+-		goto exit;
+-
+-	ret = icss_iep_perout_enable_hw(iep, req, on);
+-	if (!ret)
+-		iep->perout_enabled = !!on;
+-
+-exit:
+-	mutex_unlock(&iep->ptp_clk_mutex);
+-
+-	return ret;
++	return -EOPNOTSUPP;
+ }
+ 
+ static void icss_iep_cap_cmp_work(struct work_struct *work)
+diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
+index b1afcb8740de12..ca62188a317ad4 100644
+--- a/drivers/net/ipvlan/ipvlan_core.c
++++ b/drivers/net/ipvlan/ipvlan_core.c
+@@ -3,6 +3,7 @@
+  */
+ 
+ #include <net/inet_dscp.h>
++#include <net/ip.h>
+ 
+ #include "ipvlan.h"
+ 
+@@ -415,20 +416,25 @@ struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port, void *lyr3h,
+ 
+ static noinline_for_stack int ipvlan_process_v4_outbound(struct sk_buff *skb)
+ {
+-	const struct iphdr *ip4h = ip_hdr(skb);
+ 	struct net_device *dev = skb->dev;
+ 	struct net *net = dev_net(dev);
+-	struct rtable *rt;
+ 	int err, ret = NET_XMIT_DROP;
++	const struct iphdr *ip4h;
++	struct rtable *rt;
+ 	struct flowi4 fl4 = {
+ 		.flowi4_oif = dev->ifindex,
+-		.flowi4_tos = ip4h->tos & INET_DSCP_MASK,
+ 		.flowi4_flags = FLOWI_FLAG_ANYSRC,
+ 		.flowi4_mark = skb->mark,
+-		.daddr = ip4h->daddr,
+-		.saddr = ip4h->saddr,
+ 	};
+ 
++	if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
++		goto err;
++
++	ip4h = ip_hdr(skb);
++	fl4.daddr = ip4h->daddr;
++	fl4.saddr = ip4h->saddr;
++	fl4.flowi4_tos = inet_dscp_to_dsfield(ip4h_dscp(ip4h));
++
+ 	rt = ip_route_output_flow(net, &fl4, NULL);
+ 	if (IS_ERR(rt))
+ 		goto err;
+@@ -487,6 +493,12 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
+ 	struct net_device *dev = skb->dev;
+ 	int err, ret = NET_XMIT_DROP;
+ 
++	if (!pskb_network_may_pull(skb, sizeof(struct ipv6hdr))) {
++		DEV_STATS_INC(dev, tx_errors);
++		kfree_skb(skb);
++		return ret;
++	}
++
+ 	err = ipvlan_route_v6_outbound(dev, skb);
+ 	if (unlikely(err)) {
+ 		DEV_STATS_INC(dev, tx_errors);
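Both ipvlan hunks add the same guard: nothing on this transmit path has
validated that a full L3 header sits in the skb's linear area, so it must be
pulled before any ip_hdr()/ipv6_hdr() field is read, and the header pointer
fetched only afterwards, since pulling may reallocate skb->data. The IPv4
shape, condensed (the drop label stands in for the function's error path):

	if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
		goto drop;		/* truncated or non-linear header */
	ip4h = ip_hdr(skb);		/* only now safe to dereference */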
+diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
+index 1993b90b1a5f90..491e56b3263fd5 100644
+--- a/drivers/net/loopback.c
++++ b/drivers/net/loopback.c
+@@ -244,8 +244,22 @@ static netdev_tx_t blackhole_netdev_xmit(struct sk_buff *skb,
+ 	return NETDEV_TX_OK;
+ }
+ 
++static int blackhole_neigh_output(struct neighbour *n, struct sk_buff *skb)
++{
++	kfree_skb(skb);
++	return 0;
++}
++
++static int blackhole_neigh_construct(struct net_device *dev,
++				     struct neighbour *n)
++{
++	n->output = blackhole_neigh_output;
++	return 0;
++}
++
+ static const struct net_device_ops blackhole_netdev_ops = {
+ 	.ndo_start_xmit = blackhole_netdev_xmit,
++	.ndo_neigh_construct = blackhole_neigh_construct,
+ };
+ 
+ /* This is a dst-dummy device used specifically for invalidated
+diff --git a/drivers/net/phy/qcom/qca807x.c b/drivers/net/phy/qcom/qca807x.c
+index bd8a51ec0ecd6a..ec336c3e338d6c 100644
+--- a/drivers/net/phy/qcom/qca807x.c
++++ b/drivers/net/phy/qcom/qca807x.c
+@@ -774,7 +774,7 @@ static int qca807x_config_init(struct phy_device *phydev)
+ 	control_dac &= ~QCA807X_CONTROL_DAC_MASK;
+ 	if (!priv->dac_full_amplitude)
+ 		control_dac |= QCA807X_CONTROL_DAC_DSP_AMPLITUDE;
+-	if (!priv->dac_full_amplitude)
++	if (!priv->dac_full_bias_current)
+ 		control_dac |= QCA807X_CONTROL_DAC_DSP_BIAS_CURRENT;
+ 	if (!priv->dac_disable_bias_current_tweak)
+ 		control_dac |= QCA807X_CONTROL_DAC_BIAS_CURRENT_TWEAK;
+diff --git a/drivers/net/usb/gl620a.c b/drivers/net/usb/gl620a.c
+index 46af78caf457a6..0bfa37c1405918 100644
+--- a/drivers/net/usb/gl620a.c
++++ b/drivers/net/usb/gl620a.c
+@@ -179,9 +179,7 @@ static int genelink_bind(struct usbnet *dev, struct usb_interface *intf)
+ {
+ 	dev->hard_mtu = GL_RCV_BUF_SIZE;
+ 	dev->net->hard_header_len += 4;
+-	dev->in = usb_rcvbulkpipe(dev->udev, dev->driver_info->in);
+-	dev->out = usb_sndbulkpipe(dev->udev, dev->driver_info->out);
+-	return 0;
++	return usbnet_get_endpoints(dev, intf);
+ }
+ 
+ static const struct driver_info	genelink_info = {
+diff --git a/drivers/phy/rockchip/Kconfig b/drivers/phy/rockchip/Kconfig
+index 2f7a05f21dc595..dcb8e1628632e6 100644
+--- a/drivers/phy/rockchip/Kconfig
++++ b/drivers/phy/rockchip/Kconfig
+@@ -125,6 +125,7 @@ config PHY_ROCKCHIP_USBDP
+ 	depends on ARCH_ROCKCHIP && OF
+ 	depends on TYPEC
+ 	select GENERIC_PHY
++	select USB_COMMON
+ 	help
+ 	  Enable this to support the Rockchip USB3.0/DP combo PHY with
+ 	  Samsung IP block. This is required for USB3 support on RK3588.
+diff --git a/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c b/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
+index 2eb3329ca23f67..1ef6d9630f7e09 100644
+--- a/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
++++ b/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
+@@ -309,7 +309,10 @@ static int rockchip_combphy_parse_dt(struct device *dev, struct rockchip_combphy
+ 
+ 	priv->ext_refclk = device_property_present(dev, "rockchip,ext-refclk");
+ 
+-	priv->phy_rst = devm_reset_control_get(dev, "phy");
++	priv->phy_rst = devm_reset_control_get_exclusive(dev, "phy");
++	/* fallback to old behaviour */
++	if (PTR_ERR(priv->phy_rst) == -ENOENT)
++		priv->phy_rst = devm_reset_control_array_get_exclusive(dev);
+ 	if (IS_ERR(priv->phy_rst))
+ 		return dev_err_probe(dev, PTR_ERR(priv->phy_rst), "failed to get phy reset\n");
+ 
+diff --git a/drivers/phy/samsung/phy-exynos5-usbdrd.c b/drivers/phy/samsung/phy-exynos5-usbdrd.c
+index c421b495eb0fe4..46b8f6987c62c3 100644
+--- a/drivers/phy/samsung/phy-exynos5-usbdrd.c
++++ b/drivers/phy/samsung/phy-exynos5-usbdrd.c
+@@ -488,9 +488,9 @@ exynos5_usbdrd_pipe3_set_refclk(struct phy_usb_instance *inst)
+ 	reg |=	PHYCLKRST_REFCLKSEL_EXT_REFCLK;
+ 
+ 	/* FSEL settings corresponding to reference clock */
+-	reg &= ~PHYCLKRST_FSEL_PIPE_MASK |
+-		PHYCLKRST_MPLL_MULTIPLIER_MASK |
+-		PHYCLKRST_SSC_REFCLKSEL_MASK;
++	reg &= ~(PHYCLKRST_FSEL_PIPE_MASK |
++		 PHYCLKRST_MPLL_MULTIPLIER_MASK |
++		 PHYCLKRST_SSC_REFCLKSEL_MASK);
+ 	switch (phy_drd->extrefclk) {
+ 	case EXYNOS5_FSEL_50MHZ:
+ 		reg |= (PHYCLKRST_MPLL_MULTIPLIER_50M_REF |
+@@ -532,9 +532,9 @@ exynos5_usbdrd_utmi_set_refclk(struct phy_usb_instance *inst)
+ 	reg &= ~PHYCLKRST_REFCLKSEL_MASK;
+ 	reg |=	PHYCLKRST_REFCLKSEL_EXT_REFCLK;
+ 
+-	reg &= ~PHYCLKRST_FSEL_UTMI_MASK |
+-		PHYCLKRST_MPLL_MULTIPLIER_MASK |
+-		PHYCLKRST_SSC_REFCLKSEL_MASK;
++	reg &= ~(PHYCLKRST_FSEL_UTMI_MASK |
++		 PHYCLKRST_MPLL_MULTIPLIER_MASK |
++		 PHYCLKRST_SSC_REFCLKSEL_MASK);
+ 	reg |= PHYCLKRST_FSEL(phy_drd->extrefclk);
+ 
+ 	return reg;
+@@ -1296,14 +1296,17 @@ static int exynos5_usbdrd_gs101_phy_exit(struct phy *phy)
+ 	struct exynos5_usbdrd_phy *phy_drd = to_usbdrd_phy(inst);
+ 	int ret;
+ 
++	if (inst->phy_cfg->id == EXYNOS5_DRDPHY_UTMI) {
++		ret = exynos850_usbdrd_phy_exit(phy);
++		if (ret)
++			return ret;
++	}
++
++	exynos5_usbdrd_phy_isol(inst, true);
++
+ 	if (inst->phy_cfg->id != EXYNOS5_DRDPHY_UTMI)
+ 		return 0;
+ 
+-	ret = exynos850_usbdrd_phy_exit(phy);
+-	if (ret)
+-		return ret;
+-
+-	exynos5_usbdrd_phy_isol(inst, true);
+ 	return regulator_bulk_disable(phy_drd->drv_data->n_regulators,
+ 				      phy_drd->regulators);
+ }
+diff --git a/drivers/phy/tegra/xusb-tegra186.c b/drivers/phy/tegra/xusb-tegra186.c
+index 0f60d5d1c1678d..fae6242aa730e0 100644
+--- a/drivers/phy/tegra/xusb-tegra186.c
++++ b/drivers/phy/tegra/xusb-tegra186.c
+@@ -928,6 +928,7 @@ static int tegra186_utmi_phy_init(struct phy *phy)
+ 	unsigned int index = lane->index;
+ 	struct device *dev = padctl->dev;
+ 	int err;
++	u32 reg;
+ 
+ 	port = tegra_xusb_find_usb2_port(padctl, index);
+ 	if (!port) {
+@@ -935,6 +936,16 @@ static int tegra186_utmi_phy_init(struct phy *phy)
+ 		return -ENODEV;
+ 	}
+ 
++	if (port->mode == USB_DR_MODE_OTG ||
++	    port->mode == USB_DR_MODE_PERIPHERAL) {
++		/* reset VBUS&ID OVERRIDE */
++		reg = padctl_readl(padctl, USB2_VBUS_ID);
++		reg &= ~VBUS_OVERRIDE;
++		reg &= ~ID_OVERRIDE(~0);
++		reg |= ID_OVERRIDE_FLOATING;
++		padctl_writel(padctl, reg, USB2_VBUS_ID);
++	}
++
+ 	if (port->supply && port->mode == USB_DR_MODE_HOST) {
+ 		err = regulator_enable(port->supply);
+ 		if (err) {
+diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
+index c9dde1ac9523e8..3023b07dc483b5 100644
+--- a/drivers/scsi/scsi_lib.c
++++ b/drivers/scsi/scsi_lib.c
+@@ -1653,13 +1653,6 @@ static blk_status_t scsi_prepare_cmd(struct request *req)
+ 	if (in_flight)
+ 		__set_bit(SCMD_STATE_INFLIGHT, &cmd->state);
+ 
+-	/*
+-	 * Only clear the driver-private command data if the LLD does not supply
+-	 * a function to initialize that data.
+-	 */
+-	if (!shost->hostt->init_cmd_priv)
+-		memset(cmd + 1, 0, shost->hostt->cmd_size);
+-
+ 	cmd->prot_op = SCSI_PROT_NORMAL;
+ 	if (blk_rq_bytes(req))
+ 		cmd->sc_data_direction = rq_dma_dir(req);
+@@ -1826,6 +1819,13 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
+ 	if (!scsi_host_queue_ready(q, shost, sdev, cmd))
+ 		goto out_dec_target_busy;
+ 
++	/*
++	 * Only clear the driver-private command data if the LLD does not supply
++	 * a function to initialize that data.
++	 */
++	if (shost->hostt->cmd_size && !shost->hostt->init_cmd_priv)
++		memset(cmd + 1, 0, shost->hostt->cmd_size);
++
+ 	if (!(req->rq_flags & RQF_DONTPREP)) {
+ 		ret = scsi_prepare_cmd(req);
+ 		if (ret != BLK_STS_OK)
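The scsi_lib.c change moves the clearing of LLD-private data out of
scsi_prepare_cmd(), which is skipped for already-prepared (RQF_DONTPREP)
requests, into scsi_queue_rq(), so retried commands do not see stale private
state. The "cmd + 1" arithmetic works because a host's cmd_size bytes are
allocated directly behind struct scsi_cmnd; a hypothetical LLD accessor
showing the same layout (scsi_cmd_priv() in scsi/scsi_cmnd.h is the stock
helper):

	struct my_lld_cmd {		/* hypothetical per-command data */
		int phase;
	};

	static struct my_lld_cmd *my_cmd_priv(struct scsi_cmnd *cmd)
	{
		return (struct my_lld_cmd *)(cmd + 1);
	}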
+diff --git a/drivers/thermal/gov_bang_bang.c b/drivers/thermal/gov_bang_bang.c
+index 863e7a4272e66f..b887e48e8c7e67 100644
+--- a/drivers/thermal/gov_bang_bang.c
++++ b/drivers/thermal/gov_bang_bang.c
+@@ -67,6 +67,7 @@ static void bang_bang_control(struct thermal_zone_device *tz,
+ 			      const struct thermal_trip *trip,
+ 			      bool crossed_up)
+ {
++	const struct thermal_trip_desc *td = trip_to_trip_desc(trip);
+ 	struct thermal_instance *instance;
+ 
+ 	lockdep_assert_held(&tz->lock);
+@@ -75,10 +76,8 @@ static void bang_bang_control(struct thermal_zone_device *tz,
+ 		thermal_zone_trip_id(tz, trip), trip->temperature,
+ 		tz->temperature, trip->hysteresis);
+ 
+-	list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
+-		if (instance->trip == trip)
+-			bang_bang_set_instance_target(instance, crossed_up);
+-	}
++	list_for_each_entry(instance, &td->thermal_instances, trip_node)
++		bang_bang_set_instance_target(instance, crossed_up);
+ }
+ 
+ static void bang_bang_manage(struct thermal_zone_device *tz)
+@@ -104,8 +103,8 @@ static void bang_bang_manage(struct thermal_zone_device *tz)
+ 		 * to the thermal zone temperature and the trip point threshold.
+ 		 */
+ 		turn_on = tz->temperature >= td->threshold;
+-		list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
+-			if (!instance->initialized && instance->trip == trip)
++		list_for_each_entry(instance, &td->thermal_instances, trip_node) {
++			if (!instance->initialized)
+ 				bang_bang_set_instance_target(instance, turn_on);
+ 		}
+ 	}
+diff --git a/drivers/thermal/gov_fair_share.c b/drivers/thermal/gov_fair_share.c
+index ce0ea571ed67ab..d37d57d48c389a 100644
+--- a/drivers/thermal/gov_fair_share.c
++++ b/drivers/thermal/gov_fair_share.c
+@@ -44,7 +44,7 @@ static int get_trip_level(struct thermal_zone_device *tz)
+ /**
+  * fair_share_throttle - throttles devices associated with the given zone
+  * @tz: thermal_zone_device
+- * @trip: trip point
++ * @td: trip point descriptor
+  * @trip_level: number of trips crossed by the zone temperature
+  *
+  * Throttling Logic: This uses three parameters to calculate the new
+@@ -61,29 +61,23 @@ static int get_trip_level(struct thermal_zone_device *tz)
+  * new_state of cooling device = P3 * P2 * P1
+  */
+ static void fair_share_throttle(struct thermal_zone_device *tz,
+-				const struct thermal_trip *trip,
++				const struct thermal_trip_desc *td,
+ 				int trip_level)
+ {
+ 	struct thermal_instance *instance;
+ 	int total_weight = 0;
+ 	int nr_instances = 0;
+ 
+-	list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
+-		if (instance->trip != trip)
+-			continue;
+-
++	list_for_each_entry(instance, &td->thermal_instances, trip_node) {
+ 		total_weight += instance->weight;
+ 		nr_instances++;
+ 	}
+ 
+-	list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
++	list_for_each_entry(instance, &td->thermal_instances, trip_node) {
+ 		struct thermal_cooling_device *cdev = instance->cdev;
+ 		u64 dividend;
+ 		u32 divisor;
+ 
+-		if (instance->trip != trip)
+-			continue;
+-
+ 		dividend = trip_level;
+ 		dividend *= cdev->max_state;
+ 		divisor = tz->num_trips;
+@@ -116,7 +110,7 @@ static void fair_share_manage(struct thermal_zone_device *tz)
+ 		    trip->type == THERMAL_TRIP_HOT)
+ 			continue;
+ 
+-		fair_share_throttle(tz, trip, trip_level);
++		fair_share_throttle(tz, td, trip_level);
+ 	}
+ }
+ 
+diff --git a/drivers/thermal/gov_power_allocator.c b/drivers/thermal/gov_power_allocator.c
+index 1b2345a697c5a0..90b4bfd9237bce 100644
+--- a/drivers/thermal/gov_power_allocator.c
++++ b/drivers/thermal/gov_power_allocator.c
+@@ -97,11 +97,9 @@ struct power_allocator_params {
+ 	struct power_actor *power;
+ };
+ 
+-static bool power_actor_is_valid(struct power_allocator_params *params,
+-				 struct thermal_instance *instance)
++static bool power_actor_is_valid(struct thermal_instance *instance)
+ {
+-	return (instance->trip == params->trip_max &&
+-		 cdev_is_power_actor(instance->cdev));
++	return cdev_is_power_actor(instance->cdev);
+ }
+ 
+ /**
+@@ -118,13 +116,14 @@ static bool power_actor_is_valid(struct power_allocator_params *params,
+ static u32 estimate_sustainable_power(struct thermal_zone_device *tz)
+ {
+ 	struct power_allocator_params *params = tz->governor_data;
++	const struct thermal_trip_desc *td = trip_to_trip_desc(params->trip_max);
+ 	struct thermal_cooling_device *cdev;
+ 	struct thermal_instance *instance;
+ 	u32 sustainable_power = 0;
+ 	u32 min_power;
+ 
+-	list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
+-		if (!power_actor_is_valid(params, instance))
++	list_for_each_entry(instance, &td->thermal_instances, trip_node) {
++		if (!power_actor_is_valid(instance))
+ 			continue;
+ 
+ 		cdev = instance->cdev;
+@@ -364,7 +363,7 @@ static void divvy_up_power(struct power_actor *power, int num_actors,
+ 
+ 	for (i = 0; i < num_actors; i++) {
+ 		struct power_actor *pa = &power[i];
+-		u64 req_range = (u64)pa->req_power * power_range;
++		u64 req_range = (u64)pa->weighted_req_power * power_range;
+ 
+ 		pa->granted_power = DIV_ROUND_CLOSEST_ULL(req_range,
+ 							  total_req_power);
+@@ -400,6 +399,7 @@ static void divvy_up_power(struct power_actor *power, int num_actors,
+ static void allocate_power(struct thermal_zone_device *tz, int control_temp)
+ {
+ 	struct power_allocator_params *params = tz->governor_data;
++	const struct thermal_trip_desc *td = trip_to_trip_desc(params->trip_max);
+ 	unsigned int num_actors = params->num_actors;
+ 	struct power_actor *power = params->power;
+ 	struct thermal_cooling_device *cdev;
+@@ -417,10 +417,10 @@ static void allocate_power(struct thermal_zone_device *tz, int control_temp)
+ 	/* Clean all buffers for new power estimations */
+ 	memset(power, 0, params->buffer_size);
+ 
+-	list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
++	list_for_each_entry(instance, &td->thermal_instances, trip_node) {
+ 		struct power_actor *pa = &power[i];
+ 
+-		if (!power_actor_is_valid(params, instance))
++		if (!power_actor_is_valid(instance))
+ 			continue;
+ 
+ 		cdev = instance->cdev;
+@@ -454,10 +454,10 @@ static void allocate_power(struct thermal_zone_device *tz, int control_temp)
+ 		       power_range);
+ 
+ 	i = 0;
+-	list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
++	list_for_each_entry(instance, &td->thermal_instances, trip_node) {
+ 		struct power_actor *pa = &power[i];
+ 
+-		if (!power_actor_is_valid(params, instance))
++		if (!power_actor_is_valid(instance))
+ 			continue;
+ 
+ 		power_actor_set_power(instance->cdev, instance,
+@@ -538,12 +538,13 @@ static void reset_pid_controller(struct power_allocator_params *params)
+ static void allow_maximum_power(struct thermal_zone_device *tz)
+ {
+ 	struct power_allocator_params *params = tz->governor_data;
++	const struct thermal_trip_desc *td = trip_to_trip_desc(params->trip_max);
+ 	struct thermal_cooling_device *cdev;
+ 	struct thermal_instance *instance;
+ 	u32 req_power;
+ 
+-	list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
+-		if (!power_actor_is_valid(params, instance))
++	list_for_each_entry(instance, &td->thermal_instances, trip_node) {
++		if (!power_actor_is_valid(instance))
+ 			continue;
+ 
+ 		cdev = instance->cdev;
+@@ -581,13 +582,16 @@ static void allow_maximum_power(struct thermal_zone_device *tz)
+ static int check_power_actors(struct thermal_zone_device *tz,
+ 			      struct power_allocator_params *params)
+ {
++	const struct thermal_trip_desc *td;
+ 	struct thermal_instance *instance;
+ 	int ret = 0;
+ 
+-	list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
+-		if (instance->trip != params->trip_max)
+-			continue;
++	if (!params->trip_max)
++		return 0;
++
++	td = trip_to_trip_desc(params->trip_max);
+ 
++	list_for_each_entry(instance, &td->thermal_instances, trip_node) {
+ 		if (!cdev_is_power_actor(instance->cdev)) {
+ 			dev_warn(&tz->device, "power_allocator: %s is not a power actor\n",
+ 				 instance->cdev->type);
+@@ -631,30 +635,43 @@ static int allocate_actors_buffer(struct power_allocator_params *params,
+ 	return ret;
+ }
+ 
++static void power_allocator_update_weight(struct power_allocator_params *params)
++{
++	const struct thermal_trip_desc *td;
++	struct thermal_instance *instance;
++
++	if (!params->trip_max)
++		return;
++
++	td = trip_to_trip_desc(params->trip_max);
++
++	params->total_weight = 0;
++	list_for_each_entry(instance, &td->thermal_instances, trip_node)
++		if (power_actor_is_valid(instance))
++			params->total_weight += instance->weight;
++}
++
+ static void power_allocator_update_tz(struct thermal_zone_device *tz,
+ 				      enum thermal_notify_event reason)
+ {
+ 	struct power_allocator_params *params = tz->governor_data;
++	const struct thermal_trip_desc *td = trip_to_trip_desc(params->trip_max);
+ 	struct thermal_instance *instance;
+ 	int num_actors = 0;
+ 
+ 	switch (reason) {
+ 	case THERMAL_TZ_BIND_CDEV:
+ 	case THERMAL_TZ_UNBIND_CDEV:
+-		list_for_each_entry(instance, &tz->thermal_instances, tz_node)
+-			if (power_actor_is_valid(params, instance))
++		list_for_each_entry(instance, &td->thermal_instances, trip_node)
++			if (power_actor_is_valid(instance))
+ 				num_actors++;
+ 
+-		if (num_actors == params->num_actors)
+-			return;
++		if (num_actors != params->num_actors)
++			allocate_actors_buffer(params, num_actors);
+ 
+-		allocate_actors_buffer(params, num_actors);
+-		break;
++		fallthrough;
+ 	case THERMAL_INSTANCE_WEIGHT_CHANGED:
+-		params->total_weight = 0;
+-		list_for_each_entry(instance, &tz->thermal_instances, tz_node)
+-			if (power_actor_is_valid(params, instance))
+-				params->total_weight += instance->weight;
++		power_allocator_update_weight(params);
+ 		break;
+ 	default:
+ 		break;
+@@ -720,6 +737,8 @@ static int power_allocator_bind(struct thermal_zone_device *tz)
+ 
+ 	tz->governor_data = params;
+ 
++	power_allocator_update_weight(params);
++
+ 	return 0;
+ 
+ free_params:
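The one-line divvy_up_power() change is a units fix: total_req_power is
accumulated from weighted_req_power values, so scaling each grant by the raw
req_power mixed weighted and unweighted quantities and skewed the split. The
corrected proportion, as a hypothetical helper:

	static u32 granted_power(u32 weighted_req, u32 power_range,
				 u32 total_weighted_req)
	{
		u64 req_range = (u64)weighted_req * power_range;

		return DIV_ROUND_CLOSEST_ULL(req_range, total_weighted_req);
	}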
+diff --git a/drivers/thermal/gov_step_wise.c b/drivers/thermal/gov_step_wise.c
+index fd5527188cf91a..ea4bf88d37f337 100644
+--- a/drivers/thermal/gov_step_wise.c
++++ b/drivers/thermal/gov_step_wise.c
+@@ -66,9 +66,10 @@ static unsigned long get_target_state(struct thermal_instance *instance,
+ }
+ 
+ static void thermal_zone_trip_update(struct thermal_zone_device *tz,
+-				     const struct thermal_trip *trip,
++				     const struct thermal_trip_desc *td,
+ 				     int trip_threshold)
+ {
++	const struct thermal_trip *trip = &td->trip;
+ 	enum thermal_trend trend = get_tz_trend(tz, trip);
+ 	int trip_id = thermal_zone_trip_id(tz, trip);
+ 	struct thermal_instance *instance;
+@@ -82,12 +83,9 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz,
+ 	dev_dbg(&tz->device, "Trip%d[type=%d,temp=%d]:trend=%d,throttle=%d\n",
+ 		trip_id, trip->type, trip_threshold, trend, throttle);
+ 
+-	list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
++	list_for_each_entry(instance, &td->thermal_instances, trip_node) {
+ 		int old_target;
+ 
+-		if (instance->trip != trip)
+-			continue;
+-
+ 		old_target = instance->target;
+ 		instance->target = get_target_state(instance, trend, throttle);
+ 
+@@ -127,11 +125,13 @@ static void step_wise_manage(struct thermal_zone_device *tz)
+ 		    trip->type == THERMAL_TRIP_HOT)
+ 			continue;
+ 
+-		thermal_zone_trip_update(tz, trip, td->threshold);
++		thermal_zone_trip_update(tz, td, td->threshold);
+ 	}
+ 
+-	list_for_each_entry(instance, &tz->thermal_instances, tz_node)
+-		thermal_cdev_update(instance->cdev);
++	for_each_trip_desc(tz, td) {
++		list_for_each_entry(instance, &td->thermal_instances, trip_node)
++			thermal_cdev_update(instance->cdev);
++	}
+ }
+ 
+ static struct thermal_governor thermal_gov_step_wise = {
+diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
+index 1d2f2b307bac50..c2fa236e10cda7 100644
+--- a/drivers/thermal/thermal_core.c
++++ b/drivers/thermal/thermal_core.c
+@@ -490,7 +490,7 @@ static void thermal_zone_device_check(struct work_struct *work)
+ 
+ static void thermal_zone_device_init(struct thermal_zone_device *tz)
+ {
+-	struct thermal_instance *pos;
++	struct thermal_trip_desc *td;
+ 
+ 	INIT_DELAYED_WORK(&tz->poll_queue, thermal_zone_device_check);
+ 
+@@ -498,8 +498,12 @@ static void thermal_zone_device_init(struct thermal_zone_device *tz)
+ 	tz->passive = 0;
+ 	tz->prev_low_trip = -INT_MAX;
+ 	tz->prev_high_trip = INT_MAX;
+-	list_for_each_entry(pos, &tz->thermal_instances, tz_node)
+-		pos->initialized = false;
++	for_each_trip_desc(tz, td) {
++		struct thermal_instance *instance;
++
++		list_for_each_entry(instance, &td->thermal_instances, trip_node)
++			instance->initialized = false;
++	}
+ }
+ 
+ static void thermal_governor_trip_crossed(struct thermal_governor *governor,
+@@ -764,12 +768,12 @@ struct thermal_zone_device *thermal_zone_get_by_id(int id)
+  * Return: 0 on success, the proper error value otherwise.
+  */
+ static int thermal_bind_cdev_to_trip(struct thermal_zone_device *tz,
+-				     const struct thermal_trip *trip,
++				     struct thermal_trip *trip,
+ 				     struct thermal_cooling_device *cdev,
+ 				     struct cooling_spec *cool_spec)
+ {
+-	struct thermal_instance *dev;
+-	struct thermal_instance *pos;
++	struct thermal_trip_desc *td = trip_to_trip_desc(trip);
++	struct thermal_instance *dev, *instance;
+ 	bool upper_no_limit;
+ 	int result;
+ 
+@@ -832,13 +836,13 @@ static int thermal_bind_cdev_to_trip(struct thermal_zone_device *tz,
+ 		goto remove_trip_file;
+ 
+ 	mutex_lock(&cdev->lock);
+-	list_for_each_entry(pos, &tz->thermal_instances, tz_node)
+-		if (pos->trip == trip && pos->cdev == cdev) {
++	list_for_each_entry(instance, &td->thermal_instances, trip_node)
++		if (instance->cdev == cdev) {
+ 			result = -EEXIST;
+ 			break;
+ 		}
+ 	if (!result) {
+-		list_add_tail(&dev->tz_node, &tz->thermal_instances);
++		list_add_tail(&dev->trip_node, &td->thermal_instances);
+ 		list_add_tail(&dev->cdev_node, &cdev->thermal_instances);
+ 		atomic_set(&tz->need_update, 1);
+ 
+@@ -872,15 +876,16 @@ static int thermal_bind_cdev_to_trip(struct thermal_zone_device *tz,
+  * This function is usually called in the thermal zone device .unbind callback.
+  */
+ static void thermal_unbind_cdev_from_trip(struct thermal_zone_device *tz,
+-					  const struct thermal_trip *trip,
++					  struct thermal_trip *trip,
+ 					  struct thermal_cooling_device *cdev)
+ {
++	struct thermal_trip_desc *td = trip_to_trip_desc(trip);
+ 	struct thermal_instance *pos, *next;
+ 
+ 	mutex_lock(&cdev->lock);
+-	list_for_each_entry_safe(pos, next, &tz->thermal_instances, tz_node) {
+-		if (pos->trip == trip && pos->cdev == cdev) {
+-			list_del(&pos->tz_node);
++	list_for_each_entry_safe(pos, next, &td->thermal_instances, trip_node) {
++		if (pos->cdev == cdev) {
++			list_del(&pos->trip_node);
+ 			list_del(&pos->cdev_node);
+ 
+ 			thermal_governor_update_tz(tz, THERMAL_TZ_UNBIND_CDEV);
+@@ -1435,7 +1440,6 @@ thermal_zone_device_register_with_trips(const char *type,
+ 		}
+ 	}
+ 
+-	INIT_LIST_HEAD(&tz->thermal_instances);
+ 	INIT_LIST_HEAD(&tz->node);
+ 	ida_init(&tz->ida);
+ 	mutex_init(&tz->lock);
+@@ -1459,6 +1463,7 @@ thermal_zone_device_register_with_trips(const char *type,
+ 	tz->num_trips = num_trips;
+ 	for_each_trip_desc(tz, td) {
+ 		td->trip = *trip++;
++		INIT_LIST_HEAD(&td->thermal_instances);
+ 		/*
+ 		 * Mark all thresholds as invalid to start with even though
+ 		 * this only matters for the trips that start as invalid and
+diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
+index 421522a2bb9d4c..163871699a602c 100644
+--- a/drivers/thermal/thermal_core.h
++++ b/drivers/thermal/thermal_core.h
+@@ -30,6 +30,7 @@ struct thermal_trip_desc {
+ 	struct thermal_trip trip;
+ 	struct thermal_trip_attrs trip_attrs;
+ 	struct list_head notify_list_node;
++	struct list_head thermal_instances;
+ 	int notify_temp;
+ 	int threshold;
+ };
+@@ -99,7 +100,6 @@ struct thermal_governor {
+  * @tzp:	thermal zone parameters
+  * @governor:	pointer to the governor for this thermal zone
+  * @governor_data:	private pointer for governor data
+- * @thermal_instances:	list of &struct thermal_instance of this thermal zone
+  * @ida:	&struct ida to generate unique id for this zone's cooling
+  *		devices
+  * @lock:	lock to protect thermal_instances list
+@@ -133,7 +133,6 @@ struct thermal_zone_device {
+ 	struct thermal_zone_params *tzp;
+ 	struct thermal_governor *governor;
+ 	void *governor_data;
+-	struct list_head thermal_instances;
+ 	struct ida ida;
+ 	struct mutex lock;
+ 	struct list_head node;
+@@ -230,7 +229,7 @@ struct thermal_instance {
+ 	struct device_attribute attr;
+ 	char weight_attr_name[THERMAL_NAME_LENGTH];
+ 	struct device_attribute weight_attr;
+-	struct list_head tz_node; /* node in tz->thermal_instances */
++	struct list_head trip_node; /* node in trip->thermal_instances */
+ 	struct list_head cdev_node; /* node in cdev->thermal_instances */
+ 	unsigned int weight; /* The weight of the cooling device */
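The struct edits above are the core of this thermal series: instances move
from one per-zone list (tz_node) onto per-trip lists (trip_node), so every
governor loop that previously filtered on instance->trip can drop the check.
Condensed before/after, where act_on() is a hypothetical stand-in for the
governor's per-instance work:

	/* before: one zone-wide list, filtered per trip */
	list_for_each_entry(instance, &tz->thermal_instances, tz_node)
		if (instance->trip == trip)
			act_on(instance);

	/* after: each trip descriptor owns its instances */
	list_for_each_entry(instance, &td->thermal_instances, trip_node)
		act_on(instance);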
+ 	bool upper_no_limit;
+diff --git a/drivers/thermal/thermal_helpers.c b/drivers/thermal/thermal_helpers.c
+index dc374a7a1a659f..403d62d3ce77ee 100644
+--- a/drivers/thermal/thermal_helpers.c
++++ b/drivers/thermal/thermal_helpers.c
+@@ -43,10 +43,11 @@ static bool thermal_instance_present(struct thermal_zone_device *tz,
+ 				     struct thermal_cooling_device *cdev,
+ 				     const struct thermal_trip *trip)
+ {
++	const struct thermal_trip_desc *td = trip_to_trip_desc(trip);
+ 	struct thermal_instance *ti;
+ 
+-	list_for_each_entry(ti, &tz->thermal_instances, tz_node) {
+-		if (ti->trip == trip && ti->cdev == cdev)
++	list_for_each_entry(ti, &td->thermal_instances, trip_node) {
++		if (ti->cdev == cdev)
+ 			return true;
+ 	}
+ 
+diff --git a/drivers/thermal/thermal_of.c b/drivers/thermal/thermal_of.c
+index 5d3d8ce672cd51..e0aa9d9d5604b7 100644
+--- a/drivers/thermal/thermal_of.c
++++ b/drivers/thermal/thermal_of.c
+@@ -293,12 +293,40 @@ static bool thermal_of_get_cooling_spec(struct device_node *map_np, int index,
+ 	return true;
+ }
+ 
++static bool thermal_of_cm_lookup(struct device_node *cm_np,
++				 const struct thermal_trip *trip,
++				 struct thermal_cooling_device *cdev,
++				 struct cooling_spec *c)
++{
++	for_each_child_of_node_scoped(cm_np, child) {
++		struct device_node *tr_np;
++		int count, i;
++
++		tr_np = of_parse_phandle(child, "trip", 0);
++		if (tr_np != trip->priv)
++			continue;
++
++		/* The trip has been found, look up the cdev. */
++		count = of_count_phandle_with_args(child, "cooling-device",
++						   "#cooling-cells");
++		if (count <= 0)
++			pr_err("Add a cooling_device property with at least one device\n");
++
++		for (i = 0; i < count; i++) {
++			if (thermal_of_get_cooling_spec(child, i, cdev, c))
++				return true;
++		}
++	}
++
++	return false;
++}
++
+ static bool thermal_of_should_bind(struct thermal_zone_device *tz,
+ 				   const struct thermal_trip *trip,
+ 				   struct thermal_cooling_device *cdev,
+ 				   struct cooling_spec *c)
+ {
+-	struct device_node *tz_np, *cm_np, *child;
++	struct device_node *tz_np, *cm_np;
+ 	bool result = false;
+ 
+ 	tz_np = thermal_of_zone_get_by_name(tz);
+@@ -312,28 +340,7 @@ static bool thermal_of_should_bind(struct thermal_zone_device *tz,
+ 		goto out;
+ 
+ 	/* Look up the trip and the cdev in the cooling maps. */
+-	for_each_child_of_node(cm_np, child) {
+-		struct device_node *tr_np;
+-		int count, i;
+-
+-		tr_np = of_parse_phandle(child, "trip", 0);
+-		if (tr_np != trip->priv)
+-			continue;
+-
+-		/* The trip has been found, look up the cdev. */
+-		count = of_count_phandle_with_args(child, "cooling-device", "#cooling-cells");
+-		if (count <= 0)
+-			pr_err("Add a cooling_device property with at least one device\n");
+-
+-		for (i = 0; i < count; i++) {
+-			result = thermal_of_get_cooling_spec(child, i, cdev, c);
+-			if (result)
+-				break;
+-		}
+-
+-		of_node_put(child);
+-		break;
+-	}
++	result = thermal_of_cm_lookup(cm_np, trip, cdev, c);
+ 
+ 	of_node_put(cm_np);
+ out:
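The thermal_of.c refactor is about reference counting: the old loop had to
remember of_node_put(child) on every exit path, while
for_each_child_of_node_scoped() declares the iterator with the
__free(device_node) cleanup attribute, so the reference drops automatically
when child goes out of scope and early returns become safe. Condensed shape
(match() is a hypothetical predicate):

	for_each_child_of_node_scoped(cm_np, child) {
		if (!match(child))
			continue;
		return true;	/* no of_node_put() needed on exit */
	}
	return false;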
+diff --git a/drivers/ufs/core/ufs_bsg.c b/drivers/ufs/core/ufs_bsg.c
+index 8d4ad0a3f2cf02..252186124669a8 100644
+--- a/drivers/ufs/core/ufs_bsg.c
++++ b/drivers/ufs/core/ufs_bsg.c
+@@ -194,10 +194,12 @@ static int ufs_bsg_request(struct bsg_job *job)
+ 	ufshcd_rpm_put_sync(hba);
+ 	kfree(buff);
+ 	bsg_reply->result = ret;
+-	job->reply_len = !rpmb ? sizeof(struct ufs_bsg_reply) : sizeof(struct ufs_rpmb_reply);
+ 	/* complete the job here only if no error */
+-	if (ret == 0)
++	if (ret == 0) {
++		job->reply_len = rpmb ? sizeof(struct ufs_rpmb_reply) :
++					sizeof(struct ufs_bsg_reply);
+ 		bsg_job_done(job, ret, bsg_reply->reply_payload_rcv_len);
++	}
+ 
+ 	return ret;
+ }
+diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
+index 67410c4cebee6d..a3e95ef5eda82e 100644
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -266,7 +266,7 @@ static bool ufshcd_has_pending_tasks(struct ufs_hba *hba)
+ 
+ static bool ufshcd_is_ufs_dev_busy(struct ufs_hba *hba)
+ {
+-	return hba->outstanding_reqs || ufshcd_has_pending_tasks(hba);
++	return scsi_host_busy(hba->host) || ufshcd_has_pending_tasks(hba);
+ }
+ 
+ static const struct ufs_dev_quirk ufs_fixups[] = {
+@@ -639,8 +639,8 @@ static void ufshcd_print_host_state(struct ufs_hba *hba)
+ 	const struct scsi_device *sdev_ufs = hba->ufs_device_wlun;
+ 
+ 	dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
+-	dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n",
+-		hba->outstanding_reqs, hba->outstanding_tasks);
++	dev_err(hba->dev, "%d outstanding reqs, tasks=0x%lx\n",
++		scsi_host_busy(hba->host), hba->outstanding_tasks);
+ 	dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
+ 		hba->saved_err, hba->saved_uic_err);
+ 	dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
+@@ -8975,7 +8975,7 @@ static enum scsi_timeout_action ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
+ 	dev_info(hba->dev, "%s() finished; outstanding_tasks = %#lx.\n",
+ 		 __func__, hba->outstanding_tasks);
+ 
+-	return hba->outstanding_reqs ? SCSI_EH_RESET_TIMER : SCSI_EH_DONE;
++	return scsi_host_busy(hba->host) ? SCSI_EH_RESET_TIMER : SCSI_EH_DONE;
+ }
+ 
+ static const struct attribute_group *ufshcd_driver_groups[] = {
+@@ -10457,6 +10457,21 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
+ 	 */
+ 	spin_lock_init(&hba->clk_gating.lock);
+ 
++	/*
++	 * Set the default power management level for runtime and system PM.
++	 * Host controller drivers can override them in their
++	 * 'ufs_hba_variant_ops::init' callback.
++	 *
++	 * Default power saving mode is to keep UFS link in Hibern8 state
++	 * and UFS device in sleep state.
++	 */
++	hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
++						UFS_SLEEP_PWR_MODE,
++						UIC_LINK_HIBERN8_STATE);
++	hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
++						UFS_SLEEP_PWR_MODE,
++						UIC_LINK_HIBERN8_STATE);
++
+ 	err = ufshcd_hba_init(hba);
+ 	if (err)
+ 		goto out_error;
+@@ -10606,21 +10621,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
+ 		goto free_tmf_queue;
+ 	}
+ 
+-	/*
+-	 * Set the default power management level for runtime and system PM if
+-	 * not set by the host controller drivers.
+-	 * Default power saving mode is to keep UFS link in Hibern8 state
+-	 * and UFS device in sleep state.
+-	 */
+-	if (!hba->rpm_lvl)
+-		hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
+-						UFS_SLEEP_PWR_MODE,
+-						UIC_LINK_HIBERN8_STATE);
+-	if (!hba->spm_lvl)
+-		hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
+-						UFS_SLEEP_PWR_MODE,
+-						UIC_LINK_HIBERN8_STATE);
+-
+ 	INIT_DELAYED_WORK(&hba->rpm_dev_flush_recheck_work, ufshcd_rpm_dev_flush_recheck_work);
+ 	INIT_DELAYED_WORK(&hba->ufs_rtc_update_work, ufshcd_rtc_work);
+ 
+diff --git a/fs/afs/server.c b/fs/afs/server.c
+index 038f9d0ae3af8e..4504e16b458cc1 100644
+--- a/fs/afs/server.c
++++ b/fs/afs/server.c
+@@ -163,6 +163,8 @@ static struct afs_server *afs_install_server(struct afs_cell *cell,
+ 	rb_insert_color(&server->uuid_rb, &net->fs_servers);
+ 	hlist_add_head_rcu(&server->proc_link, &net->fs_proc);
+ 
++	afs_get_cell(cell, afs_cell_trace_get_server);
++
+ added_dup:
+ 	write_seqlock(&net->fs_addr_lock);
+ 	estate = rcu_dereference_protected(server->endpoint_state,
+@@ -442,6 +444,7 @@ static void afs_server_rcu(struct rcu_head *rcu)
+ 			 atomic_read(&server->active), afs_server_trace_free);
+ 	afs_put_endpoint_state(rcu_access_pointer(server->endpoint_state),
+ 			       afs_estate_trace_put_server);
++	afs_put_cell(server->cell, afs_cell_trace_put_server);
+ 	kfree(server);
+ }
+ 
+diff --git a/fs/afs/server_list.c b/fs/afs/server_list.c
+index 7e7e567a7f8a20..d20cd902ef949a 100644
+--- a/fs/afs/server_list.c
++++ b/fs/afs/server_list.c
+@@ -97,8 +97,8 @@ struct afs_server_list *afs_alloc_server_list(struct afs_volume *volume,
+ 				break;
+ 		if (j < slist->nr_servers) {
+ 			if (slist->servers[j].server == server) {
+-				afs_put_server(volume->cell->net, server,
+-					       afs_server_trace_put_slist_isort);
++				afs_unuse_server(volume->cell->net, server,
++						 afs_server_trace_put_slist_isort);
+ 				continue;
+ 			}
+ 
+diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
+index 035ba52742a504..4db912f5623055 100644
+--- a/fs/nfs/delegation.c
++++ b/fs/nfs/delegation.c
+@@ -780,6 +780,43 @@ int nfs4_inode_return_delegation(struct inode *inode)
+ 	return 0;
+ }
+ 
++/**
++ * nfs4_inode_set_return_delegation_on_close - asynchronously return a delegation
++ * @inode: inode to process
++ *
++ * This routine is called to request that the delegation be returned as soon
++ * as the file is closed. If the file is already closed, the delegation is
++ * immediately returned.
++ */
++void nfs4_inode_set_return_delegation_on_close(struct inode *inode)
++{
++	struct nfs_delegation *delegation;
++	struct nfs_delegation *ret = NULL;
++
++	if (!inode)
++		return;
++	rcu_read_lock();
++	delegation = nfs4_get_valid_delegation(inode);
++	if (!delegation)
++		goto out;
++	spin_lock(&delegation->lock);
++	if (!delegation->inode)
++		goto out_unlock;
++	if (list_empty(&NFS_I(inode)->open_files) &&
++	    !test_and_set_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) {
++		/* Refcount matched in nfs_end_delegation_return() */
++		ret = nfs_get_delegation(delegation);
++	} else
++		set_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags);
++out_unlock:
++	spin_unlock(&delegation->lock);
++	if (ret)
++		nfs_clear_verifier_delegated(inode);
++out:
++	rcu_read_unlock();
++	nfs_end_delegation_return(inode, ret, 0);
++}
++
+ /**
+  * nfs4_inode_return_delegation_on_close - asynchronously return a delegation
+  * @inode: inode to process
+diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h
+index 71524d34ed207c..8ff5ab9c5c2565 100644
+--- a/fs/nfs/delegation.h
++++ b/fs/nfs/delegation.h
+@@ -49,6 +49,7 @@ void nfs_inode_reclaim_delegation(struct inode *inode, const struct cred *cred,
+ 				  unsigned long pagemod_limit, u32 deleg_type);
+ int nfs4_inode_return_delegation(struct inode *inode);
+ void nfs4_inode_return_delegation_on_close(struct inode *inode);
++void nfs4_inode_set_return_delegation_on_close(struct inode *inode);
+ int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid);
+ void nfs_inode_evict_delegation(struct inode *inode);
+ 
+diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
+index 90079ca134dd3c..c1f1b826888c98 100644
+--- a/fs/nfs/direct.c
++++ b/fs/nfs/direct.c
+@@ -56,6 +56,7 @@
+ #include <linux/uaccess.h>
+ #include <linux/atomic.h>
+ 
++#include "delegation.h"
+ #include "internal.h"
+ #include "iostat.h"
+ #include "pnfs.h"
+@@ -130,6 +131,20 @@ static void nfs_direct_truncate_request(struct nfs_direct_req *dreq,
+ 		dreq->count = req_start;
+ }
+ 
++static void nfs_direct_file_adjust_size_locked(struct inode *inode,
++					       loff_t offset, size_t count)
++{
++	loff_t newsize = offset + (loff_t)count;
++	loff_t oldsize = i_size_read(inode);
++
++	if (newsize > oldsize) {
++		i_size_write(inode, newsize);
++		NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_SIZE;
++		trace_nfs_size_grow(inode, newsize);
++		nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
++	}
++}
++
+ /**
+  * nfs_swap_rw - NFS address space operation for swap I/O
+  * @iocb: target I/O control block
+@@ -272,6 +287,8 @@ static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
+ 	nfs_direct_count_bytes(dreq, hdr);
+ 	spin_unlock(&dreq->lock);
+ 
++	nfs_update_delegated_atime(dreq->inode);
++
+ 	while (!list_empty(&hdr->pages)) {
+ 		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
+ 		struct page *page = req->wb_page;
+@@ -732,6 +749,7 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
+ 	struct nfs_direct_req *dreq = hdr->dreq;
+ 	struct nfs_commit_info cinfo;
+ 	struct nfs_page *req = nfs_list_entry(hdr->pages.next);
++	struct inode *inode = dreq->inode;
+ 	int flags = NFS_ODIRECT_DONE;
+ 
+ 	trace_nfs_direct_write_completion(dreq);
+@@ -753,6 +771,11 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
+ 	}
+ 	spin_unlock(&dreq->lock);
+ 
++	spin_lock(&inode->i_lock);
++	nfs_direct_file_adjust_size_locked(inode, dreq->io_start, dreq->count);
++	nfs_update_delegated_mtime_locked(dreq->inode);
++	spin_unlock(&inode->i_lock);
++
+ 	while (!list_empty(&hdr->pages)) {
+ 
+ 		req = nfs_list_entry(hdr->pages.next);
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 405f17e6e0b45b..e7bc99c69743cf 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -3898,8 +3898,11 @@ nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx,
+ 
+ static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync)
+ {
++	struct dentry *dentry = ctx->dentry;
+ 	if (ctx->state == NULL)
+ 		return;
++	if (dentry->d_flags & DCACHE_NFSFS_RENAMED)
++		nfs4_inode_set_return_delegation_on_close(d_inode(dentry));
+ 	if (is_sync)
+ 		nfs4_close_sync(ctx->state, _nfs4_ctx_to_openmode(ctx));
+ 	else
+diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
+index b2c78621da44a4..4388004a319d0c 100644
+--- a/fs/overlayfs/copy_up.c
++++ b/fs/overlayfs/copy_up.c
+@@ -619,7 +619,6 @@ static int ovl_link_up(struct ovl_copy_up_ctx *c)
+ 	err = PTR_ERR(upper);
+ 	if (!IS_ERR(upper)) {
+ 		err = ovl_do_link(ofs, ovl_dentry_upper(c->dentry), udir, upper);
+-		dput(upper);
+ 
+ 		if (!err) {
+ 			/* Restore timestamps on parent (best effort) */
+@@ -627,6 +626,7 @@ static int ovl_link_up(struct ovl_copy_up_ctx *c)
+ 			ovl_dentry_set_upper_alias(c->dentry);
+ 			ovl_dentry_update_reval(c->dentry, upper);
+ 		}
++		dput(upper);
+ 	}
+ 	inode_unlock(udir);
+ 	if (err)
+diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
+index fa284b64b2de20..23b358a1271cd9 100644
+--- a/include/asm-generic/vmlinux.lds.h
++++ b/include/asm-generic/vmlinux.lds.h
+@@ -450,7 +450,7 @@
+ 	. = ALIGN((align));						\
+ 	.rodata           : AT(ADDR(.rodata) - LOAD_OFFSET) {		\
+ 		__start_rodata = .;					\
+-		*(.rodata) *(.rodata.*)					\
++		*(.rodata) *(.rodata.*) *(.data.rel.ro*)		\
+ 		SCHED_DATA						\
+ 		RO_AFTER_INIT_DATA	/* Read only after init */	\
+ 		. = ALIGN(8);						\
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index b7f327ce797e5b..8f37c5dd52b215 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -196,10 +196,11 @@ struct gendisk {
+ 	unsigned int		zone_capacity;
+ 	unsigned int		last_zone_capacity;
+ 	unsigned long __rcu	*conv_zones_bitmap;
+-	unsigned int            zone_wplugs_hash_bits;
+-	spinlock_t              zone_wplugs_lock;
++	unsigned int		zone_wplugs_hash_bits;
++	atomic_t		nr_zone_wplugs;
++	spinlock_t		zone_wplugs_lock;
+ 	struct mempool_s	*zone_wplugs_pool;
+-	struct hlist_head       *zone_wplugs_hash;
++	struct hlist_head	*zone_wplugs_hash;
+ 	struct workqueue_struct *zone_wplugs_wq;
+ #endif /* CONFIG_BLK_DEV_ZONED */
+ 
+diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
+index cd6f9aae311fca..070b3b680209cd 100644
+--- a/include/linux/compiler-gcc.h
++++ b/include/linux/compiler-gcc.h
+@@ -52,18 +52,6 @@
+  */
+ #define barrier_before_unreachable() asm volatile("")
+ 
+-/*
+- * Mark a position in code as unreachable.  This can be used to
+- * suppress control flow warnings after asm blocks that transfer
+- * control elsewhere.
+- */
+-#define unreachable() \
+-	do {					\
+-		annotate_unreachable();		\
+-		barrier_before_unreachable();	\
+-		__builtin_unreachable();	\
+-	} while (0)
+-
+ #if defined(CONFIG_ARCH_USE_BUILTIN_BSWAP)
+ #define __HAVE_BUILTIN_BSWAP32__
+ #define __HAVE_BUILTIN_BSWAP64__
+diff --git a/include/linux/compiler.h b/include/linux/compiler.h
+index 2d962dade9faee..b15911e201bf95 100644
+--- a/include/linux/compiler.h
++++ b/include/linux/compiler.h
+@@ -109,44 +109,21 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
+ 
+ /* Unreachable code */
+ #ifdef CONFIG_OBJTOOL
+-/*
+- * These macros help objtool understand GCC code flow for unreachable code.
+- * The __COUNTER__ based labels are a hack to make each instance of the macros
+- * unique, to convince GCC not to merge duplicate inline asm statements.
+- */
+-#define __stringify_label(n) #n
+-
+-#define __annotate_reachable(c) ({					\
+-	asm volatile(__stringify_label(c) ":\n\t"			\
+-			".pushsection .discard.reachable\n\t"		\
+-			".long " __stringify_label(c) "b - .\n\t"	\
+-			".popsection\n\t");				\
+-})
+-#define annotate_reachable() __annotate_reachable(__COUNTER__)
+-
+-#define __annotate_unreachable(c) ({					\
+-	asm volatile(__stringify_label(c) ":\n\t"			\
+-		     ".pushsection .discard.unreachable\n\t"		\
+-		     ".long " __stringify_label(c) "b - .\n\t"		\
+-		     ".popsection\n\t" : : "i" (c));			\
+-})
+-#define annotate_unreachable() __annotate_unreachable(__COUNTER__)
+-
+ /* Annotate a C jump table to allow objtool to follow the code flow */
+-#define __annotate_jump_table __section(".rodata..c_jump_table,\"a\",@progbits #")
+-
++#define __annotate_jump_table __section(".data.rel.ro.c_jump_table")
+ #else /* !CONFIG_OBJTOOL */
+-#define annotate_reachable()
+-#define annotate_unreachable()
+ #define __annotate_jump_table
+ #endif /* CONFIG_OBJTOOL */
+ 
+-#ifndef unreachable
+-# define unreachable() do {		\
+-	annotate_unreachable();		\
++/*
++ * Mark a position in code as unreachable.  This can be used to
++ * suppress control flow warnings after asm blocks that transfer
++ * control elsewhere.
++ */
++#define unreachable() do {		\
++	barrier_before_unreachable();	\
+ 	__builtin_unreachable();	\
+ } while (0)
+-#endif
+ 
+ /*
+  * KENTRY - kernel entry point
+diff --git a/include/linux/rcuref.h b/include/linux/rcuref.h
+index 2c8bfd0f1b6b3a..6322d8c1c6b429 100644
+--- a/include/linux/rcuref.h
++++ b/include/linux/rcuref.h
+@@ -71,27 +71,30 @@ static inline __must_check bool rcuref_get(rcuref_t *ref)
+ 	return rcuref_get_slowpath(ref);
+ }
+ 
+-extern __must_check bool rcuref_put_slowpath(rcuref_t *ref);
++extern __must_check bool rcuref_put_slowpath(rcuref_t *ref, unsigned int cnt);
+ 
+ /*
+  * Internal helper. Do not invoke directly.
+  */
+ static __always_inline __must_check bool __rcuref_put(rcuref_t *ref)
+ {
++	int cnt;
++
+ 	RCU_LOCKDEP_WARN(!rcu_read_lock_held() && preemptible(),
+ 			 "suspicious rcuref_put_rcusafe() usage");
+ 	/*
+ 	 * Unconditionally decrease the reference count. The saturation and
+ 	 * dead zones provide enough tolerance for this.
+ 	 */
+-	if (likely(!atomic_add_negative_release(-1, &ref->refcnt)))
++	cnt = atomic_sub_return_release(1, &ref->refcnt);
++	if (likely(cnt >= 0))
+ 		return false;
+ 
+ 	/*
+ 	 * Handle the last reference drop and cases inside the saturation
+ 	 * and dead zones.
+ 	 */
+-	return rcuref_put_slowpath(ref);
++	return rcuref_put_slowpath(ref, cnt);
+ }
+ 
+ /**
+diff --git a/include/linux/socket.h b/include/linux/socket.h
+index d18cc47e89bd01..c3322eb3d6865d 100644
+--- a/include/linux/socket.h
++++ b/include/linux/socket.h
+@@ -392,6 +392,8 @@ struct ucred {
+ 
+ extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr);
+ extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data);
++extern int put_cmsg_notrunc(struct msghdr *msg, int level, int type, int len,
++			    void *data);
+ 
+ struct timespec64;
+ struct __kernel_timespec;
+diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
+index fec1e8a1570c36..eac57914dcf320 100644
+--- a/include/linux/sunrpc/sched.h
++++ b/include/linux/sunrpc/sched.h
+@@ -158,7 +158,6 @@ enum {
+ 	RPC_TASK_NEED_XMIT,
+ 	RPC_TASK_NEED_RECV,
+ 	RPC_TASK_MSG_PIN_WAIT,
+-	RPC_TASK_SIGNALLED,
+ };
+ 
+ #define rpc_test_and_set_running(t) \
+@@ -171,7 +170,7 @@ enum {
+ 
+ #define RPC_IS_ACTIVATED(t)	test_bit(RPC_TASK_ACTIVE, &(t)->tk_runstate)
+ 
+-#define RPC_SIGNALLED(t)	test_bit(RPC_TASK_SIGNALLED, &(t)->tk_runstate)
++#define RPC_SIGNALLED(t)	(READ_ONCE(task->tk_rpc_status) == -ERESTARTSYS)
+ 
+ /*
+  * Task priorities.
+diff --git a/include/net/ip.h b/include/net/ip.h
+index fe4f8543811433..bd201278c55a58 100644
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -424,6 +424,11 @@ int ip_decrease_ttl(struct iphdr *iph)
+ 	return --iph->ttl;
+ }
+ 
++static inline dscp_t ip4h_dscp(const struct iphdr *ip4h)
++{
++	return inet_dsfield_to_dscp(ip4h->tos);
++}
++
+ static inline int ip_mtu_locked(const struct dst_entry *dst)
+ {
+ 	const struct rtable *rt = dst_rtable(dst);
+diff --git a/include/net/route.h b/include/net/route.h
+index da34b6fa9862dc..8a11d19f897bb2 100644
+--- a/include/net/route.h
++++ b/include/net/route.h
+@@ -208,12 +208,13 @@ int ip_route_use_hint(struct sk_buff *skb, __be32 dst, __be32 src,
+ 		      const struct sk_buff *hint);
+ 
+ static inline int ip_route_input(struct sk_buff *skb, __be32 dst, __be32 src,
+-				 u8 tos, struct net_device *devin)
++				 dscp_t dscp, struct net_device *devin)
+ {
+ 	int err;
+ 
+ 	rcu_read_lock();
+-	err = ip_route_input_noref(skb, dst, src, tos, devin);
++	err = ip_route_input_noref(skb, dst, src, inet_dscp_to_dsfield(dscp),
++				   devin);
+ 	if (!err) {
+ 		skb_dst_force(skb);
+ 		if (!skb_dst(skb))
+diff --git a/include/sound/cs35l56.h b/include/sound/cs35l56.h
+index 3dc7a1551ac350..5d653a3491d073 100644
+--- a/include/sound/cs35l56.h
++++ b/include/sound/cs35l56.h
+@@ -12,6 +12,7 @@
+ #include <linux/firmware/cirrus/cs_dsp.h>
+ #include <linux/regulator/consumer.h>
+ #include <linux/regmap.h>
++#include <linux/spi/spi.h>
+ #include <sound/cs-amp-lib.h>
+ 
+ #define CS35L56_DEVID					0x0000000
+@@ -61,6 +62,7 @@
+ #define CS35L56_IRQ1_MASK_8				0x000E0AC
+ #define CS35L56_IRQ1_MASK_18				0x000E0D4
+ #define CS35L56_IRQ1_MASK_20				0x000E0DC
++#define CS35L56_DSP_MBOX_1_RAW				0x0011000
+ #define CS35L56_DSP_VIRTUAL1_MBOX_1			0x0011020
+ #define CS35L56_DSP_VIRTUAL1_MBOX_2			0x0011024
+ #define CS35L56_DSP_VIRTUAL1_MBOX_3			0x0011028
+@@ -224,6 +226,7 @@
+ #define CS35L56_HALO_STATE_SHUTDOWN			1
+ #define CS35L56_HALO_STATE_BOOT_DONE			2
+ 
++#define CS35L56_MBOX_CMD_PING				0x0A000000
+ #define CS35L56_MBOX_CMD_AUDIO_PLAY			0x0B000001
+ #define CS35L56_MBOX_CMD_AUDIO_PAUSE			0x0B000002
+ #define CS35L56_MBOX_CMD_AUDIO_REINIT			0x0B000003
+@@ -254,6 +257,16 @@
+ #define CS35L56_NUM_BULK_SUPPLIES			3
+ #define CS35L56_NUM_DSP_REGIONS				5
+ 
++/* Additional margin for SYSTEM_RESET to control port ready on SPI */
++#define CS35L56_SPI_RESET_TO_PORT_READY_US (CS35L56_CONTROL_PORT_READY_US + 2500)
++
++struct cs35l56_spi_payload {
++	__be32	addr;
++	__be16	pad;
++	__be32	value;
++} __packed;
++static_assert(sizeof(struct cs35l56_spi_payload) == 10);
++
+ struct cs35l56_base {
+ 	struct device *dev;
+ 	struct regmap *regmap;
+@@ -269,6 +282,7 @@ struct cs35l56_base {
+ 	s8 cal_index;
+ 	struct cirrus_amp_cal_data cal_data;
+ 	struct gpio_desc *reset_gpio;
++	struct cs35l56_spi_payload *spi_payload_buf;
+ };
+ 
+ static inline bool cs35l56_is_otp_register(unsigned int reg)
+@@ -276,6 +290,23 @@ static inline bool cs35l56_is_otp_register(unsigned int reg)
+ 	return (reg >> 16) == 3;
+ }
+ 
++static inline int cs35l56_init_config_for_spi(struct cs35l56_base *cs35l56,
++					      struct spi_device *spi)
++{
++	cs35l56->spi_payload_buf = devm_kzalloc(&spi->dev,
++						sizeof(*cs35l56->spi_payload_buf),
++						GFP_KERNEL | GFP_DMA);
++	if (!cs35l56->spi_payload_buf)
++		return -ENOMEM;
++
++	return 0;
++}
++
++static inline bool cs35l56_is_spi(struct cs35l56_base *cs35l56)
++{
++	return IS_ENABLED(CONFIG_SPI_MASTER) && !!cs35l56->spi_payload_buf;
++}
++
+ extern const struct regmap_config cs35l56_regmap_i2c;
+ extern const struct regmap_config cs35l56_regmap_spi;
+ extern const struct regmap_config cs35l56_regmap_sdw;
+diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h
+index 9a75590227f262..3dddfc6abf0ee3 100644
+--- a/include/trace/events/afs.h
++++ b/include/trace/events/afs.h
+@@ -173,6 +173,7 @@ enum yfs_cm_operation {
+ 	EM(afs_cell_trace_get_queue_dns,	"GET q-dns ") \
+ 	EM(afs_cell_trace_get_queue_manage,	"GET q-mng ") \
+ 	EM(afs_cell_trace_get_queue_new,	"GET q-new ") \
++	EM(afs_cell_trace_get_server,		"GET server") \
+ 	EM(afs_cell_trace_get_vol,		"GET vol   ") \
+ 	EM(afs_cell_trace_insert,		"INSERT    ") \
+ 	EM(afs_cell_trace_manage,		"MANAGE    ") \
+@@ -180,6 +181,7 @@ enum yfs_cm_operation {
+ 	EM(afs_cell_trace_put_destroy,		"PUT destry") \
+ 	EM(afs_cell_trace_put_queue_work,	"PUT q-work") \
+ 	EM(afs_cell_trace_put_queue_fail,	"PUT q-fail") \
++	EM(afs_cell_trace_put_server,		"PUT server") \
+ 	EM(afs_cell_trace_put_vol,		"PUT vol   ") \
+ 	EM(afs_cell_trace_see_source,		"SEE source") \
+ 	EM(afs_cell_trace_see_ws,		"SEE ws    ") \
+diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
+index 5e849521668954..5fe852bd31abc9 100644
+--- a/include/trace/events/sunrpc.h
++++ b/include/trace/events/sunrpc.h
+@@ -360,8 +360,7 @@ TRACE_EVENT(rpc_request,
+ 		{ (1UL << RPC_TASK_ACTIVE), "ACTIVE" },			\
+ 		{ (1UL << RPC_TASK_NEED_XMIT), "NEED_XMIT" },		\
+ 		{ (1UL << RPC_TASK_NEED_RECV), "NEED_RECV" },		\
+-		{ (1UL << RPC_TASK_MSG_PIN_WAIT), "MSG_PIN_WAIT" },	\
+-		{ (1UL << RPC_TASK_SIGNALLED), "SIGNALLED" })
++		{ (1UL << RPC_TASK_MSG_PIN_WAIT), "MSG_PIN_WAIT" })
+ 
+ DECLARE_EVENT_CLASS(rpc_task_running,
+ 
+diff --git a/io_uring/net.c b/io_uring/net.c
+index 3974c417fe2644..f32311f6411338 100644
+--- a/io_uring/net.c
++++ b/io_uring/net.c
+@@ -334,7 +334,9 @@ static int io_sendmsg_copy_hdr(struct io_kiocb *req,
+ 		if (unlikely(ret))
+ 			return ret;
+ 
+-		return __get_compat_msghdr(&iomsg->msg, &cmsg, NULL);
++		ret = __get_compat_msghdr(&iomsg->msg, &cmsg, NULL);
++		sr->msg_control = iomsg->msg.msg_control_user;
++		return ret;
+ 	}
+ #endif
+ 
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 501d8c2fedff40..a0e1d2124727e1 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -4957,7 +4957,7 @@ static struct perf_event_pmu_context *
+ find_get_pmu_context(struct pmu *pmu, struct perf_event_context *ctx,
+ 		     struct perf_event *event)
+ {
+-	struct perf_event_pmu_context *new = NULL, *epc;
++	struct perf_event_pmu_context *new = NULL, *pos = NULL, *epc;
+ 	void *task_ctx_data = NULL;
+ 
+ 	if (!ctx->task) {
+@@ -5014,12 +5014,19 @@ find_get_pmu_context(struct pmu *pmu, struct perf_event_context *ctx,
+ 			atomic_inc(&epc->refcount);
+ 			goto found_epc;
+ 		}
++		/* Make sure the pmu_ctx_list is sorted by PMU type: */
++		if (!pos && epc->pmu->type > pmu->type)
++			pos = epc;
+ 	}
+ 
+ 	epc = new;
+ 	new = NULL;
+ 
+-	list_add(&epc->pmu_ctx_entry, &ctx->pmu_ctx_list);
++	if (!pos)
++		list_add_tail(&epc->pmu_ctx_entry, &ctx->pmu_ctx_list);
++	else
++		list_add(&epc->pmu_ctx_entry, pos->pmu_ctx_entry.prev);
++
+ 	epc->ctx = ctx;
+ 
+ found_epc:
+@@ -5969,14 +5976,15 @@ static int _perf_event_period(struct perf_event *event, u64 value)
+ 	if (!value)
+ 		return -EINVAL;
+ 
+-	if (event->attr.freq && value > sysctl_perf_event_sample_rate)
+-		return -EINVAL;
+-
+-	if (perf_event_check_period(event, value))
+-		return -EINVAL;
+-
+-	if (!event->attr.freq && (value & (1ULL << 63)))
+-		return -EINVAL;
++	if (event->attr.freq) {
++		if (value > sysctl_perf_event_sample_rate)
++			return -EINVAL;
++	} else {
++		if (perf_event_check_period(event, value))
++			return -EINVAL;
++		if (value & (1ULL << 63))
++			return -EINVAL;
++	}
+ 
+ 	event_function_call(event, __perf_event_period, &value);
+ 
+@@ -8233,7 +8241,8 @@ void perf_event_exec(void)
+ 
+ 	perf_event_enable_on_exec(ctx);
+ 	perf_event_remove_on_exec(ctx);
+-	perf_iterate_ctx(ctx, perf_event_addr_filters_exec, NULL, true);
++	scoped_guard(rcu)
++		perf_iterate_ctx(ctx, perf_event_addr_filters_exec, NULL, true);
+ 
+ 	perf_unpin_context(ctx);
+ 	put_ctx(ctx);
+diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
+index 4b52cb2ae6d620..a0e0676f5d8bbe 100644
+--- a/kernel/events/uprobes.c
++++ b/kernel/events/uprobes.c
+@@ -489,6 +489,11 @@ int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
+ 	if (ret <= 0)
+ 		goto put_old;
+ 
++	if (is_zero_page(old_page)) {
++		ret = -EINVAL;
++		goto put_old;
++	}
++
+ 	if (WARN(!is_register && PageCompound(old_page),
+ 		 "uprobe unregister should never work on compound page\n")) {
+ 		ret = -EINVAL;
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index c72356836eb628..9803f10a082a7b 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -7229,7 +7229,7 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
+ #if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
+ int __sched __cond_resched(void)
+ {
+-	if (should_resched(0)) {
++	if (should_resched(0) && !irqs_disabled()) {
+ 		preempt_schedule_common();
+ 		return 1;
+ 	}
+diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
+index aa57ae3eb1ff5e..325fd5b9d47152 100644
+--- a/kernel/sched/ext.c
++++ b/kernel/sched/ext.c
+@@ -3047,7 +3047,6 @@ static struct task_struct *pick_task_scx(struct rq *rq)
+ {
+ 	struct task_struct *prev = rq->curr;
+ 	struct task_struct *p;
+-	bool prev_on_scx = prev->sched_class == &ext_sched_class;
+ 	bool keep_prev = rq->scx.flags & SCX_RQ_BAL_KEEP;
+ 	bool kick_idle = false;
+ 
+@@ -3067,14 +3066,18 @@ static struct task_struct *pick_task_scx(struct rq *rq)
+ 	 * if pick_task_scx() is called without preceding balance_scx().
+ 	 */
+ 	if (unlikely(rq->scx.flags & SCX_RQ_BAL_PENDING)) {
+-		if (prev_on_scx) {
++		if (prev->scx.flags & SCX_TASK_QUEUED) {
+ 			keep_prev = true;
+ 		} else {
+ 			keep_prev = false;
+ 			kick_idle = true;
+ 		}
+-	} else if (unlikely(keep_prev && !prev_on_scx)) {
+-		/* only allowed during transitions */
++	} else if (unlikely(keep_prev &&
++			    prev->sched_class != &ext_sched_class)) {
++		/*
++		 * Can happen while enabling as SCX_RQ_BAL_PENDING assertion is
++		 * conditional on scx_enabled() and may have been skipped.
++		 */
+ 		WARN_ON_ONCE(scx_ops_enable_state() == SCX_OPS_ENABLED);
+ 		keep_prev = false;
+ 	}
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 71cc1bbfe9aa3e..dbd375f28ee098 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -541,6 +541,7 @@ static int function_stat_show(struct seq_file *m, void *v)
+ 	static struct trace_seq s;
+ 	unsigned long long avg;
+ 	unsigned long long stddev;
++	unsigned long long stddev_denom;
+ #endif
+ 	mutex_lock(&ftrace_profile_lock);
+ 
+@@ -562,23 +563,19 @@ static int function_stat_show(struct seq_file *m, void *v)
+ #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ 	seq_puts(m, "    ");
+ 
+-	/* Sample standard deviation (s^2) */
+-	if (rec->counter <= 1)
+-		stddev = 0;
+-	else {
+-		/*
+-		 * Apply Welford's method:
+-		 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
+-		 */
++	/*
++	 * Variance formula:
++	 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
++	 * Maybe Welford's method is better here?
++	 * Divide only by 1000 for ns^2 -> us^2 conversion.
++	 * trace_print_graph_duration will divide by 1000 again.
++	 */
++	stddev = 0;
++	stddev_denom = rec->counter * (rec->counter - 1) * 1000;
++	if (stddev_denom) {
+ 		stddev = rec->counter * rec->time_squared -
+ 			 rec->time * rec->time;
+-
+-		/*
+-		 * Divide only 1000 for ns^2 -> us^2 conversion.
+-		 * trace_print_graph_duration will divide 1000 again.
+-		 */
+-		stddev = div64_ul(stddev,
+-				  rec->counter * (rec->counter - 1) * 1000);
++		stddev = div64_ul(stddev, stddev_denom);
+ 	}
+ 
+ 	trace_seq_init(&s);
+diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
+index 5f9119eb7c67f6..31f5ad322fab0a 100644
+--- a/kernel/trace/trace_events_hist.c
++++ b/kernel/trace/trace_events_hist.c
+@@ -6652,27 +6652,27 @@ static int event_hist_trigger_parse(struct event_command *cmd_ops,
+ 	if (existing_hist_update_only(glob, trigger_data, file))
+ 		goto out_free;
+ 
+-	ret = event_trigger_register(cmd_ops, file, glob, trigger_data);
+-	if (ret < 0)
+-		goto out_free;
++	if (!get_named_trigger_data(trigger_data)) {
+ 
+-	if (get_named_trigger_data(trigger_data))
+-		goto enable;
++		ret = create_actions(hist_data);
++		if (ret)
++			goto out_free;
+ 
+-	ret = create_actions(hist_data);
+-	if (ret)
+-		goto out_unreg;
++		if (has_hist_vars(hist_data) || hist_data->n_var_refs) {
++			ret = save_hist_vars(hist_data);
++			if (ret)
++				goto out_free;
++		}
+ 
+-	if (has_hist_vars(hist_data) || hist_data->n_var_refs) {
+-		ret = save_hist_vars(hist_data);
++		ret = tracing_map_init(hist_data->map);
+ 		if (ret)
+-			goto out_unreg;
++			goto out_free;
+ 	}
+ 
+-	ret = tracing_map_init(hist_data->map);
+-	if (ret)
+-		goto out_unreg;
+-enable:
++	ret = event_trigger_register(cmd_ops, file, glob, trigger_data);
++	if (ret < 0)
++		goto out_free;
++
+ 	ret = hist_trigger_enable(trigger_data, file);
+ 	if (ret)
+ 		goto out_unreg;
+diff --git a/lib/rcuref.c b/lib/rcuref.c
+index 97f300eca927ce..5bd726b71e3936 100644
+--- a/lib/rcuref.c
++++ b/lib/rcuref.c
+@@ -220,6 +220,7 @@ EXPORT_SYMBOL_GPL(rcuref_get_slowpath);
+ /**
+  * rcuref_put_slowpath - Slowpath of __rcuref_put()
+  * @ref:	Pointer to the reference count
++ * @cnt:	The resulting value of the fastpath decrement
+  *
+  * Invoked when the reference count is outside of the valid zone.
+  *
+@@ -233,10 +234,8 @@ EXPORT_SYMBOL_GPL(rcuref_get_slowpath);
+  *	with a concurrent get()/put() pair. Caller is not allowed to
+  *	deconstruct the protected object.
+  */
+-bool rcuref_put_slowpath(rcuref_t *ref)
++bool rcuref_put_slowpath(rcuref_t *ref, unsigned int cnt)
+ {
+-	unsigned int cnt = atomic_read(&ref->refcnt);
+-
+ 	/* Did this drop the last reference? */
+ 	if (likely(cnt == RCUREF_NOREF)) {
+ 		/*
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index 27b4c4a2ba1fdd..728a5ce9b50587 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -636,7 +636,8 @@ void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
+ 	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
+ 		hci_conn_hold(conn->hcon);
+ 
+-	list_add(&chan->list, &conn->chan_l);
++	/* Append to the list since the order matters for ECRED */
++	list_add_tail(&chan->list, &conn->chan_l);
+ }
+ 
+ void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
+@@ -3776,7 +3777,11 @@ static void l2cap_ecred_rsp_defer(struct l2cap_chan *chan, void *data)
+ 	struct l2cap_ecred_conn_rsp *rsp_flex =
+ 		container_of(&rsp->pdu.rsp, struct l2cap_ecred_conn_rsp, hdr);
+ 
+-	if (test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
++	/* Check if channel for outgoing connection or if it wasn't deferred
++	 * since in those cases it must be skipped.
++	 */
++	if (test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags) ||
++	    !test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
+ 		return;
+ 
+ 	/* Reset ident so only one response is sent */
+diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
+index 1d458e9da660c9..17a5f5923d615d 100644
+--- a/net/bridge/br_netfilter_hooks.c
++++ b/net/bridge/br_netfilter_hooks.c
+@@ -370,9 +370,9 @@ br_nf_ipv4_daddr_was_changed(const struct sk_buff *skb,
+  */
+ static int br_nf_pre_routing_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
+ {
+-	struct net_device *dev = skb->dev, *br_indev;
+-	struct iphdr *iph = ip_hdr(skb);
+ 	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
++	struct net_device *dev = skb->dev, *br_indev;
++	const struct iphdr *iph = ip_hdr(skb);
+ 	struct rtable *rt;
+ 	int err;
+ 
+@@ -390,7 +390,9 @@ static int br_nf_pre_routing_finish(struct net *net, struct sock *sk, struct sk_
+ 	}
+ 	nf_bridge->in_prerouting = 0;
+ 	if (br_nf_ipv4_daddr_was_changed(skb, nf_bridge)) {
+-		if ((err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))) {
++		err = ip_route_input(skb, iph->daddr, iph->saddr,
++				     ip4h_dscp(iph), dev);
++		if (err) {
+ 			struct in_device *in_dev = __in_dev_get_rcu(dev);
+ 
+ 			/* If err equals -EHOSTUNREACH the error is due to a
+diff --git a/net/core/gro.c b/net/core/gro.c
+index 78b320b6317445..0ad549b07e0399 100644
+--- a/net/core/gro.c
++++ b/net/core/gro.c
+@@ -653,6 +653,7 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
+ 	skb->pkt_type = PACKET_HOST;
+ 
+ 	skb->encapsulation = 0;
++	skb->ip_summed = CHECKSUM_NONE;
+ 	skb_shinfo(skb)->gso_type = 0;
+ 	skb_shinfo(skb)->gso_size = 0;
+ 	if (unlikely(skb->slow_gro)) {
+diff --git a/net/core/scm.c b/net/core/scm.c
+index 4f6a14babe5ae3..733c0cbd393d24 100644
+--- a/net/core/scm.c
++++ b/net/core/scm.c
+@@ -282,6 +282,16 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
+ }
+ EXPORT_SYMBOL(put_cmsg);
+ 
++int put_cmsg_notrunc(struct msghdr *msg, int level, int type, int len,
++		     void *data)
++{
++	/* Don't produce truncated CMSGs */
++	if (!msg->msg_control || msg->msg_controllen < CMSG_LEN(len))
++		return -ETOOSMALL;
++
++	return put_cmsg(msg, level, type, len, data);
++}
++
+ void put_cmsg_scm_timestamping64(struct msghdr *msg, struct scm_timestamping_internal *tss_internal)
+ {
+ 	struct scm_timestamping64 tss;
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 61a950f13a91c7..f220306731dac8 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -6127,11 +6127,11 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
+ 	skb->offload_fwd_mark = 0;
+ 	skb->offload_l3_fwd_mark = 0;
+ #endif
++	ipvs_reset(skb);
+ 
+ 	if (!xnet)
+ 		return;
+ 
+-	ipvs_reset(skb);
+ 	skb->mark = 0;
+ 	skb_clear_tstamp(skb);
+ }
+diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
+index 5dd54a81339806..47e2743ffe2289 100644
+--- a/net/core/sysctl_net_core.c
++++ b/net/core/sysctl_net_core.c
+@@ -34,6 +34,7 @@ static int min_sndbuf = SOCK_MIN_SNDBUF;
+ static int min_rcvbuf = SOCK_MIN_RCVBUF;
+ static int max_skb_frags = MAX_SKB_FRAGS;
+ static int min_mem_pcpu_rsv = SK_MEMORY_PCPU_RESERVE;
++static int netdev_budget_usecs_min = 2 * USEC_PER_SEC / HZ;
+ 
+ static int net_msg_warn;	/* Unused, but still a sysctl */
+ 
+@@ -580,7 +581,7 @@ static struct ctl_table net_core_table[] = {
+ 		.maxlen		= sizeof(unsigned int),
+ 		.mode		= 0644,
+ 		.proc_handler	= proc_dointvec_minmax,
+-		.extra1		= SYSCTL_ZERO,
++		.extra1		= &netdev_budget_usecs_min,
+ 	},
+ 	{
+ 		.procname	= "fb_tunnels_only_for_init_net",
+diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
+index f45bc187a92a7e..b8111ec651b545 100644
+--- a/net/ipv4/icmp.c
++++ b/net/ipv4/icmp.c
+@@ -477,13 +477,11 @@ static struct net_device *icmp_get_route_lookup_dev(struct sk_buff *skb)
+ 	return route_lookup_dev;
+ }
+ 
+-static struct rtable *icmp_route_lookup(struct net *net,
+-					struct flowi4 *fl4,
++static struct rtable *icmp_route_lookup(struct net *net, struct flowi4 *fl4,
+ 					struct sk_buff *skb_in,
+-					const struct iphdr *iph,
+-					__be32 saddr, u8 tos, u32 mark,
+-					int type, int code,
+-					struct icmp_bxm *param)
++					const struct iphdr *iph, __be32 saddr,
++					dscp_t dscp, u32 mark, int type,
++					int code, struct icmp_bxm *param)
+ {
+ 	struct net_device *route_lookup_dev;
+ 	struct dst_entry *dst, *dst2;
+@@ -497,7 +495,7 @@ static struct rtable *icmp_route_lookup(struct net *net,
+ 	fl4->saddr = saddr;
+ 	fl4->flowi4_mark = mark;
+ 	fl4->flowi4_uid = sock_net_uid(net, NULL);
+-	fl4->flowi4_tos = tos & INET_DSCP_MASK;
++	fl4->flowi4_tos = inet_dscp_to_dsfield(dscp);
+ 	fl4->flowi4_proto = IPPROTO_ICMP;
+ 	fl4->fl4_icmp_type = type;
+ 	fl4->fl4_icmp_code = code;
+@@ -549,7 +547,7 @@ static struct rtable *icmp_route_lookup(struct net *net,
+ 		orefdst = skb_in->_skb_refdst; /* save old refdst */
+ 		skb_dst_set(skb_in, NULL);
+ 		err = ip_route_input(skb_in, fl4_dec.daddr, fl4_dec.saddr,
+-				     tos, rt2->dst.dev);
++				     dscp, rt2->dst.dev);
+ 
+ 		dst_release(&rt2->dst);
+ 		rt2 = skb_rtable(skb_in);
+@@ -745,8 +743,9 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
+ 	ipc.opt = &icmp_param.replyopts.opt;
+ 	ipc.sockc.mark = mark;
+ 
+-	rt = icmp_route_lookup(net, &fl4, skb_in, iph, saddr, tos, mark,
+-			       type, code, &icmp_param);
++	rt = icmp_route_lookup(net, &fl4, skb_in, iph, saddr,
++			       inet_dsfield_to_dscp(tos), mark, type, code,
++			       &icmp_param);
+ 	if (IS_ERR(rt))
+ 		goto out_unlock;
+ 
+diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
+index 68aedb8877b9f4..81e86e5defee6b 100644
+--- a/net/ipv4/ip_options.c
++++ b/net/ipv4/ip_options.c
+@@ -617,7 +617,8 @@ int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev)
+ 
+ 		orefdst = skb->_skb_refdst;
+ 		skb_dst_set(skb, NULL);
+-		err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, dev);
++		err = ip_route_input(skb, nexthop, iph->saddr, ip4h_dscp(iph),
++				     dev);
+ 		rt2 = skb_rtable(skb);
+ 		if (err || (rt2->rt_type != RTN_UNICAST && rt2->rt_type != RTN_LOCAL)) {
+ 			skb_dst_drop(skb);
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 68cb6a966b18b8..b731a4a8f2b0d5 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -2456,14 +2456,12 @@ static int tcp_recvmsg_dmabuf(struct sock *sk, const struct sk_buff *skb,
+ 			 */
+ 			memset(&dmabuf_cmsg, 0, sizeof(dmabuf_cmsg));
+ 			dmabuf_cmsg.frag_size = copy;
+-			err = put_cmsg(msg, SOL_SOCKET, SO_DEVMEM_LINEAR,
+-				       sizeof(dmabuf_cmsg), &dmabuf_cmsg);
+-			if (err || msg->msg_flags & MSG_CTRUNC) {
+-				msg->msg_flags &= ~MSG_CTRUNC;
+-				if (!err)
+-					err = -ETOOSMALL;
++			err = put_cmsg_notrunc(msg, SOL_SOCKET,
++					       SO_DEVMEM_LINEAR,
++					       sizeof(dmabuf_cmsg),
++					       &dmabuf_cmsg);
++			if (err)
+ 				goto out;
+-			}
+ 
+ 			sent += copy;
+ 
+@@ -2517,16 +2515,12 @@ static int tcp_recvmsg_dmabuf(struct sock *sk, const struct sk_buff *skb,
+ 				offset += copy;
+ 				remaining_len -= copy;
+ 
+-				err = put_cmsg(msg, SOL_SOCKET,
+-					       SO_DEVMEM_DMABUF,
+-					       sizeof(dmabuf_cmsg),
+-					       &dmabuf_cmsg);
+-				if (err || msg->msg_flags & MSG_CTRUNC) {
+-					msg->msg_flags &= ~MSG_CTRUNC;
+-					if (!err)
+-						err = -ETOOSMALL;
++				err = put_cmsg_notrunc(msg, SOL_SOCKET,
++						       SO_DEVMEM_DMABUF,
++						       sizeof(dmabuf_cmsg),
++						       &dmabuf_cmsg);
++				if (err)
+ 					goto out;
+-				}
+ 
+ 				atomic_long_inc(&niov->pp_ref_count);
+ 				tcp_xa_pool.netmems[tcp_xa_pool.idx++] = skb_frag_netmem(frag);
+diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
+index bb1fe1ba867ac3..f3e4fc9572196c 100644
+--- a/net/ipv4/tcp_minisocks.c
++++ b/net/ipv4/tcp_minisocks.c
+@@ -806,12 +806,6 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
+ 
+ 	/* In sequence, PAWS is OK. */
+ 
+-	/* TODO: We probably should defer ts_recent change once
+-	 * we take ownership of @req.
+-	 */
+-	if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
+-		WRITE_ONCE(req->ts_recent, tmp_opt.rcv_tsval);
+-
+ 	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
+ 		/* Truncate SYN, it is out of window starting
+ 		   at tcp_rsk(req)->rcv_isn + 1. */
+@@ -860,6 +854,10 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
+ 	if (!child)
+ 		goto listen_overflow;
+ 
++	if (own_req && tmp_opt.saw_tstamp &&
++	    !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
++		tcp_sk(child)->rx_opt.ts_recent = tmp_opt.rcv_tsval;
++
+ 	if (own_req && rsk_drop_req(req)) {
+ 		reqsk_queue_removed(&inet_csk(req->rsk_listener)->icsk_accept_queue, req);
+ 		inet_csk_reqsk_queue_drop_and_put(req->rsk_listener, req);
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index b60e13c42bcacd..48fd53b9897265 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -630,8 +630,8 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ 		}
+ 		skb_dst_set(skb2, &rt->dst);
+ 	} else {
+-		if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos,
+-				   skb2->dev) ||
++		if (ip_route_input(skb2, eiph->daddr, eiph->saddr,
++				   ip4h_dscp(eiph), skb2->dev) ||
+ 		    skb_dst(skb2)->dev->type != ARPHRD_TUNNEL6)
+ 			goto out;
+ 	}
+diff --git a/net/ipv6/rpl_iptunnel.c b/net/ipv6/rpl_iptunnel.c
+index 0ac4283acdf20c..7c05ac846646f3 100644
+--- a/net/ipv6/rpl_iptunnel.c
++++ b/net/ipv6/rpl_iptunnel.c
+@@ -262,10 +262,18 @@ static int rpl_input(struct sk_buff *skb)
+ {
+ 	struct dst_entry *orig_dst = skb_dst(skb);
+ 	struct dst_entry *dst = NULL;
++	struct lwtunnel_state *lwtst;
+ 	struct rpl_lwt *rlwt;
+ 	int err;
+ 
+-	rlwt = rpl_lwt_lwtunnel(orig_dst->lwtstate);
++	/* We cannot dereference "orig_dst" once ip6_route_input() or
++	 * skb_dst_drop() is called. However, in order to detect a dst loop, we
++	 * need the address of its lwtstate. So, save the address of lwtstate
++	 * now and use it later as a comparison.
++	 */
++	lwtst = orig_dst->lwtstate;
++
++	rlwt = rpl_lwt_lwtunnel(lwtst);
+ 
+ 	local_bh_disable();
+ 	dst = dst_cache_get(&rlwt->cache);
+@@ -280,7 +288,9 @@ static int rpl_input(struct sk_buff *skb)
+ 	if (!dst) {
+ 		ip6_route_input(skb);
+ 		dst = skb_dst(skb);
+-		if (!dst->error) {
++
++		/* cache only if we don't create a dst reference loop */
++		if (!dst->error && lwtst != dst->lwtstate) {
+ 			local_bh_disable();
+ 			dst_cache_set_ip6(&rlwt->cache, dst,
+ 					  &ipv6_hdr(skb)->saddr);
+diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
+index 33833b2064c072..51583461ae29ba 100644
+--- a/net/ipv6/seg6_iptunnel.c
++++ b/net/ipv6/seg6_iptunnel.c
+@@ -472,10 +472,18 @@ static int seg6_input_core(struct net *net, struct sock *sk,
+ {
+ 	struct dst_entry *orig_dst = skb_dst(skb);
+ 	struct dst_entry *dst = NULL;
++	struct lwtunnel_state *lwtst;
+ 	struct seg6_lwt *slwt;
+ 	int err;
+ 
+-	slwt = seg6_lwt_lwtunnel(orig_dst->lwtstate);
++	/* We cannot dereference "orig_dst" once ip6_route_input() or
++	 * skb_dst_drop() is called. However, in order to detect a dst loop, we
++	 * need the address of its lwtstate. So, save the address of lwtstate
++	 * now and use it later as a comparison.
++	 */
++	lwtst = orig_dst->lwtstate;
++
++	slwt = seg6_lwt_lwtunnel(lwtst);
+ 
+ 	local_bh_disable();
+ 	dst = dst_cache_get(&slwt->cache);
+@@ -490,7 +498,9 @@ static int seg6_input_core(struct net *net, struct sock *sk,
+ 	if (!dst) {
+ 		ip6_route_input(skb);
+ 		dst = skb_dst(skb);
+-		if (!dst->error) {
++
++		/* cache only if we don't create a dst reference loop */
++		if (!dst->error && lwtst != dst->lwtstate) {
+ 			local_bh_disable();
+ 			dst_cache_set_ip6(&slwt->cache, dst,
+ 					  &ipv6_hdr(skb)->saddr);
+diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
+index 8c4f934d198cc6..b4ba2d9f041765 100644
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -1513,11 +1513,6 @@ static int mptcp_nl_remove_subflow_and_signal_addr(struct net *net,
+ 		if (mptcp_pm_is_userspace(msk))
+ 			goto next;
+ 
+-		if (list_empty(&msk->conn_list)) {
+-			mptcp_pm_remove_anno_addr(msk, addr, false);
+-			goto next;
+-		}
+-
+ 		lock_sock(sk);
+ 		remove_subflow = lookup_subflow_by_saddr(&msk->conn_list, addr);
+ 		mptcp_pm_remove_anno_addr(msk, addr, remove_subflow &&
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index 860903e0642255..b56bbee7312c48 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -1140,7 +1140,6 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
+ 	if (data_len == 0) {
+ 		pr_debug("infinite mapping received\n");
+ 		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX);
+-		subflow->map_data_len = 0;
+ 		return MAPPING_INVALID;
+ 	}
+ 
+@@ -1284,18 +1283,6 @@ static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ss
+ 		mptcp_schedule_work(sk);
+ }
+ 
+-static bool subflow_can_fallback(struct mptcp_subflow_context *subflow)
+-{
+-	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
+-
+-	if (subflow->mp_join)
+-		return false;
+-	else if (READ_ONCE(msk->csum_enabled))
+-		return !subflow->valid_csum_seen;
+-	else
+-		return READ_ONCE(msk->allow_infinite_fallback);
+-}
+-
+ static void mptcp_subflow_fail(struct mptcp_sock *msk, struct sock *ssk)
+ {
+ 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
+@@ -1391,7 +1378,7 @@ static bool subflow_check_data_avail(struct sock *ssk)
+ 			return true;
+ 		}
+ 
+-		if (!subflow_can_fallback(subflow) && subflow->map_data_len) {
++		if (!READ_ONCE(msk->allow_infinite_fallback)) {
+ 			/* fatal protocol error, close the socket.
+ 			 * subflow_error_report() will introduce the appropriate barriers
+ 			 */
+diff --git a/net/rxrpc/rxperf.c b/net/rxrpc/rxperf.c
+index 085e7892d31040..b1536da2246b82 100644
+--- a/net/rxrpc/rxperf.c
++++ b/net/rxrpc/rxperf.c
+@@ -478,6 +478,18 @@ static int rxperf_deliver_request(struct rxperf_call *call)
+ 		call->unmarshal++;
+ 		fallthrough;
+ 	case 2:
++		ret = rxperf_extract_data(call, true);
++		if (ret < 0)
++			return ret;
++
++		/* Deal with the terminal magic cookie. */
++		call->iov_len = 4;
++		call->kvec[0].iov_len	= call->iov_len;
++		call->kvec[0].iov_base	= call->tmp;
++		iov_iter_kvec(&call->iter, READ, call->kvec, 1, call->iov_len);
++		call->unmarshal++;
++		fallthrough;
++	case 3:
+ 		ret = rxperf_extract_data(call, false);
+ 		if (ret < 0)
+ 			return ret;
+diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
+index 059f6ef1ad1898..7fcb0574fc79e7 100644
+--- a/net/sunrpc/cache.c
++++ b/net/sunrpc/cache.c
+@@ -1669,12 +1669,14 @@ static void remove_cache_proc_entries(struct cache_detail *cd)
+ 	}
+ }
+ 
+-#ifdef CONFIG_PROC_FS
+ static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
+ {
+ 	struct proc_dir_entry *p;
+ 	struct sunrpc_net *sn;
+ 
++	if (!IS_ENABLED(CONFIG_PROC_FS))
++		return 0;
++
+ 	sn = net_generic(net, sunrpc_net_id);
+ 	cd->procfs = proc_mkdir(cd->name, sn->proc_net_rpc);
+ 	if (cd->procfs == NULL)
+@@ -1702,12 +1704,6 @@ static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
+ 	remove_cache_proc_entries(cd);
+ 	return -ENOMEM;
+ }
+-#else /* CONFIG_PROC_FS */
+-static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
+-{
+-	return 0;
+-}
+-#endif
+ 
+ void __init cache_initialize(void)
+ {
+diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
+index cef623ea150609..9b45fbdc90cabe 100644
+--- a/net/sunrpc/sched.c
++++ b/net/sunrpc/sched.c
+@@ -864,8 +864,6 @@ void rpc_signal_task(struct rpc_task *task)
+ 	if (!rpc_task_set_rpc_status(task, -ERESTARTSYS))
+ 		return;
+ 	trace_rpc_task_signalled(task, task->tk_action);
+-	set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate);
+-	smp_mb__after_atomic();
+ 	queue = READ_ONCE(task->tk_waitqueue);
+ 	if (queue)
+ 		rpc_wake_up_queued_task(queue, task);
+diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
+index b69e6290acfabe..171ad4e2523f13 100644
+--- a/net/sunrpc/xprtsock.c
++++ b/net/sunrpc/xprtsock.c
+@@ -2580,7 +2580,15 @@ static void xs_tls_handshake_done(void *data, int status, key_serial_t peerid)
+ 	struct sock_xprt *lower_transport =
+ 				container_of(lower_xprt, struct sock_xprt, xprt);
+ 
+-	lower_transport->xprt_err = status ? -EACCES : 0;
++	switch (status) {
++	case 0:
++	case -EACCES:
++	case -ETIMEDOUT:
++		lower_transport->xprt_err = status;
++		break;
++	default:
++		lower_transport->xprt_err = -EACCES;
++	}
+ 	complete(&lower_transport->handshake_done);
+ 	xprt_put(lower_xprt);
+ }
+diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
+index 3c323ca213d42c..abfdb4905ca2ac 100644
+--- a/security/integrity/ima/ima.h
++++ b/security/integrity/ima/ima.h
+@@ -149,6 +149,9 @@ struct ima_kexec_hdr {
+ #define IMA_CHECK_BLACKLIST	0x40000000
+ #define IMA_VERITY_REQUIRED	0x80000000
+ 
++/* Exclude non-action flags which are not rule-specific. */
++#define IMA_NONACTION_RULE_FLAGS	(IMA_NONACTION_FLAGS & ~IMA_NEW_FILE)
++
+ #define IMA_DO_MASK		(IMA_MEASURE | IMA_APPRAISE | IMA_AUDIT | \
+ 				 IMA_HASH | IMA_APPRAISE_SUBMASK)
+ #define IMA_DONE_MASK		(IMA_MEASURED | IMA_APPRAISED | IMA_AUDITED | \
+diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
+index 06132cf47016da..4b213de8dcb40c 100644
+--- a/security/integrity/ima/ima_main.c
++++ b/security/integrity/ima/ima_main.c
+@@ -269,10 +269,13 @@ static int process_measurement(struct file *file, const struct cred *cred,
+ 	mutex_lock(&iint->mutex);
+ 
+ 	if (test_and_clear_bit(IMA_CHANGE_ATTR, &iint->atomic_flags))
+-		/* reset appraisal flags if ima_inode_post_setattr was called */
++		/*
++		 * Reset appraisal flags (action and non-action rule-specific)
++		 * if ima_inode_post_setattr was called.
++		 */
+ 		iint->flags &= ~(IMA_APPRAISE | IMA_APPRAISED |
+ 				 IMA_APPRAISE_SUBMASK | IMA_APPRAISED_SUBMASK |
+-				 IMA_NONACTION_FLAGS);
++				 IMA_NONACTION_RULE_FLAGS);
+ 
+ 	/*
+ 	 * Re-evaulate the file if either the xattr has changed or the
+diff --git a/security/landlock/net.c b/security/landlock/net.c
+index d5dcc4407a197b..104b6c01fe503b 100644
+--- a/security/landlock/net.c
++++ b/security/landlock/net.c
+@@ -63,8 +63,7 @@ static int current_check_access_socket(struct socket *const sock,
+ 	if (WARN_ON_ONCE(dom->num_layers < 1))
+ 		return -EACCES;
+ 
+-	/* Checks if it's a (potential) TCP socket. */
+-	if (sock->type != SOCK_STREAM)
++	if (!sk_is_tcp(sock->sk))
+ 		return 0;
+ 
+ 	/* Checks for minimal header length to safely read sa_family. */
+diff --git a/sound/pci/hda/cs35l56_hda_spi.c b/sound/pci/hda/cs35l56_hda_spi.c
+index 7f02155fe61e3c..7c94110b6272a6 100644
+--- a/sound/pci/hda/cs35l56_hda_spi.c
++++ b/sound/pci/hda/cs35l56_hda_spi.c
+@@ -22,6 +22,9 @@ static int cs35l56_hda_spi_probe(struct spi_device *spi)
+ 		return -ENOMEM;
+ 
+ 	cs35l56->base.dev = &spi->dev;
++	ret = cs35l56_init_config_for_spi(&cs35l56->base, spi);
++	if (ret)
++		return ret;
+ 
+ #ifdef CS35L56_WAKE_HOLD_TIME_US
+ 	cs35l56->base.can_hibernate = true;
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 9bf99fe6cd34dd..4a3b4c6d4114b9 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -10564,6 +10564,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
+ 	SND_PCI_QUIRK(0x1043, 0x1433, "ASUS GX650PY/PZ/PV/PU/PYV/PZV/PIV/PVV", ALC285_FIXUP_ASUS_I2C_HEADSET_MIC),
++	SND_PCI_QUIRK(0x1043, 0x1460, "Asus VivoBook 15", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1043, 0x1463, "Asus GA402X/GA402N", ALC285_FIXUP_ASUS_I2C_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x1473, "ASUS GU604VI/VC/VE/VG/VJ/VQ/VU/VV/VY/VZ", ALC285_FIXUP_ASUS_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x1483, "ASUS GU603VQ/VU/VV/VJ/VI", ALC285_FIXUP_ASUS_HEADSET_MIC),
+@@ -10597,7 +10598,6 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1043, 0x19ce, "ASUS B9450FA", ALC294_FIXUP_ASUS_HPE),
+ 	SND_PCI_QUIRK(0x1043, 0x19e1, "ASUS UX581LV", ALC295_FIXUP_ASUS_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
+-	SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x1a63, "ASUS UX3405MA", ALC245_FIXUP_CS35L41_SPI_2),
+ 	SND_PCI_QUIRK(0x1043, 0x1a83, "ASUS UM5302LA", ALC294_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x1043, 0x1a8f, "ASUS UX582ZS", ALC245_FIXUP_CS35L41_SPI_2),
+diff --git a/sound/soc/codecs/cs35l56-shared.c b/sound/soc/codecs/cs35l56-shared.c
+index e45e9ae01bc668..195841a567c3d4 100644
+--- a/sound/soc/codecs/cs35l56-shared.c
++++ b/sound/soc/codecs/cs35l56-shared.c
+@@ -10,6 +10,7 @@
+ #include <linux/gpio/consumer.h>
+ #include <linux/regmap.h>
+ #include <linux/regulator/consumer.h>
++#include <linux/spi/spi.h>
+ #include <linux/types.h>
+ #include <sound/cs-amp-lib.h>
+ 
+@@ -303,6 +304,79 @@ void cs35l56_wait_min_reset_pulse(void)
+ }
+ EXPORT_SYMBOL_NS_GPL(cs35l56_wait_min_reset_pulse, SND_SOC_CS35L56_SHARED);
+ 
++static const struct {
++	u32 addr;
++	u32 value;
++} cs35l56_spi_system_reset_stages[] = {
++	{ .addr = CS35L56_DSP_VIRTUAL1_MBOX_1, .value = CS35L56_MBOX_CMD_SYSTEM_RESET },
++	/* The next write is necessary to delimit the soft reset */
++	{ .addr = CS35L56_DSP_MBOX_1_RAW, .value = CS35L56_MBOX_CMD_PING },
++};
++
++static void cs35l56_spi_issue_bus_locked_reset(struct cs35l56_base *cs35l56_base,
++					       struct spi_device *spi)
++{
++	struct cs35l56_spi_payload *buf = cs35l56_base->spi_payload_buf;
++	struct spi_transfer t = {
++		.tx_buf		= buf,
++		.len		= sizeof(*buf),
++	};
++	struct spi_message m;
++	int i, ret;
++
++	for (i = 0; i < ARRAY_SIZE(cs35l56_spi_system_reset_stages); i++) {
++		buf->addr = cpu_to_be32(cs35l56_spi_system_reset_stages[i].addr);
++		buf->value = cpu_to_be32(cs35l56_spi_system_reset_stages[i].value);
++		spi_message_init_with_transfers(&m, &t, 1);
++		ret = spi_sync_locked(spi, &m);
++		if (ret)
++			dev_warn(cs35l56_base->dev, "spi_sync failed: %d\n", ret);
++
++		usleep_range(CS35L56_SPI_RESET_TO_PORT_READY_US,
++			     2 * CS35L56_SPI_RESET_TO_PORT_READY_US);
++	}
++}
++
++static void cs35l56_spi_system_reset(struct cs35l56_base *cs35l56_base)
++{
++	struct spi_device *spi = to_spi_device(cs35l56_base->dev);
++	unsigned int val;
++	int read_ret, ret;
++
++	/*
++	 * There must not be any other SPI bus activity while the amp is
++	 * soft-resetting.
++	 */
++	ret = spi_bus_lock(spi->controller);
++	if (ret) {
++		dev_warn(cs35l56_base->dev, "spi_bus_lock failed: %d\n", ret);
++		return;
++	}
++
++	cs35l56_spi_issue_bus_locked_reset(cs35l56_base, spi);
++	spi_bus_unlock(spi->controller);
++
++	/*
++	 * Check firmware boot by testing for a response in MBOX_2.
++	 * HALO_STATE cannot be trusted yet because the reset sequence
++	 * can leave it with stale state. But MBOX is reset.
++	 * The regmap must remain in cache-only until the chip has
++	 * booted, so use a bypassed read.
++	 */
++	ret = read_poll_timeout(regmap_read_bypassed, read_ret,
++				(val > 0) && (val < 0xffffffff),
++				CS35L56_HALO_STATE_POLL_US,
++				CS35L56_HALO_STATE_TIMEOUT_US,
++				false,
++				cs35l56_base->regmap,
++				CS35L56_DSP_VIRTUAL1_MBOX_2,
++				&val);
++	if (ret) {
++		dev_err(cs35l56_base->dev, "SPI reboot timed out(%d): MBOX2=%#x\n",
++			read_ret, val);
++	}
++}
++
+ static const struct reg_sequence cs35l56_system_reset_seq[] = {
+ 	REG_SEQ0(CS35L56_DSP1_HALO_STATE, 0),
+ 	REG_SEQ0(CS35L56_DSP_VIRTUAL1_MBOX_1, CS35L56_MBOX_CMD_SYSTEM_RESET),
+@@ -315,6 +389,12 @@ void cs35l56_system_reset(struct cs35l56_base *cs35l56_base, bool is_soundwire)
+ 	 * accesses other than the controlled system reset sequence below.
+ 	 */
+ 	regcache_cache_only(cs35l56_base->regmap, true);
++
++	if (cs35l56_is_spi(cs35l56_base)) {
++		cs35l56_spi_system_reset(cs35l56_base);
++		return;
++	}
++
+ 	regmap_multi_reg_write_bypassed(cs35l56_base->regmap,
+ 					cs35l56_system_reset_seq,
+ 					ARRAY_SIZE(cs35l56_system_reset_seq));
+diff --git a/sound/soc/codecs/cs35l56-spi.c b/sound/soc/codecs/cs35l56-spi.c
+index b07b798b0b45d6..568f554a8638bf 100644
+--- a/sound/soc/codecs/cs35l56-spi.c
++++ b/sound/soc/codecs/cs35l56-spi.c
+@@ -33,6 +33,9 @@ static int cs35l56_spi_probe(struct spi_device *spi)
+ 
+ 	cs35l56->base.dev = &spi->dev;
+ 	cs35l56->base.can_hibernate = true;
++	ret = cs35l56_init_config_for_spi(&cs35l56->base, spi);
++	if (ret)
++		return ret;
+ 
+ 	ret = cs35l56_common_probe(cs35l56);
+ 	if (ret != 0)
+diff --git a/sound/soc/codecs/es8328.c b/sound/soc/codecs/es8328.c
+index f3c97da798dc8e..76159c45e6b52e 100644
+--- a/sound/soc/codecs/es8328.c
++++ b/sound/soc/codecs/es8328.c
+@@ -233,7 +233,6 @@ static const struct snd_kcontrol_new es8328_right_line_controls =
+ 
+ /* Left Mixer */
+ static const struct snd_kcontrol_new es8328_left_mixer_controls[] = {
+-	SOC_DAPM_SINGLE("Playback Switch", ES8328_DACCONTROL17, 7, 1, 0),
+ 	SOC_DAPM_SINGLE("Left Bypass Switch", ES8328_DACCONTROL17, 6, 1, 0),
+ 	SOC_DAPM_SINGLE("Right Playback Switch", ES8328_DACCONTROL18, 7, 1, 0),
+ 	SOC_DAPM_SINGLE("Right Bypass Switch", ES8328_DACCONTROL18, 6, 1, 0),
+@@ -243,7 +242,6 @@ static const struct snd_kcontrol_new es8328_left_mixer_controls[] = {
+ static const struct snd_kcontrol_new es8328_right_mixer_controls[] = {
+ 	SOC_DAPM_SINGLE("Left Playback Switch", ES8328_DACCONTROL19, 7, 1, 0),
+ 	SOC_DAPM_SINGLE("Left Bypass Switch", ES8328_DACCONTROL19, 6, 1, 0),
+-	SOC_DAPM_SINGLE("Playback Switch", ES8328_DACCONTROL20, 7, 1, 0),
+ 	SOC_DAPM_SINGLE("Right Bypass Switch", ES8328_DACCONTROL20, 6, 1, 0),
+ };
+ 
+@@ -336,10 +334,10 @@ static const struct snd_soc_dapm_widget es8328_dapm_widgets[] = {
+ 	SND_SOC_DAPM_DAC("Left DAC", "Left Playback", ES8328_DACPOWER,
+ 			ES8328_DACPOWER_LDAC_OFF, 1),
+ 
+-	SND_SOC_DAPM_MIXER("Left Mixer", SND_SOC_NOPM, 0, 0,
++	SND_SOC_DAPM_MIXER("Left Mixer", ES8328_DACCONTROL17, 7, 0,
+ 		&es8328_left_mixer_controls[0],
+ 		ARRAY_SIZE(es8328_left_mixer_controls)),
+-	SND_SOC_DAPM_MIXER("Right Mixer", SND_SOC_NOPM, 0, 0,
++	SND_SOC_DAPM_MIXER("Right Mixer", ES8328_DACCONTROL20, 7, 0,
+ 		&es8328_right_mixer_controls[0],
+ 		ARRAY_SIZE(es8328_right_mixer_controls)),
+ 
+@@ -418,19 +416,14 @@ static const struct snd_soc_dapm_route es8328_dapm_routes[] = {
+ 	{ "Right Line Mux", "PGA", "Right PGA Mux" },
+ 	{ "Right Line Mux", "Differential", "Differential Mux" },
+ 
+-	{ "Left Out 1", NULL, "Left DAC" },
+-	{ "Right Out 1", NULL, "Right DAC" },
+-	{ "Left Out 2", NULL, "Left DAC" },
+-	{ "Right Out 2", NULL, "Right DAC" },
+-
+-	{ "Left Mixer", "Playback Switch", "Left DAC" },
++	{ "Left Mixer", NULL, "Left DAC" },
+ 	{ "Left Mixer", "Left Bypass Switch", "Left Line Mux" },
+ 	{ "Left Mixer", "Right Playback Switch", "Right DAC" },
+ 	{ "Left Mixer", "Right Bypass Switch", "Right Line Mux" },
+ 
+ 	{ "Right Mixer", "Left Playback Switch", "Left DAC" },
+ 	{ "Right Mixer", "Left Bypass Switch", "Left Line Mux" },
+-	{ "Right Mixer", "Playback Switch", "Right DAC" },
++	{ "Right Mixer", NULL, "Right DAC" },
+ 	{ "Right Mixer", "Right Bypass Switch", "Right Line Mux" },
+ 
+ 	{ "DAC DIG", NULL, "DAC STM" },
+diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
+index 634168d2bb6e54..c5efbceb06d1fc 100644
+--- a/sound/soc/fsl/fsl_sai.c
++++ b/sound/soc/fsl/fsl_sai.c
+@@ -994,10 +994,10 @@ static struct snd_soc_dai_driver fsl_sai_dai_template[] = {
+ 	{
+ 		.name = "sai-tx",
+ 		.playback = {
+-			.stream_name = "CPU-Playback",
++			.stream_name = "SAI-Playback",
+ 			.channels_min = 1,
+ 			.channels_max = 32,
+-				.rate_min = 8000,
++			.rate_min = 8000,
+ 			.rate_max = 2822400,
+ 			.rates = SNDRV_PCM_RATE_KNOT,
+ 			.formats = FSL_SAI_FORMATS,
+@@ -1007,7 +1007,7 @@ static struct snd_soc_dai_driver fsl_sai_dai_template[] = {
+ 	{
+ 		.name = "sai-rx",
+ 		.capture = {
+-			.stream_name = "CPU-Capture",
++			.stream_name = "SAI-Capture",
+ 			.channels_min = 1,
+ 			.channels_max = 32,
+ 			.rate_min = 8000,
+diff --git a/sound/soc/fsl/imx-audmix.c b/sound/soc/fsl/imx-audmix.c
+index ff3671226306bd..ca33ecad075218 100644
+--- a/sound/soc/fsl/imx-audmix.c
++++ b/sound/soc/fsl/imx-audmix.c
+@@ -119,8 +119,8 @@ static const struct snd_soc_ops imx_audmix_be_ops = {
+ static const char *name[][3] = {
+ 	{"HiFi-AUDMIX-FE-0", "HiFi-AUDMIX-FE-1", "HiFi-AUDMIX-FE-2"},
+ 	{"sai-tx", "sai-tx", "sai-rx"},
+-	{"AUDMIX-Playback-0", "AUDMIX-Playback-1", "CPU-Capture"},
+-	{"CPU-Playback", "CPU-Playback", "AUDMIX-Capture-0"},
++	{"AUDMIX-Playback-0", "AUDMIX-Playback-1", "SAI-Capture"},
++	{"SAI-Playback", "SAI-Playback", "AUDMIX-Capture-0"},
+ };
+ 
+ static int imx_audmix_probe(struct platform_device *pdev)
+diff --git a/sound/usb/midi.c b/sound/usb/midi.c
+index 737dd00e97b142..779d97d31f170e 100644
+--- a/sound/usb/midi.c
++++ b/sound/usb/midi.c
+@@ -1145,7 +1145,7 @@ static int snd_usbmidi_output_close(struct snd_rawmidi_substream *substream)
+ {
+ 	struct usbmidi_out_port *port = substream->runtime->private_data;
+ 
+-	cancel_work_sync(&port->ep->work);
++	flush_work(&port->ep->work);
+ 	return substream_open(substream, 0, 0);
+ }
+ 
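The midi.c change swaps cancel_work_sync() for flush_work() on close: both wait for a work item that is already running, but cancel_work_sync() also drops a pending (queued, not yet started) item, which here could throw away MIDI bytes still waiting to be transmitted. A small sketch of the distinction, with hypothetical names:

#include <linux/workqueue.h>

struct out_port {
	struct work_struct work;	/* transmits queued MIDI bytes */
};

static void out_port_close(struct out_port *port)
{
	/* Let any queued or running transmit work finish first... */
	flush_work(&port->work);
	/* ...whereas cancel_work_sync(&port->work) would also cancel a
	 * pending item, silently discarding unsent output. */
}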
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index a97efb7b131ea2..09210fb4ac60c1 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -1868,6 +1868,7 @@ void snd_usb_set_format_quirk(struct snd_usb_substream *subs,
+ 	case USB_ID(0x534d, 0x2109): /* MacroSilicon MS2109 */
+ 		subs->stream_offset_adj = 2;
+ 		break;
++	case USB_ID(0x2b73, 0x000a): /* Pioneer DJM-900NXS2 */
+ 	case USB_ID(0x2b73, 0x0013): /* Pioneer DJM-450 */
+ 		pioneer_djm_set_format_quirk(subs, 0x0082);
+ 		break;
+diff --git a/tools/objtool/check.c b/tools/objtool/check.c
+index 8e02db7e83323b..1691aa6e6ce32d 100644
+--- a/tools/objtool/check.c
++++ b/tools/objtool/check.c
+@@ -639,47 +639,8 @@ static int add_dead_ends(struct objtool_file *file)
+ 	uint64_t offset;
+ 
+ 	/*
+-	 * Check for manually annotated dead ends.
+-	 */
+-	rsec = find_section_by_name(file->elf, ".rela.discard.unreachable");
+-	if (!rsec)
+-		goto reachable;
+-
+-	for_each_reloc(rsec, reloc) {
+-		if (reloc->sym->type == STT_SECTION) {
+-			offset = reloc_addend(reloc);
+-		} else if (reloc->sym->local_label) {
+-			offset = reloc->sym->offset;
+-		} else {
+-			WARN("unexpected relocation symbol type in %s", rsec->name);
+-			return -1;
+-		}
+-
+-		insn = find_insn(file, reloc->sym->sec, offset);
+-		if (insn)
+-			insn = prev_insn_same_sec(file, insn);
+-		else if (offset == reloc->sym->sec->sh.sh_size) {
+-			insn = find_last_insn(file, reloc->sym->sec);
+-			if (!insn) {
+-				WARN("can't find unreachable insn at %s+0x%" PRIx64,
+-				     reloc->sym->sec->name, offset);
+-				return -1;
+-			}
+-		} else {
+-			WARN("can't find unreachable insn at %s+0x%" PRIx64,
+-			     reloc->sym->sec->name, offset);
+-			return -1;
+-		}
+-
+-		insn->dead_end = true;
+-	}
+-
+-reachable:
+-	/*
+-	 * These manually annotated reachable checks are needed for GCC 4.4,
+-	 * where the Linux unreachable() macro isn't supported.  In that case
+-	 * GCC doesn't know the "ud2" is fatal, so it generates code as if it's
+-	 * not a dead end.
++	 * A UD2 instruction defaults to being a dead end; allow it to be
++	 * annotated as non-fatal, e.g. for WARN.
+ 	 */
+ 	rsec = find_section_by_name(file->elf, ".rela.discard.reachable");
+ 	if (!rsec)
+@@ -2628,13 +2589,14 @@ static void mark_rodata(struct objtool_file *file)
+ 	 *
+ 	 * - .rodata: can contain GCC switch tables
+ 	 * - .rodata.<func>: same, if -fdata-sections is being used
+-	 * - .rodata..c_jump_table: contains C annotated jump tables
++	 * - .data.rel.ro.c_jump_table: contains C annotated jump tables
+ 	 *
+ 	 * .rodata.str1.* sections are ignored; they don't contain jump tables.
+ 	 */
+ 	for_each_sec(file, sec) {
+-		if (!strncmp(sec->name, ".rodata", 7) &&
+-		    !strstr(sec->name, ".str1.")) {
++		if ((!strncmp(sec->name, ".rodata", 7) &&
++		     !strstr(sec->name, ".str1.")) ||
++		    !strncmp(sec->name, ".data.rel.ro", 12)) {
+ 			sec->rodata = true;
+ 			found = true;
+ 		}
+diff --git a/tools/objtool/include/objtool/special.h b/tools/objtool/include/objtool/special.h
+index 86d4af9c5aa9dc..89ee12b1a13849 100644
+--- a/tools/objtool/include/objtool/special.h
++++ b/tools/objtool/include/objtool/special.h
+@@ -10,7 +10,7 @@
+ #include <objtool/check.h>
+ #include <objtool/elf.h>
+ 
+-#define C_JUMP_TABLE_SECTION ".rodata..c_jump_table"
++#define C_JUMP_TABLE_SECTION ".data.rel.ro.c_jump_table"
+ 
+ struct special_alt {
+ 	struct list_head list;
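The section rename matters because annotated C jump tables hold absolute code pointers, which a relocatable kernel must fix up at boot; .data.rel.ro ("read-only after relocation") expresses that, and the mark_rodata() hunk above teaches objtool to keep treating the section as rodata. A simplified sketch of the computed-goto pattern such tables come from (the kernel wraps the attribute in its __annotate_jump_table macro; names here are illustrative):

#define my_annotate_jump_table \
	__attribute__((section(".data.rel.ro.c_jump_table")))

static int dispatch(int op, int a, int b)
{
	/* GCC computed goto: objtool finds the targets via the section */
	static const void * const jump[] my_annotate_jump_table = {
		&&do_add, &&do_sub,
	};

	goto *jump[op & 1];
do_add:
	return a + b;
do_sub:
	return a - b;
}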
+diff --git a/tools/testing/selftests/drivers/net/queues.py b/tools/testing/selftests/drivers/net/queues.py
+index 30f29096e27c22..4868b514ae78d8 100755
+--- a/tools/testing/selftests/drivers/net/queues.py
++++ b/tools/testing/selftests/drivers/net/queues.py
+@@ -40,10 +40,9 @@ def addremove_queues(cfg, nl) -> None:
+ 
+     netnl = EthtoolFamily()
+     channels = netnl.channels_get({'header': {'dev-index': cfg.ifindex}})
+-    if channels['combined-count'] == 0:
+-        rx_type = 'rx'
+-    else:
+-        rx_type = 'combined'
++    rx_type = 'rx'
++    if channels.get('combined-count', 0) > 0:
++            rx_type = 'combined'
+ 
+     expected = curr_queues - 1
+     cmd(f"ethtool -L {cfg.dev['ifname']} {rx_type} {expected}", timeout=10)
+diff --git a/tools/testing/selftests/landlock/common.h b/tools/testing/selftests/landlock/common.h
+index 61056fa074bb2f..40a2def50b837e 100644
+--- a/tools/testing/selftests/landlock/common.h
++++ b/tools/testing/selftests/landlock/common.h
+@@ -234,6 +234,7 @@ enforce_ruleset(struct __test_metadata *const _metadata, const int ruleset_fd)
+ struct protocol_variant {
+ 	int domain;
+ 	int type;
++	int protocol;
+ };
+ 
+ struct service_fixture {
+diff --git a/tools/testing/selftests/landlock/config b/tools/testing/selftests/landlock/config
+index 29af19c4e9f981..a8982da4acbdc3 100644
+--- a/tools/testing/selftests/landlock/config
++++ b/tools/testing/selftests/landlock/config
+@@ -3,6 +3,8 @@ CONFIG_CGROUP_SCHED=y
+ CONFIG_INET=y
+ CONFIG_IPV6=y
+ CONFIG_KEYS=y
++CONFIG_MPTCP=y
++CONFIG_MPTCP_IPV6=y
+ CONFIG_NET=y
+ CONFIG_NET_NS=y
+ CONFIG_OVERLAY_FS=y
+diff --git a/tools/testing/selftests/landlock/net_test.c b/tools/testing/selftests/landlock/net_test.c
+index 4e0aeb53b225a5..376079d70d3fc0 100644
+--- a/tools/testing/selftests/landlock/net_test.c
++++ b/tools/testing/selftests/landlock/net_test.c
+@@ -85,18 +85,18 @@ static void setup_loopback(struct __test_metadata *const _metadata)
+ 	clear_ambient_cap(_metadata, CAP_NET_ADMIN);
+ }
+ 
++static bool prot_is_tcp(const struct protocol_variant *const prot)
++{
++	return (prot->domain == AF_INET || prot->domain == AF_INET6) &&
++	       prot->type == SOCK_STREAM &&
++	       (prot->protocol == IPPROTO_TCP || prot->protocol == IPPROTO_IP);
++}
++
+ static bool is_restricted(const struct protocol_variant *const prot,
+ 			  const enum sandbox_type sandbox)
+ {
+-	switch (prot->domain) {
+-	case AF_INET:
+-	case AF_INET6:
+-		switch (prot->type) {
+-		case SOCK_STREAM:
+-			return sandbox == TCP_SANDBOX;
+-		}
+-		break;
+-	}
++	if (sandbox == TCP_SANDBOX)
++		return prot_is_tcp(prot);
+ 	return false;
+ }
+ 
+@@ -105,7 +105,7 @@ static int socket_variant(const struct service_fixture *const srv)
+ 	int ret;
+ 
+ 	ret = socket(srv->protocol.domain, srv->protocol.type | SOCK_CLOEXEC,
+-		     0);
++		     srv->protocol.protocol);
+ 	if (ret < 0)
+ 		return -errno;
+ 	return ret;
+@@ -290,22 +290,59 @@ FIXTURE_TEARDOWN(protocol)
+ }
+ 
+ /* clang-format off */
+-FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_ipv4_tcp) {
++FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_ipv4_tcp1) {
+ 	/* clang-format on */
+ 	.sandbox = NO_SANDBOX,
+ 	.prot = {
+ 		.domain = AF_INET,
+ 		.type = SOCK_STREAM,
++		/* IPPROTO_IP == 0 */
++		.protocol = IPPROTO_IP,
+ 	},
+ };
+ 
+ /* clang-format off */
+-FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_ipv6_tcp) {
++FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_ipv4_tcp2) {
++	/* clang-format on */
++	.sandbox = NO_SANDBOX,
++	.prot = {
++		.domain = AF_INET,
++		.type = SOCK_STREAM,
++		.protocol = IPPROTO_TCP,
++	},
++};
++
++/* clang-format off */
++FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_ipv6_tcp1) {
+ 	/* clang-format on */
+ 	.sandbox = NO_SANDBOX,
+ 	.prot = {
+ 		.domain = AF_INET6,
+ 		.type = SOCK_STREAM,
++		/* IPPROTO_IP == 0 */
++		.protocol = IPPROTO_IP,
++	},
++};
++
++/* clang-format off */
++FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_ipv6_tcp2) {
++	/* clang-format on */
++	.sandbox = NO_SANDBOX,
++	.prot = {
++		.domain = AF_INET6,
++		.type = SOCK_STREAM,
++		.protocol = IPPROTO_TCP,
++	},
++};
++
++/* clang-format off */
++FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_ipv4_mptcp) {
++	/* clang-format on */
++	.sandbox = NO_SANDBOX,
++	.prot = {
++		.domain = AF_INET,
++		.type = SOCK_STREAM,
++		.protocol = IPPROTO_MPTCP,
+ 	},
+ };
+ 
+@@ -329,6 +366,17 @@ FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_ipv6_udp) {
+ 	},
+ };
+ 
++/* clang-format off */
++FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_ipv6_mptcp) {
++	/* clang-format on */
++	.sandbox = NO_SANDBOX,
++	.prot = {
++		.domain = AF_INET6,
++		.type = SOCK_STREAM,
++		.protocol = IPPROTO_MPTCP,
++	},
++};
++
+ /* clang-format off */
+ FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_unix_stream) {
+ 	/* clang-format on */
+@@ -350,22 +398,48 @@ FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_unix_datagram) {
+ };
+ 
+ /* clang-format off */
+-FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_ipv4_tcp) {
++FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_ipv4_tcp1) {
++	/* clang-format on */
++	.sandbox = TCP_SANDBOX,
++	.prot = {
++		.domain = AF_INET,
++		.type = SOCK_STREAM,
++		/* IPPROTO_IP == 0 */
++		.protocol = IPPROTO_IP,
++	},
++};
++
++/* clang-format off */
++FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_ipv4_tcp2) {
+ 	/* clang-format on */
+ 	.sandbox = TCP_SANDBOX,
+ 	.prot = {
+ 		.domain = AF_INET,
+ 		.type = SOCK_STREAM,
++		.protocol = IPPROTO_TCP,
++	},
++};
++
++/* clang-format off */
++FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_ipv6_tcp1) {
++	/* clang-format on */
++	.sandbox = TCP_SANDBOX,
++	.prot = {
++		.domain = AF_INET6,
++		.type = SOCK_STREAM,
++		/* IPPROTO_IP == 0 */
++		.protocol = IPPROTO_IP,
+ 	},
+ };
+ 
+ /* clang-format off */
+-FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_ipv6_tcp) {
++FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_ipv6_tcp2) {
+ 	/* clang-format on */
+ 	.sandbox = TCP_SANDBOX,
+ 	.prot = {
+ 		.domain = AF_INET6,
+ 		.type = SOCK_STREAM,
++		.protocol = IPPROTO_TCP,
+ 	},
+ };
+ 
+@@ -389,6 +463,17 @@ FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_ipv6_udp) {
+ 	},
+ };
+ 
++/* clang-format off */
++FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_ipv4_mptcp) {
++	/* clang-format on */
++	.sandbox = TCP_SANDBOX,
++	.prot = {
++		.domain = AF_INET,
++		.type = SOCK_STREAM,
++		.protocol = IPPROTO_MPTCP,
++	},
++};
++
+ /* clang-format off */
+ FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_unix_stream) {
+ 	/* clang-format on */
+@@ -399,6 +484,17 @@ FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_unix_stream) {
+ 	},
+ };
+ 
++/* clang-format off */
++FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_ipv6_mptcp) {
++	/* clang-format on */
++	.sandbox = TCP_SANDBOX,
++	.prot = {
++		.domain = AF_INET6,
++		.type = SOCK_STREAM,
++		.protocol = IPPROTO_MPTCP,
++	},
++};
++
+ /* clang-format off */
+ FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_unix_datagram) {
+ 	/* clang-format on */
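The new fixture variants exercise socket()'s third argument: 0 (IPPROTO_IP) means "the default protocol for this domain/type", which for AF_INET/SOCK_STREAM is plain TCP, while IPPROTO_TCP names it explicitly and IPPROTO_MPTCP selects multipath TCP. The prot_is_tcp() helper above treats only the first two spellings as TCP for sandboxing purposes, and the MPTCP variants pin that behaviour down. A standalone userspace sketch (IPPROTO_MPTCP assumes reasonably recent kernel and libc headers):

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>

int main(void)
{
	int dflt  = socket(AF_INET, SOCK_STREAM, IPPROTO_IP);    /* 0: default, i.e. TCP */
	int tcp   = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);   /* same transport, explicit */
	int mptcp = socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP); /* needs CONFIG_MPTCP */

	printf("%d %d %d\n", dflt, tcp, mptcp);
	return 0;
}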
+diff --git a/tools/testing/selftests/rseq/rseq-riscv-bits.h b/tools/testing/selftests/rseq/rseq-riscv-bits.h
+index de31a0143139b7..f02f411d550d18 100644
+--- a/tools/testing/selftests/rseq/rseq-riscv-bits.h
++++ b/tools/testing/selftests/rseq/rseq-riscv-bits.h
+@@ -243,7 +243,7 @@ int RSEQ_TEMPLATE_IDENTIFIER(rseq_offset_deref_addv)(intptr_t *ptr, off_t off, i
+ #ifdef RSEQ_COMPARE_TWICE
+ 				  RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, "%l[error1]")
+ #endif
+-				  RSEQ_ASM_OP_R_DEREF_ADDV(ptr, off, 3)
++				  RSEQ_ASM_OP_R_DEREF_ADDV(ptr, off, inc, 3)
+ 				  RSEQ_INJECT_ASM(4)
+ 				  RSEQ_ASM_DEFINE_ABORT(4, abort)
+ 				  : /* gcc asm goto does not allow outputs */
+@@ -251,8 +251,8 @@ int RSEQ_TEMPLATE_IDENTIFIER(rseq_offset_deref_addv)(intptr_t *ptr, off_t off, i
+ 				    [current_cpu_id]		"m" (rseq_get_abi()->RSEQ_TEMPLATE_CPU_ID_FIELD),
+ 				    [rseq_cs]			"m" (rseq_get_abi()->rseq_cs.arch.ptr),
+ 				    [ptr]			"r" (ptr),
+-				    [off]			"er" (off),
+-				    [inc]			"er" (inc)
++				    [off]			"r" (off),
++				    [inc]			"r" (inc)
+ 				    RSEQ_INJECT_INPUT
+ 				  : "memory", RSEQ_ASM_TMP_REG_1
+ 				    RSEQ_INJECT_CLOBBER
+diff --git a/tools/testing/selftests/rseq/rseq-riscv.h b/tools/testing/selftests/rseq/rseq-riscv.h
+index 37e598d0a365e2..67d544aaa9a3b0 100644
+--- a/tools/testing/selftests/rseq/rseq-riscv.h
++++ b/tools/testing/selftests/rseq/rseq-riscv.h
+@@ -158,7 +158,7 @@ do {									\
+ 	"bnez	" RSEQ_ASM_TMP_REG_1 ", 222b\n"				\
+ 	"333:\n"
+ 
+-#define RSEQ_ASM_OP_R_DEREF_ADDV(ptr, off, post_commit_label)		\
++#define RSEQ_ASM_OP_R_DEREF_ADDV(ptr, off, inc, post_commit_label)	\
+ 	"mv	" RSEQ_ASM_TMP_REG_1 ", %[" __rseq_str(ptr) "]\n"	\
+ 	RSEQ_ASM_OP_R_ADD(off)						\
+ 	REG_L	  RSEQ_ASM_TMP_REG_1 ", 0(" RSEQ_ASM_TMP_REG_1 ")\n"	\
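The rseq change drops the "er" operand constraints in favour of plain "r" (and threads the inc operand through the macro explicitly): "e" is an x86-64 machine constraint for a 32-bit sign-extended immediate that appears to have been carried over from the x86 template, so on RISC-V only register operands are meaningful here. A minimal sketch of a register-constrained RISC-V inline asm operand, with made-up names:

static inline long riscv_add(long a, long b)
{
	long out;

	/* "r": the compiler must materialise each operand in a GPR */
	asm volatile("add %0, %1, %2"
		     : "=r" (out)
		     : "r" (a), "r" (b));
	return out;
}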


* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2025-02-27 13:22 Mike Pagano
  0 siblings, 0 replies; 82+ messages in thread
From: Mike Pagano @ 2025-02-27 13:22 UTC (permalink / raw
  To: gentoo-commits

commit:     43affa7d97bc920177a436c3997d9b1fb0cf1521
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Feb 27 13:22:00 2025 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Feb 27 13:22:00 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=43affa7d

Linux patch 6.12.17

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1016_linux-6.12.17.patch | 9620 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 9624 insertions(+)

diff --git a/0000_README b/0000_README
index 9f0c3a67..8efc8938 100644
--- a/0000_README
+++ b/0000_README
@@ -107,6 +107,10 @@ Patch:  1015_linux-6.12.16.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.12.16
 
+Patch:  1016_linux-6.12.17.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.12.17
+
 Patch:  1500_fortify-copy-size-value-range-tracking-fix.patch
 From:   https://git.kernel.org/
 Desc:   fortify: Hide run-time copy size from value range tracking

diff --git a/1016_linux-6.12.17.patch b/1016_linux-6.12.17.patch
new file mode 100644
index 00000000..cebbb158
--- /dev/null
+++ b/1016_linux-6.12.17.patch
@@ -0,0 +1,9620 @@
+diff --git a/Documentation/networking/strparser.rst b/Documentation/networking/strparser.rst
+index 6cab1f74ae05a3..7f623d1db72aae 100644
+--- a/Documentation/networking/strparser.rst
++++ b/Documentation/networking/strparser.rst
+@@ -112,7 +112,7 @@ Functions
+ Callbacks
+ =========
+ 
+-There are six callbacks:
++There are seven callbacks:
+ 
+     ::
+ 
+@@ -182,6 +182,13 @@ There are six callbacks:
+     the length of the message. skb->len - offset may be greater
+     then full_len since strparser does not trim the skb.
+ 
++    ::
++
++	int (*read_sock)(struct strparser *strp, read_descriptor_t *desc,
++                     sk_read_actor_t recv_actor);
++
++    The read_sock callback is used by strparser instead of
++    sock->ops->read_sock, if provided.
+     ::
+ 
+ 	int (*read_sock_done)(struct strparser *strp, int err);
+diff --git a/Makefile b/Makefile
+index 340da922fa4f2c..e8b8c5b3840505 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 12
+-SUBLEVEL = 16
++SUBLEVEL = 17
+ EXTRAVERSION =
+ NAME = Baby Opossum Posse
+ 
+diff --git a/arch/arm64/boot/dts/mediatek/mt8183-pumpkin.dts b/arch/arm64/boot/dts/mediatek/mt8183-pumpkin.dts
+index 1aa668c3ccf928..dbdee604edab43 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8183-pumpkin.dts
++++ b/arch/arm64/boot/dts/mediatek/mt8183-pumpkin.dts
+@@ -63,6 +63,18 @@ thermistor {
+ 		pulldown-ohm = <0>;
+ 		io-channels = <&auxadc 0>;
+ 	};
++
++	connector {
++		compatible = "hdmi-connector";
++		label = "hdmi";
++		type = "d";
++
++		port {
++			hdmi_connector_in: endpoint {
++				remote-endpoint = <&hdmi_connector_out>;
++			};
++		};
++	};
+ };
+ 
+ &auxadc {
+@@ -120,6 +132,43 @@ &i2c6 {
+ 	pinctrl-0 = <&i2c6_pins>;
+ 	status = "okay";
+ 	clock-frequency = <100000>;
++	#address-cells = <1>;
++	#size-cells = <0>;
++
++	it66121hdmitx: hdmitx@4c {
++		compatible = "ite,it66121";
++		reg = <0x4c>;
++		pinctrl-names = "default";
++		pinctrl-0 = <&ite_pins>;
++		reset-gpios = <&pio 160 GPIO_ACTIVE_LOW>;
++		interrupt-parent = <&pio>;
++		interrupts = <4 IRQ_TYPE_LEVEL_LOW>;
++		vcn33-supply = <&mt6358_vcn33_reg>;
++		vcn18-supply = <&mt6358_vcn18_reg>;
++		vrf12-supply = <&mt6358_vrf12_reg>;
++
++		ports {
++			#address-cells = <1>;
++			#size-cells = <0>;
++
++			port@0 {
++				reg = <0>;
++
++				it66121_in: endpoint {
++					bus-width = <12>;
++					remote-endpoint = <&dpi_out>;
++				};
++			};
++
++			port@1 {
++				reg = <1>;
++
++				hdmi_connector_out: endpoint {
++					remote-endpoint = <&hdmi_connector_in>;
++				};
++			};
++		};
++	};
+ };
+ 
+ &keyboard {
+@@ -362,6 +411,67 @@ pins_clk {
+ 			input-enable;
+ 		};
+ 	};
++
++	ite_pins: ite-pins {
++		pins-irq {
++			pinmux = <PINMUX_GPIO4__FUNC_GPIO4>;
++			input-enable;
++			bias-pull-up;
++		};
++
++		pins-rst {
++			pinmux = <PINMUX_GPIO160__FUNC_GPIO160>;
++			output-high;
++		};
++	};
++
++	dpi_func_pins: dpi-func-pins {
++		pins-dpi {
++			pinmux = <PINMUX_GPIO12__FUNC_I2S5_BCK>,
++				 <PINMUX_GPIO46__FUNC_I2S5_LRCK>,
++				 <PINMUX_GPIO47__FUNC_I2S5_DO>,
++				 <PINMUX_GPIO13__FUNC_DBPI_D0>,
++				 <PINMUX_GPIO14__FUNC_DBPI_D1>,
++				 <PINMUX_GPIO15__FUNC_DBPI_D2>,
++				 <PINMUX_GPIO16__FUNC_DBPI_D3>,
++				 <PINMUX_GPIO17__FUNC_DBPI_D4>,
++				 <PINMUX_GPIO18__FUNC_DBPI_D5>,
++				 <PINMUX_GPIO19__FUNC_DBPI_D6>,
++				 <PINMUX_GPIO20__FUNC_DBPI_D7>,
++				 <PINMUX_GPIO21__FUNC_DBPI_D8>,
++				 <PINMUX_GPIO22__FUNC_DBPI_D9>,
++				 <PINMUX_GPIO23__FUNC_DBPI_D10>,
++				 <PINMUX_GPIO24__FUNC_DBPI_D11>,
++				 <PINMUX_GPIO25__FUNC_DBPI_HSYNC>,
++				 <PINMUX_GPIO26__FUNC_DBPI_VSYNC>,
++				 <PINMUX_GPIO27__FUNC_DBPI_DE>,
++				 <PINMUX_GPIO28__FUNC_DBPI_CK>;
++		};
++	};
++
++	dpi_idle_pins: dpi-idle-pins {
++		pins-idle {
++			pinmux = <PINMUX_GPIO12__FUNC_GPIO12>,
++				 <PINMUX_GPIO46__FUNC_GPIO46>,
++				 <PINMUX_GPIO47__FUNC_GPIO47>,
++				 <PINMUX_GPIO13__FUNC_GPIO13>,
++				 <PINMUX_GPIO14__FUNC_GPIO14>,
++				 <PINMUX_GPIO15__FUNC_GPIO15>,
++				 <PINMUX_GPIO16__FUNC_GPIO16>,
++				 <PINMUX_GPIO17__FUNC_GPIO17>,
++				 <PINMUX_GPIO18__FUNC_GPIO18>,
++				 <PINMUX_GPIO19__FUNC_GPIO19>,
++				 <PINMUX_GPIO20__FUNC_GPIO20>,
++				 <PINMUX_GPIO21__FUNC_GPIO21>,
++				 <PINMUX_GPIO22__FUNC_GPIO22>,
++				 <PINMUX_GPIO23__FUNC_GPIO23>,
++				 <PINMUX_GPIO24__FUNC_GPIO24>,
++				 <PINMUX_GPIO25__FUNC_GPIO25>,
++				 <PINMUX_GPIO26__FUNC_GPIO26>,
++				 <PINMUX_GPIO27__FUNC_GPIO27>,
++				 <PINMUX_GPIO28__FUNC_GPIO28>;
++		};
++	};
+ };
+ 
+ &pmic {
+@@ -412,6 +522,15 @@ &scp {
+ 	status = "okay";
+ };
+ 
+-&dsi0 {
+-	status = "disabled";
++&dpi0 {
++	pinctrl-names = "default", "sleep";
++	pinctrl-0 = <&dpi_func_pins>;
++	pinctrl-1 = <&dpi_idle_pins>;
++	status = "okay";
++
++	port {
++		dpi_out: endpoint {
++			remote-endpoint = <&it66121_in>;
++		};
++	};
+ };
+diff --git a/arch/arm64/boot/dts/mediatek/mt8183.dtsi b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
+index 5cb6bd3c5acbb0..92c41463d10e37 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8183.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
+@@ -1835,6 +1835,7 @@ dsi0: dsi@14014000 {
+ 			resets = <&mmsys MT8183_MMSYS_SW0_RST_B_DISP_DSI0>;
+ 			phys = <&mipi_tx0>;
+ 			phy-names = "dphy";
++			status = "disabled";
+ 		};
+ 
+ 		dpi0: dpi@14015000 {
+diff --git a/arch/arm64/boot/dts/rockchip/px30-ringneck-haikou.dts b/arch/arm64/boot/dts/rockchip/px30-ringneck-haikou.dts
+index ae398acdcf45e6..0905668cbe1f4e 100644
+--- a/arch/arm64/boot/dts/rockchip/px30-ringneck-haikou.dts
++++ b/arch/arm64/boot/dts/rockchip/px30-ringneck-haikou.dts
+@@ -226,7 +226,6 @@ &uart0 {
+ };
+ 
+ &uart5 {
+-	pinctrl-0 = <&uart5_xfer>;
+ 	rts-gpios = <&gpio0 RK_PB5 GPIO_ACTIVE_HIGH>;
+ 	status = "okay";
+ };
+diff --git a/arch/arm64/boot/dts/rockchip/px30-ringneck.dtsi b/arch/arm64/boot/dts/rockchip/px30-ringneck.dtsi
+index b7163ed74232d7..f743aaf78359d2 100644
+--- a/arch/arm64/boot/dts/rockchip/px30-ringneck.dtsi
++++ b/arch/arm64/boot/dts/rockchip/px30-ringneck.dtsi
+@@ -373,6 +373,12 @@ &u2phy_host {
+ 	status = "okay";
+ };
+ 
++&uart5 {
++	/delete-property/ dmas;
++	/delete-property/ dma-names;
++	pinctrl-0 = <&uart5_xfer>;
++};
++
+ /* Mule UCAN */
+ &usb_host0_ehci {
+ 	status = "okay";
+diff --git a/arch/arm64/boot/dts/rockchip/rk3328-orangepi-r1-plus-lts.dts b/arch/arm64/boot/dts/rockchip/rk3328-orangepi-r1-plus-lts.dts
+index 4237f2ee8fee33..f57d4acd9807cb 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3328-orangepi-r1-plus-lts.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3328-orangepi-r1-plus-lts.dts
+@@ -15,9 +15,11 @@ / {
+ };
+ 
+ &gmac2io {
++	/delete-property/ tx_delay;
++	/delete-property/ rx_delay;
++
+ 	phy-handle = <&yt8531c>;
+-	tx_delay = <0x19>;
+-	rx_delay = <0x05>;
++	phy-mode = "rgmii-id";
+ 
+ 	mdio {
+ 		/delete-node/ ethernet-phy@1;
+diff --git a/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi
+index fc67585b64b7ba..83e7e0fbe7839e 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi
+@@ -549,10 +549,10 @@ usb_host2_xhci: usb@fcd00000 {
+ 	mmu600_pcie: iommu@fc900000 {
+ 		compatible = "arm,smmu-v3";
+ 		reg = <0x0 0xfc900000 0x0 0x200000>;
+-		interrupts = <GIC_SPI 369 IRQ_TYPE_LEVEL_HIGH 0>,
+-			     <GIC_SPI 371 IRQ_TYPE_LEVEL_HIGH 0>,
+-			     <GIC_SPI 374 IRQ_TYPE_LEVEL_HIGH 0>,
+-			     <GIC_SPI 367 IRQ_TYPE_LEVEL_HIGH 0>;
++		interrupts = <GIC_SPI 369 IRQ_TYPE_EDGE_RISING 0>,
++			     <GIC_SPI 371 IRQ_TYPE_EDGE_RISING 0>,
++			     <GIC_SPI 374 IRQ_TYPE_EDGE_RISING 0>,
++			     <GIC_SPI 367 IRQ_TYPE_EDGE_RISING 0>;
+ 		interrupt-names = "eventq", "gerror", "priq", "cmdq-sync";
+ 		#iommu-cells = <1>;
+ 		status = "disabled";
+@@ -561,10 +561,10 @@ mmu600_pcie: iommu@fc900000 {
+ 	mmu600_php: iommu@fcb00000 {
+ 		compatible = "arm,smmu-v3";
+ 		reg = <0x0 0xfcb00000 0x0 0x200000>;
+-		interrupts = <GIC_SPI 381 IRQ_TYPE_LEVEL_HIGH 0>,
+-			     <GIC_SPI 383 IRQ_TYPE_LEVEL_HIGH 0>,
+-			     <GIC_SPI 386 IRQ_TYPE_LEVEL_HIGH 0>,
+-			     <GIC_SPI 379 IRQ_TYPE_LEVEL_HIGH 0>;
++		interrupts = <GIC_SPI 381 IRQ_TYPE_EDGE_RISING 0>,
++			     <GIC_SPI 383 IRQ_TYPE_EDGE_RISING 0>,
++			     <GIC_SPI 386 IRQ_TYPE_EDGE_RISING 0>,
++			     <GIC_SPI 379 IRQ_TYPE_EDGE_RISING 0>;
+ 		interrupt-names = "eventq", "gerror", "priq", "cmdq-sync";
+ 		#iommu-cells = <1>;
+ 		status = "disabled";
+@@ -2626,9 +2626,9 @@ tsadc: tsadc@fec00000 {
+ 		rockchip,hw-tshut-temp = <120000>;
+ 		rockchip,hw-tshut-mode = <0>; /* tshut mode 0:CRU 1:GPIO */
+ 		rockchip,hw-tshut-polarity = <0>; /* tshut polarity 0:LOW 1:HIGH */
+-		pinctrl-0 = <&tsadc_gpio_func>;
+-		pinctrl-1 = <&tsadc_shut>;
+-		pinctrl-names = "gpio", "otpout";
++		pinctrl-0 = <&tsadc_shut_org>;
++		pinctrl-1 = <&tsadc_gpio_func>;
++		pinctrl-names = "default", "sleep";
+ 		#thermal-sensor-cells = <1>;
+ 		status = "disabled";
+ 	};
+diff --git a/arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5-genbook.dts b/arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5-genbook.dts
+index 6418286efe40d3..762d36ad733ab2 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5-genbook.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5-genbook.dts
+@@ -101,7 +101,7 @@ vcc3v3_lcd: vcc3v3-lcd-regulator {
+ 		compatible = "regulator-fixed";
+ 		regulator-name = "vcc3v3_lcd";
+ 		enable-active-high;
+-		gpio = <&gpio1 RK_PC4 GPIO_ACTIVE_HIGH>;
++		gpio = <&gpio0 RK_PC4 GPIO_ACTIVE_HIGH>;
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&lcdpwr_en>;
+ 		vin-supply = <&vcc3v3_sys>;
+@@ -207,7 +207,7 @@ &pcie3x4 {
+ &pinctrl {
+ 	lcd {
+ 		lcdpwr_en: lcdpwr-en {
+-			rockchip,pins = <1 RK_PC4 RK_FUNC_GPIO &pcfg_pull_down>;
++			rockchip,pins = <0 RK_PC4 RK_FUNC_GPIO &pcfg_pull_down>;
+ 		};
+ 
+ 		bl_en: bl-en {
+diff --git a/arch/arm64/include/asm/mman.h b/arch/arm64/include/asm/mman.h
+index 798d965760d434..5a280ac7570cdd 100644
+--- a/arch/arm64/include/asm/mman.h
++++ b/arch/arm64/include/asm/mman.h
+@@ -41,9 +41,12 @@ static inline unsigned long arch_calc_vm_flag_bits(struct file *file,
+ 	 * backed by tags-capable memory. The vm_flags may be overridden by a
+ 	 * filesystem supporting MTE (RAM-based).
+ 	 */
+-	if (system_supports_mte() &&
+-	    ((flags & MAP_ANONYMOUS) || shmem_file(file)))
+-		return VM_MTE_ALLOWED;
++	if (system_supports_mte()) {
++		if ((flags & MAP_ANONYMOUS) && !(flags & MAP_HUGETLB))
++			return VM_MTE_ALLOWED;
++		if (shmem_file(file))
++			return VM_MTE_ALLOWED;
++	}
+ 
+ 	return 0;
+ }
+diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h
+index c3efacab4b9412..aa90a048f319a3 100644
+--- a/arch/powerpc/include/asm/book3s/64/hash-4k.h
++++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h
+@@ -77,9 +77,17 @@
+ /*
+  * With 4K page size the real_pte machinery is all nops.
+  */
+-#define __real_pte(e, p, o)		((real_pte_t){(e)})
++static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep, int offset)
++{
++	return (real_pte_t){pte};
++}
++
+ #define __rpte_to_pte(r)	((r).pte)
+-#define __rpte_to_hidx(r,index)	(pte_val(__rpte_to_pte(r)) >> H_PAGE_F_GIX_SHIFT)
++
++static inline unsigned long __rpte_to_hidx(real_pte_t rpte, unsigned long index)
++{
++	return pte_val(__rpte_to_pte(rpte)) >> H_PAGE_F_GIX_SHIFT;
++}
+ 
+ #define pte_iterate_hashed_subpages(rpte, psize, va, index, shift)       \
+ 	do {							         \
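Converting these no-op macros to static inline functions is a common hardening move: callers now get real type-checking on the pte_t/real_pte_t arguments, and each argument is evaluated exactly once, with identical generated code. The generic pattern, sketched:

/* Macro: no type check, and x is evaluated twice */
#define sq_m(x)	((x) * (x))

/* Inline function: type-checked, x evaluated exactly once */
static inline long sq(long x)
{
	return x * x;
}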
+diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
+index acdab294b340a8..c1d9b031f0d578 100644
+--- a/arch/powerpc/lib/code-patching.c
++++ b/arch/powerpc/lib/code-patching.c
+@@ -108,7 +108,7 @@ static int text_area_cpu_up(unsigned int cpu)
+ 	unsigned long addr;
+ 	int err;
+ 
+-	area = get_vm_area(PAGE_SIZE, VM_ALLOC);
++	area = get_vm_area(PAGE_SIZE, 0);
+ 	if (!area) {
+ 		WARN_ONCE(1, "Failed to create text area for cpu %d\n",
+ 			cpu);
+@@ -493,7 +493,9 @@ static int __do_patch_instructions_mm(u32 *addr, u32 *code, size_t len, bool rep
+ 
+ 	orig_mm = start_using_temp_mm(patching_mm);
+ 
++	kasan_disable_current();
+ 	err = __patch_instructions(patch_addr, code, len, repeat_instr);
++	kasan_enable_current();
+ 
+ 	/* context synchronisation performed by __patch_instructions */
+ 	stop_using_temp_mm(patching_mm, orig_mm);
+diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c
+index c2ee0745f59edc..7b69be63d5d20a 100644
+--- a/arch/s390/boot/startup.c
++++ b/arch/s390/boot/startup.c
+@@ -75,7 +75,7 @@ static int cmma_test_essa(void)
+ 		: [reg1] "=&d" (reg1),
+ 		  [reg2] "=&a" (reg2),
+ 		  [rc] "+&d" (rc),
+-		  [tmp] "=&d" (tmp),
++		  [tmp] "+&d" (tmp),
+ 		  "+Q" (get_lowcore()->program_new_psw),
+ 		  "=Q" (old)
+ 		: [psw_old] "a" (&old),
+diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
+index f5bf400f6a2833..9ec3170c18f925 100644
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -397,34 +397,28 @@ static struct event_constraint intel_lnc_event_constraints[] = {
+ 	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FETCH_LAT, 6),
+ 	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_MEM_BOUND, 7),
+ 
++	INTEL_EVENT_CONSTRAINT(0x20, 0xf),
++
++	INTEL_UEVENT_CONSTRAINT(0x012a, 0xf),
++	INTEL_UEVENT_CONSTRAINT(0x012b, 0xf),
+ 	INTEL_UEVENT_CONSTRAINT(0x0148, 0x4),
+ 	INTEL_UEVENT_CONSTRAINT(0x0175, 0x4),
+ 
+ 	INTEL_EVENT_CONSTRAINT(0x2e, 0x3ff),
+ 	INTEL_EVENT_CONSTRAINT(0x3c, 0x3ff),
+-	/*
+-	 * Generally event codes < 0x90 are restricted to counters 0-3.
+-	 * The 0x2E and 0x3C are exception, which has no restriction.
+-	 */
+-	INTEL_EVENT_CONSTRAINT_RANGE(0x01, 0x8f, 0xf),
+ 
+-	INTEL_UEVENT_CONSTRAINT(0x01a3, 0xf),
+-	INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf),
+ 	INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4),
+ 	INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4),
+ 	INTEL_UEVENT_CONSTRAINT(0x04a4, 0x1),
+ 	INTEL_UEVENT_CONSTRAINT(0x08a4, 0x1),
+ 	INTEL_UEVENT_CONSTRAINT(0x10a4, 0x1),
+ 	INTEL_UEVENT_CONSTRAINT(0x01b1, 0x8),
++	INTEL_UEVENT_CONSTRAINT(0x01cd, 0x3fc),
+ 	INTEL_UEVENT_CONSTRAINT(0x02cd, 0x3),
+-	INTEL_EVENT_CONSTRAINT(0xce, 0x1),
+ 
+ 	INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xdf, 0xf),
+-	/*
+-	 * Generally event codes >= 0x90 are likely to have no restrictions.
+-	 * The exception are defined as above.
+-	 */
+-	INTEL_EVENT_CONSTRAINT_RANGE(0x90, 0xfe, 0x3ff),
++
++	INTEL_UEVENT_CONSTRAINT(0x00e0, 0xf),
+ 
+ 	EVENT_CONSTRAINT_END
+ };
+diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
+index b6303b0224531b..c07ca43e67e7f1 100644
+--- a/arch/x86/events/intel/ds.c
++++ b/arch/x86/events/intel/ds.c
+@@ -1178,7 +1178,7 @@ struct event_constraint intel_lnc_pebs_event_constraints[] = {
+ 	INTEL_FLAGS_UEVENT_CONSTRAINT(0x100, 0x100000000ULL),	/* INST_RETIRED.PREC_DIST */
+ 	INTEL_FLAGS_UEVENT_CONSTRAINT(0x0400, 0x800000000ULL),
+ 
+-	INTEL_HYBRID_LDLAT_CONSTRAINT(0x1cd, 0x3ff),
++	INTEL_HYBRID_LDLAT_CONSTRAINT(0x1cd, 0x3fc),
+ 	INTEL_HYBRID_STLAT_CONSTRAINT(0x2cd, 0x3),
+ 	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf),	/* MEM_INST_RETIRED.STLB_MISS_LOADS */
+ 	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf),	/* MEM_INST_RETIRED.STLB_MISS_STORES */
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index 375bbb9600d3c1..1a8148dec4afe9 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -816,6 +816,17 @@ static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
+ 	}
+ }
+ 
++void kvm_apic_update_hwapic_isr(struct kvm_vcpu *vcpu)
++{
++	struct kvm_lapic *apic = vcpu->arch.apic;
++
++	if (WARN_ON_ONCE(!lapic_in_kernel(vcpu)) || !apic->apicv_active)
++		return;
++
++	kvm_x86_call(hwapic_isr_update)(vcpu, apic_find_highest_isr(apic));
++}
++EXPORT_SYMBOL_GPL(kvm_apic_update_hwapic_isr);
++
+ int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
+ {
+ 	/* This may race with setting of irr in __apic_accept_irq() and
+diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
+index 1b8ef9856422a4..3aa599db779689 100644
+--- a/arch/x86/kvm/lapic.h
++++ b/arch/x86/kvm/lapic.h
+@@ -117,11 +117,10 @@ bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
+ 		struct kvm_lapic_irq *irq, int *r, struct dest_map *dest_map);
+ void kvm_apic_send_ipi(struct kvm_lapic *apic, u32 icr_low, u32 icr_high);
+ 
+-u64 kvm_get_apic_base(struct kvm_vcpu *vcpu);
+ int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
+ int kvm_apic_get_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s);
+ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s);
+-enum lapic_mode kvm_get_apic_mode(struct kvm_vcpu *vcpu);
++void kvm_apic_update_hwapic_isr(struct kvm_vcpu *vcpu);
+ int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu);
+ 
+ u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu);
+@@ -271,6 +270,11 @@ static inline enum lapic_mode kvm_apic_mode(u64 apic_base)
+ 	return apic_base & (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE);
+ }
+ 
++static inline enum lapic_mode kvm_get_apic_mode(struct kvm_vcpu *vcpu)
++{
++	return kvm_apic_mode(vcpu->arch.apic_base);
++}
++
+ static inline u8 kvm_xapic_id(struct kvm_lapic *apic)
+ {
+ 	return kvm_lapic_get_reg(apic, APIC_ID) >> 24;
+diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
+index 931a7361c30f2d..22bee8a711442d 100644
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -5043,6 +5043,11 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
+ 		kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
+ 	}
+ 
++	if (vmx->nested.update_vmcs01_hwapic_isr) {
++		vmx->nested.update_vmcs01_hwapic_isr = false;
++		kvm_apic_update_hwapic_isr(vcpu);
++	}
++
+ 	if ((vm_exit_reason != -1) &&
+ 	    (enable_shadow_vmcs || nested_vmx_is_evmptr12_valid(vmx)))
+ 		vmx->nested.need_vmcs12_to_shadow_sync = true;
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index f06d443ec3c68d..1af30e3472cdd9 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -6858,6 +6858,27 @@ void vmx_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr)
+ 	u16 status;
+ 	u8 old;
+ 
++	/*
++	 * If L2 is active, defer the SVI update until vmcs01 is loaded, as SVI
++	 * is only relevant if and only if Virtual Interrupt Delivery is
++	 * enabled in vmcs12, and if VID is enabled then L2 EOIs affect L2's
++	 * vAPIC, not L1's vAPIC.  KVM must update vmcs01 on the next nested
++	 * VM-Exit, otherwise L1 will run with a stale SVI.
++	 */
++	if (is_guest_mode(vcpu)) {
++		/*
++		 * KVM is supposed to forward intercepted L2 EOIs to L1 if VID
++		 * is enabled in vmcs12; as above, the EOIs affect L2's vAPIC.
++		 * Note, userspace can stuff state while L2 is active; assert
++		 * that VID is disabled if and only if the vCPU is in KVM_RUN
++		 * to avoid false positives if userspace is setting APIC state.
++		 */
++		WARN_ON_ONCE(vcpu->wants_to_run &&
++			     nested_cpu_has_vid(get_vmcs12(vcpu)));
++		to_vmx(vcpu)->nested.update_vmcs01_hwapic_isr = true;
++		return;
++	}
++
+ 	if (max_isr == -1)
+ 		max_isr = 0;
+ 
+diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
+index 2325f773a20be0..41bf59bbc6426c 100644
+--- a/arch/x86/kvm/vmx/vmx.h
++++ b/arch/x86/kvm/vmx/vmx.h
+@@ -176,6 +176,7 @@ struct nested_vmx {
+ 	bool reload_vmcs01_apic_access_page;
+ 	bool update_vmcs01_cpu_dirty_logging;
+ 	bool update_vmcs01_apicv_status;
++	bool update_vmcs01_hwapic_isr;
+ 
+ 	/*
+ 	 * Enlightened VMCS has been enabled. It does not mean that L1 has to
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 0846e3af5f6c5a..b67a2f46e40b05 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -667,17 +667,6 @@ static void drop_user_return_notifiers(void)
+ 		kvm_on_user_return(&msrs->urn);
+ }
+ 
+-u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
+-{
+-	return vcpu->arch.apic_base;
+-}
+-
+-enum lapic_mode kvm_get_apic_mode(struct kvm_vcpu *vcpu)
+-{
+-	return kvm_apic_mode(kvm_get_apic_base(vcpu));
+-}
+-EXPORT_SYMBOL_GPL(kvm_get_apic_mode);
+-
+ int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ {
+ 	enum lapic_mode old_mode = kvm_get_apic_mode(vcpu);
+@@ -4314,7 +4303,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ 		msr_info->data = 1 << 24;
+ 		break;
+ 	case MSR_IA32_APICBASE:
+-		msr_info->data = kvm_get_apic_base(vcpu);
++		msr_info->data = vcpu->arch.apic_base;
+ 		break;
+ 	case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff:
+ 		return kvm_x2apic_msr_read(vcpu, msr_info->index, &msr_info->data);
+@@ -10159,7 +10148,7 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu)
+ 
+ 	kvm_run->if_flag = kvm_x86_call(get_if_flag)(vcpu);
+ 	kvm_run->cr8 = kvm_get_cr8(vcpu);
+-	kvm_run->apic_base = kvm_get_apic_base(vcpu);
++	kvm_run->apic_base = vcpu->arch.apic_base;
+ 
+ 	kvm_run->ready_for_interrupt_injection =
+ 		pic_in_kernel(vcpu->kvm) ||
+@@ -11718,7 +11707,7 @@ static void __get_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+ 	sregs->cr4 = kvm_read_cr4(vcpu);
+ 	sregs->cr8 = kvm_get_cr8(vcpu);
+ 	sregs->efer = vcpu->arch.efer;
+-	sregs->apic_base = kvm_get_apic_base(vcpu);
++	sregs->apic_base = vcpu->arch.apic_base;
+ }
+ 
+ static void __get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+diff --git a/drivers/accel/ivpu/Kconfig b/drivers/accel/ivpu/Kconfig
+index 682c532452863e..e4d418b44626ed 100644
+--- a/drivers/accel/ivpu/Kconfig
++++ b/drivers/accel/ivpu/Kconfig
+@@ -8,6 +8,7 @@ config DRM_ACCEL_IVPU
+ 	select FW_LOADER
+ 	select DRM_GEM_SHMEM_HELPER
+ 	select GENERIC_ALLOCATOR
++	select WANT_DEV_COREDUMP
+ 	help
+ 	  Choose this option if you have a system with an 14th generation
+ 	  Intel CPU (Meteor Lake) or newer. Intel NPU (formerly called Intel VPU)
+diff --git a/drivers/accel/ivpu/Makefile b/drivers/accel/ivpu/Makefile
+index ebd682a42eb124..232ea6d28c6e25 100644
+--- a/drivers/accel/ivpu/Makefile
++++ b/drivers/accel/ivpu/Makefile
+@@ -19,5 +19,6 @@ intel_vpu-y := \
+ 	ivpu_sysfs.o
+ 
+ intel_vpu-$(CONFIG_DEBUG_FS) += ivpu_debugfs.o
++intel_vpu-$(CONFIG_DEV_COREDUMP) += ivpu_coredump.o
+ 
+ obj-$(CONFIG_DRM_ACCEL_IVPU) += intel_vpu.o
+diff --git a/drivers/accel/ivpu/ivpu_coredump.c b/drivers/accel/ivpu/ivpu_coredump.c
+new file mode 100644
+index 00000000000000..16ad0c30818ccf
+--- /dev/null
++++ b/drivers/accel/ivpu/ivpu_coredump.c
+@@ -0,0 +1,39 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/*
++ * Copyright (C) 2020-2024 Intel Corporation
++ */
++
++#include <linux/devcoredump.h>
++#include <linux/firmware.h>
++
++#include "ivpu_coredump.h"
++#include "ivpu_fw.h"
++#include "ivpu_gem.h"
++#include "vpu_boot_api.h"
++
++#define CRASH_DUMP_HEADER "Intel NPU crash dump"
++#define CRASH_DUMP_HEADERS_SIZE SZ_4K
++
++void ivpu_dev_coredump(struct ivpu_device *vdev)
++{
++	struct drm_print_iterator pi = {};
++	struct drm_printer p;
++	size_t coredump_size;
++	char *coredump;
++
++	coredump_size = CRASH_DUMP_HEADERS_SIZE + FW_VERSION_HEADER_SIZE +
++			ivpu_bo_size(vdev->fw->mem_log_crit) + ivpu_bo_size(vdev->fw->mem_log_verb);
++	coredump = vmalloc(coredump_size);
++	if (!coredump)
++		return;
++
++	pi.data = coredump;
++	pi.remain = coredump_size;
++	p = drm_coredump_printer(&pi);
++
++	drm_printf(&p, "%s\n", CRASH_DUMP_HEADER);
++	drm_printf(&p, "FW version: %s\n", vdev->fw->version);
++	ivpu_fw_log_print(vdev, false, &p);
++
++	dev_coredumpv(vdev->drm.dev, coredump, pi.offset, GFP_KERNEL);
++}
+diff --git a/drivers/accel/ivpu/ivpu_coredump.h b/drivers/accel/ivpu/ivpu_coredump.h
+new file mode 100644
+index 00000000000000..8efb09d0244115
+--- /dev/null
++++ b/drivers/accel/ivpu/ivpu_coredump.h
+@@ -0,0 +1,25 @@
++/* SPDX-License-Identifier: GPL-2.0-only */
++/*
++ * Copyright (C) 2020-2024 Intel Corporation
++ */
++
++#ifndef __IVPU_COREDUMP_H__
++#define __IVPU_COREDUMP_H__
++
++#include <drm/drm_print.h>
++
++#include "ivpu_drv.h"
++#include "ivpu_fw_log.h"
++
++#ifdef CONFIG_DEV_COREDUMP
++void ivpu_dev_coredump(struct ivpu_device *vdev);
++#else
++static inline void ivpu_dev_coredump(struct ivpu_device *vdev)
++{
++	struct drm_printer p = drm_info_printer(vdev->drm.dev);
++
++	ivpu_fw_log_print(vdev, false, &p);
++}
++#endif
++
++#endif /* __IVPU_COREDUMP_H__ */
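ivpu_dev_coredump() above is the stock devcoredump recipe: render text through drm_coredump_printer() into a preallocated buffer, then hand the buffer to dev_coredumpv(), which takes ownership (freeing it with vfree() when done) and exposes the dump to userspace, typically as /sys/class/devcoredump/devcd<N>/data. A condensed sketch of the same flow with hypothetical names (buf must be vmalloc()-allocated):

#include <linux/devcoredump.h>
#include <drm/drm_print.h>

static void my_coredump(struct device *dev, char *buf, size_t buf_size)
{
	struct drm_print_iterator pi = { .data = buf, .remain = buf_size };
	struct drm_printer p = drm_coredump_printer(&pi);

	drm_printf(&p, "my crash header\n");
	/* pi.offset is the number of bytes actually emitted */
	dev_coredumpv(dev, buf, pi.offset, GFP_KERNEL);	/* buf now owned by devcoredump */
}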
+diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c
+index c91400ecf92651..38b4158f52784b 100644
+--- a/drivers/accel/ivpu/ivpu_drv.c
++++ b/drivers/accel/ivpu/ivpu_drv.c
+@@ -14,7 +14,7 @@
+ #include <drm/drm_ioctl.h>
+ #include <drm/drm_prime.h>
+ 
+-#include "vpu_boot_api.h"
++#include "ivpu_coredump.h"
+ #include "ivpu_debugfs.h"
+ #include "ivpu_drv.h"
+ #include "ivpu_fw.h"
+@@ -29,6 +29,7 @@
+ #include "ivpu_ms.h"
+ #include "ivpu_pm.h"
+ #include "ivpu_sysfs.h"
++#include "vpu_boot_api.h"
+ 
+ #ifndef DRIVER_VERSION_STR
+ #define DRIVER_VERSION_STR __stringify(DRM_IVPU_DRIVER_MAJOR) "." \
+@@ -382,7 +383,7 @@ int ivpu_boot(struct ivpu_device *vdev)
+ 		ivpu_err(vdev, "Failed to boot the firmware: %d\n", ret);
+ 		ivpu_hw_diagnose_failure(vdev);
+ 		ivpu_mmu_evtq_dump(vdev);
+-		ivpu_fw_log_dump(vdev);
++		ivpu_dev_coredump(vdev);
+ 		return ret;
+ 	}
+ 
+diff --git a/drivers/accel/ivpu/ivpu_drv.h b/drivers/accel/ivpu/ivpu_drv.h
+index 63f13b697eed71..2b30cc2e9272e4 100644
+--- a/drivers/accel/ivpu/ivpu_drv.h
++++ b/drivers/accel/ivpu/ivpu_drv.h
+@@ -152,6 +152,7 @@ struct ivpu_device {
+ 		int tdr;
+ 		int autosuspend;
+ 		int d0i3_entry_msg;
++		int state_dump_msg;
+ 	} timeout;
+ };
+ 
+diff --git a/drivers/accel/ivpu/ivpu_fw.c b/drivers/accel/ivpu/ivpu_fw.c
+index ede6165e09d90d..b2b6d89f06537f 100644
+--- a/drivers/accel/ivpu/ivpu_fw.c
++++ b/drivers/accel/ivpu/ivpu_fw.c
+@@ -25,7 +25,6 @@
+ #define FW_SHAVE_NN_MAX_SIZE	SZ_2M
+ #define FW_RUNTIME_MIN_ADDR	(FW_GLOBAL_MEM_START)
+ #define FW_RUNTIME_MAX_ADDR	(FW_GLOBAL_MEM_END - FW_SHARED_MEM_SIZE)
+-#define FW_VERSION_HEADER_SIZE	SZ_4K
+ #define FW_FILE_IMAGE_OFFSET	(VPU_FW_HEADER_SIZE + FW_VERSION_HEADER_SIZE)
+ 
+ #define WATCHDOG_MSS_REDIRECT	32
+@@ -191,8 +190,10 @@ static int ivpu_fw_parse(struct ivpu_device *vdev)
+ 	ivpu_dbg(vdev, FW_BOOT, "Header version: 0x%x, format 0x%x\n",
+ 		 fw_hdr->header_version, fw_hdr->image_format);
+ 
+-	ivpu_info(vdev, "Firmware: %s, version: %s", fw->name,
+-		  (const char *)fw_hdr + VPU_FW_HEADER_SIZE);
++	if (!scnprintf(fw->version, sizeof(fw->version), "%s", fw->file->data + VPU_FW_HEADER_SIZE))
++		ivpu_warn(vdev, "Missing firmware version\n");
++
++	ivpu_info(vdev, "Firmware: %s, version: %s\n", fw->name, fw->version);
+ 
+ 	if (IVPU_FW_CHECK_API_COMPAT(vdev, fw_hdr, BOOT, 3))
+ 		return -EINVAL;
+diff --git a/drivers/accel/ivpu/ivpu_fw.h b/drivers/accel/ivpu/ivpu_fw.h
+index 40d9d17be3f528..5e8eb608b70f1f 100644
+--- a/drivers/accel/ivpu/ivpu_fw.h
++++ b/drivers/accel/ivpu/ivpu_fw.h
+@@ -1,11 +1,14 @@
+ /* SPDX-License-Identifier: GPL-2.0-only */
+ /*
+- * Copyright (C) 2020-2023 Intel Corporation
++ * Copyright (C) 2020-2024 Intel Corporation
+  */
+ 
+ #ifndef __IVPU_FW_H__
+ #define __IVPU_FW_H__
+ 
++#define FW_VERSION_HEADER_SIZE	SZ_4K
++#define FW_VERSION_STR_SIZE	SZ_256
++
+ struct ivpu_device;
+ struct ivpu_bo;
+ struct vpu_boot_params;
+@@ -13,6 +16,7 @@ struct vpu_boot_params;
+ struct ivpu_fw_info {
+ 	const struct firmware *file;
+ 	const char *name;
++	char version[FW_VERSION_STR_SIZE];
+ 	struct ivpu_bo *mem;
+ 	struct ivpu_bo *mem_shave_nn;
+ 	struct ivpu_bo *mem_log_crit;
+diff --git a/drivers/accel/ivpu/ivpu_fw_log.h b/drivers/accel/ivpu/ivpu_fw_log.h
+index 0b2573f6f31519..4b390a99699d66 100644
+--- a/drivers/accel/ivpu/ivpu_fw_log.h
++++ b/drivers/accel/ivpu/ivpu_fw_log.h
+@@ -8,8 +8,6 @@
+ 
+ #include <linux/types.h>
+ 
+-#include <drm/drm_print.h>
+-
+ #include "ivpu_drv.h"
+ 
+ #define IVPU_FW_LOG_DEFAULT 0
+@@ -28,11 +26,5 @@ extern unsigned int ivpu_log_level;
+ void ivpu_fw_log_print(struct ivpu_device *vdev, bool only_new_msgs, struct drm_printer *p);
+ void ivpu_fw_log_clear(struct ivpu_device *vdev);
+ 
+-static inline void ivpu_fw_log_dump(struct ivpu_device *vdev)
+-{
+-	struct drm_printer p = drm_info_printer(vdev->drm.dev);
+-
+-	ivpu_fw_log_print(vdev, false, &p);
+-}
+ 
+ #endif /* __IVPU_FW_LOG_H__ */
+diff --git a/drivers/accel/ivpu/ivpu_hw.c b/drivers/accel/ivpu/ivpu_hw.c
+index e69c0613513f11..08b3cef58fd2d7 100644
+--- a/drivers/accel/ivpu/ivpu_hw.c
++++ b/drivers/accel/ivpu/ivpu_hw.c
+@@ -89,12 +89,14 @@ static void timeouts_init(struct ivpu_device *vdev)
+ 		vdev->timeout.tdr = 2000000;
+ 		vdev->timeout.autosuspend = -1;
+ 		vdev->timeout.d0i3_entry_msg = 500;
++		vdev->timeout.state_dump_msg = 10;
+ 	} else if (ivpu_is_simics(vdev)) {
+ 		vdev->timeout.boot = 50;
+ 		vdev->timeout.jsm = 500;
+ 		vdev->timeout.tdr = 10000;
+ 		vdev->timeout.autosuspend = -1;
+ 		vdev->timeout.d0i3_entry_msg = 100;
++		vdev->timeout.state_dump_msg = 10;
+ 	} else {
+ 		vdev->timeout.boot = 1000;
+ 		vdev->timeout.jsm = 500;
+@@ -104,6 +106,7 @@ static void timeouts_init(struct ivpu_device *vdev)
+ 		else
+ 			vdev->timeout.autosuspend = 100;
+ 		vdev->timeout.d0i3_entry_msg = 5;
++		vdev->timeout.state_dump_msg = 10;
+ 	}
+ }
+ 
+diff --git a/drivers/accel/ivpu/ivpu_ipc.c b/drivers/accel/ivpu/ivpu_ipc.c
+index 29b723039a3459..13c8a12162e89e 100644
+--- a/drivers/accel/ivpu/ivpu_ipc.c
++++ b/drivers/accel/ivpu/ivpu_ipc.c
+@@ -353,6 +353,32 @@ int ivpu_ipc_send_receive(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
+ 	return ret;
+ }
+ 
++int ivpu_ipc_send_and_wait(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
++			   u32 channel, unsigned long timeout_ms)
++{
++	struct ivpu_ipc_consumer cons;
++	int ret;
++
++	ret = ivpu_rpm_get(vdev);
++	if (ret < 0)
++		return ret;
++
++	ivpu_ipc_consumer_add(vdev, &cons, channel, NULL);
++
++	ret = ivpu_ipc_send(vdev, &cons, req);
++	if (ret) {
++		ivpu_warn_ratelimited(vdev, "IPC send failed: %d\n", ret);
++		goto consumer_del;
++	}
++
++	msleep(timeout_ms);
++
++consumer_del:
++	ivpu_ipc_consumer_del(vdev, &cons);
++	ivpu_rpm_put(vdev);
++	return ret;
++}
++
+ static bool
+ ivpu_ipc_match_consumer(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
+ 			struct ivpu_ipc_hdr *ipc_hdr, struct vpu_jsm_msg *jsm_msg)
+diff --git a/drivers/accel/ivpu/ivpu_ipc.h b/drivers/accel/ivpu/ivpu_ipc.h
+index fb4de7fb8210ea..b4dfb504679bac 100644
+--- a/drivers/accel/ivpu/ivpu_ipc.h
++++ b/drivers/accel/ivpu/ivpu_ipc.h
+@@ -107,5 +107,7 @@ int ivpu_ipc_send_receive_internal(struct ivpu_device *vdev, struct vpu_jsm_msg
+ int ivpu_ipc_send_receive(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
+ 			  enum vpu_ipc_msg_type expected_resp, struct vpu_jsm_msg *resp,
+ 			  u32 channel, unsigned long timeout_ms);
++int ivpu_ipc_send_and_wait(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
++			   u32 channel, unsigned long timeout_ms);
+ 
+ #endif /* __IVPU_IPC_H__ */
+diff --git a/drivers/accel/ivpu/ivpu_jsm_msg.c b/drivers/accel/ivpu/ivpu_jsm_msg.c
+index 88105963c1b288..f7618b605f0219 100644
+--- a/drivers/accel/ivpu/ivpu_jsm_msg.c
++++ b/drivers/accel/ivpu/ivpu_jsm_msg.c
+@@ -555,3 +555,11 @@ int ivpu_jsm_dct_disable(struct ivpu_device *vdev)
+ 	return ivpu_ipc_send_receive_internal(vdev, &req, VPU_JSM_MSG_DCT_DISABLE_DONE, &resp,
+ 					      VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
+ }
++
++int ivpu_jsm_state_dump(struct ivpu_device *vdev)
++{
++	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_STATE_DUMP };
++
++	return ivpu_ipc_send_and_wait(vdev, &req, VPU_IPC_CHAN_ASYNC_CMD,
++				      vdev->timeout.state_dump_msg);
++}
+diff --git a/drivers/accel/ivpu/ivpu_jsm_msg.h b/drivers/accel/ivpu/ivpu_jsm_msg.h
+index e4e42c0ff6e656..9e84d3526a1463 100644
+--- a/drivers/accel/ivpu/ivpu_jsm_msg.h
++++ b/drivers/accel/ivpu/ivpu_jsm_msg.h
+@@ -43,4 +43,6 @@ int ivpu_jsm_metric_streamer_info(struct ivpu_device *vdev, u64 metric_group_mas
+ 				  u64 buffer_size, u32 *sample_size, u64 *info_size);
+ int ivpu_jsm_dct_enable(struct ivpu_device *vdev, u32 active_us, u32 inactive_us);
+ int ivpu_jsm_dct_disable(struct ivpu_device *vdev);
++int ivpu_jsm_state_dump(struct ivpu_device *vdev);
++
+ #endif
+diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c
+index ef9a4ba18cb8a8..fbb61a2c3b19ce 100644
+--- a/drivers/accel/ivpu/ivpu_pm.c
++++ b/drivers/accel/ivpu/ivpu_pm.c
+@@ -9,17 +9,18 @@
+ #include <linux/pm_runtime.h>
+ #include <linux/reboot.h>
+ 
+-#include "vpu_boot_api.h"
++#include "ivpu_coredump.h"
+ #include "ivpu_drv.h"
+-#include "ivpu_hw.h"
+ #include "ivpu_fw.h"
+ #include "ivpu_fw_log.h"
++#include "ivpu_hw.h"
+ #include "ivpu_ipc.h"
+ #include "ivpu_job.h"
+ #include "ivpu_jsm_msg.h"
+ #include "ivpu_mmu.h"
+ #include "ivpu_ms.h"
+ #include "ivpu_pm.h"
++#include "vpu_boot_api.h"
+ 
+ static bool ivpu_disable_recovery;
+ module_param_named_unsafe(disable_recovery, ivpu_disable_recovery, bool, 0644);
+@@ -110,40 +111,57 @@ static int ivpu_resume(struct ivpu_device *vdev)
+ 	return ret;
+ }
+ 
+-static void ivpu_pm_recovery_work(struct work_struct *work)
++static void ivpu_pm_reset_begin(struct ivpu_device *vdev)
+ {
+-	struct ivpu_pm_info *pm = container_of(work, struct ivpu_pm_info, recovery_work);
+-	struct ivpu_device *vdev = pm->vdev;
+-	char *evt[2] = {"IVPU_PM_EVENT=IVPU_RECOVER", NULL};
+-	int ret;
+-
+-	ivpu_err(vdev, "Recovering the NPU (reset #%d)\n", atomic_read(&vdev->pm->reset_counter));
+-
+-	ret = pm_runtime_resume_and_get(vdev->drm.dev);
+-	if (ret)
+-		ivpu_err(vdev, "Failed to resume NPU: %d\n", ret);
+-
+-	ivpu_fw_log_dump(vdev);
++	pm_runtime_disable(vdev->drm.dev);
+ 
+ 	atomic_inc(&vdev->pm->reset_counter);
+ 	atomic_set(&vdev->pm->reset_pending, 1);
+ 	down_write(&vdev->pm->reset_lock);
++}
++
++static void ivpu_pm_reset_complete(struct ivpu_device *vdev)
++{
++	int ret;
+ 
+-	ivpu_suspend(vdev);
+ 	ivpu_pm_prepare_cold_boot(vdev);
+ 	ivpu_jobs_abort_all(vdev);
+ 	ivpu_ms_cleanup_all(vdev);
+ 
+ 	ret = ivpu_resume(vdev);
+-	if (ret)
++	if (ret) {
+ 		ivpu_err(vdev, "Failed to resume NPU: %d\n", ret);
++		pm_runtime_set_suspended(vdev->drm.dev);
++	} else {
++		pm_runtime_set_active(vdev->drm.dev);
++	}
+ 
+ 	up_write(&vdev->pm->reset_lock);
+ 	atomic_set(&vdev->pm->reset_pending, 0);
+ 
+-	kobject_uevent_env(&vdev->drm.dev->kobj, KOBJ_CHANGE, evt);
+ 	pm_runtime_mark_last_busy(vdev->drm.dev);
+-	pm_runtime_put_autosuspend(vdev->drm.dev);
++	pm_runtime_enable(vdev->drm.dev);
++}
++
++static void ivpu_pm_recovery_work(struct work_struct *work)
++{
++	struct ivpu_pm_info *pm = container_of(work, struct ivpu_pm_info, recovery_work);
++	struct ivpu_device *vdev = pm->vdev;
++	char *evt[2] = {"IVPU_PM_EVENT=IVPU_RECOVER", NULL};
++
++	ivpu_err(vdev, "Recovering the NPU (reset #%d)\n", atomic_read(&vdev->pm->reset_counter));
++
++	ivpu_pm_reset_begin(vdev);
++
++	if (!pm_runtime_status_suspended(vdev->drm.dev)) {
++		ivpu_jsm_state_dump(vdev);
++		ivpu_dev_coredump(vdev);
++		ivpu_suspend(vdev);
++	}
++
++	ivpu_pm_reset_complete(vdev);
++
++	kobject_uevent_env(&vdev->drm.dev->kobj, KOBJ_CHANGE, evt);
+ }
+ 
+ void ivpu_pm_trigger_recovery(struct ivpu_device *vdev, const char *reason)
+@@ -262,7 +280,7 @@ int ivpu_pm_runtime_suspend_cb(struct device *dev)
+ 	if (!is_idle || ret_d0i3) {
+ 		ivpu_err(vdev, "Forcing cold boot due to previous errors\n");
+ 		atomic_inc(&vdev->pm->reset_counter);
+-		ivpu_fw_log_dump(vdev);
++		ivpu_dev_coredump(vdev);
+ 		ivpu_pm_prepare_cold_boot(vdev);
+ 	} else {
+ 		ivpu_pm_prepare_warm_boot(vdev);
+@@ -314,16 +332,13 @@ void ivpu_pm_reset_prepare_cb(struct pci_dev *pdev)
+ 	struct ivpu_device *vdev = pci_get_drvdata(pdev);
+ 
+ 	ivpu_dbg(vdev, PM, "Pre-reset..\n");
+-	atomic_inc(&vdev->pm->reset_counter);
+-	atomic_set(&vdev->pm->reset_pending, 1);
+ 
+-	pm_runtime_get_sync(vdev->drm.dev);
+-	down_write(&vdev->pm->reset_lock);
+-	ivpu_prepare_for_reset(vdev);
+-	ivpu_hw_reset(vdev);
+-	ivpu_pm_prepare_cold_boot(vdev);
+-	ivpu_jobs_abort_all(vdev);
+-	ivpu_ms_cleanup_all(vdev);
++	ivpu_pm_reset_begin(vdev);
++
++	if (!pm_runtime_status_suspended(vdev->drm.dev)) {
++		ivpu_prepare_for_reset(vdev);
++		ivpu_hw_reset(vdev);
++	}
+ 
+ 	ivpu_dbg(vdev, PM, "Pre-reset done.\n");
+ }
+@@ -331,18 +346,12 @@ void ivpu_pm_reset_prepare_cb(struct pci_dev *pdev)
+ void ivpu_pm_reset_done_cb(struct pci_dev *pdev)
+ {
+ 	struct ivpu_device *vdev = pci_get_drvdata(pdev);
+-	int ret;
+ 
+ 	ivpu_dbg(vdev, PM, "Post-reset..\n");
+-	ret = ivpu_resume(vdev);
+-	if (ret)
+-		ivpu_err(vdev, "Failed to set RESUME state: %d\n", ret);
+-	up_write(&vdev->pm->reset_lock);
+-	atomic_set(&vdev->pm->reset_pending, 0);
+-	ivpu_dbg(vdev, PM, "Post-reset done.\n");
+ 
+-	pm_runtime_mark_last_busy(vdev->drm.dev);
+-	pm_runtime_put_autosuspend(vdev->drm.dev);
++	ivpu_pm_reset_complete(vdev);
++
++	ivpu_dbg(vdev, PM, "Post-reset done.\n");
+ }
+ 
+ void ivpu_pm_init(struct ivpu_device *vdev)
+diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
+index dfbbac92242a84..04d02c746ec0fd 100644
+--- a/drivers/bluetooth/btqca.c
++++ b/drivers/bluetooth/btqca.c
+@@ -272,6 +272,39 @@ int qca_send_pre_shutdown_cmd(struct hci_dev *hdev)
+ }
+ EXPORT_SYMBOL_GPL(qca_send_pre_shutdown_cmd);
+ 
++static bool qca_filename_has_extension(const char *filename)
++{
++	const char *suffix = strrchr(filename, '.');
++
++	/* File extensions require a dot, but not as the first or last character */
++	if (!suffix || suffix == filename || *(suffix + 1) == '\0')
++		return 0;
++
++	/* Avoid matching directories with names that look like files with extensions */
++	return !strchr(suffix, '/');
++}
++
++static bool qca_get_alt_nvm_file(char *filename, size_t max_size)
++{
++	char fwname[64];
++	const char *suffix;
++
++	/* nvm file name has an extension, replace with .bin */
++	if (qca_filename_has_extension(filename)) {
++		suffix = strrchr(filename, '.');
++		strscpy(fwname, filename, suffix - filename + 1);
++		snprintf(fwname + (suffix - filename),
++		       sizeof(fwname) - (suffix - filename), ".bin");
++		/* If nvm file is already the default one, return false to skip the retry. */
++		if (strcmp(fwname, filename) == 0)
++			return false;
++
++		snprintf(filename, max_size, "%s", fwname);
++		return true;
++	}
++	return false;
++}
++
+ static int qca_tlv_check_data(struct hci_dev *hdev,
+ 			       struct qca_fw_config *config,
+ 			       u8 *fw_data, size_t fw_size,
+@@ -564,6 +597,19 @@ static int qca_download_firmware(struct hci_dev *hdev,
+ 					   config->fwname, ret);
+ 				return ret;
+ 			}
++		}
++		/* If the board-specific file is missing, try loading the default
++		 * one, unless that was attempted already.
++		 */
++		else if (config->type == TLV_TYPE_NVM &&
++			 qca_get_alt_nvm_file(config->fwname, sizeof(config->fwname))) {
++			bt_dev_info(hdev, "QCA Downloading %s", config->fwname);
++			ret = request_firmware(&fw, config->fwname, &hdev->dev);
++			if (ret) {
++				bt_dev_err(hdev, "QCA Failed to request file: %s (%d)",
++					   config->fwname, ret);
++				return ret;
++			}
+ 		} else {
+ 			bt_dev_err(hdev, "QCA Failed to request file: %s (%d)",
+ 				   config->fwname, ret);
+@@ -700,34 +746,38 @@ static int qca_check_bdaddr(struct hci_dev *hdev, const struct qca_fw_config *co
+ 	return 0;
+ }
+ 
+-static void qca_generate_hsp_nvm_name(char *fwname, size_t max_size,
++static void qca_get_nvm_name_by_board(char *fwname, size_t max_size,
++		const char *stem, enum qca_btsoc_type soc_type,
+ 		struct qca_btsoc_version ver, u8 rom_ver, u16 bid)
+ {
+ 	const char *variant;
++	const char *prefix;
+ 
+-	/* hsp gf chip */
+-	if ((le32_to_cpu(ver.soc_id) & QCA_HSP_GF_SOC_MASK) == QCA_HSP_GF_SOC_ID)
+-		variant = "g";
+-	else
+-		variant = "";
++	/* Set the default value to variant and prefix */
++	variant = "";
++	prefix = "b";
+ 
+-	if (bid == 0x0)
+-		snprintf(fwname, max_size, "qca/hpnv%02x%s.bin", rom_ver, variant);
+-	else
+-		snprintf(fwname, max_size, "qca/hpnv%02x%s.%x", rom_ver, variant, bid);
+-}
++	if (soc_type == QCA_QCA2066)
++		prefix = "";
+ 
+-static inline void qca_get_nvm_name_generic(struct qca_fw_config *cfg,
+-					    const char *stem, u8 rom_ver, u16 bid)
+-{
+-	if (bid == 0x0)
+-		snprintf(cfg->fwname, sizeof(cfg->fwname), "qca/%snv%02x.bin", stem, rom_ver);
+-	else if (bid & 0xff00)
+-		snprintf(cfg->fwname, sizeof(cfg->fwname),
+-			 "qca/%snv%02x.b%x", stem, rom_ver, bid);
+-	else
+-		snprintf(cfg->fwname, sizeof(cfg->fwname),
+-			 "qca/%snv%02x.b%02x", stem, rom_ver, bid);
++	if (soc_type == QCA_WCN6855 || soc_type == QCA_QCA2066) {
++		/* If the chip is manufactured by GlobalFoundries */
++		if ((le32_to_cpu(ver.soc_id) & QCA_HSP_GF_SOC_MASK) == QCA_HSP_GF_SOC_ID)
++			variant = "g";
++	}
++
++	if (rom_ver != 0) {
++		if (bid == 0x0 || bid == 0xffff)
++			snprintf(fwname, max_size, "qca/%s%02x%s.bin", stem, rom_ver, variant);
++		else
++			snprintf(fwname, max_size, "qca/%s%02x%s.%s%02x", stem, rom_ver,
++						variant, prefix, bid);
++	} else {
++		if (bid == 0x0 || bid == 0xffff)
++			snprintf(fwname, max_size, "qca/%s%s.bin", stem, variant);
++		else
++			snprintf(fwname, max_size, "qca/%s%s.%s%02x", stem, variant, prefix, bid);
++	}
+ }
+ 
+ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
+@@ -816,8 +866,14 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
+ 	/* Download NVM configuration */
+ 	config.type = TLV_TYPE_NVM;
+ 	if (firmware_name) {
+-		snprintf(config.fwname, sizeof(config.fwname),
+-			 "qca/%s", firmware_name);
++		/* The firmware name has an extension, use it directly */
++		if (qca_filename_has_extension(firmware_name)) {
++			snprintf(config.fwname, sizeof(config.fwname), "qca/%s", firmware_name);
++		} else {
++			qca_read_fw_board_id(hdev, &boardid);
++			qca_get_nvm_name_by_board(config.fwname, sizeof(config.fwname),
++				 firmware_name, soc_type, ver, 0, boardid);
++		}
+ 	} else {
+ 		switch (soc_type) {
+ 		case QCA_WCN3990:
+@@ -836,8 +892,9 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
+ 				 "qca/apnv%02x.bin", rom_ver);
+ 			break;
+ 		case QCA_QCA2066:
+-			qca_generate_hsp_nvm_name(config.fwname,
+-				sizeof(config.fwname), ver, rom_ver, boardid);
++			qca_get_nvm_name_by_board(config.fwname,
++				sizeof(config.fwname), "hpnv", soc_type, ver,
++				rom_ver, boardid);
+ 			break;
+ 		case QCA_QCA6390:
+ 			snprintf(config.fwname, sizeof(config.fwname),
+@@ -848,13 +905,14 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
+ 				 "qca/msnv%02x.bin", rom_ver);
+ 			break;
+ 		case QCA_WCN6855:
+-			snprintf(config.fwname, sizeof(config.fwname),
+-				 "qca/hpnv%02x.bin", rom_ver);
++			qca_read_fw_board_id(hdev, &boardid);
++			qca_get_nvm_name_by_board(config.fwname, sizeof(config.fwname),
++						  "hpnv", soc_type, ver, rom_ver, boardid);
+ 			break;
+ 		case QCA_WCN7850:
+-			qca_get_nvm_name_generic(&config, "hmt", rom_ver, boardid);
++			qca_get_nvm_name_by_board(config.fwname, sizeof(config.fwname),
++				 "hmtnv", soc_type, ver, rom_ver, boardid);
+ 			break;
+-
+ 		default:
+ 			snprintf(config.fwname, sizeof(config.fwname),
+ 				 "qca/nvm_%08x.bin", soc_ver);
+diff --git a/drivers/clocksource/jcore-pit.c b/drivers/clocksource/jcore-pit.c
+index a3fe98cd383820..82815428f8f925 100644
+--- a/drivers/clocksource/jcore-pit.c
++++ b/drivers/clocksource/jcore-pit.c
+@@ -114,6 +114,18 @@ static int jcore_pit_local_init(unsigned cpu)
+ 	pit->periodic_delta = DIV_ROUND_CLOSEST(NSEC_PER_SEC, HZ * buspd);
+ 
+ 	clockevents_config_and_register(&pit->ced, freq, 1, ULONG_MAX);
++	enable_percpu_irq(pit->ced.irq, IRQ_TYPE_NONE);
++
++	return 0;
++}
++
++static int jcore_pit_local_teardown(unsigned cpu)
++{
++	struct jcore_pit *pit = this_cpu_ptr(jcore_pit_percpu);
++
++	pr_info("Local J-Core PIT teardown on cpu %u\n", cpu);
++
++	disable_percpu_irq(pit->ced.irq);
+ 
+ 	return 0;
+ }
+@@ -168,6 +180,7 @@ static int __init jcore_pit_init(struct device_node *node)
+ 		return -ENOMEM;
+ 	}
+ 
++	irq_set_percpu_devid(pit_irq);
+ 	err = request_percpu_irq(pit_irq, jcore_timer_interrupt,
+ 				 "jcore_pit", jcore_pit_percpu);
+ 	if (err) {
+@@ -237,7 +250,7 @@ static int __init jcore_pit_init(struct device_node *node)
+ 
+ 	cpuhp_setup_state(CPUHP_AP_JCORE_TIMER_STARTING,
+ 			  "clockevents/jcore:starting",
+-			  jcore_pit_local_init, NULL);
++			  jcore_pit_local_init, jcore_pit_local_teardown);
+ 
+ 	return 0;
+ }
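
The jcore-pit fix pairs the new enable_percpu_irq() on the startup path
with a teardown callback so that an offlined CPU cannot leave its timer
interrupt armed. A sketch of that generic pattern under assumed driver
names (my_timer_*, CPUHP_AP_ONLINE_DYN), not the jcore code itself:

	#include <linux/cpuhotplug.h>
	#include <linux/init.h>
	#include <linux/interrupt.h>
	#include <linux/irq.h>

	static int my_timer_irq;	/* assumed percpu-capable IRQ, already
					 * set up with irq_set_percpu_devid()
					 * and request_percpu_irq() as above */

	static int my_timer_starting(unsigned int cpu)
	{
		/* Arm the per-CPU interrupt as this CPU comes online. */
		enable_percpu_irq(my_timer_irq, IRQ_TYPE_NONE);
		return 0;
	}

	static int my_timer_dying(unsigned int cpu)
	{
		/* Disarm it again before this CPU goes offline. */
		disable_percpu_irq(my_timer_irq);
		return 0;
	}

	static int __init my_timer_init(void)
	{
		int ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
					    "clockevents/mytimer:starting",
					    my_timer_starting, my_timer_dying);
		return ret < 0 ? ret : 0;	/* DYN returns the state id */
	}
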
+diff --git a/drivers/edac/qcom_edac.c b/drivers/edac/qcom_edac.c
+index a9a8ba067007a9..0fd7a777fe7d27 100644
+--- a/drivers/edac/qcom_edac.c
++++ b/drivers/edac/qcom_edac.c
+@@ -95,7 +95,7 @@ static int qcom_llcc_core_setup(struct llcc_drv_data *drv, struct regmap *llcc_b
+ 	 * Configure interrupt enable registers such that Tag, Data RAM related
+ 	 * interrupts are propagated to interrupt controller for servicing
+ 	 */
+-	ret = regmap_update_bits(llcc_bcast_regmap, drv->edac_reg_offset->cmn_interrupt_2_enable,
++	ret = regmap_update_bits(llcc_bcast_regmap, drv->edac_reg_offset->cmn_interrupt_0_enable,
+ 				 TRP0_INTERRUPT_ENABLE,
+ 				 TRP0_INTERRUPT_ENABLE);
+ 	if (ret)
+@@ -113,7 +113,7 @@ static int qcom_llcc_core_setup(struct llcc_drv_data *drv, struct regmap *llcc_b
+ 	if (ret)
+ 		return ret;
+ 
+-	ret = regmap_update_bits(llcc_bcast_regmap, drv->edac_reg_offset->cmn_interrupt_2_enable,
++	ret = regmap_update_bits(llcc_bcast_regmap, drv->edac_reg_offset->cmn_interrupt_0_enable,
+ 				 DRP0_INTERRUPT_ENABLE,
+ 				 DRP0_INTERRUPT_ENABLE);
+ 	if (ret)
+diff --git a/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c
+index a86ab9b35953f7..2641faa329cdd0 100644
+--- a/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c
++++ b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c
+@@ -254,8 +254,8 @@ static int scmi_imx_misc_ctrl_set(const struct scmi_protocol_handle *ph,
+ 	if (num > max_num)
+ 		return -EINVAL;
+ 
+-	ret = ph->xops->xfer_get_init(ph, SCMI_IMX_MISC_CTRL_SET, sizeof(*in),
+-				      0, &t);
++	ret = ph->xops->xfer_get_init(ph, SCMI_IMX_MISC_CTRL_SET,
++				      sizeof(*in) + num * sizeof(__le32), 0, &t);
+ 	if (ret)
+ 		return ret;
+ 
+diff --git a/drivers/firmware/imx/Kconfig b/drivers/firmware/imx/Kconfig
+index 907cd149c40a8b..c964f4924359fc 100644
+--- a/drivers/firmware/imx/Kconfig
++++ b/drivers/firmware/imx/Kconfig
+@@ -25,6 +25,7 @@ config IMX_SCU
+ 
+ config IMX_SCMI_MISC_DRV
+ 	tristate "IMX SCMI MISC Protocol driver"
++	depends on ARCH_MXC || COMPILE_TEST
+ 	default y if ARCH_MXC
+ 	help
+ 	  The System Controller Management Interface firmware (SCMI FW) is
+diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
+index 1e8f0bdb6ae3b4..209871c219d697 100644
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -3068,6 +3068,8 @@ static int gpiod_get_raw_value_commit(const struct gpio_desc *desc)
+ static int gpio_chip_get_multiple(struct gpio_chip *gc,
+ 				  unsigned long *mask, unsigned long *bits)
+ {
++	lockdep_assert_held(&gc->gpiodev->srcu);
++
+ 	if (gc->get_multiple)
+ 		return gc->get_multiple(gc, mask, bits);
+ 	if (gc->get) {
+@@ -3098,6 +3100,7 @@ int gpiod_get_array_value_complex(bool raw, bool can_sleep,
+ 				  struct gpio_array *array_info,
+ 				  unsigned long *value_bitmap)
+ {
++	struct gpio_chip *gc;
+ 	int ret, i = 0;
+ 
+ 	/*
+@@ -3109,10 +3112,15 @@ int gpiod_get_array_value_complex(bool raw, bool can_sleep,
+ 	    array_size <= array_info->size &&
+ 	    (void *)array_info == desc_array + array_info->size) {
+ 		if (!can_sleep)
+-			WARN_ON(array_info->chip->can_sleep);
++			WARN_ON(array_info->gdev->can_sleep);
++
++		guard(srcu)(&array_info->gdev->srcu);
++		gc = srcu_dereference(array_info->gdev->chip,
++				      &array_info->gdev->srcu);
++		if (!gc)
++			return -ENODEV;
+ 
+-		ret = gpio_chip_get_multiple(array_info->chip,
+-					     array_info->get_mask,
++		ret = gpio_chip_get_multiple(gc, array_info->get_mask,
+ 					     value_bitmap);
+ 		if (ret)
+ 			return ret;
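
The pattern added here (and mirrored in the set path below) is the
standard SRCU read side: hold the gpio_device's SRCU across the access
and re-validate the chip pointer, which may be NULL once the chip has
been removed. Reduced to its core as a sketch (with_live_chip is a
hypothetical name; the types come from gpiolib.h):

	#include <linux/cleanup.h>
	#include <linux/errno.h>
	#include <linux/srcu.h>
	#include "gpiolib.h"

	static int with_live_chip(struct gpio_device *gdev)
	{
		struct gpio_chip *gc;

		guard(srcu)(&gdev->srcu);	/* dropped at end of scope */

		gc = srcu_dereference(gdev->chip, &gdev->srcu);
		if (!gc)
			return -ENODEV;		/* chip was removed under us */

		/* gc remains valid until the guard goes out of scope */
		return 0;
	}
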
+@@ -3393,6 +3401,8 @@ static void gpiod_set_raw_value_commit(struct gpio_desc *desc, bool value)
+ static void gpio_chip_set_multiple(struct gpio_chip *gc,
+ 				   unsigned long *mask, unsigned long *bits)
+ {
++	lockdep_assert_held(&gc->gpiodev->srcu);
++
+ 	if (gc->set_multiple) {
+ 		gc->set_multiple(gc, mask, bits);
+ 	} else {
+@@ -3410,6 +3420,7 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep,
+ 				  struct gpio_array *array_info,
+ 				  unsigned long *value_bitmap)
+ {
++	struct gpio_chip *gc;
+ 	int i = 0;
+ 
+ 	/*
+@@ -3421,14 +3432,19 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep,
+ 	    array_size <= array_info->size &&
+ 	    (void *)array_info == desc_array + array_info->size) {
+ 		if (!can_sleep)
+-			WARN_ON(array_info->chip->can_sleep);
++			WARN_ON(array_info->gdev->can_sleep);
++
++		guard(srcu)(&array_info->gdev->srcu);
++		gc = srcu_dereference(array_info->gdev->chip,
++				      &array_info->gdev->srcu);
++		if (!gc)
++			return -ENODEV;
+ 
+ 		if (!raw && !bitmap_empty(array_info->invert_mask, array_size))
+ 			bitmap_xor(value_bitmap, value_bitmap,
+ 				   array_info->invert_mask, array_size);
+ 
+-		gpio_chip_set_multiple(array_info->chip, array_info->set_mask,
+-				       value_bitmap);
++		gpio_chip_set_multiple(gc, array_info->set_mask, value_bitmap);
+ 
+ 		i = find_first_zero_bit(array_info->set_mask, array_size);
+ 		if (i == array_size)
+@@ -4684,9 +4700,10 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
+ {
+ 	struct gpio_desc *desc;
+ 	struct gpio_descs *descs;
++	struct gpio_device *gdev;
+ 	struct gpio_array *array_info = NULL;
+-	struct gpio_chip *gc;
+ 	int count, bitmap_size;
++	unsigned long dflags;
+ 	size_t descs_size;
+ 
+ 	count = gpiod_count(dev, con_id);
+@@ -4707,7 +4724,7 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
+ 
+ 		descs->desc[descs->ndescs] = desc;
+ 
+-		gc = gpiod_to_chip(desc);
++		gdev = gpiod_to_gpio_device(desc);
+ 		/*
+ 		 * If pin hardware number of array member 0 is also 0, select
+ 		 * its chip as a candidate for fast bitmap processing path.
+@@ -4715,8 +4732,8 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
+ 		if (descs->ndescs == 0 && gpio_chip_hwgpio(desc) == 0) {
+ 			struct gpio_descs *array;
+ 
+-			bitmap_size = BITS_TO_LONGS(gc->ngpio > count ?
+-						    gc->ngpio : count);
++			bitmap_size = BITS_TO_LONGS(gdev->ngpio > count ?
++						    gdev->ngpio : count);
+ 
+ 			array = krealloc(descs, descs_size +
+ 					 struct_size(array_info, invert_mask, 3 * bitmap_size),
+@@ -4736,7 +4753,7 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
+ 
+ 			array_info->desc = descs->desc;
+ 			array_info->size = count;
+-			array_info->chip = gc;
++			array_info->gdev = gdev;
+ 			bitmap_set(array_info->get_mask, descs->ndescs,
+ 				   count - descs->ndescs);
+ 			bitmap_set(array_info->set_mask, descs->ndescs,
+@@ -4749,7 +4766,7 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
+ 			continue;
+ 
+ 		/* Unmark array members which don't belong to the 'fast' chip */
+-		if (array_info->chip != gc) {
++		if (array_info->gdev != gdev) {
+ 			__clear_bit(descs->ndescs, array_info->get_mask);
+ 			__clear_bit(descs->ndescs, array_info->set_mask);
+ 		}
+@@ -4772,9 +4789,10 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
+ 					    array_info->set_mask);
+ 			}
+ 		} else {
++			dflags = READ_ONCE(desc->flags);
+ 			/* Exclude open drain or open source from fast output */
+-			if (gpiochip_line_is_open_drain(gc, descs->ndescs) ||
+-			    gpiochip_line_is_open_source(gc, descs->ndescs))
++			if (test_bit(FLAG_OPEN_DRAIN, &dflags) ||
++			    test_bit(FLAG_OPEN_SOURCE, &dflags))
+ 				__clear_bit(descs->ndescs,
+ 					    array_info->set_mask);
+ 			/* Identify 'fast' pins which require inversion */
+@@ -4786,7 +4804,7 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
+ 	if (array_info)
+ 		dev_dbg(dev,
+ 			"GPIO array info: chip=%s, size=%d, get_mask=%lx, set_mask=%lx, invert_mask=%lx\n",
+-			array_info->chip->label, array_info->size,
++			array_info->gdev->label, array_info->size,
+ 			*array_info->get_mask, *array_info->set_mask,
+ 			*array_info->invert_mask);
+ 	return descs;
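
For context, the fast path reworked above backs the consumer-side array
API; typical usage looks like this sketch (the "bank" con_id and
read_bank() are illustrative):

	#include <linux/err.h>
	#include <linux/gpio/consumer.h>

	static int read_bank(struct device *dev)
	{
		struct gpio_descs *descs;
		unsigned long values = 0;
		int ret;

		descs = gpiod_get_array(dev, "bank", GPIOD_IN);
		if (IS_ERR(descs))
			return PTR_ERR(descs);

		/* descs->info is the gpio_array fast-path descriptor;
		 * per the code above it is only set up when member 0
		 * sits on hardware pin 0 of its chip. */
		ret = gpiod_get_array_value(descs->ndescs, descs->desc,
					    descs->info, &values);

		gpiod_put_array(descs);
		return ret;
	}
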
+diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
+index 067197d61d57e4..87ce3753500e4b 100644
+--- a/drivers/gpio/gpiolib.h
++++ b/drivers/gpio/gpiolib.h
+@@ -110,7 +110,7 @@ extern const char *const gpio_suffixes[];
+  *
+  * @desc:		Array of pointers to the GPIO descriptors
+  * @size:		Number of elements in desc
+- * @chip:		Parent GPIO chip
++ * @gdev:		Parent GPIO device
+  * @get_mask:		Get mask used in fastpath
+  * @set_mask:		Set mask used in fastpath
+  * @invert_mask:	Invert mask used in fastpath
+@@ -122,7 +122,7 @@ extern const char *const gpio_suffixes[];
+ struct gpio_array {
+ 	struct gpio_desc	**desc;
+ 	unsigned int		size;
+-	struct gpio_chip	*chip;
++	struct gpio_device	*gdev;
+ 	unsigned long		*get_mask;
+ 	unsigned long		*set_mask;
+ 	unsigned long		invert_mask[];
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index c27b4c36a7c0f5..32afcf9485245e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -119,9 +119,10 @@
+  * - 3.58.0 - Add GFX12 DCC support
+  * - 3.59.0 - Cleared VRAM
+  * - 3.60.0 - Add AMDGPU_TILING_GFX12_DCC_WRITE_COMPRESS_DISABLE (Vulkan requirement)
++ * - 3.61.0 - Contains fix for RV/PCO compute queues
+  */
+ #define KMS_DRIVER_MAJOR	3
+-#define KMS_DRIVER_MINOR	60
++#define KMS_DRIVER_MINOR	61
+ #define KMS_DRIVER_PATCHLEVEL	0
+ 
+ /*
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index e2501c98e107d3..05d1ae2ef84b4e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -7415,6 +7415,34 @@ static void gfx_v9_0_ring_emit_cleaner_shader(struct amdgpu_ring *ring)
+ 	amdgpu_ring_write(ring, 0);  /* RESERVED field, programmed to zero */
+ }
+ 
++static void gfx_v9_0_ring_begin_use_compute(struct amdgpu_ring *ring)
++{
++	struct amdgpu_device *adev = ring->adev;
++
++	amdgpu_gfx_enforce_isolation_ring_begin_use(ring);
++
++	/* Raven and PCO APUs seem to have stability issues
++	 * with compute and gfxoff and gfx pg.  Disable gfx pg during
++	 * submission and allow again afterwards.
++	 */
++	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 1, 0))
++		gfx_v9_0_set_powergating_state(adev, AMD_PG_STATE_UNGATE);
++}
++
++static void gfx_v9_0_ring_end_use_compute(struct amdgpu_ring *ring)
++{
++	struct amdgpu_device *adev = ring->adev;
++
++	/* Raven and PCO APUs seem to have stability issues
++	 * with compute and gfxoff and gfx pg.  Disable gfx pg during
++	 * submission and allow again afterwards.
++	 */
++	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 1, 0))
++		gfx_v9_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
++
++	amdgpu_gfx_enforce_isolation_ring_end_use(ring);
++}
++
+ static const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
+ 	.name = "gfx_v9_0",
+ 	.early_init = gfx_v9_0_early_init,
+@@ -7591,8 +7619,8 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
+ 	.emit_wave_limit = gfx_v9_0_emit_wave_limit,
+ 	.reset = gfx_v9_0_reset_kcq,
+ 	.emit_cleaner_shader = gfx_v9_0_ring_emit_cleaner_shader,
+-	.begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
+-	.end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
++	.begin_use = gfx_v9_0_ring_begin_use_compute,
++	.end_use = gfx_v9_0_ring_end_use_compute,
+ };
+ 
+ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
+diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
+index 02f7ba8c93cd45..7062f12b5b7511 100644
+--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
++++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
+@@ -4117,7 +4117,8 @@ static const uint32_t cwsr_trap_gfx12_hex[] = {
+ 	0x0000ffff, 0x8bfe7e7e,
+ 	0x8bea6a6a, 0xb97af804,
+ 	0xbe804ec2, 0xbf94fffe,
+-	0xbe804a6c, 0xbfb10000,
++	0xbe804a6c, 0xbe804ec2,
++	0xbf94fffe, 0xbfb10000,
+ 	0xbf9f0000, 0xbf9f0000,
+ 	0xbf9f0000, 0xbf9f0000,
+ 	0xbf9f0000, 0x00000000,
+diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
+index 44772eec9ef4df..96fbb16ceb216d 100644
+--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
++++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
+@@ -34,41 +34,24 @@
+  *   cpp -DASIC_FAMILY=CHIP_PLUM_BONITO cwsr_trap_handler_gfx10.asm -P -o gfx11.sp3
+  *   sp3 gfx11.sp3 -hex gfx11.hex
+  *
+- * gfx12:
+- *   cpp -DASIC_FAMILY=CHIP_GFX12 cwsr_trap_handler_gfx10.asm -P -o gfx12.sp3
+- *   sp3 gfx12.sp3 -hex gfx12.hex
+  */
+ 
+ #define CHIP_NAVI10 26
+ #define CHIP_SIENNA_CICHLID 30
+ #define CHIP_PLUM_BONITO 36
+-#define CHIP_GFX12 37
+ 
+ #define NO_SQC_STORE (ASIC_FAMILY >= CHIP_SIENNA_CICHLID)
+ #define HAVE_XNACK (ASIC_FAMILY < CHIP_SIENNA_CICHLID)
+ #define HAVE_SENDMSG_RTN (ASIC_FAMILY >= CHIP_PLUM_BONITO)
+ #define HAVE_BUFFER_LDS_LOAD (ASIC_FAMILY < CHIP_PLUM_BONITO)
+-#define SW_SA_TRAP (ASIC_FAMILY >= CHIP_PLUM_BONITO && ASIC_FAMILY < CHIP_GFX12)
++#define SW_SA_TRAP (ASIC_FAMILY == CHIP_PLUM_BONITO)
+ #define SAVE_AFTER_XNACK_ERROR (HAVE_XNACK && !NO_SQC_STORE) // workaround for TCP store failure after XNACK error when ALLOW_REPLAY=0, for debugger
+ #define SINGLE_STEP_MISSED_WORKAROUND 1	//workaround for lost MODE.DEBUG_EN exception when SAVECTX raised
+ 
+-#if ASIC_FAMILY < CHIP_GFX12
+ #define S_COHERENCE glc:1
+ #define V_COHERENCE slc:1 glc:1
+ #define S_WAITCNT_0 s_waitcnt 0
+-#else
+-#define S_COHERENCE scope:SCOPE_SYS
+-#define V_COHERENCE scope:SCOPE_SYS
+-#define S_WAITCNT_0 s_wait_idle
+-
+-#define HW_REG_SHADER_FLAT_SCRATCH_LO HW_REG_WAVE_SCRATCH_BASE_LO
+-#define HW_REG_SHADER_FLAT_SCRATCH_HI HW_REG_WAVE_SCRATCH_BASE_HI
+-#define HW_REG_GPR_ALLOC HW_REG_WAVE_GPR_ALLOC
+-#define HW_REG_LDS_ALLOC HW_REG_WAVE_LDS_ALLOC
+-#define HW_REG_MODE HW_REG_WAVE_MODE
+-#endif
+ 
+-#if ASIC_FAMILY < CHIP_GFX12
+ var SQ_WAVE_STATUS_SPI_PRIO_MASK		= 0x00000006
+ var SQ_WAVE_STATUS_HALT_MASK			= 0x2000
+ var SQ_WAVE_STATUS_ECC_ERR_MASK			= 0x20000
+@@ -81,21 +64,6 @@ var S_STATUS_ALWAYS_CLEAR_MASK			= SQ_WAVE_STATUS_SPI_PRIO_MASK|SQ_WAVE_STATUS_E
+ var S_STATUS_HALT_MASK				= SQ_WAVE_STATUS_HALT_MASK
+ var S_SAVE_PC_HI_TRAP_ID_MASK			= 0x00FF0000
+ var S_SAVE_PC_HI_HT_MASK			= 0x01000000
+-#else
+-var SQ_WAVE_STATE_PRIV_BARRIER_COMPLETE_MASK	= 0x4
+-var SQ_WAVE_STATE_PRIV_SCC_SHIFT		= 9
+-var SQ_WAVE_STATE_PRIV_SYS_PRIO_MASK		= 0xC00
+-var SQ_WAVE_STATE_PRIV_HALT_MASK		= 0x4000
+-var SQ_WAVE_STATE_PRIV_POISON_ERR_MASK		= 0x8000
+-var SQ_WAVE_STATE_PRIV_POISON_ERR_SHIFT		= 15
+-var SQ_WAVE_STATUS_WAVE64_SHIFT			= 29
+-var SQ_WAVE_STATUS_WAVE64_SIZE			= 1
+-var SQ_WAVE_LDS_ALLOC_GRANULARITY		= 9
+-var S_STATUS_HWREG				= HW_REG_WAVE_STATE_PRIV
+-var S_STATUS_ALWAYS_CLEAR_MASK			= SQ_WAVE_STATE_PRIV_SYS_PRIO_MASK|SQ_WAVE_STATE_PRIV_POISON_ERR_MASK
+-var S_STATUS_HALT_MASK				= SQ_WAVE_STATE_PRIV_HALT_MASK
+-var S_SAVE_PC_HI_TRAP_ID_MASK			= 0xF0000000
+-#endif
+ 
+ var SQ_WAVE_STATUS_NO_VGPRS_SHIFT		= 24
+ var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT		= 12
+@@ -110,7 +78,6 @@ var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT		= 8
+ var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT		= 12
+ #endif
+ 
+-#if ASIC_FAMILY < CHIP_GFX12
+ var SQ_WAVE_TRAPSTS_SAVECTX_MASK		= 0x400
+ var SQ_WAVE_TRAPSTS_EXCP_MASK			= 0x1FF
+ var SQ_WAVE_TRAPSTS_SAVECTX_SHIFT		= 10
+@@ -161,39 +128,6 @@ var S_TRAPSTS_RESTORE_PART_3_SIZE		= 32 - S_TRAPSTS_RESTORE_PART_3_SHIFT
+ var S_TRAPSTS_HWREG				= HW_REG_TRAPSTS
+ var S_TRAPSTS_SAVE_CONTEXT_MASK			= SQ_WAVE_TRAPSTS_SAVECTX_MASK
+ var S_TRAPSTS_SAVE_CONTEXT_SHIFT		= SQ_WAVE_TRAPSTS_SAVECTX_SHIFT
+-#else
+-var SQ_WAVE_EXCP_FLAG_PRIV_ADDR_WATCH_MASK	= 0xF
+-var SQ_WAVE_EXCP_FLAG_PRIV_MEM_VIOL_MASK	= 0x10
+-var SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_SHIFT	= 5
+-var SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_MASK	= 0x20
+-var SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_MASK	= 0x40
+-var SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_SHIFT	= 6
+-var SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_MASK	= 0x80
+-var SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_SHIFT	= 7
+-var SQ_WAVE_EXCP_FLAG_PRIV_WAVE_START_MASK	= 0x100
+-var SQ_WAVE_EXCP_FLAG_PRIV_WAVE_START_SHIFT	= 8
+-var SQ_WAVE_EXCP_FLAG_PRIV_WAVE_END_MASK	= 0x200
+-var SQ_WAVE_EXCP_FLAG_PRIV_TRAP_AFTER_INST_MASK	= 0x800
+-var SQ_WAVE_TRAP_CTRL_ADDR_WATCH_MASK		= 0x80
+-var SQ_WAVE_TRAP_CTRL_TRAP_AFTER_INST_MASK	= 0x200
+-
+-var S_TRAPSTS_HWREG				= HW_REG_WAVE_EXCP_FLAG_PRIV
+-var S_TRAPSTS_SAVE_CONTEXT_MASK			= SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_MASK
+-var S_TRAPSTS_SAVE_CONTEXT_SHIFT		= SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_SHIFT
+-var S_TRAPSTS_NON_MASKABLE_EXCP_MASK		= SQ_WAVE_EXCP_FLAG_PRIV_MEM_VIOL_MASK		|\
+-						  SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_MASK	|\
+-						  SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_MASK		|\
+-						  SQ_WAVE_EXCP_FLAG_PRIV_WAVE_START_MASK	|\
+-						  SQ_WAVE_EXCP_FLAG_PRIV_WAVE_END_MASK		|\
+-						  SQ_WAVE_EXCP_FLAG_PRIV_TRAP_AFTER_INST_MASK
+-var S_TRAPSTS_RESTORE_PART_1_SIZE		= SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_SHIFT
+-var S_TRAPSTS_RESTORE_PART_2_SHIFT		= SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_SHIFT
+-var S_TRAPSTS_RESTORE_PART_2_SIZE		= SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_SHIFT - SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_SHIFT
+-var S_TRAPSTS_RESTORE_PART_3_SHIFT		= SQ_WAVE_EXCP_FLAG_PRIV_WAVE_START_SHIFT
+-var S_TRAPSTS_RESTORE_PART_3_SIZE		= 32 - S_TRAPSTS_RESTORE_PART_3_SHIFT
+-var BARRIER_STATE_SIGNAL_OFFSET			= 16
+-var BARRIER_STATE_VALID_OFFSET			= 0
+-#endif
+ 
+ // bits [31:24] unused by SPI debug data
+ var TTMP11_SAVE_REPLAY_W64H_SHIFT		= 31
+@@ -305,11 +239,7 @@ L_TRAP_NO_BARRIER:
+ 
+ L_HALTED:
+ 	// Host trap may occur while wave is halted.
+-#if ASIC_FAMILY < CHIP_GFX12
+ 	s_and_b32	ttmp2, s_save_pc_hi, S_SAVE_PC_HI_TRAP_ID_MASK
+-#else
+-	s_and_b32	ttmp2, s_save_trapsts, SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_MASK
+-#endif
+ 	s_cbranch_scc1	L_FETCH_2ND_TRAP
+ 
+ L_CHECK_SAVE:
+@@ -336,7 +266,6 @@ L_NOT_HALTED:
+ 	// Check for maskable exceptions in trapsts.excp and trapsts.excp_hi.
+ 	// Maskable exceptions only cause the wave to enter the trap handler if
+ 	// their respective bit in mode.excp_en is set.
+-#if ASIC_FAMILY < CHIP_GFX12
+ 	s_and_b32	ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_EXCP_MASK|SQ_WAVE_TRAPSTS_EXCP_HI_MASK
+ 	s_cbranch_scc0	L_CHECK_TRAP_ID
+ 
+@@ -349,17 +278,6 @@ L_NOT_ADDR_WATCH:
+ 	s_lshl_b32	ttmp2, ttmp2, SQ_WAVE_MODE_EXCP_EN_SHIFT
+ 	s_and_b32	ttmp2, ttmp2, ttmp3
+ 	s_cbranch_scc1	L_FETCH_2ND_TRAP
+-#else
+-	s_getreg_b32	ttmp2, hwreg(HW_REG_WAVE_EXCP_FLAG_USER)
+-	s_and_b32	ttmp3, s_save_trapsts, SQ_WAVE_EXCP_FLAG_PRIV_ADDR_WATCH_MASK
+-	s_cbranch_scc0	L_NOT_ADDR_WATCH
+-	s_or_b32	ttmp2, ttmp2, SQ_WAVE_TRAP_CTRL_ADDR_WATCH_MASK
+-
+-L_NOT_ADDR_WATCH:
+-	s_getreg_b32	ttmp3, hwreg(HW_REG_WAVE_TRAP_CTRL)
+-	s_and_b32	ttmp2, ttmp3, ttmp2
+-	s_cbranch_scc1	L_FETCH_2ND_TRAP
+-#endif
+ 
+ L_CHECK_TRAP_ID:
+ 	// Check trap_id != 0
+@@ -369,13 +287,8 @@ L_CHECK_TRAP_ID:
+ #if SINGLE_STEP_MISSED_WORKAROUND
+ 	// Prioritize single step exception over context save.
+ 	// Second-level trap will halt wave and RFE, re-entering for SAVECTX.
+-#if ASIC_FAMILY < CHIP_GFX12
+ 	s_getreg_b32	ttmp2, hwreg(HW_REG_MODE)
+ 	s_and_b32	ttmp2, ttmp2, SQ_WAVE_MODE_DEBUG_EN_MASK
+-#else
+-	// WAVE_TRAP_CTRL is already in ttmp3.
+-	s_and_b32	ttmp3, ttmp3, SQ_WAVE_TRAP_CTRL_TRAP_AFTER_INST_MASK
+-#endif
+ 	s_cbranch_scc1	L_FETCH_2ND_TRAP
+ #endif
+ 
+@@ -425,12 +338,7 @@ L_NO_NEXT_TRAP:
+ 	s_cbranch_scc1	L_TRAP_CASE
+ 
+ 	// Host trap will not cause trap re-entry.
+-#if ASIC_FAMILY < CHIP_GFX12
+ 	s_and_b32	ttmp2, s_save_pc_hi, S_SAVE_PC_HI_HT_MASK
+-#else
+-	s_getreg_b32	ttmp2, hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV)
+-	s_and_b32	ttmp2, ttmp2, SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_MASK
+-#endif
+ 	s_cbranch_scc1	L_EXIT_TRAP
+ 	s_or_b32	s_save_status, s_save_status, S_STATUS_HALT_MASK
+ 
+@@ -457,16 +365,7 @@ L_EXIT_TRAP:
+ 	s_and_b64	exec, exec, exec					// Restore STATUS.EXECZ, not writable by s_setreg_b32
+ 	s_and_b64	vcc, vcc, vcc						// Restore STATUS.VCCZ, not writable by s_setreg_b32
+ 
+-#if ASIC_FAMILY < CHIP_GFX12
+ 	s_setreg_b32	hwreg(S_STATUS_HWREG), s_save_status
+-#else
+-	// STATE_PRIV.BARRIER_COMPLETE may have changed since we read it.
+-	// Only restore fields which the trap handler changes.
+-	s_lshr_b32	s_save_status, s_save_status, SQ_WAVE_STATE_PRIV_SCC_SHIFT
+-	s_setreg_b32	hwreg(S_STATUS_HWREG, SQ_WAVE_STATE_PRIV_SCC_SHIFT, \
+-		SQ_WAVE_STATE_PRIV_POISON_ERR_SHIFT - SQ_WAVE_STATE_PRIV_SCC_SHIFT + 1), s_save_status
+-#endif
+-
+ 	s_rfe_b64	[ttmp0, ttmp1]
+ 
+ L_SAVE:
+@@ -478,14 +377,6 @@ L_SAVE:
+ 	s_endpgm
+ L_HAVE_VGPRS:
+ #endif
+-#if ASIC_FAMILY >= CHIP_GFX12
+-	s_getreg_b32	s_save_tmp, hwreg(HW_REG_WAVE_STATUS)
+-	s_bitcmp1_b32	s_save_tmp, SQ_WAVE_STATUS_NO_VGPRS_SHIFT
+-	s_cbranch_scc0	L_HAVE_VGPRS
+-	s_endpgm
+-L_HAVE_VGPRS:
+-#endif
+-
+ 	s_and_b32	s_save_pc_hi, s_save_pc_hi, 0x0000ffff			//pc[47:32]
+ 	s_mov_b32	s_save_tmp, 0
+ 	s_setreg_b32	hwreg(S_TRAPSTS_HWREG, S_TRAPSTS_SAVE_CONTEXT_SHIFT, 1), s_save_tmp	//clear saveCtx bit
+@@ -671,19 +562,6 @@ L_SAVE_HWREG:
+ 	s_mov_b32	m0, 0x0							//Next lane of v2 to write to
+ #endif
+ 
+-#if ASIC_FAMILY >= CHIP_GFX12
+-	// Ensure no further changes to barrier or LDS state.
+-	// STATE_PRIV.BARRIER_COMPLETE may change up to this point.
+-	s_barrier_signal	-2
+-	s_barrier_wait	-2
+-
+-	// Re-read final state of BARRIER_COMPLETE field for save.
+-	s_getreg_b32	s_save_tmp, hwreg(S_STATUS_HWREG)
+-	s_and_b32	s_save_tmp, s_save_tmp, SQ_WAVE_STATE_PRIV_BARRIER_COMPLETE_MASK
+-	s_andn2_b32	s_save_status, s_save_status, SQ_WAVE_STATE_PRIV_BARRIER_COMPLETE_MASK
+-	s_or_b32	s_save_status, s_save_status, s_save_tmp
+-#endif
+-
+ 	write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset)
+ 	write_hwreg_to_mem(s_save_pc_lo, s_save_buf_rsrc0, s_save_mem_offset)
+ 	s_andn2_b32	s_save_tmp, s_save_pc_hi, S_SAVE_PC_HI_FIRST_WAVE_MASK
+@@ -707,21 +585,6 @@ L_SAVE_HWREG:
+ 	s_getreg_b32	s_save_m0, hwreg(HW_REG_SHADER_FLAT_SCRATCH_HI)
+ 	write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset)
+ 
+-#if ASIC_FAMILY >= CHIP_GFX12
+-	s_getreg_b32	s_save_m0, hwreg(HW_REG_WAVE_EXCP_FLAG_USER)
+-	write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset)
+-
+-	s_getreg_b32	s_save_m0, hwreg(HW_REG_WAVE_TRAP_CTRL)
+-	write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset)
+-
+-	s_getreg_b32	s_save_tmp, hwreg(HW_REG_WAVE_STATUS)
+-	write_hwreg_to_mem(s_save_tmp, s_save_buf_rsrc0, s_save_mem_offset)
+-
+-	s_get_barrier_state s_save_tmp, -1
+-	s_wait_kmcnt (0)
+-	write_hwreg_to_mem(s_save_tmp, s_save_buf_rsrc0, s_save_mem_offset)
+-#endif
+-
+ #if NO_SQC_STORE
+ 	// Write HWREGs with 16 VGPR lanes. TTMPs occupy space after this.
+ 	s_mov_b32       exec_lo, 0xFFFF
+@@ -814,9 +677,7 @@ L_SAVE_LDS_NORMAL:
+ 	s_and_b32	s_save_alloc_size, s_save_alloc_size, 0xFFFFFFFF	//lds_size is zero?
+ 	s_cbranch_scc0	L_SAVE_LDS_DONE						//no lds used? jump to L_SAVE_DONE
+ 
+-#if ASIC_FAMILY < CHIP_GFX12
+ 	s_barrier								//LDS is used? wait for other waves in the same TG
+-#endif
+ 	s_and_b32	s_save_tmp, s_save_pc_hi, S_SAVE_PC_HI_FIRST_WAVE_MASK
+ 	s_cbranch_scc0	L_SAVE_LDS_DONE
+ 
+@@ -1081,11 +942,6 @@ L_RESTORE:
+ 	s_mov_b32	s_restore_buf_rsrc2, 0					//NUM_RECORDS initial value = 0 (in bytes)
+ 	s_mov_b32	s_restore_buf_rsrc3, S_RESTORE_BUF_RSRC_WORD3_MISC
+ 
+-#if ASIC_FAMILY >= CHIP_GFX12
+-	// Save s_restore_spi_init_hi for later use.
+-	s_mov_b32 s_restore_spi_init_hi_save, s_restore_spi_init_hi
+-#endif
+-
+ 	//determine it is wave32 or wave64
+ 	get_wave_size2(s_restore_size)
+ 
+@@ -1320,9 +1176,7 @@ L_RESTORE_SGPR:
+ 	// s_barrier with MODE.DEBUG_EN=1, STATUS.PRIV=1 incorrectly asserts debug exception.
+ 	// Clear DEBUG_EN before and restore MODE after the barrier.
+ 	s_setreg_imm32_b32	hwreg(HW_REG_MODE), 0
+-#if ASIC_FAMILY < CHIP_GFX12
+ 	s_barrier								//barrier to ensure the readiness of LDS before access attempts from any other wave in the same TG
+-#endif
+ 
+ 	/* restore HW registers */
+ L_RESTORE_HWREG:
+@@ -1334,11 +1188,6 @@ L_RESTORE_HWREG:
+ 
+ 	s_mov_b32	s_restore_buf_rsrc2, 0x1000000				//NUM_RECORDS in bytes
+ 
+-#if ASIC_FAMILY >= CHIP_GFX12
+-	// Restore s_restore_spi_init_hi before the saved value gets clobbered.
+-	s_mov_b32 s_restore_spi_init_hi, s_restore_spi_init_hi_save
+-#endif
+-
+ 	read_hwreg_from_mem(s_restore_m0, s_restore_buf_rsrc0, s_restore_mem_offset)
+ 	read_hwreg_from_mem(s_restore_pc_lo, s_restore_buf_rsrc0, s_restore_mem_offset)
+ 	read_hwreg_from_mem(s_restore_pc_hi, s_restore_buf_rsrc0, s_restore_mem_offset)
+@@ -1358,44 +1207,6 @@ L_RESTORE_HWREG:
+ 
+ 	s_setreg_b32	hwreg(HW_REG_SHADER_FLAT_SCRATCH_HI), s_restore_flat_scratch
+ 
+-#if ASIC_FAMILY >= CHIP_GFX12
+-	read_hwreg_from_mem(s_restore_tmp, s_restore_buf_rsrc0, s_restore_mem_offset)
+-	S_WAITCNT_0
+-	s_setreg_b32	hwreg(HW_REG_WAVE_EXCP_FLAG_USER), s_restore_tmp
+-
+-	read_hwreg_from_mem(s_restore_tmp, s_restore_buf_rsrc0, s_restore_mem_offset)
+-	S_WAITCNT_0
+-	s_setreg_b32	hwreg(HW_REG_WAVE_TRAP_CTRL), s_restore_tmp
+-
+-	// Only the first wave needs to restore the workgroup barrier.
+-	s_and_b32	s_restore_tmp, s_restore_spi_init_hi, S_RESTORE_SPI_INIT_FIRST_WAVE_MASK
+-	s_cbranch_scc0	L_SKIP_BARRIER_RESTORE
+-
+-	// Skip over WAVE_STATUS, since there is no state to restore from it
+-	s_add_u32	s_restore_mem_offset, s_restore_mem_offset, 4
+-
+-	read_hwreg_from_mem(s_restore_tmp, s_restore_buf_rsrc0, s_restore_mem_offset)
+-	S_WAITCNT_0
+-
+-	s_bitcmp1_b32	s_restore_tmp, BARRIER_STATE_VALID_OFFSET
+-	s_cbranch_scc0	L_SKIP_BARRIER_RESTORE
+-
+-	// extract the saved signal count from s_restore_tmp
+-	s_lshr_b32	s_restore_tmp, s_restore_tmp, BARRIER_STATE_SIGNAL_OFFSET
+-
+-	// We need to call s_barrier_signal repeatedly to restore the signal
+-	// count of the work group barrier.  The member count is already
+-	// initialized with the number of waves in the work group.
+-L_BARRIER_RESTORE_LOOP:
+-	s_and_b32	s_restore_tmp, s_restore_tmp, s_restore_tmp
+-	s_cbranch_scc0	L_SKIP_BARRIER_RESTORE
+-	s_barrier_signal	-1
+-	s_add_i32	s_restore_tmp, s_restore_tmp, -1
+-	s_branch	L_BARRIER_RESTORE_LOOP
+-
+-L_SKIP_BARRIER_RESTORE:
+-#endif
+-
+ 	s_mov_b32	m0, s_restore_m0
+ 	s_mov_b32	exec_lo, s_restore_exec_lo
+ 	s_mov_b32	exec_hi, s_restore_exec_hi
+@@ -1453,13 +1264,6 @@ L_RETURN_WITHOUT_PRIV:
+ 
+ 	s_setreg_b32	hwreg(S_STATUS_HWREG), s_restore_status			// SCC is included, which is changed by previous salu
+ 
+-#if ASIC_FAMILY >= CHIP_GFX12
+-	// Make barrier and LDS state visible to all waves in the group.
+-	// STATE_PRIV.BARRIER_COMPLETE may change after this point.
+-	s_barrier_signal	-2
+-	s_barrier_wait	-2
+-#endif
+-
+ 	s_rfe_b64	s_restore_pc_lo						//Return to the main shader program and resume execution
+ 
+ L_END_PGM:
+@@ -1598,11 +1402,7 @@ function get_hwreg_size_bytes
+ end
+ 
+ function get_wave_size2(s_reg)
+-#if ASIC_FAMILY < CHIP_GFX12
+ 	s_getreg_b32	s_reg, hwreg(HW_REG_IB_STS2,SQ_WAVE_IB_STS2_WAVE64_SHIFT,SQ_WAVE_IB_STS2_WAVE64_SIZE)
+-#else
+-	s_getreg_b32	s_reg, hwreg(HW_REG_WAVE_STATUS,SQ_WAVE_STATUS_WAVE64_SHIFT,SQ_WAVE_STATUS_WAVE64_SIZE)
+-#endif
+ 	s_lshl_b32	s_reg, s_reg, S_WAVE_SIZE
+ end
+ 
+diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm
+new file mode 100644
+index 00000000000000..7b9d36e5fa4372
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm
+@@ -0,0 +1,1130 @@
++/*
++ * Copyright 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++/* To compile this assembly code:
++ *
++ * gfx12:
++ *   cpp -DASIC_FAMILY=CHIP_GFX12 cwsr_trap_handler_gfx12.asm -P -o gfx12.sp3
++ *   sp3 gfx12.sp3 -hex gfx12.hex
++ */
++
++#define CHIP_GFX12 37
++
++#define SINGLE_STEP_MISSED_WORKAROUND 1	//workaround for lost TRAP_AFTER_INST exception when SAVECTX raised
++
++var SQ_WAVE_STATE_PRIV_BARRIER_COMPLETE_MASK	= 0x4
++var SQ_WAVE_STATE_PRIV_SCC_SHIFT		= 9
++var SQ_WAVE_STATE_PRIV_SYS_PRIO_MASK		= 0xC00
++var SQ_WAVE_STATE_PRIV_HALT_MASK		= 0x4000
++var SQ_WAVE_STATE_PRIV_POISON_ERR_MASK		= 0x8000
++var SQ_WAVE_STATE_PRIV_POISON_ERR_SHIFT		= 15
++var SQ_WAVE_STATUS_WAVE64_SHIFT			= 29
++var SQ_WAVE_STATUS_WAVE64_SIZE			= 1
++var SQ_WAVE_STATUS_NO_VGPRS_SHIFT		= 24
++var SQ_WAVE_STATE_PRIV_ALWAYS_CLEAR_MASK	= SQ_WAVE_STATE_PRIV_SYS_PRIO_MASK|SQ_WAVE_STATE_PRIV_POISON_ERR_MASK
++var S_SAVE_PC_HI_TRAP_ID_MASK			= 0xF0000000
++
++var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT		= 12
++var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE		= 9
++var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE		= 8
++var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT		= 12
++var SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SHIFT	= 24
++var SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SIZE	= 4
++var SQ_WAVE_LDS_ALLOC_GRANULARITY		= 9
++
++var SQ_WAVE_EXCP_FLAG_PRIV_ADDR_WATCH_MASK	= 0xF
++var SQ_WAVE_EXCP_FLAG_PRIV_MEM_VIOL_MASK	= 0x10
++var SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_SHIFT	= 5
++var SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_MASK	= 0x20
++var SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_MASK	= 0x40
++var SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_SHIFT	= 6
++var SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_MASK	= 0x80
++var SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_SHIFT	= 7
++var SQ_WAVE_EXCP_FLAG_PRIV_WAVE_START_MASK	= 0x100
++var SQ_WAVE_EXCP_FLAG_PRIV_WAVE_START_SHIFT	= 8
++var SQ_WAVE_EXCP_FLAG_PRIV_WAVE_END_MASK	= 0x200
++var SQ_WAVE_EXCP_FLAG_PRIV_TRAP_AFTER_INST_MASK	= 0x800
++var SQ_WAVE_TRAP_CTRL_ADDR_WATCH_MASK		= 0x80
++var SQ_WAVE_TRAP_CTRL_TRAP_AFTER_INST_MASK	= 0x200
++
++var SQ_WAVE_EXCP_FLAG_PRIV_NON_MASKABLE_EXCP_MASK= SQ_WAVE_EXCP_FLAG_PRIV_MEM_VIOL_MASK		|\
++						  SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_MASK	|\
++						  SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_MASK		|\
++						  SQ_WAVE_EXCP_FLAG_PRIV_WAVE_START_MASK	|\
++						  SQ_WAVE_EXCP_FLAG_PRIV_WAVE_END_MASK		|\
++						  SQ_WAVE_EXCP_FLAG_PRIV_TRAP_AFTER_INST_MASK
++var SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_1_SIZE	= SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_SHIFT
++var SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_2_SHIFT	= SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_SHIFT
++var SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_2_SIZE	= SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_SHIFT - SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_SHIFT
++var SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_3_SHIFT	= SQ_WAVE_EXCP_FLAG_PRIV_WAVE_START_SHIFT
++var SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_3_SIZE	= 32 - SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_3_SHIFT
++var BARRIER_STATE_SIGNAL_OFFSET			= 16
++var BARRIER_STATE_VALID_OFFSET			= 0
++
++var TTMP11_DEBUG_TRAP_ENABLED_SHIFT		= 23
++var TTMP11_DEBUG_TRAP_ENABLED_MASK		= 0x800000
++
++// SQ_SEL_X/Y/Z/W, BUF_NUM_FORMAT_FLOAT, (0 for MUBUF stride[17:14]
++// when ADD_TID_ENABLE and BUF_DATA_FORMAT_32 for MTBUF), ADD_TID_ENABLE
++var S_SAVE_BUF_RSRC_WORD1_STRIDE		= 0x00040000
++var S_SAVE_BUF_RSRC_WORD3_MISC			= 0x10807FAC
++var S_SAVE_SPI_INIT_FIRST_WAVE_MASK		= 0x04000000
++var S_SAVE_SPI_INIT_FIRST_WAVE_SHIFT		= 26
++
++var S_SAVE_PC_HI_FIRST_WAVE_MASK		= 0x80000000
++var S_SAVE_PC_HI_FIRST_WAVE_SHIFT		= 31
++
++var s_sgpr_save_num				= 108
++
++var s_save_spi_init_lo				= exec_lo
++var s_save_spi_init_hi				= exec_hi
++var s_save_pc_lo				= ttmp0
++var s_save_pc_hi				= ttmp1
++var s_save_exec_lo				= ttmp2
++var s_save_exec_hi				= ttmp3
++var s_save_state_priv				= ttmp12
++var s_save_excp_flag_priv			= ttmp15
++var s_save_xnack_mask				= s_save_excp_flag_priv
++var s_wave_size					= ttmp7
++var s_save_buf_rsrc0				= ttmp8
++var s_save_buf_rsrc1				= ttmp9
++var s_save_buf_rsrc2				= ttmp10
++var s_save_buf_rsrc3				= ttmp11
++var s_save_mem_offset				= ttmp4
++var s_save_alloc_size				= s_save_excp_flag_priv
++var s_save_tmp					= ttmp14
++var s_save_m0					= ttmp5
++var s_save_ttmps_lo				= s_save_tmp
++var s_save_ttmps_hi				= s_save_excp_flag_priv
++
++var S_RESTORE_BUF_RSRC_WORD1_STRIDE		= S_SAVE_BUF_RSRC_WORD1_STRIDE
++var S_RESTORE_BUF_RSRC_WORD3_MISC		= S_SAVE_BUF_RSRC_WORD3_MISC
++
++var S_RESTORE_SPI_INIT_FIRST_WAVE_MASK		= 0x04000000
++var S_RESTORE_SPI_INIT_FIRST_WAVE_SHIFT		= 26
++var S_WAVE_SIZE					= 25
++
++var s_restore_spi_init_lo			= exec_lo
++var s_restore_spi_init_hi			= exec_hi
++var s_restore_mem_offset			= ttmp12
++var s_restore_alloc_size			= ttmp3
++var s_restore_tmp				= ttmp2
++var s_restore_mem_offset_save			= s_restore_tmp
++var s_restore_m0				= s_restore_alloc_size
++var s_restore_mode				= ttmp7
++var s_restore_flat_scratch			= s_restore_tmp
++var s_restore_pc_lo				= ttmp0
++var s_restore_pc_hi				= ttmp1
++var s_restore_exec_lo				= ttmp4
++var s_restore_exec_hi				= ttmp5
++var s_restore_state_priv			= ttmp14
++var s_restore_excp_flag_priv			= ttmp15
++var s_restore_xnack_mask			= ttmp13
++var s_restore_buf_rsrc0				= ttmp8
++var s_restore_buf_rsrc1				= ttmp9
++var s_restore_buf_rsrc2				= ttmp10
++var s_restore_buf_rsrc3				= ttmp11
++var s_restore_size				= ttmp6
++var s_restore_ttmps_lo				= s_restore_tmp
++var s_restore_ttmps_hi				= s_restore_alloc_size
++var s_restore_spi_init_hi_save			= s_restore_exec_hi
++
++shader main
++	asic(DEFAULT)
++	type(CS)
++	wave_size(32)
++
++	s_branch	L_SKIP_RESTORE						//NOT restore. might be a regular trap or save
++
++L_JUMP_TO_RESTORE:
++	s_branch	L_RESTORE
++
++L_SKIP_RESTORE:
++	s_getreg_b32	s_save_state_priv, hwreg(HW_REG_WAVE_STATE_PRIV)	//save STATUS since we will change SCC
++
++	// Clear SPI_PRIO: do not save with elevated priority.
++	// Clear ECC_ERR: prevents SQC store and triggers FATAL_HALT if setreg'd.
++	s_andn2_b32	s_save_state_priv, s_save_state_priv, SQ_WAVE_STATE_PRIV_ALWAYS_CLEAR_MASK
++
++	s_getreg_b32	s_save_excp_flag_priv, hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV)
++
++	s_and_b32       ttmp2, s_save_state_priv, SQ_WAVE_STATE_PRIV_HALT_MASK
++	s_cbranch_scc0	L_NOT_HALTED
++
++L_HALTED:
++	// Host trap may occur while wave is halted.
++	s_and_b32	ttmp2, s_save_excp_flag_priv, SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_MASK
++	s_cbranch_scc1	L_FETCH_2ND_TRAP
++
++L_CHECK_SAVE:
++	s_and_b32	ttmp2, s_save_excp_flag_priv, SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_MASK
++	s_cbranch_scc1	L_SAVE
++
++	// Wave is halted but neither host trap nor SAVECTX is raised.
++	// Caused by instruction fetch memory violation.
++	// Spin wait until context saved to prevent interrupt storm.
++	s_sleep		0x10
++	s_getreg_b32	s_save_excp_flag_priv, hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV)
++	s_branch	L_CHECK_SAVE
++
++L_NOT_HALTED:
++	// Let second-level handle non-SAVECTX exception or trap.
++	// Any concurrent SAVECTX will be handled upon re-entry once halted.
++
++	// Check non-maskable exceptions. memory_violation, illegal_instruction
++	// and xnack_error exceptions always cause the wave to enter the trap
++	// handler.
++	s_and_b32	ttmp2, s_save_excp_flag_priv, SQ_WAVE_EXCP_FLAG_PRIV_NON_MASKABLE_EXCP_MASK
++	s_cbranch_scc1	L_FETCH_2ND_TRAP
++
++	// Check for maskable exceptions in trapsts.excp and trapsts.excp_hi.
++	// Maskable exceptions only cause the wave to enter the trap handler if
++	// their respective bit in mode.excp_en is set.
++	s_getreg_b32	ttmp2, hwreg(HW_REG_WAVE_EXCP_FLAG_USER)
++	s_and_b32	ttmp3, s_save_excp_flag_priv, SQ_WAVE_EXCP_FLAG_PRIV_ADDR_WATCH_MASK
++	s_cbranch_scc0	L_NOT_ADDR_WATCH
++	s_or_b32	ttmp2, ttmp2, SQ_WAVE_TRAP_CTRL_ADDR_WATCH_MASK
++
++L_NOT_ADDR_WATCH:
++	s_getreg_b32	ttmp3, hwreg(HW_REG_WAVE_TRAP_CTRL)
++	s_and_b32	ttmp2, ttmp3, ttmp2
++	s_cbranch_scc1	L_FETCH_2ND_TRAP
++
++L_CHECK_TRAP_ID:
++	// Check trap_id != 0
++	s_and_b32	ttmp2, s_save_pc_hi, S_SAVE_PC_HI_TRAP_ID_MASK
++	s_cbranch_scc1	L_FETCH_2ND_TRAP
++
++#if SINGLE_STEP_MISSED_WORKAROUND
++	// Prioritize single step exception over context save.
++	// Second-level trap will halt wave and RFE, re-entering for SAVECTX.
++	// WAVE_TRAP_CTRL is already in ttmp3.
++	s_and_b32	ttmp3, ttmp3, SQ_WAVE_TRAP_CTRL_TRAP_AFTER_INST_MASK
++	s_cbranch_scc1	L_FETCH_2ND_TRAP
++#endif
++
++	s_and_b32	ttmp2, s_save_excp_flag_priv, SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_MASK
++	s_cbranch_scc1	L_SAVE
++
++L_FETCH_2ND_TRAP:
++	// Read second-level TBA/TMA from first-level TMA and jump if available.
++	// ttmp[2:5] and ttmp12 can be used (others hold SPI-initialized debug data)
++	// ttmp12 holds SQ_WAVE_STATUS
++	s_sendmsg_rtn_b64       [ttmp14, ttmp15], sendmsg(MSG_RTN_GET_TMA)
++	s_wait_idle
++	s_lshl_b64	[ttmp14, ttmp15], [ttmp14, ttmp15], 0x8
++
++	s_bitcmp1_b32	ttmp15, 0xF
++	s_cbranch_scc0	L_NO_SIGN_EXTEND_TMA
++	s_or_b32	ttmp15, ttmp15, 0xFFFF0000
++L_NO_SIGN_EXTEND_TMA:
++
++	s_load_dword    ttmp2, [ttmp14, ttmp15], 0x10 scope:SCOPE_SYS		// debug trap enabled flag
++	s_wait_idle
++	s_lshl_b32      ttmp2, ttmp2, TTMP11_DEBUG_TRAP_ENABLED_SHIFT
++	s_andn2_b32     ttmp11, ttmp11, TTMP11_DEBUG_TRAP_ENABLED_MASK
++	s_or_b32        ttmp11, ttmp11, ttmp2
++
++	s_load_dwordx2	[ttmp2, ttmp3], [ttmp14, ttmp15], 0x0 scope:SCOPE_SYS	// second-level TBA
++	s_wait_idle
++	s_load_dwordx2	[ttmp14, ttmp15], [ttmp14, ttmp15], 0x8 scope:SCOPE_SYS	// second-level TMA
++	s_wait_idle
++
++	s_and_b64	[ttmp2, ttmp3], [ttmp2, ttmp3], [ttmp2, ttmp3]
++	s_cbranch_scc0	L_NO_NEXT_TRAP						// second-level trap handler has not been set
++	s_setpc_b64	[ttmp2, ttmp3]						// jump to second-level trap handler
++
++L_NO_NEXT_TRAP:
++	// If not caused by trap then halt wave to prevent re-entry.
++	s_and_b32	ttmp2, s_save_pc_hi, S_SAVE_PC_HI_TRAP_ID_MASK
++	s_cbranch_scc1	L_TRAP_CASE
++
++	// Host trap will not cause trap re-entry.
++	s_getreg_b32	ttmp2, hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV)
++	s_and_b32	ttmp2, ttmp2, SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_MASK
++	s_cbranch_scc1	L_EXIT_TRAP
++	s_or_b32	s_save_state_priv, s_save_state_priv, SQ_WAVE_STATE_PRIV_HALT_MASK
++
++	// If the PC points to S_ENDPGM then context save will fail if STATE_PRIV.HALT is set.
++	// Rewind the PC to prevent this from occurring.
++	s_sub_u32	ttmp0, ttmp0, 0x8
++	s_subb_u32	ttmp1, ttmp1, 0x0
++
++	s_branch	L_EXIT_TRAP
++
++L_TRAP_CASE:
++	// Advance past trap instruction to prevent re-entry.
++	s_add_u32	ttmp0, ttmp0, 0x4
++	s_addc_u32	ttmp1, ttmp1, 0x0
++
++L_EXIT_TRAP:
++	s_and_b32	ttmp1, ttmp1, 0xFFFF
++
++	// Restore SQ_WAVE_STATUS.
++	s_and_b64	exec, exec, exec					// Restore STATUS.EXECZ, not writable by s_setreg_b32
++	s_and_b64	vcc, vcc, vcc						// Restore STATUS.VCCZ, not writable by s_setreg_b32
++
++	// STATE_PRIV.BARRIER_COMPLETE may have changed since we read it.
++	// Only restore fields which the trap handler changes.
++	s_lshr_b32	s_save_state_priv, s_save_state_priv, SQ_WAVE_STATE_PRIV_SCC_SHIFT
++	s_setreg_b32	hwreg(HW_REG_WAVE_STATE_PRIV, SQ_WAVE_STATE_PRIV_SCC_SHIFT, \
++		SQ_WAVE_STATE_PRIV_POISON_ERR_SHIFT - SQ_WAVE_STATE_PRIV_SCC_SHIFT + 1), s_save_state_priv
++
++	s_rfe_b64	[ttmp0, ttmp1]
++
++L_SAVE:
++	// If VGPRs have been deallocated then terminate the wavefront.
++	// It has no remaining program to run and cannot save without VGPRs.
++	s_getreg_b32	s_save_tmp, hwreg(HW_REG_WAVE_STATUS)
++	s_bitcmp1_b32	s_save_tmp, SQ_WAVE_STATUS_NO_VGPRS_SHIFT
++	s_cbranch_scc0	L_HAVE_VGPRS
++	s_endpgm
++L_HAVE_VGPRS:
++
++	s_and_b32	s_save_pc_hi, s_save_pc_hi, 0x0000ffff			//pc[47:32]
++	s_mov_b32	s_save_tmp, 0
++	s_setreg_b32	hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV, SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_SHIFT, 1), s_save_tmp	//clear saveCtx bit
++
++	/* inform SPI the readiness and wait for SPI's go signal */
++	s_mov_b32	s_save_exec_lo, exec_lo					//save EXEC and use EXEC for the go signal from SPI
++	s_mov_b32	s_save_exec_hi, exec_hi
++	s_mov_b64	exec, 0x0						//clear EXEC to get ready to receive
++
++	s_sendmsg_rtn_b64       [exec_lo, exec_hi], sendmsg(MSG_RTN_SAVE_WAVE)
++	s_wait_idle
++
++	// Save first_wave flag so we can clear high bits of save address.
++	s_and_b32	s_save_tmp, s_save_spi_init_hi, S_SAVE_SPI_INIT_FIRST_WAVE_MASK
++	s_lshl_b32	s_save_tmp, s_save_tmp, (S_SAVE_PC_HI_FIRST_WAVE_SHIFT - S_SAVE_SPI_INIT_FIRST_WAVE_SHIFT)
++	s_or_b32	s_save_pc_hi, s_save_pc_hi, s_save_tmp
++
++	// Trap temporaries must be saved via VGPR but all VGPRs are in use.
++	// There is no ttmp space to hold the resource constant for VGPR save.
++	// Save v0 by itself since it requires only two SGPRs.
++	s_mov_b32	s_save_ttmps_lo, exec_lo
++	s_and_b32	s_save_ttmps_hi, exec_hi, 0xFFFF
++	s_mov_b32	exec_lo, 0xFFFFFFFF
++	s_mov_b32	exec_hi, 0xFFFFFFFF
++	global_store_dword_addtid	v0, [s_save_ttmps_lo, s_save_ttmps_hi] scope:SCOPE_SYS
++	v_mov_b32	v0, 0x0
++	s_mov_b32	exec_lo, s_save_ttmps_lo
++	s_mov_b32	exec_hi, s_save_ttmps_hi
++
++	// Save trap temporaries 4-11, 13 initialized by SPI debug dispatch logic
++	// ttmp SR memory offset : size(VGPR)+size(SVGPR)+size(SGPR)+0x40
++	get_wave_size2(s_save_ttmps_hi)
++	get_vgpr_size_bytes(s_save_ttmps_lo, s_save_ttmps_hi)
++	get_svgpr_size_bytes(s_save_ttmps_hi)
++	s_add_u32	s_save_ttmps_lo, s_save_ttmps_lo, s_save_ttmps_hi
++	s_and_b32	s_save_ttmps_hi, s_save_spi_init_hi, 0xFFFF
++	s_add_u32	s_save_ttmps_lo, s_save_ttmps_lo, get_sgpr_size_bytes()
++	s_add_u32	s_save_ttmps_lo, s_save_ttmps_lo, s_save_spi_init_lo
++	s_addc_u32	s_save_ttmps_hi, s_save_ttmps_hi, 0x0
++
++	v_writelane_b32	v0, ttmp4, 0x4
++	v_writelane_b32	v0, ttmp5, 0x5
++	v_writelane_b32	v0, ttmp6, 0x6
++	v_writelane_b32	v0, ttmp7, 0x7
++	v_writelane_b32	v0, ttmp8, 0x8
++	v_writelane_b32	v0, ttmp9, 0x9
++	v_writelane_b32	v0, ttmp10, 0xA
++	v_writelane_b32	v0, ttmp11, 0xB
++	v_writelane_b32	v0, ttmp13, 0xD
++	v_writelane_b32	v0, exec_lo, 0xE
++	v_writelane_b32	v0, exec_hi, 0xF
++
++	s_mov_b32	exec_lo, 0x3FFF
++	s_mov_b32	exec_hi, 0x0
++	global_store_dword_addtid	v0, [s_save_ttmps_lo, s_save_ttmps_hi] offset:0x40 scope:SCOPE_SYS
++	v_readlane_b32	ttmp14, v0, 0xE
++	v_readlane_b32	ttmp15, v0, 0xF
++	s_mov_b32	exec_lo, ttmp14
++	s_mov_b32	exec_hi, ttmp15
++
++	/* setup Resource Constants */
++	s_mov_b32	s_save_buf_rsrc0, s_save_spi_init_lo			//base_addr_lo
++	s_and_b32	s_save_buf_rsrc1, s_save_spi_init_hi, 0x0000FFFF	//base_addr_hi
++	s_or_b32	s_save_buf_rsrc1, s_save_buf_rsrc1, S_SAVE_BUF_RSRC_WORD1_STRIDE
++	s_mov_b32	s_save_buf_rsrc2, 0					//NUM_RECORDS initial value = 0 (in bytes) although not necessarily initialized
++	s_mov_b32	s_save_buf_rsrc3, S_SAVE_BUF_RSRC_WORD3_MISC
++
++	s_mov_b32	s_save_m0, m0
++
++	/* global mem offset */
++	s_mov_b32	s_save_mem_offset, 0x0
++	get_wave_size2(s_wave_size)
++
++	/* save first 4 VGPRs, needed for SGPR save */
++	s_mov_b32	exec_lo, 0xFFFFFFFF					//need every thread from now on
++	s_lshr_b32	m0, s_wave_size, S_WAVE_SIZE
++	s_and_b32	m0, m0, 1
++	s_cmp_eq_u32	m0, 1
++	s_cbranch_scc1	L_ENABLE_SAVE_4VGPR_EXEC_HI
++	s_mov_b32	exec_hi, 0x00000000
++	s_branch	L_SAVE_4VGPR_WAVE32
++L_ENABLE_SAVE_4VGPR_EXEC_HI:
++	s_mov_b32	exec_hi, 0xFFFFFFFF
++	s_branch	L_SAVE_4VGPR_WAVE64
++L_SAVE_4VGPR_WAVE32:
++	s_mov_b32	s_save_buf_rsrc2, 0x1000000				//NUM_RECORDS in bytes
++
++	// VGPR Allocated in 4-GPR granularity
++
++	buffer_store_dword	v1, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:128
++	buffer_store_dword	v2, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:128*2
++	buffer_store_dword	v3, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:128*3
++	s_branch	L_SAVE_HWREG
++
++L_SAVE_4VGPR_WAVE64:
++	s_mov_b32	s_save_buf_rsrc2, 0x1000000				//NUM_RECORDS in bytes
++
++	// VGPR Allocated in 4-GPR granularity
++
++	buffer_store_dword	v1, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:256
++	buffer_store_dword	v2, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:256*2
++	buffer_store_dword	v3, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:256*3
++
++	/* save HW registers */
++
++L_SAVE_HWREG:
++	// HWREG SR memory offset : size(VGPR)+size(SVGPR)+size(SGPR)
++	get_vgpr_size_bytes(s_save_mem_offset, s_wave_size)
++	get_svgpr_size_bytes(s_save_tmp)
++	s_add_u32	s_save_mem_offset, s_save_mem_offset, s_save_tmp
++	s_add_u32	s_save_mem_offset, s_save_mem_offset, get_sgpr_size_bytes()
++
++	s_mov_b32	s_save_buf_rsrc2, 0x1000000				//NUM_RECORDS in bytes
++
++	v_mov_b32	v0, 0x0							//Offset[31:0] from buffer resource
++	v_mov_b32	v1, 0x0							//Offset[63:32] from buffer resource
++	v_mov_b32	v2, 0x0							//Set of SGPRs for TCP store
++	s_mov_b32	m0, 0x0							//Next lane of v2 to write to
++
++	// Ensure no further changes to barrier or LDS state.
++	// STATE_PRIV.BARRIER_COMPLETE may change up to this point.
++	s_barrier_signal	-2
++	s_barrier_wait	-2
++
++	// Re-read final state of BARRIER_COMPLETE field for save.
++	s_getreg_b32	s_save_tmp, hwreg(HW_REG_WAVE_STATE_PRIV)
++	s_and_b32	s_save_tmp, s_save_tmp, SQ_WAVE_STATE_PRIV_BARRIER_COMPLETE_MASK
++	s_andn2_b32	s_save_state_priv, s_save_state_priv, SQ_WAVE_STATE_PRIV_BARRIER_COMPLETE_MASK
++	s_or_b32	s_save_state_priv, s_save_state_priv, s_save_tmp
++
++	write_hwreg_to_v2(s_save_m0)
++	write_hwreg_to_v2(s_save_pc_lo)
++	s_andn2_b32	s_save_tmp, s_save_pc_hi, S_SAVE_PC_HI_FIRST_WAVE_MASK
++	write_hwreg_to_v2(s_save_tmp)
++	write_hwreg_to_v2(s_save_exec_lo)
++	write_hwreg_to_v2(s_save_exec_hi)
++	write_hwreg_to_v2(s_save_state_priv)
++
++	s_getreg_b32	s_save_tmp, hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV)
++	write_hwreg_to_v2(s_save_tmp)
++
++	write_hwreg_to_v2(s_save_xnack_mask)
++
++	s_getreg_b32	s_save_m0, hwreg(HW_REG_WAVE_MODE)
++	write_hwreg_to_v2(s_save_m0)
++
++	s_getreg_b32	s_save_m0, hwreg(HW_REG_WAVE_SCRATCH_BASE_LO)
++	write_hwreg_to_v2(s_save_m0)
++
++	s_getreg_b32	s_save_m0, hwreg(HW_REG_WAVE_SCRATCH_BASE_HI)
++	write_hwreg_to_v2(s_save_m0)
++
++	s_getreg_b32	s_save_m0, hwreg(HW_REG_WAVE_EXCP_FLAG_USER)
++	write_hwreg_to_v2(s_save_m0)
++
++	s_getreg_b32	s_save_m0, hwreg(HW_REG_WAVE_TRAP_CTRL)
++	write_hwreg_to_v2(s_save_m0)
++
++	s_getreg_b32	s_save_tmp, hwreg(HW_REG_WAVE_STATUS)
++	write_hwreg_to_v2(s_save_tmp)
++
++	s_get_barrier_state s_save_tmp, -1
++	s_wait_kmcnt (0)
++	write_hwreg_to_v2(s_save_tmp)
++
++	// Write HWREGs with 16 VGPR lanes. TTMPs occupy space after this.
++	s_mov_b32       exec_lo, 0xFFFF
++	s_mov_b32	exec_hi, 0x0
++	buffer_store_dword	v2, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS
++
++	// Write SGPRs with 32 VGPR lanes. This works in wave32 and wave64 mode.
++	s_mov_b32       exec_lo, 0xFFFFFFFF
++
++	/* save SGPRs */
++	// Save SGPRs before the LDS save so that s0 to s4 can be used during the LDS save...
++
++	// SGPR SR memory offset : size(VGPR)+size(SVGPR)
++	get_vgpr_size_bytes(s_save_mem_offset, s_wave_size)
++	get_svgpr_size_bytes(s_save_tmp)
++	s_add_u32	s_save_mem_offset, s_save_mem_offset, s_save_tmp
++	s_mov_b32	s_save_buf_rsrc2, 0x1000000				//NUM_RECORDS in bytes
++
++	s_mov_b32	ttmp13, 0x0						//next VGPR lane to copy SGPR into
++
++	s_mov_b32	m0, 0x0							//SGPR initial index value =0
++	s_nop		0x0							//Manually inserted wait states
++L_SAVE_SGPR_LOOP:
++	// SGPR is allocated in 16 SGPR granularity
++	s_movrels_b64	s0, s0							//s0 = s[0+m0], s1 = s[1+m0]
++	s_movrels_b64	s2, s2							//s2 = s[2+m0], s3 = s[3+m0]
++	s_movrels_b64	s4, s4							//s4 = s[4+m0], s5 = s[5+m0]
++	s_movrels_b64	s6, s6							//s6 = s[6+m0], s7 = s[7+m0]
++	s_movrels_b64	s8, s8							//s8 = s[8+m0], s9 = s[9+m0]
++	s_movrels_b64	s10, s10						//s10 = s[10+m0], s11 = s[11+m0]
++	s_movrels_b64	s12, s12						//s12 = s[12+m0], s13 = s[13+m0]
++	s_movrels_b64	s14, s14						//s14 = s[14+m0], s15 = s[15+m0]
++
++	write_16sgpr_to_v2(s0)
++
++	s_cmp_eq_u32	ttmp13, 0x20						//have 32 VGPR lanes filled?
++	s_cbranch_scc0	L_SAVE_SGPR_SKIP_TCP_STORE
++
++	buffer_store_dword	v2, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS
++	s_add_u32	s_save_mem_offset, s_save_mem_offset, 0x80
++	s_mov_b32	ttmp13, 0x0
++	v_mov_b32	v2, 0x0
++L_SAVE_SGPR_SKIP_TCP_STORE:
++
++	s_add_u32	m0, m0, 16						//next sgpr index
++	s_cmp_lt_u32	m0, 96							//scc = (m0 < first 96 SGPR) ? 1 : 0
++	s_cbranch_scc1	L_SAVE_SGPR_LOOP					//first 96 SGPR save is complete?
++
++	//save the remaining 12 SGPRs
++	s_movrels_b64	s0, s0							//s0 = s[0+m0], s1 = s[1+m0]
++	s_movrels_b64	s2, s2							//s2 = s[2+m0], s3 = s[3+m0]
++	s_movrels_b64	s4, s4							//s4 = s[4+m0], s5 = s[5+m0]
++	s_movrels_b64	s6, s6							//s6 = s[6+m0], s7 = s[7+m0]
++	s_movrels_b64	s8, s8							//s8 = s[8+m0], s9 = s[9+m0]
++	s_movrels_b64	s10, s10						//s10 = s[10+m0], s11 = s[11+m0]
++	write_12sgpr_to_v2(s0)
++
++	buffer_store_dword	v2, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS
++
++	/* save LDS */
++
++L_SAVE_LDS:
++	// Change EXEC to all threads...
++	s_mov_b32	exec_lo, 0xFFFFFFFF					//need every thread from now on
++	s_lshr_b32	m0, s_wave_size, S_WAVE_SIZE
++	s_and_b32	m0, m0, 1
++	s_cmp_eq_u32	m0, 1
++	s_cbranch_scc1	L_ENABLE_SAVE_LDS_EXEC_HI
++	s_mov_b32	exec_hi, 0x00000000
++	s_branch	L_SAVE_LDS_NORMAL
++L_ENABLE_SAVE_LDS_EXEC_HI:
++	s_mov_b32	exec_hi, 0xFFFFFFFF
++L_SAVE_LDS_NORMAL:
++	s_getreg_b32	s_save_alloc_size, hwreg(HW_REG_WAVE_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE)
++	s_and_b32	s_save_alloc_size, s_save_alloc_size, 0xFFFFFFFF	//lds_size is zero?
++	s_cbranch_scc0	L_SAVE_LDS_DONE						//no lds used? jump to L_SAVE_LDS_DONE
++
++	s_and_b32	s_save_tmp, s_save_pc_hi, S_SAVE_PC_HI_FIRST_WAVE_MASK
++	s_cbranch_scc0	L_SAVE_LDS_DONE
++
++	// first wave do LDS save;
++
++	s_lshl_b32	s_save_alloc_size, s_save_alloc_size, SQ_WAVE_LDS_ALLOC_GRANULARITY
++	s_mov_b32	s_save_buf_rsrc2, s_save_alloc_size			//NUM_RECORDS in bytes
++
++	// LDS at offset: size(VGPR)+size(SVGPR)+SIZE(SGPR)+SIZE(HWREG)
++	//
++	get_vgpr_size_bytes(s_save_mem_offset, s_wave_size)
++	get_svgpr_size_bytes(s_save_tmp)
++	s_add_u32	s_save_mem_offset, s_save_mem_offset, s_save_tmp
++	s_add_u32	s_save_mem_offset, s_save_mem_offset, get_sgpr_size_bytes()
++	s_add_u32	s_save_mem_offset, s_save_mem_offset, get_hwreg_size_bytes()
++
++	s_mov_b32	s_save_buf_rsrc2, 0x1000000				//NUM_RECORDS in bytes
++
++	//load 0~63*4(byte address) to vgpr v0
++	v_mbcnt_lo_u32_b32	v0, -1, 0
++	v_mbcnt_hi_u32_b32	v0, -1, v0
++	v_mul_u32_u24	v0, 4, v0
++
++	s_lshr_b32	m0, s_wave_size, S_WAVE_SIZE
++	s_and_b32	m0, m0, 1
++	s_cmp_eq_u32	m0, 1
++	s_mov_b32	m0, 0x0
++	s_cbranch_scc1	L_SAVE_LDS_W64
++
++L_SAVE_LDS_W32:
++	s_mov_b32	s3, 128
++	s_nop		0
++	s_nop		0
++	s_nop		0
++L_SAVE_LDS_LOOP_W32:
++	ds_read_b32	v1, v0
++	s_wait_idle
++	buffer_store_dword	v1, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS
++
++	s_add_u32	m0, m0, s3						//every buffer_store_lds does 128 bytes
++	s_add_u32	s_save_mem_offset, s_save_mem_offset, s3
++	v_add_nc_u32	v0, v0, 128						//mem offset increased by 128 bytes
++	s_cmp_lt_u32	m0, s_save_alloc_size					//scc=(m0 < s_save_alloc_size) ? 1 : 0
++	s_cbranch_scc1	L_SAVE_LDS_LOOP_W32					//LDS save is complete?
++
++	s_branch	L_SAVE_LDS_DONE
++
++L_SAVE_LDS_W64:
++	s_mov_b32	s3, 256
++	s_nop		0
++	s_nop		0
++	s_nop		0
++L_SAVE_LDS_LOOP_W64:
++	ds_read_b32	v1, v0
++	s_wait_idle
++	buffer_store_dword	v1, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS
++
++	s_add_u32	m0, m0, s3						//every buffer_store_lds does 256 bytes
++	s_add_u32	s_save_mem_offset, s_save_mem_offset, s3
++	v_add_nc_u32	v0, v0, 256						//mem offset increased by 256 bytes
++	s_cmp_lt_u32	m0, s_save_alloc_size					//scc=(m0 < s_save_alloc_size) ? 1 : 0
++	s_cbranch_scc1	L_SAVE_LDS_LOOP_W64					//LDS save is complete?
++
++L_SAVE_LDS_DONE:
++	/* save VGPRs - save the remaining VGPRs */
++L_SAVE_VGPR:
++	// VGPR SR memory offset: 0
++	s_mov_b32	exec_lo, 0xFFFFFFFF					//need every thread from now on
++	s_lshr_b32	m0, s_wave_size, S_WAVE_SIZE
++	s_and_b32	m0, m0, 1
++	s_cmp_eq_u32	m0, 1
++	s_cbranch_scc1	L_ENABLE_SAVE_VGPR_EXEC_HI
++	s_mov_b32	s_save_mem_offset, (0+128*4)				// for the remaining VGPRs
++	s_mov_b32	exec_hi, 0x00000000
++	s_branch	L_SAVE_VGPR_NORMAL
++L_ENABLE_SAVE_VGPR_EXEC_HI:
++	s_mov_b32	s_save_mem_offset, (0+256*4)				// for the remaining VGPRs
++	s_mov_b32	exec_hi, 0xFFFFFFFF
++L_SAVE_VGPR_NORMAL:
++	s_getreg_b32	s_save_alloc_size, hwreg(HW_REG_WAVE_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE)
++	s_add_u32	s_save_alloc_size, s_save_alloc_size, 1
++	s_lshl_b32	s_save_alloc_size, s_save_alloc_size, 2			//Number of VGPRs = (vgpr_size + 1) * 4    (non-zero value)
++	//determine it is wave32 or wave64
++	s_lshr_b32	m0, s_wave_size, S_WAVE_SIZE
++	s_and_b32	m0, m0, 1
++	s_cmp_eq_u32	m0, 1
++	s_cbranch_scc1	L_SAVE_VGPR_WAVE64
++
++	s_mov_b32	s_save_buf_rsrc2, 0x1000000				//NUM_RECORDS in bytes
++
++	// VGPR Allocated in 4-GPR granularity
++
++	// VGPR store using dw burst
++	s_mov_b32	m0, 0x4							//VGPR initial index value =4
++	s_cmp_lt_u32	m0, s_save_alloc_size
++	s_cbranch_scc0	L_SAVE_VGPR_END
++
++L_SAVE_VGPR_W32_LOOP:
++	v_movrels_b32	v0, v0							//v0 = v[0+m0]
++	v_movrels_b32	v1, v1							//v1 = v[1+m0]
++	v_movrels_b32	v2, v2							//v2 = v[2+m0]
++	v_movrels_b32	v3, v3							//v3 = v[3+m0]
++
++	buffer_store_dword	v0, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS
++	buffer_store_dword	v1, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:128
++	buffer_store_dword	v2, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:128*2
++	buffer_store_dword	v3, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:128*3
++
++	s_add_u32	m0, m0, 4						//next vgpr index
++	s_add_u32	s_save_mem_offset, s_save_mem_offset, 128*4		//every buffer_store_dword does 128 bytes
++	s_cmp_lt_u32	m0, s_save_alloc_size					//scc = (m0 < s_save_alloc_size) ? 1 : 0
++	s_cbranch_scc1	L_SAVE_VGPR_W32_LOOP					//VGPR save is complete?
++
++	s_branch	L_SAVE_VGPR_END
++
++L_SAVE_VGPR_WAVE64:
++	s_mov_b32	s_save_buf_rsrc2, 0x1000000				//NUM_RECORDS in bytes
++
++	// VGPR store using dw burst
++	s_mov_b32	m0, 0x4							//VGPR initial index value =4
++	s_cmp_lt_u32	m0, s_save_alloc_size
++	s_cbranch_scc0	L_SAVE_SHARED_VGPR
++
++L_SAVE_VGPR_W64_LOOP:
++	v_movrels_b32	v0, v0							//v0 = v[0+m0]
++	v_movrels_b32	v1, v1							//v1 = v[1+m0]
++	v_movrels_b32	v2, v2							//v2 = v[2+m0]
++	v_movrels_b32	v3, v3							//v3 = v[3+m0]
++
++	buffer_store_dword	v0, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS
++	buffer_store_dword	v1, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:256
++	buffer_store_dword	v2, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:256*2
++	buffer_store_dword	v3, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:256*3
++
++	s_add_u32	m0, m0, 4						//next vgpr index
++	s_add_u32	s_save_mem_offset, s_save_mem_offset, 256*4		//every buffer_store_dword does 256 bytes
++	s_cmp_lt_u32	m0, s_save_alloc_size					//scc = (m0 < s_save_alloc_size) ? 1 : 0
++	s_cbranch_scc1	L_SAVE_VGPR_W64_LOOP					//VGPR save is complete?
++
++L_SAVE_SHARED_VGPR:
++	s_getreg_b32	s_save_alloc_size, hwreg(HW_REG_WAVE_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SIZE)
++	s_and_b32	s_save_alloc_size, s_save_alloc_size, 0xFFFFFFFF	//shared_vgpr_size is zero?
++	s_cbranch_scc0	L_SAVE_VGPR_END						//no shared_vgpr used? jump to L_SAVE_VGPR_END
++	s_lshl_b32	s_save_alloc_size, s_save_alloc_size, 3			//Number of SHARED_VGPRs = shared_vgpr_size * 8    (non-zero value)
++	//m0 now has the value of normal vgpr count, just add the m0 with shared_vgpr count to get the total count.
++	//save shared_vgpr will start from the index of m0
++	s_add_u32	s_save_alloc_size, s_save_alloc_size, m0
++	s_mov_b32	exec_lo, 0xFFFFFFFF
++	s_mov_b32	exec_hi, 0x00000000
++
++L_SAVE_SHARED_VGPR_WAVE64_LOOP:
++	v_movrels_b32	v0, v0							//v0 = v[0+m0]
++	buffer_store_dword	v0, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS
++	s_add_u32	m0, m0, 1						//next vgpr index
++	s_add_u32	s_save_mem_offset, s_save_mem_offset, 128
++	s_cmp_lt_u32	m0, s_save_alloc_size					//scc = (m0 < s_save_alloc_size) ? 1 : 0
++	s_cbranch_scc1	L_SAVE_SHARED_VGPR_WAVE64_LOOP				//SHARED_VGPR save is complete?
++
++L_SAVE_VGPR_END:
++	s_branch	L_END_PGM
++
++L_RESTORE:
++	/* Setup Resource Constants */
++	s_mov_b32	s_restore_buf_rsrc0, s_restore_spi_init_lo		//base_addr_lo
++	s_and_b32	s_restore_buf_rsrc1, s_restore_spi_init_hi, 0x0000FFFF	//base_addr_hi
++	s_or_b32	s_restore_buf_rsrc1, s_restore_buf_rsrc1, S_RESTORE_BUF_RSRC_WORD1_STRIDE
++	s_mov_b32	s_restore_buf_rsrc2, 0					//NUM_RECORDS initial value = 0 (in bytes)
++	s_mov_b32	s_restore_buf_rsrc3, S_RESTORE_BUF_RSRC_WORD3_MISC
++
++	// Save s_restore_spi_init_hi for later use.
++	s_mov_b32 s_restore_spi_init_hi_save, s_restore_spi_init_hi
++
++	//determine whether it is wave32 or wave64
++	get_wave_size2(s_restore_size)
++
++	s_and_b32	s_restore_tmp, s_restore_spi_init_hi, S_RESTORE_SPI_INIT_FIRST_WAVE_MASK
++	s_cbranch_scc0	L_RESTORE_VGPR
++
++	/* restore LDS */
++L_RESTORE_LDS:
++	s_mov_b32	exec_lo, 0xFFFFFFFF					//need every thread from now on
++	s_lshr_b32	m0, s_restore_size, S_WAVE_SIZE
++	s_and_b32	m0, m0, 1
++	s_cmp_eq_u32	m0, 1
++	s_cbranch_scc1	L_ENABLE_RESTORE_LDS_EXEC_HI
++	s_mov_b32	exec_hi, 0x00000000
++	s_branch	L_RESTORE_LDS_NORMAL
++L_ENABLE_RESTORE_LDS_EXEC_HI:
++	s_mov_b32	exec_hi, 0xFFFFFFFF
++L_RESTORE_LDS_NORMAL:
++	s_getreg_b32	s_restore_alloc_size, hwreg(HW_REG_WAVE_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE)
++	s_and_b32	s_restore_alloc_size, s_restore_alloc_size, 0xFFFFFFFF	//lds_size is zero?
++	s_cbranch_scc0	L_RESTORE_VGPR						//no lds used? jump to L_RESTORE_VGPR
++	s_lshl_b32	s_restore_alloc_size, s_restore_alloc_size, SQ_WAVE_LDS_ALLOC_GRANULARITY
++	s_mov_b32	s_restore_buf_rsrc2, s_restore_alloc_size		//NUM_RECORDS in bytes
++
++	// LDS at offset: size(VGPR)+size(SVGPR)+size(SGPR)+size(HWREG)
++	//
++	get_vgpr_size_bytes(s_restore_mem_offset, s_restore_size)
++	get_svgpr_size_bytes(s_restore_tmp)
++	s_add_u32	s_restore_mem_offset, s_restore_mem_offset, s_restore_tmp
++	s_add_u32	s_restore_mem_offset, s_restore_mem_offset, get_sgpr_size_bytes()
++	s_add_u32	s_restore_mem_offset, s_restore_mem_offset, get_hwreg_size_bytes()
++
++	s_mov_b32	s_restore_buf_rsrc2, 0x1000000				//NUM_RECORDS in bytes
++
++	s_lshr_b32	m0, s_restore_size, S_WAVE_SIZE
++	s_and_b32	m0, m0, 1
++	s_cmp_eq_u32	m0, 1
++	s_mov_b32	m0, 0x0
++	s_cbranch_scc1	L_RESTORE_LDS_LOOP_W64
++
++L_RESTORE_LDS_LOOP_W32:
++	buffer_load_dword       v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset
++	s_wait_idle
++	ds_store_addtid_b32     v0
++	s_add_u32	m0, m0, 128						// 128 bytes (32 lanes x 1 DW)
++	s_add_u32	s_restore_mem_offset, s_restore_mem_offset, 128		//mem offset increased by 128 bytes
++	s_cmp_lt_u32	m0, s_restore_alloc_size				//scc=(m0 < s_restore_alloc_size) ? 1 : 0
++	s_cbranch_scc1	L_RESTORE_LDS_LOOP_W32					//LDS restore is complete?
++	s_branch	L_RESTORE_VGPR
++
++L_RESTORE_LDS_LOOP_W64:
++	buffer_load_dword       v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset
++	s_wait_idle
++	ds_store_addtid_b32     v0
++	s_add_u32	m0, m0, 256						// 256 bytes (64 lanes x 1 DW)
++	s_add_u32	s_restore_mem_offset, s_restore_mem_offset, 256		//mem offset increased by 256 bytes
++	s_cmp_lt_u32	m0, s_restore_alloc_size				//scc=(m0 < s_restore_alloc_size) ? 1 : 0
++	s_cbranch_scc1	L_RESTORE_LDS_LOOP_W64					//LDS restore is complete?
++
++	/* restore VGPRs */
++L_RESTORE_VGPR:
++	// VGPR SR memory offset : 0
++	s_mov_b32	s_restore_mem_offset, 0x0
++	s_mov_b32	exec_lo, 0xFFFFFFFF					//need every thread from now on
++	s_lshr_b32	m0, s_restore_size, S_WAVE_SIZE
++	s_and_b32	m0, m0, 1
++	s_cmp_eq_u32	m0, 1
++	s_cbranch_scc1	L_ENABLE_RESTORE_VGPR_EXEC_HI
++	s_mov_b32	exec_hi, 0x00000000
++	s_branch	L_RESTORE_VGPR_NORMAL
++L_ENABLE_RESTORE_VGPR_EXEC_HI:
++	s_mov_b32	exec_hi, 0xFFFFFFFF
++L_RESTORE_VGPR_NORMAL:
++	s_getreg_b32	s_restore_alloc_size, hwreg(HW_REG_WAVE_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE)
++	s_add_u32	s_restore_alloc_size, s_restore_alloc_size, 1
++	s_lshl_b32	s_restore_alloc_size, s_restore_alloc_size, 2		//Number of VGPRs = (vgpr_size + 1) * 4    (non-zero value)
++	//determine whether it is wave32 or wave64
++	s_lshr_b32	m0, s_restore_size, S_WAVE_SIZE
++	s_and_b32	m0, m0, 1
++	s_cmp_eq_u32	m0, 1
++	s_cbranch_scc1	L_RESTORE_VGPR_WAVE64
++
++	s_mov_b32	s_restore_buf_rsrc2, 0x1000000				//NUM_RECORDS in bytes
++
++	// VGPR load using dw burst
++	s_mov_b32	s_restore_mem_offset_save, s_restore_mem_offset		// restore start with v4, v0 will be the last
++	s_add_u32	s_restore_mem_offset, s_restore_mem_offset, 128*4
++	s_mov_b32	m0, 4							//VGPR initial index value = 4
++	s_cmp_lt_u32	m0, s_restore_alloc_size
++	s_cbranch_scc0	L_RESTORE_SGPR
++
++L_RESTORE_VGPR_WAVE32_LOOP:
++	buffer_load_dword	v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset scope:SCOPE_SYS
++	buffer_load_dword	v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset scope:SCOPE_SYS offset:128
++	buffer_load_dword	v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset scope:SCOPE_SYS offset:128*2
++	buffer_load_dword	v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset scope:SCOPE_SYS offset:128*3
++	s_wait_idle
++	v_movreld_b32	v0, v0							//v[0+m0] = v0
++	v_movreld_b32	v1, v1
++	v_movreld_b32	v2, v2
++	v_movreld_b32	v3, v3
++	s_add_u32	m0, m0, 4						//next vgpr index
++	s_add_u32	s_restore_mem_offset, s_restore_mem_offset, 128*4	//every buffer_load_dword does 128 bytes
++	s_cmp_lt_u32	m0, s_restore_alloc_size				//scc = (m0 < s_restore_alloc_size) ? 1 : 0
++	s_cbranch_scc1	L_RESTORE_VGPR_WAVE32_LOOP				//VGPR restore (except v0) is complete?
++
++	/* VGPR restore on v0 */
++	buffer_load_dword	v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save scope:SCOPE_SYS
++	buffer_load_dword	v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save scope:SCOPE_SYS offset:128
++	buffer_load_dword	v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save scope:SCOPE_SYS offset:128*2
++	buffer_load_dword	v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save scope:SCOPE_SYS offset:128*3
++	s_wait_idle
++
++	s_branch	L_RESTORE_SGPR
++
++L_RESTORE_VGPR_WAVE64:
++	s_mov_b32	s_restore_buf_rsrc2, 0x1000000				//NUM_RECORDS in bytes
++
++	// VGPR load using dw burst
++	s_mov_b32	s_restore_mem_offset_save, s_restore_mem_offset		// restore start with v4, v0 will be the last
++	s_add_u32	s_restore_mem_offset, s_restore_mem_offset, 256*4
++	s_mov_b32	m0, 4							//VGPR initial index value = 4
++	s_cmp_lt_u32	m0, s_restore_alloc_size
++	s_cbranch_scc0	L_RESTORE_SHARED_VGPR
++
++L_RESTORE_VGPR_WAVE64_LOOP:
++	buffer_load_dword	v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset scope:SCOPE_SYS
++	buffer_load_dword	v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset scope:SCOPE_SYS offset:256
++	buffer_load_dword	v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset scope:SCOPE_SYS offset:256*2
++	buffer_load_dword	v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset scope:SCOPE_SYS offset:256*3
++	s_wait_idle
++	v_movreld_b32	v0, v0							//v[0+m0] = v0
++	v_movreld_b32	v1, v1
++	v_movreld_b32	v2, v2
++	v_movreld_b32	v3, v3
++	s_add_u32	m0, m0, 4						//next vgpr index
++	s_add_u32	s_restore_mem_offset, s_restore_mem_offset, 256*4	//every buffer_load_dword does 256 bytes
++	s_cmp_lt_u32	m0, s_restore_alloc_size				//scc = (m0 < s_restore_alloc_size) ? 1 : 0
++	s_cbranch_scc1	L_RESTORE_VGPR_WAVE64_LOOP				//VGPR restore (except v0) is complete?
++
++L_RESTORE_SHARED_VGPR:
++	s_getreg_b32	s_restore_alloc_size, hwreg(HW_REG_WAVE_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SIZE)	//shared_vgpr_size
++	s_and_b32	s_restore_alloc_size, s_restore_alloc_size, 0xFFFFFFFF	//shared_vgpr_size is zero?
++	s_cbranch_scc0	L_RESTORE_V0						//no shared_vgpr used?
++	s_lshl_b32	s_restore_alloc_size, s_restore_alloc_size, 3		//Number of SHARED_VGPRs = shared_vgpr_size * 8    (non-zero value)
++	//m0 now has the value of normal vgpr count, just add the m0 with shared_vgpr count to get the total count.
++	//restore shared_vgpr will start from the index of m0
++	s_add_u32	s_restore_alloc_size, s_restore_alloc_size, m0
++	s_mov_b32	exec_lo, 0xFFFFFFFF
++	s_mov_b32	exec_hi, 0x00000000
++L_RESTORE_SHARED_VGPR_WAVE64_LOOP:
++	buffer_load_dword	v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset scope:SCOPE_SYS
++	s_wait_idle
++	v_movreld_b32	v0, v0							//v[0+m0] = v0
++	s_add_u32	m0, m0, 1						//next vgpr index
++	s_add_u32	s_restore_mem_offset, s_restore_mem_offset, 128
++	s_cmp_lt_u32	m0, s_restore_alloc_size				//scc = (m0 < s_restore_alloc_size) ? 1 : 0
++	s_cbranch_scc1	L_RESTORE_SHARED_VGPR_WAVE64_LOOP			//VGPR restore (except v0) is complete?
++
++	s_mov_b32	exec_hi, 0xFFFFFFFF					//restore back exec_hi before restoring V0!!
++
++	/* VGPR restore on v0 */
++L_RESTORE_V0:
++	buffer_load_dword	v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save scope:SCOPE_SYS
++	buffer_load_dword	v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save scope:SCOPE_SYS offset:256
++	buffer_load_dword	v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save scope:SCOPE_SYS offset:256*2
++	buffer_load_dword	v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save scope:SCOPE_SYS offset:256*3
++	s_wait_idle
++
++	/* restore SGPRs */
++	//will be 4+8+16*6
++	// SGPR SR memory offset : size(VGPR)+size(SVGPR)
++L_RESTORE_SGPR:
++	get_vgpr_size_bytes(s_restore_mem_offset, s_restore_size)
++	get_svgpr_size_bytes(s_restore_tmp)
++	s_add_u32	s_restore_mem_offset, s_restore_mem_offset, s_restore_tmp
++	s_add_u32	s_restore_mem_offset, s_restore_mem_offset, get_sgpr_size_bytes()
++	s_sub_u32	s_restore_mem_offset, s_restore_mem_offset, 20*4	//s108~s127 are not saved
++
++	s_mov_b32	s_restore_buf_rsrc2, 0x1000000				//NUM_RECORDS in bytes
++
++	s_mov_b32	m0, s_sgpr_save_num
++
++	read_4sgpr_from_mem(s0, s_restore_buf_rsrc0, s_restore_mem_offset)
++	s_wait_idle
++
++	s_sub_u32	m0, m0, 4						// Restore from S[0] to S[104]
++	s_nop		0							// hazard SALU M0=> S_MOVREL
++
++	s_movreld_b64	s0, s0							//s[0+m0] = s0
++	s_movreld_b64	s2, s2
++
++	read_8sgpr_from_mem(s0, s_restore_buf_rsrc0, s_restore_mem_offset)
++	s_wait_idle
++
++	s_sub_u32	m0, m0, 8						// Restore from S[0] to S[96]
++	s_nop		0							// hazard SALU M0=> S_MOVREL
++
++	s_movreld_b64	s0, s0							//s[0+m0] = s0
++	s_movreld_b64	s2, s2
++	s_movreld_b64	s4, s4
++	s_movreld_b64	s6, s6
++
++ L_RESTORE_SGPR_LOOP:
++	read_16sgpr_from_mem(s0, s_restore_buf_rsrc0, s_restore_mem_offset)
++	s_wait_idle
++
++	s_sub_u32	m0, m0, 16						// Restore from S[n] to S[0]
++	s_nop		0							// hazard SALU M0=> S_MOVREL
++
++	s_movreld_b64	s0, s0							//s[0+m0] = s0
++	s_movreld_b64	s2, s2
++	s_movreld_b64	s4, s4
++	s_movreld_b64	s6, s6
++	s_movreld_b64	s8, s8
++	s_movreld_b64	s10, s10
++	s_movreld_b64	s12, s12
++	s_movreld_b64	s14, s14
++
++	s_cmp_eq_u32	m0, 0							//scc = (m0 == 0) ? 1 : 0
++	s_cbranch_scc0	L_RESTORE_SGPR_LOOP
++
++	// s_barrier with STATE_PRIV.TRAP_AFTER_INST=1, STATUS.PRIV=1 incorrectly asserts debug exception.
++	// Clear DEBUG_EN before and restore MODE after the barrier.
++	s_setreg_imm32_b32	hwreg(HW_REG_WAVE_MODE), 0
++
++	/* restore HW registers */
++L_RESTORE_HWREG:
++	// HWREG SR memory offset : size(VGPR)+size(SVGPR)+size(SGPR)
++	get_vgpr_size_bytes(s_restore_mem_offset, s_restore_size)
++	get_svgpr_size_bytes(s_restore_tmp)
++	s_add_u32	s_restore_mem_offset, s_restore_mem_offset, s_restore_tmp
++	s_add_u32	s_restore_mem_offset, s_restore_mem_offset, get_sgpr_size_bytes()
++
++	s_mov_b32	s_restore_buf_rsrc2, 0x1000000				//NUM_RECORDS in bytes
++
++	// Restore s_restore_spi_init_hi before the saved value gets clobbered.
++	s_mov_b32 s_restore_spi_init_hi, s_restore_spi_init_hi_save
++
++	read_hwreg_from_mem(s_restore_m0, s_restore_buf_rsrc0, s_restore_mem_offset)
++	read_hwreg_from_mem(s_restore_pc_lo, s_restore_buf_rsrc0, s_restore_mem_offset)
++	read_hwreg_from_mem(s_restore_pc_hi, s_restore_buf_rsrc0, s_restore_mem_offset)
++	read_hwreg_from_mem(s_restore_exec_lo, s_restore_buf_rsrc0, s_restore_mem_offset)
++	read_hwreg_from_mem(s_restore_exec_hi, s_restore_buf_rsrc0, s_restore_mem_offset)
++	read_hwreg_from_mem(s_restore_state_priv, s_restore_buf_rsrc0, s_restore_mem_offset)
++	read_hwreg_from_mem(s_restore_excp_flag_priv, s_restore_buf_rsrc0, s_restore_mem_offset)
++	read_hwreg_from_mem(s_restore_xnack_mask, s_restore_buf_rsrc0, s_restore_mem_offset)
++	read_hwreg_from_mem(s_restore_mode, s_restore_buf_rsrc0, s_restore_mem_offset)
++	read_hwreg_from_mem(s_restore_flat_scratch, s_restore_buf_rsrc0, s_restore_mem_offset)
++	s_wait_idle
++
++	s_setreg_b32	hwreg(HW_REG_WAVE_SCRATCH_BASE_LO), s_restore_flat_scratch
++
++	read_hwreg_from_mem(s_restore_flat_scratch, s_restore_buf_rsrc0, s_restore_mem_offset)
++	s_wait_idle
++
++	s_setreg_b32	hwreg(HW_REG_WAVE_SCRATCH_BASE_HI), s_restore_flat_scratch
++
++	read_hwreg_from_mem(s_restore_tmp, s_restore_buf_rsrc0, s_restore_mem_offset)
++	s_wait_idle
++	s_setreg_b32	hwreg(HW_REG_WAVE_EXCP_FLAG_USER), s_restore_tmp
++
++	read_hwreg_from_mem(s_restore_tmp, s_restore_buf_rsrc0, s_restore_mem_offset)
++	s_wait_idle
++	s_setreg_b32	hwreg(HW_REG_WAVE_TRAP_CTRL), s_restore_tmp
++
++	// Only the first wave needs to restore the workgroup barrier.
++	s_and_b32	s_restore_tmp, s_restore_spi_init_hi, S_RESTORE_SPI_INIT_FIRST_WAVE_MASK
++	s_cbranch_scc0	L_SKIP_BARRIER_RESTORE
++
++	// Skip over WAVE_STATUS, since there is no state to restore from it
++	s_add_u32	s_restore_mem_offset, s_restore_mem_offset, 4
++
++	read_hwreg_from_mem(s_restore_tmp, s_restore_buf_rsrc0, s_restore_mem_offset)
++	s_wait_idle
++
++	s_bitcmp1_b32	s_restore_tmp, BARRIER_STATE_VALID_OFFSET
++	s_cbranch_scc0	L_SKIP_BARRIER_RESTORE
++
++	// extract the saved signal count from s_restore_tmp
++	s_lshr_b32	s_restore_tmp, s_restore_tmp, BARRIER_STATE_SIGNAL_OFFSET
++
++	// We need to call s_barrier_signal repeatedly to restore the signal
++	// count of the work group barrier.  The member count is already
++	// initialized with the number of waves in the work group.
++L_BARRIER_RESTORE_LOOP:
++	s_and_b32	s_restore_tmp, s_restore_tmp, s_restore_tmp
++	s_cbranch_scc0	L_SKIP_BARRIER_RESTORE
++	s_barrier_signal	-1
++	s_add_i32	s_restore_tmp, s_restore_tmp, -1
++	s_branch	L_BARRIER_RESTORE_LOOP
++
++L_SKIP_BARRIER_RESTORE:
++
++	s_mov_b32	m0, s_restore_m0
++	s_mov_b32	exec_lo, s_restore_exec_lo
++	s_mov_b32	exec_hi, s_restore_exec_hi
++
++	// EXCP_FLAG_PRIV.SAVE_CONTEXT and HOST_TRAP may have changed.
++	// Only restore the other fields to avoid clobbering them.
++	s_setreg_b32	hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV, 0, SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_1_SIZE), s_restore_excp_flag_priv
++	s_lshr_b32	s_restore_excp_flag_priv, s_restore_excp_flag_priv, SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_2_SHIFT
++	s_setreg_b32	hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV, SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_2_SHIFT, SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_2_SIZE), s_restore_excp_flag_priv
++	s_lshr_b32	s_restore_excp_flag_priv, s_restore_excp_flag_priv, SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_3_SHIFT - SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_2_SHIFT
++	s_setreg_b32	hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV, SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_3_SHIFT, SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_3_SIZE), s_restore_excp_flag_priv
++
++	s_setreg_b32	hwreg(HW_REG_WAVE_MODE), s_restore_mode
++
++	// Restore trap temporaries 4-11, 13 initialized by SPI debug dispatch logic
++	// ttmp SR memory offset : size(VGPR)+size(SVGPR)+size(SGPR)+0x40
++	get_vgpr_size_bytes(s_restore_ttmps_lo, s_restore_size)
++	get_svgpr_size_bytes(s_restore_ttmps_hi)
++	s_add_u32	s_restore_ttmps_lo, s_restore_ttmps_lo, s_restore_ttmps_hi
++	s_add_u32	s_restore_ttmps_lo, s_restore_ttmps_lo, get_sgpr_size_bytes()
++	s_add_u32	s_restore_ttmps_lo, s_restore_ttmps_lo, s_restore_buf_rsrc0
++	s_addc_u32	s_restore_ttmps_hi, s_restore_buf_rsrc1, 0x0
++	s_and_b32	s_restore_ttmps_hi, s_restore_ttmps_hi, 0xFFFF
++	s_load_dwordx4	[ttmp4, ttmp5, ttmp6, ttmp7], [s_restore_ttmps_lo, s_restore_ttmps_hi], 0x50 scope:SCOPE_SYS
++	s_load_dwordx4	[ttmp8, ttmp9, ttmp10, ttmp11], [s_restore_ttmps_lo, s_restore_ttmps_hi], 0x60 scope:SCOPE_SYS
++	s_load_dword	ttmp13, [s_restore_ttmps_lo, s_restore_ttmps_hi], 0x74 scope:SCOPE_SYS
++	s_wait_idle
++
++	s_and_b32	s_restore_pc_hi, s_restore_pc_hi, 0x0000ffff		//pc[47:32] //Do it here in order not to affect STATUS
++	s_and_b64	exec, exec, exec					// Restore STATUS.EXECZ, not writable by s_setreg_b32
++	s_and_b64	vcc, vcc, vcc						// Restore STATUS.VCCZ, not writable by s_setreg_b32
++
++	s_setreg_b32	hwreg(HW_REG_WAVE_STATE_PRIV), s_restore_state_priv	// SCC is included, which is changed by previous salu
++
++	// Make barrier and LDS state visible to all waves in the group.
++	// STATE_PRIV.BARRIER_COMPLETE may change after this point.
++	s_barrier_signal	-2
++	s_barrier_wait	-2
++
++	s_rfe_b64	s_restore_pc_lo						//Return to the main shader program and resume execution
++
++L_END_PGM:
++	// Make sure that no wave of the workgroup can exit the trap handler
++	// before the workgroup barrier state is saved.
++	s_barrier_signal	-2
++	s_barrier_wait	-2
++	s_endpgm_saved
++end
++
++function write_hwreg_to_v2(s)
++	// Copy into VGPR for later TCP store.
++	v_writelane_b32	v2, s, m0
++	s_add_u32	m0, m0, 0x1
++end
++
++
++function write_16sgpr_to_v2(s)
++	// Copy into VGPR for later TCP store.
++	for var sgpr_idx = 0; sgpr_idx < 16; sgpr_idx ++
++		v_writelane_b32	v2, s[sgpr_idx], ttmp13
++		s_add_u32	ttmp13, ttmp13, 0x1
++	end
++end
++
++function write_12sgpr_to_v2(s)
++	// Copy into VGPR for later TCP store.
++	for var sgpr_idx = 0; sgpr_idx < 12; sgpr_idx ++
++		v_writelane_b32	v2, s[sgpr_idx], ttmp13
++		s_add_u32	ttmp13, ttmp13, 0x1
++	end
++end
++
++function read_hwreg_from_mem(s, s_rsrc, s_mem_offset)
++	s_buffer_load_dword	s, s_rsrc, s_mem_offset scope:SCOPE_SYS
++	s_add_u32	s_mem_offset, s_mem_offset, 4
++end
++
++function read_16sgpr_from_mem(s, s_rsrc, s_mem_offset)
++	s_sub_u32	s_mem_offset, s_mem_offset, 4*16
++	s_buffer_load_dwordx16	s, s_rsrc, s_mem_offset scope:SCOPE_SYS
++end
++
++function read_8sgpr_from_mem(s, s_rsrc, s_mem_offset)
++	s_sub_u32	s_mem_offset, s_mem_offset, 4*8
++	s_buffer_load_dwordx8	s, s_rsrc, s_mem_offset scope:SCOPE_SYS
++end
++
++function read_4sgpr_from_mem(s, s_rsrc, s_mem_offset)
++	s_sub_u32	s_mem_offset, s_mem_offset, 4*4
++	s_buffer_load_dwordx4	s, s_rsrc, s_mem_offset scope:SCOPE_SYS
++end
++
++function get_vgpr_size_bytes(s_vgpr_size_byte, s_size)
++	s_getreg_b32	s_vgpr_size_byte, hwreg(HW_REG_WAVE_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE)
++	s_add_u32	s_vgpr_size_byte, s_vgpr_size_byte, 1
++	s_bitcmp1_b32	s_size, S_WAVE_SIZE
++	s_cbranch_scc1	L_ENABLE_SHIFT_W64
++	s_lshl_b32	s_vgpr_size_byte, s_vgpr_size_byte, (2+7)		//VGPR save size in bytes = (vgpr_size + 1) * 4 * 32 * 4   (non-zero value)
++	s_branch	L_SHIFT_DONE
++L_ENABLE_SHIFT_W64:
++	s_lshl_b32	s_vgpr_size_byte, s_vgpr_size_byte, (2+8)		//VGPR save size in bytes = (vgpr_size + 1) * 4 * 64 * 4   (non-zero value)
++L_SHIFT_DONE:
++end
++
++function get_svgpr_size_bytes(s_svgpr_size_byte)
++	s_getreg_b32	s_svgpr_size_byte, hwreg(HW_REG_WAVE_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SIZE)
++	s_lshl_b32	s_svgpr_size_byte, s_svgpr_size_byte, (3+7)
++end
++
++function get_sgpr_size_bytes
++	return 512
++end
++
++function get_hwreg_size_bytes
++	return 128
++end
++
++function get_wave_size2(s_reg)
++	s_getreg_b32	s_reg, hwreg(HW_REG_WAVE_STATUS,SQ_WAVE_STATUS_WAVE64_SHIFT,SQ_WAVE_STATUS_WAVE64_SIZE)
++	s_lshl_b32	s_reg, s_reg, S_WAVE_SIZE
++end
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
+index ab1132bc896a32..d9955c5d2e5ed5 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
+@@ -174,7 +174,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN32)
+ ###############################################################################
+ # DCN35
+ ###############################################################################
+-CLK_MGR_DCN35 = dcn35_smu.o dcn35_clk_mgr.o
++CLK_MGR_DCN35 = dcn35_smu.o dcn351_clk_mgr.o dcn35_clk_mgr.o
+ 
+ AMD_DAL_CLK_MGR_DCN35 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn35/,$(CLK_MGR_DCN35))
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+index 0e243f4344d050..4c3e58c730b11c 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+@@ -355,8 +355,11 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
+ 			BREAK_TO_DEBUGGER();
+ 			return NULL;
+ 		}
++		if (ctx->dce_version == DCN_VERSION_3_51)
++			dcn351_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
++		else
++			dcn35_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
+ 
+-		dcn35_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
+ 		return &clk_mgr->base.base;
+ 	}
+ 	break;
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
+index e93df3d6222e68..bc123f1884da32 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
+@@ -50,12 +50,13 @@
+ #include "link.h"
+ 
+ #include "logger_types.h"
++
++
++#include "yellow_carp_offset.h"
+ #undef DC_LOGGER
+ #define DC_LOGGER \
+ 	clk_mgr->base.base.ctx->logger
+ 
+-#include "yellow_carp_offset.h"
+-
+ #define regCLK1_CLK_PLL_REQ			0x0237
+ #define regCLK1_CLK_PLL_REQ_BASE_IDX		0
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
+index 29eff386505ab5..91d872d6d392b1 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
+@@ -53,9 +53,6 @@
+ 
+ 
+ #include "logger_types.h"
+-#undef DC_LOGGER
+-#define DC_LOGGER \
+-	clk_mgr->base.base.ctx->logger
+ 
+ 
+ #define MAX_INSTANCE                                        7
+@@ -77,6 +74,9 @@ static const struct IP_BASE CLK_BASE = { { { { 0x00016C00, 0x02401800, 0, 0, 0,
+ 					{ { 0x0001B200, 0x0242DC00, 0, 0, 0, 0, 0, 0 } },
+ 					{ { 0x0001B400, 0x0242E000, 0, 0, 0, 0, 0, 0 } } } };
+ 
++#undef DC_LOGGER
++#define DC_LOGGER \
++	clk_mgr->base.base.ctx->logger
+ #define regCLK1_CLK_PLL_REQ			0x0237
+ #define regCLK1_CLK_PLL_REQ_BASE_IDX		0
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn351_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn351_clk_mgr.c
+new file mode 100644
+index 00000000000000..6a6ae618650b6d
+--- /dev/null
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn351_clk_mgr.c
+@@ -0,0 +1,140 @@
++/*
++ * Copyright 2024 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: AMD
++ *
++ */
++
++#include "core_types.h"
++#include "dcn35_clk_mgr.h"
++
++#define DCN_BASE__INST0_SEG1 0x000000C0
++#define mmCLK1_CLK_PLL_REQ 0x16E37
++
++#define mmCLK1_CLK0_DFS_CNTL 0x16E69
++#define mmCLK1_CLK1_DFS_CNTL 0x16E6C
++#define mmCLK1_CLK2_DFS_CNTL 0x16E6F
++#define mmCLK1_CLK3_DFS_CNTL 0x16E72
++#define mmCLK1_CLK4_DFS_CNTL 0x16E75
++#define mmCLK1_CLK5_DFS_CNTL 0x16E78
++
++#define mmCLK1_CLK0_CURRENT_CNT 0x16EFC
++#define mmCLK1_CLK1_CURRENT_CNT 0x16EFD
++#define mmCLK1_CLK2_CURRENT_CNT 0x16EFE
++#define mmCLK1_CLK3_CURRENT_CNT 0x16EFF
++#define mmCLK1_CLK4_CURRENT_CNT 0x16F00
++#define mmCLK1_CLK5_CURRENT_CNT 0x16F01
++
++#define mmCLK1_CLK0_BYPASS_CNTL 0x16E8A
++#define mmCLK1_CLK1_BYPASS_CNTL 0x16E93
++#define mmCLK1_CLK2_BYPASS_CNTL 0x16E9C
++#define mmCLK1_CLK3_BYPASS_CNTL 0x16EA5
++#define mmCLK1_CLK4_BYPASS_CNTL 0x16EAE
++#define mmCLK1_CLK5_BYPASS_CNTL 0x16EB7
++
++#define mmCLK1_CLK0_DS_CNTL 0x16E83
++#define mmCLK1_CLK1_DS_CNTL 0x16E8C
++#define mmCLK1_CLK2_DS_CNTL 0x16E95
++#define mmCLK1_CLK3_DS_CNTL 0x16E9E
++#define mmCLK1_CLK4_DS_CNTL 0x16EA7
++#define mmCLK1_CLK5_DS_CNTL 0x16EB0
++
++#define mmCLK1_CLK0_ALLOW_DS 0x16E84
++#define mmCLK1_CLK1_ALLOW_DS 0x16E8D
++#define mmCLK1_CLK2_ALLOW_DS 0x16E96
++#define mmCLK1_CLK3_ALLOW_DS 0x16E9F
++#define mmCLK1_CLK4_ALLOW_DS 0x16EA8
++#define mmCLK1_CLK5_ALLOW_DS 0x16EB1
++
++#define mmCLK5_spll_field_8 0x1B04B
++#define mmDENTIST_DISPCLK_CNTL 0x0124
++#define regDENTIST_DISPCLK_CNTL 0x0064
++#define regDENTIST_DISPCLK_CNTL_BASE_IDX 1
++
++#define CLK1_CLK_PLL_REQ__FbMult_int__SHIFT 0x0
++#define CLK1_CLK_PLL_REQ__PllSpineDiv__SHIFT 0xc
++#define CLK1_CLK_PLL_REQ__FbMult_frac__SHIFT 0x10
++#define CLK1_CLK_PLL_REQ__FbMult_int_MASK 0x000001FFL
++#define CLK1_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000F000L
++#define CLK1_CLK_PLL_REQ__FbMult_frac_MASK 0xFFFF0000L
++
++#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL_MASK 0x00000007L
++
++// DENTIST_DISPCLK_CNTL
++#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_WDIVIDER__SHIFT 0x0
++#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_RDIVIDER__SHIFT 0x8
++#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_DONE__SHIFT 0x13
++#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_CHG_DONE__SHIFT 0x14
++#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_WDIVIDER__SHIFT 0x18
++#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_WDIVIDER_MASK 0x0000007FL
++#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_RDIVIDER_MASK 0x00007F00L
++#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_DONE_MASK 0x00080000L
++#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_CHG_DONE_MASK 0x00100000L
++#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_WDIVIDER_MASK 0x7F000000L
++
++#define CLK5_spll_field_8__spll_ssc_en_MASK 0x00002000L
++
++#define REG(reg) \
++	(clk_mgr->regs->reg)
++
++#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
++
++#define BASE(seg) BASE_INNER(seg)
++
++#define SR(reg_name)\
++		.reg_name = BASE(reg ## reg_name ## _BASE_IDX) +  \
++					reg ## reg_name
++
++#define CLK_SR_DCN35(reg_name)\
++	.reg_name = mm ## reg_name
++
++static const struct clk_mgr_registers clk_mgr_regs_dcn351 = {
++	CLK_REG_LIST_DCN35()
++};
++
++static const struct clk_mgr_shift clk_mgr_shift_dcn351 = {
++	CLK_COMMON_MASK_SH_LIST_DCN32(__SHIFT)
++};
++
++static const struct clk_mgr_mask clk_mgr_mask_dcn351 = {
++	CLK_COMMON_MASK_SH_LIST_DCN32(_MASK)
++};
++
++#define TO_CLK_MGR_DCN35(clk_mgr)\
++	container_of(clk_mgr, struct clk_mgr_dcn35, base)
++
++
++void dcn351_clk_mgr_construct(
++		struct dc_context *ctx,
++		struct clk_mgr_dcn35 *clk_mgr,
++		struct pp_smu_funcs *pp_smu,
++		struct dccg *dccg)
++{
++	/* register offsets differ from DCN3.5 */
++	clk_mgr->base.regs = &clk_mgr_regs_dcn351;
++	clk_mgr->base.clk_mgr_shift = &clk_mgr_shift_dcn351;
++	clk_mgr->base.clk_mgr_mask = &clk_mgr_mask_dcn351;
++
++	dcn35_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
++
++}
++
++
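The CLK_SR_DCN35() and SR() macros above fill the register table by token pasting the "mm"/"reg" prefixes onto the field name. A self-contained C sketch of the same designated-initializer pattern (toy struct and values, for illustration only):

#include <stdio.h>

#define mmCLK1_CLK_PLL_REQ 0x16E37
#define mmCLK1_CLK0_DFS_CNTL 0x16E69

struct demo_regs {
	unsigned int CLK1_CLK_PLL_REQ;
	unsigned int CLK1_CLK0_DFS_CNTL;
};

/* Same shape as CLK_SR_DCN35(): paste "mm" onto the field name. */
#define CLK_SR(reg_name) .reg_name = mm ## reg_name

static const struct demo_regs regs = {
	CLK_SR(CLK1_CLK_PLL_REQ),
	CLK_SR(CLK1_CLK0_DFS_CNTL),
};

int main(void)
{
	printf("CLK1_CLK_PLL_REQ offset: %#x\n", regs.CLK1_CLK_PLL_REQ);
	return 0;
}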
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
+index 3bd0d46c170109..7d0d8852ce8d27 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
+@@ -36,15 +36,11 @@
+ #include "dcn20/dcn20_clk_mgr.h"
+ 
+ 
+-
+-
+ #include "reg_helper.h"
+ #include "core_types.h"
+ #include "dcn35_smu.h"
+ #include "dm_helpers.h"
+ 
+-/* TODO: remove this include once we ported over remaining clk mgr functions*/
+-#include "dcn30/dcn30_clk_mgr.h"
+ #include "dcn31/dcn31_clk_mgr.h"
+ 
+ #include "dc_dmub_srv.h"
+@@ -55,34 +51,102 @@
+ #define DC_LOGGER \
+ 	clk_mgr->base.base.ctx->logger
+ 
+-#define regCLK1_CLK_PLL_REQ			0x0237
+-#define regCLK1_CLK_PLL_REQ_BASE_IDX		0
++#define DCN_BASE__INST0_SEG1 0x000000C0
++#define mmCLK1_CLK_PLL_REQ 0x16E37
++
++#define mmCLK1_CLK0_DFS_CNTL 0x16E69
++#define mmCLK1_CLK1_DFS_CNTL 0x16E6C
++#define mmCLK1_CLK2_DFS_CNTL 0x16E6F
++#define mmCLK1_CLK3_DFS_CNTL 0x16E72
++#define mmCLK1_CLK4_DFS_CNTL 0x16E75
++#define mmCLK1_CLK5_DFS_CNTL 0x16E78
++
++#define mmCLK1_CLK0_CURRENT_CNT 0x16EFB
++#define mmCLK1_CLK1_CURRENT_CNT 0x16EFC
++#define mmCLK1_CLK2_CURRENT_CNT 0x16EFD
++#define mmCLK1_CLK3_CURRENT_CNT 0x16EFE
++#define mmCLK1_CLK4_CURRENT_CNT 0x16EFF
++#define mmCLK1_CLK5_CURRENT_CNT 0x16F00
++
++#define mmCLK1_CLK0_BYPASS_CNTL 0x16E8A
++#define mmCLK1_CLK1_BYPASS_CNTL 0x16E93
++#define mmCLK1_CLK2_BYPASS_CNTL 0x16E9C
++#define mmCLK1_CLK3_BYPASS_CNTL 0x16EA5
++#define mmCLK1_CLK4_BYPASS_CNTL 0x16EAE
++#define mmCLK1_CLK5_BYPASS_CNTL 0x16EB7
++
++#define mmCLK1_CLK0_DS_CNTL 0x16E83
++#define mmCLK1_CLK1_DS_CNTL 0x16E8C
++#define mmCLK1_CLK2_DS_CNTL 0x16E95
++#define mmCLK1_CLK3_DS_CNTL 0x16E9E
++#define mmCLK1_CLK4_DS_CNTL 0x16EA7
++#define mmCLK1_CLK5_DS_CNTL 0x16EB0
++
++#define mmCLK1_CLK0_ALLOW_DS 0x16E84
++#define mmCLK1_CLK1_ALLOW_DS 0x16E8D
++#define mmCLK1_CLK2_ALLOW_DS 0x16E96
++#define mmCLK1_CLK3_ALLOW_DS 0x16E9F
++#define mmCLK1_CLK4_ALLOW_DS 0x16EA8
++#define mmCLK1_CLK5_ALLOW_DS 0x16EB1
++
++#define mmCLK5_spll_field_8 0x1B24B
++#define mmDENTIST_DISPCLK_CNTL 0x0124
++#define regDENTIST_DISPCLK_CNTL 0x0064
++#define regDENTIST_DISPCLK_CNTL_BASE_IDX 1
++
++#define CLK1_CLK_PLL_REQ__FbMult_int__SHIFT 0x0
++#define CLK1_CLK_PLL_REQ__PllSpineDiv__SHIFT 0xc
++#define CLK1_CLK_PLL_REQ__FbMult_frac__SHIFT 0x10
++#define CLK1_CLK_PLL_REQ__FbMult_int_MASK 0x000001FFL
++#define CLK1_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000F000L
++#define CLK1_CLK_PLL_REQ__FbMult_frac_MASK 0xFFFF0000L
++
++#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL_MASK 0x00000007L
++#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV_MASK 0x000F0000L
++// DENTIST_DISPCLK_CNTL
++#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_WDIVIDER__SHIFT 0x0
++#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_RDIVIDER__SHIFT 0x8
++#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_DONE__SHIFT 0x13
++#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_CHG_DONE__SHIFT 0x14
++#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_WDIVIDER__SHIFT 0x18
++#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_WDIVIDER_MASK 0x0000007FL
++#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_RDIVIDER_MASK 0x00007F00L
++#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_DONE_MASK 0x00080000L
++#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_CHG_DONE_MASK 0x00100000L
++#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_WDIVIDER_MASK 0x7F000000L
++
++#define CLK5_spll_field_8__spll_ssc_en_MASK 0x00002000L
+ 
+-#define CLK1_CLK_PLL_REQ__FbMult_int__SHIFT	0x0
+-#define CLK1_CLK_PLL_REQ__PllSpineDiv__SHIFT	0xc
+-#define CLK1_CLK_PLL_REQ__FbMult_frac__SHIFT	0x10
+-#define CLK1_CLK_PLL_REQ__FbMult_int_MASK	0x000001FFL
+-#define CLK1_CLK_PLL_REQ__PllSpineDiv_MASK	0x0000F000L
+-#define CLK1_CLK_PLL_REQ__FbMult_frac_MASK	0xFFFF0000L
++#define SMU_VER_THRESHOLD 0x5D4A00 //93.74.0
++#undef FN
++#define FN(reg_name, field_name) \
++	clk_mgr->clk_mgr_shift->field_name, clk_mgr->clk_mgr_mask->field_name
+ 
+-#define regCLK1_CLK2_BYPASS_CNTL			0x029c
+-#define regCLK1_CLK2_BYPASS_CNTL_BASE_IDX	0
++#define REG(reg) \
++	(clk_mgr->regs->reg)
+ 
+-#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL__SHIFT	0x0
+-#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV__SHIFT	0x10
+-#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL_MASK		0x00000007L
+-#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV_MASK		0x000F0000L
++#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
+ 
+-#define regCLK5_0_CLK5_spll_field_8				0x464b
+-#define regCLK5_0_CLK5_spll_field_8_BASE_IDX	0
++#define BASE(seg) BASE_INNER(seg)
+ 
+-#define CLK5_0_CLK5_spll_field_8__spll_ssc_en__SHIFT	0xd
+-#define CLK5_0_CLK5_spll_field_8__spll_ssc_en_MASK		0x00002000L
++#define SR(reg_name)\
++		.reg_name = BASE(reg ## reg_name ## _BASE_IDX) +  \
++					reg ## reg_name
+ 
+-#define SMU_VER_THRESHOLD 0x5D4A00 //93.74.0
++#define CLK_SR_DCN35(reg_name)\
++	.reg_name = mm ## reg_name
+ 
+-#define REG(reg_name) \
+-	(ctx->clk_reg_offsets[reg ## reg_name ## _BASE_IDX] + reg ## reg_name)
++static const struct clk_mgr_registers clk_mgr_regs_dcn35 = {
++	CLK_REG_LIST_DCN35()
++};
++
++static const struct clk_mgr_shift clk_mgr_shift_dcn35 = {
++	CLK_COMMON_MASK_SH_LIST_DCN32(__SHIFT)
++};
++
++static const struct clk_mgr_mask clk_mgr_mask_dcn35 = {
++	CLK_COMMON_MASK_SH_LIST_DCN32(_MASK)
++};
+ 
+ #define TO_CLK_MGR_DCN35(clk_mgr)\
+ 	container_of(clk_mgr, struct clk_mgr_dcn35, base)
+@@ -443,7 +507,6 @@ static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr)
+ 	struct fixed31_32 pll_req;
+ 	unsigned int fbmult_frac_val = 0;
+ 	unsigned int fbmult_int_val = 0;
+-	struct dc_context *ctx = clk_mgr->base.ctx;
+ 
+ 	/*
+ 	 * Register value of fbmult is in 8.16 format, we are converting to 314.32
+@@ -503,12 +566,12 @@ static void dcn35_dump_clk_registers(struct clk_state_registers_and_bypass *regs
+ static bool dcn35_is_spll_ssc_enabled(struct clk_mgr *clk_mgr_base)
+ {
+ 	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+-	struct dc_context *ctx = clk_mgr->base.ctx;
++
+ 	uint32_t ssc_enable;
+ 
+-	REG_GET(CLK5_0_CLK5_spll_field_8, spll_ssc_en, &ssc_enable);
++	ssc_enable = REG_READ(CLK5_spll_field_8) & CLK5_spll_field_8__spll_ssc_en_MASK;
+ 
+-	return ssc_enable == 1;
++	return ssc_enable != 0;
+ }
+ 
+ static void init_clk_states(struct clk_mgr *clk_mgr)
+@@ -633,10 +696,10 @@ static struct dcn35_ss_info_table ss_info_table = {
+ 
+ static void dcn35_read_ss_info_from_lut(struct clk_mgr_internal *clk_mgr)
+ {
+-	struct dc_context *ctx = clk_mgr->base.ctx;
+-	uint32_t clock_source;
++	uint32_t clock_source = 0;
++
++	clock_source = REG_READ(CLK1_CLK2_BYPASS_CNTL) & CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL_MASK;
+ 
+-	REG_GET(CLK1_CLK2_BYPASS_CNTL, CLK2_BYPASS_SEL, &clock_source);
+ 	// If it's DFS mode, clock_source is 0.
+ 	if (dcn35_is_spll_ssc_enabled(&clk_mgr->base) && (clock_source < ARRAY_SIZE(ss_info_table.ss_percentage))) {
+ 		clk_mgr->dprefclk_ss_percentage = ss_info_table.ss_percentage[clock_source];
+@@ -1106,6 +1169,12 @@ void dcn35_clk_mgr_construct(
+ 	clk_mgr->base.dprefclk_ss_divider = 1000;
+ 	clk_mgr->base.ss_on_dprefclk = false;
+ 	clk_mgr->base.dfs_ref_freq_khz = 48000;
++	if (ctx->dce_version == DCN_VERSION_3_5) {
++		clk_mgr->base.regs = &clk_mgr_regs_dcn35;
++		clk_mgr->base.clk_mgr_shift = &clk_mgr_shift_dcn35;
++		clk_mgr->base.clk_mgr_mask = &clk_mgr_mask_dcn35;
++	}
++
+ 
+ 	clk_mgr->smu_wm_set.wm_set = (struct dcn35_watermarks *)dm_helpers_allocate_gpu_mem(
+ 				clk_mgr->base.base.ctx,
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.h
+index 1203dc605b12c4..a12a9bf90806ed 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.h
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.h
+@@ -60,4 +60,8 @@ void dcn35_clk_mgr_construct(struct dc_context *ctx,
+ 
+ void dcn35_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr_int);
+ 
++void dcn351_clk_mgr_construct(struct dc_context *ctx,
++		struct clk_mgr_dcn35 *clk_mgr,
++		struct pp_smu_funcs *pp_smu,
++		struct dccg *dccg);
+ #endif //__DCN35_CLK_MGR_H__
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
+index c2dd061892f4d9..7a1ca1e98059b0 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
+@@ -166,6 +166,41 @@ enum dentist_divider_range {
+     CLK_SR_DCN32(CLK1_CLK4_CURRENT_CNT), \
+     CLK_SR_DCN32(CLK4_CLK0_CURRENT_CNT)
+ 
++#define CLK_REG_LIST_DCN35()	  \
++	CLK_SR_DCN35(CLK1_CLK_PLL_REQ), \
++	CLK_SR_DCN35(CLK1_CLK0_DFS_CNTL), \
++	CLK_SR_DCN35(CLK1_CLK1_DFS_CNTL), \
++	CLK_SR_DCN35(CLK1_CLK2_DFS_CNTL), \
++	CLK_SR_DCN35(CLK1_CLK3_DFS_CNTL), \
++	CLK_SR_DCN35(CLK1_CLK4_DFS_CNTL), \
++	CLK_SR_DCN35(CLK1_CLK5_DFS_CNTL), \
++	CLK_SR_DCN35(CLK1_CLK0_CURRENT_CNT), \
++	CLK_SR_DCN35(CLK1_CLK1_CURRENT_CNT), \
++	CLK_SR_DCN35(CLK1_CLK2_CURRENT_CNT), \
++	CLK_SR_DCN35(CLK1_CLK3_CURRENT_CNT), \
++	CLK_SR_DCN35(CLK1_CLK4_CURRENT_CNT), \
++	CLK_SR_DCN35(CLK1_CLK5_CURRENT_CNT), \
++	CLK_SR_DCN35(CLK1_CLK0_BYPASS_CNTL), \
++	CLK_SR_DCN35(CLK1_CLK1_BYPASS_CNTL), \
++	CLK_SR_DCN35(CLK1_CLK2_BYPASS_CNTL), \
++	CLK_SR_DCN35(CLK1_CLK3_BYPASS_CNTL), \
++	CLK_SR_DCN35(CLK1_CLK4_BYPASS_CNTL),\
++	CLK_SR_DCN35(CLK1_CLK5_BYPASS_CNTL), \
++	CLK_SR_DCN35(CLK1_CLK0_DS_CNTL), \
++	CLK_SR_DCN35(CLK1_CLK1_DS_CNTL), \
++	CLK_SR_DCN35(CLK1_CLK2_DS_CNTL), \
++	CLK_SR_DCN35(CLK1_CLK3_DS_CNTL), \
++	CLK_SR_DCN35(CLK1_CLK4_DS_CNTL), \
++	CLK_SR_DCN35(CLK1_CLK5_DS_CNTL), \
++	CLK_SR_DCN35(CLK1_CLK0_ALLOW_DS), \
++	CLK_SR_DCN35(CLK1_CLK1_ALLOW_DS), \
++	CLK_SR_DCN35(CLK1_CLK2_ALLOW_DS), \
++	CLK_SR_DCN35(CLK1_CLK3_ALLOW_DS), \
++	CLK_SR_DCN35(CLK1_CLK4_ALLOW_DS), \
++	CLK_SR_DCN35(CLK1_CLK5_ALLOW_DS), \
++	CLK_SR_DCN35(CLK5_spll_field_8), \
++	SR(DENTIST_DISPCLK_CNTL), \
++
+ #define CLK_COMMON_MASK_SH_LIST_DCN32(mask_sh) \
+ 	CLK_COMMON_MASK_SH_LIST_DCN20_BASE(mask_sh),\
+ 	CLK_SF(CLK1_CLK_PLL_REQ, FbMult_int, mask_sh),\
+@@ -236,6 +271,7 @@ struct clk_mgr_registers {
+ 	uint32_t CLK1_CLK2_DFS_CNTL;
+ 	uint32_t CLK1_CLK3_DFS_CNTL;
+ 	uint32_t CLK1_CLK4_DFS_CNTL;
++	uint32_t CLK1_CLK5_DFS_CNTL;
+ 	uint32_t CLK2_CLK2_DFS_CNTL;
+ 
+ 	uint32_t CLK1_CLK0_CURRENT_CNT;
+@@ -243,11 +279,34 @@ struct clk_mgr_registers {
+     uint32_t CLK1_CLK2_CURRENT_CNT;
+     uint32_t CLK1_CLK3_CURRENT_CNT;
+     uint32_t CLK1_CLK4_CURRENT_CNT;
++	uint32_t CLK1_CLK5_CURRENT_CNT;
+ 
+ 	uint32_t CLK0_CLK0_DFS_CNTL;
+ 	uint32_t CLK0_CLK1_DFS_CNTL;
+ 	uint32_t CLK0_CLK3_DFS_CNTL;
+ 	uint32_t CLK0_CLK4_DFS_CNTL;
++	uint32_t CLK1_CLK0_BYPASS_CNTL;
++	uint32_t CLK1_CLK1_BYPASS_CNTL;
++	uint32_t CLK1_CLK2_BYPASS_CNTL;
++	uint32_t CLK1_CLK3_BYPASS_CNTL;
++	uint32_t CLK1_CLK4_BYPASS_CNTL;
++	uint32_t CLK1_CLK5_BYPASS_CNTL;
++
++	uint32_t CLK1_CLK0_DS_CNTL;
++	uint32_t CLK1_CLK1_DS_CNTL;
++	uint32_t CLK1_CLK2_DS_CNTL;
++	uint32_t CLK1_CLK3_DS_CNTL;
++	uint32_t CLK1_CLK4_DS_CNTL;
++	uint32_t CLK1_CLK5_DS_CNTL;
++
++	uint32_t CLK1_CLK0_ALLOW_DS;
++	uint32_t CLK1_CLK1_ALLOW_DS;
++	uint32_t CLK1_CLK2_ALLOW_DS;
++	uint32_t CLK1_CLK3_ALLOW_DS;
++	uint32_t CLK1_CLK4_ALLOW_DS;
++	uint32_t CLK1_CLK5_ALLOW_DS;
++	uint32_t CLK5_spll_field_8;
++
+ };
+ 
+ struct clk_mgr_shift {
+diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
+index d78c8ec4de79e7..885e749cdc6e96 100644
+--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
++++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
+@@ -51,9 +51,10 @@
+ #include "dc_dmub_srv.h"
+ #include "gpio_service_interface.h"
+ 
++#define DC_TRACE_LEVEL_MESSAGE(...) /* do nothing */
++
+ #define DC_LOGGER \
+ 	link->ctx->logger
+-#define DC_TRACE_LEVEL_MESSAGE(...) /* do nothing */
+ 
+ #ifndef MAX
+ #define MAX(X, Y) ((X) > (Y) ? (X) : (Y))
+diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
+index b1c294236cc878..2f1d9ce87ceb01 100644
+--- a/drivers/gpu/drm/i915/display/intel_ddi.c
++++ b/drivers/gpu/drm/i915/display/intel_ddi.c
+@@ -3363,7 +3363,7 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state,
+ 		intel_de_rmw(dev_priv, XELPDP_PORT_BUF_CTL1(dev_priv, port),
+ 			     XELPDP_PORT_WIDTH_MASK | XELPDP_PORT_REVERSAL, port_buf);
+ 
+-		buf_ctl |= DDI_PORT_WIDTH(lane_count);
++		buf_ctl |= DDI_PORT_WIDTH(crtc_state->lane_count);
+ 
+ 		if (DISPLAY_VER(dev_priv) >= 20)
+ 			buf_ctl |= XE2LPD_DDI_BUF_D2D_LINK_ENABLE;
+diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
+index b4ef4d59da1ace..2c6d0da8a16f8c 100644
+--- a/drivers/gpu/drm/i915/display/intel_display.c
++++ b/drivers/gpu/drm/i915/display/intel_display.c
+@@ -6369,12 +6369,30 @@ static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct in
+ static int intel_joiner_add_affected_crtcs(struct intel_atomic_state *state)
+ {
+ 	struct drm_i915_private *i915 = to_i915(state->base.dev);
++	const struct intel_plane_state *plane_state;
+ 	struct intel_crtc_state *crtc_state;
++	struct intel_plane *plane;
+ 	struct intel_crtc *crtc;
+ 	u8 affected_pipes = 0;
+ 	u8 modeset_pipes = 0;
+ 	int i;
+ 
++	/*
++	 * Any plane which is in use by the joiner needs its crtc.
++	 * Pull those in first as this will not have happened yet
++	 * if the plane remains disabled according to uapi.
++	 */
++	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
++		crtc = to_intel_crtc(plane_state->hw.crtc);
++		if (!crtc)
++			continue;
++
++		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
++		if (IS_ERR(crtc_state))
++			return PTR_ERR(crtc_state);
++	}
++
++	/* Now pull in all joined crtcs */
+ 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+ 		affected_pipes |= crtc_state->joiner_pipes;
+ 		if (intel_crtc_needs_modeset(crtc_state))
+diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+index 40bedc31d6bf2f..5d8f93d4cdc6a6 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
++++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+@@ -1561,7 +1561,7 @@ intel_dp_128b132b_link_train(struct intel_dp *intel_dp,
+ 
+ 	if (wait_for(intel_dp_128b132b_intra_hop(intel_dp, crtc_state) == 0, 500)) {
+ 		lt_err(intel_dp, DP_PHY_DPRX, "128b/132b intra-hop not clear\n");
+-		return false;
++		goto out;
+ 	}
+ 
+ 	if (intel_dp_128b132b_lane_eq(intel_dp, crtc_state) &&
+@@ -1573,6 +1573,19 @@ intel_dp_128b132b_link_train(struct intel_dp *intel_dp,
+ 	       passed ? "passed" : "failed",
+ 	       crtc_state->port_clock, crtc_state->lane_count);
+ 
++out:
++	/*
++	 * Ensure that the training pattern does get set to TPS2 even in case
++	 * of a failure, as is the case at the end of a passing link training
++	 * and what is expected by the transcoder. Leaving TPS1 set (and
++	 * disabling the link train mode in DP_TP_CTL later from TPS1 directly)
++	 * would result in a stuck transcoder HW state and flip-done timeouts
++	 * later in the modeset sequence.
++	 */
++	if (!passed)
++		intel_dp_program_link_training_pattern(intel_dp, crtc_state,
++						       DP_PHY_DPRX, DP_TRAINING_PATTERN_2);
++
+ 	return passed;
+ }
+ 
+diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+index b0e94c95940f67..8aaadbb702df6d 100644
+--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
++++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+@@ -3425,10 +3425,10 @@ static inline int guc_lrc_desc_unpin(struct intel_context *ce)
+ 	 */
+ 	ret = deregister_context(ce, ce->guc_id.id);
+ 	if (ret) {
+-		spin_lock(&ce->guc_state.lock);
++		spin_lock_irqsave(&ce->guc_state.lock, flags);
+ 		set_context_registered(ce);
+ 		clr_context_destroyed(ce);
+-		spin_unlock(&ce->guc_state.lock);
++		spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+ 		/*
+ 		 * As gt-pm is awake at function entry, intel_wakeref_put_async merely decrements
+ 		 * the wakeref immediately but per function spec usage call this after unlock.
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index 41f4350a7c6c58..b7f521a9b337d3 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -3869,7 +3869,7 @@ enum skl_power_gate {
+ #define  DDI_BUF_IS_IDLE			(1 << 7)
+ #define  DDI_BUF_CTL_TC_PHY_OWNERSHIP		REG_BIT(6)
+ #define  DDI_A_4_LANES				(1 << 4)
+-#define  DDI_PORT_WIDTH(width)			(((width) - 1) << 1)
++#define  DDI_PORT_WIDTH(width)			(((width) == 3 ? 4 : ((width) - 1)) << 1)
+ #define  DDI_PORT_WIDTH_MASK			(7 << 1)
+ #define  DDI_PORT_WIDTH_SHIFT			1
+ #define  DDI_INIT_DISPLAY_DETECTED		(1 << 0)
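The DDI_PORT_WIDTH() change above special-cases 3-lane links, which per the new macro use field value 4 (0b100) instead of width - 1. A quick standalone check (our test scaffold, not kernel code):

#include <assert.h>

#define DDI_PORT_WIDTH(width) (((width) == 3 ? 4 : ((width) - 1)) << 1)

int main(void)
{
	assert(DDI_PORT_WIDTH(1) == (0 << 1));
	assert(DDI_PORT_WIDTH(2) == (1 << 1));
	assert(DDI_PORT_WIDTH(3) == (4 << 1));	/* was (2 << 1) before the fix */
	assert(DDI_PORT_WIDTH(4) == (3 << 1));
	return 0;
}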
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
+index 421afacb724803..36cc9dbc00b5c1 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
+@@ -297,7 +297,7 @@ static const struct dpu_wb_cfg sm8150_wb[] = {
+ 	{
+ 		.name = "wb_2", .id = WB_2,
+ 		.base = 0x65000, .len = 0x2c8,
+-		.features = WB_SDM845_MASK,
++		.features = WB_SM8250_MASK,
+ 		.format_list = wb2_formats_rgb,
+ 		.num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ 		.clk_ctrl = DPU_CLK_CTRL_WB2,
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
+index 641023b102bf59..e8eacdb47967a2 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
+@@ -304,7 +304,7 @@ static const struct dpu_wb_cfg sc8180x_wb[] = {
+ 	{
+ 		.name = "wb_2", .id = WB_2,
+ 		.base = 0x65000, .len = 0x2c8,
+-		.features = WB_SDM845_MASK,
++		.features = WB_SM8250_MASK,
+ 		.format_list = wb2_formats_rgb,
+ 		.num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ 		.clk_ctrl = DPU_CLK_CTRL_WB2,
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
+index d039b96beb97cf..76f60a2df7a890 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
+@@ -144,7 +144,7 @@ static const struct dpu_wb_cfg sm6125_wb[] = {
+ 	{
+ 		.name = "wb_2", .id = WB_2,
+ 		.base = 0x65000, .len = 0x2c8,
+-		.features = WB_SDM845_MASK,
++		.features = WB_SM8250_MASK,
+ 		.format_list = wb2_formats_rgb,
+ 		.num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ 		.clk_ctrl = DPU_CLK_CTRL_WB2,
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+index bd3698bf0cf740..2cf8150adf81ff 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+@@ -2125,6 +2125,9 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
+ 		}
+ 	}
+ 
++	if (phys_enc->hw_pp && phys_enc->hw_pp->ops.setup_dither)
++		phys_enc->hw_pp->ops.setup_dither(phys_enc->hw_pp, NULL);
++
+ 	/* reset the merge 3D HW block */
+ 	if (phys_enc->hw_pp && phys_enc->hw_pp->merge_3d) {
+ 		phys_enc->hw_pp->merge_3d->ops.setup_3d_mode(phys_enc->hw_pp->merge_3d,
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
+index 5e9aad1b2aa283..d1e0fb2139765c 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
+@@ -52,6 +52,7 @@ static void dpu_hw_dsc_config(struct dpu_hw_dsc *hw_dsc,
+ 	u32 slice_last_group_size;
+ 	u32 det_thresh_flatness;
+ 	bool is_cmd_mode = !(mode & DSC_MODE_VIDEO);
++	bool input_10_bits = dsc->bits_per_component == 10;
+ 
+ 	DPU_REG_WRITE(c, DSC_COMMON_MODE, mode);
+ 
+@@ -68,7 +69,7 @@ static void dpu_hw_dsc_config(struct dpu_hw_dsc *hw_dsc,
+ 	data |= (dsc->line_buf_depth << 3);
+ 	data |= (dsc->simple_422 << 2);
+ 	data |= (dsc->convert_rgb << 1);
+-	data |= dsc->bits_per_component;
++	data |= input_10_bits;
+ 
+ 	DPU_REG_WRITE(c, DSC_ENC, data);
+ 
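The DSC_ENC fix matters because bit 0 of the packed word is a flag meaning "input is 10 bpc", while the neighbouring bits hold convert_rgb, simple_422 and line_buf_depth. OR-ing in the raw bits_per_component (8 = 0b1000, 10 = 0b1010) bled into those fields; a small standalone check (our scaffold):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t bpc = 10;

	uint32_t flag = (bpc == 10);	/* the fix: strictly 0 or 1 */
	uint32_t raw = bpc;		/* the bug: 0b1010 */

	assert(flag == 1);
	assert(raw & ~1u);	/* raw value spills past bit 0 into other fields */
	return 0;
}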
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
+index 0f40eea7f5e247..2040bee8d512f6 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
+@@ -272,7 +272,7 @@ static void _setup_mdp_ops(struct dpu_hw_mdp_ops *ops,
+ 
+ 	if (cap & BIT(DPU_MDP_VSYNC_SEL))
+ 		ops->setup_vsync_source = dpu_hw_setup_vsync_sel;
+-	else
++	else if (!(cap & BIT(DPU_MDP_PERIPH_0_REMOVED)))
+ 		ops->setup_vsync_source = dpu_hw_setup_wd_timer;
+ 
+ 	ops->get_safe_status = dpu_hw_get_safe_status;
+diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
+index 031446c87daec0..798168180c1ab6 100644
+--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
++++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
+@@ -83,6 +83,9 @@ struct dsi_pll_7nm {
+ 	/* protects REG_DSI_7nm_PHY_CMN_CLK_CFG0 register */
+ 	spinlock_t postdiv_lock;
+ 
++	/* protects REG_DSI_7nm_PHY_CMN_CLK_CFG1 register */
++	spinlock_t pclk_mux_lock;
++
+ 	struct pll_7nm_cached_state cached_state;
+ 
+ 	struct dsi_pll_7nm *slave;
+@@ -372,22 +375,41 @@ static void dsi_pll_enable_pll_bias(struct dsi_pll_7nm *pll)
+ 	ndelay(250);
+ }
+ 
+-static void dsi_pll_disable_global_clk(struct dsi_pll_7nm *pll)
++static void dsi_pll_cmn_clk_cfg0_write(struct dsi_pll_7nm *pll, u32 val)
+ {
++	unsigned long flags;
++
++	spin_lock_irqsave(&pll->postdiv_lock, flags);
++	writel(val, pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG0);
++	spin_unlock_irqrestore(&pll->postdiv_lock, flags);
++}
++
++static void dsi_pll_cmn_clk_cfg1_update(struct dsi_pll_7nm *pll, u32 mask,
++					u32 val)
++{
++	unsigned long flags;
+ 	u32 data;
+ 
++	spin_lock_irqsave(&pll->pclk_mux_lock, flags);
+ 	data = readl(pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
+-	writel(data & ~BIT(5), pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
++	data &= ~mask;
++	data |= val & mask;
++
++	writel(data, pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
++	spin_unlock_irqrestore(&pll->pclk_mux_lock, flags);
++}
++
++static void dsi_pll_disable_global_clk(struct dsi_pll_7nm *pll)
++{
++	dsi_pll_cmn_clk_cfg1_update(pll, DSI_7nm_PHY_CMN_CLK_CFG1_CLK_EN, 0);
+ }
+ 
+ static void dsi_pll_enable_global_clk(struct dsi_pll_7nm *pll)
+ {
+-	u32 data;
++	u32 cfg_1 = DSI_7nm_PHY_CMN_CLK_CFG1_CLK_EN | DSI_7nm_PHY_CMN_CLK_CFG1_CLK_EN_SEL;
+ 
+ 	writel(0x04, pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_3);
+-
+-	data = readl(pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
+-	writel(data | BIT(5) | BIT(4), pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
++	dsi_pll_cmn_clk_cfg1_update(pll, cfg_1, cfg_1);
+ }
+ 
+ static void dsi_pll_phy_dig_reset(struct dsi_pll_7nm *pll)
+@@ -565,7 +587,6 @@ static int dsi_7nm_pll_restore_state(struct msm_dsi_phy *phy)
+ {
+ 	struct dsi_pll_7nm *pll_7nm = to_pll_7nm(phy->vco_hw);
+ 	struct pll_7nm_cached_state *cached = &pll_7nm->cached_state;
+-	void __iomem *phy_base = pll_7nm->phy->base;
+ 	u32 val;
+ 	int ret;
+ 
+@@ -574,13 +595,10 @@ static int dsi_7nm_pll_restore_state(struct msm_dsi_phy *phy)
+ 	val |= cached->pll_out_div;
+ 	writel(val, pll_7nm->phy->pll_base + REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE);
+ 
+-	writel(cached->bit_clk_div | (cached->pix_clk_div << 4),
+-	       phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG0);
+-
+-	val = readl(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
+-	val &= ~0x3;
+-	val |= cached->pll_mux;
+-	writel(val, phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
++	dsi_pll_cmn_clk_cfg0_write(pll_7nm,
++				   DSI_7nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0(cached->bit_clk_div) |
++				   DSI_7nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4(cached->pix_clk_div));
++	dsi_pll_cmn_clk_cfg1_update(pll_7nm, 0x3, cached->pll_mux);
+ 
+ 	ret = dsi_pll_7nm_vco_set_rate(phy->vco_hw,
+ 			pll_7nm->vco_current_rate,
+@@ -599,7 +617,6 @@ static int dsi_7nm_pll_restore_state(struct msm_dsi_phy *phy)
+ static int dsi_7nm_set_usecase(struct msm_dsi_phy *phy)
+ {
+ 	struct dsi_pll_7nm *pll_7nm = to_pll_7nm(phy->vco_hw);
+-	void __iomem *base = phy->base;
+ 	u32 data = 0x0;	/* internal PLL */
+ 
+ 	DBG("DSI PLL%d", pll_7nm->phy->id);
+@@ -618,7 +635,8 @@ static int dsi_7nm_set_usecase(struct msm_dsi_phy *phy)
+ 	}
+ 
+ 	/* set PLL src */
+-	writel(data << 2, base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
++	dsi_pll_cmn_clk_cfg1_update(pll_7nm, DSI_7nm_PHY_CMN_CLK_CFG1_BITCLK_SEL__MASK,
++				    DSI_7nm_PHY_CMN_CLK_CFG1_BITCLK_SEL(data));
+ 
+ 	return 0;
+ }
+@@ -733,7 +751,7 @@ static int pll_7nm_register(struct dsi_pll_7nm *pll_7nm, struct clk_hw **provide
+ 					pll_by_2_bit,
+ 				}), 2, 0, pll_7nm->phy->base +
+ 					REG_DSI_7nm_PHY_CMN_CLK_CFG1,
+-				0, 1, 0, NULL);
++				0, 1, 0, &pll_7nm->pclk_mux_lock);
+ 		if (IS_ERR(hw)) {
+ 			ret = PTR_ERR(hw);
+ 			goto fail;
+@@ -778,6 +796,7 @@ static int dsi_pll_7nm_init(struct msm_dsi_phy *phy)
+ 	pll_7nm_list[phy->id] = pll_7nm;
+ 
+ 	spin_lock_init(&pll_7nm->postdiv_lock);
++	spin_lock_init(&pll_7nm->pclk_mux_lock);
+ 
+ 	pll_7nm->phy = phy;
+ 
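dsi_pll_cmn_clk_cfg1_update() introduced above is the usual masked read-modify-write under a spinlock; in generic form (a sketch with our own names, assuming kernel context):

#include <linux/io.h>
#include <linux/spinlock.h>

/* Update only the bits in @mask, atomically w.r.t. other users of @lock. */
static void reg_update_locked(void __iomem *reg, spinlock_t *lock,
			      u32 mask, u32 val)
{
	unsigned long flags;
	u32 data;

	spin_lock_irqsave(lock, flags);
	data = readl(reg);
	data &= ~mask;
	data |= val & mask;
	writel(data, reg);
	spin_unlock_irqrestore(lock, flags);
}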
+diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
+index 2e28a13446366c..9526b22038ab82 100644
+--- a/drivers/gpu/drm/msm/msm_drv.h
++++ b/drivers/gpu/drm/msm/msm_drv.h
+@@ -543,15 +543,12 @@ static inline int align_pitch(int width, int bpp)
+ static inline unsigned long timeout_to_jiffies(const ktime_t *timeout)
+ {
+ 	ktime_t now = ktime_get();
+-	s64 remaining_jiffies;
+ 
+-	if (ktime_compare(*timeout, now) < 0) {
+-		remaining_jiffies = 0;
+-	} else {
+-		ktime_t rem = ktime_sub(*timeout, now);
+-		remaining_jiffies = ktime_divns(rem, NSEC_PER_SEC / HZ);
+-	}
++	if (ktime_compare(*timeout, now) <= 0)
++		return 0;
+ 
++	ktime_t rem = ktime_sub(*timeout, now);
++	s64 remaining_jiffies = ktime_divns(rem, NSEC_PER_SEC / HZ);
+ 	return clamp(remaining_jiffies, 1LL, (s64)INT_MAX);
+ }
+ 
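One behavioral note on the rewrite above: an already-expired timeout now yields 0 jiffies instead of being clamped to 1, and a timeout exactly equal to the current time also counts as expired. A standalone integer analog (our simplification; the HZ value is assumed):

#include <stdint.h>

#define NSEC_PER_SEC 1000000000LL
#define HZ 250	/* assumed kernel config value */

static int64_t clamp_s64(int64_t v, int64_t lo, int64_t hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

static int64_t timeout_to_jiffies_analog(int64_t timeout_ns, int64_t now_ns)
{
	if (timeout_ns <= now_ns)
		return 0;	/* expired: this used to come out as 1 */

	return clamp_s64((timeout_ns - now_ns) / (NSEC_PER_SEC / HZ),
			 1, INT32_MAX);
}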
+diff --git a/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml b/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml
+index d54b72f924493b..35f7f40e405b7d 100644
+--- a/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml
++++ b/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml
+@@ -9,8 +9,15 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
+ 	<reg32 offset="0x00004" name="REVISION_ID1"/>
+ 	<reg32 offset="0x00008" name="REVISION_ID2"/>
+ 	<reg32 offset="0x0000c" name="REVISION_ID3"/>
+-	<reg32 offset="0x00010" name="CLK_CFG0"/>
+-	<reg32 offset="0x00014" name="CLK_CFG1"/>
++	<reg32 offset="0x00010" name="CLK_CFG0">
++		<bitfield name="DIV_CTRL_3_0" low="0" high="3" type="uint"/>
++		<bitfield name="DIV_CTRL_7_4" low="4" high="7" type="uint"/>
++	</reg32>
++	<reg32 offset="0x00014" name="CLK_CFG1">
++		<bitfield name="CLK_EN" pos="5" type="boolean"/>
++		<bitfield name="CLK_EN_SEL" pos="4" type="boolean"/>
++		<bitfield name="BITCLK_SEL" low="2" high="3" type="uint"/>
++	</reg32>
+ 	<reg32 offset="0x00018" name="GLBL_CTRL"/>
+ 	<reg32 offset="0x0001c" name="RBUF_CTRL"/>
+ 	<reg32 offset="0x00020" name="VREG_CTRL_0"/>
+diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
+index b4da82ddbb6b2f..8ea98f06d39afc 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
++++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
+@@ -590,6 +590,7 @@ static int nouveau_atomic_range_fault(struct nouveau_svmm *svmm,
+ 	unsigned long timeout =
+ 		jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
+ 	struct mm_struct *mm = svmm->notifier.mm;
++	struct folio *folio;
+ 	struct page *page;
+ 	unsigned long start = args->p.addr;
+ 	unsigned long notifier_seq;
+@@ -616,12 +617,16 @@ static int nouveau_atomic_range_fault(struct nouveau_svmm *svmm,
+ 			ret = -EINVAL;
+ 			goto out;
+ 		}
++		folio = page_folio(page);
+ 
+ 		mutex_lock(&svmm->mutex);
+ 		if (!mmu_interval_read_retry(&notifier->notifier,
+ 					     notifier_seq))
+ 			break;
+ 		mutex_unlock(&svmm->mutex);
++
++		folio_unlock(folio);
++		folio_put(folio);
+ 	}
+ 
+ 	/* Map the page on the GPU. */
+@@ -637,8 +642,8 @@ static int nouveau_atomic_range_fault(struct nouveau_svmm *svmm,
+ 	ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, size, NULL);
+ 	mutex_unlock(&svmm->mutex);
+ 
+-	unlock_page(page);
+-	put_page(page);
++	folio_unlock(folio);
++	folio_put(folio);
+ 
+ out:
+ 	mmu_interval_notifier_remove(&notifier->notifier);
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c
+index a6f410ba60bc94..d393bc540f8628 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c
+@@ -75,7 +75,7 @@ gp10b_pmu_acr = {
+ 	.bootstrap_multiple_falcons = gp10b_pmu_acr_bootstrap_multiple_falcons,
+ };
+ 
+-#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
++#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC)
+ MODULE_FIRMWARE("nvidia/gp10b/pmu/desc.bin");
+ MODULE_FIRMWARE("nvidia/gp10b/pmu/image.bin");
+ MODULE_FIRMWARE("nvidia/gp10b/pmu/sig.bin");
+diff --git a/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c b/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
+index 45d09e6fa667fd..7d68a8acfe2ea4 100644
+--- a/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
++++ b/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
+@@ -109,13 +109,13 @@ static int jadard_prepare(struct drm_panel *panel)
+ 	if (jadard->desc->lp11_to_reset_delay_ms)
+ 		msleep(jadard->desc->lp11_to_reset_delay_ms);
+ 
+-	gpiod_set_value(jadard->reset, 1);
++	gpiod_set_value(jadard->reset, 0);
+ 	msleep(5);
+ 
+-	gpiod_set_value(jadard->reset, 0);
++	gpiod_set_value(jadard->reset, 1);
+ 	msleep(10);
+ 
+-	gpiod_set_value(jadard->reset, 1);
++	gpiod_set_value(jadard->reset, 0);
+ 	msleep(130);
+ 
+ 	ret = jadard->desc->init(jadard);
+@@ -1130,7 +1130,7 @@ static int jadard_dsi_probe(struct mipi_dsi_device *dsi)
+ 	dsi->format = desc->format;
+ 	dsi->lanes = desc->lanes;
+ 
+-	jadard->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
++	jadard->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ 	if (IS_ERR(jadard->reset)) {
+ 		DRM_DEV_ERROR(&dsi->dev, "failed to get our reset GPIO\n");
+ 		return PTR_ERR(jadard->reset);
+diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
+index 6fc00d63b2857f..e6744422dee492 100644
+--- a/drivers/gpu/drm/xe/xe_oa.c
++++ b/drivers/gpu/drm/xe/xe_oa.c
+@@ -36,11 +36,17 @@
+ #include "xe_pm.h"
+ #include "xe_sched_job.h"
+ #include "xe_sriov.h"
++#include "xe_sync.h"
+ 
+ #define DEFAULT_POLL_FREQUENCY_HZ 200
+ #define DEFAULT_POLL_PERIOD_NS (NSEC_PER_SEC / DEFAULT_POLL_FREQUENCY_HZ)
+ #define XE_OA_UNIT_INVALID U32_MAX
+ 
++enum xe_oa_submit_deps {
++	XE_OA_SUBMIT_NO_DEPS,
++	XE_OA_SUBMIT_ADD_DEPS,
++};
++
+ struct xe_oa_reg {
+ 	struct xe_reg addr;
+ 	u32 value;
+@@ -63,13 +69,8 @@ struct xe_oa_config {
+ 	struct rcu_head rcu;
+ };
+ 
+-struct flex {
+-	struct xe_reg reg;
+-	u32 offset;
+-	u32 value;
+-};
+-
+ struct xe_oa_open_param {
++	struct xe_file *xef;
+ 	u32 oa_unit_id;
+ 	bool sample;
+ 	u32 metric_set;
+@@ -81,6 +82,9 @@ struct xe_oa_open_param {
+ 	struct xe_exec_queue *exec_q;
+ 	struct xe_hw_engine *hwe;
+ 	bool no_preempt;
++	struct drm_xe_sync __user *syncs_user;
++	int num_syncs;
++	struct xe_sync_entry *syncs;
+ };
+ 
+ struct xe_oa_config_bo {
+@@ -567,32 +571,60 @@ static __poll_t xe_oa_poll(struct file *file, poll_table *wait)
+ 	return ret;
+ }
+ 
+-static int xe_oa_submit_bb(struct xe_oa_stream *stream, struct xe_bb *bb)
++static void xe_oa_lock_vma(struct xe_exec_queue *q)
++{
++	if (q->vm) {
++		down_read(&q->vm->lock);
++		xe_vm_lock(q->vm, false);
++	}
++}
++
++static void xe_oa_unlock_vma(struct xe_exec_queue *q)
+ {
++	if (q->vm) {
++		xe_vm_unlock(q->vm);
++		up_read(&q->vm->lock);
++	}
++}
++
++static struct dma_fence *xe_oa_submit_bb(struct xe_oa_stream *stream, enum xe_oa_submit_deps deps,
++					 struct xe_bb *bb)
++{
++	struct xe_exec_queue *q = stream->exec_q ?: stream->k_exec_q;
+ 	struct xe_sched_job *job;
+ 	struct dma_fence *fence;
+-	long timeout;
+ 	int err = 0;
+ 
+-	/* Kernel configuration is issued on stream->k_exec_q, not stream->exec_q */
+-	job = xe_bb_create_job(stream->k_exec_q, bb);
++	xe_oa_lock_vma(q);
++
++	job = xe_bb_create_job(q, bb);
+ 	if (IS_ERR(job)) {
+ 		err = PTR_ERR(job);
+ 		goto exit;
+ 	}
++	job->ggtt = true;
++
++	if (deps == XE_OA_SUBMIT_ADD_DEPS) {
++		for (int i = 0; i < stream->num_syncs && !err; i++)
++			err = xe_sync_entry_add_deps(&stream->syncs[i], job);
++		if (err) {
++			drm_dbg(&stream->oa->xe->drm, "xe_sync_entry_add_deps err %d\n", err);
++			goto err_put_job;
++		}
++	}
+ 
+ 	xe_sched_job_arm(job);
+ 	fence = dma_fence_get(&job->drm.s_fence->finished);
+ 	xe_sched_job_push(job);
+ 
+-	timeout = dma_fence_wait_timeout(fence, false, HZ);
+-	dma_fence_put(fence);
+-	if (timeout < 0)
+-		err = timeout;
+-	else if (!timeout)
+-		err = -ETIME;
++	xe_oa_unlock_vma(q);
++
++	return fence;
++err_put_job:
++	xe_sched_job_put(job);
+ exit:
+-	return err;
++	xe_oa_unlock_vma(q);
++	return ERR_PTR(err);
+ }
+ 
+ static void write_cs_mi_lri(struct xe_bb *bb, const struct xe_oa_reg *reg_data, u32 n_regs)
+@@ -639,54 +671,30 @@ static void xe_oa_free_configs(struct xe_oa_stream *stream)
+ 		free_oa_config_bo(oa_bo);
+ }
+ 
+-static void xe_oa_store_flex(struct xe_oa_stream *stream, struct xe_lrc *lrc,
+-			     struct xe_bb *bb, const struct flex *flex, u32 count)
+-{
+-	u32 offset = xe_bo_ggtt_addr(lrc->bo);
+-
+-	do {
+-		bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_GGTT | MI_SDI_NUM_DW(1);
+-		bb->cs[bb->len++] = offset + flex->offset * sizeof(u32);
+-		bb->cs[bb->len++] = 0;
+-		bb->cs[bb->len++] = flex->value;
+-
+-	} while (flex++, --count);
+-}
+-
+-static int xe_oa_modify_ctx_image(struct xe_oa_stream *stream, struct xe_lrc *lrc,
+-				  const struct flex *flex, u32 count)
++static int xe_oa_load_with_lri(struct xe_oa_stream *stream, struct xe_oa_reg *reg_lri, u32 count)
+ {
++	struct dma_fence *fence;
+ 	struct xe_bb *bb;
+ 	int err;
+ 
+-	bb = xe_bb_new(stream->gt, 4 * count, false);
++	bb = xe_bb_new(stream->gt, 2 * count + 1, false);
+ 	if (IS_ERR(bb)) {
+ 		err = PTR_ERR(bb);
+ 		goto exit;
+ 	}
+ 
+-	xe_oa_store_flex(stream, lrc, bb, flex, count);
+-
+-	err = xe_oa_submit_bb(stream, bb);
+-	xe_bb_free(bb, NULL);
+-exit:
+-	return err;
+-}
+-
+-static int xe_oa_load_with_lri(struct xe_oa_stream *stream, struct xe_oa_reg *reg_lri)
+-{
+-	struct xe_bb *bb;
+-	int err;
++	write_cs_mi_lri(bb, reg_lri, count);
+ 
+-	bb = xe_bb_new(stream->gt, 3, false);
+-	if (IS_ERR(bb)) {
+-		err = PTR_ERR(bb);
+-		goto exit;
++	fence = xe_oa_submit_bb(stream, XE_OA_SUBMIT_NO_DEPS, bb);
++	if (IS_ERR(fence)) {
++		err = PTR_ERR(fence);
++		goto free_bb;
+ 	}
++	xe_bb_free(bb, fence);
++	dma_fence_put(fence);
+ 
+-	write_cs_mi_lri(bb, reg_lri, 1);
+-
+-	err = xe_oa_submit_bb(stream, bb);
++	return 0;
++free_bb:
+ 	xe_bb_free(bb, NULL);
+ exit:
+ 	return err;
+@@ -695,70 +703,54 @@ static int xe_oa_load_with_lri(struct xe_oa_stream *stream, struct xe_oa_reg *re
+ static int xe_oa_configure_oar_context(struct xe_oa_stream *stream, bool enable)
+ {
+ 	const struct xe_oa_format *format = stream->oa_buffer.format;
+-	struct xe_lrc *lrc = stream->exec_q->lrc[0];
+-	u32 regs_offset = xe_lrc_regs_offset(lrc) / sizeof(u32);
+ 	u32 oacontrol = __format_to_oactrl(format, OAR_OACONTROL_COUNTER_SEL_MASK) |
+ 		(enable ? OAR_OACONTROL_COUNTER_ENABLE : 0);
+ 
+-	struct flex regs_context[] = {
++	struct xe_oa_reg reg_lri[] = {
+ 		{
+ 			OACTXCONTROL(stream->hwe->mmio_base),
+-			stream->oa->ctx_oactxctrl_offset[stream->hwe->class] + 1,
+ 			enable ? OA_COUNTER_RESUME : 0,
+ 		},
++		{
++			OAR_OACONTROL,
++			oacontrol,
++		},
+ 		{
+ 			RING_CONTEXT_CONTROL(stream->hwe->mmio_base),
+-			regs_offset + CTX_CONTEXT_CONTROL,
+-			_MASKED_BIT_ENABLE(CTX_CTRL_OAC_CONTEXT_ENABLE),
++			_MASKED_FIELD(CTX_CTRL_OAC_CONTEXT_ENABLE,
++				      enable ? CTX_CTRL_OAC_CONTEXT_ENABLE : 0)
+ 		},
+ 	};
+-	struct xe_oa_reg reg_lri = { OAR_OACONTROL, oacontrol };
+-	int err;
+ 
+-	/* Modify stream hwe context image with regs_context */
+-	err = xe_oa_modify_ctx_image(stream, stream->exec_q->lrc[0],
+-				     regs_context, ARRAY_SIZE(regs_context));
+-	if (err)
+-		return err;
+-
+-	/* Apply reg_lri using LRI */
+-	return xe_oa_load_with_lri(stream, &reg_lri);
++	return xe_oa_load_with_lri(stream, reg_lri, ARRAY_SIZE(reg_lri));
+ }
+ 
+ static int xe_oa_configure_oac_context(struct xe_oa_stream *stream, bool enable)
+ {
+ 	const struct xe_oa_format *format = stream->oa_buffer.format;
+-	struct xe_lrc *lrc = stream->exec_q->lrc[0];
+-	u32 regs_offset = xe_lrc_regs_offset(lrc) / sizeof(u32);
+ 	u32 oacontrol = __format_to_oactrl(format, OAR_OACONTROL_COUNTER_SEL_MASK) |
+ 		(enable ? OAR_OACONTROL_COUNTER_ENABLE : 0);
+-	struct flex regs_context[] = {
++	struct xe_oa_reg reg_lri[] = {
+ 		{
+ 			OACTXCONTROL(stream->hwe->mmio_base),
+-			stream->oa->ctx_oactxctrl_offset[stream->hwe->class] + 1,
+ 			enable ? OA_COUNTER_RESUME : 0,
+ 		},
++		{
++			OAC_OACONTROL,
++			oacontrol
++		},
+ 		{
+ 			RING_CONTEXT_CONTROL(stream->hwe->mmio_base),
+-			regs_offset + CTX_CONTEXT_CONTROL,
+-			_MASKED_BIT_ENABLE(CTX_CTRL_OAC_CONTEXT_ENABLE) |
++			_MASKED_FIELD(CTX_CTRL_OAC_CONTEXT_ENABLE,
++				      enable ? CTX_CTRL_OAC_CONTEXT_ENABLE : 0) |
+ 			_MASKED_FIELD(CTX_CTRL_RUN_ALONE, enable ? CTX_CTRL_RUN_ALONE : 0),
+ 		},
+ 	};
+-	struct xe_oa_reg reg_lri = { OAC_OACONTROL, oacontrol };
+-	int err;
+ 
+ 	/* Set ccs select to enable programming of OAC_OACONTROL */
+ 	xe_mmio_write32(stream->gt, __oa_regs(stream)->oa_ctrl, __oa_ccs_select(stream));
+ 
+-	/* Modify stream hwe context image with regs_context */
+-	err = xe_oa_modify_ctx_image(stream, stream->exec_q->lrc[0],
+-				     regs_context, ARRAY_SIZE(regs_context));
+-	if (err)
+-		return err;
+-
+-	/* Apply reg_lri using LRI */
+-	return xe_oa_load_with_lri(stream, &reg_lri);
++	return xe_oa_load_with_lri(stream, reg_lri, ARRAY_SIZE(reg_lri));
+ }
+ 
+ static int xe_oa_configure_oa_context(struct xe_oa_stream *stream, bool enable)
+@@ -914,15 +906,32 @@ static int xe_oa_emit_oa_config(struct xe_oa_stream *stream, struct xe_oa_config
+ {
+ #define NOA_PROGRAM_ADDITIONAL_DELAY_US 500
+ 	struct xe_oa_config_bo *oa_bo;
+-	int err, us = NOA_PROGRAM_ADDITIONAL_DELAY_US;
++	int err = 0, us = NOA_PROGRAM_ADDITIONAL_DELAY_US;
++	struct dma_fence *fence;
++	long timeout;
+ 
++	/* Emit OA configuration batch */
+ 	oa_bo = xe_oa_alloc_config_buffer(stream, config);
+ 	if (IS_ERR(oa_bo)) {
+ 		err = PTR_ERR(oa_bo);
+ 		goto exit;
+ 	}
+ 
+-	err = xe_oa_submit_bb(stream, oa_bo->bb);
++	fence = xe_oa_submit_bb(stream, XE_OA_SUBMIT_ADD_DEPS, oa_bo->bb);
++	if (IS_ERR(fence)) {
++		err = PTR_ERR(fence);
++		goto exit;
++	}
++
++	/* Wait till all previous batches have executed */
++	timeout = dma_fence_wait_timeout(fence, false, 5 * HZ);
++	dma_fence_put(fence);
++	if (timeout < 0)
++		err = timeout;
++	else if (!timeout)
++		err = -ETIME;
++	if (err)
++		drm_dbg(&stream->oa->xe->drm, "dma_fence_wait_timeout err %d\n", err);
+ 
+ 	/* Additional empirical delay needed for NOA programming after registers are written */
+ 	usleep_range(us, 2 * us);
+@@ -1362,6 +1371,9 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream,
+ 	stream->period_exponent = param->period_exponent;
+ 	stream->no_preempt = param->no_preempt;
+ 
++	stream->num_syncs = param->num_syncs;
++	stream->syncs = param->syncs;
++
+ 	/*
+ 	 * For Xe2+, when overrun mode is enabled, there are no partial reports at the end
+ 	 * of buffer, making the OA buffer effectively a non-power-of-2 size circular
+@@ -1712,6 +1724,20 @@ static int xe_oa_set_no_preempt(struct xe_oa *oa, u64 value,
+ 	return 0;
+ }
+ 
++static int xe_oa_set_prop_num_syncs(struct xe_oa *oa, u64 value,
++				    struct xe_oa_open_param *param)
++{
++	param->num_syncs = value;
++	return 0;
++}
++
++static int xe_oa_set_prop_syncs_user(struct xe_oa *oa, u64 value,
++				     struct xe_oa_open_param *param)
++{
++	param->syncs_user = u64_to_user_ptr(value);
++	return 0;
++}
++
+ typedef int (*xe_oa_set_property_fn)(struct xe_oa *oa, u64 value,
+ 				     struct xe_oa_open_param *param);
+ static const xe_oa_set_property_fn xe_oa_set_property_funcs[] = {
+@@ -1724,6 +1750,8 @@ static const xe_oa_set_property_fn xe_oa_set_property_funcs[] = {
+ 	[DRM_XE_OA_PROPERTY_EXEC_QUEUE_ID] = xe_oa_set_prop_exec_queue_id,
+ 	[DRM_XE_OA_PROPERTY_OA_ENGINE_INSTANCE] = xe_oa_set_prop_engine_instance,
+ 	[DRM_XE_OA_PROPERTY_NO_PREEMPT] = xe_oa_set_no_preempt,
++	[DRM_XE_OA_PROPERTY_NUM_SYNCS] = xe_oa_set_prop_num_syncs,
++	[DRM_XE_OA_PROPERTY_SYNCS] = xe_oa_set_prop_syncs_user,
+ };
+ 
+ static int xe_oa_user_ext_set_property(struct xe_oa *oa, u64 extension,
+@@ -1783,6 +1811,49 @@ static int xe_oa_user_extensions(struct xe_oa *oa, u64 extension, int ext_number
+ 	return 0;
+ }
+ 
++static int xe_oa_parse_syncs(struct xe_oa *oa, struct xe_oa_open_param *param)
++{
++	int ret, num_syncs, num_ufence = 0;
++
++	if (param->num_syncs && !param->syncs_user) {
++		drm_dbg(&oa->xe->drm, "num_syncs specified without sync array\n");
++		ret = -EINVAL;
++		goto exit;
++	}
++
++	if (param->num_syncs) {
++		param->syncs = kcalloc(param->num_syncs, sizeof(*param->syncs), GFP_KERNEL);
++		if (!param->syncs) {
++			ret = -ENOMEM;
++			goto exit;
++		}
++	}
++
++	for (num_syncs = 0; num_syncs < param->num_syncs; num_syncs++) {
++		ret = xe_sync_entry_parse(oa->xe, param->xef, &param->syncs[num_syncs],
++					  &param->syncs_user[num_syncs], 0);
++		if (ret)
++			goto err_syncs;
++
++		if (xe_sync_is_ufence(&param->syncs[num_syncs]))
++			num_ufence++;
++	}
++
++	if (XE_IOCTL_DBG(oa->xe, num_ufence > 1)) {
++		ret = -EINVAL;
++		goto err_syncs;
++	}
++
++	return 0;
++
++err_syncs:
++	while (num_syncs--)
++		xe_sync_entry_cleanup(&param->syncs[num_syncs]);
++	kfree(param->syncs);
++exit:
++	return ret;
++}
++
+ /**
+  * xe_oa_stream_open_ioctl - Opens an OA stream
+  * @dev: @drm_device
+@@ -1808,6 +1879,7 @@ int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *f
+ 		return -ENODEV;
+ 	}
+ 
++	param.xef = xef;
+ 	ret = xe_oa_user_extensions(oa, data, 0, &param);
+ 	if (ret)
+ 		return ret;
+@@ -1817,8 +1889,8 @@ int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *f
+ 		if (XE_IOCTL_DBG(oa->xe, !param.exec_q))
+ 			return -ENOENT;
+ 
+-		if (param.exec_q->width > 1)
+-			drm_dbg(&oa->xe->drm, "exec_q->width > 1, programming only exec_q->lrc[0]\n");
++		if (XE_IOCTL_DBG(oa->xe, param.exec_q->width > 1))
++			return -EOPNOTSUPP;
+ 	}
+ 
+ 	/*
+@@ -1876,11 +1948,24 @@ int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *f
+ 		drm_dbg(&oa->xe->drm, "Using periodic sampling freq %lld Hz\n", oa_freq_hz);
+ 	}
+ 
++	ret = xe_oa_parse_syncs(oa, &param);
++	if (ret)
++		goto err_exec_q;
++
+ 	mutex_lock(&param.hwe->gt->oa.gt_lock);
+ 	ret = xe_oa_stream_open_ioctl_locked(oa, &param);
+ 	mutex_unlock(&param.hwe->gt->oa.gt_lock);
++	if (ret < 0)
++		goto err_sync_cleanup;
++
++	return ret;
++
++err_sync_cleanup:
++	while (param.num_syncs--)
++		xe_sync_entry_cleanup(&param.syncs[param.num_syncs]);
++	kfree(param.syncs);
+ err_exec_q:
+-	if (ret < 0 && param.exec_q)
++	if (param.exec_q)
+ 		xe_exec_queue_put(param.exec_q);
+ 	return ret;
+ }
+diff --git a/drivers/gpu/drm/xe/xe_oa_types.h b/drivers/gpu/drm/xe/xe_oa_types.h
+index 8862eca73fbe32..99f4b2d4bdcf6a 100644
+--- a/drivers/gpu/drm/xe/xe_oa_types.h
++++ b/drivers/gpu/drm/xe/xe_oa_types.h
+@@ -238,5 +238,11 @@ struct xe_oa_stream {
+ 
+ 	/** @no_preempt: Whether preemption and timeslicing is disabled for stream exec_q */
+ 	u32 no_preempt;
++
++	/** @num_syncs: size of @syncs array */
++	u32 num_syncs;
++
++	/** @syncs: syncs to wait on and to signal */
++	struct xe_sync_entry *syncs;
+ };
+ #endif
+diff --git a/drivers/gpu/drm/xe/xe_query.c b/drivers/gpu/drm/xe/xe_query.c
+index 1c96375bd7df75..6fec5d1a1eb44b 100644
+--- a/drivers/gpu/drm/xe/xe_query.c
++++ b/drivers/gpu/drm/xe/xe_query.c
+@@ -679,7 +679,7 @@ static int query_oa_units(struct xe_device *xe,
+ 			du->oa_unit_id = u->oa_unit_id;
+ 			du->oa_unit_type = u->type;
+ 			du->oa_timestamp_freq = xe_oa_timestamp_frequency(gt);
+-			du->capabilities = DRM_XE_OA_CAPS_BASE;
++			du->capabilities = DRM_XE_OA_CAPS_BASE | DRM_XE_OA_CAPS_SYNCS;
+ 
+ 			j = 0;
+ 			for_each_hw_engine(hwe, gt, hwe_id) {
+diff --git a/drivers/gpu/drm/xe/xe_ring_ops.c b/drivers/gpu/drm/xe/xe_ring_ops.c
+index 0be4f489d3e126..9f327f27c0726e 100644
+--- a/drivers/gpu/drm/xe/xe_ring_ops.c
++++ b/drivers/gpu/drm/xe/xe_ring_ops.c
+@@ -221,7 +221,10 @@ static int emit_pipe_imm_ggtt(u32 addr, u32 value, bool stall_only, u32 *dw,
+ 
+ static u32 get_ppgtt_flag(struct xe_sched_job *job)
+ {
+-	return job->q->vm ? BIT(8) : 0;
++	if (job->q->vm && !job->ggtt)
++		return BIT(8);
++
++	return 0;
+ }
+ 
+ static int emit_copy_timestamp(struct xe_lrc *lrc, u32 *dw, int i)
+diff --git a/drivers/gpu/drm/xe/xe_sched_job_types.h b/drivers/gpu/drm/xe/xe_sched_job_types.h
+index 0d3f76fb05cea2..c207361bf43e1c 100644
+--- a/drivers/gpu/drm/xe/xe_sched_job_types.h
++++ b/drivers/gpu/drm/xe/xe_sched_job_types.h
+@@ -57,6 +57,8 @@ struct xe_sched_job {
+ 	u32 migrate_flush_flags;
+ 	/** @ring_ops_flush_tlb: The ring ops need to flush TLB before payload. */
+ 	bool ring_ops_flush_tlb;
++	/** @ggtt: mapped in ggtt. */
++	bool ggtt;
+ 	/** @ptrs: per instance pointers. */
+ 	struct xe_job_ptrs ptrs[];
+ };
+diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
+index 380aa1614442f4..3d1459b551bb2e 100644
+--- a/drivers/input/mouse/synaptics.c
++++ b/drivers/input/mouse/synaptics.c
+@@ -667,23 +667,50 @@ static void synaptics_pt_stop(struct serio *serio)
+ 	serio_continue_rx(parent->ps2dev.serio);
+ }
+ 
++static int synaptics_pt_open(struct serio *serio)
++{
++	struct psmouse *parent = psmouse_from_serio(serio->parent);
++	struct synaptics_data *priv = parent->private;
++
++	guard(serio_pause_rx)(parent->ps2dev.serio);
++	priv->pt_port_open = true;
++
++	return 0;
++}
++
++static void synaptics_pt_close(struct serio *serio)
++{
++	struct psmouse *parent = psmouse_from_serio(serio->parent);
++	struct synaptics_data *priv = parent->private;
++
++	guard(serio_pause_rx)(parent->ps2dev.serio);
++	priv->pt_port_open = false;
++}
++
+ static int synaptics_is_pt_packet(u8 *buf)
+ {
+ 	return (buf[0] & 0xFC) == 0x84 && (buf[3] & 0xCC) == 0xC4;
+ }
+ 
+-static void synaptics_pass_pt_packet(struct serio *ptport, u8 *packet)
++static void synaptics_pass_pt_packet(struct synaptics_data *priv, u8 *packet)
+ {
+-	struct psmouse *child = psmouse_from_serio(ptport);
++	struct serio *ptport;
+ 
+-	if (child && child->state == PSMOUSE_ACTIVATED) {
+-		serio_interrupt(ptport, packet[1], 0);
+-		serio_interrupt(ptport, packet[4], 0);
+-		serio_interrupt(ptport, packet[5], 0);
+-		if (child->pktsize == 4)
+-			serio_interrupt(ptport, packet[2], 0);
+-	} else {
+-		serio_interrupt(ptport, packet[1], 0);
++	ptport = priv->pt_port;
++	if (!ptport)
++		return;
++
++	serio_interrupt(ptport, packet[1], 0);
++
++	if (priv->pt_port_open) {
++		struct psmouse *child = psmouse_from_serio(ptport);
++
++		if (child->state == PSMOUSE_ACTIVATED) {
++			serio_interrupt(ptport, packet[4], 0);
++			serio_interrupt(ptport, packet[5], 0);
++			if (child->pktsize == 4)
++				serio_interrupt(ptport, packet[2], 0);
++		}
+ 	}
+ }
+ 
+@@ -722,6 +749,8 @@ static void synaptics_pt_create(struct psmouse *psmouse)
+ 	serio->write = synaptics_pt_write;
+ 	serio->start = synaptics_pt_start;
+ 	serio->stop = synaptics_pt_stop;
++	serio->open = synaptics_pt_open;
++	serio->close = synaptics_pt_close;
+ 	serio->parent = psmouse->ps2dev.serio;
+ 
+ 	psmouse->pt_activate = synaptics_pt_activate;
+@@ -1218,11 +1247,10 @@ static psmouse_ret_t synaptics_process_byte(struct psmouse *psmouse)
+ 
+ 		if (SYN_CAP_PASS_THROUGH(priv->info.capabilities) &&
+ 		    synaptics_is_pt_packet(psmouse->packet)) {
+-			if (priv->pt_port)
+-				synaptics_pass_pt_packet(priv->pt_port,
+-							 psmouse->packet);
+-		} else
++			synaptics_pass_pt_packet(priv, psmouse->packet);
++		} else {
+ 			synaptics_process_packet(psmouse);
++		}
+ 
+ 		return PSMOUSE_FULL_PACKET;
+ 	}
+diff --git a/drivers/input/mouse/synaptics.h b/drivers/input/mouse/synaptics.h
+index 08533d1b1b16fc..4b34f13b9f7616 100644
+--- a/drivers/input/mouse/synaptics.h
++++ b/drivers/input/mouse/synaptics.h
+@@ -188,6 +188,7 @@ struct synaptics_data {
+ 	bool disable_gesture;			/* disable gestures */
+ 
+ 	struct serio *pt_port;			/* Pass-through serio port */
++	bool pt_port_open;
+ 
+ 	/*
+ 	 * Last received Advanced Gesture Mode (AGM) packet. An AGM packet
+diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
+index 8fdee511bc0f2c..cf469a67249723 100644
+--- a/drivers/irqchip/irq-gic-v3.c
++++ b/drivers/irqchip/irq-gic-v3.c
+@@ -44,6 +44,7 @@ static u8 dist_prio_nmi __ro_after_init = GICV3_PRIO_NMI;
+ #define FLAGS_WORKAROUND_GICR_WAKER_MSM8996	(1ULL << 0)
+ #define FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539	(1ULL << 1)
+ #define FLAGS_WORKAROUND_ASR_ERRATUM_8601001	(1ULL << 2)
++#define FLAGS_WORKAROUND_INSECURE		(1ULL << 3)
+ 
+ #define GIC_IRQ_TYPE_PARTITION	(GIC_IRQ_TYPE_LPI + 1)
+ 
+@@ -83,6 +84,8 @@ static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);
+ #define GIC_LINE_NR	min(GICD_TYPER_SPIS(gic_data.rdists.gicd_typer), 1020U)
+ #define GIC_ESPI_NR	GICD_TYPER_ESPIS(gic_data.rdists.gicd_typer)
+ 
++static bool nmi_support_forbidden;
++
+ /*
+  * There are 16 SGIs, though we only actually use 8 in Linux. The other 8 SGIs
+  * are potentially stolen by the secure side. Some code, especially code dealing
+@@ -163,21 +166,27 @@ static void __init gic_prio_init(void)
+ {
+ 	bool ds;
+ 
+-	ds = gic_dist_security_disabled();
+-	if (!ds) {
+-		u32 val;
+-
+-		val = readl_relaxed(gic_data.dist_base + GICD_CTLR);
+-		val |= GICD_CTLR_DS;
+-		writel_relaxed(val, gic_data.dist_base + GICD_CTLR);
++	cpus_have_group0 = gic_has_group0();
+ 
+-		ds = gic_dist_security_disabled();
+-		if (ds)
+-			pr_warn("Broken GIC integration, security disabled");
++	ds = gic_dist_security_disabled();
++	if ((gic_data.flags & FLAGS_WORKAROUND_INSECURE) && !ds) {
++		if (cpus_have_group0) {
++			u32 val;
++
++			val = readl_relaxed(gic_data.dist_base + GICD_CTLR);
++			val |= GICD_CTLR_DS;
++			writel_relaxed(val, gic_data.dist_base + GICD_CTLR);
++
++			ds = gic_dist_security_disabled();
++			if (ds)
++				pr_warn("Broken GIC integration, security disabled\n");
++		} else {
++			pr_warn("Broken GIC integration, pNMI forbidden\n");
++			nmi_support_forbidden = true;
++		}
+ 	}
+ 
+ 	cpus_have_security_disabled = ds;
+-	cpus_have_group0 = gic_has_group0();
+ 
+ 	/*
+ 	 * How priority values are used by the GIC depends on two things:
+@@ -209,7 +218,7 @@ static void __init gic_prio_init(void)
+ 	 * be in the non-secure range, we program the non-secure values into
+ 	 * the distributor to match the PMR values we want.
+ 	 */
+-	if (cpus_have_group0 & !cpus_have_security_disabled) {
++	if (cpus_have_group0 && !cpus_have_security_disabled) {
+ 		dist_prio_irq = __gicv3_prio_to_ns(dist_prio_irq);
+ 		dist_prio_nmi = __gicv3_prio_to_ns(dist_prio_nmi);
+ 	}
+@@ -1922,6 +1931,18 @@ static bool gic_enable_quirk_arm64_2941627(void *data)
+ 	return true;
+ }
+ 
++static bool gic_enable_quirk_rk3399(void *data)
++{
++	struct gic_chip_data *d = data;
++
++	if (of_machine_is_compatible("rockchip,rk3399")) {
++		d->flags |= FLAGS_WORKAROUND_INSECURE;
++		return true;
++	}
++
++	return false;
++}
++
+ static bool rd_set_non_coherent(void *data)
+ {
+ 	struct gic_chip_data *d = data;
+@@ -1996,6 +2017,12 @@ static const struct gic_quirk gic_quirks[] = {
+ 		.property = "dma-noncoherent",
+ 		.init   = rd_set_non_coherent,
+ 	},
++	{
++		.desc	= "GICv3: Insecure RK3399 integration",
++		.iidr	= 0x0000043b,
++		.mask	= 0xff000fff,
++		.init	= gic_enable_quirk_rk3399,
++	},
+ 	{
+ 	}
+ };
+@@ -2004,7 +2031,7 @@ static void gic_enable_nmi_support(void)
+ {
+ 	int i;
+ 
+-	if (!gic_prio_masking_enabled())
++	if (!gic_prio_masking_enabled() || nmi_support_forbidden)
+ 		return;
+ 
+ 	rdist_nmi_refs = kcalloc(gic_data.ppi_nr + SGI_NR,
+diff --git a/drivers/irqchip/irq-jcore-aic.c b/drivers/irqchip/irq-jcore-aic.c
+index b9dcc8e78c7501..1f613eb7b7f034 100644
+--- a/drivers/irqchip/irq-jcore-aic.c
++++ b/drivers/irqchip/irq-jcore-aic.c
+@@ -38,7 +38,7 @@ static struct irq_chip jcore_aic;
+ static void handle_jcore_irq(struct irq_desc *desc)
+ {
+ 	if (irqd_is_per_cpu(irq_desc_get_irq_data(desc)))
+-		handle_percpu_irq(desc);
++		handle_percpu_devid_irq(desc);
+ 	else
+ 		handle_simple_irq(desc);
+ }
+diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
+index 32d58752477847..31bea72bcb01ad 100644
+--- a/drivers/md/raid0.c
++++ b/drivers/md/raid0.c
+@@ -385,10 +385,8 @@ static int raid0_set_limits(struct mddev *mddev)
+ 	lim.io_min = mddev->chunk_sectors << 9;
+ 	lim.io_opt = lim.io_min * mddev->raid_disks;
+ 	err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
+-	if (err) {
+-		queue_limits_cancel_update(mddev->gendisk->queue);
++	if (err)
+ 		return err;
+-	}
+ 	return queue_limits_set(mddev->gendisk->queue, &lim);
+ }
+ 
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index d83fe3b3abc009..8a994a1975ca7b 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -3171,10 +3171,8 @@ static int raid1_set_limits(struct mddev *mddev)
+ 	md_init_stacking_limits(&lim);
+ 	lim.max_write_zeroes_sectors = 0;
+ 	err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
+-	if (err) {
+-		queue_limits_cancel_update(mddev->gendisk->queue);
++	if (err)
+ 		return err;
+-	}
+ 	return queue_limits_set(mddev->gendisk->queue, &lim);
+ }
+ 
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index daf42acc4fb6f3..a214fed4f16226 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -3963,10 +3963,8 @@ static int raid10_set_queue_limits(struct mddev *mddev)
+ 	lim.io_min = mddev->chunk_sectors << 9;
+ 	lim.io_opt = lim.io_min * raid10_nr_stripes(conf);
+ 	err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
+-	if (err) {
+-		queue_limits_cancel_update(mddev->gendisk->queue);
++	if (err)
+ 		return err;
+-	}
+ 	return queue_limits_set(mddev->gendisk->queue, &lim);
+ }
+ 
+diff --git a/drivers/mtd/nand/raw/cadence-nand-controller.c b/drivers/mtd/nand/raw/cadence-nand-controller.c
+index 3bc89b3569632d..fca54e21a164f3 100644
+--- a/drivers/mtd/nand/raw/cadence-nand-controller.c
++++ b/drivers/mtd/nand/raw/cadence-nand-controller.c
+@@ -471,6 +471,8 @@ struct cdns_nand_ctrl {
+ 	struct {
+ 		void __iomem *virt;
+ 		dma_addr_t dma;
++		dma_addr_t iova_dma;
++		u32 size;
+ 	} io;
+ 
+ 	int irq;
+@@ -1835,11 +1837,11 @@ static int cadence_nand_slave_dma_transfer(struct cdns_nand_ctrl *cdns_ctrl,
+ 	}
+ 
+ 	if (dir == DMA_FROM_DEVICE) {
+-		src_dma = cdns_ctrl->io.dma;
++		src_dma = cdns_ctrl->io.iova_dma;
+ 		dst_dma = buf_dma;
+ 	} else {
+ 		src_dma = buf_dma;
+-		dst_dma = cdns_ctrl->io.dma;
++		dst_dma = cdns_ctrl->io.iova_dma;
+ 	}
+ 
+ 	tx = dmaengine_prep_dma_memcpy(cdns_ctrl->dmac, dst_dma, src_dma, len,
+@@ -1861,12 +1863,12 @@ static int cadence_nand_slave_dma_transfer(struct cdns_nand_ctrl *cdns_ctrl,
+ 	dma_async_issue_pending(cdns_ctrl->dmac);
+ 	wait_for_completion(&finished);
+ 
+-	dma_unmap_single(cdns_ctrl->dev, buf_dma, len, dir);
++	dma_unmap_single(dma_dev->dev, buf_dma, len, dir);
+ 
+ 	return 0;
+ 
+ err_unmap:
+-	dma_unmap_single(cdns_ctrl->dev, buf_dma, len, dir);
++	dma_unmap_single(dma_dev->dev, buf_dma, len, dir);
+ 
+ err:
+ 	dev_dbg(cdns_ctrl->dev, "Fall back to CPU I/O\n");
+@@ -2869,6 +2871,7 @@ cadence_nand_irq_cleanup(int irqnum, struct cdns_nand_ctrl *cdns_ctrl)
+ static int cadence_nand_init(struct cdns_nand_ctrl *cdns_ctrl)
+ {
+ 	dma_cap_mask_t mask;
++	struct dma_device *dma_dev = cdns_ctrl->dmac->device;
+ 	int ret;
+ 
+ 	cdns_ctrl->cdma_desc = dma_alloc_coherent(cdns_ctrl->dev,
+@@ -2904,15 +2907,24 @@ static int cadence_nand_init(struct cdns_nand_ctrl *cdns_ctrl)
+ 	dma_cap_set(DMA_MEMCPY, mask);
+ 
+ 	if (cdns_ctrl->caps1->has_dma) {
+-		cdns_ctrl->dmac = dma_request_channel(mask, NULL, NULL);
+-		if (!cdns_ctrl->dmac) {
+-			dev_err(cdns_ctrl->dev,
+-				"Unable to get a DMA channel\n");
+-			ret = -EBUSY;
++		cdns_ctrl->dmac = dma_request_chan_by_mask(&mask);
++		if (IS_ERR(cdns_ctrl->dmac)) {
++			ret = dev_err_probe(cdns_ctrl->dev, PTR_ERR(cdns_ctrl->dmac),
++					    "%d: Failed to get a DMA channel\n", ret);
+ 			goto disable_irq;
+ 		}
+ 	}
+ 
++	cdns_ctrl->io.iova_dma = dma_map_resource(dma_dev->dev, cdns_ctrl->io.dma,
++						  cdns_ctrl->io.size,
++						  DMA_BIDIRECTIONAL, 0);
++
++	ret = dma_mapping_error(dma_dev->dev, cdns_ctrl->io.iova_dma);
++	if (ret) {
++		dev_err(cdns_ctrl->dev, "Failed to map I/O resource to DMA\n");
++		goto dma_release_chnl;
++	}
++
+ 	nand_controller_init(&cdns_ctrl->controller);
+ 	INIT_LIST_HEAD(&cdns_ctrl->chips);
+ 
+@@ -2923,18 +2935,22 @@ static int cadence_nand_init(struct cdns_nand_ctrl *cdns_ctrl)
+ 	if (ret) {
+ 		dev_err(cdns_ctrl->dev, "Failed to register MTD: %d\n",
+ 			ret);
+-		goto dma_release_chnl;
++		goto unmap_dma_resource;
+ 	}
+ 
+ 	kfree(cdns_ctrl->buf);
+ 	cdns_ctrl->buf = kzalloc(cdns_ctrl->buf_size, GFP_KERNEL);
+ 	if (!cdns_ctrl->buf) {
+ 		ret = -ENOMEM;
+-		goto dma_release_chnl;
++		goto unmap_dma_resource;
+ 	}
+ 
+ 	return 0;
+ 
++unmap_dma_resource:
++	dma_unmap_resource(dma_dev->dev, cdns_ctrl->io.iova_dma,
++			   cdns_ctrl->io.size, DMA_BIDIRECTIONAL, 0);
++
+ dma_release_chnl:
+ 	if (cdns_ctrl->dmac)
+ 		dma_release_channel(cdns_ctrl->dmac);
+@@ -2956,6 +2972,8 @@ static int cadence_nand_init(struct cdns_nand_ctrl *cdns_ctrl)
+ static void cadence_nand_remove(struct cdns_nand_ctrl *cdns_ctrl)
+ {
+ 	cadence_nand_chips_cleanup(cdns_ctrl);
++	dma_unmap_resource(cdns_ctrl->dmac->device->dev, cdns_ctrl->io.iova_dma,
++			   cdns_ctrl->io.size, DMA_BIDIRECTIONAL, 0);
+ 	cadence_nand_irq_cleanup(cdns_ctrl->irq, cdns_ctrl);
+ 	kfree(cdns_ctrl->buf);
+ 	dma_free_coherent(cdns_ctrl->dev, sizeof(struct cadence_nand_cdma_desc),
+@@ -3020,7 +3038,9 @@ static int cadence_nand_dt_probe(struct platform_device *ofdev)
+ 	cdns_ctrl->io.virt = devm_platform_get_and_ioremap_resource(ofdev, 1, &res);
+ 	if (IS_ERR(cdns_ctrl->io.virt))
+ 		return PTR_ERR(cdns_ctrl->io.virt);
++
+ 	cdns_ctrl->io.dma = res->start;
++	cdns_ctrl->io.size = resource_size(res);
+ 
+ 	dt->clk = devm_clk_get(cdns_ctrl->dev, "nf_clk");
+ 	if (IS_ERR(dt->clk))
+diff --git a/drivers/mtd/spi-nor/sst.c b/drivers/mtd/spi-nor/sst.c
+index b5ad7118c49a2b..175211fe6a5ed2 100644
+--- a/drivers/mtd/spi-nor/sst.c
++++ b/drivers/mtd/spi-nor/sst.c
+@@ -174,7 +174,7 @@ static int sst_nor_write_data(struct spi_nor *nor, loff_t to, size_t len,
+ 	int ret;
+ 
+ 	nor->program_opcode = op;
+-	ret = spi_nor_write_data(nor, to, 1, buf);
++	ret = spi_nor_write_data(nor, to, len, buf);
+ 	if (ret < 0)
+ 		return ret;
+ 	WARN(ret != len, "While writing %zu byte written %i bytes\n", len, ret);
+diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
+index 95471cfcff420a..8ddd366d9fde54 100644
+--- a/drivers/net/ethernet/google/gve/gve.h
++++ b/drivers/net/ethernet/google/gve/gve.h
+@@ -1110,6 +1110,16 @@ static inline u32 gve_xdp_tx_start_queue_id(struct gve_priv *priv)
+ 	return gve_xdp_tx_queue_id(priv, 0);
+ }
+ 
++static inline bool gve_supports_xdp_xmit(struct gve_priv *priv)
++{
++	switch (priv->queue_format) {
++	case GVE_GQI_QPL_FORMAT:
++		return true;
++	default:
++		return false;
++	}
++}
++
+ /* gqi napi handler defined in gve_main.c */
+ int gve_napi_poll(struct napi_struct *napi, int budget);
+ 
+diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
+index f985a3cf2b11fa..862c4575701fec 100644
+--- a/drivers/net/ethernet/google/gve/gve_main.c
++++ b/drivers/net/ethernet/google/gve/gve_main.c
+@@ -1895,6 +1895,8 @@ static void gve_turndown(struct gve_priv *priv)
+ 	/* Stop tx queues */
+ 	netif_tx_disable(priv->dev);
+ 
++	xdp_features_clear_redirect_target(priv->dev);
++
+ 	gve_clear_napi_enabled(priv);
+ 	gve_clear_report_stats(priv);
+ 
+@@ -1955,6 +1957,9 @@ static void gve_turnup(struct gve_priv *priv)
+ 		napi_schedule(&block->napi);
+ 	}
+ 
++	if (priv->num_xdp_queues && gve_supports_xdp_xmit(priv))
++		xdp_features_set_redirect_target(priv->dev, false);
++
+ 	gve_set_napi_enabled(priv);
+ }
+ 
+@@ -2229,7 +2234,6 @@ static void gve_set_netdev_xdp_features(struct gve_priv *priv)
+ 	if (priv->queue_format == GVE_GQI_QPL_FORMAT) {
+ 		xdp_features = NETDEV_XDP_ACT_BASIC;
+ 		xdp_features |= NETDEV_XDP_ACT_REDIRECT;
+-		xdp_features |= NETDEV_XDP_ACT_NDO_XMIT;
+ 		xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY;
+ 	} else {
+ 		xdp_features = 0;
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
+index 97425c06e1ed7f..61db00b2b33e43 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.c
++++ b/drivers/net/ethernet/ibm/ibmvnic.c
+@@ -2310,7 +2310,7 @@ static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
+ 		tx_buff = &tx_pool->tx_buff[index];
+ 		adapter->netdev->stats.tx_packets--;
+ 		adapter->netdev->stats.tx_bytes -= tx_buff->skb->len;
+-		adapter->tx_stats_buffers[queue_num].packets--;
++		adapter->tx_stats_buffers[queue_num].batched_packets--;
+ 		adapter->tx_stats_buffers[queue_num].bytes -=
+ 						tx_buff->skb->len;
+ 		dev_kfree_skb_any(tx_buff->skb);
+@@ -2402,11 +2402,13 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
+ 	unsigned int tx_map_failed = 0;
+ 	union sub_crq indir_arr[16];
+ 	unsigned int tx_dropped = 0;
+-	unsigned int tx_packets = 0;
++	unsigned int tx_dpackets = 0;
++	unsigned int tx_bpackets = 0;
+ 	unsigned int tx_bytes = 0;
+ 	dma_addr_t data_dma_addr;
+ 	struct netdev_queue *txq;
+ 	unsigned long lpar_rc;
++	unsigned int skblen;
+ 	union sub_crq tx_crq;
+ 	unsigned int offset;
+ 	bool use_scrq_send_direct = false;
+@@ -2521,6 +2523,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
+ 	tx_buff->skb = skb;
+ 	tx_buff->index = bufidx;
+ 	tx_buff->pool_index = queue_num;
++	skblen = skb->len;
+ 
+ 	memset(&tx_crq, 0, sizeof(tx_crq));
+ 	tx_crq.v1.first = IBMVNIC_CRQ_CMD;
+@@ -2575,6 +2578,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
+ 		if (lpar_rc != H_SUCCESS)
+ 			goto tx_err;
+ 
++		tx_dpackets++;
+ 		goto early_exit;
+ 	}
+ 
+@@ -2603,6 +2607,8 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
+ 			goto tx_err;
+ 	}
+ 
++	tx_bpackets++;
++
+ early_exit:
+ 	if (atomic_add_return(num_entries, &tx_scrq->used)
+ 					>= adapter->req_tx_entries_per_subcrq) {
+@@ -2610,8 +2616,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
+ 		netif_stop_subqueue(netdev, queue_num);
+ 	}
+ 
+-	tx_packets++;
+-	tx_bytes += skb->len;
++	tx_bytes += skblen;
+ 	txq_trans_cond_update(txq);
+ 	ret = NETDEV_TX_OK;
+ 	goto out;
+@@ -2640,10 +2645,11 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
+ 	rcu_read_unlock();
+ 	netdev->stats.tx_dropped += tx_dropped;
+ 	netdev->stats.tx_bytes += tx_bytes;
+-	netdev->stats.tx_packets += tx_packets;
++	netdev->stats.tx_packets += tx_bpackets + tx_dpackets;
+ 	adapter->tx_send_failed += tx_send_failed;
+ 	adapter->tx_map_failed += tx_map_failed;
+-	adapter->tx_stats_buffers[queue_num].packets += tx_packets;
++	adapter->tx_stats_buffers[queue_num].batched_packets += tx_bpackets;
++	adapter->tx_stats_buffers[queue_num].direct_packets += tx_dpackets;
+ 	adapter->tx_stats_buffers[queue_num].bytes += tx_bytes;
+ 	adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped;
+ 
+@@ -3808,7 +3814,10 @@ static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+ 		memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
+ 
+ 	for (i = 0; i < adapter->req_tx_queues; i++) {
+-		snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
++		snprintf(data, ETH_GSTRING_LEN, "tx%d_batched_packets", i);
++		data += ETH_GSTRING_LEN;
++
++		snprintf(data, ETH_GSTRING_LEN, "tx%d_direct_packets", i);
+ 		data += ETH_GSTRING_LEN;
+ 
+ 		snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
+@@ -3873,7 +3882,9 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev,
+ 				      (adapter, ibmvnic_stats[i].offset));
+ 
+ 	for (j = 0; j < adapter->req_tx_queues; j++) {
+-		data[i] = adapter->tx_stats_buffers[j].packets;
++		data[i] = adapter->tx_stats_buffers[j].batched_packets;
++		i++;
++		data[i] = adapter->tx_stats_buffers[j].direct_packets;
+ 		i++;
+ 		data[i] = adapter->tx_stats_buffers[j].bytes;
+ 		i++;
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
+index 94ac36b1408be9..a189038d88df03 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.h
++++ b/drivers/net/ethernet/ibm/ibmvnic.h
+@@ -213,7 +213,8 @@ struct ibmvnic_statistics {
+ 
+ #define NUM_TX_STATS 3
+ struct ibmvnic_tx_queue_stats {
+-	u64 packets;
++	u64 batched_packets;
++	u64 direct_packets;
+ 	u64 bytes;
+ 	u64 dropped_packets;
+ };
+diff --git a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
+index 2ec62c8d86e1c1..59486fe2ad18c2 100644
+--- a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
++++ b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
+@@ -20,6 +20,8 @@ nfp_bpf_cmsg_alloc(struct nfp_app_bpf *bpf, unsigned int size)
+ 	struct sk_buff *skb;
+ 
+ 	skb = nfp_app_ctrl_msg_alloc(bpf->app, size, GFP_KERNEL);
++	if (!skb)
++		return NULL;
+ 	skb_put(skb, size);
+ 
+ 	return skb;
+diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+index de10a2d08c428e..fe3438abcd253d 100644
+--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
++++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+@@ -2888,6 +2888,7 @@ static int axienet_probe(struct platform_device *pdev)
+ 
+ 	lp->phylink_config.dev = &ndev->dev;
+ 	lp->phylink_config.type = PHYLINK_NETDEV;
++	lp->phylink_config.mac_managed_pm = true;
+ 	lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
+ 		MAC_10FD | MAC_100FD | MAC_1000FD;
+ 
+diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
+index ba15a0a4ce629e..963fb9261f017c 100644
+--- a/drivers/net/geneve.c
++++ b/drivers/net/geneve.c
+@@ -1902,21 +1902,9 @@ static void geneve_destroy_tunnels(struct net *net, struct list_head *head)
+ {
+ 	struct geneve_net *gn = net_generic(net, geneve_net_id);
+ 	struct geneve_dev *geneve, *next;
+-	struct net_device *dev, *aux;
+ 
+-	/* gather any geneve devices that were moved into this ns */
+-	for_each_netdev_safe(net, dev, aux)
+-		if (dev->rtnl_link_ops == &geneve_link_ops)
+-			unregister_netdevice_queue(dev, head);
+-
+-	/* now gather any other geneve devices that were created in this ns */
+-	list_for_each_entry_safe(geneve, next, &gn->geneve_list, next) {
+-		/* If geneve->dev is in the same netns, it was already added
+-		 * to the list by the previous loop.
+-		 */
+-		if (!net_eq(dev_net(geneve->dev), net))
+-			unregister_netdevice_queue(geneve->dev, head);
+-	}
++	list_for_each_entry_safe(geneve, next, &gn->geneve_list, next)
++		geneve_dellink(geneve->dev, head);
+ }
+ 
+ static void __net_exit geneve_exit_batch_rtnl(struct list_head *net_list,
+diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
+index 47406ce9901612..33b78b4007fe7a 100644
+--- a/drivers/net/gtp.c
++++ b/drivers/net/gtp.c
+@@ -2487,11 +2487,6 @@ static void __net_exit gtp_net_exit_batch_rtnl(struct list_head *net_list,
+ 	list_for_each_entry(net, net_list, exit_list) {
+ 		struct gtp_net *gn = net_generic(net, gtp_net_id);
+ 		struct gtp_dev *gtp, *gtp_next;
+-		struct net_device *dev;
+-
+-		for_each_netdev(net, dev)
+-			if (dev->rtnl_link_ops == &gtp_link_ops)
+-				gtp_dellink(dev, dev_to_kill);
+ 
+ 		list_for_each_entry_safe(gtp, gtp_next, &gn->gtp_dev_list, list)
+ 			gtp_dellink(gtp->dev, dev_to_kill);
+diff --git a/drivers/net/pse-pd/pd692x0.c b/drivers/net/pse-pd/pd692x0.c
+index 0af7db80b2f883..7cfc36cadb5761 100644
+--- a/drivers/net/pse-pd/pd692x0.c
++++ b/drivers/net/pse-pd/pd692x0.c
+@@ -999,13 +999,12 @@ static int pd692x0_pi_get_voltage(struct pse_controller_dev *pcdev, int id)
+ 	return (buf.sub[0] << 8 | buf.sub[1]) * 100000;
+ }
+ 
+-static int pd692x0_pi_get_current_limit(struct pse_controller_dev *pcdev,
+-					int id)
++static int pd692x0_pi_get_pw_limit(struct pse_controller_dev *pcdev,
++				   int id)
+ {
+ 	struct pd692x0_priv *priv = to_pd692x0_priv(pcdev);
+ 	struct pd692x0_msg msg, buf = {0};
+-	int mW, uV, uA, ret;
+-	s64 tmp_64;
++	int ret;
+ 
+ 	msg = pd692x0_msg_template_list[PD692X0_MSG_GET_PORT_PARAM];
+ 	msg.sub[2] = id;
+@@ -1013,48 +1012,24 @@ static int pd692x0_pi_get_current_limit(struct pse_controller_dev *pcdev,
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	ret = pd692x0_pi_get_pw_from_table(buf.data[2], buf.data[3]);
+-	if (ret < 0)
+-		return ret;
+-	mW = ret;
+-
+-	ret = pd692x0_pi_get_voltage(pcdev, id);
+-	if (ret < 0)
+-		return ret;
+-	uV = ret;
+-
+-	tmp_64 = mW;
+-	tmp_64 *= 1000000000ull;
+-	/* uA = mW * 1000000000 / uV */
+-	uA = DIV_ROUND_CLOSEST_ULL(tmp_64, uV);
+-	return uA;
++	return pd692x0_pi_get_pw_from_table(buf.data[0], buf.data[1]);
+ }
+ 
+-static int pd692x0_pi_set_current_limit(struct pse_controller_dev *pcdev,
+-					int id, int max_uA)
++static int pd692x0_pi_set_pw_limit(struct pse_controller_dev *pcdev,
++				   int id, int max_mW)
+ {
+ 	struct pd692x0_priv *priv = to_pd692x0_priv(pcdev);
+ 	struct device *dev = &priv->client->dev;
+ 	struct pd692x0_msg msg, buf = {0};
+-	int uV, ret, mW;
+-	s64 tmp_64;
++	int ret;
+ 
+ 	ret = pd692x0_fw_unavailable(priv);
+ 	if (ret)
+ 		return ret;
+ 
+-	ret = pd692x0_pi_get_voltage(pcdev, id);
+-	if (ret < 0)
+-		return ret;
+-	uV = ret;
+-
+ 	msg = pd692x0_msg_template_list[PD692X0_MSG_SET_PORT_PARAM];
+ 	msg.sub[2] = id;
+-	tmp_64 = uV;
+-	tmp_64 *= max_uA;
+-	/* mW = uV * uA / 1000000000 */
+-	mW = DIV_ROUND_CLOSEST_ULL(tmp_64, 1000000000);
+-	ret = pd692x0_pi_set_pw_from_table(dev, &msg, mW);
++	ret = pd692x0_pi_set_pw_from_table(dev, &msg, max_mW);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -1068,8 +1043,8 @@ static const struct pse_controller_ops pd692x0_ops = {
+ 	.pi_disable = pd692x0_pi_disable,
+ 	.pi_is_enabled = pd692x0_pi_is_enabled,
+ 	.pi_get_voltage = pd692x0_pi_get_voltage,
+-	.pi_get_current_limit = pd692x0_pi_get_current_limit,
+-	.pi_set_current_limit = pd692x0_pi_set_current_limit,
++	.pi_get_pw_limit = pd692x0_pi_get_pw_limit,
++	.pi_set_pw_limit = pd692x0_pi_set_pw_limit,
+ };
+ 
+ #define PD692X0_FW_LINE_MAX_SZ 0xff
+diff --git a/drivers/net/pse-pd/pse_core.c b/drivers/net/pse-pd/pse_core.c
+index 2906ce173f66cd..bb509d973e914e 100644
+--- a/drivers/net/pse-pd/pse_core.c
++++ b/drivers/net/pse-pd/pse_core.c
+@@ -291,32 +291,24 @@ static int pse_pi_get_voltage(struct regulator_dev *rdev)
+ 	return ret;
+ }
+ 
+-static int _pse_ethtool_get_status(struct pse_controller_dev *pcdev,
+-				   int id,
+-				   struct netlink_ext_ack *extack,
+-				   struct pse_control_status *status);
+-
+ static int pse_pi_get_current_limit(struct regulator_dev *rdev)
+ {
+ 	struct pse_controller_dev *pcdev = rdev_get_drvdata(rdev);
+ 	const struct pse_controller_ops *ops;
+-	struct netlink_ext_ack extack = {};
+-	struct pse_control_status st = {};
+-	int id, uV, ret;
++	int id, uV, mW, ret;
+ 	s64 tmp_64;
+ 
+ 	ops = pcdev->ops;
+ 	id = rdev_get_id(rdev);
++	if (!ops->pi_get_pw_limit || !ops->pi_get_voltage)
++		return -EOPNOTSUPP;
++
+ 	mutex_lock(&pcdev->lock);
+-	if (ops->pi_get_current_limit) {
+-		ret = ops->pi_get_current_limit(pcdev, id);
++	ret = ops->pi_get_pw_limit(pcdev, id);
++	if (ret < 0)
+ 		goto out;
+-	}
++	mW = ret;
+ 
+-	/* If pi_get_current_limit() callback not populated get voltage
+-	 * from pi_get_voltage() and power limit from ethtool_get_status()
+-	 *  to calculate current limit.
+-	 */
+ 	ret = _pse_pi_get_voltage(rdev);
+ 	if (!ret) {
+ 		dev_err(pcdev->dev, "Voltage null\n");
+@@ -327,16 +319,7 @@ static int pse_pi_get_current_limit(struct regulator_dev *rdev)
+ 		goto out;
+ 	uV = ret;
+ 
+-	ret = _pse_ethtool_get_status(pcdev, id, &extack, &st);
+-	if (ret)
+-		goto out;
+-
+-	if (!st.c33_avail_pw_limit) {
+-		ret = -ENODATA;
+-		goto out;
+-	}
+-
+-	tmp_64 = st.c33_avail_pw_limit;
++	tmp_64 = mW;
+ 	tmp_64 *= 1000000000ull;
+ 	/* uA = mW * 1000000000 / uV */
+ 	ret = DIV_ROUND_CLOSEST_ULL(tmp_64, uV);
+@@ -351,15 +334,33 @@ static int pse_pi_set_current_limit(struct regulator_dev *rdev, int min_uA,
+ {
+ 	struct pse_controller_dev *pcdev = rdev_get_drvdata(rdev);
+ 	const struct pse_controller_ops *ops;
+-	int id, ret;
++	int id, mW, ret;
++	s64 tmp_64;
+ 
+ 	ops = pcdev->ops;
+-	if (!ops->pi_set_current_limit)
++	if (!ops->pi_set_pw_limit || !ops->pi_get_voltage)
+ 		return -EOPNOTSUPP;
+ 
++	if (max_uA > MAX_PI_CURRENT)
++		return -ERANGE;
++
+ 	id = rdev_get_id(rdev);
+ 	mutex_lock(&pcdev->lock);
+-	ret = ops->pi_set_current_limit(pcdev, id, max_uA);
++	ret = _pse_pi_get_voltage(rdev);
++	if (!ret) {
++		dev_err(pcdev->dev, "Voltage null\n");
++		ret = -ERANGE;
++		goto out;
++	}
++	if (ret < 0)
++		goto out;
++
++	tmp_64 = ret;
++	tmp_64 *= max_uA;
++	/* mW = uA * uV / 1000000000 */
++	mW = DIV_ROUND_CLOSEST_ULL(tmp_64, 1000000000);
++	ret = ops->pi_set_pw_limit(pcdev, id, mW);
++out:
+ 	mutex_unlock(&pcdev->lock);
+ 
+ 	return ret;
+@@ -403,11 +404,9 @@ devm_pse_pi_regulator_register(struct pse_controller_dev *pcdev,
+ 
+ 	rinit_data->constraints.valid_ops_mask = REGULATOR_CHANGE_STATUS;
+ 
+-	if (pcdev->ops->pi_set_current_limit) {
++	if (pcdev->ops->pi_set_pw_limit)
+ 		rinit_data->constraints.valid_ops_mask |=
+ 			REGULATOR_CHANGE_CURRENT;
+-		rinit_data->constraints.max_uA = MAX_PI_CURRENT;
+-	}
+ 
+ 	rinit_data->supply_regulator = "vpwr";
+ 
+@@ -736,23 +735,6 @@ struct pse_control *of_pse_control_get(struct device_node *node)
+ }
+ EXPORT_SYMBOL_GPL(of_pse_control_get);
+ 
+-static int _pse_ethtool_get_status(struct pse_controller_dev *pcdev,
+-				   int id,
+-				   struct netlink_ext_ack *extack,
+-				   struct pse_control_status *status)
+-{
+-	const struct pse_controller_ops *ops;
+-
+-	ops = pcdev->ops;
+-	if (!ops->ethtool_get_status) {
+-		NL_SET_ERR_MSG(extack,
+-			       "PSE driver does not support status report");
+-		return -EOPNOTSUPP;
+-	}
+-
+-	return ops->ethtool_get_status(pcdev, id, extack, status);
+-}
+-
+ /**
+  * pse_ethtool_get_status - get status of PSE control
+  * @psec: PSE control pointer
+@@ -765,11 +747,21 @@ int pse_ethtool_get_status(struct pse_control *psec,
+ 			   struct netlink_ext_ack *extack,
+ 			   struct pse_control_status *status)
+ {
++	const struct pse_controller_ops *ops;
++	struct pse_controller_dev *pcdev;
+ 	int err;
+ 
+-	mutex_lock(&psec->pcdev->lock);
+-	err = _pse_ethtool_get_status(psec->pcdev, psec->id, extack, status);
+-	mutex_unlock(&psec->pcdev->lock);
++	pcdev = psec->pcdev;
++	ops = pcdev->ops;
++	if (!ops->ethtool_get_status) {
++		NL_SET_ERR_MSG(extack,
++			       "PSE driver does not support status report");
++		return -EOPNOTSUPP;
++	}
++
++	mutex_lock(&pcdev->lock);
++	err = ops->ethtool_get_status(pcdev, psec->id, extack, status);
++	mutex_unlock(&pcdev->lock);
+ 
+ 	return err;
+ }
+diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
+index a96976b22fa796..61af1583356c27 100644
+--- a/drivers/nvme/host/ioctl.c
++++ b/drivers/nvme/host/ioctl.c
+@@ -276,8 +276,7 @@ static bool nvme_validate_passthru_nsid(struct nvme_ctrl *ctrl,
+ {
+ 	if (ns && nsid != ns->head->ns_id) {
+ 		dev_err(ctrl->device,
+-			"%s: nsid (%u) in cmd does not match nsid (%u)"
+-			"of namespace\n",
++			"%s: nsid (%u) in cmd does not match nsid (%u) of namespace\n",
+ 			current->comm, nsid, ns->head->ns_id);
+ 		return false;
+ 	}
+diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
+index 8305d3c1280748..840ae475074d09 100644
+--- a/drivers/nvme/host/tcp.c
++++ b/drivers/nvme/host/tcp.c
+@@ -1449,11 +1449,14 @@ static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
+ 		msg.msg_control = cbuf;
+ 		msg.msg_controllen = sizeof(cbuf);
+ 	}
++	msg.msg_flags = MSG_WAITALL;
+ 	ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
+ 			iov.iov_len, msg.msg_flags);
+-	if (ret < 0) {
++	if (ret < sizeof(*icresp)) {
+ 		pr_warn("queue %d: failed to receive icresp, error %d\n",
+ 			nvme_tcp_queue_id(queue), ret);
++		if (ret >= 0)
++			ret = -ECONNRESET;
+ 		goto free_icresp;
+ 	}
+ 	ret = -ENOTCONN;
+@@ -1565,7 +1568,7 @@ static bool nvme_tcp_poll_queue(struct nvme_tcp_queue *queue)
+ 			  ctrl->io_queues[HCTX_TYPE_POLL];
+ }
+ 
+-/**
++/*
+  * Track the number of queues assigned to each cpu using a global per-cpu
+  * counter and select the least used cpu from the mq_map. Our goal is to spread
+  * different controllers I/O threads across different cpu cores.
+diff --git a/drivers/pci/devres.c b/drivers/pci/devres.c
+index b133967faef840..643f85849ef64b 100644
+--- a/drivers/pci/devres.c
++++ b/drivers/pci/devres.c
+@@ -411,46 +411,20 @@ static inline bool mask_contains_bar(int mask, int bar)
+ 	return mask & BIT(bar);
+ }
+ 
+-/*
+- * This is a copy of pci_intx() used to bypass the problem of recursive
+- * function calls due to the hybrid nature of pci_intx().
+- */
+-static void __pcim_intx(struct pci_dev *pdev, int enable)
+-{
+-	u16 pci_command, new;
+-
+-	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
+-
+-	if (enable)
+-		new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
+-	else
+-		new = pci_command | PCI_COMMAND_INTX_DISABLE;
+-
+-	if (new != pci_command)
+-		pci_write_config_word(pdev, PCI_COMMAND, new);
+-}
+-
+ static void pcim_intx_restore(struct device *dev, void *data)
+ {
+ 	struct pci_dev *pdev = to_pci_dev(dev);
+ 	struct pcim_intx_devres *res = data;
+ 
+-	__pcim_intx(pdev, res->orig_intx);
++	pci_intx(pdev, res->orig_intx);
+ }
+ 
+-static struct pcim_intx_devres *get_or_create_intx_devres(struct device *dev)
++static void save_orig_intx(struct pci_dev *pdev, struct pcim_intx_devres *res)
+ {
+-	struct pcim_intx_devres *res;
+-
+-	res = devres_find(dev, pcim_intx_restore, NULL, NULL);
+-	if (res)
+-		return res;
++	u16 pci_command;
+ 
+-	res = devres_alloc(pcim_intx_restore, sizeof(*res), GFP_KERNEL);
+-	if (res)
+-		devres_add(dev, res);
+-
+-	return res;
++	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
++	res->orig_intx = !(pci_command & PCI_COMMAND_INTX_DISABLE);
+ }
+ 
+ /**
+@@ -466,16 +440,28 @@ static struct pcim_intx_devres *get_or_create_intx_devres(struct device *dev)
+ int pcim_intx(struct pci_dev *pdev, int enable)
+ {
+ 	struct pcim_intx_devres *res;
++	struct device *dev = &pdev->dev;
+ 
+-	res = get_or_create_intx_devres(&pdev->dev);
+-	if (!res)
+-		return -ENOMEM;
++	/*
++	 * pcim_intx() must only restore the INTx value that existed before the
++	 * driver was loaded, i.e., before it called pcim_intx() for the
++	 * first time.
++	 */
++	res = devres_find(dev, pcim_intx_restore, NULL, NULL);
++	if (!res) {
++		res = devres_alloc(pcim_intx_restore, sizeof(*res), GFP_KERNEL);
++		if (!res)
++			return -ENOMEM;
++
++		save_orig_intx(pdev, res);
++		devres_add(dev, res);
++	}
+ 
+-	res->orig_intx = !enable;
+-	__pcim_intx(pdev, enable);
++	pci_intx(pdev, enable);
+ 
+ 	return 0;
+ }
++EXPORT_SYMBOL_GPL(pcim_intx);
+ 
+ static void pcim_disable_device(void *pdev_raw)
+ {
+@@ -939,7 +925,7 @@ static void pcim_release_all_regions(struct pci_dev *pdev)
+  * desired, release individual regions with pcim_release_region() or all of
+  * them at once with pcim_release_all_regions().
+  */
+-static int pcim_request_all_regions(struct pci_dev *pdev, const char *name)
++int pcim_request_all_regions(struct pci_dev *pdev, const char *name)
+ {
+ 	int ret;
+ 	int bar;
+@@ -957,6 +943,7 @@ static int pcim_request_all_regions(struct pci_dev *pdev, const char *name)
+ 
+ 	return ret;
+ }
++EXPORT_SYMBOL(pcim_request_all_regions);
+ 
+ /**
+  * pcim_iomap_regions_request_all - Request all BARs and iomap specified ones
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index dd3c6dcb47ae4a..1aa5d6f98ebda2 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -4486,11 +4486,6 @@ void pci_disable_parity(struct pci_dev *dev)
+  * @enable: boolean: whether to enable or disable PCI INTx
+  *
+  * Enables/disables PCI INTx for device @pdev
+- *
+- * NOTE:
+- * This is a "hybrid" function: It's normally unmanaged, but becomes managed
+- * when pcim_enable_device() has been called in advance. This hybrid feature is
+- * DEPRECATED! If you want managed cleanup, use pcim_intx() instead.
+  */
+ void pci_intx(struct pci_dev *pdev, int enable)
+ {
+@@ -4503,15 +4498,10 @@ void pci_intx(struct pci_dev *pdev, int enable)
+ 	else
+ 		new = pci_command | PCI_COMMAND_INTX_DISABLE;
+ 
+-	if (new != pci_command) {
+-		/* Preserve the "hybrid" behavior for backwards compatibility */
+-		if (pci_is_managed(pdev)) {
+-			WARN_ON_ONCE(pcim_intx(pdev, enable) != 0);
+-			return;
+-		}
++	if (new == pci_command)
++		return;
+ 
+-		pci_write_config_word(pdev, PCI_COMMAND, new);
+-	}
++	pci_write_config_word(pdev, PCI_COMMAND, new);
+ }
+ EXPORT_SYMBOL_GPL(pci_intx);
+ 
+diff --git a/drivers/platform/cznic/Kconfig b/drivers/platform/cznic/Kconfig
+index 49c383eb678541..13e37b49d9d01e 100644
+--- a/drivers/platform/cznic/Kconfig
++++ b/drivers/platform/cznic/Kconfig
+@@ -6,6 +6,7 @@
+ 
+ menuconfig CZNIC_PLATFORMS
+ 	bool "Platform support for CZ.NIC's Turris hardware"
++	depends on ARCH_MVEBU || COMPILE_TEST
+ 	help
+ 	  Say Y here to be able to choose driver support for CZ.NIC's Turris
+ 	  devices. This option alone does not add any kernel code.
+diff --git a/drivers/power/supply/axp20x_battery.c b/drivers/power/supply/axp20x_battery.c
+index f71cc90fea1273..57eba1ddb17ba5 100644
+--- a/drivers/power/supply/axp20x_battery.c
++++ b/drivers/power/supply/axp20x_battery.c
+@@ -466,10 +466,9 @@ static int axp717_battery_get_prop(struct power_supply *psy,
+ 
+ 	/*
+ 	 * If a fault is detected it must also be cleared; if the
+-	 * condition persists it should reappear (This is an
+-	 * assumption, it's actually not documented). A restart was
+-	 * not sufficient to clear the bit in testing despite the
+-	 * register listed as POR.
++	 * condition persists it should reappear. A restart was not
++	 * sufficient to clear the bit in testing despite the register
++	 * listed as POR.
+ 	 */
+ 	case POWER_SUPPLY_PROP_HEALTH:
+ 		ret = regmap_read(axp20x_batt->regmap, AXP717_PMU_FAULT,
+@@ -480,26 +479,26 @@ static int axp717_battery_get_prop(struct power_supply *psy,
+ 		switch (reg & AXP717_BATT_PMU_FAULT_MASK) {
+ 		case AXP717_BATT_UVLO_2_5V:
+ 			val->intval = POWER_SUPPLY_HEALTH_DEAD;
+-			regmap_update_bits(axp20x_batt->regmap,
+-					   AXP717_PMU_FAULT,
+-					   AXP717_BATT_UVLO_2_5V,
+-					   AXP717_BATT_UVLO_2_5V);
++			regmap_write_bits(axp20x_batt->regmap,
++					  AXP717_PMU_FAULT,
++					  AXP717_BATT_UVLO_2_5V,
++					  AXP717_BATT_UVLO_2_5V);
+ 			return 0;
+ 
+ 		case AXP717_BATT_OVER_TEMP:
+ 			val->intval = POWER_SUPPLY_HEALTH_HOT;
+-			regmap_update_bits(axp20x_batt->regmap,
+-					   AXP717_PMU_FAULT,
+-					   AXP717_BATT_OVER_TEMP,
+-					   AXP717_BATT_OVER_TEMP);
++			regmap_write_bits(axp20x_batt->regmap,
++					  AXP717_PMU_FAULT,
++					  AXP717_BATT_OVER_TEMP,
++					  AXP717_BATT_OVER_TEMP);
+ 			return 0;
+ 
+ 		case AXP717_BATT_UNDER_TEMP:
+ 			val->intval = POWER_SUPPLY_HEALTH_COLD;
+-			regmap_update_bits(axp20x_batt->regmap,
+-					   AXP717_PMU_FAULT,
+-					   AXP717_BATT_UNDER_TEMP,
+-					   AXP717_BATT_UNDER_TEMP);
++			regmap_write_bits(axp20x_batt->regmap,
++					  AXP717_PMU_FAULT,
++					  AXP717_BATT_UNDER_TEMP,
++					  AXP717_BATT_UNDER_TEMP);
+ 			return 0;
+ 
+ 		default:
+diff --git a/drivers/power/supply/da9150-fg.c b/drivers/power/supply/da9150-fg.c
+index 652c1f213af1c2..4f28ef1bba1a3c 100644
+--- a/drivers/power/supply/da9150-fg.c
++++ b/drivers/power/supply/da9150-fg.c
+@@ -247,9 +247,9 @@ static int da9150_fg_current_avg(struct da9150_fg *fg,
+ 				      DA9150_QIF_SD_GAIN_SIZE);
+ 	da9150_fg_read_sync_end(fg);
+ 
+-	div = (u64) (sd_gain * shunt_val * 65536ULL);
++	div = 65536ULL * sd_gain * shunt_val;
+ 	do_div(div, 1000000);
+-	res = (u64) (iavg * 1000000ULL);
++	res = 1000000ULL * iavg;
+ 	do_div(res, div);
+ 
+ 	val->intval = (int) res;
+diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c
+index e36e3ea165d3b2..2f34761e64135c 100644
+--- a/drivers/s390/net/ism_drv.c
++++ b/drivers/s390/net/ism_drv.c
+@@ -588,6 +588,15 @@ static int ism_dev_init(struct ism_dev *ism)
+ 	return ret;
+ }
+ 
++static void ism_dev_release(struct device *dev)
++{
++	struct ism_dev *ism;
++
++	ism = container_of(dev, struct ism_dev, dev);
++
++	kfree(ism);
++}
++
+ static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ {
+ 	struct ism_dev *ism;
+@@ -601,6 +610,7 @@ static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	dev_set_drvdata(&pdev->dev, ism);
+ 	ism->pdev = pdev;
+ 	ism->dev.parent = &pdev->dev;
++	ism->dev.release = ism_dev_release;
+ 	device_initialize(&ism->dev);
+ 	dev_set_name(&ism->dev, dev_name(&pdev->dev));
+ 	ret = device_add(&ism->dev);
+@@ -637,7 +647,7 @@ static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	device_del(&ism->dev);
+ err_dev:
+ 	dev_set_drvdata(&pdev->dev, NULL);
+-	kfree(ism);
++	put_device(&ism->dev);
+ 
+ 	return ret;
+ }
+@@ -682,7 +692,7 @@ static void ism_remove(struct pci_dev *pdev)
+ 	pci_disable_device(pdev);
+ 	device_del(&ism->dev);
+ 	dev_set_drvdata(&pdev->dev, NULL);
+-	kfree(ism);
++	put_device(&ism->dev);
+ }
+ 
+ static struct pci_driver ism_driver = {
+diff --git a/drivers/soc/loongson/loongson2_guts.c b/drivers/soc/loongson/loongson2_guts.c
+index ef352a0f502208..1fcf7ca8083e10 100644
+--- a/drivers/soc/loongson/loongson2_guts.c
++++ b/drivers/soc/loongson/loongson2_guts.c
+@@ -114,8 +114,11 @@ static int loongson2_guts_probe(struct platform_device *pdev)
+ 	if (of_property_read_string(root, "model", &machine))
+ 		of_property_read_string_index(root, "compatible", 0, &machine);
+ 	of_node_put(root);
+-	if (machine)
++	if (machine) {
+ 		soc_dev_attr.machine = devm_kstrdup(dev, machine, GFP_KERNEL);
++		if (!soc_dev_attr.machine)
++			return -ENOMEM;
++	}
+ 
+ 	svr = loongson2_guts_get_svr();
+ 	soc_die = loongson2_soc_die_match(svr, loongson2_soc_die);
+diff --git a/drivers/tee/optee/supp.c b/drivers/tee/optee/supp.c
+index 322a543b8c278a..d0f397c9024201 100644
+--- a/drivers/tee/optee/supp.c
++++ b/drivers/tee/optee/supp.c
+@@ -80,7 +80,6 @@ u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
+ 	struct optee *optee = tee_get_drvdata(ctx->teedev);
+ 	struct optee_supp *supp = &optee->supp;
+ 	struct optee_supp_req *req;
+-	bool interruptable;
+ 	u32 ret;
+ 
+ 	/*
+@@ -111,36 +110,18 @@ u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
+ 	/*
+ 	 * Wait for supplicant to process and return result, once we've
+ 	 * returned from wait_for_completion(&req->c) successfully we have
+-	 * exclusive access again.
++	 * exclusive access again. Make the wait killable so that it
++	 * doesn't block indefinitely if the supplicant hangs for some
++	 * reason.
+ 	 */
+-	while (wait_for_completion_interruptible(&req->c)) {
++	if (wait_for_completion_killable(&req->c)) {
+ 		mutex_lock(&supp->mutex);
+-		interruptable = !supp->ctx;
+-		if (interruptable) {
+-			/*
+-			 * There's no supplicant available and since the
+-			 * supp->mutex currently is held none can
+-			 * become available until the mutex released
+-			 * again.
+-			 *
+-			 * Interrupting an RPC to supplicant is only
+-			 * allowed as a way of slightly improving the user
+-			 * experience in case the supplicant hasn't been
+-			 * started yet. During normal operation the supplicant
+-			 * will serve all requests in a timely manner and
+-			 * interrupting then wouldn't make sense.
+-			 */
+-			if (req->in_queue) {
+-				list_del(&req->link);
+-				req->in_queue = false;
+-			}
++		if (req->in_queue) {
++			list_del(&req->link);
++			req->in_queue = false;
+ 		}
+ 		mutex_unlock(&supp->mutex);
+-
+-		if (interruptable) {
+-			req->ret = TEEC_ERROR_COMMUNICATION;
+-			break;
+-		}
++		req->ret = TEEC_ERROR_COMMUNICATION;
+ 	}
+ 
+ 	ret = req->ret;
+diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
+index 4153643c67dcec..1f18f15dba2778 100644
+--- a/drivers/usb/gadget/function/f_midi.c
++++ b/drivers/usb/gadget/function/f_midi.c
+@@ -283,7 +283,7 @@ f_midi_complete(struct usb_ep *ep, struct usb_request *req)
+ 			/* Our transmit completed. See if there's more to go.
+ 			 * f_midi_transmit eats req, don't queue it again. */
+ 			req->length = 0;
+-			f_midi_transmit(midi);
++			queue_work(system_highpri_wq, &midi->work);
+ 			return;
+ 		}
+ 		break;
+diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
+index 90aef2627ca27b..40332ab62f1018 100644
+--- a/fs/btrfs/compression.c
++++ b/fs/btrfs/compression.c
+@@ -545,8 +545,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
+ 		 * subpage::readers and to unlock the page.
+ 		 */
+ 		if (fs_info->sectorsize < PAGE_SIZE)
+-			btrfs_subpage_start_reader(fs_info, folio, cur,
+-						   add_size);
++			btrfs_folio_set_lock(fs_info, folio, cur, add_size);
+ 		folio_put(folio);
+ 		cur += add_size;
+ 	}
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index fe08c983d5bb4b..660a5b9c08e9e4 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -190,7 +190,7 @@ static void process_one_folio(struct btrfs_fs_info *fs_info,
+ 		btrfs_folio_clamp_clear_writeback(fs_info, folio, start, len);
+ 
+ 	if (folio != locked_folio && (page_ops & PAGE_UNLOCK))
+-		btrfs_folio_end_writer_lock(fs_info, folio, start, len);
++		btrfs_folio_end_lock(fs_info, folio, start, len);
+ }
+ 
+ static void __process_folios_contig(struct address_space *mapping,
+@@ -276,7 +276,7 @@ static noinline int lock_delalloc_folios(struct inode *inode,
+ 			range_start = max_t(u64, folio_pos(folio), start);
+ 			range_len = min_t(u64, folio_pos(folio) + folio_size(folio),
+ 					  end + 1) - range_start;
+-			btrfs_folio_set_writer_lock(fs_info, folio, range_start, range_len);
++			btrfs_folio_set_lock(fs_info, folio, range_start, range_len);
+ 
+ 			processed_end = range_start + range_len - 1;
+ 		}
+@@ -438,7 +438,7 @@ static void end_folio_read(struct folio *folio, bool uptodate, u64 start, u32 le
+ 	if (!btrfs_is_subpage(fs_info, folio->mapping))
+ 		folio_unlock(folio);
+ 	else
+-		btrfs_subpage_end_reader(fs_info, folio, start, len);
++		btrfs_folio_end_lock(fs_info, folio, start, len);
+ }
+ 
+ /*
+@@ -495,7 +495,7 @@ static void begin_folio_read(struct btrfs_fs_info *fs_info, struct folio *folio)
+ 		return;
+ 
+ 	ASSERT(folio_test_private(folio));
+-	btrfs_subpage_start_reader(fs_info, folio, folio_pos(folio), PAGE_SIZE);
++	btrfs_folio_set_lock(fs_info, folio, folio_pos(folio), PAGE_SIZE);
+ }
+ 
+ /*
+@@ -1105,15 +1105,59 @@ int btrfs_read_folio(struct file *file, struct folio *folio)
+ 	return ret;
+ }
+ 
++static void set_delalloc_bitmap(struct folio *folio, unsigned long *delalloc_bitmap,
++				u64 start, u32 len)
++{
++	struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
++	const u64 folio_start = folio_pos(folio);
++	unsigned int start_bit;
++	unsigned int nbits;
++
++	ASSERT(start >= folio_start && start + len <= folio_start + PAGE_SIZE);
++	start_bit = (start - folio_start) >> fs_info->sectorsize_bits;
++	nbits = len >> fs_info->sectorsize_bits;
++	ASSERT(bitmap_test_range_all_zero(delalloc_bitmap, start_bit, nbits));
++	bitmap_set(delalloc_bitmap, start_bit, nbits);
++}
++
++static bool find_next_delalloc_bitmap(struct folio *folio,
++				      unsigned long *delalloc_bitmap, u64 start,
++				      u64 *found_start, u32 *found_len)
++{
++	struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
++	const u64 folio_start = folio_pos(folio);
++	const unsigned int bitmap_size = fs_info->sectors_per_page;
++	unsigned int start_bit;
++	unsigned int first_zero;
++	unsigned int first_set;
++
++	ASSERT(start >= folio_start && start < folio_start + PAGE_SIZE);
++
++	start_bit = (start - folio_start) >> fs_info->sectorsize_bits;
++	first_set = find_next_bit(delalloc_bitmap, bitmap_size, start_bit);
++	if (first_set >= bitmap_size)
++		return false;
++
++	*found_start = folio_start + (first_set << fs_info->sectorsize_bits);
++	first_zero = find_next_zero_bit(delalloc_bitmap, bitmap_size, first_set);
++	*found_len = (first_zero - first_set) << fs_info->sectorsize_bits;
++	return true;
++}
++
+ /*
+- * helper for extent_writepage(), doing all of the delayed allocation setup.
++ * Do all of the delayed allocation setup.
++ *
++ * Return >0 if all the dirty blocks are submitted async (compression) or inlined.
++ * The @folio should no longer be touched (treat it as already unlocked).
+  *
+- * This returns 1 if btrfs_run_delalloc_range function did all the work required
+- * to write the page (copy into inline extent).  In this case the IO has
+- * been started and the page is already unlocked.
++ * Return 0 if there are still dirty blocks that need to be submitted
++ * through extent_writepage_io().
++ * bio_ctrl->submit_bitmap will indicate which blocks of the folio should be
++ * submitted, and @folio is still kept locked.
+  *
+- * This returns 0 if all went well (page still locked)
+- * This returns < 0 if there were errors (page still locked)
++ * Return <0 if any error is hit.
++ * Any allocated ordered extent range covering this folio will be marked
++ * finished (IOERR), and @folio is still kept locked.
+  */
+ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
+ 						 struct folio *folio,
+@@ -1124,16 +1168,28 @@ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
+ 	const bool is_subpage = btrfs_is_subpage(fs_info, folio->mapping);
+ 	const u64 page_start = folio_pos(folio);
+ 	const u64 page_end = page_start + folio_size(folio) - 1;
++	unsigned long delalloc_bitmap = 0;
+ 	/*
+ 	 * Save the last found delalloc end. As the delalloc end can go beyond
+	 * the page boundary, we cannot rely on the subpage bitmap to locate the
+ 	 * last delalloc end.
+ 	 */
+ 	u64 last_delalloc_end = 0;
++	/*
++	 * The range end (exclusive) of the last successfully finished delalloc
++	 * range.
++	 * Any range covered by ordered extent must either be manually marked
++	 * finished (error handling), or has IO submitted (and finish the
++	 * ordered extent normally).
++	 *
++	 * This records the end of ordered extent cleanup if we hit an error.
++	 */
++	u64 last_finished_delalloc_end = page_start;
+ 	u64 delalloc_start = page_start;
+ 	u64 delalloc_end = page_end;
+ 	u64 delalloc_to_write = 0;
+ 	int ret = 0;
++	int bit;
+ 
+ 	/* Save the dirty bitmap as our submission bitmap will be a subset of it. */
+ 	if (btrfs_is_subpage(fs_info, inode->vfs_inode.i_mapping)) {
+@@ -1143,6 +1199,12 @@ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
+ 		bio_ctrl->submit_bitmap = 1;
+ 	}
+ 
++	for_each_set_bit(bit, &bio_ctrl->submit_bitmap, fs_info->sectors_per_page) {
++		u64 start = page_start + (bit << fs_info->sectorsize_bits);
++
++		btrfs_folio_set_lock(fs_info, folio, start, fs_info->sectorsize);
++	}
++
+ 	/* Lock all (subpage) delalloc ranges inside the folio first. */
+ 	while (delalloc_start < page_end) {
+ 		delalloc_end = page_end;
+@@ -1151,9 +1213,8 @@ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
+ 			delalloc_start = delalloc_end + 1;
+ 			continue;
+ 		}
+-		btrfs_folio_set_writer_lock(fs_info, folio, delalloc_start,
+-					    min(delalloc_end, page_end) + 1 -
+-					    delalloc_start);
++		set_delalloc_bitmap(folio, &delalloc_bitmap, delalloc_start,
++				    min(delalloc_end, page_end) + 1 - delalloc_start);
+ 		last_delalloc_end = delalloc_end;
+ 		delalloc_start = delalloc_end + 1;
+ 	}
+@@ -1178,7 +1239,7 @@ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
+ 			found_len = last_delalloc_end + 1 - found_start;
+ 			found = true;
+ 		} else {
+-			found = btrfs_subpage_find_writer_locked(fs_info, folio,
++			found = find_next_delalloc_bitmap(folio, &delalloc_bitmap,
+ 					delalloc_start, &found_start, &found_len);
+ 		}
+ 		if (!found)
+@@ -1192,11 +1253,19 @@ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
+ 			found_len = last_delalloc_end + 1 - found_start;
+ 
+ 		if (ret >= 0) {
++			/*
++			 * Some delalloc ranges may have been created by previous
++			 * folios, so we still need to clean them up during error
++			 * handling.
++			 */
++			last_finished_delalloc_end = found_start;
+ 			/* No errors hit so far, run the current delalloc range. */
+ 			ret = btrfs_run_delalloc_range(inode, folio,
+ 						       found_start,
+ 						       found_start + found_len - 1,
+ 						       wbc);
++			if (ret >= 0)
++				last_finished_delalloc_end = found_start + found_len;
+ 		} else {
+ 			/*
+ 			 * We've hit an error during previous delalloc range,
+@@ -1231,8 +1300,22 @@ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
+ 
+ 		delalloc_start = found_start + found_len;
+ 	}
+-	if (ret < 0)
++	/*
++	 * It's possible that some ordered extents were created before we
++	 * hit an error; clean up the non-async, successfully created
++	 * delalloc ranges.
++	 */
++	if (unlikely(ret < 0)) {
++		unsigned int bitmap_size = min(
++				(last_finished_delalloc_end - page_start) >>
++				fs_info->sectorsize_bits,
++				fs_info->sectors_per_page);
++
++		for_each_set_bit(bit, &bio_ctrl->submit_bitmap, bitmap_size)
++			btrfs_mark_ordered_io_finished(inode, folio,
++				page_start + (bit << fs_info->sectorsize_bits),
++				fs_info->sectorsize, false);
+ 		return ret;
++	}
+ out:
+ 	if (last_delalloc_end)
+ 		delalloc_end = last_delalloc_end;
+@@ -1348,6 +1431,7 @@ static noinline_for_stack int extent_writepage_io(struct btrfs_inode *inode,
+ 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ 	unsigned long range_bitmap = 0;
+ 	bool submitted_io = false;
++	bool error = false;
+ 	const u64 folio_start = folio_pos(folio);
+ 	u64 cur;
+ 	int bit;
+@@ -1390,13 +1474,26 @@ static noinline_for_stack int extent_writepage_io(struct btrfs_inode *inode,
+ 			break;
+ 		}
+ 		ret = submit_one_sector(inode, folio, cur, bio_ctrl, i_size);
+-		if (ret < 0)
+-			goto out;
++		if (unlikely(ret < 0)) {
++			/*
++			 * bio_ctrl may contain a bio crossing several folios.
++			 * Submit it immediately so that the bio has a chance
++			 * to finish normally, rather than being marked as an error.
++			 */
++			submit_one_bio(bio_ctrl);
++			/*
++			 * Failed to grab the extent map, which should be very rare.
++			 * Since there is no bio submitted to finish the ordered
++			 * extent, we have to manually finish this sector.
++			 */
++			btrfs_mark_ordered_io_finished(inode, folio, cur,
++						       fs_info->sectorsize, false);
++			error = true;
++			continue;
++		}
+ 		submitted_io = true;
+ 	}
+ 
+-	btrfs_folio_assert_not_dirty(fs_info, folio, start, len);
+-out:
+ 	/*
+ 	 * If we didn't submitted any sector (>= i_size), folio dirty get
+ 	 * cleared but PAGECACHE_TAG_DIRTY is not cleared (only cleared
+@@ -1404,8 +1501,11 @@ static noinline_for_stack int extent_writepage_io(struct btrfs_inode *inode,
+ 	 *
+ 	 * Here we set writeback and clear for the range. If the full folio
+ 	 * is no longer dirty then we clear the PAGECACHE_TAG_DIRTY tag.
++	 *
++	 * If we hit any error, the corresponding sector will still be dirty
++	 * thus no need to clear PAGECACHE_TAG_DIRTY.
+ 	 */
+-	if (!submitted_io) {
++	if (!submitted_io && !error) {
+ 		btrfs_folio_set_writeback(fs_info, folio, start, len);
+ 		btrfs_folio_clear_writeback(fs_info, folio, start, len);
+ 	}
+@@ -1423,15 +1523,14 @@ static noinline_for_stack int extent_writepage_io(struct btrfs_inode *inode,
+  */
+ static int extent_writepage(struct folio *folio, struct btrfs_bio_ctrl *bio_ctrl)
+ {
+-	struct inode *inode = folio->mapping->host;
+-	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
+-	const u64 page_start = folio_pos(folio);
++	struct btrfs_inode *inode = BTRFS_I(folio->mapping->host);
++	struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ 	int ret;
+ 	size_t pg_offset;
+-	loff_t i_size = i_size_read(inode);
++	loff_t i_size = i_size_read(&inode->vfs_inode);
+ 	unsigned long end_index = i_size >> PAGE_SHIFT;
+ 
+-	trace_extent_writepage(folio, inode, bio_ctrl->wbc);
++	trace_extent_writepage(folio, &inode->vfs_inode, bio_ctrl->wbc);
+ 
+ 	WARN_ON(!folio_test_locked(folio));
+ 
+@@ -1455,13 +1554,13 @@ static int extent_writepage(struct folio *folio, struct btrfs_bio_ctrl *bio_ctrl
+ 	if (ret < 0)
+ 		goto done;
+ 
+-	ret = writepage_delalloc(BTRFS_I(inode), folio, bio_ctrl);
++	ret = writepage_delalloc(inode, folio, bio_ctrl);
+ 	if (ret == 1)
+ 		return 0;
+ 	if (ret)
+ 		goto done;
+ 
+-	ret = extent_writepage_io(BTRFS_I(inode), folio, folio_pos(folio),
++	ret = extent_writepage_io(inode, folio, folio_pos(folio),
+ 				  PAGE_SIZE, bio_ctrl, i_size);
+ 	if (ret == 1)
+ 		return 0;
+@@ -1469,17 +1568,13 @@ static int extent_writepage(struct folio *folio, struct btrfs_bio_ctrl *bio_ctrl
+ 	bio_ctrl->wbc->nr_to_write--;
+ 
+ done:
+-	if (ret) {
+-		btrfs_mark_ordered_io_finished(BTRFS_I(inode), folio,
+-					       page_start, PAGE_SIZE, !ret);
++	if (ret < 0)
+ 		mapping_set_error(folio->mapping, ret);
+-	}
+-
+ 	/*
+ 	 * Only unlock ranges that are submitted. As there can be some async
+ 	 * submitted ranges inside the folio.
+ 	 */
+-	btrfs_folio_end_writer_lock_bitmap(fs_info, folio, bio_ctrl->submit_bitmap);
++	btrfs_folio_end_lock_bitmap(fs_info, folio, bio_ctrl->submit_bitmap);
+ 	ASSERT(ret <= 0);
+ 	return ret;
+ }
+@@ -2231,12 +2326,9 @@ void extent_write_locked_range(struct inode *inode, const struct folio *locked_f
+ 		if (ret == 1)
+ 			goto next_page;
+ 
+-		if (ret) {
+-			btrfs_mark_ordered_io_finished(BTRFS_I(inode), folio,
+-						       cur, cur_len, !ret);
++		if (ret)
+ 			mapping_set_error(mapping, ret);
+-		}
+-		btrfs_folio_end_writer_lock(fs_info, folio, cur, cur_len);
++		btrfs_folio_end_lock(fs_info, folio, cur, cur_len);
+ 		if (ret < 0)
+ 			found_error = true;
+ next_page:
+@@ -2463,12 +2555,6 @@ static bool folio_range_has_eb(struct btrfs_fs_info *fs_info, struct folio *foli
+ 		subpage = folio_get_private(folio);
+ 		if (atomic_read(&subpage->eb_refs))
+ 			return true;
+-		/*
+-		 * Even there is no eb refs here, we may still have
+-		 * end_folio_read() call relying on page::private.
+-		 */
+-		if (atomic_read(&subpage->readers))
+-			return true;
+ 	}
+ 	return false;
+ }
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index f7e7d864f41440..5b842276573e82 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -2419,8 +2419,7 @@ int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct folio *locked_fol
+ 
+ out:
+ 	if (ret < 0)
+-		btrfs_cleanup_ordered_extents(inode, locked_folio, start,
+-					      end - start + 1);
++		btrfs_cleanup_ordered_extents(inode, NULL, start, end - start + 1);
+ 	return ret;
+ }
+ 
+diff --git a/fs/btrfs/subpage.c b/fs/btrfs/subpage.c
+index ec7328a6bfd755..88a01d51ab11f1 100644
+--- a/fs/btrfs/subpage.c
++++ b/fs/btrfs/subpage.c
+@@ -140,12 +140,10 @@ struct btrfs_subpage *btrfs_alloc_subpage(const struct btrfs_fs_info *fs_info,
+ 		return ERR_PTR(-ENOMEM);
+ 
+ 	spin_lock_init(&ret->lock);
+-	if (type == BTRFS_SUBPAGE_METADATA) {
++	if (type == BTRFS_SUBPAGE_METADATA)
+ 		atomic_set(&ret->eb_refs, 0);
+-	} else {
+-		atomic_set(&ret->readers, 0);
+-		atomic_set(&ret->writers, 0);
+-	}
++	else
++		atomic_set(&ret->nr_locked, 0);
+ 	return ret;
+ }
+ 
+@@ -221,62 +219,6 @@ static void btrfs_subpage_assert(const struct btrfs_fs_info *fs_info,
+ 	__start_bit;							\
+ })
+ 
+-void btrfs_subpage_start_reader(const struct btrfs_fs_info *fs_info,
+-				struct folio *folio, u64 start, u32 len)
+-{
+-	struct btrfs_subpage *subpage = folio_get_private(folio);
+-	const int start_bit = subpage_calc_start_bit(fs_info, folio, locked, start, len);
+-	const int nbits = len >> fs_info->sectorsize_bits;
+-	unsigned long flags;
+-
+-
+-	btrfs_subpage_assert(fs_info, folio, start, len);
+-
+-	spin_lock_irqsave(&subpage->lock, flags);
+-	/*
+-	 * Even though it's just for reading the page, no one should have
+-	 * locked the subpage range.
+-	 */
+-	ASSERT(bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits));
+-	bitmap_set(subpage->bitmaps, start_bit, nbits);
+-	atomic_add(nbits, &subpage->readers);
+-	spin_unlock_irqrestore(&subpage->lock, flags);
+-}
+-
+-void btrfs_subpage_end_reader(const struct btrfs_fs_info *fs_info,
+-			      struct folio *folio, u64 start, u32 len)
+-{
+-	struct btrfs_subpage *subpage = folio_get_private(folio);
+-	const int start_bit = subpage_calc_start_bit(fs_info, folio, locked, start, len);
+-	const int nbits = len >> fs_info->sectorsize_bits;
+-	unsigned long flags;
+-	bool is_data;
+-	bool last;
+-
+-	btrfs_subpage_assert(fs_info, folio, start, len);
+-	is_data = is_data_inode(BTRFS_I(folio->mapping->host));
+-
+-	spin_lock_irqsave(&subpage->lock, flags);
+-
+-	/* The range should have already been locked. */
+-	ASSERT(bitmap_test_range_all_set(subpage->bitmaps, start_bit, nbits));
+-	ASSERT(atomic_read(&subpage->readers) >= nbits);
+-
+-	bitmap_clear(subpage->bitmaps, start_bit, nbits);
+-	last = atomic_sub_and_test(nbits, &subpage->readers);
+-
+-	/*
+-	 * For data we need to unlock the page if the last read has finished.
+-	 *
+-	 * And please don't replace @last with atomic_sub_and_test() call
+-	 * inside if () condition.
+-	 * As we want the atomic_sub_and_test() to be always executed.
+-	 */
+-	if (is_data && last)
+-		folio_unlock(folio);
+-	spin_unlock_irqrestore(&subpage->lock, flags);
+-}
+-
+ static void btrfs_subpage_clamp_range(struct folio *folio, u64 *start, u32 *len)
+ {
+ 	u64 orig_start = *start;
+@@ -295,28 +237,8 @@ static void btrfs_subpage_clamp_range(struct folio *folio, u64 *start, u32 *len)
+ 			     orig_start + orig_len) - *start;
+ }
+ 
+-static void btrfs_subpage_start_writer(const struct btrfs_fs_info *fs_info,
+-				       struct folio *folio, u64 start, u32 len)
+-{
+-	struct btrfs_subpage *subpage = folio_get_private(folio);
+-	const int start_bit = subpage_calc_start_bit(fs_info, folio, locked, start, len);
+-	const int nbits = (len >> fs_info->sectorsize_bits);
+-	unsigned long flags;
+-	int ret;
+-
+-	btrfs_subpage_assert(fs_info, folio, start, len);
+-
+-	spin_lock_irqsave(&subpage->lock, flags);
+-	ASSERT(atomic_read(&subpage->readers) == 0);
+-	ASSERT(bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits));
+-	bitmap_set(subpage->bitmaps, start_bit, nbits);
+-	ret = atomic_add_return(nbits, &subpage->writers);
+-	ASSERT(ret == nbits);
+-	spin_unlock_irqrestore(&subpage->lock, flags);
+-}
+-
+-static bool btrfs_subpage_end_and_test_writer(const struct btrfs_fs_info *fs_info,
+-					      struct folio *folio, u64 start, u32 len)
++static bool btrfs_subpage_end_and_test_lock(const struct btrfs_fs_info *fs_info,
++					    struct folio *folio, u64 start, u32 len)
+ {
+ 	struct btrfs_subpage *subpage = folio_get_private(folio);
+ 	const int start_bit = subpage_calc_start_bit(fs_info, folio, locked, start, len);
+@@ -334,9 +256,9 @@ static bool btrfs_subpage_end_and_test_writer(const struct btrfs_fs_info *fs_inf
+ 	 * extent_clear_unlock_delalloc() for compression path.
+ 	 *
+ 	 * This @locked_page is locked by plain lock_page(), thus its
+-	 * subpage::writers is 0.  Handle them in a special way.
++	 * subpage::locked is 0.  Handle it in a special way.
+ 	 */
+-	if (atomic_read(&subpage->writers) == 0) {
++	if (atomic_read(&subpage->nr_locked) == 0) {
+ 		spin_unlock_irqrestore(&subpage->lock, flags);
+ 		return true;
+ 	}
+@@ -345,39 +267,12 @@ static bool btrfs_subpage_end_and_test_writer(const struct btrfs_fs_info *fs_inf
+ 		clear_bit(bit, subpage->bitmaps);
+ 		cleared++;
+ 	}
+-	ASSERT(atomic_read(&subpage->writers) >= cleared);
+-	last = atomic_sub_and_test(cleared, &subpage->writers);
++	ASSERT(atomic_read(&subpage->nr_locked) >= cleared);
++	last = atomic_sub_and_test(cleared, &subpage->nr_locked);
+ 	spin_unlock_irqrestore(&subpage->lock, flags);
+ 	return last;
+ }
+ 
+-/*
+- * Lock a folio for delalloc page writeback.
+- *
+- * Return -EAGAIN if the page is not properly initialized.
+- * Return 0 with the page locked, and writer counter updated.
+- *
+- * Even with 0 returned, the page still need extra check to make sure
+- * it's really the correct page, as the caller is using
+- * filemap_get_folios_contig(), which can race with page invalidating.
+- */
+-int btrfs_folio_start_writer_lock(const struct btrfs_fs_info *fs_info,
+-				  struct folio *folio, u64 start, u32 len)
+-{
+-	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, folio->mapping)) {
+-		folio_lock(folio);
+-		return 0;
+-	}
+-	folio_lock(folio);
+-	if (!folio_test_private(folio) || !folio_get_private(folio)) {
+-		folio_unlock(folio);
+-		return -EAGAIN;
+-	}
+-	btrfs_subpage_clamp_range(folio, &start, &len);
+-	btrfs_subpage_start_writer(fs_info, folio, start, len);
+-	return 0;
+-}
+-
+ /*
+  * Handle different locked folios:
+  *
+@@ -394,8 +289,8 @@ int btrfs_folio_start_writer_lock(const struct btrfs_fs_info *fs_info,
+  *   bitmap, reduce the writer lock number, and unlock the page if that's
+  *   the last locked range.
+  */
+-void btrfs_folio_end_writer_lock(const struct btrfs_fs_info *fs_info,
+-				 struct folio *folio, u64 start, u32 len)
++void btrfs_folio_end_lock(const struct btrfs_fs_info *fs_info,
++			  struct folio *folio, u64 start, u32 len)
+ {
+ 	struct btrfs_subpage *subpage = folio_get_private(folio);
+ 
+@@ -408,24 +303,24 @@ void btrfs_folio_end_writer_lock(const struct btrfs_fs_info *fs_info,
+ 
+ 	/*
+ 	 * For subpage case, there are two types of locked page.  With or
+-	 * without writers number.
++	 * without a locked count.
+ 	 *
+-	 * Since we own the page lock, no one else could touch subpage::writers
++	 * Since we own the page lock, no one else could touch subpage::locked
+ 	 * and we are safe to do several atomic operations without spinlock.
+ 	 */
+-	if (atomic_read(&subpage->writers) == 0) {
+-		/* No writers, locked by plain lock_page(). */
++	if (atomic_read(&subpage->nr_locked) == 0) {
++		/* No subpage lock, locked by plain lock_page(). */
+ 		folio_unlock(folio);
+ 		return;
+ 	}
+ 
+ 	btrfs_subpage_clamp_range(folio, &start, &len);
+-	if (btrfs_subpage_end_and_test_writer(fs_info, folio, start, len))
++	if (btrfs_subpage_end_and_test_lock(fs_info, folio, start, len))
+ 		folio_unlock(folio);
+ }
+ 
+-void btrfs_folio_end_writer_lock_bitmap(const struct btrfs_fs_info *fs_info,
+-					struct folio *folio, unsigned long bitmap)
++void btrfs_folio_end_lock_bitmap(const struct btrfs_fs_info *fs_info,
++				 struct folio *folio, unsigned long bitmap)
+ {
+ 	struct btrfs_subpage *subpage = folio_get_private(folio);
+ 	const int start_bit = fs_info->sectors_per_page * btrfs_bitmap_nr_locked;
+@@ -439,8 +334,8 @@ void btrfs_folio_end_writer_lock_bitmap(const struct btrfs_fs_info *fs_info,
+ 		return;
+ 	}
+ 
+-	if (atomic_read(&subpage->writers) == 0) {
+-		/* No writers, locked by plain lock_page(). */
++	if (atomic_read(&subpage->nr_locked) == 0) {
++		/* No subpage lock, locked by plain lock_page(). */
+ 		folio_unlock(folio);
+ 		return;
+ 	}
+@@ -450,8 +345,8 @@ void btrfs_folio_end_writer_lock_bitmap(const struct btrfs_fs_info *fs_info,
+ 		if (test_and_clear_bit(bit + start_bit, subpage->bitmaps))
+ 			cleared++;
+ 	}
+-	ASSERT(atomic_read(&subpage->writers) >= cleared);
+-	last = atomic_sub_and_test(cleared, &subpage->writers);
++	ASSERT(atomic_read(&subpage->nr_locked) >= cleared);
++	last = atomic_sub_and_test(cleared, &subpage->nr_locked);
+ 	spin_unlock_irqrestore(&subpage->lock, flags);
+ 	if (last)
+ 		folio_unlock(folio);
+@@ -776,8 +671,8 @@ void btrfs_folio_assert_not_dirty(const struct btrfs_fs_info *fs_info,
+  * This populates the involved subpage ranges so that subpage helpers can
+  * properly unlock them.
+  */
+-void btrfs_folio_set_writer_lock(const struct btrfs_fs_info *fs_info,
+-				 struct folio *folio, u64 start, u32 len)
++void btrfs_folio_set_lock(const struct btrfs_fs_info *fs_info,
++			  struct folio *folio, u64 start, u32 len)
+ {
+ 	struct btrfs_subpage *subpage;
+ 	unsigned long flags;
+@@ -796,58 +691,11 @@ void btrfs_folio_set_writer_lock(const struct btrfs_fs_info *fs_info,
+ 	/* Target range should not yet be locked. */
+ 	ASSERT(bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits));
+ 	bitmap_set(subpage->bitmaps, start_bit, nbits);
+-	ret = atomic_add_return(nbits, &subpage->writers);
++	ret = atomic_add_return(nbits, &subpage->nr_locked);
+ 	ASSERT(ret <= fs_info->sectors_per_page);
+ 	spin_unlock_irqrestore(&subpage->lock, flags);
+ }
+ 
+-/*
+- * Find any subpage writer locked range inside @folio, starting at file offset
+- * @search_start. The caller should ensure the folio is locked.
+- *
+- * Return true and update @found_start_ret and @found_len_ret to the first
+- * writer locked range.
+- * Return false if there is no writer locked range.
+- */
+-bool btrfs_subpage_find_writer_locked(const struct btrfs_fs_info *fs_info,
+-				      struct folio *folio, u64 search_start,
+-				      u64 *found_start_ret, u32 *found_len_ret)
+-{
+-	struct btrfs_subpage *subpage = folio_get_private(folio);
+-	const u32 sectors_per_page = fs_info->sectors_per_page;
+-	const unsigned int len = PAGE_SIZE - offset_in_page(search_start);
+-	const unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
+-						locked, search_start, len);
+-	const unsigned int locked_bitmap_start = sectors_per_page * btrfs_bitmap_nr_locked;
+-	const unsigned int locked_bitmap_end = locked_bitmap_start + sectors_per_page;
+-	unsigned long flags;
+-	int first_zero;
+-	int first_set;
+-	bool found = false;
+-
+-	ASSERT(folio_test_locked(folio));
+-	spin_lock_irqsave(&subpage->lock, flags);
+-	first_set = find_next_bit(subpage->bitmaps, locked_bitmap_end, start_bit);
+-	if (first_set >= locked_bitmap_end)
+-		goto out;
+-
+-	found = true;
+-
+-	*found_start_ret = folio_pos(folio) +
+-		((first_set - locked_bitmap_start) << fs_info->sectorsize_bits);
+-	/*
+-	 * Since @first_set is ensured to be smaller than locked_bitmap_end
+-	 * here, @found_start_ret should be inside the folio.
+-	 */
+-	ASSERT(*found_start_ret < folio_pos(folio) + PAGE_SIZE);
+-
+-	first_zero = find_next_zero_bit(subpage->bitmaps, locked_bitmap_end, first_set);
+-	*found_len_ret = (first_zero - first_set) << fs_info->sectorsize_bits;
+-out:
+-	spin_unlock_irqrestore(&subpage->lock, flags);
+-	return found;
+-}
+-
+ #define GET_SUBPAGE_BITMAP(subpage, fs_info, name, dst)			\
+ {									\
+ 	const int sectors_per_page = fs_info->sectors_per_page;		\
+diff --git a/fs/btrfs/subpage.h b/fs/btrfs/subpage.h
+index cdb554e0d215e2..44fff1f4eac482 100644
+--- a/fs/btrfs/subpage.h
++++ b/fs/btrfs/subpage.h
+@@ -45,14 +45,6 @@ enum {
+ struct btrfs_subpage {
+ 	/* Common members for both data and metadata pages */
+ 	spinlock_t lock;
+-	/*
+-	 * Both data and metadata needs to track how many readers are for the
+-	 * page.
+-	 * Data relies on @readers to unlock the page when last reader finished.
+-	 * While metadata doesn't need page unlock, it needs to prevent
+-	 * page::private get cleared before the last end_page_read().
+-	 */
+-	atomic_t readers;
+ 	union {
+ 		/*
+ 		 * Structures only used by metadata
+@@ -62,8 +54,12 @@ struct btrfs_subpage {
+ 		 */
+ 		atomic_t eb_refs;
+ 
+-		/* Structures only used by data */
+-		atomic_t writers;
++		/*
++		 * Structures only used by data.
++		 *
++		 * How many sectors inside the page are locked.
++		 */
++		atomic_t nr_locked;
+ 	};
+ 	unsigned long bitmaps[];
+ };
+@@ -95,23 +91,12 @@ void btrfs_free_subpage(struct btrfs_subpage *subpage);
+ void btrfs_folio_inc_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio);
+ void btrfs_folio_dec_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio);
+ 
+-void btrfs_subpage_start_reader(const struct btrfs_fs_info *fs_info,
+-				struct folio *folio, u64 start, u32 len);
+-void btrfs_subpage_end_reader(const struct btrfs_fs_info *fs_info,
+-			      struct folio *folio, u64 start, u32 len);
+-
+-int btrfs_folio_start_writer_lock(const struct btrfs_fs_info *fs_info,
+-				  struct folio *folio, u64 start, u32 len);
+-void btrfs_folio_end_writer_lock(const struct btrfs_fs_info *fs_info,
+-				 struct folio *folio, u64 start, u32 len);
+-void btrfs_folio_set_writer_lock(const struct btrfs_fs_info *fs_info,
+-				 struct folio *folio, u64 start, u32 len);
+-void btrfs_folio_end_writer_lock_bitmap(const struct btrfs_fs_info *fs_info,
+-					struct folio *folio, unsigned long bitmap);
+-bool btrfs_subpage_find_writer_locked(const struct btrfs_fs_info *fs_info,
+-				      struct folio *folio, u64 search_start,
+-				      u64 *found_start_ret, u32 *found_len_ret);
+-
++void btrfs_folio_end_lock(const struct btrfs_fs_info *fs_info,
++			  struct folio *folio, u64 start, u32 len);
++void btrfs_folio_set_lock(const struct btrfs_fs_info *fs_info,
++			  struct folio *folio, u64 start, u32 len);
++void btrfs_folio_end_lock_bitmap(const struct btrfs_fs_info *fs_info,
++				 struct folio *folio, unsigned long bitmap);
+ /*
+  * Template for subpage related operations.
+  *
+diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c
+index fafc07e38663ca..e11e67af760f44 100644
+--- a/fs/smb/client/inode.c
++++ b/fs/smb/client/inode.c
+@@ -1381,7 +1381,7 @@ int cifs_get_inode_info(struct inode **inode,
+ 	struct cifs_fattr fattr = {};
+ 	int rc;
+ 
+-	if (is_inode_cache_good(*inode)) {
++	if (!data && is_inode_cache_good(*inode)) {
+ 		cifs_dbg(FYI, "No need to revalidate cached inode sizes\n");
+ 		return 0;
+ 	}
+@@ -1480,7 +1480,7 @@ int smb311_posix_get_inode_info(struct inode **inode,
+ 	struct cifs_fattr fattr = {};
+ 	int rc;
+ 
+-	if (is_inode_cache_good(*inode)) {
++	if (!data && is_inode_cache_good(*inode)) {
+ 		cifs_dbg(FYI, "No need to revalidate cached inode sizes\n");
+ 		return 0;
+ 	}
+diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
+index 44952727fef9ef..e8da63d29a28f1 100644
+--- a/fs/smb/client/smb2ops.c
++++ b/fs/smb/client/smb2ops.c
+@@ -4991,6 +4991,10 @@ receive_encrypted_standard(struct TCP_Server_Info *server,
+ 			next_buffer = (char *)cifs_buf_get();
+ 		else
+ 			next_buffer = (char *)cifs_small_buf_get();
++		if (!next_buffer) {
++			cifs_server_dbg(VFS, "No memory for (large) SMB response\n");
++			return -1;
++		}
+ 		memcpy(next_buffer, buf + next_cmd, pdu_length - next_cmd);
+ 	}
+ 
+diff --git a/fs/xfs/scrub/common.h b/fs/xfs/scrub/common.h
+index 47148cc4a833e5..eb00d48590f200 100644
+--- a/fs/xfs/scrub/common.h
++++ b/fs/xfs/scrub/common.h
+@@ -179,7 +179,6 @@ static inline bool xchk_skip_xref(struct xfs_scrub_metadata *sm)
+ bool xchk_dir_looks_zapped(struct xfs_inode *dp);
+ bool xchk_pptr_looks_zapped(struct xfs_inode *ip);
+ 
+-#ifdef CONFIG_XFS_ONLINE_REPAIR
+ /* Decide if a repair is required. */
+ static inline bool xchk_needs_repair(const struct xfs_scrub_metadata *sm)
+ {
+@@ -199,10 +198,6 @@ static inline bool xchk_could_repair(const struct xfs_scrub *sc)
+ 	return (sc->sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR) &&
+ 		!(sc->flags & XREP_ALREADY_FIXED);
+ }
+-#else
+-# define xchk_needs_repair(sc)		(false)
+-# define xchk_could_repair(sc)		(false)
+-#endif /* CONFIG_XFS_ONLINE_REPAIR */
+ 
+ int xchk_metadata_inode_forks(struct xfs_scrub *sc);
+ 
+diff --git a/fs/xfs/scrub/repair.h b/fs/xfs/scrub/repair.h
+index 0e0dc2bf985c21..96180176c582f3 100644
+--- a/fs/xfs/scrub/repair.h
++++ b/fs/xfs/scrub/repair.h
+@@ -163,7 +163,16 @@ bool xrep_buf_verify_struct(struct xfs_buf *bp, const struct xfs_buf_ops *ops);
+ #else
+ 
+ #define xrep_ino_dqattach(sc)	(0)
+-#define xrep_will_attempt(sc)	(false)
++
++/*
++ * When online repair is not built into the kernel, we still want to attempt
++ * the repair so that the stub xrep_attempt below will return EOPNOTSUPP.
++ */
++static inline bool xrep_will_attempt(const struct xfs_scrub *sc)
++{
++	return (sc->sm->sm_flags & XFS_SCRUB_IFLAG_FORCE_REBUILD) ||
++		xchk_needs_repair(sc->sm);
++}
+ 
+ static inline int
+ xrep_attempt(
+diff --git a/fs/xfs/scrub/scrub.c b/fs/xfs/scrub/scrub.c
+index 4cbcf7a86dbec5..5c266d2842dbe9 100644
+--- a/fs/xfs/scrub/scrub.c
++++ b/fs/xfs/scrub/scrub.c
+@@ -149,6 +149,18 @@ xchk_probe(
+ 	if (xchk_should_terminate(sc, &error))
+ 		return error;
+ 
++	/*
++	 * If the caller is probing to see if repair works but repair isn't
++	 * built into the kernel, return EOPNOTSUPP because that's the signal
++	 * that userspace expects.  If online repair is built in, set the
++	 * CORRUPT flag (without any of the usual tracing/logging) to force us
++	 * into xrep_probe.
++	 */
++	if (xchk_could_repair(sc)) {
++		if (!IS_ENABLED(CONFIG_XFS_ONLINE_REPAIR))
++			return -EOPNOTSUPP;
++		sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
++	}
+ 	return 0;
+ }
+ 
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index 4f17b786828af7..35b886385f3298 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -3064,6 +3064,8 @@ static inline struct net_device *first_net_device_rcu(struct net *net)
+ }
+ 
+ int netdev_boot_setup_check(struct net_device *dev);
++struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type,
++				   const char *hwaddr);
+ struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
+ 				       const char *hwaddr);
+ struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
+diff --git a/include/linux/pci.h b/include/linux/pci.h
+index 4e77c4230c0a19..74114acbb07fbb 100644
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -2293,6 +2293,8 @@ static inline void pci_fixup_device(enum pci_fixup_pass pass,
+ 				    struct pci_dev *dev) { }
+ #endif
+ 
++int pcim_intx(struct pci_dev *pdev, int enabled);
++int pcim_request_all_regions(struct pci_dev *pdev, const char *name);
+ void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen);
+ void __iomem *pcim_iomap_region(struct pci_dev *pdev, int bar,
+ 				const char *name);
+diff --git a/include/linux/pse-pd/pse.h b/include/linux/pse-pd/pse.h
+index 591a53e082e650..df1592022d938e 100644
+--- a/include/linux/pse-pd/pse.h
++++ b/include/linux/pse-pd/pse.h
+@@ -75,12 +75,8 @@ struct pse_control_status {
+  * @pi_disable: Configure the PSE PI as disabled.
+  * @pi_get_voltage: Return voltage similarly to get_voltage regulator
+  *		    callback.
+- * @pi_get_current_limit: Get the configured current limit similarly to
+- *			  get_current_limit regulator callback.
+- * @pi_set_current_limit: Configure the current limit similarly to
+- *			  set_current_limit regulator callback.
+- *			  Should not return an error in case of MAX_PI_CURRENT
+- *			  current value set.
++ * @pi_get_pw_limit: Get the configured power limit of the PSE PI.
++ * @pi_set_pw_limit: Configure the power limit of the PSE PI.
+  */
+ struct pse_controller_ops {
+ 	int (*ethtool_get_status)(struct pse_controller_dev *pcdev,
+@@ -91,10 +87,10 @@ struct pse_controller_ops {
+ 	int (*pi_enable)(struct pse_controller_dev *pcdev, int id);
+ 	int (*pi_disable)(struct pse_controller_dev *pcdev, int id);
+ 	int (*pi_get_voltage)(struct pse_controller_dev *pcdev, int id);
+-	int (*pi_get_current_limit)(struct pse_controller_dev *pcdev,
+-				    int id);
+-	int (*pi_set_current_limit)(struct pse_controller_dev *pcdev,
+-				    int id, int max_uA);
++	int (*pi_get_pw_limit)(struct pse_controller_dev *pcdev,
++			       int id);
++	int (*pi_set_pw_limit)(struct pse_controller_dev *pcdev,
++			       int id, int max_mW);
+ };
+ 
+ struct module;
+diff --git a/include/linux/serio.h b/include/linux/serio.h
+index bf2191f2535093..69a47674af653c 100644
+--- a/include/linux/serio.h
++++ b/include/linux/serio.h
+@@ -6,6 +6,7 @@
+ #define _SERIO_H
+ 
+ 
++#include <linux/cleanup.h>
+ #include <linux/types.h>
+ #include <linux/interrupt.h>
+ #include <linux/list.h>
+@@ -161,4 +162,6 @@ static inline void serio_continue_rx(struct serio *serio)
+ 	spin_unlock_irq(&serio->lock);
+ }
+ 
++DEFINE_GUARD(serio_pause_rx, struct serio *, serio_pause_rx(_T), serio_continue_rx(_T))
++
+ #endif
+diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
+index 2cbe0c22a32f3c..0b9095a281b898 100644
+--- a/include/linux/skmsg.h
++++ b/include/linux/skmsg.h
+@@ -91,6 +91,8 @@ struct sk_psock {
+ 	struct sk_psock_progs		progs;
+ #if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
+ 	struct strparser		strp;
++	u32				copied_seq;
++	u32				ingress_bytes;
+ #endif
+ 	struct sk_buff_head		ingress_skb;
+ 	struct list_head		ingress_msg;
+diff --git a/include/net/gro.h b/include/net/gro.h
+index b9b58c1f8d190b..7b548f91754bf3 100644
+--- a/include/net/gro.h
++++ b/include/net/gro.h
+@@ -11,6 +11,9 @@
+ #include <net/udp.h>
+ #include <net/hotdata.h>
+ 
++/* This should be increased if a protocol with a bigger head is added. */
++#define GRO_MAX_HEAD (MAX_HEADER + 128)
++
+ struct napi_gro_cb {
+ 	union {
+ 		struct {
+diff --git a/include/net/strparser.h b/include/net/strparser.h
+index 41e2ce9e9e10ff..0a83010b3a64a9 100644
+--- a/include/net/strparser.h
++++ b/include/net/strparser.h
+@@ -43,6 +43,8 @@ struct strparser;
+ struct strp_callbacks {
+ 	int (*parse_msg)(struct strparser *strp, struct sk_buff *skb);
+ 	void (*rcv_msg)(struct strparser *strp, struct sk_buff *skb);
++	int (*read_sock)(struct strparser *strp, read_descriptor_t *desc,
++			 sk_read_actor_t recv_actor);
+ 	int (*read_sock_done)(struct strparser *strp, int err);
+ 	void (*abort_parser)(struct strparser *strp, int err);
+ 	void (*lock)(struct strparser *strp);
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index d1948d357dade0..3255a199ef60d5 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -41,6 +41,7 @@
+ #include <net/inet_ecn.h>
+ #include <net/dst.h>
+ #include <net/mptcp.h>
++#include <net/xfrm.h>
+ 
+ #include <linux/seq_file.h>
+ #include <linux/memcontrol.h>
+@@ -683,6 +684,19 @@ void tcp_fin(struct sock *sk);
+ void tcp_check_space(struct sock *sk);
+ void tcp_sack_compress_send_ack(struct sock *sk);
+ 
++static inline void tcp_cleanup_skb(struct sk_buff *skb)
++{
++	skb_dst_drop(skb);
++	secpath_reset(skb);
++}
++
++static inline void tcp_add_receive_queue(struct sock *sk, struct sk_buff *skb)
++{
++	DEBUG_NET_WARN_ON_ONCE(skb_dst(skb));
++	DEBUG_NET_WARN_ON_ONCE(secpath_exists(skb));
++	__skb_queue_tail(&sk->sk_receive_queue, skb);
++}
++
+ /* tcp_timer.c */
+ void tcp_init_xmit_timers(struct sock *);
+ static inline void tcp_clear_xmit_timers(struct sock *sk)
+@@ -729,6 +743,9 @@ void tcp_get_info(struct sock *, struct tcp_info *);
+ /* Read 'sendfile()'-style from a TCP socket */
+ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
+ 		  sk_read_actor_t recv_actor);
++int tcp_read_sock_noack(struct sock *sk, read_descriptor_t *desc,
++			sk_read_actor_t recv_actor, bool noack,
++			u32 *copied_seq);
+ int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
+ struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off);
+ void tcp_read_done(struct sock *sk, size_t len);
+@@ -2595,6 +2612,11 @@ struct sk_psock;
+ #ifdef CONFIG_BPF_SYSCALL
+ int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore);
+ void tcp_bpf_clone(const struct sock *sk, struct sock *newsk);
++#ifdef CONFIG_BPF_STREAM_PARSER
++struct strparser;
++int tcp_bpf_strp_read_sock(struct strparser *strp, read_descriptor_t *desc,
++			   sk_read_actor_t recv_actor);
++#endif /* CONFIG_BPF_STREAM_PARSER */
+ #endif /* CONFIG_BPF_SYSCALL */
+ 
+ #ifdef CONFIG_INET
+diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h
+index c4182e95a61955..4a8a4a63e99ca8 100644
+--- a/include/uapi/drm/xe_drm.h
++++ b/include/uapi/drm/xe_drm.h
+@@ -1485,6 +1485,7 @@ struct drm_xe_oa_unit {
+ 	/** @capabilities: OA capabilities bit-mask */
+ 	__u64 capabilities;
+ #define DRM_XE_OA_CAPS_BASE		(1 << 0)
++#define DRM_XE_OA_CAPS_SYNCS		(1 << 1)
+ 
+ 	/** @oa_timestamp_freq: OA timestamp freq */
+ 	__u64 oa_timestamp_freq;
+@@ -1634,6 +1635,22 @@ enum drm_xe_oa_property_id {
+ 	 * to be disabled for the stream exec queue.
+ 	 */
+ 	DRM_XE_OA_PROPERTY_NO_PREEMPT,
++
++	/**
++	 * @DRM_XE_OA_PROPERTY_NUM_SYNCS: Number of syncs in the sync array
++	 * specified in @DRM_XE_OA_PROPERTY_SYNCS
++	 */
++	DRM_XE_OA_PROPERTY_NUM_SYNCS,
++
++	/**
++	 * @DRM_XE_OA_PROPERTY_SYNCS: Pointer to struct @drm_xe_sync array
++	 * with array size specified via @DRM_XE_OA_PROPERTY_NUM_SYNCS. OA
++	 * configuration will wait till input fences signal. Output fences
++	 * will signal after the new OA configuration takes effect. For
++	 * @DRM_XE_SYNC_TYPE_USER_FENCE, @addr is a user pointer, similar
++	 * to the VM bind case.
++	 */
++	DRM_XE_OA_PROPERTY_SYNCS,
+ };
+ 
+ /**
+diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
+index 21f1bcba2f52b5..cf28d29fffbf0e 100644
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -2053,6 +2053,8 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
+ 		req->opcode = 0;
+ 		return io_init_fail_req(req, -EINVAL);
+ 	}
++	opcode = array_index_nospec(opcode, IORING_OP_LAST);
++
+ 	def = &io_issue_defs[opcode];
+ 	if (unlikely(sqe_flags & ~SQE_COMMON_FLAGS)) {
+ 		/* enforce forwards compatibility on users */
+diff --git a/io_uring/rw.c b/io_uring/rw.c
+index 39ad25d16ed404..6abc495602a4e9 100644
+--- a/io_uring/rw.c
++++ b/io_uring/rw.c
+@@ -862,7 +862,15 @@ static int __io_read(struct io_kiocb *req, unsigned int issue_flags)
+ 	if (unlikely(ret))
+ 		return ret;
+ 
+-	ret = io_iter_do_read(rw, &io->iter);
++	if (unlikely(req->opcode == IORING_OP_READ_MULTISHOT)) {
++		void *cb_copy = rw->kiocb.ki_complete;
++
++		rw->kiocb.ki_complete = NULL;
++		ret = io_iter_do_read(rw, &io->iter);
++		rw->kiocb.ki_complete = cb_copy;
++	} else {
++		ret = io_iter_do_read(rw, &io->iter);
++	}
+ 
+ 	/*
+ 	 * Some file systems like to return -EOPNOTSUPP for an IOCB_NOWAIT
+@@ -887,7 +895,8 @@ static int __io_read(struct io_kiocb *req, unsigned int issue_flags)
+ 	} else if (ret == -EIOCBQUEUED) {
+ 		return IOU_ISSUE_SKIP_COMPLETE;
+ 	} else if (ret == req->cqe.res || ret <= 0 || !force_nonblock ||
+-		   (req->flags & REQ_F_NOWAIT) || !need_complete_io(req)) {
++		   (req->flags & REQ_F_NOWAIT) || !need_complete_io(req) ||
++		   (issue_flags & IO_URING_F_MULTISHOT)) {
+ 		/* read all, failed, already did sync or don't want to retry */
+ 		goto done;
+ 	}
+diff --git a/kernel/acct.c b/kernel/acct.c
+index 179848ad33e978..d9d55fa4d01a71 100644
+--- a/kernel/acct.c
++++ b/kernel/acct.c
+@@ -103,48 +103,50 @@ struct bsd_acct_struct {
+ 	atomic_long_t		count;
+ 	struct rcu_head		rcu;
+ 	struct mutex		lock;
+-	int			active;
++	bool			active;
++	bool			check_space;
+ 	unsigned long		needcheck;
+ 	struct file		*file;
+ 	struct pid_namespace	*ns;
+ 	struct work_struct	work;
+ 	struct completion	done;
++	acct_t			ac;
+ };
+ 
+-static void do_acct_process(struct bsd_acct_struct *acct);
++static void fill_ac(struct bsd_acct_struct *acct);
++static void acct_write_process(struct bsd_acct_struct *acct);
+ 
+ /*
+  * Check the amount of free space and suspend/resume accordingly.
+  */
+-static int check_free_space(struct bsd_acct_struct *acct)
++static bool check_free_space(struct bsd_acct_struct *acct)
+ {
+ 	struct kstatfs sbuf;
+ 
+-	if (time_is_after_jiffies(acct->needcheck))
+-		goto out;
++	if (!acct->check_space)
++		return acct->active;
+ 
+ 	/* May block */
+ 	if (vfs_statfs(&acct->file->f_path, &sbuf))
+-		goto out;
++		return acct->active;
+ 
+ 	if (acct->active) {
+ 		u64 suspend = sbuf.f_blocks * SUSPEND;
+ 		do_div(suspend, 100);
+ 		if (sbuf.f_bavail <= suspend) {
+-			acct->active = 0;
++			acct->active = false;
+ 			pr_info("Process accounting paused\n");
+ 		}
+ 	} else {
+ 		u64 resume = sbuf.f_blocks * RESUME;
+ 		do_div(resume, 100);
+ 		if (sbuf.f_bavail >= resume) {
+-			acct->active = 1;
++			acct->active = true;
+ 			pr_info("Process accounting resumed\n");
+ 		}
+ 	}
+ 
+ 	acct->needcheck = jiffies + ACCT_TIMEOUT*HZ;
+-out:
+ 	return acct->active;
+ }
+ 
+@@ -189,7 +191,11 @@ static void acct_pin_kill(struct fs_pin *pin)
+ {
+ 	struct bsd_acct_struct *acct = to_acct(pin);
+ 	mutex_lock(&acct->lock);
+-	do_acct_process(acct);
++	/*
++	 * Fill the accounting struct with the exiting task's info
++	 * before punting to the workqueue.
++	 */
++	fill_ac(acct);
+ 	schedule_work(&acct->work);
+ 	wait_for_completion(&acct->done);
+ 	cmpxchg(&acct->ns->bacct, pin, NULL);
+@@ -202,6 +208,9 @@ static void close_work(struct work_struct *work)
+ {
+ 	struct bsd_acct_struct *acct = container_of(work, struct bsd_acct_struct, work);
+ 	struct file *file = acct->file;
++
++	/* We were fired by acct_pin_kill() which holds acct->lock. */
++	acct_write_process(acct);
+ 	if (file->f_op->flush)
+ 		file->f_op->flush(file, NULL);
+ 	__fput_sync(file);
+@@ -234,6 +243,20 @@ static int acct_on(struct filename *pathname)
+ 		return -EACCES;
+ 	}
+ 
++	/* Exclude kernel internal filesystems. */
++	if (file_inode(file)->i_sb->s_flags & (SB_NOUSER | SB_KERNMOUNT)) {
++		kfree(acct);
++		filp_close(file, NULL);
++		return -EINVAL;
++	}
++
++	/* Exclude procfs and sysfs. */
++	if (file_inode(file)->i_sb->s_iflags & SB_I_USERNS_VISIBLE) {
++		kfree(acct);
++		filp_close(file, NULL);
++		return -EINVAL;
++	}
++
+ 	if (!(file->f_mode & FMODE_CAN_WRITE)) {
+ 		kfree(acct);
+ 		filp_close(file, NULL);
+@@ -430,13 +453,27 @@ static u32 encode_float(u64 value)
+  *  do_exit() or when switching to a different output file.
+  */
+ 
+-static void fill_ac(acct_t *ac)
++static void fill_ac(struct bsd_acct_struct *acct)
+ {
+ 	struct pacct_struct *pacct = &current->signal->pacct;
++	struct file *file = acct->file;
++	acct_t *ac = &acct->ac;
+ 	u64 elapsed, run_time;
+ 	time64_t btime;
+ 	struct tty_struct *tty;
+ 
++	lockdep_assert_held(&acct->lock);
++
++	if (time_is_after_jiffies(acct->needcheck)) {
++		acct->check_space = false;
++
++		/* Don't fill in @ac if nothing will be written. */
++		if (!acct->active)
++			return;
++	} else {
++		acct->check_space = true;
++	}
++
+ 	/*
+ 	 * Fill the accounting struct with the needed info as recorded
+ 	 * by the different kernel functions.
+@@ -484,64 +521,61 @@ static void fill_ac(acct_t *ac)
+ 	ac->ac_majflt = encode_comp_t(pacct->ac_majflt);
+ 	ac->ac_exitcode = pacct->ac_exitcode;
+ 	spin_unlock_irq(&current->sighand->siglock);
+-}
+-/*
+- *  do_acct_process does all actual work. Caller holds the reference to file.
+- */
+-static void do_acct_process(struct bsd_acct_struct *acct)
+-{
+-	acct_t ac;
+-	unsigned long flim;
+-	const struct cred *orig_cred;
+-	struct file *file = acct->file;
+ 
+-	/*
+-	 * Accounting records are not subject to resource limits.
+-	 */
+-	flim = rlimit(RLIMIT_FSIZE);
+-	current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
+-	/* Perform file operations on behalf of whoever enabled accounting */
+-	orig_cred = override_creds(file->f_cred);
+-
+-	/*
+-	 * First check to see if there is enough free_space to continue
+-	 * the process accounting system.
+-	 */
+-	if (!check_free_space(acct))
+-		goto out;
+-
+-	fill_ac(&ac);
+ 	/* we really need to bite the bullet and change layout */
+-	ac.ac_uid = from_kuid_munged(file->f_cred->user_ns, orig_cred->uid);
+-	ac.ac_gid = from_kgid_munged(file->f_cred->user_ns, orig_cred->gid);
++	ac->ac_uid = from_kuid_munged(file->f_cred->user_ns, current_uid());
++	ac->ac_gid = from_kgid_munged(file->f_cred->user_ns, current_gid());
+ #if ACCT_VERSION == 1 || ACCT_VERSION == 2
+ 	/* backward-compatible 16 bit fields */
+-	ac.ac_uid16 = ac.ac_uid;
+-	ac.ac_gid16 = ac.ac_gid;
++	ac->ac_uid16 = ac->ac_uid;
++	ac->ac_gid16 = ac->ac_gid;
+ #elif ACCT_VERSION == 3
+ 	{
+ 		struct pid_namespace *ns = acct->ns;
+ 
+-		ac.ac_pid = task_tgid_nr_ns(current, ns);
++		ac->ac_pid = task_tgid_nr_ns(current, ns);
+ 		rcu_read_lock();
+-		ac.ac_ppid = task_tgid_nr_ns(rcu_dereference(current->real_parent),
+-					     ns);
++		ac->ac_ppid = task_tgid_nr_ns(rcu_dereference(current->real_parent), ns);
+ 		rcu_read_unlock();
+ 	}
+ #endif
++}
++
++static void acct_write_process(struct bsd_acct_struct *acct)
++{
++	struct file *file = acct->file;
++	const struct cred *cred;
++	acct_t *ac = &acct->ac;
++
++	/* Perform file operations on behalf of whoever enabled accounting */
++	cred = override_creds(file->f_cred);
++
+ 	/*
+-	 * Get freeze protection. If the fs is frozen, just skip the write
+-	 * as we could deadlock the system otherwise.
++	 * First check to see if there is enough free_space to continue
++	 * the process accounting system. Then get freeze protection. If
++	 * the fs is frozen, just skip the write as we could deadlock
++	 * the system otherwise.
+ 	 */
+-	if (file_start_write_trylock(file)) {
++	if (check_free_space(acct) && file_start_write_trylock(file)) {
+ 		/* it's been opened O_APPEND, so position is irrelevant */
+ 		loff_t pos = 0;
+-		__kernel_write(file, &ac, sizeof(acct_t), &pos);
++		__kernel_write(file, ac, sizeof(acct_t), &pos);
+ 		file_end_write(file);
+ 	}
+-out:
++
++	revert_creds(cred);
++}
++
++static void do_acct_process(struct bsd_acct_struct *acct)
++{
++	unsigned long flim;
++
++	/* Accounting records are not subject to resource limits. */
++	flim = rlimit(RLIMIT_FSIZE);
++	current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
++	fill_ac(acct);
++	acct_write_process(acct);
+ 	current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
+-	revert_creds(orig_cred);
+ }
+ 
+ /**
+diff --git a/kernel/bpf/arena.c b/kernel/bpf/arena.c
+index 93e48c7cad4eff..8c775a1401d3ed 100644
+--- a/kernel/bpf/arena.c
++++ b/kernel/bpf/arena.c
+@@ -37,7 +37,7 @@
+  */
+ 
+ /* number of bytes addressable by LDX/STX insn with 16-bit 'off' field */
+-#define GUARD_SZ (1ull << sizeof_field(struct bpf_insn, off) * 8)
++#define GUARD_SZ round_up(1ull << sizeof_field(struct bpf_insn, off) * 8, PAGE_SIZE << 1)
+ #define KERN_VM_SZ (SZ_4G + GUARD_SZ)
+ 
+ struct bpf_arena {
+diff --git a/kernel/bpf/bpf_cgrp_storage.c b/kernel/bpf/bpf_cgrp_storage.c
+index 28efd0a3f2200c..6547fb7ac0dcb2 100644
+--- a/kernel/bpf/bpf_cgrp_storage.c
++++ b/kernel/bpf/bpf_cgrp_storage.c
+@@ -154,7 +154,7 @@ static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr)
+ 
+ static void cgroup_storage_map_free(struct bpf_map *map)
+ {
+-	bpf_local_storage_map_free(map, &cgroup_cache, NULL);
++	bpf_local_storage_map_free(map, &cgroup_cache, &bpf_cgrp_storage_busy);
+ }
+ 
+ /* *gfp_flags* is a hidden argument provided by the verifier */
+diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
+index a44f4be592be79..2c54c148a94f30 100644
+--- a/kernel/bpf/btf.c
++++ b/kernel/bpf/btf.c
+@@ -6483,6 +6483,8 @@ static const struct bpf_raw_tp_null_args raw_tp_null_args[] = {
+ 	/* rxrpc */
+ 	{ "rxrpc_recvdata", 0x1 },
+ 	{ "rxrpc_resend", 0x10 },
++	/* skb */
++	{"kfree_skb", 0x1000},
+ 	/* sunrpc */
+ 	{ "xs_stream_read_data", 0x1 },
+ 	/* ... from xprt_cong_event event class */
+diff --git a/kernel/bpf/ringbuf.c b/kernel/bpf/ringbuf.c
+index e1cfe890e0be64..1499d8caa9a351 100644
+--- a/kernel/bpf/ringbuf.c
++++ b/kernel/bpf/ringbuf.c
+@@ -268,8 +268,6 @@ static int ringbuf_map_mmap_kern(struct bpf_map *map, struct vm_area_struct *vma
+ 		/* allow writable mapping for the consumer_pos only */
+ 		if (vma->vm_pgoff != 0 || vma->vm_end - vma->vm_start != PAGE_SIZE)
+ 			return -EPERM;
+-	} else {
+-		vm_flags_clear(vma, VM_MAYWRITE);
+ 	}
+ 	/* remap_vmalloc_range() checks size and offset constraints */
+ 	return remap_vmalloc_range(vma, rb_map->rb,
+@@ -289,8 +287,6 @@ static int ringbuf_map_mmap_user(struct bpf_map *map, struct vm_area_struct *vma
+ 			 * position, and the ring buffer data itself.
+ 			 */
+ 			return -EPERM;
+-	} else {
+-		vm_flags_clear(vma, VM_MAYWRITE);
+ 	}
+ 	/* remap_vmalloc_range() checks size and offset constraints */
+ 	return remap_vmalloc_range(vma, rb_map->rb, vma->vm_pgoff + RINGBUF_PGOFF);
+diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
+index 368ae8d231d417..696e5a2cbea2e8 100644
+--- a/kernel/bpf/syscall.c
++++ b/kernel/bpf/syscall.c
+@@ -936,7 +936,7 @@ static const struct vm_operations_struct bpf_map_default_vmops = {
+ static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
+ {
+ 	struct bpf_map *map = filp->private_data;
+-	int err;
++	int err = 0;
+ 
+ 	if (!map->ops->map_mmap || !IS_ERR_OR_NULL(map->record))
+ 		return -ENOTSUPP;
+@@ -960,24 +960,33 @@ static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
+ 			err = -EACCES;
+ 			goto out;
+ 		}
++		bpf_map_write_active_inc(map);
+ 	}
++out:
++	mutex_unlock(&map->freeze_mutex);
++	if (err)
++		return err;
+ 
+ 	/* set default open/close callbacks */
+ 	vma->vm_ops = &bpf_map_default_vmops;
+ 	vma->vm_private_data = map;
+ 	vm_flags_clear(vma, VM_MAYEXEC);
++	/* If mapping is read-only, then disallow potentially re-mapping with
++	 * PROT_WRITE by dropping VM_MAYWRITE flag. This VM_MAYWRITE clearing
++	 * means that as far as BPF map's memory-mapped VMAs are concerned,
++	 * VM_WRITE and VM_MAYWRITE are equivalent: if one of them is set,
++	 * both should be set, so we can forget about VM_MAYWRITE and always
++	 * check just VM_WRITE.
++	 */
+ 	if (!(vma->vm_flags & VM_WRITE))
+-		/* disallow re-mapping with PROT_WRITE */
+ 		vm_flags_clear(vma, VM_MAYWRITE);
+ 
+ 	err = map->ops->map_mmap(map, vma);
+-	if (err)
+-		goto out;
++	if (err) {
++		if (vma->vm_flags & VM_WRITE)
++			bpf_map_write_active_dec(map);
++	}
+ 
+-	if (vma->vm_flags & VM_MAYWRITE)
+-		bpf_map_write_active_inc(map);
+-out:
+-	mutex_unlock(&map->freeze_mutex);
+ 	return err;
+ }
+ 
+@@ -1863,8 +1872,6 @@ int generic_map_update_batch(struct bpf_map *map, struct file *map_file,
+ 	return err;
+ }
+ 
+-#define MAP_LOOKUP_RETRIES 3
+-
+ int generic_map_lookup_batch(struct bpf_map *map,
+ 				    const union bpf_attr *attr,
+ 				    union bpf_attr __user *uattr)
+@@ -1874,8 +1881,8 @@ int generic_map_lookup_batch(struct bpf_map *map,
+ 	void __user *values = u64_to_user_ptr(attr->batch.values);
+ 	void __user *keys = u64_to_user_ptr(attr->batch.keys);
+ 	void *buf, *buf_prevkey, *prev_key, *key, *value;
+-	int err, retry = MAP_LOOKUP_RETRIES;
+ 	u32 value_size, cp, max_count;
++	int err;
+ 
+ 	if (attr->batch.elem_flags & ~BPF_F_LOCK)
+ 		return -EINVAL;
+@@ -1921,14 +1928,8 @@ int generic_map_lookup_batch(struct bpf_map *map,
+ 		err = bpf_map_copy_value(map, key, value,
+ 					 attr->batch.elem_flags);
+ 
+-		if (err == -ENOENT) {
+-			if (retry) {
+-				retry--;
+-				continue;
+-			}
+-			err = -EINTR;
+-			break;
+-		}
++		if (err == -ENOENT)
++			goto next_key;
+ 
+ 		if (err)
+ 			goto free_buf;
+@@ -1943,12 +1944,12 @@ int generic_map_lookup_batch(struct bpf_map *map,
+ 			goto free_buf;
+ 		}
+ 
++		cp++;
++next_key:
+ 		if (!prev_key)
+ 			prev_key = buf_prevkey;
+ 
+ 		swap(prev_key, key);
+-		retry = MAP_LOOKUP_RETRIES;
+-		cp++;
+ 		cond_resched();
+ 	}
+ 
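The bpf_map_mmap() rework above relies on the single invariant spelled out in the new comment: once a read-only mapping loses VM_MAYWRITE, VM_WRITE alone decides whether write-active accounting applies, which is why the error path only decrements when VM_WRITE is set. A minimal userspace sketch of that normalization (a standalone C program, not kernel code; the flag values mirror the kernel's, nothing else does):

#include <stdio.h>

#define VM_WRITE    0x00000002UL
#define VM_MAYWRITE 0x00000020UL

/* Mirror of the patch's rule: a read-only BPF map mapping may never be
 * re-mapped writable, so drop VM_MAYWRITE when VM_WRITE is clear. */
static unsigned long bpf_map_vma_flags(unsigned long vm_flags)
{
	if (!(vm_flags & VM_WRITE))
		vm_flags &= ~VM_MAYWRITE;
	return vm_flags;
}

int main(void)
{
	unsigned long ro = bpf_map_vma_flags(VM_MAYWRITE);
	unsigned long rw = bpf_map_vma_flags(VM_WRITE | VM_MAYWRITE);

	/* After normalization VM_WRITE and VM_MAYWRITE agree, so the
	 * mmap path can track write activity on VM_WRITE alone. */
	printf("ro: maywrite=%lu  rw: maywrite=%lu\n",
	       ro & VM_MAYWRITE, rw & VM_MAYWRITE);
	return 0;
}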
+diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
+index 689f7e8f69f54d..aa57ae3eb1ff5e 100644
+--- a/kernel/sched/ext.c
++++ b/kernel/sched/ext.c
+@@ -2300,12 +2300,35 @@ static void move_remote_task_to_local_dsq(struct task_struct *p, u64 enq_flags,
+  *
+  * - The BPF scheduler is bypassed while the rq is offline and we can always say
+  *   no to the BPF scheduler initiated migrations while offline.
++ *
++ * The caller must ensure that @p and @rq are on different CPUs.
+  */
+ static bool task_can_run_on_remote_rq(struct task_struct *p, struct rq *rq,
+ 				      bool trigger_error)
+ {
+ 	int cpu = cpu_of(rq);
+ 
++	SCHED_WARN_ON(task_cpu(p) == cpu);
++
++	/*
++	 * If @p has migration disabled, @p->cpus_ptr is updated to contain only
++	 * the pinned CPU in migrate_disable_switch() while @p is being switched
++	 * out. However, put_prev_task_scx() is called before @p->cpus_ptr is
++	 * updated, and thus another CPU may see @p on a DSQ in between, leading to
++	 * @p passing the below task_allowed_on_cpu() check while migration is
++	 * disabled.
++	 *
++	 * Test the migration disabled state first as the race window is narrow
++	 * and the BPF scheduler failing to check migration disabled state can
++	 * easily be masked if task_allowed_on_cpu() is done first.
++	 */
++	if (unlikely(is_migration_disabled(p))) {
++		if (trigger_error)
++			scx_ops_error("SCX_DSQ_LOCAL[_ON] cannot move migration disabled %s[%d] from CPU %d to %d",
++				      p->comm, p->pid, task_cpu(p), cpu);
++		return false;
++	}
++
+ 	/*
+ 	 * We don't require the BPF scheduler to avoid dispatching to offline
+ 	 * CPUs mostly for convenience but also because CPUs can go offline
+@@ -2314,14 +2337,11 @@ static bool task_can_run_on_remote_rq(struct task_struct *p, struct rq *rq,
+ 	 */
+ 	if (!task_allowed_on_cpu(p, cpu)) {
+ 		if (trigger_error)
+-			scx_ops_error("SCX_DSQ_LOCAL[_ON] verdict target cpu %d not allowed for %s[%d]",
+-				      cpu_of(rq), p->comm, p->pid);
++			scx_ops_error("SCX_DSQ_LOCAL[_ON] target CPU %d not allowed for %s[%d]",
++				      cpu, p->comm, p->pid);
+ 		return false;
+ 	}
+ 
+-	if (unlikely(is_migration_disabled(p)))
+-		return false;
+-
+ 	if (!scx_rq_online(rq))
+ 		return false;
+ 
+@@ -2397,6 +2417,74 @@ static inline bool task_can_run_on_remote_rq(struct task_struct *p, struct rq *r
+ static inline bool consume_remote_task(struct rq *this_rq, struct task_struct *p, struct scx_dispatch_q *dsq, struct rq *task_rq) { return false; }
+ #endif	/* CONFIG_SMP */
+ 
++/**
++ * move_task_between_dsqs() - Move a task from one DSQ to another
++ * @p: target task
++ * @enq_flags: %SCX_ENQ_*
++ * @src_dsq: DSQ @p is currently on, must not be a local DSQ
++ * @dst_dsq: DSQ @p is being moved to, can be any DSQ
++ *
++ * Must be called with @p's task_rq and @src_dsq locked. If @dst_dsq is a local
++ * DSQ and @p is on a different CPU, @p will be migrated and thus its task_rq
++ * will change. As @p's task_rq is locked, this function doesn't need to use the
++ * holding_cpu mechanism.
++ *
++ * On return, @src_dsq is unlocked and only @p's new task_rq, which is the
++ * return value, is locked.
++ */
++static struct rq *move_task_between_dsqs(struct task_struct *p, u64 enq_flags,
++					 struct scx_dispatch_q *src_dsq,
++					 struct scx_dispatch_q *dst_dsq)
++{
++	struct rq *src_rq = task_rq(p), *dst_rq;
++
++	BUG_ON(src_dsq->id == SCX_DSQ_LOCAL);
++	lockdep_assert_held(&src_dsq->lock);
++	lockdep_assert_rq_held(src_rq);
++
++	if (dst_dsq->id == SCX_DSQ_LOCAL) {
++		dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq);
++		if (src_rq != dst_rq &&
++		    unlikely(!task_can_run_on_remote_rq(p, dst_rq, true))) {
++			dst_dsq = find_global_dsq(p);
++			dst_rq = src_rq;
++		}
++	} else {
++		/* no need to migrate if destination is a non-local DSQ */
++		dst_rq = src_rq;
++	}
++
++	/*
++	 * Move @p into $dst_dsq. If $dst_dsq is the local DSQ of a different
++	 * CPU, @p will be migrated.
++	 */
++	if (dst_dsq->id == SCX_DSQ_LOCAL) {
++		/* @p is going from a non-local DSQ to a local DSQ */
++		if (src_rq == dst_rq) {
++			task_unlink_from_dsq(p, src_dsq);
++			move_local_task_to_local_dsq(p, enq_flags,
++						     src_dsq, dst_rq);
++			raw_spin_unlock(&src_dsq->lock);
++		} else {
++			raw_spin_unlock(&src_dsq->lock);
++			move_remote_task_to_local_dsq(p, enq_flags,
++						      src_rq, dst_rq);
++		}
++	} else {
++		/*
++		 * @p is going from a non-local DSQ to a non-local DSQ. As
++		 * $src_dsq is already locked, do an abbreviated dequeue.
++		 */
++		task_unlink_from_dsq(p, src_dsq);
++		p->scx.dsq = NULL;
++		raw_spin_unlock(&src_dsq->lock);
++
++		dispatch_enqueue(dst_dsq, p, enq_flags);
++	}
++
++	return dst_rq;
++}
++
+ static bool consume_dispatch_q(struct rq *rq, struct scx_dispatch_q *dsq)
+ {
+ 	struct task_struct *p;
+@@ -2474,7 +2562,8 @@ static void dispatch_to_local_dsq(struct rq *rq, struct scx_dispatch_q *dst_dsq,
+ 	}
+ 
+ #ifdef CONFIG_SMP
+-	if (unlikely(!task_can_run_on_remote_rq(p, dst_rq, true))) {
++	if (src_rq != dst_rq &&
++	    unlikely(!task_can_run_on_remote_rq(p, dst_rq, true))) {
+ 		dispatch_enqueue(find_global_dsq(p), p,
+ 				 enq_flags | SCX_ENQ_CLEAR_OPSS);
+ 		return;
+@@ -6134,7 +6223,7 @@ static bool scx_dispatch_from_dsq(struct bpf_iter_scx_dsq_kern *kit,
+ 				  u64 enq_flags)
+ {
+ 	struct scx_dispatch_q *src_dsq = kit->dsq, *dst_dsq;
+-	struct rq *this_rq, *src_rq, *dst_rq, *locked_rq;
++	struct rq *this_rq, *src_rq, *locked_rq;
+ 	bool dispatched = false;
+ 	bool in_balance;
+ 	unsigned long flags;
+@@ -6180,51 +6269,18 @@ static bool scx_dispatch_from_dsq(struct bpf_iter_scx_dsq_kern *kit,
+ 	/* @p is still on $src_dsq and stable, determine the destination */
+ 	dst_dsq = find_dsq_for_dispatch(this_rq, dsq_id, p);
+ 
+-	if (dst_dsq->id == SCX_DSQ_LOCAL) {
+-		dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq);
+-		if (!task_can_run_on_remote_rq(p, dst_rq, true)) {
+-			dst_dsq = find_global_dsq(p);
+-			dst_rq = src_rq;
+-		}
+-	} else {
+-		/* no need to migrate if destination is a non-local DSQ */
+-		dst_rq = src_rq;
+-	}
+-
+ 	/*
+-	 * Move @p into $dst_dsq. If $dst_dsq is the local DSQ of a different
+-	 * CPU, @p will be migrated.
++	 * Apply vtime and slice updates before moving so that the new time is
++	 * visible before inserting into $dst_dsq. @p is still on $src_dsq but
++	 * this is safe as we hold $src_dsq's lock.
+ 	 */
+-	if (dst_dsq->id == SCX_DSQ_LOCAL) {
+-		/* @p is going from a non-local DSQ to a local DSQ */
+-		if (src_rq == dst_rq) {
+-			task_unlink_from_dsq(p, src_dsq);
+-			move_local_task_to_local_dsq(p, enq_flags,
+-						     src_dsq, dst_rq);
+-			raw_spin_unlock(&src_dsq->lock);
+-		} else {
+-			raw_spin_unlock(&src_dsq->lock);
+-			move_remote_task_to_local_dsq(p, enq_flags,
+-						      src_rq, dst_rq);
+-			locked_rq = dst_rq;
+-		}
+-	} else {
+-		/*
+-		 * @p is going from a non-local DSQ to a non-local DSQ. As
+-		 * $src_dsq is already locked, do an abbreviated dequeue.
+-		 */
+-		task_unlink_from_dsq(p, src_dsq);
+-		p->scx.dsq = NULL;
+-		raw_spin_unlock(&src_dsq->lock);
+-
+-		if (kit->cursor.flags & __SCX_DSQ_ITER_HAS_VTIME)
+-			p->scx.dsq_vtime = kit->vtime;
+-		dispatch_enqueue(dst_dsq, p, enq_flags);
+-	}
+-
++	if (kit->cursor.flags & __SCX_DSQ_ITER_HAS_VTIME)
++		p->scx.dsq_vtime = kit->vtime;
+ 	if (kit->cursor.flags & __SCX_DSQ_ITER_HAS_SLICE)
+ 		p->scx.slice = kit->slice;
+ 
++	/* execute move */
++	locked_rq = move_task_between_dsqs(p, enq_flags, src_dsq, dst_dsq);
+ 	dispatched = true;
+ out:
+ 	if (in_balance) {
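move_task_between_dsqs() factors out a fallback that dispatch_to_local_dsq() and scx_dispatch_from_dsq() now share: a remote CPU's local DSQ is only a valid target if the task may actually run there, otherwise the task lands in the global DSQ without migrating. A compilable sketch of just the selection step, with invented types standing in for the kernel's rq/DSQ structures and a plain cpumask bit standing in for task_can_run_on_remote_rq():

#include <stdbool.h>
#include <stdio.h>

enum dsq_kind { DSQ_GLOBAL, DSQ_LOCAL };

struct dsq {
	enum dsq_kind kind;
	int cpu;		/* only meaningful for DSQ_LOCAL */
};

/* Stand-in for task_can_run_on_remote_rq(): here, a cpumask bit test. */
static bool task_allowed_on_cpu(unsigned long cpus_allowed, int cpu)
{
	return cpus_allowed & (1UL << cpu);
}

/* Pick the DSQ the task is really inserted into: fall back to the global
 * DSQ when the requested remote local DSQ belongs to a forbidden CPU. */
static struct dsq pick_dst_dsq(struct dsq want, unsigned long cpus_allowed,
			       int src_cpu)
{
	if (want.kind == DSQ_LOCAL && want.cpu != src_cpu &&
	    !task_allowed_on_cpu(cpus_allowed, want.cpu))
		return (struct dsq){ .kind = DSQ_GLOBAL };
	return want;
}

int main(void)
{
	/* Task allowed on CPUs 0-1, currently on CPU 0. */
	struct dsq ok  = pick_dst_dsq((struct dsq){ DSQ_LOCAL, 1 }, 0x3, 0);
	struct dsq bad = pick_dst_dsq((struct dsq){ DSQ_LOCAL, 3 }, 0x3, 0);

	printf("cpu1 -> %s, cpu3 -> %s\n",
	       ok.kind == DSQ_LOCAL ? "local" : "global",
	       bad.kind == DSQ_LOCAL ? "local" : "global");
	return 0;
}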
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index cd9dbfb3038330..71cc1bbfe9aa3e 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -3219,15 +3219,22 @@ static struct ftrace_hash *copy_hash(struct ftrace_hash *src)
+  *  The filter_hash updates uses just the append_hash() function
+  *  and the notrace_hash does not.
+  */
+-static int append_hash(struct ftrace_hash **hash, struct ftrace_hash *new_hash)
++static int append_hash(struct ftrace_hash **hash, struct ftrace_hash *new_hash,
++		       int size_bits)
+ {
+ 	struct ftrace_func_entry *entry;
+ 	int size;
+ 	int i;
+ 
+-	/* An empty hash does everything */
+-	if (ftrace_hash_empty(*hash))
+-		return 0;
++	if (*hash) {
++		/* An empty hash does everything */
++		if (ftrace_hash_empty(*hash))
++			return 0;
++	} else {
++		*hash = alloc_ftrace_hash(size_bits);
++		if (!*hash)
++			return -ENOMEM;
++	}
+ 
+ 	/* If new_hash has everything make hash have everything */
+ 	if (ftrace_hash_empty(new_hash)) {
+@@ -3291,16 +3298,18 @@ static int intersect_hash(struct ftrace_hash **hash, struct ftrace_hash *new_has
+ /* Return a new hash that has a union of all @ops->filter_hash entries */
+ static struct ftrace_hash *append_hashes(struct ftrace_ops *ops)
+ {
+-	struct ftrace_hash *new_hash;
++	struct ftrace_hash *new_hash = NULL;
+ 	struct ftrace_ops *subops;
++	int size_bits;
+ 	int ret;
+ 
+-	new_hash = alloc_ftrace_hash(ops->func_hash->filter_hash->size_bits);
+-	if (!new_hash)
+-		return NULL;
++	if (ops->func_hash->filter_hash)
++		size_bits = ops->func_hash->filter_hash->size_bits;
++	else
++		size_bits = FTRACE_HASH_DEFAULT_BITS;
+ 
+ 	list_for_each_entry(subops, &ops->subop_list, list) {
+-		ret = append_hash(&new_hash, subops->func_hash->filter_hash);
++		ret = append_hash(&new_hash, subops->func_hash->filter_hash, size_bits);
+ 		if (ret < 0) {
+ 			free_ftrace_hash(new_hash);
+ 			return NULL;
+@@ -3309,7 +3318,8 @@ static struct ftrace_hash *append_hashes(struct ftrace_ops *ops)
+ 		if (ftrace_hash_empty(new_hash))
+ 			break;
+ 	}
+-	return new_hash;
++	/* Can't return NULL as that means this failed */
++	return new_hash ? : EMPTY_HASH;
+ }
+ 
+ /* Make @ops trace everything except what all its subops do not trace */
+@@ -3504,7 +3514,8 @@ int ftrace_startup_subops(struct ftrace_ops *ops, struct ftrace_ops *subops, int
+ 		filter_hash = alloc_and_copy_ftrace_hash(size_bits, ops->func_hash->filter_hash);
+ 		if (!filter_hash)
+ 			return -ENOMEM;
+-		ret = append_hash(&filter_hash, subops->func_hash->filter_hash);
++		ret = append_hash(&filter_hash, subops->func_hash->filter_hash,
++				  size_bits);
+ 		if (ret < 0) {
+ 			free_ftrace_hash(filter_hash);
+ 			return ret;
+@@ -5747,6 +5758,9 @@ __ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
+ 			return -ENOENT;
+ 		free_hash_entry(hash, entry);
+ 		return 0;
++	} else if (__ftrace_lookup_ip(hash, ip) != NULL) {
++		/* Already exists */
++		return 0;
+ 	}
+ 
+ 	entry = add_hash_entry(hash, ip);
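The ftrace changes repurpose NULL: append_hash() now allocates the accumulator lazily, and append_hashes() hands back the shared EMPTY_HASH sentinel for an empty union so NULL is left to mean allocation failure alone. The same three-state convention in a standalone C sketch (EMPTY_SET and union_all() are invented stand-ins, with the actual merging elided):

#include <stdio.h>
#include <stdlib.h>

struct set { size_t n; int *items; };

/* Shared sentinel for "valid but empty", so NULL can mean "out of memory". */
static struct set empty_set_storage;
#define EMPTY_SET (&empty_set_storage)

static struct set *union_all(const struct set *const *srcs, size_t nsrc)
{
	struct set *acc = NULL;

	for (size_t i = 0; i < nsrc; i++) {
		if (srcs[i]->n == 0)
			continue;
		if (!acc) {
			acc = calloc(1, sizeof(*acc));	/* lazy alloc */
			if (!acc)
				return NULL;		/* real failure */
		}
		/* (merging of srcs[i] into acc elided) */
		acc->n += srcs[i]->n;
	}
	return acc ? acc : EMPTY_SET;	/* never NULL on success */
}

int main(void)
{
	struct set a = { 0, NULL };
	const struct set *srcs[] = { &a };
	struct set *u = union_all(srcs, 1);

	printf("empty union -> %s\n", u == EMPTY_SET ? "EMPTY_SET" : "other");
	return 0;
}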
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index bfc4ac265c2c33..ffe1422ab03f88 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -26,6 +26,7 @@
+ #include <linux/hardirq.h>
+ #include <linux/linkage.h>
+ #include <linux/uaccess.h>
++#include <linux/cleanup.h>
+ #include <linux/vmalloc.h>
+ #include <linux/ftrace.h>
+ #include <linux/module.h>
+@@ -535,19 +536,16 @@ LIST_HEAD(ftrace_trace_arrays);
+ int trace_array_get(struct trace_array *this_tr)
+ {
+ 	struct trace_array *tr;
+-	int ret = -ENODEV;
+ 
+-	mutex_lock(&trace_types_lock);
++	guard(mutex)(&trace_types_lock);
+ 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
+ 		if (tr == this_tr) {
+ 			tr->ref++;
+-			ret = 0;
+-			break;
++			return 0;
+ 		}
+ 	}
+-	mutex_unlock(&trace_types_lock);
+ 
+-	return ret;
++	return -ENODEV;
+ }
+ 
+ static void __trace_array_put(struct trace_array *this_tr)
+@@ -1456,22 +1454,20 @@ EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
+ int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
+ 				 cond_update_fn_t update)
+ {
+-	struct cond_snapshot *cond_snapshot;
+-	int ret = 0;
++	struct cond_snapshot *cond_snapshot __free(kfree) =
++		kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
++	int ret;
+ 
+-	cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
+ 	if (!cond_snapshot)
+ 		return -ENOMEM;
+ 
+ 	cond_snapshot->cond_data = cond_data;
+ 	cond_snapshot->update = update;
+ 
+-	mutex_lock(&trace_types_lock);
++	guard(mutex)(&trace_types_lock);
+ 
+-	if (tr->current_trace->use_max_tr) {
+-		ret = -EBUSY;
+-		goto fail_unlock;
+-	}
++	if (tr->current_trace->use_max_tr)
++		return -EBUSY;
+ 
+ 	/*
+ 	 * The cond_snapshot can only change to NULL without the
+@@ -1481,29 +1477,20 @@ int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
+ 	 * do safely with only holding the trace_types_lock and not
+ 	 * having to take the max_lock.
+ 	 */
+-	if (tr->cond_snapshot) {
+-		ret = -EBUSY;
+-		goto fail_unlock;
+-	}
++	if (tr->cond_snapshot)
++		return -EBUSY;
+ 
+ 	ret = tracing_arm_snapshot_locked(tr);
+ 	if (ret)
+-		goto fail_unlock;
++		return ret;
+ 
+ 	local_irq_disable();
+ 	arch_spin_lock(&tr->max_lock);
+-	tr->cond_snapshot = cond_snapshot;
++	tr->cond_snapshot = no_free_ptr(cond_snapshot);
+ 	arch_spin_unlock(&tr->max_lock);
+ 	local_irq_enable();
+ 
+-	mutex_unlock(&trace_types_lock);
+-
+-	return ret;
+-
+- fail_unlock:
+-	mutex_unlock(&trace_types_lock);
+-	kfree(cond_snapshot);
+-	return ret;
++	return 0;
+ }
+ EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
+ 
+@@ -2216,10 +2203,10 @@ static __init int init_trace_selftests(void)
+ 
+ 	selftests_can_run = true;
+ 
+-	mutex_lock(&trace_types_lock);
++	guard(mutex)(&trace_types_lock);
+ 
+ 	if (list_empty(&postponed_selftests))
+-		goto out;
++		return 0;
+ 
+ 	pr_info("Running postponed tracer tests:\n");
+ 
+@@ -2248,9 +2235,6 @@ static __init int init_trace_selftests(void)
+ 	}
+ 	tracing_selftest_running = false;
+ 
+- out:
+-	mutex_unlock(&trace_types_lock);
+-
+ 	return 0;
+ }
+ core_initcall(init_trace_selftests);
+@@ -2818,7 +2802,7 @@ int tracepoint_printk_sysctl(const struct ctl_table *table, int write,
+ 	int save_tracepoint_printk;
+ 	int ret;
+ 
+-	mutex_lock(&tracepoint_printk_mutex);
++	guard(mutex)(&tracepoint_printk_mutex);
+ 	save_tracepoint_printk = tracepoint_printk;
+ 
+ 	ret = proc_dointvec(table, write, buffer, lenp, ppos);
+@@ -2831,16 +2815,13 @@ int tracepoint_printk_sysctl(const struct ctl_table *table, int write,
+ 		tracepoint_printk = 0;
+ 
+ 	if (save_tracepoint_printk == tracepoint_printk)
+-		goto out;
++		return ret;
+ 
+ 	if (tracepoint_printk)
+ 		static_key_enable(&tracepoint_printk_key.key);
+ 	else
+ 		static_key_disable(&tracepoint_printk_key.key);
+ 
+- out:
+-	mutex_unlock(&tracepoint_printk_mutex);
+-
+ 	return ret;
+ }
+ 
+@@ -5150,7 +5131,8 @@ static int tracing_trace_options_show(struct seq_file *m, void *v)
+ 	u32 tracer_flags;
+ 	int i;
+ 
+-	mutex_lock(&trace_types_lock);
++	guard(mutex)(&trace_types_lock);
++
+ 	tracer_flags = tr->current_trace->flags->val;
+ 	trace_opts = tr->current_trace->flags->opts;
+ 
+@@ -5167,7 +5149,6 @@ static int tracing_trace_options_show(struct seq_file *m, void *v)
+ 		else
+ 			seq_printf(m, "no%s\n", trace_opts[i].name);
+ 	}
+-	mutex_unlock(&trace_types_lock);
+ 
+ 	return 0;
+ }
+@@ -5832,7 +5813,7 @@ trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
+ 		return;
+ 	}
+ 
+-	mutex_lock(&trace_eval_mutex);
++	guard(mutex)(&trace_eval_mutex);
+ 
+ 	if (!trace_eval_maps)
+ 		trace_eval_maps = map_array;
+@@ -5856,8 +5837,6 @@ trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
+ 		map_array++;
+ 	}
+ 	memset(map_array, 0, sizeof(*map_array));
+-
+-	mutex_unlock(&trace_eval_mutex);
+ }
+ 
+ static void trace_create_eval_file(struct dentry *d_tracer)
+@@ -6019,26 +5998,15 @@ static int __tracing_resize_ring_buffer(struct trace_array *tr,
+ ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
+ 				  unsigned long size, int cpu_id)
+ {
+-	int ret;
+-
+-	mutex_lock(&trace_types_lock);
++	guard(mutex)(&trace_types_lock);
+ 
+ 	if (cpu_id != RING_BUFFER_ALL_CPUS) {
+ 		/* make sure, this cpu is enabled in the mask */
+-		if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
+-			ret = -EINVAL;
+-			goto out;
+-		}
++		if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask))
++			return -EINVAL;
+ 	}
+ 
+-	ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
+-	if (ret < 0)
+-		ret = -ENOMEM;
+-
+-out:
+-	mutex_unlock(&trace_types_lock);
+-
+-	return ret;
++	return __tracing_resize_ring_buffer(tr, size, cpu_id);
+ }
+ 
+ static void update_last_data(struct trace_array *tr)
+@@ -6129,9 +6097,9 @@ int tracing_set_tracer(struct trace_array *tr, const char *buf)
+ #ifdef CONFIG_TRACER_MAX_TRACE
+ 	bool had_max_tr;
+ #endif
+-	int ret = 0;
++	int ret;
+ 
+-	mutex_lock(&trace_types_lock);
++	guard(mutex)(&trace_types_lock);
+ 
+ 	update_last_data(tr);
+ 
+@@ -6139,7 +6107,7 @@ int tracing_set_tracer(struct trace_array *tr, const char *buf)
+ 		ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
+ 						RING_BUFFER_ALL_CPUS);
+ 		if (ret < 0)
+-			goto out;
++			return ret;
+ 		ret = 0;
+ 	}
+ 
+@@ -6147,43 +6115,37 @@ int tracing_set_tracer(struct trace_array *tr, const char *buf)
+ 		if (strcmp(t->name, buf) == 0)
+ 			break;
+ 	}
+-	if (!t) {
+-		ret = -EINVAL;
+-		goto out;
+-	}
++	if (!t)
++		return -EINVAL;
++
+ 	if (t == tr->current_trace)
+-		goto out;
++		return 0;
+ 
+ #ifdef CONFIG_TRACER_SNAPSHOT
+ 	if (t->use_max_tr) {
+ 		local_irq_disable();
+ 		arch_spin_lock(&tr->max_lock);
+-		if (tr->cond_snapshot)
+-			ret = -EBUSY;
++		ret = tr->cond_snapshot ? -EBUSY : 0;
+ 		arch_spin_unlock(&tr->max_lock);
+ 		local_irq_enable();
+ 		if (ret)
+-			goto out;
++			return ret;
+ 	}
+ #endif
+ 	/* Some tracers won't work on kernel command line */
+ 	if (system_state < SYSTEM_RUNNING && t->noboot) {
+ 		pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
+ 			t->name);
+-		goto out;
++		return 0;
+ 	}
+ 
+ 	/* Some tracers are only allowed for the top level buffer */
+-	if (!trace_ok_for_array(t, tr)) {
+-		ret = -EINVAL;
+-		goto out;
+-	}
++	if (!trace_ok_for_array(t, tr))
++		return -EINVAL;
+ 
+ 	/* If trace pipe files are being read, we can't change the tracer */
+-	if (tr->trace_ref) {
+-		ret = -EBUSY;
+-		goto out;
+-	}
++	if (tr->trace_ref)
++		return -EBUSY;
+ 
+ 	trace_branch_disable();
+ 
+@@ -6214,7 +6176,7 @@ int tracing_set_tracer(struct trace_array *tr, const char *buf)
+ 	if (!had_max_tr && t->use_max_tr) {
+ 		ret = tracing_arm_snapshot_locked(tr);
+ 		if (ret)
+-			goto out;
++			return ret;
+ 	}
+ #else
+ 	tr->current_trace = &nop_trace;
+@@ -6227,17 +6189,15 @@ int tracing_set_tracer(struct trace_array *tr, const char *buf)
+ 			if (t->use_max_tr)
+ 				tracing_disarm_snapshot(tr);
+ #endif
+-			goto out;
++			return ret;
+ 		}
+ 	}
+ 
+ 	tr->current_trace = t;
+ 	tr->current_trace->enabled++;
+ 	trace_branch_enable(tr);
+- out:
+-	mutex_unlock(&trace_types_lock);
+ 
+-	return ret;
++	return 0;
+ }
+ 
+ static ssize_t
+@@ -6315,22 +6275,18 @@ tracing_thresh_write(struct file *filp, const char __user *ubuf,
+ 	struct trace_array *tr = filp->private_data;
+ 	int ret;
+ 
+-	mutex_lock(&trace_types_lock);
++	guard(mutex)(&trace_types_lock);
+ 	ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
+ 	if (ret < 0)
+-		goto out;
++		return ret;
+ 
+ 	if (tr->current_trace->update_thresh) {
+ 		ret = tr->current_trace->update_thresh(tr);
+ 		if (ret < 0)
+-			goto out;
++			return ret;
+ 	}
+ 
+-	ret = cnt;
+-out:
+-	mutex_unlock(&trace_types_lock);
+-
+-	return ret;
++	return cnt;
+ }
+ 
+ #ifdef CONFIG_TRACER_MAX_TRACE
+@@ -6549,31 +6505,29 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
+ 	 * This is just a matter of traces coherency, the ring buffer itself
+ 	 * is protected.
+ 	 */
+-	mutex_lock(&iter->mutex);
++	guard(mutex)(&iter->mutex);
+ 
+ 	/* return any leftover data */
+ 	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
+ 	if (sret != -EBUSY)
+-		goto out;
++		return sret;
+ 
+ 	trace_seq_init(&iter->seq);
+ 
+ 	if (iter->trace->read) {
+ 		sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
+ 		if (sret)
+-			goto out;
++			return sret;
+ 	}
+ 
+ waitagain:
+ 	sret = tracing_wait_pipe(filp);
+ 	if (sret <= 0)
+-		goto out;
++		return sret;
+ 
+ 	/* stop when tracing is finished */
+-	if (trace_empty(iter)) {
+-		sret = 0;
+-		goto out;
+-	}
++	if (trace_empty(iter))
++		return 0;
+ 
+ 	if (cnt >= TRACE_SEQ_BUFFER_SIZE)
+ 		cnt = TRACE_SEQ_BUFFER_SIZE - 1;
+@@ -6637,9 +6591,6 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
+ 	if (sret == -EBUSY)
+ 		goto waitagain;
+ 
+-out:
+-	mutex_unlock(&iter->mutex);
+-
+ 	return sret;
+ }
+ 
+@@ -7231,25 +7182,19 @@ u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_eve
+  */
+ int tracing_set_filter_buffering(struct trace_array *tr, bool set)
+ {
+-	int ret = 0;
+-
+-	mutex_lock(&trace_types_lock);
++	guard(mutex)(&trace_types_lock);
+ 
+ 	if (set && tr->no_filter_buffering_ref++)
+-		goto out;
++		return 0;
+ 
+ 	if (!set) {
+-		if (WARN_ON_ONCE(!tr->no_filter_buffering_ref)) {
+-			ret = -EINVAL;
+-			goto out;
+-		}
++		if (WARN_ON_ONCE(!tr->no_filter_buffering_ref))
++			return -EINVAL;
+ 
+ 		--tr->no_filter_buffering_ref;
+ 	}
+- out:
+-	mutex_unlock(&trace_types_lock);
+ 
+-	return ret;
++	return 0;
+ }
+ 
+ struct ftrace_buffer_info {
+@@ -7325,12 +7270,10 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
+ 	if (ret)
+ 		return ret;
+ 
+-	mutex_lock(&trace_types_lock);
++	guard(mutex)(&trace_types_lock);
+ 
+-	if (tr->current_trace->use_max_tr) {
+-		ret = -EBUSY;
+-		goto out;
+-	}
++	if (tr->current_trace->use_max_tr)
++		return -EBUSY;
+ 
+ 	local_irq_disable();
+ 	arch_spin_lock(&tr->max_lock);
+@@ -7339,24 +7282,20 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
+ 	arch_spin_unlock(&tr->max_lock);
+ 	local_irq_enable();
+ 	if (ret)
+-		goto out;
++		return ret;
+ 
+ 	switch (val) {
+ 	case 0:
+-		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
+-			ret = -EINVAL;
+-			break;
+-		}
++		if (iter->cpu_file != RING_BUFFER_ALL_CPUS)
++			return -EINVAL;
+ 		if (tr->allocated_snapshot)
+ 			free_snapshot(tr);
+ 		break;
+ 	case 1:
+ /* Only allow per-cpu swap if the ring buffer supports it */
+ #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
+-		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
+-			ret = -EINVAL;
+-			break;
+-		}
++		if (iter->cpu_file != RING_BUFFER_ALL_CPUS)
++			return -EINVAL;
+ #endif
+ 		if (tr->allocated_snapshot)
+ 			ret = resize_buffer_duplicate_size(&tr->max_buffer,
+@@ -7364,7 +7303,7 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
+ 
+ 		ret = tracing_arm_snapshot_locked(tr);
+ 		if (ret)
+-			break;
++			return ret;
+ 
+ 		/* Now, we're going to swap */
+ 		if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
+@@ -7391,8 +7330,7 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
+ 		*ppos += cnt;
+ 		ret = cnt;
+ 	}
+-out:
+-	mutex_unlock(&trace_types_lock);
++
+ 	return ret;
+ }
+ 
+@@ -7778,12 +7716,11 @@ void tracing_log_err(struct trace_array *tr,
+ 
+ 	len += sizeof(CMD_PREFIX) + 2 * sizeof("\n") + strlen(cmd) + 1;
+ 
+-	mutex_lock(&tracing_err_log_lock);
++	guard(mutex)(&tracing_err_log_lock);
++
+ 	err = get_tracing_log_err(tr, len);
+-	if (PTR_ERR(err) == -ENOMEM) {
+-		mutex_unlock(&tracing_err_log_lock);
++	if (PTR_ERR(err) == -ENOMEM)
+ 		return;
+-	}
+ 
+ 	snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
+ 	snprintf(err->cmd, len, "\n" CMD_PREFIX "%s\n", cmd);
+@@ -7794,7 +7731,6 @@ void tracing_log_err(struct trace_array *tr,
+ 	err->info.ts = local_clock();
+ 
+ 	list_add_tail(&err->list, &tr->err_log);
+-	mutex_unlock(&tracing_err_log_lock);
+ }
+ 
+ static void clear_tracing_err_log(struct trace_array *tr)
+@@ -9535,20 +9471,17 @@ static int instance_mkdir(const char *name)
+ 	struct trace_array *tr;
+ 	int ret;
+ 
+-	mutex_lock(&event_mutex);
+-	mutex_lock(&trace_types_lock);
++	guard(mutex)(&event_mutex);
++	guard(mutex)(&trace_types_lock);
+ 
+ 	ret = -EEXIST;
+ 	if (trace_array_find(name))
+-		goto out_unlock;
++		return -EEXIST;
+ 
+ 	tr = trace_array_create(name);
+ 
+ 	ret = PTR_ERR_OR_ZERO(tr);
+ 
+-out_unlock:
+-	mutex_unlock(&trace_types_lock);
+-	mutex_unlock(&event_mutex);
+ 	return ret;
+ }
+ 
+@@ -9598,24 +9531,23 @@ struct trace_array *trace_array_get_by_name(const char *name, const char *system
+ {
+ 	struct trace_array *tr;
+ 
+-	mutex_lock(&event_mutex);
+-	mutex_lock(&trace_types_lock);
++	guard(mutex)(&event_mutex);
++	guard(mutex)(&trace_types_lock);
+ 
+ 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
+-		if (tr->name && strcmp(tr->name, name) == 0)
+-			goto out_unlock;
++		if (tr->name && strcmp(tr->name, name) == 0) {
++			tr->ref++;
++			return tr;
++		}
+ 	}
+ 
+ 	tr = trace_array_create_systems(name, systems, 0, 0);
+ 
+ 	if (IS_ERR(tr))
+ 		tr = NULL;
+-out_unlock:
+-	if (tr)
++	else
+ 		tr->ref++;
+ 
+-	mutex_unlock(&trace_types_lock);
+-	mutex_unlock(&event_mutex);
+ 	return tr;
+ }
+ EXPORT_SYMBOL_GPL(trace_array_get_by_name);
+@@ -9666,48 +9598,36 @@ static int __remove_instance(struct trace_array *tr)
+ int trace_array_destroy(struct trace_array *this_tr)
+ {
+ 	struct trace_array *tr;
+-	int ret;
+ 
+ 	if (!this_tr)
+ 		return -EINVAL;
+ 
+-	mutex_lock(&event_mutex);
+-	mutex_lock(&trace_types_lock);
++	guard(mutex)(&event_mutex);
++	guard(mutex)(&trace_types_lock);
+ 
+-	ret = -ENODEV;
+ 
+ 	/* Making sure trace array exists before destroying it. */
+ 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
+-		if (tr == this_tr) {
+-			ret = __remove_instance(tr);
+-			break;
+-		}
++		if (tr == this_tr)
++			return __remove_instance(tr);
+ 	}
+ 
+-	mutex_unlock(&trace_types_lock);
+-	mutex_unlock(&event_mutex);
+-
+-	return ret;
++	return -ENODEV;
+ }
+ EXPORT_SYMBOL_GPL(trace_array_destroy);
+ 
+ static int instance_rmdir(const char *name)
+ {
+ 	struct trace_array *tr;
+-	int ret;
+ 
+-	mutex_lock(&event_mutex);
+-	mutex_lock(&trace_types_lock);
++	guard(mutex)(&event_mutex);
++	guard(mutex)(&trace_types_lock);
+ 
+-	ret = -ENODEV;
+ 	tr = trace_array_find(name);
+-	if (tr)
+-		ret = __remove_instance(tr);
+-
+-	mutex_unlock(&trace_types_lock);
+-	mutex_unlock(&event_mutex);
++	if (!tr)
++		return -ENODEV;
+ 
+-	return ret;
++	return __remove_instance(tr);
+ }
+ 
+ static __init void create_trace_instances(struct dentry *d_tracer)
+@@ -9720,19 +9640,16 @@ static __init void create_trace_instances(struct dentry *d_tracer)
+ 	if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))
+ 		return;
+ 
+-	mutex_lock(&event_mutex);
+-	mutex_lock(&trace_types_lock);
++	guard(mutex)(&event_mutex);
++	guard(mutex)(&trace_types_lock);
+ 
+ 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
+ 		if (!tr->name)
+ 			continue;
+ 		if (MEM_FAIL(trace_array_create_dir(tr) < 0,
+ 			     "Failed to create instance directory\n"))
+-			break;
++			return;
+ 	}
+-
+-	mutex_unlock(&trace_types_lock);
+-	mutex_unlock(&event_mutex);
+ }
+ 
+ static void
+@@ -9946,7 +9863,7 @@ static void trace_module_remove_evals(struct module *mod)
+ 	if (!mod->num_trace_evals)
+ 		return;
+ 
+-	mutex_lock(&trace_eval_mutex);
++	guard(mutex)(&trace_eval_mutex);
+ 
+ 	map = trace_eval_maps;
+ 
+@@ -9958,12 +9875,10 @@ static void trace_module_remove_evals(struct module *mod)
+ 		map = map->tail.next;
+ 	}
+ 	if (!map)
+-		goto out;
++		return;
+ 
+ 	*last = trace_eval_jmp_to_tail(map)->tail.next;
+ 	kfree(map);
+- out:
+-	mutex_unlock(&trace_eval_mutex);
+ }
+ #else
+ static inline void trace_module_remove_evals(struct module *mod) { }
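Most of the trace.c churn is mechanical: mutex_lock()/mutex_unlock() pairs and their goto-out unwinding become guard(mutex)(...) from <linux/cleanup.h>, which releases the lock when the guard goes out of scope so error paths can simply return. A userspace approximation with the compiler cleanup attribute (GCC/Clang) and pthreads; guard_mutex() here is an invented analogue, not the kernel macro:

#include <pthread.h>
#include <stdio.h>

static void unlock_cleanup(pthread_mutex_t **m)
{
	pthread_mutex_unlock(*m);
}

/* Lock now, unlock automatically when the enclosing scope ends,
 * including on early return. */
#define guard_mutex(m) \
	pthread_mutex_t *guard_var __attribute__((cleanup(unlock_cleanup))) = \
		(pthread_mutex_lock(m), (m))

static pthread_mutex_t types_lock = PTHREAD_MUTEX_INITIALIZER;
static int registered;

static int register_once(void)
{
	guard_mutex(&types_lock);

	if (registered)
		return -1;	/* no goto/unlock pair needed on this path */
	registered = 1;
	return 0;
}

int main(void)
{
	printf("first=%d second=%d\n", register_once(), register_once());
	return 0;
}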
+diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
+index 3b0cea37e0297b..fbbc3c719d2f68 100644
+--- a/kernel/trace/trace_functions.c
++++ b/kernel/trace/trace_functions.c
+@@ -193,7 +193,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip,
+ 	if (bit < 0)
+ 		return;
+ 
+-	trace_ctx = tracing_gen_ctx();
++	trace_ctx = tracing_gen_ctx_dec();
+ 
+ 	cpu = smp_processor_id();
+ 	data = per_cpu_ptr(tr->array_buffer.data, cpu);
+@@ -298,7 +298,6 @@ function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
+ 	struct trace_array *tr = op->private;
+ 	struct trace_array_cpu *data;
+ 	unsigned int trace_ctx;
+-	unsigned long flags;
+ 	int bit;
+ 	int cpu;
+ 
+@@ -325,8 +324,7 @@ function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
+ 	if (is_repeat_check(tr, last_info, ip, parent_ip))
+ 		goto out;
+ 
+-	local_save_flags(flags);
+-	trace_ctx = tracing_gen_ctx_flags(flags);
++	trace_ctx = tracing_gen_ctx_dec();
+ 	process_repeats(tr, ip, parent_ip, last_info, trace_ctx);
+ 
+ 	trace_function(tr, ip, parent_ip, trace_ctx);
+diff --git a/lib/iov_iter.c b/lib/iov_iter.c
+index 908e75a28d90bd..bdb37d572e97ca 100644
+--- a/lib/iov_iter.c
++++ b/lib/iov_iter.c
+@@ -1428,6 +1428,8 @@ static ssize_t __import_iovec_ubuf(int type, const struct iovec __user *uvec,
+ 	struct iovec *iov = *iovp;
+ 	ssize_t ret;
+ 
++	*iovp = NULL;
++
+ 	if (compat)
+ 		ret = copy_compat_iovec_from_user(iov, uvec, 1);
+ 	else
+@@ -1438,7 +1440,6 @@ static ssize_t __import_iovec_ubuf(int type, const struct iovec __user *uvec,
+ 	ret = import_ubuf(type, iov->iov_base, iov->iov_len, i);
+ 	if (unlikely(ret))
+ 		return ret;
+-	*iovp = NULL;
+ 	return i->count;
+ }
+ 
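The iov_iter fix hoists *iovp = NULL to the top of __import_iovec_ubuf() so every error path leaves the caller holding NULL rather than a stale pointer it might free. The general rule, clear the out-parameter before anything can fail, in a self-contained sketch (load_copy() is an invented example, not a kernel interface):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Clear the out-parameter before any failure can occur, so the caller's
 * unconditional free(*out) is always safe. */
static int load_copy(const char *src, char **out)
{
	*out = NULL;			/* first thing, as in the patch */

	if (!src)
		return -1;		/* caller may still free(*out) */

	*out = strdup(src);
	return *out ? 0 : -1;
}

int main(void)
{
	char *buf;

	if (load_copy(NULL, &buf) != 0)
		free(buf);		/* safe: buf is NULL, not garbage */

	if (load_copy("hello", &buf) == 0) {
		puts(buf);
		free(buf);
	}
	return 0;
}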
+diff --git a/mm/madvise.c b/mm/madvise.c
+index ff139e57cca292..c211e8fa4e49bb 100644
+--- a/mm/madvise.c
++++ b/mm/madvise.c
+@@ -920,7 +920,16 @@ static long madvise_dontneed_free(struct vm_area_struct *vma,
+ 			 */
+ 			end = vma->vm_end;
+ 		}
+-		VM_WARN_ON(start >= end);
++		/*
++		 * If the memory region between start and end was
++		 * originally backed by 4kB pages and then remapped to
++		 * be backed by hugepages while mmap_lock was dropped,
++		 * the adjustment for hugetlb vma above may have rounded
++		 * end down to the start address.
++		 */
++		if (start == end)
++			return 0;
++		VM_WARN_ON(start > end);
+ 	}
+ 
+ 	if (behavior == MADV_DONTNEED || behavior == MADV_DONTNEED_LOCKED)
+diff --git a/mm/migrate_device.c b/mm/migrate_device.c
+index 9cf26592ac934d..5bd888223cc8b8 100644
+--- a/mm/migrate_device.c
++++ b/mm/migrate_device.c
+@@ -840,20 +840,15 @@ void migrate_device_finalize(unsigned long *src_pfns,
+ 			dst = src;
+ 		}
+ 
++		if (!folio_is_zone_device(dst))
++			folio_add_lru(dst);
+ 		remove_migration_ptes(src, dst, 0);
+ 		folio_unlock(src);
+-
+-		if (folio_is_zone_device(src))
+-			folio_put(src);
+-		else
+-			folio_putback_lru(src);
++		folio_put(src);
+ 
+ 		if (dst != src) {
+ 			folio_unlock(dst);
+-			if (folio_is_zone_device(dst))
+-				folio_put(dst);
+-			else
+-				folio_putback_lru(dst);
++			folio_put(dst);
+ 		}
+ 	}
+ }
+diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
+index 501ec4249fedc3..8612023bec60dc 100644
+--- a/net/bpf/test_run.c
++++ b/net/bpf/test_run.c
+@@ -660,12 +660,9 @@ static void *bpf_test_init(const union bpf_attr *kattr, u32 user_size,
+ 	void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
+ 	void *data;
+ 
+-	if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
++	if (user_size < ETH_HLEN || user_size > PAGE_SIZE - headroom - tailroom)
+ 		return ERR_PTR(-EINVAL);
+ 
+-	if (user_size > size)
+-		return ERR_PTR(-EMSGSIZE);
+-
+ 	size = SKB_DATA_ALIGN(size);
+ 	data = kzalloc(size + headroom + tailroom, GFP_USER);
+ 	if (!data)
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 2e0fe38d0e877d..c761f862bc5a2d 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -1012,6 +1012,12 @@ int netdev_get_name(struct net *net, char *name, int ifindex)
+ 	return ret;
+ }
+ 
++static bool dev_addr_cmp(struct net_device *dev, unsigned short type,
++			 const char *ha)
++{
++	return dev->type == type && !memcmp(dev->dev_addr, ha, dev->addr_len);
++}
++
+ /**
+  *	dev_getbyhwaddr_rcu - find a device by its hardware address
+  *	@net: the applicable net namespace
+@@ -1020,7 +1026,7 @@ int netdev_get_name(struct net *net, char *name, int ifindex)
+  *
+  *	Search for an interface by MAC address. Returns NULL if the device
+  *	is not found or a pointer to the device.
+- *	The caller must hold RCU or RTNL.
++ *	The caller must hold RCU.
+  *	The returned device has not had its ref count increased
+  *	and the caller must therefore be careful about locking
+  *
+@@ -1032,14 +1038,39 @@ struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
+ 	struct net_device *dev;
+ 
+ 	for_each_netdev_rcu(net, dev)
+-		if (dev->type == type &&
+-		    !memcmp(dev->dev_addr, ha, dev->addr_len))
++		if (dev_addr_cmp(dev, type, ha))
+ 			return dev;
+ 
+ 	return NULL;
+ }
+ EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
+ 
++/**
++ * dev_getbyhwaddr() - find a device by its hardware address
++ * @net: the applicable net namespace
++ * @type: media type of device
++ * @ha: hardware address
++ *
++ * Similar to dev_getbyhwaddr_rcu(), but the caller needs to hold
++ * rtnl_lock.
++ *
++ * Context: rtnl_lock() must be held.
++ * Return: pointer to the net_device, or NULL if not found
++ */
++struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type,
++				   const char *ha)
++{
++	struct net_device *dev;
++
++	ASSERT_RTNL();
++	for_each_netdev(net, dev)
++		if (dev_addr_cmp(dev, type, ha))
++			return dev;
++
++	return NULL;
++}
++EXPORT_SYMBOL(dev_getbyhwaddr);
++
+ struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
+ {
+ 	struct net_device *dev, *ret = NULL;
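The dev.c hunks split the hardware-address lookup into two entry points that share the new dev_addr_cmp() predicate: the existing RCU variant, and dev_getbyhwaddr(), which asserts rtnl_lock() since that is what the ARP ioctl path actually holds. A toy version of the same shape, with a global flag standing in for the RTNL assertion and a simple linked list for the netdev list:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct dev { const char *addr; unsigned short type; struct dev *next; };

static bool rtnl_held;			/* stand-in for ASSERT_RTNL() */

static bool dev_addr_cmp(const struct dev *d, unsigned short type,
			 const char *ha)
{
	return d->type == type && strcmp(d->addr, ha) == 0;
}

/* Caller promises RCU protection (not modeled here). */
static struct dev *getbyhwaddr_rcu(struct dev *head, unsigned short type,
				   const char *ha)
{
	for (struct dev *d = head; d; d = d->next)
		if (dev_addr_cmp(d, type, ha))
			return d;
	return NULL;
}

/* Caller must hold the writer lock instead; same predicate underneath. */
static struct dev *getbyhwaddr(struct dev *head, unsigned short type,
			       const char *ha)
{
	assert(rtnl_held);		/* mirrors ASSERT_RTNL() */
	return getbyhwaddr_rcu(head, type, ha);
}

int main(void)
{
	struct dev eth0 = { "aa:bb", 1, NULL };
	struct dev *d;

	rtnl_held = true;
	d = getbyhwaddr(&eth0, 1, "aa:bb");
	printf("found: %s\n", d ? d->addr : "none");
	return 0;
}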
+diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
+index 6efd4cccc9ddd2..212f0a048cab68 100644
+--- a/net/core/drop_monitor.c
++++ b/net/core/drop_monitor.c
+@@ -1734,30 +1734,30 @@ static int __init init_net_drop_monitor(void)
+ 		return -ENOSPC;
+ 	}
+ 
+-	rc = genl_register_family(&net_drop_monitor_family);
+-	if (rc) {
+-		pr_err("Could not create drop monitor netlink family\n");
+-		return rc;
++	for_each_possible_cpu(cpu) {
++		net_dm_cpu_data_init(cpu);
++		net_dm_hw_cpu_data_init(cpu);
+ 	}
+-	WARN_ON(net_drop_monitor_family.mcgrp_offset != NET_DM_GRP_ALERT);
+ 
+ 	rc = register_netdevice_notifier(&dropmon_net_notifier);
+ 	if (rc < 0) {
+ 		pr_crit("Failed to register netdevice notifier\n");
++		return rc;
++	}
++
++	rc = genl_register_family(&net_drop_monitor_family);
++	if (rc) {
++		pr_err("Could not create drop monitor netlink family\n");
+ 		goto out_unreg;
+ 	}
++	WARN_ON(net_drop_monitor_family.mcgrp_offset != NET_DM_GRP_ALERT);
+ 
+ 	rc = 0;
+ 
+-	for_each_possible_cpu(cpu) {
+-		net_dm_cpu_data_init(cpu);
+-		net_dm_hw_cpu_data_init(cpu);
+-	}
+-
+ 	goto out;
+ 
+ out_unreg:
+-	genl_unregister_family(&net_drop_monitor_family);
++	WARN_ON(unregister_netdevice_notifier(&dropmon_net_notifier));
+ out:
+ 	return rc;
+ }
+@@ -1766,19 +1766,18 @@ static void exit_net_drop_monitor(void)
+ {
+ 	int cpu;
+ 
+-	BUG_ON(unregister_netdevice_notifier(&dropmon_net_notifier));
+-
+ 	/*
+ 	 * Because of the module_get/put we do in the trace state change path
+ 	 * we are guaranteed not to have any current users when we get here
+ 	 */
++	BUG_ON(genl_unregister_family(&net_drop_monitor_family));
++
++	BUG_ON(unregister_netdevice_notifier(&dropmon_net_notifier));
+ 
+ 	for_each_possible_cpu(cpu) {
+ 		net_dm_hw_cpu_data_fini(cpu);
+ 		net_dm_cpu_data_fini(cpu);
+ 	}
+-
+-	BUG_ON(genl_unregister_family(&net_drop_monitor_family));
+ }
+ 
+ module_init(init_net_drop_monitor);
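The drop_monitor reshuffle restores init/exit symmetry: per-CPU data (which cannot fail) comes first, then the notifier, then the generic netlink family, with each failure path undoing exactly the steps that succeeded and the exit path running in strict reverse. The classic goto-unwind skeleton, with placeholder steps:

#include <stdio.h>

static int  step(const char *name) { printf("init %s\n", name); return 0; }
static void undo(const char *name) { printf("undo %s\n", name); }

static int init_all(void)
{
	int rc;

	/* Step 1: cannot fail, so it needs no unwind label. */
	printf("init percpu data\n");

	rc = step("notifier");			/* step 2 */
	if (rc)
		return rc;

	rc = step("genl family");		/* step 3 */
	if (rc)
		goto out_notifier;		/* undo step 2 only */

	return 0;

out_notifier:
	undo("notifier");
	return rc;
}

static void exit_all(void)
{
	/* Strict reverse of init_all(). */
	undo("genl family");
	undo("notifier");
	printf("fini percpu data\n");
}

int main(void)
{
	if (init_all() == 0)
		exit_all();
	return 0;
}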
+diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
+index 5db41bf2ed93e0..9cd8de6bebb543 100644
+--- a/net/core/flow_dissector.c
++++ b/net/core/flow_dissector.c
+@@ -853,23 +853,30 @@ __skb_flow_dissect_ports(const struct sk_buff *skb,
+ 			 void *target_container, const void *data,
+ 			 int nhoff, u8 ip_proto, int hlen)
+ {
+-	enum flow_dissector_key_id dissector_ports = FLOW_DISSECTOR_KEY_MAX;
+-	struct flow_dissector_key_ports *key_ports;
++	struct flow_dissector_key_ports_range *key_ports_range = NULL;
++	struct flow_dissector_key_ports *key_ports = NULL;
++	__be32 ports;
+ 
+ 	if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS))
+-		dissector_ports = FLOW_DISSECTOR_KEY_PORTS;
+-	else if (dissector_uses_key(flow_dissector,
+-				    FLOW_DISSECTOR_KEY_PORTS_RANGE))
+-		dissector_ports = FLOW_DISSECTOR_KEY_PORTS_RANGE;
++		key_ports = skb_flow_dissector_target(flow_dissector,
++						      FLOW_DISSECTOR_KEY_PORTS,
++						      target_container);
+ 
+-	if (dissector_ports == FLOW_DISSECTOR_KEY_MAX)
++	if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS_RANGE))
++		key_ports_range = skb_flow_dissector_target(flow_dissector,
++							    FLOW_DISSECTOR_KEY_PORTS_RANGE,
++							    target_container);
++
++	if (!key_ports && !key_ports_range)
+ 		return;
+ 
+-	key_ports = skb_flow_dissector_target(flow_dissector,
+-					      dissector_ports,
+-					      target_container);
+-	key_ports->ports = __skb_flow_get_ports(skb, nhoff, ip_proto,
+-						data, hlen);
++	ports = __skb_flow_get_ports(skb, nhoff, ip_proto, data, hlen);
++
++	if (key_ports)
++		key_ports->ports = ports;
++
++	if (key_ports_range)
++		key_ports_range->tp.ports = ports;
+ }
+ 
+ static void
+@@ -924,6 +931,7 @@ static void __skb_flow_bpf_to_target(const struct bpf_flow_keys *flow_keys,
+ 				     struct flow_dissector *flow_dissector,
+ 				     void *target_container)
+ {
++	struct flow_dissector_key_ports_range *key_ports_range = NULL;
+ 	struct flow_dissector_key_ports *key_ports = NULL;
+ 	struct flow_dissector_key_control *key_control;
+ 	struct flow_dissector_key_basic *key_basic;
+@@ -968,20 +976,21 @@ static void __skb_flow_bpf_to_target(const struct bpf_flow_keys *flow_keys,
+ 		key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
+ 	}
+ 
+-	if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS))
++	if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS)) {
+ 		key_ports = skb_flow_dissector_target(flow_dissector,
+ 						      FLOW_DISSECTOR_KEY_PORTS,
+ 						      target_container);
+-	else if (dissector_uses_key(flow_dissector,
+-				    FLOW_DISSECTOR_KEY_PORTS_RANGE))
+-		key_ports = skb_flow_dissector_target(flow_dissector,
+-						      FLOW_DISSECTOR_KEY_PORTS_RANGE,
+-						      target_container);
+-
+-	if (key_ports) {
+ 		key_ports->src = flow_keys->sport;
+ 		key_ports->dst = flow_keys->dport;
+ 	}
++	if (dissector_uses_key(flow_dissector,
++			       FLOW_DISSECTOR_KEY_PORTS_RANGE)) {
++		key_ports_range = skb_flow_dissector_target(flow_dissector,
++							    FLOW_DISSECTOR_KEY_PORTS_RANGE,
++							    target_container);
++		key_ports_range->tp.src = flow_keys->sport;
++		key_ports_range->tp.dst = flow_keys->dport;
++	}
+ 
+ 	if (dissector_uses_key(flow_dissector,
+ 			       FLOW_DISSECTOR_KEY_FLOW_LABEL)) {
+diff --git a/net/core/gro.c b/net/core/gro.c
+index d1f44084e978fb..78b320b6317445 100644
+--- a/net/core/gro.c
++++ b/net/core/gro.c
+@@ -7,9 +7,6 @@
+ 
+ #define MAX_GRO_SKBS 8
+ 
+-/* This should be increased if a protocol with a bigger head is added. */
+-#define GRO_MAX_HEAD (MAX_HEADER + 128)
+-
+ static DEFINE_SPINLOCK(offload_lock);
+ 
+ /**
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 74149dc4ee318d..61a950f13a91c7 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -69,6 +69,7 @@
+ #include <net/dst.h>
+ #include <net/sock.h>
+ #include <net/checksum.h>
++#include <net/gro.h>
+ #include <net/gso.h>
+ #include <net/hotdata.h>
+ #include <net/ip6_checksum.h>
+@@ -95,7 +96,9 @@
+ static struct kmem_cache *skbuff_ext_cache __ro_after_init;
+ #endif
+ 
+-#define SKB_SMALL_HEAD_SIZE SKB_HEAD_ALIGN(MAX_TCP_HEADER)
++#define GRO_MAX_HEAD_PAD (GRO_MAX_HEAD + NET_SKB_PAD + NET_IP_ALIGN)
++#define SKB_SMALL_HEAD_SIZE SKB_HEAD_ALIGN(max(MAX_TCP_HEADER, \
++					       GRO_MAX_HEAD_PAD))
+ 
+ /* We want SKB_SMALL_HEAD_CACHE_SIZE to not be a power of two.
+  * This should ensure that SKB_SMALL_HEAD_HEADROOM is a unique
+@@ -736,7 +739,7 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
+ 	/* If requested length is either too small or too big,
+ 	 * we use kmalloc() for skb->head allocation.
+ 	 */
+-	if (len <= SKB_WITH_OVERHEAD(1024) ||
++	if (len <= SKB_WITH_OVERHEAD(SKB_SMALL_HEAD_CACHE_SIZE) ||
+ 	    len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
+ 	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
+ 		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
+@@ -816,7 +819,8 @@ struct sk_buff *napi_alloc_skb(struct napi_struct *napi, unsigned int len)
+ 	 * When the small frag allocator is available, prefer it over kmalloc
+ 	 * for small fragments
+ 	 */
+-	if ((!NAPI_HAS_SMALL_PAGE_FRAG && len <= SKB_WITH_OVERHEAD(1024)) ||
++	if ((!NAPI_HAS_SMALL_PAGE_FRAG &&
++	     len <= SKB_WITH_OVERHEAD(SKB_SMALL_HEAD_CACHE_SIZE)) ||
+ 	    len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
+ 	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
+ 		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX | SKB_ALLOC_NAPI,
+diff --git a/net/core/skmsg.c b/net/core/skmsg.c
+index 8ad7e6755fd642..f76cbf49c68c8d 100644
+--- a/net/core/skmsg.c
++++ b/net/core/skmsg.c
+@@ -548,6 +548,9 @@ static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb,
+ 			return num_sge;
+ 	}
+ 
++#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
++	psock->ingress_bytes += len;
++#endif
+ 	copied = len;
+ 	msg->sg.start = 0;
+ 	msg->sg.size = copied;
+@@ -1143,6 +1146,10 @@ int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
+ 	if (!ret)
+ 		sk_psock_set_state(psock, SK_PSOCK_RX_STRP_ENABLED);
+ 
++	if (sk_is_tcp(sk)) {
++		psock->strp.cb.read_sock = tcp_bpf_strp_read_sock;
++		psock->copied_seq = tcp_sk(sk)->copied_seq;
++	}
+ 	return ret;
+ }
+ 
+diff --git a/net/core/sock_map.c b/net/core/sock_map.c
+index f1b9b3958792cd..82a14f131d00c6 100644
+--- a/net/core/sock_map.c
++++ b/net/core/sock_map.c
+@@ -303,7 +303,10 @@ static int sock_map_link(struct bpf_map *map, struct sock *sk)
+ 
+ 	write_lock_bh(&sk->sk_callback_lock);
+ 	if (stream_parser && stream_verdict && !psock->saved_data_ready) {
+-		ret = sk_psock_init_strp(sk, psock);
++		if (sk_is_tcp(sk))
++			ret = sk_psock_init_strp(sk, psock);
++		else
++			ret = -EOPNOTSUPP;
+ 		if (ret) {
+ 			write_unlock_bh(&sk->sk_callback_lock);
+ 			sk_psock_put(sk, psock);
+@@ -541,6 +544,9 @@ static bool sock_map_sk_state_allowed(const struct sock *sk)
+ 		return (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_LISTEN);
+ 	if (sk_is_stream_unix(sk))
+ 		return (1 << sk->sk_state) & TCPF_ESTABLISHED;
++	if (sk_is_vsock(sk) &&
++	    (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET))
++		return (1 << sk->sk_state) & TCPF_ESTABLISHED;
+ 	return true;
+ }
+ 
+diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
+index 59ffaa89d7b05f..8fb48f42581ce1 100644
+--- a/net/ipv4/arp.c
++++ b/net/ipv4/arp.c
+@@ -1077,7 +1077,7 @@ static int arp_req_set_public(struct net *net, struct arpreq *r,
+ 	__be32 mask = ((struct sockaddr_in *)&r->arp_netmask)->sin_addr.s_addr;
+ 
+ 	if (!dev && (r->arp_flags & ATF_COM)) {
+-		dev = dev_getbyhwaddr_rcu(net, r->arp_ha.sa_family,
++		dev = dev_getbyhwaddr(net, r->arp_ha.sa_family,
+ 				      r->arp_ha.sa_data);
+ 		if (!dev)
+ 			return -ENODEV;
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 4f77bd862e957f..68cb6a966b18b8 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -1564,12 +1564,13 @@ EXPORT_SYMBOL(tcp_recv_skb);
+  *	  or for 'peeking' the socket using this routine
+  *	  (although both would be easy to implement).
+  */
+-int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
+-		  sk_read_actor_t recv_actor)
++static int __tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
++			   sk_read_actor_t recv_actor, bool noack,
++			   u32 *copied_seq)
+ {
+ 	struct sk_buff *skb;
+ 	struct tcp_sock *tp = tcp_sk(sk);
+-	u32 seq = tp->copied_seq;
++	u32 seq = *copied_seq;
+ 	u32 offset;
+ 	int copied = 0;
+ 
+@@ -1623,9 +1624,12 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
+ 		tcp_eat_recv_skb(sk, skb);
+ 		if (!desc->count)
+ 			break;
+-		WRITE_ONCE(tp->copied_seq, seq);
++		WRITE_ONCE(*copied_seq, seq);
+ 	}
+-	WRITE_ONCE(tp->copied_seq, seq);
++	WRITE_ONCE(*copied_seq, seq);
++
++	if (noack)
++		goto out;
+ 
+ 	tcp_rcv_space_adjust(sk);
+ 
+@@ -1634,10 +1638,25 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
+ 		tcp_recv_skb(sk, seq, &offset);
+ 		tcp_cleanup_rbuf(sk, copied);
+ 	}
++out:
+ 	return copied;
+ }
++
++int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
++		  sk_read_actor_t recv_actor)
++{
++	return __tcp_read_sock(sk, desc, recv_actor, false,
++			       &tcp_sk(sk)->copied_seq);
++}
+ EXPORT_SYMBOL(tcp_read_sock);
+ 
++int tcp_read_sock_noack(struct sock *sk, read_descriptor_t *desc,
++			sk_read_actor_t recv_actor, bool noack,
++			u32 *copied_seq)
++{
++	return __tcp_read_sock(sk, desc, recv_actor, noack, copied_seq);
++}
++
+ int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
+ {
+ 	struct sk_buff *skb;
+diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
+index 392678ae80f4ed..22e8a2af5dd8b0 100644
+--- a/net/ipv4/tcp_bpf.c
++++ b/net/ipv4/tcp_bpf.c
+@@ -646,6 +646,42 @@ static int tcp_bpf_assert_proto_ops(struct proto *ops)
+ 	       ops->sendmsg  == tcp_sendmsg ? 0 : -ENOTSUPP;
+ }
+ 
++#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
++int tcp_bpf_strp_read_sock(struct strparser *strp, read_descriptor_t *desc,
++			   sk_read_actor_t recv_actor)
++{
++	struct sock *sk = strp->sk;
++	struct sk_psock *psock;
++	struct tcp_sock *tp;
++	int copied = 0;
++
++	tp = tcp_sk(sk);
++	rcu_read_lock();
++	psock = sk_psock(sk);
++	if (WARN_ON_ONCE(!psock)) {
++		desc->error = -EINVAL;
++		goto out;
++	}
++
++	psock->ingress_bytes = 0;
++	copied = tcp_read_sock_noack(sk, desc, recv_actor, true,
++				     &psock->copied_seq);
++	if (copied < 0)
++		goto out;
++	/* recv_actor may redirect skb to another socket (SK_REDIRECT) or
++	 * just put skb into ingress queue of current socket (SK_PASS).
++	 * For SK_REDIRECT, we need to ack the frame immediately but for
++	 * SK_PASS, we want to delay the ack until tcp_bpf_recvmsg_parser().
++	 */
++	tp->copied_seq = psock->copied_seq - psock->ingress_bytes;
++	tcp_rcv_space_adjust(sk);
++	__tcp_cleanup_rbuf(sk, copied - psock->ingress_bytes);
++out:
++	rcu_read_unlock();
++	return copied;
++}
++#endif /* CONFIG_BPF_STREAM_PARSER */
++
+ int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore)
+ {
+ 	int family = sk->sk_family == AF_INET6 ? TCP_BPF_IPV6 : TCP_BPF_IPV4;
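tcp_bpf_strp_read_sock() advances a per-psock copied_seq while parsing, then rewinds the socket's own copied_seq by the bytes parked on the ingress queue, so redirected bytes are acked at once while SK_PASS bytes wait for tcp_bpf_recvmsg_parser(). The bookkeeping reduced to its sequence arithmetic (u32 wraparound behaves the same here as in TCP):

#include <stdint.h>
#include <stdio.h>

struct demo {
	uint32_t sk_copied_seq;		/* what the peer may be acked up to */
	uint32_t psock_copied_seq;	/* private cursor used while parsing */
	uint32_t ingress_bytes;		/* bytes kept for later local recvmsg */
};

static void read_round(struct demo *d, uint32_t parsed, uint32_t kept)
{
	d->ingress_bytes = 0;		/* reset per read, as the patch does */
	d->psock_copied_seq += parsed;	/* cursor covers all parsed bytes */
	d->ingress_bytes += kept;	/* SK_PASS bytes, ack deferred */

	/* tp->copied_seq = psock->copied_seq - psock->ingress_bytes; */
	d->sk_copied_seq = d->psock_copied_seq - d->ingress_bytes;
}

int main(void)
{
	struct demo d = { 1000, 1000, 0 };

	read_round(&d, 300, 120);	/* 300 parsed, 120 queued locally */
	printf("ack up to %u, cursor at %u\n",
	       d.sk_copied_seq, d.psock_copied_seq);	/* 1180, 1300 */
	return 0;
}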
+diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
+index 0f523cbfe329ef..32b28fc21b63c0 100644
+--- a/net/ipv4/tcp_fastopen.c
++++ b/net/ipv4/tcp_fastopen.c
+@@ -178,7 +178,7 @@ void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb)
+ 	if (!skb)
+ 		return;
+ 
+-	skb_dst_drop(skb);
++	tcp_cleanup_skb(skb);
+ 	/* segs_in has been initialized to 1 in tcp_create_openreq_child().
+ 	 * Hence, reset segs_in to 0 before calling tcp_segs_in()
+ 	 * to avoid double counting.  Also, tcp_segs_in() expects
+@@ -195,7 +195,7 @@ void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb)
+ 	TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_SYN;
+ 
+ 	tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
+-	__skb_queue_tail(&sk->sk_receive_queue, skb);
++	tcp_add_receive_queue(sk, skb);
+ 	tp->syn_data_acked = 1;
+ 
+ 	/* u64_stats_update_begin(&tp->syncp) not needed here,
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 2d43b29da15e20..d93a5a89c5692d 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -243,9 +243,15 @@ static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
+ 			do_div(val, skb->truesize);
+ 			tcp_sk(sk)->scaling_ratio = val ? val : 1;
+ 
+-			if (old_ratio != tcp_sk(sk)->scaling_ratio)
+-				WRITE_ONCE(tcp_sk(sk)->window_clamp,
+-					   tcp_win_from_space(sk, sk->sk_rcvbuf));
++			if (old_ratio != tcp_sk(sk)->scaling_ratio) {
++				struct tcp_sock *tp = tcp_sk(sk);
++
++				val = tcp_win_from_space(sk, sk->sk_rcvbuf);
++				tcp_set_window_clamp(sk, val);
++
++				if (tp->window_clamp < tp->rcvq_space.space)
++					tp->rcvq_space.space = tp->window_clamp;
++			}
+ 		}
+ 		icsk->icsk_ack.rcv_mss = min_t(unsigned int, len,
+ 					       tcp_sk(sk)->advmss);
+@@ -4964,7 +4970,7 @@ static void tcp_ofo_queue(struct sock *sk)
+ 		tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq);
+ 		fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
+ 		if (!eaten)
+-			__skb_queue_tail(&sk->sk_receive_queue, skb);
++			tcp_add_receive_queue(sk, skb);
+ 		else
+ 			kfree_skb_partial(skb, fragstolen);
+ 
+@@ -5156,7 +5162,7 @@ static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb,
+ 				  skb, fragstolen)) ? 1 : 0;
+ 	tcp_rcv_nxt_update(tcp_sk(sk), TCP_SKB_CB(skb)->end_seq);
+ 	if (!eaten) {
+-		__skb_queue_tail(&sk->sk_receive_queue, skb);
++		tcp_add_receive_queue(sk, skb);
+ 		skb_set_owner_r(skb, sk);
+ 	}
+ 	return eaten;
+@@ -5239,7 +5245,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
+ 		__kfree_skb(skb);
+ 		return;
+ 	}
+-	skb_dst_drop(skb);
++	tcp_cleanup_skb(skb);
+ 	__skb_pull(skb, tcp_hdr(skb)->doff * 4);
+ 
+ 	reason = SKB_DROP_REASON_NOT_SPECIFIED;
+@@ -6208,7 +6214,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb)
+ 			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPHITS);
+ 
+ 			/* Bulk data transfer: receiver */
+-			skb_dst_drop(skb);
++			tcp_cleanup_skb(skb);
+ 			__skb_pull(skb, tcp_header_len);
+ 			eaten = tcp_queue_rcv(sk, skb, &fragstolen);
+ 
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index bcc2f1e090c7db..824048679e1b8f 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -2025,7 +2025,7 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
+ 	 */
+ 	skb_condense(skb);
+ 
+-	skb_dst_drop(skb);
++	tcp_cleanup_skb(skb);
+ 
+ 	if (unlikely(tcp_checksum_complete(skb))) {
+ 		bh_unlock_sock(sk);
+diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
+index dfa3067084948f..998ea3b5badfce 100644
+--- a/net/sched/cls_api.c
++++ b/net/sched/cls_api.c
+@@ -97,7 +97,7 @@ tcf_exts_miss_cookie_base_alloc(struct tcf_exts *exts, struct tcf_proto *tp,
+ 
+ 	err = xa_alloc_cyclic(&tcf_exts_miss_cookies_xa, &n->miss_cookie_base,
+ 			      n, xa_limit_32b, &next, GFP_KERNEL);
+-	if (err)
++	if (err < 0)
+ 		goto err_xa_alloc;
+ 
+ 	exts->miss_cookie_node = n;
+diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c
+index 8299ceb3e3739d..95696f42647ec1 100644
+--- a/net/strparser/strparser.c
++++ b/net/strparser/strparser.c
+@@ -347,7 +347,10 @@ static int strp_read_sock(struct strparser *strp)
+ 	struct socket *sock = strp->sk->sk_socket;
+ 	read_descriptor_t desc;
+ 
+-	if (unlikely(!sock || !sock->ops || !sock->ops->read_sock))
++	if (unlikely(!sock || !sock->ops))
++		return -EBUSY;
++
++	if (unlikely(!strp->cb.read_sock && !sock->ops->read_sock))
+ 		return -EBUSY;
+ 
+ 	desc.arg.data = strp;
+@@ -355,7 +358,10 @@ static int strp_read_sock(struct strparser *strp)
+ 	desc.count = 1; /* give more than one skb per call */
+ 
+ 	/* sk should be locked here, so okay to do read_sock */
+-	sock->ops->read_sock(strp->sk, &desc, strp_recv);
++	if (strp->cb.read_sock)
++		strp->cb.read_sock(strp, &desc, strp_recv);
++	else
++		sock->ops->read_sock(strp->sk, &desc, strp_recv);
+ 
+ 	desc.error = strp->cb.read_sock_done(strp, desc.error);
+ 
+@@ -468,6 +474,7 @@ int strp_init(struct strparser *strp, struct sock *sk,
+ 	strp->cb.unlock = cb->unlock ? : strp_sock_unlock;
+ 	strp->cb.rcv_msg = cb->rcv_msg;
+ 	strp->cb.parse_msg = cb->parse_msg;
++	strp->cb.read_sock = cb->read_sock;
+ 	strp->cb.read_sock_done = cb->read_sock_done ? : default_read_sock_done;
+ 	strp->cb.abort_parser = cb->abort_parser ? : strp_abort_strp;
+ 
+diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
+index 37299a7ca1876e..eb6ea26b390ee8 100644
+--- a/net/vmw_vsock/af_vsock.c
++++ b/net/vmw_vsock/af_vsock.c
+@@ -1189,6 +1189,9 @@ static int vsock_read_skb(struct sock *sk, skb_read_actor_t read_actor)
+ {
+ 	struct vsock_sock *vsk = vsock_sk(sk);
+ 
++	if (WARN_ON_ONCE(!vsk->transport))
++		return -ENODEV;
++
+ 	return vsk->transport->read_skb(vsk, read_actor);
+ }
+ 
+diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
+index b58c3818f284f1..f0e48e6911fc46 100644
+--- a/net/vmw_vsock/virtio_transport.c
++++ b/net/vmw_vsock/virtio_transport.c
+@@ -670,6 +670,13 @@ static int virtio_vsock_vqs_init(struct virtio_vsock *vsock)
+ 	};
+ 	int ret;
+ 
++	mutex_lock(&vsock->rx_lock);
++	vsock->rx_buf_nr = 0;
++	vsock->rx_buf_max_nr = 0;
++	mutex_unlock(&vsock->rx_lock);
++
++	atomic_set(&vsock->queued_replies, 0);
++
+ 	ret = virtio_find_vqs(vdev, VSOCK_VQ_MAX, vsock->vqs, vqs_info, NULL);
+ 	if (ret < 0)
+ 		return ret;
+@@ -779,9 +786,6 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
+ 
+ 	vsock->vdev = vdev;
+ 
+-	vsock->rx_buf_nr = 0;
+-	vsock->rx_buf_max_nr = 0;
+-	atomic_set(&vsock->queued_replies, 0);
+ 
+ 	mutex_init(&vsock->tx_lock);
+ 	mutex_init(&vsock->rx_lock);
+diff --git a/net/vmw_vsock/vsock_bpf.c b/net/vmw_vsock/vsock_bpf.c
+index f201d9eca1df2f..07b96d56f3a577 100644
+--- a/net/vmw_vsock/vsock_bpf.c
++++ b/net/vmw_vsock/vsock_bpf.c
+@@ -87,7 +87,7 @@ static int vsock_bpf_recvmsg(struct sock *sk, struct msghdr *msg,
+ 	lock_sock(sk);
+ 	vsk = vsock_sk(sk);
+ 
+-	if (!vsk->transport) {
++	if (WARN_ON_ONCE(!vsk->transport)) {
+ 		copied = -ENODEV;
+ 		goto out;
+ 	}
+diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
+index 77b6ac9b5c11bc..9955c4d54e42a7 100644
+--- a/sound/core/seq/seq_clientmgr.c
++++ b/sound/core/seq/seq_clientmgr.c
+@@ -678,12 +678,18 @@ static int snd_seq_deliver_single_event(struct snd_seq_client *client,
+ 					  dest_port->time_real);
+ 
+ #if IS_ENABLED(CONFIG_SND_SEQ_UMP)
+-	if (!(dest->filter & SNDRV_SEQ_FILTER_NO_CONVERT)) {
+-		if (snd_seq_ev_is_ump(event)) {
++	if (snd_seq_ev_is_ump(event)) {
++		if (!(dest->filter & SNDRV_SEQ_FILTER_NO_CONVERT)) {
+ 			result = snd_seq_deliver_from_ump(client, dest, dest_port,
+ 							  event, atomic, hop);
+ 			goto __skip;
+-		} else if (snd_seq_client_is_ump(dest)) {
++		} else if (dest->type == USER_CLIENT &&
++			   !snd_seq_client_is_ump(dest)) {
++			result = 0; // drop the event
++			goto __skip;
++		}
++	} else if (snd_seq_client_is_ump(dest)) {
++		if (!(dest->filter & SNDRV_SEQ_FILTER_NO_CONVERT)) {
+ 			result = snd_seq_deliver_to_ump(client, dest, dest_port,
+ 							event, atomic, hop);
+ 			goto __skip;
+diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
+index 14763c0f31ad9f..46a2204049993d 100644
+--- a/sound/pci/hda/hda_codec.c
++++ b/sound/pci/hda/hda_codec.c
+@@ -2470,7 +2470,9 @@ int snd_hda_create_dig_out_ctls(struct hda_codec *codec,
+ 				break;
+ 			id = kctl->id;
+ 			id.index = spdif_index;
+-			snd_ctl_rename_id(codec->card, &kctl->id, &id);
++			err = snd_ctl_rename_id(codec->card, &kctl->id, &id);
++			if (err < 0)
++				return err;
+ 		}
+ 		bus->primary_dig_out_type = HDA_PCM_TYPE_HDMI;
+ 	}
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index 538c37a78a56f7..84ab357b840d67 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -1080,6 +1080,7 @@ static const struct hda_quirk cxt5066_fixups[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO),
+ 	SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
+ 	SND_PCI_QUIRK(0x103c, 0x822e, "HP ProBook 440 G4", CXT_FIXUP_MUTE_LED_GPIO),
++	SND_PCI_QUIRK(0x103c, 0x8231, "HP ProBook 450 G4", CXT_FIXUP_MUTE_LED_GPIO),
+ 	SND_PCI_QUIRK(0x103c, 0x828c, "HP EliteBook 840 G4", CXT_FIXUP_HP_DOCK),
+ 	SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE),
+diff --git a/sound/pci/hda/patch_cs8409-tables.c b/sound/pci/hda/patch_cs8409-tables.c
+index 759f48038273df..621f947e38174d 100644
+--- a/sound/pci/hda/patch_cs8409-tables.c
++++ b/sound/pci/hda/patch_cs8409-tables.c
+@@ -121,7 +121,7 @@ static const struct cs8409_i2c_param cs42l42_init_reg_seq[] = {
+ 	{ CS42L42_MIXER_CHA_VOL, 0x3F },
+ 	{ CS42L42_MIXER_CHB_VOL, 0x3F },
+ 	{ CS42L42_MIXER_ADC_VOL, 0x3f },
+-	{ CS42L42_HP_CTL, 0x03 },
++	{ CS42L42_HP_CTL, 0x0D },
+ 	{ CS42L42_MIC_DET_CTL1, 0xB6 },
+ 	{ CS42L42_TIPSENSE_CTL, 0xC2 },
+ 	{ CS42L42_HS_CLAMP_DISABLE, 0x01 },
+@@ -315,7 +315,7 @@ static const struct cs8409_i2c_param dolphin_c0_init_reg_seq[] = {
+ 	{ CS42L42_ASP_TX_SZ_EN, 0x01 },
+ 	{ CS42L42_PWR_CTL1, 0x0A },
+ 	{ CS42L42_PWR_CTL2, 0x84 },
+-	{ CS42L42_HP_CTL, 0x03 },
++	{ CS42L42_HP_CTL, 0x0D },
+ 	{ CS42L42_MIXER_CHA_VOL, 0x3F },
+ 	{ CS42L42_MIXER_CHB_VOL, 0x3F },
+ 	{ CS42L42_MIXER_ADC_VOL, 0x3f },
+@@ -371,7 +371,7 @@ static const struct cs8409_i2c_param dolphin_c1_init_reg_seq[] = {
+ 	{ CS42L42_ASP_TX_SZ_EN, 0x00 },
+ 	{ CS42L42_PWR_CTL1, 0x0E },
+ 	{ CS42L42_PWR_CTL2, 0x84 },
+-	{ CS42L42_HP_CTL, 0x01 },
++	{ CS42L42_HP_CTL, 0x0D },
+ 	{ CS42L42_MIXER_CHA_VOL, 0x3F },
+ 	{ CS42L42_MIXER_CHB_VOL, 0x3F },
+ 	{ CS42L42_MIXER_ADC_VOL, 0x3f },
+diff --git a/sound/pci/hda/patch_cs8409.c b/sound/pci/hda/patch_cs8409.c
+index 614327218634c0..b760332a4e3577 100644
+--- a/sound/pci/hda/patch_cs8409.c
++++ b/sound/pci/hda/patch_cs8409.c
+@@ -876,7 +876,7 @@ static void cs42l42_resume(struct sub_codec *cs42l42)
+ 		{ CS42L42_DET_INT_STATUS2, 0x00 },
+ 		{ CS42L42_TSRS_PLUG_STATUS, 0x00 },
+ 	};
+-	int fsv_old, fsv_new;
++	unsigned int fsv;
+ 
+ 	/* Bring CS42L42 out of Reset */
+ 	spec->gpio_data = snd_hda_codec_read(codec, CS8409_PIN_AFG, 0, AC_VERB_GET_GPIO_DATA, 0);
+@@ -893,13 +893,15 @@ static void cs42l42_resume(struct sub_codec *cs42l42)
+ 	/* Clear interrupts, by reading interrupt status registers */
+ 	cs8409_i2c_bulk_read(cs42l42, irq_regs, ARRAY_SIZE(irq_regs));
+ 
+-	fsv_old = cs8409_i2c_read(cs42l42, CS42L42_HP_CTL);
+-	if (cs42l42->full_scale_vol == CS42L42_FULL_SCALE_VOL_0DB)
+-		fsv_new = fsv_old & ~CS42L42_FULL_SCALE_VOL_MASK;
+-	else
+-		fsv_new = fsv_old & CS42L42_FULL_SCALE_VOL_MASK;
+-	if (fsv_new != fsv_old)
+-		cs8409_i2c_write(cs42l42, CS42L42_HP_CTL, fsv_new);
++	fsv = cs8409_i2c_read(cs42l42, CS42L42_HP_CTL);
++	if (cs42l42->full_scale_vol) {
++		// Set the full scale volume bit
++		fsv |= CS42L42_FULL_SCALE_VOL_MASK;
++		cs8409_i2c_write(cs42l42, CS42L42_HP_CTL, fsv);
++	}
++	// Unmute analog channels A and B
++	fsv = (fsv & ~CS42L42_ANA_MUTE_AB);
++	cs8409_i2c_write(cs42l42, CS42L42_HP_CTL, fsv);
+ 
+ 	/* we have to explicitly allow unsol event handling even during the
+ 	 * resume phase so that the jack event is processed properly
+@@ -920,7 +922,7 @@ static void cs42l42_suspend(struct sub_codec *cs42l42)
+ 		{ CS42L42_MIXER_CHA_VOL, 0x3F },
+ 		{ CS42L42_MIXER_ADC_VOL, 0x3F },
+ 		{ CS42L42_MIXER_CHB_VOL, 0x3F },
+-		{ CS42L42_HP_CTL, 0x0F },
++		{ CS42L42_HP_CTL, 0x0D },
+ 		{ CS42L42_ASP_RX_DAI0_EN, 0x00 },
+ 		{ CS42L42_ASP_CLK_CFG, 0x00 },
+ 		{ CS42L42_PWR_CTL1, 0xFE },
+diff --git a/sound/pci/hda/patch_cs8409.h b/sound/pci/hda/patch_cs8409.h
+index 5e48115caf096b..14645d25e70fd2 100644
+--- a/sound/pci/hda/patch_cs8409.h
++++ b/sound/pci/hda/patch_cs8409.h
+@@ -230,9 +230,10 @@ enum cs8409_coefficient_index_registers {
+ #define CS42L42_PDN_TIMEOUT_US			(250000)
+ #define CS42L42_PDN_SLEEP_US			(2000)
+ #define CS42L42_INIT_TIMEOUT_MS			(45)
++#define CS42L42_ANA_MUTE_AB			(0x0C)
+ #define CS42L42_FULL_SCALE_VOL_MASK		(2)
+-#define CS42L42_FULL_SCALE_VOL_0DB		(1)
+-#define CS42L42_FULL_SCALE_VOL_MINUS6DB		(0)
++#define CS42L42_FULL_SCALE_VOL_0DB		(0)
++#define CS42L42_FULL_SCALE_VOL_MINUS6DB		(1)
+ 
+ /* Dell BULLSEYE / WARLOCK / CYBORG Specific Definitions */
+ 
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index f3f849b96402d1..9bf99fe6cd34dd 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -3790,6 +3790,7 @@ static void alc225_init(struct hda_codec *codec)
+ 				    AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE);
+ 
+ 		msleep(75);
++		alc_update_coef_idx(codec, 0x4a, 3 << 10, 0);
+ 		alc_update_coefex_idx(codec, 0x57, 0x04, 0x0007, 0x4); /* High power */
+ 	}
+ }
+diff --git a/sound/soc/fsl/fsl_micfil.c b/sound/soc/fsl/fsl_micfil.c
+index 67c2d4cb0dea21..7cfe77b57b3c25 100644
+--- a/sound/soc/fsl/fsl_micfil.c
++++ b/sound/soc/fsl/fsl_micfil.c
+@@ -156,6 +156,8 @@ static int micfil_set_quality(struct fsl_micfil *micfil)
+ 	case QUALITY_VLOW2:
+ 		qsel = MICFIL_QSEL_VLOW2_QUALITY;
+ 		break;
++	default:
++		return -EINVAL;
+ 	}
+ 
+ 	return regmap_update_bits(micfil->regmap, REG_MICFIL_CTRL2,
+diff --git a/sound/soc/fsl/imx-audmix.c b/sound/soc/fsl/imx-audmix.c
+index 8e7b75cf64db42..ff3671226306bd 100644
+--- a/sound/soc/fsl/imx-audmix.c
++++ b/sound/soc/fsl/imx-audmix.c
+@@ -23,7 +23,6 @@ struct imx_audmix {
+ 	struct snd_soc_card card;
+ 	struct platform_device *audmix_pdev;
+ 	struct platform_device *out_pdev;
+-	struct clk *cpu_mclk;
+ 	int num_dai;
+ 	struct snd_soc_dai_link *dai;
+ 	int num_dai_conf;
+@@ -32,34 +31,11 @@ struct imx_audmix {
+ 	struct snd_soc_dapm_route *dapm_routes;
+ };
+ 
+-static const u32 imx_audmix_rates[] = {
+-	8000, 12000, 16000, 24000, 32000, 48000, 64000, 96000,
+-};
+-
+-static const struct snd_pcm_hw_constraint_list imx_audmix_rate_constraints = {
+-	.count = ARRAY_SIZE(imx_audmix_rates),
+-	.list = imx_audmix_rates,
+-};
+-
+ static int imx_audmix_fe_startup(struct snd_pcm_substream *substream)
+ {
+-	struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
+-	struct imx_audmix *priv = snd_soc_card_get_drvdata(rtd->card);
+ 	struct snd_pcm_runtime *runtime = substream->runtime;
+-	struct device *dev = rtd->card->dev;
+-	unsigned long clk_rate = clk_get_rate(priv->cpu_mclk);
+ 	int ret;
+ 
+-	if (clk_rate % 24576000 == 0) {
+-		ret = snd_pcm_hw_constraint_list(runtime, 0,
+-						 SNDRV_PCM_HW_PARAM_RATE,
+-						 &imx_audmix_rate_constraints);
+-		if (ret < 0)
+-			return ret;
+-	} else {
+-		dev_warn(dev, "mclk may be not supported %lu\n", clk_rate);
+-	}
+-
+ 	ret = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_CHANNELS,
+ 					   1, 8);
+ 	if (ret < 0)
+@@ -325,13 +301,6 @@ static int imx_audmix_probe(struct platform_device *pdev)
+ 	}
+ 	put_device(&cpu_pdev->dev);
+ 
+-	priv->cpu_mclk = devm_clk_get(&cpu_pdev->dev, "mclk1");
+-	if (IS_ERR(priv->cpu_mclk)) {
+-		ret = PTR_ERR(priv->cpu_mclk);
+-		dev_err(&cpu_pdev->dev, "failed to get DAI mclk1: %d\n", ret);
+-		return ret;
+-	}
+-
+ 	priv->audmix_pdev = audmix_pdev;
+ 	priv->out_pdev  = cpu_pdev;
+ 
+diff --git a/sound/soc/rockchip/rockchip_i2s_tdm.c b/sound/soc/rockchip/rockchip_i2s_tdm.c
+index acd75e48851fcf..7feefeb6b876dc 100644
+--- a/sound/soc/rockchip/rockchip_i2s_tdm.c
++++ b/sound/soc/rockchip/rockchip_i2s_tdm.c
+@@ -451,11 +451,11 @@ static int rockchip_i2s_tdm_set_fmt(struct snd_soc_dai *cpu_dai,
+ 			break;
+ 		case SND_SOC_DAIFMT_DSP_A:
+ 			val = I2S_TXCR_TFS_TDM_PCM;
+-			tdm_val = TDM_SHIFT_CTRL(0);
++			tdm_val = TDM_SHIFT_CTRL(2);
+ 			break;
+ 		case SND_SOC_DAIFMT_DSP_B:
+ 			val = I2S_TXCR_TFS_TDM_PCM;
+-			tdm_val = TDM_SHIFT_CTRL(2);
++			tdm_val = TDM_SHIFT_CTRL(4);
+ 			break;
+ 		default:
+ 			ret = -EINVAL;
+diff --git a/sound/soc/sh/rz-ssi.c b/sound/soc/sh/rz-ssi.c
+index 32db2cead8a4ec..4f483bfa584f5b 100644
+--- a/sound/soc/sh/rz-ssi.c
++++ b/sound/soc/sh/rz-ssi.c
+@@ -416,8 +416,12 @@ static int rz_ssi_stop(struct rz_ssi_priv *ssi, struct rz_ssi_stream *strm)
+ 	rz_ssi_reg_mask_setl(ssi, SSICR, SSICR_TEN | SSICR_REN, 0);
+ 
+ 	/* Cancel all remaining DMA transactions */
+-	if (rz_ssi_is_dma_enabled(ssi))
+-		dmaengine_terminate_async(strm->dma_ch);
++	if (rz_ssi_is_dma_enabled(ssi)) {
++		if (ssi->playback.dma_ch)
++			dmaengine_terminate_async(ssi->playback.dma_ch);
++		if (ssi->capture.dma_ch)
++			dmaengine_terminate_async(ssi->capture.dma_ch);
++	}
+ 
+ 	rz_ssi_set_idle(ssi);
+ 
+@@ -524,6 +528,8 @@ static int rz_ssi_pio_send(struct rz_ssi_priv *ssi, struct rz_ssi_stream *strm)
+ 	sample_space = strm->fifo_sample_size;
+ 	ssifsr = rz_ssi_reg_readl(ssi, SSIFSR);
+ 	sample_space -= (ssifsr >> SSIFSR_TDC_SHIFT) & SSIFSR_TDC_MASK;
++	if (sample_space < 0)
++		return -EINVAL;
+ 
+ 	/* Only add full frames at a time */
+ 	while (frames_left && (sample_space >= runtime->channels)) {
+diff --git a/sound/soc/sof/ipc4-topology.c b/sound/soc/sof/ipc4-topology.c
+index 240fee2166d125..f82db7f2a6b7e7 100644
+--- a/sound/soc/sof/ipc4-topology.c
++++ b/sound/soc/sof/ipc4-topology.c
+@@ -671,10 +671,16 @@ static int sof_ipc4_widget_setup_comp_dai(struct snd_sof_widget *swidget)
+ 		}
+ 
+ 		list_for_each_entry(w, &sdev->widget_list, list) {
+-			if (w->widget->sname &&
++			struct snd_sof_dai *alh_dai;
++
++			if (!WIDGET_IS_DAI(w->id) || !w->widget->sname ||
+ 			    strcmp(w->widget->sname, swidget->widget->sname))
+ 				continue;
+ 
++			alh_dai = w->private;
++			if (alh_dai->type != SOF_DAI_INTEL_ALH)
++				continue;
++
+ 			blob->alh_cfg.device_count++;
+ 		}
+ 
+@@ -1973,11 +1979,13 @@ sof_ipc4_prepare_copier_module(struct snd_sof_widget *swidget,
+ 			list_for_each_entry(w, &sdev->widget_list, list) {
+ 				u32 node_type;
+ 
+-				if (w->widget->sname &&
++				if (!WIDGET_IS_DAI(w->id) || !w->widget->sname ||
+ 				    strcmp(w->widget->sname, swidget->widget->sname))
+ 					continue;
+ 
+ 				dai = w->private;
++				if (dai->type != SOF_DAI_INTEL_ALH)
++					continue;
+ 				alh_copier = (struct sof_ipc4_copier *)dai->private;
+ 				alh_data = &alh_copier->data;
+ 				node_type = SOF_IPC4_GET_NODE_TYPE(alh_data->gtw_cfg.node_id);
+diff --git a/sound/soc/sof/pcm.c b/sound/soc/sof/pcm.c
+index 35a7462d8b6938..c5c6353f18ceef 100644
+--- a/sound/soc/sof/pcm.c
++++ b/sound/soc/sof/pcm.c
+@@ -511,6 +511,8 @@ static int sof_pcm_close(struct snd_soc_component *component,
+ 		 */
+ 	}
+ 
++	spcm->stream[substream->stream].substream = NULL;
++
+ 	return 0;
+ }
+ 
+diff --git a/sound/soc/sof/stream-ipc.c b/sound/soc/sof/stream-ipc.c
+index 794c7bbccbaf92..8262443ac89ad1 100644
+--- a/sound/soc/sof/stream-ipc.c
++++ b/sound/soc/sof/stream-ipc.c
+@@ -43,7 +43,7 @@ int sof_ipc_msg_data(struct snd_sof_dev *sdev,
+ 				return -ESTRPIPE;
+ 
+ 			posn_offset = stream->posn_offset;
+-		} else {
++		} else if (sps->cstream) {
+ 
+ 			struct sof_compr_stream *sstream = sps->cstream->runtime->private_data;
+ 
+@@ -51,6 +51,10 @@ int sof_ipc_msg_data(struct snd_sof_dev *sdev,
+ 				return -ESTRPIPE;
+ 
+ 			posn_offset = sstream->posn_offset;
++
++		} else {
++			dev_err(sdev->dev, "%s: No stream opened\n", __func__);
++			return -EINVAL;
+ 		}
+ 
+ 		snd_sof_dsp_mailbox_read(sdev, posn_offset, p, sz);
+diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod-events.h b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod-events.h
+index 6c3b4d4f173ac6..aeef86b3da747a 100644
+--- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod-events.h
++++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod-events.h
+@@ -40,6 +40,14 @@ DECLARE_TRACE(bpf_testmod_test_nullable_bare,
+ 	TP_ARGS(ctx__nullable)
+ );
+ 
++struct sk_buff;
++
++DECLARE_TRACE(bpf_testmod_test_raw_tp_null,
++	TP_PROTO(struct sk_buff *skb),
++	TP_ARGS(skb)
++);
++
++
+ #undef BPF_TESTMOD_DECLARE_TRACE
+ #ifdef DECLARE_TRACE_WRITABLE
+ #define BPF_TESTMOD_DECLARE_TRACE(call, proto, args, size) \
+diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
+index 8835761d9a126a..4e6a9e9c036873 100644
+--- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
++++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
+@@ -380,6 +380,8 @@ bpf_testmod_test_read(struct file *file, struct kobject *kobj,
+ 
+ 	(void)bpf_testmod_test_arg_ptr_to_struct(&struct_arg1_2);
+ 
++	(void)trace_bpf_testmod_test_raw_tp_null(NULL);
++
+ 	struct_arg3 = kmalloc((sizeof(struct bpf_testmod_struct_arg_3) +
+ 				sizeof(int)), GFP_KERNEL);
+ 	if (struct_arg3 != NULL) {
+diff --git a/tools/testing/selftests/bpf/prog_tests/raw_tp_null.c b/tools/testing/selftests/bpf/prog_tests/raw_tp_null.c
+new file mode 100644
+index 00000000000000..6fa19449297e9b
+--- /dev/null
++++ b/tools/testing/selftests/bpf/prog_tests/raw_tp_null.c
+@@ -0,0 +1,25 @@
++// SPDX-License-Identifier: GPL-2.0
++/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
++
++#include <test_progs.h>
++#include "raw_tp_null.skel.h"
++
++void test_raw_tp_null(void)
++{
++	struct raw_tp_null *skel;
++
++	skel = raw_tp_null__open_and_load();
++	if (!ASSERT_OK_PTR(skel, "raw_tp_null__open_and_load"))
++		return;
++
++	skel->bss->tid = sys_gettid();
++
++	if (!ASSERT_OK(raw_tp_null__attach(skel), "raw_tp_null__attach"))
++		goto end;
++
++	ASSERT_OK(trigger_module_test_read(2), "trigger testmod read");
++	ASSERT_EQ(skel->bss->i, 3, "invocations");
++
++end:
++	raw_tp_null__destroy(skel);
++}
+diff --git a/tools/testing/selftests/bpf/progs/raw_tp_null.c b/tools/testing/selftests/bpf/progs/raw_tp_null.c
+new file mode 100644
+index 00000000000000..457f34c151e32f
+--- /dev/null
++++ b/tools/testing/selftests/bpf/progs/raw_tp_null.c
+@@ -0,0 +1,32 @@
++// SPDX-License-Identifier: GPL-2.0
++/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
++
++#include <vmlinux.h>
++#include <bpf/bpf_tracing.h>
++
++char _license[] SEC("license") = "GPL";
++
++int tid;
++int i;
++
++SEC("tp_btf/bpf_testmod_test_raw_tp_null")
++int BPF_PROG(test_raw_tp_null, struct sk_buff *skb)
++{
++	struct task_struct *task = bpf_get_current_task_btf();
++
++	if (task->pid != tid)
++		return 0;
++
++	i = i + skb->mark + 1;
++	/* The compiler may move the NULL check before this deref, which causes
++	 * the load to fail as deref of scalar. Prevent that by using a barrier.
++	 */
++	barrier();
++	/* If dead code elimination kicks in, the increment below will
++	 * be removed. For raw_tp programs, we mark input arguments as
++	 * PTR_MAYBE_NULL, so branch prediction should never kick in.
++	 */
++	if (!skb)
++		i += 2;
++	return 0;
++}
+diff --git a/tools/testing/selftests/mm/Makefile b/tools/testing/selftests/mm/Makefile
+index 02e1204971b0a8..c0138cb19705bc 100644
+--- a/tools/testing/selftests/mm/Makefile
++++ b/tools/testing/selftests/mm/Makefile
+@@ -33,9 +33,16 @@ endif
+ # LDLIBS.
+ MAKEFLAGS += --no-builtin-rules
+ 
+-CFLAGS = -Wall -I $(top_srcdir) $(EXTRA_CFLAGS) $(KHDR_INCLUDES) $(TOOLS_INCLUDES)
++CFLAGS = -Wall -O2 -I $(top_srcdir) $(EXTRA_CFLAGS) $(KHDR_INCLUDES) $(TOOLS_INCLUDES)
+ LDLIBS = -lrt -lpthread -lm
+ 
++# Some distributions (such as Ubuntu) configure GCC so that _FORTIFY_SOURCE is
++# automatically enabled at -O1 or above. This triggers various unused-result
++# warnings where functions such as read() or write() are called and their
++# return value is not checked. Disable _FORTIFY_SOURCE to silence those
++# warnings.
++CFLAGS += -U_FORTIFY_SOURCE
++
+ TEST_GEN_FILES = cow
+ TEST_GEN_FILES += compaction_test
+ TEST_GEN_FILES += gup_longterm


* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2025-02-21 13:31 Mike Pagano
  0 siblings, 0 replies; 82+ messages in thread
From: Mike Pagano @ 2025-02-21 13:31 UTC (permalink / raw
  To: gentoo-commits

commit:     ce2243f5071849f131d7ebfffb21858a8b0fb12a
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Feb 21 13:30:53 2025 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Feb 21 13:30:53 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=ce2243f5

Linux patch 6.12.16

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1015_linux-6.12.16.patch | 9009 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 9013 insertions(+)

diff --git a/0000_README b/0000_README
index f6cd3204..9f0c3a67 100644
--- a/0000_README
+++ b/0000_README
@@ -103,6 +103,10 @@ Patch:  1014_linux-6.12.15.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.12.15
 
+Patch:  1015_linux-6.12.16.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.12.16
+
 Patch:  1500_fortify-copy-size-value-range-tracking-fix.patch
 From:   https://git.kernel.org/
 Desc:   fortify: Hide run-time copy size from value range tracking

diff --git a/1015_linux-6.12.16.patch b/1015_linux-6.12.16.patch
new file mode 100644
index 00000000..6d524d6b
--- /dev/null
+++ b/1015_linux-6.12.16.patch
@@ -0,0 +1,9009 @@
+diff --git a/Documentation/devicetree/bindings/regulator/qcom,smd-rpm-regulator.yaml b/Documentation/devicetree/bindings/regulator/qcom,smd-rpm-regulator.yaml
+index f2fd2df68a9ed9..b7241ce975b961 100644
+--- a/Documentation/devicetree/bindings/regulator/qcom,smd-rpm-regulator.yaml
++++ b/Documentation/devicetree/bindings/regulator/qcom,smd-rpm-regulator.yaml
+@@ -22,7 +22,7 @@ description:
+   Each sub-node is identified using the node's name, with valid values listed
+   for each of the pmics below.
+ 
+-  For mp5496, s1, s2
++  For mp5496, s1, s2, l2, l5
+ 
+   For pm2250, s1, s2, s3, s4, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11,
+   l12, l13, l14, l15, l16, l17, l18, l19, l20, l21, l22
+diff --git a/Documentation/networking/iso15765-2.rst b/Documentation/networking/iso15765-2.rst
+index 0e9d960741783b..37ebb2c417cb44 100644
+--- a/Documentation/networking/iso15765-2.rst
++++ b/Documentation/networking/iso15765-2.rst
+@@ -369,8 +369,8 @@ to their default.
+ 
+   addr.can_family = AF_CAN;
+   addr.can_ifindex = if_nametoindex("can0");
+-  addr.tp.tx_id = 0x18DA42F1 | CAN_EFF_FLAG;
+-  addr.tp.rx_id = 0x18DAF142 | CAN_EFF_FLAG;
++  addr.can_addr.tp.tx_id = 0x18DA42F1 | CAN_EFF_FLAG;
++  addr.can_addr.tp.rx_id = 0x18DAF142 | CAN_EFF_FLAG;
+ 
+   ret = bind(s, (struct sockaddr *)&addr, sizeof(addr));
+   if (ret < 0)
+diff --git a/Makefile b/Makefile
+index c6918c620bc368..340da922fa4f2c 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 12
+-SUBLEVEL = 15
++SUBLEVEL = 16
+ EXTRAVERSION =
+ NAME = Baby Opossum Posse
+ 
+@@ -1057,8 +1057,8 @@ LDFLAGS_vmlinux += --orphan-handling=$(CONFIG_LD_ORPHAN_WARN_LEVEL)
+ endif
+ 
+ # Align the bit size of userspace programs with the kernel
+-KBUILD_USERCFLAGS  += $(filter -m32 -m64 --target=%, $(KBUILD_CFLAGS))
+-KBUILD_USERLDFLAGS += $(filter -m32 -m64 --target=%, $(KBUILD_CFLAGS))
++KBUILD_USERCFLAGS  += $(filter -m32 -m64 --target=%, $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS))
++KBUILD_USERLDFLAGS += $(filter -m32 -m64 --target=%, $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS))
+ 
+ # make the checker run with the right architecture
+ CHECKFLAGS += --arch=$(ARCH)
+@@ -1357,18 +1357,13 @@ ifneq ($(wildcard $(resolve_btfids_O)),)
+ 	$(Q)$(MAKE) -sC $(srctree)/tools/bpf/resolve_btfids O=$(resolve_btfids_O) clean
+ endif
+ 
+-# Clear a bunch of variables before executing the submake
+-ifeq ($(quiet),silent_)
+-tools_silent=s
+-endif
+-
+ tools/: FORCE
+ 	$(Q)mkdir -p $(objtree)/tools
+-	$(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(tools_silent) $(filter --j% -j,$(MAKEFLAGS))" O=$(abspath $(objtree)) subdir=tools -C $(srctree)/tools/
++	$(Q)$(MAKE) LDFLAGS= O=$(abspath $(objtree)) subdir=tools -C $(srctree)/tools/
+ 
+ tools/%: FORCE
+ 	$(Q)mkdir -p $(objtree)/tools
+-	$(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(tools_silent) $(filter --j% -j,$(MAKEFLAGS))" O=$(abspath $(objtree)) subdir=tools -C $(srctree)/tools/ $*
++	$(Q)$(MAKE) LDFLAGS= O=$(abspath $(objtree)) subdir=tools -C $(srctree)/tools/ $*
+ 
+ # ---------------------------------------------------------------------------
+ # Kernel selftest
+diff --git a/arch/alpha/include/uapi/asm/ptrace.h b/arch/alpha/include/uapi/asm/ptrace.h
+index 5ca45934fcbb82..72ed913a910f25 100644
+--- a/arch/alpha/include/uapi/asm/ptrace.h
++++ b/arch/alpha/include/uapi/asm/ptrace.h
+@@ -42,6 +42,8 @@ struct pt_regs {
+ 	unsigned long trap_a0;
+ 	unsigned long trap_a1;
+ 	unsigned long trap_a2;
++/* This makes the stack 16-byte aligned as GCC expects */
++	unsigned long __pad0;
+ /* These are saved by PAL-code: */
+ 	unsigned long ps;
+ 	unsigned long pc;
+diff --git a/arch/alpha/kernel/asm-offsets.c b/arch/alpha/kernel/asm-offsets.c
+index 4cfeae42c79ac7..e9dad60b147f33 100644
+--- a/arch/alpha/kernel/asm-offsets.c
++++ b/arch/alpha/kernel/asm-offsets.c
+@@ -19,9 +19,13 @@ static void __used foo(void)
+ 	DEFINE(TI_STATUS, offsetof(struct thread_info, status));
+ 	BLANK();
+ 
++	DEFINE(SP_OFF, offsetof(struct pt_regs, ps));
+ 	DEFINE(SIZEOF_PT_REGS, sizeof(struct pt_regs));
+ 	BLANK();
+ 
++	DEFINE(SWITCH_STACK_SIZE, sizeof(struct switch_stack));
++	BLANK();
++
+ 	DEFINE(HAE_CACHE, offsetof(struct alpha_machine_vector, hae_cache));
+ 	DEFINE(HAE_REG, offsetof(struct alpha_machine_vector, hae_register));
+ }
+diff --git a/arch/alpha/kernel/entry.S b/arch/alpha/kernel/entry.S
+index dd26062d75b3c5..f4d41b4538c2e8 100644
+--- a/arch/alpha/kernel/entry.S
++++ b/arch/alpha/kernel/entry.S
+@@ -15,10 +15,6 @@
+ 	.set noat
+ 	.cfi_sections	.debug_frame
+ 
+-/* Stack offsets.  */
+-#define SP_OFF			184
+-#define SWITCH_STACK_SIZE	64
+-
+ .macro	CFI_START_OSF_FRAME	func
+ 	.align	4
+ 	.globl	\func
+@@ -198,8 +194,8 @@ CFI_END_OSF_FRAME entArith
+ CFI_START_OSF_FRAME entMM
+ 	SAVE_ALL
+ /* save $9 - $15 so the inline exception code can manipulate them.  */
+-	subq	$sp, 56, $sp
+-	.cfi_adjust_cfa_offset	56
++	subq	$sp, 64, $sp
++	.cfi_adjust_cfa_offset	64
+ 	stq	$9, 0($sp)
+ 	stq	$10, 8($sp)
+ 	stq	$11, 16($sp)
+@@ -214,7 +210,7 @@ CFI_START_OSF_FRAME entMM
+ 	.cfi_rel_offset	$13, 32
+ 	.cfi_rel_offset	$14, 40
+ 	.cfi_rel_offset	$15, 48
+-	addq	$sp, 56, $19
++	addq	$sp, 64, $19
+ /* handle the fault */
+ 	lda	$8, 0x3fff
+ 	bic	$sp, $8, $8
+@@ -227,7 +223,7 @@ CFI_START_OSF_FRAME entMM
+ 	ldq	$13, 32($sp)
+ 	ldq	$14, 40($sp)
+ 	ldq	$15, 48($sp)
+-	addq	$sp, 56, $sp
++	addq	$sp, 64, $sp
+ 	.cfi_restore	$9
+ 	.cfi_restore	$10
+ 	.cfi_restore	$11
+@@ -235,7 +231,7 @@ CFI_START_OSF_FRAME entMM
+ 	.cfi_restore	$13
+ 	.cfi_restore	$14
+ 	.cfi_restore	$15
+-	.cfi_adjust_cfa_offset	-56
++	.cfi_adjust_cfa_offset	-64
+ /* finish up the syscall as normal.  */
+ 	br	ret_from_sys_call
+ CFI_END_OSF_FRAME entMM
+@@ -382,8 +378,8 @@ entUnaUser:
+ 	.cfi_restore	$0
+ 	.cfi_adjust_cfa_offset	-256
+ 	SAVE_ALL		/* setup normal kernel stack */
+-	lda	$sp, -56($sp)
+-	.cfi_adjust_cfa_offset	56
++	lda	$sp, -64($sp)
++	.cfi_adjust_cfa_offset	64
+ 	stq	$9, 0($sp)
+ 	stq	$10, 8($sp)
+ 	stq	$11, 16($sp)
+@@ -399,7 +395,7 @@ entUnaUser:
+ 	.cfi_rel_offset	$14, 40
+ 	.cfi_rel_offset	$15, 48
+ 	lda	$8, 0x3fff
+-	addq	$sp, 56, $19
++	addq	$sp, 64, $19
+ 	bic	$sp, $8, $8
+ 	jsr	$26, do_entUnaUser
+ 	ldq	$9, 0($sp)
+@@ -409,7 +405,7 @@ entUnaUser:
+ 	ldq	$13, 32($sp)
+ 	ldq	$14, 40($sp)
+ 	ldq	$15, 48($sp)
+-	lda	$sp, 56($sp)
++	lda	$sp, 64($sp)
+ 	.cfi_restore	$9
+ 	.cfi_restore	$10
+ 	.cfi_restore	$11
+@@ -417,7 +413,7 @@ entUnaUser:
+ 	.cfi_restore	$13
+ 	.cfi_restore	$14
+ 	.cfi_restore	$15
+-	.cfi_adjust_cfa_offset	-56
++	.cfi_adjust_cfa_offset	-64
+ 	br	ret_from_sys_call
+ CFI_END_OSF_FRAME entUna
+ 
+diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c
+index a9a38c80c4a7af..7004397937cfda 100644
+--- a/arch/alpha/kernel/traps.c
++++ b/arch/alpha/kernel/traps.c
+@@ -649,7 +649,7 @@ s_reg_to_mem (unsigned long s_reg)
+ static int unauser_reg_offsets[32] = {
+ 	R(r0), R(r1), R(r2), R(r3), R(r4), R(r5), R(r6), R(r7), R(r8),
+ 	/* r9 ... r15 are stored in front of regs.  */
+-	-56, -48, -40, -32, -24, -16, -8,
++	-64, -56, -48, -40, -32, -24, -16,	/* padding at -8 */
+ 	R(r16), R(r17), R(r18),
+ 	R(r19), R(r20), R(r21), R(r22), R(r23), R(r24), R(r25), R(r26),
+ 	R(r27), R(r28), R(gp),
+diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
+index 8c9850437e6744..a9816bbc9f34d3 100644
+--- a/arch/alpha/mm/fault.c
++++ b/arch/alpha/mm/fault.c
+@@ -78,8 +78,8 @@ __load_new_mm_context(struct mm_struct *next_mm)
+ 
+ /* Macro for exception fixup code to access integer registers.  */
+ #define dpf_reg(r)							\
+-	(((unsigned long *)regs)[(r) <= 8 ? (r) : (r) <= 15 ? (r)-16 :	\
+-				 (r) <= 18 ? (r)+10 : (r)-10])
++	(((unsigned long *)regs)[(r) <= 8 ? (r) : (r) <= 15 ? (r)-17 :	\
++				 (r) <= 18 ? (r)+11 : (r)-10])
+ 
+ asmlinkage void
+ do_page_fault(unsigned long address, unsigned long mmcsr,
+diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
+index 9efd3f37c2fd9d..19a4988621ac9a 100644
+--- a/arch/arm64/Makefile
++++ b/arch/arm64/Makefile
+@@ -48,7 +48,11 @@ KBUILD_CFLAGS	+= $(CC_FLAGS_NO_FPU) \
+ KBUILD_CFLAGS	+= $(call cc-disable-warning, psabi)
+ KBUILD_AFLAGS	+= $(compat_vdso)
+ 
++ifeq ($(call test-ge, $(CONFIG_RUSTC_VERSION), 108500),y)
++KBUILD_RUSTFLAGS += --target=aarch64-unknown-none-softfloat
++else
+ KBUILD_RUSTFLAGS += --target=aarch64-unknown-none -Ctarget-feature="-neon"
++endif
+ 
+ KBUILD_CFLAGS	+= $(call cc-option,-mabi=lp64)
+ KBUILD_AFLAGS	+= $(call cc-option,-mabi=lp64)
+diff --git a/arch/arm64/kernel/cacheinfo.c b/arch/arm64/kernel/cacheinfo.c
+index d9c9218fa1fddc..309942b06c5bc2 100644
+--- a/arch/arm64/kernel/cacheinfo.c
++++ b/arch/arm64/kernel/cacheinfo.c
+@@ -101,16 +101,18 @@ int populate_cache_leaves(unsigned int cpu)
+ 	unsigned int level, idx;
+ 	enum cache_type type;
+ 	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+-	struct cacheinfo *this_leaf = this_cpu_ci->info_list;
++	struct cacheinfo *infos = this_cpu_ci->info_list;
+ 
+ 	for (idx = 0, level = 1; level <= this_cpu_ci->num_levels &&
+-	     idx < this_cpu_ci->num_leaves; idx++, level++) {
++	     idx < this_cpu_ci->num_leaves; level++) {
+ 		type = get_cache_type(level);
+ 		if (type == CACHE_TYPE_SEPARATE) {
+-			ci_leaf_init(this_leaf++, CACHE_TYPE_DATA, level);
+-			ci_leaf_init(this_leaf++, CACHE_TYPE_INST, level);
++			if (idx + 1 >= this_cpu_ci->num_leaves)
++				break;
++			ci_leaf_init(&infos[idx++], CACHE_TYPE_DATA, level);
++			ci_leaf_init(&infos[idx++], CACHE_TYPE_INST, level);
+ 		} else {
+-			ci_leaf_init(this_leaf++, type, level);
++			ci_leaf_init(&infos[idx++], type, level);
+ 		}
+ 	}
+ 	return 0;
+diff --git a/arch/arm64/kernel/vdso/vdso.lds.S b/arch/arm64/kernel/vdso/vdso.lds.S
+index f204a9ddc83359..a3f1e895e2a670 100644
+--- a/arch/arm64/kernel/vdso/vdso.lds.S
++++ b/arch/arm64/kernel/vdso/vdso.lds.S
+@@ -41,6 +41,7 @@ SECTIONS
+ 	 */
+ 	/DISCARD/	: {
+ 		*(.note.GNU-stack .note.gnu.property)
++		*(.ARM.attributes)
+ 	}
+ 	.note		: { *(.note.*) }		:text	:note
+ 
+diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
+index f84c71f04d9ea9..e73326bd3ff7e9 100644
+--- a/arch/arm64/kernel/vmlinux.lds.S
++++ b/arch/arm64/kernel/vmlinux.lds.S
+@@ -162,6 +162,7 @@ SECTIONS
+ 	/DISCARD/ : {
+ 		*(.interp .dynamic)
+ 		*(.dynsym .dynstr .hash .gnu.hash)
++		*(.ARM.attributes)
+ 	}
+ 
+ 	. = KIMAGE_VADDR;
+diff --git a/arch/loongarch/kernel/genex.S b/arch/loongarch/kernel/genex.S
+index 86d5d90ebefe5b..4f09121417818d 100644
+--- a/arch/loongarch/kernel/genex.S
++++ b/arch/loongarch/kernel/genex.S
+@@ -18,16 +18,19 @@
+ 
+ 	.align	5
+ SYM_FUNC_START(__arch_cpu_idle)
+-	/* start of rollback region */
+-	LONG_L	t0, tp, TI_FLAGS
+-	nop
+-	andi	t0, t0, _TIF_NEED_RESCHED
+-	bnez	t0, 1f
+-	nop
+-	nop
+-	nop
++	/* start of idle interrupt region */
++	ori	t0, zero, CSR_CRMD_IE
++	/* idle instruction needs irq enabled */
++	csrxchg	t0, t0, LOONGARCH_CSR_CRMD
++	/*
++	 * If an interrupt lands here, between enabling interrupts above and
++	 * going idle on the next instruction, we must *NOT* go idle since the
++	 * interrupt could have set TIF_NEED_RESCHED or caused a timer to need
++	 * reprogramming. Fall through -- see handle_vint() below -- and have
++	 * the idle loop take care of things.
++	 */
+ 	idle	0
+-	/* end of rollback region */
++	/* end of idle interrupt region */
+ 1:	jr	ra
+ SYM_FUNC_END(__arch_cpu_idle)
+ 
+@@ -35,11 +38,10 @@ SYM_CODE_START(handle_vint)
+ 	UNWIND_HINT_UNDEFINED
+ 	BACKUP_T0T1
+ 	SAVE_ALL
+-	la_abs	t1, __arch_cpu_idle
++	la_abs	t1, 1b
+ 	LONG_L	t0, sp, PT_ERA
+-	/* 32 byte rollback region */
+-	ori	t0, t0, 0x1f
+-	xori	t0, t0, 0x1f
++	/* 3-instruction idle interrupt region */
++	ori	t0, t0, 0b1100
+ 	bne	t0, t1, 1f
+ 	LONG_S	t0, sp, PT_ERA
+ 1:	move	a0, sp
+diff --git a/arch/loongarch/kernel/idle.c b/arch/loongarch/kernel/idle.c
+index 0b5dd2faeb90b8..54b247d8cdb695 100644
+--- a/arch/loongarch/kernel/idle.c
++++ b/arch/loongarch/kernel/idle.c
+@@ -11,7 +11,6 @@
+ 
+ void __cpuidle arch_cpu_idle(void)
+ {
+-	raw_local_irq_enable();
+-	__arch_cpu_idle(); /* idle instruction needs irq enabled */
++	__arch_cpu_idle();
+ 	raw_local_irq_disable();
+ }
+diff --git a/arch/loongarch/kernel/reset.c b/arch/loongarch/kernel/reset.c
+index 1ef8c63835351b..de8fa5a8a825cd 100644
+--- a/arch/loongarch/kernel/reset.c
++++ b/arch/loongarch/kernel/reset.c
+@@ -33,7 +33,7 @@ void machine_halt(void)
+ 	console_flush_on_panic(CONSOLE_FLUSH_PENDING);
+ 
+ 	while (true) {
+-		__arch_cpu_idle();
++		__asm__ __volatile__("idle 0" : : : "memory");
+ 	}
+ }
+ 
+@@ -53,7 +53,7 @@ void machine_power_off(void)
+ #endif
+ 
+ 	while (true) {
+-		__arch_cpu_idle();
++		__asm__ __volatile__("idle 0" : : : "memory");
+ 	}
+ }
+ 
+@@ -74,6 +74,6 @@ void machine_restart(char *command)
+ 		acpi_reboot();
+ 
+ 	while (true) {
+-		__arch_cpu_idle();
++		__asm__ __volatile__("idle 0" : : : "memory");
+ 	}
+ }
+diff --git a/arch/loongarch/kvm/main.c b/arch/loongarch/kvm/main.c
+index 27e9b94c0a0b6e..7e8f5d6829ef0c 100644
+--- a/arch/loongarch/kvm/main.c
++++ b/arch/loongarch/kvm/main.c
+@@ -283,9 +283,9 @@ int kvm_arch_enable_virtualization_cpu(void)
+ 	 * TOE=0:       Trap on Exception.
+ 	 * TIT=0:       Trap on Timer.
+ 	 */
+-	if (env & CSR_GCFG_GCIP_ALL)
++	if (env & CSR_GCFG_GCIP_SECURE)
+ 		gcfg |= CSR_GCFG_GCI_SECURE;
+-	if (env & CSR_GCFG_MATC_ROOT)
++	if (env & CSR_GCFG_MATP_ROOT)
+ 		gcfg |= CSR_GCFG_MATC_ROOT;
+ 
+ 	write_csr_gcfg(gcfg);
+diff --git a/arch/loongarch/lib/csum.c b/arch/loongarch/lib/csum.c
+index a5e84b403c3b34..df309ae4045dee 100644
+--- a/arch/loongarch/lib/csum.c
++++ b/arch/loongarch/lib/csum.c
+@@ -25,7 +25,7 @@ unsigned int __no_sanitize_address do_csum(const unsigned char *buff, int len)
+ 	const u64 *ptr;
+ 	u64 data, sum64 = 0;
+ 
+-	if (unlikely(len == 0))
++	if (unlikely(len <= 0))
+ 		return 0;
+ 
+ 	offset = (unsigned long)buff & 7;
+diff --git a/arch/s390/pci/pci_bus.c b/arch/s390/pci/pci_bus.c
+index 56a786ca7354b9..c3854682934557 100644
+--- a/arch/s390/pci/pci_bus.c
++++ b/arch/s390/pci/pci_bus.c
+@@ -331,6 +331,17 @@ static int zpci_bus_add_device(struct zpci_bus *zbus, struct zpci_dev *zdev)
+ 	return rc;
+ }
+ 
++static bool zpci_bus_is_isolated_vf(struct zpci_bus *zbus, struct zpci_dev *zdev)
++{
++	struct pci_dev *pdev;
++
++	pdev = zpci_iov_find_parent_pf(zbus, zdev);
++	if (!pdev)
++		return true;
++	pci_dev_put(pdev);
++	return false;
++}
++
+ int zpci_bus_device_register(struct zpci_dev *zdev, struct pci_ops *ops)
+ {
+ 	bool topo_is_tid = zdev->tid_avail;
+@@ -345,6 +356,15 @@ int zpci_bus_device_register(struct zpci_dev *zdev, struct pci_ops *ops)
+ 
+ 	topo = topo_is_tid ? zdev->tid : zdev->pchid;
+ 	zbus = zpci_bus_get(topo, topo_is_tid);
++	/*
++	 * An isolated VF gets its own domain/bus even if there exists
++	 * a matching domain/bus already
++	 */
++	if (zbus && zpci_bus_is_isolated_vf(zbus, zdev)) {
++		zpci_bus_put(zbus);
++		zbus = NULL;
++	}
++
+ 	if (!zbus) {
+ 		zbus = zpci_bus_alloc(topo, topo_is_tid);
+ 		if (!zbus)
+diff --git a/arch/s390/pci/pci_iov.c b/arch/s390/pci/pci_iov.c
+index ead062bf2b41cc..191e56a623f62c 100644
+--- a/arch/s390/pci/pci_iov.c
++++ b/arch/s390/pci/pci_iov.c
+@@ -60,18 +60,35 @@ static int zpci_iov_link_virtfn(struct pci_dev *pdev, struct pci_dev *virtfn, in
+ 	return 0;
+ }
+ 
+-int zpci_iov_setup_virtfn(struct zpci_bus *zbus, struct pci_dev *virtfn, int vfn)
++/**
++ * zpci_iov_find_parent_pf - Find the parent PF, if any, of the given function
++ * @zbus:	The bus that the PCI function is on, or would be added on
++ * @zdev:	The PCI function
++ *
++ * Finds the parent PF, if it exists and is configured, of the given PCI function
++ * and increments its refcount. The PF is searched for on the provided bus so the
++ * caller has to ensure that this is the correct bus to search. This function may
++ * be used before adding the PCI function to a zbus.
++ *
++ * Return: Pointer to the struct pci_dev of the parent PF or NULL if it is
++ * not found. If the function is not a VF or has no RequesterID information,
++ * NULL is returned as well.
++ */
++struct pci_dev *zpci_iov_find_parent_pf(struct zpci_bus *zbus, struct zpci_dev *zdev)
+ {
+-	int i, cand_devfn;
+-	struct zpci_dev *zdev;
++	int i, vfid, devfn, cand_devfn;
+ 	struct pci_dev *pdev;
+-	int vfid = vfn - 1; /* Linux' vfid's start at 0 vfn at 1*/
+-	int rc = 0;
+ 
+ 	if (!zbus->multifunction)
+-		return 0;
+-
+-	/* If the parent PF for the given VF is also configured in the
++		return NULL;
++	/* Non-VFs and VFs without RID available don't have a parent */
++	if (!zdev->vfn || !zdev->rid_available)
++		return NULL;
++	/* Linux vfid starts at 0 vfn at 1 */
++	vfid = zdev->vfn - 1;
++	devfn = zdev->rid & ZPCI_RID_MASK_DEVFN;
++	/*
++	 * If the parent PF for the given VF is also configured in the
+ 	 * instance, it must be on the same zbus.
+ 	 * We can then identify the parent PF by checking what
+ 	 * devfn the VF would have if it belonged to that PF using the PF's
+@@ -85,15 +102,26 @@ int zpci_iov_setup_virtfn(struct zpci_bus *zbus, struct pci_dev *virtfn, int vfn
+ 			if (!pdev)
+ 				continue;
+ 			cand_devfn = pci_iov_virtfn_devfn(pdev, vfid);
+-			if (cand_devfn == virtfn->devfn) {
+-				rc = zpci_iov_link_virtfn(pdev, virtfn, vfid);
+-				/* balance pci_get_slot() */
+-				pci_dev_put(pdev);
+-				break;
+-			}
++			if (cand_devfn == devfn)
++				return pdev;
+ 			/* balance pci_get_slot() */
+ 			pci_dev_put(pdev);
+ 		}
+ 	}
++	return NULL;
++}
++
++int zpci_iov_setup_virtfn(struct zpci_bus *zbus, struct pci_dev *virtfn, int vfn)
++{
++	struct zpci_dev *zdev = to_zpci(virtfn);
++	struct pci_dev *pdev_pf;
++	int rc = 0;
++
++	pdev_pf = zpci_iov_find_parent_pf(zbus, zdev);
++	if (pdev_pf) {
++		/* Linux' vfids start at 0 while zdev->vfn starts at 1 */
++		rc = zpci_iov_link_virtfn(pdev_pf, virtfn, zdev->vfn - 1);
++		pci_dev_put(pdev_pf);
++	}
+ 	return rc;
+ }
+diff --git a/arch/s390/pci/pci_iov.h b/arch/s390/pci/pci_iov.h
+index b2c828003bad0a..05df728f980ca4 100644
+--- a/arch/s390/pci/pci_iov.h
++++ b/arch/s390/pci/pci_iov.h
+@@ -17,6 +17,8 @@ void zpci_iov_map_resources(struct pci_dev *pdev);
+ 
+ int zpci_iov_setup_virtfn(struct zpci_bus *zbus, struct pci_dev *virtfn, int vfn);
+ 
++struct pci_dev *zpci_iov_find_parent_pf(struct zpci_bus *zbus, struct zpci_dev *zdev);
++
+ #else /* CONFIG_PCI_IOV */
+ static inline void zpci_iov_remove_virtfn(struct pci_dev *pdev, int vfn) {}
+ 
+@@ -26,5 +28,10 @@ static inline int zpci_iov_setup_virtfn(struct zpci_bus *zbus, struct pci_dev *v
+ {
+ 	return 0;
+ }
++
++static inline struct pci_dev *zpci_iov_find_parent_pf(struct zpci_bus *zbus, struct zpci_dev *zdev)
++{
++	return NULL;
++}
+ #endif /* CONFIG_PCI_IOV */
+ #endif /* __S390_PCI_IOV_h */
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index 171be04eca1f5d..1b0c2397d65753 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -2582,7 +2582,8 @@ config MITIGATION_IBPB_ENTRY
+ 	depends on CPU_SUP_AMD && X86_64
+ 	default y
+ 	help
+-	  Compile the kernel with support for the retbleed=ibpb mitigation.
++	  Compile the kernel with support for the retbleed=ibpb and
++	  spec_rstack_overflow={ibpb,ibpb-vmexit} mitigations.
+ 
+ config MITIGATION_IBRS_ENTRY
+ 	bool "Enable IBRS on kernel entry"
+diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
+index f558be868a50b6..f5bf400f6a2833 100644
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -4865,20 +4865,22 @@ static inline bool intel_pmu_broken_perf_cap(void)
+ 
+ static void update_pmu_cap(struct x86_hybrid_pmu *pmu)
+ {
+-	unsigned int sub_bitmaps, eax, ebx, ecx, edx;
++	unsigned int cntr, fixed_cntr, ecx, edx;
++	union cpuid35_eax eax;
++	union cpuid35_ebx ebx;
+ 
+-	cpuid(ARCH_PERFMON_EXT_LEAF, &sub_bitmaps, &ebx, &ecx, &edx);
++	cpuid(ARCH_PERFMON_EXT_LEAF, &eax.full, &ebx.full, &ecx, &edx);
+ 
+-	if (ebx & ARCH_PERFMON_EXT_UMASK2)
++	if (ebx.split.umask2)
+ 		pmu->config_mask |= ARCH_PERFMON_EVENTSEL_UMASK2;
+-	if (ebx & ARCH_PERFMON_EXT_EQ)
++	if (ebx.split.eq)
+ 		pmu->config_mask |= ARCH_PERFMON_EVENTSEL_EQ;
+ 
+-	if (sub_bitmaps & ARCH_PERFMON_NUM_COUNTER_LEAF_BIT) {
++	if (eax.split.cntr_subleaf) {
+ 		cpuid_count(ARCH_PERFMON_EXT_LEAF, ARCH_PERFMON_NUM_COUNTER_LEAF,
+-			    &eax, &ebx, &ecx, &edx);
+-		pmu->cntr_mask64 = eax;
+-		pmu->fixed_cntr_mask64 = ebx;
++			    &cntr, &fixed_cntr, &ecx, &edx);
++		pmu->cntr_mask64 = cntr;
++		pmu->fixed_cntr_mask64 = fixed_cntr;
+ 	}
+ 
+ 	if (!intel_pmu_broken_perf_cap()) {
+@@ -4901,11 +4903,6 @@ static void intel_pmu_check_hybrid_pmus(struct x86_hybrid_pmu *pmu)
+ 	else
+ 		pmu->intel_ctrl &= ~(1ULL << GLOBAL_CTRL_EN_PERF_METRICS);
+ 
+-	if (pmu->intel_cap.pebs_output_pt_available)
+-		pmu->pmu.capabilities |= PERF_PMU_CAP_AUX_OUTPUT;
+-	else
+-		pmu->pmu.capabilities &= ~PERF_PMU_CAP_AUX_OUTPUT;
+-
+ 	intel_pmu_check_event_constraints(pmu->event_constraints,
+ 					  pmu->cntr_mask64,
+ 					  pmu->fixed_cntr_mask64,
+@@ -4974,9 +4971,6 @@ static bool init_hybrid_pmu(int cpu)
+ 
+ 	pr_info("%s PMU driver: ", pmu->name);
+ 
+-	if (pmu->intel_cap.pebs_output_pt_available)
+-		pr_cont("PEBS-via-PT ");
+-
+ 	pr_cont("\n");
+ 
+ 	x86_pmu_show_pmu_cap(&pmu->pmu);
+@@ -4999,8 +4993,11 @@ static void intel_pmu_cpu_starting(int cpu)
+ 
+ 	init_debug_store_on_cpu(cpu);
+ 	/*
+-	 * Deal with CPUs that don't clear their LBRs on power-up.
++	 * Deal with CPUs that don't clear their LBRs on power-up, and that may
++	 * even boot with LBRs enabled.
+ 	 */
++	if (!static_cpu_has(X86_FEATURE_ARCH_LBR) && x86_pmu.lbr_nr)
++		msr_clear_bit(MSR_IA32_DEBUGCTLMSR, DEBUGCTLMSR_LBR_BIT);
+ 	intel_pmu_lbr_reset();
+ 
+ 	cpuc->lbr_sel = NULL;
+@@ -6284,11 +6281,9 @@ static __always_inline int intel_pmu_init_hybrid(enum hybrid_pmu_type pmus)
+ 		pmu->intel_cap.capabilities = x86_pmu.intel_cap.capabilities;
+ 		if (pmu->pmu_type & hybrid_small) {
+ 			pmu->intel_cap.perf_metrics = 0;
+-			pmu->intel_cap.pebs_output_pt_available = 1;
+ 			pmu->mid_ack = true;
+ 		} else if (pmu->pmu_type & hybrid_big) {
+ 			pmu->intel_cap.perf_metrics = 1;
+-			pmu->intel_cap.pebs_output_pt_available = 0;
+ 			pmu->late_ack = true;
+ 		}
+ 	}
+diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
+index 19a9fd974e3e1d..b6303b0224531b 100644
+--- a/arch/x86/events/intel/ds.c
++++ b/arch/x86/events/intel/ds.c
+@@ -2523,7 +2523,15 @@ void __init intel_ds_init(void)
+ 			}
+ 			pr_cont("PEBS fmt4%c%s, ", pebs_type, pebs_qual);
+ 
+-			if (!is_hybrid() && x86_pmu.intel_cap.pebs_output_pt_available) {
++			/*
++			 * The PEBS-via-PT is not supported on hybrid platforms,
++			 * because not all CPUs of a hybrid machine support it.
++			 * The global x86_pmu.intel_cap, which only contains the
++			 * common capabilities, is used to check the availability
++			 * of the feature. The per-PMU pebs_output_pt_available
++			 * in a hybrid machine should be ignored.
++			 */
++			if (x86_pmu.intel_cap.pebs_output_pt_available) {
+ 				pr_cont("PEBS-via-PT, ");
+ 				x86_get_pmu(smp_processor_id())->capabilities |= PERF_PMU_CAP_AUX_OUTPUT;
+ 			}
+diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
+index 861d080ed4c6ab..cfb22f8c451a7f 100644
+--- a/arch/x86/include/asm/kvm-x86-ops.h
++++ b/arch/x86/include/asm/kvm-x86-ops.h
+@@ -47,6 +47,7 @@ KVM_X86_OP(set_idt)
+ KVM_X86_OP(get_gdt)
+ KVM_X86_OP(set_gdt)
+ KVM_X86_OP(sync_dirty_debug_regs)
++KVM_X86_OP(set_dr6)
+ KVM_X86_OP(set_dr7)
+ KVM_X86_OP(cache_reg)
+ KVM_X86_OP(get_rflags)
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 5da67e5c00401b..8499b9cb9c8263 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -1674,6 +1674,7 @@ struct kvm_x86_ops {
+ 	void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
+ 	void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
+ 	void (*sync_dirty_debug_regs)(struct kvm_vcpu *vcpu);
++	void (*set_dr6)(struct kvm_vcpu *vcpu, unsigned long value);
+ 	void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value);
+ 	void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
+ 	unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
+diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
+index ce4677b8b7356c..3b496cdcb74b3c 100644
+--- a/arch/x86/include/asm/mmu.h
++++ b/arch/x86/include/asm/mmu.h
+@@ -37,6 +37,8 @@ typedef struct {
+ 	 */
+ 	atomic64_t tlb_gen;
+ 
++	unsigned long next_trim_cpumask;
++
+ #ifdef CONFIG_MODIFY_LDT_SYSCALL
+ 	struct rw_semaphore	ldt_usr_sem;
+ 	struct ldt_struct	*ldt;
+diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
+index 2886cb668d7fae..795fdd53bd0a6d 100644
+--- a/arch/x86/include/asm/mmu_context.h
++++ b/arch/x86/include/asm/mmu_context.h
+@@ -151,6 +151,7 @@ static inline int init_new_context(struct task_struct *tsk,
+ 
+ 	mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id);
+ 	atomic64_set(&mm->context.tlb_gen, 0);
++	mm->context.next_trim_cpumask = jiffies + HZ;
+ 
+ #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
+ 	if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {
+diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
+index 3ae84c3b8e6dba..61e991507353eb 100644
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -395,7 +395,8 @@
+ #define MSR_IA32_PASID_VALID		BIT_ULL(31)
+ 
+ /* DEBUGCTLMSR bits (others vary by model): */
+-#define DEBUGCTLMSR_LBR			(1UL <<  0) /* last branch recording */
++#define DEBUGCTLMSR_LBR_BIT		0	     /* last branch recording */
++#define DEBUGCTLMSR_LBR			(1UL <<  DEBUGCTLMSR_LBR_BIT)
+ #define DEBUGCTLMSR_BTF_SHIFT		1
+ #define DEBUGCTLMSR_BTF			(1UL <<  1) /* single-step on branches */
+ #define DEBUGCTLMSR_BUS_LOCK_DETECT	(1UL <<  2)
+diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
+index 91b73571412f16..7505bb5d260ab4 100644
+--- a/arch/x86/include/asm/perf_event.h
++++ b/arch/x86/include/asm/perf_event.h
+@@ -187,11 +187,33 @@ union cpuid10_edx {
+  * detection/enumeration details:
+  */
+ #define ARCH_PERFMON_EXT_LEAF			0x00000023
+-#define ARCH_PERFMON_EXT_UMASK2			0x1
+-#define ARCH_PERFMON_EXT_EQ			0x2
+-#define ARCH_PERFMON_NUM_COUNTER_LEAF_BIT	0x1
+ #define ARCH_PERFMON_NUM_COUNTER_LEAF		0x1
+ 
++union cpuid35_eax {
++	struct {
++		unsigned int	leaf0:1;
++		/* Counters Sub-Leaf */
++		unsigned int    cntr_subleaf:1;
++		/* Auto Counter Reload Sub-Leaf */
++		unsigned int    acr_subleaf:1;
++		/* Events Sub-Leaf */
++		unsigned int    events_subleaf:1;
++		unsigned int	reserved:28;
++	} split;
++	unsigned int            full;
++};
++
++union cpuid35_ebx {
++	struct {
++		/* UnitMask2 Supported */
++		unsigned int    umask2:1;
++		/* EQ-bit Supported */
++		unsigned int    eq:1;
++		unsigned int	reserved:30;
++	} split;
++	unsigned int            full;
++};
++
+ /*
+  * Intel Architectural LBR CPUID detection/enumeration details:
+  */
+diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
+index 69e79fff41b800..02fc2aa06e9e0e 100644
+--- a/arch/x86/include/asm/tlbflush.h
++++ b/arch/x86/include/asm/tlbflush.h
+@@ -222,6 +222,7 @@ struct flush_tlb_info {
+ 	unsigned int		initiating_cpu;
+ 	u8			stride_shift;
+ 	u8			freed_tables;
++	u8			trim_cpumask;
+ };
+ 
+ void flush_tlb_local(void);
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 47a01d4028f60e..5fba44a4f988c0 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -1115,6 +1115,8 @@ static void __init retbleed_select_mitigation(void)
+ 
+ 	case RETBLEED_MITIGATION_IBPB:
+ 		setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
++		setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
++		mitigate_smt = true;
+ 
+ 		/*
+ 		 * IBPB on entry already obviates the need for
+@@ -1124,9 +1126,6 @@ static void __init retbleed_select_mitigation(void)
+ 		setup_clear_cpu_cap(X86_FEATURE_UNRET);
+ 		setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
+ 
+-		setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
+-		mitigate_smt = true;
+-
+ 		/*
+ 		 * There is no need for RSB filling: entry_ibpb() ensures
+ 		 * all predictions, including the RSB, are invalidated,
+@@ -2643,6 +2642,7 @@ static void __init srso_select_mitigation(void)
+ 		if (IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) {
+ 			if (has_microcode) {
+ 				setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
++				setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
+ 				srso_mitigation = SRSO_MITIGATION_IBPB;
+ 
+ 				/*
+@@ -2652,6 +2652,13 @@ static void __init srso_select_mitigation(void)
+ 				 */
+ 				setup_clear_cpu_cap(X86_FEATURE_UNRET);
+ 				setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
++
++				/*
++				 * There is no need for RSB filling: entry_ibpb() ensures
++				 * all predictions, including the RSB, are invalidated,
++				 * regardless of IBPB implementation.
++				 */
++				setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);
+ 			}
+ 		} else {
+ 			pr_err("WARNING: kernel not compiled with MITIGATION_IBPB_ENTRY.\n");
+@@ -2659,8 +2666,8 @@ static void __init srso_select_mitigation(void)
+ 		break;
+ 
+ 	case SRSO_CMD_IBPB_ON_VMEXIT:
+-		if (IS_ENABLED(CONFIG_MITIGATION_SRSO)) {
+-			if (!boot_cpu_has(X86_FEATURE_ENTRY_IBPB) && has_microcode) {
++		if (IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) {
++			if (has_microcode) {
+ 				setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
+ 				srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT;
+ 
+@@ -2672,8 +2679,8 @@ static void __init srso_select_mitigation(void)
+ 				setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);
+ 			}
+ 		} else {
+-			pr_err("WARNING: kernel not compiled with MITIGATION_SRSO.\n");
+-                }
++			pr_err("WARNING: kernel not compiled with MITIGATION_IBPB_ENTRY.\n");
++		}
+ 		break;
+ 	default:
+ 		break;
+diff --git a/arch/x86/kernel/static_call.c b/arch/x86/kernel/static_call.c
+index 9eed0c144dad51..9e51242ed125ee 100644
+--- a/arch/x86/kernel/static_call.c
++++ b/arch/x86/kernel/static_call.c
+@@ -175,7 +175,6 @@ EXPORT_SYMBOL_GPL(arch_static_call_transform);
+ noinstr void __static_call_update_early(void *tramp, void *func)
+ {
+ 	BUG_ON(system_state != SYSTEM_BOOTING);
+-	BUG_ON(!early_boot_irqs_disabled);
+ 	BUG_ON(static_call_initialized);
+ 	__text_gen_insn(tramp, JMP32_INSN_OPCODE, tramp, func, JMP32_INSN_SIZE);
+ 	sync_core();
+diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
+index 4f0a94346d0094..44c88537448c74 100644
+--- a/arch/x86/kvm/hyperv.c
++++ b/arch/x86/kvm/hyperv.c
+@@ -2226,6 +2226,9 @@ static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
+ 	u32 vector;
+ 	bool all_cpus;
+ 
++	if (!lapic_in_kernel(vcpu))
++		return HV_STATUS_INVALID_HYPERCALL_INPUT;
++
+ 	if (hc->code == HVCALL_SEND_IPI) {
+ 		if (!hc->fast) {
+ 			if (unlikely(kvm_read_guest(kvm, hc->ingpa, &send_ipi,
+@@ -2852,7 +2855,8 @@ int kvm_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
+ 			ent->eax |= HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED;
+ 			ent->eax |= HV_X64_APIC_ACCESS_RECOMMENDED;
+ 			ent->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED;
+-			ent->eax |= HV_X64_CLUSTER_IPI_RECOMMENDED;
++			if (!vcpu || lapic_in_kernel(vcpu))
++				ent->eax |= HV_X64_CLUSTER_IPI_RECOMMENDED;
+ 			ent->eax |= HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED;
+ 			if (evmcs_ver)
+ 				ent->eax |= HV_X64_ENLIGHTENED_VMCS_RECOMMENDED;
+diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
+index 9dd3796d075a56..19c96278ba755d 100644
+--- a/arch/x86/kvm/mmu/mmu.c
++++ b/arch/x86/kvm/mmu/mmu.c
+@@ -5591,7 +5591,7 @@ void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
+ 	union kvm_mmu_page_role root_role;
+ 
+ 	/* NPT requires CR0.PG=1. */
+-	WARN_ON_ONCE(cpu_role.base.direct);
++	WARN_ON_ONCE(cpu_role.base.direct || !cpu_role.base.guest_mode);
+ 
+ 	root_role = cpu_role.base;
+ 	root_role.level = kvm_mmu_get_tdp_level(vcpu);
+diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
+index cf84103ce38b97..2dcb9c870d5a22 100644
+--- a/arch/x86/kvm/svm/nested.c
++++ b/arch/x86/kvm/svm/nested.c
+@@ -646,6 +646,11 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm,
+ 	u32 pause_count12;
+ 	u32 pause_thresh12;
+ 
++	nested_svm_transition_tlb_flush(vcpu);
++
++	/* Enter Guest-Mode */
++	enter_guest_mode(vcpu);
++
+ 	/*
+ 	 * Filled at exit: exit_code, exit_code_hi, exit_info_1, exit_info_2,
+ 	 * exit_int_info, exit_int_info_err, next_rip, insn_len, insn_bytes.
+@@ -762,11 +767,6 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm,
+ 		}
+ 	}
+ 
+-	nested_svm_transition_tlb_flush(vcpu);
+-
+-	/* Enter Guest-Mode */
+-	enter_guest_mode(vcpu);
+-
+ 	/*
+ 	 * Merge guest and host intercepts - must be called with vcpu in
+ 	 * guest-mode to take effect.
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
+index 4543dd6bcab2cb..a7cb7c82b38e39 100644
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -1993,11 +1993,11 @@ static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
+ 	svm->asid = sd->next_asid++;
+ }
+ 
+-static void svm_set_dr6(struct vcpu_svm *svm, unsigned long value)
++static void svm_set_dr6(struct kvm_vcpu *vcpu, unsigned long value)
+ {
+-	struct vmcb *vmcb = svm->vmcb;
++	struct vmcb *vmcb = to_svm(vcpu)->vmcb;
+ 
+-	if (svm->vcpu.arch.guest_state_protected)
++	if (vcpu->arch.guest_state_protected)
+ 		return;
+ 
+ 	if (unlikely(value != vmcb->save.dr6)) {
+@@ -4234,10 +4234,8 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu,
+ 	 * Run with all-zero DR6 unless needed, so that we can get the exact cause
+ 	 * of a #DB.
+ 	 */
+-	if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT))
+-		svm_set_dr6(svm, vcpu->arch.dr6);
+-	else
+-		svm_set_dr6(svm, DR6_ACTIVE_LOW);
++	if (likely(!(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)))
++		svm_set_dr6(vcpu, DR6_ACTIVE_LOW);
+ 
+ 	clgi();
+ 	kvm_load_guest_xsave_state(vcpu);
+@@ -5033,6 +5031,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
+ 	.set_idt = svm_set_idt,
+ 	.get_gdt = svm_get_gdt,
+ 	.set_gdt = svm_set_gdt,
++	.set_dr6 = svm_set_dr6,
+ 	.set_dr7 = svm_set_dr7,
+ 	.sync_dirty_debug_regs = svm_sync_dirty_debug_regs,
+ 	.cache_reg = svm_cache_reg,
+diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c
+index 7668e2fb8043ef..47476fcc179a52 100644
+--- a/arch/x86/kvm/vmx/main.c
++++ b/arch/x86/kvm/vmx/main.c
+@@ -60,6 +60,7 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
+ 	.set_idt = vmx_set_idt,
+ 	.get_gdt = vmx_get_gdt,
+ 	.set_gdt = vmx_set_gdt,
++	.set_dr6 = vmx_set_dr6,
+ 	.set_dr7 = vmx_set_dr7,
+ 	.sync_dirty_debug_regs = vmx_sync_dirty_debug_regs,
+ 	.cache_reg = vmx_cache_reg,
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index 968ddf71405446..f06d443ec3c68d 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -5631,6 +5631,12 @@ void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
+ 	set_debugreg(DR6_RESERVED, 6);
+ }
+ 
++void vmx_set_dr6(struct kvm_vcpu *vcpu, unsigned long val)
++{
++	lockdep_assert_irqs_disabled();
++	set_debugreg(vcpu->arch.dr6, 6);
++}
++
+ void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val)
+ {
+ 	vmcs_writel(GUEST_DR7, val);
+@@ -7392,10 +7398,6 @@ fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit)
+ 		vmx->loaded_vmcs->host_state.cr4 = cr4;
+ 	}
+ 
+-	/* When KVM_DEBUGREG_WONT_EXIT, dr6 is accessible in guest. */
+-	if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT))
+-		set_debugreg(vcpu->arch.dr6, 6);
+-
+ 	/* When single-stepping over STI and MOV SS, we must clear the
+ 	 * corresponding interruptibility bits in the guest state. Otherwise
+ 	 * vmentry fails as it then expects bit 14 (BS) in pending debug
+diff --git a/arch/x86/kvm/vmx/x86_ops.h b/arch/x86/kvm/vmx/x86_ops.h
+index 48dc76bf0ec03a..4aba200f435d42 100644
+--- a/arch/x86/kvm/vmx/x86_ops.h
++++ b/arch/x86/kvm/vmx/x86_ops.h
+@@ -74,6 +74,7 @@ void vmx_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
+ void vmx_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
+ void vmx_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
+ void vmx_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
++void vmx_set_dr6(struct kvm_vcpu *vcpu, unsigned long val);
+ void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val);
+ void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu);
+ void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg);
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index d760b19d1e513e..0846e3af5f6c5a 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -10968,6 +10968,9 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
+ 		set_debugreg(vcpu->arch.eff_db[1], 1);
+ 		set_debugreg(vcpu->arch.eff_db[2], 2);
+ 		set_debugreg(vcpu->arch.eff_db[3], 3);
++		/* When KVM_DEBUGREG_WONT_EXIT, dr6 is accessible in guest. */
++		if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT))
++			kvm_x86_call(set_dr6)(vcpu, vcpu->arch.dr6);
+ 	} else if (unlikely(hw_breakpoint_active())) {
+ 		set_debugreg(0, 7);
+ 	}
+diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
+index b0678d59ebdb4a..00ffa74d0dd0bf 100644
+--- a/arch/x86/mm/tlb.c
++++ b/arch/x86/mm/tlb.c
+@@ -893,9 +893,36 @@ static void flush_tlb_func(void *info)
+ 			nr_invalidate);
+ }
+ 
+-static bool tlb_is_not_lazy(int cpu, void *data)
++static bool should_flush_tlb(int cpu, void *data)
+ {
+-	return !per_cpu(cpu_tlbstate_shared.is_lazy, cpu);
++	struct flush_tlb_info *info = data;
++
++	/* Lazy TLB will get flushed at the next context switch. */
++	if (per_cpu(cpu_tlbstate_shared.is_lazy, cpu))
++		return false;
++
++	/* No mm means kernel memory flush. */
++	if (!info->mm)
++		return true;
++
++	/* The target mm is loaded, and the CPU is not lazy. */
++	if (per_cpu(cpu_tlbstate.loaded_mm, cpu) == info->mm)
++		return true;
++
++	/* In cpumask, but not the loaded mm? Periodically remove by flushing. */
++	if (info->trim_cpumask)
++		return true;
++
++	return false;
++}
++
++static bool should_trim_cpumask(struct mm_struct *mm)
++{
++	if (time_after(jiffies, READ_ONCE(mm->context.next_trim_cpumask))) {
++		WRITE_ONCE(mm->context.next_trim_cpumask, jiffies + HZ);
++		return true;
++	}
++	return false;
+ }
+ 
+ DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state_shared, cpu_tlbstate_shared);
+@@ -929,7 +956,7 @@ STATIC_NOPV void native_flush_tlb_multi(const struct cpumask *cpumask,
+ 	if (info->freed_tables)
+ 		on_each_cpu_mask(cpumask, flush_tlb_func, (void *)info, true);
+ 	else
+-		on_each_cpu_cond_mask(tlb_is_not_lazy, flush_tlb_func,
++		on_each_cpu_cond_mask(should_flush_tlb, flush_tlb_func,
+ 				(void *)info, 1, cpumask);
+ }
+ 
+@@ -980,6 +1007,7 @@ static struct flush_tlb_info *get_flush_tlb_info(struct mm_struct *mm,
+ 	info->freed_tables	= freed_tables;
+ 	info->new_tlb_gen	= new_tlb_gen;
+ 	info->initiating_cpu	= smp_processor_id();
++	info->trim_cpumask	= 0;
+ 
+ 	return info;
+ }
+@@ -1022,6 +1050,7 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
+ 	 * flush_tlb_func_local() directly in this case.
+ 	 */
+ 	if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids) {
++		info->trim_cpumask = should_trim_cpumask(mm);
+ 		flush_tlb_multi(mm_cpumask(mm), info);
+ 	} else if (mm == this_cpu_read(cpu_tlbstate.loaded_mm)) {
+ 		lockdep_assert_irqs_enabled();
+diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
+index 55a4996d0c04f1..d078de2c952b37 100644
+--- a/arch/x86/xen/mmu_pv.c
++++ b/arch/x86/xen/mmu_pv.c
+@@ -111,6 +111,51 @@ static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
+  */
+ static DEFINE_SPINLOCK(xen_reservation_lock);
+ 
++/* Protected by xen_reservation_lock. */
++#define MIN_CONTIG_ORDER 9 /* 2MB */
++static unsigned int discontig_frames_order = MIN_CONTIG_ORDER;
++static unsigned long discontig_frames_early[1UL << MIN_CONTIG_ORDER] __initdata;
++static unsigned long *discontig_frames __refdata = discontig_frames_early;
++static bool discontig_frames_dyn;
++
++static int alloc_discontig_frames(unsigned int order)
++{
++	unsigned long *new_array, *old_array;
++	unsigned int old_order;
++	unsigned long flags;
++
++	BUG_ON(order < MIN_CONTIG_ORDER);
++	BUILD_BUG_ON(sizeof(discontig_frames_early) != PAGE_SIZE);
++
++	new_array = (unsigned long *)__get_free_pages(GFP_KERNEL,
++						      order - MIN_CONTIG_ORDER);
++	if (!new_array)
++		return -ENOMEM;
++
++	spin_lock_irqsave(&xen_reservation_lock, flags);
++
++	old_order = discontig_frames_order;
++
++	if (order > discontig_frames_order || !discontig_frames_dyn) {
++		if (!discontig_frames_dyn)
++			old_array = NULL;
++		else
++			old_array = discontig_frames;
++
++		discontig_frames = new_array;
++		discontig_frames_order = order;
++		discontig_frames_dyn = true;
++	} else {
++		old_array = new_array;
++	}
++
++	spin_unlock_irqrestore(&xen_reservation_lock, flags);
++
++	free_pages((unsigned long)old_array, old_order - MIN_CONTIG_ORDER);
++
++	return 0;
++}
++
+ /*
+  * Note about cr3 (pagetable base) values:
+  *
+@@ -781,6 +826,7 @@ void xen_mm_pin_all(void)
+ {
+ 	struct page *page;
+ 
++	spin_lock(&init_mm.page_table_lock);
+ 	spin_lock(&pgd_lock);
+ 
+ 	list_for_each_entry(page, &pgd_list, lru) {
+@@ -791,6 +837,7 @@ void xen_mm_pin_all(void)
+ 	}
+ 
+ 	spin_unlock(&pgd_lock);
++	spin_unlock(&init_mm.page_table_lock);
+ }
+ 
+ static void __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
+@@ -812,6 +859,9 @@ static void __init xen_after_bootmem(void)
+ 	SetPagePinned(virt_to_page(level3_user_vsyscall));
+ #endif
+ 	xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
++
++	if (alloc_discontig_frames(MIN_CONTIG_ORDER))
++		BUG();
+ }
+ 
+ static void xen_unpin_page(struct mm_struct *mm, struct page *page,
+@@ -887,6 +937,7 @@ void xen_mm_unpin_all(void)
+ {
+ 	struct page *page;
+ 
++	spin_lock(&init_mm.page_table_lock);
+ 	spin_lock(&pgd_lock);
+ 
+ 	list_for_each_entry(page, &pgd_list, lru) {
+@@ -898,6 +949,7 @@ void xen_mm_unpin_all(void)
+ 	}
+ 
+ 	spin_unlock(&pgd_lock);
++	spin_unlock(&init_mm.page_table_lock);
+ }
+ 
+ static void xen_enter_mmap(struct mm_struct *mm)
+@@ -2199,10 +2251,6 @@ void __init xen_init_mmu_ops(void)
+ 	memset(dummy_mapping, 0xff, PAGE_SIZE);
+ }
+ 
+-/* Protected by xen_reservation_lock. */
+-#define MAX_CONTIG_ORDER 9 /* 2MB */
+-static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
+-
+ #define VOID_PTE (mfn_pte(0, __pgprot(0)))
+ static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
+ 				unsigned long *in_frames,
+@@ -2319,18 +2367,25 @@ int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
+ 				 unsigned int address_bits,
+ 				 dma_addr_t *dma_handle)
+ {
+-	unsigned long *in_frames = discontig_frames, out_frame;
++	unsigned long *in_frames, out_frame;
+ 	unsigned long  flags;
+ 	int            success;
+ 	unsigned long vstart = (unsigned long)phys_to_virt(pstart);
+ 
+-	if (unlikely(order > MAX_CONTIG_ORDER))
+-		return -ENOMEM;
++	if (unlikely(order > discontig_frames_order)) {
++		if (!discontig_frames_dyn)
++			return -ENOMEM;
++
++		if (alloc_discontig_frames(order))
++			return -ENOMEM;
++	}
+ 
+ 	memset((void *) vstart, 0, PAGE_SIZE << order);
+ 
+ 	spin_lock_irqsave(&xen_reservation_lock, flags);
+ 
++	in_frames = discontig_frames;
++
+ 	/* 1. Zap current PTEs, remembering MFNs. */
+ 	xen_zap_pfn_range(vstart, order, in_frames, NULL);
+ 
+@@ -2354,12 +2409,12 @@ int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
+ 
+ void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
+ {
+-	unsigned long *out_frames = discontig_frames, in_frame;
++	unsigned long *out_frames, in_frame;
+ 	unsigned long  flags;
+ 	int success;
+ 	unsigned long vstart;
+ 
+-	if (unlikely(order > MAX_CONTIG_ORDER))
++	if (unlikely(order > discontig_frames_order))
+ 		return;
+ 
+ 	vstart = (unsigned long)phys_to_virt(pstart);
+@@ -2367,6 +2422,8 @@ void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
+ 
+ 	spin_lock_irqsave(&xen_reservation_lock, flags);
+ 
++	out_frames = discontig_frames;
++
+ 	/* 1. Find start MFN of contiguous extent. */
+ 	in_frame = virt_to_mfn((void *)vstart);
+ 
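
The discontig_frames rework above replaces a fixed 2MB-order array with one
that can grow on demand: the replacement buffer is allocated with the lock
dropped, swapped in under xen_reservation_lock, and whichever array lost out
(the displaced one, or the new one if a concurrent caller already grew the
array further) is freed after unlocking. A generic sketch of that
grow-and-swap pattern (names, lock, and the page-based allocation are
illustrative):

	#include <linux/gfp.h>
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(buf_lock);
	static unsigned long *buf;	/* protected by buf_lock */
	static unsigned int buf_order;

	static int buf_grow(unsigned int order)
	{
		unsigned long *new, *old;
		unsigned int old_order;

		/* Allocate outside the lock: the allocation may sleep. */
		new = (unsigned long *)__get_free_pages(GFP_KERNEL, order);
		if (!new)
			return -ENOMEM;

		spin_lock(&buf_lock);
		old_order = buf_order;
		if (order > buf_order) {
			old = buf;	/* displaced array, freed below */
			buf = new;
			buf_order = order;
		} else {
			old = new;	/* raced with a bigger grow, discard */
			old_order = order;
		}
		spin_unlock(&buf_lock);

		free_pages((unsigned long)old, old_order); /* 0-address safe */
		return 0;
	}

Freeing outside the critical section keeps the spinlock hold time bounded,
which matters here because the real lock is taken with interrupts disabled.
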
+diff --git a/block/partitions/mac.c b/block/partitions/mac.c
+index c80183156d6802..b02530d9862970 100644
+--- a/block/partitions/mac.c
++++ b/block/partitions/mac.c
+@@ -53,13 +53,25 @@ int mac_partition(struct parsed_partitions *state)
+ 	}
+ 	secsize = be16_to_cpu(md->block_size);
+ 	put_dev_sector(sect);
++
++	/*
++	 * If the "block size" is not a power of 2, things get weird - we might
++	 * end up with a partition straddling a sector boundary, so we wouldn't
++	 * be able to read a partition entry with read_part_sector().
++	 * Real block sizes are probably (?) powers of two, so just require
++	 * that.
++	 */
++	if (!is_power_of_2(secsize))
++		return -1;
+ 	datasize = round_down(secsize, 512);
+ 	data = read_part_sector(state, datasize / 512, &sect);
+ 	if (!data)
+ 		return -1;
+ 	partoffset = secsize % 512;
+-	if (partoffset + sizeof(*part) > datasize)
++	if (partoffset + sizeof(*part) > datasize) {
++		put_dev_sector(sect);
+ 		return -1;
++	}
+ 	part = (struct mac_partition *) (data + partoffset);
+ 	if (be16_to_cpu(part->signature) != MAC_PARTITION_MAGIC) {
+ 		put_dev_sector(sect);
+@@ -112,8 +124,8 @@ int mac_partition(struct parsed_partitions *state)
+ 				int i, l;
+ 
+ 				goodness++;
+-				l = strlen(part->name);
+-				if (strcmp(part->name, "/") == 0)
++				l = strnlen(part->name, sizeof(part->name));
++				if (strncmp(part->name, "/", sizeof(part->name)) == 0)
+ 					goodness++;
+ 				for (i = 0; i <= l - 4; ++i) {
+ 					if (strncasecmp(part->name + i, "root",
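
Two hardenings above: the secsize read from the media must be a power of two
before it feeds the sector arithmetic, and part->name, a fixed-width on-disk
field with no guaranteed NUL terminator, is now only touched with
length-bounded string helpers. A sketch of the bounded-field idiom (the
struct below is illustrative):

	#include <linux/log2.h>
	#include <linux/string.h>

	struct on_disk_entry {
		char name[32];	/* fixed width, may lack a trailing NUL */
	};

	static bool entry_name_is_root(const struct on_disk_entry *e)
	{
		/*
		 * strnlen()/strncmp() never read past the field, even when
		 * the terminator is missing; strlen()/strcmp() could overrun.
		 */
		return strnlen(e->name, sizeof(e->name)) == 1 &&
		       strncmp(e->name, "/", sizeof(e->name)) == 0;
	}

The is_power_of_2() check plays the same defensive role for the arithmetic:
round_down() and the modulo math later in the function assume a sane,
sector-compatible block size.
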
+diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c
+index cb45ef5240dab6..068c1612660bc0 100644
+--- a/drivers/acpi/x86/utils.c
++++ b/drivers/acpi/x86/utils.c
+@@ -407,6 +407,19 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
+ 		.driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
+ 					ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
+ 	},
++	{
++		/* Vexia Edu Atla 10 tablet 5V version */
++		.matches = {
++			/* Having all 3 of these not set is somewhat unique */
++			DMI_MATCH(DMI_SYS_VENDOR, "To be filled by O.E.M."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "To be filled by O.E.M."),
++			DMI_MATCH(DMI_BOARD_NAME, "To be filled by O.E.M."),
++			/* Above strings are too generic, also match on BIOS date */
++			DMI_MATCH(DMI_BIOS_DATE, "05/14/2015"),
++		},
++		.driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
++					ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
++	},
+ 	{
+ 		/* Vexia Edu Atla 10 tablet 9V version */
+ 		.matches = {
+diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
+index 6981e5f974e9a4..ff7d0b14a6468b 100644
+--- a/drivers/base/regmap/regmap-irq.c
++++ b/drivers/base/regmap/regmap-irq.c
+@@ -909,6 +909,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
+ 	kfree(d->wake_buf);
+ 	kfree(d->mask_buf_def);
+ 	kfree(d->mask_buf);
++	kfree(d->main_status_buf);
+ 	kfree(d->status_buf);
+ 	kfree(d->status_reg_buf);
+ 	if (d->config_buf) {
+@@ -984,6 +985,7 @@ void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
+ 	kfree(d->wake_buf);
+ 	kfree(d->mask_buf_def);
+ 	kfree(d->mask_buf);
++	kfree(d->main_status_buf);
+ 	kfree(d->status_reg_buf);
+ 	kfree(d->status_buf);
+ 	if (d->config_buf) {
+diff --git a/drivers/bluetooth/btintel_pcie.c b/drivers/bluetooth/btintel_pcie.c
+index 8bd663f4bac1b7..53f6b4f76bccdd 100644
+--- a/drivers/bluetooth/btintel_pcie.c
++++ b/drivers/bluetooth/btintel_pcie.c
+@@ -1312,6 +1312,10 @@ static int btintel_pcie_send_frame(struct hci_dev *hdev,
+ 			if (opcode == 0xfc01)
+ 				btintel_pcie_inject_cmd_complete(hdev, opcode);
+ 		}
++		/* Firmware raises alive interrupt on HCI_OP_RESET */
++		if (opcode == HCI_OP_RESET)
++			data->gp0_received = false;
++
+ 		hdev->stat.cmd_tx++;
+ 		break;
+ 	case HCI_ACLDATA_PKT:
+@@ -1349,7 +1353,6 @@ static int btintel_pcie_send_frame(struct hci_dev *hdev,
+ 			   opcode, btintel_pcie_alivectxt_state2str(old_ctxt),
+ 			   btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt));
+ 		if (opcode == HCI_OP_RESET) {
+-			data->gp0_received = false;
+ 			ret = wait_event_timeout(data->gp0_wait_q,
+ 						 data->gp0_received,
+ 						 msecs_to_jiffies(BTINTEL_DEFAULT_INTR_TIMEOUT_MS));
+diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
+index 91d3c3b1c2d3bf..9db5354fdb0271 100644
+--- a/drivers/cpufreq/amd-pstate.c
++++ b/drivers/cpufreq/amd-pstate.c
+@@ -696,12 +696,12 @@ static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state)
+ 		pr_err("Boost mode is not supported by this processor or SBIOS\n");
+ 		return -EOPNOTSUPP;
+ 	}
+-	mutex_lock(&amd_pstate_driver_lock);
++	guard(mutex)(&amd_pstate_driver_lock);
++
+ 	ret = amd_pstate_cpu_boost_update(policy, state);
+ 	WRITE_ONCE(cpudata->boost_state, !ret ? state : false);
+ 	policy->boost_enabled = !ret ? state : false;
+ 	refresh_frequency_limits(policy);
+-	mutex_unlock(&amd_pstate_driver_lock);
+ 
+ 	return ret;
+ }
+@@ -778,24 +778,28 @@ static void amd_pstate_init_prefcore(struct amd_cpudata *cpudata)
+ 
+ static void amd_pstate_update_limits(unsigned int cpu)
+ {
+-	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
++	struct cpufreq_policy *policy = NULL;
+ 	struct amd_cpudata *cpudata;
+ 	u32 prev_high = 0, cur_high = 0;
+ 	int ret;
+ 	bool highest_perf_changed = false;
+ 
++	if (!amd_pstate_prefcore)
++		return;
++
++	policy = cpufreq_cpu_get(cpu);
+ 	if (!policy)
+ 		return;
+ 
+ 	cpudata = policy->driver_data;
+ 
+-	if (!amd_pstate_prefcore)
+-		return;
++	guard(mutex)(&amd_pstate_driver_lock);
+ 
+-	mutex_lock(&amd_pstate_driver_lock);
+ 	ret = amd_get_highest_perf(cpu, &cur_high);
+-	if (ret)
+-		goto free_cpufreq_put;
++	if (ret) {
++		cpufreq_cpu_put(policy);
++		return;
++	}
+ 
+ 	prev_high = READ_ONCE(cpudata->prefcore_ranking);
+ 	highest_perf_changed = (prev_high != cur_high);
+@@ -805,14 +809,11 @@ static void amd_pstate_update_limits(unsigned int cpu)
+ 		if (cur_high < CPPC_MAX_PERF)
+ 			sched_set_itmt_core_prio((int)cur_high, cpu);
+ 	}
+-
+-free_cpufreq_put:
+ 	cpufreq_cpu_put(policy);
+ 
+ 	if (!highest_perf_changed)
+ 		cpufreq_update_policy(cpu);
+ 
+-	mutex_unlock(&amd_pstate_driver_lock);
+ }
+ 
+ /*
+@@ -1145,11 +1146,11 @@ static ssize_t store_energy_performance_preference(
+ 	if (ret < 0)
+ 		return -EINVAL;
+ 
+-	mutex_lock(&amd_pstate_limits_lock);
++	guard(mutex)(&amd_pstate_limits_lock);
++
+ 	ret = amd_pstate_set_energy_pref_index(cpudata, ret);
+-	mutex_unlock(&amd_pstate_limits_lock);
+ 
+-	return ret ?: count;
++	return ret ? ret : count;
+ }
+ 
+ static ssize_t show_energy_performance_preference(
+@@ -1297,13 +1298,10 @@ EXPORT_SYMBOL_GPL(amd_pstate_update_status);
+ static ssize_t status_show(struct device *dev,
+ 			   struct device_attribute *attr, char *buf)
+ {
+-	ssize_t ret;
+ 
+-	mutex_lock(&amd_pstate_driver_lock);
+-	ret = amd_pstate_show_status(buf);
+-	mutex_unlock(&amd_pstate_driver_lock);
++	guard(mutex)(&amd_pstate_driver_lock);
+ 
+-	return ret;
++	return amd_pstate_show_status(buf);
+ }
+ 
+ static ssize_t status_store(struct device *a, struct device_attribute *b,
+@@ -1312,9 +1310,8 @@ static ssize_t status_store(struct device *a, struct device_attribute *b,
+ 	char *p = memchr(buf, '\n', count);
+ 	int ret;
+ 
+-	mutex_lock(&amd_pstate_driver_lock);
++	guard(mutex)(&amd_pstate_driver_lock);
+ 	ret = amd_pstate_update_status(buf, p ? p - buf : count);
+-	mutex_unlock(&amd_pstate_driver_lock);
+ 
+ 	return ret < 0 ? ret : count;
+ }
+@@ -1579,24 +1576,17 @@ static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
+ 
+ static void amd_pstate_epp_reenable(struct amd_cpudata *cpudata)
+ {
+-	struct cppc_perf_ctrls perf_ctrls;
+-	u64 value, max_perf;
++	u64 max_perf;
+ 	int ret;
+ 
+ 	ret = amd_pstate_enable(true);
+ 	if (ret)
+ 		pr_err("failed to enable amd pstate during resume, return %d\n", ret);
+ 
+-	value = READ_ONCE(cpudata->cppc_req_cached);
+ 	max_perf = READ_ONCE(cpudata->highest_perf);
+ 
+-	if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
+-		wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
+-	} else {
+-		perf_ctrls.max_perf = max_perf;
+-		perf_ctrls.energy_perf = AMD_CPPC_ENERGY_PERF_PREF(cpudata->epp_cached);
+-		cppc_set_perf(cpudata->cpu, &perf_ctrls);
+-	}
++	amd_pstate_update_perf(cpudata, 0, 0, max_perf, false);
++	amd_pstate_set_epp(cpudata, cpudata->epp_cached);
+ }
+ 
+ static int amd_pstate_epp_cpu_online(struct cpufreq_policy *policy)
+@@ -1605,54 +1595,26 @@ static int amd_pstate_epp_cpu_online(struct cpufreq_policy *policy)
+ 
+ 	pr_debug("AMD CPU Core %d going online\n", cpudata->cpu);
+ 
+-	if (cppc_state == AMD_PSTATE_ACTIVE) {
+-		amd_pstate_epp_reenable(cpudata);
+-		cpudata->suspended = false;
+-	}
++	amd_pstate_epp_reenable(cpudata);
++	cpudata->suspended = false;
+ 
+ 	return 0;
+ }
+ 
+-static void amd_pstate_epp_offline(struct cpufreq_policy *policy)
+-{
+-	struct amd_cpudata *cpudata = policy->driver_data;
+-	struct cppc_perf_ctrls perf_ctrls;
+-	int min_perf;
+-	u64 value;
+-
+-	min_perf = READ_ONCE(cpudata->lowest_perf);
+-	value = READ_ONCE(cpudata->cppc_req_cached);
+-
+-	mutex_lock(&amd_pstate_limits_lock);
+-	if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
+-		cpudata->epp_policy = CPUFREQ_POLICY_UNKNOWN;
+-
+-		/* Set max perf same as min perf */
+-		value &= ~AMD_CPPC_MAX_PERF(~0L);
+-		value |= AMD_CPPC_MAX_PERF(min_perf);
+-		value &= ~AMD_CPPC_MIN_PERF(~0L);
+-		value |= AMD_CPPC_MIN_PERF(min_perf);
+-		wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
+-	} else {
+-		perf_ctrls.desired_perf = 0;
+-		perf_ctrls.max_perf = min_perf;
+-		perf_ctrls.energy_perf = AMD_CPPC_ENERGY_PERF_PREF(HWP_EPP_BALANCE_POWERSAVE);
+-		cppc_set_perf(cpudata->cpu, &perf_ctrls);
+-	}
+-	mutex_unlock(&amd_pstate_limits_lock);
+-}
+-
+ static int amd_pstate_epp_cpu_offline(struct cpufreq_policy *policy)
+ {
+ 	struct amd_cpudata *cpudata = policy->driver_data;
+-
+-	pr_debug("AMD CPU Core %d going offline\n", cpudata->cpu);
++	int min_perf;
+ 
+ 	if (cpudata->suspended)
+ 		return 0;
+ 
+-	if (cppc_state == AMD_PSTATE_ACTIVE)
+-		amd_pstate_epp_offline(policy);
++	min_perf = READ_ONCE(cpudata->lowest_perf);
++
++	guard(mutex)(&amd_pstate_limits_lock);
++
++	amd_pstate_update_perf(cpudata, min_perf, 0, min_perf, false);
++	amd_pstate_set_epp(cpudata, AMD_CPPC_EPP_BALANCE_POWERSAVE);
+ 
+ 	return 0;
+ }
+@@ -1689,13 +1651,11 @@ static int amd_pstate_epp_resume(struct cpufreq_policy *policy)
+ 	struct amd_cpudata *cpudata = policy->driver_data;
+ 
+ 	if (cpudata->suspended) {
+-		mutex_lock(&amd_pstate_limits_lock);
++		guard(mutex)(&amd_pstate_limits_lock);
+ 
+ 		/* enable amd pstate from suspend state */
+ 		amd_pstate_epp_reenable(cpudata);
+ 
+-		mutex_unlock(&amd_pstate_limits_lock);
+-
+ 		cpudata->suspended = false;
+ 	}
+ 
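
Most of the amd-pstate hunks above are conversions from manual
mutex_lock()/mutex_unlock() pairs to guard(mutex)(...) from
<linux/cleanup.h>: the lock is released automatically when the enclosing
scope exits, so early returns (as in store_energy_performance_preference()
and status_show()) need no unlock label. A minimal sketch of the construct
(the lock and function are illustrative):

	#include <linux/cleanup.h>
	#include <linux/mutex.h>

	static DEFINE_MUTEX(state_lock);
	static int state;

	static int state_store(int new_val)
	{
		guard(mutex)(&state_lock);	/* dropped on every return path */

		if (new_val < 0)
			return -EINVAL;		/* no goto/unlock needed */

		state = new_val;
		return 0;
	}

This is also what makes the amd_pstate_update_limits() rewrite clean: the
early error return added there would otherwise need its own unlock label.
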
+diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
+index 70490bf2697b16..acabc856fe8a58 100644
+--- a/drivers/firmware/efi/efi.c
++++ b/drivers/firmware/efi/efi.c
+@@ -922,13 +922,15 @@ char * __init efi_md_typeattr_format(char *buf, size_t size,
+ 		     EFI_MEMORY_WB | EFI_MEMORY_UCE | EFI_MEMORY_RO |
+ 		     EFI_MEMORY_WP | EFI_MEMORY_RP | EFI_MEMORY_XP |
+ 		     EFI_MEMORY_NV | EFI_MEMORY_SP | EFI_MEMORY_CPU_CRYPTO |
+-		     EFI_MEMORY_RUNTIME | EFI_MEMORY_MORE_RELIABLE))
++		     EFI_MEMORY_MORE_RELIABLE | EFI_MEMORY_HOT_PLUGGABLE |
++		     EFI_MEMORY_RUNTIME))
+ 		snprintf(pos, size, "|attr=0x%016llx]",
+ 			 (unsigned long long)attr);
+ 	else
+ 		snprintf(pos, size,
+-			 "|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
++			 "|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
+ 			 attr & EFI_MEMORY_RUNTIME		? "RUN" : "",
++			 attr & EFI_MEMORY_HOT_PLUGGABLE	? "HP"  : "",
+ 			 attr & EFI_MEMORY_MORE_RELIABLE	? "MR"  : "",
+ 			 attr & EFI_MEMORY_CPU_CRYPTO   	? "CC"  : "",
+ 			 attr & EFI_MEMORY_SP			? "SP"  : "",
+diff --git a/drivers/firmware/efi/libstub/randomalloc.c b/drivers/firmware/efi/libstub/randomalloc.c
+index c41e7b2091cdd1..8ad3efb9b1ff16 100644
+--- a/drivers/firmware/efi/libstub/randomalloc.c
++++ b/drivers/firmware/efi/libstub/randomalloc.c
+@@ -25,6 +25,9 @@ static unsigned long get_entry_num_slots(efi_memory_desc_t *md,
+ 	if (md->type != EFI_CONVENTIONAL_MEMORY)
+ 		return 0;
+ 
++	if (md->attribute & EFI_MEMORY_HOT_PLUGGABLE)
++		return 0;
++
+ 	if (efi_soft_reserve_enabled() &&
+ 	    (md->attribute & EFI_MEMORY_SP))
+ 		return 0;
+diff --git a/drivers/firmware/efi/libstub/relocate.c b/drivers/firmware/efi/libstub/relocate.c
+index d694bcfa1074e9..bf676dd127a143 100644
+--- a/drivers/firmware/efi/libstub/relocate.c
++++ b/drivers/firmware/efi/libstub/relocate.c
+@@ -53,6 +53,9 @@ efi_status_t efi_low_alloc_above(unsigned long size, unsigned long align,
+ 		if (desc->type != EFI_CONVENTIONAL_MEMORY)
+ 			continue;
+ 
++		if (desc->attribute & EFI_MEMORY_HOT_PLUGGABLE)
++			continue;
++
+ 		if (efi_soft_reserve_enabled() &&
+ 		    (desc->attribute & EFI_MEMORY_SP))
+ 			continue;
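
Both stub allocators gain the same filter: EFI_CONVENTIONAL_MEMORY regions
marked EFI_MEMORY_HOT_PLUGGABLE (like soft-reserved EFI_MEMORY_SP regions)
must not receive boot-time allocations, otherwise the kernel image or a
randomized allocation would pin memory that firmware intends to be
removable. The repeated checks can be read as one predicate, sketched here
as a refactoring the stub itself does not do:

	#include <linux/efi.h>

	static bool efi_md_allocatable(const efi_memory_desc_t *md)
	{
		if (md->type != EFI_CONVENTIONAL_MEMORY)
			return false;
		if (md->attribute & EFI_MEMORY_HOT_PLUGGABLE)
			return false;
		if (efi_soft_reserve_enabled() &&
		    (md->attribute & EFI_MEMORY_SP))
			return false;
		return true;
	}
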
+diff --git a/drivers/firmware/qcom/qcom_scm-smc.c b/drivers/firmware/qcom/qcom_scm-smc.c
+index 2b4c2826f57251..3f10b23ec941b5 100644
+--- a/drivers/firmware/qcom/qcom_scm-smc.c
++++ b/drivers/firmware/qcom/qcom_scm-smc.c
+@@ -173,6 +173,9 @@ int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
+ 		smc.args[i + SCM_SMC_FIRST_REG_IDX] = desc->args[i];
+ 
+ 	if (unlikely(arglen > SCM_SMC_N_REG_ARGS)) {
++		if (!mempool)
++			return -EINVAL;
++
+ 		args_virt = qcom_tzmem_alloc(mempool,
+ 					     SCM_SMC_N_EXT_ARGS * sizeof(u64),
+ 					     flag);
+diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
+index 5321ef98f4427d..64908f1a5e7f9b 100644
+--- a/drivers/gpio/gpio-bcm-kona.c
++++ b/drivers/gpio/gpio-bcm-kona.c
+@@ -69,6 +69,22 @@ struct bcm_kona_gpio {
+ struct bcm_kona_gpio_bank {
+ 	int id;
+ 	int irq;
++	/*
++	 * Used to keep track of lock/unlock operations for each GPIO in the
++	 * bank.
++	 *
++	 * All GPIOs are locked by default (see bcm_kona_gpio_reset), and the
++	 * unlock count for all GPIOs is 0 by default. Each unlock increments
++	 * the counter, and each lock decrements the counter.
++	 *
++	 * The lock function only locks the GPIO once its unlock counter is
++	 * down to 0. This is necessary because the GPIO is unlocked in two
++	 * places in this driver: once for requested GPIOs, and once for
++	 * requested IRQs. Since it is possible for a GPIO to be requested
++	 * as both a GPIO and an IRQ, we need to ensure that we don't lock it
++	 * too early.
++	 */
++	u8 gpio_unlock_count[GPIO_PER_BANK];
+ 	/* Used in the interrupt handler */
+ 	struct bcm_kona_gpio *kona_gpio;
+ };
+@@ -86,14 +102,24 @@ static void bcm_kona_gpio_lock_gpio(struct bcm_kona_gpio *kona_gpio,
+ 	u32 val;
+ 	unsigned long flags;
+ 	int bank_id = GPIO_BANK(gpio);
++	int bit = GPIO_BIT(gpio);
++	struct bcm_kona_gpio_bank *bank = &kona_gpio->banks[bank_id];
+ 
+-	raw_spin_lock_irqsave(&kona_gpio->lock, flags);
++	if (bank->gpio_unlock_count[bit] == 0) {
++		dev_err(kona_gpio->gpio_chip.parent,
++			"Unbalanced locks for GPIO %u\n", gpio);
++		return;
++	}
+ 
+-	val = readl(kona_gpio->reg_base + GPIO_PWD_STATUS(bank_id));
+-	val |= BIT(gpio);
+-	bcm_kona_gpio_write_lock_regs(kona_gpio->reg_base, bank_id, val);
++	if (--bank->gpio_unlock_count[bit] == 0) {
++		raw_spin_lock_irqsave(&kona_gpio->lock, flags);
+ 
+-	raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
++		val = readl(kona_gpio->reg_base + GPIO_PWD_STATUS(bank_id));
++		val |= BIT(bit);
++		bcm_kona_gpio_write_lock_regs(kona_gpio->reg_base, bank_id, val);
++
++		raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
++	}
+ }
+ 
+ static void bcm_kona_gpio_unlock_gpio(struct bcm_kona_gpio *kona_gpio,
+@@ -102,14 +128,20 @@ static void bcm_kona_gpio_unlock_gpio(struct bcm_kona_gpio *kona_gpio,
+ 	u32 val;
+ 	unsigned long flags;
+ 	int bank_id = GPIO_BANK(gpio);
++	int bit = GPIO_BIT(gpio);
++	struct bcm_kona_gpio_bank *bank = &kona_gpio->banks[bank_id];
+ 
+-	raw_spin_lock_irqsave(&kona_gpio->lock, flags);
++	if (bank->gpio_unlock_count[bit] == 0) {
++		raw_spin_lock_irqsave(&kona_gpio->lock, flags);
+ 
+-	val = readl(kona_gpio->reg_base + GPIO_PWD_STATUS(bank_id));
+-	val &= ~BIT(gpio);
+-	bcm_kona_gpio_write_lock_regs(kona_gpio->reg_base, bank_id, val);
++		val = readl(kona_gpio->reg_base + GPIO_PWD_STATUS(bank_id));
++		val &= ~BIT(bit);
++		bcm_kona_gpio_write_lock_regs(kona_gpio->reg_base, bank_id, val);
+ 
+-	raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
++		raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
++	}
++
++	++bank->gpio_unlock_count[bit];
+ }
+ 
+ static int bcm_kona_gpio_get_dir(struct gpio_chip *chip, unsigned gpio)
+@@ -360,6 +392,7 @@ static void bcm_kona_gpio_irq_mask(struct irq_data *d)
+ 
+ 	kona_gpio = irq_data_get_irq_chip_data(d);
+ 	reg_base = kona_gpio->reg_base;
++
+ 	raw_spin_lock_irqsave(&kona_gpio->lock, flags);
+ 
+ 	val = readl(reg_base + GPIO_INT_MASK(bank_id));
+@@ -382,6 +415,7 @@ static void bcm_kona_gpio_irq_unmask(struct irq_data *d)
+ 
+ 	kona_gpio = irq_data_get_irq_chip_data(d);
+ 	reg_base = kona_gpio->reg_base;
++
+ 	raw_spin_lock_irqsave(&kona_gpio->lock, flags);
+ 
+ 	val = readl(reg_base + GPIO_INT_MSKCLR(bank_id));
+@@ -477,15 +511,26 @@ static void bcm_kona_gpio_irq_handler(struct irq_desc *desc)
+ static int bcm_kona_gpio_irq_reqres(struct irq_data *d)
+ {
+ 	struct bcm_kona_gpio *kona_gpio = irq_data_get_irq_chip_data(d);
++	unsigned int gpio = d->hwirq;
+ 
+-	return gpiochip_reqres_irq(&kona_gpio->gpio_chip, d->hwirq);
++	/*
++	 * We need to unlock the GPIO before any other operations are performed
++	 * on the relevant GPIO configuration registers
++	 */
++	bcm_kona_gpio_unlock_gpio(kona_gpio, gpio);
++
++	return gpiochip_reqres_irq(&kona_gpio->gpio_chip, gpio);
+ }
+ 
+ static void bcm_kona_gpio_irq_relres(struct irq_data *d)
+ {
+ 	struct bcm_kona_gpio *kona_gpio = irq_data_get_irq_chip_data(d);
++	unsigned int gpio = d->hwirq;
++
++	/* Once we no longer use it, lock the GPIO again */
++	bcm_kona_gpio_lock_gpio(kona_gpio, gpio);
+ 
+-	gpiochip_relres_irq(&kona_gpio->gpio_chip, d->hwirq);
++	gpiochip_relres_irq(&kona_gpio->gpio_chip, gpio);
+ }
+ 
+ static struct irq_chip bcm_gpio_irq_chip = {
+@@ -614,7 +659,7 @@ static int bcm_kona_gpio_probe(struct platform_device *pdev)
+ 		bank->irq = platform_get_irq(pdev, i);
+ 		bank->kona_gpio = kona_gpio;
+ 		if (bank->irq < 0) {
+-			dev_err(dev, "Couldn't get IRQ for bank %d", i);
++			dev_err(dev, "Couldn't get IRQ for bank %d\n", i);
+ 			ret = -ENOENT;
+ 			goto err_irq_domain;
+ 		}
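
The gpio_unlock_count[] array introduced above makes the bank's
write-protect register refcounted per GPIO: the GPIO-request path and the
IRQ-request path each take an "unlock" reference, and the hardware is only
re-locked when the last reference drops, exactly as the block comment in the
hunk describes. The counting discipline, distilled (register access replaced
by a stub, and the bank spinlock omitted for brevity):

	#include <linux/bug.h>

	static u8 unlock_count[32];

	static void hw_set_locked(unsigned int pin, bool locked)
	{
		/* stand-in for the GPIO_PWD_STATUS register update */
	}

	static void pin_unlock(unsigned int pin)
	{
		if (unlock_count[pin]++ == 0)
			hw_set_locked(pin, false);	/* first user unlocks */
	}

	static void pin_lock(unsigned int pin)
	{
		if (WARN_ON(unlock_count[pin] == 0))
			return;				/* unbalanced lock call */
		if (--unlock_count[pin] == 0)
			hw_set_locked(pin, true);	/* last user re-locks */
	}
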
+diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c
+index 75a3633ceddbb8..222279a9d82b2d 100644
+--- a/drivers/gpio/gpio-stmpe.c
++++ b/drivers/gpio/gpio-stmpe.c
+@@ -191,7 +191,7 @@ static void stmpe_gpio_irq_sync_unlock(struct irq_data *d)
+ 		[REG_IE][CSB] = STMPE_IDX_IEGPIOR_CSB,
+ 		[REG_IE][MSB] = STMPE_IDX_IEGPIOR_MSB,
+ 	};
+-	int i, j;
++	int ret, i, j;
+ 
+ 	/*
+ 	 * STMPE1600: to be able to get IRQ from pins,
+@@ -199,8 +199,16 @@ static void stmpe_gpio_irq_sync_unlock(struct irq_data *d)
+ 	 * GPSR or GPCR registers
+ 	 */
+ 	if (stmpe->partnum == STMPE1600) {
+-		stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_GPMR_LSB]);
+-		stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_GPMR_CSB]);
++		ret = stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_GPMR_LSB]);
++		if (ret < 0) {
++			dev_err(stmpe->dev, "Failed to read GPMR_LSB: %d\n", ret);
++			goto err;
++		}
++		ret = stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_GPMR_CSB]);
++		if (ret < 0) {
++			dev_err(stmpe->dev, "Failed to read GPMR_CSB: %d\n", ret);
++			goto err;
++		}
+ 	}
+ 
+ 	for (i = 0; i < CACHE_NR_REGS; i++) {
+@@ -222,6 +230,7 @@ static void stmpe_gpio_irq_sync_unlock(struct irq_data *d)
+ 		}
+ 	}
+ 
++err:
+ 	mutex_unlock(&stmpe_gpio->irq_lock);
+ }
+ 
+diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
+index 78ecd56123a3b6..148b4d1788a219 100644
+--- a/drivers/gpio/gpiolib-acpi.c
++++ b/drivers/gpio/gpiolib-acpi.c
+@@ -1691,6 +1691,20 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = {
+ 			.ignore_wake = "PNP0C50:00@8",
+ 		},
+ 	},
++	{
++		/*
++		 * Spurious wakeups from GPIO 11
++		 * Found in BIOS 1.04
++		 * https://gitlab.freedesktop.org/drm/amd/-/issues/3954
++		 */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++			DMI_MATCH(DMI_PRODUCT_FAMILY, "Acer Nitro V 14"),
++		},
++		.driver_data = &(struct acpi_gpiolib_dmi_quirk) {
++			.ignore_interrupt = "AMDI0030:00@11",
++		},
++	},
+ 	{} /* Terminating entry */
+ };
+ 
+diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
+index 44372f8647d51a..1e8f0bdb6ae3b4 100644
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -905,13 +905,13 @@ int gpiochip_get_ngpios(struct gpio_chip *gc, struct device *dev)
+ 	}
+ 
+ 	if (gc->ngpio == 0) {
+-		chip_err(gc, "tried to insert a GPIO chip with zero lines\n");
++		dev_err(dev, "tried to insert a GPIO chip with zero lines\n");
+ 		return -EINVAL;
+ 	}
+ 
+ 	if (gc->ngpio > FASTPATH_NGPIO)
+-		chip_warn(gc, "line cnt %u is greater than fast path cnt %u\n",
+-			gc->ngpio, FASTPATH_NGPIO);
++		dev_warn(dev, "line cnt %u is greater than fast path cnt %u\n",
++			 gc->ngpio, FASTPATH_NGPIO);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+index 0b28b2cf1517d1..d70855d7c61c1d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+@@ -3713,9 +3713,10 @@ int psp_init_cap_microcode(struct psp_context *psp, const char *chip_name)
+ 		if (err == -ENODEV) {
+ 			dev_warn(adev->dev, "cap microcode does not exist, skip\n");
+ 			err = 0;
+-			goto out;
++		} else {
++			dev_err(adev->dev, "fail to initialize cap microcode\n");
+ 		}
+-		dev_err(adev->dev, "fail to initialize cap microcode\n");
++		goto out;
+ 	}
+ 
+ 	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CAP];
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+index dbb63ce316f11e..42fd7669ac7d37 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+@@ -298,7 +298,7 @@ static int init_user_queue(struct process_queue_manager *pqm,
+ 	return 0;
+ 
+ free_gang_ctx_bo:
+-	amdgpu_amdkfd_free_gtt_mem(dev->adev, (*q)->gang_ctx_bo);
++	amdgpu_amdkfd_free_gtt_mem(dev->adev, &(*q)->gang_ctx_bo);
+ cleanup:
+ 	uninit_queue(*q);
+ 	*q = NULL;
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+index 0c0b9aa44dfa3a..99d2d3092ea540 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+@@ -607,7 +607,8 @@ static int smu_sys_set_pp_table(void *handle,
+ 		return -EIO;
+ 	}
+ 
+-	if (!smu_table->hardcode_pptable) {
++	if (!smu_table->hardcode_pptable || smu_table->power_play_table_size < size) {
++		kfree(smu_table->hardcode_pptable);
+ 		smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
+ 		if (!smu_table->hardcode_pptable)
+ 			return -ENOMEM;
+diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c
+index 6ee51003de3ce6..9fa13da513d24e 100644
+--- a/drivers/gpu/drm/display/drm_dp_helper.c
++++ b/drivers/gpu/drm/display/drm_dp_helper.c
+@@ -2421,7 +2421,7 @@ u8 drm_dp_dsc_sink_bpp_incr(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
+ {
+ 	u8 bpp_increment_dpcd = dsc_dpcd[DP_DSC_BITS_PER_PIXEL_INC - DP_DSC_SUPPORT];
+ 
+-	switch (bpp_increment_dpcd) {
++	switch (bpp_increment_dpcd & DP_DSC_BITS_PER_PIXEL_MASK) {
+ 	case DP_DSC_BITS_PER_PIXEL_1_16:
+ 		return 16;
+ 	case DP_DSC_BITS_PER_PIXEL_1_8:
+diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+index 5c397a2df70e28..5d27e1c733c527 100644
+--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
++++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+@@ -168,7 +168,7 @@ static int igt_ppgtt_alloc(void *arg)
+ 		return PTR_ERR(ppgtt);
+ 
+ 	if (!ppgtt->vm.allocate_va_range)
+-		goto err_ppgtt_cleanup;
++		goto ppgtt_vm_put;
+ 
+ 	/*
+ 	 * While we only allocate the page tables here and so we could
+@@ -236,7 +236,7 @@ static int igt_ppgtt_alloc(void *arg)
+ 			goto retry;
+ 	}
+ 	i915_gem_ww_ctx_fini(&ww);
+-
++ppgtt_vm_put:
+ 	i915_vm_put(&ppgtt->vm);
+ 	return err;
+ }
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h
+index e084406ebb0711..4f110be6b750d3 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h
+@@ -391,8 +391,8 @@ static const struct dpu_intf_cfg x1e80100_intf[] = {
+ 		.type = INTF_DP,
+ 		.controller_id = MSM_DP_CONTROLLER_2,
+ 		.prog_fetch_lines_worst_case = 24,
+-		.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 17),
+-		.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 16),
++		.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 16),
++		.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 17),
+ 	}, {
+ 		.name = "intf_7", .id = INTF_7,
+ 		.base = 0x3b000, .len = 0x280,
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c
+index 16f144cbc0c986..8ff496082902b1 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c
+@@ -42,9 +42,6 @@ static int dpu_wb_conn_atomic_check(struct drm_connector *connector,
+ 	if (!conn_state || !conn_state->connector) {
+ 		DPU_ERROR("invalid connector state\n");
+ 		return -EINVAL;
+-	} else if (conn_state->connector->status != connector_status_connected) {
+-		DPU_ERROR("connector not connected %d\n", conn_state->connector->status);
+-		return -EINVAL;
+ 	}
+ 
+ 	crtc = conn_state->crtc;
+diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
+index fba78193127dee..f775638d239a5c 100644
+--- a/drivers/gpu/drm/msm/msm_gem_submit.c
++++ b/drivers/gpu/drm/msm/msm_gem_submit.c
+@@ -787,8 +787,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
+ 			goto out;
+ 
+ 		if (!submit->cmd[i].size ||
+-			((submit->cmd[i].size + submit->cmd[i].offset) >
+-				obj->size / 4)) {
++		    (size_add(submit->cmd[i].size, submit->cmd[i].offset) > obj->size / 4)) {
+ 			SUBMIT_ERROR(submit, "invalid cmdstream size: %u\n", submit->cmd[i].size * 4);
+ 			ret = -EINVAL;
+ 			goto out;
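
The submit-size check above swaps a raw "size + offset" for size_add() from
<linux/overflow.h>. size_add() saturates at SIZE_MAX on overflow, so a
wrapped sum can no longer slip under the bound. A sketch (the limit
parameter is illustrative):

	#include <linux/overflow.h>

	static bool cmd_within_bounds(size_t size, size_t offset, size_t limit)
	{
		/*
		 * Saturating add: overflow yields SIZE_MAX, which always
		 * fails the comparison instead of wrapping to a small value.
		 */
		return size_add(size, offset) <= limit;
	}
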
+diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
+index 2dba7c5ffd2c62..92f4261305bd9d 100644
+--- a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
++++ b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
+@@ -587,7 +587,7 @@ static int rcar_mipi_dsi_startup(struct rcar_mipi_dsi *dsi,
+ 	for (timeout = 10; timeout > 0; --timeout) {
+ 		if ((rcar_mipi_dsi_read(dsi, PPICLSR) & PPICLSR_STPST) &&
+ 		    (rcar_mipi_dsi_read(dsi, PPIDLSR) & PPIDLSR_STPST) &&
+-		    (rcar_mipi_dsi_read(dsi, CLOCKSET1) & CLOCKSET1_LOCK))
++		    (rcar_mipi_dsi_read(dsi, CLOCKSET1) & CLOCKSET1_LOCK_PHY))
+ 			break;
+ 
+ 		usleep_range(1000, 2000);
+diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi_regs.h b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi_regs.h
+index f8114d11f2d158..a6b276f1d6ee15 100644
+--- a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi_regs.h
++++ b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi_regs.h
+@@ -142,7 +142,6 @@
+ 
+ #define CLOCKSET1			0x101c
+ #define CLOCKSET1_LOCK_PHY		(1 << 17)
+-#define CLOCKSET1_LOCK			(1 << 16)
+ #define CLOCKSET1_CLKSEL		(1 << 8)
+ #define CLOCKSET1_CLKINSEL_EXTAL	(0 << 2)
+ #define CLOCKSET1_CLKINSEL_DIG		(1 << 2)
+diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c
+index b99217b4e05d7d..90c6269ccd2920 100644
+--- a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c
++++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c
+@@ -311,11 +311,11 @@ int rzg2l_du_modeset_init(struct rzg2l_du_device *rcdu)
+ 	dev->mode_config.helper_private = &rzg2l_du_mode_config_helper;
+ 
+ 	/*
+-	 * The RZ DU uses the VSP1 for memory access, and is limited
+-	 * to frame sizes of 1920x1080.
++	 * The RZ DU was designed to support a frame size of 1920x1200 (landscape)
++	 * or 1200x1920 (portrait).
+ 	 */
+ 	dev->mode_config.max_width = 1920;
+-	dev->mode_config.max_height = 1080;
++	dev->mode_config.max_height = 1920;
+ 
+ 	rcdu->num_crtcs = hweight8(rcdu->info->channels_mask);
+ 
+diff --git a/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c b/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
+index 4ba869e0e794c7..cbd9584af32995 100644
+--- a/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
++++ b/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
+@@ -70,10 +70,17 @@ static int light_up_connector(struct kunit *test,
+ 	state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
+ 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
+ 
++retry:
+ 	conn_state = drm_atomic_get_connector_state(state, connector);
+ 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);
+ 
+ 	ret = drm_atomic_set_crtc_for_connector(conn_state, crtc);
++	if (ret == -EDEADLK) {
++		drm_atomic_state_clear(state);
++		ret = drm_modeset_backoff(ctx);
++		if (!ret)
++			goto retry;
++	}
+ 	KUNIT_EXPECT_EQ(test, ret, 0);
+ 
+ 	crtc_state = drm_atomic_get_crtc_state(state, crtc);
+diff --git a/drivers/gpu/drm/tidss/tidss_dispc.c b/drivers/gpu/drm/tidss/tidss_dispc.c
+index 1ad711f8d2a8bf..45f22ead3e61d3 100644
+--- a/drivers/gpu/drm/tidss/tidss_dispc.c
++++ b/drivers/gpu/drm/tidss/tidss_dispc.c
+@@ -700,7 +700,7 @@ void dispc_k2g_set_irqenable(struct dispc_device *dispc, dispc_irq_t mask)
+ {
+ 	dispc_irq_t old_mask = dispc_k2g_read_irqenable(dispc);
+ 
+-	/* clear the irqstatus for newly enabled irqs */
++	/* clear the irqstatus for irqs that will be enabled */
+ 	dispc_k2g_clear_irqstatus(dispc, (mask ^ old_mask) & mask);
+ 
+ 	dispc_k2g_vp_set_irqenable(dispc, 0, mask);
+@@ -708,6 +708,9 @@ void dispc_k2g_set_irqenable(struct dispc_device *dispc, dispc_irq_t mask)
+ 
+ 	dispc_write(dispc, DISPC_IRQENABLE_SET, (1 << 0) | (1 << 7));
+ 
++	/* clear the irqstatus for irqs that were disabled */
++	dispc_k2g_clear_irqstatus(dispc, (mask ^ old_mask) & old_mask);
++
+ 	/* flush posted write */
+ 	dispc_k2g_read_irqenable(dispc);
+ }
+@@ -780,24 +783,20 @@ static
+ void dispc_k3_clear_irqstatus(struct dispc_device *dispc, dispc_irq_t clearmask)
+ {
+ 	unsigned int i;
+-	u32 top_clear = 0;
+ 
+ 	for (i = 0; i < dispc->feat->num_vps; ++i) {
+-		if (clearmask & DSS_IRQ_VP_MASK(i)) {
++		if (clearmask & DSS_IRQ_VP_MASK(i))
+ 			dispc_k3_vp_write_irqstatus(dispc, i, clearmask);
+-			top_clear |= BIT(i);
+-		}
+ 	}
+ 	for (i = 0; i < dispc->feat->num_planes; ++i) {
+-		if (clearmask & DSS_IRQ_PLANE_MASK(i)) {
++		if (clearmask & DSS_IRQ_PLANE_MASK(i))
+ 			dispc_k3_vid_write_irqstatus(dispc, i, clearmask);
+-			top_clear |= BIT(4 + i);
+-		}
+ 	}
+ 	if (dispc->feat->subrev == DISPC_K2G)
+ 		return;
+ 
+-	dispc_write(dispc, DISPC_IRQSTATUS, top_clear);
++	/* always clear the top level irqstatus */
++	dispc_write(dispc, DISPC_IRQSTATUS, dispc_read(dispc, DISPC_IRQSTATUS));
+ 
+ 	/* Flush posted writes */
+ 	dispc_read(dispc, DISPC_IRQSTATUS);
+@@ -843,7 +842,7 @@ static void dispc_k3_set_irqenable(struct dispc_device *dispc,
+ 
+ 	old_mask = dispc_k3_read_irqenable(dispc);
+ 
+-	/* clear the irqstatus for newly enabled irqs */
++	/* clear the irqstatus for irqs that will be enabled */
+ 	dispc_k3_clear_irqstatus(dispc, (old_mask ^ mask) & mask);
+ 
+ 	for (i = 0; i < dispc->feat->num_vps; ++i) {
+@@ -868,6 +867,9 @@ static void dispc_k3_set_irqenable(struct dispc_device *dispc,
+ 	if (main_disable)
+ 		dispc_write(dispc, DISPC_IRQENABLE_CLR, main_disable);
+ 
++	/* clear the irqstatus for irqs that were disabled */
++	dispc_k3_clear_irqstatus(dispc, (old_mask ^ mask) & old_mask);
++
+ 	/* Flush posted writes */
+ 	dispc_read(dispc, DISPC_IRQENABLE_SET);
+ }
+@@ -2767,8 +2769,12 @@ static void dispc_init_errata(struct dispc_device *dispc)
+  */
+ static void dispc_softreset_k2g(struct dispc_device *dispc)
+ {
++	unsigned long flags;
++
++	spin_lock_irqsave(&dispc->tidss->wait_lock, flags);
+ 	dispc_set_irqenable(dispc, 0);
+ 	dispc_read_and_clear_irqstatus(dispc);
++	spin_unlock_irqrestore(&dispc->tidss->wait_lock, flags);
+ 
+ 	for (unsigned int vp_idx = 0; vp_idx < dispc->feat->num_vps; ++vp_idx)
+ 		VP_REG_FLD_MOD(dispc, vp_idx, DISPC_VP_CONTROL, 0, 0, 0);
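
Both set_irqenable paths now clear stale status in two passes: bits about to
be enabled are cleared before the enable (so an old latched event cannot
fire spuriously), and bits just disabled are cleared afterwards. The masks
come from XOR algebra: old ^ new selects the changed bits, AND with new
keeps the newly-enabled ones, AND with old keeps the newly-disabled ones. In
sketch form:

	#include <linux/types.h>

	static void irq_delta_masks(u32 old_mask, u32 new_mask,
				    u32 *newly_on, u32 *newly_off)
	{
		u32 changed = old_mask ^ new_mask;

		*newly_on  = changed & new_mask;	/* clear before enabling */
		*newly_off = changed & old_mask;	/* clear after disabling */
	}

For example, old = 0b0110 and new = 0b0011 give changed = 0b0101,
newly_on = 0b0001, newly_off = 0b0100.
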
+diff --git a/drivers/gpu/drm/tidss/tidss_irq.c b/drivers/gpu/drm/tidss/tidss_irq.c
+index 604334ef526a04..d053dbb9d28c5d 100644
+--- a/drivers/gpu/drm/tidss/tidss_irq.c
++++ b/drivers/gpu/drm/tidss/tidss_irq.c
+@@ -60,7 +60,9 @@ static irqreturn_t tidss_irq_handler(int irq, void *arg)
+ 	unsigned int id;
+ 	dispc_irq_t irqstatus;
+ 
++	spin_lock(&tidss->wait_lock);
+ 	irqstatus = dispc_read_and_clear_irqstatus(tidss->dispc);
++	spin_unlock(&tidss->wait_lock);
+ 
+ 	for (id = 0; id < tidss->num_crtcs; id++) {
+ 		struct drm_crtc *crtc = tidss->crtcs[id];
+diff --git a/drivers/gpu/drm/v3d/v3d_perfmon.c b/drivers/gpu/drm/v3d/v3d_perfmon.c
+index e3013ac3a5c2a6..1abfd738a6017d 100644
+--- a/drivers/gpu/drm/v3d/v3d_perfmon.c
++++ b/drivers/gpu/drm/v3d/v3d_perfmon.c
+@@ -384,6 +384,7 @@ int v3d_perfmon_destroy_ioctl(struct drm_device *dev, void *data,
+ {
+ 	struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
+ 	struct drm_v3d_perfmon_destroy *req = data;
++	struct v3d_dev *v3d = v3d_priv->v3d;
+ 	struct v3d_perfmon *perfmon;
+ 
+ 	mutex_lock(&v3d_priv->perfmon.lock);
+@@ -393,6 +394,10 @@ int v3d_perfmon_destroy_ioctl(struct drm_device *dev, void *data,
+ 	if (!perfmon)
+ 		return -EINVAL;
+ 
++	/* If the active perfmon is being destroyed, stop it first */
++	if (perfmon == v3d->active_perfmon)
++		v3d_perfmon_stop(v3d, perfmon, false);
++
+ 	v3d_perfmon_put(perfmon);
+ 
+ 	return 0;
+diff --git a/drivers/gpu/drm/xe/xe_drm_client.c b/drivers/gpu/drm/xe/xe_drm_client.c
+index fb52a23e28f84e..a89fbfbdab329f 100644
+--- a/drivers/gpu/drm/xe/xe_drm_client.c
++++ b/drivers/gpu/drm/xe/xe_drm_client.c
+@@ -135,8 +135,8 @@ void xe_drm_client_add_bo(struct xe_drm_client *client,
+ 	XE_WARN_ON(bo->client);
+ 	XE_WARN_ON(!list_empty(&bo->client_link));
+ 
+-	spin_lock(&client->bos_lock);
+ 	bo->client = xe_drm_client_get(client);
++	spin_lock(&client->bos_lock);
+ 	list_add_tail(&bo->client_link, &client->bos_list);
+ 	spin_unlock(&client->bos_lock);
+ }
+diff --git a/drivers/gpu/drm/xe/xe_trace_bo.h b/drivers/gpu/drm/xe/xe_trace_bo.h
+index 9b1a1d4304ae18..ba0f61e7d2d6b9 100644
+--- a/drivers/gpu/drm/xe/xe_trace_bo.h
++++ b/drivers/gpu/drm/xe/xe_trace_bo.h
+@@ -55,8 +55,8 @@ TRACE_EVENT(xe_bo_move,
+ 	    TP_STRUCT__entry(
+ 		     __field(struct xe_bo *, bo)
+ 		     __field(size_t, size)
+-		     __field(u32, new_placement)
+-		     __field(u32, old_placement)
++		     __string(new_placement_name, xe_mem_type_to_name[new_placement])
++		     __string(old_placement_name, xe_mem_type_to_name[old_placement])
+ 		     __string(device_id, __dev_name_bo(bo))
+ 		     __field(bool, move_lacks_source)
+ 			),
+@@ -64,15 +64,15 @@ TRACE_EVENT(xe_bo_move,
+ 	    TP_fast_assign(
+ 		   __entry->bo      = bo;
+ 		   __entry->size = bo->size;
+-		   __entry->new_placement = new_placement;
+-		   __entry->old_placement = old_placement;
++		   __assign_str(new_placement_name);
++		   __assign_str(old_placement_name);
+ 		   __assign_str(device_id);
+ 		   __entry->move_lacks_source = move_lacks_source;
+ 		   ),
+ 	    TP_printk("move_lacks_source:%s, migrate object %p [size %zu] from %s to %s device_id:%s",
+ 		      __entry->move_lacks_source ? "yes" : "no", __entry->bo, __entry->size,
+-		      xe_mem_type_to_name[__entry->old_placement],
+-		      xe_mem_type_to_name[__entry->new_placement], __get_str(device_id))
++		      __get_str(old_placement_name),
++		      __get_str(new_placement_name), __get_str(device_id))
+ );
+ 
+ DECLARE_EVENT_CLASS(xe_vma,
+diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
+index e98528777faaec..710674ef40a973 100644
+--- a/drivers/gpu/host1x/dev.c
++++ b/drivers/gpu/host1x/dev.c
+@@ -625,6 +625,8 @@ static int host1x_probe(struct platform_device *pdev)
+ 		goto free_contexts;
+ 	}
+ 
++	mutex_init(&host->intr_mutex);
++
+ 	pm_runtime_enable(&pdev->dev);
+ 
+ 	err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
+diff --git a/drivers/gpu/host1x/intr.c b/drivers/gpu/host1x/intr.c
+index b3285dd101804c..f77a678949e96b 100644
+--- a/drivers/gpu/host1x/intr.c
++++ b/drivers/gpu/host1x/intr.c
+@@ -104,8 +104,6 @@ int host1x_intr_init(struct host1x *host)
+ 	unsigned int id;
+ 	int i, err;
+ 
+-	mutex_init(&host->intr_mutex);
+-
+ 	for (id = 0; id < host1x_syncpt_nb_pts(host); ++id) {
+ 		struct host1x_syncpt *syncpt = &host->syncpt[id];
+ 
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index 369414c92fccbe..93b5c648ef82c9 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -1673,9 +1673,12 @@ static int mt_input_configured(struct hid_device *hdev, struct hid_input *hi)
+ 		break;
+ 	}
+ 
+-	if (suffix)
++	if (suffix) {
+ 		hi->input->name = devm_kasprintf(&hdev->dev, GFP_KERNEL,
+ 						 "%s %s", hdev->name, suffix);
++		if (!hi->input->name)
++			return -ENOMEM;
++	}
+ 
+ 	return 0;
+ }
+diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c
+index bf8b633114be6a..7b359668987854 100644
+--- a/drivers/hid/hid-steam.c
++++ b/drivers/hid/hid-steam.c
+@@ -313,6 +313,7 @@ struct steam_device {
+ 	u16 rumble_left;
+ 	u16 rumble_right;
+ 	unsigned int sensor_timestamp_us;
++	struct work_struct unregister_work;
+ };
+ 
+ static int steam_recv_report(struct steam_device *steam,
+@@ -1072,6 +1073,31 @@ static void steam_mode_switch_cb(struct work_struct *work)
+ 	}
+ }
+ 
++static void steam_work_unregister_cb(struct work_struct *work)
++{
++	struct steam_device *steam = container_of(work, struct steam_device,
++							unregister_work);
++	unsigned long flags;
++	bool connected;
++	bool opened;
++
++	spin_lock_irqsave(&steam->lock, flags);
++	opened = steam->client_opened;
++	connected = steam->connected;
++	spin_unlock_irqrestore(&steam->lock, flags);
++
++	if (connected) {
++		if (opened) {
++			steam_sensors_unregister(steam);
++			steam_input_unregister(steam);
++		} else {
++			steam_set_lizard_mode(steam, lizard_mode);
++			steam_input_register(steam);
++			steam_sensors_register(steam);
++		}
++	}
++}
++
+ static bool steam_is_valve_interface(struct hid_device *hdev)
+ {
+ 	struct hid_report_enum *rep_enum;
+@@ -1117,8 +1143,7 @@ static int steam_client_ll_open(struct hid_device *hdev)
+ 	steam->client_opened++;
+ 	spin_unlock_irqrestore(&steam->lock, flags);
+ 
+-	steam_sensors_unregister(steam);
+-	steam_input_unregister(steam);
++	schedule_work(&steam->unregister_work);
+ 
+ 	return 0;
+ }
+@@ -1135,11 +1160,7 @@ static void steam_client_ll_close(struct hid_device *hdev)
+ 	connected = steam->connected && !steam->client_opened;
+ 	spin_unlock_irqrestore(&steam->lock, flags);
+ 
+-	if (connected) {
+-		steam_set_lizard_mode(steam, lizard_mode);
+-		steam_input_register(steam);
+-		steam_sensors_register(steam);
+-	}
++	schedule_work(&steam->unregister_work);
+ }
+ 
+ static int steam_client_ll_raw_request(struct hid_device *hdev,
+@@ -1231,6 +1252,7 @@ static int steam_probe(struct hid_device *hdev,
+ 	INIT_LIST_HEAD(&steam->list);
+ 	INIT_WORK(&steam->rumble_work, steam_haptic_rumble_cb);
+ 	steam->sensor_timestamp_us = 0;
++	INIT_WORK(&steam->unregister_work, steam_work_unregister_cb);
+ 
+ 	/*
+ 	 * With the real steam controller interface, do not connect hidraw.
+@@ -1291,6 +1313,7 @@ static int steam_probe(struct hid_device *hdev,
+ 	cancel_work_sync(&steam->work_connect);
+ 	cancel_delayed_work_sync(&steam->mode_switch);
+ 	cancel_work_sync(&steam->rumble_work);
++	cancel_work_sync(&steam->unregister_work);
+ 
+ 	return ret;
+ }
+@@ -1306,6 +1329,8 @@ static void steam_remove(struct hid_device *hdev)
+ 
+ 	cancel_delayed_work_sync(&steam->mode_switch);
+ 	cancel_work_sync(&steam->work_connect);
++	cancel_work_sync(&steam->rumble_work);
++	cancel_work_sync(&steam->unregister_work);
+ 	hid_destroy_device(steam->client_hdev);
+ 	steam->client_hdev = NULL;
+ 	steam->client_opened = 0;
+@@ -1592,7 +1617,7 @@ static void steam_do_deck_input_event(struct steam_device *steam,
+ 
+ 	if (!(b9 & BIT(6)) && steam->did_mode_switch) {
+ 		steam->did_mode_switch = false;
+-		cancel_delayed_work_sync(&steam->mode_switch);
++		cancel_delayed_work(&steam->mode_switch);
+ 	} else if (!steam->client_opened && (b9 & BIT(6)) && !steam->did_mode_switch) {
+ 		steam->did_mode_switch = true;
+ 		schedule_delayed_work(&steam->mode_switch, 45 * HZ / 100);
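
The new unregister_work moves input/sensor registration changes out of the
hidraw ll_open/ll_close callbacks into process context. Note that the work
callback samples client_opened and connected under the spinlock and acts on
the state it sees, so coalesced or reordered schedule_work() calls still
converge on the right configuration. A minimal sketch of this
"sync to current state" deferral (types and helpers are illustrative):

	#include <linux/spinlock.h>
	#include <linux/workqueue.h>

	struct dev_state {
		spinlock_t lock;
		bool opened;
		struct work_struct sync_work;
	};

	static void sync_work_cb(struct work_struct *work)
	{
		struct dev_state *s = container_of(work, struct dev_state,
						   sync_work);
		unsigned long flags;
		bool opened;

		/* Sample the latest state; earlier queued events are subsumed. */
		spin_lock_irqsave(&s->lock, flags);
		opened = s->opened;
		spin_unlock_irqrestore(&s->lock, flags);

		if (opened) {
			/* tear down user-facing interfaces */
		} else {
			/* (re)create user-facing interfaces */
		}
	}

	static void dev_open_close_event(struct dev_state *s, bool opened)
	{
		unsigned long flags;

		spin_lock_irqsave(&s->lock, flags);
		s->opened = opened;
		spin_unlock_irqrestore(&s->lock, flags);

		schedule_work(&s->sync_work);	/* heavy work runs later */
	}
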
+diff --git a/drivers/hid/hid-thrustmaster.c b/drivers/hid/hid-thrustmaster.c
+index 6c3e758bbb09e3..3b81468a1df297 100644
+--- a/drivers/hid/hid-thrustmaster.c
++++ b/drivers/hid/hid-thrustmaster.c
+@@ -171,7 +171,7 @@ static void thrustmaster_interrupts(struct hid_device *hdev)
+ 	b_ep = ep->desc.bEndpointAddress;
+ 
+ 	/* Are the expected endpoints present? */
+-	u8 ep_addr[1] = {b_ep};
++	u8 ep_addr[2] = {b_ep, 0};
+ 
+ 	if (!usb_check_int_endpoints(usbif, ep_addr)) {
+ 		hid_err(hdev, "Unexpected non-int endpoint\n");
+diff --git a/drivers/hid/hid-winwing.c b/drivers/hid/hid-winwing.c
+index 831b760c66ea72..d4afbbd2780797 100644
+--- a/drivers/hid/hid-winwing.c
++++ b/drivers/hid/hid-winwing.c
+@@ -106,6 +106,8 @@ static int winwing_init_led(struct hid_device *hdev,
+ 						"%s::%s",
+ 						dev_name(&input->dev),
+ 						info->led_name);
++		if (!led->cdev.name)
++			return -ENOMEM;
+ 
+ 		ret = devm_led_classdev_register(&hdev->dev, &led->cdev);
+ 		if (ret)
+diff --git a/drivers/i3c/master/Kconfig b/drivers/i3c/master/Kconfig
+index 90dee3ec552097..77da199c7413e6 100644
+--- a/drivers/i3c/master/Kconfig
++++ b/drivers/i3c/master/Kconfig
+@@ -57,3 +57,14 @@ config MIPI_I3C_HCI
+ 
+ 	  This driver can also be built as a module.  If so, the module will be
+ 	  called mipi-i3c-hci.
++
++config MIPI_I3C_HCI_PCI
++	tristate "MIPI I3C Host Controller Interface PCI support"
++	depends on MIPI_I3C_HCI
++	depends on PCI
++	help
++	  Support for MIPI I3C Host Controller Interface compatible hardware
++	  on the PCI bus.
++
++	  This driver can also be built as a module. If so, the module will be
++	  called mipi-i3c-hci-pci.
+diff --git a/drivers/i3c/master/mipi-i3c-hci/Makefile b/drivers/i3c/master/mipi-i3c-hci/Makefile
+index 1f8cd5c48fdef3..e3d3ef757035f0 100644
+--- a/drivers/i3c/master/mipi-i3c-hci/Makefile
++++ b/drivers/i3c/master/mipi-i3c-hci/Makefile
+@@ -5,3 +5,4 @@ mipi-i3c-hci-y				:= core.o ext_caps.o pio.o dma.o \
+ 					   cmd_v1.o cmd_v2.o \
+ 					   dat_v1.o dct_v1.o \
+ 					   hci_quirks.o
++obj-$(CONFIG_MIPI_I3C_HCI_PCI)		+= mipi-i3c-hci-pci.o
+diff --git a/drivers/i3c/master/mipi-i3c-hci/dma.c b/drivers/i3c/master/mipi-i3c-hci/dma.c
+index 13adc584009429..fe955703e59b58 100644
+--- a/drivers/i3c/master/mipi-i3c-hci/dma.c
++++ b/drivers/i3c/master/mipi-i3c-hci/dma.c
+@@ -762,9 +762,26 @@ static bool hci_dma_irq_handler(struct i3c_hci *hci, unsigned int mask)
+ 			complete(&rh->op_done);
+ 
+ 		if (status & INTR_TRANSFER_ABORT) {
++			u32 ring_status;
++
+ 			dev_notice_ratelimited(&hci->master.dev,
+ 				"ring %d: Transfer Aborted\n", i);
+ 			mipi_i3c_hci_resume(hci);
++			ring_status = rh_reg_read(RING_STATUS);
++			if (!(ring_status & RING_STATUS_RUNNING) &&
++			    status & INTR_TRANSFER_COMPLETION &&
++			    status & INTR_TRANSFER_ERR) {
++				/*
++				 * A ring stop followed by a run is an
++				 * Intel-specific quirk required after
++				 * resuming the halted controller. Do it only
++				 * when the ring is not in the running state
++				 * after a transfer error.
++				 */
++				rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE);
++				rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE |
++							   RING_CTRL_RUN_STOP);
++			}
+ 		}
+ 		if (status & INTR_WARN_INS_STOP_MODE)
+ 			dev_warn_ratelimited(&hci->master.dev,
+diff --git a/drivers/i3c/master/mipi-i3c-hci/mipi-i3c-hci-pci.c b/drivers/i3c/master/mipi-i3c-hci/mipi-i3c-hci-pci.c
+new file mode 100644
+index 00000000000000..c6c3a3ec11eae3
+--- /dev/null
++++ b/drivers/i3c/master/mipi-i3c-hci/mipi-i3c-hci-pci.c
+@@ -0,0 +1,148 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * PCI glue code for MIPI I3C HCI driver
++ *
++ * Copyright (C) 2024 Intel Corporation
++ *
++ * Author: Jarkko Nikula <jarkko.nikula@linux.intel.com>
++ */
++#include <linux/acpi.h>
++#include <linux/idr.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/pci.h>
++#include <linux/platform_device.h>
++
++struct mipi_i3c_hci_pci_info {
++	int (*init)(struct pci_dev *pci);
++};
++
++#define INTEL_PRIV_OFFSET		0x2b0
++#define INTEL_PRIV_SIZE			0x28
++#define INTEL_PRIV_RESETS		0x04
++#define INTEL_PRIV_RESETS_RESET		BIT(0)
++#define INTEL_PRIV_RESETS_RESET_DONE	BIT(1)
++
++static DEFINE_IDA(mipi_i3c_hci_pci_ida);
++
++static int mipi_i3c_hci_pci_intel_init(struct pci_dev *pci)
++{
++	unsigned long timeout;
++	void __iomem *priv;
++
++	priv = devm_ioremap(&pci->dev,
++			    pci_resource_start(pci, 0) + INTEL_PRIV_OFFSET,
++			    INTEL_PRIV_SIZE);
++	if (!priv)
++		return -ENOMEM;
++
++	/* Assert reset, wait for completion and release reset */
++	writel(0, priv + INTEL_PRIV_RESETS);
++	timeout = jiffies + msecs_to_jiffies(10);
++	while (!(readl(priv + INTEL_PRIV_RESETS) &
++		 INTEL_PRIV_RESETS_RESET_DONE)) {
++		if (time_after(jiffies, timeout))
++			break;
++		cpu_relax();
++	}
++	writel(INTEL_PRIV_RESETS_RESET, priv + INTEL_PRIV_RESETS);
++
++	return 0;
++}
++
++static struct mipi_i3c_hci_pci_info intel_info = {
++	.init = mipi_i3c_hci_pci_intel_init,
++};
++
++static int mipi_i3c_hci_pci_probe(struct pci_dev *pci,
++				  const struct pci_device_id *id)
++{
++	struct mipi_i3c_hci_pci_info *info;
++	struct platform_device *pdev;
++	struct resource res[2];
++	int dev_id, ret;
++
++	ret = pcim_enable_device(pci);
++	if (ret)
++		return ret;
++
++	pci_set_master(pci);
++
++	memset(&res, 0, sizeof(res));
++
++	res[0].flags = IORESOURCE_MEM;
++	res[0].start = pci_resource_start(pci, 0);
++	res[0].end = pci_resource_end(pci, 0);
++
++	res[1].flags = IORESOURCE_IRQ;
++	res[1].start = pci->irq;
++	res[1].end = pci->irq;
++
++	dev_id = ida_alloc(&mipi_i3c_hci_pci_ida, GFP_KERNEL);
++	if (dev_id < 0)
++		return dev_id;
++
++	pdev = platform_device_alloc("mipi-i3c-hci", dev_id);
++	if (!pdev)
++		return -ENOMEM;
++
++	pdev->dev.parent = &pci->dev;
++	device_set_node(&pdev->dev, dev_fwnode(&pci->dev));
++
++	ret = platform_device_add_resources(pdev, res, ARRAY_SIZE(res));
++	if (ret)
++		goto err;
++
++	info = (struct mipi_i3c_hci_pci_info *)id->driver_data;
++	if (info && info->init) {
++		ret = info->init(pci);
++		if (ret)
++			goto err;
++	}
++
++	ret = platform_device_add(pdev);
++	if (ret)
++		goto err;
++
++	pci_set_drvdata(pci, pdev);
++
++	return 0;
++
++err:
++	platform_device_put(pdev);
++	ida_free(&mipi_i3c_hci_pci_ida, dev_id);
++	return ret;
++}
++
++static void mipi_i3c_hci_pci_remove(struct pci_dev *pci)
++{
++	struct platform_device *pdev = pci_get_drvdata(pci);
++	int dev_id = pdev->id;
++
++	platform_device_unregister(pdev);
++	ida_free(&mipi_i3c_hci_pci_ida, dev_id);
++}
++
++static const struct pci_device_id mipi_i3c_hci_pci_devices[] = {
++	/* Panther Lake-H */
++	{ PCI_VDEVICE(INTEL, 0xe37c), (kernel_ulong_t)&intel_info},
++	{ PCI_VDEVICE(INTEL, 0xe36f), (kernel_ulong_t)&intel_info},
++	/* Panther Lake-P */
++	{ PCI_VDEVICE(INTEL, 0xe47c), (kernel_ulong_t)&intel_info},
++	{ PCI_VDEVICE(INTEL, 0xe46f), (kernel_ulong_t)&intel_info},
++	{ },
++};
++MODULE_DEVICE_TABLE(pci, mipi_i3c_hci_pci_devices);
++
++static struct pci_driver mipi_i3c_hci_pci_driver = {
++	.name = "mipi_i3c_hci_pci",
++	.id_table = mipi_i3c_hci_pci_devices,
++	.probe = mipi_i3c_hci_pci_probe,
++	.remove = mipi_i3c_hci_pci_remove,
++};
++
++module_pci_driver(mipi_i3c_hci_pci_driver);
++
++MODULE_AUTHOR("Jarkko Nikula <jarkko.nikula@intel.com>");
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("MIPI I3C HCI driver on PCI bus");
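
The Intel init above open-codes a jiffies-bounded register poll for the
reset handshake. For reference, the same wait can usually be written with
readl_poll_timeout() from <linux/iopoll.h>; this is a sketch of the
alternative, not what the driver does:

	#include <linux/iopoll.h>

	static int intel_wait_reset_done(void __iomem *priv)
	{
		u32 val;

		/* Poll every ~10us, give up after 10ms. */
		return readl_poll_timeout(priv + INTEL_PRIV_RESETS, val,
					  val & INTEL_PRIV_RESETS_RESET_DONE,
					  10, 10000);
	}

One behavioral difference worth noting: readl_poll_timeout() returns
-ETIMEDOUT on the deadline, while the code above silently proceeds and
releases reset regardless.
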
+diff --git a/drivers/infiniband/hw/efa/efa_main.c b/drivers/infiniband/hw/efa/efa_main.c
+index ad225823e6f2fe..45a4564c670c01 100644
+--- a/drivers/infiniband/hw/efa/efa_main.c
++++ b/drivers/infiniband/hw/efa/efa_main.c
+@@ -470,7 +470,6 @@ static void efa_ib_device_remove(struct efa_dev *dev)
+ 	ibdev_info(&dev->ibdev, "Unregister ib device\n");
+ 	ib_unregister_device(&dev->ibdev);
+ 	efa_destroy_eqs(dev);
+-	efa_com_dev_reset(&dev->edev, EFA_REGS_RESET_NORMAL);
+ 	efa_release_doorbell_bar(dev);
+ }
+ 
+@@ -643,12 +642,14 @@ static struct efa_dev *efa_probe_device(struct pci_dev *pdev)
+ 	return ERR_PTR(err);
+ }
+ 
+-static void efa_remove_device(struct pci_dev *pdev)
++static void efa_remove_device(struct pci_dev *pdev,
++			      enum efa_regs_reset_reason_types reset_reason)
+ {
+ 	struct efa_dev *dev = pci_get_drvdata(pdev);
+ 	struct efa_com_dev *edev;
+ 
+ 	edev = &dev->edev;
++	efa_com_dev_reset(edev, reset_reason);
+ 	efa_com_admin_destroy(edev);
+ 	efa_free_irq(dev, &dev->admin_irq);
+ 	efa_disable_msix(dev);
+@@ -676,7 +677,7 @@ static int efa_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	return 0;
+ 
+ err_remove_device:
+-	efa_remove_device(pdev);
++	efa_remove_device(pdev, EFA_REGS_RESET_INIT_ERR);
+ 	return err;
+ }
+ 
+@@ -685,7 +686,7 @@ static void efa_remove(struct pci_dev *pdev)
+ 	struct efa_dev *dev = pci_get_drvdata(pdev);
+ 
+ 	efa_ib_device_remove(dev);
+-	efa_remove_device(pdev);
++	efa_remove_device(pdev, EFA_REGS_RESET_NORMAL);
+ }
+ 
+ static void efa_shutdown(struct pci_dev *pdev)
+diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
+index 601fb4ee69009e..6fb2f2919ab1ff 100644
+--- a/drivers/iommu/amd/amd_iommu_types.h
++++ b/drivers/iommu/amd/amd_iommu_types.h
+@@ -175,6 +175,7 @@
+ #define CONTROL_GAM_EN		25
+ #define CONTROL_GALOG_EN	28
+ #define CONTROL_GAINT_EN	29
++#define CONTROL_EPH_EN		45
+ #define CONTROL_XT_EN		50
+ #define CONTROL_INTCAPXT_EN	51
+ #define CONTROL_IRTCACHEDIS	59
+diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
+index 43131c3a21726f..dbe2d13972feff 100644
+--- a/drivers/iommu/amd/init.c
++++ b/drivers/iommu/amd/init.c
+@@ -2647,6 +2647,10 @@ static void iommu_init_flags(struct amd_iommu *iommu)
+ 
+ 	/* Set IOTLB invalidation timeout to 1s */
+ 	iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S);
++
++	/* Enable Enhanced Peripheral Page Request Handling */
++	if (check_feature(FEATURE_EPHSUP))
++		iommu_feature_enable(iommu, CONTROL_EPH_EN);
+ }
+ 
+ static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
+diff --git a/drivers/iommu/io-pgfault.c b/drivers/iommu/io-pgfault.c
+index 4674e618797c15..8b5926c1452edb 100644
+--- a/drivers/iommu/io-pgfault.c
++++ b/drivers/iommu/io-pgfault.c
+@@ -478,6 +478,7 @@ void iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev)
+ 
+ 		ops->page_response(dev, iopf, &resp);
+ 		list_del_init(&group->pending_node);
++		iopf_free_group(group);
+ 	}
+ 	mutex_unlock(&fault_param->lock);
+ 
+diff --git a/drivers/media/dvb-frontends/cxd2841er.c b/drivers/media/dvb-frontends/cxd2841er.c
+index d925ca24183b50..415f1f91cc3072 100644
+--- a/drivers/media/dvb-frontends/cxd2841er.c
++++ b/drivers/media/dvb-frontends/cxd2841er.c
+@@ -311,12 +311,8 @@ static int cxd2841er_set_reg_bits(struct cxd2841er_priv *priv,
+ 
+ static u32 cxd2841er_calc_iffreq_xtal(enum cxd2841er_xtal xtal, u32 ifhz)
+ {
+-	u64 tmp;
+-
+-	tmp = (u64) ifhz * 16777216;
+-	do_div(tmp, ((xtal == SONY_XTAL_24000) ? 48000000 : 41000000));
+-
+-	return (u32) tmp;
++	return div_u64(ifhz * 16777216ull,
++		       (xtal == SONY_XTAL_24000) ? 48000000 : 41000000);
+ }
+ 
+ static u32 cxd2841er_calc_iffreq(u32 ifhz)
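
The cxd2841er change replaces a u64 temporary plus do_div() with div_u64().
div_u64() takes a u64 dividend and a u32 divisor, which is exactly this
case, and it stays correct on 32-bit builds where a plain '/' on a u64 would
drag in a libgcc helper. A sketch of the idiom (names are illustrative):

	#include <linux/math64.h>

	/* Scale a Hz value by 2^24 / ref_hz without truncating the multiply. */
	static u32 scale_if_freq(u32 ifhz, u32 ref_hz)
	{
		/* Widen before multiplying; 16777216 = 2^24. */
		return div_u64((u64)ifhz * 16777216, ref_hz);
	}
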
+diff --git a/drivers/media/i2c/ds90ub913.c b/drivers/media/i2c/ds90ub913.c
+index b5375d73662996..7670d6c82d923e 100644
+--- a/drivers/media/i2c/ds90ub913.c
++++ b/drivers/media/i2c/ds90ub913.c
+@@ -8,6 +8,7 @@
+  * Copyright (c) 2023 Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+  */
+ 
++#include <linux/bitfield.h>
+ #include <linux/clk-provider.h>
+ #include <linux/clk.h>
+ #include <linux/delay.h>
+@@ -146,6 +147,19 @@ static int ub913_write(const struct ub913_data *priv, u8 reg, u8 val)
+ 	return ret;
+ }
+ 
++static int ub913_update_bits(const struct ub913_data *priv, u8 reg, u8 mask,
++			     u8 val)
++{
++	int ret;
++
++	ret = regmap_update_bits(priv->regmap, reg, mask, val);
++	if (ret < 0)
++		dev_err(&priv->client->dev,
++			"Cannot update register 0x%02x %d!\n", reg, ret);
++
++	return ret;
++}
++
+ /*
+  * GPIO chip
+  */
+@@ -733,10 +747,13 @@ static int ub913_hw_init(struct ub913_data *priv)
+ 	if (ret)
+ 		return dev_err_probe(dev, ret, "i2c master init failed\n");
+ 
+-	ub913_read(priv, UB913_REG_GENERAL_CFG, &v);
+-	v &= ~UB913_REG_GENERAL_CFG_PCLK_RISING;
+-	v |= priv->pclk_polarity_rising ? UB913_REG_GENERAL_CFG_PCLK_RISING : 0;
+-	ub913_write(priv, UB913_REG_GENERAL_CFG, v);
++	ret = ub913_update_bits(priv, UB913_REG_GENERAL_CFG,
++				UB913_REG_GENERAL_CFG_PCLK_RISING,
++				FIELD_PREP(UB913_REG_GENERAL_CFG_PCLK_RISING,
++					   priv->pclk_polarity_rising));
++
++	if (ret)
++		return ret;
+ 
+ 	return 0;
+ }
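
regmap_update_bits() performs the read-modify-write under the regmap lock, and FIELD_PREP() places a value at the bit position named by the mask. An open-coded equivalent of the GENERAL_CFG update above, for illustration only (the regmap version additionally serializes against concurrent users):

static int ub913_set_pclk_polarity_sketch(struct ub913_data *priv)
{
	u8 v;
	int ret;

	ret = ub913_read(priv, UB913_REG_GENERAL_CFG, &v);
	if (ret)
		return ret;

	v &= ~UB913_REG_GENERAL_CFG_PCLK_RISING;
	if (priv->pclk_polarity_rising)
		v |= UB913_REG_GENERAL_CFG_PCLK_RISING; /* FIELD_PREP(mask, 1) == mask */

	return ub913_write(priv, UB913_REG_GENERAL_CFG, v);
}
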
+diff --git a/drivers/media/i2c/ds90ub953.c b/drivers/media/i2c/ds90ub953.c
+index 10daecf6f45798..f0bad3e64f23dc 100644
+--- a/drivers/media/i2c/ds90ub953.c
++++ b/drivers/media/i2c/ds90ub953.c
+@@ -398,8 +398,13 @@ static int ub953_gpiochip_probe(struct ub953_data *priv)
+ 	int ret;
+ 
+ 	/* Set all GPIOs to local input mode */
+-	ub953_write(priv, UB953_REG_LOCAL_GPIO_DATA, 0);
+-	ub953_write(priv, UB953_REG_GPIO_INPUT_CTRL, 0xf);
++	ret = ub953_write(priv, UB953_REG_LOCAL_GPIO_DATA, 0);
++	if (ret)
++		return ret;
++
++	ret = ub953_write(priv, UB953_REG_GPIO_INPUT_CTRL, 0xf);
++	if (ret)
++		return ret;
+ 
+ 	gc->label = dev_name(dev);
+ 	gc->parent = dev;
+@@ -961,10 +966,11 @@ static void ub953_calc_clkout_params(struct ub953_data *priv,
+ 	clkout_data->rate = clkout_rate;
+ }
+ 
+-static void ub953_write_clkout_regs(struct ub953_data *priv,
+-				    const struct ub953_clkout_data *clkout_data)
++static int ub953_write_clkout_regs(struct ub953_data *priv,
++				   const struct ub953_clkout_data *clkout_data)
+ {
+ 	u8 clkout_ctrl0, clkout_ctrl1;
++	int ret;
+ 
+ 	if (priv->hw_data->is_ub971)
+ 		clkout_ctrl0 = clkout_data->m;
+@@ -974,8 +980,15 @@ static void ub953_write_clkout_regs(struct ub953_data *priv,
+ 
+ 	clkout_ctrl1 = clkout_data->n;
+ 
+-	ub953_write(priv, UB953_REG_CLKOUT_CTRL0, clkout_ctrl0);
+-	ub953_write(priv, UB953_REG_CLKOUT_CTRL1, clkout_ctrl1);
++	ret = ub953_write(priv, UB953_REG_CLKOUT_CTRL0, clkout_ctrl0);
++	if (ret)
++		return ret;
++
++	ret = ub953_write(priv, UB953_REG_CLKOUT_CTRL1, clkout_ctrl1);
++	if (ret)
++		return ret;
++
++	return 0;
+ }
+ 
+ static unsigned long ub953_clkout_recalc_rate(struct clk_hw *hw,
+@@ -1055,9 +1068,7 @@ static int ub953_clkout_set_rate(struct clk_hw *hw, unsigned long rate,
+ 	dev_dbg(&priv->client->dev, "%s %lu (requested %lu)\n", __func__,
+ 		clkout_data.rate, rate);
+ 
+-	ub953_write_clkout_regs(priv, &clkout_data);
+-
+-	return 0;
++	return ub953_write_clkout_regs(priv, &clkout_data);
+ }
+ 
+ static const struct clk_ops ub953_clkout_ops = {
+@@ -1082,7 +1093,9 @@ static int ub953_register_clkout(struct ub953_data *priv)
+ 
+ 	/* Initialize clkout to 25MHz by default */
+ 	ub953_calc_clkout_params(priv, UB953_DEFAULT_CLKOUT_RATE, &clkout_data);
+-	ub953_write_clkout_regs(priv, &clkout_data);
++	ret = ub953_write_clkout_regs(priv, &clkout_data);
++	if (ret)
++		return ret;
+ 
+ 	priv->clkout_clk_hw.init = &init;
+ 
+@@ -1229,10 +1242,15 @@ static int ub953_hw_init(struct ub953_data *priv)
+ 	if (ret)
+ 		return dev_err_probe(dev, ret, "i2c init failed\n");
+ 
+-	ub953_write(priv, UB953_REG_GENERAL_CFG,
+-		    (priv->non_continous_clk ? 0 : UB953_REG_GENERAL_CFG_CONT_CLK) |
+-		    ((priv->num_data_lanes - 1) << UB953_REG_GENERAL_CFG_CSI_LANE_SEL_SHIFT) |
+-		    UB953_REG_GENERAL_CFG_CRC_TX_GEN_ENABLE);
++	v = 0;
++	v |= priv->non_continous_clk ? 0 : UB953_REG_GENERAL_CFG_CONT_CLK;
++	v |= (priv->num_data_lanes - 1) <<
++		UB953_REG_GENERAL_CFG_CSI_LANE_SEL_SHIFT;
++	v |= UB953_REG_GENERAL_CFG_CRC_TX_GEN_ENABLE;
++
++	ret = ub953_write(priv, UB953_REG_GENERAL_CFG, v);
++	if (ret)
++		return ret;
+ 
+ 	return 0;
+ }
+diff --git a/drivers/media/platform/broadcom/bcm2835-unicam.c b/drivers/media/platform/broadcom/bcm2835-unicam.c
+index a1d93c14553d80..9f81e1582a3005 100644
+--- a/drivers/media/platform/broadcom/bcm2835-unicam.c
++++ b/drivers/media/platform/broadcom/bcm2835-unicam.c
+@@ -816,11 +816,6 @@ static irqreturn_t unicam_isr(int irq, void *dev)
+ 		}
+ 	}
+ 
+-	if (unicam_reg_read(unicam, UNICAM_ICTL) & UNICAM_FCM) {
+-		/* Switch out of trigger mode if selected */
+-		unicam_reg_write_field(unicam, UNICAM_ICTL, 1, UNICAM_TFC);
+-		unicam_reg_write_field(unicam, UNICAM_ICTL, 0, UNICAM_FCM);
+-	}
+ 	return IRQ_HANDLED;
+ }
+ 
+@@ -984,8 +979,7 @@ static void unicam_start_rx(struct unicam_device *unicam,
+ 
+ 	unicam_reg_write_field(unicam, UNICAM_ANA, 0, UNICAM_DDL);
+ 
+-	/* Always start in trigger frame capture mode (UNICAM_FCM set) */
+-	val = UNICAM_FSIE | UNICAM_FEIE | UNICAM_FCM | UNICAM_IBOB;
++	val = UNICAM_FSIE | UNICAM_FEIE | UNICAM_IBOB;
+ 	line_int_freq = max(fmt->height >> 2, 128);
+ 	unicam_set_field(&val, line_int_freq, UNICAM_LCIE_MASK);
+ 	unicam_reg_write(unicam, UNICAM_ICTL, val);
+diff --git a/drivers/media/test-drivers/vidtv/vidtv_bridge.c b/drivers/media/test-drivers/vidtv/vidtv_bridge.c
+index 613949df897d34..6d964e392d3130 100644
+--- a/drivers/media/test-drivers/vidtv/vidtv_bridge.c
++++ b/drivers/media/test-drivers/vidtv/vidtv_bridge.c
+@@ -191,10 +191,11 @@ static int vidtv_start_streaming(struct vidtv_dvb *dvb)
+ 
+ 	mux_args.mux_buf_sz  = mux_buf_sz;
+ 
+-	dvb->streaming = true;
+ 	dvb->mux = vidtv_mux_init(dvb->fe[0], dev, &mux_args);
+ 	if (!dvb->mux)
+ 		return -ENOMEM;
++
++	dvb->streaming = true;
+ 	vidtv_mux_start_thread(dvb->mux);
+ 
+ 	dev_dbg_ratelimited(dev, "Started streaming\n");
+@@ -205,6 +206,11 @@ static int vidtv_stop_streaming(struct vidtv_dvb *dvb)
+ {
+ 	struct device *dev = &dvb->pdev->dev;
+ 
++	if (!dvb->streaming) {
++		dev_warn_ratelimited(dev, "No streaming. Skipping.\n");
++		return 0;
++	}
++
+ 	dvb->streaming = false;
+ 	vidtv_mux_stop_thread(dvb->mux);
+ 	vidtv_mux_destroy(dvb->mux);
+diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
+index d832aa55056f39..4d8e00b425f443 100644
+--- a/drivers/media/usb/uvc/uvc_driver.c
++++ b/drivers/media/usb/uvc/uvc_driver.c
+@@ -2809,6 +2809,15 @@ static const struct usb_device_id uvc_ids[] = {
+ 	  .bInterfaceSubClass	= 1,
+ 	  .bInterfaceProtocol	= 0,
+ 	  .driver_info		= (kernel_ulong_t)&uvc_quirk_probe_minmax },
++	/* Sonix Technology Co. Ltd. - 292A IPC AR0330 */
++	{ .match_flags		= USB_DEVICE_ID_MATCH_DEVICE
++				| USB_DEVICE_ID_MATCH_INT_INFO,
++	  .idVendor		= 0x0c45,
++	  .idProduct		= 0x6366,
++	  .bInterfaceClass	= USB_CLASS_VIDEO,
++	  .bInterfaceSubClass	= 1,
++	  .bInterfaceProtocol	= 0,
++	  .driver_info		= UVC_INFO_QUIRK(UVC_QUIRK_MJPEG_NO_EOF) },
+ 	/* MT6227 */
+ 	{ .match_flags		= USB_DEVICE_ID_MATCH_DEVICE
+ 				| USB_DEVICE_ID_MATCH_INT_INFO,
+@@ -2837,6 +2846,15 @@ static const struct usb_device_id uvc_ids[] = {
+ 	  .bInterfaceSubClass	= 1,
+ 	  .bInterfaceProtocol	= 0,
+ 	  .driver_info		= (kernel_ulong_t)&uvc_quirk_probe_minmax },
++	/* Kurokesu C1 PRO */
++	{ .match_flags		= USB_DEVICE_ID_MATCH_DEVICE
++				| USB_DEVICE_ID_MATCH_INT_INFO,
++	  .idVendor		= 0x16d0,
++	  .idProduct		= 0x0ed1,
++	  .bInterfaceClass	= USB_CLASS_VIDEO,
++	  .bInterfaceSubClass	= 1,
++	  .bInterfaceProtocol	= 0,
++	  .driver_info		= UVC_INFO_QUIRK(UVC_QUIRK_MJPEG_NO_EOF) },
+ 	/* Syntek (HP Spartan) */
+ 	{ .match_flags		= USB_DEVICE_ID_MATCH_DEVICE
+ 				| USB_DEVICE_ID_MATCH_INT_INFO,
+diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
+index d2fe01bcd209e5..eab7b8f5573057 100644
+--- a/drivers/media/usb/uvc/uvc_video.c
++++ b/drivers/media/usb/uvc/uvc_video.c
+@@ -20,6 +20,7 @@
+ #include <linux/atomic.h>
+ #include <linux/unaligned.h>
+ 
++#include <media/jpeg.h>
+ #include <media/v4l2-common.h>
+ 
+ #include "uvcvideo.h"
+@@ -1137,6 +1138,7 @@ static void uvc_video_stats_stop(struct uvc_streaming *stream)
+ static int uvc_video_decode_start(struct uvc_streaming *stream,
+ 		struct uvc_buffer *buf, const u8 *data, int len)
+ {
++	u8 header_len;
+ 	u8 fid;
+ 
+ 	/*
+@@ -1150,6 +1152,7 @@ static int uvc_video_decode_start(struct uvc_streaming *stream,
+ 		return -EINVAL;
+ 	}
+ 
++	header_len = data[0];
+ 	fid = data[1] & UVC_STREAM_FID;
+ 
+ 	/*
+@@ -1231,9 +1234,31 @@ static int uvc_video_decode_start(struct uvc_streaming *stream,
+ 		return -EAGAIN;
+ 	}
+ 
++	/*
++	 * Some cameras, when running two parallel streams (one MJPEG alongside
++	 * another non-MJPEG stream), are known to lose the EOF packet for a frame.
++	 * We can detect the end of a frame by checking for a new SOI marker, as
++	 * the SOI always lies on the packet boundary between two frames for
++	 * these devices.
++	 */
++	if (stream->dev->quirks & UVC_QUIRK_MJPEG_NO_EOF &&
++	    (stream->cur_format->fcc == V4L2_PIX_FMT_MJPEG ||
++	    stream->cur_format->fcc == V4L2_PIX_FMT_JPEG)) {
++		const u8 *packet = data + header_len;
++
++		if (len >= header_len + 2 &&
++		    packet[0] == 0xff && packet[1] == JPEG_MARKER_SOI &&
++		    buf->bytesused != 0) {
++			buf->state = UVC_BUF_STATE_READY;
++			buf->error = 1;
++			stream->last_fid ^= UVC_STREAM_FID;
++			return -EAGAIN;
++		}
++	}
++
+ 	stream->last_fid = fid;
+ 
+-	return data[0];
++	return header_len;
+ }
+ 
+ static inline enum dma_data_direction uvc_stream_dir(
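
The quirk logic added above reduces to a standalone predicate: a payload begins a new JPEG frame when the bytes right after the UVC payload header form the two-byte start-of-image marker (0xff followed by JPEG_MARKER_SOI, 0xd8 in media/jpeg.h) and the current buffer already holds data from the EOF-less previous frame. A sketch (helper name illustrative):

static bool payload_starts_new_frame(const u8 *data, int len,
				     u8 header_len, u32 bytesused)
{
	const u8 *packet = data + header_len;

	return len >= header_len + 2 &&
	       packet[0] == 0xff && packet[1] == 0xd8 /* SOI */ &&
	       bytesused != 0;
}
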
+diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
+index 272dc9cf01ee7d..74ac2106f08e2c 100644
+--- a/drivers/media/usb/uvc/uvcvideo.h
++++ b/drivers/media/usb/uvc/uvcvideo.h
+@@ -76,6 +76,7 @@
+ #define UVC_QUIRK_NO_RESET_RESUME	0x00004000
+ #define UVC_QUIRK_DISABLE_AUTOSUSPEND	0x00008000
+ #define UVC_QUIRK_INVALID_DEVICE_SOF	0x00010000
++#define UVC_QUIRK_MJPEG_NO_EOF		0x00020000
+ 
+ /* Format flags */
+ #define UVC_FMT_FLAG_COMPRESSED		0x00000001
+diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
+index 6e62415de2e5ec..d5d868cb4edc7b 100644
+--- a/drivers/mmc/host/mtk-sd.c
++++ b/drivers/mmc/host/mtk-sd.c
+@@ -263,6 +263,7 @@
+ #define MSDC_PAD_TUNE_CMD2_SEL	  BIT(21)   /* RW */
+ 
+ #define PAD_DS_TUNE_DLY_SEL       BIT(0)	  /* RW */
++#define PAD_DS_TUNE_DLY2_SEL      BIT(1)	  /* RW */
+ #define PAD_DS_TUNE_DLY1	  GENMASK(6, 2)   /* RW */
+ #define PAD_DS_TUNE_DLY2	  GENMASK(11, 7)  /* RW */
+ #define PAD_DS_TUNE_DLY3	  GENMASK(16, 12) /* RW */
+@@ -308,6 +309,7 @@
+ 
+ /* EMMC50_PAD_DS_TUNE mask */
+ #define PAD_DS_DLY_SEL		BIT(16)	/* RW */
++#define PAD_DS_DLY2_SEL		BIT(15)	/* RW */
+ #define PAD_DS_DLY1		GENMASK(14, 10)	/* RW */
+ #define PAD_DS_DLY3		GENMASK(4, 0)	/* RW */
+ 
+@@ -2361,13 +2363,23 @@ static int msdc_execute_tuning(struct mmc_host *mmc, u32 opcode)
+ static int msdc_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
+ {
+ 	struct msdc_host *host = mmc_priv(mmc);
++
+ 	host->hs400_mode = true;
+ 
+-	if (host->top_base)
+-		writel(host->hs400_ds_delay,
+-		       host->top_base + EMMC50_PAD_DS_TUNE);
+-	else
+-		writel(host->hs400_ds_delay, host->base + PAD_DS_TUNE);
++	if (host->top_base) {
++		if (host->hs400_ds_dly3)
++			sdr_set_field(host->top_base + EMMC50_PAD_DS_TUNE,
++				      PAD_DS_DLY3, host->hs400_ds_dly3);
++		if (host->hs400_ds_delay)
++			writel(host->hs400_ds_delay,
++			       host->top_base + EMMC50_PAD_DS_TUNE);
++	} else {
++		if (host->hs400_ds_dly3)
++			sdr_set_field(host->base + PAD_DS_TUNE,
++				      PAD_DS_TUNE_DLY3, host->hs400_ds_dly3);
++		if (host->hs400_ds_delay)
++			writel(host->hs400_ds_delay, host->base + PAD_DS_TUNE);
++	}
+ 	/* hs400 mode must set it to 0 */
+ 	sdr_clr_bits(host->base + MSDC_PATCH_BIT2, MSDC_PATCH_BIT2_CFGCRCSTS);
+ 	/* to improve read performance, set outstanding to 2 */
+@@ -2387,14 +2399,11 @@ static int msdc_execute_hs400_tuning(struct mmc_host *mmc, struct mmc_card *card
+ 	if (host->top_base) {
+ 		sdr_set_bits(host->top_base + EMMC50_PAD_DS_TUNE,
+ 			     PAD_DS_DLY_SEL);
+-		if (host->hs400_ds_dly3)
+-			sdr_set_field(host->top_base + EMMC50_PAD_DS_TUNE,
+-				      PAD_DS_DLY3, host->hs400_ds_dly3);
++		sdr_clr_bits(host->top_base + EMMC50_PAD_DS_TUNE,
++			     PAD_DS_DLY2_SEL);
+ 	} else {
+ 		sdr_set_bits(host->base + PAD_DS_TUNE, PAD_DS_TUNE_DLY_SEL);
+-		if (host->hs400_ds_dly3)
+-			sdr_set_field(host->base + PAD_DS_TUNE,
+-				      PAD_DS_TUNE_DLY3, host->hs400_ds_dly3);
++		sdr_clr_bits(host->base + PAD_DS_TUNE, PAD_DS_TUNE_DLY2_SEL);
+ 	}
+ 
+ 	host->hs400_tuning = true;
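
sdr_set_field() used above is the driver's masked MMIO read-modify-write helper; conceptually it behaves like this sketch (name illustrative), where e.g. field = PAD_DS_TUNE_DLY3 = GENMASK(16, 12) places val at bits 16:12:

#include <linux/bitops.h>
#include <linux/io.h>

static void set_field_sketch(void __iomem *reg, u32 field, u32 val)
{
	u32 tmp = readl(reg);

	tmp &= ~field;
	tmp |= (val << __ffs(field)) & field;
	writel(tmp, reg);
}
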
+diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
+index 6cba9717a6d87d..399844809bbeaa 100644
+--- a/drivers/net/can/c_can/c_can_platform.c
++++ b/drivers/net/can/c_can/c_can_platform.c
+@@ -385,15 +385,16 @@ static int c_can_plat_probe(struct platform_device *pdev)
+ 	if (ret) {
+ 		dev_err(&pdev->dev, "registering %s failed (err=%d)\n",
+ 			KBUILD_MODNAME, ret);
+-		goto exit_free_device;
++		goto exit_pm_runtime;
+ 	}
+ 
+ 	dev_info(&pdev->dev, "%s device registered (regs=%p, irq=%d)\n",
+ 		 KBUILD_MODNAME, priv->base, dev->irq);
+ 	return 0;
+ 
+-exit_free_device:
++exit_pm_runtime:
+ 	pm_runtime_disable(priv->device);
++exit_free_device:
+ 	free_c_can_dev(dev);
+ exit:
+ 	dev_err(&pdev->dev, "probe failed\n");
+diff --git a/drivers/net/can/ctucanfd/ctucanfd_base.c b/drivers/net/can/ctucanfd/ctucanfd_base.c
+index 64c349fd46007f..f65c1a1e05ccdf 100644
+--- a/drivers/net/can/ctucanfd/ctucanfd_base.c
++++ b/drivers/net/can/ctucanfd/ctucanfd_base.c
+@@ -867,10 +867,12 @@ static void ctucan_err_interrupt(struct net_device *ndev, u32 isr)
+ 			}
+ 			break;
+ 		case CAN_STATE_ERROR_ACTIVE:
+-			cf->can_id |= CAN_ERR_CNT;
+-			cf->data[1] = CAN_ERR_CRTL_ACTIVE;
+-			cf->data[6] = bec.txerr;
+-			cf->data[7] = bec.rxerr;
++			if (skb) {
++				cf->can_id |= CAN_ERR_CNT;
++				cf->data[1] = CAN_ERR_CRTL_ACTIVE;
++				cf->data[6] = bec.txerr;
++				cf->data[7] = bec.rxerr;
++			}
+ 			break;
+ 		default:
+ 			netdev_warn(ndev, "unhandled error state (%d:%s)!\n",
+diff --git a/drivers/net/can/rockchip/rockchip_canfd-core.c b/drivers/net/can/rockchip/rockchip_canfd-core.c
+index df18c85fc07841..d9a937ba126c3c 100644
+--- a/drivers/net/can/rockchip/rockchip_canfd-core.c
++++ b/drivers/net/can/rockchip/rockchip_canfd-core.c
+@@ -622,7 +622,7 @@ rkcanfd_handle_rx_fifo_overflow_int(struct rkcanfd_priv *priv)
+ 	netdev_dbg(priv->ndev, "RX-FIFO overflow\n");
+ 
+ 	skb = rkcanfd_alloc_can_err_skb(priv, &cf, &timestamp);
+-	if (skb)
++	if (!skb)
+ 		return 0;
+ 
+ 	rkcanfd_get_berr_counter_corrected(priv, &bec);
+diff --git a/drivers/net/can/usb/etas_es58x/es58x_devlink.c b/drivers/net/can/usb/etas_es58x/es58x_devlink.c
+index eee20839d96fd4..0d155eb1b9e999 100644
+--- a/drivers/net/can/usb/etas_es58x/es58x_devlink.c
++++ b/drivers/net/can/usb/etas_es58x/es58x_devlink.c
+@@ -248,7 +248,11 @@ static int es58x_devlink_info_get(struct devlink *devlink,
+ 			return ret;
+ 	}
+ 
+-	return devlink_info_serial_number_put(req, es58x_dev->udev->serial);
++	if (es58x_dev->udev->serial)
++		ret = devlink_info_serial_number_put(req,
++						     es58x_dev->udev->serial);
++
++	return ret;
+ }
+ 
+ const struct devlink_ops es58x_dl_ops = {
+diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
+index b4fbb99bfad208..a3d6b8f198a86a 100644
+--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
++++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
+@@ -2159,8 +2159,13 @@ static int idpf_open(struct net_device *netdev)
+ 	idpf_vport_ctrl_lock(netdev);
+ 	vport = idpf_netdev_to_vport(netdev);
+ 
++	err = idpf_set_real_num_queues(vport);
++	if (err)
++		goto unlock;
++
+ 	err = idpf_vport_open(vport);
+ 
++unlock:
+ 	idpf_vport_ctrl_unlock(netdev);
+ 
+ 	return err;
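
idpf_set_real_num_queues() has to succeed before the vport is opened, since the stack's queue selection relies on the advertised counts. The usual shape of such a helper, shown only as a hedged sketch around the core netdev API (not the driver's exact code):

static int set_real_num_queues_sketch(struct net_device *netdev,
				      unsigned int txq, unsigned int rxq)
{
	int err;

	/* both calls can fail, so the error must reach the caller */
	err = netif_set_real_num_tx_queues(netdev, txq);
	if (err)
		return err;

	return netif_set_real_num_rx_queues(netdev, rxq);
}
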
+diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+index 60d15b3e6e2faa..1e0d1f9b07fbcf 100644
+--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
++++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+@@ -3008,8 +3008,6 @@ static int idpf_rx_rsc(struct idpf_rx_queue *rxq, struct sk_buff *skb,
+ 		return -EINVAL;
+ 
+ 	rsc_segments = DIV_ROUND_UP(skb->data_len, rsc_seg_len);
+-	if (unlikely(rsc_segments == 1))
+-		return 0;
+ 
+ 	NAPI_GRO_CB(skb)->count = rsc_segments;
+ 	skb_shinfo(skb)->gso_size = rsc_seg_len;
+@@ -3072,6 +3070,7 @@ idpf_rx_process_skb_fields(struct idpf_rx_queue *rxq, struct sk_buff *skb,
+ 	idpf_rx_hash(rxq, skb, rx_desc, decoded);
+ 
+ 	skb->protocol = eth_type_trans(skb, rxq->netdev);
++	skb_record_rx_queue(skb, rxq->idx);
+ 
+ 	if (le16_get_bits(rx_desc->hdrlen_flags,
+ 			  VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_M))
+@@ -3080,8 +3079,6 @@ idpf_rx_process_skb_fields(struct idpf_rx_queue *rxq, struct sk_buff *skb,
+ 	csum_bits = idpf_rx_splitq_extract_csum_bits(rx_desc);
+ 	idpf_rx_csum(rxq, skb, csum_bits, decoded);
+ 
+-	skb_record_rx_queue(skb, rxq->idx);
+-
+ 	return 0;
+ }
+ 
+diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
+index 6e70bca15db1d8..1ec9e8cc99d947 100644
+--- a/drivers/net/ethernet/intel/igc/igc_main.c
++++ b/drivers/net/ethernet/intel/igc/igc_main.c
+@@ -1096,6 +1096,7 @@ static int igc_init_empty_frame(struct igc_ring *ring,
+ 		return -ENOMEM;
+ 	}
+ 
++	buffer->type = IGC_TX_BUFFER_TYPE_SKB;
+ 	buffer->skb = skb;
+ 	buffer->protocol = 0;
+ 	buffer->bytecount = skb->len;
+@@ -2707,8 +2708,9 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
+ }
+ 
+ static struct sk_buff *igc_construct_skb_zc(struct igc_ring *ring,
+-					    struct xdp_buff *xdp)
++					    struct igc_xdp_buff *ctx)
+ {
++	struct xdp_buff *xdp = &ctx->xdp;
+ 	unsigned int totalsize = xdp->data_end - xdp->data_meta;
+ 	unsigned int metasize = xdp->data - xdp->data_meta;
+ 	struct sk_buff *skb;
+@@ -2727,27 +2729,28 @@ static struct sk_buff *igc_construct_skb_zc(struct igc_ring *ring,
+ 		__skb_pull(skb, metasize);
+ 	}
+ 
++	if (ctx->rx_ts) {
++		skb_shinfo(skb)->tx_flags |= SKBTX_HW_TSTAMP_NETDEV;
++		skb_hwtstamps(skb)->netdev_data = ctx->rx_ts;
++	}
++
+ 	return skb;
+ }
+ 
+ static void igc_dispatch_skb_zc(struct igc_q_vector *q_vector,
+ 				union igc_adv_rx_desc *desc,
+-				struct xdp_buff *xdp,
+-				ktime_t timestamp)
++				struct igc_xdp_buff *ctx)
+ {
+ 	struct igc_ring *ring = q_vector->rx.ring;
+ 	struct sk_buff *skb;
+ 
+-	skb = igc_construct_skb_zc(ring, xdp);
++	skb = igc_construct_skb_zc(ring, ctx);
+ 	if (!skb) {
+ 		ring->rx_stats.alloc_failed++;
+ 		set_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &ring->flags);
+ 		return;
+ 	}
+ 
+-	if (timestamp)
+-		skb_hwtstamps(skb)->hwtstamp = timestamp;
+-
+ 	if (igc_cleanup_headers(ring, desc, skb))
+ 		return;
+ 
+@@ -2783,7 +2786,6 @@ static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget)
+ 		union igc_adv_rx_desc *desc;
+ 		struct igc_rx_buffer *bi;
+ 		struct igc_xdp_buff *ctx;
+-		ktime_t timestamp = 0;
+ 		unsigned int size;
+ 		int res;
+ 
+@@ -2813,6 +2815,8 @@ static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget)
+ 			 */
+ 			bi->xdp->data_meta += IGC_TS_HDR_LEN;
+ 			size -= IGC_TS_HDR_LEN;
++		} else {
++			ctx->rx_ts = NULL;
+ 		}
+ 
+ 		bi->xdp->data_end = bi->xdp->data + size;
+@@ -2821,7 +2825,7 @@ static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget)
+ 		res = __igc_xdp_run_prog(adapter, prog, bi->xdp);
+ 		switch (res) {
+ 		case IGC_XDP_PASS:
+-			igc_dispatch_skb_zc(q_vector, desc, bi->xdp, timestamp);
++			igc_dispatch_skb_zc(q_vector, desc, ctx);
+ 			fallthrough;
+ 		case IGC_XDP_CONSUMED:
+ 			xsk_buff_free(bi->xdp);
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
+index 2bed8c86b7cfc5..3f64cdbabfa3c1 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
+@@ -768,7 +768,9 @@ static void __mlxsw_sp_port_get_stats(struct net_device *dev,
+ 	err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp);
+ 	if (err)
+ 		return;
+-	mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
++	err = mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
++	if (err)
++		return;
+ 	for (i = 0; i < len; i++) {
+ 		data[data_index + i] = hw_stats[i].getter(ppcnt_pl);
+ 		if (!hw_stats[i].cells_bytes)
+diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+index 3c0d067c360992..3e090f87f97ebd 100644
+--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
++++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+@@ -585,21 +585,30 @@ static void am65_cpsw_nuss_xmit_free(struct am65_cpsw_tx_chn *tx_chn,
+ static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma)
+ {
+ 	struct am65_cpsw_tx_chn *tx_chn = data;
++	enum am65_cpsw_tx_buf_type buf_type;
+ 	struct cppi5_host_desc_t *desc_tx;
++	struct xdp_frame *xdpf;
+ 	struct sk_buff *skb;
+ 	void **swdata;
+ 
+ 	desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
+ 	swdata = cppi5_hdesc_get_swdata(desc_tx);
+-	skb = *(swdata);
+-	am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
++	buf_type = am65_cpsw_nuss_buf_type(tx_chn, desc_dma);
++	if (buf_type == AM65_CPSW_TX_BUF_TYPE_SKB) {
++		skb = *(swdata);
++		dev_kfree_skb_any(skb);
++	} else {
++		xdpf = *(swdata);
++		xdp_return_frame(xdpf);
++	}
+ 
+-	dev_kfree_skb_any(skb);
++	am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
+ }
+ 
+ static struct sk_buff *am65_cpsw_build_skb(void *page_addr,
+ 					   struct net_device *ndev,
+-					   unsigned int len)
++					   unsigned int len,
++					   unsigned int headroom)
+ {
+ 	struct sk_buff *skb;
+ 
+@@ -609,7 +618,7 @@ static struct sk_buff *am65_cpsw_build_skb(void *page_addr,
+ 	if (unlikely(!skb))
+ 		return NULL;
+ 
+-	skb_reserve(skb, AM65_CPSW_HEADROOM);
++	skb_reserve(skb, headroom);
+ 	skb->dev = ndev;
+ 
+ 	return skb;
+@@ -1191,16 +1200,8 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow,
+ 	dev_dbg(dev, "%s rx csum_info:%#x\n", __func__, csum_info);
+ 
+ 	dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
+-
+ 	k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
+ 
+-	skb = am65_cpsw_build_skb(page_addr, ndev,
+-				  AM65_CPSW_MAX_PACKET_SIZE);
+-	if (unlikely(!skb)) {
+-		new_page = page;
+-		goto requeue;
+-	}
+-
+ 	if (port->xdp_prog) {
+ 		xdp_init_buff(&xdp, PAGE_SIZE, &port->xdp_rxq[flow->id]);
+ 		xdp_prepare_buff(&xdp, page_addr, AM65_CPSW_HEADROOM,
+@@ -1210,9 +1211,16 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow,
+ 		if (*xdp_state != AM65_CPSW_XDP_PASS)
+ 			goto allocate;
+ 
+-		/* Compute additional headroom to be reserved */
+-		headroom = (xdp.data - xdp.data_hard_start) - skb_headroom(skb);
+-		skb_reserve(skb, headroom);
++		headroom = xdp.data - xdp.data_hard_start;
++	} else {
++		headroom = AM65_CPSW_HEADROOM;
++	}
++
++	skb = am65_cpsw_build_skb(page_addr, ndev,
++				  AM65_CPSW_MAX_PACKET_SIZE, headroom);
++	if (unlikely(!skb)) {
++		new_page = page;
++		goto requeue;
+ 	}
+ 
+ 	ndev_priv = netdev_priv(ndev);
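
The reordering above matters because an XDP program may move the start of the packet (for instance via bpf_xdp_adjust_head()), so the skb has to be built with the post-XDP offset instead of the fixed default. In outline, following the logic above:

/* derive headroom from the buffer start after the XDP verdict */
if (port->xdp_prog)
	headroom = xdp.data - xdp.data_hard_start;
else
	headroom = AM65_CPSW_HEADROOM;

skb = am65_cpsw_build_skb(page_addr, ndev,
			  AM65_CPSW_MAX_PACKET_SIZE, headroom);
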
+diff --git a/drivers/net/netdevsim/ipsec.c b/drivers/net/netdevsim/ipsec.c
+index 3612b0633bd177..88187dd4eb2d40 100644
+--- a/drivers/net/netdevsim/ipsec.c
++++ b/drivers/net/netdevsim/ipsec.c
+@@ -39,10 +39,14 @@ static ssize_t nsim_dbg_netdev_ops_read(struct file *filp,
+ 		if (!sap->used)
+ 			continue;
+ 
+-		p += scnprintf(p, bufsize - (p - buf),
+-			       "sa[%i] %cx ipaddr=0x%08x %08x %08x %08x\n",
+-			       i, (sap->rx ? 'r' : 't'), sap->ipaddr[0],
+-			       sap->ipaddr[1], sap->ipaddr[2], sap->ipaddr[3]);
++		if (sap->xs->props.family == AF_INET6)
++			p += scnprintf(p, bufsize - (p - buf),
++				       "sa[%i] %cx ipaddr=%pI6c\n",
++				       i, (sap->rx ? 'r' : 't'), &sap->ipaddr);
++		else
++			p += scnprintf(p, bufsize - (p - buf),
++				       "sa[%i] %cx ipaddr=%pI4\n",
++				       i, (sap->rx ? 'r' : 't'), &sap->ipaddr[3]);
+ 		p += scnprintf(p, bufsize - (p - buf),
+ 			       "sa[%i]    spi=0x%08x proto=0x%x salt=0x%08x crypt=%d\n",
+ 			       i, be32_to_cpu(sap->xs->id.spi),
+diff --git a/drivers/net/team/team_core.c b/drivers/net/team/team_core.c
+index 7f4ef219eee44f..6cfafaac1b4fb6 100644
+--- a/drivers/net/team/team_core.c
++++ b/drivers/net/team/team_core.c
+@@ -2640,7 +2640,9 @@ int team_nl_options_set_doit(struct sk_buff *skb, struct genl_info *info)
+ 				ctx.data.u32_val = nla_get_u32(attr_data);
+ 				break;
+ 			case TEAM_OPTION_TYPE_STRING:
+-				if (nla_len(attr_data) > TEAM_STRING_MAX_LEN) {
++				if (nla_len(attr_data) > TEAM_STRING_MAX_LEN ||
++				    !memchr(nla_data(attr_data), '\0',
++					    nla_len(attr_data))) {
+ 					err = -EINVAL;
+ 					goto team_put;
+ 				}
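
The added memchr() check rejects a netlink string attribute that lacks a terminating NUL within its payload, which would otherwise be passed on as an unterminated C string. The validation rule in isolation (helper name illustrative):

#include <linux/string.h>
#include <net/netlink.h>

static bool nl_string_is_terminated(const struct nlattr *attr, int maxlen)
{
	int len = nla_len(attr);

	/* safe as a C string only if a NUL occurs within the payload */
	return len <= maxlen && memchr(nla_data(attr), '\0', len);
}
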
+diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
+index 6e9a3795846aa3..5e7cdd1b806fbd 100644
+--- a/drivers/net/vxlan/vxlan_core.c
++++ b/drivers/net/vxlan/vxlan_core.c
+@@ -2871,8 +2871,11 @@ static int vxlan_init(struct net_device *dev)
+ 	struct vxlan_dev *vxlan = netdev_priv(dev);
+ 	int err;
+ 
+-	if (vxlan->cfg.flags & VXLAN_F_VNIFILTER)
+-		vxlan_vnigroup_init(vxlan);
++	if (vxlan->cfg.flags & VXLAN_F_VNIFILTER) {
++		err = vxlan_vnigroup_init(vxlan);
++		if (err)
++			return err;
++	}
+ 
+ 	err = gro_cells_init(&vxlan->gro_cells, dev);
+ 	if (err)
+diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c
+index 2cd3ff9b0164c8..a6ba97949440e4 100644
+--- a/drivers/net/wireless/ath/ath12k/wmi.c
++++ b/drivers/net/wireless/ath/ath12k/wmi.c
+@@ -4681,6 +4681,22 @@ static struct ath12k_reg_rule
+ 	return reg_rule_ptr;
+ }
+ 
++static u8 ath12k_wmi_ignore_num_extra_rules(struct ath12k_wmi_reg_rule_ext_params *rule,
++					    u32 num_reg_rules)
++{
++	u8 num_invalid_5ghz_rules = 0;
++	u32 count, start_freq;
++
++	for (count = 0; count < num_reg_rules; count++) {
++		start_freq = le32_get_bits(rule[count].freq_info, REG_RULE_START_FREQ);
++
++		if (start_freq >= ATH12K_MIN_6G_FREQ)
++			num_invalid_5ghz_rules++;
++	}
++
++	return num_invalid_5ghz_rules;
++}
++
+ static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
+ 						   struct sk_buff *skb,
+ 						   struct ath12k_reg_info *reg_info)
+@@ -4691,6 +4707,7 @@ static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
+ 	u32 num_2g_reg_rules, num_5g_reg_rules;
+ 	u32 num_6g_reg_rules_ap[WMI_REG_CURRENT_MAX_AP_TYPE];
+ 	u32 num_6g_reg_rules_cl[WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE];
++	u8 num_invalid_5ghz_ext_rules;
+ 	u32 total_reg_rules = 0;
+ 	int ret, i, j;
+ 
+@@ -4784,20 +4801,6 @@ static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
+ 
+ 	memcpy(reg_info->alpha2, &ev->alpha2, REG_ALPHA2_LEN);
+ 
+-	/* FIXME: Currently FW includes 6G reg rule also in 5G rule
+-	 * list for country US.
+-	 * Having same 6G reg rule in 5G and 6G rules list causes
+-	 * intersect check to be true, and same rules will be shown
+-	 * multiple times in iw cmd. So added hack below to avoid
+-	 * parsing 6G rule from 5G reg rule list, and this can be
+-	 * removed later, after FW updates to remove 6G reg rule
+-	 * from 5G rules list.
+-	 */
+-	if (memcmp(reg_info->alpha2, "US", 2) == 0) {
+-		reg_info->num_5g_reg_rules = REG_US_5G_NUM_REG_RULES;
+-		num_5g_reg_rules = reg_info->num_5g_reg_rules;
+-	}
+-
+ 	reg_info->dfs_region = le32_to_cpu(ev->dfs_region);
+ 	reg_info->phybitmap = le32_to_cpu(ev->phybitmap);
+ 	reg_info->num_phy = le32_to_cpu(ev->num_phy);
+@@ -4900,8 +4903,29 @@ static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
+ 		}
+ 	}
+ 
++	ext_wmi_reg_rule += num_2g_reg_rules;
++
++	/* For a few countries, firmware includes 6 GHz reg rules in the
++	 * 5 GHz rule list in addition to the separate 6 GHz rules.
++	 * Having the same 6 GHz reg rule in both lists makes the
++	 * intersect check true, and the same rules are then shown
++	 * multiple times in the iw command output.
++	 * Hence, avoid parsing 6 GHz rules from the 5 GHz reg rule list.
++	 */
++	num_invalid_5ghz_ext_rules = ath12k_wmi_ignore_num_extra_rules(ext_wmi_reg_rule,
++								       num_5g_reg_rules);
++
++	if (num_invalid_5ghz_ext_rules) {
++		ath12k_dbg(ab, ATH12K_DBG_WMI,
++			   "CC %s: %d 5 GHz reg rules from fw, %d invalid 5 GHz rules ignored",
++			   reg_info->alpha2, reg_info->num_5g_reg_rules,
++			   num_invalid_5ghz_ext_rules);
++
++		num_5g_reg_rules = num_5g_reg_rules - num_invalid_5ghz_ext_rules;
++		reg_info->num_5g_reg_rules = num_5g_reg_rules;
++	}
++
+ 	if (num_5g_reg_rules) {
+-		ext_wmi_reg_rule += num_2g_reg_rules;
+ 		reg_info->reg_rules_5g_ptr =
+ 			create_ext_reg_rules_from_wmi(num_5g_reg_rules,
+ 						      ext_wmi_reg_rule);
+@@ -4913,7 +4937,12 @@ static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
+ 		}
+ 	}
+ 
+-	ext_wmi_reg_rule += num_5g_reg_rules;
++	/* We have adjusted the number of 5 GHz reg rules above, but the
++	 * pointer into ext_wmi_reg_rule must still be advanced past them.
++	 *
++	 * NOTE: num_invalid_5ghz_ext_rules is 0 in all other cases.
++	 */
++	ext_wmi_reg_rule += (num_5g_reg_rules + num_invalid_5ghz_ext_rules);
+ 
+ 	for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) {
+ 		reg_info->reg_rules_6g_ap_ptr[i] =
+diff --git a/drivers/net/wireless/ath/ath12k/wmi.h b/drivers/net/wireless/ath/ath12k/wmi.h
+index 6a913f9b831580..b495cdea7111c3 100644
+--- a/drivers/net/wireless/ath/ath12k/wmi.h
++++ b/drivers/net/wireless/ath/ath12k/wmi.h
+@@ -3943,7 +3943,6 @@ struct ath12k_wmi_eht_rate_set_params {
+ #define MAX_REG_RULES 10
+ #define REG_ALPHA2_LEN 2
+ #define MAX_6G_REG_RULES 5
+-#define REG_US_5G_NUM_REG_RULES 4
+ 
+ enum wmi_start_event_param {
+ 	WMI_VDEV_START_RESP_EVENT = 0,
+diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c
+index 5aef7fa378788c..0ac84f968994b4 100644
+--- a/drivers/net/wireless/realtek/rtw89/pci.c
++++ b/drivers/net/wireless/realtek/rtw89/pci.c
+@@ -2492,7 +2492,7 @@ static int rtw89_pci_dphy_delay(struct rtw89_dev *rtwdev)
+ 				       PCIE_DPHY_DLY_25US, PCIE_PHY_GEN1);
+ }
+ 
+-static void rtw89_pci_power_wake(struct rtw89_dev *rtwdev, bool pwr_up)
++static void rtw89_pci_power_wake_ax(struct rtw89_dev *rtwdev, bool pwr_up)
+ {
+ 	if (pwr_up)
+ 		rtw89_write32_set(rtwdev, R_AX_HCI_OPT_CTRL, BIT_WAKE_CTRL);
+@@ -2799,6 +2799,8 @@ static int rtw89_pci_ops_deinit(struct rtw89_dev *rtwdev)
+ {
+ 	const struct rtw89_pci_info *info = rtwdev->pci_info;
+ 
++	rtw89_pci_power_wake(rtwdev, false);
++
+ 	if (rtwdev->chip->chip_id == RTL8852A) {
+ 		/* ltr sw trigger */
+ 		rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_IDLE);
+@@ -2841,7 +2843,7 @@ static int rtw89_pci_ops_mac_pre_init_ax(struct rtw89_dev *rtwdev)
+ 		return ret;
+ 	}
+ 
+-	rtw89_pci_power_wake(rtwdev, true);
++	rtw89_pci_power_wake_ax(rtwdev, true);
+ 	rtw89_pci_autoload_hang(rtwdev);
+ 	rtw89_pci_l12_vmain(rtwdev);
+ 	rtw89_pci_gen2_force_ib(rtwdev);
+@@ -2886,6 +2888,13 @@ static int rtw89_pci_ops_mac_pre_init_ax(struct rtw89_dev *rtwdev)
+ 	return 0;
+ }
+ 
++static int rtw89_pci_ops_mac_pre_deinit_ax(struct rtw89_dev *rtwdev)
++{
++	rtw89_pci_power_wake_ax(rtwdev, false);
++
++	return 0;
++}
++
+ int rtw89_pci_ltr_set(struct rtw89_dev *rtwdev, bool en)
+ {
+ 	u32 val;
+@@ -4264,7 +4273,7 @@ const struct rtw89_pci_gen_def rtw89_pci_gen_ax = {
+ 					    B_AX_RDU_INT},
+ 
+ 	.mac_pre_init = rtw89_pci_ops_mac_pre_init_ax,
+-	.mac_pre_deinit = NULL,
++	.mac_pre_deinit = rtw89_pci_ops_mac_pre_deinit_ax,
+ 	.mac_post_init = rtw89_pci_ops_mac_post_init_ax,
+ 
+ 	.clr_idx_all = rtw89_pci_clr_idx_all_ax,
+@@ -4280,6 +4289,8 @@ const struct rtw89_pci_gen_def rtw89_pci_gen_ax = {
+ 	.aspm_set = rtw89_pci_aspm_set_ax,
+ 	.clkreq_set = rtw89_pci_clkreq_set_ax,
+ 	.l1ss_set = rtw89_pci_l1ss_set_ax,
++
++	.power_wake = rtw89_pci_power_wake_ax,
+ };
+ EXPORT_SYMBOL(rtw89_pci_gen_ax);
+ 
+diff --git a/drivers/net/wireless/realtek/rtw89/pci.h b/drivers/net/wireless/realtek/rtw89/pci.h
+index 48c3ab735db2a7..0ea4dcb84dd862 100644
+--- a/drivers/net/wireless/realtek/rtw89/pci.h
++++ b/drivers/net/wireless/realtek/rtw89/pci.h
+@@ -1276,6 +1276,8 @@ struct rtw89_pci_gen_def {
+ 	void (*aspm_set)(struct rtw89_dev *rtwdev, bool enable);
+ 	void (*clkreq_set)(struct rtw89_dev *rtwdev, bool enable);
+ 	void (*l1ss_set)(struct rtw89_dev *rtwdev, bool enable);
++
++	void (*power_wake)(struct rtw89_dev *rtwdev, bool pwr_up);
+ };
+ 
+ struct rtw89_pci_info {
+@@ -1766,4 +1768,13 @@ static inline int rtw89_pci_poll_txdma_ch_idle(struct rtw89_dev *rtwdev)
+ 
+ 	return gen_def->poll_txdma_ch_idle(rtwdev);
+ }
++
++static inline void rtw89_pci_power_wake(struct rtw89_dev *rtwdev, bool pwr_up)
++{
++	const struct rtw89_pci_info *info = rtwdev->pci_info;
++	const struct rtw89_pci_gen_def *gen_def = info->gen_def;
++
++	gen_def->power_wake(rtwdev, pwr_up);
++}
++
+ #endif
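
The new power_wake hook follows the driver's per-generation ops-table pattern: each chip generation fills rtw89_pci_gen_def and common code dispatches through a small inline wrapper. Stripped to its essentials (types illustrative):

struct chip;

struct chip_gen_ops {
	void (*power_wake)(struct chip *c, bool pwr_up);
};

struct chip {
	const struct chip_gen_ops *gen_ops;
};

static inline void chip_power_wake(struct chip *c, bool pwr_up)
{
	c->gen_ops->power_wake(c, pwr_up);	/* AX or BE implementation */
}
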
+diff --git a/drivers/net/wireless/realtek/rtw89/pci_be.c b/drivers/net/wireless/realtek/rtw89/pci_be.c
+index 7cc32822296528..2f0d9ff25ba520 100644
+--- a/drivers/net/wireless/realtek/rtw89/pci_be.c
++++ b/drivers/net/wireless/realtek/rtw89/pci_be.c
+@@ -614,5 +614,7 @@ const struct rtw89_pci_gen_def rtw89_pci_gen_be = {
+ 	.aspm_set = rtw89_pci_aspm_set_be,
+ 	.clkreq_set = rtw89_pci_clkreq_set_be,
+ 	.l1ss_set = rtw89_pci_l1ss_set_be,
++
++	.power_wake = _patch_pcie_power_wake_be,
+ };
+ EXPORT_SYMBOL(rtw89_pci_gen_be);
+diff --git a/drivers/parport/parport_serial.c b/drivers/parport/parport_serial.c
+index 3644997a834255..24d4f3a3ec3d0e 100644
+--- a/drivers/parport/parport_serial.c
++++ b/drivers/parport/parport_serial.c
+@@ -266,10 +266,14 @@ static struct pci_device_id parport_serial_pci_tbl[] = {
+ 	{ 0x1409, 0x7168, 0x1409, 0xd079, 0, 0, timedia_9079c },
+ 
+ 	/* WCH CARDS */
+-	{ 0x4348, 0x5053, PCI_ANY_ID, PCI_ANY_ID, 0, 0, wch_ch353_1s1p},
+-	{ 0x4348, 0x7053, 0x4348, 0x3253, 0, 0, wch_ch353_2s1p},
+-	{ 0x1c00, 0x3050, 0x1c00, 0x3050, 0, 0, wch_ch382_0s1p},
+-	{ 0x1c00, 0x3250, 0x1c00, 0x3250, 0, 0, wch_ch382_2s1p},
++	{ PCI_VENDOR_ID_WCHCN, PCI_DEVICE_ID_WCHCN_CH353_1S1P,
++	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, wch_ch353_1s1p },
++	{ PCI_VENDOR_ID_WCHCN, PCI_DEVICE_ID_WCHCN_CH353_2S1P,
++	  0x4348, 0x3253, 0, 0, wch_ch353_2s1p },
++	{ PCI_VENDOR_ID_WCHIC, PCI_DEVICE_ID_WCHIC_CH382_0S1P,
++	  0x1c00, 0x3050, 0, 0, wch_ch382_0s1p },
++	{ PCI_VENDOR_ID_WCHIC, PCI_DEVICE_ID_WCHIC_CH382_2S1P,
++	  0x1c00, 0x3250, 0, 0, wch_ch382_2s1p },
+ 
+ 	/* BrainBoxes PX272/PX306 MIO card */
+ 	{ PCI_VENDOR_ID_INTASHIELD, 0x4100,
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index 8103bc24a54ea4..064067d9c8b529 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -5522,7 +5522,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x443, quirk_intel_qat_vf_cap);
+  * AMD Matisse USB 3.0 Host Controller 0x149c
+  * Intel 82579LM Gigabit Ethernet Controller 0x1502
+  * Intel 82579V Gigabit Ethernet Controller 0x1503
+- *
++ * Mediatek MT7922 802.11ax PCI Express Wireless Network Adapter
+  */
+ static void quirk_no_flr(struct pci_dev *dev)
+ {
+@@ -5534,6 +5534,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x149c, quirk_no_flr);
+ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x7901, quirk_no_flr);
+ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1502, quirk_no_flr);
+ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1503, quirk_no_flr);
++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_MEDIATEK, 0x0616, quirk_no_flr);
+ 
+ /* FLR may cause the SolidRun SNET DPU (rev 0x1) to hang */
+ static void quirk_no_flr_snet(struct pci_dev *dev)
+@@ -5985,6 +5986,17 @@ SWITCHTEC_QUIRK(0x5552);  /* PAXA 52XG5 */
+ SWITCHTEC_QUIRK(0x5536);  /* PAXA 36XG5 */
+ SWITCHTEC_QUIRK(0x5528);  /* PAXA 28XG5 */
+ 
++#define SWITCHTEC_PCI100X_QUIRK(vid) \
++	DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_EFAR, vid, \
++		PCI_CLASS_BRIDGE_OTHER, 8, quirk_switchtec_ntb_dma_alias)
++SWITCHTEC_PCI100X_QUIRK(0x1001);  /* PCI1001XG4 */
++SWITCHTEC_PCI100X_QUIRK(0x1002);  /* PCI1002XG4 */
++SWITCHTEC_PCI100X_QUIRK(0x1003);  /* PCI1003XG4 */
++SWITCHTEC_PCI100X_QUIRK(0x1004);  /* PCI1004XG4 */
++SWITCHTEC_PCI100X_QUIRK(0x1005);  /* PCI1005XG4 */
++SWITCHTEC_PCI100X_QUIRK(0x1006);  /* PCI1006XG4 */
++
++
+ /*
+  * The PLX NTB uses devfn proxy IDs to move TLPs between NT endpoints.
+  * These IDs are used to forward responses to the originator on the other
+@@ -6254,6 +6266,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2b, dpc_log_size);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2d, dpc_log_size);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2f, dpc_log_size);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a31, dpc_log_size);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0xa72f, dpc_log_size);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0xa73f, dpc_log_size);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0xa76e, dpc_log_size);
+ #endif
+diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
+index c7e1089ffdafcb..b14dfab04d846c 100644
+--- a/drivers/pci/switch/switchtec.c
++++ b/drivers/pci/switch/switchtec.c
+@@ -1739,6 +1739,26 @@ static void switchtec_pci_remove(struct pci_dev *pdev)
+ 		.driver_data = gen, \
+ 	}
+ 
++#define SWITCHTEC_PCI100X_DEVICE(device_id, gen) \
++	{ \
++		.vendor     = PCI_VENDOR_ID_EFAR, \
++		.device     = device_id, \
++		.subvendor  = PCI_ANY_ID, \
++		.subdevice  = PCI_ANY_ID, \
++		.class      = (PCI_CLASS_MEMORY_OTHER << 8), \
++		.class_mask = 0xFFFFFFFF, \
++		.driver_data = gen, \
++	}, \
++	{ \
++		.vendor     = PCI_VENDOR_ID_EFAR, \
++		.device     = device_id, \
++		.subvendor  = PCI_ANY_ID, \
++		.subdevice  = PCI_ANY_ID, \
++		.class      = (PCI_CLASS_BRIDGE_OTHER << 8), \
++		.class_mask = 0xFFFFFFFF, \
++		.driver_data = gen, \
++	}
++
+ static const struct pci_device_id switchtec_pci_tbl[] = {
+ 	SWITCHTEC_PCI_DEVICE(0x8531, SWITCHTEC_GEN3),  /* PFX 24xG3 */
+ 	SWITCHTEC_PCI_DEVICE(0x8532, SWITCHTEC_GEN3),  /* PFX 32xG3 */
+@@ -1833,6 +1853,12 @@ static const struct pci_device_id switchtec_pci_tbl[] = {
+ 	SWITCHTEC_PCI_DEVICE(0x5552, SWITCHTEC_GEN5),  /* PAXA 52XG5 */
+ 	SWITCHTEC_PCI_DEVICE(0x5536, SWITCHTEC_GEN5),  /* PAXA 36XG5 */
+ 	SWITCHTEC_PCI_DEVICE(0x5528, SWITCHTEC_GEN5),  /* PAXA 28XG5 */
++	SWITCHTEC_PCI100X_DEVICE(0x1001, SWITCHTEC_GEN4),  /* PCI1001 16XG4 */
++	SWITCHTEC_PCI100X_DEVICE(0x1002, SWITCHTEC_GEN4),  /* PCI1002 12XG4 */
++	SWITCHTEC_PCI100X_DEVICE(0x1003, SWITCHTEC_GEN4),  /* PCI1003 16XG4 */
++	SWITCHTEC_PCI100X_DEVICE(0x1004, SWITCHTEC_GEN4),  /* PCI1004 16XG4 */
++	SWITCHTEC_PCI100X_DEVICE(0x1005, SWITCHTEC_GEN4),  /* PCI1005 16XG4 */
++	SWITCHTEC_PCI100X_DEVICE(0x1006, SWITCHTEC_GEN4),  /* PCI1006 16XG4 */
+ 	{0}
+ };
+ MODULE_DEVICE_TABLE(pci, switchtec_pci_tbl);
+diff --git a/drivers/pinctrl/pinconf-generic.c b/drivers/pinctrl/pinconf-generic.c
+index 0b13d7f17b3256..42547f64453e85 100644
+--- a/drivers/pinctrl/pinconf-generic.c
++++ b/drivers/pinctrl/pinconf-generic.c
+@@ -89,12 +89,12 @@ static void pinconf_generic_dump_one(struct pinctrl_dev *pctldev,
+ 		seq_puts(s, items[i].display);
+ 		/* Print unit if available */
+ 		if (items[i].has_arg) {
+-			seq_printf(s, " (0x%x",
+-				   pinconf_to_config_argument(config));
++			u32 val = pinconf_to_config_argument(config);
++
+ 			if (items[i].format)
+-				seq_printf(s, " %s)", items[i].format);
++				seq_printf(s, " (%u %s)", val, items[i].format);
+ 			else
+-				seq_puts(s, ")");
++				seq_printf(s, " (0x%x)", val);
+ 		}
+ 	}
+ }
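
After the rewrite the argument is printed once, inside the parentheses, and in decimal whenever a unit string exists, so e.g. a 100 usec debounce would render as " (100 usec)" rather than the previous " (0x64 usec)". The two output shapes, annotated:

if (items[i].format)
	seq_printf(s, " (%u %s)", val, items[i].format);	/* " (100 usec)" */
else
	seq_printf(s, " (0x%x)", val);				/* " (0x64)" */
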
+diff --git a/drivers/pinctrl/pinctrl-cy8c95x0.c b/drivers/pinctrl/pinctrl-cy8c95x0.c
+index 5096ccdd459ea4..7a6a1434ae7f4b 100644
+--- a/drivers/pinctrl/pinctrl-cy8c95x0.c
++++ b/drivers/pinctrl/pinctrl-cy8c95x0.c
+@@ -42,7 +42,7 @@
+ #define CY8C95X0_PORTSEL	0x18
+ /* Port settings, write PORTSEL first */
+ #define CY8C95X0_INTMASK	0x19
+-#define CY8C95X0_PWMSEL		0x1A
++#define CY8C95X0_SELPWM		0x1A
+ #define CY8C95X0_INVERT		0x1B
+ #define CY8C95X0_DIRECTION	0x1C
+ /* Drive mode register change state on writing '1' */
+@@ -330,14 +330,14 @@ static int cypress_get_pin_mask(struct cy8c95x0_pinctrl *chip, unsigned int pin)
+ static bool cy8c95x0_readable_register(struct device *dev, unsigned int reg)
+ {
+ 	/*
+-	 * Only 12 registers are present per port (see Table 6 in the
+-	 * datasheet).
++	 * Only 12 registers are present per port (see Table 6 in the datasheet).
+ 	 */
+-	if (reg >= CY8C95X0_VIRTUAL && (reg % MUXED_STRIDE) < 12)
+-		return true;
++	if (reg >= CY8C95X0_VIRTUAL && (reg % MUXED_STRIDE) >= 12)
++		return false;
+ 
+ 	switch (reg) {
+ 	case 0x24 ... 0x27:
++	case 0x31 ... 0x3f:
+ 		return false;
+ 	default:
+ 		return true;
+@@ -346,8 +346,11 @@ static bool cy8c95x0_readable_register(struct device *dev, unsigned int reg)
+ 
+ static bool cy8c95x0_writeable_register(struct device *dev, unsigned int reg)
+ {
+-	if (reg >= CY8C95X0_VIRTUAL)
+-		return true;
++	/*
++	 * Only 12 registers are present per port (see Table 6 in the datasheet).
++	 */
++	if (reg >= CY8C95X0_VIRTUAL && (reg % MUXED_STRIDE) >= 12)
++		return false;
+ 
+ 	switch (reg) {
+ 	case CY8C95X0_INPUT_(0) ... CY8C95X0_INPUT_(7):
+@@ -355,6 +358,7 @@ static bool cy8c95x0_writeable_register(struct device *dev, unsigned int reg)
+ 	case CY8C95X0_DEVID:
+ 		return false;
+ 	case 0x24 ... 0x27:
++	case 0x31 ... 0x3f:
+ 		return false;
+ 	default:
+ 		return true;
+@@ -367,8 +371,8 @@ static bool cy8c95x0_volatile_register(struct device *dev, unsigned int reg)
+ 	case CY8C95X0_INPUT_(0) ... CY8C95X0_INPUT_(7):
+ 	case CY8C95X0_INTSTATUS_(0) ... CY8C95X0_INTSTATUS_(7):
+ 	case CY8C95X0_INTMASK:
++	case CY8C95X0_SELPWM:
+ 	case CY8C95X0_INVERT:
+-	case CY8C95X0_PWMSEL:
+ 	case CY8C95X0_DIRECTION:
+ 	case CY8C95X0_DRV_PU:
+ 	case CY8C95X0_DRV_PD:
+@@ -397,7 +401,7 @@ static bool cy8c95x0_muxed_register(unsigned int reg)
+ {
+ 	switch (reg) {
+ 	case CY8C95X0_INTMASK:
+-	case CY8C95X0_PWMSEL:
++	case CY8C95X0_SELPWM:
+ 	case CY8C95X0_INVERT:
+ 	case CY8C95X0_DIRECTION:
+ 	case CY8C95X0_DRV_PU:
+@@ -468,7 +472,11 @@ static const struct regmap_config cy8c9520_i2c_regmap = {
+ 	.max_register = 0,		/* Updated at runtime */
+ 	.num_reg_defaults_raw = 0,	/* Updated at runtime */
+ 	.use_single_read = true,	/* Workaround for regcache bug */
++#if IS_ENABLED(CONFIG_DEBUG_PINCTRL)
++	.disable_locking = false,
++#else
+ 	.disable_locking = true,
++#endif
+ };
+ 
+ static inline int cy8c95x0_regmap_update_bits_base(struct cy8c95x0_pinctrl *chip,
+@@ -799,7 +807,7 @@ static int cy8c95x0_gpio_get_pincfg(struct cy8c95x0_pinctrl *chip,
+ 		reg = CY8C95X0_DIRECTION;
+ 		break;
+ 	case PIN_CONFIG_MODE_PWM:
+-		reg = CY8C95X0_PWMSEL;
++		reg = CY8C95X0_SELPWM;
+ 		break;
+ 	case PIN_CONFIG_OUTPUT:
+ 		reg = CY8C95X0_OUTPUT;
+@@ -881,7 +889,7 @@ static int cy8c95x0_gpio_set_pincfg(struct cy8c95x0_pinctrl *chip,
+ 		reg = CY8C95X0_DRV_PP_FAST;
+ 		break;
+ 	case PIN_CONFIG_MODE_PWM:
+-		reg = CY8C95X0_PWMSEL;
++		reg = CY8C95X0_SELPWM;
+ 		break;
+ 	case PIN_CONFIG_OUTPUT_ENABLE:
+ 		ret = cy8c95x0_pinmux_direction(chip, off, !arg);
+@@ -1171,7 +1179,7 @@ static void cy8c95x0_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *
+ 	bitmap_zero(mask, MAX_LINE);
+ 	__set_bit(pin, mask);
+ 
+-	if (cy8c95x0_read_regs_mask(chip, CY8C95X0_PWMSEL, pwm, mask)) {
++	if (cy8c95x0_read_regs_mask(chip, CY8C95X0_SELPWM, pwm, mask)) {
+ 		seq_puts(s, "not available");
+ 		return;
+ 	}
+@@ -1216,7 +1224,7 @@ static int cy8c95x0_set_mode(struct cy8c95x0_pinctrl *chip, unsigned int off, bo
+ 	u8 port = cypress_get_port(chip, off);
+ 	u8 bit = cypress_get_pin_mask(chip, off);
+ 
+-	return cy8c95x0_regmap_write_bits(chip, CY8C95X0_PWMSEL, port, bit, mode ? bit : 0);
++	return cy8c95x0_regmap_write_bits(chip, CY8C95X0_SELPWM, port, bit, mode ? bit : 0);
+ }
+ 
+ static int cy8c95x0_pinmux_mode(struct cy8c95x0_pinctrl *chip,
+@@ -1365,7 +1373,7 @@ static int cy8c95x0_irq_setup(struct cy8c95x0_pinctrl *chip, int irq)
+ 
+ 	ret = devm_request_threaded_irq(chip->dev, irq,
+ 					NULL, cy8c95x0_irq_handler,
+-					IRQF_ONESHOT | IRQF_SHARED | IRQF_TRIGGER_HIGH,
++					IRQF_ONESHOT | IRQF_SHARED,
+ 					dev_name(chip->dev), chip);
+ 	if (ret) {
+ 		dev_err(chip->dev, "failed to request irq %d\n", irq);
+diff --git a/drivers/soc/tegra/fuse/fuse-tegra30.c b/drivers/soc/tegra/fuse/fuse-tegra30.c
+index eb14e5ff5a0aa8..e24ab5f7d2bf10 100644
+--- a/drivers/soc/tegra/fuse/fuse-tegra30.c
++++ b/drivers/soc/tegra/fuse/fuse-tegra30.c
+@@ -647,15 +647,20 @@ static const struct nvmem_cell_lookup tegra234_fuse_lookups[] = {
+ };
+ 
+ static const struct nvmem_keepout tegra234_fuse_keepouts[] = {
+-	{ .start = 0x01c, .end = 0x0c8 },
+-	{ .start = 0x12c, .end = 0x184 },
++	{ .start = 0x01c, .end = 0x064 },
++	{ .start = 0x084, .end = 0x0a0 },
++	{ .start = 0x0a4, .end = 0x0c8 },
++	{ .start = 0x12c, .end = 0x164 },
++	{ .start = 0x16c, .end = 0x184 },
+ 	{ .start = 0x190, .end = 0x198 },
+ 	{ .start = 0x1a0, .end = 0x204 },
+-	{ .start = 0x21c, .end = 0x250 },
+-	{ .start = 0x25c, .end = 0x2f0 },
++	{ .start = 0x21c, .end = 0x2f0 },
+ 	{ .start = 0x310, .end = 0x3d8 },
+-	{ .start = 0x400, .end = 0x4f0 },
+-	{ .start = 0x4f8, .end = 0x7e8 },
++	{ .start = 0x400, .end = 0x420 },
++	{ .start = 0x444, .end = 0x490 },
++	{ .start = 0x4bc, .end = 0x4f0 },
++	{ .start = 0x4f8, .end = 0x54c },
++	{ .start = 0x57c, .end = 0x7e8 },
+ 	{ .start = 0x8d0, .end = 0x8d8 },
+ 	{ .start = 0xacc, .end = 0xf00 }
+ };
+diff --git a/drivers/spi/spi-sn-f-ospi.c b/drivers/spi/spi-sn-f-ospi.c
+index a7c3b3923b4af7..fd8c8eb37d01d6 100644
+--- a/drivers/spi/spi-sn-f-ospi.c
++++ b/drivers/spi/spi-sn-f-ospi.c
+@@ -116,6 +116,9 @@ struct f_ospi {
+ 
+ static u32 f_ospi_get_dummy_cycle(const struct spi_mem_op *op)
+ {
++	if (!op->dummy.nbytes)
++		return 0;
++
+ 	return (op->dummy.nbytes * 8) / op->dummy.buswidth;
+ }
+ 
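
The early return guards the division below it: dummy cycles are dummy-bits / bus-width, and an op with no dummy phase may carry buswidth == 0. The whole computation in isolation (sketch):

/* cycles = (bytes * 8 bits) / bus width; 0 when there is no dummy phase */
static u32 dummy_cycles_sketch(u32 nbytes, u32 buswidth)
{
	return nbytes ? (nbytes * 8) / buswidth : 0;
}
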
+diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h
+index e5310c65cf52b3..10a706fe4b247d 100644
+--- a/drivers/tty/serial/8250/8250.h
++++ b/drivers/tty/serial/8250/8250.h
+@@ -374,6 +374,7 @@ static inline int is_omap1510_8250(struct uart_8250_port *pt)
+ 
+ #ifdef CONFIG_SERIAL_8250_DMA
+ extern int serial8250_tx_dma(struct uart_8250_port *);
++extern void serial8250_tx_dma_flush(struct uart_8250_port *);
+ extern int serial8250_rx_dma(struct uart_8250_port *);
+ extern void serial8250_rx_dma_flush(struct uart_8250_port *);
+ extern int serial8250_request_dma(struct uart_8250_port *);
+@@ -406,6 +407,7 @@ static inline int serial8250_tx_dma(struct uart_8250_port *p)
+ {
+ 	return -1;
+ }
++static inline void serial8250_tx_dma_flush(struct uart_8250_port *p) { }
+ static inline int serial8250_rx_dma(struct uart_8250_port *p)
+ {
+ 	return -1;
+diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c
+index d215c494ee24c1..f245a84f4a508d 100644
+--- a/drivers/tty/serial/8250/8250_dma.c
++++ b/drivers/tty/serial/8250/8250_dma.c
+@@ -149,6 +149,22 @@ int serial8250_tx_dma(struct uart_8250_port *p)
+ 	return ret;
+ }
+ 
++void serial8250_tx_dma_flush(struct uart_8250_port *p)
++{
++	struct uart_8250_dma *dma = p->dma;
++
++	if (!dma->tx_running)
++		return;
++
++	/*
++	 * kfifo_reset() has been called by the serial core, avoid
++	 * advancing and underflowing in __dma_tx_complete().
++	 */
++	dma->tx_size = 0;
++
++	dmaengine_terminate_async(dma->txchan);
++}
++
+ int serial8250_rx_dma(struct uart_8250_port *p)
+ {
+ 	struct uart_8250_dma		*dma = p->dma;
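
Zeroing tx_size above matters because the DMA completion callback advances the transmit kfifo by that amount; once the serial core has emptied the fifo with kfifo_reset(), a non-zero advance would underflow it. A simplified sketch of the completion side this protects (assuming uart_xmit_advance() as the advance primitive, as in __dma_tx_complete()):

static void dma_tx_complete_sketch(struct uart_8250_port *p)
{
	struct uart_8250_dma *dma = p->dma;

	dma->tx_running = 0;
	/* a no-op after a flush, since tx_size was zeroed */
	uart_xmit_advance(&p->port, dma->tx_size);
}
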
+diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
+index 6709b6a5f3011d..de6d90bf0d70a2 100644
+--- a/drivers/tty/serial/8250/8250_pci.c
++++ b/drivers/tty/serial/8250/8250_pci.c
+@@ -64,23 +64,17 @@
+ #define PCIE_DEVICE_ID_NEO_2_OX_IBM	0x00F6
+ #define PCI_DEVICE_ID_PLX_CRONYX_OMEGA	0xc001
+ #define PCI_DEVICE_ID_INTEL_PATSBURG_KT 0x1d3d
+-#define PCI_VENDOR_ID_WCH		0x4348
+-#define PCI_DEVICE_ID_WCH_CH352_2S	0x3253
+-#define PCI_DEVICE_ID_WCH_CH353_4S	0x3453
+-#define PCI_DEVICE_ID_WCH_CH353_2S1PF	0x5046
+-#define PCI_DEVICE_ID_WCH_CH353_1S1P	0x5053
+-#define PCI_DEVICE_ID_WCH_CH353_2S1P	0x7053
+-#define PCI_DEVICE_ID_WCH_CH355_4S	0x7173
++
++#define PCI_DEVICE_ID_WCHCN_CH352_2S	0x3253
++#define PCI_DEVICE_ID_WCHCN_CH355_4S	0x7173
++
+ #define PCI_VENDOR_ID_AGESTAR		0x5372
+ #define PCI_DEVICE_ID_AGESTAR_9375	0x6872
+ #define PCI_DEVICE_ID_BROADCOM_TRUMANAGE 0x160a
+ #define PCI_DEVICE_ID_AMCC_ADDIDATA_APCI7800 0x818e
+ 
+-#define PCIE_VENDOR_ID_WCH		0x1c00
+-#define PCIE_DEVICE_ID_WCH_CH382_2S1P	0x3250
+-#define PCIE_DEVICE_ID_WCH_CH384_4S	0x3470
+-#define PCIE_DEVICE_ID_WCH_CH384_8S	0x3853
+-#define PCIE_DEVICE_ID_WCH_CH382_2S	0x3253
++#define PCI_DEVICE_ID_WCHIC_CH384_4S	0x3470
++#define PCI_DEVICE_ID_WCHIC_CH384_8S	0x3853
+ 
+ #define PCI_DEVICE_ID_MOXA_CP102E	0x1024
+ #define PCI_DEVICE_ID_MOXA_CP102EL	0x1025
+@@ -2777,80 +2771,80 @@ static struct pci_serial_quirk pci_serial_quirks[] = {
+ 	},
+ 	/* WCH CH353 1S1P card (16550 clone) */
+ 	{
+-		.vendor         = PCI_VENDOR_ID_WCH,
+-		.device         = PCI_DEVICE_ID_WCH_CH353_1S1P,
++		.vendor         = PCI_VENDOR_ID_WCHCN,
++		.device         = PCI_DEVICE_ID_WCHCN_CH353_1S1P,
+ 		.subvendor      = PCI_ANY_ID,
+ 		.subdevice      = PCI_ANY_ID,
+ 		.setup          = pci_wch_ch353_setup,
+ 	},
+ 	/* WCH CH353 2S1P card (16550 clone) */
+ 	{
+-		.vendor         = PCI_VENDOR_ID_WCH,
+-		.device         = PCI_DEVICE_ID_WCH_CH353_2S1P,
++		.vendor         = PCI_VENDOR_ID_WCHCN,
++		.device         = PCI_DEVICE_ID_WCHCN_CH353_2S1P,
+ 		.subvendor      = PCI_ANY_ID,
+ 		.subdevice      = PCI_ANY_ID,
+ 		.setup          = pci_wch_ch353_setup,
+ 	},
+ 	/* WCH CH353 4S card (16550 clone) */
+ 	{
+-		.vendor         = PCI_VENDOR_ID_WCH,
+-		.device         = PCI_DEVICE_ID_WCH_CH353_4S,
++		.vendor         = PCI_VENDOR_ID_WCHCN,
++		.device         = PCI_DEVICE_ID_WCHCN_CH353_4S,
+ 		.subvendor      = PCI_ANY_ID,
+ 		.subdevice      = PCI_ANY_ID,
+ 		.setup          = pci_wch_ch353_setup,
+ 	},
+ 	/* WCH CH353 2S1PF card (16550 clone) */
+ 	{
+-		.vendor         = PCI_VENDOR_ID_WCH,
+-		.device         = PCI_DEVICE_ID_WCH_CH353_2S1PF,
++		.vendor         = PCI_VENDOR_ID_WCHCN,
++		.device         = PCI_DEVICE_ID_WCHCN_CH353_2S1PF,
+ 		.subvendor      = PCI_ANY_ID,
+ 		.subdevice      = PCI_ANY_ID,
+ 		.setup          = pci_wch_ch353_setup,
+ 	},
+ 	/* WCH CH352 2S card (16550 clone) */
+ 	{
+-		.vendor		= PCI_VENDOR_ID_WCH,
+-		.device		= PCI_DEVICE_ID_WCH_CH352_2S,
++		.vendor		= PCI_VENDOR_ID_WCHCN,
++		.device		= PCI_DEVICE_ID_WCHCN_CH352_2S,
+ 		.subvendor	= PCI_ANY_ID,
+ 		.subdevice	= PCI_ANY_ID,
+ 		.setup		= pci_wch_ch353_setup,
+ 	},
+ 	/* WCH CH355 4S card (16550 clone) */
+ 	{
+-		.vendor		= PCI_VENDOR_ID_WCH,
+-		.device		= PCI_DEVICE_ID_WCH_CH355_4S,
++		.vendor		= PCI_VENDOR_ID_WCHCN,
++		.device		= PCI_DEVICE_ID_WCHCN_CH355_4S,
+ 		.subvendor	= PCI_ANY_ID,
+ 		.subdevice	= PCI_ANY_ID,
+ 		.setup		= pci_wch_ch355_setup,
+ 	},
+ 	/* WCH CH382 2S card (16850 clone) */
+ 	{
+-		.vendor         = PCIE_VENDOR_ID_WCH,
+-		.device         = PCIE_DEVICE_ID_WCH_CH382_2S,
++		.vendor         = PCI_VENDOR_ID_WCHIC,
++		.device         = PCI_DEVICE_ID_WCHIC_CH382_2S,
+ 		.subvendor      = PCI_ANY_ID,
+ 		.subdevice      = PCI_ANY_ID,
+ 		.setup          = pci_wch_ch38x_setup,
+ 	},
+ 	/* WCH CH382 2S1P card (16850 clone) */
+ 	{
+-		.vendor         = PCIE_VENDOR_ID_WCH,
+-		.device         = PCIE_DEVICE_ID_WCH_CH382_2S1P,
++		.vendor         = PCI_VENDOR_ID_WCHIC,
++		.device         = PCI_DEVICE_ID_WCHIC_CH382_2S1P,
+ 		.subvendor      = PCI_ANY_ID,
+ 		.subdevice      = PCI_ANY_ID,
+ 		.setup          = pci_wch_ch38x_setup,
+ 	},
+ 	/* WCH CH384 4S card (16850 clone) */
+ 	{
+-		.vendor         = PCIE_VENDOR_ID_WCH,
+-		.device         = PCIE_DEVICE_ID_WCH_CH384_4S,
++		.vendor         = PCI_VENDOR_ID_WCHIC,
++		.device         = PCI_DEVICE_ID_WCHIC_CH384_4S,
+ 		.subvendor      = PCI_ANY_ID,
+ 		.subdevice      = PCI_ANY_ID,
+ 		.setup          = pci_wch_ch38x_setup,
+ 	},
+ 	/* WCH CH384 8S card (16850 clone) */
+ 	{
+-		.vendor         = PCIE_VENDOR_ID_WCH,
+-		.device         = PCIE_DEVICE_ID_WCH_CH384_8S,
++		.vendor         = PCI_VENDOR_ID_WCHIC,
++		.device         = PCI_DEVICE_ID_WCHIC_CH384_8S,
+ 		.subvendor      = PCI_ANY_ID,
+ 		.subdevice      = PCI_ANY_ID,
+ 		.init           = pci_wch_ch38x_init,
+@@ -3927,11 +3921,11 @@ static const struct pci_device_id blacklist[] = {
+ 
+ 	/* multi-io cards handled by parport_serial */
+ 	/* WCH CH353 2S1P */
+-	{ PCI_DEVICE(0x4348, 0x7053), 0, 0, REPORT_CONFIG(PARPORT_SERIAL), },
++	{ PCI_VDEVICE(WCHCN, 0x7053), REPORT_CONFIG(PARPORT_SERIAL), },
+ 	/* WCH CH353 1S1P */
+-	{ PCI_DEVICE(0x4348, 0x5053), 0, 0, REPORT_CONFIG(PARPORT_SERIAL), },
++	{ PCI_VDEVICE(WCHCN, 0x5053), REPORT_CONFIG(PARPORT_SERIAL), },
+ 	/* WCH CH382 2S1P */
+-	{ PCI_DEVICE(0x1c00, 0x3250), 0, 0, REPORT_CONFIG(PARPORT_SERIAL), },
++	{ PCI_VDEVICE(WCHIC, 0x3250), REPORT_CONFIG(PARPORT_SERIAL), },
+ 
+ 	/* Intel platforms with MID UART */
+ 	{ PCI_VDEVICE(INTEL, 0x081b), REPORT_8250_CONFIG(MID), },
+@@ -6004,27 +5998,27 @@ static const struct pci_device_id serial_pci_tbl[] = {
+ 	 * WCH CH353 series devices: The 2S1P is handled by parport_serial
+ 	 * so not listed here.
+ 	 */
+-	{	PCI_VENDOR_ID_WCH, PCI_DEVICE_ID_WCH_CH353_4S,
++	{	PCI_VENDOR_ID_WCHCN, PCI_DEVICE_ID_WCHCN_CH353_4S,
+ 		PCI_ANY_ID, PCI_ANY_ID,
+ 		0, 0, pbn_b0_bt_4_115200 },
+ 
+-	{	PCI_VENDOR_ID_WCH, PCI_DEVICE_ID_WCH_CH353_2S1PF,
++	{	PCI_VENDOR_ID_WCHCN, PCI_DEVICE_ID_WCHCN_CH353_2S1PF,
+ 		PCI_ANY_ID, PCI_ANY_ID,
+ 		0, 0, pbn_b0_bt_2_115200 },
+ 
+-	{	PCI_VENDOR_ID_WCH, PCI_DEVICE_ID_WCH_CH355_4S,
++	{	PCI_VENDOR_ID_WCHCN, PCI_DEVICE_ID_WCHCN_CH355_4S,
+ 		PCI_ANY_ID, PCI_ANY_ID,
+ 		0, 0, pbn_b0_bt_4_115200 },
+ 
+-	{	PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH382_2S,
++	{	PCI_VENDOR_ID_WCHIC, PCI_DEVICE_ID_WCHIC_CH382_2S,
+ 		PCI_ANY_ID, PCI_ANY_ID,
+ 		0, 0, pbn_wch382_2 },
+ 
+-	{	PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH384_4S,
++	{	PCI_VENDOR_ID_WCHIC, PCI_DEVICE_ID_WCHIC_CH384_4S,
+ 		PCI_ANY_ID, PCI_ANY_ID,
+ 		0, 0, pbn_wch384_4 },
+ 
+-	{	PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH384_8S,
++	{	PCI_VENDOR_ID_WCHIC, PCI_DEVICE_ID_WCHIC_CH384_8S,
+ 		PCI_ANY_ID, PCI_ANY_ID,
+ 		0, 0, pbn_wch384_8 },
+ 	/*
+diff --git a/drivers/tty/serial/8250/8250_pci1xxxx.c b/drivers/tty/serial/8250/8250_pci1xxxx.c
+index d3930bf32fe4c4..f462b3d1c104ce 100644
+--- a/drivers/tty/serial/8250/8250_pci1xxxx.c
++++ b/drivers/tty/serial/8250/8250_pci1xxxx.c
+@@ -78,6 +78,12 @@
+ #define UART_TX_BYTE_FIFO			0x00
+ #define UART_FIFO_CTL				0x02
+ 
++#define UART_MODEM_CTL_REG			0x04
++#define UART_MODEM_CTL_RTS_SET			BIT(1)
++
++#define UART_LINE_STAT_REG			0x05
++#define UART_LINE_XMIT_CHECK_MASK		GENMASK(6, 5)
++
+ #define UART_ACTV_REG				0x11
+ #define UART_BLOCK_SET_ACTIVE			BIT(0)
+ 
+@@ -94,6 +100,7 @@
+ #define UART_BIT_SAMPLE_CNT_16			16
+ #define BAUD_CLOCK_DIV_INT_MSK			GENMASK(31, 8)
+ #define ADCL_CFG_RTS_DELAY_MASK			GENMASK(11, 8)
++#define FRAC_DIV_TX_END_POINT_MASK		GENMASK(23, 20)
+ 
+ #define UART_WAKE_REG				0x8C
+ #define UART_WAKE_MASK_REG			0x90
+@@ -134,6 +141,11 @@
+ #define UART_BST_STAT_LSR_FRAME_ERR		0x8000000
+ #define UART_BST_STAT_LSR_THRE			0x20000000
+ 
++#define GET_MODEM_CTL_RTS_STATUS(reg)		((reg) & UART_MODEM_CTL_RTS_SET)
++#define GET_RTS_PIN_STATUS(val)			(((val) & TIOCM_RTS) >> 1)
++#define RTS_TOGGLE_STATUS_MASK(val, reg)	(GET_MODEM_CTL_RTS_STATUS(reg) \
++						 != GET_RTS_PIN_STATUS(val))
++
+ struct pci1xxxx_8250 {
+ 	unsigned int nr;
+ 	u8 dev_rev;
+@@ -254,6 +266,47 @@ static void pci1xxxx_set_divisor(struct uart_port *port, unsigned int baud,
+ 	       port->membase + UART_BAUD_CLK_DIVISOR_REG);
+ }
+ 
++static void pci1xxxx_set_mctrl(struct uart_port *port, unsigned int mctrl)
++{
++	u32 fract_div_cfg_reg;
++	u32 line_stat_reg;
++	u32 modem_ctl_reg;
++	u32 adcl_cfg_reg;
++
++	adcl_cfg_reg = readl(port->membase + ADCL_CFG_REG);
++
++	/* HW is responsible in ADCL_EN case */
++	if ((adcl_cfg_reg & (ADCL_CFG_EN | ADCL_CFG_PIN_SEL)))
++		return;
++
++	modem_ctl_reg = readl(port->membase + UART_MODEM_CTL_REG);
++
++	serial8250_do_set_mctrl(port, mctrl);
++
++	if (RTS_TOGGLE_STATUS_MASK(mctrl, modem_ctl_reg)) {
++		line_stat_reg = readl(port->membase + UART_LINE_STAT_REG);
++		if (line_stat_reg & UART_LINE_XMIT_CHECK_MASK) {
++			fract_div_cfg_reg = readl(port->membase +
++						  FRAC_DIV_CFG_REG);
++
++			writel((fract_div_cfg_reg &
++			       ~(FRAC_DIV_TX_END_POINT_MASK)),
++			       port->membase + FRAC_DIV_CFG_REG);
++
++			/* Enable ADC and set the nRTS pin */
++			writel((adcl_cfg_reg | (ADCL_CFG_EN |
++			       ADCL_CFG_PIN_SEL)),
++			       port->membase + ADCL_CFG_REG);
++
++			/* Revert to the original settings */
++			writel(adcl_cfg_reg, port->membase + ADCL_CFG_REG);
++
++			writel(fract_div_cfg_reg, port->membase +
++			       FRAC_DIV_CFG_REG);
++		}
++	}
++}
++
+ static int pci1xxxx_rs485_config(struct uart_port *port,
+ 				 struct ktermios *termios,
+ 				 struct serial_rs485 *rs485)
+@@ -631,9 +684,14 @@ static int pci1xxxx_setup(struct pci_dev *pdev,
+ 	port->port.rs485_config = pci1xxxx_rs485_config;
+ 	port->port.rs485_supported = pci1xxxx_rs485_supported;
+ 
+-	/* From C0 rev Burst operation is supported */
++	/*
++	 * C0 and later revisions support Burst operation.
++	 * RTS workaround in mctrl is applicable only to B0.
++	 */
+ 	if (rev >= 0xC0)
+ 		port->port.handle_irq = pci1xxxx_handle_irq;
++	else if (rev == 0xB0)
++		port->port.set_mctrl = pci1xxxx_set_mctrl;
+ 
+ 	ret = serial8250_pci_setup_port(pdev, port, 0, PORT_OFFSET * port_idx, 0);
+ 	if (ret < 0)
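
A note on the helper macros above: GET_MODEM_CTL_RTS_STATUS and GET_RTS_PIN_STATUS normalize the hardware MCR bit and the TIOCM_RTS request to the same bit position, so a simple inequality detects an RTS toggle request. A minimal self-contained sketch of that comparison (the TIOCM/MCR values match the usual serial conventions, but the macro names here are illustrative):

    #include <stdio.h>

    #define TIOCM_RTS	0x004			/* as in termios */
    #define MCR_RTS	(1u << 1)		/* cf. UART_MODEM_CTL_RTS_SET */

    /* Both sides are normalized to bit 1 before comparing. */
    #define HW_RTS(reg)		((reg) & MCR_RTS)
    #define SW_RTS(val)		(((val) & TIOCM_RTS) >> 1)
    #define RTS_TOGGLED(val, reg)	(HW_RTS(reg) != SW_RTS(val))

    int main(void)
    {
            unsigned int mctrl = TIOCM_RTS;	/* caller asserts RTS */
            unsigned int mcr = 0;		/* HW currently deasserted */

            printf("toggle=%d\n", RTS_TOGGLED(mctrl, mcr));	/* prints 1 */
            return 0;
    }
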
+diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
+index 11519aa2598a01..c1376727642a71 100644
+--- a/drivers/tty/serial/8250/8250_port.c
++++ b/drivers/tty/serial/8250/8250_port.c
+@@ -2524,6 +2524,14 @@ static void serial8250_shutdown(struct uart_port *port)
+ 		serial8250_do_shutdown(port);
+ }
+ 
++static void serial8250_flush_buffer(struct uart_port *port)
++{
++	struct uart_8250_port *up = up_to_u8250p(port);
++
++	if (up->dma)
++		serial8250_tx_dma_flush(up);
++}
++
+ static unsigned int serial8250_do_get_divisor(struct uart_port *port,
+ 					      unsigned int baud,
+ 					      unsigned int *frac)
+@@ -3207,6 +3215,7 @@ static const struct uart_ops serial8250_pops = {
+ 	.break_ctl	= serial8250_break_ctl,
+ 	.startup	= serial8250_startup,
+ 	.shutdown	= serial8250_shutdown,
++	.flush_buffer	= serial8250_flush_buffer,
+ 	.set_termios	= serial8250_set_termios,
+ 	.set_ldisc	= serial8250_set_ldisc,
+ 	.pm		= serial8250_pm,
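
The flush_buffer hook added above recovers the 8250-specific state from the generic struct uart_port with up_to_u8250p(), a container_of() wrapper. A self-contained sketch of the idiom, with stand-in types in place of the real uart structures:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct port { int line; };		/* stand-in for uart_port */
    struct port8250 {			/* stand-in for uart_8250_port */
            int dma_enabled;
            struct port port;		/* embedded generic struct */
    };

    static struct port8250 *to_8250(struct port *p)	/* cf. up_to_u8250p() */
    {
            return container_of(p, struct port8250, port);
    }

    int main(void)
    {
            struct port8250 up = { .dma_enabled = 1 };
            struct port *generic = &up.port;

            printf("dma=%d\n", to_8250(generic)->dma_enabled);
            return 0;
    }
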
+diff --git a/drivers/tty/serial/serial_port.c b/drivers/tty/serial/serial_port.c
+index d35f1d24156c22..85285c56fabff4 100644
+--- a/drivers/tty/serial/serial_port.c
++++ b/drivers/tty/serial/serial_port.c
+@@ -173,6 +173,7 @@ EXPORT_SYMBOL(uart_remove_one_port);
+  * The caller is responsible to initialize the following fields of the @port
+  *   ->dev (must be valid)
+  *   ->flags
++ *   ->iobase
+  *   ->mapbase
+  *   ->mapsize
+  *   ->regshift (if @use_defaults is false)
+@@ -214,7 +215,7 @@ static int __uart_read_properties(struct uart_port *port, bool use_defaults)
+ 	/* Read the registers I/O access type (default: MMIO 8-bit) */
+ 	ret = device_property_read_u32(dev, "reg-io-width", &value);
+ 	if (ret) {
+-		port->iotype = UPIO_MEM;
++		port->iotype = port->iobase ? UPIO_PORT : UPIO_MEM;
+ 	} else {
+ 		switch (value) {
+ 		case 1:
+@@ -227,11 +228,11 @@ static int __uart_read_properties(struct uart_port *port, bool use_defaults)
+ 			port->iotype = device_is_big_endian(dev) ? UPIO_MEM32BE : UPIO_MEM32;
+ 			break;
+ 		default:
++			port->iotype = UPIO_UNKNOWN;
+ 			if (!use_defaults) {
+ 				dev_err(dev, "Unsupported reg-io-width (%u)\n", value);
+ 				return -EINVAL;
+ 			}
+-			port->iotype = UPIO_UNKNOWN;
+ 			break;
+ 		}
+ 	}
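
The serial_port.c hunk changes two defaults: with no reg-io-width property the port now falls back to port I/O when ->iobase is set (UPIO_PORT) rather than always MMIO, and an unsupported width records UPIO_UNKNOWN before any error return. A compact sketch of that decision logic, with stand-in enum values in place of the kernel's UPIO_* constants:

    #include <stdio.h>

    enum iotype { IO_UNKNOWN, IO_PORT, IO_MEM, IO_MEM16, IO_MEM32 };

    /* Stand-in for the reg-io-width handling in __uart_read_properties(). */
    static enum iotype pick_iotype(int have_width_prop, unsigned int width,
                                   unsigned long iobase)
    {
            if (!have_width_prop)		/* property absent */
                    return iobase ? IO_PORT : IO_MEM;

            switch (width) {
            case 1: return IO_MEM;
            case 2: return IO_MEM16;
            case 4: return IO_MEM32;
            default: return IO_UNKNOWN;	/* set before the error return */
            }
    }

    int main(void)
    {
            printf("%d\n", pick_iotype(0, 0, 0x3f8));	/* IO_PORT */
            return 0;
    }
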
+diff --git a/drivers/ufs/core/ufs_bsg.c b/drivers/ufs/core/ufs_bsg.c
+index 58023f735c195f..8d4ad0a3f2cf02 100644
+--- a/drivers/ufs/core/ufs_bsg.c
++++ b/drivers/ufs/core/ufs_bsg.c
+@@ -216,6 +216,7 @@ void ufs_bsg_remove(struct ufs_hba *hba)
+ 		return;
+ 
+ 	bsg_remove_queue(hba->bsg_queue);
++	hba->bsg_queue = NULL;
+ 
+ 	device_del(bsg_dev);
+ 	put_device(bsg_dev);
+diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
+index b786cba9a270f4..67410c4cebee6d 100644
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -258,10 +258,15 @@ ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
+ 	return UFS_PM_LVL_0;
+ }
+ 
++static bool ufshcd_has_pending_tasks(struct ufs_hba *hba)
++{
++	return hba->outstanding_tasks || hba->active_uic_cmd ||
++	       hba->uic_async_done;
++}
++
+ static bool ufshcd_is_ufs_dev_busy(struct ufs_hba *hba)
+ {
+-	return (hba->clk_gating.active_reqs || hba->outstanding_reqs || hba->outstanding_tasks ||
+-		hba->active_uic_cmd || hba->uic_async_done);
++	return hba->outstanding_reqs || ufshcd_has_pending_tasks(hba);
+ }
+ 
+ static const struct ufs_dev_quirk ufs_fixups[] = {
+@@ -1835,19 +1840,16 @@ static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
+ static void ufshcd_ungate_work(struct work_struct *work)
+ {
+ 	int ret;
+-	unsigned long flags;
+ 	struct ufs_hba *hba = container_of(work, struct ufs_hba,
+ 			clk_gating.ungate_work);
+ 
+ 	cancel_delayed_work_sync(&hba->clk_gating.gate_work);
+ 
+-	spin_lock_irqsave(hba->host->host_lock, flags);
+-	if (hba->clk_gating.state == CLKS_ON) {
+-		spin_unlock_irqrestore(hba->host->host_lock, flags);
+-		return;
++	scoped_guard(spinlock_irqsave, &hba->clk_gating.lock) {
++		if (hba->clk_gating.state == CLKS_ON)
++			return;
+ 	}
+ 
+-	spin_unlock_irqrestore(hba->host->host_lock, flags);
+ 	ufshcd_hba_vreg_set_hpm(hba);
+ 	ufshcd_setup_clocks(hba, true);
+ 
+@@ -1882,7 +1884,7 @@ void ufshcd_hold(struct ufs_hba *hba)
+ 	if (!ufshcd_is_clkgating_allowed(hba) ||
+ 	    !hba->clk_gating.is_initialized)
+ 		return;
+-	spin_lock_irqsave(hba->host->host_lock, flags);
++	spin_lock_irqsave(&hba->clk_gating.lock, flags);
+ 	hba->clk_gating.active_reqs++;
+ 
+ start:
+@@ -1898,11 +1900,11 @@ void ufshcd_hold(struct ufs_hba *hba)
+ 		 */
+ 		if (ufshcd_can_hibern8_during_gating(hba) &&
+ 		    ufshcd_is_link_hibern8(hba)) {
+-			spin_unlock_irqrestore(hba->host->host_lock, flags);
++			spin_unlock_irqrestore(&hba->clk_gating.lock, flags);
+ 			flush_result = flush_work(&hba->clk_gating.ungate_work);
+ 			if (hba->clk_gating.is_suspended && !flush_result)
+ 				return;
+-			spin_lock_irqsave(hba->host->host_lock, flags);
++			spin_lock_irqsave(&hba->clk_gating.lock, flags);
+ 			goto start;
+ 		}
+ 		break;
+@@ -1931,17 +1933,17 @@ void ufshcd_hold(struct ufs_hba *hba)
+ 		 */
+ 		fallthrough;
+ 	case REQ_CLKS_ON:
+-		spin_unlock_irqrestore(hba->host->host_lock, flags);
++		spin_unlock_irqrestore(&hba->clk_gating.lock, flags);
+ 		flush_work(&hba->clk_gating.ungate_work);
+ 		/* Make sure state is CLKS_ON before returning */
+-		spin_lock_irqsave(hba->host->host_lock, flags);
++		spin_lock_irqsave(&hba->clk_gating.lock, flags);
+ 		goto start;
+ 	default:
+ 		dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
+ 				__func__, hba->clk_gating.state);
+ 		break;
+ 	}
+-	spin_unlock_irqrestore(hba->host->host_lock, flags);
++	spin_unlock_irqrestore(&hba->clk_gating.lock, flags);
+ }
+ EXPORT_SYMBOL_GPL(ufshcd_hold);
+ 
+@@ -1949,28 +1951,32 @@ static void ufshcd_gate_work(struct work_struct *work)
+ {
+ 	struct ufs_hba *hba = container_of(work, struct ufs_hba,
+ 			clk_gating.gate_work.work);
+-	unsigned long flags;
+ 	int ret;
+ 
+-	spin_lock_irqsave(hba->host->host_lock, flags);
+-	/*
+-	 * In case you are here to cancel this work the gating state
+-	 * would be marked as REQ_CLKS_ON. In this case save time by
+-	 * skipping the gating work and exit after changing the clock
+-	 * state to CLKS_ON.
+-	 */
+-	if (hba->clk_gating.is_suspended ||
+-		(hba->clk_gating.state != REQ_CLKS_OFF)) {
+-		hba->clk_gating.state = CLKS_ON;
+-		trace_ufshcd_clk_gating(dev_name(hba->dev),
+-					hba->clk_gating.state);
+-		goto rel_lock;
+-	}
++	scoped_guard(spinlock_irqsave, &hba->clk_gating.lock) {
++		/*
++		 * In case you are here to cancel this work the gating state
++		 * would be marked as REQ_CLKS_ON. In this case save time by
++		 * skipping the gating work and exit after changing the clock
++		 * state to CLKS_ON.
++		 */
++		if (hba->clk_gating.is_suspended ||
++		    hba->clk_gating.state != REQ_CLKS_OFF) {
++			hba->clk_gating.state = CLKS_ON;
++			trace_ufshcd_clk_gating(dev_name(hba->dev),
++						hba->clk_gating.state);
++			return;
++		}
+ 
+-	if (ufshcd_is_ufs_dev_busy(hba) || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
+-		goto rel_lock;
++		if (hba->clk_gating.active_reqs)
++			return;
++	}
+ 
+-	spin_unlock_irqrestore(hba->host->host_lock, flags);
++	scoped_guard(spinlock_irqsave, hba->host->host_lock) {
++		if (ufshcd_is_ufs_dev_busy(hba) ||
++		    hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
++			return;
++	}
+ 
+ 	/* put the link into hibern8 mode before turning off clocks */
+ 	if (ufshcd_can_hibern8_during_gating(hba)) {
+@@ -1981,7 +1987,7 @@ static void ufshcd_gate_work(struct work_struct *work)
+ 					__func__, ret);
+ 			trace_ufshcd_clk_gating(dev_name(hba->dev),
+ 						hba->clk_gating.state);
+-			goto out;
++			return;
+ 		}
+ 		ufshcd_set_link_hibern8(hba);
+ 	}
+@@ -2001,33 +2007,34 @@ static void ufshcd_gate_work(struct work_struct *work)
+ 	 * prevent from doing cancel work multiple times when there are
+ 	 * new requests arriving before the current cancel work is done.
+ 	 */
+-	spin_lock_irqsave(hba->host->host_lock, flags);
++	guard(spinlock_irqsave)(&hba->clk_gating.lock);
+ 	if (hba->clk_gating.state == REQ_CLKS_OFF) {
+ 		hba->clk_gating.state = CLKS_OFF;
+ 		trace_ufshcd_clk_gating(dev_name(hba->dev),
+ 					hba->clk_gating.state);
+ 	}
+-rel_lock:
+-	spin_unlock_irqrestore(hba->host->host_lock, flags);
+-out:
+-	return;
+ }
+ 
+-/* host lock must be held before calling this variant */
+ static void __ufshcd_release(struct ufs_hba *hba)
+ {
++	lockdep_assert_held(&hba->clk_gating.lock);
++
+ 	if (!ufshcd_is_clkgating_allowed(hba))
+ 		return;
+ 
+ 	hba->clk_gating.active_reqs--;
+ 
+ 	if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended ||
+-	    hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL ||
+-	    hba->outstanding_tasks || !hba->clk_gating.is_initialized ||
+-	    hba->active_uic_cmd || hba->uic_async_done ||
++	    !hba->clk_gating.is_initialized ||
+ 	    hba->clk_gating.state == CLKS_OFF)
+ 		return;
+ 
++	scoped_guard(spinlock_irqsave, hba->host->host_lock) {
++		if (ufshcd_has_pending_tasks(hba) ||
++		    hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
++			return;
++	}
++
+ 	hba->clk_gating.state = REQ_CLKS_OFF;
+ 	trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
+ 	queue_delayed_work(hba->clk_gating.clk_gating_workq,
+@@ -2037,11 +2044,8 @@ static void __ufshcd_release(struct ufs_hba *hba)
+ 
+ void ufshcd_release(struct ufs_hba *hba)
+ {
+-	unsigned long flags;
+-
+-	spin_lock_irqsave(hba->host->host_lock, flags);
++	guard(spinlock_irqsave)(&hba->clk_gating.lock);
+ 	__ufshcd_release(hba);
+-	spin_unlock_irqrestore(hba->host->host_lock, flags);
+ }
+ EXPORT_SYMBOL_GPL(ufshcd_release);
+ 
+@@ -2056,11 +2060,9 @@ static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
+ void ufshcd_clkgate_delay_set(struct device *dev, unsigned long value)
+ {
+ 	struct ufs_hba *hba = dev_get_drvdata(dev);
+-	unsigned long flags;
+ 
+-	spin_lock_irqsave(hba->host->host_lock, flags);
++	guard(spinlock_irqsave)(&hba->clk_gating.lock);
+ 	hba->clk_gating.delay_ms = value;
+-	spin_unlock_irqrestore(hba->host->host_lock, flags);
+ }
+ EXPORT_SYMBOL_GPL(ufshcd_clkgate_delay_set);
+ 
+@@ -2088,7 +2090,6 @@ static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
+ 		struct device_attribute *attr, const char *buf, size_t count)
+ {
+ 	struct ufs_hba *hba = dev_get_drvdata(dev);
+-	unsigned long flags;
+ 	u32 value;
+ 
+ 	if (kstrtou32(buf, 0, &value))
+@@ -2096,9 +2097,10 @@ static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
+ 
+ 	value = !!value;
+ 
+-	spin_lock_irqsave(hba->host->host_lock, flags);
++	guard(spinlock_irqsave)(&hba->clk_gating.lock);
++
+ 	if (value == hba->clk_gating.is_enabled)
+-		goto out;
++		return count;
+ 
+ 	if (value)
+ 		__ufshcd_release(hba);
+@@ -2106,8 +2108,7 @@ static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
+ 		hba->clk_gating.active_reqs++;
+ 
+ 	hba->clk_gating.is_enabled = value;
+-out:
+-	spin_unlock_irqrestore(hba->host->host_lock, flags);
++
+ 	return count;
+ }
+ 
+@@ -8267,7 +8268,9 @@ static void ufshcd_rtc_work(struct work_struct *work)
+ 	hba = container_of(to_delayed_work(work), struct ufs_hba, ufs_rtc_update_work);
+ 
+ 	 /* Update RTC only when there are no requests in progress and UFSHCI is operational */
+-	if (!ufshcd_is_ufs_dev_busy(hba) && hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL)
++	if (!ufshcd_is_ufs_dev_busy(hba) &&
++	    hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL &&
++	    !hba->clk_gating.active_reqs)
+ 		ufshcd_update_rtc(hba);
+ 
+ 	if (ufshcd_is_ufs_dev_active(hba) && hba->dev_info.rtc_update_period)
+@@ -9186,7 +9189,6 @@ static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
+ 	int ret = 0;
+ 	struct ufs_clk_info *clki;
+ 	struct list_head *head = &hba->clk_list_head;
+-	unsigned long flags;
+ 	ktime_t start = ktime_get();
+ 	bool clk_state_changed = false;
+ 
+@@ -9236,12 +9238,11 @@ static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
+ 			if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
+ 				clk_disable_unprepare(clki->clk);
+ 		}
+-	} else if (!ret && on) {
+-		spin_lock_irqsave(hba->host->host_lock, flags);
+-		hba->clk_gating.state = CLKS_ON;
++	} else if (!ret && on && hba->clk_gating.is_initialized) {
++		scoped_guard(spinlock_irqsave, &hba->clk_gating.lock)
++			hba->clk_gating.state = CLKS_ON;
+ 		trace_ufshcd_clk_gating(dev_name(hba->dev),
+ 					hba->clk_gating.state);
+-		spin_unlock_irqrestore(hba->host->host_lock, flags);
+ 	}
+ 
+ 	if (clk_state_changed)
+@@ -10450,6 +10451,12 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
+ 	hba->irq = irq;
+ 	hba->vps = &ufs_hba_vps;
+ 
++	/*
++	 * Initialize clk_gating.lock early since it is being used in
++	 * ufshcd_setup_clocks()
++	 */
++	spin_lock_init(&hba->clk_gating.lock);
++
+ 	err = ufshcd_hba_init(hba);
+ 	if (err)
+ 		goto out_error;
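
Much of the ufshcd conversion above replaces spin_lock_irqsave()/spin_unlock_irqrestore() pairs with the scope-based guards from linux/cleanup.h, which is what allows plain return statements in place of the old rel_lock/out labels. A minimal sketch of the two forms used, assuming a recent kernel that provides these helpers:

    #include <linux/cleanup.h>
    #include <linux/errno.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(demo_lock);		/* hypothetical lock */
    static int demo_state;

    static void demo_set(int v)
    {
            guard(spinlock_irqsave)(&demo_lock);	/* held until return */
            demo_state = v;
    }

    static int demo_check_then_work(void)
    {
            scoped_guard(spinlock_irqsave, &demo_lock) {
                    if (demo_state)
                            return -EBUSY;	/* lock dropped on return */
            }
            /* lock already released here; do the slow work unlocked */
            return 0;
    }
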
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index 6b37d1c47fce13..c2ecfa3c83496f 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -371,7 +371,7 @@ static void acm_process_notification(struct acm *acm, unsigned char *buf)
+ static void acm_ctrl_irq(struct urb *urb)
+ {
+ 	struct acm *acm = urb->context;
+-	struct usb_cdc_notification *dr = urb->transfer_buffer;
++	struct usb_cdc_notification *dr;
+ 	unsigned int current_size = urb->actual_length;
+ 	unsigned int expected_size, copy_size, alloc_size;
+ 	int retval;
+@@ -398,14 +398,25 @@ static void acm_ctrl_irq(struct urb *urb)
+ 
+ 	usb_mark_last_busy(acm->dev);
+ 
+-	if (acm->nb_index)
++	if (acm->nb_index == 0) {
++		/*
++		 * The first chunk of a message must contain at least the
++		 * notification header with the length field, otherwise we
++		 * can't get an expected_size.
++		 */
++		if (current_size < sizeof(struct usb_cdc_notification)) {
++			dev_dbg(&acm->control->dev, "urb too short\n");
++			goto exit;
++		}
++		dr = urb->transfer_buffer;
++	} else {
+ 		dr = (struct usb_cdc_notification *)acm->notification_buffer;
+-
++	}
+ 	/* size = notification-header + (optional) data */
+ 	expected_size = sizeof(struct usb_cdc_notification) +
+ 					le16_to_cpu(dr->wLength);
+ 
+-	if (current_size < expected_size) {
++	if (acm->nb_index != 0 || current_size < expected_size) {
+ 		/* notification is transmitted fragmented, reassemble */
+ 		if (acm->nb_size < expected_size) {
+ 			u8 *new_buffer;
+@@ -1727,13 +1738,16 @@ static const struct usb_device_id acm_ids[] = {
+ 	{ USB_DEVICE(0x0870, 0x0001), /* Metricom GS Modem */
+ 	.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
+ 	},
+-	{ USB_DEVICE(0x045b, 0x023c),	/* Renesas USB Download mode */
++	{ USB_DEVICE(0x045b, 0x023c),	/* Renesas R-Car H3 USB Download mode */
++	.driver_info = DISABLE_ECHO,	/* Don't echo banner */
++	},
++	{ USB_DEVICE(0x045b, 0x0247),	/* Renesas R-Car D3 USB Download mode */
+ 	.driver_info = DISABLE_ECHO,	/* Don't echo banner */
+ 	},
+-	{ USB_DEVICE(0x045b, 0x0248),	/* Renesas USB Download mode */
++	{ USB_DEVICE(0x045b, 0x0248),	/* Renesas R-Car M3-N USB Download mode */
+ 	.driver_info = DISABLE_ECHO,	/* Don't echo banner */
+ 	},
+-	{ USB_DEVICE(0x045b, 0x024D),	/* Renesas USB Download mode */
++	{ USB_DEVICE(0x045b, 0x024D),	/* Renesas R-Car E3 USB Download mode */
+ 	.driver_info = DISABLE_ECHO,	/* Don't echo banner */
+ 	},
+ 	{ USB_DEVICE(0x0e8d, 0x0003), /* FIREFLY, MediaTek Inc; andrey.arapov@gmail.com */
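
The acm_ctrl_irq() fix above refuses to parse the notification length field until the first fragment contains a complete header. A self-contained userspace sketch of the same validation (the struct layout is a simplified stand-in for usb_cdc_notification, and a real driver would also convert the little-endian length field):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Simplified stand-in for struct usb_cdc_notification. */
    struct notif_hdr {
            uint8_t  type;
            uint8_t  code;
            uint16_t value;
            uint16_t index;
            uint16_t length;	/* payload bytes following the header */
    };

    /* Mirror of the fix: never read hdr.length from a short chunk. */
    static int first_fragment_ok(const uint8_t *buf, size_t len)
    {
            struct notif_hdr hdr;

            if (len < sizeof(hdr))
                    return 0;		/* too short to trust */
            memcpy(&hdr, buf, sizeof(hdr));
            printf("expect %zu total bytes\n", sizeof(hdr) + hdr.length);
            return 1;
    }

    int main(void)
    {
            uint8_t short_chunk[4] = { 0 };

            return first_fragment_ok(short_chunk, sizeof(short_chunk)) ? 1 : 0;
    }
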
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 21ac9b464696f5..906daf423cb02b 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -1847,6 +1847,17 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id)
+ 	desc = intf->cur_altsetting;
+ 	hdev = interface_to_usbdev(intf);
+ 
++	/*
++	 * The USB 2.0 spec prohibits hubs from having more than one
++	 * configuration or interface, and we rely on this prohibition.
++	 * Refuse to accept a device that violates it.
++	 */
++	if (hdev->descriptor.bNumConfigurations > 1 ||
++			hdev->actconfig->desc.bNumInterfaces > 1) {
++		dev_err(&intf->dev, "Invalid hub with more than one config or interface\n");
++		return -EINVAL;
++	}
++
+ 	/*
+ 	 * Set default autosuspend delay as 0 to speedup bus suspend,
+ 	 * based on the below considerations:
+@@ -4698,7 +4709,6 @@ void usb_ep0_reinit(struct usb_device *udev)
+ EXPORT_SYMBOL_GPL(usb_ep0_reinit);
+ 
+ #define usb_sndaddr0pipe()	(PIPE_CONTROL << 30)
+-#define usb_rcvaddr0pipe()	((PIPE_CONTROL << 30) | USB_DIR_IN)
+ 
+ static int hub_set_address(struct usb_device *udev, int devnum)
+ {
+@@ -4804,7 +4814,7 @@ static int get_bMaxPacketSize0(struct usb_device *udev,
+ 	for (i = 0; i < GET_MAXPACKET0_TRIES; ++i) {
+ 		/* Start with invalid values in case the transfer fails */
+ 		buf->bDescriptorType = buf->bMaxPacketSize0 = 0;
+-		rc = usb_control_msg(udev, usb_rcvaddr0pipe(),
++		rc = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
+ 				USB_REQ_GET_DESCRIPTOR, USB_DIR_IN,
+ 				USB_DT_DEVICE << 8, 0,
+ 				buf, size,
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 13171454f9591a..027479179f09e9 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -432,6 +432,9 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	{ USB_DEVICE(0x0c45, 0x7056), .driver_info =
+ 			USB_QUIRK_IGNORE_REMOTE_WAKEUP },
+ 
++	/* Sony Xperia XZ1 Compact (lilac) smartphone in fastboot mode */
++	{ USB_DEVICE(0x0fce, 0x0dde), .driver_info = USB_QUIRK_NO_LPM },
++
+ 	/* Action Semiconductor flash disk */
+ 	{ USB_DEVICE(0x10d6, 0x2200), .driver_info =
+ 			USB_QUIRK_STRING_FETCH_255 },
+@@ -522,6 +525,9 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	/* Blackmagic Design UltraStudio SDI */
+ 	{ USB_DEVICE(0x1edb, 0xbd4f), .driver_info = USB_QUIRK_NO_LPM },
+ 
++	/* Teclast disk */
++	{ USB_DEVICE(0x1f75, 0x0917), .driver_info = USB_QUIRK_NO_LPM },
++
+ 	/* Hauppauge HVR-950q */
+ 	{ USB_DEVICE(0x2040, 0x7200), .driver_info =
+ 			USB_QUIRK_CONFIG_INTF_STRINGS },
+diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
+index e7bf9cc635be6f..bd4c788f03bc14 100644
+--- a/drivers/usb/dwc2/gadget.c
++++ b/drivers/usb/dwc2/gadget.c
+@@ -4615,6 +4615,7 @@ static int dwc2_hsotg_udc_stop(struct usb_gadget *gadget)
+ 	spin_lock_irqsave(&hsotg->lock, flags);
+ 
+ 	hsotg->driver = NULL;
++	hsotg->gadget.dev.of_node = NULL;
+ 	hsotg->gadget.speed = USB_SPEED_UNKNOWN;
+ 	hsotg->enabled = 0;
+ 
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index a5d75d7d0a8707..8c80bb4a467bff 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -2618,10 +2618,38 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on)
+ {
+ 	u32			reg;
+ 	u32			timeout = 2000;
++	u32			saved_config = 0;
+ 
+ 	if (pm_runtime_suspended(dwc->dev))
+ 		return 0;
+ 
++	/*
++	 * When operating in USB 2.0 speeds (HS/FS), ensure that
++	 * GUSB2PHYCFG.ENBLSLPM and GUSB2PHYCFG.SUSPHY are cleared before starting
++	 * or stopping the controller. This resolves timeout issues that occur
++	 * during frequent role switches between host and device modes.
++	 *
++	 * Save and clear these settings, then restore them after completing the
++	 * controller start or stop sequence.
++	 *
++	 * This solution was discovered through experimentation as it is not
++	 * mentioned in the dwc3 programming guide. It has been tested on
++	 * Exynos platforms.
++	 */
++	reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
++	if (reg & DWC3_GUSB2PHYCFG_SUSPHY) {
++		saved_config |= DWC3_GUSB2PHYCFG_SUSPHY;
++		reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
++	}
++
++	if (reg & DWC3_GUSB2PHYCFG_ENBLSLPM) {
++		saved_config |= DWC3_GUSB2PHYCFG_ENBLSLPM;
++		reg &= ~DWC3_GUSB2PHYCFG_ENBLSLPM;
++	}
++
++	if (saved_config)
++		dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
++
+ 	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ 	if (is_on) {
+ 		if (DWC3_VER_IS_WITHIN(DWC3, ANY, 187A)) {
+@@ -2649,6 +2677,12 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on)
+ 		reg &= DWC3_DSTS_DEVCTRLHLT;
+ 	} while (--timeout && !(!is_on ^ !reg));
+ 
++	if (saved_config) {
++		reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
++		reg |= saved_config;
++		dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
++	}
++
+ 	if (!timeout)
+ 		return -ETIMEDOUT;
+ 
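
The run/stop change above uses a save/clear/restore pattern: only the bits that were actually set are recorded in saved_config, so the restore write is exact even when one of the bits was already clear. A runnable sketch of that bit accounting, with made-up bit positions standing in for the GUSB2PHYCFG fields:

    #include <stdint.h>
    #include <stdio.h>

    #define SUSPHY   (1u << 6)	/* hypothetical bit positions */
    #define ENBLSLPM (1u << 8)

    static uint32_t reg;		/* stands in for GUSB2PHYCFG */

    int main(void)
    {
            uint32_t saved = 0, v = reg = SUSPHY | ENBLSLPM;

            /* Save only the bits we clear, so restore is exact even if
             * one of them was already 0. */
            if (v & SUSPHY)   { saved |= SUSPHY;   v &= ~SUSPHY; }
            if (v & ENBLSLPM) { saved |= ENBLSLPM; v &= ~ENBLSLPM; }
            reg = v;

            /* ... critical sequence runs with the bits cleared ... */

            if (saved)
                    reg |= saved;	/* restore original configuration */
            printf("0x%x\n", reg);
            return 0;
    }
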
+diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
+index 1067847cc07995..4153643c67dcec 100644
+--- a/drivers/usb/gadget/function/f_midi.c
++++ b/drivers/usb/gadget/function/f_midi.c
+@@ -907,6 +907,15 @@ static int f_midi_bind(struct usb_configuration *c, struct usb_function *f)
+ 
+ 	status = -ENODEV;
+ 
++	/*
++	 * Reset wMaxPacketSize with maximum packet size of FS bulk transfer before
++	 * endpoint claim. This ensures that the wMaxPacketSize does not exceed the
++	 * limit during bind retries where configured dwc3 TX/RX FIFO's maxpacket
++	 * size of 512 bytes for IN/OUT endpoints in support HS speed only.
++	 */
++	bulk_in_desc.wMaxPacketSize = cpu_to_le16(64);
++	bulk_out_desc.wMaxPacketSize = cpu_to_le16(64);
++
+ 	/* allocate instance-specific endpoints */
+ 	midi->in_ep = usb_ep_autoconfig(cdev->gadget, &bulk_in_desc);
+ 	if (!midi->in_ep)
+@@ -1000,11 +1009,11 @@ static int f_midi_bind(struct usb_configuration *c, struct usb_function *f)
+ 	}
+ 
+ 	/* configure the endpoint descriptors ... */
+-	ms_out_desc.bLength = USB_DT_MS_ENDPOINT_SIZE(midi->in_ports);
+-	ms_out_desc.bNumEmbMIDIJack = midi->in_ports;
++	ms_out_desc.bLength = USB_DT_MS_ENDPOINT_SIZE(midi->out_ports);
++	ms_out_desc.bNumEmbMIDIJack = midi->out_ports;
+ 
+-	ms_in_desc.bLength = USB_DT_MS_ENDPOINT_SIZE(midi->out_ports);
+-	ms_in_desc.bNumEmbMIDIJack = midi->out_ports;
++	ms_in_desc.bLength = USB_DT_MS_ENDPOINT_SIZE(midi->in_ports);
++	ms_in_desc.bNumEmbMIDIJack = midi->in_ports;
+ 
+ 	/* ... and add them to the list */
+ 	endpoint_descriptor_index = i;
+diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
+index a6f46364be65f0..4b3d5075621aa0 100644
+--- a/drivers/usb/gadget/udc/core.c
++++ b/drivers/usb/gadget/udc/core.c
+@@ -1543,8 +1543,8 @@ void usb_del_gadget(struct usb_gadget *gadget)
+ 
+ 	kobject_uevent(&udc->dev.kobj, KOBJ_REMOVE);
+ 	sysfs_remove_link(&udc->dev.kobj, "gadget");
+-	flush_work(&gadget->work);
+ 	device_del(&gadget->dev);
++	flush_work(&gadget->work);
+ 	ida_free(&gadget_id_numbers, gadget->id_number);
+ 	cancel_work_sync(&udc->vbus_work);
+ 	device_unregister(&udc->dev);
+diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
+index 3b01734ce1b7e5..a93ad93390ba17 100644
+--- a/drivers/usb/gadget/udc/renesas_usb3.c
++++ b/drivers/usb/gadget/udc/renesas_usb3.c
+@@ -310,7 +310,7 @@ struct renesas_usb3_request {
+ 	struct list_head	queue;
+ };
+ 
+-#define USB3_EP_NAME_SIZE	8
++#define USB3_EP_NAME_SIZE	16
+ struct renesas_usb3_ep {
+ 	struct usb_ep ep;
+ 	struct renesas_usb3 *usb3;
+diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
+index 1f9c1b1435d862..0404489c2f6a9c 100644
+--- a/drivers/usb/host/pci-quirks.c
++++ b/drivers/usb/host/pci-quirks.c
+@@ -958,6 +958,15 @@ static void quirk_usb_disable_ehci(struct pci_dev *pdev)
+ 	 * booting from USB disk or using a usb keyboard
+ 	 */
+ 	hcc_params = readl(base + EHCI_HCC_PARAMS);
++
++	/* The LS7A EHCI controller doesn't have extended capabilities; the
++	 * EECP (EHCI Extended Capabilities Pointer) field of the HCCPARAMS
++	 * register should be 0x0 but reads as 0xa0, so clear it to avoid
++	 * error messages on boot.
++	 */
++	if (pdev->vendor == PCI_VENDOR_ID_LOONGSON && pdev->device == 0x7a14)
++		hcc_params &= ~(0xffL << 8);
++
+ 	offset = (hcc_params >> 8) & 0xff;
+ 	while (offset && --count) {
+ 		pci_read_config_dword(pdev, offset, &cap);
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index 3ba9902dd2093c..deb3c98c9beaf6 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -656,8 +656,8 @@ int xhci_pci_common_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ }
+ EXPORT_SYMBOL_NS_GPL(xhci_pci_common_probe, xhci);
+ 
+-static const struct pci_device_id pci_ids_reject[] = {
+-	/* handled by xhci-pci-renesas */
++/* handled by xhci-pci-renesas if enabled */
++static const struct pci_device_id pci_ids_renesas[] = {
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, 0x0014) },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, 0x0015) },
+ 	{ /* end: all zeroes */ }
+@@ -665,7 +665,8 @@ static const struct pci_device_id pci_ids_reject[] = {
+ 
+ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ {
+-	if (pci_match_id(pci_ids_reject, dev))
++	if (IS_ENABLED(CONFIG_USB_XHCI_PCI_RENESAS) &&
++			pci_match_id(pci_ids_renesas, dev))
+ 		return -ENODEV;
+ 
+ 	return xhci_pci_common_probe(dev, id);
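
The xhci-pci change keys the rejection table off IS_ENABLED(CONFIG_USB_XHCI_PCI_RENESAS), which evaluates to 1 when the option is built-in or modular and to 0 otherwise, so the compiler can discard the whole branch when the Renesas driver is not configured. A short sketch of the pattern; CONFIG_DEMO_HANDLER, demo_ids and demo_probe are hypothetical names:

    #include <linux/errno.h>
    #include <linux/kconfig.h>
    #include <linux/pci.h>

    static const struct pci_device_id demo_ids[] = {	/* hypothetical */
            { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, 0x0014) },
            { }
    };

    static int demo_probe(struct pci_dev *dev, const struct pci_device_id *id)
    {
            /* Branch folds away entirely when CONFIG_DEMO_HANDLER is off. */
            if (IS_ENABLED(CONFIG_DEMO_HANDLER) && pci_match_id(demo_ids, dev))
                    return -ENODEV;		/* let the other driver bind */

            return 0;			/* common probe path would go here */
    }
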
+diff --git a/drivers/usb/roles/class.c b/drivers/usb/roles/class.c
+index c58a12c147f451..30482d4cf82678 100644
+--- a/drivers/usb/roles/class.c
++++ b/drivers/usb/roles/class.c
+@@ -387,8 +387,11 @@ usb_role_switch_register(struct device *parent,
+ 	dev_set_name(&sw->dev, "%s-role-switch",
+ 		     desc->name ? desc->name : dev_name(parent));
+ 
++	sw->registered = true;
++
+ 	ret = device_register(&sw->dev);
+ 	if (ret) {
++		sw->registered = false;
+ 		put_device(&sw->dev);
+ 		return ERR_PTR(ret);
+ 	}
+@@ -399,8 +402,6 @@ usb_role_switch_register(struct device *parent,
+ 			dev_warn(&sw->dev, "failed to add component\n");
+ 	}
+ 
+-	sw->registered = true;
+-
+ 	/* TODO: Symlinks for the host port and the device controller. */
+ 
+ 	return sw;
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 1e2ae0c6c41c79..58bd54e8c483a2 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -619,15 +619,6 @@ static void option_instat_callback(struct urb *urb);
+ /* Luat Air72*U series based on UNISOC UIS8910 uses UNISOC's vendor ID */
+ #define LUAT_PRODUCT_AIR720U			0x4e00
+ 
+-/* MeiG Smart Technology products */
+-#define MEIGSMART_VENDOR_ID			0x2dee
+-/* MeiG Smart SRM815/SRM825L based on Qualcomm 315 */
+-#define MEIGSMART_PRODUCT_SRM825L		0x4d22
+-/* MeiG Smart SLM320 based on UNISOC UIS8910 */
+-#define MEIGSMART_PRODUCT_SLM320		0x4d41
+-/* MeiG Smart SLM770A based on ASR1803 */
+-#define MEIGSMART_PRODUCT_SLM770A		0x4d57
+-
+ /* Device flags */
+ 
+ /* Highest interface number which can be used with NCTRL() and RSVD() */
+@@ -1367,15 +1358,15 @@ static const struct usb_device_id option_ids[] = {
+ 	  .driver_info = NCTRL(2) | RSVD(3) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1063, 0xff),	/* Telit LN920 (ECM) */
+ 	  .driver_info = NCTRL(0) | RSVD(1) },
+-	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1070, 0xff),	/* Telit FN990 (rmnet) */
++	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1070, 0xff),	/* Telit FN990A (rmnet) */
+ 	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
+-	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1071, 0xff),	/* Telit FN990 (MBIM) */
++	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1071, 0xff),	/* Telit FN990A (MBIM) */
+ 	  .driver_info = NCTRL(0) | RSVD(1) },
+-	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1072, 0xff),	/* Telit FN990 (RNDIS) */
++	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1072, 0xff),	/* Telit FN990A (RNDIS) */
+ 	  .driver_info = NCTRL(2) | RSVD(3) },
+-	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1073, 0xff),	/* Telit FN990 (ECM) */
++	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1073, 0xff),	/* Telit FN990A (ECM) */
+ 	  .driver_info = NCTRL(0) | RSVD(1) },
+-	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1075, 0xff),	/* Telit FN990 (PCIe) */
++	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1075, 0xff),	/* Telit FN990A (PCIe) */
+ 	  .driver_info = RSVD(0) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1080, 0xff),	/* Telit FE990 (rmnet) */
+ 	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
+@@ -1403,6 +1394,22 @@ static const struct usb_device_id option_ids[] = {
+ 	  .driver_info = RSVD(0) | NCTRL(3) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10c8, 0xff),	/* Telit FE910C04 (rmnet) */
+ 	  .driver_info = RSVD(0) | NCTRL(2) | RSVD(3) | RSVD(4) },
++	{ USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d0, 0x60) },	/* Telit FN990B (rmnet) */
++	{ USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d0, 0x40) },
++	{ USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d0, 0x30),
++	  .driver_info = NCTRL(5) },
++	{ USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d1, 0x60) },	/* Telit FN990B (MBIM) */
++	{ USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d1, 0x40) },
++	{ USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d1, 0x30),
++	  .driver_info = NCTRL(6) },
++	{ USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d2, 0x60) },	/* Telit FN990B (RNDIS) */
++	{ USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d2, 0x40) },
++	{ USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d2, 0x30),
++	  .driver_info = NCTRL(6) },
++	{ USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d3, 0x60) },	/* Telit FN990B (ECM) */
++	{ USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d3, 0x40) },
++	{ USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d3, 0x30),
++	  .driver_info = NCTRL(6) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
+ 	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
+@@ -2347,6 +2354,14 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a05, 0xff) },			/* Fibocom FM650-CN (NCM mode) */
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a06, 0xff) },			/* Fibocom FM650-CN (RNDIS mode) */
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a07, 0xff) },			/* Fibocom FM650-CN (MBIM mode) */
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x2dee, 0x4d41, 0xff, 0, 0) },		/* MeiG Smart SLM320 */
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x2dee, 0x4d57, 0xff, 0, 0) },		/* MeiG Smart SLM770A */
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x2dee, 0x4d22, 0xff, 0, 0) },		/* MeiG Smart SRM815 */
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x2dee, 0x4d22, 0xff, 0x10, 0x02) },	/* MeiG Smart SLM828 */
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x2dee, 0x4d22, 0xff, 0x10, 0x03) },	/* MeiG Smart SLM828 */
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x2dee, 0x4d22, 0xff, 0xff, 0x30) },	/* MeiG Smart SRM815 and SRM825L */
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x2dee, 0x4d22, 0xff, 0xff, 0x40) },	/* MeiG Smart SRM825L */
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x2dee, 0x4d22, 0xff, 0xff, 0x60) },	/* MeiG Smart SRM825L */
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) },			/* LongSung M5710 */
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) },			/* GosunCn GM500 RNDIS */
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) },			/* GosunCn GM500 MBIM */
+@@ -2403,12 +2418,6 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0, 0) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, TOZED_PRODUCT_LT70C, 0xff, 0, 0) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, LUAT_PRODUCT_AIR720U, 0xff, 0, 0) },
+-	{ USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SLM320, 0xff, 0, 0) },
+-	{ USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SLM770A, 0xff, 0, 0) },
+-	{ USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SRM825L, 0xff, 0, 0) },
+-	{ USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SRM825L, 0xff, 0xff, 0x30) },
+-	{ USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SRM825L, 0xff, 0xff, 0x40) },
+-	{ USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SRM825L, 0xff, 0xff, 0x60) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x1bbb, 0x0530, 0xff),			/* TCL IK512 MBIM */
+ 	  .driver_info = NCTRL(1) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x1bbb, 0x0640, 0xff),			/* TCL IK512 ECM */
+diff --git a/drivers/vfio/pci/nvgrace-gpu/main.c b/drivers/vfio/pci/nvgrace-gpu/main.c
+index a7fd018aa54836..9e1c57baab64a2 100644
+--- a/drivers/vfio/pci/nvgrace-gpu/main.c
++++ b/drivers/vfio/pci/nvgrace-gpu/main.c
+@@ -17,12 +17,14 @@
+ #define RESMEM_REGION_INDEX VFIO_PCI_BAR2_REGION_INDEX
+ #define USEMEM_REGION_INDEX VFIO_PCI_BAR4_REGION_INDEX
+ 
+-/* Memory size expected as non cached and reserved by the VM driver */
+-#define RESMEM_SIZE SZ_1G
+-
+ /* A hardwired and constant ABI value between the GPU FW and VFIO driver. */
+ #define MEMBLK_SIZE SZ_512M
+ 
++#define DVSEC_BITMAP_OFFSET 0xA
++#define MIG_SUPPORTED_WITH_CACHED_RESMEM BIT(0)
++
++#define GPU_CAP_DVSEC_REGISTER 3
++
+ /*
+  * The state of the two device memory region - resmem and usemem - is
+  * saved as struct mem_region.
+@@ -46,6 +48,7 @@ struct nvgrace_gpu_pci_core_device {
+ 	struct mem_region resmem;
+ 	/* Lock to control device memory kernel mapping */
+ 	struct mutex remap_lock;
++	bool has_mig_hw_bug;
+ };
+ 
+ static void nvgrace_gpu_init_fake_bar_emu_regs(struct vfio_device *core_vdev)
+@@ -66,7 +69,7 @@ nvgrace_gpu_memregion(int index,
+ 	if (index == USEMEM_REGION_INDEX)
+ 		return &nvdev->usemem;
+ 
+-	if (index == RESMEM_REGION_INDEX)
++	if (nvdev->resmem.memlength && index == RESMEM_REGION_INDEX)
+ 		return &nvdev->resmem;
+ 
+ 	return NULL;
+@@ -751,40 +754,67 @@ nvgrace_gpu_init_nvdev_struct(struct pci_dev *pdev,
+ 			      u64 memphys, u64 memlength)
+ {
+ 	int ret = 0;
++	u64 resmem_size = 0;
+ 
+ 	/*
+-	 * The VM GPU device driver needs a non-cacheable region to support
+-	 * the MIG feature. Since the device memory is mapped as NORMAL cached,
+-	 * carve out a region from the end with a different NORMAL_NC
+-	 * property (called as reserved memory and represented as resmem). This
+-	 * region then is exposed as a 64b BAR (region 2 and 3) to the VM, while
+-	 * exposing the rest (termed as usable memory and represented using usemem)
+-	 * as cacheable 64b BAR (region 4 and 5).
++	 * On Grace Hopper systems, the VM GPU device driver needs a non-cacheable
++	 * region to support the MIG feature owing to a hardware bug. Since the
++	 * device memory is mapped as NORMAL cached, carve out a region from the end
++	 * with a different NORMAL_NC property (called reserved memory and
++	 * represented as resmem). This region then is exposed as a 64b BAR
++	 * (region 2 and 3) to the VM, while exposing the rest (termed as usable
++	 * memory and represented using usemem) as cacheable 64b BAR (region 4 and 5).
+ 	 *
+ 	 *               devmem (memlength)
+ 	 * |-------------------------------------------------|
+ 	 * |                                           |
+ 	 * usemem.memphys                              resmem.memphys
++	 *
++	 * This hardware bug is fixed on the Grace Blackwell platforms and the
++	 * presence of the bug can be determined through nvdev->has_mig_hw_bug.
++	 * Thus on systems with the hardware fix, there is no need to partition
++	 * the GPU device memory and the entire memory is usable and mapped as
++	 * NORMAL cached (i.e. resmem size is 0).
+ 	 */
++	if (nvdev->has_mig_hw_bug)
++		resmem_size = SZ_1G;
++
+ 	nvdev->usemem.memphys = memphys;
+ 
+ 	/*
+ 	 * The device memory exposed to the VM is added to the kernel by the
+-	 * VM driver module in chunks of memory block size. Only the usable
+-	 * memory (usemem) is added to the kernel for usage by the VM
+-	 * workloads. Make the usable memory size memblock aligned.
++	 * VM driver module in chunks of memory block size. Note that only the
++	 * usable memory (usemem) is added to the kernel for usage by the VM
++	 * workloads.
+ 	 */
+-	if (check_sub_overflow(memlength, RESMEM_SIZE,
++	if (check_sub_overflow(memlength, resmem_size,
+ 			       &nvdev->usemem.memlength)) {
+ 		ret = -EOVERFLOW;
+ 		goto done;
+ 	}
+ 
+ 	/*
+-	 * The USEMEM part of the device memory has to be MEMBLK_SIZE
+-	 * aligned. This is a hardwired ABI value between the GPU FW and
+-	 * VFIO driver. The VM device driver is also aware of it and make
+-	 * use of the value for its calculation to determine USEMEM size.
++	 * The usemem region is exposed as a 64b BAR composed of regions 4 and 5.
++	 * Calculate and save the BAR size for the region.
++	 */
++	nvdev->usemem.bar_size = roundup_pow_of_two(nvdev->usemem.memlength);
++
++	/*
++	 * If the hardware has the fix for MIG, there is no requirement
++	 * for splitting the device memory to create RESMEM. The entire
++	 * device memory is usable and will be USEMEM. Return early in
++	 * that case.
++	 */
++	if (!nvdev->has_mig_hw_bug)
++		goto done;
++
++	/*
++	 * When the device memory is split to workaround the MIG bug on
++	 * Grace Hopper, the USEMEM part of the device memory has to be
++	 * MEMBLK_SIZE aligned. This is a hardwired ABI value between the
++	 * GPU FW and VFIO driver. The VM device driver is also aware of it
++	 * and make use of the value for its calculation to determine USEMEM
++	 * size. Note that the device memory may not be 512M aligned.
+ 	 */
+ 	nvdev->usemem.memlength = round_down(nvdev->usemem.memlength,
+ 					     MEMBLK_SIZE);
+@@ -803,15 +833,34 @@ nvgrace_gpu_init_nvdev_struct(struct pci_dev *pdev,
+ 	}
+ 
+ 	/*
+-	 * The memory regions are exposed as BARs. Calculate and save
+-	 * the BAR size for them.
++	 * The resmem region is exposed as a 64b BAR composed of regions 2 and 3
++	 * for Grace Hopper. Calculate and save the BAR size for the region.
+ 	 */
+-	nvdev->usemem.bar_size = roundup_pow_of_two(nvdev->usemem.memlength);
+ 	nvdev->resmem.bar_size = roundup_pow_of_two(nvdev->resmem.memlength);
+ done:
+ 	return ret;
+ }
+ 
++static bool nvgrace_gpu_has_mig_hw_bug(struct pci_dev *pdev)
++{
++	int pcie_dvsec;
++	u16 dvsec_ctrl16;
++
++	pcie_dvsec = pci_find_dvsec_capability(pdev, PCI_VENDOR_ID_NVIDIA,
++					       GPU_CAP_DVSEC_REGISTER);
++
++	if (pcie_dvsec) {
++		pci_read_config_word(pdev,
++				     pcie_dvsec + DVSEC_BITMAP_OFFSET,
++				     &dvsec_ctrl16);
++
++		if (dvsec_ctrl16 & MIG_SUPPORTED_WITH_CACHED_RESMEM)
++			return false;
++	}
++
++	return true;
++}
++
+ static int nvgrace_gpu_probe(struct pci_dev *pdev,
+ 			     const struct pci_device_id *id)
+ {
+@@ -832,6 +881,8 @@ static int nvgrace_gpu_probe(struct pci_dev *pdev,
+ 	dev_set_drvdata(&pdev->dev, &nvdev->core_device);
+ 
+ 	if (ops == &nvgrace_gpu_pci_ops) {
++		nvdev->has_mig_hw_bug = nvgrace_gpu_has_mig_hw_bug(pdev);
++
+ 		/*
+ 		 * Device memory properties are identified in the host ACPI
+ 		 * table. Set the nvgrace_gpu_pci_core_device structure.
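
nvgrace_gpu_has_mig_hw_bug() above is a small example of feature detection through a PCIe Designated Vendor-Specific Extended Capability. Condensed to its essentials (the offset and bit come from the patch's macros; error handling is omitted):

    #include <linux/pci.h>

    static bool hw_has_mig_fix(struct pci_dev *pdev)	/* illustrative */
    {
            u16 ctrl, pos;

            pos = pci_find_dvsec_capability(pdev, PCI_VENDOR_ID_NVIDIA,
                                            GPU_CAP_DVSEC_REGISTER);
            if (!pos)
                    return false;	/* no DVSEC -> assume the bug */

            pci_read_config_word(pdev, pos + DVSEC_BITMAP_OFFSET, &ctrl);
            return ctrl & MIG_SUPPORTED_WITH_CACHED_RESMEM;
    }
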
+diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c
+index 66b72c2892841d..a0595c745732a3 100644
+--- a/drivers/vfio/pci/vfio_pci_rdwr.c
++++ b/drivers/vfio/pci/vfio_pci_rdwr.c
+@@ -16,6 +16,7 @@
+ #include <linux/io.h>
+ #include <linux/vfio.h>
+ #include <linux/vgaarb.h>
++#include <linux/io-64-nonatomic-lo-hi.h>
+ 
+ #include "vfio_pci_priv.h"
+ 
+diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c
+index d63c2d266d0735..3bf1043cd7957c 100644
+--- a/drivers/vfio/platform/vfio_platform_common.c
++++ b/drivers/vfio/platform/vfio_platform_common.c
+@@ -393,11 +393,6 @@ static ssize_t vfio_platform_read_mmio(struct vfio_platform_region *reg,
+ 
+ 	count = min_t(size_t, count, reg->size - off);
+ 
+-	if (off >= reg->size)
+-		return -EINVAL;
+-
+-	count = min_t(size_t, count, reg->size - off);
+-
+ 	if (!reg->ioaddr) {
+ 		reg->ioaddr =
+ 			ioremap(reg->addr, reg->size);
+@@ -482,11 +477,6 @@ static ssize_t vfio_platform_write_mmio(struct vfio_platform_region *reg,
+ 
+ 	count = min_t(size_t, count, reg->size - off);
+ 
+-	if (off >= reg->size)
+-		return -EINVAL;
+-
+-	count = min_t(size_t, count, reg->size - off);
+-
+ 	if (!reg->ioaddr) {
+ 		reg->ioaddr =
+ 			ioremap(reg->addr, reg->size);
+diff --git a/drivers/video/fbdev/omap/lcd_dma.c b/drivers/video/fbdev/omap/lcd_dma.c
+index f85817635a8c2c..0da23c57e4757e 100644
+--- a/drivers/video/fbdev/omap/lcd_dma.c
++++ b/drivers/video/fbdev/omap/lcd_dma.c
+@@ -432,8 +432,8 @@ static int __init omap_init_lcd_dma(void)
+ 
+ 	spin_lock_init(&lcd_dma.lock);
+ 
+-	r = request_irq(INT_DMA_LCD, lcd_dma_irq_handler, 0,
+-			"LCD DMA", NULL);
++	r = request_threaded_irq(INT_DMA_LCD, NULL, lcd_dma_irq_handler,
++				 IRQF_ONESHOT, "LCD DMA", NULL);
+ 	if (r != 0)
+ 		pr_err("unable to request IRQ for LCD DMA (error %d)\n", r);
+ 
+diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
+index a337edcf8faf71..26c62e0d34e98b 100644
+--- a/drivers/xen/swiotlb-xen.c
++++ b/drivers/xen/swiotlb-xen.c
+@@ -74,19 +74,21 @@ static inline phys_addr_t xen_dma_to_phys(struct device *dev,
+ 	return xen_bus_to_phys(dev, dma_to_phys(dev, dma_addr));
+ }
+ 
++static inline bool range_requires_alignment(phys_addr_t p, size_t size)
++{
++	phys_addr_t algn = 1ULL << (get_order(size) + PAGE_SHIFT);
++	phys_addr_t bus_addr = pfn_to_bfn(XEN_PFN_DOWN(p)) << XEN_PAGE_SHIFT;
++
++	return IS_ALIGNED(p, algn) && !IS_ALIGNED(bus_addr, algn);
++}
++
+ static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
+ {
+ 	unsigned long next_bfn, xen_pfn = XEN_PFN_DOWN(p);
+ 	unsigned int i, nr_pages = XEN_PFN_UP(xen_offset_in_page(p) + size);
+-	phys_addr_t algn = 1ULL << (get_order(size) + PAGE_SHIFT);
+ 
+ 	next_bfn = pfn_to_bfn(xen_pfn);
+ 
+-	/* If buffer is physically aligned, ensure DMA alignment. */
+-	if (IS_ALIGNED(p, algn) &&
+-	    !IS_ALIGNED((phys_addr_t)next_bfn << XEN_PAGE_SHIFT, algn))
+-		return 1;
+-
+ 	for (i = 1; i < nr_pages; i++)
+ 		if (pfn_to_bfn(++xen_pfn) != ++next_bfn)
+ 			return 1;
+@@ -156,7 +158,8 @@ xen_swiotlb_alloc_coherent(struct device *dev, size_t size,
+ 
+ 	*dma_handle = xen_phys_to_dma(dev, phys);
+ 	if (*dma_handle + size - 1 > dma_mask ||
+-	    range_straddles_page_boundary(phys, size)) {
++	    range_straddles_page_boundary(phys, size) ||
++	    range_requires_alignment(phys, size)) {
+ 		if (xen_create_contiguous_region(phys, order, fls64(dma_mask),
+ 				dma_handle) != 0)
+ 			goto out_free_pages;
+@@ -182,7 +185,8 @@ xen_swiotlb_free_coherent(struct device *dev, size_t size, void *vaddr,
+ 	size = ALIGN(size, XEN_PAGE_SIZE);
+ 
+ 	if (WARN_ON_ONCE(dma_handle + size - 1 > dev->coherent_dma_mask) ||
+-	    WARN_ON_ONCE(range_straddles_page_boundary(phys, size)))
++	    WARN_ON_ONCE(range_straddles_page_boundary(phys, size) ||
++			 range_requires_alignment(phys, size)))
+ 	    	return;
+ 
+ 	if (TestClearPageXenRemapped(virt_to_page(vaddr)))
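
The new range_requires_alignment() helper derives the required DMA alignment from the allocation order: 1ULL << (get_order(size) + PAGE_SHIFT) is the size rounded up to the next power-of-two number of pages. A runnable userspace sketch of that arithmetic, assuming 4 KiB pages and a simplified get_order():

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12			/* assume 4 KiB pages */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* Userspace stand-in for the kernel's get_order(): pages rounded
     * up to a power of two, expressed as a shift count. */
    static int get_order(size_t size)
    {
            int order = 0;
            size_t pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

            while ((1UL << order) < pages)
                    order++;
            return order;
    }

    int main(void)
    {
            size_t size = 9000;
            uint64_t algn = 1ULL << (get_order(size) + PAGE_SHIFT);

            /* A 9000-byte buffer needs order-2 pages -> 16 KiB alignment. */
            printf("alignment for %zu bytes: %llu\n", size,
                   (unsigned long long)algn);
            return 0;
    }
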
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index 42c9899d9241c9..fe08c983d5bb4b 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -901,12 +901,11 @@ void clear_folio_extent_mapped(struct folio *folio)
+ 	folio_detach_private(folio);
+ }
+ 
+-static struct extent_map *__get_extent_map(struct inode *inode,
+-					   struct folio *folio, u64 start,
+-					   u64 len, struct extent_map **em_cached)
++static struct extent_map *get_extent_map(struct btrfs_inode *inode,
++					 struct folio *folio, u64 start,
++					 u64 len, struct extent_map **em_cached)
+ {
+ 	struct extent_map *em;
+-	struct extent_state *cached_state = NULL;
+ 
+ 	ASSERT(em_cached);
+ 
+@@ -922,14 +921,12 @@ static struct extent_map *__get_extent_map(struct inode *inode,
+ 		*em_cached = NULL;
+ 	}
+ 
+-	btrfs_lock_and_flush_ordered_range(BTRFS_I(inode), start, start + len - 1, &cached_state);
+-	em = btrfs_get_extent(BTRFS_I(inode), folio, start, len);
++	em = btrfs_get_extent(inode, folio, start, len);
+ 	if (!IS_ERR(em)) {
+ 		BUG_ON(*em_cached);
+ 		refcount_inc(&em->refs);
+ 		*em_cached = em;
+ 	}
+-	unlock_extent(&BTRFS_I(inode)->io_tree, start, start + len - 1, &cached_state);
+ 
+ 	return em;
+ }
+@@ -985,8 +982,7 @@ static int btrfs_do_readpage(struct folio *folio, struct extent_map **em_cached,
+ 			end_folio_read(folio, true, cur, iosize);
+ 			break;
+ 		}
+-		em = __get_extent_map(inode, folio, cur, end - cur + 1,
+-				      em_cached);
++		em = get_extent_map(BTRFS_I(inode), folio, cur, end - cur + 1, em_cached);
+ 		if (IS_ERR(em)) {
+ 			end_folio_read(folio, false, cur, end + 1 - cur);
+ 			return PTR_ERR(em);
+@@ -1087,11 +1083,18 @@ static int btrfs_do_readpage(struct folio *folio, struct extent_map **em_cached,
+ 
+ int btrfs_read_folio(struct file *file, struct folio *folio)
+ {
++	struct btrfs_inode *inode = folio_to_inode(folio);
++	const u64 start = folio_pos(folio);
++	const u64 end = start + folio_size(folio) - 1;
++	struct extent_state *cached_state = NULL;
+ 	struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ };
+ 	struct extent_map *em_cached = NULL;
+ 	int ret;
+ 
++	btrfs_lock_and_flush_ordered_range(inode, start, end, &cached_state);
+ 	ret = btrfs_do_readpage(folio, &em_cached, &bio_ctrl, NULL);
++	unlock_extent(&inode->io_tree, start, end, &cached_state);
++
+ 	free_extent_map(em_cached);
+ 
+ 	/*
+@@ -2268,12 +2271,20 @@ void btrfs_readahead(struct readahead_control *rac)
+ {
+ 	struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ | REQ_RAHEAD };
+ 	struct folio *folio;
++	struct btrfs_inode *inode = BTRFS_I(rac->mapping->host);
++	const u64 start = readahead_pos(rac);
++	const u64 end = start + readahead_length(rac) - 1;
++	struct extent_state *cached_state = NULL;
+ 	struct extent_map *em_cached = NULL;
+ 	u64 prev_em_start = (u64)-1;
+ 
++	btrfs_lock_and_flush_ordered_range(inode, start, end, &cached_state);
++
+ 	while ((folio = readahead_folio(rac)) != NULL)
+ 		btrfs_do_readpage(folio, &em_cached, &bio_ctrl, &prev_em_start);
+ 
++	unlock_extent(&inode->io_tree, start, end, &cached_state);
++
+ 	if (em_cached)
+ 		free_extent_map(em_cached);
+ 	submit_one_bio(&bio_ctrl);
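
The btrfs change hoists the extent locking out of the per-folio helper and into btrfs_read_folio() and btrfs_readahead(), so a readahead batch locks the whole range once instead of locking and unlocking per folio. A generic runnable sketch of that lock-hoisting shape (a pthread mutex stands in for the extent lock):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t range_lock = PTHREAD_MUTEX_INITIALIZER;

    static void process_item(int i)		/* cf. btrfs_do_readpage() */
    {
            /* No locking here anymore; the caller holds range_lock. */
            printf("item %d\n", i);
    }

    /* cf. btrfs_readahead(): take the lock once around the whole batch
     * instead of once per item inside the helper. */
    static void process_batch(int n)
    {
            pthread_mutex_lock(&range_lock);
            for (int i = 0; i < n; i++)
                    process_item(i);
            pthread_mutex_unlock(&range_lock);
    }

    int main(void)
    {
            process_batch(3);
            return 0;
    }
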
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index 559c177456e6a0..848cb2c3d9ddeb 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -1148,7 +1148,6 @@ int btrfs_write_check(struct kiocb *iocb, struct iov_iter *from, size_t count)
+ 	loff_t pos = iocb->ki_pos;
+ 	int ret;
+ 	loff_t oldsize;
+-	loff_t start_pos;
+ 
+ 	/*
+ 	 * Quickly bail out on NOWAIT writes if we don't have the nodatacow or
+@@ -1172,9 +1171,8 @@ int btrfs_write_check(struct kiocb *iocb, struct iov_iter *from, size_t count)
+ 	 */
+ 	update_time_for_write(inode);
+ 
+-	start_pos = round_down(pos, fs_info->sectorsize);
+ 	oldsize = i_size_read(inode);
+-	if (start_pos > oldsize) {
++	if (pos > oldsize) {
+ 		/* Expand hole size to cover write data, preventing empty gap */
+ 		loff_t end_pos = round_up(pos + count, fs_info->sectorsize);
+ 
+diff --git a/fs/nfs/sysfs.c b/fs/nfs/sysfs.c
+index bf378ecd5d9fdd..7b59a40d40c061 100644
+--- a/fs/nfs/sysfs.c
++++ b/fs/nfs/sysfs.c
+@@ -280,9 +280,9 @@ void nfs_sysfs_link_rpc_client(struct nfs_server *server,
+ 	char name[RPC_CLIENT_NAME_SIZE];
+ 	int ret;
+ 
+-	strcpy(name, clnt->cl_program->name);
+-	strcat(name, uniq ? uniq : "");
+-	strcat(name, "_client");
++	strscpy(name, clnt->cl_program->name, sizeof(name));
++	strncat(name, uniq ? uniq : "", sizeof(name) - strlen(name) - 1);
++	strncat(name, "_client", sizeof(name) - strlen(name) - 1);
+ 
+ 	ret = sysfs_create_link_nowarn(&server->kobj,
+ 						&clnt->cl_sysfs->kobject, name);
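
The nfs sysfs fix swaps unbounded strcpy()/strcat() for strscpy() plus length-limited strncat(), where each append may use at most the remaining space less one byte for the terminating NUL. A runnable userspace analogue (snprintf stands in for the kernel's strscpy, and the buffer size is arbitrary):

    #include <stdio.h>
    #include <string.h>

    #define NAME_SIZE 32	/* stand-in for RPC_CLIENT_NAME_SIZE */

    int main(void)
    {
            char name[NAME_SIZE];

            /* Always NUL-terminates and never writes past the buffer. */
            snprintf(name, sizeof(name), "%s", "nfs");

            /* Each strncat appends at most the remaining space, minus
             * one byte reserved for the terminating NUL. */
            strncat(name, "_demo", sizeof(name) - strlen(name) - 1);
            strncat(name, "_client", sizeof(name) - strlen(name) - 1);

            printf("%s\n", name);	/* "nfs_demo_client", never overflows */
            return 0;
    }
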
+diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c
+index 146a9463c3c230..d199688818557d 100644
+--- a/fs/nfsd/filecache.c
++++ b/fs/nfsd/filecache.c
+@@ -445,11 +445,20 @@ nfsd_file_dispose_list_delayed(struct list_head *dispose)
+ 						struct nfsd_file, nf_gc);
+ 		struct nfsd_net *nn = net_generic(nf->nf_net, nfsd_net_id);
+ 		struct nfsd_fcache_disposal *l = nn->fcache_disposal;
++		struct svc_serv *serv;
+ 
+ 		spin_lock(&l->lock);
+ 		list_move_tail(&nf->nf_gc, &l->freeme);
+ 		spin_unlock(&l->lock);
+-		svc_wake_up(nn->nfsd_serv);
++
++		/*
++		 * The filecache laundrette is shut down after the
++		 * nn->nfsd_serv pointer is cleared, but before the
++		 * svc_serv is freed.
++		 */
++		serv = nn->nfsd_serv;
++		if (serv)
++			svc_wake_up(serv);
+ 	}
+ }
+ 
+diff --git a/fs/nfsd/nfs2acl.c b/fs/nfsd/nfs2acl.c
+index 4e3be7201b1c43..5fb202acb0fd00 100644
+--- a/fs/nfsd/nfs2acl.c
++++ b/fs/nfsd/nfs2acl.c
+@@ -84,6 +84,8 @@ static __be32 nfsacld_proc_getacl(struct svc_rqst *rqstp)
+ fail:
+ 	posix_acl_release(resp->acl_access);
+ 	posix_acl_release(resp->acl_default);
++	resp->acl_access = NULL;
++	resp->acl_default = NULL;
+ 	goto out;
+ }
+ 
+diff --git a/fs/nfsd/nfs3acl.c b/fs/nfsd/nfs3acl.c
+index 5e34e98db969db..7b5433bd301974 100644
+--- a/fs/nfsd/nfs3acl.c
++++ b/fs/nfsd/nfs3acl.c
+@@ -76,6 +76,8 @@ static __be32 nfsd3_proc_getacl(struct svc_rqst *rqstp)
+ fail:
+ 	posix_acl_release(resp->acl_access);
+ 	posix_acl_release(resp->acl_default);
++	resp->acl_access = NULL;
++	resp->acl_default = NULL;
+ 	goto out;
+ }
+ 
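
Both nfsacld_proc_getacl() and nfsd3_proc_getacl() now clear the acl pointers after releasing them, so the shared response-release path that runs later sees NULL instead of a stale pointer. A runnable sketch of the release-and-NULL idiom (free() stands in for the refcounted posix_acl_release()):

    #include <stdlib.h>

    struct resp {
            void *acl_access;
            void *acl_default;
    };

    /* Release-and-NULL: a second pass over the same response frees
     * NULL, which is a no-op, instead of double-freeing. */
    static void drop_acls(struct resp *r)
    {
            free(r->acl_access);
            r->acl_access = NULL;
            free(r->acl_default);
            r->acl_default = NULL;
    }

    int main(void)
    {
            struct resp r = { malloc(8), malloc(8) };

            drop_acls(&r);
            drop_acls(&r);	/* safe: pointers were cleared */
            return 0;
    }
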
+diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
+index de076365254978..88c03e18257323 100644
+--- a/fs/nfsd/nfs4callback.c
++++ b/fs/nfsd/nfs4callback.c
+@@ -1486,8 +1486,11 @@ nfsd4_run_cb_work(struct work_struct *work)
+ 		nfsd4_process_cb_update(cb);
+ 
+ 	clnt = clp->cl_cb_client;
+-	if (!clnt) {
+-		/* Callback channel broken, or client killed; give up: */
++	if (!clnt || clp->cl_state == NFSD4_COURTESY) {
++		/*
++		 * Callback channel broken, client killed or
++		 * nfs4_client in courtesy state; give up.
++		 */
+ 		nfsd41_destroy_cb(cb);
+ 		return;
+ 	}
+diff --git a/fs/ntfs3/attrib.c b/fs/ntfs3/attrib.c
+index 8d789b017fa9b6..da1a9312e61a0e 100644
+--- a/fs/ntfs3/attrib.c
++++ b/fs/ntfs3/attrib.c
+@@ -1406,7 +1406,7 @@ int attr_wof_frame_info(struct ntfs_inode *ni, struct ATTRIB *attr,
+ 	 */
+ 	if (!attr->non_res) {
+ 		if (vbo[1] + bytes_per_off > le32_to_cpu(attr->res.data_size)) {
+-			ntfs_inode_err(&ni->vfs_inode, "is corrupted");
++			_ntfs_bad_inode(&ni->vfs_inode);
+ 			return -EINVAL;
+ 		}
+ 		addr = resident_data(attr);
+@@ -2587,7 +2587,7 @@ int attr_force_nonresident(struct ntfs_inode *ni)
+ 
+ 	attr = ni_find_attr(ni, NULL, &le, ATTR_DATA, NULL, 0, NULL, &mi);
+ 	if (!attr) {
+-		ntfs_bad_inode(&ni->vfs_inode, "no data attribute");
++		_ntfs_bad_inode(&ni->vfs_inode);
+ 		return -ENOENT;
+ 	}
+ 
+diff --git a/fs/ntfs3/dir.c b/fs/ntfs3/dir.c
+index fc6a8aa29e3afe..b6da80c69ca634 100644
+--- a/fs/ntfs3/dir.c
++++ b/fs/ntfs3/dir.c
+@@ -512,7 +512,7 @@ static int ntfs_readdir(struct file *file, struct dir_context *ctx)
+ 		ctx->pos = pos;
+ 	} else if (err < 0) {
+ 		if (err == -EINVAL)
+-			ntfs_inode_err(dir, "directory corrupted");
++			_ntfs_bad_inode(dir);
+ 		ctx->pos = eod;
+ 	}
+ 
+diff --git a/fs/ntfs3/frecord.c b/fs/ntfs3/frecord.c
+index c33e818b3164cd..175662acd5eaf0 100644
+--- a/fs/ntfs3/frecord.c
++++ b/fs/ntfs3/frecord.c
+@@ -148,8 +148,10 @@ int ni_load_mi_ex(struct ntfs_inode *ni, CLST rno, struct mft_inode **mi)
+ 		goto out;
+ 
+ 	err = mi_get(ni->mi.sbi, rno, &r);
+-	if (err)
++	if (err) {
++		_ntfs_bad_inode(&ni->vfs_inode);
+ 		return err;
++	}
+ 
+ 	ni_add_mi(ni, r);
+ 
+@@ -238,8 +240,7 @@ struct ATTRIB *ni_find_attr(struct ntfs_inode *ni, struct ATTRIB *attr,
+ 	return attr;
+ 
+ out:
+-	ntfs_inode_err(&ni->vfs_inode, "failed to parse mft record");
+-	ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_ERROR);
++	_ntfs_bad_inode(&ni->vfs_inode);
+ 	return NULL;
+ }
+ 
+@@ -330,6 +331,7 @@ struct ATTRIB *ni_load_attr(struct ntfs_inode *ni, enum ATTR_TYPE type,
+ 	    vcn <= le64_to_cpu(attr->nres.evcn))
+ 		return attr;
+ 
++	_ntfs_bad_inode(&ni->vfs_inode);
+ 	return NULL;
+ }
+ 
+@@ -1604,8 +1606,8 @@ int ni_delete_all(struct ntfs_inode *ni)
+ 		roff = le16_to_cpu(attr->nres.run_off);
+ 
+ 		if (roff > asize) {
+-			_ntfs_bad_inode(&ni->vfs_inode);
+-			return -EINVAL;
++			/* ni_enum_attr_ex checks this case. */
++			continue;
+ 		}
+ 
+ 		/* run==1 means unpack and deallocate. */
+diff --git a/fs/ntfs3/fsntfs.c b/fs/ntfs3/fsntfs.c
+index 0fa636038b4e4d..6c73e93afb478c 100644
+--- a/fs/ntfs3/fsntfs.c
++++ b/fs/ntfs3/fsntfs.c
+@@ -908,7 +908,11 @@ void ntfs_bad_inode(struct inode *inode, const char *hint)
+ 
+ 	ntfs_inode_err(inode, "%s", hint);
+ 	make_bad_inode(inode);
+-	ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
++	/* Avoid recursion if bad inode is $Volume. */
++	if (inode->i_ino != MFT_REC_VOL &&
++	    !(sbi->flags & NTFS_FLAGS_LOG_REPLAYING)) {
++		ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
++	}
+ }
+ 
+ /*
+diff --git a/fs/ntfs3/index.c b/fs/ntfs3/index.c
+index 9089c58a005ce1..7eb9fae22f8da6 100644
+--- a/fs/ntfs3/index.c
++++ b/fs/ntfs3/index.c
+@@ -1094,8 +1094,7 @@ int indx_read(struct ntfs_index *indx, struct ntfs_inode *ni, CLST vbn,
+ 
+ ok:
+ 	if (!index_buf_check(ib, bytes, &vbn)) {
+-		ntfs_inode_err(&ni->vfs_inode, "directory corrupted");
+-		ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_ERROR);
++		_ntfs_bad_inode(&ni->vfs_inode);
+ 		err = -EINVAL;
+ 		goto out;
+ 	}
+@@ -1117,8 +1116,7 @@ int indx_read(struct ntfs_index *indx, struct ntfs_inode *ni, CLST vbn,
+ 
+ out:
+ 	if (err == -E_NTFS_CORRUPT) {
+-		ntfs_inode_err(&ni->vfs_inode, "directory corrupted");
+-		ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_ERROR);
++		_ntfs_bad_inode(&ni->vfs_inode);
+ 		err = -EINVAL;
+ 	}
+ 
+diff --git a/fs/ntfs3/inode.c b/fs/ntfs3/inode.c
+index be04d2845bb7bc..a1e11228dafd02 100644
+--- a/fs/ntfs3/inode.c
++++ b/fs/ntfs3/inode.c
+@@ -410,6 +410,9 @@ static struct inode *ntfs_read_mft(struct inode *inode,
+ 	if (!std5)
+ 		goto out;
+ 
++	if (is_bad_inode(inode))
++		goto out;
++
+ 	if (!is_match && name) {
+ 		err = -ENOENT;
+ 		goto out;
+diff --git a/fs/orangefs/orangefs-debugfs.c b/fs/orangefs/orangefs-debugfs.c
+index 1b508f5433846e..fa41db08848802 100644
+--- a/fs/orangefs/orangefs-debugfs.c
++++ b/fs/orangefs/orangefs-debugfs.c
+@@ -393,9 +393,9 @@ static ssize_t orangefs_debug_write(struct file *file,
+ 	 * Thwart users who try to jamb a ridiculous number
+ 	 * of bytes into the debug file...
+ 	 */
+-	if (count > ORANGEFS_MAX_DEBUG_STRING_LEN + 1) {
++	if (count > ORANGEFS_MAX_DEBUG_STRING_LEN) {
+ 		silly = count;
+-		count = ORANGEFS_MAX_DEBUG_STRING_LEN + 1;
++		count = ORANGEFS_MAX_DEBUG_STRING_LEN;
+ 	}
+ 
+ 	buf = kzalloc(ORANGEFS_MAX_DEBUG_STRING_LEN, GFP_KERNEL);
+diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
+index 94785abc9b1b2d..05274121e46f04 100644
+--- a/fs/smb/client/cifsglob.h
++++ b/fs/smb/client/cifsglob.h
+@@ -1476,7 +1476,6 @@ struct cifs_io_parms {
+ struct cifs_io_request {
+ 	struct netfs_io_request		rreq;
+ 	struct cifsFileInfo		*cfile;
+-	struct TCP_Server_Info		*server;
+ 	pid_t				pid;
+ };
+ 
+diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c
+index a58a3333ecc300..313c851fc1c122 100644
+--- a/fs/smb/client/file.c
++++ b/fs/smb/client/file.c
+@@ -147,7 +147,7 @@ static int cifs_prepare_read(struct netfs_io_subrequest *subreq)
+ 	struct netfs_io_request *rreq = subreq->rreq;
+ 	struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
+ 	struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
+-	struct TCP_Server_Info *server = req->server;
++	struct TCP_Server_Info *server;
+ 	struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
+ 	size_t size;
+ 	int rc = 0;
+@@ -156,6 +156,8 @@ static int cifs_prepare_read(struct netfs_io_subrequest *subreq)
+ 		rdata->xid = get_xid();
+ 		rdata->have_xid = true;
+ 	}
++
++	server = cifs_pick_channel(tlink_tcon(req->cfile->tlink)->ses);
+ 	rdata->server = server;
+ 
+ 	if (cifs_sb->ctx->rsize == 0)
+@@ -198,7 +200,7 @@ static void cifs_issue_read(struct netfs_io_subrequest *subreq)
+ 	struct netfs_io_request *rreq = subreq->rreq;
+ 	struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
+ 	struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
+-	struct TCP_Server_Info *server = req->server;
++	struct TCP_Server_Info *server = rdata->server;
+ 	int rc = 0;
+ 
+ 	cifs_dbg(FYI, "%s: op=%08x[%x] mapping=%p len=%zu/%zu\n",
+@@ -265,7 +267,6 @@ static int cifs_init_request(struct netfs_io_request *rreq, struct file *file)
+ 		open_file = file->private_data;
+ 		rreq->netfs_priv = file->private_data;
+ 		req->cfile = cifsFileInfo_get(open_file);
+-		req->server = cifs_pick_channel(tlink_tcon(req->cfile->tlink)->ses);
+ 		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
+ 			req->pid = req->cfile->pid;
+ 	} else if (rreq->origin != NETFS_WRITEBACK) {
+diff --git a/include/drm/display/drm_dp.h b/include/drm/display/drm_dp.h
+index a6f8b098c56f14..3bd9f482f0c3e6 100644
+--- a/include/drm/display/drm_dp.h
++++ b/include/drm/display/drm_dp.h
+@@ -359,6 +359,7 @@
+ # define DP_DSC_BITS_PER_PIXEL_1_4          0x2
+ # define DP_DSC_BITS_PER_PIXEL_1_2          0x3
+ # define DP_DSC_BITS_PER_PIXEL_1_1          0x4
++# define DP_DSC_BITS_PER_PIXEL_MASK         0x7
+ 
+ #define DP_PSR_SUPPORT                      0x070   /* XXX 1.2? */
+ # define DP_PSR_IS_SUPPORTED                1
+diff --git a/include/kunit/platform_device.h b/include/kunit/platform_device.h
+index 0fc0999d2420aa..f8236a8536f7eb 100644
+--- a/include/kunit/platform_device.h
++++ b/include/kunit/platform_device.h
+@@ -2,6 +2,7 @@
+ #ifndef _KUNIT_PLATFORM_DRIVER_H
+ #define _KUNIT_PLATFORM_DRIVER_H
+ 
++struct completion;
+ struct kunit;
+ struct platform_device;
+ struct platform_driver;
+diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
+index c5063e0a38a058..a53cbe25691043 100644
+--- a/include/linux/blk-mq.h
++++ b/include/linux/blk-mq.h
+@@ -880,12 +880,22 @@ static inline bool blk_mq_add_to_batch(struct request *req,
+ 				       void (*complete)(struct io_comp_batch *))
+ {
+ 	/*
+-	 * blk_mq_end_request_batch() can't end request allocated from
+-	 * sched tags
++	 * Check various conditions that exclude batch processing:
++	 * 1) No batch container
++	 * 2) Has scheduler data attached
++	 * 3) Not a passthrough request and end_io set
++	 * 4) Not a passthrough request and an ioerror
+ 	 */
+-	if (!iob || (req->rq_flags & RQF_SCHED_TAGS) || ioerror ||
+-			(req->end_io && !blk_rq_is_passthrough(req)))
++	if (!iob)
+ 		return false;
++	if (req->rq_flags & RQF_SCHED_TAGS)
++		return false;
++	if (!blk_rq_is_passthrough(req)) {
++		if (req->end_io)
++			return false;
++		if (ioerror < 0)
++			return false;
++	}
+ 
+ 	if (!iob->complete)
+ 		iob->complete = complete;
+diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
+index 47ae4c4d924c28..a32eebcd23da47 100644
+--- a/include/linux/cgroup-defs.h
++++ b/include/linux/cgroup-defs.h
+@@ -71,9 +71,6 @@ enum {
+ 
+ 	/* Cgroup is frozen. */
+ 	CGRP_FROZEN,
+-
+-	/* Control group has to be killed. */
+-	CGRP_KILL,
+ };
+ 
+ /* cgroup_root->flags */
+@@ -460,6 +457,9 @@ struct cgroup {
+ 
+ 	int nr_threaded_children;	/* # of live threaded child cgroups */
+ 
++	/* sequence number for cgroup.kill, serialized by css_set_lock. */
++	unsigned int kill_seq;
++
+ 	struct kernfs_node *kn;		/* cgroup kernfs entry */
+ 	struct cgroup_file procs_file;	/* handle for "cgroup.procs" */
+ 	struct cgroup_file events_file;	/* handle for "cgroup.events" */
+diff --git a/include/linux/efi.h b/include/linux/efi.h
+index e28d8806603376..2f1bfd7562eb2b 100644
+--- a/include/linux/efi.h
++++ b/include/linux/efi.h
+@@ -128,6 +128,7 @@ typedef	struct {
+ #define EFI_MEMORY_RO		((u64)0x0000000000020000ULL)	/* read-only */
+ #define EFI_MEMORY_SP		((u64)0x0000000000040000ULL)	/* soft reserved */
+ #define EFI_MEMORY_CPU_CRYPTO	((u64)0x0000000000080000ULL)	/* supports encryption */
++#define EFI_MEMORY_HOT_PLUGGABLE	BIT_ULL(20)	/* supports unplugging at runtime */
+ #define EFI_MEMORY_RUNTIME	((u64)0x8000000000000000ULL)	/* range requires runtime mapping */
+ #define EFI_MEMORY_DESCRIPTOR_VERSION	1
+ 
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index 02d3bafebbe77c..4f17b786828af7 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -2577,6 +2577,12 @@ struct net *dev_net(const struct net_device *dev)
+ 	return read_pnet(&dev->nd_net);
+ }
+ 
++static inline
++struct net *dev_net_rcu(const struct net_device *dev)
++{
++	return read_pnet_rcu(&dev->nd_net);
++}
++
+ static inline
+ void dev_net_set(struct net_device *dev, struct net *net)
+ {
+diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
+index 4cf6aaed5f35db..22f6b018cff8de 100644
+--- a/include/linux/pci_ids.h
++++ b/include/linux/pci_ids.h
+@@ -2589,6 +2589,11 @@
+ 
+ #define PCI_VENDOR_ID_REDHAT		0x1b36
+ 
++#define PCI_VENDOR_ID_WCHIC		0x1c00
++#define PCI_DEVICE_ID_WCHIC_CH382_0S1P	0x3050
++#define PCI_DEVICE_ID_WCHIC_CH382_2S1P	0x3250
++#define PCI_DEVICE_ID_WCHIC_CH382_2S	0x3253
++
+ #define PCI_VENDOR_ID_SILICOM_DENMARK	0x1c2c
+ 
+ #define PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS	0x1c36
+@@ -2643,6 +2648,12 @@
+ #define PCI_VENDOR_ID_AKS		0x416c
+ #define PCI_DEVICE_ID_AKS_ALADDINCARD	0x0100
+ 
++#define PCI_VENDOR_ID_WCHCN		0x4348
++#define PCI_DEVICE_ID_WCHCN_CH353_4S	0x3453
++#define PCI_DEVICE_ID_WCHCN_CH353_2S1PF	0x5046
++#define PCI_DEVICE_ID_WCHCN_CH353_1S1P	0x5053
++#define PCI_DEVICE_ID_WCHCN_CH353_2S1P	0x7053
++
+ #define PCI_VENDOR_ID_ACCESSIO		0x494f
+ #define PCI_DEVICE_ID_ACCESSIO_WDG_CSM	0x22c0
+ 
+diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
+index 0f2aeb37bbb047..ca1db4b92c3244 100644
+--- a/include/linux/sched/task.h
++++ b/include/linux/sched/task.h
+@@ -43,6 +43,7 @@ struct kernel_clone_args {
+ 	void *fn_arg;
+ 	struct cgroup *cgrp;
+ 	struct css_set *cset;
++	unsigned int kill_seq;
+ };
+ 
+ /*
+diff --git a/include/net/dst.h b/include/net/dst.h
+index 0f303cc602520e..08647c99d79c9a 100644
+--- a/include/net/dst.h
++++ b/include/net/dst.h
+@@ -440,6 +440,15 @@ static inline void dst_set_expires(struct dst_entry *dst, int timeout)
+ 		dst->expires = expires;
+ }
+ 
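++/* link-layer headroom needed for @skb, taken from the dst device when set */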
++static inline unsigned int dst_dev_overhead(struct dst_entry *dst,
++					    struct sk_buff *skb)
++{
++	if (likely(dst))
++		return LL_RESERVED_SPACE(dst->dev);
++
++	return skb->mac_len;
++}
++
+ INDIRECT_CALLABLE_DECLARE(int ip6_output(struct net *, struct sock *,
+ 					 struct sk_buff *));
+ INDIRECT_CALLABLE_DECLARE(int ip_output(struct net *, struct sock *,
+diff --git a/include/net/ip.h b/include/net/ip.h
+index d92d3bc3ec0e25..fe4f8543811433 100644
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -465,9 +465,12 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
+ 						    bool forwarding)
+ {
+ 	const struct rtable *rt = dst_rtable(dst);
+-	struct net *net = dev_net(dst->dev);
+-	unsigned int mtu;
++	unsigned int mtu, res;
++	struct net *net;
++
++	rcu_read_lock();
+ 
++	net = dev_net_rcu(dst->dev);
+ 	if (READ_ONCE(net->ipv4.sysctl_ip_fwd_use_pmtu) ||
+ 	    ip_mtu_locked(dst) ||
+ 	    !forwarding) {
+@@ -491,7 +494,11 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
+ out:
+ 	mtu = min_t(unsigned int, mtu, IP_MAX_MTU);
+ 
+-	return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
++	res = mtu - lwtunnel_headroom(dst->lwtstate, mtu);
++
++	rcu_read_unlock();
++
++	return res;
+ }
+ 
+ static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
+diff --git a/include/net/l3mdev.h b/include/net/l3mdev.h
+index 031c661aa14df7..bdfa9d414360c7 100644
+--- a/include/net/l3mdev.h
++++ b/include/net/l3mdev.h
+@@ -198,10 +198,12 @@ struct sk_buff *l3mdev_l3_out(struct sock *sk, struct sk_buff *skb, u16 proto)
+ 	if (netif_is_l3_slave(dev)) {
+ 		struct net_device *master;
+ 
++		rcu_read_lock();
+ 		master = netdev_master_upper_dev_get_rcu(dev);
+ 		if (master && master->l3mdev_ops->l3mdev_l3_out)
+ 			skb = master->l3mdev_ops->l3mdev_l3_out(master, sk,
+ 								skb, proto);
++		rcu_read_unlock();
+ 	}
+ 
+ 	return skb;
+diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
+index 9398c8f4995368..da93873df4dbd7 100644
+--- a/include/net/net_namespace.h
++++ b/include/net/net_namespace.h
+@@ -387,7 +387,7 @@ static inline struct net *read_pnet(const possible_net_t *pnet)
+ #endif
+ }
+ 
+-static inline struct net *read_pnet_rcu(possible_net_t *pnet)
++static inline struct net *read_pnet_rcu(const possible_net_t *pnet)
+ {
+ #ifdef CONFIG_NET_NS
+ 	return rcu_dereference(pnet->net);
+diff --git a/include/net/route.h b/include/net/route.h
+index 1789f1e6640b46..da34b6fa9862dc 100644
+--- a/include/net/route.h
++++ b/include/net/route.h
+@@ -363,10 +363,15 @@ static inline int inet_iif(const struct sk_buff *skb)
+ static inline int ip4_dst_hoplimit(const struct dst_entry *dst)
+ {
+ 	int hoplimit = dst_metric_raw(dst, RTAX_HOPLIMIT);
+-	struct net *net = dev_net(dst->dev);
+ 
+-	if (hoplimit == 0)
++	if (hoplimit == 0) {
++		const struct net *net;
++
++		rcu_read_lock();
++		net = dev_net_rcu(dst->dev);
+ 		hoplimit = READ_ONCE(net->ipv4.sysctl_ip_default_ttl);
++		rcu_read_unlock();
++	}
+ 	return hoplimit;
+ }
+ 
+diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
+index d5e43a1dcff226..47cba116f87b81 100644
+--- a/include/ufs/ufshcd.h
++++ b/include/ufs/ufshcd.h
+@@ -402,6 +402,9 @@ enum clk_gating_state {
+  * delay_ms
+  * @ungate_work: worker to turn on clocks that will be used in case of
+  * interrupt context
++ * @clk_gating_workq: workqueue for clock gating work.
++ * @lock: serialize access to some struct ufs_clk_gating members. An outer lock
++ * relative to the host lock
+  * @state: the current clocks state
+  * @delay_ms: gating delay in ms
+  * @is_suspended: clk gating is suspended when set to 1 which can be used
+@@ -412,11 +415,14 @@ enum clk_gating_state {
+  * @is_initialized: Indicates whether clock gating is initialized or not
+  * @active_reqs: number of requests that are pending and should be waited for
+  * completion before gating clocks.
+- * @clk_gating_workq: workqueue for clock gating work.
+  */
+ struct ufs_clk_gating {
+ 	struct delayed_work gate_work;
+ 	struct work_struct ungate_work;
++	struct workqueue_struct *clk_gating_workq;
++
++	spinlock_t lock;
++
+ 	enum clk_gating_state state;
+ 	unsigned long delay_ms;
+ 	bool is_suspended;
+@@ -425,7 +431,6 @@ struct ufs_clk_gating {
+ 	bool is_enabled;
+ 	bool is_initialized;
+ 	int active_reqs;
+-	struct workqueue_struct *clk_gating_workq;
+ };
+ 
+ /**
+diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
+index eec5eb7de8430e..e1895952066eeb 100644
+--- a/io_uring/kbuf.c
++++ b/io_uring/kbuf.c
+@@ -420,6 +420,12 @@ void io_destroy_buffers(struct io_ring_ctx *ctx)
+ 	}
+ }
+ 
++static void io_destroy_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl)
++{
++	xa_erase(&ctx->io_bl_xa, bl->bgid);
++	io_put_bl(ctx, bl);
++}
++
+ int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+ {
+ 	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
+@@ -717,12 +723,13 @@ int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
+ 		/* if mapped buffer ring OR classic exists, don't allow */
+ 		if (bl->flags & IOBL_BUF_RING || !list_empty(&bl->buf_list))
+ 			return -EEXIST;
+-	} else {
+-		free_bl = bl = kzalloc(sizeof(*bl), GFP_KERNEL);
+-		if (!bl)
+-			return -ENOMEM;
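++		/* list exists but is unused: drop it so a fresh one is allocated below */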
++		io_destroy_bl(ctx, bl);
+ 	}
+ 
++	free_bl = bl = kzalloc(sizeof(*bl), GFP_KERNEL);
++	if (!bl)
++		return -ENOMEM;
++
+ 	if (!(reg.flags & IOU_PBUF_RING_MMAP))
+ 		ret = io_pin_pbuf_ring(&reg, bl);
+ 	else
+diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c
+index 874f9e2defd583..b2ce4b56100271 100644
+--- a/io_uring/uring_cmd.c
++++ b/io_uring/uring_cmd.c
+@@ -65,9 +65,6 @@ bool io_uring_try_cancel_uring_cmd(struct io_ring_ctx *ctx,
+ 			continue;
+ 
+ 		if (cmd->flags & IORING_URING_CMD_CANCELABLE) {
+-			/* ->sqe isn't available if no async data */
+-			if (!req_has_async_data(req))
+-				cmd->sqe = NULL;
+ 			file->f_op->uring_cmd(cmd, IO_URING_F_CANCEL |
+ 						   IO_URING_F_COMPLETE_DEFER);
+ 			ret = true;
+diff --git a/io_uring/waitid.c b/io_uring/waitid.c
+index 6362ec20abc0cf..2f7b5eeab845e9 100644
+--- a/io_uring/waitid.c
++++ b/io_uring/waitid.c
+@@ -118,7 +118,6 @@ static int io_waitid_finish(struct io_kiocb *req, int ret)
+ static void io_waitid_complete(struct io_kiocb *req, int ret)
+ {
+ 	struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);
+-	struct io_tw_state ts = {};
+ 
+ 	/* anyone completing better be holding a reference */
+ 	WARN_ON_ONCE(!(atomic_read(&iw->refs) & IO_WAITID_REF_MASK));
+@@ -131,7 +130,6 @@ static void io_waitid_complete(struct io_kiocb *req, int ret)
+ 	if (ret < 0)
+ 		req_set_fail(req);
+ 	io_req_set_res(req, ret, 0);
+-	io_req_task_complete(req, &ts);
+ }
+ 
+ static bool __io_waitid_cancel(struct io_ring_ctx *ctx, struct io_kiocb *req)
+@@ -153,6 +151,7 @@ static bool __io_waitid_cancel(struct io_ring_ctx *ctx, struct io_kiocb *req)
+ 	list_del_init(&iwa->wo.child_wait.entry);
+ 	spin_unlock_irq(&iw->head->lock);
+ 	io_waitid_complete(req, -ECANCELED);
++	io_req_queue_tw_complete(req, -ECANCELED);
+ 	return true;
+ }
+ 
+@@ -258,6 +257,7 @@ static void io_waitid_cb(struct io_kiocb *req, struct io_tw_state *ts)
+ 	}
+ 
+ 	io_waitid_complete(req, ret);
++	io_req_task_complete(req, ts);
+ }
+ 
+ static int io_waitid_wait(struct wait_queue_entry *wait, unsigned mode,
+diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
+index e275eaf2de7f8f..216535e055e112 100644
+--- a/kernel/cgroup/cgroup.c
++++ b/kernel/cgroup/cgroup.c
+@@ -4013,7 +4013,7 @@ static void __cgroup_kill(struct cgroup *cgrp)
+ 	lockdep_assert_held(&cgroup_mutex);
+ 
+ 	spin_lock_irq(&css_set_lock);
+-	set_bit(CGRP_KILL, &cgrp->flags);
++	cgrp->kill_seq++;
+ 	spin_unlock_irq(&css_set_lock);
+ 
+ 	css_task_iter_start(&cgrp->self, CSS_TASK_ITER_PROCS | CSS_TASK_ITER_THREADED, &it);
+@@ -4029,10 +4029,6 @@ static void __cgroup_kill(struct cgroup *cgrp)
+ 		send_sig(SIGKILL, task, 0);
+ 	}
+ 	css_task_iter_end(&it);
+-
+-	spin_lock_irq(&css_set_lock);
+-	clear_bit(CGRP_KILL, &cgrp->flags);
+-	spin_unlock_irq(&css_set_lock);
+ }
+ 
+ static void cgroup_kill(struct cgroup *cgrp)
+@@ -6489,6 +6485,10 @@ static int cgroup_css_set_fork(struct kernel_clone_args *kargs)
+ 	spin_lock_irq(&css_set_lock);
+ 	cset = task_css_set(current);
+ 	get_css_set(cset);
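++	/* record the kill sequence; cgroup_post_fork() compares against it */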
++	if (kargs->cgrp)
++		kargs->kill_seq = kargs->cgrp->kill_seq;
++	else
++		kargs->kill_seq = cset->dfl_cgrp->kill_seq;
+ 	spin_unlock_irq(&css_set_lock);
+ 
+ 	if (!(kargs->flags & CLONE_INTO_CGROUP)) {
+@@ -6672,6 +6672,7 @@ void cgroup_post_fork(struct task_struct *child,
+ 		      struct kernel_clone_args *kargs)
+ 	__releases(&cgroup_threadgroup_rwsem) __releases(&cgroup_mutex)
+ {
++	unsigned int cgrp_kill_seq = 0;
+ 	unsigned long cgrp_flags = 0;
+ 	bool kill = false;
+ 	struct cgroup_subsys *ss;
+@@ -6685,10 +6686,13 @@ void cgroup_post_fork(struct task_struct *child,
+ 
+ 	/* init tasks are special, only link regular threads */
+ 	if (likely(child->pid)) {
+-		if (kargs->cgrp)
++		if (kargs->cgrp) {
+ 			cgrp_flags = kargs->cgrp->flags;
+-		else
++			cgrp_kill_seq = kargs->cgrp->kill_seq;
++		} else {
+ 			cgrp_flags = cset->dfl_cgrp->flags;
++			cgrp_kill_seq = cset->dfl_cgrp->kill_seq;
++		}
+ 
+ 		WARN_ON_ONCE(!list_empty(&child->cg_list));
+ 		cset->nr_tasks++;
+@@ -6723,7 +6727,7 @@ void cgroup_post_fork(struct task_struct *child,
+ 		 * child down right after we finished preparing it for
+ 		 * userspace.
+ 		 */
+-		kill = test_bit(CGRP_KILL, &cgrp_flags);
++		kill = kargs->kill_seq != cgrp_kill_seq;
+ 	}
+ 
+ 	spin_unlock_irq(&css_set_lock);
+diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c
+index a06b452724118a..ce295b73c0a366 100644
+--- a/kernel/cgroup/rstat.c
++++ b/kernel/cgroup/rstat.c
+@@ -586,7 +586,6 @@ static void root_cgroup_cputime(struct cgroup_base_stat *bstat)
+ 
+ 		cputime->sum_exec_runtime += user;
+ 		cputime->sum_exec_runtime += sys;
+-		cputime->sum_exec_runtime += cpustat[CPUTIME_STEAL];
+ 
+ #ifdef CONFIG_SCHED_CORE
+ 		bstat->forceidle_sum += cpustat[CPUTIME_FORCEIDLE];
+diff --git a/kernel/sched/autogroup.c b/kernel/sched/autogroup.c
+index db68a964e34e26..c4a3ccf6a8ace4 100644
+--- a/kernel/sched/autogroup.c
++++ b/kernel/sched/autogroup.c
+@@ -150,7 +150,7 @@ void sched_autogroup_exit_task(struct task_struct *p)
+ 	 * see this thread after that: we can no longer use signal->autogroup.
+ 	 * See the PF_EXITING check in task_wants_autogroup().
+ 	 */
+-	sched_move_task(p);
++	sched_move_task(p, true);
+ }
+ 
+ static void
+@@ -182,7 +182,7 @@ autogroup_move_group(struct task_struct *p, struct autogroup *ag)
+ 	 * sched_autogroup_exit_task().
+ 	 */
+ 	for_each_thread(p, t)
+-		sched_move_task(t);
++		sched_move_task(t, true);
+ 
+ 	unlock_task_sighand(p, &flags);
+ 	autogroup_kref_put(prev);
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 5d67f41d05d40b..c72356836eb628 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -8953,7 +8953,7 @@ static void sched_change_group(struct task_struct *tsk, struct task_group *group
+  * now. This function just updates tsk->se.cfs_rq and tsk->se.parent to reflect
+  * its new group.
+  */
+-void sched_move_task(struct task_struct *tsk)
++void sched_move_task(struct task_struct *tsk, bool for_autogroup)
+ {
+ 	int queued, running, queue_flags =
+ 		DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
+@@ -8982,7 +8982,8 @@ void sched_move_task(struct task_struct *tsk)
+ 		put_prev_task(rq, tsk);
+ 
+ 	sched_change_group(tsk, group);
+-	scx_move_task(tsk);
++	if (!for_autogroup)
++		scx_cgroup_move_task(tsk);
+ 
+ 	if (queued)
+ 		enqueue_task(rq, tsk, queue_flags);
+@@ -9083,7 +9084,7 @@ static void cpu_cgroup_attach(struct cgroup_taskset *tset)
+ 	struct cgroup_subsys_state *css;
+ 
+ 	cgroup_taskset_for_each(task, css, tset)
+-		sched_move_task(task);
++		sched_move_task(task, false);
+ 
+ 	scx_cgroup_finish_attach();
+ }
+diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
+index 4c4681cb9337b4..689f7e8f69f54d 100644
+--- a/kernel/sched/ext.c
++++ b/kernel/sched/ext.c
+@@ -2458,6 +2458,9 @@ static void dispatch_to_local_dsq(struct rq *rq, struct scx_dispatch_q *dst_dsq,
+ {
+ 	struct rq *src_rq = task_rq(p);
+ 	struct rq *dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq);
++#ifdef CONFIG_SMP
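++	/* rq whose lock is currently held; switched as the task is moved below */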
++	struct rq *locked_rq = rq;
++#endif
+ 
+ 	/*
+ 	 * We're synchronized against dequeue through DISPATCHING. As @p can't
+@@ -2494,8 +2497,9 @@ static void dispatch_to_local_dsq(struct rq *rq, struct scx_dispatch_q *dst_dsq,
+ 	atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
+ 
+ 	/* switch to @src_rq lock */
+-	if (rq != src_rq) {
+-		raw_spin_rq_unlock(rq);
++	if (locked_rq != src_rq) {
++		raw_spin_rq_unlock(locked_rq);
++		locked_rq = src_rq;
+ 		raw_spin_rq_lock(src_rq);
+ 	}
+ 
+@@ -2513,6 +2517,8 @@ static void dispatch_to_local_dsq(struct rq *rq, struct scx_dispatch_q *dst_dsq,
+ 		} else {
+ 			move_remote_task_to_local_dsq(p, enq_flags,
+ 						      src_rq, dst_rq);
++			/* task has been moved to dst_rq, which is now locked */
++			locked_rq = dst_rq;
+ 		}
+ 
+ 		/* if the destination CPU is idle, wake it up */
+@@ -2521,8 +2527,8 @@ static void dispatch_to_local_dsq(struct rq *rq, struct scx_dispatch_q *dst_dsq,
+ 	}
+ 
+ 	/* switch back to @rq lock */
+-	if (rq != dst_rq) {
+-		raw_spin_rq_unlock(dst_rq);
++	if (locked_rq != rq) {
++		raw_spin_rq_unlock(locked_rq);
+ 		raw_spin_rq_lock(rq);
+ 	}
+ #else	/* CONFIG_SMP */
+@@ -3441,7 +3447,7 @@ static void task_tick_scx(struct rq *rq, struct task_struct *curr, int queued)
+ 		curr->scx.slice = 0;
+ 		touch_core_sched(rq, curr);
+ 	} else if (SCX_HAS_OP(tick)) {
+-		SCX_CALL_OP(SCX_KF_REST, tick, curr);
++		SCX_CALL_OP_TASK(SCX_KF_REST, tick, curr);
+ 	}
+ 
+ 	if (!curr->scx.slice)
+@@ -3588,7 +3594,7 @@ static void scx_ops_disable_task(struct task_struct *p)
+ 	WARN_ON_ONCE(scx_get_task_state(p) != SCX_TASK_ENABLED);
+ 
+ 	if (SCX_HAS_OP(disable))
+-		SCX_CALL_OP(SCX_KF_REST, disable, p);
++		SCX_CALL_OP_TASK(SCX_KF_REST, disable, p);
+ 	scx_set_task_state(p, SCX_TASK_READY);
+ }
+ 
+@@ -3617,7 +3623,7 @@ static void scx_ops_exit_task(struct task_struct *p)
+ 	}
+ 
+ 	if (SCX_HAS_OP(exit_task))
+-		SCX_CALL_OP(SCX_KF_REST, exit_task, p, &args);
++		SCX_CALL_OP_TASK(SCX_KF_REST, exit_task, p, &args);
+ 	scx_set_task_state(p, SCX_TASK_NONE);
+ }
+ 
+@@ -3913,24 +3919,11 @@ int scx_cgroup_can_attach(struct cgroup_taskset *tset)
+ 	return ops_sanitize_err("cgroup_prep_move", ret);
+ }
+ 
+-void scx_move_task(struct task_struct *p)
++void scx_cgroup_move_task(struct task_struct *p)
+ {
+ 	if (!scx_cgroup_enabled)
+ 		return;
+ 
+-	/*
+-	 * We're called from sched_move_task() which handles both cgroup and
+-	 * autogroup moves. Ignore the latter.
+-	 *
+-	 * Also ignore exiting tasks, because in the exit path tasks transition
+-	 * from the autogroup to the root group, so task_group_is_autogroup()
+-	 * alone isn't able to catch exiting autogroup tasks. This is safe for
+-	 * cgroup_move(), because cgroup migrations never happen for PF_EXITING
+-	 * tasks.
+-	 */
+-	if (task_group_is_autogroup(task_group(p)) || (p->flags & PF_EXITING))
+-		return;
+-
+ 	/*
+ 	 * @p must have ops.cgroup_prep_move() called on it and thus
+ 	 * cgrp_moving_from set.
+diff --git a/kernel/sched/ext.h b/kernel/sched/ext.h
+index 4d022d17ac7dd6..1079b56b0f7aea 100644
+--- a/kernel/sched/ext.h
++++ b/kernel/sched/ext.h
+@@ -73,7 +73,7 @@ static inline void scx_update_idle(struct rq *rq, bool idle, bool do_notify) {}
+ int scx_tg_online(struct task_group *tg);
+ void scx_tg_offline(struct task_group *tg);
+ int scx_cgroup_can_attach(struct cgroup_taskset *tset);
+-void scx_move_task(struct task_struct *p);
++void scx_cgroup_move_task(struct task_struct *p);
+ void scx_cgroup_finish_attach(void);
+ void scx_cgroup_cancel_attach(struct cgroup_taskset *tset);
+ void scx_group_set_weight(struct task_group *tg, unsigned long cgrp_weight);
+@@ -82,7 +82,7 @@ void scx_group_set_idle(struct task_group *tg, bool idle);
+ static inline int scx_tg_online(struct task_group *tg) { return 0; }
+ static inline void scx_tg_offline(struct task_group *tg) {}
+ static inline int scx_cgroup_can_attach(struct cgroup_taskset *tset) { return 0; }
+-static inline void scx_move_task(struct task_struct *p) {}
++static inline void scx_cgroup_move_task(struct task_struct *p) {}
+ static inline void scx_cgroup_finish_attach(void) {}
+ static inline void scx_cgroup_cancel_attach(struct cgroup_taskset *tset) {}
+ static inline void scx_group_set_weight(struct task_group *tg, unsigned long cgrp_weight) {}
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index 5426969cf478a0..d79de755c1c269 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -572,7 +572,7 @@ extern void sched_online_group(struct task_group *tg,
+ extern void sched_destroy_group(struct task_group *tg);
+ extern void sched_release_group(struct task_group *tg);
+ 
+-extern void sched_move_task(struct task_struct *tsk);
++extern void sched_move_task(struct task_struct *tsk, bool for_autogroup);
+ 
+ #ifdef CONFIG_FAIR_GROUP_SCHED
+ extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
+diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
+index 8a40a616288b81..58fb7280cabbe6 100644
+--- a/kernel/time/clocksource.c
++++ b/kernel/time/clocksource.c
+@@ -365,16 +365,18 @@ void clocksource_verify_percpu(struct clocksource *cs)
+ 	cpumask_clear(&cpus_ahead);
+ 	cpumask_clear(&cpus_behind);
+ 	cpus_read_lock();
+-	preempt_disable();
++	migrate_disable();
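++	/* stay on this CPU, but leave preemption enabled while picking CPUs */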
+ 	clocksource_verify_choose_cpus();
+ 	if (cpumask_empty(&cpus_chosen)) {
+-		preempt_enable();
++		migrate_enable();
+ 		cpus_read_unlock();
+ 		pr_warn("Not enough CPUs to check clocksource '%s'.\n", cs->name);
+ 		return;
+ 	}
+ 	testcpu = smp_processor_id();
+-	pr_warn("Checking clocksource %s synchronization from CPU %d to CPUs %*pbl.\n", cs->name, testcpu, cpumask_pr_args(&cpus_chosen));
++	pr_info("Checking clocksource %s synchronization from CPU %d to CPUs %*pbl.\n",
++		cs->name, testcpu, cpumask_pr_args(&cpus_chosen));
++	preempt_disable();
+ 	for_each_cpu(cpu, &cpus_chosen) {
+ 		if (cpu == testcpu)
+ 			continue;
+@@ -394,6 +396,7 @@ void clocksource_verify_percpu(struct clocksource *cs)
+ 			cs_nsec_min = cs_nsec;
+ 	}
+ 	preempt_enable();
++	migrate_enable();
+ 	cpus_read_unlock();
+ 	if (!cpumask_empty(&cpus_ahead))
+ 		pr_warn("        CPUs %*pbl ahead of CPU %d for clocksource %s.\n",
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 0f8f3ffc6f0904..ea8ad5480e286d 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -1672,7 +1672,8 @@ static void *rb_range_buffer(struct ring_buffer_per_cpu *cpu_buffer, int idx)
+  * must be the same.
+  */
+ static bool rb_meta_valid(struct ring_buffer_meta *meta, int cpu,
+-			  struct trace_buffer *buffer, int nr_pages)
++			  struct trace_buffer *buffer, int nr_pages,
++			  unsigned long *subbuf_mask)
+ {
+ 	int subbuf_size = PAGE_SIZE;
+ 	struct buffer_data_page *subbuf;
+@@ -1680,6 +1681,9 @@ static bool rb_meta_valid(struct ring_buffer_meta *meta, int cpu,
+ 	unsigned long buffers_end;
+ 	int i;
+ 
++	if (!subbuf_mask)
++		return false;
++
+ 	/* Check the meta magic and meta struct size */
+ 	if (meta->magic != RING_BUFFER_META_MAGIC ||
+ 	    meta->struct_size != sizeof(*meta)) {
+@@ -1712,6 +1716,8 @@ static bool rb_meta_valid(struct ring_buffer_meta *meta, int cpu,
+ 
+ 	subbuf = rb_subbufs_from_meta(meta);
+ 
++	bitmap_clear(subbuf_mask, 0, meta->nr_subbufs);
++
+ 	/* Is the meta buffers and the subbufs themselves have correct data? */
+ 	for (i = 0; i < meta->nr_subbufs; i++) {
+ 		if (meta->buffers[i] < 0 ||
+@@ -1725,6 +1731,12 @@ static bool rb_meta_valid(struct ring_buffer_meta *meta, int cpu,
+ 			return false;
+ 		}
+ 
++		if (test_bit(meta->buffers[i], subbuf_mask)) {
++			pr_info("Ring buffer boot meta [%d] array has duplicates\n", cpu);
++			return false;
++		}
++
++		set_bit(meta->buffers[i], subbuf_mask);
+ 		subbuf = (void *)subbuf + subbuf_size;
+ 	}
+ 
+@@ -1838,6 +1850,11 @@ static void rb_meta_validate_events(struct ring_buffer_per_cpu *cpu_buffer)
+ 				cpu_buffer->cpu);
+ 			goto invalid;
+ 		}
++
++		/* If the buffer has content, update pages_touched */
++		if (ret)
++			local_inc(&cpu_buffer->pages_touched);
++
+ 		entries += ret;
+ 		entry_bytes += local_read(&head_page->page->commit);
+ 		local_set(&cpu_buffer->head_page->entries, ret);
+@@ -1889,17 +1906,22 @@ static void rb_meta_init_text_addr(struct ring_buffer_meta *meta)
+ static void rb_range_meta_init(struct trace_buffer *buffer, int nr_pages)
+ {
+ 	struct ring_buffer_meta *meta;
++	unsigned long *subbuf_mask;
+ 	unsigned long delta;
+ 	void *subbuf;
+ 	int cpu;
+ 	int i;
+ 
++	/* Create a mask to test the subbuf array */
++	subbuf_mask = bitmap_alloc(nr_pages + 1, GFP_KERNEL);
++	/* If subbuf_mask fails to allocate, then rb_meta_valid() will return false */
++
+ 	for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
+ 		void *next_meta;
+ 
+ 		meta = rb_range_meta(buffer, nr_pages, cpu);
+ 
+-		if (rb_meta_valid(meta, cpu, buffer, nr_pages)) {
++		if (rb_meta_valid(meta, cpu, buffer, nr_pages, subbuf_mask)) {
+ 			/* Make the mappings match the current address */
+ 			subbuf = rb_subbufs_from_meta(meta);
+ 			delta = (unsigned long)subbuf - meta->first_buffer;
+@@ -1943,6 +1965,7 @@ static void rb_range_meta_init(struct trace_buffer *buffer, int nr_pages)
+ 			subbuf += meta->subbuf_size;
+ 		}
+ 	}
++	bitmap_free(subbuf_mask);
+ }
+ 
+ static void *rbm_start(struct seq_file *m, loff_t *pos)
+@@ -7157,6 +7180,7 @@ int ring_buffer_map(struct trace_buffer *buffer, int cpu,
+ 		kfree(cpu_buffer->subbuf_ids);
+ 		cpu_buffer->subbuf_ids = NULL;
+ 		rb_free_meta_page(cpu_buffer);
++		atomic_dec(&cpu_buffer->resize_disabled);
+ 	}
+ 
+ unlock:
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index b04990385a6a87..bfc4ac265c2c33 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -8364,6 +8364,10 @@ static int tracing_buffers_mmap(struct file *filp, struct vm_area_struct *vma)
+ 	struct trace_iterator *iter = &info->iter;
+ 	int ret = 0;
+ 
++	/* Currently the boot mapped buffer is not supported for mmap */
++	if (iter->tr->flags & TRACE_ARRAY_FL_BOOT)
++		return -ENODEV;
++
+ 	ret = get_snapshot_map(iter->tr);
+ 	if (ret)
+ 		return ret;
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index cee65cb4310816..a9d64e08dffc7c 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -3509,12 +3509,6 @@ static int rescuer_thread(void *__rescuer)
+ 			}
+ 		}
+ 
+-		/*
+-		 * Put the reference grabbed by send_mayday().  @pool won't
+-		 * go away while we're still attached to it.
+-		 */
+-		put_pwq(pwq);
+-
+ 		/*
+ 		 * Leave this pool. Notify regular workers; otherwise, we end up
+ 		 * with 0 concurrency and stalling the execution.
+@@ -3525,6 +3519,12 @@ static int rescuer_thread(void *__rescuer)
+ 
+ 		worker_detach_from_pool(rescuer);
+ 
++		/*
++		 * Put the reference grabbed by send_mayday().  @pool might
++		 * go away any time after it.
++		 */
++		put_pwq_unlocked(pwq);
++
+ 		raw_spin_lock_irq(&wq_mayday_lock);
+ 	}
+ 
+diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
+index aa6c714892ec9d..9f3b8b682adb29 100644
+--- a/net/ax25/af_ax25.c
++++ b/net/ax25/af_ax25.c
+@@ -685,6 +685,15 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname,
+ 			break;
+ 		}
+ 
++		if (ax25->ax25_dev) {
++			if (dev == ax25->ax25_dev->dev) {
++				rcu_read_unlock();
++				break;
++			}
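++			/* rebinding: release the references held on the old device */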
++			netdev_put(ax25->ax25_dev->dev, &ax25->dev_tracker);
++			ax25_dev_put(ax25->ax25_dev);
++		}
++
+ 		ax25->ax25_dev = ax25_dev_ax25dev(dev);
+ 		if (!ax25->ax25_dev) {
+ 			rcu_read_unlock();
+@@ -692,6 +701,8 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname,
+ 			break;
+ 		}
+ 		ax25_fillin_cb(ax25, ax25->ax25_dev);
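++		/* hold the new device for as long as this socket is bound to it */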
++		netdev_hold(dev, &ax25->dev_tracker, GFP_ATOMIC);
++		ax25_dev_hold(ax25->ax25_dev);
+ 		rcu_read_unlock();
+ 		break;
+ 
+diff --git a/net/batman-adv/bat_v.c b/net/batman-adv/bat_v.c
+index ac11f1f08db0f9..d35479c465e2c4 100644
+--- a/net/batman-adv/bat_v.c
++++ b/net/batman-adv/bat_v.c
+@@ -113,8 +113,6 @@ static void
+ batadv_v_hardif_neigh_init(struct batadv_hardif_neigh_node *hardif_neigh)
+ {
+ 	ewma_throughput_init(&hardif_neigh->bat_v.throughput);
+-	INIT_WORK(&hardif_neigh->bat_v.metric_work,
+-		  batadv_v_elp_throughput_metric_update);
+ }
+ 
+ /**
+diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c
+index 1d704574e6bf54..b065578b4436ee 100644
+--- a/net/batman-adv/bat_v_elp.c
++++ b/net/batman-adv/bat_v_elp.c
+@@ -18,6 +18,7 @@
+ #include <linux/if_ether.h>
+ #include <linux/jiffies.h>
+ #include <linux/kref.h>
++#include <linux/list.h>
+ #include <linux/minmax.h>
+ #include <linux/netdevice.h>
+ #include <linux/nl80211.h>
+@@ -26,6 +27,7 @@
+ #include <linux/rcupdate.h>
+ #include <linux/rtnetlink.h>
+ #include <linux/skbuff.h>
++#include <linux/slab.h>
+ #include <linux/stddef.h>
+ #include <linux/string.h>
+ #include <linux/types.h>
+@@ -41,6 +43,18 @@
+ #include "routing.h"
+ #include "send.h"
+ 
++/**
++ * struct batadv_v_metric_queue_entry - list of hardif neighbors which require
++ *  a metric update
++ */
++struct batadv_v_metric_queue_entry {
++	/** @hardif_neigh: hardif neighbor scheduled for metric update */
++	struct batadv_hardif_neigh_node *hardif_neigh;
++
++	/** @list: list node for metric_queue */
++	struct list_head list;
++};
++
+ /**
+  * batadv_v_elp_start_timer() - restart timer for ELP periodic work
+  * @hard_iface: the interface for which the timer has to be reset
+@@ -59,25 +73,36 @@ static void batadv_v_elp_start_timer(struct batadv_hard_iface *hard_iface)
+ /**
+  * batadv_v_elp_get_throughput() - get the throughput towards a neighbour
+  * @neigh: the neighbour for which the throughput has to be obtained
++ * @pthroughput: calculated throughput towards the given neighbour in multiples
++ *  of 100kpbs (a value of '1' equals 0.1Mbps, '10' equals 1Mbps, etc).
+  *
+- * Return: The throughput towards the given neighbour in multiples of 100kpbs
+- *         (a value of '1' equals 0.1Mbps, '10' equals 1Mbps, etc).
++ * Return: true when value behind @pthroughput was set
+  */
+-static u32 batadv_v_elp_get_throughput(struct batadv_hardif_neigh_node *neigh)
++static bool batadv_v_elp_get_throughput(struct batadv_hardif_neigh_node *neigh,
++					u32 *pthroughput)
+ {
+ 	struct batadv_hard_iface *hard_iface = neigh->if_incoming;
++	struct net_device *soft_iface = hard_iface->soft_iface;
+ 	struct ethtool_link_ksettings link_settings;
+ 	struct net_device *real_netdev;
+ 	struct station_info sinfo;
+ 	u32 throughput;
+ 	int ret;
+ 
++	/* don't query throughput when no longer associated with any
++	 * batman-adv interface
++	 */
++	if (!soft_iface)
++		return false;
++
+ 	/* if the user specified a customised value for this interface, then
+ 	 * return it directly
+ 	 */
+ 	throughput =  atomic_read(&hard_iface->bat_v.throughput_override);
+-	if (throughput != 0)
+-		return throughput;
++	if (throughput != 0) {
++		*pthroughput = throughput;
++		return true;
++	}
+ 
+ 	/* if this is a wireless device, then ask its throughput through
+ 	 * cfg80211 API
+@@ -104,27 +129,39 @@ static u32 batadv_v_elp_get_throughput(struct batadv_hardif_neigh_node *neigh)
+ 			 * possible to delete this neighbor. For now set
+ 			 * the throughput metric to 0.
+ 			 */
+-			return 0;
++			*pthroughput = 0;
++			return true;
+ 		}
+ 		if (ret)
+ 			goto default_throughput;
+ 
+-		if (sinfo.filled & BIT(NL80211_STA_INFO_EXPECTED_THROUGHPUT))
+-			return sinfo.expected_throughput / 100;
++		if (sinfo.filled & BIT(NL80211_STA_INFO_EXPECTED_THROUGHPUT)) {
++			*pthroughput = sinfo.expected_throughput / 100;
++			return true;
++		}
+ 
+ 		/* try to estimate the expected throughput based on reported tx
+ 		 * rates
+ 		 */
+-		if (sinfo.filled & BIT(NL80211_STA_INFO_TX_BITRATE))
+-			return cfg80211_calculate_bitrate(&sinfo.txrate) / 3;
++		if (sinfo.filled & BIT(NL80211_STA_INFO_TX_BITRATE)) {
++			*pthroughput = cfg80211_calculate_bitrate(&sinfo.txrate) / 3;
++			return true;
++		}
+ 
+ 		goto default_throughput;
+ 	}
+ 
++	/* only use rtnl_trylock because the elp worker will be cancelled while
++	 * the rtnl_lock is held. the cancel_delayed_work_sync() would otherwise
++	 * wait forever when the elp work_item was started and it is then also
++	 * trying to rtnl_lock
++	 */
++	if (!rtnl_trylock())
++		return false;
++
+ 	/* if not a wifi interface, check if this device provides data via
+ 	 * ethtool (e.g. an Ethernet adapter)
+ 	 */
+-	rtnl_lock();
+ 	ret = __ethtool_get_link_ksettings(hard_iface->net_dev, &link_settings);
+ 	rtnl_unlock();
+ 	if (ret == 0) {
+@@ -135,13 +172,15 @@ static u32 batadv_v_elp_get_throughput(struct batadv_hardif_neigh_node *neigh)
+ 			hard_iface->bat_v.flags &= ~BATADV_FULL_DUPLEX;
+ 
+ 		throughput = link_settings.base.speed;
+-		if (throughput && throughput != SPEED_UNKNOWN)
+-			return throughput * 10;
++		if (throughput && throughput != SPEED_UNKNOWN) {
++			*pthroughput = throughput * 10;
++			return true;
++		}
+ 	}
+ 
+ default_throughput:
+ 	if (!(hard_iface->bat_v.flags & BATADV_WARNING_DEFAULT)) {
+-		batadv_info(hard_iface->soft_iface,
++		batadv_info(soft_iface,
+ 			    "WiFi driver or ethtool info does not provide information about link speeds on interface %s, therefore defaulting to hardcoded throughput values of %u.%1u Mbps. Consider overriding the throughput manually or checking your driver.\n",
+ 			    hard_iface->net_dev->name,
+ 			    BATADV_THROUGHPUT_DEFAULT_VALUE / 10,
+@@ -150,31 +189,26 @@ static u32 batadv_v_elp_get_throughput(struct batadv_hardif_neigh_node *neigh)
+ 	}
+ 
+ 	/* if none of the above cases apply, return the base_throughput */
+-	return BATADV_THROUGHPUT_DEFAULT_VALUE;
++	*pthroughput = BATADV_THROUGHPUT_DEFAULT_VALUE;
++	return true;
+ }
+ 
+ /**
+  * batadv_v_elp_throughput_metric_update() - worker updating the throughput
+  *  metric of a single hop neighbour
+- * @work: the work queue item
++ * @neigh: the neighbour to probe
+  */
+-void batadv_v_elp_throughput_metric_update(struct work_struct *work)
++static void
++batadv_v_elp_throughput_metric_update(struct batadv_hardif_neigh_node *neigh)
+ {
+-	struct batadv_hardif_neigh_node_bat_v *neigh_bat_v;
+-	struct batadv_hardif_neigh_node *neigh;
+-
+-	neigh_bat_v = container_of(work, struct batadv_hardif_neigh_node_bat_v,
+-				   metric_work);
+-	neigh = container_of(neigh_bat_v, struct batadv_hardif_neigh_node,
+-			     bat_v);
++	u32 throughput;
++	bool valid;
+ 
+-	ewma_throughput_add(&neigh->bat_v.throughput,
+-			    batadv_v_elp_get_throughput(neigh));
++	valid = batadv_v_elp_get_throughput(neigh, &throughput);
++	if (!valid)
++		return;
+ 
+-	/* decrement refcounter to balance increment performed before scheduling
+-	 * this task
+-	 */
+-	batadv_hardif_neigh_put(neigh);
++	ewma_throughput_add(&neigh->bat_v.throughput, throughput);
+ }
+ 
+ /**
+@@ -248,14 +282,16 @@ batadv_v_elp_wifi_neigh_probe(struct batadv_hardif_neigh_node *neigh)
+  */
+ static void batadv_v_elp_periodic_work(struct work_struct *work)
+ {
++	struct batadv_v_metric_queue_entry *metric_entry;
++	struct batadv_v_metric_queue_entry *metric_safe;
+ 	struct batadv_hardif_neigh_node *hardif_neigh;
+ 	struct batadv_hard_iface *hard_iface;
+ 	struct batadv_hard_iface_bat_v *bat_v;
+ 	struct batadv_elp_packet *elp_packet;
++	struct list_head metric_queue;
+ 	struct batadv_priv *bat_priv;
+ 	struct sk_buff *skb;
+ 	u32 elp_interval;
+-	bool ret;
+ 
+ 	bat_v = container_of(work, struct batadv_hard_iface_bat_v, elp_wq.work);
+ 	hard_iface = container_of(bat_v, struct batadv_hard_iface, bat_v);
+@@ -291,6 +327,8 @@ static void batadv_v_elp_periodic_work(struct work_struct *work)
+ 
+ 	atomic_inc(&hard_iface->bat_v.elp_seqno);
+ 
++	INIT_LIST_HEAD(&metric_queue);
++
+ 	/* The throughput metric is updated on each sent packet. This way, if a
+ 	 * node is dead and no longer sends packets, batman-adv is still able to
+ 	 * react timely to its death.
+@@ -315,16 +353,28 @@ static void batadv_v_elp_periodic_work(struct work_struct *work)
+ 
+ 		/* Reading the estimated throughput from cfg80211 is a task that
+ 		 * may sleep and that is not allowed in an rcu protected
+-		 * context. Therefore schedule a task for that.
++		 * context. Therefore add it to metric_queue and process it
++		 * outside rcu protected context.
+ 		 */
+-		ret = queue_work(batadv_event_workqueue,
+-				 &hardif_neigh->bat_v.metric_work);
+-
+-		if (!ret)
++		metric_entry = kzalloc(sizeof(*metric_entry), GFP_ATOMIC);
++		if (!metric_entry) {
+ 			batadv_hardif_neigh_put(hardif_neigh);
++			continue;
++		}
++
++		metric_entry->hardif_neigh = hardif_neigh;
++		list_add(&metric_entry->list, &metric_queue);
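++/* like dev_net(), but for callers already inside an RCU read-side section */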
+ 	}
+ 	rcu_read_unlock();
+ 
++	list_for_each_entry_safe(metric_entry, metric_safe, &metric_queue, list) {
++		batadv_v_elp_throughput_metric_update(metric_entry->hardif_neigh);
++
++		batadv_hardif_neigh_put(metric_entry->hardif_neigh);
++		list_del(&metric_entry->list);
++		kfree(metric_entry);
++	}
++
+ restart_timer:
+ 	batadv_v_elp_start_timer(hard_iface);
+ out:
+diff --git a/net/batman-adv/bat_v_elp.h b/net/batman-adv/bat_v_elp.h
+index 9e2740195fa2d4..c9cb0a30710045 100644
+--- a/net/batman-adv/bat_v_elp.h
++++ b/net/batman-adv/bat_v_elp.h
+@@ -10,7 +10,6 @@
+ #include "main.h"
+ 
+ #include <linux/skbuff.h>
+-#include <linux/workqueue.h>
+ 
+ int batadv_v_elp_iface_enable(struct batadv_hard_iface *hard_iface);
+ void batadv_v_elp_iface_disable(struct batadv_hard_iface *hard_iface);
+@@ -19,6 +18,5 @@ void batadv_v_elp_iface_activate(struct batadv_hard_iface *primary_iface,
+ void batadv_v_elp_primary_iface_set(struct batadv_hard_iface *primary_iface);
+ int batadv_v_elp_packet_recv(struct sk_buff *skb,
+ 			     struct batadv_hard_iface *if_incoming);
+-void batadv_v_elp_throughput_metric_update(struct work_struct *work);
+ 
+ #endif /* _NET_BATMAN_ADV_BAT_V_ELP_H_ */
+diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
+index 04f6398b3a40e8..85a50096f5b24d 100644
+--- a/net/batman-adv/types.h
++++ b/net/batman-adv/types.h
+@@ -596,9 +596,6 @@ struct batadv_hardif_neigh_node_bat_v {
+ 	 *  neighbor
+ 	 */
+ 	unsigned long last_unicast_tx;
+-
+-	/** @metric_work: work queue callback item for metric update */
+-	struct work_struct metric_work;
+ };
+ 
+ /**
+diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c
+index 305dd72c844c70..17226b2341d03d 100644
+--- a/net/can/j1939/socket.c
++++ b/net/can/j1939/socket.c
+@@ -1132,7 +1132,7 @@ static int j1939_sk_send_loop(struct j1939_priv *priv,  struct sock *sk,
+ 
+ 	todo_size = size;
+ 
+-	while (todo_size) {
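++	/* run the body at least once so zero-length sends still queue a segment */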
++	do {
+ 		struct j1939_sk_buff_cb *skcb;
+ 
+ 		segment_size = min_t(size_t, J1939_MAX_TP_PACKET_SIZE,
+@@ -1177,7 +1177,7 @@ static int j1939_sk_send_loop(struct j1939_priv *priv,  struct sock *sk,
+ 
+ 		todo_size -= segment_size;
+ 		session->total_queued_size += segment_size;
+-	}
++	} while (todo_size);
+ 
+ 	switch (ret) {
+ 	case 0: /* OK */
+diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c
+index 95f7a7e65a73fa..9b72d118d756dd 100644
+--- a/net/can/j1939/transport.c
++++ b/net/can/j1939/transport.c
+@@ -382,8 +382,9 @@ sk_buff *j1939_session_skb_get_by_offset(struct j1939_session *session,
+ 	skb_queue_walk(&session->skb_queue, do_skb) {
+ 		do_skcb = j1939_skb_to_cb(do_skb);
+ 
+-		if (offset_start >= do_skcb->offset &&
+-		    offset_start < (do_skcb->offset + do_skb->len)) {
++		if ((offset_start >= do_skcb->offset &&
++		     offset_start < (do_skcb->offset + do_skb->len)) ||
++		     (offset_start == 0 && do_skcb->offset == 0 && do_skb->len == 0)) {
+ 			skb = do_skb;
+ 		}
+ 	}
+diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
+index 154a2681f55cc6..388ff1d6d86b7a 100644
+--- a/net/core/fib_rules.c
++++ b/net/core/fib_rules.c
+@@ -37,8 +37,8 @@ static const struct fib_kuid_range fib_kuid_range_unset = {
+ 
+ bool fib_rule_matchall(const struct fib_rule *rule)
+ {
+-	if (rule->iifindex || rule->oifindex || rule->mark || rule->tun_id ||
+-	    rule->flags)
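++	/* ifindex fields change locklessly as devices register and unregister */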
++	if (READ_ONCE(rule->iifindex) || READ_ONCE(rule->oifindex) ||
++	    rule->mark || rule->tun_id || rule->flags)
+ 		return false;
+ 	if (rule->suppress_ifgroup != -1 || rule->suppress_prefixlen != -1)
+ 		return false;
+@@ -260,12 +260,14 @@ static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
+ 			  struct flowi *fl, int flags,
+ 			  struct fib_lookup_arg *arg)
+ {
+-	int ret = 0;
++	int iifindex, oifindex, ret = 0;
+ 
+-	if (rule->iifindex && (rule->iifindex != fl->flowi_iif))
++	iifindex = READ_ONCE(rule->iifindex);
++	if (iifindex && (iifindex != fl->flowi_iif))
+ 		goto out;
+ 
+-	if (rule->oifindex && (rule->oifindex != fl->flowi_oif))
++	oifindex = READ_ONCE(rule->oifindex);
++	if (oifindex && (oifindex != fl->flowi_oif))
+ 		goto out;
+ 
+ 	if ((rule->mark ^ fl->flowi_mark) & rule->mark_mask)
+@@ -1038,14 +1040,14 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
+ 	if (rule->iifname[0]) {
+ 		if (nla_put_string(skb, FRA_IIFNAME, rule->iifname))
+ 			goto nla_put_failure;
+-		if (rule->iifindex == -1)
++		if (READ_ONCE(rule->iifindex) == -1)
+ 			frh->flags |= FIB_RULE_IIF_DETACHED;
+ 	}
+ 
+ 	if (rule->oifname[0]) {
+ 		if (nla_put_string(skb, FRA_OIFNAME, rule->oifname))
+ 			goto nla_put_failure;
+-		if (rule->oifindex == -1)
++		if (READ_ONCE(rule->oifindex) == -1)
+ 			frh->flags |= FIB_RULE_OIF_DETACHED;
+ 	}
+ 
+@@ -1217,10 +1219,10 @@ static void attach_rules(struct list_head *rules, struct net_device *dev)
+ 	list_for_each_entry(rule, rules, list) {
+ 		if (rule->iifindex == -1 &&
+ 		    strcmp(dev->name, rule->iifname) == 0)
+-			rule->iifindex = dev->ifindex;
++			WRITE_ONCE(rule->iifindex, dev->ifindex);
+ 		if (rule->oifindex == -1 &&
+ 		    strcmp(dev->name, rule->oifname) == 0)
+-			rule->oifindex = dev->ifindex;
++			WRITE_ONCE(rule->oifindex, dev->ifindex);
+ 	}
+ }
+ 
+@@ -1230,9 +1232,9 @@ static void detach_rules(struct list_head *rules, struct net_device *dev)
+ 
+ 	list_for_each_entry(rule, rules, list) {
+ 		if (rule->iifindex == dev->ifindex)
+-			rule->iifindex = -1;
++			WRITE_ONCE(rule->iifindex, -1);
+ 		if (rule->oifindex == dev->ifindex)
+-			rule->oifindex = -1;
++			WRITE_ONCE(rule->oifindex, -1);
+ 	}
+ }
+ 
+diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
+index 0e638a37aa0961..5db41bf2ed93e0 100644
+--- a/net/core/flow_dissector.c
++++ b/net/core/flow_dissector.c
+@@ -1108,10 +1108,12 @@ bool __skb_flow_dissect(const struct net *net,
+ 					      FLOW_DISSECTOR_KEY_BASIC,
+ 					      target_container);
+ 
++	rcu_read_lock();
++
+ 	if (skb) {
+ 		if (!net) {
+ 			if (skb->dev)
+-				net = dev_net(skb->dev);
++				net = dev_net_rcu(skb->dev);
+ 			else if (skb->sk)
+ 				net = sock_net(skb->sk);
+ 		}
+@@ -1122,7 +1124,6 @@ bool __skb_flow_dissect(const struct net *net,
+ 		enum netns_bpf_attach_type type = NETNS_BPF_FLOW_DISSECTOR;
+ 		struct bpf_prog_array *run_array;
+ 
+-		rcu_read_lock();
+ 		run_array = rcu_dereference(init_net.bpf.run_array[type]);
+ 		if (!run_array)
+ 			run_array = rcu_dereference(net->bpf.run_array[type]);
+@@ -1150,17 +1151,17 @@ bool __skb_flow_dissect(const struct net *net,
+ 			prog = READ_ONCE(run_array->items[0].prog);
+ 			result = bpf_flow_dissect(prog, &ctx, n_proto, nhoff,
+ 						  hlen, flags);
+-			if (result == BPF_FLOW_DISSECTOR_CONTINUE)
+-				goto dissect_continue;
+-			__skb_flow_bpf_to_target(&flow_keys, flow_dissector,
+-						 target_container);
+-			rcu_read_unlock();
+-			return result == BPF_OK;
++			if (result != BPF_FLOW_DISSECTOR_CONTINUE) {
++				__skb_flow_bpf_to_target(&flow_keys, flow_dissector,
++							 target_container);
++				rcu_read_unlock();
++				return result == BPF_OK;
++			}
+ 		}
+-dissect_continue:
+-		rcu_read_unlock();
+ 	}
+ 
++	rcu_read_unlock();
++
+ 	if (dissector_uses_key(flow_dissector,
+ 			       FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ 		struct ethhdr *eth = eth_hdr(skb);
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c
+index cc58315a40a79c..c7f7ea61b524a2 100644
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -3513,10 +3513,12 @@ static const struct seq_operations neigh_stat_seq_ops = {
+ static void __neigh_notify(struct neighbour *n, int type, int flags,
+ 			   u32 pid)
+ {
+-	struct net *net = dev_net(n->dev);
+ 	struct sk_buff *skb;
+ 	int err = -ENOBUFS;
++	struct net *net;
+ 
++	rcu_read_lock();
++	net = dev_net_rcu(n->dev);
+ 	skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
+ 	if (skb == NULL)
+ 		goto errout;
+@@ -3529,9 +3531,11 @@ static void __neigh_notify(struct neighbour *n, int type, int flags,
+ 		goto errout;
+ 	}
+ 	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
+-	return;
++	goto out;
+ errout:
+ 	rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
++out:
++	rcu_read_unlock();
+ }
+ 
+ void neigh_app_ns(struct neighbour *n)
+diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
+index 11c1519b36993d..59ffaa89d7b05f 100644
+--- a/net/ipv4/arp.c
++++ b/net/ipv4/arp.c
+@@ -659,10 +659,12 @@ static int arp_xmit_finish(struct net *net, struct sock *sk, struct sk_buff *skb
+  */
+ void arp_xmit(struct sk_buff *skb)
+ {
++	rcu_read_lock();
+ 	/* Send it off, maybe filter it using firewalling first.  */
+ 	NF_HOOK(NFPROTO_ARP, NF_ARP_OUT,
+-		dev_net(skb->dev), NULL, skb, NULL, skb->dev,
++		dev_net_rcu(skb->dev), NULL, skb, NULL, skb->dev,
+ 		arp_xmit_finish);
++	rcu_read_unlock();
+ }
+ EXPORT_SYMBOL(arp_xmit);
+ 
+diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
+index 7cf5f7d0d0de23..a55e95046984da 100644
+--- a/net/ipv4/devinet.c
++++ b/net/ipv4/devinet.c
+@@ -1351,10 +1351,11 @@ __be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope)
+ 	__be32 addr = 0;
+ 	unsigned char localnet_scope = RT_SCOPE_HOST;
+ 	struct in_device *in_dev;
+-	struct net *net = dev_net(dev);
++	struct net *net;
+ 	int master_idx;
+ 
+ 	rcu_read_lock();
++	net = dev_net_rcu(dev);
+ 	in_dev = __in_dev_get_rcu(dev);
+ 	if (!in_dev)
+ 		goto no_in_dev;
+diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
+index 932bd775fc2682..f45bc187a92a7e 100644
+--- a/net/ipv4/icmp.c
++++ b/net/ipv4/icmp.c
+@@ -399,10 +399,10 @@ static void icmp_push_reply(struct sock *sk,
+ 
+ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
+ {
+-	struct ipcm_cookie ipc;
+ 	struct rtable *rt = skb_rtable(skb);
+-	struct net *net = dev_net(rt->dst.dev);
++	struct net *net = dev_net_rcu(rt->dst.dev);
+ 	bool apply_ratelimit = false;
++	struct ipcm_cookie ipc;
+ 	struct flowi4 fl4;
+ 	struct sock *sk;
+ 	struct inet_sock *inet;
+@@ -610,12 +610,14 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
+ 	struct sock *sk;
+ 
+ 	if (!rt)
+-		goto out;
++		return;
++
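++	/* RCU protects the device pointers dereferenced via dev_net_rcu() below */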
++	rcu_read_lock();
+ 
+ 	if (rt->dst.dev)
+-		net = dev_net(rt->dst.dev);
++		net = dev_net_rcu(rt->dst.dev);
+ 	else if (skb_in->dev)
+-		net = dev_net(skb_in->dev);
++		net = dev_net_rcu(skb_in->dev);
+ 	else
+ 		goto out;
+ 
+@@ -786,7 +788,8 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
+ 	icmp_xmit_unlock(sk);
+ out_bh_enable:
+ 	local_bh_enable();
+-out:;
++out:
++	rcu_read_unlock();
+ }
+ EXPORT_SYMBOL(__icmp_send);
+ 
+@@ -835,7 +838,7 @@ static void icmp_socket_deliver(struct sk_buff *skb, u32 info)
+ 	 * avoid additional coding at protocol handlers.
+ 	 */
+ 	if (!pskb_may_pull(skb, iph->ihl * 4 + 8)) {
+-		__ICMP_INC_STATS(dev_net(skb->dev), ICMP_MIB_INERRORS);
++		__ICMP_INC_STATS(dev_net_rcu(skb->dev), ICMP_MIB_INERRORS);
+ 		return;
+ 	}
+ 
+@@ -869,7 +872,7 @@ static enum skb_drop_reason icmp_unreach(struct sk_buff *skb)
+ 	struct net *net;
+ 	u32 info = 0;
+ 
+-	net = dev_net(skb_dst(skb)->dev);
++	net = dev_net_rcu(skb_dst(skb)->dev);
+ 
+ 	/*
+ 	 *	Incomplete header ?
+@@ -980,7 +983,7 @@ static enum skb_drop_reason icmp_unreach(struct sk_buff *skb)
+ static enum skb_drop_reason icmp_redirect(struct sk_buff *skb)
+ {
+ 	if (skb->len < sizeof(struct iphdr)) {
+-		__ICMP_INC_STATS(dev_net(skb->dev), ICMP_MIB_INERRORS);
++		__ICMP_INC_STATS(dev_net_rcu(skb->dev), ICMP_MIB_INERRORS);
+ 		return SKB_DROP_REASON_PKT_TOO_SMALL;
+ 	}
+ 
+@@ -1012,7 +1015,7 @@ static enum skb_drop_reason icmp_echo(struct sk_buff *skb)
+ 	struct icmp_bxm icmp_param;
+ 	struct net *net;
+ 
+-	net = dev_net(skb_dst(skb)->dev);
++	net = dev_net_rcu(skb_dst(skb)->dev);
+ 	/* should there be an ICMP stat for ignored echos? */
+ 	if (READ_ONCE(net->ipv4.sysctl_icmp_echo_ignore_all))
+ 		return SKB_NOT_DROPPED_YET;
+@@ -1041,9 +1044,9 @@ static enum skb_drop_reason icmp_echo(struct sk_buff *skb)
+ 
+ bool icmp_build_probe(struct sk_buff *skb, struct icmphdr *icmphdr)
+ {
++	struct net *net = dev_net_rcu(skb->dev);
+ 	struct icmp_ext_hdr *ext_hdr, _ext_hdr;
+ 	struct icmp_ext_echo_iio *iio, _iio;
+-	struct net *net = dev_net(skb->dev);
+ 	struct inet6_dev *in6_dev;
+ 	struct in_device *in_dev;
+ 	struct net_device *dev;
+@@ -1182,7 +1185,7 @@ static enum skb_drop_reason icmp_timestamp(struct sk_buff *skb)
+ 	return SKB_NOT_DROPPED_YET;
+ 
+ out_err:
+-	__ICMP_INC_STATS(dev_net(skb_dst(skb)->dev), ICMP_MIB_INERRORS);
++	__ICMP_INC_STATS(dev_net_rcu(skb_dst(skb)->dev), ICMP_MIB_INERRORS);
+ 	return SKB_DROP_REASON_PKT_TOO_SMALL;
+ }
+ 
+@@ -1199,7 +1202,7 @@ int icmp_rcv(struct sk_buff *skb)
+ {
+ 	enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED;
+ 	struct rtable *rt = skb_rtable(skb);
+-	struct net *net = dev_net(rt->dst.dev);
++	struct net *net = dev_net_rcu(rt->dst.dev);
+ 	struct icmphdr *icmph;
+ 
+ 	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
+@@ -1372,9 +1375,9 @@ int icmp_err(struct sk_buff *skb, u32 info)
+ 	struct iphdr *iph = (struct iphdr *)skb->data;
+ 	int offset = iph->ihl<<2;
+ 	struct icmphdr *icmph = (struct icmphdr *)(skb->data + offset);
++	struct net *net = dev_net_rcu(skb->dev);
+ 	int type = icmp_hdr(skb)->type;
+ 	int code = icmp_hdr(skb)->code;
+-	struct net *net = dev_net(skb->dev);
+ 
+ 	/*
+ 	 * Use ping_err to handle all icmp errors except those
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 2a27913588d05a..41b320f0c20ebf 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -390,7 +390,13 @@ static inline int ip_rt_proc_init(void)
+ 
+ static inline bool rt_is_expired(const struct rtable *rth)
+ {
+-	return rth->rt_genid != rt_genid_ipv4(dev_net(rth->dst.dev));
++	bool res;
++
++	rcu_read_lock();
++	res = rth->rt_genid != rt_genid_ipv4(dev_net_rcu(rth->dst.dev));
++	rcu_read_unlock();
++
++	return res;
+ }
+ 
+ void rt_cache_flush(struct net *net)
+@@ -1002,9 +1008,9 @@ out:	kfree_skb_reason(skb, reason);
+ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
+ {
+ 	struct dst_entry *dst = &rt->dst;
+-	struct net *net = dev_net(dst->dev);
+ 	struct fib_result res;
+ 	bool lock = false;
++	struct net *net;
+ 	u32 old_mtu;
+ 
+ 	if (ip_mtu_locked(dst))
+@@ -1014,6 +1020,8 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
+ 	if (old_mtu < mtu)
+ 		return;
+ 
++	rcu_read_lock();
++	net = dev_net_rcu(dst->dev);
+ 	if (mtu < net->ipv4.ip_rt_min_pmtu) {
+ 		lock = true;
+ 		mtu = min(old_mtu, net->ipv4.ip_rt_min_pmtu);
+@@ -1021,17 +1029,29 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
+ 
+ 	if (rt->rt_pmtu == mtu && !lock &&
+ 	    time_before(jiffies, dst->expires - net->ipv4.ip_rt_mtu_expires / 2))
+-		return;
++		goto out;
+ 
+-	rcu_read_lock();
+ 	if (fib_lookup(net, fl4, &res, 0) == 0) {
+ 		struct fib_nh_common *nhc;
+ 
+ 		fib_select_path(net, &res, fl4, NULL);
++#ifdef CONFIG_IP_ROUTE_MULTIPATH
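++		/* record the PMTU exception on every nexthop of a multipath route */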
++		if (fib_info_num_path(res.fi) > 1) {
++			int nhsel;
++
++			for (nhsel = 0; nhsel < fib_info_num_path(res.fi); nhsel++) {
++				nhc = fib_info_nhc(res.fi, nhsel);
++				update_or_create_fnhe(nhc, fl4->daddr, 0, mtu, lock,
++						      jiffies + net->ipv4.ip_rt_mtu_expires);
++			}
++			goto out;
++		}
++#endif /* CONFIG_IP_ROUTE_MULTIPATH */
+ 		nhc = FIB_RES_NHC(res);
+ 		update_or_create_fnhe(nhc, fl4->daddr, 0, mtu, lock,
+ 				      jiffies + net->ipv4.ip_rt_mtu_expires);
+ 	}
++out:
+ 	rcu_read_unlock();
+ }
+ 
+@@ -1294,10 +1314,15 @@ static void set_class_tag(struct rtable *rt, u32 tag)
+ 
+ static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
+ {
+-	struct net *net = dev_net(dst->dev);
+ 	unsigned int header_size = sizeof(struct tcphdr) + sizeof(struct iphdr);
+-	unsigned int advmss = max_t(unsigned int, ipv4_mtu(dst) - header_size,
+-				    net->ipv4.ip_rt_min_advmss);
++	unsigned int advmss;
++	struct net *net;
++
++	rcu_read_lock();
++	net = dev_net_rcu(dst->dev);
++	advmss = max_t(unsigned int, ipv4_mtu(dst) - header_size,
++				   net->ipv4.ip_rt_min_advmss);
++	rcu_read_unlock();
+ 
+ 	return min(advmss, IPV4_MAX_PMTU - header_size);
+ }
+diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
+index a6984a29fdb9dd..4d14ab7f7e99f1 100644
+--- a/net/ipv6/icmp.c
++++ b/net/ipv6/icmp.c
+@@ -76,7 +76,7 @@ static int icmpv6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ {
+ 	/* icmpv6_notify checks 8 bytes can be pulled, icmp6hdr is 8 bytes */
+ 	struct icmp6hdr *icmp6 = (struct icmp6hdr *) (skb->data + offset);
+-	struct net *net = dev_net(skb->dev);
++	struct net *net = dev_net_rcu(skb->dev);
+ 
+ 	if (type == ICMPV6_PKT_TOOBIG)
+ 		ip6_update_pmtu(skb, net, info, skb->dev->ifindex, 0, sock_net_uid(net, NULL));
+@@ -473,7 +473,10 @@ void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
+ 
+ 	if (!skb->dev)
+ 		return;
+-	net = dev_net(skb->dev);
++
++	rcu_read_lock();
++
++	net = dev_net_rcu(skb->dev);
+ 	mark = IP6_REPLY_MARK(net, skb->mark);
+ 	/*
+ 	 *	Make sure we respect the rules
+@@ -496,7 +499,7 @@ void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
+ 		    !(type == ICMPV6_PARAMPROB &&
+ 		      code == ICMPV6_UNK_OPTION &&
+ 		      (opt_unrec(skb, info))))
+-			return;
++			goto out;
+ 
+ 		saddr = NULL;
+ 	}
+@@ -526,7 +529,7 @@ void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
+ 	if ((addr_type == IPV6_ADDR_ANY) || (addr_type & IPV6_ADDR_MULTICAST)) {
+ 		net_dbg_ratelimited("icmp6_send: addr_any/mcast source [%pI6c > %pI6c]\n",
+ 				    &hdr->saddr, &hdr->daddr);
+-		return;
++		goto out;
+ 	}
+ 
+ 	/*
+@@ -535,7 +538,7 @@ void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
+ 	if (is_ineligible(skb)) {
+ 		net_dbg_ratelimited("icmp6_send: no reply to icmp error [%pI6c > %pI6c]\n",
+ 				    &hdr->saddr, &hdr->daddr);
+-		return;
++		goto out;
+ 	}
+ 
+ 	/* Needed by both icmpv6_global_allow and icmpv6_xmit_lock */
+@@ -582,7 +585,7 @@ void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
+ 	np = inet6_sk(sk);
+ 
+ 	if (!icmpv6_xrlim_allow(sk, type, &fl6, apply_ratelimit))
+-		goto out;
++		goto out_unlock;
+ 
+ 	tmp_hdr.icmp6_type = type;
+ 	tmp_hdr.icmp6_code = code;
+@@ -600,7 +603,7 @@ void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
+ 
+ 	dst = icmpv6_route_lookup(net, skb, sk, &fl6);
+ 	if (IS_ERR(dst))
+-		goto out;
++		goto out_unlock;
+ 
+ 	ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
+ 
+@@ -616,7 +619,6 @@ void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
+ 		goto out_dst_release;
+ 	}
+ 
+-	rcu_read_lock();
+ 	idev = __in6_dev_get(skb->dev);
+ 
+ 	if (ip6_append_data(sk, icmpv6_getfrag, &msg,
+@@ -630,13 +632,15 @@ void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
+ 		icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr,
+ 					   len + sizeof(struct icmp6hdr));
+ 	}
+-	rcu_read_unlock();
++
+ out_dst_release:
+ 	dst_release(dst);
+-out:
++out_unlock:
+ 	icmpv6_xmit_unlock(sk);
+ out_bh_enable:
+ 	local_bh_enable();
++out:
++	rcu_read_unlock();
+ }
+ EXPORT_SYMBOL(icmp6_send);
+ 
+@@ -679,8 +683,8 @@ int ip6_err_gen_icmpv6_unreach(struct sk_buff *skb, int nhs, int type,
+ 	skb_pull(skb2, nhs);
+ 	skb_reset_network_header(skb2);
+ 
+-	rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr, NULL, 0,
+-			skb, 0);
++	rt = rt6_lookup(dev_net_rcu(skb->dev), &ipv6_hdr(skb2)->saddr,
++			NULL, 0, skb, 0);
+ 
+ 	if (rt && rt->dst.dev)
+ 		skb2->dev = rt->dst.dev;
+@@ -717,7 +721,7 @@ EXPORT_SYMBOL(ip6_err_gen_icmpv6_unreach);
+ 
+ static enum skb_drop_reason icmpv6_echo_reply(struct sk_buff *skb)
+ {
+-	struct net *net = dev_net(skb->dev);
++	struct net *net = dev_net_rcu(skb->dev);
+ 	struct sock *sk;
+ 	struct inet6_dev *idev;
+ 	struct ipv6_pinfo *np;
+@@ -832,7 +836,7 @@ enum skb_drop_reason icmpv6_notify(struct sk_buff *skb, u8 type,
+ 				   u8 code, __be32 info)
+ {
+ 	struct inet6_skb_parm *opt = IP6CB(skb);
+-	struct net *net = dev_net(skb->dev);
++	struct net *net = dev_net_rcu(skb->dev);
+ 	const struct inet6_protocol *ipprot;
+ 	enum skb_drop_reason reason;
+ 	int inner_offset;
+@@ -889,7 +893,7 @@ enum skb_drop_reason icmpv6_notify(struct sk_buff *skb, u8 type,
+ static int icmpv6_rcv(struct sk_buff *skb)
+ {
+ 	enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED;
+-	struct net *net = dev_net(skb->dev);
++	struct net *net = dev_net_rcu(skb->dev);
+ 	struct net_device *dev = icmp6_dev(skb);
+ 	struct inet6_dev *idev = __in6_dev_get(dev);
+ 	const struct in6_addr *saddr, *daddr;
+@@ -921,7 +925,7 @@ static int icmpv6_rcv(struct sk_buff *skb)
+ 		skb_set_network_header(skb, nh);
+ 	}
+ 
+-	__ICMP6_INC_STATS(dev_net(dev), idev, ICMP6_MIB_INMSGS);
++	__ICMP6_INC_STATS(dev_net_rcu(dev), idev, ICMP6_MIB_INMSGS);
+ 
+ 	saddr = &ipv6_hdr(skb)->saddr;
+ 	daddr = &ipv6_hdr(skb)->daddr;
+@@ -939,7 +943,7 @@ static int icmpv6_rcv(struct sk_buff *skb)
+ 
+ 	type = hdr->icmp6_type;
+ 
+-	ICMP6MSGIN_INC_STATS(dev_net(dev), idev, type);
++	ICMP6MSGIN_INC_STATS(dev_net_rcu(dev), idev, type);
+ 
+ 	switch (type) {
+ 	case ICMPV6_ECHO_REQUEST:
+@@ -1034,9 +1038,9 @@ static int icmpv6_rcv(struct sk_buff *skb)
+ 
+ csum_error:
+ 	reason = SKB_DROP_REASON_ICMP_CSUM;
+-	__ICMP6_INC_STATS(dev_net(dev), idev, ICMP6_MIB_CSUMERRORS);
++	__ICMP6_INC_STATS(dev_net_rcu(dev), idev, ICMP6_MIB_CSUMERRORS);
+ discard_it:
+-	__ICMP6_INC_STATS(dev_net(dev), idev, ICMP6_MIB_INERRORS);
++	__ICMP6_INC_STATS(dev_net_rcu(dev), idev, ICMP6_MIB_INERRORS);
+ drop_no_count:
+ 	kfree_skb_reason(skb, reason);
+ 	return 0;
+diff --git a/net/ipv6/ioam6_iptunnel.c b/net/ipv6/ioam6_iptunnel.c
+index beb6b4cfc551cf..4215cebe7d85a9 100644
+--- a/net/ipv6/ioam6_iptunnel.c
++++ b/net/ipv6/ioam6_iptunnel.c
+@@ -255,14 +255,15 @@ static int ioam6_do_fill(struct net *net, struct sk_buff *skb)
+ }
+ 
+ static int ioam6_do_inline(struct net *net, struct sk_buff *skb,
+-			   struct ioam6_lwt_encap *tuninfo)
++			   struct ioam6_lwt_encap *tuninfo,
++			   struct dst_entry *cache_dst)
+ {
+ 	struct ipv6hdr *oldhdr, *hdr;
+ 	int hdrlen, err;
+ 
+ 	hdrlen = (tuninfo->eh.hdrlen + 1) << 3;
+ 
+-	err = skb_cow_head(skb, hdrlen + skb->mac_len);
++	err = skb_cow_head(skb, hdrlen + dst_dev_overhead(cache_dst, skb));
+ 	if (unlikely(err))
+ 		return err;
+ 
+@@ -293,7 +294,8 @@ static int ioam6_do_encap(struct net *net, struct sk_buff *skb,
+ 			  struct ioam6_lwt_encap *tuninfo,
+ 			  bool has_tunsrc,
+ 			  struct in6_addr *tunsrc,
+-			  struct in6_addr *tundst)
++			  struct in6_addr *tundst,
++			  struct dst_entry *cache_dst)
+ {
+ 	struct dst_entry *dst = skb_dst(skb);
+ 	struct ipv6hdr *hdr, *inner_hdr;
+@@ -302,7 +304,7 @@ static int ioam6_do_encap(struct net *net, struct sk_buff *skb,
+ 	hdrlen = (tuninfo->eh.hdrlen + 1) << 3;
+ 	len = sizeof(*hdr) + hdrlen;
+ 
+-	err = skb_cow_head(skb, len + skb->mac_len);
++	err = skb_cow_head(skb, len + dst_dev_overhead(cache_dst, skb));
+ 	if (unlikely(err))
+ 		return err;
+ 
+@@ -336,7 +338,7 @@ static int ioam6_do_encap(struct net *net, struct sk_buff *skb,
+ 
+ static int ioam6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+ {
+-	struct dst_entry *dst = skb_dst(skb);
++	struct dst_entry *dst = skb_dst(skb), *cache_dst = NULL;
+ 	struct in6_addr orig_daddr;
+ 	struct ioam6_lwt *ilwt;
+ 	int err = -EINVAL;
+@@ -354,6 +356,10 @@ static int ioam6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+ 
+ 	orig_daddr = ipv6_hdr(skb)->daddr;
+ 
++	local_bh_disable();
++	cache_dst = dst_cache_get(&ilwt->cache);
++	local_bh_enable();
++
+ 	switch (ilwt->mode) {
+ 	case IOAM6_IPTUNNEL_MODE_INLINE:
+ do_inline:
+@@ -361,7 +367,7 @@ static int ioam6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+ 		if (ipv6_hdr(skb)->nexthdr == NEXTHDR_HOP)
+ 			goto out;
+ 
+-		err = ioam6_do_inline(net, skb, &ilwt->tuninfo);
++		err = ioam6_do_inline(net, skb, &ilwt->tuninfo, cache_dst);
+ 		if (unlikely(err))
+ 			goto drop;
+ 
+@@ -371,7 +377,7 @@ static int ioam6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+ 		/* Encapsulation (ip6ip6) */
+ 		err = ioam6_do_encap(net, skb, &ilwt->tuninfo,
+ 				     ilwt->has_tunsrc, &ilwt->tunsrc,
+-				     &ilwt->tundst);
++				     &ilwt->tundst, cache_dst);
+ 		if (unlikely(err))
+ 			goto drop;
+ 
+@@ -389,46 +395,45 @@ static int ioam6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+ 		goto drop;
+ 	}
+ 
+-	err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
+-	if (unlikely(err))
+-		goto drop;
++	if (unlikely(!cache_dst)) {
++		struct ipv6hdr *hdr = ipv6_hdr(skb);
++		struct flowi6 fl6;
+ 
+-	if (!ipv6_addr_equal(&orig_daddr, &ipv6_hdr(skb)->daddr)) {
+-		local_bh_disable();
+-		dst = dst_cache_get(&ilwt->cache);
+-		local_bh_enable();
+-
+-		if (unlikely(!dst)) {
+-			struct ipv6hdr *hdr = ipv6_hdr(skb);
+-			struct flowi6 fl6;
+-
+-			memset(&fl6, 0, sizeof(fl6));
+-			fl6.daddr = hdr->daddr;
+-			fl6.saddr = hdr->saddr;
+-			fl6.flowlabel = ip6_flowinfo(hdr);
+-			fl6.flowi6_mark = skb->mark;
+-			fl6.flowi6_proto = hdr->nexthdr;
+-
+-			dst = ip6_route_output(net, NULL, &fl6);
+-			if (dst->error) {
+-				err = dst->error;
+-				dst_release(dst);
+-				goto drop;
+-			}
++		memset(&fl6, 0, sizeof(fl6));
++		fl6.daddr = hdr->daddr;
++		fl6.saddr = hdr->saddr;
++		fl6.flowlabel = ip6_flowinfo(hdr);
++		fl6.flowi6_mark = skb->mark;
++		fl6.flowi6_proto = hdr->nexthdr;
++
++		cache_dst = ip6_route_output(net, NULL, &fl6);
++		if (cache_dst->error) {
++			err = cache_dst->error;
++			goto drop;
++		}
+ 
++		/* cache only if we don't create a dst reference loop */
++		if (dst->lwtstate != cache_dst->lwtstate) {
+ 			local_bh_disable();
+-			dst_cache_set_ip6(&ilwt->cache, dst, &fl6.saddr);
++			dst_cache_set_ip6(&ilwt->cache, cache_dst, &fl6.saddr);
+ 			local_bh_enable();
+ 		}
+ 
+-		skb_dst_drop(skb);
+-		skb_dst_set(skb, dst);
++		err = skb_cow_head(skb, LL_RESERVED_SPACE(cache_dst->dev));
++		if (unlikely(err))
++			goto drop;
++	}
+ 
++	if (!ipv6_addr_equal(&orig_daddr, &ipv6_hdr(skb)->daddr)) {
++		skb_dst_drop(skb);
++		skb_dst_set(skb, cache_dst);
+ 		return dst_output(net, sk, skb);
+ 	}
+ out:
++	dst_release(cache_dst);
+ 	return dst->lwtstate->orig_output(net, sk, skb);
+ drop:
++	dst_release(cache_dst);
+ 	kfree_skb(skb);
+ 	return err;
+ }
+diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
+index b244dbf61d5f39..b7b62e5a562e5d 100644
+--- a/net/ipv6/mcast.c
++++ b/net/ipv6/mcast.c
+@@ -1730,21 +1730,19 @@ static struct sk_buff *mld_newpack(struct inet6_dev *idev, unsigned int mtu)
+ 	struct net_device *dev = idev->dev;
+ 	int hlen = LL_RESERVED_SPACE(dev);
+ 	int tlen = dev->needed_tailroom;
+-	struct net *net = dev_net(dev);
+ 	const struct in6_addr *saddr;
+ 	struct in6_addr addr_buf;
+ 	struct mld2_report *pmr;
+ 	struct sk_buff *skb;
+ 	unsigned int size;
+ 	struct sock *sk;
+-	int err;
++	struct net *net;
+ 
+-	sk = net->ipv6.igmp_sk;
+ 	/* we assume size > sizeof(ra) here
+ 	 * Also try to not allocate high-order pages for big MTU
+ 	 */
+ 	size = min_t(int, mtu, PAGE_SIZE / 2) + hlen + tlen;
+-	skb = sock_alloc_send_skb(sk, size, 1, &err);
++	skb = alloc_skb(size, GFP_KERNEL);
+ 	if (!skb)
+ 		return NULL;
+ 
+@@ -1752,6 +1750,12 @@ static struct sk_buff *mld_newpack(struct inet6_dev *idev, unsigned int mtu)
+ 	skb_reserve(skb, hlen);
+ 	skb_tailroom_reserve(skb, mtu, tlen);
+ 
++	rcu_read_lock();
++
++	net = dev_net_rcu(dev);
++	sk = net->ipv6.igmp_sk;
++	skb_set_owner_w(skb, sk);
++
+ 	if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) {
+ 		/* <draft-ietf-magma-mld-source-05.txt>:
+ 		 * use unspecified address as the source address
+@@ -1763,6 +1767,8 @@ static struct sk_buff *mld_newpack(struct inet6_dev *idev, unsigned int mtu)
+ 
+ 	ip6_mc_hdr(sk, skb, dev, saddr, &mld2_all_mcr, NEXTHDR_HOP, 0);
+ 
++	rcu_read_unlock();
++
+ 	skb_put_data(skb, ra, sizeof(ra));
+ 
+ 	skb_set_transport_header(skb, skb_tail_pointer(skb) - skb->data);
+@@ -2122,21 +2128,21 @@ static void mld_send_cr(struct inet6_dev *idev)
+ 
+ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
+ {
+-	struct net *net = dev_net(dev);
+-	struct sock *sk = net->ipv6.igmp_sk;
++	const struct in6_addr *snd_addr, *saddr;
++	int err, len, payload_len, full_len;
++	struct in6_addr addr_buf;
+ 	struct inet6_dev *idev;
+ 	struct sk_buff *skb;
+ 	struct mld_msg *hdr;
+-	const struct in6_addr *snd_addr, *saddr;
+-	struct in6_addr addr_buf;
+ 	int hlen = LL_RESERVED_SPACE(dev);
+ 	int tlen = dev->needed_tailroom;
+-	int err, len, payload_len, full_len;
+ 	u8 ra[8] = { IPPROTO_ICMPV6, 0,
+ 		     IPV6_TLV_ROUTERALERT, 2, 0, 0,
+ 		     IPV6_TLV_PADN, 0 };
+-	struct flowi6 fl6;
+ 	struct dst_entry *dst;
++	struct flowi6 fl6;
++	struct net *net;
++	struct sock *sk;
+ 
+ 	if (type == ICMPV6_MGM_REDUCTION)
+ 		snd_addr = &in6addr_linklocal_allrouters;
+@@ -2147,19 +2153,21 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
+ 	payload_len = len + sizeof(ra);
+ 	full_len = sizeof(struct ipv6hdr) + payload_len;
+ 
+-	rcu_read_lock();
+-	IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_OUTREQUESTS);
+-	rcu_read_unlock();
++	skb = alloc_skb(hlen + tlen + full_len, GFP_KERNEL);
+ 
+-	skb = sock_alloc_send_skb(sk, hlen + tlen + full_len, 1, &err);
++	rcu_read_lock();
+ 
++	net = dev_net_rcu(dev);
++	idev = __in6_dev_get(dev);
++	IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTREQUESTS);
+ 	if (!skb) {
+-		rcu_read_lock();
+-		IP6_INC_STATS(net, __in6_dev_get(dev),
+-			      IPSTATS_MIB_OUTDISCARDS);
++		IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
+ 		rcu_read_unlock();
+ 		return;
+ 	}
++	sk = net->ipv6.igmp_sk;
++	skb_set_owner_w(skb, sk);
++
+ 	skb->priority = TC_PRIO_CONTROL;
+ 	skb_reserve(skb, hlen);
+ 
+@@ -2184,9 +2192,6 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
+ 					 IPPROTO_ICMPV6,
+ 					 csum_partial(hdr, len, 0));
+ 
+-	rcu_read_lock();
+-	idev = __in6_dev_get(skb->dev);
+-
+ 	icmpv6_flow_init(sk, &fl6, type,
+ 			 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
+ 			 skb->dev->ifindex);
+diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
+index d044c67019de6d..8699d1a188dc4a 100644
+--- a/net/ipv6/ndisc.c
++++ b/net/ipv6/ndisc.c
+@@ -418,15 +418,11 @@ static struct sk_buff *ndisc_alloc_skb(struct net_device *dev,
+ {
+ 	int hlen = LL_RESERVED_SPACE(dev);
+ 	int tlen = dev->needed_tailroom;
+-	struct sock *sk = dev_net(dev)->ipv6.ndisc_sk;
+ 	struct sk_buff *skb;
+ 
+ 	skb = alloc_skb(hlen + sizeof(struct ipv6hdr) + len + tlen, GFP_ATOMIC);
+-	if (!skb) {
+-		ND_PRINTK(0, err, "ndisc: %s failed to allocate an skb\n",
+-			  __func__);
++	if (!skb)
+ 		return NULL;
+-	}
+ 
+ 	skb->protocol = htons(ETH_P_IPV6);
+ 	skb->dev = dev;
+@@ -437,7 +433,9 @@ static struct sk_buff *ndisc_alloc_skb(struct net_device *dev,
+ 	/* Manually assign socket ownership as we avoid calling
+ 	 * sock_alloc_send_pskb() to bypass wmem buffer limits
+ 	 */
+-	skb_set_owner_w(skb, sk);
++	rcu_read_lock();
++	skb_set_owner_w(skb, dev_net_rcu(dev)->ipv6.ndisc_sk);
++	rcu_read_unlock();
+ 
+ 	return skb;
+ }
+@@ -473,16 +471,20 @@ static void ip6_nd_hdr(struct sk_buff *skb,
+ void ndisc_send_skb(struct sk_buff *skb, const struct in6_addr *daddr,
+ 		    const struct in6_addr *saddr)
+ {
++	struct icmp6hdr *icmp6h = icmp6_hdr(skb);
+ 	struct dst_entry *dst = skb_dst(skb);
+-	struct net *net = dev_net(skb->dev);
+-	struct sock *sk = net->ipv6.ndisc_sk;
+ 	struct inet6_dev *idev;
++	struct net *net;
++	struct sock *sk;
+ 	int err;
+-	struct icmp6hdr *icmp6h = icmp6_hdr(skb);
+ 	u8 type;
+ 
+ 	type = icmp6h->icmp6_type;
+ 
++	rcu_read_lock();
++
++	net = dev_net_rcu(skb->dev);
++	sk = net->ipv6.ndisc_sk;
+ 	if (!dst) {
+ 		struct flowi6 fl6;
+ 		int oif = skb->dev->ifindex;
+@@ -490,6 +492,7 @@ void ndisc_send_skb(struct sk_buff *skb, const struct in6_addr *daddr,
+ 		icmpv6_flow_init(sk, &fl6, type, saddr, daddr, oif);
+ 		dst = icmp6_dst_alloc(skb->dev, &fl6);
+ 		if (IS_ERR(dst)) {
++			rcu_read_unlock();
+ 			kfree_skb(skb);
+ 			return;
+ 		}
+@@ -504,7 +507,6 @@ void ndisc_send_skb(struct sk_buff *skb, const struct in6_addr *daddr,
+ 
+ 	ip6_nd_hdr(skb, saddr, daddr, READ_ONCE(inet6_sk(sk)->hop_limit), skb->len);
+ 
+-	rcu_read_lock();
+ 	idev = __in6_dev_get(dst->dev);
+ 	IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTREQUESTS);
+ 
+@@ -1694,7 +1696,7 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
+ 	bool ret;
+ 
+ 	if (netif_is_l3_master(skb->dev)) {
+-		dev = __dev_get_by_index(dev_net(skb->dev), IPCB(skb)->iif);
++		dev = dev_get_by_index_rcu(dev_net(skb->dev), IPCB(skb)->iif);
+ 		if (!dev)
+ 			return;
+ 	}
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 8ebfed5d63232e..2736dea77575b5 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -3196,13 +3196,18 @@ static unsigned int ip6_default_advmss(const struct dst_entry *dst)
+ {
+ 	struct net_device *dev = dst->dev;
+ 	unsigned int mtu = dst_mtu(dst);
+-	struct net *net = dev_net(dev);
++	struct net *net;
+ 
+ 	mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
+ 
++	rcu_read_lock();
++
++	net = dev_net_rcu(dev);
+ 	if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss)
+ 		mtu = net->ipv6.sysctl.ip6_rt_min_advmss;
+ 
++	rcu_read_unlock();
++
+ 	/*
+ 	 * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
+ 	 * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
+diff --git a/net/ipv6/rpl_iptunnel.c b/net/ipv6/rpl_iptunnel.c
+index db3c19a42e1ca7..0ac4283acdf20c 100644
+--- a/net/ipv6/rpl_iptunnel.c
++++ b/net/ipv6/rpl_iptunnel.c
+@@ -125,7 +125,8 @@ static void rpl_destroy_state(struct lwtunnel_state *lwt)
+ }
+ 
+ static int rpl_do_srh_inline(struct sk_buff *skb, const struct rpl_lwt *rlwt,
+-			     const struct ipv6_rpl_sr_hdr *srh)
++			     const struct ipv6_rpl_sr_hdr *srh,
++			     struct dst_entry *cache_dst)
+ {
+ 	struct ipv6_rpl_sr_hdr *isrh, *csrh;
+ 	const struct ipv6hdr *oldhdr;
+@@ -153,7 +154,7 @@ static int rpl_do_srh_inline(struct sk_buff *skb, const struct rpl_lwt *rlwt,
+ 
+ 	hdrlen = ((csrh->hdrlen + 1) << 3);
+ 
+-	err = skb_cow_head(skb, hdrlen + skb->mac_len);
++	err = skb_cow_head(skb, hdrlen + dst_dev_overhead(cache_dst, skb));
+ 	if (unlikely(err)) {
+ 		kfree(buf);
+ 		return err;
+@@ -186,7 +187,8 @@ static int rpl_do_srh_inline(struct sk_buff *skb, const struct rpl_lwt *rlwt,
+ 	return 0;
+ }
+ 
+-static int rpl_do_srh(struct sk_buff *skb, const struct rpl_lwt *rlwt)
++static int rpl_do_srh(struct sk_buff *skb, const struct rpl_lwt *rlwt,
++		      struct dst_entry *cache_dst)
+ {
+ 	struct dst_entry *dst = skb_dst(skb);
+ 	struct rpl_iptunnel_encap *tinfo;
+@@ -196,7 +198,7 @@ static int rpl_do_srh(struct sk_buff *skb, const struct rpl_lwt *rlwt)
+ 
+ 	tinfo = rpl_encap_lwtunnel(dst->lwtstate);
+ 
+-	return rpl_do_srh_inline(skb, rlwt, tinfo->srh);
++	return rpl_do_srh_inline(skb, rlwt, tinfo->srh, cache_dst);
+ }
+ 
+ static int rpl_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+@@ -208,14 +210,14 @@ static int rpl_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+ 
+ 	rlwt = rpl_lwt_lwtunnel(orig_dst->lwtstate);
+ 
+-	err = rpl_do_srh(skb, rlwt);
+-	if (unlikely(err))
+-		goto drop;
+-
+ 	local_bh_disable();
+ 	dst = dst_cache_get(&rlwt->cache);
+ 	local_bh_enable();
+ 
++	err = rpl_do_srh(skb, rlwt, dst);
++	if (unlikely(err))
++		goto drop;
++
+ 	if (unlikely(!dst)) {
+ 		struct ipv6hdr *hdr = ipv6_hdr(skb);
+ 		struct flowi6 fl6;
+@@ -230,25 +232,28 @@ static int rpl_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+ 		dst = ip6_route_output(net, NULL, &fl6);
+ 		if (dst->error) {
+ 			err = dst->error;
+-			dst_release(dst);
+ 			goto drop;
+ 		}
+ 
+-		local_bh_disable();
+-		dst_cache_set_ip6(&rlwt->cache, dst, &fl6.saddr);
+-		local_bh_enable();
++		/* cache only if we don't create a dst reference loop */
++		if (orig_dst->lwtstate != dst->lwtstate) {
++			local_bh_disable();
++			dst_cache_set_ip6(&rlwt->cache, dst, &fl6.saddr);
++			local_bh_enable();
++		}
++
++		err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
++		if (unlikely(err))
++			goto drop;
+ 	}
+ 
+ 	skb_dst_drop(skb);
+ 	skb_dst_set(skb, dst);
+ 
+-	err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
+-	if (unlikely(err))
+-		goto drop;
+-
+ 	return dst_output(net, sk, skb);
+ 
+ drop:
++	dst_release(dst);
+ 	kfree_skb(skb);
+ 	return err;
+ }
+@@ -262,29 +267,33 @@ static int rpl_input(struct sk_buff *skb)
+ 
+ 	rlwt = rpl_lwt_lwtunnel(orig_dst->lwtstate);
+ 
+-	err = rpl_do_srh(skb, rlwt);
+-	if (unlikely(err))
+-		goto drop;
+-
+ 	local_bh_disable();
+ 	dst = dst_cache_get(&rlwt->cache);
++	local_bh_enable();
++
++	err = rpl_do_srh(skb, rlwt, dst);
++	if (unlikely(err)) {
++		dst_release(dst);
++		goto drop;
++	}
+ 
+ 	if (!dst) {
+ 		ip6_route_input(skb);
+ 		dst = skb_dst(skb);
+ 		if (!dst->error) {
++			local_bh_disable();
+ 			dst_cache_set_ip6(&rlwt->cache, dst,
+ 					  &ipv6_hdr(skb)->saddr);
++			local_bh_enable();
+ 		}
++
++		err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
++		if (unlikely(err))
++			goto drop;
+ 	} else {
+ 		skb_dst_drop(skb);
+ 		skb_dst_set(skb, dst);
+ 	}
+-	local_bh_enable();
+-
+-	err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
+-	if (unlikely(err))
+-		goto drop;
+ 
+ 	return dst_input(skb);
+ 
+diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
+index 098632adc9b5af..33833b2064c072 100644
+--- a/net/ipv6/seg6_iptunnel.c
++++ b/net/ipv6/seg6_iptunnel.c
+@@ -124,8 +124,8 @@ static __be32 seg6_make_flowlabel(struct net *net, struct sk_buff *skb,
+ 	return flowlabel;
+ }
+ 
+-/* encapsulate an IPv6 packet within an outer IPv6 header with a given SRH */
+-int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
++static int __seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh,
++			       int proto, struct dst_entry *cache_dst)
+ {
+ 	struct dst_entry *dst = skb_dst(skb);
+ 	struct net *net = dev_net(dst->dev);
+@@ -137,7 +137,7 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
+ 	hdrlen = (osrh->hdrlen + 1) << 3;
+ 	tot_len = hdrlen + sizeof(*hdr);
+ 
+-	err = skb_cow_head(skb, tot_len + skb->mac_len);
++	err = skb_cow_head(skb, tot_len + dst_dev_overhead(cache_dst, skb));
+ 	if (unlikely(err))
+ 		return err;
+ 
+@@ -197,11 +197,18 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
+ 
+ 	return 0;
+ }
++
++/* encapsulate an IPv6 packet within an outer IPv6 header with a given SRH */
++int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
++{
++	return __seg6_do_srh_encap(skb, osrh, proto, NULL);
++}
+ EXPORT_SYMBOL_GPL(seg6_do_srh_encap);
+ 
+ /* encapsulate an IPv6 packet within an outer IPv6 header with reduced SRH */
+ static int seg6_do_srh_encap_red(struct sk_buff *skb,
+-				 struct ipv6_sr_hdr *osrh, int proto)
++				 struct ipv6_sr_hdr *osrh, int proto,
++				 struct dst_entry *cache_dst)
+ {
+ 	__u8 first_seg = osrh->first_segment;
+ 	struct dst_entry *dst = skb_dst(skb);
+@@ -230,7 +237,7 @@ static int seg6_do_srh_encap_red(struct sk_buff *skb,
+ 
+ 	tot_len = red_hdrlen + sizeof(struct ipv6hdr);
+ 
+-	err = skb_cow_head(skb, tot_len + skb->mac_len);
++	err = skb_cow_head(skb, tot_len + dst_dev_overhead(cache_dst, skb));
+ 	if (unlikely(err))
+ 		return err;
+ 
+@@ -317,8 +324,8 @@ static int seg6_do_srh_encap_red(struct sk_buff *skb,
+ 	return 0;
+ }
+ 
+-/* insert an SRH within an IPv6 packet, just after the IPv6 header */
+-int seg6_do_srh_inline(struct sk_buff *skb, struct ipv6_sr_hdr *osrh)
++static int __seg6_do_srh_inline(struct sk_buff *skb, struct ipv6_sr_hdr *osrh,
++				struct dst_entry *cache_dst)
+ {
+ 	struct ipv6hdr *hdr, *oldhdr;
+ 	struct ipv6_sr_hdr *isrh;
+@@ -326,7 +333,7 @@ int seg6_do_srh_inline(struct sk_buff *skb, struct ipv6_sr_hdr *osrh)
+ 
+ 	hdrlen = (osrh->hdrlen + 1) << 3;
+ 
+-	err = skb_cow_head(skb, hdrlen + skb->mac_len);
++	err = skb_cow_head(skb, hdrlen + dst_dev_overhead(cache_dst, skb));
+ 	if (unlikely(err))
+ 		return err;
+ 
+@@ -369,9 +376,8 @@ int seg6_do_srh_inline(struct sk_buff *skb, struct ipv6_sr_hdr *osrh)
+ 
+ 	return 0;
+ }
+-EXPORT_SYMBOL_GPL(seg6_do_srh_inline);
+ 
+-static int seg6_do_srh(struct sk_buff *skb)
++static int seg6_do_srh(struct sk_buff *skb, struct dst_entry *cache_dst)
+ {
+ 	struct dst_entry *dst = skb_dst(skb);
+ 	struct seg6_iptunnel_encap *tinfo;
+@@ -384,7 +390,7 @@ static int seg6_do_srh(struct sk_buff *skb)
+ 		if (skb->protocol != htons(ETH_P_IPV6))
+ 			return -EINVAL;
+ 
+-		err = seg6_do_srh_inline(skb, tinfo->srh);
++		err = __seg6_do_srh_inline(skb, tinfo->srh, cache_dst);
+ 		if (err)
+ 			return err;
+ 		break;
+@@ -402,9 +408,11 @@ static int seg6_do_srh(struct sk_buff *skb)
+ 			return -EINVAL;
+ 
+ 		if (tinfo->mode == SEG6_IPTUN_MODE_ENCAP)
+-			err = seg6_do_srh_encap(skb, tinfo->srh, proto);
++			err = __seg6_do_srh_encap(skb, tinfo->srh,
++						  proto, cache_dst);
+ 		else
+-			err = seg6_do_srh_encap_red(skb, tinfo->srh, proto);
++			err = seg6_do_srh_encap_red(skb, tinfo->srh,
++						    proto, cache_dst);
+ 
+ 		if (err)
+ 			return err;
+@@ -425,11 +433,13 @@ static int seg6_do_srh(struct sk_buff *skb)
+ 		skb_push(skb, skb->mac_len);
+ 
+ 		if (tinfo->mode == SEG6_IPTUN_MODE_L2ENCAP)
+-			err = seg6_do_srh_encap(skb, tinfo->srh,
+-						IPPROTO_ETHERNET);
++			err = __seg6_do_srh_encap(skb, tinfo->srh,
++						  IPPROTO_ETHERNET,
++						  cache_dst);
+ 		else
+ 			err = seg6_do_srh_encap_red(skb, tinfo->srh,
+-						    IPPROTO_ETHERNET);
++						    IPPROTO_ETHERNET,
++						    cache_dst);
+ 
+ 		if (err)
+ 			return err;
+@@ -444,6 +454,13 @@ static int seg6_do_srh(struct sk_buff *skb)
+ 	return 0;
+ }
+ 
++/* insert an SRH within an IPv6 packet, just after the IPv6 header */
++int seg6_do_srh_inline(struct sk_buff *skb, struct ipv6_sr_hdr *osrh)
++{
++	return __seg6_do_srh_inline(skb, osrh, NULL);
++}
++EXPORT_SYMBOL_GPL(seg6_do_srh_inline);
++
+ static int seg6_input_finish(struct net *net, struct sock *sk,
+ 			     struct sk_buff *skb)
+ {
+@@ -458,31 +475,35 @@ static int seg6_input_core(struct net *net, struct sock *sk,
+ 	struct seg6_lwt *slwt;
+ 	int err;
+ 
+-	err = seg6_do_srh(skb);
+-	if (unlikely(err))
+-		goto drop;
+-
+ 	slwt = seg6_lwt_lwtunnel(orig_dst->lwtstate);
+ 
+ 	local_bh_disable();
+ 	dst = dst_cache_get(&slwt->cache);
++	local_bh_enable();
++
++	err = seg6_do_srh(skb, dst);
++	if (unlikely(err)) {
++		dst_release(dst);
++		goto drop;
++	}
+ 
+ 	if (!dst) {
+ 		ip6_route_input(skb);
+ 		dst = skb_dst(skb);
+ 		if (!dst->error) {
++			local_bh_disable();
+ 			dst_cache_set_ip6(&slwt->cache, dst,
+ 					  &ipv6_hdr(skb)->saddr);
++			local_bh_enable();
+ 		}
++
++		err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
++		if (unlikely(err))
++			goto drop;
+ 	} else {
+ 		skb_dst_drop(skb);
+ 		skb_dst_set(skb, dst);
+ 	}
+-	local_bh_enable();
+-
+-	err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
+-	if (unlikely(err))
+-		goto drop;
+ 
+ 	if (static_branch_unlikely(&nf_hooks_lwtunnel_enabled))
+ 		return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
+@@ -528,16 +549,16 @@ static int seg6_output_core(struct net *net, struct sock *sk,
+ 	struct seg6_lwt *slwt;
+ 	int err;
+ 
+-	err = seg6_do_srh(skb);
+-	if (unlikely(err))
+-		goto drop;
+-
+ 	slwt = seg6_lwt_lwtunnel(orig_dst->lwtstate);
+ 
+ 	local_bh_disable();
+ 	dst = dst_cache_get(&slwt->cache);
+ 	local_bh_enable();
+ 
++	err = seg6_do_srh(skb, dst);
++	if (unlikely(err))
++		goto drop;
++
+ 	if (unlikely(!dst)) {
+ 		struct ipv6hdr *hdr = ipv6_hdr(skb);
+ 		struct flowi6 fl6;
+@@ -552,28 +573,31 @@ static int seg6_output_core(struct net *net, struct sock *sk,
+ 		dst = ip6_route_output(net, NULL, &fl6);
+ 		if (dst->error) {
+ 			err = dst->error;
+-			dst_release(dst);
+ 			goto drop;
+ 		}
+ 
+-		local_bh_disable();
+-		dst_cache_set_ip6(&slwt->cache, dst, &fl6.saddr);
+-		local_bh_enable();
++		/* cache only if we don't create a dst reference loop */
++		if (orig_dst->lwtstate != dst->lwtstate) {
++			local_bh_disable();
++			dst_cache_set_ip6(&slwt->cache, dst, &fl6.saddr);
++			local_bh_enable();
++		}
++
++		err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
++		if (unlikely(err))
++			goto drop;
+ 	}
+ 
+ 	skb_dst_drop(skb);
+ 	skb_dst_set(skb, dst);
+ 
+-	err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
+-	if (unlikely(err))
+-		goto drop;
+-
+ 	if (static_branch_unlikely(&nf_hooks_lwtunnel_enabled))
+ 		return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk, skb,
+ 			       NULL, skb_dst(skb)->dev, dst_output);
+ 
+ 	return dst_output(net, sk, skb);
+ drop:
++	dst_release(dst);
+ 	kfree_skb(skb);
+ 	return err;
+ }
+diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
+index 78d9961fcd446d..8d3c01f0e2aa19 100644
+--- a/net/openvswitch/datapath.c
++++ b/net/openvswitch/datapath.c
+@@ -2102,6 +2102,7 @@ static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
+ {
+ 	struct ovs_header *ovs_header;
+ 	struct ovs_vport_stats vport_stats;
++	struct net *net_vport;
+ 	int err;
+ 
+ 	ovs_header = genlmsg_put(skb, portid, seq, &dp_vport_genl_family,
+@@ -2118,12 +2119,15 @@ static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
+ 	    nla_put_u32(skb, OVS_VPORT_ATTR_IFINDEX, vport->dev->ifindex))
+ 		goto nla_put_failure;
+ 
+-	if (!net_eq(net, dev_net(vport->dev))) {
+-		int id = peernet2id_alloc(net, dev_net(vport->dev), gfp);
++	rcu_read_lock();
++	net_vport = dev_net_rcu(vport->dev);
++	if (!net_eq(net, net_vport)) {
++		int id = peernet2id_alloc(net, net_vport, GFP_ATOMIC);
+ 
+ 		if (nla_put_s32(skb, OVS_VPORT_ATTR_NETNSID, id))
+-			goto nla_put_failure;
++			goto nla_put_failure_unlock;
+ 	}
++	rcu_read_unlock();
+ 
+ 	ovs_vport_get_stats(vport, &vport_stats);
+ 	if (nla_put_64bit(skb, OVS_VPORT_ATTR_STATS,
+@@ -2144,6 +2148,8 @@ static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
+ 	genlmsg_end(skb, ovs_header);
+ 	return 0;
+ 
++nla_put_failure_unlock:
++	rcu_read_unlock();
+ nla_put_failure:
+ 	err = -EMSGSIZE;
+ error:
+diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
+index f5d116a1bdea1a..37299a7ca1876e 100644
+--- a/net/vmw_vsock/af_vsock.c
++++ b/net/vmw_vsock/af_vsock.c
+@@ -337,7 +337,10 @@ EXPORT_SYMBOL_GPL(vsock_find_connected_socket);
+ 
+ void vsock_remove_sock(struct vsock_sock *vsk)
+ {
+-	vsock_remove_bound(vsk);
++	/* Transport reassignment must not remove the binding. */
++	if (sock_flag(sk_vsock(vsk), SOCK_DEAD))
++		vsock_remove_bound(vsk);
++
+ 	vsock_remove_connected(vsk);
+ }
+ EXPORT_SYMBOL_GPL(vsock_remove_sock);
+@@ -821,6 +824,13 @@ static void __vsock_release(struct sock *sk, int level)
+ 	 */
+ 	lock_sock_nested(sk, level);
+ 
++	/* Indicate to vsock_remove_sock() that the socket is being released and
++	 * can be removed from the bound_table. Unlike transport reassignment
++	 * case, where the socket must remain bound despite vsock_remove_sock()
++	 * being called from the transport release() callback.
++	 */
++	sock_set_flag(sk, SOCK_DEAD);
++
+ 	if (vsk->transport)
+ 		vsk->transport->release(vsk);
+ 	else if (sock_type_connectible(sk->sk_type))
+diff --git a/rust/Makefile b/rust/Makefile
+index 9f59baacaf7730..45779a064fa4f4 100644
+--- a/rust/Makefile
++++ b/rust/Makefile
+@@ -229,6 +229,7 @@ bindgen_skip_c_flags := -mno-fp-ret-in-387 -mpreferred-stack-boundary=% \
+ 	-fzero-call-used-regs=% -fno-stack-clash-protection \
+ 	-fno-inline-functions-called-once -fsanitize=bounds-strict \
+ 	-fstrict-flex-arrays=% -fmin-function-alignment=% \
++	-fzero-init-padding-bits=% \
+ 	--param=% --param asan-%
+ 
+ # Derived from `scripts/Makefile.clang`.
+diff --git a/rust/kernel/rbtree.rs b/rust/kernel/rbtree.rs
+index d03e4aa1f4812b..7543378d372927 100644
+--- a/rust/kernel/rbtree.rs
++++ b/rust/kernel/rbtree.rs
+@@ -1147,7 +1147,7 @@ pub struct VacantEntry<'a, K, V> {
+ /// # Invariants
+ /// - `parent` may be null if the new node becomes the root.
+ /// - `child_field_of_parent` is a valid pointer to the left-child or right-child of `parent`. If `parent` is
+-///     null, it is a pointer to the root of the [`RBTree`].
++///   null, it is a pointer to the root of the [`RBTree`].
+ struct RawVacantEntry<'a, K, V> {
+     rbtree: *mut RBTree<K, V>,
+     /// The node that will become the parent of the new node if we insert one.
+diff --git a/scripts/Makefile.defconf b/scripts/Makefile.defconf
+index 226ea3df3b4b4c..a44307f08e9d68 100644
+--- a/scripts/Makefile.defconf
++++ b/scripts/Makefile.defconf
+@@ -1,6 +1,11 @@
+ # SPDX-License-Identifier: GPL-2.0
+ # Configuration heplers
+ 
++cmd_merge_fragments = \
++	$(srctree)/scripts/kconfig/merge_config.sh \
++	$4 -m -O $(objtree) $(srctree)/arch/$(SRCARCH)/configs/$2 \
++	$(foreach config,$3,$(srctree)/arch/$(SRCARCH)/configs/$(config).config)
++
+ # Creates 'merged defconfigs'
+ # ---------------------------------------------------------------------------
+ # Usage:
+@@ -8,9 +13,7 @@
+ #
+ # Input config fragments without '.config' suffix
+ define merge_into_defconfig
+-	$(Q)$(CONFIG_SHELL) $(srctree)/scripts/kconfig/merge_config.sh \
+-		-m -O $(objtree) $(srctree)/arch/$(SRCARCH)/configs/$(1) \
+-		$(foreach config,$(2),$(srctree)/arch/$(SRCARCH)/configs/$(config).config)
++	$(call cmd,merge_fragments,$1,$2)
+ 	+$(Q)$(MAKE) -f $(srctree)/Makefile olddefconfig
+ endef
+ 
+@@ -22,8 +25,6 @@ endef
+ #
+ # Input config fragments without '.config' suffix
+ define merge_into_defconfig_override
+-	$(Q)$(CONFIG_SHELL) $(srctree)/scripts/kconfig/merge_config.sh \
+-		-Q -m -O $(objtree) $(srctree)/arch/$(SRCARCH)/configs/$(1) \
+-		$(foreach config,$(2),$(srctree)/arch/$(SRCARCH)/configs/$(config).config)
++	$(call cmd,merge_fragments,$1,$2,-Q)
+ 	+$(Q)$(MAKE) -f $(srctree)/Makefile olddefconfig
+ endef
+diff --git a/scripts/Makefile.extrawarn b/scripts/Makefile.extrawarn
+index 04faf15ed316a9..dc081cf46d211c 100644
+--- a/scripts/Makefile.extrawarn
++++ b/scripts/Makefile.extrawarn
+@@ -31,6 +31,11 @@ KBUILD_CFLAGS-$(CONFIG_CC_NO_ARRAY_BOUNDS) += -Wno-array-bounds
+ ifdef CONFIG_CC_IS_CLANG
+ # The kernel builds with '-std=gnu11' so use of GNU extensions is acceptable.
+ KBUILD_CFLAGS += -Wno-gnu
++
++# Clang checks for overflow/truncation with '%p', while GCC does not:
++# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=111219
++KBUILD_CFLAGS += $(call cc-disable-warning, format-overflow-non-kprintf)
++KBUILD_CFLAGS += $(call cc-disable-warning, format-truncation-non-kprintf)
+ else
+ 
+ # gcc inanely warns about local variables called 'main'
+@@ -77,6 +82,9 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=designated-init)
+ # Warn if there is an enum types mismatch
+ KBUILD_CFLAGS += $(call cc-option,-Wenum-conversion)
+ 
++# Explicitly clear padding bits during variable initialization
++KBUILD_CFLAGS += $(call cc-option,-fzero-init-padding-bits=all)
++
+ KBUILD_CFLAGS += -Wextra
+ KBUILD_CFLAGS += -Wunused
+ 
+@@ -102,11 +110,6 @@ KBUILD_CFLAGS += $(call cc-disable-warning, packed-not-aligned)
+ KBUILD_CFLAGS += $(call cc-disable-warning, format-overflow)
+ ifdef CONFIG_CC_IS_GCC
+ KBUILD_CFLAGS += $(call cc-disable-warning, format-truncation)
+-else
+-# Clang checks for overflow/truncation with '%p', while GCC does not:
+-# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=111219
+-KBUILD_CFLAGS += $(call cc-disable-warning, format-overflow-non-kprintf)
+-KBUILD_CFLAGS += $(call cc-disable-warning, format-truncation-non-kprintf)
+ endif
+ KBUILD_CFLAGS += $(call cc-disable-warning, stringop-truncation)
+ 
+diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile
+index a0a0be38cbdc14..fb50bd4f4103f2 100644
+--- a/scripts/kconfig/Makefile
++++ b/scripts/kconfig/Makefile
+@@ -105,9 +105,11 @@ configfiles = $(wildcard $(srctree)/kernel/configs/$(1) $(srctree)/arch/$(SRCARC
+ all-config-fragments = $(call configfiles,*.config)
+ config-fragments = $(call configfiles,$@)
+ 
++cmd_merge_fragments = $(srctree)/scripts/kconfig/merge_config.sh -m $(KCONFIG_CONFIG) $(config-fragments)
++
+ %.config: $(obj)/conf
+ 	$(if $(config-fragments),, $(error $@ fragment does not exists on this architecture))
+-	$(Q)$(CONFIG_SHELL) $(srctree)/scripts/kconfig/merge_config.sh -m $(KCONFIG_CONFIG) $(config-fragments)
++	$(call cmd,merge_fragments)
+ 	$(Q)$(MAKE) -f $(srctree)/Makefile olddefconfig
+ 
+ PHONY += tinyconfig
+diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
+index 54f77f57ec8e25..1148e9498d8e83 100644
+--- a/sound/soc/intel/boards/bytcr_rt5640.c
++++ b/sound/soc/intel/boards/bytcr_rt5640.c
+@@ -1132,7 +1132,22 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
+ 					BYT_RT5640_SSP0_AIF2 |
+ 					BYT_RT5640_MCLK_EN),
+ 	},
+-	{	/* Vexia Edu Atla 10 tablet */
++	{
++		/* Vexia Edu Atla 10 tablet 5V version */
++		.matches = {
++			/* Having all 3 of these not set is somewhat unique */
++			DMI_MATCH(DMI_SYS_VENDOR, "To be filled by O.E.M."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "To be filled by O.E.M."),
++			DMI_MATCH(DMI_BOARD_NAME, "To be filled by O.E.M."),
++			/* Above strings are too generic, also match on BIOS date */
++			DMI_MATCH(DMI_BIOS_DATE, "05/14/2015"),
++		},
++		.driver_data = (void *)(BYTCR_INPUT_DEFAULTS |
++					BYT_RT5640_JD_NOT_INV |
++					BYT_RT5640_SSP0_AIF1 |
++					BYT_RT5640_MCLK_EN),
++	},
++	{	/* Vexia Edu Atla 10 tablet 9V version */
+ 		.matches = {
+ 			DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
+ 			DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
+diff --git a/tools/objtool/check.c b/tools/objtool/check.c
+index f0d8796b984a80..8e02db7e83323b 100644
+--- a/tools/objtool/check.c
++++ b/tools/objtool/check.c
+@@ -218,6 +218,7 @@ static bool is_rust_noreturn(const struct symbol *func)
+ 	       str_ends_with(func->name, "_4core9panicking18panic_bounds_check")			||
+ 	       str_ends_with(func->name, "_4core9panicking19assert_failed_inner")			||
+ 	       str_ends_with(func->name, "_4core9panicking36panic_misaligned_pointer_dereference")	||
++	       strstr(func->name, "_4core9panicking13assert_failed")					||
+ 	       strstr(func->name, "_4core9panicking11panic_const24panic_const_")			||
+ 	       (strstr(func->name, "_4core5slice5index24slice_") &&
+ 		str_ends_with(func->name, "_fail"));
+diff --git a/tools/sched_ext/include/scx/common.bpf.h b/tools/sched_ext/include/scx/common.bpf.h
+index 248ab790d143ed..f7206374a73dd8 100644
+--- a/tools/sched_ext/include/scx/common.bpf.h
++++ b/tools/sched_ext/include/scx/common.bpf.h
+@@ -251,8 +251,16 @@ void bpf_obj_drop_impl(void *kptr, void *meta) __ksym;
+ #define bpf_obj_new(type) ((type *)bpf_obj_new_impl(bpf_core_type_id_local(type), NULL))
+ #define bpf_obj_drop(kptr) bpf_obj_drop_impl(kptr, NULL)
+ 
+-void bpf_list_push_front(struct bpf_list_head *head, struct bpf_list_node *node) __ksym;
+-void bpf_list_push_back(struct bpf_list_head *head, struct bpf_list_node *node) __ksym;
++int bpf_list_push_front_impl(struct bpf_list_head *head,
++				    struct bpf_list_node *node,
++				    void *meta, __u64 off) __ksym;
++#define bpf_list_push_front(head, node) bpf_list_push_front_impl(head, node, NULL, 0)
++
++int bpf_list_push_back_impl(struct bpf_list_head *head,
++				   struct bpf_list_node *node,
++				   void *meta, __u64 off) __ksym;
++#define bpf_list_push_back(head, node) bpf_list_push_back_impl(head, node, NULL, 0)
++
+ struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head) __ksym;
+ struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head) __ksym;
+ struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root,
+diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
+index f0a3a9c18e9ef5..9006549a12945f 100644
+--- a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
++++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
+@@ -226,7 +226,7 @@ static void test_task_common_nocheck(struct bpf_iter_attach_opts *opts,
+ 	ASSERT_OK(pthread_create(&thread_id, NULL, &do_nothing_wait, NULL),
+ 		  "pthread_create");
+ 
+-	skel->bss->tid = gettid();
++	skel->bss->tid = syscall(SYS_gettid);
+ 
+ 	do_dummy_read_opts(skel->progs.dump_task, opts);
+ 
+@@ -255,10 +255,10 @@ static void *run_test_task_tid(void *arg)
+ 	union bpf_iter_link_info linfo;
+ 	int num_unknown_tid, num_known_tid;
+ 
+-	ASSERT_NEQ(getpid(), gettid(), "check_new_thread_id");
++	ASSERT_NEQ(getpid(), syscall(SYS_gettid), "check_new_thread_id");
+ 
+ 	memset(&linfo, 0, sizeof(linfo));
+-	linfo.task.tid = gettid();
++	linfo.task.tid = syscall(SYS_gettid);
+ 	opts.link_info = &linfo;
+ 	opts.link_info_len = sizeof(linfo);
+ 	test_task_common(&opts, 0, 1);
+diff --git a/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c b/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c
+index 844f6fc8487b67..c1ac813ff9bae3 100644
+--- a/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c
++++ b/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c
+@@ -869,21 +869,14 @@ static void consumer_test(struct uprobe_multi_consumers *skel,
+ 			fmt = "prog 0/1: uprobe";
+ 		} else {
+ 			/*
+-			 * uprobe return is tricky ;-)
+-			 *
+ 			 * to trigger uretprobe consumer, the uretprobe needs to be installed,
+ 			 * which means one of the 'return' uprobes was alive when probe was hit:
+ 			 *
+ 			 *   idxs: 2/3 uprobe return in 'installed' mask
+-			 *
+-			 * in addition if 'after' state removes everything that was installed in
+-			 * 'before' state, then uprobe kernel object goes away and return uprobe
+-			 * is not installed and we won't hit it even if it's in 'after' state.
+ 			 */
+ 			unsigned long had_uretprobes  = before & 0b1100; /* is uretprobe installed */
+-			unsigned long probe_preserved = before & after;  /* did uprobe go away */
+ 
+-			if (had_uretprobes && probe_preserved && test_bit(idx, after))
++			if (had_uretprobes && test_bit(idx, after))
+ 				val++;
+ 			fmt = "idx 2/3: uretprobe";
+ 		}
+diff --git a/tools/testing/selftests/gpio/gpio-sim.sh b/tools/testing/selftests/gpio/gpio-sim.sh
+index 6fb66a687f1737..bbc29ed9c60a91 100755
+--- a/tools/testing/selftests/gpio/gpio-sim.sh
++++ b/tools/testing/selftests/gpio/gpio-sim.sh
+@@ -46,12 +46,6 @@ remove_chip() {
+ 	rmdir $CONFIGFS_DIR/$CHIP || fail "Unable to remove the chip"
+ }
+ 
+-configfs_cleanup() {
+-	for CHIP in `ls $CONFIGFS_DIR/`; do
+-		remove_chip $CHIP
+-	done
+-}
+-
+ create_chip() {
+ 	local CHIP=$1
+ 
+@@ -105,6 +99,13 @@ disable_chip() {
+ 	echo 0 > $CONFIGFS_DIR/$CHIP/live || fail "Unable to disable the chip"
+ }
+ 
++configfs_cleanup() {
++	for CHIP in `ls $CONFIGFS_DIR/`; do
++		disable_chip $CHIP
++		remove_chip $CHIP
++	done
++}
++
+ configfs_chip_name() {
+ 	local CHIP=$1
+ 	local BANK=$2
+@@ -181,6 +182,7 @@ create_chip chip
+ create_bank chip bank
+ enable_chip chip
+ test -n `cat $CONFIGFS_DIR/chip/bank/chip_name` || fail "chip_name doesn't work"
++disable_chip chip
+ remove_chip chip
+ 
+ echo "1.2. chip_name returns 'none' if the chip is still pending"
+@@ -195,6 +197,7 @@ create_chip chip
+ create_bank chip bank
+ enable_chip chip
+ test -n `cat $CONFIGFS_DIR/chip/dev_name` || fail "dev_name doesn't work"
++disable_chip chip
+ remove_chip chip
+ 
+ echo "2. Creating and configuring simulated chips"
+@@ -204,6 +207,7 @@ create_chip chip
+ create_bank chip bank
+ enable_chip chip
+ test "`get_chip_num_lines chip bank`" = "1" || fail "default number of lines is not 1"
++disable_chip chip
+ remove_chip chip
+ 
+ echo "2.2. Number of lines can be specified"
+@@ -212,6 +216,7 @@ create_bank chip bank
+ set_num_lines chip bank 16
+ enable_chip chip
+ test "`get_chip_num_lines chip bank`" = "16" || fail "number of lines is not 16"
++disable_chip chip
+ remove_chip chip
+ 
+ echo "2.3. Label can be set"
+@@ -220,6 +225,7 @@ create_bank chip bank
+ set_label chip bank foobar
+ enable_chip chip
+ test "`get_chip_label chip bank`" = "foobar" || fail "label is incorrect"
++disable_chip chip
+ remove_chip chip
+ 
+ echo "2.4. Label can be left empty"
+@@ -227,6 +233,7 @@ create_chip chip
+ create_bank chip bank
+ enable_chip chip
+ test -z "`cat $CONFIGFS_DIR/chip/bank/label`" || fail "label is not empty"
++disable_chip chip
+ remove_chip chip
+ 
+ echo "2.5. Line names can be configured"
+@@ -238,6 +245,7 @@ set_line_name chip bank 2 bar
+ enable_chip chip
+ test "`get_line_name chip bank 0`" = "foo" || fail "line name is incorrect"
+ test "`get_line_name chip bank 2`" = "bar" || fail "line name is incorrect"
++disable_chip chip
+ remove_chip chip
+ 
+ echo "2.6. Line config can remain unused if offset is greater than number of lines"
+@@ -248,6 +256,7 @@ set_line_name chip bank 5 foobar
+ enable_chip chip
+ test "`get_line_name chip bank 0`" = "" || fail "line name is incorrect"
+ test "`get_line_name chip bank 1`" = "" || fail "line name is incorrect"
++disable_chip chip
+ remove_chip chip
+ 
+ echo "2.7. Line configfs directory names are sanitized"
+@@ -267,6 +276,7 @@ for CHIP in $CHIPS; do
+ 	enable_chip $CHIP
+ done
+ for CHIP in $CHIPS; do
++  disable_chip $CHIP
+ 	remove_chip $CHIP
+ done
+ 
+@@ -278,6 +288,7 @@ echo foobar > $CONFIGFS_DIR/chip/bank/label 2> /dev/null && \
+ 	fail "Setting label of a live chip should fail"
+ echo 8 > $CONFIGFS_DIR/chip/bank/num_lines 2> /dev/null && \
+ 	fail "Setting number of lines of a live chip should fail"
++disable_chip chip
+ remove_chip chip
+ 
+ echo "2.10. Can't create line items when chip is live"
+@@ -285,6 +296,7 @@ create_chip chip
+ create_bank chip bank
+ enable_chip chip
+ mkdir $CONFIGFS_DIR/chip/bank/line0 2> /dev/null && fail "Creating line item should fail"
++disable_chip chip
+ remove_chip chip
+ 
+ echo "2.11. Probe errors are propagated to user-space"
+@@ -316,6 +328,7 @@ mkdir -p $CONFIGFS_DIR/chip/bank/line4/hog
+ enable_chip chip
+ $BASE_DIR/gpio-mockup-cdev -s 1 /dev/`configfs_chip_name chip bank` 4 2> /dev/null && \
+ 	fail "Setting the value of a hogged line shouldn't succeed"
++disable_chip chip
+ remove_chip chip
+ 
+ echo "3. Controlling simulated chips"
+@@ -331,6 +344,7 @@ test "$?" = "1" || fail "pull set incorrectly"
+ sysfs_set_pull chip bank 0 pull-down
+ $BASE_DIR/gpio-mockup-cdev /dev/`configfs_chip_name chip bank` 1
+ test "$?" = "0" || fail "pull set incorrectly"
++disable_chip chip
+ remove_chip chip
+ 
+ echo "3.2. Pull can be read from sysfs"
+@@ -344,6 +358,7 @@ SYSFS_PATH=/sys/devices/platform/$DEVNAME/$CHIPNAME/sim_gpio0/pull
+ test `cat $SYSFS_PATH` = "pull-down" || fail "reading the pull failed"
+ sysfs_set_pull chip bank 0 pull-up
+ test `cat $SYSFS_PATH` = "pull-up" || fail "reading the pull failed"
++disable_chip chip
+ remove_chip chip
+ 
+ echo "3.3. Incorrect input in sysfs is rejected"
+@@ -355,6 +370,7 @@ DEVNAME=`configfs_dev_name chip`
+ CHIPNAME=`configfs_chip_name chip bank`
+ SYSFS_PATH="/sys/devices/platform/$DEVNAME/$CHIPNAME/sim_gpio0/pull"
+ echo foobar > $SYSFS_PATH 2> /dev/null && fail "invalid input not detected"
++disable_chip chip
+ remove_chip chip
+ 
+ echo "3.4. Can't write to value"
+@@ -365,6 +381,7 @@ DEVNAME=`configfs_dev_name chip`
+ CHIPNAME=`configfs_chip_name chip bank`
+ SYSFS_PATH="/sys/devices/platform/$DEVNAME/$CHIPNAME/sim_gpio0/value"
+ echo 1 > $SYSFS_PATH 2> /dev/null && fail "writing to 'value' succeeded unexpectedly"
++disable_chip chip
+ remove_chip chip
+ 
+ echo "4. Simulated GPIO chips are functional"
+@@ -382,6 +399,7 @@ $BASE_DIR/gpio-mockup-cdev -s 1 /dev/`configfs_chip_name chip bank` 0 &
+ sleep 0.1 # FIXME Any better way?
+ test `cat $SYSFS_PATH` = "1" || fail "incorrect value read from sysfs"
+ kill $!
++disable_chip chip
+ remove_chip chip
+ 
+ echo "4.2. Bias settings work correctly"
+@@ -394,6 +412,7 @@ CHIPNAME=`configfs_chip_name chip bank`
+ SYSFS_PATH="/sys/devices/platform/$DEVNAME/$CHIPNAME/sim_gpio0/value"
+ $BASE_DIR/gpio-mockup-cdev -b pull-up /dev/`configfs_chip_name chip bank` 0
+ test `cat $SYSFS_PATH` = "1" || fail "bias setting does not work"
++disable_chip chip
+ remove_chip chip
+ 
+ echo "GPIO $MODULE test PASS"
+diff --git a/tools/testing/selftests/net/pmtu.sh b/tools/testing/selftests/net/pmtu.sh
+index 6c651c880fe83d..66be7699c72c9a 100755
+--- a/tools/testing/selftests/net/pmtu.sh
++++ b/tools/testing/selftests/net/pmtu.sh
+@@ -197,6 +197,12 @@
+ #
+ # - pmtu_ipv6_route_change
+ #	Same as above but with IPv6
++#
++# - pmtu_ipv4_mp_exceptions
++#	Use the same topology as in pmtu_ipv4, but add routeable addresses
++#	on host A and B on lo reachable via both routers. Host A and B
++#	addresses have multipath routes to each other, b_r1 mtu = 1500.
++#	Check that PMTU exceptions are created for both paths.
+ 
+ source lib.sh
+ source net_helper.sh
+@@ -266,7 +272,8 @@ tests="
+ 	list_flush_ipv4_exception	ipv4: list and flush cached exceptions	1
+ 	list_flush_ipv6_exception	ipv6: list and flush cached exceptions	1
+ 	pmtu_ipv4_route_change		ipv4: PMTU exception w/route replace	1
+-	pmtu_ipv6_route_change		ipv6: PMTU exception w/route replace	1"
++	pmtu_ipv6_route_change		ipv6: PMTU exception w/route replace	1
++	pmtu_ipv4_mp_exceptions		ipv4: PMTU multipath nh exceptions	1"
+ 
+ # Addressing and routing for tests with routers: four network segments, with
+ # index SEGMENT between 1 and 4, a common prefix (PREFIX4 or PREFIX6) and an
+@@ -343,6 +350,9 @@ tunnel6_a_addr="fd00:2::a"
+ tunnel6_b_addr="fd00:2::b"
+ tunnel6_mask="64"
+ 
++host4_a_addr="192.168.99.99"
++host4_b_addr="192.168.88.88"
++
+ dummy6_0_prefix="fc00:1000::"
+ dummy6_1_prefix="fc00:1001::"
+ dummy6_mask="64"
+@@ -984,6 +994,52 @@ setup_ovs_bridge() {
+ 	run_cmd ip route add ${prefix6}:${b_r1}::1 via ${prefix6}:${a_r1}::2
+ }
+ 
++setup_multipath_new() {
++	# Set up host A with multipath routes to host B host4_b_addr
++	run_cmd ${ns_a} ip addr add ${host4_a_addr} dev lo
++	run_cmd ${ns_a} ip nexthop add id 401 via ${prefix4}.${a_r1}.2 dev veth_A-R1
++	run_cmd ${ns_a} ip nexthop add id 402 via ${prefix4}.${a_r2}.2 dev veth_A-R2
++	run_cmd ${ns_a} ip nexthop add id 403 group 401/402
++	run_cmd ${ns_a} ip route add ${host4_b_addr} src ${host4_a_addr} nhid 403
++
++	# Set up host B with multipath routes to host A host4_a_addr
++	run_cmd ${ns_b} ip addr add ${host4_b_addr} dev lo
++	run_cmd ${ns_b} ip nexthop add id 401 via ${prefix4}.${b_r1}.2 dev veth_B-R1
++	run_cmd ${ns_b} ip nexthop add id 402 via ${prefix4}.${b_r2}.2 dev veth_B-R2
++	run_cmd ${ns_b} ip nexthop add id 403 group 401/402
++	run_cmd ${ns_b} ip route add ${host4_a_addr} src ${host4_b_addr} nhid 403
++}
++
++setup_multipath_old() {
++	# Set up host A with multipath routes to host B host4_b_addr
++	run_cmd ${ns_a} ip addr add ${host4_a_addr} dev lo
++	run_cmd ${ns_a} ip route add ${host4_b_addr} \
++			src ${host4_a_addr} \
++			nexthop via ${prefix4}.${a_r1}.2 weight 1 \
++			nexthop via ${prefix4}.${a_r2}.2 weight 1
++
++	# Set up host B with multipath routes to host A host4_a_addr
++	run_cmd ${ns_b} ip addr add ${host4_b_addr} dev lo
++	run_cmd ${ns_b} ip route add ${host4_a_addr} \
++			src ${host4_b_addr} \
++			nexthop via ${prefix4}.${b_r1}.2 weight 1 \
++			nexthop via ${prefix4}.${b_r2}.2 weight 1
++}
++
++setup_multipath() {
++	if [ "$USE_NH" = "yes" ]; then
++		setup_multipath_new
++	else
++		setup_multipath_old
++	fi
++
++	# Set up routers with routes to dummies
++	run_cmd ${ns_r1} ip route add ${host4_a_addr} via ${prefix4}.${a_r1}.1
++	run_cmd ${ns_r2} ip route add ${host4_a_addr} via ${prefix4}.${a_r2}.1
++	run_cmd ${ns_r1} ip route add ${host4_b_addr} via ${prefix4}.${b_r1}.1
++	run_cmd ${ns_r2} ip route add ${host4_b_addr} via ${prefix4}.${b_r2}.1
++}
++
+ setup() {
+ 	[ "$(id -u)" -ne 0 ] && echo "  need to run as root" && return $ksft_skip
+ 
+@@ -1076,23 +1132,15 @@ link_get_mtu() {
+ }
+ 
+ route_get_dst_exception() {
+-	ns_cmd="${1}"
+-	dst="${2}"
+-	dsfield="${3}"
++	ns_cmd="${1}"; shift
+ 
+-	if [ -z "${dsfield}" ]; then
+-		dsfield=0
+-	fi
+-
+-	${ns_cmd} ip route get "${dst}" dsfield "${dsfield}"
++	${ns_cmd} ip route get "$@"
+ }
+ 
+ route_get_dst_pmtu_from_exception() {
+-	ns_cmd="${1}"
+-	dst="${2}"
+-	dsfield="${3}"
++	ns_cmd="${1}"; shift
+ 
+-	mtu_parse "$(route_get_dst_exception "${ns_cmd}" "${dst}" "${dsfield}")"
++	mtu_parse "$(route_get_dst_exception "${ns_cmd}" "$@")"
+ }
+ 
+ check_pmtu_value() {
+@@ -1235,10 +1283,10 @@ test_pmtu_ipv4_dscp_icmp_exception() {
+ 	run_cmd "${ns_a}" ping -q -M want -Q "${dsfield}" -c 1 -w 1 -s "${len}" "${dst2}"
+ 
+ 	# Check that exceptions have been created with the correct PMTU
+-	pmtu_1="$(route_get_dst_pmtu_from_exception "${ns_a}" "${dst1}" "${policy_mark}")"
++	pmtu_1="$(route_get_dst_pmtu_from_exception "${ns_a}" "${dst1}" dsfield "${policy_mark}")"
+ 	check_pmtu_value "1400" "${pmtu_1}" "exceeding MTU" || return 1
+ 
+-	pmtu_2="$(route_get_dst_pmtu_from_exception "${ns_a}" "${dst2}" "${policy_mark}")"
++	pmtu_2="$(route_get_dst_pmtu_from_exception "${ns_a}" "${dst2}" dsfield "${policy_mark}")"
+ 	check_pmtu_value "1500" "${pmtu_2}" "exceeding MTU" || return 1
+ }
+ 
+@@ -1285,9 +1333,9 @@ test_pmtu_ipv4_dscp_udp_exception() {
+ 		UDP:"${dst2}":50000,tos="${dsfield}"
+ 
+ 	# Check that exceptions have been created with the correct PMTU
+-	pmtu_1="$(route_get_dst_pmtu_from_exception "${ns_a}" "${dst1}" "${policy_mark}")"
++	pmtu_1="$(route_get_dst_pmtu_from_exception "${ns_a}" "${dst1}" dsfield "${policy_mark}")"
+ 	check_pmtu_value "1400" "${pmtu_1}" "exceeding MTU" || return 1
+-	pmtu_2="$(route_get_dst_pmtu_from_exception "${ns_a}" "${dst2}" "${policy_mark}")"
++	pmtu_2="$(route_get_dst_pmtu_from_exception "${ns_a}" "${dst2}" dsfield "${policy_mark}")"
+ 	check_pmtu_value "1500" "${pmtu_2}" "exceeding MTU" || return 1
+ }
+ 
+@@ -2329,6 +2377,36 @@ test_pmtu_ipv6_route_change() {
+ 	test_pmtu_ipvX_route_change 6
+ }
+ 
++test_pmtu_ipv4_mp_exceptions() {
++	setup namespaces routing multipath || return $ksft_skip
++
++	trace "${ns_a}"  veth_A-R1    "${ns_r1}" veth_R1-A \
++	      "${ns_r1}" veth_R1-B    "${ns_b}"  veth_B-R1 \
++	      "${ns_a}"  veth_A-R2    "${ns_r2}" veth_R2-A \
++	      "${ns_r2}" veth_R2-B    "${ns_b}"  veth_B-R2
++
++	# Set up initial MTU values
++	mtu "${ns_a}"  veth_A-R1 2000
++	mtu "${ns_r1}" veth_R1-A 2000
++	mtu "${ns_r1}" veth_R1-B 1500
++	mtu "${ns_b}"  veth_B-R1 1500
++
++	mtu "${ns_a}"  veth_A-R2 2000
++	mtu "${ns_r2}" veth_R2-A 2000
++	mtu "${ns_r2}" veth_R2-B 1500
++	mtu "${ns_b}"  veth_B-R2 1500
++
++	# Ping and expect two nexthop exceptions for two routes
++	run_cmd ${ns_a} ping -q -M want -i 0.1 -c 1 -s 1800 "${host4_b_addr}"
++
++	# Check that exceptions have been created with the correct PMTU
++	pmtu_a_R1="$(route_get_dst_pmtu_from_exception "${ns_a}" "${host4_b_addr}" oif veth_A-R1)"
++	pmtu_a_R2="$(route_get_dst_pmtu_from_exception "${ns_a}" "${host4_b_addr}" oif veth_A-R2)"
++
++	check_pmtu_value "1500" "${pmtu_a_R1}" "exceeding MTU (veth_A-R1)" || return 1
++	check_pmtu_value "1500" "${pmtu_a_R2}" "exceeding MTU (veth_A-R2)" || return 1
++}
++
+ usage() {
+ 	echo
+ 	echo "$0 [OPTIONS] [TEST]..."
+diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh
+index bdf6f10d055891..87dce3efe31e4a 100755
+--- a/tools/testing/selftests/net/rtnetlink.sh
++++ b/tools/testing/selftests/net/rtnetlink.sh
+@@ -809,10 +809,10 @@ kci_test_ipsec_offload()
+ 	# does driver have correct offload info
+ 	run_cmd diff $sysfsf - << EOF
+ SA count=2 tx=3
+-sa[0] tx ipaddr=0x00000000 00000000 00000000 00000000
++sa[0] tx ipaddr=$dstip
+ sa[0]    spi=0x00000009 proto=0x32 salt=0x61626364 crypt=1
+ sa[0]    key=0x34333231 38373635 32313039 36353433
+-sa[1] rx ipaddr=0x00000000 00000000 00000000 037ba8c0
++sa[1] rx ipaddr=$srcip
+ sa[1]    spi=0x00000009 proto=0x32 salt=0x61626364 crypt=1
+ sa[1]    key=0x34333231 38373635 32313039 36353433
+ EOF
+diff --git a/tools/tracing/rtla/src/timerlat_hist.c b/tools/tracing/rtla/src/timerlat_hist.c
+index 4cbd2d8ebb0461..397dc962f5e2ad 100644
+--- a/tools/tracing/rtla/src/timerlat_hist.c
++++ b/tools/tracing/rtla/src/timerlat_hist.c
+@@ -1143,6 +1143,14 @@ static int stop_tracing;
+ static struct trace_instance *hist_inst = NULL;
+ static void stop_hist(int sig)
+ {
++	if (stop_tracing) {
++		/*
++		 * Stop requested twice in a row; abort event processing and
++		 * exit immediately
++		 */
++		tracefs_iterate_stop(hist_inst->inst);
++		return;
++	}
+ 	stop_tracing = 1;
+ 	if (hist_inst)
+ 		trace_instance_stop(hist_inst);
+diff --git a/tools/tracing/rtla/src/timerlat_top.c b/tools/tracing/rtla/src/timerlat_top.c
+index d13be28dacd599..0def5fec51ed7a 100644
+--- a/tools/tracing/rtla/src/timerlat_top.c
++++ b/tools/tracing/rtla/src/timerlat_top.c
+@@ -897,6 +897,14 @@ static int stop_tracing;
+ static struct trace_instance *top_inst = NULL;
+ static void stop_top(int sig)
+ {
++	if (stop_tracing) {
++		/*
++		 * Stop requested twice in a row; abort event processing and
++		 * exit immediately
++		 */
++		tracefs_iterate_stop(top_inst->inst);
++		return;
++	}
+ 	stop_tracing = 1;
+ 	if (top_inst)
+ 		trace_instance_stop(top_inst);


* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2025-02-18 11:26 Mike Pagano
  0 siblings, 0 replies; 82+ messages in thread
From: Mike Pagano @ 2025-02-18 11:26 UTC (permalink / raw
  To: gentoo-commits

commit:     74d0366e3c6bc166d44d782e2f233740da1a9a16
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Feb 18 11:26:15 2025 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Feb 18 11:26:15 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=74d0366e

Linux patch 6.12.15

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |   4 ++
 1014_linux-6.12.15.patch | 142 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 146 insertions(+)

diff --git a/0000_README b/0000_README
index 8a136823..f6cd3204 100644
--- a/0000_README
+++ b/0000_README
@@ -99,6 +99,10 @@ Patch:  1013_linux-6.12.14.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.12.14
 
+Patch:  1014_linux-6.12.15.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.12.15
+
 Patch:  1500_fortify-copy-size-value-range-tracking-fix.patch
 From:   https://git.kernel.org/
 Desc:   fortify: Hide run-time copy size from value range tracking

diff --git a/1014_linux-6.12.15.patch b/1014_linux-6.12.15.patch
new file mode 100644
index 00000000..8fb3146b
--- /dev/null
+++ b/1014_linux-6.12.15.patch
@@ -0,0 +1,142 @@
+diff --git a/Makefile b/Makefile
+index 26a471dbed62a5..c6918c620bc368 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 12
+-SUBLEVEL = 14
++SUBLEVEL = 15
+ EXTRAVERSION =
+ NAME = Baby Opossum Posse
+ 
+diff --git a/fs/xfs/xfs_quota.h b/fs/xfs/xfs_quota.h
+index 23d71a55bbc006..032f3a70f21ddd 100644
+--- a/fs/xfs/xfs_quota.h
++++ b/fs/xfs/xfs_quota.h
+@@ -96,7 +96,8 @@ extern void xfs_trans_free_dqinfo(struct xfs_trans *);
+ extern void xfs_trans_mod_dquot_byino(struct xfs_trans *, struct xfs_inode *,
+ 		uint, int64_t);
+ extern void xfs_trans_apply_dquot_deltas(struct xfs_trans *);
+-extern void xfs_trans_unreserve_and_mod_dquots(struct xfs_trans *);
++void xfs_trans_unreserve_and_mod_dquots(struct xfs_trans *tp,
++		bool already_locked);
+ int xfs_trans_reserve_quota_nblks(struct xfs_trans *tp, struct xfs_inode *ip,
+ 		int64_t dblocks, int64_t rblocks, bool force);
+ extern int xfs_trans_reserve_quota_bydquots(struct xfs_trans *,
+@@ -166,7 +167,7 @@ static inline void xfs_trans_mod_dquot_byino(struct xfs_trans *tp,
+ {
+ }
+ #define xfs_trans_apply_dquot_deltas(tp)
+-#define xfs_trans_unreserve_and_mod_dquots(tp)
++#define xfs_trans_unreserve_and_mod_dquots(tp, a)
+ static inline int xfs_trans_reserve_quota_nblks(struct xfs_trans *tp,
+ 		struct xfs_inode *ip, int64_t dblocks, int64_t rblocks,
+ 		bool force)
+diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
+index ee46051db12dde..39cd11cbe21fcb 100644
+--- a/fs/xfs/xfs_trans.c
++++ b/fs/xfs/xfs_trans.c
+@@ -840,6 +840,7 @@ __xfs_trans_commit(
+ 	 */
+ 	if (tp->t_flags & XFS_TRANS_SB_DIRTY)
+ 		xfs_trans_apply_sb_deltas(tp);
++	xfs_trans_apply_dquot_deltas(tp);
+ 
+ 	error = xfs_trans_run_precommits(tp);
+ 	if (error)
+@@ -868,11 +869,6 @@ __xfs_trans_commit(
+ 
+ 	ASSERT(tp->t_ticket != NULL);
+ 
+-	/*
+-	 * If we need to update the superblock, then do it now.
+-	 */
+-	xfs_trans_apply_dquot_deltas(tp);
+-
+ 	xlog_cil_commit(log, tp, &commit_seq, regrant);
+ 
+ 	xfs_trans_free(tp);
+@@ -898,7 +894,7 @@ __xfs_trans_commit(
+ 	 * the dqinfo portion to be.  All that means is that we have some
+ 	 * (non-persistent) quota reservations that need to be unreserved.
+ 	 */
+-	xfs_trans_unreserve_and_mod_dquots(tp);
++	xfs_trans_unreserve_and_mod_dquots(tp, true);
+ 	if (tp->t_ticket) {
+ 		if (regrant && !xlog_is_shutdown(log))
+ 			xfs_log_ticket_regrant(log, tp->t_ticket);
+@@ -992,7 +988,7 @@ xfs_trans_cancel(
+ 	}
+ #endif
+ 	xfs_trans_unreserve_and_mod_sb(tp);
+-	xfs_trans_unreserve_and_mod_dquots(tp);
++	xfs_trans_unreserve_and_mod_dquots(tp, false);
+ 
+ 	if (tp->t_ticket) {
+ 		xfs_log_ticket_ungrant(log, tp->t_ticket);
+diff --git a/fs/xfs/xfs_trans_dquot.c b/fs/xfs/xfs_trans_dquot.c
+index b368e13424c4f4..b92eeaa1a2a9e7 100644
+--- a/fs/xfs/xfs_trans_dquot.c
++++ b/fs/xfs/xfs_trans_dquot.c
+@@ -602,6 +602,24 @@ xfs_trans_apply_dquot_deltas(
+ 			ASSERT(dqp->q_blk.reserved >= dqp->q_blk.count);
+ 			ASSERT(dqp->q_ino.reserved >= dqp->q_ino.count);
+ 			ASSERT(dqp->q_rtb.reserved >= dqp->q_rtb.count);
++
++			/*
++			 * We've applied the count changes and given back
++			 * whatever reservation we didn't use.  Zero out the
++			 * dqtrx fields.
++			 */
++			qtrx->qt_blk_res = 0;
++			qtrx->qt_bcount_delta = 0;
++			qtrx->qt_delbcnt_delta = 0;
++
++			qtrx->qt_rtblk_res = 0;
++			qtrx->qt_rtblk_res_used = 0;
++			qtrx->qt_rtbcount_delta = 0;
++			qtrx->qt_delrtb_delta = 0;
++
++			qtrx->qt_ino_res = 0;
++			qtrx->qt_ino_res_used = 0;
++			qtrx->qt_icount_delta = 0;
+ 		}
+ 	}
+ }
+@@ -638,7 +656,8 @@ xfs_trans_unreserve_and_mod_dquots_hook(
+  */
+ void
+ xfs_trans_unreserve_and_mod_dquots(
+-	struct xfs_trans	*tp)
++	struct xfs_trans	*tp,
++	bool			already_locked)
+ {
+ 	int			i, j;
+ 	struct xfs_dquot	*dqp;
+@@ -667,10 +686,12 @@ xfs_trans_unreserve_and_mod_dquots(
+ 			 * about the number of blocks used field, or deltas.
+ 			 * Also we don't bother to zero the fields.
+ 			 */
+-			locked = false;
++			locked = already_locked;
+ 			if (qtrx->qt_blk_res) {
+-				xfs_dqlock(dqp);
+-				locked = true;
++				if (!locked) {
++					xfs_dqlock(dqp);
++					locked = true;
++				}
+ 				dqp->q_blk.reserved -=
+ 					(xfs_qcnt_t)qtrx->qt_blk_res;
+ 			}
+@@ -691,7 +712,7 @@ xfs_trans_unreserve_and_mod_dquots(
+ 				dqp->q_rtb.reserved -=
+ 					(xfs_qcnt_t)qtrx->qt_rtblk_res;
+ 			}
+-			if (locked)
++			if (locked && !already_locked)
+ 				xfs_dqunlock(dqp);
+ 
+ 		}
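
The already_locked parameter added to xfs_trans_unreserve_and_mod_dquots() above is a classic concurrency pattern: a helper that can be reached both with and without a lock held takes that fact from its caller, instead of blindly locking (deadlock if the caller holds it) or never locking (a race if it does not). A self-contained pthreads sketch of the pattern with hypothetical names, not the XFS code:

#include <pthread.h>
#include <stdio.h>

struct counter {
        pthread_mutex_t lock;
        long value;
        long reserved;
};

/* Give back an unused reservation; take the lock only if the caller has not. */
static void counter_unreserve(struct counter *c, long n, int already_locked)
{
        if (!already_locked)
                pthread_mutex_lock(&c->lock);

        c->reserved -= n;

        if (!already_locked)
                pthread_mutex_unlock(&c->lock);
}

/* Commit path: apply the delta and unreserve inside one critical section. */
static void counter_commit(struct counter *c, long delta, long res)
{
        pthread_mutex_lock(&c->lock);
        c->value += delta;
        counter_unreserve(c, res, 1);   /* we already hold c->lock */
        pthread_mutex_unlock(&c->lock);
}

int main(void)
{
        struct counter c = { PTHREAD_MUTEX_INITIALIZER, 0, 8 };

        counter_commit(&c, 4, 4);       /* commit path: lock already held */
        counter_unreserve(&c, 4, 0);    /* cancel path: helper locks itself */
        printf("value=%ld reserved=%ld\n", c.value, c.reserved);
        return 0;
}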


* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2025-02-17 15:44 Mike Pagano
  0 siblings, 0 replies; 82+ messages in thread
From: Mike Pagano @ 2025-02-17 15:44 UTC (permalink / raw
  To: gentoo-commits

commit:     86cb22103e9d9da37a098e62cada3e3a279169a4
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Feb 17 15:44:27 2025 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Feb 17 15:44:27 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=86cb2210

kbuild gcc15 fixes, thanks to holgar

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                |  4 ++
 2980_kbuild-gcc15-gnu23-to-gnu11-fix.patch | 94 ++++++++++++++++++++++++++++++
 2 files changed, 98 insertions(+)

diff --git a/0000_README b/0000_README
index 54f48e7e..8a136823 100644
--- a/0000_README
+++ b/0000_README
@@ -131,6 +131,10 @@ Patch:  2920_sign-file-patch-for-libressl.patch
 From:   https://bugs.gentoo.org/717166
 Desc:   sign-file: full functionality with modern LibreSSL
 
+Patch:  2980_kbuild-gcc15-gnu23-to-gnu11-fix.patch
+From:   https://github.com/hhoffstaette/kernel-patches/
+Desc:   gcc 15 kbuild fixes
+
 Patch:  2990_libbpf-v2-workaround-Wmaybe-uninitialized-false-pos.patch
 From:   https://lore.kernel.org/bpf/
 Desc:   libbpf: workaround -Wmaybe-uninitialized false positive

diff --git a/2980_kbuild-gcc15-gnu23-to-gnu11-fix.patch b/2980_kbuild-gcc15-gnu23-to-gnu11-fix.patch
new file mode 100644
index 00000000..e55dc3ed
--- /dev/null
+++ b/2980_kbuild-gcc15-gnu23-to-gnu11-fix.patch
@@ -0,0 +1,94 @@
+GCC 15 defaults to -std=gnu23. While most of the kernel builds with -std=gnu11,
+some of it forgets to pass that flag. Hack in CSTD_FLAG to pass -std=gnu11
+everywhere.
+
+https://lore.kernel.org/linux-kbuild/20241119044724.GA2246422@thelio-3990X/
+--- a/Makefile
++++ b/Makefile
+@@ -416,6 +416,8 @@ export KCONFIG_CONFIG
+ # SHELL used by kbuild
+ CONFIG_SHELL := sh
+ 
++CSTD_FLAG := -std=gnu11
++
+ HOST_LFS_CFLAGS := $(shell getconf LFS_CFLAGS 2>/dev/null)
+ HOST_LFS_LDFLAGS := $(shell getconf LFS_LDFLAGS 2>/dev/null)
+ HOST_LFS_LIBS := $(shell getconf LFS_LIBS 2>/dev/null)
+@@ -437,7 +439,7 @@ HOSTRUSTC = rustc
+ HOSTPKG_CONFIG	= pkg-config
+ 
+ KBUILD_USERHOSTCFLAGS := -Wall -Wmissing-prototypes -Wstrict-prototypes \
+-			 -O2 -fomit-frame-pointer -std=gnu11
++			 -O2 -fomit-frame-pointer $(CSTD_FLAG)
+ KBUILD_USERCFLAGS  := $(KBUILD_USERHOSTCFLAGS) $(USERCFLAGS)
+ KBUILD_USERLDFLAGS := $(USERLDFLAGS)
+ 
+@@ -545,7 +547,7 @@ LINUXINCLUDE    := \
+ KBUILD_AFLAGS   := -D__ASSEMBLY__ -fno-PIE
+ 
+ KBUILD_CFLAGS :=
+-KBUILD_CFLAGS += -std=gnu11
++KBUILD_CFLAGS += $(CSTD_FLAG)
+ KBUILD_CFLAGS += -fshort-wchar
+ KBUILD_CFLAGS += -funsigned-char
+ KBUILD_CFLAGS += -fno-common
+@@ -589,7 +591,7 @@ export CPP AR NM STRIP OBJCOPY OBJDUMP READELF PAHOLE RESOLVE_BTFIDS LEX YACC AW
+ export PERL PYTHON3 CHECK CHECKFLAGS MAKE UTS_MACHINE HOSTCXX
+ export KGZIP KBZIP2 KLZOP LZMA LZ4 XZ ZSTD
+ export KBUILD_HOSTCXXFLAGS KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS LDFLAGS_MODULE
+-export KBUILD_USERCFLAGS KBUILD_USERLDFLAGS
++export KBUILD_USERCFLAGS KBUILD_USERLDFLAGS CSTD_FLAG
+ 
+ export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS KBUILD_LDFLAGS
+ export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE
+--- a/arch/arm64/kernel/vdso32/Makefile
++++ b/arch/arm64/kernel/vdso32/Makefile
+@@ -65,7 +65,7 @@ VDSO_CFLAGS += -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
+                -fno-strict-aliasing -fno-common \
+                -Werror-implicit-function-declaration \
+                -Wno-format-security \
+-               -std=gnu11
++               $(CSTD_FLAG)
+ VDSO_CFLAGS  += -O2
+ # Some useful compiler-dependent flags from top-level Makefile
+ VDSO_CFLAGS += $(call cc32-option,-Wno-pointer-sign)
+--- a/arch/x86/Makefile
++++ b/arch/x86/Makefile
+@@ -47,7 +47,7 @@ endif
+ 
+ # How to compile the 16-bit code.  Note we always compile for -march=i386;
+ # that way we can complain to the user if the CPU is insufficient.
+-REALMODE_CFLAGS	:= -std=gnu11 -m16 -g -Os -DDISABLE_BRANCH_PROFILING -D__DISABLE_EXPORTS \
++REALMODE_CFLAGS	:= $(CSTD_FLAG) -m16 -g -Os -DDISABLE_BRANCH_PROFILING -D__DISABLE_EXPORTS \
+ 		   -Wall -Wstrict-prototypes -march=i386 -mregparm=3 \
+ 		   -fno-strict-aliasing -fomit-frame-pointer -fno-pic \
+ 		   -mno-mmx -mno-sse $(call cc-option,-fcf-protection=none)
+--- a/drivers/firmware/efi/libstub/Makefile
++++ b/drivers/firmware/efi/libstub/Makefile
+@@ -7,7 +7,7 @@
+ #
+ 
+ # non-x86 reuses KBUILD_CFLAGS, x86 does not
+-cflags-y			:= $(KBUILD_CFLAGS)
++cflags-y			:= $(KBUILD_CFLAGS) $(CSTD_FLAG)
+ 
+ cflags-$(CONFIG_X86_32)		:= -march=i386
+ cflags-$(CONFIG_X86_64)		:= -mcmodel=small
+@@ -18,7 +18,7 @@ cflags-$(CONFIG_X86)		+= -m$(BITS) -D__KERNEL__ \
+ 				   $(call cc-disable-warning, address-of-packed-member) \
+ 				   $(call cc-disable-warning, gnu) \
+ 				   -fno-asynchronous-unwind-tables \
+-				   $(CLANG_FLAGS)
++				   $(CLANG_FLAGS) $(CSTD_FLAG)
+ 
+ # arm64 uses the full KBUILD_CFLAGS so it's necessary to explicitly
+ # disable the stackleak plugin
+@@ -42,7 +42,7 @@ KBUILD_CFLAGS			:= $(subst $(CC_FLAGS_FTRACE),,$(cflags-y)) \
+ 				   -ffreestanding \
+ 				   -fno-stack-protector \
+ 				   $(call cc-option,-fno-addrsig) \
+-				   -D__DISABLE_EXPORTS
++				   -D__DISABLE_EXPORTS $(CSTD_FLAG)
+ 
+ #
+ # struct randomization only makes sense for Linux internal types, which the EFI


* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2025-02-17 11:25 Mike Pagano
  0 siblings, 0 replies; 82+ messages in thread
From: Mike Pagano @ 2025-02-17 11:25 UTC (permalink / raw
  To: gentoo-commits

commit:     e35e34f8774b096f3213e24cfcbf45e4240ae613
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Feb 17 11:24:59 2025 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Feb 17 11:24:59 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=e35e34f8

Removed redundant patch

Removed
2980_GCC15-gnu23-to-gnu11-fix.patch

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                         |   4 --
 2980_GCC15-gnu23-to-gnu11-fix.patch | 105 ------------------------------------
 2 files changed, 109 deletions(-)

diff --git a/0000_README b/0000_README
index c6c607fe..54f48e7e 100644
--- a/0000_README
+++ b/0000_README
@@ -131,10 +131,6 @@ Patch:  2920_sign-file-patch-for-libressl.patch
 From:   https://bugs.gentoo.org/717166
 Desc:   sign-file: full functionality with modern LibreSSL
 
-Patch:  2980_GCC15-gnu23-to-gnu11-fix.patch
-From:   https://lore.kernel.org/linux-kbuild/20241119044724.GA2246422@thelio-3990X/
-Desc:   GCC 15 defaults to -std=gnu23. Hack in CSTD_FLAG to pass -std=gnu11 everywhere.
-
 Patch:  2990_libbpf-v2-workaround-Wmaybe-uninitialized-false-pos.patch
 From:   https://lore.kernel.org/bpf/
 Desc:   libbpf: workaround -Wmaybe-uninitialized false positive

diff --git a/2980_GCC15-gnu23-to-gnu11-fix.patch b/2980_GCC15-gnu23-to-gnu11-fix.patch
deleted file mode 100644
index c74b6180..00000000
--- a/2980_GCC15-gnu23-to-gnu11-fix.patch
+++ /dev/null
@@ -1,105 +0,0 @@
-GCC 15 defaults to -std=gnu23. While most of the kernel builds with -std=gnu11,
-some of it forgets to pass that flag. Hack in CSTD_FLAG to pass -std=gnu11
-everywhere.
-
-https://lore.kernel.org/linux-kbuild/20241119044724.GA2246422@thelio-3990X/
---- a/Makefile
-+++ b/Makefile
-@@ -416,6 +416,8 @@ export KCONFIG_CONFIG
- # SHELL used by kbuild
- CONFIG_SHELL := sh
- 
-+CSTD_FLAG := -std=gnu11
-+
- HOST_LFS_CFLAGS := $(shell getconf LFS_CFLAGS 2>/dev/null)
- HOST_LFS_LDFLAGS := $(shell getconf LFS_LDFLAGS 2>/dev/null)
- HOST_LFS_LIBS := $(shell getconf LFS_LIBS 2>/dev/null)
-@@ -437,7 +439,7 @@ HOSTRUSTC = rustc
- HOSTPKG_CONFIG	= pkg-config
- 
- KBUILD_USERHOSTCFLAGS := -Wall -Wmissing-prototypes -Wstrict-prototypes \
--			 -O2 -fomit-frame-pointer -std=gnu11
-+			 -O2 -fomit-frame-pointer $(CSTD_FLAG)
- KBUILD_USERCFLAGS  := $(KBUILD_USERHOSTCFLAGS) $(USERCFLAGS)
- KBUILD_USERLDFLAGS := $(USERLDFLAGS)
- 
-@@ -545,7 +547,7 @@ LINUXINCLUDE    := \
- KBUILD_AFLAGS   := -D__ASSEMBLY__ -fno-PIE
- 
- KBUILD_CFLAGS :=
--KBUILD_CFLAGS += -std=gnu11
-+KBUILD_CFLAGS += $(CSTD_FLAG)
- KBUILD_CFLAGS += -fshort-wchar
- KBUILD_CFLAGS += -funsigned-char
- KBUILD_CFLAGS += -fno-common
-@@ -589,7 +591,7 @@ export CPP AR NM STRIP OBJCOPY OBJDUMP READELF PAHOLE RESOLVE_BTFIDS LEX YACC AW
- export PERL PYTHON3 CHECK CHECKFLAGS MAKE UTS_MACHINE HOSTCXX
- export KGZIP KBZIP2 KLZOP LZMA LZ4 XZ ZSTD
- export KBUILD_HOSTCXXFLAGS KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS LDFLAGS_MODULE
--export KBUILD_USERCFLAGS KBUILD_USERLDFLAGS
-+export KBUILD_USERCFLAGS KBUILD_USERLDFLAGS CSTD_FLAG
- 
- export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS KBUILD_LDFLAGS
- export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE
---- a/arch/arm64/kernel/vdso32/Makefile
-+++ b/arch/arm64/kernel/vdso32/Makefile
-@@ -65,7 +65,7 @@ VDSO_CFLAGS += -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
-                -fno-strict-aliasing -fno-common \
-                -Werror-implicit-function-declaration \
-                -Wno-format-security \
--               -std=gnu11
-+               $(CSTD_FLAG)
- VDSO_CFLAGS  += -O2
- # Some useful compiler-dependent flags from top-level Makefile
- VDSO_CFLAGS += $(call cc32-option,-Wno-pointer-sign)
---- a/arch/x86/Makefile
-+++ b/arch/x86/Makefile
-@@ -47,7 +47,7 @@ endif
- 
- # How to compile the 16-bit code.  Note we always compile for -march=i386;
- # that way we can complain to the user if the CPU is insufficient.
--REALMODE_CFLAGS	:= -std=gnu11 -m16 -g -Os -DDISABLE_BRANCH_PROFILING -D__DISABLE_EXPORTS \
-+REALMODE_CFLAGS	:= $(CSTD_FLAG) -m16 -g -Os -DDISABLE_BRANCH_PROFILING -D__DISABLE_EXPORTS \
- 		   -Wall -Wstrict-prototypes -march=i386 -mregparm=3 \
- 		   -fno-strict-aliasing -fomit-frame-pointer -fno-pic \
- 		   -mno-mmx -mno-sse $(call cc-option,-fcf-protection=none)
---- a/drivers/firmware/efi/libstub/Makefile
-+++ b/drivers/firmware/efi/libstub/Makefile
-@@ -7,7 +7,7 @@
- #
- 
- # non-x86 reuses KBUILD_CFLAGS, x86 does not
--cflags-y			:= $(KBUILD_CFLAGS)
-+cflags-y			:= $(KBUILD_CFLAGS) $(CSTD_FLAG)
- 
- cflags-$(CONFIG_X86_32)		:= -march=i386
- cflags-$(CONFIG_X86_64)		:= -mcmodel=small
-@@ -18,7 +18,7 @@ cflags-$(CONFIG_X86)		+= -m$(BITS) -D__KERNEL__ \
- 				   $(call cc-disable-warning, address-of-packed-member) \
- 				   $(call cc-disable-warning, gnu) \
- 				   -fno-asynchronous-unwind-tables \
--				   $(CLANG_FLAGS)
-+				   $(CLANG_FLAGS) $(CSTD_FLAG)
- 
- # arm64 uses the full KBUILD_CFLAGS so it's necessary to explicitly
- # disable the stackleak plugin
-@@ -42,7 +42,7 @@ KBUILD_CFLAGS			:= $(subst $(CC_FLAGS_FTRACE),,$(cflags-y)) \
- 				   -ffreestanding \
- 				   -fno-stack-protector \
- 				   $(call cc-option,-fno-addrsig) \
--				   -D__DISABLE_EXPORTS
-+				   -D__DISABLE_EXPORTS $(CSTD_FLAG)
- 
- #
- # struct randomization only makes sense for Linux internal types, which the EFI
---- a/arch/x86/boot/compressed/Makefile
-+++ b/arch/x86/boot/compressed/Makefile
-@@ -24,7 +24,7 @@ targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma \
- # case of cross compiling, as it has the '--target=' flag, which is needed to
- # avoid errors with '-march=i386', and future flags may depend on the target to
- # be valid.
--KBUILD_CFLAGS := -m$(BITS) -O2 $(CLANG_FLAGS)
-+KBUILD_CFLAGS := -m$(BITS) -O2 $(CLANG_FLAGS) $(CSTD_FLAG)
- KBUILD_CFLAGS += -fno-strict-aliasing -fPIE
- KBUILD_CFLAGS += -Wundef
- KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING


* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2025-02-17 11:16 Mike Pagano
  0 siblings, 0 replies; 82+ messages in thread
From: Mike Pagano @ 2025-02-17 11:16 UTC (permalink / raw
  To: gentoo-commits

commit:     ac1b056c5231ef785a638ac21cacdd2697fd115c
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Feb 17 11:16:10 2025 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Feb 17 11:16:10 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=ac1b056c

Linux patch 6.12.14

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |     4 +
 1013_linux-6.12.14.patch | 17568 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 17572 insertions(+)

diff --git a/0000_README b/0000_README
index 499702fa..c6c607fe 100644
--- a/0000_README
+++ b/0000_README
@@ -95,6 +95,10 @@ Patch:  1012_linux-6.12.13.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.12.13
 
+Patch:  1013_linux-6.12.14.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.12.14
+
 Patch:  1500_fortify-copy-size-value-range-tracking-fix.patch
 From:   https://git.kernel.org/
 Desc:   fortify: Hide run-time copy size from value range tracking

diff --git a/1013_linux-6.12.14.patch b/1013_linux-6.12.14.patch
new file mode 100644
index 00000000..5243c324
--- /dev/null
+++ b/1013_linux-6.12.14.patch
@@ -0,0 +1,17568 @@
+diff --git a/Documentation/arch/arm64/elf_hwcaps.rst b/Documentation/arch/arm64/elf_hwcaps.rst
+index 694f67fa07d196..ab556426c7ac24 100644
+--- a/Documentation/arch/arm64/elf_hwcaps.rst
++++ b/Documentation/arch/arm64/elf_hwcaps.rst
+@@ -174,22 +174,28 @@ HWCAP2_DCPODP
+     Functionality implied by ID_AA64ISAR1_EL1.DPB == 0b0010.
+ 
+ HWCAP2_SVE2
+-    Functionality implied by ID_AA64ZFR0_EL1.SVEver == 0b0001.
++    Functionality implied by ID_AA64PFR0_EL1.SVE == 0b0001 and
++    ID_AA64ZFR0_EL1.SVEver == 0b0001.
+ 
+ HWCAP2_SVEAES
+-    Functionality implied by ID_AA64ZFR0_EL1.AES == 0b0001.
++    Functionality implied by ID_AA64PFR0_EL1.SVE == 0b0001 and
++    ID_AA64ZFR0_EL1.AES == 0b0001.
+ 
+ HWCAP2_SVEPMULL
+-    Functionality implied by ID_AA64ZFR0_EL1.AES == 0b0010.
++    Functionality implied by ID_AA64PFR0_EL1.SVE == 0b0001 and
++    ID_AA64ZFR0_EL1.AES == 0b0010.
+ 
+ HWCAP2_SVEBITPERM
+-    Functionality implied by ID_AA64ZFR0_EL1.BitPerm == 0b0001.
++    Functionality implied by ID_AA64PFR0_EL1.SVE == 0b0001 and
++    ID_AA64ZFR0_EL1.BitPerm == 0b0001.
+ 
+ HWCAP2_SVESHA3
+-    Functionality implied by ID_AA64ZFR0_EL1.SHA3 == 0b0001.
++    Functionality implied by ID_AA64PFR0_EL1.SVE == 0b0001 and
++    ID_AA64ZFR0_EL1.SHA3 == 0b0001.
+ 
+ HWCAP2_SVESM4
+-    Functionality implied by ID_AA64ZFR0_EL1.SM4 == 0b0001.
++    Functionality implied by ID_AA64PFR0_EL1.SVE == 0b0001 and
++    ID_AA64ZFR0_EL1.SM4 == 0b0001.
+ 
+ HWCAP2_FLAGM2
+     Functionality implied by ID_AA64ISAR0_EL1.TS == 0b0010.
+@@ -198,16 +204,20 @@ HWCAP2_FRINT
+     Functionality implied by ID_AA64ISAR1_EL1.FRINTTS == 0b0001.
+ 
+ HWCAP2_SVEI8MM
+-    Functionality implied by ID_AA64ZFR0_EL1.I8MM == 0b0001.
++    Functionality implied by ID_AA64PFR0_EL1.SVE == 0b0001 and
++    ID_AA64ZFR0_EL1.I8MM == 0b0001.
+ 
+ HWCAP2_SVEF32MM
+-    Functionality implied by ID_AA64ZFR0_EL1.F32MM == 0b0001.
++    Functionality implied by ID_AA64PFR0_EL1.SVE == 0b0001 and
++    ID_AA64ZFR0_EL1.F32MM == 0b0001.
+ 
+ HWCAP2_SVEF64MM
+-    Functionality implied by ID_AA64ZFR0_EL1.F64MM == 0b0001.
++    Functionality implied by ID_AA64PFR0_EL1.SVE == 0b0001 and
++    ID_AA64ZFR0_EL1.F64MM == 0b0001.
+ 
+ HWCAP2_SVEBF16
+-    Functionality implied by ID_AA64ZFR0_EL1.BF16 == 0b0001.
++    Functionality implied by ID_AA64PFR0_EL1.SVE == 0b0001 and
++    ID_AA64ZFR0_EL1.BF16 == 0b0001.
+ 
+ HWCAP2_I8MM
+     Functionality implied by ID_AA64ISAR1_EL1.I8MM == 0b0001.
+@@ -273,7 +283,8 @@ HWCAP2_EBF16
+     Functionality implied by ID_AA64ISAR1_EL1.BF16 == 0b0010.
+ 
+ HWCAP2_SVE_EBF16
+-    Functionality implied by ID_AA64ZFR0_EL1.BF16 == 0b0010.
++    Functionality implied by ID_AA64PFR0_EL1.SVE == 0b0001 and
++    ID_AA64ZFR0_EL1.BF16 == 0b0010.
+ 
+ HWCAP2_CSSC
+     Functionality implied by ID_AA64ISAR2_EL1.CSSC == 0b0001.
+@@ -282,7 +293,8 @@ HWCAP2_RPRFM
+     Functionality implied by ID_AA64ISAR2_EL1.RPRFM == 0b0001.
+ 
+ HWCAP2_SVE2P1
+-    Functionality implied by ID_AA64ZFR0_EL1.SVEver == 0b0010.
++    Functionality implied by ID_AA64PFR0_EL1.SVE == 0b0001 and
++    ID_AA64ZFR0_EL1.SVEver == 0b0010.
+ 
+ HWCAP2_SME2
+     Functionality implied by ID_AA64SMFR0_EL1.SMEver == 0b0001.
+@@ -309,7 +321,8 @@ HWCAP2_HBC
+     Functionality implied by ID_AA64ISAR2_EL1.BC == 0b0001.
+ 
+ HWCAP2_SVE_B16B16
+-    Functionality implied by ID_AA64ZFR0_EL1.B16B16 == 0b0001.
++    Functionality implied by ID_AA64PFR0_EL1.SVE == 0b0001 and
++    ID_AA64ZFR0_EL1.B16B16 == 0b0001.
+ 
+ HWCAP2_LRCPC3
+     Functionality implied by ID_AA64ISAR1_EL1.LRCPC == 0b0011.
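
The hunks above only tighten the documented conditions (each SVE hwcap now explicitly requires ID_AA64PFR0_EL1.SVE == 0b0001 as well); how userspace consumes the bits is unchanged. Programs test the hwcaps the kernel publishes in the auxiliary vector rather than reading ID registers directly. A minimal sketch, assuming glibc's getauxval() and the HWCAP2_SVE2 bit value from arm64's <asm/hwcap.h>:

#include <stdio.h>
#include <sys/auxv.h>           /* getauxval(), AT_HWCAP2 */

#ifndef HWCAP2_SVE2
#define HWCAP2_SVE2 (1UL << 1)  /* arm64 value; fallback for other targets */
#endif

int main(void)
{
        unsigned long hwcap2 = getauxval(AT_HWCAP2);

        if (hwcap2 & HWCAP2_SVE2)
                puts("SVE2 reported by the kernel");
        else
                puts("SVE2 not reported");
        return 0;
}
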
+diff --git a/Documentation/gpu/drm-kms-helpers.rst b/Documentation/gpu/drm-kms-helpers.rst
+index c3e58856f75b36..96c03b9a644e4f 100644
+--- a/Documentation/gpu/drm-kms-helpers.rst
++++ b/Documentation/gpu/drm-kms-helpers.rst
+@@ -230,6 +230,9 @@ Panel Helper Reference
+ .. kernel-doc:: drivers/gpu/drm/drm_panel_orientation_quirks.c
+    :export:
+ 
++.. kernel-doc:: drivers/gpu/drm/drm_panel_backlight_quirks.c
++   :export:
++
+ Panel Self Refresh Helper Reference
+ ===================================
+ 
+diff --git a/Makefile b/Makefile
+index 5442ff45f963ed..26a471dbed62a5 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 12
+-SUBLEVEL = 13
++SUBLEVEL = 14
+ EXTRAVERSION =
+ NAME = Baby Opossum Posse
+ 
+diff --git a/arch/arm/boot/dts/ti/omap/dra7-l4.dtsi b/arch/arm/boot/dts/ti/omap/dra7-l4.dtsi
+index 6e67d99832ac25..ba7fdaae9c6e6d 100644
+--- a/arch/arm/boot/dts/ti/omap/dra7-l4.dtsi
++++ b/arch/arm/boot/dts/ti/omap/dra7-l4.dtsi
+@@ -12,6 +12,7 @@ &l4_cfg {						/* 0x4a000000 */
+ 	ranges = <0x00000000 0x4a000000 0x100000>,	/* segment 0 */
+ 		 <0x00100000 0x4a100000 0x100000>,	/* segment 1 */
+ 		 <0x00200000 0x4a200000 0x100000>;	/* segment 2 */
++	dma-ranges;
+ 
+ 	segment@0 {					/* 0x4a000000 */
+ 		compatible = "simple-pm-bus";
+@@ -557,6 +558,7 @@ segment@100000 {					/* 0x4a100000 */
+ 			 <0x0007e000 0x0017e000 0x001000>,	/* ap 124 */
+ 			 <0x00059000 0x00159000 0x001000>,	/* ap 125 */
+ 			 <0x0005a000 0x0015a000 0x001000>;	/* ap 126 */
++		dma-ranges;
+ 
+ 		target-module@2000 {			/* 0x4a102000, ap 27 3c.0 */
+ 			compatible = "ti,sysc";
+diff --git a/arch/arm/boot/dts/ti/omap/omap3-gta04.dtsi b/arch/arm/boot/dts/ti/omap/omap3-gta04.dtsi
+index 3661340009e7a4..8dca2bed941b64 100644
+--- a/arch/arm/boot/dts/ti/omap/omap3-gta04.dtsi
++++ b/arch/arm/boot/dts/ti/omap/omap3-gta04.dtsi
+@@ -446,6 +446,7 @@ &omap3_pmx_core2 {
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <
+ 			&hsusb2_2_pins
++			&mcspi3hog_pins
+ 	>;
+ 
+ 	hsusb2_2_pins: hsusb2-2-pins {
+@@ -459,6 +460,15 @@ OMAP3630_CORE2_IOPAD(0x25fa, PIN_INPUT_PULLDOWN | MUX_MODE3)	/* etk_d15.hsusb2_d
+ 		>;
+ 	};
+ 
++	mcspi3hog_pins: mcspi3hog-pins {
++		pinctrl-single,pins = <
++			OMAP3630_CORE2_IOPAD(0x25dc, PIN_OUTPUT_PULLDOWN | MUX_MODE4)	/* etk_d0 */
++			OMAP3630_CORE2_IOPAD(0x25de, PIN_OUTPUT_PULLDOWN | MUX_MODE4)	/* etk_d1 */
++			OMAP3630_CORE2_IOPAD(0x25e0, PIN_OUTPUT_PULLDOWN | MUX_MODE4)	/* etk_d2 */
++			OMAP3630_CORE2_IOPAD(0x25e2, PIN_OUTPUT_PULLDOWN | MUX_MODE4)	/* etk_d3 */
++		>;
++	};
++
+ 	spi_gpio_pins: spi-gpio-pinmux-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP3630_CORE2_IOPAD(0x25d8, PIN_OUTPUT | MUX_MODE4) /* clk */
+diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
+index 07ae3c8e897b7d..22924f61ec9ed2 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
+@@ -290,11 +290,6 @@ dsi_out: endpoint {
+ 	};
+ };
+ 
+-&dpi0 {
+-	/* TODO Re-enable after DP to Type-C port muxing can be described */
+-	status = "disabled";
+-};
+-
+ &gic {
+ 	mediatek,broken-save-restore-fw;
+ };
+diff --git a/arch/arm64/boot/dts/mediatek/mt8183.dtsi b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
+index 9cd5e0cef02a29..5cb6bd3c5acbb0 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8183.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
+@@ -1846,6 +1846,7 @@ dpi0: dpi@14015000 {
+ 				 <&mmsys CLK_MM_DPI_MM>,
+ 				 <&apmixedsys CLK_APMIXED_TVDPLL>;
+ 			clock-names = "pixel", "engine", "pll";
++			status = "disabled";
+ 
+ 			port {
+ 				dpi_out: endpoint { };
+diff --git a/arch/arm64/boot/dts/nvidia/tegra234.dtsi b/arch/arm64/boot/dts/nvidia/tegra234.dtsi
+index 570331baa09ee3..2601b43b2d8cad 100644
+--- a/arch/arm64/boot/dts/nvidia/tegra234.dtsi
++++ b/arch/arm64/boot/dts/nvidia/tegra234.dtsi
+@@ -3815,7 +3815,7 @@ sce-fabric@b600000 {
+ 			compatible = "nvidia,tegra234-sce-fabric";
+ 			reg = <0x0 0xb600000 0x0 0x40000>;
+ 			interrupts = <GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH>;
+-			status = "okay";
++			status = "disabled";
+ 		};
+ 
+ 		rce-fabric@be00000 {
+@@ -3995,7 +3995,7 @@ bpmp-fabric@d600000 {
+ 		};
+ 
+ 		dce-fabric@de00000 {
+-			compatible = "nvidia,tegra234-sce-fabric";
++			compatible = "nvidia,tegra234-dce-fabric";
+ 			reg = <0x0 0xde00000 0x0 0x40000>;
+ 			interrupts = <GIC_SPI 381 IRQ_TYPE_LEVEL_HIGH>;
+ 			status = "okay";
+@@ -4018,6 +4018,8 @@ gic: interrupt-controller@f400000 {
+ 			#redistributor-regions = <1>;
+ 			#interrupt-cells = <3>;
+ 			interrupt-controller;
++
++			#address-cells = <0>;
+ 		};
+ 
+ 		smmu_iso: iommu@10000000 {
+diff --git a/arch/arm64/boot/dts/qcom/sdx75.dtsi b/arch/arm64/boot/dts/qcom/sdx75.dtsi
+index dcb925348e3f31..60a5d6d3ca7cc8 100644
+--- a/arch/arm64/boot/dts/qcom/sdx75.dtsi
++++ b/arch/arm64/boot/dts/qcom/sdx75.dtsi
+@@ -893,7 +893,7 @@ tcsr: syscon@1fc0000 {
+ 
+ 		remoteproc_mpss: remoteproc@4080000 {
+ 			compatible = "qcom,sdx75-mpss-pas";
+-			reg = <0 0x04080000 0 0x4040>;
++			reg = <0 0x04080000 0 0x10000>;
+ 
+ 			interrupts-extended = <&intc GIC_SPI 250 IRQ_TYPE_EDGE_RISING>,
+ 					      <&smp2p_modem_in 0 IRQ_TYPE_EDGE_RISING>,
+diff --git a/arch/arm64/boot/dts/qcom/sm6115.dtsi b/arch/arm64/boot/dts/qcom/sm6115.dtsi
+index 41216cc319d65e..4adadfd1e51ae9 100644
+--- a/arch/arm64/boot/dts/qcom/sm6115.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm6115.dtsi
+@@ -2027,7 +2027,7 @@ dispcc: clock-controller@5f00000 {
+ 
+ 		remoteproc_mpss: remoteproc@6080000 {
+ 			compatible = "qcom,sm6115-mpss-pas";
+-			reg = <0x0 0x06080000 0x0 0x100>;
++			reg = <0x0 0x06080000 0x0 0x10000>;
+ 
+ 			interrupts-extended = <&intc GIC_SPI 307 IRQ_TYPE_EDGE_RISING>,
+ 					      <&modem_smp2p_in 0 IRQ_TYPE_EDGE_RISING>,
+@@ -2670,9 +2670,9 @@ funnel_apss1_in: endpoint {
+ 			};
+ 		};
+ 
+-		remoteproc_adsp: remoteproc@ab00000 {
++		remoteproc_adsp: remoteproc@a400000 {
+ 			compatible = "qcom,sm6115-adsp-pas";
+-			reg = <0x0 0x0ab00000 0x0 0x100>;
++			reg = <0x0 0x0a400000 0x0 0x4040>;
+ 
+ 			interrupts-extended = <&intc GIC_SPI 282 IRQ_TYPE_EDGE_RISING>,
+ 					      <&adsp_smp2p_in 0 IRQ_TYPE_EDGE_RISING>,
+@@ -2744,7 +2744,7 @@ compute-cb@7 {
+ 
+ 		remoteproc_cdsp: remoteproc@b300000 {
+ 			compatible = "qcom,sm6115-cdsp-pas";
+-			reg = <0x0 0x0b300000 0x0 0x100000>;
++			reg = <0x0 0x0b300000 0x0 0x4040>;
+ 
+ 			interrupts-extended = <&intc GIC_SPI 265 IRQ_TYPE_EDGE_RISING>,
+ 					      <&cdsp_smp2p_in 0 IRQ_TYPE_EDGE_RISING>,
+diff --git a/arch/arm64/boot/dts/qcom/sm6350.dtsi b/arch/arm64/boot/dts/qcom/sm6350.dtsi
+index 4f8477de7e1b1e..10418fccfea24f 100644
+--- a/arch/arm64/boot/dts/qcom/sm6350.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm6350.dtsi
+@@ -936,7 +936,7 @@ uart1: serial@884000 {
+ 				power-domains = <&rpmhpd SM6350_CX>;
+ 				operating-points-v2 = <&qup_opp_table>;
+ 				interconnects = <&clk_virt MASTER_QUP_CORE_0 0 &clk_virt SLAVE_QUP_CORE_0 0>,
+-						<&aggre1_noc MASTER_QUP_0 0 &clk_virt SLAVE_EBI_CH0 0>;
++						<&gem_noc MASTER_AMPSS_M0 0 &config_noc SLAVE_QUP_0 0>;
+ 				interconnect-names = "qup-core", "qup-config";
+ 				status = "disabled";
+ 			};
+@@ -1283,7 +1283,7 @@ tcsr_mutex: hwlock@1f40000 {
+ 
+ 		adsp: remoteproc@3000000 {
+ 			compatible = "qcom,sm6350-adsp-pas";
+-			reg = <0 0x03000000 0 0x100>;
++			reg = <0x0 0x03000000 0x0 0x10000>;
+ 
+ 			interrupts-extended = <&pdc 6 IRQ_TYPE_EDGE_RISING>,
+ 					      <&smp2p_adsp_in 0 IRQ_TYPE_EDGE_RISING>,
+@@ -1503,7 +1503,7 @@ gpucc: clock-controller@3d90000 {
+ 
+ 		mpss: remoteproc@4080000 {
+ 			compatible = "qcom,sm6350-mpss-pas";
+-			reg = <0x0 0x04080000 0x0 0x4040>;
++			reg = <0x0 0x04080000 0x0 0x10000>;
+ 
+ 			interrupts-extended = <&intc GIC_SPI 136 IRQ_TYPE_EDGE_RISING>,
+ 					      <&modem_smp2p_in 0 IRQ_TYPE_EDGE_RISING>,
+diff --git a/arch/arm64/boot/dts/qcom/sm6375.dtsi b/arch/arm64/boot/dts/qcom/sm6375.dtsi
+index 72e01437ded125..01371f41f7906b 100644
+--- a/arch/arm64/boot/dts/qcom/sm6375.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm6375.dtsi
+@@ -1516,9 +1516,9 @@ gpucc: clock-controller@5990000 {
+ 			#power-domain-cells = <1>;
+ 		};
+ 
+-		remoteproc_mss: remoteproc@6000000 {
++		remoteproc_mss: remoteproc@6080000 {
+ 			compatible = "qcom,sm6375-mpss-pas";
+-			reg = <0 0x06000000 0 0x4040>;
++			reg = <0x0 0x06080000 0x0 0x10000>;
+ 
+ 			interrupts-extended = <&intc GIC_SPI 307 IRQ_TYPE_EDGE_RISING>,
+ 					      <&smp2p_modem_in 0 IRQ_TYPE_EDGE_RISING>,
+@@ -1559,7 +1559,7 @@ IPCC_MPROC_SIGNAL_GLINK_QMP
+ 
+ 		remoteproc_adsp: remoteproc@a400000 {
+ 			compatible = "qcom,sm6375-adsp-pas";
+-			reg = <0 0x0a400000 0 0x100>;
++			reg = <0 0x0a400000 0 0x10000>;
+ 
+ 			interrupts-extended = <&intc GIC_SPI 282 IRQ_TYPE_EDGE_RISING>,
+ 					      <&smp2p_adsp_in 0 IRQ_TYPE_EDGE_RISING>,
+@@ -1595,9 +1595,9 @@ IPCC_MPROC_SIGNAL_GLINK_QMP
+ 			};
+ 		};
+ 
+-		remoteproc_cdsp: remoteproc@b000000 {
++		remoteproc_cdsp: remoteproc@b300000 {
+ 			compatible = "qcom,sm6375-cdsp-pas";
+-			reg = <0x0 0x0b000000 0x0 0x100000>;
++			reg = <0x0 0x0b300000 0x0 0x10000>;
+ 
+ 			interrupts-extended = <&intc GIC_SPI 265 IRQ_TYPE_EDGE_RISING>,
+ 					      <&smp2p_cdsp_in 0 IRQ_TYPE_EDGE_RISING>,
+diff --git a/arch/arm64/boot/dts/qcom/sm8350.dtsi b/arch/arm64/boot/dts/qcom/sm8350.dtsi
+index 041750d71e4550..46adf10e5fe4d6 100644
+--- a/arch/arm64/boot/dts/qcom/sm8350.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8350.dtsi
+@@ -1876,6 +1876,142 @@ tcsr: syscon@1fc0000 {
+ 			reg = <0x0 0x1fc0000 0x0 0x30000>;
+ 		};
+ 
++		adsp: remoteproc@3000000 {
++			compatible = "qcom,sm8350-adsp-pas";
++			reg = <0x0 0x03000000 0x0 0x10000>;
++
++			interrupts-extended = <&pdc 6 IRQ_TYPE_EDGE_RISING>,
++					      <&smp2p_adsp_in 0 IRQ_TYPE_EDGE_RISING>,
++					      <&smp2p_adsp_in 1 IRQ_TYPE_EDGE_RISING>,
++					      <&smp2p_adsp_in 2 IRQ_TYPE_EDGE_RISING>,
++					      <&smp2p_adsp_in 3 IRQ_TYPE_EDGE_RISING>;
++			interrupt-names = "wdog", "fatal", "ready",
++					  "handover", "stop-ack";
++
++			clocks = <&rpmhcc RPMH_CXO_CLK>;
++			clock-names = "xo";
++
++			power-domains = <&rpmhpd RPMHPD_LCX>,
++					<&rpmhpd RPMHPD_LMX>;
++			power-domain-names = "lcx", "lmx";
++
++			memory-region = <&pil_adsp_mem>;
++
++			qcom,qmp = <&aoss_qmp>;
++
++			qcom,smem-states = <&smp2p_adsp_out 0>;
++			qcom,smem-state-names = "stop";
++
++			status = "disabled";
++
++			glink-edge {
++				interrupts-extended = <&ipcc IPCC_CLIENT_LPASS
++							     IPCC_MPROC_SIGNAL_GLINK_QMP
++							     IRQ_TYPE_EDGE_RISING>;
++				mboxes = <&ipcc IPCC_CLIENT_LPASS
++						IPCC_MPROC_SIGNAL_GLINK_QMP>;
++
++				label = "lpass";
++				qcom,remote-pid = <2>;
++
++				apr {
++					compatible = "qcom,apr-v2";
++					qcom,glink-channels = "apr_audio_svc";
++					qcom,domain = <APR_DOMAIN_ADSP>;
++					#address-cells = <1>;
++					#size-cells = <0>;
++
++					service@3 {
++						reg = <APR_SVC_ADSP_CORE>;
++						compatible = "qcom,q6core";
++						qcom,protection-domain = "avs/audio", "msm/adsp/audio_pd";
++					};
++
++					q6afe: service@4 {
++						compatible = "qcom,q6afe";
++						reg = <APR_SVC_AFE>;
++						qcom,protection-domain = "avs/audio", "msm/adsp/audio_pd";
++
++						q6afedai: dais {
++							compatible = "qcom,q6afe-dais";
++							#address-cells = <1>;
++							#size-cells = <0>;
++							#sound-dai-cells = <1>;
++						};
++
++						q6afecc: clock-controller {
++							compatible = "qcom,q6afe-clocks";
++							#clock-cells = <2>;
++						};
++					};
++
++					q6asm: service@7 {
++						compatible = "qcom,q6asm";
++						reg = <APR_SVC_ASM>;
++						qcom,protection-domain = "avs/audio", "msm/adsp/audio_pd";
++
++						q6asmdai: dais {
++							compatible = "qcom,q6asm-dais";
++							#address-cells = <1>;
++							#size-cells = <0>;
++							#sound-dai-cells = <1>;
++							iommus = <&apps_smmu 0x1801 0x0>;
++
++							dai@0 {
++								reg = <0>;
++							};
++
++							dai@1 {
++								reg = <1>;
++							};
++
++							dai@2 {
++								reg = <2>;
++							};
++						};
++					};
++
++					q6adm: service@8 {
++						compatible = "qcom,q6adm";
++						reg = <APR_SVC_ADM>;
++						qcom,protection-domain = "avs/audio", "msm/adsp/audio_pd";
++
++						q6routing: routing {
++							compatible = "qcom,q6adm-routing";
++							#sound-dai-cells = <0>;
++						};
++					};
++				};
++
++				fastrpc {
++					compatible = "qcom,fastrpc";
++					qcom,glink-channels = "fastrpcglink-apps-dsp";
++					label = "adsp";
++					qcom,non-secure-domain;
++					#address-cells = <1>;
++					#size-cells = <0>;
++
++					compute-cb@3 {
++						compatible = "qcom,fastrpc-compute-cb";
++						reg = <3>;
++						iommus = <&apps_smmu 0x1803 0x0>;
++					};
++
++					compute-cb@4 {
++						compatible = "qcom,fastrpc-compute-cb";
++						reg = <4>;
++						iommus = <&apps_smmu 0x1804 0x0>;
++					};
++
++					compute-cb@5 {
++						compatible = "qcom,fastrpc-compute-cb";
++						reg = <5>;
++						iommus = <&apps_smmu 0x1805 0x0>;
++					};
++				};
++			};
++		};
++
+ 		lpass_tlmm: pinctrl@33c0000 {
+ 			compatible = "qcom,sm8350-lpass-lpi-pinctrl";
+ 			reg = <0 0x033c0000 0 0x20000>,
+@@ -2078,7 +2214,7 @@ lpass_ag_noc: interconnect@3c40000 {
+ 
+ 		mpss: remoteproc@4080000 {
+ 			compatible = "qcom,sm8350-mpss-pas";
+-			reg = <0x0 0x04080000 0x0 0x4040>;
++			reg = <0x0 0x04080000 0x0 0x10000>;
+ 
+ 			interrupts-extended = <&intc GIC_SPI 264 IRQ_TYPE_EDGE_RISING>,
+ 					      <&smp2p_modem_in 0 IRQ_TYPE_EDGE_RISING>,
+@@ -2360,6 +2496,115 @@ compute_noc: interconnect@a0c0000 {
+ 			qcom,bcm-voters = <&apps_bcm_voter>;
+ 		};
+ 
++		cdsp: remoteproc@a300000 {
++			compatible = "qcom,sm8350-cdsp-pas";
++			reg = <0x0 0x0a300000 0x0 0x10000>;
++
++			interrupts-extended = <&intc GIC_SPI 578 IRQ_TYPE_EDGE_RISING>,
++					      <&smp2p_cdsp_in 0 IRQ_TYPE_EDGE_RISING>,
++					      <&smp2p_cdsp_in 1 IRQ_TYPE_EDGE_RISING>,
++					      <&smp2p_cdsp_in 2 IRQ_TYPE_EDGE_RISING>,
++					      <&smp2p_cdsp_in 3 IRQ_TYPE_EDGE_RISING>;
++			interrupt-names = "wdog", "fatal", "ready",
++					  "handover", "stop-ack";
++
++			clocks = <&rpmhcc RPMH_CXO_CLK>;
++			clock-names = "xo";
++
++			power-domains = <&rpmhpd RPMHPD_CX>,
++					<&rpmhpd RPMHPD_MXC>;
++			power-domain-names = "cx", "mxc";
++
++			interconnects = <&compute_noc MASTER_CDSP_PROC 0 &mc_virt SLAVE_EBI1 0>;
++
++			memory-region = <&pil_cdsp_mem>;
++
++			qcom,qmp = <&aoss_qmp>;
++
++			qcom,smem-states = <&smp2p_cdsp_out 0>;
++			qcom,smem-state-names = "stop";
++
++			status = "disabled";
++
++			glink-edge {
++				interrupts-extended = <&ipcc IPCC_CLIENT_CDSP
++							     IPCC_MPROC_SIGNAL_GLINK_QMP
++							     IRQ_TYPE_EDGE_RISING>;
++				mboxes = <&ipcc IPCC_CLIENT_CDSP
++						IPCC_MPROC_SIGNAL_GLINK_QMP>;
++
++				label = "cdsp";
++				qcom,remote-pid = <5>;
++
++				fastrpc {
++					compatible = "qcom,fastrpc";
++					qcom,glink-channels = "fastrpcglink-apps-dsp";
++					label = "cdsp";
++					qcom,non-secure-domain;
++					#address-cells = <1>;
++					#size-cells = <0>;
++
++					compute-cb@1 {
++						compatible = "qcom,fastrpc-compute-cb";
++						reg = <1>;
++						iommus = <&apps_smmu 0x2161 0x0400>,
++							 <&apps_smmu 0x1181 0x0420>;
++					};
++
++					compute-cb@2 {
++						compatible = "qcom,fastrpc-compute-cb";
++						reg = <2>;
++						iommus = <&apps_smmu 0x2162 0x0400>,
++							 <&apps_smmu 0x1182 0x0420>;
++					};
++
++					compute-cb@3 {
++						compatible = "qcom,fastrpc-compute-cb";
++						reg = <3>;
++						iommus = <&apps_smmu 0x2163 0x0400>,
++							 <&apps_smmu 0x1183 0x0420>;
++					};
++
++					compute-cb@4 {
++						compatible = "qcom,fastrpc-compute-cb";
++						reg = <4>;
++						iommus = <&apps_smmu 0x2164 0x0400>,
++							 <&apps_smmu 0x1184 0x0420>;
++					};
++
++					compute-cb@5 {
++						compatible = "qcom,fastrpc-compute-cb";
++						reg = <5>;
++						iommus = <&apps_smmu 0x2165 0x0400>,
++							 <&apps_smmu 0x1185 0x0420>;
++					};
++
++					compute-cb@6 {
++						compatible = "qcom,fastrpc-compute-cb";
++						reg = <6>;
++						iommus = <&apps_smmu 0x2166 0x0400>,
++							 <&apps_smmu 0x1186 0x0420>;
++					};
++
++					compute-cb@7 {
++						compatible = "qcom,fastrpc-compute-cb";
++						reg = <7>;
++						iommus = <&apps_smmu 0x2167 0x0400>,
++							 <&apps_smmu 0x1187 0x0420>;
++					};
++
++					compute-cb@8 {
++						compatible = "qcom,fastrpc-compute-cb";
++						reg = <8>;
++						iommus = <&apps_smmu 0x2168 0x0400>,
++							 <&apps_smmu 0x1188 0x0420>;
++					};
++
++					/* note: secure cb9 in downstream */
++				};
++			};
++		};
++
+ 		usb_1: usb@a6f8800 {
+ 			compatible = "qcom,sm8350-dwc3", "qcom,dwc3";
+ 			reg = <0 0x0a6f8800 0 0x400>;
+@@ -3284,142 +3529,6 @@ apps_smmu: iommu@15000000 {
+ 				     <GIC_SPI 707 IRQ_TYPE_LEVEL_HIGH>;
+ 		};
+ 
+-		adsp: remoteproc@17300000 {
+-			compatible = "qcom,sm8350-adsp-pas";
+-			reg = <0 0x17300000 0 0x100>;
+-
+-			interrupts-extended = <&pdc 6 IRQ_TYPE_EDGE_RISING>,
+-					      <&smp2p_adsp_in 0 IRQ_TYPE_EDGE_RISING>,
+-					      <&smp2p_adsp_in 1 IRQ_TYPE_EDGE_RISING>,
+-					      <&smp2p_adsp_in 2 IRQ_TYPE_EDGE_RISING>,
+-					      <&smp2p_adsp_in 3 IRQ_TYPE_EDGE_RISING>;
+-			interrupt-names = "wdog", "fatal", "ready",
+-					  "handover", "stop-ack";
+-
+-			clocks = <&rpmhcc RPMH_CXO_CLK>;
+-			clock-names = "xo";
+-
+-			power-domains = <&rpmhpd RPMHPD_LCX>,
+-					<&rpmhpd RPMHPD_LMX>;
+-			power-domain-names = "lcx", "lmx";
+-
+-			memory-region = <&pil_adsp_mem>;
+-
+-			qcom,qmp = <&aoss_qmp>;
+-
+-			qcom,smem-states = <&smp2p_adsp_out 0>;
+-			qcom,smem-state-names = "stop";
+-
+-			status = "disabled";
+-
+-			glink-edge {
+-				interrupts-extended = <&ipcc IPCC_CLIENT_LPASS
+-							     IPCC_MPROC_SIGNAL_GLINK_QMP
+-							     IRQ_TYPE_EDGE_RISING>;
+-				mboxes = <&ipcc IPCC_CLIENT_LPASS
+-						IPCC_MPROC_SIGNAL_GLINK_QMP>;
+-
+-				label = "lpass";
+-				qcom,remote-pid = <2>;
+-
+-				apr {
+-					compatible = "qcom,apr-v2";
+-					qcom,glink-channels = "apr_audio_svc";
+-					qcom,domain = <APR_DOMAIN_ADSP>;
+-					#address-cells = <1>;
+-					#size-cells = <0>;
+-
+-					service@3 {
+-						reg = <APR_SVC_ADSP_CORE>;
+-						compatible = "qcom,q6core";
+-						qcom,protection-domain = "avs/audio", "msm/adsp/audio_pd";
+-					};
+-
+-					q6afe: service@4 {
+-						compatible = "qcom,q6afe";
+-						reg = <APR_SVC_AFE>;
+-						qcom,protection-domain = "avs/audio", "msm/adsp/audio_pd";
+-
+-						q6afedai: dais {
+-							compatible = "qcom,q6afe-dais";
+-							#address-cells = <1>;
+-							#size-cells = <0>;
+-							#sound-dai-cells = <1>;
+-						};
+-
+-						q6afecc: clock-controller {
+-							compatible = "qcom,q6afe-clocks";
+-							#clock-cells = <2>;
+-						};
+-					};
+-
+-					q6asm: service@7 {
+-						compatible = "qcom,q6asm";
+-						reg = <APR_SVC_ASM>;
+-						qcom,protection-domain = "avs/audio", "msm/adsp/audio_pd";
+-
+-						q6asmdai: dais {
+-							compatible = "qcom,q6asm-dais";
+-							#address-cells = <1>;
+-							#size-cells = <0>;
+-							#sound-dai-cells = <1>;
+-							iommus = <&apps_smmu 0x1801 0x0>;
+-
+-							dai@0 {
+-								reg = <0>;
+-							};
+-
+-							dai@1 {
+-								reg = <1>;
+-							};
+-
+-							dai@2 {
+-								reg = <2>;
+-							};
+-						};
+-					};
+-
+-					q6adm: service@8 {
+-						compatible = "qcom,q6adm";
+-						reg = <APR_SVC_ADM>;
+-						qcom,protection-domain = "avs/audio", "msm/adsp/audio_pd";
+-
+-						q6routing: routing {
+-							compatible = "qcom,q6adm-routing";
+-							#sound-dai-cells = <0>;
+-						};
+-					};
+-				};
+-
+-				fastrpc {
+-					compatible = "qcom,fastrpc";
+-					qcom,glink-channels = "fastrpcglink-apps-dsp";
+-					label = "adsp";
+-					qcom,non-secure-domain;
+-					#address-cells = <1>;
+-					#size-cells = <0>;
+-
+-					compute-cb@3 {
+-						compatible = "qcom,fastrpc-compute-cb";
+-						reg = <3>;
+-						iommus = <&apps_smmu 0x1803 0x0>;
+-					};
+-
+-					compute-cb@4 {
+-						compatible = "qcom,fastrpc-compute-cb";
+-						reg = <4>;
+-						iommus = <&apps_smmu 0x1804 0x0>;
+-					};
+-
+-					compute-cb@5 {
+-						compatible = "qcom,fastrpc-compute-cb";
+-						reg = <5>;
+-						iommus = <&apps_smmu 0x1805 0x0>;
+-					};
+-				};
+-			};
+-		};
+-
+ 		intc: interrupt-controller@17a00000 {
+ 			compatible = "arm,gic-v3";
+ 			#interrupt-cells = <3>;
+@@ -3588,115 +3697,6 @@ cpufreq_hw: cpufreq@18591000 {
+ 			#freq-domain-cells = <1>;
+ 			#clock-cells = <1>;
+ 		};
+-
+-		cdsp: remoteproc@98900000 {
+-			compatible = "qcom,sm8350-cdsp-pas";
+-			reg = <0 0x98900000 0 0x1400000>;
+-
+-			interrupts-extended = <&intc GIC_SPI 578 IRQ_TYPE_EDGE_RISING>,
+-					      <&smp2p_cdsp_in 0 IRQ_TYPE_EDGE_RISING>,
+-					      <&smp2p_cdsp_in 1 IRQ_TYPE_EDGE_RISING>,
+-					      <&smp2p_cdsp_in 2 IRQ_TYPE_EDGE_RISING>,
+-					      <&smp2p_cdsp_in 3 IRQ_TYPE_EDGE_RISING>;
+-			interrupt-names = "wdog", "fatal", "ready",
+-					  "handover", "stop-ack";
+-
+-			clocks = <&rpmhcc RPMH_CXO_CLK>;
+-			clock-names = "xo";
+-
+-			power-domains = <&rpmhpd RPMHPD_CX>,
+-					<&rpmhpd RPMHPD_MXC>;
+-			power-domain-names = "cx", "mxc";
+-
+-			interconnects = <&compute_noc MASTER_CDSP_PROC 0 &mc_virt SLAVE_EBI1 0>;
+-
+-			memory-region = <&pil_cdsp_mem>;
+-
+-			qcom,qmp = <&aoss_qmp>;
+-
+-			qcom,smem-states = <&smp2p_cdsp_out 0>;
+-			qcom,smem-state-names = "stop";
+-
+-			status = "disabled";
+-
+-			glink-edge {
+-				interrupts-extended = <&ipcc IPCC_CLIENT_CDSP
+-							     IPCC_MPROC_SIGNAL_GLINK_QMP
+-							     IRQ_TYPE_EDGE_RISING>;
+-				mboxes = <&ipcc IPCC_CLIENT_CDSP
+-						IPCC_MPROC_SIGNAL_GLINK_QMP>;
+-
+-				label = "cdsp";
+-				qcom,remote-pid = <5>;
+-
+-				fastrpc {
+-					compatible = "qcom,fastrpc";
+-					qcom,glink-channels = "fastrpcglink-apps-dsp";
+-					label = "cdsp";
+-					qcom,non-secure-domain;
+-					#address-cells = <1>;
+-					#size-cells = <0>;
+-
+-					compute-cb@1 {
+-						compatible = "qcom,fastrpc-compute-cb";
+-						reg = <1>;
+-						iommus = <&apps_smmu 0x2161 0x0400>,
+-							 <&apps_smmu 0x1181 0x0420>;
+-					};
+-
+-					compute-cb@2 {
+-						compatible = "qcom,fastrpc-compute-cb";
+-						reg = <2>;
+-						iommus = <&apps_smmu 0x2162 0x0400>,
+-							 <&apps_smmu 0x1182 0x0420>;
+-					};
+-
+-					compute-cb@3 {
+-						compatible = "qcom,fastrpc-compute-cb";
+-						reg = <3>;
+-						iommus = <&apps_smmu 0x2163 0x0400>,
+-							 <&apps_smmu 0x1183 0x0420>;
+-					};
+-
+-					compute-cb@4 {
+-						compatible = "qcom,fastrpc-compute-cb";
+-						reg = <4>;
+-						iommus = <&apps_smmu 0x2164 0x0400>,
+-							 <&apps_smmu 0x1184 0x0420>;
+-					};
+-
+-					compute-cb@5 {
+-						compatible = "qcom,fastrpc-compute-cb";
+-						reg = <5>;
+-						iommus = <&apps_smmu 0x2165 0x0400>,
+-							 <&apps_smmu 0x1185 0x0420>;
+-					};
+-
+-					compute-cb@6 {
+-						compatible = "qcom,fastrpc-compute-cb";
+-						reg = <6>;
+-						iommus = <&apps_smmu 0x2166 0x0400>,
+-							 <&apps_smmu 0x1186 0x0420>;
+-					};
+-
+-					compute-cb@7 {
+-						compatible = "qcom,fastrpc-compute-cb";
+-						reg = <7>;
+-						iommus = <&apps_smmu 0x2167 0x0400>,
+-							 <&apps_smmu 0x1187 0x0420>;
+-					};
+-
+-					compute-cb@8 {
+-						compatible = "qcom,fastrpc-compute-cb";
+-						reg = <8>;
+-						iommus = <&apps_smmu 0x2168 0x0400>,
+-							 <&apps_smmu 0x1188 0x0420>;
+-					};
+-
+-					/* note: secure cb9 in downstream */
+-				};
+-			};
+-		};
+ 	};
+ 
+ 	thermal_zones: thermal-zones {
+diff --git a/arch/arm64/boot/dts/qcom/sm8450.dtsi b/arch/arm64/boot/dts/qcom/sm8450.dtsi
+index f7d52e491b694b..d664a88a018efb 100644
+--- a/arch/arm64/boot/dts/qcom/sm8450.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8450.dtsi
+@@ -2492,6 +2492,112 @@ compute-cb@3 {
+ 			};
+ 		};
+ 
++		remoteproc_adsp: remoteproc@3000000 {
++			compatible = "qcom,sm8450-adsp-pas";
++			reg = <0x0 0x03000000 0x0 0x10000>;
++
++			interrupts-extended = <&pdc 6 IRQ_TYPE_EDGE_RISING>,
++					      <&smp2p_adsp_in 0 IRQ_TYPE_EDGE_RISING>,
++					      <&smp2p_adsp_in 1 IRQ_TYPE_EDGE_RISING>,
++					      <&smp2p_adsp_in 2 IRQ_TYPE_EDGE_RISING>,
++					      <&smp2p_adsp_in 3 IRQ_TYPE_EDGE_RISING>;
++			interrupt-names = "wdog", "fatal", "ready",
++					  "handover", "stop-ack";
++
++			clocks = <&rpmhcc RPMH_CXO_CLK>;
++			clock-names = "xo";
++
++			power-domains = <&rpmhpd RPMHPD_LCX>,
++					<&rpmhpd RPMHPD_LMX>;
++			power-domain-names = "lcx", "lmx";
++
++			memory-region = <&adsp_mem>;
++
++			qcom,qmp = <&aoss_qmp>;
++
++			qcom,smem-states = <&smp2p_adsp_out 0>;
++			qcom,smem-state-names = "stop";
++
++			status = "disabled";
++
++			remoteproc_adsp_glink: glink-edge {
++				interrupts-extended = <&ipcc IPCC_CLIENT_LPASS
++							     IPCC_MPROC_SIGNAL_GLINK_QMP
++							     IRQ_TYPE_EDGE_RISING>;
++				mboxes = <&ipcc IPCC_CLIENT_LPASS
++						IPCC_MPROC_SIGNAL_GLINK_QMP>;
++
++				label = "lpass";
++				qcom,remote-pid = <2>;
++
++				gpr {
++					compatible = "qcom,gpr";
++					qcom,glink-channels = "adsp_apps";
++					qcom,domain = <GPR_DOMAIN_ID_ADSP>;
++					qcom,intents = <512 20>;
++					#address-cells = <1>;
++					#size-cells = <0>;
++
++					q6apm: service@1 {
++						compatible = "qcom,q6apm";
++						reg = <GPR_APM_MODULE_IID>;
++						#sound-dai-cells = <0>;
++						qcom,protection-domain = "avs/audio",
++									 "msm/adsp/audio_pd";
++
++						q6apmdai: dais {
++							compatible = "qcom,q6apm-dais";
++							iommus = <&apps_smmu 0x1801 0x0>;
++						};
++
++						q6apmbedai: bedais {
++							compatible = "qcom,q6apm-lpass-dais";
++							#sound-dai-cells = <1>;
++						};
++					};
++
++					q6prm: service@2 {
++						compatible = "qcom,q6prm";
++						reg = <GPR_PRM_MODULE_IID>;
++						qcom,protection-domain = "avs/audio",
++									 "msm/adsp/audio_pd";
++
++						q6prmcc: clock-controller {
++							compatible = "qcom,q6prm-lpass-clocks";
++							#clock-cells = <2>;
++						};
++					};
++				};
++
++				fastrpc {
++					compatible = "qcom,fastrpc";
++					qcom,glink-channels = "fastrpcglink-apps-dsp";
++					label = "adsp";
++					qcom,non-secure-domain;
++					#address-cells = <1>;
++					#size-cells = <0>;
++
++					compute-cb@3 {
++						compatible = "qcom,fastrpc-compute-cb";
++						reg = <3>;
++						iommus = <&apps_smmu 0x1803 0x0>;
++					};
++
++					compute-cb@4 {
++						compatible = "qcom,fastrpc-compute-cb";
++						reg = <4>;
++						iommus = <&apps_smmu 0x1804 0x0>;
++					};
++
++					compute-cb@5 {
++						compatible = "qcom,fastrpc-compute-cb";
++						reg = <5>;
++						iommus = <&apps_smmu 0x1805 0x0>;
++					};
++				};
++			};
++		};
++
+ 		wsa2macro: codec@31e0000 {
+ 			compatible = "qcom,sm8450-lpass-wsa-macro";
+ 			reg = <0 0x031e0000 0 0x1000>;
+@@ -2688,115 +2794,9 @@ vamacro: codec@33f0000 {
+ 			status = "disabled";
+ 		};
+ 
+-		remoteproc_adsp: remoteproc@30000000 {
+-			compatible = "qcom,sm8450-adsp-pas";
+-			reg = <0 0x30000000 0 0x100>;
+-
+-			interrupts-extended = <&pdc 6 IRQ_TYPE_EDGE_RISING>,
+-					      <&smp2p_adsp_in 0 IRQ_TYPE_EDGE_RISING>,
+-					      <&smp2p_adsp_in 1 IRQ_TYPE_EDGE_RISING>,
+-					      <&smp2p_adsp_in 2 IRQ_TYPE_EDGE_RISING>,
+-					      <&smp2p_adsp_in 3 IRQ_TYPE_EDGE_RISING>;
+-			interrupt-names = "wdog", "fatal", "ready",
+-					  "handover", "stop-ack";
+-
+-			clocks = <&rpmhcc RPMH_CXO_CLK>;
+-			clock-names = "xo";
+-
+-			power-domains = <&rpmhpd RPMHPD_LCX>,
+-					<&rpmhpd RPMHPD_LMX>;
+-			power-domain-names = "lcx", "lmx";
+-
+-			memory-region = <&adsp_mem>;
+-
+-			qcom,qmp = <&aoss_qmp>;
+-
+-			qcom,smem-states = <&smp2p_adsp_out 0>;
+-			qcom,smem-state-names = "stop";
+-
+-			status = "disabled";
+-
+-			remoteproc_adsp_glink: glink-edge {
+-				interrupts-extended = <&ipcc IPCC_CLIENT_LPASS
+-							     IPCC_MPROC_SIGNAL_GLINK_QMP
+-							     IRQ_TYPE_EDGE_RISING>;
+-				mboxes = <&ipcc IPCC_CLIENT_LPASS
+-						IPCC_MPROC_SIGNAL_GLINK_QMP>;
+-
+-				label = "lpass";
+-				qcom,remote-pid = <2>;
+-
+-				gpr {
+-					compatible = "qcom,gpr";
+-					qcom,glink-channels = "adsp_apps";
+-					qcom,domain = <GPR_DOMAIN_ID_ADSP>;
+-					qcom,intents = <512 20>;
+-					#address-cells = <1>;
+-					#size-cells = <0>;
+-
+-					q6apm: service@1 {
+-						compatible = "qcom,q6apm";
+-						reg = <GPR_APM_MODULE_IID>;
+-						#sound-dai-cells = <0>;
+-						qcom,protection-domain = "avs/audio",
+-									 "msm/adsp/audio_pd";
+-
+-						q6apmdai: dais {
+-							compatible = "qcom,q6apm-dais";
+-							iommus = <&apps_smmu 0x1801 0x0>;
+-						};
+-
+-						q6apmbedai: bedais {
+-							compatible = "qcom,q6apm-lpass-dais";
+-							#sound-dai-cells = <1>;
+-						};
+-					};
+-
+-					q6prm: service@2 {
+-						compatible = "qcom,q6prm";
+-						reg = <GPR_PRM_MODULE_IID>;
+-						qcom,protection-domain = "avs/audio",
+-									 "msm/adsp/audio_pd";
+-
+-						q6prmcc: clock-controller {
+-							compatible = "qcom,q6prm-lpass-clocks";
+-							#clock-cells = <2>;
+-						};
+-					};
+-				};
+-
+-				fastrpc {
+-					compatible = "qcom,fastrpc";
+-					qcom,glink-channels = "fastrpcglink-apps-dsp";
+-					label = "adsp";
+-					qcom,non-secure-domain;
+-					#address-cells = <1>;
+-					#size-cells = <0>;
+-
+-					compute-cb@3 {
+-						compatible = "qcom,fastrpc-compute-cb";
+-						reg = <3>;
+-						iommus = <&apps_smmu 0x1803 0x0>;
+-					};
+-
+-					compute-cb@4 {
+-						compatible = "qcom,fastrpc-compute-cb";
+-						reg = <4>;
+-						iommus = <&apps_smmu 0x1804 0x0>;
+-					};
+-
+-					compute-cb@5 {
+-						compatible = "qcom,fastrpc-compute-cb";
+-						reg = <5>;
+-						iommus = <&apps_smmu 0x1805 0x0>;
+-					};
+-				};
+-			};
+-		};
+-
+ 		remoteproc_cdsp: remoteproc@32300000 {
+ 			compatible = "qcom,sm8450-cdsp-pas";
+-			reg = <0 0x32300000 0 0x1400000>;
++			reg = <0 0x32300000 0 0x10000>;
+ 
+ 			interrupts-extended = <&intc GIC_SPI 578 IRQ_TYPE_EDGE_RISING>,
+ 					      <&smp2p_cdsp_in 0 IRQ_TYPE_EDGE_RISING>,
+@@ -2903,7 +2903,7 @@ compute-cb@8 {
+ 
+ 		remoteproc_mpss: remoteproc@4080000 {
+ 			compatible = "qcom,sm8450-mpss-pas";
+-			reg = <0x0 0x04080000 0x0 0x4040>;
++			reg = <0x0 0x04080000 0x0 0x10000>;
+ 
+ 			interrupts-extended = <&intc GIC_SPI 264 IRQ_TYPE_EDGE_RISING>,
+ 					      <&smp2p_modem_in 0 IRQ_TYPE_EDGE_RISING>,
+diff --git a/arch/arm64/boot/dts/qcom/sm8550.dtsi b/arch/arm64/boot/dts/qcom/sm8550.dtsi
+index 9dc0ee3eb98f87..9ecf4a7fc3287a 100644
+--- a/arch/arm64/boot/dts/qcom/sm8550.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8550.dtsi
+@@ -2313,7 +2313,7 @@ ipa: ipa@3f40000 {
+ 
+ 		remoteproc_mpss: remoteproc@4080000 {
+ 			compatible = "qcom,sm8550-mpss-pas";
+-			reg = <0x0 0x04080000 0x0 0x4040>;
++			reg = <0x0 0x04080000 0x0 0x10000>;
+ 
+ 			interrupts-extended = <&intc GIC_SPI 264 IRQ_TYPE_EDGE_RISING>,
+ 					      <&smp2p_modem_in 0 IRQ_TYPE_EDGE_RISING>,
+@@ -2353,6 +2353,137 @@ IPCC_MPROC_SIGNAL_GLINK_QMP
+ 			};
+ 		};
+ 
++		remoteproc_adsp: remoteproc@6800000 {
++			compatible = "qcom,sm8550-adsp-pas";
++			reg = <0x0 0x06800000 0x0 0x10000>;
++
++			interrupts-extended = <&pdc 6 IRQ_TYPE_EDGE_RISING>,
++					      <&smp2p_adsp_in 0 IRQ_TYPE_EDGE_RISING>,
++					      <&smp2p_adsp_in 1 IRQ_TYPE_EDGE_RISING>,
++					      <&smp2p_adsp_in 2 IRQ_TYPE_EDGE_RISING>,
++					      <&smp2p_adsp_in 3 IRQ_TYPE_EDGE_RISING>;
++			interrupt-names = "wdog", "fatal", "ready",
++					  "handover", "stop-ack";
++
++			clocks = <&rpmhcc RPMH_CXO_CLK>;
++			clock-names = "xo";
++
++			power-domains = <&rpmhpd RPMHPD_LCX>,
++					<&rpmhpd RPMHPD_LMX>;
++			power-domain-names = "lcx", "lmx";
++
++			interconnects = <&lpass_lpicx_noc MASTER_LPASS_PROC 0 &mc_virt SLAVE_EBI1 0>;
++
++			memory-region = <&adspslpi_mem>, <&q6_adsp_dtb_mem>;
++
++			qcom,qmp = <&aoss_qmp>;
++
++			qcom,smem-states = <&smp2p_adsp_out 0>;
++			qcom,smem-state-names = "stop";
++
++			status = "disabled";
++
++			remoteproc_adsp_glink: glink-edge {
++				interrupts-extended = <&ipcc IPCC_CLIENT_LPASS
++							     IPCC_MPROC_SIGNAL_GLINK_QMP
++							     IRQ_TYPE_EDGE_RISING>;
++				mboxes = <&ipcc IPCC_CLIENT_LPASS
++						IPCC_MPROC_SIGNAL_GLINK_QMP>;
++
++				label = "lpass";
++				qcom,remote-pid = <2>;
++
++				fastrpc {
++					compatible = "qcom,fastrpc";
++					qcom,glink-channels = "fastrpcglink-apps-dsp";
++					label = "adsp";
++					qcom,non-secure-domain;
++					#address-cells = <1>;
++					#size-cells = <0>;
++
++					compute-cb@3 {
++						compatible = "qcom,fastrpc-compute-cb";
++						reg = <3>;
++						iommus = <&apps_smmu 0x1003 0x80>,
++							 <&apps_smmu 0x1063 0x0>;
++						dma-coherent;
++					};
++
++					compute-cb@4 {
++						compatible = "qcom,fastrpc-compute-cb";
++						reg = <4>;
++						iommus = <&apps_smmu 0x1004 0x80>,
++							 <&apps_smmu 0x1064 0x0>;
++						dma-coherent;
++					};
++
++					compute-cb@5 {
++						compatible = "qcom,fastrpc-compute-cb";
++						reg = <5>;
++						iommus = <&apps_smmu 0x1005 0x80>,
++							 <&apps_smmu 0x1065 0x0>;
++						dma-coherent;
++					};
++
++					compute-cb@6 {
++						compatible = "qcom,fastrpc-compute-cb";
++						reg = <6>;
++						iommus = <&apps_smmu 0x1006 0x80>,
++							 <&apps_smmu 0x1066 0x0>;
++						dma-coherent;
++					};
++
++					compute-cb@7 {
++						compatible = "qcom,fastrpc-compute-cb";
++						reg = <7>;
++						iommus = <&apps_smmu 0x1007 0x80>,
++							 <&apps_smmu 0x1067 0x0>;
++						dma-coherent;
++					};
++				};
++
++				gpr {
++					compatible = "qcom,gpr";
++					qcom,glink-channels = "adsp_apps";
++					qcom,domain = <GPR_DOMAIN_ID_ADSP>;
++					qcom,intents = <512 20>;
++					#address-cells = <1>;
++					#size-cells = <0>;
++
++					q6apm: service@1 {
++						compatible = "qcom,q6apm";
++						reg = <GPR_APM_MODULE_IID>;
++						#sound-dai-cells = <0>;
++						qcom,protection-domain = "avs/audio",
++									 "msm/adsp/audio_pd";
++
++						q6apmdai: dais {
++							compatible = "qcom,q6apm-dais";
++							iommus = <&apps_smmu 0x1001 0x80>,
++								 <&apps_smmu 0x1061 0x0>;
++						};
++
++						q6apmbedai: bedais {
++							compatible = "qcom,q6apm-lpass-dais";
++							#sound-dai-cells = <1>;
++						};
++					};
++
++					q6prm: service@2 {
++						compatible = "qcom,q6prm";
++						reg = <GPR_PRM_MODULE_IID>;
++						qcom,protection-domain = "avs/audio",
++									 "msm/adsp/audio_pd";
++
++						q6prmcc: clock-controller {
++							compatible = "qcom,q6prm-lpass-clocks";
++							#clock-cells = <2>;
++						};
++					};
++				};
++			};
++		};
++
+ 		lpass_wsa2macro: codec@6aa0000 {
+ 			compatible = "qcom,sm8550-lpass-wsa-macro";
+ 			reg = <0 0x06aa0000 0 0x1000>;
+@@ -2871,9 +3002,8 @@ mdss: display-subsystem@ae00000 {
+ 
+ 			power-domains = <&dispcc MDSS_GDSC>;
+ 
+-			interconnects = <&mmss_noc MASTER_MDP 0 &gem_noc SLAVE_LLCC 0>,
+-					<&mc_virt MASTER_LLCC 0 &mc_virt SLAVE_EBI1 0>;
+-			interconnect-names = "mdp0-mem", "mdp1-mem";
++			interconnects = <&mmss_noc MASTER_MDP 0 &mc_virt SLAVE_EBI1 0>;
++			interconnect-names = "mdp0-mem";
+ 
+ 			iommus = <&apps_smmu 0x1c00 0x2>;
+ 
+@@ -4575,137 +4705,6 @@ system-cache-controller@25000000 {
+ 			interrupts = <GIC_SPI 266 IRQ_TYPE_LEVEL_HIGH>;
+ 		};
+ 
+-		remoteproc_adsp: remoteproc@30000000 {
+-			compatible = "qcom,sm8550-adsp-pas";
+-			reg = <0x0 0x30000000 0x0 0x100>;
+-
+-			interrupts-extended = <&pdc 6 IRQ_TYPE_EDGE_RISING>,
+-					      <&smp2p_adsp_in 0 IRQ_TYPE_EDGE_RISING>,
+-					      <&smp2p_adsp_in 1 IRQ_TYPE_EDGE_RISING>,
+-					      <&smp2p_adsp_in 2 IRQ_TYPE_EDGE_RISING>,
+-					      <&smp2p_adsp_in 3 IRQ_TYPE_EDGE_RISING>;
+-			interrupt-names = "wdog", "fatal", "ready",
+-					  "handover", "stop-ack";
+-
+-			clocks = <&rpmhcc RPMH_CXO_CLK>;
+-			clock-names = "xo";
+-
+-			power-domains = <&rpmhpd RPMHPD_LCX>,
+-					<&rpmhpd RPMHPD_LMX>;
+-			power-domain-names = "lcx", "lmx";
+-
+-			interconnects = <&lpass_lpicx_noc MASTER_LPASS_PROC 0 &mc_virt SLAVE_EBI1 0>;
+-
+-			memory-region = <&adspslpi_mem>, <&q6_adsp_dtb_mem>;
+-
+-			qcom,qmp = <&aoss_qmp>;
+-
+-			qcom,smem-states = <&smp2p_adsp_out 0>;
+-			qcom,smem-state-names = "stop";
+-
+-			status = "disabled";
+-
+-			remoteproc_adsp_glink: glink-edge {
+-				interrupts-extended = <&ipcc IPCC_CLIENT_LPASS
+-							     IPCC_MPROC_SIGNAL_GLINK_QMP
+-							     IRQ_TYPE_EDGE_RISING>;
+-				mboxes = <&ipcc IPCC_CLIENT_LPASS
+-						IPCC_MPROC_SIGNAL_GLINK_QMP>;
+-
+-				label = "lpass";
+-				qcom,remote-pid = <2>;
+-
+-				fastrpc {
+-					compatible = "qcom,fastrpc";
+-					qcom,glink-channels = "fastrpcglink-apps-dsp";
+-					label = "adsp";
+-					qcom,non-secure-domain;
+-					#address-cells = <1>;
+-					#size-cells = <0>;
+-
+-					compute-cb@3 {
+-						compatible = "qcom,fastrpc-compute-cb";
+-						reg = <3>;
+-						iommus = <&apps_smmu 0x1003 0x80>,
+-							 <&apps_smmu 0x1063 0x0>;
+-						dma-coherent;
+-					};
+-
+-					compute-cb@4 {
+-						compatible = "qcom,fastrpc-compute-cb";
+-						reg = <4>;
+-						iommus = <&apps_smmu 0x1004 0x80>,
+-							 <&apps_smmu 0x1064 0x0>;
+-						dma-coherent;
+-					};
+-
+-					compute-cb@5 {
+-						compatible = "qcom,fastrpc-compute-cb";
+-						reg = <5>;
+-						iommus = <&apps_smmu 0x1005 0x80>,
+-							 <&apps_smmu 0x1065 0x0>;
+-						dma-coherent;
+-					};
+-
+-					compute-cb@6 {
+-						compatible = "qcom,fastrpc-compute-cb";
+-						reg = <6>;
+-						iommus = <&apps_smmu 0x1006 0x80>,
+-							 <&apps_smmu 0x1066 0x0>;
+-						dma-coherent;
+-					};
+-
+-					compute-cb@7 {
+-						compatible = "qcom,fastrpc-compute-cb";
+-						reg = <7>;
+-						iommus = <&apps_smmu 0x1007 0x80>,
+-							 <&apps_smmu 0x1067 0x0>;
+-						dma-coherent;
+-					};
+-				};
+-
+-				gpr {
+-					compatible = "qcom,gpr";
+-					qcom,glink-channels = "adsp_apps";
+-					qcom,domain = <GPR_DOMAIN_ID_ADSP>;
+-					qcom,intents = <512 20>;
+-					#address-cells = <1>;
+-					#size-cells = <0>;
+-
+-					q6apm: service@1 {
+-						compatible = "qcom,q6apm";
+-						reg = <GPR_APM_MODULE_IID>;
+-						#sound-dai-cells = <0>;
+-						qcom,protection-domain = "avs/audio",
+-									 "msm/adsp/audio_pd";
+-
+-						q6apmdai: dais {
+-							compatible = "qcom,q6apm-dais";
+-							iommus = <&apps_smmu 0x1001 0x80>,
+-								 <&apps_smmu 0x1061 0x0>;
+-						};
+-
+-						q6apmbedai: bedais {
+-							compatible = "qcom,q6apm-lpass-dais";
+-							#sound-dai-cells = <1>;
+-						};
+-					};
+-
+-					q6prm: service@2 {
+-						compatible = "qcom,q6prm";
+-						reg = <GPR_PRM_MODULE_IID>;
+-						qcom,protection-domain = "avs/audio",
+-									 "msm/adsp/audio_pd";
+-
+-						q6prmcc: clock-controller {
+-							compatible = "qcom,q6prm-lpass-clocks";
+-							#clock-cells = <2>;
+-						};
+-					};
+-				};
+-			};
+-		};
+-
+ 		nsp_noc: interconnect@320c0000 {
+ 			compatible = "qcom,sm8550-nsp-noc";
+ 			reg = <0 0x320c0000 0 0xe080>;
+@@ -4715,7 +4714,7 @@ nsp_noc: interconnect@320c0000 {
+ 
+ 		remoteproc_cdsp: remoteproc@32300000 {
+ 			compatible = "qcom,sm8550-cdsp-pas";
+-			reg = <0x0 0x32300000 0x0 0x1400000>;
++			reg = <0x0 0x32300000 0x0 0x10000>;
+ 
+ 			interrupts-extended = <&intc GIC_SPI 578 IRQ_TYPE_EDGE_RISING>,
+ 					      <&smp2p_cdsp_in 0 IRQ_TYPE_EDGE_RISING>,
+diff --git a/arch/arm64/boot/dts/qcom/sm8650.dtsi b/arch/arm64/boot/dts/qcom/sm8650.dtsi
+index cd54fd723ce40e..416cfb71878a5f 100644
+--- a/arch/arm64/boot/dts/qcom/sm8650.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8650.dtsi
+@@ -2853,7 +2853,7 @@ ipa: ipa@3f40000 {
+ 
+ 		remoteproc_mpss: remoteproc@4080000 {
+ 			compatible = "qcom,sm8650-mpss-pas";
+-			reg = <0 0x04080000 0 0x4040>;
++			reg = <0x0 0x04080000 0x0 0x10000>;
+ 
+ 			interrupts-extended = <&intc GIC_SPI 264 IRQ_TYPE_EDGE_RISING>,
+ 					      <&smp2p_modem_in 0 IRQ_TYPE_EDGE_RISING>,
+@@ -2904,6 +2904,154 @@ IPCC_MPROC_SIGNAL_GLINK_QMP
+ 			};
+ 		};
+ 
++		remoteproc_adsp: remoteproc@6800000 {
++			compatible = "qcom,sm8650-adsp-pas";
++			reg = <0x0 0x06800000 0x0 0x10000>;
++
++			interrupts-extended = <&pdc 6 IRQ_TYPE_EDGE_RISING>,
++					      <&smp2p_adsp_in 0 IRQ_TYPE_EDGE_RISING>,
++					      <&smp2p_adsp_in 1 IRQ_TYPE_EDGE_RISING>,
++					      <&smp2p_adsp_in 2 IRQ_TYPE_EDGE_RISING>,
++					      <&smp2p_adsp_in 3 IRQ_TYPE_EDGE_RISING>;
++			interrupt-names = "wdog",
++					  "fatal",
++					  "ready",
++					  "handover",
++					  "stop-ack";
++
++			clocks = <&rpmhcc RPMH_CXO_CLK>;
++			clock-names = "xo";
++
++			interconnects = <&lpass_lpicx_noc MASTER_LPASS_PROC QCOM_ICC_TAG_ALWAYS
++					 &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
++
++			power-domains = <&rpmhpd RPMHPD_LCX>,
++					<&rpmhpd RPMHPD_LMX>;
++			power-domain-names = "lcx",
++					     "lmx";
++
++			memory-region = <&adspslpi_mem>, <&q6_adsp_dtb_mem>;
++
++			qcom,qmp = <&aoss_qmp>;
++
++			qcom,smem-states = <&smp2p_adsp_out 0>;
++			qcom,smem-state-names = "stop";
++
++			status = "disabled";
++
++			remoteproc_adsp_glink: glink-edge {
++				interrupts-extended = <&ipcc IPCC_CLIENT_LPASS
++							     IPCC_MPROC_SIGNAL_GLINK_QMP
++							     IRQ_TYPE_EDGE_RISING>;
++
++				mboxes = <&ipcc IPCC_CLIENT_LPASS
++						IPCC_MPROC_SIGNAL_GLINK_QMP>;
++
++				qcom,remote-pid = <2>;
++
++				label = "lpass";
++
++				fastrpc {
++					compatible = "qcom,fastrpc";
++
++					qcom,glink-channels = "fastrpcglink-apps-dsp";
++
++					label = "adsp";
++
++					qcom,non-secure-domain;
++
++					#address-cells = <1>;
++					#size-cells = <0>;
++
++					compute-cb@3 {
++						compatible = "qcom,fastrpc-compute-cb";
++						reg = <3>;
++
++						iommus = <&apps_smmu 0x1003 0x80>,
++							 <&apps_smmu 0x1043 0x20>;
++						dma-coherent;
++					};
++
++					compute-cb@4 {
++						compatible = "qcom,fastrpc-compute-cb";
++						reg = <4>;
++
++						iommus = <&apps_smmu 0x1004 0x80>,
++							 <&apps_smmu 0x1044 0x20>;
++						dma-coherent;
++					};
++
++					compute-cb@5 {
++						compatible = "qcom,fastrpc-compute-cb";
++						reg = <5>;
++
++						iommus = <&apps_smmu 0x1005 0x80>,
++							 <&apps_smmu 0x1045 0x20>;
++						dma-coherent;
++					};
++
++					compute-cb@6 {
++						compatible = "qcom,fastrpc-compute-cb";
++						reg = <6>;
++
++						iommus = <&apps_smmu 0x1006 0x80>,
++							 <&apps_smmu 0x1046 0x20>;
++						dma-coherent;
++					};
++
++					compute-cb@7 {
++						compatible = "qcom,fastrpc-compute-cb";
++						reg = <7>;
++
++						iommus = <&apps_smmu 0x1007 0x40>,
++							 <&apps_smmu 0x1067 0x0>,
++							 <&apps_smmu 0x1087 0x0>;
++						dma-coherent;
++					};
++				};
++
++				gpr {
++					compatible = "qcom,gpr";
++					qcom,glink-channels = "adsp_apps";
++					qcom,domain = <GPR_DOMAIN_ID_ADSP>;
++					qcom,intents = <512 20>;
++					#address-cells = <1>;
++					#size-cells = <0>;
++
++					q6apm: service@1 {
++						compatible = "qcom,q6apm";
++						reg = <GPR_APM_MODULE_IID>;
++						#sound-dai-cells = <0>;
++						qcom,protection-domain = "avs/audio",
++									 "msm/adsp/audio_pd";
++
++						q6apmbedai: bedais {
++							compatible = "qcom,q6apm-lpass-dais";
++							#sound-dai-cells = <1>;
++						};
++
++						q6apmdai: dais {
++							compatible = "qcom,q6apm-dais";
++							iommus = <&apps_smmu 0x1001 0x80>,
++								 <&apps_smmu 0x1061 0x0>;
++						};
++					};
++
++					q6prm: service@2 {
++						compatible = "qcom,q6prm";
++						reg = <GPR_PRM_MODULE_IID>;
++						qcom,protection-domain = "avs/audio",
++									 "msm/adsp/audio_pd";
++
++						q6prmcc: clock-controller {
++							compatible = "qcom,q6prm-lpass-clocks";
++							#clock-cells = <2>;
++						};
++					};
++				};
++			};
++		};
++
+ 		lpass_wsa2macro: codec@6aa0000 {
+ 			compatible = "qcom,sm8650-lpass-wsa-macro", "qcom,sm8550-lpass-wsa-macro";
+ 			reg = <0 0x06aa0000 0 0x1000>;
+@@ -3455,11 +3603,8 @@ mdss: display-subsystem@ae00000 {
+ 			resets = <&dispcc DISP_CC_MDSS_CORE_BCR>;
+ 
+ 			interconnects = <&mmss_noc MASTER_MDP QCOM_ICC_TAG_ALWAYS
+-					 &gem_noc SLAVE_LLCC QCOM_ICC_TAG_ALWAYS>,
+-					<&mc_virt MASTER_LLCC QCOM_ICC_TAG_ALWAYS
+ 					 &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+-			interconnect-names = "mdp0-mem",
+-					     "mdp1-mem";
++			interconnect-names = "mdp0-mem";
+ 
+ 			power-domains = <&dispcc MDSS_GDSC>;
+ 
+@@ -5324,154 +5469,6 @@ system-cache-controller@25000000 {
+ 			interrupts = <GIC_SPI 266 IRQ_TYPE_LEVEL_HIGH>;
+ 		};
+ 
+-		remoteproc_adsp: remoteproc@30000000 {
+-			compatible = "qcom,sm8650-adsp-pas";
+-			reg = <0 0x30000000 0 0x100>;
+-
+-			interrupts-extended = <&pdc 6 IRQ_TYPE_EDGE_RISING>,
+-					      <&smp2p_adsp_in 0 IRQ_TYPE_EDGE_RISING>,
+-					      <&smp2p_adsp_in 1 IRQ_TYPE_EDGE_RISING>,
+-					      <&smp2p_adsp_in 2 IRQ_TYPE_EDGE_RISING>,
+-					      <&smp2p_adsp_in 3 IRQ_TYPE_EDGE_RISING>;
+-			interrupt-names = "wdog",
+-					  "fatal",
+-					  "ready",
+-					  "handover",
+-					  "stop-ack";
+-
+-			clocks = <&rpmhcc RPMH_CXO_CLK>;
+-			clock-names = "xo";
+-
+-			interconnects = <&lpass_lpicx_noc MASTER_LPASS_PROC QCOM_ICC_TAG_ALWAYS
+-					 &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+-
+-			power-domains = <&rpmhpd RPMHPD_LCX>,
+-					<&rpmhpd RPMHPD_LMX>;
+-			power-domain-names = "lcx",
+-					     "lmx";
+-
+-			memory-region = <&adspslpi_mem>, <&q6_adsp_dtb_mem>;
+-
+-			qcom,qmp = <&aoss_qmp>;
+-
+-			qcom,smem-states = <&smp2p_adsp_out 0>;
+-			qcom,smem-state-names = "stop";
+-
+-			status = "disabled";
+-
+-			remoteproc_adsp_glink: glink-edge {
+-				interrupts-extended = <&ipcc IPCC_CLIENT_LPASS
+-							     IPCC_MPROC_SIGNAL_GLINK_QMP
+-							     IRQ_TYPE_EDGE_RISING>;
+-
+-				mboxes = <&ipcc IPCC_CLIENT_LPASS
+-						IPCC_MPROC_SIGNAL_GLINK_QMP>;
+-
+-				qcom,remote-pid = <2>;
+-
+-				label = "lpass";
+-
+-				fastrpc {
+-					compatible = "qcom,fastrpc";
+-
+-					qcom,glink-channels = "fastrpcglink-apps-dsp";
+-
+-					label = "adsp";
+-
+-					qcom,non-secure-domain;
+-
+-					#address-cells = <1>;
+-					#size-cells = <0>;
+-
+-					compute-cb@3 {
+-						compatible = "qcom,fastrpc-compute-cb";
+-						reg = <3>;
+-
+-						iommus = <&apps_smmu 0x1003 0x80>,
+-							 <&apps_smmu 0x1043 0x20>;
+-						dma-coherent;
+-					};
+-
+-					compute-cb@4 {
+-						compatible = "qcom,fastrpc-compute-cb";
+-						reg = <4>;
+-
+-						iommus = <&apps_smmu 0x1004 0x80>,
+-							 <&apps_smmu 0x1044 0x20>;
+-						dma-coherent;
+-					};
+-
+-					compute-cb@5 {
+-						compatible = "qcom,fastrpc-compute-cb";
+-						reg = <5>;
+-
+-						iommus = <&apps_smmu 0x1005 0x80>,
+-							 <&apps_smmu 0x1045 0x20>;
+-						dma-coherent;
+-					};
+-
+-					compute-cb@6 {
+-						compatible = "qcom,fastrpc-compute-cb";
+-						reg = <6>;
+-
+-						iommus = <&apps_smmu 0x1006 0x80>,
+-							 <&apps_smmu 0x1046 0x20>;
+-						dma-coherent;
+-					};
+-
+-					compute-cb@7 {
+-						compatible = "qcom,fastrpc-compute-cb";
+-						reg = <7>;
+-
+-						iommus = <&apps_smmu 0x1007 0x40>,
+-							 <&apps_smmu 0x1067 0x0>,
+-							 <&apps_smmu 0x1087 0x0>;
+-						dma-coherent;
+-					};
+-				};
+-
+-				gpr {
+-					compatible = "qcom,gpr";
+-					qcom,glink-channels = "adsp_apps";
+-					qcom,domain = <GPR_DOMAIN_ID_ADSP>;
+-					qcom,intents = <512 20>;
+-					#address-cells = <1>;
+-					#size-cells = <0>;
+-
+-					q6apm: service@1 {
+-						compatible = "qcom,q6apm";
+-						reg = <GPR_APM_MODULE_IID>;
+-						#sound-dai-cells = <0>;
+-						qcom,protection-domain = "avs/audio",
+-									 "msm/adsp/audio_pd";
+-
+-						q6apmbedai: bedais {
+-							compatible = "qcom,q6apm-lpass-dais";
+-							#sound-dai-cells = <1>;
+-						};
+-
+-						q6apmdai: dais {
+-							compatible = "qcom,q6apm-dais";
+-							iommus = <&apps_smmu 0x1001 0x80>,
+-								 <&apps_smmu 0x1061 0x0>;
+-						};
+-					};
+-
+-					q6prm: service@2 {
+-						compatible = "qcom,q6prm";
+-						reg = <GPR_PRM_MODULE_IID>;
+-						qcom,protection-domain = "avs/audio",
+-									 "msm/adsp/audio_pd";
+-
+-						q6prmcc: clock-controller {
+-							compatible = "qcom,q6prm-lpass-clocks";
+-							#clock-cells = <2>;
+-						};
+-					};
+-				};
+-			};
+-		};
+-
+ 		nsp_noc: interconnect@320c0000 {
+ 			compatible = "qcom,sm8650-nsp-noc";
+ 			reg = <0 0x320c0000 0 0xf080>;
+@@ -5483,7 +5480,7 @@ nsp_noc: interconnect@320c0000 {
+ 
+ 		remoteproc_cdsp: remoteproc@32300000 {
+ 			compatible = "qcom,sm8650-cdsp-pas";
+-			reg = <0 0x32300000 0 0x1400000>;
++			reg = <0x0 0x32300000 0x0 0x10000>;
+ 
+ 			interrupts-extended = <&intc GIC_SPI 578 IRQ_TYPE_EDGE_RISING>,
+ 					      <&smp2p_cdsp_in 0 IRQ_TYPE_EDGE_RISING>,
+diff --git a/arch/arm64/boot/dts/qcom/x1e78100-lenovo-thinkpad-t14s.dts b/arch/arm64/boot/dts/qcom/x1e78100-lenovo-thinkpad-t14s.dts
+index fdde988ae01ebd..b1fa8f3558b3fc 100644
+--- a/arch/arm64/boot/dts/qcom/x1e78100-lenovo-thinkpad-t14s.dts
++++ b/arch/arm64/boot/dts/qcom/x1e78100-lenovo-thinkpad-t14s.dts
+@@ -754,7 +754,7 @@ &usb_1_ss0_hsphy {
+ };
+ 
+ &usb_1_ss0_qmpphy {
+-	vdda-phy-supply = <&vreg_l3e_1p2>;
++	vdda-phy-supply = <&vreg_l2j_1p2>;
+ 	vdda-pll-supply = <&vreg_l1j_0p8>;
+ 
+ 	status = "okay";
+@@ -786,7 +786,7 @@ &usb_1_ss1_hsphy {
+ };
+ 
+ &usb_1_ss1_qmpphy {
+-	vdda-phy-supply = <&vreg_l3e_1p2>;
++	vdda-phy-supply = <&vreg_l2j_1p2>;
+ 	vdda-pll-supply = <&vreg_l2d_0p9>;
+ 
+ 	status = "okay";
+diff --git a/arch/arm64/boot/dts/qcom/x1e80100-asus-vivobook-s15.dts b/arch/arm64/boot/dts/qcom/x1e80100-asus-vivobook-s15.dts
+index 2926a1aba76873..b2cf080cab5622 100644
+--- a/arch/arm64/boot/dts/qcom/x1e80100-asus-vivobook-s15.dts
++++ b/arch/arm64/boot/dts/qcom/x1e80100-asus-vivobook-s15.dts
+@@ -591,7 +591,7 @@ &usb_1_ss0_hsphy {
+ };
+ 
+ &usb_1_ss0_qmpphy {
+-	vdda-phy-supply = <&vreg_l3e_1p2>;
++	vdda-phy-supply = <&vreg_l2j_1p2>;
+ 	vdda-pll-supply = <&vreg_l1j_0p8>;
+ 
+ 	status = "okay";
+@@ -623,7 +623,7 @@ &usb_1_ss1_hsphy {
+ };
+ 
+ &usb_1_ss1_qmpphy {
+-	vdda-phy-supply = <&vreg_l3e_1p2>;
++	vdda-phy-supply = <&vreg_l2j_1p2>;
+ 	vdda-pll-supply = <&vreg_l2d_0p9>;
+ 
+ 	status = "okay";
+diff --git a/arch/arm64/boot/dts/qcom/x1e80100-crd.dts b/arch/arm64/boot/dts/qcom/x1e80100-crd.dts
+index c6e0356ed9a2a2..044a2f1432fe32 100644
+--- a/arch/arm64/boot/dts/qcom/x1e80100-crd.dts
++++ b/arch/arm64/boot/dts/qcom/x1e80100-crd.dts
+@@ -1147,7 +1147,7 @@ &usb_1_ss0_hsphy {
+ };
+ 
+ &usb_1_ss0_qmpphy {
+-	vdda-phy-supply = <&vreg_l3e_1p2>;
++	vdda-phy-supply = <&vreg_l2j_1p2>;
+ 	vdda-pll-supply = <&vreg_l1j_0p8>;
+ 
+ 	status = "okay";
+@@ -1179,7 +1179,7 @@ &usb_1_ss1_hsphy {
+ };
+ 
+ &usb_1_ss1_qmpphy {
+-	vdda-phy-supply = <&vreg_l3e_1p2>;
++	vdda-phy-supply = <&vreg_l2j_1p2>;
+ 	vdda-pll-supply = <&vreg_l2d_0p9>;
+ 
+ 	status = "okay";
+@@ -1211,7 +1211,7 @@ &usb_1_ss2_hsphy {
+ };
+ 
+ &usb_1_ss2_qmpphy {
+-	vdda-phy-supply = <&vreg_l3e_1p2>;
++	vdda-phy-supply = <&vreg_l2j_1p2>;
+ 	vdda-pll-supply = <&vreg_l2d_0p9>;
+ 
+ 	status = "okay";
+diff --git a/arch/arm64/boot/dts/qcom/x1e80100-lenovo-yoga-slim7x.dts b/arch/arm64/boot/dts/qcom/x1e80100-lenovo-yoga-slim7x.dts
+index f22e5c840a2e55..e9ed723f90381a 100644
+--- a/arch/arm64/boot/dts/qcom/x1e80100-lenovo-yoga-slim7x.dts
++++ b/arch/arm64/boot/dts/qcom/x1e80100-lenovo-yoga-slim7x.dts
+@@ -895,7 +895,7 @@ &usb_1_ss0_hsphy {
+ };
+ 
+ &usb_1_ss0_qmpphy {
+-	vdda-phy-supply = <&vreg_l3e_1p2>;
++	vdda-phy-supply = <&vreg_l2j_1p2>;
+ 	vdda-pll-supply = <&vreg_l1j_0p8>;
+ 
+ 	status = "okay";
+@@ -927,7 +927,7 @@ &usb_1_ss1_hsphy {
+ };
+ 
+ &usb_1_ss1_qmpphy {
+-	vdda-phy-supply = <&vreg_l3e_1p2>;
++	vdda-phy-supply = <&vreg_l2j_1p2>;
+ 	vdda-pll-supply = <&vreg_l2d_0p9>;
+ 
+ 	status = "okay";
+@@ -959,7 +959,7 @@ &usb_1_ss2_hsphy {
+ };
+ 
+ &usb_1_ss2_qmpphy {
+-	vdda-phy-supply = <&vreg_l3e_1p2>;
++	vdda-phy-supply = <&vreg_l2j_1p2>;
+ 	vdda-pll-supply = <&vreg_l2d_0p9>;
+ 
+ 	status = "okay";
+diff --git a/arch/arm64/boot/dts/qcom/x1e80100-microsoft-romulus.dtsi b/arch/arm64/boot/dts/qcom/x1e80100-microsoft-romulus.dtsi
+index 89e39d55278579..19da90704b7cb9 100644
+--- a/arch/arm64/boot/dts/qcom/x1e80100-microsoft-romulus.dtsi
++++ b/arch/arm64/boot/dts/qcom/x1e80100-microsoft-romulus.dtsi
+@@ -782,7 +782,7 @@ &usb_1_ss0_hsphy {
+ };
+ 
+ &usb_1_ss0_qmpphy {
+-	vdda-phy-supply = <&vreg_l3e>;
++	vdda-phy-supply = <&vreg_l2j>;
+ 	vdda-pll-supply = <&vreg_l1j>;
+ 
+ 	status = "okay";
+@@ -814,7 +814,7 @@ &usb_1_ss1_hsphy {
+ };
+ 
+ &usb_1_ss1_qmpphy {
+-	vdda-phy-supply = <&vreg_l3e>;
++	vdda-phy-supply = <&vreg_l2j>;
+ 	vdda-pll-supply = <&vreg_l2d>;
+ 
+ 	status = "okay";
+diff --git a/arch/arm64/boot/dts/qcom/x1e80100-qcp.dts b/arch/arm64/boot/dts/qcom/x1e80100-qcp.dts
+index 5ef030c60abe29..af76aa034d0e17 100644
+--- a/arch/arm64/boot/dts/qcom/x1e80100-qcp.dts
++++ b/arch/arm64/boot/dts/qcom/x1e80100-qcp.dts
+@@ -896,7 +896,7 @@ &usb_1_ss0_hsphy {
+ };
+ 
+ &usb_1_ss0_qmpphy {
+-	vdda-phy-supply = <&vreg_l3e_1p2>;
++	vdda-phy-supply = <&vreg_l2j_1p2>;
+ 	vdda-pll-supply = <&vreg_l1j_0p8>;
+ 
+ 	status = "okay";
+@@ -928,7 +928,7 @@ &usb_1_ss1_hsphy {
+ };
+ 
+ &usb_1_ss1_qmpphy {
+-	vdda-phy-supply = <&vreg_l3e_1p2>;
++	vdda-phy-supply = <&vreg_l2j_1p2>;
+ 	vdda-pll-supply = <&vreg_l2d_0p9>;
+ 
+ 	status = "okay";
+@@ -960,7 +960,7 @@ &usb_1_ss2_hsphy {
+ };
+ 
+ &usb_1_ss2_qmpphy {
+-	vdda-phy-supply = <&vreg_l3e_1p2>;
++	vdda-phy-supply = <&vreg_l2j_1p2>;
+ 	vdda-pll-supply = <&vreg_l2d_0p9>;
+ 
+ 	status = "okay";
+diff --git a/arch/arm64/boot/dts/qcom/x1e80100.dtsi b/arch/arm64/boot/dts/qcom/x1e80100.dtsi
+index f0797df9619b15..91e4fbca19f99c 100644
+--- a/arch/arm64/boot/dts/qcom/x1e80100.dtsi
++++ b/arch/arm64/boot/dts/qcom/x1e80100.dtsi
+@@ -3515,6 +3515,143 @@ nsp_noc: interconnect@320c0000 {
+ 			#interconnect-cells = <2>;
+ 		};
+ 
++		remoteproc_adsp: remoteproc@6800000 {
++			compatible = "qcom,x1e80100-adsp-pas";
++			reg = <0x0 0x06800000 0x0 0x10000>;
++
++			interrupts-extended = <&pdc 6 IRQ_TYPE_EDGE_RISING>,
++					      <&smp2p_adsp_in 0 IRQ_TYPE_EDGE_RISING>,
++					      <&smp2p_adsp_in 1 IRQ_TYPE_EDGE_RISING>,
++					      <&smp2p_adsp_in 2 IRQ_TYPE_EDGE_RISING>,
++					      <&smp2p_adsp_in 3 IRQ_TYPE_EDGE_RISING>;
++			interrupt-names = "wdog",
++					  "fatal",
++					  "ready",
++					  "handover",
++					  "stop-ack";
++
++			clocks = <&rpmhcc RPMH_CXO_CLK>;
++			clock-names = "xo";
++
++			power-domains = <&rpmhpd RPMHPD_LCX>,
++					<&rpmhpd RPMHPD_LMX>;
++			power-domain-names = "lcx",
++					     "lmx";
++
++			interconnects = <&lpass_lpicx_noc MASTER_LPASS_PROC QCOM_ICC_TAG_ALWAYS
++					 &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
++
++			memory-region = <&adspslpi_mem>,
++					<&q6_adsp_dtb_mem>;
++
++			qcom,qmp = <&aoss_qmp>;
++
++			qcom,smem-states = <&smp2p_adsp_out 0>;
++			qcom,smem-state-names = "stop";
++
++			status = "disabled";
++
++			glink-edge {
++				interrupts-extended = <&ipcc IPCC_CLIENT_LPASS
++							     IPCC_MPROC_SIGNAL_GLINK_QMP
++							     IRQ_TYPE_EDGE_RISING>;
++				mboxes = <&ipcc IPCC_CLIENT_LPASS
++						IPCC_MPROC_SIGNAL_GLINK_QMP>;
++
++				label = "lpass";
++				qcom,remote-pid = <2>;
++
++				fastrpc {
++					compatible = "qcom,fastrpc";
++					qcom,glink-channels = "fastrpcglink-apps-dsp";
++					label = "adsp";
++					qcom,non-secure-domain;
++					#address-cells = <1>;
++					#size-cells = <0>;
++
++					compute-cb@3 {
++						compatible = "qcom,fastrpc-compute-cb";
++						reg = <3>;
++						iommus = <&apps_smmu 0x1003 0x80>,
++							 <&apps_smmu 0x1063 0x0>;
++						dma-coherent;
++					};
++
++					compute-cb@4 {
++						compatible = "qcom,fastrpc-compute-cb";
++						reg = <4>;
++						iommus = <&apps_smmu 0x1004 0x80>,
++							 <&apps_smmu 0x1064 0x0>;
++						dma-coherent;
++					};
++
++					compute-cb@5 {
++						compatible = "qcom,fastrpc-compute-cb";
++						reg = <5>;
++						iommus = <&apps_smmu 0x1005 0x80>,
++							 <&apps_smmu 0x1065 0x0>;
++						dma-coherent;
++					};
++
++					compute-cb@6 {
++						compatible = "qcom,fastrpc-compute-cb";
++						reg = <6>;
++						iommus = <&apps_smmu 0x1006 0x80>,
++							 <&apps_smmu 0x1066 0x0>;
++						dma-coherent;
++					};
++
++					compute-cb@7 {
++						compatible = "qcom,fastrpc-compute-cb";
++						reg = <7>;
++						iommus = <&apps_smmu 0x1007 0x80>,
++							 <&apps_smmu 0x1067 0x0>;
++						dma-coherent;
++					};
++				};
++
++				gpr {
++					compatible = "qcom,gpr";
++					qcom,glink-channels = "adsp_apps";
++					qcom,domain = <GPR_DOMAIN_ID_ADSP>;
++					qcom,intents = <512 20>;
++					#address-cells = <1>;
++					#size-cells = <0>;
++
++					q6apm: service@1 {
++						compatible = "qcom,q6apm";
++						reg = <GPR_APM_MODULE_IID>;
++						#sound-dai-cells = <0>;
++						qcom,protection-domain = "avs/audio",
++									 "msm/adsp/audio_pd";
++
++						q6apmbedai: bedais {
++							compatible = "qcom,q6apm-lpass-dais";
++							#sound-dai-cells = <1>;
++						};
++
++						q6apmdai: dais {
++							compatible = "qcom,q6apm-dais";
++							iommus = <&apps_smmu 0x1001 0x80>,
++								 <&apps_smmu 0x1061 0x0>;
++						};
++					};
++
++					q6prm: service@2 {
++						compatible = "qcom,q6prm";
++						reg = <GPR_PRM_MODULE_IID>;
++						qcom,protection-domain = "avs/audio",
++									 "msm/adsp/audio_pd";
++
++						q6prmcc: clock-controller {
++							compatible = "qcom,q6prm-lpass-clocks";
++							#clock-cells = <2>;
++						};
++					};
++				};
++			};
++		};
++
+ 		lpass_wsa2macro: codec@6aa0000 {
+ 			compatible = "qcom,x1e80100-lpass-wsa-macro", "qcom,sm8550-lpass-wsa-macro";
+ 			reg = <0 0x06aa0000 0 0x1000>;
+@@ -4115,7 +4252,7 @@ usb_2: usb@a2f8800 {
+ 					  <&gcc GCC_USB20_MASTER_CLK>;
+ 			assigned-clock-rates = <19200000>, <200000000>;
+ 
+-			interrupts-extended = <&intc GIC_SPI 240 IRQ_TYPE_LEVEL_HIGH>,
++			interrupts-extended = <&intc GIC_SPI 245 IRQ_TYPE_LEVEL_HIGH>,
+ 					      <&pdc 50 IRQ_TYPE_EDGE_BOTH>,
+ 					      <&pdc 49 IRQ_TYPE_EDGE_BOTH>;
+ 			interrupt-names = "pwr_event",
+@@ -4141,7 +4278,7 @@ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
+ 			usb_2_dwc3: usb@a200000 {
+ 				compatible = "snps,dwc3";
+ 				reg = <0 0x0a200000 0 0xcd00>;
+-				interrupts = <GIC_SPI 241 IRQ_TYPE_LEVEL_HIGH>;
++				interrupts = <GIC_SPI 240 IRQ_TYPE_LEVEL_HIGH>;
+ 				iommus = <&apps_smmu 0x14e0 0x0>;
+ 				phys = <&usb_2_hsphy>;
+ 				phy-names = "usb2-phy";
+@@ -6108,146 +6245,9 @@ system-cache-controller@25000000 {
+ 			interrupts = <GIC_SPI 266 IRQ_TYPE_LEVEL_HIGH>;
+ 		};
+ 
+-		remoteproc_adsp: remoteproc@30000000 {
+-			compatible = "qcom,x1e80100-adsp-pas";
+-			reg = <0 0x30000000 0 0x100>;
+-
+-			interrupts-extended = <&pdc 6 IRQ_TYPE_EDGE_RISING>,
+-					      <&smp2p_adsp_in 0 IRQ_TYPE_EDGE_RISING>,
+-					      <&smp2p_adsp_in 1 IRQ_TYPE_EDGE_RISING>,
+-					      <&smp2p_adsp_in 2 IRQ_TYPE_EDGE_RISING>,
+-					      <&smp2p_adsp_in 3 IRQ_TYPE_EDGE_RISING>;
+-			interrupt-names = "wdog",
+-					  "fatal",
+-					  "ready",
+-					  "handover",
+-					  "stop-ack";
+-
+-			clocks = <&rpmhcc RPMH_CXO_CLK>;
+-			clock-names = "xo";
+-
+-			power-domains = <&rpmhpd RPMHPD_LCX>,
+-					<&rpmhpd RPMHPD_LMX>;
+-			power-domain-names = "lcx",
+-					     "lmx";
+-
+-			interconnects = <&lpass_lpicx_noc MASTER_LPASS_PROC QCOM_ICC_TAG_ALWAYS
+-					 &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+-
+-			memory-region = <&adspslpi_mem>,
+-					<&q6_adsp_dtb_mem>;
+-
+-			qcom,qmp = <&aoss_qmp>;
+-
+-			qcom,smem-states = <&smp2p_adsp_out 0>;
+-			qcom,smem-state-names = "stop";
+-
+-			status = "disabled";
+-
+-			glink-edge {
+-				interrupts-extended = <&ipcc IPCC_CLIENT_LPASS
+-							     IPCC_MPROC_SIGNAL_GLINK_QMP
+-							     IRQ_TYPE_EDGE_RISING>;
+-				mboxes = <&ipcc IPCC_CLIENT_LPASS
+-						IPCC_MPROC_SIGNAL_GLINK_QMP>;
+-
+-				label = "lpass";
+-				qcom,remote-pid = <2>;
+-
+-				fastrpc {
+-					compatible = "qcom,fastrpc";
+-					qcom,glink-channels = "fastrpcglink-apps-dsp";
+-					label = "adsp";
+-					qcom,non-secure-domain;
+-					#address-cells = <1>;
+-					#size-cells = <0>;
+-
+-					compute-cb@3 {
+-						compatible = "qcom,fastrpc-compute-cb";
+-						reg = <3>;
+-						iommus = <&apps_smmu 0x1003 0x80>,
+-							 <&apps_smmu 0x1063 0x0>;
+-						dma-coherent;
+-					};
+-
+-					compute-cb@4 {
+-						compatible = "qcom,fastrpc-compute-cb";
+-						reg = <4>;
+-						iommus = <&apps_smmu 0x1004 0x80>,
+-							 <&apps_smmu 0x1064 0x0>;
+-						dma-coherent;
+-					};
+-
+-					compute-cb@5 {
+-						compatible = "qcom,fastrpc-compute-cb";
+-						reg = <5>;
+-						iommus = <&apps_smmu 0x1005 0x80>,
+-							 <&apps_smmu 0x1065 0x0>;
+-						dma-coherent;
+-					};
+-
+-					compute-cb@6 {
+-						compatible = "qcom,fastrpc-compute-cb";
+-						reg = <6>;
+-						iommus = <&apps_smmu 0x1006 0x80>,
+-							 <&apps_smmu 0x1066 0x0>;
+-						dma-coherent;
+-					};
+-
+-					compute-cb@7 {
+-						compatible = "qcom,fastrpc-compute-cb";
+-						reg = <7>;
+-						iommus = <&apps_smmu 0x1007 0x80>,
+-							 <&apps_smmu 0x1067 0x0>;
+-						dma-coherent;
+-					};
+-				};
+-
+-				gpr {
+-					compatible = "qcom,gpr";
+-					qcom,glink-channels = "adsp_apps";
+-					qcom,domain = <GPR_DOMAIN_ID_ADSP>;
+-					qcom,intents = <512 20>;
+-					#address-cells = <1>;
+-					#size-cells = <0>;
+-
+-					q6apm: service@1 {
+-						compatible = "qcom,q6apm";
+-						reg = <GPR_APM_MODULE_IID>;
+-						#sound-dai-cells = <0>;
+-						qcom,protection-domain = "avs/audio",
+-									 "msm/adsp/audio_pd";
+-
+-						q6apmbedai: bedais {
+-							compatible = "qcom,q6apm-lpass-dais";
+-							#sound-dai-cells = <1>;
+-						};
+-
+-						q6apmdai: dais {
+-							compatible = "qcom,q6apm-dais";
+-							iommus = <&apps_smmu 0x1001 0x80>,
+-								 <&apps_smmu 0x1061 0x0>;
+-						};
+-					};
+-
+-					q6prm: service@2 {
+-						compatible = "qcom,q6prm";
+-						reg = <GPR_PRM_MODULE_IID>;
+-						qcom,protection-domain = "avs/audio",
+-									 "msm/adsp/audio_pd";
+-
+-						q6prmcc: clock-controller {
+-							compatible = "qcom,q6prm-lpass-clocks";
+-							#clock-cells = <2>;
+-						};
+-					};
+-				};
+-			};
+-		};
+-
+ 		remoteproc_cdsp: remoteproc@32300000 {
+ 			compatible = "qcom,x1e80100-cdsp-pas";
+-			reg = <0 0x32300000 0 0x1400000>;
++			reg = <0x0 0x32300000 0x0 0x10000>;
+ 
+ 			interrupts-extended = <&intc GIC_SPI 578 IRQ_TYPE_EDGE_RISING>,
+ 					      <&smp2p_cdsp_in 0 IRQ_TYPE_EDGE_RISING>,
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
+index 650b1ba9c19213..257636d0d2cbb0 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
+@@ -181,7 +181,7 @@ &gmac {
+ 	snps,reset-active-low;
+ 	snps,reset-delays-us = <0 10000 50000>;
+ 	tx_delay = <0x10>;
+-	rx_delay = <0x10>;
++	rx_delay = <0x23>;
+ 	status = "okay";
+ };
+ 
+diff --git a/arch/arm64/boot/dts/rockchip/rk3568.dtsi b/arch/arm64/boot/dts/rockchip/rk3568.dtsi
+index 0946310e8c1248..6fd67ae2711746 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3568.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3568.dtsi
+@@ -262,6 +262,7 @@ combphy0: phy@fe820000 {
+ 		assigned-clocks = <&pmucru CLK_PCIEPHY0_REF>;
+ 		assigned-clock-rates = <100000000>;
+ 		resets = <&cru SRST_PIPEPHY0>;
++		reset-names = "phy";
+ 		rockchip,pipe-grf = <&pipegrf>;
+ 		rockchip,pipe-phy-grf = <&pipe_phy_grf0>;
+ 		#phy-cells = <1>;
+diff --git a/arch/arm64/boot/dts/rockchip/rk356x.dtsi b/arch/arm64/boot/dts/rockchip/rk356x.dtsi
+index 0ee0ada6f0ab0f..bc0f57a26c2ff8 100644
+--- a/arch/arm64/boot/dts/rockchip/rk356x.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk356x.dtsi
+@@ -1762,6 +1762,7 @@ combphy1: phy@fe830000 {
+ 		assigned-clocks = <&pmucru CLK_PCIEPHY1_REF>;
+ 		assigned-clock-rates = <100000000>;
+ 		resets = <&cru SRST_PIPEPHY1>;
++		reset-names = "phy";
+ 		rockchip,pipe-grf = <&pipegrf>;
+ 		rockchip,pipe-phy-grf = <&pipe_phy_grf1>;
+ 		#phy-cells = <1>;
+@@ -1778,6 +1779,7 @@ combphy2: phy@fe840000 {
+ 		assigned-clocks = <&pmucru CLK_PCIEPHY2_REF>;
+ 		assigned-clock-rates = <100000000>;
+ 		resets = <&cru SRST_PIPEPHY2>;
++		reset-names = "phy";
+ 		rockchip,pipe-grf = <&pipegrf>;
+ 		rockchip,pipe-phy-grf = <&pipe_phy_grf2>;
+ 		#phy-cells = <1>;
+diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
+index bc0b0d75acef7b..c1f45fd6b3e9a9 100644
+--- a/arch/arm64/include/asm/assembler.h
++++ b/arch/arm64/include/asm/assembler.h
+@@ -350,6 +350,11 @@ alternative_cb_end
+ 	// Narrow PARange to fit the PS field in TCR_ELx
+ 	ubfx	\tmp0, \tmp0, #ID_AA64MMFR0_EL1_PARANGE_SHIFT, #3
+ 	mov	\tmp1, #ID_AA64MMFR0_EL1_PARANGE_MAX
++#ifdef CONFIG_ARM64_LPA2
++alternative_if_not ARM64_HAS_VA52
++	mov	\tmp1, #ID_AA64MMFR0_EL1_PARANGE_48
++alternative_else_nop_endif
++#endif
+ 	cmp	\tmp0, \tmp1
+ 	csel	\tmp0, \tmp1, \tmp0, hi
+ 	bfi	\tcr, \tmp0, \pos, #3
+diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
+index fd330c1db289a6..a970def932aacb 100644
+--- a/arch/arm64/include/asm/pgtable-hwdef.h
++++ b/arch/arm64/include/asm/pgtable-hwdef.h
+@@ -218,12 +218,6 @@
+  */
+ #define S1_TABLE_AP		(_AT(pmdval_t, 3) << 61)
+ 
+-/*
+- * Highest possible physical address supported.
+- */
+-#define PHYS_MASK_SHIFT		(CONFIG_ARM64_PA_BITS)
+-#define PHYS_MASK		((UL(1) << PHYS_MASK_SHIFT) - 1)
+-
+ #define TTBR_CNP_BIT		(UL(1) << 0)
+ 
+ /*
+diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
+index 2a11d0c10760b9..3ce7c632fbfbc3 100644
+--- a/arch/arm64/include/asm/pgtable-prot.h
++++ b/arch/arm64/include/asm/pgtable-prot.h
+@@ -78,6 +78,7 @@ extern bool arm64_use_ng_mappings;
+ #define lpa2_is_enabled()	false
+ #define PTE_MAYBE_SHARED	PTE_SHARED
+ #define PMD_MAYBE_SHARED	PMD_SECT_S
++#define PHYS_MASK_SHIFT		(CONFIG_ARM64_PA_BITS)
+ #else
+ static inline bool __pure lpa2_is_enabled(void)
+ {
+@@ -86,8 +87,14 @@ static inline bool __pure lpa2_is_enabled(void)
+ 
+ #define PTE_MAYBE_SHARED	(lpa2_is_enabled() ? 0 : PTE_SHARED)
+ #define PMD_MAYBE_SHARED	(lpa2_is_enabled() ? 0 : PMD_SECT_S)
++#define PHYS_MASK_SHIFT		(lpa2_is_enabled() ? CONFIG_ARM64_PA_BITS : 48)
+ #endif
+ 
++/*
++ * Highest possible physical address supported.
++ */
++#define PHYS_MASK		((UL(1) << PHYS_MASK_SHIFT) - 1)
++
+ /*
+  * If we have userspace only BTI we don't want to mark kernel pages
+  * guarded even if the system does support BTI.
+diff --git a/arch/arm64/include/asm/sparsemem.h b/arch/arm64/include/asm/sparsemem.h
+index 8a8acc220371cb..84783efdc9d1f7 100644
+--- a/arch/arm64/include/asm/sparsemem.h
++++ b/arch/arm64/include/asm/sparsemem.h
+@@ -5,7 +5,10 @@
+ #ifndef __ASM_SPARSEMEM_H
+ #define __ASM_SPARSEMEM_H
+ 
+-#define MAX_PHYSMEM_BITS	CONFIG_ARM64_PA_BITS
++#include <asm/pgtable-prot.h>
++
++#define MAX_PHYSMEM_BITS		PHYS_MASK_SHIFT
++#define MAX_POSSIBLE_PHYSMEM_BITS	(52)
+ 
+ /*
+  * Section size must be at least 512MB for 64K base
+diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
+index db994d1fd97e70..709f2b51be6df3 100644
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -1153,12 +1153,6 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info)
+ 	    id_aa64pfr1_sme(read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1))) {
+ 		unsigned long cpacr = cpacr_save_enable_kernel_sme();
+ 
+-		/*
+-		 * We mask out SMPS since even if the hardware
+-		 * supports priorities the kernel does not at present
+-		 * and we block access to them.
+-		 */
+-		info->reg_smidr = read_cpuid(SMIDR_EL1) & ~SMIDR_EL1_SMPS;
+ 		vec_init_vq_map(ARM64_VEC_SME);
+ 
+ 		cpacr_restore(cpacr);
+@@ -1406,13 +1400,6 @@ void update_cpu_features(int cpu,
+ 	    id_aa64pfr1_sme(read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1))) {
+ 		unsigned long cpacr = cpacr_save_enable_kernel_sme();
+ 
+-		/*
+-		 * We mask out SMPS since even if the hardware
+-		 * supports priorities the kernel does not at present
+-		 * and we block access to them.
+-		 */
+-		info->reg_smidr = read_cpuid(SMIDR_EL1) & ~SMIDR_EL1_SMPS;
+-
+ 		/* Probe vector lengths */
+ 		if (!system_capabilities_finalized())
+ 			vec_update_vq_map(ARM64_VEC_SME);
+@@ -2923,6 +2910,13 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
+ 		.matches = match,						\
+ 	}
+ 
++#define HWCAP_CAP_MATCH_ID(match, reg, field, min_value, cap_type, cap)		\
++	{									\
++		__HWCAP_CAP(#cap, cap_type, cap)				\
++		HWCAP_CPUID_MATCH(reg, field, min_value) 			\
++		.matches = match,						\
++	}
++
+ #ifdef CONFIG_ARM64_PTR_AUTH
+ static const struct arm64_cpu_capabilities ptr_auth_hwcap_addr_matches[] = {
+ 	{
+@@ -2951,6 +2945,13 @@ static const struct arm64_cpu_capabilities ptr_auth_hwcap_gen_matches[] = {
+ };
+ #endif
+ 
++#ifdef CONFIG_ARM64_SVE
++static bool has_sve_feature(const struct arm64_cpu_capabilities *cap, int scope)
++{
++	return system_supports_sve() && has_user_cpuid_feature(cap, scope);
++}
++#endif
++
+ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
+ 	HWCAP_CAP(ID_AA64ISAR0_EL1, AES, PMULL, CAP_HWCAP, KERNEL_HWCAP_PMULL),
+ 	HWCAP_CAP(ID_AA64ISAR0_EL1, AES, AES, CAP_HWCAP, KERNEL_HWCAP_AES),
+@@ -2993,19 +2994,19 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
+ 	HWCAP_CAP(ID_AA64MMFR2_EL1, AT, IMP, CAP_HWCAP, KERNEL_HWCAP_USCAT),
+ #ifdef CONFIG_ARM64_SVE
+ 	HWCAP_CAP(ID_AA64PFR0_EL1, SVE, IMP, CAP_HWCAP, KERNEL_HWCAP_SVE),
+-	HWCAP_CAP(ID_AA64ZFR0_EL1, SVEver, SVE2p1, CAP_HWCAP, KERNEL_HWCAP_SVE2P1),
+-	HWCAP_CAP(ID_AA64ZFR0_EL1, SVEver, SVE2, CAP_HWCAP, KERNEL_HWCAP_SVE2),
+-	HWCAP_CAP(ID_AA64ZFR0_EL1, AES, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEAES),
+-	HWCAP_CAP(ID_AA64ZFR0_EL1, AES, PMULL128, CAP_HWCAP, KERNEL_HWCAP_SVEPMULL),
+-	HWCAP_CAP(ID_AA64ZFR0_EL1, BitPerm, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEBITPERM),
+-	HWCAP_CAP(ID_AA64ZFR0_EL1, B16B16, IMP, CAP_HWCAP, KERNEL_HWCAP_SVE_B16B16),
+-	HWCAP_CAP(ID_AA64ZFR0_EL1, BF16, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEBF16),
+-	HWCAP_CAP(ID_AA64ZFR0_EL1, BF16, EBF16, CAP_HWCAP, KERNEL_HWCAP_SVE_EBF16),
+-	HWCAP_CAP(ID_AA64ZFR0_EL1, SHA3, IMP, CAP_HWCAP, KERNEL_HWCAP_SVESHA3),
+-	HWCAP_CAP(ID_AA64ZFR0_EL1, SM4, IMP, CAP_HWCAP, KERNEL_HWCAP_SVESM4),
+-	HWCAP_CAP(ID_AA64ZFR0_EL1, I8MM, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEI8MM),
+-	HWCAP_CAP(ID_AA64ZFR0_EL1, F32MM, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEF32MM),
+-	HWCAP_CAP(ID_AA64ZFR0_EL1, F64MM, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEF64MM),
++	HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, SVEver, SVE2p1, CAP_HWCAP, KERNEL_HWCAP_SVE2P1),
++	HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, SVEver, SVE2, CAP_HWCAP, KERNEL_HWCAP_SVE2),
++	HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, AES, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEAES),
++	HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, AES, PMULL128, CAP_HWCAP, KERNEL_HWCAP_SVEPMULL),
++	HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, BitPerm, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEBITPERM),
++	HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, B16B16, IMP, CAP_HWCAP, KERNEL_HWCAP_SVE_B16B16),
++	HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, BF16, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEBF16),
++	HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, BF16, EBF16, CAP_HWCAP, KERNEL_HWCAP_SVE_EBF16),
++	HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, SHA3, IMP, CAP_HWCAP, KERNEL_HWCAP_SVESHA3),
++	HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, SM4, IMP, CAP_HWCAP, KERNEL_HWCAP_SVESM4),
++	HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, I8MM, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEI8MM),
++	HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, F32MM, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEF32MM),
++	HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, F64MM, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEF64MM),
+ #endif
+ 	HWCAP_CAP(ID_AA64PFR1_EL1, SSBS, SSBS2, CAP_HWCAP, KERNEL_HWCAP_SSBS),
+ #ifdef CONFIG_ARM64_BTI
+@@ -3376,7 +3377,7 @@ static void verify_hyp_capabilities(void)
+ 		return;
+ 
+ 	safe_mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
+-	mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
++	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
+ 	mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
+ 
+ 	/* Verify VMID bits */
+diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
+index 44718d0482b3b4..aec5e3947c780a 100644
+--- a/arch/arm64/kernel/cpuinfo.c
++++ b/arch/arm64/kernel/cpuinfo.c
+@@ -478,6 +478,16 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
+ 	if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0))
+ 		__cpuinfo_store_cpu_32bit(&info->aarch32);
+ 
++	if (IS_ENABLED(CONFIG_ARM64_SME) &&
++	    id_aa64pfr1_sme(info->reg_id_aa64pfr1)) {
++		/*
++		 * We mask out SMPS since even if the hardware
++		 * supports priorities the kernel does not at present
++		 * and we block access to them.
++		 */
++		info->reg_smidr = read_cpuid(SMIDR_EL1) & ~SMIDR_EL1_SMPS;
++	}
++
+ 	cpuinfo_detect_icache_policy(info);
+ }
+ 
+diff --git a/arch/arm64/kernel/pi/idreg-override.c b/arch/arm64/kernel/pi/idreg-override.c
+index 29d4b6244a6f63..5c03f5e0d352da 100644
+--- a/arch/arm64/kernel/pi/idreg-override.c
++++ b/arch/arm64/kernel/pi/idreg-override.c
+@@ -74,6 +74,15 @@ static bool __init mmfr2_varange_filter(u64 val)
+ 		id_aa64mmfr0_override.val |=
+ 			(ID_AA64MMFR0_EL1_TGRAN_LPA2 - 1) << ID_AA64MMFR0_EL1_TGRAN_SHIFT;
+ 		id_aa64mmfr0_override.mask |= 0xfU << ID_AA64MMFR0_EL1_TGRAN_SHIFT;
++
++		/*
++		 * Override PARange to 48 bits - the override will just be
++		 * ignored if the actual PARange is smaller, but this is
++		 * unlikely to be the case for LPA2 capable silicon.
++		 */
++		id_aa64mmfr0_override.val |=
++			ID_AA64MMFR0_EL1_PARANGE_48 << ID_AA64MMFR0_EL1_PARANGE_SHIFT;
++		id_aa64mmfr0_override.mask |= 0xfU << ID_AA64MMFR0_EL1_PARANGE_SHIFT;
+ 	}
+ #endif
+ 	return true;
+diff --git a/arch/arm64/kernel/pi/map_kernel.c b/arch/arm64/kernel/pi/map_kernel.c
+index f374a3e5a5fe10..e57b043f324b51 100644
+--- a/arch/arm64/kernel/pi/map_kernel.c
++++ b/arch/arm64/kernel/pi/map_kernel.c
+@@ -136,6 +136,12 @@ static void noinline __section(".idmap.text") set_ttbr0_for_lpa2(u64 ttbr)
+ {
+ 	u64 sctlr = read_sysreg(sctlr_el1);
+ 	u64 tcr = read_sysreg(tcr_el1) | TCR_DS;
++	u64 mmfr0 = read_sysreg(id_aa64mmfr0_el1);
++	u64 parange = cpuid_feature_extract_unsigned_field(mmfr0,
++							   ID_AA64MMFR0_EL1_PARANGE_SHIFT);
++
++	tcr &= ~TCR_IPS_MASK;
++	tcr |= parange << TCR_IPS_SHIFT;
+ 
+ 	asm("	msr	sctlr_el1, %0		;"
+ 	    "	isb				;"
+diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c
+index 1215df59041856..754914d9ec6835 100644
+--- a/arch/arm64/kvm/arch_timer.c
++++ b/arch/arm64/kvm/arch_timer.c
+@@ -466,10 +466,8 @@ static void timer_emulate(struct arch_timer_context *ctx)
+ 
+ 	trace_kvm_timer_emulate(ctx, should_fire);
+ 
+-	if (should_fire != ctx->irq.level) {
++	if (should_fire != ctx->irq.level)
+ 		kvm_timer_update_irq(ctx->vcpu, should_fire, ctx);
+-		return;
+-	}
+ 
+ 	/*
+ 	 * If the timer can fire now, we don't need to have a soft timer
+diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
+index 70ff9a20ef3af3..117702f033218d 100644
+--- a/arch/arm64/kvm/arm.c
++++ b/arch/arm64/kvm/arm.c
+@@ -1998,8 +1998,7 @@ static int kvm_init_vector_slots(void)
+ static void __init cpu_prepare_hyp_mode(int cpu, u32 hyp_va_bits)
+ {
+ 	struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu);
+-	u64 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
+-	unsigned long tcr;
++	unsigned long tcr, ips;
+ 
+ 	/*
+ 	 * Calculate the raw per-cpu offset without a translation from the
+@@ -2013,6 +2012,7 @@ static void __init cpu_prepare_hyp_mode(int cpu, u32 hyp_va_bits)
+ 	params->mair_el2 = read_sysreg(mair_el1);
+ 
+ 	tcr = read_sysreg(tcr_el1);
++	ips = FIELD_GET(TCR_IPS_MASK, tcr);
+ 	if (cpus_have_final_cap(ARM64_KVM_HVHE)) {
+ 		tcr |= TCR_EPD1_MASK;
+ 	} else {
+@@ -2022,8 +2022,8 @@ static void __init cpu_prepare_hyp_mode(int cpu, u32 hyp_va_bits)
+ 	tcr &= ~TCR_T0SZ_MASK;
+ 	tcr |= TCR_T0SZ(hyp_va_bits);
+ 	tcr &= ~TCR_EL2_PS_MASK;
+-	tcr |= FIELD_PREP(TCR_EL2_PS_MASK, kvm_get_parange(mmfr0));
+-	if (kvm_lpa2_is_enabled())
++	tcr |= FIELD_PREP(TCR_EL2_PS_MASK, ips);
++	if (lpa2_is_enabled())
+ 		tcr |= TCR_EL2_DS;
+ 	params->tcr_el2 = tcr;
+ 
+diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
+index 5f1e2103888b76..0a6956bbfb3269 100644
+--- a/arch/arm64/mm/hugetlbpage.c
++++ b/arch/arm64/mm/hugetlbpage.c
+@@ -508,6 +508,18 @@ pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
+ 
+ static int __init hugetlbpage_init(void)
+ {
++	/*
++	 * HugeTLB pages are supported on maximum four page table
++	 * levels (PUD, CONT PMD, PMD, CONT PTE) for a given base
++	 * page size, corresponding to hugetlb_add_hstate() calls
++	 * here.
++	 *
++	 * HUGE_MAX_HSTATE should at least match maximum supported
++	 * HugeTLB page sizes on the platform. Any new addition to
++	 * supported HugeTLB page sizes will also require changing
++	 * HUGE_MAX_HSTATE as well.
++	 */
++	BUILD_BUG_ON(HUGE_MAX_HSTATE < 4);
+ 	if (pud_sect_supported())
+ 		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
+ 
+diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
+index 93ba66de160ce4..ea71ef2e343c2c 100644
+--- a/arch/arm64/mm/init.c
++++ b/arch/arm64/mm/init.c
+@@ -278,7 +278,12 @@ void __init arm64_memblock_init(void)
+ 
+ 	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
+ 		extern u16 memstart_offset_seed;
+-		u64 mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
++
++		/*
++		 * Use the sanitised version of id_aa64mmfr0_el1 so that linear
++		 * map randomization can be enabled by shrinking the IPA space.
++		 */
++		u64 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
+ 		int parange = cpuid_feature_extract_unsigned_field(
+ 					mmfr0, ID_AA64MMFR0_EL1_PARANGE_SHIFT);
+ 		s64 range = linear_region_size -
+diff --git a/arch/loongarch/include/uapi/asm/ptrace.h b/arch/loongarch/include/uapi/asm/ptrace.h
+index ac915f84165053..aafb3cd9e943e5 100644
+--- a/arch/loongarch/include/uapi/asm/ptrace.h
++++ b/arch/loongarch/include/uapi/asm/ptrace.h
+@@ -72,6 +72,16 @@ struct user_watch_state {
+ 	} dbg_regs[8];
+ };
+ 
++struct user_watch_state_v2 {
++	uint64_t dbg_info;
++	struct {
++		uint64_t    addr;
++		uint64_t    mask;
++		uint32_t    ctrl;
++		uint32_t    pad;
++	} dbg_regs[14];
++};
++
+ #define PTRACE_SYSEMU			0x1f
+ #define PTRACE_SYSEMU_SINGLESTEP	0x20
+ 
+diff --git a/arch/loongarch/kernel/ptrace.c b/arch/loongarch/kernel/ptrace.c
+index 19dc6eff45ccc8..5e2402cfcab0a1 100644
+--- a/arch/loongarch/kernel/ptrace.c
++++ b/arch/loongarch/kernel/ptrace.c
+@@ -720,7 +720,7 @@ static int hw_break_set(struct task_struct *target,
+ 	unsigned int note_type = regset->core_note_type;
+ 
+ 	/* Resource info */
+-	offset = offsetof(struct user_watch_state, dbg_regs);
++	offset = offsetof(struct user_watch_state_v2, dbg_regs);
+ 	user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);
+ 
+ 	/* (address, mask, ctrl) registers */
+@@ -920,7 +920,7 @@ static const struct user_regset loongarch64_regsets[] = {
+ #ifdef CONFIG_HAVE_HW_BREAKPOINT
+ 	[REGSET_HW_BREAK] = {
+ 		.core_note_type = NT_LOONGARCH_HW_BREAK,
+-		.n = sizeof(struct user_watch_state) / sizeof(u32),
++		.n = sizeof(struct user_watch_state_v2) / sizeof(u32),
+ 		.size = sizeof(u32),
+ 		.align = sizeof(u32),
+ 		.regset_get = hw_break_get,
+@@ -928,7 +928,7 @@ static const struct user_regset loongarch64_regsets[] = {
+ 	},
+ 	[REGSET_HW_WATCH] = {
+ 		.core_note_type = NT_LOONGARCH_HW_WATCH,
+-		.n = sizeof(struct user_watch_state) / sizeof(u32),
++		.n = sizeof(struct user_watch_state_v2) / sizeof(u32),
+ 		.size = sizeof(u32),
+ 		.align = sizeof(u32),
+ 		.regset_get = hw_break_get,
+diff --git a/arch/m68k/include/asm/vga.h b/arch/m68k/include/asm/vga.h
+index 4742e6bc3ab8ea..cdd414fa8710a9 100644
+--- a/arch/m68k/include/asm/vga.h
++++ b/arch/m68k/include/asm/vga.h
+@@ -9,7 +9,7 @@
+  */
+ #ifndef CONFIG_PCI
+ 
+-#include <asm/raw_io.h>
++#include <asm/io.h>
+ #include <asm/kmap.h>
+ 
+ /*
+@@ -29,9 +29,9 @@
+ #define inw_p(port)		0
+ #define outb_p(port, val)	do { } while (0)
+ #define outw(port, val)		do { } while (0)
+-#define readb			raw_inb
+-#define writeb			raw_outb
+-#define writew			raw_outw
++#define readb			__raw_readb
++#define writeb			__raw_writeb
++#define writew			__raw_writew
+ 
+ #endif /* CONFIG_PCI */
+ #endif /* _ASM_M68K_VGA_H */
+diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
+index 467b10f4361aeb..5078ebf071ec07 100644
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -1084,7 +1084,6 @@ config CSRC_IOASIC
+ 
+ config CSRC_R4K
+ 	select CLOCKSOURCE_WATCHDOG if CPU_FREQ
+-	select HAVE_UNSTABLE_SCHED_CLOCK if SMP && 64BIT
+ 	bool
+ 
+ config CSRC_SB1250
+diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
+index 8c401e42301cbf..f39e85fd58fa99 100644
+--- a/arch/mips/kernel/ftrace.c
++++ b/arch/mips/kernel/ftrace.c
+@@ -248,7 +248,7 @@ int ftrace_disable_ftrace_graph_caller(void)
+ #define S_R_SP	(0xafb0 << 16)	/* s{d,w} R, offset(sp) */
+ #define OFFSET_MASK	0xffff	/* stack offset range: 0 ~ PT_SIZE */
+ 
+-unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
++static unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
+ 		old_parent_ra, unsigned long parent_ra_addr, unsigned long fp)
+ {
+ 	unsigned long sp, ip, tmp;
+diff --git a/arch/mips/loongson64/boardinfo.c b/arch/mips/loongson64/boardinfo.c
+index 280989c5a137b5..8bb275c93ac099 100644
+--- a/arch/mips/loongson64/boardinfo.c
++++ b/arch/mips/loongson64/boardinfo.c
+@@ -21,13 +21,11 @@ static ssize_t boardinfo_show(struct kobject *kobj,
+ 		       "BIOS Info\n"
+ 		       "Vendor\t\t\t: %s\n"
+ 		       "Version\t\t\t: %s\n"
+-		       "ROM Size\t\t: %d KB\n"
+ 		       "Release Date\t\t: %s\n",
+ 		       strsep(&tmp_board_manufacturer, "-"),
+ 		       eboard->name,
+ 		       strsep(&tmp_bios_vendor, "-"),
+ 		       einter->description,
+-		       einter->size,
+ 		       especial->special_name);
+ }
+ static struct kobj_attribute boardinfo_attr = __ATTR(boardinfo, 0444,
+diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
+index 265bc57819dfb5..c89e70df43d82b 100644
+--- a/arch/mips/math-emu/cp1emu.c
++++ b/arch/mips/math-emu/cp1emu.c
+@@ -1660,7 +1660,7 @@ static int fpux_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
+ 		break;
+ 	}
+ 
+-	case 0x3:
++	case 0x7:
+ 		if (MIPSInst_FUNC(ir) != pfetch_op)
+ 			return SIGILL;
+ 
+diff --git a/arch/mips/pci/pci-legacy.c b/arch/mips/pci/pci-legacy.c
+index ec2567f8efd83b..66898fd182dc1f 100644
+--- a/arch/mips/pci/pci-legacy.c
++++ b/arch/mips/pci/pci-legacy.c
+@@ -29,6 +29,14 @@ static LIST_HEAD(controllers);
+ 
+ static int pci_initialized;
+ 
++unsigned long pci_address_to_pio(phys_addr_t address)
++{
++	if (address > IO_SPACE_LIMIT)
++		return (unsigned long)-1;
++
++	return (unsigned long) address;
++}
++
+ /*
+  * We need to avoid collisions with `mirrored' VGA ports
+  * and other strange ISA hardware, so we always want the
+diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
+index aa6a3cad275d91..fcc5973f75195a 100644
+--- a/arch/parisc/Kconfig
++++ b/arch/parisc/Kconfig
+@@ -60,8 +60,8 @@ config PARISC
+ 	select HAVE_ARCH_MMAP_RND_BITS
+ 	select HAVE_ARCH_AUDITSYSCALL
+ 	select HAVE_ARCH_HASH
+-	select HAVE_ARCH_JUMP_LABEL
+-	select HAVE_ARCH_JUMP_LABEL_RELATIVE
++	# select HAVE_ARCH_JUMP_LABEL
++	# select HAVE_ARCH_JUMP_LABEL_RELATIVE
+ 	select HAVE_ARCH_KFENCE
+ 	select HAVE_ARCH_SECCOMP_FILTER
+ 	select HAVE_ARCH_TRACEHOOK
+diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
+index c664fdec75b1ab..6824e8139801c2 100644
+--- a/arch/powerpc/kvm/e500_mmu_host.c
++++ b/arch/powerpc/kvm/e500_mmu_host.c
+@@ -242,7 +242,7 @@ static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe)
+ 	return tlbe->mas7_3 & (MAS3_SW|MAS3_UW);
+ }
+ 
+-static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
++static inline bool kvmppc_e500_ref_setup(struct tlbe_ref *ref,
+ 					 struct kvm_book3e_206_tlb_entry *gtlbe,
+ 					 kvm_pfn_t pfn, unsigned int wimg)
+ {
+@@ -252,11 +252,7 @@ static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
+ 	/* Use guest supplied MAS2_G and MAS2_E */
+ 	ref->flags |= (gtlbe->mas2 & MAS2_ATTRIB_MASK) | wimg;
+ 
+-	/* Mark the page accessed */
+-	kvm_set_pfn_accessed(pfn);
+-
+-	if (tlbe_is_writable(gtlbe))
+-		kvm_set_pfn_dirty(pfn);
++	return tlbe_is_writable(gtlbe);
+ }
+ 
+ static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
+@@ -326,6 +322,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
+ {
+ 	struct kvm_memory_slot *slot;
+ 	unsigned long pfn = 0; /* silence GCC warning */
++	struct page *page = NULL;
+ 	unsigned long hva;
+ 	int pfnmap = 0;
+ 	int tsize = BOOK3E_PAGESZ_4K;
+@@ -337,6 +334,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
+ 	unsigned int wimg = 0;
+ 	pgd_t *pgdir;
+ 	unsigned long flags;
++	bool writable = false;
+ 
+ 	/* used to check for invalidations in progress */
+ 	mmu_seq = kvm->mmu_invalidate_seq;
+@@ -446,7 +444,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
+ 
+ 	if (likely(!pfnmap)) {
+ 		tsize_pages = 1UL << (tsize + 10 - PAGE_SHIFT);
+-		pfn = gfn_to_pfn_memslot(slot, gfn);
++		pfn = __kvm_faultin_pfn(slot, gfn, FOLL_WRITE, NULL, &page);
+ 		if (is_error_noslot_pfn(pfn)) {
+ 			if (printk_ratelimit())
+ 				pr_err("%s: real page not found for gfn %lx\n",
+@@ -481,7 +479,6 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
+ 		if (pte_present(pte)) {
+ 			wimg = (pte_val(pte) >> PTE_WIMGE_SHIFT) &
+ 				MAS2_WIMGE_MASK;
+-			local_irq_restore(flags);
+ 		} else {
+ 			local_irq_restore(flags);
+ 			pr_err_ratelimited("%s: pte not present: gfn %lx,pfn %lx\n",
+@@ -490,8 +487,9 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
+ 			goto out;
+ 		}
+ 	}
+-	kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);
++	local_irq_restore(flags);
+ 
++	writable = kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);
+ 	kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
+ 				ref, gvaddr, stlbe);
+ 
+@@ -499,11 +497,8 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
+ 	kvmppc_mmu_flush_icache(pfn);
+ 
+ out:
++	kvm_release_faultin_page(kvm, page, !!ret, writable);
+ 	spin_unlock(&kvm->mmu_lock);
+-
+-	/* Drop refcount on page, so that mmu notifiers can clear it */
+-	kvm_release_pfn_clean(pfn);
+-
+ 	return ret;
+ }
+ 
+diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
+index 1893f66371fa43..b12ef382fec709 100644
+--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
++++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
+@@ -580,8 +580,10 @@ static int pseries_eeh_get_state(struct eeh_pe *pe, int *delay)
+ 
+ 	switch(rets[0]) {
+ 	case 0:
+-		result = EEH_STATE_MMIO_ACTIVE |
+-			 EEH_STATE_DMA_ACTIVE;
++		result = EEH_STATE_MMIO_ACTIVE	|
++			 EEH_STATE_DMA_ACTIVE	|
++			 EEH_STATE_MMIO_ENABLED	|
++			 EEH_STATE_DMA_ENABLED;
+ 		break;
+ 	case 1:
+ 		result = EEH_STATE_RESET_ACTIVE |
+diff --git a/arch/s390/include/asm/asm-extable.h b/arch/s390/include/asm/asm-extable.h
+index 4a6b0a8b6412f1..00a67464c44534 100644
+--- a/arch/s390/include/asm/asm-extable.h
++++ b/arch/s390/include/asm/asm-extable.h
+@@ -14,6 +14,7 @@
+ #define EX_TYPE_UA_LOAD_REG	5
+ #define EX_TYPE_UA_LOAD_REGPAIR	6
+ #define EX_TYPE_ZEROPAD		7
++#define EX_TYPE_FPC		8
+ 
+ #define EX_DATA_REG_ERR_SHIFT	0
+ #define EX_DATA_REG_ERR		GENMASK(3, 0)
+@@ -84,4 +85,7 @@
+ #define EX_TABLE_ZEROPAD(_fault, _target, _regdata, _regaddr)		\
+ 	__EX_TABLE(__ex_table, _fault, _target, EX_TYPE_ZEROPAD, _regdata, _regaddr, 0)
+ 
++#define EX_TABLE_FPC(_fault, _target)					\
++	__EX_TABLE(__ex_table, _fault, _target, EX_TYPE_FPC, __stringify(%%r0), __stringify(%%r0), 0)
++
+ #endif /* __ASM_EXTABLE_H */
+diff --git a/arch/s390/include/asm/fpu-insn.h b/arch/s390/include/asm/fpu-insn.h
+index c1e2e521d9af7c..a4c9b4db62ff57 100644
+--- a/arch/s390/include/asm/fpu-insn.h
++++ b/arch/s390/include/asm/fpu-insn.h
+@@ -100,19 +100,12 @@ static __always_inline void fpu_lfpc(unsigned int *fpc)
+  */
+ static inline void fpu_lfpc_safe(unsigned int *fpc)
+ {
+-	u32 tmp;
+-
+ 	instrument_read(fpc, sizeof(*fpc));
+-	asm volatile("\n"
+-		"0:	lfpc	%[fpc]\n"
+-		"1:	nopr	%%r7\n"
+-		".pushsection .fixup, \"ax\"\n"
+-		"2:	lghi	%[tmp],0\n"
+-		"	sfpc	%[tmp]\n"
+-		"	jg	1b\n"
+-		".popsection\n"
+-		EX_TABLE(1b, 2b)
+-		: [tmp] "=d" (tmp)
++	asm_inline volatile(
++		"	lfpc	%[fpc]\n"
++		"0:	nopr	%%r7\n"
++		EX_TABLE_FPC(0b, 0b)
++		:
+ 		: [fpc] "Q" (*fpc)
+ 		: "memory");
+ }
+diff --git a/arch/s390/include/asm/futex.h b/arch/s390/include/asm/futex.h
+index eaeaeb3ff0be3e..752a2310f0d6c1 100644
+--- a/arch/s390/include/asm/futex.h
++++ b/arch/s390/include/asm/futex.h
+@@ -44,7 +44,7 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
+ 		break;
+ 	case FUTEX_OP_ANDN:
+ 		__futex_atomic_op("lr %2,%1\nnr %2,%5\n",
+-				  ret, oldval, newval, uaddr, oparg);
++				  ret, oldval, newval, uaddr, ~oparg);
+ 		break;
+ 	case FUTEX_OP_XOR:
+ 		__futex_atomic_op("lr %2,%1\nxr %2,%5\n",
+diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
+index 9a5236acc0a860..21ae93cbd8e478 100644
+--- a/arch/s390/include/asm/processor.h
++++ b/arch/s390/include/asm/processor.h
+@@ -162,8 +162,7 @@ static __always_inline void __stackleak_poison(unsigned long erase_low,
+ 		"	la	%[addr],256(%[addr])\n"
+ 		"	brctg	%[tmp],0b\n"
+ 		"1:	stg	%[poison],0(%[addr])\n"
+-		"	larl	%[tmp],3f\n"
+-		"	ex	%[count],0(%[tmp])\n"
++		"	exrl	%[count],3f\n"
+ 		"	j	4f\n"
+ 		"2:	stg	%[poison],0(%[addr])\n"
+ 		"	j	4f\n"
+diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
+index 377b9aaf8c9248..ff1ddba96352a1 100644
+--- a/arch/s390/kernel/vmlinux.lds.S
++++ b/arch/s390/kernel/vmlinux.lds.S
+@@ -52,7 +52,6 @@ SECTIONS
+ 		SOFTIRQENTRY_TEXT
+ 		FTRACE_HOTPATCH_TRAMPOLINES_TEXT
+ 		*(.text.*_indirect_*)
+-		*(.fixup)
+ 		*(.gnu.warning)
+ 		. = ALIGN(PAGE_SIZE);
+ 		_etext = .;		/* End of text section */
+diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
+index 89cafea4c41f26..caf40665fce96e 100644
+--- a/arch/s390/kvm/vsie.c
++++ b/arch/s390/kvm/vsie.c
+@@ -1362,8 +1362,14 @@ static struct vsie_page *get_vsie_page(struct kvm *kvm, unsigned long addr)
+ 	page = radix_tree_lookup(&kvm->arch.vsie.addr_to_page, addr >> 9);
+ 	rcu_read_unlock();
+ 	if (page) {
+-		if (page_ref_inc_return(page) == 2)
+-			return page_to_virt(page);
++		if (page_ref_inc_return(page) == 2) {
++			if (page->index == addr)
++				return page_to_virt(page);
++			/*
++			 * We raced with someone reusing + putting this vsie
++			 * page before we grabbed it.
++			 */
++		}
+ 		page_ref_dec(page);
+ 	}
+ 
+@@ -1393,15 +1399,20 @@ static struct vsie_page *get_vsie_page(struct kvm *kvm, unsigned long addr)
+ 			kvm->arch.vsie.next++;
+ 			kvm->arch.vsie.next %= nr_vcpus;
+ 		}
+-		radix_tree_delete(&kvm->arch.vsie.addr_to_page, page->index >> 9);
++		if (page->index != ULONG_MAX)
++			radix_tree_delete(&kvm->arch.vsie.addr_to_page,
++					  page->index >> 9);
+ 	}
+-	page->index = addr;
+-	/* double use of the same address */
++	/* Mark it as invalid until it resides in the tree. */
++	page->index = ULONG_MAX;
++
++	/* Double use of the same address or allocation failure. */
+ 	if (radix_tree_insert(&kvm->arch.vsie.addr_to_page, addr >> 9, page)) {
+ 		page_ref_dec(page);
+ 		mutex_unlock(&kvm->arch.vsie.mutex);
+ 		return NULL;
+ 	}
++	page->index = addr;
+ 	mutex_unlock(&kvm->arch.vsie.mutex);
+ 
+ 	vsie_page = page_to_virt(page);
+@@ -1496,7 +1507,9 @@ void kvm_s390_vsie_destroy(struct kvm *kvm)
+ 		vsie_page = page_to_virt(page);
+ 		release_gmap_shadow(vsie_page);
+ 		/* free the radix tree entry */
+-		radix_tree_delete(&kvm->arch.vsie.addr_to_page, page->index >> 9);
++		if (page->index != ULONG_MAX)
++			radix_tree_delete(&kvm->arch.vsie.addr_to_page,
++					  page->index >> 9);
+ 		__free_page(page);
+ 	}
+ 	kvm->arch.vsie.page_count = 0;
+diff --git a/arch/s390/mm/extable.c b/arch/s390/mm/extable.c
+index 0a0738a473af05..812ec5be129169 100644
+--- a/arch/s390/mm/extable.c
++++ b/arch/s390/mm/extable.c
+@@ -77,6 +77,13 @@ static bool ex_handler_zeropad(const struct exception_table_entry *ex, struct pt
+ 	return true;
+ }
+ 
++static bool ex_handler_fpc(const struct exception_table_entry *ex, struct pt_regs *regs)
++{
++	asm volatile("sfpc	%[val]\n" : : [val] "d" (0));
++	regs->psw.addr = extable_fixup(ex);
++	return true;
++}
++
+ bool fixup_exception(struct pt_regs *regs)
+ {
+ 	const struct exception_table_entry *ex;
+@@ -99,6 +106,8 @@ bool fixup_exception(struct pt_regs *regs)
+ 		return ex_handler_ua_load_reg(ex, true, regs);
+ 	case EX_TYPE_ZEROPAD:
+ 		return ex_handler_zeropad(ex, regs);
++	case EX_TYPE_FPC:
++		return ex_handler_fpc(ex, regs);
+ 	}
+ 	panic("invalid exception table entry");
+ }
+diff --git a/arch/s390/pci/pci_bus.c b/arch/s390/pci/pci_bus.c
+index 1b74a000ff6459..56a786ca7354b9 100644
+--- a/arch/s390/pci/pci_bus.c
++++ b/arch/s390/pci/pci_bus.c
+@@ -171,7 +171,6 @@ void zpci_bus_scan_busses(void)
+ static bool zpci_bus_is_multifunction_root(struct zpci_dev *zdev)
+ {
+ 	return !s390_pci_no_rid && zdev->rid_available &&
+-		zpci_is_device_configured(zdev) &&
+ 		!zdev->vfn;
+ }
+ 
+diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
+index f2051644de9432..606c74f274593e 100644
+--- a/arch/x86/boot/compressed/Makefile
++++ b/arch/x86/boot/compressed/Makefile
+@@ -25,6 +25,7 @@ targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma \
+ # avoid errors with '-march=i386', and future flags may depend on the target to
+ # be valid.
+ KBUILD_CFLAGS := -m$(BITS) -O2 $(CLANG_FLAGS)
++KBUILD_CFLAGS += -std=gnu11
+ KBUILD_CFLAGS += -fno-strict-aliasing -fPIE
+ KBUILD_CFLAGS += -Wundef
+ KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
+diff --git a/arch/x86/include/asm/kexec.h b/arch/x86/include/asm/kexec.h
+index ae5482a2f0ca0e..ccb8ff37fa9d4b 100644
+--- a/arch/x86/include/asm/kexec.h
++++ b/arch/x86/include/asm/kexec.h
+@@ -16,6 +16,7 @@
+ # define PAGES_NR		4
+ #endif
+ 
++# define KEXEC_CONTROL_PAGE_SIZE	4096
+ # define KEXEC_CONTROL_CODE_MAX_SIZE	2048
+ 
+ #ifndef __ASSEMBLY__
+@@ -43,7 +44,6 @@ struct kimage;
+ /* Maximum address we can use for the control code buffer */
+ # define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE
+ 
+-# define KEXEC_CONTROL_PAGE_SIZE	4096
+ 
+ /* The native architecture */
+ # define KEXEC_ARCH KEXEC_ARCH_386
+@@ -58,9 +58,6 @@ struct kimage;
+ /* Maximum address we can use for the control pages */
+ # define KEXEC_CONTROL_MEMORY_LIMIT     (MAXMEM-1)
+ 
+-/* Allocate one page for the pdp and the second for the code */
+-# define KEXEC_CONTROL_PAGE_SIZE  (4096UL + 4096UL)
+-
+ /* The native architecture */
+ # define KEXEC_ARCH KEXEC_ARCH_X86_64
+ #endif
+@@ -145,6 +142,19 @@ struct kimage_arch {
+ };
+ #else
+ struct kimage_arch {
++	/*
++	 * This is a kimage control page, as it must not overlap with either
++	 * source or destination address ranges.
++	 */
++	pgd_t *pgd;
++	/*
++	 * The virtual mapping of the control code page itself is used only
++	 * during the transition, while the current kernel's pages are all
++	 * in place. Thus the intermediate page table pages used to map it
++	 * are not control pages, but instead just normal pages obtained
++	 * with get_zeroed_page(). And have to be tracked (below) so that
++	 * they can be freed.
++	 */
+ 	p4d_t *p4d;
+ 	pud_t *pud;
+ 	pmd_t *pmd;
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 6b981868905f5d..5da67e5c00401b 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -27,6 +27,7 @@
+ #include <linux/hyperv.h>
+ #include <linux/kfifo.h>
+ #include <linux/sched/vhost_task.h>
++#include <linux/call_once.h>
+ 
+ #include <asm/apic.h>
+ #include <asm/pvclock-abi.h>
+@@ -1446,6 +1447,7 @@ struct kvm_arch {
+ 	struct kvm_x86_pmu_event_filter __rcu *pmu_event_filter;
+ 	struct vhost_task *nx_huge_page_recovery_thread;
+ 	u64 nx_huge_page_last;
++	struct once nx_once;
+ 
+ #ifdef CONFIG_X86_64
+ 	/* The number of TDP MMU pages across all roots. */
+diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
+index 4efecac49863ec..c70b86f1f2954f 100644
+--- a/arch/x86/kernel/acpi/boot.c
++++ b/arch/x86/kernel/acpi/boot.c
+@@ -226,6 +226,28 @@ acpi_parse_x2apic(union acpi_subtable_headers *header, const unsigned long end)
+ 	return 0;
+ }
+ 
++static int __init
++acpi_check_lapic(union acpi_subtable_headers *header, const unsigned long end)
++{
++	struct acpi_madt_local_apic *processor = NULL;
++
++	processor = (struct acpi_madt_local_apic *)header;
++
++	if (BAD_MADT_ENTRY(processor, end))
++		return -EINVAL;
++
++	/* Ignore invalid ID */
++	if (processor->id == 0xff)
++		return 0;
++
++	/* Ignore processors that can not be onlined */
++	if (!acpi_is_processor_usable(processor->lapic_flags))
++		return 0;
++
++	has_lapic_cpus = true;
++	return 0;
++}
++
+ static int __init
+ acpi_parse_lapic(union acpi_subtable_headers * header, const unsigned long end)
+ {
+@@ -257,7 +279,6 @@ acpi_parse_lapic(union acpi_subtable_headers * header, const unsigned long end)
+ 			       processor->processor_id, /* ACPI ID */
+ 			       processor->lapic_flags & ACPI_MADT_ENABLED);
+ 
+-	has_lapic_cpus = true;
+ 	return 0;
+ }
+ 
+@@ -1029,6 +1050,8 @@ static int __init early_acpi_parse_madt_lapic_addr_ovr(void)
+ static int __init acpi_parse_madt_lapic_entries(void)
+ {
+ 	int count, x2count = 0;
++	struct acpi_subtable_proc madt_proc[2];
++	int ret;
+ 
+ 	if (!boot_cpu_has(X86_FEATURE_APIC))
+ 		return -ENODEV;
+@@ -1037,10 +1060,27 @@ static int __init acpi_parse_madt_lapic_entries(void)
+ 				      acpi_parse_sapic, MAX_LOCAL_APIC);
+ 
+ 	if (!count) {
+-		count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC,
+-					acpi_parse_lapic, MAX_LOCAL_APIC);
+-		x2count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_X2APIC,
+-					acpi_parse_x2apic, MAX_LOCAL_APIC);
++		/* Check if there are valid LAPIC entries */
++		acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC, acpi_check_lapic, MAX_LOCAL_APIC);
++
++		/*
++		 * Enumerate the APIC IDs in the order that they appear in the
++		 * MADT, no matter LAPIC entry or x2APIC entry is used.
++		 */
++		memset(madt_proc, 0, sizeof(madt_proc));
++		madt_proc[0].id = ACPI_MADT_TYPE_LOCAL_APIC;
++		madt_proc[0].handler = acpi_parse_lapic;
++		madt_proc[1].id = ACPI_MADT_TYPE_LOCAL_X2APIC;
++		madt_proc[1].handler = acpi_parse_x2apic;
++		ret = acpi_table_parse_entries_array(ACPI_SIG_MADT,
++				sizeof(struct acpi_table_madt),
++				madt_proc, ARRAY_SIZE(madt_proc), MAX_LOCAL_APIC);
++		if (ret < 0) {
++			pr_err("Error parsing LAPIC/X2APIC entries\n");
++			return ret;
++		}
++		count = madt_proc[0].count;
++		x2count = madt_proc[1].count;
+ 	}
+ 	if (!count && !x2count) {
+ 		pr_err("No LAPIC entries present\n");
+diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
+index 9fe9972d2071b9..37b8244899d895 100644
+--- a/arch/x86/kernel/amd_nb.c
++++ b/arch/x86/kernel/amd_nb.c
+@@ -582,6 +582,10 @@ static __init void fix_erratum_688(void)
+ 
+ static __init int init_amd_nbs(void)
+ {
++	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
++	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
++		return 0;
++
+ 	amd_cache_northbridges();
+ 	amd_cache_gart();
+ 
+diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
+index 9c9ac606893e99..7223c38a8708fc 100644
+--- a/arch/x86/kernel/machine_kexec_64.c
++++ b/arch/x86/kernel/machine_kexec_64.c
+@@ -146,7 +146,8 @@ static void free_transition_pgtable(struct kimage *image)
+ 	image->arch.pte = NULL;
+ }
+ 
+-static int init_transition_pgtable(struct kimage *image, pgd_t *pgd)
++static int init_transition_pgtable(struct kimage *image, pgd_t *pgd,
++				   unsigned long control_page)
+ {
+ 	pgprot_t prot = PAGE_KERNEL_EXEC_NOENC;
+ 	unsigned long vaddr, paddr;
+@@ -157,7 +158,7 @@ static int init_transition_pgtable(struct kimage *image, pgd_t *pgd)
+ 	pte_t *pte;
+ 
+ 	vaddr = (unsigned long)relocate_kernel;
+-	paddr = __pa(page_address(image->control_code_page)+PAGE_SIZE);
++	paddr = control_page;
+ 	pgd += pgd_index(vaddr);
+ 	if (!pgd_present(*pgd)) {
+ 		p4d = (p4d_t *)get_zeroed_page(GFP_KERNEL);
+@@ -216,7 +217,7 @@ static void *alloc_pgt_page(void *data)
+ 	return p;
+ }
+ 
+-static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
++static int init_pgtable(struct kimage *image, unsigned long control_page)
+ {
+ 	struct x86_mapping_info info = {
+ 		.alloc_pgt_page	= alloc_pgt_page,
+@@ -225,12 +226,12 @@ static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
+ 		.kernpg_flag	= _KERNPG_TABLE_NOENC,
+ 	};
+ 	unsigned long mstart, mend;
+-	pgd_t *level4p;
+ 	int result;
+ 	int i;
+ 
+-	level4p = (pgd_t *)__va(start_pgtable);
+-	clear_page(level4p);
++	image->arch.pgd = alloc_pgt_page(image);
++	if (!image->arch.pgd)
++		return -ENOMEM;
+ 
+ 	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
+ 		info.page_flag   |= _PAGE_ENC;
+@@ -244,8 +245,8 @@ static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
+ 		mstart = pfn_mapped[i].start << PAGE_SHIFT;
+ 		mend   = pfn_mapped[i].end << PAGE_SHIFT;
+ 
+-		result = kernel_ident_mapping_init(&info,
+-						 level4p, mstart, mend);
++		result = kernel_ident_mapping_init(&info, image->arch.pgd,
++						   mstart, mend);
+ 		if (result)
+ 			return result;
+ 	}
+@@ -260,8 +261,8 @@ static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
+ 		mstart = image->segment[i].mem;
+ 		mend   = mstart + image->segment[i].memsz;
+ 
+-		result = kernel_ident_mapping_init(&info,
+-						 level4p, mstart, mend);
++		result = kernel_ident_mapping_init(&info, image->arch.pgd,
++						   mstart, mend);
+ 
+ 		if (result)
+ 			return result;
+@@ -271,15 +272,19 @@ static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
+ 	 * Prepare EFI systab and ACPI tables for kexec kernel since they are
+ 	 * not covered by pfn_mapped.
+ 	 */
+-	result = map_efi_systab(&info, level4p);
++	result = map_efi_systab(&info, image->arch.pgd);
+ 	if (result)
+ 		return result;
+ 
+-	result = map_acpi_tables(&info, level4p);
++	result = map_acpi_tables(&info, image->arch.pgd);
+ 	if (result)
+ 		return result;
+ 
+-	return init_transition_pgtable(image, level4p);
++	/*
++	 * This must be last because the intermediate page table pages it
++	 * allocates will not be control pages and may overlap the image.
++	 */
++	return init_transition_pgtable(image, image->arch.pgd, control_page);
+ }
+ 
+ static void load_segments(void)
+@@ -296,14 +301,14 @@ static void load_segments(void)
+ 
+ int machine_kexec_prepare(struct kimage *image)
+ {
+-	unsigned long start_pgtable;
++	unsigned long control_page;
+ 	int result;
+ 
+ 	/* Calculate the offsets */
+-	start_pgtable = page_to_pfn(image->control_code_page) << PAGE_SHIFT;
++	control_page = page_to_pfn(image->control_code_page) << PAGE_SHIFT;
+ 
+ 	/* Setup the identity mapped 64bit page table */
+-	result = init_pgtable(image, start_pgtable);
++	result = init_pgtable(image, control_page);
+ 	if (result)
+ 		return result;
+ 
+@@ -357,13 +362,12 @@ void machine_kexec(struct kimage *image)
+ #endif
+ 	}
+ 
+-	control_page = page_address(image->control_code_page) + PAGE_SIZE;
++	control_page = page_address(image->control_code_page);
+ 	__memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
+ 
+ 	page_list[PA_CONTROL_PAGE] = virt_to_phys(control_page);
+ 	page_list[VA_CONTROL_PAGE] = (unsigned long)control_page;
+-	page_list[PA_TABLE_PAGE] =
+-	  (unsigned long)__pa(page_address(image->control_code_page));
++	page_list[PA_TABLE_PAGE] = (unsigned long)__pa(image->arch.pgd);
+ 
+ 	if (image->type == KEXEC_TYPE_DEFAULT)
+ 		page_list[PA_SWAP_PAGE] = (page_to_pfn(image->swap_page)
+@@ -573,8 +577,7 @@ static void kexec_mark_crashkres(bool protect)
+ 
+ 	/* Don't touch the control code page used in crash_kexec().*/
+ 	control = PFN_PHYS(page_to_pfn(kexec_crash_image->control_code_page));
+-	/* Control code page is located in the 2nd page. */
+-	kexec_mark_range(crashk_res.start, control + PAGE_SIZE - 1, protect);
++	kexec_mark_range(crashk_res.start, control - 1, protect);
+ 	control += KEXEC_CONTROL_PAGE_SIZE;
+ 	kexec_mark_range(control, crashk_res.end, protect);
+ }
+diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
+index f63f8fd00a91f3..15507e739c255b 100644
+--- a/arch/x86/kernel/process.c
++++ b/arch/x86/kernel/process.c
+@@ -838,7 +838,7 @@ void __noreturn stop_this_cpu(void *dummy)
+ #ifdef CONFIG_SMP
+ 	if (smp_ops.stop_this_cpu) {
+ 		smp_ops.stop_this_cpu();
+-		unreachable();
++		BUG();
+ 	}
+ #endif
+ 
+diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
+index 615922838c510b..dc1dd3f3e67fcd 100644
+--- a/arch/x86/kernel/reboot.c
++++ b/arch/x86/kernel/reboot.c
+@@ -883,7 +883,7 @@ static int crash_nmi_callback(unsigned int val, struct pt_regs *regs)
+ 
+ 	if (smp_ops.stop_this_cpu) {
+ 		smp_ops.stop_this_cpu();
+-		unreachable();
++		BUG();
+ 	}
+ 
+ 	/* Assume hlt works */
+diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
+index 1b4438e24814b4..9dd3796d075a56 100644
+--- a/arch/x86/kvm/mmu/mmu.c
++++ b/arch/x86/kvm/mmu/mmu.c
+@@ -7227,6 +7227,19 @@ static void mmu_destroy_caches(void)
+ 	kmem_cache_destroy(mmu_page_header_cache);
+ }
+ 
++static void kvm_wake_nx_recovery_thread(struct kvm *kvm)
++{
++	/*
++	 * The NX recovery thread is spawned on-demand at the first KVM_RUN and
++	 * may not be valid even though the VM is globally visible.  Do nothing,
++	 * as such a VM can't have any possible NX huge pages.
++	 */
++	struct vhost_task *nx_thread = READ_ONCE(kvm->arch.nx_huge_page_recovery_thread);
++
++	if (nx_thread)
++		vhost_task_wake(nx_thread);
++}
++
+ static int get_nx_huge_pages(char *buffer, const struct kernel_param *kp)
+ {
+ 	if (nx_hugepage_mitigation_hard_disabled)
+@@ -7287,7 +7300,7 @@ static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
+ 			kvm_mmu_zap_all_fast(kvm);
+ 			mutex_unlock(&kvm->slots_lock);
+ 
+-			vhost_task_wake(kvm->arch.nx_huge_page_recovery_thread);
++			kvm_wake_nx_recovery_thread(kvm);
+ 		}
+ 		mutex_unlock(&kvm_lock);
+ 	}
+@@ -7433,7 +7446,7 @@ static int set_nx_huge_pages_recovery_param(const char *val, const struct kernel
+ 		mutex_lock(&kvm_lock);
+ 
+ 		list_for_each_entry(kvm, &vm_list, vm_list)
+-			vhost_task_wake(kvm->arch.nx_huge_page_recovery_thread);
++			kvm_wake_nx_recovery_thread(kvm);
+ 
+ 		mutex_unlock(&kvm_lock);
+ 	}
+@@ -7565,20 +7578,34 @@ static bool kvm_nx_huge_page_recovery_worker(void *data)
+ 	return true;
+ }
+ 
++static void kvm_mmu_start_lpage_recovery(struct once *once)
++{
++	struct kvm_arch *ka = container_of(once, struct kvm_arch, nx_once);
++	struct kvm *kvm = container_of(ka, struct kvm, arch);
++	struct vhost_task *nx_thread;
++
++	kvm->arch.nx_huge_page_last = get_jiffies_64();
++	nx_thread = vhost_task_create(kvm_nx_huge_page_recovery_worker,
++				      kvm_nx_huge_page_recovery_worker_kill,
++				      kvm, "kvm-nx-lpage-recovery");
++
++	if (!nx_thread)
++		return;
++
++	vhost_task_start(nx_thread);
++
++	/* Make the task visible only once it is fully started. */
++	WRITE_ONCE(kvm->arch.nx_huge_page_recovery_thread, nx_thread);
++}
++
+ int kvm_mmu_post_init_vm(struct kvm *kvm)
+ {
+ 	if (nx_hugepage_mitigation_hard_disabled)
+ 		return 0;
+ 
+-	kvm->arch.nx_huge_page_last = get_jiffies_64();
+-	kvm->arch.nx_huge_page_recovery_thread = vhost_task_create(
+-		kvm_nx_huge_page_recovery_worker, kvm_nx_huge_page_recovery_worker_kill,
+-		kvm, "kvm-nx-lpage-recovery");
+-
++	call_once(&kvm->arch.nx_once, kvm_mmu_start_lpage_recovery);
+ 	if (!kvm->arch.nx_huge_page_recovery_thread)
+ 		return -ENOMEM;
+-
+-	vhost_task_start(kvm->arch.nx_huge_page_recovery_thread);
+ 	return 0;
+ }
+ 
+diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
+index fb854cf20ac3be..e9af87b1281407 100644
+--- a/arch/x86/kvm/svm/sev.c
++++ b/arch/x86/kvm/svm/sev.c
+@@ -3833,7 +3833,7 @@ static int snp_begin_psc(struct vcpu_svm *svm, struct psc_buffer *psc)
+ 		goto next_range;
+ 	}
+ 
+-	unreachable();
++	BUG();
+ }
+ 
+ static int __sev_snp_update_protected_guest_state(struct kvm_vcpu *vcpu)
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index b49e2eb4893080..d760b19d1e513e 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -11478,6 +11478,10 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
+ 	struct kvm_run *kvm_run = vcpu->run;
+ 	int r;
+ 
++	r = kvm_mmu_post_init_vm(vcpu->kvm);
++	if (r)
++		return r;
++
+ 	vcpu_load(vcpu);
+ 	kvm_sigset_activate(vcpu);
+ 	kvm_run->flags = 0;
+@@ -12751,7 +12755,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
+ 
+ int kvm_arch_post_init_vm(struct kvm *kvm)
+ {
+-	return kvm_mmu_post_init_vm(kvm);
++	once_init(&kvm->arch.nx_once);
++	return 0;
+ }
+ 
+ static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
+diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
+index e6c469b323ccb7..ac52255fab01f4 100644
+--- a/arch/x86/mm/fault.c
++++ b/arch/x86/mm/fault.c
+@@ -678,7 +678,7 @@ page_fault_oops(struct pt_regs *regs, unsigned long error_code,
+ 			      ASM_CALL_ARG3,
+ 			      , [arg1] "r" (regs), [arg2] "r" (address), [arg3] "r" (&info));
+ 
+-		unreachable();
++		BUG();
+ 	}
+ #endif
+ 
+diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
+index 98a9bb92d75c88..12d5a0f37432ea 100644
+--- a/arch/x86/pci/fixup.c
++++ b/arch/x86/pci/fixup.c
+@@ -1010,4 +1010,34 @@ DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_AMD, 0x1668, amd_rp_pme_suspend);
+ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1668, amd_rp_pme_resume);
+ DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_AMD, 0x1669, amd_rp_pme_suspend);
+ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1669, amd_rp_pme_resume);
++
++/*
++ * Putting PCIe root ports on Ryzen SoCs with USB4 controllers into D3hot
++ * may cause problems when the system attempts wake up from s2idle.
++ *
++ * On the TUXEDO Sirius 16 Gen 1 with a specific old BIOS this manifests as
++ * a system hang.
++ */
++static const struct dmi_system_id quirk_tuxeo_rp_d3_dmi_table[] = {
++	{
++		.matches = {
++			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
++			DMI_EXACT_MATCH(DMI_BOARD_NAME, "APX958"),
++			DMI_EXACT_MATCH(DMI_BIOS_VERSION, "V1.00A00_20240108"),
++		},
++	},
++	{}
++};
++
++static void quirk_tuxeo_rp_d3(struct pci_dev *pdev)
++{
++	struct pci_dev *root_pdev;
++
++	if (dmi_check_system(quirk_tuxeo_rp_d3_dmi_table)) {
++		root_pdev = pcie_find_root_port(pdev);
++		if (root_pdev)
++			root_pdev->dev_flags |= PCI_DEV_FLAGS_NO_D3;
++	}
++}
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1502, quirk_tuxeo_rp_d3);
+ #endif /* CONFIG_SUSPEND */
+diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
+index 721a57700a3b05..55978e0dc17551 100644
+--- a/arch/x86/xen/xen-head.S
++++ b/arch/x86/xen/xen-head.S
+@@ -117,8 +117,8 @@ SYM_FUNC_START(xen_hypercall_hvm)
+ 	pop %ebx
+ 	pop %eax
+ #else
+-	lea xen_hypercall_amd(%rip), %rbx
+-	cmp %rax, %rbx
++	lea xen_hypercall_amd(%rip), %rcx
++	cmp %rax, %rcx
+ #ifdef CONFIG_FRAME_POINTER
+ 	pop %rax	/* Dummy pop. */
+ #endif
+@@ -132,6 +132,7 @@ SYM_FUNC_START(xen_hypercall_hvm)
+ 	pop %rcx
+ 	pop %rax
+ #endif
++	FRAME_END
+ 	/* Use correct hypercall function. */
+ 	jz xen_hypercall_amd
+ 	jmp xen_hypercall_intel
+diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
+index 45a395862fbc88..f1cf7f2909f3a7 100644
+--- a/block/blk-cgroup.c
++++ b/block/blk-cgroup.c
+@@ -1138,6 +1138,7 @@ static void blkcg_fill_root_iostats(void)
+ 		blkg_iostat_set(&blkg->iostat.cur, &tmp);
+ 		u64_stats_update_end_irqrestore(&blkg->iostat.sync, flags);
+ 	}
++	class_dev_iter_exit(&iter);
+ }
+ 
+ static void blkcg_print_one_stat(struct blkcg_gq *blkg, struct seq_file *s)
+diff --git a/block/fops.c b/block/fops.c
+index 13a67940d0408d..43983be5a2b3b1 100644
+--- a/block/fops.c
++++ b/block/fops.c
+@@ -758,11 +758,12 @@ static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
+ 		file_accessed(iocb->ki_filp);
+ 
+ 		ret = blkdev_direct_IO(iocb, to);
+-		if (ret >= 0) {
++		if (ret > 0) {
+ 			iocb->ki_pos += ret;
+ 			count -= ret;
+ 		}
+-		iov_iter_revert(to, count - iov_iter_count(to));
++		if (ret != -EIOCBQUEUED)
++			iov_iter_revert(to, count - iov_iter_count(to));
+ 		if (ret < 0 || !count)
+ 			goto reexpand;
+ 	}
+diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c
+index 10b7ae0f866c98..ef9a4ba18cb8a8 100644
+--- a/drivers/accel/ivpu/ivpu_pm.c
++++ b/drivers/accel/ivpu/ivpu_pm.c
+@@ -73,8 +73,8 @@ static int ivpu_resume(struct ivpu_device *vdev)
+ 	int ret;
+ 
+ retry:
+-	pci_restore_state(to_pci_dev(vdev->drm.dev));
+ 	pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D0);
++	pci_restore_state(to_pci_dev(vdev->drm.dev));
+ 
+ 	ret = ivpu_hw_power_up(vdev);
+ 	if (ret) {
+@@ -295,7 +295,10 @@ int ivpu_rpm_get(struct ivpu_device *vdev)
+ 	int ret;
+ 
+ 	ret = pm_runtime_resume_and_get(vdev->drm.dev);
+-	drm_WARN_ON(&vdev->drm, ret < 0);
++	if (ret < 0) {
++		ivpu_err(vdev, "Failed to resume NPU: %d\n", ret);
++		pm_runtime_set_suspended(vdev->drm.dev);
++	}
+ 
+ 	return ret;
+ }
+diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
+index ada93cfde9ba1c..cff6685fa6cc6b 100644
+--- a/drivers/acpi/apei/ghes.c
++++ b/drivers/acpi/apei/ghes.c
+@@ -173,8 +173,6 @@ static struct gen_pool *ghes_estatus_pool;
+ static struct ghes_estatus_cache __rcu *ghes_estatus_caches[GHES_ESTATUS_CACHES_SIZE];
+ static atomic_t ghes_estatus_cache_alloced;
+ 
+-static int ghes_panic_timeout __read_mostly = 30;
+-
+ static void __iomem *ghes_map(u64 pfn, enum fixed_addresses fixmap_idx)
+ {
+ 	phys_addr_t paddr;
+@@ -983,14 +981,16 @@ static void __ghes_panic(struct ghes *ghes,
+ 			 struct acpi_hest_generic_status *estatus,
+ 			 u64 buf_paddr, enum fixed_addresses fixmap_idx)
+ {
++	const char *msg = GHES_PFX "Fatal hardware error";
++
+ 	__ghes_print_estatus(KERN_EMERG, ghes->generic, estatus);
+ 
+ 	ghes_clear_estatus(ghes, estatus, buf_paddr, fixmap_idx);
+ 
+-	/* reboot to log the error! */
+ 	if (!panic_timeout)
+-		panic_timeout = ghes_panic_timeout;
+-	panic("Fatal hardware error!");
++		pr_emerg("%s but panic disabled\n", msg);
++
++	panic(msg);
+ }
+ 
+ static int ghes_proc(struct ghes *ghes)
+diff --git a/drivers/acpi/prmt.c b/drivers/acpi/prmt.c
+index 747f83f7114d29..e549914a636c66 100644
+--- a/drivers/acpi/prmt.c
++++ b/drivers/acpi/prmt.c
+@@ -287,9 +287,7 @@ static acpi_status acpi_platformrt_space_handler(u32 function,
+ 		if (!handler || !module)
+ 			goto invalid_guid;
+ 
+-		if (!handler->handler_addr ||
+-		    !handler->static_data_buffer_addr ||
+-		    !handler->acpi_param_buffer_addr) {
++		if (!handler->handler_addr) {
+ 			buffer->prm_status = PRM_HANDLER_ERROR;
+ 			return AE_OK;
+ 		}
+diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
+index 80a52a4e66dd16..e9186339f6e6bb 100644
+--- a/drivers/acpi/property.c
++++ b/drivers/acpi/property.c
+@@ -1187,8 +1187,6 @@ static int acpi_data_prop_read(const struct acpi_device_data *data,
+ 		}
+ 		break;
+ 	}
+-	if (nval == 0)
+-		return -EINVAL;
+ 
+ 	if (obj->type == ACPI_TYPE_BUFFER) {
+ 		if (proptype != DEV_PROP_U8)
+@@ -1212,9 +1210,11 @@ static int acpi_data_prop_read(const struct acpi_device_data *data,
+ 		ret = acpi_copy_property_array_uint(items, (u64 *)val, nval);
+ 		break;
+ 	case DEV_PROP_STRING:
+-		ret = acpi_copy_property_array_string(
+-			items, (char **)val,
+-			min_t(u32, nval, obj->package.count));
++		nval = min_t(u32, nval, obj->package.count);
++		if (nval == 0)
++			return -ENODATA;
++
++		ret = acpi_copy_property_array_string(items, (char **)val, nval);
+ 		break;
+ 	default:
+ 		ret = -EINVAL;
+diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
+index 67f277e1c3bf31..5a46c066abc365 100644
+--- a/drivers/ata/libata-sff.c
++++ b/drivers/ata/libata-sff.c
+@@ -601,7 +601,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
+ {
+ 	struct ata_port *ap = qc->ap;
+ 	struct page *page;
+-	unsigned int offset;
++	unsigned int offset, count;
+ 
+ 	if (!qc->cursg) {
+ 		qc->curbytes = qc->nbytes;
+@@ -617,25 +617,27 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
+ 	page = nth_page(page, (offset >> PAGE_SHIFT));
+ 	offset %= PAGE_SIZE;
+ 
+-	trace_ata_sff_pio_transfer_data(qc, offset, qc->sect_size);
++	/* don't overrun current sg */
++	count = min(qc->cursg->length - qc->cursg_ofs, qc->sect_size);
++
++	trace_ata_sff_pio_transfer_data(qc, offset, count);
+ 
+ 	/*
+ 	 * Split the transfer when it splits a page boundary.  Note that the
+ 	 * split still has to be dword aligned like all ATA data transfers.
+ 	 */
+ 	WARN_ON_ONCE(offset % 4);
+-	if (offset + qc->sect_size > PAGE_SIZE) {
++	if (offset + count > PAGE_SIZE) {
+ 		unsigned int split_len = PAGE_SIZE - offset;
+ 
+ 		ata_pio_xfer(qc, page, offset, split_len);
+-		ata_pio_xfer(qc, nth_page(page, 1), 0,
+-			     qc->sect_size - split_len);
++		ata_pio_xfer(qc, nth_page(page, 1), 0, count - split_len);
+ 	} else {
+-		ata_pio_xfer(qc, page, offset, qc->sect_size);
++		ata_pio_xfer(qc, page, offset, count);
+ 	}
+ 
+-	qc->curbytes += qc->sect_size;
+-	qc->cursg_ofs += qc->sect_size;
++	qc->curbytes += count;
++	qc->cursg_ofs += count;
+ 
+ 	if (qc->cursg_ofs == qc->cursg->length) {
+ 		qc->cursg = sg_next(qc->cursg);
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 258a5cb6f27afe..6bc6dd417adf64 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -604,6 +604,8 @@ static const struct usb_device_id quirks_table[] = {
+ 	/* MediaTek MT7922 Bluetooth devices */
+ 	{ USB_DEVICE(0x13d3, 0x3585), .driver_info = BTUSB_MEDIATEK |
+ 						     BTUSB_WIDEBAND_SPEECH },
++	{ USB_DEVICE(0x13d3, 0x3610), .driver_info = BTUSB_MEDIATEK |
++						     BTUSB_WIDEBAND_SPEECH },
+ 
+ 	/* MediaTek MT7922A Bluetooth devices */
+ 	{ USB_DEVICE(0x0489, 0xe0d8), .driver_info = BTUSB_MEDIATEK |
+@@ -668,6 +670,8 @@ static const struct usb_device_id quirks_table[] = {
+ 						     BTUSB_WIDEBAND_SPEECH },
+ 	{ USB_DEVICE(0x13d3, 0x3608), .driver_info = BTUSB_MEDIATEK |
+ 						     BTUSB_WIDEBAND_SPEECH },
++	{ USB_DEVICE(0x13d3, 0x3628), .driver_info = BTUSB_MEDIATEK |
++						     BTUSB_WIDEBAND_SPEECH },
+ 
+ 	/* Additional Realtek 8723AE Bluetooth devices */
+ 	{ USB_DEVICE(0x0930, 0x021d), .driver_info = BTUSB_REALTEK },
+diff --git a/drivers/char/misc.c b/drivers/char/misc.c
+index 541edc26ec89a1..2cf595d2e10b85 100644
+--- a/drivers/char/misc.c
++++ b/drivers/char/misc.c
+@@ -63,16 +63,30 @@ static DEFINE_MUTEX(misc_mtx);
+ #define DYNAMIC_MINORS 128 /* like dynamic majors */
+ static DEFINE_IDA(misc_minors_ida);
+ 
+-static int misc_minor_alloc(void)
++static int misc_minor_alloc(int minor)
+ {
+-	int ret;
+-
+-	ret = ida_alloc_max(&misc_minors_ida, DYNAMIC_MINORS - 1, GFP_KERNEL);
+-	if (ret >= 0) {
+-		ret = DYNAMIC_MINORS - ret - 1;
++	int ret = 0;
++
++	if (minor == MISC_DYNAMIC_MINOR) {
++		/* allocate free id */
++		ret = ida_alloc_max(&misc_minors_ida, DYNAMIC_MINORS - 1, GFP_KERNEL);
++		if (ret >= 0) {
++			ret = DYNAMIC_MINORS - ret - 1;
++		} else {
++			ret = ida_alloc_range(&misc_minors_ida, MISC_DYNAMIC_MINOR + 1,
++					      MINORMASK, GFP_KERNEL);
++		}
+ 	} else {
+-		ret = ida_alloc_range(&misc_minors_ida, MISC_DYNAMIC_MINOR + 1,
+-				      MINORMASK, GFP_KERNEL);
++		/* specific minor, check if it is in dynamic or misc dynamic range  */
++		if (minor < DYNAMIC_MINORS) {
++			minor = DYNAMIC_MINORS - minor - 1;
++			ret = ida_alloc_range(&misc_minors_ida, minor, minor, GFP_KERNEL);
++		} else if (minor > MISC_DYNAMIC_MINOR) {
++			ret = ida_alloc_range(&misc_minors_ida, minor, minor, GFP_KERNEL);
++		} else {
++			/* case of non-dynamic minors, no need to allocate id */
++			ret = 0;
++		}
+ 	}
+ 	return ret;
+ }
+@@ -219,7 +233,7 @@ int misc_register(struct miscdevice *misc)
+ 	mutex_lock(&misc_mtx);
+ 
+ 	if (is_dynamic) {
+-		int i = misc_minor_alloc();
++		int i = misc_minor_alloc(misc->minor);
+ 
+ 		if (i < 0) {
+ 			err = -EBUSY;
+@@ -228,6 +242,7 @@ int misc_register(struct miscdevice *misc)
+ 		misc->minor = i;
+ 	} else {
+ 		struct miscdevice *c;
++		int i;
+ 
+ 		list_for_each_entry(c, &misc_list, list) {
+ 			if (c->minor == misc->minor) {
+@@ -235,6 +250,12 @@ int misc_register(struct miscdevice *misc)
+ 				goto out;
+ 			}
+ 		}
++
++		i = misc_minor_alloc(misc->minor);
++		if (i < 0) {
++			err = -EBUSY;
++			goto out;
++		}
+ 	}
+ 
+ 	dev = MKDEV(MISC_MAJOR, misc->minor);
+diff --git a/drivers/char/tpm/eventlog/acpi.c b/drivers/char/tpm/eventlog/acpi.c
+index 69533d0bfb51e8..cf02ec646f46f0 100644
+--- a/drivers/char/tpm/eventlog/acpi.c
++++ b/drivers/char/tpm/eventlog/acpi.c
+@@ -63,6 +63,11 @@ static bool tpm_is_tpm2_log(void *bios_event_log, u64 len)
+ 	return n == 0;
+ }
+ 
++static void tpm_bios_log_free(void *data)
++{
++	kvfree(data);
++}
++
+ /* read binary bios log */
+ int tpm_read_log_acpi(struct tpm_chip *chip)
+ {
+@@ -136,7 +141,7 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
+ 	}
+ 
+ 	/* malloc EventLog space */
+-	log->bios_event_log = devm_kmalloc(&chip->dev, len, GFP_KERNEL);
++	log->bios_event_log = kvmalloc(len, GFP_KERNEL);
+ 	if (!log->bios_event_log)
+ 		return -ENOMEM;
+ 
+@@ -161,10 +166,16 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
+ 		goto err;
+ 	}
+ 
++	ret = devm_add_action(&chip->dev, tpm_bios_log_free, log->bios_event_log);
++	if (ret) {
++		log->bios_event_log = NULL;
++		goto err;
++	}
++
+ 	return format;
+ 
+ err:
+-	devm_kfree(&chip->dev, log->bios_event_log);
++	tpm_bios_log_free(log->bios_event_log);
+ 	log->bios_event_log = NULL;
+ 	return ret;
+ }
+diff --git a/drivers/clk/clk-loongson2.c b/drivers/clk/clk-loongson2.c
+index 7082b4309c6f15..0d9485e83938a1 100644
+--- a/drivers/clk/clk-loongson2.c
++++ b/drivers/clk/clk-loongson2.c
+@@ -294,7 +294,7 @@ static int loongson2_clk_probe(struct platform_device *pdev)
+ 		return -EINVAL;
+ 
+ 	for (p = data; p->name; p++)
+-		clks_num++;
++		clks_num = max(clks_num, p->id + 1);
+ 
+ 	clp = devm_kzalloc(dev, struct_size(clp, clk_data.hws, clks_num),
+ 			   GFP_KERNEL);
+@@ -309,6 +309,9 @@ static int loongson2_clk_probe(struct platform_device *pdev)
+ 	clp->clk_data.num = clks_num;
+ 	clp->dev = dev;
+ 
++	/* Avoid returning NULL for unused id */
++	memset_p((void **)clp->clk_data.hws, ERR_PTR(-ENOENT), clks_num);
++
+ 	for (i = 0; i < clks_num; i++) {
+ 		p = &data[i];
+ 		switch (p->type) {
+diff --git a/drivers/clk/mediatek/clk-mt2701-aud.c b/drivers/clk/mediatek/clk-mt2701-aud.c
+index 425c69cfb105a6..e103121cf58e77 100644
+--- a/drivers/clk/mediatek/clk-mt2701-aud.c
++++ b/drivers/clk/mediatek/clk-mt2701-aud.c
+@@ -55,10 +55,16 @@ static const struct mtk_gate audio_clks[] = {
+ 	GATE_DUMMY(CLK_DUMMY, "aud_dummy"),
+ 	/* AUDIO0 */
+ 	GATE_AUDIO0(CLK_AUD_AFE, "audio_afe", "aud_intbus_sel", 2),
++	GATE_DUMMY(CLK_AUD_LRCK_DETECT, "audio_lrck_detect_dummy"),
++	GATE_DUMMY(CLK_AUD_I2S, "audio_i2c_dummy"),
++	GATE_DUMMY(CLK_AUD_APLL_TUNER, "audio_apll_tuner_dummy"),
+ 	GATE_AUDIO0(CLK_AUD_HDMI, "audio_hdmi", "audpll_sel", 20),
+ 	GATE_AUDIO0(CLK_AUD_SPDF, "audio_spdf", "audpll_sel", 21),
+ 	GATE_AUDIO0(CLK_AUD_SPDF2, "audio_spdf2", "audpll_sel", 22),
+ 	GATE_AUDIO0(CLK_AUD_APLL, "audio_apll", "audpll_sel", 23),
++	GATE_DUMMY(CLK_AUD_TML, "audio_tml_dummy"),
++	GATE_DUMMY(CLK_AUD_AHB_IDLE_EXT, "audio_ahb_idle_ext_dummy"),
++	GATE_DUMMY(CLK_AUD_AHB_IDLE_INT, "audio_ahb_idle_int_dummy"),
+ 	/* AUDIO1 */
+ 	GATE_AUDIO1(CLK_AUD_I2SIN1, "audio_i2sin1", "aud_mux1_sel", 0),
+ 	GATE_AUDIO1(CLK_AUD_I2SIN2, "audio_i2sin2", "aud_mux1_sel", 1),
+@@ -76,10 +82,12 @@ static const struct mtk_gate audio_clks[] = {
+ 	GATE_AUDIO1(CLK_AUD_ASRCI2, "audio_asrci2", "asm_h_sel", 13),
+ 	GATE_AUDIO1(CLK_AUD_ASRCO1, "audio_asrco1", "asm_h_sel", 14),
+ 	GATE_AUDIO1(CLK_AUD_ASRCO2, "audio_asrco2", "asm_h_sel", 15),
++	GATE_DUMMY(CLK_AUD_HDMIRX, "audio_hdmirx_dummy"),
+ 	GATE_AUDIO1(CLK_AUD_INTDIR, "audio_intdir", "intdir_sel", 20),
+ 	GATE_AUDIO1(CLK_AUD_A1SYS, "audio_a1sys", "aud_mux1_sel", 21),
+ 	GATE_AUDIO1(CLK_AUD_A2SYS, "audio_a2sys", "aud_mux2_sel", 22),
+ 	GATE_AUDIO1(CLK_AUD_AFE_CONN, "audio_afe_conn", "aud_mux1_sel", 23),
++	GATE_DUMMY(CLK_AUD_AFE_PCMIF, "audio_afe_pcmif_dummy"),
+ 	GATE_AUDIO1(CLK_AUD_AFE_MRGIF, "audio_afe_mrgif", "aud_mux1_sel", 25),
+ 	/* AUDIO2 */
+ 	GATE_AUDIO2(CLK_AUD_MMIF_UL1, "audio_ul1", "aud_mux1_sel", 0),
+@@ -100,6 +108,8 @@ static const struct mtk_gate audio_clks[] = {
+ 	GATE_AUDIO2(CLK_AUD_MMIF_AWB2, "audio_awb2", "aud_mux1_sel", 15),
+ 	GATE_AUDIO2(CLK_AUD_MMIF_DAI, "audio_dai", "aud_mux1_sel", 16),
+ 	/* AUDIO3 */
++	GATE_DUMMY(CLK_AUD_DMIC1, "audio_dmic1_dummy"),
++	GATE_DUMMY(CLK_AUD_DMIC2, "audio_dmic2_dummy"),
+ 	GATE_AUDIO3(CLK_AUD_ASRCI3, "audio_asrci3", "asm_h_sel", 2),
+ 	GATE_AUDIO3(CLK_AUD_ASRCI4, "audio_asrci4", "asm_h_sel", 3),
+ 	GATE_AUDIO3(CLK_AUD_ASRCI5, "audio_asrci5", "asm_h_sel", 4),
+diff --git a/drivers/clk/mediatek/clk-mt2701-bdp.c b/drivers/clk/mediatek/clk-mt2701-bdp.c
+index 5da3eabffd3e76..f11c7a4fa37b65 100644
+--- a/drivers/clk/mediatek/clk-mt2701-bdp.c
++++ b/drivers/clk/mediatek/clk-mt2701-bdp.c
+@@ -31,6 +31,7 @@ static const struct mtk_gate_regs bdp1_cg_regs = {
+ 	GATE_MTK(_id, _name, _parent, &bdp1_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
+ 
+ static const struct mtk_gate bdp_clks[] = {
++	GATE_DUMMY(CLK_DUMMY, "bdp_dummy"),
+ 	GATE_BDP0(CLK_BDP_BRG_BA, "brg_baclk", "mm_sel", 0),
+ 	GATE_BDP0(CLK_BDP_BRG_DRAM, "brg_dram", "mm_sel", 1),
+ 	GATE_BDP0(CLK_BDP_LARB_DRAM, "larb_dram", "mm_sel", 2),
+diff --git a/drivers/clk/mediatek/clk-mt2701-img.c b/drivers/clk/mediatek/clk-mt2701-img.c
+index 875594bc9dcba8..c158e54c46526e 100644
+--- a/drivers/clk/mediatek/clk-mt2701-img.c
++++ b/drivers/clk/mediatek/clk-mt2701-img.c
+@@ -22,6 +22,7 @@ static const struct mtk_gate_regs img_cg_regs = {
+ 	GATE_MTK(_id, _name, _parent, &img_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+ static const struct mtk_gate img_clks[] = {
++	GATE_DUMMY(CLK_DUMMY, "img_dummy"),
+ 	GATE_IMG(CLK_IMG_SMI_COMM, "img_smi_comm", "mm_sel", 0),
+ 	GATE_IMG(CLK_IMG_RESZ, "img_resz", "mm_sel", 1),
+ 	GATE_IMG(CLK_IMG_JPGDEC_SMI, "img_jpgdec_smi", "mm_sel", 5),
+diff --git a/drivers/clk/mediatek/clk-mt2701-mm.c b/drivers/clk/mediatek/clk-mt2701-mm.c
+index bc68fa718878f9..474d87d62e8331 100644
+--- a/drivers/clk/mediatek/clk-mt2701-mm.c
++++ b/drivers/clk/mediatek/clk-mt2701-mm.c
+@@ -31,6 +31,7 @@ static const struct mtk_gate_regs disp1_cg_regs = {
+ 	GATE_MTK(_id, _name, _parent, &disp1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+ static const struct mtk_gate mm_clks[] = {
++	GATE_DUMMY(CLK_DUMMY, "mm_dummy"),
+ 	GATE_DISP0(CLK_MM_SMI_COMMON, "mm_smi_comm", "mm_sel", 0),
+ 	GATE_DISP0(CLK_MM_SMI_LARB0, "mm_smi_larb0", "mm_sel", 1),
+ 	GATE_DISP0(CLK_MM_CMDQ, "mm_cmdq", "mm_sel", 2),
+diff --git a/drivers/clk/mediatek/clk-mt2701-vdec.c b/drivers/clk/mediatek/clk-mt2701-vdec.c
+index 94db86f8d0a462..5299d92f3aba0f 100644
+--- a/drivers/clk/mediatek/clk-mt2701-vdec.c
++++ b/drivers/clk/mediatek/clk-mt2701-vdec.c
+@@ -31,6 +31,7 @@ static const struct mtk_gate_regs vdec1_cg_regs = {
+ 	GATE_MTK(_id, _name, _parent, &vdec1_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
+ 
+ static const struct mtk_gate vdec_clks[] = {
++	GATE_DUMMY(CLK_DUMMY, "vdec_dummy"),
+ 	GATE_VDEC0(CLK_VDEC_CKGEN, "vdec_cken", "vdec_sel", 0),
+ 	GATE_VDEC1(CLK_VDEC_LARB, "vdec_larb_cken", "mm_sel", 0),
+ };
+diff --git a/drivers/clk/mmp/pwr-island.c b/drivers/clk/mmp/pwr-island.c
+index edaa2433a472ad..eaf5d2c5e59337 100644
+--- a/drivers/clk/mmp/pwr-island.c
++++ b/drivers/clk/mmp/pwr-island.c
+@@ -106,10 +106,10 @@ struct generic_pm_domain *mmp_pm_domain_register(const char *name,
+ 	pm_domain->flags = flags;
+ 	pm_domain->lock = lock;
+ 
+-	pm_genpd_init(&pm_domain->genpd, NULL, true);
+ 	pm_domain->genpd.name = name;
+ 	pm_domain->genpd.power_on = mmp_pm_domain_power_on;
+ 	pm_domain->genpd.power_off = mmp_pm_domain_power_off;
++	pm_genpd_init(&pm_domain->genpd, NULL, true);
+ 
+ 	return &pm_domain->genpd;
+ }
+diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
+index 9ba675f229b144..16145f74bbc853 100644
+--- a/drivers/clk/qcom/Kconfig
++++ b/drivers/clk/qcom/Kconfig
+@@ -1022,6 +1022,7 @@ config SM_GCC_7150
+ config SM_GCC_8150
+ 	tristate "SM8150 Global Clock Controller"
+ 	depends on ARM64 || COMPILE_TEST
++	select QCOM_GDSC
+ 	help
+ 	  Support for the global clock controller on SM8150 devices.
+ 	  Say Y if you want to use peripheral devices such as UART,
+diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
+index 49687512184b92..10e276dabff93d 100644
+--- a/drivers/clk/qcom/clk-alpha-pll.c
++++ b/drivers/clk/qcom/clk-alpha-pll.c
+@@ -432,6 +432,8 @@ void clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+ 	mask |= config->pre_div_mask;
+ 	mask |= config->post_div_mask;
+ 	mask |= config->vco_mask;
++	mask |= config->alpha_en_mask;
++	mask |= config->alpha_mode_mask;
+ 
+ 	regmap_update_bits(regmap, PLL_USER_CTL(pll), mask, val);
+ 
+diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c
+index eefc322ce36798..e6c33010cfbf69 100644
+--- a/drivers/clk/qcom/clk-rpmh.c
++++ b/drivers/clk/qcom/clk-rpmh.c
+@@ -329,7 +329,7 @@ static unsigned long clk_rpmh_bcm_recalc_rate(struct clk_hw *hw,
+ {
+ 	struct clk_rpmh *c = to_clk_rpmh(hw);
+ 
+-	return c->aggr_state * c->unit;
++	return (unsigned long)c->aggr_state * c->unit;
+ }
+ 
+ static const struct clk_ops clk_rpmh_bcm_ops = {
+diff --git a/drivers/clk/qcom/dispcc-sm6350.c b/drivers/clk/qcom/dispcc-sm6350.c
+index 50facb36701af9..2bc6b5f99f5725 100644
+--- a/drivers/clk/qcom/dispcc-sm6350.c
++++ b/drivers/clk/qcom/dispcc-sm6350.c
+@@ -187,13 +187,12 @@ static struct clk_rcg2 disp_cc_mdss_dp_aux_clk_src = {
+ 	.cmd_rcgr = 0x1144,
+ 	.mnd_width = 0,
+ 	.hid_width = 5,
++	.parent_map = disp_cc_parent_map_6,
+ 	.freq_tbl = ftbl_disp_cc_mdss_dp_aux_clk_src,
+ 	.clkr.hw.init = &(struct clk_init_data){
+ 		.name = "disp_cc_mdss_dp_aux_clk_src",
+-		.parent_data = &(const struct clk_parent_data){
+-			.fw_name = "bi_tcxo",
+-		},
+-		.num_parents = 1,
++		.parent_data = disp_cc_parent_data_6,
++		.num_parents = ARRAY_SIZE(disp_cc_parent_data_6),
+ 		.ops = &clk_rcg2_ops,
+ 	},
+ };
+diff --git a/drivers/clk/qcom/gcc-mdm9607.c b/drivers/clk/qcom/gcc-mdm9607.c
+index 6e6068b168e66e..07f1b78d737a15 100644
+--- a/drivers/clk/qcom/gcc-mdm9607.c
++++ b/drivers/clk/qcom/gcc-mdm9607.c
+@@ -535,7 +535,7 @@ static struct clk_rcg2 blsp1_uart5_apps_clk_src = {
+ };
+ 
+ static struct clk_rcg2 blsp1_uart6_apps_clk_src = {
+-	.cmd_rcgr = 0x6044,
++	.cmd_rcgr = 0x7044,
+ 	.mnd_width = 16,
+ 	.hid_width = 5,
+ 	.parent_map = gcc_xo_gpll0_map,
+diff --git a/drivers/clk/qcom/gcc-sm6350.c b/drivers/clk/qcom/gcc-sm6350.c
+index a811fad2aa2785..74346dc026068a 100644
+--- a/drivers/clk/qcom/gcc-sm6350.c
++++ b/drivers/clk/qcom/gcc-sm6350.c
+@@ -182,6 +182,14 @@ static const struct clk_parent_data gcc_parent_data_2_ao[] = {
+ 	{ .hw = &gpll0_out_odd.clkr.hw },
+ };
+ 
++static const struct parent_map gcc_parent_map_3[] = {
++	{ P_BI_TCXO, 0 },
++};
++
++static const struct clk_parent_data gcc_parent_data_3[] = {
++	{ .fw_name = "bi_tcxo" },
++};
++
+ static const struct parent_map gcc_parent_map_4[] = {
+ 	{ P_BI_TCXO, 0 },
+ 	{ P_GPLL0_OUT_MAIN, 1 },
+@@ -701,13 +709,12 @@ static struct clk_rcg2 gcc_ufs_phy_phy_aux_clk_src = {
+ 	.cmd_rcgr = 0x3a0b0,
+ 	.mnd_width = 0,
+ 	.hid_width = 5,
++	.parent_map = gcc_parent_map_3,
+ 	.freq_tbl = ftbl_gcc_ufs_phy_phy_aux_clk_src,
+ 	.clkr.hw.init = &(struct clk_init_data){
+ 		.name = "gcc_ufs_phy_phy_aux_clk_src",
+-		.parent_data = &(const struct clk_parent_data){
+-			.fw_name = "bi_tcxo",
+-		},
+-		.num_parents = 1,
++		.parent_data = gcc_parent_data_3,
++		.num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ 		.ops = &clk_rcg2_ops,
+ 	},
+ };
+@@ -764,13 +771,12 @@ static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = {
+ 	.cmd_rcgr = 0x1a034,
+ 	.mnd_width = 0,
+ 	.hid_width = 5,
++	.parent_map = gcc_parent_map_3,
+ 	.freq_tbl = ftbl_gcc_usb30_prim_mock_utmi_clk_src,
+ 	.clkr.hw.init = &(struct clk_init_data){
+ 		.name = "gcc_usb30_prim_mock_utmi_clk_src",
+-		.parent_data = &(const struct clk_parent_data){
+-			.fw_name = "bi_tcxo",
+-		},
+-		.num_parents = 1,
++		.parent_data = gcc_parent_data_3,
++		.num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ 		.ops = &clk_rcg2_ops,
+ 	},
+ };
+diff --git a/drivers/clk/qcom/gcc-sm8550.c b/drivers/clk/qcom/gcc-sm8550.c
+index 5abaeddd6afcc5..862a9bf73bcb5d 100644
+--- a/drivers/clk/qcom/gcc-sm8550.c
++++ b/drivers/clk/qcom/gcc-sm8550.c
+@@ -3003,7 +3003,7 @@ static struct gdsc pcie_0_gdsc = {
+ 	.pd = {
+ 		.name = "pcie_0_gdsc",
+ 	},
+-	.pwrsts = PWRSTS_OFF_ON,
++	.pwrsts = PWRSTS_RET_ON,
+ 	.flags = VOTABLE | POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+ };
+ 
+@@ -3014,7 +3014,7 @@ static struct gdsc pcie_0_phy_gdsc = {
+ 	.pd = {
+ 		.name = "pcie_0_phy_gdsc",
+ 	},
+-	.pwrsts = PWRSTS_OFF_ON,
++	.pwrsts = PWRSTS_RET_ON,
+ 	.flags = VOTABLE | POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+ };
+ 
+@@ -3025,7 +3025,7 @@ static struct gdsc pcie_1_gdsc = {
+ 	.pd = {
+ 		.name = "pcie_1_gdsc",
+ 	},
+-	.pwrsts = PWRSTS_OFF_ON,
++	.pwrsts = PWRSTS_RET_ON,
+ 	.flags = VOTABLE | POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+ };
+ 
+@@ -3036,7 +3036,7 @@ static struct gdsc pcie_1_phy_gdsc = {
+ 	.pd = {
+ 		.name = "pcie_1_phy_gdsc",
+ 	},
+-	.pwrsts = PWRSTS_OFF_ON,
++	.pwrsts = PWRSTS_RET_ON,
+ 	.flags = VOTABLE | POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+ };
+ 
+diff --git a/drivers/clk/qcom/gcc-sm8650.c b/drivers/clk/qcom/gcc-sm8650.c
+index fd9d6544bdd53a..9dd5c48f33bed5 100644
+--- a/drivers/clk/qcom/gcc-sm8650.c
++++ b/drivers/clk/qcom/gcc-sm8650.c
+@@ -3437,7 +3437,7 @@ static struct gdsc pcie_0_gdsc = {
+ 	.pd = {
+ 		.name = "pcie_0_gdsc",
+ 	},
+-	.pwrsts = PWRSTS_OFF_ON,
++	.pwrsts = PWRSTS_RET_ON,
+ 	.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+ };
+ 
+@@ -3448,7 +3448,7 @@ static struct gdsc pcie_0_phy_gdsc = {
+ 	.pd = {
+ 		.name = "pcie_0_phy_gdsc",
+ 	},
+-	.pwrsts = PWRSTS_OFF_ON,
++	.pwrsts = PWRSTS_RET_ON,
+ 	.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+ };
+ 
+@@ -3459,7 +3459,7 @@ static struct gdsc pcie_1_gdsc = {
+ 	.pd = {
+ 		.name = "pcie_1_gdsc",
+ 	},
+-	.pwrsts = PWRSTS_OFF_ON,
++	.pwrsts = PWRSTS_RET_ON,
+ 	.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+ };
+ 
+@@ -3470,7 +3470,7 @@ static struct gdsc pcie_1_phy_gdsc = {
+ 	.pd = {
+ 		.name = "pcie_1_phy_gdsc",
+ 	},
+-	.pwrsts = PWRSTS_OFF_ON,
++	.pwrsts = PWRSTS_RET_ON,
+ 	.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+ };
+ 
+diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a100.c b/drivers/clk/sunxi-ng/ccu-sun50i-a100.c
+index bbaa82978716a9..a59e420b195d77 100644
+--- a/drivers/clk/sunxi-ng/ccu-sun50i-a100.c
++++ b/drivers/clk/sunxi-ng/ccu-sun50i-a100.c
+@@ -436,7 +436,7 @@ static SUNXI_CCU_MP_WITH_MUX_GATE_POSTDIV(mmc0_clk, "mmc0", mmc_parents, 0x830,
+ 					  24, 2,	/* mux */
+ 					  BIT(31),	/* gate */
+ 					  2,		/* post-div */
+-					  CLK_SET_RATE_NO_REPARENT);
++					  0);
+ 
+ static SUNXI_CCU_MP_WITH_MUX_GATE_POSTDIV(mmc1_clk, "mmc1", mmc_parents, 0x834,
+ 					  0, 4,		/* M */
+@@ -444,7 +444,7 @@ static SUNXI_CCU_MP_WITH_MUX_GATE_POSTDIV(mmc1_clk, "mmc1", mmc_parents, 0x834,
+ 					  24, 2,	/* mux */
+ 					  BIT(31),	/* gate */
+ 					  2,		/* post-div */
+-					  CLK_SET_RATE_NO_REPARENT);
++					  0);
+ 
+ static SUNXI_CCU_MP_WITH_MUX_GATE_POSTDIV(mmc2_clk, "mmc2", mmc_parents, 0x838,
+ 					  0, 4,		/* M */
+@@ -452,7 +452,7 @@ static SUNXI_CCU_MP_WITH_MUX_GATE_POSTDIV(mmc2_clk, "mmc2", mmc_parents, 0x838,
+ 					  24, 2,	/* mux */
+ 					  BIT(31),	/* gate */
+ 					  2,		/* post-div */
+-					  CLK_SET_RATE_NO_REPARENT);
++					  0);
+ 
+ static SUNXI_CCU_GATE(bus_mmc0_clk, "bus-mmc0", "ahb3", 0x84c, BIT(0), 0);
+ static SUNXI_CCU_GATE(bus_mmc1_clk, "bus-mmc1", "ahb3", 0x84c, BIT(1), 0);
+diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
+index 588ab1cc6d557c..f089a1b9c0c98a 100644
+--- a/drivers/cpufreq/Kconfig
++++ b/drivers/cpufreq/Kconfig
+@@ -218,7 +218,7 @@ config CPUFREQ_DT
+ 	  If in doubt, say N.
+ 
+ config CPUFREQ_DT_PLATDEV
+-	tristate "Generic DT based cpufreq platdev driver"
++	bool "Generic DT based cpufreq platdev driver"
+ 	depends on OF
+ 	help
+ 	  This adds a generic DT based cpufreq platdev driver for frequency
+diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
+index 18942bfe9c95f7..78ad3221fe077e 100644
+--- a/drivers/cpufreq/cpufreq-dt-platdev.c
++++ b/drivers/cpufreq/cpufreq-dt-platdev.c
+@@ -234,5 +234,3 @@ static int __init cpufreq_dt_platdev_init(void)
+ 			       sizeof(struct cpufreq_dt_platform_data)));
+ }
+ core_initcall(cpufreq_dt_platdev_init);
+-MODULE_DESCRIPTION("Generic DT based cpufreq platdev driver");
+-MODULE_LICENSE("GPL");
+diff --git a/drivers/cpufreq/s3c64xx-cpufreq.c b/drivers/cpufreq/s3c64xx-cpufreq.c
+index c6bdfc308e9908..9cef7152807626 100644
+--- a/drivers/cpufreq/s3c64xx-cpufreq.c
++++ b/drivers/cpufreq/s3c64xx-cpufreq.c
+@@ -24,6 +24,7 @@ struct s3c64xx_dvfs {
+ 	unsigned int vddarm_max;
+ };
+ 
++#ifdef CONFIG_REGULATOR
+ static struct s3c64xx_dvfs s3c64xx_dvfs_table[] = {
+ 	[0] = { 1000000, 1150000 },
+ 	[1] = { 1050000, 1150000 },
+@@ -31,6 +32,7 @@ static struct s3c64xx_dvfs s3c64xx_dvfs_table[] = {
+ 	[3] = { 1200000, 1350000 },
+ 	[4] = { 1300000, 1350000 },
+ };
++#endif
+ 
+ static struct cpufreq_frequency_table s3c64xx_freq_table[] = {
+ 	{ 0, 0,  66000 },
+@@ -51,15 +53,16 @@ static struct cpufreq_frequency_table s3c64xx_freq_table[] = {
+ static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
+ 				      unsigned int index)
+ {
+-	struct s3c64xx_dvfs *dvfs;
+-	unsigned int old_freq, new_freq;
++	unsigned int new_freq = s3c64xx_freq_table[index].frequency;
+ 	int ret;
+ 
++#ifdef CONFIG_REGULATOR
++	struct s3c64xx_dvfs *dvfs;
++	unsigned int old_freq;
++
+ 	old_freq = clk_get_rate(policy->clk) / 1000;
+-	new_freq = s3c64xx_freq_table[index].frequency;
+ 	dvfs = &s3c64xx_dvfs_table[s3c64xx_freq_table[index].driver_data];
+ 
+-#ifdef CONFIG_REGULATOR
+ 	if (vddarm && new_freq > old_freq) {
+ 		ret = regulator_set_voltage(vddarm,
+ 					    dvfs->vddarm_min,
+diff --git a/drivers/crypto/qce/aead.c b/drivers/crypto/qce/aead.c
+index 7d811728f04782..97b56e92ea33f5 100644
+--- a/drivers/crypto/qce/aead.c
++++ b/drivers/crypto/qce/aead.c
+@@ -786,7 +786,7 @@ static int qce_aead_register_one(const struct qce_aead_def *def, struct qce_devi
+ 	alg->init			= qce_aead_init;
+ 	alg->exit			= qce_aead_exit;
+ 
+-	alg->base.cra_priority		= 300;
++	alg->base.cra_priority		= 275;
+ 	alg->base.cra_flags		= CRYPTO_ALG_ASYNC |
+ 					  CRYPTO_ALG_ALLOCATES_MEMORY |
+ 					  CRYPTO_ALG_KERN_DRIVER_ONLY |
+diff --git a/drivers/crypto/qce/core.c b/drivers/crypto/qce/core.c
+index 28b5fd82382775..3ec8297ed3fff7 100644
+--- a/drivers/crypto/qce/core.c
++++ b/drivers/crypto/qce/core.c
+@@ -51,16 +51,19 @@ static void qce_unregister_algs(struct qce_device *qce)
+ static int qce_register_algs(struct qce_device *qce)
+ {
+ 	const struct qce_algo_ops *ops;
+-	int i, ret = -ENODEV;
++	int i, j, ret = -ENODEV;
+ 
+ 	for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
+ 		ops = qce_ops[i];
+ 		ret = ops->register_algs(qce);
+-		if (ret)
+-			break;
++		if (ret) {
++			for (j = i - 1; j >= 0; j--)
++				ops->unregister_algs(qce);
++			return ret;
++		}
+ 	}
+ 
+-	return ret;
++	return 0;
+ }
+ 
+ static int qce_handle_request(struct crypto_async_request *async_req)
+@@ -247,7 +250,7 @@ static int qce_crypto_probe(struct platform_device *pdev)
+ 
+ 	ret = qce_check_version(qce);
+ 	if (ret)
+-		goto err_clks;
++		goto err_dma;
+ 
+ 	spin_lock_init(&qce->lock);
+ 	tasklet_init(&qce->done_tasklet, qce_tasklet_req_done,
+diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
+index fc72af8aa9a725..71b748183cfa86 100644
+--- a/drivers/crypto/qce/sha.c
++++ b/drivers/crypto/qce/sha.c
+@@ -482,7 +482,7 @@ static int qce_ahash_register_one(const struct qce_ahash_def *def,
+ 
+ 	base = &alg->halg.base;
+ 	base->cra_blocksize = def->blocksize;
+-	base->cra_priority = 300;
++	base->cra_priority = 175;
+ 	base->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
+ 	base->cra_ctxsize = sizeof(struct qce_sha_ctx);
+ 	base->cra_alignmask = 0;
+diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c
+index 5b493fdc1e747f..ffb334eb5b3461 100644
+--- a/drivers/crypto/qce/skcipher.c
++++ b/drivers/crypto/qce/skcipher.c
+@@ -461,7 +461,7 @@ static int qce_skcipher_register_one(const struct qce_skcipher_def *def,
+ 	alg->encrypt			= qce_skcipher_encrypt;
+ 	alg->decrypt			= qce_skcipher_decrypt;
+ 
+-	alg->base.cra_priority		= 300;
++	alg->base.cra_priority		= 275;
+ 	alg->base.cra_flags		= CRYPTO_ALG_ASYNC |
+ 					  CRYPTO_ALG_ALLOCATES_MEMORY |
+ 					  CRYPTO_ALG_KERN_DRIVER_ONLY;
+diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
+index 71d8b26c4103b9..9f35f69e0f9e2b 100644
+--- a/drivers/firmware/Kconfig
++++ b/drivers/firmware/Kconfig
+@@ -106,7 +106,7 @@ config ISCSI_IBFT
+ 	select ISCSI_BOOT_SYSFS
+ 	select ISCSI_IBFT_FIND if X86
+ 	depends on ACPI && SCSI && SCSI_LOWLEVEL
+-	default	n
++	default n
+ 	help
+ 	  This option enables support for detection and exposing of iSCSI
+ 	  Boot Firmware Table (iBFT) via sysfs to userspace. If you wish to
+diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
+index ed4e8ddbe76a50..1141cd06011ff4 100644
+--- a/drivers/firmware/efi/libstub/Makefile
++++ b/drivers/firmware/efi/libstub/Makefile
+@@ -11,7 +11,7 @@ cflags-y			:= $(KBUILD_CFLAGS)
+ 
+ cflags-$(CONFIG_X86_32)		:= -march=i386
+ cflags-$(CONFIG_X86_64)		:= -mcmodel=small
+-cflags-$(CONFIG_X86)		+= -m$(BITS) -D__KERNEL__ \
++cflags-$(CONFIG_X86)		+= -m$(BITS) -D__KERNEL__ -std=gnu11 \
+ 				   -fPIC -fno-strict-aliasing -mno-red-zone \
+ 				   -mno-mmx -mno-sse -fshort-wchar \
+ 				   -Wno-pointer-sign \
+diff --git a/drivers/firmware/qcom/qcom_scm.c b/drivers/firmware/qcom/qcom_scm.c
+index a6bdedbbf70888..2e093c39b610ae 100644
+--- a/drivers/firmware/qcom/qcom_scm.c
++++ b/drivers/firmware/qcom/qcom_scm.c
+@@ -217,7 +217,10 @@ static DEFINE_SPINLOCK(scm_query_lock);
+ 
+ struct qcom_tzmem_pool *qcom_scm_get_tzmem_pool(void)
+ {
+-	return __scm ? __scm->mempool : NULL;
++	if (!qcom_scm_is_available())
++		return NULL;
++
++	return __scm->mempool;
+ }
+ 
+ static enum qcom_scm_convention __get_convention(void)
+@@ -1839,7 +1842,8 @@ static int qcom_scm_qseecom_init(struct qcom_scm *scm)
+  */
+ bool qcom_scm_is_available(void)
+ {
+-	return !!READ_ONCE(__scm);
++	/* Paired with smp_store_release() in qcom_scm_probe */
++	return !!smp_load_acquire(&__scm);
+ }
+ EXPORT_SYMBOL_GPL(qcom_scm_is_available);
+ 
+@@ -1996,7 +2000,7 @@ static int qcom_scm_probe(struct platform_device *pdev)
+ 	if (ret)
+ 		return ret;
+ 
+-	/* Let all above stores be available after this */
++	/* Paired with smp_load_acquire() in qcom_scm_is_available(). */
+ 	smp_store_release(&__scm, scm);
+ 
+ 	irq = platform_get_irq_optional(pdev, 0);
+diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
+index e49802f26e07f8..d764a3af634670 100644
+--- a/drivers/gpio/gpio-pca953x.c
++++ b/drivers/gpio/gpio-pca953x.c
+@@ -841,25 +841,6 @@ static bool pca953x_irq_pending(struct pca953x_chip *chip, unsigned long *pendin
+ 	DECLARE_BITMAP(trigger, MAX_LINE);
+ 	int ret;
+ 
+-	if (chip->driver_data & PCA_PCAL) {
+-		/* Read the current interrupt status from the device */
+-		ret = pca953x_read_regs(chip, PCAL953X_INT_STAT, trigger);
+-		if (ret)
+-			return false;
+-
+-		/* Check latched inputs and clear interrupt status */
+-		ret = pca953x_read_regs(chip, chip->regs->input, cur_stat);
+-		if (ret)
+-			return false;
+-
+-		/* Apply filter for rising/falling edge selection */
+-		bitmap_replace(new_stat, chip->irq_trig_fall, chip->irq_trig_raise, cur_stat, gc->ngpio);
+-
+-		bitmap_and(pending, new_stat, trigger, gc->ngpio);
+-
+-		return !bitmap_empty(pending, gc->ngpio);
+-	}
+-
+ 	ret = pca953x_read_regs(chip, chip->regs->input, cur_stat);
+ 	if (ret)
+ 		return false;
+diff --git a/drivers/gpio/gpio-sim.c b/drivers/gpio/gpio-sim.c
+index deedacdeb23952..f83a8b5a51d0dc 100644
+--- a/drivers/gpio/gpio-sim.c
++++ b/drivers/gpio/gpio-sim.c
+@@ -1036,20 +1036,23 @@ gpio_sim_device_lockup_configfs(struct gpio_sim_device *dev, bool lock)
+ 	struct configfs_subsystem *subsys = dev->group.cg_subsys;
+ 	struct gpio_sim_bank *bank;
+ 	struct gpio_sim_line *line;
++	struct config_item *item;
+ 
+ 	/*
+-	 * The device only needs to depend on leaf line entries. This is
++	 * The device only needs to depend on leaf entries. This is
+ 	 * sufficient to lock up all the configfs entries that the
+ 	 * instantiated, alive device depends on.
+ 	 */
+ 	list_for_each_entry(bank, &dev->bank_list, siblings) {
+ 		list_for_each_entry(line, &bank->line_list, siblings) {
++			item = line->hog ? &line->hog->item
++					 : &line->group.cg_item;
++
+ 			if (lock)
+-				WARN_ON(configfs_depend_item_unlocked(
+-						subsys, &line->group.cg_item));
++				WARN_ON(configfs_depend_item_unlocked(subsys,
++								      item));
+ 			else
+-				configfs_undepend_item_unlocked(
+-						&line->group.cg_item);
++				configfs_undepend_item_unlocked(item);
+ 		}
+ 	}
+ }
+diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
+index 610e159d362ad6..7408ea8caacc3c 100644
+--- a/drivers/gpu/drm/Kconfig
++++ b/drivers/gpu/drm/Kconfig
+@@ -486,6 +486,10 @@ config DRM_HYPERV
+ config DRM_EXPORT_FOR_TESTS
+ 	bool
+ 
++# Separate option as not all DRM drivers use it
++config DRM_PANEL_BACKLIGHT_QUIRKS
++	tristate
++
+ config DRM_LIB_RANDOM
+ 	bool
+ 	default n
+diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
+index 784229d4504dcb..84746054c721a3 100644
+--- a/drivers/gpu/drm/Makefile
++++ b/drivers/gpu/drm/Makefile
+@@ -93,6 +93,7 @@ drm-$(CONFIG_DRM_PANIC_SCREEN_QR_CODE) += drm_panic_qr.o
+ obj-$(CONFIG_DRM)	+= drm.o
+ 
+ obj-$(CONFIG_DRM_PANEL_ORIENTATION_QUIRKS) += drm_panel_orientation_quirks.o
++obj-$(CONFIG_DRM_PANEL_BACKLIGHT_QUIRKS) += drm_panel_backlight_quirks.o
+ 
+ #
+ # Memory-management helpers
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index 81d9877c87357d..c27b4c36a7c0f5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -118,9 +118,10 @@
+  * - 3.57.0 - Compute tunneling on GFX10+
+  * - 3.58.0 - Add GFX12 DCC support
+  * - 3.59.0 - Cleared VRAM
++ * - 3.60.0 - Add AMDGPU_TILING_GFX12_DCC_WRITE_COMPRESS_DISABLE (Vulkan requirement)
+  */
+ #define KMS_DRIVER_MAJOR	3
+-#define KMS_DRIVER_MINOR	59
++#define KMS_DRIVER_MINOR	60
+ #define KMS_DRIVER_PATCHLEVEL	0
+ 
+ /*
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+index 156abd2ba5a6c6..05ebb8216a55a5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+@@ -1837,6 +1837,7 @@ void amdgpu_gfx_enforce_isolation_ring_begin_use(struct amdgpu_ring *ring)
+ {
+ 	struct amdgpu_device *adev = ring->adev;
+ 	u32 idx;
++	bool sched_work = false;
+ 
+ 	if (!adev->gfx.enable_cleaner_shader)
+ 		return;
+@@ -1852,15 +1853,19 @@ void amdgpu_gfx_enforce_isolation_ring_begin_use(struct amdgpu_ring *ring)
+ 	mutex_lock(&adev->enforce_isolation_mutex);
+ 	if (adev->enforce_isolation[idx]) {
+ 		if (adev->kfd.init_complete)
+-			amdgpu_gfx_kfd_sch_ctrl(adev, idx, false);
++			sched_work = true;
+ 	}
+ 	mutex_unlock(&adev->enforce_isolation_mutex);
++
++	if (sched_work)
++		amdgpu_gfx_kfd_sch_ctrl(adev, idx, false);
+ }
+ 
+ void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring)
+ {
+ 	struct amdgpu_device *adev = ring->adev;
+ 	u32 idx;
++	bool sched_work = false;
+ 
+ 	if (!adev->gfx.enable_cleaner_shader)
+ 		return;
+@@ -1876,7 +1881,10 @@ void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring)
+ 	mutex_lock(&adev->enforce_isolation_mutex);
+ 	if (adev->enforce_isolation[idx]) {
+ 		if (adev->kfd.init_complete)
+-			amdgpu_gfx_kfd_sch_ctrl(adev, idx, true);
++			sched_work = true;
+ 	}
+ 	mutex_unlock(&adev->enforce_isolation_mutex);
++
++	if (sched_work)
++		amdgpu_gfx_kfd_sch_ctrl(adev, idx, true);
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+index ae9ca6788df78c..425073d994912f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -309,7 +309,7 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
+ 	mutex_lock(&adev->mman.gtt_window_lock);
+ 	while (src_mm.remaining) {
+ 		uint64_t from, to, cur_size, tiling_flags;
+-		uint32_t num_type, data_format, max_com;
++		uint32_t num_type, data_format, max_com, write_compress_disable;
+ 		struct dma_fence *next;
+ 
+ 		/* Never copy more than 256MiB at once to avoid a timeout */
+@@ -340,9 +340,13 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
+ 			max_com = AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_MAX_COMPRESSED_BLOCK);
+ 			num_type = AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_NUMBER_TYPE);
+ 			data_format = AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_DATA_FORMAT);
++			write_compress_disable =
++				AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_WRITE_COMPRESS_DISABLE);
+ 			copy_flags |= (AMDGPU_COPY_FLAGS_SET(MAX_COMPRESSED, max_com) |
+ 				       AMDGPU_COPY_FLAGS_SET(NUMBER_TYPE, num_type) |
+-				       AMDGPU_COPY_FLAGS_SET(DATA_FORMAT, data_format));
++				       AMDGPU_COPY_FLAGS_SET(DATA_FORMAT, data_format) |
++				       AMDGPU_COPY_FLAGS_SET(WRITE_COMPRESS_DISABLE,
++							     write_compress_disable));
+ 		}
+ 
+ 		r = amdgpu_copy_buffer(ring, from, to, cur_size, resv,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+index 138d80017f3564..b7742fa74e1de2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+@@ -118,6 +118,8 @@ struct amdgpu_copy_mem {
+ #define AMDGPU_COPY_FLAGS_NUMBER_TYPE_MASK		0x07
+ #define AMDGPU_COPY_FLAGS_DATA_FORMAT_SHIFT		8
+ #define AMDGPU_COPY_FLAGS_DATA_FORMAT_MASK		0x3f
++#define AMDGPU_COPY_FLAGS_WRITE_COMPRESS_DISABLE_SHIFT	14
++#define AMDGPU_COPY_FLAGS_WRITE_COMPRESS_DISABLE_MASK	0x1
+ 
+ #define AMDGPU_COPY_FLAGS_SET(field, value) \
+ 	(((__u32)(value) & AMDGPU_COPY_FLAGS_##field##_MASK) << AMDGPU_COPY_FLAGS_##field##_SHIFT)
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
+index 6c19626ec59e9d..ca130880edfd42 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
+@@ -3981,17 +3981,6 @@ static void gfx_v12_0_update_coarse_grain_clock_gating(struct amdgpu_device *ade
+ 
+ 		if (def != data)
+ 			WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D, data);
+-
+-		data = RREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL);
+-		data &= ~SDMA0_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK;
+-		WREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL, data);
+-
+-		/* Some ASICs only have one SDMA instance, not need to configure SDMA1 */
+-		if (adev->sdma.num_instances > 1) {
+-			data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL);
+-			data &= ~SDMA1_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK;
+-			WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data);
+-		}
+ 	}
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
+index c77889040760ad..4dd86c682ee6a2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
+@@ -953,10 +953,12 @@ static int sdma_v4_4_2_inst_start(struct amdgpu_device *adev,
+ 		/* set utc l1 enable flag always to 1 */
+ 		temp = RREG32_SDMA(i, regSDMA_CNTL);
+ 		temp = REG_SET_FIELD(temp, SDMA_CNTL, UTC_L1_ENABLE, 1);
+-		/* enable context empty interrupt during initialization */
+-		temp = REG_SET_FIELD(temp, SDMA_CNTL, CTXEMPTY_INT_ENABLE, 1);
+-		WREG32_SDMA(i, regSDMA_CNTL, temp);
+ 
++		if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) < IP_VERSION(4, 4, 5)) {
++			/* enable context empty interrupt during initialization */
++			temp = REG_SET_FIELD(temp, SDMA_CNTL, CTXEMPTY_INT_ENABLE, 1);
++			WREG32_SDMA(i, regSDMA_CNTL, temp);
++		}
+ 		if (!amdgpu_sriov_vf(adev)) {
+ 			if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
+ 				/* unhalt engine */
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
+index 9288f37a3cc5c3..843e6b46deee82 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
+@@ -1688,11 +1688,12 @@ static void sdma_v7_0_emit_copy_buffer(struct amdgpu_ib *ib,
+ 				       uint32_t byte_count,
+ 				       uint32_t copy_flags)
+ {
+-	uint32_t num_type, data_format, max_com;
++	uint32_t num_type, data_format, max_com, write_cm;
+ 
+ 	max_com = AMDGPU_COPY_FLAGS_GET(copy_flags, MAX_COMPRESSED);
+ 	data_format = AMDGPU_COPY_FLAGS_GET(copy_flags, DATA_FORMAT);
+ 	num_type = AMDGPU_COPY_FLAGS_GET(copy_flags, NUMBER_TYPE);
++	write_cm = AMDGPU_COPY_FLAGS_GET(copy_flags, WRITE_COMPRESS_DISABLE) ? 2 : 1;
+ 
+ 	ib->ptr[ib->length_dw++] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_COPY) |
+ 		SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) |
+@@ -1709,7 +1710,7 @@ static void sdma_v7_0_emit_copy_buffer(struct amdgpu_ib *ib,
+ 	if ((copy_flags & (AMDGPU_COPY_FLAGS_READ_DECOMPRESSED | AMDGPU_COPY_FLAGS_WRITE_COMPRESSED)))
+ 		ib->ptr[ib->length_dw++] = SDMA_DCC_DATA_FORMAT(data_format) | SDMA_DCC_NUM_TYPE(num_type) |
+ 			((copy_flags & AMDGPU_COPY_FLAGS_READ_DECOMPRESSED) ? SDMA_DCC_READ_CM(2) : 0) |
+-			((copy_flags & AMDGPU_COPY_FLAGS_WRITE_COMPRESSED) ? SDMA_DCC_WRITE_CM(1) : 0) |
++			((copy_flags & AMDGPU_COPY_FLAGS_WRITE_COMPRESSED) ? SDMA_DCC_WRITE_CM(write_cm) : 0) |
+ 			SDMA_DCC_MAX_COM(max_com) | SDMA_DCC_MAX_UCOM(1);
+ 	else
+ 		ib->ptr[ib->length_dw++] = 0;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+index b05be24531e187..d350c7ce35b3d6 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+@@ -637,6 +637,14 @@ static void kfd_cleanup_nodes(struct kfd_dev *kfd, unsigned int num_nodes)
+ 	struct kfd_node *knode;
+ 	unsigned int i;
+ 
++	/*
++	 * flush_workqueue ensures that there are no outstanding
++	 * work-queue items that will access interrupt_ring. New work items
++	 * can't be created because we stopped interrupt handling above.
++	 */
++	flush_workqueue(kfd->ih_wq);
++	destroy_workqueue(kfd->ih_wq);
++
+ 	for (i = 0; i < num_nodes; i++) {
+ 		knode = kfd->nodes[i];
+ 		device_queue_manager_uninit(knode->dqm);
+@@ -1058,21 +1066,6 @@ static int kfd_resume(struct kfd_node *node)
+ 	return err;
+ }
+ 
+-static inline void kfd_queue_work(struct workqueue_struct *wq,
+-				  struct work_struct *work)
+-{
+-	int cpu, new_cpu;
+-
+-	cpu = new_cpu = smp_processor_id();
+-	do {
+-		new_cpu = cpumask_next(new_cpu, cpu_online_mask) % nr_cpu_ids;
+-		if (cpu_to_node(new_cpu) == numa_node_id())
+-			break;
+-	} while (cpu != new_cpu);
+-
+-	queue_work_on(new_cpu, wq, work);
+-}
+-
+ /* This is called directly from KGD at ISR. */
+ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
+ {
+@@ -1098,7 +1091,7 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
+ 			    	patched_ihre, &is_patched)
+ 		    && enqueue_ih_ring_entry(node,
+ 			    	is_patched ? patched_ihre : ih_ring_entry)) {
+-			kfd_queue_work(node->ih_wq, &node->interrupt_work);
++			queue_work(node->kfd->ih_wq, &node->interrupt_work);
+ 			spin_unlock_irqrestore(&node->interrupt_lock, flags);
+ 			return;
+ 		}
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+index f5b3ed20e891b3..3cfb4a38d17c7f 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+@@ -2290,9 +2290,9 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
+ 	 */
+ 	mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ];
+ 	if (mqd_mgr->check_preemption_failed(mqd_mgr, dqm->packet_mgr.priv_queue->queue->mqd)) {
++		while (halt_if_hws_hang)
++			schedule();
+ 		if (reset_queues_on_hws_hang(dqm)) {
+-			while (halt_if_hws_hang)
+-				schedule();
+ 			dqm->is_hws_hang = true;
+ 			kfd_hws_hang(dqm);
+ 			retval = -ETIME;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
+index 9b6b6e88259348..15b4b70cf19976 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
+@@ -62,11 +62,14 @@ int kfd_interrupt_init(struct kfd_node *node)
+ 		return r;
+ 	}
+ 
+-	node->ih_wq = alloc_workqueue("KFD IH", WQ_HIGHPRI, 1);
+-	if (unlikely(!node->ih_wq)) {
+-		kfifo_free(&node->ih_fifo);
+-		dev_err(node->adev->dev, "Failed to allocate KFD IH workqueue\n");
+-		return -ENOMEM;
++	if (!node->kfd->ih_wq) {
++		node->kfd->ih_wq = alloc_workqueue("KFD IH", WQ_HIGHPRI | WQ_UNBOUND,
++						   node->kfd->num_nodes);
++		if (unlikely(!node->kfd->ih_wq)) {
++			kfifo_free(&node->ih_fifo);
++			dev_err(node->adev->dev, "Failed to allocate KFD IH workqueue\n");
++			return -ENOMEM;
++		}
+ 	}
+ 	spin_lock_init(&node->interrupt_lock);
+ 
+@@ -96,16 +99,6 @@ void kfd_interrupt_exit(struct kfd_node *node)
+ 	spin_lock_irqsave(&node->interrupt_lock, flags);
+ 	node->interrupts_active = false;
+ 	spin_unlock_irqrestore(&node->interrupt_lock, flags);
+-
+-	/*
+-	 * flush_work ensures that there are no outstanding
+-	 * work-queue items that will access interrupt_ring. New work items
+-	 * can't be created because we stopped interrupt handling above.
+-	 */
+-	flush_workqueue(node->ih_wq);
+-
+-	destroy_workqueue(node->ih_wq);
+-
+ 	kfifo_free(&node->ih_fifo);
+ }
+ 
+@@ -162,7 +155,7 @@ static void interrupt_wq(struct work_struct *work)
+ 			/* If we spent more than a second processing signals,
+ 			 * reschedule the worker to avoid soft-lockup warnings
+ 			 */
+-			queue_work(dev->ih_wq, &dev->interrupt_work);
++			queue_work(dev->kfd->ih_wq, &dev->interrupt_work);
+ 			break;
+ 		}
+ 	}
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+index 26e48fdc872896..75523f30cd38b0 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+@@ -273,7 +273,6 @@ struct kfd_node {
+ 
+ 	/* Interrupts */
+ 	struct kfifo ih_fifo;
+-	struct workqueue_struct *ih_wq;
+ 	struct work_struct interrupt_work;
+ 	spinlock_t interrupt_lock;
+ 
+@@ -366,6 +365,8 @@ struct kfd_dev {
+ 	struct kfd_node *nodes[MAX_KFD_NODES];
+ 	unsigned int num_nodes;
+ 
++	struct workqueue_struct *ih_wq;
++
+ 	/* Kernel doorbells for KFD device */
+ 	struct amdgpu_bo *doorbells;
+ 
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+index ead4317a21680b..dbb63ce316f11e 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+@@ -86,9 +86,12 @@ void kfd_process_dequeue_from_device(struct kfd_process_device *pdd)
+ 
+ 	if (pdd->already_dequeued)
+ 		return;
+-
++	/* The MES context flush needs to filter out the case in which
++	 * the KFD process was created without setting up the MES context
++	 * and queue for creating a compute queue.
++	 */
+ 	dev->dqm->ops.process_termination(dev->dqm, &pdd->qpd);
+-	if (dev->kfd->shared_resources.enable_mes &&
++	if (dev->kfd->shared_resources.enable_mes && !!pdd->proc_ctx_gpu_addr &&
+ 	    down_read_trylock(&dev->adev->reset_domain->sem)) {
+ 		amdgpu_mes_flush_shader_debugger(dev->adev,
+ 						 pdd->proc_ctx_gpu_addr);
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 08c58d0315de7f..85e58e0f6059a6 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -1036,8 +1036,10 @@ static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
+ 			continue;
+ 
+ 		*enabled = true;
++		mutex_lock(&connector->eld_mutex);
+ 		ret = drm_eld_size(connector->eld);
+ 		memcpy(buf, connector->eld, min(max_bytes, ret));
++		mutex_unlock(&connector->eld_mutex);
+ 
+ 		break;
+ 	}
+@@ -5497,8 +5499,7 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
+ 			    const u64 tiling_flags,
+ 			    struct dc_plane_info *plane_info,
+ 			    struct dc_plane_address *address,
+-			    bool tmz_surface,
+-			    bool force_disable_dcc)
++			    bool tmz_surface)
+ {
+ 	const struct drm_framebuffer *fb = plane_state->fb;
+ 	const struct amdgpu_framebuffer *afb =
+@@ -5597,7 +5598,7 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
+ 					   &plane_info->tiling_info,
+ 					   &plane_info->plane_size,
+ 					   &plane_info->dcc, address,
+-					   tmz_surface, force_disable_dcc);
++					   tmz_surface);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -5618,7 +5619,6 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
+ 	struct dc_scaling_info scaling_info;
+ 	struct dc_plane_info plane_info;
+ 	int ret;
+-	bool force_disable_dcc = false;
+ 
+ 	ret = amdgpu_dm_plane_fill_dc_scaling_info(adev, plane_state, &scaling_info);
+ 	if (ret)
+@@ -5629,13 +5629,11 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
+ 	dc_plane_state->clip_rect = scaling_info.clip_rect;
+ 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
+ 
+-	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
+ 	ret = fill_dc_plane_info_and_addr(adev, plane_state,
+ 					  afb->tiling_flags,
+ 					  &plane_info,
+ 					  &dc_plane_state->address,
+-					  afb->tmz_surface,
+-					  force_disable_dcc);
++					  afb->tmz_surface);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -9061,7 +9059,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
+ 			afb->tiling_flags,
+ 			&bundle->plane_infos[planes_count],
+ 			&bundle->flip_addrs[planes_count].address,
+-			afb->tmz_surface, false);
++			afb->tmz_surface);
+ 
+ 		drm_dbg_state(state->dev, "plane: id=%d dcc_en=%d\n",
+ 				 new_plane_state->plane->index,
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+index 754dbc544f03a3..5bdf44c692180c 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+@@ -1691,16 +1691,16 @@ int pre_validate_dsc(struct drm_atomic_state *state,
+ 	return ret;
+ }
+ 
+-static unsigned int kbps_from_pbn(unsigned int pbn)
++static uint32_t kbps_from_pbn(unsigned int pbn)
+ {
+-	unsigned int kbps = pbn;
++	uint64_t kbps = (uint64_t)pbn;
+ 
+ 	kbps *= (1000000 / PEAK_FACTOR_X1000);
+ 	kbps *= 8;
+ 	kbps *= 54;
+ 	kbps /= 64;
+ 
+-	return kbps;
++	return (uint32_t)kbps;
+ }
+ 
+ static bool is_dsc_common_config_possible(struct dc_stream_state *stream,
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+index 495e3cd70426db..83c7c8853edeca 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+@@ -309,8 +309,7 @@ static int amdgpu_dm_plane_fill_gfx9_plane_attributes_from_modifiers(struct amdg
+ 								     const struct plane_size *plane_size,
+ 								     union dc_tiling_info *tiling_info,
+ 								     struct dc_plane_dcc_param *dcc,
+-								     struct dc_plane_address *address,
+-								     const bool force_disable_dcc)
++								     struct dc_plane_address *address)
+ {
+ 	const uint64_t modifier = afb->base.modifier;
+ 	int ret = 0;
+@@ -318,7 +317,7 @@ static int amdgpu_dm_plane_fill_gfx9_plane_attributes_from_modifiers(struct amdg
+ 	amdgpu_dm_plane_fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
+ 	tiling_info->gfx9.swizzle = amdgpu_dm_plane_modifier_gfx9_swizzle_mode(modifier);
+ 
+-	if (amdgpu_dm_plane_modifier_has_dcc(modifier) && !force_disable_dcc) {
++	if (amdgpu_dm_plane_modifier_has_dcc(modifier)) {
+ 		uint64_t dcc_address = afb->address + afb->base.offsets[1];
+ 		bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
+ 		bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
+@@ -360,8 +359,7 @@ static int amdgpu_dm_plane_fill_gfx12_plane_attributes_from_modifiers(struct amd
+ 								      const struct plane_size *plane_size,
+ 								      union dc_tiling_info *tiling_info,
+ 								      struct dc_plane_dcc_param *dcc,
+-								      struct dc_plane_address *address,
+-								      const bool force_disable_dcc)
++								      struct dc_plane_address *address)
+ {
+ 	const uint64_t modifier = afb->base.modifier;
+ 	int ret = 0;
+@@ -371,7 +369,7 @@ static int amdgpu_dm_plane_fill_gfx12_plane_attributes_from_modifiers(struct amd
+ 
+ 	tiling_info->gfx9.swizzle = amdgpu_dm_plane_modifier_gfx9_swizzle_mode(modifier);
+ 
+-	if (amdgpu_dm_plane_modifier_has_dcc(modifier) && !force_disable_dcc) {
++	if (amdgpu_dm_plane_modifier_has_dcc(modifier)) {
+ 		int max_compressed_block = AMD_FMT_MOD_GET(DCC_MAX_COMPRESSED_BLOCK, modifier);
+ 
+ 		dcc->enable = 1;
+@@ -839,8 +837,7 @@ int amdgpu_dm_plane_fill_plane_buffer_attributes(struct amdgpu_device *adev,
+ 			     struct plane_size *plane_size,
+ 			     struct dc_plane_dcc_param *dcc,
+ 			     struct dc_plane_address *address,
+-			     bool tmz_surface,
+-			     bool force_disable_dcc)
++			     bool tmz_surface)
+ {
+ 	const struct drm_framebuffer *fb = &afb->base;
+ 	int ret;
+@@ -900,16 +897,14 @@ int amdgpu_dm_plane_fill_plane_buffer_attributes(struct amdgpu_device *adev,
+ 		ret = amdgpu_dm_plane_fill_gfx12_plane_attributes_from_modifiers(adev, afb, format,
+ 										 rotation, plane_size,
+ 										 tiling_info, dcc,
+-										 address,
+-										 force_disable_dcc);
++										 address);
+ 		if (ret)
+ 			return ret;
+ 	} else if (adev->family >= AMDGPU_FAMILY_AI) {
+ 		ret = amdgpu_dm_plane_fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
+ 										rotation, plane_size,
+ 										tiling_info, dcc,
+-										address,
+-										force_disable_dcc);
++										address);
+ 		if (ret)
+ 			return ret;
+ 	} else {
+@@ -1000,14 +995,13 @@ static int amdgpu_dm_plane_helper_prepare_fb(struct drm_plane *plane,
+ 	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
+ 		struct dc_plane_state *plane_state =
+ 			dm_plane_state_new->dc_state;
+-		bool force_disable_dcc = !plane_state->dcc.enable;
+ 
+ 		amdgpu_dm_plane_fill_plane_buffer_attributes(
+ 			adev, afb, plane_state->format, plane_state->rotation,
+ 			afb->tiling_flags,
+ 			&plane_state->tiling_info, &plane_state->plane_size,
+ 			&plane_state->dcc, &plane_state->address,
+-			afb->tmz_surface, force_disable_dcc);
++			afb->tmz_surface);
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h
+index 6498359bff6f68..2eef13b1c05a4b 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h
+@@ -51,8 +51,7 @@ int amdgpu_dm_plane_fill_plane_buffer_attributes(struct amdgpu_device *adev,
+ 				 struct plane_size *plane_size,
+ 				 struct dc_plane_dcc_param *dcc,
+ 				 struct dc_plane_address *address,
+-				 bool tmz_surface,
+-				 bool force_disable_dcc);
++				 bool tmz_surface);
+ 
+ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
+ 			 struct drm_plane *plane,
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index 6d4ee8fe615c38..216b525bd75e79 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -2032,7 +2032,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
+ 
+ 	dc_enable_stereo(dc, context, dc_streams, context->stream_count);
+ 
+-	if (context->stream_count > get_seamless_boot_stream_count(context) ||
++	if (get_seamless_boot_stream_count(context) == 0 ||
+ 		context->stream_count == 0) {
+ 		/* Must wait for no flips to be pending before doing optimize bw */
+ 		hwss_wait_for_no_pipes_pending(dc, context);
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
+index 5bb8b78bf250a0..bf636b28e3e16e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
+@@ -63,8 +63,7 @@ void dmub_hw_lock_mgr_inbox0_cmd(struct dc_dmub_srv *dmub_srv,
+ 
+ bool should_use_dmub_lock(struct dc_link *link)
+ {
+-	if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1 ||
+-	    link->psr_settings.psr_version == DC_PSR_VERSION_1)
++	if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1)
+ 		return true;
+ 
+ 	if (link->replay_settings.replay_feature_enabled)
+diff --git a/drivers/gpu/drm/amd/display/dc/dml2/Makefile b/drivers/gpu/drm/amd/display/dc/dml2/Makefile
+index c4378e620cbf91..986a69c5bd4bca 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml2/Makefile
++++ b/drivers/gpu/drm/amd/display/dc/dml2/Makefile
+@@ -29,7 +29,11 @@ dml2_rcflags := $(CC_FLAGS_NO_FPU)
+ 
+ ifneq ($(CONFIG_FRAME_WARN),0)
+ ifeq ($(filter y,$(CONFIG_KASAN)$(CONFIG_KCSAN)),y)
++ifeq ($(CONFIG_CC_IS_CLANG)$(CONFIG_COMPILE_TEST),yy)
++frame_warn_flag := -Wframe-larger-than=4096
++else
+ frame_warn_flag := -Wframe-larger-than=3072
++endif
+ else
+ frame_warn_flag := -Wframe-larger-than=2048
+ endif
+diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
+index 8dabb1ac0b684d..6822b07951204b 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
++++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
+@@ -6301,9 +6301,9 @@ static void dml_prefetch_check(struct display_mode_lib_st *mode_lib)
+ 			mode_lib->ms.meta_row_bandwidth_this_state,
+ 			mode_lib->ms.dpte_row_bandwidth_this_state,
+ 			mode_lib->ms.NoOfDPPThisState,
+-			mode_lib->ms.UrgentBurstFactorLuma,
+-			mode_lib->ms.UrgentBurstFactorChroma,
+-			mode_lib->ms.UrgentBurstFactorCursor);
++			mode_lib->ms.UrgentBurstFactorLuma[j],
++			mode_lib->ms.UrgentBurstFactorChroma[j],
++			mode_lib->ms.UrgentBurstFactorCursor[j]);
+ 
+ 		s->VMDataOnlyReturnBWPerState = dml_get_return_bw_mbps_vm_only(
+ 																	&mode_lib->ms.soc,
+@@ -6434,7 +6434,7 @@ static void dml_prefetch_check(struct display_mode_lib_st *mode_lib)
+ 							/* Output */
+ 							&mode_lib->ms.UrgentBurstFactorCursorPre[k],
+ 							&mode_lib->ms.UrgentBurstFactorLumaPre[k],
+-							&mode_lib->ms.UrgentBurstFactorChroma[k],
++							&mode_lib->ms.UrgentBurstFactorChromaPre[k],
+ 							&mode_lib->ms.NotUrgentLatencyHidingPre[k]);
+ 
+ 					mode_lib->ms.cursor_bw_pre[k] = mode_lib->ms.cache_display_cfg.plane.NumberOfCursors[k] * mode_lib->ms.cache_display_cfg.plane.CursorWidth[k] *
+@@ -6458,9 +6458,9 @@ static void dml_prefetch_check(struct display_mode_lib_st *mode_lib)
+ 				mode_lib->ms.cursor_bw_pre,
+ 				mode_lib->ms.prefetch_vmrow_bw,
+ 				mode_lib->ms.NoOfDPPThisState,
+-				mode_lib->ms.UrgentBurstFactorLuma,
+-				mode_lib->ms.UrgentBurstFactorChroma,
+-				mode_lib->ms.UrgentBurstFactorCursor,
++				mode_lib->ms.UrgentBurstFactorLuma[j],
++				mode_lib->ms.UrgentBurstFactorChroma[j],
++				mode_lib->ms.UrgentBurstFactorCursor[j],
+ 				mode_lib->ms.UrgentBurstFactorLumaPre,
+ 				mode_lib->ms.UrgentBurstFactorChromaPre,
+ 				mode_lib->ms.UrgentBurstFactorCursorPre,
+@@ -6517,9 +6517,9 @@ static void dml_prefetch_check(struct display_mode_lib_st *mode_lib)
+ 						mode_lib->ms.cursor_bw,
+ 						mode_lib->ms.cursor_bw_pre,
+ 						mode_lib->ms.NoOfDPPThisState,
+-						mode_lib->ms.UrgentBurstFactorLuma,
+-						mode_lib->ms.UrgentBurstFactorChroma,
+-						mode_lib->ms.UrgentBurstFactorCursor,
++						mode_lib->ms.UrgentBurstFactorLuma[j],
++						mode_lib->ms.UrgentBurstFactorChroma[j],
++						mode_lib->ms.UrgentBurstFactorCursor[j],
+ 						mode_lib->ms.UrgentBurstFactorLumaPre,
+ 						mode_lib->ms.UrgentBurstFactorChromaPre,
+ 						mode_lib->ms.UrgentBurstFactorCursorPre);
+@@ -6586,9 +6586,9 @@ static void dml_prefetch_check(struct display_mode_lib_st *mode_lib)
+ 													mode_lib->ms.cursor_bw_pre,
+ 													mode_lib->ms.prefetch_vmrow_bw,
+ 													mode_lib->ms.NoOfDPP[j], // VBA_ERROR DPPPerSurface is not assigned at this point, should use NoOfDpp here
+-													mode_lib->ms.UrgentBurstFactorLuma,
+-													mode_lib->ms.UrgentBurstFactorChroma,
+-													mode_lib->ms.UrgentBurstFactorCursor,
++													mode_lib->ms.UrgentBurstFactorLuma[j],
++													mode_lib->ms.UrgentBurstFactorChroma[j],
++													mode_lib->ms.UrgentBurstFactorCursor[j],
+ 													mode_lib->ms.UrgentBurstFactorLumaPre,
+ 													mode_lib->ms.UrgentBurstFactorChromaPre,
+ 													mode_lib->ms.UrgentBurstFactorCursorPre,
+@@ -7809,9 +7809,9 @@ dml_bool_t dml_core_mode_support(struct display_mode_lib_st *mode_lib)
+ 				mode_lib->ms.DETBufferSizeYThisState[k],
+ 				mode_lib->ms.DETBufferSizeCThisState[k],
+ 				/* Output */
+-				&mode_lib->ms.UrgentBurstFactorCursor[k],
+-				&mode_lib->ms.UrgentBurstFactorLuma[k],
+-				&mode_lib->ms.UrgentBurstFactorChroma[k],
++				&mode_lib->ms.UrgentBurstFactorCursor[j][k],
++				&mode_lib->ms.UrgentBurstFactorLuma[j][k],
++				&mode_lib->ms.UrgentBurstFactorChroma[j][k],
+ 				&mode_lib->ms.NotUrgentLatencyHiding[k]);
+ 		}
+ 
+@@ -9190,6 +9190,8 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
+ 			&locals->FractionOfUrgentBandwidth,
+ 			&s->dummy_boolean[0]); // dml_bool_t *PrefetchBandwidthSupport
+ 
++
++
+ 		if (s->VRatioPrefetchMoreThanMax != false || s->DestinationLineTimesForPrefetchLessThan2 != false) {
+ 			dml_print("DML::%s: VRatioPrefetchMoreThanMax                   = %u\n", __func__, s->VRatioPrefetchMoreThanMax);
+ 			dml_print("DML::%s: DestinationLineTimesForPrefetchLessThan2    = %u\n", __func__, s->DestinationLineTimesForPrefetchLessThan2);
+@@ -9204,6 +9206,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
+ 			}
+ 		}
+ 
++
+ 		if (locals->PrefetchModeSupported == true && mode_lib->ms.support.ImmediateFlipSupport == true) {
+ 			locals->BandwidthAvailableForImmediateFlip = CalculateBandwidthAvailableForImmediateFlip(
+ 																	mode_lib->ms.num_active_planes,
+diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h
+index f951936bb579e6..504c427b3b3191 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h
++++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h
+@@ -884,11 +884,11 @@ struct mode_support_st {
+ 	dml_uint_t meta_row_height[__DML_NUM_PLANES__];
+ 	dml_uint_t meta_row_height_chroma[__DML_NUM_PLANES__];
+ 	dml_float_t UrgLatency;
+-	dml_float_t UrgentBurstFactorCursor[__DML_NUM_PLANES__];
++	dml_float_t UrgentBurstFactorCursor[2][__DML_NUM_PLANES__];
+ 	dml_float_t UrgentBurstFactorCursorPre[__DML_NUM_PLANES__];
+-	dml_float_t UrgentBurstFactorLuma[__DML_NUM_PLANES__];
++	dml_float_t UrgentBurstFactorLuma[2][__DML_NUM_PLANES__];
+ 	dml_float_t UrgentBurstFactorLumaPre[__DML_NUM_PLANES__];
+-	dml_float_t UrgentBurstFactorChroma[__DML_NUM_PLANES__];
++	dml_float_t UrgentBurstFactorChroma[2][__DML_NUM_PLANES__];
+ 	dml_float_t UrgentBurstFactorChromaPre[__DML_NUM_PLANES__];
+ 	dml_float_t MaximumSwathWidthInLineBufferLuma;
+ 	dml_float_t MaximumSwathWidthInLineBufferChroma;
+diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
+index 866b0abcff1bad..4d64c45930da49 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
++++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
+@@ -533,14 +533,21 @@ static bool optimize_pstate_with_svp_and_drr(struct dml2_context *dml2, struct d
+ static bool call_dml_mode_support_and_programming(struct dc_state *context)
+ {
+ 	unsigned int result = 0;
+-	unsigned int min_state;
++	unsigned int min_state = 0;
+ 	int min_state_for_g6_temp_read = 0;
++
++
++	if (!context)
++		return false;
++
+ 	struct dml2_context *dml2 = context->bw_ctx.dml2;
+ 	struct dml2_wrapper_scratch *s = &dml2->v20.scratch;
+ 
+-	min_state_for_g6_temp_read = calculate_lowest_supported_state_for_temp_read(dml2, context);
++	if (!context->streams[0]->sink->link->dc->caps.is_apu) {
++		min_state_for_g6_temp_read = calculate_lowest_supported_state_for_temp_read(dml2, context);
+ 
+-	ASSERT(min_state_for_g6_temp_read >= 0);
++		ASSERT(min_state_for_g6_temp_read >= 0);
++	}
+ 
+ 	if (!dml2->config.use_native_pstate_optimization) {
+ 		result = optimize_pstate_with_svp_and_drr(dml2, context);
+@@ -551,14 +558,20 @@ static bool call_dml_mode_support_and_programming(struct dc_state *context)
+ 	/* Upon trying to sett certain frequencies in FRL, min_state_for_g6_temp_read is reported as -1. This leads to an invalid value of min_state causing crashes later on.
+ 	 * Use the default logic for min_state only when min_state_for_g6_temp_read is a valid value. In other cases, use the value calculated by the DML directly.
+ 	 */
+-	if (min_state_for_g6_temp_read >= 0)
+-		min_state = min_state_for_g6_temp_read > s->mode_support_params.out_lowest_state_idx ? min_state_for_g6_temp_read : s->mode_support_params.out_lowest_state_idx;
+-	else
+-		min_state = s->mode_support_params.out_lowest_state_idx;
+-
+-	if (result)
+-		result = dml_mode_programming(&dml2->v20.dml_core_ctx, min_state, &s->cur_display_config, true);
++	if (!context->streams[0]->sink->link->dc->caps.is_apu) {
++		if (min_state_for_g6_temp_read >= 0)
++			min_state = min_state_for_g6_temp_read > s->mode_support_params.out_lowest_state_idx ? min_state_for_g6_temp_read : s->mode_support_params.out_lowest_state_idx;
++		else
++			min_state = s->mode_support_params.out_lowest_state_idx;
++	}
+ 
++	if (result) {
++		if (!context->streams[0]->sink->link->dc->caps.is_apu) {
++			result = dml_mode_programming(&dml2->v20.dml_core_ctx, min_state, &s->cur_display_config, true);
++		} else {
++			result = dml_mode_programming(&dml2->v20.dml_core_ctx, s->mode_support_params.out_lowest_state_idx, &s->cur_display_config, true);
++		}
++	}
+ 	return result;
+ }
+ 
+@@ -687,6 +700,8 @@ static bool dml2_validate_only(struct dc_state *context)
+ 	build_unoptimized_policy_settings(dml2->v20.dml_core_ctx.project, &dml2->v20.dml_core_ctx.policy);
+ 
+ 	map_dc_state_into_dml_display_cfg(dml2, context, &dml2->v20.scratch.cur_display_config);
++	if (!dml2->config.skip_hw_state_mapping)
++		dml2_apply_det_buffer_allocation_policy(dml2, &dml2->v20.scratch.cur_display_config);
+ 
+ 	result = pack_and_call_dml_mode_support_ex(dml2,
+ 		&dml2->v20.scratch.cur_display_config,
+diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c
+index 961d8936150ab7..75fb77bca83ba2 100644
+--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c
++++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c
+@@ -483,10 +483,11 @@ void dpp1_set_cursor_position(
+ 	if (src_y_offset + cursor_height <= 0)
+ 		cur_en = 0;  /* not visible beyond top edge*/
+ 
+-	REG_UPDATE(CURSOR0_CONTROL,
+-			CUR0_ENABLE, cur_en);
++	if (dpp_base->pos.cur0_ctl.bits.cur0_enable != cur_en) {
++		REG_UPDATE(CURSOR0_CONTROL, CUR0_ENABLE, cur_en);
+ 
+-	dpp_base->pos.cur0_ctl.bits.cur0_enable = cur_en;
++		dpp_base->pos.cur0_ctl.bits.cur0_enable = cur_en;
++	}
+ }
+ 
+ void dpp1_cnv_set_optional_cursor_attributes(
+diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c
+index 3b6ca7974e188d..1236e0f9a2560c 100644
+--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c
++++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c
+@@ -154,9 +154,11 @@ void dpp401_set_cursor_position(
+ 	struct dcn401_dpp *dpp = TO_DCN401_DPP(dpp_base);
+ 	uint32_t cur_en = pos->enable ? 1 : 0;
+ 
+-	REG_UPDATE(CURSOR0_CONTROL, CUR0_ENABLE, cur_en);
++	if (dpp_base->pos.cur0_ctl.bits.cur0_enable != cur_en) {
++		REG_UPDATE(CURSOR0_CONTROL, CUR0_ENABLE, cur_en);
+ 
+-	dpp_base->pos.cur0_ctl.bits.cur0_enable = cur_en;
++		dpp_base->pos.cur0_ctl.bits.cur0_enable = cur_en;
++	}
+ }
+ 
+ void dpp401_set_optional_cursor_attributes(
+diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c
+index fe741100c0f880..d347bb06577ac6 100644
+--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c
++++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c
+@@ -129,7 +129,8 @@ bool hubbub3_program_watermarks(
+ 	REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
+ 			DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 0x1FF);
+ 
+-	hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
++	if (safe_to_lower || hubbub->ctx->dc->debug.disable_stutter)
++		hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
+ 
+ 	return wm_pending;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c
+index 7fb5523f972244..b98505b240a797 100644
+--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c
++++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c
+@@ -750,7 +750,8 @@ static bool hubbub31_program_watermarks(
+ 	REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
+ 			DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 0x1FF);*/
+ 
+-	hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
++	if (safe_to_lower || hubbub->ctx->dc->debug.disable_stutter)
++		hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
+ 	return wm_pending;
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c
+index 5264dc26cce1fa..32a6be543105c1 100644
+--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c
++++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c
+@@ -786,7 +786,8 @@ static bool hubbub32_program_watermarks(
+ 	REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
+ 			DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 0x1FF);*/
+ 
+-	hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
++	if (safe_to_lower || hubbub->ctx->dc->debug.disable_stutter)
++		hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
+ 
+ 	hubbub32_force_usr_retraining_allow(hubbub, hubbub->ctx->dc->debug.force_usr_allow);
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c
+index 5eb3da8d5206e9..dce7269959ce74 100644
+--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c
++++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c
+@@ -326,7 +326,8 @@ static bool hubbub35_program_watermarks(
+ 			DCHUBBUB_ARB_MIN_REQ_OUTSTAND_COMMIT_THRESHOLD, 0xA);/*hw delta*/
+ 	REG_UPDATE(DCHUBBUB_ARB_HOSTVM_CNTL, DCHUBBUB_ARB_MAX_QOS_COMMIT_THRESHOLD, 0xF);
+ 
+-	hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
++	if (safe_to_lower || hubbub->ctx->dc->debug.disable_stutter)
++		hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
+ 
+ 	hubbub32_force_usr_retraining_allow(hubbub, hubbub->ctx->dc->debug.force_usr_allow);
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c
+index b405fa22f87a9e..c74ee2d50a699a 100644
+--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c
++++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c
+@@ -1044,11 +1044,13 @@ void hubp2_cursor_set_position(
+ 	if (src_y_offset + cursor_height <= 0)
+ 		cur_en = 0;  /* not visible beyond top edge*/
+ 
+-	if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0)
+-		hubp->funcs->set_cursor_attributes(hubp, &hubp->curs_attr);
++	if (hubp->pos.cur_ctl.bits.cur_enable != cur_en) {
++		if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0)
++			hubp->funcs->set_cursor_attributes(hubp, &hubp->curs_attr);
+ 
+-	REG_UPDATE(CURSOR_CONTROL,
++		REG_UPDATE(CURSOR_CONTROL,
+ 			CURSOR_ENABLE, cur_en);
++	}
+ 
+ 	REG_SET_2(CURSOR_POSITION, 0,
+ 			CURSOR_X_POSITION, pos->x,
+diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c
+index c55b1b8be8ffd6..5cf7e6771cb49e 100644
+--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c
++++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c
+@@ -484,6 +484,8 @@ void hubp3_init(struct hubp *hubp)
+ 	//hubp[i].HUBPREQ_DEBUG.HUBPREQ_DEBUG[26] = 1;
+ 	REG_WRITE(HUBPREQ_DEBUG, 1 << 26);
+ 
++	REG_UPDATE(DCHUBP_CNTL, HUBP_TTU_DISABLE, 0);
++
+ 	hubp_reset(hubp);
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c
+index 45023fa9b708dc..c4f41350d1b3ce 100644
+--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c
++++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c
+@@ -168,6 +168,8 @@ void hubp32_init(struct hubp *hubp)
+ {
+ 	struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+ 	REG_WRITE(HUBPREQ_DEBUG_DB, 1 << 8);
++
++	REG_UPDATE(DCHUBP_CNTL, HUBP_TTU_DISABLE, 0);
+ }
+ static struct hubp_funcs dcn32_hubp_funcs = {
+ 	.hubp_enable_tripleBuffer = hubp2_enable_triplebuffer,
+diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
+index 2d52100510f05f..7013c124efcff8 100644
+--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
++++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
+@@ -718,11 +718,13 @@ void hubp401_cursor_set_position(
+ 			dc_fixpt_from_int(dst_x_offset),
+ 			param->h_scale_ratio));
+ 
+-	if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0)
+-		hubp->funcs->set_cursor_attributes(hubp, &hubp->curs_attr);
++	if (hubp->pos.cur_ctl.bits.cur_enable != cur_en) {
++		if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0)
++			hubp->funcs->set_cursor_attributes(hubp, &hubp->curs_attr);
+ 
+-	REG_UPDATE(CURSOR_CONTROL,
+-		CURSOR_ENABLE, cur_en);
++		REG_UPDATE(CURSOR_CONTROL,
++			CURSOR_ENABLE, cur_en);
++	}
+ 
+ 	REG_SET_2(CURSOR_POSITION, 0,
+ 		CURSOR_X_POSITION, x_pos,
+diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
+index f6b17bd3f714fa..38755ca771401b 100644
+--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
+@@ -236,7 +236,8 @@ void dcn35_init_hw(struct dc *dc)
+ 		}
+ 
+ 		hws->funcs.init_pipes(dc, dc->current_state);
+-		if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
++		if (dc->res_pool->hubbub->funcs->allow_self_refresh_control &&
++			!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter)
+ 			dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
+ 					!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
+ 	}
+diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
+index 7d04739c3ba146..4bbbe07ecde7d0 100644
+--- a/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
+@@ -671,9 +671,9 @@ static const struct dc_plane_cap plane_cap = {
+ 
+ 	/* 6:1 downscaling ratio: 1000/6 = 166.666 */
+ 	.max_downscale_factor = {
+-			.argb8888 = 167,
+-			.nv12 = 167,
+-			.fp16 = 167 
++			.argb8888 = 358,
++			.nv12 = 358,
++			.fp16 = 358
+ 	},
+ 	64,
+ 	64
+@@ -694,7 +694,7 @@ static const struct dc_debug_options debug_defaults_drv = {
+ 	.disable_dcc = DCC_ENABLE,
+ 	.vsr_support = true,
+ 	.performance_trace = false,
+-	.max_downscale_src_width = 7680,/*upto 8K*/
++	.max_downscale_src_width = 4096, /* up to true 4K */
+ 	.scl_reset_length10 = true,
+ 	.sanity_checks = false,
+ 	.underflow_assert_delay_us = 0xFFFFFFFF,
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+index 2c35eb31475ab8..5a1f24438e472a 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+@@ -1731,7 +1731,6 @@ static ssize_t aldebaran_get_gpu_metrics(struct smu_context *smu,
+ 
+ 	gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity;
+ 	gpu_metrics->average_umc_activity = metrics.AverageUclkActivity;
+-	gpu_metrics->average_mm_activity = 0;
+ 
+ 	/* Valid power data is available only from primary die */
+ 	if (aldebaran_is_primary(smu)) {
+diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c b/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
+index ebccb74306a765..f30b3d5eeca5c5 100644
+--- a/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
++++ b/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
+@@ -160,6 +160,10 @@ static int komeda_wb_connector_add(struct komeda_kms_dev *kms,
+ 	formats = komeda_get_layer_fourcc_list(&mdev->fmt_tbl,
+ 					       kwb_conn->wb_layer->layer_type,
+ 					       &n_formats);
++	if (!formats) {
++		kfree(kwb_conn);
++		return -ENOMEM;
++	}
+ 
+ 	err = drm_writeback_connector_init(&kms->base, wb_conn,
+ 					   &komeda_wb_connector_funcs,
+diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
+index a2675b121fe44b..c036bbc92ba96e 100644
+--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
++++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
+@@ -2002,8 +2002,10 @@ static int anx7625_audio_get_eld(struct device *dev, void *data,
+ 		memset(buf, 0, len);
+ 	} else {
+ 		dev_dbg(dev, "audio copy eld\n");
++		mutex_lock(&ctx->connector->eld_mutex);
+ 		memcpy(buf, ctx->connector->eld,
+ 		       min(sizeof(ctx->connector->eld), len));
++		mutex_unlock(&ctx->connector->eld_mutex);
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c
+index cf891e7677c0e2..faee8e2e82a053 100644
+--- a/drivers/gpu/drm/bridge/ite-it6505.c
++++ b/drivers/gpu/drm/bridge/ite-it6505.c
+@@ -296,7 +296,7 @@
+ #define MAX_LANE_COUNT 4
+ #define MAX_LINK_RATE HBR
+ #define AUTO_TRAIN_RETRY 3
+-#define MAX_HDCP_DOWN_STREAM_COUNT 10
++#define MAX_HDCP_DOWN_STREAM_COUNT 127
+ #define MAX_CR_LEVEL 0x03
+ #define MAX_EQ_LEVEL 0x03
+ #define AUX_WAIT_TIMEOUT_MS 15
+@@ -2023,7 +2023,7 @@ static bool it6505_hdcp_part2_ksvlist_check(struct it6505 *it6505)
+ {
+ 	struct device *dev = it6505->dev;
+ 	u8 av[5][4], bv[5][4];
+-	int i, err;
++	int i, err, retry;
+ 
+ 	i = it6505_setup_sha1_input(it6505, it6505->sha1_input);
+ 	if (i <= 0) {
+@@ -2032,22 +2032,28 @@ static bool it6505_hdcp_part2_ksvlist_check(struct it6505 *it6505)
+ 	}
+ 
+ 	it6505_sha1_digest(it6505, it6505->sha1_input, i, (u8 *)av);
++	/* HDCP CTS 1B-05: the V' check must be retried 3 times */
++	for (retry = 0; retry < 3; retry++) {
++		err = it6505_get_dpcd(it6505, DP_AUX_HDCP_V_PRIME(0), (u8 *)bv,
++				      sizeof(bv));
+ 
+-	err = it6505_get_dpcd(it6505, DP_AUX_HDCP_V_PRIME(0), (u8 *)bv,
+-			      sizeof(bv));
++		if (err < 0) {
++			dev_err(dev, "Read V' value Fail %d", retry);
++			continue;
++		}
+ 
+-	if (err < 0) {
+-		dev_err(dev, "Read V' value Fail");
+-		return false;
+-	}
++		for (i = 0; i < 5; i++) {
++			if (bv[i][3] != av[i][0] || bv[i][2] != av[i][1] ||
++			    bv[i][1] != av[i][2] || bv[i][0] != av[i][3])
++				break;
+ 
+-	for (i = 0; i < 5; i++)
+-		if (bv[i][3] != av[i][0] || bv[i][2] != av[i][1] ||
+-		    bv[i][1] != av[i][2] || bv[i][0] != av[i][3])
+-			return false;
++			DRM_DEV_DEBUG_DRIVER(dev, "V' all match!! %d, %d", retry, i);
++			return true;
++		}
++	}
+ 
+-	DRM_DEV_DEBUG_DRIVER(dev, "V' all match!!");
+-	return true;
++	DRM_DEV_DEBUG_DRIVER(dev, "V' NOT match!! %d", retry);
++	return false;
+ }
+ 
+ static void it6505_hdcp_wait_ksv_list(struct work_struct *work)
+@@ -2055,12 +2061,13 @@ static void it6505_hdcp_wait_ksv_list(struct work_struct *work)
+ 	struct it6505 *it6505 = container_of(work, struct it6505,
+ 					     hdcp_wait_ksv_list);
+ 	struct device *dev = it6505->dev;
+-	unsigned int timeout = 5000;
+-	u8 bstatus = 0;
++	u8 bstatus;
+ 	bool ksv_list_check;
++	/* 1B-04: wait up to 5s for the KSV list */
++	unsigned long timeout = jiffies +
++				msecs_to_jiffies(5000) + 1;
+ 
+-	timeout /= 20;
+-	while (timeout > 0) {
++	for (;;) {
+ 		if (!it6505_get_sink_hpd_status(it6505))
+ 			return;
+ 
+@@ -2069,27 +2076,23 @@ static void it6505_hdcp_wait_ksv_list(struct work_struct *work)
+ 		if (bstatus & DP_BSTATUS_READY)
+ 			break;
+ 
+-		msleep(20);
+-		timeout--;
+-	}
++		if (time_after(jiffies, timeout)) {
++			DRM_DEV_DEBUG_DRIVER(dev, "KSV list wait timeout");
++			goto timeout;
++		}
+ 
+-	if (timeout == 0) {
+-		DRM_DEV_DEBUG_DRIVER(dev, "timeout and ksv list wait failed");
+-		goto timeout;
++		msleep(20);
+ 	}
+ 
+ 	ksv_list_check = it6505_hdcp_part2_ksvlist_check(it6505);
+ 	DRM_DEV_DEBUG_DRIVER(dev, "ksv list ready, ksv list check %s",
+ 			     ksv_list_check ? "pass" : "fail");
+-	if (ksv_list_check) {
+-		it6505_set_bits(it6505, REG_HDCP_TRIGGER,
+-				HDCP_TRIGGER_KSV_DONE, HDCP_TRIGGER_KSV_DONE);
++
++	if (ksv_list_check)
+ 		return;
+-	}
++
+ timeout:
+-	it6505_set_bits(it6505, REG_HDCP_TRIGGER,
+-			HDCP_TRIGGER_KSV_DONE | HDCP_TRIGGER_KSV_FAIL,
+-			HDCP_TRIGGER_KSV_DONE | HDCP_TRIGGER_KSV_FAIL);
++	it6505_start_hdcp(it6505);
+ }
+ 
+ static void it6505_hdcp_work(struct work_struct *work)
+@@ -2312,14 +2315,20 @@ static int it6505_process_hpd_irq(struct it6505 *it6505)
+ 	DRM_DEV_DEBUG_DRIVER(dev, "dp_irq_vector = 0x%02x", dp_irq_vector);
+ 
+ 	if (dp_irq_vector & DP_CP_IRQ) {
+-		it6505_set_bits(it6505, REG_HDCP_TRIGGER, HDCP_TRIGGER_CPIRQ,
+-				HDCP_TRIGGER_CPIRQ);
+-
+ 		bstatus = it6505_dpcd_read(it6505, DP_AUX_HDCP_BSTATUS);
+ 		if (bstatus < 0)
+ 			return bstatus;
+ 
+ 		DRM_DEV_DEBUG_DRIVER(dev, "Bstatus = 0x%02x", bstatus);
++
++		/* Check BSTATUS when receiving CP_IRQ */
++		if (bstatus & DP_BSTATUS_R0_PRIME_READY &&
++		    it6505->hdcp_status == HDCP_AUTH_GOING)
++			it6505_set_bits(it6505, REG_HDCP_TRIGGER, HDCP_TRIGGER_CPIRQ,
++					HDCP_TRIGGER_CPIRQ);
++		else if (bstatus & (DP_BSTATUS_REAUTH_REQ | DP_BSTATUS_LINK_FAILURE) &&
++			 it6505->hdcp_status == HDCP_AUTH_DONE)
++			it6505_start_hdcp(it6505);
+ 	}
+ 
+ 	ret = drm_dp_dpcd_read_link_status(&it6505->aux, link_status);
+@@ -2456,7 +2465,11 @@ static void it6505_irq_hdcp_ksv_check(struct it6505 *it6505)
+ {
+ 	struct device *dev = it6505->dev;
+ 
+-	DRM_DEV_DEBUG_DRIVER(dev, "HDCP event Interrupt");
++	DRM_DEV_DEBUG_DRIVER(dev, "HDCP repeater R0 event Interrupt");
++	/* 1B-01: HDCP encryption should start when R0 is ready */
++	it6505_set_bits(it6505, REG_HDCP_TRIGGER,
++			HDCP_TRIGGER_KSV_DONE, HDCP_TRIGGER_KSV_DONE);
++
+ 	schedule_work(&it6505->hdcp_wait_ksv_list);
+ }
+ 
+diff --git a/drivers/gpu/drm/bridge/ite-it66121.c b/drivers/gpu/drm/bridge/ite-it66121.c
+index 925e42f46cd87f..0f8d3ab30daa68 100644
+--- a/drivers/gpu/drm/bridge/ite-it66121.c
++++ b/drivers/gpu/drm/bridge/ite-it66121.c
+@@ -1452,8 +1452,10 @@ static int it66121_audio_get_eld(struct device *dev, void *data,
+ 		dev_dbg(dev, "No connector present, passing empty EDID data");
+ 		memset(buf, 0, len);
+ 	} else {
++		mutex_lock(&ctx->connector->eld_mutex);
+ 		memcpy(buf, ctx->connector->eld,
+ 		       min(sizeof(ctx->connector->eld), len));
++		mutex_unlock(&ctx->connector->eld_mutex);
+ 	}
+ 	mutex_unlock(&ctx->lock);
+ 
+diff --git a/drivers/gpu/drm/display/drm_dp_cec.c b/drivers/gpu/drm/display/drm_dp_cec.c
+index 007ceb281d00da..56a4965e518cc2 100644
+--- a/drivers/gpu/drm/display/drm_dp_cec.c
++++ b/drivers/gpu/drm/display/drm_dp_cec.c
+@@ -311,16 +311,6 @@ void drm_dp_cec_attach(struct drm_dp_aux *aux, u16 source_physical_address)
+ 	if (!aux->transfer)
+ 		return;
+ 
+-#ifndef CONFIG_MEDIA_CEC_RC
+-	/*
+-	 * CEC_CAP_RC is part of CEC_CAP_DEFAULTS, but it is stripped by
+-	 * cec_allocate_adapter() if CONFIG_MEDIA_CEC_RC is undefined.
+-	 *
+-	 * Do this here as well to ensure the tests against cec_caps are
+-	 * correct.
+-	 */
+-	cec_caps &= ~CEC_CAP_RC;
+-#endif
+ 	cancel_delayed_work_sync(&aux->cec.unregister_work);
+ 
+ 	mutex_lock(&aux->cec.lock);
+@@ -337,7 +327,9 @@ void drm_dp_cec_attach(struct drm_dp_aux *aux, u16 source_physical_address)
+ 		num_las = CEC_MAX_LOG_ADDRS;
+ 
+ 	if (aux->cec.adap) {
+-		if (aux->cec.adap->capabilities == cec_caps &&
++		/* Check if the adapter properties have changed */
++		if ((aux->cec.adap->capabilities & CEC_CAP_MONITOR_ALL) ==
++		    (cec_caps & CEC_CAP_MONITOR_ALL) &&
+ 		    aux->cec.adap->available_log_addrs == num_las) {
+ 			/* Unchanged, so just set the phys addr */
+ 			cec_s_phys_addr(aux->cec.adap, source_physical_address, false);
+diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c
+index cee5eafbfb81a8..fd620f7db0dd27 100644
+--- a/drivers/gpu/drm/drm_client_modeset.c
++++ b/drivers/gpu/drm/drm_client_modeset.c
+@@ -741,6 +741,15 @@ static bool drm_client_firmware_config(struct drm_client_dev *client,
+ 	if ((conn_configured & mask) != mask && conn_configured != conn_seq)
+ 		goto retry;
+ 
++	for (i = 0; i < count; i++) {
++		struct drm_connector *connector = connectors[i];
++
++		if (connector->has_tile)
++			drm_client_get_tile_offsets(dev, connectors, connector_count,
++						    modes, offsets, i,
++						    connector->tile_h_loc, connector->tile_v_loc);
++	}
++
+ 	/*
+ 	 * If the BIOS didn't enable everything it could, fall back to have the
+ 	 * same user experiencing of lighting up as much as possible like the
+diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
+index ca7f43c8d6f1b3..0e6021235a9304 100644
+--- a/drivers/gpu/drm/drm_connector.c
++++ b/drivers/gpu/drm/drm_connector.c
+@@ -277,6 +277,7 @@ static int __drm_connector_init(struct drm_device *dev,
+ 	INIT_LIST_HEAD(&connector->probed_modes);
+ 	INIT_LIST_HEAD(&connector->modes);
+ 	mutex_init(&connector->mutex);
++	mutex_init(&connector->eld_mutex);
+ 	mutex_init(&connector->edid_override_mutex);
+ 	mutex_init(&connector->hdmi.infoframes.lock);
+ 	connector->edid_blob_ptr = NULL;
+diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
+index 855beafb76ffbe..13bc4c290b17d5 100644
+--- a/drivers/gpu/drm/drm_edid.c
++++ b/drivers/gpu/drm/drm_edid.c
+@@ -5605,7 +5605,9 @@ EXPORT_SYMBOL(drm_edid_get_monitor_name);
+ 
+ static void clear_eld(struct drm_connector *connector)
+ {
++	mutex_lock(&connector->eld_mutex);
+ 	memset(connector->eld, 0, sizeof(connector->eld));
++	mutex_unlock(&connector->eld_mutex);
+ 
+ 	connector->latency_present[0] = false;
+ 	connector->latency_present[1] = false;
+@@ -5657,6 +5659,8 @@ static void drm_edid_to_eld(struct drm_connector *connector,
+ 	if (!drm_edid)
+ 		return;
+ 
++	mutex_lock(&connector->eld_mutex);
++
+ 	mnl = get_monitor_name(drm_edid, &eld[DRM_ELD_MONITOR_NAME_STRING]);
+ 	drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] ELD monitor %s\n",
+ 		    connector->base.id, connector->name,
+@@ -5717,6 +5721,8 @@ static void drm_edid_to_eld(struct drm_connector *connector,
+ 	drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] ELD size %d, SAD count %d\n",
+ 		    connector->base.id, connector->name,
+ 		    drm_eld_size(eld), total_sad_count);
++
++	mutex_unlock(&connector->eld_mutex);
+ }
+ 
+ static int _drm_edid_to_sad(const struct drm_edid *drm_edid,
+diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
+index 29c53f9f449ca8..eaac2e5726e750 100644
+--- a/drivers/gpu/drm/drm_fb_helper.c
++++ b/drivers/gpu/drm/drm_fb_helper.c
+@@ -1348,14 +1348,14 @@ int drm_fb_helper_set_par(struct fb_info *info)
+ }
+ EXPORT_SYMBOL(drm_fb_helper_set_par);
+ 
+-static void pan_set(struct drm_fb_helper *fb_helper, int x, int y)
++static void pan_set(struct drm_fb_helper *fb_helper, int dx, int dy)
+ {
+ 	struct drm_mode_set *mode_set;
+ 
+ 	mutex_lock(&fb_helper->client.modeset_mutex);
+ 	drm_client_for_each_modeset(mode_set, &fb_helper->client) {
+-		mode_set->x = x;
+-		mode_set->y = y;
++		mode_set->x += dx;
++		mode_set->y += dy;
+ 	}
+ 	mutex_unlock(&fb_helper->client.modeset_mutex);
+ }
+@@ -1364,16 +1364,18 @@ static int pan_display_atomic(struct fb_var_screeninfo *var,
+ 			      struct fb_info *info)
+ {
+ 	struct drm_fb_helper *fb_helper = info->par;
+-	int ret;
++	int ret, dx, dy;
+ 
+-	pan_set(fb_helper, var->xoffset, var->yoffset);
++	dx = var->xoffset - info->var.xoffset;
++	dy = var->yoffset - info->var.yoffset;
++	pan_set(fb_helper, dx, dy);
+ 
+ 	ret = drm_client_modeset_commit_locked(&fb_helper->client);
+ 	if (!ret) {
+ 		info->var.xoffset = var->xoffset;
+ 		info->var.yoffset = var->yoffset;
+ 	} else
+-		pan_set(fb_helper, info->var.xoffset, info->var.yoffset);
++		pan_set(fb_helper, -dx, -dy);
+ 
+ 	return ret;
+ }
+diff --git a/drivers/gpu/drm/drm_panel_backlight_quirks.c b/drivers/gpu/drm/drm_panel_backlight_quirks.c
+new file mode 100644
+index 00000000000000..c477d98ade2b41
+--- /dev/null
++++ b/drivers/gpu/drm/drm_panel_backlight_quirks.c
+@@ -0,0 +1,94 @@
++// SPDX-License-Identifier: GPL-2.0
++
++#include <linux/array_size.h>
++#include <linux/dmi.h>
++#include <linux/mod_devicetable.h>
++#include <linux/module.h>
++#include <drm/drm_edid.h>
++#include <drm/drm_utils.h>
++
++struct drm_panel_min_backlight_quirk {
++	struct {
++		enum dmi_field field;
++		const char * const value;
++	} dmi_match;
++	struct drm_edid_ident ident;
++	u8 min_brightness;
++};
++
++static const struct drm_panel_min_backlight_quirk drm_panel_min_backlight_quirks[] = {
++	/* 13 inch matte panel */
++	{
++		.dmi_match.field = DMI_BOARD_VENDOR,
++		.dmi_match.value = "Framework",
++		.ident.panel_id = drm_edid_encode_panel_id('B', 'O', 'E', 0x0bca),
++		.ident.name = "NE135FBM-N41",
++		.min_brightness = 0,
++	},
++	/* 13 inch glossy panel */
++	{
++		.dmi_match.field = DMI_BOARD_VENDOR,
++		.dmi_match.value = "Framework",
++		.ident.panel_id = drm_edid_encode_panel_id('B', 'O', 'E', 0x095f),
++		.ident.name = "NE135FBM-N41",
++		.min_brightness = 0,
++	},
++	/* 13 inch 2.8k panel */
++	{
++		.dmi_match.field = DMI_BOARD_VENDOR,
++		.dmi_match.value = "Framework",
++		.ident.panel_id = drm_edid_encode_panel_id('B', 'O', 'E', 0x0cb4),
++		.ident.name = "NE135A1M-NY1",
++		.min_brightness = 0,
++	},
++};
++
++static bool drm_panel_min_backlight_quirk_matches(const struct drm_panel_min_backlight_quirk *quirk,
++						  const struct drm_edid *edid)
++{
++	if (!dmi_match(quirk->dmi_match.field, quirk->dmi_match.value))
++		return false;
++
++	if (!drm_edid_match(edid, &quirk->ident))
++		return false;
++
++	return true;
++}
++
++/**
++ * drm_get_panel_min_brightness_quirk - Get minimum supported brightness level for a panel.
++ * @edid: EDID of the panel to check
++ *
++ * This function checks for platform-specific (e.g. DMI-based) quirks
++ * providing info on the minimum backlight brightness for systems where this
++ * cannot be probed correctly from the hardware or firmware.
++ *
++ * Returns:
++ * A negative error value or
++ * an override value in the range [0, 255] representing 0-100% to be scaled to
++ * the driver's target range.
++ */
++int drm_get_panel_min_brightness_quirk(const struct drm_edid *edid)
++{
++	const struct drm_panel_min_backlight_quirk *quirk;
++	size_t i;
++
++	if (!IS_ENABLED(CONFIG_DMI))
++		return -ENODATA;
++
++	if (!edid)
++		return -EINVAL;
++
++	for (i = 0; i < ARRAY_SIZE(drm_panel_min_backlight_quirks); i++) {
++		quirk = &drm_panel_min_backlight_quirks[i];
++
++		if (drm_panel_min_backlight_quirk_matches(quirk, edid))
++			return quirk->min_brightness;
++	}
++
++	return -ENODATA;
++}
++EXPORT_SYMBOL(drm_get_panel_min_brightness_quirk);
++
++MODULE_DESCRIPTION("Quirks for panel backlight overrides");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
+index 1e26cd4f834798..52059cfff4f0b3 100644
+--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
++++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
+@@ -1643,7 +1643,9 @@ static int hdmi_audio_get_eld(struct device *dev, void *data, uint8_t *buf,
+ 	struct hdmi_context *hdata = dev_get_drvdata(dev);
+ 	struct drm_connector *connector = &hdata->connector;
+ 
++	mutex_lock(&connector->eld_mutex);
+ 	memcpy(buf, connector->eld, min(sizeof(connector->eld), len));
++	mutex_unlock(&connector->eld_mutex);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
+index 90fa73575feb13..45cca965c11b48 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp.c
++++ b/drivers/gpu/drm/i915/display/intel_dp.c
+@@ -2022,11 +2022,10 @@ icl_dsc_compute_link_config(struct intel_dp *intel_dp,
+ 	/* Compressed BPP should be less than the Input DSC bpp */
+ 	dsc_max_bpp = min(dsc_max_bpp, pipe_bpp - 1);
+ 
+-	for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp); i++) {
+-		if (valid_dsc_bpp[i] < dsc_min_bpp)
++	for (i = ARRAY_SIZE(valid_dsc_bpp) - 1; i >= 0; i--) {
++		if (valid_dsc_bpp[i] < dsc_min_bpp ||
++		    valid_dsc_bpp[i] > dsc_max_bpp)
+ 			continue;
+-		if (valid_dsc_bpp[i] > dsc_max_bpp)
+-			break;
+ 
+ 		ret = dsc_compute_link_config(intel_dp,
+ 					      pipe_config,
+@@ -2738,7 +2737,6 @@ static void intel_dp_compute_as_sdp(struct intel_dp *intel_dp,
+ 
+ 	crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_ADAPTIVE_SYNC);
+ 
+-	/* Currently only DP_AS_SDP_AVT_FIXED_VTOTAL mode supported */
+ 	as_sdp->sdp_type = DP_SDP_ADAPTIVE_SYNC;
+ 	as_sdp->length = 0x9;
+ 	as_sdp->duration_incr_ms = 0;
+@@ -2750,7 +2748,7 @@ static void intel_dp_compute_as_sdp(struct intel_dp *intel_dp,
+ 		as_sdp->target_rr = drm_mode_vrefresh(adjusted_mode);
+ 		as_sdp->target_rr_divider = true;
+ 	} else {
+-		as_sdp->mode = DP_AS_SDP_AVT_FIXED_VTOTAL;
++		as_sdp->mode = DP_AS_SDP_AVT_DYNAMIC_VTOTAL;
+ 		as_sdp->vtotal = adjusted_mode->vtotal;
+ 		as_sdp->target_rr = 0;
+ 	}
+diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c
+index c8720d31d1013d..62a5287ea1d9c4 100644
+--- a/drivers/gpu/drm/i915/display/skl_universal_plane.c
++++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c
+@@ -105,8 +105,6 @@ static const u32 icl_sdr_y_plane_formats[] = {
+ 	DRM_FORMAT_Y216,
+ 	DRM_FORMAT_XYUV8888,
+ 	DRM_FORMAT_XVYU2101010,
+-	DRM_FORMAT_XVYU12_16161616,
+-	DRM_FORMAT_XVYU16161616,
+ };
+ 
+ static const u32 icl_sdr_uv_plane_formats[] = {
+@@ -133,8 +131,6 @@ static const u32 icl_sdr_uv_plane_formats[] = {
+ 	DRM_FORMAT_Y216,
+ 	DRM_FORMAT_XYUV8888,
+ 	DRM_FORMAT_XVYU2101010,
+-	DRM_FORMAT_XVYU12_16161616,
+-	DRM_FORMAT_XVYU16161616,
+ };
+ 
+ static const u32 icl_hdr_plane_formats[] = {
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+index fe69f2c8527d79..ae3343c81a6455 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+@@ -209,8 +209,6 @@ static int shmem_get_pages(struct drm_i915_gem_object *obj)
+ 	struct address_space *mapping = obj->base.filp->f_mapping;
+ 	unsigned int max_segment = i915_sg_segment_size(i915->drm.dev);
+ 	struct sg_table *st;
+-	struct sgt_iter sgt_iter;
+-	struct page *page;
+ 	int ret;
+ 
+ 	/*
+@@ -239,9 +237,7 @@ static int shmem_get_pages(struct drm_i915_gem_object *obj)
+ 		 * for PAGE_SIZE chunks instead may be helpful.
+ 		 */
+ 		if (max_segment > PAGE_SIZE) {
+-			for_each_sgt_page(page, sgt_iter, st)
+-				put_page(page);
+-			sg_free_table(st);
++			shmem_sg_free_table(st, mapping, false, false);
+ 			kfree(st);
+ 
+ 			max_segment = PAGE_SIZE;
+diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+index ee12ee0ed41871..b0e94c95940f67 100644
+--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
++++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+@@ -5511,12 +5511,20 @@ static inline void guc_log_context(struct drm_printer *p,
+ {
+ 	drm_printf(p, "GuC lrc descriptor %u:\n", ce->guc_id.id);
+ 	drm_printf(p, "\tHW Context Desc: 0x%08x\n", ce->lrc.lrca);
+-	drm_printf(p, "\t\tLRC Head: Internal %u, Memory %u\n",
+-		   ce->ring->head,
+-		   ce->lrc_reg_state[CTX_RING_HEAD]);
+-	drm_printf(p, "\t\tLRC Tail: Internal %u, Memory %u\n",
+-		   ce->ring->tail,
+-		   ce->lrc_reg_state[CTX_RING_TAIL]);
++	if (intel_context_pin_if_active(ce)) {
++		drm_printf(p, "\t\tLRC Head: Internal %u, Memory %u\n",
++			   ce->ring->head,
++			   ce->lrc_reg_state[CTX_RING_HEAD]);
++		drm_printf(p, "\t\tLRC Tail: Internal %u, Memory %u\n",
++			   ce->ring->tail,
++			   ce->lrc_reg_state[CTX_RING_TAIL]);
++		intel_context_unpin(ce);
++	} else {
++		drm_printf(p, "\t\tLRC Head: Internal %u, Memory not pinned\n",
++			   ce->ring->head);
++		drm_printf(p, "\t\tLRC Tail: Internal %u, Memory not pinned\n",
++			   ce->ring->tail);
++	}
+ 	drm_printf(p, "\t\tContext Pin Count: %u\n",
+ 		   atomic_read(&ce->pin_count));
+ 	drm_printf(p, "\t\tGuC ID Ref Count: %u\n",
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
+index d586aea3089841..9c83bab0a53091 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
+@@ -121,6 +121,8 @@ r535_gsp_msgq_wait(struct nvkm_gsp *gsp, u32 repc, u32 *prepc, int *ptime)
+ 		return mqe->data;
+ 	}
+ 
++	size = ALIGN(repc + GSP_MSG_HDR_SIZE, GSP_PAGE_SIZE);
++
+ 	msg = kvmalloc(repc, GFP_KERNEL);
+ 	if (!msg)
+ 		return ERR_PTR(-ENOMEM);
+@@ -129,19 +131,15 @@ r535_gsp_msgq_wait(struct nvkm_gsp *gsp, u32 repc, u32 *prepc, int *ptime)
+ 	len = min_t(u32, repc, len);
+ 	memcpy(msg, mqe->data, len);
+ 
+-	rptr += DIV_ROUND_UP(len, GSP_PAGE_SIZE);
+-	if (rptr == gsp->msgq.cnt)
+-		rptr = 0;
+-
+ 	repc -= len;
+ 
+ 	if (repc) {
+ 		mqe = (void *)((u8 *)gsp->shm.msgq.ptr + 0x1000 + 0 * 0x1000);
+ 		memcpy(msg + len, mqe, repc);
+-
+-		rptr += DIV_ROUND_UP(repc, GSP_PAGE_SIZE);
+ 	}
+ 
++	rptr = (rptr + DIV_ROUND_UP(size, GSP_PAGE_SIZE)) % gsp->msgq.cnt;
++
+ 	mb();
+ 	(*gsp->msgq.rptr) = rptr;
+ 	return msg;
+@@ -163,7 +161,7 @@ r535_gsp_cmdq_push(struct nvkm_gsp *gsp, void *argv)
+ 	u64 *end;
+ 	u64 csum = 0;
+ 	int free, time = 1000000;
+-	u32 wptr, size;
++	u32 wptr, size, step;
+ 	u32 off = 0;
+ 
+ 	argc = ALIGN(GSP_MSG_HDR_SIZE + argc, GSP_PAGE_SIZE);
+@@ -197,7 +195,9 @@ r535_gsp_cmdq_push(struct nvkm_gsp *gsp, void *argv)
+ 		}
+ 
+ 		cqe = (void *)((u8 *)gsp->shm.cmdq.ptr + 0x1000 + wptr * 0x1000);
+-		size = min_t(u32, argc, (gsp->cmdq.cnt - wptr) * GSP_PAGE_SIZE);
++		step = min_t(u32, free, (gsp->cmdq.cnt - wptr));
++		size = min_t(u32, argc, step * GSP_PAGE_SIZE);
++
+ 		memcpy(cqe, (u8 *)cmd + off, size);
+ 
+ 		wptr += DIV_ROUND_UP(size, 0x1000);
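
The heart of the msgq fix above: the read pointer must advance by the page-aligned size of the whole message, header included, modulo the queue size, rather than by the per-copy increments the old code accumulated. A standalone model of that arithmetic, with GSP_MSG_HDR_SIZE assumed to be 64 purely for illustration:

	#include <stdio.h>

	#define GSP_PAGE_SIZE		0x1000
	#define GSP_MSG_HDR_SIZE	64	/* assumed value, illustration only */
	#define ALIGN_UP(x, a)		(((x) + (a) - 1) & ~((a) - 1))
	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	int main(void)
	{
		unsigned int repc = 5000;	/* example reply payload size */
		unsigned int cnt = 16;		/* queue size in pages */
		unsigned int rptr = 15;		/* current read pointer */

		/* advance by the aligned size of header + payload, with wrap */
		unsigned int size = ALIGN_UP(repc + GSP_MSG_HDR_SIZE, GSP_PAGE_SIZE);
		rptr = (rptr + DIV_ROUND_UP(size, GSP_PAGE_SIZE)) % cnt;
		printf("%u\n", rptr);	/* 5064 B -> 2 pages; (15 + 2) % 16 = 1 */
		return 0;
	}
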
+diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
+index 5b69cc8011b42b..8d64ba18572ec4 100644
+--- a/drivers/gpu/drm/radeon/radeon_audio.c
++++ b/drivers/gpu/drm/radeon/radeon_audio.c
+@@ -775,8 +775,10 @@ static int radeon_audio_component_get_eld(struct device *kdev, int port,
+ 		if (!dig->pin || dig->pin->id != port)
+ 			continue;
+ 		*enabled = true;
++		mutex_lock(&connector->eld_mutex);
+ 		ret = drm_eld_size(connector->eld);
+ 		memcpy(buf, connector->eld, min(max_bytes, ret));
++		mutex_unlock(&connector->eld_mutex);
+ 		break;
+ 	}
+ 
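
Several drivers in this patch (exynos, radeon, sti, vc4) gain the same guard: connector->eld can be updated by the display hotplug/EDID path while the audio side copies it out, so readers now serialize against updaters via the new eld_mutex. The shared pattern, mirroring the hunks:

	mutex_lock(&connector->eld_mutex);
	memcpy(buf, connector->eld, min(sizeof(connector->eld), len));
	mutex_unlock(&connector->eld_mutex);
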
+diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
+index b04538907f956c..f576b1aa86d143 100644
+--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
++++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
+@@ -947,9 +947,6 @@ static void cdn_dp_pd_event_work(struct work_struct *work)
+ {
+ 	struct cdn_dp_device *dp = container_of(work, struct cdn_dp_device,
+ 						event_work);
+-	struct drm_connector *connector = &dp->connector;
+-	enum drm_connector_status old_status;
+-
+ 	int ret;
+ 
+ 	mutex_lock(&dp->lock);
+@@ -1009,11 +1006,7 @@ static void cdn_dp_pd_event_work(struct work_struct *work)
+ 
+ out:
+ 	mutex_unlock(&dp->lock);
+-
+-	old_status = connector->status;
+-	connector->status = connector->funcs->detect(connector, false);
+-	if (old_status != connector->status)
+-		drm_kms_helper_hotplug_event(dp->drm_dev);
++	drm_connector_helper_hpd_irq_event(&dp->connector);
+ }
+ 
+ static int cdn_dp_pd_event(struct notifier_block *nb,
+diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
+index 847470f747c0ef..3c8f3532c79723 100644
+--- a/drivers/gpu/drm/sti/sti_hdmi.c
++++ b/drivers/gpu/drm/sti/sti_hdmi.c
+@@ -1225,7 +1225,9 @@ static int hdmi_audio_get_eld(struct device *dev, void *data, uint8_t *buf, size
+ 	struct drm_connector *connector = hdmi->drm_connector;
+ 
+ 	DRM_DEBUG_DRIVER("\n");
++	mutex_lock(&connector->eld_mutex);
+ 	memcpy(buf, connector->eld, min(sizeof(connector->eld), len));
++	mutex_unlock(&connector->eld_mutex);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c b/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
+index 294773342e710d..4ba869e0e794c7 100644
+--- a/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
++++ b/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
+@@ -46,7 +46,7 @@ static struct drm_display_mode *find_preferred_mode(struct drm_connector *connec
+ 	struct drm_display_mode *mode, *preferred;
+ 
+ 	mutex_lock(&drm->mode_config.mutex);
+-	preferred = list_first_entry(&connector->modes, struct drm_display_mode, head);
++	preferred = list_first_entry_or_null(&connector->modes, struct drm_display_mode, head);
+ 	list_for_each_entry(mode, &connector->modes, head)
+ 		if (mode->type & DRM_MODE_TYPE_PREFERRED)
+ 			preferred = mode;
+@@ -105,9 +105,8 @@ static int set_connector_edid(struct kunit *test, struct drm_connector *connecto
+ 	mutex_lock(&drm->mode_config.mutex);
+ 	ret = connector->funcs->fill_modes(connector, 4096, 4096);
+ 	mutex_unlock(&drm->mode_config.mutex);
+-	KUNIT_ASSERT_GT(test, ret, 0);
+ 
+-	return 0;
++	return ret;
+ }
+ 
+ static const struct drm_connector_hdmi_funcs dummy_connector_hdmi_funcs = {
+@@ -223,7 +222,7 @@ drm_atomic_helper_connector_hdmi_init(struct kunit *test,
+ 	ret = set_connector_edid(test, conn,
+ 				 test_edid_hdmi_1080p_rgb_max_200mhz,
+ 				 ARRAY_SIZE(test_edid_hdmi_1080p_rgb_max_200mhz));
+-	KUNIT_ASSERT_EQ(test, ret, 0);
++	KUNIT_ASSERT_GT(test, ret, 0);
+ 
+ 	return priv;
+ }
+@@ -728,7 +727,7 @@ static void drm_test_check_output_bpc_crtc_mode_changed(struct kunit *test)
+ 	ret = set_connector_edid(test, conn,
+ 				 test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz,
+ 				 ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz));
+-	KUNIT_ASSERT_EQ(test, ret, 0);
++	KUNIT_ASSERT_GT(test, ret, 0);
+ 
+ 	ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+@@ -802,7 +801,7 @@ static void drm_test_check_output_bpc_crtc_mode_not_changed(struct kunit *test)
+ 	ret = set_connector_edid(test, conn,
+ 				 test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz,
+ 				 ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz));
+-	KUNIT_ASSERT_EQ(test, ret, 0);
++	KUNIT_ASSERT_GT(test, ret, 0);
+ 
+ 	ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+@@ -873,7 +872,7 @@ static void drm_test_check_output_bpc_dvi(struct kunit *test)
+ 	ret = set_connector_edid(test, conn,
+ 				 test_edid_dvi_1080p,
+ 				 ARRAY_SIZE(test_edid_dvi_1080p));
+-	KUNIT_ASSERT_EQ(test, ret, 0);
++	KUNIT_ASSERT_GT(test, ret, 0);
+ 
+ 	info = &conn->display_info;
+ 	KUNIT_ASSERT_FALSE(test, info->is_hdmi);
+@@ -920,7 +919,7 @@ static void drm_test_check_tmds_char_rate_rgb_8bpc(struct kunit *test)
+ 	ret = set_connector_edid(test, conn,
+ 				 test_edid_hdmi_1080p_rgb_max_200mhz,
+ 				 ARRAY_SIZE(test_edid_hdmi_1080p_rgb_max_200mhz));
+-	KUNIT_ASSERT_EQ(test, ret, 0);
++	KUNIT_ASSERT_GT(test, ret, 0);
+ 
+ 	ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+@@ -967,7 +966,7 @@ static void drm_test_check_tmds_char_rate_rgb_10bpc(struct kunit *test)
+ 	ret = set_connector_edid(test, conn,
+ 				 test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz,
+ 				 ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz));
+-	KUNIT_ASSERT_EQ(test, ret, 0);
++	KUNIT_ASSERT_GT(test, ret, 0);
+ 
+ 	ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+@@ -1014,7 +1013,7 @@ static void drm_test_check_tmds_char_rate_rgb_12bpc(struct kunit *test)
+ 	ret = set_connector_edid(test, conn,
+ 				 test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz,
+ 				 ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz));
+-	KUNIT_ASSERT_EQ(test, ret, 0);
++	KUNIT_ASSERT_GT(test, ret, 0);
+ 
+ 	ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+@@ -1121,7 +1120,7 @@ static void drm_test_check_max_tmds_rate_bpc_fallback(struct kunit *test)
+ 	ret = set_connector_edid(test, conn,
+ 				 test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz,
+ 				 ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz));
+-	KUNIT_ASSERT_EQ(test, ret, 0);
++	KUNIT_ASSERT_GT(test, ret, 0);
+ 
+ 	info = &conn->display_info;
+ 	KUNIT_ASSERT_TRUE(test, info->is_hdmi);
+@@ -1190,7 +1189,7 @@ static void drm_test_check_max_tmds_rate_format_fallback(struct kunit *test)
+ 	ret = set_connector_edid(test, conn,
+ 				 test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz,
+ 				 ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz));
+-	KUNIT_ASSERT_EQ(test, ret, 0);
++	KUNIT_ASSERT_GT(test, ret, 0);
+ 
+ 	info = &conn->display_info;
+ 	KUNIT_ASSERT_TRUE(test, info->is_hdmi);
+@@ -1254,7 +1253,7 @@ static void drm_test_check_output_bpc_format_vic_1(struct kunit *test)
+ 	ret = set_connector_edid(test, conn,
+ 				 test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz,
+ 				 ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz));
+-	KUNIT_ASSERT_EQ(test, ret, 0);
++	KUNIT_ASSERT_GT(test, ret, 0);
+ 
+ 	info = &conn->display_info;
+ 	KUNIT_ASSERT_TRUE(test, info->is_hdmi);
+@@ -1314,7 +1313,7 @@ static void drm_test_check_output_bpc_format_driver_rgb_only(struct kunit *test)
+ 	ret = set_connector_edid(test, conn,
+ 				 test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz,
+ 				 ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz));
+-	KUNIT_ASSERT_EQ(test, ret, 0);
++	KUNIT_ASSERT_GT(test, ret, 0);
+ 
+ 	info = &conn->display_info;
+ 	KUNIT_ASSERT_TRUE(test, info->is_hdmi);
+@@ -1381,7 +1380,7 @@ static void drm_test_check_output_bpc_format_display_rgb_only(struct kunit *test
+ 	ret = set_connector_edid(test, conn,
+ 				 test_edid_hdmi_1080p_rgb_max_200mhz,
+ 				 ARRAY_SIZE(test_edid_hdmi_1080p_rgb_max_200mhz));
+-	KUNIT_ASSERT_EQ(test, ret, 0);
++	KUNIT_ASSERT_GT(test, ret, 0);
+ 
+ 	info = &conn->display_info;
+ 	KUNIT_ASSERT_TRUE(test, info->is_hdmi);
+@@ -1447,7 +1446,7 @@ static void drm_test_check_output_bpc_format_driver_8bpc_only(struct kunit *test
+ 	ret = set_connector_edid(test, conn,
+ 				 test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz,
+ 				 ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz));
+-	KUNIT_ASSERT_EQ(test, ret, 0);
++	KUNIT_ASSERT_GT(test, ret, 0);
+ 
+ 	info = &conn->display_info;
+ 	KUNIT_ASSERT_TRUE(test, info->is_hdmi);
+@@ -1507,7 +1506,7 @@ static void drm_test_check_output_bpc_format_display_8bpc_only(struct kunit *tes
+ 	ret = set_connector_edid(test, conn,
+ 				 test_edid_hdmi_1080p_rgb_max_340mhz,
+ 				 ARRAY_SIZE(test_edid_hdmi_1080p_rgb_max_340mhz));
+-	KUNIT_ASSERT_EQ(test, ret, 0);
++	KUNIT_ASSERT_GT(test, ret, 0);
+ 
+ 	info = &conn->display_info;
+ 	KUNIT_ASSERT_TRUE(test, info->is_hdmi);
+diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
+index 7e0a5ea7ab859a..6b83d02b5d62a5 100644
+--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
++++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
+@@ -2192,9 +2192,9 @@ static int vc4_hdmi_audio_get_eld(struct device *dev, void *data,
+ 	struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
+ 	struct drm_connector *connector = &vc4_hdmi->connector;
+ 
+-	mutex_lock(&vc4_hdmi->mutex);
++	mutex_lock(&connector->eld_mutex);
+ 	memcpy(buf, connector->eld, min(sizeof(connector->eld), len));
+-	mutex_unlock(&vc4_hdmi->mutex);
++	mutex_unlock(&connector->eld_mutex);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
+index 64c236169db88a..5dc8eeaf7123c4 100644
+--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
++++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
+@@ -194,6 +194,13 @@ struct virtio_gpu_framebuffer {
+ #define to_virtio_gpu_framebuffer(x) \
+ 	container_of(x, struct virtio_gpu_framebuffer, base)
+ 
++struct virtio_gpu_plane_state {
++	struct drm_plane_state base;
++	struct virtio_gpu_fence *fence;
++};
++#define to_virtio_gpu_plane_state(x) \
++	container_of(x, struct virtio_gpu_plane_state, base)
++
+ struct virtio_gpu_queue {
+ 	struct virtqueue *vq;
+ 	spinlock_t qlock;
+diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
+index a72a2dbda031c2..7acd38b962c621 100644
+--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
++++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
+@@ -66,11 +66,28 @@ uint32_t virtio_gpu_translate_format(uint32_t drm_fourcc)
+ 	return format;
+ }
+ 
++static struct
++drm_plane_state *virtio_gpu_plane_duplicate_state(struct drm_plane *plane)
++{
++	struct virtio_gpu_plane_state *new;
++
++	if (WARN_ON(!plane->state))
++		return NULL;
++
++	new = kzalloc(sizeof(*new), GFP_KERNEL);
++	if (!new)
++		return NULL;
++
++	__drm_atomic_helper_plane_duplicate_state(plane, &new->base);
++
++	return &new->base;
++}
++
+ static const struct drm_plane_funcs virtio_gpu_plane_funcs = {
+ 	.update_plane		= drm_atomic_helper_update_plane,
+ 	.disable_plane		= drm_atomic_helper_disable_plane,
+ 	.reset			= drm_atomic_helper_plane_reset,
+-	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
++	.atomic_duplicate_state = virtio_gpu_plane_duplicate_state,
+ 	.atomic_destroy_state	= drm_atomic_helper_plane_destroy_state,
+ };
+ 
+@@ -138,11 +155,13 @@ static void virtio_gpu_resource_flush(struct drm_plane *plane,
+ 	struct drm_device *dev = plane->dev;
+ 	struct virtio_gpu_device *vgdev = dev->dev_private;
+ 	struct virtio_gpu_framebuffer *vgfb;
++	struct virtio_gpu_plane_state *vgplane_st;
+ 	struct virtio_gpu_object *bo;
+ 
+ 	vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
++	vgplane_st = to_virtio_gpu_plane_state(plane->state);
+ 	bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
+-	if (vgfb->fence) {
++	if (vgplane_st->fence) {
+ 		struct virtio_gpu_object_array *objs;
+ 
+ 		objs = virtio_gpu_array_alloc(1);
+@@ -151,13 +170,11 @@ static void virtio_gpu_resource_flush(struct drm_plane *plane,
+ 		virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
+ 		virtio_gpu_array_lock_resv(objs);
+ 		virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y,
+-					      width, height, objs, vgfb->fence);
++					      width, height, objs,
++					      vgplane_st->fence);
+ 		virtio_gpu_notify(vgdev);
+-
+-		dma_fence_wait_timeout(&vgfb->fence->f, true,
++		dma_fence_wait_timeout(&vgplane_st->fence->f, true,
+ 				       msecs_to_jiffies(50));
+-		dma_fence_put(&vgfb->fence->f);
+-		vgfb->fence = NULL;
+ 	} else {
+ 		virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y,
+ 					      width, height, NULL, NULL);
+@@ -247,20 +264,23 @@ static int virtio_gpu_plane_prepare_fb(struct drm_plane *plane,
+ 	struct drm_device *dev = plane->dev;
+ 	struct virtio_gpu_device *vgdev = dev->dev_private;
+ 	struct virtio_gpu_framebuffer *vgfb;
++	struct virtio_gpu_plane_state *vgplane_st;
+ 	struct virtio_gpu_object *bo;
+ 
+ 	if (!new_state->fb)
+ 		return 0;
+ 
+ 	vgfb = to_virtio_gpu_framebuffer(new_state->fb);
++	vgplane_st = to_virtio_gpu_plane_state(new_state);
+ 	bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
+ 	if (!bo || (plane->type == DRM_PLANE_TYPE_PRIMARY && !bo->guest_blob))
+ 		return 0;
+ 
+-	if (bo->dumb && (plane->state->fb != new_state->fb)) {
+-		vgfb->fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context,
++	if (bo->dumb) {
++		vgplane_st->fence = virtio_gpu_fence_alloc(vgdev,
++						     vgdev->fence_drv.context,
+ 						     0);
+-		if (!vgfb->fence)
++		if (!vgplane_st->fence)
+ 			return -ENOMEM;
+ 	}
+ 
+@@ -270,15 +290,15 @@ static int virtio_gpu_plane_prepare_fb(struct drm_plane *plane,
+ static void virtio_gpu_plane_cleanup_fb(struct drm_plane *plane,
+ 					struct drm_plane_state *state)
+ {
+-	struct virtio_gpu_framebuffer *vgfb;
++	struct virtio_gpu_plane_state *vgplane_st;
+ 
+ 	if (!state->fb)
+ 		return;
+ 
+-	vgfb = to_virtio_gpu_framebuffer(state->fb);
+-	if (vgfb->fence) {
+-		dma_fence_put(&vgfb->fence->f);
+-		vgfb->fence = NULL;
++	vgplane_st = to_virtio_gpu_plane_state(state);
++	if (vgplane_st->fence) {
++		dma_fence_put(&vgplane_st->fence->f);
++		vgplane_st->fence = NULL;
+ 	}
+ }
+ 
+@@ -291,6 +311,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
+ 	struct virtio_gpu_device *vgdev = dev->dev_private;
+ 	struct virtio_gpu_output *output = NULL;
+ 	struct virtio_gpu_framebuffer *vgfb;
++	struct virtio_gpu_plane_state *vgplane_st;
+ 	struct virtio_gpu_object *bo = NULL;
+ 	uint32_t handle;
+ 
+@@ -303,6 +324,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
+ 
+ 	if (plane->state->fb) {
+ 		vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
++		vgplane_st = to_virtio_gpu_plane_state(plane->state);
+ 		bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
+ 		handle = bo->hw_res_handle;
+ 	} else {
+@@ -322,11 +344,9 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
+ 			(vgdev, 0,
+ 			 plane->state->crtc_w,
+ 			 plane->state->crtc_h,
+-			 0, 0, objs, vgfb->fence);
++			 0, 0, objs, vgplane_st->fence);
+ 		virtio_gpu_notify(vgdev);
+-		dma_fence_wait(&vgfb->fence->f, true);
+-		dma_fence_put(&vgfb->fence->f);
+-		vgfb->fence = NULL;
++		dma_fence_wait(&vgplane_st->fence->f, true);
+ 	}
+ 
+ 	if (plane->state->fb != old_state->fb) {
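
The design choice behind the virtio-gpu change: the fence now lives in a subclassed plane state, so each atomic commit carries its own fence even when two states reference the same framebuffer. A toy model of the container_of-style subclassing used by to_virtio_gpu_plane_state() (the names here are generic, not driver API):

	#include <stddef.h>
	#include <stdio.h>

	struct base_state { int id; };
	struct sub_state  { struct base_state base; void *fence; };

	/* recover the subclass from a pointer to its embedded base */
	#define to_sub(s) \
		((struct sub_state *)((char *)(s) - offsetof(struct sub_state, base)))

	int main(void)
	{
		struct sub_state st = { { 42 }, NULL };
		struct base_state *b = &st.base;

		printf("%d\n", to_sub(b)->base.id);	/* 42 */
		return 0;
	}
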
+diff --git a/drivers/gpu/drm/xe/xe_devcoredump.c b/drivers/gpu/drm/xe/xe_devcoredump.c
+index 85aa3ab0da3b87..8050938389b68f 100644
+--- a/drivers/gpu/drm/xe/xe_devcoredump.c
++++ b/drivers/gpu/drm/xe/xe_devcoredump.c
+@@ -104,11 +104,7 @@ static ssize_t __xe_devcoredump_read(char *buffer, size_t count,
+ 	drm_puts(&p, "\n**** GuC CT ****\n");
+ 	xe_guc_ct_snapshot_print(ss->ct, &p);
+ 
+-	/*
+-	 * Don't add a new section header here because the mesa debug decoder
+-	 * tool expects the context information to be in the 'GuC CT' section.
+-	 */
+-	/* drm_puts(&p, "\n**** Contexts ****\n"); */
++	drm_puts(&p, "\n**** Contexts ****\n");
+ 	xe_guc_exec_queue_snapshot_print(ss->ge, &p);
+ 
+ 	drm_puts(&p, "\n**** Job ****\n");
+@@ -337,42 +333,34 @@ int xe_devcoredump_init(struct xe_device *xe)
+ /**
+  * xe_print_blob_ascii85 - print a BLOB to some useful location in ASCII85
+  *
+- * The output is split to multiple lines because some print targets, e.g. dmesg
+- * cannot handle arbitrarily long lines. Note also that printing to dmesg in
+- * piece-meal fashion is not possible, each separate call to drm_puts() has a
+- * line-feed automatically added! Therefore, the entire output line must be
+- * constructed in a local buffer first, then printed in one atomic output call.
++ * The output is split into multiple calls to drm_puts() because some print
++ * targets, e.g. dmesg, cannot handle arbitrarily long lines. These targets may
++ * add newlines, as is the case with dmesg: each drm_puts() call creates a
++ * separate line.
+  *
+  * There is also a scheduler yield call to prevent the 'task has been stuck for
+  * 120s' kernel hang check feature from firing when printing to a slow target
+  * such as dmesg over a serial port.
+  *
+- * TODO: Add compression prior to the ASCII85 encoding to shrink huge buffers down.
+- *
+  * @p: the printer object to output to
+  * @prefix: optional prefix to add to output string
++ * @suffix: optional suffix to add at the end; 0 disables it (nothing is
++ *          appended), which is useful when using multiple calls to dump
++ *          data to @p
+  * @blob: the Binary Large OBject to dump out
+  * @offset: offset in bytes to skip from the front of the BLOB, must be a multiple of sizeof(u32)
+  * @size: the size in bytes of the BLOB, must be a multiple of sizeof(u32)
+  */
+-void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix,
++void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix, char suffix,
+ 			   const void *blob, size_t offset, size_t size)
+ {
+ 	const u32 *blob32 = (const u32 *)blob;
+ 	char buff[ASCII85_BUFSZ], *line_buff;
+ 	size_t line_pos = 0;
+ 
+-	/*
+-	 * Splitting blobs across multiple lines is not compatible with the mesa
+-	 * debug decoder tool. Note that even dropping the explicit '\n' below
+-	 * doesn't help because the GuC log is so big some underlying implementation
+-	 * still splits the lines at 512K characters. So just bail completely for
+-	 * the moment.
+-	 */
+-	return;
+-
+ #define DMESG_MAX_LINE_LEN	800
+-#define MIN_SPACE		(ASCII85_BUFSZ + 2)		/* 85 + "\n\0" */
++	/* Always leave space for the suffix char and the \0 */
++#define MIN_SPACE		(ASCII85_BUFSZ + 2)	/* 85 + "<suffix>\0" */
+ 
+ 	if (size & 3)
+ 		drm_printf(p, "Size not word aligned: %zu", size);
+@@ -404,7 +392,6 @@ void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix,
+ 		line_pos += strlen(line_buff + line_pos);
+ 
+ 		if ((line_pos + MIN_SPACE) >= DMESG_MAX_LINE_LEN) {
+-			line_buff[line_pos++] = '\n';
+ 			line_buff[line_pos++] = 0;
+ 
+ 			drm_puts(p, line_buff);
+@@ -416,10 +403,11 @@ void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix,
+ 		}
+ 	}
+ 
++	if (suffix)
++		line_buff[line_pos++] = suffix;
++
+ 	if (line_pos) {
+-		line_buff[line_pos++] = '\n';
+ 		line_buff[line_pos++] = 0;
+-
+ 		drm_puts(p, line_buff);
+ 	}
+ 
+diff --git a/drivers/gpu/drm/xe/xe_devcoredump.h b/drivers/gpu/drm/xe/xe_devcoredump.h
+index a4eebc285fc837..b231c8ad799f69 100644
+--- a/drivers/gpu/drm/xe/xe_devcoredump.h
++++ b/drivers/gpu/drm/xe/xe_devcoredump.h
+@@ -26,7 +26,7 @@ static inline int xe_devcoredump_init(struct xe_device *xe)
+ }
+ #endif
+ 
+-void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix,
++void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix, char suffix,
+ 			   const void *blob, size_t offset, size_t size);
+ 
+ #endif
+diff --git a/drivers/gpu/drm/xe/xe_guc_log.c b/drivers/gpu/drm/xe/xe_guc_log.c
+index be47780ec2a7e7..50851638003b9b 100644
+--- a/drivers/gpu/drm/xe/xe_guc_log.c
++++ b/drivers/gpu/drm/xe/xe_guc_log.c
+@@ -78,7 +78,7 @@ void xe_guc_log_print(struct xe_guc_log *log, struct drm_printer *p)
+ 
+ 	xe_map_memcpy_from(xe, copy, &log->bo->vmap, 0, size);
+ 
+-	xe_print_blob_ascii85(p, "Log data", copy, 0, size);
++	xe_print_blob_ascii85(p, "Log data", '\n', copy, 0, size);
+ 
+ 	vfree(copy);
+ }
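
A hypothetical two-call usage enabled by the new suffix parameter, streaming one blob in two pieces so that only the final call terminates the line. The split point is an assumption and, per the kernel-doc above, must stay a multiple of sizeof(u32); that a NULL prefix is accepted on continuation calls is likewise an assumption based on the "optional prefix" wording:

	/* sketch only */
	xe_print_blob_ascii85(p, "Log data", 0, copy, 0, half);
	xe_print_blob_ascii85(p, NULL, '\n', copy, half, size - half);
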
+diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
+index a4b47319ad8ead..bcdd168cdc6d79 100644
+--- a/drivers/hid/hid-asus.c
++++ b/drivers/hid/hid-asus.c
+@@ -432,6 +432,26 @@ static int asus_kbd_get_functions(struct hid_device *hdev,
+ 	return ret;
+ }
+ 
++static int asus_kbd_disable_oobe(struct hid_device *hdev)
++{
++	const u8 init[][6] = {
++		{ FEATURE_KBD_REPORT_ID, 0x05, 0x20, 0x31, 0x00, 0x08 },
++		{ FEATURE_KBD_REPORT_ID, 0xBA, 0xC5, 0xC4 },
++		{ FEATURE_KBD_REPORT_ID, 0xD0, 0x8F, 0x01 },
++		{ FEATURE_KBD_REPORT_ID, 0xD0, 0x85, 0xFF }
++	};
++	int ret;
++
++	for (size_t i = 0; i < ARRAY_SIZE(init); i++) {
++		ret = asus_kbd_set_report(hdev, init[i], sizeof(init[i]));
++		if (ret < 0)
++			return ret;
++	}
++
++	hid_info(hdev, "Disabled OOBE for keyboard\n");
++	return 0;
++}
++
+ static void asus_schedule_work(struct asus_kbd_leds *led)
+ {
+ 	unsigned long flags;
+@@ -534,6 +554,12 @@ static int asus_kbd_register_leds(struct hid_device *hdev)
+ 		ret = asus_kbd_init(hdev, FEATURE_KBD_LED_REPORT_ID2);
+ 		if (ret < 0)
+ 			return ret;
++
++		if (dmi_match(DMI_PRODUCT_FAMILY, "ProArt P16")) {
++			ret = asus_kbd_disable_oobe(hdev);
++			if (ret < 0)
++				return ret;
++		}
+ 	} else {
+ 		/* Initialize keyboard */
+ 		ret = asus_kbd_init(hdev, FEATURE_KBD_REPORT_ID);
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index e07d63db5e1f47..369414c92fccbe 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -2308,6 +2308,11 @@ static const struct hid_device_id mt_devices[] = {
+ 		HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, USB_VENDOR_ID_SIS_TOUCH,
+ 			HID_ANY_ID) },
+ 
++	/* Hantick */
++	{ .driver_data = MT_CLS_NSMU,
++		HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
++			   I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288) },
++
+ 	/* Generic MT device */
+ 	{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_MULTITOUCH, HID_ANY_ID, HID_ANY_ID) },
+ 
+diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
+index 7bd86eef6ec761..4c94c03cb57396 100644
+--- a/drivers/hid/hid-sensor-hub.c
++++ b/drivers/hid/hid-sensor-hub.c
+@@ -730,23 +730,30 @@ static int sensor_hub_probe(struct hid_device *hdev,
+ 	return ret;
+ }
+ 
++static int sensor_hub_finalize_pending_fn(struct device *dev, void *data)
++{
++	struct hid_sensor_hub_device *hsdev = dev->platform_data;
++
++	if (hsdev->pending.status)
++		complete(&hsdev->pending.ready);
++
++	return 0;
++}
++
+ static void sensor_hub_remove(struct hid_device *hdev)
+ {
+ 	struct sensor_hub_data *data = hid_get_drvdata(hdev);
+ 	unsigned long flags;
+-	int i;
+ 
+ 	hid_dbg(hdev, " hardware removed\n");
+ 	hid_hw_close(hdev);
+ 	hid_hw_stop(hdev);
++
+ 	spin_lock_irqsave(&data->lock, flags);
+-	for (i = 0; i < data->hid_sensor_client_cnt; ++i) {
+-		struct hid_sensor_hub_device *hsdev =
+-			data->hid_sensor_hub_client_devs[i].platform_data;
+-		if (hsdev->pending.status)
+-			complete(&hsdev->pending.ready);
+-	}
++	device_for_each_child(&hdev->dev, NULL,
++			      sensor_hub_finalize_pending_fn);
+ 	spin_unlock_irqrestore(&data->lock, flags);
++
+ 	mfd_remove_devices(&hdev->dev);
+ 	mutex_destroy(&data->mutex);
+ }
+diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
+index 5a599c90e7a2c7..c7033ffaba3919 100644
+--- a/drivers/hid/wacom_wac.c
++++ b/drivers/hid/wacom_wac.c
+@@ -4943,6 +4943,10 @@ static const struct wacom_features wacom_features_0x94 =
+ 	HID_DEVICE(BUS_I2C, HID_GROUP_WACOM, USB_VENDOR_ID_WACOM, prod),\
+ 	.driver_data = (kernel_ulong_t)&wacom_features_##prod
+ 
++#define PCI_DEVICE_WACOM(prod)						\
++	HID_DEVICE(BUS_PCI, HID_GROUP_WACOM, USB_VENDOR_ID_WACOM, prod),\
++	.driver_data = (kernel_ulong_t)&wacom_features_##prod
++
+ #define USB_DEVICE_LENOVO(prod)					\
+ 	HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, prod),			\
+ 	.driver_data = (kernel_ulong_t)&wacom_features_##prod
+@@ -5112,6 +5116,7 @@ const struct hid_device_id wacom_ids[] = {
+ 
+ 	{ USB_DEVICE_WACOM(HID_ANY_ID) },
+ 	{ I2C_DEVICE_WACOM(HID_ANY_ID) },
++	{ PCI_DEVICE_WACOM(HID_ANY_ID) },
+ 	{ BT_DEVICE_WACOM(HID_ANY_ID) },
+ 	{ }
+ };
+diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c
+index 14ae0cfc325efb..d2499f302b5083 100644
+--- a/drivers/i2c/i2c-core-acpi.c
++++ b/drivers/i2c/i2c-core-acpi.c
+@@ -355,6 +355,25 @@ static const struct acpi_device_id i2c_acpi_force_400khz_device_ids[] = {
+ 	{}
+ };
+ 
++static const struct acpi_device_id i2c_acpi_force_100khz_device_ids[] = {
++	/*
++	 * When a 400KHz freq is used on this model of ELAN touchpad in Linux,
++	 * excessive smoothing (similar to when the touchpad's firmware detects
++	 * a noisy signal) is sometimes applied. As some devices' (e.g, Lenovo
++	 * V15 G4) ACPI tables specify a 400KHz frequency for this device and
++	 * some I2C busses (e.g, Designware I2C) default to a 400KHz freq,
++	 * force the speed to 100KHz as a workaround.
++	 *
++	 * For future investigation: This problem may be related to the default
++	 * HCNT/LCNT values given by some busses' drivers, because they are not
++	 * specified in the aforementioned devices' ACPI tables, and because
++	 * the device works without issues on Windows at what is expected to be
++	 * a 400KHz frequency. The root cause of the issue is not known.
++	 */
++	{ "ELAN06FA", 0 },
++	{}
++};
++
+ static acpi_status i2c_acpi_lookup_speed(acpi_handle handle, u32 level,
+ 					   void *data, void **return_value)
+ {
+@@ -373,6 +392,9 @@ static acpi_status i2c_acpi_lookup_speed(acpi_handle handle, u32 level,
+ 	if (acpi_match_device_ids(adev, i2c_acpi_force_400khz_device_ids) == 0)
+ 		lookup->force_speed = I2C_MAX_FAST_MODE_FREQ;
+ 
++	if (acpi_match_device_ids(adev, i2c_acpi_force_100khz_device_ids) == 0)
++		lookup->force_speed = I2C_MAX_STANDARD_MODE_FREQ;
++
+ 	return AE_OK;
+ }
+ 
+diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
+index 42310c9a00c2d1..53ab814b676ffd 100644
+--- a/drivers/i3c/master.c
++++ b/drivers/i3c/master.c
+@@ -1919,7 +1919,7 @@ static int i3c_master_bus_init(struct i3c_master_controller *master)
+ 		goto err_bus_cleanup;
+ 
+ 	if (master->ops->set_speed) {
+-		master->ops->set_speed(master, I3C_OPEN_DRAIN_NORMAL_SPEED);
++		ret = master->ops->set_speed(master, I3C_OPEN_DRAIN_NORMAL_SPEED);
+ 		if (ret)
+ 			goto err_bus_cleanup;
+ 	}
+diff --git a/drivers/iio/light/as73211.c b/drivers/iio/light/as73211.c
+index be0068081ebbbb..11fbdcdd26d656 100644
+--- a/drivers/iio/light/as73211.c
++++ b/drivers/iio/light/as73211.c
+@@ -177,6 +177,12 @@ struct as73211_data {
+ 	BIT(AS73211_SCAN_INDEX_TEMP) | \
+ 	AS73211_SCAN_MASK_COLOR)
+ 
++static const unsigned long as73211_scan_masks[] = {
++	AS73211_SCAN_MASK_COLOR,
++	AS73211_SCAN_MASK_ALL,
++	0
++};
++
+ static const struct iio_chan_spec as73211_channels[] = {
+ 	{
+ 		.type = IIO_TEMP,
+@@ -672,9 +678,12 @@ static irqreturn_t as73211_trigger_handler(int irq __always_unused, void *p)
+ 
+ 		/* AS73211 starts reading at address 2 */
+ 		ret = i2c_master_recv(data->client,
+-				(char *)&scan.chan[1], 3 * sizeof(scan.chan[1]));
++				(char *)&scan.chan[0], 3 * sizeof(scan.chan[0]));
+ 		if (ret < 0)
+ 			goto done;
++
++		/* Avoid pushing uninitialized data */
++		scan.chan[3] = 0;
+ 	}
+ 
+ 	if (data_result) {
+@@ -682,9 +691,15 @@ static irqreturn_t as73211_trigger_handler(int irq __always_unused, void *p)
+ 		 * Saturate all channels (in case of overflows). Temperature channel
+ 		 * is not affected by overflows.
+ 		 */
+-		scan.chan[1] = cpu_to_le16(U16_MAX);
+-		scan.chan[2] = cpu_to_le16(U16_MAX);
+-		scan.chan[3] = cpu_to_le16(U16_MAX);
++		if (*indio_dev->active_scan_mask == AS73211_SCAN_MASK_ALL) {
++			scan.chan[1] = cpu_to_le16(U16_MAX);
++			scan.chan[2] = cpu_to_le16(U16_MAX);
++			scan.chan[3] = cpu_to_le16(U16_MAX);
++		} else {
++			scan.chan[0] = cpu_to_le16(U16_MAX);
++			scan.chan[1] = cpu_to_le16(U16_MAX);
++			scan.chan[2] = cpu_to_le16(U16_MAX);
++		}
+ 	}
+ 
+ 	iio_push_to_buffers_with_timestamp(indio_dev, &scan, iio_get_time_ns(indio_dev));
+@@ -758,6 +773,7 @@ static int as73211_probe(struct i2c_client *client)
+ 	indio_dev->channels = data->spec_dev->channels;
+ 	indio_dev->num_channels = data->spec_dev->num_channels;
+ 	indio_dev->modes = INDIO_DIRECT_MODE;
++	indio_dev->available_scan_masks = as73211_scan_masks;
+ 
+ 	ret = i2c_smbus_read_byte_data(data->client, AS73211_REG_OSR);
+ 	if (ret < 0)
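
The layout consequence of the new scan masks: with AS73211_SCAN_MASK_ALL the temperature occupies slot 0 and the colour channels slots 1-3, while the colour-only mask packs X/Y/Z into slots 0-2. That is why the burst read now starts at chan[0] and the saturation path fills different slots per mask. A standalone model of the two layouts (values are placeholders):

	#include <stdint.h>
	#include <stdio.h>

	struct scan { uint16_t chan[4]; int64_t ts; };	/* mirrors the driver buffer */

	int main(void)
	{
		struct scan all   = { { 300 /* temp */, 1 /* X */, 2 /* Y */, 3 /* Z */ }, 0 };
		struct scan color = { { 1 /* X */, 2 /* Y */, 3 /* Z */, 0 /* unused */ }, 0 };

		printf("X: %u vs %u\n", all.chan[1], color.chan[0]);	/* both 1 */
		return 0;
	}
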
+diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
+index 45d9dc9c6c8fda..bb02b6adbf2c21 100644
+--- a/drivers/infiniband/hw/mlx5/mr.c
++++ b/drivers/infiniband/hw/mlx5/mr.c
+@@ -2021,6 +2021,11 @@ static int mlx5_revoke_mr(struct mlx5_ib_mr *mr)
+ {
+ 	struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
+ 	struct mlx5_cache_ent *ent = mr->mmkey.cache_ent;
++	bool is_odp = is_odp_mr(mr);
++	int ret = 0;
++
++	if (is_odp)
++		mutex_lock(&to_ib_umem_odp(mr->umem)->umem_mutex);
+ 
+ 	if (mr->mmkey.cacheable && !mlx5r_umr_revoke_mr(mr) && !cache_ent_find_and_store(dev, mr)) {
+ 		ent = mr->mmkey.cache_ent;
+@@ -2032,7 +2037,7 @@ static int mlx5_revoke_mr(struct mlx5_ib_mr *mr)
+ 			ent->tmp_cleanup_scheduled = true;
+ 		}
+ 		spin_unlock_irq(&ent->mkeys_queue.lock);
+-		return 0;
++		goto out;
+ 	}
+ 
+ 	if (ent) {
+@@ -2041,7 +2046,15 @@ static int mlx5_revoke_mr(struct mlx5_ib_mr *mr)
+ 		mr->mmkey.cache_ent = NULL;
+ 		spin_unlock_irq(&ent->mkeys_queue.lock);
+ 	}
+-	return destroy_mkey(dev, mr);
++	ret = destroy_mkey(dev, mr);
++out:
++	if (is_odp) {
++		if (!ret)
++			to_ib_umem_odp(mr->umem)->private = NULL;
++		mutex_unlock(&to_ib_umem_odp(mr->umem)->umem_mutex);
++	}
++
++	return ret;
+ }
+ 
+ static int __mlx5_ib_dereg_mr(struct ib_mr *ibmr)
+diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
+index 64b441542cd5dd..1d3bf56157702d 100644
+--- a/drivers/infiniband/hw/mlx5/odp.c
++++ b/drivers/infiniband/hw/mlx5/odp.c
+@@ -282,6 +282,8 @@ static bool mlx5_ib_invalidate_range(struct mmu_interval_notifier *mni,
+ 	if (!umem_odp->npages)
+ 		goto out;
+ 	mr = umem_odp->private;
++	if (!mr)
++		goto out;
+ 
+ 	start = max_t(u64, ib_umem_start(umem_odp), range->start);
+ 	end = min_t(u64, ib_umem_end(umem_odp), range->end);
+diff --git a/drivers/input/misc/nxp-bbnsm-pwrkey.c b/drivers/input/misc/nxp-bbnsm-pwrkey.c
+index eb4173f9c82044..7ba8d166d68c18 100644
+--- a/drivers/input/misc/nxp-bbnsm-pwrkey.c
++++ b/drivers/input/misc/nxp-bbnsm-pwrkey.c
+@@ -187,6 +187,12 @@ static int bbnsm_pwrkey_probe(struct platform_device *pdev)
+ 	return 0;
+ }
+ 
++static void bbnsm_pwrkey_remove(struct platform_device *pdev)
++{
++	dev_pm_clear_wake_irq(&pdev->dev);
++	device_init_wakeup(&pdev->dev, false);
++}
++
+ static int __maybe_unused bbnsm_pwrkey_suspend(struct device *dev)
+ {
+ 	struct platform_device *pdev = to_platform_device(dev);
+@@ -223,6 +229,8 @@ static struct platform_driver bbnsm_pwrkey_driver = {
+ 		.of_match_table = bbnsm_pwrkey_ids,
+ 	},
+ 	.probe = bbnsm_pwrkey_probe,
++	.remove = bbnsm_pwrkey_remove,
+ };
+ module_platform_driver(bbnsm_pwrkey_driver);
+ 
+diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+index f1a8f8c75cb0e9..6bf8ecbbe0c263 100644
+--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
++++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+@@ -4616,7 +4616,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
+ 	/* Initialise in-memory data structures */
+ 	ret = arm_smmu_init_structures(smmu);
+ 	if (ret)
+-		return ret;
++		goto err_free_iopf;
+ 
+ 	/* Record our private device structure */
+ 	platform_set_drvdata(pdev, smmu);
+@@ -4627,22 +4627,29 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
+ 	/* Reset the device */
+ 	ret = arm_smmu_device_reset(smmu);
+ 	if (ret)
+-		return ret;
++		goto err_disable;
+ 
+ 	/* And we're up. Go go go! */
+ 	ret = iommu_device_sysfs_add(&smmu->iommu, dev, NULL,
+ 				     "smmu3.%pa", &ioaddr);
+ 	if (ret)
+-		return ret;
++		goto err_disable;
+ 
+ 	ret = iommu_device_register(&smmu->iommu, &arm_smmu_ops, dev);
+ 	if (ret) {
+ 		dev_err(dev, "Failed to register iommu\n");
+-		iommu_device_sysfs_remove(&smmu->iommu);
+-		return ret;
++		goto err_free_sysfs;
+ 	}
+ 
+ 	return 0;
++
++err_free_sysfs:
++	iommu_device_sysfs_remove(&smmu->iommu);
++err_disable:
++	arm_smmu_device_disable(smmu);
++err_free_iopf:
++	iopf_queue_free(smmu->evtq.iopf);
++	return ret;
+ }
+ 
+ static void arm_smmu_device_remove(struct platform_device *pdev)
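
The SMMUv3 probe fix restores the usual goto-unwind ladder: once a resource is acquired, every later failure must jump to a label that releases it and everything acquired before it, in reverse order. The generic shape, reduced to a runnable sketch with placeholder setup/undo functions:

	#include <stdio.h>

	static int setup_a(void) { return 0; }		/* placeholder: succeeds */
	static int setup_b(void) { return -1; }		/* placeholder: fails */
	static void undo_a(void) { puts("undo_a"); }

	static int probe(void)
	{
		int ret;

		ret = setup_a();
		if (ret)
			return ret;		/* nothing to unwind yet */

		ret = setup_b();
		if (ret)
			goto err_undo_a;	/* unwind in reverse order */

		return 0;

	err_undo_a:
		undo_a();
		return ret;
	}

	int main(void) { return probe() ? 1 : 0; }
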
+diff --git a/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c b/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
+index 6e41ddaa24d636..d525ab43a4aebf 100644
+--- a/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
++++ b/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
+@@ -79,7 +79,6 @@
+ #define TEGRA241_VCMDQ_PAGE1(q)		(TEGRA241_VCMDQ_PAGE1_BASE + 0x80*(q))
+ #define  VCMDQ_ADDR			GENMASK(47, 5)
+ #define  VCMDQ_LOG2SIZE			GENMASK(4, 0)
+-#define  VCMDQ_LOG2SIZE_MAX		19
+ 
+ #define TEGRA241_VCMDQ_BASE		0x00000
+ #define TEGRA241_VCMDQ_CONS_INDX_BASE	0x00008
+@@ -505,12 +504,15 @@ static int tegra241_vcmdq_alloc_smmu_cmdq(struct tegra241_vcmdq *vcmdq)
+ 	struct arm_smmu_cmdq *cmdq = &vcmdq->cmdq;
+ 	struct arm_smmu_queue *q = &cmdq->q;
+ 	char name[16];
++	u32 regval;
+ 	int ret;
+ 
+ 	snprintf(name, 16, "vcmdq%u", vcmdq->idx);
+ 
+-	/* Queue size, capped to ensure natural alignment */
+-	q->llq.max_n_shift = min_t(u32, CMDQ_MAX_SZ_SHIFT, VCMDQ_LOG2SIZE_MAX);
++	/* Cap queue size to SMMU's IDR1.CMDQS and ensure natural alignment */
++	regval = readl_relaxed(smmu->base + ARM_SMMU_IDR1);
++	q->llq.max_n_shift =
++		min_t(u32, CMDQ_MAX_SZ_SHIFT, FIELD_GET(IDR1_CMDQS, regval));
+ 
+ 	/* Use the common helper to init the VCMDQ, and then... */
+ 	ret = arm_smmu_init_one_queue(smmu, q, vcmdq->page0,
+diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+index 6372f3e25c4bc2..601fb878d0ef25 100644
+--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
++++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+@@ -567,6 +567,7 @@ static const struct of_device_id __maybe_unused qcom_smmu_impl_of_match[] = {
+ 	{ .compatible = "qcom,sc8180x-smmu-500", .data = &qcom_smmu_500_impl0_data },
+ 	{ .compatible = "qcom,sc8280xp-smmu-500", .data = &qcom_smmu_500_impl0_data },
+ 	{ .compatible = "qcom,sdm630-smmu-v2", .data = &qcom_smmu_v2_data },
++	{ .compatible = "qcom,sdm670-smmu-v2", .data = &qcom_smmu_v2_data },
+ 	{ .compatible = "qcom,sdm845-smmu-v2", .data = &qcom_smmu_v2_data },
+ 	{ .compatible = "qcom,sdm845-smmu-500", .data = &sdm845_smmu_500_data },
+ 	{ .compatible = "qcom,sm6115-smmu-500", .data = &qcom_smmu_500_impl0_data},
+diff --git a/drivers/iommu/iommufd/fault.c b/drivers/iommu/iommufd/fault.c
+index b8393a8c075396..95e2e99ab27241 100644
+--- a/drivers/iommu/iommufd/fault.c
++++ b/drivers/iommu/iommufd/fault.c
+@@ -98,15 +98,23 @@ static void iommufd_auto_response_faults(struct iommufd_hw_pagetable *hwpt,
+ {
+ 	struct iommufd_fault *fault = hwpt->fault;
+ 	struct iopf_group *group, *next;
++	struct list_head free_list;
+ 	unsigned long index;
+ 
+ 	if (!fault)
+ 		return;
++	INIT_LIST_HEAD(&free_list);
+ 
+ 	mutex_lock(&fault->mutex);
++	spin_lock(&fault->lock);
+ 	list_for_each_entry_safe(group, next, &fault->deliver, node) {
+ 		if (group->attach_handle != &handle->handle)
+ 			continue;
++		list_move(&group->node, &free_list);
++	}
++	spin_unlock(&fault->lock);
++
++	list_for_each_entry_safe(group, next, &free_list, node) {
+ 		list_del(&group->node);
+ 		iopf_group_response(group, IOMMU_PAGE_RESP_INVALID);
+ 		iopf_free_group(group);
+@@ -208,6 +216,7 @@ void iommufd_fault_destroy(struct iommufd_object *obj)
+ {
+ 	struct iommufd_fault *fault = container_of(obj, struct iommufd_fault, obj);
+ 	struct iopf_group *group, *next;
++	unsigned long index;
+ 
+ 	/*
+ 	 * The iommufd object's reference count is zero at this point.
+@@ -220,6 +229,13 @@ void iommufd_fault_destroy(struct iommufd_object *obj)
+ 		iopf_group_response(group, IOMMU_PAGE_RESP_INVALID);
+ 		iopf_free_group(group);
+ 	}
++	xa_for_each(&fault->response, index, group) {
++		xa_erase(&fault->response, index);
++		iopf_group_response(group, IOMMU_PAGE_RESP_INVALID);
++		iopf_free_group(group);
++	}
++	xa_destroy(&fault->response);
++	mutex_destroy(&fault->mutex);
+ }
+ 
+ static void iommufd_compose_fault_message(struct iommu_fault *fault,
+@@ -242,7 +258,7 @@ static ssize_t iommufd_fault_fops_read(struct file *filep, char __user *buf,
+ {
+ 	size_t fault_size = sizeof(struct iommu_hwpt_pgfault);
+ 	struct iommufd_fault *fault = filep->private_data;
+-	struct iommu_hwpt_pgfault data;
++	struct iommu_hwpt_pgfault data = {};
+ 	struct iommufd_device *idev;
+ 	struct iopf_group *group;
+ 	struct iopf_fault *iopf;
+@@ -253,17 +269,19 @@ static ssize_t iommufd_fault_fops_read(struct file *filep, char __user *buf,
+ 		return -ESPIPE;
+ 
+ 	mutex_lock(&fault->mutex);
+-	while (!list_empty(&fault->deliver) && count > done) {
+-		group = list_first_entry(&fault->deliver,
+-					 struct iopf_group, node);
+-
+-		if (group->fault_count * fault_size > count - done)
++	while ((group = iommufd_fault_deliver_fetch(fault))) {
++		if (done >= count ||
++		    group->fault_count * fault_size > count - done) {
++			iommufd_fault_deliver_restore(fault, group);
+ 			break;
++		}
+ 
+ 		rc = xa_alloc(&fault->response, &group->cookie, group,
+ 			      xa_limit_32b, GFP_KERNEL);
+-		if (rc)
++		if (rc) {
++			iommufd_fault_deliver_restore(fault, group);
+ 			break;
++		}
+ 
+ 		idev = to_iommufd_handle(group->attach_handle)->idev;
+ 		list_for_each_entry(iopf, &group->faults, list) {
+@@ -272,13 +290,12 @@ static ssize_t iommufd_fault_fops_read(struct file *filep, char __user *buf,
+ 						      group->cookie);
+ 			if (copy_to_user(buf + done, &data, fault_size)) {
+ 				xa_erase(&fault->response, group->cookie);
++				iommufd_fault_deliver_restore(fault, group);
+ 				rc = -EFAULT;
+ 				break;
+ 			}
+ 			done += fault_size;
+ 		}
+-
+-		list_del(&group->node);
+ 	}
+ 	mutex_unlock(&fault->mutex);
+ 
+@@ -336,10 +353,10 @@ static __poll_t iommufd_fault_fops_poll(struct file *filep,
+ 	__poll_t pollflags = EPOLLOUT;
+ 
+ 	poll_wait(filep, &fault->wait_queue, wait);
+-	mutex_lock(&fault->mutex);
++	spin_lock(&fault->lock);
+ 	if (!list_empty(&fault->deliver))
+ 		pollflags |= EPOLLIN | EPOLLRDNORM;
+-	mutex_unlock(&fault->mutex);
++	spin_unlock(&fault->lock);
+ 
+ 	return pollflags;
+ }
+@@ -381,6 +398,7 @@ int iommufd_fault_alloc(struct iommufd_ucmd *ucmd)
+ 	INIT_LIST_HEAD(&fault->deliver);
+ 	xa_init_flags(&fault->response, XA_FLAGS_ALLOC1);
+ 	mutex_init(&fault->mutex);
++	spin_lock_init(&fault->lock);
+ 	init_waitqueue_head(&fault->wait_queue);
+ 
+ 	filep = anon_inode_getfile("[iommufd-pgfault]", &iommufd_fault_fops,
+@@ -429,9 +447,9 @@ int iommufd_fault_iopf_handler(struct iopf_group *group)
+ 	hwpt = group->attach_handle->domain->fault_data;
+ 	fault = hwpt->fault;
+ 
+-	mutex_lock(&fault->mutex);
++	spin_lock(&fault->lock);
+ 	list_add_tail(&group->node, &fault->deliver);
+-	mutex_unlock(&fault->mutex);
++	spin_unlock(&fault->lock);
+ 
+ 	wake_up_interruptible(&fault->wait_queue);
+ 
+diff --git a/drivers/iommu/iommufd/iommufd_private.h b/drivers/iommu/iommufd/iommufd_private.h
+index f1d865e6fab66a..c1f82cb6824256 100644
+--- a/drivers/iommu/iommufd/iommufd_private.h
++++ b/drivers/iommu/iommufd/iommufd_private.h
+@@ -462,14 +462,39 @@ struct iommufd_fault {
+ 	struct iommufd_ctx *ictx;
+ 	struct file *filep;
+ 
+-	/* The lists of outstanding faults protected by below mutex. */
+-	struct mutex mutex;
++	spinlock_t lock; /* protects the deliver list */
+ 	struct list_head deliver;
++	struct mutex mutex; /* serializes response flows */
+ 	struct xarray response;
+ 
+ 	struct wait_queue_head wait_queue;
+ };
+ 
++/* Fetch the first node out of the fault->deliver list */
++static inline struct iopf_group *
++iommufd_fault_deliver_fetch(struct iommufd_fault *fault)
++{
++	struct list_head *list = &fault->deliver;
++	struct iopf_group *group = NULL;
++
++	spin_lock(&fault->lock);
++	if (!list_empty(list)) {
++		group = list_first_entry(list, struct iopf_group, node);
++		list_del(&group->node);
++	}
++	spin_unlock(&fault->lock);
++	return group;
++}
++
++/* Restore a node back to the head of the fault->deliver list */
++static inline void iommufd_fault_deliver_restore(struct iommufd_fault *fault,
++						 struct iopf_group *group)
++{
++	spin_lock(&fault->lock);
++	list_add(&group->node, &fault->deliver);
++	spin_unlock(&fault->lock);
++}
++
+ struct iommufd_attach_handle {
+ 	struct iommu_attach_handle handle;
+ 	struct iommufd_device *idev;
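
With the deliver list now guarded by the new spinlock, readers pop a group and push it back verbatim if it cannot be handled, so no fault is ever dropped, while the heavier response bookkeeping stays under the mutex. A toy model of that fetch/restore discipline (the array-backed "list" and budget are stand-ins, not iommufd API):

	#include <stdio.h>

	static int deliver[3] = { 1, 2, 3 };
	static int head;

	static int fetch(void)		{ return head < 3 ? deliver[head++] : 0; }
	static void restore_head(void)	{ head--; }	/* put the item back */

	int main(void)
	{
		int group, budget = 2;

		while ((group = fetch())) {
			if (!budget--) {	/* e.g. the user buffer is full */
				restore_head();	/* no fault is lost */
				break;
			}
			printf("delivered %d\n", group);
		}
		printf("next pending: %d\n", deliver[head]);	/* 3 survives */
		return 0;
	}
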
+diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
+index 66ce15027f28d7..c1f30483600859 100644
+--- a/drivers/irqchip/Kconfig
++++ b/drivers/irqchip/Kconfig
+@@ -169,6 +169,7 @@ config IXP4XX_IRQ
+ 
+ config LAN966X_OIC
+ 	tristate "Microchip LAN966x OIC Support"
++	depends on MCHP_LAN966X_PCI || COMPILE_TEST
+ 	select GENERIC_IRQ_CHIP
+ 	select IRQ_DOMAIN
+ 	help
+diff --git a/drivers/irqchip/irq-apple-aic.c b/drivers/irqchip/irq-apple-aic.c
+index da5250f0155cfa..2b1684c60e3cac 100644
+--- a/drivers/irqchip/irq-apple-aic.c
++++ b/drivers/irqchip/irq-apple-aic.c
+@@ -577,7 +577,8 @@ static void __exception_irq_entry aic_handle_fiq(struct pt_regs *regs)
+ 						  AIC_FIQ_HWIRQ(AIC_TMR_EL02_VIRT));
+ 	}
+ 
+-	if (read_sysreg_s(SYS_IMP_APL_PMCR0_EL1) & PMCR0_IACT) {
++	if ((read_sysreg_s(SYS_IMP_APL_PMCR0_EL1) & (PMCR0_IMODE | PMCR0_IACT)) ==
++			(FIELD_PREP(PMCR0_IMODE, PMCR0_IMODE_FIQ) | PMCR0_IACT)) {
+ 		int irq;
+ 		if (cpumask_test_cpu(smp_processor_id(),
+ 				     &aic_irqc->fiq_aff[AIC_CPU_PMU_P]->aff))
+diff --git a/drivers/irqchip/irq-mvebu-icu.c b/drivers/irqchip/irq-mvebu-icu.c
+index b337f6c05f184f..4eebed39880a5b 100644
+--- a/drivers/irqchip/irq-mvebu-icu.c
++++ b/drivers/irqchip/irq-mvebu-icu.c
+@@ -68,7 +68,8 @@ static int mvebu_icu_translate(struct irq_domain *d, struct irq_fwspec *fwspec,
+ 			       unsigned long *hwirq, unsigned int *type)
+ {
+ 	unsigned int param_count = static_branch_unlikely(&legacy_bindings) ? 3 : 2;
+-	struct mvebu_icu_msi_data *msi_data = d->host_data;
++	struct msi_domain_info *info = d->host_data;
++	struct mvebu_icu_msi_data *msi_data = info->chip_data;
+ 	struct mvebu_icu *icu = msi_data->icu;
+ 
+ 	/* Check the count of the parameters in dt */
+diff --git a/drivers/leds/leds-lp8860.c b/drivers/leds/leds-lp8860.c
+index 7a136fd8172061..06196d851ade71 100644
+--- a/drivers/leds/leds-lp8860.c
++++ b/drivers/leds/leds-lp8860.c
+@@ -265,7 +265,7 @@ static int lp8860_init(struct lp8860_led *led)
+ 		goto out;
+ 	}
+ 
+-	reg_count = ARRAY_SIZE(lp8860_eeprom_disp_regs) / sizeof(lp8860_eeprom_disp_regs[0]);
++	reg_count = ARRAY_SIZE(lp8860_eeprom_disp_regs);
+ 	for (i = 0; i < reg_count; i++) {
+ 		ret = regmap_write(led->eeprom_regmap,
+ 				lp8860_eeprom_disp_regs[i].reg,
+diff --git a/drivers/mailbox/tegra-hsp.c b/drivers/mailbox/tegra-hsp.c
+index 19ef56cbcfd39d..46c921000a34cf 100644
+--- a/drivers/mailbox/tegra-hsp.c
++++ b/drivers/mailbox/tegra-hsp.c
+@@ -388,7 +388,6 @@ static void tegra_hsp_sm_recv32(struct tegra_hsp_channel *channel)
+ 	value = tegra_hsp_channel_readl(channel, HSP_SM_SHRD_MBOX);
+ 	value &= ~HSP_SM_SHRD_MBOX_FULL;
+ 	msg = (void *)(unsigned long)value;
+-	mbox_chan_received_data(channel->chan, msg);
+ 
+ 	/*
+ 	 * Need to clear all bits here since some producers, such as TCU, depend
+@@ -398,6 +397,8 @@ static void tegra_hsp_sm_recv32(struct tegra_hsp_channel *channel)
+ 	 * explicitly, so we have to make sure we cover all possible cases.
+ 	 */
+ 	tegra_hsp_channel_writel(channel, 0x0, HSP_SM_SHRD_MBOX);
++
++	mbox_chan_received_data(channel->chan, msg);
+ }
+ 
+ static const struct tegra_hsp_sm_ops tegra_hsp_sm_32bit_ops = {
+@@ -433,7 +434,6 @@ static void tegra_hsp_sm_recv128(struct tegra_hsp_channel *channel)
+ 	value[3] = tegra_hsp_channel_readl(channel, HSP_SHRD_MBOX_TYPE1_DATA3);
+ 
+ 	msg = (void *)(unsigned long)value;
+-	mbox_chan_received_data(channel->chan, msg);
+ 
+ 	/*
+ 	 * Clear data registers and tag.
+@@ -443,6 +443,8 @@ static void tegra_hsp_sm_recv128(struct tegra_hsp_channel *channel)
+ 	tegra_hsp_channel_writel(channel, 0x0, HSP_SHRD_MBOX_TYPE1_DATA2);
+ 	tegra_hsp_channel_writel(channel, 0x0, HSP_SHRD_MBOX_TYPE1_DATA3);
+ 	tegra_hsp_channel_writel(channel, 0x0, HSP_SHRD_MBOX_TYPE1_TAG);
++
++	mbox_chan_received_data(channel->chan, msg);
+ }
+ 
+ static const struct tegra_hsp_sm_ops tegra_hsp_sm_128bit_ops = {
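
Both tegra-hsp hunks apply the same ordering rule: fully clear the shared mailbox registers before invoking mbox_chan_received_data(), because the client callback may immediately write a reply through the same channel, and a clear performed afterwards would wipe it. A runnable toy model of why the order matters (the fake register and helper are illustrative only):

	#include <stdio.h>

	static unsigned int mbox_reg = 0x80000005;	/* FULL bit | payload */

	static void notify_client(unsigned int msg)
	{
		mbox_reg = 0x80000001;	/* callback replies via the same box */
		printf("got 0x%x\n", msg);
	}

	int main(void)
	{
		unsigned int value = mbox_reg & ~0x80000000u;	/* 1. latch  */
		mbox_reg = 0;					/* 2. clear  */
		notify_client(value);				/* 3. notify */
		printf("reg now 0x%x (reply preserved)\n", mbox_reg);
		return 0;
	}
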
+diff --git a/drivers/mailbox/zynqmp-ipi-mailbox.c b/drivers/mailbox/zynqmp-ipi-mailbox.c
+index 521d08b9ab47e3..d59fcb74b34794 100644
+--- a/drivers/mailbox/zynqmp-ipi-mailbox.c
++++ b/drivers/mailbox/zynqmp-ipi-mailbox.c
+@@ -905,7 +905,7 @@ static int zynqmp_ipi_probe(struct platform_device *pdev)
+ {
+ 	struct device *dev = &pdev->dev;
+ 	struct device_node *nc, *np = pdev->dev.of_node;
+-	struct zynqmp_ipi_pdata __percpu *pdata;
++	struct zynqmp_ipi_pdata *pdata;
+ 	struct of_phandle_args out_irq;
+ 	struct zynqmp_ipi_mbox *mbox;
+ 	int num_mboxes, ret = -EINVAL;
+diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
+index 1e9db8e4acdf65..0b1870a09e1fdc 100644
+--- a/drivers/md/Kconfig
++++ b/drivers/md/Kconfig
+@@ -61,6 +61,19 @@ config MD_BITMAP_FILE
+ 	  various kernel APIs and can only work with files on a file system not
+ 	  actually sitting on the MD device.
+ 
++config MD_LINEAR
++	tristate "Linear (append) mode"
++	depends on BLK_DEV_MD
++	help
++	  If you say Y here, then your multiple devices driver will be able to
++	  use the so-called linear mode, i.e. it will combine the hard disk
++	  partitions by simply appending one to the other.
++
++	  To compile this as a module, choose M here: the module
++	  will be called linear.
++
++	  If unsure, say Y.
++
+ config MD_RAID0
+ 	tristate "RAID-0 (striping) mode"
+ 	depends on BLK_DEV_MD
+diff --git a/drivers/md/Makefile b/drivers/md/Makefile
+index 476a214e4bdc26..87bdfc9fe14c55 100644
+--- a/drivers/md/Makefile
++++ b/drivers/md/Makefile
+@@ -29,12 +29,14 @@ dm-zoned-y	+= dm-zoned-target.o dm-zoned-metadata.o dm-zoned-reclaim.o
+ 
+ md-mod-y	+= md.o md-bitmap.o
+ raid456-y	+= raid5.o raid5-cache.o raid5-ppl.o
++linear-y       += md-linear.o
+ 
+ # Note: link order is important.  All raid personalities
+ # and must come before md.o, as they each initialise
+ # themselves, and md.o may use the personalities when it
+ # auto-initialised.
+ 
++obj-$(CONFIG_MD_LINEAR)		+= linear.o
+ obj-$(CONFIG_MD_RAID0)		+= raid0.o
+ obj-$(CONFIG_MD_RAID1)		+= raid1.o
+ obj-$(CONFIG_MD_RAID10)		+= raid10.o
+diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
+index 1ae2c71bb383b7..78c975d7cd5f42 100644
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -59,6 +59,7 @@ struct convert_context {
+ 	struct bio *bio_out;
+ 	struct bvec_iter iter_out;
+ 	atomic_t cc_pending;
++	unsigned int tag_offset;
+ 	u64 cc_sector;
+ 	union {
+ 		struct skcipher_request *req;
+@@ -1256,6 +1257,7 @@ static void crypt_convert_init(struct crypt_config *cc,
+ 	if (bio_out)
+ 		ctx->iter_out = bio_out->bi_iter;
+ 	ctx->cc_sector = sector + cc->iv_offset;
++	ctx->tag_offset = 0;
+ 	init_completion(&ctx->restart);
+ }
+ 
+@@ -1588,7 +1590,6 @@ static void crypt_free_req(struct crypt_config *cc, void *req, struct bio *base_
+ static blk_status_t crypt_convert(struct crypt_config *cc,
+ 			 struct convert_context *ctx, bool atomic, bool reset_pending)
+ {
+-	unsigned int tag_offset = 0;
+ 	unsigned int sector_step = cc->sector_size >> SECTOR_SHIFT;
+ 	int r;
+ 
+@@ -1611,9 +1612,9 @@ static blk_status_t crypt_convert(struct crypt_config *cc,
+ 		atomic_inc(&ctx->cc_pending);
+ 
+ 		if (crypt_integrity_aead(cc))
+-			r = crypt_convert_block_aead(cc, ctx, ctx->r.req_aead, tag_offset);
++			r = crypt_convert_block_aead(cc, ctx, ctx->r.req_aead, ctx->tag_offset);
+ 		else
+-			r = crypt_convert_block_skcipher(cc, ctx, ctx->r.req, tag_offset);
++			r = crypt_convert_block_skcipher(cc, ctx, ctx->r.req, ctx->tag_offset);
+ 
+ 		switch (r) {
+ 		/*
+@@ -1633,8 +1634,8 @@ static blk_status_t crypt_convert(struct crypt_config *cc,
+ 					 * exit and continue processing in a workqueue
+ 					 */
+ 					ctx->r.req = NULL;
++					ctx->tag_offset++;
+ 					ctx->cc_sector += sector_step;
+-					tag_offset++;
+ 					return BLK_STS_DEV_RESOURCE;
+ 				}
+ 			} else {
+@@ -1648,8 +1649,8 @@ static blk_status_t crypt_convert(struct crypt_config *cc,
+ 		 */
+ 		case -EINPROGRESS:
+ 			ctx->r.req = NULL;
++			ctx->tag_offset++;
+ 			ctx->cc_sector += sector_step;
+-			tag_offset++;
+ 			continue;
+ 		/*
+ 		 * The request was already processed (synchronously).
+@@ -1657,7 +1658,7 @@ static blk_status_t crypt_convert(struct crypt_config *cc,
+ 		case 0:
+ 			atomic_dec(&ctx->cc_pending);
+ 			ctx->cc_sector += sector_step;
+-			tag_offset++;
++			ctx->tag_offset++;
+ 			if (!atomic)
+ 				cond_resched();
+ 			continue;
+@@ -2092,7 +2093,6 @@ static void kcryptd_crypt_write_continue(struct work_struct *work)
+ 	struct crypt_config *cc = io->cc;
+ 	struct convert_context *ctx = &io->ctx;
+ 	int crypt_finished;
+-	sector_t sector = io->sector;
+ 	blk_status_t r;
+ 
+ 	wait_for_completion(&ctx->restart);
+@@ -2109,10 +2109,8 @@ static void kcryptd_crypt_write_continue(struct work_struct *work)
+ 	}
+ 
+ 	/* Encryption was already finished, submit io now */
+-	if (crypt_finished) {
++	if (crypt_finished)
+ 		kcryptd_crypt_write_io_submit(io, 0);
+-		io->sector = sector;
+-	}
+ 
+ 	crypt_dec_pending(io);
+ }
+@@ -2123,14 +2121,13 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
+ 	struct convert_context *ctx = &io->ctx;
+ 	struct bio *clone;
+ 	int crypt_finished;
+-	sector_t sector = io->sector;
+ 	blk_status_t r;
+ 
+ 	/*
+ 	 * Prevent io from disappearing until this function completes.
+ 	 */
+ 	crypt_inc_pending(io);
+-	crypt_convert_init(cc, ctx, NULL, io->base_bio, sector);
++	crypt_convert_init(cc, ctx, NULL, io->base_bio, io->sector);
+ 
+ 	clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
+ 	if (unlikely(!clone)) {
+@@ -2147,8 +2144,6 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
+ 		io->ctx.iter_in = clone->bi_iter;
+ 	}
+ 
+-	sector += bio_sectors(clone);
+-
+ 	crypt_inc_pending(io);
+ 	r = crypt_convert(cc, ctx,
+ 			  test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags), true);
+@@ -2172,10 +2167,8 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
+ 	}
+ 
+ 	/* Encryption was already finished, submit io now */
+-	if (crypt_finished) {
++	if (crypt_finished)
+ 		kcryptd_crypt_write_io_submit(io, 0);
+-		io->sector = sector;
+-	}
+ 
+ dec:
+ 	crypt_dec_pending(io);
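
Why tag_offset moved into struct convert_context: when crypt_convert() bails out with BLK_STS_DEV_RESOURCE and is resumed later from kcryptd_crypt_write_continue(), the integrity-tag position must carry over instead of restarting from a function-local zero. A condensed, runnable model of conversion state that outlives a single call (names are illustrative):

	#include <stdio.h>

	struct ctx { unsigned int tag_offset; };	/* persists across calls */

	/* process up to `budget` blocks; return 1 if work remains */
	static int convert(struct ctx *ctx, unsigned int total, unsigned int budget)
	{
		while (ctx->tag_offset < total && budget--)
			ctx->tag_offset++;	/* one tag slot per block */
		return ctx->tag_offset < total;
	}

	int main(void)
	{
		struct ctx ctx = { 0 };

		while (convert(&ctx, 8, 3))	/* pause/resume, like the workqueue path */
			printf("paused at tag %u\n", ctx.tag_offset);
		printf("done at tag %u\n", ctx.tag_offset);
		return 0;
	}
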
+diff --git a/drivers/md/md-autodetect.c b/drivers/md/md-autodetect.c
+index b2a00f213c2cd7..4b80165afd2331 100644
+--- a/drivers/md/md-autodetect.c
++++ b/drivers/md/md-autodetect.c
+@@ -49,6 +49,7 @@ static int md_setup_ents __initdata;
+  *             instead of just one.  -- KTK
+  * 18May2000: Added support for persistent-superblock arrays:
+  *             md=n,0,factor,fault,device-list   uses RAID0 for device n
++ *             md=n,-1,factor,fault,device-list  uses LINEAR for device n
+  *             md=n,device-list      reads a RAID superblock from the devices
+  *             elements in device-list are read by name_to_kdev_t so can be
+  *             a hex number or something like /dev/hda1 /dev/sdb
+@@ -87,7 +88,7 @@ static int __init md_setup(char *str)
+ 		md_setup_ents++;
+ 	switch (get_option(&str, &level)) {	/* RAID level */
+ 	case 2: /* could be 0 or -1.. */
+-		if (level == 0) {
++		if (level == 0 || level == LEVEL_LINEAR) {
+ 			if (get_option(&str, &factor) != 2 ||	/* Chunk Size */
+ 					get_option(&str, &fault) != 2) {
+ 				printk(KERN_WARNING "md: Too few arguments supplied to md=.\n");
+@@ -95,7 +96,10 @@ static int __init md_setup(char *str)
+ 			}
+ 			md_setup_args[ent].level = level;
+ 			md_setup_args[ent].chunk = 1 << (factor+12);
+-			pername = "raid0";
++			if (level == LEVEL_LINEAR)
++				pername = "linear";
++			else
++				pername = "raid0";
+ 			break;
+ 		}
+ 		fallthrough;
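With the md-autodetect hunk above, the legacy md= boot option now accepts -1 (LEVEL_LINEAR) in the position that previously only took 0 for RAID0. As a purely hypothetical example, a command line of

    md=0,-1,4,0,/dev/sda1,/dev/sdb1

would assemble md0 as a linear concatenation of the two partitions, with the chunk (rounding) value expanding to 1 << (4 + 12) = 65536.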
+diff --git a/drivers/md/md-linear.c b/drivers/md/md-linear.c
+new file mode 100644
+index 00000000000000..369aed044b409f
+--- /dev/null
++++ b/drivers/md/md-linear.c
+@@ -0,0 +1,352 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ * linear.c : Multiple Devices driver for Linux Copyright (C) 1994-96 Marc
++ * ZYNGIER <zyngier@ufr-info-p7.ibp.fr> or <maz@gloups.fdn.fr>
++ */
++
++#include <linux/blkdev.h>
++#include <linux/raid/md_u.h>
++#include <linux/seq_file.h>
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <trace/events/block.h>
++#include "md.h"
++
++struct dev_info {
++	struct md_rdev	*rdev;
++	sector_t	end_sector;
++};
++
++struct linear_conf {
++	struct rcu_head         rcu;
++	sector_t                array_sectors;
++	/* a copy of mddev->raid_disks */
++	int                     raid_disks;
++	struct dev_info         disks[] __counted_by(raid_disks);
++};
++
++/*
++ * find which device holds a particular offset
++ */
++static inline struct dev_info *which_dev(struct mddev *mddev, sector_t sector)
++{
++	int lo, mid, hi;
++	struct linear_conf *conf;
++
++	lo = 0;
++	hi = mddev->raid_disks - 1;
++	conf = mddev->private;
++
++	/*
++	 * Binary Search
++	 */
++
++	while (hi > lo) {
++
++		mid = (hi + lo) / 2;
++		if (sector < conf->disks[mid].end_sector)
++			hi = mid;
++		else
++			lo = mid + 1;
++	}
++
++	return conf->disks + lo;
++}
++
++static sector_t linear_size(struct mddev *mddev, sector_t sectors, int raid_disks)
++{
++	struct linear_conf *conf;
++	sector_t array_sectors;
++
++	conf = mddev->private;
++	WARN_ONCE(sectors || raid_disks,
++		  "%s does not support generic reshape\n", __func__);
++	array_sectors = conf->array_sectors;
++
++	return array_sectors;
++}
++
++static int linear_set_limits(struct mddev *mddev)
++{
++	struct queue_limits lim;
++	int err;
++
++	md_init_stacking_limits(&lim);
++	lim.max_hw_sectors = mddev->chunk_sectors;
++	lim.max_write_zeroes_sectors = mddev->chunk_sectors;
++	lim.io_min = mddev->chunk_sectors << 9;
++	err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
++	if (err)
++		return err;
++
++	return queue_limits_set(mddev->gendisk->queue, &lim);
++}
++
++static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks)
++{
++	struct linear_conf *conf;
++	struct md_rdev *rdev;
++	int ret = -EINVAL;
++	int cnt;
++	int i;
++
++	conf = kzalloc(struct_size(conf, disks, raid_disks), GFP_KERNEL);
++	if (!conf)
++		return ERR_PTR(-ENOMEM);
++
++	/*
++	 * conf->raid_disks is a copy of mddev->raid_disks. The reason to
++	 * keep a copy of mddev->raid_disks in struct linear_conf is that
++	 * mddev->raid_disks may not be consistent with the number of
++	 * pointers in conf->disks[] when it is updated in linear_add()
++	 * and used to iterate the old conf->disks[] array in
++	 * linear_congested(). Here conf->raid_disks is always consistent
++	 * with the number of pointers in the conf->disks[] array, and
++	 * mddev->private is updated with rcu_assign_pointer() in
++	 * linear_add(), so such a race can be avoided.
++	 */
++	conf->raid_disks = raid_disks;
++
++	cnt = 0;
++	conf->array_sectors = 0;
++
++	rdev_for_each(rdev, mddev) {
++		int j = rdev->raid_disk;
++		struct dev_info *disk = conf->disks + j;
++		sector_t sectors;
++
++		if (j < 0 || j >= raid_disks || disk->rdev) {
++			pr_warn("md/linear:%s: disk numbering problem. Aborting!\n",
++				mdname(mddev));
++			goto out;
++		}
++
++		disk->rdev = rdev;
++		if (mddev->chunk_sectors) {
++			sectors = rdev->sectors;
++			sector_div(sectors, mddev->chunk_sectors);
++			rdev->sectors = sectors * mddev->chunk_sectors;
++		}
++
++		conf->array_sectors += rdev->sectors;
++		cnt++;
++	}
++	if (cnt != raid_disks) {
++		pr_warn("md/linear:%s: not enough drives present. Aborting!\n",
++			mdname(mddev));
++		goto out;
++	}
++
++	/*
++	 * Here we calculate the device offsets.
++	 */
++	conf->disks[0].end_sector = conf->disks[0].rdev->sectors;
++
++	for (i = 1; i < raid_disks; i++)
++		conf->disks[i].end_sector =
++			conf->disks[i-1].end_sector +
++			conf->disks[i].rdev->sectors;
++
++	if (!mddev_is_dm(mddev)) {
++		ret = linear_set_limits(mddev);
++		if (ret)
++			goto out;
++	}
++
++	return conf;
++
++out:
++	kfree(conf);
++	return ERR_PTR(ret);
++}
++
++static int linear_run(struct mddev *mddev)
++{
++	struct linear_conf *conf;
++	int ret;
++
++	if (md_check_no_bitmap(mddev))
++		return -EINVAL;
++
++	conf = linear_conf(mddev, mddev->raid_disks);
++	if (IS_ERR(conf))
++		return PTR_ERR(conf);
++
++	mddev->private = conf;
++	md_set_array_sectors(mddev, linear_size(mddev, 0, 0));
++
++	ret =  md_integrity_register(mddev);
++	if (ret) {
++		kfree(conf);
++		mddev->private = NULL;
++	}
++	return ret;
++}
++
++static int linear_add(struct mddev *mddev, struct md_rdev *rdev)
++{
++	/* Adding a drive to a linear array allows the array to grow.
++	 * It is permitted if the new drive has a matching superblock
++	 * already on it, with raid_disk equal to raid_disks.
++	 * It is achieved by creating a new linear_private_data structure
++	 * and swapping it in in-place of the current one.
++	 * The current one is never freed until the array is stopped.
++	 * This avoids races.
++	 */
++	struct linear_conf *newconf, *oldconf;
++
++	if (rdev->saved_raid_disk != mddev->raid_disks)
++		return -EINVAL;
++
++	rdev->raid_disk = rdev->saved_raid_disk;
++	rdev->saved_raid_disk = -1;
++
++	newconf = linear_conf(mddev, mddev->raid_disks + 1);
++	if (IS_ERR(newconf))
++		return PTR_ERR(newconf);
++
++	/* newconf->raid_disks already keeps a copy of the increased
++	 * value of mddev->raid_disks; WARN_ONCE() is just used to make
++	 * sure of this. It is possible that oldconf is still referenced
++	 * in linear_congested(), therefore kfree_rcu() is used so that
++	 * oldconf is not freed until no one references it anymore.
++	 */
++	oldconf = rcu_dereference_protected(mddev->private,
++			lockdep_is_held(&mddev->reconfig_mutex));
++	mddev->raid_disks++;
++	WARN_ONCE(mddev->raid_disks != newconf->raid_disks,
++		"copied raid_disks doesn't match mddev->raid_disks");
++	rcu_assign_pointer(mddev->private, newconf);
++	md_set_array_sectors(mddev, linear_size(mddev, 0, 0));
++	set_capacity_and_notify(mddev->gendisk, mddev->array_sectors);
++	kfree_rcu(oldconf, rcu);
++	return 0;
++}
++
++static void linear_free(struct mddev *mddev, void *priv)
++{
++	struct linear_conf *conf = priv;
++
++	kfree(conf);
++}
++
++static bool linear_make_request(struct mddev *mddev, struct bio *bio)
++{
++	struct dev_info *tmp_dev;
++	sector_t start_sector, end_sector, data_offset;
++	sector_t bio_sector = bio->bi_iter.bi_sector;
++
++	if (unlikely(bio->bi_opf & REQ_PREFLUSH)
++	    && md_flush_request(mddev, bio))
++		return true;
++
++	tmp_dev = which_dev(mddev, bio_sector);
++	start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors;
++	end_sector = tmp_dev->end_sector;
++	data_offset = tmp_dev->rdev->data_offset;
++
++	if (unlikely(bio_sector >= end_sector ||
++		     bio_sector < start_sector))
++		goto out_of_bounds;
++
++	if (unlikely(is_rdev_broken(tmp_dev->rdev))) {
++		md_error(mddev, tmp_dev->rdev);
++		bio_io_error(bio);
++		return true;
++	}
++
++	if (unlikely(bio_end_sector(bio) > end_sector)) {
++		/* This bio crosses a device boundary, so we have to split it */
++		struct bio *split = bio_split(bio, end_sector - bio_sector,
++					      GFP_NOIO, &mddev->bio_set);
++
++		if (IS_ERR(split)) {
++			bio->bi_status = errno_to_blk_status(PTR_ERR(split));
++			bio_endio(bio);
++			return true;
++		}
++
++		bio_chain(split, bio);
++		submit_bio_noacct(bio);
++		bio = split;
++	}
++
++	md_account_bio(mddev, &bio);
++	bio_set_dev(bio, tmp_dev->rdev->bdev);
++	bio->bi_iter.bi_sector = bio->bi_iter.bi_sector -
++		start_sector + data_offset;
++
++	if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
++		     !bdev_max_discard_sectors(bio->bi_bdev))) {
++		/* Just ignore it */
++		bio_endio(bio);
++	} else {
++		if (mddev->gendisk)
++			trace_block_bio_remap(bio, disk_devt(mddev->gendisk),
++					      bio_sector);
++		mddev_check_write_zeroes(mddev, bio);
++		submit_bio_noacct(bio);
++	}
++	return true;
++
++out_of_bounds:
++	pr_err("md/linear:%s: make_request: Sector %llu out of bounds on dev %pg: %llu sectors, offset %llu\n",
++	       mdname(mddev),
++	       (unsigned long long)bio->bi_iter.bi_sector,
++	       tmp_dev->rdev->bdev,
++	       (unsigned long long)tmp_dev->rdev->sectors,
++	       (unsigned long long)start_sector);
++	bio_io_error(bio);
++	return true;
++}
++
++static void linear_status(struct seq_file *seq, struct mddev *mddev)
++{
++	seq_printf(seq, " %dk rounding", mddev->chunk_sectors / 2);
++}
++
++static void linear_error(struct mddev *mddev, struct md_rdev *rdev)
++{
++	if (!test_and_set_bit(MD_BROKEN, &mddev->flags)) {
++		char *md_name = mdname(mddev);
++
++		pr_crit("md/linear%s: Disk failure on %pg detected, failing array.\n",
++			md_name, rdev->bdev);
++	}
++}
++
++static void linear_quiesce(struct mddev *mddev, int state)
++{
++}
++
++static struct md_personality linear_personality = {
++	.name		= "linear",
++	.level		= LEVEL_LINEAR,
++	.owner		= THIS_MODULE,
++	.make_request	= linear_make_request,
++	.run		= linear_run,
++	.free		= linear_free,
++	.status		= linear_status,
++	.hot_add_disk	= linear_add,
++	.size		= linear_size,
++	.quiesce	= linear_quiesce,
++	.error_handler	= linear_error,
++};
++
++static int __init linear_init(void)
++{
++	return register_md_personality(&linear_personality);
++}
++
++static void linear_exit(void)
++{
++	unregister_md_personality(&linear_personality);
++}
++
++module_init(linear_init);
++module_exit(linear_exit);
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("Linear device concatenation personality for MD (deprecated)");
++MODULE_ALIAS("md-personality-1"); /* LINEAR - deprecated */
++MODULE_ALIAS("md-linear");
++MODULE_ALIAS("md-level--1");
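which_dev() above maps a sector to a member device by binary search over the cumulative, exclusive end_sector offsets. A standalone sketch of the same search over made-up device sizes:

    #include <stdio.h>

    /* Cumulative exclusive end offsets for three hypothetical members:
     * dev0 = [0, 1000), dev1 = [1000, 1800), dev2 = [1800, 4000).
     */
    static const unsigned long long end_sector[] = { 1000, 1800, 4000 };

    static int which_dev(unsigned long long sector)
    {
        int lo = 0, hi = 2;

        while (hi > lo) {
            int mid = (hi + lo) / 2;

            if (sector < end_sector[mid])
                hi = mid;
            else
                lo = mid + 1;
        }
        return lo;
    }

    int main(void)
    {
        /* prints: 0 1 2 */
        printf("%d %d %d\n", which_dev(0), which_dev(1799), which_dev(1800));
        return 0;
    }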
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 44c4c518430d9b..fff28aea23c89e 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -8124,7 +8124,7 @@ void md_error(struct mddev *mddev, struct md_rdev *rdev)
+ 		return;
+ 	mddev->pers->error_handler(mddev, rdev);
+ 
+-	if (mddev->pers->level == 0)
++	if (mddev->pers->level == 0 || mddev->pers->level == LEVEL_LINEAR)
+ 		return;
+ 
+ 	if (mddev->degraded && !test_bit(MD_BROKEN, &mddev->flags))
+diff --git a/drivers/media/i2c/ccs/ccs-core.c b/drivers/media/i2c/ccs/ccs-core.c
+index e1ae0f9fad4326..cb21df46bab169 100644
+--- a/drivers/media/i2c/ccs/ccs-core.c
++++ b/drivers/media/i2c/ccs/ccs-core.c
+@@ -3566,15 +3566,15 @@ static int ccs_probe(struct i2c_client *client)
+ out_cleanup:
+ 	ccs_cleanup(sensor);
+ 
++out_free_ccs_limits:
++	kfree(sensor->ccs_limits);
++
+ out_release_mdata:
+ 	kvfree(sensor->mdata.backing);
+ 
+ out_release_sdata:
+ 	kvfree(sensor->sdata.backing);
+ 
+-out_free_ccs_limits:
+-	kfree(sensor->ccs_limits);
+-
+ out_power_off:
+ 	ccs_power_off(&client->dev);
+ 	mutex_destroy(&sensor->mutex);
+diff --git a/drivers/media/i2c/ccs/ccs-data.c b/drivers/media/i2c/ccs/ccs-data.c
+index 08400edf77ced1..2591dba51e17e2 100644
+--- a/drivers/media/i2c/ccs/ccs-data.c
++++ b/drivers/media/i2c/ccs/ccs-data.c
+@@ -10,6 +10,7 @@
+ #include <linux/limits.h>
+ #include <linux/mm.h>
+ #include <linux/slab.h>
++#include <linux/string.h>
+ 
+ #include "ccs-data-defs.h"
+ 
+@@ -97,7 +98,7 @@ ccs_data_parse_length_specifier(const struct __ccs_data_length_specifier *__len,
+ 		plen = ((size_t)
+ 			(__len3->length[0] &
+ 			 ((1 << CCS_DATA_LENGTH_SPECIFIER_SIZE_SHIFT) - 1))
+-			<< 16) + (__len3->length[0] << 8) + __len3->length[1];
++			<< 16) + (__len3->length[1] << 8) + __len3->length[2];
+ 		break;
+ 	}
+ 	default:
+@@ -948,15 +949,15 @@ int ccs_data_parse(struct ccs_data_container *ccsdata, const void *data,
+ 
+ 	rval = __ccs_data_parse(&bin, ccsdata, data, len, dev, verbose);
+ 	if (rval)
+-		return rval;
++		goto out_cleanup;
+ 
+ 	rval = bin_backing_alloc(&bin);
+ 	if (rval)
+-		return rval;
++		goto out_cleanup;
+ 
+ 	rval = __ccs_data_parse(&bin, ccsdata, data, len, dev, false);
+ 	if (rval)
+-		goto out_free;
++		goto out_cleanup;
+ 
+ 	if (verbose && ccsdata->version)
+ 		print_ccs_data_version(dev, ccsdata->version);
+@@ -965,15 +966,16 @@ int ccs_data_parse(struct ccs_data_container *ccsdata, const void *data,
+ 		rval = -EPROTO;
+ 		dev_dbg(dev, "parsing mismatch; base %p; now %p; end %p\n",
+ 			bin.base, bin.now, bin.end);
+-		goto out_free;
++		goto out_cleanup;
+ 	}
+ 
+ 	ccsdata->backing = bin.base;
+ 
+ 	return 0;
+ 
+-out_free:
++out_cleanup:
+ 	kvfree(bin.base);
++	memset(ccsdata, 0, sizeof(*ccsdata));
+ 
+ 	return rval;
+ }
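Two fixes land in ccs-data.c above. First, the 3-byte length specifier is now assembled from three consecutive bytes rather than reading length[0] twice: with length bytes 0x01 0x02 0x03 (after masking the size-shift bits out of byte 0), plen becomes (0x01 << 16) + (0x02 << 8) + 0x03 = 0x010203, where the old expression yielded (0x01 << 16) + (0x01 << 8) + 0x02 = 0x010102. Second, every failure path in ccs_data_parse() now funnels through out_cleanup, which frees the backing buffer and memset()s the container, so callers never see a half-initialized *ccsdata.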
+diff --git a/drivers/media/i2c/ds90ub913.c b/drivers/media/i2c/ds90ub913.c
+index 8eed4a200fd89b..b5375d73662996 100644
+--- a/drivers/media/i2c/ds90ub913.c
++++ b/drivers/media/i2c/ds90ub913.c
+@@ -793,7 +793,6 @@ static void ub913_subdev_uninit(struct ub913_data *priv)
+ 	v4l2_async_unregister_subdev(&priv->sd);
+ 	ub913_v4l2_nf_unregister(priv);
+ 	v4l2_subdev_cleanup(&priv->sd);
+-	fwnode_handle_put(priv->sd.fwnode);
+ 	media_entity_cleanup(&priv->sd.entity);
+ }
+ 
+diff --git a/drivers/media/i2c/ds90ub953.c b/drivers/media/i2c/ds90ub953.c
+index 16f88db1498162..10daecf6f45798 100644
+--- a/drivers/media/i2c/ds90ub953.c
++++ b/drivers/media/i2c/ds90ub953.c
+@@ -1291,7 +1291,6 @@ static void ub953_subdev_uninit(struct ub953_data *priv)
+ 	v4l2_async_unregister_subdev(&priv->sd);
+ 	ub953_v4l2_notifier_unregister(priv);
+ 	v4l2_subdev_cleanup(&priv->sd);
+-	fwnode_handle_put(priv->sd.fwnode);
+ 	media_entity_cleanup(&priv->sd.entity);
+ }
+ 
+diff --git a/drivers/media/i2c/ds90ub960.c b/drivers/media/i2c/ds90ub960.c
+index 58424d8f72af03..432457a761b116 100644
+--- a/drivers/media/i2c/ds90ub960.c
++++ b/drivers/media/i2c/ds90ub960.c
+@@ -352,6 +352,8 @@
+ 
+ #define UB960_SR_I2C_RX_ID(n)			(0xf8 + (n)) /* < UB960_FPD_RX_NPORTS */
+ 
++#define UB9702_SR_REFCLK_FREQ			0x3d
++
+ /* Indirect register blocks */
+ #define UB960_IND_TARGET_PAT_GEN		0x00
+ #define UB960_IND_TARGET_RX_ANA(n)		(0x01 + (n))
+@@ -1575,16 +1577,24 @@ static int ub960_rxport_wait_locks(struct ub960_data *priv,
+ 
+ 		ub960_rxport_read16(priv, nport, UB960_RR_RX_FREQ_HIGH, &v);
+ 
+-		ret = ub960_rxport_get_strobe_pos(priv, nport, &strobe_pos);
+-		if (ret)
+-			return ret;
++		if (priv->hw_data->is_ub9702) {
++			dev_dbg(dev, "\trx%u: locked, freq %llu Hz\n",
++				nport, (v * 1000000ULL) >> 8);
++		} else {
++			ret = ub960_rxport_get_strobe_pos(priv, nport,
++							  &strobe_pos);
++			if (ret)
++				return ret;
+ 
+-		ret = ub960_rxport_get_eq_level(priv, nport, &eq_level);
+-		if (ret)
+-			return ret;
++			ret = ub960_rxport_get_eq_level(priv, nport, &eq_level);
++			if (ret)
++				return ret;
+ 
+-		dev_dbg(dev, "\trx%u: locked, SP: %d, EQ: %u, freq %llu Hz\n",
+-			nport, strobe_pos, eq_level, (v * 1000000ULL) >> 8);
++			dev_dbg(dev,
++				"\trx%u: locked, SP: %d, EQ: %u, freq %llu Hz\n",
++				nport, strobe_pos, eq_level,
++				(v * 1000000ULL) >> 8);
++		}
+ 	}
+ 
+ 	return 0;
+@@ -2523,7 +2533,7 @@ static int ub960_configure_ports_for_streaming(struct ub960_data *priv,
+ 				for (i = 0; i < 8; i++)
+ 					ub960_rxport_write(priv, nport,
+ 							   UB960_RR_VC_ID_MAP(i),
+-							   nport);
++							   (nport << 4) | nport);
+ 			}
+ 
+ 			break;
+@@ -2940,6 +2950,54 @@ static const struct v4l2_subdev_pad_ops ub960_pad_ops = {
+ 	.set_fmt = ub960_set_fmt,
+ };
+ 
++static void ub960_log_status_ub960_sp_eq(struct ub960_data *priv,
++					 unsigned int nport)
++{
++	struct device *dev = &priv->client->dev;
++	u8 eq_level;
++	s8 strobe_pos;
++	u8 v = 0;
++
++	/* Strobe */
++
++	ub960_read(priv, UB960_XR_AEQ_CTL1, &v);
++
++	dev_info(dev, "\t%s strobe\n",
++		 (v & UB960_XR_AEQ_CTL1_AEQ_SFILTER_EN) ? "Adaptive" :
++							  "Manual");
++
++	if (v & UB960_XR_AEQ_CTL1_AEQ_SFILTER_EN) {
++		ub960_read(priv, UB960_XR_SFILTER_CFG, &v);
++
++		dev_info(dev, "\tStrobe range [%d, %d]\n",
++			 ((v >> UB960_XR_SFILTER_CFG_SFILTER_MIN_SHIFT) & 0xf) - 7,
++			 ((v >> UB960_XR_SFILTER_CFG_SFILTER_MAX_SHIFT) & 0xf) - 7);
++	}
++
++	ub960_rxport_get_strobe_pos(priv, nport, &strobe_pos);
++
++	dev_info(dev, "\tStrobe pos %d\n", strobe_pos);
++
++	/* EQ */
++
++	ub960_rxport_read(priv, nport, UB960_RR_AEQ_BYPASS, &v);
++
++	dev_info(dev, "\t%s EQ\n",
++		 (v & UB960_RR_AEQ_BYPASS_ENABLE) ? "Manual" :
++						    "Adaptive");
++
++	if (!(v & UB960_RR_AEQ_BYPASS_ENABLE)) {
++		ub960_rxport_read(priv, nport, UB960_RR_AEQ_MIN_MAX, &v);
++
++		dev_info(dev, "\tEQ range [%u, %u]\n",
++			 (v >> UB960_RR_AEQ_MIN_MAX_AEQ_FLOOR_SHIFT) & 0xf,
++			 (v >> UB960_RR_AEQ_MIN_MAX_AEQ_MAX_SHIFT) & 0xf);
++	}
++
++	if (ub960_rxport_get_eq_level(priv, nport, &eq_level) == 0)
++		dev_info(dev, "\tEQ level %u\n", eq_level);
++}
++
+ static int ub960_log_status(struct v4l2_subdev *sd)
+ {
+ 	struct ub960_data *priv = sd_to_ub960(sd);
+@@ -2987,8 +3045,6 @@ static int ub960_log_status(struct v4l2_subdev *sd)
+ 
+ 	for (nport = 0; nport < priv->hw_data->num_rxports; nport++) {
+ 		struct ub960_rxport *rxport = priv->rxports[nport];
+-		u8 eq_level;
+-		s8 strobe_pos;
+ 		unsigned int i;
+ 
+ 		dev_info(dev, "RX %u\n", nport);
+@@ -3024,44 +3080,8 @@ static int ub960_log_status(struct v4l2_subdev *sd)
+ 		ub960_rxport_read(priv, nport, UB960_RR_CSI_ERR_COUNTER, &v);
+ 		dev_info(dev, "\tcsi_err_counter %u\n", v);
+ 
+-		/* Strobe */
+-
+-		ub960_read(priv, UB960_XR_AEQ_CTL1, &v);
+-
+-		dev_info(dev, "\t%s strobe\n",
+-			 (v & UB960_XR_AEQ_CTL1_AEQ_SFILTER_EN) ? "Adaptive" :
+-								  "Manual");
+-
+-		if (v & UB960_XR_AEQ_CTL1_AEQ_SFILTER_EN) {
+-			ub960_read(priv, UB960_XR_SFILTER_CFG, &v);
+-
+-			dev_info(dev, "\tStrobe range [%d, %d]\n",
+-				 ((v >> UB960_XR_SFILTER_CFG_SFILTER_MIN_SHIFT) & 0xf) - 7,
+-				 ((v >> UB960_XR_SFILTER_CFG_SFILTER_MAX_SHIFT) & 0xf) - 7);
+-		}
+-
+-		ub960_rxport_get_strobe_pos(priv, nport, &strobe_pos);
+-
+-		dev_info(dev, "\tStrobe pos %d\n", strobe_pos);
+-
+-		/* EQ */
+-
+-		ub960_rxport_read(priv, nport, UB960_RR_AEQ_BYPASS, &v);
+-
+-		dev_info(dev, "\t%s EQ\n",
+-			 (v & UB960_RR_AEQ_BYPASS_ENABLE) ? "Manual" :
+-							    "Adaptive");
+-
+-		if (!(v & UB960_RR_AEQ_BYPASS_ENABLE)) {
+-			ub960_rxport_read(priv, nport, UB960_RR_AEQ_MIN_MAX, &v);
+-
+-			dev_info(dev, "\tEQ range [%u, %u]\n",
+-				 (v >> UB960_RR_AEQ_MIN_MAX_AEQ_FLOOR_SHIFT) & 0xf,
+-				 (v >> UB960_RR_AEQ_MIN_MAX_AEQ_MAX_SHIFT) & 0xf);
+-		}
+-
+-		if (ub960_rxport_get_eq_level(priv, nport, &eq_level) == 0)
+-			dev_info(dev, "\tEQ level %u\n", eq_level);
++		if (!priv->hw_data->is_ub9702)
++			ub960_log_status_ub960_sp_eq(priv, nport);
+ 
+ 		/* GPIOs */
+ 		for (i = 0; i < UB960_NUM_BC_GPIOS; i++) {
+@@ -3837,7 +3857,10 @@ static int ub960_enable_core_hw(struct ub960_data *priv)
+ 	if (ret)
+ 		goto err_pd_gpio;
+ 
+-	ret = ub960_read(priv, UB960_XR_REFCLK_FREQ, &refclk_freq);
++	if (priv->hw_data->is_ub9702)
++		ret = ub960_read(priv, UB9702_SR_REFCLK_FREQ, &refclk_freq);
++	else
++		ret = ub960_read(priv, UB960_XR_REFCLK_FREQ, &refclk_freq);
+ 	if (ret)
+ 		goto err_pd_gpio;
+ 
+diff --git a/drivers/media/i2c/imx296.c b/drivers/media/i2c/imx296.c
+index 83149fa729c424..f3bec16b527c44 100644
+--- a/drivers/media/i2c/imx296.c
++++ b/drivers/media/i2c/imx296.c
+@@ -954,6 +954,8 @@ static int imx296_identify_model(struct imx296 *sensor)
+ 		return ret;
+ 	}
+ 
++	usleep_range(2000, 5000);
++
+ 	ret = imx296_read(sensor, IMX296_SENSOR_INFO);
+ 	if (ret < 0) {
+ 		dev_err(sensor->dev, "failed to read sensor information (%d)\n",
+diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
+index c1d3fce4a7d383..8566bc2edde978 100644
+--- a/drivers/media/i2c/ov5640.c
++++ b/drivers/media/i2c/ov5640.c
+@@ -1982,6 +1982,7 @@ static int ov5640_get_light_freq(struct ov5640_dev *sensor)
+ 			light_freq = 50;
+ 		} else {
+ 			/* 60Hz */
++			light_freq = 60;
+ 		}
+ 	}
+ 
+diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys.c b/drivers/media/pci/intel/ipu6/ipu6-isys.c
+index c85e056cb904b2..17bc8cabcbdb59 100644
+--- a/drivers/media/pci/intel/ipu6/ipu6-isys.c
++++ b/drivers/media/pci/intel/ipu6/ipu6-isys.c
+@@ -1133,6 +1133,7 @@ static int isys_probe(struct auxiliary_device *auxdev,
+ free_fw_msg_bufs:
+ 	free_fw_msg_bufs(isys);
+ out_remove_pkg_dir_shared_buffer:
++	cpu_latency_qos_remove_request(&isys->pm_qos);
+ 	if (!isp->secure_mode)
+ 		ipu6_cpd_free_pkg_dir(adev);
+ remove_shared_buffer:
+diff --git a/drivers/media/platform/marvell/mmp-driver.c b/drivers/media/platform/marvell/mmp-driver.c
+index ff9d151121d5eb..4fa171d469cabc 100644
+--- a/drivers/media/platform/marvell/mmp-driver.c
++++ b/drivers/media/platform/marvell/mmp-driver.c
+@@ -231,13 +231,23 @@ static int mmpcam_probe(struct platform_device *pdev)
+ 
+ 	mcam_init_clk(mcam);
+ 
++	/*
++	 * Register with V4L.
++	 */
++
++	ret = v4l2_device_register(mcam->dev, &mcam->v4l2_dev);
++	if (ret)
++		return ret;
++
+ 	/*
+ 	 * Create a match of the sensor against its OF node.
+ 	 */
+ 	ep = fwnode_graph_get_next_endpoint(of_fwnode_handle(pdev->dev.of_node),
+ 					    NULL);
+-	if (!ep)
+-		return -ENODEV;
++	if (!ep) {
++		ret = -ENODEV;
++		goto out_v4l2_device_unregister;
++	}
+ 
+ 	v4l2_async_nf_init(&mcam->notifier, &mcam->v4l2_dev);
+ 
+@@ -246,7 +256,7 @@ static int mmpcam_probe(struct platform_device *pdev)
+ 	fwnode_handle_put(ep);
+ 	if (IS_ERR(asd)) {
+ 		ret = PTR_ERR(asd);
+-		goto out;
++		goto out_v4l2_device_unregister;
+ 	}
+ 
+ 	/*
+@@ -254,7 +264,7 @@ static int mmpcam_probe(struct platform_device *pdev)
+ 	 */
+ 	ret = mccic_register(mcam);
+ 	if (ret)
+-		goto out;
++		goto out_v4l2_device_unregister;
+ 
+ 	/*
+ 	 * Add OF clock provider.
+@@ -283,6 +293,8 @@ static int mmpcam_probe(struct platform_device *pdev)
+ 	return 0;
+ out:
+ 	mccic_shutdown(mcam);
++out_v4l2_device_unregister:
++	v4l2_device_unregister(&mcam->v4l2_dev);
+ 
+ 	return ret;
+ }
+@@ -293,6 +305,7 @@ static void mmpcam_remove(struct platform_device *pdev)
+ 	struct mcam_camera *mcam = &cam->mcam;
+ 
+ 	mccic_shutdown(mcam);
++	v4l2_device_unregister(&mcam->v4l2_dev);
+ 	pm_runtime_force_suspend(mcam->dev);
+ }
+ 
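The mmp-driver hunks register the V4L2 device first and give every later failure path an unwind label, so resources are released in the reverse order of acquisition. A generic sketch of the ladder, with stub names standing in for v4l2_device_register() and mccic_register():

    /* Sketch of goto-ladder unwinding; register_a()/register_b() are stubs. */
    static int register_a(void) { return 0; }
    static void unregister_a(void) { }
    static int register_b(void) { return 0; }

    static int probe_sketch(void)
    {
        int ret;

        ret = register_a();          /* e.g. v4l2_device_register() */
        if (ret)
            return ret;

        ret = register_b();          /* e.g. mccic_register() */
        if (ret)
            goto out_unregister_a;

        return 0;

    out_unregister_a:
        unregister_a();              /* e.g. v4l2_device_unregister() */
        return ret;
    }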
+diff --git a/drivers/media/platform/nuvoton/npcm-video.c b/drivers/media/platform/nuvoton/npcm-video.c
+index 60fbb91400355c..db454c9d2641f8 100644
+--- a/drivers/media/platform/nuvoton/npcm-video.c
++++ b/drivers/media/platform/nuvoton/npcm-video.c
+@@ -1667,9 +1667,9 @@ static int npcm_video_ece_init(struct npcm_video *video)
+ 		dev_info(dev, "Support HEXTILE pixel format\n");
+ 
+ 		ece_pdev = of_find_device_by_node(ece_node);
+-		if (IS_ERR(ece_pdev)) {
++		if (!ece_pdev) {
+ 			dev_err(dev, "Failed to find ECE device\n");
+-			return PTR_ERR(ece_pdev);
++			return -ENODEV;
+ 		}
+ 		of_node_put(ece_node);
+ 
+diff --git a/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-bytecap.c b/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-bytecap.c
+index 9f768f011fa25a..0f6918f4db383f 100644
+--- a/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-bytecap.c
++++ b/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-bytecap.c
+@@ -893,7 +893,7 @@ struct dcmipp_ent_device *dcmipp_bytecap_ent_init(struct device *dev,
+ 	q->dev = dev;
+ 
+ 	/* DCMIPP requires 16 bytes aligned buffers */
+-	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32) & ~0x0f);
++	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ 	if (ret) {
+ 		dev_err(dev, "Failed to set DMA mask\n");
+ 		goto err_mutex_destroy;
+diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
+index 4fe26e82e3d1c1..4837d8df9c0386 100644
+--- a/drivers/media/usb/uvc/uvc_ctrl.c
++++ b/drivers/media/usb/uvc/uvc_ctrl.c
+@@ -1579,6 +1579,40 @@ static void uvc_ctrl_send_slave_event(struct uvc_video_chain *chain,
+ 	uvc_ctrl_send_event(chain, handle, ctrl, mapping, val, changes);
+ }
+ 
++static void uvc_ctrl_set_handle(struct uvc_fh *handle, struct uvc_control *ctrl,
++				struct uvc_fh *new_handle)
++{
++	lockdep_assert_held(&handle->chain->ctrl_mutex);
++
++	if (new_handle) {
++		if (ctrl->handle)
++			dev_warn_ratelimited(&handle->stream->dev->udev->dev,
++					     "UVC non compliance: Setting an async control with a pending operation.");
++
++		if (new_handle == ctrl->handle)
++			return;
++
++		if (ctrl->handle) {
++			WARN_ON(!ctrl->handle->pending_async_ctrls);
++			if (ctrl->handle->pending_async_ctrls)
++				ctrl->handle->pending_async_ctrls--;
++		}
++
++		ctrl->handle = new_handle;
++		handle->pending_async_ctrls++;
++		return;
++	}
++
++	/* Cannot clear the handle for a control not owned by us. */
++	if (WARN_ON(ctrl->handle != handle))
++		return;
++
++	ctrl->handle = NULL;
++	if (WARN_ON(!handle->pending_async_ctrls))
++		return;
++	handle->pending_async_ctrls--;
++}
++
+ void uvc_ctrl_status_event(struct uvc_video_chain *chain,
+ 			   struct uvc_control *ctrl, const u8 *data)
+ {
+@@ -1589,7 +1623,8 @@ void uvc_ctrl_status_event(struct uvc_video_chain *chain,
+ 	mutex_lock(&chain->ctrl_mutex);
+ 
+ 	handle = ctrl->handle;
+-	ctrl->handle = NULL;
++	if (handle)
++		uvc_ctrl_set_handle(handle, ctrl, NULL);
+ 
+ 	list_for_each_entry(mapping, &ctrl->info.mappings, list) {
+ 		s32 value = __uvc_ctrl_get_value(mapping, data);
+@@ -1640,10 +1675,8 @@ bool uvc_ctrl_status_event_async(struct urb *urb, struct uvc_video_chain *chain,
+ 	struct uvc_device *dev = chain->dev;
+ 	struct uvc_ctrl_work *w = &dev->async_ctrl;
+ 
+-	if (list_empty(&ctrl->info.mappings)) {
+-		ctrl->handle = NULL;
++	if (list_empty(&ctrl->info.mappings))
+ 		return false;
+-	}
+ 
+ 	w->data = data;
+ 	w->urb = urb;
+@@ -1673,13 +1706,13 @@ static void uvc_ctrl_send_events(struct uvc_fh *handle,
+ {
+ 	struct uvc_control_mapping *mapping;
+ 	struct uvc_control *ctrl;
+-	u32 changes = V4L2_EVENT_CTRL_CH_VALUE;
+ 	unsigned int i;
+ 	unsigned int j;
+ 
+ 	for (i = 0; i < xctrls_count; ++i) {
+-		ctrl = uvc_find_control(handle->chain, xctrls[i].id, &mapping);
++		u32 changes = V4L2_EVENT_CTRL_CH_VALUE;
+ 
++		ctrl = uvc_find_control(handle->chain, xctrls[i].id, &mapping);
+ 		if (ctrl->info.flags & UVC_CTRL_FLAG_ASYNCHRONOUS)
+ 			/* Notification will be sent from an Interrupt event. */
+ 			continue;
+@@ -1811,7 +1844,10 @@ int uvc_ctrl_begin(struct uvc_video_chain *chain)
+ }
+ 
+ static int uvc_ctrl_commit_entity(struct uvc_device *dev,
+-	struct uvc_entity *entity, int rollback, struct uvc_control **err_ctrl)
++				  struct uvc_fh *handle,
++				  struct uvc_entity *entity,
++				  int rollback,
++				  struct uvc_control **err_ctrl)
+ {
+ 	struct uvc_control *ctrl;
+ 	unsigned int i;
+@@ -1859,6 +1895,10 @@ static int uvc_ctrl_commit_entity(struct uvc_device *dev,
+ 				*err_ctrl = ctrl;
+ 			return ret;
+ 		}
++
++		if (!rollback && handle &&
++		    ctrl->info.flags & UVC_CTRL_FLAG_ASYNCHRONOUS)
++			uvc_ctrl_set_handle(handle, ctrl, handle);
+ 	}
+ 
+ 	return 0;
+@@ -1895,8 +1935,8 @@ int __uvc_ctrl_commit(struct uvc_fh *handle, int rollback,
+ 
+ 	/* Find the control. */
+ 	list_for_each_entry(entity, &chain->entities, chain) {
+-		ret = uvc_ctrl_commit_entity(chain->dev, entity, rollback,
+-					     &err_ctrl);
++		ret = uvc_ctrl_commit_entity(chain->dev, handle, entity,
++					     rollback, &err_ctrl);
+ 		if (ret < 0) {
+ 			if (ctrls)
+ 				ctrls->error_idx =
+@@ -2046,9 +2086,6 @@ int uvc_ctrl_set(struct uvc_fh *handle,
+ 	mapping->set(mapping, value,
+ 		uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT));
+ 
+-	if (ctrl->info.flags & UVC_CTRL_FLAG_ASYNCHRONOUS)
+-		ctrl->handle = handle;
+-
+ 	ctrl->dirty = 1;
+ 	ctrl->modified = 1;
+ 	return 0;
+@@ -2377,7 +2414,7 @@ int uvc_ctrl_restore_values(struct uvc_device *dev)
+ 			ctrl->dirty = 1;
+ 		}
+ 
+-		ret = uvc_ctrl_commit_entity(dev, entity, 0, NULL);
++		ret = uvc_ctrl_commit_entity(dev, NULL, entity, 0, NULL);
+ 		if (ret < 0)
+ 			return ret;
+ 	}
+@@ -2770,6 +2807,26 @@ int uvc_ctrl_init_device(struct uvc_device *dev)
+ 	return 0;
+ }
+ 
++void uvc_ctrl_cleanup_fh(struct uvc_fh *handle)
++{
++	struct uvc_entity *entity;
++
++	guard(mutex)(&handle->chain->ctrl_mutex);
++
++	if (!handle->pending_async_ctrls)
++		return;
++
++	list_for_each_entry(entity, &handle->chain->dev->entities, list) {
++		for (unsigned int i = 0; i < entity->ncontrols; ++i) {
++			if (entity->controls[i].handle != handle)
++				continue;
++			uvc_ctrl_set_handle(handle, &entity->controls[i], NULL);
++		}
++	}
++
++	WARN_ON(handle->pending_async_ctrls);
++}
++
+ /*
+  * Cleanup device controls.
+  */
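The uvc_ctrl.c changes pair every asynchronous control with the file handle that last set it, and make the handle count its outstanding controls so uvc_ctrl_cleanup_fh() can drop them all at release time. The invariant, in a reduced sketch with hypothetical types:

    /* Invariant sketch: every ctrl whose ->handle points at fh contributes
     * exactly one to fh->pending, so clearing all of them leaves it at zero.
     */
    struct fh   { unsigned int pending; };
    struct ctrl { struct fh *handle; };

    static void set_handle(struct ctrl *c, struct fh *fh)
    {
        if (c->handle == fh)
            return;
        if (c->handle)
            c->handle->pending--;   /* old owner loses a reference */
        c->handle = fh;
        if (fh)
            fh->pending++;          /* new owner gains one */
    }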
+diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
+index 9f38a9b23c0181..d832aa55056f39 100644
+--- a/drivers/media/usb/uvc/uvc_driver.c
++++ b/drivers/media/usb/uvc/uvc_driver.c
+@@ -775,27 +775,14 @@ static const u8 uvc_media_transport_input_guid[16] =
+ 	UVC_GUID_UVC_MEDIA_TRANSPORT_INPUT;
+ static const u8 uvc_processing_guid[16] = UVC_GUID_UVC_PROCESSING;
+ 
+-static struct uvc_entity *uvc_alloc_new_entity(struct uvc_device *dev, u16 type,
+-					       u16 id, unsigned int num_pads,
+-					       unsigned int extra_size)
++static struct uvc_entity *uvc_alloc_entity(u16 type, u16 id,
++		unsigned int num_pads, unsigned int extra_size)
+ {
+ 	struct uvc_entity *entity;
+ 	unsigned int num_inputs;
+ 	unsigned int size;
+ 	unsigned int i;
+ 
+-	/* Per UVC 1.1+ spec 3.7.2, the ID should be non-zero. */
+-	if (id == 0) {
+-		dev_err(&dev->udev->dev, "Found Unit with invalid ID 0.\n");
+-		return ERR_PTR(-EINVAL);
+-	}
+-
+-	/* Per UVC 1.1+ spec 3.7.2, the ID is unique. */
+-	if (uvc_entity_by_id(dev, id)) {
+-		dev_err(&dev->udev->dev, "Found multiple Units with ID %u\n", id);
+-		return ERR_PTR(-EINVAL);
+-	}
+-
+ 	extra_size = roundup(extra_size, sizeof(*entity->pads));
+ 	if (num_pads)
+ 		num_inputs = type & UVC_TERM_OUTPUT ? num_pads : num_pads - 1;
+@@ -805,7 +792,7 @@ static struct uvc_entity *uvc_alloc_new_entity(struct uvc_device *dev, u16 type,
+ 	     + num_inputs;
+ 	entity = kzalloc(size, GFP_KERNEL);
+ 	if (entity == NULL)
+-		return ERR_PTR(-ENOMEM);
++		return NULL;
+ 
+ 	entity->id = id;
+ 	entity->type = type;
+@@ -917,10 +904,10 @@ static int uvc_parse_vendor_control(struct uvc_device *dev,
+ 			break;
+ 		}
+ 
+-		unit = uvc_alloc_new_entity(dev, UVC_VC_EXTENSION_UNIT,
+-					    buffer[3], p + 1, 2 * n);
+-		if (IS_ERR(unit))
+-			return PTR_ERR(unit);
++		unit = uvc_alloc_entity(UVC_VC_EXTENSION_UNIT, buffer[3],
++					p + 1, 2*n);
++		if (unit == NULL)
++			return -ENOMEM;
+ 
+ 		memcpy(unit->guid, &buffer[4], 16);
+ 		unit->extension.bNumControls = buffer[20];
+@@ -1029,10 +1016,10 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
+ 			return -EINVAL;
+ 		}
+ 
+-		term = uvc_alloc_new_entity(dev, type | UVC_TERM_INPUT,
+-					    buffer[3], 1, n + p);
+-		if (IS_ERR(term))
+-			return PTR_ERR(term);
++		term = uvc_alloc_entity(type | UVC_TERM_INPUT, buffer[3],
++					1, n + p);
++		if (term == NULL)
++			return -ENOMEM;
+ 
+ 		if (UVC_ENTITY_TYPE(term) == UVC_ITT_CAMERA) {
+ 			term->camera.bControlSize = n;
+@@ -1088,10 +1075,10 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
+ 			return 0;
+ 		}
+ 
+-		term = uvc_alloc_new_entity(dev, type | UVC_TERM_OUTPUT,
+-					    buffer[3], 1, 0);
+-		if (IS_ERR(term))
+-			return PTR_ERR(term);
++		term = uvc_alloc_entity(type | UVC_TERM_OUTPUT, buffer[3],
++					1, 0);
++		if (term == NULL)
++			return -ENOMEM;
+ 
+ 		memcpy(term->baSourceID, &buffer[7], 1);
+ 
+@@ -1110,10 +1097,9 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
+ 			return -EINVAL;
+ 		}
+ 
+-		unit = uvc_alloc_new_entity(dev, buffer[2], buffer[3],
+-					    p + 1, 0);
+-		if (IS_ERR(unit))
+-			return PTR_ERR(unit);
++		unit = uvc_alloc_entity(buffer[2], buffer[3], p + 1, 0);
++		if (unit == NULL)
++			return -ENOMEM;
+ 
+ 		memcpy(unit->baSourceID, &buffer[5], p);
+ 
+@@ -1133,9 +1119,9 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
+ 			return -EINVAL;
+ 		}
+ 
+-		unit = uvc_alloc_new_entity(dev, buffer[2], buffer[3], 2, n);
+-		if (IS_ERR(unit))
+-			return PTR_ERR(unit);
++		unit = uvc_alloc_entity(buffer[2], buffer[3], 2, n);
++		if (unit == NULL)
++			return -ENOMEM;
+ 
+ 		memcpy(unit->baSourceID, &buffer[4], 1);
+ 		unit->processing.wMaxMultiplier =
+@@ -1162,10 +1148,9 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
+ 			return -EINVAL;
+ 		}
+ 
+-		unit = uvc_alloc_new_entity(dev, buffer[2], buffer[3],
+-					    p + 1, n);
+-		if (IS_ERR(unit))
+-			return PTR_ERR(unit);
++		unit = uvc_alloc_entity(buffer[2], buffer[3], p + 1, n);
++		if (unit == NULL)
++			return -ENOMEM;
+ 
+ 		memcpy(unit->guid, &buffer[4], 16);
+ 		unit->extension.bNumControls = buffer[20];
+@@ -1295,20 +1280,19 @@ static int uvc_gpio_parse(struct uvc_device *dev)
+ 	struct gpio_desc *gpio_privacy;
+ 	int irq;
+ 
+-	gpio_privacy = devm_gpiod_get_optional(&dev->udev->dev, "privacy",
++	gpio_privacy = devm_gpiod_get_optional(&dev->intf->dev, "privacy",
+ 					       GPIOD_IN);
+ 	if (IS_ERR_OR_NULL(gpio_privacy))
+ 		return PTR_ERR_OR_ZERO(gpio_privacy);
+ 
+ 	irq = gpiod_to_irq(gpio_privacy);
+ 	if (irq < 0)
+-		return dev_err_probe(&dev->udev->dev, irq,
++		return dev_err_probe(&dev->intf->dev, irq,
+ 				     "No IRQ for privacy GPIO\n");
+ 
+-	unit = uvc_alloc_new_entity(dev, UVC_EXT_GPIO_UNIT,
+-				    UVC_EXT_GPIO_UNIT_ID, 0, 1);
+-	if (IS_ERR(unit))
+-		return PTR_ERR(unit);
++	unit = uvc_alloc_entity(UVC_EXT_GPIO_UNIT, UVC_EXT_GPIO_UNIT_ID, 0, 1);
++	if (!unit)
++		return -ENOMEM;
+ 
+ 	unit->gpio.gpio_privacy = gpio_privacy;
+ 	unit->gpio.irq = irq;
+@@ -1329,15 +1313,27 @@ static int uvc_gpio_parse(struct uvc_device *dev)
+ static int uvc_gpio_init_irq(struct uvc_device *dev)
+ {
+ 	struct uvc_entity *unit = dev->gpio_unit;
++	int ret;
+ 
+ 	if (!unit || unit->gpio.irq < 0)
+ 		return 0;
+ 
+-	return devm_request_threaded_irq(&dev->udev->dev, unit->gpio.irq, NULL,
+-					 uvc_gpio_irq,
+-					 IRQF_ONESHOT | IRQF_TRIGGER_FALLING |
+-					 IRQF_TRIGGER_RISING,
+-					 "uvc_privacy_gpio", dev);
++	ret = request_threaded_irq(unit->gpio.irq, NULL, uvc_gpio_irq,
++				   IRQF_ONESHOT | IRQF_TRIGGER_FALLING |
++				   IRQF_TRIGGER_RISING,
++				   "uvc_privacy_gpio", dev);
++
++	unit->gpio.initialized = !ret;
++
++	return ret;
++}
++
++static void uvc_gpio_deinit(struct uvc_device *dev)
++{
++	if (!dev->gpio_unit || !dev->gpio_unit->gpio.initialized)
++		return;
++
++	free_irq(dev->gpio_unit->gpio.irq, dev);
+ }
+ 
+ /* ------------------------------------------------------------------------
+@@ -1934,6 +1930,8 @@ static void uvc_unregister_video(struct uvc_device *dev)
+ {
+ 	struct uvc_streaming *stream;
+ 
++	uvc_gpio_deinit(dev);
++
+ 	list_for_each_entry(stream, &dev->streams, list) {
+ 		/* Nothing to do here, continue. */
+ 		if (!video_is_registered(&stream->vdev))
+diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
+index f4988f03640aec..7bcd706281daf3 100644
+--- a/drivers/media/usb/uvc/uvc_v4l2.c
++++ b/drivers/media/usb/uvc/uvc_v4l2.c
+@@ -659,6 +659,8 @@ static int uvc_v4l2_release(struct file *file)
+ 
+ 	uvc_dbg(stream->dev, CALLS, "%s\n", __func__);
+ 
++	uvc_ctrl_cleanup_fh(handle);
++
+ 	/* Only free resources if this is a privileged handle. */
+ 	if (uvc_has_privileges(handle))
+ 		uvc_queue_release(&stream->queue);
+diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
+index e00f38dd07d935..d2fe01bcd209e5 100644
+--- a/drivers/media/usb/uvc/uvc_video.c
++++ b/drivers/media/usb/uvc/uvc_video.c
+@@ -79,6 +79,27 @@ int uvc_query_ctrl(struct uvc_device *dev, u8 query, u8 unit,
+ 	if (likely(ret == size))
+ 		return 0;
+ 
++	/*
++	 * Some devices return shorter USB control packets than expected if the
++	 * returned value can fit in less bytes. Zero all the bytes that the
++	 * device has not written.
++	 *
++	 * This quirk is applied to all controls, regardless of their data type.
++	 * Most controls are little-endian integers, in which case the missing
++	 * bytes become 0 MSBs. For other data types, a different heuristic
++	 * could be implemented if a device is found needing it.
++	 *
++	 * We exclude UVC_GET_INFO from the quirk. UVC_GET_LEN does not need
++	 * to be excluded because its size is always 1.
++	 */
++	if (ret > 0 && query != UVC_GET_INFO) {
++		memset(data + ret, 0, size - ret);
++		dev_warn_once(&dev->udev->dev,
++			      "UVC non compliance: %s control %u on unit %u returned %d bytes when we expected %u.\n",
++			      uvc_query_name(query), cs, unit, ret, size);
++		return 0;
++	}
++
+ 	if (ret != -EPIPE) {
+ 		dev_err(&dev->udev->dev,
+ 			"Failed to query (%s) UVC control %u on unit %u: %d (exp. %u).\n",
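The uvc_query_ctrl() quirk above tolerates devices that return fewer bytes than the control size by zero-filling the tail and warning once. For a little-endian integer control this preserves the value: if a 4-byte read returns only the bytes 0x34 0x12, padding to 0x34 0x12 0x00 0x00 still decodes as 0x1234. UVC_GET_INFO is deliberately excluded, and UVC_GET_LEN needs no exclusion because its size is always 1.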
+diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
+index b7d24a853ce4f1..272dc9cf01ee7d 100644
+--- a/drivers/media/usb/uvc/uvcvideo.h
++++ b/drivers/media/usb/uvc/uvcvideo.h
+@@ -234,6 +234,7 @@ struct uvc_entity {
+ 			u8  *bmControls;
+ 			struct gpio_desc *gpio_privacy;
+ 			int irq;
++			bool initialized;
+ 		} gpio;
+ 	};
+ 
+@@ -337,7 +338,11 @@ struct uvc_video_chain {
+ 	struct uvc_entity *processing;		/* Processing unit */
+ 	struct uvc_entity *selector;		/* Selector unit */
+ 
+-	struct mutex ctrl_mutex;		/* Protects ctrl.info */
++	struct mutex ctrl_mutex;		/*
++						 * Protects ctrl.info,
++						 * ctrl.handle and
++						 * uvc_fh.pending_async_ctrls
++						 */
+ 
+ 	struct v4l2_prio_state prio;		/* V4L2 priority state */
+ 	u32 caps;				/* V4L2 chain-wide caps */
+@@ -612,6 +617,7 @@ struct uvc_fh {
+ 	struct uvc_video_chain *chain;
+ 	struct uvc_streaming *stream;
+ 	enum uvc_handle_state state;
++	unsigned int pending_async_ctrls;
+ };
+ 
+ struct uvc_driver {
+@@ -795,6 +801,8 @@ int uvc_ctrl_is_accessible(struct uvc_video_chain *chain, u32 v4l2_id,
+ int uvc_xu_ctrl_query(struct uvc_video_chain *chain,
+ 		      struct uvc_xu_control_query *xqry);
+ 
++void uvc_ctrl_cleanup_fh(struct uvc_fh *handle);
++
+ /* Utility functions */
+ struct usb_host_endpoint *uvc_find_endpoint(struct usb_host_interface *alts,
+ 					    u8 epaddr);
+diff --git a/drivers/media/v4l2-core/v4l2-mc.c b/drivers/media/v4l2-core/v4l2-mc.c
+index 4bb91359e3a9a7..937d358697e19a 100644
+--- a/drivers/media/v4l2-core/v4l2-mc.c
++++ b/drivers/media/v4l2-core/v4l2-mc.c
+@@ -329,7 +329,7 @@ int v4l2_create_fwnode_links_to_pad(struct v4l2_subdev *src_sd,
+ 	if (!(sink->flags & MEDIA_PAD_FL_SINK))
+ 		return -EINVAL;
+ 
+-	fwnode_graph_for_each_endpoint(dev_fwnode(src_sd->dev), endpoint) {
++	fwnode_graph_for_each_endpoint(src_sd->fwnode, endpoint) {
+ 		struct fwnode_handle *remote_ep;
+ 		int src_idx, sink_idx, ret;
+ 		struct media_pad *src;
+diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c
+index f14901660147f5..4b7d0cb9340f1a 100644
+--- a/drivers/mfd/lpc_ich.c
++++ b/drivers/mfd/lpc_ich.c
+@@ -834,8 +834,9 @@ static const struct pci_device_id lpc_ich_ids[] = {
+ 	{ PCI_VDEVICE(INTEL, 0x2917), LPC_ICH9ME},
+ 	{ PCI_VDEVICE(INTEL, 0x2918), LPC_ICH9},
+ 	{ PCI_VDEVICE(INTEL, 0x2919), LPC_ICH9M},
+-	{ PCI_VDEVICE(INTEL, 0x3197), LPC_GLK},
+ 	{ PCI_VDEVICE(INTEL, 0x2b9c), LPC_COUGARMOUNTAIN},
++	{ PCI_VDEVICE(INTEL, 0x3197), LPC_GLK},
++	{ PCI_VDEVICE(INTEL, 0x31e8), LPC_GLK},
+ 	{ PCI_VDEVICE(INTEL, 0x3a14), LPC_ICH10DO},
+ 	{ PCI_VDEVICE(INTEL, 0x3a16), LPC_ICH10R},
+ 	{ PCI_VDEVICE(INTEL, 0x3a18), LPC_ICH10},
+diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
+index 74181b8c386b78..e567a36275afc5 100644
+--- a/drivers/misc/fastrpc.c
++++ b/drivers/misc/fastrpc.c
+@@ -992,7 +992,7 @@ static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
+ 			mmap_read_lock(current->mm);
+ 			vma = find_vma(current->mm, ctx->args[i].ptr);
+ 			if (vma)
+-				pages[i].addr += ctx->args[i].ptr -
++				pages[i].addr += (ctx->args[i].ptr & PAGE_MASK) -
+ 						 vma->vm_start;
+ 			mmap_read_unlock(current->mm);
+ 
+@@ -1019,8 +1019,8 @@ static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
+ 					(pkt_size - rlen);
+ 			pages[i].addr = pages[i].addr &	PAGE_MASK;
+ 
+-			pg_start = (args & PAGE_MASK) >> PAGE_SHIFT;
+-			pg_end = ((args + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
++			pg_start = (rpra[i].buf.pv & PAGE_MASK) >> PAGE_SHIFT;
++			pg_end = ((rpra[i].buf.pv + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
+ 			pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE;
+ 			args = args + mlen;
+ 			rlen -= mlen;
+@@ -2344,7 +2344,7 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
+ 
+ 		err = fastrpc_device_register(rdev, data, false, domains[domain_id]);
+ 		if (err)
+-			goto fdev_error;
++			goto populate_error;
+ 		break;
+ 	default:
+ 		err = -EINVAL;
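The fastrpc hunks above fix two related pieces of page arithmetic: the in-page offset is now computed against the page-aligned user pointer, and the page span is derived from the mapped buffer address (rpra[i].buf.pv) rather than the running args cursor. The span calculation in isolation, as a standalone sketch assuming 4 KiB pages:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    int main(void)
    {
        unsigned long addr = 0x1234, len = 0x2000;
        unsigned long pg_start = (addr & PAGE_MASK) >> PAGE_SHIFT;
        unsigned long pg_end = ((addr + len - 1) & PAGE_MASK) >> PAGE_SHIFT;

        /* 0x1234..0x3233 touches pages 1..3: three pages, 0x3000 bytes. */
        printf("pages %lu..%lu, size 0x%lx\n", pg_start, pg_end,
               (pg_end - pg_start + 1) * PAGE_SIZE);
        return 0;
    }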
+diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
+index 9566837c9848e6..4b19b8a16b0968 100644
+--- a/drivers/mmc/core/sdio.c
++++ b/drivers/mmc/core/sdio.c
+@@ -458,6 +458,8 @@ static unsigned mmc_sdio_get_max_clock(struct mmc_card *card)
+ 	if (mmc_card_sd_combo(card))
+ 		max_dtr = min(max_dtr, mmc_sd_get_max_clock(card));
+ 
++	max_dtr = min_not_zero(max_dtr, card->quirk_max_rate);
++
+ 	return max_dtr;
+ }
+ 
+diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
+index ef3a44f2dff16d..d84aa20f035894 100644
+--- a/drivers/mmc/host/sdhci-esdhc-imx.c
++++ b/drivers/mmc/host/sdhci-esdhc-imx.c
+@@ -303,6 +303,7 @@ static struct esdhc_soc_data usdhc_s32g2_data = {
+ 			| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
+ 			| ESDHC_FLAG_HS400 | ESDHC_FLAG_HS400_ES
+ 			| ESDHC_FLAG_SKIP_ERR004536 | ESDHC_FLAG_SKIP_CD_WAKE,
++	.quirks = SDHCI_QUIRK_NO_LED,
+ };
+ 
+ static struct esdhc_soc_data usdhc_imx7ulp_data = {
+diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
+index 8716004fcf6c90..945d08531de376 100644
+--- a/drivers/mmc/host/sdhci-msm.c
++++ b/drivers/mmc/host/sdhci-msm.c
+@@ -134,9 +134,18 @@
+ /* Timeout value to avoid infinite waiting for pwr_irq */
+ #define MSM_PWR_IRQ_TIMEOUT_MS 5000
+ 
++/* Max load for eMMC Vdd supply */
++#define MMC_VMMC_MAX_LOAD_UA	570000
++
+ /* Max load for eMMC Vdd-io supply */
+ #define MMC_VQMMC_MAX_LOAD_UA	325000
+ 
++/* Max load for SD Vdd supply */
++#define SD_VMMC_MAX_LOAD_UA	800000
++
++/* Max load for SD Vdd-io supply */
++#define SD_VQMMC_MAX_LOAD_UA	22000
++
+ #define msm_host_readl(msm_host, host, offset) \
+ 	msm_host->var_ops->msm_readl_relaxed(host, offset)
+ 
+@@ -1403,11 +1412,48 @@ static int sdhci_msm_set_pincfg(struct sdhci_msm_host *msm_host, bool level)
+ 	return ret;
+ }
+ 
+-static int sdhci_msm_set_vmmc(struct mmc_host *mmc)
++static void msm_config_vmmc_regulator(struct mmc_host *mmc, bool hpm)
++{
++	int load;
++
++	if (!hpm)
++		load = 0;
++	else if (!mmc->card)
++		load = max(MMC_VMMC_MAX_LOAD_UA, SD_VMMC_MAX_LOAD_UA);
++	else if (mmc_card_mmc(mmc->card))
++		load = MMC_VMMC_MAX_LOAD_UA;
++	else if (mmc_card_sd(mmc->card))
++		load = SD_VMMC_MAX_LOAD_UA;
++	else
++		return;
++
++	regulator_set_load(mmc->supply.vmmc, load);
++}
++
++static void msm_config_vqmmc_regulator(struct mmc_host *mmc, bool hpm)
++{
++	int load;
++
++	if (!hpm)
++		load = 0;
++	else if (!mmc->card)
++		load = max(MMC_VQMMC_MAX_LOAD_UA, SD_VQMMC_MAX_LOAD_UA);
++	else if (mmc_card_sd(mmc->card))
++		load = SD_VQMMC_MAX_LOAD_UA;
++	else
++		return;
++
++	regulator_set_load(mmc->supply.vqmmc, load);
++}
++
++static int sdhci_msm_set_vmmc(struct sdhci_msm_host *msm_host,
++			      struct mmc_host *mmc, bool hpm)
+ {
+ 	if (IS_ERR(mmc->supply.vmmc))
+ 		return 0;
+ 
++	msm_config_vmmc_regulator(mmc, hpm);
++
+ 	return mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, mmc->ios.vdd);
+ }
+ 
+@@ -1420,6 +1466,8 @@ static int msm_toggle_vqmmc(struct sdhci_msm_host *msm_host,
+ 	if (msm_host->vqmmc_enabled == level)
+ 		return 0;
+ 
++	msm_config_vqmmc_regulator(mmc, level);
++
+ 	if (level) {
+ 		/* Set the IO voltage regulator to default voltage level */
+ 		if (msm_host->caps_0 & CORE_3_0V_SUPPORT)
+@@ -1642,7 +1690,8 @@ static void sdhci_msm_handle_pwr_irq(struct sdhci_host *host, int irq)
+ 	}
+ 
+ 	if (pwr_state) {
+-		ret = sdhci_msm_set_vmmc(mmc);
++		ret = sdhci_msm_set_vmmc(msm_host, mmc,
++					 pwr_state & REQ_BUS_ON);
+ 		if (!ret)
+ 			ret = sdhci_msm_set_vqmmc(msm_host, mmc,
+ 					pwr_state & REQ_BUS_ON);
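The sdhci-msm hunks size the regulator load for the card that is actually present: once a card is identified, Vdd is set to 570000 uA for eMMC or 800000 uA for SD, and Vqmmc to 22000 uA for SD (for eMMC the Vqmmc load is left untouched). Before any card is detected, the larger of the two candidates is requested so the supply is never under-provisioned during enumeration, and a load of 0 selects low-power mode when the bus is powered down.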
+diff --git a/drivers/mtd/nand/onenand/onenand_base.c b/drivers/mtd/nand/onenand/onenand_base.c
+index f66385faf631cd..0dc2ea4fc857b7 100644
+--- a/drivers/mtd/nand/onenand/onenand_base.c
++++ b/drivers/mtd/nand/onenand/onenand_base.c
+@@ -2923,6 +2923,7 @@ static int do_otp_read(struct mtd_info *mtd, loff_t from, size_t len,
+ 	ret = ONENAND_IS_4KB_PAGE(this) ?
+ 		onenand_mlc_read_ops_nolock(mtd, from, &ops) :
+ 		onenand_read_ops_nolock(mtd, from, &ops);
++	*retlen = ops.retlen;
+ 
+ 	/* Exit OTP access mode */
+ 	this->command(mtd, ONENAND_CMD_RESET, 0, 0);
+diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
+index 30be4ed68fad29..ef6a22f372f95c 100644
+--- a/drivers/mtd/ubi/build.c
++++ b/drivers/mtd/ubi/build.c
+@@ -1537,7 +1537,7 @@ static int ubi_mtd_param_parse(const char *val, const struct kernel_param *kp)
+ 	if (token) {
+ 		int err = kstrtoint(token, 10, &p->ubi_num);
+ 
+-		if (err) {
++		if (err || p->ubi_num < UBI_DEV_NUM_AUTO) {
+ 			pr_err("UBI error: bad value for ubi_num parameter: %s\n",
+ 			       token);
+ 			return -EINVAL;
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+index fe0e3e2a811718..71e50fc65c1478 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+@@ -1441,7 +1441,9 @@ void aq_nic_deinit(struct aq_nic_s *self, bool link_down)
+ 	aq_ptp_ring_free(self);
+ 	aq_ptp_free(self);
+ 
+-	if (likely(self->aq_fw_ops->deinit) && link_down) {
++	/* May be invoked during hot unplug. */
++	if (pci_device_is_present(self->pdev) &&
++	    likely(self->aq_fw_ops->deinit) && link_down) {
+ 		mutex_lock(&self->fwreq_mutex);
+ 		self->aq_fw_ops->deinit(self->aq_hw);
+ 		mutex_unlock(&self->fwreq_mutex);
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+index 0715ea5bf13ed9..3b082114f2e538 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+@@ -41,9 +41,12 @@ void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+ {
+ 	struct bcmgenet_priv *priv = netdev_priv(dev);
+ 	struct device *kdev = &priv->pdev->dev;
++	u32 phy_wolopts = 0;
+ 
+-	if (dev->phydev)
++	if (dev->phydev) {
+ 		phy_ethtool_get_wol(dev->phydev, wol);
++		phy_wolopts = wol->wolopts;
++	}
+ 
+ 	/* MAC is not wake-up capable, return what the PHY does */
+ 	if (!device_can_wakeup(kdev))
+@@ -51,9 +54,14 @@ void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+ 
+ 	/* Overlay MAC capabilities with that of the PHY queried before */
+ 	wol->supported |= WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
+-	wol->wolopts = priv->wolopts;
+-	memset(wol->sopass, 0, sizeof(wol->sopass));
++	wol->wolopts |= priv->wolopts;
+ 
++	/* Return the PHY configured magic password */
++	if (phy_wolopts & WAKE_MAGICSECURE)
++		return;
++
++	/* Otherwise the MAC one */
++	memset(wol->sopass, 0, sizeof(wol->sopass));
+ 	if (wol->wolopts & WAKE_MAGICSECURE)
+ 		memcpy(wol->sopass, priv->sopass, sizeof(priv->sopass));
+ }
+@@ -70,7 +78,7 @@ int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+ 	/* Try Wake-on-LAN from the PHY first */
+ 	if (dev->phydev) {
+ 		ret = phy_ethtool_set_wol(dev->phydev, wol);
+-		if (ret != -EOPNOTSUPP)
++		if (ret != -EOPNOTSUPP && wol->wolopts)
+ 			return ret;
+ 	}
+ 
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index d178138981a967..717e110d23c914 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -55,6 +55,7 @@
+ #include <linux/hwmon.h>
+ #include <linux/hwmon-sysfs.h>
+ #include <linux/crc32poly.h>
++#include <linux/dmi.h>
+ 
+ #include <net/checksum.h>
+ #include <net/gso.h>
+@@ -18154,6 +18155,50 @@ static int tg3_resume(struct device *device)
+ 
+ static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
+ 
++/* Systems where ACPI _PTS (Prepare To Sleep) S5 will result in a fatal
++ * PCIe AER event on the tg3 device if the tg3 device is not, or cannot
++ * be, powered down.
++ */
++static const struct dmi_system_id tg3_restart_aer_quirk_table[] = {
++	{
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R440"),
++		},
++	},
++	{
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R540"),
++		},
++	},
++	{
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R640"),
++		},
++	},
++	{
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R650"),
++		},
++	},
++	{
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R740"),
++		},
++	},
++	{
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R750"),
++		},
++	},
++	{}
++};
++
+ static void tg3_shutdown(struct pci_dev *pdev)
+ {
+ 	struct net_device *dev = pci_get_drvdata(pdev);
+@@ -18170,6 +18215,19 @@ static void tg3_shutdown(struct pci_dev *pdev)
+ 
+ 	if (system_state == SYSTEM_POWER_OFF)
+ 		tg3_power_down(tp);
++	else if (system_state == SYSTEM_RESTART &&
++		 dmi_first_match(tg3_restart_aer_quirk_table) &&
++		 pdev->current_state != PCI_D3cold &&
++		 pdev->current_state != PCI_UNKNOWN) {
++		/* Disable PCIe AER on the tg3 to avoid a fatal
++		 * error during this system restart.
++		 */
++		pcie_capability_clear_word(pdev, PCI_EXP_DEVCTL,
++					   PCI_EXP_DEVCTL_CERE |
++					   PCI_EXP_DEVCTL_NFERE |
++					   PCI_EXP_DEVCTL_FERE |
++					   PCI_EXP_DEVCTL_URRE);
++	}
+ 
+ 	rtnl_unlock();
+ 
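On the listed PowerEdge systems the tg3 shutdown path now masks AER reporting before a restart: pcie_capability_clear_word() clears the correctable, non-fatal, fatal, and unsupported-request reporting-enable bits in the PCIe Device Control register, so the device cannot raise the fatal AER event that ACPI _PTS would otherwise trigger. The quirk is skipped when the device is already in D3cold or its power state is unknown.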
+diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink.c b/drivers/net/ethernet/intel/ice/devlink/devlink.c
+index 415445cefdb2aa..b1efd287b3309c 100644
+--- a/drivers/net/ethernet/intel/ice/devlink/devlink.c
++++ b/drivers/net/ethernet/intel/ice/devlink/devlink.c
+@@ -977,6 +977,9 @@ static int ice_devlink_rate_node_new(struct devlink_rate *rate_node, void **priv
+ 
+ 	/* preallocate memory for ice_sched_node */
+ 	node = devm_kzalloc(ice_hw_to_dev(pi->hw), sizeof(*node), GFP_KERNEL);
++	if (!node)
++		return -ENOMEM;
++
+ 	*priv = node;
+ 
+ 	return 0;
+diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
+index 8208055d6e7fc5..f12fb3a2b6ad94 100644
+--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
++++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
+@@ -527,15 +527,14 @@ int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
+  * @xdp: xdp_buff used as input to the XDP program
+  * @xdp_prog: XDP program to run
+  * @xdp_ring: ring to be used for XDP_TX action
+- * @rx_buf: Rx buffer to store the XDP action
+  * @eop_desc: Last descriptor in packet to read metadata from
+  *
+  * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
+  */
+-static void
++static u32
+ ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
+ 	    struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring,
+-	    struct ice_rx_buf *rx_buf, union ice_32b_rx_flex_desc *eop_desc)
++	    union ice_32b_rx_flex_desc *eop_desc)
+ {
+ 	unsigned int ret = ICE_XDP_PASS;
+ 	u32 act;
+@@ -574,7 +573,7 @@ ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
+ 		ret = ICE_XDP_CONSUMED;
+ 	}
+ exit:
+-	ice_set_rx_bufs_act(xdp, rx_ring, ret);
++	return ret;
+ }
+ 
+ /**
+@@ -860,10 +859,8 @@ ice_add_xdp_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
+ 		xdp_buff_set_frags_flag(xdp);
+ 	}
+ 
+-	if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) {
+-		ice_set_rx_bufs_act(xdp, rx_ring, ICE_XDP_CONSUMED);
++	if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS))
+ 		return -ENOMEM;
+-	}
+ 
+ 	__skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, rx_buf->page,
+ 				   rx_buf->page_offset, size);
+@@ -924,7 +921,6 @@ ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size,
+ 	struct ice_rx_buf *rx_buf;
+ 
+ 	rx_buf = &rx_ring->rx_buf[ntc];
+-	rx_buf->pgcnt = page_count(rx_buf->page);
+ 	prefetchw(rx_buf->page);
+ 
+ 	if (!size)
+@@ -940,6 +936,31 @@ ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size,
+ 	return rx_buf;
+ }
+ 
++/**
++ * ice_get_pgcnts - grab page_count() for gathered fragments
++ * @rx_ring: Rx descriptor ring to store the page counts on
++ *
++ * This function is intended to be called right before running the XDP
++ * program, so that the page recycling mechanism can make a correct
++ * decision about the underlying pages; this is needed because the XDP
++ * program may change the refcount of a page.
++ */
++static void ice_get_pgcnts(struct ice_rx_ring *rx_ring)
++{
++	u32 nr_frags = rx_ring->nr_frags + 1;
++	u32 idx = rx_ring->first_desc;
++	struct ice_rx_buf *rx_buf;
++	u32 cnt = rx_ring->count;
++
++	for (int i = 0; i < nr_frags; i++) {
++		rx_buf = &rx_ring->rx_buf[idx];
++		rx_buf->pgcnt = page_count(rx_buf->page);
++
++		if (++idx == cnt)
++			idx = 0;
++	}
++}
++
+ /**
+  * ice_build_skb - Build skb around an existing buffer
+  * @rx_ring: Rx descriptor ring to transact packets on
+@@ -1051,12 +1072,12 @@ ice_construct_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
+ 				rx_buf->page_offset + headlen, size,
+ 				xdp->frame_sz);
+ 	} else {
+-		/* buffer is unused, change the act that should be taken later
+-		 * on; data was copied onto skb's linear part so there's no
++		/* buffer is unused, restore biased page count in Rx buffer;
++		 * data was copied onto skb's linear part so there's no
+ 		 * need for adjusting page offset and we can reuse this buffer
+ 		 * as-is
+ 		 */
+-		rx_buf->act = ICE_SKB_CONSUMED;
++		rx_buf->pagecnt_bias++;
+ 	}
+ 
+ 	if (unlikely(xdp_buff_has_frags(xdp))) {
+@@ -1103,6 +1124,65 @@ ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf)
+ 	rx_buf->page = NULL;
+ }
+ 
++/**
++ * ice_put_rx_mbuf - ice_put_rx_buf() caller, for all frame frags
++ * @rx_ring: Rx ring with all the auxiliary data
++ * @xdp: XDP buffer carrying linear + frags part
++ * @xdp_xmit: XDP_TX/XDP_REDIRECT verdict storage
++ * @ntc: a current next_to_clean value to be stored at rx_ring
++ * @verdict: return code from XDP program execution
++ *
++ * Walk through the gathered fragments and satisfy the internal page
++ * recycle mechanism; the action taken for each buffer depends on the
++ * verdict returned by the XDP program.
++ */
++static void ice_put_rx_mbuf(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
++			    u32 *xdp_xmit, u32 ntc, u32 verdict)
++{
++	u32 nr_frags = rx_ring->nr_frags + 1;
++	u32 idx = rx_ring->first_desc;
++	u32 cnt = rx_ring->count;
++	u32 post_xdp_frags = 1;
++	struct ice_rx_buf *buf;
++	int i;
++
++	if (unlikely(xdp_buff_has_frags(xdp)))
++		post_xdp_frags += xdp_get_shared_info_from_buff(xdp)->nr_frags;
++
++	for (i = 0; i < post_xdp_frags; i++) {
++		buf = &rx_ring->rx_buf[idx];
++
++		if (verdict & (ICE_XDP_TX | ICE_XDP_REDIR)) {
++			ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
++			*xdp_xmit |= verdict;
++		} else if (verdict & ICE_XDP_CONSUMED) {
++			buf->pagecnt_bias++;
++		} else if (verdict == ICE_XDP_PASS) {
++			ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
++		}
++
++		ice_put_rx_buf(rx_ring, buf);
++
++		if (++idx == cnt)
++			idx = 0;
++	}
++	/* handle buffers that represented frags released by XDP prog;
++	 * for these we keep pagecnt_bias as-is; refcount from struct page
++	 * has been decremented within XDP prog and we do not have to increase
++	 * the biased refcnt
++	 */
++	for (; i < nr_frags; i++) {
++		buf = &rx_ring->rx_buf[idx];
++		ice_put_rx_buf(rx_ring, buf);
++		if (++idx == cnt)
++			idx = 0;
++	}
++
++	xdp->data = NULL;
++	rx_ring->first_desc = ntc;
++	rx_ring->nr_frags = 0;
++}
++
+ /**
+  * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
+  * @rx_ring: Rx descriptor ring to transact packets on
+@@ -1120,15 +1200,13 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
+ 	unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
+ 	unsigned int offset = rx_ring->rx_offset;
+ 	struct xdp_buff *xdp = &rx_ring->xdp;
+-	u32 cached_ntc = rx_ring->first_desc;
+ 	struct ice_tx_ring *xdp_ring = NULL;
+ 	struct bpf_prog *xdp_prog = NULL;
+ 	u32 ntc = rx_ring->next_to_clean;
++	u32 cached_ntu, xdp_verdict;
+ 	u32 cnt = rx_ring->count;
+ 	u32 xdp_xmit = 0;
+-	u32 cached_ntu;
+ 	bool failure;
+-	u32 first;
+ 
+ 	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
+ 	if (xdp_prog) {
+@@ -1190,6 +1268,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
+ 			xdp_prepare_buff(xdp, hard_start, offset, size, !!offset);
+ 			xdp_buff_clear_frags_flag(xdp);
+ 		} else if (ice_add_xdp_frag(rx_ring, xdp, rx_buf, size)) {
++			ice_put_rx_mbuf(rx_ring, xdp, NULL, ntc, ICE_XDP_CONSUMED);
+ 			break;
+ 		}
+ 		if (++ntc == cnt)
+@@ -1199,15 +1278,15 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
+ 		if (ice_is_non_eop(rx_ring, rx_desc))
+ 			continue;
+ 
+-		ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_buf, rx_desc);
+-		if (rx_buf->act == ICE_XDP_PASS)
++		ice_get_pgcnts(rx_ring);
++		xdp_verdict = ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_desc);
++		if (xdp_verdict == ICE_XDP_PASS)
+ 			goto construct_skb;
+ 		total_rx_bytes += xdp_get_buff_len(xdp);
+ 		total_rx_pkts++;
+ 
+-		xdp->data = NULL;
+-		rx_ring->first_desc = ntc;
+-		rx_ring->nr_frags = 0;
++		ice_put_rx_mbuf(rx_ring, xdp, &xdp_xmit, ntc, xdp_verdict);
++
+ 		continue;
+ construct_skb:
+ 		if (likely(ice_ring_uses_build_skb(rx_ring)))
+@@ -1217,18 +1296,12 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
+ 		/* exit if we failed to retrieve a buffer */
+ 		if (!skb) {
+ 			rx_ring->ring_stats->rx_stats.alloc_page_failed++;
+-			rx_buf->act = ICE_XDP_CONSUMED;
+-			if (unlikely(xdp_buff_has_frags(xdp)))
+-				ice_set_rx_bufs_act(xdp, rx_ring,
+-						    ICE_XDP_CONSUMED);
+-			xdp->data = NULL;
+-			rx_ring->first_desc = ntc;
+-			rx_ring->nr_frags = 0;
+-			break;
++			xdp_verdict = ICE_XDP_CONSUMED;
+ 		}
+-		xdp->data = NULL;
+-		rx_ring->first_desc = ntc;
+-		rx_ring->nr_frags = 0;
++		ice_put_rx_mbuf(rx_ring, xdp, &xdp_xmit, ntc, xdp_verdict);
++
++		if (!skb)
++			break;
+ 
+ 		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
+ 		if (unlikely(ice_test_staterr(rx_desc->wb.status_error0,
+@@ -1257,23 +1330,6 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
+ 		total_rx_pkts++;
+ 	}
+ 
+-	first = rx_ring->first_desc;
+-	while (cached_ntc != first) {
+-		struct ice_rx_buf *buf = &rx_ring->rx_buf[cached_ntc];
+-
+-		if (buf->act & (ICE_XDP_TX | ICE_XDP_REDIR)) {
+-			ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
+-			xdp_xmit |= buf->act;
+-		} else if (buf->act & ICE_XDP_CONSUMED) {
+-			buf->pagecnt_bias++;
+-		} else if (buf->act == ICE_XDP_PASS) {
+-			ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
+-		}
+-
+-		ice_put_rx_buf(rx_ring, buf);
+-		if (++cached_ntc >= cnt)
+-			cached_ntc = 0;
+-	}
+ 	rx_ring->next_to_clean = ntc;
+ 	/* return up to cleaned_count buffers to hardware */
+ 	failure = ice_alloc_rx_bufs(rx_ring, ICE_RX_DESC_UNUSED(rx_ring));
+diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
+index feba314a3fe441..7130992d417798 100644
+--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
++++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
+@@ -201,7 +201,6 @@ struct ice_rx_buf {
+ 	struct page *page;
+ 	unsigned int page_offset;
+ 	unsigned int pgcnt;
+-	unsigned int act;
+ 	unsigned int pagecnt_bias;
+ };
+ 
+diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
+index afcead4baef4b1..f6c2b16ab45674 100644
+--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
++++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
+@@ -5,49 +5,6 @@
+ #define _ICE_TXRX_LIB_H_
+ #include "ice.h"
+ 
+-/**
+- * ice_set_rx_bufs_act - propagate Rx buffer action to frags
+- * @xdp: XDP buffer representing frame (linear and frags part)
+- * @rx_ring: Rx ring struct
+- * act: action to store onto Rx buffers related to XDP buffer parts
+- *
+- * Set action that should be taken before putting Rx buffer from first frag
+- * to the last.
+- */
+-static inline void
+-ice_set_rx_bufs_act(struct xdp_buff *xdp, const struct ice_rx_ring *rx_ring,
+-		    const unsigned int act)
+-{
+-	u32 sinfo_frags = xdp_get_shared_info_from_buff(xdp)->nr_frags;
+-	u32 nr_frags = rx_ring->nr_frags + 1;
+-	u32 idx = rx_ring->first_desc;
+-	u32 cnt = rx_ring->count;
+-	struct ice_rx_buf *buf;
+-
+-	for (int i = 0; i < nr_frags; i++) {
+-		buf = &rx_ring->rx_buf[idx];
+-		buf->act = act;
+-
+-		if (++idx == cnt)
+-			idx = 0;
+-	}
+-
+-	/* adjust pagecnt_bias on frags freed by XDP prog */
+-	if (sinfo_frags < rx_ring->nr_frags && act == ICE_XDP_CONSUMED) {
+-		u32 delta = rx_ring->nr_frags - sinfo_frags;
+-
+-		while (delta) {
+-			if (idx == 0)
+-				idx = cnt - 1;
+-			else
+-				idx--;
+-			buf = &rx_ring->rx_buf[idx];
+-			buf->pagecnt_bias--;
+-			delta--;
+-		}
+-	}
+-}
+-
+ /**
+  * ice_test_staterr - tests bits in Rx descriptor status and error fields
+  * @status_err_n: Rx descriptor status_error0 or status_error1 bits
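The ice hunks above replace deferred per-buffer bookkeeping (the old act field, now removed from struct ice_rx_buf along with ice_set_rx_bufs_act()) with an immediate walk in ice_put_rx_mbuf(): each fragment's buffer gets the XDP verdict applied on the spot. The recycling decision rests on the biased page refcount: the driver decrements pagecnt_bias when it hands a buffer out and restores it when the buffer turns out to be unused, so a page is reusable only while the bias accounts for every reference but one. A minimal standalone sketch of that accounting, with illustrative names and numbers rather than the driver's:

#include <stdbool.h>
#include <stdio.h>

struct rx_buf {
	int page_refs;      /* stands in for page_count(page) */
	int pagecnt_bias;   /* references the driver still owns */
};

/* Reusable only when the driver's bias covers all refs but one. */
static bool can_reuse(const struct rx_buf *b)
{
	return b->page_refs - b->pagecnt_bias == 1;
}

int main(void)
{
	/* freshly allocated buffer: one page ref plus one driver bias */
	struct rx_buf b = { .page_refs = 2, .pagecnt_bias = 1 };

	b.pagecnt_bias--;            /* buffer handed out for this frame */

	/* ICE_XDP_CONSUMED / copy-to-linear path: nothing kept the page,
	 * so restoring the bias makes the buffer immediately reusable. */
	b.pagecnt_bias++;
	printf("after drop: reuse=%d\n", can_reuse(&b));

	b.pagecnt_bias--;            /* next frame: frag given to the stack */
	printf("in flight:  reuse=%d\n", can_reuse(&b));
	return 0;
}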
+diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c b/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c
+index 7d0124b283dace..d7a3465e6a7241 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c
++++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c
+@@ -157,17 +157,14 @@ octep_get_ethtool_stats(struct net_device *netdev,
+ 				    iface_rx_stats,
+ 				    iface_tx_stats);
+ 
+-	for (q = 0; q < oct->num_oqs; q++) {
+-		struct octep_iq *iq = oct->iq[q];
+-		struct octep_oq *oq = oct->oq[q];
+-
+-		tx_packets += iq->stats.instr_completed;
+-		tx_bytes += iq->stats.bytes_sent;
+-		tx_busy_errors += iq->stats.tx_busy;
+-
+-		rx_packets += oq->stats.packets;
+-		rx_bytes += oq->stats.bytes;
+-		rx_alloc_errors += oq->stats.alloc_failures;
++	for (q = 0; q < OCTEP_MAX_QUEUES; q++) {
++		tx_packets += oct->stats_iq[q].instr_completed;
++		tx_bytes += oct->stats_iq[q].bytes_sent;
++		tx_busy_errors += oct->stats_iq[q].tx_busy;
++
++		rx_packets += oct->stats_oq[q].packets;
++		rx_bytes += oct->stats_oq[q].bytes;
++		rx_alloc_errors += oct->stats_oq[q].alloc_failures;
+ 	}
+ 	i = 0;
+ 	data[i++] = rx_packets;
+@@ -205,22 +202,18 @@ octep_get_ethtool_stats(struct net_device *netdev,
+ 	data[i++] = iface_rx_stats->err_pkts;
+ 
+ 	/* Per Tx Queue stats */
+-	for (q = 0; q < oct->num_iqs; q++) {
+-		struct octep_iq *iq = oct->iq[q];
+-
+-		data[i++] = iq->stats.instr_posted;
+-		data[i++] = iq->stats.instr_completed;
+-		data[i++] = iq->stats.bytes_sent;
+-		data[i++] = iq->stats.tx_busy;
++	for (q = 0; q < OCTEP_MAX_QUEUES; q++) {
++		data[i++] = oct->stats_iq[q].instr_posted;
++		data[i++] = oct->stats_iq[q].instr_completed;
++		data[i++] = oct->stats_iq[q].bytes_sent;
++		data[i++] = oct->stats_iq[q].tx_busy;
+ 	}
+ 
+ 	/* Per Rx Queue stats */
+-	for (q = 0; q < oct->num_oqs; q++) {
+-		struct octep_oq *oq = oct->oq[q];
+-
+-		data[i++] = oq->stats.packets;
+-		data[i++] = oq->stats.bytes;
+-		data[i++] = oq->stats.alloc_failures;
++	for (q = 0; q < OCTEP_MAX_QUEUES; q++) {
++		data[i++] = oct->stats_oq[q].packets;
++		data[i++] = oct->stats_oq[q].bytes;
++		data[i++] = oct->stats_oq[q].alloc_failures;
+ 	}
+ }
+ 
+diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+index 730aa5632cceee..a89f80bac39b8d 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
++++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+@@ -822,7 +822,7 @@ static inline int octep_iq_full_check(struct octep_iq *iq)
+ 	if (unlikely(IQ_INSTR_SPACE(iq) >
+ 		     OCTEP_WAKE_QUEUE_THRESHOLD)) {
+ 		netif_start_subqueue(iq->netdev, iq->q_no);
+-		iq->stats.restart_cnt++;
++		iq->stats->restart_cnt++;
+ 		return 0;
+ 	}
+ 
+@@ -960,7 +960,7 @@ static netdev_tx_t octep_start_xmit(struct sk_buff *skb,
+ 	wmb();
+ 	/* Ring Doorbell to notify the NIC of new packets */
+ 	writel(iq->fill_cnt, iq->doorbell_reg);
+-	iq->stats.instr_posted += iq->fill_cnt;
++	iq->stats->instr_posted += iq->fill_cnt;
+ 	iq->fill_cnt = 0;
+ 	return NETDEV_TX_OK;
+ 
+@@ -991,22 +991,19 @@ static netdev_tx_t octep_start_xmit(struct sk_buff *skb,
+ static void octep_get_stats64(struct net_device *netdev,
+ 			      struct rtnl_link_stats64 *stats)
+ {
+-	u64 tx_packets, tx_bytes, rx_packets, rx_bytes;
+ 	struct octep_device *oct = netdev_priv(netdev);
++	u64 tx_packets, tx_bytes, rx_packets, rx_bytes;
+ 	int q;
+ 
+ 	tx_packets = 0;
+ 	tx_bytes = 0;
+ 	rx_packets = 0;
+ 	rx_bytes = 0;
+-	for (q = 0; q < oct->num_oqs; q++) {
+-		struct octep_iq *iq = oct->iq[q];
+-		struct octep_oq *oq = oct->oq[q];
+-
+-		tx_packets += iq->stats.instr_completed;
+-		tx_bytes += iq->stats.bytes_sent;
+-		rx_packets += oq->stats.packets;
+-		rx_bytes += oq->stats.bytes;
++	for (q = 0; q < OCTEP_MAX_QUEUES; q++) {
++		tx_packets += oct->stats_iq[q].instr_completed;
++		tx_bytes += oct->stats_iq[q].bytes_sent;
++		rx_packets += oct->stats_oq[q].packets;
++		rx_bytes += oct->stats_oq[q].bytes;
+ 	}
+ 	stats->tx_packets = tx_packets;
+ 	stats->tx_bytes = tx_bytes;
+diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.h b/drivers/net/ethernet/marvell/octeon_ep/octep_main.h
+index fee59e0e0138fe..936b786f428168 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.h
++++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.h
+@@ -257,11 +257,17 @@ struct octep_device {
+ 	/* Pointers to Octeon Tx queues */
+ 	struct octep_iq *iq[OCTEP_MAX_IQ];
+ 
++	/* Per iq stats */
++	struct octep_iq_stats stats_iq[OCTEP_MAX_IQ];
++
+ 	/* Rx queues (OQ: Output Queue) */
+ 	u16 num_oqs;
+ 	/* Pointers to Octeon Rx queues */
+ 	struct octep_oq *oq[OCTEP_MAX_OQ];
+ 
++	/* Per oq stats */
++	struct octep_oq_stats stats_oq[OCTEP_MAX_OQ];
++
+ 	/* Hardware port number of the PCIe interface */
+ 	u16 pcie_port;
+ 
+diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
+index 8af75cb37c3ee8..82b6b19e76b47a 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
++++ b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
+@@ -87,7 +87,7 @@ static int octep_oq_refill(struct octep_device *oct, struct octep_oq *oq)
+ 		page = dev_alloc_page();
+ 		if (unlikely(!page)) {
+ 			dev_err(oq->dev, "refill: rx buffer alloc failed\n");
+-			oq->stats.alloc_failures++;
++			oq->stats->alloc_failures++;
+ 			break;
+ 		}
+ 
+@@ -98,7 +98,7 @@ static int octep_oq_refill(struct octep_device *oct, struct octep_oq *oq)
+ 				"OQ-%d buffer refill: DMA mapping error!\n",
+ 				oq->q_no);
+ 			put_page(page);
+-			oq->stats.alloc_failures++;
++			oq->stats->alloc_failures++;
+ 			break;
+ 		}
+ 		oq->buff_info[refill_idx].page = page;
+@@ -134,6 +134,7 @@ static int octep_setup_oq(struct octep_device *oct, int q_no)
+ 	oq->netdev = oct->netdev;
+ 	oq->dev = &oct->pdev->dev;
+ 	oq->q_no = q_no;
++	oq->stats = &oct->stats_oq[q_no];
+ 	oq->max_count = CFG_GET_OQ_NUM_DESC(oct->conf);
+ 	oq->ring_size_mask = oq->max_count - 1;
+ 	oq->buffer_size = CFG_GET_OQ_BUF_SIZE(oct->conf);
+@@ -443,7 +444,7 @@ static int __octep_oq_process_rx(struct octep_device *oct,
+ 		if (!skb) {
+ 			octep_oq_drop_rx(oq, buff_info,
+ 					 &read_idx, &desc_used);
+-			oq->stats.alloc_failures++;
++			oq->stats->alloc_failures++;
+ 			continue;
+ 		}
+ 		skb_reserve(skb, data_offset);
+@@ -494,8 +495,8 @@ static int __octep_oq_process_rx(struct octep_device *oct,
+ 
+ 	oq->host_read_idx = read_idx;
+ 	oq->refill_count += desc_used;
+-	oq->stats.packets += pkt;
+-	oq->stats.bytes += rx_bytes;
++	oq->stats->packets += pkt;
++	oq->stats->bytes += rx_bytes;
+ 
+ 	return pkt;
+ }
+diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h
+index 3b08e2d560dc39..b4696c93d0e6a9 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h
++++ b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h
+@@ -186,8 +186,8 @@ struct octep_oq {
+ 	 */
+ 	u8 __iomem *pkts_sent_reg;
+ 
+-	/* Statistics for this OQ. */
+-	struct octep_oq_stats stats;
++	/* Pointer to statistics for this OQ. */
++	struct octep_oq_stats *stats;
+ 
+ 	/* Packets pending to be processed */
+ 	u32 pkts_pending;
+diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c
+index 06851b78aa28c8..08ee90013fef3b 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c
++++ b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c
+@@ -81,9 +81,9 @@ int octep_iq_process_completions(struct octep_iq *iq, u16 budget)
+ 	}
+ 
+ 	iq->pkts_processed += compl_pkts;
+-	iq->stats.instr_completed += compl_pkts;
+-	iq->stats.bytes_sent += compl_bytes;
+-	iq->stats.sgentry_sent += compl_sg;
++	iq->stats->instr_completed += compl_pkts;
++	iq->stats->bytes_sent += compl_bytes;
++	iq->stats->sgentry_sent += compl_sg;
+ 	iq->flush_index = fi;
+ 
+ 	netdev_tx_completed_queue(iq->netdev_q, compl_pkts, compl_bytes);
+@@ -187,6 +187,7 @@ static int octep_setup_iq(struct octep_device *oct, int q_no)
+ 	iq->netdev = oct->netdev;
+ 	iq->dev = &oct->pdev->dev;
+ 	iq->q_no = q_no;
++	iq->stats = &oct->stats_iq[q_no];
+ 	iq->max_count = CFG_GET_IQ_NUM_DESC(oct->conf);
+ 	iq->ring_size_mask = iq->max_count - 1;
+ 	iq->fill_threshold = CFG_GET_IQ_DB_MIN(oct->conf);
+diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h
+index 875a2c34091ffe..58fb39dda977c0 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h
++++ b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h
+@@ -170,8 +170,8 @@ struct octep_iq {
+ 	 */
+ 	u16 flush_index;
+ 
+-	/* Statistics for this input queue. */
+-	struct octep_iq_stats stats;
++	/* Pointer to statistics for this input queue. */
++	struct octep_iq_stats *stats;
+ 
+ 	/* Pointer to the Virtual Base addr of the input ring. */
+ 	struct octep_tx_desc_hw *desc_ring;
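The octeon_ep changes above move the per-queue counters out of struct octep_iq/octep_oq into fixed-size arrays in struct octep_device, with each queue keeping only a pointer into them (wired up in octep_setup_iq()/octep_setup_oq()). The ethtool and get_stats64 paths can then sum over OCTEP_MAX_QUEUES without dereferencing queue structs that may have been freed, and totals survive queue teardown. A standalone sketch of the lifetime pattern, with made-up names:

#include <stdio.h>
#include <stdlib.h>

#define MAX_QUEUES 4

struct iq_stats { unsigned long long pkts; };

struct iq {
	int q_no;
	struct iq_stats *stats;           /* borrowed from the device */
};

struct device {
	struct iq *iq[MAX_QUEUES];
	struct iq_stats stats_iq[MAX_QUEUES];  /* persistent storage */
};

static struct iq *iq_create(struct device *dev, int q_no)
{
	struct iq *q = calloc(1, sizeof(*q));

	if (!q)
		exit(1);
	q->q_no = q_no;
	q->stats = &dev->stats_iq[q_no];  /* point at persistent counters */
	return q;
}

int main(void)
{
	struct device dev = {0};
	struct iq *q = iq_create(&dev, 0);

	q->stats->pkts += 100;
	free(q);                          /* queue torn down... */
	q = iq_create(&dev, 0);           /* ...and recreated */
	printf("pkts survived teardown: %llu\n", q->stats->pkts);
	free(q);
	return 0;
}

The octeon_ep_vf hunks below apply the same restructuring to the VF driver.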
+diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c
+index a1979b45e355c6..12ddb77141cc35 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c
++++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c
+@@ -121,12 +121,9 @@ static void octep_vf_get_ethtool_stats(struct net_device *netdev,
+ 	iface_tx_stats = &oct->iface_tx_stats;
+ 	iface_rx_stats = &oct->iface_rx_stats;
+ 
+-	for (q = 0; q < oct->num_oqs; q++) {
+-		struct octep_vf_iq *iq = oct->iq[q];
+-		struct octep_vf_oq *oq = oct->oq[q];
+-
+-		tx_busy_errors += iq->stats.tx_busy;
+-		rx_alloc_errors += oq->stats.alloc_failures;
++	for (q = 0; q < OCTEP_VF_MAX_QUEUES; q++) {
++		tx_busy_errors += oct->stats_iq[q].tx_busy;
++		rx_alloc_errors += oct->stats_oq[q].alloc_failures;
+ 	}
+ 	i = 0;
+ 	data[i++] = rx_alloc_errors;
+@@ -141,22 +138,18 @@ static void octep_vf_get_ethtool_stats(struct net_device *netdev,
+ 	data[i++] = iface_rx_stats->dropped_octets_fifo_full;
+ 
+ 	/* Per Tx Queue stats */
+-	for (q = 0; q < oct->num_iqs; q++) {
+-		struct octep_vf_iq *iq = oct->iq[q];
+-
+-		data[i++] = iq->stats.instr_posted;
+-		data[i++] = iq->stats.instr_completed;
+-		data[i++] = iq->stats.bytes_sent;
+-		data[i++] = iq->stats.tx_busy;
++	for (q = 0; q < OCTEP_VF_MAX_QUEUES; q++) {
++		data[i++] = oct->stats_iq[q].instr_posted;
++		data[i++] = oct->stats_iq[q].instr_completed;
++		data[i++] = oct->stats_iq[q].bytes_sent;
++		data[i++] = oct->stats_iq[q].tx_busy;
+ 	}
+ 
+ 	/* Per Rx Queue stats */
+ 	for (q = 0; q < oct->num_oqs; q++) {
+-		struct octep_vf_oq *oq = oct->oq[q];
+-
+-		data[i++] = oq->stats.packets;
+-		data[i++] = oq->stats.bytes;
+-		data[i++] = oq->stats.alloc_failures;
++		data[i++] = oct->stats_oq[q].packets;
++		data[i++] = oct->stats_oq[q].bytes;
++		data[i++] = oct->stats_oq[q].alloc_failures;
+ 	}
+ }
+ 
+diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
+index 4c699514fd57a0..18c922dd5fc64d 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
++++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
+@@ -574,7 +574,7 @@ static int octep_vf_iq_full_check(struct octep_vf_iq *iq)
+ 		  * caused queues to get re-enabled after
+ 		  * being stopped
+ 		  */
+-		iq->stats.restart_cnt++;
++		iq->stats->restart_cnt++;
+ 		fallthrough;
+ 	case 1: /* Queue left enabled, since IQ is not yet full*/
+ 		return 0;
+@@ -731,7 +731,7 @@ static netdev_tx_t octep_vf_start_xmit(struct sk_buff *skb,
+ 	/* Flush the hw descriptors before writing to doorbell */
+ 	smp_wmb();
+ 	writel(iq->fill_cnt, iq->doorbell_reg);
+-	iq->stats.instr_posted += iq->fill_cnt;
++	iq->stats->instr_posted += iq->fill_cnt;
+ 	iq->fill_cnt = 0;
+ 	return NETDEV_TX_OK;
+ }
+@@ -786,14 +786,11 @@ static void octep_vf_get_stats64(struct net_device *netdev,
+ 	tx_bytes = 0;
+ 	rx_packets = 0;
+ 	rx_bytes = 0;
+-	for (q = 0; q < oct->num_oqs; q++) {
+-		struct octep_vf_iq *iq = oct->iq[q];
+-		struct octep_vf_oq *oq = oct->oq[q];
+-
+-		tx_packets += iq->stats.instr_completed;
+-		tx_bytes += iq->stats.bytes_sent;
+-		rx_packets += oq->stats.packets;
+-		rx_bytes += oq->stats.bytes;
++	for (q = 0; q < OCTEP_VF_MAX_QUEUES; q++) {
++		tx_packets += oct->stats_iq[q].instr_completed;
++		tx_bytes += oct->stats_iq[q].bytes_sent;
++		rx_packets += oct->stats_oq[q].packets;
++		rx_bytes += oct->stats_oq[q].bytes;
+ 	}
+ 	stats->tx_packets = tx_packets;
+ 	stats->tx_bytes = tx_bytes;
+diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h
+index 5769f62545cd44..1a352f41f823cd 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h
++++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h
+@@ -246,11 +246,17 @@ struct octep_vf_device {
+ 	/* Pointers to Octeon Tx queues */
+ 	struct octep_vf_iq *iq[OCTEP_VF_MAX_IQ];
+ 
++	/* Per iq stats */
++	struct octep_vf_iq_stats stats_iq[OCTEP_VF_MAX_IQ];
++
+ 	/* Rx queues (OQ: Output Queue) */
+ 	u16 num_oqs;
+ 	/* Pointers to Octeon Rx queues */
+ 	struct octep_vf_oq *oq[OCTEP_VF_MAX_OQ];
+ 
++	/* Per oq stats */
++	struct octep_vf_oq_stats stats_oq[OCTEP_VF_MAX_OQ];
++
+ 	/* Hardware port number of the PCIe interface */
+ 	u16 pcie_port;
+ 
+diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c
+index 82821bc28634b6..d70c8be3cfc40b 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c
++++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c
+@@ -87,7 +87,7 @@ static int octep_vf_oq_refill(struct octep_vf_device *oct, struct octep_vf_oq *o
+ 		page = dev_alloc_page();
+ 		if (unlikely(!page)) {
+ 			dev_err(oq->dev, "refill: rx buffer alloc failed\n");
+-			oq->stats.alloc_failures++;
++			oq->stats->alloc_failures++;
+ 			break;
+ 		}
+ 
+@@ -98,7 +98,7 @@ static int octep_vf_oq_refill(struct octep_vf_device *oct, struct octep_vf_oq *o
+ 				"OQ-%d buffer refill: DMA mapping error!\n",
+ 				oq->q_no);
+ 			put_page(page);
+-			oq->stats.alloc_failures++;
++			oq->stats->alloc_failures++;
+ 			break;
+ 		}
+ 		oq->buff_info[refill_idx].page = page;
+@@ -134,6 +134,7 @@ static int octep_vf_setup_oq(struct octep_vf_device *oct, int q_no)
+ 	oq->netdev = oct->netdev;
+ 	oq->dev = &oct->pdev->dev;
+ 	oq->q_no = q_no;
++	oq->stats = &oct->stats_oq[q_no];
+ 	oq->max_count = CFG_GET_OQ_NUM_DESC(oct->conf);
+ 	oq->ring_size_mask = oq->max_count - 1;
+ 	oq->buffer_size = CFG_GET_OQ_BUF_SIZE(oct->conf);
+@@ -458,8 +459,8 @@ static int __octep_vf_oq_process_rx(struct octep_vf_device *oct,
+ 
+ 	oq->host_read_idx = read_idx;
+ 	oq->refill_count += desc_used;
+-	oq->stats.packets += pkt;
+-	oq->stats.bytes += rx_bytes;
++	oq->stats->packets += pkt;
++	oq->stats->bytes += rx_bytes;
+ 
+ 	return pkt;
+ }
+diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.h
+index fe46838b5200ff..9e296b7d7e3494 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.h
++++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.h
+@@ -187,7 +187,7 @@ struct octep_vf_oq {
+ 	u8 __iomem *pkts_sent_reg;
+ 
+ 	/* Statistics for this OQ. */
+-	struct octep_vf_oq_stats stats;
++	struct octep_vf_oq_stats *stats;
+ 
+ 	/* Packets pending to be processed */
+ 	u32 pkts_pending;
+diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.c
+index 47a5c054fdb636..8180e5ce3d7efe 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.c
++++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.c
+@@ -82,9 +82,9 @@ int octep_vf_iq_process_completions(struct octep_vf_iq *iq, u16 budget)
+ 	}
+ 
+ 	iq->pkts_processed += compl_pkts;
+-	iq->stats.instr_completed += compl_pkts;
+-	iq->stats.bytes_sent += compl_bytes;
+-	iq->stats.sgentry_sent += compl_sg;
++	iq->stats->instr_completed += compl_pkts;
++	iq->stats->bytes_sent += compl_bytes;
++	iq->stats->sgentry_sent += compl_sg;
+ 	iq->flush_index = fi;
+ 
+ 	netif_subqueue_completed_wake(iq->netdev, iq->q_no, compl_pkts,
+@@ -186,6 +186,7 @@ static int octep_vf_setup_iq(struct octep_vf_device *oct, int q_no)
+ 	iq->netdev = oct->netdev;
+ 	iq->dev = &oct->pdev->dev;
+ 	iq->q_no = q_no;
++	iq->stats = &oct->stats_iq[q_no];
+ 	iq->max_count = CFG_GET_IQ_NUM_DESC(oct->conf);
+ 	iq->ring_size_mask = iq->max_count - 1;
+ 	iq->fill_threshold = CFG_GET_IQ_DB_MIN(oct->conf);
+diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.h
+index f338b975103c30..1cede90e3a5fae 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.h
++++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.h
+@@ -129,7 +129,7 @@ struct octep_vf_iq {
+ 	u16 flush_index;
+ 
+ 	/* Statistics for this input queue. */
+-	struct octep_vf_iq_stats stats;
++	struct octep_vf_iq_stats *stats;
+ 
+ 	/* Pointer to the Virtual Base addr of the input ring. */
+ 	struct octep_vf_tx_desc_hw *desc_ring;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+index b306ae79bf97a6..863196ad0ddc73 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+@@ -322,17 +322,16 @@ static void mlx5_pps_out(struct work_struct *work)
+ 	}
+ }
+ 
+-static void mlx5_timestamp_overflow(struct work_struct *work)
++static long mlx5_timestamp_overflow(struct ptp_clock_info *ptp_info)
+ {
+-	struct delayed_work *dwork = to_delayed_work(work);
+ 	struct mlx5_core_dev *mdev;
+ 	struct mlx5_timer *timer;
+ 	struct mlx5_clock *clock;
+ 	unsigned long flags;
+ 
+-	timer = container_of(dwork, struct mlx5_timer, overflow_work);
+-	clock = container_of(timer, struct mlx5_clock, timer);
++	clock = container_of(ptp_info, struct mlx5_clock, ptp_info);
+ 	mdev = container_of(clock, struct mlx5_core_dev, clock);
++	timer = &clock->timer;
+ 
+ 	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
+ 		goto out;
+@@ -343,7 +342,7 @@ static void mlx5_timestamp_overflow(struct work_struct *work)
+ 	write_sequnlock_irqrestore(&clock->lock, flags);
+ 
+ out:
+-	schedule_delayed_work(&timer->overflow_work, timer->overflow_period);
++	return timer->overflow_period;
+ }
+ 
+ static int mlx5_ptp_settime_real_time(struct mlx5_core_dev *mdev,
+@@ -521,6 +520,7 @@ static int mlx5_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+ 	timer->cycles.mult = mult;
+ 	mlx5_update_clock_info_page(mdev);
+ 	write_sequnlock_irqrestore(&clock->lock, flags);
++	ptp_schedule_worker(clock->ptp, timer->overflow_period);
+ 
+ 	return 0;
+ }
+@@ -856,6 +856,7 @@ static const struct ptp_clock_info mlx5_ptp_clock_info = {
+ 	.settime64	= mlx5_ptp_settime,
+ 	.enable		= NULL,
+ 	.verify		= NULL,
++	.do_aux_work	= mlx5_timestamp_overflow,
+ };
+ 
+ static int mlx5_query_mtpps_pin_mode(struct mlx5_core_dev *mdev, u8 pin,
+@@ -1056,12 +1057,11 @@ static void mlx5_init_overflow_period(struct mlx5_clock *clock)
+ 	do_div(ns, NSEC_PER_SEC / HZ);
+ 	timer->overflow_period = ns;
+ 
+-	INIT_DELAYED_WORK(&timer->overflow_work, mlx5_timestamp_overflow);
+-	if (timer->overflow_period)
+-		schedule_delayed_work(&timer->overflow_work, 0);
+-	else
++	if (!timer->overflow_period) {
++		timer->overflow_period = HZ;
+ 		mlx5_core_warn(mdev,
+-			       "invalid overflow period, overflow_work is not scheduled\n");
++			       "invalid overflow period, overflow_work is scheduled once per second\n");
++	}
+ 
+ 	if (clock_info)
+ 		clock_info->overflow_period = timer->overflow_period;
+@@ -1176,6 +1176,9 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev)
+ 
+ 	MLX5_NB_INIT(&clock->pps_nb, mlx5_pps_event, PPS_EVENT);
+ 	mlx5_eq_notifier_register(mdev, &clock->pps_nb);
++
++	if (clock->ptp)
++		ptp_schedule_worker(clock->ptp, 0);
+ }
+ 
+ void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
+@@ -1192,7 +1195,6 @@ void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
+ 	}
+ 
+ 	cancel_work_sync(&clock->pps_info.out_work);
+-	cancel_delayed_work_sync(&clock->timer.overflow_work);
+ 
+ 	if (mdev->clock_info) {
+ 		free_page((unsigned long)mdev->clock_info);
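The mlx5 clock fix retires the self-arming delayed work in favor of the PTP core's auxiliary worker: mlx5_timestamp_overflow() becomes the .do_aux_work callback and returns the delay, in jiffies, until its next run; ptp_schedule_worker() arms it at init and re-arms it after adjfine changes the multiplier, and the explicit cancel_delayed_work_sync() goes away because the PTP core tears the worker down with the clock. A toy model of that "return the next delay" contract, in plain C with no kernel APIs:

#include <stdio.h>

static long overflow_period = 3;    /* pretend jiffies */

static long do_aux_work(void)
{
	puts("timestamp overflow check");
	return overflow_period;         /* core re-arms after this delay */
}

int main(void)
{
	long next_run = 0;              /* ptp_schedule_worker(ptp, 0) */

	for (long tick = 0; tick < 10; tick++) {
		if (tick == next_run) {
			long delay = do_aux_work();

			if (delay < 0)
				break;  /* negative: do not reschedule */
			next_run = tick + delay;
		}
	}
	return 0;
}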
+diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+index b13c7e958e6b4e..3c0d067c360992 100644
+--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
++++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+@@ -2201,8 +2201,6 @@ static void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common)
+ 	struct device *dev = common->dev;
+ 	int i;
+ 
+-	devm_remove_action(dev, am65_cpsw_nuss_free_tx_chns, common);
+-
+ 	common->tx_ch_rate_msk = 0;
+ 	for (i = 0; i < common->tx_ch_num; i++) {
+ 		struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
+@@ -2224,8 +2222,6 @@ static int am65_cpsw_nuss_ndev_add_tx_napi(struct am65_cpsw_common *common)
+ 	for (i = 0; i < common->tx_ch_num; i++) {
+ 		struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
+ 
+-		netif_napi_add_tx(common->dma_ndev, &tx_chn->napi_tx,
+-				  am65_cpsw_nuss_tx_poll);
+ 		hrtimer_init(&tx_chn->tx_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
+ 		tx_chn->tx_hrtimer.function = &am65_cpsw_nuss_tx_timer_callback;
+ 
+@@ -2238,9 +2234,21 @@ static int am65_cpsw_nuss_ndev_add_tx_napi(struct am65_cpsw_common *common)
+ 				tx_chn->id, tx_chn->irq, ret);
+ 			goto err;
+ 		}
++
++		netif_napi_add_tx(common->dma_ndev, &tx_chn->napi_tx,
++				  am65_cpsw_nuss_tx_poll);
+ 	}
+ 
++	return 0;
++
+ err:
++	for (--i ; i >= 0 ; i--) {
++		struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
++
++		netif_napi_del(&tx_chn->napi_tx);
++		devm_free_irq(dev, tx_chn->irq, tx_chn);
++	}
++
+ 	return ret;
+ }
+ 
+@@ -2321,12 +2329,10 @@ static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common)
+ 		goto err;
+ 	}
+ 
++	return 0;
++
+ err:
+-	i = devm_add_action(dev, am65_cpsw_nuss_free_tx_chns, common);
+-	if (i) {
+-		dev_err(dev, "Failed to add free_tx_chns action %d\n", i);
+-		return i;
+-	}
++	am65_cpsw_nuss_free_tx_chns(common);
+ 
+ 	return ret;
+ }
+@@ -2354,7 +2360,6 @@ static void am65_cpsw_nuss_remove_rx_chns(struct am65_cpsw_common *common)
+ 
+ 	rx_chn = &common->rx_chns;
+ 	flows = rx_chn->flows;
+-	devm_remove_action(dev, am65_cpsw_nuss_free_rx_chns, common);
+ 
+ 	for (i = 0; i < common->rx_ch_num_flows; i++) {
+ 		if (!(flows[i].irq < 0))
+@@ -2453,7 +2458,7 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
+ 						i, &rx_flow_cfg);
+ 		if (ret) {
+ 			dev_err(dev, "Failed to init rx flow%d %d\n", i, ret);
+-			goto err;
++			goto err_flow;
+ 		}
+ 		if (!i)
+ 			fdqring_id =
+@@ -2465,14 +2470,12 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
+ 			dev_err(dev, "Failed to get rx dma irq %d\n",
+ 				flow->irq);
+ 			ret = flow->irq;
+-			goto err;
++			goto err_flow;
+ 		}
+ 
+ 		snprintf(flow->name,
+ 			 sizeof(flow->name), "%s-rx%d",
+ 			 dev_name(dev), i);
+-		netif_napi_add(common->dma_ndev, &flow->napi_rx,
+-			       am65_cpsw_nuss_rx_poll);
+ 		hrtimer_init(&flow->rx_hrtimer, CLOCK_MONOTONIC,
+ 			     HRTIMER_MODE_REL_PINNED);
+ 		flow->rx_hrtimer.function = &am65_cpsw_nuss_rx_timer_callback;
+@@ -2485,20 +2488,28 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
+ 			dev_err(dev, "failure requesting rx %d irq %u, %d\n",
+ 				i, flow->irq, ret);
+ 			flow->irq = -EINVAL;
+-			goto err;
++			goto err_flow;
+ 		}
++
++		netif_napi_add(common->dma_ndev, &flow->napi_rx,
++			       am65_cpsw_nuss_rx_poll);
+ 	}
+ 
+ 	/* setup classifier to route priorities to flows */
+ 	cpsw_ale_classifier_setup_default(common->ale, common->rx_ch_num_flows);
+ 
+-err:
+-	i = devm_add_action(dev, am65_cpsw_nuss_free_rx_chns, common);
+-	if (i) {
+-		dev_err(dev, "Failed to add free_rx_chns action %d\n", i);
+-		return i;
++	return 0;
++
++err_flow:
++	for (--i; i >= 0 ; i--) {
++		flow = &rx_chn->flows[i];
++		netif_napi_del(&flow->napi_rx);
++		devm_free_irq(dev, flow->irq, flow);
+ 	}
+ 
++err:
++	am65_cpsw_nuss_free_rx_chns(common);
++
+ 	return ret;
+ }
+ 
+@@ -3324,7 +3335,7 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
+ 		return ret;
+ 	ret = am65_cpsw_nuss_init_rx_chns(common);
+ 	if (ret)
+-		return ret;
++		goto err_remove_tx;
+ 
+ 	/* The DMA Channels are not guaranteed to be in a clean state.
+ 	 * Reset and disable them to ensure that they are back to the
+@@ -3345,7 +3356,7 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
+ 
+ 	ret = am65_cpsw_nuss_register_devlink(common);
+ 	if (ret)
+-		return ret;
++		goto err_remove_rx;
+ 
+ 	for (i = 0; i < common->port_num; i++) {
+ 		port = &common->ports[i];
+@@ -3376,6 +3387,10 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
+ err_cleanup_ndev:
+ 	am65_cpsw_nuss_cleanup_ndev(common);
+ 	am65_cpsw_unregister_devlink(common);
++err_remove_rx:
++	am65_cpsw_nuss_remove_rx_chns(common);
++err_remove_tx:
++	am65_cpsw_nuss_remove_tx_chns(common);
+ 
+ 	return ret;
+ }
+@@ -3395,6 +3410,8 @@ int am65_cpsw_nuss_update_tx_rx_chns(struct am65_cpsw_common *common,
+ 		return ret;
+ 
+ 	ret = am65_cpsw_nuss_init_rx_chns(common);
++	if (ret)
++		am65_cpsw_nuss_remove_tx_chns(common);
+ 
+ 	return ret;
+ }
+@@ -3652,6 +3669,8 @@ static void am65_cpsw_nuss_remove(struct platform_device *pdev)
+ 	 */
+ 	am65_cpsw_nuss_cleanup_ndev(common);
+ 	am65_cpsw_unregister_devlink(common);
++	am65_cpsw_nuss_remove_rx_chns(common);
++	am65_cpsw_nuss_remove_tx_chns(common);
+ 	am65_cpsw_nuss_phylink_cleanup(common);
+ 	am65_cpts_release(common->cpts);
+ 	am65_cpsw_disable_serdes_phy(common);
+@@ -3713,8 +3732,10 @@ static int am65_cpsw_nuss_resume(struct device *dev)
+ 	if (ret)
+ 		return ret;
+ 	ret = am65_cpsw_nuss_init_rx_chns(common);
+-	if (ret)
++	if (ret) {
++		am65_cpsw_nuss_remove_tx_chns(common);
+ 		return ret;
++	}
+ 
+ 	/* If RX IRQ was disabled before suspend, keep it disabled */
+ 	for (i = 0; i < common->rx_ch_num_flows; i++) {
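The am65-cpsw rework above drops the devm_add_action() cleanup registrations, which only fired at device unbind, in favor of explicit teardown: IRQs are now requested before netif_napi_add*() so a half-initialized channel never has a live NAPI context, success paths return 0 directly instead of falling through, and the new err/err_flow labels unwind only the items that fully initialized, in reverse order; the callers (register_ndevs, update_tx_rx_chns, resume, remove) gain matching error handling. The unwind idiom, reduced to a standalone sketch with illustrative names:

#include <stdio.h>

#define N 4

static int init_item(int i)
{
	printf("init %d\n", i);
	return i == 2 ? -1 : 0;    /* simulate a failure on item 2 */
}

static void undo_item(int i)
{
	printf("undo %d\n", i);
}

static int init_all(void)
{
	int i, ret;

	for (i = 0; i < N; i++) {
		ret = init_item(i);
		if (ret)
			goto err;
	}
	return 0;

err:
	for (--i; i >= 0; i--)     /* only items that fully initialized */
		undo_item(i);
	return ret;
}

int main(void)
{
	return init_all() ? 1 : 0;
}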
+diff --git a/drivers/net/phy/nxp-c45-tja11xx.c b/drivers/net/phy/nxp-c45-tja11xx.c
+index 5af5ade4fc6418..ae43103c76cbd8 100644
+--- a/drivers/net/phy/nxp-c45-tja11xx.c
++++ b/drivers/net/phy/nxp-c45-tja11xx.c
+@@ -1296,6 +1296,8 @@ static int nxp_c45_soft_reset(struct phy_device *phydev)
+ 	if (ret)
+ 		return ret;
+ 
++	usleep_range(2000, 2050);
++
+ 	return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
+ 					 VEND1_DEVICE_CONTROL, ret,
+ 					 !(ret & DEVICE_CONTROL_RESET), 20000,
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 6fc60950100c7c..fae1a0ab36bdfe 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -580,7 +580,7 @@ static inline bool tun_not_capable(struct tun_struct *tun)
+ 	struct net *net = dev_net(tun->dev);
+ 
+ 	return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
+-		  (gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
++		(gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
+ 		!ns_capable(net->user_ns, CAP_NET_ADMIN);
+ }
+ 
+diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
+index 46afb95ffabe3b..a19789b571905a 100644
+--- a/drivers/net/usb/ipheth.c
++++ b/drivers/net/usb/ipheth.c
+@@ -61,7 +61,18 @@
+ #define IPHETH_USBINTF_PROTO    1
+ 
+ #define IPHETH_IP_ALIGN		2	/* padding at front of URB */
+-#define IPHETH_NCM_HEADER_SIZE  (12 + 96) /* NCMH + NCM0 */
++/* On iOS devices, NCM headers in RX have a fixed size regardless of DPE count:
++ * - NTH16 (NCMH): 12 bytes, as per CDC NCM 1.0 spec
++ * - NDP16 (NCM0): 96 bytes, of which
++ *    - NDP16 fixed header: 8 bytes
++ *    - maximum of 22 DPEs (21 datagrams + trailer), 4 bytes each
++ */
++#define IPHETH_NDP16_MAX_DPE	22
++#define IPHETH_NDP16_HEADER_SIZE (sizeof(struct usb_cdc_ncm_ndp16) + \
++				  IPHETH_NDP16_MAX_DPE * \
++				  sizeof(struct usb_cdc_ncm_dpe16))
++#define IPHETH_NCM_HEADER_SIZE	(sizeof(struct usb_cdc_ncm_nth16) + \
++				 IPHETH_NDP16_HEADER_SIZE)
+ #define IPHETH_TX_BUF_SIZE      ETH_FRAME_LEN
+ #define IPHETH_RX_BUF_SIZE_LEGACY (IPHETH_IP_ALIGN + ETH_FRAME_LEN)
+ #define IPHETH_RX_BUF_SIZE_NCM	65536
+@@ -207,15 +218,23 @@ static int ipheth_rcvbulk_callback_legacy(struct urb *urb)
+ 	return ipheth_consume_skb(buf, len, dev);
+ }
+ 
++/* In "NCM mode", the iOS device encapsulates RX (phone->computer) traffic
++ * in NCM Transfer Blocks (similarly to CDC NCM). However, unlike reverse
++ * tethering (handled by the `cdc_ncm` driver), regular tethering is not
++ * compliant with the CDC NCM spec, as the device is missing the necessary
++ * descriptors, and TX (computer->phone) traffic is not encapsulated
++ * at all. Thus `ipheth` implements a very limited subset of the spec with
++ * the sole purpose of parsing RX URBs.
++ */
+ static int ipheth_rcvbulk_callback_ncm(struct urb *urb)
+ {
+ 	struct usb_cdc_ncm_nth16 *ncmh;
+ 	struct usb_cdc_ncm_ndp16 *ncm0;
+ 	struct usb_cdc_ncm_dpe16 *dpe;
+ 	struct ipheth_device *dev;
++	u16 dg_idx, dg_len;
+ 	int retval = -EINVAL;
+ 	char *buf;
+-	int len;
+ 
+ 	dev = urb->context;
+ 
+@@ -226,40 +245,42 @@ static int ipheth_rcvbulk_callback_ncm(struct urb *urb)
+ 
+ 	ncmh = urb->transfer_buffer;
+ 	if (ncmh->dwSignature != cpu_to_le32(USB_CDC_NCM_NTH16_SIGN) ||
+-	    le16_to_cpu(ncmh->wNdpIndex) >= urb->actual_length) {
+-		dev->net->stats.rx_errors++;
+-		return retval;
+-	}
++	    /* On iOS, NDP16 directly follows NTH16 */
++	    ncmh->wNdpIndex != cpu_to_le16(sizeof(struct usb_cdc_ncm_nth16)))
++		goto rx_error;
+ 
+-	ncm0 = urb->transfer_buffer + le16_to_cpu(ncmh->wNdpIndex);
+-	if (ncm0->dwSignature != cpu_to_le32(USB_CDC_NCM_NDP16_NOCRC_SIGN) ||
+-	    le16_to_cpu(ncmh->wHeaderLength) + le16_to_cpu(ncm0->wLength) >=
+-	    urb->actual_length) {
+-		dev->net->stats.rx_errors++;
+-		return retval;
+-	}
++	ncm0 = urb->transfer_buffer + sizeof(struct usb_cdc_ncm_nth16);
++	if (ncm0->dwSignature != cpu_to_le32(USB_CDC_NCM_NDP16_NOCRC_SIGN))
++		goto rx_error;
+ 
+ 	dpe = ncm0->dpe16;
+-	while (le16_to_cpu(dpe->wDatagramIndex) != 0 &&
+-	       le16_to_cpu(dpe->wDatagramLength) != 0) {
+-		if (le16_to_cpu(dpe->wDatagramIndex) >= urb->actual_length ||
+-		    le16_to_cpu(dpe->wDatagramIndex) +
+-		    le16_to_cpu(dpe->wDatagramLength) > urb->actual_length) {
++	for (int dpe_i = 0; dpe_i < IPHETH_NDP16_MAX_DPE; ++dpe_i, ++dpe) {
++		dg_idx = le16_to_cpu(dpe->wDatagramIndex);
++		dg_len = le16_to_cpu(dpe->wDatagramLength);
++
++		/* Null DPE must be present after last datagram pointer entry
++		 * (3.3.1 USB CDC NCM spec v1.0)
++		 */
++		if (dg_idx == 0 && dg_len == 0)
++			return 0;
++
++		if (dg_idx < IPHETH_NCM_HEADER_SIZE ||
++		    dg_idx >= urb->actual_length ||
++		    dg_len > urb->actual_length - dg_idx) {
+ 			dev->net->stats.rx_length_errors++;
+ 			return retval;
+ 		}
+ 
+-		buf = urb->transfer_buffer + le16_to_cpu(dpe->wDatagramIndex);
+-		len = le16_to_cpu(dpe->wDatagramLength);
++		buf = urb->transfer_buffer + dg_idx;
+ 
+-		retval = ipheth_consume_skb(buf, len, dev);
++		retval = ipheth_consume_skb(buf, dg_len, dev);
+ 		if (retval != 0)
+ 			return retval;
+-
+-		dpe++;
+ 	}
+ 
+-	return 0;
++rx_error:
++	dev->net->stats.rx_errors++;
++	return retval;
+ }
+ 
+ static void ipheth_rcvbulk_callback(struct urb *urb)
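The ipheth rework pins down the fixed RX header layout instead of trusting device-supplied offsets: an NTH16 is 12 bytes (sizeof(struct usb_cdc_ncm_nth16)), and the NDP16 that immediately follows it is an 8-byte fixed header plus at most 22 four-byte DPEs (21 datagrams plus the mandatory null trailer), so IPHETH_NCM_HEADER_SIZE works out to 12 + 8 + 22 * 4 = 108 bytes, the same value the old 12 + 96 literal encoded. Each datagram must then start at or beyond that header and fit inside urb->actual_length; note the length test is phrased as dg_len > actual_length - dg_idx, which is well-defined because dg_idx < actual_length has already been checked. The bounds test in isolation, as a minimal sketch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NCM_HEADER_SIZE (12u + 8u + 22u * 4u)  /* NTH16 + NDP16 + 22 DPEs */

static bool datagram_in_bounds(uint16_t idx, uint16_t len, uint32_t actual)
{
	return idx >= NCM_HEADER_SIZE &&
	       idx < actual &&
	       len <= actual - idx;  /* safe: idx < actual checked first */
}

int main(void)
{
	printf("header size: %u\n", NCM_HEADER_SIZE);      /* 108 */
	printf("%d\n", datagram_in_bounds(108, 64, 512));  /* 1: fits */
	printf("%d\n", datagram_in_bounds(500, 64, 512));  /* 0: runs past end */
	return 0;
}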
+diff --git a/drivers/net/vmxnet3/vmxnet3_xdp.c b/drivers/net/vmxnet3/vmxnet3_xdp.c
+index 1341374a4588a0..616ecc38d1726c 100644
+--- a/drivers/net/vmxnet3/vmxnet3_xdp.c
++++ b/drivers/net/vmxnet3/vmxnet3_xdp.c
+@@ -28,7 +28,7 @@ vmxnet3_xdp_get_tq(struct vmxnet3_adapter *adapter)
+ 	if (likely(cpu < tq_number))
+ 		tq = &adapter->tx_queue[cpu];
+ 	else
+-		tq = &adapter->tx_queue[reciprocal_scale(cpu, tq_number)];
++		tq = &adapter->tx_queue[cpu % tq_number];
+ 
+ 	return tq;
+ }
+@@ -124,6 +124,7 @@ vmxnet3_xdp_xmit_frame(struct vmxnet3_adapter *adapter,
+ 	u32 buf_size;
+ 	u32 dw2;
+ 
++	spin_lock_irq(&tq->tx_lock);
+ 	dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;
+ 	dw2 |= xdpf->len;
+ 	ctx.sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
+@@ -134,6 +135,7 @@ vmxnet3_xdp_xmit_frame(struct vmxnet3_adapter *adapter,
+ 
+ 	if (vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) == 0) {
+ 		tq->stats.tx_ring_full++;
++		spin_unlock_irq(&tq->tx_lock);
+ 		return -ENOSPC;
+ 	}
+ 
+@@ -142,8 +144,10 @@ vmxnet3_xdp_xmit_frame(struct vmxnet3_adapter *adapter,
+ 		tbi->dma_addr = dma_map_single(&adapter->pdev->dev,
+ 					       xdpf->data, buf_size,
+ 					       DMA_TO_DEVICE);
+-		if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
++		if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr)) {
++			spin_unlock_irq(&tq->tx_lock);
+ 			return -EFAULT;
++		}
+ 		tbi->map_type |= VMXNET3_MAP_SINGLE;
+ 	} else { /* XDP buffer from page pool */
+ 		page = virt_to_page(xdpf->data);
+@@ -182,6 +186,7 @@ vmxnet3_xdp_xmit_frame(struct vmxnet3_adapter *adapter,
+ 	dma_wmb();
+ 	gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
+ 						  VMXNET3_TXD_GEN);
++	spin_unlock_irq(&tq->tx_lock);
+ 
+ 	/* No need to handle the case when tx_num_deferred doesn't reach
+ 	 * threshold. Backend driver at hypervisor side will poll and reset
+@@ -225,6 +230,7 @@ vmxnet3_xdp_xmit(struct net_device *dev,
+ {
+ 	struct vmxnet3_adapter *adapter = netdev_priv(dev);
+ 	struct vmxnet3_tx_queue *tq;
++	struct netdev_queue *nq;
+ 	int i;
+ 
+ 	if (unlikely(test_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state)))
+@@ -236,6 +242,9 @@ vmxnet3_xdp_xmit(struct net_device *dev,
+ 	if (tq->stopped)
+ 		return -ENETDOWN;
+ 
++	nq = netdev_get_tx_queue(adapter->netdev, tq->qid);
++
++	__netif_tx_lock(nq, smp_processor_id());
+ 	for (i = 0; i < n; i++) {
+ 		if (vmxnet3_xdp_xmit_frame(adapter, frames[i], tq, true)) {
+ 			tq->stats.xdp_xmit_err++;
+@@ -243,6 +252,7 @@ vmxnet3_xdp_xmit(struct net_device *dev,
+ 		}
+ 	}
+ 	tq->stats.xdp_xmit += i;
++	__netif_tx_unlock(nq);
+ 
+ 	return i;
+ }
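The vmxnet3 hunks close a race between ndo_xdp_xmit and the regular transmit path sharing the same descriptor ring: vmxnet3_xdp_xmit_frame() now holds the queue's tx_lock for the whole descriptor setup, releasing it on every early-exit path, and vmxnet3_xdp_xmit() additionally takes the netdev TX queue lock around the batch. The queue pick also changes from reciprocal_scale(cpu, tq_number) to cpu % tq_number, giving each CPU a stable queue. A pthreads sketch of the "unlock on every exit" shape, illustrative only:

#include <pthread.h>
#include <stdio.h>

struct tx_queue {
	pthread_mutex_t tx_lock;    /* protects the ring descriptors */
	int ring_fill;
};

static int xmit_frame(struct tx_queue *tq)
{
	pthread_mutex_lock(&tq->tx_lock);
	if (tq->ring_fill >= 8) {   /* ring full */
		pthread_mutex_unlock(&tq->tx_lock);  /* drop on early exit */
		return -1;
	}
	tq->ring_fill++;
	pthread_mutex_unlock(&tq->tx_lock);
	return 0;
}

int main(void)
{
	struct tx_queue tq = { PTHREAD_MUTEX_INITIALIZER, 0 };
	int sent = 0;

	for (int i = 0; i < 10; i++)  /* a batch, like ndo_xdp_xmit */
		if (xmit_frame(&tq) == 0)
			sent++;
	printf("sent %d of 10\n", sent);
	return 0;
}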
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+index da72fd2d541ff7..20ab9b1eea2836 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+@@ -540,6 +540,11 @@ void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success)
+ 	struct ethhdr *eh;
+ 	u16 type;
+ 
++	if (!ifp) {
++		brcmu_pkt_buf_free_skb(txp);
++		return;
++	}
++
+ 	eh = (struct ethhdr *)(txp->data);
+ 	type = ntohs(eh->h_proto);
+ 
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
+index af930e34c21f8a..22c064848124d8 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
+@@ -97,13 +97,13 @@ void brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type,
+ 	/* Set board-type to the first string of the machine compatible prop */
+ 	root = of_find_node_by_path("/");
+ 	if (root && err) {
+-		char *board_type;
++		char *board_type = NULL;
+ 		const char *tmp;
+ 
+-		of_property_read_string_index(root, "compatible", 0, &tmp);
+-
+ 		/* get rid of '/' in the compatible string to be able to find the FW */
+-		board_type = devm_kstrdup(dev, tmp, GFP_KERNEL);
++		if (!of_property_read_string_index(root, "compatible", 0, &tmp))
++			board_type = devm_kstrdup(dev, tmp, GFP_KERNEL);
++
+ 		if (!board_type) {
+ 			of_node_put(root);
+ 			return;
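The brcmfmac of.c fix stops consuming the out-parameter of of_property_read_string_index() when the call fails: previously tmp could reach devm_kstrdup() uninitialized. With board_type preinitialized to NULL, a failed lookup now falls through to the existing !board_type bail-out. The guarded-out-parameter pattern in isolation, with made-up names:

#include <stdio.h>
#include <string.h>

static int read_string(int ok, const char **out)
{
	if (!ok)
		return -22;            /* -EINVAL: *out left untouched */
	*out = "vendor,board";
	return 0;
}

int main(void)
{
	const char *tmp;
	char buf[32] = "";             /* board_type starts out unset */

	if (!read_string(1, &tmp))     /* only use tmp on success */
		strncpy(buf, tmp, sizeof(buf) - 1);
	printf("board: %s\n", buf[0] ? buf : "(unset)");
	return 0;
}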
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
+index d69879e1bd870c..d362c4337616b4 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
+@@ -23423,6 +23423,9 @@ wlc_phy_iqcal_gainparams_nphy(struct brcms_phy *pi, u16 core_no,
+ 				break;
+ 		}
+ 
++		if (WARN_ON(k == NPHY_IQCAL_NUMGAINS))
++			return;
++
+ 		params->txgm = tbl_iqcal_gainparams_nphy[band_idx][k][1];
+ 		params->pga = tbl_iqcal_gainparams_nphy[band_idx][k][2];
+ 		params->pad = tbl_iqcal_gainparams_nphy[band_idx][k][3];
+diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile
+index 64c1233142451a..a3052684b341f2 100644
+--- a/drivers/net/wireless/intel/iwlwifi/Makefile
++++ b/drivers/net/wireless/intel/iwlwifi/Makefile
+@@ -11,7 +11,7 @@ iwlwifi-objs		+= pcie/ctxt-info.o pcie/ctxt-info-gen3.o
+ iwlwifi-objs		+= pcie/trans-gen2.o pcie/tx-gen2.o
+ iwlwifi-$(CONFIG_IWLDVM) += cfg/1000.o cfg/2000.o cfg/5000.o cfg/6000.o
+ iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o cfg/9000.o cfg/22000.o
+-iwlwifi-$(CONFIG_IWLMVM) += cfg/ax210.o cfg/bz.o cfg/sc.o
++iwlwifi-$(CONFIG_IWLMVM) += cfg/ax210.o cfg/bz.o cfg/sc.o cfg/dr.o
+ iwlwifi-objs		+= iwl-dbg-tlv.o
+ iwlwifi-objs		+= iwl-trans.o
+ 
+diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/dr.c b/drivers/net/wireless/intel/iwlwifi/cfg/dr.c
+new file mode 100644
+index 00000000000000..ab7c0f8d54f425
+--- /dev/null
++++ b/drivers/net/wireless/intel/iwlwifi/cfg/dr.c
+@@ -0,0 +1,167 @@
++// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
++/*
++ * Copyright (C) 2024 Intel Corporation
++ */
++#include <linux/module.h>
++#include <linux/stringify.h>
++#include "iwl-config.h"
++#include "iwl-prph.h"
++#include "fw/api/txq.h"
++
++/* Highest firmware API version supported */
++#define IWL_DR_UCODE_API_MAX	96
++
++/* Lowest firmware API version supported */
++#define IWL_DR_UCODE_API_MIN	96
++
++/* NVM versions */
++#define IWL_DR_NVM_VERSION		0x0a1d
++
++/* Memory offsets and lengths */
++#define IWL_DR_DCCM_OFFSET		0x800000 /* LMAC1 */
++#define IWL_DR_DCCM_LEN			0x10000 /* LMAC1 */
++#define IWL_DR_DCCM2_OFFSET		0x880000
++#define IWL_DR_DCCM2_LEN		0x8000
++#define IWL_DR_SMEM_OFFSET		0x400000
++#define IWL_DR_SMEM_LEN			0xD0000
++
++#define IWL_DR_A_PE_A_FW_PRE		"iwlwifi-dr-a0-pe-a0"
++#define IWL_BR_A_PET_A_FW_PRE		"iwlwifi-br-a0-petc-a0"
++#define IWL_BR_A_PE_A_FW_PRE		"iwlwifi-br-a0-pe-a0"
++
++#define IWL_DR_A_PE_A_FW_MODULE_FIRMWARE(api) \
++	IWL_DR_A_PE_A_FW_PRE "-" __stringify(api) ".ucode"
++#define IWL_BR_A_PET_A_FW_MODULE_FIRMWARE(api) \
++	IWL_BR_A_PET_A_FW_PRE "-" __stringify(api) ".ucode"
++#define IWL_BR_A_PE_A_FW_MODULE_FIRMWARE(api) \
++	IWL_BR_A_PE_A_FW_PRE "-" __stringify(api) ".ucode"
++
++static const struct iwl_base_params iwl_dr_base_params = {
++	.eeprom_size = OTP_LOW_IMAGE_SIZE_32K,
++	.num_of_queues = 512,
++	.max_tfd_queue_size = 65536,
++	.shadow_ram_support = true,
++	.led_compensation = 57,
++	.wd_timeout = IWL_LONG_WD_TIMEOUT,
++	.max_event_log_size = 512,
++	.shadow_reg_enable = true,
++	.pcie_l1_allowed = true,
++};
++
++#define IWL_DEVICE_DR_COMMON						\
++	.ucode_api_max = IWL_DR_UCODE_API_MAX,			\
++	.ucode_api_min = IWL_DR_UCODE_API_MIN,			\
++	.led_mode = IWL_LED_RF_STATE,					\
++	.nvm_hw_section_num = 10,					\
++	.non_shared_ant = ANT_B,					\
++	.dccm_offset = IWL_DR_DCCM_OFFSET,				\
++	.dccm_len = IWL_DR_DCCM_LEN,					\
++	.dccm2_offset = IWL_DR_DCCM2_OFFSET,				\
++	.dccm2_len = IWL_DR_DCCM2_LEN,				\
++	.smem_offset = IWL_DR_SMEM_OFFSET,				\
++	.smem_len = IWL_DR_SMEM_LEN,					\
++	.apmg_not_supported = true,					\
++	.trans.mq_rx_supported = true,					\
++	.vht_mu_mimo_supported = true,					\
++	.mac_addr_from_csr = 0x30,					\
++	.nvm_ver = IWL_DR_NVM_VERSION,				\
++	.trans.rf_id = true,						\
++	.trans.gen2 = true,						\
++	.nvm_type = IWL_NVM_EXT,					\
++	.dbgc_supported = true,						\
++	.min_umac_error_event_table = 0xD0000,				\
++	.d3_debug_data_base_addr = 0x401000,				\
++	.d3_debug_data_length = 60 * 1024,				\
++	.mon_smem_regs = {						\
++		.write_ptr = {						\
++			.addr = LDBG_M2S_BUF_WPTR,			\
++			.mask = LDBG_M2S_BUF_WPTR_VAL_MSK,		\
++	},								\
++		.cycle_cnt = {						\
++			.addr = LDBG_M2S_BUF_WRAP_CNT,			\
++			.mask = LDBG_M2S_BUF_WRAP_CNT_VAL_MSK,		\
++		},							\
++	},								\
++	.trans.umac_prph_offset = 0x300000,				\
++	.trans.device_family = IWL_DEVICE_FAMILY_DR,			\
++	.trans.base_params = &iwl_dr_base_params,			\
++	.min_txq_size = 128,						\
++	.gp2_reg_addr = 0xd02c68,					\
++	.min_ba_txq_size = IWL_DEFAULT_QUEUE_SIZE_EHT,			\
++	.mon_dram_regs = {						\
++		.write_ptr = {						\
++			.addr = DBGC_CUR_DBGBUF_STATUS,			\
++			.mask = DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK,	\
++		},							\
++		.cycle_cnt = {						\
++			.addr = DBGC_DBGBUF_WRAP_AROUND,		\
++			.mask = 0xffffffff,				\
++		},							\
++		.cur_frag = {						\
++			.addr = DBGC_CUR_DBGBUF_STATUS,			\
++			.mask = DBGC_CUR_DBGBUF_STATUS_IDX_MSK,		\
++		},							\
++	},								\
++	.mon_dbgi_regs = {						\
++		.write_ptr = {						\
++			.addr = DBGI_SRAM_FIFO_POINTERS,		\
++			.mask = DBGI_SRAM_FIFO_POINTERS_WR_PTR_MSK,	\
++		},							\
++	}
++
++#define IWL_DEVICE_DR							\
++	IWL_DEVICE_DR_COMMON,						\
++	.uhb_supported = true,						\
++	.features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM,		\
++	.num_rbds = IWL_NUM_RBDS_DR_EHT,				\
++	.ht_params = &iwl_22000_ht_params
++
++/*
++ * This size was picked according to 8 MSDUs inside 512 A-MSDUs in an
++ * A-MPDU, with additional overhead to account for processing time.
++ */
++#define IWL_NUM_RBDS_DR_EHT		(512 * 16)
++
++const struct iwl_cfg_trans_params iwl_dr_trans_cfg = {
++	.device_family = IWL_DEVICE_FAMILY_DR,
++	.base_params = &iwl_dr_base_params,
++	.mq_rx_supported = true,
++	.rf_id = true,
++	.gen2 = true,
++	.integrated = true,
++	.umac_prph_offset = 0x300000,
++	.xtal_latency = 12000,
++	.low_latency_xtal = true,
++	.ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US,
++};
++
++const char iwl_dr_name[] = "Intel(R) TBD Dr device";
++
++const struct iwl_cfg iwl_cfg_dr = {
++	.fw_name_mac = "dr",
++	IWL_DEVICE_DR,
++};
++
++const struct iwl_cfg_trans_params iwl_br_trans_cfg = {
++	.device_family = IWL_DEVICE_FAMILY_DR,
++	.base_params = &iwl_dr_base_params,
++	.mq_rx_supported = true,
++	.rf_id = true,
++	.gen2 = true,
++	.integrated = true,
++	.umac_prph_offset = 0x300000,
++	.xtal_latency = 12000,
++	.low_latency_xtal = true,
++	.ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US,
++};
++
++const char iwl_br_name[] = "Intel(R) TBD Br device";
++
++const struct iwl_cfg iwl_cfg_br = {
++	.fw_name_mac = "br",
++	IWL_DEVICE_DR,
++};
++
++MODULE_FIRMWARE(IWL_DR_A_PE_A_FW_MODULE_FIRMWARE(IWL_DR_UCODE_API_MAX));
++MODULE_FIRMWARE(IWL_BR_A_PET_A_FW_MODULE_FIRMWARE(IWL_DR_UCODE_API_MAX));
++MODULE_FIRMWARE(IWL_BR_A_PE_A_FW_MODULE_FIRMWARE(IWL_DR_UCODE_API_MAX));
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+index 0bc32291815e1b..a26c5573d20916 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
++++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+@@ -108,7 +108,7 @@ static int iwl_acpi_get_dsm_integer(struct device *dev, int rev, int func,
+ 				    size_t expected_size)
+ {
+ 	union acpi_object *obj;
+-	int ret = 0;
++	int ret;
+ 
+ 	obj = iwl_acpi_get_dsm_object(dev, rev, func, NULL, guid);
+ 	if (IS_ERR(obj)) {
+@@ -123,8 +123,10 @@ static int iwl_acpi_get_dsm_integer(struct device *dev, int rev, int func,
+ 	} else if (obj->type == ACPI_TYPE_BUFFER) {
+ 		__le64 le_value = 0;
+ 
+-		if (WARN_ON_ONCE(expected_size > sizeof(le_value)))
+-			return -EINVAL;
++		if (WARN_ON_ONCE(expected_size > sizeof(le_value))) {
++			ret = -EINVAL;
++			goto out;
++		}
+ 
+ 		/* if the buffer size doesn't match the expected size */
+ 		if (obj->buffer.length != expected_size)
+@@ -145,8 +147,9 @@ static int iwl_acpi_get_dsm_integer(struct device *dev, int rev, int func,
+ 	}
+ 
+ 	IWL_DEBUG_DEV_RADIO(dev,
+-			    "ACPI: DSM method evaluated: func=%d, ret=%d\n",
+-			    func, ret);
++			    "ACPI: DSM method evaluated: func=%d, value=%lld\n",
++			    func, *value);
++	ret = 0;
+ out:
+ 	ACPI_FREE(obj);
+ 	return ret;
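In iwl_acpi_get_dsm_integer() the early return on an oversized buffer leaked the ACPI object; routing it through the out label keeps ACPI_FREE(obj) on every path, and ret is now set to 0 only once a value has actually been extracted, with the debug print reporting the value instead of a stale ret. The goto-unwind shape, reduced to plain C with illustrative stand-ins:

#include <stdio.h>
#include <stdlib.h>

static int get_value(long *value)
{
	long *obj = malloc(sizeof(*obj));  /* stands in for the ACPI object */
	int ret;

	if (!obj)
		return -12;                /* -ENOMEM: nothing to free yet */
	*obj = 42;

	if (*obj < 0) {
		ret = -22;                 /* -EINVAL: still frees obj */
		goto out;
	}

	*value = *obj;
	ret = 0;                           /* success set only here */
out:
	free(obj);                         /* every path releases it */
	return ret;
}

int main(void)
{
	long v = 0;

	printf("ret=%d value=%ld\n", get_value(&v), v);
	return 0;
}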
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+index 17721bb47e2511..89744dbedb4a5a 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+@@ -38,6 +38,7 @@ enum iwl_device_family {
+ 	IWL_DEVICE_FAMILY_AX210,
+ 	IWL_DEVICE_FAMILY_BZ,
+ 	IWL_DEVICE_FAMILY_SC,
++	IWL_DEVICE_FAMILY_DR,
+ };
+ 
+ /*
+@@ -424,6 +425,8 @@ struct iwl_cfg {
+ #define IWL_CFG_MAC_TYPE_SC2		0x49
+ #define IWL_CFG_MAC_TYPE_SC2F		0x4A
+ #define IWL_CFG_MAC_TYPE_BZ_W		0x4B
++#define IWL_CFG_MAC_TYPE_BR		0x4C
++#define IWL_CFG_MAC_TYPE_DR		0x4D
+ 
+ #define IWL_CFG_RF_TYPE_TH		0x105
+ #define IWL_CFG_RF_TYPE_TH1		0x108
+@@ -434,6 +437,7 @@ struct iwl_cfg {
+ #define IWL_CFG_RF_TYPE_GF		0x10D
+ #define IWL_CFG_RF_TYPE_FM		0x112
+ #define IWL_CFG_RF_TYPE_WH		0x113
++#define IWL_CFG_RF_TYPE_PE		0x114
+ 
+ #define IWL_CFG_RF_ID_TH		0x1
+ #define IWL_CFG_RF_ID_TH1		0x1
+@@ -506,6 +510,8 @@ extern const struct iwl_cfg_trans_params iwl_ma_trans_cfg;
+ extern const struct iwl_cfg_trans_params iwl_bz_trans_cfg;
+ extern const struct iwl_cfg_trans_params iwl_gl_trans_cfg;
+ extern const struct iwl_cfg_trans_params iwl_sc_trans_cfg;
++extern const struct iwl_cfg_trans_params iwl_dr_trans_cfg;
++extern const struct iwl_cfg_trans_params iwl_br_trans_cfg;
+ extern const char iwl9162_name[];
+ extern const char iwl9260_name[];
+ extern const char iwl9260_1_name[];
+@@ -551,6 +557,8 @@ extern const char iwl_mtp_name[];
+ extern const char iwl_sc_name[];
+ extern const char iwl_sc2_name[];
+ extern const char iwl_sc2f_name[];
++extern const char iwl_dr_name[];
++extern const char iwl_br_name[];
+ #if IS_ENABLED(CONFIG_IWLDVM)
+ extern const struct iwl_cfg iwl5300_agn_cfg;
+ extern const struct iwl_cfg iwl5100_agn_cfg;
+@@ -658,6 +666,8 @@ extern const struct iwl_cfg iwl_cfg_gl;
+ extern const struct iwl_cfg iwl_cfg_sc;
+ extern const struct iwl_cfg iwl_cfg_sc2;
+ extern const struct iwl_cfg iwl_cfg_sc2f;
++extern const struct iwl_cfg iwl_cfg_dr;
++extern const struct iwl_cfg iwl_cfg_br;
+ #endif /* CONFIG_IWLMVM */
+ 
+ #endif /* __IWL_CONFIG_H__ */
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+index 8fb2aa28224212..9dd0e0a51ce5cc 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+@@ -540,6 +540,9 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct pci_device_id iwl_hw_card_ids[] = {
+ 	{IWL_PCI_DEVICE(0xE340, PCI_ANY_ID, iwl_sc_trans_cfg)},
+ 	{IWL_PCI_DEVICE(0xD340, PCI_ANY_ID, iwl_sc_trans_cfg)},
+ 	{IWL_PCI_DEVICE(0x6E70, PCI_ANY_ID, iwl_sc_trans_cfg)},
++
++/* Dr devices */
++	{IWL_PCI_DEVICE(0x272F, PCI_ANY_ID, iwl_dr_trans_cfg)},
+ #endif /* CONFIG_IWLMVM */
+ 
+ 	{0}
+@@ -1182,6 +1185,19 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = {
+ 		      IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ 		      IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ 		      iwl_cfg_sc2f, iwl_sc2f_name),
++/* Dr */
++	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
++		      IWL_CFG_MAC_TYPE_DR, IWL_CFG_ANY,
++		      IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
++		      IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
++		      iwl_cfg_dr, iwl_dr_name),
++
++/* Br */
++	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
++		      IWL_CFG_MAC_TYPE_BR, IWL_CFG_ANY,
++		      IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
++		      IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
++		      iwl_cfg_br, iwl_br_name),
+ #endif /* CONFIG_IWLMVM */
+ };
+ EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_dev_info_table);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
+index bfdbc15abaa9a7..928e0b07a9bf18 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
+@@ -2,9 +2,14 @@
+ /* Copyright (C) 2020 MediaTek Inc. */
+ 
+ #include <linux/firmware.h>
++#include <linux/moduleparam.h>
+ #include "mt7915.h"
+ #include "eeprom.h"
+ 
++static bool enable_6ghz;
++module_param(enable_6ghz, bool, 0644);
++MODULE_PARM_DESC(enable_6ghz, "Enable 6 GHz instead of 5 GHz on hardware that supports both");
++
+ static int mt7915_eeprom_load_precal(struct mt7915_dev *dev)
+ {
+ 	struct mt76_dev *mdev = &dev->mt76;
+@@ -170,8 +175,20 @@ static void mt7915_eeprom_parse_band_config(struct mt7915_phy *phy)
+ 			phy->mt76->cap.has_6ghz = true;
+ 			return;
+ 		case MT_EE_V2_BAND_SEL_5GHZ_6GHZ:
+-			phy->mt76->cap.has_5ghz = true;
+-			phy->mt76->cap.has_6ghz = true;
++			if (enable_6ghz) {
++				phy->mt76->cap.has_6ghz = true;
++				u8p_replace_bits(&eeprom[MT_EE_WIFI_CONF + band],
++						 MT_EE_V2_BAND_SEL_6GHZ,
++						 MT_EE_WIFI_CONF0_BAND_SEL);
++			} else {
++				phy->mt76->cap.has_5ghz = true;
++				u8p_replace_bits(&eeprom[MT_EE_WIFI_CONF + band],
++						 MT_EE_V2_BAND_SEL_5GHZ,
++						 MT_EE_WIFI_CONF0_BAND_SEL);
++			}
++			/* force to buffer mode */
++			dev->flash_mode = true;
++
+ 			return;
+ 		default:
+ 			phy->mt76->cap.has_2ghz = true;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
+index 77d82ccd73079d..bc983ab10b0c7a 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
+@@ -1239,14 +1239,14 @@ int mt7915_register_device(struct mt7915_dev *dev)
+ 	if (ret)
+ 		goto unreg_dev;
+ 
+-	ieee80211_queue_work(mt76_hw(dev), &dev->init_work);
+-
+ 	if (phy2) {
+ 		ret = mt7915_register_ext_phy(dev, phy2);
+ 		if (ret)
+ 			goto unreg_thermal;
+ 	}
+ 
++	ieee80211_queue_work(mt76_hw(dev), &dev->init_work);
++
+ 	dev->recovery.hw_init_done = true;
+ 
+ 	ret = mt7915_init_debugfs(&dev->phy);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
+index 8aa4f0203208ab..e3459295ad884e 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
+@@ -21,6 +21,9 @@ static const struct usb_device_id mt7921u_device_table[] = {
+ 	/* Netgear, Inc. [A8000,AXE3000] */
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0x9060, 0xff, 0xff, 0xff),
+ 		.driver_info = (kernel_ulong_t)MT7921_FIRMWARE_WM },
++	/* TP-Link TXE50UH */
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x35bc, 0x0107, 0xff, 0xff, 0xff),
++		.driver_info = (kernel_ulong_t)MT7921_FIRMWARE_WM },
+ 	{ },
+ };
+ 
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h
+index c269942b3f4ab1..af8d17b9e012ca 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h
+@@ -197,9 +197,9 @@ enum rtl8821a_h2c_cmd {
+ 
+ /* _MEDIA_STATUS_RPT_PARM_CMD1 */
+ #define SET_H2CCMD_MSRRPT_PARM_OPMODE(__cmd, __value)	\
+-	u8p_replace_bits(__cmd + 1, __value, BIT(0))
++	u8p_replace_bits(__cmd, __value, BIT(0))
+ #define SET_H2CCMD_MSRRPT_PARM_MACID_IND(__cmd, __value)	\
+-	u8p_replace_bits(__cmd + 1, __value, BIT(1))
++	u8p_replace_bits(__cmd, __value, BIT(1))
+ 
+ /* AP_OFFLOAD */
+ #define SET_H2CCMD_AP_OFFLOAD_ON(__cmd, __value)	\
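The rtl8821ae macro fix is a one-byte offset bug: SET_H2CCMD_MSRRPT_PARM_OPMODE() and ..._MACID_IND() wrote their bits through u8p_replace_bits(__cmd + 1, ...), landing in byte 1 of the media-status-report parameter while the opmode and macid-indication bits belong in byte 0. A small demo of what the stray + 1 did; set_bit0() here mimics u8p_replace_bits(p, val, BIT(0)):

#include <stdint.h>
#include <stdio.h>

static void set_bit0(uint8_t *p, uint8_t val)
{
	*p = (*p & ~0x01) | (val & 0x01);
}

int main(void)
{
	uint8_t cmd[2] = {0, 0};

	set_bit0(cmd + 1, 1);  /* old macro: bit lands in the wrong byte */
	printf("buggy: %02x %02x\n", cmd[0], cmd[1]);

	cmd[0] = cmd[1] = 0;
	set_bit0(cmd, 1);      /* fixed: bit lands in byte 0 */
	printf("fixed: %02x %02x\n", cmd[0], cmd[1]);
	return 0;
}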
+diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h
+index 945117afe1438b..c808bb271e9d0f 100644
+--- a/drivers/net/wireless/realtek/rtw88/main.h
++++ b/drivers/net/wireless/realtek/rtw88/main.h
+@@ -508,12 +508,12 @@ struct rtw_5g_txpwr_idx {
+ 	struct rtw_5g_vht_ns_pwr_idx_diff vht_2s_diff;
+ 	struct rtw_5g_vht_ns_pwr_idx_diff vht_3s_diff;
+ 	struct rtw_5g_vht_ns_pwr_idx_diff vht_4s_diff;
+-};
++} __packed;
+ 
+ struct rtw_txpwr_idx {
+ 	struct rtw_2g_txpwr_idx pwr_idx_2g;
+ 	struct rtw_5g_txpwr_idx pwr_idx_5g;
+-};
++} __packed;
+ 
+ struct rtw_channel_params {
+ 	u8 center_chan;
+diff --git a/drivers/net/wireless/realtek/rtw88/rtw8703b.c b/drivers/net/wireless/realtek/rtw88/rtw8703b.c
+index 222608de33cdec..a977aad9c650f5 100644
+--- a/drivers/net/wireless/realtek/rtw88/rtw8703b.c
++++ b/drivers/net/wireless/realtek/rtw88/rtw8703b.c
+@@ -911,7 +911,7 @@ static void rtw8703b_set_channel_bb(struct rtw_dev *rtwdev, u8 channel, u8 bw,
+ 		rtw_write32_mask(rtwdev, REG_FPGA0_RFMOD, BIT_MASK_RFMOD, 0x0);
+ 		rtw_write32_mask(rtwdev, REG_FPGA1_RFMOD, BIT_MASK_RFMOD, 0x0);
+ 		rtw_write32_mask(rtwdev, REG_OFDM0_TX_PSD_NOISE,
+-				 GENMASK(31, 20), 0x0);
++				 GENMASK(31, 30), 0x0);
+ 		rtw_write32(rtwdev, REG_BBRX_DFIR, 0x4A880000);
+ 		rtw_write32(rtwdev, REG_OFDM0_A_TX_AFE, 0x19F60000);
+ 		break;
+@@ -1257,9 +1257,9 @@ static u8 rtw8703b_iqk_rx_path(struct rtw_dev *rtwdev,
+ 	rtw_write32(rtwdev, REG_RXIQK_TONE_A_11N, 0x38008c1c);
+ 	rtw_write32(rtwdev, REG_TX_IQK_TONE_B, 0x38008c1c);
+ 	rtw_write32(rtwdev, REG_RX_IQK_TONE_B, 0x38008c1c);
+-	rtw_write32(rtwdev, REG_TXIQK_PI_A_11N, 0x8216000f);
++	rtw_write32(rtwdev, REG_TXIQK_PI_A_11N, 0x8214030f);
+ 	rtw_write32(rtwdev, REG_RXIQK_PI_A_11N, 0x28110000);
+-	rtw_write32(rtwdev, REG_TXIQK_PI_B, 0x28110000);
++	rtw_write32(rtwdev, REG_TXIQK_PI_B, 0x82110000);
+ 	rtw_write32(rtwdev, REG_RXIQK_PI_B, 0x28110000);
+ 
+ 	/* LOK setting */
+@@ -1431,7 +1431,7 @@ void rtw8703b_iqk_fill_a_matrix(struct rtw_dev *rtwdev, const s32 result[])
+ 		return;
+ 
+ 	tmp_rx_iqi |= FIELD_PREP(BIT_MASK_RXIQ_S1_X, result[IQK_S1_RX_X]);
+-	tmp_rx_iqi |= FIELD_PREP(BIT_MASK_RXIQ_S1_Y1, result[IQK_S1_RX_X]);
++	tmp_rx_iqi |= FIELD_PREP(BIT_MASK_RXIQ_S1_Y1, result[IQK_S1_RX_Y]);
+ 	rtw_write32(rtwdev, REG_A_RXIQI, tmp_rx_iqi);
+ 	rtw_write32_mask(rtwdev, REG_RXIQK_MATRIX_LSB_11N, BIT_MASK_RXIQ_S1_Y2,
+ 			 BIT_SET_RXIQ_S1_Y2(result[IQK_S1_RX_Y]));
+diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723x.h b/drivers/net/wireless/realtek/rtw88/rtw8723x.h
+index e93bfce994bf82..a99af527c92cfb 100644
+--- a/drivers/net/wireless/realtek/rtw88/rtw8723x.h
++++ b/drivers/net/wireless/realtek/rtw88/rtw8723x.h
+@@ -47,7 +47,7 @@ struct rtw8723xe_efuse {
+ 	u8 device_id[2];
+ 	u8 sub_vendor_id[2];
+ 	u8 sub_device_id[2];
+-};
++} __packed;
+ 
+ struct rtw8723xu_efuse {
+ 	u8 res4[48];                    /* 0xd0 */
+@@ -56,12 +56,12 @@ struct rtw8723xu_efuse {
+ 	u8 usb_option;                  /* 0x104 */
+ 	u8 res5[2];			/* 0x105 */
+ 	u8 mac_addr[ETH_ALEN];          /* 0x107 */
+-};
++} __packed;
+ 
+ struct rtw8723xs_efuse {
+ 	u8 res4[0x4a];			/* 0xd0 */
+ 	u8 mac_addr[ETH_ALEN];		/* 0x11a */
+-};
++} __packed;
+ 
+ struct rtw8723x_efuse {
+ 	__le16 rtl_id;
+@@ -96,7 +96,7 @@ struct rtw8723x_efuse {
+ 		struct rtw8723xu_efuse u;
+ 		struct rtw8723xs_efuse s;
+ 	};
+-};
++} __packed;
+ 
+ #define RTW8723X_IQK_ADDA_REG_NUM	16
+ #define RTW8723X_IQK_MAC8_REG_NUM	3
+diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.h b/drivers/net/wireless/realtek/rtw88/rtw8821c.h
+index 91ed921407bbe7..10172f4d74bf28 100644
+--- a/drivers/net/wireless/realtek/rtw88/rtw8821c.h
++++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.h
+@@ -27,7 +27,7 @@ struct rtw8821cu_efuse {
+ 	u8 res11[0xcf];
+ 	u8 package_type;		/* 0x1fb */
+ 	u8 res12[0x4];
+-};
++} __packed;
+ 
+ struct rtw8821ce_efuse {
+ 	u8 mac_addr[ETH_ALEN];		/* 0xd0 */
+@@ -47,7 +47,8 @@ struct rtw8821ce_efuse {
+ 	u8 ltr_en:1;
+ 	u8 res1:2;
+ 	u8 obff:2;
+-	u8 res2:3;
++	u8 res2_1:1;
++	u8 res2_2:2;
+ 	u8 obff_cap:2;
+ 	u8 res3:4;
+ 	u8 res4[3];
+@@ -63,7 +64,7 @@ struct rtw8821ce_efuse {
+ 	u8 res6:1;
+ 	u8 port_t_power_on_value:5;
+ 	u8 res7;
+-};
++} __packed;
+ 
+ struct rtw8821cs_efuse {
+ 	u8 res4[0x4a];			/* 0xd0 */
+@@ -101,7 +102,7 @@ struct rtw8821c_efuse {
+ 		struct rtw8821cu_efuse u;
+ 		struct rtw8821cs_efuse s;
+ 	};
+-};
++} __packed;
+ 
+ static inline void
+ _rtw_write32s_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 data)
+diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.h b/drivers/net/wireless/realtek/rtw88/rtw8822b.h
+index cf85e63966a1c7..e815bc97c218af 100644
+--- a/drivers/net/wireless/realtek/rtw88/rtw8822b.h
++++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.h
+@@ -27,7 +27,7 @@ struct rtw8822bu_efuse {
+ 	u8 res11[0xcf];
+ 	u8 package_type;		/* 0x1fb */
+ 	u8 res12[0x4];
+-};
++} __packed;
+ 
+ struct rtw8822be_efuse {
+ 	u8 mac_addr[ETH_ALEN];		/* 0xd0 */
+@@ -47,7 +47,8 @@ struct rtw8822be_efuse {
+ 	u8 ltr_en:1;
+ 	u8 res1:2;
+ 	u8 obff:2;
+-	u8 res2:3;
++	u8 res2_1:1;
++	u8 res2_2:2;
+ 	u8 obff_cap:2;
+ 	u8 res3:4;
+ 	u8 res4[3];
+@@ -63,7 +64,7 @@ struct rtw8822be_efuse {
+ 	u8 res6:1;
+ 	u8 port_t_power_on_value:5;
+ 	u8 res7;
+-};
++} __packed;
+ 
+ struct rtw8822bs_efuse {
+ 	u8 res4[0x4a];			/* 0xd0 */
+@@ -103,7 +104,7 @@ struct rtw8822b_efuse {
+ 		struct rtw8822bu_efuse u;
+ 		struct rtw8822bs_efuse s;
+ 	};
+-};
++} __packed;
+ 
+ static inline void
+ _rtw_write32s_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 data)
+diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.h b/drivers/net/wireless/realtek/rtw88/rtw8822c.h
+index e2b383d633cd23..fc62b67a15f216 100644
+--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.h
++++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.h
+@@ -14,7 +14,7 @@ struct rtw8822cu_efuse {
+ 	u8 res1[3];
+ 	u8 mac_addr[ETH_ALEN];		/* 0x157 */
+ 	u8 res2[0x3d];
+-};
++} __packed;
+ 
+ struct rtw8822cs_efuse {
+ 	u8 res0[0x4a];			/* 0x120 */
+@@ -39,7 +39,8 @@ struct rtw8822ce_efuse {
+ 	u8 ltr_en:1;
+ 	u8 res1:2;
+ 	u8 obff:2;
+-	u8 res2:3;
++	u8 res2_1:1;
++	u8 res2_2:2;
+ 	u8 obff_cap:2;
+ 	u8 res3:4;
+ 	u8 class_code[3];
+@@ -55,7 +56,7 @@ struct rtw8822ce_efuse {
+ 	u8 res6:1;
+ 	u8 port_t_power_on_value:5;
+ 	u8 res7;
+-};
++} __packed;
+ 
+ struct rtw8822c_efuse {
+ 	__le16 rtl_id;
+@@ -102,7 +103,7 @@ struct rtw8822c_efuse {
+ 		struct rtw8822cu_efuse u;
+ 		struct rtw8822cs_efuse s;
+ 	};
+-};
++} __packed;
+ 
+ enum rtw8822c_dpk_agc_phase {
+ 	RTW_DPK_GAIN_CHECK,
+diff --git a/drivers/net/wireless/realtek/rtw88/sdio.c b/drivers/net/wireless/realtek/rtw88/sdio.c
+index b67e551fcee3ef..1d62b38526c486 100644
+--- a/drivers/net/wireless/realtek/rtw88/sdio.c
++++ b/drivers/net/wireless/realtek/rtw88/sdio.c
+@@ -1193,6 +1193,8 @@ static void rtw_sdio_indicate_tx_status(struct rtw_dev *rtwdev,
+ 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ 	struct ieee80211_hw *hw = rtwdev->hw;
+ 
++	skb_pull(skb, rtwdev->chip->tx_pkt_desc_sz);
++
+ 	/* enqueue to wait for tx report */
+ 	if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
+ 		rtw_tx_report_enqueue(rtwdev, skb, tx_data->sn);
+diff --git a/drivers/net/wireless/realtek/rtw89/phy.c b/drivers/net/wireless/realtek/rtw89/phy.c
+index 4b47b45f897cbc..5c31639b4cade9 100644
+--- a/drivers/net/wireless/realtek/rtw89/phy.c
++++ b/drivers/net/wireless/realtek/rtw89/phy.c
+@@ -3892,7 +3892,6 @@ static void rtw89_phy_cfo_set_crystal_cap(struct rtw89_dev *rtwdev,
+ 
+ 	if (!force && cfo->crystal_cap == crystal_cap)
+ 		return;
+-	crystal_cap = clamp_t(u8, crystal_cap, 0, 127);
+ 	if (chip->chip_id == RTL8852A || chip->chip_id == RTL8851B) {
+ 		rtw89_phy_cfo_set_xcap_reg(rtwdev, true, crystal_cap);
+ 		rtw89_phy_cfo_set_xcap_reg(rtwdev, false, crystal_cap);
+@@ -4015,7 +4014,7 @@ static void rtw89_phy_cfo_crystal_cap_adjust(struct rtw89_dev *rtwdev,
+ 					     s32 curr_cfo)
+ {
+ 	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
+-	s8 crystal_cap = cfo->crystal_cap;
++	int crystal_cap = cfo->crystal_cap;
+ 	s32 cfo_abs = abs(curr_cfo);
+ 	int sign;
+ 
+@@ -4036,15 +4035,17 @@ static void rtw89_phy_cfo_crystal_cap_adjust(struct rtw89_dev *rtwdev,
+ 	}
+ 	sign = curr_cfo > 0 ? 1 : -1;
+ 	if (cfo_abs > CFO_TRK_STOP_TH_4)
+-		crystal_cap += 7 * sign;
++		crystal_cap += 3 * sign;
+ 	else if (cfo_abs > CFO_TRK_STOP_TH_3)
+-		crystal_cap += 5 * sign;
+-	else if (cfo_abs > CFO_TRK_STOP_TH_2)
+ 		crystal_cap += 3 * sign;
++	else if (cfo_abs > CFO_TRK_STOP_TH_2)
++		crystal_cap += 1 * sign;
+ 	else if (cfo_abs > CFO_TRK_STOP_TH_1)
+ 		crystal_cap += 1 * sign;
+ 	else
+ 		return;
++
++	crystal_cap = clamp(crystal_cap, 0, 127);
+ 	rtw89_phy_cfo_set_crystal_cap(rtwdev, (u8)crystal_cap, false);
+ 	rtw89_debug(rtwdev, RTW89_DBG_CFO,
+ 		    "X_cap{Curr,Default}={0x%x,0x%x}\n",
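
The hunk above widens crystal_cap from s8 to int before stepping it, and clamps into the hardware range only after the adjustment, so the intermediate value can never wrap. A standalone sketch of that adjust-then-clamp pattern, with made-up thresholds and step sizes:

#include <stdio.h>

static int clamp_int(int v, int lo, int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

/* Thresholds and step sizes are invented; only the shape matters. */
static unsigned char adjust_cap(unsigned char cap, int cfo)
{
	int next = cap;			/* int, not s8: cannot wrap */
	int sign = cfo > 0 ? 1 : -1;
	int mag = cfo > 0 ? cfo : -cfo;

	if (mag > 120)
		next += 3 * sign;
	else if (mag > 40)
		next += 1 * sign;
	else
		return cap;		/* inside the stop window */

	return (unsigned char)clamp_int(next, 0, 127);
}

int main(void)
{
	printf("%u\n", adjust_cap(126, 200));	/* prints 127, not 129 */
	return 0;
}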
+diff --git a/drivers/net/wireless/realtek/rtw89/phy.h b/drivers/net/wireless/realtek/rtw89/phy.h
+index 7e335c02ee6fbf..9bb9c9c8e7a1b0 100644
+--- a/drivers/net/wireless/realtek/rtw89/phy.h
++++ b/drivers/net/wireless/realtek/rtw89/phy.h
+@@ -57,7 +57,7 @@
+ #define CFO_TRK_STOP_TH_4 (30 << 2)
+ #define CFO_TRK_STOP_TH_3 (20 << 2)
+ #define CFO_TRK_STOP_TH_2 (10 << 2)
+-#define CFO_TRK_STOP_TH_1 (00 << 2)
++#define CFO_TRK_STOP_TH_1 (03 << 2)
+ #define CFO_TRK_STOP_TH (2 << 2)
+ #define CFO_SW_COMP_FINE_TUNE (2 << 2)
+ #define CFO_PERIOD_CNT 15
+diff --git a/drivers/net/wwan/iosm/iosm_ipc_pcie.c b/drivers/net/wwan/iosm/iosm_ipc_pcie.c
+index 04517bd3325a2a..a066977af0be5c 100644
+--- a/drivers/net/wwan/iosm/iosm_ipc_pcie.c
++++ b/drivers/net/wwan/iosm/iosm_ipc_pcie.c
+@@ -6,6 +6,7 @@
+ #include <linux/acpi.h>
+ #include <linux/bitfield.h>
+ #include <linux/module.h>
++#include <linux/suspend.h>
+ #include <net/rtnetlink.h>
+ 
+ #include "iosm_ipc_imem.h"
+@@ -18,6 +19,7 @@ MODULE_LICENSE("GPL v2");
+ /* WWAN GUID */
+ static guid_t wwan_acpi_guid = GUID_INIT(0xbad01b75, 0x22a8, 0x4f48, 0x87, 0x92,
+ 				       0xbd, 0xde, 0x94, 0x67, 0x74, 0x7d);
++static bool pci_registered;
+ 
+ static void ipc_pcie_resources_release(struct iosm_pcie *ipc_pcie)
+ {
+@@ -448,7 +450,6 @@ static struct pci_driver iosm_ipc_driver = {
+ 	},
+ 	.id_table = iosm_ipc_ids,
+ };
+-module_pci_driver(iosm_ipc_driver);
+ 
+ int ipc_pcie_addr_map(struct iosm_pcie *ipc_pcie, unsigned char *data,
+ 		      size_t size, dma_addr_t *mapping, int direction)
+@@ -530,3 +531,56 @@ void ipc_pcie_kfree_skb(struct iosm_pcie *ipc_pcie, struct sk_buff *skb)
+ 	IPC_CB(skb)->mapping = 0;
+ 	dev_kfree_skb(skb);
+ }
++
++static int pm_notify(struct notifier_block *nb, unsigned long mode, void *_unused)
++{
++	if (mode == PM_HIBERNATION_PREPARE || mode == PM_RESTORE_PREPARE) {
++		if (pci_registered) {
++			pci_unregister_driver(&iosm_ipc_driver);
++			pci_registered = false;
++		}
++	} else if (mode == PM_POST_HIBERNATION || mode == PM_POST_RESTORE) {
++		if (!pci_registered) {
++			int ret;
++
++			ret = pci_register_driver(&iosm_ipc_driver);
++			if (ret) {
++				pr_err(KBUILD_MODNAME ": unable to re-register PCI driver: %d\n",
++				       ret);
++			} else {
++				pci_registered = true;
++			}
++		}
++	}
++
++	return 0;
++}
++
++static struct notifier_block pm_notifier = {
++	.notifier_call = pm_notify,
++};
++
++static int __init iosm_ipc_driver_init(void)
++{
++	int ret;
++
++	ret = pci_register_driver(&iosm_ipc_driver);
++	if (ret)
++		return ret;
++
++	pci_registered = true;
++
++	register_pm_notifier(&pm_notifier);
++
++	return 0;
++}
++module_init(iosm_ipc_driver_init);
++
++static void __exit iosm_ipc_driver_exit(void)
++{
++	unregister_pm_notifier(&pm_notifier);
++
++	if (pci_registered)
++		pci_unregister_driver(&iosm_ipc_driver);
++}
++module_exit(iosm_ipc_driver_exit);
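
For reference, the shape of the hibernation work-around introduced above, reduced to its skeleton: unbind the PCI driver before the hibernation image is taken and re-register it afterwards. This is a hedged sketch, not the iosm code; demo_driver and the other demo_* names are placeholders:

#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/pci.h>
#include <linux/suspend.h>

static struct pci_driver demo_driver;	/* assume: filled in elsewhere */
static bool demo_registered;

static int demo_pm_notify(struct notifier_block *nb, unsigned long mode,
			  void *unused)
{
	switch (mode) {
	case PM_HIBERNATION_PREPARE:
	case PM_RESTORE_PREPARE:
		/* device state is not snapshot-safe: drop the driver */
		if (demo_registered) {
			pci_unregister_driver(&demo_driver);
			demo_registered = false;
		}
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:
		if (!demo_registered)
			demo_registered = !pci_register_driver(&demo_driver);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block demo_pm_nb = {
	.notifier_call = demo_pm_notify,
};
/* module init/exit would call [un]register_pm_notifier(&demo_pm_nb) */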
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 4c409efd8cec17..8da50df56b0795 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -1691,7 +1691,13 @@ int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
+ 
+ 	status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0,
+ 			&result);
+-	if (status < 0)
++
++	/*
++	 * It's either a kernel error or the host observed a connection
++	 * loss. In either case it is not possible to communicate with the
++	 * controller, so enter the error code path.
++	 */
++	if (status < 0 || status == NVME_SC_HOST_PATH_ERROR)
+ 		return status;
+ 
+ 	/*
+diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
+index b81af7919e94c4..682234da2fabe0 100644
+--- a/drivers/nvme/host/fc.c
++++ b/drivers/nvme/host/fc.c
+@@ -2080,7 +2080,8 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
+ 		nvme_fc_complete_rq(rq);
+ 
+ check_error:
+-	if (terminate_assoc && ctrl->ctrl.state != NVME_CTRL_RESETTING)
++	if (terminate_assoc &&
++	    nvme_ctrl_state(&ctrl->ctrl) != NVME_CTRL_RESETTING)
+ 		queue_work(nvme_reset_wq, &ctrl->ioerr_work);
+ }
+ 
+@@ -2534,6 +2535,8 @@ __nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues)
+ static void
+ nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
+ {
++	enum nvme_ctrl_state state = nvme_ctrl_state(&ctrl->ctrl);
++
+ 	/*
+ 	 * if an error (io timeout, etc) while (re)connecting, the remote
+ 	 * port requested terminating of the association (disconnect_ls)
+@@ -2541,7 +2544,7 @@ nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
+ 	 * the controller.  Abort any ios on the association and let the
+ 	 * create_association error path resolve things.
+ 	 */
+-	if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) {
++	if (state == NVME_CTRL_CONNECTING) {
+ 		__nvme_fc_abort_outstanding_ios(ctrl, true);
+ 		set_bit(ASSOC_FAILED, &ctrl->flags);
+ 		dev_warn(ctrl->ctrl.device,
+@@ -2551,7 +2554,7 @@ nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
+ 	}
+ 
+ 	/* Otherwise, only proceed if in LIVE state - e.g. on first error */
+-	if (ctrl->ctrl.state != NVME_CTRL_LIVE)
++	if (state != NVME_CTRL_LIVE)
+ 		return;
+ 
+ 	dev_warn(ctrl->ctrl.device,
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index 76b3f7b396c86b..cc74682dc0d4e9 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -2987,7 +2987,9 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
+ 		 * because of high power consumption (> 2 Watt) in s2idle
+ 		 * sleep. Only some boards with Intel CPU are affected.
+ 		 */
+-		if (dmi_match(DMI_BOARD_NAME, "GMxPXxx") ||
++		if (dmi_match(DMI_BOARD_NAME, "DN50Z-140HC-YD") ||
++		    dmi_match(DMI_BOARD_NAME, "GMxPXxx") ||
++		    dmi_match(DMI_BOARD_NAME, "GXxMRXx") ||
+ 		    dmi_match(DMI_BOARD_NAME, "PH4PG31") ||
+ 		    dmi_match(DMI_BOARD_NAME, "PH4PRX1_PH6PRX1") ||
+ 		    dmi_match(DMI_BOARD_NAME, "PH6PG01_PH6PG71"))
+diff --git a/drivers/nvme/host/sysfs.c b/drivers/nvme/host/sysfs.c
+index b68a9e5f1ea395..3a41b9ab0f13c4 100644
+--- a/drivers/nvme/host/sysfs.c
++++ b/drivers/nvme/host/sysfs.c
+@@ -792,7 +792,7 @@ static umode_t nvme_tls_attrs_are_visible(struct kobject *kobj,
+ 	return a->mode;
+ }
+ 
+-const struct attribute_group nvme_tls_attrs_group = {
++static const struct attribute_group nvme_tls_attrs_group = {
+ 	.attrs		= nvme_tls_attrs,
+ 	.is_visible	= nvme_tls_attrs_are_visible,
+ };
+diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
+index e1a15fbc6ad025..d00a3b015635c2 100644
+--- a/drivers/nvmem/core.c
++++ b/drivers/nvmem/core.c
+@@ -1780,6 +1780,8 @@ static int __nvmem_cell_entry_write(struct nvmem_cell_entry *cell, void *buf, si
+ 		return -EINVAL;
+ 
+ 	if (cell->bit_offset || cell->nbits) {
++		if (len != BITS_TO_BYTES(cell->nbits) && len != cell->bytes)
++			return -EINVAL;
+ 		buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
+ 		if (IS_ERR(buf))
+ 			return PTR_ERR(buf);
+diff --git a/drivers/nvmem/imx-ocotp-ele.c b/drivers/nvmem/imx-ocotp-ele.c
+index 1ba49449769874..ca6dd71d8a2e29 100644
+--- a/drivers/nvmem/imx-ocotp-ele.c
++++ b/drivers/nvmem/imx-ocotp-ele.c
+@@ -71,13 +71,15 @@ static int imx_ocotp_reg_read(void *context, unsigned int offset, void *val, siz
+ 	u32 *buf;
+ 	void *p;
+ 	int i;
++	u8 skipbytes;
+ 
+-	index = offset;
+-	num_bytes = round_up(bytes, 4);
+-	count = num_bytes >> 2;
++	if (offset + bytes > priv->data->size)
++		bytes = priv->data->size - offset;
+ 
+-	if (count > ((priv->data->size >> 2) - index))
+-		count = (priv->data->size >> 2) - index;
++	index = offset >> 2;
++	skipbytes = offset - (index << 2);
++	num_bytes = round_up(bytes + skipbytes, 4);
++	count = num_bytes >> 2;
+ 
+ 	p = kzalloc(num_bytes, GFP_KERNEL);
+ 	if (!p)
+@@ -100,7 +102,7 @@ static int imx_ocotp_reg_read(void *context, unsigned int offset, void *val, siz
+ 			*buf++ = readl_relaxed(reg + (i << 2));
+ 	}
+ 
+-	memcpy(val, (u8 *)p, bytes);
++	memcpy(val, ((u8 *)p) + skipbytes, bytes);
+ 
+ 	mutex_unlock(&priv->lock);
+ 
+@@ -109,6 +111,26 @@ static int imx_ocotp_reg_read(void *context, unsigned int offset, void *val, siz
+ 	return 0;
+ };
+ 
++static int imx_ocotp_cell_pp(void *context, const char *id, int index,
++			     unsigned int offset, void *data, size_t bytes)
++{
++	u8 *buf = data;
++	int i;
++
++	/* Deal with some post-processing of nvmem cell data */
++	if (id && !strcmp(id, "mac-address"))
++		for (i = 0; i < bytes / 2; i++)
++			swap(buf[i], buf[bytes - i - 1]);
++
++	return 0;
++}
++
++static void imx_ocotp_fixup_dt_cell_info(struct nvmem_device *nvmem,
++					 struct nvmem_cell_info *cell)
++{
++	cell->read_post_process = imx_ocotp_cell_pp;
++}
++
+ static int imx_ele_ocotp_probe(struct platform_device *pdev)
+ {
+ 	struct device *dev = &pdev->dev;
+@@ -131,10 +153,12 @@ static int imx_ele_ocotp_probe(struct platform_device *pdev)
+ 	priv->config.owner = THIS_MODULE;
+ 	priv->config.size = priv->data->size;
+ 	priv->config.reg_read = priv->data->reg_read;
+-	priv->config.word_size = 4;
++	priv->config.word_size = 1;
+ 	priv->config.stride = 1;
+ 	priv->config.priv = priv;
+ 	priv->config.read_only = true;
++	priv->config.add_legacy_fixed_of_cells = true;
++	priv->config.fixup_dt_cell_info = imx_ocotp_fixup_dt_cell_info;
+ 	mutex_init(&priv->lock);
+ 
+ 	nvmem = devm_nvmem_register(dev, &priv->config);
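
The fixed read path above converts a byte-granular (offset, bytes) request into word reads plus a lead-in skip, since the fuse array is only word-addressable. The arithmetic in isolation (userspace sketch, open-coding round_up()):

#include <stdio.h>

int main(void)
{
	unsigned int offset = 6, bytes = 5;	/* example request */

	unsigned int index = offset >> 2;		/* first word to read */
	unsigned int skip = offset - (index << 2);	/* lead-in bytes */
	unsigned int num = (bytes + skip + 3) & ~3u;	/* round_up(, 4) */
	unsigned int count = num >> 2;			/* words to read */

	/* word 1, skip 2, read 2 words (bytes 4..11 cover 6..10) */
	printf("word %u, skip %u, read %u words\n", index, skip, count);
	return 0;
}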
+diff --git a/drivers/nvmem/qcom-spmi-sdam.c b/drivers/nvmem/qcom-spmi-sdam.c
+index 9aa8f42faa4c93..4f1cca6eab71e1 100644
+--- a/drivers/nvmem/qcom-spmi-sdam.c
++++ b/drivers/nvmem/qcom-spmi-sdam.c
+@@ -144,6 +144,7 @@ static int sdam_probe(struct platform_device *pdev)
+ 	sdam->sdam_config.owner = THIS_MODULE;
+ 	sdam->sdam_config.add_legacy_fixed_of_cells = true;
+ 	sdam->sdam_config.stride = 1;
++	sdam->sdam_config.size = sdam->size;
+ 	sdam->sdam_config.word_size = 1;
+ 	sdam->sdam_config.reg_read = sdam_read;
+ 	sdam->sdam_config.reg_write = sdam_write;
+diff --git a/drivers/of/address.c b/drivers/of/address.c
+index a565b8c91da593..0e708a863e4aa3 100644
+--- a/drivers/of/address.c
++++ b/drivers/of/address.c
+@@ -200,17 +200,15 @@ static u64 of_bus_pci_map(__be32 *addr, const __be32 *range, int na, int ns,
+ 
+ static int __of_address_resource_bounds(struct resource *r, u64 start, u64 size)
+ {
+-	u64 end = start;
+-
+ 	if (overflows_type(start, r->start))
+ 		return -EOVERFLOW;
+-	if (size && check_add_overflow(end, size - 1, &end))
+-		return -EOVERFLOW;
+-	if (overflows_type(end, r->end))
+-		return -EOVERFLOW;
+ 
+ 	r->start = start;
+-	r->end = end;
++
++	if (!size)
++		r->end = wrapping_sub(typeof(r->end), r->start, 1);
++	else if (size && check_add_overflow(r->start, size - 1, &r->end))
++		return -EOVERFLOW;
+ 
+ 	return 0;
+ }
+diff --git a/drivers/of/base.c b/drivers/of/base.c
+index 63161d0f72b4e8..4bb87e0cbaf179 100644
+--- a/drivers/of/base.c
++++ b/drivers/of/base.c
+@@ -841,10 +841,10 @@ struct device_node *of_find_node_opts_by_path(const char *path, const char **opt
+ 	/* The path could begin with an alias */
+ 	if (*path != '/') {
+ 		int len;
+-		const char *p = separator;
++		const char *p = strchrnul(path, '/');
+ 
+-		if (!p)
+-			p = strchrnul(path, '/');
++		if (separator && separator < p)
++			p = separator;
+ 		len = p - path;
+ 
+ 		/* of_aliases must not be NULL */
+@@ -1493,7 +1493,6 @@ int of_parse_phandle_with_args_map(const struct device_node *np,
+ 		 * specifier into the out_args structure, keeping the
+ 		 * bits specified in <list>-map-pass-thru.
+ 		 */
+-		match_array = map - new_size;
+ 		for (i = 0; i < new_size; i++) {
+ 			__be32 val = *(map - new_size + i);
+ 
+@@ -1502,6 +1501,7 @@ int of_parse_phandle_with_args_map(const struct device_node *np,
+ 				val |= cpu_to_be32(out_args->args[i]) & pass[i];
+ 			}
+ 
++			initial_match_array[i] = val;
+ 			out_args->args[i] = be32_to_cpu(val);
+ 		}
+ 		out_args->args_count = list_size = new_size;
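
The alias fix in of_find_node_opts_by_path() reduces to: the alias name ends at the options separator or at the first '/', whichever comes first. A userspace model of that selection (strchrnul() is a GNU extension, hence _GNU_SOURCE):

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *path = "serial0:115200n8/child";
	const char *separator = strchr(path, ':');	/* options, if any */
	const char *p = strchrnul(path, '/');		/* end of alias */

	if (separator && separator < p)
		p = separator;

	printf("alias = %.*s\n", (int)(p - path), path);	/* serial0 */
	return 0;
}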
+diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
+index 45445a1600a968..e45d6d3a8dc678 100644
+--- a/drivers/of/of_reserved_mem.c
++++ b/drivers/of/of_reserved_mem.c
+@@ -360,12 +360,12 @@ static int __init __reserved_mem_alloc_size(unsigned long node, const char *unam
+ 
+ 	prop = of_get_flat_dt_prop(node, "alignment", &len);
+ 	if (prop) {
+-		if (len != dt_root_addr_cells * sizeof(__be32)) {
++		if (len != dt_root_size_cells * sizeof(__be32)) {
+ 			pr_err("invalid alignment property in '%s' node.\n",
+ 				uname);
+ 			return -EINVAL;
+ 		}
+-		align = dt_mem_next_cell(dt_root_addr_cells, &prop);
++		align = dt_mem_next_cell(dt_root_size_cells, &prop);
+ 	}
+ 
+ 	nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
+diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
+index cc8ff4a014368c..b58e89ea566b8d 100644
+--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
++++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
+@@ -222,19 +222,30 @@ static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ 	if ((flags & PCI_BASE_ADDRESS_MEM_TYPE_64) && (bar & 1))
+ 		return -EINVAL;
+ 
+-	reg = PCI_BASE_ADDRESS_0 + (4 * bar);
+-
+-	if (!(flags & PCI_BASE_ADDRESS_SPACE))
+-		type = PCIE_ATU_TYPE_MEM;
+-	else
+-		type = PCIE_ATU_TYPE_IO;
++	/*
++	 * Certain EPF drivers dynamically change the physical address of a BAR
++	 * (i.e. they call set_bar() twice, without ever calling clear_bar(), as
++	 * calling clear_bar() would clear the BAR's PCI address assigned by the
++	 * host).
++	 */
++	if (ep->epf_bar[bar]) {
++		/*
++		 * We can only dynamically change a BAR if the new BAR size and
++		 * BAR flags do not differ from the existing configuration.
++		 */
++		if (ep->epf_bar[bar]->barno != bar ||
++		    ep->epf_bar[bar]->size != size ||
++		    ep->epf_bar[bar]->flags != flags)
++			return -EINVAL;
+ 
+-	ret = dw_pcie_ep_inbound_atu(ep, func_no, type, epf_bar->phys_addr, bar);
+-	if (ret)
+-		return ret;
++		/*
++		 * When dynamically changing a BAR, skip writing the BAR reg, as
++		 * that would clear the BAR's PCI address assigned by the host.
++		 */
++		goto config_atu;
++	}
+ 
+-	if (ep->epf_bar[bar])
+-		return 0;
++	reg = PCI_BASE_ADDRESS_0 + (4 * bar);
+ 
+ 	dw_pcie_dbi_ro_wr_en(pci);
+ 
+@@ -246,9 +257,20 @@ static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ 		dw_pcie_ep_writel_dbi(ep, func_no, reg + 4, 0);
+ 	}
+ 
+-	ep->epf_bar[bar] = epf_bar;
+ 	dw_pcie_dbi_ro_wr_dis(pci);
+ 
++config_atu:
++	if (!(flags & PCI_BASE_ADDRESS_SPACE))
++		type = PCIE_ATU_TYPE_MEM;
++	else
++		type = PCIE_ATU_TYPE_IO;
++
++	ret = dw_pcie_ep_inbound_atu(ep, func_no, type, epf_bar->phys_addr, bar);
++	if (ret)
++		return ret;
++
++	ep->epf_bar[bar] = epf_bar;
++
+ 	return 0;
+ }
+ 
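
A compact model of the reworked set_bar() flow above, under stated assumptions (stub types, the inbound-ATU call replaced by a stand-in): a repeat call for an already-configured BAR is accepted only when size and flags match, and it re-programs only the ATU, never the BAR register, so the host-assigned PCI address survives:

#include <stdio.h>

struct bar_cfg { int barno; unsigned long size; unsigned int flags; };

/* Stand-in for dw_pcie_ep_inbound_atu(); always succeeds here. */
static int program_inbound_atu(int bar) { (void)bar; return 0; }

static int set_bar(struct bar_cfg *live[], struct bar_cfg *cfg)
{
	int ret, bar = cfg->barno;

	if (live[bar]) {
		if (live[bar]->size != cfg->size ||
		    live[bar]->flags != cfg->flags)
			return -1;	/* -EINVAL in the driver */
		/* dynamic change: skip the BAR register write */
	} else {
		/* first call: the BAR register would be written here */
	}

	ret = program_inbound_atu(bar);
	if (ret)
		return ret;

	live[bar] = cfg;
	return 0;
}

int main(void)
{
	struct bar_cfg *live[6] = { 0 };
	struct bar_cfg a = { .barno = 0, .size = 4096, .flags = 0 };

	printf("%d %d\n", set_bar(live, &a), set_bar(live, &a)); /* 0 0 */
	return 0;
}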
+diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c
+index 8fa2797d4169a9..50bc2892a36c54 100644
+--- a/drivers/pci/endpoint/pci-epf-core.c
++++ b/drivers/pci/endpoint/pci-epf-core.c
+@@ -202,6 +202,7 @@ void pci_epf_remove_vepf(struct pci_epf *epf_pf, struct pci_epf *epf_vf)
+ 
+ 	mutex_lock(&epf_pf->lock);
+ 	clear_bit(epf_vf->vfunc_no, &epf_pf->vfunction_num_map);
++	epf_vf->epf_pf = NULL;
+ 	list_del(&epf_vf->list);
+ 	mutex_unlock(&epf_pf->lock);
+ }
+diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+index 3a81837b5e623b..5081c7d8064fae 100644
+--- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c
++++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+@@ -155,7 +155,7 @@
+ #define PWPR_REGWE_B		BIT(5)	/* OEN Register Write Enable, known only in RZ/V2H(P) */
+ 
+ #define PM_MASK			0x03
+-#define PFC_MASK		0x07
++#define PFC_MASK		0x0f
+ #define IEN_MASK		0x01
+ #define IOLH_MASK		0x03
+ #define SR_MASK			0x01
+diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
+index 675efa5d86a9af..c142cd7920307f 100644
+--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
++++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
+@@ -1272,7 +1272,7 @@ static int samsung_pinctrl_probe(struct platform_device *pdev)
+ 
+ 	ret = platform_get_irq_optional(pdev, 0);
+ 	if (ret < 0 && ret != -ENXIO)
+-		return ret;
++		goto err_put_banks;
+ 	if (ret > 0)
+ 		drvdata->irq = ret;
+ 
+diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
+index 7169b84ccdb6e2..c5679e4a58a76e 100644
+--- a/drivers/platform/x86/acer-wmi.c
++++ b/drivers/platform/x86/acer-wmi.c
+@@ -95,6 +95,7 @@ enum acer_wmi_event_ids {
+ 	WMID_HOTKEY_EVENT = 0x1,
+ 	WMID_ACCEL_OR_KBD_DOCK_EVENT = 0x5,
+ 	WMID_GAMING_TURBO_KEY_EVENT = 0x7,
++	WMID_AC_EVENT = 0x8,
+ };
+ 
+ enum acer_wmi_predator_v4_sys_info_command {
+@@ -398,6 +399,20 @@ static struct quirk_entry quirk_acer_predator_ph315_53 = {
+ 	.gpu_fans = 1,
+ };
+ 
++static struct quirk_entry quirk_acer_predator_ph16_72 = {
++	.turbo = 1,
++	.cpu_fans = 1,
++	.gpu_fans = 1,
++	.predator_v4 = 1,
++};
++
++static struct quirk_entry quirk_acer_predator_pt14_51 = {
++	.turbo = 1,
++	.cpu_fans = 1,
++	.gpu_fans = 1,
++	.predator_v4 = 1,
++};
++
+ static struct quirk_entry quirk_acer_predator_v4 = {
+ 	.predator_v4 = 1,
+ };
+@@ -569,6 +584,15 @@ static const struct dmi_system_id acer_quirks[] __initconst = {
+ 		},
+ 		.driver_data = &quirk_acer_travelmate_2490,
+ 	},
++	{
++		.callback = dmi_matched,
++		.ident = "Acer Nitro AN515-58",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Nitro AN515-58"),
++		},
++		.driver_data = &quirk_acer_predator_v4,
++	},
+ 	{
+ 		.callback = dmi_matched,
+ 		.ident = "Acer Predator PH315-53",
+@@ -596,6 +620,15 @@ static const struct dmi_system_id acer_quirks[] __initconst = {
+ 		},
+ 		.driver_data = &quirk_acer_predator_v4,
+ 	},
++	{
++		.callback = dmi_matched,
++		.ident = "Acer Predator PH16-72",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Predator PH16-72"),
++		},
++		.driver_data = &quirk_acer_predator_ph16_72,
++	},
+ 	{
+ 		.callback = dmi_matched,
+ 		.ident = "Acer Predator PH18-71",
+@@ -605,6 +638,15 @@ static const struct dmi_system_id acer_quirks[] __initconst = {
+ 		},
+ 		.driver_data = &quirk_acer_predator_v4,
+ 	},
++	{
++		.callback = dmi_matched,
++		.ident = "Acer Predator PT14-51",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Predator PT14-51"),
++		},
++		.driver_data = &quirk_acer_predator_pt14_51,
++	},
+ 	{
+ 		.callback = set_force_caps,
+ 		.ident = "Acer Aspire Switch 10E SW3-016",
+@@ -2285,6 +2327,9 @@ static void acer_wmi_notify(union acpi_object *obj, void *context)
+ 		if (return_value.key_num == 0x5 && has_cap(ACER_CAP_PLATFORM_PROFILE))
+ 			acer_thermal_profile_change();
+ 		break;
++	case WMID_AC_EVENT:
++		/* We ignore AC events here */
++		break;
+ 	default:
+ 		pr_warn("Unknown function number - %d - %d\n",
+ 			return_value.function, return_value.key_num);
+diff --git a/drivers/platform/x86/intel/int3472/discrete.c b/drivers/platform/x86/intel/int3472/discrete.c
+index 3de463c3d13b8e..15678508ee5019 100644
+--- a/drivers/platform/x86/intel/int3472/discrete.c
++++ b/drivers/platform/x86/intel/int3472/discrete.c
+@@ -336,6 +336,9 @@ static int skl_int3472_discrete_probe(struct platform_device *pdev)
+ 	struct int3472_cldb cldb;
+ 	int ret;
+ 
++	if (!adev)
++		return -ENODEV;
++
+ 	ret = skl_int3472_fill_cldb(adev, &cldb);
+ 	if (ret) {
+ 		dev_err(&pdev->dev, "Couldn't fill CLDB structure\n");
+diff --git a/drivers/platform/x86/intel/int3472/tps68470.c b/drivers/platform/x86/intel/int3472/tps68470.c
+index 1e107fd49f828c..81ac4c69196309 100644
+--- a/drivers/platform/x86/intel/int3472/tps68470.c
++++ b/drivers/platform/x86/intel/int3472/tps68470.c
+@@ -152,6 +152,9 @@ static int skl_int3472_tps68470_probe(struct i2c_client *client)
+ 	int ret;
+ 	int i;
+ 
++	if (!adev)
++		return -ENODEV;
++
+ 	n_consumers = skl_int3472_fill_clk_pdata(&client->dev, &clk_pdata);
+ 	if (n_consumers < 0)
+ 		return n_consumers;
+diff --git a/drivers/platform/x86/serdev_helpers.h b/drivers/platform/x86/serdev_helpers.h
+index bcf3a0c356ea1b..3bc7fd8e1e1972 100644
+--- a/drivers/platform/x86/serdev_helpers.h
++++ b/drivers/platform/x86/serdev_helpers.h
+@@ -35,7 +35,7 @@ get_serdev_controller(const char *serial_ctrl_hid,
+ 	ctrl_adev = acpi_dev_get_first_match_dev(serial_ctrl_hid, serial_ctrl_uid, -1);
+ 	if (!ctrl_adev) {
+ 		pr_err("error could not get %s/%s serial-ctrl adev\n",
+-		       serial_ctrl_hid, serial_ctrl_uid);
++		       serial_ctrl_hid, serial_ctrl_uid ?: "*");
+ 		return ERR_PTR(-ENODEV);
+ 	}
+ 
+@@ -43,7 +43,7 @@ get_serdev_controller(const char *serial_ctrl_hid,
+ 	ctrl_dev = get_device(acpi_get_first_physical_node(ctrl_adev));
+ 	if (!ctrl_dev) {
+ 		pr_err("error could not get %s/%s serial-ctrl physical node\n",
+-		       serial_ctrl_hid, serial_ctrl_uid);
++		       serial_ctrl_hid, serial_ctrl_uid ?: "*");
+ 		ctrl_dev = ERR_PTR(-ENODEV);
+ 		goto put_ctrl_adev;
+ 	}
+diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
+index 77a36e7bddd54e..1a1edd87122d3d 100644
+--- a/drivers/ptp/ptp_clock.c
++++ b/drivers/ptp/ptp_clock.c
+@@ -217,6 +217,11 @@ static int ptp_getcycles64(struct ptp_clock_info *info, struct timespec64 *ts)
+ 		return info->gettime64(info, ts);
+ }
+ 
++static int ptp_enable(struct ptp_clock_info *ptp, struct ptp_clock_request *request, int on)
++{
++	return -EOPNOTSUPP;
++}
++
+ static void ptp_aux_kworker(struct kthread_work *work)
+ {
+ 	struct ptp_clock *ptp = container_of(work, struct ptp_clock,
+@@ -294,6 +299,9 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
+ 			ptp->info->getcrosscycles = ptp->info->getcrosststamp;
+ 	}
+ 
++	if (!ptp->info->enable)
++		ptp->info->enable = ptp_enable;
++
+ 	if (ptp->info->do_aux_work) {
+ 		kthread_init_delayed_work(&ptp->aux_work, ptp_aux_kworker);
+ 		ptp->kworker = kthread_create_worker(0, "ptp%d", ptp->index);
+diff --git a/drivers/pwm/pwm-microchip-core.c b/drivers/pwm/pwm-microchip-core.c
+index c1f2287b8e9748..12821b4bbf9756 100644
+--- a/drivers/pwm/pwm-microchip-core.c
++++ b/drivers/pwm/pwm-microchip-core.c
+@@ -327,7 +327,7 @@ static int mchp_core_pwm_apply_locked(struct pwm_chip *chip, struct pwm_device *
+ 		 * mchp_core_pwm_calc_period().
+ 		 * The period is locked and we cannot change this, so we abort.
+ 		 */
+-		if (hw_period_steps == MCHPCOREPWM_PERIOD_STEPS_MAX)
++		if (hw_period_steps > MCHPCOREPWM_PERIOD_STEPS_MAX)
+ 			return -EINVAL;
+ 
+ 		prescale = hw_prescale;
+diff --git a/drivers/remoteproc/omap_remoteproc.c b/drivers/remoteproc/omap_remoteproc.c
+index 9ae2e831456d57..3260dd512491e8 100644
+--- a/drivers/remoteproc/omap_remoteproc.c
++++ b/drivers/remoteproc/omap_remoteproc.c
+@@ -37,6 +37,10 @@
+ 
+ #include <linux/platform_data/dmtimer-omap.h>
+ 
++#ifdef CONFIG_ARM_DMA_USE_IOMMU
++#include <asm/dma-iommu.h>
++#endif
++
+ #include "omap_remoteproc.h"
+ #include "remoteproc_internal.h"
+ 
+@@ -1323,6 +1327,19 @@ static int omap_rproc_probe(struct platform_device *pdev)
+ 	/* All existing OMAP IPU and DSP processors have an MMU */
+ 	rproc->has_iommu = true;
+ 
++#ifdef CONFIG_ARM_DMA_USE_IOMMU
++	/*
++	 * Throw away the ARM DMA mapping that we'll never use, so it doesn't
++	 * interfere with the core rproc->domain and we get the right DMA ops.
++	 */
++	if (pdev->dev.archdata.mapping) {
++		struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(&pdev->dev);
++
++		arm_iommu_detach_device(&pdev->dev);
++		arm_iommu_release_mapping(mapping);
++	}
++#endif
++
+ 	ret = omap_rproc_of_get_internal_memories(pdev, rproc);
+ 	if (ret)
+ 		return ret;
+diff --git a/drivers/rtc/rtc-zynqmp.c b/drivers/rtc/rtc-zynqmp.c
+index 08ed171bdab43a..b6f96c10196ae3 100644
+--- a/drivers/rtc/rtc-zynqmp.c
++++ b/drivers/rtc/rtc-zynqmp.c
+@@ -318,8 +318,8 @@ static int xlnx_rtc_probe(struct platform_device *pdev)
+ 		return ret;
+ 	}
+ 
+-	/* Getting the rtc_clk info */
+-	xrtcdev->rtc_clk = devm_clk_get_optional(&pdev->dev, "rtc_clk");
++	/* Getting the rtc info */
++	xrtcdev->rtc_clk = devm_clk_get_optional(&pdev->dev, "rtc");
+ 	if (IS_ERR(xrtcdev->rtc_clk)) {
+ 		if (PTR_ERR(xrtcdev->rtc_clk) != -EPROBE_DEFER)
+ 			dev_warn(&pdev->dev, "Device clock not found.\n");
+diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
+index 15066c112817a8..cb95b7b12051da 100644
+--- a/drivers/scsi/qla2xxx/qla_def.h
++++ b/drivers/scsi/qla2xxx/qla_def.h
+@@ -4098,6 +4098,8 @@ struct qla_hw_data {
+ 		uint32_t	npiv_supported		:1;
+ 		uint32_t	pci_channel_io_perm_failure	:1;
+ 		uint32_t	fce_enabled		:1;
++		uint32_t	user_enabled_fce	:1;
++		uint32_t	fce_dump_buf_alloced	:1;
+ 		uint32_t	fac_supported		:1;
+ 
+ 		uint32_t	chip_reset_done		:1;
+diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
+index a1545dad0c0ce2..08273520c77793 100644
+--- a/drivers/scsi/qla2xxx/qla_dfs.c
++++ b/drivers/scsi/qla2xxx/qla_dfs.c
+@@ -409,26 +409,31 @@ qla2x00_dfs_fce_show(struct seq_file *s, void *unused)
+ 
+ 	mutex_lock(&ha->fce_mutex);
+ 
+-	seq_puts(s, "FCE Trace Buffer\n");
+-	seq_printf(s, "In Pointer = %llx\n\n", (unsigned long long)ha->fce_wr);
+-	seq_printf(s, "Base = %llx\n\n", (unsigned long long) ha->fce_dma);
+-	seq_puts(s, "FCE Enable Registers\n");
+-	seq_printf(s, "%08x %08x %08x %08x %08x %08x\n",
+-	    ha->fce_mb[0], ha->fce_mb[2], ha->fce_mb[3], ha->fce_mb[4],
+-	    ha->fce_mb[5], ha->fce_mb[6]);
+-
+-	fce = (uint32_t *) ha->fce;
+-	fce_start = (unsigned long long) ha->fce_dma;
+-	for (cnt = 0; cnt < fce_calc_size(ha->fce_bufs) / 4; cnt++) {
+-		if (cnt % 8 == 0)
+-			seq_printf(s, "\n%llx: ",
+-			    (unsigned long long)((cnt * 4) + fce_start));
+-		else
+-			seq_putc(s, ' ');
+-		seq_printf(s, "%08x", *fce++);
+-	}
++	if (ha->flags.user_enabled_fce) {
++		seq_puts(s, "FCE Trace Buffer\n");
++		seq_printf(s, "In Pointer = %llx\n\n", (unsigned long long)ha->fce_wr);
++		seq_printf(s, "Base = %llx\n\n", (unsigned long long)ha->fce_dma);
++		seq_puts(s, "FCE Enable Registers\n");
++		seq_printf(s, "%08x %08x %08x %08x %08x %08x\n",
++			   ha->fce_mb[0], ha->fce_mb[2], ha->fce_mb[3], ha->fce_mb[4],
++			   ha->fce_mb[5], ha->fce_mb[6]);
++
++		fce = (uint32_t *)ha->fce;
++		fce_start = (unsigned long long)ha->fce_dma;
++		for (cnt = 0; cnt < fce_calc_size(ha->fce_bufs) / 4; cnt++) {
++			if (cnt % 8 == 0)
++				seq_printf(s, "\n%llx: ",
++					   (unsigned long long)((cnt * 4) + fce_start));
++			else
++				seq_putc(s, ' ');
++			seq_printf(s, "%08x", *fce++);
++		}
+ 
+-	seq_puts(s, "\nEnd\n");
++		seq_puts(s, "\nEnd\n");
++	} else {
++		seq_puts(s, "FCE Trace is currently not enabled\n");
++		seq_puts(s, "\techo [ 1 | 0 ] > fce\n");
++	}
+ 
+ 	mutex_unlock(&ha->fce_mutex);
+ 
+@@ -467,7 +472,7 @@ qla2x00_dfs_fce_release(struct inode *inode, struct file *file)
+ 	struct qla_hw_data *ha = vha->hw;
+ 	int rval;
+ 
+-	if (ha->flags.fce_enabled)
++	if (ha->flags.fce_enabled || !ha->fce)
+ 		goto out;
+ 
+ 	mutex_lock(&ha->fce_mutex);
+@@ -488,11 +493,91 @@
+ 	return single_release(inode, file);
+ }
+ 
++static ssize_t
++qla2x00_dfs_fce_write(struct file *file, const char __user *buffer,
++		      size_t count, loff_t *pos)
++{
++	struct seq_file *s = file->private_data;
++	struct scsi_qla_host *vha = s->private;
++	struct qla_hw_data *ha = vha->hw;
++	char *buf;
++	int rc = 0;
++	unsigned long enable;
++
++	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
++	    !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) {
++		ql_dbg(ql_dbg_user, vha, 0xd034,
++		       "this adapter does not support FCE.");
++		return -EINVAL;
++	}
++
++	buf = memdup_user_nul(buffer, count);
++	if (IS_ERR(buf)) {
++		ql_dbg(ql_dbg_user, vha, 0xd037,
++		    "failed to copy user buffer.");
++		return PTR_ERR(buf);
++	}
++
++	rc = kstrtoul(buf, 0, &enable);
++	if (rc)
++		goto out_free;
++	rc = count;
++
++	mutex_lock(&ha->fce_mutex);
++
++	if (enable) {
++		if (ha->flags.user_enabled_fce) {
++			mutex_unlock(&ha->fce_mutex);
++			goto out_free;
++		}
++		ha->flags.user_enabled_fce = 1;
++		if (!ha->fce) {
++			rc = qla2x00_alloc_fce_trace(vha);
++			if (rc) {
++				ha->flags.user_enabled_fce = 0;
++				mutex_unlock(&ha->fce_mutex);
++				goto out_free;
++			}
++			rc = count;
++
++			/* adjust fw dump buffer to take into account of this feature */
++			/* adjust the fw dump buffer to take this feature into account */
++				qla2x00_alloc_fw_dump(vha);
++		}
++
++		if (!ha->flags.fce_enabled)
++			qla_enable_fce_trace(vha);
++
++		ql_dbg(ql_dbg_user, vha, 0xd045, "User enabled FCE.\n");
++	} else {
++		if (!ha->flags.user_enabled_fce) {
++			mutex_unlock(&ha->fce_mutex);
++			goto out_free;
++		}
++		ha->flags.user_enabled_fce = 0;
++		if (ha->flags.fce_enabled) {
++			qla2x00_disable_fce_trace(vha, NULL, NULL);
++			ha->flags.fce_enabled = 0;
++		}
++
++		qla2x00_free_fce_trace(ha);
++		/* no need to re-adjust fw dump buffer */
++
++		ql_dbg(ql_dbg_user, vha, 0xd04f, "User disabled FCE.\n");
++	}
++
++	mutex_unlock(&ha->fce_mutex);
++out_free:
++	kfree(buf);
++	return rc;
++}
++
+ static const struct file_operations dfs_fce_ops = {
+ 	.open		= qla2x00_dfs_fce_open,
+ 	.read		= seq_read,
+ 	.llseek		= seq_lseek,
+ 	.release	= qla2x00_dfs_fce_release,
++	.write		= qla2x00_dfs_fce_write,
+ };
+ 
+ static int
+@@ -626,8 +708,6 @@ qla2x00_dfs_setup(scsi_qla_host_t *vha)
+ 	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
+ 	    !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+ 		goto out;
+-	if (!ha->fce)
+-		goto out;
+ 
+ 	if (qla2x00_dfs_root)
+ 		goto create_dir;
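
The write handler added above follows the usual debugfs pattern: memdup_user_nul() to get a NUL-terminated copy of the user buffer, kstrtoul() to parse it, and count returned on success. A condensed sketch of just that parsing step, with the qla2xxx locking and state handling omitted and demo_* as a placeholder name:

#include <linux/err.h>
#include <linux/kstrtox.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

static ssize_t demo_parse_enable(const char __user *ubuf, size_t count,
				 unsigned long *enable)
{
	char *buf = memdup_user_nul(ubuf, count);
	int rc;

	if (IS_ERR(buf))
		return PTR_ERR(buf);

	rc = kstrtoul(buf, 0, enable);	/* parse into caller's variable */
	kfree(buf);

	return rc ? rc : count;		/* consumed everything on success */
}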
+diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
+index cededfda9d0e31..e556f57c91af62 100644
+--- a/drivers/scsi/qla2xxx/qla_gbl.h
++++ b/drivers/scsi/qla2xxx/qla_gbl.h
+@@ -11,6 +11,9 @@
+ /*
+  * Global Function Prototypes in qla_init.c source file.
+  */
++int  qla2x00_alloc_fce_trace(scsi_qla_host_t *);
++void qla2x00_free_fce_trace(struct qla_hw_data *ha);
++void qla_enable_fce_trace(scsi_qla_host_t *);
+ extern int qla2x00_initialize_adapter(scsi_qla_host_t *);
+ extern int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport);
+ 
+diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
+index 31fc6a0eca3e80..79cdfec2bca356 100644
+--- a/drivers/scsi/qla2xxx/qla_init.c
++++ b/drivers/scsi/qla2xxx/qla_init.c
+@@ -2681,7 +2681,7 @@ qla83xx_nic_core_fw_load(scsi_qla_host_t *vha)
+ 	return rval;
+ }
+ 
+-static void qla_enable_fce_trace(scsi_qla_host_t *vha)
++void qla_enable_fce_trace(scsi_qla_host_t *vha)
+ {
+ 	int rval;
+ 	struct qla_hw_data *ha = vha->hw;
+@@ -3717,25 +3717,24 @@ qla24xx_chip_diag(scsi_qla_host_t *vha)
+ 	return rval;
+ }
+ 
+-static void
+-qla2x00_alloc_fce_trace(scsi_qla_host_t *vha)
++int qla2x00_alloc_fce_trace(scsi_qla_host_t *vha)
+ {
+ 	dma_addr_t tc_dma;
+ 	void *tc;
+ 	struct qla_hw_data *ha = vha->hw;
+ 
+ 	if (!IS_FWI2_CAPABLE(ha))
+-		return;
++		return -EINVAL;
+ 
+ 	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
+ 	    !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+-		return;
++		return -EINVAL;
+ 
+ 	if (ha->fce) {
+ 		ql_dbg(ql_dbg_init, vha, 0x00bd,
+ 		       "%s: FCE Mem is already allocated.\n",
+ 		       __func__);
+-		return;
++		return -EIO;
+ 	}
+ 
+ 	/* Allocate memory for Fibre Channel Event Buffer. */
+@@ -3745,7 +3744,7 @@ qla2x00_alloc_fce_trace(scsi_qla_host_t *vha)
+ 		ql_log(ql_log_warn, vha, 0x00be,
+ 		       "Unable to allocate (%d KB) for FCE.\n",
+ 		       FCE_SIZE / 1024);
+-		return;
++		return -ENOMEM;
+ 	}
+ 
+ 	ql_dbg(ql_dbg_init, vha, 0x00c0,
+@@ -3754,6 +3753,16 @@ qla2x00_alloc_fce_trace(scsi_qla_host_t *vha)
+ 	ha->fce_dma = tc_dma;
+ 	ha->fce = tc;
+ 	ha->fce_bufs = FCE_NUM_BUFFERS;
++	return 0;
++}
++
++void qla2x00_free_fce_trace(struct qla_hw_data *ha)
++{
++	if (!ha->fce)
++		return;
++	dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce, ha->fce_dma);
++	ha->fce = NULL;
++	ha->fce_dma = 0;
+ }
+ 
+ static void
+@@ -3844,9 +3853,10 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
+ 		if (ha->tgt.atio_ring)
+ 			mq_size += ha->tgt.atio_q_length * sizeof(request_t);
+ 
+-		qla2x00_alloc_fce_trace(vha);
+-		if (ha->fce)
++		if (ha->fce) {
+ 			fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
++			ha->flags.fce_dump_buf_alloced = 1;
++		}
+ 		qla2x00_alloc_eft_trace(vha);
+ 		if (ha->eft)
+ 			eft_size = EFT_SIZE;
+diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
+index adee6f60c96655..c9dde1ac9523e8 100644
+--- a/drivers/scsi/scsi_lib.c
++++ b/drivers/scsi/scsi_lib.c
+@@ -865,13 +865,18 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
+ 				case 0x1a: /* start stop unit in progress */
+ 				case 0x1b: /* sanitize in progress */
+ 				case 0x1d: /* configuration in progress */
+-				case 0x24: /* depopulation in progress */
+-				case 0x25: /* depopulation restore in progress */
+ 					action = ACTION_DELAYED_RETRY;
+ 					break;
+ 				case 0x0a: /* ALUA state transition */
+ 					action = ACTION_DELAYED_REPREP;
+ 					break;
++				/*
++				 * Depopulation might take many hours,
++				 * thus it is not worthwhile to retry.
++				 */
++				case 0x24: /* depopulation in progress */
++				case 0x25: /* depopulation restore in progress */
++					fallthrough;
+ 				default:
+ 					action = ACTION_FAIL;
+ 					break;
+diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
+index c9038284bc893d..0dc37fc6f23678 100644
+--- a/drivers/scsi/st.c
++++ b/drivers/scsi/st.c
+@@ -1027,6 +1027,11 @@ static int test_ready(struct scsi_tape *STp, int do_wait)
+ 			retval = new_session ? CHKRES_NEW_SESSION : CHKRES_READY;
+ 		break;
+ 	}
++	if (STp->first_tur) {
++		/* Don't set pos_unknown right after device recognition */
++		STp->pos_unknown = 0;
++		STp->first_tur = 0;
++	}
+ 
+ 	if (SRpnt != NULL)
+ 		st_release_request(SRpnt);
+@@ -4325,6 +4330,7 @@ static int st_probe(struct device *dev)
+ 	blk_queue_rq_timeout(tpnt->device->request_queue, ST_TIMEOUT);
+ 	tpnt->long_timeout = ST_LONG_TIMEOUT;
+ 	tpnt->try_dio = try_direct_io;
++	tpnt->first_tur = 1;
+ 
+ 	for (i = 0; i < ST_NBR_MODES; i++) {
+ 		STm = &(tpnt->modes[i]);
+diff --git a/drivers/scsi/st.h b/drivers/scsi/st.h
+index 7a68eaba7e810c..1aaaf5369a40fc 100644
+--- a/drivers/scsi/st.h
++++ b/drivers/scsi/st.h
+@@ -170,6 +170,7 @@ struct scsi_tape {
+ 	unsigned char rew_at_close;  /* rewind necessary at close */
+ 	unsigned char inited;
+ 	unsigned char cleaning_req;  /* cleaning requested? */
++	unsigned char first_tur;     /* first TEST UNIT READY */
+ 	int block_size;
+ 	int min_block;
+ 	int max_block;
+diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
+index b3c588b102d900..b8186feccdf5aa 100644
+--- a/drivers/scsi/storvsc_drv.c
++++ b/drivers/scsi/storvsc_drv.c
+@@ -1800,6 +1800,7 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
+ 
+ 	length = scsi_bufflen(scmnd);
+ 	payload = (struct vmbus_packet_mpb_array *)&cmd_request->mpb;
++	payload->range.len = 0;
+ 	payload_sz = 0;
+ 
+ 	if (scsi_sg_count(scmnd)) {
+diff --git a/drivers/soc/mediatek/mtk-devapc.c b/drivers/soc/mediatek/mtk-devapc.c
+index 56cc345552a430..d83a46334adbbe 100644
+--- a/drivers/soc/mediatek/mtk-devapc.c
++++ b/drivers/soc/mediatek/mtk-devapc.c
+@@ -273,23 +273,31 @@ static int mtk_devapc_probe(struct platform_device *pdev)
+ 		return -EINVAL;
+ 
+ 	devapc_irq = irq_of_parse_and_map(node, 0);
+-	if (!devapc_irq)
+-		return -EINVAL;
++	if (!devapc_irq) {
++		ret = -EINVAL;
++		goto err;
++	}
+ 
+ 	ctx->infra_clk = devm_clk_get_enabled(&pdev->dev, "devapc-infra-clock");
+-	if (IS_ERR(ctx->infra_clk))
+-		return -EINVAL;
++	if (IS_ERR(ctx->infra_clk)) {
++		ret = -EINVAL;
++		goto err;
++	}
+ 
+ 	ret = devm_request_irq(&pdev->dev, devapc_irq, devapc_violation_irq,
+ 			       IRQF_TRIGGER_NONE, "devapc", ctx);
+ 	if (ret)
+-		return ret;
++		goto err;
+ 
+ 	platform_set_drvdata(pdev, ctx);
+ 
+ 	start_devapc(ctx);
+ 
+ 	return 0;
++
++err:
++	iounmap(ctx->infra_base);
++	return ret;
+ }
+ 
+ static void mtk_devapc_remove(struct platform_device *pdev)
+@@ -297,6 +305,7 @@ static void mtk_devapc_remove(struct platform_device *pdev)
+ 	struct mtk_devapc_context *ctx = platform_get_drvdata(pdev);
+ 
+ 	stop_devapc(ctx);
++	iounmap(ctx->infra_base);
+ }
+ 
+ static struct platform_driver mtk_devapc_driver = {
+diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c
+index a470285f54a875..133dc483331352 100644
+--- a/drivers/soc/qcom/llcc-qcom.c
++++ b/drivers/soc/qcom/llcc-qcom.c
+@@ -2511,6 +2511,7 @@ static const struct llcc_slice_config x1e80100_data[] = {
+ 		.fixed_size = true,
+ 		.bonus_ways = 0xfff,
+ 		.cache_mode = 0,
++		.activate_on_init = true,
+ 	}, {
+ 		.usecase_id = LLCC_CAMEXP0,
+ 		.slice_id = 4,
+diff --git a/drivers/soc/qcom/smem_state.c b/drivers/soc/qcom/smem_state.c
+index e848cc9a3cf801..a8be3a2f33824f 100644
+--- a/drivers/soc/qcom/smem_state.c
++++ b/drivers/soc/qcom/smem_state.c
+@@ -116,7 +116,8 @@ struct qcom_smem_state *qcom_smem_state_get(struct device *dev,
+ 
+ 	if (args.args_count != 1) {
+ 		dev_err(dev, "invalid #qcom,smem-state-cells\n");
+-		return ERR_PTR(-EINVAL);
++		state = ERR_PTR(-EINVAL);
++		goto put;
+ 	}
+ 
+ 	state = of_node_to_state(args.np);
+diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
+index ecfd3da9d5e877..c2f2a1ce4194b3 100644
+--- a/drivers/soc/qcom/socinfo.c
++++ b/drivers/soc/qcom/socinfo.c
+@@ -789,7 +789,7 @@ static int qcom_socinfo_probe(struct platform_device *pdev)
+ 	if (!qs->attr.soc_id || !qs->attr.revision)
+ 		return -ENOMEM;
+ 
+-	if (offsetof(struct socinfo, serial_num) <= item_size) {
++	if (offsetofend(struct socinfo, serial_num) <= item_size) {
+ 		qs->attr.serial_number = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+ 							"%u",
+ 							le32_to_cpu(info->serial_num));
+diff --git a/drivers/soc/samsung/exynos-pmu.c b/drivers/soc/samsung/exynos-pmu.c
+index d8c53cec7f37ad..dd5256e5aae1ae 100644
+--- a/drivers/soc/samsung/exynos-pmu.c
++++ b/drivers/soc/samsung/exynos-pmu.c
+@@ -126,7 +126,7 @@ static int tensor_set_bits_atomic(void *ctx, unsigned int offset, u32 val,
+ 		if (ret)
+ 			return ret;
+ 	}
+-	return ret;
++	return 0;
+ }
+ 
+ static bool tensor_is_atomic(unsigned int reg)
+diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c
+index caecb2ad2a150d..02d83e2956cdf7 100644
+--- a/drivers/spi/atmel-quadspi.c
++++ b/drivers/spi/atmel-quadspi.c
+@@ -138,11 +138,15 @@
+ #define QSPI_WPSR_WPVSRC_MASK           GENMASK(15, 8)
+ #define QSPI_WPSR_WPVSRC(src)           (((src) << 8) & QSPI_WPSR_WPVSRC)
+ 
++#define ATMEL_QSPI_TIMEOUT		1000	/* ms */
++
+ struct atmel_qspi_caps {
+ 	bool has_qspick;
+ 	bool has_ricr;
+ };
+ 
++struct atmel_qspi_ops;
++
+ struct atmel_qspi {
+ 	void __iomem		*regs;
+ 	void __iomem		*mem;
+@@ -150,13 +154,22 @@ struct atmel_qspi {
+ 	struct clk		*qspick;
+ 	struct platform_device	*pdev;
+ 	const struct atmel_qspi_caps *caps;
++	const struct atmel_qspi_ops *ops;
+ 	resource_size_t		mmap_size;
+ 	u32			pending;
++	u32			irq_mask;
+ 	u32			mr;
+ 	u32			scr;
+ 	struct completion	cmd_completion;
+ };
+ 
++struct atmel_qspi_ops {
++	int (*set_cfg)(struct atmel_qspi *aq, const struct spi_mem_op *op,
++		       u32 *offset);
++	int (*transfer)(struct spi_mem *mem, const struct spi_mem_op *op,
++			u32 offset);
++};
++
+ struct atmel_qspi_mode {
+ 	u8 cmd_buswidth;
+ 	u8 addr_buswidth;
+@@ -404,10 +417,67 @@ static int atmel_qspi_set_cfg(struct atmel_qspi *aq,
+ 	return 0;
+ }
+ 
++static int atmel_qspi_wait_for_completion(struct atmel_qspi *aq, u32 irq_mask)
++{
++	int err = 0;
++	u32 sr;
++
++	/* Poll INSTRuction End status */
++	sr = atmel_qspi_read(aq, QSPI_SR);
++	if ((sr & irq_mask) == irq_mask)
++		return 0;
++
++	/* Wait for INSTRuction End interrupt */
++	reinit_completion(&aq->cmd_completion);
++	aq->pending = sr & irq_mask;
++	aq->irq_mask = irq_mask;
++	atmel_qspi_write(irq_mask, aq, QSPI_IER);
++	if (!wait_for_completion_timeout(&aq->cmd_completion,
++					 msecs_to_jiffies(ATMEL_QSPI_TIMEOUT)))
++		err = -ETIMEDOUT;
++	atmel_qspi_write(irq_mask, aq, QSPI_IDR);
++
++	return err;
++}
++
++static int atmel_qspi_transfer(struct spi_mem *mem,
++			       const struct spi_mem_op *op, u32 offset)
++{
++	struct atmel_qspi *aq = spi_controller_get_devdata(mem->spi->controller);
++
++	/* Skip to the final steps if there is no data */
++	if (!op->data.nbytes)
++		return atmel_qspi_wait_for_completion(aq,
++						      QSPI_SR_CMD_COMPLETED);
++
++	/* Dummy read of QSPI_IFR to synchronize APB and AHB accesses */
++	(void)atmel_qspi_read(aq, QSPI_IFR);
++
++	/* Send/Receive data */
++	if (op->data.dir == SPI_MEM_DATA_IN) {
++		memcpy_fromio(op->data.buf.in, aq->mem + offset,
++			      op->data.nbytes);
++
++		/* Synchronize AHB and APB accesses again */
++		rmb();
++	} else {
++		memcpy_toio(aq->mem + offset, op->data.buf.out,
++			    op->data.nbytes);
++
++		/* Synchronize AHB and APB accesses again */
++		wmb();
++	}
++
++	/* Release the chip-select */
++	atmel_qspi_write(QSPI_CR_LASTXFER, aq, QSPI_CR);
++
++	return atmel_qspi_wait_for_completion(aq, QSPI_SR_CMD_COMPLETED);
++}
++
+ static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
+ {
+ 	struct atmel_qspi *aq = spi_controller_get_devdata(mem->spi->controller);
+-	u32 sr, offset;
++	u32 offset;
+ 	int err;
+ 
+ 	/*
+@@ -416,46 +486,20 @@ static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
+ 	 * when the flash memories overrun the controller's memory space.
+ 	 */
+ 	if (op->addr.val + op->data.nbytes > aq->mmap_size)
+-		return -ENOTSUPP;
++		return -EOPNOTSUPP;
++
++	if (op->addr.nbytes > 4)
++		return -EOPNOTSUPP;
+ 
+ 	err = pm_runtime_resume_and_get(&aq->pdev->dev);
+ 	if (err < 0)
+ 		return err;
+ 
+-	err = atmel_qspi_set_cfg(aq, op, &offset);
++	err = aq->ops->set_cfg(aq, op, &offset);
+ 	if (err)
+ 		goto pm_runtime_put;
+ 
+-	/* Skip to the final steps if there is no data */
+-	if (op->data.nbytes) {
+-		/* Dummy read of QSPI_IFR to synchronize APB and AHB accesses */
+-		(void)atmel_qspi_read(aq, QSPI_IFR);
+-
+-		/* Send/Receive data */
+-		if (op->data.dir == SPI_MEM_DATA_IN)
+-			memcpy_fromio(op->data.buf.in, aq->mem + offset,
+-				      op->data.nbytes);
+-		else
+-			memcpy_toio(aq->mem + offset, op->data.buf.out,
+-				    op->data.nbytes);
+-
+-		/* Release the chip-select */
+-		atmel_qspi_write(QSPI_CR_LASTXFER, aq, QSPI_CR);
+-	}
+-
+-	/* Poll INSTRuction End status */
+-	sr = atmel_qspi_read(aq, QSPI_SR);
+-	if ((sr & QSPI_SR_CMD_COMPLETED) == QSPI_SR_CMD_COMPLETED)
+-		goto pm_runtime_put;
+-
+-	/* Wait for INSTRuction End interrupt */
+-	reinit_completion(&aq->cmd_completion);
+-	aq->pending = sr & QSPI_SR_CMD_COMPLETED;
+-	atmel_qspi_write(QSPI_SR_CMD_COMPLETED, aq, QSPI_IER);
+-	if (!wait_for_completion_timeout(&aq->cmd_completion,
+-					 msecs_to_jiffies(1000)))
+-		err = -ETIMEDOUT;
+-	atmel_qspi_write(QSPI_SR_CMD_COMPLETED, aq, QSPI_IDR);
++	err = aq->ops->transfer(mem, op, offset);
+ 
+ pm_runtime_put:
+ 	pm_runtime_mark_last_busy(&aq->pdev->dev);
+@@ -571,12 +615,17 @@ static irqreturn_t atmel_qspi_interrupt(int irq, void *dev_id)
+ 		return IRQ_NONE;
+ 
+ 	aq->pending |= pending;
+-	if ((aq->pending & QSPI_SR_CMD_COMPLETED) == QSPI_SR_CMD_COMPLETED)
++	if ((aq->pending & aq->irq_mask) == aq->irq_mask)
+ 		complete(&aq->cmd_completion);
+ 
+ 	return IRQ_HANDLED;
+ }
+ 
++static const struct atmel_qspi_ops atmel_qspi_ops = {
++	.set_cfg = atmel_qspi_set_cfg,
++	.transfer = atmel_qspi_transfer,
++};
++
+ static int atmel_qspi_probe(struct platform_device *pdev)
+ {
+ 	struct spi_controller *ctrl;
+@@ -601,6 +650,7 @@ static int atmel_qspi_probe(struct platform_device *pdev)
+ 
+ 	init_completion(&aq->cmd_completion);
+ 	aq->pdev = pdev;
++	aq->ops = &atmel_qspi_ops;
+ 
+ 	/* Map the registers */
+ 	aq->regs = devm_platform_ioremap_resource_byname(pdev, "qspi_base");
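
atmel_qspi_wait_for_completion(), factored out above, uses a common fast-path/slow-path shape: check the status register first in case the command already finished, and only then arm the interrupt mask and sleep with a timeout. A reduced sketch with stand-in names (the IER/IDR register writes are elided):

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/types.h>

#define DEMO_TIMEOUT_MS	1000

static int demo_wait_masked(struct completion *done, u32 sr, u32 mask)
{
	if ((sr & mask) == mask)
		return 0;	/* fast path: already completed, no IRQ */

	/* slow path: caller enables the IRQs in @mask around this wait */
	if (!wait_for_completion_timeout(done,
					 msecs_to_jiffies(DEMO_TIMEOUT_MS)))
		return -ETIMEDOUT;

	return 0;
}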
+diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
+index bdf17eafd3598d..f43059e1b5c28e 100644
+--- a/drivers/tty/serial/sh-sci.c
++++ b/drivers/tty/serial/sh-sci.c
+@@ -165,6 +165,7 @@ struct sci_port {
+ static struct sci_port sci_ports[SCI_NPORTS];
+ static unsigned long sci_ports_in_use;
+ static struct uart_driver sci_uart_driver;
++static bool sci_uart_earlycon;
+ 
+ static inline struct sci_port *
+ to_sci_port(struct uart_port *uart)
+@@ -3450,6 +3451,7 @@ static int sci_probe_single(struct platform_device *dev,
+ static int sci_probe(struct platform_device *dev)
+ {
+ 	struct plat_sci_port *p;
++	struct resource *res;
+ 	struct sci_port *sp;
+ 	unsigned int dev_id;
+ 	int ret;
+@@ -3479,6 +3481,26 @@ static int sci_probe(struct platform_device *dev)
+ 	}
+ 
+ 	sp = &sci_ports[dev_id];
++
++	/*
++	 * In case:
++	 * - the probed port alias is zero (i.e., the one used by earlycon), and
++	 * - the earlycon is still active (e.g., "earlycon keep_bootcon" in
++	 *   bootargs)
++	 *
++	 * defer the probe of this serial port. This is a debug scenario and
++	 * the user must be aware of it.
++	 *
++	 * Except when the probed port is the same as the earlycon port.
++	 */
++
++	res = platform_get_resource(dev, IORESOURCE_MEM, 0);
++	if (!res)
++		return -ENODEV;
++
++	if (sci_uart_earlycon && sp == &sci_ports[0] && sp->port.mapbase != res->start)
++		return dev_err_probe(&dev->dev, -EBUSY, "sci_port[0] is used by earlycon!\n");
++
+ 	platform_set_drvdata(dev, sp);
+ 
+ 	ret = sci_probe_single(dev, dev_id, p, sp);
+@@ -3562,7 +3584,7 @@ sh_early_platform_init_buffer("earlyprintk", &sci_driver,
+ 			   early_serial_buf, ARRAY_SIZE(early_serial_buf));
+ #endif
+ #ifdef CONFIG_SERIAL_SH_SCI_EARLYCON
+-static struct plat_sci_port port_cfg __initdata;
++static struct plat_sci_port port_cfg;
+ 
+ static int __init early_console_setup(struct earlycon_device *device,
+ 				      int type)
+@@ -3575,6 +3597,7 @@ static int __init early_console_setup(struct earlycon_device *device,
+ 	port_cfg.type = type;
+ 	sci_ports[0].cfg = &port_cfg;
+ 	sci_ports[0].params = sci_probe_regmap(&port_cfg);
++	sci_uart_earlycon = true;
+ 	port_cfg.scscr = sci_serial_in(&sci_ports[0].port, SCSCR);
+ 	sci_serial_out(&sci_ports[0].port, SCSCR,
+ 		       SCSCR_RE | SCSCR_TE | port_cfg.scscr);
+diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
+index 777392914819d7..1d636578c1efc5 100644
+--- a/drivers/tty/serial/xilinx_uartps.c
++++ b/drivers/tty/serial/xilinx_uartps.c
+@@ -287,7 +287,7 @@ static void cdns_uart_handle_rx(void *dev_id, unsigned int isrstatus)
+ 				continue;
+ 		}
+ 
+-		if (uart_handle_sysrq_char(port, data))
++		if (uart_prepare_sysrq_char(port, data))
+ 			continue;
+ 
+ 		if (is_rxbs_support) {
+@@ -495,7 +495,7 @@ static irqreturn_t cdns_uart_isr(int irq, void *dev_id)
+ 	    !(readl(port->membase + CDNS_UART_CR) & CDNS_UART_CR_RX_DIS))
+ 		cdns_uart_handle_rx(dev_id, isrstatus);
+ 
+-	uart_port_unlock(port);
++	uart_unlock_and_check_sysrq(port);
+ 	return IRQ_HANDLED;
+ }
+ 
+@@ -1380,9 +1380,7 @@ static void cdns_uart_console_write(struct console *co, const char *s,
+ 	unsigned int imr, ctrl;
+ 	int locked = 1;
+ 
+-	if (port->sysrq)
+-		locked = 0;
+-	else if (oops_in_progress)
++	if (oops_in_progress)
+ 		locked = uart_port_trylock_irqsave(port, &flags);
+ 	else
+ 		uart_port_lock_irqsave(port, &flags);
+diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c
+index 564341f1a74f3f..0bd6544e30a6b3 100644
+--- a/drivers/tty/vt/selection.c
++++ b/drivers/tty/vt/selection.c
+@@ -192,6 +192,20 @@ int set_selection_user(const struct tiocl_selection __user *sel,
+ 	if (copy_from_user(&v, sel, sizeof(*sel)))
+ 		return -EFAULT;
+ 
++	/*
++	 * TIOCL_SELCLEAR, TIOCL_SELPOINTER and TIOCL_SELMOUSEREPORT are OK to
++	 * use without CAP_SYS_ADMIN as they do not modify the selection.
++	 */
++	switch (v.sel_mode) {
++	case TIOCL_SELCLEAR:
++	case TIOCL_SELPOINTER:
++	case TIOCL_SELMOUSEREPORT:
++		break;
++	default:
++		if (!capable(CAP_SYS_ADMIN))
++			return -EPERM;
++	}
++
+ 	return set_selection_kernel(&v, tty);
+ }
+ 
+diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
+index 96842ce817af47..be5564ed8c018a 100644
+--- a/drivers/tty/vt/vt.c
++++ b/drivers/tty/vt/vt.c
+@@ -3345,8 +3345,6 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
+ 
+ 	switch (type) {
+ 	case TIOCL_SETSEL:
+-		if (!capable(CAP_SYS_ADMIN))
+-			return -EPERM;
+ 		return set_selection_user(param, tty);
+ 	case TIOCL_PASTESEL:
+ 		if (!capable(CAP_SYS_ADMIN))
+diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
+index 6cc9e61cca07de..b786cba9a270f4 100644
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -10323,16 +10323,6 @@ int ufshcd_system_thaw(struct device *dev)
+ EXPORT_SYMBOL_GPL(ufshcd_system_thaw);
+ #endif /* CONFIG_PM_SLEEP  */
+ 
+-/**
+- * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
+- * @hba: pointer to Host Bus Adapter (HBA)
+- */
+-void ufshcd_dealloc_host(struct ufs_hba *hba)
+-{
+-	scsi_host_put(hba->host);
+-}
+-EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
+-
+ /**
+  * ufshcd_set_dma_mask - Set dma mask based on the controller
+  *			 addressing capability
+@@ -10351,12 +10341,26 @@ static int ufshcd_set_dma_mask(struct ufs_hba *hba)
+ 	return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
+ }
+ 
++/**
++ * ufshcd_devres_release - devres cleanup handler, invoked during release of
++ *			   hba->dev
++ * @host: pointer to SCSI host
++ */
++static void ufshcd_devres_release(void *host)
++{
++	scsi_host_put(host);
++}
++
+ /**
+  * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
+  * @dev: pointer to device handle
+  * @hba_handle: driver private handle
+  *
+  * Return: 0 on success, non-zero value on failure.
++ *
++ * NOTE: There is no corresponding ufshcd_dealloc_host() because this function
++ * keeps track of its allocations using devres and deallocates everything on
++ * device removal automatically.
+  */
+ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
+ {
+@@ -10378,6 +10382,13 @@ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
+ 		err = -ENOMEM;
+ 		goto out_error;
+ 	}
++
++	err = devm_add_action_or_reset(dev, ufshcd_devres_release,
++				       host);
++	if (err)
++		return dev_err_probe(dev, err,
++				     "failed to add ufshcd dealloc action\n");
++
+ 	host->nr_maps = HCTX_TYPE_POLL + 1;
+ 	hba = shost_priv(host);
+ 	hba->host = host;
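/*
 * For readers unfamiliar with the devres idiom above:
 * devm_add_action_or_reset() registers a callback that runs automatically
 * when the device is released, which is why ufshcd_dealloc_host() can be
 * deleted outright.  A minimal sketch under assumed names (my_obj and its
 * get/put helpers are stand-ins, not UFS code):
 */
static void my_obj_release(void *obj)
{
	my_obj_put(obj);		/* drop the final reference */
}

static int my_probe(struct device *dev)
{
	struct my_obj *obj = my_obj_get(dev);

	if (!obj)
		return -ENOMEM;

	/*
	 * On success the put is tied to dev's lifetime; on failure the
	 * "_or_reset" part calls my_obj_release(obj) before returning,
	 * so neither path leaks the reference.
	 */
	return devm_add_action_or_reset(dev, my_obj_release, obj);
}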
+diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
+index 989692fb91083f..e12c5f9f795638 100644
+--- a/drivers/ufs/host/ufs-qcom.c
++++ b/drivers/ufs/host/ufs-qcom.c
+@@ -155,8 +155,9 @@ static int ufs_qcom_ice_program_key(struct ufs_hba *hba,
+ {
+ 	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ 	union ufs_crypto_cap_entry cap;
+-	bool config_enable =
+-		cfg->config_enable & UFS_CRYPTO_CONFIGURATION_ENABLE;
++
++	if (!(cfg->config_enable & UFS_CRYPTO_CONFIGURATION_ENABLE))
++		return qcom_ice_evict_key(host->ice, slot);
+ 
+ 	/* Only AES-256-XTS has been tested so far. */
+ 	cap = hba->crypto_cap_array[cfg->crypto_cap_idx];
+@@ -164,14 +165,11 @@ static int ufs_qcom_ice_program_key(struct ufs_hba *hba,
+ 	    cap.key_size != UFS_CRYPTO_KEY_SIZE_256)
+ 		return -EOPNOTSUPP;
+ 
+-	if (config_enable)
+-		return qcom_ice_program_key(host->ice,
+-					    QCOM_ICE_CRYPTO_ALG_AES_XTS,
+-					    QCOM_ICE_CRYPTO_KEY_SIZE_256,
+-					    cfg->crypto_key,
+-					    cfg->data_unit_size, slot);
+-	else
+-		return qcom_ice_evict_key(host->ice, slot);
++	return qcom_ice_program_key(host->ice,
++				    QCOM_ICE_CRYPTO_ALG_AES_XTS,
++				    QCOM_ICE_CRYPTO_KEY_SIZE_256,
++				    cfg->crypto_key,
++				    cfg->data_unit_size, slot);
+ }
+ 
+ #else
+diff --git a/drivers/ufs/host/ufshcd-pci.c b/drivers/ufs/host/ufshcd-pci.c
+index 54e0cc0653a247..850ff71130d5e4 100644
+--- a/drivers/ufs/host/ufshcd-pci.c
++++ b/drivers/ufs/host/ufshcd-pci.c
+@@ -562,7 +562,6 @@ static void ufshcd_pci_remove(struct pci_dev *pdev)
+ 	pm_runtime_forbid(&pdev->dev);
+ 	pm_runtime_get_noresume(&pdev->dev);
+ 	ufshcd_remove(hba);
+-	ufshcd_dealloc_host(hba);
+ }
+ 
+ /**
+@@ -607,7 +606,6 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	err = ufshcd_init(hba, mmio_base, pdev->irq);
+ 	if (err) {
+ 		dev_err(&pdev->dev, "Initialization failed\n");
+-		ufshcd_dealloc_host(hba);
+ 		return err;
+ 	}
+ 
+diff --git a/drivers/ufs/host/ufshcd-pltfrm.c b/drivers/ufs/host/ufshcd-pltfrm.c
+index 505572d4fa878c..ffe5d1d2b21588 100644
+--- a/drivers/ufs/host/ufshcd-pltfrm.c
++++ b/drivers/ufs/host/ufshcd-pltfrm.c
+@@ -465,21 +465,17 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
+ 	struct device *dev = &pdev->dev;
+ 
+ 	mmio_base = devm_platform_ioremap_resource(pdev, 0);
+-	if (IS_ERR(mmio_base)) {
+-		err = PTR_ERR(mmio_base);
+-		goto out;
+-	}
++	if (IS_ERR(mmio_base))
++		return PTR_ERR(mmio_base);
+ 
+ 	irq = platform_get_irq(pdev, 0);
+-	if (irq < 0) {
+-		err = irq;
+-		goto out;
+-	}
++	if (irq < 0)
++		return irq;
+ 
+ 	err = ufshcd_alloc_host(dev, &hba);
+ 	if (err) {
+ 		dev_err(dev, "Allocation failed\n");
+-		goto out;
++		return err;
+ 	}
+ 
+ 	hba->vops = vops;
+@@ -488,13 +484,13 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
+ 	if (err) {
+ 		dev_err(dev, "%s: clock parse failed %d\n",
+ 				__func__, err);
+-		goto dealloc_host;
++		return err;
+ 	}
+ 	err = ufshcd_parse_regulator_info(hba);
+ 	if (err) {
+ 		dev_err(dev, "%s: regulator init failed %d\n",
+ 				__func__, err);
+-		goto dealloc_host;
++		return err;
+ 	}
+ 
+ 	ufshcd_init_lanes_per_dir(hba);
+@@ -502,25 +498,20 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
+ 	err = ufshcd_parse_operating_points(hba);
+ 	if (err) {
+ 		dev_err(dev, "%s: OPP parse failed %d\n", __func__, err);
+-		goto dealloc_host;
++		return err;
+ 	}
+ 
+ 	err = ufshcd_init(hba, mmio_base, irq);
+ 	if (err) {
+ 		dev_err_probe(dev, err, "Initialization failed with error %d\n",
+ 			      err);
+-		goto dealloc_host;
++		return err;
+ 	}
+ 
+ 	pm_runtime_set_active(dev);
+ 	pm_runtime_enable(dev);
+ 
+ 	return 0;
+-
+-dealloc_host:
+-	ufshcd_dealloc_host(hba);
+-out:
+-	return err;
+ }
+ EXPORT_SYMBOL_GPL(ufshcd_pltfrm_init);
+ 
+@@ -534,7 +525,6 @@ void ufshcd_pltfrm_remove(struct platform_device *pdev)
+ 
+ 	pm_runtime_get_sync(&pdev->dev);
+ 	ufshcd_remove(hba);
+-	ufshcd_dealloc_host(hba);
+ 	pm_runtime_disable(&pdev->dev);
+ 	pm_runtime_put_noidle(&pdev->dev);
+ }
+diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c
+index 48dee166e5d89c..7b23631f47449b 100644
+--- a/drivers/usb/gadget/function/f_tcm.c
++++ b/drivers/usb/gadget/function/f_tcm.c
+@@ -245,7 +245,6 @@ static int bot_send_write_request(struct usbg_cmd *cmd)
+ {
+ 	struct f_uas *fu = cmd->fu;
+ 	struct se_cmd *se_cmd = &cmd->se_cmd;
+-	struct usb_gadget *gadget = fuas_to_gadget(fu);
+ 	int ret;
+ 
+ 	init_completion(&cmd->write_complete);
+@@ -256,22 +255,6 @@ static int bot_send_write_request(struct usbg_cmd *cmd)
+ 		return -EINVAL;
+ 	}
+ 
+-	if (!gadget->sg_supported) {
+-		cmd->data_buf = kmalloc(se_cmd->data_length, GFP_KERNEL);
+-		if (!cmd->data_buf)
+-			return -ENOMEM;
+-
+-		fu->bot_req_out->buf = cmd->data_buf;
+-	} else {
+-		fu->bot_req_out->buf = NULL;
+-		fu->bot_req_out->num_sgs = se_cmd->t_data_nents;
+-		fu->bot_req_out->sg = se_cmd->t_data_sg;
+-	}
+-
+-	fu->bot_req_out->complete = usbg_data_write_cmpl;
+-	fu->bot_req_out->length = se_cmd->data_length;
+-	fu->bot_req_out->context = cmd;
+-
+ 	ret = usbg_prepare_w_request(cmd, fu->bot_req_out);
+ 	if (ret)
+ 		goto cleanup;
+@@ -973,6 +956,7 @@ static void usbg_data_write_cmpl(struct usb_ep *ep, struct usb_request *req)
+ 	return;
+ 
+ cleanup:
++	target_put_sess_cmd(se_cmd);
+ 	transport_generic_free_cmd(&cmd->se_cmd, 0);
+ }
+ 
+@@ -1065,7 +1049,7 @@ static void usbg_cmd_work(struct work_struct *work)
+ 
+ out:
+ 	transport_send_check_condition_and_sense(se_cmd,
+-			TCM_UNSUPPORTED_SCSI_OPCODE, 1);
++			TCM_UNSUPPORTED_SCSI_OPCODE, 0);
+ }
+ 
+ static struct usbg_cmd *usbg_get_cmd(struct f_uas *fu,
+@@ -1193,7 +1177,7 @@ static void bot_cmd_work(struct work_struct *work)
+ 
+ out:
+ 	transport_send_check_condition_and_sense(se_cmd,
+-				TCM_UNSUPPORTED_SCSI_OPCODE, 1);
++				TCM_UNSUPPORTED_SCSI_OPCODE, 0);
+ }
+ 
+ static int bot_submit_command(struct f_uas *fu,
+@@ -1969,43 +1953,39 @@ static int tcm_bind(struct usb_configuration *c, struct usb_function *f)
+ 	bot_intf_desc.bInterfaceNumber = iface;
+ 	uasp_intf_desc.bInterfaceNumber = iface;
+ 	fu->iface = iface;
+-	ep = usb_ep_autoconfig_ss(gadget, &uasp_ss_bi_desc,
+-			&uasp_bi_ep_comp_desc);
++	ep = usb_ep_autoconfig(gadget, &uasp_fs_bi_desc);
+ 	if (!ep)
+ 		goto ep_fail;
+ 
+ 	fu->ep_in = ep;
+ 
+-	ep = usb_ep_autoconfig_ss(gadget, &uasp_ss_bo_desc,
+-			&uasp_bo_ep_comp_desc);
++	ep = usb_ep_autoconfig(gadget, &uasp_fs_bo_desc);
+ 	if (!ep)
+ 		goto ep_fail;
+ 	fu->ep_out = ep;
+ 
+-	ep = usb_ep_autoconfig_ss(gadget, &uasp_ss_status_desc,
+-			&uasp_status_in_ep_comp_desc);
++	ep = usb_ep_autoconfig(gadget, &uasp_fs_status_desc);
+ 	if (!ep)
+ 		goto ep_fail;
+ 	fu->ep_status = ep;
+ 
+-	ep = usb_ep_autoconfig_ss(gadget, &uasp_ss_cmd_desc,
+-			&uasp_cmd_comp_desc);
++	ep = usb_ep_autoconfig(gadget, &uasp_fs_cmd_desc);
+ 	if (!ep)
+ 		goto ep_fail;
+ 	fu->ep_cmd = ep;
+ 
+ 	/* Assume endpoint addresses are the same for both speeds */
+-	uasp_bi_desc.bEndpointAddress =	uasp_ss_bi_desc.bEndpointAddress;
+-	uasp_bo_desc.bEndpointAddress = uasp_ss_bo_desc.bEndpointAddress;
++	uasp_bi_desc.bEndpointAddress =	uasp_fs_bi_desc.bEndpointAddress;
++	uasp_bo_desc.bEndpointAddress = uasp_fs_bo_desc.bEndpointAddress;
+ 	uasp_status_desc.bEndpointAddress =
+-		uasp_ss_status_desc.bEndpointAddress;
+-	uasp_cmd_desc.bEndpointAddress = uasp_ss_cmd_desc.bEndpointAddress;
+-
+-	uasp_fs_bi_desc.bEndpointAddress = uasp_ss_bi_desc.bEndpointAddress;
+-	uasp_fs_bo_desc.bEndpointAddress = uasp_ss_bo_desc.bEndpointAddress;
+-	uasp_fs_status_desc.bEndpointAddress =
+-		uasp_ss_status_desc.bEndpointAddress;
+-	uasp_fs_cmd_desc.bEndpointAddress = uasp_ss_cmd_desc.bEndpointAddress;
++		uasp_fs_status_desc.bEndpointAddress;
++	uasp_cmd_desc.bEndpointAddress = uasp_fs_cmd_desc.bEndpointAddress;
++
++	uasp_ss_bi_desc.bEndpointAddress = uasp_fs_bi_desc.bEndpointAddress;
++	uasp_ss_bo_desc.bEndpointAddress = uasp_fs_bo_desc.bEndpointAddress;
++	uasp_ss_status_desc.bEndpointAddress =
++		uasp_fs_status_desc.bEndpointAddress;
++	uasp_ss_cmd_desc.bEndpointAddress = uasp_fs_cmd_desc.bEndpointAddress;
+ 
+ 	ret = usb_assign_descriptors(f, uasp_fs_function_desc,
+ 			uasp_hs_function_desc, uasp_ss_function_desc,
+diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c
+index 3bf1043cd7957c..d63c2d266d0735 100644
+--- a/drivers/vfio/platform/vfio_platform_common.c
++++ b/drivers/vfio/platform/vfio_platform_common.c
+@@ -393,6 +393,11 @@ static ssize_t vfio_platform_read_mmio(struct vfio_platform_region *reg,
+ 
+ 	count = min_t(size_t, count, reg->size - off);
+ 
++	if (off >= reg->size)
++		return -EINVAL;
++
++	count = min_t(size_t, count, reg->size - off);
++
+ 	if (!reg->ioaddr) {
+ 		reg->ioaddr =
+ 			ioremap(reg->addr, reg->size);
+@@ -477,6 +482,11 @@ static ssize_t vfio_platform_write_mmio(struct vfio_platform_region *reg,
+ 
+ 	count = min_t(size_t, count, reg->size - off);
+ 
++	if (off >= reg->size)
++		return -EINVAL;
++
++	count = min_t(size_t, count, reg->size - off);
++
+ 	if (!reg->ioaddr) {
+ 		reg->ioaddr =
+ 			ioremap(reg->addr, reg->size);
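/*
 * The vfio checks added above protect the arithmetic next to them: off
 * and reg->size are unsigned, so "reg->size - off" wraps to a huge value
 * whenever off >= reg->size.  The safe ordering in isolation (a sketch,
 * not the driver function):
 */
static ssize_t clamp_mmio_count(size_t off, size_t size, size_t count)
{
	if (off >= size)	/* e.g. size 0x1000, off 0x2000 */
		return -EINVAL;	/* reject before subtracting */

	return min_t(size_t, count, size - off);	/* cannot wrap now */
}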
+diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
+index 390808ce935d50..b5b5ca1a44f70b 100644
+--- a/fs/binfmt_flat.c
++++ b/fs/binfmt_flat.c
+@@ -478,7 +478,7 @@ static int load_flat_file(struct linux_binprm *bprm,
+ 	 * 28 bits (256 MB) is way more than reasonable in this case.
+ 	 * If some top bits are set we have probable binary corruption.
+ 	*/
+-	if ((text_len | data_len | bss_len | stack_len | full_data) >> 28) {
++	if ((text_len | data_len | bss_len | stack_len | relocs | full_data) >> 28) {
+ 		pr_err("bad header\n");
+ 		ret = -ENOEXEC;
+ 		goto err;
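/*
 * Why the OR-then-shift test works: "(a | b | c) >> 28" is nonzero iff
 * any of the OR'ed fields has a bit set at position 28 or above, i.e.
 * exceeds 256 MB - 1, so one comparison bounds them all; the fix merely
 * adds relocs to the OR.  A worked sketch (flat_lens_sane is made up):
 *
 *	text_len = 0x00001000	fits in 28 bits
 *	relocs   = 0x40000000	bit 30 set: corrupt header
 *	OR'ed    = 0x40001000	>> 28 == 0x4, nonzero, so reject
 */
static bool flat_lens_sane(u32 text_len, u32 data_len, u32 bss_len,
			   u32 stack_len, u32 relocs, u32 full_data)
{
	return ((text_len | data_len | bss_len | stack_len |
		 relocs | full_data) >> 28) == 0;
}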
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index 4fb521d91b0612..559c177456e6a0 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -242,7 +242,7 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans,
+ 	if (args->drop_cache)
+ 		btrfs_drop_extent_map_range(inode, args->start, args->end - 1, false);
+ 
+-	if (args->start >= inode->disk_i_size && !args->replace_extent)
++	if (data_race(args->start >= inode->disk_i_size) && !args->replace_extent)
+ 		modify_tree = 0;
+ 
+ 	update_refs = (btrfs_root_id(root) != BTRFS_TREE_LOG_OBJECTID);
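/*
 * data_race() from <linux/compiler.h> marks a deliberately lockless read
 * so KCSAN does not report it; the generated load is unchanged.  It fits
 * the hunk above because modify_tree is only a hint and a stale
 * disk_i_size is harmless.  Generic sketch (shared_counter and its
 * locking discipline are assumptions):
 */
static int shared_counter;	/* normally updated under a lock */

static int peek_counter(void)
{
	/* Lockless on purpose: a stale value is acceptable here. */
	return data_race(shared_counter);
}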
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 9d9ce308488dd3..f7e7d864f41440 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -7200,8 +7200,6 @@ noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
+ 			ret = -EAGAIN;
+ 			goto out;
+ 		}
+-
+-		cond_resched();
+ 	}
+ 
+ 	if (file_extent)
+@@ -10144,6 +10142,8 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
+ 			ret = -EINTR;
+ 			goto out;
+ 		}
++
++		cond_resched();
+ 	}
+ 
+ 	if (bsi.block_len)
+diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
+index 2104d60c216166..4ed11b089ea95a 100644
+--- a/fs/btrfs/ordered-data.c
++++ b/fs/btrfs/ordered-data.c
+@@ -1229,6 +1229,18 @@ struct btrfs_ordered_extent *btrfs_split_ordered_extent(
+ 	 */
+ 	if (WARN_ON_ONCE(len >= ordered->num_bytes))
+ 		return ERR_PTR(-EINVAL);
++	/*
++	 * If our ordered extent had an error there's no point in continuing.
++	 * The error may have come from a transaction abort done either by this
++	 * task or some other concurrent task, and the transaction abort path
++	 * iterates over all existing ordered extents and sets the flag
++	 * BTRFS_ORDERED_IOERR on them.
++	 */
++	if (unlikely(flags & (1U << BTRFS_ORDERED_IOERR))) {
++		const int fs_error = BTRFS_FS_ERROR(fs_info);
++
++		return fs_error ? ERR_PTR(fs_error) : ERR_PTR(-EIO);
++	}
+ 	/* We cannot split partially completed ordered extents. */
+ 	if (ordered->bytes_left) {
+ 		ASSERT(!(flags & ~BTRFS_ORDERED_TYPE_FLAGS));
+diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
+index 4fcd6cd4c1c244..fa9025c05d4e29 100644
+--- a/fs/btrfs/qgroup.c
++++ b/fs/btrfs/qgroup.c
+@@ -1916,8 +1916,11 @@ int btrfs_qgroup_cleanup_dropped_subvolume(struct btrfs_fs_info *fs_info, u64 su
+ 	/*
+ 	 * It's squota and the subvolume still has numbers needed for future
+ 	 * accounting, in this case we can not delete it.  Just skip it.
++	 *
++	 * Or the qgroup has already been removed by a qgroup rescan. In both
++	 * cases it is safe to ignore the error.
+ 	 */
+-	if (ret == -EBUSY)
++	if (ret == -EBUSY || ret == -ENOENT)
+ 		ret = 0;
+ 	return ret;
+ }
+diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
+index adcbdc970f9ea4..f24a80857cd600 100644
+--- a/fs/btrfs/relocation.c
++++ b/fs/btrfs/relocation.c
+@@ -4405,8 +4405,18 @@ int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
+ 		WARN_ON(!first_cow && level == 0);
+ 
+ 		node = rc->backref_cache.path[level];
+-		BUG_ON(node->bytenr != buf->start &&
+-		       node->new_bytenr != buf->start);
++
++		/*
++		 * If node->bytenr != buf->start and node->new_bytenr !=
++		 * buf->start then we've got the wrong backref node for what we
++		 * expected to see here and the cache is incorrect.
++		 */
++		if (unlikely(node->bytenr != buf->start && node->new_bytenr != buf->start)) {
++			btrfs_err(fs_info,
++"bytenr %llu was found but our backref cache was expecting %llu or %llu",
++				  buf->start, node->bytenr, node->new_bytenr);
++			return -EUCLEAN;
++		}
+ 
+ 		btrfs_backref_drop_node_buffer(node);
+ 		atomic_inc(&cow->refs);
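/*
 * The relocation hunk above is the standard "demote BUG_ON to an error"
 * pattern: an inconsistent cache/on-disk state is logged and surfaced as
 * -EUCLEAN ("structure needs cleaning") instead of crashing the machine.
 * The generic shape, with made-up names:
 */
static int check_cached(const struct cache_node *n, u64 expect)
{
	if (unlikely(n->bytenr != expect)) {	/* keep the fast path lean */
		pr_err("cache mismatch: got %llu, expected %llu\n",
		       n->bytenr, expect);
		return -EUCLEAN;	/* caller unwinds instead of oopsing */
	}

	return 0;
}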
+diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
+index 0fc873af891f65..82dd9ee89fbc5b 100644
+--- a/fs/btrfs/transaction.c
++++ b/fs/btrfs/transaction.c
+@@ -275,8 +275,10 @@ static noinline int join_transaction(struct btrfs_fs_info *fs_info,
+ 	cur_trans = fs_info->running_transaction;
+ 	if (cur_trans) {
+ 		if (TRANS_ABORTED(cur_trans)) {
++			const int abort_error = cur_trans->aborted;
++
+ 			spin_unlock(&fs_info->trans_lock);
+-			return cur_trans->aborted;
++			return abort_error;
+ 		}
+ 		if (btrfs_blocked_trans_types[cur_trans->state] & type) {
+ 			spin_unlock(&fs_info->trans_lock);
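/*
 * The join_transaction() fix above repairs a use-after-unlock: once
 * trans_lock is dropped, cur_trans may be freed, so anything needed from
 * it must be copied while the lock still pins the object.  In miniature
 * (my_obj and its lifetime rules are assumptions):
 */
static int peek_error(struct my_obj *obj, spinlock_t *lock)
{
	int err;

	spin_lock(lock);
	err = obj->error;	/* snapshot while the lock keeps obj alive */
	spin_unlock(lock);	/* obj may be freed from here on */

	return err;		/* safe: only the local copy is used */
}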
+diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
+index f48242262b2177..f3af6330d74a7d 100644
+--- a/fs/ceph/mds_client.c
++++ b/fs/ceph/mds_client.c
+@@ -5698,18 +5698,18 @@ static int ceph_mds_auth_match(struct ceph_mds_client *mdsc,
+ 			 *
+ 			 * All the other cases                       --> mismatch
+ 			 */
++			bool path_matched = true;
+ 			char *first = strstr(_tpath, auth->match.path);
+-			if (first != _tpath) {
+-				if (free_tpath)
+-					kfree(_tpath);
+-				return 0;
++			if (first != _tpath ||
++			    (tlen > len && _tpath[len] != '/')) {
++				path_matched = false;
+ 			}
+ 
+-			if (tlen > len && _tpath[len] != '/') {
+-				if (free_tpath)
+-					kfree(_tpath);
++			if (free_tpath)
++				kfree(_tpath);
++
++			if (!path_matched)
+ 				return 0;
+-			}
+ 		}
+ 	}
+ 
+diff --git a/fs/exec.c b/fs/exec.c
+index 9c349a74f38589..67513bd606c249 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -1351,7 +1351,28 @@ int begin_new_exec(struct linux_binprm * bprm)
+ 		set_dumpable(current->mm, SUID_DUMP_USER);
+ 
+ 	perf_event_exec();
+-	__set_task_comm(me, kbasename(bprm->filename), true);
++
++	/*
++	 * If the original filename was empty, alloc_bprm() made up a path
++	 * that will probably not be useful to admins running ps or similar.
++	 * Let's fix it up to be something reasonable.
++	 */
++	if (bprm->comm_from_dentry) {
++		/*
++		 * Hold RCU lock to keep the name from being freed behind our back.
++		 * Use acquire semantics to make sure the terminating NUL from
++		 * __d_alloc() is seen.
++		 *
++		 * Note, we're deliberately sloppy here. We don't need to care about
++		 * detecting a concurrent rename and just want a terminated name.
++		 */
++		rcu_read_lock();
++		__set_task_comm(me, smp_load_acquire(&bprm->file->f_path.dentry->d_name.name),
++				true);
++		rcu_read_unlock();
++	} else {
++		__set_task_comm(me, kbasename(bprm->filename), true);
++	}
+ 
+ 	/* An exec changes our domain. We are no longer part of the thread
+ 	   group */
+@@ -1527,11 +1548,13 @@ static struct linux_binprm *alloc_bprm(int fd, struct filename *filename, int fl
+ 	if (fd == AT_FDCWD || filename->name[0] == '/') {
+ 		bprm->filename = filename->name;
+ 	} else {
+-		if (filename->name[0] == '\0')
++		if (filename->name[0] == '\0') {
+ 			bprm->fdpath = kasprintf(GFP_KERNEL, "/dev/fd/%d", fd);
+-		else
++			bprm->comm_from_dentry = 1;
++		} else {
+ 			bprm->fdpath = kasprintf(GFP_KERNEL, "/dev/fd/%d/%s",
+ 						  fd, filename->name);
++		}
+ 		if (!bprm->fdpath)
+ 			goto out_free;
+ 
+diff --git a/fs/namespace.c b/fs/namespace.c
+index 5ea644b679add5..73da51ac5a0349 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -5020,26 +5020,29 @@ static int statmount_mnt_opts(struct kstatmount *s, struct seq_file *seq)
+ {
+ 	struct vfsmount *mnt = s->mnt;
+ 	struct super_block *sb = mnt->mnt_sb;
++	size_t start = seq->count;
+ 	int err;
+ 
+-	if (sb->s_op->show_options) {
+-		size_t start = seq->count;
++	err = security_sb_show_options(seq, sb);
++	if (err)
++		return err;
+ 
++	if (sb->s_op->show_options) {
+ 		err = sb->s_op->show_options(seq, mnt->mnt_root);
+ 		if (err)
+ 			return err;
++	}
+ 
+-		if (unlikely(seq_has_overflowed(seq)))
+-			return -EAGAIN;
++	if (unlikely(seq_has_overflowed(seq)))
++		return -EAGAIN;
+ 
+-		if (seq->count == start)
+-			return 0;
++	if (seq->count == start)
++		return 0;
+ 
+-		/* skip leading comma */
+-		memmove(seq->buf + start, seq->buf + start + 1,
+-			seq->count - start - 1);
+-		seq->count--;
+-	}
++	/* skip leading comma */
++	memmove(seq->buf + start, seq->buf + start + 1,
++		seq->count - start - 1);
++	seq->count--;
+ 
+ 	return 0;
+ }
+@@ -5050,22 +5053,29 @@ static int statmount_string(struct kstatmount *s, u64 flag)
+ 	size_t kbufsize;
+ 	struct seq_file *seq = &s->seq;
+ 	struct statmount *sm = &s->sm;
++	u32 start, *offp;
++
++	/* Reserve an empty string at the beginning for any unset offsets */
++	if (!seq->count)
++		seq_putc(seq, 0);
++
++	start = seq->count;
+ 
+ 	switch (flag) {
+ 	case STATMOUNT_FS_TYPE:
+-		sm->fs_type = seq->count;
++		offp = &sm->fs_type;
+ 		ret = statmount_fs_type(s, seq);
+ 		break;
+ 	case STATMOUNT_MNT_ROOT:
+-		sm->mnt_root = seq->count;
++		offp = &sm->mnt_root;
+ 		ret = statmount_mnt_root(s, seq);
+ 		break;
+ 	case STATMOUNT_MNT_POINT:
+-		sm->mnt_point = seq->count;
++		offp = &sm->mnt_point;
+ 		ret = statmount_mnt_point(s, seq);
+ 		break;
+ 	case STATMOUNT_MNT_OPTS:
+-		sm->mnt_opts = seq->count;
++		offp = &sm->mnt_opts;
+ 		ret = statmount_mnt_opts(s, seq);
+ 		break;
+ 	default:
+@@ -5087,6 +5097,7 @@ static int statmount_string(struct kstatmount *s, u64 flag)
+ 
+ 	seq->buf[seq->count++] = '\0';
+ 	sm->mask |= flag;
++	*offp = start;
+ 	return 0;
+ }
+ 
+diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
+index 0eb20012792f07..d3f76101ad4b91 100644
+--- a/fs/nfs/Kconfig
++++ b/fs/nfs/Kconfig
+@@ -170,7 +170,8 @@ config ROOT_NFS
+ 
+ config NFS_FSCACHE
+ 	bool "Provide NFS client caching support"
+-	depends on NFS_FS=m && NETFS_SUPPORT || NFS_FS=y && NETFS_SUPPORT=y
++	depends on NFS_FS
++	select NETFS_SUPPORT
+ 	select FSCACHE
+ 	help
+ 	  Say Y here if you want NFS data to be cached locally on disc through
+diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
+index f78115c6c2c12a..a1cfe4cc60c4b1 100644
+--- a/fs/nfs/flexfilelayout/flexfilelayout.c
++++ b/fs/nfs/flexfilelayout/flexfilelayout.c
+@@ -847,6 +847,9 @@ ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
+ 	struct nfs4_pnfs_ds *ds;
+ 	u32 ds_idx;
+ 
++	if (NFS_SERVER(pgio->pg_inode)->flags &
++			(NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
++		pgio->pg_maxretrans = io_maxretrans;
+ retry:
+ 	pnfs_generic_pg_check_layout(pgio, req);
+ 	/* Use full layout for now */
+@@ -860,6 +863,8 @@ ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
+ 		if (!pgio->pg_lseg)
+ 			goto out_nolseg;
+ 	}
++	/* Reset wb_nio, since getting layout segment was successful */
++	req->wb_nio = 0;
+ 
+ 	ds = ff_layout_get_ds_for_read(pgio, &ds_idx);
+ 	if (!ds) {
+@@ -876,14 +881,24 @@ ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
+ 	pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize;
+ 
+ 	pgio->pg_mirror_idx = ds_idx;
+-
+-	if (NFS_SERVER(pgio->pg_inode)->flags &
+-			(NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
+-		pgio->pg_maxretrans = io_maxretrans;
+ 	return;
+ out_nolseg:
+-	if (pgio->pg_error < 0)
+-		return;
++	if (pgio->pg_error < 0) {
++		if (pgio->pg_error != -EAGAIN)
++			return;
++		/* Retry getting layout segment if lower layer returned -EAGAIN */
++		if (pgio->pg_maxretrans && req->wb_nio++ > pgio->pg_maxretrans) {
++			if (NFS_SERVER(pgio->pg_inode)->flags & NFS_MOUNT_SOFTERR)
++				pgio->pg_error = -ETIMEDOUT;
++			else
++				pgio->pg_error = -EIO;
++			return;
++		}
++		pgio->pg_error = 0;
++		/* Sleep for 1 second before retrying */
++		ssleep(1);
++		goto retry;
++	}
+ out_mds:
+ 	trace_pnfs_mds_fallback_pg_init_read(pgio->pg_inode,
+ 			0, NFS4_MAX_UINT64, IOMODE_READ,
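/*
 * The pg_init_read rework above turns a transient -EAGAIN from layout
 * acquisition into a bounded retry loop: sleep one second between tries,
 * then fail with -ETIMEDOUT (soft mounts) or -EIO once the budget is
 * spent.  Its shape in isolation, with generic stand-in names:
 */
static int do_with_retry(struct ctx *c)
{
	int tries = 0;

	for (;;) {
		int err = try_once(c);

		if (err != -EAGAIN)
			return err;	/* success or a hard error */
		if (c->max_retries && ++tries > c->max_retries)
			return c->soft ? -ETIMEDOUT : -EIO;
		ssleep(1);		/* back off before the next try */
	}
}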
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index 8d25aef51ad150..2fc1919dd3c09f 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -5747,15 +5747,14 @@ nfsd4_encode_operation(struct nfsd4_compoundres *resp, struct nfsd4_op *op)
+ 	struct nfs4_stateowner *so = resp->cstate.replay_owner;
+ 	struct svc_rqst *rqstp = resp->rqstp;
+ 	const struct nfsd4_operation *opdesc = op->opdesc;
+-	int post_err_offset;
++	unsigned int op_status_offset;
+ 	nfsd4_enc encoder;
+-	__be32 *p;
+ 
+-	p = xdr_reserve_space(xdr, 8);
+-	if (!p)
++	if (xdr_stream_encode_u32(xdr, op->opnum) != XDR_UNIT)
++		goto release;
++	op_status_offset = xdr->buf->len;
++	if (!xdr_reserve_space(xdr, XDR_UNIT))
+ 		goto release;
+-	*p++ = cpu_to_be32(op->opnum);
+-	post_err_offset = xdr->buf->len;
+ 
+ 	if (op->opnum == OP_ILLEGAL)
+ 		goto status;
+@@ -5796,20 +5795,21 @@ nfsd4_encode_operation(struct nfsd4_compoundres *resp, struct nfsd4_op *op)
+ 		 * bug if we had to do this on a non-idempotent op:
+ 		 */
+ 		warn_on_nonidempotent_op(op);
+-		xdr_truncate_encode(xdr, post_err_offset);
++		xdr_truncate_encode(xdr, op_status_offset + XDR_UNIT);
+ 	}
+ 	if (so) {
+-		int len = xdr->buf->len - post_err_offset;
++		int len = xdr->buf->len - (op_status_offset + XDR_UNIT);
+ 
+ 		so->so_replay.rp_status = op->status;
+ 		so->so_replay.rp_buflen = len;
+-		read_bytes_from_xdr_buf(xdr->buf, post_err_offset,
++		read_bytes_from_xdr_buf(xdr->buf, op_status_offset + XDR_UNIT,
+ 						so->so_replay.rp_buf, len);
+ 	}
+ status:
+ 	op->status = nfsd4_map_status(op->status,
+ 				      resp->cstate.minorversion);
+-	*p = op->status;
++	write_bytes_to_xdr_buf(xdr->buf, op_status_offset,
++			       &op->status, XDR_UNIT);
+ release:
+ 	if (opdesc && opdesc->op_release)
+ 		opdesc->op_release(&op->u);
+diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
+index aaca34ec678f26..fd2de4b2bef1a8 100644
+--- a/fs/nilfs2/inode.c
++++ b/fs/nilfs2/inode.c
+@@ -1219,7 +1219,7 @@ int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+ 			if (size) {
+ 				if (phys && blkphy << blkbits == phys + size) {
+ 					/* The current extent goes on */
+-					size += n << blkbits;
++					size += (u64)n << blkbits;
+ 				} else {
+ 					/* Terminate the current extent */
+ 					ret = fiemap_fill_next_extent(
+@@ -1232,14 +1232,14 @@ int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+ 					flags = FIEMAP_EXTENT_MERGED;
+ 					logical = blkoff << blkbits;
+ 					phys = blkphy << blkbits;
+-					size = n << blkbits;
++					size = (u64)n << blkbits;
+ 				}
+ 			} else {
+ 				/* Start a new extent */
+ 				flags = FIEMAP_EXTENT_MERGED;
+ 				logical = blkoff << blkbits;
+ 				phys = blkphy << blkbits;
+-				size = n << blkbits;
++				size = (u64)n << blkbits;
+ 			}
+ 			blkoff += n;
+ 		}
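/*
 * The nilfs2 change fixes a shift-before-widen overflow: n is 32-bit, so
 * "n << blkbits" is evaluated in 32 bits and truncated before the result
 * is widened for the assignment.  Worked example (helper name is made
 * up):
 *
 *	n = 0x00400000 blocks, blkbits = 12 (4 KiB blocks)
 *	     n << blkbits == 0x0          (wrapped in 32-bit arithmetic)
 *	(u64)n << blkbits == 0x400000000  (the intended 16 GiB)
 */
static u64 blocks_to_bytes(unsigned int n, int blkbits)
{
	return (u64)n << blkbits;	/* widen first, then shift */
}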
+diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
+index 213206ebdd5810..7799f4d16ce999 100644
+--- a/fs/ocfs2/dir.c
++++ b/fs/ocfs2/dir.c
+@@ -1065,26 +1065,39 @@ int ocfs2_find_entry(const char *name, int namelen,
+ {
+ 	struct buffer_head *bh;
+ 	struct ocfs2_dir_entry *res_dir = NULL;
++	int ret = 0;
+ 
+ 	if (ocfs2_dir_indexed(dir))
+ 		return ocfs2_find_entry_dx(name, namelen, dir, lookup);
+ 
++	if (unlikely(i_size_read(dir) <= 0)) {
++		ret = -EFSCORRUPTED;
++		mlog_errno(ret);
++		goto out;
++	}
+ 	/*
+ 	 * The unindexed dir code only uses part of the lookup
+ 	 * structure, so there's no reason to push it down further
+ 	 * than this.
+ 	 */
+-	if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
++	if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
++		if (unlikely(i_size_read(dir) > dir->i_sb->s_blocksize)) {
++			ret = -EFSCORRUPTED;
++			mlog_errno(ret);
++			goto out;
++		}
+ 		bh = ocfs2_find_entry_id(name, namelen, dir, &res_dir);
+-	else
++	} else {
+ 		bh = ocfs2_find_entry_el(name, namelen, dir, &res_dir);
++	}
+ 
+ 	if (bh == NULL)
+ 		return -ENOENT;
+ 
+ 	lookup->dl_leaf_bh = bh;
+ 	lookup->dl_entry = res_dir;
+-	return 0;
++out:
++	return ret;
+ }
+ 
+ /*
+@@ -2010,6 +2023,7 @@ int ocfs2_lookup_ino_from_name(struct inode *dir, const char *name,
+  *
+  * Return 0 if the name does not exist
+  * Return -EEXIST if the directory contains the name
++ * Return -EFSCORRUPTED if found corruption
+  *
+  * Callers should have i_rwsem + a cluster lock on dir
+  */
+@@ -2023,9 +2037,12 @@ int ocfs2_check_dir_for_entry(struct inode *dir,
+ 	trace_ocfs2_check_dir_for_entry(
+ 		(unsigned long long)OCFS2_I(dir)->ip_blkno, namelen, name);
+ 
+-	if (ocfs2_find_entry(name, namelen, dir, &lookup) == 0) {
++	ret = ocfs2_find_entry(name, namelen, dir, &lookup);
++	if (ret == 0) {
+ 		ret = -EEXIST;
+ 		mlog_errno(ret);
++	} else if (ret == -ENOENT) {
++		ret = 0;
+ 	}
+ 
+ 	ocfs2_free_dir_lookup_result(&lookup);
+diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
+index c79b4291777f63..1e87554f6f4104 100644
+--- a/fs/ocfs2/super.c
++++ b/fs/ocfs2/super.c
+@@ -2340,7 +2340,7 @@ static int ocfs2_verify_volume(struct ocfs2_dinode *di,
+ 			mlog(ML_ERROR, "found superblock with incorrect block "
+ 			     "size bits: found %u, should be 9, 10, 11, or 12\n",
+ 			     blksz_bits);
+-		} else if ((1 << le32_to_cpu(blksz_bits)) != blksz) {
++		} else if ((1 << blksz_bits) != blksz) {
+ 			mlog(ML_ERROR, "found superblock with incorrect block "
+ 			     "size: found %u, should be %u\n", 1 << blksz_bits, blksz);
+ 		} else if (le16_to_cpu(di->id2.i_super.s_major_rev_level) !=
+diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c
+index d4c5fdcfa1e464..f5cf2255dc0972 100644
+--- a/fs/ocfs2/symlink.c
++++ b/fs/ocfs2/symlink.c
+@@ -65,7 +65,7 @@ static int ocfs2_fast_symlink_read_folio(struct file *f, struct folio *folio)
+ 
+ 	if (status < 0) {
+ 		mlog_errno(status);
+-		return status;
++		goto out;
+ 	}
+ 
+ 	fe = (struct ocfs2_dinode *) bh->b_data;
+@@ -76,9 +76,10 @@ static int ocfs2_fast_symlink_read_folio(struct file *f, struct folio *folio)
+ 	memcpy(kaddr, link, len + 1);
+ 	kunmap_atomic(kaddr);
+ 	SetPageUptodate(page);
++out:
+ 	unlock_page(page);
+ 	brelse(bh);
+-	return 0;
++	return status;
+ }
+ 
+ const struct address_space_operations ocfs2_fast_symlink_aops = {
+diff --git a/fs/proc/array.c b/fs/proc/array.c
+index 34a47fb0c57f25..5e4f7b411fbdb9 100644
+--- a/fs/proc/array.c
++++ b/fs/proc/array.c
+@@ -500,7 +500,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
+ 		 * a program is not able to use ptrace(2) in that case. It is
+ 		 * safe because the task has stopped executing permanently.
+ 		 */
+-		if (permitted && (task->flags & (PF_EXITING|PF_DUMPCORE))) {
++		if (permitted && (task->flags & (PF_EXITING|PF_DUMPCORE|PF_POSTCOREDUMP))) {
+ 			if (try_get_task_stack(task)) {
+ 				eip = KSTK_EIP(task);
+ 				esp = KSTK_ESP(task);
+diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
+index 9a4b3608b7d6f3..94785abc9b1b2d 100644
+--- a/fs/smb/client/cifsglob.h
++++ b/fs/smb/client/cifsglob.h
+@@ -320,7 +320,7 @@ struct smb_version_operations {
+ 	int (*handle_cancelled_mid)(struct mid_q_entry *, struct TCP_Server_Info *);
+ 	void (*downgrade_oplock)(struct TCP_Server_Info *server,
+ 				 struct cifsInodeInfo *cinode, __u32 oplock,
+-				 unsigned int epoch, bool *purge_cache);
++				 __u16 epoch, bool *purge_cache);
+ 	/* process transaction2 response */
+ 	bool (*check_trans2)(struct mid_q_entry *, struct TCP_Server_Info *,
+ 			     char *, int);
+@@ -515,12 +515,12 @@ struct smb_version_operations {
+ 	/* if we can do cache read operations */
+ 	bool (*is_read_op)(__u32);
+ 	/* set oplock level for the inode */
+-	void (*set_oplock_level)(struct cifsInodeInfo *, __u32, unsigned int,
+-				 bool *);
++	void (*set_oplock_level)(struct cifsInodeInfo *cinode, __u32 oplock, __u16 epoch,
++				 bool *purge_cache);
+ 	/* create lease context buffer for CREATE request */
+ 	char * (*create_lease_buf)(u8 *lease_key, u8 oplock);
+ 	/* parse lease context buffer and return oplock/epoch info */
+-	__u8 (*parse_lease_buf)(void *buf, unsigned int *epoch, char *lkey);
++	__u8 (*parse_lease_buf)(void *buf, __u16 *epoch, char *lkey);
+ 	ssize_t (*copychunk_range)(const unsigned int,
+ 			struct cifsFileInfo *src_file,
+ 			struct cifsFileInfo *target_file,
+@@ -1415,7 +1415,7 @@ struct cifs_fid {
+ 	__u8 create_guid[16];
+ 	__u32 access;
+ 	struct cifs_pending_open *pending_open;
+-	unsigned int epoch;
++	__u16 epoch;
+ #ifdef CONFIG_CIFS_DEBUG2
+ 	__u64 mid;
+ #endif /* CIFS_DEBUG2 */
+@@ -1448,7 +1448,7 @@ struct cifsFileInfo {
+ 	bool oplock_break_cancelled:1;
+ 	bool status_file_deleted:1; /* file has been deleted */
+ 	bool offload:1; /* offload final part of _put to a wq */
+-	unsigned int oplock_epoch; /* epoch from the lease break */
++	__u16 oplock_epoch; /* epoch from the lease break */
+ 	__u32 oplock_level; /* oplock/lease level from the lease break */
+ 	int count;
+ 	spinlock_t file_info_lock; /* protects four flag/count fields above */
+@@ -1545,7 +1545,7 @@ struct cifsInodeInfo {
+ 	spinlock_t	open_file_lock;	/* protects openFileList */
+ 	__u32 cifsAttrs; /* e.g. DOS archive bit, sparse, compressed, system */
+ 	unsigned int oplock;		/* oplock/lease level we have */
+-	unsigned int epoch;		/* used to track lease state changes */
++	__u16 epoch;		/* used to track lease state changes */
+ #define CIFS_INODE_PENDING_OPLOCK_BREAK   (0) /* oplock break in progress */
+ #define CIFS_INODE_PENDING_WRITERS	  (1) /* Writes in progress */
+ #define CIFS_INODE_FLAG_UNUSED		  (2) /* Unused flag */
+diff --git a/fs/smb/client/dir.c b/fs/smb/client/dir.c
+index 864b194dbaa0a0..1822493dd0842e 100644
+--- a/fs/smb/client/dir.c
++++ b/fs/smb/client/dir.c
+@@ -627,7 +627,7 @@ int cifs_mknod(struct mnt_idmap *idmap, struct inode *inode,
+ 		goto mknod_out;
+ 	}
+ 
+-	trace_smb3_mknod_enter(xid, tcon->ses->Suid, tcon->tid, full_path);
++	trace_smb3_mknod_enter(xid, tcon->tid, tcon->ses->Suid, full_path);
+ 
+ 	rc = tcon->ses->server->ops->make_node(xid, inode, direntry, tcon,
+ 					       full_path, mode,
+@@ -635,9 +635,9 @@ int cifs_mknod(struct mnt_idmap *idmap, struct inode *inode,
+ 
+ mknod_out:
+ 	if (rc)
+-		trace_smb3_mknod_err(xid,  tcon->ses->Suid, tcon->tid, rc);
++		trace_smb3_mknod_err(xid,  tcon->tid, tcon->ses->Suid, rc);
+ 	else
+-		trace_smb3_mknod_done(xid, tcon->ses->Suid, tcon->tid);
++		trace_smb3_mknod_done(xid, tcon->tid, tcon->ses->Suid);
+ 
+ 	free_dentry_path(page);
+ 	free_xid(xid);
+diff --git a/fs/smb/client/smb1ops.c b/fs/smb/client/smb1ops.c
+index db3695eddcf9d5..c70f4961c4eb78 100644
+--- a/fs/smb/client/smb1ops.c
++++ b/fs/smb/client/smb1ops.c
+@@ -377,7 +377,7 @@ coalesce_t2(char *second_buf, struct smb_hdr *target_hdr)
+ static void
+ cifs_downgrade_oplock(struct TCP_Server_Info *server,
+ 		      struct cifsInodeInfo *cinode, __u32 oplock,
+-		      unsigned int epoch, bool *purge_cache)
++		      __u16 epoch, bool *purge_cache)
+ {
+ 	cifs_set_oplock_level(cinode, oplock);
+ }
+diff --git a/fs/smb/client/smb2inode.c b/fs/smb/client/smb2inode.c
+index b935c1a62d10cf..7dfd3eb3847b33 100644
+--- a/fs/smb/client/smb2inode.c
++++ b/fs/smb/client/smb2inode.c
+@@ -298,8 +298,8 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ 				goto finished;
+ 			}
+ 			num_rqst++;
+-			trace_smb3_query_info_compound_enter(xid, ses->Suid,
+-							     tcon->tid, full_path);
++			trace_smb3_query_info_compound_enter(xid, tcon->tid,
++							     ses->Suid, full_path);
+ 			break;
+ 		case SMB2_OP_POSIX_QUERY_INFO:
+ 			rqst[num_rqst].rq_iov = &vars->qi_iov;
+@@ -334,18 +334,18 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ 				goto finished;
+ 			}
+ 			num_rqst++;
+-			trace_smb3_posix_query_info_compound_enter(xid, ses->Suid,
+-								   tcon->tid, full_path);
++			trace_smb3_posix_query_info_compound_enter(xid, tcon->tid,
++								   ses->Suid, full_path);
+ 			break;
+ 		case SMB2_OP_DELETE:
+-			trace_smb3_delete_enter(xid, ses->Suid, tcon->tid, full_path);
++			trace_smb3_delete_enter(xid, tcon->tid, ses->Suid, full_path);
+ 			break;
+ 		case SMB2_OP_MKDIR:
+ 			/*
+ 			 * Directories are created through parameters in the
+ 			 * SMB2_open() call.
+ 			 */
+-			trace_smb3_mkdir_enter(xid, ses->Suid, tcon->tid, full_path);
++			trace_smb3_mkdir_enter(xid, tcon->tid, ses->Suid, full_path);
+ 			break;
+ 		case SMB2_OP_RMDIR:
+ 			rqst[num_rqst].rq_iov = &vars->si_iov[0];
+@@ -363,7 +363,7 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ 				goto finished;
+ 			smb2_set_next_command(tcon, &rqst[num_rqst]);
+ 			smb2_set_related(&rqst[num_rqst++]);
+-			trace_smb3_rmdir_enter(xid, ses->Suid, tcon->tid, full_path);
++			trace_smb3_rmdir_enter(xid, tcon->tid, ses->Suid, full_path);
+ 			break;
+ 		case SMB2_OP_SET_EOF:
+ 			rqst[num_rqst].rq_iov = &vars->si_iov[0];
+@@ -398,7 +398,7 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ 				goto finished;
+ 			}
+ 			num_rqst++;
+-			trace_smb3_set_eof_enter(xid, ses->Suid, tcon->tid, full_path);
++			trace_smb3_set_eof_enter(xid, tcon->tid, ses->Suid, full_path);
+ 			break;
+ 		case SMB2_OP_SET_INFO:
+ 			rqst[num_rqst].rq_iov = &vars->si_iov[0];
+@@ -429,8 +429,8 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ 				goto finished;
+ 			}
+ 			num_rqst++;
+-			trace_smb3_set_info_compound_enter(xid, ses->Suid,
+-							   tcon->tid, full_path);
++			trace_smb3_set_info_compound_enter(xid, tcon->tid,
++							   ses->Suid, full_path);
+ 			break;
+ 		case SMB2_OP_RENAME:
+ 			rqst[num_rqst].rq_iov = &vars->si_iov[0];
+@@ -469,7 +469,7 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ 				goto finished;
+ 			}
+ 			num_rqst++;
+-			trace_smb3_rename_enter(xid, ses->Suid, tcon->tid, full_path);
++			trace_smb3_rename_enter(xid, tcon->tid, ses->Suid, full_path);
+ 			break;
+ 		case SMB2_OP_HARDLINK:
+ 			rqst[num_rqst].rq_iov = &vars->si_iov[0];
+@@ -496,7 +496,7 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ 				goto finished;
+ 			smb2_set_next_command(tcon, &rqst[num_rqst]);
+ 			smb2_set_related(&rqst[num_rqst++]);
+-			trace_smb3_hardlink_enter(xid, ses->Suid, tcon->tid, full_path);
++			trace_smb3_hardlink_enter(xid, tcon->tid, ses->Suid, full_path);
+ 			break;
+ 		case SMB2_OP_SET_REPARSE:
+ 			rqst[num_rqst].rq_iov = vars->io_iov;
+@@ -523,8 +523,8 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ 				goto finished;
+ 			}
+ 			num_rqst++;
+-			trace_smb3_set_reparse_compound_enter(xid, ses->Suid,
+-							      tcon->tid, full_path);
++			trace_smb3_set_reparse_compound_enter(xid, tcon->tid,
++							      ses->Suid, full_path);
+ 			break;
+ 		case SMB2_OP_GET_REPARSE:
+ 			rqst[num_rqst].rq_iov = vars->io_iov;
+@@ -549,8 +549,8 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ 				goto finished;
+ 			}
+ 			num_rqst++;
+-			trace_smb3_get_reparse_compound_enter(xid, ses->Suid,
+-							      tcon->tid, full_path);
++			trace_smb3_get_reparse_compound_enter(xid, tcon->tid,
++							      ses->Suid, full_path);
+ 			break;
+ 		case SMB2_OP_QUERY_WSL_EA:
+ 			rqst[num_rqst].rq_iov = &vars->ea_iov;
+@@ -663,11 +663,11 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ 			}
+ 			SMB2_query_info_free(&rqst[num_rqst++]);
+ 			if (rc)
+-				trace_smb3_query_info_compound_err(xid,  ses->Suid,
+-								   tcon->tid, rc);
++				trace_smb3_query_info_compound_err(xid,  tcon->tid,
++								   ses->Suid, rc);
+ 			else
+-				trace_smb3_query_info_compound_done(xid, ses->Suid,
+-								    tcon->tid);
++				trace_smb3_query_info_compound_done(xid, tcon->tid,
++								    ses->Suid);
+ 			break;
+ 		case SMB2_OP_POSIX_QUERY_INFO:
+ 			idata = in_iov[i].iov_base;
+@@ -690,15 +690,15 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ 
+ 			SMB2_query_info_free(&rqst[num_rqst++]);
+ 			if (rc)
+-				trace_smb3_posix_query_info_compound_err(xid,  ses->Suid,
+-									 tcon->tid, rc);
++				trace_smb3_posix_query_info_compound_err(xid,  tcon->tid,
++									 ses->Suid, rc);
+ 			else
+-				trace_smb3_posix_query_info_compound_done(xid, ses->Suid,
+-									  tcon->tid);
++				trace_smb3_posix_query_info_compound_done(xid, tcon->tid,
++									  ses->Suid);
+ 			break;
+ 		case SMB2_OP_DELETE:
+ 			if (rc)
+-				trace_smb3_delete_err(xid,  ses->Suid, tcon->tid, rc);
++				trace_smb3_delete_err(xid, tcon->tid, ses->Suid, rc);
+ 			else {
+ 				/*
+ 				 * If dentry (hence, inode) is NULL, lease break is going to
+@@ -706,59 +706,59 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ 				 */
+ 				if (inode)
+ 					cifs_mark_open_handles_for_deleted_file(inode, full_path);
+-				trace_smb3_delete_done(xid, ses->Suid, tcon->tid);
++				trace_smb3_delete_done(xid, tcon->tid, ses->Suid);
+ 			}
+ 			break;
+ 		case SMB2_OP_MKDIR:
+ 			if (rc)
+-				trace_smb3_mkdir_err(xid,  ses->Suid, tcon->tid, rc);
++				trace_smb3_mkdir_err(xid, tcon->tid, ses->Suid, rc);
+ 			else
+-				trace_smb3_mkdir_done(xid, ses->Suid, tcon->tid);
++				trace_smb3_mkdir_done(xid, tcon->tid, ses->Suid);
+ 			break;
+ 		case SMB2_OP_HARDLINK:
+ 			if (rc)
+-				trace_smb3_hardlink_err(xid,  ses->Suid, tcon->tid, rc);
++				trace_smb3_hardlink_err(xid,  tcon->tid, ses->Suid, rc);
+ 			else
+-				trace_smb3_hardlink_done(xid, ses->Suid, tcon->tid);
++				trace_smb3_hardlink_done(xid, tcon->tid, ses->Suid);
+ 			SMB2_set_info_free(&rqst[num_rqst++]);
+ 			break;
+ 		case SMB2_OP_RENAME:
+ 			if (rc)
+-				trace_smb3_rename_err(xid,  ses->Suid, tcon->tid, rc);
++				trace_smb3_rename_err(xid, tcon->tid, ses->Suid, rc);
+ 			else
+-				trace_smb3_rename_done(xid, ses->Suid, tcon->tid);
++				trace_smb3_rename_done(xid, tcon->tid, ses->Suid);
+ 			SMB2_set_info_free(&rqst[num_rqst++]);
+ 			break;
+ 		case SMB2_OP_RMDIR:
+ 			if (rc)
+-				trace_smb3_rmdir_err(xid,  ses->Suid, tcon->tid, rc);
++				trace_smb3_rmdir_err(xid, tcon->tid, ses->Suid, rc);
+ 			else
+-				trace_smb3_rmdir_done(xid, ses->Suid, tcon->tid);
++				trace_smb3_rmdir_done(xid, tcon->tid, ses->Suid);
+ 			SMB2_set_info_free(&rqst[num_rqst++]);
+ 			break;
+ 		case SMB2_OP_SET_EOF:
+ 			if (rc)
+-				trace_smb3_set_eof_err(xid,  ses->Suid, tcon->tid, rc);
++				trace_smb3_set_eof_err(xid, tcon->tid, ses->Suid, rc);
+ 			else
+-				trace_smb3_set_eof_done(xid, ses->Suid, tcon->tid);
++				trace_smb3_set_eof_done(xid, tcon->tid, ses->Suid);
+ 			SMB2_set_info_free(&rqst[num_rqst++]);
+ 			break;
+ 		case SMB2_OP_SET_INFO:
+ 			if (rc)
+-				trace_smb3_set_info_compound_err(xid,  ses->Suid,
+-								 tcon->tid, rc);
++				trace_smb3_set_info_compound_err(xid,  tcon->tid,
++								 ses->Suid, rc);
+ 			else
+-				trace_smb3_set_info_compound_done(xid, ses->Suid,
+-								  tcon->tid);
++				trace_smb3_set_info_compound_done(xid, tcon->tid,
++								  ses->Suid);
+ 			SMB2_set_info_free(&rqst[num_rqst++]);
+ 			break;
+ 		case SMB2_OP_SET_REPARSE:
+ 			if (rc) {
+-				trace_smb3_set_reparse_compound_err(xid,  ses->Suid,
+-								    tcon->tid, rc);
++				trace_smb3_set_reparse_compound_err(xid, tcon->tid,
++								    ses->Suid, rc);
+ 			} else {
+-				trace_smb3_set_reparse_compound_done(xid, ses->Suid,
+-								     tcon->tid);
++				trace_smb3_set_reparse_compound_done(xid, tcon->tid,
++								     ses->Suid);
+ 			}
+ 			SMB2_ioctl_free(&rqst[num_rqst++]);
+ 			break;
+@@ -771,18 +771,18 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ 				rbuf = reparse_buf_ptr(iov);
+ 				if (IS_ERR(rbuf)) {
+ 					rc = PTR_ERR(rbuf);
+-					trace_smb3_set_reparse_compound_err(xid,  ses->Suid,
+-									    tcon->tid, rc);
++					trace_smb3_get_reparse_compound_err(xid, tcon->tid,
++									    ses->Suid, rc);
+ 				} else {
+ 					idata->reparse.tag = le32_to_cpu(rbuf->ReparseTag);
+-					trace_smb3_set_reparse_compound_done(xid, ses->Suid,
+-									     tcon->tid);
++					trace_smb3_get_reparse_compound_done(xid, tcon->tid,
++									     ses->Suid);
+ 				}
+ 				memset(iov, 0, sizeof(*iov));
+ 				resp_buftype[i + 1] = CIFS_NO_BUFFER;
+ 			} else {
+-				trace_smb3_set_reparse_compound_err(xid, ses->Suid,
+-								    tcon->tid, rc);
++				trace_smb3_get_reparse_compound_err(xid, tcon->tid,
++								    ses->Suid, rc);
+ 			}
+ 			SMB2_ioctl_free(&rqst[num_rqst++]);
+ 			break;
+@@ -799,11 +799,11 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ 				}
+ 			}
+ 			if (!rc) {
+-				trace_smb3_query_wsl_ea_compound_done(xid, ses->Suid,
+-								      tcon->tid);
++				trace_smb3_query_wsl_ea_compound_done(xid, tcon->tid,
++								      ses->Suid);
+ 			} else {
+-				trace_smb3_query_wsl_ea_compound_err(xid, ses->Suid,
+-								     tcon->tid, rc);
++				trace_smb3_query_wsl_ea_compound_err(xid, tcon->tid,
++								     ses->Suid, rc);
+ 			}
+ 			SMB2_query_info_free(&rqst[num_rqst++]);
+ 			break;
+diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
+index 6bacf754b57efd..44952727fef9ef 100644
+--- a/fs/smb/client/smb2ops.c
++++ b/fs/smb/client/smb2ops.c
+@@ -3931,22 +3931,22 @@ static long smb3_fallocate(struct file *file, struct cifs_tcon *tcon, int mode,
+ static void
+ smb2_downgrade_oplock(struct TCP_Server_Info *server,
+ 		      struct cifsInodeInfo *cinode, __u32 oplock,
+-		      unsigned int epoch, bool *purge_cache)
++		      __u16 epoch, bool *purge_cache)
+ {
+ 	server->ops->set_oplock_level(cinode, oplock, 0, NULL);
+ }
+ 
+ static void
+ smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
+-		       unsigned int epoch, bool *purge_cache);
++		       __u16 epoch, bool *purge_cache);
+ 
+ static void
+ smb3_downgrade_oplock(struct TCP_Server_Info *server,
+ 		       struct cifsInodeInfo *cinode, __u32 oplock,
+-		       unsigned int epoch, bool *purge_cache)
++		       __u16 epoch, bool *purge_cache)
+ {
+ 	unsigned int old_state = cinode->oplock;
+-	unsigned int old_epoch = cinode->epoch;
++	__u16 old_epoch = cinode->epoch;
+ 	unsigned int new_state;
+ 
+ 	if (epoch > old_epoch) {
+@@ -3966,7 +3966,7 @@ smb3_downgrade_oplock(struct TCP_Server_Info *server,
+ 
+ static void
+ smb2_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
+-		      unsigned int epoch, bool *purge_cache)
++		      __u16 epoch, bool *purge_cache)
+ {
+ 	oplock &= 0xFF;
+ 	cinode->lease_granted = false;
+@@ -3990,7 +3990,7 @@ smb2_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
+ 
+ static void
+ smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
+-		       unsigned int epoch, bool *purge_cache)
++		       __u16 epoch, bool *purge_cache)
+ {
+ 	char message[5] = {0};
+ 	unsigned int new_oplock = 0;
+@@ -4027,7 +4027,7 @@ smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
+ 
+ static void
+ smb3_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
+-		      unsigned int epoch, bool *purge_cache)
++		      __u16 epoch, bool *purge_cache)
+ {
+ 	unsigned int old_oplock = cinode->oplock;
+ 
+@@ -4141,7 +4141,7 @@ smb3_create_lease_buf(u8 *lease_key, u8 oplock)
+ }
+ 
+ static __u8
+-smb2_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key)
++smb2_parse_lease_buf(void *buf, __u16 *epoch, char *lease_key)
+ {
+ 	struct create_lease *lc = (struct create_lease *)buf;
+ 
+@@ -4152,7 +4152,7 @@ smb2_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key)
+ }
+ 
+ static __u8
+-smb3_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key)
++smb3_parse_lease_buf(void *buf, __u16 *epoch, char *lease_key)
+ {
+ 	struct create_lease_v2 *lc = (struct create_lease_v2 *)buf;
+ 
+@@ -5104,6 +5104,7 @@ int __cifs_sfu_make_node(unsigned int xid, struct inode *inode,
+ {
+ 	struct TCP_Server_Info *server = tcon->ses->server;
+ 	struct cifs_open_parms oparms;
++	struct cifs_open_info_data idata;
+ 	struct cifs_io_parms io_parms = {};
+ 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+ 	struct cifs_fid fid;
+@@ -5173,10 +5174,20 @@ int __cifs_sfu_make_node(unsigned int xid, struct inode *inode,
+ 			     CREATE_OPTION_SPECIAL, ACL_NO_MODE);
+ 	oparms.fid = &fid;
+ 
+-	rc = server->ops->open(xid, &oparms, &oplock, NULL);
++	rc = server->ops->open(xid, &oparms, &oplock, &idata);
+ 	if (rc)
+ 		goto out;
+ 
++	/*
++	 * Check if the server honored the ATTR_SYSTEM flag requested via the
++	 * CREATE_OPTION_SPECIAL option. If not, the server does not support
++	 * ATTR_SYSTEM and the newly created file is not SFU compatible, which
++	 * means that the call failed.
++	 */
++	if (!(le32_to_cpu(idata.fi.Attributes) & ATTR_SYSTEM)) {
++		rc = -EOPNOTSUPP;
++		goto out_close;
++	}
++
+ 	if (type_len + data_len > 0) {
+ 		io_parms.pid = current->tgid;
+ 		io_parms.tcon = tcon;
+@@ -5191,8 +5202,18 @@ int __cifs_sfu_make_node(unsigned int xid, struct inode *inode,
+ 					     iov, ARRAY_SIZE(iov)-1);
+ 	}
+ 
++out_close:
+ 	server->ops->close(xid, tcon, &fid);
+ 
++	/*
++	 * If CREATE was successful but either setting ATTR_SYSTEM or writing
++	 * the type/data information failed, remove the intermediate object
++	 * created by CREATE. Otherwise the intermediate empty object stays
++	 * on the server.
++	 */
++	if (rc)
++		server->ops->unlink(xid, tcon, full_path, cifs_sb, NULL);
++
+ out:
+ 	kfree(symname_utf16);
+ 	return rc;
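/*
 * The __cifs_sfu_make_node() rework gives the function a full unwind:
 * any failure after the CREATE closes the handle, and any failure at all
 * removes the half-created object so nothing empty lingers on the
 * server.  The control flow in miniature (all names are illustrative):
 */
static int make_special(struct ctx *c)
{
	int rc;

	rc = create_obj(c);		/* step 1: CREATE */
	if (rc)
		return rc;

	if (!attrs_honored(c)) {	/* server ignored ATTR_SYSTEM? */
		rc = -EOPNOTSUPP;
		goto out_close;
	}

	rc = write_payload(c);		/* step 2: type/data blob */

out_close:
	close_obj(c);
	if (rc)
		remove_obj(c);		/* undo the partial creation */
	return rc;
}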
+diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
+index 4750505465ae63..2e3f78fe9210ff 100644
+--- a/fs/smb/client/smb2pdu.c
++++ b/fs/smb/client/smb2pdu.c
+@@ -2335,7 +2335,7 @@ parse_posix_ctxt(struct create_context *cc, struct smb2_file_all_info *info,
+ 
+ int smb2_parse_contexts(struct TCP_Server_Info *server,
+ 			struct kvec *rsp_iov,
+-			unsigned int *epoch,
++			__u16 *epoch,
+ 			char *lease_key, __u8 *oplock,
+ 			struct smb2_file_all_info *buf,
+ 			struct create_posix_rsp *posix)
+diff --git a/fs/smb/client/smb2proto.h b/fs/smb/client/smb2proto.h
+index 09349fa8da039a..51d890f74e36f3 100644
+--- a/fs/smb/client/smb2proto.h
++++ b/fs/smb/client/smb2proto.h
+@@ -282,7 +282,7 @@ extern enum securityEnum smb2_select_sectype(struct TCP_Server_Info *,
+ 					enum securityEnum);
+ int smb2_parse_contexts(struct TCP_Server_Info *server,
+ 			struct kvec *rsp_iov,
+-			unsigned int *epoch,
++			__u16 *epoch,
+ 			char *lease_key, __u8 *oplock,
+ 			struct smb2_file_all_info *buf,
+ 			struct create_posix_rsp *posix);
+diff --git a/fs/smb/server/transport_ipc.c b/fs/smb/server/transport_ipc.c
+index 6de351cc2b60e0..69bac122adbe06 100644
+--- a/fs/smb/server/transport_ipc.c
++++ b/fs/smb/server/transport_ipc.c
+@@ -626,6 +626,9 @@ ksmbd_ipc_spnego_authen_request(const char *spnego_blob, int blob_len)
+ 	struct ksmbd_spnego_authen_request *req;
+ 	struct ksmbd_spnego_authen_response *resp;
+ 
++	if (blob_len > KSMBD_IPC_MAX_PAYLOAD)
++		return NULL;
++
+ 	msg = ipc_msg_alloc(sizeof(struct ksmbd_spnego_authen_request) +
+ 			blob_len + 1);
+ 	if (!msg)
+@@ -805,6 +808,9 @@ struct ksmbd_rpc_command *ksmbd_rpc_write(struct ksmbd_session *sess, int handle
+ 	struct ksmbd_rpc_command *req;
+ 	struct ksmbd_rpc_command *resp;
+ 
++	if (payload_sz > KSMBD_IPC_MAX_PAYLOAD)
++		return NULL;
++
+ 	msg = ipc_msg_alloc(sizeof(struct ksmbd_rpc_command) + payload_sz + 1);
+ 	if (!msg)
+ 		return NULL;
+@@ -853,6 +859,9 @@ struct ksmbd_rpc_command *ksmbd_rpc_ioctl(struct ksmbd_session *sess, int handle
+ 	struct ksmbd_rpc_command *req;
+ 	struct ksmbd_rpc_command *resp;
+ 
++	if (payload_sz > KSMBD_IPC_MAX_PAYLOAD)
++		return NULL;
++
+ 	msg = ipc_msg_alloc(sizeof(struct ksmbd_rpc_command) + payload_sz + 1);
+ 	if (!msg)
+ 		return NULL;
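/*
 * The three ksmbd checks above bound a client-influenced length before it
 * reaches "payload_sz + 1" inside an allocation size, where it could wrap
 * or demand an enormous buffer.  The guard in isolation (alloc_msg and
 * its parameters are illustrative):
 */
static void *alloc_msg(size_t hdr_sz, size_t payload_sz, size_t max)
{
	if (payload_sz > max)	/* validate before the size arithmetic */
		return NULL;

	return kzalloc(hdr_sz + payload_sz + 1, GFP_KERNEL);
}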
+diff --git a/fs/xfs/xfs_buf_item_recover.c b/fs/xfs/xfs_buf_item_recover.c
+index 5180cbf5a90b4b..0185c92df8c2ea 100644
+--- a/fs/xfs/xfs_buf_item_recover.c
++++ b/fs/xfs/xfs_buf_item_recover.c
+@@ -1036,11 +1036,20 @@ xlog_recover_buf_commit_pass2(
+ 		error = xlog_recover_do_primary_sb_buffer(mp, item, bp, buf_f,
+ 				current_lsn);
+ 		if (error)
+-			goto out_release;
++			goto out_writebuf;
+ 	} else {
+ 		xlog_recover_do_reg_buffer(mp, item, bp, buf_f, current_lsn);
+ 	}
+ 
++	/*
++	 * A buffer held by a buf log item during 'normal' buffer recovery must
++	 * be committed through the buffer I/O submission path to ensure proper
++	 * release. When an error occurs during sb buffer recovery, log shutdown
++	 * is done before submitting the buffer list so that buffers can be
++	 * released correctly through the ioend failure path.
++	 */
++out_writebuf:
++
+ 	/*
+ 	 * Perform delayed write on the buffer.  Asynchronous writes will be
+ 	 * slower when taking into account all the buffers to be flushed.
+diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
+index c1b211c260a9d5..0d73b59f1c9e57 100644
+--- a/fs/xfs/xfs_dquot.c
++++ b/fs/xfs/xfs_dquot.c
+@@ -68,6 +68,31 @@ xfs_dquot_mark_sick(
+ 	}
+ }
+ 
++/*
++ * Detach the dquot buffer if it's still attached, because we can get called
++ * through dqpurge after a log shutdown.  Caller must hold the dqflock or have
++ * otherwise isolated the dquot.
++ */
++void
++xfs_dquot_detach_buf(
++	struct xfs_dquot	*dqp)
++{
++	struct xfs_dq_logitem	*qlip = &dqp->q_logitem;
++	struct xfs_buf		*bp = NULL;
++
++	spin_lock(&qlip->qli_lock);
++	if (qlip->qli_item.li_buf) {
++		bp = qlip->qli_item.li_buf;
++		qlip->qli_item.li_buf = NULL;
++	}
++	spin_unlock(&qlip->qli_lock);
++	if (bp) {
++		xfs_buf_lock(bp);
++		list_del_init(&qlip->qli_item.li_bio_list);
++		xfs_buf_relse(bp);
++	}
++}
++
+ /*
+  * This is called to free all the memory associated with a dquot
+  */
+@@ -76,6 +101,7 @@ xfs_qm_dqdestroy(
+ 	struct xfs_dquot	*dqp)
+ {
+ 	ASSERT(list_empty(&dqp->q_lru));
++	ASSERT(dqp->q_logitem.qli_item.li_buf == NULL);
+ 
+ 	kvfree(dqp->q_logitem.qli_item.li_lv_shadow);
+ 	mutex_destroy(&dqp->q_qlock);
+@@ -1136,9 +1162,11 @@ static void
+ xfs_qm_dqflush_done(
+ 	struct xfs_log_item	*lip)
+ {
+-	struct xfs_dq_logitem	*qip = (struct xfs_dq_logitem *)lip;
+-	struct xfs_dquot	*dqp = qip->qli_dquot;
++	struct xfs_dq_logitem	*qlip =
++			container_of(lip, struct xfs_dq_logitem, qli_item);
++	struct xfs_dquot	*dqp = qlip->qli_dquot;
+ 	struct xfs_ail		*ailp = lip->li_ailp;
++	struct xfs_buf		*bp = NULL;
+ 	xfs_lsn_t		tail_lsn;
+ 
+ 	/*
+@@ -1150,12 +1178,12 @@ xfs_qm_dqflush_done(
+ 	 * holding the lock before removing the dquot from the AIL.
+ 	 */
+ 	if (test_bit(XFS_LI_IN_AIL, &lip->li_flags) &&
+-	    ((lip->li_lsn == qip->qli_flush_lsn) ||
++	    ((lip->li_lsn == qlip->qli_flush_lsn) ||
+ 	     test_bit(XFS_LI_FAILED, &lip->li_flags))) {
+ 
+ 		spin_lock(&ailp->ail_lock);
+ 		xfs_clear_li_failed(lip);
+-		if (lip->li_lsn == qip->qli_flush_lsn) {
++		if (lip->li_lsn == qlip->qli_flush_lsn) {
+ 			/* xfs_ail_update_finish() drops the AIL lock */
+ 			tail_lsn = xfs_ail_delete_one(ailp, lip);
+ 			xfs_ail_update_finish(ailp, tail_lsn);
+@@ -1168,6 +1196,19 @@ xfs_qm_dqflush_done(
+ 	 * Release the dq's flush lock since we're done with it.
+ 	 */
+ 	xfs_dqfunlock(dqp);
++
++	/*
++	 * If this dquot hasn't been dirtied since initiating the last dqflush,
++	 * release the buffer reference.
++	 */
++	spin_lock(&qlip->qli_lock);
++	if (!qlip->qli_dirty) {
++		bp = lip->li_buf;
++		lip->li_buf = NULL;
++	}
++	spin_unlock(&qlip->qli_lock);
++	if (bp)
++		xfs_buf_rele(bp);
+ }
+ 
+ void
+@@ -1190,7 +1231,7 @@ xfs_buf_dquot_io_fail(
+ 
+ 	spin_lock(&bp->b_mount->m_ail->ail_lock);
+ 	list_for_each_entry(lip, &bp->b_li_list, li_bio_list)
+-		xfs_set_li_failed(lip, bp);
++		set_bit(XFS_LI_FAILED, &lip->li_flags);
+ 	spin_unlock(&bp->b_mount->m_ail->ail_lock);
+ }
+ 
+@@ -1232,6 +1273,115 @@ xfs_qm_dqflush_check(
+ 	return NULL;
+ }
+ 
++/*
++ * Get the buffer containing the on-disk dquot.
++ *
++ * Requires dquot flush lock, will clear the dirty flag, delete the quota log
++ * item from the AIL, and shut down the system if something goes wrong.
++ */
++static int
++xfs_dquot_read_buf(
++	struct xfs_trans	*tp,
++	struct xfs_dquot	*dqp,
++	struct xfs_buf		**bpp)
++{
++	struct xfs_mount	*mp = dqp->q_mount;
++	struct xfs_buf		*bp = NULL;
++	int			error;
++
++	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, dqp->q_blkno,
++				   mp->m_quotainfo->qi_dqchunklen, 0,
++				   &bp, &xfs_dquot_buf_ops);
++	if (xfs_metadata_is_sick(error))
++		xfs_dquot_mark_sick(dqp);
++	if (error)
++		goto out_abort;
++
++	*bpp = bp;
++	return 0;
++
++out_abort:
++	dqp->q_flags &= ~XFS_DQFLAG_DIRTY;
++	xfs_trans_ail_delete(&dqp->q_logitem.qli_item, 0);
++	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
++	return error;
++}
++
++/*
++ * Attach a dquot buffer to this dquot to avoid allocating a buffer during a
++ * dqflush, since dqflush can be called from reclaim context.  Caller must hold
++ * the dqlock.
++ */
++int
++xfs_dquot_attach_buf(
++	struct xfs_trans	*tp,
++	struct xfs_dquot	*dqp)
++{
++	struct xfs_dq_logitem	*qlip = &dqp->q_logitem;
++	struct xfs_log_item	*lip = &qlip->qli_item;
++	int			error;
++
++	spin_lock(&qlip->qli_lock);
++	if (!lip->li_buf) {
++		struct xfs_buf	*bp = NULL;
++
++		spin_unlock(&qlip->qli_lock);
++		error = xfs_dquot_read_buf(tp, dqp, &bp);
++		if (error)
++			return error;
++
++		/*
++		 * Hold the dquot buffer so that we retain our ref to it after
++		 * detaching it from the transaction, then give that ref to the
++		 * dquot log item so that the AIL does not have to read the
++		 * dquot buffer to push this item.
++		 */
++		xfs_buf_hold(bp);
++		xfs_trans_brelse(tp, bp);
++
++		spin_lock(&qlip->qli_lock);
++		lip->li_buf = bp;
++	}
++	qlip->qli_dirty = true;
++	spin_unlock(&qlip->qli_lock);
++
++	return 0;
++}
++
++/*
++ * Get a new reference the dquot buffer attached to this dquot for a dqflush
++ * operation.
++ *
++ * Returns 0 and a NULL bp if none was attached to the dquot; 0 and a locked
++ * bp; or -EAGAIN if the buffer could not be locked.
++ */
++int
++xfs_dquot_use_attached_buf(
++	struct xfs_dquot	*dqp,
++	struct xfs_buf		**bpp)
++{
++	struct xfs_buf		*bp = dqp->q_logitem.qli_item.li_buf;
++
++	/*
++	 * A NULL buffer can happen if the dquot dirty flag was set but the
++	 * filesystem shut down before transaction commit happened.  In that
++	 * case we're not going to flush anyway.
++	 */
++	if (!bp) {
++		ASSERT(xfs_is_shutdown(dqp->q_mount));
++
++		*bpp = NULL;
++		return 0;
++	}
++
++	if (!xfs_buf_trylock(bp))
++		return -EAGAIN;
++
++	xfs_buf_hold(bp);
++	*bpp = bp;
++	return 0;
++}
++
+ /*
+  * Write a modified dquot to disk.
+  * The dquot must be locked and the flush lock too taken by caller.
+@@ -1243,11 +1393,11 @@ xfs_qm_dqflush_check(
+ int
+ xfs_qm_dqflush(
+ 	struct xfs_dquot	*dqp,
+-	struct xfs_buf		**bpp)
++	struct xfs_buf		*bp)
+ {
+ 	struct xfs_mount	*mp = dqp->q_mount;
+-	struct xfs_log_item	*lip = &dqp->q_logitem.qli_item;
+-	struct xfs_buf		*bp;
++	struct xfs_dq_logitem	*qlip = &dqp->q_logitem;
++	struct xfs_log_item	*lip = &qlip->qli_item;
+ 	struct xfs_dqblk	*dqblk;
+ 	xfs_failaddr_t		fa;
+ 	int			error;
+@@ -1257,28 +1407,12 @@ xfs_qm_dqflush(
+ 
+ 	trace_xfs_dqflush(dqp);
+ 
+-	*bpp = NULL;
+-
+ 	xfs_qm_dqunpin_wait(dqp);
+ 
+-	/*
+-	 * Get the buffer containing the on-disk dquot
+-	 */
+-	error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
+-				   mp->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK,
+-				   &bp, &xfs_dquot_buf_ops);
+-	if (error == -EAGAIN)
+-		goto out_unlock;
+-	if (xfs_metadata_is_sick(error))
+-		xfs_dquot_mark_sick(dqp);
+-	if (error)
+-		goto out_abort;
+-
+ 	fa = xfs_qm_dqflush_check(dqp);
+ 	if (fa) {
+ 		xfs_alert(mp, "corrupt dquot ID 0x%x in memory at %pS",
+ 				dqp->q_id, fa);
+-		xfs_buf_relse(bp);
+ 		xfs_dquot_mark_sick(dqp);
+ 		error = -EFSCORRUPTED;
+ 		goto out_abort;
+@@ -1293,8 +1427,15 @@ xfs_qm_dqflush(
+ 	 */
+ 	dqp->q_flags &= ~XFS_DQFLAG_DIRTY;
+ 
+-	xfs_trans_ail_copy_lsn(mp->m_ail, &dqp->q_logitem.qli_flush_lsn,
+-					&dqp->q_logitem.qli_item.li_lsn);
++	/*
++	 * We hold the dquot lock, so nobody can dirty it while we're
++	 * scheduling the write out.  Clear the dirty-since-flush flag.
++	 */
++	spin_lock(&qlip->qli_lock);
++	qlip->qli_dirty = false;
++	spin_unlock(&qlip->qli_lock);
++
++	xfs_trans_ail_copy_lsn(mp->m_ail, &qlip->qli_flush_lsn, &lip->li_lsn);
+ 
+ 	/*
+ 	 * copy the lsn into the on-disk dquot now while we have the in memory
+@@ -1306,7 +1447,7 @@ xfs_qm_dqflush(
+ 	 * of a dquot without an up-to-date CRC getting to disk.
+ 	 */
+ 	if (xfs_has_crc(mp)) {
+-		dqblk->dd_lsn = cpu_to_be64(dqp->q_logitem.qli_item.li_lsn);
++		dqblk->dd_lsn = cpu_to_be64(lip->li_lsn);
+ 		xfs_update_cksum((char *)dqblk, sizeof(struct xfs_dqblk),
+ 				 XFS_DQUOT_CRC_OFF);
+ 	}
+@@ -1316,7 +1457,7 @@ xfs_qm_dqflush(
+ 	 * the AIL and release the flush lock once the dquot is synced to disk.
+ 	 */
+ 	bp->b_flags |= _XBF_DQUOTS;
+-	list_add_tail(&dqp->q_logitem.qli_item.li_bio_list, &bp->b_li_list);
++	list_add_tail(&lip->li_bio_list, &bp->b_li_list);
+ 
+ 	/*
+ 	 * If the buffer is pinned then push on the log so we won't
+@@ -1328,14 +1469,12 @@ xfs_qm_dqflush(
+ 	}
+ 
+ 	trace_xfs_dqflush_done(dqp);
+-	*bpp = bp;
+ 	return 0;
+ 
+ out_abort:
+ 	dqp->q_flags &= ~XFS_DQFLAG_DIRTY;
+ 	xfs_trans_ail_delete(lip, 0);
+ 	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
+-out_unlock:
+ 	xfs_dqfunlock(dqp);
+ 	return error;
+ }
+diff --git a/fs/xfs/xfs_dquot.h b/fs/xfs/xfs_dquot.h
+index 677bb2dc9ac913..bd7bfd9e402e5b 100644
+--- a/fs/xfs/xfs_dquot.h
++++ b/fs/xfs/xfs_dquot.h
+@@ -204,7 +204,7 @@ void xfs_dquot_to_disk(struct xfs_disk_dquot *ddqp, struct xfs_dquot *dqp);
+ #define XFS_DQ_IS_DIRTY(dqp)	((dqp)->q_flags & XFS_DQFLAG_DIRTY)
+ 
+ void		xfs_qm_dqdestroy(struct xfs_dquot *dqp);
+-int		xfs_qm_dqflush(struct xfs_dquot *dqp, struct xfs_buf **bpp);
++int		xfs_qm_dqflush(struct xfs_dquot *dqp, struct xfs_buf *bp);
+ void		xfs_qm_dqunpin_wait(struct xfs_dquot *dqp);
+ void		xfs_qm_adjust_dqtimers(struct xfs_dquot *d);
+ void		xfs_qm_adjust_dqlimits(struct xfs_dquot *d);
+@@ -227,6 +227,10 @@ void		xfs_dqlockn(struct xfs_dqtrx *q);
+ 
+ void		xfs_dquot_set_prealloc_limits(struct xfs_dquot *);
+ 
++int xfs_dquot_attach_buf(struct xfs_trans *tp, struct xfs_dquot *dqp);
++int xfs_dquot_use_attached_buf(struct xfs_dquot *dqp, struct xfs_buf **bpp);
++void xfs_dquot_detach_buf(struct xfs_dquot *dqp);
++
+ static inline struct xfs_dquot *xfs_qm_dqhold(struct xfs_dquot *dqp)
+ {
+ 	xfs_dqlock(dqp);
+diff --git a/fs/xfs/xfs_dquot_item.c b/fs/xfs/xfs_dquot_item.c
+index 7d19091215b080..271b195ebb9326 100644
+--- a/fs/xfs/xfs_dquot_item.c
++++ b/fs/xfs/xfs_dquot_item.c
+@@ -123,8 +123,9 @@ xfs_qm_dquot_logitem_push(
+ 		__releases(&lip->li_ailp->ail_lock)
+ 		__acquires(&lip->li_ailp->ail_lock)
+ {
+-	struct xfs_dquot	*dqp = DQUOT_ITEM(lip)->qli_dquot;
+-	struct xfs_buf		*bp = lip->li_buf;
++	struct xfs_dq_logitem	*qlip = DQUOT_ITEM(lip);
++	struct xfs_dquot	*dqp = qlip->qli_dquot;
++	struct xfs_buf		*bp;
+ 	uint			rval = XFS_ITEM_SUCCESS;
+ 	int			error;
+ 
+@@ -155,14 +156,25 @@ xfs_qm_dquot_logitem_push(
+ 
+ 	spin_unlock(&lip->li_ailp->ail_lock);
+ 
+-	error = xfs_qm_dqflush(dqp, &bp);
++	error = xfs_dquot_use_attached_buf(dqp, &bp);
++	if (error == -EAGAIN) {
++		xfs_dqfunlock(dqp);
++		rval = XFS_ITEM_LOCKED;
++		goto out_relock_ail;
++	}
++
++	/*
++	 * dqflush completes dqflock on error, and the delwri ioend does it on
++	 * success.
++	 */
++	error = xfs_qm_dqflush(dqp, bp);
+ 	if (!error) {
+ 		if (!xfs_buf_delwri_queue(bp, buffer_list))
+ 			rval = XFS_ITEM_FLUSHING;
+-		xfs_buf_relse(bp);
+-	} else if (error == -EAGAIN)
+-		rval = XFS_ITEM_LOCKED;
++	}
++	xfs_buf_relse(bp);
+ 
++out_relock_ail:
+ 	spin_lock(&lip->li_ailp->ail_lock);
+ out_unlock:
+ 	xfs_dqunlock(dqp);
+@@ -195,12 +207,10 @@ xfs_qm_dquot_logitem_committing(
+ }
+ 
+ #ifdef DEBUG_EXPENSIVE
+-static int
+-xfs_qm_dquot_logitem_precommit(
+-	struct xfs_trans	*tp,
+-	struct xfs_log_item	*lip)
++static void
++xfs_qm_dquot_logitem_precommit_check(
++	struct xfs_dquot	*dqp)
+ {
+-	struct xfs_dquot	*dqp = DQUOT_ITEM(lip)->qli_dquot;
+ 	struct xfs_mount	*mp = dqp->q_mount;
+ 	struct xfs_disk_dquot	ddq = { };
+ 	xfs_failaddr_t		fa;
+@@ -216,13 +226,24 @@ xfs_qm_dquot_logitem_precommit(
+ 		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
+ 		ASSERT(fa == NULL);
+ 	}
+-
+-	return 0;
+ }
+ #else
+-# define xfs_qm_dquot_logitem_precommit	NULL
++# define xfs_qm_dquot_logitem_precommit_check(...)	((void)0)
+ #endif
+ 
++static int
++xfs_qm_dquot_logitem_precommit(
++	struct xfs_trans	*tp,
++	struct xfs_log_item	*lip)
++{
++	struct xfs_dq_logitem	*qlip = DQUOT_ITEM(lip);
++	struct xfs_dquot	*dqp = qlip->qli_dquot;
++
++	xfs_qm_dquot_logitem_precommit_check(dqp);
++
++	return xfs_dquot_attach_buf(tp, dqp);
++}
++
+ static const struct xfs_item_ops xfs_dquot_item_ops = {
+ 	.iop_size	= xfs_qm_dquot_logitem_size,
+ 	.iop_precommit	= xfs_qm_dquot_logitem_precommit,
+@@ -247,5 +268,7 @@ xfs_qm_dquot_logitem_init(
+ 
+ 	xfs_log_item_init(dqp->q_mount, &lp->qli_item, XFS_LI_DQUOT,
+ 					&xfs_dquot_item_ops);
++	spin_lock_init(&lp->qli_lock);
+ 	lp->qli_dquot = dqp;
++	lp->qli_dirty = false;
+ }
+diff --git a/fs/xfs/xfs_dquot_item.h b/fs/xfs/xfs_dquot_item.h
+index 794710c2447493..d66e52807d76d5 100644
+--- a/fs/xfs/xfs_dquot_item.h
++++ b/fs/xfs/xfs_dquot_item.h
+@@ -14,6 +14,13 @@ struct xfs_dq_logitem {
+ 	struct xfs_log_item	qli_item;	/* common portion */
+ 	struct xfs_dquot	*qli_dquot;	/* dquot ptr */
+ 	xfs_lsn_t		qli_flush_lsn;	/* lsn at last flush */
++
++	/*
++	 * We use this spinlock to coordinate access to the li_buf pointer in
++	 * the log item and the qli_dirty flag.
++	 */
++	spinlock_t		qli_lock;
++	bool			qli_dirty;	/* dirtied since last flush? */
+ };
+ 
+ void xfs_qm_dquot_logitem_init(struct xfs_dquot *dqp);
+diff --git a/fs/xfs/xfs_exchrange.c b/fs/xfs/xfs_exchrange.c
+index 75cb53f090d1f7..7c8195895a734e 100644
+--- a/fs/xfs/xfs_exchrange.c
++++ b/fs/xfs/xfs_exchrange.c
+@@ -326,22 +326,6 @@ xfs_exchrange_mappings(
+  * successfully but before locks are dropped.
+  */
+ 
+-/* Verify that we have security clearance to perform this operation. */
+-static int
+-xfs_exchange_range_verify_area(
+-	struct xfs_exchrange	*fxr)
+-{
+-	int			ret;
+-
+-	ret = remap_verify_area(fxr->file1, fxr->file1_offset, fxr->length,
+-			true);
+-	if (ret)
+-		return ret;
+-
+-	return remap_verify_area(fxr->file2, fxr->file2_offset, fxr->length,
+-			true);
+-}
+-
+ /*
+  * Performs necessary checks before doing a range exchange, having stabilized
+  * mutable inode attributes via i_rwsem.
+@@ -352,11 +336,13 @@ xfs_exchange_range_checks(
+ 	unsigned int		alloc_unit)
+ {
+ 	struct inode		*inode1 = file_inode(fxr->file1);
++	loff_t			size1 = i_size_read(inode1);
+ 	struct inode		*inode2 = file_inode(fxr->file2);
++	loff_t			size2 = i_size_read(inode2);
+ 	uint64_t		allocmask = alloc_unit - 1;
+ 	int64_t			test_len;
+ 	uint64_t		blen;
+-	loff_t			size1, size2, tmp;
++	loff_t			tmp;
+ 	int			error;
+ 
+ 	/* Don't touch certain kinds of inodes */
+@@ -365,24 +351,25 @@ xfs_exchange_range_checks(
+ 	if (IS_SWAPFILE(inode1) || IS_SWAPFILE(inode2))
+ 		return -ETXTBSY;
+ 
+-	size1 = i_size_read(inode1);
+-	size2 = i_size_read(inode2);
+-
+ 	/* Ranges cannot start after EOF. */
+ 	if (fxr->file1_offset > size1 || fxr->file2_offset > size2)
+ 		return -EINVAL;
+ 
+-	/*
+-	 * If the caller said to exchange to EOF, we set the length of the
+-	 * request large enough to cover everything to the end of both files.
+-	 */
+ 	if (fxr->flags & XFS_EXCHANGE_RANGE_TO_EOF) {
++		/*
++		 * If the caller said to exchange to EOF, we set the length of
++		 * the request large enough to cover everything to the end of
++		 * both files.
++		 */
+ 		fxr->length = max_t(int64_t, size1 - fxr->file1_offset,
+ 					     size2 - fxr->file2_offset);
+-
+-		error = xfs_exchange_range_verify_area(fxr);
+-		if (error)
+-			return error;
++	} else {
++		/*
++		 * Otherwise we require both ranges to end within EOF.
++		 */
++		if (fxr->file1_offset + fxr->length > size1 ||
++		    fxr->file2_offset + fxr->length > size2)
++			return -EINVAL;
+ 	}
+ 
+ 	/*
+@@ -398,15 +385,6 @@ xfs_exchange_range_checks(
+ 	    check_add_overflow(fxr->file2_offset, fxr->length, &tmp))
+ 		return -EINVAL;
+ 
+-	/*
+-	 * We require both ranges to end within EOF, unless we're exchanging
+-	 * to EOF.
+-	 */
+-	if (!(fxr->flags & XFS_EXCHANGE_RANGE_TO_EOF) &&
+-	    (fxr->file1_offset + fxr->length > size1 ||
+-	     fxr->file2_offset + fxr->length > size2))
+-		return -EINVAL;
+-
+ 	/*
+ 	 * Make sure we don't hit any file size limits.  If we hit any size
+ 	 * limits such that test_length was adjusted, we abort the whole
+@@ -744,6 +722,7 @@ xfs_exchange_range(
+ {
+ 	struct inode		*inode1 = file_inode(fxr->file1);
+ 	struct inode		*inode2 = file_inode(fxr->file2);
++	loff_t			check_len = fxr->length;
+ 	int			ret;
+ 
+ 	BUILD_BUG_ON(XFS_EXCHANGE_RANGE_ALL_FLAGS &
+@@ -776,14 +755,18 @@ xfs_exchange_range(
+ 		return -EBADF;
+ 
+ 	/*
+-	 * If we're not exchanging to EOF, we can check the areas before
+-	 * stabilizing both files' i_size.
++	 * If we're exchanging to EOF we can't calculate the length until taking
++	 * the iolock.  Pass a 0 length to remap_verify_area similar to the
++	 * FICLONE and FICLONERANGE ioctls that support cloning to EOF as well.
+ 	 */
+-	if (!(fxr->flags & XFS_EXCHANGE_RANGE_TO_EOF)) {
+-		ret = xfs_exchange_range_verify_area(fxr);
+-		if (ret)
+-			return ret;
+-	}
++	if (fxr->flags & XFS_EXCHANGE_RANGE_TO_EOF)
++		check_len = 0;
++	ret = remap_verify_area(fxr->file1, fxr->file1_offset, check_len, true);
++	if (ret)
++		return ret;
++	ret = remap_verify_area(fxr->file2, fxr->file2_offset, check_len, true);
++	if (ret)
++		return ret;
+ 
+ 	/* Update cmtime if the fd/inode don't forbid it. */
+ 	if (!(fxr->file1->f_mode & FMODE_NOCMTIME) && !IS_NOCMTIME(inode1))
+diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
+index 19dcb569a3e7f8..ed09b4a3084e1c 100644
+--- a/fs/xfs/xfs_inode.c
++++ b/fs/xfs/xfs_inode.c
+@@ -1392,8 +1392,11 @@ xfs_inactive(
+ 		goto out;
+ 
+ 	/* Try to clean out the cow blocks if there are any. */
+-	if (xfs_inode_has_cow_data(ip))
+-		xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, true);
++	if (xfs_inode_has_cow_data(ip)) {
++		error = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, true);
++		if (error)
++			goto out;
++	}
+ 
+ 	if (VFS_I(ip)->i_nlink != 0) {
+ 		/*
+diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
+index 86da16f54be9d7..6335b122486fee 100644
+--- a/fs/xfs/xfs_iomap.c
++++ b/fs/xfs/xfs_iomap.c
+@@ -942,10 +942,8 @@ xfs_dax_write_iomap_end(
+ 	if (!xfs_is_cow_inode(ip))
+ 		return 0;
+ 
+-	if (!written) {
+-		xfs_reflink_cancel_cow_range(ip, pos, length, true);
+-		return 0;
+-	}
++	if (!written)
++		return xfs_reflink_cancel_cow_range(ip, pos, length, true);
+ 
+ 	return xfs_reflink_end_cow(ip, pos, written);
+ }
+diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
+index 7e2307921deb2f..3212b5bf3fb3c6 100644
+--- a/fs/xfs/xfs_qm.c
++++ b/fs/xfs/xfs_qm.c
+@@ -146,17 +146,29 @@ xfs_qm_dqpurge(
+ 		 * We don't care about getting disk errors here. We need
+ 		 * to purge this dquot anyway, so we go ahead regardless.
+ 		 */
+-		error = xfs_qm_dqflush(dqp, &bp);
++		error = xfs_dquot_use_attached_buf(dqp, &bp);
++		if (error == -EAGAIN) {
++			xfs_dqfunlock(dqp);
++			dqp->q_flags &= ~XFS_DQFLAG_FREEING;
++			goto out_unlock;
++		}
++		if (!bp)
++			goto out_funlock;
++
++		/*
++		 * dqflush completes dqflock on error, and the bwrite ioend
++		 * does it on success.
++		 */
++		error = xfs_qm_dqflush(dqp, bp);
+ 		if (!error) {
+ 			error = xfs_bwrite(bp);
+ 			xfs_buf_relse(bp);
+-		} else if (error == -EAGAIN) {
+-			dqp->q_flags &= ~XFS_DQFLAG_FREEING;
+-			goto out_unlock;
+ 		}
+ 		xfs_dqflock(dqp);
+ 	}
++	xfs_dquot_detach_buf(dqp);
+ 
++out_funlock:
+ 	ASSERT(atomic_read(&dqp->q_pincount) == 0);
+ 	ASSERT(xlog_is_shutdown(dqp->q_logitem.qli_item.li_log) ||
+ 		!test_bit(XFS_LI_IN_AIL, &dqp->q_logitem.qli_item.li_flags));
+@@ -462,7 +474,17 @@ xfs_qm_dquot_isolate(
+ 		/* we have to drop the LRU lock to flush the dquot */
+ 		spin_unlock(lru_lock);
+ 
+-		error = xfs_qm_dqflush(dqp, &bp);
++		error = xfs_dquot_use_attached_buf(dqp, &bp);
++		if (!bp || error == -EAGAIN) {
++			xfs_dqfunlock(dqp);
++			goto out_unlock_dirty;
++		}
++
++		/*
++		 * dqflush completes dqflock on error, and the delwri ioend
++		 * does it on success.
++		 */
++		error = xfs_qm_dqflush(dqp, bp);
+ 		if (error)
+ 			goto out_unlock_dirty;
+ 
+@@ -470,6 +492,8 @@ xfs_qm_dquot_isolate(
+ 		xfs_buf_relse(bp);
+ 		goto out_unlock_dirty;
+ 	}
++
++	xfs_dquot_detach_buf(dqp);
+ 	xfs_dqfunlock(dqp);
+ 
+ 	/*
+@@ -1108,6 +1132,10 @@ xfs_qm_quotacheck_dqadjust(
+ 		return error;
+ 	}
+ 
++	error = xfs_dquot_attach_buf(NULL, dqp);
++	if (error)
++		return error;
++
+ 	trace_xfs_dqadjust(dqp);
+ 
+ 	/*
+@@ -1287,11 +1315,17 @@ xfs_qm_flush_one(
+ 		goto out_unlock;
+ 	}
+ 
+-	error = xfs_qm_dqflush(dqp, &bp);
++	error = xfs_dquot_use_attached_buf(dqp, &bp);
+ 	if (error)
+ 		goto out_unlock;
++	if (!bp) {
++		error = -EFSCORRUPTED;
++		goto out_unlock;
++	}
+ 
+-	xfs_buf_delwri_queue(bp, buffer_list);
++	error = xfs_qm_dqflush(dqp, bp);
++	if (!error)
++		xfs_buf_delwri_queue(bp, buffer_list);
+ 	xfs_buf_relse(bp);
+ out_unlock:
+ 	xfs_dqunlock(dqp);
+diff --git a/fs/xfs/xfs_qm_bhv.c b/fs/xfs/xfs_qm_bhv.c
+index a11436579877d5..ed1d597c30ca25 100644
+--- a/fs/xfs/xfs_qm_bhv.c
++++ b/fs/xfs/xfs_qm_bhv.c
+@@ -19,28 +19,41 @@
+ STATIC void
+ xfs_fill_statvfs_from_dquot(
+ 	struct kstatfs		*statp,
++	struct xfs_inode	*ip,
+ 	struct xfs_dquot	*dqp)
+ {
++	struct xfs_dquot_res	*blkres = &dqp->q_blk;
+ 	uint64_t		limit;
+ 
+-	limit = dqp->q_blk.softlimit ?
+-		dqp->q_blk.softlimit :
+-		dqp->q_blk.hardlimit;
+-	if (limit && statp->f_blocks > limit) {
+-		statp->f_blocks = limit;
+-		statp->f_bfree = statp->f_bavail =
+-			(statp->f_blocks > dqp->q_blk.reserved) ?
+-			 (statp->f_blocks - dqp->q_blk.reserved) : 0;
++	if (XFS_IS_REALTIME_MOUNT(ip->i_mount) &&
++	    (ip->i_diflags & (XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_REALTIME)))
++		blkres = &dqp->q_rtb;
++
++	limit = blkres->softlimit ?
++		blkres->softlimit :
++		blkres->hardlimit;
++	if (limit) {
++		uint64_t	remaining = 0;
++
++		if (limit > blkres->reserved)
++			remaining = limit - blkres->reserved;
++
++		statp->f_blocks = min(statp->f_blocks, limit);
++		statp->f_bfree = min(statp->f_bfree, remaining);
++		statp->f_bavail = min(statp->f_bavail, remaining);
+ 	}
+ 
+ 	limit = dqp->q_ino.softlimit ?
+ 		dqp->q_ino.softlimit :
+ 		dqp->q_ino.hardlimit;
+-	if (limit && statp->f_files > limit) {
+-		statp->f_files = limit;
+-		statp->f_ffree =
+-			(statp->f_files > dqp->q_ino.reserved) ?
+-			 (statp->f_files - dqp->q_ino.reserved) : 0;
++	if (limit) {
++		uint64_t	remaining = 0;
++
++		if (limit > dqp->q_ino.reserved)
++			remaining = limit - dqp->q_ino.reserved;
++
++		statp->f_files = min(statp->f_files, limit);
++		statp->f_ffree = min(statp->f_ffree, remaining);
+ 	}
+ }
+ 
+@@ -61,7 +74,7 @@ xfs_qm_statvfs(
+ 	struct xfs_dquot	*dqp;
+ 
+ 	if (!xfs_qm_dqget(mp, ip->i_projid, XFS_DQTYPE_PROJ, false, &dqp)) {
+-		xfs_fill_statvfs_from_dquot(statp, dqp);
++		xfs_fill_statvfs_from_dquot(statp, ip, dqp);
+ 		xfs_qm_dqput(dqp);
+ 	}
+ }
+diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
+index fbb3a1594c0dcc..8f7c9eaeb36090 100644
+--- a/fs/xfs/xfs_super.c
++++ b/fs/xfs/xfs_super.c
+@@ -873,12 +873,6 @@ xfs_fs_statfs(
+ 	ffree = statp->f_files - (icount - ifree);
+ 	statp->f_ffree = max_t(int64_t, ffree, 0);
+ 
+-
+-	if ((ip->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
+-	    ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) ==
+-			      (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))
+-		xfs_qm_statvfs(ip, statp);
+-
+ 	if (XFS_IS_REALTIME_MOUNT(mp) &&
+ 	    (ip->i_diflags & (XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_REALTIME))) {
+ 		s64	freertx;
+@@ -888,6 +882,11 @@ xfs_fs_statfs(
+ 		statp->f_bavail = statp->f_bfree = xfs_rtx_to_rtb(mp, freertx);
+ 	}
+ 
++	if ((ip->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
++	    ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) ==
++			      (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))
++		xfs_qm_statvfs(ip, statp);
++
+ 	return 0;
+ }
+ 
+diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
+index 30e03342287a94..ee46051db12dde 100644
+--- a/fs/xfs/xfs_trans.c
++++ b/fs/xfs/xfs_trans.c
+@@ -835,16 +835,11 @@ __xfs_trans_commit(
+ 	trace_xfs_trans_commit(tp, _RET_IP_);
+ 
+ 	/*
+-	 * Finish deferred items on final commit. Only permanent transactions
+-	 * should ever have deferred ops.
++	 * Commit per-transaction changes that are not already tracked through
++	 * log items.  This can add dirty log items to the transaction.
+ 	 */
+-	WARN_ON_ONCE(!list_empty(&tp->t_dfops) &&
+-		     !(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
+-	if (!regrant && (tp->t_flags & XFS_TRANS_PERM_LOG_RES)) {
+-		error = xfs_defer_finish_noroll(&tp);
+-		if (error)
+-			goto out_unreserve;
+-	}
++	if (tp->t_flags & XFS_TRANS_SB_DIRTY)
++		xfs_trans_apply_sb_deltas(tp);
+ 
+ 	error = xfs_trans_run_precommits(tp);
+ 	if (error)
+@@ -876,8 +871,6 @@ __xfs_trans_commit(
+ 	/*
+ 	 * If we need to update the superblock, then do it now.
+ 	 */
+-	if (tp->t_flags & XFS_TRANS_SB_DIRTY)
+-		xfs_trans_apply_sb_deltas(tp);
+ 	xfs_trans_apply_dquot_deltas(tp);
+ 
+ 	xlog_cil_commit(log, tp, &commit_seq, regrant);
+@@ -924,6 +917,20 @@ int
+ xfs_trans_commit(
+ 	struct xfs_trans	*tp)
+ {
++	/*
++	 * Finish deferred items on final commit. Only permanent transactions
++	 * should ever have deferred ops.
++	 */
++	WARN_ON_ONCE(!list_empty(&tp->t_dfops) &&
++		     !(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
++	if (tp->t_flags & XFS_TRANS_PERM_LOG_RES) {
++		int error = xfs_defer_finish_noroll(&tp);
++		if (error) {
++			xfs_trans_cancel(tp);
++			return error;
++		}
++	}
++
+ 	return __xfs_trans_commit(tp, false);
+ }
+ 
+diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
+index 8ede9d099d1fea..f56d62dced97b1 100644
+--- a/fs/xfs/xfs_trans_ail.c
++++ b/fs/xfs/xfs_trans_ail.c
+@@ -360,7 +360,7 @@ xfsaild_resubmit_item(
+ 
+ 	/* protected by ail_lock */
+ 	list_for_each_entry(lip, &bp->b_li_list, li_bio_list) {
+-		if (bp->b_flags & _XBF_INODES)
++		if (bp->b_flags & (_XBF_INODES | _XBF_DQUOTS))
+ 			clear_bit(XFS_LI_FAILED, &lip->li_flags);
+ 		else
+ 			xfs_clear_li_failed(lip);
+diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
+index e3fa43291f449d..1e2b25e204cb52 100644
+--- a/include/drm/drm_connector.h
++++ b/include/drm/drm_connector.h
+@@ -2001,8 +2001,11 @@ struct drm_connector {
+ 	struct drm_encoder *encoder;
+ 
+ #define MAX_ELD_BYTES	128
+-	/** @eld: EDID-like data, if present */
++	/** @eld: EDID-like data, if present, protected by @eld_mutex */
+ 	uint8_t eld[MAX_ELD_BYTES];
++	/** @eld_mutex: protection for concurrent access to @eld */
++	struct mutex eld_mutex;
++
+ 	/** @latency_present: AV delay info from ELD, if found */
+ 	bool latency_present[2];
+ 	/**
+diff --git a/include/drm/drm_utils.h b/include/drm/drm_utils.h
+index 70775748d243b0..15fa9b6865f448 100644
+--- a/include/drm/drm_utils.h
++++ b/include/drm/drm_utils.h
+@@ -12,8 +12,12 @@
+ 
+ #include <linux/types.h>
+ 
++struct drm_edid;
++
+ int drm_get_panel_orientation_quirk(int width, int height);
+ 
++int drm_get_panel_min_brightness_quirk(const struct drm_edid *edid);
++
+ signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec);
+ 
+ #endif
+diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
+index e6c00e860951ae..3305c849abd66a 100644
+--- a/include/linux/binfmts.h
++++ b/include/linux/binfmts.h
+@@ -42,7 +42,9 @@ struct linux_binprm {
+ 		 * Set when errors can no longer be returned to the
+ 		 * original userspace.
+ 		 */
+-		point_of_no_return:1;
++		point_of_no_return:1,
++		/* Set when "comm" must come from the dentry. */
++		comm_from_dentry:1;
+ 	struct file *executable; /* Executable to pass to the interpreter */
+ 	struct file *interpreter;
+ 	struct file *file;
+diff --git a/include/linux/call_once.h b/include/linux/call_once.h
+new file mode 100644
+index 00000000000000..6261aa0b3fb00d
+--- /dev/null
++++ b/include/linux/call_once.h
+@@ -0,0 +1,45 @@
++#ifndef _LINUX_CALL_ONCE_H
++#define _LINUX_CALL_ONCE_H
++
++#include <linux/types.h>
++#include <linux/mutex.h>
++
++#define ONCE_NOT_STARTED 0
++#define ONCE_RUNNING     1
++#define ONCE_COMPLETED   2
++
++struct once {
++        atomic_t state;
++        struct mutex lock;
++};
++
++static inline void __once_init(struct once *once, const char *name,
++			       struct lock_class_key *key)
++{
++        atomic_set(&once->state, ONCE_NOT_STARTED);
++        __mutex_init(&once->lock, name, key);
++}
++
++#define once_init(once)							\
++do {									\
++	static struct lock_class_key __key;				\
++	__once_init((once), #once, &__key);				\
++} while (0)
++
++static inline void call_once(struct once *once, void (*cb)(struct once *))
++{
++        /* Pairs with atomic_set_release() below.  */
++        if (atomic_read_acquire(&once->state) == ONCE_COMPLETED)
++                return;
++
++        guard(mutex)(&once->lock);
++        WARN_ON(atomic_read(&once->state) == ONCE_RUNNING);
++        if (atomic_read(&once->state) != ONCE_NOT_STARTED)
++                return;
++
++        atomic_set(&once->state, ONCE_RUNNING);
++        cb(once);
++        atomic_set_release(&once->state, ONCE_COMPLETED);
++}
++
++#endif /* _LINUX_CALL_ONCE_H */
+diff --git a/include/linux/hrtimer_defs.h b/include/linux/hrtimer_defs.h
+index c3b4b7ed7c163f..84a5045f80f36f 100644
+--- a/include/linux/hrtimer_defs.h
++++ b/include/linux/hrtimer_defs.h
+@@ -125,6 +125,7 @@ struct hrtimer_cpu_base {
+ 	ktime_t				softirq_expires_next;
+ 	struct hrtimer			*softirq_next_timer;
+ 	struct hrtimer_clock_base	clock_base[HRTIMER_MAX_CLOCK_BASES];
++	call_single_data_t		csd;
+ } ____cacheline_aligned;
+ 
+ 
+diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
+index 85fe9d0ebb9152..2c66ca21801c17 100644
+--- a/include/linux/kvm_host.h
++++ b/include/linux/kvm_host.h
+@@ -969,6 +969,15 @@ static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx)
+ static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
+ {
+ 	int num_vcpus = atomic_read(&kvm->online_vcpus);
++
++	/*
++	 * Explicitly verify the target vCPU is online, as the anti-speculation
++	 * logic only limits the CPU's ability to speculate, e.g. given a "bad"
++	 * index, clamping the index to 0 would return vCPU0, not NULL.
++	 */
++	if (i >= num_vcpus)
++		return NULL;
++
+ 	i = array_index_nospec(i, num_vcpus);
+ 
+ 	/* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu.  */
+diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
+index 82c7056e27599e..d4b2c09cd5fec4 100644
+--- a/include/linux/mlx5/driver.h
++++ b/include/linux/mlx5/driver.h
+@@ -722,7 +722,6 @@ struct mlx5_timer {
+ 	struct timecounter         tc;
+ 	u32                        nominal_c_mult;
+ 	unsigned long              overflow_period;
+-	struct delayed_work        overflow_work;
+ };
+ 
+ struct mlx5_clock {
+diff --git a/include/linux/platform_data/x86/asus-wmi.h b/include/linux/platform_data/x86/asus-wmi.h
+index 365e119bebaa23..783e2a336861b7 100644
+--- a/include/linux/platform_data/x86/asus-wmi.h
++++ b/include/linux/platform_data/x86/asus-wmi.h
+@@ -184,6 +184,11 @@ static const struct dmi_system_id asus_use_hid_led_dmi_ids[] = {
+ 			DMI_MATCH(DMI_PRODUCT_FAMILY, "ROG Flow"),
+ 		},
+ 	},
++	{
++		.matches = {
++			DMI_MATCH(DMI_PRODUCT_FAMILY, "ProArt P16"),
++		},
++	},
+ 	{
+ 		.matches = {
+ 			DMI_MATCH(DMI_BOARD_NAME, "GA403U"),
+diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
+index 1e6324f0d4efda..24e48af7e8f74a 100644
+--- a/include/net/sch_generic.h
++++ b/include/net/sch_generic.h
+@@ -851,7 +851,7 @@ static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ }
+ 
+ static inline void _bstats_update(struct gnet_stats_basic_sync *bstats,
+-				  __u64 bytes, __u32 packets)
++				  __u64 bytes, __u64 packets)
+ {
+ 	u64_stats_update_begin(&bstats->syncp);
+ 	u64_stats_add(&bstats->bytes, bytes);
+diff --git a/include/rv/da_monitor.h b/include/rv/da_monitor.h
+index 9705b2a98e49e1..510c88bfabd433 100644
+--- a/include/rv/da_monitor.h
++++ b/include/rv/da_monitor.h
+@@ -14,6 +14,7 @@
+ #include <rv/automata.h>
+ #include <linux/rv.h>
+ #include <linux/bug.h>
++#include <linux/sched.h>
+ 
+ #ifdef CONFIG_RV_REACTORS
+ 
+@@ -324,10 +325,13 @@ static inline struct da_monitor *da_get_monitor_##name(struct task_struct *tsk)
+ static void da_monitor_reset_all_##name(void)							\
+ {												\
+ 	struct task_struct *g, *p;								\
++	int cpu;										\
+ 												\
+ 	read_lock(&tasklist_lock);								\
+ 	for_each_process_thread(g, p)								\
+ 		da_monitor_reset_##name(da_get_monitor_##name(p));				\
++	for_each_present_cpu(cpu)								\
++		da_monitor_reset_##name(da_get_monitor_##name(idle_task(cpu)));			\
+ 	read_unlock(&tasklist_lock);								\
+ }												\
+ 												\
+diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
+index 666fe1779ccc63..e1a37e9c2d42d5 100644
+--- a/include/trace/events/rxrpc.h
++++ b/include/trace/events/rxrpc.h
+@@ -218,6 +218,7 @@
+ 	EM(rxrpc_conn_get_conn_input,		"GET inp-conn") \
+ 	EM(rxrpc_conn_get_idle,			"GET idle    ") \
+ 	EM(rxrpc_conn_get_poke_abort,		"GET pk-abort") \
++	EM(rxrpc_conn_get_poke_secured,		"GET secured ") \
+ 	EM(rxrpc_conn_get_poke_timer,		"GET poke    ") \
+ 	EM(rxrpc_conn_get_service_conn,		"GET svc-conn") \
+ 	EM(rxrpc_conn_new_client,		"NEW client  ") \
+diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
+index efe5de6ce208a1..aaa4f3bc688b57 100644
+--- a/include/uapi/drm/amdgpu_drm.h
++++ b/include/uapi/drm/amdgpu_drm.h
+@@ -411,13 +411,20 @@ struct drm_amdgpu_gem_userptr {
+ /* GFX12 and later: */
+ #define AMDGPU_TILING_GFX12_SWIZZLE_MODE_SHIFT			0
+ #define AMDGPU_TILING_GFX12_SWIZZLE_MODE_MASK			0x7
+-/* These are DCC recompression setting for memory management: */
++/* These are DCC recompression settings for memory management: */
+ #define AMDGPU_TILING_GFX12_DCC_MAX_COMPRESSED_BLOCK_SHIFT	3
+ #define AMDGPU_TILING_GFX12_DCC_MAX_COMPRESSED_BLOCK_MASK	0x3 /* 0:64B, 1:128B, 2:256B */
+ #define AMDGPU_TILING_GFX12_DCC_NUMBER_TYPE_SHIFT		5
+ #define AMDGPU_TILING_GFX12_DCC_NUMBER_TYPE_MASK		0x7 /* CB_COLOR0_INFO.NUMBER_TYPE */
+ #define AMDGPU_TILING_GFX12_DCC_DATA_FORMAT_SHIFT		8
+ #define AMDGPU_TILING_GFX12_DCC_DATA_FORMAT_MASK		0x3f /* [0:4]:CB_COLOR0_INFO.FORMAT, [5]:MM */
++/* When clearing the buffer or moving it from VRAM to GTT, don't compress and set DCC metadata
++ * to uncompressed. Set when parts of an allocation bypass DCC and read raw data. */
++#define AMDGPU_TILING_GFX12_DCC_WRITE_COMPRESS_DISABLE_SHIFT	14
++#define AMDGPU_TILING_GFX12_DCC_WRITE_COMPRESS_DISABLE_MASK	0x1
++/* bit gap */
++#define AMDGPU_TILING_GFX12_SCANOUT_SHIFT			63
++#define AMDGPU_TILING_GFX12_SCANOUT_MASK			0x1
+ 
+ /* Set/Get helpers for tiling flags. */
+ #define AMDGPU_TILING_SET(field, value) \
+diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h
+index a4206723f50333..5a199f3d4a26a2 100644
+--- a/include/uapi/linux/input-event-codes.h
++++ b/include/uapi/linux/input-event-codes.h
+@@ -519,6 +519,7 @@
+ #define KEY_NOTIFICATION_CENTER	0x1bc	/* Show/hide the notification center */
+ #define KEY_PICKUP_PHONE	0x1bd	/* Answer incoming call */
+ #define KEY_HANGUP_PHONE	0x1be	/* Decline incoming call */
++#define KEY_LINK_PHONE		0x1bf   /* AL Phone Syncing */
+ 
+ #define KEY_DEL_EOL		0x1c0
+ #define KEY_DEL_EOS		0x1c1
+diff --git a/include/uapi/linux/iommufd.h b/include/uapi/linux/iommufd.h
+index 72010f71c5e479..8c4470742dcd99 100644
+--- a/include/uapi/linux/iommufd.h
++++ b/include/uapi/linux/iommufd.h
+@@ -737,6 +737,7 @@ enum iommu_hwpt_pgfault_perm {
+  * @pasid: Process Address Space ID
+  * @grpid: Page Request Group Index
+  * @perm: Combination of enum iommu_hwpt_pgfault_perm
++ * @__reserved: Must be 0.
+  * @addr: Fault address
+  * @length: a hint of how much data the requestor is expecting to fetch. For
+  *          example, if the PRI initiator knows it is going to do a 10MB
+@@ -752,7 +753,8 @@ struct iommu_hwpt_pgfault {
+ 	__u32 pasid;
+ 	__u32 grpid;
+ 	__u32 perm;
+-	__u64 addr;
++	__u32 __reserved;
++	__aligned_u64 addr;
+ 	__u32 length;
+ 	__u32 cookie;
+ };
+diff --git a/include/uapi/linux/raid/md_p.h b/include/uapi/linux/raid/md_p.h
+index 5a43c23f53bfbf..ff47b6f0ba0f5f 100644
+--- a/include/uapi/linux/raid/md_p.h
++++ b/include/uapi/linux/raid/md_p.h
+@@ -233,7 +233,7 @@ struct mdp_superblock_1 {
+ 	char	set_name[32];	/* set and interpreted by user-space */
+ 
+ 	__le64	ctime;		/* lo 40 bits are seconds, top 24 are microseconds or 0*/
+-	__le32	level;		/* 0,1,4,5 */
++	__le32	level;		/* 0,1,4,5, -1 (linear) */
+ 	__le32	layout;		/* only for raid5 and raid10 currently */
+ 	__le64	size;		/* used size of component devices, in 512byte sectors */
+ 
+diff --git a/include/uapi/linux/raid/md_u.h b/include/uapi/linux/raid/md_u.h
+index 7be89a4906e73e..a893010735fbad 100644
+--- a/include/uapi/linux/raid/md_u.h
++++ b/include/uapi/linux/raid/md_u.h
+@@ -103,6 +103,8 @@ typedef struct mdu_array_info_s {
+ 
+ } mdu_array_info_t;
+ 
++#define LEVEL_LINEAR		(-1)
++
+ /* we need a value for 'no level specified' and 0
+  * means 'raid0', so we need something else.  This is
+  * for internal use only
+diff --git a/include/ufs/ufs.h b/include/ufs/ufs.h
+index e594abe5d05fed..f0c6111160e7af 100644
+--- a/include/ufs/ufs.h
++++ b/include/ufs/ufs.h
+@@ -386,8 +386,8 @@ enum {
+ 
+ /* Possible values for dExtendedUFSFeaturesSupport */
+ enum {
+-	UFS_DEV_LOW_TEMP_NOTIF		= BIT(4),
+-	UFS_DEV_HIGH_TEMP_NOTIF		= BIT(5),
++	UFS_DEV_HIGH_TEMP_NOTIF		= BIT(4),
++	UFS_DEV_LOW_TEMP_NOTIF		= BIT(5),
+ 	UFS_DEV_EXT_TEMP_NOTIF		= BIT(6),
+ 	UFS_DEV_HPB_SUPPORT		= BIT(7),
+ 	UFS_DEV_WRITE_BOOSTER_SUP	= BIT(8),
+diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
+index 20c5374e922ef5..d5e43a1dcff226 100644
+--- a/include/ufs/ufshcd.h
++++ b/include/ufs/ufshcd.h
+@@ -1297,7 +1297,6 @@ static inline void ufshcd_rmwl(struct ufs_hba *hba, u32 mask, u32 val, u32 reg)
+ void ufshcd_enable_irq(struct ufs_hba *hba);
+ void ufshcd_disable_irq(struct ufs_hba *hba);
+ int ufshcd_alloc_host(struct device *, struct ufs_hba **);
+-void ufshcd_dealloc_host(struct ufs_hba *);
+ int ufshcd_hba_enable(struct ufs_hba *hba);
+ int ufshcd_init(struct ufs_hba *, void __iomem *, unsigned int);
+ int ufshcd_link_recovery(struct ufs_hba *hba);
+diff --git a/io_uring/net.c b/io_uring/net.c
+index 7f549be9abd1e6..3974c417fe2644 100644
+--- a/io_uring/net.c
++++ b/io_uring/net.c
+@@ -1697,6 +1697,11 @@ int io_connect(struct io_kiocb *req, unsigned int issue_flags)
+ 	int ret;
+ 	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
+ 
++	if (unlikely(req->flags & REQ_F_FAIL)) {
++		ret = -ECONNRESET;
++		goto out;
++	}
++
+ 	file_flags = force_nonblock ? O_NONBLOCK : 0;
+ 
+ 	ret = __sys_connect_file(req->file, &io->addr, connect->addr_len,
+diff --git a/io_uring/poll.c b/io_uring/poll.c
+index 1f63b60e85e7c0..b93e9ebdd87c8f 100644
+--- a/io_uring/poll.c
++++ b/io_uring/poll.c
+@@ -315,6 +315,8 @@ static int io_poll_check_events(struct io_kiocb *req, struct io_tw_state *ts)
+ 				return IOU_POLL_REISSUE;
+ 			}
+ 		}
++		if (unlikely(req->cqe.res & EPOLLERR))
++			req_set_fail(req);
+ 		if (req->apoll_events & EPOLLONESHOT)
+ 			return IOU_POLL_DONE;
+ 
+@@ -357,8 +359,10 @@ void io_poll_task_func(struct io_kiocb *req, struct io_tw_state *ts)
+ 
+ 	ret = io_poll_check_events(req, ts);
+ 	if (ret == IOU_POLL_NO_ACTION) {
++		io_kbuf_recycle(req, 0);
+ 		return;
+ 	} else if (ret == IOU_POLL_REQUEUE) {
++		io_kbuf_recycle(req, 0);
+ 		__io_poll_execute(req, 0);
+ 		return;
+ 	}
+diff --git a/kernel/locking/test-ww_mutex.c b/kernel/locking/test-ww_mutex.c
+index 10a5736a21c222..b5c2a2de457888 100644
+--- a/kernel/locking/test-ww_mutex.c
++++ b/kernel/locking/test-ww_mutex.c
+@@ -402,7 +402,7 @@ static inline u32 prandom_u32_below(u32 ceil)
+ static int *get_random_order(int count)
+ {
+ 	int *order;
+-	int n, r, tmp;
++	int n, r;
+ 
+ 	order = kmalloc_array(count, sizeof(*order), GFP_KERNEL);
+ 	if (!order)
+@@ -413,11 +413,8 @@ static int *get_random_order(int count)
+ 
+ 	for (n = count - 1; n > 1; n--) {
+ 		r = prandom_u32_below(n + 1);
+-		if (r != n) {
+-			tmp = order[n];
+-			order[n] = order[r];
+-			order[r] = tmp;
+-		}
++		if (r != n)
++			swap(order[n], order[r]);
+ 	}
+ 
+ 	return order;
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index 7530df62ff7cbc..3b75f6e8410b9d 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -523,7 +523,7 @@ static struct latched_seq clear_seq = {
+ /* record buffer */
+ #define LOG_ALIGN __alignof__(unsigned long)
+ #define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT)
+-#define LOG_BUF_LEN_MAX (u32)(1 << 31)
++#define LOG_BUF_LEN_MAX ((u32)1 << 31)
+ static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN);
+ static char *log_buf = __log_buf;
+ static u32 log_buf_len = __LOG_BUF_LEN;
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index aba41c69f09c42..5d67f41d05d40b 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -766,13 +766,15 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
+ #endif
+ #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
+ 	if (static_key_false((&paravirt_steal_rq_enabled))) {
+-		steal = paravirt_steal_clock(cpu_of(rq));
++		u64 prev_steal;
++
++		steal = prev_steal = paravirt_steal_clock(cpu_of(rq));
+ 		steal -= rq->prev_steal_time_rq;
+ 
+ 		if (unlikely(steal > delta))
+ 			steal = delta;
+ 
+-		rq->prev_steal_time_rq += steal;
++		rq->prev_steal_time_rq = prev_steal;
+ 		delta -= steal;
+ 	}
+ #endif
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 65e7be64487202..ddc096d6b0c203 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -5481,6 +5481,15 @@ static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
+ static void set_delayed(struct sched_entity *se)
+ {
+ 	se->sched_delayed = 1;
++
++	/*
++	 * Delayed se of cfs_rq have no tasks queued on them.
++	 * Do not adjust h_nr_runnable since dequeue_entities()
++	 * will account it for blocked tasks.
++	 */
++	if (!entity_is_task(se))
++		return;
++
+ 	for_each_sched_entity(se) {
+ 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
+ 
+@@ -5493,6 +5502,16 @@ static void set_delayed(struct sched_entity *se)
+ static void clear_delayed(struct sched_entity *se)
+ {
+ 	se->sched_delayed = 0;
++
++	/*
++	 * Delayed se of cfs_rq have no tasks queued on them.
++	 * Do not adjust h_nr_runnable since a dequeue has
++	 * already accounted for it or an enqueue of a task
++	 * below it will account for it in enqueue_task_fair().
++	 */
++	if (!entity_is_task(se))
++		return;
++
+ 	for_each_sched_entity(se) {
+ 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
+ 
+diff --git a/kernel/seccomp.c b/kernel/seccomp.c
+index 385d48293a5fa1..0cd1f8b5a102ee 100644
+--- a/kernel/seccomp.c
++++ b/kernel/seccomp.c
+@@ -749,6 +749,15 @@ static bool seccomp_is_const_allow(struct sock_fprog_kern *fprog,
+ 	if (WARN_ON_ONCE(!fprog))
+ 		return false;
+ 
++	/* Our single exception to filtering. */
++#ifdef __NR_uretprobe
++#ifdef SECCOMP_ARCH_COMPAT
++	if (sd->arch == SECCOMP_ARCH_NATIVE)
++#endif
++		if (sd->nr == __NR_uretprobe)
++			return true;
++#endif
++
+ 	for (pc = 0; pc < fprog->len; pc++) {
+ 		struct sock_filter *insn = &fprog->filter[pc];
+ 		u16 code = insn->code;
+@@ -1023,6 +1032,9 @@ static inline void seccomp_log(unsigned long syscall, long signr, u32 action,
+  */
+ static const int mode1_syscalls[] = {
+ 	__NR_seccomp_read, __NR_seccomp_write, __NR_seccomp_exit, __NR_seccomp_sigreturn,
++#ifdef __NR_uretprobe
++	__NR_uretprobe,
++#endif
+ 	-1, /* negative terminated */
+ };
+ 
+diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
+index ee20f5032a0366..d116c28564f26c 100644
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -58,6 +58,8 @@
+ #define HRTIMER_ACTIVE_SOFT	(HRTIMER_ACTIVE_HARD << MASK_SHIFT)
+ #define HRTIMER_ACTIVE_ALL	(HRTIMER_ACTIVE_SOFT | HRTIMER_ACTIVE_HARD)
+ 
++static void retrigger_next_event(void *arg);
++
+ /*
+  * The timer bases:
+  *
+@@ -111,7 +113,8 @@ DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
+ 			.clockid = CLOCK_TAI,
+ 			.get_time = &ktime_get_clocktai,
+ 		},
+-	}
++	},
++	.csd = CSD_INIT(retrigger_next_event, NULL)
+ };
+ 
+ static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
+@@ -124,6 +127,14 @@ static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
+ 	[CLOCK_TAI]		= HRTIMER_BASE_TAI,
+ };
+ 
++static inline bool hrtimer_base_is_online(struct hrtimer_cpu_base *base)
++{
++	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
++		return true;
++	else
++		return likely(base->online);
++}
++
+ /*
+  * Functions and macros which are different for UP/SMP systems are kept in a
+  * single place
+@@ -183,27 +194,54 @@ struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
+ }
+ 
+ /*
+- * We do not migrate the timer when it is expiring before the next
+- * event on the target cpu. When high resolution is enabled, we cannot
+- * reprogram the target cpu hardware and we would cause it to fire
+- * late. To keep it simple, we handle the high resolution enabled and
+- * disabled case similar.
++ * Check if the elected target is suitable considering its next
++ * event and the hotplug state of the current CPU.
++ *
++ * If the elected target is remote and its next event is after the timer
++ * to queue, then a remote reprogram is necessary. However there is no
++ * guarantee the IPI handling the operation would arrive in time to meet
++ * the high resolution deadline. In this case the local CPU becomes a
++ * preferred target, unless it is offline.
++ *
++ * High and low resolution modes are handled the same way for simplicity.
+  *
+  * Called with cpu_base->lock of target cpu held.
+  */
+-static int
+-hrtimer_check_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base)
++static bool hrtimer_suitable_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base,
++				    struct hrtimer_cpu_base *new_cpu_base,
++				    struct hrtimer_cpu_base *this_cpu_base)
+ {
+ 	ktime_t expires;
+ 
++	/*
++	 * The local CPU clockevent can be reprogrammed. Also get_target_base()
++	 * guarantees it is online.
++	 */
++	if (new_cpu_base == this_cpu_base)
++		return true;
++
++	/*
++	 * The offline local CPU can't be the default target if the
++	 * next remote target event is after this timer. Keep the
++	 * elected new base. An IPI will be issued to reprogram
++	 * it as a last resort.
++	 */
++	if (!hrtimer_base_is_online(this_cpu_base))
++		return true;
++
+ 	expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset);
+-	return expires < new_base->cpu_base->expires_next;
++
++	return expires >= new_base->cpu_base->expires_next;
+ }
+ 
+-static inline
+-struct hrtimer_cpu_base *get_target_base(struct hrtimer_cpu_base *base,
+-					 int pinned)
++static inline struct hrtimer_cpu_base *get_target_base(struct hrtimer_cpu_base *base, int pinned)
+ {
++	if (!hrtimer_base_is_online(base)) {
++		int cpu = cpumask_any_and(cpu_online_mask, housekeeping_cpumask(HK_TYPE_TIMER));
++
++		return &per_cpu(hrtimer_bases, cpu);
++	}
++
+ #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
+ 	if (static_branch_likely(&timers_migration_enabled) && !pinned)
+ 		return &per_cpu(hrtimer_bases, get_nohz_timer_target());
+@@ -254,8 +292,8 @@ switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
+ 		raw_spin_unlock(&base->cpu_base->lock);
+ 		raw_spin_lock(&new_base->cpu_base->lock);
+ 
+-		if (new_cpu_base != this_cpu_base &&
+-		    hrtimer_check_target(timer, new_base)) {
++		if (!hrtimer_suitable_target(timer, new_base, new_cpu_base,
++					     this_cpu_base)) {
+ 			raw_spin_unlock(&new_base->cpu_base->lock);
+ 			raw_spin_lock(&base->cpu_base->lock);
+ 			new_cpu_base = this_cpu_base;
+@@ -264,8 +302,7 @@ switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
+ 		}
+ 		WRITE_ONCE(timer->base, new_base);
+ 	} else {
+-		if (new_cpu_base != this_cpu_base &&
+-		    hrtimer_check_target(timer, new_base)) {
++		if (!hrtimer_suitable_target(timer, new_base,  new_cpu_base, this_cpu_base)) {
+ 			new_cpu_base = this_cpu_base;
+ 			goto again;
+ 		}
+@@ -725,8 +762,6 @@ static inline int hrtimer_is_hres_enabled(void)
+ 	return hrtimer_hres_enabled;
+ }
+ 
+-static void retrigger_next_event(void *arg);
+-
+ /*
+  * Switch to high resolution mode
+  */
+@@ -1215,6 +1250,7 @@ static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
+ 				    u64 delta_ns, const enum hrtimer_mode mode,
+ 				    struct hrtimer_clock_base *base)
+ {
++	struct hrtimer_cpu_base *this_cpu_base = this_cpu_ptr(&hrtimer_bases);
+ 	struct hrtimer_clock_base *new_base;
+ 	bool force_local, first;
+ 
+@@ -1226,9 +1262,15 @@ static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
+ 	 * and enforce reprogramming after it is queued no matter whether
+ 	 * it is the new first expiring timer again or not.
+ 	 */
+-	force_local = base->cpu_base == this_cpu_ptr(&hrtimer_bases);
++	force_local = base->cpu_base == this_cpu_base;
+ 	force_local &= base->cpu_base->next_timer == timer;
+ 
++	/*
++	 * Don't force local queuing if this enqueue happens on an unplugged
++	 * CPU after hrtimer_cpu_dying() has been invoked.
++	 */
++	force_local &= this_cpu_base->online;
++
+ 	/*
+ 	 * Remove an active timer from the queue. In case it is not queued
+ 	 * on the current CPU, make sure that remove_hrtimer() updates the
+@@ -1258,8 +1300,27 @@ static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
+ 	}
+ 
+ 	first = enqueue_hrtimer(timer, new_base, mode);
+-	if (!force_local)
+-		return first;
++	if (!force_local) {
++		/*
++		 * If the current CPU base is online, then the timer is
++		 * never queued on a remote CPU if it would be the first
++		 * expiring timer there.
++		 */
++		if (hrtimer_base_is_online(this_cpu_base))
++			return first;
++
++		/*
++		 * Timer was enqueued remote because the current base is
++		 * already offline. If the timer is the first to expire,
++		 * kick the remote CPU to reprogram the clock event.
++		 */
++		if (first) {
++			struct hrtimer_cpu_base *new_cpu_base = new_base->cpu_base;
++
++			smp_call_function_single_async(new_cpu_base->cpu, &new_cpu_base->csd);
++		}
++		return 0;
++	}
+ 
+ 	/*
+ 	 * Timer was forced to stay on the current CPU to avoid
+diff --git a/kernel/time/timer_migration.c b/kernel/time/timer_migration.c
+index 371a62a749aad3..72538baa7a1fb0 100644
+--- a/kernel/time/timer_migration.c
++++ b/kernel/time/timer_migration.c
+@@ -1668,6 +1668,9 @@ static int tmigr_setup_groups(unsigned int cpu, unsigned int node)
+ 
+ 	} while (i < tmigr_hierarchy_levels);
+ 
++	/* Assert single root */
++	WARN_ON_ONCE(!err && !group->parent && !list_is_singular(&tmigr_level_list[top]));
++
+ 	while (i > 0) {
+ 		group = stack[--i];
+ 
+@@ -1709,7 +1712,12 @@ static int tmigr_setup_groups(unsigned int cpu, unsigned int node)
+ 		WARN_ON_ONCE(top == 0);
+ 
+ 		lvllist = &tmigr_level_list[top];
+-		if (group->num_children == 1 && list_is_singular(lvllist)) {
++
++		/*
++		 * Newly created root level should have accounted the upcoming
++		 * CPU's child group and pre-accounted the old root.
++		 */
++		if (group->num_children == 2 && list_is_singular(lvllist)) {
+ 			/*
+ 			 * The target CPU must never do the prepare work, except
+ 			 * on early boot when the boot CPU is the target. Otherwise
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 703978b2d557d7..0f8f3ffc6f0904 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -4398,8 +4398,13 @@ rb_reserve_next_event(struct trace_buffer *buffer,
+ 	int nr_loops = 0;
+ 	int add_ts_default;
+ 
+-	/* ring buffer does cmpxchg, make sure it is safe in NMI context */
+-	if (!IS_ENABLED(CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG) &&
++	/*
++	 * ring buffer does cmpxchg as well as atomic64 operations
++	 * (which some archs implement with locking), make sure this
++	 * is safe in NMI context
++	 */
++	if ((!IS_ENABLED(CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG) ||
++	     IS_ENABLED(CONFIG_GENERIC_ATOMIC64)) &&
+ 	    (unlikely(in_nmi()))) {
+ 		return NULL;
+ 	}
+@@ -7059,7 +7064,7 @@ static int __rb_map_vma(struct ring_buffer_per_cpu *cpu_buffer,
+ 	}
+ 
+ 	while (p < nr_pages) {
+-		struct page *page = virt_to_page((void *)cpu_buffer->subbuf_ids[s]);
++		struct page *page;
+ 		int off = 0;
+ 
+ 		if (WARN_ON_ONCE(s >= nr_subbufs)) {
+@@ -7067,6 +7072,8 @@ static int __rb_map_vma(struct ring_buffer_per_cpu *cpu_buffer,
+ 			goto out;
+ 		}
+ 
++		page = virt_to_page((void *)cpu_buffer->subbuf_ids[s]);
++
+ 		for (; off < (1 << (subbuf_order)); off++, page++) {
+ 			if (p >= nr_pages)
+ 				break;
+diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
+index a569daaac4c4ff..ebb61ddca749d8 100644
+--- a/kernel/trace/trace_functions_graph.c
++++ b/kernel/trace/trace_functions_graph.c
+@@ -150,7 +150,7 @@ int trace_graph_entry(struct ftrace_graph_ent *trace,
+ 	 * returning from the function.
+ 	 */
+ 	if (ftrace_graph_notrace_addr(trace->func)) {
+-		*task_var |= TRACE_GRAPH_NOTRACE_BIT;
++		*task_var |= TRACE_GRAPH_NOTRACE;
+ 		/*
+ 		 * Need to return 1 to have the return called
+ 		 * that will clear the NOTRACE bit.
+diff --git a/kernel/trace/trace_osnoise.c b/kernel/trace/trace_osnoise.c
+index a50ed23bee777b..032fdeba37d350 100644
+--- a/kernel/trace/trace_osnoise.c
++++ b/kernel/trace/trace_osnoise.c
+@@ -1235,6 +1235,8 @@ static void trace_sched_migrate_callback(void *data, struct task_struct *p, int
+ 	}
+ }
+ 
++static bool monitor_enabled;
++
+ static int register_migration_monitor(void)
+ {
+ 	int ret = 0;
+@@ -1243,16 +1245,25 @@ static int register_migration_monitor(void)
+ 	 * Timerlat thread migration check is only required when running timerlat in user-space.
+ 	 * Thus, enable callback only if timerlat is set with no workload.
+ 	 */
+-	if (timerlat_enabled() && !test_bit(OSN_WORKLOAD, &osnoise_options))
++	if (timerlat_enabled() && !test_bit(OSN_WORKLOAD, &osnoise_options)) {
++		if (WARN_ON_ONCE(monitor_enabled))
++			return 0;
++
+ 		ret = register_trace_sched_migrate_task(trace_sched_migrate_callback, NULL);
++		if (!ret)
++			monitor_enabled = true;
++	}
+ 
+ 	return ret;
+ }
+ 
+ static void unregister_migration_monitor(void)
+ {
+-	if (timerlat_enabled() && !test_bit(OSN_WORKLOAD, &osnoise_options))
+-		unregister_trace_sched_migrate_task(trace_sched_migrate_callback, NULL);
++	if (!monitor_enabled)
++		return;
++
++	unregister_trace_sched_migrate_task(trace_sched_migrate_callback, NULL);
++	monitor_enabled = false;
+ }
+ #else
+ static int register_migration_monitor(void)
+diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
+index 3f9c238bb58ea3..e48375fe5a50ce 100644
+--- a/lib/Kconfig.debug
++++ b/lib/Kconfig.debug
+@@ -1511,7 +1511,7 @@ config LOCKDEP_SMALL
+ config LOCKDEP_BITS
+ 	int "Bitsize for MAX_LOCKDEP_ENTRIES"
+ 	depends on LOCKDEP && !LOCKDEP_SMALL
+-	range 10 30
++	range 10 24
+ 	default 15
+ 	help
+ 	  Try increasing this value if you hit "BUG: MAX_LOCKDEP_ENTRIES too low!" message.
+@@ -1527,7 +1527,7 @@ config LOCKDEP_CHAINS_BITS
+ config LOCKDEP_STACK_TRACE_BITS
+ 	int "Bitsize for MAX_STACK_TRACE_ENTRIES"
+ 	depends on LOCKDEP && !LOCKDEP_SMALL
+-	range 10 30
++	range 10 26
+ 	default 19
+ 	help
+ 	  Try increasing this value if you hit "BUG: MAX_STACK_TRACE_ENTRIES too low!" message.
+@@ -1535,7 +1535,7 @@ config LOCKDEP_STACK_TRACE_BITS
+ config LOCKDEP_STACK_TRACE_HASH_BITS
+ 	int "Bitsize for STACK_TRACE_HASH_SIZE"
+ 	depends on LOCKDEP && !LOCKDEP_SMALL
+-	range 10 30
++	range 10 26
+ 	default 14
+ 	help
+ 	  Try increasing this value if you need large STACK_TRACE_HASH_SIZE.
+@@ -1543,7 +1543,7 @@ config LOCKDEP_STACK_TRACE_HASH_BITS
+ config LOCKDEP_CIRCULAR_QUEUE_BITS
+ 	int "Bitsize for elements in circular_queue struct"
+ 	depends on LOCKDEP
+-	range 10 30
++	range 10 26
+ 	default 12
+ 	help
+ 	  Try increasing this value if you hit "lockdep bfs error:-1" warning due to __cq_enqueue() failure.
+diff --git a/lib/atomic64.c b/lib/atomic64.c
+index caf895789a1ee6..1a72bba36d2430 100644
+--- a/lib/atomic64.c
++++ b/lib/atomic64.c
+@@ -25,15 +25,15 @@
+  * Ensure each lock is in a separate cacheline.
+  */
+ static union {
+-	raw_spinlock_t lock;
++	arch_spinlock_t lock;
+ 	char pad[L1_CACHE_BYTES];
+ } atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp = {
+ 	[0 ... (NR_LOCKS - 1)] = {
+-		.lock =  __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock),
++		.lock =  __ARCH_SPIN_LOCK_UNLOCKED,
+ 	},
+ };
+ 
+-static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
++static inline arch_spinlock_t *lock_addr(const atomic64_t *v)
+ {
+ 	unsigned long addr = (unsigned long) v;
+ 
+@@ -45,12 +45,14 @@ static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
+ s64 generic_atomic64_read(const atomic64_t *v)
+ {
+ 	unsigned long flags;
+-	raw_spinlock_t *lock = lock_addr(v);
++	arch_spinlock_t *lock = lock_addr(v);
+ 	s64 val;
+ 
+-	raw_spin_lock_irqsave(lock, flags);
++	local_irq_save(flags);
++	arch_spin_lock(lock);
+ 	val = v->counter;
+-	raw_spin_unlock_irqrestore(lock, flags);
++	arch_spin_unlock(lock);
++	local_irq_restore(flags);
+ 	return val;
+ }
+ EXPORT_SYMBOL(generic_atomic64_read);
+@@ -58,11 +60,13 @@ EXPORT_SYMBOL(generic_atomic64_read);
+ void generic_atomic64_set(atomic64_t *v, s64 i)
+ {
+ 	unsigned long flags;
+-	raw_spinlock_t *lock = lock_addr(v);
++	arch_spinlock_t *lock = lock_addr(v);
+ 
+-	raw_spin_lock_irqsave(lock, flags);
++	local_irq_save(flags);
++	arch_spin_lock(lock);
+ 	v->counter = i;
+-	raw_spin_unlock_irqrestore(lock, flags);
++	arch_spin_unlock(lock);
++	local_irq_restore(flags);
+ }
+ EXPORT_SYMBOL(generic_atomic64_set);
+ 
+@@ -70,11 +74,13 @@ EXPORT_SYMBOL(generic_atomic64_set);
+ void generic_atomic64_##op(s64 a, atomic64_t *v)			\
+ {									\
+ 	unsigned long flags;						\
+-	raw_spinlock_t *lock = lock_addr(v);				\
++	arch_spinlock_t *lock = lock_addr(v);				\
+ 									\
+-	raw_spin_lock_irqsave(lock, flags);				\
++	local_irq_save(flags);						\
++	arch_spin_lock(lock);						\
+ 	v->counter c_op a;						\
+-	raw_spin_unlock_irqrestore(lock, flags);			\
++	arch_spin_unlock(lock);						\
++	local_irq_restore(flags);					\
+ }									\
+ EXPORT_SYMBOL(generic_atomic64_##op);
+ 
+@@ -82,12 +88,14 @@ EXPORT_SYMBOL(generic_atomic64_##op);
+ s64 generic_atomic64_##op##_return(s64 a, atomic64_t *v)		\
+ {									\
+ 	unsigned long flags;						\
+-	raw_spinlock_t *lock = lock_addr(v);				\
++	arch_spinlock_t *lock = lock_addr(v);				\
+ 	s64 val;							\
+ 									\
+-	raw_spin_lock_irqsave(lock, flags);				\
++	local_irq_save(flags);						\
++	arch_spin_lock(lock);						\
+ 	val = (v->counter c_op a);					\
+-	raw_spin_unlock_irqrestore(lock, flags);			\
++	arch_spin_unlock(lock);						\
++	local_irq_restore(flags);					\
+ 	return val;							\
+ }									\
+ EXPORT_SYMBOL(generic_atomic64_##op##_return);
+@@ -96,13 +104,15 @@ EXPORT_SYMBOL(generic_atomic64_##op##_return);
+ s64 generic_atomic64_fetch_##op(s64 a, atomic64_t *v)			\
+ {									\
+ 	unsigned long flags;						\
+-	raw_spinlock_t *lock = lock_addr(v);				\
++	arch_spinlock_t *lock = lock_addr(v);				\
+ 	s64 val;							\
+ 									\
+-	raw_spin_lock_irqsave(lock, flags);				\
++	local_irq_save(flags);						\
++	arch_spin_lock(lock);						\
+ 	val = v->counter;						\
+ 	v->counter c_op a;						\
+-	raw_spin_unlock_irqrestore(lock, flags);			\
++	arch_spin_unlock(lock);						\
++	local_irq_restore(flags);					\
+ 	return val;							\
+ }									\
+ EXPORT_SYMBOL(generic_atomic64_fetch_##op);
+@@ -131,14 +141,16 @@ ATOMIC64_OPS(xor, ^=)
+ s64 generic_atomic64_dec_if_positive(atomic64_t *v)
+ {
+ 	unsigned long flags;
+-	raw_spinlock_t *lock = lock_addr(v);
++	arch_spinlock_t *lock = lock_addr(v);
+ 	s64 val;
+ 
+-	raw_spin_lock_irqsave(lock, flags);
++	local_irq_save(flags);
++	arch_spin_lock(lock);
+ 	val = v->counter - 1;
+ 	if (val >= 0)
+ 		v->counter = val;
+-	raw_spin_unlock_irqrestore(lock, flags);
++	arch_spin_unlock(lock);
++	local_irq_restore(flags);
+ 	return val;
+ }
+ EXPORT_SYMBOL(generic_atomic64_dec_if_positive);
+@@ -146,14 +158,16 @@ EXPORT_SYMBOL(generic_atomic64_dec_if_positive);
+ s64 generic_atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
+ {
+ 	unsigned long flags;
+-	raw_spinlock_t *lock = lock_addr(v);
++	arch_spinlock_t *lock = lock_addr(v);
+ 	s64 val;
+ 
+-	raw_spin_lock_irqsave(lock, flags);
++	local_irq_save(flags);
++	arch_spin_lock(lock);
+ 	val = v->counter;
+ 	if (val == o)
+ 		v->counter = n;
+-	raw_spin_unlock_irqrestore(lock, flags);
++	arch_spin_unlock(lock);
++	local_irq_restore(flags);
+ 	return val;
+ }
+ EXPORT_SYMBOL(generic_atomic64_cmpxchg);
+@@ -161,13 +175,15 @@ EXPORT_SYMBOL(generic_atomic64_cmpxchg);
+ s64 generic_atomic64_xchg(atomic64_t *v, s64 new)
+ {
+ 	unsigned long flags;
+-	raw_spinlock_t *lock = lock_addr(v);
++	arch_spinlock_t *lock = lock_addr(v);
+ 	s64 val;
+ 
+-	raw_spin_lock_irqsave(lock, flags);
++	local_irq_save(flags);
++	arch_spin_lock(lock);
+ 	val = v->counter;
+ 	v->counter = new;
+-	raw_spin_unlock_irqrestore(lock, flags);
++	arch_spin_unlock(lock);
++	local_irq_restore(flags);
+ 	return val;
+ }
+ EXPORT_SYMBOL(generic_atomic64_xchg);
+@@ -175,14 +191,16 @@ EXPORT_SYMBOL(generic_atomic64_xchg);
+ s64 generic_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+ {
+ 	unsigned long flags;
+-	raw_spinlock_t *lock = lock_addr(v);
++	arch_spinlock_t *lock = lock_addr(v);
+ 	s64 val;
+ 
+-	raw_spin_lock_irqsave(lock, flags);
++	local_irq_save(flags);
++	arch_spin_lock(lock);
+ 	val = v->counter;
+ 	if (val != u)
+ 		v->counter += a;
+-	raw_spin_unlock_irqrestore(lock, flags);
++	arch_spin_unlock(lock);
++	local_irq_restore(flags);
+ 
+ 	return val;
+ }
+diff --git a/lib/maple_tree.c b/lib/maple_tree.c
+index 0cbe913634be4b..8d73ccf66f3aa0 100644
+--- a/lib/maple_tree.c
++++ b/lib/maple_tree.c
+@@ -1849,11 +1849,11 @@ static inline int mab_no_null_split(struct maple_big_node *b_node,
+  * Return: The first split location.  The middle split is set in @mid_split.
+  */
+ static inline int mab_calc_split(struct ma_state *mas,
+-	 struct maple_big_node *bn, unsigned char *mid_split, unsigned long min)
++	 struct maple_big_node *bn, unsigned char *mid_split)
+ {
+ 	unsigned char b_end = bn->b_end;
+ 	int split = b_end / 2; /* Assume equal split. */
+-	unsigned char slot_min, slot_count = mt_slots[bn->type];
++	unsigned char slot_count = mt_slots[bn->type];
+ 
+ 	/*
+ 	 * To support gap tracking, all NULL entries are kept together and a node cannot
+@@ -1886,18 +1886,7 @@ static inline int mab_calc_split(struct ma_state *mas,
+ 		split = b_end / 3;
+ 		*mid_split = split * 2;
+ 	} else {
+-		slot_min = mt_min_slots[bn->type];
+-
+ 		*mid_split = 0;
+-		/*
+-		 * Avoid having a range less than the slot count unless it
+-		 * causes one node to be deficient.
+-		 * NOTE: mt_min_slots is 1 based, b_end and split are zero.
+-		 */
+-		while ((split < slot_count - 1) &&
+-		       ((bn->pivot[split] - min) < slot_count - 1) &&
+-		       (b_end - split > slot_min))
+-			split++;
+ 	}
+ 
+ 	/* Avoid ending a node on a NULL entry */
+@@ -2366,7 +2355,7 @@ static inline struct maple_enode
+ static inline unsigned char mas_mab_to_node(struct ma_state *mas,
+ 	struct maple_big_node *b_node, struct maple_enode **left,
+ 	struct maple_enode **right, struct maple_enode **middle,
+-	unsigned char *mid_split, unsigned long min)
++	unsigned char *mid_split)
+ {
+ 	unsigned char split = 0;
+ 	unsigned char slot_count = mt_slots[b_node->type];
+@@ -2379,7 +2368,7 @@ static inline unsigned char mas_mab_to_node(struct ma_state *mas,
+ 	if (b_node->b_end < slot_count) {
+ 		split = b_node->b_end;
+ 	} else {
+-		split = mab_calc_split(mas, b_node, mid_split, min);
++		split = mab_calc_split(mas, b_node, mid_split);
+ 		*right = mas_new_ma_node(mas, b_node);
+ 	}
+ 
+@@ -2866,7 +2855,7 @@ static void mas_spanning_rebalance(struct ma_state *mas,
+ 		mast->bn->b_end--;
+ 		mast->bn->type = mte_node_type(mast->orig_l->node);
+ 		split = mas_mab_to_node(mas, mast->bn, &left, &right, &middle,
+-					&mid_split, mast->orig_l->min);
++					&mid_split);
+ 		mast_set_split_parents(mast, left, middle, right, split,
+ 				       mid_split);
+ 		mast_cp_to_nodes(mast, left, middle, right, split, mid_split);
+@@ -3357,7 +3346,7 @@ static void mas_split(struct ma_state *mas, struct maple_big_node *b_node)
+ 		if (mas_push_data(mas, height, &mast, false))
+ 			break;
+ 
+-		split = mab_calc_split(mas, b_node, &mid_split, prev_l_mas.min);
++		split = mab_calc_split(mas, b_node, &mid_split);
+ 		mast_split_data(&mast, mas, split);
+ 		/*
+ 		 * Usually correct, mab_mas_cp in the above call overwrites
+diff --git a/mm/compaction.c b/mm/compaction.c
+index a2b16b08cbbff7..384e4672998e55 100644
+--- a/mm/compaction.c
++++ b/mm/compaction.c
+@@ -630,7 +630,8 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
+ 		if (PageCompound(page)) {
+ 			const unsigned int order = compound_order(page);
+ 
+-			if (blockpfn + (1UL << order) <= end_pfn) {
++			if ((order <= MAX_PAGE_ORDER) &&
++			    (blockpfn + (1UL << order) <= end_pfn)) {
+ 				blockpfn += (1UL << order) - 1;
+ 				page += (1UL << order) - 1;
+ 				nr_scanned += (1UL << order) - 1;
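
The added order bound matters because isolate_freepages_block() reads compound_order() without the zone lock, so the value can be transiently garbage, and shifting 1UL by 64 or more is undefined behavior, which would make the existing end_pfn test meaningless. A standalone sketch of the bound-before-shift pattern (the MAX_ORDER value here is illustrative, not the kernel's MAX_PAGE_ORDER):

	#include <stdio.h>

	#define MAX_ORDER 10	/* illustrative bound, not the kernel's */

	/* Return the page span for a trusted order, or 0 for a bogus one.
	 * The bound check must come first: 1UL << 64 is undefined behavior,
	 * so a range test on the result could not be trusted without it. */
	static unsigned long span_of(unsigned int order)
	{
		if (order > MAX_ORDER)
			return 0;
		return 1UL << order;
	}

	int main(void)
	{
		printf("%lu\n", span_of(3));	/* 8 */
		printf("%lu\n", span_of(200));	/* 0: rejected before the shift */
		return 0;
	}
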
+diff --git a/mm/gup.c b/mm/gup.c
+index 7053f8114e0127..44c536904a83bb 100644
+--- a/mm/gup.c
++++ b/mm/gup.c
+@@ -2323,13 +2323,13 @@ static void pofs_unpin(struct pages_or_folios *pofs)
+ /*
+  * Returns the number of collected folios. Return value is always >= 0.
+  */
+-static unsigned long collect_longterm_unpinnable_folios(
++static void collect_longterm_unpinnable_folios(
+ 		struct list_head *movable_folio_list,
+ 		struct pages_or_folios *pofs)
+ {
+-	unsigned long i, collected = 0;
+ 	struct folio *prev_folio = NULL;
+ 	bool drain_allow = true;
++	unsigned long i;
+ 
+ 	for (i = 0; i < pofs->nr_entries; i++) {
+ 		struct folio *folio = pofs_get_folio(pofs, i);
+@@ -2341,8 +2341,6 @@ static unsigned long collect_longterm_unpinnable_folios(
+ 		if (folio_is_longterm_pinnable(folio))
+ 			continue;
+ 
+-		collected++;
+-
+ 		if (folio_is_device_coherent(folio))
+ 			continue;
+ 
+@@ -2364,8 +2362,6 @@ static unsigned long collect_longterm_unpinnable_folios(
+ 				    NR_ISOLATED_ANON + folio_is_file_lru(folio),
+ 				    folio_nr_pages(folio));
+ 	}
+-
+-	return collected;
+ }
+ 
+ /*
+@@ -2442,11 +2438,9 @@ static long
+ check_and_migrate_movable_pages_or_folios(struct pages_or_folios *pofs)
+ {
+ 	LIST_HEAD(movable_folio_list);
+-	unsigned long collected;
+ 
+-	collected = collect_longterm_unpinnable_folios(&movable_folio_list,
+-						       pofs);
+-	if (!collected)
++	collect_longterm_unpinnable_folios(&movable_folio_list, pofs);
++	if (list_empty(&movable_folio_list))
+ 		return 0;
+ 
+ 	return migrate_longterm_unpinnable_folios(&movable_folio_list, pofs);
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 4a8a4f3535caf7..bdee6d3ab0e7e3 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -1394,8 +1394,7 @@ static unsigned long available_huge_pages(struct hstate *h)
+ 
+ static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h,
+ 				struct vm_area_struct *vma,
+-				unsigned long address, int avoid_reserve,
+-				long chg)
++				unsigned long address, long chg)
+ {
+ 	struct folio *folio = NULL;
+ 	struct mempolicy *mpol;
+@@ -1411,10 +1410,6 @@ static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h,
+ 	if (!vma_has_reserves(vma, chg) && !available_huge_pages(h))
+ 		goto err;
+ 
+-	/* If reserves cannot be used, ensure enough pages are in the pool */
+-	if (avoid_reserve && !available_huge_pages(h))
+-		goto err;
+-
+ 	gfp_mask = htlb_alloc_mask(h);
+ 	nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
+ 
+@@ -1430,7 +1425,7 @@ static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h,
+ 		folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
+ 							nid, nodemask);
+ 
+-	if (folio && !avoid_reserve && vma_has_reserves(vma, chg)) {
++	if (folio && vma_has_reserves(vma, chg)) {
+ 		folio_set_hugetlb_restore_reserve(folio);
+ 		h->resv_huge_pages--;
+ 	}
+@@ -3006,17 +3001,6 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
+ 		gbl_chg = hugepage_subpool_get_pages(spool, 1);
+ 		if (gbl_chg < 0)
+ 			goto out_end_reservation;
+-
+-		/*
+-		 * Even though there was no reservation in the region/reserve
+-		 * map, there could be reservations associated with the
+-		 * subpool that can be used.  This would be indicated if the
+-		 * return value of hugepage_subpool_get_pages() is zero.
+-		 * However, if avoid_reserve is specified we still avoid even
+-		 * the subpool reservations.
+-		 */
+-		if (avoid_reserve)
+-			gbl_chg = 1;
+ 	}
+ 
+ 	/* If this allocation is not consuming a reservation, charge it now.
+@@ -3039,7 +3023,7 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
+ 	 * from the global free pool (global change).  gbl_chg == 0 indicates
+ 	 * a reservation exists for the allocation.
+ 	 */
+-	folio = dequeue_hugetlb_folio_vma(h, vma, addr, avoid_reserve, gbl_chg);
++	folio = dequeue_hugetlb_folio_vma(h, vma, addr, gbl_chg);
+ 	if (!folio) {
+ 		spin_unlock_irq(&hugetlb_lock);
+ 		folio = alloc_buddy_hugetlb_folio_with_mpol(h, vma, addr);
+@@ -3287,7 +3271,7 @@ static void __init gather_bootmem_prealloc(void)
+ 		.thread_fn	= gather_bootmem_prealloc_parallel,
+ 		.fn_arg		= NULL,
+ 		.start		= 0,
+-		.size		= num_node_state(N_MEMORY),
++		.size		= nr_node_ids,
+ 		.align		= 1,
+ 		.min_chunk	= 1,
+ 		.max_threads	= num_node_state(N_MEMORY),
+diff --git a/mm/kfence/core.c b/mm/kfence/core.c
+index 67fc321db79b7e..102048821c222a 100644
+--- a/mm/kfence/core.c
++++ b/mm/kfence/core.c
+@@ -21,6 +21,7 @@
+ #include <linux/log2.h>
+ #include <linux/memblock.h>
+ #include <linux/moduleparam.h>
++#include <linux/nodemask.h>
+ #include <linux/notifier.h>
+ #include <linux/panic_notifier.h>
+ #include <linux/random.h>
+@@ -1084,6 +1085,7 @@ void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
+ 	 * properties (e.g. reside in DMAable memory).
+ 	 */
+ 	if ((flags & GFP_ZONEMASK) ||
++	    ((flags & __GFP_THISNODE) && num_online_nodes() > 1) ||
+ 	    (s->flags & (SLAB_CACHE_DMA | SLAB_CACHE_DMA32))) {
+ 		atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_INCOMPAT]);
+ 		return NULL;
+diff --git a/mm/kmemleak.c b/mm/kmemleak.c
+index 5f878ee05ff80b..44bb798423dd39 100644
+--- a/mm/kmemleak.c
++++ b/mm/kmemleak.c
+@@ -1650,7 +1650,7 @@ static void kmemleak_scan(void)
+ 			unsigned long phys = object->pointer;
+ 
+ 			if (PHYS_PFN(phys) < min_low_pfn ||
+-			    PHYS_PFN(phys + object->size) >= max_low_pfn)
++			    PHYS_PFN(phys + object->size) > max_low_pfn)
+ 				__paint_it(object, KMEMLEAK_BLACK);
+ 		}
+ 
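
The one-character change turns the upper bound into a proper half-open comparison: max_low_pfn is the first PFN past low memory, so an object whose end lands exactly on that boundary is still entirely inside the valid range and should not be painted black. A small sketch of the exclusive-end check (PAGE_SHIFT and the sample bounds are illustrative):

	#include <stdio.h>

	#define PAGE_SHIFT 12	/* illustrative */
	#define PFN(addr) ((addr) >> PAGE_SHIFT)

	/* An object spans [start, start + size); max_pfn is an exclusive
	 * bound, so an object ending exactly on the boundary is still fully
	 * inside, and only a strictly greater end PFN is out of range. */
	static int in_range(unsigned long start, unsigned long size,
			    unsigned long min_pfn, unsigned long max_pfn)
	{
		return PFN(start) >= min_pfn && PFN(start + size) <= max_pfn;
	}

	int main(void)
	{
		printf("%d\n", in_range(0xF000, 0x1000, 0, 16));	/* 1: ends on the edge */
		printf("%d\n", in_range(0xF000, 0x2000, 0, 16));	/* 0: spills past it */
		return 0;
	}
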
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index d81d667907448c..77d015d5db0c5b 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -1053,7 +1053,7 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
+ 	struct folio_batch free_folios;
+ 	LIST_HEAD(ret_folios);
+ 	LIST_HEAD(demote_folios);
+-	unsigned int nr_reclaimed = 0;
++	unsigned int nr_reclaimed = 0, nr_demoted = 0;
+ 	unsigned int pgactivate = 0;
+ 	bool do_demote_pass;
+ 	struct swap_iocb *plug = NULL;
+@@ -1522,8 +1522,9 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
+ 	/* 'folio_list' is always empty here */
+ 
+ 	/* Migrate folios selected for demotion */
+-	stat->nr_demoted = demote_folio_list(&demote_folios, pgdat);
+-	nr_reclaimed += stat->nr_demoted;
++	nr_demoted = demote_folio_list(&demote_folios, pgdat);
++	nr_reclaimed += nr_demoted;
++	stat->nr_demoted += nr_demoted;
+ 	/* Folios that could not be demoted are still in @demote_folios */
+ 	if (!list_empty(&demote_folios)) {
+ 		/* Folios which weren't demoted go back on @folio_list */
+diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
+index 3d2553dcdb1b3c..46ea0bee2259f8 100644
+--- a/net/bluetooth/l2cap_sock.c
++++ b/net/bluetooth/l2cap_sock.c
+@@ -710,12 +710,12 @@ static bool l2cap_valid_mtu(struct l2cap_chan *chan, u16 mtu)
+ {
+ 	switch (chan->scid) {
+ 	case L2CAP_CID_ATT:
+-		if (mtu < L2CAP_LE_MIN_MTU)
++		if (mtu && mtu < L2CAP_LE_MIN_MTU)
+ 			return false;
+ 		break;
+ 
+ 	default:
+-		if (mtu < L2CAP_DEFAULT_MIN_MTU)
++		if (mtu && mtu < L2CAP_DEFAULT_MIN_MTU)
+ 			return false;
+ 	}
+ 
+@@ -1888,7 +1888,8 @@ static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock,
+ 	chan = l2cap_chan_create();
+ 	if (!chan) {
+ 		sk_free(sk);
+-		sock->sk = NULL;
++		if (sock)
++			sock->sk = NULL;
+ 		return NULL;
+ 	}
+ 
+diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
+index 7dc315c1658e7d..90c21b3edcd80e 100644
+--- a/net/bluetooth/mgmt.c
++++ b/net/bluetooth/mgmt.c
+@@ -5460,10 +5460,16 @@ static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
+ {
+ 	struct mgmt_rp_remove_adv_monitor rp;
+ 	struct mgmt_pending_cmd *cmd = data;
+-	struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
++	struct mgmt_cp_remove_adv_monitor *cp;
++
++	if (status == -ECANCELED ||
++	    cmd != pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev))
++		return;
+ 
+ 	hci_dev_lock(hdev);
+ 
++	cp = cmd->param;
++
+ 	rp.monitor_handle = cp->monitor_handle;
+ 
+ 	if (!status)
+@@ -5481,6 +5487,10 @@ static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
+ static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
+ {
+ 	struct mgmt_pending_cmd *cmd = data;
++
++	if (cmd != pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev))
++		return -ECANCELED;
++
+ 	struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
+ 	u16 handle = __le16_to_cpu(cp->monitor_handle);
+ 
+diff --git a/net/ethtool/rss.c b/net/ethtool/rss.c
+index e07386275e142d..8aa45f3fdfdf08 100644
+--- a/net/ethtool/rss.c
++++ b/net/ethtool/rss.c
+@@ -107,6 +107,8 @@ rss_prepare_ctx(const struct rss_req_info *request, struct net_device *dev,
+ 	u32 total_size, indir_bytes;
+ 	u8 *rss_config;
+ 
++	data->no_key_fields = !dev->ethtool_ops->rxfh_per_ctx_key;
++
+ 	ctx = xa_load(&dev->ethtool->rss_ctx, request->rss_context);
+ 	if (!ctx)
+ 		return -ENOENT;
+@@ -153,7 +155,6 @@ rss_prepare_data(const struct ethnl_req_info *req_base,
+ 		if (!ops->cap_rss_ctx_supported && !ops->create_rxfh_context)
+ 			return -EOPNOTSUPP;
+ 
+-		data->no_key_fields = !ops->rxfh_per_ctx_key;
+ 		return rss_prepare_ctx(request, dev, data, info);
+ 	}
+ 
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index d2eeb6fc49b382..8da74dc63061c0 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -985,9 +985,9 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4,
+ 		const int hlen = skb_network_header_len(skb) +
+ 				 sizeof(struct udphdr);
+ 
+-		if (hlen + cork->gso_size > cork->fragsize) {
++		if (hlen + min(datalen, cork->gso_size) > cork->fragsize) {
+ 			kfree_skb(skb);
+-			return -EINVAL;
++			return -EMSGSIZE;
+ 		}
+ 		if (datalen > cork->gso_size * UDP_MAX_SEGMENTS) {
+ 			kfree_skb(skb);
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index 896c9c827a288c..197d0ac47592ad 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -1294,9 +1294,9 @@ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
+ 		const int hlen = skb_network_header_len(skb) +
+ 				 sizeof(struct udphdr);
+ 
+-		if (hlen + cork->gso_size > cork->fragsize) {
++		if (hlen + min(datalen, cork->gso_size) > cork->fragsize) {
+ 			kfree_skb(skb);
+-			return -EINVAL;
++			return -EMSGSIZE;
+ 		}
+ 		if (datalen > cork->gso_size * UDP_MAX_SEGMENTS) {
+ 			kfree_skb(skb);
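
Both the IPv4 and IPv6 hunks apply the same rule: only the bytes actually queued need to fit under the path MTU, so a final segment shorter than the configured gso_size may still be sent, and the caller now sees -EMSGSIZE rather than a generic -EINVAL when it cannot. A self-contained sketch of the check, using the 1472-byte IPv4 MSS that the selftests added further down also rely on (28 bytes of IPv4 plus UDP header assumed):

	#include <stdio.h>

	/* Mirrors the fixed test: fail with -EMSGSIZE only when the bytes
	 * actually being sent cannot fit, via min(datalen, gso_size). */
	static int gso_check(unsigned int hlen, unsigned int datalen,
			     unsigned int gso_size, unsigned int fragsize)
	{
		unsigned int eff = datalen < gso_size ? datalen : gso_size;

		return (hlen + eff > fragsize) ? -1 /* -EMSGSIZE */ : 0;
	}

	int main(void)
	{
		/* 1500-byte fragsize, 28 bytes of header => 1472-byte MSS */
		printf("%d\n", gso_check(28, 1472, 1473, 1500));	/* 0: datalen <= MSS < gso_len */
		printf("%d\n", gso_check(28, 1473, 1474, 1500));	/* -1: MSS < datalen < gso_len */
		return 0;
	}
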
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index fac774825aff39..42b239d9b2b3cf 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -136,6 +136,7 @@ static bool mptcp_try_coalesce(struct sock *sk, struct sk_buff *to,
+ 	int delta;
+ 
+ 	if (MPTCP_SKB_CB(from)->offset ||
++	    ((to->len + from->len) > (sk->sk_rcvbuf >> 3)) ||
+ 	    !skb_try_coalesce(to, from, &fragstolen, &delta))
+ 		return false;
+ 
+diff --git a/net/ncsi/ncsi-manage.c b/net/ncsi/ncsi-manage.c
+index bf276eaf933075..7891a537bddd11 100644
+--- a/net/ncsi/ncsi-manage.c
++++ b/net/ncsi/ncsi-manage.c
+@@ -1385,6 +1385,12 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
+ 		nd->state = ncsi_dev_state_probe_package;
+ 		break;
+ 	case ncsi_dev_state_probe_package:
++		if (ndp->package_probe_id >= 8) {
++			/* Last package probed, finishing */
++			ndp->flags |= NCSI_DEV_PROBED;
++			break;
++		}
++
+ 		ndp->pending_req_num = 1;
+ 
+ 		nca.type = NCSI_PKT_CMD_SP;
+@@ -1501,13 +1507,8 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
+ 		if (ret)
+ 			goto error;
+ 
+-		/* Probe next package */
++		/* Probe next package after receiving response */
+ 		ndp->package_probe_id++;
+-		if (ndp->package_probe_id >= 8) {
+-			/* Probe finished */
+-			ndp->flags |= NCSI_DEV_PROBED;
+-			break;
+-		}
+ 		nd->state = ncsi_dev_state_probe_package;
+ 		ndp->active_package = NULL;
+ 		break;
+diff --git a/net/nfc/nci/hci.c b/net/nfc/nci/hci.c
+index de175318a3a0f3..082ab66f120b73 100644
+--- a/net/nfc/nci/hci.c
++++ b/net/nfc/nci/hci.c
+@@ -542,6 +542,8 @@ static u8 nci_hci_create_pipe(struct nci_dev *ndev, u8 dest_host,
+ 
+ 	pr_debug("pipe created=%d\n", pipe);
+ 
++	if (pipe >= NCI_HCI_MAX_PIPES)
++		pipe = NCI_HCI_INVALID_PIPE;
+ 	return pipe;
+ }
+ 
+diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
+index 72c65d938a150e..a4a668b88a8f27 100644
+--- a/net/rose/af_rose.c
++++ b/net/rose/af_rose.c
+@@ -701,11 +701,9 @@ static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ 	struct net_device *dev;
+ 	ax25_address *source;
+ 	ax25_uid_assoc *user;
++	int err = -EINVAL;
+ 	int n;
+ 
+-	if (!sock_flag(sk, SOCK_ZAPPED))
+-		return -EINVAL;
+-
+ 	if (addr_len != sizeof(struct sockaddr_rose) && addr_len != sizeof(struct full_sockaddr_rose))
+ 		return -EINVAL;
+ 
+@@ -718,8 +716,15 @@ static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ 	if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS)
+ 		return -EINVAL;
+ 
+-	if ((dev = rose_dev_get(&addr->srose_addr)) == NULL)
+-		return -EADDRNOTAVAIL;
++	lock_sock(sk);
++
++	if (!sock_flag(sk, SOCK_ZAPPED))
++		goto out_release;
++
++	err = -EADDRNOTAVAIL;
++	dev = rose_dev_get(&addr->srose_addr);
++	if (!dev)
++		goto out_release;
+ 
+ 	source = &addr->srose_call;
+ 
+@@ -730,7 +735,8 @@ static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ 	} else {
+ 		if (ax25_uid_policy && !capable(CAP_NET_BIND_SERVICE)) {
+ 			dev_put(dev);
+-			return -EACCES;
++			err = -EACCES;
++			goto out_release;
+ 		}
+ 		rose->source_call   = *source;
+ 	}
+@@ -753,8 +759,10 @@ static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ 	rose_insert_socket(sk);
+ 
+ 	sock_reset_flag(sk, SOCK_ZAPPED);
+-
+-	return 0;
++	err = 0;
++out_release:
++	release_sock(sk);
++	return err;
+ }
+ 
+ static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags)
+diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
+index d0fd37bdcfe9c8..6b036c0564c7a8 100644
+--- a/net/rxrpc/ar-internal.h
++++ b/net/rxrpc/ar-internal.h
+@@ -567,6 +567,7 @@ enum rxrpc_call_flag {
+ 	RXRPC_CALL_EXCLUSIVE,		/* The call uses a once-only connection */
+ 	RXRPC_CALL_RX_IS_IDLE,		/* recvmsg() is idle - send an ACK */
+ 	RXRPC_CALL_RECVMSG_READ_ALL,	/* recvmsg() read all of the received data */
++	RXRPC_CALL_CONN_CHALLENGING,	/* The connection is being challenged */
+ };
+ 
+ /*
+@@ -587,7 +588,6 @@ enum rxrpc_call_state {
+ 	RXRPC_CALL_CLIENT_AWAIT_REPLY,	/* - client awaiting reply */
+ 	RXRPC_CALL_CLIENT_RECV_REPLY,	/* - client receiving reply phase */
+ 	RXRPC_CALL_SERVER_PREALLOC,	/* - service preallocation */
+-	RXRPC_CALL_SERVER_SECURING,	/* - server securing request connection */
+ 	RXRPC_CALL_SERVER_RECV_REQUEST,	/* - server receiving request */
+ 	RXRPC_CALL_SERVER_ACK_REQUEST,	/* - server pending ACK of request */
+ 	RXRPC_CALL_SERVER_SEND_REPLY,	/* - server sending reply */
+diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
+index f9e983a12c1492..e379a2a9375ae0 100644
+--- a/net/rxrpc/call_object.c
++++ b/net/rxrpc/call_object.c
+@@ -22,7 +22,6 @@ const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
+ 	[RXRPC_CALL_CLIENT_AWAIT_REPLY]		= "ClAwtRpl",
+ 	[RXRPC_CALL_CLIENT_RECV_REPLY]		= "ClRcvRpl",
+ 	[RXRPC_CALL_SERVER_PREALLOC]		= "SvPrealc",
+-	[RXRPC_CALL_SERVER_SECURING]		= "SvSecure",
+ 	[RXRPC_CALL_SERVER_RECV_REQUEST]	= "SvRcvReq",
+ 	[RXRPC_CALL_SERVER_ACK_REQUEST]		= "SvAckReq",
+ 	[RXRPC_CALL_SERVER_SEND_REPLY]		= "SvSndRpl",
+@@ -453,17 +452,16 @@ void rxrpc_incoming_call(struct rxrpc_sock *rx,
+ 	call->cong_tstamp	= skb->tstamp;
+ 
+ 	__set_bit(RXRPC_CALL_EXPOSED, &call->flags);
+-	rxrpc_set_call_state(call, RXRPC_CALL_SERVER_SECURING);
++	rxrpc_set_call_state(call, RXRPC_CALL_SERVER_RECV_REQUEST);
+ 
+ 	spin_lock(&conn->state_lock);
+ 
+ 	switch (conn->state) {
+ 	case RXRPC_CONN_SERVICE_UNSECURED:
+ 	case RXRPC_CONN_SERVICE_CHALLENGING:
+-		rxrpc_set_call_state(call, RXRPC_CALL_SERVER_SECURING);
++		__set_bit(RXRPC_CALL_CONN_CHALLENGING, &call->flags);
+ 		break;
+ 	case RXRPC_CONN_SERVICE:
+-		rxrpc_set_call_state(call, RXRPC_CALL_SERVER_RECV_REQUEST);
+ 		break;
+ 
+ 	case RXRPC_CONN_ABORTED:
+diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c
+index 2a1396cd892f30..c4eb7986efddf8 100644
+--- a/net/rxrpc/conn_event.c
++++ b/net/rxrpc/conn_event.c
+@@ -222,10 +222,8 @@ static void rxrpc_abort_calls(struct rxrpc_connection *conn)
+  */
+ static void rxrpc_call_is_secure(struct rxrpc_call *call)
+ {
+-	if (call && __rxrpc_call_state(call) == RXRPC_CALL_SERVER_SECURING) {
+-		rxrpc_set_call_state(call, RXRPC_CALL_SERVER_RECV_REQUEST);
++	if (call && __test_and_clear_bit(RXRPC_CALL_CONN_CHALLENGING, &call->flags))
+ 		rxrpc_notify_socket(call);
+-	}
+ }
+ 
+ /*
+@@ -266,6 +264,7 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
+ 			 * we've already received the packet, put it on the
+ 			 * front of the queue.
+ 			 */
++			sp->conn = rxrpc_get_connection(conn, rxrpc_conn_get_poke_secured);
+ 			skb->mark = RXRPC_SKB_MARK_SERVICE_CONN_SECURED;
+ 			rxrpc_get_skb(skb, rxrpc_skb_get_conn_secured);
+ 			skb_queue_head(&conn->local->rx_queue, skb);
+@@ -431,14 +430,16 @@ void rxrpc_input_conn_event(struct rxrpc_connection *conn, struct sk_buff *skb)
+ 	if (test_and_clear_bit(RXRPC_CONN_EV_ABORT_CALLS, &conn->events))
+ 		rxrpc_abort_calls(conn);
+ 
+-	switch (skb->mark) {
+-	case RXRPC_SKB_MARK_SERVICE_CONN_SECURED:
+-		if (conn->state != RXRPC_CONN_SERVICE)
+-			break;
++	if (skb) {
++		switch (skb->mark) {
++		case RXRPC_SKB_MARK_SERVICE_CONN_SECURED:
++			if (conn->state != RXRPC_CONN_SERVICE)
++				break;
+ 
+-		for (loop = 0; loop < RXRPC_MAXCALLS; loop++)
+-			rxrpc_call_is_secure(conn->channels[loop].call);
+-		break;
++			for (loop = 0; loop < RXRPC_MAXCALLS; loop++)
++				rxrpc_call_is_secure(conn->channels[loop].call);
++			break;
++		}
+ 	}
+ 
+ 	/* Process delayed ACKs whose time has come. */
+diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c
+index 1539d315afe74a..7bc68135966e24 100644
+--- a/net/rxrpc/conn_object.c
++++ b/net/rxrpc/conn_object.c
+@@ -67,6 +67,7 @@ struct rxrpc_connection *rxrpc_alloc_connection(struct rxrpc_net *rxnet,
+ 		INIT_WORK(&conn->destructor, rxrpc_clean_up_connection);
+ 		INIT_LIST_HEAD(&conn->proc_link);
+ 		INIT_LIST_HEAD(&conn->link);
++		INIT_LIST_HEAD(&conn->attend_link);
+ 		mutex_init(&conn->security_lock);
+ 		mutex_init(&conn->tx_data_alloc_lock);
+ 		skb_queue_head_init(&conn->rx_queue);
+diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
+index 16d49a861dbb58..6a075a7c190db3 100644
+--- a/net/rxrpc/input.c
++++ b/net/rxrpc/input.c
+@@ -573,7 +573,7 @@ static bool rxrpc_input_split_jumbo(struct rxrpc_call *call, struct sk_buff *skb
+ 		rxrpc_propose_delay_ACK(call, sp->hdr.serial,
+ 					rxrpc_propose_ack_input_data);
+ 	}
+-	if (notify) {
++	if (notify && !test_bit(RXRPC_CALL_CONN_CHALLENGING, &call->flags)) {
+ 		trace_rxrpc_notify_socket(call->debug_id, sp->hdr.serial);
+ 		rxrpc_notify_socket(call);
+ 	}
+diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
+index 23d18fe5de9f0d..154f650efb0ab6 100644
+--- a/net/rxrpc/sendmsg.c
++++ b/net/rxrpc/sendmsg.c
+@@ -654,7 +654,7 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
+ 	} else {
+ 		switch (rxrpc_call_state(call)) {
+ 		case RXRPC_CALL_CLIENT_AWAIT_CONN:
+-		case RXRPC_CALL_SERVER_SECURING:
++		case RXRPC_CALL_SERVER_RECV_REQUEST:
+ 			if (p.command == RXRPC_CMD_SEND_ABORT)
+ 				break;
+ 			fallthrough;
+diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
+index b50b2c2cc09bc6..e6bfd39ff33965 100644
+--- a/net/sched/sch_fifo.c
++++ b/net/sched/sch_fifo.c
+@@ -40,6 +40,9 @@ static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ {
+ 	unsigned int prev_backlog;
+ 
++	if (unlikely(READ_ONCE(sch->limit) == 0))
++		return qdisc_drop(skb, sch, to_free);
++
+ 	if (likely(sch->q.qlen < READ_ONCE(sch->limit)))
+ 		return qdisc_enqueue_tail(skb, sch);
+ 
+diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
+index 3b519adc01259f..68a08f6d1fbce2 100644
+--- a/net/sched/sch_netem.c
++++ b/net/sched/sch_netem.c
+@@ -748,9 +748,9 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
+ 				if (err != NET_XMIT_SUCCESS) {
+ 					if (net_xmit_drop_count(err))
+ 						qdisc_qstats_drop(sch);
+-					qdisc_tree_reduce_backlog(sch, 1, pkt_len);
+ 					sch->qstats.backlog -= pkt_len;
+ 					sch->q.qlen--;
++					qdisc_tree_reduce_backlog(sch, 1, pkt_len);
+ 				}
+ 				goto tfifo_dequeue;
+ 			}
+diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c
+index 43c3f1c971b8fd..c524421ec65252 100644
+--- a/net/tipc/crypto.c
++++ b/net/tipc/crypto.c
+@@ -2293,8 +2293,8 @@ static bool tipc_crypto_key_rcv(struct tipc_crypto *rx, struct tipc_msg *hdr)
+ 	keylen = ntohl(*((__be32 *)(data + TIPC_AEAD_ALG_NAME)));
+ 
+ 	/* Verify the supplied size values */
+-	if (unlikely(size != keylen + sizeof(struct tipc_aead_key) ||
+-		     keylen > TIPC_AEAD_KEY_SIZE_MAX)) {
++	if (unlikely(keylen > TIPC_AEAD_KEY_SIZE_MAX ||
++		     size != keylen + sizeof(struct tipc_aead_key))) {
+ 		pr_debug("%s: invalid MSG_CRYPTO key size\n", rx->name);
+ 		goto exit;
+ 	}
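
The reorder is defensive rather than cosmetic: keylen comes straight off the wire, and bounding it before it enters any arithmetic ensures the sum with the header size cannot misbehave. On builds where that sum is computed in 32 bits, an unchecked huge keylen could wrap the addition and slip past the equality test. A minimal sketch with illustrative constants standing in for TIPC_AEAD_KEY_SIZE_MAX and sizeof(struct tipc_aead_key):

	#include <stdint.h>
	#include <stdio.h>

	#define HDR_LEN 16u	/* illustrative header size */
	#define KEYLEN_MAX 64u	/* illustrative bound */

	/* Bound the untrusted length before it enters any arithmetic: in
	 * 32-bit math a huge keylen would wrap the sum and pass the test. */
	static int msg_ok(uint32_t size, uint32_t keylen)
	{
		if (keylen > KEYLEN_MAX)
			return 0;
		return size == keylen + HDR_LEN;
	}

	int main(void)
	{
		printf("%d\n", msg_ok(48, 32));			/* 1: consistent sizes */
		printf("%d\n", msg_ok(12, UINT32_MAX - 3));	/* 0: wrapping pair rejected */
		return 0;
	}
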
+diff --git a/rust/kernel/init.rs b/rust/kernel/init.rs
+index a17ac8762d8f9f..789f80f71ca7e1 100644
+--- a/rust/kernel/init.rs
++++ b/rust/kernel/init.rs
+@@ -858,7 +858,7 @@ pub unsafe trait PinInit<T: ?Sized, E = Infallible>: Sized {
+     /// use kernel::{types::Opaque, init::pin_init_from_closure};
+     /// #[repr(C)]
+     /// struct RawFoo([u8; 16]);
+-    /// extern {
++    /// extern "C" {
+     ///     fn init_foo(_: *mut RawFoo);
+     /// }
+     ///
+diff --git a/scripts/Makefile.extrawarn b/scripts/Makefile.extrawarn
+index 1d13cecc7cc780..04faf15ed316a9 100644
+--- a/scripts/Makefile.extrawarn
++++ b/scripts/Makefile.extrawarn
+@@ -130,7 +130,6 @@ KBUILD_CFLAGS += $(call cc-disable-warning, pointer-to-enum-cast)
+ KBUILD_CFLAGS += -Wno-tautological-constant-out-of-range-compare
+ KBUILD_CFLAGS += $(call cc-disable-warning, unaligned-access)
+ KBUILD_CFLAGS += -Wno-enum-compare-conditional
+-KBUILD_CFLAGS += -Wno-enum-enum-conversion
+ endif
+ 
+ endif
+@@ -154,6 +153,10 @@ KBUILD_CFLAGS += -Wno-missing-field-initializers
+ KBUILD_CFLAGS += -Wno-type-limits
+ KBUILD_CFLAGS += -Wno-shift-negative-value
+ 
++ifdef CONFIG_CC_IS_CLANG
++KBUILD_CFLAGS += -Wno-enum-enum-conversion
++endif
++
+ ifdef CONFIG_CC_IS_GCC
+ KBUILD_CFLAGS += -Wno-maybe-uninitialized
+ endif
+diff --git a/scripts/gdb/linux/cpus.py b/scripts/gdb/linux/cpus.py
+index 2f11c4f9c345a0..13eb8b3901b8fc 100644
+--- a/scripts/gdb/linux/cpus.py
++++ b/scripts/gdb/linux/cpus.py
+@@ -167,7 +167,7 @@ def get_current_task(cpu):
+             var_ptr = gdb.parse_and_eval("&pcpu_hot.current_task")
+             return per_cpu(var_ptr, cpu).dereference()
+     elif utils.is_target_arch("aarch64"):
+-        current_task_addr = gdb.parse_and_eval("$SP_EL0")
++        current_task_addr = gdb.parse_and_eval("(unsigned long)$SP_EL0")
+         if (current_task_addr >> 63) != 0:
+             current_task = current_task_addr.cast(task_ptr_type)
+             return current_task.dereference()
+diff --git a/scripts/generate_rust_target.rs b/scripts/generate_rust_target.rs
+index 0d00ac3723b5e5..4fd6b6ab3e329d 100644
+--- a/scripts/generate_rust_target.rs
++++ b/scripts/generate_rust_target.rs
+@@ -165,6 +165,18 @@ fn has(&self, option: &str) -> bool {
+         let option = "CONFIG_".to_owned() + option;
+         self.0.contains_key(&option)
+     }
++
++    /// Is the rustc version at least `major.minor.patch`?
++    fn rustc_version_atleast(&self, major: u32, minor: u32, patch: u32) -> bool {
++        let check_version = 100000 * major + 100 * minor + patch;
++        let actual_version = self
++            .0
++            .get("CONFIG_RUSTC_VERSION")
++            .unwrap()
++            .parse::<u32>()
++            .unwrap();
++        check_version <= actual_version
++    }
+ }
+ 
+ fn main() {
+@@ -182,6 +194,9 @@ fn main() {
+         }
+     } else if cfg.has("X86_64") {
+         ts.push("arch", "x86_64");
++        if cfg.rustc_version_atleast(1, 86, 0) {
++            ts.push("rustc-abi", "x86-softfloat");
++        }
+         ts.push(
+             "data-layout",
+             "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128",
+@@ -215,6 +230,9 @@ fn main() {
+             panic!("32-bit x86 only works under UML");
+         }
+         ts.push("arch", "x86");
++        if cfg.rustc_version_atleast(1, 86, 0) {
++            ts.push("rustc-abi", "x86-softfloat");
++        }
+         ts.push(
+             "data-layout",
+             "e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-i128:128-f64:32:64-f80:32-n8:16:32-S128",
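
rustc_version_atleast() leans on the fact that CONFIG_RUSTC_VERSION is stored as a single integer packed as 100000*major + 100*minor + patch, so an ordered comparison of packed values is an ordered comparison of versions, as long as minor stays below 1000 and patch below 100. The same encoding in a compact C sketch:

	#include <stdio.h>

	/* CONFIG_RUSTC_VERSION packs a version as 100000*major + 100*minor
	 * + patch (1.86.0 => 108600), so comparing packed values compares
	 * versions while minor < 1000 and patch < 100. */
	static int version_atleast(unsigned int actual, unsigned int major,
				   unsigned int minor, unsigned int patch)
	{
		return 100000u * major + 100u * minor + patch <= actual;
	}

	int main(void)
	{
		printf("%d\n", version_atleast(108600, 1, 86, 0));	/* 1: 1.86.0 */
		printf("%d\n", version_atleast(108502, 1, 86, 0));	/* 0: 1.85.2 is older */
		return 0;
	}
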
+diff --git a/security/keys/trusted-keys/trusted_dcp.c b/security/keys/trusted-keys/trusted_dcp.c
+index e908c53a803c4b..7b6eb655df0cbf 100644
+--- a/security/keys/trusted-keys/trusted_dcp.c
++++ b/security/keys/trusted-keys/trusted_dcp.c
+@@ -201,12 +201,16 @@ static int trusted_dcp_seal(struct trusted_key_payload *p, char *datablob)
+ {
+ 	struct dcp_blob_fmt *b = (struct dcp_blob_fmt *)p->blob;
+ 	int blen, ret;
+-	u8 plain_blob_key[AES_KEYSIZE_128];
++	u8 *plain_blob_key;
+ 
+ 	blen = calc_blob_len(p->key_len);
+ 	if (blen > MAX_BLOB_SIZE)
+ 		return -E2BIG;
+ 
++	plain_blob_key = kmalloc(AES_KEYSIZE_128, GFP_KERNEL);
++	if (!plain_blob_key)
++		return -ENOMEM;
++
+ 	b->fmt_version = DCP_BLOB_VERSION;
+ 	get_random_bytes(b->nonce, AES_KEYSIZE_128);
+ 	get_random_bytes(plain_blob_key, AES_KEYSIZE_128);
+@@ -229,7 +233,8 @@ static int trusted_dcp_seal(struct trusted_key_payload *p, char *datablob)
+ 	ret = 0;
+ 
+ out:
+-	memzero_explicit(plain_blob_key, sizeof(plain_blob_key));
++	memzero_explicit(plain_blob_key, AES_KEYSIZE_128);
++	kfree(plain_blob_key);
+ 
+ 	return ret;
+ }
+@@ -238,7 +243,7 @@ static int trusted_dcp_unseal(struct trusted_key_payload *p, char *datablob)
+ {
+ 	struct dcp_blob_fmt *b = (struct dcp_blob_fmt *)p->blob;
+ 	int blen, ret;
+-	u8 plain_blob_key[AES_KEYSIZE_128];
++	u8 *plain_blob_key = NULL;
+ 
+ 	if (b->fmt_version != DCP_BLOB_VERSION) {
+ 		pr_err("DCP blob has bad version: %i, expected %i\n",
+@@ -256,6 +261,12 @@ static int trusted_dcp_unseal(struct trusted_key_payload *p, char *datablob)
+ 		goto out;
+ 	}
+ 
++	plain_blob_key = kmalloc(AES_KEYSIZE_128, GFP_KERNEL);
++	if (!plain_blob_key) {
++		ret = -ENOMEM;
++		goto out;
++	}
++
+ 	ret = decrypt_blob_key(b->blob_key, plain_blob_key);
+ 	if (ret) {
+ 		pr_err("Unable to decrypt blob key: %i\n", ret);
+@@ -271,7 +282,10 @@ static int trusted_dcp_unseal(struct trusted_key_payload *p, char *datablob)
+ 
+ 	ret = 0;
+ out:
+-	memzero_explicit(plain_blob_key, sizeof(plain_blob_key));
++	if (plain_blob_key) {
++		memzero_explicit(plain_blob_key, AES_KEYSIZE_128);
++		kfree(plain_blob_key);
++	}
+ 
+ 	return ret;
+ }
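
Moving the key from the stack to kmalloc() has a side effect these hunks also handle: once plain_blob_key is a pointer, sizeof(plain_blob_key) is the size of the pointer rather than the 16-byte key, so the wipe length must be spelled out as AES_KEYSIZE_128. A userspace sketch of the pitfall, with a volatile function pointer standing in for memzero_explicit():

	#include <stdlib.h>
	#include <string.h>
	#include <stdio.h>

	#define KEY_SIZE 16	/* AES_KEYSIZE_128 */

	/* Volatile function pointer keeps the wipe from being optimized
	 * away -- a portable stand-in for memzero_explicit(). */
	static void *(*volatile wipe)(void *, int, size_t) = memset;

	int main(void)
	{
		unsigned char stack_key[KEY_SIZE];
		unsigned char *heap_key = malloc(KEY_SIZE);

		if (!heap_key)
			return 1;
		/* 16 vs. pointer size (typically 8 on LP64): sizeof no
		 * longer measures the buffer behind a pointer. */
		printf("%zu %zu\n", sizeof(stack_key), sizeof(heap_key));
		wipe(stack_key, 0, sizeof(stack_key));	/* array: sizeof is fine */
		wipe(heap_key, 0, KEY_SIZE);		/* pointer: be explicit  */
		free(heap_key);
		return 0;
	}
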
+diff --git a/security/safesetid/securityfs.c b/security/safesetid/securityfs.c
+index 25310468bcddff..8e1ffd70b18ab4 100644
+--- a/security/safesetid/securityfs.c
++++ b/security/safesetid/securityfs.c
+@@ -143,6 +143,9 @@ static ssize_t handle_policy_update(struct file *file,
+ 	char *buf, *p, *end;
+ 	int err;
+ 
++	if (len >= KMALLOC_MAX_SIZE)
++		return -EINVAL;
++
+ 	pol = kmalloc(sizeof(struct setid_ruleset), GFP_KERNEL);
+ 	if (!pol)
+ 		return -ENOMEM;
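
The new check rejects absurd user-controlled lengths before any allocation is attempted; KMALLOC_MAX_SIZE is the largest size kmalloc() can serve, and >= rather than > leaves headroom for a trailing NUL if the buffer is later duplicated as a string. A minimal sketch of capping an untrusted length before allocating (the cap value is illustrative):

	#include <stdlib.h>
	#include <string.h>
	#include <stdio.h>

	#define ALLOC_MAX (1UL << 20)	/* illustrative cap, not KMALLOC_MAX_SIZE */

	/* Reject an untrusted length before allocating len + 1 bytes; using
	 * ">=" keeps the NUL terminator from pushing past the cap. */
	static char *copy_bounded(const char *src, size_t len)
	{
		char *buf;

		if (len >= ALLOC_MAX)
			return NULL;
		buf = malloc(len + 1);
		if (!buf)
			return NULL;
		memcpy(buf, src, len);
		buf[len] = '\0';
		return buf;
	}

	int main(void)
	{
		char *p = copy_bounded("allow 1:2\n", 10);

		printf("%s", p ? p : "(rejected)\n");
		free(p);
		return 0;
	}
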
+diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c
+index 5c7b059a332aac..972664962e8f67 100644
+--- a/security/tomoyo/common.c
++++ b/security/tomoyo/common.c
+@@ -2665,7 +2665,7 @@ ssize_t tomoyo_write_control(struct tomoyo_io_buffer *head,
+ 
+ 		if (head->w.avail >= head->writebuf_size - 1) {
+ 			const int len = head->writebuf_size * 2;
+-			char *cp = kzalloc(len, GFP_NOFS);
++			char *cp = kzalloc(len, GFP_NOFS | __GFP_NOWARN);
+ 
+ 			if (!cp) {
+ 				error = -ENOMEM;
+diff --git a/sound/pci/hda/hda_auto_parser.c b/sound/pci/hda/hda_auto_parser.c
+index 8e74be038b0fad..0091ab3f2bd56b 100644
+--- a/sound/pci/hda/hda_auto_parser.c
++++ b/sound/pci/hda/hda_auto_parser.c
+@@ -80,7 +80,11 @@ static int compare_input_type(const void *ap, const void *bp)
+ 
+ 	/* In case one has boost and the other one has not,
+ 	   pick the one with boost first. */
+-	return (int)(b->has_boost_on_pin - a->has_boost_on_pin);
++	if (a->has_boost_on_pin != b->has_boost_on_pin)
++		return (int)(b->has_boost_on_pin - a->has_boost_on_pin);
++
++	/* Keep the original order */
++	return a->order - b->order;
+ }
+ 
+ /* Reorder the surround channels
+@@ -400,6 +404,8 @@ int snd_hda_parse_pin_defcfg(struct hda_codec *codec,
+ 	reorder_outputs(cfg->speaker_outs, cfg->speaker_pins);
+ 
+ 	/* sort inputs in the order of AUTO_PIN_* type */
++	for (i = 0; i < cfg->num_inputs; i++)
++		cfg->inputs[i].order = i;
+ 	sort(cfg->inputs, cfg->num_inputs, sizeof(cfg->inputs[0]),
+ 	     compare_input_type, NULL);
+ 
+diff --git a/sound/pci/hda/hda_auto_parser.h b/sound/pci/hda/hda_auto_parser.h
+index 579b11beac718e..87af3d8c02f7f6 100644
+--- a/sound/pci/hda/hda_auto_parser.h
++++ b/sound/pci/hda/hda_auto_parser.h
+@@ -37,6 +37,7 @@ struct auto_pin_cfg_item {
+ 	unsigned int is_headset_mic:1;
+ 	unsigned int is_headphone_mic:1; /* Mic-only in headphone jack */
+ 	unsigned int has_boost_on_pin:1;
++	int order;
+ };
+ 
+ struct auto_pin_cfg;
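
The kernel's sort(), like C's qsort(), gives no stability guarantee: once two inputs compare equal, their relative order is up to the implementation. Recording each entry's pre-sort index and comparing it last, as the hunks above do, restores the original order deterministically. A compact qsort() demonstration of the tiebreaker, reduced to just the boost flag shown in the hunk:

	#include <stdlib.h>
	#include <stdio.h>

	struct item { int id; int has_boost; int order; };

	/* qsort(), like the kernel's sort(), is not stable: equal elements
	 * may be reordered.  Comparing a saved pre-sort index last keeps
	 * ties in their original order. */
	static int cmp(const void *ap, const void *bp)
	{
		const struct item *a = ap, *b = bp;

		if (a->has_boost != b->has_boost)
			return b->has_boost - a->has_boost;	/* boost first */
		return a->order - b->order;			/* stable tiebreak */
	}

	int main(void)
	{
		struct item v[] = { { 1, 0 }, { 2, 1 }, { 3, 0 } };
		int i;

		for (i = 0; i < 3; i++)
			v[i].order = i;
		qsort(v, 3, sizeof(v[0]), cmp);
		for (i = 0; i < 3; i++)
			printf("%d ", v[i].id);	/* 2 1 3: ties keep input order */
		printf("\n");
		return 0;
	}
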
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 5d99a4ea176a15..f3f849b96402d1 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -10374,6 +10374,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8870, "HP ZBook Fury 15.6 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
+ 	SND_PCI_QUIRK(0x103c, 0x8873, "HP ZBook Studio 15.6 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
+ 	SND_PCI_QUIRK(0x103c, 0x887a, "HP Laptop 15s-eq2xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
++	SND_PCI_QUIRK(0x103c, 0x887c, "HP Laptop 14s-fq1xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
+ 	SND_PCI_QUIRK(0x103c, 0x888a, "HP ENVY x360 Convertible 15-eu0xxx", ALC245_FIXUP_HP_X360_MUTE_LEDS),
+ 	SND_PCI_QUIRK(0x103c, 0x888d, "HP ZBook Power 15.6 inch G8 Mobile Workstation PC", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8895, "HP EliteBook 855 G8 Notebook PC", ALC285_FIXUP_HP_SPEAKERS_MICMUTE_LED),
+@@ -10873,7 +10874,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x3869, "Lenovo Yoga7 14IAL7", ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN),
+ 	HDA_CODEC_QUIRK(0x17aa, 0x386e, "Legion Y9000X 2022 IAH7", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x17aa, 0x386e, "Yoga Pro 7 14ARP8", ALC285_FIXUP_SPEAKER2_TO_DAC1),
+-	HDA_CODEC_QUIRK(0x17aa, 0x386f, "Legion Pro 7 16ARX8H", ALC287_FIXUP_TAS2781_I2C),
++	HDA_CODEC_QUIRK(0x17aa, 0x38a8, "Legion Pro 7 16ARX8H", ALC287_FIXUP_TAS2781_I2C), /* this must match before PCI SSID 17aa:386f below */
+ 	SND_PCI_QUIRK(0x17aa, 0x386f, "Legion Pro 7i 16IAX7", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x17aa, 0x3870, "Lenovo Yoga 7 14ARB7", ALC287_FIXUP_YOGA7_14ARB7_I2C),
+ 	SND_PCI_QUIRK(0x17aa, 0x3877, "Lenovo Legion 7 Slim 16ARHA7", ALC287_FIXUP_CS35L41_I2C_2),
+@@ -10948,6 +10949,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x511f, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
+ 	SND_PCI_QUIRK(0x17aa, 0x9e56, "Lenovo ZhaoYang CF4620Z", ALC286_FIXUP_SONY_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1849, 0x0269, "Positivo Master C6400", ALC269VB_FIXUP_ASUS_ZENBOOK),
+ 	SND_PCI_QUIRK(0x1849, 0x1233, "ASRock NUC Box 1100", ALC233_FIXUP_NO_AUDIO_JACK),
+ 	SND_PCI_QUIRK(0x1849, 0xa233, "Positivo Master C6300", ALC269_FIXUP_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1854, 0x0440, "LG CQ6", ALC256_FIXUP_HEADPHONE_AMP_VOL),
+diff --git a/sound/soc/amd/Kconfig b/sound/soc/amd/Kconfig
+index 6dec44f516c13f..c2a5671ba96b07 100644
+--- a/sound/soc/amd/Kconfig
++++ b/sound/soc/amd/Kconfig
+@@ -105,7 +105,7 @@ config SND_SOC_AMD_ACP6x
+ config SND_SOC_AMD_YC_MACH
+ 	tristate "AMD YC support for DMIC"
+ 	select SND_SOC_DMIC
+-	depends on SND_SOC_AMD_ACP6x
++	depends on SND_SOC_AMD_ACP6x && ACPI
+ 	help
+ 	  This option enables machine driver for Yellow Carp platform
+ 	  using dmic. ACP IP has PDM Decoder block with DMA controller.
+diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
+index ecf57a6cb7c37d..b16587d8f97a89 100644
+--- a/sound/soc/amd/yc/acp6x-mach.c
++++ b/sound/soc/amd/yc/acp6x-mach.c
+@@ -304,6 +304,34 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "83AS"),
+ 		}
+ 	},
++	{
++		.driver_data = &acp6x_card,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "83L3"),
++		}
++	},
++	{
++		.driver_data = &acp6x_card,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "83N6"),
++		}
++	},
++	{
++		.driver_data = &acp6x_card,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "83Q2"),
++		}
++	},
++	{
++		.driver_data = &acp6x_card,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "83Q3"),
++		}
++	},
+ 	{
+ 		.driver_data = &acp6x_card,
+ 		.matches = {
+diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
+index 9f2dc24d44cb54..84fc35d88b9267 100644
+--- a/sound/soc/intel/boards/sof_sdw.c
++++ b/sound/soc/intel/boards/sof_sdw.c
+@@ -617,9 +617,10 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
+ 		.callback = sof_sdw_quirk_cb,
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+-			DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "380E")
++			DMI_MATCH(DMI_PRODUCT_NAME, "83HM")
+ 		},
+-		.driver_data = (void *)(SOC_SDW_SIDECAR_AMPS),
++		.driver_data = (void *)(SOC_SDW_SIDECAR_AMPS |
++					SOC_SDW_CODEC_MIC),
+ 	},
+ 	{
+ 		.callback = sof_sdw_quirk_cb,
+diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
+index 7a59121fc323c3..1102599403c534 100644
+--- a/sound/soc/soc-pcm.c
++++ b/sound/soc/soc-pcm.c
+@@ -38,7 +38,6 @@ static inline int _soc_pcm_ret(struct snd_soc_pcm_runtime *rtd,
+ 	switch (ret) {
+ 	case -EPROBE_DEFER:
+ 	case -ENOTSUPP:
+-	case -EINVAL:
+ 		break;
+ 	default:
+ 		dev_err(rtd->dev,
+@@ -1001,7 +1000,13 @@ static int __soc_pcm_prepare(struct snd_soc_pcm_runtime *rtd,
+ 	}
+ 
+ out:
+-	return soc_pcm_ret(rtd, ret);
++	/*
++	 * Don't use soc_pcm_ret() on .prepare callback to lower error log severity
++	 *
++	 * We don't want to log an error since we do not want to give userspace a way to do a
++	 * denial-of-service attack on the syslog / diskspace.
++	 */
++	return ret;
+ }
+ 
+ /* PCM prepare ops for non-DPCM streams */
+@@ -1013,6 +1018,13 @@ static int soc_pcm_prepare(struct snd_pcm_substream *substream)
+ 	snd_soc_dpcm_mutex_lock(rtd);
+ 	ret = __soc_pcm_prepare(rtd, substream);
+ 	snd_soc_dpcm_mutex_unlock(rtd);
++
++	/*
++	 * Don't use soc_pcm_ret() on .prepare callback to lower error log severity
++	 *
++	 * We don't want to log an error since we do not want to give userspace a way to do a
++	 * denial-of-service attack on the syslog / diskspace.
++	 */
+ 	return ret;
+ }
+ 
+@@ -2554,7 +2566,13 @@ int dpcm_be_dai_prepare(struct snd_soc_pcm_runtime *fe, int stream)
+ 		be->dpcm[stream].state = SND_SOC_DPCM_STATE_PREPARE;
+ 	}
+ 
+-	return soc_pcm_ret(fe, ret);
++	/*
++	 * Don't use soc_pcm_ret() on .prepare callback to lower error log severity
++	 *
++	 * We don't want to log an error since we do not want to give userspace a way to do a
++	 * denial-of-service attack on the syslog / diskspace.
++	 */
++	return ret;
+ }
+ 
+ static int dpcm_fe_dai_prepare(struct snd_pcm_substream *substream)
+@@ -2594,7 +2612,13 @@ static int dpcm_fe_dai_prepare(struct snd_pcm_substream *substream)
+ 	dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
+ 	snd_soc_dpcm_mutex_unlock(fe);
+ 
+-	return soc_pcm_ret(fe, ret);
++	/*
++	 * Don't use soc_pcm_ret() on .prepare callback to lower error log severity
++	 *
++	 * We don't want to log an error since we do not want to give userspace a way to do a
++	 * denial-of-service attack on the syslog / diskspace.
++	 */
++	return ret;
+ }
+ 
+ static int dpcm_run_update_shutdown(struct snd_soc_pcm_runtime *fe, int stream)
+diff --git a/sound/soc/sof/intel/hda-dai.c b/sound/soc/sof/intel/hda-dai.c
+index 82f46ecd94301e..2e58a264da5566 100644
+--- a/sound/soc/sof/intel/hda-dai.c
++++ b/sound/soc/sof/intel/hda-dai.c
+@@ -503,6 +503,12 @@ int sdw_hda_dai_hw_params(struct snd_pcm_substream *substream,
+ 	int ret;
+ 	int i;
+ 
++	if (!w) {
++		dev_err(cpu_dai->dev, "%s widget not found, check amp link num in the topology\n",
++			cpu_dai->name);
++		return -EINVAL;
++	}
++
+ 	ops = hda_dai_get_ops(substream, cpu_dai);
+ 	if (!ops) {
+ 		dev_err(cpu_dai->dev, "DAI widget ops not set\n");
+@@ -582,6 +588,12 @@ int sdw_hda_dai_hw_params(struct snd_pcm_substream *substream,
+ 	 */
+ 	for_each_rtd_cpu_dais(rtd, i, dai) {
+ 		w = snd_soc_dai_get_widget(dai, substream->stream);
++		if (!w) {
++			dev_err(cpu_dai->dev,
++				"%s widget not found, check amp link num in the topology\n",
++				dai->name);
++			return -EINVAL;
++		}
+ 		ipc4_copier = widget_to_copier(w);
+ 		memcpy(&ipc4_copier->dma_config_tlv[cpu_dai_id], dma_config_tlv,
+ 		       sizeof(*dma_config_tlv));
+diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c
+index 70fc08c8fc99e2..f10ed4d1025016 100644
+--- a/sound/soc/sof/intel/hda.c
++++ b/sound/soc/sof/intel/hda.c
+@@ -63,6 +63,11 @@ static int sdw_params_stream(struct device *dev,
+ 	struct snd_soc_dapm_widget *w = snd_soc_dai_get_widget(d, params_data->substream->stream);
+ 	struct snd_sof_dai_config_data data = { 0 };
+ 
++	if (!w) {
++		dev_err(dev, "%s widget not found, check amp link num in the topology\n",
++			d->name);
++		return -EINVAL;
++	}
+ 	data.dai_index = (params_data->link_id << 8) | d->id;
+ 	data.dai_data = params_data->alh_stream_id;
+ 	data.dai_node_id = data.dai_data;
+diff --git a/tools/perf/bench/epoll-wait.c b/tools/perf/bench/epoll-wait.c
+index ef5c4257844d13..20fe4f72b4afcc 100644
+--- a/tools/perf/bench/epoll-wait.c
++++ b/tools/perf/bench/epoll-wait.c
+@@ -420,7 +420,12 @@ static int cmpworker(const void *p1, const void *p2)
+ 
+ 	struct worker *w1 = (struct worker *) p1;
+ 	struct worker *w2 = (struct worker *) p2;
+-	return w1->tid > w2->tid;
++
++	if (w1->tid > w2->tid)
++		return 1;
++	if (w1->tid < w2->tid)
++		return -1;
++	return 0;
+ }
+ 
+ int bench_epoll_wait(int argc, const char **argv)
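
The old comparator returned w1->tid > w2->tid, which is only ever 0 or 1: "less than" is never reported, so qsort() treats unequal elements as equal and the resulting order is implementation-defined. A comparator must return a negative, zero, or positive value, and the explicit comparisons in the fix also dodge the signed overflow that a bare subtraction risks on extreme values. A standalone illustration:

	#include <stdlib.h>
	#include <stdio.h>

	/* A qsort() comparator must return <0, 0, or >0.  "a > b" only
	 * yields 0 or 1, so ordering is lost; explicit comparisons also
	 * avoid the overflow a bare "a - b" risks on extreme values. */
	static int cmp_int(const void *p1, const void *p2)
	{
		int a = *(const int *)p1, b = *(const int *)p2;

		if (a > b)
			return 1;
		if (a < b)
			return -1;
		return 0;
	}

	int main(void)
	{
		int v[] = { 42, 7, 19 };

		qsort(v, 3, sizeof(v[0]), cmp_int);
		printf("%d %d %d\n", v[0], v[1], v[2]);	/* 7 19 42 */
		return 0;
	}
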
+diff --git a/tools/testing/selftests/net/ipsec.c b/tools/testing/selftests/net/ipsec.c
+index be4a30a0d02aef..9b44a091802cbb 100644
+--- a/tools/testing/selftests/net/ipsec.c
++++ b/tools/testing/selftests/net/ipsec.c
+@@ -227,7 +227,8 @@ static int rtattr_pack(struct nlmsghdr *nh, size_t req_sz,
+ 
+ 	attr->rta_len = RTA_LENGTH(size);
+ 	attr->rta_type = rta_type;
+-	memcpy(RTA_DATA(attr), payload, size);
++	if (payload)
++		memcpy(RTA_DATA(attr), payload, size);
+ 
+ 	return 0;
+ }
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.c b/tools/testing/selftests/net/mptcp/mptcp_connect.c
+index 414addef9a4514..d240d02fa443a1 100644
+--- a/tools/testing/selftests/net/mptcp/mptcp_connect.c
++++ b/tools/testing/selftests/net/mptcp/mptcp_connect.c
+@@ -1302,7 +1302,7 @@ int main_loop(void)
+ 		return ret;
+ 
+ 	if (cfg_truncate > 0) {
+-		xdisconnect(fd);
++		shutdown(fd, SHUT_WR);
+ 	} else if (--cfg_repeat > 0) {
+ 		xdisconnect(fd);
+ 
+diff --git a/tools/testing/selftests/net/udpgso.c b/tools/testing/selftests/net/udpgso.c
+index 3f2fca02fec53f..36ff28af4b1905 100644
+--- a/tools/testing/selftests/net/udpgso.c
++++ b/tools/testing/selftests/net/udpgso.c
+@@ -102,6 +102,19 @@ struct testcase testcases_v4[] = {
+ 		.gso_len = CONST_MSS_V4,
+ 		.r_num_mss = 1,
+ 	},
++	{
++		/* datalen <= MSS < gso_len: will fall back to no GSO */
++		.tlen = CONST_MSS_V4,
++		.gso_len = CONST_MSS_V4 + 1,
++		.r_num_mss = 0,
++		.r_len_last = CONST_MSS_V4,
++	},
++	{
++		/* MSS < datalen < gso_len: fail */
++		.tlen = CONST_MSS_V4 + 1,
++		.gso_len = CONST_MSS_V4 + 2,
++		.tfail = true,
++	},
+ 	{
+ 		/* send a single MSS + 1B */
+ 		.tlen = CONST_MSS_V4 + 1,
+@@ -205,6 +218,19 @@ struct testcase testcases_v6[] = {
+ 		.gso_len = CONST_MSS_V6,
+ 		.r_num_mss = 1,
+ 	},
++	{
++		/* datalen <= MSS < gso_len: will fall back to no GSO */
++		.tlen = CONST_MSS_V6,
++		.gso_len = CONST_MSS_V6 + 1,
++		.r_num_mss = 0,
++		.r_len_last = CONST_MSS_V6,
++	},
++	{
++		/* MSS < datalen < gso_len: fail */
++		.tlen = CONST_MSS_V6 + 1,
++		.gso_len = CONST_MSS_V6 + 2,
++		.tfail = true
++	},
+ 	{
+ 		/* send a single MSS + 1B */
+ 		.tlen = CONST_MSS_V6 + 1,
+diff --git a/tools/testing/selftests/sched_ext/ddsp_bogus_dsq_fail.bpf.c b/tools/testing/selftests/sched_ext/ddsp_bogus_dsq_fail.bpf.c
+index 6f4c3f5a1c5d99..37d9bf6fb7458d 100644
+--- a/tools/testing/selftests/sched_ext/ddsp_bogus_dsq_fail.bpf.c
++++ b/tools/testing/selftests/sched_ext/ddsp_bogus_dsq_fail.bpf.c
+@@ -20,7 +20,7 @@ s32 BPF_STRUCT_OPS(ddsp_bogus_dsq_fail_select_cpu, struct task_struct *p,
+ 		 * If we dispatch to a bogus DSQ that will fall back to the
+ 		 * builtin global DSQ, we fail gracefully.
+ 		 */
+-		scx_bpf_dsq_insert_vtime(p, 0xcafef00d, SCX_SLICE_DFL,
++		scx_bpf_dispatch_vtime(p, 0xcafef00d, SCX_SLICE_DFL,
+ 				       p->scx.dsq_vtime, 0);
+ 		return cpu;
+ 	}
+diff --git a/tools/testing/selftests/sched_ext/ddsp_vtimelocal_fail.bpf.c b/tools/testing/selftests/sched_ext/ddsp_vtimelocal_fail.bpf.c
+index e4a55027778fd0..dffc97d9cdf141 100644
+--- a/tools/testing/selftests/sched_ext/ddsp_vtimelocal_fail.bpf.c
++++ b/tools/testing/selftests/sched_ext/ddsp_vtimelocal_fail.bpf.c
+@@ -17,8 +17,8 @@ s32 BPF_STRUCT_OPS(ddsp_vtimelocal_fail_select_cpu, struct task_struct *p,
+ 
+ 	if (cpu >= 0) {
+ 		/* Shouldn't be allowed to vtime dispatch to a builtin DSQ. */
+-		scx_bpf_dsq_insert_vtime(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL,
+-					 p->scx.dsq_vtime, 0);
++		scx_bpf_dispatch_vtime(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL,
++				       p->scx.dsq_vtime, 0);
+ 		return cpu;
+ 	}
+ 
+diff --git a/tools/testing/selftests/sched_ext/dsp_local_on.bpf.c b/tools/testing/selftests/sched_ext/dsp_local_on.bpf.c
+index fbda6bf5467128..c9a2da0575a0fa 100644
+--- a/tools/testing/selftests/sched_ext/dsp_local_on.bpf.c
++++ b/tools/testing/selftests/sched_ext/dsp_local_on.bpf.c
+@@ -48,7 +48,7 @@ void BPF_STRUCT_OPS(dsp_local_on_dispatch, s32 cpu, struct task_struct *prev)
+ 	else
+ 		target = scx_bpf_task_cpu(p);
+ 
+-	scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL_ON | target, SCX_SLICE_DFL, 0);
++	scx_bpf_dispatch(p, SCX_DSQ_LOCAL_ON | target, SCX_SLICE_DFL, 0);
+ 	bpf_task_release(p);
+ }
+ 
+diff --git a/tools/testing/selftests/sched_ext/enq_select_cpu_fails.bpf.c b/tools/testing/selftests/sched_ext/enq_select_cpu_fails.bpf.c
+index a7cf868d5e311d..1efb50d61040ad 100644
+--- a/tools/testing/selftests/sched_ext/enq_select_cpu_fails.bpf.c
++++ b/tools/testing/selftests/sched_ext/enq_select_cpu_fails.bpf.c
+@@ -31,7 +31,7 @@ void BPF_STRUCT_OPS(enq_select_cpu_fails_enqueue, struct task_struct *p,
+ 	/* Can only call from ops.select_cpu() */
+ 	scx_bpf_select_cpu_dfl(p, 0, 0, &found);
+ 
+-	scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
++	scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
+ }
+ 
+ SEC(".struct_ops.link")
+diff --git a/tools/testing/selftests/sched_ext/exit.bpf.c b/tools/testing/selftests/sched_ext/exit.bpf.c
+index 4bc36182d3ffc2..d75d4faf07f6d5 100644
+--- a/tools/testing/selftests/sched_ext/exit.bpf.c
++++ b/tools/testing/selftests/sched_ext/exit.bpf.c
+@@ -33,7 +33,7 @@ void BPF_STRUCT_OPS(exit_enqueue, struct task_struct *p, u64 enq_flags)
+ 	if (exit_point == EXIT_ENQUEUE)
+ 		EXIT_CLEANLY();
+ 
+-	scx_bpf_dsq_insert(p, DSQ_ID, SCX_SLICE_DFL, enq_flags);
++	scx_bpf_dispatch(p, DSQ_ID, SCX_SLICE_DFL, enq_flags);
+ }
+ 
+ void BPF_STRUCT_OPS(exit_dispatch, s32 cpu, struct task_struct *p)
+@@ -41,7 +41,7 @@ void BPF_STRUCT_OPS(exit_dispatch, s32 cpu, struct task_struct *p)
+ 	if (exit_point == EXIT_DISPATCH)
+ 		EXIT_CLEANLY();
+ 
+-	scx_bpf_dsq_move_to_local(DSQ_ID);
++	scx_bpf_consume(DSQ_ID);
+ }
+ 
+ void BPF_STRUCT_OPS(exit_enable, struct task_struct *p)
+diff --git a/tools/testing/selftests/sched_ext/maximal.bpf.c b/tools/testing/selftests/sched_ext/maximal.bpf.c
+index 430f5e13bf5544..361797e10ed5d5 100644
+--- a/tools/testing/selftests/sched_ext/maximal.bpf.c
++++ b/tools/testing/selftests/sched_ext/maximal.bpf.c
+@@ -22,7 +22,7 @@ s32 BPF_STRUCT_OPS(maximal_select_cpu, struct task_struct *p, s32 prev_cpu,
+ 
+ void BPF_STRUCT_OPS(maximal_enqueue, struct task_struct *p, u64 enq_flags)
+ {
+-	scx_bpf_dsq_insert(p, DSQ_ID, SCX_SLICE_DFL, enq_flags);
++	scx_bpf_dispatch(p, DSQ_ID, SCX_SLICE_DFL, enq_flags);
+ }
+ 
+ void BPF_STRUCT_OPS(maximal_dequeue, struct task_struct *p, u64 deq_flags)
+@@ -30,7 +30,7 @@ void BPF_STRUCT_OPS(maximal_dequeue, struct task_struct *p, u64 deq_flags)
+ 
+ void BPF_STRUCT_OPS(maximal_dispatch, s32 cpu, struct task_struct *prev)
+ {
+-	scx_bpf_dsq_move_to_local(DSQ_ID);
++	scx_bpf_consume(DSQ_ID);
+ }
+ 
+ void BPF_STRUCT_OPS(maximal_runnable, struct task_struct *p, u64 enq_flags)
+diff --git a/tools/testing/selftests/sched_ext/select_cpu_dfl.bpf.c b/tools/testing/selftests/sched_ext/select_cpu_dfl.bpf.c
+index 13d0f5be788d12..f171ac47097060 100644
+--- a/tools/testing/selftests/sched_ext/select_cpu_dfl.bpf.c
++++ b/tools/testing/selftests/sched_ext/select_cpu_dfl.bpf.c
+@@ -30,7 +30,7 @@ void BPF_STRUCT_OPS(select_cpu_dfl_enqueue, struct task_struct *p,
+ 	}
+ 	scx_bpf_put_idle_cpumask(idle_mask);
+ 
+-	scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
++	scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
+ }
+ 
+ SEC(".struct_ops.link")
+diff --git a/tools/testing/selftests/sched_ext/select_cpu_dfl_nodispatch.bpf.c b/tools/testing/selftests/sched_ext/select_cpu_dfl_nodispatch.bpf.c
+index 815f1d5d61ac43..9efdbb7da92887 100644
+--- a/tools/testing/selftests/sched_ext/select_cpu_dfl_nodispatch.bpf.c
++++ b/tools/testing/selftests/sched_ext/select_cpu_dfl_nodispatch.bpf.c
+@@ -67,7 +67,7 @@ void BPF_STRUCT_OPS(select_cpu_dfl_nodispatch_enqueue, struct task_struct *p,
+ 		saw_local = true;
+ 	}
+ 
+-	scx_bpf_dsq_insert(p, dsq_id, SCX_SLICE_DFL, enq_flags);
++	scx_bpf_dispatch(p, dsq_id, SCX_SLICE_DFL, enq_flags);
+ }
+ 
+ s32 BPF_STRUCT_OPS(select_cpu_dfl_nodispatch_init_task,
+diff --git a/tools/testing/selftests/sched_ext/select_cpu_dispatch.bpf.c b/tools/testing/selftests/sched_ext/select_cpu_dispatch.bpf.c
+index 4bb99699e9209c..59bfc4f36167a7 100644
+--- a/tools/testing/selftests/sched_ext/select_cpu_dispatch.bpf.c
++++ b/tools/testing/selftests/sched_ext/select_cpu_dispatch.bpf.c
+@@ -29,7 +29,7 @@ s32 BPF_STRUCT_OPS(select_cpu_dispatch_select_cpu, struct task_struct *p,
+ 	cpu = prev_cpu;
+ 
+ dispatch:
+-	scx_bpf_dsq_insert(p, dsq_id, SCX_SLICE_DFL, 0);
++	scx_bpf_dispatch(p, dsq_id, SCX_SLICE_DFL, 0);
+ 	return cpu;
+ }
+ 
+diff --git a/tools/testing/selftests/sched_ext/select_cpu_dispatch_bad_dsq.bpf.c b/tools/testing/selftests/sched_ext/select_cpu_dispatch_bad_dsq.bpf.c
+index 2a75de11b2cfd5..3bbd5fcdfb18e0 100644
+--- a/tools/testing/selftests/sched_ext/select_cpu_dispatch_bad_dsq.bpf.c
++++ b/tools/testing/selftests/sched_ext/select_cpu_dispatch_bad_dsq.bpf.c
+@@ -18,7 +18,7 @@ s32 BPF_STRUCT_OPS(select_cpu_dispatch_bad_dsq_select_cpu, struct task_struct *p
+ 		   s32 prev_cpu, u64 wake_flags)
+ {
+ 	/* Dispatching to a random DSQ should fail. */
+-	scx_bpf_dsq_insert(p, 0xcafef00d, SCX_SLICE_DFL, 0);
++	scx_bpf_dispatch(p, 0xcafef00d, SCX_SLICE_DFL, 0);
+ 
+ 	return prev_cpu;
+ }
+diff --git a/tools/testing/selftests/sched_ext/select_cpu_dispatch_dbl_dsp.bpf.c b/tools/testing/selftests/sched_ext/select_cpu_dispatch_dbl_dsp.bpf.c
+index 99d075695c9743..0fda57fe0ecfae 100644
+--- a/tools/testing/selftests/sched_ext/select_cpu_dispatch_dbl_dsp.bpf.c
++++ b/tools/testing/selftests/sched_ext/select_cpu_dispatch_dbl_dsp.bpf.c
+@@ -18,8 +18,8 @@ s32 BPF_STRUCT_OPS(select_cpu_dispatch_dbl_dsp_select_cpu, struct task_struct *p
+ 		   s32 prev_cpu, u64 wake_flags)
+ {
+ 	/* Dispatching twice in a row is disallowed. */
+-	scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, 0);
+-	scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, 0);
++	scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, 0);
++	scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, 0);
+ 
+ 	return prev_cpu;
+ }
+diff --git a/tools/testing/selftests/sched_ext/select_cpu_vtime.bpf.c b/tools/testing/selftests/sched_ext/select_cpu_vtime.bpf.c
+index bfcb96cd4954bd..e6c67bcf5e6e35 100644
+--- a/tools/testing/selftests/sched_ext/select_cpu_vtime.bpf.c
++++ b/tools/testing/selftests/sched_ext/select_cpu_vtime.bpf.c
+@@ -2,8 +2,8 @@
+ /*
+  * A scheduler that validates that enqueue flags are properly stored and
+  * applied at dispatch time when a task is directly dispatched from
+- * ops.select_cpu(). We validate this by using scx_bpf_dsq_insert_vtime(),
+- * and making the test a very basic vtime scheduler.
++ * ops.select_cpu(). We validate this by using scx_bpf_dispatch_vtime(), and
++ * making the test a very basic vtime scheduler.
+  *
+  * Copyright (c) 2024 Meta Platforms, Inc. and affiliates.
+  * Copyright (c) 2024 David Vernet <dvernet@meta.com>
+@@ -47,13 +47,13 @@ s32 BPF_STRUCT_OPS(select_cpu_vtime_select_cpu, struct task_struct *p,
+ 	cpu = prev_cpu;
+ 	scx_bpf_test_and_clear_cpu_idle(cpu);
+ ddsp:
+-	scx_bpf_dsq_insert_vtime(p, VTIME_DSQ, SCX_SLICE_DFL, task_vtime(p), 0);
++	scx_bpf_dispatch_vtime(p, VTIME_DSQ, SCX_SLICE_DFL, task_vtime(p), 0);
+ 	return cpu;
+ }
+ 
+ void BPF_STRUCT_OPS(select_cpu_vtime_dispatch, s32 cpu, struct task_struct *p)
+ {
+-	if (scx_bpf_dsq_move_to_local(VTIME_DSQ))
++	if (scx_bpf_consume(VTIME_DSQ))
+ 		consumed = true;
+ }
+ 
+diff --git a/tools/tracing/rtla/src/osnoise.c b/tools/tracing/rtla/src/osnoise.c
+index 245e9344932bc4..699a83f538a8e8 100644
+--- a/tools/tracing/rtla/src/osnoise.c
++++ b/tools/tracing/rtla/src/osnoise.c
+@@ -867,7 +867,7 @@ int osnoise_set_workload(struct osnoise_context *context, bool onoff)
+ 
+ 	retval = osnoise_options_set_option("OSNOISE_WORKLOAD", onoff);
+ 	if (retval < 0)
+-		return -1;
++		return -2;
+ 
+ 	context->opt_workload = onoff;
+ 
+diff --git a/tools/tracing/rtla/src/timerlat_hist.c b/tools/tracing/rtla/src/timerlat_hist.c
+index 2cc3ffcbc983d3..4cbd2d8ebb0461 100644
+--- a/tools/tracing/rtla/src/timerlat_hist.c
++++ b/tools/tracing/rtla/src/timerlat_hist.c
+@@ -1091,12 +1091,15 @@ timerlat_hist_apply_config(struct osnoise_tool *tool, struct timerlat_hist_param
+ 		}
+ 	}
+ 
+-	if (params->user_hist) {
+-		retval = osnoise_set_workload(tool->context, 0);
+-		if (retval) {
+-			err_msg("Failed to set OSNOISE_WORKLOAD option\n");
+-			goto out_err;
+-		}
++	/*
++	* Set workload according to type of thread if the kernel supports it.
++	* On kernels without support, user threads will have already failed
++	* on missing timerlat_fd, and kernel threads do not need it.
++	*/
++	retval = osnoise_set_workload(tool->context, params->kernel_workload);
++	if (retval < -1) {
++		err_msg("Failed to set OSNOISE_WORKLOAD option\n");
++		goto out_err;
+ 	}
+ 
+ 	return 0;
+@@ -1137,9 +1140,12 @@ static struct osnoise_tool
+ }
+ 
+ static int stop_tracing;
++static struct trace_instance *hist_inst = NULL;
+ static void stop_hist(int sig)
+ {
+ 	stop_tracing = 1;
++	if (hist_inst)
++		trace_instance_stop(hist_inst);
+ }
+ 
+ /*
+@@ -1185,6 +1191,12 @@ int timerlat_hist_main(int argc, char *argv[])
+ 	}
+ 
+ 	trace = &tool->trace;
++	/*
++	 * Save trace instance into global variable so that SIGINT can stop
++	 * the timerlat tracer.
++	 * Otherwise, rtla could loop indefinitely when overloaded.
++	 */
++	hist_inst = trace;
+ 
+ 	retval = enable_timerlat(trace);
+ 	if (retval) {
+@@ -1331,7 +1343,7 @@ int timerlat_hist_main(int argc, char *argv[])
+ 
+ 	return_value = 0;
+ 
+-	if (trace_is_off(&tool->trace, &record->trace)) {
++	if (trace_is_off(&tool->trace, &record->trace) && !stop_tracing) {
+ 		printf("rtla timerlat hit stop tracing\n");
+ 
+ 		if (!params->no_aa)
+diff --git a/tools/tracing/rtla/src/timerlat_top.c b/tools/tracing/rtla/src/timerlat_top.c
+index ac2ff38a57ee55..d13be28dacd599 100644
+--- a/tools/tracing/rtla/src/timerlat_top.c
++++ b/tools/tracing/rtla/src/timerlat_top.c
+@@ -842,12 +842,15 @@ timerlat_top_apply_config(struct osnoise_tool *top, struct timerlat_top_params *
+ 		}
+ 	}
+ 
+-	if (params->user_top) {
+-		retval = osnoise_set_workload(top->context, 0);
+-		if (retval) {
+-			err_msg("Failed to set OSNOISE_WORKLOAD option\n");
+-			goto out_err;
+-		}
++	/*
++	* Set workload according to type of thread if the kernel supports it.
++	* On kernels without support, user threads will have already failed
++	* on missing timerlat_fd, and kernel threads do not need it.
++	*/
++	retval = osnoise_set_workload(top->context, params->kernel_workload);
++	if (retval < -1) {
++		err_msg("Failed to set OSNOISE_WORKLOAD option\n");
++		goto out_err;
+ 	}
+ 
+ 	if (isatty(1) && !params->quiet)
+@@ -891,9 +894,12 @@ static struct osnoise_tool
+ }
+ 
+ static int stop_tracing;
++static struct trace_instance *top_inst = NULL;
+ static void stop_top(int sig)
+ {
+ 	stop_tracing = 1;
++	if (top_inst)
++		trace_instance_stop(top_inst);
+ }
+ 
+ /*
+@@ -940,6 +946,13 @@ int timerlat_top_main(int argc, char *argv[])
+ 	}
+ 
+ 	trace = &top->trace;
++	/*
++	* Save trace instance into global variable so that SIGINT can stop
++	* the timerlat tracer.
++	* Otherwise, rtla could loop indefinitely when overloaded.
++	*/
++	top_inst = trace;
++
+ 
+ 	retval = enable_timerlat(trace);
+ 	if (retval) {
+@@ -1099,7 +1112,7 @@ int timerlat_top_main(int argc, char *argv[])
+ 
+ 	return_value = 0;
+ 
+-	if (trace_is_off(&top->trace, &record->trace)) {
++	if (trace_is_off(&top->trace, &record->trace) && !stop_tracing) {
+ 		printf("rtla timerlat hit stop tracing\n");
+ 
+ 		if (!params->no_aa)
+diff --git a/tools/tracing/rtla/src/trace.c b/tools/tracing/rtla/src/trace.c
+index 170a706248abff..440323a997c621 100644
+--- a/tools/tracing/rtla/src/trace.c
++++ b/tools/tracing/rtla/src/trace.c
+@@ -196,6 +196,14 @@ int trace_instance_start(struct trace_instance *trace)
+ 	return tracefs_trace_on(trace->inst);
+ }
+ 
++/*
++ * trace_instance_stop - stop tracing a given rtla instance
++ */
++int trace_instance_stop(struct trace_instance *trace)
++{
++	return tracefs_trace_off(trace->inst);
++}
++
+ /*
+  * trace_events_free - free a list of trace events
+  */
+diff --git a/tools/tracing/rtla/src/trace.h b/tools/tracing/rtla/src/trace.h
+index c7c92dc9a18a61..76e1b77291ba2a 100644
+--- a/tools/tracing/rtla/src/trace.h
++++ b/tools/tracing/rtla/src/trace.h
+@@ -21,6 +21,7 @@ struct trace_instance {
+ 
+ int trace_instance_init(struct trace_instance *trace, char *tool_name);
+ int trace_instance_start(struct trace_instance *trace);
++int trace_instance_stop(struct trace_instance *trace);
+ void trace_instance_destroy(struct trace_instance *trace);
+ 
+ struct trace_seq *get_trace_seq(void);
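
A standalone sketch of the signal-handling pattern the patch above introduces (not the rtla sources: only trace_instance_stop() is taken from the patch, the surrounding names are illustrative). Because a SIGINT handler cannot take extra arguments, the trace instance is stashed in a file-scope pointer the handler can reach:

	#include <signal.h>
	#include <stddef.h>

	struct trace_instance;			/* opaque, as in trace.h */
	int trace_instance_stop(struct trace_instance *trace);

	static int stop_tracing;
	static struct trace_instance *cur_inst;	/* set once during setup */

	static void on_sigint(int sig)
	{
		(void)sig;
		stop_tracing = 1;
		if (cur_inst)			/* NULL until setup finishes */
			trace_instance_stop(cur_inst);
	}

	/* During setup: cur_inst = &tool->trace; signal(SIGINT, on_sigint); */

Stopping the tracer from the handler itself, instead of only raising a flag the main loop may never get to check, is what closes the "loop indefinitely when overloaded" window the new comments describe; the added !stop_tracing test then keeps the "hit stop tracing" message and auto-analysis from firing on a plain Ctrl-C.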


* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2025-02-16 21:48 Mike Pagano
  0 siblings, 0 replies; 82+ messages in thread
From: Mike Pagano @ 2025-02-16 21:48 UTC (permalink / raw
  To: gentoo-commits

commit:     f8e6e0a09a78ef67abed5a29f23c6a2db0d259e9
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Feb 16 21:48:06 2025 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Feb 16 21:48:06 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=f8e6e0a0

fortify: Hide run-time copy size from value range tracking

Bug: https://bugs.gentoo.org/947270

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                        |   4 +
 ...ortify-copy-size-value-range-tracking-fix.patch | 161 +++++++++++++++++++++
 2 files changed, 165 insertions(+)

diff --git a/0000_README b/0000_README
index ceb862e7..499702fa 100644
--- a/0000_README
+++ b/0000_README
@@ -95,6 +95,10 @@ Patch:  1012_linux-6.12.13.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.12.13
 
+Patch:  1500_fortify-copy-size-value-range-tracking-fix.patch
+From:   https://git.kernel.org/
+Desc:   fortify: Hide run-time copy size from value range tracking
+
 Patch:  1510_fs-enable-link-security-restrictions-by-default.patch
 From:   http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch/
 Desc:   Enable link security restrictions by default.

diff --git a/1500_fortify-copy-size-value-range-tracking-fix.patch b/1500_fortify-copy-size-value-range-tracking-fix.patch
new file mode 100644
index 00000000..f751e02c
--- /dev/null
+++ b/1500_fortify-copy-size-value-range-tracking-fix.patch
@@ -0,0 +1,161 @@
+From 239d87327dcd361b0098038995f8908f3296864f Mon Sep 17 00:00:00 2001
+From: Kees Cook <kees@kernel.org>
+Date: Thu, 12 Dec 2024 17:28:06 -0800
+Subject: fortify: Hide run-time copy size from value range tracking
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+GCC performs value range tracking for variables as a way to provide better
+diagnostics. One place this is regularly seen is with warnings associated
+with bounds-checking, e.g. -Wstringop-overflow, -Wstringop-overread,
+-Warray-bounds, etc. In order to keep the signal-to-noise ratio high,
+warnings aren't emitted when a value range spans the entire value range
+representable by a given variable. For example:
+
+	unsigned int len;
+	char dst[8];
+	...
+	memcpy(dst, src, len);
+
+If len's value is unknown, it has the full "unsigned int" range of [0,
+UINT_MAX], and GCC's compile-time bounds checks against memcpy() will
+be ignored. However, when a code path has been able to narrow the range:
+
+	if (len > 16)
+		return;
+	memcpy(dst, src, len);
+
+Then the range will be updated for the execution path. Above, len is
+now [0, 16] when reading memcpy(), so depending on other optimizations,
+we might see a -Wstringop-overflow warning like:
+
+	error: '__builtin_memcpy' writing between 9 and 16 bytes into region of size 8 [-Werror=stringop-overflow]
+
+When building with CONFIG_FORTIFY_SOURCE, the fortified run-time bounds
+checking can appear to narrow value ranges of lengths for memcpy(),
+depending on how the compiler constructs the execution paths during
+optimization passes, due to the checks against the field sizes. For
+example:
+
+	if (p_size_field != SIZE_MAX &&
+	    p_size != p_size_field && p_size_field < size)
+
+As intentionally designed, these checks only affect the kernel warnings
+emitted at run-time and do not block the potentially overflowing memcpy(),
+so GCC thinks it needs to produce a warning about the resulting value
+range that might be reaching the memcpy().
+
+We have seen this manifest a few times now, with the most recent being
+with cpumasks:
+
+In function ‘bitmap_copy’,
+    inlined from ‘cpumask_copy’ at ./include/linux/cpumask.h:839:2,
+    inlined from ‘__padata_set_cpumasks’ at kernel/padata.c:730:2:
+./include/linux/fortify-string.h:114:33: error: ‘__builtin_memcpy’ reading between 257 and 536870904 bytes from a region of size 256 [-Werror=stringop-overread]
+  114 | #define __underlying_memcpy     __builtin_memcpy
+      |                                 ^
+./include/linux/fortify-string.h:633:9: note: in expansion of macro ‘__underlying_memcpy’
+  633 |         __underlying_##op(p, q, __fortify_size);                        \
+      |         ^~~~~~~~~~~~~
+./include/linux/fortify-string.h:678:26: note: in expansion of macro ‘__fortify_memcpy_chk’
+  678 | #define memcpy(p, q, s)  __fortify_memcpy_chk(p, q, s,                  \
+      |                          ^~~~~~~~~~~~~~~~~~~~
+./include/linux/bitmap.h:259:17: note: in expansion of macro ‘memcpy’
+  259 |                 memcpy(dst, src, len);
+      |                 ^~~~~~
+kernel/padata.c: In function ‘__padata_set_cpumasks’:
+kernel/padata.c:713:48: note: source object ‘pcpumask’ of size [0, 256]
+  713 |                                  cpumask_var_t pcpumask,
+      |                                  ~~~~~~~~~~~~~~^~~~~~~~
+
+This warning is _not_ emitted when CONFIG_FORTIFY_SOURCE is disabled,
+and with the recent -fdiagnostics-details we can confirm the origin of
+the warning is due to FORTIFY's bounds checking:
+
+../include/linux/bitmap.h:259:17: note: in expansion of macro 'memcpy'
+  259 |                 memcpy(dst, src, len);
+      |                 ^~~~~~
+  '__padata_set_cpumasks': events 1-2
+../include/linux/fortify-string.h:613:36:
+  612 |         if (p_size_field != SIZE_MAX &&
+      |             ~~~~~~~~~~~~~~~~~~~~~~~~~~~
+  613 |             p_size != p_size_field && p_size_field < size)
+      |             ~~~~~~~~~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~~~~~~~~
+      |                                    |
+      |                                    (1) when the condition is evaluated to false
+      |                                    (2) when the condition is evaluated to true
+  '__padata_set_cpumasks': event 3
+  114 | #define __underlying_memcpy     __builtin_memcpy
+      |                                 ^
+      |                                 |
+      |                                 (3) out of array bounds here
+
+Note that the cpumask warning started appearing since bitmap functions
+were recently marked __always_inline in commit ed8cd2b3bd9f ("bitmap:
+Switch from inline to __always_inline"), which allowed GCC to gain
+visibility into the variables as they passed through the FORTIFY
+implementation.
+
+In order to silence these false positives but keep otherwise deterministic
+compile-time warnings intact, hide the length variable from GCC with
+OPTIMIZE_HIDE_VAR() before calling the builtin memcpy.
+
+Additionally add a comment about why all the macro args have copies with
+const storage.
+
+Reported-by: "Thomas Weißschuh" <linux@weissschuh.net>
+Closes: https://lore.kernel.org/all/db7190c8-d17f-4a0d-bc2f-5903c79f36c2@t-8ch.de/
+Reported-by: Nilay Shroff <nilay@linux.ibm.com>
+Closes: https://lore.kernel.org/all/20241112124127.1666300-1-nilay@linux.ibm.com/
+Tested-by: Nilay Shroff <nilay@linux.ibm.com>
+Acked-by: Yury Norov <yury.norov@gmail.com>
+Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Kees Cook <kees@kernel.org>
+---
+ include/linux/fortify-string.h | 14 +++++++++++++-
+ 1 file changed, 13 insertions(+), 1 deletion(-)
+
+(limited to 'include/linux/fortify-string.h')
+
+diff --git a/include/linux/fortify-string.h b/include/linux/fortify-string.h
+index 0d99bf11d260a3..e4ce1cae03bf77 100644
+--- a/include/linux/fortify-string.h
++++ b/include/linux/fortify-string.h
+@@ -616,6 +616,12 @@ __FORTIFY_INLINE bool fortify_memcpy_chk(__kernel_size_t size,
+ 	return false;
+ }
+ 
++/*
++ * To work around what seems to be an optimizer bug, the macro arguments
++ * need to have const copies or the values end up changed by the time they
++ * reach fortify_warn_once(). See commit 6f7630b1b5bc ("fortify: Capture
++ * __bos() results in const temp vars") for more details.
++ */
+ #define __fortify_memcpy_chk(p, q, size, p_size, q_size,		\
+ 			     p_size_field, q_size_field, op) ({		\
+ 	const size_t __fortify_size = (size_t)(size);			\
+@@ -623,6 +629,8 @@ __FORTIFY_INLINE bool fortify_memcpy_chk(__kernel_size_t size,
+ 	const size_t __q_size = (q_size);				\
+ 	const size_t __p_size_field = (p_size_field);			\
+ 	const size_t __q_size_field = (q_size_field);			\
++	/* Keep a mutable version of the size for the final copy. */	\
++	size_t __copy_size = __fortify_size;				\
+ 	fortify_warn_once(fortify_memcpy_chk(__fortify_size, __p_size,	\
+ 				     __q_size, __p_size_field,		\
+ 				     __q_size_field, FORTIFY_FUNC_ ##op), \
+@@ -630,7 +638,11 @@ __FORTIFY_INLINE bool fortify_memcpy_chk(__kernel_size_t size,
+ 		  __fortify_size,					\
+ 		  "field \"" #p "\" at " FILE_LINE,			\
+ 		  __p_size_field);					\
+-	__underlying_##op(p, q, __fortify_size);			\
++	/* Hide only the run-time size from value range tracking to */	\
++	/* silence compile-time false positive bounds warnings. */	\
++	if (!__builtin_constant_p(__copy_size))				\
++		OPTIMIZER_HIDE_VAR(__copy_size);			\
++	__underlying_##op(p, q, __copy_size);				\
+ })
+ 
+ /*
+-- 
+cgit 1.2.3-korg
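
The effect of the fix is easier to see outside the kernel. A minimal user-space sketch, assuming GCC with optimization and the stringop warnings enabled; HIDE_VAR() here is a local stand-in for the kernel's OPTIMIZER_HIDE_VAR() from include/linux/compiler.h, and the function mirrors the commit message's own memcpy() example:

	#include <string.h>

	/* Launder the value through an empty asm so the compiler forgets
	 * the range it has tracked for it; no instructions are emitted. */
	#define HIDE_VAR(var) __asm__("" : "=r"(var) : "0"(var))

	void fill(char *out, const char *src, unsigned int len)
	{
		char dst[8];

		if (len > 16)
			return;
		/* len is now known to be in [0, 16]; run-time checks on
		 * another path can narrow it further and, depending on
		 * other optimizations, trigger "-Wstringop-overflow:
		 * writing between 9 and 16 bytes into a region of size 8". */
		HIDE_VAR(len);		/* range collapses back to unknown */
		memcpy(dst, src, len);
		memcpy(out, dst, sizeof(dst));	/* keep dst live */
	}

The actual patch applies the hiding only when the size is not a compile-time constant (the __builtin_constant_p() test in the hunk above), so deterministic warnings about provably bad constant-size copies still fire.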


* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2025-02-08 11:26 Mike Pagano
  0 siblings, 0 replies; 82+ messages in thread
From: Mike Pagano @ 2025-02-08 11:26 UTC (permalink / raw
  To: gentoo-commits

commit:     2415ed0f24c93b7a8b46736d437efcca53246fd4
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Feb  8 11:25:59 2025 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Feb  8 11:25:59 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=2415ed0f

Linux patch 6.12.13

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |     4 +
 1012_linux-6.12.13.patch | 26355 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 26359 insertions(+)

diff --git a/0000_README b/0000_README
index 17858f75..ceb862e7 100644
--- a/0000_README
+++ b/0000_README
@@ -91,6 +91,10 @@ Patch:  1011_linux-6.12.12.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.12.12
 
+Patch:  1012_linux-6.12.13.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.12.13
+
 Patch:  1510_fs-enable-link-security-restrictions-by-default.patch
 From:   http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch/
 Desc:   Enable link security restrictions by default.

diff --git a/1012_linux-6.12.13.patch b/1012_linux-6.12.13.patch
new file mode 100644
index 00000000..784a2897
--- /dev/null
+++ b/1012_linux-6.12.13.patch
@@ -0,0 +1,26355 @@
+diff --git a/Documentation/core-api/symbol-namespaces.rst b/Documentation/core-api/symbol-namespaces.rst
+index 12e4aecdae9452..d1154eb438101a 100644
+--- a/Documentation/core-api/symbol-namespaces.rst
++++ b/Documentation/core-api/symbol-namespaces.rst
+@@ -68,7 +68,7 @@ is to define the default namespace in the ``Makefile`` of the subsystem. E.g. to
+ export all symbols defined in usb-common into the namespace USB_COMMON, add a
+ line like this to drivers/usb/common/Makefile::
+ 
+-	ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=USB_COMMON
++	ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE='"USB_COMMON"'
+ 
+ That will affect all EXPORT_SYMBOL() and EXPORT_SYMBOL_GPL() statements. A
+ symbol exported with EXPORT_SYMBOL_NS() while this definition is present, will
+@@ -79,7 +79,7 @@ A second option to define the default namespace is directly in the compilation
+ unit as preprocessor statement. The above example would then read::
+ 
+ 	#undef  DEFAULT_SYMBOL_NAMESPACE
+-	#define DEFAULT_SYMBOL_NAMESPACE USB_COMMON
++	#define DEFAULT_SYMBOL_NAMESPACE "USB_COMMON"
+ 
+ within the corresponding compilation unit before any EXPORT_SYMBOL macro is
+ used.
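
As an aside, to make the quoting change above concrete: a hypothetical compilation unit using the new string form (usb_common_helper and the placement of the define before the includes are illustrative; USB_COMMON follows the documentation's own example):

	#undef  DEFAULT_SYMBOL_NAMESPACE
	#define DEFAULT_SYMBOL_NAMESPACE "USB_COMMON"

	#include <linux/export.h>
	#include <linux/module.h>

	int usb_common_helper(void)
	{
		return 0;
	}
	/* Exported into USB_COMMON via the default set above. */
	EXPORT_SYMBOL(usb_common_helper);

A consumer of usb_common_helper() would then need MODULE_IMPORT_NS("USB_COMMON"), with the namespace quoted the same way, or modpost warns about the missing namespace import.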
+diff --git a/Documentation/devicetree/bindings/clock/imx93-clock.yaml b/Documentation/devicetree/bindings/clock/imx93-clock.yaml
+index ccb53c6b96c119..98c0800732ef5d 100644
+--- a/Documentation/devicetree/bindings/clock/imx93-clock.yaml
++++ b/Documentation/devicetree/bindings/clock/imx93-clock.yaml
+@@ -16,6 +16,7 @@ description: |
+ properties:
+   compatible:
+     enum:
++      - fsl,imx91-ccm
+       - fsl,imx93-ccm
+ 
+   reg:
+diff --git a/Documentation/devicetree/bindings/leds/leds-class-multicolor.yaml b/Documentation/devicetree/bindings/leds/leds-class-multicolor.yaml
+index e850a8894758df..bb40bb9e036ee0 100644
+--- a/Documentation/devicetree/bindings/leds/leds-class-multicolor.yaml
++++ b/Documentation/devicetree/bindings/leds/leds-class-multicolor.yaml
+@@ -27,7 +27,7 @@ properties:
+     description: |
+       For multicolor LED support this property should be defined as either
+       LED_COLOR_ID_RGB or LED_COLOR_ID_MULTI which can be found in
+-      include/linux/leds/common.h.
++      include/dt-bindings/leds/common.h.
+     enum: [ 8, 9 ]
+ 
+ required:
+diff --git a/Documentation/devicetree/bindings/mfd/rohm,bd71815-pmic.yaml b/Documentation/devicetree/bindings/mfd/rohm,bd71815-pmic.yaml
+index bb81307dc11b89..4fc78efaa5504a 100644
+--- a/Documentation/devicetree/bindings/mfd/rohm,bd71815-pmic.yaml
++++ b/Documentation/devicetree/bindings/mfd/rohm,bd71815-pmic.yaml
+@@ -50,15 +50,15 @@ properties:
+     minimum: 0
+     maximum: 1
+ 
+-  rohm,charger-sense-resistor-ohms:
+-    minimum: 10000000
+-    maximum: 50000000
++  rohm,charger-sense-resistor-micro-ohms:
++    minimum: 10000
++    maximum: 50000
+     description: |
+-      BD71827 and BD71828 have SAR ADC for measuring charging currents.
+-      External sense resistor (RSENSE in data sheet) should be used. If
+-      something other but 30MOhm resistor is used the resistance value
+-      should be given here in Ohms.
+-    default: 30000000
++      BD71815 has SAR ADC for measuring charging currents. External sense
++      resistor (RSENSE in data sheet) should be used. If something other
++      but a 30 mOhm resistor is used the resistance value should be given
++      here in micro Ohms.
++    default: 30000
+ 
+   regulators:
+     $ref: /schemas/regulator/rohm,bd71815-regulator.yaml
+@@ -67,7 +67,7 @@ properties:
+ 
+   gpio-reserved-ranges:
+     description: |
+-      Usage of BD71828 GPIO pins can be changed via OTP. This property can be
++      Usage of BD71815 GPIO pins can be changed via OTP. This property can be
+       used to mark the pins which should not be configured for GPIO. Please see
+       the ../gpio/gpio.txt for more information.
+ 
+@@ -113,7 +113,7 @@ examples:
+             gpio-controller;
+             #gpio-cells = <2>;
+ 
+-            rohm,charger-sense-resistor-ohms = <10000000>;
++            rohm,charger-sense-resistor-micro-ohms = <10000>;
+ 
+             regulators {
+                 buck1: buck1 {
+diff --git a/Documentation/devicetree/bindings/mmc/mmc-controller.yaml b/Documentation/devicetree/bindings/mmc/mmc-controller.yaml
+index 58ae298cd2fcf4..23884b8184a9df 100644
+--- a/Documentation/devicetree/bindings/mmc/mmc-controller.yaml
++++ b/Documentation/devicetree/bindings/mmc/mmc-controller.yaml
+@@ -25,7 +25,7 @@ properties:
+   "#address-cells":
+     const: 1
+     description: |
+-      The cell is the slot ID if a function subnode is used.
++      The cell is the SDIO function number if a function subnode is used.
+ 
+   "#size-cells":
+     const: 0
+diff --git a/Documentation/devicetree/bindings/regulator/mt6315-regulator.yaml b/Documentation/devicetree/bindings/regulator/mt6315-regulator.yaml
+index cd4aa27218a1b6..fa6743bb269d44 100644
+--- a/Documentation/devicetree/bindings/regulator/mt6315-regulator.yaml
++++ b/Documentation/devicetree/bindings/regulator/mt6315-regulator.yaml
+@@ -35,10 +35,6 @@ properties:
+         $ref: regulator.yaml#
+         unevaluatedProperties: false
+ 
+-        properties:
+-          regulator-compatible:
+-            pattern: "^vbuck[1-4]$"
+-
+     additionalProperties: false
+ 
+ required:
+@@ -56,7 +52,6 @@ examples:
+ 
+       regulators {
+         vbuck1 {
+-          regulator-compatible = "vbuck1";
+           regulator-min-microvolt = <300000>;
+           regulator-max-microvolt = <1193750>;
+           regulator-enable-ramp-delay = <256>;
+@@ -64,7 +59,6 @@ examples:
+         };
+ 
+         vbuck3 {
+-          regulator-compatible = "vbuck3";
+           regulator-min-microvolt = <300000>;
+           regulator-max-microvolt = <1193750>;
+           regulator-enable-ramp-delay = <256>;
+diff --git a/Documentation/driver-api/crypto/iaa/iaa-crypto.rst b/Documentation/driver-api/crypto/iaa/iaa-crypto.rst
+index bba40158dd5c5a..8e50b900d51c27 100644
+--- a/Documentation/driver-api/crypto/iaa/iaa-crypto.rst
++++ b/Documentation/driver-api/crypto/iaa/iaa-crypto.rst
+@@ -272,7 +272,7 @@ The available attributes are:
+       echo async_irq > /sys/bus/dsa/drivers/crypto/sync_mode
+ 
+     Async mode without interrupts (caller must poll) can be enabled by
+-    writing 'async' to it::
++    writing 'async' to it (please see Caveat)::
+ 
+       echo async > /sys/bus/dsa/drivers/crypto/sync_mode
+ 
+@@ -283,6 +283,13 @@ The available attributes are:
+ 
+     The default mode is 'sync'.
+ 
++    Caveat: since the only mechanism that iaa_crypto currently implements
++    for async polling without interrupts is via the 'sync' mode as
++    described earlier, writing 'async' to
++    '/sys/bus/dsa/drivers/crypto/sync_mode' will internally enable the
++    'sync' mode. This is to ensure correct iaa_crypto behavior until true
++    async polling without interrupts is enabled in iaa_crypto.
++
+ .. _iaa_default_config:
+ 
+ IAA Default Configuration
+diff --git a/Documentation/translations/it_IT/core-api/symbol-namespaces.rst b/Documentation/translations/it_IT/core-api/symbol-namespaces.rst
+index 17abc25ee4c1e4..6657f82c0101f1 100644
+--- a/Documentation/translations/it_IT/core-api/symbol-namespaces.rst
++++ b/Documentation/translations/it_IT/core-api/symbol-namespaces.rst
+@@ -69,7 +69,7 @@ Per esempio per esportare tutti i simboli definiti in usb-common nello spazio
+ dei nomi USB_COMMON, si può aggiungere la seguente linea in
+ drivers/usb/common/Makefile::
+ 
+-	ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=USB_COMMON
++	ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE='"USB_COMMON"'
+ 
+ Questo cambierà tutte le macro EXPORT_SYMBOL() ed EXPORT_SYMBOL_GPL(). Invece,
+ un simbolo esportato con EXPORT_SYMBOL_NS() non verrà cambiato e il simbolo
+@@ -79,7 +79,7 @@ Una seconda possibilità è quella di definire il simbolo di preprocessore
+ direttamente nei file da compilare. L'esempio precedente diventerebbe::
+ 
+ 	#undef  DEFAULT_SYMBOL_NAMESPACE
+-	#define DEFAULT_SYMBOL_NAMESPACE USB_COMMON
++	#define DEFAULT_SYMBOL_NAMESPACE "USB_COMMON"
+ 
+ Questo va messo prima di un qualsiasi uso di EXPORT_SYMBOL.
+ 
+diff --git a/Documentation/translations/zh_CN/core-api/symbol-namespaces.rst b/Documentation/translations/zh_CN/core-api/symbol-namespaces.rst
+index bb16f0611046d3..f3e73834f7d7df 100644
+--- a/Documentation/translations/zh_CN/core-api/symbol-namespaces.rst
++++ b/Documentation/translations/zh_CN/core-api/symbol-namespaces.rst
+@@ -66,7 +66,7 @@
+ 子系统的 ``Makefile`` 中定义默认命名空间。例如,如果要将usb-common中定义的所有符号导
+ 出到USB_COMMON命名空间,可以在drivers/usb/common/Makefile中添加这样一行::
+ 
+-       ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=USB_COMMON
++       ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE='"USB_COMMON"'
+ 
+ 这将影响所有 EXPORT_SYMBOL() 和 EXPORT_SYMBOL_GPL() 语句。当这个定义存在时,
+ 用EXPORT_SYMBOL_NS()导出的符号仍然会被导出到作为命名空间参数传递的命名空间中,
+@@ -76,7 +76,7 @@
+ 成::
+ 
+        #undef  DEFAULT_SYMBOL_NAMESPACE
+-       #define DEFAULT_SYMBOL_NAMESPACE USB_COMMON
++       #define DEFAULT_SYMBOL_NAMESPACE "USB_COMMON"
+ 
+ 应置于相关编译单元中任何 EXPORT_SYMBOL 宏之前
+ 
+diff --git a/Makefile b/Makefile
+index 9e6246e733eb94..5442ff45f963ed 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 12
+-SUBLEVEL = 12
++SUBLEVEL = 13
+ EXTRAVERSION =
+ NAME = Baby Opossum Posse
+ 
+@@ -509,7 +509,7 @@ KGZIP		= gzip
+ KBZIP2		= bzip2
+ KLZOP		= lzop
+ LZMA		= lzma
+-LZ4		= lz4c
++LZ4		= lz4
+ XZ		= xz
+ ZSTD		= zstd
+ 
+diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-yosemite4.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-yosemite4.dts
+index 98477792aa005a..14d17510310680 100644
+--- a/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-yosemite4.dts
++++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-yosemite4.dts
+@@ -284,12 +284,12 @@ &i2c10 {
+ &i2c11 {
+ 	status = "okay";
+ 	power-sensor@10 {
+-		compatible = "adi, adm1272";
++		compatible = "adi,adm1272";
+ 		reg = <0x10>;
+ 	};
+ 
+ 	power-sensor@12 {
+-		compatible = "adi, adm1272";
++		compatible = "adi,adm1272";
+ 		reg = <0x12>;
+ 	};
+ 
+@@ -461,22 +461,20 @@ adc@1f {
+ 			};
+ 
+ 			pwm@20{
+-				compatible = "max31790";
++				compatible = "maxim,max31790";
+ 				reg = <0x20>;
+-				#address-cells = <1>;
+-				#size-cells = <0>;
+ 			};
+ 
+ 			gpio@22{
+ 				compatible = "ti,tca6424";
+ 				reg = <0x22>;
++				gpio-controller;
++				#gpio-cells = <2>;
+ 			};
+ 
+ 			pwm@23{
+-				compatible = "max31790";
++				compatible = "maxim,max31790";
+ 				reg = <0x23>;
+-				#address-cells = <1>;
+-				#size-cells = <0>;
+ 			};
+ 
+ 			adc@33 {
+@@ -511,22 +509,20 @@ adc@1f {
+ 			};
+ 
+ 			pwm@20{
+-				compatible = "max31790";
++				compatible = "maxim,max31790";
+ 				reg = <0x20>;
+-				#address-cells = <1>;
+-				#size-cells = <0>;
+ 			};
+ 
+ 			gpio@22{
+ 				compatible = "ti,tca6424";
+ 				reg = <0x22>;
++				gpio-controller;
++				#gpio-cells = <2>;
+ 			};
+ 
+ 			pwm@23{
+-				compatible = "max31790";
++				compatible = "maxim,max31790";
+ 				reg = <0x23>;
+-				#address-cells = <1>;
+-				#size-cells = <0>;
+ 			};
+ 
+ 			adc@33 {
+diff --git a/arch/arm/boot/dts/intel/socfpga/socfpga_arria10.dtsi b/arch/arm/boot/dts/intel/socfpga/socfpga_arria10.dtsi
+index 6b6e77596ffa86..b108265e9bde42 100644
+--- a/arch/arm/boot/dts/intel/socfpga/socfpga_arria10.dtsi
++++ b/arch/arm/boot/dts/intel/socfpga/socfpga_arria10.dtsi
+@@ -440,7 +440,7 @@ gmac0: ethernet@ff800000 {
+ 			clocks = <&l4_mp_clk>, <&peri_emac_ptp_clk>;
+ 			clock-names = "stmmaceth", "ptp_ref";
+ 			resets = <&rst EMAC0_RESET>, <&rst EMAC0_OCP_RESET>;
+-			reset-names = "stmmaceth", "ahb";
++			reset-names = "stmmaceth", "stmmaceth-ocp";
+ 			snps,axi-config = <&socfpga_axi_setup>;
+ 			status = "disabled";
+ 		};
+@@ -460,7 +460,7 @@ gmac1: ethernet@ff802000 {
+ 			clocks = <&l4_mp_clk>, <&peri_emac_ptp_clk>;
+ 			clock-names = "stmmaceth", "ptp_ref";
+ 			resets = <&rst EMAC1_RESET>, <&rst EMAC1_OCP_RESET>;
+-			reset-names = "stmmaceth", "ahb";
++			reset-names = "stmmaceth", "stmmaceth-ocp";
+ 			snps,axi-config = <&socfpga_axi_setup>;
+ 			status = "disabled";
+ 		};
+@@ -480,7 +480,7 @@ gmac2: ethernet@ff804000 {
+ 			clocks = <&l4_mp_clk>, <&peri_emac_ptp_clk>;
+ 			clock-names = "stmmaceth", "ptp_ref";
+ 			resets = <&rst EMAC2_RESET>, <&rst EMAC2_OCP_RESET>;
+-			reset-names = "stmmaceth", "ahb";
++			reset-names = "stmmaceth", "stmmaceth-ocp";
+ 			snps,axi-config = <&socfpga_axi_setup>;
+ 			status = "disabled";
+ 		};
+diff --git a/arch/arm/boot/dts/mediatek/mt7623.dtsi b/arch/arm/boot/dts/mediatek/mt7623.dtsi
+index 814586abc2979e..fd7a89cc337d69 100644
+--- a/arch/arm/boot/dts/mediatek/mt7623.dtsi
++++ b/arch/arm/boot/dts/mediatek/mt7623.dtsi
+@@ -308,7 +308,7 @@ pwrap: pwrap@1000d000 {
+ 		clock-names = "spi", "wrap";
+ 	};
+ 
+-	cir: cir@10013000 {
++	cir: ir-receiver@10013000 {
+ 		compatible = "mediatek,mt7623-cir";
+ 		reg = <0 0x10013000 0 0x1000>;
+ 		interrupts = <GIC_SPI 87 IRQ_TYPE_LEVEL_LOW>;
+diff --git a/arch/arm/boot/dts/microchip/at91-sama5d27_wlsom1_ek.dts b/arch/arm/boot/dts/microchip/at91-sama5d27_wlsom1_ek.dts
+index 15239834d886ed..35a933eec5738f 100644
+--- a/arch/arm/boot/dts/microchip/at91-sama5d27_wlsom1_ek.dts
++++ b/arch/arm/boot/dts/microchip/at91-sama5d27_wlsom1_ek.dts
+@@ -197,6 +197,7 @@ qspi1_flash: flash@0 {
+ 
+ &sdmmc0 {
+ 	bus-width = <4>;
++	no-1-8-v;
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&pinctrl_sdmmc0_default>;
+ 	status = "okay";
+diff --git a/arch/arm/boot/dts/microchip/at91-sama5d29_curiosity.dts b/arch/arm/boot/dts/microchip/at91-sama5d29_curiosity.dts
+index 951a0c97d3c6bb..5933840bb8f7e0 100644
+--- a/arch/arm/boot/dts/microchip/at91-sama5d29_curiosity.dts
++++ b/arch/arm/boot/dts/microchip/at91-sama5d29_curiosity.dts
+@@ -514,6 +514,7 @@ kernel@200000 {
+ 
+ &sdmmc0 {
+ 	bus-width = <4>;
++	no-1-8-v;
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&pinctrl_sdmmc0_default>;
+ 	disable-wp;
+diff --git a/arch/arm/boot/dts/nxp/imx/imx7-tqma7.dtsi b/arch/arm/boot/dts/nxp/imx/imx7-tqma7.dtsi
+index 028961eb71089c..91ca23a66bf3c2 100644
+--- a/arch/arm/boot/dts/nxp/imx/imx7-tqma7.dtsi
++++ b/arch/arm/boot/dts/nxp/imx/imx7-tqma7.dtsi
+@@ -135,6 +135,7 @@ vgen6_reg: vldo4 {
+ 	lm75a: temperature-sensor@48 {
+ 		compatible = "national,lm75a";
+ 		reg = <0x48>;
++		vs-supply = <&vgen4_reg>;
+ 	};
+ 
+ 	/* NXP SE97BTP with temperature sensor + eeprom, TQMa7x 02xx */
+diff --git a/arch/arm/boot/dts/st/stm32mp13xx-dhcor-som.dtsi b/arch/arm/boot/dts/st/stm32mp13xx-dhcor-som.dtsi
+index ddad6497775b8e..ffb7233b063d23 100644
+--- a/arch/arm/boot/dts/st/stm32mp13xx-dhcor-som.dtsi
++++ b/arch/arm/boot/dts/st/stm32mp13xx-dhcor-som.dtsi
+@@ -85,8 +85,8 @@ regulators {
+ 
+ 			vddcpu: buck1 { /* VDD_CPU_1V2 */
+ 				regulator-name = "vddcpu";
+-				regulator-min-microvolt = <1250000>;
+-				regulator-max-microvolt = <1250000>;
++				regulator-min-microvolt = <1350000>;
++				regulator-max-microvolt = <1350000>;
+ 				regulator-always-on;
+ 				regulator-initial-mode = <0>;
+ 				regulator-over-current-protection;
+diff --git a/arch/arm/boot/dts/st/stm32mp151.dtsi b/arch/arm/boot/dts/st/stm32mp151.dtsi
+index 4f878ec102c1f6..fdc42a89bd37d4 100644
+--- a/arch/arm/boot/dts/st/stm32mp151.dtsi
++++ b/arch/arm/boot/dts/st/stm32mp151.dtsi
+@@ -129,7 +129,7 @@ ipcc: mailbox@4c001000 {
+ 			reg = <0x4c001000 0x400>;
+ 			st,proc-id = <0>;
+ 			interrupts-extended =
+-				<&exti 61 1>,
++				<&exti 61 IRQ_TYPE_LEVEL_HIGH>,
+ 				<&intc GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>;
+ 			interrupt-names = "rx", "tx";
+ 			clocks = <&rcc IPCC>;
+diff --git a/arch/arm/boot/dts/st/stm32mp15xx-dhcom-drc02.dtsi b/arch/arm/boot/dts/st/stm32mp15xx-dhcom-drc02.dtsi
+index bb4f8a0b937f37..abe2dfe706364b 100644
+--- a/arch/arm/boot/dts/st/stm32mp15xx-dhcom-drc02.dtsi
++++ b/arch/arm/boot/dts/st/stm32mp15xx-dhcom-drc02.dtsi
+@@ -6,18 +6,6 @@
+ #include <dt-bindings/input/input.h>
+ #include <dt-bindings/pwm/pwm.h>
+ 
+-/ {
+-	aliases {
+-		serial0 = &uart4;
+-		serial1 = &usart3;
+-		serial2 = &uart8;
+-	};
+-
+-	chosen {
+-		stdout-path = "serial0:115200n8";
+-	};
+-};
+-
+ &adc {
+ 	status = "disabled";
+ };
+diff --git a/arch/arm/boot/dts/st/stm32mp15xx-dhcom-pdk2.dtsi b/arch/arm/boot/dts/st/stm32mp15xx-dhcom-pdk2.dtsi
+index 171d7c7658fa86..0fb4e55843b9d2 100644
+--- a/arch/arm/boot/dts/st/stm32mp15xx-dhcom-pdk2.dtsi
++++ b/arch/arm/boot/dts/st/stm32mp15xx-dhcom-pdk2.dtsi
+@@ -7,16 +7,6 @@
+ #include <dt-bindings/pwm/pwm.h>
+ 
+ / {
+-	aliases {
+-		serial0 = &uart4;
+-		serial1 = &usart3;
+-		serial2 = &uart8;
+-	};
+-
+-	chosen {
+-		stdout-path = "serial0:115200n8";
+-	};
+-
+ 	clk_ext_audio_codec: clock-codec {
+ 		compatible = "fixed-clock";
+ 		#clock-cells = <0>;
+diff --git a/arch/arm/boot/dts/st/stm32mp15xx-dhcom-picoitx.dtsi b/arch/arm/boot/dts/st/stm32mp15xx-dhcom-picoitx.dtsi
+index b5bc53accd6b2f..01c693cc03446c 100644
+--- a/arch/arm/boot/dts/st/stm32mp15xx-dhcom-picoitx.dtsi
++++ b/arch/arm/boot/dts/st/stm32mp15xx-dhcom-picoitx.dtsi
+@@ -7,16 +7,6 @@
+ #include <dt-bindings/pwm/pwm.h>
+ 
+ / {
+-	aliases {
+-		serial0 = &uart4;
+-		serial1 = &usart3;
+-		serial2 = &uart8;
+-	};
+-
+-	chosen {
+-		stdout-path = "serial0:115200n8";
+-	};
+-
+ 	led {
+ 		compatible = "gpio-leds";
+ 
+diff --git a/arch/arm/boot/dts/st/stm32mp15xx-dhcom-som.dtsi b/arch/arm/boot/dts/st/stm32mp15xx-dhcom-som.dtsi
+index 74a11ccc5333f8..142d4a8731f8d4 100644
+--- a/arch/arm/boot/dts/st/stm32mp15xx-dhcom-som.dtsi
++++ b/arch/arm/boot/dts/st/stm32mp15xx-dhcom-som.dtsi
+@@ -14,6 +14,13 @@ aliases {
+ 		ethernet1 = &ksz8851;
+ 		rtc0 = &hwrtc;
+ 		rtc1 = &rtc;
++		serial0 = &uart4;
++		serial1 = &uart8;
++		serial2 = &usart3;
++	};
++
++	chosen {
++		stdout-path = "serial0:115200n8";
+ 	};
+ 
+ 	memory@c0000000 {
+diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
+index b9b995f8a36e14..05a1547642b60f 100644
+--- a/arch/arm/mach-at91/pm.c
++++ b/arch/arm/mach-at91/pm.c
+@@ -598,7 +598,21 @@ static int at91_suspend_finish(unsigned long val)
+ 	return 0;
+ }
+ 
+-static void at91_pm_switch_ba_to_vbat(void)
++/**
++ * at91_pm_switch_ba_to_auto() - Configure Backup Unit Power Switch
++ * to automatic/hardware mode.
++ *
++ * The Backup Unit Power Switch can be managed either by software or hardware.
++ * Enabling hardware mode allows the automatic transition of power between
++ * VDDANA (or VDDIN33) and VDDBU (or VBAT, respectively), based on the
++ * availability of these power sources.
++ *
++ * If the Backup Unit Power Switch is already in automatic mode, no action is
++ * required. If it is in software-controlled mode, it is switched to automatic
++ * mode to enhance safety and eliminate the need for toggling between power
++ * sources.
++ */
++static void at91_pm_switch_ba_to_auto(void)
+ {
+ 	unsigned int offset = offsetof(struct at91_pm_sfrbu_regs, pswbu);
+ 	unsigned int val;
+@@ -609,24 +623,19 @@ static void at91_pm_switch_ba_to_vbat(void)
+ 
+ 	val = readl(soc_pm.data.sfrbu + offset);
+ 
+-	/* Already on VBAT. */
+-	if (!(val & soc_pm.sfrbu_regs.pswbu.state))
++	/* Already on auto/hardware. */
++	if (!(val & soc_pm.sfrbu_regs.pswbu.ctrl))
+ 		return;
+ 
+-	val &= ~soc_pm.sfrbu_regs.pswbu.softsw;
+-	val |= soc_pm.sfrbu_regs.pswbu.key | soc_pm.sfrbu_regs.pswbu.ctrl;
++	val &= ~soc_pm.sfrbu_regs.pswbu.ctrl;
++	val |= soc_pm.sfrbu_regs.pswbu.key;
+ 	writel(val, soc_pm.data.sfrbu + offset);
+-
+-	/* Wait for update. */
+-	val = readl(soc_pm.data.sfrbu + offset);
+-	while (val & soc_pm.sfrbu_regs.pswbu.state)
+-		val = readl(soc_pm.data.sfrbu + offset);
+ }
+ 
+ static void at91_pm_suspend(suspend_state_t state)
+ {
+ 	if (soc_pm.data.mode == AT91_PM_BACKUP) {
+-		at91_pm_switch_ba_to_vbat();
++		at91_pm_switch_ba_to_auto();
+ 
+ 		cpu_suspend(0, at91_suspend_finish);
+ 
+diff --git a/arch/arm/mach-omap1/board-nokia770.c b/arch/arm/mach-omap1/board-nokia770.c
+index 3312ef93355da7..a5bf5554800fe1 100644
+--- a/arch/arm/mach-omap1/board-nokia770.c
++++ b/arch/arm/mach-omap1/board-nokia770.c
+@@ -289,7 +289,7 @@ static struct gpiod_lookup_table nokia770_irq_gpio_table = {
+ 		GPIO_LOOKUP("gpio-0-15", 15, "ads7846_irq",
+ 			    GPIO_ACTIVE_HIGH),
+ 		/* GPIO used for retu IRQ */
+-		GPIO_LOOKUP("gpio-48-63", 15, "retu_irq",
++		GPIO_LOOKUP("gpio-48-63", 14, "retu_irq",
+ 			    GPIO_ACTIVE_HIGH),
+ 		/* GPIO used for tahvo IRQ */
+ 		GPIO_LOOKUP("gpio-32-47", 8, "tahvo_irq",
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts
+index 379c2c8466f504..86d44349e09517 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts
++++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts
+@@ -390,6 +390,8 @@ &sound {
+ &tcon0 {
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&lcd_rgb666_pins>;
++	assigned-clocks = <&ccu CLK_TCON0>;
++	assigned-clock-parents = <&ccu CLK_PLL_VIDEO0_2X>;
+ 
+ 	status = "okay";
+ };
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-teres-i.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-teres-i.dts
+index b407e1dd08a737..ec055510af8b68 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-teres-i.dts
++++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-teres-i.dts
+@@ -369,6 +369,8 @@ &sound {
+ &tcon0 {
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&lcd_rgb666_pins>;
++	assigned-clocks = <&ccu CLK_TCON0>;
++	assigned-clock-parents = <&ccu CLK_PLL_VIDEO0_2X>;
+ 
+ 	status = "okay";
+ };
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
+index a5c3920e0f048e..0fecf0abb204c7 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
++++ b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
+@@ -445,6 +445,8 @@ tcon0: lcd-controller@1c0c000 {
+ 			clock-names = "ahb", "tcon-ch0";
+ 			clock-output-names = "tcon-data-clock";
+ 			#clock-cells = <0>;
++			assigned-clocks = <&ccu CLK_TCON0>;
++			assigned-clock-parents = <&ccu CLK_PLL_MIPI>;
+ 			resets = <&ccu RST_BUS_TCON0>, <&ccu RST_BUS_LVDS>;
+ 			reset-names = "lcd", "lvds";
+ 
+diff --git a/arch/arm64/boot/dts/freescale/imx93.dtsi b/arch/arm64/boot/dts/freescale/imx93.dtsi
+index 04b9b3d31f4faf..7bc3852c6ef8fb 100644
+--- a/arch/arm64/boot/dts/freescale/imx93.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx93.dtsi
+@@ -917,7 +917,7 @@ xcvr: xcvr@42680000 {
+ 				reg-names = "ram", "regs", "rxfifo", "txfifo";
+ 				interrupts = <GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH>,
+ 					     <GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH>;
+-				clocks = <&clk IMX93_CLK_BUS_WAKEUP>,
++				clocks = <&clk IMX93_CLK_SPDIF_IPG>,
+ 					 <&clk IMX93_CLK_SPDIF_GATE>,
+ 					 <&clk IMX93_CLK_DUMMY>,
+ 					 <&clk IMX93_CLK_AUD_XCVR_GATE>;
+diff --git a/arch/arm64/boot/dts/marvell/cn9131-cf-solidwan.dts b/arch/arm64/boot/dts/marvell/cn9131-cf-solidwan.dts
+index b1ea7dcaed17dc..47234d0858dd21 100644
+--- a/arch/arm64/boot/dts/marvell/cn9131-cf-solidwan.dts
++++ b/arch/arm64/boot/dts/marvell/cn9131-cf-solidwan.dts
+@@ -435,7 +435,7 @@ &cp1_eth1 {
+ 	managed = "in-band-status";
+ 	phy-mode = "sgmii";
+ 	phy = <&cp1_phy0>;
+-	phys = <&cp0_comphy3 1>;
++	phys = <&cp1_comphy3 1>;
+ 	status = "okay";
+ };
+ 
+@@ -444,7 +444,7 @@ &cp1_eth2 {
+ 	managed = "in-band-status";
+ 	phy-mode = "sgmii";
+ 	phy = <&cp1_phy1>;
+-	phys = <&cp0_comphy5 2>;
++	phys = <&cp1_comphy5 2>;
+ 	status = "okay";
+ };
+ 
+diff --git a/arch/arm64/boot/dts/mediatek/mt7988a.dtsi b/arch/arm64/boot/dts/mediatek/mt7988a.dtsi
+index aa728331e876b7..284e240b79977f 100644
+--- a/arch/arm64/boot/dts/mediatek/mt7988a.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt7988a.dtsi
+@@ -129,6 +129,7 @@ i2c@11003000 {
+ 			reg = <0 0x11003000 0 0x1000>,
+ 			      <0 0x10217080 0 0x80>;
+ 			interrupts = <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>;
++			clock-div = <1>;
+ 			clocks = <&infracfg CLK_INFRA_I2C_BCK>,
+ 				 <&infracfg CLK_INFRA_66M_AP_DMA_BCK>;
+ 			clock-names = "main", "dma";
+@@ -142,6 +143,7 @@ i2c@11004000 {
+ 			reg = <0 0x11004000 0 0x1000>,
+ 			      <0 0x10217100 0 0x80>;
+ 			interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>;
++			clock-div = <1>;
+ 			clocks = <&infracfg CLK_INFRA_I2C_BCK>,
+ 				 <&infracfg CLK_INFRA_66M_AP_DMA_BCK>;
+ 			clock-names = "main", "dma";
+@@ -155,6 +157,7 @@ i2c@11005000 {
+ 			reg = <0 0x11005000 0 0x1000>,
+ 			      <0 0x10217180 0 0x80>;
+ 			interrupts = <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>;
++			clock-div = <1>;
+ 			clocks = <&infracfg CLK_INFRA_I2C_BCK>,
+ 				 <&infracfg CLK_INFRA_66M_AP_DMA_BCK>;
+ 			clock-names = "main", "dma";
+diff --git a/arch/arm64/boot/dts/mediatek/mt8173-elm.dtsi b/arch/arm64/boot/dts/mediatek/mt8173-elm.dtsi
+index b4d85147b77b0b..309e2d104fdc9f 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8173-elm.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8173-elm.dtsi
+@@ -931,7 +931,7 @@ pmic: pmic {
+ 		interrupt-controller;
+ 		#interrupt-cells = <2>;
+ 
+-		clock: mt6397clock {
++		clock: clocks {
+ 			compatible = "mediatek,mt6397-clk";
+ 			#clock-cells = <1>;
+ 		};
+@@ -942,11 +942,10 @@ pio6397: pinctrl {
+ 			#gpio-cells = <2>;
+ 		};
+ 
+-		regulator: mt6397regulator {
++		regulators {
+ 			compatible = "mediatek,mt6397-regulator";
+ 
+ 			mt6397_vpca15_reg: buck_vpca15 {
+-				regulator-compatible = "buck_vpca15";
+ 				regulator-name = "vpca15";
+ 				regulator-min-microvolt = < 700000>;
+ 				regulator-max-microvolt = <1350000>;
+@@ -956,7 +955,6 @@ mt6397_vpca15_reg: buck_vpca15 {
+ 			};
+ 
+ 			mt6397_vpca7_reg: buck_vpca7 {
+-				regulator-compatible = "buck_vpca7";
+ 				regulator-name = "vpca7";
+ 				regulator-min-microvolt = < 700000>;
+ 				regulator-max-microvolt = <1350000>;
+@@ -966,7 +964,6 @@ mt6397_vpca7_reg: buck_vpca7 {
+ 			};
+ 
+ 			mt6397_vsramca15_reg: buck_vsramca15 {
+-				regulator-compatible = "buck_vsramca15";
+ 				regulator-name = "vsramca15";
+ 				regulator-min-microvolt = < 700000>;
+ 				regulator-max-microvolt = <1350000>;
+@@ -975,7 +972,6 @@ mt6397_vsramca15_reg: buck_vsramca15 {
+ 			};
+ 
+ 			mt6397_vsramca7_reg: buck_vsramca7 {
+-				regulator-compatible = "buck_vsramca7";
+ 				regulator-name = "vsramca7";
+ 				regulator-min-microvolt = < 700000>;
+ 				regulator-max-microvolt = <1350000>;
+@@ -984,7 +980,6 @@ mt6397_vsramca7_reg: buck_vsramca7 {
+ 			};
+ 
+ 			mt6397_vcore_reg: buck_vcore {
+-				regulator-compatible = "buck_vcore";
+ 				regulator-name = "vcore";
+ 				regulator-min-microvolt = < 700000>;
+ 				regulator-max-microvolt = <1350000>;
+@@ -993,7 +988,6 @@ mt6397_vcore_reg: buck_vcore {
+ 			};
+ 
+ 			mt6397_vgpu_reg: buck_vgpu {
+-				regulator-compatible = "buck_vgpu";
+ 				regulator-name = "vgpu";
+ 				regulator-min-microvolt = < 700000>;
+ 				regulator-max-microvolt = <1350000>;
+@@ -1002,7 +996,6 @@ mt6397_vgpu_reg: buck_vgpu {
+ 			};
+ 
+ 			mt6397_vdrm_reg: buck_vdrm {
+-				regulator-compatible = "buck_vdrm";
+ 				regulator-name = "vdrm";
+ 				regulator-min-microvolt = <1200000>;
+ 				regulator-max-microvolt = <1400000>;
+@@ -1011,7 +1004,6 @@ mt6397_vdrm_reg: buck_vdrm {
+ 			};
+ 
+ 			mt6397_vio18_reg: buck_vio18 {
+-				regulator-compatible = "buck_vio18";
+ 				regulator-name = "vio18";
+ 				regulator-min-microvolt = <1620000>;
+ 				regulator-max-microvolt = <1980000>;
+@@ -1020,18 +1012,15 @@ mt6397_vio18_reg: buck_vio18 {
+ 			};
+ 
+ 			mt6397_vtcxo_reg: ldo_vtcxo {
+-				regulator-compatible = "ldo_vtcxo";
+ 				regulator-name = "vtcxo";
+ 				regulator-always-on;
+ 			};
+ 
+ 			mt6397_va28_reg: ldo_va28 {
+-				regulator-compatible = "ldo_va28";
+ 				regulator-name = "va28";
+ 			};
+ 
+ 			mt6397_vcama_reg: ldo_vcama {
+-				regulator-compatible = "ldo_vcama";
+ 				regulator-name = "vcama";
+ 				regulator-min-microvolt = <1800000>;
+ 				regulator-max-microvolt = <1800000>;
+@@ -1039,18 +1028,15 @@ mt6397_vcama_reg: ldo_vcama {
+ 			};
+ 
+ 			mt6397_vio28_reg: ldo_vio28 {
+-				regulator-compatible = "ldo_vio28";
+ 				regulator-name = "vio28";
+ 				regulator-always-on;
+ 			};
+ 
+ 			mt6397_vusb_reg: ldo_vusb {
+-				regulator-compatible = "ldo_vusb";
+ 				regulator-name = "vusb";
+ 			};
+ 
+ 			mt6397_vmc_reg: ldo_vmc {
+-				regulator-compatible = "ldo_vmc";
+ 				regulator-name = "vmc";
+ 				regulator-min-microvolt = <1800000>;
+ 				regulator-max-microvolt = <3300000>;
+@@ -1058,7 +1044,6 @@ mt6397_vmc_reg: ldo_vmc {
+ 			};
+ 
+ 			mt6397_vmch_reg: ldo_vmch {
+-				regulator-compatible = "ldo_vmch";
+ 				regulator-name = "vmch";
+ 				regulator-min-microvolt = <3000000>;
+ 				regulator-max-microvolt = <3300000>;
+@@ -1066,7 +1051,6 @@ mt6397_vmch_reg: ldo_vmch {
+ 			};
+ 
+ 			mt6397_vemc_3v3_reg: ldo_vemc3v3 {
+-				regulator-compatible = "ldo_vemc3v3";
+ 				regulator-name = "vemc_3v3";
+ 				regulator-min-microvolt = <3000000>;
+ 				regulator-max-microvolt = <3300000>;
+@@ -1074,7 +1058,6 @@ mt6397_vemc_3v3_reg: ldo_vemc3v3 {
+ 			};
+ 
+ 			mt6397_vgp1_reg: ldo_vgp1 {
+-				regulator-compatible = "ldo_vgp1";
+ 				regulator-name = "vcamd";
+ 				regulator-min-microvolt = <1800000>;
+ 				regulator-max-microvolt = <1800000>;
+@@ -1082,7 +1065,6 @@ mt6397_vgp1_reg: ldo_vgp1 {
+ 			};
+ 
+ 			mt6397_vgp2_reg: ldo_vgp2 {
+-				regulator-compatible = "ldo_vgp2";
+ 				regulator-name = "vcamio";
+ 				regulator-min-microvolt = <3300000>;
+ 				regulator-max-microvolt = <3300000>;
+@@ -1090,7 +1072,6 @@ mt6397_vgp2_reg: ldo_vgp2 {
+ 			};
+ 
+ 			mt6397_vgp3_reg: ldo_vgp3 {
+-				regulator-compatible = "ldo_vgp3";
+ 				regulator-name = "vcamaf";
+ 				regulator-min-microvolt = <1800000>;
+ 				regulator-max-microvolt = <1800000>;
+@@ -1098,7 +1079,6 @@ mt6397_vgp3_reg: ldo_vgp3 {
+ 			};
+ 
+ 			mt6397_vgp4_reg: ldo_vgp4 {
+-				regulator-compatible = "ldo_vgp4";
+ 				regulator-name = "vgp4";
+ 				regulator-min-microvolt = <1200000>;
+ 				regulator-max-microvolt = <3300000>;
+@@ -1106,7 +1086,6 @@ mt6397_vgp4_reg: ldo_vgp4 {
+ 			};
+ 
+ 			mt6397_vgp5_reg: ldo_vgp5 {
+-				regulator-compatible = "ldo_vgp5";
+ 				regulator-name = "vgp5";
+ 				regulator-min-microvolt = <1200000>;
+ 				regulator-max-microvolt = <3000000>;
+@@ -1114,7 +1093,6 @@ mt6397_vgp5_reg: ldo_vgp5 {
+ 			};
+ 
+ 			mt6397_vgp6_reg: ldo_vgp6 {
+-				regulator-compatible = "ldo_vgp6";
+ 				regulator-name = "vgp6";
+ 				regulator-min-microvolt = <3300000>;
+ 				regulator-max-microvolt = <3300000>;
+@@ -1123,7 +1101,6 @@ mt6397_vgp6_reg: ldo_vgp6 {
+ 			};
+ 
+ 			mt6397_vibr_reg: ldo_vibr {
+-				regulator-compatible = "ldo_vibr";
+ 				regulator-name = "vibr";
+ 				regulator-min-microvolt = <1300000>;
+ 				regulator-max-microvolt = <3300000>;
+@@ -1131,7 +1108,7 @@ mt6397_vibr_reg: ldo_vibr {
+ 			};
+ 		};
+ 
+-		rtc: mt6397rtc {
++		rtc: rtc {
+ 			compatible = "mediatek,mt6397-rtc";
+ 		};
+ 	};
+diff --git a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
+index bb4671c18e3bd4..9fffed0ef4bff4 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
++++ b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
+@@ -307,11 +307,10 @@ pmic: pmic {
+ 		interrupt-controller;
+ 		#interrupt-cells = <2>;
+ 
+-		mt6397regulator: mt6397regulator {
++		regulators {
+ 			compatible = "mediatek,mt6397-regulator";
+ 
+ 			mt6397_vpca15_reg: buck_vpca15 {
+-				regulator-compatible = "buck_vpca15";
+ 				regulator-name = "vpca15";
+ 				regulator-min-microvolt = < 700000>;
+ 				regulator-max-microvolt = <1350000>;
+@@ -320,7 +319,6 @@ mt6397_vpca15_reg: buck_vpca15 {
+ 			};
+ 
+ 			mt6397_vpca7_reg: buck_vpca7 {
+-				regulator-compatible = "buck_vpca7";
+ 				regulator-name = "vpca7";
+ 				regulator-min-microvolt = < 700000>;
+ 				regulator-max-microvolt = <1350000>;
+@@ -329,7 +327,6 @@ mt6397_vpca7_reg: buck_vpca7 {
+ 			};
+ 
+ 			mt6397_vsramca15_reg: buck_vsramca15 {
+-				regulator-compatible = "buck_vsramca15";
+ 				regulator-name = "vsramca15";
+ 				regulator-min-microvolt = < 700000>;
+ 				regulator-max-microvolt = <1350000>;
+@@ -338,7 +335,6 @@ mt6397_vsramca15_reg: buck_vsramca15 {
+ 			};
+ 
+ 			mt6397_vsramca7_reg: buck_vsramca7 {
+-				regulator-compatible = "buck_vsramca7";
+ 				regulator-name = "vsramca7";
+ 				regulator-min-microvolt = < 700000>;
+ 				regulator-max-microvolt = <1350000>;
+@@ -347,7 +343,6 @@ mt6397_vsramca7_reg: buck_vsramca7 {
+ 			};
+ 
+ 			mt6397_vcore_reg: buck_vcore {
+-				regulator-compatible = "buck_vcore";
+ 				regulator-name = "vcore";
+ 				regulator-min-microvolt = < 700000>;
+ 				regulator-max-microvolt = <1350000>;
+@@ -356,7 +351,6 @@ mt6397_vcore_reg: buck_vcore {
+ 			};
+ 
+ 			mt6397_vgpu_reg: buck_vgpu {
+-				regulator-compatible = "buck_vgpu";
+ 				regulator-name = "vgpu";
+ 				regulator-min-microvolt = < 700000>;
+ 				regulator-max-microvolt = <1350000>;
+@@ -365,7 +359,6 @@ mt6397_vgpu_reg: buck_vgpu {
+ 			};
+ 
+ 			mt6397_vdrm_reg: buck_vdrm {
+-				regulator-compatible = "buck_vdrm";
+ 				regulator-name = "vdrm";
+ 				regulator-min-microvolt = <1200000>;
+ 				regulator-max-microvolt = <1400000>;
+@@ -374,7 +367,6 @@ mt6397_vdrm_reg: buck_vdrm {
+ 			};
+ 
+ 			mt6397_vio18_reg: buck_vio18 {
+-				regulator-compatible = "buck_vio18";
+ 				regulator-name = "vio18";
+ 				regulator-min-microvolt = <1620000>;
+ 				regulator-max-microvolt = <1980000>;
+@@ -383,19 +375,16 @@ mt6397_vio18_reg: buck_vio18 {
+ 			};
+ 
+ 			mt6397_vtcxo_reg: ldo_vtcxo {
+-				regulator-compatible = "ldo_vtcxo";
+ 				regulator-name = "vtcxo";
+ 				regulator-always-on;
+ 			};
+ 
+ 			mt6397_va28_reg: ldo_va28 {
+-				regulator-compatible = "ldo_va28";
+ 				regulator-name = "va28";
+ 				regulator-always-on;
+ 			};
+ 
+ 			mt6397_vcama_reg: ldo_vcama {
+-				regulator-compatible = "ldo_vcama";
+ 				regulator-name = "vcama";
+ 				regulator-min-microvolt = <1500000>;
+ 				regulator-max-microvolt = <2800000>;
+@@ -403,18 +392,15 @@ mt6397_vcama_reg: ldo_vcama {
+ 			};
+ 
+ 			mt6397_vio28_reg: ldo_vio28 {
+-				regulator-compatible = "ldo_vio28";
+ 				regulator-name = "vio28";
+ 				regulator-always-on;
+ 			};
+ 
+ 			mt6397_vusb_reg: ldo_vusb {
+-				regulator-compatible = "ldo_vusb";
+ 				regulator-name = "vusb";
+ 			};
+ 
+ 			mt6397_vmc_reg: ldo_vmc {
+-				regulator-compatible = "ldo_vmc";
+ 				regulator-name = "vmc";
+ 				regulator-min-microvolt = <1800000>;
+ 				regulator-max-microvolt = <3300000>;
+@@ -422,7 +408,6 @@ mt6397_vmc_reg: ldo_vmc {
+ 			};
+ 
+ 			mt6397_vmch_reg: ldo_vmch {
+-				regulator-compatible = "ldo_vmch";
+ 				regulator-name = "vmch";
+ 				regulator-min-microvolt = <3000000>;
+ 				regulator-max-microvolt = <3300000>;
+@@ -430,7 +415,6 @@ mt6397_vmch_reg: ldo_vmch {
+ 			};
+ 
+ 			mt6397_vemc_3v3_reg: ldo_vemc3v3 {
+-				regulator-compatible = "ldo_vemc3v3";
+ 				regulator-name = "vemc_3v3";
+ 				regulator-min-microvolt = <3000000>;
+ 				regulator-max-microvolt = <3300000>;
+@@ -438,7 +422,6 @@ mt6397_vemc_3v3_reg: ldo_vemc3v3 {
+ 			};
+ 
+ 			mt6397_vgp1_reg: ldo_vgp1 {
+-				regulator-compatible = "ldo_vgp1";
+ 				regulator-name = "vcamd";
+ 				regulator-min-microvolt = <1220000>;
+ 				regulator-max-microvolt = <3300000>;
+@@ -446,7 +429,6 @@ mt6397_vgp1_reg: ldo_vgp1 {
+ 			};
+ 
+ 			mt6397_vgp2_reg: ldo_vgp2 {
+-				regulator-compatible = "ldo_vgp2";
+ 				regulator-name = "vcamio";
+ 				regulator-min-microvolt = <1000000>;
+ 				regulator-max-microvolt = <3300000>;
+@@ -454,7 +436,6 @@ mt6397_vgp2_reg: ldo_vgp2 {
+ 			};
+ 
+ 			mt6397_vgp3_reg: ldo_vgp3 {
+-				regulator-compatible = "ldo_vgp3";
+ 				regulator-name = "vcamaf";
+ 				regulator-min-microvolt = <1200000>;
+ 				regulator-max-microvolt = <3300000>;
+@@ -462,7 +443,6 @@ mt6397_vgp3_reg: ldo_vgp3 {
+ 			};
+ 
+ 			mt6397_vgp4_reg: ldo_vgp4 {
+-				regulator-compatible = "ldo_vgp4";
+ 				regulator-name = "vgp4";
+ 				regulator-min-microvolt = <1200000>;
+ 				regulator-max-microvolt = <3300000>;
+@@ -470,7 +450,6 @@ mt6397_vgp4_reg: ldo_vgp4 {
+ 			};
+ 
+ 			mt6397_vgp5_reg: ldo_vgp5 {
+-				regulator-compatible = "ldo_vgp5";
+ 				regulator-name = "vgp5";
+ 				regulator-min-microvolt = <1200000>;
+ 				regulator-max-microvolt = <3000000>;
+@@ -478,7 +457,6 @@ mt6397_vgp5_reg: ldo_vgp5 {
+ 			};
+ 
+ 			mt6397_vgp6_reg: ldo_vgp6 {
+-				regulator-compatible = "ldo_vgp6";
+ 				regulator-name = "vgp6";
+ 				regulator-min-microvolt = <1200000>;
+ 				regulator-max-microvolt = <3300000>;
+@@ -486,7 +464,6 @@ mt6397_vgp6_reg: ldo_vgp6 {
+ 			};
+ 
+ 			mt6397_vibr_reg: ldo_vibr {
+-				regulator-compatible = "ldo_vibr";
+ 				regulator-name = "vibr";
+ 				regulator-min-microvolt = <1300000>;
+ 				regulator-max-microvolt = <3300000>;
+diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-damu.dts b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-damu.dts
+index 65860b33c01fe8..3935d83a047e08 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-damu.dts
++++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-damu.dts
+@@ -26,6 +26,10 @@ &touchscreen {
+ 	hid-descr-addr = <0x0001>;
+ };
+ 
++&mt6358codec {
++	mediatek,dmic-mode = <1>; /* one-wire */
++};
++
+ &qca_wifi {
+ 	qcom,ath10k-calibration-variant = "GO_DAMU";
+ };
+diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-kenzo.dts b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-kenzo.dts
+index e8241587949b2b..561770fcf69e66 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-kenzo.dts
++++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-kenzo.dts
+@@ -12,3 +12,18 @@ / {
+ 	chassis-type = "laptop";
+ 	compatible = "google,juniper-sku17", "google,juniper", "mediatek,mt8183";
+ };
++
++&i2c0 {
++	touchscreen@40 {
++		compatible = "hid-over-i2c";
++		reg = <0x40>;
++
++		pinctrl-names = "default";
++		pinctrl-0 = <&touchscreen_pins>;
++
++		interrupts-extended = <&pio 155 IRQ_TYPE_LEVEL_LOW>;
++
++		post-power-on-delay-ms = <70>;
++		hid-descr-addr = <0x0001>;
++	};
++};
+diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-willow.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-willow.dtsi
+index 76d33540166f90..c942e461a177ef 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-willow.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-willow.dtsi
+@@ -6,6 +6,21 @@
+ /dts-v1/;
+ #include "mt8183-kukui-jacuzzi.dtsi"
+ 
++&i2c0 {
++	touchscreen@40 {
++		compatible = "hid-over-i2c";
++		reg = <0x40>;
++
++		pinctrl-names = "default";
++		pinctrl-0 = <&touchscreen_pins>;
++
++		interrupts-extended = <&pio 155 IRQ_TYPE_LEVEL_LOW>;
++
++		post-power-on-delay-ms = <70>;
++		hid-descr-addr = <0x0001>;
++	};
++};
++
+ &i2c2 {
+ 	trackpad@2c {
+ 		compatible = "hid-over-i2c";
+diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi
+index 49e053b932e76c..80888bd4ad823d 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi
+@@ -39,8 +39,6 @@ pp1800_mipibrdg: pp1800-mipibrdg {
+ 	pp3300_panel: pp3300-panel {
+ 		compatible = "regulator-fixed";
+ 		regulator-name = "pp3300_panel";
+-		regulator-min-microvolt = <3300000>;
+-		regulator-max-microvolt = <3300000>;
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&pp3300_panel_pins>;
+ 
+diff --git a/arch/arm64/boot/dts/mediatek/mt8183.dtsi b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
+index 0a6578aacf8280..9cd5e0cef02a29 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8183.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
+@@ -1024,7 +1024,8 @@ pwrap: pwrap@1000d000 {
+ 		};
+ 
+ 		keyboard: keyboard@10010000 {
+-			compatible = "mediatek,mt6779-keypad";
++			compatible = "mediatek,mt8183-keypad",
++				     "mediatek,mt6779-keypad";
+ 			reg = <0 0x10010000 0 0x1000>;
+ 			interrupts = <GIC_SPI 186 IRQ_TYPE_EDGE_FALLING>;
+ 			clocks = <&clk26m>;
+diff --git a/arch/arm64/boot/dts/mediatek/mt8186.dtsi b/arch/arm64/boot/dts/mediatek/mt8186.dtsi
+index 148c332018b0d8..ac34ba3afacb05 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8186.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8186.dtsi
+@@ -1570,6 +1570,8 @@ ssusb0: usb@11201000 {
+ 			#address-cells = <2>;
+ 			#size-cells = <2>;
+ 			ranges;
++			wakeup-source;
++			mediatek,syscon-wakeup = <&pericfg 0x420 2>;
+ 			status = "disabled";
+ 
+ 			usb_host0: usb@11200000 {
+@@ -1583,8 +1585,6 @@ usb_host0: usb@11200000 {
+ 					 <&infracfg_ao CLK_INFRA_AO_SSUSB_TOP_XHCI>;
+ 				clock-names = "sys_ck", "ref_ck", "mcu_ck", "dma_ck", "xhci_ck";
+ 				interrupts = <GIC_SPI 294 IRQ_TYPE_LEVEL_HIGH 0>;
+-				mediatek,syscon-wakeup = <&pericfg 0x420 2>;
+-				wakeup-source;
+ 				status = "disabled";
+ 			};
+ 		};
+@@ -1636,6 +1636,8 @@ ssusb1: usb@11281000 {
+ 			#address-cells = <2>;
+ 			#size-cells = <2>;
+ 			ranges;
++			wakeup-source;
++			mediatek,syscon-wakeup = <&pericfg 0x424 2>;
+ 			status = "disabled";
+ 
+ 			usb_host1: usb@11280000 {
+@@ -1649,8 +1651,6 @@ usb_host1: usb@11280000 {
+ 					 <&infracfg_ao CLK_INFRA_AO_SSUSB_TOP_P1_XHCI>;
+ 				clock-names = "sys_ck", "ref_ck", "mcu_ck", "dma_ck","xhci_ck";
+ 				interrupts = <GIC_SPI 324 IRQ_TYPE_LEVEL_HIGH 0>;
+-				mediatek,syscon-wakeup = <&pericfg 0x424 2>;
+-				wakeup-source;
+ 				status = "disabled";
+ 			};
+ 		};
+diff --git a/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi b/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi
+index 08d71ddf36683e..ad52c1d6e4eef7 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi
+@@ -1420,7 +1420,6 @@ mt6315_6: pmic@6 {
+ 
+ 		regulators {
+ 			mt6315_6_vbuck1: vbuck1 {
+-				regulator-compatible = "vbuck1";
+ 				regulator-name = "Vbcpu";
+ 				regulator-min-microvolt = <400000>;
+ 				regulator-max-microvolt = <1193750>;
+@@ -1430,7 +1429,6 @@ mt6315_6_vbuck1: vbuck1 {
+ 			};
+ 
+ 			mt6315_6_vbuck3: vbuck3 {
+-				regulator-compatible = "vbuck3";
+ 				regulator-name = "Vlcpu";
+ 				regulator-min-microvolt = <400000>;
+ 				regulator-max-microvolt = <1193750>;
+@@ -1447,7 +1445,6 @@ mt6315_7: pmic@7 {
+ 
+ 		regulators {
+ 			mt6315_7_vbuck1: vbuck1 {
+-				regulator-compatible = "vbuck1";
+ 				regulator-name = "Vgpu";
+ 				regulator-min-microvolt = <400000>;
+ 				regulator-max-microvolt = <800000>;
+diff --git a/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi b/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi
+index 2c7b2223ee76b1..5056e07399e23a 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi
+@@ -1285,7 +1285,6 @@ mt6315@6 {
+ 
+ 		regulators {
+ 			mt6315_6_vbuck1: vbuck1 {
+-				regulator-compatible = "vbuck1";
+ 				regulator-name = "Vbcpu";
+ 				regulator-min-microvolt = <400000>;
+ 				regulator-max-microvolt = <1193750>;
+@@ -1303,7 +1302,6 @@ mt6315@7 {
+ 
+ 		regulators {
+ 			mt6315_7_vbuck1: vbuck1 {
+-				regulator-compatible = "vbuck1";
+ 				regulator-name = "Vgpu";
+ 				regulator-min-microvolt = <400000>;
+ 				regulator-max-microvolt = <1193750>;
+diff --git a/arch/arm64/boot/dts/mediatek/mt8195-demo.dts b/arch/arm64/boot/dts/mediatek/mt8195-demo.dts
+index 31d424b8fc7ced..bfb75296795c39 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8195-demo.dts
++++ b/arch/arm64/boot/dts/mediatek/mt8195-demo.dts
+@@ -137,7 +137,6 @@ charger {
+ 			richtek,vinovp-microvolt = <14500000>;
+ 
+ 			otg_vbus_regulator: usb-otg-vbus-regulator {
+-				regulator-compatible = "usb-otg-vbus";
+ 				regulator-name = "usb-otg-vbus";
+ 				regulator-min-microvolt = <4425000>;
+ 				regulator-max-microvolt = <5825000>;
+@@ -149,7 +148,6 @@ regulator {
+ 			LDO_VIN3-supply = <&mt6360_buck2>;
+ 
+ 			mt6360_buck1: buck1 {
+-				regulator-compatible = "BUCK1";
+ 				regulator-name = "mt6360,buck1";
+ 				regulator-min-microvolt = <300000>;
+ 				regulator-max-microvolt = <1300000>;
+@@ -160,7 +158,6 @@ MT6360_OPMODE_LP
+ 			};
+ 
+ 			mt6360_buck2: buck2 {
+-				regulator-compatible = "BUCK2";
+ 				regulator-name = "mt6360,buck2";
+ 				regulator-min-microvolt = <300000>;
+ 				regulator-max-microvolt = <1300000>;
+@@ -171,7 +168,6 @@ MT6360_OPMODE_LP
+ 			};
+ 
+ 			mt6360_ldo1: ldo1 {
+-				regulator-compatible = "LDO1";
+ 				regulator-name = "mt6360,ldo1";
+ 				regulator-min-microvolt = <1200000>;
+ 				regulator-max-microvolt = <3600000>;
+@@ -180,7 +176,6 @@ mt6360_ldo1: ldo1 {
+ 			};
+ 
+ 			mt6360_ldo2: ldo2 {
+-				regulator-compatible = "LDO2";
+ 				regulator-name = "mt6360,ldo2";
+ 				regulator-min-microvolt = <1200000>;
+ 				regulator-max-microvolt = <3600000>;
+@@ -189,7 +184,6 @@ mt6360_ldo2: ldo2 {
+ 			};
+ 
+ 			mt6360_ldo3: ldo3 {
+-				regulator-compatible = "LDO3";
+ 				regulator-name = "mt6360,ldo3";
+ 				regulator-min-microvolt = <1200000>;
+ 				regulator-max-microvolt = <3600000>;
+@@ -198,7 +192,6 @@ mt6360_ldo3: ldo3 {
+ 			};
+ 
+ 			mt6360_ldo5: ldo5 {
+-				regulator-compatible = "LDO5";
+ 				regulator-name = "mt6360,ldo5";
+ 				regulator-min-microvolt = <2700000>;
+ 				regulator-max-microvolt = <3600000>;
+@@ -207,7 +200,6 @@ mt6360_ldo5: ldo5 {
+ 			};
+ 
+ 			mt6360_ldo6: ldo6 {
+-				regulator-compatible = "LDO6";
+ 				regulator-name = "mt6360,ldo6";
+ 				regulator-min-microvolt = <500000>;
+ 				regulator-max-microvolt = <2100000>;
+@@ -216,7 +208,6 @@ mt6360_ldo6: ldo6 {
+ 			};
+ 
+ 			mt6360_ldo7: ldo7 {
+-				regulator-compatible = "LDO7";
+ 				regulator-name = "mt6360,ldo7";
+ 				regulator-min-microvolt = <500000>;
+ 				regulator-max-microvolt = <2100000>;
+diff --git a/arch/arm64/boot/dts/mediatek/mt8195.dtsi b/arch/arm64/boot/dts/mediatek/mt8195.dtsi
+index ade685ed2190b7..f013dbad9dc4ea 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8195.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8195.dtsi
+@@ -1611,9 +1611,6 @@ pcie1: pcie@112f8000 {
+ 			phy-names = "pcie-phy";
+ 			power-domains = <&spm MT8195_POWER_DOMAIN_PCIE_MAC_P1>;
+ 
+-			resets = <&infracfg_ao MT8195_INFRA_RST2_PCIE_P1_SWRST>;
+-			reset-names = "mac";
+-
+ 			#interrupt-cells = <1>;
+ 			interrupt-map-mask = <0 0 0 7>;
+ 			interrupt-map = <0 0 0 1 &pcie_intc1 0>,
+@@ -3138,7 +3135,7 @@ larb20: larb@1b010000 {
+ 		};
+ 
+ 		ovl0: ovl@1c000000 {
+-			compatible = "mediatek,mt8195-disp-ovl", "mediatek,mt8183-disp-ovl";
++			compatible = "mediatek,mt8195-disp-ovl";
+ 			reg = <0 0x1c000000 0 0x1000>;
+ 			interrupts = <GIC_SPI 636 IRQ_TYPE_LEVEL_HIGH 0>;
+ 			power-domains = <&spm MT8195_POWER_DOMAIN_VDOSYS0>;
+diff --git a/arch/arm64/boot/dts/mediatek/mt8365.dtsi b/arch/arm64/boot/dts/mediatek/mt8365.dtsi
+index 9c91fe8ea0f969..2bf8c9d02b6ee7 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8365.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8365.dtsi
+@@ -449,7 +449,8 @@ pwrap: pwrap@1000d000 {
+ 		};
+ 
+ 		keypad: keypad@10010000 {
+-			compatible = "mediatek,mt6779-keypad";
++			compatible = "mediatek,mt8365-keypad",
++				     "mediatek,mt6779-keypad";
+ 			reg = <0 0x10010000 0 0x1000>;
+ 			wakeup-source;
+ 			interrupts = <GIC_SPI 124 IRQ_TYPE_EDGE_FALLING>;
+diff --git a/arch/arm64/boot/dts/mediatek/mt8395-genio-1200-evk.dts b/arch/arm64/boot/dts/mediatek/mt8395-genio-1200-evk.dts
+index b4b48eb93f3c54..6f34b06a0359a7 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8395-genio-1200-evk.dts
++++ b/arch/arm64/boot/dts/mediatek/mt8395-genio-1200-evk.dts
+@@ -820,7 +820,6 @@ mt6315_6: pmic@6 {
+ 
+ 		regulators {
+ 			mt6315_6_vbuck1: vbuck1 {
+-				regulator-compatible = "vbuck1";
+ 				regulator-name = "Vbcpu";
+ 				regulator-min-microvolt = <300000>;
+ 				regulator-max-microvolt = <1193750>;
+@@ -837,7 +836,6 @@ mt6315_7: pmic@7 {
+ 
+ 		regulators {
+ 			mt6315_7_vbuck1: vbuck1 {
+-				regulator-compatible = "vbuck1";
+ 				regulator-name = "Vgpu";
+ 				regulator-min-microvolt = <300000>;
+ 				regulator-max-microvolt = <1193750>;
+diff --git a/arch/arm64/boot/dts/mediatek/mt8395-radxa-nio-12l.dts b/arch/arm64/boot/dts/mediatek/mt8395-radxa-nio-12l.dts
+index 14ec970c4e491f..41dc34837b02e7 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8395-radxa-nio-12l.dts
++++ b/arch/arm64/boot/dts/mediatek/mt8395-radxa-nio-12l.dts
+@@ -812,7 +812,6 @@ mt6315_6: pmic@6 {
+ 
+ 		regulators {
+ 			mt6315_6_vbuck1: vbuck1 {
+-				regulator-compatible = "vbuck1";
+ 				regulator-name = "Vbcpu";
+ 				regulator-min-microvolt = <300000>;
+ 				regulator-max-microvolt = <1193750>;
+@@ -829,7 +828,6 @@ mt6315_7: pmic@7 {
+ 
+ 		regulators {
+ 			mt6315_7_vbuck1: vbuck1 {
+-				regulator-compatible = "vbuck1";
+ 				regulator-name = "Vgpu";
+ 				regulator-min-microvolt = <300000>;
+ 				regulator-max-microvolt = <1193750>;
+diff --git a/arch/arm64/boot/dts/mediatek/mt8516.dtsi b/arch/arm64/boot/dts/mediatek/mt8516.dtsi
+index d0b03dc4d3f43a..e30623ebac0e1b 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8516.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8516.dtsi
+@@ -144,10 +144,10 @@ reserved-memory {
+ 		#size-cells = <2>;
+ 		ranges;
+ 
+-		/* 128 KiB reserved for ARM Trusted Firmware (BL31) */
++		/* 192 KiB reserved for ARM Trusted Firmware (BL31) */
+ 		bl31_secmon_reserved: secmon@43000000 {
+ 			no-map;
+-			reg = <0 0x43000000 0 0x20000>;
++			reg = <0 0x43000000 0 0x30000>;
+ 		};
+ 	};
+ 
+@@ -206,7 +206,7 @@ watchdog@10007000 {
+ 			compatible = "mediatek,mt8516-wdt",
+ 				     "mediatek,mt6589-wdt";
+ 			reg = <0 0x10007000 0 0x1000>;
+-			interrupts = <GIC_SPI 198 IRQ_TYPE_EDGE_FALLING>;
++			interrupts = <GIC_SPI 198 IRQ_TYPE_LEVEL_LOW>;
+ 			#reset-cells = <1>;
+ 		};
+ 
+@@ -268,7 +268,7 @@ gic: interrupt-controller@10310000 {
+ 			interrupt-parent = <&gic>;
+ 			interrupt-controller;
+ 			reg = <0 0x10310000 0 0x1000>,
+-			      <0 0x10320000 0 0x1000>,
++			      <0 0x1032f000 0 0x2000>,
+ 			      <0 0x10340000 0 0x2000>,
+ 			      <0 0x10360000 0 0x2000>;
+ 			interrupts = <GIC_PPI 9
+@@ -344,6 +344,7 @@ i2c0: i2c@11009000 {
+ 			reg = <0 0x11009000 0 0x90>,
+ 			      <0 0x11000180 0 0x80>;
+ 			interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_LOW>;
++			clock-div = <2>;
+ 			clocks = <&topckgen CLK_TOP_I2C0>,
+ 				 <&topckgen CLK_TOP_APDMA>;
+ 			clock-names = "main", "dma";
+@@ -358,6 +359,7 @@ i2c1: i2c@1100a000 {
+ 			reg = <0 0x1100a000 0 0x90>,
+ 			      <0 0x11000200 0 0x80>;
+ 			interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_LOW>;
++			clock-div = <2>;
+ 			clocks = <&topckgen CLK_TOP_I2C1>,
+ 				 <&topckgen CLK_TOP_APDMA>;
+ 			clock-names = "main", "dma";
+@@ -372,6 +374,7 @@ i2c2: i2c@1100b000 {
+ 			reg = <0 0x1100b000 0 0x90>,
+ 			      <0 0x11000280 0 0x80>;
+ 			interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_LOW>;
++			clock-div = <2>;
+ 			clocks = <&topckgen CLK_TOP_I2C2>,
+ 				 <&topckgen CLK_TOP_APDMA>;
+ 			clock-names = "main", "dma";
+diff --git a/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi b/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi
+index ec8dfb3d1c6d69..a356db5fcc5f3c 100644
+--- a/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi
++++ b/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi
+@@ -47,7 +47,6 @@ key-volume-down {
+ };
+ 
+ &i2c0 {
+-	clock-div = <2>;
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&i2c0_pins_a>;
+ 	status = "okay";
+@@ -156,7 +155,6 @@ cam-pwdn-hog {
+ };
+ 
+ &i2c2 {
+-	clock-div = <2>;
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&i2c2_pins_a>;
+ 	status = "okay";
+diff --git a/arch/arm64/boot/dts/nvidia/tegra234.dtsi b/arch/arm64/boot/dts/nvidia/tegra234.dtsi
+index 984c85eab41afd..570331baa09ee3 100644
+--- a/arch/arm64/boot/dts/nvidia/tegra234.dtsi
++++ b/arch/arm64/boot/dts/nvidia/tegra234.dtsi
+@@ -3900,7 +3900,7 @@ spi@c260000 {
+ 			assigned-clock-parents = <&bpmp TEGRA234_CLK_PLLP_OUT0>;
+ 			resets = <&bpmp TEGRA234_RESET_SPI2>;
+ 			reset-names = "spi";
+-			dmas = <&gpcdma 19>, <&gpcdma 19>;
++			dmas = <&gpcdma 16>, <&gpcdma 16>;
+ 			dma-names = "rx", "tx";
+ 			dma-coherent;
+ 			status = "disabled";
+diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile
+index ae002c7cf1268a..b13c169ec70d26 100644
+--- a/arch/arm64/boot/dts/qcom/Makefile
++++ b/arch/arm64/boot/dts/qcom/Makefile
+@@ -207,6 +207,9 @@ dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-cheza-r1.dtb
+ dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-cheza-r2.dtb
+ dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-cheza-r3.dtb
+ dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-db845c.dtb
++
++sdm845-db845c-navigation-mezzanine-dtbs	:= sdm845-db845c.dtb sdm845-db845c-navigation-mezzanine.dtbo
++
+ dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-db845c-navigation-mezzanine.dtb
+ dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-lg-judyln.dtb
+ dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-lg-judyp.dtb
+diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi
+index 0ee44706b70ba3..800bfe83dbf837 100644
+--- a/arch/arm64/boot/dts/qcom/msm8916.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi
+@@ -125,7 +125,7 @@ xo_board: xo-board {
+ 		sleep_clk: sleep-clk {
+ 			compatible = "fixed-clock";
+ 			#clock-cells = <0>;
+-			clock-frequency = <32768>;
++			clock-frequency = <32764>;
+ 		};
+ 	};
+ 
+diff --git a/arch/arm64/boot/dts/qcom/msm8939.dtsi b/arch/arm64/boot/dts/qcom/msm8939.dtsi
+index 7af210789879af..effa3aaeb25054 100644
+--- a/arch/arm64/boot/dts/qcom/msm8939.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8939.dtsi
+@@ -34,7 +34,7 @@ xo_board: xo-board {
+ 		sleep_clk: sleep-clk {
+ 			compatible = "fixed-clock";
+ 			#clock-cells = <0>;
+-			clock-frequency = <32768>;
++			clock-frequency = <32764>;
+ 		};
+ 	};
+ 
+diff --git a/arch/arm64/boot/dts/qcom/msm8994.dtsi b/arch/arm64/boot/dts/qcom/msm8994.dtsi
+index fc2a7f13f690ee..8a7de1dba2b9d0 100644
+--- a/arch/arm64/boot/dts/qcom/msm8994.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8994.dtsi
+@@ -34,7 +34,7 @@ xo_board: xo-board {
+ 		sleep_clk: sleep-clk {
+ 			compatible = "fixed-clock";
+ 			#clock-cells = <0>;
+-			clock-frequency = <32768>;
++			clock-frequency = <32764>;
+ 			clock-output-names = "sleep_clk";
+ 		};
+ 	};
+@@ -437,6 +437,15 @@ usb3: usb@f92f8800 {
+ 			#size-cells = <1>;
+ 			ranges;
+ 
++			interrupts = <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 311 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 310 IRQ_TYPE_LEVEL_HIGH>;
++			interrupt-names = "pwr_event",
++					  "qusb2_phy",
++					  "hs_phy_irq",
++					  "ss_phy_irq";
++
+ 			clocks = <&gcc GCC_USB30_MASTER_CLK>,
+ 				 <&gcc GCC_SYS_NOC_USB3_AXI_CLK>,
+ 				 <&gcc GCC_USB30_SLEEP_CLK>,
+diff --git a/arch/arm64/boot/dts/qcom/msm8996-xiaomi-gemini.dts b/arch/arm64/boot/dts/qcom/msm8996-xiaomi-gemini.dts
+index f8e9d90afab000..dbad8f57f2fa34 100644
+--- a/arch/arm64/boot/dts/qcom/msm8996-xiaomi-gemini.dts
++++ b/arch/arm64/boot/dts/qcom/msm8996-xiaomi-gemini.dts
+@@ -64,7 +64,7 @@ led@0 {
+ 		};
+ 
+ 		led@1 {
+-			reg = <0>;
++			reg = <1>;
+ 			chan-name = "button-backlight1";
+ 			led-cur = /bits/ 8 <0x32>;
+ 			max-cur = /bits/ 8 <0xc8>;
+diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
+index e5966724f37c69..0a8884145865d6 100644
+--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
+@@ -3065,9 +3065,14 @@ usb3: usb@6af8800 {
+ 			#size-cells = <1>;
+ 			ranges;
+ 
+-			interrupts = <GIC_SPI 347 IRQ_TYPE_LEVEL_HIGH>,
++			interrupts = <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 347 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>,
+ 				     <GIC_SPI 243 IRQ_TYPE_LEVEL_HIGH>;
+-			interrupt-names = "hs_phy_irq", "ss_phy_irq";
++			interrupt-names = "pwr_event",
++					  "qusb2_phy",
++					  "hs_phy_irq",
++					  "ss_phy_irq";
+ 
+ 			clocks = <&gcc GCC_SYS_NOC_USB3_AXI_CLK>,
+ 				 <&gcc GCC_USB30_MASTER_CLK>,
+diff --git a/arch/arm64/boot/dts/qcom/qcm6490-shift-otter.dts b/arch/arm64/boot/dts/qcom/qcm6490-shift-otter.dts
+index 4667e47a74bc5b..75930f95769663 100644
+--- a/arch/arm64/boot/dts/qcom/qcm6490-shift-otter.dts
++++ b/arch/arm64/boot/dts/qcom/qcm6490-shift-otter.dts
+@@ -942,8 +942,6 @@ &usb_1_hsphy {
+ 
+ 	qcom,squelch-detector-bp = <(-2090)>;
+ 
+-	orientation-switch;
+-
+ 	status = "okay";
+ };
+ 
+diff --git a/arch/arm64/boot/dts/qcom/qcs404.dtsi b/arch/arm64/boot/dts/qcom/qcs404.dtsi
+index cddc16bac0cea4..81a161c0cc5a82 100644
+--- a/arch/arm64/boot/dts/qcom/qcs404.dtsi
++++ b/arch/arm64/boot/dts/qcom/qcs404.dtsi
+@@ -28,7 +28,7 @@ xo_board: xo-board {
+ 		sleep_clk: sleep-clk {
+ 			compatible = "fixed-clock";
+ 			#clock-cells = <0>;
+-			clock-frequency = <32768>;
++			clock-frequency = <32764>;
+ 		};
+ 	};
+ 
+diff --git a/arch/arm64/boot/dts/qcom/qcs8550-aim300.dtsi b/arch/arm64/boot/dts/qcom/qcs8550-aim300.dtsi
+index f6960e2d466a26..e6ac529e6b7216 100644
+--- a/arch/arm64/boot/dts/qcom/qcs8550-aim300.dtsi
++++ b/arch/arm64/boot/dts/qcom/qcs8550-aim300.dtsi
+@@ -367,7 +367,7 @@ &pm8550b_eusb2_repeater {
+ };
+ 
+ &sleep_clk {
+-	clock-frequency = <32000>;
++	clock-frequency = <32764>;
+ };
+ 
+ &ufs_mem_hc {
+diff --git a/arch/arm64/boot/dts/qcom/qdu1000-idp.dts b/arch/arm64/boot/dts/qcom/qdu1000-idp.dts
+index e65305f8136c88..c73eda75faf820 100644
+--- a/arch/arm64/boot/dts/qcom/qdu1000-idp.dts
++++ b/arch/arm64/boot/dts/qcom/qdu1000-idp.dts
+@@ -31,7 +31,7 @@ xo_board: xo-board-clk {
+ 
+ 		sleep_clk: sleep-clk {
+ 			compatible = "fixed-clock";
+-			clock-frequency = <32000>;
++			clock-frequency = <32764>;
+ 			#clock-cells = <0>;
+ 		};
+ 	};
+diff --git a/arch/arm64/boot/dts/qcom/qrb4210-rb2.dts b/arch/arm64/boot/dts/qcom/qrb4210-rb2.dts
+index 1888d99d398b11..f99fb9159e0b68 100644
+--- a/arch/arm64/boot/dts/qcom/qrb4210-rb2.dts
++++ b/arch/arm64/boot/dts/qcom/qrb4210-rb2.dts
+@@ -545,7 +545,7 @@ can@0 {
+ };
+ 
+ &sleep_clk {
+-	clock-frequency = <32000>;
++	clock-frequency = <32764>;
+ };
+ 
+ &tlmm {
+diff --git a/arch/arm64/boot/dts/qcom/qru1000-idp.dts b/arch/arm64/boot/dts/qcom/qru1000-idp.dts
+index 1c781d9e24cf4d..52ce51e56e2fdc 100644
+--- a/arch/arm64/boot/dts/qcom/qru1000-idp.dts
++++ b/arch/arm64/boot/dts/qcom/qru1000-idp.dts
+@@ -31,7 +31,7 @@ xo_board: xo-board-clk {
+ 
+ 		sleep_clk: sleep-clk {
+ 			compatible = "fixed-clock";
+-			clock-frequency = <32000>;
++			clock-frequency = <32764>;
+ 			#clock-cells = <0>;
+ 		};
+ 	};
+diff --git a/arch/arm64/boot/dts/qcom/sa8775p-ride.dtsi b/arch/arm64/boot/dts/qcom/sa8775p-ride.dtsi
+index 0c1b21def4b62c..adb71aeff339b5 100644
+--- a/arch/arm64/boot/dts/qcom/sa8775p-ride.dtsi
++++ b/arch/arm64/boot/dts/qcom/sa8775p-ride.dtsi
+@@ -517,7 +517,7 @@ &serdes1 {
+ };
+ 
+ &sleep_clk {
+-	clock-frequency = <32764>;
++	clock-frequency = <32000>;
+ };
+ 
+ &spi16 {
+diff --git a/arch/arm64/boot/dts/qcom/sc7180-firmware-tfa.dtsi b/arch/arm64/boot/dts/qcom/sc7180-firmware-tfa.dtsi
+index ee35a454dbf6f3..59162b3afcb841 100644
+--- a/arch/arm64/boot/dts/qcom/sc7180-firmware-tfa.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7180-firmware-tfa.dtsi
+@@ -6,82 +6,82 @@
+  * by Qualcomm firmware.
+  */
+ 
+-&CPU0 {
++&cpu0 {
+ 	/delete-property/ power-domains;
+ 	/delete-property/ power-domain-names;
+ 
+-	cpu-idle-states = <&LITTLE_CPU_SLEEP_0
+-			   &LITTLE_CPU_SLEEP_1
+-			   &CLUSTER_SLEEP_0>;
++	cpu-idle-states = <&little_cpu_sleep_0
++			   &little_cpu_sleep_1
++			   &cluster_sleep_0>;
+ };
+ 
+-&CPU1 {
++&cpu1 {
+ 	/delete-property/ power-domains;
+ 	/delete-property/ power-domain-names;
+ 
+-	cpu-idle-states = <&LITTLE_CPU_SLEEP_0
+-			   &LITTLE_CPU_SLEEP_1
+-			   &CLUSTER_SLEEP_0>;
++	cpu-idle-states = <&little_cpu_sleep_0
++			   &little_cpu_sleep_1
++			   &cluster_sleep_0>;
+ };
+ 
+-&CPU2 {
++&cpu2 {
+ 	/delete-property/ power-domains;
+ 	/delete-property/ power-domain-names;
+ 
+-	cpu-idle-states = <&LITTLE_CPU_SLEEP_0
+-			   &LITTLE_CPU_SLEEP_1
+-			   &CLUSTER_SLEEP_0>;
++	cpu-idle-states = <&little_cpu_sleep_0
++			   &little_cpu_sleep_1
++			   &cluster_sleep_0>;
+ };
+ 
+-&CPU3 {
++&cpu3 {
+ 	/delete-property/ power-domains;
+ 	/delete-property/ power-domain-names;
+ 
+-	cpu-idle-states = <&LITTLE_CPU_SLEEP_0
+-			   &LITTLE_CPU_SLEEP_1
+-			   &CLUSTER_SLEEP_0>;
++	cpu-idle-states = <&little_cpu_sleep_0
++			   &little_cpu_sleep_1
++			   &cluster_sleep_0>;
+ };
+ 
+-&CPU4 {
++&cpu4 {
+ 	/delete-property/ power-domains;
+ 	/delete-property/ power-domain-names;
+ 
+-	cpu-idle-states = <&LITTLE_CPU_SLEEP_0
+-			   &LITTLE_CPU_SLEEP_1
+-			   &CLUSTER_SLEEP_0>;
++	cpu-idle-states = <&little_cpu_sleep_0
++			   &little_cpu_sleep_1
++			   &cluster_sleep_0>;
+ };
+ 
+-&CPU5 {
++&cpu5 {
+ 	/delete-property/ power-domains;
+ 	/delete-property/ power-domain-names;
+ 
+-	cpu-idle-states = <&LITTLE_CPU_SLEEP_0
+-			   &LITTLE_CPU_SLEEP_1
+-			   &CLUSTER_SLEEP_0>;
++	cpu-idle-states = <&little_cpu_sleep_0
++			   &little_cpu_sleep_1
++			   &cluster_sleep_0>;
+ };
+ 
+-&CPU6 {
++&cpu6 {
+ 	/delete-property/ power-domains;
+ 	/delete-property/ power-domain-names;
+ 
+-	cpu-idle-states = <&BIG_CPU_SLEEP_0
+-			   &BIG_CPU_SLEEP_1
+-			   &CLUSTER_SLEEP_0>;
++	cpu-idle-states = <&big_cpu_sleep_0
++			   &big_cpu_sleep_1
++			   &cluster_sleep_0>;
+ };
+ 
+-&CPU7 {
++&cpu7 {
+ 	/delete-property/ power-domains;
+ 	/delete-property/ power-domain-names;
+ 
+-	cpu-idle-states = <&BIG_CPU_SLEEP_0
+-			   &BIG_CPU_SLEEP_1
+-			   &CLUSTER_SLEEP_0>;
++	cpu-idle-states = <&big_cpu_sleep_0
++			   &big_cpu_sleep_1
++			   &cluster_sleep_0>;
+ };
+ 
+ /delete-node/ &domain_idle_states;
+ 
+ &idle_states {
+-	CLUSTER_SLEEP_0: cluster-sleep-0 {
++	cluster_sleep_0: cluster-sleep-0 {
+ 		compatible = "arm,idle-state";
+ 		idle-state-name = "cluster-power-down";
+ 		arm,psci-suspend-param = <0x40003444>;
+@@ -92,15 +92,15 @@ CLUSTER_SLEEP_0: cluster-sleep-0 {
+ 	};
+ };
+ 
+-/delete-node/ &CPU_PD0;
+-/delete-node/ &CPU_PD1;
+-/delete-node/ &CPU_PD2;
+-/delete-node/ &CPU_PD3;
+-/delete-node/ &CPU_PD4;
+-/delete-node/ &CPU_PD5;
+-/delete-node/ &CPU_PD6;
+-/delete-node/ &CPU_PD7;
+-/delete-node/ &CLUSTER_PD;
++/delete-node/ &cpu_pd0;
++/delete-node/ &cpu_pd1;
++/delete-node/ &cpu_pd2;
++/delete-node/ &cpu_pd3;
++/delete-node/ &cpu_pd4;
++/delete-node/ &cpu_pd5;
++/delete-node/ &cpu_pd6;
++/delete-node/ &cpu_pd7;
++/delete-node/ &cluster_pd;
+ 
+ &apps_rsc {
+ 	/delete-property/ power-domains;
+diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz.dtsi
+index 3c124bbe2f4c94..25b17b0425f24e 100644
+--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz.dtsi
+@@ -53,14 +53,14 @@ skin-temp-crit {
+ 			cooling-maps {
+ 				map0 {
+ 					trip = <&skin_temp_alert0>;
+-					cooling-device = <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
++					cooling-device = <&cpu6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ 				};
+ 
+ 				map1 {
+ 					trip = <&skin_temp_alert1>;
+-					cooling-device = <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
++					cooling-device = <&cpu6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ 				};
+ 			};
+ 		};
+diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-homestar.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor-homestar.dtsi
+index b2df22faafe889..f57976906d6304 100644
+--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-homestar.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-homestar.dtsi
+@@ -71,14 +71,14 @@ skin-temp-crit {
+ 			cooling-maps {
+ 				map0 {
+ 					trip = <&skin_temp_alert0>;
+-					cooling-device = <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
++					cooling-device = <&cpu6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ 				};
+ 
+ 				map1 {
+ 					trip = <&skin_temp_alert1>;
+-					cooling-device = <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
++					cooling-device = <&cpu6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ 				};
+ 			};
+ 		};
+diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom.dtsi
+index ac8d4589e3fb74..f7300ffbb4519a 100644
+--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom.dtsi
+@@ -12,11 +12,11 @@
+ 
+ / {
+ 	thermal-zones {
+-		5v-choke-thermal {
++		choke-5v-thermal {
+ 			thermal-sensors = <&pm6150_adc_tm 1>;
+ 
+ 			trips {
+-				5v-choke-crit {
++				choke-5v-crit {
+ 					temperature = <125000>;
+ 					hysteresis = <1000>;
+ 					type = "critical";
+diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-quackingstick.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor-quackingstick.dtsi
+index 00229b1515e605..ff8996b4de4e1e 100644
+--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-quackingstick.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-quackingstick.dtsi
+@@ -78,6 +78,7 @@ panel: panel@0 {
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&lcd_rst>;
+ 		avdd-supply = <&ppvar_lcd>;
++		avee-supply = <&ppvar_lcd>;
+ 		pp1800-supply = <&v1p8_disp>;
+ 		pp3300-supply = <&pp3300_dx_edp>;
+ 		backlight = <&backlight>;
+diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-wormdingler.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor-wormdingler.dtsi
+index af89d80426abbd..d4925be3b1fcf5 100644
+--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-wormdingler.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-wormdingler.dtsi
+@@ -78,14 +78,14 @@ skin-temp-crit {
+ 			cooling-maps {
+ 				map0 {
+ 					trip = <&skin_temp_alert0>;
+-					cooling-device = <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
++					cooling-device = <&cpu6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ 				};
+ 
+ 				map1 {
+ 					trip = <&skin_temp_alert1>;
+-					cooling-device = <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
++					cooling-device = <&cpu6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ 				};
+ 			};
+ 		};
+diff --git a/arch/arm64/boot/dts/qcom/sc7180.dtsi b/arch/arm64/boot/dts/qcom/sc7180.dtsi
+index b5ebf898032512..249b257fc6a74b 100644
+--- a/arch/arm64/boot/dts/qcom/sc7180.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7180.dtsi
+@@ -77,28 +77,28 @@ cpus {
+ 		#address-cells = <2>;
+ 		#size-cells = <0>;
+ 
+-		CPU0: cpu@0 {
++		cpu0: cpu@0 {
+ 			device_type = "cpu";
+ 			compatible = "qcom,kryo468";
+ 			reg = <0x0 0x0>;
+ 			clocks = <&cpufreq_hw 0>;
+ 			enable-method = "psci";
+-			power-domains = <&CPU_PD0>;
++			power-domains = <&cpu_pd0>;
+ 			power-domain-names = "psci";
+ 			capacity-dmips-mhz = <415>;
+ 			dynamic-power-coefficient = <137>;
+ 			operating-points-v2 = <&cpu0_opp_table>;
+ 			interconnects = <&gem_noc MASTER_APPSS_PROC 3 &mc_virt SLAVE_EBI1 3>,
+ 					<&osm_l3 MASTER_OSM_L3_APPS &osm_l3 SLAVE_OSM_L3>;
+-			next-level-cache = <&L2_0>;
++			next-level-cache = <&l2_0>;
+ 			#cooling-cells = <2>;
+ 			qcom,freq-domain = <&cpufreq_hw 0>;
+-			L2_0: l2-cache {
++			l2_0: l2-cache {
+ 				compatible = "cache";
+ 				cache-level = <2>;
+ 				cache-unified;
+-				next-level-cache = <&L3_0>;
+-				L3_0: l3-cache {
++				next-level-cache = <&l3_0>;
++				l3_0: l3-cache {
+ 					compatible = "cache";
+ 					cache-level = <3>;
+ 					cache-unified;
+@@ -106,206 +106,206 @@ L3_0: l3-cache {
+ 			};
+ 		};
+ 
+-		CPU1: cpu@100 {
++		cpu1: cpu@100 {
+ 			device_type = "cpu";
+ 			compatible = "qcom,kryo468";
+ 			reg = <0x0 0x100>;
+ 			clocks = <&cpufreq_hw 0>;
+ 			enable-method = "psci";
+-			power-domains = <&CPU_PD1>;
++			power-domains = <&cpu_pd1>;
+ 			power-domain-names = "psci";
+ 			capacity-dmips-mhz = <415>;
+ 			dynamic-power-coefficient = <137>;
+-			next-level-cache = <&L2_100>;
++			next-level-cache = <&l2_100>;
+ 			operating-points-v2 = <&cpu0_opp_table>;
+ 			interconnects = <&gem_noc MASTER_APPSS_PROC 3 &mc_virt SLAVE_EBI1 3>,
+ 					<&osm_l3 MASTER_OSM_L3_APPS &osm_l3 SLAVE_OSM_L3>;
+ 			#cooling-cells = <2>;
+ 			qcom,freq-domain = <&cpufreq_hw 0>;
+-			L2_100: l2-cache {
++			l2_100: l2-cache {
+ 				compatible = "cache";
+ 				cache-level = <2>;
+ 				cache-unified;
+-				next-level-cache = <&L3_0>;
++				next-level-cache = <&l3_0>;
+ 			};
+ 		};
+ 
+-		CPU2: cpu@200 {
++		cpu2: cpu@200 {
+ 			device_type = "cpu";
+ 			compatible = "qcom,kryo468";
+ 			reg = <0x0 0x200>;
+ 			clocks = <&cpufreq_hw 0>;
+ 			enable-method = "psci";
+-			power-domains = <&CPU_PD2>;
++			power-domains = <&cpu_pd2>;
+ 			power-domain-names = "psci";
+ 			capacity-dmips-mhz = <415>;
+ 			dynamic-power-coefficient = <137>;
+-			next-level-cache = <&L2_200>;
++			next-level-cache = <&l2_200>;
+ 			operating-points-v2 = <&cpu0_opp_table>;
+ 			interconnects = <&gem_noc MASTER_APPSS_PROC 3 &mc_virt SLAVE_EBI1 3>,
+ 					<&osm_l3 MASTER_OSM_L3_APPS &osm_l3 SLAVE_OSM_L3>;
+ 			#cooling-cells = <2>;
+ 			qcom,freq-domain = <&cpufreq_hw 0>;
+-			L2_200: l2-cache {
++			l2_200: l2-cache {
+ 				compatible = "cache";
+ 				cache-level = <2>;
+ 				cache-unified;
+-				next-level-cache = <&L3_0>;
++				next-level-cache = <&l3_0>;
+ 			};
+ 		};
+ 
+-		CPU3: cpu@300 {
++		cpu3: cpu@300 {
+ 			device_type = "cpu";
+ 			compatible = "qcom,kryo468";
+ 			reg = <0x0 0x300>;
+ 			clocks = <&cpufreq_hw 0>;
+ 			enable-method = "psci";
+-			power-domains = <&CPU_PD3>;
++			power-domains = <&cpu_pd3>;
+ 			power-domain-names = "psci";
+ 			capacity-dmips-mhz = <415>;
+ 			dynamic-power-coefficient = <137>;
+-			next-level-cache = <&L2_300>;
++			next-level-cache = <&l2_300>;
+ 			operating-points-v2 = <&cpu0_opp_table>;
+ 			interconnects = <&gem_noc MASTER_APPSS_PROC 3 &mc_virt SLAVE_EBI1 3>,
+ 					<&osm_l3 MASTER_OSM_L3_APPS &osm_l3 SLAVE_OSM_L3>;
+ 			#cooling-cells = <2>;
+ 			qcom,freq-domain = <&cpufreq_hw 0>;
+-			L2_300: l2-cache {
++			l2_300: l2-cache {
+ 				compatible = "cache";
+ 				cache-level = <2>;
+ 				cache-unified;
+-				next-level-cache = <&L3_0>;
++				next-level-cache = <&l3_0>;
+ 			};
+ 		};
+ 
+-		CPU4: cpu@400 {
++		cpu4: cpu@400 {
+ 			device_type = "cpu";
+ 			compatible = "qcom,kryo468";
+ 			reg = <0x0 0x400>;
+ 			clocks = <&cpufreq_hw 0>;
+ 			enable-method = "psci";
+-			power-domains = <&CPU_PD4>;
++			power-domains = <&cpu_pd4>;
+ 			power-domain-names = "psci";
+ 			capacity-dmips-mhz = <415>;
+ 			dynamic-power-coefficient = <137>;
+-			next-level-cache = <&L2_400>;
++			next-level-cache = <&l2_400>;
+ 			operating-points-v2 = <&cpu0_opp_table>;
+ 			interconnects = <&gem_noc MASTER_APPSS_PROC 3 &mc_virt SLAVE_EBI1 3>,
+ 					<&osm_l3 MASTER_OSM_L3_APPS &osm_l3 SLAVE_OSM_L3>;
+ 			#cooling-cells = <2>;
+ 			qcom,freq-domain = <&cpufreq_hw 0>;
+-			L2_400: l2-cache {
++			l2_400: l2-cache {
+ 				compatible = "cache";
+ 				cache-level = <2>;
+ 				cache-unified;
+-				next-level-cache = <&L3_0>;
++				next-level-cache = <&l3_0>;
+ 			};
+ 		};
+ 
+-		CPU5: cpu@500 {
++		cpu5: cpu@500 {
+ 			device_type = "cpu";
+ 			compatible = "qcom,kryo468";
+ 			reg = <0x0 0x500>;
+ 			clocks = <&cpufreq_hw 0>;
+ 			enable-method = "psci";
+-			power-domains = <&CPU_PD5>;
++			power-domains = <&cpu_pd5>;
+ 			power-domain-names = "psci";
+ 			capacity-dmips-mhz = <415>;
+ 			dynamic-power-coefficient = <137>;
+-			next-level-cache = <&L2_500>;
++			next-level-cache = <&l2_500>;
+ 			operating-points-v2 = <&cpu0_opp_table>;
+ 			interconnects = <&gem_noc MASTER_APPSS_PROC 3 &mc_virt SLAVE_EBI1 3>,
+ 					<&osm_l3 MASTER_OSM_L3_APPS &osm_l3 SLAVE_OSM_L3>;
+ 			#cooling-cells = <2>;
+ 			qcom,freq-domain = <&cpufreq_hw 0>;
+-			L2_500: l2-cache {
++			l2_500: l2-cache {
+ 				compatible = "cache";
+ 				cache-level = <2>;
+ 				cache-unified;
+-				next-level-cache = <&L3_0>;
++				next-level-cache = <&l3_0>;
+ 			};
+ 		};
+ 
+-		CPU6: cpu@600 {
++		cpu6: cpu@600 {
+ 			device_type = "cpu";
+ 			compatible = "qcom,kryo468";
+ 			reg = <0x0 0x600>;
+ 			clocks = <&cpufreq_hw 1>;
+ 			enable-method = "psci";
+-			power-domains = <&CPU_PD6>;
++			power-domains = <&cpu_pd6>;
+ 			power-domain-names = "psci";
+ 			capacity-dmips-mhz = <1024>;
+ 			dynamic-power-coefficient = <480>;
+-			next-level-cache = <&L2_600>;
++			next-level-cache = <&l2_600>;
+ 			operating-points-v2 = <&cpu6_opp_table>;
+ 			interconnects = <&gem_noc MASTER_APPSS_PROC 3 &mc_virt SLAVE_EBI1 3>,
+ 					<&osm_l3 MASTER_OSM_L3_APPS &osm_l3 SLAVE_OSM_L3>;
+ 			#cooling-cells = <2>;
+ 			qcom,freq-domain = <&cpufreq_hw 1>;
+-			L2_600: l2-cache {
++			l2_600: l2-cache {
+ 				compatible = "cache";
+ 				cache-level = <2>;
+ 				cache-unified;
+-				next-level-cache = <&L3_0>;
++				next-level-cache = <&l3_0>;
+ 			};
+ 		};
+ 
+-		CPU7: cpu@700 {
++		cpu7: cpu@700 {
+ 			device_type = "cpu";
+ 			compatible = "qcom,kryo468";
+ 			reg = <0x0 0x700>;
+ 			clocks = <&cpufreq_hw 1>;
+ 			enable-method = "psci";
+-			power-domains = <&CPU_PD7>;
++			power-domains = <&cpu_pd7>;
+ 			power-domain-names = "psci";
+ 			capacity-dmips-mhz = <1024>;
+ 			dynamic-power-coefficient = <480>;
+-			next-level-cache = <&L2_700>;
++			next-level-cache = <&l2_700>;
+ 			operating-points-v2 = <&cpu6_opp_table>;
+ 			interconnects = <&gem_noc MASTER_APPSS_PROC 3 &mc_virt SLAVE_EBI1 3>,
+ 					<&osm_l3 MASTER_OSM_L3_APPS &osm_l3 SLAVE_OSM_L3>;
+ 			#cooling-cells = <2>;
+ 			qcom,freq-domain = <&cpufreq_hw 1>;
+-			L2_700: l2-cache {
++			l2_700: l2-cache {
+ 				compatible = "cache";
+ 				cache-level = <2>;
+ 				cache-unified;
+-				next-level-cache = <&L3_0>;
++				next-level-cache = <&l3_0>;
+ 			};
+ 		};
+ 
+ 		cpu-map {
+ 			cluster0 {
+ 				core0 {
+-					cpu = <&CPU0>;
++					cpu = <&cpu0>;
+ 				};
+ 
+ 				core1 {
+-					cpu = <&CPU1>;
++					cpu = <&cpu1>;
+ 				};
+ 
+ 				core2 {
+-					cpu = <&CPU2>;
++					cpu = <&cpu2>;
+ 				};
+ 
+ 				core3 {
+-					cpu = <&CPU3>;
++					cpu = <&cpu3>;
+ 				};
+ 
+ 				core4 {
+-					cpu = <&CPU4>;
++					cpu = <&cpu4>;
+ 				};
+ 
+ 				core5 {
+-					cpu = <&CPU5>;
++					cpu = <&cpu5>;
+ 				};
+ 
+ 				core6 {
+-					cpu = <&CPU6>;
++					cpu = <&cpu6>;
+ 				};
+ 
+ 				core7 {
+-					cpu = <&CPU7>;
++					cpu = <&cpu7>;
+ 				};
+ 			};
+ 		};
+@@ -313,7 +313,7 @@ core7 {
+ 		idle_states: idle-states {
+ 			entry-method = "psci";
+ 
+-			LITTLE_CPU_SLEEP_0: cpu-sleep-0-0 {
++			little_cpu_sleep_0: cpu-sleep-0-0 {
+ 				compatible = "arm,idle-state";
+ 				idle-state-name = "little-power-down";
+ 				arm,psci-suspend-param = <0x40000003>;
+@@ -323,7 +323,7 @@ LITTLE_CPU_SLEEP_0: cpu-sleep-0-0 {
+ 				local-timer-stop;
+ 			};
+ 
+-			LITTLE_CPU_SLEEP_1: cpu-sleep-0-1 {
++			little_cpu_sleep_1: cpu-sleep-0-1 {
+ 				compatible = "arm,idle-state";
+ 				idle-state-name = "little-rail-power-down";
+ 				arm,psci-suspend-param = <0x40000004>;
+@@ -333,7 +333,7 @@ LITTLE_CPU_SLEEP_1: cpu-sleep-0-1 {
+ 				local-timer-stop;
+ 			};
+ 
+-			BIG_CPU_SLEEP_0: cpu-sleep-1-0 {
++			big_cpu_sleep_0: cpu-sleep-1-0 {
+ 				compatible = "arm,idle-state";
+ 				idle-state-name = "big-power-down";
+ 				arm,psci-suspend-param = <0x40000003>;
+@@ -343,7 +343,7 @@ BIG_CPU_SLEEP_0: cpu-sleep-1-0 {
+ 				local-timer-stop;
+ 			};
+ 
+-			BIG_CPU_SLEEP_1: cpu-sleep-1-1 {
++			big_cpu_sleep_1: cpu-sleep-1-1 {
+ 				compatible = "arm,idle-state";
+ 				idle-state-name = "big-rail-power-down";
+ 				arm,psci-suspend-param = <0x40000004>;
+@@ -355,7 +355,7 @@ BIG_CPU_SLEEP_1: cpu-sleep-1-1 {
+ 		};
+ 
+ 		domain_idle_states: domain-idle-states {
+-			CLUSTER_SLEEP_PC: cluster-sleep-0 {
++			cluster_sleep_pc: cluster-sleep-0 {
+ 				compatible = "domain-idle-state";
+ 				idle-state-name = "cluster-l3-power-collapse";
+ 				arm,psci-suspend-param = <0x41000044>;
+@@ -364,7 +364,7 @@ CLUSTER_SLEEP_PC: cluster-sleep-0 {
+ 				min-residency-us = <6118>;
+ 			};
+ 
+-			CLUSTER_SLEEP_CX_RET: cluster-sleep-1 {
++			cluster_sleep_cx_ret: cluster-sleep-1 {
+ 				compatible = "domain-idle-state";
+ 				idle-state-name = "cluster-cx-retention";
+ 				arm,psci-suspend-param = <0x41001244>;
+@@ -373,7 +373,7 @@ CLUSTER_SLEEP_CX_RET: cluster-sleep-1 {
+ 				min-residency-us = <8467>;
+ 			};
+ 
+-			CLUSTER_AOSS_SLEEP: cluster-sleep-2 {
++			cluster_aoss_sleep: cluster-sleep-2 {
+ 				compatible = "domain-idle-state";
+ 				idle-state-name = "cluster-power-down";
+ 				arm,psci-suspend-param = <0x4100b244>;
+@@ -583,59 +583,59 @@ psci {
+ 		compatible = "arm,psci-1.0";
+ 		method = "smc";
+ 
+-		CPU_PD0: cpu0 {
++		cpu_pd0: power-domain-cpu0 {
+ 			#power-domain-cells = <0>;
+-			power-domains = <&CLUSTER_PD>;
+-			domain-idle-states = <&LITTLE_CPU_SLEEP_0 &LITTLE_CPU_SLEEP_1>;
++			power-domains = <&cluster_pd>;
++			domain-idle-states = <&little_cpu_sleep_0 &little_cpu_sleep_1>;
+ 		};
+ 
+-		CPU_PD1: cpu1 {
++		cpu_pd1: power-domain-cpu1 {
+ 			#power-domain-cells = <0>;
+-			power-domains = <&CLUSTER_PD>;
+-			domain-idle-states = <&LITTLE_CPU_SLEEP_0 &LITTLE_CPU_SLEEP_1>;
++			power-domains = <&cluster_pd>;
++			domain-idle-states = <&little_cpu_sleep_0 &little_cpu_sleep_1>;
+ 		};
+ 
+-		CPU_PD2: cpu2 {
++		cpu_pd2: power-domain-cpu2 {
+ 			#power-domain-cells = <0>;
+-			power-domains = <&CLUSTER_PD>;
+-			domain-idle-states = <&LITTLE_CPU_SLEEP_0 &LITTLE_CPU_SLEEP_1>;
++			power-domains = <&cluster_pd>;
++			domain-idle-states = <&little_cpu_sleep_0 &little_cpu_sleep_1>;
+ 		};
+ 
+-		CPU_PD3: cpu3 {
++		cpu_pd3: power-domain-cpu3 {
+ 			#power-domain-cells = <0>;
+-			power-domains = <&CLUSTER_PD>;
+-			domain-idle-states = <&LITTLE_CPU_SLEEP_0 &LITTLE_CPU_SLEEP_1>;
++			power-domains = <&cluster_pd>;
++			domain-idle-states = <&little_cpu_sleep_0 &little_cpu_sleep_1>;
+ 		};
+ 
+-		CPU_PD4: cpu4 {
++		cpu_pd4: power-domain-cpu4 {
+ 			#power-domain-cells = <0>;
+-			power-domains = <&CLUSTER_PD>;
+-			domain-idle-states = <&LITTLE_CPU_SLEEP_0 &LITTLE_CPU_SLEEP_1>;
++			power-domains = <&cluster_pd>;
++			domain-idle-states = <&little_cpu_sleep_0 &little_cpu_sleep_1>;
+ 		};
+ 
+-		CPU_PD5: cpu5 {
++		cpu_pd5: power-domain-cpu5 {
+ 			#power-domain-cells = <0>;
+-			power-domains = <&CLUSTER_PD>;
+-			domain-idle-states = <&LITTLE_CPU_SLEEP_0 &LITTLE_CPU_SLEEP_1>;
++			power-domains = <&cluster_pd>;
++			domain-idle-states = <&little_cpu_sleep_0 &little_cpu_sleep_1>;
+ 		};
+ 
+-		CPU_PD6: cpu6 {
++		cpu_pd6: power-domain-cpu6 {
+ 			#power-domain-cells = <0>;
+-			power-domains = <&CLUSTER_PD>;
+-			domain-idle-states = <&BIG_CPU_SLEEP_0 &BIG_CPU_SLEEP_1>;
++			power-domains = <&cluster_pd>;
++			domain-idle-states = <&big_cpu_sleep_0 &big_cpu_sleep_1>;
+ 		};
+ 
+-		CPU_PD7: cpu7 {
++		cpu_pd7: power-domain-cpu7 {
+ 			#power-domain-cells = <0>;
+-			power-domains = <&CLUSTER_PD>;
+-			domain-idle-states = <&BIG_CPU_SLEEP_0 &BIG_CPU_SLEEP_1>;
++			power-domains = <&cluster_pd>;
++			domain-idle-states = <&big_cpu_sleep_0 &big_cpu_sleep_1>;
+ 		};
+ 
+-		CLUSTER_PD: cpu-cluster0 {
++		cluster_pd: power-domain-cluster {
+ 			#power-domain-cells = <0>;
+-			domain-idle-states = <&CLUSTER_SLEEP_PC
+-					      &CLUSTER_SLEEP_CX_RET
+-					      &CLUSTER_AOSS_SLEEP>;
++			domain-idle-states = <&cluster_sleep_pc
++					      &cluster_sleep_cx_ret
++					      &cluster_aoss_sleep>;
+ 		};
+ 	};
+ 
+@@ -2546,7 +2546,7 @@ etm@7040000 {
+ 			compatible = "arm,coresight-etm4x", "arm,primecell";
+ 			reg = <0 0x07040000 0 0x1000>;
+ 
+-			cpu = <&CPU0>;
++			cpu = <&cpu0>;
+ 
+ 			clocks = <&aoss_qmp>;
+ 			clock-names = "apb_pclk";
+@@ -2566,7 +2566,7 @@ etm@7140000 {
+ 			compatible = "arm,coresight-etm4x", "arm,primecell";
+ 			reg = <0 0x07140000 0 0x1000>;
+ 
+-			cpu = <&CPU1>;
++			cpu = <&cpu1>;
+ 
+ 			clocks = <&aoss_qmp>;
+ 			clock-names = "apb_pclk";
+@@ -2586,7 +2586,7 @@ etm@7240000 {
+ 			compatible = "arm,coresight-etm4x", "arm,primecell";
+ 			reg = <0 0x07240000 0 0x1000>;
+ 
+-			cpu = <&CPU2>;
++			cpu = <&cpu2>;
+ 
+ 			clocks = <&aoss_qmp>;
+ 			clock-names = "apb_pclk";
+@@ -2606,7 +2606,7 @@ etm@7340000 {
+ 			compatible = "arm,coresight-etm4x", "arm,primecell";
+ 			reg = <0 0x07340000 0 0x1000>;
+ 
+-			cpu = <&CPU3>;
++			cpu = <&cpu3>;
+ 
+ 			clocks = <&aoss_qmp>;
+ 			clock-names = "apb_pclk";
+@@ -2626,7 +2626,7 @@ etm@7440000 {
+ 			compatible = "arm,coresight-etm4x", "arm,primecell";
+ 			reg = <0 0x07440000 0 0x1000>;
+ 
+-			cpu = <&CPU4>;
++			cpu = <&cpu4>;
+ 
+ 			clocks = <&aoss_qmp>;
+ 			clock-names = "apb_pclk";
+@@ -2646,7 +2646,7 @@ etm@7540000 {
+ 			compatible = "arm,coresight-etm4x", "arm,primecell";
+ 			reg = <0 0x07540000 0 0x1000>;
+ 
+-			cpu = <&CPU5>;
++			cpu = <&cpu5>;
+ 
+ 			clocks = <&aoss_qmp>;
+ 			clock-names = "apb_pclk";
+@@ -2666,7 +2666,7 @@ etm@7640000 {
+ 			compatible = "arm,coresight-etm4x", "arm,primecell";
+ 			reg = <0 0x07640000 0 0x1000>;
+ 
+-			cpu = <&CPU6>;
++			cpu = <&cpu6>;
+ 
+ 			clocks = <&aoss_qmp>;
+ 			clock-names = "apb_pclk";
+@@ -2686,7 +2686,7 @@ etm@7740000 {
+ 			compatible = "arm,coresight-etm4x", "arm,primecell";
+ 			reg = <0 0x07740000 0 0x1000>;
+ 
+-			cpu = <&CPU7>;
++			cpu = <&cpu7>;
+ 
+ 			clocks = <&aoss_qmp>;
+ 			clock-names = "apb_pclk";
+@@ -3734,7 +3734,7 @@ apps_rsc: rsc@18200000 {
+ 					  <SLEEP_TCS   3>,
+ 					  <WAKE_TCS    3>,
+ 					  <CONTROL_TCS 1>;
+-			power-domains = <&CLUSTER_PD>;
++			power-domains = <&cluster_pd>;
+ 
+ 			rpmhcc: clock-controller {
+ 				compatible = "qcom,sc7180-rpmh-clk";
+@@ -4063,21 +4063,21 @@ cpu0_crit: cpu-crit {
+ 			cooling-maps {
+ 				map0 {
+ 					trip = <&cpu0_alert0>;
+-					cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
++					cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ 				};
+ 				map1 {
+ 					trip = <&cpu0_alert1>;
+-					cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
++					cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ 				};
+ 			};
+ 		};
+@@ -4111,21 +4111,21 @@ cpu1_crit: cpu-crit {
+ 			cooling-maps {
+ 				map0 {
+ 					trip = <&cpu1_alert0>;
+-					cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
++					cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ 				};
+ 				map1 {
+ 					trip = <&cpu1_alert1>;
+-					cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
++					cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ 				};
+ 			};
+ 		};
+@@ -4159,21 +4159,21 @@ cpu2_crit: cpu-crit {
+ 			cooling-maps {
+ 				map0 {
+ 					trip = <&cpu2_alert0>;
+-					cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
++					cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ 				};
+ 				map1 {
+ 					trip = <&cpu2_alert1>;
+-					cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
++					cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ 				};
+ 			};
+ 		};
+@@ -4207,21 +4207,21 @@ cpu3_crit: cpu-crit {
+ 			cooling-maps {
+ 				map0 {
+ 					trip = <&cpu3_alert0>;
+-					cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
++					cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ 				};
+ 				map1 {
+ 					trip = <&cpu3_alert1>;
+-					cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
++					cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ 				};
+ 			};
+ 		};
+@@ -4255,21 +4255,21 @@ cpu4_crit: cpu-crit {
+ 			cooling-maps {
+ 				map0 {
+ 					trip = <&cpu4_alert0>;
+-					cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
++					cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ 				};
+ 				map1 {
+ 					trip = <&cpu4_alert1>;
+-					cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
++					cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ 				};
+ 			};
+ 		};
+@@ -4303,21 +4303,21 @@ cpu5_crit: cpu-crit {
+ 			cooling-maps {
+ 				map0 {
+ 					trip = <&cpu5_alert0>;
+-					cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
++					cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ 				};
+ 				map1 {
+ 					trip = <&cpu5_alert1>;
+-					cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
++					cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ 				};
+ 			};
+ 		};
+@@ -4351,13 +4351,13 @@ cpu6_crit: cpu-crit {
+ 			cooling-maps {
+ 				map0 {
+ 					trip = <&cpu6_alert0>;
+-					cooling-device = <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
++					cooling-device = <&cpu6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ 				};
+ 				map1 {
+ 					trip = <&cpu6_alert1>;
+-					cooling-device = <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
++					cooling-device = <&cpu6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ 				};
+ 			};
+ 		};
+@@ -4391,13 +4391,13 @@ cpu7_crit: cpu-crit {
+ 			cooling-maps {
+ 				map0 {
+ 					trip = <&cpu7_alert0>;
+-					cooling-device = <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
++					cooling-device = <&cpu6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ 				};
+ 				map1 {
+ 					trip = <&cpu7_alert1>;
+-					cooling-device = <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
++					cooling-device = <&cpu6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ 				};
+ 			};
+ 		};
+@@ -4431,13 +4431,13 @@ cpu8_crit: cpu-crit {
+ 			cooling-maps {
+ 				map0 {
+ 					trip = <&cpu8_alert0>;
+-					cooling-device = <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
++					cooling-device = <&cpu6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ 				};
+ 				map1 {
+ 					trip = <&cpu8_alert1>;
+-					cooling-device = <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
++					cooling-device = <&cpu6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ 				};
+ 			};
+ 		};
+@@ -4471,13 +4471,13 @@ cpu9_crit: cpu-crit {
+ 			cooling-maps {
+ 				map0 {
+ 					trip = <&cpu9_alert0>;
+-					cooling-device = <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
++					cooling-device = <&cpu6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ 				};
+ 				map1 {
+ 					trip = <&cpu9_alert1>;
+-					cooling-device = <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+-							 <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
++					cooling-device = <&cpu6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
++							 <&cpu7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ 				};
+ 			};
+ 		};
+diff --git a/arch/arm64/boot/dts/qcom/sc7280.dtsi b/arch/arm64/boot/dts/qcom/sc7280.dtsi
+index 3d8410683402fd..8fbc95cf63fe7e 100644
+--- a/arch/arm64/boot/dts/qcom/sc7280.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7280.dtsi
+@@ -83,7 +83,7 @@ xo_board: xo-board {
+ 
+ 		sleep_clk: sleep-clk {
+ 			compatible = "fixed-clock";
+-			clock-frequency = <32000>;
++			clock-frequency = <32764>;
+ 			#clock-cells = <0>;
+ 		};
+ 	};
+diff --git a/arch/arm64/boot/dts/qcom/sc8280xp.dtsi b/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
+index 80a57aa228397e..b1e0e51a558291 100644
+--- a/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
+@@ -2725,7 +2725,7 @@ usb_2_qmpphy1: phy@88f1000 {
+ 
+ 		remoteproc_adsp: remoteproc@3000000 {
+ 			compatible = "qcom,sc8280xp-adsp-pas";
+-			reg = <0 0x03000000 0 0x100>;
++			reg = <0 0x03000000 0 0x10000>;
+ 
+ 			interrupts-extended = <&intc GIC_SPI 162 IRQ_TYPE_EDGE_RISING>,
+ 					      <&smp2p_adsp_in 0 IRQ_TYPE_EDGE_RISING>,
+@@ -3882,26 +3882,26 @@ camss: camss@ac5a000 {
+ 				    "vfe3",
+ 				    "csid3";
+ 
+-			interrupts = <GIC_SPI 359 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 360 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 448 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 464 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 465 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 466 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 467 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 468 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 469 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 477 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 478 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 479 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 640 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 641 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 758 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 759 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 760 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 761 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 762 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 764 IRQ_TYPE_LEVEL_HIGH>;
++			interrupts = <GIC_SPI 359 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 360 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 448 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 464 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 465 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 466 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 467 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 468 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 469 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 477 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 478 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 479 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 640 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 641 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 758 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 759 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 760 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 761 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 762 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 764 IRQ_TYPE_EDGE_RISING>;
+ 			interrupt-names = "csid1_lite",
+ 					  "vfe_lite1",
+ 					  "csiphy3",
+@@ -5205,7 +5205,7 @@ cpufreq_hw: cpufreq@18591000 {
+ 
+ 		remoteproc_nsp0: remoteproc@1b300000 {
+ 			compatible = "qcom,sc8280xp-nsp0-pas";
+-			reg = <0 0x1b300000 0 0x100>;
++			reg = <0 0x1b300000 0 0x10000>;
+ 
+ 			interrupts-extended = <&intc GIC_SPI 578 IRQ_TYPE_EDGE_RISING>,
+ 					      <&smp2p_nsp0_in 0 IRQ_TYPE_EDGE_RISING>,
+@@ -5336,7 +5336,7 @@ compute-cb@14 {
+ 
+ 		remoteproc_nsp1: remoteproc@21300000 {
+ 			compatible = "qcom,sc8280xp-nsp1-pas";
+-			reg = <0 0x21300000 0 0x100>;
++			reg = <0 0x21300000 0 0x10000>;
+ 
+ 			interrupts-extended = <&intc GIC_SPI 887 IRQ_TYPE_EDGE_RISING>,
+ 					      <&smp2p_nsp1_in 0 IRQ_TYPE_EDGE_RISING>,
+diff --git a/arch/arm64/boot/dts/qcom/sdm845-db845c-navigation-mezzanine.dts b/arch/arm64/boot/dts/qcom/sdm845-db845c-navigation-mezzanine.dts
+deleted file mode 100644
+index a21caa6f3fa259..00000000000000
+--- a/arch/arm64/boot/dts/qcom/sdm845-db845c-navigation-mezzanine.dts
++++ /dev/null
+@@ -1,104 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-/*
+- * Copyright (c) 2022, Linaro Ltd.
+- */
+-
+-/dts-v1/;
+-
+-#include "sdm845-db845c.dts"
+-
+-&camss {
+-	vdda-phy-supply = <&vreg_l1a_0p875>;
+-	vdda-pll-supply = <&vreg_l26a_1p2>;
+-
+-	status = "okay";
+-
+-	ports {
+-		port@0 {
+-			csiphy0_ep: endpoint {
+-				data-lanes = <0 1 2 3>;
+-				remote-endpoint = <&ov8856_ep>;
+-			};
+-		};
+-	};
+-};
+-
+-&cci {
+-	status = "okay";
+-};
+-
+-&cci_i2c0 {
+-	camera@10 {
+-		compatible = "ovti,ov8856";
+-		reg = <0x10>;
+-
+-		/* CAM0_RST_N */
+-		reset-gpios = <&tlmm 9 GPIO_ACTIVE_LOW>;
+-		pinctrl-names = "default";
+-		pinctrl-0 = <&cam0_default>;
+-
+-		clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
+-		clock-names = "xvclk";
+-		clock-frequency = <19200000>;
+-
+-		/*
+-		 * The &vreg_s4a_1p8 trace is powered on as a,
+-		 * so it is represented by a fixed regulator.
+-		 *
+-		 * The 2.8V vdda-supply and 1.2V vddd-supply regulators
+-		 * both have to be enabled through the power management
+-		 * gpios.
+-		 */
+-		dovdd-supply = <&vreg_lvs1a_1p8>;
+-		avdd-supply = <&cam0_avdd_2v8>;
+-		dvdd-supply = <&cam0_dvdd_1v2>;
+-
+-		port {
+-			ov8856_ep: endpoint {
+-				link-frequencies = /bits/ 64
+-					<360000000 180000000>;
+-				data-lanes = <1 2 3 4>;
+-				remote-endpoint = <&csiphy0_ep>;
+-			};
+-		};
+-	};
+-};
+-
+-&cci_i2c1 {
+-	camera@60 {
+-		compatible = "ovti,ov7251";
+-
+-		/* I2C address as per ov7251.txt linux documentation */
+-		reg = <0x60>;
+-
+-		/* CAM3_RST_N */
+-		enable-gpios = <&tlmm 21 GPIO_ACTIVE_HIGH>;
+-		pinctrl-names = "default";
+-		pinctrl-0 = <&cam3_default>;
+-
+-		clocks = <&clock_camcc CAM_CC_MCLK3_CLK>;
+-		clock-names = "xclk";
+-		clock-frequency = <24000000>;
+-
+-		/*
+-		 * The &vreg_s4a_1p8 trace always powered on.
+-		 *
+-		 * The 2.8V vdda-supply regulator is enabled when the
+-		 * vreg_s4a_1p8 trace is pulled high.
+-		 * It too is represented by a fixed regulator.
+-		 *
+-		 * No 1.2V vddd-supply regulator is used.
+-		 */
+-		vdddo-supply = <&vreg_lvs1a_1p8>;
+-		vdda-supply = <&cam3_avdd_2v8>;
+-
+-		status = "disabled";
+-
+-		port {
+-			ov7251_ep: endpoint {
+-				data-lanes = <0 1>;
+-/*				remote-endpoint = <&csiphy3_ep>; */
+-			};
+-		};
+-	};
+-};
+diff --git a/arch/arm64/boot/dts/qcom/sdm845-db845c-navigation-mezzanine.dtso b/arch/arm64/boot/dts/qcom/sdm845-db845c-navigation-mezzanine.dtso
+new file mode 100644
+index 00000000000000..51f1a4883ab8f0
+--- /dev/null
++++ b/arch/arm64/boot/dts/qcom/sdm845-db845c-navigation-mezzanine.dtso
+@@ -0,0 +1,70 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * Copyright (c) 2022, Linaro Ltd.
++ */
++
++/dts-v1/;
++/plugin/;
++
++#include <dt-bindings/clock/qcom,camcc-sdm845.h>
++#include <dt-bindings/gpio/gpio.h>
++
++&camss {
++	vdda-phy-supply = <&vreg_l1a_0p875>;
++	vdda-pll-supply = <&vreg_l26a_1p2>;
++
++	status = "okay";
++
++	ports {
++		port@0 {
++			csiphy0_ep: endpoint {
++				data-lanes = <0 1 2 3>;
++				remote-endpoint = <&ov8856_ep>;
++			};
++		};
++	};
++};
++
++&cci {
++	status = "okay";
++};
++
++&cci_i2c0 {
++	#address-cells = <1>;
++	#size-cells = <0>;
++
++	camera@10 {
++		compatible = "ovti,ov8856";
++		reg = <0x10>;
++
++		/* CAM0_RST_N */
++		reset-gpios = <&tlmm 9 GPIO_ACTIVE_LOW>;
++		pinctrl-names = "default";
++		pinctrl-0 = <&cam0_default>;
++
++		clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
++		clock-names = "xvclk";
++		clock-frequency = <19200000>;
++
++		/*
++		 * The &vreg_s4a_1p8 trace is powered on as a,
++		 * so it is represented by a fixed regulator.
++		 *
++		 * The 2.8V vdda-supply and 1.2V vddd-supply regulators
++		 * both have to be enabled through the power management
++		 * gpios.
++		 */
++		dovdd-supply = <&vreg_lvs1a_1p8>;
++		avdd-supply = <&cam0_avdd_2v8>;
++		dvdd-supply = <&cam0_dvdd_1v2>;
++
++		port {
++			ov8856_ep: endpoint {
++				link-frequencies = /bits/ 64
++					<360000000 180000000>;
++				data-lanes = <1 2 3 4>;
++				remote-endpoint = <&csiphy0_ep>;
++			};
++		};
++	};
++};
+diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
+index 54077549b9da7f..0a0cef9dfcc416 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
++++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
+@@ -4326,16 +4326,16 @@ camss: camss@acb3000 {
+ 				"vfe1",
+ 				"vfe_lite";
+ 
+-			interrupts = <GIC_SPI 464 IRQ_TYPE_LEVEL_HIGH>,
+-				<GIC_SPI 466 IRQ_TYPE_LEVEL_HIGH>,
+-				<GIC_SPI 468 IRQ_TYPE_LEVEL_HIGH>,
+-				<GIC_SPI 477 IRQ_TYPE_LEVEL_HIGH>,
+-				<GIC_SPI 478 IRQ_TYPE_LEVEL_HIGH>,
+-				<GIC_SPI 479 IRQ_TYPE_LEVEL_HIGH>,
+-				<GIC_SPI 448 IRQ_TYPE_LEVEL_HIGH>,
+-				<GIC_SPI 465 IRQ_TYPE_LEVEL_HIGH>,
+-				<GIC_SPI 467 IRQ_TYPE_LEVEL_HIGH>,
+-				<GIC_SPI 469 IRQ_TYPE_LEVEL_HIGH>;
++			interrupts = <GIC_SPI 464 IRQ_TYPE_EDGE_RISING>,
++				<GIC_SPI 466 IRQ_TYPE_EDGE_RISING>,
++				<GIC_SPI 468 IRQ_TYPE_EDGE_RISING>,
++				<GIC_SPI 477 IRQ_TYPE_EDGE_RISING>,
++				<GIC_SPI 478 IRQ_TYPE_EDGE_RISING>,
++				<GIC_SPI 479 IRQ_TYPE_EDGE_RISING>,
++				<GIC_SPI 448 IRQ_TYPE_EDGE_RISING>,
++				<GIC_SPI 465 IRQ_TYPE_EDGE_RISING>,
++				<GIC_SPI 467 IRQ_TYPE_EDGE_RISING>,
++				<GIC_SPI 469 IRQ_TYPE_EDGE_RISING>;
+ 			interrupt-names = "csid0",
+ 				"csid1",
+ 				"csid2",
+diff --git a/arch/arm64/boot/dts/qcom/sdx75.dtsi b/arch/arm64/boot/dts/qcom/sdx75.dtsi
+index 7cf3fcb469a868..dcb925348e3f31 100644
+--- a/arch/arm64/boot/dts/qcom/sdx75.dtsi
++++ b/arch/arm64/boot/dts/qcom/sdx75.dtsi
+@@ -34,7 +34,7 @@ xo_board: xo-board {
+ 
+ 		sleep_clk: sleep-clk {
+ 			compatible = "fixed-clock";
+-			clock-frequency = <32000>;
++			clock-frequency = <32764>;
+ 			#clock-cells = <0>;
+ 		};
+ 	};
+diff --git a/arch/arm64/boot/dts/qcom/sm4450.dtsi b/arch/arm64/boot/dts/qcom/sm4450.dtsi
+index 1e05cd00b635ee..0bbacab6842c3e 100644
+--- a/arch/arm64/boot/dts/qcom/sm4450.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm4450.dtsi
+@@ -29,7 +29,7 @@ xo_board: xo-board {
+ 
+ 		sleep_clk: sleep-clk {
+ 			compatible = "fixed-clock";
+-			clock-frequency = <32000>;
++			clock-frequency = <32764>;
+ 			#clock-cells = <0>;
+ 		};
+ 
+diff --git a/arch/arm64/boot/dts/qcom/sm6125.dtsi b/arch/arm64/boot/dts/qcom/sm6125.dtsi
+index 133610d14fc41a..1f7fd429ad4286 100644
+--- a/arch/arm64/boot/dts/qcom/sm6125.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm6125.dtsi
+@@ -28,7 +28,7 @@ xo_board: xo-board {
+ 		sleep_clk: sleep-clk {
+ 			compatible = "fixed-clock";
+ 			#clock-cells = <0>;
+-			clock-frequency = <32000>;
++			clock-frequency = <32764>;
+ 			clock-output-names = "sleep_clk";
+ 		};
+ 	};
+diff --git a/arch/arm64/boot/dts/qcom/sm6375.dtsi b/arch/arm64/boot/dts/qcom/sm6375.dtsi
+index 4d519dd6e7ef2f..72e01437ded125 100644
+--- a/arch/arm64/boot/dts/qcom/sm6375.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm6375.dtsi
+@@ -29,7 +29,7 @@ xo_board_clk: xo-board-clk {
+ 
+ 		sleep_clk: sleep-clk {
+ 			compatible = "fixed-clock";
+-			clock-frequency = <32000>;
++			clock-frequency = <32764>;
+ 			#clock-cells = <0>;
+ 		};
+ 	};
+diff --git a/arch/arm64/boot/dts/qcom/sm7125.dtsi b/arch/arm64/boot/dts/qcom/sm7125.dtsi
+index 12dd72859a433b..a53145a610a3c8 100644
+--- a/arch/arm64/boot/dts/qcom/sm7125.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm7125.dtsi
+@@ -6,11 +6,11 @@
+ #include "sc7180.dtsi"
+ 
+ /* SM7125 uses Kryo 465 instead of Kryo 468 */
+-&CPU0 { compatible = "qcom,kryo465"; };
+-&CPU1 { compatible = "qcom,kryo465"; };
+-&CPU2 { compatible = "qcom,kryo465"; };
+-&CPU3 { compatible = "qcom,kryo465"; };
+-&CPU4 { compatible = "qcom,kryo465"; };
+-&CPU5 { compatible = "qcom,kryo465"; };
+-&CPU6 { compatible = "qcom,kryo465"; };
+-&CPU7 { compatible = "qcom,kryo465"; };
++&cpu0 { compatible = "qcom,kryo465"; };
++&cpu1 { compatible = "qcom,kryo465"; };
++&cpu2 { compatible = "qcom,kryo465"; };
++&cpu3 { compatible = "qcom,kryo465"; };
++&cpu4 { compatible = "qcom,kryo465"; };
++&cpu5 { compatible = "qcom,kryo465"; };
++&cpu6 { compatible = "qcom,kryo465"; };
++&cpu7 { compatible = "qcom,kryo465"; };
+diff --git a/arch/arm64/boot/dts/qcom/sm7225-fairphone-fp4.dts b/arch/arm64/boot/dts/qcom/sm7225-fairphone-fp4.dts
+index 2ee2561b57b1d6..52b16a4fdc4321 100644
+--- a/arch/arm64/boot/dts/qcom/sm7225-fairphone-fp4.dts
++++ b/arch/arm64/boot/dts/qcom/sm7225-fairphone-fp4.dts
+@@ -32,7 +32,7 @@ / {
+ 	chassis-type = "handset";
+ 
+ 	/* required for bootloader to select correct board */
+-	qcom,msm-id = <434 0x10000>, <459 0x10000>;
++	qcom,msm-id = <459 0x10000>;
+ 	qcom,board-id = <8 32>;
+ 
+ 	aliases {
+diff --git a/arch/arm64/boot/dts/qcom/sm8150-microsoft-surface-duo.dts b/arch/arm64/boot/dts/qcom/sm8150-microsoft-surface-duo.dts
+index b039773c44653a..a1323a8b8e6bfb 100644
+--- a/arch/arm64/boot/dts/qcom/sm8150-microsoft-surface-duo.dts
++++ b/arch/arm64/boot/dts/qcom/sm8150-microsoft-surface-duo.dts
+@@ -376,8 +376,8 @@ da7280@4a {
+ 		pinctrl-0 = <&da7280_intr_default>;
+ 
+ 		dlg,actuator-type = "LRA";
+-		dlg,dlg,const-op-mode = <1>;
+-		dlg,dlg,periodic-op-mode = <1>;
++		dlg,const-op-mode = <1>;
++		dlg,periodic-op-mode = <1>;
+ 		dlg,nom-microvolt = <2000000>;
+ 		dlg,abs-max-microvolt = <2000000>;
+ 		dlg,imax-microamp = <129000>;
+diff --git a/arch/arm64/boot/dts/qcom/sm8250.dtsi b/arch/arm64/boot/dts/qcom/sm8250.dtsi
+index 630f4eff20bf81..faa36d17b9f2c9 100644
+--- a/arch/arm64/boot/dts/qcom/sm8250.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8250.dtsi
+@@ -84,7 +84,7 @@ xo_board: xo-board {
+ 
+ 		sleep_clk: sleep-clk {
+ 			compatible = "fixed-clock";
+-			clock-frequency = <32768>;
++			clock-frequency = <32764>;
+ 			#clock-cells = <0>;
+ 		};
+ 	};
+@@ -4481,20 +4481,20 @@ camss: camss@ac6a000 {
+ 				    "vfe_lite0",
+ 				    "vfe_lite1";
+ 
+-			interrupts = <GIC_SPI 477 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 478 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 479 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 448 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 464 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 466 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 468 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 359 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 465 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 467 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 469 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 360 IRQ_TYPE_LEVEL_HIGH>;
++			interrupts = <GIC_SPI 477 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 478 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 479 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 448 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 86 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 89 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 464 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 466 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 468 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 359 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 465 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 467 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 469 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 360 IRQ_TYPE_EDGE_RISING>;
+ 			interrupt-names = "csiphy0",
+ 					  "csiphy1",
+ 					  "csiphy2",
+diff --git a/arch/arm64/boot/dts/qcom/sm8350.dtsi b/arch/arm64/boot/dts/qcom/sm8350.dtsi
+index 37a2aba0d4cae0..041750d71e4550 100644
+--- a/arch/arm64/boot/dts/qcom/sm8350.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8350.dtsi
+@@ -42,7 +42,7 @@ xo_board: xo-board {
+ 
+ 		sleep_clk: sleep-clk {
+ 			compatible = "fixed-clock";
+-			clock-frequency = <32000>;
++			clock-frequency = <32764>;
+ 			#clock-cells = <0>;
+ 		};
+ 	};
+diff --git a/arch/arm64/boot/dts/qcom/sm8450.dtsi b/arch/arm64/boot/dts/qcom/sm8450.dtsi
+index 38cb524cc56893..f7d52e491b694b 100644
+--- a/arch/arm64/boot/dts/qcom/sm8450.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8450.dtsi
+@@ -43,7 +43,7 @@ xo_board: xo-board {
+ 		sleep_clk: sleep-clk {
+ 			compatible = "fixed-clock";
+ 			#clock-cells = <0>;
+-			clock-frequency = <32000>;
++			clock-frequency = <32764>;
+ 		};
+ 	};
+ 
+diff --git a/arch/arm64/boot/dts/qcom/sm8550-hdk.dts b/arch/arm64/boot/dts/qcom/sm8550-hdk.dts
+index 01c92160260572..29bc1ddfc7b25f 100644
+--- a/arch/arm64/boot/dts/qcom/sm8550-hdk.dts
++++ b/arch/arm64/boot/dts/qcom/sm8550-hdk.dts
+@@ -1172,7 +1172,7 @@ &sdhc_2 {
+ };
+ 
+ &sleep_clk {
+-	clock-frequency = <32000>;
++	clock-frequency = <32764>;
+ };
+ 
+ &swr0 {
+diff --git a/arch/arm64/boot/dts/qcom/sm8550-mtp.dts b/arch/arm64/boot/dts/qcom/sm8550-mtp.dts
+index ab447fc252f7dd..5648ab60ba4c4b 100644
+--- a/arch/arm64/boot/dts/qcom/sm8550-mtp.dts
++++ b/arch/arm64/boot/dts/qcom/sm8550-mtp.dts
+@@ -825,7 +825,7 @@ &sdhc_2 {
+ };
+ 
+ &sleep_clk {
+-	clock-frequency = <32000>;
++	clock-frequency = <32764>;
+ };
+ 
+ &swr0 {
+diff --git a/arch/arm64/boot/dts/qcom/sm8550-qrd.dts b/arch/arm64/boot/dts/qcom/sm8550-qrd.dts
+index 6052dd922ec55c..3a6cb279130489 100644
+--- a/arch/arm64/boot/dts/qcom/sm8550-qrd.dts
++++ b/arch/arm64/boot/dts/qcom/sm8550-qrd.dts
+@@ -1005,7 +1005,7 @@ &remoteproc_mpss {
+ };
+ 
+ &sleep_clk {
+-	clock-frequency = <32000>;
++	clock-frequency = <32764>;
+ };
+ 
+ &swr0 {
+diff --git a/arch/arm64/boot/dts/qcom/sm8550-samsung-q5q.dts b/arch/arm64/boot/dts/qcom/sm8550-samsung-q5q.dts
+index 3d351e90bb3986..62a6b90697b063 100644
+--- a/arch/arm64/boot/dts/qcom/sm8550-samsung-q5q.dts
++++ b/arch/arm64/boot/dts/qcom/sm8550-samsung-q5q.dts
+@@ -565,7 +565,7 @@ &remoteproc_mpss {
+ };
+ 
+ &sleep_clk {
+-	clock-frequency = <32000>;
++	clock-frequency = <32764>;
+ };
+ 
+ &tlmm {
+diff --git a/arch/arm64/boot/dts/qcom/sm8550-sony-xperia-yodo-pdx234.dts b/arch/arm64/boot/dts/qcom/sm8550-sony-xperia-yodo-pdx234.dts
+index 85d487ef80a0be..d90dc7b37c4a74 100644
+--- a/arch/arm64/boot/dts/qcom/sm8550-sony-xperia-yodo-pdx234.dts
++++ b/arch/arm64/boot/dts/qcom/sm8550-sony-xperia-yodo-pdx234.dts
+@@ -722,7 +722,7 @@ &sdhc_2 {
+ };
+ 
+ &sleep_clk {
+-	clock-frequency = <32000>;
++	clock-frequency = <32764>;
+ };
+ 
+ &tlmm {
+diff --git a/arch/arm64/boot/dts/qcom/sm8650-hdk.dts b/arch/arm64/boot/dts/qcom/sm8650-hdk.dts
+index 127c7aacd4fc31..59363267d2e0ab 100644
+--- a/arch/arm64/boot/dts/qcom/sm8650-hdk.dts
++++ b/arch/arm64/boot/dts/qcom/sm8650-hdk.dts
+@@ -1117,7 +1117,7 @@ &sdhc_2 {
+ };
+ 
+ &sleep_clk {
+-	clock-frequency = <32000>;
++	clock-frequency = <32764>;
+ };
+ 
+ &swr0 {
+diff --git a/arch/arm64/boot/dts/qcom/sm8650-mtp.dts b/arch/arm64/boot/dts/qcom/sm8650-mtp.dts
+index c63822f5b12789..74275ca668c76f 100644
+--- a/arch/arm64/boot/dts/qcom/sm8650-mtp.dts
++++ b/arch/arm64/boot/dts/qcom/sm8650-mtp.dts
+@@ -734,7 +734,7 @@ &sdhc_2 {
+ };
+ 
+ &sleep_clk {
+-	clock-frequency = <32000>;
++	clock-frequency = <32764>;
+ };
+ 
+ &swr0 {
+diff --git a/arch/arm64/boot/dts/qcom/sm8650-qrd.dts b/arch/arm64/boot/dts/qcom/sm8650-qrd.dts
+index 8ca0d28eba9bd0..1689699d6de710 100644
+--- a/arch/arm64/boot/dts/qcom/sm8650-qrd.dts
++++ b/arch/arm64/boot/dts/qcom/sm8650-qrd.dts
+@@ -1045,7 +1045,7 @@ &remoteproc_mpss {
+ };
+ 
+ &sleep_clk {
+-	clock-frequency = <32000>;
++	clock-frequency = <32764>;
+ };
+ 
+ &spi4 {
+diff --git a/arch/arm64/boot/dts/qcom/sm8650.dtsi b/arch/arm64/boot/dts/qcom/sm8650.dtsi
+index 01ac3769ffa62f..cd54fd723ce40e 100644
+--- a/arch/arm64/boot/dts/qcom/sm8650.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8650.dtsi
+@@ -5624,7 +5624,7 @@ compute-cb@8 {
+ 
+ 					/* note: secure cb9 in downstream */
+ 
+-					compute-cb@10 {
++					compute-cb@12 {
+ 						compatible = "qcom,fastrpc-compute-cb";
+ 						reg = <12>;
+ 
+@@ -5634,7 +5634,7 @@ compute-cb@10 {
+ 						dma-coherent;
+ 					};
+ 
+-					compute-cb@11 {
++					compute-cb@13 {
+ 						compatible = "qcom,fastrpc-compute-cb";
+ 						reg = <13>;
+ 
+@@ -5644,7 +5644,7 @@ compute-cb@11 {
+ 						dma-coherent;
+ 					};
+ 
+-					compute-cb@12 {
++					compute-cb@14 {
+ 						compatible = "qcom,fastrpc-compute-cb";
+ 						reg = <14>;
+ 
+diff --git a/arch/arm64/boot/dts/qcom/x1e80100-microsoft-romulus.dtsi b/arch/arm64/boot/dts/qcom/x1e80100-microsoft-romulus.dtsi
+index cdb401767c4206..89e39d55278579 100644
+--- a/arch/arm64/boot/dts/qcom/x1e80100-microsoft-romulus.dtsi
++++ b/arch/arm64/boot/dts/qcom/x1e80100-microsoft-romulus.dtsi
+@@ -680,14 +680,14 @@ &qupv3_2 {
+ 
+ &remoteproc_adsp {
+ 	firmware-name = "qcom/x1e80100/microsoft/Romulus/qcadsp8380.mbn",
+-			"qcom/x1e80100/microsoft/Romulus/adsp_dtb.mbn";
++			"qcom/x1e80100/microsoft/Romulus/adsp_dtbs.elf";
+ 
+ 	status = "okay";
+ };
+ 
+ &remoteproc_cdsp {
+ 	firmware-name = "qcom/x1e80100/microsoft/Romulus/qccdsp8380.mbn",
+-			"qcom/x1e80100/microsoft/Romulus/cdsp_dtb.mbn";
++			"qcom/x1e80100/microsoft/Romulus/cdsp_dtbs.elf";
+ 
+ 	status = "okay";
+ };
+diff --git a/arch/arm64/boot/dts/qcom/x1e80100.dtsi b/arch/arm64/boot/dts/qcom/x1e80100.dtsi
+index a97ceff939d882..f0797df9619b15 100644
+--- a/arch/arm64/boot/dts/qcom/x1e80100.dtsi
++++ b/arch/arm64/boot/dts/qcom/x1e80100.dtsi
+@@ -38,7 +38,7 @@ xo_board: xo-board {
+ 
+ 		sleep_clk: sleep-clk {
+ 			compatible = "fixed-clock";
+-			clock-frequency = <32000>;
++			clock-frequency = <32764>;
+ 			#clock-cells = <0>;
+ 		};
+ 
+diff --git a/arch/arm64/boot/dts/renesas/rzg3s-smarc-som.dtsi b/arch/arm64/boot/dts/renesas/rzg3s-smarc-som.dtsi
+index 21bfa4e03972ff..612cdc7efabbcc 100644
+--- a/arch/arm64/boot/dts/renesas/rzg3s-smarc-som.dtsi
++++ b/arch/arm64/boot/dts/renesas/rzg3s-smarc-som.dtsi
+@@ -42,11 +42,6 @@ aliases {
+ #endif
+ 	};
+ 
+-	chosen {
+-		bootargs = "ignore_loglevel";
+-		stdout-path = "serial0:115200n8";
+-	};
+-
+ 	memory@48000000 {
+ 		device_type = "memory";
+ 		/* First 128MB is reserved for secure area. */
+diff --git a/arch/arm64/boot/dts/renesas/rzg3s-smarc.dtsi b/arch/arm64/boot/dts/renesas/rzg3s-smarc.dtsi
+index 7945d44e6ee159..af2ab1629104b0 100644
+--- a/arch/arm64/boot/dts/renesas/rzg3s-smarc.dtsi
++++ b/arch/arm64/boot/dts/renesas/rzg3s-smarc.dtsi
+@@ -12,10 +12,15 @@
+ / {
+ 	aliases {
+ 		i2c0 = &i2c0;
+-		serial0 = &scif0;
++		serial3 = &scif0;
+ 		mmc1 = &sdhi1;
+ 	};
+ 
++	chosen {
++		bootargs = "ignore_loglevel";
++		stdout-path = "serial3:115200n8";
++	};
++
+ 	keys {
+ 		compatible = "gpio-keys";
+ 
+diff --git a/arch/arm64/boot/dts/rockchip/rk3308-rock-s0.dts b/arch/arm64/boot/dts/rockchip/rk3308-rock-s0.dts
+index bd6419a5c20a22..8311af4c8689f6 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3308-rock-s0.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3308-rock-s0.dts
+@@ -74,6 +74,23 @@ vcc_io: regulator-3v3-vcc-io {
+ 		vin-supply = <&vcc5v0_sys>;
+ 	};
+ 
++	/*
++	 * HW revision prior to v1.2 must pull GPIO4_D6 low to access sdmmc.
++	 * This is modeled as an always-on active low fixed regulator.
++	 */
++	vcc_sd: regulator-3v3-vcc-sd {
++		compatible = "regulator-fixed";
++		gpios = <&gpio4 RK_PD6 GPIO_ACTIVE_LOW>;
++		pinctrl-names = "default";
++		pinctrl-0 = <&sdmmc_2030>;
++		regulator-name = "vcc_sd";
++		regulator-always-on;
++		regulator-boot-on;
++		regulator-min-microvolt = <3300000>;
++		regulator-max-microvolt = <3300000>;
++		vin-supply = <&vcc_io>;
++	};
++
+ 	vcc5v0_sys: regulator-5v0-vcc-sys {
+ 		compatible = "regulator-fixed";
+ 		regulator-name = "vcc5v0_sys";
+@@ -181,6 +198,12 @@ pwr_led: pwr-led {
+ 		};
+ 	};
+ 
++	sdmmc {
++		sdmmc_2030: sdmmc-2030 {
++			rockchip,pins = <4 RK_PD6 RK_FUNC_GPIO &pcfg_pull_none>;
++		};
++	};
++
+ 	wifi {
+ 		wifi_reg_on: wifi-reg-on {
+ 			rockchip,pins = <0 RK_PA2 RK_FUNC_GPIO &pcfg_pull_none>;
+@@ -233,7 +256,7 @@ &sdmmc {
+ 	cap-mmc-highspeed;
+ 	cap-sd-highspeed;
+ 	disable-wp;
+-	vmmc-supply = <&vcc_io>;
++	vmmc-supply = <&vcc_sd>;
+ 	status = "okay";
+ };
+ 
+diff --git a/arch/arm64/boot/dts/rockchip/rk3568-wolfvision-pf5.dts b/arch/arm64/boot/dts/rockchip/rk3568-wolfvision-pf5.dts
+index 170b14f92f51b5..f9ef0af8aa1ac8 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3568-wolfvision-pf5.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3568-wolfvision-pf5.dts
+@@ -53,7 +53,7 @@ hdmi_tx_5v: hdmi-tx-5v-regulator {
+ 
+ 	pdm_codec: pdm-codec {
+ 		compatible = "dmic-codec";
+-		num-channels = <1>;
++		num-channels = <2>;
+ 		#sound-dai-cells = <0>;
+ 	};
+ 
+diff --git a/arch/arm64/boot/dts/ti/Makefile b/arch/arm64/boot/dts/ti/Makefile
+index bcd392c3206e50..562e6d57bc9919 100644
+--- a/arch/arm64/boot/dts/ti/Makefile
++++ b/arch/arm64/boot/dts/ti/Makefile
+@@ -41,10 +41,6 @@ dtb-$(CONFIG_ARCH_K3) += k3-am62x-sk-csi2-imx219.dtbo
+ dtb-$(CONFIG_ARCH_K3) += k3-am62x-sk-hdmi-audio.dtbo
+ 
+ # Boards with AM64x SoC
+-k3-am642-hummingboard-t-pcie-dtbs := \
+-	k3-am642-hummingboard-t.dtb k3-am642-hummingboard-t-pcie.dtbo
+-k3-am642-hummingboard-t-usb3-dtbs := \
+-	k3-am642-hummingboard-t.dtb k3-am642-hummingboard-t-usb3.dtbo
+ dtb-$(CONFIG_ARCH_K3) += k3-am642-evm.dtb
+ dtb-$(CONFIG_ARCH_K3) += k3-am642-evm-icssg1-dualemac.dtbo
+ dtb-$(CONFIG_ARCH_K3) += k3-am642-evm-icssg1-dualemac-mii.dtbo
+diff --git a/arch/arm64/boot/dts/ti/k3-am62-main.dtsi b/arch/arm64/boot/dts/ti/k3-am62-main.dtsi
+index 5b92aef5b284b7..60c6814206a1f9 100644
+--- a/arch/arm64/boot/dts/ti/k3-am62-main.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-am62-main.dtsi
+@@ -23,7 +23,6 @@ gic500: interrupt-controller@1800000 {
+ 		interrupt-controller;
+ 		reg = <0x00 0x01800000 0x00 0x10000>,	/* GICD */
+ 		      <0x00 0x01880000 0x00 0xc0000>,	/* GICR */
+-		      <0x00 0x01880000 0x00 0xc0000>,   /* GICR */
+ 		      <0x01 0x00000000 0x00 0x2000>,    /* GICC */
+ 		      <0x01 0x00010000 0x00 0x1000>,    /* GICH */
+ 		      <0x01 0x00020000 0x00 0x2000>;    /* GICV */
+diff --git a/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi b/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi
+index 16a578ae2b412f..56945d29e0150b 100644
+--- a/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi
+@@ -18,7 +18,6 @@ gic500: interrupt-controller@1800000 {
+ 		compatible = "arm,gic-v3";
+ 		reg = <0x00 0x01800000 0x00 0x10000>,	/* GICD */
+ 		      <0x00 0x01880000 0x00 0xc0000>,	/* GICR */
+-		      <0x00 0x01880000 0x00 0xc0000>,   /* GICR */
+ 		      <0x01 0x00000000 0x00 0x2000>,    /* GICC */
+ 		      <0x01 0x00010000 0x00 0x1000>,    /* GICH */
+ 		      <0x01 0x00020000 0x00 0x2000>;    /* GICV */
+diff --git a/arch/arm64/boot/dts/ti/k3-am642-hummingboard-t-pcie.dts b/arch/arm64/boot/dts/ti/k3-am642-hummingboard-t-pcie.dts
+new file mode 100644
+index 00000000000000..023b2a6aaa5668
+--- /dev/null
++++ b/arch/arm64/boot/dts/ti/k3-am642-hummingboard-t-pcie.dts
+@@ -0,0 +1,47 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ * Copyright (C) 2023 Josua Mayer <josua@solid-run.com>
++ *
++ * DTS for SolidRun AM642 HummingBoard-T,
++ * running on Cortex A53, with PCI-E.
++ *
++ */
++
++#include "k3-am642-hummingboard-t.dts"
++
++#include "k3-serdes.h"
++
++/ {
++	model = "SolidRun AM642 HummingBoard-T with PCI-E";
++};
++
++&pcie0_rc {
++	pinctrl-names = "default";
++	pinctrl-0 = <&pcie0_default_pins>;
++	reset-gpios = <&main_gpio1 15 GPIO_ACTIVE_HIGH>;
++	phys = <&serdes0_link>;
++	phy-names = "pcie-phy";
++	num-lanes = <1>;
++	status = "okay";
++};
++
++&serdes0 {
++	#address-cells = <1>;
++	#size-cells = <0>;
++
++	serdes0_link: phy@0 {
++		reg = <0>;
++		cdns,num-lanes = <1>;
++		cdns,phy-type = <PHY_TYPE_PCIE>;
++		#phy-cells = <0>;
++		resets = <&serdes_wiz0 1>;
++	};
++};
++
++&serdes_ln_ctrl {
++	idle-states = <AM64_SERDES0_LANE0_PCIE0>;
++};
++
++&serdes_mux {
++	idle-state = <1>;
++};
+diff --git a/arch/arm64/boot/dts/ti/k3-am642-hummingboard-t-pcie.dtso b/arch/arm64/boot/dts/ti/k3-am642-hummingboard-t-pcie.dtso
+deleted file mode 100644
+index bd9a5caf20da5b..00000000000000
+--- a/arch/arm64/boot/dts/ti/k3-am642-hummingboard-t-pcie.dtso
++++ /dev/null
+@@ -1,45 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0+
+-/*
+- * Copyright (C) 2023 Josua Mayer <josua@solid-run.com>
+- *
+- * Overlay for SolidRun AM642 HummingBoard-T to enable PCI-E.
+- */
+-
+-/dts-v1/;
+-/plugin/;
+-
+-#include <dt-bindings/gpio/gpio.h>
+-#include <dt-bindings/phy/phy.h>
+-
+-#include "k3-serdes.h"
+-
+-&pcie0_rc {
+-	pinctrl-names = "default";
+-	pinctrl-0 = <&pcie0_default_pins>;
+-	reset-gpios = <&main_gpio1 15 GPIO_ACTIVE_HIGH>;
+-	phys = <&serdes0_link>;
+-	phy-names = "pcie-phy";
+-	num-lanes = <1>;
+-	status = "okay";
+-};
+-
+-&serdes0 {
+-	#address-cells = <1>;
+-	#size-cells = <0>;
+-
+-	serdes0_link: phy@0 {
+-		reg = <0>;
+-		cdns,num-lanes = <1>;
+-		cdns,phy-type = <PHY_TYPE_PCIE>;
+-		#phy-cells = <0>;
+-		resets = <&serdes_wiz0 1>;
+-	};
+-};
+-
+-&serdes_ln_ctrl {
+-	idle-states = <AM64_SERDES0_LANE0_PCIE0>;
+-};
+-
+-&serdes_mux {
+-	idle-state = <1>;
+-};
+diff --git a/arch/arm64/boot/dts/ti/k3-am642-hummingboard-t-usb3.dts b/arch/arm64/boot/dts/ti/k3-am642-hummingboard-t-usb3.dts
+new file mode 100644
+index 00000000000000..ee9bd618f37010
+--- /dev/null
++++ b/arch/arm64/boot/dts/ti/k3-am642-hummingboard-t-usb3.dts
+@@ -0,0 +1,47 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ * Copyright (C) 2023 Josua Mayer <josua@solid-run.com>
++ *
++ * DTS for SolidRun AM642 HummingBoard-T,
++ * running on Cortex A53, with USB-3.1 Gen 1.
++ *
++ */
++
++#include "k3-am642-hummingboard-t.dts"
++
++#include "k3-serdes.h"
++
++/ {
++	model = "SolidRun AM642 HummingBoard-T with USB-3.1 Gen 1";
++};
++
++&serdes0 {
++	#address-cells = <1>;
++	#size-cells = <0>;
++
++	serdes0_link: phy@0 {
++		reg = <0>;
++		cdns,num-lanes = <1>;
++		cdns,phy-type = <PHY_TYPE_USB3>;
++		#phy-cells = <0>;
++		resets = <&serdes_wiz0 1>;
++	};
++};
++
++&serdes_ln_ctrl {
++	idle-states = <AM64_SERDES0_LANE0_USB>;
++};
++
++&serdes_mux {
++	idle-state = <0>;
++};
++
++&usbss0 {
++	/delete-property/ ti,usb2-only;
++};
++
++&usb0 {
++	maximum-speed = "super-speed";
++	phys = <&serdes0_link>;
++	phy-names = "cdns3,usb3-phy";
++};
+diff --git a/arch/arm64/boot/dts/ti/k3-am642-hummingboard-t-usb3.dtso b/arch/arm64/boot/dts/ti/k3-am642-hummingboard-t-usb3.dtso
+deleted file mode 100644
+index ffcc3bd3c7bc5d..00000000000000
+--- a/arch/arm64/boot/dts/ti/k3-am642-hummingboard-t-usb3.dtso
++++ /dev/null
+@@ -1,44 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0+
+-/*
+- * Copyright (C) 2023 Josua Mayer <josua@solid-run.com>
+- *
+- * Overlay for SolidRun AM642 HummingBoard-T to enable USB-3.1.
+- */
+-
+-/dts-v1/;
+-/plugin/;
+-
+-#include <dt-bindings/phy/phy.h>
+-
+-#include "k3-serdes.h"
+-
+-&serdes0 {
+-	#address-cells = <1>;
+-	#size-cells = <0>;
+-
+-	serdes0_link: phy@0 {
+-		reg = <0>;
+-		cdns,num-lanes = <1>;
+-		cdns,phy-type = <PHY_TYPE_USB3>;
+-		#phy-cells = <0>;
+-		resets = <&serdes_wiz0 1>;
+-	};
+-};
+-
+-&serdes_ln_ctrl {
+-	idle-states = <AM64_SERDES0_LANE0_USB>;
+-};
+-
+-&serdes_mux {
+-	idle-state = <0>;
+-};
+-
+-&usbss0 {
+-	/delete-property/ ti,usb2-only;
+-};
+-
+-&usb0 {
+-	maximum-speed = "super-speed";
+-	phys = <&serdes0_link>;
+-	phy-names = "cdns3,usb3-phy";
+-};
+diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
+index 5fdbfea7a5b295..8fe7dbae33bf90 100644
+--- a/arch/arm64/configs/defconfig
++++ b/arch/arm64/configs/defconfig
+@@ -1347,7 +1347,6 @@ CONFIG_SM_DISPCC_6115=m
+ CONFIG_SM_DISPCC_8250=y
+ CONFIG_SM_DISPCC_8450=m
+ CONFIG_SM_DISPCC_8550=m
+-CONFIG_SM_DISPCC_8650=m
+ CONFIG_SM_GCC_4450=y
+ CONFIG_SM_GCC_6115=y
+ CONFIG_SM_GCC_8350=y
+diff --git a/arch/hexagon/include/asm/cmpxchg.h b/arch/hexagon/include/asm/cmpxchg.h
+index bf6cf5579cf459..9c58fb81f7fd67 100644
+--- a/arch/hexagon/include/asm/cmpxchg.h
++++ b/arch/hexagon/include/asm/cmpxchg.h
+@@ -56,7 +56,7 @@ __arch_xchg(unsigned long x, volatile void *ptr, int size)
+ 	__typeof__(ptr) __ptr = (ptr);				\
+ 	__typeof__(*(ptr)) __old = (old);			\
+ 	__typeof__(*(ptr)) __new = (new);			\
+-	__typeof__(*(ptr)) __oldval = 0;			\
++	__typeof__(*(ptr)) __oldval = (__typeof__(*(ptr))) 0;	\
+ 								\
+ 	asm volatile(						\
+ 		"1:	%0 = memw_locked(%1);\n"		\
+diff --git a/arch/hexagon/kernel/traps.c b/arch/hexagon/kernel/traps.c
+index 75e062722d285b..040a958de1dfc5 100644
+--- a/arch/hexagon/kernel/traps.c
++++ b/arch/hexagon/kernel/traps.c
+@@ -195,8 +195,10 @@ int die(const char *str, struct pt_regs *regs, long err)
+ 	printk(KERN_EMERG "Oops: %s[#%d]:\n", str, ++die.counter);
+ 
+ 	if (notify_die(DIE_OOPS, str, regs, err, pt_cause(regs), SIGSEGV) ==
+-	    NOTIFY_STOP)
++	    NOTIFY_STOP) {
++		spin_unlock_irq(&die.lock);
+ 		return 1;
++	}
+ 
+ 	print_modules();
+ 	show_regs(regs);
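The traps.c hunk is a classic lock-leak fix: die() takes die.lock on entry, and the NOTIFY_STOP early return used to exit without dropping it, so the next oops through this path would wedge on the held lock. A minimal userspace analogue, with hypothetical names, of the rule that every exit path must release what the entry path acquired:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t report_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Hypothetical reporting path: both the early return and the normal
 * return must drop report_lock, mirroring the die() fix above.
 */
static int report(int stop_early)
{
	pthread_mutex_lock(&report_lock);

	if (stop_early) {
		pthread_mutex_unlock(&report_lock);	/* the added unlock */
		return 1;
	}

	printf("full report\n");
	pthread_mutex_unlock(&report_lock);
	return 0;
}

int main(void)
{
	report(1);	/* early exit; must not leave the lock held */
	report(0);	/* would hang here if the early return leaked it */
	return 0;
}

Compile with -pthread; the second call only completes because the first one unlocked on its early return.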
+diff --git a/arch/loongarch/include/asm/hw_breakpoint.h b/arch/loongarch/include/asm/hw_breakpoint.h
+index d78330916bd18a..13b2462f3d8c9d 100644
+--- a/arch/loongarch/include/asm/hw_breakpoint.h
++++ b/arch/loongarch/include/asm/hw_breakpoint.h
+@@ -38,8 +38,8 @@ struct arch_hw_breakpoint {
+  * Limits.
+  * Changing these will require modifications to the register accessors.
+  */
+-#define LOONGARCH_MAX_BRP		8
+-#define LOONGARCH_MAX_WRP		8
++#define LOONGARCH_MAX_BRP		14
++#define LOONGARCH_MAX_WRP		14
+ 
+ /* Virtual debug register bases. */
+ #define CSR_CFG_ADDR	0
+diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h
+index 64ad277e096edd..aaa4ad6b85944a 100644
+--- a/arch/loongarch/include/asm/loongarch.h
++++ b/arch/loongarch/include/asm/loongarch.h
+@@ -959,6 +959,36 @@
+ #define LOONGARCH_CSR_DB7CTRL		0x34a	/* data breakpoint 7 control */
+ #define LOONGARCH_CSR_DB7ASID		0x34b	/* data breakpoint 7 asid */
+ 
++#define LOONGARCH_CSR_DB8ADDR		0x350	/* data breakpoint 8 address */
++#define LOONGARCH_CSR_DB8MASK		0x351	/* data breakpoint 8 mask */
++#define LOONGARCH_CSR_DB8CTRL		0x352	/* data breakpoint 8 control */
++#define LOONGARCH_CSR_DB8ASID		0x353	/* data breakpoint 8 asid */
++
++#define LOONGARCH_CSR_DB9ADDR		0x358	/* data breakpoint 9 address */
++#define LOONGARCH_CSR_DB9MASK		0x359	/* data breakpoint 9 mask */
++#define LOONGARCH_CSR_DB9CTRL		0x35a	/* data breakpoint 9 control */
++#define LOONGARCH_CSR_DB9ASID		0x35b	/* data breakpoint 9 asid */
++
++#define LOONGARCH_CSR_DB10ADDR		0x360	/* data breakpoint 10 address */
++#define LOONGARCH_CSR_DB10MASK		0x361	/* data breakpoint 10 mask */
++#define LOONGARCH_CSR_DB10CTRL		0x362	/* data breakpoint 10 control */
++#define LOONGARCH_CSR_DB10ASID		0x363	/* data breakpoint 10 asid */
++
++#define LOONGARCH_CSR_DB11ADDR		0x368	/* data breakpoint 11 address */
++#define LOONGARCH_CSR_DB11MASK		0x369	/* data breakpoint 11 mask */
++#define LOONGARCH_CSR_DB11CTRL		0x36a	/* data breakpoint 11 control */
++#define LOONGARCH_CSR_DB11ASID		0x36b	/* data breakpoint 11 asid */
++
++#define LOONGARCH_CSR_DB12ADDR		0x370	/* data breakpoint 12 address */
++#define LOONGARCH_CSR_DB12MASK		0x371	/* data breakpoint 12 mask */
++#define LOONGARCH_CSR_DB12CTRL		0x372	/* data breakpoint 12 control */
++#define LOONGARCH_CSR_DB12ASID		0x373	/* data breakpoint 12 asid */
++
++#define LOONGARCH_CSR_DB13ADDR		0x378	/* data breakpoint 13 address */
++#define LOONGARCH_CSR_DB13MASK		0x379	/* data breakpoint 13 mask */
++#define LOONGARCH_CSR_DB13CTRL		0x37a	/* data breakpoint 13 control */
++#define LOONGARCH_CSR_DB13ASID		0x37b	/* data breakpoint 13 asid */
++
+ #define LOONGARCH_CSR_FWPC		0x380	/* instruction breakpoint config */
+ #define LOONGARCH_CSR_FWPS		0x381	/* instruction breakpoint status */
+ 
+@@ -1002,6 +1032,36 @@
+ #define LOONGARCH_CSR_IB7CTRL		0x3ca	/* inst breakpoint 7 control */
+ #define LOONGARCH_CSR_IB7ASID		0x3cb	/* inst breakpoint 7 asid */
+ 
++#define LOONGARCH_CSR_IB8ADDR		0x3d0	/* inst breakpoint 8 address */
++#define LOONGARCH_CSR_IB8MASK		0x3d1	/* inst breakpoint 8 mask */
++#define LOONGARCH_CSR_IB8CTRL		0x3d2	/* inst breakpoint 8 control */
++#define LOONGARCH_CSR_IB8ASID		0x3d3	/* inst breakpoint 8 asid */
++
++#define LOONGARCH_CSR_IB9ADDR		0x3d8	/* inst breakpoint 9 address */
++#define LOONGARCH_CSR_IB9MASK		0x3d9	/* inst breakpoint 9 mask */
++#define LOONGARCH_CSR_IB9CTRL		0x3da	/* inst breakpoint 9 control */
++#define LOONGARCH_CSR_IB9ASID		0x3db	/* inst breakpoint 9 asid */
++
++#define LOONGARCH_CSR_IB10ADDR		0x3e0	/* inst breakpoint 10 address */
++#define LOONGARCH_CSR_IB10MASK		0x3e1	/* inst breakpoint 10 mask */
++#define LOONGARCH_CSR_IB10CTRL		0x3e2	/* inst breakpoint 10 control */
++#define LOONGARCH_CSR_IB10ASID		0x3e3	/* inst breakpoint 10 asid */
++
++#define LOONGARCH_CSR_IB11ADDR		0x3e8	/* inst breakpoint 11 address */
++#define LOONGARCH_CSR_IB11MASK		0x3e9	/* inst breakpoint 11 mask */
++#define LOONGARCH_CSR_IB11CTRL		0x3ea	/* inst breakpoint 11 control */
++#define LOONGARCH_CSR_IB11ASID		0x3eb	/* inst breakpoint 11 asid */
++
++#define LOONGARCH_CSR_IB12ADDR		0x3f0	/* inst breakpoint 12 address */
++#define LOONGARCH_CSR_IB12MASK		0x3f1	/* inst breakpoint 12 mask */
++#define LOONGARCH_CSR_IB12CTRL		0x3f2	/* inst breakpoint 12 control */
++#define LOONGARCH_CSR_IB12ASID		0x3f3	/* inst breakpoint 12 asid */
++
++#define LOONGARCH_CSR_IB13ADDR		0x3f8	/* inst breakpoint 13 address */
++#define LOONGARCH_CSR_IB13MASK		0x3f9	/* inst breakpoint 13 mask */
++#define LOONGARCH_CSR_IB13CTRL		0x3fa	/* inst breakpoint 13 control */
++#define LOONGARCH_CSR_IB13ASID		0x3fb	/* inst breakpoint 13 asid */
++
+ #define LOONGARCH_CSR_DEBUG		0x500	/* debug config */
+ #define LOONGARCH_CSR_DERA		0x501	/* debug era */
+ #define LOONGARCH_CSR_DESAVE		0x502	/* debug save */
+diff --git a/arch/loongarch/kernel/hw_breakpoint.c b/arch/loongarch/kernel/hw_breakpoint.c
+index a6e4b605bfa8d6..c35f9bf3803349 100644
+--- a/arch/loongarch/kernel/hw_breakpoint.c
++++ b/arch/loongarch/kernel/hw_breakpoint.c
+@@ -51,7 +51,13 @@ int hw_breakpoint_slots(int type)
+ 	READ_WB_REG_CASE(OFF, 4, REG, T, VAL);		\
+ 	READ_WB_REG_CASE(OFF, 5, REG, T, VAL);		\
+ 	READ_WB_REG_CASE(OFF, 6, REG, T, VAL);		\
+-	READ_WB_REG_CASE(OFF, 7, REG, T, VAL);
++	READ_WB_REG_CASE(OFF, 7, REG, T, VAL);		\
++	READ_WB_REG_CASE(OFF, 8, REG, T, VAL);		\
++	READ_WB_REG_CASE(OFF, 9, REG, T, VAL);		\
++	READ_WB_REG_CASE(OFF, 10, REG, T, VAL);		\
++	READ_WB_REG_CASE(OFF, 11, REG, T, VAL);		\
++	READ_WB_REG_CASE(OFF, 12, REG, T, VAL);		\
++	READ_WB_REG_CASE(OFF, 13, REG, T, VAL);
+ 
+ #define GEN_WRITE_WB_REG_CASES(OFF, REG, T, VAL)	\
+ 	WRITE_WB_REG_CASE(OFF, 0, REG, T, VAL);		\
+@@ -61,7 +67,13 @@ int hw_breakpoint_slots(int type)
+ 	WRITE_WB_REG_CASE(OFF, 4, REG, T, VAL);		\
+ 	WRITE_WB_REG_CASE(OFF, 5, REG, T, VAL);		\
+ 	WRITE_WB_REG_CASE(OFF, 6, REG, T, VAL);		\
+-	WRITE_WB_REG_CASE(OFF, 7, REG, T, VAL);
++	WRITE_WB_REG_CASE(OFF, 7, REG, T, VAL);		\
++	WRITE_WB_REG_CASE(OFF, 8, REG, T, VAL);		\
++	WRITE_WB_REG_CASE(OFF, 9, REG, T, VAL);		\
++	WRITE_WB_REG_CASE(OFF, 10, REG, T, VAL);	\
++	WRITE_WB_REG_CASE(OFF, 11, REG, T, VAL);	\
++	WRITE_WB_REG_CASE(OFF, 12, REG, T, VAL);	\
++	WRITE_WB_REG_CASE(OFF, 13, REG, T, VAL);
+ 
+ static u64 read_wb_reg(int reg, int n, int t)
+ {
+diff --git a/arch/loongarch/power/platform.c b/arch/loongarch/power/platform.c
+index 0909729dc2e153..5bbdb9fd76e5d0 100644
+--- a/arch/loongarch/power/platform.c
++++ b/arch/loongarch/power/platform.c
+@@ -17,7 +17,7 @@ void enable_gpe_wakeup(void)
+ 	if (acpi_gbl_reduced_hardware)
+ 	       return;
+ 
+-	acpi_enable_all_wakeup_gpes();
++	acpi_hw_enable_all_wakeup_gpes();
+ }
+ 
+ void enable_pci_wakeup(void)
+diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
+index 18a3028ac3b6de..dad2e7980f245b 100644
+--- a/arch/powerpc/include/asm/hugetlb.h
++++ b/arch/powerpc/include/asm/hugetlb.h
+@@ -15,6 +15,15 @@
+ 
+ extern bool hugetlb_disabled;
+ 
++static inline bool hugepages_supported(void)
++{
++	if (hugetlb_disabled)
++		return false;
++
++	return HPAGE_SHIFT != 0;
++}
++#define hugepages_supported hugepages_supported
++
+ void __init hugetlbpage_init_defaultsize(void);
+ 
+ int slice_is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
+diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
+index 76381e14e800c7..0ebae6e4c19dd7 100644
+--- a/arch/powerpc/kernel/iommu.c
++++ b/arch/powerpc/kernel/iommu.c
+@@ -687,7 +687,7 @@ void iommu_table_clear(struct iommu_table *tbl)
+ void iommu_table_reserve_pages(struct iommu_table *tbl,
+ 		unsigned long res_start, unsigned long res_end)
+ {
+-	int i;
++	unsigned long i;
+ 
+ 	WARN_ON_ONCE(res_end < res_start);
+ 	/*
+diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
+index 534cd159e9ab4c..ae6f7a235d8b24 100644
+--- a/arch/powerpc/platforms/pseries/iommu.c
++++ b/arch/powerpc/platforms/pseries/iommu.c
+@@ -1650,7 +1650,8 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
+ 		iommu_table_setparms_common(newtbl, pci->phb->bus->number, create.liobn,
+ 					    dynamic_addr, dynamic_len, page_shift, NULL,
+ 					    &iommu_table_lpar_multi_ops);
+-		iommu_init_table(newtbl, pci->phb->node, start, end);
++		iommu_init_table(newtbl, pci->phb->node,
++				 start >> page_shift, end >> page_shift);
+ 
+ 		pci->table_group->tables[default_win_removed ? 0 : 1] = newtbl;
+ 
+@@ -2065,7 +2066,9 @@ static long spapr_tce_create_table(struct iommu_table_group *table_group, int nu
+ 							    offset, 1UL << window_shift,
+ 							    IOMMU_PAGE_SHIFT_4K, NULL,
+ 							    &iommu_table_lpar_multi_ops);
+-				iommu_init_table(tbl, pci->phb->node, start, end);
++				iommu_init_table(tbl, pci->phb->node,
++						 start >> IOMMU_PAGE_SHIFT_4K,
++						 end >> IOMMU_PAGE_SHIFT_4K);
+ 
+ 				table_group->tables[0] = tbl;
+ 
+@@ -2136,7 +2139,7 @@ static long spapr_tce_create_table(struct iommu_table_group *table_group, int nu
+ 	/* New table for using DDW instead of the default DMA window */
+ 	iommu_table_setparms_common(tbl, pci->phb->bus->number, create.liobn, win_addr,
+ 				    1UL << len, page_shift, NULL, &iommu_table_lpar_multi_ops);
+-	iommu_init_table(tbl, pci->phb->node, start, end);
++	iommu_init_table(tbl, pci->phb->node, start >> page_shift, end >> page_shift);
+ 
+ 	pci->table_group->tables[num] = tbl;
+ 	set_iommu_table_base(&pdev->dev, tbl);
+@@ -2205,6 +2208,9 @@ static long spapr_tce_unset_window(struct iommu_table_group *table_group, int nu
+ 	const char *win_name;
+ 	int ret = -ENODEV;
+ 
++	if (!tbl) /* The table was never created OR window was never opened */
++		return 0;
++
+ 	mutex_lock(&dma_win_init_mutex);
+ 
+ 	if ((num == 0) && is_default_window_table(table_group, tbl))
+diff --git a/arch/riscv/kernel/vector.c b/arch/riscv/kernel/vector.c
+index 682b3feee45114..a30fb2fb8a2b16 100644
+--- a/arch/riscv/kernel/vector.c
++++ b/arch/riscv/kernel/vector.c
+@@ -309,7 +309,7 @@ static int __init riscv_v_sysctl_init(void)
+ static int __init riscv_v_sysctl_init(void) { return 0; }
+ #endif /* ! CONFIG_SYSCTL */
+ 
+-static int riscv_v_init(void)
++static int __init riscv_v_init(void)
+ {
+ 	return riscv_v_sysctl_init();
+ }
+diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
+index cc1f9cffe2a5f4..62f2c9e8e05f7d 100644
+--- a/arch/s390/Kconfig
++++ b/arch/s390/Kconfig
+@@ -65,6 +65,7 @@ config S390
+ 	select ARCH_ENABLE_MEMORY_HOTPLUG if SPARSEMEM
+ 	select ARCH_ENABLE_MEMORY_HOTREMOVE
+ 	select ARCH_ENABLE_SPLIT_PMD_PTLOCK if PGTABLE_LEVELS > 2
++	select ARCH_HAS_CPU_FINALIZE_INIT
+ 	select ARCH_HAS_CURRENT_STACK_POINTER
+ 	select ARCH_HAS_DEBUG_VIRTUAL
+ 	select ARCH_HAS_DEBUG_VM_PGTABLE
+diff --git a/arch/s390/Makefile b/arch/s390/Makefile
+index 7fd57398221ea3..9b772093278704 100644
+--- a/arch/s390/Makefile
++++ b/arch/s390/Makefile
+@@ -22,7 +22,7 @@ KBUILD_AFLAGS_DECOMPRESSOR := $(CLANG_FLAGS) -m64 -D__ASSEMBLY__
+ ifndef CONFIG_AS_IS_LLVM
+ KBUILD_AFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO),$(aflags_dwarf))
+ endif
+-KBUILD_CFLAGS_DECOMPRESSOR := $(CLANG_FLAGS) -m64 -O2 -mpacked-stack
++KBUILD_CFLAGS_DECOMPRESSOR := $(CLANG_FLAGS) -m64 -O2 -mpacked-stack -std=gnu11
+ KBUILD_CFLAGS_DECOMPRESSOR += -DDISABLE_BRANCH_PROFILING -D__NO_FORTIFY
+ KBUILD_CFLAGS_DECOMPRESSOR += -D__DECOMPRESSOR
+ KBUILD_CFLAGS_DECOMPRESSOR += -fno-delete-null-pointer-checks -msoft-float -mbackchain
+diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
+index eb00fa1771da07..ad17d91ad2e661 100644
+--- a/arch/s390/include/asm/sclp.h
++++ b/arch/s390/include/asm/sclp.h
+@@ -137,6 +137,7 @@ void sclp_early_printk(const char *s);
+ void __sclp_early_printk(const char *s, unsigned int len);
+ void sclp_emergency_printk(const char *s);
+ 
++int sclp_init(void);
+ int sclp_early_get_memsize(unsigned long *mem);
+ int sclp_early_get_hsa_size(unsigned long *hsa_size);
+ int _sclp_get_core_info(struct sclp_core_info *info);
+diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
+index e2e0aa463fbd1e..c3075e4a8efc31 100644
+--- a/arch/s390/kernel/perf_cpum_cf.c
++++ b/arch/s390/kernel/perf_cpum_cf.c
+@@ -981,7 +981,7 @@ static int cfdiag_push_sample(struct perf_event *event,
+ 	if (event->attr.sample_type & PERF_SAMPLE_RAW) {
+ 		raw.frag.size = cpuhw->usedss;
+ 		raw.frag.data = cpuhw->stop;
+-		perf_sample_save_raw_data(&data, &raw);
++		perf_sample_save_raw_data(&data, event, &raw);
+ 	}
+ 
+ 	overflow = perf_event_overflow(event, &data, &regs);
+diff --git a/arch/s390/kernel/perf_pai_crypto.c b/arch/s390/kernel/perf_pai_crypto.c
+index fa732545426611..10725f5a6f0fd1 100644
+--- a/arch/s390/kernel/perf_pai_crypto.c
++++ b/arch/s390/kernel/perf_pai_crypto.c
+@@ -478,7 +478,7 @@ static int paicrypt_push_sample(size_t rawsize, struct paicrypt_map *cpump,
+ 	if (event->attr.sample_type & PERF_SAMPLE_RAW) {
+ 		raw.frag.size = rawsize;
+ 		raw.frag.data = cpump->save;
+-		perf_sample_save_raw_data(&data, &raw);
++		perf_sample_save_raw_data(&data, event, &raw);
+ 	}
+ 
+ 	overflow = perf_event_overflow(event, &data, &regs);
+diff --git a/arch/s390/kernel/perf_pai_ext.c b/arch/s390/kernel/perf_pai_ext.c
+index 7f462bef1fc075..a8f0bad99cf04f 100644
+--- a/arch/s390/kernel/perf_pai_ext.c
++++ b/arch/s390/kernel/perf_pai_ext.c
+@@ -503,7 +503,7 @@ static int paiext_push_sample(size_t rawsize, struct paiext_map *cpump,
+ 	if (event->attr.sample_type & PERF_SAMPLE_RAW) {
+ 		raw.frag.size = rawsize;
+ 		raw.frag.data = cpump->save;
+-		perf_sample_save_raw_data(&data, &raw);
++		perf_sample_save_raw_data(&data, event, &raw);
+ 	}
+ 
+ 	overflow = perf_event_overflow(event, &data, &regs);
+diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
+index a3fea683b22706..99f165726ca9ef 100644
+--- a/arch/s390/kernel/setup.c
++++ b/arch/s390/kernel/setup.c
+@@ -1006,3 +1006,8 @@ void __init setup_arch(char **cmdline_p)
+ 	/* Add system specific data to the random pool */
+ 	setup_randomness();
+ }
++
++void __init arch_cpu_finalize_init(void)
++{
++	sclp_init();
++}
+diff --git a/arch/s390/purgatory/Makefile b/arch/s390/purgatory/Makefile
+index 24eccaa293371b..bdcf2a3b6c41b3 100644
+--- a/arch/s390/purgatory/Makefile
++++ b/arch/s390/purgatory/Makefile
+@@ -13,7 +13,7 @@ CFLAGS_sha256.o := -D__DISABLE_EXPORTS -D__NO_FORTIFY
+ $(obj)/mem.o: $(srctree)/arch/s390/lib/mem.S FORCE
+ 	$(call if_changed_rule,as_o_S)
+ 
+-KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes
++KBUILD_CFLAGS := -std=gnu11 -fno-strict-aliasing -Wall -Wstrict-prototypes
+ KBUILD_CFLAGS += -Wno-pointer-sign -Wno-sign-compare
+ KBUILD_CFLAGS += -fno-zero-initialized-in-bss -fno-builtin -ffreestanding
+ KBUILD_CFLAGS += -Os -m64 -msoft-float -fno-common
+diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
+index e91970b01d6243..c3a2f6f57770ab 100644
+--- a/arch/x86/events/amd/ibs.c
++++ b/arch/x86/events/amd/ibs.c
+@@ -1118,7 +1118,7 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
+ 				.data = ibs_data.data,
+ 			},
+ 		};
+-		perf_sample_save_raw_data(&data, &raw);
++		perf_sample_save_raw_data(&data, event, &raw);
+ 	}
+ 
+ 	if (perf_ibs == &perf_ibs_op)
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 427d1daf06d06a..6b981868905f5d 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -1735,7 +1735,7 @@ struct kvm_x86_ops {
+ 	bool allow_apicv_in_x2apic_without_x2apic_virtualization;
+ 	void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu);
+ 	void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
+-	void (*hwapic_isr_update)(int isr);
++	void (*hwapic_isr_update)(struct kvm_vcpu *vcpu, int isr);
+ 	void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
+ 	void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu);
+ 	void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu);
+diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
+index 766f092dab80b3..f1fac08fdef28c 100644
+--- a/arch/x86/kernel/smpboot.c
++++ b/arch/x86/kernel/smpboot.c
+@@ -495,14 +495,6 @@ static int x86_cluster_flags(void)
+ }
+ #endif
+ 
+-static int x86_die_flags(void)
+-{
+-	if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU))
+-	       return x86_sched_itmt_flags();
+-
+-	return 0;
+-}
+-
+ /*
+  * Set if a package/die has multiple NUMA nodes inside.
+  * AMD Magny-Cours, Intel Cluster-on-Die, and Intel
+@@ -538,7 +530,7 @@ static void __init build_sched_topology(void)
+ 	 */
+ 	if (!x86_has_numa_in_package) {
+ 		x86_topology[i++] = (struct sched_domain_topology_level){
+-			cpu_cpu_mask, x86_die_flags, SD_INIT_NAME(PKG)
++			cpu_cpu_mask, x86_sched_itmt_flags, SD_INIT_NAME(PKG)
+ 		};
+ 	}
+ 
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index 95c6beb8ce2799..375bbb9600d3c1 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -763,7 +763,7 @@ static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
+ 	 * just set SVI.
+ 	 */
+ 	if (unlikely(apic->apicv_active))
+-		kvm_x86_call(hwapic_isr_update)(vec);
++		kvm_x86_call(hwapic_isr_update)(apic->vcpu, vec);
+ 	else {
+ 		++apic->isr_count;
+ 		BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
+@@ -808,7 +808,7 @@ static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
+ 	 * and must be left alone.
+ 	 */
+ 	if (unlikely(apic->apicv_active))
+-		kvm_x86_call(hwapic_isr_update)(apic_find_highest_isr(apic));
++		kvm_x86_call(hwapic_isr_update)(apic->vcpu, apic_find_highest_isr(apic));
+ 	else {
+ 		--apic->isr_count;
+ 		BUG_ON(apic->isr_count < 0);
+@@ -2786,7 +2786,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
+ 	if (apic->apicv_active) {
+ 		kvm_x86_call(apicv_post_state_restore)(vcpu);
+ 		kvm_x86_call(hwapic_irr_update)(vcpu, -1);
+-		kvm_x86_call(hwapic_isr_update)(-1);
++		kvm_x86_call(hwapic_isr_update)(vcpu, -1);
+ 	}
+ 
+ 	vcpu->arch.apic_arb_prio = 0;
+@@ -3102,9 +3102,8 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
+ 	kvm_apic_update_apicv(vcpu);
+ 	if (apic->apicv_active) {
+ 		kvm_x86_call(apicv_post_state_restore)(vcpu);
+-		kvm_x86_call(hwapic_irr_update)(vcpu,
+-						apic_find_highest_irr(apic));
+-		kvm_x86_call(hwapic_isr_update)(apic_find_highest_isr(apic));
++		kvm_x86_call(hwapic_irr_update)(vcpu, apic_find_highest_irr(apic));
++		kvm_x86_call(hwapic_isr_update)(vcpu, apic_find_highest_isr(apic));
+ 	}
+ 	kvm_make_request(KVM_REQ_EVENT, vcpu);
+ 	if (ioapic_in_kernel(vcpu->kvm))
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index 92fee5e8a3c741..968ddf71405446 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -6847,7 +6847,7 @@ void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu)
+ 	kvm_release_pfn_clean(pfn);
+ }
+ 
+-void vmx_hwapic_isr_update(int max_isr)
++void vmx_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr)
+ {
+ 	u16 status;
+ 	u8 old;
+diff --git a/arch/x86/kvm/vmx/x86_ops.h b/arch/x86/kvm/vmx/x86_ops.h
+index a55981c5216e63..48dc76bf0ec03a 100644
+--- a/arch/x86/kvm/vmx/x86_ops.h
++++ b/arch/x86/kvm/vmx/x86_ops.h
+@@ -48,7 +48,7 @@ void vmx_migrate_timers(struct kvm_vcpu *vcpu);
+ void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
+ void vmx_apicv_pre_state_restore(struct kvm_vcpu *vcpu);
+ void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr);
+-void vmx_hwapic_isr_update(int max_isr);
++void vmx_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr);
+ int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu);
+ void vmx_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
+ 			   int trig_mode, int vector);
+diff --git a/block/bio-integrity.c b/block/bio-integrity.c
+index 88e3ad73c38549..9aed61fcd0bf94 100644
+--- a/block/bio-integrity.c
++++ b/block/bio-integrity.c
+@@ -118,17 +118,18 @@ static void bio_integrity_unpin_bvec(struct bio_vec *bv, int nr_vecs,
+ 
+ static void bio_integrity_uncopy_user(struct bio_integrity_payload *bip)
+ {
+-	unsigned short nr_vecs = bip->bip_max_vcnt - 1;
+-	struct bio_vec *copy = &bip->bip_vec[1];
+-	size_t bytes = bip->bip_iter.bi_size;
+-	struct iov_iter iter;
++	unsigned short orig_nr_vecs = bip->bip_max_vcnt - 1;
++	struct bio_vec *orig_bvecs = &bip->bip_vec[1];
++	struct bio_vec *bounce_bvec = &bip->bip_vec[0];
++	size_t bytes = bounce_bvec->bv_len;
++	struct iov_iter orig_iter;
+ 	int ret;
+ 
+-	iov_iter_bvec(&iter, ITER_DEST, copy, nr_vecs, bytes);
+-	ret = copy_to_iter(bvec_virt(bip->bip_vec), bytes, &iter);
++	iov_iter_bvec(&orig_iter, ITER_DEST, orig_bvecs, orig_nr_vecs, bytes);
++	ret = copy_to_iter(bvec_virt(bounce_bvec), bytes, &orig_iter);
+ 	WARN_ON_ONCE(ret != bytes);
+ 
+-	bio_integrity_unpin_bvec(copy, nr_vecs, true);
++	bio_integrity_unpin_bvec(orig_bvecs, orig_nr_vecs, true);
+ }
+ 
+ /**
+diff --git a/block/blk-core.c b/block/blk-core.c
+index 4f791a3114a12c..42023addf9cda6 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -629,8 +629,14 @@ static void __submit_bio(struct bio *bio)
+ 		blk_mq_submit_bio(bio);
+ 	} else if (likely(bio_queue_enter(bio) == 0)) {
+ 		struct gendisk *disk = bio->bi_bdev->bd_disk;
+-
+-		disk->fops->submit_bio(bio);
++	
++		if ((bio->bi_opf & REQ_POLLED) &&
++		    !(disk->queue->limits.features & BLK_FEAT_POLL)) {
++			bio->bi_status = BLK_STS_NOTSUPP;
++			bio_endio(bio);
++		} else {
++			disk->fops->submit_bio(bio);
++		}
+ 		blk_queue_exit(disk->queue);
+ 	}
+ 
+@@ -805,12 +811,6 @@ void submit_bio_noacct(struct bio *bio)
+ 		}
+ 	}
+ 
+-	if (!(q->limits.features & BLK_FEAT_POLL) &&
+-			(bio->bi_opf & REQ_POLLED)) {
+-		bio_clear_polled(bio);
+-		goto not_supported;
+-	}
+-
+ 	switch (bio_op(bio)) {
+ 	case REQ_OP_READ:
+ 		break;
+@@ -935,7 +935,7 @@ int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
+ 		return 0;
+ 
+ 	q = bdev_get_queue(bdev);
+-	if (cookie == BLK_QC_T_NONE || !(q->limits.features & BLK_FEAT_POLL))
++	if (cookie == BLK_QC_T_NONE)
+ 		return 0;
+ 
+ 	blk_flush_plug(current->plug, false);
+@@ -956,7 +956,8 @@ int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
+ 	} else {
+ 		struct gendisk *disk = q->disk;
+ 
+-		if (disk && disk->fops->poll_bio)
++		if ((q->limits.features & BLK_FEAT_POLL) && disk &&
++		    disk->fops->poll_bio)
+ 			ret = disk->fops->poll_bio(bio, iob, flags);
+ 	}
+ 	blk_queue_exit(q);
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index 4e76651e786d19..662e52ab06467f 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -3092,14 +3092,21 @@ void blk_mq_submit_bio(struct bio *bio)
+ 	}
+ 
+ 	/*
+-	 * Device reconfiguration may change logical block size, so alignment
+-	 * check has to be done with queue usage counter held
++	 * Device reconfiguration may change logical block size or reduce the
++	 * number of poll queues, so the checks for alignment and poll support
++	 * have to be done with queue usage counter held.
+ 	 */
+ 	if (unlikely(bio_unaligned(bio, q))) {
+ 		bio_io_error(bio);
+ 		goto queue_exit;
+ 	}
+ 
++	if ((bio->bi_opf & REQ_POLLED) && !blk_mq_can_poll(q)) {
++		bio->bi_status = BLK_STS_NOTSUPP;
++		bio_endio(bio);
++		goto queue_exit;
++	}
++
+ 	bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
+ 	if (!bio)
+ 		goto queue_exit;
+@@ -4325,12 +4332,6 @@ void blk_mq_release(struct request_queue *q)
+ 	blk_mq_sysfs_deinit(q);
+ }
+ 
+-static bool blk_mq_can_poll(struct blk_mq_tag_set *set)
+-{
+-	return set->nr_maps > HCTX_TYPE_POLL &&
+-		set->map[HCTX_TYPE_POLL].nr_queues;
+-}
+-
+ struct request_queue *blk_mq_alloc_queue(struct blk_mq_tag_set *set,
+ 		struct queue_limits *lim, void *queuedata)
+ {
+@@ -4341,7 +4342,7 @@ struct request_queue *blk_mq_alloc_queue(struct blk_mq_tag_set *set,
+ 	if (!lim)
+ 		lim = &default_lim;
+ 	lim->features |= BLK_FEAT_IO_STAT | BLK_FEAT_NOWAIT;
+-	if (blk_mq_can_poll(set))
++	if (set->nr_maps > HCTX_TYPE_POLL)
+ 		lim->features |= BLK_FEAT_POLL;
+ 
+ 	q = blk_alloc_queue(lim, set->numa_node);
+@@ -5029,8 +5030,6 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
+ fallback:
+ 	blk_mq_update_queue_map(set);
+ 	list_for_each_entry(q, &set->tag_list, tag_set_list) {
+-		struct queue_limits lim;
+-
+ 		blk_mq_realloc_hw_ctxs(set, q);
+ 
+ 		if (q->nr_hw_queues != set->nr_hw_queues) {
+@@ -5044,13 +5043,6 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
+ 			set->nr_hw_queues = prev_nr_hw_queues;
+ 			goto fallback;
+ 		}
+-		lim = queue_limits_start_update(q);
+-		if (blk_mq_can_poll(set))
+-			lim.features |= BLK_FEAT_POLL;
+-		else
+-			lim.features &= ~BLK_FEAT_POLL;
+-		if (queue_limits_commit_update(q, &lim) < 0)
+-			pr_warn("updating the poll flag failed\n");
+ 		blk_mq_map_swqueue(q);
+ 	}
+ 
+@@ -5110,9 +5102,9 @@ static int blk_hctx_poll(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
+ int blk_mq_poll(struct request_queue *q, blk_qc_t cookie,
+ 		struct io_comp_batch *iob, unsigned int flags)
+ {
+-	struct blk_mq_hw_ctx *hctx = xa_load(&q->hctx_table, cookie);
+-
+-	return blk_hctx_poll(q, hctx, iob, flags);
++	if (!blk_mq_can_poll(q))
++		return 0;
++	return blk_hctx_poll(q, xa_load(&q->hctx_table, cookie), iob, flags);
+ }
+ 
+ int blk_rq_poll(struct request *rq, struct io_comp_batch *iob,
+diff --git a/block/blk-mq.h b/block/blk-mq.h
+index f4ac1af77a267e..364c0415293cf7 100644
+--- a/block/blk-mq.h
++++ b/block/blk-mq.h
+@@ -451,4 +451,10 @@ do {								\
+ #define blk_mq_run_dispatch_ops(q, dispatch_ops)		\
+ 	__blk_mq_run_dispatch_ops(q, true, dispatch_ops)	\
+ 
++static inline bool blk_mq_can_poll(struct request_queue *q)
++{
++	return (q->limits.features & BLK_FEAT_POLL) &&
++		q->tag_set->map[HCTX_TYPE_POLL].nr_queues;
++}
++
+ #endif
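Taken together, the block hunks stop treating BLK_FEAT_POLL as something __blk_mq_update_nr_hw_queues() rewrites in the limits, and instead fail polled bios with BLK_STS_NOTSUPP when the queue cannot poll, testing this while the queue usage counter is held so a concurrent reconfiguration cannot invalidate the answer mid-submission. A toy model of that check-under-usage-counter shape, using hypothetical types rather than the kernel API:

#include <stdbool.h>
#include <stdio.h>

#define FEAT_POLL	(1u << 0)
#define REQ_POLLED	(1u << 1)

/* Toy queue: a feature mask guarded by a usage counter. */
struct toy_queue {
	unsigned int features;
	int usage;
};

static bool queue_enter(struct toy_queue *q) { q->usage++; return true; }
static void queue_exit(struct toy_queue *q) { q->usage--; }

/*
 * The poll-support test sits between enter and exit, and an
 * unsupported polled request completes as NOTSUPP rather than being
 * silently downgraded to an interrupt-driven one.
 */
static const char *submit(struct toy_queue *q, unsigned int opf)
{
	const char *status;

	if (!queue_enter(q))
		return "AGAIN";
	if ((opf & REQ_POLLED) && !(q->features & FEAT_POLL))
		status = "NOTSUPP";
	else
		status = "OK";
	queue_exit(q);
	return status;
}

int main(void)
{
	struct toy_queue q = { .features = 0, .usage = 0 };

	printf("polled, no poll queues: %s\n", submit(&q, REQ_POLLED));
	q.features |= FEAT_POLL;	/* reconfiguration */
	printf("polled, poll enabled:   %s\n", submit(&q, REQ_POLLED));
	return 0;
}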
+diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
+index 207577145c54f4..692b27266220fe 100644
+--- a/block/blk-sysfs.c
++++ b/block/blk-sysfs.c
+@@ -256,10 +256,17 @@ static ssize_t queue_##_name##_show(struct gendisk *disk, char *page)	\
+ 		!!(disk->queue->limits.features & _feature));		\
+ }
+ 
+-QUEUE_SYSFS_FEATURE_SHOW(poll, BLK_FEAT_POLL);
+ QUEUE_SYSFS_FEATURE_SHOW(fua, BLK_FEAT_FUA);
+ QUEUE_SYSFS_FEATURE_SHOW(dax, BLK_FEAT_DAX);
+ 
++static ssize_t queue_poll_show(struct gendisk *disk, char *page)
++{
++	if (queue_is_mq(disk->queue))
++		return sysfs_emit(page, "%u\n", blk_mq_can_poll(disk->queue));
++	return sysfs_emit(page, "%u\n",
++		!!(disk->queue->limits.features & BLK_FEAT_POLL));
++}
++
+ static ssize_t queue_zoned_show(struct gendisk *disk, char *page)
+ {
+ 	if (blk_queue_is_zoned(disk->queue))
+diff --git a/block/genhd.c b/block/genhd.c
+index 8645cf3b0816e4..99344f53c78975 100644
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -778,7 +778,7 @@ static ssize_t disk_badblocks_store(struct device *dev,
+ }
+ 
+ #ifdef CONFIG_BLOCK_LEGACY_AUTOLOAD
+-void blk_request_module(dev_t devt)
++static bool blk_probe_dev(dev_t devt)
+ {
+ 	unsigned int major = MAJOR(devt);
+ 	struct blk_major_name **n;
+@@ -788,14 +788,26 @@ void blk_request_module(dev_t devt)
+ 		if ((*n)->major == major && (*n)->probe) {
+ 			(*n)->probe(devt);
+ 			mutex_unlock(&major_names_lock);
+-			return;
++			return true;
+ 		}
+ 	}
+ 	mutex_unlock(&major_names_lock);
++	return false;
++}
++
++void blk_request_module(dev_t devt)
++{
++	int error;
++
++	if (blk_probe_dev(devt))
++		return;
+ 
+-	if (request_module("block-major-%d-%d", MAJOR(devt), MINOR(devt)) > 0)
+-		/* Make old-style 2.4 aliases work */
+-		request_module("block-major-%d", MAJOR(devt));
++	error = request_module("block-major-%d-%d", MAJOR(devt), MINOR(devt));
++	/* Make old-style 2.4 aliases work */
++	if (error > 0)
++		error = request_module("block-major-%d", MAJOR(devt));
++	if (!error)
++		blk_probe_dev(devt);
+ }
+ #endif /* CONFIG_BLOCK_LEGACY_AUTOLOAD */
+ 
+diff --git a/block/partitions/ldm.h b/block/partitions/ldm.h
+index e259180c89148b..aa3bd050d8cdd0 100644
+--- a/block/partitions/ldm.h
++++ b/block/partitions/ldm.h
+@@ -1,5 +1,5 @@
+ // SPDX-License-Identifier: GPL-2.0-or-later
+-/**
++/*
+  * ldm - Part of the Linux-NTFS project.
+  *
+  * Copyright (C) 2001,2002 Richard Russon <ldm@flatcap.org>
+diff --git a/crypto/algapi.c b/crypto/algapi.c
+index 004d27e41315ff..c067412d909a16 100644
+--- a/crypto/algapi.c
++++ b/crypto/algapi.c
+@@ -1022,6 +1022,8 @@ static void __init crypto_start_tests(void)
+ 	if (IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS))
+ 		return;
+ 
++	set_crypto_boot_test_finished();
++
+ 	for (;;) {
+ 		struct crypto_larval *larval = NULL;
+ 		struct crypto_alg *q;
+@@ -1053,8 +1055,6 @@ static void __init crypto_start_tests(void)
+ 		if (!larval)
+ 			break;
+ 	}
+-
+-	set_crypto_boot_test_finished();
+ }
+ 
+ static int __init crypto_algapi_init(void)
+diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
+index 79bbfe00d241f9..b8543a34caeada 100644
+--- a/drivers/acpi/acpica/achware.h
++++ b/drivers/acpi/acpica/achware.h
+@@ -103,8 +103,6 @@ acpi_hw_get_gpe_status(struct acpi_gpe_event_info *gpe_event_info,
+ 
+ acpi_status acpi_hw_enable_all_runtime_gpes(void);
+ 
+-acpi_status acpi_hw_enable_all_wakeup_gpes(void);
+-
+ u8 acpi_hw_check_all_gpes(acpi_handle gpe_skip_device, u32 gpe_skip_number);
+ 
+ acpi_status
+diff --git a/drivers/acpi/fan_core.c b/drivers/acpi/fan_core.c
+index 7cea4495f19bbe..300e5d91998648 100644
+--- a/drivers/acpi/fan_core.c
++++ b/drivers/acpi/fan_core.c
+@@ -371,19 +371,25 @@ static int acpi_fan_probe(struct platform_device *pdev)
+ 	result = sysfs_create_link(&pdev->dev.kobj,
+ 				   &cdev->device.kobj,
+ 				   "thermal_cooling");
+-	if (result)
++	if (result) {
+ 		dev_err(&pdev->dev, "Failed to create sysfs link 'thermal_cooling'\n");
++		goto err_unregister;
++	}
+ 
+ 	result = sysfs_create_link(&cdev->device.kobj,
+ 				   &pdev->dev.kobj,
+ 				   "device");
+ 	if (result) {
+ 		dev_err(&pdev->dev, "Failed to create sysfs link 'device'\n");
+-		goto err_end;
++		goto err_remove_link;
+ 	}
+ 
+ 	return 0;
+ 
++err_remove_link:
++	sysfs_remove_link(&pdev->dev.kobj, "thermal_cooling");
++err_unregister:
++	thermal_cooling_device_unregister(cdev);
+ err_end:
+ 	if (fan->acpi4)
+ 		acpi_fan_delete_attributes(device);
+diff --git a/drivers/base/class.c b/drivers/base/class.c
+index cb5359235c7020..ce460e1ab1376d 100644
+--- a/drivers/base/class.c
++++ b/drivers/base/class.c
+@@ -323,8 +323,12 @@ void class_dev_iter_init(struct class_dev_iter *iter, const struct class *class,
+ 	struct subsys_private *sp = class_to_subsys(class);
+ 	struct klist_node *start_knode = NULL;
+ 
+-	if (!sp)
++	memset(iter, 0, sizeof(*iter));
++	if (!sp) {
++		pr_crit("%s: class %p was not registered yet\n",
++			__func__, class);
+ 		return;
++	}
+ 
+ 	if (start)
+ 		start_knode = &start->p->knode_class;
+@@ -351,6 +355,9 @@ struct device *class_dev_iter_next(struct class_dev_iter *iter)
+ 	struct klist_node *knode;
+ 	struct device *dev;
+ 
++	if (!iter->sp)
++		return NULL;
++
+ 	while (1) {
+ 		knode = klist_next(&iter->ki);
+ 		if (!knode)
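The class.c hunk zeroes the iterator before any early return, so class_dev_iter_next() on an iterator whose init bailed out (class not registered yet) sees a well-defined empty state instead of stack garbage. A small standalone illustration of that defensive-init pattern, using hypothetical toy types:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct toy_list {
	const char **items;
	size_t n;
};

struct toy_iter {
	struct toy_list *list;
	size_t pos;
};

/*
 * Init clears the iterator before any bail-out, so next() on an
 * iterator whose backing list was missing is a safe no-op -- the same
 * shape as the class_dev_iter_init() fix.
 */
static void toy_iter_init(struct toy_iter *it, struct toy_list *list)
{
	memset(it, 0, sizeof(*it));
	if (!list)
		return;		/* leaves a valid, empty iterator */
	it->list = list;
}

static const char *toy_iter_next(struct toy_iter *it)
{
	if (!it->list || it->pos >= it->list->n)
		return NULL;
	return it->list->items[it->pos++];
}

int main(void)
{
	struct toy_iter it;

	toy_iter_init(&it, NULL);	/* "class" never registered */
	while (toy_iter_next(&it))
		;			/* never runs, never faults */
	printf("iterated safely over an empty iterator\n");
	return 0;
}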
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index b852050d8a9665..450458267e6e64 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -2180,6 +2180,7 @@ static void nbd_disconnect_and_put(struct nbd_device *nbd)
+ 	flush_workqueue(nbd->recv_workq);
+ 	nbd_clear_que(nbd);
+ 	nbd->task_setup = NULL;
++	clear_bit(NBD_RT_BOUND, &nbd->config->runtime_flags);
+ 	mutex_unlock(&nbd->config_lock);
+ 
+ 	if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
+diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
+index ff45ed76646957..226ffc743238e9 100644
+--- a/drivers/block/ps3disk.c
++++ b/drivers/block/ps3disk.c
+@@ -384,9 +384,9 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev)
+ 	unsigned int devidx;
+ 	struct queue_limits lim = {
+ 		.logical_block_size	= dev->blk_size,
+-		.max_hw_sectors		= dev->bounce_size >> 9,
++		.max_hw_sectors		= BOUNCE_SIZE >> 9,
+ 		.max_segments		= -1,
+-		.max_segment_size	= dev->bounce_size,
++		.max_segment_size	= BOUNCE_SIZE,
+ 		.dma_alignment		= dev->blk_size - 1,
+ 		.features		= BLK_FEAT_WRITE_CACHE |
+ 					  BLK_FEAT_ROTATIONAL,
+diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
+index a1153ada74d206..0a60660fc8ce80 100644
+--- a/drivers/bluetooth/btbcm.c
++++ b/drivers/bluetooth/btbcm.c
+@@ -553,6 +553,9 @@ static const char *btbcm_get_board_name(struct device *dev)
+ 
+ 	/* get rid of any '/' in the compatible string */
+ 	board_type = devm_kstrdup(dev, tmp, GFP_KERNEL);
++	if (!board_type)
++		return NULL;
++
+ 	strreplace(board_type, '/', '-');
+ 
+ 	return board_type;
+diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c
+index a028984f27829c..84a1ad61c4ad5f 100644
+--- a/drivers/bluetooth/btnxpuart.c
++++ b/drivers/bluetooth/btnxpuart.c
+@@ -1336,13 +1336,12 @@ static void btnxpuart_tx_work(struct work_struct *work)
+ 
+ 	while ((skb = nxp_dequeue(nxpdev))) {
+ 		len = serdev_device_write_buf(serdev, skb->data, skb->len);
+-		serdev_device_wait_until_sent(serdev, 0);
+ 		hdev->stat.byte_tx += len;
+ 
+ 		skb_pull(skb, len);
+ 		if (skb->len > 0) {
+ 			skb_queue_head(&nxpdev->txq, skb);
+-			break;
++			continue;
+ 		}
+ 
+ 		switch (hci_skb_pkt_type(skb)) {
+diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
+index 0bcb44cf7b31d7..0a6ca6dfb94841 100644
+--- a/drivers/bluetooth/btrtl.c
++++ b/drivers/bluetooth/btrtl.c
+@@ -1351,12 +1351,14 @@ int btrtl_setup_realtek(struct hci_dev *hdev)
+ 
+ 	btrtl_set_quirks(hdev, btrtl_dev);
+ 
+-	hci_set_hw_info(hdev,
++	if (btrtl_dev->ic_info) {
++		hci_set_hw_info(hdev,
+ 			"RTL lmp_subver=%u hci_rev=%u hci_ver=%u hci_bus=%u",
+ 			btrtl_dev->ic_info->lmp_subver,
+ 			btrtl_dev->ic_info->hci_rev,
+ 			btrtl_dev->ic_info->hci_ver,
+ 			btrtl_dev->ic_info->hci_bus);
++	}
+ 
+ 	btrtl_free(btrtl_dev);
+ 	return ret;
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 0c85c981a8334a..258a5cb6f27afe 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -2632,8 +2632,15 @@ static void btusb_mtk_claim_iso_intf(struct btusb_data *data)
+ 	struct btmtk_data *btmtk_data = hci_get_priv(data->hdev);
+ 	int err;
+ 
++	/*
++	 * The function usb_driver_claim_interface() is documented to need
++	 * locks held if it's not called from a probe routine. The code here
++	 * is called from the hci_power_on workqueue, so grab the lock.
++	 */
++	device_lock(&btmtk_data->isopkt_intf->dev);
+ 	err = usb_driver_claim_interface(&btusb_driver,
+ 					 btmtk_data->isopkt_intf, data);
++	device_unlock(&btmtk_data->isopkt_intf->dev);
+ 	if (err < 0) {
+ 		btmtk_data->isopkt_intf = NULL;
+ 		bt_dev_err(data->hdev, "Failed to claim iso interface");
+diff --git a/drivers/cdx/Makefile b/drivers/cdx/Makefile
+index 749a3295c2bdc1..3ca7068a305256 100644
+--- a/drivers/cdx/Makefile
++++ b/drivers/cdx/Makefile
+@@ -5,7 +5,7 @@
+ # Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
+ #
+ 
+-ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=CDX_BUS
++ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE='"CDX_BUS"'
+ 
+ obj-$(CONFIG_CDX_BUS) += cdx.o controller/
+ 
+diff --git a/drivers/char/ipmi/ipmb_dev_int.c b/drivers/char/ipmi/ipmb_dev_int.c
+index 7296127181eca3..8a14fd0291d89b 100644
+--- a/drivers/char/ipmi/ipmb_dev_int.c
++++ b/drivers/char/ipmi/ipmb_dev_int.c
+@@ -321,6 +321,9 @@ static int ipmb_probe(struct i2c_client *client)
+ 	ipmb_dev->miscdev.name = devm_kasprintf(&client->dev, GFP_KERNEL,
+ 						"%s%d", "ipmb-",
+ 						client->adapter->nr);
++	if (!ipmb_dev->miscdev.name)
++		return -ENOMEM;
++
+ 	ipmb_dev->miscdev.fops = &ipmb_fops;
+ 	ipmb_dev->miscdev.parent = &client->dev;
+ 	ret = misc_register(&ipmb_dev->miscdev);
+diff --git a/drivers/char/ipmi/ssif_bmc.c b/drivers/char/ipmi/ssif_bmc.c
+index a14fafc583d4d8..310f17dd9511a5 100644
+--- a/drivers/char/ipmi/ssif_bmc.c
++++ b/drivers/char/ipmi/ssif_bmc.c
+@@ -292,7 +292,6 @@ static void complete_response(struct ssif_bmc_ctx *ssif_bmc)
+ 	ssif_bmc->nbytes_processed = 0;
+ 	ssif_bmc->remain_len = 0;
+ 	ssif_bmc->busy = false;
+-	memset(&ssif_bmc->part_buf, 0, sizeof(struct ssif_part_buffer));
+ 	wake_up_all(&ssif_bmc->wait_queue);
+ }
+ 
+@@ -744,9 +743,11 @@ static void on_stop_event(struct ssif_bmc_ctx *ssif_bmc, u8 *val)
+ 			ssif_bmc->aborting = true;
+ 		}
+ 	} else if (ssif_bmc->state == SSIF_RES_SENDING) {
+-		if (ssif_bmc->is_singlepart_read || ssif_bmc->block_num == 0xFF)
++		if (ssif_bmc->is_singlepart_read || ssif_bmc->block_num == 0xFF) {
++			memset(&ssif_bmc->part_buf, 0, sizeof(struct ssif_part_buffer));
+ 			/* Invalidate response buffer to denote it is sent */
+ 			complete_response(ssif_bmc);
++		}
+ 		ssif_bmc->state = SSIF_READY;
+ 	}
+ 
+diff --git a/drivers/clk/analogbits/wrpll-cln28hpc.c b/drivers/clk/analogbits/wrpll-cln28hpc.c
+index 65d422a588e1f1..9d178afc73bdd1 100644
+--- a/drivers/clk/analogbits/wrpll-cln28hpc.c
++++ b/drivers/clk/analogbits/wrpll-cln28hpc.c
+@@ -292,7 +292,7 @@ int wrpll_configure_for_rate(struct wrpll_cfg *c, u32 target_rate,
+ 			vco = vco_pre * f;
+ 		}
+ 
+-		delta = abs(target_rate - vco);
++		delta = abs(target_vco_rate - vco);
+ 		if (delta < best_delta) {
+ 			best_delta = delta;
+ 			best_r = r;
+diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
+index d02451f951cf05..5b4ab94193c2b0 100644
+--- a/drivers/clk/clk.c
++++ b/drivers/clk/clk.c
+@@ -5391,8 +5391,10 @@ const char *of_clk_get_parent_name(const struct device_node *np, int index)
+ 		count++;
+ 	}
+ 	/* We went off the end of 'clock-indices' without finding it */
+-	if (of_property_present(clkspec.np, "clock-indices") && !found)
++	if (of_property_present(clkspec.np, "clock-indices") && !found) {
++		of_node_put(clkspec.np);
+ 		return NULL;
++	}
+ 
+ 	if (of_property_read_string_index(clkspec.np, "clock-output-names",
+ 					  index,
+diff --git a/drivers/clk/imx/clk-imx8mp.c b/drivers/clk/imx/clk-imx8mp.c
+index 516dbd170c8a35..fb18f507f12135 100644
+--- a/drivers/clk/imx/clk-imx8mp.c
++++ b/drivers/clk/imx/clk-imx8mp.c
+@@ -399,8 +399,9 @@ static const char * const imx8mp_dram_core_sels[] = {"dram_pll_out", "dram_alt_r
+ 
+ static const char * const imx8mp_clkout_sels[] = {"audio_pll1_out", "audio_pll2_out", "video_pll1_out",
+ 						  "dummy", "dummy", "gpu_pll_out", "vpu_pll_out",
+-						  "arm_pll_out", "sys_pll1", "sys_pll2", "sys_pll3",
+-						  "dummy", "dummy", "osc_24m", "dummy", "osc_32k"};
++						  "arm_pll_out", "sys_pll1_out", "sys_pll2_out",
++						  "sys_pll3_out", "dummy", "dummy", "osc_24m",
++						  "dummy", "osc_32k"};
+ 
+ static struct clk_hw **hws;
+ static struct clk_hw_onecell_data *clk_hw_data;
+diff --git a/drivers/clk/imx/clk-imx93.c b/drivers/clk/imx/clk-imx93.c
+index c6a9bc8ecc1fc7..c5f358a75f307b 100644
+--- a/drivers/clk/imx/clk-imx93.c
++++ b/drivers/clk/imx/clk-imx93.c
+@@ -15,6 +15,11 @@
+ 
+ #include "clk.h"
+ 
++#define IMX93_CLK_END 208
++
++#define PLAT_IMX93 BIT(0)
++#define PLAT_IMX91 BIT(1)
++
+ enum clk_sel {
+ 	LOW_SPEED_IO_SEL,
+ 	NON_IO_SEL,
+@@ -33,6 +38,7 @@ static u32 share_count_sai2;
+ static u32 share_count_sai3;
+ static u32 share_count_mub;
+ static u32 share_count_pdm;
++static u32 share_count_spdif;
+ 
+ static const char * const a55_core_sels[] = {"a55_alt", "arm_pll"};
+ static const char *parent_names[MAX_SEL][4] = {
+@@ -53,6 +59,7 @@ static const struct imx93_clk_root {
+ 	u32 off;
+ 	enum clk_sel sel;
+ 	unsigned long flags;
++	unsigned long plat;
+ } root_array[] = {
+ 	/* a55/m33/bus critical clk for system run */
+ 	{ IMX93_CLK_A55_PERIPH,		"a55_periph_root",	0x0000,	FAST_SEL, CLK_IS_CRITICAL },
+@@ -63,9 +70,9 @@ static const struct imx93_clk_root {
+ 	{ IMX93_CLK_BUS_AON,		"bus_aon_root",		0x0300,	LOW_SPEED_IO_SEL, CLK_IS_CRITICAL },
+ 	{ IMX93_CLK_WAKEUP_AXI,		"wakeup_axi_root",	0x0380,	FAST_SEL, CLK_IS_CRITICAL },
+ 	{ IMX93_CLK_SWO_TRACE,		"swo_trace_root",	0x0400,	LOW_SPEED_IO_SEL, },
+-	{ IMX93_CLK_M33_SYSTICK,	"m33_systick_root",	0x0480,	LOW_SPEED_IO_SEL, },
+-	{ IMX93_CLK_FLEXIO1,		"flexio1_root",		0x0500,	LOW_SPEED_IO_SEL, },
+-	{ IMX93_CLK_FLEXIO2,		"flexio2_root",		0x0580,	LOW_SPEED_IO_SEL, },
++	{ IMX93_CLK_M33_SYSTICK,	"m33_systick_root",	0x0480,	LOW_SPEED_IO_SEL, 0, PLAT_IMX93, },
++	{ IMX93_CLK_FLEXIO1,		"flexio1_root",		0x0500,	LOW_SPEED_IO_SEL, 0, PLAT_IMX93, },
++	{ IMX93_CLK_FLEXIO2,		"flexio2_root",		0x0580,	LOW_SPEED_IO_SEL, 0, PLAT_IMX93, },
+ 	{ IMX93_CLK_LPTMR1,		"lptmr1_root",		0x0700,	LOW_SPEED_IO_SEL, },
+ 	{ IMX93_CLK_LPTMR2,		"lptmr2_root",		0x0780,	LOW_SPEED_IO_SEL, },
+ 	{ IMX93_CLK_TPM2,		"tpm2_root",		0x0880,	TPM_SEL, },
+@@ -120,15 +127,15 @@ static const struct imx93_clk_root {
+ 	{ IMX93_CLK_HSIO_ACSCAN_80M,	"hsio_acscan_80m_root",	0x1f80,	LOW_SPEED_IO_SEL, },
+ 	{ IMX93_CLK_HSIO_ACSCAN_480M,	"hsio_acscan_480m_root", 0x2000, MISC_SEL, },
+ 	{ IMX93_CLK_NIC_AXI,		"nic_axi_root",		0x2080, FAST_SEL, CLK_IS_CRITICAL, },
+-	{ IMX93_CLK_ML_APB,		"ml_apb_root",		0x2180,	LOW_SPEED_IO_SEL, },
+-	{ IMX93_CLK_ML,			"ml_root",		0x2200,	FAST_SEL, },
++	{ IMX93_CLK_ML_APB,		"ml_apb_root",		0x2180,	LOW_SPEED_IO_SEL, 0, PLAT_IMX93, },
++	{ IMX93_CLK_ML,			"ml_root",		0x2200,	FAST_SEL, 0, PLAT_IMX93, },
+ 	{ IMX93_CLK_MEDIA_AXI,		"media_axi_root",	0x2280,	FAST_SEL, },
+ 	{ IMX93_CLK_MEDIA_APB,		"media_apb_root",	0x2300,	LOW_SPEED_IO_SEL, },
+-	{ IMX93_CLK_MEDIA_LDB,		"media_ldb_root",	0x2380,	VIDEO_SEL, },
++	{ IMX93_CLK_MEDIA_LDB,		"media_ldb_root",	0x2380,	VIDEO_SEL, 0, PLAT_IMX93, },
+ 	{ IMX93_CLK_MEDIA_DISP_PIX,	"media_disp_pix_root",	0x2400,	VIDEO_SEL, },
+ 	{ IMX93_CLK_CAM_PIX,		"cam_pix_root",		0x2480,	VIDEO_SEL, },
+-	{ IMX93_CLK_MIPI_TEST_BYTE,	"mipi_test_byte_root",	0x2500,	VIDEO_SEL, },
+-	{ IMX93_CLK_MIPI_PHY_CFG,	"mipi_phy_cfg_root",	0x2580,	VIDEO_SEL, },
++	{ IMX93_CLK_MIPI_TEST_BYTE,	"mipi_test_byte_root",	0x2500,	VIDEO_SEL, 0, PLAT_IMX93, },
++	{ IMX93_CLK_MIPI_PHY_CFG,	"mipi_phy_cfg_root",	0x2580,	VIDEO_SEL, 0, PLAT_IMX93, },
+ 	{ IMX93_CLK_ADC,		"adc_root",		0x2700,	LOW_SPEED_IO_SEL, },
+ 	{ IMX93_CLK_PDM,		"pdm_root",		0x2780,	AUDIO_SEL, },
+ 	{ IMX93_CLK_TSTMR1,		"tstmr1_root",		0x2800,	LOW_SPEED_IO_SEL, },
+@@ -137,13 +144,16 @@ static const struct imx93_clk_root {
+ 	{ IMX93_CLK_MQS2,		"mqs2_root",		0x2980,	AUDIO_SEL, },
+ 	{ IMX93_CLK_AUDIO_XCVR,		"audio_xcvr_root",	0x2a00,	NON_IO_SEL, },
+ 	{ IMX93_CLK_SPDIF,		"spdif_root",		0x2a80,	AUDIO_SEL, },
+-	{ IMX93_CLK_ENET,		"enet_root",		0x2b00,	NON_IO_SEL, },
+-	{ IMX93_CLK_ENET_TIMER1,	"enet_timer1_root",	0x2b80,	LOW_SPEED_IO_SEL, },
+-	{ IMX93_CLK_ENET_TIMER2,	"enet_timer2_root",	0x2c00,	LOW_SPEED_IO_SEL, },
+-	{ IMX93_CLK_ENET_REF,		"enet_ref_root",	0x2c80,	NON_IO_SEL, },
+-	{ IMX93_CLK_ENET_REF_PHY,	"enet_ref_phy_root",	0x2d00,	LOW_SPEED_IO_SEL, },
+-	{ IMX93_CLK_I3C1_SLOW,		"i3c1_slow_root",	0x2d80,	LOW_SPEED_IO_SEL, },
+-	{ IMX93_CLK_I3C2_SLOW,		"i3c2_slow_root",	0x2e00,	LOW_SPEED_IO_SEL, },
++	{ IMX93_CLK_ENET,		"enet_root",		0x2b00,	NON_IO_SEL, 0, PLAT_IMX93, },
++	{ IMX93_CLK_ENET_TIMER1,	"enet_timer1_root",	0x2b80,	LOW_SPEED_IO_SEL, 0, PLAT_IMX93, },
++	{ IMX93_CLK_ENET_TIMER2,	"enet_timer2_root",	0x2c00,	LOW_SPEED_IO_SEL, 0, PLAT_IMX93, },
++	{ IMX93_CLK_ENET_REF,		"enet_ref_root",	0x2c80,	NON_IO_SEL, 0, PLAT_IMX93, },
++	{ IMX93_CLK_ENET_REF_PHY,	"enet_ref_phy_root",	0x2d00,	LOW_SPEED_IO_SEL, 0, PLAT_IMX93, },
++	{ IMX91_CLK_ENET1_QOS_TSN,	"enet1_qos_tsn_root",   0x2b00, NON_IO_SEL, 0, PLAT_IMX91, },
++	{ IMX91_CLK_ENET_TIMER,		"enet_timer_root",      0x2b80, LOW_SPEED_IO_SEL, 0, PLAT_IMX91, },
++	{ IMX91_CLK_ENET2_REGULAR,	"enet2_regular_root",   0x2c80, NON_IO_SEL, 0, PLAT_IMX91, },
++	{ IMX93_CLK_I3C1_SLOW,		"i3c1_slow_root",	0x2d80,	LOW_SPEED_IO_SEL, 0, PLAT_IMX93, },
++	{ IMX93_CLK_I3C2_SLOW,		"i3c2_slow_root",	0x2e00,	LOW_SPEED_IO_SEL, 0, PLAT_IMX93, },
+ 	{ IMX93_CLK_USB_PHY_BURUNIN,	"usb_phy_root",		0x2e80,	LOW_SPEED_IO_SEL, },
+ 	{ IMX93_CLK_PAL_CAME_SCAN,	"pal_came_scan_root",	0x2f00,	MISC_SEL, }
+ };
+@@ -155,6 +165,7 @@ static const struct imx93_clk_ccgr {
+ 	u32 off;
+ 	unsigned long flags;
+ 	u32 *shared_count;
++	unsigned long plat;
+ } ccgr_array[] = {
+ 	{ IMX93_CLK_A55_GATE,		"a55_alt",	"a55_alt_root",		0x8000, },
+ 	/* M33 critical clk for system run */
+@@ -167,10 +178,10 @@ static const struct imx93_clk_ccgr {
+ 	{ IMX93_CLK_WDOG5_GATE,		"wdog5",	"osc_24m",		0x8400, },
+ 	{ IMX93_CLK_SEMA1_GATE,		"sema1",	"bus_aon_root",		0x8440, },
+ 	{ IMX93_CLK_SEMA2_GATE,		"sema2",	"bus_wakeup_root",	0x8480, },
+-	{ IMX93_CLK_MU1_A_GATE,		"mu1_a",	"bus_aon_root",		0x84c0, CLK_IGNORE_UNUSED },
+-	{ IMX93_CLK_MU2_A_GATE,		"mu2_a",	"bus_wakeup_root",	0x84c0, CLK_IGNORE_UNUSED },
+-	{ IMX93_CLK_MU1_B_GATE,		"mu1_b",	"bus_aon_root",		0x8500, 0, &share_count_mub },
+-	{ IMX93_CLK_MU2_B_GATE,		"mu2_b",	"bus_wakeup_root",	0x8500, 0, &share_count_mub },
++	{ IMX93_CLK_MU1_A_GATE,		"mu1_a",	"bus_aon_root",		0x84c0, CLK_IGNORE_UNUSED, NULL, PLAT_IMX93 },
++	{ IMX93_CLK_MU2_A_GATE,		"mu2_a",	"bus_wakeup_root",	0x84c0, CLK_IGNORE_UNUSED, NULL, PLAT_IMX93 },
++	{ IMX93_CLK_MU1_B_GATE,		"mu1_b",	"bus_aon_root",		0x8500, 0, &share_count_mub, PLAT_IMX93 },
++	{ IMX93_CLK_MU2_B_GATE,		"mu2_b",	"bus_wakeup_root",	0x8500, 0, &share_count_mub, PLAT_IMX93 },
+ 	{ IMX93_CLK_EDMA1_GATE,		"edma1",	"m33_root",		0x8540, },
+ 	{ IMX93_CLK_EDMA2_GATE,		"edma2",	"wakeup_axi_root",	0x8580, },
+ 	{ IMX93_CLK_FLEXSPI1_GATE,	"flexspi1",	"flexspi1_root",	0x8640, },
+@@ -178,8 +189,8 @@ static const struct imx93_clk_ccgr {
+ 	{ IMX93_CLK_GPIO2_GATE,		"gpio2",	"bus_wakeup_root",	0x88c0, },
+ 	{ IMX93_CLK_GPIO3_GATE,		"gpio3",	"bus_wakeup_root",	0x8900, },
+ 	{ IMX93_CLK_GPIO4_GATE,		"gpio4",	"bus_wakeup_root",	0x8940, },
+-	{ IMX93_CLK_FLEXIO1_GATE,	"flexio1",	"flexio1_root",		0x8980, },
+-	{ IMX93_CLK_FLEXIO2_GATE,	"flexio2",	"flexio2_root",		0x89c0, },
++	{ IMX93_CLK_FLEXIO1_GATE,	"flexio1",	"flexio1_root",		0x8980, 0, NULL, PLAT_IMX93},
++	{ IMX93_CLK_FLEXIO2_GATE,	"flexio2",	"flexio2_root",		0x89c0, 0, NULL, PLAT_IMX93},
+ 	{ IMX93_CLK_LPIT1_GATE,		"lpit1",	"bus_aon_root",		0x8a00, },
+ 	{ IMX93_CLK_LPIT2_GATE,		"lpit2",	"bus_wakeup_root",	0x8a40, },
+ 	{ IMX93_CLK_LPTMR1_GATE,	"lptmr1",	"lptmr1_root",		0x8a80, },
+@@ -228,10 +239,10 @@ static const struct imx93_clk_ccgr {
+ 	{ IMX93_CLK_SAI3_GATE,          "sai3",         "sai3_root",            0x94c0, 0, &share_count_sai3},
+ 	{ IMX93_CLK_SAI3_IPG,		"sai3_ipg_clk", "bus_wakeup_root",	0x94c0, 0, &share_count_sai3},
+ 	{ IMX93_CLK_MIPI_CSI_GATE,	"mipi_csi",	"media_apb_root",	0x9580, },
+-	{ IMX93_CLK_MIPI_DSI_GATE,	"mipi_dsi",	"media_apb_root",	0x95c0, },
+-	{ IMX93_CLK_LVDS_GATE,		"lvds",		"media_ldb_root",	0x9600, },
++	{ IMX93_CLK_MIPI_DSI_GATE,	"mipi_dsi",	"media_apb_root",	0x95c0, 0, NULL, PLAT_IMX93 },
++	{ IMX93_CLK_LVDS_GATE,		"lvds",		"media_ldb_root",	0x9600, 0, NULL, PLAT_IMX93 },
+ 	{ IMX93_CLK_LCDIF_GATE,		"lcdif",	"media_apb_root",	0x9640, },
+-	{ IMX93_CLK_PXP_GATE,		"pxp",		"media_apb_root",	0x9680, },
++	{ IMX93_CLK_PXP_GATE,		"pxp",		"media_apb_root",	0x9680, 0, NULL, PLAT_IMX93 },
+ 	{ IMX93_CLK_ISI_GATE,		"isi",		"media_apb_root",	0x96c0, },
+ 	{ IMX93_CLK_NIC_MEDIA_GATE,	"nic_media",	"media_axi_root",	0x9700, },
+ 	{ IMX93_CLK_USB_CONTROLLER_GATE, "usb_controller", "hsio_root",		0x9a00, },
+@@ -242,10 +253,13 @@ static const struct imx93_clk_ccgr {
+ 	{ IMX93_CLK_MQS1_GATE,		"mqs1",		"sai1_root",		0x9b00, },
+ 	{ IMX93_CLK_MQS2_GATE,		"mqs2",		"sai3_root",		0x9b40, },
+ 	{ IMX93_CLK_AUD_XCVR_GATE,	"aud_xcvr",	"audio_xcvr_root",	0x9b80, },
+-	{ IMX93_CLK_SPDIF_GATE,		"spdif",	"spdif_root",		0x9c00, },
++	{ IMX93_CLK_SPDIF_IPG,		"spdif_ipg_clk", "bus_wakeup_root",	0x9c00, 0, &share_count_spdif},
++	{ IMX93_CLK_SPDIF_GATE,		"spdif",	"spdif_root",		0x9c00, 0, &share_count_spdif},
+ 	{ IMX93_CLK_HSIO_32K_GATE,	"hsio_32k",	"osc_32k",		0x9dc0, },
+-	{ IMX93_CLK_ENET1_GATE,		"enet1",	"wakeup_axi_root",	0x9e00, },
+-	{ IMX93_CLK_ENET_QOS_GATE,	"enet_qos",	"wakeup_axi_root",	0x9e40, },
++	{ IMX93_CLK_ENET1_GATE,		"enet1",	"wakeup_axi_root",	0x9e00, 0, NULL, PLAT_IMX93, },
++	{ IMX93_CLK_ENET_QOS_GATE,	"enet_qos",	"wakeup_axi_root",	0x9e40, 0, NULL, PLAT_IMX93, },
++	{ IMX91_CLK_ENET2_REGULAR_GATE, "enet2_regular", "wakeup_axi_root",	0x9e00, 0, NULL, PLAT_IMX91, },
++	{ IMX91_CLK_ENET1_QOS_TSN_GATE, "enet1_qos_tsn", "wakeup_axi_root",	0x9e40, 0, NULL, PLAT_IMX91, },
+ 	/* Critical because clk accessed during CPU idle */
+ 	{ IMX93_CLK_SYS_CNT_GATE,	"sys_cnt",	"osc_24m",		0x9e80, CLK_IS_CRITICAL},
+ 	{ IMX93_CLK_TSTMR1_GATE,	"tstmr1",	"bus_aon_root",		0x9ec0, },
+@@ -265,6 +279,7 @@ static int imx93_clocks_probe(struct platform_device *pdev)
+ 	const struct imx93_clk_ccgr *ccgr;
+ 	void __iomem *base, *anatop_base;
+ 	int i, ret;
++	const unsigned long plat = (unsigned long)device_get_match_data(&pdev->dev);
+ 
+ 	clk_hw_data = devm_kzalloc(dev, struct_size(clk_hw_data, hws,
+ 					  IMX93_CLK_END), GFP_KERNEL);
+@@ -314,17 +329,20 @@ static int imx93_clocks_probe(struct platform_device *pdev)
+ 
+ 	for (i = 0; i < ARRAY_SIZE(root_array); i++) {
+ 		root = &root_array[i];
+-		clks[root->clk] = imx93_clk_composite_flags(root->name,
+-							    parent_names[root->sel],
+-							    4, base + root->off, 3,
+-							    root->flags);
++		if (!root->plat || root->plat & plat)
++			clks[root->clk] = imx93_clk_composite_flags(root->name,
++								    parent_names[root->sel],
++								    4, base + root->off, 3,
++								    root->flags);
+ 	}
+ 
+ 	for (i = 0; i < ARRAY_SIZE(ccgr_array); i++) {
+ 		ccgr = &ccgr_array[i];
+-		clks[ccgr->clk] = imx93_clk_gate(NULL, ccgr->name, ccgr->parent_name,
+-						 ccgr->flags, base + ccgr->off, 0, 1, 1, 3,
+-						 ccgr->shared_count);
++		if (!ccgr->plat || ccgr->plat & plat)
++			clks[ccgr->clk] = imx93_clk_gate(NULL,
++							 ccgr->name, ccgr->parent_name,
++							 ccgr->flags, base + ccgr->off, 0, 1, 1, 3,
++							 ccgr->shared_count);
+ 	}
+ 
+ 	clks[IMX93_CLK_A55_SEL] = imx_clk_hw_mux2("a55_sel", base + 0x4820, 0, 1, a55_core_sels,
+@@ -354,7 +372,8 @@ static int imx93_clocks_probe(struct platform_device *pdev)
+ }
+ 
+ static const struct of_device_id imx93_clk_of_match[] = {
+-	{ .compatible = "fsl,imx93-ccm" },
++	{ .compatible = "fsl,imx93-ccm", .data = (void *)PLAT_IMX93 },
++	{ .compatible = "fsl,imx91-ccm", .data = (void *)PLAT_IMX91 },
+ 	{ /* Sentinel */ },
+ };
+ MODULE_DEVICE_TABLE(of, imx93_clk_of_match);
+diff --git a/drivers/clk/qcom/camcc-x1e80100.c b/drivers/clk/qcom/camcc-x1e80100.c
+index 85e76c7712ad84..b73524ae64b1b2 100644
+--- a/drivers/clk/qcom/camcc-x1e80100.c
++++ b/drivers/clk/qcom/camcc-x1e80100.c
+@@ -2212,6 +2212,8 @@ static struct clk_branch cam_cc_sfe_0_fast_ahb_clk = {
+ 	},
+ };
+ 
++static struct gdsc cam_cc_titan_top_gdsc;
++
+ static struct gdsc cam_cc_bps_gdsc = {
+ 	.gdscr = 0x10004,
+ 	.en_rest_wait_val = 0x2,
+@@ -2221,6 +2223,7 @@ static struct gdsc cam_cc_bps_gdsc = {
+ 		.name = "cam_cc_bps_gdsc",
+ 	},
+ 	.pwrsts = PWRSTS_OFF_ON,
++	.parent = &cam_cc_titan_top_gdsc.pd,
+ 	.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+ };
+ 
+@@ -2233,6 +2236,7 @@ static struct gdsc cam_cc_ife_0_gdsc = {
+ 		.name = "cam_cc_ife_0_gdsc",
+ 	},
+ 	.pwrsts = PWRSTS_OFF_ON,
++	.parent = &cam_cc_titan_top_gdsc.pd,
+ 	.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+ };
+ 
+@@ -2245,6 +2249,7 @@ static struct gdsc cam_cc_ife_1_gdsc = {
+ 		.name = "cam_cc_ife_1_gdsc",
+ 	},
+ 	.pwrsts = PWRSTS_OFF_ON,
++	.parent = &cam_cc_titan_top_gdsc.pd,
+ 	.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+ };
+ 
+@@ -2257,6 +2262,7 @@ static struct gdsc cam_cc_ipe_0_gdsc = {
+ 		.name = "cam_cc_ipe_0_gdsc",
+ 	},
+ 	.pwrsts = PWRSTS_OFF_ON,
++	.parent = &cam_cc_titan_top_gdsc.pd,
+ 	.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+ };
+ 
+@@ -2269,6 +2275,7 @@ static struct gdsc cam_cc_sfe_0_gdsc = {
+ 		.name = "cam_cc_sfe_0_gdsc",
+ 	},
+ 	.pwrsts = PWRSTS_OFF_ON,
++	.parent = &cam_cc_titan_top_gdsc.pd,
+ 	.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+ };
+ 
+diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
+index dc3aa7014c3ed1..c6692808a8228c 100644
+--- a/drivers/clk/qcom/gcc-sdm845.c
++++ b/drivers/clk/qcom/gcc-sdm845.c
+@@ -454,7 +454,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s0_clk_src_init = {
+ 	.name = "gcc_qupv3_wrap0_s0_clk_src",
+ 	.parent_data = gcc_parent_data_0,
+ 	.num_parents = ARRAY_SIZE(gcc_parent_data_0),
+-	.ops = &clk_rcg2_shared_ops,
++	.ops = &clk_rcg2_ops,
+ };
+ 
+ static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
+@@ -470,7 +470,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s1_clk_src_init = {
+ 	.name = "gcc_qupv3_wrap0_s1_clk_src",
+ 	.parent_data = gcc_parent_data_0,
+ 	.num_parents = ARRAY_SIZE(gcc_parent_data_0),
+-	.ops = &clk_rcg2_shared_ops,
++	.ops = &clk_rcg2_ops,
+ };
+ 
+ static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
+@@ -486,7 +486,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s2_clk_src_init = {
+ 	.name = "gcc_qupv3_wrap0_s2_clk_src",
+ 	.parent_data = gcc_parent_data_0,
+ 	.num_parents = ARRAY_SIZE(gcc_parent_data_0),
+-	.ops = &clk_rcg2_shared_ops,
++	.ops = &clk_rcg2_ops,
+ };
+ 
+ static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = {
+@@ -502,7 +502,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s3_clk_src_init = {
+ 	.name = "gcc_qupv3_wrap0_s3_clk_src",
+ 	.parent_data = gcc_parent_data_0,
+ 	.num_parents = ARRAY_SIZE(gcc_parent_data_0),
+-	.ops = &clk_rcg2_shared_ops,
++	.ops = &clk_rcg2_ops,
+ };
+ 
+ static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
+@@ -518,7 +518,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s4_clk_src_init = {
+ 	.name = "gcc_qupv3_wrap0_s4_clk_src",
+ 	.parent_data = gcc_parent_data_0,
+ 	.num_parents = ARRAY_SIZE(gcc_parent_data_0),
+-	.ops = &clk_rcg2_shared_ops,
++	.ops = &clk_rcg2_ops,
+ };
+ 
+ static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
+@@ -534,7 +534,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s5_clk_src_init = {
+ 	.name = "gcc_qupv3_wrap0_s5_clk_src",
+ 	.parent_data = gcc_parent_data_0,
+ 	.num_parents = ARRAY_SIZE(gcc_parent_data_0),
+-	.ops = &clk_rcg2_shared_ops,
++	.ops = &clk_rcg2_ops,
+ };
+ 
+ static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
+@@ -550,7 +550,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s6_clk_src_init = {
+ 	.name = "gcc_qupv3_wrap0_s6_clk_src",
+ 	.parent_data = gcc_parent_data_0,
+ 	.num_parents = ARRAY_SIZE(gcc_parent_data_0),
+-	.ops = &clk_rcg2_shared_ops,
++	.ops = &clk_rcg2_ops,
+ };
+ 
+ static struct clk_rcg2 gcc_qupv3_wrap0_s6_clk_src = {
+@@ -566,7 +566,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s7_clk_src_init = {
+ 	.name = "gcc_qupv3_wrap0_s7_clk_src",
+ 	.parent_data = gcc_parent_data_0,
+ 	.num_parents = ARRAY_SIZE(gcc_parent_data_0),
+-	.ops = &clk_rcg2_shared_ops,
++	.ops = &clk_rcg2_ops,
+ };
+ 
+ static struct clk_rcg2 gcc_qupv3_wrap0_s7_clk_src = {
+@@ -582,7 +582,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = {
+ 	.name = "gcc_qupv3_wrap1_s0_clk_src",
+ 	.parent_data = gcc_parent_data_0,
+ 	.num_parents = ARRAY_SIZE(gcc_parent_data_0),
+-	.ops = &clk_rcg2_shared_ops,
++	.ops = &clk_rcg2_ops,
+ };
+ 
+ static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
+@@ -598,7 +598,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = {
+ 	.name = "gcc_qupv3_wrap1_s1_clk_src",
+ 	.parent_data = gcc_parent_data_0,
+ 	.num_parents = ARRAY_SIZE(gcc_parent_data_0),
+-	.ops = &clk_rcg2_shared_ops,
++	.ops = &clk_rcg2_ops,
+ };
+ 
+ static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
+@@ -614,7 +614,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s2_clk_src_init = {
+ 	.name = "gcc_qupv3_wrap1_s2_clk_src",
+ 	.parent_data = gcc_parent_data_0,
+ 	.num_parents = ARRAY_SIZE(gcc_parent_data_0),
+-	.ops = &clk_rcg2_shared_ops,
++	.ops = &clk_rcg2_ops,
+ };
+ 
+ static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = {
+@@ -630,7 +630,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = {
+ 	.name = "gcc_qupv3_wrap1_s3_clk_src",
+ 	.parent_data = gcc_parent_data_0,
+ 	.num_parents = ARRAY_SIZE(gcc_parent_data_0),
+-	.ops = &clk_rcg2_shared_ops,
++	.ops = &clk_rcg2_ops,
+ };
+ 
+ static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
+@@ -646,7 +646,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = {
+ 	.name = "gcc_qupv3_wrap1_s4_clk_src",
+ 	.parent_data = gcc_parent_data_0,
+ 	.num_parents = ARRAY_SIZE(gcc_parent_data_0),
+-	.ops = &clk_rcg2_shared_ops,
++	.ops = &clk_rcg2_ops,
+ };
+ 
+ static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
+@@ -662,7 +662,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = {
+ 	.name = "gcc_qupv3_wrap1_s5_clk_src",
+ 	.parent_data = gcc_parent_data_0,
+ 	.num_parents = ARRAY_SIZE(gcc_parent_data_0),
+-	.ops = &clk_rcg2_shared_ops,
++	.ops = &clk_rcg2_ops,
+ };
+ 
+ static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
+@@ -678,7 +678,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s6_clk_src_init = {
+ 	.name = "gcc_qupv3_wrap1_s6_clk_src",
+ 	.parent_data = gcc_parent_data_0,
+ 	.num_parents = ARRAY_SIZE(gcc_parent_data_0),
+-	.ops = &clk_rcg2_shared_ops,
++	.ops = &clk_rcg2_ops,
+ };
+ 
+ static struct clk_rcg2 gcc_qupv3_wrap1_s6_clk_src = {
+@@ -694,7 +694,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s7_clk_src_init = {
+ 	.name = "gcc_qupv3_wrap1_s7_clk_src",
+ 	.parent_data = gcc_parent_data_0,
+ 	.num_parents = ARRAY_SIZE(gcc_parent_data_0),
+-	.ops = &clk_rcg2_shared_ops,
++	.ops = &clk_rcg2_ops,
+ };
+ 
+ static struct clk_rcg2 gcc_qupv3_wrap1_s7_clk_src = {
+diff --git a/drivers/clk/qcom/gcc-x1e80100.c b/drivers/clk/qcom/gcc-x1e80100.c
+index 8ea25aa25dff04..7288af845434d8 100644
+--- a/drivers/clk/qcom/gcc-x1e80100.c
++++ b/drivers/clk/qcom/gcc-x1e80100.c
+@@ -6083,7 +6083,7 @@ static struct gdsc gcc_usb20_prim_gdsc = {
+ 	.pd = {
+ 		.name = "gcc_usb20_prim_gdsc",
+ 	},
+-	.pwrsts = PWRSTS_OFF_ON,
++	.pwrsts = PWRSTS_RET_ON,
+ 	.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+ };
+ 
+diff --git a/drivers/clk/ralink/clk-mtmips.c b/drivers/clk/ralink/clk-mtmips.c
+index 76285fbbdeaa2d..4b5d8b741e4e17 100644
+--- a/drivers/clk/ralink/clk-mtmips.c
++++ b/drivers/clk/ralink/clk-mtmips.c
+@@ -264,7 +264,6 @@ static int mtmips_register_pherip_clocks(struct device_node *np,
+ 	}
+ 
+ static struct mtmips_clk_fixed rt3883_fixed_clocks[] = {
+-	CLK_FIXED("xtal", NULL, 40000000),
+ 	CLK_FIXED("periph", "xtal", 40000000)
+ };
+ 
+diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
+index 1b421b8097965b..0f27c33192e10d 100644
+--- a/drivers/clk/renesas/renesas-cpg-mssr.c
++++ b/drivers/clk/renesas/renesas-cpg-mssr.c
+@@ -981,7 +981,7 @@ static void __init cpg_mssr_reserved_exit(struct cpg_mssr_priv *priv)
+ static int __init cpg_mssr_reserved_init(struct cpg_mssr_priv *priv,
+ 					 const struct cpg_mssr_info *info)
+ {
+-	struct device_node *soc = of_find_node_by_path("/soc");
++	struct device_node *soc __free(device_node) = of_find_node_by_path("/soc");
+ 	struct device_node *node;
+ 	uint32_t args[MAX_PHANDLE_ARGS];
+ 	unsigned int *ids = NULL;
+diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
+index c255dba2c96db3..6727a3e30a1297 100644
+--- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
++++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
+@@ -535,11 +535,11 @@ static SUNXI_CCU_M_WITH_MUX_GATE(de_clk, "de", de_parents,
+ 				 CLK_SET_RATE_PARENT);
+ 
+ /*
+- * DSI output seems to work only when PLL_MIPI selected. Set it and prevent
+- * the mux from reparenting.
++ * Experiments showed that RGB output requires pll-video0-2x, while DSI
++ * requires pll-mipi. It will not work with incorrect clock, the screen will
++ * be blank.
++ * sun50i-a64.dtsi assigns pll-mipi as TCON0 parent by default
+  */
+-#define SUN50I_A64_TCON0_CLK_REG	0x118
+-
+ static const char * const tcon0_parents[] = { "pll-mipi", "pll-video0-2x" };
+ static const u8 tcon0_table[] = { 0, 2, };
+ static SUNXI_CCU_MUX_TABLE_WITH_GATE_CLOSEST(tcon0_clk, "tcon0", tcon0_parents,
+@@ -959,11 +959,6 @@ static int sun50i_a64_ccu_probe(struct platform_device *pdev)
+ 
+ 	writel(0x515, reg + SUN50I_A64_PLL_MIPI_REG);
+ 
+-	/* Set PLL MIPI as parent for TCON0 */
+-	val = readl(reg + SUN50I_A64_TCON0_CLK_REG);
+-	val &= ~GENMASK(26, 24);
+-	writel(val | (0 << 24), reg + SUN50I_A64_TCON0_CLK_REG);
+-
+ 	ret = devm_sunxi_ccu_probe(&pdev->dev, reg, &sun50i_a64_ccu_desc);
+ 	if (ret)
+ 		return ret;
+diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.h b/drivers/clk/sunxi-ng/ccu-sun50i-a64.h
+index a8c11c0b4e0676..dfba88a5ad0f7c 100644
+--- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.h
++++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.h
+@@ -21,7 +21,6 @@
+ 
+ /* PLL_VIDEO0 exported for HDMI PHY */
+ 
+-#define CLK_PLL_VIDEO0_2X		8
+ #define CLK_PLL_VE			9
+ #define CLK_PLL_DDR0			10
+ 
+@@ -32,7 +31,6 @@
+ #define CLK_PLL_PERIPH1_2X		14
+ #define CLK_PLL_VIDEO1			15
+ #define CLK_PLL_GPU			16
+-#define CLK_PLL_MIPI			17
+ #define CLK_PLL_HSIC			18
+ #define CLK_PLL_DE			19
+ #define CLK_PLL_DDR1			20
+diff --git a/drivers/clk/thead/clk-th1520-ap.c b/drivers/clk/thead/clk-th1520-ap.c
+index 1015fab9525157..4c9555fc61844d 100644
+--- a/drivers/clk/thead/clk-th1520-ap.c
++++ b/drivers/clk/thead/clk-th1520-ap.c
+@@ -657,7 +657,7 @@ static struct ccu_div apb_pclk = {
+ 		.hw.init	= CLK_HW_INIT_PARENTS_DATA("apb-pclk",
+ 						      apb_parents,
+ 						      &ccu_div_ops,
+-						      0),
++						      CLK_IGNORE_UNUSED),
+ 	},
+ };
+ 
+@@ -794,13 +794,13 @@ static CCU_GATE(CLK_X2X_CPUSYS, x2x_cpusys_clk, "x2x-cpusys", axi4_cpusys2_aclk_
+ 		0x134, BIT(7), 0);
+ static CCU_GATE(CLK_CPU2AON_X2H, cpu2aon_x2h_clk, "cpu2aon-x2h", axi_aclk_pd, 0x138, BIT(8), 0);
+ static CCU_GATE(CLK_CPU2PERI_X2H, cpu2peri_x2h_clk, "cpu2peri-x2h", axi4_cpusys2_aclk_pd,
+-		0x140, BIT(9), 0);
++		0x140, BIT(9), CLK_IGNORE_UNUSED);
+ static CCU_GATE(CLK_PERISYS_APB1_HCLK, perisys_apb1_hclk, "perisys-apb1-hclk", perisys_ahb_hclk_pd,
+ 		0x150, BIT(9), 0);
+ static CCU_GATE(CLK_PERISYS_APB2_HCLK, perisys_apb2_hclk, "perisys-apb2-hclk", perisys_ahb_hclk_pd,
+-		0x150, BIT(10), 0);
++		0x150, BIT(10), CLK_IGNORE_UNUSED);
+ static CCU_GATE(CLK_PERISYS_APB3_HCLK, perisys_apb3_hclk, "perisys-apb3-hclk", perisys_ahb_hclk_pd,
+-		0x150, BIT(11), 0);
++		0x150, BIT(11), CLK_IGNORE_UNUSED);
+ static CCU_GATE(CLK_PERISYS_APB4_HCLK, perisys_apb4_hclk, "perisys-apb4-hclk", perisys_ahb_hclk_pd,
+ 		0x150, BIT(12), 0);
+ static CCU_GATE(CLK_NPU_AXI, npu_axi_clk, "npu-axi", axi_aclk_pd, 0x1c8, BIT(5), 0);
+@@ -896,7 +896,6 @@ static struct ccu_common *th1520_div_clks[] = {
+ 	&vo_axi_clk.common,
+ 	&vp_apb_clk.common,
+ 	&vp_axi_clk.common,
+-	&cpu2vp_clk.common,
+ 	&venc_clk.common,
+ 	&dpu0_clk.common,
+ 	&dpu1_clk.common,
+@@ -916,6 +915,7 @@ static struct ccu_common *th1520_gate_clks[] = {
+ 	&bmu_clk.common,
+ 	&cpu2aon_x2h_clk.common,
+ 	&cpu2peri_x2h_clk.common,
++	&cpu2vp_clk.common,
+ 	&perisys_apb1_hclk.common,
+ 	&perisys_apb2_hclk.common,
+ 	&perisys_apb3_hclk.common,
+@@ -1048,7 +1048,8 @@ static int th1520_clk_probe(struct platform_device *pdev)
+ 		hw = devm_clk_hw_register_gate_parent_data(dev,
+ 							   cg->common.hw.init->name,
+ 							   cg->common.hw.init->parent_data,
+-							   0, base + cg->common.cfg0,
++							   cg->common.hw.init->flags,
++							   base + cg->common.cfg0,
+ 							   ffs(cg->enable) - 1, 0, NULL);
+ 		if (IS_ERR(hw))
+ 			return PTR_ERR(hw);
+diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
+index 0f04feb6cafaf1..47e910c22a80bd 100644
+--- a/drivers/cpufreq/acpi-cpufreq.c
++++ b/drivers/cpufreq/acpi-cpufreq.c
+@@ -626,7 +626,14 @@ static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
+ #endif
+ 
+ #ifdef CONFIG_ACPI_CPPC_LIB
+-static u64 get_max_boost_ratio(unsigned int cpu)
++/*
++ * get_max_boost_ratio: Computes the max_boost_ratio as the ratio
++ * between the highest_perf and the nominal_perf.
++ *
++ * Returns the max_boost_ratio for @cpu. Returns the CPPC nominal
++ * frequency via @nominal_freq if it is non-NULL pointer.
++ */
++static u64 get_max_boost_ratio(unsigned int cpu, u64 *nominal_freq)
+ {
+ 	struct cppc_perf_caps perf_caps;
+ 	u64 highest_perf, nominal_perf;
+@@ -655,6 +662,9 @@ static u64 get_max_boost_ratio(unsigned int cpu)
+ 
+ 	nominal_perf = perf_caps.nominal_perf;
+ 
++	if (nominal_freq)
++		*nominal_freq = perf_caps.nominal_freq;
++
+ 	if (!highest_perf || !nominal_perf) {
+ 		pr_debug("CPU%d: highest or nominal performance missing\n", cpu);
+ 		return 0;
+@@ -667,8 +677,12 @@ static u64 get_max_boost_ratio(unsigned int cpu)
+ 
+ 	return div_u64(highest_perf << SCHED_CAPACITY_SHIFT, nominal_perf);
+ }
++
+ #else
+-static inline u64 get_max_boost_ratio(unsigned int cpu) { return 0; }
++static inline u64 get_max_boost_ratio(unsigned int cpu, u64 *nominal_freq)
++{
++	return 0;
++}
+ #endif
+ 
+ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
+@@ -678,9 +692,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
+ 	struct acpi_cpufreq_data *data;
+ 	unsigned int cpu = policy->cpu;
+ 	struct cpuinfo_x86 *c = &cpu_data(cpu);
++	u64 max_boost_ratio, nominal_freq = 0;
+ 	unsigned int valid_states = 0;
+ 	unsigned int result = 0;
+-	u64 max_boost_ratio;
+ 	unsigned int i;
+ #ifdef CONFIG_SMP
+ 	static int blacklisted;
+@@ -830,16 +844,20 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
+ 	}
+ 	freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
+ 
+-	max_boost_ratio = get_max_boost_ratio(cpu);
++	max_boost_ratio = get_max_boost_ratio(cpu, &nominal_freq);
+ 	if (max_boost_ratio) {
+-		unsigned int freq = freq_table[0].frequency;
++		unsigned int freq = nominal_freq;
+ 
+ 		/*
+-		 * Because the loop above sorts the freq_table entries in the
+-		 * descending order, freq is the maximum frequency in the table.
+-		 * Assume that it corresponds to the CPPC nominal frequency and
+-		 * use it to set cpuinfo.max_freq.
++		 * The loop above sorts the freq_table entries in the
++		 * descending order. If ACPI CPPC has not advertised
++		 * the nominal frequency (this is possible in CPPC
++		 * revisions prior to 3), then use the first entry in
++		 * the pstate table as a proxy for nominal frequency.
+ 		 */
++		if (!freq)
++			freq = freq_table[0].frequency;
++
+ 		policy->cpuinfo.max_freq = freq * max_boost_ratio >> SCHED_CAPACITY_SHIFT;
+ 	} else {
+ 		/*
+diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
+index 900d6844c43d3f..e7399780638393 100644
+--- a/drivers/cpufreq/qcom-cpufreq-hw.c
++++ b/drivers/cpufreq/qcom-cpufreq-hw.c
+@@ -143,14 +143,12 @@ static unsigned long qcom_lmh_get_throttle_freq(struct qcom_cpufreq_data *data)
+ }
+ 
+ /* Get the frequency requested by the cpufreq core for the CPU */
+-static unsigned int qcom_cpufreq_get_freq(unsigned int cpu)
++static unsigned int qcom_cpufreq_get_freq(struct cpufreq_policy *policy)
+ {
+ 	struct qcom_cpufreq_data *data;
+ 	const struct qcom_cpufreq_soc_data *soc_data;
+-	struct cpufreq_policy *policy;
+ 	unsigned int index;
+ 
+-	policy = cpufreq_cpu_get_raw(cpu);
+ 	if (!policy)
+ 		return 0;
+ 
+@@ -163,12 +161,10 @@ static unsigned int qcom_cpufreq_get_freq(unsigned int cpu)
+ 	return policy->freq_table[index].frequency;
+ }
+ 
+-static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
++static unsigned int __qcom_cpufreq_hw_get(struct cpufreq_policy *policy)
+ {
+ 	struct qcom_cpufreq_data *data;
+-	struct cpufreq_policy *policy;
+ 
+-	policy = cpufreq_cpu_get_raw(cpu);
+ 	if (!policy)
+ 		return 0;
+ 
+@@ -177,7 +173,12 @@ static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
+ 	if (data->throttle_irq >= 0)
+ 		return qcom_lmh_get_throttle_freq(data) / HZ_PER_KHZ;
+ 
+-	return qcom_cpufreq_get_freq(cpu);
++	return qcom_cpufreq_get_freq(policy);
++}
++
++static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
++{
++	return __qcom_cpufreq_hw_get(cpufreq_cpu_get_raw(cpu));
+ }
+ 
+ static unsigned int qcom_cpufreq_hw_fast_switch(struct cpufreq_policy *policy,
+@@ -363,7 +364,7 @@ static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data)
+ 	 * If h/w throttled frequency is higher than what cpufreq has requested
+ 	 * for, then stop polling and switch back to interrupt mechanism.
+ 	 */
+-	if (throttled_freq >= qcom_cpufreq_get_freq(cpu))
++	if (throttled_freq >= qcom_cpufreq_get_freq(cpufreq_cpu_get_raw(cpu)))
+ 		enable_irq(data->throttle_irq);
+ 	else
+ 		mod_delayed_work(system_highpri_wq, &data->throttle_work,
+@@ -441,7 +442,6 @@ static int qcom_cpufreq_hw_lmh_init(struct cpufreq_policy *policy, int index)
+ 		return data->throttle_irq;
+ 
+ 	data->cancel_throttle = false;
+-	data->policy = policy;
+ 
+ 	mutex_init(&data->throttle_lock);
+ 	INIT_DEFERRABLE_WORK(&data->throttle_work, qcom_lmh_dcvs_poll);
+@@ -552,6 +552,7 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
+ 
+ 	policy->driver_data = data;
+ 	policy->dvfs_possible_from_any_cpu = true;
++	data->policy = policy;
+ 
+ 	ret = qcom_cpufreq_hw_read_lut(cpu_dev, policy);
+ 	if (ret) {
+@@ -622,11 +623,24 @@ static unsigned long qcom_cpufreq_hw_recalc_rate(struct clk_hw *hw, unsigned lon
+ {
+ 	struct qcom_cpufreq_data *data = container_of(hw, struct qcom_cpufreq_data, cpu_clk);
+ 
+-	return qcom_lmh_get_throttle_freq(data);
++	return __qcom_cpufreq_hw_get(data->policy) * HZ_PER_KHZ;
++}
++
++/*
++ * Since we cannot determine the closest rate of the target rate, let's just
++ * return the actual rate at which the clock is running at. This is needed to
++ * make clk_set_rate() API work properly.
++ */
++static int qcom_cpufreq_hw_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
++{
++	req->rate = qcom_cpufreq_hw_recalc_rate(hw, 0);
++
++	return 0;
+ }
+ 
+ static const struct clk_ops qcom_cpufreq_hw_clk_ops = {
+ 	.recalc_rate = qcom_cpufreq_hw_recalc_rate,
++	.determine_rate = qcom_cpufreq_hw_determine_rate,
+ };
+ 
+ static int qcom_cpufreq_hw_driver_probe(struct platform_device *pdev)
+diff --git a/drivers/crypto/caam/blob_gen.c b/drivers/crypto/caam/blob_gen.c
+index 87781c1534ee5b..079a22cc9f02be 100644
+--- a/drivers/crypto/caam/blob_gen.c
++++ b/drivers/crypto/caam/blob_gen.c
+@@ -2,6 +2,7 @@
+ /*
+  * Copyright (C) 2015 Pengutronix, Steffen Trumtrar <kernel@pengutronix.de>
+  * Copyright (C) 2021 Pengutronix, Ahmad Fatoum <kernel@pengutronix.de>
++ * Copyright 2024 NXP
+  */
+ 
+ #define pr_fmt(fmt) "caam blob_gen: " fmt
+@@ -104,7 +105,7 @@ int caam_process_blob(struct caam_blob_priv *priv,
+ 	}
+ 
+ 	ctrlpriv = dev_get_drvdata(jrdev->parent);
+-	moo = FIELD_GET(CSTA_MOO, rd_reg32(&ctrlpriv->ctrl->perfmon.status));
++	moo = FIELD_GET(CSTA_MOO, rd_reg32(&ctrlpriv->jr[0]->perfmon.status));
+ 	if (moo != CSTA_MOO_SECURE && moo != CSTA_MOO_TRUSTED)
+ 		dev_warn(jrdev,
+ 			 "using insecure test key, enable HAB to use unique device key!\n");
+diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
+index 410c83712e2851..30c2b1a64695c0 100644
+--- a/drivers/crypto/hisilicon/sec2/sec.h
++++ b/drivers/crypto/hisilicon/sec2/sec.h
+@@ -37,6 +37,7 @@ struct sec_aead_req {
+ 	u8 *a_ivin;
+ 	dma_addr_t a_ivin_dma;
+ 	struct aead_request *aead_req;
++	bool fallback;
+ };
+ 
+ /* SEC request of Crypto */
+@@ -90,9 +91,7 @@ struct sec_auth_ctx {
+ 	dma_addr_t a_key_dma;
+ 	u8 *a_key;
+ 	u8 a_key_len;
+-	u8 mac_len;
+ 	u8 a_alg;
+-	bool fallback;
+ 	struct crypto_shash *hash_tfm;
+ 	struct crypto_aead *fallback_aead_tfm;
+ };
+diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
+index 0558f98e221f63..a9b1b9b0b03bf7 100644
+--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
++++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
+@@ -948,15 +948,14 @@ static int sec_aead_mac_init(struct sec_aead_req *req)
+ 	struct aead_request *aead_req = req->aead_req;
+ 	struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
+ 	size_t authsize = crypto_aead_authsize(tfm);
+-	u8 *mac_out = req->out_mac;
+ 	struct scatterlist *sgl = aead_req->src;
++	u8 *mac_out = req->out_mac;
+ 	size_t copy_size;
+ 	off_t skip_size;
+ 
+ 	/* Copy input mac */
+ 	skip_size = aead_req->assoclen + aead_req->cryptlen - authsize;
+-	copy_size = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac_out,
+-				       authsize, skip_size);
++	copy_size = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac_out, authsize, skip_size);
+ 	if (unlikely(copy_size != authsize))
+ 		return -EINVAL;
+ 
+@@ -1120,10 +1119,7 @@ static int sec_aead_setauthsize(struct crypto_aead *aead, unsigned int authsize)
+ 	struct sec_ctx *ctx = crypto_tfm_ctx(tfm);
+ 	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
+ 
+-	if (unlikely(a_ctx->fallback_aead_tfm))
+-		return crypto_aead_setauthsize(a_ctx->fallback_aead_tfm, authsize);
+-
+-	return 0;
++	return crypto_aead_setauthsize(a_ctx->fallback_aead_tfm, authsize);
+ }
+ 
+ static int sec_aead_fallback_setkey(struct sec_auth_ctx *a_ctx,
+@@ -1139,7 +1135,6 @@ static int sec_aead_fallback_setkey(struct sec_auth_ctx *a_ctx,
+ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
+ 			   const u32 keylen, const enum sec_hash_alg a_alg,
+ 			   const enum sec_calg c_alg,
+-			   const enum sec_mac_len mac_len,
+ 			   const enum sec_cmode c_mode)
+ {
+ 	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
+@@ -1151,7 +1146,6 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
+ 
+ 	ctx->a_ctx.a_alg = a_alg;
+ 	ctx->c_ctx.c_alg = c_alg;
+-	ctx->a_ctx.mac_len = mac_len;
+ 	c_ctx->c_mode = c_mode;
+ 
+ 	if (c_mode == SEC_CMODE_CCM || c_mode == SEC_CMODE_GCM) {
+@@ -1162,13 +1156,7 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
+ 		}
+ 		memcpy(c_ctx->c_key, key, keylen);
+ 
+-		if (unlikely(a_ctx->fallback_aead_tfm)) {
+-			ret = sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);
+-			if (ret)
+-				return ret;
+-		}
+-
+-		return 0;
++		return sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);
+ 	}
+ 
+ 	ret = crypto_authenc_extractkeys(&keys, key, keylen);
+@@ -1187,10 +1175,15 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
+ 		goto bad_key;
+ 	}
+ 
+-	if ((ctx->a_ctx.mac_len & SEC_SQE_LEN_RATE_MASK)  ||
+-	    (ctx->a_ctx.a_key_len & SEC_SQE_LEN_RATE_MASK)) {
++	if (ctx->a_ctx.a_key_len & SEC_SQE_LEN_RATE_MASK) {
+ 		ret = -EINVAL;
+-		dev_err(dev, "MAC or AUTH key length error!\n");
++		dev_err(dev, "AUTH key length error!\n");
++		goto bad_key;
++	}
++
++	ret = sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);
++	if (ret) {
++		dev_err(dev, "set sec fallback key err!\n");
+ 		goto bad_key;
+ 	}
+ 
+@@ -1202,27 +1195,19 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
+ }
+ 
+ 
+-#define GEN_SEC_AEAD_SETKEY_FUNC(name, aalg, calg, maclen, cmode)	\
+-static int sec_setkey_##name(struct crypto_aead *tfm, const u8 *key,	\
+-	u32 keylen)							\
+-{									\
+-	return sec_aead_setkey(tfm, key, keylen, aalg, calg, maclen, cmode);\
+-}
+-
+-GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha1, SEC_A_HMAC_SHA1,
+-			 SEC_CALG_AES, SEC_HMAC_SHA1_MAC, SEC_CMODE_CBC)
+-GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha256, SEC_A_HMAC_SHA256,
+-			 SEC_CALG_AES, SEC_HMAC_SHA256_MAC, SEC_CMODE_CBC)
+-GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha512, SEC_A_HMAC_SHA512,
+-			 SEC_CALG_AES, SEC_HMAC_SHA512_MAC, SEC_CMODE_CBC)
+-GEN_SEC_AEAD_SETKEY_FUNC(aes_ccm, 0, SEC_CALG_AES,
+-			 SEC_HMAC_CCM_MAC, SEC_CMODE_CCM)
+-GEN_SEC_AEAD_SETKEY_FUNC(aes_gcm, 0, SEC_CALG_AES,
+-			 SEC_HMAC_GCM_MAC, SEC_CMODE_GCM)
+-GEN_SEC_AEAD_SETKEY_FUNC(sm4_ccm, 0, SEC_CALG_SM4,
+-			 SEC_HMAC_CCM_MAC, SEC_CMODE_CCM)
+-GEN_SEC_AEAD_SETKEY_FUNC(sm4_gcm, 0, SEC_CALG_SM4,
+-			 SEC_HMAC_GCM_MAC, SEC_CMODE_GCM)
++#define GEN_SEC_AEAD_SETKEY_FUNC(name, aalg, calg, cmode)				\
++static int sec_setkey_##name(struct crypto_aead *tfm, const u8 *key, u32 keylen)	\
++{											\
++	return sec_aead_setkey(tfm, key, keylen, aalg, calg, cmode);			\
++}
++
++GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha1, SEC_A_HMAC_SHA1, SEC_CALG_AES, SEC_CMODE_CBC)
++GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha256, SEC_A_HMAC_SHA256, SEC_CALG_AES, SEC_CMODE_CBC)
++GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha512, SEC_A_HMAC_SHA512, SEC_CALG_AES, SEC_CMODE_CBC)
++GEN_SEC_AEAD_SETKEY_FUNC(aes_ccm, 0, SEC_CALG_AES, SEC_CMODE_CCM)
++GEN_SEC_AEAD_SETKEY_FUNC(aes_gcm, 0, SEC_CALG_AES, SEC_CMODE_GCM)
++GEN_SEC_AEAD_SETKEY_FUNC(sm4_ccm, 0, SEC_CALG_SM4, SEC_CMODE_CCM)
++GEN_SEC_AEAD_SETKEY_FUNC(sm4_gcm, 0, SEC_CALG_SM4, SEC_CMODE_GCM)
+ 
+ static int sec_aead_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
+ {
+@@ -1470,9 +1455,10 @@ static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
+ static void set_aead_auth_iv(struct sec_ctx *ctx, struct sec_req *req)
+ {
+ 	struct aead_request *aead_req = req->aead_req.aead_req;
+-	struct sec_cipher_req *c_req = &req->c_req;
++	struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
++	size_t authsize = crypto_aead_authsize(tfm);
+ 	struct sec_aead_req *a_req = &req->aead_req;
+-	size_t authsize = ctx->a_ctx.mac_len;
++	struct sec_cipher_req *c_req = &req->c_req;
+ 	u32 data_size = aead_req->cryptlen;
+ 	u8 flage = 0;
+ 	u8 cm, cl;
+@@ -1513,10 +1499,8 @@ static void set_aead_auth_iv(struct sec_ctx *ctx, struct sec_req *req)
+ static void sec_aead_set_iv(struct sec_ctx *ctx, struct sec_req *req)
+ {
+ 	struct aead_request *aead_req = req->aead_req.aead_req;
+-	struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
+-	size_t authsize = crypto_aead_authsize(tfm);
+-	struct sec_cipher_req *c_req = &req->c_req;
+ 	struct sec_aead_req *a_req = &req->aead_req;
++	struct sec_cipher_req *c_req = &req->c_req;
+ 
+ 	memcpy(c_req->c_ivin, aead_req->iv, ctx->c_ctx.ivsize);
+ 
+@@ -1524,15 +1508,11 @@ static void sec_aead_set_iv(struct sec_ctx *ctx, struct sec_req *req)
+ 		/*
+ 		 * CCM 16Byte Cipher_IV: {1B_Flage,13B_IV,2B_counter},
+ 		 * the  counter must set to 0x01
++		 * CCM 16Byte Auth_IV: {1B_AFlage,13B_IV,2B_Ptext_length}
+ 		 */
+-		ctx->a_ctx.mac_len = authsize;
+-		/* CCM 16Byte Auth_IV: {1B_AFlage,13B_IV,2B_Ptext_length} */
+ 		set_aead_auth_iv(ctx, req);
+-	}
+-
+-	/* GCM 12Byte Cipher_IV == Auth_IV */
+-	if (ctx->c_ctx.c_mode == SEC_CMODE_GCM) {
+-		ctx->a_ctx.mac_len = authsize;
++	} else if (ctx->c_ctx.c_mode == SEC_CMODE_GCM) {
++		/* GCM 12Byte Cipher_IV == Auth_IV */
+ 		memcpy(a_req->a_ivin, c_req->c_ivin, SEC_AIV_SIZE);
+ 	}
+ }
+@@ -1542,9 +1522,11 @@ static void sec_auth_bd_fill_xcm(struct sec_auth_ctx *ctx, int dir,
+ {
+ 	struct sec_aead_req *a_req = &req->aead_req;
+ 	struct aead_request *aq = a_req->aead_req;
++	struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
++	size_t authsize = crypto_aead_authsize(tfm);
+ 
+ 	/* C_ICV_Len is MAC size, 0x4 ~ 0x10 */
+-	sec_sqe->type2.icvw_kmode |= cpu_to_le16((u16)ctx->mac_len);
++	sec_sqe->type2.icvw_kmode |= cpu_to_le16((u16)authsize);
+ 
+ 	/* mode set to CCM/GCM, don't set {A_Alg, AKey_Len, MAC_Len} */
+ 	sec_sqe->type2.a_key_addr = sec_sqe->type2.c_key_addr;
+@@ -1568,9 +1550,11 @@ static void sec_auth_bd_fill_xcm_v3(struct sec_auth_ctx *ctx, int dir,
+ {
+ 	struct sec_aead_req *a_req = &req->aead_req;
+ 	struct aead_request *aq = a_req->aead_req;
++	struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
++	size_t authsize = crypto_aead_authsize(tfm);
+ 
+ 	/* C_ICV_Len is MAC size, 0x4 ~ 0x10 */
+-	sqe3->c_icv_key |= cpu_to_le16((u16)ctx->mac_len << SEC_MAC_OFFSET_V3);
++	sqe3->c_icv_key |= cpu_to_le16((u16)authsize << SEC_MAC_OFFSET_V3);
+ 
+ 	/* mode set to CCM/GCM, don't set {A_Alg, AKey_Len, MAC_Len} */
+ 	sqe3->a_key_addr = sqe3->c_key_addr;
+@@ -1594,11 +1578,12 @@ static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir,
+ 	struct sec_aead_req *a_req = &req->aead_req;
+ 	struct sec_cipher_req *c_req = &req->c_req;
+ 	struct aead_request *aq = a_req->aead_req;
++	struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
++	size_t authsize = crypto_aead_authsize(tfm);
+ 
+ 	sec_sqe->type2.a_key_addr = cpu_to_le64(ctx->a_key_dma);
+ 
+-	sec_sqe->type2.mac_key_alg =
+-			cpu_to_le32(ctx->mac_len / SEC_SQE_LEN_RATE);
++	sec_sqe->type2.mac_key_alg = cpu_to_le32(authsize / SEC_SQE_LEN_RATE);
+ 
+ 	sec_sqe->type2.mac_key_alg |=
+ 			cpu_to_le32((u32)((ctx->a_key_len) /
+@@ -1648,11 +1633,13 @@ static void sec_auth_bd_fill_ex_v3(struct sec_auth_ctx *ctx, int dir,
+ 	struct sec_aead_req *a_req = &req->aead_req;
+ 	struct sec_cipher_req *c_req = &req->c_req;
+ 	struct aead_request *aq = a_req->aead_req;
++	struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
++	size_t authsize = crypto_aead_authsize(tfm);
+ 
+ 	sqe3->a_key_addr = cpu_to_le64(ctx->a_key_dma);
+ 
+ 	sqe3->auth_mac_key |=
+-			cpu_to_le32((u32)(ctx->mac_len /
++			cpu_to_le32((u32)(authsize /
+ 			SEC_SQE_LEN_RATE) << SEC_MAC_OFFSET_V3);
+ 
+ 	sqe3->auth_mac_key |=
+@@ -1703,9 +1690,9 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
+ {
+ 	struct aead_request *a_req = req->aead_req.aead_req;
+ 	struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
++	size_t authsize = crypto_aead_authsize(tfm);
+ 	struct sec_aead_req *aead_req = &req->aead_req;
+ 	struct sec_cipher_req *c_req = &req->c_req;
+-	size_t authsize = crypto_aead_authsize(tfm);
+ 	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+ 	struct aead_request *backlog_aead_req;
+ 	struct sec_req *backlog_req;
+@@ -1718,10 +1705,8 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
+ 	if (!err && c_req->encrypt) {
+ 		struct scatterlist *sgl = a_req->dst;
+ 
+-		sz = sg_pcopy_from_buffer(sgl, sg_nents(sgl),
+-					  aead_req->out_mac,
+-					  authsize, a_req->cryptlen +
+-					  a_req->assoclen);
++		sz = sg_pcopy_from_buffer(sgl, sg_nents(sgl), aead_req->out_mac,
++					  authsize, a_req->cryptlen + a_req->assoclen);
+ 		if (unlikely(sz != authsize)) {
+ 			dev_err(c->dev, "copy out mac err!\n");
+ 			err = -EINVAL;
+@@ -1929,8 +1914,10 @@ static void sec_aead_exit(struct crypto_aead *tfm)
+ 
+ static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name)
+ {
++	struct aead_alg *alg = crypto_aead_alg(tfm);
+ 	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
+-	struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
++	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
++	const char *aead_name = alg->base.cra_name;
+ 	int ret;
+ 
+ 	ret = sec_aead_init(tfm);
+@@ -1939,11 +1926,20 @@ static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name)
+ 		return ret;
+ 	}
+ 
+-	auth_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
+-	if (IS_ERR(auth_ctx->hash_tfm)) {
++	a_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
++	if (IS_ERR(a_ctx->hash_tfm)) {
+ 		dev_err(ctx->dev, "aead alloc shash error!\n");
+ 		sec_aead_exit(tfm);
+-		return PTR_ERR(auth_ctx->hash_tfm);
++		return PTR_ERR(a_ctx->hash_tfm);
++	}
++
++	a_ctx->fallback_aead_tfm = crypto_alloc_aead(aead_name, 0,
++						     CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);
++	if (IS_ERR(a_ctx->fallback_aead_tfm)) {
++		dev_err(ctx->dev, "aead driver alloc fallback tfm error!\n");
++		crypto_free_shash(ctx->a_ctx.hash_tfm);
++		sec_aead_exit(tfm);
++		return PTR_ERR(a_ctx->fallback_aead_tfm);
+ 	}
+ 
+ 	return 0;
+@@ -1953,6 +1949,7 @@ static void sec_aead_ctx_exit(struct crypto_aead *tfm)
+ {
+ 	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
+ 
++	crypto_free_aead(ctx->a_ctx.fallback_aead_tfm);
+ 	crypto_free_shash(ctx->a_ctx.hash_tfm);
+ 	sec_aead_exit(tfm);
+ }
+@@ -1979,7 +1976,6 @@ static int sec_aead_xcm_ctx_init(struct crypto_aead *tfm)
+ 		sec_aead_exit(tfm);
+ 		return PTR_ERR(a_ctx->fallback_aead_tfm);
+ 	}
+-	a_ctx->fallback = false;
+ 
+ 	return 0;
+ }
+@@ -2233,21 +2229,20 @@ static int sec_aead_spec_check(struct sec_ctx *ctx, struct sec_req *sreq)
+ {
+ 	struct aead_request *req = sreq->aead_req.aead_req;
+ 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+-	size_t authsize = crypto_aead_authsize(tfm);
++	size_t sz = crypto_aead_authsize(tfm);
+ 	u8 c_mode = ctx->c_ctx.c_mode;
+ 	struct device *dev = ctx->dev;
+ 	int ret;
+ 
+-	if (unlikely(req->cryptlen + req->assoclen > MAX_INPUT_DATA_LEN ||
+-	    req->assoclen > SEC_MAX_AAD_LEN)) {
+-		dev_err(dev, "aead input spec error!\n");
++	/* Hardware does not handle cases where authsize is less than 4 bytes */
++	if (unlikely(sz < MIN_MAC_LEN)) {
++		sreq->aead_req.fallback = true;
+ 		return -EINVAL;
+ 	}
+ 
+-	if (unlikely((c_mode == SEC_CMODE_GCM && authsize < DES_BLOCK_SIZE) ||
+-	   (c_mode == SEC_CMODE_CCM && (authsize < MIN_MAC_LEN ||
+-		authsize & MAC_LEN_MASK)))) {
+-		dev_err(dev, "aead input mac length error!\n");
++	if (unlikely(req->cryptlen + req->assoclen > MAX_INPUT_DATA_LEN ||
++	    req->assoclen > SEC_MAX_AAD_LEN)) {
++		dev_err(dev, "aead input spec error!\n");
+ 		return -EINVAL;
+ 	}
+ 
+@@ -2266,7 +2261,7 @@ static int sec_aead_spec_check(struct sec_ctx *ctx, struct sec_req *sreq)
+ 	if (sreq->c_req.encrypt)
+ 		sreq->c_req.c_len = req->cryptlen;
+ 	else
+-		sreq->c_req.c_len = req->cryptlen - authsize;
++		sreq->c_req.c_len = req->cryptlen - sz;
+ 	if (c_mode == SEC_CMODE_CBC) {
+ 		if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
+ 			dev_err(dev, "aead crypto length error!\n");
+@@ -2292,8 +2287,8 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
+ 
+ 	if (ctx->sec->qm.ver == QM_HW_V2) {
+ 		if (unlikely(!req->cryptlen || (!sreq->c_req.encrypt &&
+-		    req->cryptlen <= authsize))) {
+-			ctx->a_ctx.fallback = true;
++			     req->cryptlen <= authsize))) {
++			sreq->aead_req.fallback = true;
+ 			return -EINVAL;
+ 		}
+ 	}
+@@ -2321,16 +2316,9 @@ static int sec_aead_soft_crypto(struct sec_ctx *ctx,
+ 				bool encrypt)
+ {
+ 	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
+-	struct device *dev = ctx->dev;
+ 	struct aead_request *subreq;
+ 	int ret;
+ 
+-	/* Kunpeng920 aead mode not support input 0 size */
+-	if (!a_ctx->fallback_aead_tfm) {
+-		dev_err(dev, "aead fallback tfm is NULL!\n");
+-		return -EINVAL;
+-	}
+-
+ 	subreq = aead_request_alloc(a_ctx->fallback_aead_tfm, GFP_KERNEL);
+ 	if (!subreq)
+ 		return -ENOMEM;
+@@ -2362,10 +2350,11 @@ static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
+ 	req->aead_req.aead_req = a_req;
+ 	req->c_req.encrypt = encrypt;
+ 	req->ctx = ctx;
++	req->aead_req.fallback = false;
+ 
+ 	ret = sec_aead_param_check(ctx, req);
+ 	if (unlikely(ret)) {
+-		if (ctx->a_ctx.fallback)
++		if (req->aead_req.fallback)
+ 			return sec_aead_soft_crypto(ctx, a_req, encrypt);
+ 		return -EINVAL;
+ 	}
+diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.h b/drivers/crypto/hisilicon/sec2/sec_crypto.h
+index 27a0ee5ad9131c..04725b514382f8 100644
+--- a/drivers/crypto/hisilicon/sec2/sec_crypto.h
++++ b/drivers/crypto/hisilicon/sec2/sec_crypto.h
+@@ -23,17 +23,6 @@ enum sec_hash_alg {
+ 	SEC_A_HMAC_SHA512 = 0x15,
+ };
+ 
+-enum sec_mac_len {
+-	SEC_HMAC_CCM_MAC   = 16,
+-	SEC_HMAC_GCM_MAC   = 16,
+-	SEC_SM3_MAC        = 32,
+-	SEC_HMAC_SM3_MAC   = 32,
+-	SEC_HMAC_MD5_MAC   = 16,
+-	SEC_HMAC_SHA1_MAC   = 20,
+-	SEC_HMAC_SHA256_MAC = 32,
+-	SEC_HMAC_SHA512_MAC = 64,
+-};
+-
+ enum sec_cmode {
+ 	SEC_CMODE_ECB    = 0x0,
+ 	SEC_CMODE_CBC    = 0x1,
+diff --git a/drivers/crypto/intel/iaa/Makefile b/drivers/crypto/intel/iaa/Makefile
+index b64b208d234408..55bda7770fac79 100644
+--- a/drivers/crypto/intel/iaa/Makefile
++++ b/drivers/crypto/intel/iaa/Makefile
+@@ -3,7 +3,7 @@
+ # Makefile for IAA crypto device drivers
+ #
+ 
+-ccflags-y += -I $(srctree)/drivers/dma/idxd -DDEFAULT_SYMBOL_NAMESPACE=IDXD
++ccflags-y += -I $(srctree)/drivers/dma/idxd -DDEFAULT_SYMBOL_NAMESPACE='"IDXD"'
+ 
+ obj-$(CONFIG_CRYPTO_DEV_IAA_CRYPTO) := iaa_crypto.o
+ 
+diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c
+index 237f8700007021..d2f07e34f3142d 100644
+--- a/drivers/crypto/intel/iaa/iaa_crypto_main.c
++++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c
+@@ -173,7 +173,7 @@ static int set_iaa_sync_mode(const char *name)
+ 		async_mode = false;
+ 		use_irq = false;
+ 	} else if (sysfs_streq(name, "async")) {
+-		async_mode = true;
++		async_mode = false;
+ 		use_irq = false;
+ 	} else if (sysfs_streq(name, "async_irq")) {
+ 		async_mode = true;
+diff --git a/drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c b/drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c
+index f8a77bff88448d..e43361392c83f7 100644
+--- a/drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c
++++ b/drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c
+@@ -471,6 +471,7 @@ static int init_ixp_crypto(struct device *dev)
+ 			return -ENODEV;
+ 		}
+ 		npe_id = npe_spec.args[0];
++		of_node_put(npe_spec.np);
+ 
+ 		ret = of_parse_phandle_with_fixed_args(np, "queue-rx", 1, 0,
+ 						       &queue_spec);
+@@ -479,6 +480,7 @@ static int init_ixp_crypto(struct device *dev)
+ 			return -ENODEV;
+ 		}
+ 		recv_qid = queue_spec.args[0];
++		of_node_put(queue_spec.np);
+ 
+ 		ret = of_parse_phandle_with_fixed_args(np, "queue-txready", 1, 0,
+ 						       &queue_spec);
+@@ -487,6 +489,7 @@ static int init_ixp_crypto(struct device *dev)
+ 			return -ENODEV;
+ 		}
+ 		send_qid = queue_spec.args[0];
++		of_node_put(queue_spec.np);
+ 	} else {
+ 		/*
+ 		 * Hardcoded engine when using platform data, this goes away
+diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile
+index eac73cbfdd38e2..7acf9c576149ba 100644
+--- a/drivers/crypto/intel/qat/qat_common/Makefile
++++ b/drivers/crypto/intel/qat/qat_common/Makefile
+@@ -1,6 +1,6 @@
+ # SPDX-License-Identifier: GPL-2.0
+ obj-$(CONFIG_CRYPTO_DEV_QAT) += intel_qat.o
+-ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=CRYPTO_QAT
++ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE='"CRYPTO_QAT"'
+ intel_qat-objs := adf_cfg.o \
+ 	adf_isr.o \
+ 	adf_ctl_drv.o \
+diff --git a/drivers/crypto/tegra/tegra-se-aes.c b/drivers/crypto/tegra/tegra-se-aes.c
+index ae7a0f8435fc63..3106fd1e84b91e 100644
+--- a/drivers/crypto/tegra/tegra-se-aes.c
++++ b/drivers/crypto/tegra/tegra-se-aes.c
+@@ -1752,10 +1752,13 @@ static int tegra_cmac_digest(struct ahash_request *req)
+ 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ 	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
+ 	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
++	int ret;
+ 
+-	tegra_cmac_init(req);
+-	rctx->task |= SHA_UPDATE | SHA_FINAL;
++	ret = tegra_cmac_init(req);
++	if (ret)
++		return ret;
+ 
++	rctx->task |= SHA_UPDATE | SHA_FINAL;
+ 	return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
+ }
+ 
+diff --git a/drivers/crypto/tegra/tegra-se-hash.c b/drivers/crypto/tegra/tegra-se-hash.c
+index 4d4bd727f49869..0b5cdd5676b17e 100644
+--- a/drivers/crypto/tegra/tegra-se-hash.c
++++ b/drivers/crypto/tegra/tegra-se-hash.c
+@@ -615,13 +615,16 @@ static int tegra_sha_digest(struct ahash_request *req)
+ 	struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
+ 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ 	struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
++	int ret;
+ 
+ 	if (ctx->fallback)
+ 		return tegra_sha_fallback_digest(req);
+ 
+-	tegra_sha_init(req);
+-	rctx->task |= SHA_UPDATE | SHA_FINAL;
++	ret = tegra_sha_init(req);
++	if (ret)
++		return ret;
+ 
++	rctx->task |= SHA_UPDATE | SHA_FINAL;
+ 	return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
+ }
+ 
+diff --git a/drivers/dma/idxd/Makefile b/drivers/dma/idxd/Makefile
+index 2b4a0d406e1e71..9ff9d7b87b649d 100644
+--- a/drivers/dma/idxd/Makefile
++++ b/drivers/dma/idxd/Makefile
+@@ -1,4 +1,4 @@
+-ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=IDXD
++ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE='"IDXD"'
+ 
+ obj-$(CONFIG_INTEL_IDXD_BUS) += idxd_bus.o
+ idxd_bus-y := bus.o
+diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
+index 5f8d2e93ff3fb5..7f861fb07cb837 100644
+--- a/drivers/dma/ti/edma.c
++++ b/drivers/dma/ti/edma.c
+@@ -208,7 +208,6 @@ struct edma_desc {
+ struct edma_cc;
+ 
+ struct edma_tc {
+-	struct device_node		*node;
+ 	u16				id;
+ };
+ 
+@@ -2466,13 +2465,13 @@ static int edma_probe(struct platform_device *pdev)
+ 			if (ret || i == ecc->num_tc)
+ 				break;
+ 
+-			ecc->tc_list[i].node = tc_args.np;
+ 			ecc->tc_list[i].id = i;
+ 			queue_priority_mapping[i][1] = tc_args.args[0];
+ 			if (queue_priority_mapping[i][1] > lowest_priority) {
+ 				lowest_priority = queue_priority_mapping[i][1];
+ 				info->default_queue = i;
+ 			}
++			of_node_put(tc_args.np);
+ 		}
+ 
+ 		/* See if we have optional dma-channel-mask array */
+diff --git a/drivers/firewire/device-attribute-test.c b/drivers/firewire/device-attribute-test.c
+index 2f123c6b0a1659..97478a96d1c965 100644
+--- a/drivers/firewire/device-attribute-test.c
++++ b/drivers/firewire/device-attribute-test.c
+@@ -99,6 +99,7 @@ static void device_attr_simple_avc(struct kunit *test)
+ 	struct device *unit0_dev = (struct device *)&unit0.device;
+ 	static const int unit0_expected_ids[] = {0x00ffffff, 0x00ffffff, 0x0000a02d, 0x00010001};
+ 	char *buf = kunit_kzalloc(test, PAGE_SIZE, GFP_KERNEL);
++	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
+ 	int ids[4] = {0, 0, 0, 0};
+ 
+ 	// Ensure associations for node and unit devices.
+@@ -180,6 +181,7 @@ static void device_attr_legacy_avc(struct kunit *test)
+ 	struct device *unit0_dev = (struct device *)&unit0.device;
+ 	static const int unit0_expected_ids[] = {0x00012345, 0x00fedcba, 0x00abcdef, 0x00543210};
+ 	char *buf = kunit_kzalloc(test, PAGE_SIZE, GFP_KERNEL);
++	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
+ 	int ids[4] = {0, 0, 0, 0};
+ 
+ 	// Ensure associations for node and unit devices.
+diff --git a/drivers/firmware/efi/sysfb_efi.c b/drivers/firmware/efi/sysfb_efi.c
+index cc807ed35aedf7..1e509595ac0343 100644
+--- a/drivers/firmware/efi/sysfb_efi.c
++++ b/drivers/firmware/efi/sysfb_efi.c
+@@ -91,6 +91,7 @@ void efifb_setup_from_dmi(struct screen_info *si, const char *opt)
+ 		_ret_;						\
+ 	})
+ 
++#ifdef CONFIG_EFI
+ static int __init efifb_set_system(const struct dmi_system_id *id)
+ {
+ 	struct efifb_dmi_info *info = id->driver_data;
+@@ -346,7 +347,6 @@ static const struct fwnode_operations efifb_fwnode_ops = {
+ 	.add_links = efifb_add_links,
+ };
+ 
+-#ifdef CONFIG_EFI
+ static struct fwnode_handle efifb_fwnode;
+ 
+ __init void sysfb_apply_efi_quirks(void)
+diff --git a/drivers/firmware/qcom/qcom_scm.c b/drivers/firmware/qcom/qcom_scm.c
+index 14afd68664a911..a6bdedbbf70888 100644
+--- a/drivers/firmware/qcom/qcom_scm.c
++++ b/drivers/firmware/qcom/qcom_scm.c
+@@ -2001,13 +2001,17 @@ static int qcom_scm_probe(struct platform_device *pdev)
+ 
+ 	irq = platform_get_irq_optional(pdev, 0);
+ 	if (irq < 0) {
+-		if (irq != -ENXIO)
+-			return irq;
++		if (irq != -ENXIO) {
++			ret = irq;
++			goto err;
++		}
+ 	} else {
+ 		ret = devm_request_threaded_irq(__scm->dev, irq, NULL, qcom_scm_irq_handler,
+ 						IRQF_ONESHOT, "qcom-scm", __scm);
+-		if (ret < 0)
+-			return dev_err_probe(scm->dev, ret, "Failed to request qcom-scm irq\n");
++		if (ret < 0) {
++			dev_err_probe(scm->dev, ret, "Failed to request qcom-scm irq\n");
++			goto err;
++		}
+ 	}
+ 
+ 	__get_convention();
+@@ -2026,14 +2030,18 @@ static int qcom_scm_probe(struct platform_device *pdev)
+ 		qcom_scm_disable_sdi();
+ 
+ 	ret = of_reserved_mem_device_init(__scm->dev);
+-	if (ret && ret != -ENODEV)
+-		return dev_err_probe(__scm->dev, ret,
+-				     "Failed to setup the reserved memory region for TZ mem\n");
++	if (ret && ret != -ENODEV) {
++		dev_err_probe(__scm->dev, ret,
++			      "Failed to setup the reserved memory region for TZ mem\n");
++		goto err;
++	}
+ 
+ 	ret = qcom_tzmem_enable(__scm->dev);
+-	if (ret)
+-		return dev_err_probe(__scm->dev, ret,
+-				     "Failed to enable the TrustZone memory allocator\n");
++	if (ret) {
++		dev_err_probe(__scm->dev, ret,
++			      "Failed to enable the TrustZone memory allocator\n");
++		goto err;
++	}
+ 
+ 	memset(&pool_config, 0, sizeof(pool_config));
+ 	pool_config.initial_size = 0;
+@@ -2041,9 +2049,11 @@ static int qcom_scm_probe(struct platform_device *pdev)
+ 	pool_config.max_size = SZ_256K;
+ 
+ 	__scm->mempool = devm_qcom_tzmem_pool_new(__scm->dev, &pool_config);
+-	if (IS_ERR(__scm->mempool))
+-		return dev_err_probe(__scm->dev, PTR_ERR(__scm->mempool),
+-				     "Failed to create the SCM memory pool\n");
++	if (IS_ERR(__scm->mempool)) {
++		dev_err_probe(__scm->dev, PTR_ERR(__scm->mempool),
++			      "Failed to create the SCM memory pool\n");
++		goto err;
++	}
+ 
+ 	/*
+ 	 * Initialize the QSEECOM interface.
+@@ -2059,6 +2069,12 @@ static int qcom_scm_probe(struct platform_device *pdev)
+ 	WARN(ret < 0, "failed to initialize qseecom: %d\n", ret);
+ 
+ 	return 0;
++
++err:
++	/* Paired with smp_load_acquire() in qcom_scm_is_available(). */
++	smp_store_release(&__scm, NULL);
++
++	return ret;
+ }
+ 
+ static void qcom_scm_shutdown(struct platform_device *pdev)
+diff --git a/drivers/gpio/gpio-idio-16.c b/drivers/gpio/gpio-idio-16.c
+index 53b1eb876a1257..2c951258929721 100644
+--- a/drivers/gpio/gpio-idio-16.c
++++ b/drivers/gpio/gpio-idio-16.c
+@@ -14,7 +14,7 @@
+ 
+ #include "gpio-idio-16.h"
+ 
+-#define DEFAULT_SYMBOL_NAMESPACE GPIO_IDIO_16
++#define DEFAULT_SYMBOL_NAMESPACE "GPIO_IDIO_16"
+ 
+ #define IDIO_16_DAT_BASE 0x0
+ #define IDIO_16_OUT_BASE IDIO_16_DAT_BASE
+diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
+index 4cb455b2bdee71..619b6fb9d833a4 100644
+--- a/drivers/gpio/gpio-mxc.c
++++ b/drivers/gpio/gpio-mxc.c
+@@ -490,8 +490,7 @@ static int mxc_gpio_probe(struct platform_device *pdev)
+ 	port->gc.request = mxc_gpio_request;
+ 	port->gc.free = mxc_gpio_free;
+ 	port->gc.to_irq = mxc_gpio_to_irq;
+-	port->gc.base = (pdev->id < 0) ? of_alias_get_id(np, "gpio") * 32 :
+-					     pdev->id * 32;
++	port->gc.base = of_alias_get_id(np, "gpio") * 32;
+ 
+ 	err = devm_gpiochip_add_data(&pdev->dev, &port->gc, port);
+ 	if (err)
+diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
+index 3f2d33ee20cca9..e49802f26e07f8 100644
+--- a/drivers/gpio/gpio-pca953x.c
++++ b/drivers/gpio/gpio-pca953x.c
+@@ -1088,7 +1088,8 @@ static int pca953x_probe(struct i2c_client *client)
+ 		 */
+ 		reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ 		if (IS_ERR(reset_gpio))
+-			return PTR_ERR(reset_gpio);
++			return dev_err_probe(dev, PTR_ERR(reset_gpio),
++					     "Failed to get reset gpio\n");
+ 	}
+ 
+ 	chip->client = client;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+index 3bc0cbf45bc59a..a46d6dd6de32fc 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+@@ -1133,8 +1133,7 @@ uint64_t kgd_gfx_v9_hqd_get_pq_addr(struct amdgpu_device *adev,
+ 	uint32_t low, high;
+ 	uint64_t queue_addr = 0;
+ 
+-	if (!adev->debug_exp_resets &&
+-	    !adev->gfx.num_gfx_rings)
++	if (!amdgpu_gpu_recovery)
+ 		return 0;
+ 
+ 	kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst);
+@@ -1185,6 +1184,9 @@ uint64_t kgd_gfx_v9_hqd_reset(struct amdgpu_device *adev,
+ 	uint32_t low, high, pipe_reset_data = 0;
+ 	uint64_t queue_addr = 0;
+ 
++	if (!amdgpu_gpu_recovery)
++		return 0;
++
+ 	kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst);
+ 	amdgpu_gfx_rlc_enter_safe_mode(adev, inst);
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+index 9f922ec50ea2dc..ae9ca6788df78c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -2065,6 +2065,7 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
+ 	ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GDS);
+ 	ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS);
+ 	ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA);
++	ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_DOORBELL);
+ 	ttm_device_fini(&adev->mman.bdev);
+ 	adev->mman.initialized = false;
+ 	DRM_INFO("amdgpu: ttm finalized\n");
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index e7cd51c95141e1..e2501c98e107d3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -7251,10 +7251,6 @@ static int gfx_v9_0_reset_kcq(struct amdgpu_ring *ring,
+ 	unsigned long flags;
+ 	int i, r;
+ 
+-	if (!adev->debug_exp_resets &&
+-	    !adev->gfx.num_gfx_rings)
+-		return -EINVAL;
+-
+ 	if (amdgpu_sriov_vf(adev))
+ 		return -EINVAL;
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+index ffdb966c4127ee..5dc3454d7d3610 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+@@ -3062,9 +3062,6 @@ static void gfx_v9_4_3_ring_soft_recovery(struct amdgpu_ring *ring,
+ 	struct amdgpu_device *adev = ring->adev;
+ 	uint32_t value = 0;
+ 
+-	if (!adev->debug_exp_resets)
+-		return;
+-
+ 	value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
+ 	value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
+ 	value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
+@@ -3580,9 +3577,6 @@ static int gfx_v9_4_3_reset_kcq(struct amdgpu_ring *ring,
+ 	unsigned long flags;
+ 	int r;
+ 
+-	if (!adev->debug_exp_resets)
+-		return -EINVAL;
+-
+ 	if (amdgpu_sriov_vf(adev))
+ 		return -EINVAL;
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
+index 6fca2915ea8fd5..84c6b0f5c4c0b2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
+@@ -943,6 +943,8 @@ static int vcn_v4_0_3_start_sriov(struct amdgpu_device *adev)
+ 	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ 		vcn_inst = GET_INST(VCN, i);
+ 
++		vcn_v4_0_3_fw_shared_init(adev, vcn_inst);
++
+ 		memset(&header, 0, sizeof(struct mmsch_v4_0_3_init_header));
+ 		header.version = MMSCH_VERSION;
+ 		header.total_size = sizeof(struct mmsch_v4_0_3_init_header) >> 2;
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+index a0bc2c0ac04d96..20ad72d1b0d9b3 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+@@ -697,6 +697,8 @@ struct amdgpu_dm_connector {
+ 	struct drm_dp_mst_port *mst_output_port;
+ 	struct amdgpu_dm_connector *mst_root;
+ 	struct drm_dp_aux *dsc_aux;
++	uint32_t mst_local_bw;
++	uint16_t vc_full_pbn;
+ 	struct mutex handle_mst_msg_ready;
+ 
+ 	/* TODO see if we can merge with ddc_bus or make a dm_connector */
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+index 3d624ae6d9bdfe..754dbc544f03a3 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+@@ -155,6 +155,17 @@ amdgpu_dm_mst_connector_late_register(struct drm_connector *connector)
+ 	return 0;
+ }
+ 
++
++static inline void
++amdgpu_dm_mst_reset_mst_connector_setting(struct amdgpu_dm_connector *aconnector)
++{
++	aconnector->edid = NULL;
++	aconnector->dsc_aux = NULL;
++	aconnector->mst_output_port->passthrough_aux = NULL;
++	aconnector->mst_local_bw = 0;
++	aconnector->vc_full_pbn = 0;
++}
++
+ static void
+ amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector)
+ {
+@@ -182,9 +193,7 @@ amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector)
+ 
+ 		dc_sink_release(dc_sink);
+ 		aconnector->dc_sink = NULL;
+-		aconnector->edid = NULL;
+-		aconnector->dsc_aux = NULL;
+-		port->passthrough_aux = NULL;
++		amdgpu_dm_mst_reset_mst_connector_setting(aconnector);
+ 	}
+ 
+ 	aconnector->mst_status = MST_STATUS_DEFAULT;
+@@ -500,9 +509,7 @@ dm_dp_mst_detect(struct drm_connector *connector,
+ 
+ 		dc_sink_release(aconnector->dc_sink);
+ 		aconnector->dc_sink = NULL;
+-		aconnector->edid = NULL;
+-		aconnector->dsc_aux = NULL;
+-		port->passthrough_aux = NULL;
++		amdgpu_dm_mst_reset_mst_connector_setting(aconnector);
+ 
+ 		amdgpu_dm_set_mst_status(&aconnector->mst_status,
+ 			MST_REMOTE_EDID | MST_ALLOCATE_NEW_PAYLOAD | MST_CLEAR_ALLOCATED_PAYLOAD,
+@@ -1815,9 +1822,18 @@ enum dc_status dm_dp_mst_is_port_support_mode(
+ 			struct drm_dp_mst_port *immediate_upstream_port = NULL;
+ 			uint32_t end_link_bw = 0;
+ 
+-			/*Get last DP link BW capability*/
+-			if (dp_get_link_current_set_bw(&aconnector->mst_output_port->aux, &end_link_bw)) {
+-				if (stream_kbps > end_link_bw) {
++			/*Get last DP link BW capability. Mode shall be supported by Legacy peer*/
++			if (aconnector->mst_output_port->pdt != DP_PEER_DEVICE_DP_LEGACY_CONV &&
++				aconnector->mst_output_port->pdt != DP_PEER_DEVICE_NONE) {
++				if (aconnector->vc_full_pbn != aconnector->mst_output_port->full_pbn) {
++					dp_get_link_current_set_bw(&aconnector->mst_output_port->aux, &end_link_bw);
++					aconnector->vc_full_pbn = aconnector->mst_output_port->full_pbn;
++					aconnector->mst_local_bw = end_link_bw;
++				} else {
++					end_link_bw = aconnector->mst_local_bw;
++				}
++
++				if (end_link_bw > 0 && stream_kbps > end_link_bw) {
+ 					DRM_DEBUG_DRIVER("MST_DSC dsc decode at last link."
+ 							 "Mode required bw can't fit into last link\n");
+ 					return DC_FAIL_BANDWIDTH_VALIDATE;
+diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c
+index e1da48b05d0094..961d8936150ab7 100644
+--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c
++++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c
+@@ -194,6 +194,9 @@ void dpp_reset(struct dpp *dpp_base)
+ 	dpp->filter_h = NULL;
+ 	dpp->filter_v = NULL;
+ 
++	memset(&dpp_base->pos, 0, sizeof(dpp_base->pos));
++	memset(&dpp_base->att, 0, sizeof(dpp_base->att));
++
+ 	memset(&dpp->scl_data, 0, sizeof(dpp->scl_data));
+ 	memset(&dpp->pwl_data, 0, sizeof(dpp->pwl_data));
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c
+index 22ac2b7e49aeae..da963f73829f6c 100644
+--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c
++++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c
+@@ -532,6 +532,12 @@ void hubp1_dcc_control(struct hubp *hubp, bool enable,
+ 			SECONDARY_SURFACE_DCC_IND_64B_BLK, dcc_ind_64b_blk);
+ }
+ 
++void hubp_reset(struct hubp *hubp)
++{
++	memset(&hubp->pos, 0, sizeof(hubp->pos));
++	memset(&hubp->att, 0, sizeof(hubp->att));
++}
++
+ void hubp1_program_surface_config(
+ 	struct hubp *hubp,
+ 	enum surface_pixel_format format,
+@@ -1337,8 +1343,9 @@ static void hubp1_wait_pipe_read_start(struct hubp *hubp)
+ 
+ void hubp1_init(struct hubp *hubp)
+ {
+-	//do nothing
++	hubp_reset(hubp);
+ }
++
+ static const struct hubp_funcs dcn10_hubp_funcs = {
+ 	.hubp_program_surface_flip_and_addr =
+ 			hubp1_program_surface_flip_and_addr,
+@@ -1351,6 +1358,7 @@ static const struct hubp_funcs dcn10_hubp_funcs = {
+ 	.hubp_set_vm_context0_settings = hubp1_set_vm_context0_settings,
+ 	.set_blank = hubp1_set_blank,
+ 	.dcc_control = hubp1_dcc_control,
++	.hubp_reset = hubp_reset,
+ 	.mem_program_viewport = min_set_viewport,
+ 	.set_hubp_blank_en = hubp1_set_hubp_blank_en,
+ 	.set_cursor_attributes	= hubp1_cursor_set_attributes,
+diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h
+index 69119b2fdce23b..193e48b440ef18 100644
+--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h
++++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h
+@@ -746,6 +746,8 @@ void hubp1_dcc_control(struct hubp *hubp,
+ 		bool enable,
+ 		enum hubp_ind_block_size independent_64b_blks);
+ 
++void hubp_reset(struct hubp *hubp);
++
+ bool hubp1_program_surface_flip_and_addr(
+ 	struct hubp *hubp,
+ 	const struct dc_plane_address *address,
+diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c
+index 0637e4c552d8a2..b405fa22f87a9e 100644
+--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c
++++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c
+@@ -1660,6 +1660,7 @@ static struct hubp_funcs dcn20_hubp_funcs = {
+ 	.set_blank = hubp2_set_blank,
+ 	.set_blank_regs = hubp2_set_blank_regs,
+ 	.dcc_control = hubp2_dcc_control,
++	.hubp_reset = hubp_reset,
+ 	.mem_program_viewport = min_set_viewport,
+ 	.set_cursor_attributes	= hubp2_cursor_set_attributes,
+ 	.set_cursor_position	= hubp2_cursor_set_position,
+diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn201/dcn201_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn201/dcn201_hubp.c
+index cd2bfcc5127650..6efcb10abf3dee 100644
+--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn201/dcn201_hubp.c
++++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn201/dcn201_hubp.c
+@@ -121,6 +121,7 @@ static struct hubp_funcs dcn201_hubp_funcs = {
+ 	.set_cursor_position	= hubp1_cursor_set_position,
+ 	.set_blank = hubp1_set_blank,
+ 	.dcc_control = hubp1_dcc_control,
++	.hubp_reset = hubp_reset,
+ 	.mem_program_viewport = min_set_viewport,
+ 	.hubp_clk_cntl = hubp1_clk_cntl,
+ 	.hubp_vtg_sel = hubp1_vtg_sel,
+diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn21/dcn21_hubp.c
+index e13d69a22c1c7f..4e2d9d381db393 100644
+--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn21/dcn21_hubp.c
++++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn21/dcn21_hubp.c
+@@ -811,6 +811,8 @@ static void hubp21_init(struct hubp *hubp)
+ 	struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp);
+ 	//hubp[i].HUBPREQ_DEBUG.HUBPREQ_DEBUG[26] = 1;
+ 	REG_WRITE(HUBPREQ_DEBUG, 1 << 26);
++
++	hubp_reset(hubp);
+ }
+ static struct hubp_funcs dcn21_hubp_funcs = {
+ 	.hubp_enable_tripleBuffer = hubp2_enable_triplebuffer,
+@@ -823,6 +825,7 @@ static struct hubp_funcs dcn21_hubp_funcs = {
+ 	.hubp_set_vm_system_aperture_settings = hubp21_set_vm_system_aperture_settings,
+ 	.set_blank = hubp1_set_blank,
+ 	.dcc_control = hubp1_dcc_control,
++	.hubp_reset = hubp_reset,
+ 	.mem_program_viewport = hubp21_set_viewport,
+ 	.set_cursor_attributes	= hubp2_cursor_set_attributes,
+ 	.set_cursor_position	= hubp1_cursor_set_position,
+diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c
+index 60a64d29035274..c55b1b8be8ffd6 100644
+--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c
++++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c
+@@ -483,6 +483,8 @@ void hubp3_init(struct hubp *hubp)
+ 	struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+ 	//hubp[i].HUBPREQ_DEBUG.HUBPREQ_DEBUG[26] = 1;
+ 	REG_WRITE(HUBPREQ_DEBUG, 1 << 26);
++
++	hubp_reset(hubp);
+ }
+ 
+ static struct hubp_funcs dcn30_hubp_funcs = {
+@@ -497,6 +499,7 @@ static struct hubp_funcs dcn30_hubp_funcs = {
+ 	.set_blank = hubp2_set_blank,
+ 	.set_blank_regs = hubp2_set_blank_regs,
+ 	.dcc_control = hubp3_dcc_control,
++	.hubp_reset = hubp_reset,
+ 	.mem_program_viewport = min_set_viewport,
+ 	.set_cursor_attributes	= hubp2_cursor_set_attributes,
+ 	.set_cursor_position	= hubp2_cursor_set_position,
+diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c
+index 8394e8c069199f..a65a0ddee64672 100644
+--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c
++++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c
+@@ -79,6 +79,7 @@ static struct hubp_funcs dcn31_hubp_funcs = {
+ 	.hubp_set_vm_system_aperture_settings = hubp3_set_vm_system_aperture_settings,
+ 	.set_blank = hubp2_set_blank,
+ 	.dcc_control = hubp3_dcc_control,
++	.hubp_reset = hubp_reset,
+ 	.mem_program_viewport = min_set_viewport,
+ 	.set_cursor_attributes	= hubp2_cursor_set_attributes,
+ 	.set_cursor_position	= hubp2_cursor_set_position,
+diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c
+index ca5b4b28a66441..45023fa9b708dc 100644
+--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c
++++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c
+@@ -181,6 +181,7 @@ static struct hubp_funcs dcn32_hubp_funcs = {
+ 	.set_blank = hubp2_set_blank,
+ 	.set_blank_regs = hubp2_set_blank_regs,
+ 	.dcc_control = hubp3_dcc_control,
++	.hubp_reset = hubp_reset,
+ 	.mem_program_viewport = min_set_viewport,
+ 	.set_cursor_attributes	= hubp32_cursor_set_attributes,
+ 	.set_cursor_position	= hubp2_cursor_set_position,
+diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c
+index d1f05b82b3dd5c..e7625290c0e467 100644
+--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c
++++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c
+@@ -199,6 +199,7 @@ static struct hubp_funcs dcn35_hubp_funcs = {
+ 	.hubp_set_vm_system_aperture_settings = hubp3_set_vm_system_aperture_settings,
+ 	.set_blank = hubp2_set_blank,
+ 	.dcc_control = hubp3_dcc_control,
++	.hubp_reset = hubp_reset,
+ 	.mem_program_viewport = min_set_viewport,
+ 	.set_cursor_attributes	= hubp2_cursor_set_attributes,
+ 	.set_cursor_position	= hubp2_cursor_set_position,
+diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
+index b1ebf5053b4fc3..2d52100510f05f 100644
+--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
++++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
+@@ -141,7 +141,7 @@ void hubp401_update_mall_sel(struct hubp *hubp, uint32_t mall_sel, bool c_cursor
+ 
+ void hubp401_init(struct hubp *hubp)
+ {
+-	//For now nothing to do, HUBPREQ_DEBUG_DB register is removed on DCN4x.
++	hubp_reset(hubp);
+ }
+ 
+ void hubp401_vready_at_or_After_vsync(struct hubp *hubp,
+@@ -974,6 +974,7 @@ static struct hubp_funcs dcn401_hubp_funcs = {
+ 	.hubp_set_vm_system_aperture_settings = hubp3_set_vm_system_aperture_settings,
+ 	.set_blank = hubp2_set_blank,
+ 	.set_blank_regs = hubp2_set_blank_regs,
++	.hubp_reset = hubp_reset,
+ 	.mem_program_viewport = hubp401_set_viewport,
+ 	.set_cursor_attributes	= hubp32_cursor_set_attributes,
+ 	.set_cursor_position	= hubp401_cursor_set_position,
+diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
+index a6a1db5ba8bad1..fd0530251c6e5a 100644
+--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
+@@ -1286,6 +1286,7 @@ void dcn10_plane_atomic_power_down(struct dc *dc,
+ 		if (hws->funcs.hubp_pg_control)
+ 			hws->funcs.hubp_pg_control(hws, hubp->inst, false);
+ 
++		hubp->funcs->hubp_reset(hubp);
+ 		dpp->funcs->dpp_reset(dpp);
+ 
+ 		REG_SET(DC_IP_REQUEST_CNTL, 0,
+@@ -1447,6 +1448,7 @@ void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
+ 		/* Disable on the current state so the new one isn't cleared. */
+ 		pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+ 
++		hubp->funcs->hubp_reset(hubp);
+ 		dpp->funcs->dpp_reset(dpp);
+ 
+ 		pipe_ctx->stream_res.tg = tg;
+diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
+index bd309dbdf7b2a7..f6b17bd3f714fa 100644
+--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
+@@ -787,6 +787,7 @@ void dcn35_init_pipes(struct dc *dc, struct dc_state *context)
+ 		/* Disable on the current state so the new one isn't cleared. */
+ 		pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+ 
++		hubp->funcs->hubp_reset(hubp);
+ 		dpp->funcs->dpp_reset(dpp);
+ 
+ 		pipe_ctx->stream_res.tg = tg;
+@@ -940,6 +941,7 @@ void dcn35_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
+ /*to do, need to support both case*/
+ 	hubp->power_gated = true;
+ 
++	hubp->funcs->hubp_reset(hubp);
+ 	dpp->funcs->dpp_reset(dpp);
+ 
+ 	pipe_ctx->stream = NULL;
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+index 16580d62427891..eec16b0a199dd4 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+@@ -152,6 +152,8 @@ struct hubp_funcs {
+ 	void (*dcc_control)(struct hubp *hubp, bool enable,
+ 			enum hubp_ind_block_size blk_size);
+ 
++	void (*hubp_reset)(struct hubp *hubp);
++
+ 	void (*mem_program_viewport)(
+ 			struct hubp *hubp,
+ 			const struct rect *viewport,
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
+index b56298d9da98f3..5c54c9fd446196 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
+@@ -1420,6 +1420,8 @@ int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_atom_ctr
+ 			GetIndexIntoMasterTable(DATA, SMU_Info),
+ 			&size, &frev, &crev);
+ 
++	if (!psmu_info)
++		return -EINVAL;
+ 
+ 	for (i = 0; i < psmu_info->ucSclkEntryNum; i++) {
+ 		table->entry[i].ucVco_setting = psmu_info->asSclkFcwRangeEntry[i].ucVco_setting;
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c
+index 3007b054c873c9..776d58ea63ae90 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c
+@@ -1120,13 +1120,14 @@ static int vega10_enable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr)
+ 	result = vega10_program_didt_config_registers(hwmgr, SEEDCForceStallPatternConfig_Vega10, VEGA10_CONFIGREG_DIDT);
+ 	result |= vega10_program_didt_config_registers(hwmgr, SEEDCCtrlForceStallConfig_Vega10, VEGA10_CONFIGREG_DIDT);
+ 	if (0 != result)
+-		return result;
++		goto exit_safe_mode;
+ 
+ 	vega10_didt_set_mask(hwmgr, false);
+ 
++exit_safe_mode:
+ 	amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
+ 
+-	return 0;
++	return result;
+ }
+ 
+ static int vega10_disable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr)
+diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c
+index 008d86cc562af7..cf891e7677c0e2 100644
+--- a/drivers/gpu/drm/bridge/ite-it6505.c
++++ b/drivers/gpu/drm/bridge/ite-it6505.c
+@@ -300,7 +300,7 @@
+ #define MAX_CR_LEVEL 0x03
+ #define MAX_EQ_LEVEL 0x03
+ #define AUX_WAIT_TIMEOUT_MS 15
+-#define AUX_FIFO_MAX_SIZE 32
++#define AUX_FIFO_MAX_SIZE 16
+ #define PIXEL_CLK_DELAY 1
+ #define PIXEL_CLK_INVERSE 0
+ #define ADJUST_PHASE_THRESHOLD 80000
+diff --git a/drivers/gpu/drm/display/drm_hdmi_state_helper.c b/drivers/gpu/drm/display/drm_hdmi_state_helper.c
+index feb7a3a759811a..936a8f95d80f7e 100644
+--- a/drivers/gpu/drm/display/drm_hdmi_state_helper.c
++++ b/drivers/gpu/drm/display/drm_hdmi_state_helper.c
+@@ -347,6 +347,8 @@ static int hdmi_generate_avi_infoframe(const struct drm_connector *connector,
+ 		is_limited_range ? HDMI_QUANTIZATION_RANGE_LIMITED : HDMI_QUANTIZATION_RANGE_FULL;
+ 	int ret;
+ 
++	infoframe->set = false;
++
+ 	ret = drm_hdmi_avi_infoframe_from_display_mode(frame, connector, mode);
+ 	if (ret)
+ 		return ret;
+@@ -376,6 +378,8 @@ static int hdmi_generate_spd_infoframe(const struct drm_connector *connector,
+ 		&infoframe->data.spd;
+ 	int ret;
+ 
++	infoframe->set = false;
++
+ 	ret = hdmi_spd_infoframe_init(frame,
+ 				      connector->hdmi.vendor,
+ 				      connector->hdmi.product);
+@@ -398,6 +402,8 @@ static int hdmi_generate_hdr_infoframe(const struct drm_connector *connector,
+ 		&infoframe->data.drm;
+ 	int ret;
+ 
++	infoframe->set = false;
++
+ 	if (connector->max_bpc < 10)
+ 		return 0;
+ 
+@@ -425,6 +431,8 @@ static int hdmi_generate_hdmi_vendor_infoframe(const struct drm_connector *conne
+ 		&infoframe->data.vendor.hdmi;
+ 	int ret;
+ 
++	infoframe->set = false;
++
+ 	if (!info->has_hdmi_infoframe)
+ 		return 0;
+ 
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+index 5c0c9d4e3be183..d3f6df047f5a2b 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+@@ -342,6 +342,7 @@ void *etnaviv_gem_vmap(struct drm_gem_object *obj)
+ static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
+ {
+ 	struct page **pages;
++	pgprot_t prot;
+ 
+ 	lockdep_assert_held(&obj->lock);
+ 
+@@ -349,8 +350,19 @@ static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
+ 	if (IS_ERR(pages))
+ 		return NULL;
+ 
+-	return vmap(pages, obj->base.size >> PAGE_SHIFT,
+-			VM_MAP, pgprot_writecombine(PAGE_KERNEL));
++	switch (obj->flags & ETNA_BO_CACHE_MASK) {
++	case ETNA_BO_CACHED:
++		prot = PAGE_KERNEL;
++		break;
++	case ETNA_BO_UNCACHED:
++		prot = pgprot_noncached(PAGE_KERNEL);
++		break;
++	case ETNA_BO_WC:
++	default:
++		prot = pgprot_writecombine(PAGE_KERNEL);
++	}
++
++	return vmap(pages, obj->base.size >> PAGE_SHIFT, VM_MAP, prot);
+ }
+ 
+ static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
+diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+index 14db7376c712d1..e386b059187acf 100644
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+@@ -1603,7 +1603,9 @@ int a6xx_gmu_wrapper_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
+ 
+ 	gmu->dev = &pdev->dev;
+ 
+-	of_dma_configure(gmu->dev, node, true);
++	ret = of_dma_configure(gmu->dev, node, true);
++	if (ret)
++		return ret;
+ 
+ 	pm_runtime_enable(gmu->dev);
+ 
+@@ -1668,7 +1670,9 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
+ 
+ 	gmu->dev = &pdev->dev;
+ 
+-	of_dma_configure(gmu->dev, node, true);
++	ret = of_dma_configure(gmu->dev, node, true);
++	if (ret)
++		return ret;
+ 
+ 	/* Fow now, don't do anything fancy until we get our feet under us */
+ 	gmu->idle_level = GMU_IDLE_STATE_ACTIVE;
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h
+index eb5dfff2ec4f48..e187e7b1cef167 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h
+@@ -160,6 +160,7 @@ static const struct dpu_lm_cfg sm8650_lm[] = {
+ 		.sblk = &sdm845_lm_sblk,
+ 		.lm_pair = LM_3,
+ 		.pingpong = PINGPONG_2,
++		.dspp = DSPP_2,
+ 	}, {
+ 		.name = "lm_3", .id = LM_3,
+ 		.base = 0x47000, .len = 0x400,
+@@ -167,6 +168,7 @@ static const struct dpu_lm_cfg sm8650_lm[] = {
+ 		.sblk = &sdm845_lm_sblk,
+ 		.lm_pair = LM_2,
+ 		.pingpong = PINGPONG_3,
++		.dspp = DSPP_3,
+ 	}, {
+ 		.name = "lm_4", .id = LM_4,
+ 		.base = 0x48000, .len = 0x400,
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h
+index cbbdaebe357ec4..daef07924886a5 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h
+@@ -65,6 +65,54 @@ static const struct dpu_sspp_cfg sdm670_sspp[] = {
+ 	},
+ };
+ 
++static const struct dpu_lm_cfg sdm670_lm[] = {
++	{
++		.name = "lm_0", .id = LM_0,
++		.base = 0x44000, .len = 0x320,
++		.features = MIXER_SDM845_MASK,
++		.sblk = &sdm845_lm_sblk,
++		.lm_pair = LM_1,
++		.pingpong = PINGPONG_0,
++		.dspp = DSPP_0,
++	}, {
++		.name = "lm_1", .id = LM_1,
++		.base = 0x45000, .len = 0x320,
++		.features = MIXER_SDM845_MASK,
++		.sblk = &sdm845_lm_sblk,
++		.lm_pair = LM_0,
++		.pingpong = PINGPONG_1,
++		.dspp = DSPP_1,
++	}, {
++		.name = "lm_2", .id = LM_2,
++		.base = 0x46000, .len = 0x320,
++		.features = MIXER_SDM845_MASK,
++		.sblk = &sdm845_lm_sblk,
++		.lm_pair = LM_5,
++		.pingpong = PINGPONG_2,
++	}, {
++		.name = "lm_5", .id = LM_5,
++		.base = 0x49000, .len = 0x320,
++		.features = MIXER_SDM845_MASK,
++		.sblk = &sdm845_lm_sblk,
++		.lm_pair = LM_2,
++		.pingpong = PINGPONG_3,
++	},
++};
++
++static const struct dpu_dspp_cfg sdm670_dspp[] = {
++	{
++		.name = "dspp_0", .id = DSPP_0,
++		.base = 0x54000, .len = 0x1800,
++		.features = DSPP_SC7180_MASK,
++		.sblk = &sdm845_dspp_sblk,
++	}, {
++		.name = "dspp_1", .id = DSPP_1,
++		.base = 0x56000, .len = 0x1800,
++		.features = DSPP_SC7180_MASK,
++		.sblk = &sdm845_dspp_sblk,
++	},
++};
++
+ static const struct dpu_dsc_cfg sdm670_dsc[] = {
+ 	{
+ 		.name = "dsc_0", .id = DSC_0,
+@@ -88,8 +136,10 @@ const struct dpu_mdss_cfg dpu_sdm670_cfg = {
+ 	.ctl = sdm845_ctl,
+ 	.sspp_count = ARRAY_SIZE(sdm670_sspp),
+ 	.sspp = sdm670_sspp,
+-	.mixer_count = ARRAY_SIZE(sdm845_lm),
+-	.mixer = sdm845_lm,
++	.mixer_count = ARRAY_SIZE(sdm670_lm),
++	.mixer = sdm670_lm,
++	.dspp_count = ARRAY_SIZE(sdm670_dspp),
++	.dspp = sdm670_dspp,
+ 	.pingpong_count = ARRAY_SIZE(sdm845_pp),
+ 	.pingpong = sdm845_pp,
+ 	.dsc_count = ARRAY_SIZE(sdm670_dsc),
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
+index 6ccfde82fecdb4..421afacb724803 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
+@@ -164,6 +164,7 @@ static const struct dpu_lm_cfg sm8150_lm[] = {
+ 		.sblk = &sdm845_lm_sblk,
+ 		.lm_pair = LM_3,
+ 		.pingpong = PINGPONG_2,
++		.dspp = DSPP_2,
+ 	}, {
+ 		.name = "lm_3", .id = LM_3,
+ 		.base = 0x47000, .len = 0x320,
+@@ -171,6 +172,7 @@ static const struct dpu_lm_cfg sm8150_lm[] = {
+ 		.sblk = &sdm845_lm_sblk,
+ 		.lm_pair = LM_2,
+ 		.pingpong = PINGPONG_3,
++		.dspp = DSPP_3,
+ 	}, {
+ 		.name = "lm_4", .id = LM_4,
+ 		.base = 0x48000, .len = 0x320,
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
+index bab19ddd1d4f97..641023b102bf59 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
+@@ -163,6 +163,7 @@ static const struct dpu_lm_cfg sc8180x_lm[] = {
+ 		.sblk = &sdm845_lm_sblk,
+ 		.lm_pair = LM_3,
+ 		.pingpong = PINGPONG_2,
++		.dspp = DSPP_2,
+ 	}, {
+ 		.name = "lm_3", .id = LM_3,
+ 		.base = 0x47000, .len = 0x320,
+@@ -170,6 +171,7 @@ static const struct dpu_lm_cfg sc8180x_lm[] = {
+ 		.sblk = &sdm845_lm_sblk,
+ 		.lm_pair = LM_2,
+ 		.pingpong = PINGPONG_3,
++		.dspp = DSPP_3,
+ 	}, {
+ 		.name = "lm_4", .id = LM_4,
+ 		.base = 0x48000, .len = 0x320,
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
+index a57d50b1f02807..e8916ae826a6da 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
+@@ -162,6 +162,7 @@ static const struct dpu_lm_cfg sm8250_lm[] = {
+ 		.sblk = &sdm845_lm_sblk,
+ 		.lm_pair = LM_3,
+ 		.pingpong = PINGPONG_2,
++		.dspp = DSPP_2,
+ 	}, {
+ 		.name = "lm_3", .id = LM_3,
+ 		.base = 0x47000, .len = 0x320,
+@@ -169,6 +170,7 @@ static const struct dpu_lm_cfg sm8250_lm[] = {
+ 		.sblk = &sdm845_lm_sblk,
+ 		.lm_pair = LM_2,
+ 		.pingpong = PINGPONG_3,
++		.dspp = DSPP_3,
+ 	}, {
+ 		.name = "lm_4", .id = LM_4,
+ 		.base = 0x48000, .len = 0x320,
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
+index aced16e350daa1..f7c08e89c88203 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
+@@ -162,6 +162,7 @@ static const struct dpu_lm_cfg sm8350_lm[] = {
+ 		.sblk = &sdm845_lm_sblk,
+ 		.lm_pair = LM_3,
+ 		.pingpong = PINGPONG_2,
++		.dspp = DSPP_2,
+ 	}, {
+ 		.name = "lm_3", .id = LM_3,
+ 		.base = 0x47000, .len = 0x320,
+@@ -169,6 +170,7 @@ static const struct dpu_lm_cfg sm8350_lm[] = {
+ 		.sblk = &sdm845_lm_sblk,
+ 		.lm_pair = LM_2,
+ 		.pingpong = PINGPONG_3,
++		.dspp = DSPP_3,
+ 	}, {
+ 		.name = "lm_4", .id = LM_4,
+ 		.base = 0x48000, .len = 0x320,
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
+index ad48defa154f7d..a1dbbf5c652ff9 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
+@@ -160,6 +160,7 @@ static const struct dpu_lm_cfg sm8550_lm[] = {
+ 		.sblk = &sdm845_lm_sblk,
+ 		.lm_pair = LM_3,
+ 		.pingpong = PINGPONG_2,
++		.dspp = DSPP_2,
+ 	}, {
+ 		.name = "lm_3", .id = LM_3,
+ 		.base = 0x47000, .len = 0x320,
+@@ -167,6 +168,7 @@ static const struct dpu_lm_cfg sm8550_lm[] = {
+ 		.sblk = &sdm845_lm_sblk,
+ 		.lm_pair = LM_2,
+ 		.pingpong = PINGPONG_3,
++		.dspp = DSPP_3,
+ 	}, {
+ 		.name = "lm_4", .id = LM_4,
+ 		.base = 0x48000, .len = 0x320,
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h
+index a3e60ac70689e7..e084406ebb0711 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h
+@@ -159,6 +159,7 @@ static const struct dpu_lm_cfg x1e80100_lm[] = {
+ 		.sblk = &sdm845_lm_sblk,
+ 		.lm_pair = LM_3,
+ 		.pingpong = PINGPONG_2,
++		.dspp = DSPP_2,
+ 	}, {
+ 		.name = "lm_3", .id = LM_3,
+ 		.base = 0x47000, .len = 0x320,
+@@ -166,6 +167,7 @@ static const struct dpu_lm_cfg x1e80100_lm[] = {
+ 		.sblk = &sdm845_lm_sblk,
+ 		.lm_pair = LM_2,
+ 		.pingpong = PINGPONG_3,
++		.dspp = DSPP_3,
+ 	}, {
+ 		.name = "lm_4", .id = LM_4,
+ 		.base = 0x48000, .len = 0x320,
+diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
+index 576995ddce37e9..8bbc7fb881d599 100644
+--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
++++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
+@@ -389,7 +389,7 @@ struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
+ 
+ 	/* TODO: different regulators in other cases? */
+ 	mdp4_lcdc_encoder->regs[0].supply = "lvds-vccs-3p3v";
+-	mdp4_lcdc_encoder->regs[1].supply = "lvds-vccs-3p3v";
++	mdp4_lcdc_encoder->regs[1].supply = "lvds-pll-vdda";
+ 	mdp4_lcdc_encoder->regs[2].supply = "lvds-vdda";
+ 
+ 	ret = devm_regulator_bulk_get(dev->dev,
+diff --git a/drivers/gpu/drm/msm/dp/dp_audio.c b/drivers/gpu/drm/msm/dp/dp_audio.c
+index a599fc5d63c524..f4e01da5c55b00 100644
+--- a/drivers/gpu/drm/msm/dp/dp_audio.c
++++ b/drivers/gpu/drm/msm/dp/dp_audio.c
+@@ -329,10 +329,10 @@ static void dp_audio_safe_to_exit_level(struct dp_audio_private *audio)
+ 		safe_to_exit_level = 5;
+ 		break;
+ 	default:
++		safe_to_exit_level = 14;
+ 		drm_dbg_dp(audio->drm_dev,
+ 				"setting the default safe_to_exit_level = %u\n",
+ 				safe_to_exit_level);
+-		safe_to_exit_level = 14;
+ 		break;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c
+index e6ffaf92d26d32..1c4211cfa2a476 100644
+--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c
++++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c
+@@ -137,7 +137,7 @@ static inline u32 pll_get_integloop_gain(u64 frac_start, u64 bclk, u32 ref_clk,
+ 
+ 	base <<= (digclk_divsel == 2 ? 1 : 0);
+ 
+-	return (base <= 2046 ? base : 2046);
++	return base;
+ }
+ 
+ static inline u32 pll_get_pll_cmp(u64 fdata, unsigned long ref_clk)
+diff --git a/drivers/gpu/drm/msm/msm_kms.c b/drivers/gpu/drm/msm/msm_kms.c
+index af6a6fcb11736f..6749f0fbca96d5 100644
+--- a/drivers/gpu/drm/msm/msm_kms.c
++++ b/drivers/gpu/drm/msm/msm_kms.c
+@@ -244,7 +244,6 @@ int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv)
+ 	ret = priv->kms_init(ddev);
+ 	if (ret) {
+ 		DRM_DEV_ERROR(dev, "failed to load kms\n");
+-		priv->kms = NULL;
+ 		return ret;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/panthor/panthor_device.c b/drivers/gpu/drm/panthor/panthor_device.c
+index 6fbff516c1c1f0..01dff89bed4e1d 100644
+--- a/drivers/gpu/drm/panthor/panthor_device.c
++++ b/drivers/gpu/drm/panthor/panthor_device.c
+@@ -445,8 +445,8 @@ int panthor_device_resume(struct device *dev)
+ 	    drm_dev_enter(&ptdev->base, &cookie)) {
+ 		panthor_gpu_resume(ptdev);
+ 		panthor_mmu_resume(ptdev);
+-		ret = drm_WARN_ON(&ptdev->base, panthor_fw_resume(ptdev));
+-		if (!ret) {
++		ret = panthor_fw_resume(ptdev);
++		if (!drm_WARN_ON(&ptdev->base, ret)) {
+ 			panthor_sched_resume(ptdev);
+ 		} else {
+ 			panthor_mmu_suspend(ptdev);
+diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+index 9873172e3fd331..5880d87fe6b3aa 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
++++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+@@ -33,7 +33,6 @@
+ #include <uapi/linux/videodev2.h>
+ #include <dt-bindings/soc/rockchip,vop2.h>
+ 
+-#include "rockchip_drm_drv.h"
+ #include "rockchip_drm_gem.h"
+ #include "rockchip_drm_vop2.h"
+ #include "rockchip_rgb.h"
+@@ -550,6 +549,25 @@ static bool rockchip_vop2_mod_supported(struct drm_plane *plane, u32 format,
+ 	if (modifier == DRM_FORMAT_MOD_INVALID)
+ 		return false;
+ 
++	if (vop2->data->soc_id == 3568 || vop2->data->soc_id == 3566) {
++		if (vop2_cluster_window(win)) {
++			if (modifier == DRM_FORMAT_MOD_LINEAR) {
++				drm_dbg_kms(vop2->drm,
++					    "Cluster window only supports format with afbc\n");
++				return false;
++			}
++		}
++	}
++
++	if (format == DRM_FORMAT_XRGB2101010 || format == DRM_FORMAT_XBGR2101010) {
++		if (vop2->data->soc_id == 3588) {
++			if (!rockchip_afbc(plane, modifier)) {
++				drm_dbg_kms(vop2->drm, "Only support 32 bpp format with afbc\n");
++				return false;
++			}
++		}
++	}
++
+ 	if (modifier == DRM_FORMAT_MOD_LINEAR)
+ 		return true;
+ 
+@@ -1320,6 +1338,12 @@ static void vop2_plane_atomic_update(struct drm_plane *plane,
+ 		&fb->format->format,
+ 		afbc_en ? "AFBC" : "", &yrgb_mst);
+ 
++	if (vop2->data->soc_id > 3568) {
++		vop2_win_write(win, VOP2_WIN_AXI_BUS_ID, win->data->axi_bus_id);
++		vop2_win_write(win, VOP2_WIN_AXI_YRGB_R_ID, win->data->axi_yrgb_r_id);
++		vop2_win_write(win, VOP2_WIN_AXI_UV_R_ID, win->data->axi_uv_r_id);
++	}
++
+ 	if (vop2_cluster_window(win))
+ 		vop2_win_write(win, VOP2_WIN_AFBC_HALF_BLOCK_EN, half_block_en);
+ 
+@@ -1721,9 +1745,9 @@ static unsigned long rk3588_calc_cru_cfg(struct vop2_video_port *vp, int id,
+ 		else
+ 			dclk_out_rate = v_pixclk >> 2;
+ 
+-		dclk_rate = rk3588_calc_dclk(dclk_out_rate, 600000);
++		dclk_rate = rk3588_calc_dclk(dclk_out_rate, 600000000);
+ 		if (!dclk_rate) {
+-			drm_err(vop2->drm, "DP dclk_out_rate out of range, dclk_out_rate: %ld KHZ\n",
++			drm_err(vop2->drm, "DP dclk_out_rate out of range, dclk_out_rate: %ld Hz\n",
+ 				dclk_out_rate);
+ 			return 0;
+ 		}
+@@ -1738,9 +1762,9 @@ static unsigned long rk3588_calc_cru_cfg(struct vop2_video_port *vp, int id,
+ 		 * dclk_rate = N * dclk_core_rate N = (1,2,4 ),
+ 		 * we get a little factor here
+ 		 */
+-		dclk_rate = rk3588_calc_dclk(dclk_out_rate, 600000);
++		dclk_rate = rk3588_calc_dclk(dclk_out_rate, 600000000);
+ 		if (!dclk_rate) {
+-			drm_err(vop2->drm, "MIPI dclk out of range, dclk_out_rate: %ld KHZ\n",
++			drm_err(vop2->drm, "MIPI dclk out of range, dclk_out_rate: %ld Hz\n",
+ 				dclk_out_rate);
+ 			return 0;
+ 		}
+@@ -2159,7 +2183,6 @@ static int vop2_find_start_mixer_id_for_vp(struct vop2 *vop2, u8 port_id)
+ 
+ static void vop2_setup_cluster_alpha(struct vop2 *vop2, struct vop2_win *main_win)
+ {
+-	u32 offset = (main_win->data->phys_id * 0x10);
+ 	struct vop2_alpha_config alpha_config;
+ 	struct vop2_alpha alpha;
+ 	struct drm_plane_state *bottom_win_pstate;
+@@ -2167,6 +2190,7 @@ static void vop2_setup_cluster_alpha(struct vop2 *vop2, struct vop2_win *main_wi
+ 	u16 src_glb_alpha_val, dst_glb_alpha_val;
+ 	bool premulti_en = false;
+ 	bool swap = false;
++	u32 offset = 0;
+ 
+ 	/* At one win mode, win0 is dst/bottom win, and win1 is a all zero src/top win */
+ 	bottom_win_pstate = main_win->base.state;
+@@ -2185,6 +2209,22 @@ static void vop2_setup_cluster_alpha(struct vop2 *vop2, struct vop2_win *main_wi
+ 	vop2_parse_alpha(&alpha_config, &alpha);
+ 
+ 	alpha.src_color_ctrl.bits.src_dst_swap = swap;
++
++	switch (main_win->data->phys_id) {
++	case ROCKCHIP_VOP2_CLUSTER0:
++		offset = 0x0;
++		break;
++	case ROCKCHIP_VOP2_CLUSTER1:
++		offset = 0x10;
++		break;
++	case ROCKCHIP_VOP2_CLUSTER2:
++		offset = 0x20;
++		break;
++	case ROCKCHIP_VOP2_CLUSTER3:
++		offset = 0x30;
++		break;
++	}
++
+ 	vop2_writel(vop2, RK3568_CLUSTER0_MIX_SRC_COLOR_CTRL + offset,
+ 		    alpha.src_color_ctrl.val);
+ 	vop2_writel(vop2, RK3568_CLUSTER0_MIX_DST_COLOR_CTRL + offset,
+@@ -2232,6 +2272,12 @@ static void vop2_setup_alpha(struct vop2_video_port *vp)
+ 		struct vop2_win *win = to_vop2_win(plane);
+ 		int zpos = plane->state->normalized_zpos;
+ 
++		/*
++		 * Need to configure alpha from second layer.
++		 */
++		if (zpos == 0)
++			continue;
++
+ 		if (plane->state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI)
+ 			premulti_en = 1;
+ 		else
+@@ -2308,7 +2354,10 @@ static void vop2_setup_layer_mixer(struct vop2_video_port *vp)
+ 	struct drm_plane *plane;
+ 	u32 layer_sel = 0;
+ 	u32 port_sel;
+-	unsigned int nlayer, ofs;
++	u8 layer_id;
++	u8 old_layer_id;
++	u8 layer_sel_id;
++	unsigned int ofs;
+ 	u32 ovl_ctrl;
+ 	int i;
+ 	struct vop2_video_port *vp0 = &vop2->vps[0];
+@@ -2352,9 +2401,30 @@ static void vop2_setup_layer_mixer(struct vop2_video_port *vp)
+ 	for (i = 0; i < vp->id; i++)
+ 		ofs += vop2->vps[i].nlayers;
+ 
+-	nlayer = 0;
+ 	drm_atomic_crtc_for_each_plane(plane, &vp->crtc) {
+ 		struct vop2_win *win = to_vop2_win(plane);
++		struct vop2_win *old_win;
++
++		layer_id = (u8)(plane->state->normalized_zpos + ofs);
++
++		/*
++		 * Find the layer this win bind in old state.
++		 */
++		for (old_layer_id = 0; old_layer_id < vop2->data->win_size; old_layer_id++) {
++			layer_sel_id = (layer_sel >> (4 * old_layer_id)) & 0xf;
++			if (layer_sel_id == win->data->layer_sel_id)
++				break;
++		}
++
++		/*
++		 * Find the win bind to this layer in old state
++		 */
++		for (i = 0; i < vop2->data->win_size; i++) {
++			old_win = &vop2->win[i];
++			layer_sel_id = (layer_sel >> (4 * layer_id)) & 0xf;
++			if (layer_sel_id == old_win->data->layer_sel_id)
++				break;
++		}
+ 
+ 		switch (win->data->phys_id) {
+ 		case ROCKCHIP_VOP2_CLUSTER0:
+@@ -2399,17 +2469,14 @@ static void vop2_setup_layer_mixer(struct vop2_video_port *vp)
+ 			break;
+ 		}
+ 
+-		layer_sel &= ~RK3568_OVL_LAYER_SEL__LAYER(plane->state->normalized_zpos + ofs,
+-							  0x7);
+-		layer_sel |= RK3568_OVL_LAYER_SEL__LAYER(plane->state->normalized_zpos + ofs,
+-							 win->data->layer_sel_id);
+-		nlayer++;
+-	}
+-
+-	/* configure unused layers to 0x5 (reserved) */
+-	for (; nlayer < vp->nlayers; nlayer++) {
+-		layer_sel &= ~RK3568_OVL_LAYER_SEL__LAYER(nlayer + ofs, 0x7);
+-		layer_sel |= RK3568_OVL_LAYER_SEL__LAYER(nlayer + ofs, 5);
++		layer_sel &= ~RK3568_OVL_LAYER_SEL__LAYER(layer_id, 0x7);
++		layer_sel |= RK3568_OVL_LAYER_SEL__LAYER(layer_id, win->data->layer_sel_id);
++		/*
++		 * When we bind a window from layerM to layerN, we also need to move the old
++		 * window on layerN to layerM to avoid one window selected by two or more layers.
++		 */
++		layer_sel &= ~RK3568_OVL_LAYER_SEL__LAYER(old_layer_id, 0x7);
++		layer_sel |= RK3568_OVL_LAYER_SEL__LAYER(old_layer_id, old_win->data->layer_sel_id);
+ 	}
+ 
+ 	vop2_writel(vop2, RK3568_OVL_LAYER_SEL, layer_sel);
+@@ -2444,9 +2511,11 @@ static void vop2_setup_dly_for_windows(struct vop2 *vop2)
+ 			sdly |= FIELD_PREP(RK3568_SMART_DLY_NUM__ESMART1, dly);
+ 			break;
+ 		case ROCKCHIP_VOP2_SMART0:
++		case ROCKCHIP_VOP2_ESMART2:
+ 			sdly |= FIELD_PREP(RK3568_SMART_DLY_NUM__SMART0, dly);
+ 			break;
+ 		case ROCKCHIP_VOP2_SMART1:
++		case ROCKCHIP_VOP2_ESMART3:
+ 			sdly |= FIELD_PREP(RK3568_SMART_DLY_NUM__SMART1, dly);
+ 			break;
+ 		}
+@@ -2865,6 +2934,10 @@ static struct reg_field vop2_cluster_regs[VOP2_WIN_MAX_REG] = {
+ 	[VOP2_WIN_Y2R_EN] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 8, 8),
+ 	[VOP2_WIN_R2Y_EN] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 9, 9),
+ 	[VOP2_WIN_CSC_MODE] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 10, 11),
++	[VOP2_WIN_AXI_YRGB_R_ID] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL2, 0, 3),
++	[VOP2_WIN_AXI_UV_R_ID] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL2, 5, 8),
++	/* RK3588 only, reserved bit on rk3568*/
++	[VOP2_WIN_AXI_BUS_ID] = REG_FIELD(RK3568_CLUSTER_CTRL, 13, 13),
+ 
+ 	/* Scale */
+ 	[VOP2_WIN_SCALE_YRGB_X] = REG_FIELD(RK3568_CLUSTER_WIN_SCL_FACTOR_YRGB, 0, 15),
+@@ -2957,6 +3030,10 @@ static struct reg_field vop2_esmart_regs[VOP2_WIN_MAX_REG] = {
+ 	[VOP2_WIN_YMIRROR] = REG_FIELD(RK3568_SMART_CTRL1, 31, 31),
+ 	[VOP2_WIN_COLOR_KEY] = REG_FIELD(RK3568_SMART_COLOR_KEY_CTRL, 0, 29),
+ 	[VOP2_WIN_COLOR_KEY_EN] = REG_FIELD(RK3568_SMART_COLOR_KEY_CTRL, 31, 31),
++	[VOP2_WIN_AXI_YRGB_R_ID] = REG_FIELD(RK3568_SMART_CTRL1, 4, 8),
++	[VOP2_WIN_AXI_UV_R_ID] = REG_FIELD(RK3568_SMART_CTRL1, 12, 16),
++	/* RK3588 only, reserved register on rk3568 */
++	[VOP2_WIN_AXI_BUS_ID] = REG_FIELD(RK3588_SMART_AXI_CTRL, 1, 1),
+ 
+ 	/* Scale */
+ 	[VOP2_WIN_SCALE_YRGB_X] = REG_FIELD(RK3568_SMART_REGION0_SCL_FACTOR_YRGB, 0, 15),
+diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h
+index 615a16196aff6b..130aaa40316d13 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h
++++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h
+@@ -9,6 +9,7 @@
+ 
+ #include <linux/regmap.h>
+ #include <drm/drm_modes.h>
++#include "rockchip_drm_drv.h"
+ #include "rockchip_drm_vop.h"
+ 
+ #define VOP2_VP_FEATURE_OUTPUT_10BIT        BIT(0)
+@@ -78,6 +79,9 @@ enum vop2_win_regs {
+ 	VOP2_WIN_COLOR_KEY,
+ 	VOP2_WIN_COLOR_KEY_EN,
+ 	VOP2_WIN_DITHER_UP,
++	VOP2_WIN_AXI_BUS_ID,
++	VOP2_WIN_AXI_YRGB_R_ID,
++	VOP2_WIN_AXI_UV_R_ID,
+ 
+ 	/* scale regs */
+ 	VOP2_WIN_SCALE_YRGB_X,
+@@ -140,6 +144,10 @@ struct vop2_win_data {
+ 	unsigned int layer_sel_id;
+ 	uint64_t feature;
+ 
++	uint8_t axi_bus_id;
++	uint8_t axi_yrgb_r_id;
++	uint8_t axi_uv_r_id;
++
+ 	unsigned int max_upscale_factor;
+ 	unsigned int max_downscale_factor;
+ 	const u8 dly[VOP2_DLY_MODE_MAX];
+@@ -308,6 +316,7 @@ enum dst_factor_mode {
+ 
+ #define RK3568_CLUSTER_WIN_CTRL0		0x00
+ #define RK3568_CLUSTER_WIN_CTRL1		0x04
++#define RK3568_CLUSTER_WIN_CTRL2		0x08
+ #define RK3568_CLUSTER_WIN_YRGB_MST		0x10
+ #define RK3568_CLUSTER_WIN_CBR_MST		0x14
+ #define RK3568_CLUSTER_WIN_VIR			0x18
+@@ -330,6 +339,7 @@ enum dst_factor_mode {
+ /* (E)smart register definition, offset relative to window base */
+ #define RK3568_SMART_CTRL0			0x00
+ #define RK3568_SMART_CTRL1			0x04
++#define RK3588_SMART_AXI_CTRL			0x08
+ #define RK3568_SMART_REGION0_CTRL		0x10
+ #define RK3568_SMART_REGION0_YRGB_MST		0x14
+ #define RK3568_SMART_REGION0_CBR_MST		0x18
+diff --git a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
+index 18efb3fe1c000f..e473a8f8fd32d4 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
++++ b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
+@@ -313,7 +313,7 @@ static const struct vop2_video_port_data rk3588_vop_video_ports[] = {
+  * AXI1 is a read only bus.
+  *
+  * Every window on a AXI bus must assigned two unique
+- * read id(yrgb_id/uv_id, valid id are 0x1~0xe).
++ * read id(yrgb_r_id/uv_r_id, valid id are 0x1~0xe).
+  *
+  * AXI0:
+  * Cluster0/1, Esmart0/1, WriteBack
+@@ -333,6 +333,9 @@ static const struct vop2_win_data rk3588_vop_win_data[] = {
+ 		.layer_sel_id = 0,
+ 		.supported_rotations = DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270 |
+ 				       DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y,
++		.axi_bus_id = 0,
++		.axi_yrgb_r_id = 2,
++		.axi_uv_r_id = 3,
+ 		.max_upscale_factor = 4,
+ 		.max_downscale_factor = 4,
+ 		.dly = { 4, 26, 29 },
+@@ -349,6 +352,9 @@ static const struct vop2_win_data rk3588_vop_win_data[] = {
+ 		.supported_rotations = DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270 |
+ 				       DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y,
+ 		.type = DRM_PLANE_TYPE_PRIMARY,
++		.axi_bus_id = 0,
++		.axi_yrgb_r_id = 6,
++		.axi_uv_r_id = 7,
+ 		.max_upscale_factor = 4,
+ 		.max_downscale_factor = 4,
+ 		.dly = { 4, 26, 29 },
+@@ -364,6 +370,9 @@ static const struct vop2_win_data rk3588_vop_win_data[] = {
+ 		.supported_rotations = DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270 |
+ 				       DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y,
+ 		.type = DRM_PLANE_TYPE_PRIMARY,
++		.axi_bus_id = 1,
++		.axi_yrgb_r_id = 2,
++		.axi_uv_r_id = 3,
+ 		.max_upscale_factor = 4,
+ 		.max_downscale_factor = 4,
+ 		.dly = { 4, 26, 29 },
+@@ -379,6 +388,9 @@ static const struct vop2_win_data rk3588_vop_win_data[] = {
+ 		.supported_rotations = DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270 |
+ 				       DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y,
+ 		.type = DRM_PLANE_TYPE_PRIMARY,
++		.axi_bus_id = 1,
++		.axi_yrgb_r_id = 6,
++		.axi_uv_r_id = 7,
+ 		.max_upscale_factor = 4,
+ 		.max_downscale_factor = 4,
+ 		.dly = { 4, 26, 29 },
+@@ -393,6 +405,9 @@ static const struct vop2_win_data rk3588_vop_win_data[] = {
+ 		.layer_sel_id = 2,
+ 		.supported_rotations = DRM_MODE_REFLECT_Y,
+ 		.type = DRM_PLANE_TYPE_OVERLAY,
++		.axi_bus_id = 0,
++		.axi_yrgb_r_id = 0x0a,
++		.axi_uv_r_id = 0x0b,
+ 		.max_upscale_factor = 8,
+ 		.max_downscale_factor = 8,
+ 		.dly = { 23, 45, 48 },
+@@ -406,6 +421,9 @@ static const struct vop2_win_data rk3588_vop_win_data[] = {
+ 		.layer_sel_id = 3,
+ 		.supported_rotations = DRM_MODE_REFLECT_Y,
+ 		.type = DRM_PLANE_TYPE_OVERLAY,
++		.axi_bus_id = 0,
++		.axi_yrgb_r_id = 0x0c,
++		.axi_uv_r_id = 0x01,
+ 		.max_upscale_factor = 8,
+ 		.max_downscale_factor = 8,
+ 		.dly = { 23, 45, 48 },
+@@ -419,6 +437,9 @@ static const struct vop2_win_data rk3588_vop_win_data[] = {
+ 		.layer_sel_id = 6,
+ 		.supported_rotations = DRM_MODE_REFLECT_Y,
+ 		.type = DRM_PLANE_TYPE_OVERLAY,
++		.axi_bus_id = 1,
++		.axi_yrgb_r_id = 0x0a,
++		.axi_uv_r_id = 0x0b,
+ 		.max_upscale_factor = 8,
+ 		.max_downscale_factor = 8,
+ 		.dly = { 23, 45, 48 },
+@@ -432,6 +453,9 @@ static const struct vop2_win_data rk3588_vop_win_data[] = {
+ 		.layer_sel_id = 7,
+ 		.supported_rotations = DRM_MODE_REFLECT_Y,
+ 		.type = DRM_PLANE_TYPE_OVERLAY,
++		.axi_bus_id = 1,
++		.axi_yrgb_r_id = 0x0c,
++		.axi_uv_r_id = 0x0d,
+ 		.max_upscale_factor = 8,
+ 		.max_downscale_factor = 8,
+ 		.dly = { 23, 45, 48 },
+diff --git a/drivers/gpu/drm/v3d/v3d_debugfs.c b/drivers/gpu/drm/v3d/v3d_debugfs.c
+index 19e3ee7ac897fe..76816f2551c100 100644
+--- a/drivers/gpu/drm/v3d/v3d_debugfs.c
++++ b/drivers/gpu/drm/v3d/v3d_debugfs.c
+@@ -237,8 +237,8 @@ static int v3d_measure_clock(struct seq_file *m, void *unused)
+ 	if (v3d->ver >= 40) {
+ 		int cycle_count_reg = V3D_PCTR_CYCLE_COUNT(v3d->ver);
+ 		V3D_CORE_WRITE(core, V3D_V4_PCTR_0_SRC_0_3,
+-			       V3D_SET_FIELD(cycle_count_reg,
+-					     V3D_PCTR_S0));
++			       V3D_SET_FIELD_VER(cycle_count_reg,
++						 V3D_PCTR_S0, v3d->ver));
+ 		V3D_CORE_WRITE(core, V3D_V4_PCTR_0_CLR, 1);
+ 		V3D_CORE_WRITE(core, V3D_V4_PCTR_0_EN, 1);
+ 	} else {
+diff --git a/drivers/gpu/drm/v3d/v3d_perfmon.c b/drivers/gpu/drm/v3d/v3d_perfmon.c
+index 6ee56cbd3f1bfc..e3013ac3a5c2a6 100644
+--- a/drivers/gpu/drm/v3d/v3d_perfmon.c
++++ b/drivers/gpu/drm/v3d/v3d_perfmon.c
+@@ -240,17 +240,18 @@ void v3d_perfmon_start(struct v3d_dev *v3d, struct v3d_perfmon *perfmon)
+ 
+ 	for (i = 0; i < ncounters; i++) {
+ 		u32 source = i / 4;
+-		u32 channel = V3D_SET_FIELD(perfmon->counters[i], V3D_PCTR_S0);
++		u32 channel = V3D_SET_FIELD_VER(perfmon->counters[i], V3D_PCTR_S0,
++						v3d->ver);
+ 
+ 		i++;
+-		channel |= V3D_SET_FIELD(i < ncounters ? perfmon->counters[i] : 0,
+-					 V3D_PCTR_S1);
++		channel |= V3D_SET_FIELD_VER(i < ncounters ? perfmon->counters[i] : 0,
++					     V3D_PCTR_S1, v3d->ver);
+ 		i++;
+-		channel |= V3D_SET_FIELD(i < ncounters ? perfmon->counters[i] : 0,
+-					 V3D_PCTR_S2);
++		channel |= V3D_SET_FIELD_VER(i < ncounters ? perfmon->counters[i] : 0,
++					     V3D_PCTR_S2, v3d->ver);
+ 		i++;
+-		channel |= V3D_SET_FIELD(i < ncounters ? perfmon->counters[i] : 0,
+-					 V3D_PCTR_S3);
++		channel |= V3D_SET_FIELD_VER(i < ncounters ? perfmon->counters[i] : 0,
++					     V3D_PCTR_S3, v3d->ver);
+ 		V3D_CORE_WRITE(0, V3D_V4_PCTR_0_SRC_X(source), channel);
+ 	}
+ 
+diff --git a/drivers/gpu/drm/v3d/v3d_regs.h b/drivers/gpu/drm/v3d/v3d_regs.h
+index 1b1a62ad95852b..6da3c69082bd6d 100644
+--- a/drivers/gpu/drm/v3d/v3d_regs.h
++++ b/drivers/gpu/drm/v3d/v3d_regs.h
+@@ -15,6 +15,14 @@
+ 		fieldval & field##_MASK;				\
+ 	 })
+ 
++#define V3D_SET_FIELD_VER(value, field, ver)				\
++	({								\
++		typeof(ver) _ver = (ver);				\
++		u32 fieldval = (value) << field##_SHIFT(_ver);		\
++		WARN_ON((fieldval & ~field##_MASK(_ver)) != 0);		\
++		fieldval & field##_MASK(_ver);				\
++	 })
++
+ #define V3D_GET_FIELD(word, field) (((word) & field##_MASK) >>		\
+ 				    field##_SHIFT)
+ 
+@@ -354,18 +362,15 @@
+ #define V3D_V4_PCTR_0_SRC_28_31                        0x0067c
+ #define V3D_V4_PCTR_0_SRC_X(x)                         (V3D_V4_PCTR_0_SRC_0_3 + \
+ 							4 * (x))
+-# define V3D_PCTR_S0_MASK                              V3D_MASK(6, 0)
+-# define V3D_V7_PCTR_S0_MASK                           V3D_MASK(7, 0)
+-# define V3D_PCTR_S0_SHIFT                             0
+-# define V3D_PCTR_S1_MASK                              V3D_MASK(14, 8)
+-# define V3D_V7_PCTR_S1_MASK                           V3D_MASK(15, 8)
+-# define V3D_PCTR_S1_SHIFT                             8
+-# define V3D_PCTR_S2_MASK                              V3D_MASK(22, 16)
+-# define V3D_V7_PCTR_S2_MASK                           V3D_MASK(23, 16)
+-# define V3D_PCTR_S2_SHIFT                             16
+-# define V3D_PCTR_S3_MASK                              V3D_MASK(30, 24)
+-# define V3D_V7_PCTR_S3_MASK                           V3D_MASK(31, 24)
+-# define V3D_PCTR_S3_SHIFT                             24
++# define V3D_PCTR_S0_MASK(ver) (((ver) >= 71) ? V3D_MASK(7, 0) : V3D_MASK(6, 0))
++# define V3D_PCTR_S0_SHIFT(ver)                        0
++# define V3D_PCTR_S1_MASK(ver) (((ver) >= 71) ? V3D_MASK(15, 8) : V3D_MASK(14, 8))
++# define V3D_PCTR_S1_SHIFT(ver)                        8
++# define V3D_PCTR_S2_MASK(ver) (((ver) >= 71) ? V3D_MASK(23, 16) : V3D_MASK(22, 16))
++# define V3D_PCTR_S2_SHIFT(ver)                        16
++# define V3D_PCTR_S3_MASK(ver) (((ver) >= 71) ? V3D_MASK(31, 24) : V3D_MASK(30, 24))
++# define V3D_PCTR_S3_SHIFT(ver)                        24
++
+ #define V3D_PCTR_CYCLE_COUNT(ver) ((ver >= 71) ? 0 : 32)
+ 
+ /* Output values of the counters. */
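
Note on the hunk above: the new V3D_SET_FIELD_VER() keeps the overflow check of the old V3D_SET_FIELD() but looks the mask and shift up per hardware version, so one call site packs 7-bit counter sources on pre-7.1 parts and 8-bit sources on 7.1 and later. A minimal userspace model of the token-pasting scheme, assuming GCC statement expressions; V3D_MASK() here is a local stand-in for the kernel's GENMASK-based macro, and assert() stands in for WARN_ON():

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Local stand-in for the kernel's GENMASK()/V3D_MASK(). */
#define V3D_MASK(h, l) ((uint32_t)((~0u >> (31 - (h))) & ~((1u << (l)) - 1u)))

#define V3D_PCTR_S0_MASK(ver)  (((ver) >= 71) ? V3D_MASK(7, 0) : V3D_MASK(6, 0))
#define V3D_PCTR_S0_SHIFT(ver) 0

/* Same shape as the patch: ##_MASK/##_SHIFT become per-version lookups. */
#define V3D_SET_FIELD_VER(value, field, ver)				\
	({								\
		uint32_t fieldval = (value) << field##_SHIFT(ver);	\
		assert((fieldval & ~field##_MASK(ver)) == 0);		\
		fieldval & field##_MASK(ver);				\
	})

int main(void)
{
	/* Source 200 needs bit 7: legal on v7.1+, but it would trip the
	 * assert (WARN_ON in the kernel) with the older 7-bit mask. */
	printf("v71 S0 field: %#x\n", V3D_SET_FIELD_VER(200u, V3D_PCTR_S0, 71));
	return 0;
}
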
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index 935ccc38d12958..155deef867ac09 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -1125,6 +1125,8 @@ static void hid_apply_multiplier(struct hid_device *hid,
+ 	while (multiplier_collection->parent_idx != -1 &&
+ 	       multiplier_collection->type != HID_COLLECTION_LOGICAL)
+ 		multiplier_collection = &hid->collection[multiplier_collection->parent_idx];
++	if (multiplier_collection->type != HID_COLLECTION_LOGICAL)
++		multiplier_collection = NULL;
+ 
+ 	effective_multiplier = hid_calculate_multiplier(hid, multiplier);
+ 
+diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
+index fda9dce3da9980..9d80635a91ebd8 100644
+--- a/drivers/hid/hid-input.c
++++ b/drivers/hid/hid-input.c
+@@ -810,10 +810,23 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
+ 			break;
+ 		}
+ 
+-		if ((usage->hid & 0xf0) == 0x90) { /* SystemControl*/
+-			switch (usage->hid & 0xf) {
+-			case 0xb: map_key_clear(KEY_DO_NOT_DISTURB); break;
+-			default: goto ignore;
++		if ((usage->hid & 0xf0) == 0x90) { /* SystemControl & D-pad */
++			switch (usage->hid) {
++			case HID_GD_UP:	   usage->hat_dir = 1; break;
++			case HID_GD_DOWN:  usage->hat_dir = 5; break;
++			case HID_GD_RIGHT: usage->hat_dir = 3; break;
++			case HID_GD_LEFT:  usage->hat_dir = 7; break;
++			case HID_GD_DO_NOT_DISTURB:
++				map_key_clear(KEY_DO_NOT_DISTURB); break;
++			default: goto unknown;
++			}
++
++			if (usage->hid <= HID_GD_LEFT) {
++				if (field->dpad) {
++					map_abs(field->dpad);
++					goto ignore;
++				}
++				map_abs(ABS_HAT0X);
+ 			}
+ 			break;
+ 		}
+@@ -844,22 +857,6 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
+ 		if (field->application == HID_GD_SYSTEM_CONTROL)
+ 			goto ignore;
+ 
+-		if ((usage->hid & 0xf0) == 0x90) {	/* D-pad */
+-			switch (usage->hid) {
+-			case HID_GD_UP:	   usage->hat_dir = 1; break;
+-			case HID_GD_DOWN:  usage->hat_dir = 5; break;
+-			case HID_GD_RIGHT: usage->hat_dir = 3; break;
+-			case HID_GD_LEFT:  usage->hat_dir = 7; break;
+-			default: goto unknown;
+-			}
+-			if (field->dpad) {
+-				map_abs(field->dpad);
+-				goto ignore;
+-			}
+-			map_abs(ABS_HAT0X);
+-			break;
+-		}
+-
+ 		switch (usage->hid) {
+ 		/* These usage IDs map directly to the usage codes. */
+ 		case HID_GD_X: case HID_GD_Y: case HID_GD_Z:
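
Note on the hid-input hunk: folding the D-pad cases into the 0x90 block preserves the long-standing 8-way hat encoding, where usage->hat_dir counts clockwise from "up" (1 = up, 3 = right, 5 = down, 7 = left) and hid-input later converts the code to ABS_HAT0X/ABS_HAT0Y. A small userspace model of that conversion; the table mirrors the kernel's hid_hat_to_axis[] and should be treated as illustrative:

#include <stdio.h>

/* 8-way hat encoding: codes 1..8 walk clockwise from "up"; 0 is centered. */
static const struct { signed char x, y; } hat_to_axis[] = {
	{ 0,  0},           /* 0: centered */
	{ 0, -1}, { 1, -1}, /* 1: up,    2: up-right   */
	{ 1,  0}, { 1,  1}, /* 3: right, 4: down-right */
	{ 0,  1}, {-1,  1}, /* 5: down,  6: down-left  */
	{-1,  0}, {-1, -1}, /* 7: left,  8: up-left    */
};

int main(void)
{
	/* The patch maps HID_GD_UP/RIGHT/DOWN/LEFT to 1/3/5/7. */
	int dpad[] = { 1, 3, 5, 7 };

	for (unsigned i = 0; i < 4; i++)
		printf("hat_dir %d -> x=%d y=%d\n", dpad[i],
		       hat_to_axis[dpad[i]].x, hat_to_axis[dpad[i]].y);
	return 0;
}
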
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index d1b7ccfb3e051f..e07d63db5e1f47 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -2078,7 +2078,7 @@ static const struct hid_device_id mt_devices[] = {
+ 		     I2C_DEVICE_ID_GOODIX_01E8) },
+ 	{ .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT_NSMU,
+ 	  HID_DEVICE(BUS_I2C, HID_GROUP_ANY, I2C_VENDOR_ID_GOODIX,
+-		     I2C_DEVICE_ID_GOODIX_01E8) },
++		     I2C_DEVICE_ID_GOODIX_01E9) },
+ 
+ 	/* GoodTouch panels */
+ 	{ .driver_data = MT_CLS_NSMU,
+diff --git a/drivers/hid/hid-thrustmaster.c b/drivers/hid/hid-thrustmaster.c
+index cf1679b0d4fbb5..6c3e758bbb09e3 100644
+--- a/drivers/hid/hid-thrustmaster.c
++++ b/drivers/hid/hid-thrustmaster.c
+@@ -170,6 +170,14 @@ static void thrustmaster_interrupts(struct hid_device *hdev)
+ 	ep = &usbif->cur_altsetting->endpoint[1];
+ 	b_ep = ep->desc.bEndpointAddress;
+ 
++	/* Are the expected endpoints present? */
++	u8 ep_addr[1] = {b_ep};
++
++	if (!usb_check_int_endpoints(usbif, ep_addr)) {
++		hid_err(hdev, "Unexpected non-int endpoint\n");
++		return;
++	}
++
+ 	for (i = 0; i < ARRAY_SIZE(setup_arr); ++i) {
+ 		memcpy(send_buf, setup_arr[i], setup_arr_sizes[i]);
+ 
+diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
+index 08a3c863f80a27..58480a3f4683fe 100644
+--- a/drivers/hwmon/Kconfig
++++ b/drivers/hwmon/Kconfig
+@@ -413,7 +413,7 @@ config SENSORS_ASPEED
+ 	  will be called aspeed_pwm_tacho.
+ 
+ config SENSORS_ASPEED_G6
+-	tristate "ASPEED g6 PWM and Fan tach driver"
++	tristate "ASPEED G6 PWM and Fan tach driver"
+ 	depends on ARCH_ASPEED || COMPILE_TEST
+ 	depends on PWM
+ 	help
+@@ -421,7 +421,7 @@ config SENSORS_ASPEED_G6
+ 	  controllers.
+ 
+ 	  This driver can also be built as a module. If so, the module
+-	  will be called aspeed_pwm_tacho.
++	  will be called aspeed_g6_pwm_tach.
+ 
+ config SENSORS_ATXP1
+ 	tristate "Attansic ATXP1 VID controller"
+diff --git a/drivers/hwmon/nct6775-core.c b/drivers/hwmon/nct6775-core.c
+index ee04795b98aabe..fa3351351825b7 100644
+--- a/drivers/hwmon/nct6775-core.c
++++ b/drivers/hwmon/nct6775-core.c
+@@ -42,6 +42,9 @@
+ 
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+ 
++#undef DEFAULT_SYMBOL_NAMESPACE
++#define DEFAULT_SYMBOL_NAMESPACE "HWMON_NCT6775"
++
+ #include <linux/module.h>
+ #include <linux/init.h>
+ #include <linux/slab.h>
+@@ -56,9 +59,6 @@
+ #include "lm75.h"
+ #include "nct6775.h"
+ 
+-#undef DEFAULT_SYMBOL_NAMESPACE
+-#define DEFAULT_SYMBOL_NAMESPACE HWMON_NCT6775
+-
+ #define USE_ALTERNATE
+ 
+ /* used to set data->name = nct6775_device_names[data->sio_kind] */
+diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c
+index 9d88b4fa03e423..b3282785523d48 100644
+--- a/drivers/i2c/busses/i2c-designware-common.c
++++ b/drivers/i2c/busses/i2c-designware-common.c
+@@ -8,6 +8,9 @@
+  * Copyright (C) 2007 MontaVista Software Inc.
+  * Copyright (C) 2009 Provigent Ltd.
+  */
++
++#define DEFAULT_SYMBOL_NAMESPACE	"I2C_DW_COMMON"
++
+ #include <linux/acpi.h>
+ #include <linux/clk.h>
+ #include <linux/delay.h>
+@@ -29,8 +32,6 @@
+ #include <linux/types.h>
+ #include <linux/units.h>
+ 
+-#define DEFAULT_SYMBOL_NAMESPACE	I2C_DW_COMMON
+-
+ #include "i2c-designware-core.h"
+ 
+ static char *abort_sources[] = {
+diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
+index e8ac9a7bf0b3d2..28188c6d0555e0 100644
+--- a/drivers/i2c/busses/i2c-designware-master.c
++++ b/drivers/i2c/busses/i2c-designware-master.c
+@@ -8,6 +8,9 @@
+  * Copyright (C) 2007 MontaVista Software Inc.
+  * Copyright (C) 2009 Provigent Ltd.
+  */
++
++#define DEFAULT_SYMBOL_NAMESPACE	"I2C_DW"
++
+ #include <linux/delay.h>
+ #include <linux/err.h>
+ #include <linux/errno.h>
+@@ -22,8 +25,6 @@
+ #include <linux/regmap.h>
+ #include <linux/reset.h>
+ 
+-#define DEFAULT_SYMBOL_NAMESPACE	I2C_DW
+-
+ #include "i2c-designware-core.h"
+ 
+ #define AMD_TIMEOUT_MIN_US	25
+diff --git a/drivers/i2c/busses/i2c-designware-slave.c b/drivers/i2c/busses/i2c-designware-slave.c
+index 7035296aa24ce0..f0f0f1f2131d0a 100644
+--- a/drivers/i2c/busses/i2c-designware-slave.c
++++ b/drivers/i2c/busses/i2c-designware-slave.c
+@@ -6,6 +6,9 @@
+  *
+  * Copyright (C) 2016 Synopsys Inc.
+  */
++
++#define DEFAULT_SYMBOL_NAMESPACE	"I2C_DW"
++
+ #include <linux/delay.h>
+ #include <linux/err.h>
+ #include <linux/errno.h>
+@@ -16,8 +19,6 @@
+ #include <linux/pm_runtime.h>
+ #include <linux/regmap.h>
+ 
+-#define DEFAULT_SYMBOL_NAMESPACE	I2C_DW
+-
+ #include "i2c-designware-core.h"
+ 
+ static void i2c_dw_configure_fifo_slave(struct dw_i2c_dev *dev)
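
Note on the three DesignWare hunks (and the nct6775 one above): they track the tree-wide switch to string-literal symbol namespaces, and hoist the define above the includes so it is already visible when <linux/export.h> is expanded. A sketch of the producer/consumer pairing under that convention; the helper name is hypothetical and module boilerplate is elided:

/* producer: define the namespace before any include that can
 * expand EXPORT_SYMBOL*() */
#define DEFAULT_SYMBOL_NAMESPACE	"I2C_DW"

#include <linux/export.h>
#include <linux/module.h>

int i2c_dw_example_op(void)		/* hypothetical helper */
{
	return 0;
}
EXPORT_SYMBOL_GPL(i2c_dw_example_op);	/* exported into "I2C_DW" */

/* consumer: a module calling i2c_dw_example_op() must declare */
MODULE_IMPORT_NS("I2C_DW");
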
+diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c
+index 8d694672c1104f..dbcd3984f25780 100644
+--- a/drivers/i3c/master/dw-i3c-master.c
++++ b/drivers/i3c/master/dw-i3c-master.c
+@@ -1624,6 +1624,7 @@ EXPORT_SYMBOL_GPL(dw_i3c_common_probe);
+ 
+ void dw_i3c_common_remove(struct dw_i3c_master *master)
+ {
++	cancel_work_sync(&master->hj_work);
+ 	i3c_master_unregister(&master->base);
+ 
+ 	pm_runtime_disable(master->dev);
+diff --git a/drivers/infiniband/hw/Makefile b/drivers/infiniband/hw/Makefile
+index 1211f4317a9f4f..aba96ca9bce5df 100644
+--- a/drivers/infiniband/hw/Makefile
++++ b/drivers/infiniband/hw/Makefile
+@@ -11,7 +11,7 @@ obj-$(CONFIG_INFINIBAND_OCRDMA)		+= ocrdma/
+ obj-$(CONFIG_INFINIBAND_VMWARE_PVRDMA)	+= vmw_pvrdma/
+ obj-$(CONFIG_INFINIBAND_USNIC)		+= usnic/
+ obj-$(CONFIG_INFINIBAND_HFI1)		+= hfi1/
+-obj-$(CONFIG_INFINIBAND_HNS)		+= hns/
++obj-$(CONFIG_INFINIBAND_HNS_HIP08)	+= hns/
+ obj-$(CONFIG_INFINIBAND_QEDR)		+= qedr/
+ obj-$(CONFIG_INFINIBAND_BNXT_RE)	+= bnxt_re/
+ obj-$(CONFIG_INFINIBAND_ERDMA)		+= erdma/
+diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+index 14e434ff51edea..a7067c3c067972 100644
+--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
++++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+@@ -4395,9 +4395,10 @@ int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
+ 	case BNXT_RE_MMAP_TOGGLE_PAGE:
+ 		/* Driver doesn't expect write access for user space */
+ 		if (vma->vm_flags & VM_WRITE)
+-			return -EFAULT;
+-		ret = vm_insert_page(vma, vma->vm_start,
+-				     virt_to_page((void *)bnxt_entry->mem_offset));
++			ret = -EFAULT;
++		else
++			ret = vm_insert_page(vma, vma->vm_start,
++					     virt_to_page((void *)bnxt_entry->mem_offset));
+ 		break;
+ 	default:
+ 		ret = -EINVAL;
+diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
+index 80970a1738f8a6..034b85c4225555 100644
+--- a/drivers/infiniband/hw/cxgb4/device.c
++++ b/drivers/infiniband/hw/cxgb4/device.c
+@@ -1114,8 +1114,10 @@ static inline struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl,
+ 	 * The math here assumes sizeof cpl_pass_accept_req >= sizeof
+ 	 * cpl_rx_pkt.
+ 	 */
+-	skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req) +
+-			sizeof(struct rss_header) - pktshift, GFP_ATOMIC);
++	skb = alloc_skb(size_add(gl->tot_len,
++				 sizeof(struct cpl_pass_accept_req) +
++				 sizeof(struct rss_header)) - pktshift,
++			GFP_ATOMIC);
+ 	if (unlikely(!skb))
+ 		return NULL;
+ 
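Note on the allocation fix above: gl->tot_len comes off the wire, so with plain addition a large value could wrap the skb size and yield a short buffer. size_add() from <linux/overflow.h> saturates to SIZE_MAX instead, making the allocation fail cleanly. A userspace model of the semantics, with a made-up header size:

#include <stdint.h>
#include <stdio.h>

/* Model of the kernel's size_add(): saturate to SIZE_MAX on overflow so a
 * wrapped length produces a failing allocation instead of a short buffer. */
static size_t size_add(size_t a, size_t b)
{
	size_t sum;

	if (__builtin_add_overflow(a, b, &sum))
		return SIZE_MAX;
	return sum;
}

int main(void)
{
	size_t hdrs = 64;               /* stand-in for the two header sizes */
	size_t tot_len = SIZE_MAX - 10; /* hostile/garbage packet length */

	printf("naive sum: %zu\n", tot_len + hdrs);         /* wraps to 53 */
	printf("size_add:  %zu\n", size_add(tot_len, hdrs)); /* SIZE_MAX   */
	return 0;
}
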
+diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
+index 7b5c4522b426a6..955f061a55e9ae 100644
+--- a/drivers/infiniband/hw/cxgb4/qp.c
++++ b/drivers/infiniband/hw/cxgb4/qp.c
+@@ -1599,6 +1599,7 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
+ 	int count;
+ 	int rq_flushed = 0, sq_flushed;
+ 	unsigned long flag;
++	struct ib_event ev;
+ 
+ 	pr_debug("qhp %p rchp %p schp %p\n", qhp, rchp, schp);
+ 
+@@ -1607,6 +1608,13 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
+ 	if (schp != rchp)
+ 		spin_lock(&schp->lock);
+ 	spin_lock(&qhp->lock);
++	if (qhp->srq && qhp->attr.state == C4IW_QP_STATE_ERROR &&
++	    qhp->ibqp.event_handler) {
++		ev.device = qhp->ibqp.device;
++		ev.element.qp = &qhp->ibqp;
++		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
++		qhp->ibqp.event_handler(&ev, qhp->ibqp.qp_context);
++	}
+ 
+ 	if (qhp->wq.flushed) {
+ 		spin_unlock(&qhp->lock);
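
Note on the __flush_qp() hunk: an SRQ-attached QP that enters ERROR otherwise gives the ULP no way to know when the final receive WQE has drained, so the fix synthesizes IB_EVENT_QP_LAST_WQE_REACHED. A toy model of the dispatch; the struct layout and enum value are illustrative sketches, not the rdma/ib_verbs.h definitions:

#include <stdio.h>

enum ib_event_type { IB_EVENT_QP_LAST_WQE_REACHED = 1 }; /* value illustrative */

struct ib_qp;

struct ib_event {
	enum ib_event_type event;
	struct ib_qp *qp;
};

struct ib_qp {
	void (*event_handler)(struct ib_event *ev, void *ctx);
	void *qp_context;
};

static void ulp_handler(struct ib_event *ev, void *ctx)
{
	printf("ULP saw event %d, may now reap SRQ WQEs (ctx %p)\n",
	       ev->event, ctx);
}

int main(void)
{
	struct ib_qp qp = { .event_handler = ulp_handler, .qp_context = &qp };
	struct ib_event ev = {
		.event = IB_EVENT_QP_LAST_WQE_REACHED,
		.qp = &qp,
	};

	if (qp.event_handler)	/* same guard as the hunk */
		qp.event_handler(&ev, qp.qp_context);
	return 0;
}
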
+diff --git a/drivers/infiniband/hw/hns/Kconfig b/drivers/infiniband/hw/hns/Kconfig
+index ab3fbba70789ca..44cdb706fe276d 100644
+--- a/drivers/infiniband/hw/hns/Kconfig
++++ b/drivers/infiniband/hw/hns/Kconfig
+@@ -1,21 +1,11 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+-config INFINIBAND_HNS
+-	tristate "HNS RoCE Driver"
+-	depends on NET_VENDOR_HISILICON
+-	depends on ARM64 || (COMPILE_TEST && 64BIT)
+-	depends on (HNS_DSAF && HNS_ENET) || HNS3
+-	help
+-	  This is a RoCE/RDMA driver for the Hisilicon RoCE engine.
+-
+-	  To compile HIP08 driver as module, choose M here.
+-
+ config INFINIBAND_HNS_HIP08
+-	bool "Hisilicon Hip08 Family RoCE support"
+-	depends on INFINIBAND_HNS && PCI && HNS3
+-	depends on INFINIBAND_HNS=m || HNS3=y
++	tristate "Hisilicon Hip08 Family RoCE support"
++	depends on ARM64 || (COMPILE_TEST && 64BIT)
++	depends on PCI && HNS3
+ 	help
+ 	  RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip08 SoC.
+ 	  The RoCE engine is a PCI device.
+ 
+-	  To compile this driver, choose Y here: if INFINIBAND_HNS is m, this
+-	  module will be called hns-roce-hw-v2.
++	  To compile this driver, choose M here. This module will be called
++	  hns-roce-hw-v2.
+diff --git a/drivers/infiniband/hw/hns/Makefile b/drivers/infiniband/hw/hns/Makefile
+index be1e1cdbcfa8a8..7917af8e6380e8 100644
+--- a/drivers/infiniband/hw/hns/Makefile
++++ b/drivers/infiniband/hw/hns/Makefile
+@@ -5,12 +5,9 @@
+ 
+ ccflags-y :=  -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
+ 
+-hns-roce-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_pd.o \
++hns-roce-hw-v2-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_pd.o \
+ 	hns_roce_ah.o hns_roce_hem.o hns_roce_mr.o hns_roce_qp.o \
+ 	hns_roce_cq.o hns_roce_alloc.o hns_roce_db.o hns_roce_srq.o hns_roce_restrack.o \
+-	hns_roce_debugfs.o
++	hns_roce_debugfs.o hns_roce_hw_v2.o
+ 
+-ifdef CONFIG_INFINIBAND_HNS_HIP08
+-hns-roce-hw-v2-objs := hns_roce_hw_v2.o $(hns-roce-objs)
+-obj-$(CONFIG_INFINIBAND_HNS) += hns-roce-hw-v2.o
+-endif
++obj-$(CONFIG_INFINIBAND_HNS_HIP08) += hns-roce-hw-v2.o
+diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
+index 529db874d67c69..b1bbdcff631d56 100644
+--- a/drivers/infiniband/hw/mlx4/main.c
++++ b/drivers/infiniband/hw/mlx4/main.c
+@@ -351,7 +351,7 @@ static int mlx4_ib_del_gid(const struct ib_gid_attr *attr, void **context)
+ 	struct mlx4_port_gid_table   *port_gid_table;
+ 	int ret = 0;
+ 	int hw_update = 0;
+-	struct gid_entry *gids;
++	struct gid_entry *gids = NULL;
+ 
+ 	if (!rdma_cap_roce_gid_table(attr->device, attr->port_num))
+ 		return -EINVAL;
+@@ -389,10 +389,10 @@ static int mlx4_ib_del_gid(const struct ib_gid_attr *attr, void **context)
+ 	}
+ 	spin_unlock_bh(&iboe->lock);
+ 
+-	if (!ret && hw_update) {
++	if (gids)
+ 		ret = mlx4_ib_update_gids(gids, ibdev, attr->port_num);
+-		kfree(gids);
+-	}
++
++	kfree(gids);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
+index 4b37446758fd4e..64b441542cd5dd 100644
+--- a/drivers/infiniband/hw/mlx5/odp.c
++++ b/drivers/infiniband/hw/mlx5/odp.c
+@@ -228,13 +228,27 @@ static void destroy_unused_implicit_child_mr(struct mlx5_ib_mr *mr)
+ 	unsigned long idx = ib_umem_start(odp) >> MLX5_IMR_MTT_SHIFT;
+ 	struct mlx5_ib_mr *imr = mr->parent;
+ 
++	/*
++	 * If userspace is racing freeing the parent implicit ODP MR then we can
++	 * lose the race with parent destruction. In this case
++	 * mlx5_ib_free_odp_mr() will free everything in the implicit_children
++	 * xarray so NOP is fine. This child MR cannot be destroyed here because
++	 * we are under its umem_mutex.
++	 */
+ 	if (!refcount_inc_not_zero(&imr->mmkey.usecount))
+ 		return;
+ 
+-	xa_erase(&imr->implicit_children, idx);
++	xa_lock(&imr->implicit_children);
++	if (__xa_cmpxchg(&imr->implicit_children, idx, mr, NULL, GFP_KERNEL) !=
++	    mr) {
++		xa_unlock(&imr->implicit_children);
++		return;
++	}
++
+ 	if (MLX5_CAP_ODP(mr_to_mdev(mr)->mdev, mem_page_fault))
+-		xa_erase(&mr_to_mdev(mr)->odp_mkeys,
+-			 mlx5_base_mkey(mr->mmkey.key));
++		__xa_erase(&mr_to_mdev(mr)->odp_mkeys,
++			   mlx5_base_mkey(mr->mmkey.key));
++	xa_unlock(&imr->implicit_children);
+ 
+ 	/* Freeing a MR is a sleeping operation, so bounce to a work queue */
+ 	INIT_WORK(&mr->odp_destroy.work, free_implicit_child_mr_work);
+@@ -500,18 +514,18 @@ static struct mlx5_ib_mr *implicit_get_child_mr(struct mlx5_ib_mr *imr,
+ 		refcount_inc(&ret->mmkey.usecount);
+ 		goto out_lock;
+ 	}
+-	xa_unlock(&imr->implicit_children);
+ 
+ 	if (MLX5_CAP_ODP(dev->mdev, mem_page_fault)) {
+-		ret = xa_store(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key),
+-			       &mr->mmkey, GFP_KERNEL);
++		ret = __xa_store(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key),
++				 &mr->mmkey, GFP_KERNEL);
+ 		if (xa_is_err(ret)) {
+ 			ret = ERR_PTR(xa_err(ret));
+-			xa_erase(&imr->implicit_children, idx);
+-			goto out_mr;
++			__xa_erase(&imr->implicit_children, idx);
++			goto out_lock;
+ 		}
+ 		mr->mmkey.type = MLX5_MKEY_IMPLICIT_CHILD;
+ 	}
++	xa_unlock(&imr->implicit_children);
+ 	mlx5_ib_dbg(mr_to_mdev(imr), "key %x mr %p\n", mr->mmkey.key, mr);
+ 	return mr;
+ 
+@@ -944,8 +958,7 @@ static struct mlx5_ib_mkey *find_odp_mkey(struct mlx5_ib_dev *dev, u32 key)
+ /*
+  * Handle a single data segment in a page-fault WQE or RDMA region.
+  *
+- * Returns number of OS pages retrieved on success. The caller may continue to
+- * the next data segment.
++ * Returns zero on success. The caller may continue to the next data segment.
+  * Can return the following error codes:
+  * -EAGAIN to designate a temporary error. The caller will abort handling the
+  *  page fault and resolve it.
+@@ -958,7 +971,7 @@ static int pagefault_single_data_segment(struct mlx5_ib_dev *dev,
+ 					 u32 *bytes_committed,
+ 					 u32 *bytes_mapped)
+ {
+-	int npages = 0, ret, i, outlen, cur_outlen = 0, depth = 0;
++	int ret, i, outlen, cur_outlen = 0, depth = 0, pages_in_range;
+ 	struct pf_frame *head = NULL, *frame;
+ 	struct mlx5_ib_mkey *mmkey;
+ 	struct mlx5_ib_mr *mr;
+@@ -993,13 +1006,20 @@ static int pagefault_single_data_segment(struct mlx5_ib_dev *dev,
+ 	case MLX5_MKEY_MR:
+ 		mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
+ 
++		pages_in_range = (ALIGN(io_virt + bcnt, PAGE_SIZE) -
++				  (io_virt & PAGE_MASK)) >>
++				 PAGE_SHIFT;
+ 		ret = pagefault_mr(mr, io_virt, bcnt, bytes_mapped, 0, false);
+ 		if (ret < 0)
+ 			goto end;
+ 
+ 		mlx5_update_odp_stats(mr, faults, ret);
+ 
+-		npages += ret;
++		if (ret < pages_in_range) {
++			ret = -EFAULT;
++			goto end;
++		}
++
+ 		ret = 0;
+ 		break;
+ 
+@@ -1090,7 +1110,7 @@ static int pagefault_single_data_segment(struct mlx5_ib_dev *dev,
+ 	kfree(out);
+ 
+ 	*bytes_committed = 0;
+-	return ret ? ret : npages;
++	return ret;
+ }
+ 
+ /*
+@@ -1109,8 +1129,7 @@ static int pagefault_single_data_segment(struct mlx5_ib_dev *dev,
+  *                   the committed bytes).
+  * @receive_queue: receive WQE end of sg list
+  *
+- * Returns the number of pages loaded if positive, zero for an empty WQE, or a
+- * negative error code.
++ * Returns zero for success or a negative error code.
+  */
+ static int pagefault_data_segments(struct mlx5_ib_dev *dev,
+ 				   struct mlx5_pagefault *pfault,
+@@ -1118,7 +1137,7 @@ static int pagefault_data_segments(struct mlx5_ib_dev *dev,
+ 				   void *wqe_end, u32 *bytes_mapped,
+ 				   u32 *total_wqe_bytes, bool receive_queue)
+ {
+-	int ret = 0, npages = 0;
++	int ret = 0;
+ 	u64 io_virt;
+ 	__be32 key;
+ 	u32 byte_count;
+@@ -1175,10 +1194,9 @@ static int pagefault_data_segments(struct mlx5_ib_dev *dev,
+ 						    bytes_mapped);
+ 		if (ret < 0)
+ 			break;
+-		npages += ret;
+ 	}
+ 
+-	return ret < 0 ? ret : npages;
++	return ret;
+ }
+ 
+ /*
+@@ -1414,12 +1432,6 @@ static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_dev *dev,
+ 	free_page((unsigned long)wqe_start);
+ }
+ 
+-static int pages_in_range(u64 address, u32 length)
+-{
+-	return (ALIGN(address + length, PAGE_SIZE) -
+-		(address & PAGE_MASK)) >> PAGE_SHIFT;
+-}
+-
+ static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_dev *dev,
+ 					   struct mlx5_pagefault *pfault)
+ {
+@@ -1458,7 +1470,7 @@ static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_dev *dev,
+ 	if (ret == -EAGAIN) {
+ 		/* We're racing with an invalidation, don't prefetch */
+ 		prefetch_activated = 0;
+-	} else if (ret < 0 || pages_in_range(address, length) > ret) {
++	} else if (ret < 0) {
+ 		mlx5_ib_page_fault_resume(dev, pfault, 1);
+ 		if (ret != -ENOENT)
+ 			mlx5_ib_dbg(dev, "PAGE FAULT error %d. QP 0x%llx, type: 0x%x\n",
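
Note on the destroy_unused_implicit_child_mr() hunk: the destroy path can race mlx5_ib_free_odp_mr(), so rather than blindly erasing, it now takes xa_lock() and compare-exchanges the child MR out of the xarray; exactly one path wins and the loser returns without touching the object again. A userspace model of that cmpxchg-under-lock pattern, with a mutex and a one-entry slot standing in for the locked xarray:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t xa_lock = PTHREAD_MUTEX_INITIALIZER;
static void *slot;			/* one-entry stand-in for the xarray */

/* Erase 'old' only if it is still present; returns 1 when this caller won
 * the race and now owns teardown, 0 when someone else got there first and
 * we must not touch the object again. */
static int erase_if_still_ours(void *old)
{
	int won = 0;

	pthread_mutex_lock(&xa_lock);
	if (slot == old) {		/* __xa_cmpxchg(old -> NULL) */
		slot = NULL;
		won = 1;
	}
	pthread_mutex_unlock(&xa_lock);
	return won;
}

int main(void)
{
	int mr;

	slot = &mr;
	printf("first erase wins:  %d\n", erase_if_still_ours(&mr));
	printf("second erase NOPs: %d\n", erase_if_still_ours(&mr));
	return 0;
}
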
+diff --git a/drivers/infiniband/sw/rxe/rxe_param.h b/drivers/infiniband/sw/rxe/rxe_param.h
+index d2f57ead78ad12..003f681e5dc022 100644
+--- a/drivers/infiniband/sw/rxe/rxe_param.h
++++ b/drivers/infiniband/sw/rxe/rxe_param.h
+@@ -129,7 +129,7 @@ enum rxe_device_param {
+ enum rxe_port_param {
+ 	RXE_PORT_GID_TBL_LEN		= 1024,
+ 	RXE_PORT_PORT_CAP_FLAGS		= IB_PORT_CM_SUP,
+-	RXE_PORT_MAX_MSG_SZ		= 0x800000,
++	RXE_PORT_MAX_MSG_SZ		= (1UL << 31),
+ 	RXE_PORT_BAD_PKEY_CNTR		= 0,
+ 	RXE_PORT_QKEY_VIOL_CNTR		= 0,
+ 	RXE_PORT_LID			= 0,
+diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
+index 67567d62195e86..d9cb682fd71f88 100644
+--- a/drivers/infiniband/sw/rxe/rxe_pool.c
++++ b/drivers/infiniband/sw/rxe/rxe_pool.c
+@@ -178,7 +178,6 @@ int __rxe_cleanup(struct rxe_pool_elem *elem, bool sleepable)
+ {
+ 	struct rxe_pool *pool = elem->pool;
+ 	struct xarray *xa = &pool->xa;
+-	static int timeout = RXE_POOL_TIMEOUT;
+ 	int ret, err = 0;
+ 	void *xa_ret;
+ 
+@@ -202,19 +201,19 @@ int __rxe_cleanup(struct rxe_pool_elem *elem, bool sleepable)
+ 	 * return to rdma-core
+ 	 */
+ 	if (sleepable) {
+-		if (!completion_done(&elem->complete) && timeout) {
++		if (!completion_done(&elem->complete)) {
+ 			ret = wait_for_completion_timeout(&elem->complete,
+-					timeout);
++					msecs_to_jiffies(50000));
+ 
+ 			/* Shouldn't happen. There are still references to
+ 			 * the object but, rather than deadlock, free the
+ 			 * object or pass back to rdma-core.
+ 			 */
+ 			if (WARN_ON(!ret))
+-				err = -EINVAL;
++				err = -ETIMEDOUT;
+ 		}
+ 	} else {
+-		unsigned long until = jiffies + timeout;
++		unsigned long until = jiffies + RXE_POOL_TIMEOUT;
+ 
+ 		/* AH objects are unique in that the destroy_ah verb
+ 		 * can be called in atomic context. This delay
+@@ -226,7 +225,7 @@ int __rxe_cleanup(struct rxe_pool_elem *elem, bool sleepable)
+ 			mdelay(1);
+ 
+ 		if (WARN_ON(!completion_done(&elem->complete)))
+-			err = -EINVAL;
++			err = -ETIMEDOUT;
+ 	}
+ 
+ 	if (pool->cleanup)
+diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
+index 8a5fc20fd18692..589ac0d8489dbd 100644
+--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
++++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
+@@ -696,7 +696,7 @@ static int validate_send_wr(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
+ 		for (i = 0; i < ibwr->num_sge; i++)
+ 			length += ibwr->sg_list[i].length;
+ 
+-		if (length > (1UL << 31)) {
++		if (length > RXE_PORT_MAX_MSG_SZ) {
+ 			rxe_err_qp(qp, "message length too long\n");
+ 			break;
+ 		}
+@@ -980,8 +980,7 @@ static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
+ 	for (i = 0; i < num_sge; i++)
+ 		length += ibwr->sg_list[i].length;
+ 
+-	/* IBA max message size is 2^31 */
+-	if (length >= (1UL<<31)) {
++	if (length > RXE_PORT_MAX_MSG_SZ) {
+ 		err = -EINVAL;
+ 		rxe_dbg("message length too long\n");
+ 		goto err_out;
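
Note on the rxe hunks: previously the advertised port attribute allowed only 0x800000 bytes while the send path hardcoded a 2 GiB check and the receive path rejected exactly 2 GiB with >=. The patch makes all three agree on RXE_PORT_MAX_MSG_SZ, with the limit inclusive. A quick check of the boundary behaviour:

#include <stdio.h>

#define RXE_PORT_MAX_MSG_SZ (1UL << 31)	/* was 0x800000 (8 MiB) */

int main(void)
{
	unsigned long len = 1UL << 31;	/* a maximal 2 GiB message */

	/* old receive check: length >= (1UL << 31) rejected the maximum */
	printf("old recv check rejects: %d\n", len >= (1UL << 31));
	/* unified check: length > RXE_PORT_MAX_MSG_SZ accepts it */
	printf("new check rejects:      %d\n", len > RXE_PORT_MAX_MSG_SZ);
	return 0;
}
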
+diff --git a/drivers/infiniband/ulp/rtrs/rtrs.c b/drivers/infiniband/ulp/rtrs/rtrs.c
+index 4e17d546d4ccf3..bf38ac6f87c47a 100644
+--- a/drivers/infiniband/ulp/rtrs/rtrs.c
++++ b/drivers/infiniband/ulp/rtrs/rtrs.c
+@@ -584,6 +584,9 @@ static void dev_free(struct kref *ref)
+ 	list_del(&dev->entry);
+ 	mutex_unlock(&pool->mutex);
+ 
++	if (pool->ops && pool->ops->deinit)
++		pool->ops->deinit(dev);
++
+ 	ib_dealloc_pd(dev->ib_pd);
+ 	kfree(dev);
+ }
+diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
+index 2916e77f589b81..7289ae0b83aced 100644
+--- a/drivers/infiniband/ulp/srp/ib_srp.c
++++ b/drivers/infiniband/ulp/srp/ib_srp.c
+@@ -3978,7 +3978,6 @@ static struct srp_host *srp_add_port(struct srp_device *device, u32 port)
+ 	return host;
+ 
+ put_host:
+-	device_del(&host->dev);
+ 	put_device(&host->dev);
+ 	return NULL;
+ }
+diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
+index 6386fa4556d9b8..6fac9ee8dd3ed0 100644
+--- a/drivers/iommu/amd/amd_iommu.h
++++ b/drivers/iommu/amd/amd_iommu.h
+@@ -87,7 +87,6 @@ int amd_iommu_complete_ppr(struct device *dev, u32 pasid, int status, int tag);
+  */
+ void amd_iommu_flush_all_caches(struct amd_iommu *iommu);
+ void amd_iommu_update_and_flush_device_table(struct protection_domain *domain);
+-void amd_iommu_domain_update(struct protection_domain *domain);
+ void amd_iommu_domain_flush_pages(struct protection_domain *domain,
+ 				  u64 address, size_t size);
+ void amd_iommu_dev_flush_pasid_pages(struct iommu_dev_data *dev_data,
+diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
+index 8364cd6fa47d01..a24a97a2c6469b 100644
+--- a/drivers/iommu/amd/iommu.c
++++ b/drivers/iommu/amd/iommu.c
+@@ -1606,15 +1606,6 @@ void amd_iommu_update_and_flush_device_table(struct protection_domain *domain)
+ 	domain_flush_complete(domain);
+ }
+ 
+-void amd_iommu_domain_update(struct protection_domain *domain)
+-{
+-	/* Update device table */
+-	amd_iommu_update_and_flush_device_table(domain);
+-
+-	/* Flush domain TLB(s) and wait for completion */
+-	amd_iommu_domain_flush_all(domain);
+-}
+-
+ int amd_iommu_complete_ppr(struct device *dev, u32 pasid, int status, int tag)
+ {
+ 	struct iommu_dev_data *dev_data;
+diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+index 353fea58cd318a..f1a8f8c75cb0e9 100644
+--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
++++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+@@ -2702,9 +2702,14 @@ static int arm_smmu_attach_prepare(struct arm_smmu_attach_state *state,
+ 		 * Translation Requests and Translated transactions are denied
+ 		 * as though ATS is disabled for the stream (STE.EATS == 0b00),
+ 		 * causing F_BAD_ATS_TREQ and F_TRANSL_FORBIDDEN events
+-		 * (IHI0070Ea 5.2 Stream Table Entry). Thus ATS can only be
+-		 * enabled if we have arm_smmu_domain, those always have page
+-		 * tables.
++		 * (IHI0070Ea 5.2 Stream Table Entry).
++		 *
++		 * However, if we have installed a CD table and are using S1DSS
++		 * then ATS will work in S1DSS bypass. See "13.6.4 Full ATS
++		 * skipping stage 1".
++		 *
++		 * Disable ATS if we are going to create a normal 0b100 bypass
++		 * STE.
+ 		 */
+ 		state->ats_enabled = arm_smmu_ats_supported(master);
+ 	}
+@@ -3017,8 +3022,10 @@ static void arm_smmu_attach_dev_ste(struct iommu_domain *domain,
+ 	if (arm_smmu_ssids_in_use(&master->cd_table)) {
+ 		/*
+ 		 * If a CD table has to be present then we need to run with ATS
+-		 * on even though the RID will fail ATS queries with UR. This is
+-		 * because we have no idea what the PASID's need.
++		 * on because we have to assume a PASID is using ATS. For
++		 * IDENTITY this will set things up so that S1DSS=bypass which
++		 * follows the explanation in "13.6.4 Full ATS skipping stage 1"
++		 * and allows for ATS on the RID to work.
+ 		 */
+ 		state.cd_needs_ats = true;
+ 		arm_smmu_attach_prepare(&state, domain);
+diff --git a/drivers/iommu/iommufd/iova_bitmap.c b/drivers/iommu/iommufd/iova_bitmap.c
+index d90b9e253412ff..2cdc4f542df472 100644
+--- a/drivers/iommu/iommufd/iova_bitmap.c
++++ b/drivers/iommu/iommufd/iova_bitmap.c
+@@ -130,7 +130,7 @@ struct iova_bitmap {
+ static unsigned long iova_bitmap_offset_to_index(struct iova_bitmap *bitmap,
+ 						 unsigned long iova)
+ {
+-	unsigned long pgsize = 1 << bitmap->mapped.pgshift;
++	unsigned long pgsize = 1UL << bitmap->mapped.pgshift;
+ 
+ 	return iova / (BITS_PER_TYPE(*bitmap->bitmap) * pgsize);
+ }
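
Note on the iova_bitmap one-character fix: the literal 1 is a 32-bit int, so for pgshift >= 31 the shift overflows before the result widens to unsigned long (formally undefined behaviour; on common toolchains the negative value sign-extends as shown). A minimal demonstration, assuming a 64-bit unsigned long:

#include <stdio.h>

int main(void)
{
	unsigned int pgshift = 31;	/* a large tracking granule */

	/* '1' is an int: 1 << 31 overflows, and the negative result
	 * sign-extends when widened to unsigned long. */
	unsigned long bad  = 1 << pgshift;
	unsigned long good = 1UL << pgshift;

	printf("1   << 31 -> %#lx\n", bad);	/* 0xffffffff80000000 */
	printf("1UL << 31 -> %#lx\n", good);	/* 0x80000000 */
	return 0;
}
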
+diff --git a/drivers/iommu/iommufd/main.c b/drivers/iommu/iommufd/main.c
+index b5f5d27ee9634e..649fe79d0f0cc6 100644
+--- a/drivers/iommu/iommufd/main.c
++++ b/drivers/iommu/iommufd/main.c
+@@ -130,7 +130,7 @@ static int iommufd_object_dec_wait_shortterm(struct iommufd_ctx *ictx,
+ 	if (wait_event_timeout(ictx->destroy_wait,
+ 				refcount_read(&to_destroy->shortterm_users) ==
+ 					0,
+-				msecs_to_jiffies(10000)))
++				msecs_to_jiffies(60000)))
+ 		return 0;
+ 
+ 	pr_crit("Time out waiting for iommufd object to become free\n");
+diff --git a/drivers/leds/leds-cht-wcove.c b/drivers/leds/leds-cht-wcove.c
+index b4998402b8c6f0..711ac4bd60580d 100644
+--- a/drivers/leds/leds-cht-wcove.c
++++ b/drivers/leds/leds-cht-wcove.c
+@@ -394,7 +394,7 @@ static int cht_wc_leds_probe(struct platform_device *pdev)
+ 		led->cdev.pattern_clear = cht_wc_leds_pattern_clear;
+ 		led->cdev.max_brightness = 255;
+ 
+-		ret = led_classdev_register(&pdev->dev, &led->cdev);
++		ret = devm_led_classdev_register(&pdev->dev, &led->cdev);
+ 		if (ret < 0)
+ 			return ret;
+ 	}
+@@ -406,10 +406,6 @@ static int cht_wc_leds_probe(struct platform_device *pdev)
+ static void cht_wc_leds_remove(struct platform_device *pdev)
+ {
+ 	struct cht_wc_leds *leds = platform_get_drvdata(pdev);
+-	int i;
+-
+-	for (i = 0; i < CHT_WC_LED_COUNT; i++)
+-		led_classdev_unregister(&leds->leds[i].cdev);
+ 
+ 	/* Restore LED1 regs if hw-control was active else leave LED1 off */
+ 	if (!(leds->led1_initial_regs.ctrl & CHT_WC_LED1_SWCTL))
+diff --git a/drivers/leds/leds-netxbig.c b/drivers/leds/leds-netxbig.c
+index af5a908b8d9edd..e95287416ef879 100644
+--- a/drivers/leds/leds-netxbig.c
++++ b/drivers/leds/leds-netxbig.c
+@@ -439,6 +439,7 @@ static int netxbig_leds_get_of_pdata(struct device *dev,
+ 	}
+ 	gpio_ext_pdev = of_find_device_by_node(gpio_ext_np);
+ 	if (!gpio_ext_pdev) {
++		of_node_put(gpio_ext_np);
+ 		dev_err(dev, "Failed to find platform device for gpio-ext\n");
+ 		return -ENODEV;
+ 	}
+diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
+index c3a42dd66ce551..2e3087556adb37 100644
+--- a/drivers/md/md-bitmap.c
++++ b/drivers/md/md-bitmap.c
+@@ -1671,24 +1671,13 @@ __acquires(bitmap->lock)
+ }
+ 
+ static int bitmap_startwrite(struct mddev *mddev, sector_t offset,
+-			     unsigned long sectors, bool behind)
++			     unsigned long sectors)
+ {
+ 	struct bitmap *bitmap = mddev->bitmap;
+ 
+ 	if (!bitmap)
+ 		return 0;
+ 
+-	if (behind) {
+-		int bw;
+-		atomic_inc(&bitmap->behind_writes);
+-		bw = atomic_read(&bitmap->behind_writes);
+-		if (bw > bitmap->behind_writes_used)
+-			bitmap->behind_writes_used = bw;
+-
+-		pr_debug("inc write-behind count %d/%lu\n",
+-			 bw, bitmap->mddev->bitmap_info.max_write_behind);
+-	}
+-
+ 	while (sectors) {
+ 		sector_t blocks;
+ 		bitmap_counter_t *bmc;
+@@ -1737,21 +1726,13 @@ static int bitmap_startwrite(struct mddev *mddev, sector_t offset,
+ }
+ 
+ static void bitmap_endwrite(struct mddev *mddev, sector_t offset,
+-			    unsigned long sectors, bool success, bool behind)
++			    unsigned long sectors)
+ {
+ 	struct bitmap *bitmap = mddev->bitmap;
+ 
+ 	if (!bitmap)
+ 		return;
+ 
+-	if (behind) {
+-		if (atomic_dec_and_test(&bitmap->behind_writes))
+-			wake_up(&bitmap->behind_wait);
+-		pr_debug("dec write-behind count %d/%lu\n",
+-			 atomic_read(&bitmap->behind_writes),
+-			 bitmap->mddev->bitmap_info.max_write_behind);
+-	}
+-
+ 	while (sectors) {
+ 		sector_t blocks;
+ 		unsigned long flags;
+@@ -1764,15 +1745,16 @@ static void bitmap_endwrite(struct mddev *mddev, sector_t offset,
+ 			return;
+ 		}
+ 
+-		if (success && !bitmap->mddev->degraded &&
+-		    bitmap->events_cleared < bitmap->mddev->events) {
+-			bitmap->events_cleared = bitmap->mddev->events;
+-			bitmap->need_sync = 1;
+-			sysfs_notify_dirent_safe(bitmap->sysfs_can_clear);
+-		}
+-
+-		if (!success && !NEEDED(*bmc))
++		if (!bitmap->mddev->degraded) {
++			if (bitmap->events_cleared < bitmap->mddev->events) {
++				bitmap->events_cleared = bitmap->mddev->events;
++				bitmap->need_sync = 1;
++				sysfs_notify_dirent_safe(
++						bitmap->sysfs_can_clear);
++			}
++		} else if (!NEEDED(*bmc)) {
+ 			*bmc |= NEEDED_MASK;
++		}
+ 
+ 		if (COUNTER(*bmc) == COUNTER_MAX)
+ 			wake_up(&bitmap->overflow_wait);
+@@ -2062,6 +2044,37 @@ static void md_bitmap_free(void *data)
+ 	kfree(bitmap);
+ }
+ 
++static void bitmap_start_behind_write(struct mddev *mddev)
++{
++	struct bitmap *bitmap = mddev->bitmap;
++	int bw;
++
++	if (!bitmap)
++		return;
++
++	atomic_inc(&bitmap->behind_writes);
++	bw = atomic_read(&bitmap->behind_writes);
++	if (bw > bitmap->behind_writes_used)
++		bitmap->behind_writes_used = bw;
++
++	pr_debug("inc write-behind count %d/%lu\n",
++		 bw, bitmap->mddev->bitmap_info.max_write_behind);
++}
++
++static void bitmap_end_behind_write(struct mddev *mddev)
++{
++	struct bitmap *bitmap = mddev->bitmap;
++
++	if (!bitmap)
++		return;
++
++	if (atomic_dec_and_test(&bitmap->behind_writes))
++		wake_up(&bitmap->behind_wait);
++	pr_debug("dec write-behind count %d/%lu\n",
++		 atomic_read(&bitmap->behind_writes),
++		 bitmap->mddev->bitmap_info.max_write_behind);
++}
++
+ static void bitmap_wait_behind_writes(struct mddev *mddev)
+ {
+ 	struct bitmap *bitmap = mddev->bitmap;
+@@ -2342,7 +2355,10 @@ static int bitmap_get_stats(void *data, struct md_bitmap_stats *stats)
+ 
+ 	if (!bitmap)
+ 		return -ENOENT;
+-
++	if (bitmap->mddev->bitmap_info.external)
++		return -ENOENT;
++	if (!bitmap->storage.sb_page) /* no superblock */
++		return -EINVAL;
+ 	sb = kmap_local_page(bitmap->storage.sb_page);
+ 	stats->sync_size = le64_to_cpu(sb->sync_size);
+ 	kunmap_local(sb);
+@@ -2981,6 +2997,9 @@ static struct bitmap_operations bitmap_ops = {
+ 	.dirty_bits		= bitmap_dirty_bits,
+ 	.unplug			= bitmap_unplug,
+ 	.daemon_work		= bitmap_daemon_work,
++
++	.start_behind_write	= bitmap_start_behind_write,
++	.end_behind_write	= bitmap_end_behind_write,
+ 	.wait_behind_writes	= bitmap_wait_behind_writes,
+ 
+ 	.startwrite		= bitmap_startwrite,
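
Note on the md-bitmap hunks: bitmap_start_behind_write()/bitmap_end_behind_write() lift the write-behind bookkeeping out of startwrite/endwrite into their own ops: an atomic count of in-flight behind bios, a high-water mark for sysfs, and a wake-up when the count drains to zero. A compact userspace model of that lifecycle using C11 atomics (single-threaded here, so the high-water update needs no extra care):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int behind_writes;
static int behind_writes_used;		/* high-water mark */

static void start_behind_write(void)
{
	int bw = atomic_fetch_add(&behind_writes, 1) + 1;

	if (bw > behind_writes_used)
		behind_writes_used = bw;
}

static void end_behind_write(void)
{
	/* previous value 1 means we just drained to zero */
	if (atomic_fetch_sub(&behind_writes, 1) == 1)
		printf("drained: wake_up(&behind_wait)\n");
}

int main(void)
{
	start_behind_write();
	start_behind_write();
	end_behind_write();
	end_behind_write();
	printf("high-water mark: %d\n", behind_writes_used);
	return 0;
}
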
+diff --git a/drivers/md/md-bitmap.h b/drivers/md/md-bitmap.h
+index 662e6fc141a775..31c93019c76bf3 100644
+--- a/drivers/md/md-bitmap.h
++++ b/drivers/md/md-bitmap.h
+@@ -84,12 +84,15 @@ struct bitmap_operations {
+ 			   unsigned long e);
+ 	void (*unplug)(struct mddev *mddev, bool sync);
+ 	void (*daemon_work)(struct mddev *mddev);
++
++	void (*start_behind_write)(struct mddev *mddev);
++	void (*end_behind_write)(struct mddev *mddev);
+ 	void (*wait_behind_writes)(struct mddev *mddev);
+ 
+ 	int (*startwrite)(struct mddev *mddev, sector_t offset,
+-			  unsigned long sectors, bool behind);
++			  unsigned long sectors);
+ 	void (*endwrite)(struct mddev *mddev, sector_t offset,
+-			 unsigned long sectors, bool success, bool behind);
++			 unsigned long sectors);
+ 	bool (*start_sync)(struct mddev *mddev, sector_t offset,
+ 			   sector_t *blocks, bool degraded);
+ 	void (*end_sync)(struct mddev *mddev, sector_t offset, sector_t *blocks);
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 67108c397c5a86..44c4c518430d9b 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -8376,6 +8376,10 @@ static int md_seq_show(struct seq_file *seq, void *v)
+ 		return 0;
+ 
+ 	spin_unlock(&all_mddevs_lock);
++
++	/* prevent bitmap to be freed after checking */
++	mutex_lock(&mddev->bitmap_info.mutex);
++
+ 	spin_lock(&mddev->lock);
+ 	if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
+ 		seq_printf(seq, "%s : ", mdname(mddev));
+@@ -8451,6 +8455,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
+ 		seq_printf(seq, "\n");
+ 	}
+ 	spin_unlock(&mddev->lock);
++	mutex_unlock(&mddev->bitmap_info.mutex);
+ 	spin_lock(&all_mddevs_lock);
+ 
+ 	if (mddev == list_last_entry(&all_mddevs, struct mddev, all_mddevs))
+@@ -8745,12 +8750,32 @@ void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev,
+ }
+ EXPORT_SYMBOL_GPL(md_submit_discard_bio);
+ 
++static void md_bitmap_start(struct mddev *mddev,
++			    struct md_io_clone *md_io_clone)
++{
++	if (mddev->pers->bitmap_sector)
++		mddev->pers->bitmap_sector(mddev, &md_io_clone->offset,
++					   &md_io_clone->sectors);
++
++	mddev->bitmap_ops->startwrite(mddev, md_io_clone->offset,
++				      md_io_clone->sectors);
++}
++
++static void md_bitmap_end(struct mddev *mddev, struct md_io_clone *md_io_clone)
++{
++	mddev->bitmap_ops->endwrite(mddev, md_io_clone->offset,
++				    md_io_clone->sectors);
++}
++
+ static void md_end_clone_io(struct bio *bio)
+ {
+ 	struct md_io_clone *md_io_clone = bio->bi_private;
+ 	struct bio *orig_bio = md_io_clone->orig_bio;
+ 	struct mddev *mddev = md_io_clone->mddev;
+ 
++	if (bio_data_dir(orig_bio) == WRITE && mddev->bitmap)
++		md_bitmap_end(mddev, md_io_clone);
++
+ 	if (bio->bi_status && !orig_bio->bi_status)
+ 		orig_bio->bi_status = bio->bi_status;
+ 
+@@ -8775,6 +8800,12 @@ static void md_clone_bio(struct mddev *mddev, struct bio **bio)
+ 	if (blk_queue_io_stat(bdev->bd_disk->queue))
+ 		md_io_clone->start_time = bio_start_io_acct(*bio);
+ 
++	if (bio_data_dir(*bio) == WRITE && mddev->bitmap) {
++		md_io_clone->offset = (*bio)->bi_iter.bi_sector;
++		md_io_clone->sectors = bio_sectors(*bio);
++		md_bitmap_start(mddev, md_io_clone);
++	}
++
+ 	clone->bi_end_io = md_end_clone_io;
+ 	clone->bi_private = md_io_clone;
+ 	*bio = clone;
+@@ -8793,6 +8824,9 @@ void md_free_cloned_bio(struct bio *bio)
+ 	struct bio *orig_bio = md_io_clone->orig_bio;
+ 	struct mddev *mddev = md_io_clone->mddev;
+ 
++	if (bio_data_dir(orig_bio) == WRITE && mddev->bitmap)
++		md_bitmap_end(mddev, md_io_clone);
++
+ 	if (bio->bi_status && !orig_bio->bi_status)
+ 		orig_bio->bi_status = bio->bi_status;
+ 
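Note on the md.c hunks: the bitmap "write pending" accounting moves into the generic clone path. md_clone_bio() records the range and calls startwrite for every WRITE, and both end-io paths call endwrite, so personalities no longer thread success/behind flags through. A toy userspace model of the paired accounting; the names mirror the patch, and the counter stands in for the bitmap state:

#include <assert.h>
#include <stdio.h>

struct io_clone { unsigned long offset, sectors; };
static int writes_pending;

static void md_bitmap_start(struct io_clone *c, unsigned long off,
			    unsigned long sectors)
{
	c->offset = off;		/* recorded at clone time */
	c->sectors = sectors;
	writes_pending++;		/* bitmap_ops->startwrite() */
}

static void md_bitmap_end(struct io_clone *c)
{
	(void)c;
	writes_pending--;		/* bitmap_ops->endwrite() */
}

int main(void)
{
	struct io_clone c;

	md_bitmap_start(&c, 2048, 8);	/* WRITE clone submitted */
	md_bitmap_end(&c);		/* end-io */
	assert(writes_pending == 0);
	printf("balanced start/end, offset=%lu sectors=%lu\n",
	       c.offset, c.sectors);
	return 0;
}
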
+diff --git a/drivers/md/md.h b/drivers/md/md.h
+index 5d2e6bd58e4da2..8826dce9717da9 100644
+--- a/drivers/md/md.h
++++ b/drivers/md/md.h
+@@ -746,6 +746,9 @@ struct md_personality
+ 	void *(*takeover) (struct mddev *mddev);
+ 	/* Changes the consistency policy of an active array. */
+ 	int (*change_consistency_policy)(struct mddev *mddev, const char *buf);
++	/* convert io ranges from array to bitmap */
++	void (*bitmap_sector)(struct mddev *mddev, sector_t *offset,
++			      unsigned long *sectors);
+ };
+ 
+ struct md_sysfs_entry {
+@@ -828,6 +831,8 @@ struct md_io_clone {
+ 	struct mddev	*mddev;
+ 	struct bio	*orig_bio;
+ 	unsigned long	start_time;
++	sector_t	offset;
++	unsigned long	sectors;
+ 	struct bio	bio_clone;
+ };
+ 
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index 6c9d24203f39f0..d83fe3b3abc009 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -420,10 +420,8 @@ static void close_write(struct r1bio *r1_bio)
+ 		r1_bio->behind_master_bio = NULL;
+ 	}
+ 
+-	/* clear the bitmap if all writes complete successfully */
+-	mddev->bitmap_ops->endwrite(mddev, r1_bio->sector, r1_bio->sectors,
+-				    !test_bit(R1BIO_Degraded, &r1_bio->state),
+-				    test_bit(R1BIO_BehindIO, &r1_bio->state));
++	if (test_bit(R1BIO_BehindIO, &r1_bio->state))
++		mddev->bitmap_ops->end_behind_write(mddev);
+ 	md_write_end(mddev);
+ }
+ 
+@@ -480,8 +478,6 @@ static void raid1_end_write_request(struct bio *bio)
+ 		if (!test_bit(Faulty, &rdev->flags))
+ 			set_bit(R1BIO_WriteError, &r1_bio->state);
+ 		else {
+-			/* Fail the request */
+-			set_bit(R1BIO_Degraded, &r1_bio->state);
+ 			/* Finished with this branch */
+ 			r1_bio->bios[mirror] = NULL;
+ 			to_put = bio;
+@@ -1492,11 +1488,8 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
+ 			break;
+ 		}
+ 		r1_bio->bios[i] = NULL;
+-		if (!rdev || test_bit(Faulty, &rdev->flags)) {
+-			if (i < conf->raid_disks)
+-				set_bit(R1BIO_Degraded, &r1_bio->state);
++		if (!rdev || test_bit(Faulty, &rdev->flags))
+ 			continue;
+-		}
+ 
+ 		atomic_inc(&rdev->nr_pending);
+ 		if (test_bit(WriteErrorSeen, &rdev->flags)) {
+@@ -1522,16 +1515,6 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
+ 					 */
+ 					max_sectors = bad_sectors;
+ 				rdev_dec_pending(rdev, mddev);
+-				/* We don't set R1BIO_Degraded as that
+-				 * only applies if the disk is
+-				 * missing, so it might be re-added,
+-				 * and we want to know to recover this
+-				 * chunk.
+-				 * In this case the device is here,
+-				 * and the fact that this chunk is not
+-				 * in-sync is recorded in the bad
+-				 * block log
+-				 */
+ 				continue;
+ 			}
+ 			if (is_bad) {
+@@ -1611,9 +1594,8 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
+ 			    stats.behind_writes < max_write_behind)
+ 				alloc_behind_master_bio(r1_bio, bio);
+ 
+-			mddev->bitmap_ops->startwrite(
+-				mddev, r1_bio->sector, r1_bio->sectors,
+-				test_bit(R1BIO_BehindIO, &r1_bio->state));
++			if (test_bit(R1BIO_BehindIO, &r1_bio->state))
++				mddev->bitmap_ops->start_behind_write(mddev);
+ 			first_clone = 0;
+ 		}
+ 
+@@ -2567,12 +2549,10 @@ static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
+ 			 * errors.
+ 			 */
+ 			fail = true;
+-			if (!narrow_write_error(r1_bio, m)) {
++			if (!narrow_write_error(r1_bio, m))
+ 				md_error(conf->mddev,
+ 					 conf->mirrors[m].rdev);
+ 				/* an I/O failed, we can't clear the bitmap */
+-				set_bit(R1BIO_Degraded, &r1_bio->state);
+-			}
+ 			rdev_dec_pending(conf->mirrors[m].rdev,
+ 					 conf->mddev);
+ 		}
+@@ -2663,8 +2643,6 @@ static void raid1d(struct md_thread *thread)
+ 			list_del(&r1_bio->retry_list);
+ 			idx = sector_to_idx(r1_bio->sector);
+ 			atomic_dec(&conf->nr_queued[idx]);
+-			if (mddev->degraded)
+-				set_bit(R1BIO_Degraded, &r1_bio->state);
+ 			if (test_bit(R1BIO_WriteError, &r1_bio->state))
+ 				close_write(r1_bio);
+ 			raid_end_bio_io(r1_bio);
+diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
+index 5300cbaa58a415..33f318fcc268d8 100644
+--- a/drivers/md/raid1.h
++++ b/drivers/md/raid1.h
+@@ -188,7 +188,6 @@ struct r1bio {
+ enum r1bio_state {
+ 	R1BIO_Uptodate,
+ 	R1BIO_IsSync,
+-	R1BIO_Degraded,
+ 	R1BIO_BehindIO,
+ /* Set ReadError on bios that experience a readerror so that
+  * raid1d knows what to do with them.
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index 862b1fb71d864b..daf42acc4fb6f3 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -428,10 +428,6 @@ static void close_write(struct r10bio *r10_bio)
+ {
+ 	struct mddev *mddev = r10_bio->mddev;
+ 
+-	/* clear the bitmap if all writes complete successfully */
+-	mddev->bitmap_ops->endwrite(mddev, r10_bio->sector, r10_bio->sectors,
+-				    !test_bit(R10BIO_Degraded, &r10_bio->state),
+-				    false);
+ 	md_write_end(mddev);
+ }
+ 
+@@ -501,7 +497,6 @@ static void raid10_end_write_request(struct bio *bio)
+ 				set_bit(R10BIO_WriteError, &r10_bio->state);
+ 			else {
+ 				/* Fail the request */
+-				set_bit(R10BIO_Degraded, &r10_bio->state);
+ 				r10_bio->devs[slot].bio = NULL;
+ 				to_put = bio;
+ 				dec_rdev = 1;
+@@ -1430,10 +1425,8 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
+ 		r10_bio->devs[i].bio = NULL;
+ 		r10_bio->devs[i].repl_bio = NULL;
+ 
+-		if (!rdev && !rrdev) {
+-			set_bit(R10BIO_Degraded, &r10_bio->state);
++		if (!rdev && !rrdev)
+ 			continue;
+-		}
+ 		if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) {
+ 			sector_t first_bad;
+ 			sector_t dev_sector = r10_bio->devs[i].addr;
+@@ -1450,14 +1443,6 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
+ 					 * to other devices yet
+ 					 */
+ 					max_sectors = bad_sectors;
+-				/* We don't set R10BIO_Degraded as that
+-				 * only applies if the disk is missing,
+-				 * so it might be re-added, and we want to
+-				 * know to recover this chunk.
+-				 * In this case the device is here, and the
+-				 * fact that this chunk is not in-sync is
+-				 * recorded in the bad block log.
+-				 */
+ 				continue;
+ 			}
+ 			if (is_bad) {
+@@ -1493,8 +1478,6 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
+ 	md_account_bio(mddev, &bio);
+ 	r10_bio->master_bio = bio;
+ 	atomic_set(&r10_bio->remaining, 1);
+-	mddev->bitmap_ops->startwrite(mddev, r10_bio->sector, r10_bio->sectors,
+-				      false);
+ 
+ 	for (i = 0; i < conf->copies; i++) {
+ 		if (r10_bio->devs[i].bio)
+@@ -2910,11 +2893,8 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
+ 				rdev_dec_pending(rdev, conf->mddev);
+ 			} else if (bio != NULL && bio->bi_status) {
+ 				fail = true;
+-				if (!narrow_write_error(r10_bio, m)) {
++				if (!narrow_write_error(r10_bio, m))
+ 					md_error(conf->mddev, rdev);
+-					set_bit(R10BIO_Degraded,
+-						&r10_bio->state);
+-				}
+ 				rdev_dec_pending(rdev, conf->mddev);
+ 			}
+ 			bio = r10_bio->devs[m].repl_bio;
+@@ -2973,8 +2953,6 @@ static void raid10d(struct md_thread *thread)
+ 			r10_bio = list_first_entry(&tmp, struct r10bio,
+ 						   retry_list);
+ 			list_del(&r10_bio->retry_list);
+-			if (mddev->degraded)
+-				set_bit(R10BIO_Degraded, &r10_bio->state);
+ 
+ 			if (test_bit(R10BIO_WriteError,
+ 				     &r10_bio->state))
+diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h
+index 2e75e88d08023f..3f16ad6904a9fb 100644
+--- a/drivers/md/raid10.h
++++ b/drivers/md/raid10.h
+@@ -161,7 +161,6 @@ enum r10bio_state {
+ 	R10BIO_IsSync,
+ 	R10BIO_IsRecover,
+ 	R10BIO_IsReshape,
+-	R10BIO_Degraded,
+ /* Set ReadError on bios that experience a read error
+  * so that raid10d knows what to do with them.
+  */
+diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
+index b4f7b79fd187d0..011246e16a99e5 100644
+--- a/drivers/md/raid5-cache.c
++++ b/drivers/md/raid5-cache.c
+@@ -313,10 +313,6 @@ void r5c_handle_cached_data_endio(struct r5conf *conf,
+ 		if (sh->dev[i].written) {
+ 			set_bit(R5_UPTODATE, &sh->dev[i].flags);
+ 			r5c_return_dev_pending_writes(conf, &sh->dev[i]);
+-			conf->mddev->bitmap_ops->endwrite(conf->mddev,
+-					sh->sector, RAID5_STRIPE_SECTORS(conf),
+-					!test_bit(STRIPE_DEGRADED, &sh->state),
+-					false);
+ 		}
+ 	}
+ }
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index 2fa1f270fb1d3c..39e7596e78c0b0 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -906,8 +906,7 @@ static bool stripe_can_batch(struct stripe_head *sh)
+ 	if (raid5_has_log(conf) || raid5_has_ppl(conf))
+ 		return false;
+ 	return test_bit(STRIPE_BATCH_READY, &sh->state) &&
+-		!test_bit(STRIPE_BITMAP_PENDING, &sh->state) &&
+-		is_full_stripe_write(sh);
++	       is_full_stripe_write(sh);
+ }
+ 
+ /* we only do back search */
+@@ -1345,8 +1344,6 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
+ 				submit_bio_noacct(rbi);
+ 		}
+ 		if (!rdev && !rrdev) {
+-			if (op_is_write(op))
+-				set_bit(STRIPE_DEGRADED, &sh->state);
+ 			pr_debug("skip op %d on disc %d for sector %llu\n",
+ 				bi->bi_opf, i, (unsigned long long)sh->sector);
+ 			clear_bit(R5_LOCKED, &sh->dev[i].flags);
+@@ -2884,7 +2881,6 @@ static void raid5_end_write_request(struct bio *bi)
+ 			set_bit(R5_MadeGoodRepl, &sh->dev[i].flags);
+ 	} else {
+ 		if (bi->bi_status) {
+-			set_bit(STRIPE_DEGRADED, &sh->state);
+ 			set_bit(WriteErrorSeen, &rdev->flags);
+ 			set_bit(R5_WriteError, &sh->dev[i].flags);
+ 			if (!test_and_set_bit(WantReplacement, &rdev->flags))
+@@ -3548,29 +3544,9 @@ static void __add_stripe_bio(struct stripe_head *sh, struct bio *bi,
+ 		 (*bip)->bi_iter.bi_sector, sh->sector, dd_idx,
+ 		 sh->dev[dd_idx].sector);
+ 
+-	if (conf->mddev->bitmap && firstwrite) {
+-		/* Cannot hold spinlock over bitmap_startwrite,
+-		 * but must ensure this isn't added to a batch until
+-		 * we have added to the bitmap and set bm_seq.
+-		 * So set STRIPE_BITMAP_PENDING to prevent
+-		 * batching.
+-		 * If multiple __add_stripe_bio() calls race here they
+-		 * much all set STRIPE_BITMAP_PENDING.  So only the first one
+-		 * to complete "bitmap_startwrite" gets to set
+-		 * STRIPE_BIT_DELAY.  This is important as once a stripe
+-		 * is added to a batch, STRIPE_BIT_DELAY cannot be changed
+-		 * any more.
+-		 */
+-		set_bit(STRIPE_BITMAP_PENDING, &sh->state);
+-		spin_unlock_irq(&sh->stripe_lock);
+-		conf->mddev->bitmap_ops->startwrite(conf->mddev, sh->sector,
+-					RAID5_STRIPE_SECTORS(conf), false);
+-		spin_lock_irq(&sh->stripe_lock);
+-		clear_bit(STRIPE_BITMAP_PENDING, &sh->state);
+-		if (!sh->batch_head) {
+-			sh->bm_seq = conf->seq_flush+1;
+-			set_bit(STRIPE_BIT_DELAY, &sh->state);
+-		}
++	if (conf->mddev->bitmap && firstwrite && !sh->batch_head) {
++		sh->bm_seq = conf->seq_flush+1;
++		set_bit(STRIPE_BIT_DELAY, &sh->state);
+ 	}
+ }
+ 
+@@ -3621,7 +3597,6 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
+ 	BUG_ON(sh->batch_head);
+ 	for (i = disks; i--; ) {
+ 		struct bio *bi;
+-		int bitmap_end = 0;
+ 
+ 		if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
+ 			struct md_rdev *rdev = conf->disks[i].rdev;
+@@ -3646,8 +3621,6 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
+ 		sh->dev[i].towrite = NULL;
+ 		sh->overwrite_disks = 0;
+ 		spin_unlock_irq(&sh->stripe_lock);
+-		if (bi)
+-			bitmap_end = 1;
+ 
+ 		log_stripe_write_finished(sh);
+ 
+@@ -3662,11 +3635,6 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
+ 			bio_io_error(bi);
+ 			bi = nextbi;
+ 		}
+-		if (bitmap_end)
+-			conf->mddev->bitmap_ops->endwrite(conf->mddev,
+-					sh->sector, RAID5_STRIPE_SECTORS(conf),
+-					false, false);
+-		bitmap_end = 0;
+ 		/* and fail all 'written' */
+ 		bi = sh->dev[i].written;
+ 		sh->dev[i].written = NULL;
+@@ -3675,7 +3643,6 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
+ 			sh->dev[i].page = sh->dev[i].orig_page;
+ 		}
+ 
+-		if (bi) bitmap_end = 1;
+ 		while (bi && bi->bi_iter.bi_sector <
+ 		       sh->dev[i].sector + RAID5_STRIPE_SECTORS(conf)) {
+ 			struct bio *bi2 = r5_next_bio(conf, bi, sh->dev[i].sector);
+@@ -3709,10 +3676,6 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
+ 				bi = nextbi;
+ 			}
+ 		}
+-		if (bitmap_end)
+-			conf->mddev->bitmap_ops->endwrite(conf->mddev,
+-					sh->sector, RAID5_STRIPE_SECTORS(conf),
+-					false, false);
+ 		/* If we were in the middle of a write the parity block might
+ 		 * still be locked - so just clear all R5_LOCKED flags
+ 		 */
+@@ -4061,10 +4024,7 @@ static void handle_stripe_clean_event(struct r5conf *conf,
+ 					bio_endio(wbi);
+ 					wbi = wbi2;
+ 				}
+-				conf->mddev->bitmap_ops->endwrite(conf->mddev,
+-					sh->sector, RAID5_STRIPE_SECTORS(conf),
+-					!test_bit(STRIPE_DEGRADED, &sh->state),
+-					false);
++
+ 				if (head_sh->batch_head) {
+ 					sh = list_first_entry(&sh->batch_list,
+ 							      struct stripe_head,
+@@ -4341,7 +4301,6 @@ static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh,
+ 		s->locked++;
+ 		set_bit(R5_Wantwrite, &dev->flags);
+ 
+-		clear_bit(STRIPE_DEGRADED, &sh->state);
+ 		set_bit(STRIPE_INSYNC, &sh->state);
+ 		break;
+ 	case check_state_run:
+@@ -4498,7 +4457,6 @@ static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
+ 			clear_bit(R5_Wantwrite, &dev->flags);
+ 			s->locked--;
+ 		}
+-		clear_bit(STRIPE_DEGRADED, &sh->state);
+ 
+ 		set_bit(STRIPE_INSYNC, &sh->state);
+ 		break;
+@@ -4892,8 +4850,7 @@ static void break_stripe_batch_list(struct stripe_head *head_sh,
+ 					  (1 << STRIPE_COMPUTE_RUN)  |
+ 					  (1 << STRIPE_DISCARD) |
+ 					  (1 << STRIPE_BATCH_READY) |
+-					  (1 << STRIPE_BATCH_ERR) |
+-					  (1 << STRIPE_BITMAP_PENDING)),
++					  (1 << STRIPE_BATCH_ERR)),
+ 			"stripe state: %lx\n", sh->state);
+ 		WARN_ONCE(head_sh->state & ((1 << STRIPE_DISCARD) |
+ 					      (1 << STRIPE_REPLACED)),
+@@ -4901,7 +4858,6 @@ static void break_stripe_batch_list(struct stripe_head *head_sh,
+ 
+ 		set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS |
+ 					    (1 << STRIPE_PREREAD_ACTIVE) |
+-					    (1 << STRIPE_DEGRADED) |
+ 					    (1 << STRIPE_ON_UNPLUG_LIST)),
+ 			      head_sh->state & (1 << STRIPE_INSYNC));
+ 
+@@ -5785,10 +5741,6 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
+ 		}
+ 		spin_unlock_irq(&sh->stripe_lock);
+ 		if (conf->mddev->bitmap) {
+-			for (d = 0; d < conf->raid_disks - conf->max_degraded;
+-			     d++)
+-				mddev->bitmap_ops->startwrite(mddev, sh->sector,
+-					RAID5_STRIPE_SECTORS(conf), false);
+ 			sh->bm_seq = conf->seq_flush + 1;
+ 			set_bit(STRIPE_BIT_DELAY, &sh->state);
+ 		}
+@@ -5929,6 +5881,54 @@ static enum reshape_loc get_reshape_loc(struct mddev *mddev,
+ 	return LOC_BEHIND_RESHAPE;
+ }
+ 
++static void raid5_bitmap_sector(struct mddev *mddev, sector_t *offset,
++				unsigned long *sectors)
++{
++	struct r5conf *conf = mddev->private;
++	sector_t start = *offset;
++	sector_t end = start + *sectors;
++	sector_t prev_start = start;
++	sector_t prev_end = end;
++	int sectors_per_chunk;
++	enum reshape_loc loc;
++	int dd_idx;
++
++	sectors_per_chunk = conf->chunk_sectors *
++		(conf->raid_disks - conf->max_degraded);
++	start = round_down(start, sectors_per_chunk);
++	end = round_up(end, sectors_per_chunk);
++
++	start = raid5_compute_sector(conf, start, 0, &dd_idx, NULL);
++	end = raid5_compute_sector(conf, end, 0, &dd_idx, NULL);
++
++	/*
++	 * For LOC_INSIDE_RESHAPE, this IO will wait for reshape to make
++	 * progress, hence it's the same as LOC_BEHIND_RESHAPE.
++	 */
++	loc = get_reshape_loc(mddev, conf, prev_start);
++	if (likely(loc != LOC_AHEAD_OF_RESHAPE)) {
++		*offset = start;
++		*sectors = end - start;
++		return;
++	}
++
++	sectors_per_chunk = conf->prev_chunk_sectors *
++		(conf->previous_raid_disks - conf->max_degraded);
++	prev_start = round_down(prev_start, sectors_per_chunk);
++	prev_end = round_down(prev_end, sectors_per_chunk);
++
++	prev_start = raid5_compute_sector(conf, prev_start, 1, &dd_idx, NULL);
++	prev_end = raid5_compute_sector(conf, prev_end, 1, &dd_idx, NULL);
++
++	/*
++	 * For LOC_AHEAD_OF_RESHAPE, reshape can make progress before this IO
++	 * is handled in make_stripe_request(); we can't know that here, hence
++	 * we set bits for both.
++	 */
++	*offset = min(start, prev_start);
++	*sectors = max(end, prev_end) - *offset;
++}
++
+ static enum stripe_result make_stripe_request(struct mddev *mddev,
+ 		struct r5conf *conf, struct stripe_request_ctx *ctx,
+ 		sector_t logical_sector, struct bio *bi)
+@@ -8977,6 +8977,7 @@ static struct md_personality raid6_personality =
+ 	.takeover	= raid6_takeover,
+ 	.change_consistency_policy = raid5_change_consistency_policy,
+ 	.prepare_suspend = raid5_prepare_suspend,
++	.bitmap_sector	= raid5_bitmap_sector,
+ };
+ static struct md_personality raid5_personality =
+ {
+@@ -9002,6 +9003,7 @@ static struct md_personality raid5_personality =
+ 	.takeover	= raid5_takeover,
+ 	.change_consistency_policy = raid5_change_consistency_policy,
+ 	.prepare_suspend = raid5_prepare_suspend,
++	.bitmap_sector	= raid5_bitmap_sector,
+ };
+ 
+ static struct md_personality raid4_personality =
+@@ -9028,6 +9030,7 @@ static struct md_personality raid4_personality =
+ 	.takeover	= raid4_takeover,
+ 	.change_consistency_policy = raid5_change_consistency_policy,
+ 	.prepare_suspend = raid5_prepare_suspend,
++	.bitmap_sector	= raid5_bitmap_sector,
+ };
+ 
+ static int __init raid5_init(void)
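
Note on raid5_bitmap_sector(): it widens a bitmap range to whole data-stripe chunks (and, around a reshape, to the union of the old and new geometry) before raid5_compute_sector() maps it to a device offset. The rounding arithmetic in isolation, with made-up geometry (64-sector chunks, 4 disks, 1 parity, hence 192 data sectors per full stripe); round_down/round_up are modulo-based stand-ins here:

#include <stdio.h>

#define round_down(x, y) ((x) - ((x) % (y)))
#define round_up(x, y)   round_down((x) + (y) - 1, (y))

int main(void)
{
	unsigned long chunk_sectors = 64, raid_disks = 4, max_degraded = 1;
	unsigned long per_chunk = chunk_sectors * (raid_disks - max_degraded);

	unsigned long start = 200, sectors = 16;	/* arbitrary write */
	unsigned long end = start + sectors;

	start = round_down(start, per_chunk);	/* 200 -> 192 */
	end = round_up(end, per_chunk);		/* 216 -> 384 */

	printf("full-stripe range: [%lu, %lu), %lu sectors\n",
	       start, end, end - start);
	return 0;
}
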
+diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
+index 896ecfc4afa6fa..2e42c1641049f9 100644
+--- a/drivers/md/raid5.h
++++ b/drivers/md/raid5.h
+@@ -358,7 +358,6 @@ enum {
+ 	STRIPE_REPLACED,
+ 	STRIPE_PREREAD_ACTIVE,
+ 	STRIPE_DELAYED,
+-	STRIPE_DEGRADED,
+ 	STRIPE_BIT_DELAY,
+ 	STRIPE_EXPANDING,
+ 	STRIPE_EXPAND_SOURCE,
+@@ -372,9 +371,6 @@ enum {
+ 	STRIPE_ON_RELEASE_LIST,
+ 	STRIPE_BATCH_READY,
+ 	STRIPE_BATCH_ERR,
+-	STRIPE_BITMAP_PENDING,	/* Being added to bitmap, don't add
+-				 * to batch yet.
+-				 */
+ 	STRIPE_LOG_TRAPPED,	/* trapped into log (see raid5-cache.c)
+ 				 * this bit is used in two scenarios:
+ 				 *
+diff --git a/drivers/media/i2c/imx290.c b/drivers/media/i2c/imx290.c
+index 458905dfb3e110..a87a265cd83957 100644
+--- a/drivers/media/i2c/imx290.c
++++ b/drivers/media/i2c/imx290.c
+@@ -269,7 +269,6 @@ static const struct cci_reg_sequence imx290_global_init_settings[] = {
+ 	{ IMX290_WINWV, 1097 },
+ 	{ IMX290_XSOUTSEL, IMX290_XSOUTSEL_XVSOUTSEL_VSYNC |
+ 			   IMX290_XSOUTSEL_XHSOUTSEL_HSYNC },
+-	{ CCI_REG8(0x3011), 0x02 },
+ 	{ CCI_REG8(0x3012), 0x64 },
+ 	{ CCI_REG8(0x3013), 0x00 },
+ };
+@@ -277,6 +276,7 @@ static const struct cci_reg_sequence imx290_global_init_settings[] = {
+ static const struct cci_reg_sequence imx290_global_init_settings_290[] = {
+ 	{ CCI_REG8(0x300f), 0x00 },
+ 	{ CCI_REG8(0x3010), 0x21 },
++	{ CCI_REG8(0x3011), 0x00 },
+ 	{ CCI_REG8(0x3016), 0x09 },
+ 	{ CCI_REG8(0x3070), 0x02 },
+ 	{ CCI_REG8(0x3071), 0x11 },
+@@ -330,6 +330,7 @@ static const struct cci_reg_sequence xclk_regs[][IMX290_NUM_CLK_REGS] = {
+ };
+ 
+ static const struct cci_reg_sequence imx290_global_init_settings_327[] = {
++	{ CCI_REG8(0x3011), 0x02 },
+ 	{ CCI_REG8(0x309e), 0x4A },
+ 	{ CCI_REG8(0x309f), 0x4A },
+ 	{ CCI_REG8(0x313b), 0x61 },
+diff --git a/drivers/media/i2c/imx412.c b/drivers/media/i2c/imx412.c
+index 0bfe3046fcc872..c74097a59c4285 100644
+--- a/drivers/media/i2c/imx412.c
++++ b/drivers/media/i2c/imx412.c
+@@ -547,7 +547,7 @@ static int imx412_update_exp_gain(struct imx412 *imx412, u32 exposure, u32 gain)
+ 
+ 	lpfr = imx412->vblank + imx412->cur_mode->height;
+ 
+-	dev_dbg(imx412->dev, "Set exp %u, analog gain %u, lpfr %u",
++	dev_dbg(imx412->dev, "Set exp %u, analog gain %u, lpfr %u\n",
+ 		exposure, gain, lpfr);
+ 
+ 	ret = imx412_write_reg(imx412, IMX412_REG_HOLD, 1, 1);
+@@ -594,7 +594,7 @@ static int imx412_set_ctrl(struct v4l2_ctrl *ctrl)
+ 	case V4L2_CID_VBLANK:
+ 		imx412->vblank = imx412->vblank_ctrl->val;
+ 
+-		dev_dbg(imx412->dev, "Received vblank %u, new lpfr %u",
++		dev_dbg(imx412->dev, "Received vblank %u, new lpfr %u\n",
+ 			imx412->vblank,
+ 			imx412->vblank + imx412->cur_mode->height);
+ 
+@@ -613,7 +613,7 @@ static int imx412_set_ctrl(struct v4l2_ctrl *ctrl)
+ 		exposure = ctrl->val;
+ 		analog_gain = imx412->again_ctrl->val;
+ 
+-		dev_dbg(imx412->dev, "Received exp %u, analog gain %u",
++		dev_dbg(imx412->dev, "Received exp %u, analog gain %u\n",
+ 			exposure, analog_gain);
+ 
+ 		ret = imx412_update_exp_gain(imx412, exposure, analog_gain);
+@@ -622,7 +622,7 @@ static int imx412_set_ctrl(struct v4l2_ctrl *ctrl)
+ 
+ 		break;
+ 	default:
+-		dev_err(imx412->dev, "Invalid control %d", ctrl->id);
++		dev_err(imx412->dev, "Invalid control %d\n", ctrl->id);
+ 		ret = -EINVAL;
+ 	}
+ 
+@@ -803,14 +803,14 @@ static int imx412_start_streaming(struct imx412 *imx412)
+ 	ret = imx412_write_regs(imx412, reg_list->regs,
+ 				reg_list->num_of_regs);
+ 	if (ret) {
+-		dev_err(imx412->dev, "fail to write initial registers");
++		dev_err(imx412->dev, "fail to write initial registers\n");
+ 		return ret;
+ 	}
+ 
+ 	/* Setup handler will write actual exposure and gain */
+ 	ret =  __v4l2_ctrl_handler_setup(imx412->sd.ctrl_handler);
+ 	if (ret) {
+-		dev_err(imx412->dev, "fail to setup handler");
++		dev_err(imx412->dev, "fail to setup handler\n");
+ 		return ret;
+ 	}
+ 
+@@ -821,7 +821,7 @@ static int imx412_start_streaming(struct imx412 *imx412)
+ 	ret = imx412_write_reg(imx412, IMX412_REG_MODE_SELECT,
+ 			       1, IMX412_MODE_STREAMING);
+ 	if (ret) {
+-		dev_err(imx412->dev, "fail to start streaming");
++		dev_err(imx412->dev, "fail to start streaming\n");
+ 		return ret;
+ 	}
+ 
+@@ -895,7 +895,7 @@ static int imx412_detect(struct imx412 *imx412)
+ 		return ret;
+ 
+ 	if (val != IMX412_ID) {
+-		dev_err(imx412->dev, "chip id mismatch: %x!=%x",
++		dev_err(imx412->dev, "chip id mismatch: %x!=%x\n",
+ 			IMX412_ID, val);
+ 		return -ENXIO;
+ 	}
+@@ -927,7 +927,7 @@ static int imx412_parse_hw_config(struct imx412 *imx412)
+ 	imx412->reset_gpio = devm_gpiod_get_optional(imx412->dev, "reset",
+ 						     GPIOD_OUT_LOW);
+ 	if (IS_ERR(imx412->reset_gpio)) {
+-		dev_err(imx412->dev, "failed to get reset gpio %ld",
++		dev_err(imx412->dev, "failed to get reset gpio %ld\n",
+ 			PTR_ERR(imx412->reset_gpio));
+ 		return PTR_ERR(imx412->reset_gpio);
+ 	}
+@@ -935,13 +935,13 @@ static int imx412_parse_hw_config(struct imx412 *imx412)
+ 	/* Get sensor input clock */
+ 	imx412->inclk = devm_clk_get(imx412->dev, NULL);
+ 	if (IS_ERR(imx412->inclk)) {
+-		dev_err(imx412->dev, "could not get inclk");
++		dev_err(imx412->dev, "could not get inclk\n");
+ 		return PTR_ERR(imx412->inclk);
+ 	}
+ 
+ 	rate = clk_get_rate(imx412->inclk);
+ 	if (rate != IMX412_INCLK_RATE) {
+-		dev_err(imx412->dev, "inclk frequency mismatch");
++		dev_err(imx412->dev, "inclk frequency mismatch\n");
+ 		return -EINVAL;
+ 	}
+ 
+@@ -966,14 +966,14 @@ static int imx412_parse_hw_config(struct imx412 *imx412)
+ 
+ 	if (bus_cfg.bus.mipi_csi2.num_data_lanes != IMX412_NUM_DATA_LANES) {
+ 		dev_err(imx412->dev,
+-			"number of CSI2 data lanes %d is not supported",
++			"number of CSI2 data lanes %d is not supported\n",
+ 			bus_cfg.bus.mipi_csi2.num_data_lanes);
+ 		ret = -EINVAL;
+ 		goto done_endpoint_free;
+ 	}
+ 
+ 	if (!bus_cfg.nr_of_link_frequencies) {
+-		dev_err(imx412->dev, "no link frequencies defined");
++		dev_err(imx412->dev, "no link frequencies defined\n");
+ 		ret = -EINVAL;
+ 		goto done_endpoint_free;
+ 	}
+@@ -1034,7 +1034,7 @@ static int imx412_power_on(struct device *dev)
+ 
+ 	ret = clk_prepare_enable(imx412->inclk);
+ 	if (ret) {
+-		dev_err(imx412->dev, "fail to enable inclk");
++		dev_err(imx412->dev, "fail to enable inclk\n");
+ 		goto error_reset;
+ 	}
+ 
+@@ -1145,7 +1145,7 @@ static int imx412_init_controls(struct imx412 *imx412)
+ 		imx412->hblank_ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ 
+ 	if (ctrl_hdlr->error) {
+-		dev_err(imx412->dev, "control init failed: %d",
++		dev_err(imx412->dev, "control init failed: %d\n",
+ 			ctrl_hdlr->error);
+ 		v4l2_ctrl_handler_free(ctrl_hdlr);
+ 		return ctrl_hdlr->error;
+@@ -1183,7 +1183,7 @@ static int imx412_probe(struct i2c_client *client)
+ 
+ 	ret = imx412_parse_hw_config(imx412);
+ 	if (ret) {
+-		dev_err(imx412->dev, "HW configuration is not supported");
++		dev_err(imx412->dev, "HW configuration is not supported\n");
+ 		return ret;
+ 	}
+ 
+@@ -1191,14 +1191,14 @@ static int imx412_probe(struct i2c_client *client)
+ 
+ 	ret = imx412_power_on(imx412->dev);
+ 	if (ret) {
+-		dev_err(imx412->dev, "failed to power-on the sensor");
++		dev_err(imx412->dev, "failed to power-on the sensor\n");
+ 		goto error_mutex_destroy;
+ 	}
+ 
+ 	/* Check module identity */
+ 	ret = imx412_detect(imx412);
+ 	if (ret) {
+-		dev_err(imx412->dev, "failed to find sensor: %d", ret);
++		dev_err(imx412->dev, "failed to find sensor: %d\n", ret);
+ 		goto error_power_off;
+ 	}
+ 
+@@ -1208,7 +1208,7 @@ static int imx412_probe(struct i2c_client *client)
+ 
+ 	ret = imx412_init_controls(imx412);
+ 	if (ret) {
+-		dev_err(imx412->dev, "failed to init controls: %d", ret);
++		dev_err(imx412->dev, "failed to init controls: %d\n", ret);
+ 		goto error_power_off;
+ 	}
+ 
+@@ -1222,14 +1222,14 @@ static int imx412_probe(struct i2c_client *client)
+ 	imx412->pad.flags = MEDIA_PAD_FL_SOURCE;
+ 	ret = media_entity_pads_init(&imx412->sd.entity, 1, &imx412->pad);
+ 	if (ret) {
+-		dev_err(imx412->dev, "failed to init entity pads: %d", ret);
++		dev_err(imx412->dev, "failed to init entity pads: %d\n", ret);
+ 		goto error_handler_free;
+ 	}
+ 
+ 	ret = v4l2_async_register_subdev_sensor(&imx412->sd);
+ 	if (ret < 0) {
+ 		dev_err(imx412->dev,
+-			"failed to register async subdev: %d", ret);
++			"failed to register async subdev: %d\n", ret);
+ 		goto error_media_entity;
+ 	}
+ 
+diff --git a/drivers/media/i2c/ov9282.c b/drivers/media/i2c/ov9282.c
+index 9f52af6f047f3c..87e5d7ce5a47ee 100644
+--- a/drivers/media/i2c/ov9282.c
++++ b/drivers/media/i2c/ov9282.c
+@@ -40,7 +40,7 @@
+ /* Exposure control */
+ #define OV9282_REG_EXPOSURE	0x3500
+ #define OV9282_EXPOSURE_MIN	1
+-#define OV9282_EXPOSURE_OFFSET	12
++#define OV9282_EXPOSURE_OFFSET	25
+ #define OV9282_EXPOSURE_STEP	1
+ #define OV9282_EXPOSURE_DEFAULT	0x0282
+ 
+diff --git a/drivers/media/platform/marvell/mcam-core.c b/drivers/media/platform/marvell/mcam-core.c
+index c81593c969e057..a62c3a484cb3ff 100644
+--- a/drivers/media/platform/marvell/mcam-core.c
++++ b/drivers/media/platform/marvell/mcam-core.c
+@@ -935,7 +935,12 @@ static int mclk_enable(struct clk_hw *hw)
+ 	ret = pm_runtime_resume_and_get(cam->dev);
+ 	if (ret < 0)
+ 		return ret;
+-	clk_enable(cam->clk[0]);
++	ret = clk_enable(cam->clk[0]);
++	if (ret) {
++		pm_runtime_put(cam->dev);
++		return ret;
++	}
++
+ 	mcam_reg_write(cam, REG_CLKCTRL, (mclk_src << 29) | mclk_div);
+ 	mcam_ctlr_power_up(cam);
+ 
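The mcam-core hunk applies the usual unwind rule: a resource acquired before the failing step (here the runtime-PM reference taken by pm_runtime_resume_and_get()) must be released before the clk_enable() error is propagated. A small user-space sketch of the pattern, with stub functions standing in for the PM and clock calls (not real APIs):

/* Unwind-in-reverse-order sketch: release what was acquired before the
 * step that failed. Helpers are stand-ins, not kernel functions.
 */
#include <stdio.h>

static int pm_get(void)          { puts("pm: get"); return 0; }
static void pm_put(void)         { puts("pm: put"); }
static int clk_enable_stub(void) { puts("clk: enable fails"); return -1; }

static int power_up(void)
{
	int ret = pm_get();
	if (ret)
		return ret;

	ret = clk_enable_stub();
	if (ret) {
		pm_put();	/* roll back the earlier acquisition */
		return ret;
	}
	return 0;
}

int main(void)
{
	return power_up() ? 1 : 0;
}
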
+diff --git a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
+index 1bf85c1cf96435..b8c9bb017fb5f6 100644
+--- a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
++++ b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
+@@ -2679,11 +2679,12 @@ static void mxc_jpeg_detach_pm_domains(struct mxc_jpeg_dev *jpeg)
+ 	int i;
+ 
+ 	for (i = 0; i < jpeg->num_domains; i++) {
+-		if (jpeg->pd_dev[i] && !pm_runtime_suspended(jpeg->pd_dev[i]))
++		if (!IS_ERR_OR_NULL(jpeg->pd_dev[i]) &&
++		    !pm_runtime_suspended(jpeg->pd_dev[i]))
+ 			pm_runtime_force_suspend(jpeg->pd_dev[i]);
+-		if (jpeg->pd_link[i] && !IS_ERR(jpeg->pd_link[i]))
++		if (!IS_ERR_OR_NULL(jpeg->pd_link[i]))
+ 			device_link_del(jpeg->pd_link[i]);
+-		if (jpeg->pd_dev[i] && !IS_ERR(jpeg->pd_dev[i]))
++		if (!IS_ERR_OR_NULL(jpeg->pd_dev[i]))
+ 			dev_pm_domain_detach(jpeg->pd_dev[i], true);
+ 		jpeg->pd_dev[i] = NULL;
+ 		jpeg->pd_link[i] = NULL;
+diff --git a/drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c b/drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c
+index 4091f1c0e78bdc..a71eb30323c8d2 100644
+--- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c
++++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c
+@@ -861,6 +861,7 @@ int mxc_isi_video_buffer_prepare(struct mxc_isi_dev *isi, struct vb2_buffer *vb2
+ 				 const struct mxc_isi_format_info *info,
+ 				 const struct v4l2_pix_format_mplane *pix)
+ {
++	struct vb2_v4l2_buffer *v4l2_buf = to_vb2_v4l2_buffer(vb2);
+ 	unsigned int i;
+ 
+ 	for (i = 0; i < info->mem_planes; i++) {
+@@ -875,6 +876,8 @@ int mxc_isi_video_buffer_prepare(struct mxc_isi_dev *isi, struct vb2_buffer *vb2
+ 		vb2_set_plane_payload(vb2, i, size);
+ 	}
+ 
++	v4l2_buf->field = pix->field;
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/media/platform/samsung/exynos4-is/mipi-csis.c b/drivers/media/platform/samsung/exynos4-is/mipi-csis.c
+index 4b9b20ba35041c..38c5f22b850b97 100644
+--- a/drivers/media/platform/samsung/exynos4-is/mipi-csis.c
++++ b/drivers/media/platform/samsung/exynos4-is/mipi-csis.c
+@@ -940,13 +940,19 @@ static int s5pcsis_pm_resume(struct device *dev, bool runtime)
+ 					       state->supplies);
+ 			goto unlock;
+ 		}
+-		clk_enable(state->clock[CSIS_CLK_GATE]);
++		ret = clk_enable(state->clock[CSIS_CLK_GATE]);
++		if (ret) {
++			phy_power_off(state->phy);
++			regulator_bulk_disable(CSIS_NUM_SUPPLIES,
++					       state->supplies);
++			goto unlock;
++		}
+ 	}
+ 	if (state->flags & ST_STREAMING)
+ 		s5pcsis_start_stream(state);
+ 
+ 	state->flags &= ~ST_SUSPENDED;
+- unlock:
++unlock:
+ 	mutex_unlock(&state->lock);
+ 	return ret ? -EAGAIN : 0;
+ }
+diff --git a/drivers/media/platform/samsung/s3c-camif/camif-core.c b/drivers/media/platform/samsung/s3c-camif/camif-core.c
+index e4529f666e2060..8c597dd01713a6 100644
+--- a/drivers/media/platform/samsung/s3c-camif/camif-core.c
++++ b/drivers/media/platform/samsung/s3c-camif/camif-core.c
+@@ -527,10 +527,19 @@ static void s3c_camif_remove(struct platform_device *pdev)
+ static int s3c_camif_runtime_resume(struct device *dev)
+ {
+ 	struct camif_dev *camif = dev_get_drvdata(dev);
++	int ret;
++
++	ret = clk_enable(camif->clock[CLK_GATE]);
++	if (ret)
++		return ret;
+ 
+-	clk_enable(camif->clock[CLK_GATE]);
+ 	/* null op on s3c244x */
+-	clk_enable(camif->clock[CLK_CAM]);
++	ret = clk_enable(camif->clock[CLK_CAM]);
++	if (ret) {
++		clk_disable(camif->clock[CLK_GATE]);
++		return ret;
++	}
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/media/rc/iguanair.c b/drivers/media/rc/iguanair.c
+index 276bf3c8a8cb49..8af94246e5916e 100644
+--- a/drivers/media/rc/iguanair.c
++++ b/drivers/media/rc/iguanair.c
+@@ -194,8 +194,10 @@ static int iguanair_send(struct iguanair *ir, unsigned size)
+ 	if (rc)
+ 		return rc;
+ 
+-	if (wait_for_completion_timeout(&ir->completion, TIMEOUT) == 0)
++	if (wait_for_completion_timeout(&ir->completion, TIMEOUT) == 0) {
++		usb_kill_urb(ir->urb_out);
+ 		return -ETIMEDOUT;
++	}
+ 
+ 	return rc;
+ }
+diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
+index 0d2c42819d3909..218f712f56b17c 100644
+--- a/drivers/media/usb/dvb-usb-v2/af9035.c
++++ b/drivers/media/usb/dvb-usb-v2/af9035.c
+@@ -322,13 +322,16 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
+ 			ret = -EOPNOTSUPP;
+ 		} else if ((msg[0].addr == state->af9033_i2c_addr[0]) ||
+ 			   (msg[0].addr == state->af9033_i2c_addr[1])) {
++			/* demod access via firmware interface */
++			u32 reg;
++
+ 			if (msg[0].len < 3 || msg[1].len < 1) {
+ 				ret = -EOPNOTSUPP;
+ 				goto unlock;
+ 			}
+-			/* demod access via firmware interface */
+-			u32 reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 |
+-					msg[0].buf[2];
++
++			reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 |
++				msg[0].buf[2];
+ 
+ 			if (msg[0].addr == state->af9033_i2c_addr[1])
+ 				reg |= 0x100000;
+@@ -385,13 +388,16 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
+ 			ret = -EOPNOTSUPP;
+ 		} else if ((msg[0].addr == state->af9033_i2c_addr[0]) ||
+ 			   (msg[0].addr == state->af9033_i2c_addr[1])) {
++			/* demod access via firmware interface */
++			u32 reg;
++
+ 			if (msg[0].len < 3) {
+ 				ret = -EOPNOTSUPP;
+ 				goto unlock;
+ 			}
+-			/* demod access via firmware interface */
+-			u32 reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 |
+-					msg[0].buf[2];
++
++			reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 |
++				msg[0].buf[2];
+ 
+ 			if (msg[0].addr == state->af9033_i2c_addr[1])
+ 				reg |= 0x100000;
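
Both af9035 hunks apply the same validate-before-parse rule: reject a short i2c message before assembling the 24-bit demod register address from its first three bytes, and only compute the address once the length check has passed. A user-space sketch of the pattern, with a simplified stand-in for struct i2c_msg:

/* Validate-before-parse sketch: length-check the message before building
 * the 24-bit register address. struct msg is a simplified stand-in.
 */
#include <stdint.h>
#include <stdio.h>
#include <errno.h>

struct msg {
	const uint8_t *buf;
	uint16_t len;
};

static int parse_reg(const struct msg *m, uint32_t *reg)
{
	if (m->len < 3)		/* reject short messages up front */
		return -EOPNOTSUPP;

	*reg = (uint32_t)m->buf[0] << 16 | (uint32_t)m->buf[1] << 8 |
	       m->buf[2];
	return 0;
}

int main(void)
{
	const uint8_t raw[] = { 0x01, 0x02, 0x03 };
	struct msg m = { raw, sizeof(raw) };
	uint32_t reg;

	if (!parse_reg(&m, &reg))
		printf("reg=0x%06x\n", reg);
	return 0;
}
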
+diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c
+index 8a34e6c0d6a6d1..f0537b741d1352 100644
+--- a/drivers/media/usb/dvb-usb-v2/lmedm04.c
++++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c
+@@ -373,6 +373,7 @@ static int lme2510_int_read(struct dvb_usb_adapter *adap)
+ 	struct dvb_usb_device *d = adap_to_d(adap);
+ 	struct lme2510_state *lme_int = adap_to_priv(adap);
+ 	struct usb_host_endpoint *ep;
++	int ret;
+ 
+ 	lme_int->lme_urb = usb_alloc_urb(0, GFP_KERNEL);
+ 
+@@ -390,11 +391,20 @@ static int lme2510_int_read(struct dvb_usb_adapter *adap)
+ 
+ 	/* Quirk of pipe reporting PIPE_BULK but behaves as interrupt */
+ 	ep = usb_pipe_endpoint(d->udev, lme_int->lme_urb->pipe);
++	if (!ep) {
++		usb_free_urb(lme_int->lme_urb);
++		return -ENODEV;
++	}
+ 
+ 	if (usb_endpoint_type(&ep->desc) == USB_ENDPOINT_XFER_BULK)
+ 		lme_int->lme_urb->pipe = usb_rcvbulkpipe(d->udev, 0xa);
+ 
+-	usb_submit_urb(lme_int->lme_urb, GFP_KERNEL);
++	ret = usb_submit_urb(lme_int->lme_urb, GFP_KERNEL);
++	if (ret) {
++		usb_free_urb(lme_int->lme_urb);
++		return ret;
++	}
++
+ 	info("INT Interrupt Service Started");
+ 
+ 	return 0;
+diff --git a/drivers/media/usb/uvc/uvc_queue.c b/drivers/media/usb/uvc/uvc_queue.c
+index 16fa17bbd15eaa..83ed7821fa2a77 100644
+--- a/drivers/media/usb/uvc/uvc_queue.c
++++ b/drivers/media/usb/uvc/uvc_queue.c
+@@ -483,7 +483,8 @@ static void uvc_queue_buffer_complete(struct kref *ref)
+ 
+ 	buf->state = buf->error ? UVC_BUF_STATE_ERROR : UVC_BUF_STATE_DONE;
+ 	vb2_set_plane_payload(&buf->buf.vb2_buf, 0, buf->bytesused);
+-	vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE);
++	vb2_buffer_done(&buf->buf.vb2_buf, buf->error ? VB2_BUF_STATE_ERROR :
++							VB2_BUF_STATE_DONE);
+ }
+ 
+ /*
+diff --git a/drivers/media/usb/uvc/uvc_status.c b/drivers/media/usb/uvc/uvc_status.c
+index a78a88c710e24a..b5f6682ff38311 100644
+--- a/drivers/media/usb/uvc/uvc_status.c
++++ b/drivers/media/usb/uvc/uvc_status.c
+@@ -269,6 +269,7 @@ int uvc_status_init(struct uvc_device *dev)
+ 	dev->int_urb = usb_alloc_urb(0, GFP_KERNEL);
+ 	if (!dev->int_urb) {
+ 		kfree(dev->status);
++		dev->status = NULL;
+ 		return -ENOMEM;
+ 	}
+ 
+diff --git a/drivers/memory/tegra/tegra20-emc.c b/drivers/memory/tegra/tegra20-emc.c
+index 7193f848d17e66..9b7d30a21a5bd0 100644
+--- a/drivers/memory/tegra/tegra20-emc.c
++++ b/drivers/memory/tegra/tegra20-emc.c
+@@ -474,14 +474,15 @@ tegra_emc_find_node_by_ram_code(struct tegra_emc *emc)
+ 
+ 	ram_code = tegra_read_ram_code();
+ 
+-	for (np = of_find_node_by_name(dev->of_node, "emc-tables"); np;
+-	     np = of_find_node_by_name(np, "emc-tables")) {
++	for_each_child_of_node(dev->of_node, np) {
++		if (!of_node_name_eq(np, "emc-tables"))
++			continue;
+ 		err = of_property_read_u32(np, "nvidia,ram-code", &value);
+ 		if (err || value != ram_code) {
+ 			struct device_node *lpddr2_np;
+ 			bool cfg_mismatches = false;
+ 
+-			lpddr2_np = of_find_node_by_name(np, "lpddr2");
++			lpddr2_np = of_get_child_by_name(np, "lpddr2");
+ 			if (lpddr2_np) {
+ 				const struct lpddr2_info *info;
+ 
+@@ -518,7 +519,6 @@ tegra_emc_find_node_by_ram_code(struct tegra_emc *emc)
+ 			}
+ 
+ 			if (cfg_mismatches) {
+-				of_node_put(np);
+ 				continue;
+ 			}
+ 		}
+diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c
+index 2ce15f60eb1071..729e79e1be49fa 100644
+--- a/drivers/mfd/syscon.c
++++ b/drivers/mfd/syscon.c
+@@ -15,6 +15,7 @@
+ #include <linux/io.h>
+ #include <linux/init.h>
+ #include <linux/list.h>
++#include <linux/mutex.h>
+ #include <linux/of.h>
+ #include <linux/of_address.h>
+ #include <linux/of_platform.h>
+@@ -27,7 +28,7 @@
+ 
+ static struct platform_driver syscon_driver;
+ 
+-static DEFINE_SPINLOCK(syscon_list_slock);
++static DEFINE_MUTEX(syscon_list_lock);
+ static LIST_HEAD(syscon_list);
+ 
+ struct syscon {
+@@ -54,6 +55,8 @@ static struct syscon *of_syscon_register(struct device_node *np, bool check_res)
+ 	struct resource res;
+ 	struct reset_control *reset;
+ 
++	WARN_ON(!mutex_is_locked(&syscon_list_lock));
++
+ 	struct syscon *syscon __free(kfree) = kzalloc(sizeof(*syscon), GFP_KERNEL);
+ 	if (!syscon)
+ 		return ERR_PTR(-ENOMEM);
+@@ -144,9 +147,7 @@ static struct syscon *of_syscon_register(struct device_node *np, bool check_res)
+ 	syscon->regmap = regmap;
+ 	syscon->np = np;
+ 
+-	spin_lock(&syscon_list_slock);
+ 	list_add_tail(&syscon->list, &syscon_list);
+-	spin_unlock(&syscon_list_slock);
+ 
+ 	return_ptr(syscon);
+ 
+@@ -167,7 +168,7 @@ static struct regmap *device_node_get_regmap(struct device_node *np,
+ {
+ 	struct syscon *entry, *syscon = NULL;
+ 
+-	spin_lock(&syscon_list_slock);
++	mutex_lock(&syscon_list_lock);
+ 
+ 	list_for_each_entry(entry, &syscon_list, list)
+ 		if (entry->np == np) {
+@@ -175,11 +176,11 @@ static struct regmap *device_node_get_regmap(struct device_node *np,
+ 			break;
+ 		}
+ 
+-	spin_unlock(&syscon_list_slock);
+-
+ 	if (!syscon)
+ 		syscon = of_syscon_register(np, check_res);
+ 
++	mutex_unlock(&syscon_list_lock);
++
+ 	if (IS_ERR(syscon))
+ 		return ERR_CAST(syscon);
+ 
+@@ -210,7 +211,7 @@ int of_syscon_register_regmap(struct device_node *np, struct regmap *regmap)
+ 		return -ENOMEM;
+ 
+ 	/* check if syscon entry already exists */
+-	spin_lock(&syscon_list_slock);
++	mutex_lock(&syscon_list_lock);
+ 
+ 	list_for_each_entry(entry, &syscon_list, list)
+ 		if (entry->np == np) {
+@@ -223,12 +224,12 @@ int of_syscon_register_regmap(struct device_node *np, struct regmap *regmap)
+ 
+ 	/* register the regmap in syscon list */
+ 	list_add_tail(&syscon->list, &syscon_list);
+-	spin_unlock(&syscon_list_slock);
++	mutex_unlock(&syscon_list_lock);
+ 
+ 	return 0;
+ 
+ err_unlock:
+-	spin_unlock(&syscon_list_slock);
++	mutex_unlock(&syscon_list_lock);
+ 	kfree(syscon);
+ 	return ret;
+ }
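
The syscon conversion from a spinlock to a mutex lets the lookup and the registration sit in one critical section (of_syscon_register() allocates with GFP_KERNEL and can sleep, so a spinlock could not be held across it); without that, two concurrent callers could both miss the list entry and both register the same node. A user-space analogue of the lookup-or-register pattern, using pthreads and a hypothetical keyed list:

/* Lookup-or-register under one mutex, so two threads cannot both miss
 * and both insert. List and entry types are hypothetical stand-ins.
 */
#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>

struct entry {
	const char *key;
	struct entry *next;
};

static struct entry *entries;
static pthread_mutex_t entries_lock = PTHREAD_MUTEX_INITIALIZER;

static struct entry *lookup_locked(const char *key)
{
	for (struct entry *e = entries; e; e = e->next)
		if (!strcmp(e->key, key))
			return e;
	return NULL;
}

static struct entry *get_or_register(const char *key)
{
	struct entry *e;

	pthread_mutex_lock(&entries_lock);
	e = lookup_locked(key);
	if (!e) {
		e = calloc(1, sizeof(*e));	/* may sleep; mutex is fine */
		if (e) {
			e->key = key;
			e->next = entries;
			entries = e;
		}
	}
	pthread_mutex_unlock(&entries_lock);
	return e;
}

int main(void)
{
	printf("%p == %p\n", (void *)get_or_register("a"),
	       (void *)get_or_register("a"));
	return 0;
}

Holding one lock across both steps is also what makes the WARN_ON(!mutex_is_locked(&syscon_list_lock)) added to of_syscon_register() a valid invariant.
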
+diff --git a/drivers/misc/cardreader/rtsx_usb.c b/drivers/misc/cardreader/rtsx_usb.c
+index f150d8769f1986..285a748748d701 100644
+--- a/drivers/misc/cardreader/rtsx_usb.c
++++ b/drivers/misc/cardreader/rtsx_usb.c
+@@ -286,6 +286,7 @@ static int rtsx_usb_get_status_with_bulk(struct rtsx_ucr *ucr, u16 *status)
+ int rtsx_usb_get_card_status(struct rtsx_ucr *ucr, u16 *status)
+ {
+ 	int ret;
++	u8 interrupt_val = 0;
+ 	u16 *buf;
+ 
+ 	if (!status)
+@@ -308,6 +309,20 @@ int rtsx_usb_get_card_status(struct rtsx_ucr *ucr, u16 *status)
+ 		ret = rtsx_usb_get_status_with_bulk(ucr, status);
+ 	}
+ 
++	rtsx_usb_read_register(ucr, CARD_INT_PEND, &interrupt_val);
++	/* Cross-check presence with interrupts */
++	if (*status & XD_CD)
++		if (!(interrupt_val & XD_INT))
++			*status &= ~XD_CD;
++
++	if (*status & SD_CD)
++		if (!(interrupt_val & SD_INT))
++			*status &= ~SD_CD;
++
++	if (*status & MS_CD)
++		if (!(interrupt_val & MS_INT))
++			*status &= ~MS_CD;
++
+ 	/* usb_control_msg may return positive when success */
+ 	if (ret < 0)
+ 		return ret;
+diff --git a/drivers/mtd/hyperbus/hbmc-am654.c b/drivers/mtd/hyperbus/hbmc-am654.c
+index dbe3eb361cca28..4b6cbee23fe893 100644
+--- a/drivers/mtd/hyperbus/hbmc-am654.c
++++ b/drivers/mtd/hyperbus/hbmc-am654.c
+@@ -174,26 +174,30 @@ static int am654_hbmc_probe(struct platform_device *pdev)
+ 	priv->hbdev.np = of_get_next_child(np, NULL);
+ 	ret = of_address_to_resource(priv->hbdev.np, 0, &res);
+ 	if (ret)
+-		return ret;
++		goto put_node;
+ 
+ 	if (of_property_read_bool(dev->of_node, "mux-controls")) {
+ 		struct mux_control *control = devm_mux_control_get(dev, NULL);
+ 
+-		if (IS_ERR(control))
+-			return PTR_ERR(control);
++		if (IS_ERR(control)) {
++			ret = PTR_ERR(control);
++			goto put_node;
++		}
+ 
+ 		ret = mux_control_select(control, 1);
+ 		if (ret) {
+ 			dev_err(dev, "Failed to select HBMC mux\n");
+-			return ret;
++			goto put_node;
+ 		}
+ 		priv->mux_ctrl = control;
+ 	}
+ 
+ 	priv->hbdev.map.size = resource_size(&res);
+ 	priv->hbdev.map.virt = devm_ioremap_resource(dev, &res);
+-	if (IS_ERR(priv->hbdev.map.virt))
+-		return PTR_ERR(priv->hbdev.map.virt);
++	if (IS_ERR(priv->hbdev.map.virt)) {
++		ret = PTR_ERR(priv->hbdev.map.virt);
++		goto disable_mux;
++	}
+ 
+ 	priv->ctlr.dev = dev;
+ 	priv->ctlr.ops = &am654_hbmc_ops;
+@@ -226,6 +230,8 @@ static int am654_hbmc_probe(struct platform_device *pdev)
+ disable_mux:
+ 	if (priv->mux_ctrl)
+ 		mux_control_deselect(priv->mux_ctrl);
++put_node:
++	of_node_put(priv->hbdev.np);
+ 	return ret;
+ }
+ 
+@@ -241,6 +247,7 @@ static void am654_hbmc_remove(struct platform_device *pdev)
+ 
+ 	if (dev_priv->rx_chan)
+ 		dma_release_channel(dev_priv->rx_chan);
++	of_node_put(priv->hbdev.np);
+ }
+ 
+ static const struct of_device_id am654_hbmc_dt_ids[] = {
+diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+index 1b2ec0fec60c7a..e76df6a00ed4f5 100644
+--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
++++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+@@ -2342,6 +2342,11 @@ static int brcmnand_write(struct mtd_info *mtd, struct nand_chip *chip,
+ 		brcmnand_send_cmd(host, CMD_PROGRAM_PAGE);
+ 		status = brcmnand_waitfunc(chip);
+ 
++		if (status < 0) {
++			ret = status;
++			goto out;
++		}
++
+ 		if (status & NAND_STATUS_FAIL) {
+ 			dev_info(ctrl->dev, "program failed at %llx\n",
+ 				(unsigned long long)addr);
+diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
+index d73ef262991d61..6fee9a41839c0b 100644
+--- a/drivers/net/ethernet/broadcom/bgmac.h
++++ b/drivers/net/ethernet/broadcom/bgmac.h
+@@ -328,8 +328,7 @@
+ #define BGMAC_RX_FRAME_OFFSET			30		/* There are 2 unused bytes between header and real data */
+ #define BGMAC_RX_BUF_OFFSET			(NET_SKB_PAD + NET_IP_ALIGN - \
+ 						 BGMAC_RX_FRAME_OFFSET)
+-/* Jumbo frame size with FCS */
+-#define BGMAC_RX_MAX_FRAME_SIZE			9724
++#define BGMAC_RX_MAX_FRAME_SIZE			1536
+ #define BGMAC_RX_BUF_SIZE			(BGMAC_RX_FRAME_OFFSET + BGMAC_RX_MAX_FRAME_SIZE)
+ #define BGMAC_RX_ALLOC_SIZE			(SKB_DATA_ALIGN(BGMAC_RX_BUF_SIZE + BGMAC_RX_BUF_OFFSET) + \
+ 						 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
+index 150cc94ae9f884..25a604379b2f43 100644
+--- a/drivers/net/ethernet/davicom/dm9000.c
++++ b/drivers/net/ethernet/davicom/dm9000.c
+@@ -1777,10 +1777,11 @@ static void dm9000_drv_remove(struct platform_device *pdev)
+ 
+ 	unregister_netdev(ndev);
+ 	dm9000_release_board(pdev, dm);
+-	free_netdev(ndev);		/* free device structure */
+ 	if (dm->power_supply)
+ 		regulator_disable(dm->power_supply);
+ 
++	free_netdev(ndev);		/* free device structure */
++
+ 	dev_dbg(&pdev->dev, "released and freed device\n");
+ }
+ 
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index 49d1748e0c043d..2b05d9c6c21a43 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -840,6 +840,8 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
+ 	struct fec_enet_private *fep = netdev_priv(ndev);
+ 	int hdr_len, total_len, data_left;
+ 	struct bufdesc *bdp = txq->bd.cur;
++	struct bufdesc *tmp_bdp;
++	struct bufdesc_ex *ebdp;
+ 	struct tso_t tso;
+ 	unsigned int index = 0;
+ 	int ret;
+@@ -913,7 +915,34 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
+ 	return 0;
+ 
+ err_release:
+-	/* TODO: Release all used data descriptors for TSO */
++	/* Release all used data descriptors for TSO */
++	tmp_bdp = txq->bd.cur;
++
++	while (tmp_bdp != bdp) {
++		/* Unmap data buffers */
++		if (tmp_bdp->cbd_bufaddr &&
++		    !IS_TSO_HEADER(txq, fec32_to_cpu(tmp_bdp->cbd_bufaddr)))
++			dma_unmap_single(&fep->pdev->dev,
++					 fec32_to_cpu(tmp_bdp->cbd_bufaddr),
++					 fec16_to_cpu(tmp_bdp->cbd_datlen),
++					 DMA_TO_DEVICE);
++
++		/* Clear standard buffer descriptor fields */
++		tmp_bdp->cbd_sc = 0;
++		tmp_bdp->cbd_datlen = 0;
++		tmp_bdp->cbd_bufaddr = 0;
++
++		/* Handle extended descriptor if enabled */
++		if (fep->bufdesc_ex) {
++			ebdp = (struct bufdesc_ex *)tmp_bdp;
++			ebdp->cbd_esc = 0;
++		}
++
++		tmp_bdp = fec_enet_get_nextdesc(tmp_bdp, &txq->bd);
++	}
++
++	dev_kfree_skb_any(skb);
++
+ 	return ret;
+ }
+ 
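The new err_release path walks the transmit ring from the first descriptor the failed TSO frame consumed (txq->bd.cur) up to, but not including, the current position, unmapping and clearing each one. A user-space sketch of that ring unwind, with a plain array standing in for the hardware buffer descriptors:

/* Ring-unwind sketch: release descriptors from the frame's first slot up
 * to the current slot, wrapping like fec_enet_get_nextdesc() does.
 */
#include <stdio.h>

#define RING_SIZE 8

struct desc {
	unsigned int busy;	/* stands in for cbd_sc/cbd_bufaddr state */
};

static unsigned int ring_next(unsigned int i)
{
	return (i + 1) % RING_SIZE;
}

static void unwind(struct desc *ring, unsigned int first, unsigned int cur)
{
	for (unsigned int i = first; i != cur; i = ring_next(i)) {
		ring[i].busy = 0;	/* unmap + clear in the real driver */
		printf("released desc %u\n", i);
	}
}

int main(void)
{
	struct desc ring[RING_SIZE] = { 0 };

	/* Pretend descriptors 6, 7, 0, 1 were consumed before the failure. */
	for (unsigned int i = 6; i != 2; i = ring_next(i))
		ring[i].busy = 1;
	unwind(ring, 6, 2);
	return 0;
}
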
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
+index 9a63fbc6940831..b25fb400f4767e 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
+@@ -40,6 +40,21 @@ EXPORT_SYMBOL(hnae3_unregister_ae_algo_prepare);
+  */
+ static DEFINE_MUTEX(hnae3_common_lock);
+ 
++/* ensure the drivers are unloaded one by one */
++static DEFINE_MUTEX(hnae3_unload_lock);
++
++void hnae3_acquire_unload_lock(void)
++{
++	mutex_lock(&hnae3_unload_lock);
++}
++EXPORT_SYMBOL(hnae3_acquire_unload_lock);
++
++void hnae3_release_unload_lock(void)
++{
++	mutex_unlock(&hnae3_unload_lock);
++}
++EXPORT_SYMBOL(hnae3_release_unload_lock);
++
+ static bool hnae3_client_match(enum hnae3_client_type client_type)
+ {
+ 	if (client_type == HNAE3_CLIENT_KNIC ||
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+index d873523e84f271..388c70331a55b5 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+@@ -963,4 +963,6 @@ int hnae3_register_client(struct hnae3_client *client);
+ void hnae3_set_client_init_flag(struct hnae3_client *client,
+ 				struct hnae3_ae_dev *ae_dev,
+ 				unsigned int inited);
++void hnae3_acquire_unload_lock(void);
++void hnae3_release_unload_lock(void);
+ #endif
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+index 73825b6bd485d1..dc60ac3bde7f2c 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -6002,9 +6002,11 @@ module_init(hns3_init_module);
+  */
+ static void __exit hns3_exit_module(void)
+ {
++	hnae3_acquire_unload_lock();
+ 	pci_unregister_driver(&hns3_driver);
+ 	hnae3_unregister_client(&client);
+ 	hns3_dbg_unregister_debugfs();
++	hnae3_release_unload_lock();
+ }
+ module_exit(hns3_exit_module);
+ 
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+index 9a67fe0554a52b..06eedf80cfac4f 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -12929,9 +12929,11 @@ static int __init hclge_init(void)
+ 
+ static void __exit hclge_exit(void)
+ {
++	hnae3_acquire_unload_lock();
+ 	hnae3_unregister_ae_algo_prepare(&ae_algo);
+ 	hnae3_unregister_ae_algo(&ae_algo);
+ 	destroy_workqueue(hclge_wq);
++	hnae3_release_unload_lock();
+ }
+ module_init(hclge_init);
+ module_exit(hclge_exit);
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+index d47bd8d6145f97..fd5992164846b1 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+@@ -3412,8 +3412,10 @@ static int __init hclgevf_init(void)
+ 
+ static void __exit hclgevf_exit(void)
+ {
++	hnae3_acquire_unload_lock();
+ 	hnae3_unregister_ae_algo(&ae_algovf);
+ 	destroy_workqueue(hclgevf_wq);
++	hnae3_release_unload_lock();
+ }
+ module_init(hclgevf_init);
+ module_exit(hclgevf_exit);
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
+index f782402cd78986..5516795cc250a8 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
+@@ -773,6 +773,11 @@ iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter,
+ 		f->state = IAVF_VLAN_ADD;
+ 		adapter->num_vlan_filters++;
+ 		iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_VLAN_FILTER);
++	} else if (f->state == IAVF_VLAN_REMOVE) {
++		/* IAVF_VLAN_REMOVE means that VLAN wasn't yet removed.
++		 * We can safely just change the state here.
++		 */
++		f->state = IAVF_VLAN_ACTIVE;
+ 	}
+ 
+ clearout:
+@@ -793,8 +798,18 @@ static void iavf_del_vlan(struct iavf_adapter *adapter, struct iavf_vlan vlan)
+ 
+ 	f = iavf_find_vlan(adapter, vlan);
+ 	if (f) {
+-		f->state = IAVF_VLAN_REMOVE;
+-		iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_DEL_VLAN_FILTER);
++		/* IAVF_VLAN_ADD means that the VLAN wasn't even added yet.
++		 * Remove it from the list.
++		 */
++		if (f->state == IAVF_VLAN_ADD) {
++			list_del(&f->list);
++			kfree(f);
++			adapter->num_vlan_filters--;
++		} else {
++			f->state = IAVF_VLAN_REMOVE;
++			iavf_schedule_aq_request(adapter,
++						 IAVF_FLAG_AQ_DEL_VLAN_FILTER);
++		}
+ 	}
+ 
+ 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
+diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+index 80f3dfd2712430..66ae0352c6bca0 100644
+--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
++++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+@@ -1491,7 +1491,23 @@ struct ice_aqc_dnl_equa_param {
+ #define ICE_AQC_RX_EQU_POST1 (0x12 << ICE_AQC_RX_EQU_SHIFT)
+ #define ICE_AQC_RX_EQU_BFLF (0x13 << ICE_AQC_RX_EQU_SHIFT)
+ #define ICE_AQC_RX_EQU_BFHF (0x14 << ICE_AQC_RX_EQU_SHIFT)
+-#define ICE_AQC_RX_EQU_DRATE (0x15 << ICE_AQC_RX_EQU_SHIFT)
++#define ICE_AQC_RX_EQU_CTLE_GAINHF (0x20 << ICE_AQC_RX_EQU_SHIFT)
++#define ICE_AQC_RX_EQU_CTLE_GAINLF (0x21 << ICE_AQC_RX_EQU_SHIFT)
++#define ICE_AQC_RX_EQU_CTLE_GAINDC (0x22 << ICE_AQC_RX_EQU_SHIFT)
++#define ICE_AQC_RX_EQU_CTLE_BW (0x23 << ICE_AQC_RX_EQU_SHIFT)
++#define ICE_AQC_RX_EQU_DFE_GAIN (0x30 << ICE_AQC_RX_EQU_SHIFT)
++#define ICE_AQC_RX_EQU_DFE_GAIN2 (0x31 << ICE_AQC_RX_EQU_SHIFT)
++#define ICE_AQC_RX_EQU_DFE_2 (0x32 << ICE_AQC_RX_EQU_SHIFT)
++#define ICE_AQC_RX_EQU_DFE_3 (0x33 << ICE_AQC_RX_EQU_SHIFT)
++#define ICE_AQC_RX_EQU_DFE_4 (0x34 << ICE_AQC_RX_EQU_SHIFT)
++#define ICE_AQC_RX_EQU_DFE_5 (0x35 << ICE_AQC_RX_EQU_SHIFT)
++#define ICE_AQC_RX_EQU_DFE_6 (0x36 << ICE_AQC_RX_EQU_SHIFT)
++#define ICE_AQC_RX_EQU_DFE_7 (0x37 << ICE_AQC_RX_EQU_SHIFT)
++#define ICE_AQC_RX_EQU_DFE_8 (0x38 << ICE_AQC_RX_EQU_SHIFT)
++#define ICE_AQC_RX_EQU_DFE_9 (0x39 << ICE_AQC_RX_EQU_SHIFT)
++#define ICE_AQC_RX_EQU_DFE_10 (0x3A << ICE_AQC_RX_EQU_SHIFT)
++#define ICE_AQC_RX_EQU_DFE_11 (0x3B << ICE_AQC_RX_EQU_SHIFT)
++#define ICE_AQC_RX_EQU_DFE_12 (0x3C << ICE_AQC_RX_EQU_SHIFT)
+ #define ICE_AQC_TX_EQU_PRE1 0x0
+ #define ICE_AQC_TX_EQU_PRE3 0x3
+ #define ICE_AQC_TX_EQU_ATTEN 0x4
+diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
+index d5cc934d135949..7d1feeb317be34 100644
+--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
++++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
+@@ -693,75 +693,52 @@ static int ice_get_port_topology(struct ice_hw *hw, u8 lport,
+ static int ice_get_tx_rx_equa(struct ice_hw *hw, u8 serdes_num,
+ 			      struct ice_serdes_equalization_to_ethtool *ptr)
+ {
++	static const int tx = ICE_AQC_OP_CODE_TX_EQU;
++	static const int rx = ICE_AQC_OP_CODE_RX_EQU;
++	struct {
++		int data_in;
++		int opcode;
++		int *out;
++	} aq_params[] = {
++		{ ICE_AQC_TX_EQU_PRE1, tx, &ptr->tx_equ_pre1 },
++		{ ICE_AQC_TX_EQU_PRE3, tx, &ptr->tx_equ_pre3 },
++		{ ICE_AQC_TX_EQU_ATTEN, tx, &ptr->tx_equ_atten },
++		{ ICE_AQC_TX_EQU_POST1, tx, &ptr->tx_equ_post1 },
++		{ ICE_AQC_TX_EQU_PRE2, tx, &ptr->tx_equ_pre2 },
++		{ ICE_AQC_RX_EQU_PRE2, rx, &ptr->rx_equ_pre2 },
++		{ ICE_AQC_RX_EQU_PRE1, rx, &ptr->rx_equ_pre1 },
++		{ ICE_AQC_RX_EQU_POST1, rx, &ptr->rx_equ_post1 },
++		{ ICE_AQC_RX_EQU_BFLF, rx, &ptr->rx_equ_bflf },
++		{ ICE_AQC_RX_EQU_BFHF, rx, &ptr->rx_equ_bfhf },
++		{ ICE_AQC_RX_EQU_CTLE_GAINHF, rx, &ptr->rx_equ_ctle_gainhf },
++		{ ICE_AQC_RX_EQU_CTLE_GAINLF, rx, &ptr->rx_equ_ctle_gainlf },
++		{ ICE_AQC_RX_EQU_CTLE_GAINDC, rx, &ptr->rx_equ_ctle_gaindc },
++		{ ICE_AQC_RX_EQU_CTLE_BW, rx, &ptr->rx_equ_ctle_bw },
++		{ ICE_AQC_RX_EQU_DFE_GAIN, rx, &ptr->rx_equ_dfe_gain },
++		{ ICE_AQC_RX_EQU_DFE_GAIN2, rx, &ptr->rx_equ_dfe_gain_2 },
++		{ ICE_AQC_RX_EQU_DFE_2, rx, &ptr->rx_equ_dfe_2 },
++		{ ICE_AQC_RX_EQU_DFE_3, rx, &ptr->rx_equ_dfe_3 },
++		{ ICE_AQC_RX_EQU_DFE_4, rx, &ptr->rx_equ_dfe_4 },
++		{ ICE_AQC_RX_EQU_DFE_5, rx, &ptr->rx_equ_dfe_5 },
++		{ ICE_AQC_RX_EQU_DFE_6, rx, &ptr->rx_equ_dfe_6 },
++		{ ICE_AQC_RX_EQU_DFE_7, rx, &ptr->rx_equ_dfe_7 },
++		{ ICE_AQC_RX_EQU_DFE_8, rx, &ptr->rx_equ_dfe_8 },
++		{ ICE_AQC_RX_EQU_DFE_9, rx, &ptr->rx_equ_dfe_9 },
++		{ ICE_AQC_RX_EQU_DFE_10, rx, &ptr->rx_equ_dfe_10 },
++		{ ICE_AQC_RX_EQU_DFE_11, rx, &ptr->rx_equ_dfe_11 },
++		{ ICE_AQC_RX_EQU_DFE_12, rx, &ptr->rx_equ_dfe_12 },
++	};
+ 	int err;
+ 
+-	err = ice_aq_get_phy_equalization(hw, ICE_AQC_TX_EQU_PRE1,
+-					  ICE_AQC_OP_CODE_TX_EQU, serdes_num,
+-					  &ptr->tx_equalization_pre1);
+-	if (err)
+-		return err;
+-
+-	err = ice_aq_get_phy_equalization(hw, ICE_AQC_TX_EQU_PRE3,
+-					  ICE_AQC_OP_CODE_TX_EQU, serdes_num,
+-					  &ptr->tx_equalization_pre3);
+-	if (err)
+-		return err;
+-
+-	err = ice_aq_get_phy_equalization(hw, ICE_AQC_TX_EQU_ATTEN,
+-					  ICE_AQC_OP_CODE_TX_EQU, serdes_num,
+-					  &ptr->tx_equalization_atten);
+-	if (err)
+-		return err;
+-
+-	err = ice_aq_get_phy_equalization(hw, ICE_AQC_TX_EQU_POST1,
+-					  ICE_AQC_OP_CODE_TX_EQU, serdes_num,
+-					  &ptr->tx_equalization_post1);
+-	if (err)
+-		return err;
+-
+-	err = ice_aq_get_phy_equalization(hw, ICE_AQC_TX_EQU_PRE2,
+-					  ICE_AQC_OP_CODE_TX_EQU, serdes_num,
+-					  &ptr->tx_equalization_pre2);
+-	if (err)
+-		return err;
+-
+-	err = ice_aq_get_phy_equalization(hw, ICE_AQC_RX_EQU_PRE2,
+-					  ICE_AQC_OP_CODE_RX_EQU, serdes_num,
+-					  &ptr->rx_equalization_pre2);
+-	if (err)
+-		return err;
+-
+-	err = ice_aq_get_phy_equalization(hw, ICE_AQC_RX_EQU_PRE1,
+-					  ICE_AQC_OP_CODE_RX_EQU, serdes_num,
+-					  &ptr->rx_equalization_pre1);
+-	if (err)
+-		return err;
+-
+-	err = ice_aq_get_phy_equalization(hw, ICE_AQC_RX_EQU_POST1,
+-					  ICE_AQC_OP_CODE_RX_EQU, serdes_num,
+-					  &ptr->rx_equalization_post1);
+-	if (err)
+-		return err;
+-
+-	err = ice_aq_get_phy_equalization(hw, ICE_AQC_RX_EQU_BFLF,
+-					  ICE_AQC_OP_CODE_RX_EQU, serdes_num,
+-					  &ptr->rx_equalization_bflf);
+-	if (err)
+-		return err;
+-
+-	err = ice_aq_get_phy_equalization(hw, ICE_AQC_RX_EQU_BFHF,
+-					  ICE_AQC_OP_CODE_RX_EQU, serdes_num,
+-					  &ptr->rx_equalization_bfhf);
+-	if (err)
+-		return err;
+-
+-	err = ice_aq_get_phy_equalization(hw, ICE_AQC_RX_EQU_DRATE,
+-					  ICE_AQC_OP_CODE_RX_EQU, serdes_num,
+-					  &ptr->rx_equalization_drate);
+-	if (err)
+-		return err;
++	for (int i = 0; i < ARRAY_SIZE(aq_params); i++) {
++		err = ice_aq_get_phy_equalization(hw, aq_params[i].data_in,
++						  aq_params[i].opcode,
++						  serdes_num, aq_params[i].out);
++		if (err)
++			break;
++	}
+ 
+-	return 0;
++	return err;
+ }
+ 
+ /**
+diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.h b/drivers/net/ethernet/intel/ice/ice_ethtool.h
+index 9acccae38625ae..23b2cfbc9684c0 100644
+--- a/drivers/net/ethernet/intel/ice/ice_ethtool.h
++++ b/drivers/net/ethernet/intel/ice/ice_ethtool.h
+@@ -10,17 +10,33 @@ struct ice_phy_type_to_ethtool {
+ };
+ 
+ struct ice_serdes_equalization_to_ethtool {
+-	int rx_equalization_pre2;
+-	int rx_equalization_pre1;
+-	int rx_equalization_post1;
+-	int rx_equalization_bflf;
+-	int rx_equalization_bfhf;
+-	int rx_equalization_drate;
+-	int tx_equalization_pre1;
+-	int tx_equalization_pre3;
+-	int tx_equalization_atten;
+-	int tx_equalization_post1;
+-	int tx_equalization_pre2;
++	int rx_equ_pre2;
++	int rx_equ_pre1;
++	int rx_equ_post1;
++	int rx_equ_bflf;
++	int rx_equ_bfhf;
++	int rx_equ_ctle_gainhf;
++	int rx_equ_ctle_gainlf;
++	int rx_equ_ctle_gaindc;
++	int rx_equ_ctle_bw;
++	int rx_equ_dfe_gain;
++	int rx_equ_dfe_gain_2;
++	int rx_equ_dfe_2;
++	int rx_equ_dfe_3;
++	int rx_equ_dfe_4;
++	int rx_equ_dfe_5;
++	int rx_equ_dfe_6;
++	int rx_equ_dfe_7;
++	int rx_equ_dfe_8;
++	int rx_equ_dfe_9;
++	int rx_equ_dfe_10;
++	int rx_equ_dfe_11;
++	int rx_equ_dfe_12;
++	int tx_equ_pre1;
++	int tx_equ_pre3;
++	int tx_equ_atten;
++	int tx_equ_post1;
++	int tx_equ_pre2;
+ };
+ 
+ struct ice_regdump_to_ethtool {
+diff --git a/drivers/net/ethernet/intel/ice/ice_parser.h b/drivers/net/ethernet/intel/ice/ice_parser.h
+index 6509d807627cee..4f56d53d56b9ad 100644
+--- a/drivers/net/ethernet/intel/ice/ice_parser.h
++++ b/drivers/net/ethernet/intel/ice/ice_parser.h
+@@ -257,7 +257,6 @@ ice_pg_nm_cam_match(struct ice_pg_nm_cam_item *table, int size,
+ /*** ICE_SID_RXPARSER_BOOST_TCAM and ICE_SID_LBL_RXPARSER_TMEM sections ***/
+ #define ICE_BST_TCAM_TABLE_SIZE		256
+ #define ICE_BST_TCAM_KEY_SIZE		20
+-#define ICE_BST_KEY_TCAM_SIZE		19
+ 
+ /* Boost TCAM item */
+ struct ice_bst_tcam_item {
+@@ -401,7 +400,6 @@ u16 ice_xlt_kb_flag_get(struct ice_xlt_kb *kb, u64 pkt_flag);
+ #define ICE_PARSER_GPR_NUM	128
+ #define ICE_PARSER_FLG_NUM	64
+ #define ICE_PARSER_ERR_NUM	16
+-#define ICE_BST_KEY_SIZE	10
+ #define ICE_MARKER_ID_SIZE	9
+ #define ICE_MARKER_MAX_SIZE	\
+ 		(ICE_MARKER_ID_SIZE * BITS_PER_BYTE - 1)
+@@ -431,13 +429,13 @@ struct ice_parser_rt {
+ 	u8 pkt_buf[ICE_PARSER_MAX_PKT_LEN + ICE_PARSER_PKT_REV];
+ 	u16 pkt_len;
+ 	u16 po;
+-	u8 bst_key[ICE_BST_KEY_SIZE];
++	u8 bst_key[ICE_BST_TCAM_KEY_SIZE];
+ 	struct ice_pg_cam_key pg_key;
++	u8 pg_prio;
+ 	struct ice_alu *alu0;
+ 	struct ice_alu *alu1;
+ 	struct ice_alu *alu2;
+ 	struct ice_pg_cam_action *action;
+-	u8 pg_prio;
+ 	struct ice_gpr_pu pu;
+ 	u8 markers[ICE_MARKER_ID_SIZE];
+ 	bool protocols[ICE_PO_PAIR_SIZE];
+diff --git a/drivers/net/ethernet/intel/ice/ice_parser_rt.c b/drivers/net/ethernet/intel/ice/ice_parser_rt.c
+index dedf5e854e4b76..3995d662e05099 100644
+--- a/drivers/net/ethernet/intel/ice/ice_parser_rt.c
++++ b/drivers/net/ethernet/intel/ice/ice_parser_rt.c
+@@ -125,22 +125,20 @@ static void ice_bst_key_init(struct ice_parser_rt *rt,
+ 	else
+ 		key[idd] = imem->b_kb.prio;
+ 
+-	idd = ICE_BST_KEY_TCAM_SIZE - 1;
++	idd = ICE_BST_TCAM_KEY_SIZE - 2;
+ 	for (i = idd; i >= 0; i--) {
+ 		int j;
+ 
+ 		j = ho + idd - i;
+ 		if (j < ICE_PARSER_MAX_PKT_LEN)
+-			key[i] = rt->pkt_buf[ho + idd - i];
++			key[i] = rt->pkt_buf[j];
+ 		else
+ 			key[i] = 0;
+ 	}
+ 
+-	ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Generated Boost TCAM Key:\n");
+-	ice_debug(rt->psr->hw, ICE_DBG_PARSER, "%02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n",
+-		  key[0], key[1], key[2], key[3], key[4],
+-		  key[5], key[6], key[7], key[8], key[9]);
+-	ice_debug(rt->psr->hw, ICE_DBG_PARSER, "\n");
++	ice_debug_array_w_prefix(rt->psr->hw, ICE_DBG_PARSER,
++				 KBUILD_MODNAME ": Generated Boost TCAM Key",
++				 key, ICE_BST_TCAM_KEY_SIZE);
+ }
+ 
+ static u16 ice_bit_rev_u16(u16 v, int len)
+diff --git a/drivers/net/ethernet/intel/idpf/idpf_controlq.c b/drivers/net/ethernet/intel/idpf/idpf_controlq.c
+index 4849590a5591f1..b28991dd187036 100644
+--- a/drivers/net/ethernet/intel/idpf/idpf_controlq.c
++++ b/drivers/net/ethernet/intel/idpf/idpf_controlq.c
+@@ -376,6 +376,9 @@ int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
+ 		if (!(le16_to_cpu(desc->flags) & IDPF_CTLQ_FLAG_DD))
+ 			break;
+ 
++		/* Ensure no other fields are read until DD flag is checked */
++		dma_rmb();
++
+ 		/* strip off FW internal code */
+ 		desc_err = le16_to_cpu(desc->ret_val) & 0xff;
+ 
+@@ -563,6 +566,9 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
+ 		if (!(flags & IDPF_CTLQ_FLAG_DD))
+ 			break;
+ 
++		/* Ensure no other fields are read until DD flag is checked */
++		dma_rmb();
++
+ 		q_msg[i].vmvf_type = (flags &
+ 				      (IDPF_CTLQ_FLAG_FTYPE_VM |
+ 				       IDPF_CTLQ_FLAG_FTYPE_PF)) >>
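
The dma_rmb() added after each DD-flag test enforces the classic descriptor-ownership ordering: the CPU must observe the "descriptor done" flag before it reads any other field the device wrote into the descriptor. A user-space analogue using a C11 acquire fence in place of dma_rmb():

/* Ownership-flag ordering sketch: check the done flag, then fence, then
 * read the payload. The acquire fence stands in for dma_rmb().
 */
#include <stdatomic.h>
#include <stdio.h>

struct desc {
	atomic_uint flags;	/* bit 0 = DD ("descriptor done") */
	unsigned int ret_val;	/* valid only once DD is set */
};

#define DD_FLAG 0x1u

static int read_completion(struct desc *d, unsigned int *ret_val)
{
	if (!(atomic_load_explicit(&d->flags, memory_order_relaxed) & DD_FLAG))
		return -1;

	/* dma_rmb() equivalent: order the flag read before the data reads. */
	atomic_thread_fence(memory_order_acquire);

	*ret_val = d->ret_val;
	return 0;
}

int main(void)
{
	struct desc d = { .ret_val = 42 };
	unsigned int v;

	atomic_store(&d.flags, DD_FLAG);
	if (!read_completion(&d, &v))
		printf("completion ret_val=%u\n", v);
	return 0;
}
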
+diff --git a/drivers/net/ethernet/intel/idpf/idpf_main.c b/drivers/net/ethernet/intel/idpf/idpf_main.c
+index db476b3314c8a5..dfd56fc5ff6550 100644
+--- a/drivers/net/ethernet/intel/idpf/idpf_main.c
++++ b/drivers/net/ethernet/intel/idpf/idpf_main.c
+@@ -174,7 +174,8 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	pci_set_master(pdev);
+ 	pci_set_drvdata(pdev, adapter);
+ 
+-	adapter->init_wq = alloc_workqueue("%s-%s-init", 0, 0,
++	adapter->init_wq = alloc_workqueue("%s-%s-init",
++					   WQ_UNBOUND | WQ_MEM_RECLAIM, 0,
+ 					   dev_driver_string(dev),
+ 					   dev_name(dev));
+ 	if (!adapter->init_wq) {
+@@ -183,7 +184,8 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 		goto err_free;
+ 	}
+ 
+-	adapter->serv_wq = alloc_workqueue("%s-%s-service", 0, 0,
++	adapter->serv_wq = alloc_workqueue("%s-%s-service",
++					   WQ_UNBOUND | WQ_MEM_RECLAIM, 0,
+ 					   dev_driver_string(dev),
+ 					   dev_name(dev));
+ 	if (!adapter->serv_wq) {
+@@ -192,7 +194,8 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 		goto err_serv_wq_alloc;
+ 	}
+ 
+-	adapter->mbx_wq = alloc_workqueue("%s-%s-mbx", 0, 0,
++	adapter->mbx_wq = alloc_workqueue("%s-%s-mbx",
++					  WQ_UNBOUND | WQ_MEM_RECLAIM, 0,
+ 					  dev_driver_string(dev),
+ 					  dev_name(dev));
+ 	if (!adapter->mbx_wq) {
+@@ -201,7 +204,8 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 		goto err_mbx_wq_alloc;
+ 	}
+ 
+-	adapter->stats_wq = alloc_workqueue("%s-%s-stats", 0, 0,
++	adapter->stats_wq = alloc_workqueue("%s-%s-stats",
++					    WQ_UNBOUND | WQ_MEM_RECLAIM, 0,
+ 					    dev_driver_string(dev),
+ 					    dev_name(dev));
+ 	if (!adapter->stats_wq) {
+@@ -210,7 +214,8 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 		goto err_stats_wq_alloc;
+ 	}
+ 
+-	adapter->vc_event_wq = alloc_workqueue("%s-%s-vc_event", 0, 0,
++	adapter->vc_event_wq = alloc_workqueue("%s-%s-vc_event",
++					       WQ_UNBOUND | WQ_MEM_RECLAIM, 0,
+ 					       dev_driver_string(dev),
+ 					       dev_name(dev));
+ 	if (!adapter->vc_event_wq) {
+diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
+index d46c95f91b0d81..99bdb95bf22661 100644
+--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
++++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
+@@ -612,14 +612,15 @@ idpf_vc_xn_forward_reply(struct idpf_adapter *adapter,
+ 		return -EINVAL;
+ 	}
+ 	xn = &adapter->vcxn_mngr->ring[xn_idx];
++	idpf_vc_xn_lock(xn);
+ 	salt = FIELD_GET(IDPF_VC_XN_SALT_M, msg_info);
+ 	if (xn->salt != salt) {
+ 		dev_err_ratelimited(&adapter->pdev->dev, "Transaction salt does not match (%02x != %02x)\n",
+ 				    xn->salt, salt);
++		idpf_vc_xn_unlock(xn);
+ 		return -EINVAL;
+ 	}
+ 
+-	idpf_vc_xn_lock(xn);
+ 	switch (xn->state) {
+ 	case IDPF_VC_XN_WAITING:
+ 		/* success */
+@@ -3077,12 +3078,21 @@ int idpf_vc_core_init(struct idpf_adapter *adapter)
+  */
+ void idpf_vc_core_deinit(struct idpf_adapter *adapter)
+ {
++	bool remove_in_prog;
++
+ 	if (!test_bit(IDPF_VC_CORE_INIT, adapter->flags))
+ 		return;
+ 
++	/* Avoid transaction timeouts when called during reset */
++	remove_in_prog = test_bit(IDPF_REMOVE_IN_PROG, adapter->flags);
++	if (!remove_in_prog)
++		idpf_vc_xn_shutdown(adapter->vcxn_mngr);
++
+ 	idpf_deinit_task(adapter);
+ 	idpf_intr_rel(adapter);
+-	idpf_vc_xn_shutdown(adapter->vcxn_mngr);
++
++	if (remove_in_prog)
++		idpf_vc_xn_shutdown(adapter->vcxn_mngr);
+ 
+ 	cancel_delayed_work_sync(&adapter->serv_task);
+ 	cancel_delayed_work_sync(&adapter->mbx_task);
+diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+index 549436efc20488..730aa5632cceee 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
++++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+@@ -995,12 +995,6 @@ static void octep_get_stats64(struct net_device *netdev,
+ 	struct octep_device *oct = netdev_priv(netdev);
+ 	int q;
+ 
+-	if (netif_running(netdev))
+-		octep_ctrl_net_get_if_stats(oct,
+-					    OCTEP_CTRL_NET_INVALID_VFID,
+-					    &oct->iface_rx_stats,
+-					    &oct->iface_tx_stats);
+-
+ 	tx_packets = 0;
+ 	tx_bytes = 0;
+ 	rx_packets = 0;
+@@ -1018,10 +1012,6 @@ static void octep_get_stats64(struct net_device *netdev,
+ 	stats->tx_bytes = tx_bytes;
+ 	stats->rx_packets = rx_packets;
+ 	stats->rx_bytes = rx_bytes;
+-	stats->multicast = oct->iface_rx_stats.mcast_pkts;
+-	stats->rx_errors = oct->iface_rx_stats.err_pkts;
+-	stats->collisions = oct->iface_tx_stats.xscol;
+-	stats->tx_fifo_errors = oct->iface_tx_stats.undflw;
+ }
+ 
+ /**
+diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
+index 7e6771c9cdbbab..4c699514fd57a0 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
++++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
+@@ -799,14 +799,6 @@ static void octep_vf_get_stats64(struct net_device *netdev,
+ 	stats->tx_bytes = tx_bytes;
+ 	stats->rx_packets = rx_packets;
+ 	stats->rx_bytes = rx_bytes;
+-	if (!octep_vf_get_if_stats(oct)) {
+-		stats->multicast = oct->iface_rx_stats.mcast_pkts;
+-		stats->rx_errors = oct->iface_rx_stats.err_pkts;
+-		stats->rx_dropped = oct->iface_rx_stats.dropped_pkts_fifo_full +
+-				    oct->iface_rx_stats.err_pkts;
+-		stats->rx_missed_errors = oct->iface_rx_stats.dropped_pkts_fifo_full;
+-		stats->tx_dropped = oct->iface_tx_stats.dropped;
+-	}
+ }
+ 
+ /**
+diff --git a/drivers/net/ethernet/mediatek/airoha_eth.c b/drivers/net/ethernet/mediatek/airoha_eth.c
+index 2c26eb18528372..20cf7ba9d75084 100644
+--- a/drivers/net/ethernet/mediatek/airoha_eth.c
++++ b/drivers/net/ethernet/mediatek/airoha_eth.c
+@@ -258,11 +258,11 @@
+ #define REG_GDM3_FWD_CFG		GDM3_BASE
+ #define GDM3_PAD_EN_MASK		BIT(28)
+ 
+-#define REG_GDM4_FWD_CFG		(GDM4_BASE + 0x100)
++#define REG_GDM4_FWD_CFG		GDM4_BASE
+ #define GDM4_PAD_EN_MASK		BIT(28)
+ #define GDM4_SPORT_OFFSET0_MASK		GENMASK(11, 8)
+ 
+-#define REG_GDM4_SRC_PORT_SET		(GDM4_BASE + 0x33c)
++#define REG_GDM4_SRC_PORT_SET		(GDM4_BASE + 0x23c)
+ #define GDM4_SPORT_OFF2_MASK		GENMASK(19, 16)
+ #define GDM4_SPORT_OFF1_MASK		GENMASK(15, 12)
+ #define GDM4_SPORT_OFF0_MASK		GENMASK(11, 8)
+@@ -2123,17 +2123,14 @@ static void airoha_hw_cleanup(struct airoha_qdma *qdma)
+ 		if (!qdma->q_rx[i].ndesc)
+ 			continue;
+ 
+-		napi_disable(&qdma->q_rx[i].napi);
+ 		netif_napi_del(&qdma->q_rx[i].napi);
+ 		airoha_qdma_cleanup_rx_queue(&qdma->q_rx[i]);
+ 		if (qdma->q_rx[i].page_pool)
+ 			page_pool_destroy(qdma->q_rx[i].page_pool);
+ 	}
+ 
+-	for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
+-		napi_disable(&qdma->q_tx_irq[i].napi);
++	for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++)
+ 		netif_napi_del(&qdma->q_tx_irq[i].napi);
+-	}
+ 
+ 	for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
+ 		if (!qdma->q_tx[i].ndesc)
+@@ -2158,6 +2155,21 @@ static void airoha_qdma_start_napi(struct airoha_qdma *qdma)
+ 	}
+ }
+ 
++static void airoha_qdma_stop_napi(struct airoha_qdma *qdma)
++{
++	int i;
++
++	for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++)
++		napi_disable(&qdma->q_tx_irq[i].napi);
++
++	for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
++		if (!qdma->q_rx[i].ndesc)
++			continue;
++
++		napi_disable(&qdma->q_rx[i].napi);
++	}
++}
++
+ static void airoha_update_hw_stats(struct airoha_gdm_port *port)
+ {
+ 	struct airoha_eth *eth = port->qdma->eth;
+@@ -2713,7 +2725,7 @@ static int airoha_probe(struct platform_device *pdev)
+ 
+ 	err = airoha_hw_init(pdev, eth);
+ 	if (err)
+-		goto error;
++		goto error_hw_cleanup;
+ 
+ 	for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
+ 		airoha_qdma_start_napi(&eth->qdma[i]);
+@@ -2728,13 +2740,16 @@ static int airoha_probe(struct platform_device *pdev)
+ 		err = airoha_alloc_gdm_port(eth, np);
+ 		if (err) {
+ 			of_node_put(np);
+-			goto error;
++			goto error_napi_stop;
+ 		}
+ 	}
+ 
+ 	return 0;
+ 
+-error:
++error_napi_stop:
++	for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
++		airoha_qdma_stop_napi(&eth->qdma[i]);
++error_hw_cleanup:
+ 	for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
+ 		airoha_hw_cleanup(&eth->qdma[i]);
+ 
+@@ -2755,8 +2770,10 @@ static void airoha_remove(struct platform_device *pdev)
+ 	struct airoha_eth *eth = platform_get_drvdata(pdev);
+ 	int i;
+ 
+-	for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
++	for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) {
++		airoha_qdma_stop_napi(&eth->qdma[i]);
+ 		airoha_hw_cleanup(&eth->qdma[i]);
++	}
+ 
+ 	for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
+ 		struct airoha_gdm_port *port = eth->ports[i];
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.c
+index 3f4c58bada3745..ab5f8f07f1f7e5 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.c
+@@ -70,7 +70,7 @@
+ 			u32 second_dw_mask = (mask) & ((1 << _bit_off) - 1); \
+ 			_HWS_SET32(p, (v) >> _bit_off, byte_off, 0, (mask) >> _bit_off); \
+ 			_HWS_SET32(p, (v) & second_dw_mask, (byte_off) + DW_SIZE, \
+-				    (bit_off) % BITS_IN_DW, second_dw_mask); \
++				    (bit_off + BITS_IN_DW) % BITS_IN_DW, second_dw_mask); \
+ 		} else { \
+ 			_HWS_SET32(p, v, byte_off, (bit_off), (mask)); \
+ 		} \
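
The mlx5hws change is evidently a signedness fix: for non-negative offsets, (bit_off + BITS_IN_DW) % BITS_IN_DW equals (bit_off) % BITS_IN_DW, so the bias can only matter when bit_off is negative, and C's % keeps the dividend's sign. A two-line demonstration with an illustrative offset:

/* Why the offset is biased before the modulo: C's % stays negative for a
 * negative dividend unless BITS_IN_DW is added first.
 */
#include <stdio.h>

#define BITS_IN_DW 32

int main(void)
{
	int bit_off = -8;	/* a field spilling across a dword boundary */

	printf("naive:  %d\n", bit_off % BITS_IN_DW);                /* -8 */
	printf("biased: %d\n", (bit_off + BITS_IN_DW) % BITS_IN_DW); /* 24 */
	return 0;
}
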
+diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
+index 46245e0b24623d..43c84900369a36 100644
+--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
++++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
+@@ -14,7 +14,6 @@
+ #define MLXFW_FSM_STATE_WAIT_TIMEOUT_MS 30000
+ #define MLXFW_FSM_STATE_WAIT_ROUNDS \
+ 	(MLXFW_FSM_STATE_WAIT_TIMEOUT_MS / MLXFW_FSM_STATE_WAIT_CYCLE_MS)
+-#define MLXFW_FSM_MAX_COMPONENT_SIZE (10 * (1 << 20))
+ 
+ static const int mlxfw_fsm_state_errno[] = {
+ 	[MLXFW_FSM_STATE_ERR_ERROR] = -EIO,
+@@ -229,7 +228,6 @@ static int mlxfw_flash_component(struct mlxfw_dev *mlxfw_dev,
+ 		return err;
+ 	}
+ 
+-	comp_max_size = min_t(u32, comp_max_size, MLXFW_FSM_MAX_COMPONENT_SIZE);
+ 	if (comp->data_size > comp_max_size) {
+ 		MLXFW_ERR_MSG(mlxfw_dev, extack,
+ 			      "Component size is bigger than limit", -EINVAL);
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
+index 69cd689dbc83e9..5afe6b155ef0d5 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
+@@ -1003,10 +1003,10 @@ static void mlxsw_sp_mr_route_stats_update(struct mlxsw_sp *mlxsw_sp,
+ 	mr->mr_ops->route_stats(mlxsw_sp, mr_route->route_priv, &packets,
+ 				&bytes);
+ 
+-	if (mr_route->mfc->mfc_un.res.pkt != packets)
+-		mr_route->mfc->mfc_un.res.lastuse = jiffies;
+-	mr_route->mfc->mfc_un.res.pkt = packets;
+-	mr_route->mfc->mfc_un.res.bytes = bytes;
++	if (atomic_long_read(&mr_route->mfc->mfc_un.res.pkt) != packets)
++		WRITE_ONCE(mr_route->mfc->mfc_un.res.lastuse, jiffies);
++	atomic_long_set(&mr_route->mfc->mfc_un.res.pkt, packets);
++	atomic_long_set(&mr_route->mfc->mfc_un.res.bytes, bytes);
+ }
+ 
+ static void mlxsw_sp_mr_stats_update(struct work_struct *work)
+diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
+index 6f6b0566c65bcb..cc4f0d16c76303 100644
+--- a/drivers/net/ethernet/renesas/ravb_main.c
++++ b/drivers/net/ethernet/renesas/ravb_main.c
+@@ -3208,10 +3208,15 @@ static int ravb_suspend(struct device *dev)
+ 
+ 	netif_device_detach(ndev);
+ 
+-	if (priv->wol_enabled)
+-		return ravb_wol_setup(ndev);
++	rtnl_lock();
++	if (priv->wol_enabled) {
++		ret = ravb_wol_setup(ndev);
++		rtnl_unlock();
++		return ret;
++	}
+ 
+ 	ret = ravb_close(ndev);
++	rtnl_unlock();
+ 	if (ret)
+ 		return ret;
+ 
+@@ -3236,19 +3241,20 @@ static int ravb_resume(struct device *dev)
+ 	if (!netif_running(ndev))
+ 		return 0;
+ 
++	rtnl_lock();
+ 	/* If WoL is enabled restore the interface. */
+-	if (priv->wol_enabled) {
++	if (priv->wol_enabled)
+ 		ret = ravb_wol_restore(ndev);
+-		if (ret)
+-			return ret;
+-	} else {
++	else
+ 		ret = pm_runtime_force_resume(dev);
+-		if (ret)
+-			return ret;
++	if (ret) {
++		rtnl_unlock();
++		return ret;
+ 	}
+ 
+ 	/* Reopening the interface will restore the device to the working state. */
+ 	ret = ravb_open(ndev);
++	rtnl_unlock();
+ 	if (ret < 0)
+ 		goto out_rpm_put;
+ 
+diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
+index 7a25903e35c305..bc12c0c7347f6b 100644
+--- a/drivers/net/ethernet/renesas/sh_eth.c
++++ b/drivers/net/ethernet/renesas/sh_eth.c
+@@ -3494,10 +3494,12 @@ static int sh_eth_suspend(struct device *dev)
+ 
+ 	netif_device_detach(ndev);
+ 
++	rtnl_lock();
+ 	if (mdp->wol_enabled)
+ 		ret = sh_eth_wol_setup(ndev);
+ 	else
+ 		ret = sh_eth_close(ndev);
++	rtnl_unlock();
+ 
+ 	return ret;
+ }
+@@ -3511,10 +3513,12 @@ static int sh_eth_resume(struct device *dev)
+ 	if (!netif_running(ndev))
+ 		return 0;
+ 
++	rtnl_lock();
+ 	if (mdp->wol_enabled)
+ 		ret = sh_eth_wol_restore(ndev);
+ 	else
+ 		ret = sh_eth_open(ndev);
++	rtnl_unlock();
+ 
+ 	if (ret < 0)
+ 		return ret;
+diff --git a/drivers/net/ethernet/sfc/ef100_ethtool.c b/drivers/net/ethernet/sfc/ef100_ethtool.c
+index 5c2551369812cb..6c3b74000d3b6a 100644
+--- a/drivers/net/ethernet/sfc/ef100_ethtool.c
++++ b/drivers/net/ethernet/sfc/ef100_ethtool.c
+@@ -59,6 +59,7 @@ const struct ethtool_ops ef100_ethtool_ops = {
+ 	.get_rxfh_indir_size	= efx_ethtool_get_rxfh_indir_size,
+ 	.get_rxfh_key_size	= efx_ethtool_get_rxfh_key_size,
+ 	.rxfh_per_ctx_key	= true,
++	.cap_rss_rxnfc_adds	= true,
+ 	.rxfh_priv_size		= sizeof(struct efx_rss_context_priv),
+ 	.get_rxfh		= efx_ethtool_get_rxfh,
+ 	.set_rxfh		= efx_ethtool_set_rxfh,
+diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
+index bb1930818beba4..83d715544f7fb2 100644
+--- a/drivers/net/ethernet/sfc/ethtool.c
++++ b/drivers/net/ethernet/sfc/ethtool.c
+@@ -263,6 +263,7 @@ const struct ethtool_ops efx_ethtool_ops = {
+ 	.get_rxfh_indir_size	= efx_ethtool_get_rxfh_indir_size,
+ 	.get_rxfh_key_size	= efx_ethtool_get_rxfh_key_size,
+ 	.rxfh_per_ctx_key	= true,
++	.cap_rss_rxnfc_adds	= true,
+ 	.rxfh_priv_size		= sizeof(struct efx_rss_context_priv),
+ 	.get_rxfh		= efx_ethtool_get_rxfh,
+ 	.set_rxfh		= efx_ethtool_set_rxfh,
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index cf7b59b8cc64b3..918d7f2e8ba992 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -7236,6 +7236,36 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
+ 	if (priv->dma_cap.tsoen)
+ 		dev_info(priv->device, "TSO supported\n");
+ 
++	if (priv->dma_cap.number_rx_queues &&
++	    priv->plat->rx_queues_to_use > priv->dma_cap.number_rx_queues) {
++		dev_warn(priv->device,
++			 "Number of Rx queues (%u) exceeds dma capability\n",
++			 priv->plat->rx_queues_to_use);
++		priv->plat->rx_queues_to_use = priv->dma_cap.number_rx_queues;
++	}
++	if (priv->dma_cap.number_tx_queues &&
++	    priv->plat->tx_queues_to_use > priv->dma_cap.number_tx_queues) {
++		dev_warn(priv->device,
++			 "Number of Tx queues (%u) exceeds dma capability\n",
++			 priv->plat->tx_queues_to_use);
++		priv->plat->tx_queues_to_use = priv->dma_cap.number_tx_queues;
++	}
++
++	if (priv->dma_cap.rx_fifo_size &&
++	    priv->plat->rx_fifo_size > priv->dma_cap.rx_fifo_size) {
++		dev_warn(priv->device,
++			 "Rx FIFO size (%u) exceeds dma capability\n",
++			 priv->plat->rx_fifo_size);
++		priv->plat->rx_fifo_size = priv->dma_cap.rx_fifo_size;
++	}
++	if (priv->dma_cap.tx_fifo_size &&
++	    priv->plat->tx_fifo_size > priv->dma_cap.tx_fifo_size) {
++		dev_warn(priv->device,
++			 "Tx FIFO size (%u) exceeds dma capability\n",
++			 priv->plat->tx_fifo_size);
++		priv->plat->tx_fifo_size = priv->dma_cap.tx_fifo_size;
++	}
++
+ 	priv->hw->vlan_fail_q_en =
+ 		(priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
+ 	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
+diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+index dfca13b82bdce2..b13c7e958e6b4e 100644
+--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
++++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+@@ -2207,7 +2207,7 @@ static void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common)
+ 	for (i = 0; i < common->tx_ch_num; i++) {
+ 		struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
+ 
+-		if (tx_chn->irq)
++		if (tx_chn->irq > 0)
+ 			devm_free_irq(dev, tx_chn->irq, tx_chn);
+ 
+ 		netif_napi_del(&tx_chn->napi_tx);
+diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
+index bf02efa10956a6..84181dcb98831f 100644
+--- a/drivers/net/netdevsim/netdevsim.h
++++ b/drivers/net/netdevsim/netdevsim.h
+@@ -129,6 +129,7 @@ struct netdevsim {
+ 		u32 sleep;
+ 		u32 __ports[2][NSIM_UDP_TUNNEL_N_PORTS];
+ 		u32 (*ports)[NSIM_UDP_TUNNEL_N_PORTS];
++		struct dentry *ddir;
+ 		struct debugfs_u32_array dfs_ports[2];
+ 	} udp_ports;
+ 
+diff --git a/drivers/net/netdevsim/udp_tunnels.c b/drivers/net/netdevsim/udp_tunnels.c
+index 02dc3123eb6c16..640b4983a9a0d1 100644
+--- a/drivers/net/netdevsim/udp_tunnels.c
++++ b/drivers/net/netdevsim/udp_tunnels.c
+@@ -112,9 +112,11 @@ nsim_udp_tunnels_info_reset_write(struct file *file, const char __user *data,
+ 	struct net_device *dev = file->private_data;
+ 	struct netdevsim *ns = netdev_priv(dev);
+ 
+-	memset(ns->udp_ports.ports, 0, sizeof(ns->udp_ports.__ports));
+ 	rtnl_lock();
+-	udp_tunnel_nic_reset_ntf(dev);
++	if (dev->reg_state == NETREG_REGISTERED) {
++		memset(ns->udp_ports.ports, 0, sizeof(ns->udp_ports.__ports));
++		udp_tunnel_nic_reset_ntf(dev);
++	}
+ 	rtnl_unlock();
+ 
+ 	return count;
+@@ -144,23 +146,23 @@ int nsim_udp_tunnels_info_create(struct nsim_dev *nsim_dev,
+ 	else
+ 		ns->udp_ports.ports = nsim_dev->udp_ports.__ports;
+ 
+-	debugfs_create_u32("udp_ports_inject_error", 0600,
+-			   ns->nsim_dev_port->ddir,
++	ns->udp_ports.ddir = debugfs_create_dir("udp_ports",
++						ns->nsim_dev_port->ddir);
++
++	debugfs_create_u32("inject_error", 0600, ns->udp_ports.ddir,
+ 			   &ns->udp_ports.inject_error);
+ 
+ 	ns->udp_ports.dfs_ports[0].array = ns->udp_ports.ports[0];
+ 	ns->udp_ports.dfs_ports[0].n_elements = NSIM_UDP_TUNNEL_N_PORTS;
+-	debugfs_create_u32_array("udp_ports_table0", 0400,
+-				 ns->nsim_dev_port->ddir,
++	debugfs_create_u32_array("table0", 0400, ns->udp_ports.ddir,
+ 				 &ns->udp_ports.dfs_ports[0]);
+ 
+ 	ns->udp_ports.dfs_ports[1].array = ns->udp_ports.ports[1];
+ 	ns->udp_ports.dfs_ports[1].n_elements = NSIM_UDP_TUNNEL_N_PORTS;
+-	debugfs_create_u32_array("udp_ports_table1", 0400,
+-				 ns->nsim_dev_port->ddir,
++	debugfs_create_u32_array("table1", 0400, ns->udp_ports.ddir,
+ 				 &ns->udp_ports.dfs_ports[1]);
+ 
+-	debugfs_create_file("udp_ports_reset", 0200, ns->nsim_dev_port->ddir,
++	debugfs_create_file("reset", 0200, ns->udp_ports.ddir,
+ 			    dev, &nsim_udp_tunnels_info_reset_fops);
+ 
+ 	/* Note: it's not normal to allocate the info struct like this!
+@@ -196,6 +198,9 @@ int nsim_udp_tunnels_info_create(struct nsim_dev *nsim_dev,
+ 
+ void nsim_udp_tunnels_info_destroy(struct net_device *dev)
+ {
++	struct netdevsim *ns = netdev_priv(dev);
++
++	debugfs_remove_recursive(ns->udp_ports.ddir);
+ 	kfree(dev->udp_tunnel_nic_info);
+ 	dev->udp_tunnel_nic_info = NULL;
+ }
+diff --git a/drivers/net/phy/marvell-88q2xxx.c b/drivers/net/phy/marvell-88q2xxx.c
+index c812f16eaa3a88..b3a5a0af19da66 100644
+--- a/drivers/net/phy/marvell-88q2xxx.c
++++ b/drivers/net/phy/marvell-88q2xxx.c
+@@ -95,6 +95,10 @@
+ 
+ #define MDIO_MMD_PCS_MV_TDR_OFF_CUTOFF			65246
+ 
++struct mv88q2xxx_priv {
++	bool enable_temp;
++};
++
+ struct mmd_val {
+ 	int devad;
+ 	u32 regnum;
+@@ -669,17 +673,12 @@ static const struct hwmon_chip_info mv88q2xxx_hwmon_chip_info = {
+ 
+ static int mv88q2xxx_hwmon_probe(struct phy_device *phydev)
+ {
++	struct mv88q2xxx_priv *priv = phydev->priv;
+ 	struct device *dev = &phydev->mdio.dev;
+ 	struct device *hwmon;
+ 	char *hwmon_name;
+-	int ret;
+-
+-	/* Enable temperature sense */
+-	ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, MDIO_MMD_PCS_MV_TEMP_SENSOR2,
+-			     MDIO_MMD_PCS_MV_TEMP_SENSOR2_DIS_MASK, 0);
+-	if (ret < 0)
+-		return ret;
+ 
++	priv->enable_temp = true;
+ 	hwmon_name = devm_hwmon_sanitize_name(dev, dev_name(dev));
+ 	if (IS_ERR(hwmon_name))
+ 		return PTR_ERR(hwmon_name);
+@@ -702,6 +701,14 @@ static int mv88q2xxx_hwmon_probe(struct phy_device *phydev)
+ 
+ static int mv88q2xxx_probe(struct phy_device *phydev)
+ {
++	struct mv88q2xxx_priv *priv;
++
++	priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
++	if (!priv)
++		return -ENOMEM;
++
++	phydev->priv = priv;
++
+ 	return mv88q2xxx_hwmon_probe(phydev);
+ }
+ 
+@@ -792,6 +799,18 @@ static int mv88q222x_revb1_revb2_config_init(struct phy_device *phydev)
+ 
+ static int mv88q222x_config_init(struct phy_device *phydev)
+ {
++	struct mv88q2xxx_priv *priv = phydev->priv;
++	int ret;
++
++	/* Enable temperature sense */
++	if (priv->enable_temp) {
++		ret = phy_modify_mmd(phydev, MDIO_MMD_PCS,
++				     MDIO_MMD_PCS_MV_TEMP_SENSOR2,
++				     MDIO_MMD_PCS_MV_TEMP_SENSOR2_DIS_MASK, 0);
++		if (ret < 0)
++			return ret;
++	}
++
+ 	if (phydev->c45_ids.device_ids[MDIO_MMD_PMAPMD] == PHY_ID_88Q2220_REVB0)
+ 		return mv88q222x_revb0_config_init(phydev);
+ 	else
+diff --git a/drivers/net/tap.c b/drivers/net/tap.c
+index 5aa41d5f7765a6..5ca6ecf0ce5fbc 100644
+--- a/drivers/net/tap.c
++++ b/drivers/net/tap.c
+@@ -1329,9 +1329,9 @@ int tap_queue_resize(struct tap_dev *tap)
+ 	list_for_each_entry(q, &tap->queue_list, next)
+ 		rings[i++] = &q->ring;
+ 
+-	ret = ptr_ring_resize_multiple(rings, n,
+-				       dev->tx_queue_len, GFP_KERNEL,
+-				       __skb_array_destroy_skb);
++	ret = ptr_ring_resize_multiple_bh(rings, n,
++					  dev->tx_queue_len, GFP_KERNEL,
++					  __skb_array_destroy_skb);
+ 
+ 	kfree(rings);
+ 	return ret;
+diff --git a/drivers/net/team/team_core.c b/drivers/net/team/team_core.c
+index 1c85dda83825d8..7f4ef219eee44f 100644
+--- a/drivers/net/team/team_core.c
++++ b/drivers/net/team/team_core.c
+@@ -1175,6 +1175,13 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
+ 		return -EBUSY;
+ 	}
+ 
++	if (netdev_has_upper_dev(port_dev, dev)) {
++		NL_SET_ERR_MSG(extack, "Device is already a lower device of the team interface");
++		netdev_err(dev, "Device %s is already a lower device of the team interface\n",
++			   portname);
++		return -EBUSY;
++	}
++
+ 	if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
+ 	    vlan_uses_dev(dev)) {
+ 		NL_SET_ERR_MSG(extack, "Device is VLAN challenged and team device has VLAN set up");
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 03fe9e3ee7af15..6fc60950100c7c 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -3697,9 +3697,9 @@ static int tun_queue_resize(struct tun_struct *tun)
+ 	list_for_each_entry(tfile, &tun->disabled, next)
+ 		rings[i++] = &tfile->tx_ring;
+ 
+-	ret = ptr_ring_resize_multiple(rings, n,
+-				       dev->tx_queue_len, GFP_KERNEL,
+-				       tun_ptr_free);
++	ret = ptr_ring_resize_multiple_bh(rings, n,
++					  dev->tx_queue_len, GFP_KERNEL,
++					  tun_ptr_free);
+ 
+ 	kfree(rings);
+ 	return ret;
+diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
+index 01a3b2417a5401..ddff6f19ff98eb 100644
+--- a/drivers/net/usb/rtl8150.c
++++ b/drivers/net/usb/rtl8150.c
+@@ -71,6 +71,14 @@
+ #define MSR_SPEED		(1<<3)
+ #define MSR_LINK		(1<<2)
+ 
++/* USB endpoints */
++enum rtl8150_usb_ep {
++	RTL8150_USB_EP_CONTROL = 0,
++	RTL8150_USB_EP_BULK_IN = 1,
++	RTL8150_USB_EP_BULK_OUT = 2,
++	RTL8150_USB_EP_INT_IN = 3,
++};
++
+ /* Interrupt pipe data */
+ #define INT_TSR			0x00
+ #define INT_RSR			0x01
+@@ -867,6 +875,13 @@ static int rtl8150_probe(struct usb_interface *intf,
+ 	struct usb_device *udev = interface_to_usbdev(intf);
+ 	rtl8150_t *dev;
+ 	struct net_device *netdev;
++	static const u8 bulk_ep_addr[] = {
++		RTL8150_USB_EP_BULK_IN | USB_DIR_IN,
++		RTL8150_USB_EP_BULK_OUT | USB_DIR_OUT,
++		0};
++	static const u8 int_ep_addr[] = {
++		RTL8150_USB_EP_INT_IN | USB_DIR_IN,
++		0};
+ 
+ 	netdev = alloc_etherdev(sizeof(rtl8150_t));
+ 	if (!netdev)
+@@ -880,6 +895,13 @@ static int rtl8150_probe(struct usb_interface *intf,
+ 		return -ENOMEM;
+ 	}
+ 
++	/* Verify that all required endpoints are present */
++	if (!usb_check_bulk_endpoints(intf, bulk_ep_addr) ||
++	    !usb_check_int_endpoints(intf, int_ep_addr)) {
++		dev_err(&intf->dev, "couldn't find required endpoints\n");
++		goto out;
++	}
++
+ 	tasklet_setup(&dev->tl, rx_fixup);
+ 	spin_lock_init(&dev->rx_pool_lock);
+ 
+diff --git a/drivers/net/vxlan/vxlan_vnifilter.c b/drivers/net/vxlan/vxlan_vnifilter.c
+index d2023e7131bd4f..6e6e9f05509ab0 100644
+--- a/drivers/net/vxlan/vxlan_vnifilter.c
++++ b/drivers/net/vxlan/vxlan_vnifilter.c
+@@ -411,6 +411,11 @@ static int vxlan_vnifilter_dump(struct sk_buff *skb, struct netlink_callback *cb
+ 	struct tunnel_msg *tmsg;
+ 	struct net_device *dev;
+ 
++	if (cb->nlh->nlmsg_len < nlmsg_msg_size(sizeof(struct tunnel_msg))) {
++		NL_SET_ERR_MSG(cb->extack, "Invalid msg length");
++		return -EINVAL;
++	}
++
+ 	tmsg = nlmsg_data(cb->nlh);
+ 
+ 	if (tmsg->flags & ~TUNNEL_MSG_VALID_USER_FLAGS) {
+diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
+index 40088e62572e12..40b52d12b43235 100644
+--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
++++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
+@@ -3872,6 +3872,7 @@ int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi,
+ 		ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies,
+ 						 &rbm);
+ 		if (rbm != HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST &&
++		    rbm != HAL_RX_BUF_RBM_SW1_BM &&
+ 		    rbm != HAL_RX_BUF_RBM_SW3_BM) {
+ 			ab->soc_stats.invalid_rbm++;
+ 			ath11k_warn(ab, "invalid return buffer manager %d\n", rbm);
+diff --git a/drivers/net/wireless/ath/ath11k/hal_rx.c b/drivers/net/wireless/ath/ath11k/hal_rx.c
+index 8f7dd43dc1bd8e..753bd93f02123d 100644
+--- a/drivers/net/wireless/ath/ath11k/hal_rx.c
++++ b/drivers/net/wireless/ath/ath11k/hal_rx.c
+@@ -372,7 +372,8 @@ int ath11k_hal_wbm_desc_parse_err(struct ath11k_base *ab, void *desc,
+ 
+ 	ret_buf_mgr = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR,
+ 				wbm_desc->buf_addr_info.info1);
+-	if (ret_buf_mgr != HAL_RX_BUF_RBM_SW3_BM) {
++	if (ret_buf_mgr != HAL_RX_BUF_RBM_SW1_BM &&
++	    ret_buf_mgr != HAL_RX_BUF_RBM_SW3_BM) {
+ 		ab->soc_stats.invalid_rbm++;
+ 		return -EINVAL;
+ 	}
+diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c
+index 8946141aa0dce6..fbf5d57283576f 100644
+--- a/drivers/net/wireless/ath/ath12k/mac.c
++++ b/drivers/net/wireless/ath/ath12k/mac.c
+@@ -7220,9 +7220,9 @@ ath12k_mac_vdev_start_restart(struct ath12k_vif *arvif,
+ 							chandef->chan->band,
+ 							arvif->vif->type);
+ 	arg.min_power = 0;
+-	arg.max_power = chandef->chan->max_power * 2;
+-	arg.max_reg_power = chandef->chan->max_reg_power * 2;
+-	arg.max_antenna_gain = chandef->chan->max_antenna_gain * 2;
++	arg.max_power = chandef->chan->max_power;
++	arg.max_reg_power = chandef->chan->max_reg_power;
++	arg.max_antenna_gain = chandef->chan->max_antenna_gain;
+ 
+ 	arg.pref_tx_streams = ar->num_tx_chains;
+ 	arg.pref_rx_streams = ar->num_rx_chains;
+diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
+index 408776562a7e56..cd36cab6db75d3 100644
+--- a/drivers/net/wireless/ath/wcn36xx/main.c
++++ b/drivers/net/wireless/ath/wcn36xx/main.c
+@@ -1590,7 +1590,10 @@ static int wcn36xx_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	n_channels = wcn_band_2ghz.n_channels + wcn_band_5ghz.n_channels;
+-	wcn->chan_survey = devm_kmalloc(wcn->dev, n_channels, GFP_KERNEL);
++	wcn->chan_survey = devm_kcalloc(wcn->dev,
++					n_channels,
++					sizeof(struct wcn36xx_chan_survey),
++					GFP_KERNEL);
+ 	if (!wcn->chan_survey) {
+ 		ret = -ENOMEM;
+ 		goto out_wq;
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
+index 31e080e4da6697..ab3d6cfcb02bde 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
+@@ -6,6 +6,8 @@
+ #ifndef _fwil_h_
+ #define _fwil_h_
+ 
++#include "debug.h"
++
+ /*******************************************************************************
+  * Dongle command codes that are interpreted by firmware
+  ******************************************************************************/
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
+index 091fb6fd7c787c..834f7c9bb9e92d 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
++++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
+@@ -13,9 +13,12 @@
+ #include <linux/efi.h>
+ #include "fw/runtime.h"
+ 
+-#define IWL_EFI_VAR_GUID EFI_GUID(0x92daaf2f, 0xc02b, 0x455b,	\
+-				  0xb2, 0xec, 0xf5, 0xa3,	\
+-				  0x59, 0x4f, 0x4a, 0xea)
++#define IWL_EFI_WIFI_GUID	EFI_GUID(0x92daaf2f, 0xc02b, 0x455b,	\
++					 0xb2, 0xec, 0xf5, 0xa3,	\
++					 0x59, 0x4f, 0x4a, 0xea)
++#define IWL_EFI_WIFI_BT_GUID	EFI_GUID(0xe65d8884, 0xd4af, 0x4b20,	\
++					 0x8d, 0x03, 0x77, 0x2e,	\
++					 0xcc, 0x3d, 0xa5, 0x31)
+ 
+ struct iwl_uefi_pnvm_mem_desc {
+ 	__le32 addr;
+@@ -61,7 +64,7 @@ void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len)
+ 
+ 	*len = 0;
+ 
+-	data = iwl_uefi_get_variable(IWL_UEFI_OEM_PNVM_NAME, &IWL_EFI_VAR_GUID,
++	data = iwl_uefi_get_variable(IWL_UEFI_OEM_PNVM_NAME, &IWL_EFI_WIFI_GUID,
+ 				     &package_size);
+ 	if (IS_ERR(data)) {
+ 		IWL_DEBUG_FW(trans,
+@@ -76,18 +79,18 @@ void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len)
+ 	return data;
+ }
+ 
+-static
+-void *iwl_uefi_get_verified_variable(struct iwl_trans *trans,
+-				     efi_char16_t *uefi_var_name,
+-				     char *var_name,
+-				     unsigned int expected_size,
+-				     unsigned long *size)
++static void *
++iwl_uefi_get_verified_variable_guid(struct iwl_trans *trans,
++				    efi_guid_t *guid,
++				    efi_char16_t *uefi_var_name,
++				    char *var_name,
++				    unsigned int expected_size,
++				    unsigned long *size)
+ {
+ 	void *var;
+ 	unsigned long var_size;
+ 
+-	var = iwl_uefi_get_variable(uefi_var_name, &IWL_EFI_VAR_GUID,
+-				    &var_size);
++	var = iwl_uefi_get_variable(uefi_var_name, guid, &var_size);
+ 
+ 	if (IS_ERR(var)) {
+ 		IWL_DEBUG_RADIO(trans,
+@@ -112,6 +115,18 @@ void *iwl_uefi_get_verified_variable(struct iwl_trans *trans,
+ 	return var;
+ }
+ 
++static void *
++iwl_uefi_get_verified_variable(struct iwl_trans *trans,
++			       efi_char16_t *uefi_var_name,
++			       char *var_name,
++			       unsigned int expected_size,
++			       unsigned long *size)
++{
++	return iwl_uefi_get_verified_variable_guid(trans, &IWL_EFI_WIFI_GUID,
++						   uefi_var_name, var_name,
++						   expected_size, size);
++}
++
+ int iwl_uefi_handle_tlv_mem_desc(struct iwl_trans *trans, const u8 *data,
+ 				 u32 tlv_len, struct iwl_pnvm_image *pnvm_data)
+ {
+@@ -311,8 +326,9 @@ void iwl_uefi_get_step_table(struct iwl_trans *trans)
+ 	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
+ 		return;
+ 
+-	data = iwl_uefi_get_verified_variable(trans, IWL_UEFI_STEP_NAME,
+-					      "STEP", sizeof(*data), NULL);
++	data = iwl_uefi_get_verified_variable_guid(trans, &IWL_EFI_WIFI_BT_GUID,
++						   IWL_UEFI_STEP_NAME,
++						   "STEP", sizeof(*data), NULL);
+ 	if (IS_ERR(data))
+ 		return;
+ 
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
+index b607961970e970..9b8624304fa308 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
+@@ -530,18 +530,15 @@ static void iwl_mvm_bt_coex_notif_iterator(void *_data, u8 *mac,
+ 					   struct ieee80211_vif *vif)
+ {
+ 	struct iwl_mvm *mvm = _data;
++	struct ieee80211_bss_conf *link_conf;
++	unsigned int link_id;
+ 
+ 	lockdep_assert_held(&mvm->mutex);
+ 
+ 	if (vif->type != NL80211_IFTYPE_STATION)
+ 		return;
+ 
+-	for (int link_id = 0;
+-	     link_id < IEEE80211_MLD_MAX_NUM_LINKS;
+-	     link_id++) {
+-		struct ieee80211_bss_conf *link_conf =
+-			rcu_dereference_check(vif->link_conf[link_id],
+-					      lockdep_is_held(&mvm->mutex));
++	for_each_vif_active_link(vif, link_conf, link_id) {
+ 		struct ieee80211_chanctx_conf *chanctx_conf =
+ 			rcu_dereference_check(link_conf->chanctx_conf,
+ 					      lockdep_is_held(&mvm->mutex));
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+index ca026b5256ce33..5f4942f6cc68e4 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+@@ -1880,7 +1880,9 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
+ 				IWL_DEBUG_TX_REPLY(mvm,
+ 						   "Next reclaimed packet:%d\n",
+ 						   next_reclaimed);
+-				iwl_mvm_count_mpdu(mvmsta, sta_id, 1, true, 0);
++				if (tid < IWL_MAX_TID_COUNT)
++					iwl_mvm_count_mpdu(mvmsta, sta_id, 1,
++							   true, 0);
+ 			} else {
+ 				IWL_DEBUG_TX_REPLY(mvm,
+ 						   "NDP - don't update next_reclaimed\n");
+diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
+index 9d5561f441347b..0ca83f1a3e3ea2 100644
+--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
++++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
+@@ -958,11 +958,11 @@ int mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef,
+ 
+ 	if (chandef->chan != phy->main_chan)
+ 		memset(phy->chan_state, 0, sizeof(*phy->chan_state));
+-	mt76_worker_enable(&dev->tx_worker);
+ 
+ 	ret = dev->drv->set_channel(phy);
+ 
+ 	clear_bit(MT76_RESET, &phy->state);
++	mt76_worker_enable(&dev->tx_worker);
+ 	mt76_worker_schedule(&dev->tx_worker);
+ 
+ 	mutex_unlock(&dev->mutex);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
+index 96e34277fece9b..1cc8fc8fefe740 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
+@@ -1113,7 +1113,7 @@ mt7615_mcu_uni_add_dev(struct mt7615_phy *phy, struct ieee80211_vif *vif,
+ {
+ 	struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ 
+-	return mt76_connac_mcu_uni_add_dev(phy->mt76, &vif->bss_conf,
++	return mt76_connac_mcu_uni_add_dev(phy->mt76, &vif->bss_conf, &mvif->mt76,
+ 					   &mvif->sta.wcid, enable);
+ }
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+index 864246f9408899..7d07e720e4ec1d 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+@@ -1137,10 +1137,10 @@ EXPORT_SYMBOL_GPL(mt76_connac_mcu_wtbl_ba_tlv);
+ 
+ int mt76_connac_mcu_uni_add_dev(struct mt76_phy *phy,
+ 				struct ieee80211_bss_conf *bss_conf,
++				struct mt76_vif *mvif,
+ 				struct mt76_wcid *wcid,
+ 				bool enable)
+ {
+-	struct mt76_vif *mvif = (struct mt76_vif *)bss_conf->vif->drv_priv;
+ 	struct mt76_dev *dev = phy->dev;
+ 	struct {
+ 		struct {
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
+index 1b0e80dfc346b8..57a8340fa70097 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
++++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
+@@ -1938,6 +1938,7 @@ void mt76_connac_mcu_sta_ba_tlv(struct sk_buff *skb,
+ 				bool enable, bool tx);
+ int mt76_connac_mcu_uni_add_dev(struct mt76_phy *phy,
+ 				struct ieee80211_bss_conf *bss_conf,
++				struct mt76_vif *mvif,
+ 				struct mt76_wcid *wcid,
+ 				bool enable);
+ int mt76_connac_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
+index 6bef96e3d2a3d9..77d82ccd73079d 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
+@@ -82,7 +82,7 @@ static ssize_t mt7915_thermal_temp_store(struct device *dev,
+ 		return ret;
+ 
+ 	mutex_lock(&phy->dev->mt76.mutex);
+-	val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 60, 130);
++	val = DIV_ROUND_CLOSEST(clamp_val(val, 60 * 1000, 130 * 1000), 1000);
+ 
+ 	if ((i - 1 == MT7915_CRIT_TEMP_IDX &&
+ 	     val > phy->throttle_temp[MT7915_MAX_TEMP_IDX]) ||
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
+index cf77ce0c875991..799e8d2cc7e6ec 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
+@@ -1388,6 +1388,8 @@ mt7915_mac_restart(struct mt7915_dev *dev)
+ 	if (dev_is_pci(mdev->dev)) {
+ 		mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
+ 		if (dev->hif2) {
++			mt76_wr(dev, MT_PCIE_RECOG_ID,
++				dev->hif2->index | MT_PCIE_RECOG_ID_SEM);
+ 			if (is_mt7915(mdev))
+ 				mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0xff);
+ 			else
+@@ -1442,9 +1444,11 @@ static void
+ mt7915_mac_full_reset(struct mt7915_dev *dev)
+ {
+ 	struct mt76_phy *ext_phy;
++	struct mt7915_phy *phy2;
+ 	int i;
+ 
+ 	ext_phy = dev->mt76.phys[MT_BAND1];
++	phy2 = ext_phy ? ext_phy->priv : NULL;
+ 
+ 	dev->recovery.hw_full_reset = true;
+ 
+@@ -1474,6 +1478,9 @@ mt7915_mac_full_reset(struct mt7915_dev *dev)
+ 
+ 	memset(dev->mt76.wcid_mask, 0, sizeof(dev->mt76.wcid_mask));
+ 	dev->mt76.vif_mask = 0;
++	dev->phy.omac_mask = 0;
++	if (phy2)
++		phy2->omac_mask = 0;
+ 
+ 	i = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7915_WTBL_STA);
+ 	dev->mt76.global_wcid.idx = i;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
+index d75e8dea1fbdc8..8c0d63cebf3e14 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
+@@ -246,8 +246,10 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
+ 	phy->omac_mask |= BIT_ULL(mvif->mt76.omac_idx);
+ 
+ 	idx = mt76_wcid_alloc(dev->mt76.wcid_mask, mt7915_wtbl_size(dev));
+-	if (idx < 0)
+-		return -ENOSPC;
++	if (idx < 0) {
++		ret = -ENOSPC;
++		goto out;
++	}
+ 
+ 	INIT_LIST_HEAD(&mvif->sta.rc_list);
+ 	INIT_LIST_HEAD(&mvif->sta.wcid.poll_list);
+@@ -619,8 +621,9 @@ static void mt7915_bss_info_changed(struct ieee80211_hw *hw,
+ 	if (changed & BSS_CHANGED_ASSOC)
+ 		set_bss_info = vif->cfg.assoc;
+ 	if (changed & BSS_CHANGED_BEACON_ENABLED &&
++	    info->enable_beacon &&
+ 	    vif->type != NL80211_IFTYPE_AP)
+-		set_bss_info = set_sta = info->enable_beacon;
++		set_bss_info = set_sta = 1;
+ 
+ 	if (set_bss_info == 1)
+ 		mt7915_mcu_add_bss_info(phy, vif, true);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
+index 44e112b8b5b368..2e7604eed27b02 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
+@@ -484,7 +484,7 @@ static u32 __mt7915_reg_addr(struct mt7915_dev *dev, u32 addr)
+ 			continue;
+ 
+ 		ofs = addr - dev->reg.map[i].phys;
+-		if (ofs > dev->reg.map[i].size)
++		if (ofs >= dev->reg.map[i].size)
+ 			continue;
+ 
+ 		return dev->reg.map[i].maps + ofs;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
+index ac0b1f0eb27c14..5fe872ef2e939b 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
+@@ -191,6 +191,7 @@ struct mt7915_hif {
+ 	struct device *dev;
+ 	void __iomem *regs;
+ 	int irq;
++	u32 index;
+ };
+ 
+ struct mt7915_phy {
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/pci.c b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c
+index 39132894e8ea29..07b0a5766eab7d 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/pci.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c
+@@ -42,6 +42,7 @@ static struct mt7915_hif *mt7915_pci_get_hif2(u32 idx)
+ 			continue;
+ 
+ 		get_device(hif->dev);
++		hif->index = idx;
+ 		goto out;
+ 	}
+ 	hif = NULL;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
+index 047106b65d2bc6..bd1455698ebe5f 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
+@@ -647,6 +647,7 @@ mt7921_vif_connect_iter(void *priv, u8 *mac,
+ 		ieee80211_disconnect(vif, true);
+ 
+ 	mt76_connac_mcu_uni_add_dev(&dev->mphy, &vif->bss_conf,
++				    &mvif->bss_conf.mt76,
+ 				    &mvif->sta.deflink.wcid, true);
+ 	mt7921_mcu_set_tx(dev, vif);
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+index a7f5bfbc02ed1f..e2dfd3670c4c93 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+@@ -308,6 +308,7 @@ mt7921_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+ 	mvif->bss_conf.mt76.wmm_idx = mvif->bss_conf.mt76.idx % MT76_CONNAC_MAX_WMM_SETS;
+ 
+ 	ret = mt76_connac_mcu_uni_add_dev(&dev->mphy, &vif->bss_conf,
++					  &mvif->bss_conf.mt76,
+ 					  &mvif->sta.deflink.wcid, true);
+ 	if (ret)
+ 		goto out;
+@@ -531,7 +532,13 @@ static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ 	} else {
+ 		if (idx == *wcid_keyidx)
+ 			*wcid_keyidx = -1;
+-		goto out;
++
++		/* For security reasons we don't trigger the key deletion when
++		 * reassociating. But we should trigger the deletion process
++		 * to avoid using an incorrect cipher after disconnection.
++		 */
++		if (vif->type != NL80211_IFTYPE_STATION || vif->cfg.assoc)
++			goto out;
+ 	}
+ 
+ 	mt76_wcid_key_setup(&dev->mt76, wcid, key);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mac.c b/drivers/net/wireless/mediatek/mt76/mt7925/mac.c
+index 634c42bbf23f67..a095fb31e391a1 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7925/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7925/mac.c
+@@ -49,7 +49,7 @@ static void mt7925_mac_sta_poll(struct mt792x_dev *dev)
+ 			break;
+ 		mlink = list_first_entry(&sta_poll_list,
+ 					 struct mt792x_link_sta, wcid.poll_list);
+-		msta = container_of(mlink, struct mt792x_sta, deflink);
++		msta = mlink->sta;
+ 		spin_lock_bh(&dev->mt76.sta_poll_lock);
+ 		list_del_init(&mlink->wcid.poll_list);
+ 		spin_unlock_bh(&dev->mt76.sta_poll_lock);
+@@ -1271,6 +1271,7 @@ mt7925_vif_connect_iter(void *priv, u8 *mac,
+ 	struct mt792x_dev *dev = mvif->phy->dev;
+ 	struct ieee80211_hw *hw = mt76_hw(dev);
+ 	struct ieee80211_bss_conf *bss_conf;
++	struct mt792x_bss_conf *mconf;
+ 	int i;
+ 
+ 	if (vif->type == NL80211_IFTYPE_STATION)
+@@ -1278,8 +1279,9 @@ mt7925_vif_connect_iter(void *priv, u8 *mac,
+ 
+ 	for_each_set_bit(i, &valid, IEEE80211_MLD_MAX_NUM_LINKS) {
+ 		bss_conf = mt792x_vif_to_bss_conf(vif, i);
++		mconf = mt792x_vif_to_link(mvif, i);
+ 
+-		mt76_connac_mcu_uni_add_dev(&dev->mphy, bss_conf,
++		mt76_connac_mcu_uni_add_dev(&dev->mphy, bss_conf, &mconf->mt76,
+ 					    &mvif->sta.deflink.wcid, true);
+ 		mt7925_mcu_set_tx(dev, bss_conf);
+ 	}
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/main.c b/drivers/net/wireless/mediatek/mt76/mt7925/main.c
+index 791c8b00e11264..ddc67423efe2cb 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7925/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7925/main.c
+@@ -365,18 +365,14 @@ static int mt7925_mac_link_bss_add(struct mt792x_dev *dev,
+ 	mconf->mt76.omac_idx = ieee80211_vif_is_mld(vif) ?
+ 			       0 : mconf->mt76.idx;
+ 	mconf->mt76.band_idx = 0xff;
+-	mconf->mt76.wmm_idx = mconf->mt76.idx % MT76_CONNAC_MAX_WMM_SETS;
++	mconf->mt76.wmm_idx = ieee80211_vif_is_mld(vif) ?
++			      0 : mconf->mt76.idx % MT76_CONNAC_MAX_WMM_SETS;
+ 
+ 	if (mvif->phy->mt76->chandef.chan->band != NL80211_BAND_2GHZ)
+ 		mconf->mt76.basic_rates_idx = MT792x_BASIC_RATES_TBL + 4;
+ 	else
+ 		mconf->mt76.basic_rates_idx = MT792x_BASIC_RATES_TBL;
+ 
+-	ret = mt76_connac_mcu_uni_add_dev(&dev->mphy, link_conf,
+-					  &mlink->wcid, true);
+-	if (ret)
+-		goto out;
+-
+ 	dev->mt76.vif_mask |= BIT_ULL(mconf->mt76.idx);
+ 	mvif->phy->omac_mask |= BIT_ULL(mconf->mt76.omac_idx);
+ 
+@@ -384,7 +380,7 @@ static int mt7925_mac_link_bss_add(struct mt792x_dev *dev,
+ 
+ 	INIT_LIST_HEAD(&mlink->wcid.poll_list);
+ 	mlink->wcid.idx = idx;
+-	mlink->wcid.phy_idx = mconf->mt76.band_idx;
++	mlink->wcid.phy_idx = 0;
+ 	mlink->wcid.hw_key_idx = -1;
+ 	mlink->wcid.tx_info |= MT_WCID_TX_INFO_SET;
+ 	mt76_wcid_init(&mlink->wcid);
+@@ -395,6 +391,12 @@ static int mt7925_mac_link_bss_add(struct mt792x_dev *dev,
+ 	ewma_rssi_init(&mconf->rssi);
+ 
+ 	rcu_assign_pointer(dev->mt76.wcid[idx], &mlink->wcid);
++
++	ret = mt76_connac_mcu_uni_add_dev(&dev->mphy, link_conf, &mconf->mt76,
++					  &mlink->wcid, true);
++	if (ret)
++		goto out;
++
+ 	if (vif->txq) {
+ 		mtxq = (struct mt76_txq *)vif->txq->drv_priv;
+ 		mtxq->wcid = idx;
+@@ -837,6 +839,7 @@ static int mt7925_mac_link_sta_add(struct mt76_dev *mdev,
+ 	u8 link_id = link_sta->link_id;
+ 	struct mt792x_link_sta *mlink;
+ 	struct mt792x_sta *msta;
++	struct mt76_wcid *wcid;
+ 	int ret, idx;
+ 
+ 	msta = (struct mt792x_sta *)link_sta->sta->drv_priv;
+@@ -850,11 +853,20 @@ static int mt7925_mac_link_sta_add(struct mt76_dev *mdev,
+ 	INIT_LIST_HEAD(&mlink->wcid.poll_list);
+ 	mlink->wcid.sta = 1;
+ 	mlink->wcid.idx = idx;
+-	mlink->wcid.phy_idx = mconf->mt76.band_idx;
++	mlink->wcid.phy_idx = 0;
+ 	mlink->wcid.tx_info |= MT_WCID_TX_INFO_SET;
+ 	mlink->last_txs = jiffies;
+ 	mlink->wcid.link_id = link_sta->link_id;
+ 	mlink->wcid.link_valid = !!link_sta->sta->valid_links;
++	mlink->sta = msta;
++
++	wcid = &mlink->wcid;
++	ewma_signal_init(&wcid->rssi);
++	rcu_assign_pointer(dev->mt76.wcid[wcid->idx], wcid);
++	mt76_wcid_init(wcid);
++	ewma_avg_signal_init(&mlink->avg_ack_signal);
++	memset(mlink->airtime_ac, 0,
++	       sizeof(msta->deflink.airtime_ac));
+ 
+ 	ret = mt76_connac_pm_wake(&dev->mphy, &dev->pm);
+ 	if (ret)
+@@ -866,9 +878,14 @@ static int mt7925_mac_link_sta_add(struct mt76_dev *mdev,
+ 	link_conf = mt792x_vif_to_bss_conf(vif, link_id);
+ 
+ 	/* should update bss info before STA add */
+-	if (vif->type == NL80211_IFTYPE_STATION && !link_sta->sta->tdls)
+-		mt7925_mcu_add_bss_info(&dev->phy, mconf->mt76.ctx,
+-					link_conf, link_sta, false);
++	if (vif->type == NL80211_IFTYPE_STATION && !link_sta->sta->tdls) {
++		if (ieee80211_vif_is_mld(vif))
++			mt7925_mcu_add_bss_info(&dev->phy, mconf->mt76.ctx,
++						link_conf, link_sta, link_sta != mlink->pri_link);
++		else
++			mt7925_mcu_add_bss_info(&dev->phy, mconf->mt76.ctx,
++						link_conf, link_sta, false);
++	}
+ 
+ 	if (ieee80211_vif_is_mld(vif) &&
+ 	    link_sta == mlink->pri_link) {
+@@ -904,7 +921,6 @@ mt7925_mac_sta_add_links(struct mt792x_dev *dev, struct ieee80211_vif *vif,
+ 			 struct ieee80211_sta *sta, unsigned long new_links)
+ {
+ 	struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv;
+-	struct mt76_wcid *wcid;
+ 	unsigned int link_id;
+ 	int err = 0;
+ 
+@@ -921,14 +937,6 @@ mt7925_mac_sta_add_links(struct mt792x_dev *dev, struct ieee80211_vif *vif,
+ 				err = -ENOMEM;
+ 				break;
+ 			}
+-
+-			wcid = &mlink->wcid;
+-			ewma_signal_init(&wcid->rssi);
+-			rcu_assign_pointer(dev->mt76.wcid[wcid->idx], wcid);
+-			mt76_wcid_init(wcid);
+-			ewma_avg_signal_init(&mlink->avg_ack_signal);
+-			memset(mlink->airtime_ac, 0,
+-			       sizeof(msta->deflink.airtime_ac));
+ 		}
+ 
+ 		msta->valid_links |= BIT(link_id);
+@@ -1141,8 +1149,7 @@ static void mt7925_mac_link_sta_remove(struct mt76_dev *mdev,
+ 		struct mt792x_bss_conf *mconf;
+ 
+ 		mconf = mt792x_link_conf_to_mconf(link_conf);
+-		mt7925_mcu_add_bss_info(&dev->phy, mconf->mt76.ctx, link_conf,
+-					link_sta, false);
++		mt792x_mac_link_bss_remove(dev, mconf, mlink);
+ 	}
+ 
+ 	spin_lock_bh(&mdev->sta_poll_lock);
+@@ -1200,12 +1207,45 @@ void mt7925_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ {
+ 	struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);
+ 	struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv;
++	struct {
++		struct {
++			u8 omac_idx;
++			u8 band_idx;
++			__le16 pad;
++		} __packed hdr;
++		struct req_tlv {
++			__le16 tag;
++			__le16 len;
++			u8 active;
++			u8 link_idx; /* hw link idx */
++			u8 omac_addr[ETH_ALEN];
++		} __packed tlv;
++	} dev_req = {
++		.hdr = {
++			.omac_idx = 0,
++			.band_idx = 0,
++		},
++		.tlv = {
++			.tag = cpu_to_le16(DEV_INFO_ACTIVE),
++			.len = cpu_to_le16(sizeof(struct req_tlv)),
++			.active = true,
++		},
++	};
+ 	unsigned long rem;
+ 
+ 	rem = ieee80211_vif_is_mld(vif) ? msta->valid_links : BIT(0);
+ 
+ 	mt7925_mac_sta_remove_links(dev, vif, sta, rem);
+ 
++	if (ieee80211_vif_is_mld(vif)) {
++		mt7925_mcu_set_dbdc(&dev->mphy, false);
++
++		/* recover the omac address for the legacy interface */
++		memcpy(dev_req.tlv.omac_addr, vif->addr, ETH_ALEN);
++		mt76_mcu_send_msg(mdev, MCU_UNI_CMD(DEV_INFO_UPDATE),
++				  &dev_req, sizeof(dev_req), true);
++	}
++
+ 	if (vif->type == NL80211_IFTYPE_STATION) {
+ 		struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ 
+@@ -1250,22 +1290,22 @@ mt7925_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ 	case IEEE80211_AMPDU_RX_START:
+ 		mt76_rx_aggr_start(&dev->mt76, &msta->deflink.wcid, tid, ssn,
+ 				   params->buf_size);
+-		mt7925_mcu_uni_rx_ba(dev, params, true);
++		mt7925_mcu_uni_rx_ba(dev, vif, params, true);
+ 		break;
+ 	case IEEE80211_AMPDU_RX_STOP:
+ 		mt76_rx_aggr_stop(&dev->mt76, &msta->deflink.wcid, tid);
+-		mt7925_mcu_uni_rx_ba(dev, params, false);
++		mt7925_mcu_uni_rx_ba(dev, vif, params, false);
+ 		break;
+ 	case IEEE80211_AMPDU_TX_OPERATIONAL:
+ 		mtxq->aggr = true;
+ 		mtxq->send_bar = false;
+-		mt7925_mcu_uni_tx_ba(dev, params, true);
++		mt7925_mcu_uni_tx_ba(dev, vif, params, true);
+ 		break;
+ 	case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ 	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ 		mtxq->aggr = false;
+ 		clear_bit(tid, &msta->deflink.wcid.ampdu_state);
+-		mt7925_mcu_uni_tx_ba(dev, params, false);
++		mt7925_mcu_uni_tx_ba(dev, vif, params, false);
+ 		break;
+ 	case IEEE80211_AMPDU_TX_START:
+ 		set_bit(tid, &msta->deflink.wcid.ampdu_state);
+@@ -1274,7 +1314,7 @@ mt7925_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ 	case IEEE80211_AMPDU_TX_STOP_CONT:
+ 		mtxq->aggr = false;
+ 		clear_bit(tid, &msta->deflink.wcid.ampdu_state);
+-		mt7925_mcu_uni_tx_ba(dev, params, false);
++		mt7925_mcu_uni_tx_ba(dev, vif, params, false);
+ 		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ 		break;
+ 	}
+@@ -1895,6 +1935,13 @@ static void mt7925_link_info_changed(struct ieee80211_hw *hw,
+ 	if (changed & (BSS_CHANGED_QOS | BSS_CHANGED_BEACON_ENABLED))
+ 		mt7925_mcu_set_tx(dev, info);
+ 
++	if (changed & BSS_CHANGED_BSSID) {
++		if (ieee80211_vif_is_mld(vif) &&
++		    hweight16(mvif->valid_links) == 2)
++			/* Indicate that the secondary setup is done */
++			mt7925_mcu_uni_bss_bcnft(dev, info, true);
++	}
++
+ 	mt792x_mutex_release(dev);
+ }
+ 
+@@ -1946,6 +1993,8 @@ mt7925_change_vif_links(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ 					     GFP_KERNEL);
+ 			mlink = devm_kzalloc(dev->mt76.dev, sizeof(*mlink),
+ 					     GFP_KERNEL);
++			if (!mconf || !mlink)
++				return -ENOMEM;
+ 		}
+ 
+ 		mconfs[link_id] = mconf;
+@@ -1974,6 +2023,8 @@ mt7925_change_vif_links(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ 			goto free;
+ 
+ 		if (mconf != &mvif->bss_conf) {
++			mt7925_mcu_set_bss_pm(dev, link_conf, true);
++
+ 			err = mt7925_set_mlo_roc(phy, &mvif->bss_conf,
+ 						 vif->active_links);
+ 			if (err < 0)
+@@ -2071,18 +2122,16 @@ static void mt7925_unassign_vif_chanctx(struct ieee80211_hw *hw,
+ 	struct mt792x_chanctx *mctx = (struct mt792x_chanctx *)ctx->drv_priv;
+ 	struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ 	struct mt792x_dev *dev = mt792x_hw_dev(hw);
+-	struct ieee80211_bss_conf *pri_link_conf;
+ 	struct mt792x_bss_conf *mconf;
+ 
+ 	mutex_lock(&dev->mt76.mutex);
+ 
+ 	if (ieee80211_vif_is_mld(vif)) {
+ 		mconf = mt792x_vif_to_link(mvif, link_conf->link_id);
+-		pri_link_conf = mt792x_vif_to_bss_conf(vif, mvif->deflink_id);
+ 
+ 		if (vif->type == NL80211_IFTYPE_STATION &&
+ 		    mconf == &mvif->bss_conf)
+-			mt7925_mcu_add_bss_info(&dev->phy, NULL, pri_link_conf,
++			mt7925_mcu_add_bss_info(&dev->phy, NULL, link_conf,
+ 						NULL, false);
+ 	} else {
+ 		mconf = &mvif->bss_conf;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
+index 748ea6adbc6b39..ce3d8197b026a6 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
+@@ -123,10 +123,8 @@ EXPORT_SYMBOL_GPL(mt7925_mcu_regval);
+ int mt7925_mcu_update_arp_filter(struct mt76_dev *dev,
+ 				 struct ieee80211_bss_conf *link_conf)
+ {
+-	struct ieee80211_vif *mvif = container_of((void *)link_conf->vif,
+-						  struct ieee80211_vif,
+-						  drv_priv);
+ 	struct mt792x_bss_conf *mconf = mt792x_link_conf_to_mconf(link_conf);
++	struct ieee80211_vif *mvif = link_conf->vif;
+ 	struct sk_buff *skb;
+ 	int i, len = min_t(int, mvif->cfg.arp_addr_cnt,
+ 			   IEEE80211_BSS_ARP_ADDR_LIST_LEN);
+@@ -531,10 +529,10 @@ void mt7925_mcu_rx_event(struct mt792x_dev *dev, struct sk_buff *skb)
+ 
+ static int
+ mt7925_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
++		  struct mt76_wcid *wcid,
+ 		  struct ieee80211_ampdu_params *params,
+ 		  bool enable, bool tx)
+ {
+-	struct mt76_wcid *wcid = (struct mt76_wcid *)params->sta->drv_priv;
+ 	struct sta_rec_ba_uni *ba;
+ 	struct sk_buff *skb;
+ 	struct tlv *tlv;
+@@ -562,28 +560,60 @@ mt7925_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
+ 
+ /** starec & wtbl **/
+ int mt7925_mcu_uni_tx_ba(struct mt792x_dev *dev,
++			 struct ieee80211_vif *vif,
+ 			 struct ieee80211_ampdu_params *params,
+ 			 bool enable)
+ {
+ 	struct mt792x_sta *msta = (struct mt792x_sta *)params->sta->drv_priv;
+-	struct mt792x_vif *mvif = msta->vif;
++	struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
++	struct mt792x_link_sta *mlink;
++	struct mt792x_bss_conf *mconf;
++	unsigned long usable_links = ieee80211_vif_usable_links(vif);
++	struct mt76_wcid *wcid;
++	int link_id, ret = 0;
++
++	for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
++		mconf = mt792x_vif_to_link(mvif, link_id);
++		mlink = mt792x_sta_to_link(msta, link_id);
++		wcid = &mlink->wcid;
+ 
+-	if (enable && !params->amsdu)
+-		msta->deflink.wcid.amsdu = false;
++		if (enable && !params->amsdu)
++			mlink->wcid.amsdu = false;
+ 
+-	return mt7925_mcu_sta_ba(&dev->mt76, &mvif->bss_conf.mt76, params,
+-				 enable, true);
++		ret = mt7925_mcu_sta_ba(&dev->mt76, &mconf->mt76, wcid, params,
++					enable, true);
++		if (ret < 0)
++			break;
++	}
++
++	return ret;
+ }
+ 
+ int mt7925_mcu_uni_rx_ba(struct mt792x_dev *dev,
++			 struct ieee80211_vif *vif,
+ 			 struct ieee80211_ampdu_params *params,
+ 			 bool enable)
+ {
+ 	struct mt792x_sta *msta = (struct mt792x_sta *)params->sta->drv_priv;
+-	struct mt792x_vif *mvif = msta->vif;
++	struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
++	struct mt792x_link_sta *mlink;
++	struct mt792x_bss_conf *mconf;
++	unsigned long usable_links = ieee80211_vif_usable_links(vif);
++	struct mt76_wcid *wcid;
++	int link_id, ret = 0;
++
++	for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
++		mconf = mt792x_vif_to_link(mvif, link_id);
++		mlink = mt792x_sta_to_link(msta, link_id);
++		wcid = &mlink->wcid;
++
++		ret = mt7925_mcu_sta_ba(&dev->mt76, &mconf->mt76, wcid, params,
++					enable, false);
++		if (ret < 0)
++			break;
++	}
+ 
+-	return mt7925_mcu_sta_ba(&dev->mt76, &mvif->bss_conf.mt76, params,
+-				 enable, false);
++	return ret;
+ }
+ 
+ static int mt7925_load_clc(struct mt792x_dev *dev, const char *fw_name)
+@@ -638,7 +668,7 @@ static int mt7925_load_clc(struct mt792x_dev *dev, const char *fw_name)
+ 	for (offset = 0; offset < len; offset += le32_to_cpu(clc->len)) {
+ 		clc = (const struct mt7925_clc *)(clc_base + offset);
+ 
+-		if (clc->idx > ARRAY_SIZE(phy->clc))
++		if (clc->idx >= ARRAY_SIZE(phy->clc))
+ 			break;
+ 
+ 		/* do not init buf again if chip reset triggered */
+@@ -823,7 +853,7 @@ mt7925_mcu_get_nic_capability(struct mt792x_dev *dev)
+ 			mt7925_mcu_parse_phy_cap(dev, tlv->data);
+ 			break;
+ 		case MT_NIC_CAP_CHIP_CAP:
+-			memcpy(&dev->phy.chip_cap, (void *)skb->data, sizeof(u64));
++			dev->phy.chip_cap = le64_to_cpu(*(__le64 *)tlv->data);
+ 			break;
+ 		case MT_NIC_CAP_EML_CAP:
+ 			mt7925_mcu_parse_eml_cap(dev, tlv->data);
+@@ -1153,7 +1183,12 @@ int mt7925_mcu_set_mlo_roc(struct mt792x_bss_conf *mconf, u16 sel_links,
+ 			u8 rsv[4];
+ 		} __packed hdr;
+ 		struct roc_acquire_tlv roc[2];
+-	} __packed req;
++	} __packed req = {
++			.roc[0].tag = cpu_to_le16(UNI_ROC_NUM),
++			.roc[0].len = cpu_to_le16(sizeof(struct roc_acquire_tlv)),
++			.roc[1].tag = cpu_to_le16(UNI_ROC_NUM),
++			.roc[1].len = cpu_to_le16(sizeof(struct roc_acquire_tlv))
++	};
+ 
+ 	if (!mconf || hweight16(vif->valid_links) < 2 ||
+ 	    hweight16(sel_links) != 2)
+@@ -1200,6 +1235,8 @@ int mt7925_mcu_set_mlo_roc(struct mt792x_bss_conf *mconf, u16 sel_links,
+ 		req.roc[i].bw_from_ap = CMD_CBW_20MHZ;
+ 		req.roc[i].center_chan = center_ch;
+ 		req.roc[i].center_chan_from_ap = center_ch;
++		req.roc[i].center_chan2 = 0;
++		req.roc[i].center_chan2_from_ap = 0;
+ 
+ 		/* STR : 0xfe indicates BAND_ALL with enabling DBDC
+ 		 * EMLSR : 0xff indicates (BAND_AUTO) without DBDC
+@@ -1215,7 +1252,7 @@ int mt7925_mcu_set_mlo_roc(struct mt792x_bss_conf *mconf, u16 sel_links,
+ 	}
+ 
+ 	return mt76_mcu_send_msg(&mvif->phy->dev->mt76, MCU_UNI_CMD(ROC),
+-				 &req, sizeof(req), false);
++				 &req, sizeof(req), true);
+ }
+ 
+ int mt7925_mcu_set_roc(struct mt792x_phy *phy, struct mt792x_bss_conf *mconf,
+@@ -1264,7 +1301,7 @@ int mt7925_mcu_set_roc(struct mt792x_phy *phy, struct mt792x_bss_conf *mconf,
+ 	}
+ 
+ 	return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(ROC),
+-				 &req, sizeof(req), false);
++				 &req, sizeof(req), true);
+ }
+ 
+ int mt7925_mcu_abort_roc(struct mt792x_phy *phy, struct mt792x_bss_conf *mconf,
+@@ -1294,7 +1331,7 @@ int mt7925_mcu_abort_roc(struct mt792x_phy *phy, struct mt792x_bss_conf *mconf,
+ 	};
+ 
+ 	return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(ROC),
+-				 &req, sizeof(req), false);
++				 &req, sizeof(req), true);
+ }
+ 
+ int mt7925_mcu_set_eeprom(struct mt792x_dev *dev)
+@@ -1357,7 +1394,7 @@ int mt7925_mcu_uni_bss_ps(struct mt792x_dev *dev,
+ 				 &ps_req, sizeof(ps_req), true);
+ }
+ 
+-static int
++int
+ mt7925_mcu_uni_bss_bcnft(struct mt792x_dev *dev,
+ 			 struct ieee80211_bss_conf *link_conf, bool enable)
+ {
+@@ -1447,12 +1484,12 @@ mt7925_mcu_set_bss_pm(struct mt792x_dev *dev,
+ 	int err;
+ 
+ 	err = mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(BSS_INFO_UPDATE),
+-				&req1, sizeof(req1), false);
++				&req1, sizeof(req1), true);
+ 	if (err < 0 || !enable)
+ 		return err;
+ 
+ 	return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(BSS_INFO_UPDATE),
+-				 &req, sizeof(req), false);
++				 &req, sizeof(req), true);
+ }
+ 
+ static void
+@@ -1898,7 +1935,11 @@ int mt7925_mcu_sta_update(struct mt792x_dev *dev,
+ 		mlink = mt792x_sta_to_link(msta, link_sta->link_id);
+ 	}
+ 	info.wcid = link_sta ? &mlink->wcid : &mvif->sta.deflink.wcid;
+-	info.newly = link_sta ? state != MT76_STA_INFO_STATE_ASSOC : true;
++
++	if (link_sta)
++		info.newly = state != MT76_STA_INFO_STATE_ASSOC;
++	else
++		info.newly = state == MT76_STA_INFO_STATE_ASSOC ? false : true;
+ 
+ 	if (ieee80211_vif_is_mld(vif))
+ 		err = mt7925_mcu_mlo_sta_cmd(&dev->mphy, &info);
+@@ -1914,32 +1955,21 @@ int mt7925_mcu_set_beacon_filter(struct mt792x_dev *dev,
+ {
+ #define MT7925_FIF_BIT_CLR		BIT(1)
+ #define MT7925_FIF_BIT_SET		BIT(0)
+-	struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+-	unsigned long valid = ieee80211_vif_is_mld(vif) ?
+-				      mvif->valid_links : BIT(0);
+-	struct ieee80211_bss_conf *bss_conf;
+ 	int err = 0;
+-	int i;
+ 
+ 	if (enable) {
+-		for_each_set_bit(i, &valid, IEEE80211_MLD_MAX_NUM_LINKS) {
+-			bss_conf = mt792x_vif_to_bss_conf(vif, i);
+-			err = mt7925_mcu_uni_bss_bcnft(dev, bss_conf, true);
+-			if (err < 0)
+-				return err;
+-		}
++		err = mt7925_mcu_uni_bss_bcnft(dev, &vif->bss_conf, true);
++		if (err < 0)
++			return err;
+ 
+ 		return mt7925_mcu_set_rxfilter(dev, 0,
+ 					       MT7925_FIF_BIT_SET,
+ 					       MT_WF_RFCR_DROP_OTHER_BEACON);
+ 	}
+ 
+-	for_each_set_bit(i, &valid, IEEE80211_MLD_MAX_NUM_LINKS) {
+-		bss_conf = mt792x_vif_to_bss_conf(vif, i);
+-		err = mt7925_mcu_set_bss_pm(dev, bss_conf, false);
+-		if (err)
+-			return err;
+-	}
++	err = mt7925_mcu_set_bss_pm(dev, &vif->bss_conf, false);
++	if (err < 0)
++		return err;
+ 
+ 	return mt7925_mcu_set_rxfilter(dev, 0,
+ 				       MT7925_FIF_BIT_CLR,
+@@ -1976,8 +2006,6 @@ int mt7925_get_txpwr_info(struct mt792x_dev *dev, u8 band_idx, struct mt7925_txp
+ int mt7925_mcu_set_sniffer(struct mt792x_dev *dev, struct ieee80211_vif *vif,
+ 			   bool enable)
+ {
+-	struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+-
+ 	struct {
+ 		struct {
+ 			u8 band_idx;
+@@ -1991,7 +2019,7 @@ int mt7925_mcu_set_sniffer(struct mt792x_dev *dev, struct ieee80211_vif *vif,
+ 		} __packed enable;
+ 	} __packed req = {
+ 		.hdr = {
+-			.band_idx = mvif->bss_conf.mt76.band_idx,
++			.band_idx = 0,
+ 		},
+ 		.enable = {
+ 			.tag = cpu_to_le16(UNI_SNIFFER_ENABLE),
+@@ -2050,7 +2078,7 @@ int mt7925_mcu_config_sniffer(struct mt792x_vif *vif,
+ 		} __packed tlv;
+ 	} __packed req = {
+ 		.hdr = {
+-			.band_idx = vif->bss_conf.mt76.band_idx,
++			.band_idx = 0,
+ 		},
+ 		.tlv = {
+ 			.tag = cpu_to_le16(UNI_SNIFFER_CONFIG),
+@@ -2179,11 +2207,27 @@ void mt7925_mcu_bss_rlm_tlv(struct sk_buff *skb, struct mt76_phy *phy,
+ 	req = (struct bss_rlm_tlv *)tlv;
+ 	req->control_channel = chandef->chan->hw_value;
+ 	req->center_chan = ieee80211_frequency_to_channel(freq1);
+-	req->center_chan2 = ieee80211_frequency_to_channel(freq2);
++	req->center_chan2 = 0;
+ 	req->tx_streams = hweight8(phy->antenna_mask);
+ 	req->ht_op_info = 4; /* set HT 40M allowed */
+ 	req->rx_streams = hweight8(phy->antenna_mask);
+-	req->band = band;
++	req->center_chan2 = 0;
++	req->sco = 0;
++	req->band = 1;
++
++	switch (band) {
++	case NL80211_BAND_2GHZ:
++		req->band = 1;
++		break;
++	case NL80211_BAND_5GHZ:
++		req->band = 2;
++		break;
++	case NL80211_BAND_6GHZ:
++		req->band = 3;
++		break;
++	default:
++		break;
++	}
+ 
+ 	switch (chandef->width) {
+ 	case NL80211_CHAN_WIDTH_40:
+@@ -2194,6 +2238,7 @@ void mt7925_mcu_bss_rlm_tlv(struct sk_buff *skb, struct mt76_phy *phy,
+ 		break;
+ 	case NL80211_CHAN_WIDTH_80P80:
+ 		req->bw = CMD_CBW_8080MHZ;
++		req->center_chan2 = ieee80211_frequency_to_channel(freq2);
+ 		break;
+ 	case NL80211_CHAN_WIDTH_160:
+ 		req->bw = CMD_CBW_160MHZ;
+@@ -2463,6 +2508,7 @@ static void
+ mt7925_mcu_bss_mld_tlv(struct sk_buff *skb,
+ 		       struct ieee80211_bss_conf *link_conf)
+ {
++	struct ieee80211_vif *vif = link_conf->vif;
+ 	struct mt792x_bss_conf *mconf = mt792x_link_conf_to_mconf(link_conf);
+ 	struct mt792x_vif *mvif = (struct mt792x_vif *)link_conf->vif->drv_priv;
+ 	struct bss_mld_tlv *mld;
+@@ -2483,7 +2529,7 @@ mt7925_mcu_bss_mld_tlv(struct sk_buff *skb,
+ 	mld->eml_enable = !!(link_conf->vif->cfg.eml_cap &
+ 			     IEEE80211_EML_CAP_EMLSR_SUPP);
+ 
+-	memcpy(mld->mac_addr, link_conf->addr, ETH_ALEN);
++	memcpy(mld->mac_addr, vif->addr, ETH_ALEN);
+ }
+ 
+ static void
+@@ -2614,7 +2660,7 @@ int mt7925_mcu_add_bss_info(struct mt792x_phy *phy,
+ 				     MCU_UNI_CMD(BSS_INFO_UPDATE), true);
+ }
+ 
+-int mt7925_mcu_set_dbdc(struct mt76_phy *phy)
++int mt7925_mcu_set_dbdc(struct mt76_phy *phy, bool enable)
+ {
+ 	struct mt76_dev *mdev = phy->dev;
+ 
+@@ -2634,7 +2680,7 @@ int mt7925_mcu_set_dbdc(struct mt76_phy *phy)
+ 	tlv = mt76_connac_mcu_add_tlv(skb, UNI_MBMC_SETTING, sizeof(*conf));
+ 	conf = (struct mbmc_conf_tlv *)tlv;
+ 
+-	conf->mbmc_en = 1;
++	conf->mbmc_en = enable;
+ 	conf->band = 0; /* unused */
+ 
+ 	err = mt76_mcu_skb_send_msg(mdev, skb, MCU_UNI_CMD(SET_DBDC_PARMS),
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h
+index ac53bdc993322f..fe6a613ba00889 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h
+@@ -616,7 +616,7 @@ mt7925_mcu_get_cipher(int cipher)
+ 	}
+ }
+ 
+-int mt7925_mcu_set_dbdc(struct mt76_phy *phy);
++int mt7925_mcu_set_dbdc(struct mt76_phy *phy, bool enable);
+ int mt7925_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
+ 		       struct ieee80211_scan_request *scan_req);
+ int mt7925_mcu_cancel_hw_scan(struct mt76_phy *phy,
+@@ -643,4 +643,7 @@ int mt7925_mcu_set_chctx(struct mt76_phy *phy, struct mt76_vif *mvif,
+ int mt7925_mcu_set_rate_txpower(struct mt76_phy *phy);
+ int mt7925_mcu_update_arp_filter(struct mt76_dev *dev,
+ 				 struct ieee80211_bss_conf *link_conf);
++int
++mt7925_mcu_uni_bss_bcnft(struct mt792x_dev *dev,
++			 struct ieee80211_bss_conf *link_conf, bool enable);
+ #endif
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h b/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
+index f5c02e5f506633..df3c705d1cb3fa 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
+@@ -242,9 +242,11 @@ int mt7925_mcu_set_beacon_filter(struct mt792x_dev *dev,
+ 				 struct ieee80211_vif *vif,
+ 				 bool enable);
+ int mt7925_mcu_uni_tx_ba(struct mt792x_dev *dev,
++			 struct ieee80211_vif *vif,
+ 			 struct ieee80211_ampdu_params *params,
+ 			 bool enable);
+ int mt7925_mcu_uni_rx_ba(struct mt792x_dev *dev,
++			 struct ieee80211_vif *vif,
+ 			 struct ieee80211_ampdu_params *params,
+ 			 bool enable);
+ void mt7925_scan_work(struct work_struct *work);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt792x.h b/drivers/net/wireless/mediatek/mt76/mt792x.h
+index ab12616ec2b87c..2b8b9b2977f74a 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt792x.h
++++ b/drivers/net/wireless/mediatek/mt76/mt792x.h
+@@ -241,6 +241,7 @@ static inline struct mt792x_bss_conf *
+ mt792x_vif_to_link(struct mt792x_vif *mvif, u8 link_id)
+ {
+ 	struct ieee80211_vif *vif;
++	struct mt792x_bss_conf *bss_conf;
+ 
+ 	vif = container_of((void *)mvif, struct ieee80211_vif, drv_priv);
+ 
+@@ -248,8 +249,10 @@ mt792x_vif_to_link(struct mt792x_vif *mvif, u8 link_id)
+ 	    link_id >= IEEE80211_LINK_UNSPECIFIED)
+ 		return &mvif->bss_conf;
+ 
+-	return rcu_dereference_protected(mvif->link_conf[link_id],
+-		lockdep_is_held(&mvif->phy->dev->mt76.mutex));
++	bss_conf = rcu_dereference_protected(mvif->link_conf[link_id],
++					     lockdep_is_held(&mvif->phy->dev->mt76.mutex));
++
++	return bss_conf ? bss_conf : &mvif->bss_conf;
+ }
+ 
+ static inline struct mt792x_link_sta *
+diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_core.c b/drivers/net/wireless/mediatek/mt76/mt792x_core.c
+index 78fe37c2e07b59..b87eed4d168df5 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt792x_core.c
++++ b/drivers/net/wireless/mediatek/mt76/mt792x_core.c
+@@ -147,7 +147,8 @@ void mt792x_mac_link_bss_remove(struct mt792x_dev *dev,
+ 	link_conf = mt792x_vif_to_bss_conf(vif, mconf->link_id);
+ 
+ 	mt76_connac_free_pending_tx_skbs(&dev->pm, &mlink->wcid);
+-	mt76_connac_mcu_uni_add_dev(&dev->mphy, link_conf, &mlink->wcid, false);
++	mt76_connac_mcu_uni_add_dev(&dev->mphy, link_conf, &mconf->mt76,
++				    &mlink->wcid, false);
+ 
+ 	rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_mac.c b/drivers/net/wireless/mediatek/mt76/mt792x_mac.c
+index 106273935b267f..05978d9c7b916a 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt792x_mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt792x_mac.c
+@@ -153,7 +153,7 @@ struct mt76_wcid *mt792x_rx_get_wcid(struct mt792x_dev *dev, u16 idx,
+ 		return NULL;
+ 
+ 	link = container_of(wcid, struct mt792x_link_sta, wcid);
+-	sta = container_of(link, struct mt792x_sta, deflink);
++	sta = link->sta;
+ 	if (!sta->vif)
+ 		return NULL;
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/init.c b/drivers/net/wireless/mediatek/mt76/mt7996/init.c
+index 5e96973226bbb5..d8a013812d1e37 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/init.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/init.c
+@@ -16,9 +16,6 @@
+ 
+ static const struct ieee80211_iface_limit if_limits[] = {
+ 	{
+-		.max = 1,
+-		.types = BIT(NL80211_IFTYPE_ADHOC)
+-	}, {
+ 		.max = 16,
+ 		.types = BIT(NL80211_IFTYPE_AP)
+ #ifdef CONFIG_MAC80211_MESH
+@@ -85,7 +82,7 @@ static ssize_t mt7996_thermal_temp_store(struct device *dev,
+ 		return ret;
+ 
+ 	mutex_lock(&phy->dev->mt76.mutex);
+-	val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 40, 130);
++	val = DIV_ROUND_CLOSEST(clamp_val(val, 40 * 1000, 130 * 1000), 1000);
+ 
+ 	/* add a safety margin ~10 */
+ 	if ((i - 1 == MT7996_CRIT_TEMP_IDX &&
+@@ -1080,6 +1077,9 @@ mt7996_init_he_caps(struct mt7996_phy *phy, enum nl80211_band band,
+ 	he_cap_elem->phy_cap_info[2] = IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ |
+ 				       IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ;
+ 
++	he_cap_elem->phy_cap_info[7] =
++			IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI;
++
+ 	switch (iftype) {
+ 	case NL80211_IFTYPE_AP:
+ 		he_cap_elem->mac_cap_info[0] |= IEEE80211_HE_MAC_CAP0_TWT_RES;
+@@ -1119,8 +1119,7 @@ mt7996_init_he_caps(struct mt7996_phy *phy, enum nl80211_band band,
+ 			IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE |
+ 			IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT;
+ 		he_cap_elem->phy_cap_info[7] |=
+-			IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_SUPP |
+-			IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI;
++			IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_SUPP;
+ 		he_cap_elem->phy_cap_info[8] |=
+ 			IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G |
+ 			IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
+@@ -1190,7 +1189,9 @@ mt7996_init_eht_caps(struct mt7996_phy *phy, enum nl80211_band band,
+ 
+ 	eht_cap_elem->mac_cap_info[0] =
+ 		IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS |
+-		IEEE80211_EHT_MAC_CAP0_OM_CONTROL;
++		IEEE80211_EHT_MAC_CAP0_OM_CONTROL |
++		u8_encode_bits(IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_11454,
++			       IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_MASK);
+ 
+ 	eht_cap_elem->phy_cap_info[0] =
+ 		IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI |
+@@ -1233,21 +1234,20 @@ mt7996_init_eht_caps(struct mt7996_phy *phy, enum nl80211_band band,
+ 		IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK;
+ 
+ 	eht_cap_elem->phy_cap_info[4] =
++		IEEE80211_EHT_PHY_CAP4_EHT_MU_PPDU_4_EHT_LTF_08_GI |
+ 		u8_encode_bits(min_t(int, sts - 1, 2),
+ 			       IEEE80211_EHT_PHY_CAP4_MAX_NC_MASK);
+ 
+ 	eht_cap_elem->phy_cap_info[5] =
+ 		u8_encode_bits(IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_16US,
+ 			       IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK) |
+-		u8_encode_bits(u8_get_bits(0x11, GENMASK(1, 0)),
++		u8_encode_bits(u8_get_bits(1, GENMASK(1, 0)),
+ 			       IEEE80211_EHT_PHY_CAP5_MAX_NUM_SUPP_EHT_LTF_MASK);
+ 
+ 	val = width == NL80211_CHAN_WIDTH_320 ? 0xf :
+ 	      width == NL80211_CHAN_WIDTH_160 ? 0x7 :
+ 	      width == NL80211_CHAN_WIDTH_80 ? 0x3 : 0x1;
+ 	eht_cap_elem->phy_cap_info[6] =
+-		u8_encode_bits(u8_get_bits(0x11, GENMASK(4, 2)),
+-			       IEEE80211_EHT_PHY_CAP6_MAX_NUM_SUPP_EHT_LTF_MASK) |
+ 		u8_encode_bits(val, IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK);
+ 
+ 	val = u8_encode_bits(nss, IEEE80211_EHT_MCS_NSS_RX) |
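
/*
 * Illustrative sketch in plain C (not part of the patch): the thermal
 * hunk above moves clamp_val() inside DIV_ROUND_CLOSEST() so the bound
 * check happens in the caller's millidegree units before any
 * arithmetic. One plausible motivation is that DIV_ROUND_CLOSEST(val,
 * 1000) adds half the divisor to val before dividing, which can
 * overflow for extreme user-supplied values; clamping first bounds the
 * operand. The helpers below are simplified stand-ins for the kernel
 * macros, valid for positive arguments only.
 */
#include <stdio.h>

static long div_round_closest(long x, long d) { return (x + d / 2) / d; }

static long clamp_val(long v, long lo, long hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	long val = 2000000000;	/* hostile sysfs input, in millidegrees */

	/* order used by the fix: clamp in input units, then convert */
	printf("%ld C\n",
	       div_round_closest(clamp_val(val, 40 * 1000L, 130 * 1000L), 1000));
	return 0;
}
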
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
+index 0d21414e2c884a..f590902fdeea37 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
+@@ -819,6 +819,7 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
+ 			   struct ieee80211_key_conf *key, int pid,
+ 			   enum mt76_txq_id qid, u32 changed)
+ {
++	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ 	struct ieee80211_vif *vif = info->control.vif;
+ 	u8 band_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2;
+@@ -886,8 +887,9 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
+ 	val = MT_TXD6_DIS_MAT | MT_TXD6_DAS;
+ 	if (is_mt7996(&dev->mt76))
+ 		val |= FIELD_PREP(MT_TXD6_MSDU_CNT, 1);
+-	else
++	else if (is_8023 || !ieee80211_is_mgmt(hdr->frame_control))
+ 		val |= FIELD_PREP(MT_TXD6_MSDU_CNT_V2, 1);
++
+ 	txwi[6] = cpu_to_le32(val);
+ 	txwi[7] = 0;
+ 
+@@ -897,7 +899,6 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
+ 		mt7996_mac_write_txwi_80211(dev, txwi, skb, key);
+ 
+ 	if (txwi[1] & cpu_to_le32(MT_TXD1_FIXED_RATE)) {
+-		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ 		bool mcast = ieee80211_is_data(hdr->frame_control) &&
+ 			     is_multicast_ether_addr(hdr->addr1);
+ 		u8 idx = MT7996_BASIC_RATES_TBL;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/main.c b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
+index 39f071ece35e6e..4d11083b86c092 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
+@@ -496,8 +496,7 @@ static void mt7996_configure_filter(struct ieee80211_hw *hw,
+ 
+ 	MT76_FILTER(CONTROL, MT_WF_RFCR_DROP_CTS |
+ 			     MT_WF_RFCR_DROP_RTS |
+-			     MT_WF_RFCR_DROP_CTL_RSV |
+-			     MT_WF_RFCR_DROP_NDPA);
++			     MT_WF_RFCR_DROP_CTL_RSV);
+ 
+ 	*total_flags = flags;
+ 	mt76_wr(dev, MT_WF_RFCR(phy->mt76->band_idx), phy->rxfilter);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
+index 6c445a9dbc03d8..265958f7b78711 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
+@@ -2070,7 +2070,7 @@ mt7996_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7996_dev *dev,
+ 			cap |= STA_CAP_VHT_TX_STBC;
+ 		if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_1)
+ 			cap |= STA_CAP_VHT_RX_STBC;
+-		if (vif->bss_conf.vht_ldpc &&
++		if ((vif->type != NL80211_IFTYPE_AP || vif->bss_conf.vht_ldpc) &&
+ 		    (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC))
+ 			cap |= STA_CAP_VHT_LDPC;
+ 
+@@ -3666,6 +3666,13 @@ int mt7996_mcu_get_chip_config(struct mt7996_dev *dev, u32 *cap)
+ 
+ int mt7996_mcu_get_chan_mib_info(struct mt7996_phy *phy, bool chan_switch)
+ {
++	enum {
++		IDX_TX_TIME,
++		IDX_RX_TIME,
++		IDX_OBSS_AIRTIME,
++		IDX_NON_WIFI_TIME,
++		IDX_NUM
++	};
+ 	struct {
+ 		struct {
+ 			u8 band;
+@@ -3675,16 +3682,15 @@ int mt7996_mcu_get_chan_mib_info(struct mt7996_phy *phy, bool chan_switch)
+ 			__le16 tag;
+ 			__le16 len;
+ 			__le32 offs;
+-		} data[4];
++		} data[IDX_NUM];
+ 	} __packed req = {
+ 		.hdr.band = phy->mt76->band_idx,
+ 	};
+-	/* strict order */
+ 	static const u32 offs[] = {
+-		UNI_MIB_TX_TIME,
+-		UNI_MIB_RX_TIME,
+-		UNI_MIB_OBSS_AIRTIME,
+-		UNI_MIB_NON_WIFI_TIME,
++		[IDX_TX_TIME] = UNI_MIB_TX_TIME,
++		[IDX_RX_TIME] = UNI_MIB_RX_TIME,
++		[IDX_OBSS_AIRTIME] = UNI_MIB_OBSS_AIRTIME,
++		[IDX_NON_WIFI_TIME] = UNI_MIB_NON_WIFI_TIME,
+ 	};
+ 	struct mt76_channel_state *state = phy->mt76->chan_state;
+ 	struct mt76_channel_state *state_ts = &phy->state_ts;
+@@ -3693,7 +3699,7 @@ int mt7996_mcu_get_chan_mib_info(struct mt7996_phy *phy, bool chan_switch)
+ 	struct sk_buff *skb;
+ 	int i, ret;
+ 
+-	for (i = 0; i < 4; i++) {
++	for (i = 0; i < IDX_NUM; i++) {
+ 		req.data[i].tag = cpu_to_le16(UNI_CMD_MIB_DATA);
+ 		req.data[i].len = cpu_to_le16(sizeof(req.data[i]));
+ 		req.data[i].offs = cpu_to_le32(offs[i]);
+@@ -3712,17 +3718,24 @@ int mt7996_mcu_get_chan_mib_info(struct mt7996_phy *phy, bool chan_switch)
+ 		goto out;
+ 
+ #define __res_u64(s) le64_to_cpu(res[s].data)
+-	state->cc_tx += __res_u64(1) - state_ts->cc_tx;
+-	state->cc_bss_rx += __res_u64(2) - state_ts->cc_bss_rx;
+-	state->cc_rx += __res_u64(2) + __res_u64(3) - state_ts->cc_rx;
+-	state->cc_busy += __res_u64(0) + __res_u64(1) + __res_u64(2) + __res_u64(3) -
++	state->cc_tx += __res_u64(IDX_TX_TIME) - state_ts->cc_tx;
++	state->cc_bss_rx += __res_u64(IDX_RX_TIME) - state_ts->cc_bss_rx;
++	state->cc_rx += __res_u64(IDX_RX_TIME) +
++			__res_u64(IDX_OBSS_AIRTIME) -
++			state_ts->cc_rx;
++	state->cc_busy += __res_u64(IDX_TX_TIME) +
++			  __res_u64(IDX_RX_TIME) +
++			  __res_u64(IDX_OBSS_AIRTIME) +
++			  __res_u64(IDX_NON_WIFI_TIME) -
+ 			  state_ts->cc_busy;
+-
+ out:
+-	state_ts->cc_tx = __res_u64(1);
+-	state_ts->cc_bss_rx = __res_u64(2);
+-	state_ts->cc_rx = __res_u64(2) + __res_u64(3);
+-	state_ts->cc_busy = __res_u64(0) + __res_u64(1) + __res_u64(2) + __res_u64(3);
++	state_ts->cc_tx = __res_u64(IDX_TX_TIME);
++	state_ts->cc_bss_rx = __res_u64(IDX_RX_TIME);
++	state_ts->cc_rx = __res_u64(IDX_RX_TIME) + __res_u64(IDX_OBSS_AIRTIME);
++	state_ts->cc_busy = __res_u64(IDX_TX_TIME) +
++			    __res_u64(IDX_RX_TIME) +
++			    __res_u64(IDX_OBSS_AIRTIME) +
++			    __res_u64(IDX_NON_WIFI_TIME);
+ #undef __res_u64
+ 
+ 	dev_kfree_skb(skb);
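
/*
 * Sketch of the pattern the mcu.c hunks above adopt (assumed
 * simplification, not driver code): a local enum plus designated
 * initializers replaces the old "strict order" comment and the magic
 * __res_u64(0..3) indices, so each table slot is self-documenting and
 * the loop bound tracks the enum automatically.
 */
#include <stdio.h>

enum { IDX_TX_TIME, IDX_RX_TIME, IDX_OBSS_AIRTIME, IDX_NON_WIFI_TIME, IDX_NUM };

static const unsigned int offs[IDX_NUM] = {
	[IDX_TX_TIME]		= 0x10,	/* placeholder offsets */
	[IDX_RX_TIME]		= 0x14,
	[IDX_OBSS_AIRTIME]	= 0x18,
	[IDX_NON_WIFI_TIME]	= 0x1c,
};

int main(void)
{
	for (int i = 0; i < IDX_NUM; i++)
		printf("data[%d] -> offset 0x%x\n", i, offs[i]);
	return 0;
}
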
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
+index 40e45fb2b62607..442f72450352b0 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
+@@ -177,7 +177,7 @@ static u32 __mt7996_reg_addr(struct mt7996_dev *dev, u32 addr)
+ 			continue;
+ 
+ 		ofs = addr - dev->reg.map[i].phys;
+-		if (ofs > dev->reg.map[i].size)
++		if (ofs >= dev->reg.map[i].size)
+ 			continue;
+ 
+ 		return dev->reg.map[i].mapped + ofs;
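
/*
 * Illustrative note as code (not driver code): the mmio.c fix above is
 * the classic half-open interval check. For a mapping of `size` bytes,
 * valid offsets are 0..size-1, so the rejection test must be
 * `ofs >= size`; the old `ofs > size` accepted one byte past the end.
 */
#include <assert.h>
#include <stddef.h>

static int in_window(size_t ofs, size_t size)
{
	return ofs < size;	/* i.e. reject ofs >= size */
}

int main(void)
{
	assert(in_window(0, 0x10000));
	assert(in_window(0xffff, 0x10000));
	assert(!in_window(0x10000, 0x10000));	/* the case `>` let through */
	return 0;
}
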
+diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
+index 58ff068233894e..f9e67b8c3b3c89 100644
+--- a/drivers/net/wireless/mediatek/mt76/usb.c
++++ b/drivers/net/wireless/mediatek/mt76/usb.c
+@@ -33,9 +33,9 @@ int __mt76u_vendor_request(struct mt76_dev *dev, u8 req, u8 req_type,
+ 
+ 		ret = usb_control_msg(udev, pipe, req, req_type, val,
+ 				      offset, buf, len, MT_VEND_REQ_TOUT_MS);
+-		if (ret == -ENODEV)
++		if (ret == -ENODEV || ret == -EPROTO)
+ 			set_bit(MT76_REMOVED, &dev->phy.state);
+-		if (ret >= 0 || ret == -ENODEV)
++		if (ret >= 0 || ret == -ENODEV || ret == -EPROTO)
+ 			return ret;
+ 		usleep_range(5000, 10000);
+ 	}
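
/*
 * Sketch of the retry policy the usb.c hunk above tightens (assumed
 * shape, not the mt76 code): transient failures are retried, but
 * errors that mean the device is gone or the protocol is wedged
 * (-ENODEV, and now -EPROTO) abort immediately, since retrying them
 * only delays teardown.
 */
#include <errno.h>
#include <stdio.h>

static int do_request(int attempt)
{
	return attempt < 2 ? -EAGAIN : -EPROTO;	/* stand-in transfer */
}

int main(void)
{
	int ret = -EAGAIN;

	for (int i = 0; i < 3 && ret < 0; i++) {
		ret = do_request(i);
		if (ret == -ENODEV || ret == -EPROTO)
			break;	/* fatal: no point retrying */
	}
	printf("final ret = %d\n", ret);
	return 0;
}
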
+diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
+index aab4605de9c47c..ff61867d142fa4 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/base.c
++++ b/drivers/net/wireless/realtek/rtlwifi/base.c
+@@ -575,9 +575,15 @@ static void rtl_free_entries_from_ack_queue(struct ieee80211_hw *hw,
+ 
+ void rtl_deinit_core(struct ieee80211_hw *hw)
+ {
++	struct rtl_priv *rtlpriv = rtl_priv(hw);
++
+ 	rtl_c2hcmd_launcher(hw, 0);
+ 	rtl_free_entries_from_scan_list(hw);
+ 	rtl_free_entries_from_ack_queue(hw, false);
++	if (rtlpriv->works.rtl_wq) {
++		destroy_workqueue(rtlpriv->works.rtl_wq);
++		rtlpriv->works.rtl_wq = NULL;
++	}
+ }
+ EXPORT_SYMBOL_GPL(rtl_deinit_core);
+ 
+@@ -2696,9 +2702,6 @@ MODULE_AUTHOR("Larry Finger	<Larry.FInger@lwfinger.net>");
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("Realtek 802.11n PCI wireless core");
+ 
+-struct rtl_global_var rtl_global_var = {};
+-EXPORT_SYMBOL_GPL(rtl_global_var);
+-
+ static int __init rtl_core_module_init(void)
+ {
+ 	BUILD_BUG_ON(TX_PWR_BY_RATE_NUM_RATE < TX_PWR_BY_RATE_NUM_SECTION);
+@@ -2712,10 +2715,6 @@ static int __init rtl_core_module_init(void)
+ 	/* add debugfs */
+ 	rtl_debugfs_add_topdir();
+ 
+-	/* init some global vars */
+-	INIT_LIST_HEAD(&rtl_global_var.glb_priv_list);
+-	spin_lock_init(&rtl_global_var.glb_list_lock);
+-
+ 	return 0;
+ }
+ 
+diff --git a/drivers/net/wireless/realtek/rtlwifi/base.h b/drivers/net/wireless/realtek/rtlwifi/base.h
+index f081a9a90563f5..f3a6a43a42eca8 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/base.h
++++ b/drivers/net/wireless/realtek/rtlwifi/base.h
+@@ -124,7 +124,6 @@ int rtl_send_smps_action(struct ieee80211_hw *hw,
+ u8 *rtl_find_ie(u8 *data, unsigned int len, u8 ie);
+ void rtl_recognize_peer(struct ieee80211_hw *hw, u8 *data, unsigned int len);
+ u8 rtl_tid_to_ac(u8 tid);
+-extern struct rtl_global_var rtl_global_var;
+ void rtl_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation);
+ 
+ #endif
+diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
+index 11709b6c83f1aa..0eafc4d125f91d 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
++++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
+@@ -295,46 +295,6 @@ static bool rtl_pci_get_amd_l1_patch(struct ieee80211_hw *hw)
+ 	return status;
+ }
+ 
+-static bool rtl_pci_check_buddy_priv(struct ieee80211_hw *hw,
+-				     struct rtl_priv **buddy_priv)
+-{
+-	struct rtl_priv *rtlpriv = rtl_priv(hw);
+-	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+-	struct rtl_priv *tpriv = NULL, *iter;
+-	struct rtl_pci_priv *tpcipriv = NULL;
+-
+-	if (!list_empty(&rtlpriv->glb_var->glb_priv_list)) {
+-		list_for_each_entry(iter, &rtlpriv->glb_var->glb_priv_list,
+-				    list) {
+-			tpcipriv = (struct rtl_pci_priv *)iter->priv;
+-			rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+-				"pcipriv->ndis_adapter.funcnumber %x\n",
+-				pcipriv->ndis_adapter.funcnumber);
+-			rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+-				"tpcipriv->ndis_adapter.funcnumber %x\n",
+-				tpcipriv->ndis_adapter.funcnumber);
+-
+-			if (pcipriv->ndis_adapter.busnumber ==
+-			    tpcipriv->ndis_adapter.busnumber &&
+-			    pcipriv->ndis_adapter.devnumber ==
+-			    tpcipriv->ndis_adapter.devnumber &&
+-			    pcipriv->ndis_adapter.funcnumber !=
+-			    tpcipriv->ndis_adapter.funcnumber) {
+-				tpriv = iter;
+-				break;
+-			}
+-		}
+-	}
+-
+-	rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+-		"find_buddy_priv %d\n", tpriv != NULL);
+-
+-	if (tpriv)
+-		*buddy_priv = tpriv;
+-
+-	return tpriv != NULL;
+-}
+-
+ static void rtl_pci_parse_configuration(struct pci_dev *pdev,
+ 					struct ieee80211_hw *hw)
+ {
+@@ -1696,8 +1656,6 @@ static void rtl_pci_deinit(struct ieee80211_hw *hw)
+ 	synchronize_irq(rtlpci->pdev->irq);
+ 	tasklet_kill(&rtlpriv->works.irq_tasklet);
+ 	cancel_work_sync(&rtlpriv->works.lps_change_work);
+-
+-	destroy_workqueue(rtlpriv->works.rtl_wq);
+ }
+ 
+ static int rtl_pci_init(struct ieee80211_hw *hw, struct pci_dev *pdev)
+@@ -2011,7 +1969,6 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
+ 		pcipriv->ndis_adapter.amd_l1_patch);
+ 
+ 	rtl_pci_parse_configuration(pdev, hw);
+-	list_add_tail(&rtlpriv->list, &rtlpriv->glb_var->glb_priv_list);
+ 
+ 	return true;
+ }
+@@ -2158,7 +2115,6 @@ int rtl_pci_probe(struct pci_dev *pdev,
+ 	rtlpriv->rtlhal.interface = INTF_PCI;
+ 	rtlpriv->cfg = (struct rtl_hal_cfg *)(id->driver_data);
+ 	rtlpriv->intf_ops = &rtl_pci_ops;
+-	rtlpriv->glb_var = &rtl_global_var;
+ 	rtl_efuse_ops_init(hw);
+ 
+ 	/* MEM map */
+@@ -2209,7 +2165,7 @@ int rtl_pci_probe(struct pci_dev *pdev,
+ 	if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
+ 		pr_err("Can't init_sw_vars\n");
+ 		err = -ENODEV;
+-		goto fail3;
++		goto fail2;
+ 	}
+ 	rtl_init_sw_leds(hw);
+ 
+@@ -2227,14 +2183,14 @@ int rtl_pci_probe(struct pci_dev *pdev,
+ 	err = rtl_pci_init(hw, pdev);
+ 	if (err) {
+ 		pr_err("Failed to init PCI\n");
+-		goto fail3;
++		goto fail4;
+ 	}
+ 
+ 	err = ieee80211_register_hw(hw);
+ 	if (err) {
+ 		pr_err("Can't register mac80211 hw.\n");
+ 		err = -ENODEV;
+-		goto fail3;
++		goto fail5;
+ 	}
+ 	rtlpriv->mac80211.mac80211_registered = 1;
+ 
+@@ -2257,16 +2213,19 @@ int rtl_pci_probe(struct pci_dev *pdev,
+ 	set_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);
+ 	return 0;
+ 
+-fail3:
+-	pci_set_drvdata(pdev, NULL);
++fail5:
++	rtl_pci_deinit(hw);
++fail4:
+ 	rtl_deinit_core(hw);
++fail3:
++	wait_for_completion(&rtlpriv->firmware_loading_complete);
++	rtlpriv->cfg->ops->deinit_sw_vars(hw);
+ 
+ fail2:
+ 	if (rtlpriv->io.pci_mem_start != 0)
+ 		pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start);
+ 
+ 	pci_release_regions(pdev);
+-	complete(&rtlpriv->firmware_loading_complete);
+ 
+ fail1:
+ 	if (hw)
+@@ -2317,7 +2276,6 @@ void rtl_pci_disconnect(struct pci_dev *pdev)
+ 	if (rtlpci->using_msi)
+ 		pci_disable_msi(rtlpci->pdev);
+ 
+-	list_del(&rtlpriv->list);
+ 	if (rtlpriv->io.pci_mem_start != 0) {
+ 		pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start);
+ 		pci_release_regions(pdev);
+@@ -2376,7 +2334,6 @@ EXPORT_SYMBOL(rtl_pci_resume);
+ const struct rtl_intf_ops rtl_pci_ops = {
+ 	.adapter_start = rtl_pci_start,
+ 	.adapter_stop = rtl_pci_stop,
+-	.check_buddy_priv = rtl_pci_check_buddy_priv,
+ 	.adapter_tx = rtl_pci_tx,
+ 	.flush = rtl_pci_flush,
+ 	.reset_trx_ring = rtl_pci_reset_trx_ring,
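
/*
 * Generic sketch of the error-unwind discipline the probe rework above
 * restores (not the rtlwifi code itself; step names are illustrative):
 * each fail label undoes exactly the steps that succeeded before the
 * failure, in reverse order, so a late failure also tears down the
 * earlier init and an early failure does not touch state that was
 * never set up.
 */
#include <stdio.h>

static int step(const char *name, int ok)
{
	printf("%s: %s\n", name, ok ? "ok" : "FAILED");
	return ok ? 0 : -1;
}

static int probe(void)
{
	if (step("init_sw_vars", 1))
		goto fail_nothing;
	if (step("pci_init", 1))
		goto fail_sw_vars;
	if (step("register_hw", 0))	/* simulate the late failure */
		goto fail_pci;
	return 0;

fail_pci:
	printf("undo pci_init\n");
fail_sw_vars:
	printf("undo init_sw_vars\n");
fail_nothing:
	return -1;
}

int main(void) { return probe() ? 1 : 0; }
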
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
+index bbf8ff63dcedb4..e63c67b1861b5f 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
+@@ -64,22 +64,23 @@ static void rtl92se_fw_cb(const struct firmware *firmware, void *context)
+ 
+ 	rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ 		"Firmware callback routine entered!\n");
+-	complete(&rtlpriv->firmware_loading_complete);
+ 	if (!firmware) {
+ 		pr_err("Firmware %s not available\n", fw_name);
+ 		rtlpriv->max_fw_size = 0;
+-		return;
++		goto exit;
+ 	}
+ 	if (firmware->size > rtlpriv->max_fw_size) {
+ 		pr_err("Firmware is too big!\n");
+ 		rtlpriv->max_fw_size = 0;
+ 		release_firmware(firmware);
+-		return;
++		goto exit;
+ 	}
+ 	pfirmware = (struct rt_firmware *)rtlpriv->rtlhal.pfirmware;
+ 	memcpy(pfirmware->sz_fw_tmpbuffer, firmware->data, firmware->size);
+ 	pfirmware->sz_fw_tmpbufferlen = firmware->size;
+ 	release_firmware(firmware);
++exit:
++	complete(&rtlpriv->firmware_loading_complete);
+ }
+ 
+ static int rtl92s_init_sw_vars(struct ieee80211_hw *hw)
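
/*
 * Sketch with POSIX threads as an assumed analogue of the kernel
 * completion API (not the rtlwifi code): the sw.c hunk above moves
 * complete() to after the firmware buffer is populated, so a waiter
 * that wakes up can trust the data. Signalling first, as the old code
 * did, lets the waiter race with the memcpy.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int fw_ready;
static char fw_buf[16];

static void *fw_callback(void *arg)
{
	pthread_mutex_lock(&lock);
	snprintf(fw_buf, sizeof(fw_buf), "fw-bytes");	/* populate first */
	fw_ready = 1;					/* ...then complete */
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
	return arg;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, fw_callback, NULL);
	pthread_mutex_lock(&lock);
	while (!fw_ready)
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
	printf("waiter sees: %s\n", fw_buf);
	return pthread_join(t, NULL);
}
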
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
+index 1be51ea3f3c820..9eddbada8af12c 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
+@@ -2033,8 +2033,10 @@ static bool _rtl8821ae_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
+ 			if (!_rtl8821ae_check_condition(hw, v1)) {
+ 				i += 2; /* skip the pair of expression*/
+ 				v2 = array[i+1];
+-				while (v2 != 0xDEAD)
++				while (v2 != 0xDEAD) {
+ 					i += 3;
++					v2 = array[i + 1];
++				}
+ 			}
+ 		}
+ 	}
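
/*
 * Minimal reproduction of the loop fixed above, with an assumed table
 * layout: entries are triplets and a 0xDEAD sentinel sits in the
 * second slot of the terminating entry. Without re-reading v2 inside
 * the loop, the old code compared a stale value forever once the skip
 * path was taken.
 */
#include <stdio.h>

int main(void)
{
	const unsigned int array[] = { 1, 2, 3, 4, 5, 6, 0, 0xDEAD, 0 };
	unsigned int i = 0;
	unsigned int v2 = array[i + 1];

	while (v2 != 0xDEAD) {
		i += 3;
		v2 = array[i + 1];	/* the re-read the fix adds */
	}
	printf("sentinel found at entry %u\n", i / 3);
	return 0;
}
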
+diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c
+index d37a017b2b814f..f5718e570011e6 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/usb.c
++++ b/drivers/net/wireless/realtek/rtlwifi/usb.c
+@@ -629,11 +629,6 @@ static void _rtl_usb_cleanup_rx(struct ieee80211_hw *hw)
+ 	tasklet_kill(&rtlusb->rx_work_tasklet);
+ 	cancel_work_sync(&rtlpriv->works.lps_change_work);
+ 
+-	if (rtlpriv->works.rtl_wq) {
+-		destroy_workqueue(rtlpriv->works.rtl_wq);
+-		rtlpriv->works.rtl_wq = NULL;
+-	}
+-
+ 	skb_queue_purge(&rtlusb->rx_queue);
+ 
+ 	while ((urb = usb_get_from_anchor(&rtlusb->rx_cleanup_urbs))) {
+@@ -1028,19 +1023,22 @@ int rtl_usb_probe(struct usb_interface *intf,
+ 	err = ieee80211_register_hw(hw);
+ 	if (err) {
+ 		pr_err("Can't register mac80211 hw.\n");
+-		goto error_out;
++		goto error_init_vars;
+ 	}
+ 	rtlpriv->mac80211.mac80211_registered = 1;
+ 
+ 	set_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);
+ 	return 0;
+ 
++error_init_vars:
++	wait_for_completion(&rtlpriv->firmware_loading_complete);
++	rtlpriv->cfg->ops->deinit_sw_vars(hw);
+ error_out:
++	rtl_usb_deinit(hw);
+ 	rtl_deinit_core(hw);
+ error_out2:
+ 	_rtl_usb_io_handler_release(hw);
+ 	usb_put_dev(udev);
+-	complete(&rtlpriv->firmware_loading_complete);
+ 	kfree(rtlpriv->usb_data);
+ 	ieee80211_free_hw(hw);
+ 	return -ENODEV;
+diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
+index ae6e351bc83c91..f1830ddcdd8c19 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
++++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
+@@ -2270,8 +2270,6 @@ struct rtl_intf_ops {
+ 	/*com */
+ 	int (*adapter_start)(struct ieee80211_hw *hw);
+ 	void (*adapter_stop)(struct ieee80211_hw *hw);
+-	bool (*check_buddy_priv)(struct ieee80211_hw *hw,
+-				 struct rtl_priv **buddy_priv);
+ 
+ 	int (*adapter_tx)(struct ieee80211_hw *hw,
+ 			  struct ieee80211_sta *sta,
+@@ -2514,14 +2512,6 @@ struct dig_t {
+ 	u32 rssi_max;
+ };
+ 
+-struct rtl_global_var {
+-	/* from this list we can get
+-	 * other adapter's rtl_priv
+-	 */
+-	struct list_head glb_priv_list;
+-	spinlock_t glb_list_lock;
+-};
+-
+ #define IN_4WAY_TIMEOUT_TIME	(30 * MSEC_PER_SEC)	/* 30 seconds */
+ 
+ struct rtl_btc_info {
+@@ -2667,9 +2657,7 @@ struct rtl_scan_list {
+ struct rtl_priv {
+ 	struct ieee80211_hw *hw;
+ 	struct completion firmware_loading_complete;
+-	struct list_head list;
+ 	struct rtl_priv *buddy_priv;
+-	struct rtl_global_var *glb_var;
+ 	struct rtl_dmsp_ctl dmsp_ctl;
+ 	struct rtl_locks locks;
+ 	struct rtl_works works;
+diff --git a/drivers/net/wireless/realtek/rtw89/chan.c b/drivers/net/wireless/realtek/rtw89/chan.c
+index ba6332da8019c1..4df4e04c3e67d7 100644
+--- a/drivers/net/wireless/realtek/rtw89/chan.c
++++ b/drivers/net/wireless/realtek/rtw89/chan.c
+@@ -10,6 +10,10 @@
+ #include "ps.h"
+ #include "util.h"
+ 
++static void rtw89_swap_chanctx(struct rtw89_dev *rtwdev,
++			       enum rtw89_chanctx_idx idx1,
++			       enum rtw89_chanctx_idx idx2);
++
+ static enum rtw89_subband rtw89_get_subband_type(enum rtw89_band band,
+ 						 u8 center_chan)
+ {
+@@ -226,11 +230,15 @@ static void rtw89_config_default_chandef(struct rtw89_dev *rtwdev)
+ void rtw89_entity_init(struct rtw89_dev *rtwdev)
+ {
+ 	struct rtw89_hal *hal = &rtwdev->hal;
++	struct rtw89_entity_mgnt *mgnt = &hal->entity_mgnt;
+ 
+ 	hal->entity_pause = false;
+ 	bitmap_zero(hal->entity_map, NUM_OF_RTW89_CHANCTX);
+ 	bitmap_zero(hal->changes, NUM_OF_RTW89_CHANCTX_CHANGES);
+ 	atomic_set(&hal->roc_chanctx_idx, RTW89_CHANCTX_IDLE);
++
++	INIT_LIST_HEAD(&mgnt->active_list);
++
+ 	rtw89_config_default_chandef(rtwdev);
+ }
+ 
+@@ -272,6 +280,143 @@ static void rtw89_entity_calculate_weight(struct rtw89_dev *rtwdev,
+ 	}
+ }
+ 
++static void rtw89_normalize_link_chanctx(struct rtw89_dev *rtwdev,
++					 struct rtw89_vif_link *rtwvif_link)
++{
++	struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
++	struct rtw89_vif_link *cur;
++
++	if (unlikely(!rtwvif_link->chanctx_assigned))
++		return;
++
++	cur = rtw89_vif_get_link_inst(rtwvif, 0);
++	if (!cur || !cur->chanctx_assigned)
++		return;
++
++	if (cur == rtwvif_link)
++		return;
++
++	rtw89_swap_chanctx(rtwdev, rtwvif_link->chanctx_idx, cur->chanctx_idx);
++}
++
++const struct rtw89_chan *__rtw89_mgnt_chan_get(struct rtw89_dev *rtwdev,
++					       const char *caller_message,
++					       u8 link_index)
++{
++	struct rtw89_hal *hal = &rtwdev->hal;
++	struct rtw89_entity_mgnt *mgnt = &hal->entity_mgnt;
++	enum rtw89_chanctx_idx chanctx_idx;
++	enum rtw89_chanctx_idx roc_idx;
++	enum rtw89_entity_mode mode;
++	u8 role_index;
++
++	lockdep_assert_held(&rtwdev->mutex);
++
++	if (unlikely(link_index >= __RTW89_MLD_MAX_LINK_NUM)) {
++		WARN(1, "link index %u is invalid (max link inst num: %d)\n",
++		     link_index, __RTW89_MLD_MAX_LINK_NUM);
++		goto dflt;
++	}
++
++	mode = rtw89_get_entity_mode(rtwdev);
++	switch (mode) {
++	case RTW89_ENTITY_MODE_SCC_OR_SMLD:
++	case RTW89_ENTITY_MODE_MCC:
++		role_index = 0;
++		break;
++	case RTW89_ENTITY_MODE_MCC_PREPARE:
++		role_index = 1;
++		break;
++	default:
++		WARN(1, "Invalid ent mode: %d\n", mode);
++		goto dflt;
++	}
++
++	chanctx_idx = mgnt->chanctx_tbl[role_index][link_index];
++	if (chanctx_idx == RTW89_CHANCTX_IDLE)
++		goto dflt;
++
++	roc_idx = atomic_read(&hal->roc_chanctx_idx);
++	if (roc_idx != RTW89_CHANCTX_IDLE) {
++		/* ROC is ongoing (given ROC runs on RTW89_ROC_BY_LINK_INDEX).
++		 * If @link_index is the same as RTW89_ROC_BY_LINK_INDEX, get
++		 * the ongoing ROC chanctx.
++		 */
++		if (link_index == RTW89_ROC_BY_LINK_INDEX)
++			chanctx_idx = roc_idx;
++	}
++
++	return rtw89_chan_get(rtwdev, chanctx_idx);
++
++dflt:
++	rtw89_debug(rtwdev, RTW89_DBG_CHAN,
++		    "%s (%s): prefetch NULL on link index %u\n",
++		    __func__, caller_message ?: "", link_index);
++
++	return rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
++}
++EXPORT_SYMBOL(__rtw89_mgnt_chan_get);
++
++static void rtw89_entity_recalc_mgnt_roles(struct rtw89_dev *rtwdev)
++{
++	struct rtw89_hal *hal = &rtwdev->hal;
++	struct rtw89_entity_mgnt *mgnt = &hal->entity_mgnt;
++	struct rtw89_vif_link *link;
++	struct rtw89_vif *role;
++	u8 pos = 0;
++	int i, j;
++
++	lockdep_assert_held(&rtwdev->mutex);
++
++	for (i = 0; i < RTW89_MAX_INTERFACE_NUM; i++)
++		mgnt->active_roles[i] = NULL;
++
++	for (i = 0; i < RTW89_MAX_INTERFACE_NUM; i++) {
++		for (j = 0; j < __RTW89_MLD_MAX_LINK_NUM; j++)
++			mgnt->chanctx_tbl[i][j] = RTW89_CHANCTX_IDLE;
++	}
++
++	/* To be consistent with legacy behavior, expect the first active role
++	 * that uses RTW89_CHANCTX_0 to be put at position 0, and make its first
++	 * link instance take RTW89_CHANCTX_0. (normalizing)
++	 */
++	list_for_each_entry(role, &mgnt->active_list, mgnt_entry) {
++		for (i = 0; i < role->links_inst_valid_num; i++) {
++			link = rtw89_vif_get_link_inst(role, i);
++			if (!link || !link->chanctx_assigned)
++				continue;
++
++			if (link->chanctx_idx == RTW89_CHANCTX_0) {
++				rtw89_normalize_link_chanctx(rtwdev, link);
++
++				list_del(&role->mgnt_entry);
++				list_add(&role->mgnt_entry, &mgnt->active_list);
++				goto fill;
++			}
++		}
++	}
++
++fill:
++	list_for_each_entry(role, &mgnt->active_list, mgnt_entry) {
++		if (unlikely(pos >= RTW89_MAX_INTERFACE_NUM)) {
++			rtw89_warn(rtwdev,
++				   "%s: active roles are over max iface num\n",
++				   __func__);
++			break;
++		}
++
++		for (i = 0; i < role->links_inst_valid_num; i++) {
++			link = rtw89_vif_get_link_inst(role, i);
++			if (!link || !link->chanctx_assigned)
++				continue;
++
++			mgnt->chanctx_tbl[pos][i] = link->chanctx_idx;
++		}
++
++		mgnt->active_roles[pos++] = role;
++	}
++}
++
+ enum rtw89_entity_mode rtw89_entity_recalc(struct rtw89_dev *rtwdev)
+ {
+ 	DECLARE_BITMAP(recalc_map, NUM_OF_RTW89_CHANCTX) = {};
+@@ -298,9 +443,14 @@ enum rtw89_entity_mode rtw89_entity_recalc(struct rtw89_dev *rtwdev)
+ 		set_bit(RTW89_CHANCTX_0, recalc_map);
+ 		fallthrough;
+ 	case 1:
+-		mode = RTW89_ENTITY_MODE_SCC;
++		mode = RTW89_ENTITY_MODE_SCC_OR_SMLD;
+ 		break;
+ 	case 2 ... NUM_OF_RTW89_CHANCTX:
++		if (w.active_roles == 1) {
++			mode = RTW89_ENTITY_MODE_SCC_OR_SMLD;
++			break;
++		}
++
+ 		if (w.active_roles != NUM_OF_RTW89_MCC_ROLES) {
+ 			rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ 				    "unhandled ent: %d chanctxs %d roles\n",
+@@ -327,6 +477,8 @@ enum rtw89_entity_mode rtw89_entity_recalc(struct rtw89_dev *rtwdev)
+ 		rtw89_assign_entity_chan(rtwdev, idx, &chan);
+ 	}
+ 
++	rtw89_entity_recalc_mgnt_roles(rtwdev);
++
+ 	if (hal->entity_pause)
+ 		return rtw89_get_entity_mode(rtwdev);
+ 
+@@ -650,7 +802,7 @@ static void rtw89_mcc_fill_role_limit(struct rtw89_dev *rtwdev,
+ 
+ 	mcc_role->limit.max_toa = max_toa_us / 1024;
+ 	mcc_role->limit.max_tob = max_tob_us / 1024;
+-	mcc_role->limit.max_dur = max_dur_us / 1024;
++	mcc_role->limit.max_dur = mcc_role->limit.max_toa + mcc_role->limit.max_tob;
+ 	mcc_role->limit.enable = true;
+ 
+ 	rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+@@ -716,6 +868,7 @@ struct rtw89_mcc_fill_role_selector {
+ };
+ 
+ static_assert((u8)NUM_OF_RTW89_CHANCTX >= NUM_OF_RTW89_MCC_ROLES);
++static_assert(RTW89_MAX_INTERFACE_NUM >= NUM_OF_RTW89_MCC_ROLES);
+ 
+ static int rtw89_mcc_fill_role_iterator(struct rtw89_dev *rtwdev,
+ 					struct rtw89_mcc_role *mcc_role,
+@@ -745,14 +898,18 @@ static int rtw89_mcc_fill_role_iterator(struct rtw89_dev *rtwdev,
+ 
+ static int rtw89_mcc_fill_all_roles(struct rtw89_dev *rtwdev)
+ {
++	struct rtw89_hal *hal = &rtwdev->hal;
++	struct rtw89_entity_mgnt *mgnt = &hal->entity_mgnt;
+ 	struct rtw89_mcc_fill_role_selector sel = {};
+ 	struct rtw89_vif_link *rtwvif_link;
+ 	struct rtw89_vif *rtwvif;
+ 	int ret;
++	int i;
+ 
+-	rtw89_for_each_rtwvif(rtwdev, rtwvif) {
+-		if (!rtw89_vif_is_active_role(rtwvif))
+-			continue;
++	for (i = 0; i < NUM_OF_RTW89_MCC_ROLES; i++) {
++		rtwvif = mgnt->active_roles[i];
++		if (!rtwvif)
++			break;
+ 
+ 		rtwvif_link = rtw89_vif_get_link_inst(rtwvif, 0);
+ 		if (unlikely(!rtwvif_link)) {
+@@ -760,14 +917,7 @@ static int rtw89_mcc_fill_all_roles(struct rtw89_dev *rtwdev)
+ 			continue;
+ 		}
+ 
+-		if (sel.bind_vif[rtwvif_link->chanctx_idx]) {
+-			rtw89_warn(rtwdev,
+-				   "MCC skip extra vif <macid %d> on chanctx[%d]\n",
+-				   rtwvif_link->mac_id, rtwvif_link->chanctx_idx);
+-			continue;
+-		}
+-
+-		sel.bind_vif[rtwvif_link->chanctx_idx] = rtwvif_link;
++		sel.bind_vif[i] = rtwvif_link;
+ 	}
+ 
+ 	ret = rtw89_iterate_mcc_roles(rtwdev, rtw89_mcc_fill_role_iterator, &sel);
+@@ -2381,7 +2531,25 @@ void rtw89_chanctx_pause(struct rtw89_dev *rtwdev,
+ 	hal->entity_pause = true;
+ }
+ 
+-void rtw89_chanctx_proceed(struct rtw89_dev *rtwdev)
++static void rtw89_chanctx_proceed_cb(struct rtw89_dev *rtwdev,
++				     const struct rtw89_chanctx_cb_parm *parm)
++{
++	int ret;
++
++	if (!parm || !parm->cb)
++		return;
++
++	ret = parm->cb(rtwdev, parm->data);
++	if (ret)
++		rtw89_warn(rtwdev, "%s (%s): cb failed: %d\n", __func__,
++			   parm->caller ?: "unknown", ret);
++}
++
++/* Pass @cb_parm if there is a @cb_parm->cb that needs to be invoked right
++ * after rtw89_set_channel() and right before the mode-specific handling.
++ */
++void rtw89_chanctx_proceed(struct rtw89_dev *rtwdev,
++			   const struct rtw89_chanctx_cb_parm *cb_parm)
+ {
+ 	struct rtw89_hal *hal = &rtwdev->hal;
+ 	enum rtw89_entity_mode mode;
+@@ -2389,14 +2557,18 @@ void rtw89_chanctx_proceed(struct rtw89_dev *rtwdev)
+ 
+ 	lockdep_assert_held(&rtwdev->mutex);
+ 
+-	if (!hal->entity_pause)
++	if (unlikely(!hal->entity_pause)) {
++		rtw89_chanctx_proceed_cb(rtwdev, cb_parm);
+ 		return;
++	}
+ 
+ 	rtw89_debug(rtwdev, RTW89_DBG_CHAN, "chanctx proceed\n");
+ 
+ 	hal->entity_pause = false;
+ 	rtw89_set_channel(rtwdev);
+ 
++	rtw89_chanctx_proceed_cb(rtwdev, cb_parm);
++
+ 	mode = rtw89_get_entity_mode(rtwdev);
+ 	switch (mode) {
+ 	case RTW89_ENTITY_MODE_MCC:
+@@ -2501,12 +2673,18 @@ int rtw89_chanctx_ops_assign_vif(struct rtw89_dev *rtwdev,
+ 				 struct ieee80211_chanctx_conf *ctx)
+ {
+ 	struct rtw89_chanctx_cfg *cfg = (struct rtw89_chanctx_cfg *)ctx->drv_priv;
++	struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
++	struct rtw89_hal *hal = &rtwdev->hal;
++	struct rtw89_entity_mgnt *mgnt = &hal->entity_mgnt;
+ 	struct rtw89_entity_weight w = {};
+ 
+ 	rtwvif_link->chanctx_idx = cfg->idx;
+ 	rtwvif_link->chanctx_assigned = true;
+ 	cfg->ref_count++;
+ 
++	if (list_empty(&rtwvif->mgnt_entry))
++		list_add_tail(&rtwvif->mgnt_entry, &mgnt->active_list);
++
+ 	if (cfg->idx == RTW89_CHANCTX_0)
+ 		goto out;
+ 
+@@ -2526,6 +2704,7 @@ void rtw89_chanctx_ops_unassign_vif(struct rtw89_dev *rtwdev,
+ 				    struct ieee80211_chanctx_conf *ctx)
+ {
+ 	struct rtw89_chanctx_cfg *cfg = (struct rtw89_chanctx_cfg *)ctx->drv_priv;
++	struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
+ 	struct rtw89_hal *hal = &rtwdev->hal;
+ 	enum rtw89_chanctx_idx roll;
+ 	enum rtw89_entity_mode cur;
+@@ -2536,6 +2715,9 @@ void rtw89_chanctx_ops_unassign_vif(struct rtw89_dev *rtwdev,
+ 	rtwvif_link->chanctx_assigned = false;
+ 	cfg->ref_count--;
+ 
++	if (!rtw89_vif_is_active_role(rtwvif))
++		list_del_init(&rtwvif->mgnt_entry);
++
+ 	if (cfg->ref_count != 0)
+ 		goto out;
+ 
+diff --git a/drivers/net/wireless/realtek/rtw89/chan.h b/drivers/net/wireless/realtek/rtw89/chan.h
+index 4ed777ea506485..092a6f676894f5 100644
+--- a/drivers/net/wireless/realtek/rtw89/chan.h
++++ b/drivers/net/wireless/realtek/rtw89/chan.h
+@@ -38,23 +38,32 @@ enum rtw89_chanctx_pause_reasons {
+ 	RTW89_CHANCTX_PAUSE_REASON_ROC,
+ };
+ 
++struct rtw89_chanctx_cb_parm {
++	int (*cb)(struct rtw89_dev *rtwdev, void *data);
++	void *data;
++	const char *caller;
++};
++
+ struct rtw89_entity_weight {
+ 	unsigned int active_chanctxs;
+ 	unsigned int active_roles;
+ };
+ 
+-static inline bool rtw89_get_entity_state(struct rtw89_dev *rtwdev)
++static inline bool rtw89_get_entity_state(struct rtw89_dev *rtwdev,
++					  enum rtw89_phy_idx phy_idx)
+ {
+ 	struct rtw89_hal *hal = &rtwdev->hal;
+ 
+-	return READ_ONCE(hal->entity_active);
++	return READ_ONCE(hal->entity_active[phy_idx]);
+ }
+ 
+-static inline void rtw89_set_entity_state(struct rtw89_dev *rtwdev, bool active)
++static inline void rtw89_set_entity_state(struct rtw89_dev *rtwdev,
++					  enum rtw89_phy_idx phy_idx,
++					  bool active)
+ {
+ 	struct rtw89_hal *hal = &rtwdev->hal;
+ 
+-	WRITE_ONCE(hal->entity_active, active);
++	WRITE_ONCE(hal->entity_active[phy_idx], active);
+ }
+ 
+ static inline
+@@ -97,7 +106,16 @@ void rtw89_queue_chanctx_change(struct rtw89_dev *rtwdev,
+ void rtw89_chanctx_track(struct rtw89_dev *rtwdev);
+ void rtw89_chanctx_pause(struct rtw89_dev *rtwdev,
+ 			 enum rtw89_chanctx_pause_reasons rsn);
+-void rtw89_chanctx_proceed(struct rtw89_dev *rtwdev);
++void rtw89_chanctx_proceed(struct rtw89_dev *rtwdev,
++			   const struct rtw89_chanctx_cb_parm *cb_parm);
++
++const struct rtw89_chan *__rtw89_mgnt_chan_get(struct rtw89_dev *rtwdev,
++					       const char *caller_message,
++					       u8 link_index);
++
++#define rtw89_mgnt_chan_get(rtwdev, link_index) \
++	__rtw89_mgnt_chan_get(rtwdev, __func__, link_index)
++
+ int rtw89_chanctx_ops_add(struct rtw89_dev *rtwdev,
+ 			  struct ieee80211_chanctx_conf *ctx);
+ void rtw89_chanctx_ops_remove(struct rtw89_dev *rtwdev,
+diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c
+index 5b8e65f6de6a4e..f82a26be6fa82b 100644
+--- a/drivers/net/wireless/realtek/rtw89/core.c
++++ b/drivers/net/wireless/realtek/rtw89/core.c
+@@ -192,13 +192,13 @@ static const struct ieee80211_iface_combination rtw89_iface_combs[] = {
+ 	{
+ 		.limits = rtw89_iface_limits,
+ 		.n_limits = ARRAY_SIZE(rtw89_iface_limits),
+-		.max_interfaces = 2,
++		.max_interfaces = RTW89_MAX_INTERFACE_NUM,
+ 		.num_different_channels = 1,
+ 	},
+ 	{
+ 		.limits = rtw89_iface_limits_mcc,
+ 		.n_limits = ARRAY_SIZE(rtw89_iface_limits_mcc),
+-		.max_interfaces = 2,
++		.max_interfaces = RTW89_MAX_INTERFACE_NUM,
+ 		.num_different_channels = 2,
+ 	},
+ };
+@@ -341,83 +341,47 @@ void rtw89_get_channel_params(const struct cfg80211_chan_def *chandef,
+ 	rtw89_chan_create(chan, center_chan, channel->hw_value, band, bandwidth);
+ }
+ 
+-void rtw89_core_set_chip_txpwr(struct rtw89_dev *rtwdev)
++static void __rtw89_core_set_chip_txpwr(struct rtw89_dev *rtwdev,
++					const struct rtw89_chan *chan,
++					enum rtw89_phy_idx phy_idx)
+ {
+-	struct rtw89_hal *hal = &rtwdev->hal;
+ 	const struct rtw89_chip_info *chip = rtwdev->chip;
+-	const struct rtw89_chan *chan;
+-	enum rtw89_chanctx_idx chanctx_idx;
+-	enum rtw89_chanctx_idx roc_idx;
+-	enum rtw89_phy_idx phy_idx;
+-	enum rtw89_entity_mode mode;
+ 	bool entity_active;
+ 
+-	entity_active = rtw89_get_entity_state(rtwdev);
++	entity_active = rtw89_get_entity_state(rtwdev, phy_idx);
+ 	if (!entity_active)
+ 		return;
+ 
+-	mode = rtw89_get_entity_mode(rtwdev);
+-	switch (mode) {
+-	case RTW89_ENTITY_MODE_SCC:
+-	case RTW89_ENTITY_MODE_MCC:
+-		chanctx_idx = RTW89_CHANCTX_0;
+-		break;
+-	case RTW89_ENTITY_MODE_MCC_PREPARE:
+-		chanctx_idx = RTW89_CHANCTX_1;
+-		break;
+-	default:
+-		WARN(1, "Invalid ent mode: %d\n", mode);
+-		return;
+-	}
++	chip->ops->set_txpwr(rtwdev, chan, phy_idx);
++}
+ 
+-	roc_idx = atomic_read(&hal->roc_chanctx_idx);
+-	if (roc_idx != RTW89_CHANCTX_IDLE)
+-		chanctx_idx = roc_idx;
++void rtw89_core_set_chip_txpwr(struct rtw89_dev *rtwdev)
++{
++	const struct rtw89_chan *chan;
+ 
+-	phy_idx = RTW89_PHY_0;
+-	chan = rtw89_chan_get(rtwdev, chanctx_idx);
+-	chip->ops->set_txpwr(rtwdev, chan, phy_idx);
++	chan = rtw89_mgnt_chan_get(rtwdev, 0);
++	__rtw89_core_set_chip_txpwr(rtwdev, chan, RTW89_PHY_0);
++
++	if (!rtwdev->support_mlo)
++		return;
++
++	chan = rtw89_mgnt_chan_get(rtwdev, 1);
++	__rtw89_core_set_chip_txpwr(rtwdev, chan, RTW89_PHY_1);
+ }
+ 
+-int rtw89_set_channel(struct rtw89_dev *rtwdev)
++static void __rtw89_set_channel(struct rtw89_dev *rtwdev,
++				const struct rtw89_chan *chan,
++				enum rtw89_mac_idx mac_idx,
++				enum rtw89_phy_idx phy_idx)
+ {
+-	struct rtw89_hal *hal = &rtwdev->hal;
+ 	const struct rtw89_chip_info *chip = rtwdev->chip;
+ 	const struct rtw89_chan_rcd *chan_rcd;
+-	const struct rtw89_chan *chan;
+-	enum rtw89_chanctx_idx chanctx_idx;
+-	enum rtw89_chanctx_idx roc_idx;
+-	enum rtw89_mac_idx mac_idx;
+-	enum rtw89_phy_idx phy_idx;
+ 	struct rtw89_channel_help_params bak;
+-	enum rtw89_entity_mode mode;
+ 	bool entity_active;
+ 
+-	entity_active = rtw89_get_entity_state(rtwdev);
+-
+-	mode = rtw89_entity_recalc(rtwdev);
+-	switch (mode) {
+-	case RTW89_ENTITY_MODE_SCC:
+-	case RTW89_ENTITY_MODE_MCC:
+-		chanctx_idx = RTW89_CHANCTX_0;
+-		break;
+-	case RTW89_ENTITY_MODE_MCC_PREPARE:
+-		chanctx_idx = RTW89_CHANCTX_1;
+-		break;
+-	default:
+-		WARN(1, "Invalid ent mode: %d\n", mode);
+-		return -EINVAL;
+-	}
+-
+-	roc_idx = atomic_read(&hal->roc_chanctx_idx);
+-	if (roc_idx != RTW89_CHANCTX_IDLE)
+-		chanctx_idx = roc_idx;
++	entity_active = rtw89_get_entity_state(rtwdev, phy_idx);
+ 
+-	mac_idx = RTW89_MAC_0;
+-	phy_idx = RTW89_PHY_0;
+-
+-	chan = rtw89_chan_get(rtwdev, chanctx_idx);
+-	chan_rcd = rtw89_chan_rcd_get(rtwdev, chanctx_idx);
++	chan_rcd = rtw89_chan_rcd_get_by_chan(chan);
+ 
+ 	rtw89_chip_set_channel_prepare(rtwdev, &bak, chan, mac_idx, phy_idx);
+ 
+@@ -432,7 +396,29 @@ int rtw89_set_channel(struct rtw89_dev *rtwdev)
+ 		rtw89_chip_rfk_band_changed(rtwdev, phy_idx, chan);
+ 	}
+ 
+-	rtw89_set_entity_state(rtwdev, true);
++	rtw89_set_entity_state(rtwdev, phy_idx, true);
++}
++
++int rtw89_set_channel(struct rtw89_dev *rtwdev)
++{
++	const struct rtw89_chan *chan;
++	enum rtw89_entity_mode mode;
++
++	mode = rtw89_entity_recalc(rtwdev);
++	if (mode < 0 || mode >= NUM_OF_RTW89_ENTITY_MODE) {
++		WARN(1, "Invalid ent mode: %d\n", mode);
++		return -EINVAL;
++	}
++
++	chan = rtw89_mgnt_chan_get(rtwdev, 0);
++	__rtw89_set_channel(rtwdev, chan, RTW89_MAC_0, RTW89_PHY_0);
++
++	if (!rtwdev->support_mlo)
++		return 0;
++
++	chan = rtw89_mgnt_chan_get(rtwdev, 1);
++	__rtw89_set_channel(rtwdev, chan, RTW89_MAC_1, RTW89_PHY_1);
++
+ 	return 0;
+ }
+ 
+@@ -3157,9 +3143,10 @@ void rtw89_roc_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
+ 	rtw89_leave_ips_by_hwflags(rtwdev);
+ 	rtw89_leave_lps(rtwdev);
+ 
+-	rtwvif_link = rtw89_vif_get_link_inst(rtwvif, 0);
++	rtwvif_link = rtw89_vif_get_link_inst(rtwvif, RTW89_ROC_BY_LINK_INDEX);
+ 	if (unlikely(!rtwvif_link)) {
+-		rtw89_err(rtwdev, "roc start: find no link on HW-0\n");
++		rtw89_err(rtwdev, "roc start: find no link on HW-%u\n",
++			  RTW89_ROC_BY_LINK_INDEX);
+ 		return;
+ 	}
+ 
+@@ -3211,9 +3198,10 @@ void rtw89_roc_end(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
+ 	rtw89_leave_ips_by_hwflags(rtwdev);
+ 	rtw89_leave_lps(rtwdev);
+ 
+-	rtwvif_link = rtw89_vif_get_link_inst(rtwvif, 0);
++	rtwvif_link = rtw89_vif_get_link_inst(rtwvif, RTW89_ROC_BY_LINK_INDEX);
+ 	if (unlikely(!rtwvif_link)) {
+-		rtw89_err(rtwdev, "roc end: find no link on HW-0\n");
++		rtw89_err(rtwdev, "roc end: find no link on HW-%u\n",
++			  RTW89_ROC_BY_LINK_INDEX);
+ 		return;
+ 	}
+ 
+@@ -3224,7 +3212,7 @@ void rtw89_roc_end(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
+ 
+ 	roc->state = RTW89_ROC_IDLE;
+ 	rtw89_config_roc_chandef(rtwdev, rtwvif_link->chanctx_idx, NULL);
+-	rtw89_chanctx_proceed(rtwdev);
++	rtw89_chanctx_proceed(rtwdev, NULL);
+ 	ret = rtw89_core_send_nullfunc(rtwdev, rtwvif_link, true, false);
+ 	if (ret)
+ 		rtw89_debug(rtwdev, RTW89_DBG_TXRX,
+diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h
+index de33320b1354cd..ff3048d2489f12 100644
+--- a/drivers/net/wireless/realtek/rtw89/core.h
++++ b/drivers/net/wireless/realtek/rtw89/core.h
+@@ -3424,6 +3424,8 @@ enum rtw89_roc_state {
+ 	RTW89_ROC_MGMT,
+ };
+ 
++#define RTW89_ROC_BY_LINK_INDEX 0
++
+ struct rtw89_roc {
+ 	struct ieee80211_channel chan;
+ 	struct delayed_work roc_work;
+@@ -4619,7 +4621,7 @@ enum rtw89_chanctx_changes {
+ };
+ 
+ enum rtw89_entity_mode {
+-	RTW89_ENTITY_MODE_SCC,
++	RTW89_ENTITY_MODE_SCC_OR_SMLD,
+ 	RTW89_ENTITY_MODE_MCC_PREPARE,
+ 	RTW89_ENTITY_MODE_MCC,
+ 
+@@ -4628,6 +4630,16 @@ enum rtw89_entity_mode {
+ 	RTW89_ENTITY_MODE_UNHANDLED = -ESRCH,
+ };
+ 
++#define RTW89_MAX_INTERFACE_NUM 2
++
++/* only valid when running with chanctx_ops */
++struct rtw89_entity_mgnt {
++	struct list_head active_list;
++	struct rtw89_vif *active_roles[RTW89_MAX_INTERFACE_NUM];
++	enum rtw89_chanctx_idx chanctx_tbl[RTW89_MAX_INTERFACE_NUM]
++					  [__RTW89_MLD_MAX_LINK_NUM];
++};
++
+ struct rtw89_chanctx {
+ 	struct cfg80211_chan_def chandef;
+ 	struct rtw89_chan chan;
+@@ -4668,9 +4680,10 @@ struct rtw89_hal {
+ 	struct rtw89_chanctx chanctx[NUM_OF_RTW89_CHANCTX];
+ 	struct cfg80211_chan_def roc_chandef;
+ 
+-	bool entity_active;
++	bool entity_active[RTW89_PHY_MAX];
+ 	bool entity_pause;
+ 	enum rtw89_entity_mode entity_mode;
++	struct rtw89_entity_mgnt entity_mgnt;
+ 
+ 	struct rtw89_edcca_bak edcca_bak;
+ 	u32 disabled_dm_bitmap; /* bitmap of enum rtw89_dm_type */
+@@ -5607,6 +5620,7 @@ struct rtw89_dev {
+ struct rtw89_vif {
+ 	struct rtw89_dev *rtwdev;
+ 	struct list_head list;
++	struct list_head mgnt_entry;
+ 
+ 	u8 mac_addr[ETH_ALEN];
+ 	__be32 ip_addr;
+@@ -6361,6 +6375,15 @@ const struct rtw89_chan_rcd *rtw89_chan_rcd_get(struct rtw89_dev *rtwdev,
+ 	return &hal->chanctx[idx].rcd;
+ }
+ 
++static inline
++const struct rtw89_chan_rcd *rtw89_chan_rcd_get_by_chan(const struct rtw89_chan *chan)
++{
++	const struct rtw89_chanctx *chanctx =
++		container_of_const(chan, struct rtw89_chanctx, chan);
++
++	return &chanctx->rcd;
++}
++
+ static inline
+ const struct rtw89_chan *rtw89_scan_chan_get(struct rtw89_dev *rtwdev)
+ {
+diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c
+index e6bceef691e9be..620e076d1b597d 100644
+--- a/drivers/net/wireless/realtek/rtw89/fw.c
++++ b/drivers/net/wireless/realtek/rtw89/fw.c
+@@ -6637,21 +6637,24 @@ void rtw89_hw_scan_start(struct rtw89_dev *rtwdev,
+ 	rtw89_chanctx_pause(rtwdev, RTW89_CHANCTX_PAUSE_REASON_HW_SCAN);
+ }
+ 
+-void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev,
+-			    struct rtw89_vif_link *rtwvif_link,
+-			    bool aborted)
++struct rtw89_hw_scan_complete_cb_data {
++	struct rtw89_vif_link *rtwvif_link;
++	bool aborted;
++};
++
++static int rtw89_hw_scan_complete_cb(struct rtw89_dev *rtwdev, void *data)
+ {
+ 	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+ 	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
++	struct rtw89_hw_scan_complete_cb_data *cb_data = data;
++	struct rtw89_vif_link *rtwvif_link = cb_data->rtwvif_link;
+ 	struct cfg80211_scan_info info = {
+-		.aborted = aborted,
++		.aborted = cb_data->aborted,
+ 	};
+ 	struct rtw89_vif *rtwvif;
+ 
+ 	if (!rtwvif_link)
+-		return;
+-
+-	rtw89_chanctx_proceed(rtwdev);
++		return -EINVAL;
+ 
+ 	rtwvif = rtwvif_link->rtwvif;
+ 
+@@ -6672,6 +6675,29 @@ void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev,
+ 	scan_info->last_chan_idx = 0;
+ 	scan_info->scanning_vif = NULL;
+ 	scan_info->abort = false;
++
++	return 0;
++}
++
++void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev,
++			    struct rtw89_vif_link *rtwvif_link,
++			    bool aborted)
++{
++	struct rtw89_hw_scan_complete_cb_data cb_data = {
++		.rtwvif_link = rtwvif_link,
++		.aborted = aborted,
++	};
++	const struct rtw89_chanctx_cb_parm cb_parm = {
++		.cb = rtw89_hw_scan_complete_cb,
++		.data = &cb_data,
++		.caller = __func__,
++	};
++
++	/* The work here needs to be done after setting the channel (for coex)
++	 * and before proceeding with the entity mode (for MCC), so pass it as a
++	 * callback to guarantee the right sequence rather than doing it directly.
++	 */
++	rtw89_chanctx_proceed(rtwdev, &cb_parm);
+ }
+ 
+ void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev,
+diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c
+index 4e15d539e3d1c4..4574aa62839b02 100644
+--- a/drivers/net/wireless/realtek/rtw89/mac.c
++++ b/drivers/net/wireless/realtek/rtw89/mac.c
+@@ -1483,7 +1483,8 @@ static int rtw89_mac_power_switch(struct rtw89_dev *rtwdev, bool on)
+ 		clear_bit(RTW89_FLAG_CMAC1_FUNC, rtwdev->flags);
+ 		clear_bit(RTW89_FLAG_FW_RDY, rtwdev->flags);
+ 		rtw89_write8(rtwdev, R_AX_SCOREBOARD + 3, MAC_AX_NOTIFY_PWR_MAJOR);
+-		rtw89_set_entity_state(rtwdev, false);
++		rtw89_set_entity_state(rtwdev, RTW89_PHY_0, false);
++		rtw89_set_entity_state(rtwdev, RTW89_PHY_1, false);
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/net/wireless/realtek/rtw89/mac80211.c b/drivers/net/wireless/realtek/rtw89/mac80211.c
+index 13fb3cac27016b..8351a70d325d4a 100644
+--- a/drivers/net/wireless/realtek/rtw89/mac80211.c
++++ b/drivers/net/wireless/realtek/rtw89/mac80211.c
+@@ -189,8 +189,10 @@ static int rtw89_ops_add_interface(struct ieee80211_hw *hw,
+ 
+ 	rtw89_core_txq_init(rtwdev, vif->txq);
+ 
+-	if (!rtw89_rtwvif_in_list(rtwdev, rtwvif))
++	if (!rtw89_rtwvif_in_list(rtwdev, rtwvif)) {
+ 		list_add_tail(&rtwvif->list, &rtwdev->rtwvifs_list);
++		INIT_LIST_HEAD(&rtwvif->mgnt_entry);
++	}
+ 
+ 	ether_addr_copy(rtwvif->mac_addr, vif->addr);
+ 
+@@ -1271,11 +1273,11 @@ static void rtw89_ops_cancel_hw_scan(struct ieee80211_hw *hw,
+ 	if (!RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD, &rtwdev->fw))
+ 		return;
+ 
+-	if (!rtwdev->scanning)
+-		return;
+-
+ 	mutex_lock(&rtwdev->mutex);
+ 
++	if (!rtwdev->scanning)
++		goto out;
++
+ 	rtwvif_link = rtw89_vif_get_link_inst(rtwvif, 0);
+ 	if (unlikely(!rtwvif_link)) {
+ 		rtw89_err(rtwdev, "cancel hw scan: find no link on HW-0\n");
+diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
+index 0c77b8524160db..42805ed7ca1202 100644
+--- a/drivers/net/wireless/ti/wlcore/main.c
++++ b/drivers/net/wireless/ti/wlcore/main.c
+@@ -2612,24 +2612,24 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
+ 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
+ 	    test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
+ 		ret = -EBUSY;
+-		goto out;
++		goto out_unlock;
+ 	}
+ 
+ 
+ 	ret = wl12xx_init_vif_data(wl, vif);
+ 	if (ret < 0)
+-		goto out;
++		goto out_unlock;
+ 
+ 	wlvif->wl = wl;
+ 	role_type = wl12xx_get_role_type(wl, wlvif);
+ 	if (role_type == WL12XX_INVALID_ROLE_TYPE) {
+ 		ret = -EINVAL;
+-		goto out;
++		goto out_unlock;
+ 	}
+ 
+ 	ret = wlcore_allocate_hw_queue_base(wl, wlvif);
+ 	if (ret < 0)
+-		goto out;
++		goto out_unlock;
+ 
+ 	/*
+ 	 * TODO: after the nvs issue will be solved, move this block
+@@ -2644,7 +2644,7 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
+ 
+ 		ret = wl12xx_init_fw(wl);
+ 		if (ret < 0)
+-			goto out;
++			goto out_unlock;
+ 	}
+ 
+ 	/*
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 249914b90dbfa7..4c409efd8cec17 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -3085,7 +3085,7 @@ int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
+ static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi,
+ 				struct nvme_effects_log **log)
+ {
+-	struct nvme_effects_log	*cel = xa_load(&ctrl->cels, csi);
++	struct nvme_effects_log *old, *cel = xa_load(&ctrl->cels, csi);
+ 	int ret;
+ 
+ 	if (cel)
+@@ -3102,7 +3102,11 @@ static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi,
+ 		return ret;
+ 	}
+ 
+-	xa_store(&ctrl->cels, csi, cel, GFP_KERNEL);
++	old = xa_store(&ctrl->cels, csi, cel, GFP_KERNEL);
++	if (xa_is_err(old)) {
++		kfree(cel);
++		return xa_err(old);
++	}
+ out:
+ 	*log = cel;
+ 	return 0;
+@@ -3164,6 +3168,25 @@ static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl)
+ 	return ret;
+ }
+ 
++static int nvme_init_effects_log(struct nvme_ctrl *ctrl,
++		u8 csi, struct nvme_effects_log **log)
++{
++	struct nvme_effects_log *effects, *old;
++
++	effects = kzalloc(sizeof(*effects), GFP_KERNEL);
++	if (!effects)
++		return -ENOMEM;
++
++	old = xa_store(&ctrl->cels, csi, effects, GFP_KERNEL);
++	if (xa_is_err(old)) {
++		kfree(effects);
++		return xa_err(old);
++	}
++
++	*log = effects;
++	return 0;
++}
++
+ static void nvme_init_known_nvm_effects(struct nvme_ctrl *ctrl)
+ {
+ 	struct nvme_effects_log	*log = ctrl->effects;
+@@ -3210,10 +3233,9 @@ static int nvme_init_effects(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
+ 	}
+ 
+ 	if (!ctrl->effects) {
+-		ctrl->effects = kzalloc(sizeof(*ctrl->effects), GFP_KERNEL);
+-		if (!ctrl->effects)
+-			return -ENOMEM;
+-		xa_store(&ctrl->cels, NVME_CSI_NVM, ctrl->effects, GFP_KERNEL);
++		ret = nvme_init_effects_log(ctrl, NVME_CSI_NVM, &ctrl->effects);
++		if (ret < 0)
++			return ret;
+ 	}
+ 
+ 	nvme_init_known_nvm_effects(ctrl);
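
/*
 * Generic sketch of the check-and-unwind shape the core.c hunks above
 * add (plain C, not the nvme/xarray API): when a store into a cache
 * can fail, the caller must free the freshly allocated entry and
 * propagate the error instead of leaking it and caching a bad pointer,
 * which is what ignoring xa_store()'s return value allowed.
 */
#include <errno.h>
#include <stdlib.h>

struct cache { void *slot; };

/* stand-in for xa_store(): returns a negative errno on failure */
static int store(struct cache *c, void *entry)
{
	if (!c)
		return -ENOMEM;
	c->slot = entry;
	return 0;
}

static int init_entry(struct cache *c, void **out)
{
	void *entry = calloc(1, 64);
	int err;

	if (!entry)
		return -ENOMEM;

	err = store(c, entry);
	if (err) {		/* the unwind the patch adds */
		free(entry);
		return err;
	}

	*out = entry;
	return 0;
}

int main(void)
{
	struct cache c = { 0 };
	void *e = NULL;
	int ret = init_entry(&c, &e);

	free(e);
	return ret ? 1 : 0;
}
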
+diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
+index 55abfe5e1d2548..8305d3c1280748 100644
+--- a/drivers/nvme/host/tcp.c
++++ b/drivers/nvme/host/tcp.c
+@@ -54,6 +54,8 @@ MODULE_PARM_DESC(tls_handshake_timeout,
+ 		 "nvme TLS handshake timeout in seconds (default 10)");
+ #endif
+ 
++static atomic_t nvme_tcp_cpu_queues[NR_CPUS];
++
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+ /* lockdep can detect a circular dependency of the form
+  *   sk_lock -> mmap_lock (page fault) -> fs locks -> sk_lock
+@@ -127,6 +129,7 @@ enum nvme_tcp_queue_flags {
+ 	NVME_TCP_Q_ALLOCATED	= 0,
+ 	NVME_TCP_Q_LIVE		= 1,
+ 	NVME_TCP_Q_POLLING	= 2,
++	NVME_TCP_Q_IO_CPU_SET	= 3,
+ };
+ 
+ enum nvme_tcp_recv_state {
+@@ -1562,23 +1565,56 @@ static bool nvme_tcp_poll_queue(struct nvme_tcp_queue *queue)
+ 			  ctrl->io_queues[HCTX_TYPE_POLL];
+ }
+ 
++/**
++ * Track the number of queues assigned to each cpu using a global per-cpu
++ * counter and select the least used cpu from the mq_map. Our goal is to spread
++ * different controllers' I/O threads across different cpu cores.
++ *
++ * Note that the accounting is not 100% perfect, but it does not need to be;
++ * we simply make a best-effort pick of the best candidate cpu core available
++ * at any given point.
++ */
+ static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue)
+ {
+ 	struct nvme_tcp_ctrl *ctrl = queue->ctrl;
+-	int qid = nvme_tcp_queue_id(queue);
+-	int n = 0;
++	struct blk_mq_tag_set *set = &ctrl->tag_set;
++	int qid = nvme_tcp_queue_id(queue) - 1;
++	unsigned int *mq_map = NULL;
++	int cpu, min_queues = INT_MAX, io_cpu;
++
++	if (wq_unbound)
++		goto out;
+ 
+ 	if (nvme_tcp_default_queue(queue))
+-		n = qid - 1;
++		mq_map = set->map[HCTX_TYPE_DEFAULT].mq_map;
+ 	else if (nvme_tcp_read_queue(queue))
+-		n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - 1;
++		mq_map = set->map[HCTX_TYPE_READ].mq_map;
+ 	else if (nvme_tcp_poll_queue(queue))
+-		n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] -
+-				ctrl->io_queues[HCTX_TYPE_READ] - 1;
+-	if (wq_unbound)
+-		queue->io_cpu = WORK_CPU_UNBOUND;
+-	else
+-		queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
++		mq_map = set->map[HCTX_TYPE_POLL].mq_map;
++
++	if (WARN_ON(!mq_map))
++		goto out;
++
++	/* Search for the least used cpu from the mq_map */
++	io_cpu = WORK_CPU_UNBOUND;
++	for_each_online_cpu(cpu) {
++		int num_queues = atomic_read(&nvme_tcp_cpu_queues[cpu]);
++
++		if (mq_map[cpu] != qid)
++			continue;
++		if (num_queues < min_queues) {
++			io_cpu = cpu;
++			min_queues = num_queues;
++		}
++	}
++	if (io_cpu != WORK_CPU_UNBOUND) {
++		queue->io_cpu = io_cpu;
++		atomic_inc(&nvme_tcp_cpu_queues[io_cpu]);
++		set_bit(NVME_TCP_Q_IO_CPU_SET, &queue->flags);
++	}
++out:
++	dev_dbg(ctrl->ctrl.device, "queue %d: using cpu %d\n",
++		qid, queue->io_cpu);
+ }
+ 
+ static void nvme_tcp_tls_done(void *data, int status, key_serial_t pskid)
+@@ -1722,7 +1758,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid,
+ 
+ 	queue->sock->sk->sk_allocation = GFP_ATOMIC;
+ 	queue->sock->sk->sk_use_task_frag = false;
+-	nvme_tcp_set_queue_io_cpu(queue);
++	queue->io_cpu = WORK_CPU_UNBOUND;
+ 	queue->request = NULL;
+ 	queue->data_remaining = 0;
+ 	queue->ddgst_remaining = 0;
+@@ -1844,6 +1880,9 @@ static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
+ 	if (!test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
+ 		return;
+ 
++	if (test_and_clear_bit(NVME_TCP_Q_IO_CPU_SET, &queue->flags))
++		atomic_dec(&nvme_tcp_cpu_queues[queue->io_cpu]);
++
+ 	mutex_lock(&queue->queue_lock);
+ 	if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
+ 		__nvme_tcp_stop_queue(queue);
+@@ -1878,9 +1917,10 @@ static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
+ 	nvme_tcp_init_recv_ctx(queue);
+ 	nvme_tcp_setup_sock_ops(queue);
+ 
+-	if (idx)
++	if (idx) {
++		nvme_tcp_set_queue_io_cpu(queue);
+ 		ret = nvmf_connect_io_queue(nctrl, idx);
+-	else
++	} else
+ 		ret = nvmf_connect_admin_queue(nctrl);
+ 
+ 	if (!ret) {
+@@ -2856,6 +2896,7 @@ static struct nvmf_transport_ops nvme_tcp_transport = {
+ static int __init nvme_tcp_init_module(void)
+ {
+ 	unsigned int wq_flags = WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_SYSFS;
++	int cpu;
+ 
+ 	BUILD_BUG_ON(sizeof(struct nvme_tcp_hdr) != 8);
+ 	BUILD_BUG_ON(sizeof(struct nvme_tcp_cmd_pdu) != 72);
+@@ -2873,6 +2914,9 @@ static int __init nvme_tcp_init_module(void)
+ 	if (!nvme_tcp_wq)
+ 		return -ENOMEM;
+ 
++	for_each_possible_cpu(cpu)
++		atomic_set(&nvme_tcp_cpu_queues[cpu], 0);
++
+ 	nvmf_register_transport(&nvme_tcp_transport);
+ 	return 0;
+ }
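
/*
 * User-space sketch (assumed simplification) of the io_cpu selection
 * the tcp.c rework above introduces: a per-cpu count of already
 * assigned queues, and for each new queue a scan for the least loaded
 * cpu among those the mq_map ties to that qid.
 */
#include <limits.h>
#include <stdio.h>

#define NR_CPUS 4

static int cpu_queues[NR_CPUS];			/* queues pinned per cpu */
static const int mq_map[NR_CPUS] = { 0, 0, 1, 1 };	/* cpu -> qid */

static int pick_io_cpu(int qid)
{
	int best = -1, min_queues = INT_MAX;

	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		if (mq_map[cpu] != qid)
			continue;
		if (cpu_queues[cpu] < min_queues) {
			best = cpu;
			min_queues = cpu_queues[cpu];
		}
	}
	if (best >= 0)
		cpu_queues[best]++;	/* mirrors the atomic_inc() above */
	return best;
}

int main(void)
{
	for (int q = 0; q < 4; q++)	/* queues spread evenly per qid */
		printf("queue %d -> cpu %d\n", q, pick_io_cpu(q % 2));
	return 0;
}
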
+diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
+index 546e76ac407cfd..8c80f4dc8b3fae 100644
+--- a/drivers/of/fdt.c
++++ b/drivers/of/fdt.c
+@@ -8,7 +8,6 @@
+ 
+ #define pr_fmt(fmt)	"OF: fdt: " fmt
+ 
+-#include <linux/acpi.h>
+ #include <linux/crash_dump.h>
+ #include <linux/crc32.h>
+ #include <linux/kernel.h>
+@@ -512,8 +511,6 @@ void __init early_init_fdt_scan_reserved_mem(void)
+ 			break;
+ 		memblock_reserve(base, size);
+ 	}
+-
+-	fdt_init_reserved_mem();
+ }
+ 
+ /**
+@@ -1214,14 +1211,10 @@ void __init unflatten_device_tree(void)
+ {
+ 	void *fdt = initial_boot_params;
+ 
+-	/* Don't use the bootloader provided DTB if ACPI is enabled */
+-	if (!acpi_disabled)
+-		fdt = NULL;
++	/* Save the statically-placed regions in the reserved_mem array */
++	fdt_scan_reserved_mem_reg_nodes();
+ 
+-	/*
+-	 * Populate an empty root node when ACPI is enabled or bootloader
+-	 * doesn't provide one.
+-	 */
++	/* Populate an empty root node when bootloader doesn't provide one */
+ 	if (!fdt) {
+ 		fdt = (void *) __dtb_empty_root_begin;
+ 		/* fdt_totalsize() will be used for copy size */
+diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h
+index c235d6c909a16a..10698862252572 100644
+--- a/drivers/of/of_private.h
++++ b/drivers/of/of_private.h
+@@ -9,6 +9,7 @@
+  */
+ 
+ #define FDT_ALIGN_SIZE 8
++#define MAX_RESERVED_REGIONS    64
+ 
+ /**
+  * struct alias_prop - Alias property in 'aliases' node
+@@ -183,7 +184,7 @@ static inline struct device_node *__of_get_dma_parent(const struct device_node *
+ #endif
+ 
+ int fdt_scan_reserved_mem(void);
+-void fdt_init_reserved_mem(void);
++void __init fdt_scan_reserved_mem_reg_nodes(void);
+ 
+ bool of_fdt_device_is_available(const void *blob, unsigned long node);
+ 
+diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
+index 46e1c3fbc7692c..45445a1600a968 100644
+--- a/drivers/of/of_reserved_mem.c
++++ b/drivers/of/of_reserved_mem.c
+@@ -27,7 +27,6 @@
+ 
+ #include "of_private.h"
+ 
+-#define MAX_RESERVED_REGIONS	64
+ static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS];
+ static int reserved_mem_count;
+ 
+@@ -51,11 +50,13 @@ static int __init early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
+ 			memblock_phys_free(base, size);
+ 	}
+ 
+-	kmemleak_ignore_phys(base);
++	if (!err)
++		kmemleak_ignore_phys(base);
+ 
+ 	return err;
+ }
+ 
++static void __init fdt_init_reserved_mem_node(struct reserved_mem *rmem);
+ /*
+  * fdt_reserved_mem_save_node() - save fdt node for second pass initialization
+  */
+@@ -74,6 +75,9 @@ static void __init fdt_reserved_mem_save_node(unsigned long node, const char *un
+ 	rmem->base = base;
+ 	rmem->size = size;
+ 
++	/* Call the region specific initialization function */
++	fdt_init_reserved_mem_node(rmem);
++
+ 	reserved_mem_count++;
+ 	return;
+ }
+@@ -106,7 +110,6 @@ static int __init __reserved_mem_reserve_reg(unsigned long node,
+ 	phys_addr_t base, size;
+ 	int len;
+ 	const __be32 *prop;
+-	int first = 1;
+ 	bool nomap;
+ 
+ 	prop = of_get_flat_dt_prop(node, "reg", &len);
+@@ -134,10 +137,6 @@ static int __init __reserved_mem_reserve_reg(unsigned long node,
+ 			       uname, &base, (unsigned long)(size / SZ_1M));
+ 
+ 		len -= t_len;
+-		if (first) {
+-			fdt_reserved_mem_save_node(node, uname, base, size);
+-			first = 0;
+-		}
+ 	}
+ 	return 0;
+ }
+@@ -165,12 +164,82 @@ static int __init __reserved_mem_check_root(unsigned long node)
+ 	return 0;
+ }
+ 
++static void __init __rmem_check_for_overlap(void);
++
++/**
++ * fdt_scan_reserved_mem_reg_nodes() - Store info for the "reg" defined
++ * reserved memory regions.
++ *
++ * This function is used to scan through the DT and store the
++ * information for the reserved memory regions that are defined using
++ * the "reg" property. The region node number, name, base address, and
++ * size are all stored in the reserved_mem array by calling the
++ * fdt_reserved_mem_save_node() function.
++ */
++void __init fdt_scan_reserved_mem_reg_nodes(void)
++{
++	int t_len = (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32);
++	const void *fdt = initial_boot_params;
++	phys_addr_t base, size;
++	const __be32 *prop;
++	int node, child;
++	int len;
++
++	if (!fdt)
++		return;
++
++	node = fdt_path_offset(fdt, "/reserved-memory");
++	if (node < 0) {
++		pr_info("Reserved memory: No reserved-memory node in the DT\n");
++		return;
++	}
++
++	if (__reserved_mem_check_root(node)) {
++		pr_err("Reserved memory: unsupported node format, ignoring\n");
++		return;
++	}
++
++	fdt_for_each_subnode(child, fdt, node) {
++		const char *uname;
++
++		prop = of_get_flat_dt_prop(child, "reg", &len);
++		if (!prop)
++			continue;
++		if (!of_fdt_device_is_available(fdt, child))
++			continue;
++
++		uname = fdt_get_name(fdt, child, NULL);
++		if (len && len % t_len != 0) {
++			pr_err("Reserved memory: invalid reg property in '%s', skipping node.\n",
++			       uname);
++			continue;
++		}
++
++		if (len > t_len)
++			pr_warn("%s() ignores %d regions in node '%s'\n",
++				__func__, len / t_len - 1, uname);
++
++		base = dt_mem_next_cell(dt_root_addr_cells, &prop);
++		size = dt_mem_next_cell(dt_root_size_cells, &prop);
++
++		if (size)
++			fdt_reserved_mem_save_node(child, uname, base, size);
++	}
++
++	/* check for overlapping reserved regions */
++	__rmem_check_for_overlap();
++}
++
++static int __init __reserved_mem_alloc_size(unsigned long node, const char *uname);
++
+ /*
+  * fdt_scan_reserved_mem() - scan a single FDT node for reserved memory
+  */
+ int __init fdt_scan_reserved_mem(void)
+ {
+ 	int node, child;
++	int dynamic_nodes_cnt = 0;
++	int dynamic_nodes[MAX_RESERVED_REGIONS];
+ 	const void *fdt = initial_boot_params;
+ 
+ 	node = fdt_path_offset(fdt, "/reserved-memory");
+@@ -192,8 +261,24 @@ int __init fdt_scan_reserved_mem(void)
+ 		uname = fdt_get_name(fdt, child, NULL);
+ 
+ 		err = __reserved_mem_reserve_reg(child, uname);
+-		if (err == -ENOENT && of_get_flat_dt_prop(child, "size", NULL))
+-			fdt_reserved_mem_save_node(child, uname, 0, 0);
++		/*
++		 * Save the nodes for the dynamically-placed regions
++		 * into an array which will be used for allocation right
++		 * after all the statically-placed regions are reserved
++		 * or marked as no-map. This is done to avoid dynamically
++		 * allocating from one of the statically-placed regions.
++		 */
++		if (err == -ENOENT && of_get_flat_dt_prop(child, "size", NULL)) {
++			dynamic_nodes[dynamic_nodes_cnt] = child;
++			dynamic_nodes_cnt++;
++		}
++	}
++	for (int i = 0; i < dynamic_nodes_cnt; i++) {
++		const char *uname;
++
++		child = dynamic_nodes[i];
++		uname = fdt_get_name(fdt, child, NULL);
++		__reserved_mem_alloc_size(child, uname);
+ 	}
+ 	return 0;
+ }
+@@ -253,8 +338,7 @@ static int __init __reserved_mem_alloc_in_range(phys_addr_t size,
+  * __reserved_mem_alloc_size() - allocate reserved memory described by
+  *	'size', 'alignment'  and 'alloc-ranges' properties.
+  */
+-static int __init __reserved_mem_alloc_size(unsigned long node,
+-	const char *uname, phys_addr_t *res_base, phys_addr_t *res_size)
++static int __init __reserved_mem_alloc_size(unsigned long node, const char *uname)
+ {
+ 	int t_len = (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32);
+ 	phys_addr_t start = 0, end = 0;
+@@ -334,9 +418,8 @@ static int __init __reserved_mem_alloc_size(unsigned long node,
+ 		return -ENOMEM;
+ 	}
+ 
+-	*res_base = base;
+-	*res_size = size;
+-
++	/* Save region in the reserved_mem array */
++	fdt_reserved_mem_save_node(node, uname, base, size);
+ 	return 0;
+ }
+ 
+@@ -425,48 +508,37 @@ static void __init __rmem_check_for_overlap(void)
+ }
+ 
+ /**
+- * fdt_init_reserved_mem() - allocate and init all saved reserved memory regions
++ * fdt_init_reserved_mem_node() - Initialize a reserved memory region
++ * @rmem: reserved_mem struct of the memory region to be initialized.
++ *
++ * This function is used to call the region specific initialization
++ * function for a reserved memory region.
+  */
+-void __init fdt_init_reserved_mem(void)
++static void __init fdt_init_reserved_mem_node(struct reserved_mem *rmem)
+ {
+-	int i;
+-
+-	/* check for overlapping reserved regions */
+-	__rmem_check_for_overlap();
+-
+-	for (i = 0; i < reserved_mem_count; i++) {
+-		struct reserved_mem *rmem = &reserved_mem[i];
+-		unsigned long node = rmem->fdt_node;
+-		int err = 0;
+-		bool nomap;
++	unsigned long node = rmem->fdt_node;
++	int err = 0;
++	bool nomap;
+ 
+-		nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
++	nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
+ 
+-		if (rmem->size == 0)
+-			err = __reserved_mem_alloc_size(node, rmem->name,
+-						 &rmem->base, &rmem->size);
+-		if (err == 0) {
+-			err = __reserved_mem_init_node(rmem);
+-			if (err != 0 && err != -ENOENT) {
+-				pr_info("node %s compatible matching fail\n",
+-					rmem->name);
+-				if (nomap)
+-					memblock_clear_nomap(rmem->base, rmem->size);
+-				else
+-					memblock_phys_free(rmem->base,
+-							   rmem->size);
+-			} else {
+-				phys_addr_t end = rmem->base + rmem->size - 1;
+-				bool reusable =
+-					(of_get_flat_dt_prop(node, "reusable", NULL)) != NULL;
+-
+-				pr_info("%pa..%pa (%lu KiB) %s %s %s\n",
+-					&rmem->base, &end, (unsigned long)(rmem->size / SZ_1K),
+-					nomap ? "nomap" : "map",
+-					reusable ? "reusable" : "non-reusable",
+-					rmem->name ? rmem->name : "unknown");
+-			}
+-		}
++	err = __reserved_mem_init_node(rmem);
++	if (err != 0 && err != -ENOENT) {
++		pr_info("node %s compatible matching fail\n", rmem->name);
++		if (nomap)
++			memblock_clear_nomap(rmem->base, rmem->size);
++		else
++			memblock_phys_free(rmem->base, rmem->size);
++	} else {
++		phys_addr_t end = rmem->base + rmem->size - 1;
++		bool reusable =
++			(of_get_flat_dt_prop(node, "reusable", NULL)) != NULL;
++
++		pr_info("%pa..%pa (%lu KiB) %s %s %s\n",
++			&rmem->base, &end, (unsigned long)(rmem->size / SZ_1K),
++			nomap ? "nomap" : "map",
++			reusable ? "reusable" : "non-reusable",
++			rmem->name ? rmem->name : "unknown");
+ 	}
+ }
+ 
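The reordering above is the heart of this of/fdt change: statically-placed ("reg")
regions are reserved or marked no-map first, and only afterwards are the
dynamically-placed ("size"-only) regions allocated, so the allocator can no longer
hand out memory that a later static region claims. A minimal userspace sketch of
the same two-pass idea, with a hypothetical region list standing in for FDT nodes:

/* Two-pass reservation sketch (userspace model, not kernel code).
 * Pass 1 pins down fixed regions; pass 2 allocates floating ones,
 * so a floating region can never land inside a fixed one.
 */
#include <stdio.h>
#include <stddef.h>

#define MAX_REGIONS 8

struct region {
	const char *name;
	int has_fixed_addr;     /* models a "reg" property      */
	unsigned long base;     /* valid only if has_fixed_addr */
	unsigned long size;     /* models the "size" property   */
};

static void reserve_fixed(const struct region *r)
{
	printf("reserve  %-8s @ 0x%lx (+0x%lx)\n", r->name, r->base, r->size);
}

static void alloc_dynamic(const struct region *r)
{
	/* a real implementation would search memory not yet reserved */
	printf("allocate %-8s anywhere (+0x%lx)\n", r->name, r->size);
}

int main(void)
{
	struct region nodes[] = {
		{ "fw",      1, 0x80000000UL, 0x100000UL },
		{ "cma",     0, 0,            0x4000000UL },
		{ "ramoops", 1, 0x9ff00000UL, 0x100000UL },
	};
	const struct region *deferred[MAX_REGIONS];
	size_t i, ndeferred = 0;

	for (i = 0; i < sizeof(nodes) / sizeof(nodes[0]); i++) {
		if (nodes[i].has_fixed_addr)
			reserve_fixed(&nodes[i]);
		else
			deferred[ndeferred++] = &nodes[i];  /* pass 2 */
	}
	for (i = 0; i < ndeferred; i++)
		alloc_dynamic(deferred[i]);
	return 0;
}
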
+diff --git a/drivers/of/property.c b/drivers/of/property.c
+index 7bd8390f2fba5e..906a33ae717f78 100644
+--- a/drivers/of/property.c
++++ b/drivers/of/property.c
+@@ -1317,9 +1317,9 @@ static struct device_node *parse_interrupt_map(struct device_node *np,
+ 	addrcells = of_bus_n_addr_cells(np);
+ 
+ 	imap = of_get_property(np, "interrupt-map", &imaplen);
+-	imaplen /= sizeof(*imap);
+ 	if (!imap)
+ 		return NULL;
++	imaplen /= sizeof(*imap);
+ 
+ 	imap_end = imap + imaplen;
+ 
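The one-line move in of/property.c is a classic ordering fix: if the
"interrupt-map" property is absent, of_get_property() returns NULL and, as far
as I can tell, leaves imaplen unwritten, so the old code divided an
indeterminate value before the NULL check bailed out. A small model of the
check-then-derive rule:

/* Check-then-derive sketch: never compute with an out-parameter
 * before the call that fills it has been verified to succeed.
 * lookup() here is a stand-in for of_get_property().
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static const uint32_t *lookup(int present, int *lenp)
{
	static const uint32_t prop[4] = { 1, 2, 3, 4 };

	if (!present)
		return NULL;	/* *lenp deliberately left unwritten */
	*lenp = sizeof(prop);
	return prop;
}

int main(void)
{
	int len;	/* indeterminate until lookup() succeeds */
	const uint32_t *p = lookup(1, &len);

	if (!p)			/* bail out before touching len */
		return 1;
	len /= sizeof(*p);	/* now safe: len is an element count */
	printf("%d entries\n", len);
	return 0;
}
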
+diff --git a/drivers/opp/core.c b/drivers/opp/core.c
+index 3aa18737470fa2..5ac209472c0cf6 100644
+--- a/drivers/opp/core.c
++++ b/drivers/opp/core.c
+@@ -101,11 +101,30 @@ struct opp_table *_find_opp_table(struct device *dev)
+  * representation in the OPP table and manage the clock configuration themselves
+  * in an platform specific way.
+  */
+-static bool assert_single_clk(struct opp_table *opp_table)
++static bool assert_single_clk(struct opp_table *opp_table,
++			      unsigned int __always_unused index)
+ {
+ 	return !WARN_ON(opp_table->clk_count > 1);
+ }
+ 
++/*
++ * Returns true if clock table is large enough to contain the clock index.
++ */
++static bool assert_clk_index(struct opp_table *opp_table,
++			     unsigned int index)
++{
++	return opp_table->clk_count > index;
++}
++
++/*
++ * Returns true if bandwidth table is large enough to contain the bandwidth index.
++ */
++static bool assert_bandwidth_index(struct opp_table *opp_table,
++				   unsigned int index)
++{
++	return opp_table->path_count > index;
++}
++
+ /**
+  * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an opp
+  * @opp:	opp for which voltage has to be returned for
+@@ -499,12 +518,12 @@ static struct dev_pm_opp *_opp_table_find_key(struct opp_table *opp_table,
+ 		unsigned long (*read)(struct dev_pm_opp *opp, int index),
+ 		bool (*compare)(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
+ 				unsigned long opp_key, unsigned long key),
+-		bool (*assert)(struct opp_table *opp_table))
++		bool (*assert)(struct opp_table *opp_table, unsigned int index))
+ {
+ 	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
+ 
+ 	/* Assert that the requirement is met */
+-	if (assert && !assert(opp_table))
++	if (assert && !assert(opp_table, index))
+ 		return ERR_PTR(-EINVAL);
+ 
+ 	mutex_lock(&opp_table->lock);
+@@ -532,7 +551,7 @@ _find_key(struct device *dev, unsigned long *key, int index, bool available,
+ 	  unsigned long (*read)(struct dev_pm_opp *opp, int index),
+ 	  bool (*compare)(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
+ 			  unsigned long opp_key, unsigned long key),
+-	  bool (*assert)(struct opp_table *opp_table))
++	  bool (*assert)(struct opp_table *opp_table, unsigned int index))
+ {
+ 	struct opp_table *opp_table;
+ 	struct dev_pm_opp *opp;
+@@ -555,7 +574,7 @@ _find_key(struct device *dev, unsigned long *key, int index, bool available,
+ static struct dev_pm_opp *_find_key_exact(struct device *dev,
+ 		unsigned long key, int index, bool available,
+ 		unsigned long (*read)(struct dev_pm_opp *opp, int index),
+-		bool (*assert)(struct opp_table *opp_table))
++		bool (*assert)(struct opp_table *opp_table, unsigned int index))
+ {
+ 	/*
+ 	 * The value of key will be updated here, but will be ignored as the
+@@ -568,7 +587,7 @@ static struct dev_pm_opp *_find_key_exact(struct device *dev,
+ static struct dev_pm_opp *_opp_table_find_key_ceil(struct opp_table *opp_table,
+ 		unsigned long *key, int index, bool available,
+ 		unsigned long (*read)(struct dev_pm_opp *opp, int index),
+-		bool (*assert)(struct opp_table *opp_table))
++		bool (*assert)(struct opp_table *opp_table, unsigned int index))
+ {
+ 	return _opp_table_find_key(opp_table, key, index, available, read,
+ 				   _compare_ceil, assert);
+@@ -577,7 +596,7 @@ static struct dev_pm_opp *_opp_table_find_key_ceil(struct opp_table *opp_table,
+ static struct dev_pm_opp *_find_key_ceil(struct device *dev, unsigned long *key,
+ 		int index, bool available,
+ 		unsigned long (*read)(struct dev_pm_opp *opp, int index),
+-		bool (*assert)(struct opp_table *opp_table))
++		bool (*assert)(struct opp_table *opp_table, unsigned int index))
+ {
+ 	return _find_key(dev, key, index, available, read, _compare_ceil,
+ 			 assert);
+@@ -586,7 +605,7 @@ static struct dev_pm_opp *_find_key_ceil(struct device *dev, unsigned long *key,
+ static struct dev_pm_opp *_find_key_floor(struct device *dev,
+ 		unsigned long *key, int index, bool available,
+ 		unsigned long (*read)(struct dev_pm_opp *opp, int index),
+-		bool (*assert)(struct opp_table *opp_table))
++		bool (*assert)(struct opp_table *opp_table, unsigned int index))
+ {
+ 	return _find_key(dev, key, index, available, read, _compare_floor,
+ 			 assert);
+@@ -647,7 +666,8 @@ struct dev_pm_opp *
+ dev_pm_opp_find_freq_exact_indexed(struct device *dev, unsigned long freq,
+ 				   u32 index, bool available)
+ {
+-	return _find_key_exact(dev, freq, index, available, _read_freq, NULL);
++	return _find_key_exact(dev, freq, index, available, _read_freq,
++			       assert_clk_index);
+ }
+ EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact_indexed);
+ 
+@@ -707,7 +727,8 @@ struct dev_pm_opp *
+ dev_pm_opp_find_freq_ceil_indexed(struct device *dev, unsigned long *freq,
+ 				  u32 index)
+ {
+-	return _find_key_ceil(dev, freq, index, true, _read_freq, NULL);
++	return _find_key_ceil(dev, freq, index, true, _read_freq,
++			      assert_clk_index);
+ }
+ EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil_indexed);
+ 
+@@ -760,7 +781,7 @@ struct dev_pm_opp *
+ dev_pm_opp_find_freq_floor_indexed(struct device *dev, unsigned long *freq,
+ 				   u32 index)
+ {
+-	return _find_key_floor(dev, freq, index, true, _read_freq, NULL);
++	return _find_key_floor(dev, freq, index, true, _read_freq, assert_clk_index);
+ }
+ EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor_indexed);
+ 
+@@ -878,7 +899,8 @@ struct dev_pm_opp *dev_pm_opp_find_bw_ceil(struct device *dev, unsigned int *bw,
+ 	unsigned long temp = *bw;
+ 	struct dev_pm_opp *opp;
+ 
+-	opp = _find_key_ceil(dev, &temp, index, true, _read_bw, NULL);
++	opp = _find_key_ceil(dev, &temp, index, true, _read_bw,
++			     assert_bandwidth_index);
+ 	*bw = temp;
+ 	return opp;
+ }
+@@ -909,7 +931,8 @@ struct dev_pm_opp *dev_pm_opp_find_bw_floor(struct device *dev,
+ 	unsigned long temp = *bw;
+ 	struct dev_pm_opp *opp;
+ 
+-	opp = _find_key_floor(dev, &temp, index, true, _read_bw, NULL);
++	opp = _find_key_floor(dev, &temp, index, true, _read_bw,
++			      assert_bandwidth_index);
+ 	*bw = temp;
+ 	return opp;
+ }
+@@ -1702,7 +1725,7 @@ void dev_pm_opp_remove(struct device *dev, unsigned long freq)
+ 	if (IS_ERR(opp_table))
+ 		return;
+ 
+-	if (!assert_single_clk(opp_table))
++	if (!assert_single_clk(opp_table, 0))
+ 		goto put_table;
+ 
+ 	mutex_lock(&opp_table->lock);
+@@ -2054,7 +2077,7 @@ int _opp_add_v1(struct opp_table *opp_table, struct device *dev,
+ 	unsigned long tol, u_volt = data->u_volt;
+ 	int ret;
+ 
+-	if (!assert_single_clk(opp_table))
++	if (!assert_single_clk(opp_table, 0))
+ 		return -EINVAL;
+ 
+ 	new_opp = _opp_allocate(opp_table);
+@@ -2911,7 +2934,7 @@ static int _opp_set_availability(struct device *dev, unsigned long freq,
+ 		return r;
+ 	}
+ 
+-	if (!assert_single_clk(opp_table)) {
++	if (!assert_single_clk(opp_table, 0)) {
+ 		r = -EINVAL;
+ 		goto put_table;
+ 	}
+@@ -2987,7 +3010,7 @@ int dev_pm_opp_adjust_voltage(struct device *dev, unsigned long freq,
+ 		return r;
+ 	}
+ 
+-	if (!assert_single_clk(opp_table)) {
++	if (!assert_single_clk(opp_table, 0)) {
+ 		r = -EINVAL;
+ 		goto put_table;
+ 	}
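
The OPP changes widen the assert callback to take the lookup index, so indexed
finds can pass assert_clk_index()/assert_bandwidth_index() instead of NULL and
reject an out-of-range index before any table access. The callback-with-bounds
pattern in isolation (hypothetical names, userspace model):

/* Bounds-asserting lookup sketch. The assert callback receives the
 * index so each table type can check it against its own count before
 * any array access happens.
 */
#include <stdio.h>
#include <stdbool.h>

struct table {
	unsigned int clk_count;
	unsigned long freqs[4];
};

typedef bool (*assert_fn)(const struct table *t, unsigned int index);

static bool assert_clk_index(const struct table *t, unsigned int index)
{
	return t->clk_count > index;	/* index must fit the table */
}

static long find_freq(const struct table *t, unsigned int index,
		      assert_fn assert)
{
	if (assert && !assert(t, index))
		return -1;		/* models returning -EINVAL */
	return (long)t->freqs[index];
}

int main(void)
{
	struct table t = { .clk_count = 2, .freqs = { 100, 200 } };

	printf("%ld\n", find_freq(&t, 1, assert_clk_index));	/* 200 */
	printf("%ld\n", find_freq(&t, 3, assert_clk_index));	/* -1  */
	return 0;
}
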
+diff --git a/drivers/opp/of.c b/drivers/opp/of.c
+index 55c8cfef97d489..dcab0e7ace1068 100644
+--- a/drivers/opp/of.c
++++ b/drivers/opp/of.c
+@@ -959,7 +959,7 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table,
+ 
+ 	ret = _of_opp_alloc_required_opps(opp_table, new_opp);
+ 	if (ret)
+-		goto free_opp;
++		goto put_node;
+ 
+ 	if (!of_property_read_u32(np, "clock-latency-ns", &val))
+ 		new_opp->clock_latency_ns = val;
+@@ -1009,6 +1009,8 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table,
+ 
+ free_required_opps:
+ 	_of_opp_free_required_opps(opp_table, new_opp);
++put_node:
++	of_node_put(np);
+ free_opp:
+ 	_opp_free(new_opp);
+ 
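The opp/of.c hunk adds a put_node: label so the np reference is also dropped
when _of_opp_alloc_required_opps() fails; labels unwind in reverse order of
acquisition, the usual kernel goto-ladder shape. A compact model:

/* Reverse-order unwind sketch: each failure jumps to the label that
 * releases exactly what has been acquired so far, newest first.
 * acquire_*()/release_*() are hypothetical stand-ins.
 */
#include <stdio.h>

static int acquire_a(void) { puts("get a"); return 0; }
static int acquire_b(void) { puts("get b"); return -1; }  /* fails */
static void release_a(void) { puts("put a"); }

static int setup(void)
{
	int ret;

	ret = acquire_a();
	if (ret)
		goto out;
	ret = acquire_b();
	if (ret)
		goto put_a;	/* only 'a' is held at this point */

	return 0;

put_a:
	release_a();
out:
	return ret;
}

int main(void)
{
	return setup() ? 1 : 0;
}
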
+diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
+index c8d5c90aa4d45b..ad3028b755d16a 100644
+--- a/drivers/pci/controller/dwc/pci-imx6.c
++++ b/drivers/pci/controller/dwc/pci-imx6.c
+@@ -598,10 +598,9 @@ static int imx_pcie_attach_pd(struct device *dev)
+ 
+ static int imx6sx_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable)
+ {
+-	if (enable)
+-		regmap_clear_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
+-				  IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
+-
++	regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
++			   IMX6SX_GPR12_PCIE_TEST_POWERDOWN,
++			   enable ? 0 : IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
+ 	return 0;
+ }
+ 
+@@ -630,19 +629,20 @@ static int imx8mm_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable)
+ {
+ 	int offset = imx_pcie_grp_offset(imx_pcie);
+ 
+-	if (enable) {
+-		regmap_clear_bits(imx_pcie->iomuxc_gpr, offset, IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE);
+-		regmap_set_bits(imx_pcie->iomuxc_gpr, offset, IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN);
+-	}
+-
++	regmap_update_bits(imx_pcie->iomuxc_gpr, offset,
++			   IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE,
++			   enable ? 0 : IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE);
++	regmap_update_bits(imx_pcie->iomuxc_gpr, offset,
++			   IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN,
++			   enable ? IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN : 0);
+ 	return 0;
+ }
+ 
+ static int imx7d_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable)
+ {
+-	if (!enable)
+-		regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
+-				IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
++	regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
++			   IMX7D_GPR12_PCIE_PHY_REFCLK_SEL,
++			   enable ? 0 : IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
+ 	return 0;
+ }
+ 
+@@ -775,6 +775,7 @@ static void imx_pcie_assert_core_reset(struct imx_pcie *imx_pcie)
+ static int imx_pcie_deassert_core_reset(struct imx_pcie *imx_pcie)
+ {
+ 	reset_control_deassert(imx_pcie->pciephy_reset);
++	reset_control_deassert(imx_pcie->apps_reset);
+ 
+ 	if (imx_pcie->drvdata->core_reset)
+ 		imx_pcie->drvdata->core_reset(imx_pcie, false);
+@@ -966,7 +967,9 @@ static int imx_pcie_host_init(struct dw_pcie_rp *pp)
+ 			goto err_clk_disable;
+ 		}
+ 
+-		ret = phy_set_mode_ext(imx_pcie->phy, PHY_MODE_PCIE, PHY_MODE_PCIE_RC);
++		ret = phy_set_mode_ext(imx_pcie->phy, PHY_MODE_PCIE,
++				       imx_pcie->drvdata->mode == DW_PCIE_EP_TYPE ?
++						PHY_MODE_PCIE_EP : PHY_MODE_PCIE_RC);
+ 		if (ret) {
+ 			dev_err(dev, "unable to set PCIe PHY mode\n");
+ 			goto err_phy_exit;
+@@ -1391,7 +1394,6 @@ static int imx_pcie_probe(struct platform_device *pdev)
+ 	switch (imx_pcie->drvdata->variant) {
+ 	case IMX8MQ:
+ 	case IMX8MQ_EP:
+-	case IMX7D:
+ 		if (dbi_base->start == IMX8MQ_PCIE2_BASE_ADDR)
+ 			imx_pcie->controller_id = 1;
+ 		break;
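
Each imx6 hunk replaces a one-sided regmap_set_bits()/regmap_clear_bits() with
regmap_update_bits(map, reg, mask, val), so the enable and disable paths both
program an explicit state instead of assuming the register's previous contents.
A userspace sketch of the read-modify-write helper these calls model:

/* update_bits() sketch: clear the masked field, then set the wanted
 * value, so the call is idempotent for both enable and disable.
 */
#include <stdio.h>
#include <stdint.h>

#define TEST_POWERDOWN (1u << 30)	/* hypothetical bit position */

static uint32_t fake_reg;		/* stands in for the GPR register */

static void update_bits(uint32_t *reg, uint32_t mask, uint32_t val)
{
	*reg = (*reg & ~mask) | (val & mask);
}

static void enable_ref_clk(int enable)
{
	/* enable clears powerdown, disable sets it: one call, two states */
	update_bits(&fake_reg, TEST_POWERDOWN,
		    enable ? 0 : TEST_POWERDOWN);
}

int main(void)
{
	enable_ref_clk(1);
	printf("after enable:  %#x\n", fake_reg);
	enable_ref_clk(0);
	printf("after disable: %#x\n", fake_reg);
	return 0;
}
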
+diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
+index 3e41865c72904e..120e2aca5164ab 100644
+--- a/drivers/pci/controller/dwc/pcie-designware-host.c
++++ b/drivers/pci/controller/dwc/pcie-designware-host.c
+@@ -946,6 +946,7 @@ int dw_pcie_suspend_noirq(struct dw_pcie *pci)
+ 		return ret;
+ 	}
+ 
++	dw_pcie_stop_link(pci);
+ 	if (pci->pp.ops->deinit)
+ 		pci->pp.ops->deinit(&pci->pp);
+ 
+diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
+index 6483e1874477ef..4c141e05f84e9c 100644
+--- a/drivers/pci/controller/dwc/pcie-qcom.c
++++ b/drivers/pci/controller/dwc/pcie-qcom.c
+@@ -1559,6 +1559,8 @@ static irqreturn_t qcom_pcie_global_irq_thread(int irq, void *data)
+ 		pci_lock_rescan_remove();
+ 		pci_rescan_bus(pp->bridge->bus);
+ 		pci_unlock_rescan_remove();
++
++		qcom_pcie_icc_opp_update(pcie);
+ 	} else {
+ 		dev_WARN_ONCE(dev, 1, "Received unknown event. INT_STATUS: 0x%08x\n",
+ 			      status);
+diff --git a/drivers/pci/controller/pcie-rcar-ep.c b/drivers/pci/controller/pcie-rcar-ep.c
+index 047e2cef5afcd5..c5e0d025bc4359 100644
+--- a/drivers/pci/controller/pcie-rcar-ep.c
++++ b/drivers/pci/controller/pcie-rcar-ep.c
+@@ -107,7 +107,7 @@ static int rcar_pcie_parse_outbound_ranges(struct rcar_pcie_endpoint *ep,
+ 		}
+ 		if (!devm_request_mem_region(&pdev->dev, res->start,
+ 					     resource_size(res),
+-					     outbound_name)) {
++					     res->name)) {
+ 			dev_err(pcie->dev, "Cannot request memory region %s.\n",
+ 				outbound_name);
+ 			return -EIO;
+diff --git a/drivers/pci/controller/plda/pcie-microchip-host.c b/drivers/pci/controller/plda/pcie-microchip-host.c
+index 48f60a04b740ba..3fdfffdf027001 100644
+--- a/drivers/pci/controller/plda/pcie-microchip-host.c
++++ b/drivers/pci/controller/plda/pcie-microchip-host.c
+@@ -7,27 +7,31 @@
+  * Author: Daire McNamara <daire.mcnamara@microchip.com>
+  */
+ 
++#include <linux/align.h>
++#include <linux/bits.h>
+ #include <linux/bitfield.h>
+ #include <linux/clk.h>
+ #include <linux/irqchip/chained_irq.h>
+ #include <linux/irqdomain.h>
++#include <linux/log2.h>
+ #include <linux/module.h>
+ #include <linux/msi.h>
+ #include <linux/of_address.h>
+ #include <linux/of_pci.h>
+ #include <linux/pci-ecam.h>
+ #include <linux/platform_device.h>
++#include <linux/wordpart.h>
+ 
+ #include "../../pci.h"
+ #include "pcie-plda.h"
+ 
++#define MC_MAX_NUM_INBOUND_WINDOWS		8
++#define MPFS_NC_BOUNCE_ADDR			0x80000000
++
+ /* PCIe Bridge Phy and Controller Phy offsets */
+ #define MC_PCIE1_BRIDGE_ADDR			0x00008000u
+ #define MC_PCIE1_CTRL_ADDR			0x0000a000u
+ 
+-#define MC_PCIE_BRIDGE_ADDR			(MC_PCIE1_BRIDGE_ADDR)
+-#define MC_PCIE_CTRL_ADDR			(MC_PCIE1_CTRL_ADDR)
+-
+ /* PCIe Controller Phy Regs */
+ #define SEC_ERROR_EVENT_CNT			0x20
+ #define DED_ERROR_EVENT_CNT			0x24
+@@ -128,7 +132,6 @@
+ 	[EVENT_LOCAL_ ## x] = { __stringify(x), s }
+ 
+ #define PCIE_EVENT(x) \
+-	.base = MC_PCIE_CTRL_ADDR, \
+ 	.offset = PCIE_EVENT_INT, \
+ 	.mask_offset = PCIE_EVENT_INT, \
+ 	.mask_high = 1, \
+@@ -136,7 +139,6 @@
+ 	.enb_mask = PCIE_EVENT_INT_ENB_MASK
+ 
+ #define SEC_EVENT(x) \
+-	.base = MC_PCIE_CTRL_ADDR, \
+ 	.offset = SEC_ERROR_INT, \
+ 	.mask_offset = SEC_ERROR_INT_MASK, \
+ 	.mask = SEC_ERROR_INT_ ## x ## _INT, \
+@@ -144,7 +146,6 @@
+ 	.enb_mask = 0
+ 
+ #define DED_EVENT(x) \
+-	.base = MC_PCIE_CTRL_ADDR, \
+ 	.offset = DED_ERROR_INT, \
+ 	.mask_offset = DED_ERROR_INT_MASK, \
+ 	.mask_high = 1, \
+@@ -152,7 +153,6 @@
+ 	.enb_mask = 0
+ 
+ #define LOCAL_EVENT(x) \
+-	.base = MC_PCIE_BRIDGE_ADDR, \
+ 	.offset = ISTATUS_LOCAL, \
+ 	.mask_offset = IMASK_LOCAL, \
+ 	.mask_high = 0, \
+@@ -179,7 +179,8 @@ struct event_map {
+ 
+ struct mc_pcie {
+ 	struct plda_pcie_rp plda;
+-	void __iomem *axi_base_addr;
++	void __iomem *bridge_base_addr;
++	void __iomem *ctrl_base_addr;
+ };
+ 
+ struct cause {
+@@ -253,7 +254,6 @@ static struct event_map local_status_to_event[] = {
+ };
+ 
+ static struct {
+-	u32 base;
+ 	u32 offset;
+ 	u32 mask;
+ 	u32 shift;
+@@ -325,8 +325,7 @@ static inline u32 reg_to_event(u32 reg, struct event_map field)
+ 
+ static u32 pcie_events(struct mc_pcie *port)
+ {
+-	void __iomem *ctrl_base_addr = port->axi_base_addr + MC_PCIE_CTRL_ADDR;
+-	u32 reg = readl_relaxed(ctrl_base_addr + PCIE_EVENT_INT);
++	u32 reg = readl_relaxed(port->ctrl_base_addr + PCIE_EVENT_INT);
+ 	u32 val = 0;
+ 	int i;
+ 
+@@ -338,8 +337,7 @@ static u32 pcie_events(struct mc_pcie *port)
+ 
+ static u32 sec_errors(struct mc_pcie *port)
+ {
+-	void __iomem *ctrl_base_addr = port->axi_base_addr + MC_PCIE_CTRL_ADDR;
+-	u32 reg = readl_relaxed(ctrl_base_addr + SEC_ERROR_INT);
++	u32 reg = readl_relaxed(port->ctrl_base_addr + SEC_ERROR_INT);
+ 	u32 val = 0;
+ 	int i;
+ 
+@@ -351,8 +349,7 @@ static u32 sec_errors(struct mc_pcie *port)
+ 
+ static u32 ded_errors(struct mc_pcie *port)
+ {
+-	void __iomem *ctrl_base_addr = port->axi_base_addr + MC_PCIE_CTRL_ADDR;
+-	u32 reg = readl_relaxed(ctrl_base_addr + DED_ERROR_INT);
++	u32 reg = readl_relaxed(port->ctrl_base_addr + DED_ERROR_INT);
+ 	u32 val = 0;
+ 	int i;
+ 
+@@ -364,8 +361,7 @@ static u32 ded_errors(struct mc_pcie *port)
+ 
+ static u32 local_events(struct mc_pcie *port)
+ {
+-	void __iomem *bridge_base_addr = port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
+-	u32 reg = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL);
++	u32 reg = readl_relaxed(port->bridge_base_addr + ISTATUS_LOCAL);
+ 	u32 val = 0;
+ 	int i;
+ 
+@@ -412,8 +408,12 @@ static void mc_ack_event_irq(struct irq_data *data)
+ 	void __iomem *addr;
+ 	u32 mask;
+ 
+-	addr = mc_port->axi_base_addr + event_descs[event].base +
+-		event_descs[event].offset;
++	if (event_descs[event].offset == ISTATUS_LOCAL)
++		addr = mc_port->bridge_base_addr;
++	else
++		addr = mc_port->ctrl_base_addr;
++
++	addr += event_descs[event].offset;
+ 	mask = event_descs[event].mask;
+ 	mask |= event_descs[event].enb_mask;
+ 
+@@ -429,8 +429,12 @@ static void mc_mask_event_irq(struct irq_data *data)
+ 	u32 mask;
+ 	u32 val;
+ 
+-	addr = mc_port->axi_base_addr + event_descs[event].base +
+-		event_descs[event].mask_offset;
++	if (event_descs[event].offset == ISTATUS_LOCAL)
++		addr = mc_port->bridge_base_addr;
++	else
++		addr = mc_port->ctrl_base_addr;
++
++	addr += event_descs[event].mask_offset;
+ 	mask = event_descs[event].mask;
+ 	if (event_descs[event].enb_mask) {
+ 		mask <<= PCIE_EVENT_INT_ENB_SHIFT;
+@@ -460,8 +464,12 @@ static void mc_unmask_event_irq(struct irq_data *data)
+ 	u32 mask;
+ 	u32 val;
+ 
+-	addr = mc_port->axi_base_addr + event_descs[event].base +
+-		event_descs[event].mask_offset;
++	if (event_descs[event].offset == ISTATUS_LOCAL)
++		addr = mc_port->bridge_base_addr;
++	else
++		addr = mc_port->ctrl_base_addr;
++
++	addr += event_descs[event].mask_offset;
+ 	mask = event_descs[event].mask;
+ 
+ 	if (event_descs[event].enb_mask)
+@@ -554,26 +562,20 @@ static const struct plda_event mc_event = {
+ 
+ static inline void mc_clear_secs(struct mc_pcie *port)
+ {
+-	void __iomem *ctrl_base_addr = port->axi_base_addr + MC_PCIE_CTRL_ADDR;
+-
+-	writel_relaxed(SEC_ERROR_INT_ALL_RAM_SEC_ERR_INT, ctrl_base_addr +
+-		       SEC_ERROR_INT);
+-	writel_relaxed(0, ctrl_base_addr + SEC_ERROR_EVENT_CNT);
++	writel_relaxed(SEC_ERROR_INT_ALL_RAM_SEC_ERR_INT,
++		       port->ctrl_base_addr + SEC_ERROR_INT);
++	writel_relaxed(0, port->ctrl_base_addr + SEC_ERROR_EVENT_CNT);
+ }
+ 
+ static inline void mc_clear_deds(struct mc_pcie *port)
+ {
+-	void __iomem *ctrl_base_addr = port->axi_base_addr + MC_PCIE_CTRL_ADDR;
+-
+-	writel_relaxed(DED_ERROR_INT_ALL_RAM_DED_ERR_INT, ctrl_base_addr +
+-		       DED_ERROR_INT);
+-	writel_relaxed(0, ctrl_base_addr + DED_ERROR_EVENT_CNT);
++	writel_relaxed(DED_ERROR_INT_ALL_RAM_DED_ERR_INT,
++		       port->ctrl_base_addr + DED_ERROR_INT);
++	writel_relaxed(0, port->ctrl_base_addr + DED_ERROR_EVENT_CNT);
+ }
+ 
+ static void mc_disable_interrupts(struct mc_pcie *port)
+ {
+-	void __iomem *bridge_base_addr = port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
+-	void __iomem *ctrl_base_addr = port->axi_base_addr + MC_PCIE_CTRL_ADDR;
+ 	u32 val;
+ 
+ 	/* Ensure ECC bypass is enabled */
+@@ -581,22 +583,22 @@ static void mc_disable_interrupts(struct mc_pcie *port)
+ 	      ECC_CONTROL_RX_RAM_ECC_BYPASS |
+ 	      ECC_CONTROL_PCIE2AXI_RAM_ECC_BYPASS |
+ 	      ECC_CONTROL_AXI2PCIE_RAM_ECC_BYPASS;
+-	writel_relaxed(val, ctrl_base_addr + ECC_CONTROL);
++	writel_relaxed(val, port->ctrl_base_addr + ECC_CONTROL);
+ 
+ 	/* Disable SEC errors and clear any outstanding */
+-	writel_relaxed(SEC_ERROR_INT_ALL_RAM_SEC_ERR_INT, ctrl_base_addr +
+-		       SEC_ERROR_INT_MASK);
++	writel_relaxed(SEC_ERROR_INT_ALL_RAM_SEC_ERR_INT,
++		       port->ctrl_base_addr + SEC_ERROR_INT_MASK);
+ 	mc_clear_secs(port);
+ 
+ 	/* Disable DED errors and clear any outstanding */
+-	writel_relaxed(DED_ERROR_INT_ALL_RAM_DED_ERR_INT, ctrl_base_addr +
+-		       DED_ERROR_INT_MASK);
++	writel_relaxed(DED_ERROR_INT_ALL_RAM_DED_ERR_INT,
++		       port->ctrl_base_addr + DED_ERROR_INT_MASK);
+ 	mc_clear_deds(port);
+ 
+ 	/* Disable local interrupts and clear any outstanding */
+-	writel_relaxed(0, bridge_base_addr + IMASK_LOCAL);
+-	writel_relaxed(GENMASK(31, 0), bridge_base_addr + ISTATUS_LOCAL);
+-	writel_relaxed(GENMASK(31, 0), bridge_base_addr + ISTATUS_MSI);
++	writel_relaxed(0, port->bridge_base_addr + IMASK_LOCAL);
++	writel_relaxed(GENMASK(31, 0), port->bridge_base_addr + ISTATUS_LOCAL);
++	writel_relaxed(GENMASK(31, 0), port->bridge_base_addr + ISTATUS_MSI);
+ 
+ 	/* Disable PCIe events and clear any outstanding */
+ 	val = PCIE_EVENT_INT_L2_EXIT_INT |
+@@ -605,11 +607,96 @@ static void mc_disable_interrupts(struct mc_pcie *port)
+ 	      PCIE_EVENT_INT_L2_EXIT_INT_MASK |
+ 	      PCIE_EVENT_INT_HOTRST_EXIT_INT_MASK |
+ 	      PCIE_EVENT_INT_DLUP_EXIT_INT_MASK;
+-	writel_relaxed(val, ctrl_base_addr + PCIE_EVENT_INT);
++	writel_relaxed(val, port->ctrl_base_addr + PCIE_EVENT_INT);
+ 
+ 	/* Disable host interrupts and clear any outstanding */
+-	writel_relaxed(0, bridge_base_addr + IMASK_HOST);
+-	writel_relaxed(GENMASK(31, 0), bridge_base_addr + ISTATUS_HOST);
++	writel_relaxed(0, port->bridge_base_addr + IMASK_HOST);
++	writel_relaxed(GENMASK(31, 0), port->bridge_base_addr + ISTATUS_HOST);
++}
++
++static void mc_pcie_setup_inbound_atr(struct mc_pcie *port, int window_index,
++				      u64 axi_addr, u64 pcie_addr, u64 size)
++{
++	u32 table_offset = window_index * ATR_ENTRY_SIZE;
++	void __iomem *table_addr = port->bridge_base_addr + table_offset;
++	u32 atr_sz;
++	u32 val;
++
++	atr_sz = ilog2(size) - 1;
++
++	val = ALIGN_DOWN(lower_32_bits(pcie_addr), SZ_4K);
++	val |= FIELD_PREP(ATR_SIZE_MASK, atr_sz);
++	val |= ATR_IMPL_ENABLE;
++
++	writel(val, table_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM);
++
++	writel(upper_32_bits(pcie_addr), table_addr + ATR0_PCIE_WIN0_SRC_ADDR);
++
++	writel(lower_32_bits(axi_addr), table_addr + ATR0_PCIE_WIN0_TRSL_ADDR_LSB);
++	writel(upper_32_bits(axi_addr), table_addr + ATR0_PCIE_WIN0_TRSL_ADDR_UDW);
++
++	writel(TRSL_ID_AXI4_MASTER_0, table_addr + ATR0_PCIE_WIN0_TRSL_PARAM);
++}
++
++static int mc_pcie_setup_inbound_ranges(struct platform_device *pdev,
++					struct mc_pcie *port)
++{
++	struct device *dev = &pdev->dev;
++	struct device_node *dn = dev->of_node;
++	struct of_range_parser parser;
++	struct of_range range;
++	int atr_index = 0;
++
++	/*
++	 * MPFS PCIe Root Port is 32-bit only, behind a Fabric Interface
++	 * Controller FPGA logic block which contains the AXI-S interface.
++	 *
++	 * From the point of view of the PCIe Root Port, there are only two
++	 * supported Root Port configurations:
++	 *
++	 * Configuration 1: for use with fully coherent designs; supports a
++	 * window from 0x0 (CPU space) to specified PCIe space.
++	 *
++	 * Configuration 2: for use with non-coherent designs; supports two
++	 * 1 GB windows to CPU space; one mapping CPU space 0 to PCIe space
++	 * 0x80000000 and a second mapping CPU space 0x40000000 to PCIe
++	 * space 0xc0000000. This cfg needs two windows because of how the
++	 * MSI space is allocated in the AXI-S range on MPFS.
++	 *
++	 * The FIC interface outside the PCIe block *must* complete the
++	 * inbound address translation as per MCHP MPFS FPGA design
++	 * guidelines.
++	 */
++	if (device_property_read_bool(dev, "dma-noncoherent")) {
++		/*
++		 * Always need same two tables in this case.  Need two tables
++		 * due to hardware interactions between address and size.
++		 */
++		mc_pcie_setup_inbound_atr(port, 0, 0,
++					  MPFS_NC_BOUNCE_ADDR, SZ_1G);
++		mc_pcie_setup_inbound_atr(port, 1, SZ_1G,
++					  MPFS_NC_BOUNCE_ADDR + SZ_1G, SZ_1G);
++	} else {
++		/* Find any DMA ranges */
++		if (of_pci_dma_range_parser_init(&parser, dn)) {
++			/* No DMA range property - setup default */
++			mc_pcie_setup_inbound_atr(port, 0, 0, 0, SZ_4G);
++			return 0;
++		}
++
++		for_each_of_range(&parser, &range) {
++			if (atr_index >= MC_MAX_NUM_INBOUND_WINDOWS) {
++				dev_err(dev, "too many inbound ranges; %d available tables\n",
++					MC_MAX_NUM_INBOUND_WINDOWS);
++				return -EINVAL;
++			}
++			mc_pcie_setup_inbound_atr(port, atr_index, 0,
++						  range.pci_addr, range.size);
++			atr_index++;
++		}
++	}
++
++	return 0;
+ }
+ 
+ static int mc_platform_init(struct pci_config_window *cfg)
+@@ -617,12 +704,10 @@ static int mc_platform_init(struct pci_config_window *cfg)
+ 	struct device *dev = cfg->parent;
+ 	struct platform_device *pdev = to_platform_device(dev);
+ 	struct pci_host_bridge *bridge = platform_get_drvdata(pdev);
+-	void __iomem *bridge_base_addr =
+-		port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
+ 	int ret;
+ 
+ 	/* Configure address translation table 0 for PCIe config space */
+-	plda_pcie_setup_window(bridge_base_addr, 0, cfg->res.start,
++	plda_pcie_setup_window(port->bridge_base_addr, 0, cfg->res.start,
+ 			       cfg->res.start,
+ 			       resource_size(&cfg->res));
+ 
+@@ -634,6 +719,10 @@ static int mc_platform_init(struct pci_config_window *cfg)
+ 	if (ret)
+ 		return ret;
+ 
++	ret = mc_pcie_setup_inbound_ranges(pdev, port);
++	if (ret)
++		return ret;
++
+ 	port->plda.event_ops = &mc_event_ops;
+ 	port->plda.event_irq_chip = &mc_event_irq_chip;
+ 	port->plda.events_bitmap = GENMASK(NUM_EVENTS - 1, 0);
+@@ -649,7 +738,7 @@ static int mc_platform_init(struct pci_config_window *cfg)
+ static int mc_host_probe(struct platform_device *pdev)
+ {
+ 	struct device *dev = &pdev->dev;
+-	void __iomem *bridge_base_addr;
++	void __iomem *apb_base_addr;
+ 	struct plda_pcie_rp *plda;
+ 	int ret;
+ 	u32 val;
+@@ -661,30 +750,45 @@ static int mc_host_probe(struct platform_device *pdev)
+ 	plda = &port->plda;
+ 	plda->dev = dev;
+ 
+-	port->axi_base_addr = devm_platform_ioremap_resource(pdev, 1);
+-	if (IS_ERR(port->axi_base_addr))
+-		return PTR_ERR(port->axi_base_addr);
++	port->bridge_base_addr = devm_platform_ioremap_resource_byname(pdev,
++								    "bridge");
++	port->ctrl_base_addr = devm_platform_ioremap_resource_byname(pdev,
++								    "ctrl");
++	if (!IS_ERR(port->bridge_base_addr) && !IS_ERR(port->ctrl_base_addr))
++		goto addrs_set;
++
++	/*
++	 * The original, incorrect, binding that lumped the control and
++	 * bridge addresses together still needs to be handled by the driver.
++	 */
++	apb_base_addr = devm_platform_ioremap_resource_byname(pdev, "apb");
++	if (IS_ERR(apb_base_addr))
++		return dev_err_probe(dev, PTR_ERR(apb_base_addr),
++				     "both legacy apb register and ctrl/bridge regions missing");
++
++	port->bridge_base_addr = apb_base_addr + MC_PCIE1_BRIDGE_ADDR;
++	port->ctrl_base_addr = apb_base_addr + MC_PCIE1_CTRL_ADDR;
+ 
++addrs_set:
+ 	mc_disable_interrupts(port);
+ 
+-	bridge_base_addr = port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
+-	plda->bridge_addr = bridge_base_addr;
++	plda->bridge_addr = port->bridge_base_addr;
+ 	plda->num_events = NUM_EVENTS;
+ 
+ 	/* Allow enabling MSI by disabling MSI-X */
+-	val = readl(bridge_base_addr + PCIE_PCI_IRQ_DW0);
++	val = readl(port->bridge_base_addr + PCIE_PCI_IRQ_DW0);
+ 	val &= ~MSIX_CAP_MASK;
+-	writel(val, bridge_base_addr + PCIE_PCI_IRQ_DW0);
++	writel(val, port->bridge_base_addr + PCIE_PCI_IRQ_DW0);
+ 
+ 	/* Pick num vectors from bitfile programmed onto FPGA fabric */
+-	val = readl(bridge_base_addr + PCIE_PCI_IRQ_DW0);
++	val = readl(port->bridge_base_addr + PCIE_PCI_IRQ_DW0);
+ 	val &= NUM_MSI_MSGS_MASK;
+ 	val >>= NUM_MSI_MSGS_SHIFT;
+ 
+ 	plda->msi.num_vectors = 1 << val;
+ 
+ 	/* Pick vector address from design */
+-	plda->msi.vector_phy = readl_relaxed(bridge_base_addr + IMSI_ADDR);
++	plda->msi.vector_phy = readl_relaxed(port->bridge_base_addr + IMSI_ADDR);
+ 
+ 	ret = mc_pcie_init_clks(dev);
+ 	if (ret) {
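
mc_pcie_setup_inbound_atr() above packs three things into one register word:
the 4 KiB-aligned low address bits, the window size encoded as ilog2(size) - 1
in a six-bit field, and an enable bit. A standalone sketch of that encoding,
using the field layout from the pcie-plda.h hunk further down:

/* ATR source-parameter encoding sketch: size is a power of two,
 * stored as log2(size)-1 in bits 6:1, enable in bit 0, and the
 * 4 KiB-aligned address in the high bits. Mirrors the layout of
 * ATR_SIZE_MASK/ATR_IMPL_ENABLE from pcie-plda.h.
 */
#include <stdio.h>
#include <stdint.h>

#define ATR_SIZE_SHIFT	1
#define ATR_SIZE_MASK	(0x3fu << ATR_SIZE_SHIFT)	/* GENMASK(6, 1) */
#define ATR_IMPL_ENABLE	(1u << 0)			/* BIT(0) */
#define SZ_4K		0x1000u

static unsigned int ilog2_u64(uint64_t v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

static uint32_t atr_param(uint64_t addr, uint64_t size)
{
	uint32_t val = (uint32_t)addr & ~(SZ_4K - 1);	/* ALIGN_DOWN 4K */

	val |= ((ilog2_u64(size) - 1) << ATR_SIZE_SHIFT) & ATR_SIZE_MASK;
	val |= ATR_IMPL_ENABLE;
	return val;
}

int main(void)
{
	/* 1 GiB window at PCIe address 0x80000000 -> 0x8000003b */
	printf("%#x\n", atr_param(0x80000000ull, 1ull << 30));
	return 0;
}
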
+diff --git a/drivers/pci/controller/plda/pcie-plda-host.c b/drivers/pci/controller/plda/pcie-plda-host.c
+index 8533dc618d45f0..4153214ca41038 100644
+--- a/drivers/pci/controller/plda/pcie-plda-host.c
++++ b/drivers/pci/controller/plda/pcie-plda-host.c
+@@ -8,11 +8,14 @@
+  * Author: Daire McNamara <daire.mcnamara@microchip.com>
+  */
+ 
++#include <linux/align.h>
++#include <linux/bitfield.h>
+ #include <linux/irqchip/chained_irq.h>
+ #include <linux/irqdomain.h>
+ #include <linux/msi.h>
+ #include <linux/pci_regs.h>
+ #include <linux/pci-ecam.h>
++#include <linux/wordpart.h>
+ 
+ #include "pcie-plda.h"
+ 
+@@ -502,8 +505,9 @@ void plda_pcie_setup_window(void __iomem *bridge_base_addr, u32 index,
+ 	writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
+ 	       ATR0_AXI4_SLV0_TRSL_PARAM);
+ 
+-	val = lower_32_bits(axi_addr) | (atr_sz << ATR_SIZE_SHIFT) |
+-			    ATR_IMPL_ENABLE;
++	val = ALIGN_DOWN(lower_32_bits(axi_addr), SZ_4K);
++	val |= FIELD_PREP(ATR_SIZE_MASK, atr_sz);
++	val |= ATR_IMPL_ENABLE;
+ 	writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
+ 	       ATR0_AXI4_SLV0_SRCADDR_PARAM);
+ 
+@@ -518,13 +522,20 @@ void plda_pcie_setup_window(void __iomem *bridge_base_addr, u32 index,
+ 	val = upper_32_bits(pci_addr);
+ 	writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
+ 	       ATR0_AXI4_SLV0_TRSL_ADDR_UDW);
++}
++EXPORT_SYMBOL_GPL(plda_pcie_setup_window);
++
++void plda_pcie_setup_inbound_address_translation(struct plda_pcie_rp *port)
++{
++	void __iomem *bridge_base_addr = port->bridge_addr;
++	u32 val;
+ 
+ 	val = readl(bridge_base_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM);
+ 	val |= (ATR0_PCIE_ATR_SIZE << ATR0_PCIE_ATR_SIZE_SHIFT);
+ 	writel(val, bridge_base_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM);
+ 	writel(0, bridge_base_addr + ATR0_PCIE_WIN0_SRC_ADDR);
+ }
+-EXPORT_SYMBOL_GPL(plda_pcie_setup_window);
++EXPORT_SYMBOL_GPL(plda_pcie_setup_inbound_address_translation);
+ 
+ int plda_pcie_setup_iomems(struct pci_host_bridge *bridge,
+ 			   struct plda_pcie_rp *port)
+diff --git a/drivers/pci/controller/plda/pcie-plda.h b/drivers/pci/controller/plda/pcie-plda.h
+index 0e7dc0d8e5ba11..61ece26065ea09 100644
+--- a/drivers/pci/controller/plda/pcie-plda.h
++++ b/drivers/pci/controller/plda/pcie-plda.h
+@@ -89,14 +89,15 @@
+ 
+ /* PCIe AXI slave table init defines */
+ #define ATR0_AXI4_SLV0_SRCADDR_PARAM		0x800u
+-#define  ATR_SIZE_SHIFT				1
+-#define  ATR_IMPL_ENABLE			1
++#define  ATR_SIZE_MASK				GENMASK(6, 1)
++#define  ATR_IMPL_ENABLE			BIT(0)
+ #define ATR0_AXI4_SLV0_SRC_ADDR			0x804u
+ #define ATR0_AXI4_SLV0_TRSL_ADDR_LSB		0x808u
+ #define ATR0_AXI4_SLV0_TRSL_ADDR_UDW		0x80cu
+ #define ATR0_AXI4_SLV0_TRSL_PARAM		0x810u
+ #define  PCIE_TX_RX_INTERFACE			0x00000000u
+ #define  PCIE_CONFIG_INTERFACE			0x00000001u
++#define  TRSL_ID_AXI4_MASTER_0			0x00000004u
+ 
+ #define CONFIG_SPACE_ADDR_OFFSET		0x1000u
+ 
+@@ -204,6 +205,7 @@ int plda_init_interrupts(struct platform_device *pdev,
+ void plda_pcie_setup_window(void __iomem *bridge_base_addr, u32 index,
+ 			    phys_addr_t axi_addr, phys_addr_t pci_addr,
+ 			    size_t size);
++void plda_pcie_setup_inbound_address_translation(struct plda_pcie_rp *port);
+ int plda_pcie_setup_iomems(struct pci_host_bridge *bridge,
+ 			   struct plda_pcie_rp *port);
+ int plda_pcie_host_init(struct plda_pcie_rp *port, struct pci_ops *ops,
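
The header hunk replaces a bare shift constant with GENMASK(6, 1) and BIT(0),
pairing with the FIELD_PREP() call in the .c file. For reference, rough
userspace equivalents of those macros (the kernel versions in linux/bits.h and
linux/bitfield.h are stricter and fully compile-time):

/* Userspace approximations of GENMASK()/BIT()/FIELD_PREP().
 * The kernel's FIELD_PREP derives the shift at compile time and
 * type-checks the mask; this sketch uses a runtime ctz instead.
 */
#include <stdio.h>
#include <stdint.h>

#define BIT(n)		(1u << (n))
#define GENMASK(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))

static uint32_t field_prep(uint32_t mask, uint32_t val)
{
	return (val << __builtin_ctz(mask)) & mask;
}

int main(void)
{
	uint32_t size_mask = GENMASK(6, 1);	/* 0x7e */

	printf("mask=%#x encoded=%#x enable=%#x\n",
	       size_mask, field_prep(size_mask, 29), BIT(0));
	return 0;
}
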
+diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
+index 7c2ed6eae53ad1..14b4c68ab4e1a2 100644
+--- a/drivers/pci/endpoint/functions/pci-epf-test.c
++++ b/drivers/pci/endpoint/functions/pci-epf-test.c
+@@ -251,7 +251,7 @@ static int pci_epf_test_init_dma_chan(struct pci_epf_test *epf_test)
+ 
+ fail_back_rx:
+ 	dma_release_channel(epf_test->dma_chan_rx);
+-	epf_test->dma_chan_tx = NULL;
++	epf_test->dma_chan_rx = NULL;
+ 
+ fail_back_tx:
+ 	dma_cap_zero(mask);
+@@ -361,8 +361,8 @@ static void pci_epf_test_copy(struct pci_epf_test *epf_test,
+ 
+ 	ktime_get_ts64(&start);
+ 	if (reg->flags & FLAG_USE_DMA) {
+-		if (epf_test->dma_private) {
+-			dev_err(dev, "Cannot transfer data using DMA\n");
++		if (!dma_has_cap(DMA_MEMCPY, epf_test->dma_chan_tx->device->cap_mask)) {
++			dev_err(dev, "DMA controller doesn't support MEMCPY\n");
+ 			ret = -EINVAL;
+ 			goto err_map_addr;
+ 		}
+diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c
+index 62f7dff437309f..de665342dc16d0 100644
+--- a/drivers/pci/endpoint/pci-epc-core.c
++++ b/drivers/pci/endpoint/pci-epc-core.c
+@@ -856,7 +856,7 @@ void devm_pci_epc_destroy(struct device *dev, struct pci_epc *epc)
+ {
+ 	int r;
+ 
+-	r = devres_destroy(dev, devm_pci_epc_release, devm_pci_epc_match,
++	r = devres_release(dev, devm_pci_epc_release, devm_pci_epc_match,
+ 			   epc);
+ 	dev_WARN_ONCE(dev, r, "couldn't find PCI EPC resource\n");
+ }
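
The epc-core fix swaps devres_destroy() for devres_release(): both detach the
managed resource from the device, but only the latter also invokes the release
callback, so the EPC teardown actually runs. The difference, modeled with
hypothetical names:

/* devres_destroy() vs devres_release() sketch (userspace model).
 * "destroy" forgets the resource without cleaning it up; "release"
 * runs the cleanup callback first, which is usually what callers want.
 */
#include <stdio.h>
#include <stdlib.h>

struct devres {
	void (*release)(void *data);
	void *data;
};

static void res_destroy(struct devres *dr)	/* drop bookkeeping only */
{
	free(dr);
}

static void res_release(struct devres *dr)	/* clean up, then drop */
{
	dr->release(dr->data);
	free(dr);
}

static void epc_release(void *data)
{
	printf("releasing %s\n", (const char *)data);
}

int main(void)
{
	struct devres *dr = malloc(sizeof(*dr));

	if (!dr)
		return 1;
	dr->release = epc_release;
	dr->data = "epc";
	res_release(dr);	/* prints; res_destroy(dr) would not */
	return 0;
}
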
+diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+index f4f10c60c1d23b..dcc662be080004 100644
+--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
++++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+@@ -438,9 +438,9 @@ static void nmk_prcm_altcx_set_mode(struct nmk_pinctrl *npct,
+  *  - Any spurious wake up event during switch sequence to be ignored and
+  *    cleared
+  */
+-static void nmk_gpio_glitch_slpm_init(unsigned int *slpm)
++static int nmk_gpio_glitch_slpm_init(unsigned int *slpm)
+ {
+-	int i;
++	int i, j, ret;
+ 
+ 	for (i = 0; i < NMK_MAX_BANKS; i++) {
+ 		struct nmk_gpio_chip *chip = nmk_gpio_chips[i];
+@@ -449,11 +449,21 @@ static void nmk_gpio_glitch_slpm_init(unsigned int *slpm)
+ 		if (!chip)
+ 			break;
+ 
+-		clk_enable(chip->clk);
++		ret = clk_enable(chip->clk);
++		if (ret) {
++			for (j = 0; j < i; j++) {
++				chip = nmk_gpio_chips[j];
++				clk_disable(chip->clk);
++			}
++
++			return ret;
++		}
+ 
+ 		slpm[i] = readl(chip->addr + NMK_GPIO_SLPC);
+ 		writel(temp, chip->addr + NMK_GPIO_SLPC);
+ 	}
++
++	return 0;
+ }
+ 
+ static void nmk_gpio_glitch_slpm_restore(unsigned int *slpm)
+@@ -923,7 +933,9 @@ static int nmk_pmx_set(struct pinctrl_dev *pctldev, unsigned int function,
+ 
+ 			slpm[nmk_chip->bank] &= ~BIT(bit);
+ 		}
+-		nmk_gpio_glitch_slpm_init(slpm);
++		ret = nmk_gpio_glitch_slpm_init(slpm);
++		if (ret)
++			goto out_pre_slpm_init;
+ 	}
+ 
+ 	for (i = 0; i < g->grp.npins; i++) {
+@@ -940,7 +952,10 @@ static int nmk_pmx_set(struct pinctrl_dev *pctldev, unsigned int function,
+ 		dev_dbg(npct->dev, "setting pin %d to altsetting %d\n",
+ 			g->grp.pins[i], g->altsetting);
+ 
+-		clk_enable(nmk_chip->clk);
++		ret = clk_enable(nmk_chip->clk);
++		if (ret)
++			goto out_glitch;
++
+ 		/*
+ 		 * If the pin is switching to altfunc, and there was an
+ 		 * interrupt installed on it which has been lazy disabled,
+@@ -988,6 +1003,7 @@ static int nmk_gpio_request_enable(struct pinctrl_dev *pctldev,
+ 	struct nmk_gpio_chip *nmk_chip;
+ 	struct gpio_chip *chip;
+ 	unsigned int bit;
++	int ret;
+ 
+ 	if (!range) {
+ 		dev_err(npct->dev, "invalid range\n");
+@@ -1004,7 +1020,9 @@ static int nmk_gpio_request_enable(struct pinctrl_dev *pctldev,
+ 
+ 	find_nmk_gpio_from_pin(pin, &bit);
+ 
+-	clk_enable(nmk_chip->clk);
++	ret = clk_enable(nmk_chip->clk);
++	if (ret)
++		return ret;
+ 	/* There is no glitch when converting any pin to GPIO */
+ 	__nmk_gpio_set_mode(nmk_chip, bit, NMK_GPIO_ALT_GPIO);
+ 	clk_disable(nmk_chip->clk);
+@@ -1058,6 +1076,7 @@ static int nmk_pin_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
+ 	unsigned long cfg;
+ 	int pull, slpm, output, val, i;
+ 	bool lowemi, gpiomode, sleep;
++	int ret;
+ 
+ 	nmk_chip = find_nmk_gpio_from_pin(pin, &bit);
+ 	if (!nmk_chip) {
+@@ -1116,7 +1135,9 @@ static int nmk_pin_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
+ 			output ? (val ? "high" : "low") : "",
+ 			lowemi ? "on" : "off");
+ 
+-		clk_enable(nmk_chip->clk);
++		ret = clk_enable(nmk_chip->clk);
++		if (ret)
++			return ret;
+ 		if (gpiomode)
+ 			/* No glitch when going to GPIO mode */
+ 			__nmk_gpio_set_mode(nmk_chip, bit, NMK_GPIO_ALT_GPIO);
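
The nomadik hunks make clk_enable() failures unwind: when bank i fails, banks
0..i-1 that were already enabled are disabled again, and callers propagate the
error instead of continuing with half the banks clocked. The partial-failure
rollback in isolation:

/* Enable-all-or-none sketch: on the first failure, undo every
 * earlier enable in order, then report the error to the caller.
 */
#include <stdio.h>

#define NBANKS 4

static int enable_bank(int i)
{
	if (i == 2)
		return -1;	/* simulate a failing clock */
	printf("enable %d\n", i);
	return 0;
}

static void disable_bank(int i)
{
	printf("disable %d\n", i);
}

static int enable_all(void)
{
	int i, j, ret;

	for (i = 0; i < NBANKS; i++) {
		ret = enable_bank(i);
		if (ret) {
			for (j = 0; j < i; j++)	/* roll back 0..i-1 */
				disable_bank(j);
			return ret;
		}
	}
	return 0;
}

int main(void)
{
	return enable_all() ? 1 : 0;
}
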
+diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
+index 7f66ec73199a9c..a12766b3bc8a73 100644
+--- a/drivers/pinctrl/pinctrl-amd.c
++++ b/drivers/pinctrl/pinctrl-amd.c
+@@ -908,12 +908,13 @@ static bool amd_gpio_should_save(struct amd_gpio *gpio_dev, unsigned int pin)
+ 	return false;
+ }
+ 
+-static int amd_gpio_suspend(struct device *dev)
++static int amd_gpio_suspend_hibernate_common(struct device *dev, bool is_suspend)
+ {
+ 	struct amd_gpio *gpio_dev = dev_get_drvdata(dev);
+ 	struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
+ 	unsigned long flags;
+ 	int i;
++	u32 wake_mask = is_suspend ? WAKE_SOURCE_SUSPEND : WAKE_SOURCE_HIBERNATE;
+ 
+ 	for (i = 0; i < desc->npins; i++) {
+ 		int pin = desc->pins[i].number;
+@@ -925,11 +926,11 @@ static int amd_gpio_suspend(struct device *dev)
+ 		gpio_dev->saved_regs[i] = readl(gpio_dev->base + pin * 4) & ~PIN_IRQ_PENDING;
+ 
+ 		/* mask any interrupts not intended to be a wake source */
+-		if (!(gpio_dev->saved_regs[i] & WAKE_SOURCE)) {
++		if (!(gpio_dev->saved_regs[i] & wake_mask)) {
+ 			writel(gpio_dev->saved_regs[i] & ~BIT(INTERRUPT_MASK_OFF),
+ 			       gpio_dev->base + pin * 4);
+-			pm_pr_dbg("Disabling GPIO #%d interrupt for suspend.\n",
+-				  pin);
++			pm_pr_dbg("Disabling GPIO #%d interrupt for %s.\n",
++				  pin, is_suspend ? "suspend" : "hibernate");
+ 		}
+ 
+ 		raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
+@@ -938,6 +939,16 @@ static int amd_gpio_suspend(struct device *dev)
+ 	return 0;
+ }
+ 
++static int amd_gpio_suspend(struct device *dev)
++{
++	return amd_gpio_suspend_hibernate_common(dev, true);
++}
++
++static int amd_gpio_hibernate(struct device *dev)
++{
++	return amd_gpio_suspend_hibernate_common(dev, false);
++}
++
+ static int amd_gpio_resume(struct device *dev)
+ {
+ 	struct amd_gpio *gpio_dev = dev_get_drvdata(dev);
+@@ -961,8 +972,12 @@ static int amd_gpio_resume(struct device *dev)
+ }
+ 
+ static const struct dev_pm_ops amd_gpio_pm_ops = {
+-	SET_LATE_SYSTEM_SLEEP_PM_OPS(amd_gpio_suspend,
+-				     amd_gpio_resume)
++	.suspend_late = amd_gpio_suspend,
++	.resume_early = amd_gpio_resume,
++	.freeze_late = amd_gpio_hibernate,
++	.thaw_early = amd_gpio_resume,
++	.poweroff_late = amd_gpio_hibernate,
++	.restore_early = amd_gpio_resume,
+ };
+ #endif
+ 
+diff --git a/drivers/pinctrl/pinctrl-amd.h b/drivers/pinctrl/pinctrl-amd.h
+index cf59089f277639..c9522c62d7910f 100644
+--- a/drivers/pinctrl/pinctrl-amd.h
++++ b/drivers/pinctrl/pinctrl-amd.h
+@@ -80,10 +80,9 @@
+ #define FUNCTION_MASK		GENMASK(1, 0)
+ #define FUNCTION_INVALID	GENMASK(7, 0)
+ 
+-#define WAKE_SOURCE	(BIT(WAKE_CNTRL_OFF_S0I3) | \
+-			 BIT(WAKE_CNTRL_OFF_S3)   | \
+-			 BIT(WAKE_CNTRL_OFF_S4)   | \
+-			 BIT(WAKECNTRL_Z_OFF))
++#define WAKE_SOURCE_SUSPEND  (BIT(WAKE_CNTRL_OFF_S0I3) | \
++			      BIT(WAKE_CNTRL_OFF_S3))
++#define WAKE_SOURCE_HIBERNATE BIT(WAKE_CNTRL_OFF_S4)
+ 
+ struct amd_function {
+ 	const char *name;
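The pinctrl-amd change splits the single WAKE_SOURCE mask so suspend keeps only
the S0i3/S3 wake bits armed while hibernate keeps only S4, with separate
dev_pm_ops callbacks selecting the mask. The selection logic in miniature (bit
positions below are illustrative, not the real register layout):

/* Per-sleep-state wake-mask sketch. A pin's IRQ stays armed across
 * the transition only if it can wake from the target state.
 */
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define BIT(n)			(1u << (n))
#define WAKE_S0I3		BIT(13)	/* illustrative positions */
#define WAKE_S3			BIT(14)
#define WAKE_S4			BIT(15)

#define WAKE_SOURCE_SUSPEND	(WAKE_S0I3 | WAKE_S3)
#define WAKE_SOURCE_HIBERNATE	WAKE_S4

static bool keep_irq_armed(uint32_t pin_reg, bool is_suspend)
{
	uint32_t mask = is_suspend ? WAKE_SOURCE_SUSPEND
				   : WAKE_SOURCE_HIBERNATE;

	return (pin_reg & mask) != 0;	/* else mask the IRQ for sleep */
}

int main(void)
{
	uint32_t pin_reg = WAKE_S4;	/* pin armed for S4 wake only */

	printf("suspend keeps irq:   %d\n", keep_irq_armed(pin_reg, true));
	printf("hibernate keeps irq: %d\n", keep_irq_armed(pin_reg, false));
	return 0;
}
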
+diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c
+index b79c211c037496..ac6dc22b37c98e 100644
+--- a/drivers/pinctrl/samsung/pinctrl-exynos.c
++++ b/drivers/pinctrl/samsung/pinctrl-exynos.c
+@@ -636,7 +636,7 @@ static void exynos_irq_demux_eint16_31(struct irq_desc *desc)
+ 		if (clk_enable(b->drvdata->pclk)) {
+ 			dev_err(b->gpio_chip.parent,
+ 				"unable to enable clock for pending IRQs\n");
+-			return;
++			goto out;
+ 		}
+ 	}
+ 
+@@ -652,6 +652,7 @@ static void exynos_irq_demux_eint16_31(struct irq_desc *desc)
+ 	if (eintd->nr_banks)
+ 		clk_disable(eintd->banks[0]->drvdata->pclk);
+ 
++out:
+ 	chained_irq_exit(chip, desc);
+ }
+ 
+diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
+index 5b7fa77c118436..03f3f707d27555 100644
+--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
++++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
+@@ -86,7 +86,6 @@ struct stm32_pinctrl_group {
+ 
+ struct stm32_gpio_bank {
+ 	void __iomem *base;
+-	struct clk *clk;
+ 	struct reset_control *rstc;
+ 	spinlock_t lock;
+ 	struct gpio_chip gpio_chip;
+@@ -108,6 +107,7 @@ struct stm32_pinctrl {
+ 	unsigned ngroups;
+ 	const char **grp_names;
+ 	struct stm32_gpio_bank *banks;
++	struct clk_bulk_data *clks;
+ 	unsigned nbanks;
+ 	const struct stm32_pinctrl_match_data *match_data;
+ 	struct irq_domain	*domain;
+@@ -1308,12 +1308,6 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl, struct fwnode
+ 	if (IS_ERR(bank->base))
+ 		return PTR_ERR(bank->base);
+ 
+-	err = clk_prepare_enable(bank->clk);
+-	if (err) {
+-		dev_err(dev, "failed to prepare_enable clk (%d)\n", err);
+-		return err;
+-	}
+-
+ 	bank->gpio_chip = stm32_gpio_template;
+ 
+ 	fwnode_property_read_string(fwnode, "st,bank-name", &bank->gpio_chip.label);
+@@ -1360,26 +1354,20 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl, struct fwnode
+ 							   bank->fwnode, &stm32_gpio_domain_ops,
+ 							   bank);
+ 
+-		if (!bank->domain) {
+-			err = -ENODEV;
+-			goto err_clk;
+-		}
++		if (!bank->domain)
++			return -ENODEV;
+ 	}
+ 
+ 	names = devm_kcalloc(dev, npins, sizeof(char *), GFP_KERNEL);
+-	if (!names) {
+-		err = -ENOMEM;
+-		goto err_clk;
+-	}
++	if (!names)
++		return -ENOMEM;
+ 
+ 	for (i = 0; i < npins; i++) {
+ 		stm32_pin = stm32_pctrl_get_desc_pin_from_gpio(pctl, bank, i);
+ 		if (stm32_pin && stm32_pin->pin.name) {
+ 			names[i] = devm_kasprintf(dev, GFP_KERNEL, "%s", stm32_pin->pin.name);
+-			if (!names[i]) {
+-				err = -ENOMEM;
+-				goto err_clk;
+-			}
++			if (!names[i])
++				return -ENOMEM;
+ 		} else {
+ 			names[i] = NULL;
+ 		}
+@@ -1390,15 +1378,11 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl, struct fwnode
+ 	err = gpiochip_add_data(&bank->gpio_chip, bank);
+ 	if (err) {
+ 		dev_err(dev, "Failed to add gpiochip(%d)!\n", bank_nr);
+-		goto err_clk;
++		return err;
+ 	}
+ 
+ 	dev_info(dev, "%s bank added\n", bank->gpio_chip.label);
+ 	return 0;
+-
+-err_clk:
+-	clk_disable_unprepare(bank->clk);
+-	return err;
+ }
+ 
+ static struct irq_domain *stm32_pctrl_get_irq_domain(struct platform_device *pdev)
+@@ -1621,6 +1605,11 @@ int stm32_pctl_probe(struct platform_device *pdev)
+ 	if (!pctl->banks)
+ 		return -ENOMEM;
+ 
++	pctl->clks = devm_kcalloc(dev, banks, sizeof(*pctl->clks),
++				  GFP_KERNEL);
++	if (!pctl->clks)
++		return -ENOMEM;
++
+ 	i = 0;
+ 	for_each_gpiochip_node(dev, child) {
+ 		struct stm32_gpio_bank *bank = &pctl->banks[i];
+@@ -1632,24 +1621,27 @@ int stm32_pctl_probe(struct platform_device *pdev)
+ 			return -EPROBE_DEFER;
+ 		}
+ 
+-		bank->clk = of_clk_get_by_name(np, NULL);
+-		if (IS_ERR(bank->clk)) {
++		pctl->clks[i].clk = of_clk_get_by_name(np, NULL);
++		if (IS_ERR(pctl->clks[i].clk)) {
+ 			fwnode_handle_put(child);
+-			return dev_err_probe(dev, PTR_ERR(bank->clk),
++			return dev_err_probe(dev, PTR_ERR(pctl->clks[i].clk),
+ 					     "failed to get clk\n");
+ 		}
++		pctl->clks[i].id = "pctl";
+ 		i++;
+ 	}
+ 
++	ret = clk_bulk_prepare_enable(banks, pctl->clks);
++	if (ret) {
++		dev_err(dev, "failed to prepare_enable clk (%d)\n", ret);
++		return ret;
++	}
++
+ 	for_each_gpiochip_node(dev, child) {
+ 		ret = stm32_gpiolib_register_bank(pctl, child);
+ 		if (ret) {
+ 			fwnode_handle_put(child);
+-
+-			for (i = 0; i < pctl->nbanks; i++)
+-				clk_disable_unprepare(pctl->banks[i].clk);
+-
+-			return ret;
++			goto err_register;
+ 		}
+ 
+ 		pctl->nbanks++;
+@@ -1658,6 +1650,15 @@ int stm32_pctl_probe(struct platform_device *pdev)
+ 	dev_info(dev, "Pinctrl STM32 initialized\n");
+ 
+ 	return 0;
++err_register:
++	for (i = 0; i < pctl->nbanks; i++) {
++		struct stm32_gpio_bank *bank = &pctl->banks[i];
++
++		gpiochip_remove(&bank->gpio_chip);
++	}
++
++	clk_bulk_disable_unprepare(banks, pctl->clks);
++	return ret;
+ }
+ 
+ static int __maybe_unused stm32_pinctrl_restore_gpio_regs(
+@@ -1726,10 +1727,8 @@ static int __maybe_unused stm32_pinctrl_restore_gpio_regs(
+ int __maybe_unused stm32_pinctrl_suspend(struct device *dev)
+ {
+ 	struct stm32_pinctrl *pctl = dev_get_drvdata(dev);
+-	int i;
+ 
+-	for (i = 0; i < pctl->nbanks; i++)
+-		clk_disable(pctl->banks[i].clk);
++	clk_bulk_disable(pctl->nbanks, pctl->clks);
+ 
+ 	return 0;
+ }
+@@ -1738,10 +1737,11 @@ int __maybe_unused stm32_pinctrl_resume(struct device *dev)
+ {
+ 	struct stm32_pinctrl *pctl = dev_get_drvdata(dev);
+ 	struct stm32_pinctrl_group *g = pctl->groups;
+-	int i;
++	int i, ret;
+ 
+-	for (i = 0; i < pctl->nbanks; i++)
+-		clk_enable(pctl->banks[i].clk);
++	ret = clk_bulk_enable(pctl->nbanks, pctl->clks);
++	if (ret)
++		return ret;
+ 
+ 	for (i = 0; i < pctl->ngroups; i++, g++)
+ 		stm32_pinctrl_restore_gpio_regs(pctl, g->pin);
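
The stm32 conversion collects the per-bank clocks into one clk_bulk_data array
so a single clk_bulk_prepare_enable()/clk_bulk_disable() replaces the
open-coded loops; it is the same rollback idea as the nomadik fix above,
packaged behind one call. The shape of the bulk API, modeled in plain C:

/* clk_bulk sketch: operate on an {id, handle} array as a unit.
 * bulk_enable() stops and rolls back at the first failure, which
 * is exactly the behavior open-coded loops tend to get wrong.
 */
#include <stdio.h>

struct clk_bulk_data {
	const char *id;
	int broken;		/* stands in for a struct clk handle */
};

static int bulk_enable(int num, struct clk_bulk_data *clks)
{
	int i;

	for (i = 0; i < num; i++) {
		if (clks[i].broken) {
			while (--i >= 0)	/* unwind earlier enables */
				printf("disable %s\n", clks[i].id);
			return -1;
		}
		printf("enable %s\n", clks[i].id);
	}
	return 0;
}

int main(void)
{
	struct clk_bulk_data clks[] = {
		{ "gpioa", 0 }, { "gpiob", 0 }, { "gpioc", 0 },
	};

	return bulk_enable(3, clks) ? 1 : 0;
}
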
+diff --git a/drivers/platform/mellanox/mlxbf-pmc.c b/drivers/platform/mellanox/mlxbf-pmc.c
+index 9d18dfca6a673b..9ff7b487dc4892 100644
+--- a/drivers/platform/mellanox/mlxbf-pmc.c
++++ b/drivers/platform/mellanox/mlxbf-pmc.c
+@@ -1168,7 +1168,7 @@ static int mlxbf_pmc_program_l3_counter(unsigned int blk_num, u32 cnt_num, u32 e
+ /* Method to handle crspace counter programming */
+ static int mlxbf_pmc_program_crspace_counter(unsigned int blk_num, u32 cnt_num, u32 evt)
+ {
+-	void *addr;
++	void __iomem *addr;
+ 	u32 word;
+ 	int ret;
+ 
+@@ -1192,7 +1192,7 @@ static int mlxbf_pmc_program_crspace_counter(unsigned int blk_num, u32 cnt_num,
+ /* Method to clear crspace counter value */
+ static int mlxbf_pmc_clear_crspace_counter(unsigned int blk_num, u32 cnt_num)
+ {
+-	void *addr;
++	void __iomem *addr;
+ 
+ 	addr = pmc->block[blk_num].mmio_base +
+ 		MLXBF_PMC_CRSPACE_PERFMON_VAL0(pmc->block[blk_num].counters) +
+@@ -1405,7 +1405,7 @@ static int mlxbf_pmc_read_l3_event(unsigned int blk_num, u32 cnt_num, u64 *resul
+ static int mlxbf_pmc_read_crspace_event(unsigned int blk_num, u32 cnt_num, u64 *result)
+ {
+ 	u32 word, evt;
+-	void *addr;
++	void __iomem *addr;
+ 	int ret;
+ 
+ 	addr = pmc->block[blk_num].mmio_base +
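The mlxbf-pmc hunks only add __iomem to pointer declarations. The annotation
expands to nothing in a normal build but lets the sparse checker flag any
mixing of MMIO and ordinary pointers. A self-contained demonstration of the
mechanism:

/* __iomem sketch: an address-space attribute that only the sparse
 * checker sees. Dereferencing or casting across spaces without an
 * accessor helper then produces a warning at analysis time.
 */
#include <stdint.h>

#ifdef __CHECKER__
# define __iomem __attribute__((noderef, address_space(__iomem)))
#else
# define __iomem	/* invisible to the regular compiler */
#endif

static inline uint32_t readl_model(const volatile uint32_t __iomem *addr)
{
	/* a real readl() casts away the address space internally, once */
	return *(const volatile uint32_t *)addr;
}

int main(void)
{
	static uint32_t fake_mmio = 0x42;
	uint32_t __iomem *addr = (uint32_t __iomem *)&fake_mmio;

	return readl_model(addr) == 0x42 ? 0 : 1;
}
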
+diff --git a/drivers/platform/x86/x86-android-tablets/lenovo.c b/drivers/platform/x86/x86-android-tablets/lenovo.c
+index ae087f1471c174..a60efbaf4817fe 100644
+--- a/drivers/platform/x86/x86-android-tablets/lenovo.c
++++ b/drivers/platform/x86/x86-android-tablets/lenovo.c
+@@ -601,7 +601,7 @@ static const struct regulator_init_data lenovo_yoga_tab2_1380_bq24190_vbus_init_
+ 	.num_consumer_supplies = 1,
+ };
+ 
+-struct bq24190_platform_data lenovo_yoga_tab2_1380_bq24190_pdata = {
++static struct bq24190_platform_data lenovo_yoga_tab2_1380_bq24190_pdata = {
+ 	.regulator_init_data = &lenovo_yoga_tab2_1380_bq24190_vbus_init_data,
+ };
+ 
+@@ -726,7 +726,7 @@ static const struct platform_device_info lenovo_yoga_tab2_1380_pdevs[] __initcon
+ 	},
+ };
+ 
+-const char * const lenovo_yoga_tab2_1380_modules[] __initconst = {
++static const char * const lenovo_yoga_tab2_1380_modules[] __initconst = {
+ 	"bq24190_charger",            /* For the Vbus regulator for lc824206xa */
+ 	NULL
+ };
+diff --git a/drivers/pps/clients/pps-gpio.c b/drivers/pps/clients/pps-gpio.c
+index 791fdc9326dd60..93e662912b5313 100644
+--- a/drivers/pps/clients/pps-gpio.c
++++ b/drivers/pps/clients/pps-gpio.c
+@@ -214,8 +214,8 @@ static int pps_gpio_probe(struct platform_device *pdev)
+ 		return -EINVAL;
+ 	}
+ 
+-	dev_info(data->pps->dev, "Registered IRQ %d as PPS source\n",
+-		 data->irq);
++	dev_dbg(&data->pps->dev, "Registered IRQ %d as PPS source\n",
++		data->irq);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/pps/clients/pps-ktimer.c b/drivers/pps/clients/pps-ktimer.c
+index d33106bd7a290f..2f465549b843f7 100644
+--- a/drivers/pps/clients/pps-ktimer.c
++++ b/drivers/pps/clients/pps-ktimer.c
+@@ -56,7 +56,7 @@ static struct pps_source_info pps_ktimer_info = {
+ 
+ static void __exit pps_ktimer_exit(void)
+ {
+-	dev_info(pps->dev, "ktimer PPS source unregistered\n");
++	dev_dbg(&pps->dev, "ktimer PPS source unregistered\n");
+ 
+ 	del_timer_sync(&ktimer);
+ 	pps_unregister_source(pps);
+@@ -74,7 +74,7 @@ static int __init pps_ktimer_init(void)
+ 	timer_setup(&ktimer, pps_ktimer_event, 0);
+ 	mod_timer(&ktimer, jiffies + HZ);
+ 
+-	dev_info(pps->dev, "ktimer PPS source registered\n");
++	dev_dbg(&pps->dev, "ktimer PPS source registered\n");
+ 
+ 	return 0;
+ }
+diff --git a/drivers/pps/clients/pps-ldisc.c b/drivers/pps/clients/pps-ldisc.c
+index 443d6bae19d14d..fa5660f3c4b707 100644
+--- a/drivers/pps/clients/pps-ldisc.c
++++ b/drivers/pps/clients/pps-ldisc.c
+@@ -32,7 +32,7 @@ static void pps_tty_dcd_change(struct tty_struct *tty, bool active)
+ 	pps_event(pps, &ts, active ? PPS_CAPTUREASSERT :
+ 			PPS_CAPTURECLEAR, NULL);
+ 
+-	dev_dbg(pps->dev, "PPS %s at %lu\n",
++	dev_dbg(&pps->dev, "PPS %s at %lu\n",
+ 			active ? "assert" : "clear", jiffies);
+ }
+ 
+@@ -69,7 +69,7 @@ static int pps_tty_open(struct tty_struct *tty)
+ 		goto err_unregister;
+ 	}
+ 
+-	dev_info(pps->dev, "source \"%s\" added\n", info.path);
++	dev_dbg(&pps->dev, "source \"%s\" added\n", info.path);
+ 
+ 	return 0;
+ 
+@@ -89,7 +89,7 @@ static void pps_tty_close(struct tty_struct *tty)
+ 	if (WARN_ON(!pps))
+ 		return;
+ 
+-	dev_info(pps->dev, "removed\n");
++	dev_info(&pps->dev, "removed\n");
+ 	pps_unregister_source(pps);
+ }
+ 
+diff --git a/drivers/pps/clients/pps_parport.c b/drivers/pps/clients/pps_parport.c
+index abaffb4e1c1ce9..24db06750297d5 100644
+--- a/drivers/pps/clients/pps_parport.c
++++ b/drivers/pps/clients/pps_parport.c
+@@ -81,7 +81,7 @@ static void parport_irq(void *handle)
+ 	/* check the signal (no signal means the pulse is lost this time) */
+ 	if (!signal_is_set(port)) {
+ 		local_irq_restore(flags);
+-		dev_err(dev->pps->dev, "lost the signal\n");
++		dev_err(&dev->pps->dev, "lost the signal\n");
+ 		goto out_assert;
+ 	}
+ 
+@@ -98,7 +98,7 @@ static void parport_irq(void *handle)
+ 	/* timeout */
+ 	dev->cw_err++;
+ 	if (dev->cw_err >= CLEAR_WAIT_MAX_ERRORS) {
+-		dev_err(dev->pps->dev, "disabled clear edge capture after %d"
++		dev_err(&dev->pps->dev, "disabled clear edge capture after %d"
+ 				" timeouts\n", dev->cw_err);
+ 		dev->cw = 0;
+ 		dev->cw_err = 0;
+diff --git a/drivers/pps/kapi.c b/drivers/pps/kapi.c
+index d9d566f70ed199..92d1b62ea239d7 100644
+--- a/drivers/pps/kapi.c
++++ b/drivers/pps/kapi.c
+@@ -41,7 +41,7 @@ static void pps_add_offset(struct pps_ktime *ts, struct pps_ktime *offset)
+ static void pps_echo_client_default(struct pps_device *pps, int event,
+ 		void *data)
+ {
+-	dev_info(pps->dev, "echo %s %s\n",
++	dev_info(&pps->dev, "echo %s %s\n",
+ 		event & PPS_CAPTUREASSERT ? "assert" : "",
+ 		event & PPS_CAPTURECLEAR ? "clear" : "");
+ }
+@@ -112,7 +112,7 @@ struct pps_device *pps_register_source(struct pps_source_info *info,
+ 		goto kfree_pps;
+ 	}
+ 
+-	dev_info(pps->dev, "new PPS source %s\n", info->name);
++	dev_dbg(&pps->dev, "new PPS source %s\n", info->name);
+ 
+ 	return pps;
+ 
+@@ -166,7 +166,7 @@ void pps_event(struct pps_device *pps, struct pps_event_time *ts, int event,
+ 	/* check event type */
+ 	BUG_ON((event & (PPS_CAPTUREASSERT | PPS_CAPTURECLEAR)) == 0);
+ 
+-	dev_dbg(pps->dev, "PPS event at %lld.%09ld\n",
++	dev_dbg(&pps->dev, "PPS event at %lld.%09ld\n",
+ 			(s64)ts->ts_real.tv_sec, ts->ts_real.tv_nsec);
+ 
+ 	timespec_to_pps_ktime(&ts_real, ts->ts_real);
+@@ -188,7 +188,7 @@ void pps_event(struct pps_device *pps, struct pps_event_time *ts, int event,
+ 		/* Save the time stamp */
+ 		pps->assert_tu = ts_real;
+ 		pps->assert_sequence++;
+-		dev_dbg(pps->dev, "capture assert seq #%u\n",
++		dev_dbg(&pps->dev, "capture assert seq #%u\n",
+ 			pps->assert_sequence);
+ 
+ 		captured = ~0;
+@@ -202,7 +202,7 @@ void pps_event(struct pps_device *pps, struct pps_event_time *ts, int event,
+ 		/* Save the time stamp */
+ 		pps->clear_tu = ts_real;
+ 		pps->clear_sequence++;
+-		dev_dbg(pps->dev, "capture clear seq #%u\n",
++		dev_dbg(&pps->dev, "capture clear seq #%u\n",
+ 			pps->clear_sequence);
+ 
+ 		captured = ~0;
+diff --git a/drivers/pps/kc.c b/drivers/pps/kc.c
+index 50dc59af45be24..fbd23295afd7d9 100644
+--- a/drivers/pps/kc.c
++++ b/drivers/pps/kc.c
+@@ -43,11 +43,11 @@ int pps_kc_bind(struct pps_device *pps, struct pps_bind_args *bind_args)
+ 			pps_kc_hardpps_mode = 0;
+ 			pps_kc_hardpps_dev = NULL;
+ 			spin_unlock_irq(&pps_kc_hardpps_lock);
+-			dev_info(pps->dev, "unbound kernel"
++			dev_info(&pps->dev, "unbound kernel"
+ 					" consumer\n");
+ 		} else {
+ 			spin_unlock_irq(&pps_kc_hardpps_lock);
+-			dev_err(pps->dev, "selected kernel consumer"
++			dev_err(&pps->dev, "selected kernel consumer"
+ 					" is not bound\n");
+ 			return -EINVAL;
+ 		}
+@@ -57,11 +57,11 @@ int pps_kc_bind(struct pps_device *pps, struct pps_bind_args *bind_args)
+ 			pps_kc_hardpps_mode = bind_args->edge;
+ 			pps_kc_hardpps_dev = pps;
+ 			spin_unlock_irq(&pps_kc_hardpps_lock);
+-			dev_info(pps->dev, "bound kernel consumer: "
++			dev_info(&pps->dev, "bound kernel consumer: "
+ 				"edge=0x%x\n", bind_args->edge);
+ 		} else {
+ 			spin_unlock_irq(&pps_kc_hardpps_lock);
+-			dev_err(pps->dev, "another kernel consumer"
++			dev_err(&pps->dev, "another kernel consumer"
+ 					" is already bound\n");
+ 			return -EINVAL;
+ 		}
+@@ -83,7 +83,7 @@ void pps_kc_remove(struct pps_device *pps)
+ 		pps_kc_hardpps_mode = 0;
+ 		pps_kc_hardpps_dev = NULL;
+ 		spin_unlock_irq(&pps_kc_hardpps_lock);
+-		dev_info(pps->dev, "unbound kernel consumer"
++		dev_info(&pps->dev, "unbound kernel consumer"
+ 				" on device removal\n");
+ 	} else
+ 		spin_unlock_irq(&pps_kc_hardpps_lock);
+diff --git a/drivers/pps/pps.c b/drivers/pps/pps.c
+index 25d47907db175e..6a02245ea35fec 100644
+--- a/drivers/pps/pps.c
++++ b/drivers/pps/pps.c
+@@ -25,7 +25,7 @@
+  * Local variables
+  */
+ 
+-static dev_t pps_devt;
++static int pps_major;
+ static struct class *pps_class;
+ 
+ static DEFINE_MUTEX(pps_idr_lock);
+@@ -62,7 +62,7 @@ static int pps_cdev_pps_fetch(struct pps_device *pps, struct pps_fdata *fdata)
+ 	else {
+ 		unsigned long ticks;
+ 
+-		dev_dbg(pps->dev, "timeout %lld.%09d\n",
++		dev_dbg(&pps->dev, "timeout %lld.%09d\n",
+ 				(long long) fdata->timeout.sec,
+ 				fdata->timeout.nsec);
+ 		ticks = fdata->timeout.sec * HZ;
+@@ -80,7 +80,7 @@ static int pps_cdev_pps_fetch(struct pps_device *pps, struct pps_fdata *fdata)
+ 
+ 	/* Check for pending signals */
+ 	if (err == -ERESTARTSYS) {
+-		dev_dbg(pps->dev, "pending signal caught\n");
++		dev_dbg(&pps->dev, "pending signal caught\n");
+ 		return -EINTR;
+ 	}
+ 
+@@ -98,7 +98,7 @@ static long pps_cdev_ioctl(struct file *file,
+ 
+ 	switch (cmd) {
+ 	case PPS_GETPARAMS:
+-		dev_dbg(pps->dev, "PPS_GETPARAMS\n");
++		dev_dbg(&pps->dev, "PPS_GETPARAMS\n");
+ 
+ 		spin_lock_irq(&pps->lock);
+ 
+@@ -114,7 +114,7 @@ static long pps_cdev_ioctl(struct file *file,
+ 		break;
+ 
+ 	case PPS_SETPARAMS:
+-		dev_dbg(pps->dev, "PPS_SETPARAMS\n");
++		dev_dbg(&pps->dev, "PPS_SETPARAMS\n");
+ 
+ 		/* Check the capabilities */
+ 		if (!capable(CAP_SYS_TIME))
+@@ -124,14 +124,14 @@ static long pps_cdev_ioctl(struct file *file,
+ 		if (err)
+ 			return -EFAULT;
+ 		if (!(params.mode & (PPS_CAPTUREASSERT | PPS_CAPTURECLEAR))) {
+-			dev_dbg(pps->dev, "capture mode unspecified (%x)\n",
++			dev_dbg(&pps->dev, "capture mode unspecified (%x)\n",
+ 								params.mode);
+ 			return -EINVAL;
+ 		}
+ 
+ 		/* Check for supported capabilities */
+ 		if ((params.mode & ~pps->info.mode) != 0) {
+-			dev_dbg(pps->dev, "unsupported capabilities (%x)\n",
++			dev_dbg(&pps->dev, "unsupported capabilities (%x)\n",
+ 								params.mode);
+ 			return -EINVAL;
+ 		}
+@@ -144,7 +144,7 @@ static long pps_cdev_ioctl(struct file *file,
+ 		/* Restore the read only parameters */
+ 		if ((params.mode & (PPS_TSFMT_TSPEC | PPS_TSFMT_NTPFP)) == 0) {
+ 			/* section 3.3 of RFC 2783 interpreted */
+-			dev_dbg(pps->dev, "time format unspecified (%x)\n",
++			dev_dbg(&pps->dev, "time format unspecified (%x)\n",
+ 								params.mode);
+ 			pps->params.mode |= PPS_TSFMT_TSPEC;
+ 		}
+@@ -165,7 +165,7 @@ static long pps_cdev_ioctl(struct file *file,
+ 		break;
+ 
+ 	case PPS_GETCAP:
+-		dev_dbg(pps->dev, "PPS_GETCAP\n");
++		dev_dbg(&pps->dev, "PPS_GETCAP\n");
+ 
+ 		err = put_user(pps->info.mode, iuarg);
+ 		if (err)
+@@ -176,7 +176,7 @@ static long pps_cdev_ioctl(struct file *file,
+ 	case PPS_FETCH: {
+ 		struct pps_fdata fdata;
+ 
+-		dev_dbg(pps->dev, "PPS_FETCH\n");
++		dev_dbg(&pps->dev, "PPS_FETCH\n");
+ 
+ 		err = copy_from_user(&fdata, uarg, sizeof(struct pps_fdata));
+ 		if (err)
+@@ -206,7 +206,7 @@ static long pps_cdev_ioctl(struct file *file,
+ 	case PPS_KC_BIND: {
+ 		struct pps_bind_args bind_args;
+ 
+-		dev_dbg(pps->dev, "PPS_KC_BIND\n");
++		dev_dbg(&pps->dev, "PPS_KC_BIND\n");
+ 
+ 		/* Check the capabilities */
+ 		if (!capable(CAP_SYS_TIME))
+@@ -218,7 +218,7 @@ static long pps_cdev_ioctl(struct file *file,
+ 
+ 		/* Check for supported capabilities */
+ 		if ((bind_args.edge & ~pps->info.mode) != 0) {
+-			dev_err(pps->dev, "unsupported capabilities (%x)\n",
++			dev_err(&pps->dev, "unsupported capabilities (%x)\n",
+ 					bind_args.edge);
+ 			return -EINVAL;
+ 		}
+@@ -227,7 +227,7 @@ static long pps_cdev_ioctl(struct file *file,
+ 		if (bind_args.tsformat != PPS_TSFMT_TSPEC ||
+ 				(bind_args.edge & ~PPS_CAPTUREBOTH) != 0 ||
+ 				bind_args.consumer != PPS_KC_HARDPPS) {
+-			dev_err(pps->dev, "invalid kernel consumer bind"
++			dev_err(&pps->dev, "invalid kernel consumer bind"
+ 					" parameters (%x)\n", bind_args.edge);
+ 			return -EINVAL;
+ 		}
+@@ -259,7 +259,7 @@ static long pps_cdev_compat_ioctl(struct file *file,
+ 		struct pps_fdata fdata;
+ 		int err;
+ 
+-		dev_dbg(pps->dev, "PPS_FETCH\n");
++		dev_dbg(&pps->dev, "PPS_FETCH\n");
+ 
+ 		err = copy_from_user(&compat, uarg, sizeof(struct pps_fdata_compat));
+ 		if (err)
+@@ -296,20 +296,36 @@ static long pps_cdev_compat_ioctl(struct file *file,
+ #define pps_cdev_compat_ioctl	NULL
+ #endif
+ 
++static struct pps_device *pps_idr_get(unsigned long id)
++{
++	struct pps_device *pps;
++
++	mutex_lock(&pps_idr_lock);
++	pps = idr_find(&pps_idr, id);
++	if (pps)
++		get_device(&pps->dev);
++
++	mutex_unlock(&pps_idr_lock);
++	return pps;
++}
++
+ static int pps_cdev_open(struct inode *inode, struct file *file)
+ {
+-	struct pps_device *pps = container_of(inode->i_cdev,
+-						struct pps_device, cdev);
++	struct pps_device *pps = pps_idr_get(iminor(inode));
++
++	if (!pps)
++		return -ENODEV;
++
+ 	file->private_data = pps;
+-	kobject_get(&pps->dev->kobj);
+ 	return 0;
+ }
+ 
+ static int pps_cdev_release(struct inode *inode, struct file *file)
+ {
+-	struct pps_device *pps = container_of(inode->i_cdev,
+-						struct pps_device, cdev);
+-	kobject_put(&pps->dev->kobj);
++	struct pps_device *pps = file->private_data;
++
++	WARN_ON(pps->id != iminor(inode));
++	put_device(&pps->dev);
+ 	return 0;
+ }
+ 
+@@ -331,22 +347,13 @@ static void pps_device_destruct(struct device *dev)
+ {
+ 	struct pps_device *pps = dev_get_drvdata(dev);
+ 
+-	cdev_del(&pps->cdev);
+-
+-	/* Now we can release the ID for re-use */
+ 	pr_debug("deallocating pps%d\n", pps->id);
+-	mutex_lock(&pps_idr_lock);
+-	idr_remove(&pps_idr, pps->id);
+-	mutex_unlock(&pps_idr_lock);
+-
+-	kfree(dev);
+ 	kfree(pps);
+ }
+ 
+ int pps_register_cdev(struct pps_device *pps)
+ {
+ 	int err;
+-	dev_t devt;
+ 
+ 	mutex_lock(&pps_idr_lock);
+ 	/*
+@@ -363,40 +370,29 @@ int pps_register_cdev(struct pps_device *pps)
+ 		goto out_unlock;
+ 	}
+ 	pps->id = err;
+-	mutex_unlock(&pps_idr_lock);
+-
+-	devt = MKDEV(MAJOR(pps_devt), pps->id);
+-
+-	cdev_init(&pps->cdev, &pps_cdev_fops);
+-	pps->cdev.owner = pps->info.owner;
+ 
+-	err = cdev_add(&pps->cdev, devt, 1);
+-	if (err) {
+-		pr_err("%s: failed to add char device %d:%d\n",
+-				pps->info.name, MAJOR(pps_devt), pps->id);
++	pps->dev.class = pps_class;
++	pps->dev.parent = pps->info.dev;
++	pps->dev.devt = MKDEV(pps_major, pps->id);
++	dev_set_drvdata(&pps->dev, pps);
++	dev_set_name(&pps->dev, "pps%d", pps->id);
++	err = device_register(&pps->dev);
++	if (err)
+ 		goto free_idr;
+-	}
+-	pps->dev = device_create(pps_class, pps->info.dev, devt, pps,
+-							"pps%d", pps->id);
+-	if (IS_ERR(pps->dev)) {
+-		err = PTR_ERR(pps->dev);
+-		goto del_cdev;
+-	}
+ 
+ 	/* Override the release function with our own */
+-	pps->dev->release = pps_device_destruct;
++	pps->dev.release = pps_device_destruct;
+ 
+-	pr_debug("source %s got cdev (%d:%d)\n", pps->info.name,
+-			MAJOR(pps_devt), pps->id);
++	pr_debug("source %s got cdev (%d:%d)\n", pps->info.name, pps_major,
++		 pps->id);
+ 
++	get_device(&pps->dev);
++	mutex_unlock(&pps_idr_lock);
+ 	return 0;
+ 
+-del_cdev:
+-	cdev_del(&pps->cdev);
+-
+ free_idr:
+-	mutex_lock(&pps_idr_lock);
+ 	idr_remove(&pps_idr, pps->id);
++	put_device(&pps->dev);
+ out_unlock:
+ 	mutex_unlock(&pps_idr_lock);
+ 	return err;
+@@ -406,7 +402,13 @@ void pps_unregister_cdev(struct pps_device *pps)
+ {
+ 	pr_debug("unregistering pps%d\n", pps->id);
+ 	pps->lookup_cookie = NULL;
+-	device_destroy(pps_class, pps->dev->devt);
++	device_destroy(pps_class, pps->dev.devt);
++
++	/* Now we can release the ID for re-use */
++	mutex_lock(&pps_idr_lock);
++	idr_remove(&pps_idr, pps->id);
++	put_device(&pps->dev);
++	mutex_unlock(&pps_idr_lock);
+ }
+ 
+ /*
+@@ -426,6 +428,11 @@ void pps_unregister_cdev(struct pps_device *pps)
+  * so that it will not be used again, even if the pps device cannot
+  * be removed from the idr due to pending references holding the minor
+  * number in use.
++ *
++ * Since pps_idr holds a reference to the device, the returned
++ * pps_device is guaranteed to be valid until pps_unregister_cdev() is
++ * called on it. But after calling pps_unregister_cdev(), it may be
++ * freed at any time.
+  */
+ struct pps_device *pps_lookup_dev(void const *cookie)
+ {
+@@ -448,13 +455,11 @@ EXPORT_SYMBOL(pps_lookup_dev);
+ static void __exit pps_exit(void)
+ {
+ 	class_destroy(pps_class);
+-	unregister_chrdev_region(pps_devt, PPS_MAX_SOURCES);
++	__unregister_chrdev(pps_major, 0, PPS_MAX_SOURCES, "pps");
+ }
+ 
+ static int __init pps_init(void)
+ {
+-	int err;
+-
+ 	pps_class = class_create("pps");
+ 	if (IS_ERR(pps_class)) {
+ 		pr_err("failed to allocate class\n");
+@@ -462,8 +467,9 @@ static int __init pps_init(void)
+ 	}
+ 	pps_class->dev_groups = pps_groups;
+ 
+-	err = alloc_chrdev_region(&pps_devt, 0, PPS_MAX_SOURCES, "pps");
+-	if (err < 0) {
++	pps_major = __register_chrdev(0, 0, PPS_MAX_SOURCES, "pps",
++				      &pps_cdev_fops);
++	if (pps_major < 0) {
+ 		pr_err("failed to allocate char device region\n");
+ 		goto remove_class;
+ 	}
+@@ -476,8 +482,7 @@ static int __init pps_init(void)
+ 
+ remove_class:
+ 	class_destroy(pps_class);
+-
+-	return err;
++	return pps_major;
+ }
+ 
+ subsys_initcall(pps_init);
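An illustrative aside on the pps.c rework above: the character device's lifetime is now tied to the struct device embedded in struct pps_device, and opens pin the object through the IDR before the lock is dropped. A minimal sketch of that lookup-and-get idiom follows; the foo_* names are hypothetical stand-ins, not from the patch.

struct foo_device {
	int id;
	struct device dev;	/* embedded; freed from dev.release */
};

static DEFINE_MUTEX(foo_idr_lock);
static DEFINE_IDR(foo_idr);

/*
 * Find an object by minor number and take a reference before the lock
 * is dropped, so it cannot be freed between lookup and use.  The
 * caller releases it with put_device(&foo->dev).
 */
static struct foo_device *foo_idr_get(unsigned long id)
{
	struct foo_device *foo;

	mutex_lock(&foo_idr_lock);
	foo = idr_find(&foo_idr, id);
	if (foo)
		get_device(&foo->dev);
	mutex_unlock(&foo_idr_lock);
	return foo;
}

This is the same reason pps_cdev_release() now ends in put_device(): whichever path drops the last reference triggers the release callback.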
+diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
+index ea96a14d72d141..bf6468c56419c5 100644
+--- a/drivers/ptp/ptp_chardev.c
++++ b/drivers/ptp/ptp_chardev.c
+@@ -4,6 +4,7 @@
+  *
+  * Copyright (C) 2010 OMICRON electronics GmbH
+  */
++#include <linux/compat.h>
+ #include <linux/module.h>
+ #include <linux/posix-clock.h>
+ #include <linux/poll.h>
+@@ -176,6 +177,9 @@ long ptp_ioctl(struct posix_clock_context *pccontext, unsigned int cmd,
+ 	struct timespec64 ts;
+ 	int enable, err = 0;
+ 
++	if (in_compat_syscall() && cmd != PTP_ENABLE_PPS && cmd != PTP_ENABLE_PPS2)
++		arg = (unsigned long)compat_ptr(arg);
++
+ 	tsevq = pccontext->private_clkdata;
+ 
+ 	switch (cmd) {
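For context on the in_compat_syscall() hunk above: 32-bit tasks pass 32-bit user pointers, which on some architectures (notably s390) must be converted with compat_ptr() before use, while commands whose argument is a plain integer, such as PTP_ENABLE_PPS, must be left alone. A hedged sketch with hypothetical command names:

#include <linux/compat.h>
#include <linux/ioctl.h>

#define FOO_ENABLE	_IOW('f', 1, int)	/* integer argument, not a pointer */

static long foo_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	/* Rewrite 32-bit user pointers coming from compat tasks. */
	if (in_compat_syscall() && cmd != FOO_ENABLE)
		arg = (unsigned long)compat_ptr(arg);

	/* ... dispatch on cmd, treating arg as a (void __user *) ... */
	return 0;
}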
+diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c
+index 5feecaadde8e05..120db96d9e95d6 100644
+--- a/drivers/ptp/ptp_ocp.c
++++ b/drivers/ptp/ptp_ocp.c
+@@ -4420,7 +4420,7 @@ ptp_ocp_complete(struct ptp_ocp *bp)
+ 
+ 	pps = pps_lookup_dev(bp->ptp);
+ 	if (pps)
+-		ptp_ocp_symlink(bp, pps->dev, "pps");
++		ptp_ocp_symlink(bp, &pps->dev, "pps");
+ 
+ 	ptp_ocp_debugfs_add_device(bp);
+ 
+diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
+index 210368099a0642..174939359ae3eb 100644
+--- a/drivers/pwm/core.c
++++ b/drivers/pwm/core.c
+@@ -6,7 +6,7 @@
+  * Copyright (C) 2011-2012 Avionic Design GmbH
+  */
+ 
+-#define DEFAULT_SYMBOL_NAMESPACE PWM
++#define DEFAULT_SYMBOL_NAMESPACE "PWM"
+ 
+ #include <linux/acpi.h>
+ #include <linux/module.h>
+diff --git a/drivers/pwm/pwm-dwc-core.c b/drivers/pwm/pwm-dwc-core.c
+index c8425493b95d85..6dabec93a3c641 100644
+--- a/drivers/pwm/pwm-dwc-core.c
++++ b/drivers/pwm/pwm-dwc-core.c
+@@ -9,7 +9,7 @@
+  * Author: Raymond Tan <raymond.tan@intel.com>
+  */
+ 
+-#define DEFAULT_SYMBOL_NAMESPACE dwc_pwm
++#define DEFAULT_SYMBOL_NAMESPACE "dwc_pwm"
+ 
+ #include <linux/bitops.h>
+ #include <linux/export.h>
+diff --git a/drivers/pwm/pwm-lpss.c b/drivers/pwm/pwm-lpss.c
+index 867e2bc8c601c8..3b99feb3bb4918 100644
+--- a/drivers/pwm/pwm-lpss.c
++++ b/drivers/pwm/pwm-lpss.c
+@@ -19,7 +19,7 @@
+ #include <linux/pm_runtime.h>
+ #include <linux/time.h>
+ 
+-#define DEFAULT_SYMBOL_NAMESPACE PWM_LPSS
++#define DEFAULT_SYMBOL_NAMESPACE "PWM_LPSS"
+ 
+ #include "pwm-lpss.h"
+ 
+diff --git a/drivers/pwm/pwm-stm32-lp.c b/drivers/pwm/pwm-stm32-lp.c
+index 989731256f5030..5832dce8ed9d58 100644
+--- a/drivers/pwm/pwm-stm32-lp.c
++++ b/drivers/pwm/pwm-stm32-lp.c
+@@ -167,8 +167,12 @@ static int stm32_pwm_lp_get_state(struct pwm_chip *chip,
+ 	regmap_read(priv->regmap, STM32_LPTIM_CR, &val);
+ 	state->enabled = !!FIELD_GET(STM32_LPTIM_ENABLE, val);
+ 	/* Keep PWM counter clock refcount in sync with PWM initial state */
+-	if (state->enabled)
+-		clk_enable(priv->clk);
++	if (state->enabled) {
++		int ret = clk_enable(priv->clk);
++
++		if (ret)
++			return ret;
++	}
+ 
+ 	regmap_read(priv->regmap, STM32_LPTIM_CFGR, &val);
+ 	presc = FIELD_GET(STM32_LPTIM_PRESC, val);
+diff --git a/drivers/pwm/pwm-stm32.c b/drivers/pwm/pwm-stm32.c
+index eb24054f972973..4f231f8aae7d4c 100644
+--- a/drivers/pwm/pwm-stm32.c
++++ b/drivers/pwm/pwm-stm32.c
+@@ -688,8 +688,11 @@ static int stm32_pwm_probe(struct platform_device *pdev)
+ 	chip->ops = &stm32pwm_ops;
+ 
+ 	/* Initialize clock refcount to number of enabled PWM channels. */
+-	for (i = 0; i < num_enabled; i++)
+-		clk_enable(priv->clk);
++	for (i = 0; i < num_enabled; i++) {
++		ret = clk_enable(priv->clk);
++		if (ret)
++			return ret;
++	}
+ 
+ 	ret = devm_pwmchip_add(dev, chip);
+ 	if (ret < 0)
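Both stm32 hunks above add the return-value checks that clk_enable() always needed. A small illustrative sketch of the same pattern when several enables are taken in a loop, with an explicit unwind on failure (names are hypothetical):

static int foo_enable_clk_n(struct clk *clk, unsigned int n)
{
	unsigned int i;
	int ret;

	for (i = 0; i < n; i++) {
		ret = clk_enable(clk);
		if (ret)
			goto unwind;
	}
	return 0;

unwind:
	/* Drop only the enables we actually took. */
	while (i--)
		clk_disable(clk);
	return ret;
}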
+diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
+index 1179766811f583..4bb2652740d001 100644
+--- a/drivers/regulator/core.c
++++ b/drivers/regulator/core.c
+@@ -4946,7 +4946,7 @@ int _regulator_bulk_get(struct device *dev, int num_consumers,
+ 						       consumers[i].supply, get_type);
+ 		if (IS_ERR(consumers[i].consumer)) {
+ 			ret = dev_err_probe(dev, PTR_ERR(consumers[i].consumer),
+-					    "Failed to get supply '%s'",
++					    "Failed to get supply '%s'\n",
+ 					    consumers[i].supply);
+ 			consumers[i].consumer = NULL;
+ 			goto err;
+diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
+index 3f490d81abc28f..deab0b95b6637d 100644
+--- a/drivers/regulator/of_regulator.c
++++ b/drivers/regulator/of_regulator.c
+@@ -446,7 +446,7 @@ int of_regulator_match(struct device *dev, struct device_node *node,
+ 					"failed to parse DT for regulator %pOFn\n",
+ 					child);
+ 				of_node_put(child);
+-				return -EINVAL;
++				goto err_put;
+ 			}
+ 			match->of_node = of_node_get(child);
+ 			count++;
+@@ -455,6 +455,18 @@ int of_regulator_match(struct device *dev, struct device_node *node,
+ 	}
+ 
+ 	return count;
++
++err_put:
++	for (i = 0; i < num_matches; i++) {
++		struct of_regulator_match *match = &matches[i];
++
++		match->init_data = NULL;
++		if (match->of_node) {
++			of_node_put(match->of_node);
++			match->of_node = NULL;
++		}
++	}
++	return -EINVAL;
+ }
+ EXPORT_SYMBOL_GPL(of_regulator_match);
+ 
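The err_put unwind above restores the of_node reference counts taken on earlier loop iterations. As a generic sketch of the same balance rule (every of_node_get() needs a matching of_node_put(), including on error paths), with hypothetical helpers:

/*
 * Grab up to n children, taking an extra reference on each so they
 * outlive the iteration.  for_each_child_of_node() manages the loop
 * reference itself, so breaking out early must drop it explicitly.
 */
static int foo_grab_children(struct device_node *parent,
			     struct device_node **slots, int n)
{
	struct device_node *child;
	int i = 0;

	for_each_child_of_node(parent, child) {
		if (i == n) {
			of_node_put(child);
			break;
		}
		slots[i++] = of_node_get(child);
	}
	return i;
}

/* Error-path unwind: release everything foo_grab_children() took. */
static void foo_release_children(struct device_node **slots, int n)
{
	while (n--) {
		of_node_put(slots[n]);
		slots[n] = NULL;
	}
}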
+diff --git a/drivers/remoteproc/mtk_scp.c b/drivers/remoteproc/mtk_scp.c
+index e744c07507eede..f98a11d4cf2920 100644
+--- a/drivers/remoteproc/mtk_scp.c
++++ b/drivers/remoteproc/mtk_scp.c
+@@ -1326,6 +1326,11 @@ static int scp_cluster_init(struct platform_device *pdev, struct mtk_scp_of_clus
+ 	return ret;
+ }
+ 
++static const struct of_device_id scp_core_match[] = {
++	{ .compatible = "mediatek,scp-core" },
++	{}
++};
++
+ static int scp_probe(struct platform_device *pdev)
+ {
+ 	struct device *dev = &pdev->dev;
+@@ -1357,13 +1362,15 @@ static int scp_probe(struct platform_device *pdev)
+ 	INIT_LIST_HEAD(&scp_cluster->mtk_scp_list);
+ 	mutex_init(&scp_cluster->cluster_lock);
+ 
+-	ret = devm_of_platform_populate(dev);
++	ret = of_platform_populate(dev_of_node(dev), scp_core_match, NULL, dev);
+ 	if (ret)
+ 		return dev_err_probe(dev, ret, "Failed to populate platform devices\n");
+ 
+ 	ret = scp_cluster_init(pdev, scp_cluster);
+-	if (ret)
++	if (ret) {
++		of_platform_depopulate(dev);
+ 		return ret;
++	}
+ 
+ 	return 0;
+ }
+@@ -1379,6 +1386,7 @@ static void scp_remove(struct platform_device *pdev)
+ 		rproc_del(scp->rproc);
+ 		scp_free(scp);
+ 	}
++	of_platform_depopulate(&pdev->dev);
+ 	mutex_destroy(&scp_cluster->cluster_lock);
+ }
+ 
+diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
+index f276956f2c5cec..ef6febe3563307 100644
+--- a/drivers/remoteproc/remoteproc_core.c
++++ b/drivers/remoteproc/remoteproc_core.c
+@@ -2486,6 +2486,13 @@ struct rproc *rproc_alloc(struct device *dev, const char *name,
+ 	rproc->dev.driver_data = rproc;
+ 	idr_init(&rproc->notifyids);
+ 
++	/* Assign a unique device index and name */
++	rproc->index = ida_alloc(&rproc_dev_index, GFP_KERNEL);
++	if (rproc->index < 0) {
++		dev_err(dev, "ida_alloc failed: %d\n", rproc->index);
++		goto put_device;
++	}
++
+ 	rproc->name = kstrdup_const(name, GFP_KERNEL);
+ 	if (!rproc->name)
+ 		goto put_device;
+@@ -2496,13 +2503,6 @@ struct rproc *rproc_alloc(struct device *dev, const char *name,
+ 	if (rproc_alloc_ops(rproc, ops))
+ 		goto put_device;
+ 
+-	/* Assign a unique device index and name */
+-	rproc->index = ida_alloc(&rproc_dev_index, GFP_KERNEL);
+-	if (rproc->index < 0) {
+-		dev_err(dev, "ida_alloc failed: %d\n", rproc->index);
+-		goto put_device;
+-	}
+-
+ 	dev_set_name(&rproc->dev, "remoteproc%d", rproc->index);
+ 
+ 	atomic_set(&rproc->power, 0);
+diff --git a/drivers/rtc/rtc-loongson.c b/drivers/rtc/rtc-loongson.c
+index e8ffc1ab90b02f..90e9d97a86b487 100644
+--- a/drivers/rtc/rtc-loongson.c
++++ b/drivers/rtc/rtc-loongson.c
+@@ -114,6 +114,13 @@ static irqreturn_t loongson_rtc_isr(int irq, void *id)
+ 	struct loongson_rtc_priv *priv = (struct loongson_rtc_priv *)id;
+ 
+ 	rtc_update_irq(priv->rtcdev, 1, RTC_AF | RTC_IRQF);
++
++	/*
++	 * The TOY_MATCH0_REG should be cleared 0 here,
++	 * otherwise the interrupt cannot be cleared.
++	 */
++	regmap_write(priv->regmap, TOY_MATCH0_REG, 0);
++
+ 	return IRQ_HANDLED;
+ }
+ 
+@@ -131,11 +138,7 @@ static u32 loongson_rtc_handler(void *id)
+ 	writel(RTC_STS, priv->pm_base + PM1_STS_REG);
+ 	spin_unlock(&priv->lock);
+ 
+-	/*
+-	 * The TOY_MATCH0_REG should be cleared 0 here,
+-	 * otherwise the interrupt cannot be cleared.
+-	 */
+-	return regmap_write(priv->regmap, TOY_MATCH0_REG, 0);
++	return ACPI_INTERRUPT_HANDLED;
+ }
+ 
+ static int loongson_rtc_set_enabled(struct device *dev)
+diff --git a/drivers/rtc/rtc-pcf85063.c b/drivers/rtc/rtc-pcf85063.c
+index fdbc07f14036af..905986c616559b 100644
+--- a/drivers/rtc/rtc-pcf85063.c
++++ b/drivers/rtc/rtc-pcf85063.c
+@@ -322,7 +322,16 @@ static const struct rtc_class_ops pcf85063_rtc_ops = {
+ static int pcf85063_nvmem_read(void *priv, unsigned int offset,
+ 			       void *val, size_t bytes)
+ {
+-	return regmap_read(priv, PCF85063_REG_RAM, val);
++	unsigned int tmp;
++	int ret;
++
++	ret = regmap_read(priv, PCF85063_REG_RAM, &tmp);
++	if (ret < 0)
++		return ret;
++
++	*(u8 *)val = tmp;
++
++	return 0;
+ }
+ 
+ static int pcf85063_nvmem_write(void *priv, unsigned int offset,
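The pcf85063 fix works around regmap_read() storing a full unsigned int through its val pointer, which overruns the 1-byte nvmem buffer the old code passed in directly. A sketch of the bounce-buffer idiom (foo_* names are illustrative):

static int foo_read_byte(struct regmap *map, unsigned int reg, u8 *out)
{
	unsigned int tmp;
	int ret;

	/*
	 * regmap_read() always writes sizeof(unsigned int) bytes, so a
	 * narrower destination must go through a temporary.
	 */
	ret = regmap_read(map, reg, &tmp);
	if (ret < 0)
		return ret;

	*out = tmp;	/* truncate to the register's real width */
	return 0;
}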
+diff --git a/drivers/rtc/rtc-tps6594.c b/drivers/rtc/rtc-tps6594.c
+index e696676341378e..7c6246e3f02923 100644
+--- a/drivers/rtc/rtc-tps6594.c
++++ b/drivers/rtc/rtc-tps6594.c
+@@ -37,7 +37,7 @@
+ #define MAX_OFFSET (277774)
+ 
+ // Number of ticks per hour
+-#define TICKS_PER_HOUR (32768 * 3600)
++#define TICKS_PER_HOUR (32768 * 3600LL)
+ 
+ // Multiplier for ppb conversions
+ #define PPB_MULT NANO
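On the TICKS_PER_HOUR change: 32768 * 3600 itself still fits in a 32-bit int, but any further multiplication with the constant is then also done in 32 bits and can overflow. Suffixing one operand with LL promotes the whole expression to 64-bit arithmetic. A tiny sketch with hypothetical names:

#define FOO_TICKS_PER_HOUR	(32768 * 3600LL)	/* long long constant */

static long long foo_hours_to_ticks(long long hours)
{
	/* Evaluated in 64 bits because one operand is long long. */
	return hours * FOO_TICKS_PER_HOUR;
}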
+diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
+index fbffd451031fdb..45bd001206a2b8 100644
+--- a/drivers/s390/char/sclp.c
++++ b/drivers/s390/char/sclp.c
+@@ -245,7 +245,6 @@ static void sclp_request_timeout(bool force_restart);
+ static void sclp_process_queue(void);
+ static void __sclp_make_read_req(void);
+ static int sclp_init_mask(int calculate);
+-static int sclp_init(void);
+ 
+ static void
+ __sclp_queue_read_req(void)
+@@ -1251,8 +1250,7 @@ static struct platform_driver sclp_pdrv = {
+ 
+ /* Initialize SCLP driver. Return zero if driver is operational, non-zero
+  * otherwise. */
+-static int
+-sclp_init(void)
++int sclp_init(void)
+ {
+ 	unsigned long flags;
+ 	int rc = 0;
+@@ -1305,13 +1303,7 @@ sclp_init(void)
+ 
+ static __init int sclp_initcall(void)
+ {
+-	int rc;
+-
+-	rc = platform_driver_register(&sclp_pdrv);
+-	if (rc)
+-		return rc;
+-
+-	return sclp_init();
++	return platform_driver_register(&sclp_pdrv);
+ }
+ 
+ arch_initcall(sclp_initcall);
+diff --git a/drivers/scsi/mpi3mr/mpi3mr_app.c b/drivers/scsi/mpi3mr/mpi3mr_app.c
+index 10b8e4dc64f8b0..7589f48aebc80f 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr_app.c
++++ b/drivers/scsi/mpi3mr/mpi3mr_app.c
+@@ -2951,6 +2951,7 @@ void mpi3mr_bsg_init(struct mpi3mr_ioc *mrioc)
+ 		.max_hw_sectors		= MPI3MR_MAX_APP_XFER_SECTORS,
+ 		.max_segments		= MPI3MR_MAX_APP_XFER_SEGMENTS,
+ 	};
++	struct request_queue *q;
+ 
+ 	device_initialize(bsg_dev);
+ 
+@@ -2966,14 +2967,17 @@ void mpi3mr_bsg_init(struct mpi3mr_ioc *mrioc)
+ 		return;
+ 	}
+ 
+-	mrioc->bsg_queue = bsg_setup_queue(bsg_dev, dev_name(bsg_dev), &lim,
++	q = bsg_setup_queue(bsg_dev, dev_name(bsg_dev), &lim,
+ 			mpi3mr_bsg_request, NULL, 0);
+-	if (IS_ERR(mrioc->bsg_queue)) {
++	if (IS_ERR(q)) {
+ 		ioc_err(mrioc, "%s: bsg registration failed\n",
+ 		    dev_name(bsg_dev));
+ 		device_del(bsg_dev);
+ 		put_device(bsg_dev);
++		return;
+ 	}
++
++	mrioc->bsg_queue = q;
+ }
+ 
+ /**
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
+index 16ac2267c71e19..c1d8f2c91a5e51 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
+@@ -5629,8 +5629,7 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
+ 	if (!ioc->is_gen35_ioc && ioc->manu_pg11.EEDPTagMode == 0) {
+ 		pr_err("%s: overriding NVDATA EEDPTagMode setting\n",
+ 		    ioc->name);
+-		ioc->manu_pg11.EEDPTagMode &= ~0x3;
+-		ioc->manu_pg11.EEDPTagMode |= 0x1;
++		ioc->manu_pg11.EEDPTagMode = 0x1;
+ 		mpt3sas_config_set_manufacturing_pg11(ioc, &mpi_reply,
+ 		    &ioc->manu_pg11);
+ 	}
+diff --git a/drivers/soc/atmel/soc.c b/drivers/soc/atmel/soc.c
+index 2a42b28931c96d..298b542dd1c064 100644
+--- a/drivers/soc/atmel/soc.c
++++ b/drivers/soc/atmel/soc.c
+@@ -399,7 +399,7 @@ static const struct of_device_id at91_soc_allowed_list[] __initconst = {
+ 
+ static int __init atmel_soc_device_init(void)
+ {
+-	struct device_node *np = of_find_node_by_path("/");
++	struct device_node *np __free(device_node) = of_find_node_by_path("/");
+ 
+ 	if (!of_match_node(at91_soc_allowed_list, np))
+ 		return 0;
+diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
+index 4a2f84c4d22e5f..532b2e9c31d0d3 100644
+--- a/drivers/spi/spi-omap2-mcspi.c
++++ b/drivers/spi/spi-omap2-mcspi.c
+@@ -1561,10 +1561,15 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	mcspi->ref_clk = devm_clk_get_optional_enabled(&pdev->dev, NULL);
+-	if (IS_ERR(mcspi->ref_clk))
+-		mcspi->ref_clk_hz = OMAP2_MCSPI_MAX_FREQ;
+-	else
++	if (IS_ERR(mcspi->ref_clk)) {
++		status = PTR_ERR(mcspi->ref_clk);
++		dev_err_probe(&pdev->dev, status, "Failed to get ref_clk");
++		goto free_ctlr;
++	}
++	if (mcspi->ref_clk)
+ 		mcspi->ref_clk_hz = clk_get_rate(mcspi->ref_clk);
++	else
++		mcspi->ref_clk_hz = OMAP2_MCSPI_MAX_FREQ;
+ 	ctlr->max_speed_hz = mcspi->ref_clk_hz;
+ 	ctlr->min_speed_hz = mcspi->ref_clk_hz >> 15;
+ 
+diff --git a/drivers/spi/spi-zynq-qspi.c b/drivers/spi/spi-zynq-qspi.c
+index b67455bda972b2..de4c182474329d 100644
+--- a/drivers/spi/spi-zynq-qspi.c
++++ b/drivers/spi/spi-zynq-qspi.c
+@@ -379,12 +379,21 @@ static int zynq_qspi_setup_op(struct spi_device *spi)
+ {
+ 	struct spi_controller *ctlr = spi->controller;
+ 	struct zynq_qspi *qspi = spi_controller_get_devdata(ctlr);
++	int ret;
+ 
+ 	if (ctlr->busy)
+ 		return -EBUSY;
+ 
+-	clk_enable(qspi->refclk);
+-	clk_enable(qspi->pclk);
++	ret = clk_enable(qspi->refclk);
++	if (ret)
++		return ret;
++
++	ret = clk_enable(qspi->pclk);
++	if (ret) {
++		clk_disable(qspi->refclk);
++		return ret;
++	}
++
+ 	zynq_qspi_write(qspi, ZYNQ_QSPI_ENABLE_OFFSET,
+ 			ZYNQ_QSPI_ENABLE_ENABLE_MASK);
+ 
+diff --git a/drivers/staging/media/imx/imx-media-of.c b/drivers/staging/media/imx/imx-media-of.c
+index 118bff988bc7e6..bb28daa4d71334 100644
+--- a/drivers/staging/media/imx/imx-media-of.c
++++ b/drivers/staging/media/imx/imx-media-of.c
+@@ -54,22 +54,18 @@ int imx_media_add_of_subdevs(struct imx_media_dev *imxmd,
+ 			break;
+ 
+ 		ret = imx_media_of_add_csi(imxmd, csi_np);
++		of_node_put(csi_np);
+ 		if (ret) {
+ 			/* unavailable or already added is not an error */
+ 			if (ret == -ENODEV || ret == -EEXIST) {
+-				of_node_put(csi_np);
+ 				continue;
+ 			}
+ 
+ 			/* other error, can't continue */
+-			goto err_out;
++			return ret;
+ 		}
+ 	}
+ 
+ 	return 0;
+-
+-err_out:
+-	of_node_put(csi_np);
+-	return ret;
+ }
+ EXPORT_SYMBOL_GPL(imx_media_add_of_subdevs);
+diff --git a/drivers/staging/media/max96712/max96712.c b/drivers/staging/media/max96712/max96712.c
+index 6bdbccbee05ac3..b528727ada75c6 100644
+--- a/drivers/staging/media/max96712/max96712.c
++++ b/drivers/staging/media/max96712/max96712.c
+@@ -421,7 +421,6 @@ static int max96712_probe(struct i2c_client *client)
+ 		return -ENOMEM;
+ 
+ 	priv->client = client;
+-	i2c_set_clientdata(client, priv);
+ 
+ 	priv->regmap = devm_regmap_init_i2c(client, &max96712_i2c_regmap);
+ 	if (IS_ERR(priv->regmap))
+@@ -454,7 +453,8 @@ static int max96712_probe(struct i2c_client *client)
+ 
+ static void max96712_remove(struct i2c_client *client)
+ {
+-	struct max96712_priv *priv = i2c_get_clientdata(client);
++	struct v4l2_subdev *sd = i2c_get_clientdata(client);
++	struct max96712_priv *priv = container_of(sd, struct max96712_priv, sd);
+ 
+ 	v4l2_async_unregister_subdev(&priv->sd);
+ 
+diff --git a/drivers/tty/mips_ejtag_fdc.c b/drivers/tty/mips_ejtag_fdc.c
+index afbf7738c7c47c..58b28be63c79b1 100644
+--- a/drivers/tty/mips_ejtag_fdc.c
++++ b/drivers/tty/mips_ejtag_fdc.c
+@@ -1154,7 +1154,7 @@ static char kgdbfdc_rbuf[4];
+ 
+ /* write buffer to allow compaction */
+ static unsigned int kgdbfdc_wbuflen;
+-static char kgdbfdc_wbuf[4];
++static u8 kgdbfdc_wbuf[4];
+ 
+ static void __iomem *kgdbfdc_setup(void)
+ {
+@@ -1215,7 +1215,7 @@ static int kgdbfdc_read_char(void)
+ /* push an FDC word from write buffer to TX FIFO */
+ static void kgdbfdc_push_one(void)
+ {
+-	const char *bufs[1] = { kgdbfdc_wbuf };
++	const u8 *bufs[1] = { kgdbfdc_wbuf };
+ 	struct fdc_word word;
+ 	void __iomem *regs;
+ 	unsigned int i;
+diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
+index 3509af7dc52b88..11519aa2598a01 100644
+--- a/drivers/tty/serial/8250/8250_port.c
++++ b/drivers/tty/serial/8250/8250_port.c
+@@ -2059,7 +2059,8 @@ static void serial8250_break_ctl(struct uart_port *port, int break_state)
+ 	serial8250_rpm_put(up);
+ }
+ 
+-static void wait_for_lsr(struct uart_8250_port *up, int bits)
++/* Returns true if @bits were set, false on timeout */
++static bool wait_for_lsr(struct uart_8250_port *up, int bits)
+ {
+ 	unsigned int status, tmout = 10000;
+ 
+@@ -2074,11 +2075,11 @@ static void wait_for_lsr(struct uart_8250_port *up, int bits)
+ 		udelay(1);
+ 		touch_nmi_watchdog();
+ 	}
++
++	return (tmout != 0);
+ }
+ 
+-/*
+- *	Wait for transmitter & holding register to empty
+- */
++/* Wait for transmitter and holding register to empty with timeout */
+ static void wait_for_xmitr(struct uart_8250_port *up, int bits)
+ {
+ 	unsigned int tmout;
+@@ -3297,6 +3298,16 @@ static void serial8250_console_restore(struct uart_8250_port *up)
+ 	serial8250_out_MCR(up, up->mcr | UART_MCR_DTR | UART_MCR_RTS);
+ }
+ 
++static void fifo_wait_for_lsr(struct uart_8250_port *up, unsigned int count)
++{
++	unsigned int i;
++
++	for (i = 0; i < count; i++) {
++		if (wait_for_lsr(up, UART_LSR_THRE))
++			return;
++	}
++}
++
+ /*
+  * Print a string to the serial port using the device FIFO
+  *
+@@ -3306,13 +3317,15 @@ static void serial8250_console_restore(struct uart_8250_port *up)
+ static void serial8250_console_fifo_write(struct uart_8250_port *up,
+ 					  const char *s, unsigned int count)
+ {
+-	int i;
+ 	const char *end = s + count;
+ 	unsigned int fifosize = up->tx_loadsz;
++	unsigned int tx_count = 0;
+ 	bool cr_sent = false;
++	unsigned int i;
+ 
+ 	while (s != end) {
+-		wait_for_lsr(up, UART_LSR_THRE);
++		/* Allow timeout for each byte of a possibly full FIFO */
++		fifo_wait_for_lsr(up, fifosize);
+ 
+ 		for (i = 0; i < fifosize && s != end; ++i) {
+ 			if (*s == '\n' && !cr_sent) {
+@@ -3323,7 +3336,14 @@ static void serial8250_console_fifo_write(struct uart_8250_port *up,
+ 				cr_sent = false;
+ 			}
+ 		}
++		tx_count = i;
+ 	}
++
++	/*
++	 * Allow timeout for each byte written since the caller will only wait
++	 * for UART_LSR_BOTH_EMPTY using the timeout of a single character
++	 */
++	fifo_wait_for_lsr(up, tx_count);
+ }
+ 
+ /*
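The 8250 change above turns a fire-and-forget delay loop into one that reports whether the LSR bits actually appeared, so console writes can retry once per byte of a possibly full FIFO. A sketch of such a bounded poll (register access is illustrative):

/*
 * Busy-wait for @bits in a status register, bounded at roughly 10 ms.
 * Returns true if the bits were observed, false on timeout.
 */
static bool foo_poll_bits(void __iomem *reg, u8 bits)
{
	unsigned int tmout = 10000;

	while ((readb(reg) & bits) != bits) {
		if (--tmout == 0)
			return false;
		udelay(1);
	}
	return true;
}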
+diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
+index ad88a33a504f53..6a0a1cce3a897f 100644
+--- a/drivers/tty/serial/sc16is7xx.c
++++ b/drivers/tty/serial/sc16is7xx.c
+@@ -8,7 +8,7 @@
+  */
+ 
+ #undef DEFAULT_SYMBOL_NAMESPACE
+-#define DEFAULT_SYMBOL_NAMESPACE SERIAL_NXP_SC16IS7XX
++#define DEFAULT_SYMBOL_NAMESPACE "SERIAL_NXP_SC16IS7XX"
+ 
+ #include <linux/bits.h>
+ #include <linux/clk.h>
+diff --git a/drivers/ufs/core/ufs_bsg.c b/drivers/ufs/core/ufs_bsg.c
+index 6c09d97ae00658..58023f735c195f 100644
+--- a/drivers/ufs/core/ufs_bsg.c
++++ b/drivers/ufs/core/ufs_bsg.c
+@@ -257,6 +257,7 @@ int ufs_bsg_probe(struct ufs_hba *hba)
+ 			NULL, 0);
+ 	if (IS_ERR(q)) {
+ 		ret = PTR_ERR(q);
++		device_del(bsg_dev);
+ 		goto out;
+ 	}
+ 
+diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
+index 98114c2827c098..244e3e04e1ad74 100644
+--- a/drivers/usb/dwc3/core.c
++++ b/drivers/usb/dwc3/core.c
+@@ -1660,8 +1660,6 @@ static void dwc3_get_properties(struct dwc3 *dwc)
+ 	u8			tx_thr_num_pkt_prd = 0;
+ 	u8			tx_max_burst_prd = 0;
+ 	u8			tx_fifo_resize_max_num;
+-	const char		*usb_psy_name;
+-	int			ret;
+ 
+ 	/* default to highest possible threshold */
+ 	lpm_nyet_threshold = 0xf;
+@@ -1696,13 +1694,6 @@ static void dwc3_get_properties(struct dwc3 *dwc)
+ 
+ 	dwc->sys_wakeup = device_may_wakeup(dwc->sysdev);
+ 
+-	ret = device_property_read_string(dev, "usb-psy-name", &usb_psy_name);
+-	if (ret >= 0) {
+-		dwc->usb_psy = power_supply_get_by_name(usb_psy_name);
+-		if (!dwc->usb_psy)
+-			dev_err(dev, "couldn't get usb power supply\n");
+-	}
+-
+ 	dwc->has_lpm_erratum = device_property_read_bool(dev,
+ 				"snps,has-lpm-erratum");
+ 	device_property_read_u8(dev, "snps,lpm-nyet-threshold",
+@@ -2105,6 +2096,23 @@ static int dwc3_get_num_ports(struct dwc3 *dwc)
+ 	return 0;
+ }
+ 
++static struct power_supply *dwc3_get_usb_power_supply(struct dwc3 *dwc)
++{
++	struct power_supply *usb_psy;
++	const char *usb_psy_name;
++	int ret;
++
++	ret = device_property_read_string(dwc->dev, "usb-psy-name", &usb_psy_name);
++	if (ret < 0)
++		return NULL;
++
++	usb_psy = power_supply_get_by_name(usb_psy_name);
++	if (!usb_psy)
++		return ERR_PTR(-EPROBE_DEFER);
++
++	return usb_psy;
++}
++
+ static int dwc3_probe(struct platform_device *pdev)
+ {
+ 	struct device		*dev = &pdev->dev;
+@@ -2161,6 +2169,10 @@ static int dwc3_probe(struct platform_device *pdev)
+ 
+ 	dwc3_get_software_properties(dwc);
+ 
++	dwc->usb_psy = dwc3_get_usb_power_supply(dwc);
++	if (IS_ERR(dwc->usb_psy))
++		return dev_err_probe(dev, PTR_ERR(dwc->usb_psy), "couldn't get usb power supply\n");
++
+ 	dwc->reset = devm_reset_control_array_get_optional_shared(dev);
+ 	if (IS_ERR(dwc->reset)) {
+ 		ret = PTR_ERR(dwc->reset);
+@@ -2585,12 +2597,15 @@ static int dwc3_resume(struct device *dev)
+ 	pinctrl_pm_select_default_state(dev);
+ 
+ 	pm_runtime_disable(dev);
+-	pm_runtime_set_active(dev);
++	ret = pm_runtime_set_active(dev);
++	if (ret)
++		goto out;
+ 
+ 	ret = dwc3_resume_common(dwc, PMSG_RESUME);
+ 	if (ret)
+ 		pm_runtime_set_suspended(dev);
+ 
++out:
+ 	pm_runtime_enable(dev);
+ 
+ 	return ret;
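The dwc3 rework pulls the power-supply lookup out of dwc3_get_properties() so that a supply which is named in firmware but not yet registered defers the probe rather than being silently ignored. A hedged sketch of that optional-resource pattern; the foo name and property string are illustrative:

static struct power_supply *foo_get_optional_psy(struct device *dev)
{
	struct power_supply *psy;
	const char *name;

	/* Property absent: the supply is genuinely optional. */
	if (device_property_read_string(dev, "foo-psy-name", &name))
		return NULL;

	/* Named but not registered yet: try again later. */
	psy = power_supply_get_by_name(name);
	if (!psy)
		return ERR_PTR(-EPROBE_DEFER);

	return psy;
}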
+diff --git a/drivers/usb/dwc3/dwc3-am62.c b/drivers/usb/dwc3/dwc3-am62.c
+index 538185a4d1b4fb..c507e576bbe084 100644
+--- a/drivers/usb/dwc3/dwc3-am62.c
++++ b/drivers/usb/dwc3/dwc3-am62.c
+@@ -166,6 +166,7 @@ static int phy_syscon_pll_refclk(struct dwc3_am62 *am62)
+ 	if (ret)
+ 		return ret;
+ 
++	of_node_put(args.np);
+ 	am62->offset = args.args[0];
+ 
+ 	/* Core voltage. PHY_CORE_VOLTAGE bit Recommended to be 0 always */
+diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c
+index 15bb3aa12aa8b4..48dee166e5d89c 100644
+--- a/drivers/usb/gadget/function/f_tcm.c
++++ b/drivers/usb/gadget/function/f_tcm.c
+@@ -1066,7 +1066,6 @@ static void usbg_cmd_work(struct work_struct *work)
+ out:
+ 	transport_send_check_condition_and_sense(se_cmd,
+ 			TCM_UNSUPPORTED_SCSI_OPCODE, 1);
+-	transport_generic_free_cmd(&cmd->se_cmd, 0);
+ }
+ 
+ static struct usbg_cmd *usbg_get_cmd(struct f_uas *fu,
+@@ -1195,7 +1194,6 @@ static void bot_cmd_work(struct work_struct *work)
+ out:
+ 	transport_send_check_condition_and_sense(se_cmd,
+ 				TCM_UNSUPPORTED_SCSI_OPCODE, 1);
+-	transport_generic_free_cmd(&cmd->se_cmd, 0);
+ }
+ 
+ static int bot_submit_command(struct f_uas *fu,
+@@ -2051,9 +2049,14 @@ static void tcm_delayed_set_alt(struct work_struct *wq)
+ 
+ static int tcm_get_alt(struct usb_function *f, unsigned intf)
+ {
+-	if (intf == bot_intf_desc.bInterfaceNumber)
++	struct f_uas *fu = to_f_uas(f);
++
++	if (fu->iface != intf)
++		return -EOPNOTSUPP;
++
++	if (fu->flags & USBG_IS_BOT)
+ 		return USB_G_ALT_INT_BBB;
+-	if (intf == uasp_intf_desc.bInterfaceNumber)
++	else if (fu->flags & USBG_IS_UAS)
+ 		return USB_G_ALT_INT_UAS;
+ 
+ 	return -EOPNOTSUPP;
+@@ -2063,6 +2066,9 @@ static int tcm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+ {
+ 	struct f_uas *fu = to_f_uas(f);
+ 
++	if (fu->iface != intf)
++		return -EOPNOTSUPP;
++
+ 	if ((alt == USB_G_ALT_INT_BBB) || (alt == USB_G_ALT_INT_UAS)) {
+ 		struct guas_setup_wq *work;
+ 
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index b267dae14d3904..4384b86ea7b66c 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -422,7 +422,8 @@ static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
+ 	if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) &&
+ 	    !(xhci->xhc_state & XHCI_STATE_DYING)) {
+ 		xhci->current_cmd = cur_cmd;
+-		xhci_mod_cmd_timer(xhci);
++		if (cur_cmd)
++			xhci_mod_cmd_timer(xhci);
+ 		xhci_ring_cmd_db(xhci);
+ 	}
+ }
+diff --git a/drivers/usb/storage/Makefile b/drivers/usb/storage/Makefile
+index 46635fa4a3405d..28db337f190bf5 100644
+--- a/drivers/usb/storage/Makefile
++++ b/drivers/usb/storage/Makefile
+@@ -8,7 +8,7 @@
+ 
+ ccflags-y := -I $(srctree)/drivers/scsi
+ 
+-ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=USB_STORAGE
++ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE='"USB_STORAGE"'
+ 
+ obj-$(CONFIG_USB_UAS)		+= uas.o
+ obj-$(CONFIG_USB_STORAGE)	+= usb-storage.o
+diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c
+index 24a6a4354df8ba..b2c83f552da55d 100644
+--- a/drivers/usb/typec/tcpm/tcpci.c
++++ b/drivers/usb/typec/tcpm/tcpci.c
+@@ -27,6 +27,7 @@
+ #define	VPPS_NEW_MIN_PERCENT			95
+ #define	VPPS_VALID_MIN_MV			100
+ #define	VSINKDISCONNECT_PD_MIN_PERCENT		90
++#define	VPPS_SHUTDOWN_MIN_PERCENT		85
+ 
+ struct tcpci {
+ 	struct device *dev;
+@@ -366,7 +367,8 @@ static int tcpci_enable_auto_vbus_discharge(struct tcpc_dev *dev, bool enable)
+ }
+ 
+ static int tcpci_set_auto_vbus_discharge_threshold(struct tcpc_dev *dev, enum typec_pwr_opmode mode,
+-						   bool pps_active, u32 requested_vbus_voltage_mv)
++						   bool pps_active, u32 requested_vbus_voltage_mv,
++						   u32 apdo_min_voltage_mv)
+ {
+ 	struct tcpci *tcpci = tcpc_to_tcpci(dev);
+ 	unsigned int pwr_ctrl, threshold = 0;
+@@ -388,9 +390,12 @@ static int tcpci_set_auto_vbus_discharge_threshold(struct tcpc_dev *dev, enum ty
+ 		threshold = AUTO_DISCHARGE_DEFAULT_THRESHOLD_MV;
+ 	} else if (mode == TYPEC_PWR_MODE_PD) {
+ 		if (pps_active)
+-			threshold = ((VPPS_NEW_MIN_PERCENT * requested_vbus_voltage_mv / 100) -
+-				     VSINKPD_MIN_IR_DROP_MV - VPPS_VALID_MIN_MV) *
+-				     VSINKDISCONNECT_PD_MIN_PERCENT / 100;
++			/*
++			 * To prevent a disconnect when the source is in Current Limit Mode,
++			 * set the threshold to the lowest possible voltage, vPpsShutdown (min).
++			 */
++			threshold = VPPS_SHUTDOWN_MIN_PERCENT * apdo_min_voltage_mv / 100 -
++				    VSINKPD_MIN_IR_DROP_MV;
+ 		else
+ 			threshold = ((VSRC_NEW_MIN_PERCENT * requested_vbus_voltage_mv / 100) -
+ 				     VSINKPD_MIN_IR_DROP_MV - VSRC_VALID_MIN_MV) *
+diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
+index 7ae341a403424c..48ddf27704619d 100644
+--- a/drivers/usb/typec/tcpm/tcpm.c
++++ b/drivers/usb/typec/tcpm/tcpm.c
+@@ -2928,10 +2928,12 @@ static int tcpm_set_auto_vbus_discharge_threshold(struct tcpm_port *port,
+ 		return 0;
+ 
+ 	ret = port->tcpc->set_auto_vbus_discharge_threshold(port->tcpc, mode, pps_active,
+-							    requested_vbus_voltage);
++							    requested_vbus_voltage,
++							    port->pps_data.min_volt);
+ 	tcpm_log_force(port,
+-		       "set_auto_vbus_discharge_threshold mode:%d pps_active:%c vbus:%u ret:%d",
+-		       mode, pps_active ? 'y' : 'n', requested_vbus_voltage, ret);
++		       "set_auto_vbus_discharge_threshold mode:%d pps_active:%c vbus:%u pps_apdo_min_volt:%u ret:%d",
++		       mode, pps_active ? 'y' : 'n', requested_vbus_voltage,
++		       port->pps_data.min_volt, ret);
+ 
+ 	return ret;
+ }
+@@ -4757,7 +4759,7 @@ static void run_state_machine(struct tcpm_port *port)
+ 			port->caps_count = 0;
+ 			port->pd_capable = true;
+ 			tcpm_set_state_cond(port, SRC_SEND_CAPABILITIES_TIMEOUT,
+-					    PD_T_SEND_SOURCE_CAP);
++					    PD_T_SENDER_RESPONSE);
+ 		}
+ 		break;
+ 	case SRC_SEND_CAPABILITIES_TIMEOUT:
+diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c b/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c
+index d5a43b3bf45ec9..c46108a16a9dd3 100644
+--- a/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c
++++ b/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c
+@@ -102,6 +102,7 @@ struct device_node *dss_of_port_get_parent_device(struct device_node *port)
+ 		np = of_get_next_parent(np);
+ 	}
+ 
++	of_node_put(np);
+ 	return NULL;
+ }
+ 
+diff --git a/drivers/watchdog/rti_wdt.c b/drivers/watchdog/rti_wdt.c
+index 563d842014dfba..cc239251e19383 100644
+--- a/drivers/watchdog/rti_wdt.c
++++ b/drivers/watchdog/rti_wdt.c
+@@ -301,6 +301,7 @@ static int rti_wdt_probe(struct platform_device *pdev)
+ 	node = of_parse_phandle(pdev->dev.of_node, "memory-region", 0);
+ 	if (node) {
+ 		ret = of_address_to_resource(node, 0, &res);
++		of_node_put(node);
+ 		if (ret) {
+ 			dev_err(dev, "No memory address assigned to the region.\n");
+ 			goto err_iomap;
+diff --git a/fs/afs/dir.c b/fs/afs/dir.c
+index ada363af5aab8e..50edd1cae28ace 100644
+--- a/fs/afs/dir.c
++++ b/fs/afs/dir.c
+@@ -1472,7 +1472,12 @@ static int afs_rmdir(struct inode *dir, struct dentry *dentry)
+ 		op->file[1].vnode = vnode;
+ 	}
+ 
+-	return afs_do_sync_operation(op);
++	ret = afs_do_sync_operation(op);
++
++	/* Not all systems that can host afs servers have ENOTEMPTY. */
++	if (ret == -EEXIST)
++		ret = -ENOTEMPTY;
++	return ret;
+ 
+ error:
+ 	return afs_put_operation(op);
+diff --git a/fs/afs/internal.h b/fs/afs/internal.h
+index c9d620175e80ca..d9760b2a8d8de4 100644
+--- a/fs/afs/internal.h
++++ b/fs/afs/internal.h
+@@ -1346,6 +1346,15 @@ extern void afs_send_simple_reply(struct afs_call *, const void *, size_t);
+ extern int afs_extract_data(struct afs_call *, bool);
+ extern int afs_protocol_error(struct afs_call *, enum afs_eproto_cause);
+ 
++static inline void afs_see_call(struct afs_call *call, enum afs_call_trace why)
++{
++	int r = refcount_read(&call->ref);
++
++	trace_afs_call(call->debug_id, why, r,
++		       atomic_read(&call->net->nr_outstanding_calls),
++		       __builtin_return_address(0));
++}
++
+ static inline void afs_make_op_call(struct afs_operation *op, struct afs_call *call,
+ 				    gfp_t gfp)
+ {
+diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
+index 9f2a3bb56ec69e..a122c6366ce19f 100644
+--- a/fs/afs/rxrpc.c
++++ b/fs/afs/rxrpc.c
+@@ -430,11 +430,16 @@ void afs_make_call(struct afs_call *call, gfp_t gfp)
+ 	return;
+ 
+ error_do_abort:
+-	if (ret != -ECONNABORTED) {
++	if (ret != -ECONNABORTED)
+ 		rxrpc_kernel_abort_call(call->net->socket, rxcall,
+ 					RX_USER_ABORT, ret,
+ 					afs_abort_send_data_error);
+-	} else {
++	if (call->async) {
++		afs_see_call(call, afs_call_trace_async_abort);
++		return;
++	}
++
++	if (ret == -ECONNABORTED) {
+ 		len = 0;
+ 		iov_iter_kvec(&msg.msg_iter, ITER_DEST, NULL, 0, 0);
+ 		rxrpc_kernel_recv_data(call->net->socket, rxcall,
+@@ -445,6 +450,8 @@ void afs_make_call(struct afs_call *call, gfp_t gfp)
+ 	call->error = ret;
+ 	trace_afs_call_done(call);
+ error_kill_call:
++	if (call->async)
++		afs_see_call(call, afs_call_trace_async_kill);
+ 	if (call->type->done)
+ 		call->type->done(call);
+ 
+@@ -602,7 +609,6 @@ static void afs_deliver_to_call(struct afs_call *call)
+ 	abort_code = 0;
+ call_complete:
+ 	afs_set_call_complete(call, ret, remote_abort);
+-	state = AFS_CALL_COMPLETE;
+ 	goto done;
+ }
+ 
+diff --git a/fs/afs/xdr_fs.h b/fs/afs/xdr_fs.h
+index 8ca8681645077d..cc5f143d21a347 100644
+--- a/fs/afs/xdr_fs.h
++++ b/fs/afs/xdr_fs.h
+@@ -88,7 +88,7 @@ union afs_xdr_dir_block {
+ 
+ 	struct {
+ 		struct afs_xdr_dir_hdr	hdr;
+-		u8			alloc_ctrs[AFS_DIR_MAX_BLOCKS];
++		u8			alloc_ctrs[AFS_DIR_BLOCKS_WITH_CTR];
+ 		__be16			hashtable[AFS_DIR_HASHTBL_SIZE];
+ 	} meta;
+ 
+diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c
+index 024227aba4cd5f..362845f9aaaefa 100644
+--- a/fs/afs/yfsclient.c
++++ b/fs/afs/yfsclient.c
+@@ -666,8 +666,9 @@ static int yfs_deliver_fs_remove_file2(struct afs_call *call)
+ static void yfs_done_fs_remove_file2(struct afs_call *call)
+ {
+ 	if (call->error == -ECONNABORTED &&
+-	    call->abort_code == RX_INVALID_OPERATION) {
+-		set_bit(AFS_SERVER_FL_NO_RM2, &call->server->flags);
++	    (call->abort_code == RX_INVALID_OPERATION ||
++	     call->abort_code == RXGEN_OPCODE)) {
++		set_bit(AFS_SERVER_FL_NO_RM2, &call->op->server->flags);
+ 		call->op->flags |= AFS_OPERATION_DOWNGRADE;
+ 	}
+ }
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index a3c861b2a6d25d..9d9ce308488dd3 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -2001,6 +2001,53 @@ static int can_nocow_file_extent(struct btrfs_path *path,
+ 	return ret < 0 ? ret : can_nocow;
+ }
+ 
++/*
++ * Cleanup the dirty folios which will never be submitted due to error.
++ *
++ * When running a delalloc range, we may need to split the ranges (due to
++ * fragmentation or NOCOW). If we hit an error in the later part, we will error
++ * out and previously successfully executed range will never be submitted, thus
++ * we have to cleanup those folios by clearing their dirty flag, starting and
++ * finishing the writeback.
++ */
++static void cleanup_dirty_folios(struct btrfs_inode *inode,
++				 struct folio *locked_folio,
++				 u64 start, u64 end, int error)
++{
++	struct btrfs_fs_info *fs_info = inode->root->fs_info;
++	struct address_space *mapping = inode->vfs_inode.i_mapping;
++	pgoff_t start_index = start >> PAGE_SHIFT;
++	pgoff_t end_index = end >> PAGE_SHIFT;
++	u32 len;
++
++	ASSERT(end + 1 - start < U32_MAX);
++	ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
++	       IS_ALIGNED(end + 1, fs_info->sectorsize));
++	len = end + 1 - start;
++
++	/*
++	 * Handle the locked folio first.
++	 * The btrfs_folio_clamp_*() helpers can handle range out of the folio case.
++	 */
++	btrfs_folio_clamp_finish_io(fs_info, locked_folio, start, len);
++
++	for (pgoff_t index = start_index; index <= end_index; index++) {
++		struct folio *folio;
++
++		/* Already handled at the beginning. */
++		if (index == locked_folio->index)
++			continue;
++		folio = __filemap_get_folio(mapping, index, FGP_LOCK, GFP_NOFS);
++		/* Cache already dropped, no need to do any cleanup. */
++		if (IS_ERR(folio))
++			continue;
++		btrfs_folio_clamp_finish_io(fs_info, folio, start, len);
++		folio_unlock(folio);
++		folio_put(folio);
++	}
++	mapping_set_error(mapping, error);
++}
++
+ /*
+  * when nowcow writeback call back.  This checks for snapshots or COW copies
+  * of the extents that exist in the file, and COWs the file as required.
+@@ -2016,6 +2063,11 @@ static noinline int run_delalloc_nocow(struct btrfs_inode *inode,
+ 	struct btrfs_root *root = inode->root;
+ 	struct btrfs_path *path;
+ 	u64 cow_start = (u64)-1;
++	/*
++	 * If not 0, represents the inclusive end of the last fallback_to_cow()
++	 * range. Only for error handling.
++	 */
++	u64 cow_end = 0;
+ 	u64 cur_offset = start;
+ 	int ret;
+ 	bool check_prev = true;
+@@ -2176,6 +2228,7 @@ static noinline int run_delalloc_nocow(struct btrfs_inode *inode,
+ 					      found_key.offset - 1);
+ 			cow_start = (u64)-1;
+ 			if (ret) {
++				cow_end = found_key.offset - 1;
+ 				btrfs_dec_nocow_writers(nocow_bg);
+ 				goto error;
+ 			}
+@@ -2249,24 +2302,54 @@ static noinline int run_delalloc_nocow(struct btrfs_inode *inode,
+ 		cow_start = cur_offset;
+ 
+ 	if (cow_start != (u64)-1) {
+-		cur_offset = end;
+ 		ret = fallback_to_cow(inode, locked_folio, cow_start, end);
+ 		cow_start = (u64)-1;
+-		if (ret)
++		if (ret) {
++			cow_end = end;
+ 			goto error;
++		}
+ 	}
+ 
+ 	btrfs_free_path(path);
+ 	return 0;
+ 
+ error:
++	/*
++	 * There are several error cases:
++	 *
++	 * 1) Failed without falling back to COW
++	 *    start         cur_offset             end
++	 *    |/////////////|                      |
++	 *
++	 *    For range [start, cur_offset) the folios are already unlocked (except
++	 *    @locked_folio), EXTENT_DELALLOC already removed.
++	 *    Only need to clear the dirty flag as they will never be submitted.
++	 *    Ordered extent and extent maps are handled by
++	 *    btrfs_mark_ordered_io_finished() inside run_delalloc_range().
++	 *
++	 * 2) Failed with error from fallback_to_cow()
++	 *    start         cur_offset  cow_end    end
++	 *    |/////////////|-----------|          |
++	 *
++	 *    For range [start, cur_offset) it's the same as case 1).
++	 *    But for range [cur_offset, cow_end), the folios have dirty flag
++	 *    cleared and unlocked, EXTENT_DELALLOC cleared by cow_file_range().
++	 *
++	 *    Thus we should not call extent_clear_unlock_delalloc() on range
++	 *    [cur_offset, cow_end), as the folios are already unlocked.
++	 *
++	 * So clear the folio dirty flags for [start, cur_offset) first.
++	 */
++	if (cur_offset > start)
++		cleanup_dirty_folios(inode, locked_folio, start, cur_offset - 1, ret);
++
+ 	/*
+ 	 * If an error happened while a COW region is outstanding, cur_offset
+-	 * needs to be reset to cow_start to ensure the COW region is unlocked
+-	 * as well.
++	 * needs to be reset to @cow_end + 1 to skip the COW range, as
++	 * cow_file_range() will do the proper cleanup at error.
+ 	 */
+-	if (cow_start != (u64)-1)
+-		cur_offset = cow_start;
++	if (cow_end)
++		cur_offset = cow_end + 1;
+ 
+ 	/*
+ 	 * We need to lock the extent here because we're clearing DELALLOC and
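cleanup_dirty_folios() above walks the page cache with __filemap_get_folio(FGP_LOCK, ...), tolerating holes where folios were already dropped. Stripped to its bones, the walk looks like this sketch (per-folio cleanup elided; small folios assumed):

static void foo_for_each_folio(struct address_space *mapping,
			       loff_t start, loff_t end)
{
	pgoff_t index = start >> PAGE_SHIFT;
	pgoff_t last = end >> PAGE_SHIFT;

	for (; index <= last; index++) {
		struct folio *folio;

		folio = __filemap_get_folio(mapping, index, FGP_LOCK, GFP_NOFS);
		if (IS_ERR(folio))	/* hole: nothing left to clean up */
			continue;
		/* ... per-folio cleanup under the folio lock ... */
		folio_unlock(folio);
		folio_put(folio);
	}
}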
+diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
+index e70ed857fc743b..4fcd6cd4c1c244 100644
+--- a/fs/btrfs/qgroup.c
++++ b/fs/btrfs/qgroup.c
+@@ -1839,9 +1839,19 @@ int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
+ 	 * Thus its reserved space should all be zero, no matter if qgroup
+ 	 * is consistent or the mode.
+ 	 */
+-	WARN_ON(qgroup->rsv.values[BTRFS_QGROUP_RSV_DATA] ||
+-		qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PREALLOC] ||
+-		qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PERTRANS]);
++	if (qgroup->rsv.values[BTRFS_QGROUP_RSV_DATA] ||
++	    qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PREALLOC] ||
++	    qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PERTRANS]) {
++		WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
++		btrfs_warn_rl(fs_info,
++"to be deleted qgroup %u/%llu has non-zero numbers, data %llu meta prealloc %llu meta pertrans %llu",
++			      btrfs_qgroup_level(qgroup->qgroupid),
++			      btrfs_qgroup_subvolid(qgroup->qgroupid),
++			      qgroup->rsv.values[BTRFS_QGROUP_RSV_DATA],
++			      qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PREALLOC],
++			      qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PERTRANS]);
++
++	}
+ 	/*
+ 	 * The same for rfer/excl numbers, but that's only if our qgroup is
+ 	 * consistent and if it's in regular qgroup mode.
+@@ -1850,8 +1860,9 @@ int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
+ 	 */
+ 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_FULL &&
+ 	    !(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT)) {
+-		if (WARN_ON(qgroup->rfer || qgroup->excl ||
+-			    qgroup->rfer_cmpr || qgroup->excl_cmpr)) {
++		if (qgroup->rfer || qgroup->excl ||
++		    qgroup->rfer_cmpr || qgroup->excl_cmpr) {
++			WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
+ 			btrfs_warn_rl(fs_info,
+ "to be deleted qgroup %u/%llu has non-zero numbers, rfer %llu rfer_cmpr %llu excl %llu excl_cmpr %llu",
+ 				      btrfs_qgroup_level(qgroup->qgroupid),
+diff --git a/fs/btrfs/subpage.c b/fs/btrfs/subpage.c
+index fe4d719d506bf5..ec7328a6bfd755 100644
+--- a/fs/btrfs/subpage.c
++++ b/fs/btrfs/subpage.c
+@@ -868,6 +868,7 @@ void __cold btrfs_subpage_dump_bitmap(const struct btrfs_fs_info *fs_info,
+ 	unsigned long writeback_bitmap;
+ 	unsigned long ordered_bitmap;
+ 	unsigned long checked_bitmap;
++	unsigned long locked_bitmap;
+ 	unsigned long flags;
+ 
+ 	ASSERT(folio_test_private(folio) && folio_get_private(folio));
+@@ -880,15 +881,16 @@ void __cold btrfs_subpage_dump_bitmap(const struct btrfs_fs_info *fs_info,
+ 	GET_SUBPAGE_BITMAP(subpage, fs_info, writeback, &writeback_bitmap);
+ 	GET_SUBPAGE_BITMAP(subpage, fs_info, ordered, &ordered_bitmap);
+ 	GET_SUBPAGE_BITMAP(subpage, fs_info, checked, &checked_bitmap);
+-	GET_SUBPAGE_BITMAP(subpage, fs_info, locked, &checked_bitmap);
++	GET_SUBPAGE_BITMAP(subpage, fs_info, locked, &locked_bitmap);
+ 	spin_unlock_irqrestore(&subpage->lock, flags);
+ 
+ 	dump_page(folio_page(folio, 0), "btrfs subpage dump");
+ 	btrfs_warn(fs_info,
+-"start=%llu len=%u page=%llu, bitmaps uptodate=%*pbl dirty=%*pbl writeback=%*pbl ordered=%*pbl checked=%*pbl",
++"start=%llu len=%u page=%llu, bitmaps uptodate=%*pbl dirty=%*pbl locked=%*pbl writeback=%*pbl ordered=%*pbl checked=%*pbl",
+ 		    start, len, folio_pos(folio),
+ 		    sectors_per_page, &uptodate_bitmap,
+ 		    sectors_per_page, &dirty_bitmap,
++		    sectors_per_page, &locked_bitmap,
+ 		    sectors_per_page, &writeback_bitmap,
+ 		    sectors_per_page, &ordered_bitmap,
+ 		    sectors_per_page, &checked_bitmap);
+diff --git a/fs/btrfs/subpage.h b/fs/btrfs/subpage.h
+index 4b85d91d0e18b0..cdb554e0d215e2 100644
+--- a/fs/btrfs/subpage.h
++++ b/fs/btrfs/subpage.h
+@@ -152,6 +152,19 @@ DECLARE_BTRFS_SUBPAGE_OPS(writeback);
+ DECLARE_BTRFS_SUBPAGE_OPS(ordered);
+ DECLARE_BTRFS_SUBPAGE_OPS(checked);
+ 
++/*
++ * Helper for error cleanup, where a folio will have its dirty flag cleared,
++ * with writeback started and finished.
++ */
++static inline void btrfs_folio_clamp_finish_io(struct btrfs_fs_info *fs_info,
++					       struct folio *locked_folio,
++					       u64 start, u32 len)
++{
++	btrfs_folio_clamp_clear_dirty(fs_info, locked_folio, start, len);
++	btrfs_folio_clamp_set_writeback(fs_info, locked_folio, start, len);
++	btrfs_folio_clamp_clear_writeback(fs_info, locked_folio, start, len);
++}
++
+ bool btrfs_subpage_clear_and_test_dirty(const struct btrfs_fs_info *fs_info,
+ 					struct folio *folio, u64 start, u32 len);
+ 
+diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
+index 8292e488d3d777..73343503ea60e4 100644
+--- a/fs/btrfs/super.c
++++ b/fs/btrfs/super.c
+@@ -972,7 +972,7 @@ static int btrfs_fill_super(struct super_block *sb,
+ 
+ 	err = open_ctree(sb, fs_devices);
+ 	if (err) {
+-		btrfs_err(fs_info, "open_ctree failed");
++		btrfs_err(fs_info, "open_ctree failed: %d", err);
+ 		return err;
+ 	}
+ 
+diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
+index dddedaef5e93dd..0c01e4423ee2a8 100644
+--- a/fs/dlm/lock.c
++++ b/fs/dlm/lock.c
+@@ -824,9 +824,12 @@ static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len,
+ 		r->res_first_lkid = 0;
+ 	}
+ 
+-	/* A dir record will not be on the scan list. */
+-	if (r->res_dir_nodeid != our_nodeid)
+-		del_scan(ls, r);
++	/* We always deactivate the scan timer for the rsb when
++	 * moving it out of the inactive state, since rsb state
++	 * can change and scan timers are only for inactive
++	 * rsbs.
++	 */
++	del_scan(ls, r);
+ 	list_move(&r->res_slow_list, &ls->ls_slow_active);
+ 	rsb_clear_flag(r, RSB_INACTIVE);
+ 	kref_init(&r->res_ref); /* ref is now used in active state */
+@@ -989,10 +992,10 @@ static int find_rsb_nodir(struct dlm_ls *ls, const void *name, int len,
+ 		r->res_nodeid = 0;
+ 	}
+ 
++	del_scan(ls, r);
+ 	list_move(&r->res_slow_list, &ls->ls_slow_active);
+ 	rsb_clear_flag(r, RSB_INACTIVE);
+ 	kref_init(&r->res_ref);
+-	del_scan(ls, r);
+ 	write_unlock_bh(&ls->ls_rsbtbl_lock);
+ 
+ 	goto out;
+@@ -1337,9 +1340,13 @@ static int _dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *na
+ 	__dlm_master_lookup(ls, r, our_nodeid, from_nodeid, true, flags,
+ 			    r_nodeid, result);
+ 
+-	/* A dir record rsb should never be on scan list. */
+-	/* Try to fix this with del_scan? */
+-	WARN_ON(!list_empty(&r->res_scan_list));
++	/* A dir record rsb should never be on the scan list,
++	 * except when we are both the dir and master node.
++	 * This function should only be called by the dir
++	 * node.
++	 */
++	WARN_ON(!list_empty(&r->res_scan_list) &&
++		r->res_master_nodeid != our_nodeid);
+ 
+ 	write_unlock_bh(&ls->ls_rsbtbl_lock);
+ 
+@@ -1430,16 +1437,23 @@ static void deactivate_rsb(struct kref *kref)
+ 	list_move(&r->res_slow_list, &ls->ls_slow_inactive);
+ 
+ 	/*
+-	 * When the rsb becomes unused:
+-	 * - If it's not a dir record for a remote master rsb,
+-	 *   then it is put on the scan list to be freed.
+-	 * - If it's a dir record for a remote master rsb,
+-	 *   then it is kept in the inactive state until
+-	 *   receive_remove() from the master node.
++	 * When the rsb becomes unused, there are two possibilities:
++	 * 1. Leave the inactive rsb in place (don't remove it).
++	 * 2. Add it to the scan list to be removed.
++	 *
++	 * 1 is done when the rsb is acting as the dir record
++	 * for a remotely mastered rsb.  The rsb must be left
++	 * in place as an inactive rsb to act as the dir record.
++	 *
++	 * 2 is done when a) the rsb is not the master and not the
++	 * dir record, b) when the rsb is both the master and the
++	 * dir record, c) when the rsb is master but not dir record.
++	 *
++	 * (If no directory is used, the rsb can always be removed.)
+ 	 */
+-	if (!dlm_no_directory(ls) &&
+-	    (r->res_master_nodeid != our_nodeid) &&
+-	    (dlm_dir_nodeid(r) != our_nodeid))
++	if (dlm_no_directory(ls) ||
++	    (r->res_master_nodeid == our_nodeid ||
++	     dlm_dir_nodeid(r) != our_nodeid))
+ 		add_scan(ls, r);
+ 
+ 	if (r->res_lvbptr) {
+diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
+index cb3a10b041c278..f2d88a3581695a 100644
+--- a/fs/dlm/lowcomms.c
++++ b/fs/dlm/lowcomms.c
+@@ -462,7 +462,8 @@ static bool dlm_lowcomms_con_has_addr(const struct connection *con,
+ int dlm_lowcomms_addr(int nodeid, struct sockaddr_storage *addr)
+ {
+ 	struct connection *con;
+-	bool ret, idx;
++	bool ret;
++	int idx;
+ 
+ 	idx = srcu_read_lock(&connections_srcu);
+ 	con = nodeid2con(nodeid, GFP_NOFS);
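The lowcomms fix matters because srcu_read_lock() returns an int index that must be handed back to srcu_read_unlock(); declaring it as bool truncated the index. The canonical read-side pairing, sketched:

DEFINE_STATIC_SRCU(foo_srcu);

static void foo_reader(void)
{
	int idx;	/* must be int: it is the SRCU array index */

	idx = srcu_read_lock(&foo_srcu);
	/* ... dereference SRCU-protected pointers here ... */
	srcu_read_unlock(&foo_srcu, idx);
}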
+diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h
+index 77e785a6dfa7ff..edbabb3256c9ac 100644
+--- a/fs/erofs/internal.h
++++ b/fs/erofs/internal.h
+@@ -205,12 +205,6 @@ enum {
+ 	EROFS_ZIP_CACHE_READAROUND
+ };
+ 
+-/* basic unit of the workstation of a super_block */
+-struct erofs_workgroup {
+-	pgoff_t index;
+-	struct lockref lockref;
+-};
+-
+ enum erofs_kmap_type {
+ 	EROFS_NO_KMAP,		/* don't map the buffer */
+ 	EROFS_KMAP,		/* use kmap_local_page() to map the buffer */
+@@ -452,20 +446,15 @@ static inline void erofs_pagepool_add(struct page **pagepool, struct page *page)
+ void erofs_release_pages(struct page **pagepool);
+ 
+ #ifdef CONFIG_EROFS_FS_ZIP
+-void erofs_workgroup_put(struct erofs_workgroup *grp);
+-struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
+-					     pgoff_t index);
+-struct erofs_workgroup *erofs_insert_workgroup(struct super_block *sb,
+-					       struct erofs_workgroup *grp);
+-void erofs_workgroup_free_rcu(struct erofs_workgroup *grp);
++extern atomic_long_t erofs_global_shrink_cnt;
+ void erofs_shrinker_register(struct super_block *sb);
+ void erofs_shrinker_unregister(struct super_block *sb);
+ int __init erofs_init_shrinker(void);
+ void erofs_exit_shrinker(void);
+ int __init z_erofs_init_subsystem(void);
+ void z_erofs_exit_subsystem(void);
+-int erofs_try_to_free_all_cached_folios(struct erofs_sb_info *sbi,
+-					struct erofs_workgroup *egrp);
++unsigned long z_erofs_shrink_scan(struct erofs_sb_info *sbi,
++				  unsigned long nr_shrink);
+ int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map,
+ 			    int flags);
+ void *z_erofs_get_gbuf(unsigned int requiredpages);
+diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
+index 1a00f061798a3c..a8fb4b525f5443 100644
+--- a/fs/erofs/zdata.c
++++ b/fs/erofs/zdata.c
+@@ -44,12 +44,15 @@ __Z_EROFS_BVSET(z_erofs_bvset_inline, Z_EROFS_INLINE_BVECS);
+  * A: Field should be accessed / updated in atomic for parallelized code.
+  */
+ struct z_erofs_pcluster {
+-	struct erofs_workgroup obj;
+ 	struct mutex lock;
++	struct lockref lockref;
+ 
+ 	/* A: point to next chained pcluster or TAILs */
+ 	z_erofs_next_pcluster_t next;
+ 
++	/* I: start block address of this pcluster */
++	erofs_off_t index;
++
+ 	/* L: the maximum decompression size of this round */
+ 	unsigned int length;
+ 
+@@ -108,7 +111,7 @@ struct z_erofs_decompressqueue {
+ 
+ static inline bool z_erofs_is_inline_pcluster(struct z_erofs_pcluster *pcl)
+ {
+-	return !pcl->obj.index;
++	return !pcl->index;
+ }
+ 
+ static inline unsigned int z_erofs_pclusterpages(struct z_erofs_pcluster *pcl)
+@@ -548,7 +551,7 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
+ 		if (READ_ONCE(pcl->compressed_bvecs[i].page))
+ 			continue;
+ 
+-		page = find_get_page(mc, pcl->obj.index + i);
++		page = find_get_page(mc, pcl->index + i);
+ 		if (!page) {
+ 			/* I/O is needed, no possible to decompress directly */
+ 			standalone = false;
+@@ -564,13 +567,13 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
+ 				continue;
+ 			set_page_private(newpage, Z_EROFS_PREALLOCATED_PAGE);
+ 		}
+-		spin_lock(&pcl->obj.lockref.lock);
++		spin_lock(&pcl->lockref.lock);
+ 		if (!pcl->compressed_bvecs[i].page) {
+ 			pcl->compressed_bvecs[i].page = page ? page : newpage;
+-			spin_unlock(&pcl->obj.lockref.lock);
++			spin_unlock(&pcl->lockref.lock);
+ 			continue;
+ 		}
+-		spin_unlock(&pcl->obj.lockref.lock);
++		spin_unlock(&pcl->lockref.lock);
+ 
+ 		if (page)
+ 			put_page(page);
+@@ -587,11 +590,9 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
+ }
+ 
+ /* (erofs_shrinker) disconnect cached encoded data with pclusters */
+-int erofs_try_to_free_all_cached_folios(struct erofs_sb_info *sbi,
+-					struct erofs_workgroup *grp)
++static int erofs_try_to_free_all_cached_folios(struct erofs_sb_info *sbi,
++					       struct z_erofs_pcluster *pcl)
+ {
+-	struct z_erofs_pcluster *const pcl =
+-		container_of(grp, struct z_erofs_pcluster, obj);
+ 	unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
+ 	struct folio *folio;
+ 	int i;
+@@ -626,8 +627,8 @@ static bool z_erofs_cache_release_folio(struct folio *folio, gfp_t gfp)
+ 		return true;
+ 
+ 	ret = false;
+-	spin_lock(&pcl->obj.lockref.lock);
+-	if (pcl->obj.lockref.count <= 0) {
++	spin_lock(&pcl->lockref.lock);
++	if (pcl->lockref.count <= 0) {
+ 		DBG_BUGON(z_erofs_is_inline_pcluster(pcl));
+ 		for (; bvec < end; ++bvec) {
+ 			if (bvec->page && page_folio(bvec->page) == folio) {
+@@ -638,7 +639,7 @@ static bool z_erofs_cache_release_folio(struct folio *folio, gfp_t gfp)
+ 			}
+ 		}
+ 	}
+-	spin_unlock(&pcl->obj.lockref.lock);
++	spin_unlock(&pcl->lockref.lock);
+ 	return ret;
+ }
+ 
+@@ -689,15 +690,15 @@ static int z_erofs_attach_page(struct z_erofs_decompress_frontend *fe,
+ 
+ 	if (exclusive) {
+ 		/* give priority for inplaceio to use file pages first */
+-		spin_lock(&pcl->obj.lockref.lock);
++		spin_lock(&pcl->lockref.lock);
+ 		while (fe->icur > 0) {
+ 			if (pcl->compressed_bvecs[--fe->icur].page)
+ 				continue;
+ 			pcl->compressed_bvecs[fe->icur] = *bvec;
+-			spin_unlock(&pcl->obj.lockref.lock);
++			spin_unlock(&pcl->lockref.lock);
+ 			return 0;
+ 		}
+-		spin_unlock(&pcl->obj.lockref.lock);
++		spin_unlock(&pcl->lockref.lock);
+ 
+ 		/* otherwise, check if it can be used as a bvpage */
+ 		if (fe->mode >= Z_EROFS_PCLUSTER_FOLLOWED &&
+@@ -710,13 +711,30 @@ static int z_erofs_attach_page(struct z_erofs_decompress_frontend *fe,
+ 	return ret;
+ }
+ 
++static bool z_erofs_get_pcluster(struct z_erofs_pcluster *pcl)
++{
++	if (lockref_get_not_zero(&pcl->lockref))
++		return true;
++
++	spin_lock(&pcl->lockref.lock);
++	if (__lockref_is_dead(&pcl->lockref)) {
++		spin_unlock(&pcl->lockref.lock);
++		return false;
++	}
++
++	if (!pcl->lockref.count++)
++		atomic_long_dec(&erofs_global_shrink_cnt);
++	spin_unlock(&pcl->lockref.lock);
++	return true;
++}
++
+ static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
+ {
+ 	struct erofs_map_blocks *map = &fe->map;
+ 	struct super_block *sb = fe->inode->i_sb;
++	struct erofs_sb_info *sbi = EROFS_SB(sb);
+ 	bool ztailpacking = map->m_flags & EROFS_MAP_META;
+-	struct z_erofs_pcluster *pcl;
+-	struct erofs_workgroup *grp;
++	struct z_erofs_pcluster *pcl, *pre;
+ 	int err;
+ 
+ 	if (!(map->m_flags & EROFS_MAP_ENCODED) ||
+@@ -730,8 +748,8 @@ static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
+ 	if (IS_ERR(pcl))
+ 		return PTR_ERR(pcl);
+ 
+-	spin_lock_init(&pcl->obj.lockref.lock);
+-	pcl->obj.lockref.count = 1;	/* one ref for this request */
++	spin_lock_init(&pcl->lockref.lock);
++	pcl->lockref.count = 1;		/* one ref for this request */
+ 	pcl->algorithmformat = map->m_algorithmformat;
+ 	pcl->length = 0;
+ 	pcl->partial = true;
+@@ -749,19 +767,26 @@ static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
+ 	DBG_BUGON(!mutex_trylock(&pcl->lock));
+ 
+ 	if (ztailpacking) {
+-		pcl->obj.index = 0;	/* which indicates ztailpacking */
++		pcl->index = 0;		/* which indicates ztailpacking */
+ 	} else {
+-		pcl->obj.index = erofs_blknr(sb, map->m_pa);
+-
+-		grp = erofs_insert_workgroup(fe->inode->i_sb, &pcl->obj);
+-		if (IS_ERR(grp)) {
+-			err = PTR_ERR(grp);
+-			goto err_out;
++		pcl->index = erofs_blknr(sb, map->m_pa);
++		while (1) {
++			xa_lock(&sbi->managed_pslots);
++			pre = __xa_cmpxchg(&sbi->managed_pslots, pcl->index,
++					   NULL, pcl, GFP_KERNEL);
++			if (!pre || xa_is_err(pre) || z_erofs_get_pcluster(pre)) {
++				xa_unlock(&sbi->managed_pslots);
++				break;
++			}
++			/* try to legitimize the current in-tree one */
++			xa_unlock(&sbi->managed_pslots);
++			cond_resched();
+ 		}
+-
+-		if (grp != &pcl->obj) {
+-			fe->pcl = container_of(grp,
+-					struct z_erofs_pcluster, obj);
++		if (xa_is_err(pre)) {
++			err = xa_err(pre);
++			goto err_out;
++		} else if (pre) {
++			fe->pcl = pre;
+ 			err = -EEXIST;
+ 			goto err_out;
+ 		}
+@@ -781,7 +806,7 @@ static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe)
+ 	struct erofs_map_blocks *map = &fe->map;
+ 	struct super_block *sb = fe->inode->i_sb;
+ 	erofs_blk_t blknr = erofs_blknr(sb, map->m_pa);
+-	struct erofs_workgroup *grp = NULL;
++	struct z_erofs_pcluster *pcl = NULL;
+ 	int ret;
+ 
+ 	DBG_BUGON(fe->pcl);
+@@ -789,14 +814,23 @@ static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe)
+ 	DBG_BUGON(fe->owned_head == Z_EROFS_PCLUSTER_NIL);
+ 
+ 	if (!(map->m_flags & EROFS_MAP_META)) {
+-		grp = erofs_find_workgroup(sb, blknr);
++		while (1) {
++			rcu_read_lock();
++			pcl = xa_load(&EROFS_SB(sb)->managed_pslots, blknr);
++			if (!pcl || z_erofs_get_pcluster(pcl)) {
++				DBG_BUGON(pcl && blknr != pcl->index);
++				rcu_read_unlock();
++				break;
++			}
++			rcu_read_unlock();
++		}
+ 	} else if ((map->m_pa & ~PAGE_MASK) + map->m_plen > PAGE_SIZE) {
+ 		DBG_BUGON(1);
+ 		return -EFSCORRUPTED;
+ 	}
+ 
+-	if (grp) {
+-		fe->pcl = container_of(grp, struct z_erofs_pcluster, obj);
++	if (pcl) {
++		fe->pcl = pcl;
+ 		ret = -EEXIST;
+ 	} else {
+ 		ret = z_erofs_register_pcluster(fe);
+@@ -851,12 +885,72 @@ static void z_erofs_rcu_callback(struct rcu_head *head)
+ 			struct z_erofs_pcluster, rcu));
+ }
+ 
+-void erofs_workgroup_free_rcu(struct erofs_workgroup *grp)
++static bool erofs_try_to_release_pcluster(struct erofs_sb_info *sbi,
++					  struct z_erofs_pcluster *pcl)
+ {
+-	struct z_erofs_pcluster *const pcl =
+-		container_of(grp, struct z_erofs_pcluster, obj);
++	int free = false;
++
++	spin_lock(&pcl->lockref.lock);
++	if (pcl->lockref.count)
++		goto out;
++
++	/*
++	 * Note that all cached folios should be detached before being deleted
++	 * from the XArray.  Otherwise some folios could still be attached to
++	 * the orphan old pcluster when the new one is available in the tree.
++	 */
++	if (erofs_try_to_free_all_cached_folios(sbi, pcl))
++		goto out;
++
++	/*
++	 * It's impossible to fail after the pcluster is frozen, but in order
++	 * to avoid some race conditions, add a DBG_BUGON to observe this.
++	 */
++	DBG_BUGON(__xa_erase(&sbi->managed_pslots, pcl->index) != pcl);
++
++	lockref_mark_dead(&pcl->lockref);
++	free = true;
++out:
++	spin_unlock(&pcl->lockref.lock);
++	if (free) {
++		atomic_long_dec(&erofs_global_shrink_cnt);
++		call_rcu(&pcl->rcu, z_erofs_rcu_callback);
++	}
++	return free;
++}
++
++unsigned long z_erofs_shrink_scan(struct erofs_sb_info *sbi,
++				  unsigned long nr_shrink)
++{
++	struct z_erofs_pcluster *pcl;
++	unsigned long index, freed = 0;
++
++	xa_lock(&sbi->managed_pslots);
++	xa_for_each(&sbi->managed_pslots, index, pcl) {
++		/* try to shrink each valid pcluster */
++		if (!erofs_try_to_release_pcluster(sbi, pcl))
++			continue;
++		xa_unlock(&sbi->managed_pslots);
++
++		++freed;
++		if (!--nr_shrink)
++			return freed;
++		xa_lock(&sbi->managed_pslots);
++	}
++	xa_unlock(&sbi->managed_pslots);
++	return freed;
++}
++
++static void z_erofs_put_pcluster(struct z_erofs_pcluster *pcl)
++{
++	if (lockref_put_or_lock(&pcl->lockref))
++		return;
+ 
+-	call_rcu(&pcl->rcu, z_erofs_rcu_callback);
++	DBG_BUGON(__lockref_is_dead(&pcl->lockref));
++	if (pcl->lockref.count == 1)
++		atomic_long_inc(&erofs_global_shrink_cnt);
++	--pcl->lockref.count;
++	spin_unlock(&pcl->lockref.lock);
+ }
+ 
+ static void z_erofs_pcluster_end(struct z_erofs_decompress_frontend *fe)
+@@ -877,7 +971,7 @@ static void z_erofs_pcluster_end(struct z_erofs_decompress_frontend *fe)
+ 	 * any longer if the pcluster isn't hosted by ourselves.
+ 	 */
+ 	if (fe->mode < Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE)
+-		erofs_workgroup_put(&pcl->obj);
++		z_erofs_put_pcluster(pcl);
+ 
+ 	fe->pcl = NULL;
+ }
+@@ -1309,7 +1403,7 @@ static int z_erofs_decompress_queue(const struct z_erofs_decompressqueue *io,
+ 		if (z_erofs_is_inline_pcluster(be.pcl))
+ 			z_erofs_free_pcluster(be.pcl);
+ 		else
+-			erofs_workgroup_put(&be.pcl->obj);
++			z_erofs_put_pcluster(be.pcl);
+ 	}
+ 	return err;
+ }
+@@ -1391,9 +1485,9 @@ static void z_erofs_fill_bio_vec(struct bio_vec *bvec,
+ 	bvec->bv_offset = 0;
+ 	bvec->bv_len = PAGE_SIZE;
+ repeat:
+-	spin_lock(&pcl->obj.lockref.lock);
++	spin_lock(&pcl->lockref.lock);
+ 	zbv = pcl->compressed_bvecs[nr];
+-	spin_unlock(&pcl->obj.lockref.lock);
++	spin_unlock(&pcl->lockref.lock);
+ 	if (!zbv.page)
+ 		goto out_allocfolio;
+ 
+@@ -1455,23 +1549,23 @@ static void z_erofs_fill_bio_vec(struct bio_vec *bvec,
+ 	folio_put(folio);
+ out_allocfolio:
+ 	page = __erofs_allocpage(&f->pagepool, gfp, true);
+-	spin_lock(&pcl->obj.lockref.lock);
++	spin_lock(&pcl->lockref.lock);
+ 	if (unlikely(pcl->compressed_bvecs[nr].page != zbv.page)) {
+ 		if (page)
+ 			erofs_pagepool_add(&f->pagepool, page);
+-		spin_unlock(&pcl->obj.lockref.lock);
++		spin_unlock(&pcl->lockref.lock);
+ 		cond_resched();
+ 		goto repeat;
+ 	}
+ 	pcl->compressed_bvecs[nr].page = page ? page : ERR_PTR(-ENOMEM);
+-	spin_unlock(&pcl->obj.lockref.lock);
++	spin_unlock(&pcl->lockref.lock);
+ 	bvec->bv_page = page;
+ 	if (!page)
+ 		return;
+ 	folio = page_folio(page);
+ out_tocache:
+ 	if (!tocache || bs != PAGE_SIZE ||
+-	    filemap_add_folio(mc, folio, pcl->obj.index + nr, gfp)) {
++	    filemap_add_folio(mc, folio, pcl->index + nr, gfp)) {
+ 		/* turn into a temporary shortlived folio (1 ref) */
+ 		folio->private = (void *)Z_EROFS_SHORTLIVED_PAGE;
+ 		return;
+@@ -1603,7 +1697,7 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
+ 
+ 		/* no device id here, thus it will always succeed */
+ 		mdev = (struct erofs_map_dev) {
+-			.m_pa = erofs_pos(sb, pcl->obj.index),
++			.m_pa = erofs_pos(sb, pcl->index),
+ 		};
+ 		(void)erofs_map_dev(sb, &mdev);
+ 
+diff --git a/fs/erofs/zutil.c b/fs/erofs/zutil.c
+index 37afe202484091..75704f58ecfa92 100644
+--- a/fs/erofs/zutil.c
++++ b/fs/erofs/zutil.c
+@@ -2,6 +2,7 @@
+ /*
+  * Copyright (C) 2018 HUAWEI, Inc.
+  *             https://www.huawei.com/
++ * Copyright (C) 2024 Alibaba Cloud
+  */
+ #include "internal.h"
+ 
+@@ -19,13 +20,12 @@ static unsigned int z_erofs_gbuf_count, z_erofs_gbuf_nrpages,
+ module_param_named(global_buffers, z_erofs_gbuf_count, uint, 0444);
+ module_param_named(reserved_pages, z_erofs_rsv_nrpages, uint, 0444);
+ 
+-static atomic_long_t erofs_global_shrink_cnt;	/* for all mounted instances */
+-/* protected by 'erofs_sb_list_lock' */
+-static unsigned int shrinker_run_no;
++atomic_long_t erofs_global_shrink_cnt;	/* for all mounted instances */
+ 
+-/* protects the mounted 'erofs_sb_list' */
++/* protects `shrinker_run_no` and the mounted `erofs_sb_list` */
+ static DEFINE_SPINLOCK(erofs_sb_list_lock);
+ static LIST_HEAD(erofs_sb_list);
++static unsigned int shrinker_run_no;
+ static struct shrinker *erofs_shrinker_info;
+ 
+ static unsigned int z_erofs_gbuf_id(void)
+@@ -214,145 +214,6 @@ void erofs_release_pages(struct page **pagepool)
+ 	}
+ }
+ 
+-static bool erofs_workgroup_get(struct erofs_workgroup *grp)
+-{
+-	if (lockref_get_not_zero(&grp->lockref))
+-		return true;
+-
+-	spin_lock(&grp->lockref.lock);
+-	if (__lockref_is_dead(&grp->lockref)) {
+-		spin_unlock(&grp->lockref.lock);
+-		return false;
+-	}
+-
+-	if (!grp->lockref.count++)
+-		atomic_long_dec(&erofs_global_shrink_cnt);
+-	spin_unlock(&grp->lockref.lock);
+-	return true;
+-}
+-
+-struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
+-					     pgoff_t index)
+-{
+-	struct erofs_sb_info *sbi = EROFS_SB(sb);
+-	struct erofs_workgroup *grp;
+-
+-repeat:
+-	rcu_read_lock();
+-	grp = xa_load(&sbi->managed_pslots, index);
+-	if (grp) {
+-		if (!erofs_workgroup_get(grp)) {
+-			/* prefer to relax rcu read side */
+-			rcu_read_unlock();
+-			goto repeat;
+-		}
+-
+-		DBG_BUGON(index != grp->index);
+-	}
+-	rcu_read_unlock();
+-	return grp;
+-}
+-
+-struct erofs_workgroup *erofs_insert_workgroup(struct super_block *sb,
+-					       struct erofs_workgroup *grp)
+-{
+-	struct erofs_sb_info *const sbi = EROFS_SB(sb);
+-	struct erofs_workgroup *pre;
+-
+-	DBG_BUGON(grp->lockref.count < 1);
+-repeat:
+-	xa_lock(&sbi->managed_pslots);
+-	pre = __xa_cmpxchg(&sbi->managed_pslots, grp->index,
+-			   NULL, grp, GFP_KERNEL);
+-	if (pre) {
+-		if (xa_is_err(pre)) {
+-			pre = ERR_PTR(xa_err(pre));
+-		} else if (!erofs_workgroup_get(pre)) {
+-			/* try to legitimize the current in-tree one */
+-			xa_unlock(&sbi->managed_pslots);
+-			cond_resched();
+-			goto repeat;
+-		}
+-		grp = pre;
+-	}
+-	xa_unlock(&sbi->managed_pslots);
+-	return grp;
+-}
+-
+-static void  __erofs_workgroup_free(struct erofs_workgroup *grp)
+-{
+-	atomic_long_dec(&erofs_global_shrink_cnt);
+-	erofs_workgroup_free_rcu(grp);
+-}
+-
+-void erofs_workgroup_put(struct erofs_workgroup *grp)
+-{
+-	if (lockref_put_or_lock(&grp->lockref))
+-		return;
+-
+-	DBG_BUGON(__lockref_is_dead(&grp->lockref));
+-	if (grp->lockref.count == 1)
+-		atomic_long_inc(&erofs_global_shrink_cnt);
+-	--grp->lockref.count;
+-	spin_unlock(&grp->lockref.lock);
+-}
+-
+-static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
+-					   struct erofs_workgroup *grp)
+-{
+-	int free = false;
+-
+-	spin_lock(&grp->lockref.lock);
+-	if (grp->lockref.count)
+-		goto out;
+-
+-	/*
+-	 * Note that all cached pages should be detached before deleted from
+-	 * the XArray. Otherwise some cached pages could be still attached to
+-	 * the orphan old workgroup when the new one is available in the tree.
+-	 */
+-	if (erofs_try_to_free_all_cached_folios(sbi, grp))
+-		goto out;
+-
+-	/*
+-	 * It's impossible to fail after the workgroup is freezed,
+-	 * however in order to avoid some race conditions, add a
+-	 * DBG_BUGON to observe this in advance.
+-	 */
+-	DBG_BUGON(__xa_erase(&sbi->managed_pslots, grp->index) != grp);
+-
+-	lockref_mark_dead(&grp->lockref);
+-	free = true;
+-out:
+-	spin_unlock(&grp->lockref.lock);
+-	if (free)
+-		__erofs_workgroup_free(grp);
+-	return free;
+-}
+-
+-static unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
+-					      unsigned long nr_shrink)
+-{
+-	struct erofs_workgroup *grp;
+-	unsigned int freed = 0;
+-	unsigned long index;
+-
+-	xa_lock(&sbi->managed_pslots);
+-	xa_for_each(&sbi->managed_pslots, index, grp) {
+-		/* try to shrink each valid workgroup */
+-		if (!erofs_try_to_release_workgroup(sbi, grp))
+-			continue;
+-		xa_unlock(&sbi->managed_pslots);
+-
+-		++freed;
+-		if (!--nr_shrink)
+-			return freed;
+-		xa_lock(&sbi->managed_pslots);
+-	}
+-	xa_unlock(&sbi->managed_pslots);
+-	return freed;
+-}
+-
+ void erofs_shrinker_register(struct super_block *sb)
+ {
+ 	struct erofs_sb_info *sbi = EROFS_SB(sb);
+@@ -369,8 +230,8 @@ void erofs_shrinker_unregister(struct super_block *sb)
+ 	struct erofs_sb_info *const sbi = EROFS_SB(sb);
+ 
+ 	mutex_lock(&sbi->umount_mutex);
+-	/* clean up all remaining workgroups in memory */
+-	erofs_shrink_workstation(sbi, ~0UL);
++	/* clean up all remaining pclusters in memory */
++	z_erofs_shrink_scan(sbi, ~0UL);
+ 
+ 	spin_lock(&erofs_sb_list_lock);
+ 	list_del(&sbi->list);
+@@ -418,9 +279,7 @@ static unsigned long erofs_shrink_scan(struct shrinker *shrink,
+ 
+ 		spin_unlock(&erofs_sb_list_lock);
+ 		sbi->shrinker_run_no = run_no;
+-
+-		freed += erofs_shrink_workstation(sbi, nr - freed);
+-
++		freed += z_erofs_shrink_scan(sbi, nr - freed);
+ 		spin_lock(&erofs_sb_list_lock);
+ 		/* Get the next list element before we move this one */
+ 		p = p->next;
+diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
+index 47a5c806cf1628..54dd52de7269da 100644
+--- a/fs/f2fs/dir.c
++++ b/fs/f2fs/dir.c
+@@ -175,7 +175,8 @@ static unsigned long dir_block_index(unsigned int level,
+ static struct f2fs_dir_entry *find_in_block(struct inode *dir,
+ 				struct page *dentry_page,
+ 				const struct f2fs_filename *fname,
+-				int *max_slots)
++				int *max_slots,
++				bool use_hash)
+ {
+ 	struct f2fs_dentry_block *dentry_blk;
+ 	struct f2fs_dentry_ptr d;
+@@ -183,7 +184,7 @@ static struct f2fs_dir_entry *find_in_block(struct inode *dir,
+ 	dentry_blk = (struct f2fs_dentry_block *)page_address(dentry_page);
+ 
+ 	make_dentry_ptr_block(dir, &d, dentry_blk);
+-	return f2fs_find_target_dentry(&d, fname, max_slots);
++	return f2fs_find_target_dentry(&d, fname, max_slots, use_hash);
+ }
+ 
+ static inline int f2fs_match_name(const struct inode *dir,
+@@ -208,7 +209,8 @@ static inline int f2fs_match_name(const struct inode *dir,
+ }
+ 
+ struct f2fs_dir_entry *f2fs_find_target_dentry(const struct f2fs_dentry_ptr *d,
+-			const struct f2fs_filename *fname, int *max_slots)
++			const struct f2fs_filename *fname, int *max_slots,
++			bool use_hash)
+ {
+ 	struct f2fs_dir_entry *de;
+ 	unsigned long bit_pos = 0;
+@@ -231,7 +233,7 @@ struct f2fs_dir_entry *f2fs_find_target_dentry(const struct f2fs_dentry_ptr *d,
+ 			continue;
+ 		}
+ 
+-		if (de->hash_code == fname->hash) {
++		if (!use_hash || de->hash_code == fname->hash) {
+ 			res = f2fs_match_name(d->inode, fname,
+ 					      d->filename[bit_pos],
+ 					      le16_to_cpu(de->name_len));
+@@ -258,11 +260,12 @@ struct f2fs_dir_entry *f2fs_find_target_dentry(const struct f2fs_dentry_ptr *d,
+ static struct f2fs_dir_entry *find_in_level(struct inode *dir,
+ 					unsigned int level,
+ 					const struct f2fs_filename *fname,
+-					struct page **res_page)
++					struct page **res_page,
++					bool use_hash)
+ {
+ 	int s = GET_DENTRY_SLOTS(fname->disk_name.len);
+ 	unsigned int nbucket, nblock;
+-	unsigned int bidx, end_block;
++	unsigned int bidx, end_block, bucket_no;
+ 	struct page *dentry_page;
+ 	struct f2fs_dir_entry *de = NULL;
+ 	pgoff_t next_pgofs;
+@@ -272,8 +275,11 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir,
+ 	nbucket = dir_buckets(level, F2FS_I(dir)->i_dir_level);
+ 	nblock = bucket_blocks(level);
+ 
++	bucket_no = use_hash ? le32_to_cpu(fname->hash) % nbucket : 0;
++
++start_find_bucket:
+ 	bidx = dir_block_index(level, F2FS_I(dir)->i_dir_level,
+-			       le32_to_cpu(fname->hash) % nbucket);
++			       bucket_no);
+ 	end_block = bidx + nblock;
+ 
+ 	while (bidx < end_block) {
+@@ -290,7 +296,7 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir,
+ 			}
+ 		}
+ 
+-		de = find_in_block(dir, dentry_page, fname, &max_slots);
++		de = find_in_block(dir, dentry_page, fname, &max_slots, use_hash);
+ 		if (IS_ERR(de)) {
+ 			*res_page = ERR_CAST(de);
+ 			de = NULL;
+@@ -307,12 +313,18 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir,
+ 		bidx++;
+ 	}
+ 
+-	if (!de && room && F2FS_I(dir)->chash != fname->hash) {
+-		F2FS_I(dir)->chash = fname->hash;
+-		F2FS_I(dir)->clevel = level;
+-	}
++	if (de)
++		return de;
+ 
+-	return de;
++	if (likely(use_hash)) {
++		if (room && F2FS_I(dir)->chash != fname->hash) {
++			F2FS_I(dir)->chash = fname->hash;
++			F2FS_I(dir)->clevel = level;
++		}
++	} else if (++bucket_no < nbucket) {
++		goto start_find_bucket;
++	}
++	return NULL;
+ }
+ 
+ struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir,
+@@ -323,11 +335,15 @@ struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir,
+ 	struct f2fs_dir_entry *de = NULL;
+ 	unsigned int max_depth;
+ 	unsigned int level;
++	bool use_hash = true;
+ 
+ 	*res_page = NULL;
+ 
++#if IS_ENABLED(CONFIG_UNICODE)
++start_find_entry:
++#endif
+ 	if (f2fs_has_inline_dentry(dir)) {
+-		de = f2fs_find_in_inline_dir(dir, fname, res_page);
++		de = f2fs_find_in_inline_dir(dir, fname, res_page, use_hash);
+ 		goto out;
+ 	}
+ 
+@@ -343,11 +359,18 @@ struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir,
+ 	}
+ 
+ 	for (level = 0; level < max_depth; level++) {
+-		de = find_in_level(dir, level, fname, res_page);
++		de = find_in_level(dir, level, fname, res_page, use_hash);
+ 		if (de || IS_ERR(*res_page))
+ 			break;
+ 	}
++
+ out:
++#if IS_ENABLED(CONFIG_UNICODE)
++	if (IS_CASEFOLDED(dir) && !de && use_hash) {
++		use_hash = false;
++		goto start_find_entry;
++	}
++#endif
+ 	/* This is to increase the speed of f2fs_create */
+ 	if (!de)
+ 		F2FS_I(dir)->task = current;
+diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
+index cec3dd205b3df8..b52df8aa95350e 100644
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -3579,7 +3579,8 @@ int f2fs_prepare_lookup(struct inode *dir, struct dentry *dentry,
+ 			struct f2fs_filename *fname);
+ void f2fs_free_filename(struct f2fs_filename *fname);
+ struct f2fs_dir_entry *f2fs_find_target_dentry(const struct f2fs_dentry_ptr *d,
+-			const struct f2fs_filename *fname, int *max_slots);
++			const struct f2fs_filename *fname, int *max_slots,
++			bool use_hash);
+ int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
+ 			unsigned int start_pos, struct fscrypt_str *fstr);
+ void f2fs_do_make_empty_dir(struct inode *inode, struct inode *parent,
+@@ -4199,7 +4200,8 @@ int f2fs_write_inline_data(struct inode *inode, struct folio *folio);
+ int f2fs_recover_inline_data(struct inode *inode, struct page *npage);
+ struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
+ 					const struct f2fs_filename *fname,
+-					struct page **res_page);
++					struct page **res_page,
++					bool use_hash);
+ int f2fs_make_empty_inline_dir(struct inode *inode, struct inode *parent,
+ 			struct page *ipage);
+ int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname,
+diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
+index 005babf1bed1e3..3b91a95d42764f 100644
+--- a/fs/f2fs/inline.c
++++ b/fs/f2fs/inline.c
+@@ -352,7 +352,8 @@ int f2fs_recover_inline_data(struct inode *inode, struct page *npage)
+ 
+ struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
+ 					const struct f2fs_filename *fname,
+-					struct page **res_page)
++					struct page **res_page,
++					bool use_hash)
+ {
+ 	struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
+ 	struct f2fs_dir_entry *de;
+@@ -369,7 +370,7 @@ struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
+ 	inline_dentry = inline_data_addr(dir, ipage);
+ 
+ 	make_dentry_ptr_inline(dir, &d, inline_dentry);
+-	de = f2fs_find_target_dentry(&d, fname, NULL);
++	de = f2fs_find_target_dentry(&d, fname, NULL, use_hash);
+ 	unlock_page(ipage);
+ 	if (IS_ERR(de)) {
+ 		*res_page = ERR_CAST(de);
+diff --git a/fs/file_table.c b/fs/file_table.c
+index eed5ffad9997c2..18735dc8269a10 100644
+--- a/fs/file_table.c
++++ b/fs/file_table.c
+@@ -125,7 +125,7 @@ static struct ctl_table fs_stat_sysctls[] = {
+ 		.data		= &sysctl_nr_open,
+ 		.maxlen		= sizeof(unsigned int),
+ 		.mode		= 0644,
+-		.proc_handler	= proc_dointvec_minmax,
++		.proc_handler	= proc_douintvec_minmax,
+ 		.extra1		= &sysctl_nr_open_min,
+ 		.extra2		= &sysctl_nr_open_max,
+ 	},
+diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
+index 084f6ed2dd7a69..94f3cc42c74035 100644
+--- a/fs/hostfs/hostfs_kern.c
++++ b/fs/hostfs/hostfs_kern.c
+@@ -94,32 +94,17 @@ __uml_setup("hostfs=", hostfs_args,
+ static char *__dentry_name(struct dentry *dentry, char *name)
+ {
+ 	char *p = dentry_path_raw(dentry, name, PATH_MAX);
+-	char *root;
+-	size_t len;
+-	struct hostfs_fs_info *fsi;
+-
+-	fsi = dentry->d_sb->s_fs_info;
+-	root = fsi->host_root_path;
+-	len = strlen(root);
+-	if (IS_ERR(p)) {
+-		__putname(name);
+-		return NULL;
+-	}
+-
+-	/*
+-	 * This function relies on the fact that dentry_path_raw() will place
+-	 * the path name at the end of the provided buffer.
+-	 */
+-	BUG_ON(p + strlen(p) + 1 != name + PATH_MAX);
++	struct hostfs_fs_info *fsi = dentry->d_sb->s_fs_info;
++	char *root = fsi->host_root_path;
++	size_t len = strlen(root);
+ 
+-	strscpy(name, root, PATH_MAX);
+-	if (len > p - name) {
++	if (IS_ERR(p) || len > p - name) {
+ 		__putname(name);
+ 		return NULL;
+ 	}
+ 
+-	if (p > name + len)
+-		strcpy(name + len, p);
++	memcpy(name, root, len);
++	memmove(name + len, p, name + PATH_MAX - p);
+ 
+ 	return name;
+ }
+diff --git a/fs/nfs/localio.c b/fs/nfs/localio.c
+index 637528e6368ef7..21b2b38fae9f3a 100644
+--- a/fs/nfs/localio.c
++++ b/fs/nfs/localio.c
+@@ -331,7 +331,7 @@ nfs_local_pgio_done(struct nfs_pgio_header *hdr, long status)
+ 		hdr->res.op_status = NFS4_OK;
+ 		hdr->task.tk_status = 0;
+ 	} else {
+-		hdr->res.op_status = nfs4_stat_to_errno(status);
++		hdr->res.op_status = nfs_localio_errno_to_nfs4_stat(status);
+ 		hdr->task.tk_status = status;
+ 	}
+ }
+@@ -669,7 +669,7 @@ nfs_local_commit_done(struct nfs_commit_data *data, int status)
+ 		data->task.tk_status = 0;
+ 	} else {
+ 		nfs_reset_boot_verifier(data->inode);
+-		data->res.op_status = nfs4_stat_to_errno(status);
++		data->res.op_status = nfs_localio_errno_to_nfs4_stat(status);
+ 		data->task.tk_status = status;
+ 	}
+ }
+diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
+index 531c9c20ef1d1b..9f0d69e6526443 100644
+--- a/fs/nfs/nfs42proc.c
++++ b/fs/nfs/nfs42proc.c
+@@ -552,7 +552,7 @@ static int nfs42_do_offload_cancel_async(struct file *dst,
+ 		.rpc_message = &msg,
+ 		.callback_ops = &nfs42_offload_cancel_ops,
+ 		.workqueue = nfsiod_workqueue,
+-		.flags = RPC_TASK_ASYNC,
++		.flags = RPC_TASK_ASYNC | RPC_TASK_MOVEABLE,
+ 	};
+ 	int status;
+ 
+diff --git a/fs/nfs/nfs42xdr.c b/fs/nfs/nfs42xdr.c
+index 9e3ae53e220583..becc3149aa9e5c 100644
+--- a/fs/nfs/nfs42xdr.c
++++ b/fs/nfs/nfs42xdr.c
+@@ -144,9 +144,11 @@
+ 					 decode_putfh_maxsz + \
+ 					 decode_offload_cancel_maxsz)
+ #define NFS4_enc_copy_notify_sz		(compound_encode_hdr_maxsz + \
++					 encode_sequence_maxsz + \
+ 					 encode_putfh_maxsz + \
+ 					 encode_copy_notify_maxsz)
+ #define NFS4_dec_copy_notify_sz		(compound_decode_hdr_maxsz + \
++					 decode_sequence_maxsz + \
+ 					 decode_putfh_maxsz + \
+ 					 decode_copy_notify_maxsz)
+ #define NFS4_enc_deallocate_sz		(compound_encode_hdr_maxsz + \
+diff --git a/fs/nfs_common/common.c b/fs/nfs_common/common.c
+index 34a115176f97eb..af09aed09fd279 100644
+--- a/fs/nfs_common/common.c
++++ b/fs/nfs_common/common.c
+@@ -15,7 +15,7 @@ static const struct {
+ 	{ NFS_OK,		0		},
+ 	{ NFSERR_PERM,		-EPERM		},
+ 	{ NFSERR_NOENT,		-ENOENT		},
+-	{ NFSERR_IO,		-errno_NFSERR_IO},
++	{ NFSERR_IO,		-EIO		},
+ 	{ NFSERR_NXIO,		-ENXIO		},
+ /*	{ NFSERR_EAGAIN,	-EAGAIN		}, */
+ 	{ NFSERR_ACCES,		-EACCES		},
+@@ -45,7 +45,6 @@ static const struct {
+ 	{ NFSERR_SERVERFAULT,	-EREMOTEIO	},
+ 	{ NFSERR_BADTYPE,	-EBADTYPE	},
+ 	{ NFSERR_JUKEBOX,	-EJUKEBOX	},
+-	{ -1,			-EIO		}
+ };
+ 
+ /**
+@@ -59,26 +58,29 @@ int nfs_stat_to_errno(enum nfs_stat status)
+ {
+ 	int i;
+ 
+-	for (i = 0; nfs_errtbl[i].stat != -1; i++) {
++	for (i = 0; i < ARRAY_SIZE(nfs_errtbl); i++) {
+ 		if (nfs_errtbl[i].stat == (int)status)
+ 			return nfs_errtbl[i].errno;
+ 	}
+-	return nfs_errtbl[i].errno;
++	return -EIO;
+ }
+ EXPORT_SYMBOL_GPL(nfs_stat_to_errno);
+ 
+ /*
+  * We need to translate between nfs v4 status return values and
+  * the local errno values which may not be the same.
++ *
++ * nfs4_errtbl_common[] is used before the more specialized mappings
++ * available in nfs4_errtbl[] or nfs4_errtbl_localio[].
+  */
+ static const struct {
+ 	int stat;
+ 	int errno;
+-} nfs4_errtbl[] = {
++} nfs4_errtbl_common[] = {
+ 	{ NFS4_OK,		0		},
+ 	{ NFS4ERR_PERM,		-EPERM		},
+ 	{ NFS4ERR_NOENT,	-ENOENT		},
+-	{ NFS4ERR_IO,		-errno_NFSERR_IO},
++	{ NFS4ERR_IO,		-EIO		},
+ 	{ NFS4ERR_NXIO,		-ENXIO		},
+ 	{ NFS4ERR_ACCESS,	-EACCES		},
+ 	{ NFS4ERR_EXIST,	-EEXIST		},
+@@ -98,15 +100,20 @@ static const struct {
+ 	{ NFS4ERR_BAD_COOKIE,	-EBADCOOKIE	},
+ 	{ NFS4ERR_NOTSUPP,	-ENOTSUPP	},
+ 	{ NFS4ERR_TOOSMALL,	-ETOOSMALL	},
+-	{ NFS4ERR_SERVERFAULT,	-EREMOTEIO	},
+ 	{ NFS4ERR_BADTYPE,	-EBADTYPE	},
+-	{ NFS4ERR_LOCKED,	-EAGAIN		},
+ 	{ NFS4ERR_SYMLINK,	-ELOOP		},
+-	{ NFS4ERR_OP_ILLEGAL,	-EOPNOTSUPP	},
+ 	{ NFS4ERR_DEADLOCK,	-EDEADLK	},
++};
++
++static const struct {
++	int stat;
++	int errno;
++} nfs4_errtbl[] = {
++	{ NFS4ERR_SERVERFAULT,	-EREMOTEIO	},
++	{ NFS4ERR_LOCKED,	-EAGAIN		},
++	{ NFS4ERR_OP_ILLEGAL,	-EOPNOTSUPP	},
+ 	{ NFS4ERR_NOXATTR,	-ENODATA	},
+ 	{ NFS4ERR_XATTR2BIG,	-E2BIG		},
+-	{ -1,			-EIO		}
+ };
+ 
+ /*
+@@ -116,7 +123,14 @@ static const struct {
+ int nfs4_stat_to_errno(int stat)
+ {
+ 	int i;
+-	for (i = 0; nfs4_errtbl[i].stat != -1; i++) {
++
++	/* First check nfs4_errtbl_common */
++	for (i = 0; i < ARRAY_SIZE(nfs4_errtbl_common); i++) {
++		if (nfs4_errtbl_common[i].stat == stat)
++			return nfs4_errtbl_common[i].errno;
++	}
++	/* Then check nfs4_errtbl */
++	for (i = 0; i < ARRAY_SIZE(nfs4_errtbl); i++) {
+ 		if (nfs4_errtbl[i].stat == stat)
+ 			return nfs4_errtbl[i].errno;
+ 	}
+@@ -132,3 +146,56 @@ int nfs4_stat_to_errno(int stat)
+ 	return -stat;
+ }
+ EXPORT_SYMBOL_GPL(nfs4_stat_to_errno);
++
++/*
++ * This table is useful for conversion from local errno to NFS error.
++ * It provides more logically correct mappings for use with LOCALIO
++ * (which is focused on converting from errno to NFS status).
++ */
++static const struct {
++	int stat;
++	int errno;
++} nfs4_errtbl_localio[] = {
++	/* Map errors differently than nfs4_errtbl */
++	{ NFS4ERR_IO,		-EREMOTEIO	},
++	{ NFS4ERR_DELAY,	-EAGAIN		},
++	{ NFS4ERR_FBIG,		-E2BIG		},
++	/* Map errors not handled by nfs4_errtbl */
++	{ NFS4ERR_STALE,	-EBADF		},
++	{ NFS4ERR_STALE,	-EOPENSTALE	},
++	{ NFS4ERR_DELAY,	-ETIMEDOUT	},
++	{ NFS4ERR_DELAY,	-ERESTARTSYS	},
++	{ NFS4ERR_DELAY,	-ENOMEM		},
++	{ NFS4ERR_IO,		-ETXTBSY	},
++	{ NFS4ERR_IO,		-EBUSY		},
++	{ NFS4ERR_SERVERFAULT,	-ESERVERFAULT	},
++	{ NFS4ERR_SERVERFAULT,	-ENFILE		},
++	{ NFS4ERR_IO,		-EUCLEAN	},
++	{ NFS4ERR_PERM,		-ENOKEY		},
++};
++
++/*
++ * Convert an errno to an NFS error code for LOCALIO.
++ */
++__u32 nfs_localio_errno_to_nfs4_stat(int errno)
++{
++	int i;
++
++	/* First check nfs4_errtbl_common */
++	for (i = 0; i < ARRAY_SIZE(nfs4_errtbl_common); i++) {
++		if (nfs4_errtbl_common[i].errno == errno)
++			return nfs4_errtbl_common[i].stat;
++	}
++	/* Then check nfs4_errtbl_localio */
++	for (i = 0; i < ARRAY_SIZE(nfs4_errtbl_localio); i++) {
++		if (nfs4_errtbl_localio[i].errno == errno)
++			return nfs4_errtbl_localio[i].stat;
++	}
++	/* If we cannot translate the error, the recovery routines should
++	 * handle it.
++	 * Note: remaining NFSv4 error codes have values > 10000, so they
++	 * should not conflict with native Linux error codes.
++	 */
++	return NFS4ERR_SERVERFAULT;
++}
++EXPORT_SYMBOL_GPL(nfs_localio_errno_to_nfs4_stat);
+diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c
+index f61c58fbf117d3..0cc32e9c71cbf0 100644
+--- a/fs/nilfs2/dir.c
++++ b/fs/nilfs2/dir.c
+@@ -400,7 +400,7 @@ int nilfs_inode_by_name(struct inode *dir, const struct qstr *qstr, ino_t *ino)
+ 	return 0;
+ }
+ 
+-void nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de,
++int nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de,
+ 		    struct folio *folio, struct inode *inode)
+ {
+ 	size_t from = offset_in_folio(folio, de);
+@@ -410,11 +410,15 @@ void nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de,
+ 
+ 	folio_lock(folio);
+ 	err = nilfs_prepare_chunk(folio, from, to);
+-	BUG_ON(err);
++	if (unlikely(err)) {
++		folio_unlock(folio);
++		return err;
++	}
+ 	de->inode = cpu_to_le64(inode->i_ino);
+ 	de->file_type = fs_umode_to_ftype(inode->i_mode);
+ 	nilfs_commit_chunk(folio, mapping, from, to);
+ 	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
++	return 0;
+ }
+ 
+ /*
+@@ -543,7 +547,10 @@ int nilfs_delete_entry(struct nilfs_dir_entry *dir, struct folio *folio)
+ 		from = (char *)pde - kaddr;
+ 	folio_lock(folio);
+ 	err = nilfs_prepare_chunk(folio, from, to);
+-	BUG_ON(err);
++	if (unlikely(err)) {
++		folio_unlock(folio);
++		goto out;
++	}
+ 	if (pde)
+ 		pde->rec_len = nilfs_rec_len_to_disk(to - from);
+ 	dir->inode = 0;
+diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c
+index 1d836a5540f3b1..e02fae6757f126 100644
+--- a/fs/nilfs2/namei.c
++++ b/fs/nilfs2/namei.c
+@@ -406,8 +406,10 @@ static int nilfs_rename(struct mnt_idmap *idmap,
+ 			err = PTR_ERR(new_de);
+ 			goto out_dir;
+ 		}
+-		nilfs_set_link(new_dir, new_de, new_folio, old_inode);
++		err = nilfs_set_link(new_dir, new_de, new_folio, old_inode);
+ 		folio_release_kmap(new_folio, new_de);
++		if (unlikely(err))
++			goto out_dir;
+ 		nilfs_mark_inode_dirty(new_dir);
+ 		inode_set_ctime_current(new_inode);
+ 		if (dir_de)
+@@ -430,28 +432,27 @@ static int nilfs_rename(struct mnt_idmap *idmap,
+ 	 */
+ 	inode_set_ctime_current(old_inode);
+ 
+-	nilfs_delete_entry(old_de, old_folio);
+-
+-	if (dir_de) {
+-		nilfs_set_link(old_inode, dir_de, dir_folio, new_dir);
+-		folio_release_kmap(dir_folio, dir_de);
+-		drop_nlink(old_dir);
++	err = nilfs_delete_entry(old_de, old_folio);
++	if (likely(!err)) {
++		if (dir_de) {
++			err = nilfs_set_link(old_inode, dir_de, dir_folio,
++					     new_dir);
++			drop_nlink(old_dir);
++		}
++		nilfs_mark_inode_dirty(old_dir);
+ 	}
+-	folio_release_kmap(old_folio, old_de);
+-
+-	nilfs_mark_inode_dirty(old_dir);
+ 	nilfs_mark_inode_dirty(old_inode);
+ 
+-	err = nilfs_transaction_commit(old_dir->i_sb);
+-	return err;
+-
+ out_dir:
+ 	if (dir_de)
+ 		folio_release_kmap(dir_folio, dir_de);
+ out_old:
+ 	folio_release_kmap(old_folio, old_de);
+ out:
+-	nilfs_transaction_abort(old_dir->i_sb);
++	if (likely(!err))
++		err = nilfs_transaction_commit(old_dir->i_sb);
++	else
++		nilfs_transaction_abort(old_dir->i_sb);
+ 	return err;
+ }
+ 
+diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h
+index dff241c53fc583..cb6ed54accd7ba 100644
+--- a/fs/nilfs2/nilfs.h
++++ b/fs/nilfs2/nilfs.h
+@@ -261,8 +261,8 @@ struct nilfs_dir_entry *nilfs_find_entry(struct inode *, const struct qstr *,
+ int nilfs_delete_entry(struct nilfs_dir_entry *, struct folio *);
+ int nilfs_empty_dir(struct inode *);
+ struct nilfs_dir_entry *nilfs_dotdot(struct inode *, struct folio **);
+-void nilfs_set_link(struct inode *, struct nilfs_dir_entry *,
+-			   struct folio *, struct inode *);
++int nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de,
++		   struct folio *folio, struct inode *inode);
+ 
+ /* file.c */
+ extern int nilfs_sync_file(struct file *, loff_t, loff_t, int);
+diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
+index 6dd8b854cd1f38..06f18fe86407e4 100644
+--- a/fs/nilfs2/page.c
++++ b/fs/nilfs2/page.c
+@@ -392,6 +392,11 @@ void nilfs_clear_dirty_pages(struct address_space *mapping)
+ /**
+  * nilfs_clear_folio_dirty - discard dirty folio
+  * @folio: dirty folio that will be discarded
++ *
++ * nilfs_clear_folio_dirty() clears working states including dirty state for
++ * the folio and its buffers.  If the folio has buffers, they are cleared
++ * only once it is confirmed that none of the buffer heads are busy (none
++ * have valid references and none are locked).
+  */
+ void nilfs_clear_folio_dirty(struct folio *folio)
+ {
+@@ -399,10 +404,6 @@ void nilfs_clear_folio_dirty(struct folio *folio)
+ 
+ 	BUG_ON(!folio_test_locked(folio));
+ 
+-	folio_clear_uptodate(folio);
+-	folio_clear_mappedtodisk(folio);
+-	folio_clear_checked(folio);
+-
+ 	head = folio_buffers(folio);
+ 	if (head) {
+ 		const unsigned long clear_bits =
+@@ -410,6 +411,25 @@ void nilfs_clear_folio_dirty(struct folio *folio)
+ 			 BIT(BH_Async_Write) | BIT(BH_NILFS_Volatile) |
+ 			 BIT(BH_NILFS_Checked) | BIT(BH_NILFS_Redirected) |
+ 			 BIT(BH_Delay));
++		bool busy, invalidated = false;
++
++recheck_buffers:
++		busy = false;
++		bh = head;
++		do {
++			if (atomic_read(&bh->b_count) | buffer_locked(bh)) {
++				busy = true;
++				break;
++			}
++		} while (bh = bh->b_this_page, bh != head);
++
++		if (busy) {
++			if (invalidated)
++				return;
++			invalidate_bh_lrus();
++			invalidated = true;
++			goto recheck_buffers;
++		}
+ 
+ 		bh = head;
+ 		do {
+@@ -419,6 +439,9 @@ void nilfs_clear_folio_dirty(struct folio *folio)
+ 		} while (bh = bh->b_this_page, bh != head);
+ 	}
+ 
++	folio_clear_uptodate(folio);
++	folio_clear_mappedtodisk(folio);
++	folio_clear_checked(folio);
+ 	__nilfs_clear_folio_dirty(folio);
+ }
+ 
+diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
+index 58725183089733..58a598b548fa28 100644
+--- a/fs/nilfs2/segment.c
++++ b/fs/nilfs2/segment.c
+@@ -734,7 +734,6 @@ static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
+ 		if (!head)
+ 			head = create_empty_buffers(folio,
+ 					i_blocksize(inode), 0);
+-		folio_unlock(folio);
+ 
+ 		bh = head;
+ 		do {
+@@ -744,11 +743,14 @@ static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
+ 			list_add_tail(&bh->b_assoc_buffers, listp);
+ 			ndirties++;
+ 			if (unlikely(ndirties >= nlimit)) {
++				folio_unlock(folio);
+ 				folio_batch_release(&fbatch);
+ 				cond_resched();
+ 				return ndirties;
+ 			}
+ 		} while (bh = bh->b_this_page, bh != head);
++
++		folio_unlock(folio);
+ 	}
+ 	folio_batch_release(&fbatch);
+ 	cond_resched();
+diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c
+index 3404e7a30c330c..15d9acd456ecce 100644
+--- a/fs/ocfs2/quota_global.c
++++ b/fs/ocfs2/quota_global.c
+@@ -761,6 +761,11 @@ static int ocfs2_release_dquot(struct dquot *dquot)
+ 	handle = ocfs2_start_trans(osb,
+ 		ocfs2_calc_qdel_credits(dquot->dq_sb, dquot->dq_id.type));
+ 	if (IS_ERR(handle)) {
++		/*
++		 * Mark dquot as inactive to avoid endless cycle in
++		 * quota_release_workfn().
++		 */
++		clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
+ 		status = PTR_ERR(handle);
+ 		mlog_errno(status);
+ 		goto out_ilock;
+diff --git a/fs/pstore/blk.c b/fs/pstore/blk.c
+index 65b2473e22ff9c..fa6b8cb788a1f2 100644
+--- a/fs/pstore/blk.c
++++ b/fs/pstore/blk.c
+@@ -89,7 +89,7 @@ static struct pstore_device_info *pstore_device_info;
+ 		_##name_ = check_size(name, alignsize);		\
+ 	else							\
+ 		_##name_ = 0;					\
+-	/* Synchronize module parameters with resuls. */	\
++	/* Synchronize module parameters with results. */	\
+ 	name = _##name_ / 1024;					\
+ 	dev->zone.name = _##name_;				\
+ }
+@@ -121,7 +121,7 @@ static int __register_pstore_device(struct pstore_device_info *dev)
+ 	if (pstore_device_info)
+ 		return -EBUSY;
+ 
+-	/* zero means not limit on which backends to attempt to store. */
++	/* zero means no limit on which backends attempt to store. */
+ 	if (!dev->flags)
+ 		dev->flags = UINT_MAX;
+ 
+diff --git a/fs/select.c b/fs/select.c
+index a77907faf2b459..834f438296e2ba 100644
+--- a/fs/select.c
++++ b/fs/select.c
+@@ -787,7 +787,7 @@ static inline int get_sigset_argpack(struct sigset_argpack *to,
+ 	}
+ 	return 0;
+ Efault:
+-	user_access_end();
++	user_read_access_end();
+ 	return -EFAULT;
+ }
+ 
+@@ -1361,7 +1361,7 @@ static inline int get_compat_sigset_argpack(struct compat_sigset_argpack *to,
+ 	}
+ 	return 0;
+ Efault:
+-	user_access_end();
++	user_read_access_end();
+ 	return -EFAULT;
+ }
+ 
+diff --git a/fs/smb/client/cifsacl.c b/fs/smb/client/cifsacl.c
+index c68ad526a4de1b..ebe9a7d7c70e86 100644
+--- a/fs/smb/client/cifsacl.c
++++ b/fs/smb/client/cifsacl.c
+@@ -1395,7 +1395,7 @@ static int build_sec_desc(struct smb_ntsd *pntsd, struct smb_ntsd *pnntsd,
+ #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+ struct smb_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb,
+ 				      const struct cifs_fid *cifsfid, u32 *pacllen,
+-				      u32 __maybe_unused unused)
++				      u32 info)
+ {
+ 	struct smb_ntsd *pntsd = NULL;
+ 	unsigned int xid;
+@@ -1407,7 +1407,7 @@ struct smb_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb,
+ 
+ 	xid = get_xid();
+ 	rc = CIFSSMBGetCIFSACL(xid, tlink_tcon(tlink), cifsfid->netfid, &pntsd,
+-				pacllen);
++				pacllen, info);
+ 	free_xid(xid);
+ 
+ 	cifs_put_tlink(tlink);
+@@ -1419,7 +1419,7 @@ struct smb_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb,
+ }
+ 
+ static struct smb_ntsd *get_cifs_acl_by_path(struct cifs_sb_info *cifs_sb,
+-		const char *path, u32 *pacllen)
++		const char *path, u32 *pacllen, u32 info)
+ {
+ 	struct smb_ntsd *pntsd = NULL;
+ 	int oplock = 0;
+@@ -1446,9 +1446,12 @@ static struct smb_ntsd *get_cifs_acl_by_path(struct cifs_sb_info *cifs_sb,
+ 		.fid = &fid,
+ 	};
+ 
++	if (info & SACL_SECINFO)
++		oparms.desired_access |= SYSTEM_SECURITY;
++
+ 	rc = CIFS_open(xid, &oparms, &oplock, NULL);
+ 	if (!rc) {
+-		rc = CIFSSMBGetCIFSACL(xid, tcon, fid.netfid, &pntsd, pacllen);
++		rc = CIFSSMBGetCIFSACL(xid, tcon, fid.netfid, &pntsd, pacllen, info);
+ 		CIFSSMBClose(xid, tcon, fid.netfid);
+ 	}
+ 
+@@ -1472,7 +1475,7 @@ struct smb_ntsd *get_cifs_acl(struct cifs_sb_info *cifs_sb,
+ 	if (inode)
+ 		open_file = find_readable_file(CIFS_I(inode), true);
+ 	if (!open_file)
+-		return get_cifs_acl_by_path(cifs_sb, path, pacllen);
++		return get_cifs_acl_by_path(cifs_sb, path, pacllen, info);
+ 
+ 	pntsd = get_cifs_acl_by_fid(cifs_sb, &open_file->fid, pacllen, info);
+ 	cifsFileInfo_put(open_file);
+@@ -1485,7 +1488,7 @@ int set_cifs_acl(struct smb_ntsd *pnntsd, __u32 acllen,
+ {
+ 	int oplock = 0;
+ 	unsigned int xid;
+-	int rc, access_flags;
++	int rc, access_flags = 0;
+ 	struct cifs_tcon *tcon;
+ 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+ 	struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
+@@ -1498,10 +1501,12 @@ int set_cifs_acl(struct smb_ntsd *pnntsd, __u32 acllen,
+ 	tcon = tlink_tcon(tlink);
+ 	xid = get_xid();
+ 
+-	if (aclflag == CIFS_ACL_OWNER || aclflag == CIFS_ACL_GROUP)
+-		access_flags = WRITE_OWNER;
+-	else
+-		access_flags = WRITE_DAC;
++	if (aclflag & CIFS_ACL_OWNER || aclflag & CIFS_ACL_GROUP)
++		access_flags |= WRITE_OWNER;
++	if (aclflag & CIFS_ACL_SACL)
++		access_flags |= SYSTEM_SECURITY;
++	if (aclflag & CIFS_ACL_DACL)
++		access_flags |= WRITE_DAC;
+ 
+ 	oparms = (struct cifs_open_parms) {
+ 		.tcon = tcon,
+diff --git a/fs/smb/client/cifsproto.h b/fs/smb/client/cifsproto.h
+index a697e53ccee2be..907af3cbffd1bc 100644
+--- a/fs/smb/client/cifsproto.h
++++ b/fs/smb/client/cifsproto.h
+@@ -568,7 +568,7 @@ extern int CIFSSMBSetEA(const unsigned int xid, struct cifs_tcon *tcon,
+ 		const struct nls_table *nls_codepage,
+ 		struct cifs_sb_info *cifs_sb);
+ extern int CIFSSMBGetCIFSACL(const unsigned int xid, struct cifs_tcon *tcon,
+-			__u16 fid, struct smb_ntsd **acl_inf, __u32 *buflen);
++			__u16 fid, struct smb_ntsd **acl_inf, __u32 *buflen, __u32 info);
+ extern int CIFSSMBSetCIFSACL(const unsigned int, struct cifs_tcon *, __u16,
+ 			struct smb_ntsd *pntsd, __u32 len, int aclflag);
+ extern int cifs_do_get_acl(const unsigned int xid, struct cifs_tcon *tcon,
+diff --git a/fs/smb/client/cifssmb.c b/fs/smb/client/cifssmb.c
+index 0eae60731c20c0..e2a14e25da87ce 100644
+--- a/fs/smb/client/cifssmb.c
++++ b/fs/smb/client/cifssmb.c
+@@ -3427,7 +3427,7 @@ validate_ntransact(char *buf, char **ppparm, char **ppdata,
+ /* Get Security Descriptor (by handle) from remote server for a file or dir */
+ int
+ CIFSSMBGetCIFSACL(const unsigned int xid, struct cifs_tcon *tcon, __u16 fid,
+-		  struct smb_ntsd **acl_inf, __u32 *pbuflen)
++		  struct smb_ntsd **acl_inf, __u32 *pbuflen, __u32 info)
+ {
+ 	int rc = 0;
+ 	int buf_type = 0;
+@@ -3450,7 +3450,7 @@ CIFSSMBGetCIFSACL(const unsigned int xid, struct cifs_tcon *tcon, __u16 fid,
+ 	pSMB->MaxSetupCount = 0;
+ 	pSMB->Fid = fid; /* file handle always le */
+ 	pSMB->AclFlags = cpu_to_le32(CIFS_ACL_OWNER | CIFS_ACL_GROUP |
+-				     CIFS_ACL_DACL);
++				     CIFS_ACL_DACL | info);
+ 	pSMB->ByteCount = cpu_to_le16(11); /* 3 bytes pad + 8 bytes parm */
+ 	inc_rfc1001_len(pSMB, 11);
+ 	iov[0].iov_base = (char *)pSMB;
+diff --git a/fs/smb/client/readdir.c b/fs/smb/client/readdir.c
+index 273358d20a46c9..50f96259d9adc2 100644
+--- a/fs/smb/client/readdir.c
++++ b/fs/smb/client/readdir.c
+@@ -413,7 +413,7 @@ _initiate_cifs_search(const unsigned int xid, struct file *file,
+ 		cifsFile->invalidHandle = false;
+ 	} else if ((rc == -EOPNOTSUPP) &&
+ 		   (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)) {
+-		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM;
++		cifs_autodisable_serverino(cifs_sb);
+ 		goto ffirst_retry;
+ 	}
+ error_exit:
+diff --git a/fs/smb/client/reparse.c b/fs/smb/client/reparse.c
+index d3abb99cc99094..e56a8df23fec9a 100644
+--- a/fs/smb/client/reparse.c
++++ b/fs/smb/client/reparse.c
+@@ -674,11 +674,12 @@ int smb2_parse_reparse_point(struct cifs_sb_info *cifs_sb,
+ 	return parse_reparse_point(buf, plen, cifs_sb, full_path, true, data);
+ }
+ 
+-static void wsl_to_fattr(struct cifs_open_info_data *data,
++static bool wsl_to_fattr(struct cifs_open_info_data *data,
+ 			 struct cifs_sb_info *cifs_sb,
+ 			 u32 tag, struct cifs_fattr *fattr)
+ {
+ 	struct smb2_file_full_ea_info *ea;
++	bool have_xattr_dev = false;
+ 	u32 next = 0;
+ 
+ 	switch (tag) {
+@@ -721,13 +722,24 @@ static void wsl_to_fattr(struct cifs_open_info_data *data,
+ 			fattr->cf_uid = wsl_make_kuid(cifs_sb, v);
+ 		else if (!strncmp(name, SMB2_WSL_XATTR_GID, nlen))
+ 			fattr->cf_gid = wsl_make_kgid(cifs_sb, v);
+-		else if (!strncmp(name, SMB2_WSL_XATTR_MODE, nlen))
++		else if (!strncmp(name, SMB2_WSL_XATTR_MODE, nlen)) {
++			/* File type in reparse point tag and in xattr mode must match. */
++			if (S_DT(fattr->cf_mode) != S_DT(le32_to_cpu(*(__le32 *)v)))
++				return false;
+ 			fattr->cf_mode = (umode_t)le32_to_cpu(*(__le32 *)v);
+-		else if (!strncmp(name, SMB2_WSL_XATTR_DEV, nlen))
++		} else if (!strncmp(name, SMB2_WSL_XATTR_DEV, nlen)) {
+ 			fattr->cf_rdev = reparse_mkdev(v);
++			have_xattr_dev = true;
++		}
+ 	} while (next);
+ out:
++
++	/* Major and minor numbers for char and block devices are mandatory. */
++	if (!have_xattr_dev && (tag == IO_REPARSE_TAG_LX_CHR || tag == IO_REPARSE_TAG_LX_BLK))
++		return false;
++
+ 	fattr->cf_dtype = S_DT(fattr->cf_mode);
++	return true;
+ }
+ 
+ static bool posix_reparse_to_fattr(struct cifs_sb_info *cifs_sb,
+@@ -801,7 +813,9 @@ bool cifs_reparse_point_to_fattr(struct cifs_sb_info *cifs_sb,
+ 	case IO_REPARSE_TAG_AF_UNIX:
+ 	case IO_REPARSE_TAG_LX_CHR:
+ 	case IO_REPARSE_TAG_LX_BLK:
+-		wsl_to_fattr(data, cifs_sb, tag, fattr);
++		ok = wsl_to_fattr(data, cifs_sb, tag, fattr);
++		if (!ok)
++			return false;
+ 		break;
+ 	case IO_REPARSE_TAG_NFS:
+ 		ok = posix_reparse_to_fattr(cifs_sb, fattr, data);
+diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
+index 7571fefeb83aa1..6bacf754b57efd 100644
+--- a/fs/smb/client/smb2ops.c
++++ b/fs/smb/client/smb2ops.c
+@@ -658,7 +658,8 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
+ 
+ 	while (bytes_left >= (ssize_t)sizeof(*p)) {
+ 		memset(&tmp_iface, 0, sizeof(tmp_iface));
+-		tmp_iface.speed = le64_to_cpu(p->LinkSpeed);
++		/* default to 1Gbps when link speed is unset */
++		tmp_iface.speed = le64_to_cpu(p->LinkSpeed) ?: 1000000000;
+ 		tmp_iface.rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE) ? 1 : 0;
+ 		tmp_iface.rss_capable = le32_to_cpu(p->Capability & RSS_CAPABLE) ? 1 : 0;
+ 
+diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c
+index 5cc69beaa62ecf..10a86c02a8b328 100644
+--- a/fs/ubifs/debug.c
++++ b/fs/ubifs/debug.c
+@@ -946,16 +946,20 @@ void ubifs_dump_tnc(struct ubifs_info *c)
+ 
+ 	pr_err("\n");
+ 	pr_err("(pid %d) start dumping TNC tree\n", current->pid);
+-	znode = ubifs_tnc_levelorder_next(c, c->zroot.znode, NULL);
+-	level = znode->level;
+-	pr_err("== Level %d ==\n", level);
+-	while (znode) {
+-		if (level != znode->level) {
+-			level = znode->level;
+-			pr_err("== Level %d ==\n", level);
++	if (c->zroot.znode) {
++		znode = ubifs_tnc_levelorder_next(c, c->zroot.znode, NULL);
++		level = znode->level;
++		pr_err("== Level %d ==\n", level);
++		while (znode) {
++			if (level != znode->level) {
++				level = znode->level;
++				pr_err("== Level %d ==\n", level);
++			}
++			ubifs_dump_znode(c, znode);
++			znode = ubifs_tnc_levelorder_next(c, c->zroot.znode, znode);
+ 		}
+-		ubifs_dump_znode(c, znode);
+-		znode = ubifs_tnc_levelorder_next(c, c->zroot.znode, znode);
++	} else {
++		pr_err("empty TNC tree in memory\n");
+ 	}
+ 	pr_err("(pid %d) finish dumping TNC tree\n", current->pid);
+ }
+diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
+index aa4dbda7b5365e..6bcbdc8bf186da 100644
+--- a/fs/xfs/xfs_buf.c
++++ b/fs/xfs/xfs_buf.c
+@@ -663,9 +663,8 @@ xfs_buf_find_insert(
+ 		spin_unlock(&bch->bc_lock);
+ 		goto out_free_buf;
+ 	}
+-	if (bp) {
++	if (bp && atomic_inc_not_zero(&bp->b_hold)) {
+ 		/* found an existing buffer */
+-		atomic_inc(&bp->b_hold);
+ 		spin_unlock(&bch->bc_lock);
+ 		error = xfs_buf_find_lock(bp, flags);
+ 		if (error)
+diff --git a/fs/xfs/xfs_notify_failure.c b/fs/xfs/xfs_notify_failure.c
+index fa50e5308292d3..0b0b0f31aca274 100644
+--- a/fs/xfs/xfs_notify_failure.c
++++ b/fs/xfs/xfs_notify_failure.c
+@@ -153,6 +153,79 @@ xfs_dax_notify_failure_thaw(
+ 	thaw_super(sb, FREEZE_HOLDER_USERSPACE);
+ }
+ 
++static int
++xfs_dax_translate_range(
++	struct xfs_buftarg	*btp,
++	u64			offset,
++	u64			len,
++	xfs_daddr_t		*daddr,
++	uint64_t		*bblen)
++{
++	u64			dev_start = btp->bt_dax_part_off;
++	u64			dev_len = bdev_nr_bytes(btp->bt_bdev);
++	u64			dev_end = dev_start + dev_len - 1;
++
++	/* Notify failure on the whole device. */
++	if (offset == 0 && len == U64_MAX) {
++		offset = dev_start;
++		len = dev_len;
++	}
++
++	/* Ignore the range out of filesystem area */
++	if (offset + len - 1 < dev_start)
++		return -ENXIO;
++	if (offset > dev_end)
++		return -ENXIO;
++
++	/* Calculate the real range when it touches the boundary */
++	if (offset > dev_start)
++		offset -= dev_start;
++	else {
++		len -= dev_start - offset;
++		offset = 0;
++	}
++	if (offset + len - 1 > dev_end)
++		len = dev_end - offset + 1;
++
++	*daddr = BTOBB(offset);
++	*bblen = BTOBB(len);
++	return 0;
++}
++
++static int
++xfs_dax_notify_logdev_failure(
++	struct xfs_mount	*mp,
++	u64			offset,
++	u64			len,
++	int			mf_flags)
++{
++	xfs_daddr_t		daddr;
++	uint64_t		bblen;
++	int			error;
++
++	/*
++	 * Return ENXIO instead of shutting down the filesystem if the failed
++	 * region is beyond the end of the log.
++	 */
++	error = xfs_dax_translate_range(mp->m_logdev_targp,
++			offset, len, &daddr, &bblen);
++	if (error)
++		return error;
++
++	/*
++	 * In the pre-remove case the failure notification is attempting to
++	 * trigger a force unmount.  The expectation is that the device is
++	 * still present, but its removal is in progress and cannot be
++	 * cancelled; proceed with accessing the log device.
++	 */
++	if (mf_flags & MF_MEM_PRE_REMOVE)
++		return 0;
++
++	xfs_err(mp, "ondisk log corrupt, shutting down fs!");
++	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_ONDISK);
++	return -EFSCORRUPTED;
++}
++
+ static int
+ xfs_dax_notify_ddev_failure(
+ 	struct xfs_mount	*mp,
+@@ -263,8 +336,9 @@ xfs_dax_notify_failure(
+ 	int			mf_flags)
+ {
+ 	struct xfs_mount	*mp = dax_holder(dax_dev);
+-	u64			ddev_start;
+-	u64			ddev_end;
++	xfs_daddr_t		daddr;
++	uint64_t		bblen;
++	int			error;
+ 
+ 	if (!(mp->m_super->s_flags & SB_BORN)) {
+ 		xfs_warn(mp, "filesystem is not ready for notify_failure()!");
+@@ -279,17 +353,7 @@ xfs_dax_notify_failure(
+ 
+ 	if (mp->m_logdev_targp && mp->m_logdev_targp->bt_daxdev == dax_dev &&
+ 	    mp->m_logdev_targp != mp->m_ddev_targp) {
+-		/*
+-		 * In the pre-remove case the failure notification is attempting
+-		 * to trigger a force unmount.  The expectation is that the
+-		 * device is still present, but its removal is in progress and
+-		 * can not be cancelled, proceed with accessing the log device.
+-		 */
+-		if (mf_flags & MF_MEM_PRE_REMOVE)
+-			return 0;
+-		xfs_err(mp, "ondisk log corrupt, shutting down fs!");
+-		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_ONDISK);
+-		return -EFSCORRUPTED;
++		return xfs_dax_notify_logdev_failure(mp, offset, len, mf_flags);
+ 	}
+ 
+ 	if (!xfs_has_rmapbt(mp)) {
+@@ -297,33 +361,12 @@ xfs_dax_notify_failure(
+ 		return -EOPNOTSUPP;
+ 	}
+ 
+-	ddev_start = mp->m_ddev_targp->bt_dax_part_off;
+-	ddev_end = ddev_start + bdev_nr_bytes(mp->m_ddev_targp->bt_bdev) - 1;
+-
+-	/* Notify failure on the whole device. */
+-	if (offset == 0 && len == U64_MAX) {
+-		offset = ddev_start;
+-		len = bdev_nr_bytes(mp->m_ddev_targp->bt_bdev);
+-	}
+-
+-	/* Ignore the range out of filesystem area */
+-	if (offset + len - 1 < ddev_start)
+-		return -ENXIO;
+-	if (offset > ddev_end)
+-		return -ENXIO;
+-
+-	/* Calculate the real range when it touches the boundary */
+-	if (offset > ddev_start)
+-		offset -= ddev_start;
+-	else {
+-		len -= ddev_start - offset;
+-		offset = 0;
+-	}
+-	if (offset + len - 1 > ddev_end)
+-		len = ddev_end - offset + 1;
++	error = xfs_dax_translate_range(mp->m_ddev_targp, offset, len, &daddr,
++			&bblen);
++	if (error)
++		return error;
+ 
+-	return xfs_dax_notify_ddev_failure(mp, BTOBB(offset), BTOBB(len),
+-			mf_flags);
++	return xfs_dax_notify_ddev_failure(mp, daddr, bblen, mf_flags);
+ }
+ 
+ const struct dax_holder_operations xfs_dax_holder_operations = {
+diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
+index d076ebd19a61e8..78b24b09048885 100644
+--- a/include/acpi/acpixf.h
++++ b/include/acpi/acpixf.h
+@@ -763,6 +763,7 @@ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+ 						     *event_status))
+ ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_dispatch_gpe(acpi_handle gpe_device, u32 gpe_number))
+ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_hw_disable_all_gpes(void))
++ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_hw_enable_all_wakeup_gpes(void))
+ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_disable_all_gpes(void))
+ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_runtime_gpes(void))
+ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_wakeup_gpes(void))
+diff --git a/include/dt-bindings/clock/imx93-clock.h b/include/dt-bindings/clock/imx93-clock.h
+index 787c9e74dc96db..c393fad3a3469c 100644
+--- a/include/dt-bindings/clock/imx93-clock.h
++++ b/include/dt-bindings/clock/imx93-clock.h
+@@ -204,6 +204,11 @@
+ #define IMX93_CLK_A55_SEL		199
+ #define IMX93_CLK_A55_CORE		200
+ #define IMX93_CLK_PDM_IPG		201
+-#define IMX93_CLK_END			202
++#define IMX91_CLK_ENET1_QOS_TSN     202
++#define IMX91_CLK_ENET_TIMER        203
++#define IMX91_CLK_ENET2_REGULAR     204
++#define IMX91_CLK_ENET2_REGULAR_GATE		205
++#define IMX91_CLK_ENET1_QOS_TSN_GATE		206
++#define IMX93_CLK_SPDIF_IPG		207
+ 
+ #endif
+diff --git a/include/dt-bindings/clock/sun50i-a64-ccu.h b/include/dt-bindings/clock/sun50i-a64-ccu.h
+index 175892189e9dcb..4f220ea7a23cc5 100644
+--- a/include/dt-bindings/clock/sun50i-a64-ccu.h
++++ b/include/dt-bindings/clock/sun50i-a64-ccu.h
+@@ -44,7 +44,9 @@
+ #define _DT_BINDINGS_CLK_SUN50I_A64_H_
+ 
+ #define CLK_PLL_VIDEO0		7
++#define CLK_PLL_VIDEO0_2X	8
+ #define CLK_PLL_PERIPH0		11
++#define CLK_PLL_MIPI		17
+ 
+ #define CLK_CPUX		21
+ #define CLK_BUS_MIPI_DSI	28
+diff --git a/include/linux/btf.h b/include/linux/btf.h
+index b8a583194c4a97..d99178ce01d21d 100644
+--- a/include/linux/btf.h
++++ b/include/linux/btf.h
+@@ -352,6 +352,11 @@ static inline bool btf_type_is_scalar(const struct btf_type *t)
+ 	return btf_type_is_int(t) || btf_type_is_enum(t);
+ }
+ 
++static inline bool btf_type_is_fwd(const struct btf_type *t)
++{
++	return BTF_INFO_KIND(t->info) == BTF_KIND_FWD;
++}
++
+ static inline bool btf_type_is_typedef(const struct btf_type *t)
+ {
+ 	return BTF_INFO_KIND(t->info) == BTF_KIND_TYPEDEF;
+diff --git a/include/linux/coredump.h b/include/linux/coredump.h
+index 45e598fe34766f..77e6e195d1d687 100644
+--- a/include/linux/coredump.h
++++ b/include/linux/coredump.h
+@@ -52,8 +52,8 @@ extern void do_coredump(const kernel_siginfo_t *siginfo);
+ #define __COREDUMP_PRINTK(Level, Format, ...) \
+ 	do {	\
+ 		char comm[TASK_COMM_LEN];	\
+-	\
+-		get_task_comm(comm, current);	\
++		/* This will always be NUL terminated. */ \
++		memcpy(comm, current->comm, sizeof(comm)); \
+ 		printk_ratelimited(Level "coredump: %d(%*pE): " Format "\n",	\
+ 			task_tgid_vnr(current), (int)strlen(comm), comm, ##__VA_ARGS__);	\
+ 	} while (0)	\
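The coredump hunk above swaps get_task_comm() for a fixed-size memcpy(): current->comm is maintained as always NUL-terminated, so copying the whole buffer needs no length computation and, presumably the point of the change, no locking. A minimal runnable userspace sketch of that pattern (toy names, not kernel code):

#include <stdio.h>
#include <string.h>

#define COMM_LEN 16	/* stands in for TASK_COMM_LEN */

int main(void)
{
	char src[COMM_LEN] = "coredump-demo";	/* invariant: always NUL terminated */
	char dst[COMM_LEN];

	/* Copy the whole buffer; the terminator travels with it, so no
	 * strlen(), no truncation handling and no lock are needed. */
	memcpy(dst, src, sizeof(dst));
	printf("%s\n", dst);
	return 0;
}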
+diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
+index 12f6dc5675987b..b8b935b526033f 100644
+--- a/include/linux/ethtool.h
++++ b/include/linux/ethtool.h
+@@ -734,6 +734,9 @@ struct kernel_ethtool_ts_info {
+  * @rxfh_per_ctx_key: device supports setting different RSS key for each
+  *	additional context. Netlink API should report hfunc, key, and input_xfrm
+  *	for every context, not just context 0.
++ * @cap_rss_rxnfc_adds: device supports nonzero ring_cookie in filters with
++ *	%FLOW_RSS flag; the queue ID from the filter is added to the value from
++ *	the indirection table to determine the delivery queue.
+  * @rxfh_indir_space: max size of RSS indirection tables, if indirection table
+  *	size as returned by @get_rxfh_indir_size may change during lifetime
+  *	of the device. Leave as 0 if the table size is constant.
+@@ -956,6 +959,7 @@ struct ethtool_ops {
+ 	u32     cap_rss_ctx_supported:1;
+ 	u32	cap_rss_sym_xor_supported:1;
+ 	u32	rxfh_per_ctx_key:1;
++	u32	cap_rss_rxnfc_adds:1;
+ 	u32	rxfh_indir_space;
+ 	u16	rxfh_key_space;
+ 	u16	rxfh_priv_size;
+diff --git a/include/linux/export.h b/include/linux/export.h
+index 0bbd02fd351db9..1e04dbc675c2fa 100644
+--- a/include/linux/export.h
++++ b/include/linux/export.h
+@@ -60,7 +60,7 @@
+ #endif
+ 
+ #ifdef DEFAULT_SYMBOL_NAMESPACE
+-#define _EXPORT_SYMBOL(sym, license)	__EXPORT_SYMBOL(sym, license, __stringify(DEFAULT_SYMBOL_NAMESPACE))
++#define _EXPORT_SYMBOL(sym, license)	__EXPORT_SYMBOL(sym, license, DEFAULT_SYMBOL_NAMESPACE)
+ #else
+ #define _EXPORT_SYMBOL(sym, license)	__EXPORT_SYMBOL(sym, license, "")
+ #endif
+diff --git a/include/linux/hid.h b/include/linux/hid.h
+index a7d60a1c72a09a..dd33423012538d 100644
+--- a/include/linux/hid.h
++++ b/include/linux/hid.h
+@@ -218,6 +218,7 @@ struct hid_item {
+ #define HID_GD_DOWN		0x00010091
+ #define HID_GD_RIGHT		0x00010092
+ #define HID_GD_LEFT		0x00010093
++#define HID_GD_DO_NOT_DISTURB	0x0001009b
+ /* Microsoft Win8 Wireless Radio Controls CA usage codes */
+ #define HID_GD_RFKILL_BTN	0x000100c6
+ #define HID_GD_RFKILL_LED	0x000100c7
+diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
+index 456bca45ff0528..3750e56bfcbb36 100644
+--- a/include/linux/ieee80211.h
++++ b/include/linux/ieee80211.h
+@@ -5053,28 +5053,24 @@ static inline u8 ieee80211_mle_common_size(const u8 *data)
+ {
+ 	const struct ieee80211_multi_link_elem *mle = (const void *)data;
+ 	u16 control = le16_to_cpu(mle->control);
+-	u8 common = 0;
+ 
+ 	switch (u16_get_bits(control, IEEE80211_ML_CONTROL_TYPE)) {
+ 	case IEEE80211_ML_CONTROL_TYPE_BASIC:
+ 	case IEEE80211_ML_CONTROL_TYPE_PREQ:
+ 	case IEEE80211_ML_CONTROL_TYPE_TDLS:
+ 	case IEEE80211_ML_CONTROL_TYPE_RECONF:
++	case IEEE80211_ML_CONTROL_TYPE_PRIO_ACCESS:
+ 		/*
+ 		 * The length is the first octet pointed by mle->variable so no
+ 		 * need to add anything
+ 		 */
+ 		break;
+-	case IEEE80211_ML_CONTROL_TYPE_PRIO_ACCESS:
+-		if (control & IEEE80211_MLC_PRIO_ACCESS_PRES_AP_MLD_MAC_ADDR)
+-			common += ETH_ALEN;
+-		return common;
+ 	default:
+ 		WARN_ON(1);
+ 		return 0;
+ 	}
+ 
+-	return sizeof(*mle) + common + mle->variable[0];
++	return sizeof(*mle) + mle->variable[0];
+ }
+ 
+ /**
+@@ -5312,8 +5308,7 @@ static inline bool ieee80211_mle_size_ok(const u8 *data, size_t len)
+ 		check_common_len = true;
+ 		break;
+ 	case IEEE80211_ML_CONTROL_TYPE_PRIO_ACCESS:
+-		if (control & IEEE80211_MLC_PRIO_ACCESS_PRES_AP_MLD_MAC_ADDR)
+-			common += ETH_ALEN;
++		common = ETH_ALEN + 1;
+ 		break;
+ 	default:
+ 		/* we don't know this type */
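Both ieee80211.h hunks move the Priority Access variant onto the same rule as the other multi-link element types: the first octet of the variable part is the Common Info Length, which counts itself, so the common size is the fixed header plus that octet, and a valid Priority Access element must carry at least ETH_ALEN + 1 bytes of common info. A simplified, self-contained model of that sizing rule (the buffer layout here is a toy, not the full 802.11 encoding):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static size_t mle_common_size(const uint8_t *elem, size_t hdr_len)
{
	return hdr_len + elem[hdr_len];	/* first variable octet = common length */
}

int main(void)
{
	/* 2-byte control "header", then common-info length 7 (the length
	 * byte itself plus a 6-byte MLD MAC address), then the info. */
	uint8_t elem[] = { 0x10, 0x00, 7, 1, 2, 3, 4, 5, 6 };

	printf("common size: %zu\n", mle_common_size(elem, 2));
	return 0;
}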
+diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
+index c3f075e8f60cb6..1c6a6c1704d8d0 100644
+--- a/include/linux/kallsyms.h
++++ b/include/linux/kallsyms.h
+@@ -57,10 +57,10 @@ static inline void *dereference_symbol_descriptor(void *ptr)
+ 
+ 	preempt_disable();
+ 	mod = __module_address((unsigned long)ptr);
+-	preempt_enable();
+ 
+ 	if (mod)
+ 		ptr = dereference_module_function_descriptor(mod, ptr);
++	preempt_enable();
+ #endif
+ 	return ptr;
+ }
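The kallsyms change is a classic lock-scope fix: __module_address() only pins the module while preemption stays disabled, so dereference_module_function_descriptor() must run before preempt_enable(), not after. The same shape in a self-contained userspace sketch (the mutex and lookup table are stand-ins, not the kernel API):

#include <pthread.h>
#include <stdio.h>

struct object { const char *descriptor; };

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct object entry = { .descriptor = "resolved-descriptor" };

/* Toy lookup; the result is only valid while table_lock is held. */
static struct object *lookup_locked(unsigned long addr)
{
	return addr ? &entry : NULL;
}

static const char *resolve(unsigned long addr)
{
	struct object *obj;
	const char *ptr = "raw-address";

	pthread_mutex_lock(&table_lock);
	obj = lookup_locked(addr);
	if (obj)
		ptr = obj->descriptor;	/* use the object while still protected */
	pthread_mutex_unlock(&table_lock);	/* drop protection only after the use */

	return ptr;
}

int main(void)
{
	printf("%s\n", resolve(0x1000));
	return 0;
}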
+diff --git a/include/linux/mroute_base.h b/include/linux/mroute_base.h
+index 9dd4bf1572553f..58a2401e4b551b 100644
+--- a/include/linux/mroute_base.h
++++ b/include/linux/mroute_base.h
+@@ -146,9 +146,9 @@ struct mr_mfc {
+ 			unsigned long last_assert;
+ 			int minvif;
+ 			int maxvif;
+-			unsigned long bytes;
+-			unsigned long pkt;
+-			unsigned long wrong_if;
++			atomic_long_t bytes;
++			atomic_long_t pkt;
++			atomic_long_t wrong_if;
+ 			unsigned long lastuse;
+ 			unsigned char ttls[MAXVIFS];
+ 			refcount_t refcount;
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index 8896705ccd638b..02d3bafebbe77c 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -2222,7 +2222,7 @@ struct net_device {
+ 	void 			*atalk_ptr;
+ #endif
+ #if IS_ENABLED(CONFIG_AX25)
+-	void			*ax25_ptr;
++	struct ax25_dev	__rcu	*ax25_ptr;
+ #endif
+ #if IS_ENABLED(CONFIG_CFG80211)
+ 	struct wireless_dev	*ieee80211_ptr;
+diff --git a/include/linux/nfs_common.h b/include/linux/nfs_common.h
+index 5fc02df882521e..a541c3a0288750 100644
+--- a/include/linux/nfs_common.h
++++ b/include/linux/nfs_common.h
+@@ -9,9 +9,10 @@
+ #include <uapi/linux/nfs.h>
+ 
+ /* Mapping from NFS error code to "errno" error code. */
+-#define errno_NFSERR_IO EIO
+ 
+ int nfs_stat_to_errno(enum nfs_stat status);
+ int nfs4_stat_to_errno(int stat);
+ 
++__u32 nfs_localio_errno_to_nfs4_stat(int errno);
++
+ #endif /* _LINUX_NFS_COMMON_H */
+diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
+index fb908843f20928..347901525a46ae 100644
+--- a/include/linux/perf_event.h
++++ b/include/linux/perf_event.h
+@@ -1266,12 +1266,18 @@ static inline void perf_sample_save_callchain(struct perf_sample_data *data,
+ }
+ 
+ static inline void perf_sample_save_raw_data(struct perf_sample_data *data,
++					     struct perf_event *event,
+ 					     struct perf_raw_record *raw)
+ {
+ 	struct perf_raw_frag *frag = &raw->frag;
+ 	u32 sum = 0;
+ 	int size;
+ 
++	if (!(event->attr.sample_type & PERF_SAMPLE_RAW))
++		return;
++	if (WARN_ON_ONCE(data->sample_flags & PERF_SAMPLE_RAW))
++		return;
++
+ 	do {
+ 		sum += frag->size;
+ 		if (perf_raw_frag_last(frag))
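With the event now passed in, perf_sample_save_raw_data() can refuse to do work the consumer never requested and can warn instead of silently overwriting raw data saved earlier in the same sample. The two guards in a toy, self-contained form (all names here are illustrative, not the perf API):

#include <stdbool.h>
#include <stdio.h>

struct sample {
	bool wants_raw;		/* ~ attr.sample_type & PERF_SAMPLE_RAW */
	bool raw_saved;		/* ~ data->sample_flags & PERF_SAMPLE_RAW */
	const char *raw;
};

static void save_raw(struct sample *s, const char *raw)
{
	if (!s->wants_raw)
		return;		/* consumer never asked: skip the work */
	if (s->raw_saved) {
		fprintf(stderr, "raw data saved twice\n");	/* ~ WARN_ON_ONCE */
		return;
	}
	s->raw = raw;
	s->raw_saved = true;
}

int main(void)
{
	struct sample s = { .wants_raw = true };

	save_raw(&s, "payload");
	save_raw(&s, "other");	/* exercises the warning path */
	printf("kept: %s\n", s.raw);
	return 0;
}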
+diff --git a/include/linux/pps_kernel.h b/include/linux/pps_kernel.h
+index 78c8ac4951b581..c7abce28ed2995 100644
+--- a/include/linux/pps_kernel.h
++++ b/include/linux/pps_kernel.h
+@@ -56,8 +56,7 @@ struct pps_device {
+ 
+ 	unsigned int id;			/* PPS source unique ID */
+ 	void const *lookup_cookie;		/* For pps_lookup_dev() only */
+-	struct cdev cdev;
+-	struct device *dev;
++	struct device dev;
+ 	struct fasync_struct *async_queue;	/* fasync method */
+ 	spinlock_t lock;
+ };
+diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
+index fd037c127bb071..551329220e4f34 100644
+--- a/include/linux/ptr_ring.h
++++ b/include/linux/ptr_ring.h
+@@ -615,15 +615,14 @@ static inline int ptr_ring_resize_noprof(struct ptr_ring *r, int size, gfp_t gfp
+ /*
+  * Note: producer lock is nested within consumer lock, so if you
+  * resize you must make sure all uses nest correctly.
+- * In particular if you consume ring in interrupt or BH context, you must
+- * disable interrupts/BH when doing so.
++ * In particular, if you consume the ring in BH context, you must
++ * disable BH when doing so.
+  */
+-static inline int ptr_ring_resize_multiple_noprof(struct ptr_ring **rings,
+-						  unsigned int nrings,
+-						  int size,
+-						  gfp_t gfp, void (*destroy)(void *))
++static inline int ptr_ring_resize_multiple_bh_noprof(struct ptr_ring **rings,
++						     unsigned int nrings,
++						     int size, gfp_t gfp,
++						     void (*destroy)(void *))
+ {
+-	unsigned long flags;
+ 	void ***queues;
+ 	int i;
+ 
+@@ -638,12 +637,12 @@ static inline int ptr_ring_resize_multiple_noprof(struct ptr_ring **rings,
+ 	}
+ 
+ 	for (i = 0; i < nrings; ++i) {
+-		spin_lock_irqsave(&(rings[i])->consumer_lock, flags);
++		spin_lock_bh(&(rings[i])->consumer_lock);
+ 		spin_lock(&(rings[i])->producer_lock);
+ 		queues[i] = __ptr_ring_swap_queue(rings[i], queues[i],
+ 						  size, gfp, destroy);
+ 		spin_unlock(&(rings[i])->producer_lock);
+-		spin_unlock_irqrestore(&(rings[i])->consumer_lock, flags);
++		spin_unlock_bh(&(rings[i])->consumer_lock);
+ 	}
+ 
+ 	for (i = 0; i < nrings; ++i)
+@@ -662,8 +661,8 @@ static inline int ptr_ring_resize_multiple_noprof(struct ptr_ring **rings,
+ noqueues:
+ 	return -ENOMEM;
+ }
+-#define ptr_ring_resize_multiple(...) \
+-		alloc_hooks(ptr_ring_resize_multiple_noprof(__VA_ARGS__))
++#define ptr_ring_resize_multiple_bh(...) \
++		alloc_hooks(ptr_ring_resize_multiple_bh_noprof(__VA_ARGS__))
+ 
+ static inline void ptr_ring_cleanup(struct ptr_ring *r, void (*destroy)(void *))
+ {
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 02eaf84c8626f4..8982820dae2131 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -944,6 +944,7 @@ struct task_struct {
+ 	unsigned			sched_reset_on_fork:1;
+ 	unsigned			sched_contributes_to_load:1;
+ 	unsigned			sched_migrated:1;
++	unsigned			sched_task_hot:1;
+ 
+ 	/* Force alignment to the next boundary: */
+ 	unsigned			:0;
+diff --git a/include/linux/skb_array.h b/include/linux/skb_array.h
+index 926496c9cc9c3b..bf178238a3083d 100644
+--- a/include/linux/skb_array.h
++++ b/include/linux/skb_array.h
+@@ -199,17 +199,18 @@ static inline int skb_array_resize(struct skb_array *a, int size, gfp_t gfp)
+ 	return ptr_ring_resize(&a->ring, size, gfp, __skb_array_destroy_skb);
+ }
+ 
+-static inline int skb_array_resize_multiple_noprof(struct skb_array **rings,
+-						   int nrings, unsigned int size,
+-						   gfp_t gfp)
++static inline int skb_array_resize_multiple_bh_noprof(struct skb_array **rings,
++						      int nrings,
++						      unsigned int size,
++						      gfp_t gfp)
+ {
+ 	BUILD_BUG_ON(offsetof(struct skb_array, ring));
+-	return ptr_ring_resize_multiple_noprof((struct ptr_ring **)rings,
+-					       nrings, size, gfp,
+-					       __skb_array_destroy_skb);
++	return ptr_ring_resize_multiple_bh_noprof((struct ptr_ring **)rings,
++					          nrings, size, gfp,
++					          __skb_array_destroy_skb);
+ }
+-#define skb_array_resize_multiple(...)	\
+-		alloc_hooks(skb_array_resize_multiple_noprof(__VA_ARGS__))
++#define skb_array_resize_multiple_bh(...)	\
++		alloc_hooks(skb_array_resize_multiple_bh_noprof(__VA_ARGS__))
+ 
+ static inline void skb_array_cleanup(struct skb_array *a)
+ {
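The _bh renames in ptr_ring.h and skb_array.h encode a context contract: these rings are only ever consumed in BH context, so the consumer lock can be taken with spin_lock_bh() rather than the heavier irqsave variant. A hypothetical driver-side call site for the renamed helper (the struct, fields and queue count are made up for illustration; this is a sketch, not code from the patch):

/* Hypothetical driver sketch: resize all per-queue rings from process
 * context with BH enabled, which is what the _bh variant now expects
 * of its callers. */
#define TOY_NUM_QUEUES 4

struct toy_queue { struct skb_array ring; };
struct toy_priv  { struct toy_queue txq[TOY_NUM_QUEUES]; };

static int toy_set_ring_size(struct toy_priv *priv, unsigned int size)
{
	struct skb_array *rings[TOY_NUM_QUEUES];
	int i;

	for (i = 0; i < TOY_NUM_QUEUES; i++)
		rings[i] = &priv->txq[i].ring;

	return skb_array_resize_multiple_bh(rings, TOY_NUM_QUEUES,
					    size, GFP_KERNEL);
}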
+diff --git a/include/linux/usb/tcpm.h b/include/linux/usb/tcpm.h
+index 061da9546a8131..b22e659f81ba54 100644
+--- a/include/linux/usb/tcpm.h
++++ b/include/linux/usb/tcpm.h
+@@ -163,7 +163,8 @@ struct tcpc_dev {
+ 	void (*frs_sourcing_vbus)(struct tcpc_dev *dev);
+ 	int (*enable_auto_vbus_discharge)(struct tcpc_dev *dev, bool enable);
+ 	int (*set_auto_vbus_discharge_threshold)(struct tcpc_dev *dev, enum typec_pwr_opmode mode,
+-						 bool pps_active, u32 requested_vbus_voltage);
++						 bool pps_active, u32 requested_vbus_voltage,
++						 u32 pps_apdo_min_voltage);
+ 	bool (*is_vbus_vsafe0v)(struct tcpc_dev *dev);
+ 	void (*set_partner_usb_comm_capable)(struct tcpc_dev *dev, bool enable);
+ 	void (*check_contaminant)(struct tcpc_dev *dev);
+diff --git a/include/net/ax25.h b/include/net/ax25.h
+index cb622d84cd0cc4..4ee141aae0a29d 100644
+--- a/include/net/ax25.h
++++ b/include/net/ax25.h
+@@ -231,6 +231,7 @@ typedef struct ax25_dev {
+ #endif
+ 	refcount_t		refcount;
+ 	bool device_up;
++	struct rcu_head		rcu;
+ } ax25_dev;
+ 
+ typedef struct ax25_cb {
+@@ -290,9 +291,8 @@ static inline void ax25_dev_hold(ax25_dev *ax25_dev)
+ 
+ static inline void ax25_dev_put(ax25_dev *ax25_dev)
+ {
+-	if (refcount_dec_and_test(&ax25_dev->refcount)) {
+-		kfree(ax25_dev);
+-	}
++	if (refcount_dec_and_test(&ax25_dev->refcount))
++		kfree_rcu(ax25_dev, rcu);
+ }
+ static inline __be16 ax25_type_trans(struct sk_buff *skb, struct net_device *dev)
+ {
+@@ -335,9 +335,9 @@ void ax25_digi_invert(const ax25_digi *, ax25_digi *);
+ extern spinlock_t ax25_dev_lock;
+ 
+ #if IS_ENABLED(CONFIG_AX25)
+-static inline ax25_dev *ax25_dev_ax25dev(struct net_device *dev)
++static inline ax25_dev *ax25_dev_ax25dev(const struct net_device *dev)
+ {
+-	return dev->ax25_ptr;
++	return rcu_dereference_rtnl(dev->ax25_ptr);
+ }
+ #endif
+ 
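Together these ax25.h hunks make dev->ax25_ptr RCU-managed and defer the final free through kfree_rcu(), so a short lockless read needs only rcu_read_lock() around the lookup and the use. An illustrative reader under those assumptions (the function itself is hypothetical, not from the patch):

/* Hypothetical reader: adev cannot be freed before a grace period
 * elapses, so the brief access below is safe without a refcount. */
static bool toy_ax25_dev_is_up(const struct net_device *dev)
{
	ax25_dev *adev;
	bool up = false;

	rcu_read_lock();
	adev = ax25_dev_ax25dev(dev);	/* rcu_dereference under the hood */
	if (adev)
		up = adev->device_up;
	rcu_read_unlock();

	return up;
}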
+diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
+index 74ff688568a0c6..f475757daafba9 100644
+--- a/include/net/inetpeer.h
++++ b/include/net/inetpeer.h
+@@ -96,30 +96,28 @@ static inline struct in6_addr *inetpeer_get_addr_v6(struct inetpeer_addr *iaddr)
+ 
+ /* can be called with or without local BH being disabled */
+ struct inet_peer *inet_getpeer(struct inet_peer_base *base,
+-			       const struct inetpeer_addr *daddr,
+-			       int create);
++			       const struct inetpeer_addr *daddr);
+ 
+ static inline struct inet_peer *inet_getpeer_v4(struct inet_peer_base *base,
+ 						__be32 v4daddr,
+-						int vif, int create)
++						int vif)
+ {
+ 	struct inetpeer_addr daddr;
+ 
+ 	daddr.a4.addr = v4daddr;
+ 	daddr.a4.vif = vif;
+ 	daddr.family = AF_INET;
+-	return inet_getpeer(base, &daddr, create);
++	return inet_getpeer(base, &daddr);
+ }
+ 
+ static inline struct inet_peer *inet_getpeer_v6(struct inet_peer_base *base,
+-						const struct in6_addr *v6daddr,
+-						int create)
++						const struct in6_addr *v6daddr)
+ {
+ 	struct inetpeer_addr daddr;
+ 
+ 	daddr.a6 = *v6daddr;
+ 	daddr.family = AF_INET6;
+-	return inet_getpeer(base, &daddr, create);
++	return inet_getpeer(base, &daddr);
+ }
+ 
+ static inline int inetpeer_addr_cmp(const struct inetpeer_addr *a,
+diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
+index 471c353d32a4a5..788513cc384b7f 100644
+--- a/include/net/netfilter/nf_tables.h
++++ b/include/net/netfilter/nf_tables.h
+@@ -442,6 +442,9 @@ struct nft_set_ext;
+  *	@remove: remove element from set
+  *	@walk: iterate over all set elements
+  *	@get: get set elements
++ *	@ksize: kernel set size
++ * 	@usize: userspace set size
++ *	@adjust_maxsize: delta to adjust maximum set size
+  *	@commit: commit set elements
+  *	@abort: abort set elements
+  *	@privsize: function to return size of set private data
+@@ -495,6 +498,9 @@ struct nft_set_ops {
+ 					       const struct nft_set *set,
+ 					       const struct nft_set_elem *elem,
+ 					       unsigned int flags);
++	u32				(*ksize)(u32 size);
++	u32				(*usize)(u32 size);
++	u32				(*adjust_maxsize)(const struct nft_set *set);
+ 	void				(*commit)(struct nft_set *set);
+ 	void				(*abort)(const struct nft_set *set);
+ 	u64				(*privsize)(const struct nlattr * const nla[],
+diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
+index ae60d66640954c..23dd647fe0248c 100644
+--- a/include/net/netns/xfrm.h
++++ b/include/net/netns/xfrm.h
+@@ -43,6 +43,7 @@ struct netns_xfrm {
+ 	struct hlist_head	__rcu *state_bysrc;
+ 	struct hlist_head	__rcu *state_byspi;
+ 	struct hlist_head	__rcu *state_byseq;
++	struct hlist_head	 __percpu *state_cache_input;
+ 	unsigned int		state_hmask;
+ 	unsigned int		state_num;
+ 	struct work_struct	state_hash_work;
+diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
+index 4880b3a7aced5b..4229e4fcd2a9ee 100644
+--- a/include/net/pkt_cls.h
++++ b/include/net/pkt_cls.h
+@@ -75,11 +75,11 @@ static inline bool tcf_block_non_null_shared(struct tcf_block *block)
+ }
+ 
+ #ifdef CONFIG_NET_CLS_ACT
+-DECLARE_STATIC_KEY_FALSE(tcf_bypass_check_needed_key);
++DECLARE_STATIC_KEY_FALSE(tcf_sw_enabled_key);
+ 
+ static inline bool tcf_block_bypass_sw(struct tcf_block *block)
+ {
+-	return block && block->bypass_wanted;
++	return block && !atomic_read(&block->useswcnt);
+ }
+ #endif
+ 
+@@ -759,6 +759,15 @@ tc_cls_common_offload_init(struct flow_cls_common_offload *cls_common,
+ 		cls_common->extack = extack;
+ }
+ 
++static inline void tcf_proto_update_usesw(struct tcf_proto *tp, u32 flags)
++{
++	if (tp->usesw)
++		return;
++	if (tc_skip_sw(flags) && tc_in_hw(flags))
++		return;
++	tp->usesw = true;
++}
++
+ #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
+ static inline struct tc_skb_ext *tc_skb_ext_alloc(struct sk_buff *skb)
+ {
+diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
+index 5d74fa7e694cc8..1e6324f0d4efda 100644
+--- a/include/net/sch_generic.h
++++ b/include/net/sch_generic.h
+@@ -425,6 +425,7 @@ struct tcf_proto {
+ 	spinlock_t		lock;
+ 	bool			deleting;
+ 	bool			counted;
++	bool			usesw;
+ 	refcount_t		refcnt;
+ 	struct rcu_head		rcu;
+ 	struct hlist_node	destroy_ht_node;
+@@ -474,9 +475,7 @@ struct tcf_block {
+ 	struct flow_block flow_block;
+ 	struct list_head owner_list;
+ 	bool keep_dst;
+-	bool bypass_wanted;
+-	atomic_t filtercnt; /* Number of filters */
+-	atomic_t skipswcnt; /* Number of skip_sw filters */
++	atomic_t useswcnt;
+ 	atomic_t offloadcnt; /* Number of offloaded filters */
+ 	unsigned int nooffloaddevcnt; /* Number of devs unable to do offload */
+ 	unsigned int lockeddevcnt; /* Number of devs that require rtnl lock. */
+diff --git a/include/net/xfrm.h b/include/net/xfrm.h
+index a0bdd58f401c0f..83e9ef25b8d0d4 100644
+--- a/include/net/xfrm.h
++++ b/include/net/xfrm.h
+@@ -184,10 +184,13 @@ struct xfrm_state {
+ 	};
+ 	struct hlist_node	byspi;
+ 	struct hlist_node	byseq;
++	struct hlist_node	state_cache;
++	struct hlist_node	state_cache_input;
+ 
+ 	refcount_t		refcnt;
+ 	spinlock_t		lock;
+ 
++	u32			pcpu_num;
+ 	struct xfrm_id		id;
+ 	struct xfrm_selector	sel;
+ 	struct xfrm_mark	mark;
+@@ -536,6 +539,7 @@ struct xfrm_policy_queue {
+  *	@xp_net: network namespace the policy lives in
+  *	@bydst: hlist node for SPD hash table or rbtree list
+  *	@byidx: hlist node for index hash table
++ *	@state_cache_list: hlist head for policy cached xfrm states
+  *	@lock: serialize changes to policy structure members
+  *	@refcnt: reference count, freed once it reaches 0
+  *	@pos: kernel internal tie-breaker to determine age of policy
+@@ -566,6 +570,8 @@ struct xfrm_policy {
+ 	struct hlist_node	bydst;
+ 	struct hlist_node	byidx;
+ 
++	struct hlist_head	state_cache_list;
++
+ 	/* This lock only affects elements except for entry. */
+ 	rwlock_t		lock;
+ 	refcount_t		refcnt;
+@@ -1217,9 +1223,19 @@ static inline int __xfrm_policy_check2(struct sock *sk, int dir,
+ 
+ 	if (xo) {
+ 		x = xfrm_input_state(skb);
+-		if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET)
+-			return (xo->flags & CRYPTO_DONE) &&
+-			       (xo->status & CRYPTO_SUCCESS);
++		if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET) {
++			bool check = (xo->flags & CRYPTO_DONE) &&
++				     (xo->status & CRYPTO_SUCCESS);
++
++			/* The packets here are plain ones and secpath was
++			 * needed to indicate that hardware already handled
++			 * them and there is no need to do anything further.
++			 *
++			 * Consume secpath which was set by drivers.
++			 */
++			secpath_reset(skb);
++			return check;
++		}
+ 	}
+ 
+ 	return __xfrm_check_nopolicy(net, skb, dir) ||
+@@ -1645,6 +1661,10 @@ int xfrm_state_update(struct xfrm_state *x);
+ struct xfrm_state *xfrm_state_lookup(struct net *net, u32 mark,
+ 				     const xfrm_address_t *daddr, __be32 spi,
+ 				     u8 proto, unsigned short family);
++struct xfrm_state *xfrm_input_state_lookup(struct net *net, u32 mark,
++					   const xfrm_address_t *daddr,
++					   __be32 spi, u8 proto,
++					   unsigned short family);
+ struct xfrm_state *xfrm_state_lookup_byaddr(struct net *net, u32 mark,
+ 					    const xfrm_address_t *daddr,
+ 					    const xfrm_address_t *saddr,
+@@ -1684,7 +1704,7 @@ struct xfrmk_spdinfo {
+ 	u32 spdhmcnt;
+ };
+ 
+-struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq);
++struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq, u32 pcpu_num);
+ int xfrm_state_delete(struct xfrm_state *x);
+ int xfrm_state_flush(struct net *net, u8 proto, bool task_valid, bool sync);
+ int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid);
+@@ -1796,7 +1816,7 @@ int verify_spi_info(u8 proto, u32 min, u32 max, struct netlink_ext_ack *extack);
+ int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi,
+ 		   struct netlink_ext_ack *extack);
+ struct xfrm_state *xfrm_find_acq(struct net *net, const struct xfrm_mark *mark,
+-				 u8 mode, u32 reqid, u32 if_id, u8 proto,
++				 u8 mode, u32 reqid, u32 if_id, u32 pcpu_num, u8 proto,
+ 				 const xfrm_address_t *daddr,
+ 				 const xfrm_address_t *saddr, int create,
+ 				 unsigned short family);
+diff --git a/include/sound/hdaudio_ext.h b/include/sound/hdaudio_ext.h
+index 957295364a5e3c..4c7a40e149a594 100644
+--- a/include/sound/hdaudio_ext.h
++++ b/include/sound/hdaudio_ext.h
+@@ -2,8 +2,6 @@
+ #ifndef __SOUND_HDAUDIO_EXT_H
+ #define __SOUND_HDAUDIO_EXT_H
+ 
+-#include <linux/io-64-nonatomic-lo-hi.h>
+-#include <linux/iopoll.h>
+ #include <sound/hdaudio.h>
+ 
+ int snd_hdac_ext_bus_init(struct hdac_bus *bus, struct device *dev,
+@@ -119,49 +117,6 @@ int snd_hdac_ext_bus_link_put(struct hdac_bus *bus, struct hdac_ext_link *hlink)
+ 
+ void snd_hdac_ext_bus_link_power(struct hdac_device *codec, bool enable);
+ 
+-#define snd_hdac_adsp_writeb(chip, reg, value) \
+-	snd_hdac_reg_writeb(chip, (chip)->dsp_ba + (reg), value)
+-#define snd_hdac_adsp_readb(chip, reg) \
+-	snd_hdac_reg_readb(chip, (chip)->dsp_ba + (reg))
+-#define snd_hdac_adsp_writew(chip, reg, value) \
+-	snd_hdac_reg_writew(chip, (chip)->dsp_ba + (reg), value)
+-#define snd_hdac_adsp_readw(chip, reg) \
+-	snd_hdac_reg_readw(chip, (chip)->dsp_ba + (reg))
+-#define snd_hdac_adsp_writel(chip, reg, value) \
+-	snd_hdac_reg_writel(chip, (chip)->dsp_ba + (reg), value)
+-#define snd_hdac_adsp_readl(chip, reg) \
+-	snd_hdac_reg_readl(chip, (chip)->dsp_ba + (reg))
+-#define snd_hdac_adsp_writeq(chip, reg, value) \
+-	snd_hdac_reg_writeq(chip, (chip)->dsp_ba + (reg), value)
+-#define snd_hdac_adsp_readq(chip, reg) \
+-	snd_hdac_reg_readq(chip, (chip)->dsp_ba + (reg))
+-
+-#define snd_hdac_adsp_updateb(chip, reg, mask, val) \
+-	snd_hdac_adsp_writeb(chip, reg, \
+-			(snd_hdac_adsp_readb(chip, reg) & ~(mask)) | (val))
+-#define snd_hdac_adsp_updatew(chip, reg, mask, val) \
+-	snd_hdac_adsp_writew(chip, reg, \
+-			(snd_hdac_adsp_readw(chip, reg) & ~(mask)) | (val))
+-#define snd_hdac_adsp_updatel(chip, reg, mask, val) \
+-	snd_hdac_adsp_writel(chip, reg, \
+-			(snd_hdac_adsp_readl(chip, reg) & ~(mask)) | (val))
+-#define snd_hdac_adsp_updateq(chip, reg, mask, val) \
+-	snd_hdac_adsp_writeq(chip, reg, \
+-			(snd_hdac_adsp_readq(chip, reg) & ~(mask)) | (val))
+-
+-#define snd_hdac_adsp_readb_poll(chip, reg, val, cond, delay_us, timeout_us) \
+-	readb_poll_timeout((chip)->dsp_ba + (reg), val, cond, \
+-			   delay_us, timeout_us)
+-#define snd_hdac_adsp_readw_poll(chip, reg, val, cond, delay_us, timeout_us) \
+-	readw_poll_timeout((chip)->dsp_ba + (reg), val, cond, \
+-			   delay_us, timeout_us)
+-#define snd_hdac_adsp_readl_poll(chip, reg, val, cond, delay_us, timeout_us) \
+-	readl_poll_timeout((chip)->dsp_ba + (reg), val, cond, \
+-			   delay_us, timeout_us)
+-#define snd_hdac_adsp_readq_poll(chip, reg, val, cond, delay_us, timeout_us) \
+-	readq_poll_timeout((chip)->dsp_ba + (reg), val, cond, \
+-			   delay_us, timeout_us)
+-
+ struct hdac_ext_device;
+ 
+ /* ops common to all codec drivers */
+diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h
+index a0aed1a428a183..9a75590227f262 100644
+--- a/include/trace/events/afs.h
++++ b/include/trace/events/afs.h
+@@ -118,6 +118,8 @@ enum yfs_cm_operation {
+  */
+ #define afs_call_traces \
+ 	EM(afs_call_trace_alloc,		"ALLOC") \
++	EM(afs_call_trace_async_abort,		"ASYAB") \
++	EM(afs_call_trace_async_kill,		"ASYKL") \
+ 	EM(afs_call_trace_free,			"FREE ") \
+ 	EM(afs_call_trace_get,			"GET  ") \
+ 	EM(afs_call_trace_put,			"PUT  ") \
+diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
+index cc22596c7250cf..666fe1779ccc63 100644
+--- a/include/trace/events/rxrpc.h
++++ b/include/trace/events/rxrpc.h
+@@ -117,6 +117,7 @@
+ #define rxrpc_call_poke_traces \
+ 	EM(rxrpc_call_poke_abort,		"Abort")	\
+ 	EM(rxrpc_call_poke_complete,		"Compl")	\
++	EM(rxrpc_call_poke_conn_abort,		"Conn-abort")	\
+ 	EM(rxrpc_call_poke_error,		"Error")	\
+ 	EM(rxrpc_call_poke_idle,		"Idle")		\
+ 	EM(rxrpc_call_poke_set_timeout,		"Set-timo")	\
+@@ -282,6 +283,7 @@
+ 	EM(rxrpc_call_see_activate_client,	"SEE act-clnt") \
+ 	EM(rxrpc_call_see_connect_failed,	"SEE con-fail") \
+ 	EM(rxrpc_call_see_connected,		"SEE connect ") \
++	EM(rxrpc_call_see_conn_abort,		"SEE conn-abt") \
+ 	EM(rxrpc_call_see_disconnected,		"SEE disconn ") \
+ 	EM(rxrpc_call_see_distribute_error,	"SEE dist-err") \
+ 	EM(rxrpc_call_see_input,		"SEE input   ") \
+@@ -956,6 +958,29 @@ TRACE_EVENT(rxrpc_rx_abort,
+ 		      __entry->abort_code)
+ 	    );
+ 
++TRACE_EVENT(rxrpc_rx_conn_abort,
++	    TP_PROTO(const struct rxrpc_connection *conn, const struct sk_buff *skb),
++
++	    TP_ARGS(conn, skb),
++
++	    TP_STRUCT__entry(
++		    __field(unsigned int,	conn)
++		    __field(rxrpc_serial_t,	serial)
++		    __field(u32,		abort_code)
++			     ),
++
++	    TP_fast_assign(
++		    __entry->conn = conn->debug_id;
++		    __entry->serial = rxrpc_skb(skb)->hdr.serial;
++		    __entry->abort_code = skb->priority;
++			   ),
++
++	    TP_printk("C=%08x ABORT %08x ac=%d",
++		      __entry->conn,
++		      __entry->serial,
++		      __entry->abort_code)
++	    );
++
+ TRACE_EVENT(rxrpc_rx_challenge,
+ 	    TP_PROTO(struct rxrpc_connection *conn, rxrpc_serial_t serial,
+ 		     u32 version, u32 nonce, u32 min_level),
+diff --git a/include/uapi/linux/xfrm.h b/include/uapi/linux/xfrm.h
+index f28701500714f6..d73a97e3030a86 100644
+--- a/include/uapi/linux/xfrm.h
++++ b/include/uapi/linux/xfrm.h
+@@ -322,6 +322,7 @@ enum xfrm_attr_type_t {
+ 	XFRMA_MTIMER_THRESH,	/* __u32 in seconds for input SA */
+ 	XFRMA_SA_DIR,		/* __u8 */
+ 	XFRMA_NAT_KEEPALIVE_INTERVAL,	/* __u32 in seconds for NAT keepalive */
++	XFRMA_SA_PCPU,		/* __u32 */
+ 	__XFRMA_MAX
+ 
+ #define XFRMA_OUTPUT_MARK XFRMA_SET_MARK	/* Compatibility */
+@@ -437,6 +438,7 @@ struct xfrm_userpolicy_info {
+ #define XFRM_POLICY_LOCALOK	1	/* Allow user to override global policy */
+ 	/* Automatically expand selector to include matching ICMP payloads. */
+ #define XFRM_POLICY_ICMP	2
++#define XFRM_POLICY_CPU_ACQUIRE	4
+ 	__u8				share;
+ };
+ 
+diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c
+index 883510a3e8d075..874f9e2defd583 100644
+--- a/io_uring/uring_cmd.c
++++ b/io_uring/uring_cmd.c
+@@ -340,7 +340,7 @@ int io_uring_cmd_sock(struct io_uring_cmd *cmd, unsigned int issue_flags)
+ 	if (!prot || !prot->ioctl)
+ 		return -EOPNOTSUPP;
+ 
+-	switch (cmd->sqe->cmd_op) {
++	switch (cmd->cmd_op) {
+ 	case SOCKET_URING_OP_SIOCINQ:
+ 		ret = prot->ioctl(sk, SIOCINQ, &arg);
+ 		if (ret)
+diff --git a/kernel/bpf/arena.c b/kernel/bpf/arena.c
+index e52b3ad231b9c4..93e48c7cad4eff 100644
+--- a/kernel/bpf/arena.c
++++ b/kernel/bpf/arena.c
+@@ -212,7 +212,7 @@ static u64 arena_map_mem_usage(const struct bpf_map *map)
+ struct vma_list {
+ 	struct vm_area_struct *vma;
+ 	struct list_head head;
+-	atomic_t mmap_count;
++	refcount_t mmap_count;
+ };
+ 
+ static int remember_vma(struct bpf_arena *arena, struct vm_area_struct *vma)
+@@ -222,7 +222,7 @@ static int remember_vma(struct bpf_arena *arena, struct vm_area_struct *vma)
+ 	vml = kmalloc(sizeof(*vml), GFP_KERNEL);
+ 	if (!vml)
+ 		return -ENOMEM;
+-	atomic_set(&vml->mmap_count, 1);
++	refcount_set(&vml->mmap_count, 1);
+ 	vma->vm_private_data = vml;
+ 	vml->vma = vma;
+ 	list_add(&vml->head, &arena->vma_list);
+@@ -233,7 +233,7 @@ static void arena_vm_open(struct vm_area_struct *vma)
+ {
+ 	struct vma_list *vml = vma->vm_private_data;
+ 
+-	atomic_inc(&vml->mmap_count);
++	refcount_inc(&vml->mmap_count);
+ }
+ 
+ static void arena_vm_close(struct vm_area_struct *vma)
+@@ -242,7 +242,7 @@ static void arena_vm_close(struct vm_area_struct *vma)
+ 	struct bpf_arena *arena = container_of(map, struct bpf_arena, map);
+ 	struct vma_list *vml = vma->vm_private_data;
+ 
+-	if (!atomic_dec_and_test(&vml->mmap_count))
++	if (!refcount_dec_and_test(&vml->mmap_count))
+ 		return;
+ 	guard(mutex)(&arena->lock);
+ 	/* update link list under lock */
+diff --git a/kernel/bpf/bpf_local_storage.c b/kernel/bpf/bpf_local_storage.c
+index c938dea5ddbf3a..050c784498abec 100644
+--- a/kernel/bpf/bpf_local_storage.c
++++ b/kernel/bpf/bpf_local_storage.c
+@@ -797,8 +797,12 @@ bpf_local_storage_map_alloc(union bpf_attr *attr,
+ 	smap->elem_size = offsetof(struct bpf_local_storage_elem,
+ 				   sdata.data[attr->value_size]);
+ 
+-	smap->bpf_ma = bpf_ma;
+-	if (bpf_ma) {
++	/* In PREEMPT_RT, kmalloc(GFP_ATOMIC) is still not safe in
++	 * non-preemptible context. Thus, enforce all storages to use
++	 * bpf_mem_alloc when CONFIG_PREEMPT_RT is enabled.
++	 */
++	smap->bpf_ma = IS_ENABLED(CONFIG_PREEMPT_RT) ? true : bpf_ma;
++	if (smap->bpf_ma) {
+ 		err = bpf_mem_alloc_init(&smap->selem_ma, smap->elem_size, false);
+ 		if (err)
+ 			goto free_smap;
+diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c
+index b3a2ce1e5e22ec..b70d0eef8a284d 100644
+--- a/kernel/bpf/bpf_struct_ops.c
++++ b/kernel/bpf/bpf_struct_ops.c
+@@ -311,6 +311,20 @@ void bpf_struct_ops_desc_release(struct bpf_struct_ops_desc *st_ops_desc)
+ 	kfree(arg_info);
+ }
+ 
++static bool is_module_member(const struct btf *btf, u32 id)
++{
++	const struct btf_type *t;
++
++	t = btf_type_resolve_ptr(btf, id, NULL);
++	if (!t)
++		return false;
++
++	if (!__btf_type_is_struct(t) && !btf_type_is_fwd(t))
++		return false;
++
++	return !strcmp(btf_name_by_offset(btf, t->name_off), "module");
++}
++
+ int bpf_struct_ops_desc_init(struct bpf_struct_ops_desc *st_ops_desc,
+ 			     struct btf *btf,
+ 			     struct bpf_verifier_log *log)
+@@ -390,6 +404,13 @@ int bpf_struct_ops_desc_init(struct bpf_struct_ops_desc *st_ops_desc,
+ 			goto errout;
+ 		}
+ 
++		if (!st_ops_ids[IDX_MODULE_ID] && is_module_member(btf, member->type)) {
++			pr_warn("'struct module' btf id not found. Is CONFIG_MODULES enabled? bpf_struct_ops '%s' needs module support.\n",
++				st_ops->name);
++			err = -EOPNOTSUPP;
++			goto errout;
++		}
++
+ 		func_proto = btf_type_resolve_func_ptr(btf,
+ 						       member->type,
+ 						       NULL);
+diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
+index 41d20b7199c4af..a44f4be592be79 100644
+--- a/kernel/bpf/btf.c
++++ b/kernel/bpf/btf.c
+@@ -498,11 +498,6 @@ bool btf_type_is_void(const struct btf_type *t)
+ 	return t == &btf_void;
+ }
+ 
+-static bool btf_type_is_fwd(const struct btf_type *t)
+-{
+-	return BTF_INFO_KIND(t->info) == BTF_KIND_FWD;
+-}
+-
+ static bool btf_type_is_datasec(const struct btf_type *t)
+ {
+ 	return BTF_INFO_KIND(t->info) == BTF_KIND_DATASEC;
+diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
+index 3d45ebe8afb48d..a05aeb34589641 100644
+--- a/kernel/bpf/helpers.c
++++ b/kernel/bpf/helpers.c
+@@ -1593,10 +1593,24 @@ void bpf_timer_cancel_and_free(void *val)
+ 	 * To avoid these issues, punt to workqueue context when we are in a
+ 	 * timer callback.
+ 	 */
+-	if (this_cpu_read(hrtimer_running))
++	if (this_cpu_read(hrtimer_running)) {
+ 		queue_work(system_unbound_wq, &t->cb.delete_work);
+-	else
++		return;
++	}
++
++	if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
++		/* If the timer is running on other CPU, also use a kworker to
++		 * wait for the completion of the timer instead of trying to
++		 * acquire a sleepable lock in hrtimer_cancel() to wait for its
++		 * completion.
++		 */
++		if (hrtimer_try_to_cancel(&t->timer) >= 0)
++			kfree_rcu(t, cb.rcu);
++		else
++			queue_work(system_unbound_wq, &t->cb.delete_work);
++	} else {
+ 		bpf_timer_delete_work(&t->cb.delete_work);
++	}
+ }
+ 
+ /* This function is called by map_delete/update_elem for individual element and
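The PREEMPT_RT branch above leans on the hrtimer_try_to_cancel() contract: 1 means a pending timer was removed, 0 means it was not queued at all, and -1 means the callback is executing right now on another CPU. Only the -1 case needs the deferred kworker, because waiting for the callback to finish takes a sleeping lock on RT. The decision, modeled as a self-contained toy:

#include <stdio.h>

enum cancel_result { CB_RUNNING = -1, NOT_QUEUED = 0, CANCELLED = 1 };

static const char *cleanup_path(enum cancel_result r)
{
	/* r >= 0: the timer is guaranteed not to fire again, so free
	 * directly (kfree_rcu in the kernel); r < 0: the callback is
	 * mid-flight, so defer to a worker that is allowed to sleep. */
	return r >= 0 ? "free via RCU" : "defer to workqueue";
}

int main(void)
{
	printf("running:   %s\n", cleanup_path(CB_RUNNING));
	printf("cancelled: %s\n", cleanup_path(CANCELLED));
	return 0;
}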
+diff --git a/kernel/dma/coherent.c b/kernel/dma/coherent.c
+index ff5683a57f7712..3b2bdca9f1d4b0 100644
+--- a/kernel/dma/coherent.c
++++ b/kernel/dma/coherent.c
+@@ -330,7 +330,8 @@ int dma_init_global_coherent(phys_addr_t phys_addr, size_t size)
+ #include <linux/of_reserved_mem.h>
+ 
+ #ifdef CONFIG_DMA_GLOBAL_POOL
+-static struct reserved_mem *dma_reserved_default_memory __initdata;
++static phys_addr_t dma_reserved_default_memory_base __initdata;
++static phys_addr_t dma_reserved_default_memory_size __initdata;
+ #endif
+ 
+ static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
+@@ -376,9 +377,10 @@ static int __init rmem_dma_setup(struct reserved_mem *rmem)
+ 
+ #ifdef CONFIG_DMA_GLOBAL_POOL
+ 	if (of_get_flat_dt_prop(node, "linux,dma-default", NULL)) {
+-		WARN(dma_reserved_default_memory,
++		WARN(dma_reserved_default_memory_size,
+ 		     "Reserved memory: region for default DMA coherent area is redefined\n");
+-		dma_reserved_default_memory = rmem;
++		dma_reserved_default_memory_base = rmem->base;
++		dma_reserved_default_memory_size = rmem->size;
+ 	}
+ #endif
+ 
+@@ -391,10 +393,10 @@ static int __init rmem_dma_setup(struct reserved_mem *rmem)
+ #ifdef CONFIG_DMA_GLOBAL_POOL
+ static int __init dma_init_reserved_memory(void)
+ {
+-	if (!dma_reserved_default_memory)
++	if (!dma_reserved_default_memory_size)
+ 		return -ENOMEM;
+-	return dma_init_global_coherent(dma_reserved_default_memory->base,
+-					dma_reserved_default_memory->size);
++	return dma_init_global_coherent(dma_reserved_default_memory_base,
++					dma_reserved_default_memory_size);
+ }
+ core_initcall(dma_init_reserved_memory);
+ #endif /* CONFIG_DMA_GLOBAL_POOL */
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index df27d08a723269..501d8c2fedff40 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -10375,9 +10375,9 @@ static struct pmu perf_tracepoint = {
+ };
+ 
+ static int perf_tp_filter_match(struct perf_event *event,
+-				struct perf_sample_data *data)
++				struct perf_raw_record *raw)
+ {
+-	void *record = data->raw->frag.data;
++	void *record = raw->frag.data;
+ 
+ 	/* only top level events have filters set */
+ 	if (event->parent)
+@@ -10389,7 +10389,7 @@ static int perf_tp_filter_match(struct perf_event *event,
+ }
+ 
+ static int perf_tp_event_match(struct perf_event *event,
+-				struct perf_sample_data *data,
++				struct perf_raw_record *raw,
+ 				struct pt_regs *regs)
+ {
+ 	if (event->hw.state & PERF_HES_STOPPED)
+@@ -10400,7 +10400,7 @@ static int perf_tp_event_match(struct perf_event *event,
+ 	if (event->attr.exclude_kernel && !user_mode(regs))
+ 		return 0;
+ 
+-	if (!perf_tp_filter_match(event, data))
++	if (!perf_tp_filter_match(event, raw))
+ 		return 0;
+ 
+ 	return 1;
+@@ -10426,6 +10426,7 @@ EXPORT_SYMBOL_GPL(perf_trace_run_bpf_submit);
+ static void __perf_tp_event_target_task(u64 count, void *record,
+ 					struct pt_regs *regs,
+ 					struct perf_sample_data *data,
++					struct perf_raw_record *raw,
+ 					struct perf_event *event)
+ {
+ 	struct trace_entry *entry = record;
+@@ -10435,13 +10436,17 @@ static void __perf_tp_event_target_task(u64 count, void *record,
+ 	/* Cannot deliver synchronous signal to other task. */
+ 	if (event->attr.sigtrap)
+ 		return;
+-	if (perf_tp_event_match(event, data, regs))
++	if (perf_tp_event_match(event, raw, regs)) {
++		perf_sample_data_init(data, 0, 0);
++		perf_sample_save_raw_data(data, event, raw);
+ 		perf_swevent_event(event, count, data, regs);
++	}
+ }
+ 
+ static void perf_tp_event_target_task(u64 count, void *record,
+ 				      struct pt_regs *regs,
+ 				      struct perf_sample_data *data,
++				      struct perf_raw_record *raw,
+ 				      struct perf_event_context *ctx)
+ {
+ 	unsigned int cpu = smp_processor_id();
+@@ -10449,15 +10454,15 @@ static void perf_tp_event_target_task(u64 count, void *record,
+ 	struct perf_event *event, *sibling;
+ 
+ 	perf_event_groups_for_cpu_pmu(event, &ctx->pinned_groups, cpu, pmu) {
+-		__perf_tp_event_target_task(count, record, regs, data, event);
++		__perf_tp_event_target_task(count, record, regs, data, raw, event);
+ 		for_each_sibling_event(sibling, event)
+-			__perf_tp_event_target_task(count, record, regs, data, sibling);
++			__perf_tp_event_target_task(count, record, regs, data, raw, sibling);
+ 	}
+ 
+ 	perf_event_groups_for_cpu_pmu(event, &ctx->flexible_groups, cpu, pmu) {
+-		__perf_tp_event_target_task(count, record, regs, data, event);
++		__perf_tp_event_target_task(count, record, regs, data, raw, event);
+ 		for_each_sibling_event(sibling, event)
+-			__perf_tp_event_target_task(count, record, regs, data, sibling);
++			__perf_tp_event_target_task(count, record, regs, data, raw, sibling);
+ 	}
+ }
+ 
+@@ -10475,15 +10480,10 @@ void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size,
+ 		},
+ 	};
+ 
+-	perf_sample_data_init(&data, 0, 0);
+-	perf_sample_save_raw_data(&data, &raw);
+-
+ 	perf_trace_buf_update(record, event_type);
+ 
+ 	hlist_for_each_entry_rcu(event, head, hlist_entry) {
+-		if (perf_tp_event_match(event, &data, regs)) {
+-			perf_swevent_event(event, count, &data, regs);
+-
++		if (perf_tp_event_match(event, &raw, regs)) {
+ 			/*
+ 			 * Here use the same on-stack perf_sample_data,
+ 			 * some members in data are event-specific and
+@@ -10493,7 +10493,8 @@ void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size,
+ 			 * because data->sample_flags is set.
+ 			 */
+ 			perf_sample_data_init(&data, 0, 0);
+-			perf_sample_save_raw_data(&data, &raw);
++			perf_sample_save_raw_data(&data, event, &raw);
++			perf_swevent_event(event, count, &data, regs);
+ 		}
+ 	}
+ 
+@@ -10510,7 +10511,7 @@ void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size,
+ 			goto unlock;
+ 
+ 		raw_spin_lock(&ctx->lock);
+-		perf_tp_event_target_task(count, record, regs, &data, ctx);
++		perf_tp_event_target_task(count, record, regs, &data, &raw, ctx);
+ 		raw_spin_unlock(&ctx->lock);
+ unlock:
+ 		rcu_read_unlock();
+diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
+index fe0272cd84a51a..a29df4b02a2ed9 100644
+--- a/kernel/irq/internals.h
++++ b/kernel/irq/internals.h
+@@ -441,10 +441,6 @@ static inline struct cpumask *irq_desc_get_pending_mask(struct irq_desc *desc)
+ {
+ 	return desc->pending_mask;
+ }
+-static inline bool handle_enforce_irqctx(struct irq_data *data)
+-{
+-	return irqd_is_handle_enforce_irqctx(data);
+-}
+ bool irq_fixup_move_pending(struct irq_desc *desc, bool force_clear);
+ #else /* CONFIG_GENERIC_PENDING_IRQ */
+ static inline bool irq_can_move_pcntxt(struct irq_data *data)
+@@ -471,11 +467,12 @@ static inline bool irq_fixup_move_pending(struct irq_desc *desc, bool fclear)
+ {
+ 	return false;
+ }
++#endif /* !CONFIG_GENERIC_PENDING_IRQ */
++
+ static inline bool handle_enforce_irqctx(struct irq_data *data)
+ {
+-	return false;
++	return irqd_is_handle_enforce_irqctx(data);
+ }
+-#endif /* !CONFIG_GENERIC_PENDING_IRQ */
+ 
+ #if !defined(CONFIG_IRQ_DOMAIN) || !defined(CONFIG_IRQ_DOMAIN_HIERARCHY)
+ static inline int irq_domain_activate_irq(struct irq_data *data, bool reserve)
+diff --git a/kernel/module/main.c b/kernel/module/main.c
+index 49b9bca9de12f7..93a07387af3b75 100644
+--- a/kernel/module/main.c
++++ b/kernel/module/main.c
+@@ -2583,7 +2583,10 @@ static noinline int do_init_module(struct module *mod)
+ #endif
+ 	ret = module_enable_rodata_ro(mod, true);
+ 	if (ret)
+-		goto fail_mutex_unlock;
++		pr_warn("%s: module_enable_rodata_ro() returned %d, "
++			"ro_after_init data might still be writable\n",
++			mod->name, ret);
++
+ 	mod_tree_remove_init(mod);
+ 	module_arch_freeing_init(mod);
+ 	for_class_mod_mem_type(type, init) {
+@@ -2622,8 +2625,6 @@ static noinline int do_init_module(struct module *mod)
+ 
+ 	return 0;
+ 
+-fail_mutex_unlock:
+-	mutex_unlock(&module_mutex);
+ fail_free_freeinit:
+ 	kfree(freeinit);
+ fail:
+diff --git a/kernel/padata.c b/kernel/padata.c
+index d899f34558afcc..22770372bdf329 100644
+--- a/kernel/padata.c
++++ b/kernel/padata.c
+@@ -47,6 +47,22 @@ struct padata_mt_job_state {
+ static void padata_free_pd(struct parallel_data *pd);
+ static void __init padata_mt_helper(struct work_struct *work);
+ 
++static inline void padata_get_pd(struct parallel_data *pd)
++{
++	refcount_inc(&pd->refcnt);
++}
++
++static inline void padata_put_pd_cnt(struct parallel_data *pd, int cnt)
++{
++	if (refcount_sub_and_test(cnt, &pd->refcnt))
++		padata_free_pd(pd);
++}
++
++static inline void padata_put_pd(struct parallel_data *pd)
++{
++	padata_put_pd_cnt(pd, 1);
++}
++
+ static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
+ {
+ 	int cpu, target_cpu;
+@@ -206,7 +222,7 @@ int padata_do_parallel(struct padata_shell *ps,
+ 	if ((pinst->flags & PADATA_RESET))
+ 		goto out;
+ 
+-	refcount_inc(&pd->refcnt);
++	padata_get_pd(pd);
+ 	padata->pd = pd;
+ 	padata->cb_cpu = *cb_cpu;
+ 
+@@ -336,8 +352,14 @@ static void padata_reorder(struct parallel_data *pd)
+ 	smp_mb();
+ 
+ 	reorder = per_cpu_ptr(pd->reorder_list, pd->cpu);
+-	if (!list_empty(&reorder->list) && padata_find_next(pd, false))
++	if (!list_empty(&reorder->list) && padata_find_next(pd, false)) {
++		/*
++		 * Another context (e.g. the padata_serial_worker) can finish the request.
++		 * To avoid a UAF, take a pd reference here and drop it once reorder_work finishes.
++		 */
++		padata_get_pd(pd);
+ 		queue_work(pinst->serial_wq, &pd->reorder_work);
++	}
+ }
+ 
+ static void invoke_padata_reorder(struct work_struct *work)
+@@ -348,6 +370,8 @@ static void invoke_padata_reorder(struct work_struct *work)
+ 	pd = container_of(work, struct parallel_data, reorder_work);
+ 	padata_reorder(pd);
+ 	local_bh_enable();
++	/* Pairs with putting the reorder_work in the serial_wq */
++	padata_put_pd(pd);
+ }
+ 
+ static void padata_serial_worker(struct work_struct *serial_work)
+@@ -380,8 +404,7 @@ static void padata_serial_worker(struct work_struct *serial_work)
+ 	}
+ 	local_bh_enable();
+ 
+-	if (refcount_sub_and_test(cnt, &pd->refcnt))
+-		padata_free_pd(pd);
++	padata_put_pd_cnt(pd, cnt);
+ }
+ 
+ /**
+@@ -688,8 +711,7 @@ static int padata_replace(struct padata_instance *pinst)
+ 	synchronize_rcu();
+ 
+ 	list_for_each_entry_continue_reverse(ps, &pinst->pslist, list)
+-		if (refcount_dec_and_test(&ps->opd->refcnt))
+-			padata_free_pd(ps->opd);
++		padata_put_pd(ps->opd);
+ 
+ 	pinst->flags &= ~PADATA_RESET;
+ 
+@@ -977,7 +999,7 @@ static ssize_t padata_sysfs_store(struct kobject *kobj, struct attribute *attr,
+ 
+ 	pinst = kobj2pinst(kobj);
+ 	pentry = attr2pentry(attr);
+-	if (pentry->show)
++	if (pentry->store)
+ 		ret = pentry->store(pinst, attr, buf, count);
+ 
+ 	return ret;
+@@ -1128,11 +1150,16 @@ void padata_free_shell(struct padata_shell *ps)
+ 	if (!ps)
+ 		return;
+ 
++	/*
++	 * Wait for all _do_serial calls to finish to avoid touching
++	 * freed pd and ps structures.
++	 */
++	synchronize_rcu();
++
+ 	mutex_lock(&ps->pinst->lock);
+ 	list_del(&ps->list);
+ 	pd = rcu_dereference_protected(ps->pd, 1);
+-	if (refcount_dec_and_test(&pd->refcnt))
+-		padata_free_pd(pd);
++	padata_put_pd(pd);
+ 	mutex_unlock(&ps->pinst->lock);
+ 
+ 	kfree(ps);
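The padata_get_pd()/padata_put_pd_cnt()/padata_put_pd() trio wraps the same refcount_t operations the open-coded sites used, so every put funnels through one free path; the reorder hunk then uses an extra get/put pair to keep pd alive across the queued reorder_work. The helper pattern in self-contained form, with C11 atomics standing in for refcount_t (toy code, not the kernel types):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct pd { atomic_int refcnt; };

static void pd_free(struct pd *pd)
{
	printf("freeing pd\n");
	free(pd);
}

static void pd_get(struct pd *pd)
{
	atomic_fetch_add(&pd->refcnt, 1);
}

static void pd_put_cnt(struct pd *pd, int cnt)
{
	/* fetch_sub returns the old value; old == cnt means these were
	 * the last references, mirroring refcount_sub_and_test(). */
	if (atomic_fetch_sub(&pd->refcnt, cnt) == cnt)
		pd_free(pd);
}

static void pd_put(struct pd *pd)
{
	pd_put_cnt(pd, 1);
}

int main(void)
{
	struct pd *pd = malloc(sizeof(*pd));

	atomic_init(&pd->refcnt, 1);
	pd_get(pd);	/* e.g. before queueing reorder_work */
	pd_put(pd);	/* worker done */
	pd_put(pd);	/* final reference: frees */
	return 0;
}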
+diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
+index e35829d360390f..b483fcea811b1a 100644
+--- a/kernel/power/hibernate.c
++++ b/kernel/power/hibernate.c
+@@ -608,7 +608,11 @@ int hibernation_platform_enter(void)
+ 
+ 	local_irq_disable();
+ 	system_state = SYSTEM_SUSPEND;
+-	syscore_suspend();
++
++	error = syscore_suspend();
++	if (error)
++		goto Enable_irqs;
++
+ 	if (pm_wakeup_pending()) {
+ 		error = -EAGAIN;
+ 		goto Power_up;
+@@ -620,6 +624,7 @@ int hibernation_platform_enter(void)
+ 
+  Power_up:
+ 	syscore_resume();
++ Enable_irqs:
+ 	system_state = SYSTEM_RUNNING;
+ 	local_irq_enable();
+ 
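The hibernate fix is a small unwind-ordering repair: once syscore_suspend() is allowed to fail, its error must skip syscore_resume(), since nothing was suspended, but still fall through to re-enabling interrupts. The same goto-label shape in a runnable toy (the step functions are stand-ins):

#include <stdio.h>

static void step_disable_irqs(void) { printf("irqs off\n"); }
static int  step_syscore_suspend(int fail) { return fail ? -5 : 0; }
static void step_syscore_resume(void) { printf("syscore resume\n"); }
static void step_enable_irqs(void) { printf("irqs on\n"); }

static int enter(int fail)
{
	int error;

	step_disable_irqs();

	error = step_syscore_suspend(fail);
	if (error)
		goto enable_irqs;	/* do not resume what never suspended */

	/* ... the platform enter step would happen here ... */

	step_syscore_resume();
enable_irqs:
	step_enable_irqs();
	return error;
}

int main(void)
{
	printf("rc=%d\n", enter(1));
	return 0;
}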
+diff --git a/kernel/printk/internal.h b/kernel/printk/internal.h
+index 3fcb48502adbd8..5eef70000b439d 100644
+--- a/kernel/printk/internal.h
++++ b/kernel/printk/internal.h
+@@ -335,3 +335,9 @@ bool printk_get_next_message(struct printk_message *pmsg, u64 seq,
+ void console_prepend_dropped(struct printk_message *pmsg, unsigned long dropped);
+ void console_prepend_replay(struct printk_message *pmsg);
+ #endif
++
++#ifdef CONFIG_SMP
++bool is_printk_cpu_sync_owner(void);
++#else
++static inline bool is_printk_cpu_sync_owner(void) { return false; }
++#endif
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index beb808f4c367b9..7530df62ff7cbc 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -4892,6 +4892,11 @@ void console_try_replay_all(void)
+ static atomic_t printk_cpu_sync_owner = ATOMIC_INIT(-1);
+ static atomic_t printk_cpu_sync_nested = ATOMIC_INIT(0);
+ 
++bool is_printk_cpu_sync_owner(void)
++{
++	return (atomic_read(&printk_cpu_sync_owner) == raw_smp_processor_id());
++}
++
+ /**
+  * __printk_cpu_sync_wait() - Busy wait until the printk cpu-reentrant
+  *                            spinning lock is not owned by any CPU.
+diff --git a/kernel/printk/printk_safe.c b/kernel/printk/printk_safe.c
+index 2b35a9d3919d8b..e6198da7c7354a 100644
+--- a/kernel/printk/printk_safe.c
++++ b/kernel/printk/printk_safe.c
+@@ -43,10 +43,15 @@ bool is_printk_legacy_deferred(void)
+ 	/*
+ 	 * The per-CPU variable @printk_context can be read safely in any
+ 	 * context. CPU migration is always disabled when set.
++	 *
++	 * A context holding the printk_cpu_sync must not spin waiting on
++	 * a lock held by another CPU. For legacy printing, that lock could
++	 * be the console_lock or the port lock.
+ 	 */
+ 	return (force_legacy_kthread() ||
+ 		this_cpu_read(printk_context) ||
+-		in_nmi());
++		in_nmi() ||
++		is_printk_cpu_sync_owner());
+ }
+ 
+ asmlinkage int vprintk(const char *fmt, va_list args)
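is_printk_cpu_sync_owner() is just an ownership check against a single atomic slot, cheap enough to call from any context, and is_printk_legacy_deferred() uses it to defer printing whenever the current CPU already holds the cpu-sync. A userspace analog of the owner test (illustrative only; thread ids stand in for CPU ids):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int sync_owner = -1;	/* -1: unowned, else owner id */

static bool is_sync_owner(int self)
{
	return atomic_load(&sync_owner) == self;
}

int main(void)
{
	atomic_store(&sync_owner, 3);
	printf("id 3 owns sync: %s\n", is_sync_owner(3) ? "yes" : "no");
	printf("id 1 owns sync: %s\n", is_sync_owner(1) ? "yes" : "no");
	return 0;
}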
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index d07dc87787dff3..aba41c69f09c42 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -2024,10 +2024,10 @@ void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
+ 	 */
+ 	uclamp_rq_inc(rq, p);
+ 
+-	if (!(flags & ENQUEUE_RESTORE)) {
++	psi_enqueue(p, flags);
++
++	if (!(flags & ENQUEUE_RESTORE))
+ 		sched_info_enqueue(rq, p);
+-		psi_enqueue(p, flags & ENQUEUE_MIGRATED);
+-	}
+ 
+ 	if (sched_core_enabled(rq))
+ 		sched_core_enqueue(rq, p);
+@@ -2044,10 +2044,10 @@ inline bool dequeue_task(struct rq *rq, struct task_struct *p, int flags)
+ 	if (!(flags & DEQUEUE_NOCLOCK))
+ 		update_rq_clock(rq);
+ 
+-	if (!(flags & DEQUEUE_SAVE)) {
++	if (!(flags & DEQUEUE_SAVE))
+ 		sched_info_dequeue(rq, p);
+-		psi_dequeue(p, !(flags & DEQUEUE_SLEEP));
+-	}
++
++	psi_dequeue(p, flags);
+ 
+ 	/*
+ 	 * Must be before ->dequeue_task() because ->dequeue_task() can 'fail'
+@@ -6507,6 +6507,45 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
+ #define SM_PREEMPT		1
+ #define SM_RTLOCK_WAIT		2
+ 
++/*
++ * Helper function for __schedule()
++ *
++ * If the task does not have signals pending, deactivate it;
++ * otherwise mark the task's __state as RUNNING.
++ */
++static bool try_to_block_task(struct rq *rq, struct task_struct *p,
++			      unsigned long task_state)
++{
++	int flags = DEQUEUE_NOCLOCK;
++
++	if (signal_pending_state(task_state, p)) {
++		WRITE_ONCE(p->__state, TASK_RUNNING);
++		return false;
++	}
++
++	p->sched_contributes_to_load =
++		(task_state & TASK_UNINTERRUPTIBLE) &&
++		!(task_state & TASK_NOLOAD) &&
++		!(task_state & TASK_FROZEN);
++
++	if (unlikely(is_special_task_state(task_state)))
++		flags |= DEQUEUE_SPECIAL;
++
++	/*
++	 * __schedule()			ttwu()
++	 *   prev_state = prev->state;    if (p->on_rq && ...)
++	 *   if (prev_state)		    goto out;
++	 *     p->on_rq = 0;		  smp_acquire__after_ctrl_dep();
++	 *				  p->state = TASK_WAKING
++	 *
++	 * Where __schedule() and ttwu() have matching control dependencies.
++	 *
++	 * After this, schedule() must not care about p->state any more.
++	 */
++	block_task(rq, p, flags);
++	return true;
++}
++
+ /*
+  * __schedule() is the main scheduler function.
+  *
+@@ -6554,7 +6593,6 @@ static void __sched notrace __schedule(int sched_mode)
+ 	 * as a preemption by schedule_debug() and RCU.
+ 	 */
+ 	bool preempt = sched_mode > SM_NONE;
+-	bool block = false;
+ 	unsigned long *switch_count;
+ 	unsigned long prev_state;
+ 	struct rq_flags rf;
+@@ -6615,33 +6653,7 @@ static void __sched notrace __schedule(int sched_mode)
+ 			goto picked;
+ 		}
+ 	} else if (!preempt && prev_state) {
+-		if (signal_pending_state(prev_state, prev)) {
+-			WRITE_ONCE(prev->__state, TASK_RUNNING);
+-		} else {
+-			int flags = DEQUEUE_NOCLOCK;
+-
+-			prev->sched_contributes_to_load =
+-				(prev_state & TASK_UNINTERRUPTIBLE) &&
+-				!(prev_state & TASK_NOLOAD) &&
+-				!(prev_state & TASK_FROZEN);
+-
+-			if (unlikely(is_special_task_state(prev_state)))
+-				flags |= DEQUEUE_SPECIAL;
+-
+-			/*
+-			 * __schedule()			ttwu()
+-			 *   prev_state = prev->state;    if (p->on_rq && ...)
+-			 *   if (prev_state)		    goto out;
+-			 *     p->on_rq = 0;		  smp_acquire__after_ctrl_dep();
+-			 *				  p->state = TASK_WAKING
+-			 *
+-			 * Where __schedule() and ttwu() have matching control dependencies.
+-			 *
+-			 * After this, schedule() must not care about p->state any more.
+-			 */
+-			block_task(rq, prev, flags);
+-			block = true;
+-		}
++		try_to_block_task(rq, prev, prev_state);
+ 		switch_count = &prev->nvcsw;
+ 	}
+ 
+@@ -6686,7 +6698,8 @@ static void __sched notrace __schedule(int sched_mode)
+ 
+ 		migrate_disable_switch(rq, prev);
+ 		psi_account_irqtime(rq, prev, next);
+-		psi_sched_switch(prev, next, block);
++		psi_sched_switch(prev, next, !task_on_rq_queued(prev) ||
++					     prev->se.sched_delayed);
+ 
+ 		trace_sched_switch(preempt, prev, next, prev_state);
+ 
+diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
+index 28c77904ea749f..e51d5ce730be15 100644
+--- a/kernel/sched/cpufreq_schedutil.c
++++ b/kernel/sched/cpufreq_schedutil.c
+@@ -83,7 +83,7 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
+ 
+ 	if (unlikely(sg_policy->limits_changed)) {
+ 		sg_policy->limits_changed = false;
+-		sg_policy->need_freq_update = true;
++		sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);
+ 		return true;
+ 	}
+ 
+@@ -96,7 +96,7 @@ static bool sugov_update_next_freq(struct sugov_policy *sg_policy, u64 time,
+ 				   unsigned int next_freq)
+ {
+ 	if (sg_policy->need_freq_update)
+-		sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);
++		sg_policy->need_freq_update = false;
+ 	else if (sg_policy->next_freq == next_freq)
+ 		return false;
+ 
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 60be5f8bbe7115..65e7be64487202 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -5647,9 +5647,9 @@ static struct sched_entity *
+ pick_next_entity(struct rq *rq, struct cfs_rq *cfs_rq)
+ {
+ 	/*
+-	 * Enabling NEXT_BUDDY will affect latency but not fairness.
++	 * Picking the ->next buddy will affect latency but not fairness.
+ 	 */
+-	if (sched_feat(NEXT_BUDDY) &&
++	if (sched_feat(PICK_BUDDY) &&
+ 	    cfs_rq->next && entity_eligible(cfs_rq, cfs_rq->next)) {
+ 		/* ->next will never be delayed */
+ 		SCHED_WARN_ON(cfs_rq->next->sched_delayed);
+@@ -9418,6 +9418,8 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
+ 	int tsk_cache_hot;
+ 
+ 	lockdep_assert_rq_held(env->src_rq);
++	if (p->sched_task_hot)
++		p->sched_task_hot = 0;
+ 
+ 	/*
+ 	 * We do not migrate tasks that are:
+@@ -9490,10 +9492,8 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
+ 
+ 	if (tsk_cache_hot <= 0 ||
+ 	    env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
+-		if (tsk_cache_hot == 1) {
+-			schedstat_inc(env->sd->lb_hot_gained[env->idle]);
+-			schedstat_inc(p->stats.nr_forced_migrations);
+-		}
++		if (tsk_cache_hot == 1)
++			p->sched_task_hot = 1;
+ 		return 1;
+ 	}
+ 
+@@ -9508,6 +9508,12 @@ static void detach_task(struct task_struct *p, struct lb_env *env)
+ {
+ 	lockdep_assert_rq_held(env->src_rq);
+ 
++	if (p->sched_task_hot) {
++		p->sched_task_hot = 0;
++		schedstat_inc(env->sd->lb_hot_gained[env->idle]);
++		schedstat_inc(p->stats.nr_forced_migrations);
++	}
++
+ 	deactivate_task(env->src_rq, p, DEQUEUE_NOCLOCK);
+ 	set_task_cpu(p, env->dst_cpu);
+ }
+@@ -9668,6 +9674,9 @@ static int detach_tasks(struct lb_env *env)
+ 
+ 		continue;
+ next:
++		if (p->sched_task_hot)
++			schedstat_inc(p->stats.nr_failed_migrations_hot);
++
+ 		list_move(&p->se.group_node, tasks);
+ 	}
+ 
+diff --git a/kernel/sched/features.h b/kernel/sched/features.h
+index 290874079f60d9..050d7503064e3a 100644
+--- a/kernel/sched/features.h
++++ b/kernel/sched/features.h
+@@ -31,6 +31,15 @@ SCHED_FEAT(PREEMPT_SHORT, true)
+  */
+ SCHED_FEAT(NEXT_BUDDY, false)
+ 
++/*
++ * Allow completely ignoring cfs_rq->next; which can be set from various
++ * places:
++ *   - NEXT_BUDDY (wakeup preemption)
++ *   - yield_to_task()
++ *   - cgroup dequeue / pick
++ */
++SCHED_FEAT(PICK_BUDDY, true)
++
+ /*
+  * Consider buddies to be cache hot, decreases the likeliness of a
+  * cache buddy being migrated away, increases cache locality.
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index f2ef520513c4a2..5426969cf478a0 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -2095,34 +2095,6 @@ static inline const struct cpumask *task_user_cpus(struct task_struct *p)
+ 
+ #endif /* CONFIG_SMP */
+ 
+-#include "stats.h"
+-
+-#if defined(CONFIG_SCHED_CORE) && defined(CONFIG_SCHEDSTATS)
+-
+-extern void __sched_core_account_forceidle(struct rq *rq);
+-
+-static inline void sched_core_account_forceidle(struct rq *rq)
+-{
+-	if (schedstat_enabled())
+-		__sched_core_account_forceidle(rq);
+-}
+-
+-extern void __sched_core_tick(struct rq *rq);
+-
+-static inline void sched_core_tick(struct rq *rq)
+-{
+-	if (sched_core_enabled(rq) && schedstat_enabled())
+-		__sched_core_tick(rq);
+-}
+-
+-#else /* !(CONFIG_SCHED_CORE && CONFIG_SCHEDSTATS): */
+-
+-static inline void sched_core_account_forceidle(struct rq *rq) { }
+-
+-static inline void sched_core_tick(struct rq *rq) { }
+-
+-#endif /* !(CONFIG_SCHED_CORE && CONFIG_SCHEDSTATS) */
+-
+ #ifdef CONFIG_CGROUP_SCHED
+ 
+ /*
+@@ -3209,6 +3181,34 @@ extern void nohz_run_idle_balance(int cpu);
+ static inline void nohz_run_idle_balance(int cpu) { }
+ #endif
+ 
++#include "stats.h"
++
++#if defined(CONFIG_SCHED_CORE) && defined(CONFIG_SCHEDSTATS)
++
++extern void __sched_core_account_forceidle(struct rq *rq);
++
++static inline void sched_core_account_forceidle(struct rq *rq)
++{
++	if (schedstat_enabled())
++		__sched_core_account_forceidle(rq);
++}
++
++extern void __sched_core_tick(struct rq *rq);
++
++static inline void sched_core_tick(struct rq *rq)
++{
++	if (sched_core_enabled(rq) && schedstat_enabled())
++		__sched_core_tick(rq);
++}
++
++#else /* !(CONFIG_SCHED_CORE && CONFIG_SCHEDSTATS): */
++
++static inline void sched_core_account_forceidle(struct rq *rq) { }
++
++static inline void sched_core_tick(struct rq *rq) { }
++
++#endif /* !(CONFIG_SCHED_CORE && CONFIG_SCHEDSTATS) */
++
+ #ifdef CONFIG_IRQ_TIME_ACCOUNTING
+ 
+ struct irqtime {
+diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h
+index 767e098a3bd132..6ade91bce63ee3 100644
+--- a/kernel/sched/stats.h
++++ b/kernel/sched/stats.h
+@@ -127,21 +127,29 @@ static inline void psi_account_irqtime(struct rq *rq, struct task_struct *curr,
+  * go through migration requeues. In this case, *sleeping* states need
+  * to be transferred.
+  */
+-static inline void psi_enqueue(struct task_struct *p, bool migrate)
++static inline void psi_enqueue(struct task_struct *p, int flags)
+ {
+ 	int clear = 0, set = 0;
+ 
+ 	if (static_branch_likely(&psi_disabled))
+ 		return;
+ 
++	/* Same runqueue, nothing changed for psi */
++	if (flags & ENQUEUE_RESTORE)
++		return;
++
++	/* psi_sched_switch() will handle the flags */
++	if (task_on_cpu(task_rq(p), p))
++		return;
++
+ 	if (p->se.sched_delayed) {
+ 		/* CPU migration of "sleeping" task */
+-		SCHED_WARN_ON(!migrate);
++		SCHED_WARN_ON(!(flags & ENQUEUE_MIGRATED));
+ 		if (p->in_memstall)
+ 			set |= TSK_MEMSTALL;
+ 		if (p->in_iowait)
+ 			set |= TSK_IOWAIT;
+-	} else if (migrate) {
++	} else if (flags & ENQUEUE_MIGRATED) {
+ 		/* CPU migration of runnable task */
+ 		set = TSK_RUNNING;
+ 		if (p->in_memstall)
+@@ -158,17 +166,14 @@ static inline void psi_enqueue(struct task_struct *p, bool migrate)
+ 	psi_task_change(p, clear, set);
+ }
+ 
+-static inline void psi_dequeue(struct task_struct *p, bool migrate)
++static inline void psi_dequeue(struct task_struct *p, int flags)
+ {
+ 	if (static_branch_likely(&psi_disabled))
+ 		return;
+ 
+-	/*
+-	 * When migrating a task to another CPU, clear all psi
+-	 * state. The enqueue callback above will work it out.
+-	 */
+-	if (migrate)
+-		psi_task_change(p, p->psi_flags, 0);
++	/* Same runqueue, nothing changed for psi */
++	if (flags & DEQUEUE_SAVE)
++		return;
+ 
+ 	/*
+ 	 * A voluntary sleep is a dequeue followed by a task switch. To
+@@ -176,6 +181,14 @@ static inline void psi_dequeue(struct task_struct *p, bool migrate)
+ 	 * TSK_RUNNING and TSK_IOWAIT for us when it moves TSK_ONCPU.
+ 	 * Do nothing here.
+ 	 */
++	if (flags & DEQUEUE_SLEEP)
++		return;
++
++	/*
++	 * When migrating a task to another CPU, clear all psi
++	 * state. The enqueue callback above will work it out.
++	 */
++	psi_task_change(p, p->psi_flags, 0);
+ }
+ 
+ static inline void psi_ttwu_dequeue(struct task_struct *p)
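+/*
+ * A minimal userspace sketch of the flag filtering the psi_enqueue()/
+ * psi_dequeue() rewrite above introduces: the boolean "migrate" becomes
+ * the scheduler's flag word, so no-op cases (same-runqueue save/restore,
+ * voluntary sleep) bail out before any psi state is touched.  The flag
+ * values below are stand-ins, not the kernel's real DEQUEUE_* bits.
+ *
+ *	#include <stdio.h>
+ *
+ *	#define DEQUEUE_SAVE  0x1
+ *	#define DEQUEUE_SLEEP 0x2
+ *
+ *	static void psi_dequeue_sketch(int flags, unsigned int *psi_state)
+ *	{
+ *		if (flags & DEQUEUE_SAVE)   // same runqueue: nothing changes
+ *			return;
+ *		if (flags & DEQUEUE_SLEEP)  // the task switch accounts this
+ *			return;
+ *		*psi_state = 0;             // migration: clear, enqueue redoes it
+ *	}
+ *
+ *	int main(void)
+ *	{
+ *		unsigned int state = 0xff;
+ *
+ *		psi_dequeue_sketch(DEQUEUE_SAVE, &state);
+ *		printf("after SAVE dequeue: %#x\n", state);      // unchanged
+ *		psi_dequeue_sketch(0, &state);
+ *		printf("after migration dequeue: %#x\n", state); // cleared
+ *		return 0;
+ *	}
+ */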
+diff --git a/kernel/sched/syscalls.c b/kernel/sched/syscalls.c
+index 1784ed1fb3fe5d..f9cb7896c1b966 100644
+--- a/kernel/sched/syscalls.c
++++ b/kernel/sched/syscalls.c
+@@ -1471,7 +1471,7 @@ int __sched yield_to(struct task_struct *p, bool preempt)
+ 	struct rq *rq, *p_rq;
+ 	int yielded = 0;
+ 
+-	scoped_guard (irqsave) {
++	scoped_guard (raw_spinlock_irqsave, &p->pi_lock) {
+ 		rq = this_rq();
+ 
+ again:
+diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
+index 50881898e758d8..449efaaa387a68 100644
+--- a/kernel/trace/bpf_trace.c
++++ b/kernel/trace/bpf_trace.c
+@@ -619,7 +619,8 @@ static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
+ 
+ static __always_inline u64
+ __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
+-			u64 flags, struct perf_sample_data *sd)
++			u64 flags, struct perf_raw_record *raw,
++			struct perf_sample_data *sd)
+ {
+ 	struct bpf_array *array = container_of(map, struct bpf_array, map);
+ 	unsigned int cpu = smp_processor_id();
+@@ -644,6 +645,8 @@ __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
+ 	if (unlikely(event->oncpu != cpu))
+ 		return -EOPNOTSUPP;
+ 
++	perf_sample_save_raw_data(sd, event, raw);
++
+ 	return perf_event_output(event, sd, regs);
+ }
+ 
+@@ -687,9 +690,8 @@ BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
+ 	}
+ 
+ 	perf_sample_data_init(sd, 0, 0);
+-	perf_sample_save_raw_data(sd, &raw);
+ 
+-	err = __bpf_perf_event_output(regs, map, flags, sd);
++	err = __bpf_perf_event_output(regs, map, flags, &raw, sd);
+ out:
+ 	this_cpu_dec(bpf_trace_nest_level);
+ 	preempt_enable();
+@@ -748,9 +750,8 @@ u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
+ 
+ 	perf_fetch_caller_regs(regs);
+ 	perf_sample_data_init(sd, 0, 0);
+-	perf_sample_save_raw_data(sd, &raw);
+ 
+-	ret = __bpf_perf_event_output(regs, map, flags, sd);
++	ret = __bpf_perf_event_output(regs, map, flags, &raw, sd);
+ out:
+ 	this_cpu_dec(bpf_event_output_nest_level);
+ 	preempt_enable();
+@@ -832,7 +833,7 @@ static int bpf_send_signal_common(u32 sig, enum pid_type type)
+ 	if (unlikely(is_global_init(current)))
+ 		return -EPERM;
+ 
+-	if (irqs_disabled()) {
++	if (!preemptible()) {
+ 		/* Do an early check on signal validity. Otherwise,
+ 		 * the error is lost in deferred irq_work.
+ 		 */
+diff --git a/lib/rhashtable.c b/lib/rhashtable.c
+index 6c902639728b76..0e9a1d4cf89be0 100644
+--- a/lib/rhashtable.c
++++ b/lib/rhashtable.c
+@@ -584,10 +584,6 @@ static struct bucket_table *rhashtable_insert_one(
+ 	 */
+ 	rht_assign_locked(bkt, obj);
+ 
+-	atomic_inc(&ht->nelems);
+-	if (rht_grow_above_75(ht, tbl))
+-		schedule_work(&ht->run_work);
+-
+ 	return NULL;
+ }
+ 
+@@ -615,15 +611,23 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
+ 			new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
+ 			data = ERR_PTR(-EAGAIN);
+ 		} else {
++			bool inserted;
++
+ 			flags = rht_lock(tbl, bkt);
+ 			data = rhashtable_lookup_one(ht, bkt, tbl,
+ 						     hash, key, obj);
+ 			new_tbl = rhashtable_insert_one(ht, bkt, tbl,
+ 							hash, obj, data);
++			inserted = data && !new_tbl;
++			if (inserted)
++				atomic_inc(&ht->nelems);
+ 			if (PTR_ERR(new_tbl) != -EEXIST)
+ 				data = ERR_CAST(new_tbl);
+ 
+ 			rht_unlock(tbl, bkt, flags);
++
++			if (inserted && rht_grow_above_75(ht, tbl))
++				schedule_work(&ht->run_work);
+ 		}
+ 	} while (!IS_ERR_OR_NULL(new_tbl));
+ 
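+/*
+ * A sketch of the ordering the rhashtable fix above enforces: bump the
+ * element count only once the insert is known to have succeeded, and
+ * kick off any grow work after the bucket lock is dropped, so failed
+ * inserts are never counted and work is never scheduled under the lock.
+ * Compilable pthreads analogue; names and the 75% threshold arithmetic
+ * are illustrative.
+ *
+ *	#include <pthread.h>
+ *	#include <stdbool.h>
+ *	#include <stdio.h>
+ *
+ *	static pthread_mutex_t bucket_lock = PTHREAD_MUTEX_INITIALIZER;
+ *	static int nelems, nbuckets = 8;
+ *
+ *	static bool do_insert(void) { return true; }      // stand-in
+ *	static void schedule_grow(void) { puts("grow scheduled"); }
+ *
+ *	static void insert_one(void)
+ *	{
+ *		bool inserted;
+ *
+ *		pthread_mutex_lock(&bucket_lock);
+ *		inserted = do_insert();
+ *		if (inserted)
+ *			nelems++;                 // counted under the lock
+ *		pthread_mutex_unlock(&bucket_lock);
+ *
+ *		// expensive follow-up runs outside the critical section
+ *		if (inserted && nelems * 4 > nbuckets * 3)
+ *			schedule_grow();
+ *	}
+ *
+ *	int main(void)
+ *	{
+ *		for (int i = 0; i < 8; i++)
+ *			insert_one();
+ *		return 0;
+ *	}
+ */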
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 53db98d2c4a1b3..ae1d184d035a4d 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -1139,6 +1139,7 @@ void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
+ {
+ 	struct mem_cgroup *iter;
+ 	int ret = 0;
++	int i = 0;
+ 
+ 	BUG_ON(mem_cgroup_is_root(memcg));
+ 
+@@ -1147,8 +1148,12 @@ void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
+ 		struct task_struct *task;
+ 
+ 		css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
+-		while (!ret && (task = css_task_iter_next(&it)))
++		while (!ret && (task = css_task_iter_next(&it))) {
++			/* Avoid potential softlockup warning */
++			if ((++i & 1023) == 0)
++				cond_resched();
+ 			ret = fn(task, arg);
++		}
+ 		css_task_iter_end(&it);
+ 		if (ret) {
+ 			mem_cgroup_iter_break(memcg, iter);
+diff --git a/mm/oom_kill.c b/mm/oom_kill.c
+index 4d7a0004df2cac..8aa712afd8ae1a 100644
+--- a/mm/oom_kill.c
++++ b/mm/oom_kill.c
+@@ -45,6 +45,7 @@
+ #include <linux/init.h>
+ #include <linux/mmu_notifier.h>
+ #include <linux/cred.h>
++#include <linux/nmi.h>
+ 
+ #include <asm/tlb.h>
+ #include "internal.h"
+@@ -431,10 +432,15 @@ static void dump_tasks(struct oom_control *oc)
+ 		mem_cgroup_scan_tasks(oc->memcg, dump_task, oc);
+ 	else {
+ 		struct task_struct *p;
++		int i = 0;
+ 
+ 		rcu_read_lock();
+-		for_each_process(p)
++		for_each_process(p) {
++			/* Avoid potential softlockup warning */
++			if ((++i & 1023) == 0)
++				touch_softlockup_watchdog();
+ 			dump_task(p, oc);
++		}
+ 		rcu_read_unlock();
+ 	}
+ }
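+/*
+ * The memcontrol and oom_kill hunks above insert the same throttle into
+ * long task walks: (++i & 1023) == 0 fires once every 1024 iterations,
+ * a cheap power-of-two substitute for (i % 1024 == 0).  The sleepable
+ * path calls cond_resched(); the RCU walk, where sleeping is forbidden,
+ * pets the watchdog instead.  The mask trick in isolation:
+ *
+ *	#include <stdio.h>
+ *
+ *	int main(void)
+ *	{
+ *		int fired = 0;
+ *
+ *		for (int i = 0, n = 0; n < 5000; n++)
+ *			if ((++i & 1023) == 0)
+ *				fired++;   // cond_resched()/watchdog pet here
+ *
+ *		printf("fired %d times over 5000 iterations\n", fired); // 4
+ *		return 0;
+ *	}
+ */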
+diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
+index d6f9fae06a9d81..aa6c714892ec9d 100644
+--- a/net/ax25/af_ax25.c
++++ b/net/ax25/af_ax25.c
+@@ -467,7 +467,7 @@ static int ax25_ctl_ioctl(const unsigned int cmd, void __user *arg)
+ 	goto out_put;
+ }
+ 
+-static void ax25_fillin_cb_from_dev(ax25_cb *ax25, ax25_dev *ax25_dev)
++static void ax25_fillin_cb_from_dev(ax25_cb *ax25, const ax25_dev *ax25_dev)
+ {
+ 	ax25->rtt     = msecs_to_jiffies(ax25_dev->values[AX25_VALUES_T1]) / 2;
+ 	ax25->t1      = msecs_to_jiffies(ax25_dev->values[AX25_VALUES_T1]);
+@@ -677,22 +677,22 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname,
+ 			break;
+ 		}
+ 
+-		rtnl_lock();
+-		dev = __dev_get_by_name(&init_net, devname);
++		rcu_read_lock();
++		dev = dev_get_by_name_rcu(&init_net, devname);
+ 		if (!dev) {
+-			rtnl_unlock();
++			rcu_read_unlock();
+ 			res = -ENODEV;
+ 			break;
+ 		}
+ 
+ 		ax25->ax25_dev = ax25_dev_ax25dev(dev);
+ 		if (!ax25->ax25_dev) {
+-			rtnl_unlock();
++			rcu_read_unlock();
+ 			res = -ENODEV;
+ 			break;
+ 		}
+ 		ax25_fillin_cb(ax25, ax25->ax25_dev);
+-		rtnl_unlock();
++		rcu_read_unlock();
+ 		break;
+ 
+ 	default:
+diff --git a/net/ax25/ax25_dev.c b/net/ax25/ax25_dev.c
+index 9efd6690b34436..3733c0254a5084 100644
+--- a/net/ax25/ax25_dev.c
++++ b/net/ax25/ax25_dev.c
+@@ -90,7 +90,7 @@ void ax25_dev_device_up(struct net_device *dev)
+ 
+ 	spin_lock_bh(&ax25_dev_lock);
+ 	list_add(&ax25_dev->list, &ax25_dev_list);
+-	dev->ax25_ptr     = ax25_dev;
++	rcu_assign_pointer(dev->ax25_ptr, ax25_dev);
+ 	spin_unlock_bh(&ax25_dev_lock);
+ 
+ 	ax25_register_dev_sysctl(ax25_dev);
+@@ -125,7 +125,7 @@ void ax25_dev_device_down(struct net_device *dev)
+ 		}
+ 	}
+ 
+-	dev->ax25_ptr = NULL;
++	RCU_INIT_POINTER(dev->ax25_ptr, NULL);
+ 	spin_unlock_bh(&ax25_dev_lock);
+ 	netdev_put(dev, &ax25_dev->dev_tracker);
+ 	ax25_dev_put(ax25_dev);
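+/*
+ * The ax25_dev hunks above publish dev->ax25_ptr with
+ * rcu_assign_pointer() so lockless readers that observe the pointer
+ * also observe the object's initialized fields, and tear it down with
+ * RCU_INIT_POINTER() (storing NULL needs no barrier).  In C11
+ * memory-model terms the publish side is roughly a release store paired
+ * with an acquire load; a rough analogue, not the kernel primitive:
+ *
+ *	#include <stdatomic.h>
+ *
+ *	struct ax25_dev_sketch { int values[4]; };
+ *
+ *	static _Atomic(struct ax25_dev_sketch *) ax25_ptr;
+ *
+ *	void publish(struct ax25_dev_sketch *d)
+ *	{
+ *		d->values[0] = 42;                        // init first ...
+ *		atomic_store_explicit(&ax25_ptr, d,
+ *				      memory_order_release); // ... then publish
+ *	}
+ *
+ *	struct ax25_dev_sketch *reader(void)
+ *	{
+ *		return atomic_load_explicit(&ax25_ptr, memory_order_acquire);
+ *	}
+ */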
+diff --git a/net/ax25/ax25_ip.c b/net/ax25/ax25_ip.c
+index 36249776c021e7..215d4ccf12b913 100644
+--- a/net/ax25/ax25_ip.c
++++ b/net/ax25/ax25_ip.c
+@@ -122,6 +122,7 @@ netdev_tx_t ax25_ip_xmit(struct sk_buff *skb)
+ 	if (dev == NULL)
+ 		dev = skb->dev;
+ 
++	rcu_read_lock();
+ 	if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL) {
+ 		kfree_skb(skb);
+ 		goto put;
+@@ -202,7 +203,7 @@ netdev_tx_t ax25_ip_xmit(struct sk_buff *skb)
+ 	ax25_queue_xmit(skb, dev);
+ 
+ put:
+-
++	rcu_read_unlock();
+ 	ax25_route_lock_unuse();
+ 	return NETDEV_TX_OK;
+ }
+diff --git a/net/ax25/ax25_out.c b/net/ax25/ax25_out.c
+index 3db76d2470e954..8bca2ace98e51b 100644
+--- a/net/ax25/ax25_out.c
++++ b/net/ax25/ax25_out.c
+@@ -39,10 +39,14 @@ ax25_cb *ax25_send_frame(struct sk_buff *skb, int paclen, const ax25_address *sr
+ 	 * specified.
+ 	 */
+ 	if (paclen == 0) {
+-		if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL)
++		rcu_read_lock();
++		ax25_dev = ax25_dev_ax25dev(dev);
++		if (!ax25_dev) {
++			rcu_read_unlock();
+ 			return NULL;
+-
++		}
+ 		paclen = ax25_dev->values[AX25_VALUES_PACLEN];
++		rcu_read_unlock();
+ 	}
+ 
+ 	/*
+@@ -53,13 +57,19 @@ ax25_cb *ax25_send_frame(struct sk_buff *skb, int paclen, const ax25_address *sr
+ 		return ax25;		/* It already existed */
+ 	}
+ 
+-	if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL)
++	rcu_read_lock();
++	ax25_dev = ax25_dev_ax25dev(dev);
++	if (!ax25_dev) {
++		rcu_read_unlock();
+ 		return NULL;
++	}
+ 
+-	if ((ax25 = ax25_create_cb()) == NULL)
++	if ((ax25 = ax25_create_cb()) == NULL) {
++		rcu_read_unlock();
+ 		return NULL;
+-
++	}
+ 	ax25_fillin_cb(ax25, ax25_dev);
++	rcu_read_unlock();
+ 
+ 	ax25->source_addr = *src;
+ 	ax25->dest_addr   = *dest;
+@@ -358,7 +368,9 @@ void ax25_queue_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+ 	unsigned char *ptr;
+ 
++	rcu_read_lock();
+ 	skb->protocol = ax25_type_trans(skb, ax25_fwd_dev(dev));
++	rcu_read_unlock();
+ 
+ 	ptr  = skb_push(skb, 1);
+ 	*ptr = 0x00;			/* KISS */
+diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c
+index b7c4d656a94b71..69de75db0c9c21 100644
+--- a/net/ax25/ax25_route.c
++++ b/net/ax25/ax25_route.c
+@@ -406,6 +406,7 @@ int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr)
+ 		ax25_route_lock_unuse();
+ 		return -EHOSTUNREACH;
+ 	}
++	rcu_read_lock();
+ 	if ((ax25->ax25_dev = ax25_dev_ax25dev(ax25_rt->dev)) == NULL) {
+ 		err = -EHOSTUNREACH;
+ 		goto put;
+@@ -442,6 +443,7 @@ int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr)
+ 	}
+ 
+ put:
++	rcu_read_unlock();
+ 	ax25_route_lock_unuse();
+ 	return err;
+ }
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 1867a6a8d76da9..2e0fe38d0e877d 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -1279,7 +1279,9 @@ int dev_change_name(struct net_device *dev, const char *newname)
+ rollback:
+ 	ret = device_rename(&dev->dev, dev->name);
+ 	if (ret) {
++		write_seqlock_bh(&netdev_rename_lock);
+ 		memcpy(dev->name, oldname, IFNAMSIZ);
++		write_sequnlock_bh(&netdev_rename_lock);
+ 		WRITE_ONCE(dev->name_assign_type, old_assign_type);
+ 		up_write(&devnet_rename_sem);
+ 		return ret;
+@@ -2134,8 +2136,8 @@ EXPORT_SYMBOL_GPL(net_dec_egress_queue);
+ #endif
+ 
+ #ifdef CONFIG_NET_CLS_ACT
+-DEFINE_STATIC_KEY_FALSE(tcf_bypass_check_needed_key);
+-EXPORT_SYMBOL(tcf_bypass_check_needed_key);
++DEFINE_STATIC_KEY_FALSE(tcf_sw_enabled_key);
++EXPORT_SYMBOL(tcf_sw_enabled_key);
+ #endif
+ 
+ DEFINE_STATIC_KEY_FALSE(netstamp_needed_key);
+@@ -4028,10 +4030,13 @@ static int tc_run(struct tcx_entry *entry, struct sk_buff *skb,
+ 	if (!miniq)
+ 		return ret;
+ 
+-	if (static_branch_unlikely(&tcf_bypass_check_needed_key)) {
+-		if (tcf_block_bypass_sw(miniq->block))
+-			return ret;
+-	}
++	/* Global bypass */
++	if (!static_branch_likely(&tcf_sw_enabled_key))
++		return ret;
++
++	/* Block-wise bypass */
++	if (tcf_block_bypass_sw(miniq->block))
++		return ret;
+ 
+ 	tc_skb_cb(skb)->mru = 0;
+ 	tc_skb_cb(skb)->post_ct = false;
+@@ -9590,6 +9595,10 @@ static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack
+ 			NL_SET_ERR_MSG(extack, "Program bound to different device");
+ 			return -EINVAL;
+ 		}
++		if (bpf_prog_is_dev_bound(new_prog->aux) && mode == XDP_MODE_SKB) {
++			NL_SET_ERR_MSG(extack, "Can't attach device-bound programs in generic mode");
++			return -EINVAL;
++		}
+ 		if (new_prog->expected_attach_type == BPF_XDP_DEVMAP) {
+ 			NL_SET_ERR_MSG(extack, "BPF_XDP_DEVMAP programs can not be attached to a device");
+ 			return -EINVAL;
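+/*
+ * The tc_run() hunk above layers two bypasses: a global static key
+ * (patched out of the instruction stream when no software tc filters
+ * exist anywhere) and the existing per-block check.  Userspace has no
+ * jump labels, so a plain global flag stands in for
+ * static_branch_likely() in this sketch:
+ *
+ *	#include <stdbool.h>
+ *
+ *	static bool tcf_sw_enabled;      // stand-in for the static key
+ *
+ *	struct block_sketch { bool bypass_sw; };
+ *
+ *	static int tc_run_sketch(const struct block_sketch *b, int ret)
+ *	{
+ *		if (!tcf_sw_enabled)     // global bypass
+ *			return ret;
+ *		if (b->bypass_sw)        // block-wise bypass
+ *			return ret;
+ *		// ... classify the packet ...
+ *		return ret;
+ *	}
+ */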
+diff --git a/net/core/filter.c b/net/core/filter.c
+index 46da488ff0703f..a2f990bf51e5e1 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -7662,7 +7662,7 @@ static const struct bpf_func_proto bpf_sock_ops_load_hdr_opt_proto = {
+ 	.gpl_only	= false,
+ 	.ret_type	= RET_INTEGER,
+ 	.arg1_type	= ARG_PTR_TO_CTX,
+-	.arg2_type	= ARG_PTR_TO_MEM,
++	.arg2_type	= ARG_PTR_TO_MEM | MEM_WRITE,
+ 	.arg3_type	= ARG_CONST_SIZE,
+ 	.arg4_type	= ARG_ANYTHING,
+ };
+diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
+index 86a2476678c484..5dd54a81339806 100644
+--- a/net/core/sysctl_net_core.c
++++ b/net/core/sysctl_net_core.c
+@@ -303,7 +303,7 @@ static int proc_do_dev_weight(const struct ctl_table *table, int write,
+ 	int ret, weight;
+ 
+ 	mutex_lock(&dev_weight_mutex);
+-	ret = proc_dointvec(table, write, buffer, lenp, ppos);
++	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+ 	if (!ret && write) {
+ 		weight = READ_ONCE(weight_p);
+ 		WRITE_ONCE(net_hotdata.dev_rx_weight, weight * dev_weight_rx_bias);
+@@ -396,6 +396,7 @@ static struct ctl_table net_core_table[] = {
+ 		.maxlen		= sizeof(int),
+ 		.mode		= 0644,
+ 		.proc_handler	= proc_do_dev_weight,
++		.extra1         = SYSCTL_ONE,
+ 	},
+ 	{
+ 		.procname	= "dev_weight_rx_bias",
+@@ -403,6 +404,7 @@ static struct ctl_table net_core_table[] = {
+ 		.maxlen		= sizeof(int),
+ 		.mode		= 0644,
+ 		.proc_handler	= proc_do_dev_weight,
++		.extra1         = SYSCTL_ONE,
+ 	},
+ 	{
+ 		.procname	= "dev_weight_tx_bias",
+@@ -410,6 +412,7 @@ static struct ctl_table net_core_table[] = {
+ 		.maxlen		= sizeof(int),
+ 		.mode		= 0644,
+ 		.proc_handler	= proc_do_dev_weight,
++		.extra1         = SYSCTL_ONE,
+ 	},
+ 	{
+ 		.procname	= "netdev_max_backlog",
+diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
+index 65cfe76dafbe2e..8b9692c35e7067 100644
+--- a/net/ethtool/ioctl.c
++++ b/net/ethtool/ioctl.c
+@@ -992,7 +992,13 @@ static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev,
+ 	if (rc)
+ 		return rc;
+ 
+-	if (ops->get_rxfh) {
++	/* Nonzero ring with RSS only makes sense if NIC adds them together */
++	if (cmd == ETHTOOL_SRXCLSRLINS && info.fs.flow_type & FLOW_RSS &&
++	    !ops->cap_rss_rxnfc_adds &&
++	    ethtool_get_flow_spec_ring(info.fs.ring_cookie))
++		return -EINVAL;
++
++	if (cmd == ETHTOOL_SRXFH && ops->get_rxfh) {
+ 		struct ethtool_rxfh_param rxfh = {};
+ 
+ 		rc = ops->get_rxfh(dev, &rxfh);
+diff --git a/net/ethtool/netlink.c b/net/ethtool/netlink.c
+index e3f0ef6b851bb4..4d18dc29b30438 100644
+--- a/net/ethtool/netlink.c
++++ b/net/ethtool/netlink.c
+@@ -90,7 +90,7 @@ int ethnl_ops_begin(struct net_device *dev)
+ 		pm_runtime_get_sync(dev->dev.parent);
+ 
+ 	if (!netif_device_present(dev) ||
+-	    dev->reg_state == NETREG_UNREGISTERING) {
++	    dev->reg_state >= NETREG_UNREGISTERING) {
+ 		ret = -ENODEV;
+ 		goto err;
+ 	}
+diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c
+index 40c5fbbd155d66..c0217476eb17f9 100644
+--- a/net/hsr/hsr_forward.c
++++ b/net/hsr/hsr_forward.c
+@@ -688,9 +688,12 @@ static int fill_frame_info(struct hsr_frame_info *frame,
+ 		frame->is_vlan = true;
+ 
+ 	if (frame->is_vlan) {
+-		if (skb->mac_len < offsetofend(struct hsr_vlan_ethhdr, vlanhdr))
++		/* Note: skb->mac_len might be wrong here. */
++		if (!pskb_may_pull(skb,
++				   skb_mac_offset(skb) +
++				   offsetofend(struct hsr_vlan_ethhdr, vlanhdr)))
+ 			return -EINVAL;
+-		vlan_hdr = (struct hsr_vlan_ethhdr *)ethhdr;
++		vlan_hdr = (struct hsr_vlan_ethhdr *)skb_mac_header(skb);
+ 		proto = vlan_hdr->vlanhdr.h_vlan_encapsulated_proto;
+ 		/* FIXME: */
+ 		netdev_warn_once(skb->dev, "VLAN not yet supported");
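+/*
+ * The hsr fix above replaces a check against a possibly-stale cached
+ * length (skb->mac_len) with pskb_may_pull(), which guarantees the
+ * header bytes exist in the linear buffer before the struct cast.  The
+ * general "bounds-check, then read" shape, sketched over a plain
+ * buffer (memcpy also sidesteps alignment issues the kernel handles
+ * differently):
+ *
+ *	#include <stddef.h>
+ *	#include <stdint.h>
+ *	#include <string.h>
+ *
+ *	struct vlan_hdr_sketch { uint16_t tci; uint16_t encap_proto; };
+ *
+ *	// Returns the encapsulated proto, or 0 if the header doesn't fit.
+ *	static uint16_t parse_vlan(const uint8_t *buf, size_t len,
+ *				   size_t mac_off)
+ *	{
+ *		struct vlan_hdr_sketch h;
+ *
+ *		if (len < mac_off + sizeof(h))   // validate before reading
+ *			return 0;
+ *		memcpy(&h, buf + mac_off, sizeof(h));
+ *		return h.encap_proto;
+ *	}
+ */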
+diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c
+index 80c4ea0e12f48a..e0d94270da28a3 100644
+--- a/net/ipv4/esp4_offload.c
++++ b/net/ipv4/esp4_offload.c
+@@ -53,9 +53,9 @@ static struct sk_buff *esp4_gro_receive(struct list_head *head,
+ 		if (sp->len == XFRM_MAX_DEPTH)
+ 			goto out_reset;
+ 
+-		x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
+-				      (xfrm_address_t *)&ip_hdr(skb)->daddr,
+-				      spi, IPPROTO_ESP, AF_INET);
++		x = xfrm_input_state_lookup(dev_net(skb->dev), skb->mark,
++					    (xfrm_address_t *)&ip_hdr(skb)->daddr,
++					    spi, IPPROTO_ESP, AF_INET);
+ 
+ 		if (unlikely(x && x->dir && x->dir != XFRM_SA_DIR_IN)) {
+ 			/* non-offload path will record the error and audit log */
+diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
+index c3ad41573b33ea..932bd775fc2682 100644
+--- a/net/ipv4/icmp.c
++++ b/net/ipv4/icmp.c
+@@ -312,7 +312,6 @@ static bool icmpv4_xrlim_allow(struct net *net, struct rtable *rt,
+ 	struct dst_entry *dst = &rt->dst;
+ 	struct inet_peer *peer;
+ 	bool rc = true;
+-	int vif;
+ 
+ 	if (!apply_ratelimit)
+ 		return true;
+@@ -321,12 +320,12 @@ static bool icmpv4_xrlim_allow(struct net *net, struct rtable *rt,
+ 	if (dst->dev && (dst->dev->flags&IFF_LOOPBACK))
+ 		goto out;
+ 
+-	vif = l3mdev_master_ifindex(dst->dev);
+-	peer = inet_getpeer_v4(net->ipv4.peers, fl4->daddr, vif, 1);
++	rcu_read_lock();
++	peer = inet_getpeer_v4(net->ipv4.peers, fl4->daddr,
++			       l3mdev_master_ifindex_rcu(dst->dev));
+ 	rc = inet_peer_xrlim_allow(peer,
+ 				   READ_ONCE(net->ipv4.sysctl_icmp_ratelimit));
+-	if (peer)
+-		inet_putpeer(peer);
++	rcu_read_unlock();
+ out:
+ 	if (!rc)
+ 		__ICMP_INC_STATS(net, ICMP_MIB_RATELIMITHOST);
+diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
+index 5bd7599634517a..9c5ffe3b5f776f 100644
+--- a/net/ipv4/inetpeer.c
++++ b/net/ipv4/inetpeer.c
+@@ -95,6 +95,7 @@ static struct inet_peer *lookup(const struct inetpeer_addr *daddr,
+ {
+ 	struct rb_node **pp, *parent, *next;
+ 	struct inet_peer *p;
++	u32 now;
+ 
+ 	pp = &base->rb_root.rb_node;
+ 	parent = NULL;
+@@ -108,8 +109,9 @@ static struct inet_peer *lookup(const struct inetpeer_addr *daddr,
+ 		p = rb_entry(parent, struct inet_peer, rb_node);
+ 		cmp = inetpeer_addr_cmp(daddr, &p->daddr);
+ 		if (cmp == 0) {
+-			if (!refcount_inc_not_zero(&p->refcnt))
+-				break;
++			now = jiffies;
++			if (READ_ONCE(p->dtime) != now)
++				WRITE_ONCE(p->dtime, now);
+ 			return p;
+ 		}
+ 		if (gc_stack) {
+@@ -155,9 +157,6 @@ static void inet_peer_gc(struct inet_peer_base *base,
+ 	for (i = 0; i < gc_cnt; i++) {
+ 		p = gc_stack[i];
+ 
+-		/* The READ_ONCE() pairs with the WRITE_ONCE()
+-		 * in inet_putpeer()
+-		 */
+ 		delta = (__u32)jiffies - READ_ONCE(p->dtime);
+ 
+ 		if (delta < ttl || !refcount_dec_if_one(&p->refcnt))
+@@ -173,31 +172,23 @@ static void inet_peer_gc(struct inet_peer_base *base,
+ 	}
+ }
+ 
++/* Must be called under RCU : No refcount change is done here. */
+ struct inet_peer *inet_getpeer(struct inet_peer_base *base,
+-			       const struct inetpeer_addr *daddr,
+-			       int create)
++			       const struct inetpeer_addr *daddr)
+ {
+ 	struct inet_peer *p, *gc_stack[PEER_MAX_GC];
+ 	struct rb_node **pp, *parent;
+ 	unsigned int gc_cnt, seq;
+-	int invalidated;
+ 
+ 	/* Attempt a lockless lookup first.
+ 	 * Because of a concurrent writer, we might not find an existing entry.
+ 	 */
+-	rcu_read_lock();
+ 	seq = read_seqbegin(&base->lock);
+ 	p = lookup(daddr, base, seq, NULL, &gc_cnt, &parent, &pp);
+-	invalidated = read_seqretry(&base->lock, seq);
+-	rcu_read_unlock();
+ 
+ 	if (p)
+ 		return p;
+ 
+-	/* If no writer did a change during our lookup, we can return early. */
+-	if (!create && !invalidated)
+-		return NULL;
+-
+ 	/* retry an exact lookup, taking the lock before.
+ 	 * At least, nodes should be hot in our cache.
+ 	 */
+@@ -206,12 +197,12 @@ struct inet_peer *inet_getpeer(struct inet_peer_base *base,
+ 
+ 	gc_cnt = 0;
+ 	p = lookup(daddr, base, seq, gc_stack, &gc_cnt, &parent, &pp);
+-	if (!p && create) {
++	if (!p) {
+ 		p = kmem_cache_alloc(peer_cachep, GFP_ATOMIC);
+ 		if (p) {
+ 			p->daddr = *daddr;
+ 			p->dtime = (__u32)jiffies;
+-			refcount_set(&p->refcnt, 2);
++			refcount_set(&p->refcnt, 1);
+ 			atomic_set(&p->rid, 0);
+ 			p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
+ 			p->rate_tokens = 0;
+@@ -236,15 +227,9 @@ EXPORT_SYMBOL_GPL(inet_getpeer);
+ 
+ void inet_putpeer(struct inet_peer *p)
+ {
+-	/* The WRITE_ONCE() pairs with itself (we run lockless)
+-	 * and the READ_ONCE() in inet_peer_gc()
+-	 */
+-	WRITE_ONCE(p->dtime, (__u32)jiffies);
+-
+ 	if (refcount_dec_and_test(&p->refcnt))
+ 		call_rcu(&p->rcu, inetpeer_free_rcu);
+ }
+-EXPORT_SYMBOL_GPL(inet_putpeer);
+ 
+ /*
+  *	Check transmit rate limitation for given message.
+diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
+index a92664a5ef2efe..9ca0a183a55ffa 100644
+--- a/net/ipv4/ip_fragment.c
++++ b/net/ipv4/ip_fragment.c
+@@ -82,15 +82,20 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
+ static void ip4_frag_init(struct inet_frag_queue *q, const void *a)
+ {
+ 	struct ipq *qp = container_of(q, struct ipq, q);
+-	struct net *net = q->fqdir->net;
+-
+ 	const struct frag_v4_compare_key *key = a;
++	struct net *net = q->fqdir->net;
++	struct inet_peer *p = NULL;
+ 
+ 	q->key.v4 = *key;
+ 	qp->ecn = 0;
+-	qp->peer = q->fqdir->max_dist ?
+-		inet_getpeer_v4(net->ipv4.peers, key->saddr, key->vif, 1) :
+-		NULL;
++	if (q->fqdir->max_dist) {
++		rcu_read_lock();
++		p = inet_getpeer_v4(net->ipv4.peers, key->saddr, key->vif);
++		if (p && !refcount_inc_not_zero(&p->refcnt))
++			p = NULL;
++		rcu_read_unlock();
++	}
++	qp->peer = p;
+ }
+ 
+ static void ip4_frag_free(struct inet_frag_queue *q)
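+/*
+ * With inet_getpeer() now returning an RCU-protected pointer without
+ * taking a reference, callers such as ip4_frag_init() above that keep
+ * the peer beyond the RCU section must pin it themselves with
+ * refcount_inc_not_zero(): the increment has to fail if a concurrent
+ * release already dropped the count to zero.  A C11 sketch of that
+ * primitive:
+ *
+ *	#include <stdatomic.h>
+ *	#include <stdbool.h>
+ *
+ *	static bool refcount_inc_not_zero_sketch(atomic_int *r)
+ *	{
+ *		int old = atomic_load_explicit(r, memory_order_relaxed);
+ *
+ *		do {
+ *			if (old == 0)    // object already being freed
+ *				return false;
+ *		} while (!atomic_compare_exchange_weak_explicit(r, &old,
+ *					old + 1,
+ *					memory_order_acquire,
+ *					memory_order_relaxed));
+ *		return true;
+ *	}
+ */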
+diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
+index 449a2ac40bdc00..de0d9cc7806a15 100644
+--- a/net/ipv4/ipmr.c
++++ b/net/ipv4/ipmr.c
+@@ -817,7 +817,7 @@ static void ipmr_update_thresholds(struct mr_table *mrt, struct mr_mfc *cache,
+ 				cache->mfc_un.res.maxvif = vifi + 1;
+ 		}
+ 	}
+-	cache->mfc_un.res.lastuse = jiffies;
++	WRITE_ONCE(cache->mfc_un.res.lastuse, jiffies);
+ }
+ 
+ static int vif_add(struct net *net, struct mr_table *mrt,
+@@ -1667,9 +1667,9 @@ int ipmr_ioctl(struct sock *sk, int cmd, void *arg)
+ 		rcu_read_lock();
+ 		c = ipmr_cache_find(mrt, sr->src.s_addr, sr->grp.s_addr);
+ 		if (c) {
+-			sr->pktcnt = c->_c.mfc_un.res.pkt;
+-			sr->bytecnt = c->_c.mfc_un.res.bytes;
+-			sr->wrong_if = c->_c.mfc_un.res.wrong_if;
++			sr->pktcnt = atomic_long_read(&c->_c.mfc_un.res.pkt);
++			sr->bytecnt = atomic_long_read(&c->_c.mfc_un.res.bytes);
++			sr->wrong_if = atomic_long_read(&c->_c.mfc_un.res.wrong_if);
+ 			rcu_read_unlock();
+ 			return 0;
+ 		}
+@@ -1739,9 +1739,9 @@ int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
+ 		rcu_read_lock();
+ 		c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
+ 		if (c) {
+-			sr.pktcnt = c->_c.mfc_un.res.pkt;
+-			sr.bytecnt = c->_c.mfc_un.res.bytes;
+-			sr.wrong_if = c->_c.mfc_un.res.wrong_if;
++			sr.pktcnt = atomic_long_read(&c->_c.mfc_un.res.pkt);
++			sr.bytecnt = atomic_long_read(&c->_c.mfc_un.res.bytes);
++			sr.wrong_if = atomic_long_read(&c->_c.mfc_un.res.wrong_if);
+ 			rcu_read_unlock();
+ 
+ 			if (copy_to_user(arg, &sr, sizeof(sr)))
+@@ -1974,9 +1974,9 @@ static void ip_mr_forward(struct net *net, struct mr_table *mrt,
+ 	int vif, ct;
+ 
+ 	vif = c->_c.mfc_parent;
+-	c->_c.mfc_un.res.pkt++;
+-	c->_c.mfc_un.res.bytes += skb->len;
+-	c->_c.mfc_un.res.lastuse = jiffies;
++	atomic_long_inc(&c->_c.mfc_un.res.pkt);
++	atomic_long_add(skb->len, &c->_c.mfc_un.res.bytes);
++	WRITE_ONCE(c->_c.mfc_un.res.lastuse, jiffies);
+ 
+ 	if (c->mfc_origin == htonl(INADDR_ANY) && true_vifi >= 0) {
+ 		struct mfc_cache *cache_proxy;
+@@ -2007,7 +2007,7 @@ static void ip_mr_forward(struct net *net, struct mr_table *mrt,
+ 			goto dont_forward;
+ 		}
+ 
+-		c->_c.mfc_un.res.wrong_if++;
++		atomic_long_inc(&c->_c.mfc_un.res.wrong_if);
+ 
+ 		if (true_vifi >= 0 && mrt->mroute_do_assert &&
+ 		    /* pimsm uses asserts, when switching from RPT to SPT,
+@@ -3015,9 +3015,9 @@ static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
+ 
+ 		if (it->cache != &mrt->mfc_unres_queue) {
+ 			seq_printf(seq, " %8lu %8lu %8lu",
+-				   mfc->_c.mfc_un.res.pkt,
+-				   mfc->_c.mfc_un.res.bytes,
+-				   mfc->_c.mfc_un.res.wrong_if);
++				   atomic_long_read(&mfc->_c.mfc_un.res.pkt),
++				   atomic_long_read(&mfc->_c.mfc_un.res.bytes),
++				   atomic_long_read(&mfc->_c.mfc_un.res.wrong_if));
+ 			for (n = mfc->_c.mfc_un.res.minvif;
+ 			     n < mfc->_c.mfc_un.res.maxvif; n++) {
+ 				if (VIF_EXISTS(mrt, n) &&
+diff --git a/net/ipv4/ipmr_base.c b/net/ipv4/ipmr_base.c
+index f0af12a2f70bcd..28d77d454d442e 100644
+--- a/net/ipv4/ipmr_base.c
++++ b/net/ipv4/ipmr_base.c
+@@ -263,9 +263,9 @@ int mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
+ 	lastuse = READ_ONCE(c->mfc_un.res.lastuse);
+ 	lastuse = time_after_eq(jiffies, lastuse) ? jiffies - lastuse : 0;
+ 
+-	mfcs.mfcs_packets = c->mfc_un.res.pkt;
+-	mfcs.mfcs_bytes = c->mfc_un.res.bytes;
+-	mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if;
++	mfcs.mfcs_packets = atomic_long_read(&c->mfc_un.res.pkt);
++	mfcs.mfcs_bytes = atomic_long_read(&c->mfc_un.res.bytes);
++	mfcs.mfcs_wrong_if = atomic_long_read(&c->mfc_un.res.wrong_if);
+ 	if (nla_put_64bit(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs, RTA_PAD) ||
+ 	    nla_put_u64_64bit(skb, RTA_EXPIRES, jiffies_to_clock_t(lastuse),
+ 			      RTA_PAD))
+@@ -330,9 +330,6 @@ int mr_table_dump(struct mr_table *mrt, struct sk_buff *skb,
+ 	list_for_each_entry(mfc, &mrt->mfc_unres_queue, list) {
+ 		if (e < s_e)
+ 			goto next_entry2;
+-		if (filter->dev &&
+-		    !mr_mfc_uses_dev(mrt, mfc, filter->dev))
+-			goto next_entry2;
+ 
+ 		err = fill(mrt, skb, NETLINK_CB(cb->skb).portid,
+ 			   cb->nlh->nlmsg_seq, mfc, RTM_NEWROUTE, flags);
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 723ac9181558c3..2a27913588d05a 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -870,11 +870,11 @@ void ip_rt_send_redirect(struct sk_buff *skb)
+ 	}
+ 	log_martians = IN_DEV_LOG_MARTIANS(in_dev);
+ 	vif = l3mdev_master_ifindex_rcu(rt->dst.dev);
+-	rcu_read_unlock();
+ 
+ 	net = dev_net(rt->dst.dev);
+-	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, vif, 1);
++	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, vif);
+ 	if (!peer) {
++		rcu_read_unlock();
+ 		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST,
+ 			  rt_nexthop(rt, ip_hdr(skb)->daddr));
+ 		return;
+@@ -893,7 +893,7 @@ void ip_rt_send_redirect(struct sk_buff *skb)
+ 	 */
+ 	if (peer->n_redirects >= ip_rt_redirect_number) {
+ 		peer->rate_last = jiffies;
+-		goto out_put_peer;
++		goto out_unlock;
+ 	}
+ 
+ 	/* Check for load limit; set rate_last to the latest sent
+@@ -914,8 +914,8 @@ void ip_rt_send_redirect(struct sk_buff *skb)
+ 					     &ip_hdr(skb)->saddr, inet_iif(skb),
+ 					     &ip_hdr(skb)->daddr, &gw);
+ 	}
+-out_put_peer:
+-	inet_putpeer(peer);
++out_unlock:
++	rcu_read_unlock();
+ }
+ 
+ static int ip_error(struct sk_buff *skb)
+@@ -975,9 +975,9 @@ static int ip_error(struct sk_buff *skb)
+ 		break;
+ 	}
+ 
++	rcu_read_lock();
+ 	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr,
+-			       l3mdev_master_ifindex(skb->dev), 1);
+-
++			       l3mdev_master_ifindex_rcu(skb->dev));
+ 	send = true;
+ 	if (peer) {
+ 		now = jiffies;
+@@ -989,8 +989,9 @@ static int ip_error(struct sk_buff *skb)
+ 			peer->rate_tokens -= ip_rt_error_cost;
+ 		else
+ 			send = false;
+-		inet_putpeer(peer);
+ 	}
++	rcu_read_unlock();
++
+ 	if (send)
+ 		icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
+ 
+diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
+index 5dbed91c617825..76c23675ae50ab 100644
+--- a/net/ipv4/tcp_cubic.c
++++ b/net/ipv4/tcp_cubic.c
+@@ -392,6 +392,10 @@ static void hystart_update(struct sock *sk, u32 delay)
+ 	if (after(tp->snd_una, ca->end_seq))
+ 		bictcp_hystart_reset(sk);
+ 
++	/* hystart triggers when cwnd is larger than some threshold */
++	if (tcp_snd_cwnd(tp) < hystart_low_window)
++		return;
++
+ 	if (hystart_detect & HYSTART_ACK_TRAIN) {
+ 		u32 now = bictcp_clock_us(sk);
+ 
+@@ -467,9 +471,7 @@ __bpf_kfunc static void cubictcp_acked(struct sock *sk, const struct ack_sample
+ 	if (ca->delay_min == 0 || ca->delay_min > delay)
+ 		ca->delay_min = delay;
+ 
+-	/* hystart triggers when cwnd is larger than some threshold */
+-	if (!ca->found && tcp_in_slow_start(tp) && hystart &&
+-	    tcp_snd_cwnd(tp) >= hystart_low_window)
++	if (!ca->found && tcp_in_slow_start(tp) && hystart)
+ 		hystart_update(sk, delay);
+ }
+ 
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 8efc58716ce969..6d5387811c32ad 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -265,11 +265,14 @@ static u16 tcp_select_window(struct sock *sk)
+ 	u32 cur_win, new_win;
+ 
+ 	/* Make the window 0 if we failed to queue the data because we
+-	 * are out of memory. The window is temporary, so we don't store
+-	 * it on the socket.
++	 * are out of memory.
+ 	 */
+-	if (unlikely(inet_csk(sk)->icsk_ack.pending & ICSK_ACK_NOMEM))
++	if (unlikely(inet_csk(sk)->icsk_ack.pending & ICSK_ACK_NOMEM)) {
++		tp->pred_flags = 0;
++		tp->rcv_wnd = 0;
++		tp->rcv_wup = tp->rcv_nxt;
+ 		return 0;
++	}
+ 
+ 	cur_win = tcp_receive_window(tp);
+ 	new_win = __tcp_select_window(sk);
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index ff85242720a0a9..d2eeb6fc49b382 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -420,6 +420,49 @@ u32 udp_ehashfn(const struct net *net, const __be32 laddr, const __u16 lport,
+ 			      udp_ehash_secret + net_hash_mix(net));
+ }
+ 
++/**
++ * udp4_lib_lookup1() - Simplified lookup using primary hash (destination port)
++ * @net:	Network namespace
++ * @saddr:	Source address, network order
++ * @sport:	Source port, network order
++ * @daddr:	Destination address, network order
++ * @hnum:	Destination port, host order
++ * @dif:	Destination interface index
++ * @sdif:	Destination bridge port index, if relevant
++ * @udptable:	Set of UDP hash tables
++ *
++ * Simplified lookup to be used as fallback if no sockets are found due to a
++ * potential race between (receive) address change, and lookup happening before
++ * the rehash operation. This function ignores SO_REUSEPORT groups while scoring
++ * result sockets, because if we have one, we don't need the fallback at all.
++ *
++ * Called under rcu_read_lock().
++ *
++ * Return: socket with highest matching score if any, NULL if none
++ */
++static struct sock *udp4_lib_lookup1(const struct net *net,
++				     __be32 saddr, __be16 sport,
++				     __be32 daddr, unsigned int hnum,
++				     int dif, int sdif,
++				     const struct udp_table *udptable)
++{
++	unsigned int slot = udp_hashfn(net, hnum, udptable->mask);
++	struct udp_hslot *hslot = &udptable->hash[slot];
++	struct sock *sk, *result = NULL;
++	int score, badness = 0;
++
++	sk_for_each_rcu(sk, &hslot->head) {
++		score = compute_score(sk, net,
++				      saddr, sport, daddr, hnum, dif, sdif);
++		if (score > badness) {
++			result = sk;
++			badness = score;
++		}
++	}
++
++	return result;
++}
++
+ /* called with rcu_read_lock() */
+ static struct sock *udp4_lib_lookup2(const struct net *net,
+ 				     __be32 saddr, __be16 sport,
+@@ -525,6 +568,19 @@ struct sock *__udp4_lib_lookup(const struct net *net, __be32 saddr,
+ 	result = udp4_lib_lookup2(net, saddr, sport,
+ 				  htonl(INADDR_ANY), hnum, dif, sdif,
+ 				  hslot2, skb);
++	if (!IS_ERR_OR_NULL(result))
++		goto done;
++
++	/* Primary hash (destination port) lookup as fallback for this race:
++	 *   1. __ip4_datagram_connect() sets sk_rcv_saddr
++	 *   2. lookup (this function): new sk_rcv_saddr, hashes not updated yet
++	 *   3. rehash operation updating _secondary and four-tuple_ hashes
++	 * The primary hash doesn't need an update after 1., so, thanks to this
++	 * further step, 1. and 3. don't need to be atomic against the lookup.
++	 */
++	result = udp4_lib_lookup1(net, saddr, sport, daddr, hnum, dif, sdif,
++				  udptable);
++
+ done:
+ 	if (IS_ERR(result))
+ 		return NULL;
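+/*
+ * udp4_lib_lookup1() above is a plain best-score scan over the
+ * destination-port slot, used only when the secondary-hash lookups race
+ * with a rehash and come up empty.  Stripped of the kernel's scoring
+ * details, the loop reduces to the sketch below (the scoring function
+ * here is a simplified stand-in for compute_score()):
+ *
+ *	#include <stddef.h>
+ *
+ *	struct sock_sketch { int ifindex; unsigned short port; };
+ *
+ *	static int score(const struct sock_sketch *sk,
+ *			 unsigned short port, int dif)
+ *	{
+ *		if (sk->port != port)
+ *			return -1;                  // no match at all
+ *		return 1 + (sk->ifindex == dif);    // exact dev beats wildcard
+ *	}
+ *
+ *	static struct sock_sketch *lookup1(struct sock_sketch *slot, size_t n,
+ *					   unsigned short port, int dif)
+ *	{
+ *		struct sock_sketch *result = NULL;
+ *		int badness = 0;
+ *
+ *		for (size_t i = 0; i < n; i++) {
+ *			int s = score(&slot[i], port, dif);
+ *
+ *			if (s > badness) {
+ *				result = &slot[i];
+ *				badness = s;
+ *			}
+ *		}
+ *		return result;
+ *	}
+ */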
+diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c
+index 919ebfabbe4ee2..7b41fb4f00b587 100644
+--- a/net/ipv6/esp6_offload.c
++++ b/net/ipv6/esp6_offload.c
+@@ -80,9 +80,9 @@ static struct sk_buff *esp6_gro_receive(struct list_head *head,
+ 		if (sp->len == XFRM_MAX_DEPTH)
+ 			goto out_reset;
+ 
+-		x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
+-				      (xfrm_address_t *)&ipv6_hdr(skb)->daddr,
+-				      spi, IPPROTO_ESP, AF_INET6);
++		x = xfrm_input_state_lookup(dev_net(skb->dev), skb->mark,
++					    (xfrm_address_t *)&ipv6_hdr(skb)->daddr,
++					    spi, IPPROTO_ESP, AF_INET6);
+ 
+ 		if (unlikely(x && x->dir && x->dir != XFRM_SA_DIR_IN)) {
+ 			/* non-offload path will record the error and audit log */
+diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
+index 071b0bc1179d81..a6984a29fdb9dd 100644
+--- a/net/ipv6/icmp.c
++++ b/net/ipv6/icmp.c
+@@ -222,10 +222,10 @@ static bool icmpv6_xrlim_allow(struct sock *sk, u8 type,
+ 		if (rt->rt6i_dst.plen < 128)
+ 			tmo >>= ((128 - rt->rt6i_dst.plen)>>5);
+ 
+-		peer = inet_getpeer_v6(net->ipv6.peers, &fl6->daddr, 1);
++		rcu_read_lock();
++		peer = inet_getpeer_v6(net->ipv6.peers, &fl6->daddr);
+ 		res = inet_peer_xrlim_allow(peer, tmo);
+-		if (peer)
+-			inet_putpeer(peer);
++		rcu_read_unlock();
+ 	}
+ 	if (!res)
+ 		__ICMP6_INC_STATS(net, ip6_dst_idev(dst),
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index f26841f1490f5c..434ddf263b88a3 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -613,15 +613,15 @@ int ip6_forward(struct sk_buff *skb)
+ 		else
+ 			target = &hdr->daddr;
+ 
+-		peer = inet_getpeer_v6(net->ipv6.peers, &hdr->daddr, 1);
++		rcu_read_lock();
++		peer = inet_getpeer_v6(net->ipv6.peers, &hdr->daddr);
+ 
+ 		/* Limit redirects both by destination (here)
+ 		   and by source (inside ndisc_send_redirect)
+ 		 */
+ 		if (inet_peer_xrlim_allow(peer, 1*HZ))
+ 			ndisc_send_redirect(skb, target);
+-		if (peer)
+-			inet_putpeer(peer);
++		rcu_read_unlock();
+ 	} else {
+ 		int addrtype = ipv6_addr_type(&hdr->saddr);
+ 
+diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
+index d5057401701c1a..440048d609c37a 100644
+--- a/net/ipv6/ip6mr.c
++++ b/net/ipv6/ip6mr.c
+@@ -506,9 +506,9 @@ static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
+ 
+ 		if (it->cache != &mrt->mfc_unres_queue) {
+ 			seq_printf(seq, " %8lu %8lu %8lu",
+-				   mfc->_c.mfc_un.res.pkt,
+-				   mfc->_c.mfc_un.res.bytes,
+-				   mfc->_c.mfc_un.res.wrong_if);
++				   atomic_long_read(&mfc->_c.mfc_un.res.pkt),
++				   atomic_long_read(&mfc->_c.mfc_un.res.bytes),
++				   atomic_long_read(&mfc->_c.mfc_un.res.wrong_if));
+ 			for (n = mfc->_c.mfc_un.res.minvif;
+ 			     n < mfc->_c.mfc_un.res.maxvif; n++) {
+ 				if (VIF_EXISTS(mrt, n) &&
+@@ -870,7 +870,7 @@ static void ip6mr_update_thresholds(struct mr_table *mrt,
+ 				cache->mfc_un.res.maxvif = vifi + 1;
+ 		}
+ 	}
+-	cache->mfc_un.res.lastuse = jiffies;
++	WRITE_ONCE(cache->mfc_un.res.lastuse, jiffies);
+ }
+ 
+ static int mif6_add(struct net *net, struct mr_table *mrt,
+@@ -1928,9 +1928,9 @@ int ip6mr_ioctl(struct sock *sk, int cmd, void *arg)
+ 		c = ip6mr_cache_find(mrt, &sr->src.sin6_addr,
+ 				     &sr->grp.sin6_addr);
+ 		if (c) {
+-			sr->pktcnt = c->_c.mfc_un.res.pkt;
+-			sr->bytecnt = c->_c.mfc_un.res.bytes;
+-			sr->wrong_if = c->_c.mfc_un.res.wrong_if;
++			sr->pktcnt = atomic_long_read(&c->_c.mfc_un.res.pkt);
++			sr->bytecnt = atomic_long_read(&c->_c.mfc_un.res.bytes);
++			sr->wrong_if = atomic_long_read(&c->_c.mfc_un.res.wrong_if);
+ 			rcu_read_unlock();
+ 			return 0;
+ 		}
+@@ -2000,9 +2000,9 @@ int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
+ 		rcu_read_lock();
+ 		c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
+ 		if (c) {
+-			sr.pktcnt = c->_c.mfc_un.res.pkt;
+-			sr.bytecnt = c->_c.mfc_un.res.bytes;
+-			sr.wrong_if = c->_c.mfc_un.res.wrong_if;
++			sr.pktcnt = atomic_long_read(&c->_c.mfc_un.res.pkt);
++			sr.bytecnt = atomic_long_read(&c->_c.mfc_un.res.bytes);
++			sr.wrong_if = atomic_long_read(&c->_c.mfc_un.res.wrong_if);
+ 			rcu_read_unlock();
+ 
+ 			if (copy_to_user(arg, &sr, sizeof(sr)))
+@@ -2125,9 +2125,9 @@ static void ip6_mr_forward(struct net *net, struct mr_table *mrt,
+ 	int true_vifi = ip6mr_find_vif(mrt, dev);
+ 
+ 	vif = c->_c.mfc_parent;
+-	c->_c.mfc_un.res.pkt++;
+-	c->_c.mfc_un.res.bytes += skb->len;
+-	c->_c.mfc_un.res.lastuse = jiffies;
++	atomic_long_inc(&c->_c.mfc_un.res.pkt);
++	atomic_long_add(skb->len, &c->_c.mfc_un.res.bytes);
++	WRITE_ONCE(c->_c.mfc_un.res.lastuse, jiffies);
+ 
+ 	if (ipv6_addr_any(&c->mf6c_origin) && true_vifi >= 0) {
+ 		struct mfc6_cache *cache_proxy;
+@@ -2145,7 +2145,7 @@ static void ip6_mr_forward(struct net *net, struct mr_table *mrt,
+ 	 * Wrong interface: drop packet and (maybe) send PIM assert.
+ 	 */
+ 	if (rcu_access_pointer(mrt->vif_table[vif].dev) != dev) {
+-		c->_c.mfc_un.res.wrong_if++;
++		atomic_long_inc(&c->_c.mfc_un.res.wrong_if);
+ 
+ 		if (true_vifi >= 0 && mrt->mroute_do_assert &&
+ 		    /* pimsm uses asserts, when switching from RPT to SPT,
+diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
+index aba94a34867379..d044c67019de6d 100644
+--- a/net/ipv6/ndisc.c
++++ b/net/ipv6/ndisc.c
+@@ -1731,10 +1731,12 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
+ 			  "Redirect: destination is not a neighbour\n");
+ 		goto release;
+ 	}
+-	peer = inet_getpeer_v6(net->ipv6.peers, &ipv6_hdr(skb)->saddr, 1);
++
++	rcu_read_lock();
++	peer = inet_getpeer_v6(net->ipv6.peers, &ipv6_hdr(skb)->saddr);
+ 	ret = inet_peer_xrlim_allow(peer, 1*HZ);
+-	if (peer)
+-		inet_putpeer(peer);
++	rcu_read_unlock();
++
+ 	if (!ret)
+ 		goto release;
+ 
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index 0cef8ae5d1ea18..896c9c827a288c 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -159,6 +159,49 @@ static int compute_score(struct sock *sk, const struct net *net,
+ 	return score;
+ }
+ 
++/**
++ * udp6_lib_lookup1() - Simplified lookup using primary hash (destination port)
++ * @net:	Network namespace
++ * @saddr:	Source address, network order
++ * @sport:	Source port, network order
++ * @daddr:	Destination address, network order
++ * @hnum:	Destination port, host order
++ * @dif:	Destination interface index
++ * @sdif:	Destination bridge port index, if relevant
++ * @udptable:	Set of UDP hash tables
++ *
++ * Simplified lookup to be used as fallback if no sockets are found due to a
++ * potential race between (receive) address change, and lookup happening before
++ * the rehash operation. This function ignores SO_REUSEPORT groups while scoring
++ * result sockets, because if we have one, we don't need the fallback at all.
++ *
++ * Called under rcu_read_lock().
++ *
++ * Return: socket with highest matching score if any, NULL if none
++ */
++static struct sock *udp6_lib_lookup1(const struct net *net,
++				     const struct in6_addr *saddr, __be16 sport,
++				     const struct in6_addr *daddr,
++				     unsigned int hnum, int dif, int sdif,
++				     const struct udp_table *udptable)
++{
++	unsigned int slot = udp_hashfn(net, hnum, udptable->mask);
++	struct udp_hslot *hslot = &udptable->hash[slot];
++	struct sock *sk, *result = NULL;
++	int score, badness = 0;
++
++	sk_for_each_rcu(sk, &hslot->head) {
++		score = compute_score(sk, net,
++				      saddr, sport, daddr, hnum, dif, sdif);
++		if (score > badness) {
++			result = sk;
++			badness = score;
++		}
++	}
++
++	return result;
++}
++
+ /* called with rcu_read_lock() */
+ static struct sock *udp6_lib_lookup2(const struct net *net,
+ 		const struct in6_addr *saddr, __be16 sport,
+@@ -263,6 +306,13 @@ struct sock *__udp6_lib_lookup(const struct net *net,
+ 	result = udp6_lib_lookup2(net, saddr, sport,
+ 				  &in6addr_any, hnum, dif, sdif,
+ 				  hslot2, skb);
++	if (!IS_ERR_OR_NULL(result))
++		goto done;
++
++	/* Cover address change/lookup/rehash race: see __udp4_lib_lookup() */
++	result = udp6_lib_lookup1(net, saddr, sport, daddr, hnum, dif, sdif,
++				  udptable);
++
+ done:
+ 	if (IS_ERR(result))
+ 		return NULL;
+diff --git a/net/key/af_key.c b/net/key/af_key.c
+index f79fb99271ed84..c56bb4f451e6de 100644
+--- a/net/key/af_key.c
++++ b/net/key/af_key.c
+@@ -1354,7 +1354,7 @@ static int pfkey_getspi(struct sock *sk, struct sk_buff *skb, const struct sadb_
+ 	}
+ 
+ 	if (hdr->sadb_msg_seq) {
+-		x = xfrm_find_acq_byseq(net, DUMMY_MARK, hdr->sadb_msg_seq);
++		x = xfrm_find_acq_byseq(net, DUMMY_MARK, hdr->sadb_msg_seq, UINT_MAX);
+ 		if (x && !xfrm_addr_equal(&x->id.daddr, xdaddr, family)) {
+ 			xfrm_state_put(x);
+ 			x = NULL;
+@@ -1362,7 +1362,8 @@ static int pfkey_getspi(struct sock *sk, struct sk_buff *skb, const struct sadb_
+ 	}
+ 
+ 	if (!x)
+-		x = xfrm_find_acq(net, &dummy_mark, mode, reqid, 0, proto, xdaddr, xsaddr, 1, family);
++		x = xfrm_find_acq(net, &dummy_mark, mode, reqid, 0, UINT_MAX,
++				  proto, xdaddr, xsaddr, 1, family);
+ 
+ 	if (x == NULL)
+ 		return -ENOENT;
+@@ -1417,7 +1418,7 @@ static int pfkey_acquire(struct sock *sk, struct sk_buff *skb, const struct sadb
+ 	if (hdr->sadb_msg_seq == 0 || hdr->sadb_msg_errno == 0)
+ 		return 0;
+ 
+-	x = xfrm_find_acq_byseq(net, DUMMY_MARK, hdr->sadb_msg_seq);
++	x = xfrm_find_acq_byseq(net, DUMMY_MARK, hdr->sadb_msg_seq, UINT_MAX);
+ 	if (x == NULL)
+ 		return 0;
+ 
+diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
+index 68596ef78b15ee..d0b145888e1398 100644
+--- a/net/mac80211/debugfs_netdev.c
++++ b/net/mac80211/debugfs_netdev.c
+@@ -728,7 +728,7 @@ static ssize_t ieee80211_if_parse_active_links(struct ieee80211_sub_if_data *sda
+ {
+ 	u16 active_links;
+ 
+-	if (kstrtou16(buf, 0, &active_links))
++	if (kstrtou16(buf, 0, &active_links) || !active_links)
+ 		return -EINVAL;
+ 
+ 	return ieee80211_set_active_links(&sdata->vif, active_links) ?: buflen;
+diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
+index d382d9729e853f..a06644084d15d1 100644
+--- a/net/mac80211/driver-ops.h
++++ b/net/mac80211/driver-ops.h
+@@ -724,6 +724,9 @@ static inline void drv_flush_sta(struct ieee80211_local *local,
+ 	if (sdata && !check_sdata_in_driver(sdata))
+ 		return;
+ 
++	if (!sta->uploaded)
++		return;
++
+ 	trace_drv_flush_sta(local, sdata, &sta->sta);
+ 	if (local->ops->flush_sta)
+ 		local->ops->flush_sta(&local->hw, &sdata->vif, &sta->sta);
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index 694b43091fec6b..6f3a86040cfcd8 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -2994,6 +2994,7 @@ ieee80211_rx_mesh_data(struct ieee80211_sub_if_data *sdata, struct sta_info *sta
+ 	}
+ 
+ 	IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_frames);
++	ieee80211_set_qos_hdr(sdata, fwd_skb);
+ 	ieee80211_add_pending_skb(local, fwd_skb);
+ 
+ rx_accept:
+diff --git a/net/mptcp/ctrl.c b/net/mptcp/ctrl.c
+index b0dd008e2114bc..dd595d9b5e50c7 100644
+--- a/net/mptcp/ctrl.c
++++ b/net/mptcp/ctrl.c
+@@ -405,9 +405,9 @@ void mptcp_active_detect_blackhole(struct sock *ssk, bool expired)
+ 			MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_MPCAPABLEACTIVEDROP);
+ 			subflow->mpc_drop = 1;
+ 			mptcp_subflow_early_fallback(mptcp_sk(subflow->conn), subflow);
+-		} else {
+-			subflow->mpc_drop = 0;
+ 		}
++	} else if (ssk->sk_state == TCP_SYN_SENT) {
++		subflow->mpc_drop = 0;
+ 	}
+ }
+ 
+diff --git a/net/mptcp/options.c b/net/mptcp/options.c
+index 123f3f2972841a..fd2de185bc939f 100644
+--- a/net/mptcp/options.c
++++ b/net/mptcp/options.c
+@@ -108,7 +108,6 @@ static void mptcp_parse_option(const struct sk_buff *skb,
+ 			mp_opt->suboptions |= OPTION_MPTCP_DSS;
+ 			mp_opt->use_map = 1;
+ 			mp_opt->mpc_map = 1;
+-			mp_opt->use_ack = 0;
+ 			mp_opt->data_len = get_unaligned_be16(ptr);
+ 			ptr += 2;
+ 		}
+@@ -157,11 +156,6 @@ static void mptcp_parse_option(const struct sk_buff *skb,
+ 		pr_debug("DSS\n");
+ 		ptr++;
+ 
+-		/* we must clear 'mpc_map' be able to detect MP_CAPABLE
+-		 * map vs DSS map in mptcp_incoming_options(), and reconstruct
+-		 * map info accordingly
+-		 */
+-		mp_opt->mpc_map = 0;
+ 		flags = (*ptr++) & MPTCP_DSS_FLAG_MASK;
+ 		mp_opt->data_fin = (flags & MPTCP_DSS_DATA_FIN) != 0;
+ 		mp_opt->dsn64 = (flags & MPTCP_DSS_DSN64) != 0;
+@@ -369,8 +363,11 @@ void mptcp_get_options(const struct sk_buff *skb,
+ 	const unsigned char *ptr;
+ 	int length;
+ 
+-	/* initialize option status */
+-	mp_opt->suboptions = 0;
++	/* Ensure that casting the whole status to u32 is efficient and safe */
++	BUILD_BUG_ON(sizeof_field(struct mptcp_options_received, status) != sizeof(u32));
++	BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct mptcp_options_received, status),
++				 sizeof(u32)));
++	*(u32 *)&mp_opt->status = 0;
+ 
+ 	length = (th->doff * 4) - sizeof(struct tcphdr);
+ 	ptr = (const unsigned char *)(th + 1);
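+/*
+ * Grouping the suboptions word and the parse bitfields into
+ * struct_group(status) lets mptcp_get_options() above clear the whole
+ * parse state with one aligned 32-bit store, and the BUILD_BUG_ON()s
+ * freeze that size/alignment contract at compile time.  The same
+ * contract in portable C11 (the field layout here is illustrative, not
+ * the real struct; memset is the portable spelling of the kernel's
+ * single u32 store):
+ *
+ *	#include <assert.h>
+ *	#include <stddef.h>
+ *	#include <stdint.h>
+ *	#include <string.h>
+ *
+ *	struct opts_sketch {
+ *		struct {               // plays the role of struct_group()
+ *			uint16_t suboptions;
+ *			uint16_t flags;
+ *		} status;
+ *		uint32_t token;
+ *	};
+ *
+ *	static_assert(sizeof(((struct opts_sketch *)0)->status) ==
+ *		      sizeof(uint32_t), "status must stay one 32-bit word");
+ *	static_assert(offsetof(struct opts_sketch, status) %
+ *		      sizeof(uint32_t) == 0, "status must stay aligned");
+ *
+ *	static void reset_status(struct opts_sketch *o)
+ *	{
+ *		memset(&o->status, 0, sizeof(o->status));
+ *	}
+ */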
+diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
+index 45a2b5f05d38b0..8c4f934d198cc6 100644
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -2049,7 +2049,8 @@ int mptcp_pm_nl_set_flags(struct sk_buff *skb, struct genl_info *info)
+ 		return -EINVAL;
+ 	}
+ 	if ((addr.flags & MPTCP_PM_ADDR_FLAG_FULLMESH) &&
+-	    (entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL)) {
++	    (entry->flags & (MPTCP_PM_ADDR_FLAG_SIGNAL |
++			     MPTCP_PM_ADDR_FLAG_IMPLICIT))) {
+ 		spin_unlock_bh(&pernet->lock);
+ 		GENL_SET_ERR_MSG(info, "invalid addr flags");
+ 		return -EINVAL;
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index 4b9d850ce85a25..fac774825aff39 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -1766,8 +1766,10 @@ static int mptcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
+ 		 * see mptcp_disconnect().
+ 		 * Attempt it again outside the problematic scope.
+ 		 */
+-		if (!mptcp_disconnect(sk, 0))
++		if (!mptcp_disconnect(sk, 0)) {
++			sk->sk_disconnects++;
+ 			sk->sk_socket->state = SS_UNCONNECTED;
++		}
+ 	}
+ 	inet_clear_bit(DEFER_CONNECT, sk);
+ 
+diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
+index 73526f1d768fcb..b70a303e082878 100644
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -149,22 +149,24 @@ struct mptcp_options_received {
+ 	u32	subflow_seq;
+ 	u16	data_len;
+ 	__sum16	csum;
+-	u16	suboptions;
++	struct_group(status,
++		u16 suboptions;
++		u16 use_map:1,
++		    dsn64:1,
++		    data_fin:1,
++		    use_ack:1,
++		    ack64:1,
++		    mpc_map:1,
++		    reset_reason:4,
++		    reset_transient:1,
++		    echo:1,
++		    backup:1,
++		    deny_join_id0:1,
++		    __unused:2;
++	);
++	u8	join_id;
+ 	u32	token;
+ 	u32	nonce;
+-	u16	use_map:1,
+-		dsn64:1,
+-		data_fin:1,
+-		use_ack:1,
+-		ack64:1,
+-		mpc_map:1,
+-		reset_reason:4,
+-		reset_transient:1,
+-		echo:1,
+-		backup:1,
+-		deny_join_id0:1,
+-		__unused:2;
+-	u8	join_id;
+ 	u64	thmac;
+ 	u8	hmac[MPTCPOPT_HMAC_LEN];
+ 	struct mptcp_addr_info addr;
+diff --git a/net/ncsi/ncsi-rsp.c b/net/ncsi/ncsi-rsp.c
+index 14bd66909ca455..4a8ce2949faeac 100644
+--- a/net/ncsi/ncsi-rsp.c
++++ b/net/ncsi/ncsi-rsp.c
+@@ -1089,14 +1089,12 @@ static int ncsi_rsp_handler_netlink(struct ncsi_request *nr)
+ static int ncsi_rsp_handler_gmcma(struct ncsi_request *nr)
+ {
+ 	struct ncsi_dev_priv *ndp = nr->ndp;
++	struct sockaddr *saddr = &ndp->pending_mac;
+ 	struct net_device *ndev = ndp->ndev.dev;
+ 	struct ncsi_rsp_gmcma_pkt *rsp;
+-	struct sockaddr saddr;
+-	int ret = -1;
+ 	int i;
+ 
+ 	rsp = (struct ncsi_rsp_gmcma_pkt *)skb_network_header(nr->rsp);
+-	saddr.sa_family = ndev->type;
+ 	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+ 
+ 	netdev_info(ndev, "NCSI: Received %d provisioned MAC addresses\n",
+@@ -1108,20 +1106,20 @@ static int ncsi_rsp_handler_gmcma(struct ncsi_request *nr)
+ 			    rsp->addresses[i][4], rsp->addresses[i][5]);
+ 	}
+ 
++	saddr->sa_family = ndev->type;
+ 	for (i = 0; i < rsp->address_count; i++) {
+-		memcpy(saddr.sa_data, &rsp->addresses[i], ETH_ALEN);
+-		ret = ndev->netdev_ops->ndo_set_mac_address(ndev, &saddr);
+-		if (ret < 0) {
++		if (!is_valid_ether_addr(rsp->addresses[i])) {
+ 			netdev_warn(ndev, "NCSI: Unable to assign %pM to device\n",
+-				    saddr.sa_data);
++				    rsp->addresses[i]);
+ 			continue;
+ 		}
+-		netdev_warn(ndev, "NCSI: Set MAC address to %pM\n", saddr.sa_data);
++		memcpy(saddr->sa_data, rsp->addresses[i], ETH_ALEN);
++		netdev_warn(ndev, "NCSI: Will set MAC address to %pM\n", saddr->sa_data);
+ 		break;
+ 	}
+ 
+-	ndp->gma_flag = ret == 0;
+-	return ret;
++	ndp->gma_flag = 1;
++	return 0;
+ }
+ 
+ static struct ncsi_rsp_handler {
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 42dc8cc721ff7b..939510247ef5a6 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -4647,6 +4647,14 @@ static int nf_tables_fill_set_concat(struct sk_buff *skb,
+ 	return 0;
+ }
+ 
++static u32 nft_set_userspace_size(const struct nft_set_ops *ops, u32 size)
++{
++	if (ops->usize)
++		return ops->usize(size);
++
++	return size;
++}
++
+ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
+ 			      const struct nft_set *set, u16 event, u16 flags)
+ {
+@@ -4717,7 +4725,8 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
+ 	if (!nest)
+ 		goto nla_put_failure;
+ 	if (set->size &&
+-	    nla_put_be32(skb, NFTA_SET_DESC_SIZE, htonl(set->size)))
++	    nla_put_be32(skb, NFTA_SET_DESC_SIZE,
++			 htonl(nft_set_userspace_size(set->ops, set->size))))
+ 		goto nla_put_failure;
+ 
+ 	if (set->field_count > 1 &&
+@@ -4959,7 +4968,7 @@ static int nft_set_desc_concat_parse(const struct nlattr *attr,
+ static int nft_set_desc_concat(struct nft_set_desc *desc,
+ 			       const struct nlattr *nla)
+ {
+-	u32 num_regs = 0, key_num_regs = 0;
++	u32 len = 0, num_regs;
+ 	struct nlattr *attr;
+ 	int rem, err, i;
+ 
+@@ -4973,12 +4982,12 @@ static int nft_set_desc_concat(struct nft_set_desc *desc,
+ 	}
+ 
+ 	for (i = 0; i < desc->field_count; i++)
+-		num_regs += DIV_ROUND_UP(desc->field_len[i], sizeof(u32));
++		len += round_up(desc->field_len[i], sizeof(u32));
+ 
+-	key_num_regs = DIV_ROUND_UP(desc->klen, sizeof(u32));
+-	if (key_num_regs != num_regs)
++	if (len != desc->klen)
+ 		return -EINVAL;
+ 
++	num_regs = DIV_ROUND_UP(desc->klen, sizeof(u32));
+ 	if (num_regs > NFT_REG32_COUNT)
+ 		return -E2BIG;
+ 
+@@ -5085,6 +5094,15 @@ static bool nft_set_is_same(const struct nft_set *set,
+ 	return true;
+ }
+ 
++static u32 nft_set_kernel_size(const struct nft_set_ops *ops,
++			       const struct nft_set_desc *desc)
++{
++	if (ops->ksize)
++		return ops->ksize(desc->size);
++
++	return desc->size;
++}
++
+ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
+ 			    const struct nlattr * const nla[])
+ {
+@@ -5267,6 +5285,9 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
+ 		if (err < 0)
+ 			return err;
+ 
++		if (desc.size)
++			desc.size = nft_set_kernel_size(set->ops, &desc);
++
+ 		err = 0;
+ 		if (!nft_set_is_same(set, &desc, exprs, num_exprs, flags)) {
+ 			NL_SET_BAD_ATTR(extack, nla[NFTA_SET_NAME]);
+@@ -5289,6 +5310,9 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
+ 	if (IS_ERR(ops))
+ 		return PTR_ERR(ops);
+ 
++	if (desc.size)
++		desc.size = nft_set_kernel_size(ops, &desc);
++
+ 	udlen = 0;
+ 	if (nla[NFTA_SET_USERDATA])
+ 		udlen = nla_len(nla[NFTA_SET_USERDATA]);
+@@ -6855,6 +6879,27 @@ static bool nft_setelem_valid_key_end(const struct nft_set *set,
+ 	return true;
+ }
+ 
++static u32 nft_set_maxsize(const struct nft_set *set)
++{
++	u32 maxsize, delta;
++
++	if (!set->size)
++		return UINT_MAX;
++
++	if (set->ops->adjust_maxsize)
++		delta = set->ops->adjust_maxsize(set);
++	else
++		delta = 0;
++
++	if (check_add_overflow(set->size, set->ndeact, &maxsize))
++		return UINT_MAX;
++
++	if (check_add_overflow(maxsize, delta, &maxsize))
++		return UINT_MAX;
++
++	return maxsize;
++}
++
+ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
+ 			    const struct nlattr *attr, u32 nlmsg_flags)
+ {
+@@ -7218,7 +7263,7 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
+ 	}
+ 
+ 	if (!(flags & NFT_SET_ELEM_CATCHALL)) {
+-		unsigned int max = set->size ? set->size + set->ndeact : UINT_MAX;
++		unsigned int max = nft_set_maxsize(set);
+ 
+ 		if (!atomic_add_unless(&set->nelems, 1, max)) {
+ 			err = -ENFILE;
+diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
+index 2f732fae5a831e..da9ebd00b19891 100644
+--- a/net/netfilter/nft_flow_offload.c
++++ b/net/netfilter/nft_flow_offload.c
+@@ -289,6 +289,15 @@ static bool nft_flow_offload_skip(struct sk_buff *skb, int family)
+ 	return false;
+ }
+ 
++static void flow_offload_ct_tcp(struct nf_conn *ct)
++{
++	/* conntrack will not see all packets, disable tcp window validation. */
++	spin_lock_bh(&ct->lock);
++	ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
++	ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
++	spin_unlock_bh(&ct->lock);
++}
++
+ static void nft_flow_offload_eval(const struct nft_expr *expr,
+ 				  struct nft_regs *regs,
+ 				  const struct nft_pktinfo *pkt)
+@@ -356,11 +365,8 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
+ 		goto err_flow_alloc;
+ 
+ 	flow_offload_route_init(flow, &route);
+-
+-	if (tcph) {
+-		ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
+-		ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
+-	}
++	if (tcph)
++		flow_offload_ct_tcp(ct);
+ 
+ 	__set_bit(NF_FLOW_HW_BIDIRECTIONAL, &flow->flags);
+ 	ret = flow_offload_add(flowtable, flow);
+diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
+index b7ea21327549b3..2e8ef16ff191d4 100644
+--- a/net/netfilter/nft_set_rbtree.c
++++ b/net/netfilter/nft_set_rbtree.c
+@@ -750,6 +750,46 @@ static void nft_rbtree_gc_init(const struct nft_set *set)
+ 	priv->last_gc = jiffies;
+ }
+ 
++/* rbtree stores ranges as singleton elements, each range is composed of two
++ * elements ...
++ */
++static u32 nft_rbtree_ksize(u32 size)
++{
++	return size * 2;
++}
++
++/* ... hide this detail to userspace. */
++static u32 nft_rbtree_usize(u32 size)
++{
++	if (!size)
++		return 0;
++
++	return size / 2;
++}
++
++static u32 nft_rbtree_adjust_maxsize(const struct nft_set *set)
++{
++	struct nft_rbtree *priv = nft_set_priv(set);
++	struct nft_rbtree_elem *rbe;
++	struct rb_node *node;
++	const void *key;
++
++	node = rb_last(&priv->root);
++	if (!node)
++		return 0;
++
++	rbe = rb_entry(node, struct nft_rbtree_elem, node);
++	if (!nft_rbtree_interval_end(rbe))
++		return 0;
++
++	key = nft_set_ext_key(&rbe->ext);
++	if (memchr(key, 1, set->klen))
++		return 0;
++
++	/* this is the all-zero no-match element. */
++	return 1;
++}
++
+ const struct nft_set_type nft_set_rbtree_type = {
+ 	.features	= NFT_SET_INTERVAL | NFT_SET_MAP | NFT_SET_OBJECT | NFT_SET_TIMEOUT,
+ 	.ops		= {
+@@ -768,5 +808,8 @@ const struct nft_set_type nft_set_rbtree_type = {
+ 		.lookup		= nft_rbtree_lookup,
+ 		.walk		= nft_rbtree_walk,
+ 		.get		= nft_rbtree_get,
++		.ksize		= nft_rbtree_ksize,
++		.usize		= nft_rbtree_usize,
++		.adjust_maxsize = nft_rbtree_adjust_maxsize,
+ 	},
+ };
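+/*
+ * The rbtree backend above stores one user-visible range as two tree
+ * nodes (start and end), so the set size must be doubled going into the
+ * kernel and halved coming back out; adjust_maxsize() additionally
+ * credits back the implicit all-zero end element.  The two converters
+ * are simply:
+ *
+ *	#include <stdint.h>
+ *
+ *	static uint32_t rbtree_ksize(uint32_t user_size)
+ *	{
+ *		return user_size * 2;    // start + end node per range
+ *	}
+ *
+ *	static uint32_t rbtree_usize(uint32_t kernel_size)
+ *	{
+ *		return kernel_size ? kernel_size / 2 : 0;
+ *	}
+ */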
+diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
+index 59050caab65c8b..72c65d938a150e 100644
+--- a/net/rose/af_rose.c
++++ b/net/rose/af_rose.c
+@@ -397,15 +397,15 @@ static int rose_setsockopt(struct socket *sock, int level, int optname,
+ {
+ 	struct sock *sk = sock->sk;
+ 	struct rose_sock *rose = rose_sk(sk);
+-	int opt;
++	unsigned int opt;
+ 
+ 	if (level != SOL_ROSE)
+ 		return -ENOPROTOOPT;
+ 
+-	if (optlen < sizeof(int))
++	if (optlen < sizeof(unsigned int))
+ 		return -EINVAL;
+ 
+-	if (copy_from_sockptr(&opt, optval, sizeof(int)))
++	if (copy_from_sockptr(&opt, optval, sizeof(unsigned int)))
+ 		return -EFAULT;
+ 
+ 	switch (optname) {
+@@ -414,31 +414,31 @@ static int rose_setsockopt(struct socket *sock, int level, int optname,
+ 		return 0;
+ 
+ 	case ROSE_T1:
+-		if (opt < 1)
++		if (opt < 1 || opt > UINT_MAX / HZ)
+ 			return -EINVAL;
+ 		rose->t1 = opt * HZ;
+ 		return 0;
+ 
+ 	case ROSE_T2:
+-		if (opt < 1)
++		if (opt < 1 || opt > UINT_MAX / HZ)
+ 			return -EINVAL;
+ 		rose->t2 = opt * HZ;
+ 		return 0;
+ 
+ 	case ROSE_T3:
+-		if (opt < 1)
++		if (opt < 1 || opt > UINT_MAX / HZ)
+ 			return -EINVAL;
+ 		rose->t3 = opt * HZ;
+ 		return 0;
+ 
+ 	case ROSE_HOLDBACK:
+-		if (opt < 1)
++		if (opt < 1 || opt > UINT_MAX / HZ)
+ 			return -EINVAL;
+ 		rose->hb = opt * HZ;
+ 		return 0;
+ 
+ 	case ROSE_IDLE:
+-		if (opt < 0)
++		if (opt > UINT_MAX / (60 * HZ))
+ 			return -EINVAL;
+ 		rose->idle = opt * 60 * HZ;
+ 		return 0;
+diff --git a/net/rose/rose_timer.c b/net/rose/rose_timer.c
+index f06ddbed3fed63..1525773e94aa17 100644
+--- a/net/rose/rose_timer.c
++++ b/net/rose/rose_timer.c
+@@ -122,6 +122,10 @@ static void rose_heartbeat_expiry(struct timer_list *t)
+ 	struct rose_sock *rose = rose_sk(sk);
+ 
+ 	bh_lock_sock(sk);
++	if (sock_owned_by_user(sk)) {
++		sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ/20);
++		goto out;
++	}
+ 	switch (rose->state) {
+ 	case ROSE_STATE_0:
+ 		/* Magic here: If we listen() and a new link dies before it
+@@ -152,6 +156,7 @@ static void rose_heartbeat_expiry(struct timer_list *t)
+ 	}
+ 
+ 	rose_start_heartbeat(sk);
++out:
+ 	bh_unlock_sock(sk);
+ 	sock_put(sk);
+ }
+@@ -162,6 +167,10 @@ static void rose_timer_expiry(struct timer_list *t)
+ 	struct sock *sk = &rose->sock;
+ 
+ 	bh_lock_sock(sk);
++	if (sock_owned_by_user(sk)) {
++		sk_reset_timer(sk, &rose->timer, jiffies + HZ/20);
++		goto out;
++	}
+ 	switch (rose->state) {
+ 	case ROSE_STATE_1:	/* T1 */
+ 	case ROSE_STATE_4:	/* T2 */
+@@ -182,6 +191,7 @@ static void rose_timer_expiry(struct timer_list *t)
+ 		}
+ 		break;
+ 	}
++out:
+ 	bh_unlock_sock(sk);
+ 	sock_put(sk);
+ }
+@@ -192,6 +202,10 @@ static void rose_idletimer_expiry(struct timer_list *t)
+ 	struct sock *sk = &rose->sock;
+ 
+ 	bh_lock_sock(sk);
++	if (sock_owned_by_user(sk)) {
++		sk_reset_timer(sk, &rose->idletimer, jiffies + HZ/20);
++		goto out;
++	}
+ 	rose_clear_queues(sk);
+ 
+ 	rose_write_internal(sk, ROSE_CLEAR_REQUEST);
+@@ -207,6 +221,7 @@ static void rose_idletimer_expiry(struct timer_list *t)
+ 		sk->sk_state_change(sk);
+ 		sock_set_flag(sk, SOCK_DEAD);
+ 	}
++out:
+ 	bh_unlock_sock(sk);
+ 	sock_put(sk);
+ }
+diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c
+index 598b4ee389fc1e..2a1396cd892f30 100644
+--- a/net/rxrpc/conn_event.c
++++ b/net/rxrpc/conn_event.c
+@@ -63,11 +63,12 @@ int rxrpc_abort_conn(struct rxrpc_connection *conn, struct sk_buff *skb,
+ /*
+  * Mark a connection as being remotely aborted.
+  */
+-static bool rxrpc_input_conn_abort(struct rxrpc_connection *conn,
++static void rxrpc_input_conn_abort(struct rxrpc_connection *conn,
+ 				   struct sk_buff *skb)
+ {
+-	return rxrpc_set_conn_aborted(conn, skb, skb->priority, -ECONNABORTED,
+-				      RXRPC_CALL_REMOTELY_ABORTED);
++	trace_rxrpc_rx_conn_abort(conn, skb);
++	rxrpc_set_conn_aborted(conn, skb, skb->priority, -ECONNABORTED,
++			       RXRPC_CALL_REMOTELY_ABORTED);
+ }
+ 
+ /*
+@@ -202,11 +203,14 @@ static void rxrpc_abort_calls(struct rxrpc_connection *conn)
+ 
+ 	for (i = 0; i < RXRPC_MAXCALLS; i++) {
+ 		call = conn->channels[i].call;
+-		if (call)
++		if (call) {
++			rxrpc_see_call(call, rxrpc_call_see_conn_abort);
+ 			rxrpc_set_call_completion(call,
+ 						  conn->completion,
+ 						  conn->abort_code,
+ 						  conn->error);
++			rxrpc_poke_call(call, rxrpc_call_poke_conn_abort);
++		}
+ 	}
+ 
+ 	_leave("");
+diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c
+index 552ba84a255c43..5d0842efde69ff 100644
+--- a/net/rxrpc/peer_event.c
++++ b/net/rxrpc/peer_event.c
+@@ -238,7 +238,7 @@ static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
+ 	bool use;
+ 	int slot;
+ 
+-	spin_lock(&rxnet->peer_hash_lock);
++	spin_lock_bh(&rxnet->peer_hash_lock);
+ 
+ 	while (!list_empty(collector)) {
+ 		peer = list_entry(collector->next,
+@@ -249,7 +249,7 @@ static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
+ 			continue;
+ 
+ 		use = __rxrpc_use_local(peer->local, rxrpc_local_use_peer_keepalive);
+-		spin_unlock(&rxnet->peer_hash_lock);
++		spin_unlock_bh(&rxnet->peer_hash_lock);
+ 
+ 		if (use) {
+ 			keepalive_at = peer->last_tx_at + RXRPC_KEEPALIVE_TIME;
+@@ -269,17 +269,17 @@ static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
+ 			 */
+ 			slot += cursor;
+ 			slot &= mask;
+-			spin_lock(&rxnet->peer_hash_lock);
++			spin_lock_bh(&rxnet->peer_hash_lock);
+ 			list_add_tail(&peer->keepalive_link,
+ 				      &rxnet->peer_keepalive[slot & mask]);
+-			spin_unlock(&rxnet->peer_hash_lock);
++			spin_unlock_bh(&rxnet->peer_hash_lock);
+ 			rxrpc_unuse_local(peer->local, rxrpc_local_unuse_peer_keepalive);
+ 		}
+ 		rxrpc_put_peer(peer, rxrpc_peer_put_keepalive);
+-		spin_lock(&rxnet->peer_hash_lock);
++		spin_lock_bh(&rxnet->peer_hash_lock);
+ 	}
+ 
+-	spin_unlock(&rxnet->peer_hash_lock);
++	spin_unlock_bh(&rxnet->peer_hash_lock);
+ }
+ 
+ /*
+@@ -309,7 +309,7 @@ void rxrpc_peer_keepalive_worker(struct work_struct *work)
+ 	 * second; the bucket at cursor + 1 goes at now + 1s and so
+ 	 * on...
+ 	 */
+-	spin_lock(&rxnet->peer_hash_lock);
++	spin_lock_bh(&rxnet->peer_hash_lock);
+ 	list_splice_init(&rxnet->peer_keepalive_new, &collector);
+ 
+ 	stop = cursor + ARRAY_SIZE(rxnet->peer_keepalive);
+@@ -321,7 +321,7 @@ void rxrpc_peer_keepalive_worker(struct work_struct *work)
+ 	}
+ 
+ 	base = now;
+-	spin_unlock(&rxnet->peer_hash_lock);
++	spin_unlock_bh(&rxnet->peer_hash_lock);
+ 
+ 	rxnet->peer_keepalive_base = base;
+ 	rxnet->peer_keepalive_cursor = cursor;
+diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c
+index 49dcda67a0d591..956fc7ea4b7346 100644
+--- a/net/rxrpc/peer_object.c
++++ b/net/rxrpc/peer_object.c
+@@ -313,10 +313,10 @@ void rxrpc_new_incoming_peer(struct rxrpc_local *local, struct rxrpc_peer *peer)
+ 	hash_key = rxrpc_peer_hash_key(local, &peer->srx);
+ 	rxrpc_init_peer(local, peer, hash_key);
+ 
+-	spin_lock(&rxnet->peer_hash_lock);
++	spin_lock_bh(&rxnet->peer_hash_lock);
+ 	hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key);
+ 	list_add_tail(&peer->keepalive_link, &rxnet->peer_keepalive_new);
+-	spin_unlock(&rxnet->peer_hash_lock);
++	spin_unlock_bh(&rxnet->peer_hash_lock);
+ }
+ 
+ /*
+@@ -348,7 +348,7 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local,
+ 			return NULL;
+ 		}
+ 
+-		spin_lock(&rxnet->peer_hash_lock);
++		spin_lock_bh(&rxnet->peer_hash_lock);
+ 
+ 		/* Need to check that we aren't racing with someone else */
+ 		peer = __rxrpc_lookup_peer_rcu(local, srx, hash_key);
+@@ -361,7 +361,7 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local,
+ 				      &rxnet->peer_keepalive_new);
+ 		}
+ 
+-		spin_unlock(&rxnet->peer_hash_lock);
++		spin_unlock_bh(&rxnet->peer_hash_lock);
+ 
+ 		if (peer)
+ 			rxrpc_free_peer(candidate);
+@@ -411,10 +411,10 @@ static void __rxrpc_put_peer(struct rxrpc_peer *peer)
+ 
+ 	ASSERT(hlist_empty(&peer->error_targets));
+ 
+-	spin_lock(&rxnet->peer_hash_lock);
++	spin_lock_bh(&rxnet->peer_hash_lock);
+ 	hash_del_rcu(&peer->hash_link);
+ 	list_del_init(&peer->keepalive_link);
+-	spin_unlock(&rxnet->peer_hash_lock);
++	spin_unlock_bh(&rxnet->peer_hash_lock);
+ 
+ 	rxrpc_free_peer(peer);
+ }
+diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
+index bbc778c233c892..dfa3067084948f 100644
+--- a/net/sched/cls_api.c
++++ b/net/sched/cls_api.c
+@@ -390,6 +390,7 @@ static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
+ 	tp->protocol = protocol;
+ 	tp->prio = prio;
+ 	tp->chain = chain;
++	tp->usesw = !tp->ops->reoffload;
+ 	spin_lock_init(&tp->lock);
+ 	refcount_set(&tp->refcnt, 1);
+ 
+@@ -410,39 +411,31 @@ static void tcf_proto_get(struct tcf_proto *tp)
+ 	refcount_inc(&tp->refcnt);
+ }
+ 
+-static void tcf_maintain_bypass(struct tcf_block *block)
++static void tcf_proto_count_usesw(struct tcf_proto *tp, bool add)
+ {
+-	int filtercnt = atomic_read(&block->filtercnt);
+-	int skipswcnt = atomic_read(&block->skipswcnt);
+-	bool bypass_wanted = filtercnt > 0 && filtercnt == skipswcnt;
+-
+-	if (bypass_wanted != block->bypass_wanted) {
+ #ifdef CONFIG_NET_CLS_ACT
+-		if (bypass_wanted)
+-			static_branch_inc(&tcf_bypass_check_needed_key);
+-		else
+-			static_branch_dec(&tcf_bypass_check_needed_key);
+-#endif
+-		block->bypass_wanted = bypass_wanted;
++	struct tcf_block *block = tp->chain->block;
++	bool counted = false;
++
++	if (!add) {
++		if (tp->usesw && tp->counted) {
++			if (!atomic_dec_return(&block->useswcnt))
++				static_branch_dec(&tcf_sw_enabled_key);
++			tp->counted = false;
++		}
++		return;
+ 	}
+-}
+-
+-static void tcf_block_filter_cnt_update(struct tcf_block *block, bool *counted, bool add)
+-{
+-	lockdep_assert_not_held(&block->cb_lock);
+ 
+-	down_write(&block->cb_lock);
+-	if (*counted != add) {
+-		if (add) {
+-			atomic_inc(&block->filtercnt);
+-			*counted = true;
+-		} else {
+-			atomic_dec(&block->filtercnt);
+-			*counted = false;
+-		}
++	spin_lock(&tp->lock);
++	if (tp->usesw && !tp->counted) {
++		counted = true;
++		tp->counted = true;
+ 	}
+-	tcf_maintain_bypass(block);
+-	up_write(&block->cb_lock);
++	spin_unlock(&tp->lock);
++
++	if (counted && atomic_inc_return(&block->useswcnt) == 1)
++		static_branch_inc(&tcf_sw_enabled_key);
++#endif
+ }
+ 
+ static void tcf_chain_put(struct tcf_chain *chain);
+@@ -451,7 +444,7 @@ static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
+ 			      bool sig_destroy, struct netlink_ext_ack *extack)
+ {
+ 	tp->ops->destroy(tp, rtnl_held, extack);
+-	tcf_block_filter_cnt_update(tp->chain->block, &tp->counted, false);
++	tcf_proto_count_usesw(tp, false);
+ 	if (sig_destroy)
+ 		tcf_proto_signal_destroyed(tp->chain, tp);
+ 	tcf_chain_put(tp->chain);
+@@ -2404,7 +2397,7 @@ static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
+ 		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
+ 			       RTM_NEWTFILTER, false, rtnl_held, extack);
+ 		tfilter_put(tp, fh);
+-		tcf_block_filter_cnt_update(block, &tp->counted, true);
++		tcf_proto_count_usesw(tp, true);
+ 		/* q pointer is NULL for shared blocks */
+ 		if (q)
+ 			q->flags &= ~TCQ_F_CAN_BYPASS;
+@@ -3521,8 +3514,6 @@ static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
+ 	if (*flags & TCA_CLS_FLAGS_IN_HW)
+ 		return;
+ 	*flags |= TCA_CLS_FLAGS_IN_HW;
+-	if (tc_skip_sw(*flags))
+-		atomic_inc(&block->skipswcnt);
+ 	atomic_inc(&block->offloadcnt);
+ }
+ 
+@@ -3531,8 +3522,6 @@ static void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
+ 	if (!(*flags & TCA_CLS_FLAGS_IN_HW))
+ 		return;
+ 	*flags &= ~TCA_CLS_FLAGS_IN_HW;
+-	if (tc_skip_sw(*flags))
+-		atomic_dec(&block->skipswcnt);
+ 	atomic_dec(&block->offloadcnt);
+ }
+ 
+diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
+index 1941ebec23ff9c..7fbe42f0e5c2b7 100644
+--- a/net/sched/cls_bpf.c
++++ b/net/sched/cls_bpf.c
+@@ -509,6 +509,8 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
+ 	if (!tc_in_hw(prog->gen_flags))
+ 		prog->gen_flags |= TCA_CLS_FLAGS_NOT_IN_HW;
+ 
++	tcf_proto_update_usesw(tp, prog->gen_flags);
++
+ 	if (oldprog) {
+ 		idr_replace(&head->handle_idr, prog, handle);
+ 		list_replace_rcu(&oldprog->link, &prog->link);
+diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
+index 1008ec8a464c93..03505673d5234d 100644
+--- a/net/sched/cls_flower.c
++++ b/net/sched/cls_flower.c
+@@ -2503,6 +2503,8 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
+ 	if (!tc_in_hw(fnew->flags))
+ 		fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
+ 
++	tcf_proto_update_usesw(tp, fnew->flags);
++
+ 	spin_lock(&tp->lock);
+ 
+ 	/* tp was deleted concurrently. -EAGAIN will cause caller to lookup
+diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
+index 9f1e62ca508d04..f03bf5da39ee83 100644
+--- a/net/sched/cls_matchall.c
++++ b/net/sched/cls_matchall.c
+@@ -228,6 +228,8 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
+ 	if (!tc_in_hw(new->flags))
+ 		new->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
+ 
++	tcf_proto_update_usesw(tp, new->flags);
++
+ 	*arg = head;
+ 	rcu_assign_pointer(tp->root, new);
+ 	return 0;
+diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
+index d3a03c57545bcc..2a1c00048fd6f4 100644
+--- a/net/sched/cls_u32.c
++++ b/net/sched/cls_u32.c
+@@ -951,6 +951,8 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
+ 		if (!tc_in_hw(new->flags))
+ 			new->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
+ 
++		tcf_proto_update_usesw(tp, new->flags);
++
+ 		u32_replace_knode(tp, tp_c, new);
+ 		tcf_unbind_filter(tp, &n->res);
+ 		tcf_exts_get_net(&n->exts);
+@@ -1164,6 +1166,8 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
+ 		if (!tc_in_hw(n->flags))
+ 			n->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
+ 
++		tcf_proto_update_usesw(tp, n->flags);
++
+ 		ins = &ht->ht[TC_U32_HASH(handle)];
+ 		for (pins = rtnl_dereference(*ins); pins;
+ 		     ins = &pins->next, pins = rtnl_dereference(*ins))
+diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
+index a1d27bc039a364..d26ac6bd9b1080 100644
+--- a/net/sched/sch_api.c
++++ b/net/sched/sch_api.c
+@@ -1664,6 +1664,10 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
+ 				q = qdisc_lookup(dev, tcm->tcm_handle);
+ 				if (!q)
+ 					goto create_n_graft;
++				if (q->parent != tcm->tcm_parent) {
++					NL_SET_ERR_MSG(extack, "Cannot move an existing qdisc to a different parent");
++					return -EINVAL;
++				}
+ 				if (n->nlmsg_flags & NLM_F_EXCL) {
+ 					NL_SET_ERR_MSG(extack, "Exclusivity flag on, cannot override");
+ 					return -EEXIST;
+diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
+index 38ec18f73de43a..8874ae6680952a 100644
+--- a/net/sched/sch_generic.c
++++ b/net/sched/sch_generic.c
+@@ -911,8 +911,8 @@ static int pfifo_fast_change_tx_queue_len(struct Qdisc *sch,
+ 		bands[prio] = q;
+ 	}
+ 
+-	return skb_array_resize_multiple(bands, PFIFO_FAST_BANDS, new_len,
+-					 GFP_KERNEL);
++	return skb_array_resize_multiple_bh(bands, PFIFO_FAST_BANDS, new_len,
++					    GFP_KERNEL);
+ }
+ 
+ struct Qdisc_ops pfifo_fast_ops __read_mostly = {
+diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
+index 3b9245a3c767a6..65d5b59da58303 100644
+--- a/net/sched/sch_sfq.c
++++ b/net/sched/sch_sfq.c
+@@ -77,12 +77,6 @@
+ #define SFQ_EMPTY_SLOT		0xffff
+ #define SFQ_DEFAULT_HASH_DIVISOR 1024
+ 
+-/* We use 16 bits to store allot, and want to handle packets up to 64K
+- * Scale allot by 8 (1<<3) so that no overflow occurs.
+- */
+-#define SFQ_ALLOT_SHIFT		3
+-#define SFQ_ALLOT_SIZE(X)	DIV_ROUND_UP(X, 1 << SFQ_ALLOT_SHIFT)
+-
+ /* This type should contain at least SFQ_MAX_DEPTH + 1 + SFQ_MAX_FLOWS values */
+ typedef u16 sfq_index;
+ 
+@@ -104,7 +98,7 @@ struct sfq_slot {
+ 	sfq_index	next; /* next slot in sfq RR chain */
+ 	struct sfq_head dep; /* anchor in dep[] chains */
+ 	unsigned short	hash; /* hash value (index in ht[]) */
+-	short		allot; /* credit for this slot */
++	int		allot; /* credit for this slot */
+ 
+ 	unsigned int    backlog;
+ 	struct red_vars vars;
+@@ -120,7 +114,6 @@ struct sfq_sched_data {
+ 	siphash_key_t 	perturbation;
+ 	u8		cur_depth;	/* depth of longest slot */
+ 	u8		flags;
+-	unsigned short  scaled_quantum; /* SFQ_ALLOT_SIZE(quantum) */
+ 	struct tcf_proto __rcu *filter_list;
+ 	struct tcf_block *block;
+ 	sfq_index	*ht;		/* Hash table ('divisor' slots) */
+@@ -456,7 +449,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
+ 		 */
+ 		q->tail = slot;
+ 		/* We could use a bigger initial quantum for new flows */
+-		slot->allot = q->scaled_quantum;
++		slot->allot = q->quantum;
+ 	}
+ 	if (++sch->q.qlen <= q->limit)
+ 		return NET_XMIT_SUCCESS;
+@@ -493,7 +486,7 @@ sfq_dequeue(struct Qdisc *sch)
+ 	slot = &q->slots[a];
+ 	if (slot->allot <= 0) {
+ 		q->tail = slot;
+-		slot->allot += q->scaled_quantum;
++		slot->allot += q->quantum;
+ 		goto next_slot;
+ 	}
+ 	skb = slot_dequeue_head(slot);
+@@ -512,7 +505,7 @@ sfq_dequeue(struct Qdisc *sch)
+ 		}
+ 		q->tail->next = next_a;
+ 	} else {
+-		slot->allot -= SFQ_ALLOT_SIZE(qdisc_pkt_len(skb));
++		slot->allot -= qdisc_pkt_len(skb);
+ 	}
+ 	return skb;
+ }
+@@ -595,7 +588,7 @@ static void sfq_rehash(struct Qdisc *sch)
+ 				q->tail->next = x;
+ 			}
+ 			q->tail = slot;
+-			slot->allot = q->scaled_quantum;
++			slot->allot = q->quantum;
+ 		}
+ 	}
+ 	sch->q.qlen -= dropped;
+@@ -628,7 +621,8 @@ static void sfq_perturbation(struct timer_list *t)
+ 	rcu_read_unlock();
+ }
+ 
+-static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
++static int sfq_change(struct Qdisc *sch, struct nlattr *opt,
++		      struct netlink_ext_ack *extack)
+ {
+ 	struct sfq_sched_data *q = qdisc_priv(sch);
+ 	struct tc_sfq_qopt *ctl = nla_data(opt);
+@@ -646,14 +640,10 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
+ 	    (!is_power_of_2(ctl->divisor) || ctl->divisor > 65536))
+ 		return -EINVAL;
+ 
+-	/* slot->allot is a short, make sure quantum is not too big. */
+-	if (ctl->quantum) {
+-		unsigned int scaled = SFQ_ALLOT_SIZE(ctl->quantum);
+-
+-		if (scaled <= 0 || scaled > SHRT_MAX)
+-			return -EINVAL;
++	if ((int)ctl->quantum < 0) {
++		NL_SET_ERR_MSG_MOD(extack, "invalid quantum");
++		return -EINVAL;
+ 	}
+-
+ 	if (ctl_v1 && !red_check_params(ctl_v1->qth_min, ctl_v1->qth_max,
+ 					ctl_v1->Wlog, ctl_v1->Scell_log, NULL))
+ 		return -EINVAL;
+@@ -662,11 +652,13 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
+ 		if (!p)
+ 			return -ENOMEM;
+ 	}
++	if (ctl->limit == 1) {
++		NL_SET_ERR_MSG_MOD(extack, "invalid limit");
++		return -EINVAL;
++	}
+ 	sch_tree_lock(sch);
+-	if (ctl->quantum) {
++	if (ctl->quantum)
+ 		q->quantum = ctl->quantum;
+-		q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
+-	}
+ 	WRITE_ONCE(q->perturb_period, ctl->perturb_period * HZ);
+ 	if (ctl->flows)
+ 		q->maxflows = min_t(u32, ctl->flows, SFQ_MAX_FLOWS);
+@@ -762,12 +754,11 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt,
+ 	q->divisor = SFQ_DEFAULT_HASH_DIVISOR;
+ 	q->maxflows = SFQ_DEFAULT_FLOWS;
+ 	q->quantum = psched_mtu(qdisc_dev(sch));
+-	q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
+ 	q->perturb_period = 0;
+ 	get_random_bytes(&q->perturbation, sizeof(q->perturbation));
+ 
+ 	if (opt) {
+-		int err = sfq_change(sch, opt);
++		int err = sfq_change(sch, opt, extack);
+ 		if (err)
+ 			return err;
+ 	}
+@@ -878,7 +869,7 @@ static int sfq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
+ 	if (idx != SFQ_EMPTY_SLOT) {
+ 		const struct sfq_slot *slot = &q->slots[idx];
+ 
+-		xstats.allot = slot->allot << SFQ_ALLOT_SHIFT;
++		xstats.allot = slot->allot;
+ 		qs.qlen = slot->qlen;
+ 		qs.backlog = slot->backlog;
+ 	}
+diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
+index 6cc7b846cff1bb..ebc41a7b13dbec 100644
+--- a/net/smc/af_smc.c
++++ b/net/smc/af_smc.c
+@@ -2738,7 +2738,7 @@ int smc_accept(struct socket *sock, struct socket *new_sock,
+ 			release_sock(clcsk);
+ 		} else if (!atomic_read(&smc_sk(nsk)->conn.bytes_to_rcv)) {
+ 			lock_sock(nsk);
+-			smc_rx_wait(smc_sk(nsk), &timeo, smc_rx_data_available);
++			smc_rx_wait(smc_sk(nsk), &timeo, 0, smc_rx_data_available);
+ 			release_sock(nsk);
+ 		}
+ 	}
+diff --git a/net/smc/smc_rx.c b/net/smc/smc_rx.c
+index f0cbe77a80b440..79047721df5110 100644
+--- a/net/smc/smc_rx.c
++++ b/net/smc/smc_rx.c
+@@ -238,22 +238,23 @@ static int smc_rx_splice(struct pipe_inode_info *pipe, char *src, size_t len,
+ 	return -ENOMEM;
+ }
+ 
+-static int smc_rx_data_available_and_no_splice_pend(struct smc_connection *conn)
++static int smc_rx_data_available_and_no_splice_pend(struct smc_connection *conn, size_t peeked)
+ {
+-	return atomic_read(&conn->bytes_to_rcv) &&
++	return smc_rx_data_available(conn, peeked) &&
+ 	       !atomic_read(&conn->splice_pending);
+ }
+ 
+ /* blocks rcvbuf consumer until >=len bytes available or timeout or interrupted
+  *   @smc    smc socket
+  *   @timeo  pointer to max seconds to wait, pointer to value 0 for no timeout
++ *   @peeked  number of bytes already peeked
+  *   @fcrit  add'l criterion to evaluate as function pointer
+  * Returns:
+  * 1 if at least 1 byte available in rcvbuf or if socket error/shutdown.
+  * 0 otherwise (nothing in rcvbuf nor timeout, e.g. interrupted).
+  */
+-int smc_rx_wait(struct smc_sock *smc, long *timeo,
+-		int (*fcrit)(struct smc_connection *conn))
++int smc_rx_wait(struct smc_sock *smc, long *timeo, size_t peeked,
++		int (*fcrit)(struct smc_connection *conn, size_t baseline))
+ {
+ 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
+ 	struct smc_connection *conn = &smc->conn;
+@@ -262,7 +263,7 @@ int smc_rx_wait(struct smc_sock *smc, long *timeo,
+ 	struct sock *sk = &smc->sk;
+ 	int rc;
+ 
+-	if (fcrit(conn))
++	if (fcrit(conn, peeked))
+ 		return 1;
+ 	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
+ 	add_wait_queue(sk_sleep(sk), &wait);
+@@ -271,7 +272,7 @@ int smc_rx_wait(struct smc_sock *smc, long *timeo,
+ 			   cflags->peer_conn_abort ||
+ 			   READ_ONCE(sk->sk_shutdown) & RCV_SHUTDOWN ||
+ 			   conn->killed ||
+-			   fcrit(conn),
++			   fcrit(conn, peeked),
+ 			   &wait);
+ 	remove_wait_queue(sk_sleep(sk), &wait);
+ 	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
+@@ -322,11 +323,11 @@ static int smc_rx_recv_urg(struct smc_sock *smc, struct msghdr *msg, int len,
+ 	return -EAGAIN;
+ }
+ 
+-static bool smc_rx_recvmsg_data_available(struct smc_sock *smc)
++static bool smc_rx_recvmsg_data_available(struct smc_sock *smc, size_t peeked)
+ {
+ 	struct smc_connection *conn = &smc->conn;
+ 
+-	if (smc_rx_data_available(conn))
++	if (smc_rx_data_available(conn, peeked))
+ 		return true;
+ 	else if (conn->urg_state == SMC_URG_VALID)
+ 		/* we received a single urgent Byte - skip */
+@@ -344,10 +345,10 @@ static bool smc_rx_recvmsg_data_available(struct smc_sock *smc)
+ int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg,
+ 		   struct pipe_inode_info *pipe, size_t len, int flags)
+ {
+-	size_t copylen, read_done = 0, read_remaining = len;
++	size_t copylen, read_done = 0, read_remaining = len, peeked_bytes = 0;
+ 	size_t chunk_len, chunk_off, chunk_len_sum;
+ 	struct smc_connection *conn = &smc->conn;
+-	int (*func)(struct smc_connection *conn);
++	int (*func)(struct smc_connection *conn, size_t baseline);
+ 	union smc_host_cursor cons;
+ 	int readable, chunk;
+ 	char *rcvbuf_base;
+@@ -384,14 +385,14 @@ int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg,
+ 		if (conn->killed)
+ 			break;
+ 
+-		if (smc_rx_recvmsg_data_available(smc))
++		if (smc_rx_recvmsg_data_available(smc, peeked_bytes))
+ 			goto copy;
+ 
+ 		if (sk->sk_shutdown & RCV_SHUTDOWN) {
+ 			/* smc_cdc_msg_recv_action() could have run after
+ 			 * above smc_rx_recvmsg_data_available()
+ 			 */
+-			if (smc_rx_recvmsg_data_available(smc))
++			if (smc_rx_recvmsg_data_available(smc, peeked_bytes))
+ 				goto copy;
+ 			break;
+ 		}
+@@ -425,26 +426,28 @@ int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg,
+ 			}
+ 		}
+ 
+-		if (!smc_rx_data_available(conn)) {
+-			smc_rx_wait(smc, &timeo, smc_rx_data_available);
++		if (!smc_rx_data_available(conn, peeked_bytes)) {
++			smc_rx_wait(smc, &timeo, peeked_bytes, smc_rx_data_available);
+ 			continue;
+ 		}
+ 
+ copy:
+ 		/* initialize variables for 1st iteration of subsequent loop */
+ 		/* could be just 1 byte, even after waiting on data above */
+-		readable = atomic_read(&conn->bytes_to_rcv);
++		readable = smc_rx_data_available(conn, peeked_bytes);
+ 		splbytes = atomic_read(&conn->splice_pending);
+ 		if (!readable || (msg && splbytes)) {
+ 			if (splbytes)
+ 				func = smc_rx_data_available_and_no_splice_pend;
+ 			else
+ 				func = smc_rx_data_available;
+-			smc_rx_wait(smc, &timeo, func);
++			smc_rx_wait(smc, &timeo, peeked_bytes, func);
+ 			continue;
+ 		}
+ 
+ 		smc_curs_copy(&cons, &conn->local_tx_ctrl.cons, conn);
++		if ((flags & MSG_PEEK) && peeked_bytes)
++			smc_curs_add(conn->rmb_desc->len, &cons, peeked_bytes);
+ 		/* subsequent splice() calls pick up where previous left */
+ 		if (splbytes)
+ 			smc_curs_add(conn->rmb_desc->len, &cons, splbytes);
+@@ -480,6 +483,8 @@ int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg,
+ 			}
+ 			read_remaining -= chunk_len;
+ 			read_done += chunk_len;
++			if (flags & MSG_PEEK)
++				peeked_bytes += chunk_len;
+ 
+ 			if (chunk_len_sum == copylen)
+ 				break; /* either on 1st or 2nd iteration */
+diff --git a/net/smc/smc_rx.h b/net/smc/smc_rx.h
+index db823c97d824ea..994f5e42d1ba26 100644
+--- a/net/smc/smc_rx.h
++++ b/net/smc/smc_rx.h
+@@ -21,11 +21,11 @@ void smc_rx_init(struct smc_sock *smc);
+ 
+ int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg,
+ 		   struct pipe_inode_info *pipe, size_t len, int flags);
+-int smc_rx_wait(struct smc_sock *smc, long *timeo,
+-		int (*fcrit)(struct smc_connection *conn));
+-static inline int smc_rx_data_available(struct smc_connection *conn)
++int smc_rx_wait(struct smc_sock *smc, long *timeo, size_t peeked,
++		int (*fcrit)(struct smc_connection *conn, size_t baseline));
++static inline int smc_rx_data_available(struct smc_connection *conn, size_t peeked)
+ {
+-	return atomic_read(&conn->bytes_to_rcv);
++	return atomic_read(&conn->bytes_to_rcv) - peeked;
+ }
+ 
+ #endif /* SMC_RX_H */
+diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
+index 59e2c46240f5c1..3bfbb789c4beed 100644
+--- a/net/sunrpc/svcsock.c
++++ b/net/sunrpc/svcsock.c
+@@ -1083,9 +1083,6 @@ static void svc_tcp_fragment_received(struct svc_sock *svsk)
+ 	/* If we have more data, signal svc_xprt_enqueue() to try again */
+ 	svsk->sk_tcplen = 0;
+ 	svsk->sk_marker = xdr_zero;
+-
+-	smp_wmb();
+-	tcp_set_rcvlowat(svsk->sk_sk, 1);
+ }
+ 
+ /**
+@@ -1175,17 +1172,10 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
+ 		goto err_delete;
+ 	if (len == want)
+ 		svc_tcp_fragment_received(svsk);
+-	else {
+-		/* Avoid more ->sk_data_ready() calls until the rest
+-		 * of the message has arrived. This reduces service
+-		 * thread wake-ups on large incoming messages. */
+-		tcp_set_rcvlowat(svsk->sk_sk,
+-				 svc_sock_reclen(svsk) - svsk->sk_tcplen);
+-
++	else
+ 		trace_svcsock_tcp_recv_short(&svsk->sk_xprt,
+ 				svc_sock_reclen(svsk),
+ 				svsk->sk_tcplen - sizeof(rpc_fraghdr));
+-	}
+ 	goto err_noclose;
+ error:
+ 	if (len != -EAGAIN)
+diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
+index 15724f171b0f96..f5d116a1bdea1a 100644
+--- a/net/vmw_vsock/af_vsock.c
++++ b/net/vmw_vsock/af_vsock.c
+@@ -1519,6 +1519,11 @@ static int vsock_connect(struct socket *sock, struct sockaddr *addr,
+ 		if (err < 0)
+ 			goto out;
+ 
++		/* sk_err might have been set as a result of an earlier
++		 * (failed) connect attempt.
++		 */
++		sk->sk_err = 0;
++
+ 		/* Mark sock as connecting and set the error code to in
+ 		 * progress in case this is a non-blocking connect.
+ 		 */
+diff --git a/net/wireless/scan.c b/net/wireless/scan.c
+index d0aed41ded2f19..18e132cdea72a8 100644
+--- a/net/wireless/scan.c
++++ b/net/wireless/scan.c
+@@ -763,12 +763,11 @@ static  void cfg80211_scan_req_add_chan(struct cfg80211_scan_request *request,
+ 		}
+ 	}
+ 
++	request->n_channels++;
+ 	request->channels[n_channels] = chan;
+ 	if (add_to_6ghz)
+ 		request->scan_6ghz_params[request->n_6ghz_params].channel_idx =
+ 			n_channels;
+-
+-	request->n_channels++;
+ }
+ 
+ static bool cfg80211_find_ssid_match(struct cfg80211_colocated_ap *ap,
+@@ -858,9 +857,7 @@ static int cfg80211_scan_6ghz(struct cfg80211_registered_device *rdev)
+ 			if (ret)
+ 				continue;
+ 
+-			entry = kzalloc(sizeof(*entry) + IEEE80211_MAX_SSID_LEN,
+-					GFP_ATOMIC);
+-
++			entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
+ 			if (!entry)
+ 				continue;
+ 
+diff --git a/net/wireless/tests/scan.c b/net/wireless/tests/scan.c
+index 9f458be7165951..79a99cf5e8922f 100644
+--- a/net/wireless/tests/scan.c
++++ b/net/wireless/tests/scan.c
+@@ -810,6 +810,8 @@ static void test_cfg80211_parse_colocated_ap(struct kunit *test)
+ 		skb_put_data(input, "123", 3);
+ 
+ 	ies = kunit_kzalloc(test, struct_size(ies, data, input->len), GFP_KERNEL);
++	KUNIT_ASSERT_NOT_NULL(test, ies);
++
+ 	ies->len = input->len;
+ 	memcpy(ies->data, input->data, input->len);
+ 
+diff --git a/net/xfrm/xfrm_compat.c b/net/xfrm/xfrm_compat.c
+index 91357ccaf4afe3..5b9ee63e30b69d 100644
+--- a/net/xfrm/xfrm_compat.c
++++ b/net/xfrm/xfrm_compat.c
+@@ -132,6 +132,7 @@ static const struct nla_policy compat_policy[XFRMA_MAX+1] = {
+ 	[XFRMA_MTIMER_THRESH]	= { .type = NLA_U32 },
+ 	[XFRMA_SA_DIR]          = NLA_POLICY_RANGE(NLA_U8, XFRM_SA_DIR_IN, XFRM_SA_DIR_OUT),
+ 	[XFRMA_NAT_KEEPALIVE_INTERVAL]	= { .type = NLA_U32 },
++	[XFRMA_SA_PCPU]		= { .type = NLA_U32 },
+ };
+ 
+ static struct nlmsghdr *xfrm_nlmsg_put_compat(struct sk_buff *skb,
+@@ -282,9 +283,10 @@ static int xfrm_xlate64_attr(struct sk_buff *dst, const struct nlattr *src)
+ 	case XFRMA_MTIMER_THRESH:
+ 	case XFRMA_SA_DIR:
+ 	case XFRMA_NAT_KEEPALIVE_INTERVAL:
++	case XFRMA_SA_PCPU:
+ 		return xfrm_nla_cpy(dst, src, nla_len(src));
+ 	default:
+-		BUILD_BUG_ON(XFRMA_MAX != XFRMA_NAT_KEEPALIVE_INTERVAL);
++		BUILD_BUG_ON(XFRMA_MAX != XFRMA_SA_PCPU);
+ 		pr_warn_once("unsupported nla_type %d\n", src->nla_type);
+ 		return -EOPNOTSUPP;
+ 	}
+@@ -439,7 +441,7 @@ static int xfrm_xlate32_attr(void *dst, const struct nlattr *nla,
+ 	int err;
+ 
+ 	if (type > XFRMA_MAX) {
+-		BUILD_BUG_ON(XFRMA_MAX != XFRMA_NAT_KEEPALIVE_INTERVAL);
++		BUILD_BUG_ON(XFRMA_MAX != XFRMA_SA_PCPU);
+ 		NL_SET_ERR_MSG(extack, "Bad attribute");
+ 		return -EOPNOTSUPP;
+ 	}
+diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
+index 749e7eea99e465..841a60a6fbfea3 100644
+--- a/net/xfrm/xfrm_input.c
++++ b/net/xfrm/xfrm_input.c
+@@ -572,7 +572,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
+ 			goto drop;
+ 		}
+ 
+-		x = xfrm_state_lookup(net, mark, daddr, spi, nexthdr, family);
++		x = xfrm_input_state_lookup(net, mark, daddr, spi, nexthdr, family);
+ 		if (x == NULL) {
+ 			secpath_reset(skb);
+ 			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOSTATES);
+diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
+index a2ea9dbac90b36..8a1b83191a6cdf 100644
+--- a/net/xfrm/xfrm_policy.c
++++ b/net/xfrm/xfrm_policy.c
+@@ -434,6 +434,7 @@ struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
+ 	if (policy) {
+ 		write_pnet(&policy->xp_net, net);
+ 		INIT_LIST_HEAD(&policy->walk.all);
++		INIT_HLIST_HEAD(&policy->state_cache_list);
+ 		INIT_HLIST_NODE(&policy->bydst);
+ 		INIT_HLIST_NODE(&policy->byidx);
+ 		rwlock_init(&policy->lock);
+@@ -475,6 +476,9 @@ EXPORT_SYMBOL(xfrm_policy_destroy);
+ 
+ static void xfrm_policy_kill(struct xfrm_policy *policy)
+ {
++	struct net *net = xp_net(policy);
++	struct xfrm_state *x;
++
+ 	xfrm_dev_policy_delete(policy);
+ 
+ 	write_lock_bh(&policy->lock);
+@@ -490,6 +494,13 @@ static void xfrm_policy_kill(struct xfrm_policy *policy)
+ 	if (del_timer(&policy->timer))
+ 		xfrm_pol_put(policy);
+ 
++	/* XXX: Flush state cache */
++	spin_lock_bh(&net->xfrm.xfrm_state_lock);
++	hlist_for_each_entry_rcu(x, &policy->state_cache_list, state_cache) {
++		hlist_del_init_rcu(&x->state_cache);
++	}
++	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
++
+ 	xfrm_pol_put(policy);
+ }
+ 
+@@ -3275,6 +3286,7 @@ struct dst_entry *xfrm_lookup_with_ifid(struct net *net,
+ 		dst_release(dst);
+ 		dst = dst_orig;
+ 	}
++
+ ok:
+ 	xfrm_pols_put(pols, drop_pols);
+ 	if (dst && dst->xfrm &&
+diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c
+index bc56c630572527..235bbefc2abae2 100644
+--- a/net/xfrm/xfrm_replay.c
++++ b/net/xfrm/xfrm_replay.c
+@@ -714,10 +714,12 @@ static int xfrm_replay_overflow_offload_esn(struct xfrm_state *x, struct sk_buff
+ 			oseq += skb_shinfo(skb)->gso_segs;
+ 		}
+ 
+-		if (unlikely(xo->seq.low < replay_esn->oseq)) {
+-			XFRM_SKB_CB(skb)->seq.output.hi = ++oseq_hi;
+-			xo->seq.hi = oseq_hi;
+-			replay_esn->oseq_hi = oseq_hi;
++		if (unlikely(oseq < replay_esn->oseq)) {
++			replay_esn->oseq_hi = ++oseq_hi;
++			if (xo->seq.low < replay_esn->oseq) {
++				XFRM_SKB_CB(skb)->seq.output.hi = oseq_hi;
++				xo->seq.hi = oseq_hi;
++			}
+ 			if (replay_esn->oseq_hi == 0) {
+ 				replay_esn->oseq--;
+ 				replay_esn->oseq_hi--;
+diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
+index 37478d36a8dff7..711e816fc4041e 100644
+--- a/net/xfrm/xfrm_state.c
++++ b/net/xfrm/xfrm_state.c
+@@ -34,6 +34,8 @@
+ 
+ #define xfrm_state_deref_prot(table, net) \
+ 	rcu_dereference_protected((table), lockdep_is_held(&(net)->xfrm.xfrm_state_lock))
++#define xfrm_state_deref_check(table, net) \
++	rcu_dereference_check((table), lockdep_is_held(&(net)->xfrm.xfrm_state_lock))
+ 
+ static void xfrm_state_gc_task(struct work_struct *work);
+ 
+@@ -62,6 +64,8 @@ static inline unsigned int xfrm_dst_hash(struct net *net,
+ 					 u32 reqid,
+ 					 unsigned short family)
+ {
++	lockdep_assert_held(&net->xfrm.xfrm_state_lock);
++
+ 	return __xfrm_dst_hash(daddr, saddr, reqid, family, net->xfrm.state_hmask);
+ }
+ 
+@@ -70,6 +74,8 @@ static inline unsigned int xfrm_src_hash(struct net *net,
+ 					 const xfrm_address_t *saddr,
+ 					 unsigned short family)
+ {
++	lockdep_assert_held(&net->xfrm.xfrm_state_lock);
++
+ 	return __xfrm_src_hash(daddr, saddr, family, net->xfrm.state_hmask);
+ }
+ 
+@@ -77,11 +83,15 @@ static inline unsigned int
+ xfrm_spi_hash(struct net *net, const xfrm_address_t *daddr,
+ 	      __be32 spi, u8 proto, unsigned short family)
+ {
++	lockdep_assert_held(&net->xfrm.xfrm_state_lock);
++
+ 	return __xfrm_spi_hash(daddr, spi, proto, family, net->xfrm.state_hmask);
+ }
+ 
+ static unsigned int xfrm_seq_hash(struct net *net, u32 seq)
+ {
++	lockdep_assert_held(&net->xfrm.xfrm_state_lock);
++
+ 	return __xfrm_seq_hash(seq, net->xfrm.state_hmask);
+ }
+ 
+@@ -665,6 +675,7 @@ struct xfrm_state *xfrm_state_alloc(struct net *net)
+ 		refcount_set(&x->refcnt, 1);
+ 		atomic_set(&x->tunnel_users, 0);
+ 		INIT_LIST_HEAD(&x->km.all);
++		INIT_HLIST_NODE(&x->state_cache);
+ 		INIT_HLIST_NODE(&x->bydst);
+ 		INIT_HLIST_NODE(&x->bysrc);
+ 		INIT_HLIST_NODE(&x->byspi);
+@@ -679,6 +690,7 @@ struct xfrm_state *xfrm_state_alloc(struct net *net)
+ 		x->lft.hard_packet_limit = XFRM_INF;
+ 		x->replay_maxage = 0;
+ 		x->replay_maxdiff = 0;
++		x->pcpu_num = UINT_MAX;
+ 		spin_lock_init(&x->lock);
+ 	}
+ 	return x;
+@@ -743,12 +755,18 @@ int __xfrm_state_delete(struct xfrm_state *x)
+ 
+ 	if (x->km.state != XFRM_STATE_DEAD) {
+ 		x->km.state = XFRM_STATE_DEAD;
++
+ 		spin_lock(&net->xfrm.xfrm_state_lock);
+ 		list_del(&x->km.all);
+ 		hlist_del_rcu(&x->bydst);
+ 		hlist_del_rcu(&x->bysrc);
+ 		if (x->km.seq)
+ 			hlist_del_rcu(&x->byseq);
++		if (!hlist_unhashed(&x->state_cache))
++			hlist_del_rcu(&x->state_cache);
++		if (!hlist_unhashed(&x->state_cache_input))
++			hlist_del_rcu(&x->state_cache_input);
++
+ 		if (x->id.spi)
+ 			hlist_del_rcu(&x->byspi);
+ 		net->xfrm.state_num--;
+@@ -1033,16 +1051,38 @@ xfrm_init_tempstate(struct xfrm_state *x, const struct flowi *fl,
+ 	x->props.family = tmpl->encap_family;
+ }
+ 
+-static struct xfrm_state *__xfrm_state_lookup_all(struct net *net, u32 mark,
++struct xfrm_hash_state_ptrs {
++	const struct hlist_head *bydst;
++	const struct hlist_head *bysrc;
++	const struct hlist_head *byspi;
++	unsigned int hmask;
++};
++
++static void xfrm_hash_ptrs_get(const struct net *net, struct xfrm_hash_state_ptrs *ptrs)
++{
++	unsigned int sequence;
++
++	do {
++		sequence = read_seqcount_begin(&net->xfrm.xfrm_state_hash_generation);
++
++		ptrs->bydst = xfrm_state_deref_check(net->xfrm.state_bydst, net);
++		ptrs->bysrc = xfrm_state_deref_check(net->xfrm.state_bysrc, net);
++		ptrs->byspi = xfrm_state_deref_check(net->xfrm.state_byspi, net);
++		ptrs->hmask = net->xfrm.state_hmask;
++	} while (read_seqcount_retry(&net->xfrm.xfrm_state_hash_generation, sequence));
++}
++
++static struct xfrm_state *__xfrm_state_lookup_all(const struct xfrm_hash_state_ptrs *state_ptrs,
++						  u32 mark,
+ 						  const xfrm_address_t *daddr,
+ 						  __be32 spi, u8 proto,
+ 						  unsigned short family,
+ 						  struct xfrm_dev_offload *xdo)
+ {
+-	unsigned int h = xfrm_spi_hash(net, daddr, spi, proto, family);
++	unsigned int h = __xfrm_spi_hash(daddr, spi, proto, family, state_ptrs->hmask);
+ 	struct xfrm_state *x;
+ 
+-	hlist_for_each_entry_rcu(x, net->xfrm.state_byspi + h, byspi) {
++	hlist_for_each_entry_rcu(x, state_ptrs->byspi + h, byspi) {
+ #ifdef CONFIG_XFRM_OFFLOAD
+ 		if (xdo->type == XFRM_DEV_OFFLOAD_PACKET) {
+ 			if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
+@@ -1076,15 +1116,16 @@ static struct xfrm_state *__xfrm_state_lookup_all(struct net *net, u32 mark,
+ 	return NULL;
+ }
+ 
+-static struct xfrm_state *__xfrm_state_lookup(struct net *net, u32 mark,
++static struct xfrm_state *__xfrm_state_lookup(const struct xfrm_hash_state_ptrs *state_ptrs,
++					      u32 mark,
+ 					      const xfrm_address_t *daddr,
+ 					      __be32 spi, u8 proto,
+ 					      unsigned short family)
+ {
+-	unsigned int h = xfrm_spi_hash(net, daddr, spi, proto, family);
++	unsigned int h = __xfrm_spi_hash(daddr, spi, proto, family, state_ptrs->hmask);
+ 	struct xfrm_state *x;
+ 
+-	hlist_for_each_entry_rcu(x, net->xfrm.state_byspi + h, byspi) {
++	hlist_for_each_entry_rcu(x, state_ptrs->byspi + h, byspi) {
+ 		if (x->props.family != family ||
+ 		    x->id.spi       != spi ||
+ 		    x->id.proto     != proto ||
+@@ -1101,15 +1142,63 @@ static struct xfrm_state *__xfrm_state_lookup(struct net *net, u32 mark,
+ 	return NULL;
+ }
+ 
+-static struct xfrm_state *__xfrm_state_lookup_byaddr(struct net *net, u32 mark,
++struct xfrm_state *xfrm_input_state_lookup(struct net *net, u32 mark,
++					   const xfrm_address_t *daddr,
++					   __be32 spi, u8 proto,
++					   unsigned short family)
++{
++	struct xfrm_hash_state_ptrs state_ptrs;
++	struct hlist_head *state_cache_input;
++	struct xfrm_state *x = NULL;
++
++	state_cache_input = raw_cpu_ptr(net->xfrm.state_cache_input);
++
++	rcu_read_lock();
++	hlist_for_each_entry_rcu(x, state_cache_input, state_cache_input) {
++		if (x->props.family != family ||
++		    x->id.spi       != spi ||
++		    x->id.proto     != proto ||
++		    !xfrm_addr_equal(&x->id.daddr, daddr, family))
++			continue;
++
++		if ((mark & x->mark.m) != x->mark.v)
++			continue;
++		if (!xfrm_state_hold_rcu(x))
++			continue;
++		goto out;
++	}
++
++	xfrm_hash_ptrs_get(net, &state_ptrs);
++
++	x = __xfrm_state_lookup(&state_ptrs, mark, daddr, spi, proto, family);
++
++	if (x && x->km.state == XFRM_STATE_VALID) {
++		spin_lock_bh(&net->xfrm.xfrm_state_lock);
++		if (hlist_unhashed(&x->state_cache_input)) {
++			hlist_add_head_rcu(&x->state_cache_input, state_cache_input);
++		} else {
++			hlist_del_rcu(&x->state_cache_input);
++			hlist_add_head_rcu(&x->state_cache_input, state_cache_input);
++		}
++		spin_unlock_bh(&net->xfrm.xfrm_state_lock);
++	}
++
++out:
++	rcu_read_unlock();
++	return x;
++}
++EXPORT_SYMBOL(xfrm_input_state_lookup);
++
++static struct xfrm_state *__xfrm_state_lookup_byaddr(const struct xfrm_hash_state_ptrs *state_ptrs,
++						     u32 mark,
+ 						     const xfrm_address_t *daddr,
+ 						     const xfrm_address_t *saddr,
+ 						     u8 proto, unsigned short family)
+ {
+-	unsigned int h = xfrm_src_hash(net, daddr, saddr, family);
++	unsigned int h = __xfrm_src_hash(daddr, saddr, family, state_ptrs->hmask);
+ 	struct xfrm_state *x;
+ 
+-	hlist_for_each_entry_rcu(x, net->xfrm.state_bysrc + h, bysrc) {
++	hlist_for_each_entry_rcu(x, state_ptrs->bysrc + h, bysrc) {
+ 		if (x->props.family != family ||
+ 		    x->id.proto     != proto ||
+ 		    !xfrm_addr_equal(&x->id.daddr, daddr, family) ||
+@@ -1129,14 +1218,17 @@ static struct xfrm_state *__xfrm_state_lookup_byaddr(struct net *net, u32 mark,
+ static inline struct xfrm_state *
+ __xfrm_state_locate(struct xfrm_state *x, int use_spi, int family)
+ {
++	struct xfrm_hash_state_ptrs state_ptrs;
+ 	struct net *net = xs_net(x);
+ 	u32 mark = x->mark.v & x->mark.m;
+ 
++	xfrm_hash_ptrs_get(net, &state_ptrs);
++
+ 	if (use_spi)
+-		return __xfrm_state_lookup(net, mark, &x->id.daddr,
++		return __xfrm_state_lookup(&state_ptrs, mark, &x->id.daddr,
+ 					   x->id.spi, x->id.proto, family);
+ 	else
+-		return __xfrm_state_lookup_byaddr(net, mark,
++		return __xfrm_state_lookup_byaddr(&state_ptrs, mark,
+ 						  &x->id.daddr,
+ 						  &x->props.saddr,
+ 						  x->id.proto, family);
+@@ -1155,6 +1247,12 @@ static void xfrm_state_look_at(struct xfrm_policy *pol, struct xfrm_state *x,
+ 			       struct xfrm_state **best, int *acq_in_progress,
+ 			       int *error)
+ {
++	/* We need the CPU id just as a lookup key;
++	 * we don't require it to be stable.
++	 */
++	unsigned int pcpu_id = get_cpu();
++	put_cpu();
++
+ 	/* Resolution logic:
+ 	 * 1. There is a valid state with matching selector. Done.
+ 	 * 2. Valid state with inappropriate selector. Skip.
+@@ -1174,13 +1272,18 @@ static void xfrm_state_look_at(struct xfrm_policy *pol, struct xfrm_state *x,
+ 							&fl->u.__fl_common))
+ 			return;
+ 
++		if (x->pcpu_num != UINT_MAX && x->pcpu_num != pcpu_id)
++			return;
++
+ 		if (!*best ||
++		    ((*best)->pcpu_num == UINT_MAX && x->pcpu_num == pcpu_id) ||
+ 		    (*best)->km.dying > x->km.dying ||
+ 		    ((*best)->km.dying == x->km.dying &&
+ 		     (*best)->curlft.add_time < x->curlft.add_time))
+ 			*best = x;
+ 	} else if (x->km.state == XFRM_STATE_ACQ) {
+-		*acq_in_progress = 1;
++		if (!*best || x->pcpu_num == pcpu_id)
++			*acq_in_progress = 1;
+ 	} else if (x->km.state == XFRM_STATE_ERROR ||
+ 		   x->km.state == XFRM_STATE_EXPIRED) {
+ 		if ((!x->sel.family ||
+@@ -1199,6 +1302,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
+ 		unsigned short family, u32 if_id)
+ {
+ 	static xfrm_address_t saddr_wildcard = { };
++	struct xfrm_hash_state_ptrs state_ptrs;
+ 	struct net *net = xp_net(pol);
+ 	unsigned int h, h_wildcard;
+ 	struct xfrm_state *x, *x0, *to_put;
+@@ -1209,14 +1313,64 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
+ 	unsigned short encap_family = tmpl->encap_family;
+ 	unsigned int sequence;
+ 	struct km_event c;
++	unsigned int pcpu_id;
++	bool cached = false;
++
++	/* We need the CPU id just as a lookup key;
++	 * we don't require it to be stable.
++	 */
++	pcpu_id = get_cpu();
++	put_cpu();
+ 
+ 	to_put = NULL;
+ 
+ 	sequence = read_seqcount_begin(&net->xfrm.xfrm_state_hash_generation);
+ 
+ 	rcu_read_lock();
+-	h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, encap_family);
+-	hlist_for_each_entry_rcu(x, net->xfrm.state_bydst + h, bydst) {
++	hlist_for_each_entry_rcu(x, &pol->state_cache_list, state_cache) {
++		if (x->props.family == encap_family &&
++		    x->props.reqid == tmpl->reqid &&
++		    (mark & x->mark.m) == x->mark.v &&
++		    x->if_id == if_id &&
++		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
++		    xfrm_state_addr_check(x, daddr, saddr, encap_family) &&
++		    tmpl->mode == x->props.mode &&
++		    tmpl->id.proto == x->id.proto &&
++		    (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
++			xfrm_state_look_at(pol, x, fl, encap_family,
++					   &best, &acquire_in_progress, &error);
++	}
++
++	if (best)
++		goto cached;
++
++	hlist_for_each_entry_rcu(x, &pol->state_cache_list, state_cache) {
++		if (x->props.family == encap_family &&
++		    x->props.reqid == tmpl->reqid &&
++		    (mark & x->mark.m) == x->mark.v &&
++		    x->if_id == if_id &&
++		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
++		    xfrm_addr_equal(&x->id.daddr, daddr, encap_family) &&
++		    tmpl->mode == x->props.mode &&
++		    tmpl->id.proto == x->id.proto &&
++		    (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
++			xfrm_state_look_at(pol, x, fl, family,
++					   &best, &acquire_in_progress, &error);
++	}
++
++cached:
++	cached = true;
++	if (best)
++		goto found;
++	else if (error)
++		best = NULL;
++	else if (acquire_in_progress) /* XXX: acquire_in_progress should not happen */
++		WARN_ON(1);
++
++	xfrm_hash_ptrs_get(net, &state_ptrs);
++
++	h = __xfrm_dst_hash(daddr, saddr, tmpl->reqid, encap_family, state_ptrs.hmask);
++	hlist_for_each_entry_rcu(x, state_ptrs.bydst + h, bydst) {
+ #ifdef CONFIG_XFRM_OFFLOAD
+ 		if (pol->xdo.type == XFRM_DEV_OFFLOAD_PACKET) {
+ 			if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
+@@ -1249,8 +1403,9 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
+ 	if (best || acquire_in_progress)
+ 		goto found;
+ 
+-	h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, encap_family);
+-	hlist_for_each_entry_rcu(x, net->xfrm.state_bydst + h_wildcard, bydst) {
++	h_wildcard = __xfrm_dst_hash(daddr, &saddr_wildcard, tmpl->reqid,
++				     encap_family, state_ptrs.hmask);
++	hlist_for_each_entry_rcu(x, state_ptrs.bydst + h_wildcard, bydst) {
+ #ifdef CONFIG_XFRM_OFFLOAD
+ 		if (pol->xdo.type == XFRM_DEV_OFFLOAD_PACKET) {
+ 			if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
+@@ -1282,10 +1437,13 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
+ 	}
+ 
+ found:
+-	x = best;
++	if (!(pol->flags & XFRM_POLICY_CPU_ACQUIRE) ||
++	    (best && (best->pcpu_num == pcpu_id)))
++		x = best;
++
+ 	if (!x && !error && !acquire_in_progress) {
+ 		if (tmpl->id.spi &&
+-		    (x0 = __xfrm_state_lookup_all(net, mark, daddr,
++		    (x0 = __xfrm_state_lookup_all(&state_ptrs, mark, daddr,
+ 						  tmpl->id.spi, tmpl->id.proto,
+ 						  encap_family,
+ 						  &pol->xdo)) != NULL) {
+@@ -1314,6 +1472,8 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
+ 		xfrm_init_tempstate(x, fl, tmpl, daddr, saddr, family);
+ 		memcpy(&x->mark, &pol->mark, sizeof(x->mark));
+ 		x->if_id = if_id;
++		if ((pol->flags & XFRM_POLICY_CPU_ACQUIRE) && best)
++			x->pcpu_num = pcpu_id;
+ 
+ 		error = security_xfrm_state_alloc_acquire(x, pol->security, fl->flowi_secid);
+ 		if (error) {
+@@ -1352,6 +1512,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
+ 			x->km.state = XFRM_STATE_ACQ;
+ 			x->dir = XFRM_SA_DIR_OUT;
+ 			list_add(&x->km.all, &net->xfrm.state_all);
++			h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, encap_family);
+ 			XFRM_STATE_INSERT(bydst, &x->bydst,
+ 					  net->xfrm.state_bydst + h,
+ 					  x->xso.type);
+@@ -1359,6 +1520,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
+ 			XFRM_STATE_INSERT(bysrc, &x->bysrc,
+ 					  net->xfrm.state_bysrc + h,
+ 					  x->xso.type);
++			INIT_HLIST_NODE(&x->state_cache);
+ 			if (x->id.spi) {
+ 				h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, encap_family);
+ 				XFRM_STATE_INSERT(byspi, &x->byspi,
+@@ -1392,6 +1554,11 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
+ 			x = NULL;
+ 			error = -ESRCH;
+ 		}
++
++		/* Use the already installed 'fallback' while the CPU-specific
++		 * SA acquire is handled. */
++		if (best)
++			x = best;
+ 	}
+ out:
+ 	if (x) {
+@@ -1402,6 +1569,15 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
+ 	} else {
+ 		*err = acquire_in_progress ? -EAGAIN : error;
+ 	}
++
++	if (x && x->km.state == XFRM_STATE_VALID && !cached &&
++	    (!(pol->flags & XFRM_POLICY_CPU_ACQUIRE) || x->pcpu_num == pcpu_id)) {
++		spin_lock_bh(&net->xfrm.xfrm_state_lock);
++		if (hlist_unhashed(&x->state_cache))
++			hlist_add_head_rcu(&x->state_cache, &pol->state_cache_list);
++		spin_unlock_bh(&net->xfrm.xfrm_state_lock);
++	}
++
+ 	rcu_read_unlock();
+ 	if (to_put)
+ 		xfrm_state_put(to_put);
+@@ -1524,12 +1700,14 @@ static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
+ 	unsigned int h;
+ 	u32 mark = xnew->mark.v & xnew->mark.m;
+ 	u32 if_id = xnew->if_id;
++	u32 cpu_id = xnew->pcpu_num;
+ 
+ 	h = xfrm_dst_hash(net, &xnew->id.daddr, &xnew->props.saddr, reqid, family);
+ 	hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
+ 		if (x->props.family	== family &&
+ 		    x->props.reqid	== reqid &&
+ 		    x->if_id		== if_id &&
++		    x->pcpu_num		== cpu_id &&
+ 		    (mark & x->mark.m) == x->mark.v &&
+ 		    xfrm_addr_equal(&x->id.daddr, &xnew->id.daddr, family) &&
+ 		    xfrm_addr_equal(&x->props.saddr, &xnew->props.saddr, family))
+@@ -1552,7 +1730,7 @@ EXPORT_SYMBOL(xfrm_state_insert);
+ static struct xfrm_state *__find_acq_core(struct net *net,
+ 					  const struct xfrm_mark *m,
+ 					  unsigned short family, u8 mode,
+-					  u32 reqid, u32 if_id, u8 proto,
++					  u32 reqid, u32 if_id, u32 pcpu_num, u8 proto,
+ 					  const xfrm_address_t *daddr,
+ 					  const xfrm_address_t *saddr,
+ 					  int create)
+@@ -1569,6 +1747,7 @@ static struct xfrm_state *__find_acq_core(struct net *net,
+ 		    x->id.spi       != 0 ||
+ 		    x->id.proto	    != proto ||
+ 		    (mark & x->mark.m) != x->mark.v ||
++		    x->pcpu_num != pcpu_num ||
+ 		    !xfrm_addr_equal(&x->id.daddr, daddr, family) ||
+ 		    !xfrm_addr_equal(&x->props.saddr, saddr, family))
+ 			continue;
+@@ -1602,6 +1781,7 @@ static struct xfrm_state *__find_acq_core(struct net *net,
+ 			break;
+ 		}
+ 
++		x->pcpu_num = pcpu_num;
+ 		x->km.state = XFRM_STATE_ACQ;
+ 		x->id.proto = proto;
+ 		x->props.family = family;
+@@ -1630,7 +1810,7 @@ static struct xfrm_state *__find_acq_core(struct net *net,
+ 	return x;
+ }
+ 
+-static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq);
++static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq, u32 pcpu_num);
+ 
+ int xfrm_state_add(struct xfrm_state *x)
+ {
+@@ -1656,7 +1836,7 @@ int xfrm_state_add(struct xfrm_state *x)
+ 	}
+ 
+ 	if (use_spi && x->km.seq) {
+-		x1 = __xfrm_find_acq_byseq(net, mark, x->km.seq);
++		x1 = __xfrm_find_acq_byseq(net, mark, x->km.seq, x->pcpu_num);
+ 		if (x1 && ((x1->id.proto != x->id.proto) ||
+ 		    !xfrm_addr_equal(&x1->id.daddr, &x->id.daddr, family))) {
+ 			to_put = x1;
+@@ -1666,7 +1846,7 @@ int xfrm_state_add(struct xfrm_state *x)
+ 
+ 	if (use_spi && !x1)
+ 		x1 = __find_acq_core(net, &x->mark, family, x->props.mode,
+-				     x->props.reqid, x->if_id, x->id.proto,
++				     x->props.reqid, x->if_id, x->pcpu_num, x->id.proto,
+ 				     &x->id.daddr, &x->props.saddr, 0);
+ 
+ 	__xfrm_state_bump_genids(x);
+@@ -1791,6 +1971,7 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig,
+ 	x->props.flags = orig->props.flags;
+ 	x->props.extra_flags = orig->props.extra_flags;
+ 
++	x->pcpu_num = orig->pcpu_num;
+ 	x->if_id = orig->if_id;
+ 	x->tfcpad = orig->tfcpad;
+ 	x->replay_maxdiff = orig->replay_maxdiff;
+@@ -2041,10 +2222,13 @@ struct xfrm_state *
+ xfrm_state_lookup(struct net *net, u32 mark, const xfrm_address_t *daddr, __be32 spi,
+ 		  u8 proto, unsigned short family)
+ {
++	struct xfrm_hash_state_ptrs state_ptrs;
+ 	struct xfrm_state *x;
+ 
+ 	rcu_read_lock();
+-	x = __xfrm_state_lookup(net, mark, daddr, spi, proto, family);
++	xfrm_hash_ptrs_get(net, &state_ptrs);
++
++	x = __xfrm_state_lookup(&state_ptrs, mark, daddr, spi, proto, family);
+ 	rcu_read_unlock();
+ 	return x;
+ }
+@@ -2055,10 +2239,14 @@ xfrm_state_lookup_byaddr(struct net *net, u32 mark,
+ 			 const xfrm_address_t *daddr, const xfrm_address_t *saddr,
+ 			 u8 proto, unsigned short family)
+ {
++	struct xfrm_hash_state_ptrs state_ptrs;
+ 	struct xfrm_state *x;
+ 
+ 	spin_lock_bh(&net->xfrm.xfrm_state_lock);
+-	x = __xfrm_state_lookup_byaddr(net, mark, daddr, saddr, proto, family);
++
++	xfrm_hash_ptrs_get(net, &state_ptrs);
++
++	x = __xfrm_state_lookup_byaddr(&state_ptrs, mark, daddr, saddr, proto, family);
+ 	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
+ 	return x;
+ }
+@@ -2066,13 +2254,14 @@ EXPORT_SYMBOL(xfrm_state_lookup_byaddr);
+ 
+ struct xfrm_state *
+ xfrm_find_acq(struct net *net, const struct xfrm_mark *mark, u8 mode, u32 reqid,
+-	      u32 if_id, u8 proto, const xfrm_address_t *daddr,
++	      u32 if_id, u32 pcpu_num, u8 proto, const xfrm_address_t *daddr,
+ 	      const xfrm_address_t *saddr, int create, unsigned short family)
+ {
+ 	struct xfrm_state *x;
+ 
+ 	spin_lock_bh(&net->xfrm.xfrm_state_lock);
+-	x = __find_acq_core(net, mark, family, mode, reqid, if_id, proto, daddr, saddr, create);
++	x = __find_acq_core(net, mark, family, mode, reqid, if_id, pcpu_num,
++			    proto, daddr, saddr, create);
+ 	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
+ 
+ 	return x;
+@@ -2207,7 +2396,7 @@ xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
+ 
+ /* Silly enough, but I'm lazy to build resolution list */
+ 
+-static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq)
++static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq, u32 pcpu_num)
+ {
+ 	unsigned int h = xfrm_seq_hash(net, seq);
+ 	struct xfrm_state *x;
+@@ -2215,6 +2404,7 @@ static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 s
+ 	hlist_for_each_entry_rcu(x, net->xfrm.state_byseq + h, byseq) {
+ 		if (x->km.seq == seq &&
+ 		    (mark & x->mark.m) == x->mark.v &&
++		    x->pcpu_num == pcpu_num &&
+ 		    x->km.state == XFRM_STATE_ACQ) {
+ 			xfrm_state_hold(x);
+ 			return x;
+@@ -2224,12 +2414,12 @@ static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 s
+ 	return NULL;
+ }
+ 
+-struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq)
++struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq, u32 pcpu_num)
+ {
+ 	struct xfrm_state *x;
+ 
+ 	spin_lock_bh(&net->xfrm.xfrm_state_lock);
+-	x = __xfrm_find_acq_byseq(net, mark, seq);
++	x = __xfrm_find_acq_byseq(net, mark, seq, pcpu_num);
+ 	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
+ 	return x;
+ }
+@@ -2988,6 +3178,11 @@ int __net_init xfrm_state_init(struct net *net)
+ 	net->xfrm.state_byseq = xfrm_hash_alloc(sz);
+ 	if (!net->xfrm.state_byseq)
+ 		goto out_byseq;
++
++	net->xfrm.state_cache_input = alloc_percpu(struct hlist_head);
++	if (!net->xfrm.state_cache_input)
++		goto out_state_cache_input;
++
+ 	net->xfrm.state_hmask = ((sz / sizeof(struct hlist_head)) - 1);
+ 
+ 	net->xfrm.state_num = 0;
+@@ -2997,6 +3192,8 @@ int __net_init xfrm_state_init(struct net *net)
+ 			       &net->xfrm.xfrm_state_lock);
+ 	return 0;
+ 
++out_state_cache_input:
++	xfrm_hash_free(net->xfrm.state_byseq, sz);
+ out_byseq:
+ 	xfrm_hash_free(net->xfrm.state_byspi, sz);
+ out_byspi:
+@@ -3026,6 +3223,7 @@ void xfrm_state_fini(struct net *net)
+ 	xfrm_hash_free(net->xfrm.state_bysrc, sz);
+ 	WARN_ON(!hlist_empty(net->xfrm.state_bydst));
+ 	xfrm_hash_free(net->xfrm.state_bydst, sz);
++	free_percpu(net->xfrm.state_cache_input);
+ }
+ 
+ #ifdef CONFIG_AUDITSYSCALL
+diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
+index e3b8ce89831abf..87013623773a2b 100644
+--- a/net/xfrm/xfrm_user.c
++++ b/net/xfrm/xfrm_user.c
+@@ -460,6 +460,12 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
+ 		}
+ 	}
+ 
++	if (!sa_dir && attrs[XFRMA_SA_PCPU]) {
++		NL_SET_ERR_MSG(extack, "SA_PCPU only supported with SA_DIR");
++		err = -EINVAL;
++		goto out;
++	}
++
+ out:
+ 	return err;
+ }
+@@ -841,6 +847,12 @@ static struct xfrm_state *xfrm_state_construct(struct net *net,
+ 		x->nat_keepalive_interval =
+ 			nla_get_u32(attrs[XFRMA_NAT_KEEPALIVE_INTERVAL]);
+ 
++	if (attrs[XFRMA_SA_PCPU]) {
++		x->pcpu_num = nla_get_u32(attrs[XFRMA_SA_PCPU]);
++		if (x->pcpu_num >= num_possible_cpus())
++			goto error;
++	}
++
+ 	err = __xfrm_init_state(x, false, attrs[XFRMA_OFFLOAD_DEV], extack);
+ 	if (err)
+ 		goto error;
+@@ -1296,6 +1308,11 @@ static int copy_to_user_state_extra(struct xfrm_state *x,
+ 		if (ret)
+ 			goto out;
+ 	}
++	if (x->pcpu_num != UINT_MAX) {
++		ret = nla_put_u32(skb, XFRMA_SA_PCPU, x->pcpu_num);
++		if (ret)
++			goto out;
++	}
+ 	if (x->dir)
+ 		ret = nla_put_u8(skb, XFRMA_SA_DIR, x->dir);
+ 
+@@ -1700,6 +1717,7 @@ static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh,
+ 	u32 mark;
+ 	struct xfrm_mark m;
+ 	u32 if_id = 0;
++	u32 pcpu_num = UINT_MAX;
+ 
+ 	p = nlmsg_data(nlh);
+ 	err = verify_spi_info(p->info.id.proto, p->min, p->max, extack);
+@@ -1716,8 +1734,16 @@ static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh,
+ 	if (attrs[XFRMA_IF_ID])
+ 		if_id = nla_get_u32(attrs[XFRMA_IF_ID]);
+ 
++	if (attrs[XFRMA_SA_PCPU]) {
++		pcpu_num = nla_get_u32(attrs[XFRMA_SA_PCPU]);
++		if (pcpu_num >= num_possible_cpus()) {
++			err = -EINVAL;
++			goto out_noput;
++		}
++	}
++
+ 	if (p->info.seq) {
+-		x = xfrm_find_acq_byseq(net, mark, p->info.seq);
++		x = xfrm_find_acq_byseq(net, mark, p->info.seq, pcpu_num);
+ 		if (x && !xfrm_addr_equal(&x->id.daddr, daddr, family)) {
+ 			xfrm_state_put(x);
+ 			x = NULL;
+@@ -1726,7 +1752,7 @@ static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh,
+ 
+ 	if (!x)
+ 		x = xfrm_find_acq(net, &m, p->info.mode, p->info.reqid,
+-				  if_id, p->info.id.proto, daddr,
++				  if_id, pcpu_num, p->info.id.proto, daddr,
+ 				  &p->info.saddr, 1,
+ 				  family);
+ 	err = -ENOENT;
+@@ -2526,7 +2552,8 @@ static inline unsigned int xfrm_aevent_msgsize(struct xfrm_state *x)
+ 	       + nla_total_size(sizeof(struct xfrm_mark))
+ 	       + nla_total_size(4) /* XFRM_AE_RTHR */
+ 	       + nla_total_size(4) /* XFRM_AE_ETHR */
+-	       + nla_total_size(sizeof(x->dir)); /* XFRMA_SA_DIR */
++	       + nla_total_size(sizeof(x->dir)) /* XFRMA_SA_DIR */
++	       + nla_total_size(4); /* XFRMA_SA_PCPU */
+ }
+ 
+ static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, const struct km_event *c)
+@@ -2582,6 +2609,11 @@ static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, const struct
+ 	err = xfrm_if_id_put(skb, x->if_id);
+ 	if (err)
+ 		goto out_cancel;
++	if (x->pcpu_num != UINT_MAX) {
++		err = nla_put_u32(skb, XFRMA_SA_PCPU, x->pcpu_num);
++		if (err)
++			goto out_cancel;
++	}
+ 
+ 	if (x->dir) {
+ 		err = nla_put_u8(skb, XFRMA_SA_DIR, x->dir);
+@@ -2852,6 +2884,13 @@ static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
+ 
+ 	xfrm_mark_get(attrs, &mark);
+ 
++	if (attrs[XFRMA_SA_PCPU]) {
++		x->pcpu_num = nla_get_u32(attrs[XFRMA_SA_PCPU]);
++		err = -EINVAL;
++		if (x->pcpu_num >= num_possible_cpus())
++			goto free_state;
++	}
++
+ 	err = verify_newpolicy_info(&ua->policy, extack);
+ 	if (err)
+ 		goto free_state;
+@@ -3182,6 +3221,7 @@ const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
+ 	[XFRMA_MTIMER_THRESH]   = { .type = NLA_U32 },
+ 	[XFRMA_SA_DIR]          = NLA_POLICY_RANGE(NLA_U8, XFRM_SA_DIR_IN, XFRM_SA_DIR_OUT),
+ 	[XFRMA_NAT_KEEPALIVE_INTERVAL] = { .type = NLA_U32 },
++	[XFRMA_SA_PCPU]		= { .type = NLA_U32 },
+ };
+ EXPORT_SYMBOL_GPL(xfrma_policy);
+ 
+@@ -3348,7 +3388,8 @@ static inline unsigned int xfrm_expire_msgsize(void)
+ {
+ 	return NLMSG_ALIGN(sizeof(struct xfrm_user_expire)) +
+ 	       nla_total_size(sizeof(struct xfrm_mark)) +
+-	       nla_total_size(sizeof_field(struct xfrm_state, dir));
++	       nla_total_size(sizeof_field(struct xfrm_state, dir)) +
++	       nla_total_size(4); /* XFRMA_SA_PCPU */
+ }
+ 
+ static int build_expire(struct sk_buff *skb, struct xfrm_state *x, const struct km_event *c)
+@@ -3374,6 +3415,11 @@ static int build_expire(struct sk_buff *skb, struct xfrm_state *x, const struct
+ 	err = xfrm_if_id_put(skb, x->if_id);
+ 	if (err)
+ 		return err;
++	if (x->pcpu_num != UINT_MAX) {
++		err = nla_put_u32(skb, XFRMA_SA_PCPU, x->pcpu_num);
++		if (err)
++			return err;
++	}
+ 
+ 	if (x->dir) {
+ 		err = nla_put_u8(skb, XFRMA_SA_DIR, x->dir);
+@@ -3481,6 +3527,8 @@ static inline unsigned int xfrm_sa_len(struct xfrm_state *x)
+ 	}
+ 	if (x->if_id)
+ 		l += nla_total_size(sizeof(x->if_id));
++	if (x->pcpu_num)
++		l += nla_total_size(sizeof(x->pcpu_num));
+ 
+ 	/* Must count x->lastused as it may become non-zero behind our back. */
+ 	l += nla_total_size_64bit(sizeof(u64));
+@@ -3587,6 +3635,7 @@ static inline unsigned int xfrm_acquire_msgsize(struct xfrm_state *x,
+ 	       + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr)
+ 	       + nla_total_size(sizeof(struct xfrm_mark))
+ 	       + nla_total_size(xfrm_user_sec_ctx_size(x->security))
++	       + nla_total_size(4) /* XFRMA_SA_PCPU */
+ 	       + userpolicy_type_attrsize();
+ }
+ 
+@@ -3623,6 +3672,8 @@ static int build_acquire(struct sk_buff *skb, struct xfrm_state *x,
+ 		err = xfrm_if_id_put(skb, xp->if_id);
+ 	if (!err && xp->xdo.dev)
+ 		err = copy_user_offload(&xp->xdo, skb);
++	if (!err && x->pcpu_num != UINT_MAX)
++		err = nla_put_u32(skb, XFRMA_SA_PCPU, x->pcpu_num);
+ 	if (err) {
+ 		nlmsg_cancel(skb, nlh);
+ 		return err;
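
The XFRMA_SA_PCPU handling above repeats one idiom at every entry point:
UINT_MAX marks "no per-CPU binding", and any user-supplied value must fall
below num_possible_cpus(). A minimal sketch of that validate-then-store step
(kernel context assumed; the helper name is hypothetical):

	static int sa_set_pcpu(struct xfrm_state *x, const struct nlattr *attr)
	{
		u32 pcpu_num = nla_get_u32(attr);

		/* Only indexes of possible CPUs are accepted. */
		if (pcpu_num >= num_possible_cpus())
			return -EINVAL;

		/* UINT_MAX stays the "unset" default and is skipped in dumps. */
		x->pcpu_num = pcpu_num;
		return 0;
	}
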
+diff --git a/samples/landlock/sandboxer.c b/samples/landlock/sandboxer.c
+index 57565dfd74a260..07fab2ef534e8d 100644
+--- a/samples/landlock/sandboxer.c
++++ b/samples/landlock/sandboxer.c
+@@ -91,6 +91,9 @@ static int parse_path(char *env_path, const char ***const path_list)
+ 		}
+ 	}
+ 	*path_list = malloc(num_paths * sizeof(**path_list));
++	if (!*path_list)
++		return -1;
++
+ 	for (i = 0; i < num_paths; i++)
+ 		(*path_list)[i] = strsep(&env_path, ENV_DELIMITER);
+ 
+@@ -127,6 +130,10 @@ static int populate_ruleset_fs(const char *const env_var, const int ruleset_fd,
+ 	env_path_name = strdup(env_path_name);
+ 	unsetenv(env_var);
+ 	num_paths = parse_path(env_path_name, &path_list);
++	if (num_paths < 0) {
++		fprintf(stderr, "Failed to allocate memory\n");
++		goto out_free_name;
++	}
+ 	if (num_paths == 1 && path_list[0][0] == '\0') {
+ 		/*
+ 		 * Allows to not use all possible restrictions (e.g. use
+diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
+index 01a9f567d5af48..fe5e132fcea89a 100644
+--- a/scripts/Makefile.lib
++++ b/scripts/Makefile.lib
+@@ -371,10 +371,10 @@ quiet_cmd_lzo_with_size = LZO     $@
+       cmd_lzo_with_size = { cat $(real-prereqs) | $(KLZOP) -9; $(size_append); } > $@
+ 
+ quiet_cmd_lz4 = LZ4     $@
+-      cmd_lz4 = cat $(real-prereqs) | $(LZ4) -l -c1 stdin stdout > $@
++      cmd_lz4 = cat $(real-prereqs) | $(LZ4) -l -9 - - > $@
+ 
+ quiet_cmd_lz4_with_size = LZ4     $@
+-      cmd_lz4_with_size = { cat $(real-prereqs) | $(LZ4) -l -c1 stdin stdout; \
++      cmd_lz4_with_size = { cat $(real-prereqs) | $(LZ4) -l -9 - -; \
+                   $(size_append); } > $@
+ 
+ # U-Boot mkimage
+diff --git a/scripts/genksyms/genksyms.c b/scripts/genksyms/genksyms.c
+index f3901c55df239d..bbc6b7d3088c15 100644
+--- a/scripts/genksyms/genksyms.c
++++ b/scripts/genksyms/genksyms.c
+@@ -239,6 +239,7 @@ static struct symbol *__add_symbol(const char *name, enum symbol_type type,
+ 						"unchanged\n");
+ 				}
+ 				sym->is_declared = 1;
++				free_list(defn, NULL);
+ 				return sym;
+ 			} else if (!sym->is_declared) {
+ 				if (sym->is_override && flag_preserve) {
+@@ -247,6 +248,7 @@ static struct symbol *__add_symbol(const char *name, enum symbol_type type,
+ 					print_type_name(type, name);
+ 					fprintf(stderr, " modversion change\n");
+ 					sym->is_declared = 1;
++					free_list(defn, NULL);
+ 					return sym;
+ 				} else {
+ 					status = is_unknown_symbol(sym) ?
+@@ -254,6 +256,7 @@ static struct symbol *__add_symbol(const char *name, enum symbol_type type,
+ 				}
+ 			} else {
+ 				error_with_pos("redefinition of %s", name);
++				free_list(defn, NULL);
+ 				return sym;
+ 			}
+ 			break;
+@@ -269,11 +272,15 @@ static struct symbol *__add_symbol(const char *name, enum symbol_type type,
+ 				break;
+ 			}
+ 		}
++
++		free_list(sym->defn, NULL);
++		free(sym->name);
++		free(sym);
+ 		--nsyms;
+ 	}
+ 
+ 	sym = xmalloc(sizeof(*sym));
+-	sym->name = name;
++	sym->name = xstrdup(name);
+ 	sym->type = type;
+ 	sym->defn = defn;
+ 	sym->expansion_trail = NULL;
+@@ -480,7 +487,7 @@ static void read_reference(FILE *f)
+ 			defn = def;
+ 			def = read_node(f);
+ 		}
+-		subsym = add_reference_symbol(xstrdup(sym->string), sym->tag,
++		subsym = add_reference_symbol(sym->string, sym->tag,
+ 					      defn, is_extern);
+ 		subsym->is_override = is_override;
+ 		free_node(sym);
+diff --git a/scripts/genksyms/genksyms.h b/scripts/genksyms/genksyms.h
+index 21ed2ec2d98ca8..5621533dcb8e43 100644
+--- a/scripts/genksyms/genksyms.h
++++ b/scripts/genksyms/genksyms.h
+@@ -32,7 +32,7 @@ struct string_list {
+ 
+ struct symbol {
+ 	struct symbol *hash_next;
+-	const char *name;
++	char *name;
+ 	enum symbol_type type;
+ 	struct string_list *defn;
+ 	struct symbol *expansion_trail;
+diff --git a/scripts/genksyms/parse.y b/scripts/genksyms/parse.y
+index 8e9b5e69e8f01d..689cb6bb40b657 100644
+--- a/scripts/genksyms/parse.y
++++ b/scripts/genksyms/parse.y
+@@ -152,14 +152,19 @@ simple_declaration:
+ 	;
+ 
+ init_declarator_list_opt:
+-	/* empty */				{ $$ = NULL; }
+-	| init_declarator_list
++	/* empty */			{ $$ = NULL; }
++	| init_declarator_list		{ free_list(decl_spec, NULL); $$ = $1; }
+ 	;
+ 
+ init_declarator_list:
+ 	init_declarator
+ 		{ struct string_list *decl = *$1;
+ 		  *$1 = NULL;
++
++		  /* avoid sharing among multiple init_declarators */
++		  if (decl_spec)
++		    decl_spec = copy_list_range(decl_spec, NULL);
++
+ 		  add_symbol(current_name,
+ 			     is_typedef ? SYM_TYPEDEF : SYM_NORMAL, decl, is_extern);
+ 		  current_name = NULL;
+@@ -170,6 +175,11 @@ init_declarator_list:
+ 		  *$3 = NULL;
+ 		  free_list(*$2, NULL);
+ 		  *$2 = decl_spec;
++
++		  /* avoid sharing among multiple init_declarators */
++		  if (decl_spec)
++		    decl_spec = copy_list_range(decl_spec, NULL);
++
+ 		  add_symbol(current_name,
+ 			     is_typedef ? SYM_TYPEDEF : SYM_NORMAL, decl, is_extern);
+ 		  current_name = NULL;
+@@ -472,12 +482,12 @@ enumerator_list:
+ enumerator:
+ 	IDENT
+ 		{
+-			const char *name = strdup((*$1)->string);
++			const char *name = (*$1)->string;
+ 			add_symbol(name, SYM_ENUM_CONST, NULL, 0);
+ 		}
+ 	| IDENT '=' EXPRESSION_PHRASE
+ 		{
+-			const char *name = strdup((*$1)->string);
++			const char *name = (*$1)->string;
+ 			struct string_list *expr = copy_list_range(*$3, *$2);
+ 			add_symbol(name, SYM_ENUM_CONST, expr, 0);
+ 		}
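
The common thread in the genksyms hunks above is ownership: a symbol now holds
a private copy of its name (xstrdup() on insert) and frees both the name and
the definition list when it is replaced, instead of borrowing caller storage
and leaking on redefinition. A reduced sketch of that contract, with
hypothetical names:

	#include <stdlib.h>
	#include <string.h>

	struct sym {
		char *name;	/* owned: duplicated on insert, freed on release */
	};

	static struct sym *sym_add(const char *name)
	{
		struct sym *s = malloc(sizeof(*s));

		if (!s)
			return NULL;
		s->name = strdup(name);	/* private copy; caller keeps its own */
		if (!s->name) {
			free(s);
			return NULL;
		}
		return s;
	}

	static void sym_release(struct sym *s)
	{
		free(s->name);	/* always heap-allocated by sym_add() */
		free(s);
	}
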
+diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
+index 4286d5e7f95dc1..3b55e7a4131d9a 100644
+--- a/scripts/kconfig/confdata.c
++++ b/scripts/kconfig/confdata.c
+@@ -360,10 +360,12 @@ int conf_read_simple(const char *name, int def)
+ 
+ 			*p = '\0';
+ 
+-			in = zconf_fopen(env);
++			name = env;
++
++			in = zconf_fopen(name);
+ 			if (in) {
+ 				conf_message("using defaults found in %s",
+-					     env);
++					     name);
+ 				goto load;
+ 			}
+ 
+diff --git a/scripts/kconfig/symbol.c b/scripts/kconfig/symbol.c
+index a3af93aaaf32af..453721e66c4ebc 100644
+--- a/scripts/kconfig/symbol.c
++++ b/scripts/kconfig/symbol.c
+@@ -376,6 +376,7 @@ static void sym_warn_unmet_dep(const struct symbol *sym)
+ 			       "  Selected by [m]:\n");
+ 
+ 	fputs(str_get(&gs), stderr);
++	str_free(&gs);
+ 	sym_warnings++;
+ }
+ 
+diff --git a/security/landlock/fs.c b/security/landlock/fs.c
+index e31b97a9f175aa..7adb25150488fc 100644
+--- a/security/landlock/fs.c
++++ b/security/landlock/fs.c
+@@ -937,10 +937,6 @@ static access_mask_t get_mode_access(const umode_t mode)
+ 	switch (mode & S_IFMT) {
+ 	case S_IFLNK:
+ 		return LANDLOCK_ACCESS_FS_MAKE_SYM;
+-	case 0:
+-		/* A zero mode translates to S_IFREG. */
+-	case S_IFREG:
+-		return LANDLOCK_ACCESS_FS_MAKE_REG;
+ 	case S_IFDIR:
+ 		return LANDLOCK_ACCESS_FS_MAKE_DIR;
+ 	case S_IFCHR:
+@@ -951,9 +947,12 @@ static access_mask_t get_mode_access(const umode_t mode)
+ 		return LANDLOCK_ACCESS_FS_MAKE_FIFO;
+ 	case S_IFSOCK:
+ 		return LANDLOCK_ACCESS_FS_MAKE_SOCK;
++	case S_IFREG:
++	case 0:
++		/* A zero mode translates to S_IFREG. */
+ 	default:
+-		WARN_ON_ONCE(1);
+-		return 0;
++		/* Treats weird files as regular files. */
++		return LANDLOCK_ACCESS_FS_MAKE_REG;
+ 	}
+ }
+ 
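
After this hunk, get_mode_access() no longer warns on unexpected file types:
S_IFREG, a zero mode, and anything unrecognized all resolve to
LANDLOCK_ACCESS_FS_MAKE_REG. A small userspace sketch of the same
classification, assuming only standard <sys/stat.h> constants:

	#include <stdio.h>
	#include <sys/stat.h>

	/* Mirrors which branch a given mode takes after the patch. */
	static const char *mode_class(unsigned int mode)
	{
		switch (mode & S_IFMT) {
		case S_IFLNK:	return "MAKE_SYM";
		case S_IFDIR:	return "MAKE_DIR";
		case S_IFCHR:	return "MAKE_CHAR";
		case S_IFBLK:	return "MAKE_BLOCK";
		case S_IFIFO:	return "MAKE_FIFO";
		case S_IFSOCK:	return "MAKE_SOCK";
		default:	return "MAKE_REG";	/* S_IFREG, 0, anything weird */
		}
	}

	int main(void)
	{
		printf("%s\n", mode_class(0));	/* prints MAKE_REG */
		return 0;
	}
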
+diff --git a/sound/core/seq/Kconfig b/sound/core/seq/Kconfig
+index 0374bbf51cd4d3..e4f58cb985d47c 100644
+--- a/sound/core/seq/Kconfig
++++ b/sound/core/seq/Kconfig
+@@ -62,7 +62,7 @@ config SND_SEQ_VIRMIDI
+ 
+ config SND_SEQ_UMP
+ 	bool "Support for UMP events"
+-	default y if SND_SEQ_UMP_CLIENT
++	default SND_UMP
+ 	help
+ 	  Say Y here to enable the support for handling UMP (Universal MIDI
+ 	  Packet) events via ALSA sequencer infrastructure, which is an
+@@ -71,6 +71,6 @@ config SND_SEQ_UMP
+ 	  among legacy and UMP clients.
+ 
+ config SND_SEQ_UMP_CLIENT
+-	def_tristate SND_UMP
++	def_tristate SND_UMP && SND_SEQ_UMP
+ 
+ endif # SND_SEQUENCER
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 8c4de5a253addf..5d99a4ea176a15 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -10143,6 +10143,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1025, 0x1308, "Acer Aspire Z24-890", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1025, 0x132a, "Acer TravelMate B114-21", ALC233_FIXUP_ACER_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1025, 0x1330, "Acer TravelMate X514-51T", ALC255_FIXUP_ACER_HEADSET_MIC),
++	SND_PCI_QUIRK(0x1025, 0x1360, "Acer Aspire A115", ALC255_FIXUP_ACER_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1025, 0x141f, "Acer Spin SP513-54N", ALC255_FIXUP_ACER_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1025, 0x142b, "Acer Swift SF314-42", ALC255_FIXUP_ACER_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1025, 0x1430, "Acer TravelMate B311R-31", ALC256_FIXUP_ACER_MIC_NO_PRESENCE),
+diff --git a/sound/soc/amd/acp/acp-i2s.c b/sound/soc/amd/acp/acp-i2s.c
+index 56ce9e4b6accc7..92c5ff0deea2cd 100644
+--- a/sound/soc/amd/acp/acp-i2s.c
++++ b/sound/soc/amd/acp/acp-i2s.c
+@@ -181,6 +181,7 @@ static int acp_i2s_set_tdm_slot(struct snd_soc_dai *dai, u32 tx_mask, u32 rx_mas
+ 			break;
+ 		default:
+ 			dev_err(dev, "Unknown chip revision %d\n", chip->acp_rev);
++			spin_unlock_irq(&adata->acp_lock);
+ 			return -EINVAL;
+ 		}
+ 	}
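
The acp-i2s fix plugs an error path that returned with adata->acp_lock still
held. The usual kernel-style shape is a single unlock before return, sketched
here with hypothetical names:

	#include <linux/errno.h>
	#include <linux/spinlock.h>

	struct dev_data {
		spinlock_t lock;
	};

	static int do_config(struct dev_data *d, int rev)
	{
		int ret = 0;

		spin_lock_irq(&d->lock);
		switch (rev) {
		case 3:
			/* ... program registers ... */
			break;
		default:
			ret = -EINVAL;	/* unknown revision */
			break;		/* fall through to the one unlock below */
		}
		spin_unlock_irq(&d->lock);

		return ret;
	}
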
+diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
+index 54cbc3feae3277..69cb0b39f22007 100644
+--- a/sound/soc/codecs/Makefile
++++ b/sound/soc/codecs/Makefile
+@@ -79,7 +79,7 @@ snd-soc-cs35l56-shared-y := cs35l56-shared.o
+ snd-soc-cs35l56-i2c-y := cs35l56-i2c.o
+ snd-soc-cs35l56-spi-y := cs35l56-spi.o
+ snd-soc-cs35l56-sdw-y := cs35l56-sdw.o
+-snd-soc-cs40l50-objs := cs40l50-codec.o
++snd-soc-cs40l50-y := cs40l50-codec.o
+ snd-soc-cs42l42-y := cs42l42.o
+ snd-soc-cs42l42-i2c-y := cs42l42-i2c.o
+ snd-soc-cs42l42-sdw-y := cs42l42-sdw.o
+@@ -324,8 +324,8 @@ snd-soc-wcd-classh-y := wcd-clsh-v2.o
+ snd-soc-wcd-mbhc-y := wcd-mbhc-v2.o
+ snd-soc-wcd9335-y := wcd9335.o
+ snd-soc-wcd934x-y := wcd934x.o
+-snd-soc-wcd937x-objs := wcd937x.o
+-snd-soc-wcd937x-sdw-objs := wcd937x-sdw.o
++snd-soc-wcd937x-y := wcd937x.o
++snd-soc-wcd937x-sdw-y := wcd937x-sdw.o
+ snd-soc-wcd938x-y := wcd938x.o
+ snd-soc-wcd938x-sdw-y := wcd938x-sdw.o
+ snd-soc-wcd939x-y := wcd939x.o
+diff --git a/sound/soc/codecs/da7213.c b/sound/soc/codecs/da7213.c
+index 486db60bf2dd14..f17f02d01f8c0f 100644
+--- a/sound/soc/codecs/da7213.c
++++ b/sound/soc/codecs/da7213.c
+@@ -2191,6 +2191,8 @@ static int da7213_i2c_probe(struct i2c_client *i2c)
+ 		return ret;
+ 	}
+ 
++	mutex_init(&da7213->ctrl_lock);
++
+ 	pm_runtime_set_autosuspend_delay(&i2c->dev, 100);
+ 	pm_runtime_use_autosuspend(&i2c->dev);
+ 	pm_runtime_set_active(&i2c->dev);
+diff --git a/sound/soc/intel/avs/apl.c b/sound/soc/intel/avs/apl.c
+index 27516ef5718591..3dccf0a57a3a11 100644
+--- a/sound/soc/intel/avs/apl.c
++++ b/sound/soc/intel/avs/apl.c
+@@ -12,6 +12,7 @@
+ #include "avs.h"
+ #include "messages.h"
+ #include "path.h"
++#include "registers.h"
+ #include "topology.h"
+ 
+ static irqreturn_t avs_apl_dsp_interrupt(struct avs_dev *adev)
+@@ -125,7 +126,7 @@ int avs_apl_coredump(struct avs_dev *adev, union avs_notify_msg *msg)
+ 	struct avs_apl_log_buffer_layout layout;
+ 	void __iomem *addr, *buf;
+ 	size_t dump_size;
+-	u16 offset = 0;
++	u32 offset = 0;
+ 	u8 *dump, *pos;
+ 
+ 	dump_size = AVS_FW_REGS_SIZE + msg->ext.coredump.stack_dump_size;
+diff --git a/sound/soc/intel/avs/cnl.c b/sound/soc/intel/avs/cnl.c
+index bd3c4bb8bf5a17..03f8fb0dc187f5 100644
+--- a/sound/soc/intel/avs/cnl.c
++++ b/sound/soc/intel/avs/cnl.c
+@@ -9,6 +9,7 @@
+ #include <sound/hdaudio_ext.h>
+ #include "avs.h"
+ #include "messages.h"
++#include "registers.h"
+ 
+ static void avs_cnl_ipc_interrupt(struct avs_dev *adev)
+ {
+diff --git a/sound/soc/intel/avs/core.c b/sound/soc/intel/avs/core.c
+index 73d4bde9b2f788..82839d0994ee3e 100644
+--- a/sound/soc/intel/avs/core.c
++++ b/sound/soc/intel/avs/core.c
+@@ -829,10 +829,10 @@ static const struct avs_spec jsl_desc = {
+ 	.hipc = &cnl_hipc_spec,
+ };
+ 
+-#define AVS_TGL_BASED_SPEC(sname)		\
++#define AVS_TGL_BASED_SPEC(sname, min)		\
+ static const struct avs_spec sname##_desc = {	\
+ 	.name = #sname,				\
+-	.min_fw_version = { 10,	29, 0, 5646 },	\
++	.min_fw_version = { 10,	min, 0, 5646 },	\
+ 	.dsp_ops = &avs_tgl_dsp_ops,		\
+ 	.core_init_mask = 1,			\
+ 	.attributes = AVS_PLATATTR_IMR,		\
+@@ -840,11 +840,11 @@ static const struct avs_spec sname##_desc = {	\
+ 	.hipc = &cnl_hipc_spec,			\
+ }
+ 
+-AVS_TGL_BASED_SPEC(lkf);
+-AVS_TGL_BASED_SPEC(tgl);
+-AVS_TGL_BASED_SPEC(ehl);
+-AVS_TGL_BASED_SPEC(adl);
+-AVS_TGL_BASED_SPEC(adl_n);
++AVS_TGL_BASED_SPEC(lkf, 28);
++AVS_TGL_BASED_SPEC(tgl, 29);
++AVS_TGL_BASED_SPEC(ehl, 30);
++AVS_TGL_BASED_SPEC(adl, 35);
++AVS_TGL_BASED_SPEC(adl_n, 35);
+ 
+ static const struct pci_device_id avs_ids[] = {
+ 	{ PCI_DEVICE_DATA(INTEL, HDA_SKL_LP, &skl_desc) },
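
The reworked AVS_TGL_BASED_SPEC() threads the middle firmware-version
component through as a parameter, so each TGL derivative carries its own
minimum build. AVS_TGL_BASED_SPEC(adl, 35), for instance, expands to roughly:

	static const struct avs_spec adl_desc = {
		.name = "adl",
		.min_fw_version = { 10, 35, 0, 5646 },
		.dsp_ops = &avs_tgl_dsp_ops,
		.core_init_mask = 1,
		.attributes = AVS_PLATATTR_IMR,
		/* ... fields between the two hunks are unchanged ... */
		.hipc = &cnl_hipc_spec,
	};
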
+diff --git a/sound/soc/intel/avs/loader.c b/sound/soc/intel/avs/loader.c
+index 890efd2f1feabe..37de077a998386 100644
+--- a/sound/soc/intel/avs/loader.c
++++ b/sound/soc/intel/avs/loader.c
+@@ -308,7 +308,7 @@ avs_hda_init_rom(struct avs_dev *adev, unsigned int dma_id, bool purge)
+ 	}
+ 
+ 	/* await ROM init */
+-	ret = snd_hdac_adsp_readq_poll(adev, spec->sram->rom_status_offset, reg,
++	ret = snd_hdac_adsp_readl_poll(adev, spec->sram->rom_status_offset, reg,
+ 				       (reg & 0xF) == AVS_ROM_INIT_DONE ||
+ 				       (reg & 0xF) == APL_ROM_FW_ENTERED,
+ 				       AVS_ROM_INIT_POLLING_US, APL_ROM_INIT_TIMEOUT_US);
+diff --git a/sound/soc/intel/avs/registers.h b/sound/soc/intel/avs/registers.h
+index f76e91cff2a9a6..5b6d60eb3c18bd 100644
+--- a/sound/soc/intel/avs/registers.h
++++ b/sound/soc/intel/avs/registers.h
+@@ -9,6 +9,8 @@
+ #ifndef __SOUND_SOC_INTEL_AVS_REGS_H
+ #define __SOUND_SOC_INTEL_AVS_REGS_H
+ 
++#include <linux/io-64-nonatomic-lo-hi.h>
++#include <linux/iopoll.h>
+ #include <linux/sizes.h>
+ 
+ #define AZX_PCIREG_PGCTL		0x44
+@@ -98,4 +100,47 @@
+ #define avs_downlink_addr(adev) \
+ 	avs_sram_addr(adev, AVS_DOWNLINK_WINDOW)
+ 
++#define snd_hdac_adsp_writeb(adev, reg, value) \
++	snd_hdac_reg_writeb(&(adev)->base.core, (adev)->dsp_ba + (reg), value)
++#define snd_hdac_adsp_readb(adev, reg) \
++	snd_hdac_reg_readb(&(adev)->base.core, (adev)->dsp_ba + (reg))
++#define snd_hdac_adsp_writew(adev, reg, value) \
++	snd_hdac_reg_writew(&(adev)->base.core, (adev)->dsp_ba + (reg), value)
++#define snd_hdac_adsp_readw(adev, reg) \
++	snd_hdac_reg_readw(&(adev)->base.core, (adev)->dsp_ba + (reg))
++#define snd_hdac_adsp_writel(adev, reg, value) \
++	snd_hdac_reg_writel(&(adev)->base.core, (adev)->dsp_ba + (reg), value)
++#define snd_hdac_adsp_readl(adev, reg) \
++	snd_hdac_reg_readl(&(adev)->base.core, (adev)->dsp_ba + (reg))
++#define snd_hdac_adsp_writeq(adev, reg, value) \
++	snd_hdac_reg_writeq(&(adev)->base.core, (adev)->dsp_ba + (reg), value)
++#define snd_hdac_adsp_readq(adev, reg) \
++	snd_hdac_reg_readq(&(adev)->base.core, (adev)->dsp_ba + (reg))
++
++#define snd_hdac_adsp_updateb(adev, reg, mask, val) \
++	snd_hdac_adsp_writeb(adev, reg, \
++			(snd_hdac_adsp_readb(adev, reg) & ~(mask)) | (val))
++#define snd_hdac_adsp_updatew(adev, reg, mask, val) \
++	snd_hdac_adsp_writew(adev, reg, \
++			(snd_hdac_adsp_readw(adev, reg) & ~(mask)) | (val))
++#define snd_hdac_adsp_updatel(adev, reg, mask, val) \
++	snd_hdac_adsp_writel(adev, reg, \
++			(snd_hdac_adsp_readl(adev, reg) & ~(mask)) | (val))
++#define snd_hdac_adsp_updateq(adev, reg, mask, val) \
++	snd_hdac_adsp_writeq(adev, reg, \
++			(snd_hdac_adsp_readq(adev, reg) & ~(mask)) | (val))
++
++#define snd_hdac_adsp_readb_poll(adev, reg, val, cond, delay_us, timeout_us) \
++	readb_poll_timeout((adev)->dsp_ba + (reg), val, cond, \
++			   delay_us, timeout_us)
++#define snd_hdac_adsp_readw_poll(adev, reg, val, cond, delay_us, timeout_us) \
++	readw_poll_timeout((adev)->dsp_ba + (reg), val, cond, \
++			   delay_us, timeout_us)
++#define snd_hdac_adsp_readl_poll(adev, reg, val, cond, delay_us, timeout_us) \
++	readl_poll_timeout((adev)->dsp_ba + (reg), val, cond, \
++			   delay_us, timeout_us)
++#define snd_hdac_adsp_readq_poll(adev, reg, val, cond, delay_us, timeout_us) \
++	readq_poll_timeout((adev)->dsp_ba + (reg), val, cond, \
++			   delay_us, timeout_us)
++
+ #endif /* __SOUND_SOC_INTEL_AVS_REGS_H */
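
These wrappers are what let loader.c above poll the ROM status as a 32-bit
register instead of a 64-bit one. Conceptually the readl variant reduces to a
plain readl_poll_timeout() on the DSP BAR, as in this sketch (rom_status
stands in for spec->sram->rom_status_offset):

	u32 reg;
	int ret;

	/* Poll a 32-bit status register until the low nibble reports done. */
	ret = readl_poll_timeout(adev->dsp_ba + rom_status, reg,
				 (reg & 0xF) == AVS_ROM_INIT_DONE,
				 AVS_ROM_INIT_POLLING_US, APL_ROM_INIT_TIMEOUT_US);
	if (ret)	/* -ETIMEDOUT if the condition never held */
		return ret;
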
+diff --git a/sound/soc/intel/avs/skl.c b/sound/soc/intel/avs/skl.c
+index 34f859d6e5a49a..d66ef000de9ee7 100644
+--- a/sound/soc/intel/avs/skl.c
++++ b/sound/soc/intel/avs/skl.c
+@@ -12,6 +12,7 @@
+ #include "avs.h"
+ #include "cldma.h"
+ #include "messages.h"
++#include "registers.h"
+ 
+ void avs_skl_ipc_interrupt(struct avs_dev *adev)
+ {
+diff --git a/sound/soc/intel/avs/topology.c b/sound/soc/intel/avs/topology.c
+index 5cda527020c7bf..d612f20ed98937 100644
+--- a/sound/soc/intel/avs/topology.c
++++ b/sound/soc/intel/avs/topology.c
+@@ -1466,7 +1466,7 @@ avs_tplg_path_template_create(struct snd_soc_component *comp, struct avs_tplg *o
+ 
+ static const struct avs_tplg_token_parser mod_init_config_parsers[] = {
+ 	{
+-		.token = AVS_TKN_MOD_INIT_CONFIG_ID_U32,
++		.token = AVS_TKN_INIT_CONFIG_ID_U32,
+ 		.type = SND_SOC_TPLG_TUPLE_TYPE_WORD,
+ 		.offset = offsetof(struct avs_tplg_init_config, id),
+ 		.parse = avs_parse_word_token,
+@@ -1519,7 +1519,7 @@ static int avs_tplg_parse_initial_configs(struct snd_soc_component *comp,
+ 		esize = le32_to_cpu(tuples->size) + le32_to_cpu(tmp->size);
+ 
+ 		ret = parse_dictionary_entries(comp, tuples, esize, config, 1, sizeof(*config),
+-					       AVS_TKN_MOD_INIT_CONFIG_ID_U32,
++					       AVS_TKN_INIT_CONFIG_ID_U32,
+ 					       mod_init_config_parsers,
+ 					       ARRAY_SIZE(mod_init_config_parsers));
+ 
+diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
+index 41042259f2b26e..9f2dc24d44cb54 100644
+--- a/sound/soc/intel/boards/sof_sdw.c
++++ b/sound/soc/intel/boards/sof_sdw.c
+@@ -22,6 +22,8 @@ static int quirk_override = -1;
+ module_param_named(quirk, quirk_override, int, 0444);
+ MODULE_PARM_DESC(quirk, "Board-specific quirk override");
+ 
++#define DMIC_DEFAULT_CHANNELS 2
++
+ static void log_quirks(struct device *dev)
+ {
+ 	if (SOC_SDW_JACK_JDSRC(sof_sdw_quirk))
+@@ -584,17 +586,32 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
+ 		.callback = sof_sdw_quirk_cb,
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+-			DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "3838")
++			DMI_MATCH(DMI_PRODUCT_NAME, "83JX")
+ 		},
+-		.driver_data = (void *)(SOC_SDW_SIDECAR_AMPS),
++		.driver_data = (void *)(SOC_SDW_SIDECAR_AMPS | SOC_SDW_CODEC_MIC),
+ 	},
+ 	{
+ 		.callback = sof_sdw_quirk_cb,
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+-			DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "3832")
++			DMI_MATCH(DMI_PRODUCT_NAME, "83LC")
+ 		},
+-		.driver_data = (void *)(SOC_SDW_SIDECAR_AMPS),
++		.driver_data = (void *)(SOC_SDW_SIDECAR_AMPS | SOC_SDW_CODEC_MIC),
++	},
++	{
++		.callback = sof_sdw_quirk_cb,
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "83MC")
++		},
++		.driver_data = (void *)(SOC_SDW_SIDECAR_AMPS | SOC_SDW_CODEC_MIC),
++	},
++	{
++		.callback = sof_sdw_quirk_cb,
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "83NM")
++		},
++		.driver_data = (void *)(SOC_SDW_SIDECAR_AMPS | SOC_SDW_CODEC_MIC),
+ 	},
+ 	{
+ 		.callback = sof_sdw_quirk_cb,
+@@ -1063,17 +1080,19 @@ static int sof_card_dai_links_create(struct snd_soc_card *card)
+ 		hdmi_num = SOF_PRE_TGL_HDMI_COUNT;
+ 
+ 	/* enable dmic01 & dmic16k */
+-	if (sof_sdw_quirk & SOC_SDW_PCH_DMIC || mach_params->dmic_num) {
+-		if (ctx->ignore_internal_dmic)
+-			dev_warn(dev, "Ignoring PCH DMIC\n");
+-		else
+-			dmic_num = 2;
++	if (ctx->ignore_internal_dmic) {
++		dev_warn(dev, "Ignoring internal DMIC\n");
++		mach_params->dmic_num = 0;
++	} else if (mach_params->dmic_num) {
++		dmic_num = 2;
++	} else if (sof_sdw_quirk & SOC_SDW_PCH_DMIC) {
++		dmic_num = 2;
++		/*
++		 * mach_params->dmic_num will be used to set the cfg-mics value of
++		 * the card->components string. Set it to the default value.
++		 */
++		mach_params->dmic_num = DMIC_DEFAULT_CHANNELS;
+ 	}
+-	/*
+-	 * mach_params->dmic_num will be used to set the cfg-mics value of card->components
+-	 * string. Overwrite it to the actual number of PCH DMICs used in the device.
+-	 */
+-	mach_params->dmic_num = dmic_num;
+ 
+ 	if (sof_sdw_quirk & SOF_SSP_BT_OFFLOAD_PRESENT)
+ 		bt_num = 1;
+diff --git a/sound/soc/mediatek/mt8365/Makefile b/sound/soc/mediatek/mt8365/Makefile
+index 52ba45a8498a20..b197025e34bb80 100644
+--- a/sound/soc/mediatek/mt8365/Makefile
++++ b/sound/soc/mediatek/mt8365/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ 
+ # MTK Platform driver
+-snd-soc-mt8365-pcm-objs := \
++snd-soc-mt8365-pcm-y := \
+ 	mt8365-afe-clk.o \
+ 	mt8365-afe-pcm.o \
+ 	mt8365-dai-adda.o \
+diff --git a/sound/soc/rockchip/rockchip_i2s_tdm.c b/sound/soc/rockchip/rockchip_i2s_tdm.c
+index d1f28699652fe3..acd75e48851fcf 100644
+--- a/sound/soc/rockchip/rockchip_i2s_tdm.c
++++ b/sound/soc/rockchip/rockchip_i2s_tdm.c
+@@ -22,7 +22,6 @@
+ 
+ #define DRV_NAME "rockchip-i2s-tdm"
+ 
+-#define DEFAULT_MCLK_FS				256
+ #define CH_GRP_MAX				4  /* The max channel 8 / 2 */
+ #define MULTIPLEX_CH_MAX			10
+ 
+@@ -70,6 +69,8 @@ struct rk_i2s_tdm_dev {
+ 	bool has_playback;
+ 	bool has_capture;
+ 	struct snd_soc_dai_driver *dai;
++	unsigned int mclk_rx_freq;
++	unsigned int mclk_tx_freq;
+ };
+ 
+ static int to_ch_num(unsigned int val)
+@@ -645,6 +646,27 @@ static int rockchip_i2s_trcm_mode(struct snd_pcm_substream *substream,
+ 	return 0;
+ }
+ 
++static int rockchip_i2s_tdm_set_sysclk(struct snd_soc_dai *cpu_dai, int stream,
++				       unsigned int freq, int dir)
++{
++	struct rk_i2s_tdm_dev *i2s_tdm = to_info(cpu_dai);
++
++	if (i2s_tdm->clk_trcm) {
++		i2s_tdm->mclk_tx_freq = freq;
++		i2s_tdm->mclk_rx_freq = freq;
++	} else {
++		if (stream == SNDRV_PCM_STREAM_PLAYBACK)
++			i2s_tdm->mclk_tx_freq = freq;
++		else
++			i2s_tdm->mclk_rx_freq = freq;
++	}
++
++	dev_dbg(i2s_tdm->dev, "The target mclk_%s freq is: %d\n",
++		stream ? "rx" : "tx", freq);
++
++	return 0;
++}
++
+ static int rockchip_i2s_tdm_hw_params(struct snd_pcm_substream *substream,
+ 				      struct snd_pcm_hw_params *params,
+ 				      struct snd_soc_dai *dai)
+@@ -659,15 +681,19 @@ static int rockchip_i2s_tdm_hw_params(struct snd_pcm_substream *substream,
+ 
+ 		if (i2s_tdm->clk_trcm == TRCM_TX) {
+ 			mclk = i2s_tdm->mclk_tx;
++			mclk_rate = i2s_tdm->mclk_tx_freq;
+ 		} else if (i2s_tdm->clk_trcm == TRCM_RX) {
+ 			mclk = i2s_tdm->mclk_rx;
++			mclk_rate = i2s_tdm->mclk_rx_freq;
+ 		} else if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ 			mclk = i2s_tdm->mclk_tx;
++			mclk_rate = i2s_tdm->mclk_tx_freq;
+ 		} else {
+ 			mclk = i2s_tdm->mclk_rx;
++			mclk_rate = i2s_tdm->mclk_rx_freq;
+ 		}
+ 
+-		err = clk_set_rate(mclk, DEFAULT_MCLK_FS * params_rate(params));
++		err = clk_set_rate(mclk, mclk_rate);
+ 		if (err)
+ 			return err;
+ 
+@@ -827,6 +853,7 @@ static const struct snd_soc_dai_ops rockchip_i2s_tdm_dai_ops = {
+ 	.hw_params = rockchip_i2s_tdm_hw_params,
+ 	.set_bclk_ratio	= rockchip_i2s_tdm_set_bclk_ratio,
+ 	.set_fmt = rockchip_i2s_tdm_set_fmt,
++	.set_sysclk = rockchip_i2s_tdm_set_sysclk,
+ 	.set_tdm_slot = rockchip_dai_tdm_slot,
+ 	.trigger = rockchip_i2s_tdm_trigger,
+ };
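
With DEFAULT_MCLK_FS removed, the CPU DAI no longer assumes MCLK = 256 * fs;
a machine driver must now hand the rate over through the standard set_sysclk
callback. A hedged sketch of the caller side (the 256 ratio and the function
name are illustrative, not from this patch):

	#include <sound/pcm_params.h>
	#include <sound/soc.h>

	static int board_hw_params(struct snd_pcm_substream *substream,
				   struct snd_pcm_hw_params *params)
	{
		struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
		struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0);
		unsigned int mclk = params_rate(params) * 256;	/* example ratio */

		/* Lands in rockchip_i2s_tdm_set_sysclk() added above. */
		return snd_soc_dai_set_sysclk(cpu_dai, 0, mclk, SND_SOC_CLOCK_OUT);
	}
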
+diff --git a/sound/soc/sh/rz-ssi.c b/sound/soc/sh/rz-ssi.c
+index 040ce0431fd2c5..32db2cead8a4ec 100644
+--- a/sound/soc/sh/rz-ssi.c
++++ b/sound/soc/sh/rz-ssi.c
+@@ -258,8 +258,7 @@ static void rz_ssi_stream_quit(struct rz_ssi_priv *ssi,
+ static int rz_ssi_clk_setup(struct rz_ssi_priv *ssi, unsigned int rate,
+ 			    unsigned int channels)
+ {
+-	static s8 ckdv[16] = { 1,  2,  4,  8, 16, 32, 64, 128,
+-			       6, 12, 24, 48, 96, -1, -1, -1 };
++	static u8 ckdv[] = { 1,  2,  4,  8, 16, 32, 64, 128, 6, 12, 24, 48, 96 };
+ 	unsigned int channel_bits = 32;	/* System Word Length */
+ 	unsigned long bclk_rate = rate * channels * channel_bits;
+ 	unsigned int div;
+diff --git a/sound/soc/sunxi/sun4i-spdif.c b/sound/soc/sunxi/sun4i-spdif.c
+index 0aa4164232464e..7cf623cbe9ed4b 100644
+--- a/sound/soc/sunxi/sun4i-spdif.c
++++ b/sound/soc/sunxi/sun4i-spdif.c
+@@ -176,6 +176,7 @@ struct sun4i_spdif_quirks {
+ 	unsigned int reg_dac_txdata;
+ 	bool has_reset;
+ 	unsigned int val_fctl_ftx;
++	unsigned int mclk_multiplier;
+ };
+ 
+ struct sun4i_spdif_dev {
+@@ -313,6 +314,7 @@ static int sun4i_spdif_hw_params(struct snd_pcm_substream *substream,
+ 	default:
+ 		return -EINVAL;
+ 	}
++	mclk *= host->quirks->mclk_multiplier;
+ 
+ 	ret = clk_set_rate(host->spdif_clk, mclk);
+ 	if (ret < 0) {
+@@ -347,6 +349,7 @@ static int sun4i_spdif_hw_params(struct snd_pcm_substream *substream,
+ 	default:
+ 		return -EINVAL;
+ 	}
++	mclk_div *= host->quirks->mclk_multiplier;
+ 
+ 	reg_val = 0;
+ 	reg_val |= SUN4I_SPDIF_TXCFG_ASS;
+@@ -540,24 +543,28 @@ static struct snd_soc_dai_driver sun4i_spdif_dai = {
+ static const struct sun4i_spdif_quirks sun4i_a10_spdif_quirks = {
+ 	.reg_dac_txdata	= SUN4I_SPDIF_TXFIFO,
+ 	.val_fctl_ftx   = SUN4I_SPDIF_FCTL_FTX,
++	.mclk_multiplier = 1,
+ };
+ 
+ static const struct sun4i_spdif_quirks sun6i_a31_spdif_quirks = {
+ 	.reg_dac_txdata	= SUN4I_SPDIF_TXFIFO,
+ 	.val_fctl_ftx   = SUN4I_SPDIF_FCTL_FTX,
+ 	.has_reset	= true,
++	.mclk_multiplier = 1,
+ };
+ 
+ static const struct sun4i_spdif_quirks sun8i_h3_spdif_quirks = {
+ 	.reg_dac_txdata	= SUN8I_SPDIF_TXFIFO,
+ 	.val_fctl_ftx   = SUN4I_SPDIF_FCTL_FTX,
+ 	.has_reset	= true,
++	.mclk_multiplier = 4,
+ };
+ 
+ static const struct sun4i_spdif_quirks sun50i_h6_spdif_quirks = {
+ 	.reg_dac_txdata = SUN8I_SPDIF_TXFIFO,
+ 	.val_fctl_ftx   = SUN50I_H6_SPDIF_FCTL_FTX,
+ 	.has_reset      = true,
++	.mclk_multiplier = 1,
+ };
+ 
+ static const struct of_device_id sun4i_spdif_of_match[] = {
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 7968d6a2f592ac..a97efb7b131ea2 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -2343,6 +2343,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ 		   QUIRK_FLAG_CTL_MSG_DELAY_1M),
+ 	DEVICE_FLG(0x2d95, 0x8021, /* VIVO USB-C-XE710 HEADSET */
+ 		   QUIRK_FLAG_CTL_MSG_DELAY_1M),
++	DEVICE_FLG(0x2fc6, 0xf0b7, /* iBasso DC07 Pro */
++		   QUIRK_FLAG_CTL_MSG_DELAY_1M),
+ 	DEVICE_FLG(0x30be, 0x0101, /* Schiit Hel */
+ 		   QUIRK_FLAG_IGNORE_CTL_ERROR),
+ 	DEVICE_FLG(0x413c, 0xa506, /* Dell AE515 sound bar */
+diff --git a/tools/bootconfig/main.c b/tools/bootconfig/main.c
+index 156b62a163c5a6..8a48cc2536f566 100644
+--- a/tools/bootconfig/main.c
++++ b/tools/bootconfig/main.c
+@@ -226,7 +226,7 @@ static int load_xbc_from_initrd(int fd, char **buf)
+ 	/* Wrong Checksum */
+ 	rcsum = xbc_calc_checksum(*buf, size);
+ 	if (csum != rcsum) {
+-		pr_err("checksum error: %d != %d\n", csum, rcsum);
++		pr_err("checksum error: %u != %u\n", csum, rcsum);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -395,7 +395,7 @@ static int apply_xbc(const char *path, const char *xbc_path)
+ 	xbc_get_info(&ret, NULL);
+ 	printf("\tNumber of nodes: %d\n", ret);
+ 	printf("\tSize: %u bytes\n", (unsigned int)size);
+-	printf("\tChecksum: %d\n", (unsigned int)csum);
++	printf("\tChecksum: %u\n", (unsigned int)csum);
+ 
+ 	/* TODO: Check the options by schema */
+ 	xbc_exit();
+diff --git a/tools/include/uapi/linux/if_xdp.h b/tools/include/uapi/linux/if_xdp.h
+index 2f082b01ff2284..42ec5ddaab8dc8 100644
+--- a/tools/include/uapi/linux/if_xdp.h
++++ b/tools/include/uapi/linux/if_xdp.h
+@@ -117,12 +117,12 @@ struct xdp_options {
+ 	((1ULL << XSK_UNALIGNED_BUF_OFFSET_SHIFT) - 1)
+ 
+ /* Request transmit timestamp. Upon completion, put it into tx_timestamp
+- * field of union xsk_tx_metadata.
++ * field of struct xsk_tx_metadata.
+  */
+ #define XDP_TXMD_FLAGS_TIMESTAMP		(1 << 0)
+ 
+ /* Request transmit checksum offload. Checksum start position and offset
+- * are communicated via csum_start and csum_offset fields of union
++ * are communicated via csum_start and csum_offset fields of struct
+  * xsk_tx_metadata.
+  */
+ #define XDP_TXMD_FLAGS_CHECKSUM			(1 << 1)
+diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
+index 3c131039c52326..27e7bfae953bd3 100644
+--- a/tools/lib/bpf/btf.c
++++ b/tools/lib/bpf/btf.c
+@@ -1185,6 +1185,7 @@ static struct btf *btf_parse_elf(const char *path, struct btf *base_btf,
+ 
+ 	elf = elf_begin(fd, ELF_C_READ, NULL);
+ 	if (!elf) {
++		err = -LIBBPF_ERRNO__FORMAT;
+ 		pr_warn("failed to open %s as ELF file\n", path);
+ 		goto done;
+ 	}
+diff --git a/tools/lib/bpf/btf_relocate.c b/tools/lib/bpf/btf_relocate.c
+index 4f7399d85eab3d..8ef8003480dac8 100644
+--- a/tools/lib/bpf/btf_relocate.c
++++ b/tools/lib/bpf/btf_relocate.c
+@@ -212,7 +212,7 @@ static int btf_relocate_map_distilled_base(struct btf_relocate *r)
+ 	 * need to match both name and size, otherwise embedding the base
+ 	 * struct/union in the split type is invalid.
+ 	 */
+-	for (id = r->nr_dist_base_types; id < r->nr_split_types; id++) {
++	for (id = r->nr_dist_base_types; id < r->nr_dist_base_types + r->nr_split_types; id++) {
+ 		err = btf_mark_embedded_composite_type_ids(r, id);
+ 		if (err)
+ 			goto done;
+diff --git a/tools/lib/bpf/linker.c b/tools/lib/bpf/linker.c
+index 6985ab0f1ca9e8..777600822d8e45 100644
+--- a/tools/lib/bpf/linker.c
++++ b/tools/lib/bpf/linker.c
+@@ -567,17 +567,15 @@ static int linker_load_obj_file(struct bpf_linker *linker, const char *filename,
+ 	}
+ 	obj->elf = elf_begin(obj->fd, ELF_C_READ_MMAP, NULL);
+ 	if (!obj->elf) {
+-		err = -errno;
+ 		pr_warn_elf("failed to parse ELF file '%s'", filename);
+-		return err;
++		return -EINVAL;
+ 	}
+ 
+ 	/* Sanity check ELF file high-level properties */
+ 	ehdr = elf64_getehdr(obj->elf);
+ 	if (!ehdr) {
+-		err = -errno;
+ 		pr_warn_elf("failed to get ELF header for %s", filename);
+-		return err;
++		return -EINVAL;
+ 	}
+ 	if (ehdr->e_ident[EI_DATA] != host_endianness) {
+ 		err = -EOPNOTSUPP;
+@@ -593,9 +591,8 @@ static int linker_load_obj_file(struct bpf_linker *linker, const char *filename,
+ 	}
+ 
+ 	if (elf_getshdrstrndx(obj->elf, &obj->shstrs_sec_idx)) {
+-		err = -errno;
+ 		pr_warn_elf("failed to get SHSTRTAB section index for %s", filename);
+-		return err;
++		return -EINVAL;
+ 	}
+ 
+ 	scn = NULL;
+@@ -605,26 +602,23 @@ static int linker_load_obj_file(struct bpf_linker *linker, const char *filename,
+ 
+ 		shdr = elf64_getshdr(scn);
+ 		if (!shdr) {
+-			err = -errno;
+ 			pr_warn_elf("failed to get section #%zu header for %s",
+ 				    sec_idx, filename);
+-			return err;
++			return -EINVAL;
+ 		}
+ 
+ 		sec_name = elf_strptr(obj->elf, obj->shstrs_sec_idx, shdr->sh_name);
+ 		if (!sec_name) {
+-			err = -errno;
+ 			pr_warn_elf("failed to get section #%zu name for %s",
+ 				    sec_idx, filename);
+-			return err;
++			return -EINVAL;
+ 		}
+ 
+ 		data = elf_getdata(scn, 0);
+ 		if (!data) {
+-			err = -errno;
+ 			pr_warn_elf("failed to get section #%zu (%s) data from %s",
+ 				    sec_idx, sec_name, filename);
+-			return err;
++			return -EINVAL;
+ 		}
+ 
+ 		sec = add_src_sec(obj, sec_name);
+@@ -2635,14 +2629,14 @@ int bpf_linker__finalize(struct bpf_linker *linker)
+ 
+ 	/* Finalize ELF layout */
+ 	if (elf_update(linker->elf, ELF_C_NULL) < 0) {
+-		err = -errno;
++		err = -EINVAL;
+ 		pr_warn_elf("failed to finalize ELF layout");
+ 		return libbpf_err(err);
+ 	}
+ 
+ 	/* Write out final ELF contents */
+ 	if (elf_update(linker->elf, ELF_C_WRITE) < 0) {
+-		err = -errno;
++		err = -EINVAL;
+ 		pr_warn_elf("failed to write ELF contents");
+ 		return libbpf_err(err);
+ 	}
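
The linker.c and btf.c changes share one rationale: libelf does not set errno
on failure, so "err = -errno" after elf_begin(), elf64_getehdr() or
elf_getdata() could yield 0 or a stale value. libelf reports errors through
its own channel instead, as in this sketch:

	#include <errno.h>
	#include <libelf.h>
	#include <stdio.h>

	static int open_elf(int fd, Elf **out)
	{
		*out = elf_begin(fd, ELF_C_READ, NULL);
		if (!*out) {
			/* errno is not reliable here; ask libelf instead. */
			fprintf(stderr, "elf_begin: %s\n", elf_errmsg(elf_errno()));
			return -EINVAL;	/* fixed, well-defined code, as in the patch */
		}
		return 0;
	}
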
+diff --git a/tools/lib/bpf/usdt.c b/tools/lib/bpf/usdt.c
+index 93794f01bb67cb..6ff28e7bf5e3da 100644
+--- a/tools/lib/bpf/usdt.c
++++ b/tools/lib/bpf/usdt.c
+@@ -659,7 +659,7 @@ static int collect_usdt_targets(struct usdt_manager *man, Elf *elf, const char *
+ 		 *   [0] https://sourceware.org/systemtap/wiki/UserSpaceProbeImplementation
+ 		 */
+ 		usdt_abs_ip = note.loc_addr;
+-		if (base_addr)
++		if (base_addr && note.base_addr)
+ 			usdt_abs_ip += base_addr - note.base_addr;
+ 
+ 		/* When attaching uprobes (which is what USDTs basically are)
+diff --git a/tools/net/ynl/lib/ynl.c b/tools/net/ynl/lib/ynl.c
+index e16cef160bc2cb..ce32cb35007d6f 100644
+--- a/tools/net/ynl/lib/ynl.c
++++ b/tools/net/ynl/lib/ynl.c
+@@ -95,7 +95,7 @@ ynl_err_walk(struct ynl_sock *ys, void *start, void *end, unsigned int off,
+ 
+ 	ynl_attr_for_each_payload(start, data_len, attr) {
+ 		astart_off = (char *)attr - (char *)start;
+-		aend_off = astart_off + ynl_attr_data_len(attr);
++		aend_off = (char *)ynl_attr_data_end(attr) - (char *)start;
+ 		if (aend_off <= off)
+ 			continue;
+ 
+diff --git a/tools/perf/MANIFEST b/tools/perf/MANIFEST
+index dc42de1785cee7..908165fcec7de3 100644
+--- a/tools/perf/MANIFEST
++++ b/tools/perf/MANIFEST
+@@ -1,5 +1,6 @@
+ arch/arm64/tools/gen-sysreg.awk
+ arch/arm64/tools/sysreg
++arch/*/include/uapi/asm/bpf_perf_event.h
+ tools/perf
+ tools/arch
+ tools/scripts
+diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
+index d6989195a061ff..11e49cafa3af9d 100644
+--- a/tools/perf/builtin-inject.c
++++ b/tools/perf/builtin-inject.c
+@@ -2367,10 +2367,10 @@ int cmd_inject(int argc, const char **argv)
+ 	};
+ 	int ret;
+ 	const char *known_build_ids = NULL;
+-	bool build_ids;
+-	bool build_id_all;
+-	bool mmap2_build_ids;
+-	bool mmap2_build_id_all;
++	bool build_ids = false;
++	bool build_id_all = false;
++	bool mmap2_build_ids = false;
++	bool mmap2_build_id_all = false;
+ 
+ 	struct option options[] = {
+ 		OPT_BOOLEAN('b', "build-ids", &build_ids,
+diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
+index 062e2b56a2ab57..33a456980664a0 100644
+--- a/tools/perf/builtin-lock.c
++++ b/tools/perf/builtin-lock.c
+@@ -1591,8 +1591,8 @@ static const struct {
+ 	{ LCB_F_PERCPU | LCB_F_WRITE,	"pcpu-sem:W",	"percpu-rwsem" },
+ 	{ LCB_F_MUTEX,			"mutex",	"mutex" },
+ 	{ LCB_F_MUTEX | LCB_F_SPIN,	"mutex",	"mutex" },
+-	/* alias for get_type_flag() */
+-	{ LCB_F_MUTEX | LCB_F_SPIN,	"mutex-spin",	"mutex" },
++	/* alias for optimistic spinning only */
++	{ LCB_F_MUTEX | LCB_F_SPIN,	"mutex:spin",	"mutex-spin" },
+ };
+ 
+ static const char *get_type_str(unsigned int flags)
+@@ -1617,19 +1617,6 @@ static const char *get_type_name(unsigned int flags)
+ 	return "unknown";
+ }
+ 
+-static unsigned int get_type_flag(const char *str)
+-{
+-	for (unsigned int i = 0; i < ARRAY_SIZE(lock_type_table); i++) {
+-		if (!strcmp(lock_type_table[i].name, str))
+-			return lock_type_table[i].flags;
+-	}
+-	for (unsigned int i = 0; i < ARRAY_SIZE(lock_type_table); i++) {
+-		if (!strcmp(lock_type_table[i].str, str))
+-			return lock_type_table[i].flags;
+-	}
+-	return UINT_MAX;
+-}
+-
+ static void lock_filter_finish(void)
+ {
+ 	zfree(&filters.types);
+@@ -2350,29 +2337,58 @@ static int parse_lock_type(const struct option *opt __maybe_unused, const char *
+ 			   int unset __maybe_unused)
+ {
+ 	char *s, *tmp, *tok;
+-	int ret = 0;
+ 
+ 	s = strdup(str);
+ 	if (s == NULL)
+ 		return -1;
+ 
+ 	for (tok = strtok_r(s, ", ", &tmp); tok; tok = strtok_r(NULL, ", ", &tmp)) {
+-		unsigned int flags = get_type_flag(tok);
++		bool found = false;
+ 
+-		if (flags == -1U) {
+-			pr_err("Unknown lock flags: %s\n", tok);
+-			ret = -1;
+-			break;
++		/* `tok` is `str` in `lock_type_table` if it contains ':'. */
++		if (strchr(tok, ':')) {
++			for (unsigned int i = 0; i < ARRAY_SIZE(lock_type_table); i++) {
++				if (!strcmp(lock_type_table[i].str, tok) &&
++				    add_lock_type(lock_type_table[i].flags)) {
++					found = true;
++					break;
++				}
++			}
++
++			if (!found) {
++				pr_err("Unknown lock flags name: %s\n", tok);
++				free(s);
++				return -1;
++			}
++
++			continue;
+ 		}
+ 
+-		if (!add_lock_type(flags)) {
+-			ret = -1;
+-			break;
++		/*
++		 * Otherwise `tok` is `name` in `lock_type_table`.
++		 * Single lock name could contain multiple flags.
++		 */
++		for (unsigned int i = 0; i < ARRAY_SIZE(lock_type_table); i++) {
++			if (!strcmp(lock_type_table[i].name, tok)) {
++				if (add_lock_type(lock_type_table[i].flags)) {
++					found = true;
++				} else {
++					free(s);
++					return -1;
++				}
++			}
+ 		}
++
++		if (!found) {
++			pr_err("Unknown lock name: %s\n", tok);
++			free(s);
++			return -1;
++		}
++
+ 	}
+ 
+ 	free(s);
+-	return ret;
++	return 0;
+ }
+ 
+ static bool add_lock_addr(unsigned long addr)
+diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
+index 5dc17ffee27a2d..645deec294c842 100644
+--- a/tools/perf/builtin-report.c
++++ b/tools/perf/builtin-report.c
+@@ -1418,7 +1418,7 @@ int cmd_report(int argc, const char **argv)
+ 	OPT_STRING(0, "addr2line", &addr2line_path, "path",
+ 		   "addr2line binary to use for line numbers"),
+ 	OPT_BOOLEAN(0, "demangle", &symbol_conf.demangle,
+-		    "Disable symbol demangling"),
++		    "Symbol demangling. Enabled by default, use --no-demangle to disable."),
+ 	OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
+ 		    "Enable kernel symbol demangling"),
+ 	OPT_BOOLEAN(0, "mem-mode", &report.mem_mode, "mem access profile"),
+diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
+index 724a7938632126..ca3e8eca6610e8 100644
+--- a/tools/perf/builtin-top.c
++++ b/tools/perf/builtin-top.c
+@@ -809,7 +809,7 @@ static void perf_event__process_sample(const struct perf_tool *tool,
+ 		 * invalid --vmlinux ;-)
+ 		 */
+ 		if (!machine->kptr_restrict_warned && !top->vmlinux_warned &&
+-		    __map__is_kernel(al.map) && map__has_symbols(al.map)) {
++		    __map__is_kernel(al.map) && !map__has_symbols(al.map)) {
+ 			if (symbol_conf.vmlinux_name) {
+ 				char serr[256];
+ 
+diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
+index ffa1295273099e..ecd26e058baf67 100644
+--- a/tools/perf/builtin-trace.c
++++ b/tools/perf/builtin-trace.c
+@@ -2122,8 +2122,12 @@ static int trace__read_syscall_info(struct trace *trace, int id)
+ 		return PTR_ERR(sc->tp_format);
+ 	}
+ 
++	/*
++	 * The tracepoint format contains a __syscall_nr field, so its field
++	 * count is one more than the actual number of syscall arguments.
++	 */
+ 	if (syscall__alloc_arg_fmts(sc, IS_ERR(sc->tp_format) ?
+-					RAW_SYSCALL_ARGS_NUM : sc->tp_format->format.nr_fields))
++					RAW_SYSCALL_ARGS_NUM : sc->tp_format->format.nr_fields - 1))
+ 		return -ENOMEM;
+ 
+ 	sc->args = sc->tp_format->format.fields;
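
For reference, a syscall tracepoint's format lists an extra __syscall_nr field
ahead of the real arguments (sys_enter_openat, for example, carries
__syscall_nr plus dfd, filename, flags and mode), which is why the argument
count is nr_fields - 1. In libtraceevent terms:

	#include <traceevent/event-parse.h>

	/* Sketch: the tracepoint's field count minus the __syscall_nr slot. */
	static int syscall_nr_args(const struct tep_event *tp_format)
	{
		return tp_format->format.nr_fields - 1;
	}
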
+diff --git a/tools/perf/tests/shell/trace_btf_enum.sh b/tools/perf/tests/shell/trace_btf_enum.sh
+index 5a3b8a5a9b5cf2..8d1e6bbeac9068 100755
+--- a/tools/perf/tests/shell/trace_btf_enum.sh
++++ b/tools/perf/tests/shell/trace_btf_enum.sh
+@@ -26,8 +26,12 @@ check_vmlinux() {
+ trace_landlock() {
+   echo "Tracing syscall ${syscall}"
+ 
+-  # test flight just to see if landlock_add_rule and libbpf are available
+-  $TESTPROG
++  # test flight just to see if landlock_add_rule is available
++  if ! perf trace $TESTPROG 2>&1 | grep -q landlock
++  then
++    echo "No landlock system call found, skipping to non-syscall tracing."
++    return
++  fi
+ 
+   if perf trace -e $syscall $TESTPROG 2>&1 | \
+      grep -q -E ".*landlock_add_rule\(ruleset_fd: 11, rule_type: (LANDLOCK_RULE_PATH_BENEATH|LANDLOCK_RULE_NET_PORT), rule_attr: 0x[a-f0-9]+, flags: 45\) = -1.*"
+diff --git a/tools/perf/util/bpf-event.c b/tools/perf/util/bpf-event.c
+index 13608237c50e05..c81444059ad077 100644
+--- a/tools/perf/util/bpf-event.c
++++ b/tools/perf/util/bpf-event.c
+@@ -289,7 +289,10 @@ static int perf_event__synthesize_one_bpf_prog(struct perf_session *session,
+ 		}
+ 
+ 		info_node->info_linear = info_linear;
+-		perf_env__insert_bpf_prog_info(env, info_node);
++		if (!perf_env__insert_bpf_prog_info(env, info_node)) {
++			free(info_linear);
++			free(info_node);
++		}
+ 		info_linear = NULL;
+ 
+ 		/*
+@@ -480,7 +483,10 @@ static void perf_env__add_bpf_info(struct perf_env *env, u32 id)
+ 	info_node = malloc(sizeof(struct bpf_prog_info_node));
+ 	if (info_node) {
+ 		info_node->info_linear = info_linear;
+-		perf_env__insert_bpf_prog_info(env, info_node);
++		if (!perf_env__insert_bpf_prog_info(env, info_node)) {
++			free(info_linear);
++			free(info_node);
++		}
+ 	} else
+ 		free(info_linear);
+ 
+diff --git a/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c b/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c
+index 4a62ed593e84ed..e4352881e3faa6 100644
+--- a/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c
++++ b/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c
+@@ -431,9 +431,9 @@ static bool pid_filter__has(struct pids_filtered *pids, pid_t pid)
+ static int augment_sys_enter(void *ctx, struct syscall_enter_args *args)
+ {
+ 	bool augmented, do_output = false;
+-	int zero = 0, size, aug_size, index,
+-	    value_size = sizeof(struct augmented_arg) - offsetof(struct augmented_arg, value);
++	int zero = 0, index, value_size = sizeof(struct augmented_arg) - offsetof(struct augmented_arg, value);
+ 	u64 output = 0; /* has to be u64, otherwise it won't pass the verifier */
++	s64 aug_size, size;
+ 	unsigned int nr, *beauty_map;
+ 	struct beauty_payload_enter *payload;
+ 	void *arg, *payload_offset;
+@@ -484,14 +484,11 @@ static int augment_sys_enter(void *ctx, struct syscall_enter_args *args)
+ 		} else if (size > 0 && size <= value_size) { /* struct */
+ 			if (!bpf_probe_read_user(((struct augmented_arg *)payload_offset)->value, size, arg))
+ 				augmented = true;
+-		} else if (size < 0 && size >= -6) { /* buffer */
++		} else if ((int)size < 0 && size >= -6) { /* buffer */
+ 			index = -(size + 1);
+ 			barrier_var(index); // Prevent clang (noticed with v18) from removing the &= 7 trick.
+ 			index &= 7;	    // Satisfy the bounds checking with the verifier in some kernels.
+-			aug_size = args->args[index];
+-
+-			if (aug_size > TRACE_AUG_MAX_BUF)
+-				aug_size = TRACE_AUG_MAX_BUF;
++			aug_size = args->args[index] > TRACE_AUG_MAX_BUF ? TRACE_AUG_MAX_BUF : args->args[index];
+ 
+ 			if (aug_size > 0) {
+ 				if (!bpf_probe_read_user(((struct augmented_arg *)payload_offset)->value, aug_size, arg))
+diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
+index 1edbccfc3281d2..d981b6f4bc5ea2 100644
+--- a/tools/perf/util/env.c
++++ b/tools/perf/util/env.c
+@@ -22,15 +22,19 @@ struct perf_env perf_env;
+ #include "bpf-utils.h"
+ #include <bpf/libbpf.h>
+ 
+-void perf_env__insert_bpf_prog_info(struct perf_env *env,
++bool perf_env__insert_bpf_prog_info(struct perf_env *env,
+ 				    struct bpf_prog_info_node *info_node)
+ {
++	bool ret;
++
+ 	down_write(&env->bpf_progs.lock);
+-	__perf_env__insert_bpf_prog_info(env, info_node);
++	ret = __perf_env__insert_bpf_prog_info(env, info_node);
+ 	up_write(&env->bpf_progs.lock);
++
++	return ret;
+ }
+ 
+-void __perf_env__insert_bpf_prog_info(struct perf_env *env, struct bpf_prog_info_node *info_node)
++bool __perf_env__insert_bpf_prog_info(struct perf_env *env, struct bpf_prog_info_node *info_node)
+ {
+ 	__u32 prog_id = info_node->info_linear->info.id;
+ 	struct bpf_prog_info_node *node;
+@@ -48,13 +52,14 @@ void __perf_env__insert_bpf_prog_info(struct perf_env *env, struct bpf_prog_info
+ 			p = &(*p)->rb_right;
+ 		} else {
+ 			pr_debug("duplicated bpf prog info %u\n", prog_id);
+-			return;
++			return false;
+ 		}
+ 	}
+ 
+ 	rb_link_node(&info_node->rb_node, parent, p);
+ 	rb_insert_color(&info_node->rb_node, &env->bpf_progs.infos);
+ 	env->bpf_progs.infos_cnt++;
++	return true;
+ }
+ 
+ struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
+diff --git a/tools/perf/util/env.h b/tools/perf/util/env.h
+index 51b36c36019be6..38de0af2a68081 100644
+--- a/tools/perf/util/env.h
++++ b/tools/perf/util/env.h
+@@ -176,9 +176,9 @@ const char *perf_env__raw_arch(struct perf_env *env);
+ int perf_env__nr_cpus_avail(struct perf_env *env);
+ 
+ void perf_env__init(struct perf_env *env);
+-void __perf_env__insert_bpf_prog_info(struct perf_env *env,
++bool __perf_env__insert_bpf_prog_info(struct perf_env *env,
+ 				      struct bpf_prog_info_node *info_node);
+-void perf_env__insert_bpf_prog_info(struct perf_env *env,
++bool perf_env__insert_bpf_prog_info(struct perf_env *env,
+ 				    struct bpf_prog_info_node *info_node);
+ struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
+ 							__u32 prog_id);
+diff --git a/tools/perf/util/expr.c b/tools/perf/util/expr.c
+index b2536a59c44e64..90c6ce2212e4fe 100644
+--- a/tools/perf/util/expr.c
++++ b/tools/perf/util/expr.c
+@@ -288,7 +288,7 @@ struct expr_parse_ctx *expr__ctx_new(void)
+ {
+ 	struct expr_parse_ctx *ctx;
+ 
+-	ctx = malloc(sizeof(struct expr_parse_ctx));
++	ctx = calloc(1, sizeof(struct expr_parse_ctx));
+ 	if (!ctx)
+ 		return NULL;
+ 
+@@ -297,9 +297,6 @@ struct expr_parse_ctx *expr__ctx_new(void)
+ 		free(ctx);
+ 		return NULL;
+ 	}
+-	ctx->sctx.user_requested_cpu_list = NULL;
+-	ctx->sctx.runtime = 0;
+-	ctx->sctx.system_wide = false;
+ 
+ 	return ctx;
+ }
+diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
+index a6386d12afd729..7b99f58f7bf269 100644
+--- a/tools/perf/util/header.c
++++ b/tools/perf/util/header.c
+@@ -3188,7 +3188,10 @@ static int process_bpf_prog_info(struct feat_fd *ff, void *data __maybe_unused)
+ 		/* after reading from file, translate offset to address */
+ 		bpil_offs_to_addr(info_linear);
+ 		info_node->info_linear = info_linear;
+-		__perf_env__insert_bpf_prog_info(env, info_node);
++		if (!__perf_env__insert_bpf_prog_info(env, info_node)) {
++			free(info_linear);
++			free(info_node);
++		}
+ 	}
+ 
+ 	up_write(&env->bpf_progs.lock);
+@@ -3235,7 +3238,8 @@ static int process_bpf_btf(struct feat_fd *ff, void *data __maybe_unused)
+ 		if (__do_read(ff, node->data, data_size))
+ 			goto out;
+ 
+-		__perf_env__insert_btf(env, node);
++		if (!__perf_env__insert_btf(env, node))
++			free(node);
+ 		node = NULL;
+ 	}
+ 
+diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
+index 27d5345d2b307a..9be2f4479f5257 100644
+--- a/tools/perf/util/machine.c
++++ b/tools/perf/util/machine.c
+@@ -1003,7 +1003,7 @@ static int machine__get_running_kernel_start(struct machine *machine,
+ 
+ 	err = kallsyms__get_symbol_start(filename, "_edata", &addr);
+ 	if (err)
+-		err = kallsyms__get_function_start(filename, "_etext", &addr);
++		err = kallsyms__get_symbol_start(filename, "_etext", &addr);
+ 	if (!err)
+ 		*end = addr;
+ 
+diff --git a/tools/perf/util/maps.c b/tools/perf/util/maps.c
+index 432399cbe5dd39..09c9cc326c08d4 100644
+--- a/tools/perf/util/maps.c
++++ b/tools/perf/util/maps.c
+@@ -1136,8 +1136,13 @@ struct map *maps__find_next_entry(struct maps *maps, struct map *map)
+ 	struct map *result = NULL;
+ 
+ 	down_read(maps__lock(maps));
++	while (!maps__maps_by_address_sorted(maps)) {
++		up_read(maps__lock(maps));
++		maps__sort_by_address(maps);
++		down_read(maps__lock(maps));
++	}
+ 	i = maps__by_address_index(maps, map);
+-	if (i < maps__nr_maps(maps))
++	if (++i < maps__nr_maps(maps))
+ 		result = map__get(maps__maps_by_address(maps)[i]);
+ 
+ 	up_read(maps__lock(maps));
+diff --git a/tools/perf/util/namespaces.c b/tools/perf/util/namespaces.c
+index cb185c5659d6b3..68f5de2d79c72c 100644
+--- a/tools/perf/util/namespaces.c
++++ b/tools/perf/util/namespaces.c
+@@ -266,11 +266,16 @@ pid_t nsinfo__pid(const struct nsinfo  *nsi)
+ 	return RC_CHK_ACCESS(nsi)->pid;
+ }
+ 
+-pid_t nsinfo__in_pidns(const struct nsinfo  *nsi)
++bool nsinfo__in_pidns(const struct nsinfo *nsi)
+ {
+ 	return RC_CHK_ACCESS(nsi)->in_pidns;
+ }
+ 
++void nsinfo__set_in_pidns(struct nsinfo *nsi)
++{
++	RC_CHK_ACCESS(nsi)->in_pidns = true;
++}
++
+ void nsinfo__mountns_enter(struct nsinfo *nsi,
+ 				  struct nscookie *nc)
+ {
+diff --git a/tools/perf/util/namespaces.h b/tools/perf/util/namespaces.h
+index 8c0731c6cbb7ee..e95c79b80e27c8 100644
+--- a/tools/perf/util/namespaces.h
++++ b/tools/perf/util/namespaces.h
+@@ -58,7 +58,8 @@ void nsinfo__clear_need_setns(struct nsinfo *nsi);
+ pid_t nsinfo__tgid(const struct nsinfo  *nsi);
+ pid_t nsinfo__nstgid(const struct nsinfo  *nsi);
+ pid_t nsinfo__pid(const struct nsinfo  *nsi);
+-pid_t nsinfo__in_pidns(const struct nsinfo  *nsi);
++bool nsinfo__in_pidns(const struct nsinfo  *nsi);
++void nsinfo__set_in_pidns(struct nsinfo *nsi);
+ 
+ void nsinfo__mountns_enter(struct nsinfo *nsi, struct nscookie *nc);
+ void nsinfo__mountns_exit(struct nscookie *nc);
+diff --git a/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c b/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
+index ae6af354a81db5..08a399b0be286c 100644
+--- a/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
++++ b/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
+@@ -33,7 +33,7 @@ static int mperf_get_count_percent(unsigned int self_id, double *percent,
+ 				   unsigned int cpu);
+ static int mperf_get_count_freq(unsigned int id, unsigned long long *count,
+ 				unsigned int cpu);
+-static struct timespec time_start, time_end;
++static struct timespec *time_start, *time_end;
+ 
+ static cstate_t mperf_cstates[MPERF_CSTATE_COUNT] = {
+ 	{
+@@ -174,7 +174,7 @@ static int mperf_get_count_percent(unsigned int id, double *percent,
+ 		dprint("%s: TSC Ref - mperf_diff: %llu, tsc_diff: %llu\n",
+ 		       mperf_cstates[id].name, mperf_diff, tsc_diff);
+ 	} else if (max_freq_mode == MAX_FREQ_SYSFS) {
+-		timediff = max_frequency * timespec_diff_us(time_start, time_end);
++		timediff = max_frequency * timespec_diff_us(time_start[cpu], time_end[cpu]);
+ 		*percent = 100.0 * mperf_diff / timediff;
+ 		dprint("%s: MAXFREQ - mperf_diff: %llu, time_diff: %llu\n",
+ 		       mperf_cstates[id].name, mperf_diff, timediff);
+@@ -207,7 +207,7 @@ static int mperf_get_count_freq(unsigned int id, unsigned long long *count,
+ 	if (max_freq_mode == MAX_FREQ_TSC_REF) {
+ 		/* Calculate max_freq from TSC count */
+ 		tsc_diff = tsc_at_measure_end[cpu] - tsc_at_measure_start[cpu];
+-		time_diff = timespec_diff_us(time_start, time_end);
++		time_diff = timespec_diff_us(time_start[cpu], time_end[cpu]);
+ 		max_frequency = tsc_diff / time_diff;
+ 	}
+ 
+@@ -226,9 +226,8 @@ static int mperf_start(void)
+ {
+ 	int cpu;
+ 
+-	clock_gettime(CLOCK_REALTIME, &time_start);
+-
+ 	for (cpu = 0; cpu < cpu_count; cpu++) {
++		clock_gettime(CLOCK_REALTIME, &time_start[cpu]);
+ 		mperf_get_tsc(&tsc_at_measure_start[cpu]);
+ 		mperf_init_stats(cpu);
+ 	}
+@@ -243,9 +242,9 @@ static int mperf_stop(void)
+ 	for (cpu = 0; cpu < cpu_count; cpu++) {
+ 		mperf_measure_stats(cpu);
+ 		mperf_get_tsc(&tsc_at_measure_end[cpu]);
++		clock_gettime(CLOCK_REALTIME, &time_end[cpu]);
+ 	}
+ 
+-	clock_gettime(CLOCK_REALTIME, &time_end);
+ 	return 0;
+ }
+ 
+@@ -349,6 +348,8 @@ struct cpuidle_monitor *mperf_register(void)
+ 	aperf_current_count = calloc(cpu_count, sizeof(unsigned long long));
+ 	tsc_at_measure_start = calloc(cpu_count, sizeof(unsigned long long));
+ 	tsc_at_measure_end = calloc(cpu_count, sizeof(unsigned long long));
++	time_start = calloc(cpu_count, sizeof(struct timespec));
++	time_end = calloc(cpu_count, sizeof(struct timespec));
+ 	mperf_monitor.name_len = strlen(mperf_monitor.name);
+ 	return &mperf_monitor;
+ }
+@@ -361,6 +362,8 @@ void mperf_unregister(void)
+ 	free(aperf_current_count);
+ 	free(tsc_at_measure_start);
+ 	free(tsc_at_measure_end);
++	free(time_start);
++	free(time_end);
+ 	free(is_valid);
+ }
+ 
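
The per-CPU timestamp arrays matter because mperf_start() and mperf_stop()
walk the CPUs in a loop, so the first and last CPU are sampled at noticeably
different wall-clock times; a single global interval then skews the
MAX_FREQ_SYSFS percentage. The per-CPU microsecond delta reduces to a sketch
like this (modelled on the timespec_diff_us() helper this file relies on):

	#include <time.h>

	/* Microseconds between two CLOCK_REALTIME samples. */
	static long long ts_diff_us(struct timespec start, struct timespec end)
	{
		return (long long)(end.tv_sec - start.tv_sec) * 1000000LL +
		       (end.tv_nsec - start.tv_nsec) / 1000;
	}
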
+diff --git a/tools/power/x86/turbostat/turbostat.8 b/tools/power/x86/turbostat/turbostat.8
+index 067717bce1d4ab..56c7ff6efcdabc 100644
+--- a/tools/power/x86/turbostat/turbostat.8
++++ b/tools/power/x86/turbostat/turbostat.8
+@@ -33,6 +33,9 @@ name as necessary to disambiguate it from others is necessary.  Note that option
+ 		msr0xXXX is a hex offset, eg. msr0x10
+ 		/sys/path... is an absolute path to a sysfs attribute
+ 		<device> is a perf device from /sys/bus/event_source/devices/<device> eg. cstate_core
++			On Intel hybrid platforms, instead of one "cpu" perf device there are two: "cpu_core" and "cpu_atom", for P-cores and E-cores respectively.
++			Turbostat, in this case, allows the user to specify the "cpu" device; it will automatically detect the type of each CPU and translate it to "cpu_core" or "cpu_atom" accordingly.
++			For a complete example, see "ADD PERF COUNTER EXAMPLE #2 (using virtual "cpu" device)".
+ 		<event> is a perf event for given device from /sys/bus/event_source/devices/<device>/events/<event> eg. c1-residency
+ 			perf/cstate_core/c1-residency would then use /sys/bus/event_source/devices/cstate_core/events/c1-residency
+ 
+@@ -387,6 +390,28 @@ CPU     pCPU%c1 CPU%c1
+ 
+ .fi
+ 
++.SH ADD PERF COUNTER EXAMPLE #2 (using virtual cpu device)
++Here we run on a hybrid Raptor Lake platform.
++We limit turbostat to show output for just cpu0 (a P-core) and cpu12 (an E-core).
++We add a counter showing the number of L3 cache misses, using the virtual "cpu" device,
++labeling it with the column header, "VCMISS".
++We add a counter showing the number of L3 cache misses, using the "cpu_core" device,
++labeling it with the column header, "PCMISS". This will fail on E-core cpu12.
++We add a counter showing the number of L3 cache misses, using the "cpu_atom" device,
++labeling it with the column header, "ECMISS". This will fail on P-core cpu0.
++We display the counters only once, after the conclusion of a 0.1 second sleep.
++.nf
++sudo ./turbostat --quiet --cpu 0,12 --show CPU --add perf/cpu/cache-misses,cpu,delta,raw,VCMISS --add perf/cpu_core/cache-misses,cpu,delta,raw,PCMISS --add perf/cpu_atom/cache-misses,cpu,delta,raw,ECMISS sleep .1
++turbostat: added_perf_counters_init_: perf/cpu_atom/cache-misses: failed to open counter on cpu0
++turbostat: added_perf_counters_init_: perf/cpu_core/cache-misses: failed to open counter on cpu12
++0.104630 sec
++CPU                 ECMISS                  PCMISS                  VCMISS
++-       0x0000000000000000      0x0000000000000000      0x0000000000000000
++0       0x0000000000000000      0x0000000000007951      0x0000000000007796
++12      0x000000000001137a      0x0000000000000000      0x0000000000011392
++
++.fi
++
+ .SH ADD PMT COUNTER EXAMPLE
+ Here we limit turbostat to showing just the CPU number 0.
+ We add two counters, showing crystal clock count and the DC6 residency.
+diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
+index a5ebee8b23bbe3..235e82fe7d0a56 100644
+--- a/tools/power/x86/turbostat/turbostat.c
++++ b/tools/power/x86/turbostat/turbostat.c
+@@ -31,6 +31,9 @@
+ )
+ // end copied section
+ 
++#define CPUID_LEAF_MODEL_ID			0x1A
++#define CPUID_LEAF_MODEL_ID_CORE_TYPE_SHIFT	24
++
+ #define X86_VENDOR_INTEL	0
+ 
+ #include INTEL_FAMILY_HEADER
+@@ -89,6 +92,11 @@
+ #define PERF_DEV_NAME_BYTES 32
+ #define PERF_EVT_NAME_BYTES 32
+ 
++#define INTEL_ECORE_TYPE	0x20
++#define INTEL_PCORE_TYPE	0x40
++
++#define ROUND_UP_TO_PAGE_SIZE(n) (((n) + 0x1000UL-1UL) & ~(0x1000UL-1UL))
++
+ enum counter_scope { SCOPE_CPU, SCOPE_CORE, SCOPE_PACKAGE };
+ enum counter_type { COUNTER_ITEMS, COUNTER_CYCLES, COUNTER_SECONDS, COUNTER_USEC, COUNTER_K2M };
+ enum counter_format { FORMAT_RAW, FORMAT_DELTA, FORMAT_PERCENT, FORMAT_AVERAGE };
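ROUND_UP_TO_PAGE_SIZE() above is the usual round-up-to-a-power-of-two idiom:
add (page size - 1), then mask off the low bits.  Later in this patch it
replaces the expression (size + 0x1000UL) & (~0x1000UL) in pmt_mmio_open(),
which cleared only bit 12 rather than the low 12 bits.  A quick standalone
check of the arithmetic (illustration, not part of the patch):

#include <assert.h>

#define ROUND_UP_TO_PAGE_SIZE(n) (((n) + 0x1000UL-1UL) & ~(0x1000UL-1UL))

int main(void)
{
	assert(ROUND_UP_TO_PAGE_SIZE(0x0000UL) == 0x0000UL);
	assert(ROUND_UP_TO_PAGE_SIZE(0x0001UL) == 0x1000UL);
	assert(ROUND_UP_TO_PAGE_SIZE(0x1000UL) == 0x1000UL);
	assert(ROUND_UP_TO_PAGE_SIZE(0x1234UL) == 0x2000UL);

	/* The old expression merely cleared bit 12, so a 0x800-byte
	 * region came out as 0x800 instead of one full page: */
	assert(((0x0800UL + 0x1000UL) & ~0x1000UL) == 0x0800UL);
	return 0;
}
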
+@@ -1079,8 +1087,8 @@ int backwards_count;
+ char *progname;
+ 
+ #define CPU_SUBSET_MAXCPUS	1024	/* need to use before probe... */
+-cpu_set_t *cpu_present_set, *cpu_effective_set, *cpu_allowed_set, *cpu_affinity_set, *cpu_subset;
+-size_t cpu_present_setsize, cpu_effective_setsize, cpu_allowed_setsize, cpu_affinity_setsize, cpu_subset_size;
++cpu_set_t *cpu_present_set, *cpu_possible_set, *cpu_effective_set, *cpu_allowed_set, *cpu_affinity_set, *cpu_subset;
++size_t cpu_present_setsize, cpu_possible_setsize, cpu_effective_setsize, cpu_allowed_setsize, cpu_affinity_setsize, cpu_subset_size;
+ #define MAX_ADDED_THREAD_COUNTERS 24
+ #define MAX_ADDED_CORE_COUNTERS 8
+ #define MAX_ADDED_PACKAGE_COUNTERS 16
+@@ -1848,6 +1856,7 @@ struct cpu_topology {
+ 	int logical_node_id;	/* 0-based count within the package */
+ 	int physical_core_id;
+ 	int thread_id;
++	int type;
+ 	cpu_set_t *put_ids;	/* Processing Unit/Thread IDs */
+ } *cpus;
+ 
+@@ -5659,6 +5668,32 @@ int init_thread_id(int cpu)
+ 	return 0;
+ }
+ 
++int set_my_cpu_type(void)
++{
++	unsigned int eax, ebx, ecx, edx;
++	unsigned int max_level;
++
++	__cpuid(0, max_level, ebx, ecx, edx);
++
++	if (max_level < CPUID_LEAF_MODEL_ID)
++		return 0;
++
++	__cpuid(CPUID_LEAF_MODEL_ID, eax, ebx, ecx, edx);
++
++	return (eax >> CPUID_LEAF_MODEL_ID_CORE_TYPE_SHIFT);
++}
++
++int set_cpu_hybrid_type(int cpu)
++{
++	if (cpu_migrate(cpu))
++		return -1;
++
++	int type = set_my_cpu_type();
++
++	cpus[cpu].type = type;
++	return 0;
++}
++
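set_my_cpu_type() queries CPUID leaf 0x1A, the hybrid information leaf; bits
31:24 of EAX carry the core type, matching the INTEL_ECORE_TYPE (0x20) and
INTEL_PCORE_TYPE (0x40) constants defined earlier in this patch.  CPUID
reports for the CPU it executes on, which is why set_cpu_hybrid_type()
migrates to the target CPU first.  A standalone sketch of the same probe
(assumes an x86 host and GCC/Clang's <cpuid.h>; illustration only):

#include <stdio.h>
#include <cpuid.h>

#define CPUID_LEAF_MODEL_ID			0x1A
#define CPUID_LEAF_MODEL_ID_CORE_TYPE_SHIFT	24

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Leaf 0x1A is only valid if the max supported leaf reaches it. */
	if (__get_cpuid_max(0, NULL) < CPUID_LEAF_MODEL_ID) {
		puts("no hybrid information leaf");
		return 0;
	}

	__cpuid(CPUID_LEAF_MODEL_ID, eax, ebx, ecx, edx);

	/* The answer is for whichever CPU this thread runs on; turbostat
	 * pins with cpu_migrate() before asking. */
	switch (eax >> CPUID_LEAF_MODEL_ID_CORE_TYPE_SHIFT) {
	case 0x40: puts("P-core (Core)"); break;
	case 0x20: puts("E-core (Atom)"); break;
	default:   puts("unknown or non-hybrid"); break;
	}
	return 0;
}
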
+ /*
+  * snapshot_proc_interrupts()
+  *
+@@ -8188,6 +8223,33 @@ int dir_filter(const struct dirent *dirp)
+ 		return 0;
+ }
+ 
++char *possible_file = "/sys/devices/system/cpu/possible";
++char possible_buf[1024];
++
++int initialize_cpu_possible_set(void)
++{
++	FILE *fp;
++
++	fp = fopen(possible_file, "r");
++	if (!fp) {
++		warn("open %s", possible_file);
++		return -1;
++	}
++	if (fread(possible_buf, sizeof(char), 1024, fp) == 0) {
++		warn("read %s", possible_file);
++		goto err;
++	}
++	if (parse_cpu_str(possible_buf, cpu_possible_set, cpu_possible_setsize)) {
++		warnx("%s: cpu str malformat %s\n", possible_file, cpu_effective_str);
++		goto err;
++	}
++	return 0;
++
++err:
++	fclose(fp);
++	return -1;
++}
++
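initialize_cpu_possible_set() reads /sys/devices/system/cpu/possible, a kernel
cpu-list string such as "0-63" or "0-7,16-23", and hands it to turbostat's
existing parse_cpu_str().  For reference, a minimal standalone parser for that
format (a sketch assuming simple comma-separated "n" and "a-b" tokens; not the
patch's code):

#include <stdio.h>
#include <string.h>

/* Parse a kernel cpu-list string like "0-7,16-23" and print each CPU. */
static int parse_cpu_list(char *s)
{
	char *tok, *saveptr;

	for (tok = strtok_r(s, ",\n", &saveptr); tok;
	     tok = strtok_r(NULL, ",\n", &saveptr)) {
		int lo, hi, cpu;

		if (sscanf(tok, "%d-%d", &lo, &hi) == 2)
			;			/* "a-b" range */
		else if (sscanf(tok, "%d", &lo) == 1)
			hi = lo;		/* single "n" */
		else
			return -1;
		for (cpu = lo; cpu <= hi; cpu++)
			printf("cpu%d possible\n", cpu);
	}
	return 0;
}

int main(void)
{
	char buf[] = "0-3,8\n";	/* sample /sys/.../possible contents */

	return parse_cpu_list(buf) ? 1 : 0;
}
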
+ void topology_probe(bool startup)
+ {
+ 	int i;
+@@ -8219,6 +8281,16 @@ void topology_probe(bool startup)
+ 	CPU_ZERO_S(cpu_present_setsize, cpu_present_set);
+ 	for_all_proc_cpus(mark_cpu_present);
+ 
++	/*
++	 * Allocate and initialize cpu_possible_set
++	 */
++	cpu_possible_set = CPU_ALLOC((topo.max_cpu_num + 1));
++	if (cpu_possible_set == NULL)
++		err(3, "CPU_ALLOC");
++	cpu_possible_setsize = CPU_ALLOC_SIZE((topo.max_cpu_num + 1));
++	CPU_ZERO_S(cpu_possible_setsize, cpu_possible_set);
++	initialize_cpu_possible_set();
++
+ 	/*
+ 	 * Allocate and initialize cpu_effective_set
+ 	 */
+@@ -8287,6 +8359,8 @@ void topology_probe(bool startup)
+ 
+ 	for_all_proc_cpus(init_thread_id);
+ 
++	for_all_proc_cpus(set_cpu_hybrid_type);
++
+ 	/*
+ 	 * For online cpus
+ 	 * find max_core_id, max_package_id
+@@ -8551,6 +8625,35 @@ void check_perf_access(void)
+ 		bic_enabled &= ~BIC_IPC;
+ }
+ 
++bool perf_has_hybrid_devices(void)
++{
++	/*
++	 *  0: unknown
++	 *  1: has separate perf device for p and e core
++	 * -1: doesn't have separate perf device for p and e core
++	 */
++	static int cached;
++
++	if (cached > 0)
++		return true;
++
++	if (cached < 0)
++		return false;
++
++	if (access("/sys/bus/event_source/devices/cpu_core", F_OK)) {
++		cached = -1;
++		return false;
++	}
++
++	if (access("/sys/bus/event_source/devices/cpu_atom", F_OK)) {
++		cached = -1;
++		return false;
++	}
++
++	cached = 1;
++	return true;
++}
++
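perf_has_hybrid_devices() above probes for the two sysfs directories and
memoizes the answer in a static tri-state.  Each perf event-source directory
also exposes a "type" file whose value goes into perf_event_attr.type; a
sketch of reading it (illustration only -- turbostat uses its own
read_perf_type() helper for this):

#include <stdio.h>

/* Read the event-source type id for a perf device, e.g. "cpu_core";
 * returns (unsigned int)-1 on failure, like the error checks below. */
static unsigned int read_type(const char *device)
{
	char path[128];
	unsigned int type = (unsigned int)-1;
	FILE *fp;

	snprintf(path, sizeof(path),
		 "/sys/bus/event_source/devices/%s/type", device);
	fp = fopen(path, "r");
	if (!fp)
		return type;
	if (fscanf(fp, "%u", &type) != 1)
		type = (unsigned int)-1;
	fclose(fp);
	return type;
}

int main(void)
{
	printf("cpu_core type: %u\n", read_type("cpu_core"));
	printf("cpu_atom type: %u\n", read_type("cpu_atom"));
	return 0;
}
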
+ int added_perf_counters_init_(struct perf_counter_info *pinfo)
+ {
+ 	size_t num_domains = 0;
+@@ -8607,29 +8710,56 @@ int added_perf_counters_init_(struct perf_counter_info *pinfo)
+ 			if (domain_visited[next_domain])
+ 				continue;
+ 
+-			perf_type = read_perf_type(pinfo->device);
++			/*
++			 * Intel hybrid platforms expose different perf devices for P and E cores.
++			 * Instead of one "/sys/bus/event_source/devices/cpu" device, there are
++			 * "/sys/bus/event_source/devices/{cpu_core,cpu_atom}".
++			 *
++			 * This makes things more complicated for the user, because most of the counters
++			 * are available on both devices and would otherwise have to be handled manually.
++			 *
++			 * The code below allows the user to keep using the old "cpu" name, which is translated accordingly.
++			 */
++			const char *perf_device = pinfo->device;
++
++			if (strcmp(perf_device, "cpu") == 0 && perf_has_hybrid_devices()) {
++				switch (cpus[cpu].type) {
++				case INTEL_PCORE_TYPE:
++					perf_device = "cpu_core";
++					break;
++
++				case INTEL_ECORE_TYPE:
++					perf_device = "cpu_atom";
++					break;
++
++				default: /* Don't change, we will probably fail and report a problem soon. */
++					break;
++				}
++			}
++
++			perf_type = read_perf_type(perf_device);
+ 			if (perf_type == (unsigned int)-1) {
+ 				warnx("%s: perf/%s/%s: failed to read %s",
+-				      __func__, pinfo->device, pinfo->event, "type");
++				      __func__, perf_device, pinfo->event, "type");
+ 				continue;
+ 			}
+ 
+-			perf_config = read_perf_config(pinfo->device, pinfo->event);
++			perf_config = read_perf_config(perf_device, pinfo->event);
+ 			if (perf_config == (unsigned int)-1) {
+ 				warnx("%s: perf/%s/%s: failed to read %s",
+-				      __func__, pinfo->device, pinfo->event, "config");
++				      __func__, perf_device, pinfo->event, "config");
+ 				continue;
+ 			}
+ 
+ 			/* Scale is not required, some counters just don't have it. */
+-			perf_scale = read_perf_scale(pinfo->device, pinfo->event);
++			perf_scale = read_perf_scale(perf_device, pinfo->event);
+ 			if (perf_scale == 0.0)
+ 				perf_scale = 1.0;
+ 
+ 			fd_perf = open_perf_counter(cpu, perf_type, perf_config, -1, 0);
+ 			if (fd_perf == -1) {
+ 				warnx("%s: perf/%s/%s: failed to open counter on cpu%d",
+-				      __func__, pinfo->device, pinfo->event, cpu);
++				      __func__, perf_device, pinfo->event, cpu);
+ 				continue;
+ 			}
+ 
+@@ -8639,7 +8769,7 @@ int added_perf_counters_init_(struct perf_counter_info *pinfo)
+ 
+ 			if (debug)
+ 				fprintf(stderr, "Add perf/%s/%s cpu%d: %d\n",
+-					pinfo->device, pinfo->event, cpu, pinfo->fd_perf_per_domain[next_domain]);
++					perf_device, pinfo->event, cpu, pinfo->fd_perf_per_domain[next_domain]);
+ 		}
+ 
+ 		pinfo = pinfo->next;
+@@ -8762,7 +8892,7 @@ struct pmt_mmio *pmt_mmio_open(unsigned int target_guid)
+ 		if (fd_pmt == -1)
+ 			goto loop_cleanup_and_break;
+ 
+-		mmap_size = (size + 0x1000UL) & (~0x1000UL);
++		mmap_size = ROUND_UP_TO_PAGE_SIZE(size);
+ 		mmio = mmap(0, mmap_size, PROT_READ, MAP_SHARED, fd_pmt, 0);
+ 		if (mmio != MAP_FAILED) {
+ 
+@@ -9001,6 +9131,18 @@ void turbostat_init()
+ 	}
+ }
+ 
++void affinitize_child(void)
++{
++	/* Prefer cpu_possible_set, if available */
++	if (sched_setaffinity(0, cpu_possible_setsize, cpu_possible_set)) {
++		warn("sched_setaffinity cpu_possible_set");
++
++		/* Otherwise, allow child to run on same cpu set as turbostat */
++		if (sched_setaffinity(0, cpu_allowed_setsize, cpu_allowed_set))
++			warn("sched_setaffinity cpu_allowed_set");
++	}
++}
++
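affinitize_child() widens the forked workload's CPU mask: it prefers the
possible set, so the child may run on CPUs that turbostat itself was
restricted away from (e.g. via --cpu), and falls back to turbostat's own
allowed set.  A minimal sched_setaffinity() usage sketch (illustration):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(0, &set);		/* allow CPU 0 only */

	/* pid 0 means the calling thread. */
	if (sched_setaffinity(0, sizeof(set), &set))
		perror("sched_setaffinity");
	return 0;
}
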
+ int fork_it(char **argv)
+ {
+ 	pid_t child_pid;
+@@ -9016,6 +9158,7 @@ int fork_it(char **argv)
+ 	child_pid = fork();
+ 	if (!child_pid) {
+ 		/* child */
++		affinitize_child();
+ 		execvp(argv[0], argv);
+ 		err(errno, "exec %s", argv[0]);
+ 	} else {
+diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl
+index dacad94e2be42a..c76ad0be54e2ed 100755
+--- a/tools/testing/ktest/ktest.pl
++++ b/tools/testing/ktest/ktest.pl
+@@ -2419,6 +2419,11 @@ sub get_version {
+     return if ($have_version);
+     doprint "$make kernelrelease ... ";
+     $version = `$make -s kernelrelease | tail -1`;
++    if (!length($version)) {
++	run_command "$make allnoconfig" or return 0;
++	doprint "$make kernelrelease ... ";
++	$version = `$make -s kernelrelease | tail -1`;
++    }
+     chomp($version);
+     doprint "$version\n";
+     $have_version = 1;
+@@ -2960,8 +2965,6 @@ sub run_bisect_test {
+ 
+     my $failed = 0;
+     my $result;
+-    my $output;
+-    my $ret;
+ 
+     $in_bisect = 1;
+ 
+diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
+index 43a02931847854..6fc29996ae2938 100644
+--- a/tools/testing/selftests/bpf/Makefile
++++ b/tools/testing/selftests/bpf/Makefile
+@@ -193,9 +193,9 @@ ifeq ($(shell expr $(MAKE_VERSION) \>= 4.4), 1)
+ $(let OUTPUT,$(OUTPUT)/,\
+ 	$(eval include ../../../build/Makefile.feature))
+ else
+-OUTPUT := $(OUTPUT)/
++override OUTPUT := $(OUTPUT)/
+ $(eval include ../../../build/Makefile.feature)
+-OUTPUT := $(patsubst %/,%,$(OUTPUT))
++override OUTPUT := $(patsubst %/,%,$(OUTPUT))
+ endif
+ endif
+ 
+diff --git a/tools/testing/selftests/bpf/prog_tests/btf_distill.c b/tools/testing/selftests/bpf/prog_tests/btf_distill.c
+index ca84726d5ac1b9..b72b966df77b90 100644
+--- a/tools/testing/selftests/bpf/prog_tests/btf_distill.c
++++ b/tools/testing/selftests/bpf/prog_tests/btf_distill.c
+@@ -385,7 +385,7 @@ static void test_distilled_base_missing_err(void)
+ 		"[2] INT 'int' size=8 bits_offset=0 nr_bits=64 encoding=SIGNED");
+ 	btf5 = btf__new_empty();
+ 	if (!ASSERT_OK_PTR(btf5, "empty_reloc_btf"))
+-		return;
++		goto cleanup;
+ 	btf__add_int(btf5, "int", 4, BTF_INT_SIGNED);   /* [1] int */
+ 	VALIDATE_RAW_BTF(
+ 		btf5,
+@@ -478,7 +478,7 @@ static void test_distilled_base_multi_err2(void)
+ 		"[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED");
+ 	btf5 = btf__new_empty();
+ 	if (!ASSERT_OK_PTR(btf5, "empty_reloc_btf"))
+-		return;
++		goto cleanup;
+ 	btf__add_int(btf5, "int", 4, BTF_INT_SIGNED);   /* [1] int */
+ 	btf__add_int(btf5, "int", 4, BTF_INT_SIGNED);   /* [2] int */
+ 	VALIDATE_RAW_BTF(
+diff --git a/tools/testing/selftests/bpf/prog_tests/fill_link_info.c b/tools/testing/selftests/bpf/prog_tests/fill_link_info.c
+index d50cbd8040d45f..e59af2aa660166 100644
+--- a/tools/testing/selftests/bpf/prog_tests/fill_link_info.c
++++ b/tools/testing/selftests/bpf/prog_tests/fill_link_info.c
+@@ -171,6 +171,10 @@ static void test_kprobe_fill_link_info(struct test_fill_link_info *skel,
+ 		/* See also arch_adjust_kprobe_addr(). */
+ 		if (skel->kconfig->CONFIG_X86_KERNEL_IBT)
+ 			entry_offset = 4;
++		if (skel->kconfig->CONFIG_PPC64 &&
++		    skel->kconfig->CONFIG_KPROBES_ON_FTRACE &&
++		    !skel->kconfig->CONFIG_PPC_FTRACE_OUT_OF_LINE)
++			entry_offset = 4;
+ 		err = verify_perf_link_info(link_fd, type, kprobe_addr, 0, entry_offset);
+ 		ASSERT_OK(err, "verify_perf_link_info");
+ 	} else {
+diff --git a/tools/testing/selftests/bpf/prog_tests/tailcalls.c b/tools/testing/selftests/bpf/prog_tests/tailcalls.c
+index 21c5a37846adea..40f22454cf05b0 100644
+--- a/tools/testing/selftests/bpf/prog_tests/tailcalls.c
++++ b/tools/testing/selftests/bpf/prog_tests/tailcalls.c
+@@ -1496,8 +1496,8 @@ static void test_tailcall_bpf2bpf_hierarchy_3(void)
+ 	RUN_TESTS(tailcall_bpf2bpf_hierarchy3);
+ }
+ 
+-/* test_tailcall_freplace checks that the attached freplace prog is OK to
+- * update the prog_array map.
++/* test_tailcall_freplace checks that the freplace prog fails to update the
++ * prog_array map, whether or not the freplace prog is attached to its target.
+  */
+ static void test_tailcall_freplace(void)
+ {
+@@ -1505,7 +1505,7 @@ static void test_tailcall_freplace(void)
+ 	struct bpf_link *freplace_link = NULL;
+ 	struct bpf_program *freplace_prog;
+ 	struct tc_bpf2bpf *tc_skel = NULL;
+-	int prog_fd, map_fd;
++	int prog_fd, tc_prog_fd, map_fd;
+ 	char buff[128] = {};
+ 	int err, key;
+ 
+@@ -1523,9 +1523,10 @@ static void test_tailcall_freplace(void)
+ 	if (!ASSERT_OK_PTR(tc_skel, "tc_bpf2bpf__open_and_load"))
+ 		goto out;
+ 
+-	prog_fd = bpf_program__fd(tc_skel->progs.entry_tc);
++	tc_prog_fd = bpf_program__fd(tc_skel->progs.entry_tc);
+ 	freplace_prog = freplace_skel->progs.entry_freplace;
+-	err = bpf_program__set_attach_target(freplace_prog, prog_fd, "subprog");
++	err = bpf_program__set_attach_target(freplace_prog, tc_prog_fd,
++					     "subprog_tc");
+ 	if (!ASSERT_OK(err, "set_attach_target"))
+ 		goto out;
+ 
+@@ -1533,27 +1534,116 @@ static void test_tailcall_freplace(void)
+ 	if (!ASSERT_OK(err, "tailcall_freplace__load"))
+ 		goto out;
+ 
+-	freplace_link = bpf_program__attach_freplace(freplace_prog, prog_fd,
+-						     "subprog");
++	map_fd = bpf_map__fd(freplace_skel->maps.jmp_table);
++	prog_fd = bpf_program__fd(freplace_prog);
++	key = 0;
++	err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY);
++	ASSERT_ERR(err, "update jmp_table failure");
++
++	freplace_link = bpf_program__attach_freplace(freplace_prog, tc_prog_fd,
++						     "subprog_tc");
+ 	if (!ASSERT_OK_PTR(freplace_link, "attach_freplace"))
+ 		goto out;
+ 
+-	map_fd = bpf_map__fd(freplace_skel->maps.jmp_table);
+-	prog_fd = bpf_program__fd(freplace_prog);
++	err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY);
++	ASSERT_ERR(err, "update jmp_table failure");
++
++out:
++	bpf_link__destroy(freplace_link);
++	tailcall_freplace__destroy(freplace_skel);
++	tc_bpf2bpf__destroy(tc_skel);
++}
++
++/* test_tailcall_bpf2bpf_freplace checks that attaching an freplace prog to a
++ * tail callee prog fails, and that updating an extended prog into the
++ * prog_array map fails.
++ */
++static void test_tailcall_bpf2bpf_freplace(void)
++{
++	struct tailcall_freplace *freplace_skel = NULL;
++	struct bpf_link *freplace_link = NULL;
++	struct tc_bpf2bpf *tc_skel = NULL;
++	char buff[128] = {};
++	int prog_fd, map_fd;
++	int err, key;
++
++	LIBBPF_OPTS(bpf_test_run_opts, topts,
++		    .data_in = buff,
++		    .data_size_in = sizeof(buff),
++		    .repeat = 1,
++	);
++
++	tc_skel = tc_bpf2bpf__open_and_load();
++	if (!ASSERT_OK_PTR(tc_skel, "tc_bpf2bpf__open_and_load"))
++		goto out;
++
++	prog_fd = bpf_program__fd(tc_skel->progs.entry_tc);
++	freplace_skel = tailcall_freplace__open();
++	if (!ASSERT_OK_PTR(freplace_skel, "tailcall_freplace__open"))
++		goto out;
++
++	err = bpf_program__set_attach_target(freplace_skel->progs.entry_freplace,
++					     prog_fd, "subprog_tc");
++	if (!ASSERT_OK(err, "set_attach_target"))
++		goto out;
++
++	err = tailcall_freplace__load(freplace_skel);
++	if (!ASSERT_OK(err, "tailcall_freplace__load"))
++		goto out;
++
++	/* OK to attach then detach freplace prog. */
++
++	freplace_link = bpf_program__attach_freplace(freplace_skel->progs.entry_freplace,
++						     prog_fd, "subprog_tc");
++	if (!ASSERT_OK_PTR(freplace_link, "attach_freplace"))
++		goto out;
++
++	err = bpf_link__destroy(freplace_link);
++	if (!ASSERT_OK(err, "destroy link"))
++		goto out;
++
++	/* OK to update prog_array map then delete element from the map. */
++
+ 	key = 0;
++	map_fd = bpf_map__fd(freplace_skel->maps.jmp_table);
+ 	err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY);
+ 	if (!ASSERT_OK(err, "update jmp_table"))
+ 		goto out;
+ 
+-	prog_fd = bpf_program__fd(tc_skel->progs.entry_tc);
+-	err = bpf_prog_test_run_opts(prog_fd, &topts);
+-	ASSERT_OK(err, "test_run");
+-	ASSERT_EQ(topts.retval, 34, "test_run retval");
++	err = bpf_map_delete_elem(map_fd, &key);
++	if (!ASSERT_OK(err, "delete_elem from jmp_table"))
++		goto out;
++
++	/* Fail to attach a tail callee prog with freplace prog. */
++
++	err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY);
++	if (!ASSERT_OK(err, "update jmp_table"))
++		goto out;
++
++	freplace_link = bpf_program__attach_freplace(freplace_skel->progs.entry_freplace,
++						     prog_fd, "subprog_tc");
++	if (!ASSERT_ERR_PTR(freplace_link, "attach_freplace failure"))
++		goto out;
++
++	err = bpf_map_delete_elem(map_fd, &key);
++	if (!ASSERT_OK(err, "delete_elem from jmp_table"))
++		goto out;
++
++	/* Fail to update an extended prog to prog_array map. */
++
++	freplace_link = bpf_program__attach_freplace(freplace_skel->progs.entry_freplace,
++						     prog_fd, "subprog_tc");
++	if (!ASSERT_OK_PTR(freplace_link, "attach_freplace"))
++		goto out;
++
++	err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY);
++	if (!ASSERT_ERR(err, "update jmp_table failure"))
++		goto out;
+ 
+ out:
+ 	bpf_link__destroy(freplace_link);
+-	tc_bpf2bpf__destroy(tc_skel);
+ 	tailcall_freplace__destroy(freplace_skel);
++	tc_bpf2bpf__destroy(tc_skel);
+ }
+ 
+ void test_tailcalls(void)
+@@ -1606,4 +1696,6 @@ void test_tailcalls(void)
+ 	test_tailcall_bpf2bpf_hierarchy_3();
+ 	if (test__start_subtest("tailcall_freplace"))
+ 		test_tailcall_freplace();
++	if (test__start_subtest("tailcall_bpf2bpf_freplace"))
++		test_tailcall_bpf2bpf_freplace();
+ }
+diff --git a/tools/testing/selftests/bpf/progs/tc_bpf2bpf.c b/tools/testing/selftests/bpf/progs/tc_bpf2bpf.c
+index 79f5087dade224..fe6249d99b315b 100644
+--- a/tools/testing/selftests/bpf/progs/tc_bpf2bpf.c
++++ b/tools/testing/selftests/bpf/progs/tc_bpf2bpf.c
+@@ -5,10 +5,11 @@
+ #include "bpf_misc.h"
+ 
+ __noinline
+-int subprog(struct __sk_buff *skb)
++int subprog_tc(struct __sk_buff *skb)
+ {
+ 	int ret = 1;
+ 
++	__sink(skb);
+ 	__sink(ret);
+ 	/* let verifier know that 'subprog_tc' can change pointers to skb->data */
+ 	bpf_skb_change_proto(skb, 0, 0);
+@@ -18,7 +19,7 @@ int subprog(struct __sk_buff *skb)
+ SEC("tc")
+ int entry_tc(struct __sk_buff *skb)
+ {
+-	return subprog(skb);
++	return subprog_tc(skb);
+ }
+ 
+ char __license[] SEC("license") = "GPL";
+diff --git a/tools/testing/selftests/bpf/progs/test_fill_link_info.c b/tools/testing/selftests/bpf/progs/test_fill_link_info.c
+index 6afa834756e9fd..fac33a14f2009c 100644
+--- a/tools/testing/selftests/bpf/progs/test_fill_link_info.c
++++ b/tools/testing/selftests/bpf/progs/test_fill_link_info.c
+@@ -6,13 +6,20 @@
+ #include <stdbool.h>
+ 
+ extern bool CONFIG_X86_KERNEL_IBT __kconfig __weak;
++extern bool CONFIG_PPC_FTRACE_OUT_OF_LINE __kconfig __weak;
++extern bool CONFIG_KPROBES_ON_FTRACE __kconfig __weak;
++extern bool CONFIG_PPC64 __kconfig __weak;
+ 
+-/* This function is here to have CONFIG_X86_KERNEL_IBT
+- * used and added to object BTF.
++/* This function is here to have CONFIG_X86_KERNEL_IBT,
++ * CONFIG_PPC_FTRACE_OUT_OF_LINE, CONFIG_KPROBES_ON_FTRACE,
++ * CONFIG_PPC64 used and added to object BTF.
+  */
+ int unused(void)
+ {
+-	return CONFIG_X86_KERNEL_IBT ? 0 : 1;
++	return CONFIG_X86_KERNEL_IBT ||
++			CONFIG_PPC_FTRACE_OUT_OF_LINE ||
++			CONFIG_KPROBES_ON_FTRACE ||
++			CONFIG_PPC64 ? 0 : 1;
+ }
+ 
+ SEC("kprobe")
+diff --git a/tools/testing/selftests/bpf/test_tc_tunnel.sh b/tools/testing/selftests/bpf/test_tc_tunnel.sh
+index 7989ec60845455..cb55a908bb0d70 100755
+--- a/tools/testing/selftests/bpf/test_tc_tunnel.sh
++++ b/tools/testing/selftests/bpf/test_tc_tunnel.sh
+@@ -305,6 +305,7 @@ else
+ 	client_connect
+ 	verify_data
+ 	server_listen
++	wait_for_port ${port} ${netcat_opt}
+ fi
+ 
+ # serverside, use BPF for decap
+diff --git a/tools/testing/selftests/bpf/xdp_hw_metadata.c b/tools/testing/selftests/bpf/xdp_hw_metadata.c
+index 6f9956eed797f3..ad6c08dfd6c8cc 100644
+--- a/tools/testing/selftests/bpf/xdp_hw_metadata.c
++++ b/tools/testing/selftests/bpf/xdp_hw_metadata.c
+@@ -79,7 +79,7 @@ static int open_xsk(int ifindex, struct xsk *xsk, __u32 queue_id)
+ 		.fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
+ 		.comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
+ 		.frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE,
+-		.flags = XSK_UMEM__DEFAULT_FLAGS,
++		.flags = XDP_UMEM_TX_METADATA_LEN,
+ 		.tx_metadata_len = sizeof(struct xsk_tx_metadata),
+ 	};
+ 	__u32 idx = 0;
+diff --git a/tools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh b/tools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh
+index 384cfa3d38a6cd..92c2f0376c081d 100755
+--- a/tools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh
++++ b/tools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh
+@@ -142,7 +142,7 @@ function pre_ethtool {
+ }
+ 
+ function check_table {
+-    local path=$NSIM_DEV_DFS/ports/$port/udp_ports_table$1
++    local path=$NSIM_DEV_DFS/ports/$port/udp_ports/table$1
+     local -n expected=$2
+     local last=$3
+ 
+@@ -212,7 +212,7 @@ function check_tables {
+ }
+ 
+ function print_table {
+-    local path=$NSIM_DEV_DFS/ports/$port/udp_ports_table$1
++    local path=$NSIM_DEV_DFS/ports/$port/udp_ports/table$1
+     read -a have < $path
+ 
+     tree $NSIM_DEV_DFS/
+@@ -641,7 +641,7 @@ for port in 0 1; do
+     NSIM_NETDEV=`get_netdev_name old_netdevs`
+     ip link set dev $NSIM_NETDEV up
+ 
+-    echo 110 > $NSIM_DEV_DFS/ports/$port/udp_ports_inject_error
++    echo 110 > $NSIM_DEV_DFS/ports/$port/udp_ports/inject_error
+ 
+     msg="1 - create VxLANs v6"
+     exp0=( 0 0 0 0 )
+@@ -663,7 +663,7 @@ for port in 0 1; do
+     new_geneve gnv0 20000
+ 
+     msg="2 - destroy GENEVE"
+-    echo 2 > $NSIM_DEV_DFS/ports/$port/udp_ports_inject_error
++    echo 2 > $NSIM_DEV_DFS/ports/$port/udp_ports/inject_error
+     exp1=( `mke 20000 2` 0 0 0 )
+     del_dev gnv0
+ 
+@@ -764,7 +764,7 @@ for port in 0 1; do
+     msg="create VxLANs v4"
+     new_vxlan vxlan0 10000 $NSIM_NETDEV
+ 
+-    echo 1 > $NSIM_DEV_DFS/ports/$port/udp_ports_reset
++    echo 1 > $NSIM_DEV_DFS/ports/$port/udp_ports/reset
+     check_tables
+ 
+     msg="NIC device goes down"
+@@ -775,7 +775,7 @@ for port in 0 1; do
+     fi
+     check_tables
+ 
+-    echo 1 > $NSIM_DEV_DFS/ports/$port/udp_ports_reset
++    echo 1 > $NSIM_DEV_DFS/ports/$port/udp_ports/reset
+     check_tables
+ 
+     msg="NIC device goes up again"
+@@ -789,7 +789,7 @@ for port in 0 1; do
+     del_dev vxlan0
+     check_tables
+ 
+-    echo 1 > $NSIM_DEV_DFS/ports/$port/udp_ports_reset
++    echo 1 > $NSIM_DEV_DFS/ports/$port/udp_ports/reset
+     check_tables
+ 
+     msg="destroy NIC"
+@@ -896,7 +896,7 @@ msg="vacate VxLAN in overflow table"
+ exp0=( `mke 10000 1` `mke 10004 1` 0 `mke 10003 1` )
+ del_dev vxlan2
+ 
+-echo 1 > $NSIM_DEV_DFS/ports/$port/udp_ports_reset
++echo 1 > $NSIM_DEV_DFS/ports/$port/udp_ports/reset
+ check_tables
+ 
+ msg="tunnels destroyed 2"
+diff --git a/tools/testing/selftests/ftrace/test.d/00basic/mount_options.tc b/tools/testing/selftests/ftrace/test.d/00basic/mount_options.tc
+index 35e8d47d607259..8a7ce647a60d1c 100644
+--- a/tools/testing/selftests/ftrace/test.d/00basic/mount_options.tc
++++ b/tools/testing/selftests/ftrace/test.d/00basic/mount_options.tc
+@@ -15,11 +15,11 @@ find_alternate_gid() {
+ 	tac /etc/group | grep -v ":$original_gid:" | head -1 | cut -d: -f3
+ }
+ 
+-mount_tracefs_with_options() {
++remount_tracefs_with_options() {
+ 	local mount_point="$1"
+ 	local options="$2"
+ 
+-	mount -t tracefs -o "$options" nodev "$mount_point"
++	mount -t tracefs -o "remount,$options" nodev "$mount_point"
+ 
+ 	setup
+ }
+@@ -81,7 +81,7 @@ test_gid_mount_option() {
+ 
+ 	# Unmount existing tracefs instance and mount with new GID
+ 	unmount_tracefs "$mount_point"
+-	mount_tracefs_with_options "$mount_point" "$new_options"
++	remount_tracefs_with_options "$mount_point" "$new_options"
+ 
+ 	check_gid "$mount_point" "$other_group"
+ 
+@@ -92,7 +92,7 @@ test_gid_mount_option() {
+ 
+ 	# Unmount and remount with the original GID
+ 	unmount_tracefs "$mount_point"
+-	mount_tracefs_with_options "$mount_point" "$mount_options"
++	remount_tracefs_with_options "$mount_point" "$mount_options"
+ 	check_gid "$mount_point" "$original_group"
+ }
+ 
+diff --git a/tools/testing/selftests/kselftest/ktap_helpers.sh b/tools/testing/selftests/kselftest/ktap_helpers.sh
+index 79a125eb24c2e8..14e7f3ec3f84c3 100644
+--- a/tools/testing/selftests/kselftest/ktap_helpers.sh
++++ b/tools/testing/selftests/kselftest/ktap_helpers.sh
+@@ -40,7 +40,7 @@ ktap_skip_all() {
+ __ktap_test() {
+ 	result="$1"
+ 	description="$2"
+-	directive="$3" # optional
++	directive="${3:-}" # optional
+ 
+ 	local directive_str=
+ 	[ ! -z "$directive" ] && directive_str="# $directive"
+diff --git a/tools/testing/selftests/kselftest_harness.h b/tools/testing/selftests/kselftest_harness.h
+index a5a72415e37b06..666c9fde76da9d 100644
+--- a/tools/testing/selftests/kselftest_harness.h
++++ b/tools/testing/selftests/kselftest_harness.h
+@@ -760,33 +760,33 @@
+ 		/* Report with actual signedness to avoid weird output. */ \
+ 		switch (is_signed_type(__exp) * 2 + is_signed_type(__seen)) { \
+ 		case 0: { \
+-			unsigned long long __exp_print = (uintptr_t)__exp; \
+-			unsigned long long __seen_print = (uintptr_t)__seen; \
+-			__TH_LOG("Expected %s (%llu) %s %s (%llu)", \
++			uintmax_t __exp_print = (uintmax_t)__exp; \
++			uintmax_t __seen_print = (uintmax_t)__seen; \
++			__TH_LOG("Expected %s (%ju) %s %s (%ju)", \
+ 				 _expected_str, __exp_print, #_t, \
+ 				 _seen_str, __seen_print); \
+ 			break; \
+ 			} \
+ 		case 1: { \
+-			unsigned long long __exp_print = (uintptr_t)__exp; \
+-			long long __seen_print = (intptr_t)__seen; \
+-			__TH_LOG("Expected %s (%llu) %s %s (%lld)", \
++			uintmax_t __exp_print = (uintmax_t)__exp; \
++			intmax_t  __seen_print = (intmax_t)__seen; \
++			__TH_LOG("Expected %s (%ju) %s %s (%jd)", \
+ 				 _expected_str, __exp_print, #_t, \
+ 				 _seen_str, __seen_print); \
+ 			break; \
+ 			} \
+ 		case 2: { \
+-			long long __exp_print = (intptr_t)__exp; \
+-			unsigned long long __seen_print = (uintptr_t)__seen; \
+-			__TH_LOG("Expected %s (%lld) %s %s (%llu)", \
++			intmax_t  __exp_print = (intmax_t)__exp; \
++			uintmax_t __seen_print = (uintmax_t)__seen; \
++			__TH_LOG("Expected %s (%jd) %s %s (%ju)", \
+ 				 _expected_str, __exp_print, #_t, \
+ 				 _seen_str, __seen_print); \
+ 			break; \
+ 			} \
+ 		case 3: { \
+-			long long __exp_print = (intptr_t)__exp; \
+-			long long __seen_print = (intptr_t)__seen; \
+-			__TH_LOG("Expected %s (%lld) %s %s (%lld)", \
++			intmax_t  __exp_print = (intmax_t)__exp; \
++			intmax_t  __seen_print = (intmax_t)__seen; \
++			__TH_LOG("Expected %s (%jd) %s %s (%jd)", \
+ 				 _expected_str, __exp_print, #_t, \
+ 				 _seen_str, __seen_print); \
+ 			break; \
+diff --git a/tools/testing/selftests/landlock/Makefile b/tools/testing/selftests/landlock/Makefile
+index 348e2dbdb4e0b9..480f13e77fcc4b 100644
+--- a/tools/testing/selftests/landlock/Makefile
++++ b/tools/testing/selftests/landlock/Makefile
+@@ -13,11 +13,11 @@ TEST_GEN_PROGS := $(src_test:.c=)
+ TEST_GEN_PROGS_EXTENDED := true
+ 
+ # Short targets:
+-$(TEST_GEN_PROGS): LDLIBS += -lcap
++$(TEST_GEN_PROGS): LDLIBS += -lcap -lpthread
+ $(TEST_GEN_PROGS_EXTENDED): LDFLAGS += -static
+ 
+ include ../lib.mk
+ 
+ # Targets with $(OUTPUT)/ prefix:
+-$(TEST_GEN_PROGS): LDLIBS += -lcap
++$(TEST_GEN_PROGS): LDLIBS += -lcap -lpthread
+ $(TEST_GEN_PROGS_EXTENDED): LDFLAGS += -static
+diff --git a/tools/testing/selftests/landlock/fs_test.c b/tools/testing/selftests/landlock/fs_test.c
+index 6788762188feac..97d360eae4f69e 100644
+--- a/tools/testing/selftests/landlock/fs_test.c
++++ b/tools/testing/selftests/landlock/fs_test.c
+@@ -2003,8 +2003,7 @@ static void test_execute(struct __test_metadata *const _metadata, const int err,
+ 	ASSERT_EQ(1, WIFEXITED(status));
+ 	ASSERT_EQ(err ? 2 : 0, WEXITSTATUS(status))
+ 	{
+-		TH_LOG("Unexpected return code for \"%s\": %s", path,
+-		       strerror(errno));
++		TH_LOG("Unexpected return code for \"%s\"", path);
+ 	};
+ }
+ 
+diff --git a/tools/testing/selftests/net/lib/Makefile b/tools/testing/selftests/net/lib/Makefile
+index 82c3264b115eee..704b88b6a8d2a2 100644
+--- a/tools/testing/selftests/net/lib/Makefile
++++ b/tools/testing/selftests/net/lib/Makefile
+@@ -1,6 +1,6 @@
+ # SPDX-License-Identifier: GPL-2.0
+ 
+-CFLAGS =  -Wall -Wl,--no-as-needed -O2 -g
++CFLAGS += -Wall -Wl,--no-as-needed -O2 -g
+ CFLAGS += -I../../../../../usr/include/ $(KHDR_INCLUDES)
+ # Additional include paths needed by kselftest.h
+ CFLAGS += -I../../
+diff --git a/tools/testing/selftests/net/mptcp/Makefile b/tools/testing/selftests/net/mptcp/Makefile
+index 5d796622e73099..580610c46e5aef 100644
+--- a/tools/testing/selftests/net/mptcp/Makefile
++++ b/tools/testing/selftests/net/mptcp/Makefile
+@@ -2,7 +2,7 @@
+ 
+ top_srcdir = ../../../../..
+ 
+-CFLAGS =  -Wall -Wl,--no-as-needed -O2 -g -I$(top_srcdir)/usr/include $(KHDR_INCLUDES)
++CFLAGS += -Wall -Wl,--no-as-needed -O2 -g -I$(top_srcdir)/usr/include $(KHDR_INCLUDES)
+ 
+ TEST_PROGS := mptcp_connect.sh pm_netlink.sh mptcp_join.sh diag.sh \
+ 	      simult_flows.sh mptcp_sockopt.sh userspace_pm.sh
+diff --git a/tools/testing/selftests/net/openvswitch/Makefile b/tools/testing/selftests/net/openvswitch/Makefile
+index 2f1508abc826b7..3fd1da2ec07d54 100644
+--- a/tools/testing/selftests/net/openvswitch/Makefile
++++ b/tools/testing/selftests/net/openvswitch/Makefile
+@@ -2,7 +2,7 @@
+ 
+ top_srcdir = ../../../../..
+ 
+-CFLAGS =  -Wall -Wl,--no-as-needed -O2 -g -I$(top_srcdir)/usr/include $(KHDR_INCLUDES)
++CFLAGS += -Wall -Wl,--no-as-needed -O2 -g -I$(top_srcdir)/usr/include $(KHDR_INCLUDES)
+ 
+ TEST_PROGS := openvswitch.sh
+ 
+diff --git a/tools/testing/selftests/powerpc/benchmarks/gettimeofday.c b/tools/testing/selftests/powerpc/benchmarks/gettimeofday.c
+index 580fcac0a09f31..b71ef8a493ed1a 100644
+--- a/tools/testing/selftests/powerpc/benchmarks/gettimeofday.c
++++ b/tools/testing/selftests/powerpc/benchmarks/gettimeofday.c
+@@ -20,7 +20,7 @@ static int test_gettimeofday(void)
+ 		gettimeofday(&tv_end, NULL);
+ 	}
+ 
+-	timersub(&tv_start, &tv_end, &tv_diff);
++	timersub(&tv_end, &tv_start, &tv_diff);
+ 
+ 	printf("time = %.6f\n", tv_diff.tv_sec + (tv_diff.tv_usec) * 1e-6);
+ 
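The timersub() fix above matters because timersub(a, b, res) computes
res = a - b, so the end timestamp must come first; with the arguments swapped
the benchmark printed a negative (wrapped) interval.  A short check
(illustration):

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <sys/time.h>

int main(void)
{
	struct timeval start = { .tv_sec = 1, .tv_usec = 250000 };
	struct timeval end   = { .tv_sec = 3, .tv_usec = 500000 };
	struct timeval diff;

	timersub(&end, &start, &diff);	/* diff = end - start = 2.25 s */
	printf("time = %.6f\n", diff.tv_sec + diff.tv_usec * 1e-6);
	return 0;
}
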
+diff --git a/tools/testing/selftests/rseq/rseq.c b/tools/testing/selftests/rseq/rseq.c
+index 5b9772cdf2651b..f6156790c3b4df 100644
+--- a/tools/testing/selftests/rseq/rseq.c
++++ b/tools/testing/selftests/rseq/rseq.c
+@@ -61,7 +61,6 @@ unsigned int rseq_size = -1U;
+ unsigned int rseq_flags;
+ 
+ static int rseq_ownership;
+-static int rseq_reg_success;	/* At least one rseq registration has succeded. */
+ 
+ /* Allocate a large area for the TLS. */
+ #define RSEQ_THREAD_AREA_ALLOC_SIZE	1024
+@@ -152,14 +151,27 @@ int rseq_register_current_thread(void)
+ 	}
+ 	rc = sys_rseq(&__rseq_abi, get_rseq_min_alloc_size(), 0, RSEQ_SIG);
+ 	if (rc) {
+-		if (RSEQ_READ_ONCE(rseq_reg_success)) {
++		/*
++		 * After at least one thread has registered successfully
++		 * (rseq_size > 0), the registration of other threads should
++		 * never fail.
++		 */
++		if (RSEQ_READ_ONCE(rseq_size) > 0) {
+ 			/* Incoherent success/failure within process. */
+ 			abort();
+ 		}
+ 		return -1;
+ 	}
+ 	assert(rseq_current_cpu_raw() >= 0);
+-	RSEQ_WRITE_ONCE(rseq_reg_success, 1);
++
++	/*
++	 * The first thread to register sets the rseq_size to mimic the libc
++	 * behavior.
++	 */
++	if (RSEQ_READ_ONCE(rseq_size) == 0) {
++		RSEQ_WRITE_ONCE(rseq_size, get_rseq_kernel_feature_size());
++	}
++
+ 	return 0;
+ }
+ 
+@@ -235,12 +247,18 @@ void rseq_init(void)
+ 		return;
+ 	}
+ 	rseq_ownership = 1;
+-	if (!rseq_available()) {
+-		rseq_size = 0;
+-		return;
+-	}
++
++	/* Calculate the offset of the rseq area from the thread pointer. */
+ 	rseq_offset = (void *)&__rseq_abi - rseq_thread_pointer();
++
++	/* rseq flags are deprecated, always set to 0. */
+ 	rseq_flags = 0;
++
++	/*
++	 * Set the size to 0 until at least one thread registers to mimic the
++	 * libc behavior.
++	 */
++	rseq_size = 0;
+ }
+ 
+ static __attribute__((destructor))
+diff --git a/tools/testing/selftests/rseq/rseq.h b/tools/testing/selftests/rseq/rseq.h
+index 4e217b620e0c7a..062d10925a1011 100644
+--- a/tools/testing/selftests/rseq/rseq.h
++++ b/tools/testing/selftests/rseq/rseq.h
+@@ -60,7 +60,14 @@
+ extern ptrdiff_t rseq_offset;
+ 
+ /*
+- * Size of the registered rseq area. 0 if the registration was
++ * The rseq ABI is composed of extensible feature fields. The extensions
++ * are done by appending additional fields at the end of the structure.
++ * The rseq_size defines the size of the active feature set which can be
++ * used by the application for the current rseq registration. Features
++ * starting at offset >= rseq_size are inactive and should not be used.
++ *
++ * The rseq_size is the intersection between the available allocation
++ * size for the rseq area and the feature size supported by the kernel.
+  * unsuccessful.
+  */
+ extern unsigned int rseq_size;
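The rewritten comment documents the invariant the rseq selftests now follow:
a field in the extensible rseq area is active only if it ends at or below
rseq_size, and rseq_size stays 0 until the first registration succeeds.  A
sketch of the guard an application might use (illustration; assumes the UAPI
struct rseq from <linux/rseq.h>, with rseq_size standing in for the symbol
declared above):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <linux/rseq.h>		/* struct rseq with extensible fields */

/* Provided by the selftests' rseq.c in the real code; defined here
 * only so this sketch links. */
unsigned int rseq_size;

/* A feature field is active only if it lies entirely below rseq_size. */
static bool rseq_mm_cid_active(void)
{
	return rseq_size >= offsetof(struct rseq, mm_cid) + sizeof(__u32);
}

int main(void)
{
	printf("mm_cid active: %d\n", rseq_mm_cid_active());
	return 0;
}
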
+diff --git a/tools/testing/selftests/timers/clocksource-switch.c b/tools/testing/selftests/timers/clocksource-switch.c
+index c5264594064c85..83faa4e354e389 100644
+--- a/tools/testing/selftests/timers/clocksource-switch.c
++++ b/tools/testing/selftests/timers/clocksource-switch.c
+@@ -156,8 +156,8 @@ int main(int argc, char **argv)
+ 	/* Check everything is sane before we start switching asynchronously */
+ 	if (do_sanity_check) {
+ 		for (i = 0; i < count; i++) {
+-			printf("Validating clocksource %s\n",
+-				clocksource_list[i]);
++			ksft_print_msg("Validating clocksource %s\n",
++					clocksource_list[i]);
+ 			if (change_clocksource(clocksource_list[i])) {
+ 				status = -1;
+ 				goto out;
+@@ -169,7 +169,7 @@ int main(int argc, char **argv)
+ 		}
+ 	}
+ 
+-	printf("Running Asynchronous Switching Tests...\n");
++	ksft_print_msg("Running Asynchronous Switching Tests...\n");
+ 	pid = fork();
+ 	if (!pid)
+ 		return run_tests(runtime);


* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2025-02-01 23:07 Mike Pagano
  0 siblings, 0 replies; 82+ messages in thread
From: Mike Pagano @ 2025-02-01 23:07 UTC (permalink / raw
  To: gentoo-commits

commit:     05ce594d8f20da5e0040106fb43894994a64fd0b
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Feb  1 23:06:45 2025 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Feb  1 23:06:45 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=05ce594d

Linux patch 6.12.12

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1011_linux-6.12.12.patch | 1388 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1392 insertions(+)

diff --git a/0000_README b/0000_README
index 9c94906b..17858f75 100644
--- a/0000_README
+++ b/0000_README
@@ -87,6 +87,10 @@ Patch:  1010_linux-6.12.11.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.12.11
 
+Patch:  1011_linux-6.12.12.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.12.12
+
 Patch:  1510_fs-enable-link-security-restrictions-by-default.patch
 From:   http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch/
 Desc:   Enable link security restrictions by default.

diff --git a/1011_linux-6.12.12.patch b/1011_linux-6.12.12.patch
new file mode 100644
index 00000000..921cacc4
--- /dev/null
+++ b/1011_linux-6.12.12.patch
@@ -0,0 +1,1388 @@
+diff --git a/Makefile b/Makefile
+index 7cf8f11975f89c..9e6246e733eb94 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 12
+-SUBLEVEL = 11
++SUBLEVEL = 12
+ EXTRAVERSION =
+ NAME = Baby Opossum Posse
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
+index bf636b28e3e16e..5bb8b78bf250a0 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
+@@ -63,7 +63,8 @@ void dmub_hw_lock_mgr_inbox0_cmd(struct dc_dmub_srv *dmub_srv,
+ 
+ bool should_use_dmub_lock(struct dc_link *link)
+ {
+-	if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1)
++	if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1 ||
++	    link->psr_settings.psr_version == DC_PSR_VERSION_1)
+ 		return true;
+ 
+ 	if (link->replay_settings.replay_feature_enabled)
+diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
+index 3ea54fd52e4683..e2a3764d9d181a 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
++++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
+@@ -578,8 +578,8 @@ static void CalculateBytePerPixelAndBlockSizes(
+ {
+ 	*BytePerPixelDETY = 0;
+ 	*BytePerPixelDETC = 0;
+-	*BytePerPixelY = 0;
+-	*BytePerPixelC = 0;
++	*BytePerPixelY = 1;
++	*BytePerPixelC = 1;
+ 
+ 	if (SourcePixelFormat == dml2_444_64) {
+ 		*BytePerPixelDETY = 8;
+diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
+index fc35f47e2849ed..ca7f43c8d6f1b3 100644
+--- a/drivers/gpu/drm/drm_connector.c
++++ b/drivers/gpu/drm/drm_connector.c
+@@ -507,6 +507,9 @@ int drmm_connector_hdmi_init(struct drm_device *dev,
+ 	if (!supported_formats || !(supported_formats & BIT(HDMI_COLORSPACE_RGB)))
+ 		return -EINVAL;
+ 
++	if (connector->ycbcr_420_allowed != !!(supported_formats & BIT(HDMI_COLORSPACE_YUV420)))
++		return -EINVAL;
++
+ 	if (!(max_bpc == 8 || max_bpc == 10 || max_bpc == 12))
+ 		return -EINVAL;
+ 
+diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c
+index da203045df9bec..72b6a119412fa7 100644
+--- a/drivers/gpu/drm/v3d/v3d_irq.c
++++ b/drivers/gpu/drm/v3d/v3d_irq.c
+@@ -107,8 +107,10 @@ v3d_irq(int irq, void *arg)
+ 
+ 		v3d_job_update_stats(&v3d->bin_job->base, V3D_BIN);
+ 		trace_v3d_bcl_irq(&v3d->drm, fence->seqno);
+-		dma_fence_signal(&fence->base);
++
+ 		v3d->bin_job = NULL;
++		dma_fence_signal(&fence->base);
++
+ 		status = IRQ_HANDLED;
+ 	}
+ 
+@@ -118,8 +120,10 @@ v3d_irq(int irq, void *arg)
+ 
+ 		v3d_job_update_stats(&v3d->render_job->base, V3D_RENDER);
+ 		trace_v3d_rcl_irq(&v3d->drm, fence->seqno);
+-		dma_fence_signal(&fence->base);
++
+ 		v3d->render_job = NULL;
++		dma_fence_signal(&fence->base);
++
+ 		status = IRQ_HANDLED;
+ 	}
+ 
+@@ -129,8 +133,10 @@ v3d_irq(int irq, void *arg)
+ 
+ 		v3d_job_update_stats(&v3d->csd_job->base, V3D_CSD);
+ 		trace_v3d_csd_irq(&v3d->drm, fence->seqno);
+-		dma_fence_signal(&fence->base);
++
+ 		v3d->csd_job = NULL;
++		dma_fence_signal(&fence->base);
++
+ 		status = IRQ_HANDLED;
+ 	}
+ 
+@@ -167,8 +173,10 @@ v3d_hub_irq(int irq, void *arg)
+ 
+ 		v3d_job_update_stats(&v3d->tfu_job->base, V3D_TFU);
+ 		trace_v3d_tfu_irq(&v3d->drm, fence->seqno);
+-		dma_fence_signal(&fence->base);
++
+ 		v3d->tfu_job = NULL;
++		dma_fence_signal(&fence->base);
++
+ 		status = IRQ_HANDLED;
+ 	}
+ 
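All four v3d hunks above apply the same reordering: detach the driver's job
pointer before calling dma_fence_signal(), since signaling can immediately run
fence callbacks that free the job or re-enter the driver.  The pattern reduced
to a userspace analogue (illustration, not driver code):

#include <stdio.h>
#include <stdlib.h>

struct job { int id; };

static struct job *current_job;	/* shared state, like v3d->bin_job */

static void signal_completion(struct job *job)
{
	/* Stands in for dma_fence_signal(): callbacks run here and
	 * may free the job. */
	printf("job %d done\n", job->id);
	free(job);
}

int main(void)
{
	struct job *job;

	current_job = malloc(sizeof(*current_job));
	if (!current_job)
		return 1;
	current_job->id = 1;

	job = current_job;
	current_job = NULL;		/* detach shared state first... */
	signal_completion(job);		/* ...then signal completion */
	return 0;
}
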
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 0f23be98c56e22..ceb3b1a72e235c 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -506,7 +506,6 @@
+ #define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_E100 0xe100
+ 
+ #define I2C_VENDOR_ID_GOODIX		0x27c6
+-#define I2C_DEVICE_ID_GOODIX_01E0	0x01e0
+ #define I2C_DEVICE_ID_GOODIX_01E8	0x01e8
+ #define I2C_DEVICE_ID_GOODIX_01E9	0x01e9
+ #define I2C_DEVICE_ID_GOODIX_01F0	0x01f0
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index e936019d21fecf..d1b7ccfb3e051f 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -1452,8 +1452,7 @@ static const __u8 *mt_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ {
+ 	if (hdev->vendor == I2C_VENDOR_ID_GOODIX &&
+ 	    (hdev->product == I2C_DEVICE_ID_GOODIX_01E8 ||
+-	     hdev->product == I2C_DEVICE_ID_GOODIX_01E9 ||
+-		 hdev->product == I2C_DEVICE_ID_GOODIX_01E0)) {
++	     hdev->product == I2C_DEVICE_ID_GOODIX_01E9)) {
+ 		if (rdesc[607] == 0x15) {
+ 			rdesc[607] = 0x25;
+ 			dev_info(
+@@ -2079,10 +2078,7 @@ static const struct hid_device_id mt_devices[] = {
+ 		     I2C_DEVICE_ID_GOODIX_01E8) },
+ 	{ .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT_NSMU,
+ 	  HID_DEVICE(BUS_I2C, HID_GROUP_ANY, I2C_VENDOR_ID_GOODIX,
+-		     I2C_DEVICE_ID_GOODIX_01E9) },
+-	{ .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT_NSMU,
+-	  HID_DEVICE(BUS_I2C, HID_GROUP_ANY, I2C_VENDOR_ID_GOODIX,
+-		     I2C_DEVICE_ID_GOODIX_01E0) },
++		     I2C_DEVICE_ID_GOODIX_01E8) },
+ 
+ 	/* GoodTouch panels */
+ 	{ .driver_data = MT_CLS_NSMU,
+diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
+index 9843b52bd017a0..34428349fa3118 100644
+--- a/drivers/hid/wacom_sys.c
++++ b/drivers/hid/wacom_sys.c
+@@ -1370,17 +1370,6 @@ static int wacom_led_register_one(struct device *dev, struct wacom *wacom,
+ 	if (!name)
+ 		return -ENOMEM;
+ 
+-	if (!read_only) {
+-		led->trigger.name = name;
+-		error = devm_led_trigger_register(dev, &led->trigger);
+-		if (error) {
+-			hid_err(wacom->hdev,
+-				"failed to register LED trigger %s: %d\n",
+-				led->cdev.name, error);
+-			return error;
+-		}
+-	}
+-
+ 	led->group = group;
+ 	led->id = id;
+ 	led->wacom = wacom;
+@@ -1397,6 +1386,19 @@ static int wacom_led_register_one(struct device *dev, struct wacom *wacom,
+ 		led->cdev.brightness_set = wacom_led_readonly_brightness_set;
+ 	}
+ 
++	if (!read_only) {
++		led->trigger.name = name;
++		if (id == wacom->led.groups[group].select)
++			led->trigger.brightness = wacom_leds_brightness_get(led);
++		error = devm_led_trigger_register(dev, &led->trigger);
++		if (error) {
++			hid_err(wacom->hdev,
++				"failed to register LED trigger %s: %d\n",
++				led->cdev.name, error);
++			return error;
++		}
++	}
++
+ 	error = devm_led_classdev_register(dev, &led->cdev);
+ 	if (error) {
+ 		hid_err(wacom->hdev,
+diff --git a/drivers/hwmon/drivetemp.c b/drivers/hwmon/drivetemp.c
+index 2a4ec55ddb47ed..291d91f6864676 100644
+--- a/drivers/hwmon/drivetemp.c
++++ b/drivers/hwmon/drivetemp.c
+@@ -194,7 +194,7 @@ static int drivetemp_scsi_command(struct drivetemp_data *st,
+ 	scsi_cmd[14] = ata_command;
+ 
+ 	err = scsi_execute_cmd(st->sdev, scsi_cmd, op, st->smartdata,
+-			       ATA_SECT_SIZE, HZ, 5, NULL);
++			       ATA_SECT_SIZE, 10 * HZ, 5, NULL);
+ 	if (err > 0)
+ 		err = -EIO;
+ 	return err;
+diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
+index 22ea58bf76cb5c..77fddab9d9502e 100644
+--- a/drivers/input/joystick/xpad.c
++++ b/drivers/input/joystick/xpad.c
+@@ -150,6 +150,7 @@ static const struct xpad_device {
+ 	{ 0x045e, 0x028e, "Microsoft X-Box 360 pad", 0, XTYPE_XBOX360 },
+ 	{ 0x045e, 0x028f, "Microsoft X-Box 360 pad v2", 0, XTYPE_XBOX360 },
+ 	{ 0x045e, 0x0291, "Xbox 360 Wireless Receiver (XBOX)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W },
++	{ 0x045e, 0x02a9, "Xbox 360 Wireless Receiver (Unofficial)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W },
+ 	{ 0x045e, 0x02d1, "Microsoft X-Box One pad", 0, XTYPE_XBOXONE },
+ 	{ 0x045e, 0x02dd, "Microsoft X-Box One pad (Firmware 2015)", 0, XTYPE_XBOXONE },
+ 	{ 0x045e, 0x02e3, "Microsoft X-Box One Elite pad", MAP_PADDLES, XTYPE_XBOXONE },
+@@ -305,6 +306,7 @@ static const struct xpad_device {
+ 	{ 0x1689, 0xfe00, "Razer Sabertooth", 0, XTYPE_XBOX360 },
+ 	{ 0x17ef, 0x6182, "Lenovo Legion Controller for Windows", 0, XTYPE_XBOX360 },
+ 	{ 0x1949, 0x041a, "Amazon Game Controller", 0, XTYPE_XBOX360 },
++	{ 0x1a86, 0xe310, "QH Electronics Controller", 0, XTYPE_XBOX360 },
+ 	{ 0x1bad, 0x0002, "Harmonix Rock Band Guitar", 0, XTYPE_XBOX360 },
+ 	{ 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
+ 	{ 0x1bad, 0x0130, "Ion Drum Rocker", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
+@@ -373,16 +375,19 @@ static const struct xpad_device {
+ 	{ 0x294b, 0x3303, "Snakebyte GAMEPAD BASE X", 0, XTYPE_XBOXONE },
+ 	{ 0x294b, 0x3404, "Snakebyte GAMEPAD RGB X", 0, XTYPE_XBOXONE },
+ 	{ 0x2dc8, 0x2000, "8BitDo Pro 2 Wired Controller fox Xbox", 0, XTYPE_XBOXONE },
+-	{ 0x2dc8, 0x3106, "8BitDo Pro 2 Wired Controller", 0, XTYPE_XBOX360 },
++	{ 0x2dc8, 0x3106, "8BitDo Ultimate Wireless / Pro 2 Wired Controller", 0, XTYPE_XBOX360 },
+ 	{ 0x2dc8, 0x310a, "8BitDo Ultimate 2C Wireless Controller", 0, XTYPE_XBOX360 },
+ 	{ 0x2e24, 0x0652, "Hyperkin Duke X-Box One pad", 0, XTYPE_XBOXONE },
+ 	{ 0x31e3, 0x1100, "Wooting One", 0, XTYPE_XBOX360 },
+ 	{ 0x31e3, 0x1200, "Wooting Two", 0, XTYPE_XBOX360 },
+ 	{ 0x31e3, 0x1210, "Wooting Lekker", 0, XTYPE_XBOX360 },
+ 	{ 0x31e3, 0x1220, "Wooting Two HE", 0, XTYPE_XBOX360 },
++	{ 0x31e3, 0x1230, "Wooting Two HE (ARM)", 0, XTYPE_XBOX360 },
+ 	{ 0x31e3, 0x1300, "Wooting 60HE (AVR)", 0, XTYPE_XBOX360 },
+ 	{ 0x31e3, 0x1310, "Wooting 60HE (ARM)", 0, XTYPE_XBOX360 },
+ 	{ 0x3285, 0x0607, "Nacon GC-100", 0, XTYPE_XBOX360 },
++	{ 0x3285, 0x0646, "Nacon Pro Compact", 0, XTYPE_XBOXONE },
++	{ 0x3285, 0x0663, "Nacon Evol-X", 0, XTYPE_XBOXONE },
+ 	{ 0x3537, 0x1004, "GameSir T4 Kaleid", 0, XTYPE_XBOX360 },
+ 	{ 0x3767, 0x0101, "Fanatec Speedster 3 Forceshock Wheel", 0, XTYPE_XBOX },
+ 	{ 0xffff, 0xffff, "Chinese-made Xbox Controller", 0, XTYPE_XBOX },
+@@ -514,6 +519,7 @@ static const struct usb_device_id xpad_table[] = {
+ 	XPAD_XBOX360_VENDOR(0x1689),		/* Razer Onza */
+ 	XPAD_XBOX360_VENDOR(0x17ef),		/* Lenovo */
+ 	XPAD_XBOX360_VENDOR(0x1949),		/* Amazon controllers */
++	XPAD_XBOX360_VENDOR(0x1a86),		/* QH Electronics */
+ 	XPAD_XBOX360_VENDOR(0x1bad),		/* Harmonix Rock Band guitar and drums */
+ 	XPAD_XBOX360_VENDOR(0x20d6),		/* PowerA controllers */
+ 	XPAD_XBOXONE_VENDOR(0x20d6),		/* PowerA controllers */
+@@ -530,6 +536,7 @@ static const struct usb_device_id xpad_table[] = {
+ 	XPAD_XBOX360_VENDOR(0x2f24),		/* GameSir controllers */
+ 	XPAD_XBOX360_VENDOR(0x31e3),		/* Wooting Keyboards */
+ 	XPAD_XBOX360_VENDOR(0x3285),		/* Nacon GC-100 */
++	XPAD_XBOXONE_VENDOR(0x3285),		/* Nacon Evol-X */
+ 	XPAD_XBOX360_VENDOR(0x3537),		/* GameSir Controllers */
+ 	XPAD_XBOXONE_VENDOR(0x3537),		/* GameSir Controllers */
+ 	{ }
+diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
+index 5855d4fc6e6a4d..f7b08b359c9c67 100644
+--- a/drivers/input/keyboard/atkbd.c
++++ b/drivers/input/keyboard/atkbd.c
+@@ -89,7 +89,7 @@ static const unsigned short atkbd_set2_keycode[ATKBD_KEYMAP_SIZE] = {
+ 	  0, 46, 45, 32, 18,  5,  4, 95,  0, 57, 47, 33, 20, 19,  6,183,
+ 	  0, 49, 48, 35, 34, 21,  7,184,  0,  0, 50, 36, 22,  8,  9,185,
+ 	  0, 51, 37, 23, 24, 11, 10,  0,  0, 52, 53, 38, 39, 25, 12,  0,
+-	  0, 89, 40,  0, 26, 13,  0,  0, 58, 54, 28, 27,  0, 43,  0, 85,
++	  0, 89, 40,  0, 26, 13,  0,193, 58, 54, 28, 27,  0, 43,  0, 85,
+ 	  0, 86, 91, 90, 92,  0, 14, 94,  0, 79,124, 75, 71,121,  0,  0,
+ 	 82, 83, 80, 76, 77, 72,  1, 69, 87, 78, 81, 74, 55, 73, 70, 99,
+ 
+diff --git a/drivers/irqchip/irq-sunxi-nmi.c b/drivers/irqchip/irq-sunxi-nmi.c
+index bb92fd85e975f8..0b431215202434 100644
+--- a/drivers/irqchip/irq-sunxi-nmi.c
++++ b/drivers/irqchip/irq-sunxi-nmi.c
+@@ -186,7 +186,8 @@ static int __init sunxi_sc_nmi_irq_init(struct device_node *node,
+ 	gc->chip_types[0].chip.irq_unmask	= irq_gc_mask_set_bit;
+ 	gc->chip_types[0].chip.irq_eoi		= irq_gc_ack_set_bit;
+ 	gc->chip_types[0].chip.irq_set_type	= sunxi_sc_nmi_set_type;
+-	gc->chip_types[0].chip.flags		= IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED;
++	gc->chip_types[0].chip.flags		= IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED |
++						  IRQCHIP_SKIP_SET_WAKE;
+ 	gc->chip_types[0].regs.ack		= reg_offs->pend;
+ 	gc->chip_types[0].regs.mask		= reg_offs->enable;
+ 	gc->chip_types[0].regs.type		= reg_offs->ctrl;
+diff --git a/drivers/net/wireless/realtek/rtl8xxxu/core.c b/drivers/net/wireless/realtek/rtl8xxxu/core.c
+index f95898f68d68a5..4ce0c05c512910 100644
+--- a/drivers/net/wireless/realtek/rtl8xxxu/core.c
++++ b/drivers/net/wireless/realtek/rtl8xxxu/core.c
+@@ -8147,6 +8147,8 @@ static const struct usb_device_id dev_table[] = {
+ 	.driver_info = (unsigned long)&rtl8192cu_fops},
+ {USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x817e, 0xff, 0xff, 0xff),
+ 	.driver_info = (unsigned long)&rtl8192cu_fops},
++{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8186, 0xff, 0xff, 0xff),
++	.driver_info = (unsigned long)&rtl8192cu_fops},
+ {USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x818a, 0xff, 0xff, 0xff),
+ 	.driver_info = (unsigned long)&rtl8192cu_fops},
+ {USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x317f, 0xff, 0xff, 0xff),
+@@ -8157,12 +8159,18 @@ static const struct usb_device_id dev_table[] = {
+ 	.driver_info = (unsigned long)&rtl8192cu_fops},
+ {USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x1102, 0xff, 0xff, 0xff),
+ 	.driver_info = (unsigned long)&rtl8192cu_fops},
++{USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x11f2, 0xff, 0xff, 0xff),
++	.driver_info = (unsigned long)&rtl8192cu_fops},
+ {USB_DEVICE_AND_INTERFACE_INFO(0x06f8, 0xe033, 0xff, 0xff, 0xff),
+ 	.driver_info = (unsigned long)&rtl8192cu_fops},
++{USB_DEVICE_AND_INTERFACE_INFO(0x07b8, 0x8188, 0xff, 0xff, 0xff),
++	.driver_info = (unsigned long)&rtl8192cu_fops},
+ {USB_DEVICE_AND_INTERFACE_INFO(0x07b8, 0x8189, 0xff, 0xff, 0xff),
+ 	.driver_info = (unsigned long)&rtl8192cu_fops},
+ {USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0x9041, 0xff, 0xff, 0xff),
+ 	.driver_info = (unsigned long)&rtl8192cu_fops},
++{USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0x9043, 0xff, 0xff, 0xff),
++	.driver_info = (unsigned long)&rtl8192cu_fops},
+ {USB_DEVICE_AND_INTERFACE_INFO(0x0b05, 0x17ba, 0xff, 0xff, 0xff),
+ 	.driver_info = (unsigned long)&rtl8192cu_fops},
+ {USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x1e1e, 0xff, 0xff, 0xff),
+@@ -8179,6 +8187,10 @@ static const struct usb_device_id dev_table[] = {
+ 	.driver_info = (unsigned long)&rtl8192cu_fops},
+ {USB_DEVICE_AND_INTERFACE_INFO(0x13d3, 0x3357, 0xff, 0xff, 0xff),
+ 	.driver_info = (unsigned long)&rtl8192cu_fops},
++{USB_DEVICE_AND_INTERFACE_INFO(0x13d3, 0x3358, 0xff, 0xff, 0xff),
++	.driver_info = (unsigned long)&rtl8192cu_fops},
++{USB_DEVICE_AND_INTERFACE_INFO(0x13d3, 0x3359, 0xff, 0xff, 0xff),
++	.driver_info = (unsigned long)&rtl8192cu_fops},
+ {USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x330b, 0xff, 0xff, 0xff),
+ 	.driver_info = (unsigned long)&rtl8192cu_fops},
+ {USB_DEVICE_AND_INTERFACE_INFO(0x2019, 0x4902, 0xff, 0xff, 0xff),
+@@ -8193,6 +8205,8 @@ static const struct usb_device_id dev_table[] = {
+ 	.driver_info = (unsigned long)&rtl8192cu_fops},
+ {USB_DEVICE_AND_INTERFACE_INFO(0x4856, 0x0091, 0xff, 0xff, 0xff),
+ 	.driver_info = (unsigned long)&rtl8192cu_fops},
++{USB_DEVICE_AND_INTERFACE_INFO(0x9846, 0x9041, 0xff, 0xff, 0xff),
++	.driver_info = (unsigned long)&rtl8192cu_fops},
+ {USB_DEVICE_AND_INTERFACE_INFO(0xcdab, 0x8010, 0xff, 0xff, 0xff),
+ 	.driver_info = (unsigned long)&rtl8192cu_fops},
+ {USB_DEVICE_AND_INTERFACE_INFO(0x04f2, 0xaff7, 0xff, 0xff, 0xff),
+@@ -8218,6 +8232,8 @@ static const struct usb_device_id dev_table[] = {
+ 	.driver_info = (unsigned long)&rtl8192cu_fops},
+ {USB_DEVICE_AND_INTERFACE_INFO(0x0586, 0x341f, 0xff, 0xff, 0xff),
+ 	.driver_info = (unsigned long)&rtl8192cu_fops},
++{USB_DEVICE_AND_INTERFACE_INFO(0x06f8, 0xe033, 0xff, 0xff, 0xff),
++	.driver_info = (unsigned long)&rtl8192cu_fops},
+ {USB_DEVICE_AND_INTERFACE_INFO(0x06f8, 0xe035, 0xff, 0xff, 0xff),
+ 	.driver_info = (unsigned long)&rtl8192cu_fops},
+ {USB_DEVICE_AND_INTERFACE_INFO(0x0b05, 0x17ab, 0xff, 0xff, 0xff),
+@@ -8226,6 +8242,8 @@ static const struct usb_device_id dev_table[] = {
+ 	.driver_info = (unsigned long)&rtl8192cu_fops},
+ {USB_DEVICE_AND_INTERFACE_INFO(0x0df6, 0x0070, 0xff, 0xff, 0xff),
+ 	.driver_info = (unsigned long)&rtl8192cu_fops},
++{USB_DEVICE_AND_INTERFACE_INFO(0x0df6, 0x0077, 0xff, 0xff, 0xff),
++	.driver_info = (unsigned long)&rtl8192cu_fops},
+ {USB_DEVICE_AND_INTERFACE_INFO(0x0789, 0x016d, 0xff, 0xff, 0xff),
+ 	.driver_info = (unsigned long)&rtl8192cu_fops},
+ {USB_DEVICE_AND_INTERFACE_INFO(0x07aa, 0x0056, 0xff, 0xff, 0xff),
+@@ -8248,6 +8266,8 @@ static const struct usb_device_id dev_table[] = {
+ 	.driver_info = (unsigned long)&rtl8192cu_fops},
+ {USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x330a, 0xff, 0xff, 0xff),
+ 	.driver_info = (unsigned long)&rtl8192cu_fops},
++{USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x330d, 0xff, 0xff, 0xff),
++	.driver_info = (unsigned long)&rtl8192cu_fops},
+ {USB_DEVICE_AND_INTERFACE_INFO(0x2019, 0xab2b, 0xff, 0xff, 0xff),
+ 	.driver_info = (unsigned long)&rtl8192cu_fops},
+ {USB_DEVICE_AND_INTERFACE_INFO(0x20f4, 0x624d, 0xff, 0xff, 0xff),
+diff --git a/drivers/of/unittest-data/tests-platform.dtsi b/drivers/of/unittest-data/tests-platform.dtsi
+index fa39611071b32f..cd310b26b50c81 100644
+--- a/drivers/of/unittest-data/tests-platform.dtsi
++++ b/drivers/of/unittest-data/tests-platform.dtsi
+@@ -34,5 +34,18 @@ dev@100 {
+ 				};
+ 			};
+ 		};
++
++		platform-tests-2 {
++			// No #address-cells or #size-cells
++			node {
++				#address-cells = <1>;
++				#size-cells = <1>;
++
++				test-device@100 {
++					compatible = "test-sub-device";
++					reg = <0x100 1>;
++				};
++			};
++		};
+ 	};
+ };
+diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
+index daf9a2dddd7e0d..576e9beefc7c8f 100644
+--- a/drivers/of/unittest.c
++++ b/drivers/of/unittest.c
+@@ -1342,6 +1342,7 @@ static void __init of_unittest_bus_3cell_ranges(void)
+ static void __init of_unittest_reg(void)
+ {
+ 	struct device_node *np;
++	struct resource res;
+ 	int ret;
+ 	u64 addr, size;
+ 
+@@ -1358,6 +1359,19 @@ static void __init of_unittest_reg(void)
+ 		np, addr);
+ 
+ 	of_node_put(np);
++
++	np = of_find_node_by_path("/testcase-data/platform-tests-2/node/test-device@100");
++	if (!np) {
++		pr_err("missing testcase data\n");
++		return;
++	}
++
++	ret = of_address_to_resource(np, 0, &res);
++	unittest(ret == -EINVAL, "of_address_to_resource(%pOF) expected error on untranslatable address\n",
++		 np);
++
++	of_node_put(np);
++
+ }
+ 
+ struct of_unittest_expected_res {
+diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
+index fde7de3b1e5538..9b47f91c5b9720 100644
+--- a/drivers/scsi/scsi_transport_iscsi.c
++++ b/drivers/scsi/scsi_transport_iscsi.c
+@@ -4104,7 +4104,7 @@ iscsi_if_rx(struct sk_buff *skb)
+ 		}
+ 		do {
+ 			/*
+-			 * special case for GET_STATS:
++			 * special case for GET_STATS, GET_CHAP and GET_HOST_STATS:
+ 			 * on success - sending reply and stats from
+ 			 * inside of if_recv_msg(),
+ 			 * on error - fall through.
+@@ -4113,6 +4113,8 @@ iscsi_if_rx(struct sk_buff *skb)
+ 				break;
+ 			if (ev->type == ISCSI_UEVENT_GET_CHAP && !err)
+ 				break;
++			if (ev->type == ISCSI_UEVENT_GET_HOST_STATS && !err)
++				break;
+ 			err = iscsi_if_send_reply(portid, nlh->nlmsg_type,
+ 						  ev, sizeof(*ev));
+ 			if (err == -EAGAIN && --retries < 0) {
+diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
+index d0b55c1fa908a5..b3c588b102d900 100644
+--- a/drivers/scsi/storvsc_drv.c
++++ b/drivers/scsi/storvsc_drv.c
+@@ -171,6 +171,12 @@ do {								\
+ 		dev_warn(&(dev)->device, fmt, ##__VA_ARGS__);	\
+ } while (0)
+ 
++#define storvsc_log_ratelimited(dev, level, fmt, ...)				\
++do {										\
++	if (do_logging(level))							\
++		dev_warn_ratelimited(&(dev)->device, fmt, ##__VA_ARGS__);	\
++} while (0)
++
+ struct vmscsi_request {
+ 	u16 length;
+ 	u8 srb_status;
+@@ -1177,7 +1183,7 @@ static void storvsc_on_io_completion(struct storvsc_device *stor_device,
+ 		int loglevel = (stor_pkt->vm_srb.cdb[0] == TEST_UNIT_READY) ?
+ 			STORVSC_LOGGING_WARN : STORVSC_LOGGING_ERROR;
+ 
+-		storvsc_log(device, loglevel,
++		storvsc_log_ratelimited(device, loglevel,
+ 			"tag#%d cmd 0x%x status: scsi 0x%x srb 0x%x hv 0x%x\n",
+ 			scsi_cmd_to_rq(request->cmd)->tag,
+ 			stor_pkt->vm_srb.cdb[0],
+diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
+index bc143a86c2ddf0..53d9fc41acc522 100644
+--- a/drivers/usb/gadget/function/u_serial.c
++++ b/drivers/usb/gadget/function/u_serial.c
+@@ -1420,10 +1420,6 @@ void gserial_disconnect(struct gserial *gser)
+ 	/* REVISIT as above: how best to track this? */
+ 	port->port_line_coding = gser->port_line_coding;
+ 
+-	/* disable endpoints, aborting down any active I/O */
+-	usb_ep_disable(gser->out);
+-	usb_ep_disable(gser->in);
+-
+ 	port->port_usb = NULL;
+ 	gser->ioport = NULL;
+ 	if (port->port.count > 0) {
+@@ -1435,6 +1431,10 @@ void gserial_disconnect(struct gserial *gser)
+ 	spin_unlock(&port->port_lock);
+ 	spin_unlock_irqrestore(&serial_port_lock, flags);
+ 
++	/* disable endpoints, aborting down any active I/O */
++	usb_ep_disable(gser->out);
++	usb_ep_disable(gser->in);
++
+ 	/* finally, free any unused/unusable I/O buffers */
+ 	spin_lock_irqsave(&port->port_lock, flags);
+ 	if (port->port.count == 0)
+diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c
+index a317bdbd00ad5c..72fe83a6c97801 100644
+--- a/drivers/usb/serial/quatech2.c
++++ b/drivers/usb/serial/quatech2.c
+@@ -503,7 +503,7 @@ static void qt2_process_read_urb(struct urb *urb)
+ 
+ 				newport = *(ch + 3);
+ 
+-				if (newport > serial->num_ports) {
++				if (newport >= serial->num_ports) {
+ 					dev_err(&port->dev,
+ 						"%s - port change to invalid port: %i\n",
+ 						__func__, newport);
+diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c
+index e53757d1d0958a..3bf1043cd7957c 100644
+--- a/drivers/vfio/platform/vfio_platform_common.c
++++ b/drivers/vfio/platform/vfio_platform_common.c
+@@ -388,6 +388,11 @@ static ssize_t vfio_platform_read_mmio(struct vfio_platform_region *reg,
+ {
+ 	unsigned int done = 0;
+ 
++	if (off >= reg->size)
++		return -EINVAL;
++
++	count = min_t(size_t, count, reg->size - off);
++
+ 	if (!reg->ioaddr) {
+ 		reg->ioaddr =
+ 			ioremap(reg->addr, reg->size);
+@@ -467,6 +472,11 @@ static ssize_t vfio_platform_write_mmio(struct vfio_platform_region *reg,
+ {
+ 	unsigned int done = 0;
+ 
++	if (off >= reg->size)
++		return -EINVAL;
++
++	count = min_t(size_t, count, reg->size - off);
++
+ 	if (!reg->ioaddr) {
+ 		reg->ioaddr =
+ 			ioremap(reg->addr, reg->size);
+diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
+index f7dd64856c9bff..ac6f50167076d8 100644
+--- a/fs/gfs2/file.c
++++ b/fs/gfs2/file.c
+@@ -251,6 +251,7 @@ static int do_gfs2_set_flags(struct inode *inode, u32 reqflags, u32 mask)
+ 		error = filemap_fdatawait(inode->i_mapping);
+ 		if (error)
+ 			goto out;
++		truncate_inode_pages(inode->i_mapping, 0);
+ 		if (new_flags & GFS2_DIF_JDATA)
+ 			gfs2_ordered_del_inode(ip);
+ 	}
+diff --git a/fs/libfs.c b/fs/libfs.c
+index 46966fd8bcf9f0..b0f262223b5351 100644
+--- a/fs/libfs.c
++++ b/fs/libfs.c
+@@ -241,9 +241,16 @@ const struct inode_operations simple_dir_inode_operations = {
+ };
+ EXPORT_SYMBOL(simple_dir_inode_operations);
+ 
+-/* 0 is '.', 1 is '..', so always start with offset 2 or more */
++/* simple_offset_add() never assigns these to a dentry */
+ enum {
+-	DIR_OFFSET_MIN	= 2,
++	DIR_OFFSET_FIRST	= 2,		/* Find first real entry */
++	DIR_OFFSET_EOD		= S32_MAX,
++};
++
++/* simple_offset_add() allocation range */
++enum {
++	DIR_OFFSET_MIN		= DIR_OFFSET_FIRST + 1,
++	DIR_OFFSET_MAX		= DIR_OFFSET_EOD - 1,
+ };
+ 
+ static void offset_set(struct dentry *dentry, long offset)
+@@ -287,9 +294,10 @@ int simple_offset_add(struct offset_ctx *octx, struct dentry *dentry)
+ 		return -EBUSY;
+ 
+ 	ret = mtree_alloc_cyclic(&octx->mt, &offset, dentry, DIR_OFFSET_MIN,
+-				 LONG_MAX, &octx->next_offset, GFP_KERNEL);
+-	if (ret < 0)
+-		return ret;
++				 DIR_OFFSET_MAX, &octx->next_offset,
++				 GFP_KERNEL);
++	if (unlikely(ret < 0))
++		return ret == -EBUSY ? -ENOSPC : ret;
+ 
+ 	offset_set(dentry, offset);
+ 	return 0;
+@@ -325,38 +333,6 @@ void simple_offset_remove(struct offset_ctx *octx, struct dentry *dentry)
+ 	offset_set(dentry, 0);
+ }
+ 
+-/**
+- * simple_offset_empty - Check if a dentry can be unlinked
+- * @dentry: dentry to be tested
+- *
+- * Returns 0 if @dentry is a non-empty directory; otherwise returns 1.
+- */
+-int simple_offset_empty(struct dentry *dentry)
+-{
+-	struct inode *inode = d_inode(dentry);
+-	struct offset_ctx *octx;
+-	struct dentry *child;
+-	unsigned long index;
+-	int ret = 1;
+-
+-	if (!inode || !S_ISDIR(inode->i_mode))
+-		return ret;
+-
+-	index = DIR_OFFSET_MIN;
+-	octx = inode->i_op->get_offset_ctx(inode);
+-	mt_for_each(&octx->mt, child, index, LONG_MAX) {
+-		spin_lock(&child->d_lock);
+-		if (simple_positive(child)) {
+-			spin_unlock(&child->d_lock);
+-			ret = 0;
+-			break;
+-		}
+-		spin_unlock(&child->d_lock);
+-	}
+-
+-	return ret;
+-}
+-
+ /**
+  * simple_offset_rename - handle directory offsets for rename
+  * @old_dir: parent directory of source entry
+@@ -450,14 +426,6 @@ void simple_offset_destroy(struct offset_ctx *octx)
+ 	mtree_destroy(&octx->mt);
+ }
+ 
+-static int offset_dir_open(struct inode *inode, struct file *file)
+-{
+-	struct offset_ctx *ctx = inode->i_op->get_offset_ctx(inode);
+-
+-	file->private_data = (void *)ctx->next_offset;
+-	return 0;
+-}
+-
+ /**
+  * offset_dir_llseek - Advance the read position of a directory descriptor
+  * @file: an open directory whose position is to be updated
+@@ -471,9 +439,6 @@ static int offset_dir_open(struct inode *inode, struct file *file)
+  */
+ static loff_t offset_dir_llseek(struct file *file, loff_t offset, int whence)
+ {
+-	struct inode *inode = file->f_inode;
+-	struct offset_ctx *ctx = inode->i_op->get_offset_ctx(inode);
+-
+ 	switch (whence) {
+ 	case SEEK_CUR:
+ 		offset += file->f_pos;
+@@ -486,62 +451,89 @@ static loff_t offset_dir_llseek(struct file *file, loff_t offset, int whence)
+ 		return -EINVAL;
+ 	}
+ 
+-	/* In this case, ->private_data is protected by f_pos_lock */
+-	if (!offset)
+-		file->private_data = (void *)ctx->next_offset;
+ 	return vfs_setpos(file, offset, LONG_MAX);
+ }
+ 
+-static struct dentry *offset_find_next(struct offset_ctx *octx, loff_t offset)
++static struct dentry *find_positive_dentry(struct dentry *parent,
++					   struct dentry *dentry,
++					   bool next)
+ {
+-	MA_STATE(mas, &octx->mt, offset, offset);
++	struct dentry *found = NULL;
++
++	spin_lock(&parent->d_lock);
++	if (next)
++		dentry = d_next_sibling(dentry);
++	else if (!dentry)
++		dentry = d_first_child(parent);
++	hlist_for_each_entry_from(dentry, d_sib) {
++		if (!simple_positive(dentry))
++			continue;
++		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
++		if (simple_positive(dentry))
++			found = dget_dlock(dentry);
++		spin_unlock(&dentry->d_lock);
++		if (likely(found))
++			break;
++	}
++	spin_unlock(&parent->d_lock);
++	return found;
++}
++
++static noinline_for_stack struct dentry *
++offset_dir_lookup(struct dentry *parent, loff_t offset)
++{
++	struct inode *inode = d_inode(parent);
++	struct offset_ctx *octx = inode->i_op->get_offset_ctx(inode);
+ 	struct dentry *child, *found = NULL;
+ 
+-	rcu_read_lock();
+-	child = mas_find(&mas, LONG_MAX);
+-	if (!child)
+-		goto out;
+-	spin_lock(&child->d_lock);
+-	if (simple_positive(child))
+-		found = dget_dlock(child);
+-	spin_unlock(&child->d_lock);
+-out:
+-	rcu_read_unlock();
++	MA_STATE(mas, &octx->mt, offset, offset);
++
++	if (offset == DIR_OFFSET_FIRST)
++		found = find_positive_dentry(parent, NULL, false);
++	else {
++		rcu_read_lock();
++		child = mas_find(&mas, DIR_OFFSET_MAX);
++		found = find_positive_dentry(parent, child, false);
++		rcu_read_unlock();
++	}
+ 	return found;
+ }
+ 
+ static bool offset_dir_emit(struct dir_context *ctx, struct dentry *dentry)
+ {
+ 	struct inode *inode = d_inode(dentry);
+-	long offset = dentry2offset(dentry);
+ 
+-	return ctx->actor(ctx, dentry->d_name.name, dentry->d_name.len, offset,
+-			  inode->i_ino, fs_umode_to_dtype(inode->i_mode));
++	return dir_emit(ctx, dentry->d_name.name, dentry->d_name.len,
++			inode->i_ino, fs_umode_to_dtype(inode->i_mode));
+ }
+ 
+-static void offset_iterate_dir(struct inode *inode, struct dir_context *ctx, long last_index)
++static void offset_iterate_dir(struct file *file, struct dir_context *ctx)
+ {
+-	struct offset_ctx *octx = inode->i_op->get_offset_ctx(inode);
++	struct dentry *dir = file->f_path.dentry;
+ 	struct dentry *dentry;
+ 
++	dentry = offset_dir_lookup(dir, ctx->pos);
++	if (!dentry)
++		goto out_eod;
+ 	while (true) {
+-		dentry = offset_find_next(octx, ctx->pos);
+-		if (!dentry)
+-			return;
+-
+-		if (dentry2offset(dentry) >= last_index) {
+-			dput(dentry);
+-			return;
+-		}
++		struct dentry *next;
+ 
+-		if (!offset_dir_emit(ctx, dentry)) {
+-			dput(dentry);
+-			return;
+-		}
++		ctx->pos = dentry2offset(dentry);
++		if (!offset_dir_emit(ctx, dentry))
++			break;
+ 
+-		ctx->pos = dentry2offset(dentry) + 1;
++		next = find_positive_dentry(dir, dentry, true);
+ 		dput(dentry);
++
++		if (!next)
++			goto out_eod;
++		dentry = next;
+ 	}
++	dput(dentry);
++	return;
++
++out_eod:
++	ctx->pos = DIR_OFFSET_EOD;
+ }
+ 
+ /**
+@@ -561,6 +553,8 @@ static void offset_iterate_dir(struct inode *inode, struct dir_context *ctx, lon
+  *
+  * On return, @ctx->pos contains an offset that will read the next entry
+  * in this directory when offset_readdir() is called again with @ctx.
++ * Caller places this value in the d_off field of the last entry in the
++ * user's buffer.
+  *
+  * Return values:
+  *   %0 - Complete
+@@ -568,19 +562,17 @@ static void offset_iterate_dir(struct inode *inode, struct dir_context *ctx, lon
+ static int offset_readdir(struct file *file, struct dir_context *ctx)
+ {
+ 	struct dentry *dir = file->f_path.dentry;
+-	long last_index = (long)file->private_data;
+ 
+ 	lockdep_assert_held(&d_inode(dir)->i_rwsem);
+ 
+ 	if (!dir_emit_dots(file, ctx))
+ 		return 0;
+-
+-	offset_iterate_dir(d_inode(dir), ctx, last_index);
++	if (ctx->pos != DIR_OFFSET_EOD)
++		offset_iterate_dir(file, ctx);
+ 	return 0;
+ }
+ 
+ const struct file_operations simple_offset_dir_operations = {
+-	.open		= offset_dir_open,
+ 	.llseek		= offset_dir_llseek,
+ 	.iterate_shared	= offset_readdir,
+ 	.read		= generic_read_dir,
+diff --git a/fs/smb/client/smb2inode.c b/fs/smb/client/smb2inode.c
+index a55f0044d30bde..b935c1a62d10cf 100644
+--- a/fs/smb/client/smb2inode.c
++++ b/fs/smb/client/smb2inode.c
+@@ -176,27 +176,27 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ 			    struct kvec *out_iov, int *out_buftype, struct dentry *dentry)
+ {
+ 
+-	struct reparse_data_buffer *rbuf;
++	struct smb2_query_info_rsp *qi_rsp = NULL;
+ 	struct smb2_compound_vars *vars = NULL;
+-	struct kvec *rsp_iov, *iov;
+-	struct smb_rqst *rqst;
+-	int rc;
+-	__le16 *utf16_path = NULL;
+ 	__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
+-	struct cifs_fid fid;
++	struct cifs_open_info_data *idata;
+ 	struct cifs_ses *ses = tcon->ses;
++	struct reparse_data_buffer *rbuf;
+ 	struct TCP_Server_Info *server;
+-	int num_rqst = 0, i;
+ 	int resp_buftype[MAX_COMPOUND];
+-	struct smb2_query_info_rsp *qi_rsp = NULL;
+-	struct cifs_open_info_data *idata;
++	int retries = 0, cur_sleep = 1;
++	__u8 delete_pending[8] = {1,};
++	struct kvec *rsp_iov, *iov;
+ 	struct inode *inode = NULL;
+-	int flags = 0;
+-	__u8 delete_pending[8] = {1, 0, 0, 0, 0, 0, 0, 0};
++	__le16 *utf16_path = NULL;
++	struct smb_rqst *rqst;
+ 	unsigned int size[2];
+-	void *data[2];
++	struct cifs_fid fid;
++	int num_rqst = 0, i;
+ 	unsigned int len;
+-	int retries = 0, cur_sleep = 1;
++	int tmp_rc, rc;
++	int flags = 0;
++	void *data[2];
+ 
+ replay_again:
+ 	/* reinitialize for possible replay */
+@@ -637,7 +637,14 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ 		tcon->need_reconnect = true;
+ 	}
+ 
++	tmp_rc = rc;
+ 	for (i = 0; i < num_cmds; i++) {
++		char *buf = rsp_iov[i + 1].iov_base;
++
++		if (buf && resp_buftype[i + 1] != CIFS_NO_BUFFER)
++			rc = server->ops->map_error(buf, false);
++		else
++			rc = tmp_rc;
+ 		switch (cmds[i]) {
+ 		case SMB2_OP_QUERY_INFO:
+ 			idata = in_iov[i].iov_base;
+@@ -803,6 +810,7 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ 		}
+ 	}
+ 	SMB2_close_free(&rqst[num_rqst]);
++	rc = tmp_rc;
+ 
+ 	num_cmds += 2;
+ 	if (out_iov && out_buftype) {
+@@ -858,22 +866,52 @@ static int parse_create_response(struct cifs_open_info_data *data,
+ 	return rc;
+ }
+ 
++/* Check only if SMB2_OP_QUERY_WSL_EA command failed in the compound chain */
++static bool ea_unsupported(int *cmds, int num_cmds,
++			   struct kvec *out_iov, int *out_buftype)
++{
++	int i;
++
++	if (cmds[num_cmds - 1] != SMB2_OP_QUERY_WSL_EA)
++		return false;
++
++	for (i = 1; i < num_cmds - 1; i++) {
++		struct smb2_hdr *hdr = out_iov[i].iov_base;
++
++		if (out_buftype[i] == CIFS_NO_BUFFER || !hdr ||
++		    hdr->Status != STATUS_SUCCESS)
++			return false;
++	}
++	return true;
++}
++
++static inline void free_rsp_iov(struct kvec *iovs, int *buftype, int count)
++{
++	int i;
++
++	for (i = 0; i < count; i++) {
++		free_rsp_buf(buftype[i], iovs[i].iov_base);
++		memset(&iovs[i], 0, sizeof(*iovs));
++		buftype[i] = CIFS_NO_BUFFER;
++	}
++}
++
+ int smb2_query_path_info(const unsigned int xid,
+ 			 struct cifs_tcon *tcon,
+ 			 struct cifs_sb_info *cifs_sb,
+ 			 const char *full_path,
+ 			 struct cifs_open_info_data *data)
+ {
++	struct kvec in_iov[3], out_iov[5] = {};
++	struct cached_fid *cfid = NULL;
+ 	struct cifs_open_parms oparms;
+-	__u32 create_options = 0;
+ 	struct cifsFileInfo *cfile;
+-	struct cached_fid *cfid = NULL;
++	__u32 create_options = 0;
++	int out_buftype[5] = {};
+ 	struct smb2_hdr *hdr;
+-	struct kvec in_iov[3], out_iov[3] = {};
+-	int out_buftype[3] = {};
++	int num_cmds = 0;
+ 	int cmds[3];
+ 	bool islink;
+-	int i, num_cmds = 0;
+ 	int rc, rc2;
+ 
+ 	data->adjust_tz = false;
+@@ -943,14 +981,14 @@ int smb2_query_path_info(const unsigned int xid,
+ 		if (rc || !data->reparse_point)
+ 			goto out;
+ 
+-		if (!tcon->posix_extensions)
+-			cmds[num_cmds++] = SMB2_OP_QUERY_WSL_EA;
+ 		/*
+ 		 * Skip SMB2_OP_GET_REPARSE if symlink already parsed in create
+ 		 * response.
+ 		 */
+ 		if (data->reparse.tag != IO_REPARSE_TAG_SYMLINK)
+ 			cmds[num_cmds++] = SMB2_OP_GET_REPARSE;
++		if (!tcon->posix_extensions)
++			cmds[num_cmds++] = SMB2_OP_QUERY_WSL_EA;
+ 
+ 		oparms = CIFS_OPARMS(cifs_sb, tcon, full_path,
+ 				     FILE_READ_ATTRIBUTES |
+@@ -958,9 +996,18 @@ int smb2_query_path_info(const unsigned int xid,
+ 				     FILE_OPEN, create_options |
+ 				     OPEN_REPARSE_POINT, ACL_NO_MODE);
+ 		cifs_get_readable_path(tcon, full_path, &cfile);
++		free_rsp_iov(out_iov, out_buftype, ARRAY_SIZE(out_iov));
+ 		rc = smb2_compound_op(xid, tcon, cifs_sb, full_path,
+ 				      &oparms, in_iov, cmds, num_cmds,
+-				      cfile, NULL, NULL, NULL);
++				      cfile, out_iov, out_buftype, NULL);
++		if (rc && ea_unsupported(cmds, num_cmds,
++					 out_iov, out_buftype)) {
++			if (data->reparse.tag != IO_REPARSE_TAG_LX_BLK &&
++			    data->reparse.tag != IO_REPARSE_TAG_LX_CHR)
++				rc = 0;
++			else
++				rc = -EOPNOTSUPP;
++		}
+ 		break;
+ 	case -EREMOTE:
+ 		break;
+@@ -978,8 +1025,7 @@ int smb2_query_path_info(const unsigned int xid,
+ 	}
+ 
+ out:
+-	for (i = 0; i < ARRAY_SIZE(out_buftype); i++)
+-		free_rsp_buf(out_buftype[i], out_iov[i].iov_base);
++	free_rsp_iov(out_iov, out_buftype, ARRAY_SIZE(out_iov));
+ 	return rc;
+ }
+ 
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index 4b5cad44a12683..fc3de42d9d764f 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -3434,7 +3434,6 @@ struct offset_ctx {
+ void simple_offset_init(struct offset_ctx *octx);
+ int simple_offset_add(struct offset_ctx *octx, struct dentry *dentry);
+ void simple_offset_remove(struct offset_ctx *octx, struct dentry *dentry);
+-int simple_offset_empty(struct dentry *dentry);
+ int simple_offset_rename(struct inode *old_dir, struct dentry *old_dentry,
+ 			 struct inode *new_dir, struct dentry *new_dentry);
+ int simple_offset_rename_exchange(struct inode *old_dir,
+diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h
+index 709ad84809e1ea..8934c7da47f4c3 100644
+--- a/include/linux/seccomp.h
++++ b/include/linux/seccomp.h
+@@ -50,10 +50,10 @@ struct seccomp_data;
+ 
+ #ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
+ static inline int secure_computing(void) { return 0; }
+-static inline int __secure_computing(const struct seccomp_data *sd) { return 0; }
+ #else
+ static inline void secure_computing_strict(int this_syscall) { return; }
+ #endif
++static inline int __secure_computing(const struct seccomp_data *sd) { return 0; }
+ 
+ static inline long prctl_get_seccomp(void)
+ {
+diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
+index 6f3b6de230bd2b..a67bae350416b3 100644
+--- a/io_uring/rsrc.c
++++ b/io_uring/rsrc.c
+@@ -1153,6 +1153,13 @@ static int io_clone_buffers(struct io_ring_ctx *ctx, struct io_ring_ctx *src_ctx
+ 	struct io_rsrc_data *data;
+ 	int i, ret, nbufs;
+ 
++	/*
++	 * Accounting state is shared between the two rings; that only works if
++	 * both rings are accounted towards the same counters.
++	 */
++	if (ctx->user != src_ctx->user || ctx->mm_account != src_ctx->mm_account)
++		return -EINVAL;
++
+ 	/*
+ 	 * Drop our own lock here. We'll setup the data we need and reference
+ 	 * the source buffers, then re-grab, check, and assign at the end.
+diff --git a/mm/filemap.c b/mm/filemap.c
+index dc83baab85a140..05adf0392625da 100644
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -4383,6 +4383,20 @@ static void filemap_cachestat(struct address_space *mapping,
+ 	rcu_read_unlock();
+ }
+ 
++/*
++ * See mincore: reveal pagecache information only for files
++ * that the calling process has write access to, or could (if
++ * tried) open for writing.
++ */
++static inline bool can_do_cachestat(struct file *f)
++{
++	if (f->f_mode & FMODE_WRITE)
++		return true;
++	if (inode_owner_or_capable(file_mnt_idmap(f), file_inode(f)))
++		return true;
++	return file_permission(f, MAY_WRITE) == 0;
++}
++
+ /*
+  * The cachestat(2) system call.
+  *
+@@ -4442,6 +4456,11 @@ SYSCALL_DEFINE4(cachestat, unsigned int, fd,
+ 		return -EOPNOTSUPP;
+ 	}
+ 
++	if (!can_do_cachestat(fd_file(f))) {
++		fdput(f);
++		return -EPERM;
++	}
++
+ 	if (flags != 0) {
+ 		fdput(f);
+ 		return -EINVAL;
+diff --git a/mm/shmem.c b/mm/shmem.c
+index dd4eb11c84b59e..5960e5035f9835 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -3700,7 +3700,7 @@ static int shmem_unlink(struct inode *dir, struct dentry *dentry)
+ 
+ static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
+ {
+-	if (!simple_offset_empty(dentry))
++	if (!simple_empty(dentry))
+ 		return -ENOTEMPTY;
+ 
+ 	drop_nlink(d_inode(dentry));
+@@ -3757,7 +3757,7 @@ static int shmem_rename2(struct mnt_idmap *idmap,
+ 		return simple_offset_rename_exchange(old_dir, old_dentry,
+ 						     new_dir, new_dentry);
+ 
+-	if (!simple_offset_empty(new_dentry))
++	if (!simple_empty(new_dentry))
+ 		return -ENOTEMPTY;
+ 
+ 	if (flags & RENAME_WHITEOUT) {
+diff --git a/mm/zswap.c b/mm/zswap.c
+index 0030ce8fecfc56..7fefb2eb3fcd80 100644
+--- a/mm/zswap.c
++++ b/mm/zswap.c
+@@ -251,7 +251,7 @@ static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
+ 	struct zswap_pool *pool;
+ 	char name[38]; /* 'zswap' + 32 char (max) num + \0 */
+ 	gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
+-	int ret;
++	int ret, cpu;
+ 
+ 	if (!zswap_has_pool) {
+ 		/* if either are unset, pool initialization failed, and we
+@@ -285,6 +285,9 @@ static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
+ 		goto error;
+ 	}
+ 
++	for_each_possible_cpu(cpu)
++		mutex_init(&per_cpu_ptr(pool->acomp_ctx, cpu)->mutex);
++
+ 	ret = cpuhp_state_add_instance(CPUHP_MM_ZSWP_POOL_PREPARE,
+ 				       &pool->node);
+ 	if (ret)
+@@ -812,36 +815,41 @@ static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
+ {
+ 	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
+ 	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
+-	struct crypto_acomp *acomp;
+-	struct acomp_req *req;
++	struct crypto_acomp *acomp = NULL;
++	struct acomp_req *req = NULL;
++	u8 *buffer = NULL;
+ 	int ret;
+ 
+-	mutex_init(&acomp_ctx->mutex);
+-
+-	acomp_ctx->buffer = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
+-	if (!acomp_ctx->buffer)
+-		return -ENOMEM;
++	buffer = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
++	if (!buffer) {
++		ret = -ENOMEM;
++		goto fail;
++	}
+ 
+ 	acomp = crypto_alloc_acomp_node(pool->tfm_name, 0, 0, cpu_to_node(cpu));
+ 	if (IS_ERR(acomp)) {
+ 		pr_err("could not alloc crypto acomp %s : %ld\n",
+ 				pool->tfm_name, PTR_ERR(acomp));
+ 		ret = PTR_ERR(acomp);
+-		goto acomp_fail;
++		goto fail;
+ 	}
+-	acomp_ctx->acomp = acomp;
+-	acomp_ctx->is_sleepable = acomp_is_async(acomp);
+ 
+-	req = acomp_request_alloc(acomp_ctx->acomp);
++	req = acomp_request_alloc(acomp);
+ 	if (!req) {
+ 		pr_err("could not alloc crypto acomp_request %s\n",
+ 		       pool->tfm_name);
+ 		ret = -ENOMEM;
+-		goto req_fail;
++		goto fail;
+ 	}
+-	acomp_ctx->req = req;
+ 
++	/*
++	 * Only hold the mutex after completing allocations, otherwise we may
++	 * recurse into zswap through reclaim and attempt to hold the mutex
++	 * again resulting in a deadlock.
++	 */
++	mutex_lock(&acomp_ctx->mutex);
+ 	crypto_init_wait(&acomp_ctx->wait);
++
+ 	/*
+ 	 * if the backend of acomp is async zip, crypto_req_done() will wakeup
+ 	 * crypto_wait_req(); if the backend of acomp is scomp, the callback
+@@ -850,12 +858,17 @@ static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
+ 	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ 				   crypto_req_done, &acomp_ctx->wait);
+ 
++	acomp_ctx->buffer = buffer;
++	acomp_ctx->acomp = acomp;
++	acomp_ctx->is_sleepable = acomp_is_async(acomp);
++	acomp_ctx->req = req;
++	mutex_unlock(&acomp_ctx->mutex);
+ 	return 0;
+ 
+-req_fail:
+-	crypto_free_acomp(acomp_ctx->acomp);
+-acomp_fail:
+-	kfree(acomp_ctx->buffer);
++fail:
++	if (acomp)
++		crypto_free_acomp(acomp);
++	kfree(buffer);
+ 	return ret;
+ }
+ 
+@@ -864,17 +877,45 @@ static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
+ 	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
+ 	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
+ 
++	mutex_lock(&acomp_ctx->mutex);
+ 	if (!IS_ERR_OR_NULL(acomp_ctx)) {
+ 		if (!IS_ERR_OR_NULL(acomp_ctx->req))
+ 			acomp_request_free(acomp_ctx->req);
++		acomp_ctx->req = NULL;
+ 		if (!IS_ERR_OR_NULL(acomp_ctx->acomp))
+ 			crypto_free_acomp(acomp_ctx->acomp);
+ 		kfree(acomp_ctx->buffer);
+ 	}
++	mutex_unlock(&acomp_ctx->mutex);
+ 
+ 	return 0;
+ }
+ 
++static struct crypto_acomp_ctx *acomp_ctx_get_cpu_lock(struct zswap_pool *pool)
++{
++	struct crypto_acomp_ctx *acomp_ctx;
++
++	for (;;) {
++		acomp_ctx = raw_cpu_ptr(pool->acomp_ctx);
++		mutex_lock(&acomp_ctx->mutex);
++		if (likely(acomp_ctx->req))
++			return acomp_ctx;
++		/*
++		 * It is possible that we were migrated to a different CPU after
++		 * getting the per-CPU ctx but before the mutex was acquired. If
++		 * the old CPU got offlined, zswap_cpu_comp_dead() could have
++		 * already freed ctx->req (among other things) and set it to
++		 * NULL. Just try again on the new CPU that we ended up on.
++		 */
++		mutex_unlock(&acomp_ctx->mutex);
++	}
++}
++
++static void acomp_ctx_put_unlock(struct crypto_acomp_ctx *acomp_ctx)
++{
++	mutex_unlock(&acomp_ctx->mutex);
++}
++
+ static bool zswap_compress(struct folio *folio, struct zswap_entry *entry)
+ {
+ 	struct crypto_acomp_ctx *acomp_ctx;
+@@ -887,10 +928,7 @@ static bool zswap_compress(struct folio *folio, struct zswap_entry *entry)
+ 	gfp_t gfp;
+ 	u8 *dst;
+ 
+-	acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
+-
+-	mutex_lock(&acomp_ctx->mutex);
+-
++	acomp_ctx = acomp_ctx_get_cpu_lock(entry->pool);
+ 	dst = acomp_ctx->buffer;
+ 	sg_init_table(&input, 1);
+ 	sg_set_folio(&input, folio, PAGE_SIZE, 0);
+@@ -943,7 +981,7 @@ static bool zswap_compress(struct folio *folio, struct zswap_entry *entry)
+ 	else if (alloc_ret)
+ 		zswap_reject_alloc_fail++;
+ 
+-	mutex_unlock(&acomp_ctx->mutex);
++	acomp_ctx_put_unlock(acomp_ctx);
+ 	return comp_ret == 0 && alloc_ret == 0;
+ }
+ 
+@@ -954,9 +992,7 @@ static void zswap_decompress(struct zswap_entry *entry, struct folio *folio)
+ 	struct crypto_acomp_ctx *acomp_ctx;
+ 	u8 *src;
+ 
+-	acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
+-	mutex_lock(&acomp_ctx->mutex);
+-
++	acomp_ctx = acomp_ctx_get_cpu_lock(entry->pool);
+ 	src = zpool_map_handle(zpool, entry->handle, ZPOOL_MM_RO);
+ 	/*
+ 	 * If zpool_map_handle is atomic, we cannot reliably utilize its mapped buffer
+@@ -980,10 +1016,10 @@ static void zswap_decompress(struct zswap_entry *entry, struct folio *folio)
+ 	acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, PAGE_SIZE);
+ 	BUG_ON(crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait));
+ 	BUG_ON(acomp_ctx->req->dlen != PAGE_SIZE);
+-	mutex_unlock(&acomp_ctx->mutex);
+ 
+ 	if (src != acomp_ctx->buffer)
+ 		zpool_unmap_handle(zpool, entry->handle);
++	acomp_ctx_put_unlock(acomp_ctx);
+ }
+ 
+ /*********************************
+diff --git a/net/sched/sch_ets.c b/net/sched/sch_ets.c
+index f80bc05d4c5a50..516038a4416380 100644
+--- a/net/sched/sch_ets.c
++++ b/net/sched/sch_ets.c
+@@ -91,6 +91,8 @@ ets_class_from_arg(struct Qdisc *sch, unsigned long arg)
+ {
+ 	struct ets_sched *q = qdisc_priv(sch);
+ 
++	if (arg == 0 || arg > q->nbands)
++		return NULL;
+ 	return &q->classes[arg - 1];
+ }
+ 
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index a9f6138b59b0c1..8c4de5a253addf 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -10916,8 +10916,8 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x38e0, "Yoga Y990 Intel VECO Dual", ALC287_FIXUP_TAS2781_I2C),
+ 	SND_PCI_QUIRK(0x17aa, 0x38f8, "Yoga Book 9i", ALC287_FIXUP_TAS2781_I2C),
+ 	SND_PCI_QUIRK(0x17aa, 0x38df, "Y990 YG DUAL", ALC287_FIXUP_TAS2781_I2C),
+-	SND_PCI_QUIRK(0x17aa, 0x38f9, "Thinkbook 16P Gen5", ALC287_FIXUP_CS35L41_I2C_2),
+-	SND_PCI_QUIRK(0x17aa, 0x38fa, "Thinkbook 16P Gen5", ALC287_FIXUP_CS35L41_I2C_2),
++	SND_PCI_QUIRK(0x17aa, 0x38f9, "Thinkbook 16P Gen5", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD),
++	SND_PCI_QUIRK(0x17aa, 0x38fa, "Thinkbook 16P Gen5", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD),
+ 	SND_PCI_QUIRK(0x17aa, 0x38fd, "ThinkBook plus Gen5 Hybrid", ALC287_FIXUP_TAS2781_I2C),
+ 	SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
+ 	SND_PCI_QUIRK(0x17aa, 0x3913, "Lenovo 145", ALC236_FIXUP_LENOVO_INV_DMIC),
+diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
+index 7092842480ef17..0d9d1d250f2b5e 100644
+--- a/sound/soc/codecs/Kconfig
++++ b/sound/soc/codecs/Kconfig
+@@ -2397,6 +2397,7 @@ config SND_SOC_WM8993
+ 
+ config SND_SOC_WM8994
+ 	tristate
++	depends on MFD_WM8994
+ 
+ config SND_SOC_WM8995
+ 	tristate
+diff --git a/sound/soc/codecs/cs42l43.c b/sound/soc/codecs/cs42l43.c
+index d0098b4558b529..8ec4083cd3b807 100644
+--- a/sound/soc/codecs/cs42l43.c
++++ b/sound/soc/codecs/cs42l43.c
+@@ -2446,6 +2446,7 @@ static const struct dev_pm_ops cs42l43_codec_pm_ops = {
+ 	SYSTEM_SLEEP_PM_OPS(cs42l43_codec_suspend, cs42l43_codec_resume)
+ 	NOIRQ_SYSTEM_SLEEP_PM_OPS(cs42l43_codec_suspend_noirq, cs42l43_codec_resume_noirq)
+ 	RUNTIME_PM_OPS(NULL, cs42l43_codec_runtime_resume, NULL)
++	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
+ };
+ 
+ static const struct platform_device_id cs42l43_codec_id_table[] = {
+diff --git a/sound/soc/codecs/es8316.c b/sound/soc/codecs/es8316.c
+index 61729e5b50a8e4..f508df01145bfb 100644
+--- a/sound/soc/codecs/es8316.c
++++ b/sound/soc/codecs/es8316.c
+@@ -39,7 +39,9 @@ struct es8316_priv {
+ 	struct snd_soc_jack *jack;
+ 	int irq;
+ 	unsigned int sysclk;
+-	unsigned int allowed_rates[ARRAY_SIZE(supported_mclk_lrck_ratios)];
++	/* ES83xx supports halving the MCLK so it supports twice as many rates
++	 */
++	unsigned int allowed_rates[ARRAY_SIZE(supported_mclk_lrck_ratios) * 2];
+ 	struct snd_pcm_hw_constraint_list sysclk_constraints;
+ 	bool jd_inverted;
+ };
+@@ -386,6 +388,12 @@ static int es8316_set_dai_sysclk(struct snd_soc_dai *codec_dai,
+ 
+ 		if (freq % ratio == 0)
+ 			es8316->allowed_rates[count++] = freq / ratio;
++
++		/* We also check if the halved MCLK produces a valid rate
++		 * since the codec supports halving the MCLK.
++		 */
++		if ((freq / ratio) % 2 == 0)
++			es8316->allowed_rates[count++] = freq / ratio / 2;
+ 	}
+ 
+ 	if (count) {
+diff --git a/sound/soc/samsung/Kconfig b/sound/soc/samsung/Kconfig
+index 4b1ea7b2c79617..60b4b7b7521554 100644
+--- a/sound/soc/samsung/Kconfig
++++ b/sound/soc/samsung/Kconfig
+@@ -127,8 +127,9 @@ config SND_SOC_SAMSUNG_TM2_WM5110
+ 
+ config SND_SOC_SAMSUNG_ARIES_WM8994
+ 	tristate "SoC I2S Audio support for WM8994 on Aries"
+-	depends on SND_SOC_SAMSUNG && MFD_WM8994 && IIO && EXTCON
++	depends on SND_SOC_SAMSUNG && I2C && IIO && EXTCON
+ 	select SND_SOC_BT_SCO
++	select MFD_WM8994
+ 	select SND_SOC_WM8994
+ 	select SND_SAMSUNG_I2S
+ 	help
+@@ -140,8 +141,9 @@ config SND_SOC_SAMSUNG_ARIES_WM8994
+ 
+ config SND_SOC_SAMSUNG_MIDAS_WM1811
+ 	tristate "SoC I2S Audio support for Midas boards"
+-	depends on SND_SOC_SAMSUNG && IIO
++	depends on SND_SOC_SAMSUNG && I2C && IIO
+ 	select SND_SAMSUNG_I2S
++	select MFD_WM8994
+ 	select SND_SOC_WM8994
+ 	help
+ 	  Say Y if you want to add support for SoC audio on the Midas boards.
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 8ba0aff8be2ec2..7968d6a2f592ac 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -2239,6 +2239,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ 		   QUIRK_FLAG_CTL_MSG_DELAY_1M),
+ 	DEVICE_FLG(0x0c45, 0x6340, /* Sonix HD USB Camera */
+ 		   QUIRK_FLAG_GET_SAMPLE_RATE),
++	DEVICE_FLG(0x0d8c, 0x0014, /* USB Audio Device */
++		   QUIRK_FLAG_CTL_MSG_DELAY_1M),
+ 	DEVICE_FLG(0x0ecb, 0x205c, /* JBL Quantum610 Wireless */
+ 		   QUIRK_FLAG_FIXED_RATE),
+ 	DEVICE_FLG(0x0ecb, 0x2069, /* JBL Quantum810 Wireless */


* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2025-01-30 12:47 Mike Pagano
  0 siblings, 0 replies; 82+ messages in thread
From: Mike Pagano @ 2025-01-30 12:47 UTC (permalink / raw
  To: gentoo-commits

commit:     5e62dc25f02e5f291109108ac41ca60469dc4531
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Jan 30 12:47:37 2025 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Jan 30 12:47:37 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=5e62dc25

Update CPU Optimization patch

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 5010_enable-cpu-optimizations-universal.patch | 64 ++++++++++++++++-----------
 1 file changed, 38 insertions(+), 26 deletions(-)

diff --git a/5010_enable-cpu-optimizations-universal.patch b/5010_enable-cpu-optimizations-universal.patch
index 0758b0ba..5011aaa6 100644
--- a/5010_enable-cpu-optimizations-universal.patch
+++ b/5010_enable-cpu-optimizations-universal.patch
@@ -116,13 +116,13 @@ REFERENCES
 4.  http://www.linuxforge.net/docs/linux/linux-gcc.php
 
 ---
- arch/x86/Kconfig.cpu            | 359 ++++++++++++++++++++++++++++++--
- arch/x86/Makefile               |  87 +++++++-
- arch/x86/include/asm/vermagic.h |  70 +++++++
- 3 files changed, 499 insertions(+), 17 deletions(-)
+ arch/x86/Kconfig.cpu            | 367 ++++++++++++++++++++++++++++++--
+ arch/x86/Makefile               |  89 +++++++-
+ arch/x86/include/asm/vermagic.h |  72 +++++++
+ 3 files changed, 511 insertions(+), 17 deletions(-)
 
 diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
-index 2a7279d80460..abfadddd1b23 100644
+index ce5ed2c2db0c..6d89f21aba52 100644
 --- a/arch/x86/Kconfig.cpu
 +++ b/arch/x86/Kconfig.cpu
 @@ -155,9 +155,8 @@ config MPENTIUM4
@@ -252,7 +252,7 @@ index 2a7279d80460..abfadddd1b23 100644
 +
 +config MZEN5
 +	bool "AMD Zen 5"
-+	depends on (CC_IS_GCC && GCC_VERSION > 140000) || (CC_IS_CLANG && CLANG_VERSION >= 191000)
++	depends on (CC_IS_GCC && GCC_VERSION > 140000) || (CC_IS_CLANG && CLANG_VERSION >= 190100)
 +	help
 +	  Select this for AMD Family 19h Zen 5 processors.
 +
@@ -280,7 +280,7 @@ index 2a7279d80460..abfadddd1b23 100644
  	help
 
  	  Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and
-@@ -278,14 +388,191 @@ config MCORE2
+@@ -278,14 +388,199 @@ config MCORE2
  	  family in /proc/cpuinfo. Newer ones have 6 and older ones 15
  	  (not a typo)
 
@@ -388,14 +388,22 @@ index 2a7279d80460..abfadddd1b23 100644
 +
 +	  Enables -march=cannonlake
 +
-+config MICELAKE
++config MICELAKE_CLIENT
 +	bool "Intel Ice Lake"
 +	help
 +
-+	  Select this for 10th Gen Core processors in the Ice Lake family.
++	  Select this for 10th Gen Core client processors in the Ice Lake family.
 +
 +	  Enables -march=icelake-client
 +
++config MICELAKE_SERVER
++	bool "Intel Ice Lake Server"
++	help
++
++	  Select this for 10th Gen Core server processors in the Ice Lake family.
++
++	  Enables -march=icelake-server
++
 +config MCASCADELAKE
 +	bool "Intel Cascade Lake"
 +	help
@@ -478,7 +486,7 @@ index 2a7279d80460..abfadddd1b23 100644
 
  config GENERIC_CPU
  	bool "Generic-x86-64"
-@@ -294,6 +581,26 @@ config GENERIC_CPU
+@@ -294,6 +589,26 @@ config GENERIC_CPU
  	  Generic x86-64 CPU.
  	  Run equally well on all x86-64 CPUs.
 
@@ -505,7 +513,7 @@ index 2a7279d80460..abfadddd1b23 100644
  endchoice
 
  config X86_GENERIC
-@@ -308,6 +615,30 @@ config X86_GENERIC
+@@ -308,6 +623,30 @@ config X86_GENERIC
  	  This is really intended for distributors who need more
  	  generic optimizations.
 
@@ -536,34 +544,34 @@ index 2a7279d80460..abfadddd1b23 100644
  #
  # Define implied options from the CPU selection here
  config X86_INTERNODE_CACHE_SHIFT
-@@ -318,7 +649,7 @@ config X86_INTERNODE_CACHE_SHIFT
+@@ -318,7 +657,7 @@ config X86_INTERNODE_CACHE_SHIFT
  config X86_L1_CACHE_SHIFT
  	int
  	default "7" if MPENTIUM4 || MPSC
 -	default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
-+	default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MZEN4 || MZEN5 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MRAPTORLAKE || MMETEORLAKE || MEMERALDRAPIDS || MNATIVE_INTEL || MNATIVE_AMD
++	default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MZEN4 || MZEN5 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE_CLIENT || MICELAKE_SERVER || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MRAPTORLAKE || MMETEORLAKE || MEMERALDRAPIDS || MNATIVE_INTEL || MNATIVE_AMD
  	default "4" if MELAN || M486SX || M486 || MGEODEGX1
  	default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
 
-@@ -336,11 +667,11 @@ config X86_ALIGNMENT_16
+@@ -336,11 +675,11 @@ config X86_ALIGNMENT_16
 
  config X86_INTEL_USERCOPY
  	def_bool y
 -	depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2
-+	depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MRAPTORLAKE || MMETEORLAKE || MEMERALDRAPIDS || MNATIVE_INTEL
++	depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE_CLIENT || MICELAKE_SERVER || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MRAPTORLAKE || MMETEORLAKE || MEMERALDRAPIDS || MNATIVE_INTEL
 
  config X86_USE_PPRO_CHECKSUM
  	def_bool y
 -	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM
-+	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MZEN4 || MZEN5 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MRAPTORLAKE || MMETEORLAKE || MEMERALDRAPIDS || MNATIVE_INTEL || MNATIVE_AMD
++	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MZEN4 || MZEN5 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE_CLIENT || MICELAKE_SERVER || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MRAPTORLAKE || MMETEORLAKE || MEMERALDRAPIDS || MNATIVE_INTEL || MNATIVE_AMD
 
  #
  # P6_NOPs are a relatively minor optimization that require a family >=
 diff --git a/arch/x86/Makefile b/arch/x86/Makefile
-index cd75e78a06c1..396d1db12bca 100644
+index 3419ffa2a350..aafb069de612 100644
 --- a/arch/x86/Makefile
 +++ b/arch/x86/Makefile
-@@ -181,15 +181,96 @@ else
+@@ -152,15 +152,98 @@ else
          cflags-$(CONFIG_MK8)		+= -march=k8
          cflags-$(CONFIG_MPSC)		+= -march=nocona
          cflags-$(CONFIG_MCORE2)		+= -march=core2
@@ -605,7 +613,8 @@ index cd75e78a06c1..396d1db12bca 100644
 +        cflags-$(CONFIG_MSKYLAKE) 	+= -march=skylake
 +        cflags-$(CONFIG_MSKYLAKEX) 	+= -march=skylake-avx512
 +        cflags-$(CONFIG_MCANNONLAKE) 	+= -march=cannonlake
-+        cflags-$(CONFIG_MICELAKE) 	+= -march=icelake-client
++        cflags-$(CONFIG_MICELAKE_CLIENT) 	+= -march=icelake-client
++        cflags-$(CONFIG_MICELAKE_SERVER) 	+= -march=icelake-server
 +        cflags-$(CONFIG_MCASCADELAKE) 	+= -march=cascadelake
 +        cflags-$(CONFIG_MCOOPERLAKE) 	+= -march=cooperlake
 +        cflags-$(CONFIG_MTIGERLAKE) 	+= -march=tigerlake
@@ -650,7 +659,8 @@ index cd75e78a06c1..396d1db12bca 100644
 +        rustflags-$(CONFIG_MSKYLAKE) 	+= -Ctarget-cpu=skylake
 +        rustflags-$(CONFIG_MSKYLAKEX) 	+= -Ctarget-cpu=skylake-avx512
 +        rustflags-$(CONFIG_MCANNONLAKE) 	+= -Ctarget-cpu=cannonlake
-+        rustflags-$(CONFIG_MICELAKE) 	+= -Ctarget-cpu=icelake-client
++        rustflags-$(CONFIG_MICELAKE_CLIENT) 	+= -Ctarget-cpu=icelake-client
++        rustflags-$(CONFIG_MICELAKE_SERVER) 	+= -Ctarget-cpu=icelake-server
 +        rustflags-$(CONFIG_MCASCADELAKE) 	+= -Ctarget-cpu=cascadelake
 +        rustflags-$(CONFIG_MCOOPERLAKE) 	+= -Ctarget-cpu=cooperlake
 +        rustflags-$(CONFIG_MTIGERLAKE) 	+= -Ctarget-cpu=tigerlake
@@ -664,10 +674,10 @@ index cd75e78a06c1..396d1db12bca 100644
 
          KBUILD_CFLAGS += -mno-red-zone
 diff --git a/arch/x86/include/asm/vermagic.h b/arch/x86/include/asm/vermagic.h
-index 75884d2cdec3..f4e29563473d 100644
+index 75884d2cdec3..2fdae271f47f 100644
 --- a/arch/x86/include/asm/vermagic.h
 +++ b/arch/x86/include/asm/vermagic.h
-@@ -17,6 +17,54 @@
+@@ -17,6 +17,56 @@
  #define MODULE_PROC_FAMILY "586MMX "
  #elif defined CONFIG_MCORE2
  #define MODULE_PROC_FAMILY "CORE2 "
@@ -699,8 +709,10 @@ index 75884d2cdec3..f4e29563473d 100644
 +#define MODULE_PROC_FAMILY "SKYLAKEX "
 +#elif defined CONFIG_MCANNONLAKE
 +#define MODULE_PROC_FAMILY "CANNONLAKE "
-+#elif defined CONFIG_MICELAKE
-+#define MODULE_PROC_FAMILY "ICELAKE "
++#elif defined CONFIG_MICELAKE_CLIENT
++#define MODULE_PROC_FAMILY "ICELAKE_CLIENT "
++#elif defined CONFIG_MICELAKE_SERVER
++#define MODULE_PROC_FAMILY "ICELAKE_SERVER "
 +#elif defined CONFIG_MCASCADELAKE
 +#define MODULE_PROC_FAMILY "CASCADELAKE "
 +#elif defined CONFIG_MCOOPERLAKE
@@ -722,7 +734,7 @@ index 75884d2cdec3..f4e29563473d 100644
  #elif defined CONFIG_MATOM
  #define MODULE_PROC_FAMILY "ATOM "
  #elif defined CONFIG_M686
-@@ -35,6 +83,28 @@
+@@ -35,6 +85,28 @@
  #define MODULE_PROC_FAMILY "K7 "
  #elif defined CONFIG_MK8
  #define MODULE_PROC_FAMILY "K8 "
@@ -752,5 +764,5 @@ index 75884d2cdec3..f4e29563473d 100644
  #define MODULE_PROC_FAMILY "ELAN "
  #elif defined CONFIG_MCRUSOE
 --
-2.46.2
+2.47.1
 


* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2025-01-23 17:02 Mike Pagano
  0 siblings, 0 replies; 82+ messages in thread
From: Mike Pagano @ 2025-01-23 17:02 UTC (permalink / raw
  To: gentoo-commits

commit:     be3664f69c599632b14b89fe20fa9dd3418eff74
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Jan 23 17:02:05 2025 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Jan 23 17:02:05 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=be3664f6

Linux patch 6.12.11

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1010_linux-6.12.11.patch | 5351 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 5355 insertions(+)

diff --git a/0000_README b/0000_README
index 20574d29..9c94906b 100644
--- a/0000_README
+++ b/0000_README
@@ -83,6 +83,10 @@ Patch:  1009_linux-6.12.10.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.12.10
 
+Patch:  1010_linux-6.12.11.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.12.11
+
 Patch:  1510_fs-enable-link-security-restrictions-by-default.patch
 From:   http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch/
 Desc:   Enable link security restrictions by default.

diff --git a/1010_linux-6.12.11.patch b/1010_linux-6.12.11.patch
new file mode 100644
index 00000000..b7226d9f
--- /dev/null
+++ b/1010_linux-6.12.11.patch
@@ -0,0 +1,5351 @@
+diff --git a/Makefile b/Makefile
+index 233e9e88e402e7..7cf8f11975f89c 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 12
+-SUBLEVEL = 10
++SUBLEVEL = 11
+ EXTRAVERSION =
+ NAME = Baby Opossum Posse
+ 
+diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
+index aec6e2d3aa1d52..98bfc097389c4e 100644
+--- a/arch/x86/include/asm/special_insns.h
++++ b/arch/x86/include/asm/special_insns.h
+@@ -217,7 +217,7 @@ static inline int write_user_shstk_64(u64 __user *addr, u64 val)
+ 
+ #define nop() asm volatile ("nop")
+ 
+-static inline void serialize(void)
++static __always_inline void serialize(void)
+ {
+ 	/* Instruction opcode for SERIALIZE; supported in binutils >= 2.35. */
+ 	asm volatile(".byte 0xf, 0x1, 0xe8" ::: "memory");
+diff --git a/arch/x86/kernel/fred.c b/arch/x86/kernel/fred.c
+index 8d32c3f48abc0c..5e2cd10049804e 100644
+--- a/arch/x86/kernel/fred.c
++++ b/arch/x86/kernel/fred.c
+@@ -50,7 +50,13 @@ void cpu_init_fred_exceptions(void)
+ 	       FRED_CONFIG_ENTRYPOINT(asm_fred_entrypoint_user));
+ 
+ 	wrmsrl(MSR_IA32_FRED_STKLVLS, 0);
+-	wrmsrl(MSR_IA32_FRED_RSP0, 0);
++
++	/*
++	 * After a CPU offline/online cycle, the FRED RSP0 MSR should be
++	 * resynchronized with its per-CPU cache.
++	 */
++	wrmsrl(MSR_IA32_FRED_RSP0, __this_cpu_read(fred_rsp0));
++
+ 	wrmsrl(MSR_IA32_FRED_RSP1, 0);
+ 	wrmsrl(MSR_IA32_FRED_RSP2, 0);
+ 	wrmsrl(MSR_IA32_FRED_RSP3, 0);
+diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
+index d27a3bf96f80d8..90aaec923889cf 100644
+--- a/drivers/acpi/resource.c
++++ b/drivers/acpi/resource.c
+@@ -689,11 +689,11 @@ static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity,
+ 	for (i = 0; i < ARRAY_SIZE(override_table); i++) {
+ 		const struct irq_override_cmp *entry = &override_table[i];
+ 
+-		if (dmi_check_system(entry->system) &&
+-		    entry->irq == gsi &&
++		if (entry->irq == gsi &&
+ 		    entry->triggering == triggering &&
+ 		    entry->polarity == polarity &&
+-		    entry->shareable == shareable)
++		    entry->shareable == shareable &&
++		    dmi_check_system(entry->system))
+ 			return entry->override;
+ 	}
+ 
+diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
+index bf83a104086cce..76b326ddd75c47 100644
+--- a/drivers/block/zram/zram_drv.c
++++ b/drivers/block/zram/zram_drv.c
+@@ -1349,6 +1349,7 @@ static bool zram_meta_alloc(struct zram *zram, u64 disksize)
+ 	zram->mem_pool = zs_create_pool(zram->disk->disk_name);
+ 	if (!zram->mem_pool) {
+ 		vfree(zram->table);
++		zram->table = NULL;
+ 		return false;
+ 	}
+ 
+diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
+index 2561b215432a82..588ab1cc6d557c 100644
+--- a/drivers/cpufreq/Kconfig
++++ b/drivers/cpufreq/Kconfig
+@@ -311,8 +311,6 @@ config QORIQ_CPUFREQ
+ 	  This adds the CPUFreq driver support for Freescale QorIQ SoCs
+ 	  which are capable of changing the CPU's frequency dynamically.
+ 
+-endif
+-
+ config ACPI_CPPC_CPUFREQ
+ 	tristate "CPUFreq driver based on the ACPI CPPC spec"
+ 	depends on ACPI_PROCESSOR
+@@ -341,4 +339,6 @@ config ACPI_CPPC_CPUFREQ_FIE
+ 
+ 	  If in doubt, say N.
+ 
++endif
++
+ endmenu
+diff --git a/drivers/cpuidle/governors/teo.c b/drivers/cpuidle/governors/teo.c
+index f2992f92d8db86..173ddcac540ade 100644
+--- a/drivers/cpuidle/governors/teo.c
++++ b/drivers/cpuidle/governors/teo.c
+@@ -10,25 +10,27 @@
+  * DOC: teo-description
+  *
+  * The idea of this governor is based on the observation that on many systems
+- * timer events are two or more orders of magnitude more frequent than any
+- * other interrupts, so they are likely to be the most significant cause of CPU
+- * wakeups from idle states.  Moreover, information about what happened in the
+- * (relatively recent) past can be used to estimate whether or not the deepest
+- * idle state with target residency within the (known) time till the closest
+- * timer event, referred to as the sleep length, is likely to be suitable for
+- * the upcoming CPU idle period and, if not, then which of the shallower idle
+- * states to choose instead of it.
++ * timer interrupts are two or more orders of magnitude more frequent than any
++ * other interrupt types, so they are likely to dominate CPU wakeup patterns.
++ * Moreover, in principle, the time when the next timer event is going to occur
++ * can be determined at the idle state selection time, although doing that may
++ * be costly, so it can be regarded as the most reliable source of information
++ * for idle state selection.
+  *
+- * Of course, non-timer wakeup sources are more important in some use cases
+- * which can be covered by taking a few most recent idle time intervals of the
+- * CPU into account.  However, even in that context it is not necessary to
+- * consider idle duration values greater than the sleep length, because the
+- * closest timer will ultimately wake up the CPU anyway unless it is woken up
+- * earlier.
++ * Of course, non-timer wakeup sources are more important in some use cases,
++ * but even then it is generally unnecessary to consider idle duration values
++ * greater than the time till the next timer event, referred to as the sleep
++ * length in what follows, because the closest timer will ultimately wake up the
++ * CPU anyway unless it is woken up earlier.
+  *
+- * Thus this governor estimates whether or not the prospective idle duration of
+- * a CPU is likely to be significantly shorter than the sleep length and selects
+- * an idle state for it accordingly.
++ * However, since obtaining the sleep length may be costly, the governor first
++ * checks if it can select a shallow idle state using wakeup pattern information
++ * from recent times, in which case it can do without knowing the sleep length
++ * at all.  For this purpose, it counts CPU wakeup events and looks for an idle
++ * state whose target residency has not exceeded the idle duration (measured
++ * after wakeup) in the majority of relevant recent cases.  If the target
++ * residency of that state is small enough, it may be used right away and the
++ * sleep length need not be determined.
+  *
+  * The computations carried out by this governor are based on using bins whose
+  * boundaries are aligned with the target residency parameter values of the CPU
+@@ -39,7 +41,11 @@
+  * idle state 2, the third bin spans from the target residency of idle state 2
+  * up to, but not including, the target residency of idle state 3 and so on.
+  * The last bin spans from the target residency of the deepest idle state
+- * supplied by the driver to infinity.
++ * supplied by the driver to the scheduler tick period length or to infinity if
++ * the tick period length is less than the target residency of that state.  In
++ * the latter case, the governor also counts events with the measured idle
++ * duration between the tick period length and the target residency of the
++ * deepest idle state.
+  *
+  * Two metrics called "hits" and "intercepts" are associated with each bin.
+  * They are updated every time before selecting an idle state for the given CPU
+@@ -49,47 +55,46 @@
+  * sleep length and the idle duration measured after CPU wakeup fall into the
+  * same bin (that is, the CPU appears to wake up "on time" relative to the sleep
+  * length).  In turn, the "intercepts" metric reflects the relative frequency of
+- * situations in which the measured idle duration is so much shorter than the
+- * sleep length that the bin it falls into corresponds to an idle state
+- * shallower than the one whose bin is fallen into by the sleep length (these
+- * situations are referred to as "intercepts" below).
++ * non-timer wakeup events for which the measured idle duration falls into a bin
++ * that corresponds to an idle state shallower than the one whose bin is fallen
++ * into by the sleep length (these events are also referred to as "intercepts"
++ * below).
+  *
+  * In order to select an idle state for a CPU, the governor takes the following
+  * steps (modulo the possible latency constraint that must be taken into account
+  * too):
+  *
+- * 1. Find the deepest CPU idle state whose target residency does not exceed
+- *    the current sleep length (the candidate idle state) and compute 2 sums as
+- *    follows:
++ * 1. Find the deepest enabled CPU idle state (the candidate idle state) and
++ *    compute 2 sums as follows:
+  *
+- *    - The sum of the "hits" and "intercepts" metrics for the candidate state
+- *      and all of the deeper idle states (it represents the cases in which the
+- *      CPU was idle long enough to avoid being intercepted if the sleep length
+- *      had been equal to the current one).
++ *    - The sum of the "hits" metric for all of the idle states shallower than
++ *      the candidate one (it represents the cases in which the CPU was likely
++ *      woken up by a timer).
+  *
+- *    - The sum of the "intercepts" metrics for all of the idle states shallower
+- *      than the candidate one (it represents the cases in which the CPU was not
+- *      idle long enough to avoid being intercepted if the sleep length had been
+- *      equal to the current one).
++ *    - The sum of the "intercepts" metric for all of the idle states shallower
++ *      than the candidate one (it represents the cases in which the CPU was
++ *      likely woken up by a non-timer wakeup source).
+  *
+- * 2. If the second sum is greater than the first one the CPU is likely to wake
+- *    up early, so look for an alternative idle state to select.
++ * 2. If the second sum computed in step 1 is greater than a half of the sum of
++ *    both metrics for the candidate state bin and all subsequent bins (if any),
++ *    a shallower idle state is likely to be more suitable, so look for it.
+  *
+- *    - Traverse the idle states shallower than the candidate one in the
++ *    - Traverse the enabled idle states shallower than the candidate one in the
+  *      descending order.
+  *
+  *    - For each of them compute the sum of the "intercepts" metrics over all
+  *      of the idle states between it and the candidate one (including the
+  *      former and excluding the latter).
+  *
+- *    - If each of these sums that needs to be taken into account (because the
+- *      check related to it has indicated that the CPU is likely to wake up
+- *      early) is greater than a half of the corresponding sum computed in step
+- *      1 (which means that the target residency of the state in question had
+- *      not exceeded the idle duration in over a half of the relevant cases),
+- *      select the given idle state instead of the candidate one.
++ *    - If this sum is greater than a half of the second sum computed in step 1,
++ *      use the given idle state as the new candidate one.
+  *
+- * 3. By default, select the candidate state.
++ * 3. If the current candidate state is state 0 or its target residency is short
++ *    enough, return it and prevent the scheduler tick from being stopped.
++ *
++ * 4. Obtain the sleep length value and check if it is below the target
++ *    residency of the current candidate state, in which case a new shallower
++ *    candidate state needs to be found, so look for it.
+  */
+ 
+ #include <linux/cpuidle.h>
+diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
+index 72f2537d90cafd..f45c70154a9302 100644
+--- a/drivers/firmware/efi/Kconfig
++++ b/drivers/firmware/efi/Kconfig
+@@ -76,10 +76,6 @@ config EFI_ZBOOT
+ 	bool "Enable the generic EFI decompressor"
+ 	depends on EFI_GENERIC_STUB && !ARM
+ 	select HAVE_KERNEL_GZIP
+-	select HAVE_KERNEL_LZ4
+-	select HAVE_KERNEL_LZMA
+-	select HAVE_KERNEL_LZO
+-	select HAVE_KERNEL_XZ
+ 	select HAVE_KERNEL_ZSTD
+ 	help
+ 	  Create the bootable image as an EFI application that carries the
+diff --git a/drivers/firmware/efi/libstub/Makefile.zboot b/drivers/firmware/efi/libstub/Makefile.zboot
+index 65ffd0b760b2fb..48842b5c106b83 100644
+--- a/drivers/firmware/efi/libstub/Makefile.zboot
++++ b/drivers/firmware/efi/libstub/Makefile.zboot
+@@ -12,22 +12,16 @@ quiet_cmd_copy_and_pad = PAD     $@
+ $(obj)/vmlinux.bin: $(obj)/$(EFI_ZBOOT_PAYLOAD) FORCE
+ 	$(call if_changed,copy_and_pad)
+ 
+-comp-type-$(CONFIG_KERNEL_GZIP)		:= gzip
+-comp-type-$(CONFIG_KERNEL_LZ4)		:= lz4
+-comp-type-$(CONFIG_KERNEL_LZMA)		:= lzma
+-comp-type-$(CONFIG_KERNEL_LZO)		:= lzo
+-comp-type-$(CONFIG_KERNEL_XZ)		:= xzkern
+-comp-type-$(CONFIG_KERNEL_ZSTD)		:= zstd22
+-
+ # in GZIP, the appended le32 carrying the uncompressed size is part of the
+ # format, but in other cases, we just append it at the end for convenience,
+ # causing the original tools to complain when checking image integrity.
+-# So disregard it when calculating the payload size in the zimage header.
+-zboot-method-y                         := $(comp-type-y)_with_size
+-zboot-size-len-y                       := 4
++comp-type-y				:= gzip
++zboot-method-y				:= gzip
++zboot-size-len-y			:= 0
+ 
+-zboot-method-$(CONFIG_KERNEL_GZIP)     := gzip
+-zboot-size-len-$(CONFIG_KERNEL_GZIP)   := 0
++comp-type-$(CONFIG_KERNEL_ZSTD)		:= zstd
++zboot-method-$(CONFIG_KERNEL_ZSTD)	:= zstd22_with_size
++zboot-size-len-$(CONFIG_KERNEL_ZSTD)	:= 4
+ 
+ $(obj)/vmlinuz: $(obj)/vmlinux.bin FORCE
+ 	$(call if_changed,$(zboot-method-y))
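
A gzip detail the Makefile comment above leans on: per RFC 1952, a gzip stream
already ends with ISIZE, a little-endian 32-bit count of the uncompressed
bytes (mod 2^32), so the gzip case needs no appended size word while the zstd
case gets a 4-byte one. A hedged, standalone sketch of reading that field (not
code from the stub itself):

  #include <stdint.h>
  #include <stddef.h>

  /* Return the uncompressed size recorded at the tail of a gzip image.
   * The caller guarantees len >= 4. */
  static uint32_t gzip_isize(const uint8_t *image, size_t len)
  {
  	const uint8_t *p = image + len - 4;

  	return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
  	       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
  }
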
+diff --git a/drivers/gpio/gpio-sim.c b/drivers/gpio/gpio-sim.c
+index dcca1d7f173e5f..deedacdeb23952 100644
+--- a/drivers/gpio/gpio-sim.c
++++ b/drivers/gpio/gpio-sim.c
+@@ -1030,6 +1030,30 @@ static void gpio_sim_device_deactivate(struct gpio_sim_device *dev)
+ 	dev->pdev = NULL;
+ }
+ 
++static void
++gpio_sim_device_lockup_configfs(struct gpio_sim_device *dev, bool lock)
++{
++	struct configfs_subsystem *subsys = dev->group.cg_subsys;
++	struct gpio_sim_bank *bank;
++	struct gpio_sim_line *line;
++
++	/*
++	 * The device only needs to depend on leaf line entries. This is
++	 * sufficient to lock up all the configfs entries that the
++	 * instantiated, alive device depends on.
++	 */
++	list_for_each_entry(bank, &dev->bank_list, siblings) {
++		list_for_each_entry(line, &bank->line_list, siblings) {
++			if (lock)
++				WARN_ON(configfs_depend_item_unlocked(
++						subsys, &line->group.cg_item));
++			else
++				configfs_undepend_item_unlocked(
++						&line->group.cg_item);
++		}
++	}
++}
++
+ static ssize_t
+ gpio_sim_device_config_live_store(struct config_item *item,
+ 				  const char *page, size_t count)
+@@ -1042,14 +1066,24 @@ gpio_sim_device_config_live_store(struct config_item *item,
+ 	if (ret)
+ 		return ret;
+ 
+-	guard(mutex)(&dev->lock);
++	if (live)
++		gpio_sim_device_lockup_configfs(dev, true);
+ 
+-	if (live == gpio_sim_device_is_live(dev))
+-		ret = -EPERM;
+-	else if (live)
+-		ret = gpio_sim_device_activate(dev);
+-	else
+-		gpio_sim_device_deactivate(dev);
++	scoped_guard(mutex, &dev->lock) {
++		if (live == gpio_sim_device_is_live(dev))
++			ret = -EPERM;
++		else if (live)
++			ret = gpio_sim_device_activate(dev);
++		else
++			gpio_sim_device_deactivate(dev);
++	}
++
++	/*
++	 * Undepend is required only if device disablement (live == 0)
++	 * succeeds or if device enablement (live == 1) fails.
++	 */
++	if (live == !!ret)
++		gpio_sim_device_lockup_configfs(dev, false);
+ 
+ 	return ret ?: count;
+ }
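
The `live == !!ret` condition above encodes the comment's two cases compactly:
undepend only when activation failed or deactivation succeeded. A tiny
standalone check of all four combinations, using illustrative negative errno
values:

  #include <assert.h>

  static int keep_or_drop(int live, int ret)
  {
  	return live == !!ret;	/* non-zero: undepend the configfs items */
  }

  int main(void)
  {
  	assert(keep_or_drop(1, 0)   == 0);  /* went live: keep deps     */
  	assert(keep_or_drop(1, -19) == 1);  /* activation failed: drop  */
  	assert(keep_or_drop(0, 0)   == 1);  /* shut down cleanly: drop  */
  	assert(keep_or_drop(0, -1)  == 0);  /* -EPERM, never live: keep */
  	return 0;
  }
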
+diff --git a/drivers/gpio/gpio-virtuser.c b/drivers/gpio/gpio-virtuser.c
+index d6244f0d3bc752..e89f299f214009 100644
+--- a/drivers/gpio/gpio-virtuser.c
++++ b/drivers/gpio/gpio-virtuser.c
+@@ -1546,6 +1546,30 @@ gpio_virtuser_device_deactivate(struct gpio_virtuser_device *dev)
+ 	dev->pdev = NULL;
+ }
+ 
++static void
++gpio_virtuser_device_lockup_configfs(struct gpio_virtuser_device *dev, bool lock)
++{
++	struct configfs_subsystem *subsys = dev->group.cg_subsys;
++	struct gpio_virtuser_lookup_entry *entry;
++	struct gpio_virtuser_lookup *lookup;
++
++	/*
++	 * The device only needs to depend on leaf lookup entries. This is
++	 * sufficient to lock up all the configfs entries that the
++	 * instantiated, alive device depends on.
++	 */
++	list_for_each_entry(lookup, &dev->lookup_list, siblings) {
++		list_for_each_entry(entry, &lookup->entry_list, siblings) {
++			if (lock)
++				WARN_ON(configfs_depend_item_unlocked(
++						subsys, &entry->group.cg_item));
++			else
++				configfs_undepend_item_unlocked(
++						&entry->group.cg_item);
++		}
++	}
++}
++
+ static ssize_t
+ gpio_virtuser_device_config_live_store(struct config_item *item,
+ 				       const char *page, size_t count)
+@@ -1558,15 +1582,24 @@ gpio_virtuser_device_config_live_store(struct config_item *item,
+ 	if (ret)
+ 		return ret;
+ 
+-	guard(mutex)(&dev->lock);
++	if (live)
++		gpio_virtuser_device_lockup_configfs(dev, true);
+ 
+-	if (live == gpio_virtuser_device_is_live(dev))
+-		return -EPERM;
++	scoped_guard(mutex, &dev->lock) {
++		if (live == gpio_virtuser_device_is_live(dev))
++			ret = -EPERM;
++		else if (live)
++			ret = gpio_virtuser_device_activate(dev);
++		else
++			gpio_virtuser_device_deactivate(dev);
++	}
+ 
+-	if (live)
+-		ret = gpio_virtuser_device_activate(dev);
+-	else
+-		gpio_virtuser_device_deactivate(dev);
++	/*
++	 * Undepend is required only if device disablement (live == 0)
++	 * succeeds or if device enablement (live == 1) fails.
++	 */
++	if (live == !!ret)
++		gpio_virtuser_device_lockup_configfs(dev, false);
+ 
+ 	return ret ?: count;
+ }
+diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c
+index afcf432a1573ed..2ea8ccfbdccdd4 100644
+--- a/drivers/gpio/gpio-xilinx.c
++++ b/drivers/gpio/gpio-xilinx.c
+@@ -65,7 +65,7 @@ struct xgpio_instance {
+ 	DECLARE_BITMAP(state, 64);
+ 	DECLARE_BITMAP(last_irq_read, 64);
+ 	DECLARE_BITMAP(dir, 64);
+-	spinlock_t gpio_lock;	/* For serializing operations */
++	raw_spinlock_t gpio_lock;	/* For serializing operations */
+ 	int irq;
+ 	DECLARE_BITMAP(enable, 64);
+ 	DECLARE_BITMAP(rising_edge, 64);
+@@ -179,14 +179,14 @@ static void xgpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
+ 	struct xgpio_instance *chip = gpiochip_get_data(gc);
+ 	int bit = xgpio_to_bit(chip, gpio);
+ 
+-	spin_lock_irqsave(&chip->gpio_lock, flags);
++	raw_spin_lock_irqsave(&chip->gpio_lock, flags);
+ 
+ 	/* Write to GPIO signal and set its direction to output */
+ 	__assign_bit(bit, chip->state, val);
+ 
+ 	xgpio_write_ch(chip, XGPIO_DATA_OFFSET, bit, chip->state);
+ 
+-	spin_unlock_irqrestore(&chip->gpio_lock, flags);
++	raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
+ }
+ 
+ /**
+@@ -210,7 +210,7 @@ static void xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
+ 	bitmap_remap(hw_mask, mask, chip->sw_map, chip->hw_map, 64);
+ 	bitmap_remap(hw_bits, bits, chip->sw_map, chip->hw_map, 64);
+ 
+-	spin_lock_irqsave(&chip->gpio_lock, flags);
++	raw_spin_lock_irqsave(&chip->gpio_lock, flags);
+ 
+ 	bitmap_replace(state, chip->state, hw_bits, hw_mask, 64);
+ 
+@@ -218,7 +218,7 @@ static void xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
+ 
+ 	bitmap_copy(chip->state, state, 64);
+ 
+-	spin_unlock_irqrestore(&chip->gpio_lock, flags);
++	raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
+ }
+ 
+ /**
+@@ -236,13 +236,13 @@ static int xgpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
+ 	struct xgpio_instance *chip = gpiochip_get_data(gc);
+ 	int bit = xgpio_to_bit(chip, gpio);
+ 
+-	spin_lock_irqsave(&chip->gpio_lock, flags);
++	raw_spin_lock_irqsave(&chip->gpio_lock, flags);
+ 
+ 	/* Set the GPIO bit in shadow register and set direction as input */
+ 	__set_bit(bit, chip->dir);
+ 	xgpio_write_ch(chip, XGPIO_TRI_OFFSET, bit, chip->dir);
+ 
+-	spin_unlock_irqrestore(&chip->gpio_lock, flags);
++	raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
+ 
+ 	return 0;
+ }
+@@ -265,7 +265,7 @@ static int xgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
+ 	struct xgpio_instance *chip = gpiochip_get_data(gc);
+ 	int bit = xgpio_to_bit(chip, gpio);
+ 
+-	spin_lock_irqsave(&chip->gpio_lock, flags);
++	raw_spin_lock_irqsave(&chip->gpio_lock, flags);
+ 
+ 	/* Write state of GPIO signal */
+ 	__assign_bit(bit, chip->state, val);
+@@ -275,7 +275,7 @@ static int xgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
+ 	__clear_bit(bit, chip->dir);
+ 	xgpio_write_ch(chip, XGPIO_TRI_OFFSET, bit, chip->dir);
+ 
+-	spin_unlock_irqrestore(&chip->gpio_lock, flags);
++	raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
+ 
+ 	return 0;
+ }
+@@ -398,7 +398,7 @@ static void xgpio_irq_mask(struct irq_data *irq_data)
+ 	int bit = xgpio_to_bit(chip, irq_offset);
+ 	u32 mask = BIT(bit / 32), temp;
+ 
+-	spin_lock_irqsave(&chip->gpio_lock, flags);
++	raw_spin_lock_irqsave(&chip->gpio_lock, flags);
+ 
+ 	__clear_bit(bit, chip->enable);
+ 
+@@ -408,7 +408,7 @@ static void xgpio_irq_mask(struct irq_data *irq_data)
+ 		temp &= ~mask;
+ 		xgpio_writereg(chip->regs + XGPIO_IPIER_OFFSET, temp);
+ 	}
+-	spin_unlock_irqrestore(&chip->gpio_lock, flags);
++	raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
+ 
+ 	gpiochip_disable_irq(&chip->gc, irq_offset);
+ }
+@@ -428,7 +428,7 @@ static void xgpio_irq_unmask(struct irq_data *irq_data)
+ 
+ 	gpiochip_enable_irq(&chip->gc, irq_offset);
+ 
+-	spin_lock_irqsave(&chip->gpio_lock, flags);
++	raw_spin_lock_irqsave(&chip->gpio_lock, flags);
+ 
+ 	__set_bit(bit, chip->enable);
+ 
+@@ -447,7 +447,7 @@ static void xgpio_irq_unmask(struct irq_data *irq_data)
+ 		xgpio_writereg(chip->regs + XGPIO_IPIER_OFFSET, val);
+ 	}
+ 
+-	spin_unlock_irqrestore(&chip->gpio_lock, flags);
++	raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
+ }
+ 
+ /**
+@@ -512,7 +512,7 @@ static void xgpio_irqhandler(struct irq_desc *desc)
+ 
+ 	chained_irq_enter(irqchip, desc);
+ 
+-	spin_lock(&chip->gpio_lock);
++	raw_spin_lock(&chip->gpio_lock);
+ 
+ 	xgpio_read_ch_all(chip, XGPIO_DATA_OFFSET, all);
+ 
+@@ -529,7 +529,7 @@ static void xgpio_irqhandler(struct irq_desc *desc)
+ 	bitmap_copy(chip->last_irq_read, all, 64);
+ 	bitmap_or(all, rising, falling, 64);
+ 
+-	spin_unlock(&chip->gpio_lock);
++	raw_spin_unlock(&chip->gpio_lock);
+ 
+ 	dev_dbg(gc->parent, "IRQ rising %*pb falling %*pb\n", 64, rising, 64, falling);
+ 
+@@ -620,7 +620,7 @@ static int xgpio_probe(struct platform_device *pdev)
+ 	bitmap_set(chip->hw_map,  0, width[0]);
+ 	bitmap_set(chip->hw_map, 32, width[1]);
+ 
+-	spin_lock_init(&chip->gpio_lock);
++	raw_spin_lock_init(&chip->gpio_lock);
+ 
+ 	chip->gc.base = -1;
+ 	chip->gc.ngpio = bitmap_weight(chip->hw_map, 64);
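
For context on the conversion above: with PREEMPT_RT, spinlock_t turns into a
sleeping rtmutex, while irqchip mask/unmask paths and chained handlers run in
atomic context where sleeping is forbidden, hence raw_spinlock_t. A minimal
pattern with placeholder names (hedged sketch, not driver code):

  #include <linux/spinlock.h>

  static DEFINE_RAW_SPINLOCK(demo_lock);	/* never sleeps, even on RT */

  static void demo_irq_mask(void)
  {
  	unsigned long flags;

  	raw_spin_lock_irqsave(&demo_lock, flags);
  	/* read-modify-write of the interrupt enable register */
  	raw_spin_unlock_irqrestore(&demo_lock, flags);
  }
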
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+index e41318bfbf4575..84e5364d1f67d0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+@@ -715,8 +715,9 @@ int amdgpu_amdkfd_submit_ib(struct amdgpu_device *adev,
+ void amdgpu_amdkfd_set_compute_idle(struct amdgpu_device *adev, bool idle)
+ {
+ 	enum amd_powergating_state state = idle ? AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE;
+-	if (IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 11 &&
+-	    ((adev->mes.kiq_version & AMDGPU_MES_VERSION_MASK) <= 64)) {
++	if ((IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 11 &&
++	    ((adev->mes.kiq_version & AMDGPU_MES_VERSION_MASK) <= 64)) ||
++		(IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 12)) {
+ 		pr_debug("GFXOFF is %s\n", idle ? "enabled" : "disabled");
+ 		amdgpu_gfx_off_ctrl(adev, idle);
+ 	} else if ((IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 9) &&
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c
+index 2d4b67175b55be..328a1b9635481c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c
+@@ -122,6 +122,10 @@ static int amdgpu_is_fw_attestation_supported(struct amdgpu_device *adev)
+ 	if (adev->flags & AMD_IS_APU)
+ 		return 0;
+ 
++	if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(14, 0, 2) ||
++	    amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(14, 0, 3))
++		return 0;
++
+ 	if (adev->asic_type >= CHIP_SIENNA_CICHLID)
+ 		return 1;
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+index 8b512dc28df838..071f187f5e282f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+@@ -193,8 +193,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
+ 	need_ctx_switch = ring->current_ctx != fence_ctx;
+ 	if (ring->funcs->emit_pipeline_sync && job &&
+ 	    ((tmp = amdgpu_sync_get_fence(&job->explicit_sync)) ||
+-	     (amdgpu_sriov_vf(adev) && need_ctx_switch) ||
+-	     amdgpu_vm_need_pipeline_sync(ring, job))) {
++	     need_ctx_switch || amdgpu_vm_need_pipeline_sync(ring, job))) {
++
+ 		need_pipe_sync = true;
+ 
+ 		if (tmp)
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index ea403fece8392c..08c58d0315de7f 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -8889,6 +8889,7 @@ static void amdgpu_dm_enable_self_refresh(struct amdgpu_crtc *acrtc_attach,
+ 	struct replay_settings *pr = &acrtc_state->stream->link->replay_settings;
+ 	struct amdgpu_dm_connector *aconn =
+ 		(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
++	bool vrr_active = amdgpu_dm_crtc_vrr_active(acrtc_state);
+ 
+ 	if (acrtc_state->update_type > UPDATE_TYPE_FAST) {
+ 		if (pr->config.replay_supported && !pr->replay_feature_enabled)
+@@ -8915,14 +8916,15 @@ static void amdgpu_dm_enable_self_refresh(struct amdgpu_crtc *acrtc_attach,
+ 		 * adequate number of fast atomic commits to notify KMD
+ 		 * of update events. See `vblank_control_worker()`.
+ 		 */
+-		if (acrtc_attach->dm_irq_params.allow_sr_entry &&
++		if (!vrr_active &&
++		    acrtc_attach->dm_irq_params.allow_sr_entry &&
+ #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
+ 		    !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
+ #endif
+ 		    (current_ts - psr->psr_dirty_rects_change_timestamp_ns) > 500000000) {
+ 			if (pr->replay_feature_enabled && !pr->replay_allow_active)
+ 				amdgpu_dm_replay_enable(acrtc_state->stream, true);
+-			if (psr->psr_version >= DC_PSR_VERSION_SU_1 &&
++			if (psr->psr_version == DC_PSR_VERSION_SU_1 &&
+ 			    !psr->psr_allow_active && !aconn->disallow_edp_enter_psr)
+ 				amdgpu_dm_psr_enable(acrtc_state->stream);
+ 		}
+@@ -9093,7 +9095,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
+ 				acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns =
+ 				timestamp_ns;
+ 				if (acrtc_state->stream->link->psr_settings.psr_allow_active)
+-					amdgpu_dm_psr_disable(acrtc_state->stream);
++					amdgpu_dm_psr_disable(acrtc_state->stream, true);
+ 				mutex_unlock(&dm->dc_lock);
+ 			}
+ 		}
+@@ -9259,11 +9261,11 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
+ 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
+ 
+ 		mutex_lock(&dm->dc_lock);
+-		if (acrtc_state->update_type > UPDATE_TYPE_FAST) {
++		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) || vrr_active) {
+ 			if (acrtc_state->stream->link->replay_settings.replay_allow_active)
+ 				amdgpu_dm_replay_disable(acrtc_state->stream);
+ 			if (acrtc_state->stream->link->psr_settings.psr_allow_active)
+-				amdgpu_dm_psr_disable(acrtc_state->stream);
++				amdgpu_dm_psr_disable(acrtc_state->stream, true);
+ 		}
+ 		mutex_unlock(&dm->dc_lock);
+ 
+@@ -11370,6 +11372,25 @@ static int dm_crtc_get_cursor_mode(struct amdgpu_device *adev,
+ 	return 0;
+ }
+ 
++static bool amdgpu_dm_crtc_mem_type_changed(struct drm_device *dev,
++					    struct drm_atomic_state *state,
++					    struct drm_crtc_state *crtc_state)
++{
++	struct drm_plane *plane;
++	struct drm_plane_state *new_plane_state, *old_plane_state;
++
++	drm_for_each_plane_mask(plane, dev, crtc_state->plane_mask) {
++		new_plane_state = drm_atomic_get_plane_state(state, plane);
++		old_plane_state = drm_atomic_get_plane_state(state, plane);
++
++		if (old_plane_state->fb && new_plane_state->fb &&
++		    get_mem_type(old_plane_state->fb) != get_mem_type(new_plane_state->fb))
++			return true;
++	}
++
++	return false;
++}
++
+ /**
+  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
+  *
+@@ -11567,10 +11588,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
+ 
+ 	/* Remove exiting planes if they are modified */
+ 	for_each_oldnew_plane_in_descending_zpos(state, plane, old_plane_state, new_plane_state) {
+-		if (old_plane_state->fb && new_plane_state->fb &&
+-		    get_mem_type(old_plane_state->fb) !=
+-		    get_mem_type(new_plane_state->fb))
+-			lock_and_validation_needed = true;
+ 
+ 		ret = dm_update_plane_state(dc, state, plane,
+ 					    old_plane_state,
+@@ -11865,9 +11882,11 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
+ 
+ 		/*
+ 		 * Only allow async flips for fast updates that don't change
+-		 * the FB pitch, the DCC state, rotation, etc.
++		 * the FB pitch, the DCC state, rotation, mem_type, etc.
+ 		 */
+-		if (new_crtc_state->async_flip && lock_and_validation_needed) {
++		if (new_crtc_state->async_flip &&
++		    (lock_and_validation_needed ||
++		     amdgpu_dm_crtc_mem_type_changed(dev, state, new_crtc_state))) {
+ 			drm_dbg_atomic(crtc->dev,
+ 				       "[CRTC:%d:%s] async flips are only supported for fast updates\n",
+ 				       crtc->base.id, crtc->name);
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
+index f936a35fa9ebb7..0f6ba7b1575d08 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
+@@ -30,6 +30,7 @@
+ #include "amdgpu_dm.h"
+ #include "dc.h"
+ #include "amdgpu_securedisplay.h"
++#include "amdgpu_dm_psr.h"
+ 
+ static const char *const pipe_crc_sources[] = {
+ 	"none",
+@@ -224,6 +225,10 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
+ 
+ 	mutex_lock(&adev->dm.dc_lock);
+ 
++	/* For PSR1, check that the panel has exited PSR */
++	if (stream_state->link->psr_settings.psr_version < DC_PSR_VERSION_SU_1)
++		amdgpu_dm_psr_wait_disable(stream_state);
++
+ 	/* Enable or disable CRTC CRC generation */
+ 	if (dm_is_crc_source_crtc(source) || source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE) {
+ 		if (!dc_stream_configure_crc(stream_state->ctx->dc,
+@@ -357,6 +362,17 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
+ 
+ 	}
+ 
++	/*
++	 * Reading the CRC requires the vblank interrupt handler to be
++	 * enabled. Keep a reference until CRC capture stops.
++	 */
++	enabled = amdgpu_dm_is_valid_crc_source(cur_crc_src);
++	if (!enabled && enable) {
++		ret = drm_crtc_vblank_get(crtc);
++		if (ret)
++			goto cleanup;
++	}
++
+ #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ 	/* Reset secure_display when we change crc source from debugfs */
+ 	amdgpu_dm_set_crc_window_default(crtc, crtc_state->stream);
+@@ -367,16 +383,7 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
+ 		goto cleanup;
+ 	}
+ 
+-	/*
+-	 * Reading the CRC requires the vblank interrupt handler to be
+-	 * enabled. Keep a reference until CRC capture stops.
+-	 */
+-	enabled = amdgpu_dm_is_valid_crc_source(cur_crc_src);
+ 	if (!enabled && enable) {
+-		ret = drm_crtc_vblank_get(crtc);
+-		if (ret)
+-			goto cleanup;
+-
+ 		if (dm_is_crc_source_dprx(source)) {
+ 			if (drm_dp_start_crc(aux, crtc)) {
+ 				DRM_DEBUG_DRIVER("dp start crc failed\n");
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+index 9be87b53251739..70fcfae8e4c552 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+@@ -93,7 +93,7 @@ int amdgpu_dm_crtc_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
+ 	return rc;
+ }
+ 
+-bool amdgpu_dm_crtc_vrr_active(struct dm_crtc_state *dm_state)
++bool amdgpu_dm_crtc_vrr_active(const struct dm_crtc_state *dm_state)
+ {
+ 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
+ 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
+@@ -142,7 +142,7 @@ static void amdgpu_dm_crtc_set_panel_sr_feature(
+ 		amdgpu_dm_replay_enable(vblank_work->stream, true);
+ 	} else if (vblank_enabled) {
+ 		if (link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 && is_sr_active)
+-			amdgpu_dm_psr_disable(vblank_work->stream);
++			amdgpu_dm_psr_disable(vblank_work->stream, false);
+ 	} else if (link->psr_settings.psr_feature_enabled &&
+ 		allow_sr_entry && !is_sr_active && !is_crc_window_active) {
+ 
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h
+index 17e948753f59bd..c1212947a77b83 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h
+@@ -37,7 +37,7 @@ int amdgpu_dm_crtc_set_vupdate_irq(struct drm_crtc *crtc, bool enable);
+ 
+ bool amdgpu_dm_crtc_vrr_active_irq(struct amdgpu_crtc *acrtc);
+ 
+-bool amdgpu_dm_crtc_vrr_active(struct dm_crtc_state *dm_state);
++bool amdgpu_dm_crtc_vrr_active(const struct dm_crtc_state *dm_state);
+ 
+ int amdgpu_dm_crtc_enable_vblank(struct drm_crtc *crtc);
+ 
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+index db56b0aa545454..98e88903d07d52 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+@@ -3638,7 +3638,7 @@ static int crc_win_update_set(void *data, u64 val)
+ 		/* PSR may write to OTG CRC window control register,
+ 		 * so close it before starting secure_display.
+ 		 */
+-		amdgpu_dm_psr_disable(acrtc->dm_irq_params.stream);
++		amdgpu_dm_psr_disable(acrtc->dm_irq_params.stream, true);
+ 
+ 		spin_lock_irq(&adev_to_drm(adev)->event_lock);
+ 
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+index 32b025c92c63cf..3d624ae6d9bdfe 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+@@ -1831,11 +1831,15 @@ enum dc_status dm_dp_mst_is_port_support_mode(
+ 			if (immediate_upstream_port) {
+ 				virtual_channel_bw_in_kbps = kbps_from_pbn(immediate_upstream_port->full_pbn);
+ 				virtual_channel_bw_in_kbps = min(root_link_bw_in_kbps, virtual_channel_bw_in_kbps);
+-				if (bw_range.min_kbps > virtual_channel_bw_in_kbps) {
+-					DRM_DEBUG_DRIVER("MST_DSC dsc decode at last link."
+-							 "Max dsc compression can't fit into MST available bw\n");
+-					return DC_FAIL_BANDWIDTH_VALIDATE;
+-				}
++			} else {
++				/* For topology LCT 1 case - only one mstb */
++				virtual_channel_bw_in_kbps = root_link_bw_in_kbps;
++			}
++
++			if (bw_range.min_kbps > virtual_channel_bw_in_kbps) {
++				DRM_DEBUG_DRIVER("MST_DSC dsc decode at last link. "
++						 "Max dsc compression can't fit into MST available bw\n");
++				return DC_FAIL_BANDWIDTH_VALIDATE;
+ 			}
+ 		}
+ 
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+index f40240aafe988e..45858bf1523d8f 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+@@ -201,14 +201,13 @@ void amdgpu_dm_psr_enable(struct dc_stream_state *stream)
+  *
+  * Return: true if success
+  */
+-bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
++bool amdgpu_dm_psr_disable(struct dc_stream_state *stream, bool wait)
+ {
+-	unsigned int power_opt = 0;
+ 	bool psr_enable = false;
+ 
+ 	DRM_DEBUG_DRIVER("Disabling psr...\n");
+ 
+-	return dc_link_set_psr_allow_active(stream->link, &psr_enable, true, false, &power_opt);
++	return dc_link_set_psr_allow_active(stream->link, &psr_enable, wait, false, NULL);
+ }
+ 
+ /*
+@@ -251,3 +250,33 @@ bool amdgpu_dm_psr_is_active_allowed(struct amdgpu_display_manager *dm)
+ 
+ 	return allow_active;
+ }
++
++/**
++ * amdgpu_dm_psr_wait_disable() - Wait for eDP panel to exit PSR
++ * @stream: stream state attached to the eDP link
++ *
++ * Waits for a max of 500ms for the eDP panel to exit PSR.
++ *
++ * Return: true if panel exited PSR, false otherwise.
++ */
++bool amdgpu_dm_psr_wait_disable(struct dc_stream_state *stream)
++{
++	enum dc_psr_state psr_state = PSR_STATE0;
++	struct dc_link *link = stream->link;
++	int retry_count;
++
++	if (link == NULL)
++		return false;
++
++	for (retry_count = 0; retry_count < 1000; retry_count++) {
++		dc_link_get_psr_state(link, &psr_state);
++		if (psr_state == PSR_STATE0)
++			break;
++		udelay(500);
++	}
++
++	if (retry_count == 1000)
++		return false;
++
++	return true;
++}
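
The helper above is a bounded poll loop (1000 passes of udelay(500), i.e. the
500ms the kernel-doc promises). The same shape can be written with the generic
read_poll_timeout() macro from <linux/iopoll.h>; in this hedged sketch,
get_psr_state() is a hypothetical wrapper, since dc_link_get_psr_state()
reports the state through an out-parameter:

  #include <linux/iopoll.h>

  static bool psr_wait_disable_sketch(struct dc_link *link)
  {
  	enum dc_psr_state state;
  	int err;

  	/* Poll every 500us, give up after 500ms, matching the loop above. */
  	err = read_poll_timeout(get_psr_state, state, state == PSR_STATE0,
  				500, 500 * 1000, false, link);

  	return err == 0;
  }
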
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
+index cd2d45c2b5ef01..e2366321a3c1bd 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
+@@ -34,8 +34,9 @@
+ void amdgpu_dm_set_psr_caps(struct dc_link *link);
+ void amdgpu_dm_psr_enable(struct dc_stream_state *stream);
+ bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
+-bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
++bool amdgpu_dm_psr_disable(struct dc_stream_state *stream, bool wait);
+ bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
+ bool amdgpu_dm_psr_is_active_allowed(struct amdgpu_display_manager *dm);
++bool amdgpu_dm_psr_wait_disable(struct dc_stream_state *stream);
+ 
+ #endif /* AMDGPU_DM_AMDGPU_DM_PSR_H_ */
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
+index beed7adbbd43e0..47d785204f29cb 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
+@@ -195,9 +195,9 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
+ 	.dcn_downspread_percent = 0.5,
+ 	.gpuvm_min_page_size_bytes = 4096,
+ 	.hostvm_min_page_size_bytes = 4096,
+-	.do_urgent_latency_adjustment = 1,
++	.do_urgent_latency_adjustment = 0,
+ 	.urgent_latency_adjustment_fabric_clock_component_us = 0,
+-	.urgent_latency_adjustment_fabric_clock_reference_mhz = 3000,
++	.urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
+ };
+ 
+ void dcn35_build_wm_range_table_fpu(struct clk_mgr *clk_mgr)
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+index cd2cf0ffc0f5cb..5a0a10144a73fe 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+@@ -2549,11 +2549,12 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
+ 					  &backend_workload_mask);
+ 
+ 	/* Add optimizations for SMU13.0.0/10.  Reuse the power saving profile */
+-	if ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0) &&
+-	     ((smu->adev->pm.fw_version == 0x004e6601) ||
+-	      (smu->adev->pm.fw_version >= 0x004e7300))) ||
+-	    (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) &&
+-	     smu->adev->pm.fw_version >= 0x00504500)) {
++	if ((workload_mask & (1 << PP_SMC_POWER_PROFILE_COMPUTE)) &&
++	    ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0) &&
++	      ((smu->adev->pm.fw_version == 0x004e6601) ||
++	       (smu->adev->pm.fw_version >= 0x004e7300))) ||
++	     (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) &&
++	      smu->adev->pm.fw_version >= 0x00504500))) {
+ 		workload_type = smu_cmn_to_asic_specific_index(smu,
+ 							       CMN2ASIC_MAPPING_WORKLOAD,
+ 							       PP_SMC_POWER_PROFILE_POWERSAVING);
+diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
+index 35557d98d7a700..3d16e9406dc6b1 100644
+--- a/drivers/gpu/drm/i915/display/intel_fb.c
++++ b/drivers/gpu/drm/i915/display/intel_fb.c
+@@ -1613,7 +1613,7 @@ int intel_fill_fb_info(struct drm_i915_private *i915, struct intel_framebuffer *
+ 		 * arithmetic related to alignment and offset calculation.
+ 		 */
+ 		if (is_gen12_ccs_cc_plane(&fb->base, i)) {
+-			if (IS_ALIGNED(fb->base.offsets[i], PAGE_SIZE))
++			if (IS_ALIGNED(fb->base.offsets[i], 64))
+ 				continue;
+ 			else
+ 				return -EINVAL;
+diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
+index 09686d038d6053..7cc84472cecec2 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
++++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
+@@ -387,11 +387,13 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
+ 			if (f) {
+ 				struct nouveau_channel *prev;
+ 				bool must_wait = true;
++				bool local;
+ 
+ 				rcu_read_lock();
+ 				prev = rcu_dereference(f->channel);
+-				if (prev && (prev == chan ||
+-					     fctx->sync(f, prev, chan) == 0))
++				local = prev && prev->cli->drm == chan->cli->drm;
++				if (local && (prev == chan ||
++					      fctx->sync(f, prev, chan) == 0))
+ 					must_wait = false;
+ 				rcu_read_unlock();
+ 				if (!must_wait)
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c
+index 841e3b69fcaf3e..5a0c9b8a79f3ec 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c
+@@ -31,6 +31,7 @@ mcp77_sor = {
+ 	.state = g94_sor_state,
+ 	.power = nv50_sor_power,
+ 	.clock = nv50_sor_clock,
++	.bl = &nv50_sor_bl,
+ 	.hdmi = &g84_sor_hdmi,
+ 	.dp = &g94_sor_dp,
+ };
+diff --git a/drivers/gpu/drm/tests/drm_kunit_helpers.c b/drivers/gpu/drm/tests/drm_kunit_helpers.c
+index 04a6b8cc62ac67..3c0b7824c0be37 100644
+--- a/drivers/gpu/drm/tests/drm_kunit_helpers.c
++++ b/drivers/gpu/drm/tests/drm_kunit_helpers.c
+@@ -320,8 +320,7 @@ static void kunit_action_drm_mode_destroy(void *ptr)
+ }
+ 
+ /**
+- * drm_kunit_display_mode_from_cea_vic() - return a mode for CEA VIC
+-					   for a KUnit test
++ * drm_kunit_display_mode_from_cea_vic() - return a mode for CEA VIC for a KUnit test
+  * @test: The test context object
+  * @dev: DRM device
+  * @video_code: CEA VIC of the mode
+diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c
+index 20bf33702c3c4f..da203045df9bec 100644
+--- a/drivers/gpu/drm/v3d/v3d_irq.c
++++ b/drivers/gpu/drm/v3d/v3d_irq.c
+@@ -108,6 +108,7 @@ v3d_irq(int irq, void *arg)
+ 		v3d_job_update_stats(&v3d->bin_job->base, V3D_BIN);
+ 		trace_v3d_bcl_irq(&v3d->drm, fence->seqno);
+ 		dma_fence_signal(&fence->base);
++		v3d->bin_job = NULL;
+ 		status = IRQ_HANDLED;
+ 	}
+ 
+@@ -118,6 +119,7 @@ v3d_irq(int irq, void *arg)
+ 		v3d_job_update_stats(&v3d->render_job->base, V3D_RENDER);
+ 		trace_v3d_rcl_irq(&v3d->drm, fence->seqno);
+ 		dma_fence_signal(&fence->base);
++		v3d->render_job = NULL;
+ 		status = IRQ_HANDLED;
+ 	}
+ 
+@@ -128,6 +130,7 @@ v3d_irq(int irq, void *arg)
+ 		v3d_job_update_stats(&v3d->csd_job->base, V3D_CSD);
+ 		trace_v3d_csd_irq(&v3d->drm, fence->seqno);
+ 		dma_fence_signal(&fence->base);
++		v3d->csd_job = NULL;
+ 		status = IRQ_HANDLED;
+ 	}
+ 
+@@ -165,6 +168,7 @@ v3d_hub_irq(int irq, void *arg)
+ 		v3d_job_update_stats(&v3d->tfu_job->base, V3D_TFU);
+ 		trace_v3d_tfu_irq(&v3d->drm, fence->seqno);
+ 		dma_fence_signal(&fence->base);
++		v3d->tfu_job = NULL;
+ 		status = IRQ_HANDLED;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+index a0e433fbcba67c..183cda50094cb7 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+@@ -443,7 +443,8 @@ static int vmw_bo_init(struct vmw_private *dev_priv,
+ 
+ 	if (params->pin)
+ 		ttm_bo_pin(&vmw_bo->tbo);
+-	ttm_bo_unreserve(&vmw_bo->tbo);
++	if (!params->keep_resv)
++		ttm_bo_unreserve(&vmw_bo->tbo);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
+index 43b5439ec9f760..c21ba7ff773682 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
+@@ -56,8 +56,9 @@ struct vmw_bo_params {
+ 	u32 domain;
+ 	u32 busy_domain;
+ 	enum ttm_bo_type bo_type;
+-	size_t size;
+ 	bool pin;
++	bool keep_resv;
++	size_t size;
+ 	struct dma_resv *resv;
+ 	struct sg_table *sg;
+ };
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+index 2825dd3149ed5c..2e84e1029732d3 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+@@ -401,7 +401,8 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
+ 		.busy_domain = VMW_BO_DOMAIN_SYS,
+ 		.bo_type = ttm_bo_type_kernel,
+ 		.size = PAGE_SIZE,
+-		.pin = true
++		.pin = true,
++		.keep_resv = true,
+ 	};
+ 
+ 	/*
+@@ -413,10 +414,6 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
+ 	if (unlikely(ret != 0))
+ 		return ret;
+ 
+-	ret = ttm_bo_reserve(&vbo->tbo, false, true, NULL);
+-	BUG_ON(ret != 0);
+-	vmw_bo_pin_reserved(vbo, true);
+-
+ 	ret = ttm_bo_kmap(&vbo->tbo, 0, 1, &map);
+ 	if (likely(ret == 0)) {
+ 		result = ttm_kmap_obj_virtual(&map, &dummy);
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
+index b9857f37ca1ac6..ed5015ced3920a 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
+@@ -206,6 +206,7 @@ struct drm_gem_object *vmw_prime_import_sg_table(struct drm_device *dev,
+ 		.bo_type = ttm_bo_type_sg,
+ 		.size = attach->dmabuf->size,
+ 		.pin = false,
++		.keep_resv = true,
+ 		.resv = attach->dmabuf->resv,
+ 		.sg = table,
+ 
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+index 10d596cb4b4029..5f99f7437ae614 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+@@ -750,6 +750,7 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
+ 	struct vmw_plane_state *old_vps = vmw_plane_state_to_vps(old_state);
+ 	struct vmw_bo *old_bo = NULL;
+ 	struct vmw_bo *new_bo = NULL;
++	struct ww_acquire_ctx ctx;
+ 	s32 hotspot_x, hotspot_y;
+ 	int ret;
+ 
+@@ -769,9 +770,11 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
+ 	if (du->cursor_surface)
+ 		du->cursor_age = du->cursor_surface->snooper.age;
+ 
++	ww_acquire_init(&ctx, &reservation_ww_class);
++
+ 	if (!vmw_user_object_is_null(&old_vps->uo)) {
+ 		old_bo = vmw_user_object_buffer(&old_vps->uo);
+-		ret = ttm_bo_reserve(&old_bo->tbo, false, false, NULL);
++		ret = ttm_bo_reserve(&old_bo->tbo, false, false, &ctx);
+ 		if (ret != 0)
+ 			return;
+ 	}
+@@ -779,9 +782,14 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
+ 	if (!vmw_user_object_is_null(&vps->uo)) {
+ 		new_bo = vmw_user_object_buffer(&vps->uo);
+ 		if (old_bo != new_bo) {
+-			ret = ttm_bo_reserve(&new_bo->tbo, false, false, NULL);
+-			if (ret != 0)
++			ret = ttm_bo_reserve(&new_bo->tbo, false, false, &ctx);
++			if (ret != 0) {
++				if (old_bo) {
++					ttm_bo_unreserve(&old_bo->tbo);
++					ww_acquire_fini(&ctx);
++				}
+ 				return;
++			}
+ 		} else {
+ 			new_bo = NULL;
+ 		}
+@@ -803,10 +811,12 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
+ 						hotspot_x, hotspot_y);
+ 	}
+ 
+-	if (old_bo)
+-		ttm_bo_unreserve(&old_bo->tbo);
+ 	if (new_bo)
+ 		ttm_bo_unreserve(&new_bo->tbo);
++	if (old_bo)
++		ttm_bo_unreserve(&old_bo->tbo);
++
++	ww_acquire_fini(&ctx);
+ 
+ 	du->cursor_x = new_state->crtc_x + du->set_gui_x;
+ 	du->cursor_y = new_state->crtc_y + du->set_gui_y;
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+index a01ca3226d0af8..7fb1c88bcc475f 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+@@ -896,7 +896,8 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
+ 		.busy_domain = VMW_BO_DOMAIN_SYS,
+ 		.bo_type = ttm_bo_type_device,
+ 		.size = size,
+-		.pin = true
++		.pin = true,
++		.keep_resv = true,
+ 	};
+ 
+ 	if (!vmw_shader_id_ok(user_key, shader_type))
+@@ -906,10 +907,6 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
+ 	if (unlikely(ret != 0))
+ 		goto out;
+ 
+-	ret = ttm_bo_reserve(&buf->tbo, false, true, NULL);
+-	if (unlikely(ret != 0))
+-		goto no_reserve;
+-
+ 	/* Map and copy shader bytecode. */
+ 	ret = ttm_bo_kmap(&buf->tbo, 0, PFN_UP(size), &map);
+ 	if (unlikely(ret != 0)) {
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+index 621d98b376bbbc..5553892d7c3e0d 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+@@ -572,15 +572,14 @@ int vmw_bo_create_and_populate(struct vmw_private *dev_priv,
+ 		.busy_domain = domain,
+ 		.bo_type = ttm_bo_type_kernel,
+ 		.size = bo_size,
+-		.pin = true
++		.pin = true,
++		.keep_resv = true,
+ 	};
+ 
+ 	ret = vmw_bo_create(dev_priv, &bo_params, &vbo);
+ 	if (unlikely(ret != 0))
+ 		return ret;
+ 
+-	ret = ttm_bo_reserve(&vbo->tbo, false, true, NULL);
+-	BUG_ON(ret != 0);
+ 	ret = vmw_ttm_populate(vbo->tbo.bdev, vbo->tbo.ttm, &ctx);
+ 	if (likely(ret == 0)) {
+ 		struct vmw_ttm_tt *vmw_tt =
+diff --git a/drivers/gpu/drm/xe/xe_hw_engine.c b/drivers/gpu/drm/xe/xe_hw_engine.c
+index 547919e8ce9e45..b11bc0f00dfda1 100644
+--- a/drivers/gpu/drm/xe/xe_hw_engine.c
++++ b/drivers/gpu/drm/xe/xe_hw_engine.c
+@@ -417,7 +417,7 @@ hw_engine_setup_default_state(struct xe_hw_engine *hwe)
+ 	 * Bspec: 72161
+ 	 */
+ 	const u8 mocs_write_idx = gt->mocs.uc_index;
+-	const u8 mocs_read_idx = hwe->class == XE_ENGINE_CLASS_COMPUTE &&
++	const u8 mocs_read_idx = hwe->class == XE_ENGINE_CLASS_COMPUTE && IS_DGFX(xe) &&
+ 				 (GRAPHICS_VER(xe) >= 20 || xe->info.platform == XE_PVC) ?
+ 				 gt->mocs.wb_index : gt->mocs.uc_index;
+ 	u32 ring_cmd_cctl_val = REG_FIELD_PREP(CMD_CCTL_WRITE_OVERRIDE_MASK, mocs_write_idx) |
+diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
+index 78823f53d2905d..6fc00d63b2857f 100644
+--- a/drivers/gpu/drm/xe/xe_oa.c
++++ b/drivers/gpu/drm/xe/xe_oa.c
+@@ -1980,6 +1980,7 @@ static const struct xe_mmio_range xe2_oa_mux_regs[] = {
+ 	{ .start = 0x5194, .end = 0x5194 },	/* SYS_MEM_LAT_MEASURE_MERTF_GRP_3D */
+ 	{ .start = 0x8704, .end = 0x8704 },	/* LMEM_LAT_MEASURE_MCFG_GRP */
+ 	{ .start = 0xB1BC, .end = 0xB1BC },	/* L3_BANK_LAT_MEASURE_LBCF_GFX */
++	{ .start = 0xD0E0, .end = 0xD0F4 },	/* VISACTL */
+ 	{ .start = 0xE18C, .end = 0xE18C },	/* SAMPLER_MODE */
+ 	{ .start = 0xE590, .end = 0xE590 },	/* TDL_LSC_LAT_MEASURE_TDL_GFX */
+ 	{ .start = 0x13000, .end = 0x137FC },	/* PES_0_PESL0 - PES_63_UPPER_PESL3 */
+diff --git a/drivers/hwmon/ltc2991.c b/drivers/hwmon/ltc2991.c
+index 7ca139e4b6aff0..6d5d4cb846daf3 100644
+--- a/drivers/hwmon/ltc2991.c
++++ b/drivers/hwmon/ltc2991.c
+@@ -125,7 +125,7 @@ static int ltc2991_get_curr(struct ltc2991_state *st, u32 reg, int channel,
+ 
+ 	/* Vx-Vy, 19.075uV/LSB */
+ 	*val = DIV_ROUND_CLOSEST(sign_extend32(reg_val, 14) * 19075,
+-				 st->r_sense_uohm[channel]);
++				 (s32)st->r_sense_uohm[channel]);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/hwmon/tmp513.c b/drivers/hwmon/tmp513.c
+index 1c2cb12071b808..5acbfd7d088dd5 100644
+--- a/drivers/hwmon/tmp513.c
++++ b/drivers/hwmon/tmp513.c
+@@ -207,7 +207,8 @@ static int tmp51x_get_value(struct tmp51x_data *data, u8 reg, u8 pos,
+ 		*val = sign_extend32(regval,
+ 				     reg == TMP51X_SHUNT_CURRENT_RESULT ?
+ 				     16 - tmp51x_get_pga_shift(data) : 15);
+-		*val = DIV_ROUND_CLOSEST(*val * 10 * MILLI, data->shunt_uohms);
++		*val = DIV_ROUND_CLOSEST(*val * 10 * (long)MILLI, (long)data->shunt_uohms);
++
+ 		break;
+ 	case TMP51X_BUS_VOLTAGE_RESULT:
+ 	case TMP51X_BUS_VOLTAGE_H_LIMIT:
+@@ -223,7 +224,7 @@ static int tmp51x_get_value(struct tmp51x_data *data, u8 reg, u8 pos,
+ 	case TMP51X_BUS_CURRENT_RESULT:
+ 		// Current = (ShuntVoltage * CalibrationRegister) / 4096
+ 		*val = sign_extend32(regval, 15) * (long)data->curr_lsb_ua;
+-		*val = DIV_ROUND_CLOSEST(*val, MILLI);
++		*val = DIV_ROUND_CLOSEST(*val, (long)MILLI);
+ 		break;
+ 	case TMP51X_LOCAL_TEMP_RESULT:
+ 	case TMP51X_REMOTE_TEMP_RESULT_1:
+@@ -263,7 +264,7 @@ static int tmp51x_set_value(struct tmp51x_data *data, u8 reg, long val)
+ 		 * The user enter current value and we convert it to
+ 		 * voltage. 1lsb = 10uV
+ 		 */
+-		val = DIV_ROUND_CLOSEST(val * data->shunt_uohms, 10 * MILLI);
++		val = DIV_ROUND_CLOSEST(val * (long)data->shunt_uohms, 10 * (long)MILLI);
+ 		max_val = U16_MAX >> tmp51x_get_pga_shift(data);
+ 		regval = clamp_val(val, -max_val, max_val);
+ 		break;
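
This hunk and the ltc2991 one above fix the same C pitfall: the usual
arithmetic conversions turn a signed/unsigned 32-bit division into an unsigned
one, so a negative sign_extend32() result becomes a huge positive dividend. A
standalone demonstration with illustrative values:

  #include <stdio.h>
  #include <stdint.h>

  int main(void)
  {
  	int32_t  val   = -19075;   /* e.g. a sign-extended ADC reading */
  	uint32_t shunt = 1000;     /* unsigned micro-ohm divisor       */

  	/* int32 / uint32: val is first converted to 4294948221u. */
  	printf("unsigned: %u\n", val / shunt);           /* 4294948 */
  	/* Cast the divisor, as the patch does: signed division.  */
  	printf("signed:   %d\n", val / (int32_t)shunt);  /* -19     */
  	return 0;
  }
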
+diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
+index 9267df38c2d0a1..3991224148214a 100644
+--- a/drivers/i2c/busses/i2c-rcar.c
++++ b/drivers/i2c/busses/i2c-rcar.c
+@@ -130,6 +130,8 @@
+ #define ID_P_PM_BLOCKED		BIT(31)
+ #define ID_P_MASK		GENMASK(31, 27)
+ 
++#define ID_SLAVE_NACK		BIT(0)
++
+ enum rcar_i2c_type {
+ 	I2C_RCAR_GEN1,
+ 	I2C_RCAR_GEN2,
+@@ -166,6 +168,7 @@ struct rcar_i2c_priv {
+ 	int irq;
+ 
+ 	struct i2c_client *host_notify_client;
++	u8 slave_flags;
+ };
+ 
+ #define rcar_i2c_priv_to_dev(p)		((p)->adap.dev.parent)
+@@ -655,6 +658,7 @@ static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv)
+ {
+ 	u32 ssr_raw, ssr_filtered;
+ 	u8 value;
++	int ret;
+ 
+ 	ssr_raw = rcar_i2c_read(priv, ICSSR) & 0xff;
+ 	ssr_filtered = ssr_raw & rcar_i2c_read(priv, ICSIER);
+@@ -670,7 +674,10 @@ static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv)
+ 			rcar_i2c_write(priv, ICRXTX, value);
+ 			rcar_i2c_write(priv, ICSIER, SDE | SSR | SAR);
+ 		} else {
+-			i2c_slave_event(priv->slave, I2C_SLAVE_WRITE_REQUESTED, &value);
++			ret = i2c_slave_event(priv->slave, I2C_SLAVE_WRITE_REQUESTED, &value);
++			if (ret)
++				priv->slave_flags |= ID_SLAVE_NACK;
++
+ 			rcar_i2c_read(priv, ICRXTX);	/* dummy read */
+ 			rcar_i2c_write(priv, ICSIER, SDR | SSR | SAR);
+ 		}
+@@ -683,18 +690,21 @@ static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv)
+ 	if (ssr_filtered & SSR) {
+ 		i2c_slave_event(priv->slave, I2C_SLAVE_STOP, &value);
+ 		rcar_i2c_write(priv, ICSCR, SIE | SDBS); /* clear our NACK */
++		priv->slave_flags &= ~ID_SLAVE_NACK;
+ 		rcar_i2c_write(priv, ICSIER, SAR);
+ 		rcar_i2c_write(priv, ICSSR, ~SSR & 0xff);
+ 	}
+ 
+ 	/* master wants to write to us */
+ 	if (ssr_filtered & SDR) {
+-		int ret;
+-
+ 		value = rcar_i2c_read(priv, ICRXTX);
+ 		ret = i2c_slave_event(priv->slave, I2C_SLAVE_WRITE_RECEIVED, &value);
+-		/* Send NACK in case of error */
+-		rcar_i2c_write(priv, ICSCR, SIE | SDBS | (ret < 0 ? FNA : 0));
++		if (ret)
++			priv->slave_flags |= ID_SLAVE_NACK;
++
++		/* Send NACK in case of error, but it will come 1 byte late :( */
++		rcar_i2c_write(priv, ICSCR, SIE | SDBS |
++			       (priv->slave_flags & ID_SLAVE_NACK ? FNA : 0));
+ 		rcar_i2c_write(priv, ICSSR, ~SDR & 0xff);
+ 	}
+ 
+diff --git a/drivers/i2c/i2c-atr.c b/drivers/i2c/i2c-atr.c
+index f21475ae592183..0d54d0b5e32731 100644
+--- a/drivers/i2c/i2c-atr.c
++++ b/drivers/i2c/i2c-atr.c
+@@ -412,7 +412,7 @@ static int i2c_atr_bus_notifier_call(struct notifier_block *nb,
+ 				dev_name(dev), ret);
+ 		break;
+ 
+-	case BUS_NOTIFY_DEL_DEVICE:
++	case BUS_NOTIFY_REMOVED_DEVICE:
+ 		i2c_atr_detach_client(client->adapter, client);
+ 		break;
+ 
+diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
+index 7c810893bfa332..75d30861ffe21a 100644
+--- a/drivers/i2c/i2c-core-base.c
++++ b/drivers/i2c/i2c-core-base.c
+@@ -1562,6 +1562,7 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
+ 	res = device_add(&adap->dev);
+ 	if (res) {
+ 		pr_err("adapter '%s': can't register device (%d)\n", adap->name, res);
++		put_device(&adap->dev);
+ 		goto out_list;
+ 	}
+ 
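
The added put_device() reflects the driver-core rule that once
device_initialize() has run, only put_device() may release the embedded
kobject's reference, even when device_add() fails; freeing the structure
directly would skip the release() callback. The canonical error shape, as a
hedged fragment rather than the full i2c registration path:

  	device_initialize(&adap->dev);
  	/* ...set up name, bus type, parent... */

  	res = device_add(&adap->dev);
  	if (res) {
  		/* Not kfree(): drop the reference and let release()
  		 * do the actual cleanup. */
  		put_device(&adap->dev);
  		goto out_list;
  	}
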
+diff --git a/drivers/i2c/i2c-slave-testunit.c b/drivers/i2c/i2c-slave-testunit.c
+index 9fe3150378e863..7ae0c7902f670b 100644
+--- a/drivers/i2c/i2c-slave-testunit.c
++++ b/drivers/i2c/i2c-slave-testunit.c
+@@ -38,6 +38,7 @@ enum testunit_regs {
+ 
+ enum testunit_flags {
+ 	TU_FLAG_IN_PROCESS,
++	TU_FLAG_NACK,
+ };
+ 
+ struct testunit_data {
+@@ -90,8 +91,10 @@ static int i2c_slave_testunit_slave_cb(struct i2c_client *client,
+ 
+ 	switch (event) {
+ 	case I2C_SLAVE_WRITE_REQUESTED:
+-		if (test_bit(TU_FLAG_IN_PROCESS, &tu->flags))
+-			return -EBUSY;
++		if (test_bit(TU_FLAG_IN_PROCESS, &tu->flags) ||
++		    test_bit(TU_FLAG_NACK, &tu->flags)) {
++			ret = -EBUSY;
++			break;
++		}
+ 
+ 		memset(tu->regs, 0, TU_NUM_REGS);
+ 		tu->reg_idx = 0;
+@@ -99,8 +102,10 @@ static int i2c_slave_testunit_slave_cb(struct i2c_client *client,
+ 		break;
+ 
+ 	case I2C_SLAVE_WRITE_RECEIVED:
+-		if (test_bit(TU_FLAG_IN_PROCESS, &tu->flags))
+-			return -EBUSY;
++		if (test_bit(TU_FLAG_IN_PROCESS, &tu->flags) ||
++		    test_bit(TU_FLAG_NACK, &tu->flags)) {
++			ret = -EBUSY;
++			break;
++		}
+ 
+ 		if (tu->reg_idx < TU_NUM_REGS)
+ 			tu->regs[tu->reg_idx] = *val;
+@@ -129,6 +134,8 @@ static int i2c_slave_testunit_slave_cb(struct i2c_client *client,
+ 		 * here because we still need them in the workqueue!
+ 		 */
+ 		tu->reg_idx = 0;
++
++		clear_bit(TU_FLAG_NACK, &tu->flags);
+ 		break;
+ 
+ 	case I2C_SLAVE_READ_PROCESSED:
+@@ -151,6 +158,10 @@ static int i2c_slave_testunit_slave_cb(struct i2c_client *client,
+ 		break;
+ 	}
+ 
++	/* If an error occurred at some point, we NACK everything until the next STOP */
++	if (ret)
++		set_bit(TU_FLAG_NACK, &tu->flags);
++
+ 	return ret;
+ }
+ 
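
A note on the flag tests above: test_bit() takes a single bit number, not a
mask, so OR-ing two flag indices such as TU_FLAG_IN_PROCESS | TU_FLAG_NACK
would merely test one unrelated bit. Checking several flags therefore needs
separate test_bit() calls, as done above, or a plain mask test against the
word (hedged sketch):

  	if (tu->flags & (BIT(TU_FLAG_IN_PROCESS) | BIT(TU_FLAG_NACK)))
  		ret = -EBUSY;
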
+diff --git a/drivers/i2c/muxes/i2c-demux-pinctrl.c b/drivers/i2c/muxes/i2c-demux-pinctrl.c
+index 7e2686b606c04d..cec7f3447e1935 100644
+--- a/drivers/i2c/muxes/i2c-demux-pinctrl.c
++++ b/drivers/i2c/muxes/i2c-demux-pinctrl.c
+@@ -261,7 +261,9 @@ static int i2c_demux_pinctrl_probe(struct platform_device *pdev)
+ 	pm_runtime_no_callbacks(&pdev->dev);
+ 
+ 	/* switch to first parent as active master */
+-	i2c_demux_activate_master(priv, 0);
++	err = i2c_demux_activate_master(priv, 0);
++	if (err)
++		goto err_rollback;
+ 
+ 	err = device_create_file(&pdev->dev, &dev_attr_available_masters);
+ 	if (err)
+diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+index b20cffcc3e7d2d..14e434ff51edea 100644
+--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
++++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+@@ -2269,6 +2269,7 @@ int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
+ 	qp_attr->retry_cnt = qplib_qp->retry_cnt;
+ 	qp_attr->rnr_retry = qplib_qp->rnr_retry;
+ 	qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer;
++	qp_attr->port_num = __to_ib_port_num(qplib_qp->port_id);
+ 	qp_attr->rq_psn = qplib_qp->rq.psn;
+ 	qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic;
+ 	qp_attr->sq_psn = qplib_qp->sq.psn;
+diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+index b789e47ec97a85..9cd8f770d1b27e 100644
+--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
++++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+@@ -264,6 +264,10 @@ void bnxt_re_dealloc_ucontext(struct ib_ucontext *context);
+ int bnxt_re_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
+ void bnxt_re_mmap_free(struct rdma_user_mmap_entry *rdma_entry);
+ 
++static inline u32 __to_ib_port_num(u16 port_id)
++{
++	return (u32)port_id + 1;
++}
+ 
+ unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp);
+ void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp, unsigned long flags);
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+index 828e2f9808012b..613b5fc70e13ea 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+@@ -1479,6 +1479,7 @@ int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
+ 	qp->dest_qpn = le32_to_cpu(sb->dest_qp_id);
+ 	memcpy(qp->smac, sb->src_mac, 6);
+ 	qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id);
++	qp->port_id = le16_to_cpu(sb->port_id);
+ bail:
+ 	dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
+ 			  sbuf.sb, sbuf.dma_addr);
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+index d8c71c024613bf..6f02954eb1429f 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
++++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+@@ -298,6 +298,7 @@ struct bnxt_qplib_qp {
+ 	u32				dest_qpn;
+ 	u8				smac[6];
+ 	u16				vlan_id;
++	u16				port_id;
+ 	u8				nw_type;
+ 	struct bnxt_qplib_ah		ah;
+ 
+diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
+index d9b6ec844cdda0..0d3a889b1905c7 100644
+--- a/drivers/irqchip/irq-gic-v3-its.c
++++ b/drivers/irqchip/irq-gic-v3-its.c
+@@ -1961,7 +1961,7 @@ static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
+ 	if (!is_v4(its_dev->its))
+ 		return -EINVAL;
+ 
+-	guard(raw_spinlock_irq)(&its_dev->event_map.vlpi_lock);
++	guard(raw_spinlock)(&its_dev->event_map.vlpi_lock);
+ 
+ 	/* Unmap request? */
+ 	if (!info)
+diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
+index b0bfb61539c202..8fdee511bc0f2c 100644
+--- a/drivers/irqchip/irq-gic-v3.c
++++ b/drivers/irqchip/irq-gic-v3.c
+@@ -1522,7 +1522,7 @@ static int gic_retrigger(struct irq_data *data)
+ static int gic_cpu_pm_notifier(struct notifier_block *self,
+ 			       unsigned long cmd, void *v)
+ {
+-	if (cmd == CPU_PM_EXIT) {
++	if (cmd == CPU_PM_EXIT || cmd == CPU_PM_ENTER_FAILED) {
+ 		if (gic_dist_security_disabled())
+ 			gic_enable_redist(true);
+ 		gic_cpu_sys_reg_enable();
+diff --git a/drivers/irqchip/irqchip.c b/drivers/irqchip/irqchip.c
+index 1eeb0d0156ce9e..0ee7b6b71f5fa5 100644
+--- a/drivers/irqchip/irqchip.c
++++ b/drivers/irqchip/irqchip.c
+@@ -35,11 +35,10 @@ void __init irqchip_init(void)
+ int platform_irqchip_probe(struct platform_device *pdev)
+ {
+ 	struct device_node *np = pdev->dev.of_node;
+-	struct device_node *par_np = of_irq_find_parent(np);
++	struct device_node *par_np __free(device_node) = of_irq_find_parent(np);
+ 	of_irq_init_cb_t irq_init_cb = of_device_get_match_data(&pdev->dev);
+ 
+ 	if (!irq_init_cb) {
+-		of_node_put(par_np);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -55,7 +54,6 @@ int platform_irqchip_probe(struct platform_device *pdev)
+ 	 * interrupt controller can check for specific domains as necessary.
+ 	 */
+ 	if (par_np && !irq_find_matching_host(par_np, DOMAIN_BUS_ANY)) {
+-		of_node_put(par_np);
+ 		return -EPROBE_DEFER;
+ 	}
+ 
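
The `__free(device_node)` annotation above comes from <linux/cleanup.h>: it
registers of_node_put() as a scope-exit action, so every return path drops the
OF node reference without an explicit call. A self-contained sketch of the
same pattern (the function and its checks are illustrative):

  #include <linux/cleanup.h>
  #include <linux/of.h>

  static int parent_is_usable(struct device_node *np)
  {
  	/* of_node_put(parent) runs automatically when parent goes
  	 * out of scope, on every return below. */
  	struct device_node *parent __free(device_node) = of_get_parent(np);

  	if (!parent)
  		return -ENODEV;

  	return of_device_is_available(parent) ? 0 : -ENODEV;
  }
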
+diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
+index 8c57df44c40fe8..9d6e85bf227b92 100644
+--- a/drivers/mtd/spi-nor/core.c
++++ b/drivers/mtd/spi-nor/core.c
+@@ -89,7 +89,7 @@ void spi_nor_spimem_setup_op(const struct spi_nor *nor,
+ 		op->addr.buswidth = spi_nor_get_protocol_addr_nbits(proto);
+ 
+ 	if (op->dummy.nbytes)
+-		op->dummy.buswidth = spi_nor_get_protocol_data_nbits(proto);
++		op->dummy.buswidth = spi_nor_get_protocol_addr_nbits(proto);
+ 
+ 	if (op->data.nbytes)
+ 		op->data.buswidth = spi_nor_get_protocol_data_nbits(proto);
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+index 6a716337f48be1..268399dfcf22f0 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+@@ -923,7 +923,6 @@ static void xgbe_phy_free_phy_device(struct xgbe_prv_data *pdata)
+ 
+ static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata)
+ {
+-	__ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, };
+ 	struct xgbe_phy_data *phy_data = pdata->phy_data;
+ 	unsigned int phy_id = phy_data->phydev->phy_id;
+ 
+@@ -945,14 +944,7 @@ static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata)
+ 	phy_write(phy_data->phydev, 0x04, 0x0d01);
+ 	phy_write(phy_data->phydev, 0x00, 0x9140);
+ 
+-	linkmode_set_bit_array(phy_10_100_features_array,
+-			       ARRAY_SIZE(phy_10_100_features_array),
+-			       supported);
+-	linkmode_set_bit_array(phy_gbit_features_array,
+-			       ARRAY_SIZE(phy_gbit_features_array),
+-			       supported);
+-
+-	linkmode_copy(phy_data->phydev->supported, supported);
++	linkmode_copy(phy_data->phydev->supported, PHY_GBIT_FEATURES);
+ 
+ 	phy_support_asym_pause(phy_data->phydev);
+ 
+@@ -964,7 +956,6 @@ static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata)
+ 
+ static bool xgbe_phy_belfuse_phy_quirks(struct xgbe_prv_data *pdata)
+ {
+-	__ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, };
+ 	struct xgbe_phy_data *phy_data = pdata->phy_data;
+ 	struct xgbe_sfp_eeprom *sfp_eeprom = &phy_data->sfp_eeprom;
+ 	unsigned int phy_id = phy_data->phydev->phy_id;
+@@ -1028,13 +1019,7 @@ static bool xgbe_phy_belfuse_phy_quirks(struct xgbe_prv_data *pdata)
+ 	reg = phy_read(phy_data->phydev, 0x00);
+ 	phy_write(phy_data->phydev, 0x00, reg & ~0x00800);
+ 
+-	linkmode_set_bit_array(phy_10_100_features_array,
+-			       ARRAY_SIZE(phy_10_100_features_array),
+-			       supported);
+-	linkmode_set_bit_array(phy_gbit_features_array,
+-			       ARRAY_SIZE(phy_gbit_features_array),
+-			       supported);
+-	linkmode_copy(phy_data->phydev->supported, supported);
++	linkmode_copy(phy_data->phydev->supported, PHY_GBIT_FEATURES);
+ 	phy_support_asym_pause(phy_data->phydev);
+ 
+ 	netif_dbg(pdata, drv, pdata->netdev,
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index c255445e97f3c5..603e9c968c44bd 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -4558,7 +4558,7 @@ void bnxt_set_ring_params(struct bnxt *bp)
+ /* Changing allocation mode of RX rings.
+  * TODO: Update when extending xdp_rxq_info to support allocation modes.
+  */
+-int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
++static void __bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
+ {
+ 	struct net_device *dev = bp->dev;
+ 
+@@ -4579,15 +4579,30 @@ int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
+ 			bp->rx_skb_func = bnxt_rx_page_skb;
+ 		}
+ 		bp->rx_dir = DMA_BIDIRECTIONAL;
+-		/* Disable LRO or GRO_HW */
+-		netdev_update_features(dev);
+ 	} else {
+ 		dev->max_mtu = bp->max_mtu;
+ 		bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
+ 		bp->rx_dir = DMA_FROM_DEVICE;
+ 		bp->rx_skb_func = bnxt_rx_skb;
+ 	}
+-	return 0;
++}
++
++void bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
++{
++	__bnxt_set_rx_skb_mode(bp, page_mode);
++
++	if (!page_mode) {
++		int rx, tx;
++
++		bnxt_get_max_rings(bp, &rx, &tx, true);
++		if (rx > 1) {
++			bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS;
++			bp->dev->hw_features |= NETIF_F_LRO;
++		}
++	}
++
++	/* Update LRO and GRO_HW availability */
++	netdev_update_features(bp->dev);
+ }
+ 
+ static void bnxt_free_vnic_attributes(struct bnxt *bp)
+@@ -15909,7 +15924,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	if (bp->max_fltr < BNXT_MAX_FLTR)
+ 		bp->max_fltr = BNXT_MAX_FLTR;
+ 	bnxt_init_l2_fltr_tbl(bp);
+-	bnxt_set_rx_skb_mode(bp, false);
++	__bnxt_set_rx_skb_mode(bp, false);
+ 	bnxt_set_tpa_flags(bp);
+ 	bnxt_set_ring_params(bp);
+ 	bnxt_rdma_aux_device_init(bp);
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+index 9e05704d94450e..bee645f58d0bde 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+@@ -2796,7 +2796,7 @@ void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data);
+ u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx);
+ void bnxt_set_tpa_flags(struct bnxt *bp);
+ void bnxt_set_ring_params(struct bnxt *);
+-int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode);
++void bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode);
+ void bnxt_insert_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr);
+ void bnxt_del_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr);
+ int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap,
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+index f88b641533fcc5..dc51dce209d5f0 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+@@ -422,15 +422,8 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
+ 		bnxt_set_rx_skb_mode(bp, true);
+ 		xdp_features_set_redirect_target(dev, true);
+ 	} else {
+-		int rx, tx;
+-
+ 		xdp_features_clear_redirect_target(dev);
+ 		bnxt_set_rx_skb_mode(bp, false);
+-		bnxt_get_max_rings(bp, &rx, &tx, true);
+-		if (rx > 1) {
+-			bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS;
+-			bp->dev->hw_features |= NETIF_F_LRO;
+-		}
+ 	}
+ 	bp->tx_nr_rings_xdp = tx_xdp;
+ 	bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc + tx_xdp;
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index 9d9fcec41488e3..49d1748e0c043d 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -1591,19 +1591,22 @@ static void fec_enet_tx(struct net_device *ndev, int budget)
+ 		fec_enet_tx_queue(ndev, i, budget);
+ }
+ 
+-static void fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,
++static int fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,
+ 				struct bufdesc *bdp, int index)
+ {
+ 	struct page *new_page;
+ 	dma_addr_t phys_addr;
+ 
+ 	new_page = page_pool_dev_alloc_pages(rxq->page_pool);
+-	WARN_ON(!new_page);
+-	rxq->rx_skb_info[index].page = new_page;
++	if (unlikely(!new_page))
++		return -ENOMEM;
+ 
++	rxq->rx_skb_info[index].page = new_page;
+ 	rxq->rx_skb_info[index].offset = FEC_ENET_XDP_HEADROOM;
+ 	phys_addr = page_pool_get_dma_addr(new_page) + FEC_ENET_XDP_HEADROOM;
+ 	bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
++
++	return 0;
+ }
+ 
+ static u32
+@@ -1698,6 +1701,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
+ 	int cpu = smp_processor_id();
+ 	struct xdp_buff xdp;
+ 	struct page *page;
++	__fec32 cbd_bufaddr;
+ 	u32 sub_len = 4;
+ 
+ #if !defined(CONFIG_M5272)
+@@ -1766,12 +1770,17 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
+ 
+ 		index = fec_enet_get_bd_index(bdp, &rxq->bd);
+ 		page = rxq->rx_skb_info[index].page;
++		cbd_bufaddr = bdp->cbd_bufaddr;
++		if (fec_enet_update_cbd(rxq, bdp, index)) {
++			ndev->stats.rx_dropped++;
++			goto rx_processing_done;
++		}
++
+ 		dma_sync_single_for_cpu(&fep->pdev->dev,
+-					fec32_to_cpu(bdp->cbd_bufaddr),
++					fec32_to_cpu(cbd_bufaddr),
+ 					pkt_len,
+ 					DMA_FROM_DEVICE);
+ 		prefetch(page_address(page));
+-		fec_enet_update_cbd(rxq, bdp, index);
+ 
+ 		if (xdp_prog) {
+ 			xdp_buff_clear_frags_flag(&xdp);
+diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
+index d6f80da30decf4..558cda577191d6 100644
+--- a/drivers/net/ethernet/intel/ice/ice.h
++++ b/drivers/net/ethernet/intel/ice/ice.h
+@@ -1047,5 +1047,10 @@ static inline void ice_clear_rdma_cap(struct ice_pf *pf)
+ 	clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
+ }
+ 
++static inline enum ice_phy_model ice_get_phy_model(const struct ice_hw *hw)
++{
++	return hw->ptp.phy_model;
++}
++
+ extern const struct xdp_metadata_ops ice_xdp_md_ops;
+ #endif /* _ICE_H_ */
+diff --git a/drivers/net/ethernet/intel/ice/ice_adapter.c b/drivers/net/ethernet/intel/ice/ice_adapter.c
+index ad84d8ad49a63f..f3e195974a8efa 100644
+--- a/drivers/net/ethernet/intel/ice/ice_adapter.c
++++ b/drivers/net/ethernet/intel/ice/ice_adapter.c
+@@ -40,11 +40,17 @@ static struct ice_adapter *ice_adapter_new(void)
+ 	spin_lock_init(&adapter->ptp_gltsyn_time_lock);
+ 	refcount_set(&adapter->refcount, 1);
+ 
++	mutex_init(&adapter->ports.lock);
++	INIT_LIST_HEAD(&adapter->ports.ports);
++
+ 	return adapter;
+ }
+ 
+ static void ice_adapter_free(struct ice_adapter *adapter)
+ {
++	WARN_ON(!list_empty(&adapter->ports.ports));
++	mutex_destroy(&adapter->ports.lock);
++
+ 	kfree(adapter);
+ }
+ 
+diff --git a/drivers/net/ethernet/intel/ice/ice_adapter.h b/drivers/net/ethernet/intel/ice/ice_adapter.h
+index 9d11014ec02ff2..e233225848b384 100644
+--- a/drivers/net/ethernet/intel/ice/ice_adapter.h
++++ b/drivers/net/ethernet/intel/ice/ice_adapter.h
+@@ -4,22 +4,42 @@
+ #ifndef _ICE_ADAPTER_H_
+ #define _ICE_ADAPTER_H_
+ 
++#include <linux/types.h>
+ #include <linux/spinlock_types.h>
+ #include <linux/refcount_types.h>
+ 
+ struct pci_dev;
++struct ice_pf;
++
++/**
++ * struct ice_port_list - data used to store the list of adapter ports
++ *
++ * This structure contains data used to maintain a list of adapter ports
++ *
++ * @ports: list of ports
++ * @lock: protect access to the ports list
++ */
++struct ice_port_list {
++	struct list_head ports;
++	/* To synchronize the ports list operations */
++	struct mutex lock;
++};
+ 
+ /**
+  * struct ice_adapter - PCI adapter resources shared across PFs
+  * @ptp_gltsyn_time_lock: Spinlock protecting access to the GLTSYN_TIME
+  *                        register of the PTP clock.
+  * @refcount: Reference count. struct ice_pf objects hold the references.
++ * @ctrl_pf: Control PF of the adapter
++ * @ports: Ports list
+  */
+ struct ice_adapter {
++	refcount_t refcount;
+ 	/* For access to the GLTSYN_TIME register */
+ 	spinlock_t ptp_gltsyn_time_lock;
+ 
+-	refcount_t refcount;
++	struct ice_pf *ctrl_pf;
++	struct ice_port_list ports;
+ };
+ 
+ struct ice_adapter *ice_adapter_get(const struct pci_dev *pdev);
+diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+index 79a6edd0be0ec4..80f3dfd2712430 100644
+--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
++++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+@@ -1648,6 +1648,7 @@ struct ice_aqc_get_port_options_elem {
+ #define ICE_AQC_PORT_OPT_MAX_LANE_25G	5
+ #define ICE_AQC_PORT_OPT_MAX_LANE_50G	6
+ #define ICE_AQC_PORT_OPT_MAX_LANE_100G	7
++#define ICE_AQC_PORT_OPT_MAX_LANE_200G	8
+ 
+ 	u8 global_scid[2];
+ 	u8 phy_scid[2];
+diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
+index f1324e25b2af1c..068a467de1d56d 100644
+--- a/drivers/net/ethernet/intel/ice/ice_common.c
++++ b/drivers/net/ethernet/intel/ice/ice_common.c
+@@ -4074,6 +4074,57 @@ ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid,
+ 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+ }
+ 
++/**
++ * ice_get_phy_lane_number - Get PHY lane number for current adapter
++ * @hw: pointer to the hw struct
++ *
++ * Return: PHY lane number on success, negative error code otherwise.
++ */
++int ice_get_phy_lane_number(struct ice_hw *hw)
++{
++	struct ice_aqc_get_port_options_elem *options;
++	unsigned int lport = 0;
++	unsigned int lane;
++	int err;
++
++	options = kcalloc(ICE_AQC_PORT_OPT_MAX, sizeof(*options), GFP_KERNEL);
++	if (!options)
++		return -ENOMEM;
++
++	for (lane = 0; lane < ICE_MAX_PORT_PER_PCI_DEV; lane++) {
++		u8 options_count = ICE_AQC_PORT_OPT_MAX;
++		u8 speed, active_idx, pending_idx;
++		bool active_valid, pending_valid;
++
++		err = ice_aq_get_port_options(hw, options, &options_count, lane,
++					      true, &active_idx, &active_valid,
++					      &pending_idx, &pending_valid);
++		if (err)
++			goto err;
++
++		if (!active_valid)
++			continue;
++
++		speed = options[active_idx].max_lane_speed;
++		/* If we don't get speed for this lane, it's unoccupied */
++		if (speed > ICE_AQC_PORT_OPT_MAX_LANE_200G)
++			continue;
++
++		if (hw->pf_id == lport) {
++			kfree(options);
++			return lane;
++		}
++
++		lport++;
++	}
++
++	/* PHY lane not found */
++	err = -ENXIO;
++err:
++	kfree(options);
++	return err;
++}
++
+ /**
+  * ice_aq_sff_eeprom
+  * @hw: pointer to the HW struct
+diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
+index 27208a60cece51..fe6f88cfd94866 100644
+--- a/drivers/net/ethernet/intel/ice/ice_common.h
++++ b/drivers/net/ethernet/intel/ice/ice_common.h
+@@ -193,6 +193,7 @@ ice_aq_get_port_options(struct ice_hw *hw,
+ int
+ ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid,
+ 		       u8 new_option);
++int ice_get_phy_lane_number(struct ice_hw *hw);
+ int
+ ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
+ 		  u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index 8f2e758c394277..45eefe22fb5b73 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -1144,7 +1144,7 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
+ 	if (link_up == old_link && link_speed == old_link_speed)
+ 		return 0;
+ 
+-	ice_ptp_link_change(pf, pf->hw.pf_id, link_up);
++	ice_ptp_link_change(pf, link_up);
+ 
+ 	if (ice_is_dcb_active(pf)) {
+ 		if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
+@@ -6744,7 +6744,7 @@ static int ice_up_complete(struct ice_vsi *vsi)
+ 		ice_print_link_msg(vsi, true);
+ 		netif_tx_start_all_queues(vsi->netdev);
+ 		netif_carrier_on(vsi->netdev);
+-		ice_ptp_link_change(pf, pf->hw.pf_id, true);
++		ice_ptp_link_change(pf, true);
+ 	}
+ 
+ 	/* Perform an initial read of the statistics registers now to
+@@ -7214,7 +7214,7 @@ int ice_down(struct ice_vsi *vsi)
+ 
+ 	if (vsi->netdev) {
+ 		vlan_err = ice_vsi_del_vlan_zero(vsi);
+-		ice_ptp_link_change(vsi->back, vsi->back->hw.pf_id, false);
++		ice_ptp_link_change(vsi->back, false);
+ 		netif_carrier_off(vsi->netdev);
+ 		netif_tx_disable(vsi->netdev);
+ 	}
+diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
+index ef2e858f49bb0e..7c6f81beaee460 100644
+--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
++++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
+@@ -16,6 +16,18 @@ static const struct ptp_pin_desc ice_pin_desc_e810t[] = {
+ 	{ "U.FL2", UFL2, PTP_PF_NONE, 2, { 0, } },
+ };
+ 
++static struct ice_pf *ice_get_ctrl_pf(struct ice_pf *pf)
++{
++	return !pf->adapter ? NULL : pf->adapter->ctrl_pf;
++}
++
++static struct ice_ptp *ice_get_ctrl_ptp(struct ice_pf *pf)
++{
++	struct ice_pf *ctrl_pf = ice_get_ctrl_pf(pf);
++
++	return !ctrl_pf ? NULL : &ctrl_pf->ptp;
++}
++
+ /**
+  * ice_get_sma_config_e810t
+  * @hw: pointer to the hw struct
+@@ -800,8 +812,8 @@ static enum ice_tx_tstamp_work ice_ptp_tx_tstamp_owner(struct ice_pf *pf)
+ 	struct ice_ptp_port *port;
+ 	unsigned int i;
+ 
+-	mutex_lock(&pf->ptp.ports_owner.lock);
+-	list_for_each_entry(port, &pf->ptp.ports_owner.ports, list_member) {
++	mutex_lock(&pf->adapter->ports.lock);
++	list_for_each_entry(port, &pf->adapter->ports.ports, list_node) {
+ 		struct ice_ptp_tx *tx = &port->tx;
+ 
+ 		if (!tx || !tx->init)
+@@ -809,7 +821,7 @@ static enum ice_tx_tstamp_work ice_ptp_tx_tstamp_owner(struct ice_pf *pf)
+ 
+ 		ice_ptp_process_tx_tstamp(tx);
+ 	}
+-	mutex_unlock(&pf->ptp.ports_owner.lock);
++	mutex_unlock(&pf->adapter->ports.lock);
+ 
+ 	for (i = 0; i < ICE_GET_QUAD_NUM(pf->hw.ptp.num_lports); i++) {
+ 		u64 tstamp_ready;
+@@ -974,7 +986,7 @@ ice_ptp_flush_all_tx_tracker(struct ice_pf *pf)
+ {
+ 	struct ice_ptp_port *port;
+ 
+-	list_for_each_entry(port, &pf->ptp.ports_owner.ports, list_member)
++	list_for_each_entry(port, &pf->adapter->ports.ports, list_node)
+ 		ice_ptp_flush_tx_tracker(ptp_port_to_pf(port), &port->tx);
+ }
+ 
+@@ -1363,7 +1375,7 @@ ice_ptp_port_phy_stop(struct ice_ptp_port *ptp_port)
+ 
+ 	mutex_lock(&ptp_port->ps_lock);
+ 
+-	switch (hw->ptp.phy_model) {
++	switch (ice_get_phy_model(hw)) {
+ 	case ICE_PHY_ETH56G:
+ 		err = ice_stop_phy_timer_eth56g(hw, port, true);
+ 		break;
+@@ -1409,7 +1421,7 @@ ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port)
+ 
+ 	mutex_lock(&ptp_port->ps_lock);
+ 
+-	switch (hw->ptp.phy_model) {
++	switch (ice_get_phy_model(hw)) {
+ 	case ICE_PHY_ETH56G:
+ 		err = ice_start_phy_timer_eth56g(hw, port);
+ 		break;
+@@ -1454,10 +1466,9 @@ ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port)
+ /**
+  * ice_ptp_link_change - Reconfigure PTP after link status change
+  * @pf: Board private structure
+- * @port: Port for which the PHY start is set
+  * @linkup: Link is up or down
+  */
+-void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
++void ice_ptp_link_change(struct ice_pf *pf, bool linkup)
+ {
+ 	struct ice_ptp_port *ptp_port;
+ 	struct ice_hw *hw = &pf->hw;
+@@ -1465,14 +1476,7 @@ void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
+ 	if (pf->ptp.state != ICE_PTP_READY)
+ 		return;
+ 
+-	if (WARN_ON_ONCE(port >= hw->ptp.num_lports))
+-		return;
+-
+ 	ptp_port = &pf->ptp.port;
+-	if (ice_is_e825c(hw) && hw->ptp.is_2x50g_muxed_topo)
+-		port *= 2;
+-	if (WARN_ON_ONCE(ptp_port->port_num != port))
+-		return;
+ 
+ 	/* Update cached link status for this port immediately */
+ 	ptp_port->link_up = linkup;
+@@ -1480,8 +1484,7 @@ void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
+ 	/* Skip HW writes if reset is in progress */
+ 	if (pf->hw.reset_ongoing)
+ 		return;
+-
+-	switch (hw->ptp.phy_model) {
++	switch (ice_get_phy_model(hw)) {
+ 	case ICE_PHY_E810:
+ 		/* Do not reconfigure E810 PHY */
+ 		return;
+@@ -1514,7 +1517,7 @@ static int ice_ptp_cfg_phy_interrupt(struct ice_pf *pf, bool ena, u32 threshold)
+ 
+ 	ice_ptp_reset_ts_memory(hw);
+ 
+-	switch (hw->ptp.phy_model) {
++	switch (ice_get_phy_model(hw)) {
+ 	case ICE_PHY_ETH56G: {
+ 		int port;
+ 
+@@ -1553,7 +1556,7 @@ static int ice_ptp_cfg_phy_interrupt(struct ice_pf *pf, bool ena, u32 threshold)
+ 	case ICE_PHY_UNSUP:
+ 	default:
+ 		dev_warn(dev, "%s: Unexpected PHY model %d\n", __func__,
+-			 hw->ptp.phy_model);
++			 ice_get_phy_model(hw));
+ 		return -EOPNOTSUPP;
+ 	}
+ }
+@@ -1575,10 +1578,10 @@ static void ice_ptp_restart_all_phy(struct ice_pf *pf)
+ {
+ 	struct list_head *entry;
+ 
+-	list_for_each(entry, &pf->ptp.ports_owner.ports) {
++	list_for_each(entry, &pf->adapter->ports.ports) {
+ 		struct ice_ptp_port *port = list_entry(entry,
+ 						       struct ice_ptp_port,
+-						       list_member);
++						       list_node);
+ 
+ 		if (port->link_up)
+ 			ice_ptp_port_phy_restart(port);
+@@ -2059,7 +2062,7 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
+ 	/* For Vernier mode on E82X, we need to recalibrate after new settime.
+ 	 * Start with marking timestamps as invalid.
+ 	 */
+-	if (hw->ptp.phy_model == ICE_PHY_E82X) {
++	if (ice_get_phy_model(hw) == ICE_PHY_E82X) {
+ 		err = ice_ptp_clear_phy_offset_ready_e82x(hw);
+ 		if (err)
+ 			dev_warn(ice_pf_to_dev(pf), "Failed to mark timestamps as invalid before settime\n");
+@@ -2083,7 +2086,7 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
+ 	ice_ptp_enable_all_clkout(pf);
+ 
+ 	/* Recalibrate and re-enable timestamp blocks for E822/E823 */
+-	if (hw->ptp.phy_model == ICE_PHY_E82X)
++	if (ice_get_phy_model(hw) == ICE_PHY_E82X)
+ 		ice_ptp_restart_all_phy(pf);
+ exit:
+ 	if (err) {
+@@ -2895,6 +2898,50 @@ void ice_ptp_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
+ 	dev_err(ice_pf_to_dev(pf), "PTP reset failed %d\n", err);
+ }
+ 
++static bool ice_is_primary(struct ice_hw *hw)
++{
++	return ice_is_e825c(hw) && ice_is_dual(hw) ?
++		!!(hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M) : true;
++}
++
++static int ice_ptp_setup_adapter(struct ice_pf *pf)
++{
++	if (!ice_pf_src_tmr_owned(pf) || !ice_is_primary(&pf->hw))
++		return -EPERM;
++
++	pf->adapter->ctrl_pf = pf;
++
++	return 0;
++}
++
++static int ice_ptp_setup_pf(struct ice_pf *pf)
++{
++	struct ice_ptp *ctrl_ptp = ice_get_ctrl_ptp(pf);
++	struct ice_ptp *ptp = &pf->ptp;
++
++	if (WARN_ON(!ctrl_ptp) || ice_get_phy_model(&pf->hw) == ICE_PHY_UNSUP)
++		return -ENODEV;
++
++	INIT_LIST_HEAD(&ptp->port.list_node);
++	mutex_lock(&pf->adapter->ports.lock);
++
++	list_add(&ptp->port.list_node,
++		 &pf->adapter->ports.ports);
++	mutex_unlock(&pf->adapter->ports.lock);
++
++	return 0;
++}
++
++static void ice_ptp_cleanup_pf(struct ice_pf *pf)
++{
++	struct ice_ptp *ptp = &pf->ptp;
++
++	if (ice_get_phy_model(&pf->hw) != ICE_PHY_UNSUP) {
++		mutex_lock(&pf->adapter->ports.lock);
++		list_del(&ptp->port.list_node);
++		mutex_unlock(&pf->adapter->ports.lock);
++	}
++}
+ /**
+  * ice_ptp_aux_dev_to_aux_pf - Get auxiliary PF handle for the auxiliary device
+  * @aux_dev: auxiliary device to get the auxiliary PF for
+@@ -2946,9 +2993,9 @@ static int ice_ptp_auxbus_probe(struct auxiliary_device *aux_dev,
+ 	if (WARN_ON(!owner_pf))
+ 		return -ENODEV;
+ 
+-	INIT_LIST_HEAD(&aux_pf->ptp.port.list_member);
++	INIT_LIST_HEAD(&aux_pf->ptp.port.list_node);
+ 	mutex_lock(&owner_pf->ptp.ports_owner.lock);
+-	list_add(&aux_pf->ptp.port.list_member,
++	list_add(&aux_pf->ptp.port.list_node,
+ 		 &owner_pf->ptp.ports_owner.ports);
+ 	mutex_unlock(&owner_pf->ptp.ports_owner.lock);
+ 
+@@ -2965,7 +3012,7 @@ static void ice_ptp_auxbus_remove(struct auxiliary_device *aux_dev)
+ 	struct ice_pf *aux_pf = ice_ptp_aux_dev_to_aux_pf(aux_dev);
+ 
+ 	mutex_lock(&owner_pf->ptp.ports_owner.lock);
+-	list_del(&aux_pf->ptp.port.list_member);
++	list_del(&aux_pf->ptp.port.list_node);
+ 	mutex_unlock(&owner_pf->ptp.ports_owner.lock);
+ }
+ 
+@@ -3025,7 +3072,7 @@ ice_ptp_auxbus_create_id_table(struct ice_pf *pf, const char *name)
+  * ice_ptp_register_auxbus_driver - Register PTP auxiliary bus driver
+  * @pf: Board private structure
+  */
+-static int ice_ptp_register_auxbus_driver(struct ice_pf *pf)
++static int __always_unused ice_ptp_register_auxbus_driver(struct ice_pf *pf)
+ {
+ 	struct auxiliary_driver *aux_driver;
+ 	struct ice_ptp *ptp;
+@@ -3068,7 +3115,7 @@ static int ice_ptp_register_auxbus_driver(struct ice_pf *pf)
+  * ice_ptp_unregister_auxbus_driver - Unregister PTP auxiliary bus driver
+  * @pf: Board private structure
+  */
+-static void ice_ptp_unregister_auxbus_driver(struct ice_pf *pf)
++static void __always_unused ice_ptp_unregister_auxbus_driver(struct ice_pf *pf)
+ {
+ 	struct auxiliary_driver *aux_driver = &pf->ptp.ports_owner.aux_driver;
+ 
+@@ -3087,15 +3134,12 @@ static void ice_ptp_unregister_auxbus_driver(struct ice_pf *pf)
+  */
+ int ice_ptp_clock_index(struct ice_pf *pf)
+ {
+-	struct auxiliary_device *aux_dev;
+-	struct ice_pf *owner_pf;
++	struct ice_ptp *ctrl_ptp = ice_get_ctrl_ptp(pf);
+ 	struct ptp_clock *clock;
+ 
+-	aux_dev = &pf->ptp.port.aux_dev;
+-	owner_pf = ice_ptp_aux_dev_to_owner_pf(aux_dev);
+-	if (!owner_pf)
++	if (!ctrl_ptp)
+ 		return -1;
+-	clock = owner_pf->ptp.clock;
++	clock = ctrl_ptp->clock;
+ 
+ 	return clock ? ptp_clock_index(clock) : -1;
+ }
+@@ -3155,15 +3199,7 @@ static int ice_ptp_init_owner(struct ice_pf *pf)
+ 	if (err)
+ 		goto err_clk;
+ 
+-	err = ice_ptp_register_auxbus_driver(pf);
+-	if (err) {
+-		dev_err(ice_pf_to_dev(pf), "Failed to register PTP auxbus driver");
+-		goto err_aux;
+-	}
+-
+ 	return 0;
+-err_aux:
+-	ptp_clock_unregister(pf->ptp.clock);
+ err_clk:
+ 	pf->ptp.clock = NULL;
+ err_exit:
+@@ -3209,7 +3245,7 @@ static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port)
+ 
+ 	mutex_init(&ptp_port->ps_lock);
+ 
+-	switch (hw->ptp.phy_model) {
++	switch (ice_get_phy_model(hw)) {
+ 	case ICE_PHY_ETH56G:
+ 		return ice_ptp_init_tx_eth56g(pf, &ptp_port->tx,
+ 					      ptp_port->port_num);
+@@ -3239,7 +3275,7 @@ static void ice_ptp_release_auxbus_device(struct device *dev)
+  * ice_ptp_create_auxbus_device - Create PTP auxiliary bus device
+  * @pf: Board private structure
+  */
+-static int ice_ptp_create_auxbus_device(struct ice_pf *pf)
++static __always_unused int ice_ptp_create_auxbus_device(struct ice_pf *pf)
+ {
+ 	struct auxiliary_device *aux_dev;
+ 	struct ice_ptp *ptp;
+@@ -3286,7 +3322,7 @@ static int ice_ptp_create_auxbus_device(struct ice_pf *pf)
+  * ice_ptp_remove_auxbus_device - Remove PTP auxiliary bus device
+  * @pf: Board private structure
+  */
+-static void ice_ptp_remove_auxbus_device(struct ice_pf *pf)
++static __always_unused void ice_ptp_remove_auxbus_device(struct ice_pf *pf)
+ {
+ 	struct auxiliary_device *aux_dev = &pf->ptp.port.aux_dev;
+ 
+@@ -3307,7 +3343,7 @@ static void ice_ptp_remove_auxbus_device(struct ice_pf *pf)
+  */
+ static void ice_ptp_init_tx_interrupt_mode(struct ice_pf *pf)
+ {
+-	switch (pf->hw.ptp.phy_model) {
++	switch (ice_get_phy_model(&pf->hw)) {
+ 	case ICE_PHY_E82X:
+ 		/* E822 based PHY has the clock owner process the interrupt
+ 		 * for all ports.
+@@ -3339,10 +3375,17 @@ void ice_ptp_init(struct ice_pf *pf)
+ {
+ 	struct ice_ptp *ptp = &pf->ptp;
+ 	struct ice_hw *hw = &pf->hw;
+-	int err;
++	int lane_num, err;
+ 
+ 	ptp->state = ICE_PTP_INITIALIZING;
+ 
++	lane_num = ice_get_phy_lane_number(hw);
++	if (lane_num < 0) {
++		err = lane_num;
++		goto err_exit;
++	}
++
++	ptp->port.port_num = (u8)lane_num;
+ 	ice_ptp_init_hw(hw);
+ 
+ 	ice_ptp_init_tx_interrupt_mode(pf);
+@@ -3350,19 +3393,22 @@ void ice_ptp_init(struct ice_pf *pf)
+ 	/* If this function owns the clock hardware, it must allocate and
+ 	 * configure the PTP clock device to represent it.
+ 	 */
+-	if (ice_pf_src_tmr_owned(pf)) {
++	if (ice_pf_src_tmr_owned(pf) && ice_is_primary(hw)) {
++		err = ice_ptp_setup_adapter(pf);
++		if (err)
++			goto err_exit;
+ 		err = ice_ptp_init_owner(pf);
+ 		if (err)
+-			goto err;
++			goto err_exit;
+ 	}
+ 
+-	ptp->port.port_num = hw->pf_id;
+-	if (ice_is_e825c(hw) && hw->ptp.is_2x50g_muxed_topo)
+-		ptp->port.port_num = hw->pf_id * 2;
++	err = ice_ptp_setup_pf(pf);
++	if (err)
++		goto err_exit;
+ 
+ 	err = ice_ptp_init_port(pf, &ptp->port);
+ 	if (err)
+-		goto err;
++		goto err_exit;
+ 
+ 	/* Start the PHY timestamping block */
+ 	ice_ptp_reset_phy_timestamping(pf);
+@@ -3370,20 +3416,16 @@ void ice_ptp_init(struct ice_pf *pf)
+ 	/* Configure initial Tx interrupt settings */
+ 	ice_ptp_cfg_tx_interrupt(pf);
+ 
+-	err = ice_ptp_create_auxbus_device(pf);
+-	if (err)
+-		goto err;
+-
+ 	ptp->state = ICE_PTP_READY;
+ 
+ 	err = ice_ptp_init_work(pf, ptp);
+ 	if (err)
+-		goto err;
++		goto err_exit;
+ 
+ 	dev_info(ice_pf_to_dev(pf), "PTP init successful\n");
+ 	return;
+ 
+-err:
++err_exit:
+ 	/* If we registered a PTP clock, release it */
+ 	if (pf->ptp.clock) {
+ 		ptp_clock_unregister(ptp->clock);
+@@ -3410,7 +3452,7 @@ void ice_ptp_release(struct ice_pf *pf)
+ 	/* Disable timestamping for both Tx and Rx */
+ 	ice_ptp_disable_timestamp_mode(pf);
+ 
+-	ice_ptp_remove_auxbus_device(pf);
++	ice_ptp_cleanup_pf(pf);
+ 
+ 	ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);
+ 
+@@ -3425,9 +3467,6 @@ void ice_ptp_release(struct ice_pf *pf)
+ 		pf->ptp.kworker = NULL;
+ 	}
+ 
+-	if (ice_pf_src_tmr_owned(pf))
+-		ice_ptp_unregister_auxbus_driver(pf);
+-
+ 	if (!pf->ptp.clock)
+ 		return;
+ 
+diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h
+index 2db2257a0fb2f3..f1cfa6aa4e76bf 100644
+--- a/drivers/net/ethernet/intel/ice/ice_ptp.h
++++ b/drivers/net/ethernet/intel/ice/ice_ptp.h
+@@ -169,7 +169,7 @@ struct ice_ptp_tx {
+  * ready for PTP functionality. It is used to track the port initialization
+  * and determine when the port's PHY offset is valid.
+  *
+- * @list_member: list member structure of auxiliary device
++ * @list_node: list member structure
+  * @tx: Tx timestamp tracking for this port
+  * @aux_dev: auxiliary device associated with this port
+  * @ov_work: delayed work task for tracking when PHY offset is valid
+@@ -179,7 +179,7 @@ struct ice_ptp_tx {
+  * @port_num: the port number this structure represents
+  */
+ struct ice_ptp_port {
+-	struct list_head list_member;
++	struct list_head list_node;
+ 	struct ice_ptp_tx tx;
+ 	struct auxiliary_device aux_dev;
+ 	struct kthread_delayed_work ov_work;
+@@ -205,6 +205,7 @@ enum ice_ptp_tx_interrupt {
+  * @ports: list of porst handled by this port owner
+  * @lock: protect access to ports list
+  */
++
+ struct ice_ptp_port_owner {
+ 	struct auxiliary_driver aux_driver;
+ 	struct list_head ports;
+@@ -331,7 +332,7 @@ void ice_ptp_prepare_for_reset(struct ice_pf *pf,
+ 			       enum ice_reset_req reset_type);
+ void ice_ptp_init(struct ice_pf *pf);
+ void ice_ptp_release(struct ice_pf *pf);
+-void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup);
++void ice_ptp_link_change(struct ice_pf *pf, bool linkup);
+ #else /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
+ static inline int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr)
+ {
+@@ -379,7 +380,7 @@ static inline void ice_ptp_prepare_for_reset(struct ice_pf *pf,
+ }
+ static inline void ice_ptp_init(struct ice_pf *pf) { }
+ static inline void ice_ptp_release(struct ice_pf *pf) { }
+-static inline void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
++static inline void ice_ptp_link_change(struct ice_pf *pf, bool linkup)
+ {
+ }
+ 
+diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
+index 3005dd252a1026..bdb1020147d1c2 100644
+--- a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
++++ b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
+@@ -131,7 +131,7 @@ struct ice_eth56g_mac_reg_cfg eth56g_mac_cfg[NUM_ICE_ETH56G_LNK_SPD] = {
+ 		.rx_offset = {
+ 			.serdes = 0xffffeb27, /* -10.42424 */
+ 			.no_fec = 0xffffcccd, /* -25.6 */
+-			.fc = 0xfffe0014, /* -255.96 */
++			.fc = 0xfffc557b, /* -469.26 */
+ 			.sfd = 0x4a4, /* 2.32 */
+ 			.bs_ds = 0x32 /* 0.0969697 */
+ 		}
+diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
+index 3816e45b6ab44a..7190fde16c8681 100644
+--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
++++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
+@@ -804,7 +804,7 @@ static u32 ice_ptp_tmr_cmd_to_port_reg(struct ice_hw *hw,
+ 	/* Certain hardware families share the same register values for the
+ 	 * port register and source timer register.
+ 	 */
+-	switch (hw->ptp.phy_model) {
++	switch (ice_get_phy_model(hw)) {
+ 	case ICE_PHY_E810:
+ 		return ice_ptp_tmr_cmd_to_src_reg(hw, cmd) & TS_CMD_MASK_E810;
+ 	default:
+@@ -877,31 +877,46 @@ static void ice_ptp_exec_tmr_cmd(struct ice_hw *hw)
+  * The following functions operate on devices with the ETH 56G PHY.
+  */
+ 
++/**
++ * ice_ptp_get_dest_dev_e825 - get destination PHY for given port number
++ * @hw: pointer to the HW struct
++ * @port: destination port
++ *
++ * Return: destination sideband queue PHY device.
++ */
++static enum ice_sbq_msg_dev ice_ptp_get_dest_dev_e825(struct ice_hw *hw,
++						      u8 port)
++{
++	/* On a single complex E825, PHY 0 is always destination device phy_0
++	 * and PHY 1 is phy_0_peer.
++	 */
++	if (port >= hw->ptp.ports_per_phy)
++		return eth56g_phy_1;
++	else
++		return eth56g_phy_0;
++}
++
+ /**
+  * ice_write_phy_eth56g - Write a PHY port register
+  * @hw: pointer to the HW struct
+- * @phy_idx: PHY index
++ * @port: destination port
+  * @addr: PHY register address
+  * @val: Value to write
+  *
+  * Return: 0 on success, other error codes when failed to write to PHY
+  */
+-static int ice_write_phy_eth56g(struct ice_hw *hw, u8 phy_idx, u32 addr,
+-				u32 val)
++static int ice_write_phy_eth56g(struct ice_hw *hw, u8 port, u32 addr, u32 val)
+ {
+-	struct ice_sbq_msg_input phy_msg;
++	struct ice_sbq_msg_input msg = {
++		.dest_dev = ice_ptp_get_dest_dev_e825(hw, port),
++		.opcode = ice_sbq_msg_wr,
++		.msg_addr_low = lower_16_bits(addr),
++		.msg_addr_high = upper_16_bits(addr),
++		.data = val
++	};
+ 	int err;
+ 
+-	phy_msg.opcode = ice_sbq_msg_wr;
+-
+-	phy_msg.msg_addr_low = lower_16_bits(addr);
+-	phy_msg.msg_addr_high = upper_16_bits(addr);
+-
+-	phy_msg.data = val;
+-	phy_msg.dest_dev = hw->ptp.phy.eth56g.phy_addr[phy_idx];
+-
+-	err = ice_sbq_rw_reg(hw, &phy_msg, ICE_AQ_FLAG_RD);
+-
++	err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);
+ 	if (err)
+ 		ice_debug(hw, ICE_DBG_PTP, "PTP failed to send msg to phy %d\n",
+ 			  err);
+@@ -912,41 +927,36 @@ static int ice_write_phy_eth56g(struct ice_hw *hw, u8 phy_idx, u32 addr,
+ /**
+  * ice_read_phy_eth56g - Read a PHY port register
+  * @hw: pointer to the HW struct
+- * @phy_idx: PHY index
++ * @port: destination port
+  * @addr: PHY register address
+  * @val: Value to write
+  *
+  * Return: 0 on success, other error codes when failed to read from PHY
+  */
+-static int ice_read_phy_eth56g(struct ice_hw *hw, u8 phy_idx, u32 addr,
+-			       u32 *val)
++static int ice_read_phy_eth56g(struct ice_hw *hw, u8 port, u32 addr, u32 *val)
+ {
+-	struct ice_sbq_msg_input phy_msg;
++	struct ice_sbq_msg_input msg = {
++		.dest_dev = ice_ptp_get_dest_dev_e825(hw, port),
++		.opcode = ice_sbq_msg_rd,
++		.msg_addr_low = lower_16_bits(addr),
++		.msg_addr_high = upper_16_bits(addr)
++	};
+ 	int err;
+ 
+-	phy_msg.opcode = ice_sbq_msg_rd;
+-
+-	phy_msg.msg_addr_low = lower_16_bits(addr);
+-	phy_msg.msg_addr_high = upper_16_bits(addr);
+-
+-	phy_msg.data = 0;
+-	phy_msg.dest_dev = hw->ptp.phy.eth56g.phy_addr[phy_idx];
+-
+-	err = ice_sbq_rw_reg(hw, &phy_msg, ICE_AQ_FLAG_RD);
+-	if (err) {
++	err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);
++	if (err)
+ 		ice_debug(hw, ICE_DBG_PTP, "PTP failed to send msg to phy %d\n",
+ 			  err);
+-		return err;
+-	}
+-
+-	*val = phy_msg.data;
++	else
++		*val = msg.data;
+ 
+-	return 0;
++	return err;
+ }
+ 
+ /**
+  * ice_phy_res_address_eth56g - Calculate a PHY port register address
+- * @port: Port number to be written
++ * @hw: pointer to the HW struct
++ * @lane: Lane number to be written
+  * @res_type: resource type (register/memory)
+  * @offset: Offset from PHY port register base
+  * @addr: The result address
+@@ -955,17 +965,19 @@ static int ice_read_phy_eth56g(struct ice_hw *hw, u8 phy_idx, u32 addr,
+  * * %0      - success
+  * * %EINVAL - invalid port number or resource type
+  */
+-static int ice_phy_res_address_eth56g(u8 port, enum eth56g_res_type res_type,
+-				      u32 offset, u32 *addr)
++static int ice_phy_res_address_eth56g(struct ice_hw *hw, u8 lane,
++				      enum eth56g_res_type res_type,
++				      u32 offset,
++				      u32 *addr)
+ {
+-	u8 lane = port % ICE_PORTS_PER_QUAD;
+-	u8 phy = ICE_GET_QUAD_NUM(port);
+-
+ 	if (res_type >= NUM_ETH56G_PHY_RES)
+ 		return -EINVAL;
+ 
+-	*addr = eth56g_phy_res[res_type].base[phy] +
++	/* Lanes 4..7 are in fact 0..3 on a second PHY */
++	lane %= hw->ptp.ports_per_phy;
++	*addr = eth56g_phy_res[res_type].base[0] +
+ 		lane * eth56g_phy_res[res_type].step + offset;
++
+ 	return 0;
+ }
+ 
+@@ -985,19 +997,17 @@ static int ice_phy_res_address_eth56g(u8 port, enum eth56g_res_type res_type,
+ static int ice_write_port_eth56g(struct ice_hw *hw, u8 port, u32 offset,
+ 				 u32 val, enum eth56g_res_type res_type)
+ {
+-	u8 phy_port = port % hw->ptp.ports_per_phy;
+-	u8 phy_idx = port / hw->ptp.ports_per_phy;
+ 	u32 addr;
+ 	int err;
+ 
+ 	if (port >= hw->ptp.num_lports)
+ 		return -EINVAL;
+ 
+-	err = ice_phy_res_address_eth56g(phy_port, res_type, offset, &addr);
++	err = ice_phy_res_address_eth56g(hw, port, res_type, offset, &addr);
+ 	if (err)
+ 		return err;
+ 
+-	return ice_write_phy_eth56g(hw, phy_idx, addr, val);
++	return ice_write_phy_eth56g(hw, port, addr, val);
+ }
+ 
+ /**
+@@ -1016,19 +1026,17 @@ static int ice_write_port_eth56g(struct ice_hw *hw, u8 port, u32 offset,
+ static int ice_read_port_eth56g(struct ice_hw *hw, u8 port, u32 offset,
+ 				u32 *val, enum eth56g_res_type res_type)
+ {
+-	u8 phy_port = port % hw->ptp.ports_per_phy;
+-	u8 phy_idx = port / hw->ptp.ports_per_phy;
+ 	u32 addr;
+ 	int err;
+ 
+ 	if (port >= hw->ptp.num_lports)
+ 		return -EINVAL;
+ 
+-	err = ice_phy_res_address_eth56g(phy_port, res_type, offset, &addr);
++	err = ice_phy_res_address_eth56g(hw, port, res_type, offset, &addr);
+ 	if (err)
+ 		return err;
+ 
+-	return ice_read_phy_eth56g(hw, phy_idx, addr, val);
++	return ice_read_phy_eth56g(hw, port, addr, val);
+ }
+ 
+ /**
+@@ -1177,6 +1185,56 @@ static int ice_write_port_mem_eth56g(struct ice_hw *hw, u8 port, u16 offset,
+ 	return ice_write_port_eth56g(hw, port, offset, val, ETH56G_PHY_MEM_PTP);
+ }
+ 
++/**
++ * ice_write_quad_ptp_reg_eth56g - Write a PHY quad register
++ * @hw: pointer to the HW struct
++ * @offset: PHY register offset
++ * @port: Port number
++ * @val: Value to write
++ *
++ * Return:
++ * * %0     - success
++ * * %EIO  - invalid port number or resource type
++ * * %other - failed to write to PHY
++ */
++static int ice_write_quad_ptp_reg_eth56g(struct ice_hw *hw, u8 port,
++					 u32 offset, u32 val)
++{
++	u32 addr;
++
++	if (port >= hw->ptp.num_lports)
++		return -EIO;
++
++	addr = eth56g_phy_res[ETH56G_PHY_REG_PTP].base[0] + offset;
++
++	return ice_write_phy_eth56g(hw, port, addr, val);
++}
++
++/**
++ * ice_read_quad_ptp_reg_eth56g - Read a PHY quad register
++ * @hw: pointer to the HW struct
++ * @offset: PHY register offset
++ * @port: Port number
++ * @val: Value to read
++ *
++ * Return:
++ * * %0     - success
++ * * %EIO  - invalid port number or resource type
++ * * %other - failed to read from PHY
++ */
++static int ice_read_quad_ptp_reg_eth56g(struct ice_hw *hw, u8 port,
++					u32 offset, u32 *val)
++{
++	u32 addr;
++
++	if (port >= hw->ptp.num_lports)
++		return -EIO;
++
++	addr = eth56g_phy_res[ETH56G_PHY_REG_PTP].base[0] + offset;
++
++	return ice_read_phy_eth56g(hw, port, addr, val);
++}
++
+ /**
+  * ice_is_64b_phy_reg_eth56g - Check if this is a 64bit PHY register
+  * @low_addr: the low address to check
+@@ -1896,7 +1954,6 @@ ice_phy_get_speed_eth56g(struct ice_link_status *li)
+  */
+ static int ice_phy_cfg_parpcs_eth56g(struct ice_hw *hw, u8 port)
+ {
+-	u8 port_blk = port & ~(ICE_PORTS_PER_QUAD - 1);
+ 	u32 val;
+ 	int err;
+ 
+@@ -1911,8 +1968,8 @@ static int ice_phy_cfg_parpcs_eth56g(struct ice_hw *hw, u8 port)
+ 	switch (ice_phy_get_speed_eth56g(&hw->port_info->phy.link_info)) {
+ 	case ICE_ETH56G_LNK_SPD_1G:
+ 	case ICE_ETH56G_LNK_SPD_2_5G:
+-		err = ice_read_ptp_reg_eth56g(hw, port_blk,
+-					      PHY_GPCS_CONFIG_REG0, &val);
++		err = ice_read_quad_ptp_reg_eth56g(hw, port,
++						   PHY_GPCS_CONFIG_REG0, &val);
+ 		if (err) {
+ 			ice_debug(hw, ICE_DBG_PTP, "Failed to read PHY_GPCS_CONFIG_REG0, status: %d",
+ 				  err);
+@@ -1923,8 +1980,8 @@ static int ice_phy_cfg_parpcs_eth56g(struct ice_hw *hw, u8 port)
+ 		val |= FIELD_PREP(PHY_GPCS_CONFIG_REG0_TX_THR_M,
+ 				  ICE_ETH56G_NOMINAL_TX_THRESH);
+ 
+-		err = ice_write_ptp_reg_eth56g(hw, port_blk,
+-					       PHY_GPCS_CONFIG_REG0, val);
++		err = ice_write_quad_ptp_reg_eth56g(hw, port,
++						    PHY_GPCS_CONFIG_REG0, val);
+ 		if (err) {
+ 			ice_debug(hw, ICE_DBG_PTP, "Failed to write PHY_GPCS_CONFIG_REG0, status: %d",
+ 				  err);
+@@ -1965,50 +2022,47 @@ static int ice_phy_cfg_parpcs_eth56g(struct ice_hw *hw, u8 port)
+  */
+ int ice_phy_cfg_ptp_1step_eth56g(struct ice_hw *hw, u8 port)
+ {
+-	u8 port_blk = port & ~(ICE_PORTS_PER_QUAD - 1);
+-	u8 blk_port = port & (ICE_PORTS_PER_QUAD - 1);
++	u8 quad_lane = port % ICE_PORTS_PER_QUAD;
++	u32 addr, val, peer_delay;
+ 	bool enable, sfd_ena;
+-	u32 val, peer_delay;
+ 	int err;
+ 
+ 	enable = hw->ptp.phy.eth56g.onestep_ena;
+ 	peer_delay = hw->ptp.phy.eth56g.peer_delay;
+ 	sfd_ena = hw->ptp.phy.eth56g.sfd_ena;
+ 
+-	/* PHY_PTP_1STEP_CONFIG */
+-	err = ice_read_ptp_reg_eth56g(hw, port_blk, PHY_PTP_1STEP_CONFIG, &val);
++	addr = PHY_PTP_1STEP_CONFIG;
++	err = ice_read_quad_ptp_reg_eth56g(hw, port, addr, &val);
+ 	if (err)
+ 		return err;
+ 
+ 	if (enable)
+-		val |= blk_port;
++		val |= BIT(quad_lane);
+ 	else
+-		val &= ~blk_port;
++		val &= ~BIT(quad_lane);
+ 
+ 	val &= ~(PHY_PTP_1STEP_T1S_UP64_M | PHY_PTP_1STEP_T1S_DELTA_M);
+ 
+-	err = ice_write_ptp_reg_eth56g(hw, port_blk, PHY_PTP_1STEP_CONFIG, val);
++	err = ice_write_quad_ptp_reg_eth56g(hw, port, addr, val);
+ 	if (err)
+ 		return err;
+ 
+-	/* PHY_PTP_1STEP_PEER_DELAY */
++	addr = PHY_PTP_1STEP_PEER_DELAY(quad_lane);
+ 	val = FIELD_PREP(PHY_PTP_1STEP_PD_DELAY_M, peer_delay);
+ 	if (peer_delay)
+ 		val |= PHY_PTP_1STEP_PD_ADD_PD_M;
+ 	val |= PHY_PTP_1STEP_PD_DLY_V_M;
+-	err = ice_write_ptp_reg_eth56g(hw, port_blk,
+-				       PHY_PTP_1STEP_PEER_DELAY(blk_port), val);
++	err = ice_write_quad_ptp_reg_eth56g(hw, port, addr, val);
+ 	if (err)
+ 		return err;
+ 
+ 	val &= ~PHY_PTP_1STEP_PD_DLY_V_M;
+-	err = ice_write_ptp_reg_eth56g(hw, port_blk,
+-				       PHY_PTP_1STEP_PEER_DELAY(blk_port), val);
++	err = ice_write_quad_ptp_reg_eth56g(hw, port, addr, val);
+ 	if (err)
+ 		return err;
+ 
+-	/* PHY_MAC_XIF_MODE */
+-	err = ice_read_mac_reg_eth56g(hw, port, PHY_MAC_XIF_MODE, &val);
++	addr = PHY_MAC_XIF_MODE;
++	err = ice_read_mac_reg_eth56g(hw, port, addr, &val);
+ 	if (err)
+ 		return err;
+ 
+@@ -2028,7 +2082,7 @@ int ice_phy_cfg_ptp_1step_eth56g(struct ice_hw *hw, u8 port)
+ 	       FIELD_PREP(PHY_MAC_XIF_TS_BIN_MODE_M, enable) |
+ 	       FIELD_PREP(PHY_MAC_XIF_TS_SFD_ENA_M, sfd_ena);
+ 
+-	return ice_write_mac_reg_eth56g(hw, port, PHY_MAC_XIF_MODE, val);
++	return ice_write_mac_reg_eth56g(hw, port, addr, val);
+ }
+ 
+ /**
+@@ -2070,21 +2124,22 @@ static u32 ice_ptp_calc_bitslip_eth56g(struct ice_hw *hw, u8 port, u32 bs,
+ 				       bool fc, bool rs,
+ 				       enum ice_eth56g_link_spd spd)
+ {
+-	u8 port_offset = port & (ICE_PORTS_PER_QUAD - 1);
+-	u8 port_blk = port & ~(ICE_PORTS_PER_QUAD - 1);
+ 	u32 bitslip;
+ 	int err;
+ 
+ 	if (!bs || rs)
+ 		return 0;
+ 
+-	if (spd == ICE_ETH56G_LNK_SPD_1G || spd == ICE_ETH56G_LNK_SPD_2_5G)
++	if (spd == ICE_ETH56G_LNK_SPD_1G || spd == ICE_ETH56G_LNK_SPD_2_5G) {
+ 		err = ice_read_gpcs_reg_eth56g(hw, port, PHY_GPCS_BITSLIP,
+ 					       &bitslip);
+-	else
+-		err = ice_read_ptp_reg_eth56g(hw, port_blk,
+-					      PHY_REG_SD_BIT_SLIP(port_offset),
+-					      &bitslip);
++	} else {
++		u8 quad_lane = port % ICE_PORTS_PER_QUAD;
++		u32 addr;
++
++		addr = PHY_REG_SD_BIT_SLIP(quad_lane);
++		err = ice_read_quad_ptp_reg_eth56g(hw, port, addr, &bitslip);
++	}
+ 	if (err)
+ 		return 0;
+ 
+@@ -2644,59 +2699,29 @@ static int ice_get_phy_tx_tstamp_ready_eth56g(struct ice_hw *hw, u8 port,
+ }
+ 
+ /**
+- * ice_is_muxed_topo - detect breakout 2x50G topology for E825C
+- * @hw: pointer to the HW struct
+- *
+- * Return: true if it's 2x50 breakout topology, false otherwise
+- */
+-static bool ice_is_muxed_topo(struct ice_hw *hw)
+-{
+-	u8 link_topo;
+-	bool mux;
+-	u32 val;
+-
+-	val = rd32(hw, GLGEN_SWITCH_MODE_CONFIG);
+-	mux = FIELD_GET(GLGEN_SWITCH_MODE_CONFIG_25X4_QUAD_M, val);
+-	val = rd32(hw, GLGEN_MAC_LINK_TOPO);
+-	link_topo = FIELD_GET(GLGEN_MAC_LINK_TOPO_LINK_TOPO_M, val);
+-
+-	return (mux && link_topo == ICE_LINK_TOPO_UP_TO_2_LINKS);
+-}
+-
+-/**
+- * ice_ptp_init_phy_e825c - initialize PHY parameters
++ * ice_ptp_init_phy_e825 - initialize PHY parameters
+  * @hw: pointer to the HW struct
+  */
+-static void ice_ptp_init_phy_e825c(struct ice_hw *hw)
++static void ice_ptp_init_phy_e825(struct ice_hw *hw)
+ {
+ 	struct ice_ptp_hw *ptp = &hw->ptp;
+ 	struct ice_eth56g_params *params;
+-	u8 phy;
++	u32 phy_rev;
++	int err;
+ 
+ 	ptp->phy_model = ICE_PHY_ETH56G;
+ 	params = &ptp->phy.eth56g;
+ 	params->onestep_ena = false;
+ 	params->peer_delay = 0;
+ 	params->sfd_ena = false;
+-	params->phy_addr[0] = eth56g_phy_0;
+-	params->phy_addr[1] = eth56g_phy_1;
+ 	params->num_phys = 2;
+ 	ptp->ports_per_phy = 4;
+ 	ptp->num_lports = params->num_phys * ptp->ports_per_phy;
+ 
+ 	ice_sb_access_ena_eth56g(hw, true);
+-	for (phy = 0; phy < params->num_phys; phy++) {
+-		u32 phy_rev;
+-		int err;
+-
+-		err = ice_read_phy_eth56g(hw, phy, PHY_REG_REVISION, &phy_rev);
+-		if (err || phy_rev != PHY_REVISION_ETH56G) {
+-			ptp->phy_model = ICE_PHY_UNSUP;
+-			return;
+-		}
+-	}
+-
+-	ptp->is_2x50g_muxed_topo = ice_is_muxed_topo(hw);
++	err = ice_read_phy_eth56g(hw, hw->pf_id, PHY_REG_REVISION, &phy_rev);
++	if (err || phy_rev != PHY_REVISION_ETH56G)
++		ptp->phy_model = ICE_PHY_UNSUP;
+ }
+ 
+ /* E822 family functions
+@@ -2715,10 +2740,9 @@ static void ice_fill_phy_msg_e82x(struct ice_hw *hw,
+ 				  struct ice_sbq_msg_input *msg, u8 port,
+ 				  u16 offset)
+ {
+-	int phy_port, phy, quadtype;
++	int phy_port, quadtype;
+ 
+ 	phy_port = port % hw->ptp.ports_per_phy;
+-	phy = port / hw->ptp.ports_per_phy;
+ 	quadtype = ICE_GET_QUAD_NUM(port) %
+ 		   ICE_GET_QUAD_NUM(hw->ptp.ports_per_phy);
+ 
+@@ -2730,12 +2754,7 @@ static void ice_fill_phy_msg_e82x(struct ice_hw *hw,
+ 		msg->msg_addr_high = P_Q1_H(P_4_BASE + offset, phy_port);
+ 	}
+ 
+-	if (phy == 0)
+-		msg->dest_dev = rmn_0;
+-	else if (phy == 1)
+-		msg->dest_dev = rmn_1;
+-	else
+-		msg->dest_dev = rmn_2;
++	msg->dest_dev = rmn_0;
+ }
+ 
+ /**
+@@ -5395,7 +5414,7 @@ void ice_ptp_init_hw(struct ice_hw *hw)
+ 	else if (ice_is_e810(hw))
+ 		ice_ptp_init_phy_e810(ptp);
+ 	else if (ice_is_e825c(hw))
+-		ice_ptp_init_phy_e825c(hw);
++		ice_ptp_init_phy_e825(hw);
+ 	else
+ 		ptp->phy_model = ICE_PHY_UNSUP;
+ }
+@@ -5418,7 +5437,7 @@ void ice_ptp_init_hw(struct ice_hw *hw)
+ static int ice_ptp_write_port_cmd(struct ice_hw *hw, u8 port,
+ 				  enum ice_ptp_tmr_cmd cmd)
+ {
+-	switch (hw->ptp.phy_model) {
++	switch (ice_get_phy_model(hw)) {
+ 	case ICE_PHY_ETH56G:
+ 		return ice_ptp_write_port_cmd_eth56g(hw, port, cmd);
+ 	case ICE_PHY_E82X:
+@@ -5483,7 +5502,7 @@ static int ice_ptp_port_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
+ 	u32 port;
+ 
+ 	/* PHY models which can program all ports simultaneously */
+-	switch (hw->ptp.phy_model) {
++	switch (ice_get_phy_model(hw)) {
+ 	case ICE_PHY_E810:
+ 		return ice_ptp_port_cmd_e810(hw, cmd);
+ 	default:
+@@ -5562,7 +5581,7 @@ int ice_ptp_init_time(struct ice_hw *hw, u64 time)
+ 
+ 	/* PHY timers */
+ 	/* Fill Rx and Tx ports and send msg to PHY */
+-	switch (hw->ptp.phy_model) {
++	switch (ice_get_phy_model(hw)) {
+ 	case ICE_PHY_ETH56G:
+ 		err = ice_ptp_prep_phy_time_eth56g(hw,
+ 						   (u32)(time & 0xFFFFFFFF));
+@@ -5608,7 +5627,7 @@ int ice_ptp_write_incval(struct ice_hw *hw, u64 incval)
+ 	wr32(hw, GLTSYN_SHADJ_L(tmr_idx), lower_32_bits(incval));
+ 	wr32(hw, GLTSYN_SHADJ_H(tmr_idx), upper_32_bits(incval));
+ 
+-	switch (hw->ptp.phy_model) {
++	switch (ice_get_phy_model(hw)) {
+ 	case ICE_PHY_ETH56G:
+ 		err = ice_ptp_prep_phy_incval_eth56g(hw, incval);
+ 		break;
+@@ -5677,7 +5696,7 @@ int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj)
+ 	wr32(hw, GLTSYN_SHADJ_L(tmr_idx), 0);
+ 	wr32(hw, GLTSYN_SHADJ_H(tmr_idx), adj);
+ 
+-	switch (hw->ptp.phy_model) {
++	switch (ice_get_phy_model(hw)) {
+ 	case ICE_PHY_ETH56G:
+ 		err = ice_ptp_prep_phy_adj_eth56g(hw, adj);
+ 		break;
+@@ -5710,7 +5729,7 @@ int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj)
+  */
+ int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp)
+ {
+-	switch (hw->ptp.phy_model) {
++	switch (ice_get_phy_model(hw)) {
+ 	case ICE_PHY_ETH56G:
+ 		return ice_read_ptp_tstamp_eth56g(hw, block, idx, tstamp);
+ 	case ICE_PHY_E810:
+@@ -5740,7 +5759,7 @@ int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp)
+  */
+ int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx)
+ {
+-	switch (hw->ptp.phy_model) {
++	switch (ice_get_phy_model(hw)) {
+ 	case ICE_PHY_ETH56G:
+ 		return ice_clear_ptp_tstamp_eth56g(hw, block, idx);
+ 	case ICE_PHY_E810:
+@@ -5803,7 +5822,7 @@ static int ice_get_pf_c827_idx(struct ice_hw *hw, u8 *idx)
+  */
+ void ice_ptp_reset_ts_memory(struct ice_hw *hw)
+ {
+-	switch (hw->ptp.phy_model) {
++	switch (ice_get_phy_model(hw)) {
+ 	case ICE_PHY_ETH56G:
+ 		ice_ptp_reset_ts_memory_eth56g(hw);
+ 		break;
+@@ -5832,7 +5851,7 @@ int ice_ptp_init_phc(struct ice_hw *hw)
+ 	/* Clear event err indications for auxiliary pins */
+ 	(void)rd32(hw, GLTSYN_STAT(src_idx));
+ 
+-	switch (hw->ptp.phy_model) {
++	switch (ice_get_phy_model(hw)) {
+ 	case ICE_PHY_ETH56G:
+ 		return ice_ptp_init_phc_eth56g(hw);
+ 	case ICE_PHY_E810:
+@@ -5857,7 +5876,7 @@ int ice_ptp_init_phc(struct ice_hw *hw)
+  */
+ int ice_get_phy_tx_tstamp_ready(struct ice_hw *hw, u8 block, u64 *tstamp_ready)
+ {
+-	switch (hw->ptp.phy_model) {
++	switch (ice_get_phy_model(hw)) {
+ 	case ICE_PHY_ETH56G:
+ 		return ice_get_phy_tx_tstamp_ready_eth56g(hw, block,
+ 							  tstamp_ready);
+diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
+index 4c8b8457134427..3499062218b59e 100644
+--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
++++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
+@@ -452,6 +452,11 @@ static inline u64 ice_get_base_incval(struct ice_hw *hw)
+ 	}
+ }
+ 
++static inline bool ice_is_dual(struct ice_hw *hw)
++{
++	return !!(hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_DUAL_M);
++}
++
+ #define PFTSYN_SEM_BYTES	4
+ 
+ #define ICE_PTP_CLOCK_INDEX_0	0x00
+diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
+index 45768796691fec..609f31e0dfdede 100644
+--- a/drivers/net/ethernet/intel/ice/ice_type.h
++++ b/drivers/net/ethernet/intel/ice/ice_type.h
+@@ -850,7 +850,6 @@ struct ice_mbx_data {
+ 
+ struct ice_eth56g_params {
+ 	u8 num_phys;
+-	u8 phy_addr[2];
+ 	bool onestep_ena;
+ 	bool sfd_ena;
+ 	u32 peer_delay;
+@@ -881,7 +880,6 @@ struct ice_ptp_hw {
+ 	union ice_phy_params phy;
+ 	u8 num_lports;
+ 	u8 ports_per_phy;
+-	bool is_2x50g_muxed_topo;
+ };
+ 
+ /* Port hardware description */
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+index ca92e518be7669..1baf8933a07cb0 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+@@ -724,6 +724,12 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x,
+ 	/* check esn */
+ 	if (x->props.flags & XFRM_STATE_ESN)
+ 		mlx5e_ipsec_update_esn_state(sa_entry);
++	else
++		/* According to RFC4303, section "3.3.3. Sequence Number Generation",
++		 * the first packet sent using a given SA will contain a sequence
++		 * number of 1.
++		 */
++		sa_entry->esn_state.esn = 1;
+ 
+ 	mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &sa_entry->attrs);
+ 
+@@ -768,9 +774,12 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x,
+ 				   MLX5_IPSEC_RESCHED);
+ 
+ 	if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET &&
+-	    x->props.mode == XFRM_MODE_TUNNEL)
+-		xa_set_mark(&ipsec->sadb, sa_entry->ipsec_obj_id,
+-			    MLX5E_IPSEC_TUNNEL_SA);
++	    x->props.mode == XFRM_MODE_TUNNEL) {
++		xa_lock_bh(&ipsec->sadb);
++		__xa_set_mark(&ipsec->sadb, sa_entry->ipsec_obj_id,
++			      MLX5E_IPSEC_TUNNEL_SA);
++		xa_unlock_bh(&ipsec->sadb);
++	}
+ 
+ out:
+ 	x->xso.offload_handle = (unsigned long)sa_entry;
+@@ -797,7 +806,6 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x,
+ static void mlx5e_xfrm_del_state(struct xfrm_state *x)
+ {
+ 	struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
+-	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
+ 	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
+ 	struct mlx5e_ipsec_sa_entry *old;
+ 
+@@ -806,12 +814,6 @@ static void mlx5e_xfrm_del_state(struct xfrm_state *x)
+ 
+ 	old = xa_erase_bh(&ipsec->sadb, sa_entry->ipsec_obj_id);
+ 	WARN_ON(old != sa_entry);
+-
+-	if (attrs->mode == XFRM_MODE_TUNNEL &&
+-	    attrs->type == XFRM_DEV_OFFLOAD_PACKET)
+-		/* Make sure that no ARP requests are running in parallel */
+-		flush_workqueue(ipsec->wq);
+-
+ }
+ 
+ static void mlx5e_xfrm_free_state(struct xfrm_state *x)
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+index e51b03d4c717f1..57861d34d46f85 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+@@ -1718,23 +1718,21 @@ static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
+ 		goto err_alloc;
+ 	}
+ 
+-	if (attrs->family == AF_INET)
+-		setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
+-	else
+-		setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
+-
+ 	setup_fte_no_frags(spec);
+ 	setup_fte_upper_proto_match(spec, &attrs->upspec);
+ 
+ 	switch (attrs->type) {
+ 	case XFRM_DEV_OFFLOAD_CRYPTO:
++		if (attrs->family == AF_INET)
++			setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
++		else
++			setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
+ 		setup_fte_spi(spec, attrs->spi, false);
+ 		setup_fte_esp(spec);
+ 		setup_fte_reg_a(spec);
+ 		break;
+ 	case XFRM_DEV_OFFLOAD_PACKET:
+-		if (attrs->reqid)
+-			setup_fte_reg_c4(spec, attrs->reqid);
++		setup_fte_reg_c4(spec, attrs->reqid);
+ 		err = setup_pkt_reformat(ipsec, attrs, &flow_act);
+ 		if (err)
+ 			goto err_pkt_reformat;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
+index 53cfa39188cb0e..820debf3fbbf22 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
+@@ -91,8 +91,9 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
+ EXPORT_SYMBOL_GPL(mlx5_ipsec_device_caps);
+ 
+ static void mlx5e_ipsec_packet_setup(void *obj, u32 pdn,
+-				     struct mlx5_accel_esp_xfrm_attrs *attrs)
++				     struct mlx5e_ipsec_sa_entry *sa_entry)
+ {
++	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
+ 	void *aso_ctx;
+ 
+ 	aso_ctx = MLX5_ADDR_OF(ipsec_obj, obj, ipsec_aso);
+@@ -120,8 +121,12 @@ static void mlx5e_ipsec_packet_setup(void *obj, u32 pdn,
+ 	 * active.
+ 	 */
+ 	MLX5_SET(ipsec_obj, obj, aso_return_reg, MLX5_IPSEC_ASO_REG_C_4_5);
+-	if (attrs->dir == XFRM_DEV_OFFLOAD_OUT)
++	if (attrs->dir == XFRM_DEV_OFFLOAD_OUT) {
+ 		MLX5_SET(ipsec_aso, aso_ctx, mode, MLX5_IPSEC_ASO_INC_SN);
++		if (!attrs->replay_esn.trigger)
++			MLX5_SET(ipsec_aso, aso_ctx, mode_parameter,
++				 sa_entry->esn_state.esn);
++	}
+ 
+ 	if (attrs->lft.hard_packet_limit != XFRM_INF) {
+ 		MLX5_SET(ipsec_aso, aso_ctx, remove_flow_pkt_cnt,
+@@ -175,7 +180,7 @@ static int mlx5_create_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry)
+ 
+ 	res = &mdev->mlx5e_res.hw_objs;
+ 	if (attrs->type == XFRM_DEV_OFFLOAD_PACKET)
+-		mlx5e_ipsec_packet_setup(obj, res->pdn, attrs);
++		mlx5e_ipsec_packet_setup(obj, res->pdn, sa_entry);
+ 
+ 	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ 	if (!err)
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+index 2eabfcc247c6ae..0ce999706d412a 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+@@ -2709,6 +2709,7 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
+ 		break;
+ 	case MLX5_FLOW_NAMESPACE_RDMA_TX:
+ 		root_ns = steering->rdma_tx_root_ns;
++		prio = RDMA_TX_BYPASS_PRIO;
+ 		break;
+ 	case MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS:
+ 		root_ns = steering->rdma_rx_root_ns;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
+index ab2717012b79b5..39e80704b1c425 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
+@@ -530,7 +530,7 @@ int mlx5_lag_port_sel_create(struct mlx5_lag *ldev,
+ 	set_tt_map(port_sel, hash_type);
+ 	err = mlx5_lag_create_definers(ldev, hash_type, ports);
+ 	if (err)
+-		return err;
++		goto clear_port_sel;
+ 
+ 	if (port_sel->tunnel) {
+ 		err = mlx5_lag_create_inner_ttc_table(ldev);
+@@ -549,6 +549,8 @@ int mlx5_lag_port_sel_create(struct mlx5_lag *ldev,
+ 		mlx5_destroy_ttc_table(port_sel->inner.ttc);
+ destroy_definers:
+ 	mlx5_lag_destroy_definers(ldev);
++clear_port_sel:
++	memset(port_sel, 0, sizeof(*port_sel));
+ 	return err;
+ }
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
+index a96be98be032f5..b96909fbeb12de 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
+@@ -257,6 +257,7 @@ static int mlx5_sf_add(struct mlx5_core_dev *dev, struct mlx5_sf_table *table,
+ 	return 0;
+ 
+ esw_err:
++	mlx5_sf_function_id_erase(table, sf);
+ 	mlx5_sf_free(table, sf);
+ 	return err;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wc.c b/drivers/net/ethernet/mellanox/mlx5/core/wc.c
+index 1bed75eca97db8..740b719e7072df 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/wc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/wc.c
+@@ -382,6 +382,7 @@ static void mlx5_core_test_wc(struct mlx5_core_dev *mdev)
+ 
+ bool mlx5_wc_support_get(struct mlx5_core_dev *mdev)
+ {
++	struct mutex *wc_state_lock = &mdev->wc_state_lock;
+ 	struct mlx5_core_dev *parent = NULL;
+ 
+ 	if (!MLX5_CAP_GEN(mdev, bf)) {
+@@ -400,32 +401,31 @@ bool mlx5_wc_support_get(struct mlx5_core_dev *mdev)
+ 		 */
+ 		goto out;
+ 
+-	mutex_lock(&mdev->wc_state_lock);
+-
+-	if (mdev->wc_state != MLX5_WC_STATE_UNINITIALIZED)
+-		goto unlock;
+-
+ #ifdef CONFIG_MLX5_SF
+-	if (mlx5_core_is_sf(mdev))
++	if (mlx5_core_is_sf(mdev)) {
+ 		parent = mdev->priv.parent_mdev;
++		wc_state_lock = &parent->wc_state_lock;
++	}
+ #endif
+ 
+-	if (parent) {
+-		mutex_lock(&parent->wc_state_lock);
++	mutex_lock(wc_state_lock);
+ 
++	if (mdev->wc_state != MLX5_WC_STATE_UNINITIALIZED)
++		goto unlock;
++
++	if (parent) {
+ 		mlx5_core_test_wc(parent);
+ 
+ 		mlx5_core_dbg(mdev, "parent set wc_state=%d\n",
+ 			      parent->wc_state);
+ 		mdev->wc_state = parent->wc_state;
+ 
+-		mutex_unlock(&parent->wc_state_lock);
++	} else {
++		mlx5_core_test_wc(mdev);
+ 	}
+ 
+-	mlx5_core_test_wc(mdev);
+-
+ unlock:
+-	mutex_unlock(&mdev->wc_state_lock);
++	mutex_unlock(wc_state_lock);
+ out:
+ 	mlx5_core_dbg(mdev, "wc_state=%d\n", mdev->wc_state);
+ 
+diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
+index 9d97cd281f18e4..c03558adda91eb 100644
+--- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c
++++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
+@@ -458,7 +458,8 @@ int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data,
+ 	map_id_full = be64_to_cpu(cbe->map_ptr);
+ 	map_id = map_id_full;
+ 
+-	if (len < sizeof(struct cmsg_bpf_event) + pkt_size + data_size)
++	if (size_add(pkt_size, data_size) > INT_MAX ||
++	    len < sizeof(struct cmsg_bpf_event) + pkt_size + data_size)
+ 		return -EINVAL;
+ 	if (cbe->hdr.ver != NFP_CCM_ABI_VERSION)
+ 		return -EINVAL;
+diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
+index 907af4651c5534..6f6b0566c65bcb 100644
+--- a/drivers/net/ethernet/renesas/ravb_main.c
++++ b/drivers/net/ethernet/renesas/ravb_main.c
+@@ -2756,6 +2756,7 @@ static const struct ravb_hw_info ravb_rzv2m_hw_info = {
+ 	.net_features = NETIF_F_RXCSUM,
+ 	.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
+ 	.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
++	.tx_max_frame_size = SZ_2K,
+ 	.rx_max_frame_size = SZ_2K,
+ 	.rx_buffer_size = SZ_2K +
+ 			  SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
+diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
+index 8d02d2b2142937..dc5e247ca5d1a6 100644
+--- a/drivers/net/ethernet/ti/cpsw_ale.c
++++ b/drivers/net/ethernet/ti/cpsw_ale.c
+@@ -127,15 +127,15 @@ struct cpsw_ale_dev_id {
+ 
+ static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits)
+ {
+-	int idx, idx2;
++	int idx, idx2, index;
+ 	u32 hi_val = 0;
+ 
+ 	idx    = start / 32;
+ 	idx2 = (start + bits - 1) / 32;
+ 	/* Check if bits to be fetched exceed a word */
+ 	if (idx != idx2) {
+-		idx2 = 2 - idx2; /* flip */
+-		hi_val = ale_entry[idx2] << ((idx2 * 32) - start);
++		index = 2 - idx2; /* flip */
++		hi_val = ale_entry[index] << ((idx2 * 32) - start);
+ 	}
+ 	start -= idx * 32;
+ 	idx    = 2 - idx; /* flip */
+@@ -145,16 +145,16 @@ static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits)
+ static inline void cpsw_ale_set_field(u32 *ale_entry, u32 start, u32 bits,
+ 				      u32 value)
+ {
+-	int idx, idx2;
++	int idx, idx2, index;
+ 
+ 	value &= BITMASK(bits);
+ 	idx = start / 32;
+ 	idx2 = (start + bits - 1) / 32;
+ 	/* Check if bits to be set exceed a word */
+ 	if (idx != idx2) {
+-		idx2 = 2 - idx2; /* flip */
+-		ale_entry[idx2] &= ~(BITMASK(bits + start - (idx2 * 32)));
+-		ale_entry[idx2] |= (value >> ((idx2 * 32) - start));
++		index = 2 - idx2; /* flip */
++		ale_entry[index] &= ~(BITMASK(bits + start - (idx2 * 32)));
++		ale_entry[index] |= (value >> ((idx2 * 32) - start));
+ 	}
+ 	start -= idx * 32;
+ 	idx = 2 - idx; /* flip */
+diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+index 1fcbcaa85ebdb4..de10a2d08c428e 100644
+--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
++++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+@@ -2056,6 +2056,12 @@ axienet_ethtools_set_coalesce(struct net_device *ndev,
+ 		return -EBUSY;
+ 	}
+ 
++	if (ecoalesce->rx_max_coalesced_frames > 255 ||
++	    ecoalesce->tx_max_coalesced_frames > 255) {
++		NL_SET_ERR_MSG(extack, "frames must be less than 256");
++		return -EINVAL;
++	}
++
+ 	if (ecoalesce->rx_max_coalesced_frames)
+ 		lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
+ 	if (ecoalesce->rx_coalesce_usecs)
+diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
+index 70f981887518aa..47406ce9901612 100644
+--- a/drivers/net/gtp.c
++++ b/drivers/net/gtp.c
+@@ -1526,8 +1526,8 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
+ 		goto out_encap;
+ 	}
+ 
+-	gn = net_generic(dev_net(dev), gtp_net_id);
+-	list_add_rcu(&gtp->list, &gn->gtp_dev_list);
++	gn = net_generic(src_net, gtp_net_id);
++	list_add(&gtp->list, &gn->gtp_dev_list);
+ 	dev->priv_destructor = gtp_destructor;
+ 
+ 	netdev_dbg(dev, "registered new GTP interface\n");
+@@ -1553,7 +1553,7 @@ static void gtp_dellink(struct net_device *dev, struct list_head *head)
+ 		hlist_for_each_entry_safe(pctx, next, &gtp->tid_hash[i], hlist_tid)
+ 			pdp_context_delete(pctx);
+ 
+-	list_del_rcu(&gtp->list);
++	list_del(&gtp->list);
+ 	unregister_netdevice_queue(dev, head);
+ }
+ 
+@@ -2279,16 +2279,19 @@ static int gtp_genl_dump_pdp(struct sk_buff *skb,
+ 	struct gtp_dev *last_gtp = (struct gtp_dev *)cb->args[2], *gtp;
+ 	int i, j, bucket = cb->args[0], skip = cb->args[1];
+ 	struct net *net = sock_net(skb->sk);
++	struct net_device *dev;
+ 	struct pdp_ctx *pctx;
+-	struct gtp_net *gn;
+-
+-	gn = net_generic(net, gtp_net_id);
+ 
+ 	if (cb->args[4])
+ 		return 0;
+ 
+ 	rcu_read_lock();
+-	list_for_each_entry_rcu(gtp, &gn->gtp_dev_list, list) {
++	for_each_netdev_rcu(net, dev) {
++		if (dev->rtnl_link_ops != &gtp_link_ops)
++			continue;
++
++		gtp = netdev_priv(dev);
++
+ 		if (last_gtp && last_gtp != gtp)
+ 			continue;
+ 		else
+@@ -2483,9 +2486,14 @@ static void __net_exit gtp_net_exit_batch_rtnl(struct list_head *net_list,
+ 
+ 	list_for_each_entry(net, net_list, exit_list) {
+ 		struct gtp_net *gn = net_generic(net, gtp_net_id);
+-		struct gtp_dev *gtp;
++		struct gtp_dev *gtp, *gtp_next;
++		struct net_device *dev;
++
++		for_each_netdev(net, dev)
++			if (dev->rtnl_link_ops == &gtp_link_ops)
++				gtp_dellink(dev, dev_to_kill);
+ 
+-		list_for_each_entry(gtp, &gn->gtp_dev_list, list)
++		list_for_each_entry_safe(gtp, gtp_next, &gn->gtp_dev_list, list)
+ 			gtp_dellink(gtp->dev, dev_to_kill);
+ 	}
+ }
+diff --git a/drivers/net/pfcp.c b/drivers/net/pfcp.c
+index 69434fd13f9612..68d0d9e92a2209 100644
+--- a/drivers/net/pfcp.c
++++ b/drivers/net/pfcp.c
+@@ -206,8 +206,8 @@ static int pfcp_newlink(struct net *net, struct net_device *dev,
+ 		goto exit_del_pfcp_sock;
+ 	}
+ 
+-	pn = net_generic(dev_net(dev), pfcp_net_id);
+-	list_add_rcu(&pfcp->list, &pn->pfcp_dev_list);
++	pn = net_generic(net, pfcp_net_id);
++	list_add(&pfcp->list, &pn->pfcp_dev_list);
+ 
+ 	netdev_dbg(dev, "registered new PFCP interface\n");
+ 
+@@ -224,7 +224,7 @@ static void pfcp_dellink(struct net_device *dev, struct list_head *head)
+ {
+ 	struct pfcp_dev *pfcp = netdev_priv(dev);
+ 
+-	list_del_rcu(&pfcp->list);
++	list_del(&pfcp->list);
+ 	unregister_netdevice_queue(dev, head);
+ }
+ 
+@@ -247,11 +247,16 @@ static int __net_init pfcp_net_init(struct net *net)
+ static void __net_exit pfcp_net_exit(struct net *net)
+ {
+ 	struct pfcp_net *pn = net_generic(net, pfcp_net_id);
+-	struct pfcp_dev *pfcp;
++	struct pfcp_dev *pfcp, *pfcp_next;
++	struct net_device *dev;
+ 	LIST_HEAD(list);
+ 
+ 	rtnl_lock();
+-	list_for_each_entry(pfcp, &pn->pfcp_dev_list, list)
++	for_each_netdev(net, dev)
++		if (dev->rtnl_link_ops == &pfcp_link_ops)
++			pfcp_dellink(dev, &list);
++
++	list_for_each_entry_safe(pfcp, pfcp_next, &pn->pfcp_dev_list, list)
+ 		pfcp_dellink(pfcp->dev, &list);
+ 
+ 	unregister_netdevice_many(&list);
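
The GTP and PFCP hunks above replace driver-private device lists with a walk of the namespace's device table, filtered on the `rtnl_link_ops` pointer; matching on the ops pointer identifies the driver's own devices without a side list that can race with netns teardown, and registering against the link netns rather than `dev_net(dev)` fixes the original cross-namespace bug. A small userspace analog of the ops-pointer filter (all names invented):

#include <stdio.h>

struct ops { const char *kind; };
static const struct ops gtp_ops = { "gtp" };
static const struct ops eth_ops = { "eth" };

struct device { const char *name; const struct ops *ops; };

int main(void)
{
	const struct device table[] = {
		{ "eth0", &eth_ops }, { "gtp0", &gtp_ops }, { "gtp1", &gtp_ops },
	};

	for (unsigned int i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (table[i].ops == &gtp_ops)	/* like dev->rtnl_link_ops == &gtp_link_ops */
			printf("tearing down %s\n", table[i].name);
	return 0;
}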
+diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
+index 0bda83d0fc3e08..eaf31c823cbe88 100644
+--- a/drivers/nvme/target/io-cmd-bdev.c
++++ b/drivers/nvme/target/io-cmd-bdev.c
+@@ -36,7 +36,7 @@ void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
+ 	 */
+ 	id->nsfeat |= 1 << 4;
+ 	/* NPWG = Namespace Preferred Write Granularity. 0's based */
+-	id->npwg = lpp0b;
++	id->npwg = to0based(bdev_io_min(bdev) / bdev_logical_block_size(bdev));
+ 	/* NPWA = Namespace Preferred Write Alignment. 0's based */
+ 	id->npwa = id->npwg;
+ 	/* NPDG = Namespace Preferred Deallocate Granularity. 0's based */
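
The corrected line derives NPWG from the block device's minimum I/O size; like the other NPxx fields, it is 0's based, i.e. encoded as one less than the block count. A standalone sketch of the arithmetic, where the clamping mirrors the intent of the kernel's `to0based()` helper but is an assumption here:

#include <stdint.h>
#include <stdio.h>

static uint16_t to0based(uint32_t a)	/* same intent as the kernel helper */
{
	if (a == 0)
		a = 1;		/* 0's based encoding of "1 block" is 0 */
	if (a > 65536)
		a = 65536;	/* clamp to what 16 bits can encode */
	return (uint16_t)(a - 1);
}

int main(void)
{
	uint32_t io_min = 4096, lbs = 512;	/* 4 KiB granularity, 512 B blocks */

	printf("npwg = %u\n", (unsigned)to0based(io_min / lbs));	/* prints 7 */
	return 0;
}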
+diff --git a/drivers/platform/x86/dell/dell-uart-backlight.c b/drivers/platform/x86/dell/dell-uart-backlight.c
+index 3995f90add4568..c45bc332af7a02 100644
+--- a/drivers/platform/x86/dell/dell-uart-backlight.c
++++ b/drivers/platform/x86/dell/dell-uart-backlight.c
+@@ -283,6 +283,9 @@ static int dell_uart_bl_serdev_probe(struct serdev_device *serdev)
+ 	init_waitqueue_head(&dell_bl->wait_queue);
+ 	dell_bl->dev = dev;
+ 
++	serdev_device_set_drvdata(serdev, dell_bl);
++	serdev_device_set_client_ops(serdev, &dell_uart_bl_serdev_ops);
++
+ 	ret = devm_serdev_device_open(dev, serdev);
+ 	if (ret)
+ 		return dev_err_probe(dev, ret, "opening UART device\n");
+@@ -290,8 +293,6 @@ static int dell_uart_bl_serdev_probe(struct serdev_device *serdev)
+ 	/* 9600 bps, no flow control, these are the default but set them to be sure */
+ 	serdev_device_set_baudrate(serdev, 9600);
+ 	serdev_device_set_flow_control(serdev, false);
+-	serdev_device_set_drvdata(serdev, dell_bl);
+-	serdev_device_set_client_ops(serdev, &dell_uart_bl_serdev_ops);
+ 
+ 	get_version[0] = DELL_SOF(GET_CMD_LEN);
+ 	get_version[1] = CMD_GET_VERSION;
+diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
+index 1e46e30dae9669..dbcd3087aaa4b0 100644
+--- a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
++++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
+@@ -804,6 +804,7 @@ EXPORT_SYMBOL_GPL(isst_if_cdev_unregister);
+ static const struct x86_cpu_id isst_cpu_ids[] = {
+ 	X86_MATCH_VFM(INTEL_ATOM_CRESTMONT,	SST_HPM_SUPPORTED),
+ 	X86_MATCH_VFM(INTEL_ATOM_CRESTMONT_X,	SST_HPM_SUPPORTED),
++	X86_MATCH_VFM(INTEL_ATOM_DARKMONT_X,	SST_HPM_SUPPORTED),
+ 	X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X,	0),
+ 	X86_MATCH_VFM(INTEL_GRANITERAPIDS_D,	SST_HPM_SUPPORTED),
+ 	X86_MATCH_VFM(INTEL_GRANITERAPIDS_X,	SST_HPM_SUPPORTED),
+diff --git a/drivers/platform/x86/intel/tpmi_power_domains.c b/drivers/platform/x86/intel/tpmi_power_domains.c
+index 0609a8320f7ec1..12fb0943b5dc37 100644
+--- a/drivers/platform/x86/intel/tpmi_power_domains.c
++++ b/drivers/platform/x86/intel/tpmi_power_domains.c
+@@ -81,6 +81,7 @@ static const struct x86_cpu_id tpmi_cpu_ids[] = {
+ 	X86_MATCH_VFM(INTEL_GRANITERAPIDS_X,	NULL),
+ 	X86_MATCH_VFM(INTEL_ATOM_CRESTMONT_X,	NULL),
+ 	X86_MATCH_VFM(INTEL_ATOM_CRESTMONT,	NULL),
++	X86_MATCH_VFM(INTEL_ATOM_DARKMONT_X,	NULL),
+ 	X86_MATCH_VFM(INTEL_GRANITERAPIDS_D,	NULL),
+ 	X86_MATCH_VFM(INTEL_PANTHERCOVE_X,	NULL),
+ 	{}
+diff --git a/drivers/platform/x86/lenovo-yoga-tab2-pro-1380-fastcharger.c b/drivers/platform/x86/lenovo-yoga-tab2-pro-1380-fastcharger.c
+index d525bdc8ca9b3f..32d9b6009c4229 100644
+--- a/drivers/platform/x86/lenovo-yoga-tab2-pro-1380-fastcharger.c
++++ b/drivers/platform/x86/lenovo-yoga-tab2-pro-1380-fastcharger.c
+@@ -199,14 +199,15 @@ static int yt2_1380_fc_serdev_probe(struct serdev_device *serdev)
+ 	if (ret)
+ 		return ret;
+ 
++	serdev_device_set_drvdata(serdev, fc);
++	serdev_device_set_client_ops(serdev, &yt2_1380_fc_serdev_ops);
++
+ 	ret = devm_serdev_device_open(dev, serdev);
+ 	if (ret)
+ 		return dev_err_probe(dev, ret, "opening UART device\n");
+ 
+ 	serdev_device_set_baudrate(serdev, 600);
+ 	serdev_device_set_flow_control(serdev, false);
+-	serdev_device_set_drvdata(serdev, fc);
+-	serdev_device_set_client_ops(serdev, &yt2_1380_fc_serdev_ops);
+ 
+ 	ret = devm_extcon_register_notifier_all(dev, fc->extcon, &fc->nb);
+ 	if (ret)
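
This hunk and the Dell backlight hunk above apply the same fix: driver data and client ops must be published before `devm_serdev_device_open()`, because receive callbacks can fire as soon as the port opens and would dereference unset state. A minimal single-threaded sketch of the ordering rule (names invented):

#include <stdio.h>

struct ctx { int rx_count; };

static struct ctx *drvdata;	/* what the RX callback dereferences */

static void rx_callback(void)
{
	drvdata->rx_count++;	/* NULL deref if open happened first */
}

static void open_port(void)
{
	rx_callback();		/* data may already be pending at open */
}

int main(void)
{
	struct ctx c = { 0 };

	drvdata = &c;		/* 1: publish state the callback needs */
	open_port();		/* 2: only then enable the callback source */
	printf("%d\n", c.rx_count);	/* prints 1, no crash */
	return 0;
}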
+diff --git a/drivers/pmdomain/imx/imx8mp-blk-ctrl.c b/drivers/pmdomain/imx/imx8mp-blk-ctrl.c
+index 77e889165eed3c..a19e806bb14726 100644
+--- a/drivers/pmdomain/imx/imx8mp-blk-ctrl.c
++++ b/drivers/pmdomain/imx/imx8mp-blk-ctrl.c
+@@ -770,7 +770,7 @@ static void imx8mp_blk_ctrl_remove(struct platform_device *pdev)
+ 
+ 	of_genpd_del_provider(pdev->dev.of_node);
+ 
+-	for (i = 0; bc->onecell_data.num_domains; i++) {
++	for (i = 0; i < bc->onecell_data.num_domains; i++) {
+ 		struct imx8mp_blk_ctrl_domain *domain = &bc->domains[i];
+ 
+ 		pm_genpd_remove(&domain->genpd);
+diff --git a/drivers/reset/reset-rzg2l-usbphy-ctrl.c b/drivers/reset/reset-rzg2l-usbphy-ctrl.c
+index 1cd157f4f03b47..4e2ac1f0060c0d 100644
+--- a/drivers/reset/reset-rzg2l-usbphy-ctrl.c
++++ b/drivers/reset/reset-rzg2l-usbphy-ctrl.c
+@@ -176,6 +176,7 @@ static int rzg2l_usbphy_ctrl_probe(struct platform_device *pdev)
+ 	vdev->dev.parent = dev;
+ 	priv->vdev = vdev;
+ 
++	device_set_of_node_from_dev(&vdev->dev, dev);
+ 	error = platform_device_add(vdev);
+ 	if (error)
+ 		goto err_device_put;
+diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
+index 05b936ad353be7..6cc9e61cca07de 100644
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -10589,14 +10589,17 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
+ 	}
+ 
+ 	/*
+-	 * Set the default power management level for runtime and system PM.
++	 * Set the default power management level for runtime and system PM if
++	 * not set by the host controller drivers.
+ 	 * Default power saving mode is to keep UFS link in Hibern8 state
+ 	 * and UFS device in sleep state.
+ 	 */
+-	hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
++	if (!hba->rpm_lvl)
++		hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
+ 						UFS_SLEEP_PWR_MODE,
+ 						UIC_LINK_HIBERN8_STATE);
+-	hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
++	if (!hba->spm_lvl)
++		hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
+ 						UFS_SLEEP_PWR_MODE,
+ 						UIC_LINK_HIBERN8_STATE);
+ 
+diff --git a/fs/afs/addr_prefs.c b/fs/afs/addr_prefs.c
+index a189ff8a5034e0..c0384201b8feb5 100644
+--- a/fs/afs/addr_prefs.c
++++ b/fs/afs/addr_prefs.c
+@@ -413,8 +413,10 @@ int afs_proc_addr_prefs_write(struct file *file, char *buf, size_t size)
+ 
+ 	do {
+ 		argc = afs_split_string(&buf, argv, ARRAY_SIZE(argv));
+-		if (argc < 0)
+-			return argc;
++		if (argc < 0) {
++			ret = argc;
++			goto done;
++		}
+ 		if (argc < 2)
+ 			goto inval;
+ 
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index 0c4d14c59ebec5..395b8b880ce786 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -797,6 +797,10 @@ static int get_canonical_dev_path(const char *dev_path, char *canonical)
+ 	if (ret)
+ 		goto out;
+ 	resolved_path = d_path(&path, path_buf, PATH_MAX);
++	if (IS_ERR(resolved_path)) {
++		ret = PTR_ERR(resolved_path);
++		goto out;
++	}
+ 	ret = strscpy(canonical, resolved_path, PATH_MAX);
+ out:
+ 	kfree(path_buf);
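
`d_path()` reports failure by encoding a negative errno in the returned pointer, so it must be checked with `IS_ERR()` before use, which is exactly what the hunk adds. A userspace sketch of that encoding convention, simplified from the kernel's `err.h`:

#include <stdio.h>

#define MAX_ERRNO 4095
#define IS_ERR(p)  ((unsigned long)(p) >= (unsigned long)-MAX_ERRNO)
#define PTR_ERR(p) ((long)(p))

static char *resolve(int fail)
{
	static char path[] = "/dev/sda";

	/* errnos live in the top 4095 addresses, never valid pointers */
	return fail ? (char *)(long)-36 /* -ENAMETOOLONG */ : path;
}

int main(void)
{
	char *p = resolve(1);

	if (IS_ERR(p))
		printf("error %ld\n", PTR_ERR(p));	/* error -36 */
	else
		printf("%s\n", p);
	return 0;
}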
+diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
+index 89b11336a83697..1806bff8e59bc3 100644
+--- a/fs/cachefiles/daemon.c
++++ b/fs/cachefiles/daemon.c
+@@ -15,6 +15,7 @@
+ #include <linux/namei.h>
+ #include <linux/poll.h>
+ #include <linux/mount.h>
++#include <linux/security.h>
+ #include <linux/statfs.h>
+ #include <linux/ctype.h>
+ #include <linux/string.h>
+@@ -576,7 +577,7 @@ static int cachefiles_daemon_dir(struct cachefiles_cache *cache, char *args)
+  */
+ static int cachefiles_daemon_secctx(struct cachefiles_cache *cache, char *args)
+ {
+-	char *secctx;
++	int err;
+ 
+ 	_enter(",%s", args);
+ 
+@@ -585,16 +586,16 @@ static int cachefiles_daemon_secctx(struct cachefiles_cache *cache, char *args)
+ 		return -EINVAL;
+ 	}
+ 
+-	if (cache->secctx) {
++	if (cache->have_secid) {
+ 		pr_err("Second security context specified\n");
+ 		return -EINVAL;
+ 	}
+ 
+-	secctx = kstrdup(args, GFP_KERNEL);
+-	if (!secctx)
+-		return -ENOMEM;
++	err = security_secctx_to_secid(args, strlen(args), &cache->secid);
++	if (err)
++		return err;
+ 
+-	cache->secctx = secctx;
++	cache->have_secid = true;
+ 	return 0;
+ }
+ 
+@@ -820,7 +821,6 @@ static void cachefiles_daemon_unbind(struct cachefiles_cache *cache)
+ 	put_cred(cache->cache_cred);
+ 
+ 	kfree(cache->rootdirname);
+-	kfree(cache->secctx);
+ 	kfree(cache->tag);
+ 
+ 	_leave("");
+diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
+index 7b99bd98de75b8..38c236e38cef85 100644
+--- a/fs/cachefiles/internal.h
++++ b/fs/cachefiles/internal.h
+@@ -122,7 +122,6 @@ struct cachefiles_cache {
+ #define CACHEFILES_STATE_CHANGED	3	/* T if state changed (poll trigger) */
+ #define CACHEFILES_ONDEMAND_MODE	4	/* T if in on-demand read mode */
+ 	char				*rootdirname;	/* name of cache root directory */
+-	char				*secctx;	/* LSM security context */
+ 	char				*tag;		/* cache binding tag */
+ 	refcount_t			unbind_pincount;/* refcount to do daemon unbind */
+ 	struct xarray			reqs;		/* xarray of pending on-demand requests */
+@@ -130,6 +129,8 @@ struct cachefiles_cache {
+ 	struct xarray			ondemand_ids;	/* xarray for ondemand_id allocation */
+ 	u32				ondemand_id_next;
+ 	u32				msg_id_next;
++	u32				secid;		/* LSM security id */
++	bool				have_secid;	/* whether "secid" was set */
+ };
+ 
+ static inline bool cachefiles_in_ondemand_mode(struct cachefiles_cache *cache)
+diff --git a/fs/cachefiles/security.c b/fs/cachefiles/security.c
+index fe777164f1d894..fc6611886b3b5e 100644
+--- a/fs/cachefiles/security.c
++++ b/fs/cachefiles/security.c
+@@ -18,7 +18,7 @@ int cachefiles_get_security_ID(struct cachefiles_cache *cache)
+ 	struct cred *new;
+ 	int ret;
+ 
+-	_enter("{%s}", cache->secctx);
++	_enter("{%u}", cache->have_secid ? cache->secid : 0);
+ 
+ 	new = prepare_kernel_cred(current);
+ 	if (!new) {
+@@ -26,8 +26,8 @@ int cachefiles_get_security_ID(struct cachefiles_cache *cache)
+ 		goto error;
+ 	}
+ 
+-	if (cache->secctx) {
+-		ret = set_security_override_from_ctx(new, cache->secctx);
++	if (cache->have_secid) {
++		ret = set_security_override(new, cache->secid);
+ 		if (ret < 0) {
+ 			put_cred(new);
+ 			pr_err("Security denies permission to nominate security context: error %d\n",
+diff --git a/fs/file.c b/fs/file.c
+index eb093e73697206..4cb952541dd036 100644
+--- a/fs/file.c
++++ b/fs/file.c
+@@ -21,6 +21,7 @@
+ #include <linux/rcupdate.h>
+ #include <linux/close_range.h>
+ #include <net/sock.h>
++#include <linux/init_task.h>
+ 
+ #include "internal.h"
+ 
+diff --git a/fs/hfs/super.c b/fs/hfs/super.c
+index eeac99765f0d61..cf13b5cc108488 100644
+--- a/fs/hfs/super.c
++++ b/fs/hfs/super.c
+@@ -419,11 +419,13 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent)
+ 		goto bail_no_root;
+ 	res = hfs_cat_find_brec(sb, HFS_ROOT_CNID, &fd);
+ 	if (!res) {
+-		if (fd.entrylength > sizeof(rec) || fd.entrylength < 0) {
++		if (fd.entrylength != sizeof(rec.dir)) {
+ 			res =  -EIO;
+ 			goto bail_hfs_find;
+ 		}
+ 		hfs_bnode_read(fd.bnode, &rec, fd.entryoffset, fd.entrylength);
++		if (rec.type != HFS_CDR_DIR)
++			res = -EIO;
+ 	}
+ 	if (res)
+ 		goto bail_hfs_find;
+diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
+index 25d1ede6bb0eb0..1bad460275ebe2 100644
+--- a/fs/iomap/buffered-io.c
++++ b/fs/iomap/buffered-io.c
+@@ -1138,7 +1138,7 @@ static void iomap_write_delalloc_scan(struct inode *inode,
+ 				start_byte, end_byte, iomap, punch);
+ 
+ 		/* move offset to start of next folio in range */
+-		start_byte = folio_next_index(folio) << PAGE_SHIFT;
++		start_byte = folio_pos(folio) + folio_size(folio);
+ 		folio_unlock(folio);
+ 		folio_put(folio);
+ 	}
+diff --git a/fs/netfs/read_collect.c b/fs/netfs/read_collect.c
+index e70eb4ea21c038..a44132c986538b 100644
+--- a/fs/netfs/read_collect.c
++++ b/fs/netfs/read_collect.c
+@@ -249,16 +249,17 @@ static bool netfs_consume_read_data(struct netfs_io_subrequest *subreq, bool was
+ 
+ 	/* Deal with the trickiest case: that this subreq is in the middle of a
+ 	 * folio, not touching either edge, but finishes first.  In such a
+-	 * case, we donate to the previous subreq, if there is one, so that the
+-	 * donation is only handled when that completes - and remove this
+-	 * subreq from the list.
++	 * case, we donate to the previous subreq, if there is one and if it is
++	 * contiguous, so that the donation is only handled when that completes
++	 * - and remove this subreq from the list.
+ 	 *
+ 	 * If the previous subreq finished first, we will have acquired their
+ 	 * donation and should be able to unlock folios and/or donate nextwards.
+ 	 */
+ 	if (!subreq->consumed &&
+ 	    !prev_donated &&
+-	    !list_is_first(&subreq->rreq_link, &rreq->subrequests)) {
++	    !list_is_first(&subreq->rreq_link, &rreq->subrequests) &&
++	    subreq->start == prev->start + prev->len) {
+ 		prev = list_prev_entry(subreq, rreq_link);
+ 		WRITE_ONCE(prev->next_donated, prev->next_donated + subreq->len);
+ 		subreq->start += subreq->len;
+diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
+index b4521b09605881..387a7a176ad84b 100644
+--- a/fs/proc/vmcore.c
++++ b/fs/proc/vmcore.c
+@@ -404,6 +404,8 @@ static ssize_t __read_vmcore(struct iov_iter *iter, loff_t *fpos)
+ 			if (!iov_iter_count(iter))
+ 				return acc;
+ 		}
++
++		cond_resched();
+ 	}
+ 
+ 	return acc;
+diff --git a/fs/qnx6/inode.c b/fs/qnx6/inode.c
+index 85925ec0051a97..3310d1ad4d0e98 100644
+--- a/fs/qnx6/inode.c
++++ b/fs/qnx6/inode.c
+@@ -179,8 +179,7 @@ static int qnx6_statfs(struct dentry *dentry, struct kstatfs *buf)
+  */
+ static const char *qnx6_checkroot(struct super_block *s)
+ {
+-	static char match_root[2][3] = {".\0\0", "..\0"};
+-	int i, error = 0;
++	int error = 0;
+ 	struct qnx6_dir_entry *dir_entry;
+ 	struct inode *root = d_inode(s->s_root);
+ 	struct address_space *mapping = root->i_mapping;
+@@ -189,11 +188,9 @@ static const char *qnx6_checkroot(struct super_block *s)
+ 	if (IS_ERR(folio))
+ 		return "error reading root directory";
+ 	dir_entry = kmap_local_folio(folio, 0);
+-	for (i = 0; i < 2; i++) {
+-		/* maximum 3 bytes - due to match_root limitation */
+-		if (strncmp(dir_entry[i].de_fname, match_root[i], 3))
+-			error = 1;
+-	}
++	if (memcmp(dir_entry[0].de_fname, ".", 2) ||
++	    memcmp(dir_entry[1].de_fname, "..", 3))
++		error = 1;
+ 	folio_release_kmap(folio, dir_entry);
+ 	if (error)
+ 		return "error reading root directory.";
+diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
+index fe40152b915d82..fb51cdf5520617 100644
+--- a/fs/smb/client/connect.c
++++ b/fs/smb/client/connect.c
+@@ -1044,6 +1044,7 @@ clean_demultiplex_info(struct TCP_Server_Info *server)
+ 	/* Release netns reference for this server. */
+ 	put_net(cifs_net_ns(server));
+ 	kfree(server->leaf_fullpath);
++	kfree(server->hostname);
+ 	kfree(server);
+ 
+ 	length = atomic_dec_return(&tcpSesAllocCount);
+@@ -1670,8 +1671,6 @@ cifs_put_tcp_session(struct TCP_Server_Info *server, int from_reconnect)
+ 	kfree_sensitive(server->session_key.response);
+ 	server->session_key.response = NULL;
+ 	server->session_key.len = 0;
+-	kfree(server->hostname);
+-	server->hostname = NULL;
+ 
+ 	task = xchg(&server->tsk, NULL);
+ 	if (task)
+diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
+index aa1e65ccb61584..6caaa62d2b1f89 100644
+--- a/include/linux/hrtimer.h
++++ b/include/linux/hrtimer.h
+@@ -379,6 +379,7 @@ extern void __init hrtimers_init(void);
+ extern void sysrq_timer_list_show(void);
+ 
+ int hrtimers_prepare_cpu(unsigned int cpu);
++int hrtimers_cpu_starting(unsigned int cpu);
+ #ifdef CONFIG_HOTPLUG_CPU
+ int hrtimers_cpu_dying(unsigned int cpu);
+ #else
+diff --git a/include/linux/poll.h b/include/linux/poll.h
+index d1ea4f3714a848..fc641b50f1298e 100644
+--- a/include/linux/poll.h
++++ b/include/linux/poll.h
+@@ -41,8 +41,16 @@ typedef struct poll_table_struct {
+ 
+ static inline void poll_wait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p)
+ {
+-	if (p && p->_qproc && wait_address)
++	if (p && p->_qproc && wait_address) {
+ 		p->_qproc(filp, wait_address, p);
++		/*
++		 * This memory barrier is paired with the one in wq_has_sleeper().
++		 * See the comment above prepare_to_wait(); we need to
++		 * ensure that subsequent tests in this thread can't be
++		 * reordered with __add_wait_queue() in _qproc() paths.
++		 */
++		smp_mb();
++	}
+ }
+ 
+ /*
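
The new `smp_mb()` implements the classic sleeper/waker handshake: the sleeper enqueues itself and then tests the condition, the waker sets the condition and then tests for sleepers, and a full barrier on each side guarantees at least one party sees the other's store, so no wakeup is lost. A C11 analog of the pairing (single-threaded driver, for illustration only):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool queued, condition;

static bool waiter_would_sleep(void)	/* poll_wait() side */
{
	atomic_store_explicit(&queued, true, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* the added smp_mb() */
	return !atomic_load_explicit(&condition, memory_order_relaxed);
}

static bool waker_sees_sleeper(void)	/* wq_has_sleeper() side */
{
	atomic_store_explicit(&condition, true, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* barrier in wq_has_sleeper() */
	return atomic_load_explicit(&queued, memory_order_relaxed);
}

int main(void)
{
	/* The fences forbid the lost-wakeup outcome where the waiter
	 * decides to sleep while the waker simultaneously sees no sleeper. */
	bool sleeps = waiter_would_sleep();
	bool wakes = waker_sees_sleeper();

	printf("sleeps=%d wakes=%d\n", sleeps, wakes);
	return 0;
}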
+diff --git a/include/linux/pruss_driver.h b/include/linux/pruss_driver.h
+index c9a31c567e85bf..2e18fef1a2e109 100644
+--- a/include/linux/pruss_driver.h
++++ b/include/linux/pruss_driver.h
+@@ -144,32 +144,32 @@ static inline int pruss_release_mem_region(struct pruss *pruss,
+ static inline int pruss_cfg_get_gpmux(struct pruss *pruss,
+ 				      enum pruss_pru_id pru_id, u8 *mux)
+ {
+-	return ERR_PTR(-EOPNOTSUPP);
++	return -EOPNOTSUPP;
+ }
+ 
+ static inline int pruss_cfg_set_gpmux(struct pruss *pruss,
+ 				      enum pruss_pru_id pru_id, u8 mux)
+ {
+-	return ERR_PTR(-EOPNOTSUPP);
++	return -EOPNOTSUPP;
+ }
+ 
+ static inline int pruss_cfg_gpimode(struct pruss *pruss,
+ 				    enum pruss_pru_id pru_id,
+ 				    enum pruss_gpi_mode mode)
+ {
+-	return ERR_PTR(-EOPNOTSUPP);
++	return -EOPNOTSUPP;
+ }
+ 
+ static inline int pruss_cfg_miirt_enable(struct pruss *pruss, bool enable)
+ {
+-	return ERR_PTR(-EOPNOTSUPP);
++	return -EOPNOTSUPP;
+ }
+ 
+ static inline int pruss_cfg_xfr_enable(struct pruss *pruss,
+ 				       enum pru_type pru_type,
+-				       bool enable);
++				       bool enable)
+ {
+-	return ERR_PTR(-EOPNOTSUPP);
++	return -EOPNOTSUPP;
+ }
+ 
+ #endif /* CONFIG_TI_PRUSS */
+diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
+index cb40f1a1d0811d..75342022d14414 100644
+--- a/include/linux/userfaultfd_k.h
++++ b/include/linux/userfaultfd_k.h
+@@ -247,6 +247,13 @@ static inline bool vma_can_userfault(struct vm_area_struct *vma,
+ 	    vma_is_shmem(vma);
+ }
+ 
++static inline bool vma_has_uffd_without_event_remap(struct vm_area_struct *vma)
++{
++	struct userfaultfd_ctx *uffd_ctx = vma->vm_userfaultfd_ctx.ctx;
++
++	return uffd_ctx && (uffd_ctx->features & UFFD_FEATURE_EVENT_REMAP) == 0;
++}
++
+ extern int dup_userfaultfd(struct vm_area_struct *, struct list_head *);
+ extern void dup_userfaultfd_complete(struct list_head *);
+ void dup_userfaultfd_fail(struct list_head *);
+@@ -402,6 +409,11 @@ static inline bool userfaultfd_wp_async(struct vm_area_struct *vma)
+ 	return false;
+ }
+ 
++static inline bool vma_has_uffd_without_event_remap(struct vm_area_struct *vma)
++{
++	return false;
++}
++
+ #endif /* CONFIG_USERFAULTFD */
+ 
+ static inline bool userfaultfd_wp_use_markers(struct vm_area_struct *vma)
+diff --git a/include/net/page_pool/helpers.h b/include/net/page_pool/helpers.h
+index 793e6fd78bc5c0..60a5347922becc 100644
+--- a/include/net/page_pool/helpers.h
++++ b/include/net/page_pool/helpers.h
+@@ -294,7 +294,7 @@ static inline long page_pool_unref_page(struct page *page, long nr)
+ 
+ static inline void page_pool_ref_netmem(netmem_ref netmem)
+ {
+-	atomic_long_inc(&netmem_to_page(netmem)->pp_ref_count);
++	atomic_long_inc(netmem_get_pp_ref_count_ref(netmem));
+ }
+ 
+ static inline void page_pool_ref_page(struct page *page)
+diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h
+index bb8a59c6caa219..d36c857dd24974 100644
+--- a/include/trace/events/mmflags.h
++++ b/include/trace/events/mmflags.h
+@@ -13,6 +13,69 @@
+  * Thus most bits set go first.
+  */
+ 
++/* These define the values that are enums (the bits) */
++#define TRACE_GFP_FLAGS_GENERAL			\
++	TRACE_GFP_EM(DMA)			\
++	TRACE_GFP_EM(HIGHMEM)			\
++	TRACE_GFP_EM(DMA32)			\
++	TRACE_GFP_EM(MOVABLE)			\
++	TRACE_GFP_EM(RECLAIMABLE)		\
++	TRACE_GFP_EM(HIGH)			\
++	TRACE_GFP_EM(IO)			\
++	TRACE_GFP_EM(FS)			\
++	TRACE_GFP_EM(ZERO)			\
++	TRACE_GFP_EM(DIRECT_RECLAIM)		\
++	TRACE_GFP_EM(KSWAPD_RECLAIM)		\
++	TRACE_GFP_EM(WRITE)			\
++	TRACE_GFP_EM(NOWARN)			\
++	TRACE_GFP_EM(RETRY_MAYFAIL)		\
++	TRACE_GFP_EM(NOFAIL)			\
++	TRACE_GFP_EM(NORETRY)			\
++	TRACE_GFP_EM(MEMALLOC)			\
++	TRACE_GFP_EM(COMP)			\
++	TRACE_GFP_EM(NOMEMALLOC)		\
++	TRACE_GFP_EM(HARDWALL)			\
++	TRACE_GFP_EM(THISNODE)			\
++	TRACE_GFP_EM(ACCOUNT)			\
++	TRACE_GFP_EM(ZEROTAGS)
++
++#ifdef CONFIG_KASAN_HW_TAGS
++# define TRACE_GFP_FLAGS_KASAN			\
++	TRACE_GFP_EM(SKIP_ZERO)			\
++	TRACE_GFP_EM(SKIP_KASAN)
++#else
++# define TRACE_GFP_FLAGS_KASAN
++#endif
++
++#ifdef CONFIG_LOCKDEP
++# define TRACE_GFP_FLAGS_LOCKDEP		\
++	TRACE_GFP_EM(NOLOCKDEP)
++#else
++# define TRACE_GFP_FLAGS_LOCKDEP
++#endif
++
++#ifdef CONFIG_SLAB_OBJ_EXT
++# define TRACE_GFP_FLAGS_SLAB			\
++	TRACE_GFP_EM(NO_OBJ_EXT)
++#else
++# define TRACE_GFP_FLAGS_SLAB
++#endif
++
++#define TRACE_GFP_FLAGS				\
++	TRACE_GFP_FLAGS_GENERAL			\
++	TRACE_GFP_FLAGS_KASAN			\
++	TRACE_GFP_FLAGS_LOCKDEP			\
++	TRACE_GFP_FLAGS_SLAB
++
++#undef TRACE_GFP_EM
++#define TRACE_GFP_EM(a) TRACE_DEFINE_ENUM(___GFP_##a##_BIT);
++
++TRACE_GFP_FLAGS
++
++/* Just in case these are ever used */
++TRACE_DEFINE_ENUM(___GFP_UNUSED_BIT);
++TRACE_DEFINE_ENUM(___GFP_LAST_BIT);
++
+ #define gfpflag_string(flag) {(__force unsigned long)flag, #flag}
+ 
+ #define __def_gfpflag_names			\
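
The block above is an X-macro: a single canonical flag list, `TRACE_GFP_FLAGS`, is expanded under different definitions of `TRACE_GFP_EM()`, so the exported enum definitions can never drift out of sync with the list itself. A standalone sketch of the technique:

#include <stdio.h>

#define COLOR_LIST \
	COLOR_EM(RED) \
	COLOR_EM(GREEN) \
	COLOR_EM(BLUE)

/* first expansion: enum values */
#define COLOR_EM(c) COLOR_##c,
enum color { COLOR_LIST };
#undef COLOR_EM

/* second expansion: matching name strings, guaranteed in sync */
#define COLOR_EM(c) #c,
static const char *color_names[] = { COLOR_LIST };
#undef COLOR_EM

int main(void)
{
	printf("%s\n", color_names[COLOR_GREEN]);	/* prints GREEN */
	return 0;
}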
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index d293d52a3e00e1..9ee6c9145b1df9 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -2179,7 +2179,7 @@ static struct cpuhp_step cpuhp_hp_states[] = {
+ 	},
+ 	[CPUHP_AP_HRTIMERS_DYING] = {
+ 		.name			= "hrtimers:dying",
+-		.startup.single		= NULL,
++		.startup.single		= hrtimers_cpu_starting,
+ 		.teardown.single	= hrtimers_cpu_dying,
+ 	},
+ 	[CPUHP_AP_TICK_DYING] = {
+diff --git a/kernel/gen_kheaders.sh b/kernel/gen_kheaders.sh
+index 383fd43ac61222..7e1340da5acae6 100755
+--- a/kernel/gen_kheaders.sh
++++ b/kernel/gen_kheaders.sh
+@@ -89,6 +89,7 @@ find $cpio_dir -type f -print0 |
+ 
+ # Create archive and try to normalize metadata for reproducibility.
+ tar "${KBUILD_BUILD_TIMESTAMP:+--mtime=$KBUILD_BUILD_TIMESTAMP}" \
++    --exclude=".__afs*" --exclude=".nfs*" \
+     --owner=0 --group=0 --sort=name --numeric-owner --mode=u=rw,go=r,a+X \
+     -I $XZ -cf $tarfile -C $cpio_dir/ . > /dev/null
+ 
+diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
+index f928a67a07d29a..4c4681cb9337b4 100644
+--- a/kernel/sched/ext.c
++++ b/kernel/sched/ext.c
+@@ -2630,6 +2630,7 @@ static int balance_one(struct rq *rq, struct task_struct *prev)
+ {
+ 	struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
+ 	bool prev_on_scx = prev->sched_class == &ext_sched_class;
++	bool prev_on_rq = prev->scx.flags & SCX_TASK_QUEUED;
+ 	int nr_loops = SCX_DSP_MAX_LOOPS;
+ 
+ 	lockdep_assert_rq_held(rq);
+@@ -2662,8 +2663,7 @@ static int balance_one(struct rq *rq, struct task_struct *prev)
+ 		 * See scx_ops_disable_workfn() for the explanation on the
+ 		 * bypassing test.
+ 		 */
+-		if ((prev->scx.flags & SCX_TASK_QUEUED) &&
+-		    prev->scx.slice && !scx_rq_bypassing(rq)) {
++		if (prev_on_rq && prev->scx.slice && !scx_rq_bypassing(rq)) {
+ 			rq->scx.flags |= SCX_RQ_BAL_KEEP;
+ 			goto has_tasks;
+ 		}
+@@ -2696,6 +2696,10 @@ static int balance_one(struct rq *rq, struct task_struct *prev)
+ 
+ 		flush_dispatch_buf(rq);
+ 
++		if (prev_on_rq && prev->scx.slice) {
++			rq->scx.flags |= SCX_RQ_BAL_KEEP;
++			goto has_tasks;
++		}
+ 		if (rq->scx.local_dsq.nr)
+ 			goto has_tasks;
+ 		if (consume_global_dsq(rq))
+@@ -2721,8 +2725,7 @@ static int balance_one(struct rq *rq, struct task_struct *prev)
+ 	 * Didn't find another task to run. Keep running @prev unless
+ 	 * %SCX_OPS_ENQ_LAST is in effect.
+ 	 */
+-	if ((prev->scx.flags & SCX_TASK_QUEUED) &&
+-	    (!static_branch_unlikely(&scx_ops_enq_last) ||
++	if (prev_on_rq && (!static_branch_unlikely(&scx_ops_enq_last) ||
+ 	     scx_rq_bypassing(rq))) {
+ 		rq->scx.flags |= SCX_RQ_BAL_KEEP;
+ 		goto has_tasks;
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 1ca96c99872f08..60be5f8bbe7115 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -4065,7 +4065,11 @@ static void update_cfs_group(struct sched_entity *se)
+ 	struct cfs_rq *gcfs_rq = group_cfs_rq(se);
+ 	long shares;
+ 
+-	if (!gcfs_rq)
++	/*
++	 * When a group becomes empty, preserve its weight. This matters for
++	 * DELAY_DEQUEUE.
++	 */
++	if (!gcfs_rq || !gcfs_rq->load.weight)
+ 		return;
+ 
+ 	if (throttled_hierarchy(gcfs_rq))
+diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
+index cddcd08ea827f9..ee20f5032a0366 100644
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -2156,6 +2156,15 @@ int hrtimers_prepare_cpu(unsigned int cpu)
+ 	}
+ 
+ 	cpu_base->cpu = cpu;
++	hrtimer_cpu_base_init_expiry_lock(cpu_base);
++	return 0;
++}
++
++int hrtimers_cpu_starting(unsigned int cpu)
++{
++	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
++
++	/* Clear out any left over state from a CPU down operation */
+ 	cpu_base->active_bases = 0;
+ 	cpu_base->hres_active = 0;
+ 	cpu_base->hang_detected = 0;
+@@ -2164,7 +2173,6 @@ int hrtimers_prepare_cpu(unsigned int cpu)
+ 	cpu_base->expires_next = KTIME_MAX;
+ 	cpu_base->softirq_expires_next = KTIME_MAX;
+ 	cpu_base->online = 1;
+-	hrtimer_cpu_base_init_expiry_lock(cpu_base);
+ 	return 0;
+ }
+ 
+@@ -2240,6 +2248,7 @@ int hrtimers_cpu_dying(unsigned int dying_cpu)
+ void __init hrtimers_init(void)
+ {
+ 	hrtimers_prepare_cpu(smp_processor_id());
++	hrtimers_cpu_starting(smp_processor_id());
+ 	open_softirq(HRTIMER_SOFTIRQ, hrtimer_run_softirq);
+ }
+ 
+diff --git a/kernel/time/timer_migration.c b/kernel/time/timer_migration.c
+index 8d57f7686bb03a..371a62a749aad3 100644
+--- a/kernel/time/timer_migration.c
++++ b/kernel/time/timer_migration.c
+@@ -534,8 +534,13 @@ static void __walk_groups(up_f up, struct tmigr_walk *data,
+ 			break;
+ 
+ 		child = group;
+-		group = group->parent;
++		/*
++		 * Pairs with the store release on group connection
++		 * to make sure group initialization is visible.
++		 */
++		group = READ_ONCE(group->parent);
+ 		data->childmask = child->groupmask;
++		WARN_ON_ONCE(!data->childmask);
+ 	} while (group);
+ }
+ 
+@@ -1487,6 +1492,21 @@ static void tmigr_init_group(struct tmigr_group *group, unsigned int lvl,
+ 	s.seq = 0;
+ 	atomic_set(&group->migr_state, s.state);
+ 
++	/*
++	 * If this is a new top-level, prepare its groupmask in advance.
++	 * This avoids accidents where yet another new top-level is
++	 * created in the future and made visible before the current groupmask.
++	 */
++	if (list_empty(&tmigr_level_list[lvl])) {
++		group->groupmask = BIT(0);
++		/*
++		 * The previous top level has prepared its groupmask already,
++		 * simply account it as the first child.
++		 */
++		if (lvl > 0)
++			group->num_children = 1;
++	}
++
+ 	timerqueue_init_head(&group->events);
+ 	timerqueue_init(&group->groupevt.nextevt);
+ 	group->groupevt.nextevt.expires = KTIME_MAX;
+@@ -1550,8 +1570,25 @@ static void tmigr_connect_child_parent(struct tmigr_group *child,
+ 	raw_spin_lock_irq(&child->lock);
+ 	raw_spin_lock_nested(&parent->lock, SINGLE_DEPTH_NESTING);
+ 
+-	child->parent = parent;
+-	child->groupmask = BIT(parent->num_children++);
++	if (activate) {
++		/*
++		 * @child is the old top and @parent the new one. In this
++		 * case groupmask is pre-initialized and @child already
++		 * accounted, along with its new sibling corresponding to the
++		 * CPU going up.
++		 */
++		WARN_ON_ONCE(child->groupmask != BIT(0) || parent->num_children != 2);
++	} else {
++		/* Adding @child for the CPU going up to @parent. */
++		child->groupmask = BIT(parent->num_children++);
++	}
++
++	/*
++	 * Make sure parent initialization is visible before publishing it to a
++	 * racing CPU entering/exiting idle. This RELEASE barrier enforces an
++	 * address dependency that pairs with the READ_ONCE() in __walk_groups().
++	 */
++	smp_store_release(&child->parent, parent);
+ 
+ 	raw_spin_unlock(&parent->lock);
+ 	raw_spin_unlock_irq(&child->lock);
+diff --git a/mm/filemap.c b/mm/filemap.c
+index 56fa431c52af7b..dc83baab85a140 100644
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -3004,7 +3004,7 @@ static inline loff_t folio_seek_hole_data(struct xa_state *xas,
+ 		if (ops->is_partially_uptodate(folio, offset, bsz) ==
+ 							seek_data)
+ 			break;
+-		start = (start + bsz) & ~(bsz - 1);
++		start = (start + bsz) & ~((u64)bsz - 1);
+ 		offset += bsz;
+ 	} while (offset < folio_size(folio));
+ unlock:
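
The cast is the whole fix: `bsz` is a 32-bit quantity, so `~(bsz - 1)` was computed in 32 bits and zero-extended, masking away the upper half of any 64-bit offset beyond 4 GiB. A worked example of the difference:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t start = 0x100000200ULL;	/* file offset above 4 GiB */
	uint32_t bsz = 0x200;			/* 512-byte blocks */

	uint64_t buggy = (start + bsz) & ~(bsz - 1);		/* 32-bit mask */
	uint64_t fixed = (start + bsz) & ~((uint64_t)bsz - 1);	/* 64-bit mask */

	printf("buggy=0x%llx fixed=0x%llx\n",
	       (unsigned long long)buggy, (unsigned long long)fixed);
	/* buggy=0x400 fixed=0x100000400 */
	return 0;
}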
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index 7e0f72cd9fd4a0..f127b61f04a825 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -2132,6 +2132,16 @@ static pmd_t move_soft_dirty_pmd(pmd_t pmd)
+ 	return pmd;
+ }
+ 
++static pmd_t clear_uffd_wp_pmd(pmd_t pmd)
++{
++	if (pmd_present(pmd))
++		pmd = pmd_clear_uffd_wp(pmd);
++	else if (is_swap_pmd(pmd))
++		pmd = pmd_swp_clear_uffd_wp(pmd);
++
++	return pmd;
++}
++
+ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
+ 		  unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd)
+ {
+@@ -2170,6 +2180,8 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
+ 			pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
+ 		}
+ 		pmd = move_soft_dirty_pmd(pmd);
++		if (vma_has_uffd_without_event_remap(vma))
++			pmd = clear_uffd_wp_pmd(pmd);
+ 		set_pmd_at(mm, new_addr, new_pmd, pmd);
+ 		if (force_flush)
+ 			flush_pmd_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 2fa87b9ecec6c7..4a8a4f3535caf7 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -5395,6 +5395,7 @@ static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr,
+ 			  unsigned long new_addr, pte_t *src_pte, pte_t *dst_pte,
+ 			  unsigned long sz)
+ {
++	bool need_clear_uffd_wp = vma_has_uffd_without_event_remap(vma);
+ 	struct hstate *h = hstate_vma(vma);
+ 	struct mm_struct *mm = vma->vm_mm;
+ 	spinlock_t *src_ptl, *dst_ptl;
+@@ -5411,7 +5412,18 @@ static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr,
+ 		spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
+ 
+ 	pte = huge_ptep_get_and_clear(mm, old_addr, src_pte);
+-	set_huge_pte_at(mm, new_addr, dst_pte, pte, sz);
++
++	if (need_clear_uffd_wp && pte_marker_uffd_wp(pte))
++		huge_pte_clear(mm, new_addr, dst_pte, sz);
++	else {
++		if (need_clear_uffd_wp) {
++			if (pte_present(pte))
++				pte = huge_pte_clear_uffd_wp(pte);
++			else if (is_swap_pte(pte))
++				pte = pte_swp_clear_uffd_wp(pte);
++		}
++		set_huge_pte_at(mm, new_addr, dst_pte, pte, sz);
++	}
+ 
+ 	if (src_ptl != dst_ptl)
+ 		spin_unlock(src_ptl);
+diff --git a/mm/kmemleak.c b/mm/kmemleak.c
+index 74f5f4c51ab8c8..5f878ee05ff80b 100644
+--- a/mm/kmemleak.c
++++ b/mm/kmemleak.c
+@@ -1071,7 +1071,7 @@ void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
+ 	pr_debug("%s(0x%px, %zu)\n", __func__, ptr, size);
+ 
+ 	if (kmemleak_enabled && ptr && !IS_ERR_PCPU(ptr))
+-		create_object_percpu((__force unsigned long)ptr, size, 0, gfp);
++		create_object_percpu((__force unsigned long)ptr, size, 1, gfp);
+ }
+ EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);
+ 
+diff --git a/mm/mremap.c b/mm/mremap.c
+index dee98ff2bbd644..1b2edd65c2a172 100644
+--- a/mm/mremap.c
++++ b/mm/mremap.c
+@@ -138,6 +138,7 @@ static int move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
+ 		struct vm_area_struct *new_vma, pmd_t *new_pmd,
+ 		unsigned long new_addr, bool need_rmap_locks)
+ {
++	bool need_clear_uffd_wp = vma_has_uffd_without_event_remap(vma);
+ 	struct mm_struct *mm = vma->vm_mm;
+ 	pte_t *old_pte, *new_pte, pte;
+ 	spinlock_t *old_ptl, *new_ptl;
+@@ -207,7 +208,18 @@ static int move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
+ 			force_flush = true;
+ 		pte = move_pte(pte, old_addr, new_addr);
+ 		pte = move_soft_dirty_pte(pte);
+-		set_pte_at(mm, new_addr, new_pte, pte);
++
++		if (need_clear_uffd_wp && pte_marker_uffd_wp(pte))
++			pte_clear(mm, new_addr, new_pte);
++		else {
++			if (need_clear_uffd_wp) {
++				if (pte_present(pte))
++					pte = pte_clear_uffd_wp(pte);
++				else if (is_swap_pte(pte))
++					pte = pte_swp_clear_uffd_wp(pte);
++			}
++			set_pte_at(mm, new_addr, new_pte, pte);
++		}
+ 	}
+ 
+ 	arch_leave_lazy_mmu_mode();
+@@ -269,6 +281,15 @@ static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
+ 	if (WARN_ON_ONCE(!pmd_none(*new_pmd)))
+ 		return false;
+ 
++	/* If this pmd belongs to a uffd vma with remap events disabled, we need
++	 * to ensure that the uffd-wp state is cleared from all pgtables. This
++	 * means recursing into lower page tables in move_page_tables(), and we
++	 * can reuse the existing code if we simply treat the entry as "not
++	 * moved".
++	 */
++	if (vma_has_uffd_without_event_remap(vma))
++		return false;
++
+ 	/*
+ 	 * We don't have to worry about the ordering of src and dst
+ 	 * ptlocks because exclusive mmap_lock prevents deadlock.
+@@ -324,6 +345,15 @@ static bool move_normal_pud(struct vm_area_struct *vma, unsigned long old_addr,
+ 	if (WARN_ON_ONCE(!pud_none(*new_pud)))
+ 		return false;
+ 
++	/* If this pud belongs to a uffd vma with remap events disabled, we need
++	 * to ensure that the uffd-wp state is cleared from all pgtables. This
++	 * means recursing into lower page tables in move_page_tables(), and we
++	 * can reuse the existing code if we simply treat the entry as "not
++	 * moved".
++	 */
++	if (vma_has_uffd_without_event_remap(vma))
++		return false;
++
+ 	/*
+ 	 * We don't have to worry about the ordering of src and dst
+ 	 * ptlocks because exclusive mmap_lock prevents deadlock.
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index 67a680e4b484d7..d81d667907448c 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -4637,6 +4637,9 @@ static int evict_folios(struct lruvec *lruvec, struct scan_control *sc, int swap
+ 		reset_batch_size(walk);
+ 	}
+ 
++	__mod_lruvec_state(lruvec, PGDEMOTE_KSWAPD + reclaimer_offset(),
++					stat.nr_demoted);
++
+ 	item = PGSTEAL_KSWAPD + reclaimer_offset();
+ 	if (!cgroup_reclaim(sc))
+ 		__count_vm_events(item, reclaimed);
+diff --git a/net/core/filter.c b/net/core/filter.c
+index 54a53fae9e98f5..46da488ff0703f 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -11263,6 +11263,7 @@ BPF_CALL_4(sk_select_reuseport, struct sk_reuseport_kern *, reuse_kern,
+ 	bool is_sockarray = map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY;
+ 	struct sock_reuseport *reuse;
+ 	struct sock *selected_sk;
++	int err;
+ 
+ 	selected_sk = map->ops->map_lookup_elem(map, key);
+ 	if (!selected_sk)
+@@ -11270,10 +11271,6 @@ BPF_CALL_4(sk_select_reuseport, struct sk_reuseport_kern *, reuse_kern,
+ 
+ 	reuse = rcu_dereference(selected_sk->sk_reuseport_cb);
+ 	if (!reuse) {
+-		/* Lookup in sock_map can return TCP ESTABLISHED sockets. */
+-		if (sk_is_refcounted(selected_sk))
+-			sock_put(selected_sk);
+-
+ 		/* reuseport_array has only sk with non NULL sk_reuseport_cb.
+ 		 * The only (!reuse) case here is - the sk has already been
+ 		 * unhashed (e.g. by close()), so treat it as -ENOENT.
+@@ -11281,24 +11278,33 @@ BPF_CALL_4(sk_select_reuseport, struct sk_reuseport_kern *, reuse_kern,
+ 		 * Other maps (e.g. sock_map) do not provide this guarantee and
+ 		 * the sk may never be in the reuseport group to begin with.
+ 		 */
+-		return is_sockarray ? -ENOENT : -EINVAL;
++		err = is_sockarray ? -ENOENT : -EINVAL;
++		goto error;
+ 	}
+ 
+ 	if (unlikely(reuse->reuseport_id != reuse_kern->reuseport_id)) {
+ 		struct sock *sk = reuse_kern->sk;
+ 
+-		if (sk->sk_protocol != selected_sk->sk_protocol)
+-			return -EPROTOTYPE;
+-		else if (sk->sk_family != selected_sk->sk_family)
+-			return -EAFNOSUPPORT;
+-
+-		/* Catch all. Likely bound to a different sockaddr. */
+-		return -EBADFD;
++		if (sk->sk_protocol != selected_sk->sk_protocol) {
++			err = -EPROTOTYPE;
++		} else if (sk->sk_family != selected_sk->sk_family) {
++			err = -EAFNOSUPPORT;
++		} else {
++			/* Catch all. Likely bound to a different sockaddr. */
++			err = -EBADFD;
++		}
++		goto error;
+ 	}
+ 
+ 	reuse_kern->selected_sk = selected_sk;
+ 
+ 	return 0;
++error:
++	/* Lookup in sock_map can return TCP ESTABLISHED sockets. */
++	if (sk_is_refcounted(selected_sk))
++		sock_put(selected_sk);
++
++	return err;
+ }
+ 
+ static const struct bpf_func_proto sk_select_reuseport_proto = {
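
The rewrite funnels every failure through one `error:` label so the reference taken by a sockmap lookup is dropped on all paths, not only the first early return. A compact sketch of the single-exit refcount pattern (types invented):

#include <errno.h>
#include <stdio.h>

struct obj { int refs; };

static struct obj *lookup(struct obj *o) { o->refs++; return o; }
static void put(struct obj *o) { o->refs--; }

static int select_obj(struct obj *o, int want_kind, int kind)
{
	int err = 0;

	lookup(o);			/* takes a reference */
	if (kind != want_kind) {
		err = -EPROTOTYPE;
		goto error;		/* every failure funnels here */
	}
	return 0;			/* success keeps the reference */
error:
	put(o);				/* reference dropped exactly once */
	return err;
}

int main(void)
{
	struct obj o = { 0 };

	select_obj(&o, 1, 2);
	printf("refs=%d\n", o.refs);	/* prints 0: no leak on failure */
	return 0;
}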
+diff --git a/net/core/netdev-genl-gen.c b/net/core/netdev-genl-gen.c
+index b28424ae06d5fa..8614988fc67b9a 100644
+--- a/net/core/netdev-genl-gen.c
++++ b/net/core/netdev-genl-gen.c
+@@ -178,6 +178,16 @@ static const struct genl_multicast_group netdev_nl_mcgrps[] = {
+ 	[NETDEV_NLGRP_PAGE_POOL] = { "page-pool", },
+ };
+ 
++static void __netdev_nl_sock_priv_init(void *priv)
++{
++	netdev_nl_sock_priv_init(priv);
++}
++
++static void __netdev_nl_sock_priv_destroy(void *priv)
++{
++	netdev_nl_sock_priv_destroy(priv);
++}
++
+ struct genl_family netdev_nl_family __ro_after_init = {
+ 	.name		= NETDEV_FAMILY_NAME,
+ 	.version	= NETDEV_FAMILY_VERSION,
+@@ -189,6 +199,6 @@ struct genl_family netdev_nl_family __ro_after_init = {
+ 	.mcgrps		= netdev_nl_mcgrps,
+ 	.n_mcgrps	= ARRAY_SIZE(netdev_nl_mcgrps),
+ 	.sock_priv_size	= sizeof(struct list_head),
+-	.sock_priv_init	= (void *)netdev_nl_sock_priv_init,
+-	.sock_priv_destroy = (void *)netdev_nl_sock_priv_destroy,
++	.sock_priv_init	= __netdev_nl_sock_priv_init,
++	.sock_priv_destroy = __netdev_nl_sock_priv_destroy,
+ };
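
These generated trampolines, and the `ynl-gen-c.py` change later in this patch that emits them for all families, exist because kernel CFI checks the exact function prototype at every indirect call: invoking a `void (*)(struct list_head *)` through a `void (*)(void *)` slot trips the check even though the call would otherwise behave. A standalone illustration of the type-correct wrapper:

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

typedef void (*generic_cb)(void *);	/* slot type in the ops table */

static void real_init(struct list_head *priv)	/* the typed handler */
{
	priv->next = priv->prev = priv;
	puts("initialised");
}

/* Wrapper whose prototype matches the slot exactly; the conversion
 * happens inside an ordinary direct call, which CFI does not police. */
static void init_trampoline(void *priv)
{
	real_init(priv);
}

int main(void)
{
	struct list_head h;
	generic_cb cb = init_trampoline;	/* no function-pointer cast */

	cb(&h);
	return 0;
}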
+diff --git a/net/core/pktgen.c b/net/core/pktgen.c
+index 34f68ef74b8f2c..b6db4910359bb5 100644
+--- a/net/core/pktgen.c
++++ b/net/core/pktgen.c
+@@ -851,6 +851,9 @@ static ssize_t get_imix_entries(const char __user *buffer,
+ 		unsigned long weight;
+ 		unsigned long size;
+ 
++		if (pkt_dev->n_imix_entries >= MAX_IMIX_ENTRIES)
++			return -E2BIG;
++
+ 		len = num_arg(&buffer[i], max_digits, &size);
+ 		if (len < 0)
+ 			return len;
+@@ -880,9 +883,6 @@ static ssize_t get_imix_entries(const char __user *buffer,
+ 
+ 		i++;
+ 		pkt_dev->n_imix_entries++;
+-
+-		if (pkt_dev->n_imix_entries > MAX_IMIX_ENTRIES)
+-			return -E2BIG;
+ 	} while (c == ' ');
+ 
+ 	return i;
+diff --git a/net/mac802154/iface.c b/net/mac802154/iface.c
+index c0e2da5072bea2..9e4631fade90c9 100644
+--- a/net/mac802154/iface.c
++++ b/net/mac802154/iface.c
+@@ -684,6 +684,10 @@ void ieee802154_if_remove(struct ieee802154_sub_if_data *sdata)
+ 	ASSERT_RTNL();
+ 
+ 	mutex_lock(&sdata->local->iflist_mtx);
++	if (list_empty(&sdata->local->interfaces)) {
++		mutex_unlock(&sdata->local->iflist_mtx);
++		return;
++	}
+ 	list_del_rcu(&sdata->list);
+ 	mutex_unlock(&sdata->local->iflist_mtx);
+ 
+diff --git a/net/mptcp/options.c b/net/mptcp/options.c
+index a62bc874bf1e17..123f3f2972841a 100644
+--- a/net/mptcp/options.c
++++ b/net/mptcp/options.c
+@@ -607,7 +607,6 @@ static bool mptcp_established_options_dss(struct sock *sk, struct sk_buff *skb,
+ 	}
+ 	opts->ext_copy.use_ack = 1;
+ 	opts->suboptions = OPTION_MPTCP_DSS;
+-	WRITE_ONCE(msk->old_wspace, __mptcp_space((struct sock *)msk));
+ 
+ 	/* Add kind/length/subtype/flag overhead if mapping is not populated */
+ 	if (dss_size == 0)
+@@ -1288,7 +1287,7 @@ static void mptcp_set_rwin(struct tcp_sock *tp, struct tcphdr *th)
+ 			}
+ 			MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_RCVWNDCONFLICT);
+ 		}
+-		return;
++		goto update_wspace;
+ 	}
+ 
+ 	if (rcv_wnd_new != rcv_wnd_old) {
+@@ -1313,6 +1312,9 @@ static void mptcp_set_rwin(struct tcp_sock *tp, struct tcphdr *th)
+ 		th->window = htons(new_win);
+ 		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_RCVWNDSHARED);
+ 	}
++
++update_wspace:
++	WRITE_ONCE(msk->old_wspace, tp->rcv_wnd);
+ }
+ 
+ __sum16 __mptcp_make_csum(u64 data_seq, u32 subflow_seq, u16 data_len, __wsum sum)
+diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
+index a93e661ef5c435..73526f1d768fcb 100644
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -760,10 +760,15 @@ static inline u64 mptcp_data_avail(const struct mptcp_sock *msk)
+ 
+ static inline bool mptcp_epollin_ready(const struct sock *sk)
+ {
++	u64 data_avail = mptcp_data_avail(mptcp_sk(sk));
++
++	if (!data_avail)
++		return false;
++
+ 	/* mptcp doesn't have to deal with small skbs in the receive queue,
+-	 * at it can always coalesce them
++	 * as it can always coalesce them
+ 	 */
+-	return (mptcp_data_avail(mptcp_sk(sk)) >= sk->sk_rcvlowat) ||
++	return (data_avail >= sk->sk_rcvlowat) ||
+ 	       (mem_cgroup_sockets_enabled && sk->sk_memcg &&
+ 		mem_cgroup_under_socket_pressure(sk->sk_memcg)) ||
+ 	       READ_ONCE(tcp_memory_pressure);
+diff --git a/net/ncsi/internal.h b/net/ncsi/internal.h
+index ef0f8f73826f53..4e0842df5234ea 100644
+--- a/net/ncsi/internal.h
++++ b/net/ncsi/internal.h
+@@ -289,6 +289,7 @@ enum {
+ 	ncsi_dev_state_config_sp	= 0x0301,
+ 	ncsi_dev_state_config_cis,
+ 	ncsi_dev_state_config_oem_gma,
++	ncsi_dev_state_config_apply_mac,
+ 	ncsi_dev_state_config_clear_vids,
+ 	ncsi_dev_state_config_svf,
+ 	ncsi_dev_state_config_ev,
+@@ -322,6 +323,7 @@ struct ncsi_dev_priv {
+ #define NCSI_DEV_RESHUFFLE	4
+ #define NCSI_DEV_RESET		8            /* Reset state of NC          */
+ 	unsigned int        gma_flag;        /* OEM GMA flag               */
++	struct sockaddr     pending_mac;     /* MAC address received from GMA */
+ 	spinlock_t          lock;            /* Protect the NCSI device    */
+ 	unsigned int        package_probe_id;/* Current ID during probe    */
+ 	unsigned int        package_num;     /* Number of packages         */
+diff --git a/net/ncsi/ncsi-manage.c b/net/ncsi/ncsi-manage.c
+index 5cf55bde366d18..bf276eaf933075 100644
+--- a/net/ncsi/ncsi-manage.c
++++ b/net/ncsi/ncsi-manage.c
+@@ -1038,7 +1038,7 @@ static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
+ 			  : ncsi_dev_state_config_clear_vids;
+ 		break;
+ 	case ncsi_dev_state_config_oem_gma:
+-		nd->state = ncsi_dev_state_config_clear_vids;
++		nd->state = ncsi_dev_state_config_apply_mac;
+ 
+ 		nca.package = np->id;
+ 		nca.channel = nc->id;
+@@ -1050,10 +1050,22 @@ static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
+ 			nca.type = NCSI_PKT_CMD_OEM;
+ 			ret = ncsi_gma_handler(&nca, nc->version.mf_id);
+ 		}
+-		if (ret < 0)
++		if (ret < 0) {
++			nd->state = ncsi_dev_state_config_clear_vids;
+ 			schedule_work(&ndp->work);
++		}
+ 
+ 		break;
++	case ncsi_dev_state_config_apply_mac:
++		rtnl_lock();
++		ret = dev_set_mac_address(dev, &ndp->pending_mac, NULL);
++		rtnl_unlock();
++		if (ret < 0)
++			netdev_warn(dev, "NCSI: 'Writing MAC address to device failed\n");
++
++		nd->state = ncsi_dev_state_config_clear_vids;
++
++		fallthrough;
+ 	case ncsi_dev_state_config_clear_vids:
+ 	case ncsi_dev_state_config_svf:
+ 	case ncsi_dev_state_config_ev:
+diff --git a/net/ncsi/ncsi-rsp.c b/net/ncsi/ncsi-rsp.c
+index e28be33bdf2c48..14bd66909ca455 100644
+--- a/net/ncsi/ncsi-rsp.c
++++ b/net/ncsi/ncsi-rsp.c
+@@ -628,16 +628,14 @@ static int ncsi_rsp_handler_snfc(struct ncsi_request *nr)
+ static int ncsi_rsp_handler_oem_gma(struct ncsi_request *nr, int mfr_id)
+ {
+ 	struct ncsi_dev_priv *ndp = nr->ndp;
++	struct sockaddr *saddr = &ndp->pending_mac;
+ 	struct net_device *ndev = ndp->ndev.dev;
+ 	struct ncsi_rsp_oem_pkt *rsp;
+-	struct sockaddr saddr;
+ 	u32 mac_addr_off = 0;
+-	int ret = 0;
+ 
+ 	/* Get the response header */
+ 	rsp = (struct ncsi_rsp_oem_pkt *)skb_network_header(nr->rsp);
+ 
+-	saddr.sa_family = ndev->type;
+ 	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+ 	if (mfr_id == NCSI_OEM_MFR_BCM_ID)
+ 		mac_addr_off = BCM_MAC_ADDR_OFFSET;
+@@ -646,22 +644,17 @@ static int ncsi_rsp_handler_oem_gma(struct ncsi_request *nr, int mfr_id)
+ 	else if (mfr_id == NCSI_OEM_MFR_INTEL_ID)
+ 		mac_addr_off = INTEL_MAC_ADDR_OFFSET;
+ 
+-	memcpy(saddr.sa_data, &rsp->data[mac_addr_off], ETH_ALEN);
++	saddr->sa_family = ndev->type;
++	memcpy(saddr->sa_data, &rsp->data[mac_addr_off], ETH_ALEN);
+ 	if (mfr_id == NCSI_OEM_MFR_BCM_ID || mfr_id == NCSI_OEM_MFR_INTEL_ID)
+-		eth_addr_inc((u8 *)saddr.sa_data);
+-	if (!is_valid_ether_addr((const u8 *)saddr.sa_data))
++		eth_addr_inc((u8 *)saddr->sa_data);
++	if (!is_valid_ether_addr((const u8 *)saddr->sa_data))
+ 		return -ENXIO;
+ 
+ 	/* Set the flag for GMA command which should only be called once */
+ 	ndp->gma_flag = 1;
+ 
+-	rtnl_lock();
+-	ret = dev_set_mac_address(ndev, &saddr, NULL);
+-	rtnl_unlock();
+-	if (ret < 0)
+-		netdev_warn(ndev, "NCSI: 'Writing mac address to device failed\n");
+-
+-	return ret;
++	return 0;
+ }
+ 
+ /* Response handler for Mellanox card */
+diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
+index 16e26001468449..704c858cf2093b 100644
+--- a/net/openvswitch/actions.c
++++ b/net/openvswitch/actions.c
+@@ -934,7 +934,9 @@ static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
+ {
+ 	struct vport *vport = ovs_vport_rcu(dp, out_port);
+ 
+-	if (likely(vport && netif_carrier_ok(vport->dev))) {
++	if (likely(vport &&
++		   netif_running(vport->dev) &&
++		   netif_carrier_ok(vport->dev))) {
+ 		u16 mru = OVS_CB(skb)->mru;
+ 		u32 cutlen = OVS_CB(skb)->cutlen;
+ 
+diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
+index b52b798aa4c292..15724f171b0f96 100644
+--- a/net/vmw_vsock/af_vsock.c
++++ b/net/vmw_vsock/af_vsock.c
+@@ -491,6 +491,15 @@ int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk)
+ 		 */
+ 		vsk->transport->release(vsk);
+ 		vsock_deassign_transport(vsk);
++
++		/* transport's release() and destruct() can touch some socket
++		 * state, since we are reassigning the socket to a new transport
++		 * during vsock_connect(), let's reset these fields to have a
++		 * clean state.
++		 */
++		sock_reset_flag(sk, SOCK_DONE);
++		sk->sk_state = TCP_CLOSE;
++		vsk->peer_shutdown = 0;
+ 	}
+ 
+ 	/* We increase the module refcnt to prevent the transport unloading
+@@ -870,6 +879,9 @@ EXPORT_SYMBOL_GPL(vsock_create_connected);
+ 
+ s64 vsock_stream_has_data(struct vsock_sock *vsk)
+ {
++	if (WARN_ON(!vsk->transport))
++		return 0;
++
+ 	return vsk->transport->stream_has_data(vsk);
+ }
+ EXPORT_SYMBOL_GPL(vsock_stream_has_data);
+@@ -878,6 +890,9 @@ s64 vsock_connectible_has_data(struct vsock_sock *vsk)
+ {
+ 	struct sock *sk = sk_vsock(vsk);
+ 
++	if (WARN_ON(!vsk->transport))
++		return 0;
++
+ 	if (sk->sk_type == SOCK_SEQPACKET)
+ 		return vsk->transport->seqpacket_has_data(vsk);
+ 	else
+@@ -887,6 +902,9 @@ EXPORT_SYMBOL_GPL(vsock_connectible_has_data);
+ 
+ s64 vsock_stream_has_space(struct vsock_sock *vsk)
+ {
++	if (WARN_ON(!vsk->transport))
++		return 0;
++
+ 	return vsk->transport->stream_has_space(vsk);
+ }
+ EXPORT_SYMBOL_GPL(vsock_stream_has_space);
+diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
+index 9acc13ab3f822d..7f7de6d8809655 100644
+--- a/net/vmw_vsock/virtio_transport_common.c
++++ b/net/vmw_vsock/virtio_transport_common.c
+@@ -26,6 +26,9 @@
+ /* Threshold for detecting small packets to copy */
+ #define GOOD_COPY_LEN  128
+ 
++static void virtio_transport_cancel_close_work(struct vsock_sock *vsk,
++					       bool cancel_timeout);
++
+ static const struct virtio_transport *
+ virtio_transport_get_ops(struct vsock_sock *vsk)
+ {
+@@ -1109,6 +1112,8 @@ void virtio_transport_destruct(struct vsock_sock *vsk)
+ {
+ 	struct virtio_vsock_sock *vvs = vsk->trans;
+ 
++	virtio_transport_cancel_close_work(vsk, true);
++
+ 	kfree(vvs);
+ 	vsk->trans = NULL;
+ }
+@@ -1204,17 +1209,11 @@ static void virtio_transport_wait_close(struct sock *sk, long timeout)
+ 	}
+ }
+ 
+-static void virtio_transport_do_close(struct vsock_sock *vsk,
+-				      bool cancel_timeout)
++static void virtio_transport_cancel_close_work(struct vsock_sock *vsk,
++					       bool cancel_timeout)
+ {
+ 	struct sock *sk = sk_vsock(vsk);
+ 
+-	sock_set_flag(sk, SOCK_DONE);
+-	vsk->peer_shutdown = SHUTDOWN_MASK;
+-	if (vsock_stream_has_data(vsk) <= 0)
+-		sk->sk_state = TCP_CLOSING;
+-	sk->sk_state_change(sk);
+-
+ 	if (vsk->close_work_scheduled &&
+ 	    (!cancel_timeout || cancel_delayed_work(&vsk->close_work))) {
+ 		vsk->close_work_scheduled = false;
+@@ -1226,6 +1225,20 @@ static void virtio_transport_do_close(struct vsock_sock *vsk,
+ 	}
+ }
+ 
++static void virtio_transport_do_close(struct vsock_sock *vsk,
++				      bool cancel_timeout)
++{
++	struct sock *sk = sk_vsock(vsk);
++
++	sock_set_flag(sk, SOCK_DONE);
++	vsk->peer_shutdown = SHUTDOWN_MASK;
++	if (vsock_stream_has_data(vsk) <= 0)
++		sk->sk_state = TCP_CLOSING;
++	sk->sk_state_change(sk);
++
++	virtio_transport_cancel_close_work(vsk, cancel_timeout);
++}
++
+ static void virtio_transport_close_timeout(struct work_struct *work)
+ {
+ 	struct vsock_sock *vsk =
+@@ -1628,8 +1641,11 @@ void virtio_transport_recv_pkt(struct virtio_transport *t,
+ 
+ 	lock_sock(sk);
+ 
+-	/* Check if sk has been closed before lock_sock */
+-	if (sock_flag(sk, SOCK_DONE)) {
++	/* Check if sk has been closed or assigned to another transport before
++	 * lock_sock (note: listener sockets are not assigned to any transport)
++	 */
++	if (sock_flag(sk, SOCK_DONE) ||
++	    (sk->sk_state != TCP_LISTEN && vsk->transport != &t->transport)) {
+ 		(void)virtio_transport_reset_no_sock(t, skb);
+ 		release_sock(sk);
+ 		sock_put(sk);
+diff --git a/net/vmw_vsock/vsock_bpf.c b/net/vmw_vsock/vsock_bpf.c
+index 4aa6e74ec2957b..f201d9eca1df2f 100644
+--- a/net/vmw_vsock/vsock_bpf.c
++++ b/net/vmw_vsock/vsock_bpf.c
+@@ -77,6 +77,7 @@ static int vsock_bpf_recvmsg(struct sock *sk, struct msghdr *msg,
+ 			     size_t len, int flags, int *addr_len)
+ {
+ 	struct sk_psock *psock;
++	struct vsock_sock *vsk;
+ 	int copied;
+ 
+ 	psock = sk_psock_get(sk);
+@@ -84,6 +85,13 @@ static int vsock_bpf_recvmsg(struct sock *sk, struct msghdr *msg,
+ 		return __vsock_recvmsg(sk, msg, len, flags);
+ 
+ 	lock_sock(sk);
++	vsk = vsock_sk(sk);
++
++	if (!vsk->transport) {
++		copied = -ENODEV;
++		goto out;
++	}
++
+ 	if (vsock_has_data(sk, psock) && sk_psock_queue_empty(psock)) {
+ 		release_sock(sk);
+ 		sk_psock_put(sk, psock);
+@@ -108,6 +116,7 @@ static int vsock_bpf_recvmsg(struct sock *sk, struct msghdr *msg,
+ 		copied = sk_msg_recvmsg(sk, psock, msg, len, flags);
+ 	}
+ 
++out:
+ 	release_sock(sk);
+ 	sk_psock_put(sk, psock);
+ 
+diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c
+index 14df15e3569525..105706abf281b2 100644
+--- a/security/apparmor/policy.c
++++ b/security/apparmor/policy.c
+@@ -626,6 +626,7 @@ struct aa_profile *aa_alloc_null(struct aa_profile *parent, const char *name,
+ 
+ 	/* TODO: ideally we should inherit abi from parent */
+ 	profile->label.flags |= FLAG_NULL;
++	profile->attach.xmatch = aa_get_pdb(nullpdb);
+ 	rules = list_first_entry(&profile->rules, typeof(*rules), list);
+ 	rules->file = aa_get_pdb(nullpdb);
+ 	rules->policy = aa_get_pdb(nullpdb);
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 3ed82f98e2de9e..a9f6138b59b0c1 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -10625,6 +10625,8 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1043, 0x1e1f, "ASUS Vivobook 15 X1504VAP", ALC2XX_FIXUP_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x1e51, "ASUS Zephyrus M15", ALC294_FIXUP_ASUS_GU502_PINS),
+ 	SND_PCI_QUIRK(0x1043, 0x1e5e, "ASUS ROG Strix G513", ALC294_FIXUP_ASUS_G513_PINS),
++	SND_PCI_QUIRK(0x1043, 0x1e63, "ASUS H7606W", ALC285_FIXUP_ASUS_GU605_SPI_SPEAKER2_TO_DAC1),
++	SND_PCI_QUIRK(0x1043, 0x1e83, "ASUS GA605W", ALC285_FIXUP_ASUS_GU605_SPI_SPEAKER2_TO_DAC1),
+ 	SND_PCI_QUIRK(0x1043, 0x1e8e, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA401),
+ 	SND_PCI_QUIRK(0x1043, 0x1eb3, "ASUS Ally RCLA72", ALC287_FIXUP_TAS2781_I2C),
+ 	SND_PCI_QUIRK(0x1043, 0x1ed3, "ASUS HN7306W", ALC287_FIXUP_CS35L41_I2C_2),
+@@ -10979,6 +10981,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1d72, 0x1901, "RedmiBook 14", ALC256_FIXUP_ASUS_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1d72, 0x1945, "Redmi G", ALC256_FIXUP_ASUS_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1d72, 0x1947, "RedmiBook Air", ALC255_FIXUP_XIAOMI_HEADSET_MIC),
++	SND_PCI_QUIRK(0x1f66, 0x0105, "Ayaneo Portable Game Player", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x2782, 0x0214, "VAIO VJFE-CL", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ 	SND_PCI_QUIRK(0x2782, 0x0228, "Infinix ZERO BOOK 13", ALC269VB_FIXUP_INFINIX_ZERO_BOOK_13),
+ 	SND_PCI_QUIRK(0x2782, 0x0232, "CHUWI CoreBook XPro", ALC269VB_FIXUP_CHUWI_COREBOOK_XPRO),
+diff --git a/tools/net/ynl/ynl-gen-c.py b/tools/net/ynl/ynl-gen-c.py
+index 717530bc9c52e7..463f1394ab971b 100755
+--- a/tools/net/ynl/ynl-gen-c.py
++++ b/tools/net/ynl/ynl-gen-c.py
+@@ -2361,6 +2361,17 @@ def print_kernel_family_struct_src(family, cw):
+     if not kernel_can_gen_family_struct(family):
+         return
+ 
++    if 'sock-priv' in family.kernel_family:
++        # Generate "trampolines" to make CFI happy
++        cw.write_func("static void", f"__{family.c_name}_nl_sock_priv_init",
++                      [f"{family.c_name}_nl_sock_priv_init(priv);"],
++                      ["void *priv"])
++        cw.nl()
++        cw.write_func("static void", f"__{family.c_name}_nl_sock_priv_destroy",
++                      [f"{family.c_name}_nl_sock_priv_destroy(priv);"],
++                      ["void *priv"])
++        cw.nl()
++
+     cw.block_start(f"struct genl_family {family.ident_name}_nl_family __ro_after_init =")
+     cw.p('.name\t\t= ' + family.fam_key + ',')
+     cw.p('.version\t= ' + family.ver_key + ',')
+@@ -2378,9 +2389,8 @@ def print_kernel_family_struct_src(family, cw):
+         cw.p(f'.n_mcgrps\t= ARRAY_SIZE({family.c_name}_nl_mcgrps),')
+     if 'sock-priv' in family.kernel_family:
+         cw.p(f'.sock_priv_size\t= sizeof({family.kernel_family["sock-priv"]}),')
+-        # Force cast here, actual helpers take pointer to the real type.
+-        cw.p(f'.sock_priv_init\t= (void *){family.c_name}_nl_sock_priv_init,')
+-        cw.p(f'.sock_priv_destroy = (void *){family.c_name}_nl_sock_priv_destroy,')
++        cw.p(f'.sock_priv_init\t= __{family.c_name}_nl_sock_priv_init,')
++        cw.p(f'.sock_priv_destroy = __{family.c_name}_nl_sock_priv_destroy,')
+     cw.block_end(';')
+ 
+ 
+diff --git a/tools/testing/selftests/mm/cow.c b/tools/testing/selftests/mm/cow.c
+index 32c6ccc2a6be98..1238e1c5aae150 100644
+--- a/tools/testing/selftests/mm/cow.c
++++ b/tools/testing/selftests/mm/cow.c
+@@ -758,7 +758,7 @@ static void do_run_with_base_page(test_fn fn, bool swapout)
+ 	}
+ 
+ 	/* Populate a base page. */
+-	memset(mem, 0, pagesize);
++	memset(mem, 1, pagesize);
+ 
+ 	if (swapout) {
+ 		madvise(mem, pagesize, MADV_PAGEOUT);
+@@ -824,12 +824,12 @@ static void do_run_with_thp(test_fn fn, enum thp_run thp_run, size_t thpsize)
+ 	 * Try to populate a THP. Touch the first sub-page and test if
+ 	 * we get the last sub-page populated automatically.
+ 	 */
+-	mem[0] = 0;
++	mem[0] = 1;
+ 	if (!pagemap_is_populated(pagemap_fd, mem + thpsize - pagesize)) {
+ 		ksft_test_result_skip("Did not get a THP populated\n");
+ 		goto munmap;
+ 	}
+-	memset(mem, 0, thpsize);
++	memset(mem, 1, thpsize);
+ 
+ 	size = thpsize;
+ 	switch (thp_run) {
+@@ -1012,7 +1012,7 @@ static void run_with_hugetlb(test_fn fn, const char *desc, size_t hugetlbsize)
+ 	}
+ 
+ 	/* Populate an huge page. */
+-	memset(mem, 0, hugetlbsize);
++	memset(mem, 1, hugetlbsize);
+ 
+ 	/*
+ 	 * We need a total of two hugetlb pages to handle COW/unsharing
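
The cow.c hunks above switch the fill byte from 0 to 1, and that is
deliberate rather than cosmetic: zero-filled pages are exactly what the
kernel's dedup machinery targets (the shared zeropage, and in 6.12 the THP
shrinker that remaps zero-filled subpages), so a COW test that populates
with zeros can have its "exclusive" pages quietly replaced. A hedged sketch
of the populate step, with illustrative names:

    #include <string.h>
    #include <sys/mman.h>

    static char *populate_private_page(size_t pagesize)
    {
            char *mem = mmap(NULL, pagesize, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            if (mem == MAP_FAILED)
                    return NULL;
            /*
             * A nonzero fill keeps the page from being folded back into
             * the shared zeropage by later optimizations, so the COW
             * checks really exercise a private, populated page.
             */
            memset(mem, 1, pagesize);
            return mem;
    }
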
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.c b/tools/testing/selftests/net/mptcp/mptcp_connect.c
+index 4209b95690394b..414addef9a4514 100644
+--- a/tools/testing/selftests/net/mptcp/mptcp_connect.c
++++ b/tools/testing/selftests/net/mptcp/mptcp_connect.c
+@@ -25,6 +25,8 @@
+ #include <sys/types.h>
+ #include <sys/mman.h>
+ 
++#include <arpa/inet.h>
++
+ #include <netdb.h>
+ #include <netinet/in.h>
+ 
+@@ -1211,23 +1213,42 @@ static void parse_setsock_options(const char *name)
+ 	exit(1);
+ }
+ 
+-void xdisconnect(int fd, int addrlen)
++void xdisconnect(int fd)
+ {
+-	struct sockaddr_storage empty;
++	socklen_t addrlen = sizeof(struct sockaddr_storage);
++	struct sockaddr_storage addr, empty;
+ 	int msec_sleep = 10;
+-	int queued = 1;
+-	int i;
++	void *raw_addr;
++	int i, cmdlen;
++	char cmd[128];
++
++	/* get the local address and convert it to string */
++	if (getsockname(fd, (struct sockaddr *)&addr, &addrlen) < 0)
++		xerror("getsockname");
++
++	if (addr.ss_family == AF_INET)
++		raw_addr = &(((struct sockaddr_in *)&addr)->sin_addr);
++	else if (addr.ss_family == AF_INET6)
++		raw_addr = &(((struct sockaddr_in6 *)&addr)->sin6_addr);
++	else
++		xerror("bad family");
++
++	strcpy(cmd, "ss -M | grep -q ");
++	cmdlen = strlen(cmd);
++	if (!inet_ntop(addr.ss_family, raw_addr, &cmd[cmdlen],
++		       sizeof(cmd) - cmdlen))
++		xerror("inet_ntop");
+ 
+ 	shutdown(fd, SHUT_WR);
+ 
+-	/* while until the pending data is completely flushed, the later
++	/*
++	 * wait until the pending data is completely flushed and all
++	 * the MPTCP sockets reached the closed status.
+ 	 * disconnect will bypass/ignore/drop any pending data.
+ 	 */
+ 	for (i = 0; ; i += msec_sleep) {
+-		if (ioctl(fd, SIOCOUTQ, &queued) < 0)
+-			xerror("can't query out socket queue: %d", errno);
+-
+-		if (!queued)
++		/* closed sockets are not listed by 'ss' */
++		if (system(cmd) != 0)
+ 			break;
+ 
+ 		if (i > poll_timeout)
+@@ -1281,9 +1302,9 @@ int main_loop(void)
+ 		return ret;
+ 
+ 	if (cfg_truncate > 0) {
+-		xdisconnect(fd, peer->ai_addrlen);
++		xdisconnect(fd);
+ 	} else if (--cfg_repeat > 0) {
+-		xdisconnect(fd, peer->ai_addrlen);
++		xdisconnect(fd);
+ 
+ 		/* the socket could be unblocking at this point, we need the
+ 		 * connect to be blocking
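
The rewritten xdisconnect() above stops polling SIOCOUTQ, which only proves
that the main socket's send queue drained, and instead waits until `ss -M`
no longer lists a socket bound to the local address, i.e. until every MPTCP
socket has actually reached the closed state. The generic shape of that
polling idiom, as a hedged sketch (helper name hypothetical):

    #include <stdlib.h>
    #include <unistd.h>

    /* Returns 0 once 'cmd' stops succeeding, -1 on timeout. system()
     * returns 0 only while "ss -M | grep -q <addr>" still finds a match.
     */
    static int wait_until_gone(const char *cmd, int timeout_ms)
    {
            int waited_ms = 0, step_ms = 10;

            while (system(cmd) == 0) {
                    if (waited_ms > timeout_ms)
                            return -1;
                    usleep(step_ms * 1000);
                    waited_ms += step_ms;
            }
            return 0;
    }
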
+diff --git a/tools/testing/selftests/sched_ext/ddsp_bogus_dsq_fail.bpf.c b/tools/testing/selftests/sched_ext/ddsp_bogus_dsq_fail.bpf.c
+index 37d9bf6fb7458d..6f4c3f5a1c5d99 100644
+--- a/tools/testing/selftests/sched_ext/ddsp_bogus_dsq_fail.bpf.c
++++ b/tools/testing/selftests/sched_ext/ddsp_bogus_dsq_fail.bpf.c
+@@ -20,7 +20,7 @@ s32 BPF_STRUCT_OPS(ddsp_bogus_dsq_fail_select_cpu, struct task_struct *p,
+ 		 * If we dispatch to a bogus DSQ that will fall back to the
+ 		 * builtin global DSQ, we fail gracefully.
+ 		 */
+-		scx_bpf_dispatch_vtime(p, 0xcafef00d, SCX_SLICE_DFL,
++		scx_bpf_dsq_insert_vtime(p, 0xcafef00d, SCX_SLICE_DFL,
+ 				       p->scx.dsq_vtime, 0);
+ 		return cpu;
+ 	}
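
This and the following sched_ext selftest hunks are mechanical fallout from
the 6.12 kfunc renames: scx_bpf_dispatch() became scx_bpf_dsq_insert(),
scx_bpf_dispatch_vtime() became scx_bpf_dsq_insert_vtime(), and
scx_bpf_consume() became scx_bpf_dsq_move_to_local(). A minimal
before/after sketch of an enqueue/dispatch pair (an illustrative scheduler,
not one of the tests):

    #define MY_DSQ_ID 0

    void BPF_STRUCT_OPS(sketch_enqueue, struct task_struct *p, u64 enq_flags)
    {
            /* was: scx_bpf_dispatch(p, MY_DSQ_ID, SCX_SLICE_DFL, enq_flags); */
            scx_bpf_dsq_insert(p, MY_DSQ_ID, SCX_SLICE_DFL, enq_flags);
    }

    void BPF_STRUCT_OPS(sketch_dispatch, s32 cpu, struct task_struct *prev)
    {
            /* was: scx_bpf_consume(MY_DSQ_ID); */
            scx_bpf_dsq_move_to_local(MY_DSQ_ID);
    }
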
+diff --git a/tools/testing/selftests/sched_ext/ddsp_vtimelocal_fail.bpf.c b/tools/testing/selftests/sched_ext/ddsp_vtimelocal_fail.bpf.c
+index dffc97d9cdf141..e4a55027778fd0 100644
+--- a/tools/testing/selftests/sched_ext/ddsp_vtimelocal_fail.bpf.c
++++ b/tools/testing/selftests/sched_ext/ddsp_vtimelocal_fail.bpf.c
+@@ -17,8 +17,8 @@ s32 BPF_STRUCT_OPS(ddsp_vtimelocal_fail_select_cpu, struct task_struct *p,
+ 
+ 	if (cpu >= 0) {
+ 		/* Shouldn't be allowed to vtime dispatch to a builtin DSQ. */
+-		scx_bpf_dispatch_vtime(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL,
+-				       p->scx.dsq_vtime, 0);
++		scx_bpf_dsq_insert_vtime(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL,
++					 p->scx.dsq_vtime, 0);
+ 		return cpu;
+ 	}
+ 
+diff --git a/tools/testing/selftests/sched_ext/dsp_local_on.bpf.c b/tools/testing/selftests/sched_ext/dsp_local_on.bpf.c
+index 6a7db1502c29e1..fbda6bf5467128 100644
+--- a/tools/testing/selftests/sched_ext/dsp_local_on.bpf.c
++++ b/tools/testing/selftests/sched_ext/dsp_local_on.bpf.c
+@@ -43,9 +43,12 @@ void BPF_STRUCT_OPS(dsp_local_on_dispatch, s32 cpu, struct task_struct *prev)
+ 	if (!p)
+ 		return;
+ 
+-	target = bpf_get_prandom_u32() % nr_cpus;
++	if (p->nr_cpus_allowed == nr_cpus)
++		target = bpf_get_prandom_u32() % nr_cpus;
++	else
++		target = scx_bpf_task_cpu(p);
+ 
+-	scx_bpf_dispatch(p, SCX_DSQ_LOCAL_ON | target, SCX_SLICE_DFL, 0);
++	scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL_ON | target, SCX_SLICE_DFL, 0);
+ 	bpf_task_release(p);
+ }
+ 
+diff --git a/tools/testing/selftests/sched_ext/dsp_local_on.c b/tools/testing/selftests/sched_ext/dsp_local_on.c
+index 472851b5685487..0ff27e57fe4303 100644
+--- a/tools/testing/selftests/sched_ext/dsp_local_on.c
++++ b/tools/testing/selftests/sched_ext/dsp_local_on.c
+@@ -34,9 +34,10 @@ static enum scx_test_status run(void *ctx)
+ 	/* Just sleeping is fine, plenty of scheduling events happening */
+ 	sleep(1);
+ 
+-	SCX_EQ(skel->data->uei.kind, EXIT_KIND(SCX_EXIT_ERROR));
+ 	bpf_link__destroy(link);
+ 
++	SCX_EQ(skel->data->uei.kind, EXIT_KIND(SCX_EXIT_UNREG));
++
+ 	return SCX_TEST_PASS;
+ }
+ 
+@@ -50,7 +51,7 @@ static void cleanup(void *ctx)
+ struct scx_test dsp_local_on = {
+ 	.name = "dsp_local_on",
+ 	.description = "Verify we can directly dispatch tasks to a local DSQs "
+-		       "from osp.dispatch()",
++		       "from ops.dispatch()",
+ 	.setup = setup,
+ 	.run = run,
+ 	.cleanup = cleanup,
+diff --git a/tools/testing/selftests/sched_ext/enq_select_cpu_fails.bpf.c b/tools/testing/selftests/sched_ext/enq_select_cpu_fails.bpf.c
+index 1efb50d61040ad..a7cf868d5e311d 100644
+--- a/tools/testing/selftests/sched_ext/enq_select_cpu_fails.bpf.c
++++ b/tools/testing/selftests/sched_ext/enq_select_cpu_fails.bpf.c
+@@ -31,7 +31,7 @@ void BPF_STRUCT_OPS(enq_select_cpu_fails_enqueue, struct task_struct *p,
+ 	/* Can only call from ops.select_cpu() */
+ 	scx_bpf_select_cpu_dfl(p, 0, 0, &found);
+ 
+-	scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
++	scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
+ }
+ 
+ SEC(".struct_ops.link")
+diff --git a/tools/testing/selftests/sched_ext/exit.bpf.c b/tools/testing/selftests/sched_ext/exit.bpf.c
+index d75d4faf07f6d5..4bc36182d3ffc2 100644
+--- a/tools/testing/selftests/sched_ext/exit.bpf.c
++++ b/tools/testing/selftests/sched_ext/exit.bpf.c
+@@ -33,7 +33,7 @@ void BPF_STRUCT_OPS(exit_enqueue, struct task_struct *p, u64 enq_flags)
+ 	if (exit_point == EXIT_ENQUEUE)
+ 		EXIT_CLEANLY();
+ 
+-	scx_bpf_dispatch(p, DSQ_ID, SCX_SLICE_DFL, enq_flags);
++	scx_bpf_dsq_insert(p, DSQ_ID, SCX_SLICE_DFL, enq_flags);
+ }
+ 
+ void BPF_STRUCT_OPS(exit_dispatch, s32 cpu, struct task_struct *p)
+@@ -41,7 +41,7 @@ void BPF_STRUCT_OPS(exit_dispatch, s32 cpu, struct task_struct *p)
+ 	if (exit_point == EXIT_DISPATCH)
+ 		EXIT_CLEANLY();
+ 
+-	scx_bpf_consume(DSQ_ID);
++	scx_bpf_dsq_move_to_local(DSQ_ID);
+ }
+ 
+ void BPF_STRUCT_OPS(exit_enable, struct task_struct *p)
+diff --git a/tools/testing/selftests/sched_ext/maximal.bpf.c b/tools/testing/selftests/sched_ext/maximal.bpf.c
+index 4d4cd8d966dba6..430f5e13bf5544 100644
+--- a/tools/testing/selftests/sched_ext/maximal.bpf.c
++++ b/tools/testing/selftests/sched_ext/maximal.bpf.c
+@@ -12,6 +12,8 @@
+ 
+ char _license[] SEC("license") = "GPL";
+ 
++#define DSQ_ID 0
++
+ s32 BPF_STRUCT_OPS(maximal_select_cpu, struct task_struct *p, s32 prev_cpu,
+ 		   u64 wake_flags)
+ {
+@@ -20,7 +22,7 @@ s32 BPF_STRUCT_OPS(maximal_select_cpu, struct task_struct *p, s32 prev_cpu,
+ 
+ void BPF_STRUCT_OPS(maximal_enqueue, struct task_struct *p, u64 enq_flags)
+ {
+-	scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
++	scx_bpf_dsq_insert(p, DSQ_ID, SCX_SLICE_DFL, enq_flags);
+ }
+ 
+ void BPF_STRUCT_OPS(maximal_dequeue, struct task_struct *p, u64 deq_flags)
+@@ -28,7 +30,7 @@ void BPF_STRUCT_OPS(maximal_dequeue, struct task_struct *p, u64 deq_flags)
+ 
+ void BPF_STRUCT_OPS(maximal_dispatch, s32 cpu, struct task_struct *prev)
+ {
+-	scx_bpf_consume(SCX_DSQ_GLOBAL);
++	scx_bpf_dsq_move_to_local(DSQ_ID);
+ }
+ 
+ void BPF_STRUCT_OPS(maximal_runnable, struct task_struct *p, u64 enq_flags)
+@@ -123,7 +125,7 @@ void BPF_STRUCT_OPS(maximal_cgroup_set_weight, struct cgroup *cgrp, u32 weight)
+ 
+ s32 BPF_STRUCT_OPS_SLEEPABLE(maximal_init)
+ {
+-	return 0;
++	return scx_bpf_create_dsq(DSQ_ID, -1);
+ }
+ 
+ void BPF_STRUCT_OPS(maximal_exit, struct scx_exit_info *info)
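
Beyond the rename, maximal.bpf.c now routes everything through a private
DSQ created in ops.init(), since the new scx_bpf_dsq_move_to_local() cannot
be pointed at the built-in SCX_DSQ_GLOBAL the way scx_bpf_consume() used to
be. The create-then-use pattern, sketched with illustrative names:

    #define DSQ_ID 0

    s32 BPF_STRUCT_OPS_SLEEPABLE(sketch_init)
    {
            /* Create the queue once; -1 means no NUMA node preference. */
            return scx_bpf_create_dsq(DSQ_ID, -1);
    }

    void BPF_STRUCT_OPS(sketch_dispatch, s32 cpu, struct task_struct *prev)
    {
            /* Pull from our own queue into the local per-CPU DSQ. */
            scx_bpf_dsq_move_to_local(DSQ_ID);
    }
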
+diff --git a/tools/testing/selftests/sched_ext/select_cpu_dfl.bpf.c b/tools/testing/selftests/sched_ext/select_cpu_dfl.bpf.c
+index f171ac47097060..13d0f5be788d12 100644
+--- a/tools/testing/selftests/sched_ext/select_cpu_dfl.bpf.c
++++ b/tools/testing/selftests/sched_ext/select_cpu_dfl.bpf.c
+@@ -30,7 +30,7 @@ void BPF_STRUCT_OPS(select_cpu_dfl_enqueue, struct task_struct *p,
+ 	}
+ 	scx_bpf_put_idle_cpumask(idle_mask);
+ 
+-	scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
++	scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
+ }
+ 
+ SEC(".struct_ops.link")
+diff --git a/tools/testing/selftests/sched_ext/select_cpu_dfl_nodispatch.bpf.c b/tools/testing/selftests/sched_ext/select_cpu_dfl_nodispatch.bpf.c
+index 9efdbb7da92887..815f1d5d61ac43 100644
+--- a/tools/testing/selftests/sched_ext/select_cpu_dfl_nodispatch.bpf.c
++++ b/tools/testing/selftests/sched_ext/select_cpu_dfl_nodispatch.bpf.c
+@@ -67,7 +67,7 @@ void BPF_STRUCT_OPS(select_cpu_dfl_nodispatch_enqueue, struct task_struct *p,
+ 		saw_local = true;
+ 	}
+ 
+-	scx_bpf_dispatch(p, dsq_id, SCX_SLICE_DFL, enq_flags);
++	scx_bpf_dsq_insert(p, dsq_id, SCX_SLICE_DFL, enq_flags);
+ }
+ 
+ s32 BPF_STRUCT_OPS(select_cpu_dfl_nodispatch_init_task,
+diff --git a/tools/testing/selftests/sched_ext/select_cpu_dispatch.bpf.c b/tools/testing/selftests/sched_ext/select_cpu_dispatch.bpf.c
+index 59bfc4f36167a7..4bb99699e9209c 100644
+--- a/tools/testing/selftests/sched_ext/select_cpu_dispatch.bpf.c
++++ b/tools/testing/selftests/sched_ext/select_cpu_dispatch.bpf.c
+@@ -29,7 +29,7 @@ s32 BPF_STRUCT_OPS(select_cpu_dispatch_select_cpu, struct task_struct *p,
+ 	cpu = prev_cpu;
+ 
+ dispatch:
+-	scx_bpf_dispatch(p, dsq_id, SCX_SLICE_DFL, 0);
++	scx_bpf_dsq_insert(p, dsq_id, SCX_SLICE_DFL, 0);
+ 	return cpu;
+ }
+ 
+diff --git a/tools/testing/selftests/sched_ext/select_cpu_dispatch_bad_dsq.bpf.c b/tools/testing/selftests/sched_ext/select_cpu_dispatch_bad_dsq.bpf.c
+index 3bbd5fcdfb18e0..2a75de11b2cfd5 100644
+--- a/tools/testing/selftests/sched_ext/select_cpu_dispatch_bad_dsq.bpf.c
++++ b/tools/testing/selftests/sched_ext/select_cpu_dispatch_bad_dsq.bpf.c
+@@ -18,7 +18,7 @@ s32 BPF_STRUCT_OPS(select_cpu_dispatch_bad_dsq_select_cpu, struct task_struct *p
+ 		   s32 prev_cpu, u64 wake_flags)
+ {
+ 	/* Dispatching to a random DSQ should fail. */
+-	scx_bpf_dispatch(p, 0xcafef00d, SCX_SLICE_DFL, 0);
++	scx_bpf_dsq_insert(p, 0xcafef00d, SCX_SLICE_DFL, 0);
+ 
+ 	return prev_cpu;
+ }
+diff --git a/tools/testing/selftests/sched_ext/select_cpu_dispatch_dbl_dsp.bpf.c b/tools/testing/selftests/sched_ext/select_cpu_dispatch_dbl_dsp.bpf.c
+index 0fda57fe0ecfae..99d075695c9743 100644
+--- a/tools/testing/selftests/sched_ext/select_cpu_dispatch_dbl_dsp.bpf.c
++++ b/tools/testing/selftests/sched_ext/select_cpu_dispatch_dbl_dsp.bpf.c
+@@ -18,8 +18,8 @@ s32 BPF_STRUCT_OPS(select_cpu_dispatch_dbl_dsp_select_cpu, struct task_struct *p
+ 		   s32 prev_cpu, u64 wake_flags)
+ {
+ 	/* Dispatching twice in a row is disallowed. */
+-	scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, 0);
+-	scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, 0);
++	scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, 0);
++	scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, 0);
+ 
+ 	return prev_cpu;
+ }
+diff --git a/tools/testing/selftests/sched_ext/select_cpu_vtime.bpf.c b/tools/testing/selftests/sched_ext/select_cpu_vtime.bpf.c
+index e6c67bcf5e6e35..bfcb96cd4954bd 100644
+--- a/tools/testing/selftests/sched_ext/select_cpu_vtime.bpf.c
++++ b/tools/testing/selftests/sched_ext/select_cpu_vtime.bpf.c
+@@ -2,8 +2,8 @@
+ /*
+  * A scheduler that validates that enqueue flags are properly stored and
+  * applied at dispatch time when a task is directly dispatched from
+- * ops.select_cpu(). We validate this by using scx_bpf_dispatch_vtime(), and
+- * making the test a very basic vtime scheduler.
++ * ops.select_cpu(). We validate this by using scx_bpf_dsq_insert_vtime(),
++ * and making the test a very basic vtime scheduler.
+  *
+  * Copyright (c) 2024 Meta Platforms, Inc. and affiliates.
+  * Copyright (c) 2024 David Vernet <dvernet@meta.com>
+@@ -47,13 +47,13 @@ s32 BPF_STRUCT_OPS(select_cpu_vtime_select_cpu, struct task_struct *p,
+ 	cpu = prev_cpu;
+ 	scx_bpf_test_and_clear_cpu_idle(cpu);
+ ddsp:
+-	scx_bpf_dispatch_vtime(p, VTIME_DSQ, SCX_SLICE_DFL, task_vtime(p), 0);
++	scx_bpf_dsq_insert_vtime(p, VTIME_DSQ, SCX_SLICE_DFL, task_vtime(p), 0);
+ 	return cpu;
+ }
+ 
+ void BPF_STRUCT_OPS(select_cpu_vtime_dispatch, s32 cpu, struct task_struct *p)
+ {
+-	if (scx_bpf_consume(VTIME_DSQ))
++	if (scx_bpf_dsq_move_to_local(VTIME_DSQ))
+ 		consumed = true;
+ }
+ 
+diff --git a/tools/testing/selftests/tc-testing/tc-tests/filters/flow.json b/tools/testing/selftests/tc-testing/tc-tests/filters/flow.json
+index 58189327f6444a..383fbda07245c8 100644
+--- a/tools/testing/selftests/tc-testing/tc-tests/filters/flow.json
++++ b/tools/testing/selftests/tc-testing/tc-tests/filters/flow.json
+@@ -78,10 +78,10 @@
+         "setup": [
+             "$TC qdisc add dev $DEV1 ingress"
+         ],
+-        "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 1 protocol ip flow map key dst rshift 0xff",
++        "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 1 protocol ip flow map key dst rshift 0x1f",
+         "expExitCode": "0",
+         "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 protocol ip prio 1 flow",
+-        "matchPattern": "filter parent ffff: protocol ip pref 1 flow chain [0-9]+ handle 0x1 map keys dst rshift 255 baseclass",
++        "matchPattern": "filter parent ffff: protocol ip pref 1 flow chain [0-9]+ handle 0x1 map keys dst rshift 31 baseclass",
+         "matchCount": "1",
+         "teardown": [
+             "$TC qdisc del dev $DEV1 ingress"
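
The flow-filter test fix above replaces rshift 0xff with 0x1f: the flow
classifier's keys are 32-bit, and shifting a 32-bit value by 32 or more is
undefined in C, so the largest meaningful shift count is 31. Illustrative
C:

    #include <stdint.h>

    static uint32_t apply_rshift(uint32_t key, unsigned int shift)
    {
            /* Clamp to the key width: valid shift counts are 0..31. */
            return key >> (shift & 0x1f);
    }
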
+diff --git a/tools/testing/shared/linux/maple_tree.h b/tools/testing/shared/linux/maple_tree.h
+index 06c89bdcc51541..f67d47d32857ce 100644
+--- a/tools/testing/shared/linux/maple_tree.h
++++ b/tools/testing/shared/linux/maple_tree.h
+@@ -2,6 +2,6 @@
+ #define atomic_t int32_t
+ #define atomic_inc(x) uatomic_inc(x)
+ #define atomic_read(x) uatomic_read(x)
+-#define atomic_set(x, y) do {} while (0)
++#define atomic_set(x, y) uatomic_set(x, y)
+ #define U8_MAX UCHAR_MAX
+ #include "../../../../include/linux/maple_tree.h"
+diff --git a/tools/testing/vma/linux/atomic.h b/tools/testing/vma/linux/atomic.h
+index e01f66f9898279..3e1b6adc027b99 100644
+--- a/tools/testing/vma/linux/atomic.h
++++ b/tools/testing/vma/linux/atomic.h
+@@ -6,7 +6,7 @@
+ #define atomic_t int32_t
+ #define atomic_inc(x) uatomic_inc(x)
+ #define atomic_read(x) uatomic_read(x)
+-#define atomic_set(x, y) do {} while (0)
++#define atomic_set(x, y) uatomic_set(x, y)
+ #define U8_MAX UCHAR_MAX
+ 
+ #endif	/* _LINUX_ATOMIC_H */
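
Both shim headers above previously defined atomic_set() as an empty
statement, so any userspace test relying on an initializing store (for
example resetting a node's reference count) silently kept the old value.
Mapping it to liburcu's uatomic_set() makes the store real. A hedged
sketch, assuming <urcu/uatomic.h>:

    #include <stdint.h>
    #include <urcu/uatomic.h>

    #define atomic_t int32_t

    static void reset_refcount(atomic_t *ref)
    {
            /* Under the old "do {} while (0)" stub this store vanished. */
            uatomic_set(ref, 1);
    }
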


^ permalink raw reply related	[flat|nested] 82+ messages in thread
* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2025-01-17 13:18 Mike Pagano
  0 siblings, 0 replies; 82+ messages in thread
From: Mike Pagano @ 2025-01-17 13:18 UTC (permalink / raw
  To: gentoo-commits

commit:     bbd5b42d3ff847b17f85f7ce29fa19f28f88b798
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Jan 13 17:18:55 2025 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Jan 13 17:18:55 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=bbd5b42d

BMQ(BitMap Queue) Scheduler r1 version bump

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                        |   2 +-
 ...=> 5020_BMQ-and-PDS-io-scheduler-v6.12-r1.patch | 252 +++++++++++++++------
 2 files changed, 184 insertions(+), 70 deletions(-)

diff --git a/0000_README b/0000_README
index 29d9187b..06b9cb3f 100644
--- a/0000_README
+++ b/0000_README
@@ -127,7 +127,7 @@ Patch:  5010_enable-cpu-optimizations-universal.patch
 From:   https://github.com/graysky2/kernel_compiler_patch
 Desc:   Kernel >= 5.15 patch enables gcc = v11.1+ optimizations for additional CPUs.
 
-Patch:  5020_BMQ-and-PDS-io-scheduler-v6.12-r0.patch
+Patch:  5020_BMQ-and-PDS-io-scheduler-v6.12-r1.patch
 From:   https://gitlab.com/alfredchen/projectc
 Desc:   BMQ(BitMap Queue) Scheduler. A new CPU scheduler developed from PDS(incld). Inspired by the scheduler in zircon.
 

diff --git a/5020_BMQ-and-PDS-io-scheduler-v6.12-r0.patch b/5020_BMQ-and-PDS-io-scheduler-v6.12-r1.patch
similarity index 98%
rename from 5020_BMQ-and-PDS-io-scheduler-v6.12-r0.patch
rename to 5020_BMQ-and-PDS-io-scheduler-v6.12-r1.patch
index 9eb3139f..532813fd 100644
--- a/5020_BMQ-and-PDS-io-scheduler-v6.12-r0.patch
+++ b/5020_BMQ-and-PDS-io-scheduler-v6.12-r1.patch
@@ -158,7 +158,7 @@ index 8874f681b056..59eb72bf7d5f 100644
  	[RLIMIT_RTTIME]		= {  RLIM_INFINITY,  RLIM_INFINITY },	\
  }
 diff --git a/include/linux/sched.h b/include/linux/sched.h
-index bb343136ddd0..212d9204e9aa 100644
+index bb343136ddd0..6adfea989b7b 100644
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
 @@ -804,9 +804,13 @@ struct task_struct {
@@ -212,7 +212,34 @@ index bb343136ddd0..212d9204e9aa 100644
  
  #ifdef CONFIG_CGROUP_SCHED
  	struct task_group		*sched_task_group;
-@@ -1609,6 +1628,15 @@ struct task_struct {
+@@ -878,11 +897,15 @@ struct task_struct {
+ 	const cpumask_t			*cpus_ptr;
+ 	cpumask_t			*user_cpus_ptr;
+ 	cpumask_t			cpus_mask;
++#ifndef CONFIG_SCHED_ALT
+ 	void				*migration_pending;
++#endif
+ #ifdef CONFIG_SMP
+ 	unsigned short			migration_disabled;
+ #endif
++#ifndef CONFIG_SCHED_ALT
+ 	unsigned short			migration_flags;
++#endif
+ 
+ #ifdef CONFIG_PREEMPT_RCU
+ 	int				rcu_read_lock_nesting;
+@@ -914,8 +937,10 @@ struct task_struct {
+ 
+ 	struct list_head		tasks;
+ #ifdef CONFIG_SMP
++#ifndef CONFIG_SCHED_ALT
+ 	struct plist_node		pushable_tasks;
+ 	struct rb_node			pushable_dl_tasks;
++#endif
+ #endif
+ 
+ 	struct mm_struct		*mm;
+@@ -1609,6 +1634,15 @@ struct task_struct {
  	 */
  };
  
@@ -228,7 +255,7 @@ index bb343136ddd0..212d9204e9aa 100644
  #define TASK_REPORT_IDLE	(TASK_REPORT + 1)
  #define TASK_REPORT_MAX		(TASK_REPORT_IDLE << 1)
  
-@@ -2135,7 +2163,11 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
+@@ -2135,7 +2169,11 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
  
  static inline bool task_is_runnable(struct task_struct *p)
  {
@@ -341,7 +368,7 @@ index 4237daa5ac7a..3cebd93c49c8 100644
  #else
  static inline void rebuild_sched_domains_energy(void)
 diff --git a/init/Kconfig b/init/Kconfig
-index c521e1421ad4..131a599fcde2 100644
+index c521e1421ad4..4a397b48a453 100644
 --- a/init/Kconfig
 +++ b/init/Kconfig
 @@ -652,6 +652,7 @@ config TASK_IO_ACCOUNTING
@@ -352,15 +379,7 @@ index c521e1421ad4..131a599fcde2 100644
  	select KERNFS
  	help
  	  Collect metrics that indicate how overcommitted the CPU, memory,
-@@ -817,6 +818,7 @@ menu "Scheduler features"
- config UCLAMP_TASK
- 	bool "Enable utilization clamping for RT/FAIR tasks"
- 	depends on CPU_FREQ_GOV_SCHEDUTIL
-+	depends on !SCHED_ALT
- 	help
- 	  This feature enables the scheduler to track the clamped utilization
- 	  of each CPU based on RUNNABLE tasks scheduled on that CPU.
-@@ -863,6 +865,35 @@ config UCLAMP_BUCKETS_COUNT
+@@ -863,6 +864,35 @@ config UCLAMP_BUCKETS_COUNT
  
  	  If in doubt, use the default value.
  
@@ -396,7 +415,7 @@ index c521e1421ad4..131a599fcde2 100644
  endmenu
  
  #
-@@ -928,6 +959,7 @@ config NUMA_BALANCING
+@@ -928,6 +958,7 @@ config NUMA_BALANCING
  	depends on ARCH_SUPPORTS_NUMA_BALANCING
  	depends on !ARCH_WANT_NUMA_VARIABLE_LOCALITY
  	depends on SMP && NUMA && MIGRATION && !PREEMPT_RT
@@ -404,23 +423,7 @@ index c521e1421ad4..131a599fcde2 100644
  	help
  	  This option adds support for automatic NUMA aware memory/task placement.
  	  The mechanism is quite primitive and is based on migrating memory when
-@@ -1036,6 +1068,7 @@ menuconfig CGROUP_SCHED
- 	  tasks.
- 
- if CGROUP_SCHED
-+if !SCHED_ALT
- config GROUP_SCHED_WEIGHT
- 	def_bool n
- 
-@@ -1073,6 +1106,7 @@ config EXT_GROUP_SCHED
- 	select GROUP_SCHED_WEIGHT
- 	default y
- 
-+endif #!SCHED_ALT
- endif #CGROUP_SCHED
- 
- config SCHED_MM_CID
-@@ -1334,6 +1368,7 @@ config CHECKPOINT_RESTORE
+@@ -1334,6 +1365,7 @@ config CHECKPOINT_RESTORE
  
  config SCHED_AUTOGROUP
  	bool "Automatic process group scheduling"
@@ -429,7 +432,7 @@ index c521e1421ad4..131a599fcde2 100644
  	select CGROUP_SCHED
  	select FAIR_GROUP_SCHED
 diff --git a/init/init_task.c b/init/init_task.c
-index 136a8231355a..03770079619a 100644
+index 136a8231355a..12c01ab8e718 100644
 --- a/init/init_task.c
 +++ b/init/init_task.c
 @@ -71,9 +71,16 @@ struct task_struct init_task __aligned(L1_CACHE_BYTES) = {
@@ -466,14 +469,20 @@ index 136a8231355a..03770079619a 100644
  	.se		= {
  		.group_node 	= LIST_HEAD_INIT(init_task.se.group_node),
  	},
-@@ -93,6 +110,7 @@ struct task_struct init_task __aligned(L1_CACHE_BYTES) = {
+@@ -93,10 +110,13 @@ struct task_struct init_task __aligned(L1_CACHE_BYTES) = {
  		.run_list	= LIST_HEAD_INIT(init_task.rt.run_list),
  		.time_slice	= RR_TIMESLICE,
  	},
 +#endif
  	.tasks		= LIST_HEAD_INIT(init_task.tasks),
++#ifndef CONFIG_SCHED_ALT
  #ifdef CONFIG_SMP
  	.pushable_tasks	= PLIST_NODE_INIT(init_task.pushable_tasks, MAX_PRIO),
+ #endif
++#endif
+ #ifdef CONFIG_CGROUP_SCHED
+ 	.sched_task_group = &root_task_group,
+ #endif
 diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
 index fe782cd77388..d27d2154d71a 100644
 --- a/kernel/Kconfig.preempt
@@ -700,10 +709,10 @@ index 976092b7bd45..31d587c16ec1 100644
  obj-y += build_utility.o
 diff --git a/kernel/sched/alt_core.c b/kernel/sched/alt_core.c
 new file mode 100644
-index 000000000000..c59691742340
+index 000000000000..0a08bc0176ac
 --- /dev/null
 +++ b/kernel/sched/alt_core.c
-@@ -0,0 +1,7458 @@
+@@ -0,0 +1,7515 @@
 +/*
 + *  kernel/sched/alt_core.c
 + *
@@ -782,7 +791,7 @@ index 000000000000..c59691742340
 +#define sched_feat(x)	(0)
 +#endif /* CONFIG_SCHED_DEBUG */
 +
-+#define ALT_SCHED_VERSION "v6.12-r0"
++#define ALT_SCHED_VERSION "v6.12-r1"
 +
 +#define STOP_PRIO		(MAX_RT_PRIO - 1)
 +
@@ -2144,8 +2153,6 @@ index 000000000000..c59691742340
 +	__set_task_cpu(p, new_cpu);
 +}
 +
-+#define MDF_FORCE_ENABLED	0x80
-+
 +static void
 +__do_set_cpus_ptr(struct task_struct *p, const struct cpumask *new_mask)
 +{
@@ -2186,8 +2193,6 @@ index 000000000000..c59691742340
 +	if (cpumask_test_cpu(cpu, &p->cpus_mask)) {
 +		cpu_rq(cpu)->nr_pinned++;
 +		p->migration_disabled = 1;
-+		p->migration_flags &= ~MDF_FORCE_ENABLED;
-+
 +		/*
 +		 * Violates locking rules! see comment in __do_set_cpus_ptr().
 +		 */
@@ -2237,6 +2242,15 @@ index 000000000000..c59691742340
 +}
 +EXPORT_SYMBOL_GPL(migrate_enable);
 +
++static void __migrate_force_enable(struct task_struct *p, struct rq *rq)
++{
++	if (likely(p->cpus_ptr != &p->cpus_mask))
++		__do_set_cpus_ptr(p, &p->cpus_mask);
++	p->migration_disabled = 0;
++	/* When p is migrate_disabled, rq->lock should be held */
++	rq->nr_pinned--;
++}
++
 +static inline bool rq_has_pinned_tasks(struct rq *rq)
 +{
 +	return rq->nr_pinned;
@@ -2417,6 +2431,9 @@ index 000000000000..c59691742340
 +
 +	__do_set_cpus_allowed(p, &ac);
 +
++	if (is_migration_disabled(p) && !cpumask_test_cpu(task_cpu(p), &p->cpus_mask))
++		__migrate_force_enable(p, task_rq(p));
++
 +	/*
 +	 * Because this is called with p->pi_lock held, it is not possible
 +	 * to use kfree() here (when PREEMPT_RT=y), therefore punt to using
@@ -2712,14 +2729,8 @@ index 000000000000..c59691742340
 +{
 +	/* Can the task run on the task's current CPU? If so, we're done */
 +	if (!cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) {
-+		if (p->migration_disabled) {
-+			if (likely(p->cpus_ptr != &p->cpus_mask))
-+				__do_set_cpus_ptr(p, &p->cpus_mask);
-+			p->migration_disabled = 0;
-+			p->migration_flags |= MDF_FORCE_ENABLED;
-+			/* When p is migrate_disabled, rq->lock should be held */
-+			rq->nr_pinned--;
-+		}
++		if (is_migration_disabled(p))
++			__migrate_force_enable(p, rq);
 +
 +		if (task_on_cpu(p) || READ_ONCE(p->__state) == TASK_WAKING) {
 +			struct migration_arg arg = { p, dest_cpu };
@@ -7178,9 +7189,6 @@ index 000000000000..c59691742340
 +	if (preempt_count() > 0)
 +		return;
 +
-+	if (current->migration_flags & MDF_FORCE_ENABLED)
-+		return;
-+
 +	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
 +		return;
 +	prev_jiffy = jiffies;
@@ -7374,6 +7382,43 @@ index 000000000000..c59691742340
 +{
 +}
 +
++#ifdef CONFIG_GROUP_SCHED_WEIGHT
++static int sched_group_set_shares(struct task_group *tg, unsigned long shares)
++{
++	return 0;
++}
++
++static int sched_group_set_idle(struct task_group *tg, long idle)
++{
++	return 0;
++}
++
++static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
++				struct cftype *cftype, u64 shareval)
++{
++	return sched_group_set_shares(css_tg(css), shareval);
++}
++
++static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
++			       struct cftype *cft)
++{
++	return 0;
++}
++
++static s64 cpu_idle_read_s64(struct cgroup_subsys_state *css,
++			       struct cftype *cft)
++{
++	return 0;
++}
++
++static int cpu_idle_write_s64(struct cgroup_subsys_state *css,
++				struct cftype *cft, s64 idle)
++{
++	return sched_group_set_idle(css_tg(css), idle);
++}
++#endif
++
++#ifdef CONFIG_CFS_BANDWIDTH
 +static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css,
 +				  struct cftype *cft)
 +{
@@ -7419,7 +7464,9 @@ index 000000000000..c59691742340
 +{
 +	return 0;
 +}
++#endif
 +
++#ifdef CONFIG_RT_GROUP_SCHED
 +static int cpu_rt_runtime_write(struct cgroup_subsys_state *css,
 +				struct cftype *cft, s64 val)
 +{
@@ -7443,7 +7490,9 @@ index 000000000000..c59691742340
 +{
 +	return 0;
 +}
++#endif
 +
++#ifdef CONFIG_UCLAMP_TASK_GROUP
 +static int cpu_uclamp_min_show(struct seq_file *sf, void *v)
 +{
 +	return 0;
@@ -7467,8 +7516,22 @@ index 000000000000..c59691742340
 +{
 +	return nbytes;
 +}
++#endif
 +
 +static struct cftype cpu_legacy_files[] = {
++#ifdef CONFIG_GROUP_SCHED_WEIGHT
++	{
++		.name = "shares",
++		.read_u64 = cpu_shares_read_u64,
++		.write_u64 = cpu_shares_write_u64,
++	},
++	{
++		.name = "idle",
++		.read_s64 = cpu_idle_read_s64,
++		.write_s64 = cpu_idle_write_s64,
++	},
++#endif
++#ifdef CONFIG_CFS_BANDWIDTH
 +	{
 +		.name = "cfs_quota_us",
 +		.read_s64 = cpu_cfs_quota_read_s64,
@@ -7492,6 +7555,8 @@ index 000000000000..c59691742340
 +		.name = "stat.local",
 +		.seq_show = cpu_cfs_local_stat_show,
 +	},
++#endif
++#ifdef CONFIG_RT_GROUP_SCHED
 +	{
 +		.name = "rt_runtime_us",
 +		.read_s64 = cpu_rt_runtime_read,
@@ -7502,6 +7567,8 @@ index 000000000000..c59691742340
 +		.read_u64 = cpu_rt_period_read_uint,
 +		.write_u64 = cpu_rt_period_write_uint,
 +	},
++#endif
++#ifdef CONFIG_UCLAMP_TASK_GROUP
 +	{
 +		.name = "uclamp.min",
 +		.flags = CFTYPE_NOT_ON_ROOT,
@@ -7514,9 +7581,11 @@ index 000000000000..c59691742340
 +		.seq_show = cpu_uclamp_max_show,
 +		.write = cpu_uclamp_max_write,
 +	},
++#endif
 +	{ }	/* Terminate */
 +};
 +
++#ifdef CONFIG_GROUP_SCHED_WEIGHT
 +static u64 cpu_weight_read_u64(struct cgroup_subsys_state *css,
 +			       struct cftype *cft)
 +{
@@ -7540,19 +7609,9 @@ index 000000000000..c59691742340
 +{
 +	return 0;
 +}
++#endif
 +
-+static s64 cpu_idle_read_s64(struct cgroup_subsys_state *css,
-+			       struct cftype *cft)
-+{
-+	return 0;
-+}
-+
-+static int cpu_idle_write_s64(struct cgroup_subsys_state *css,
-+				struct cftype *cft, s64 idle)
-+{
-+	return 0;
-+}
-+
++#ifdef CONFIG_CFS_BANDWIDTH
 +static int cpu_max_show(struct seq_file *sf, void *v)
 +{
 +	return 0;
@@ -7563,8 +7622,10 @@ index 000000000000..c59691742340
 +{
 +	return nbytes;
 +}
++#endif
 +
 +static struct cftype cpu_files[] = {
++#ifdef CONFIG_GROUP_SCHED_WEIGHT
 +	{
 +		.name = "weight",
 +		.flags = CFTYPE_NOT_ON_ROOT,
@@ -7583,6 +7644,8 @@ index 000000000000..c59691742340
 +		.read_s64 = cpu_idle_read_s64,
 +		.write_s64 = cpu_idle_write_s64,
 +	},
++#endif
++#ifdef CONFIG_CFS_BANDWIDTH
 +	{
 +		.name = "max",
 +		.flags = CFTYPE_NOT_ON_ROOT,
@@ -7595,6 +7658,8 @@ index 000000000000..c59691742340
 +		.read_u64 = cpu_cfs_burst_read_u64,
 +		.write_u64 = cpu_cfs_burst_write_u64,
 +	},
++#endif
++#ifdef CONFIG_UCLAMP_TASK_GROUP
 +	{
 +		.name = "uclamp.min",
 +		.flags = CFTYPE_NOT_ON_ROOT,
@@ -7607,6 +7672,7 @@ index 000000000000..c59691742340
 +		.seq_show = cpu_uclamp_max_show,
 +		.write = cpu_uclamp_max_write,
 +	},
++#endif
 +	{ }	/* terminate */
 +};
 +
@@ -8421,10 +8487,10 @@ index 000000000000..1dbd7eb6a434
 +{}
 diff --git a/kernel/sched/alt_sched.h b/kernel/sched/alt_sched.h
 new file mode 100644
-index 000000000000..09c9e9f80bf4
+index 000000000000..7fb3433c5c41
 --- /dev/null
 +++ b/kernel/sched/alt_sched.h
-@@ -0,0 +1,971 @@
+@@ -0,0 +1,997 @@
 +#ifndef _KERNEL_SCHED_ALT_SCHED_H
 +#define _KERNEL_SCHED_ALT_SCHED_H
 +
@@ -9120,15 +9186,41 @@ index 000000000000..09c9e9f80bf4
 +
 +static inline void nohz_run_idle_balance(int cpu) { }
 +
-+static inline
-+unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util,
-+				  struct task_struct *p)
++static inline unsigned long
++uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id)
 +{
-+	return util;
++	if (clamp_id == UCLAMP_MIN)
++		return 0;
++
++	return SCHED_CAPACITY_SCALE;
 +}
 +
 +static inline bool uclamp_rq_is_capped(struct rq *rq) { return false; }
 +
++static inline bool uclamp_is_used(void)
++{
++	return false;
++}
++
++static inline unsigned long
++uclamp_rq_get(struct rq *rq, enum uclamp_id clamp_id)
++{
++	if (clamp_id == UCLAMP_MIN)
++		return 0;
++
++	return SCHED_CAPACITY_SCALE;
++}
++
++static inline void
++uclamp_rq_set(struct rq *rq, enum uclamp_id clamp_id, unsigned int value)
++{
++}
++
++static inline bool uclamp_rq_is_idle(struct rq *rq)
++{
++	return false;
++}
++
 +#ifdef CONFIG_SCHED_MM_CID
 +
 +#define SCHED_MM_CID_PERIOD_NS	(100ULL * 1000000)	/* 100ms */
@@ -11109,6 +11201,28 @@ index 6bcee4704059..cf88205fd4a2 100644
  
  	return false;
  }
+diff --git a/kernel/trace/trace_osnoise.c b/kernel/trace/trace_osnoise.c
+index a50ed23bee77..be0477666049 100644
+--- a/kernel/trace/trace_osnoise.c
++++ b/kernel/trace/trace_osnoise.c
+@@ -1665,6 +1665,9 @@ static void osnoise_sleep(bool skip_period)
+  */
+ static inline int osnoise_migration_pending(void)
+ {
++#ifdef CONFIG_SCHED_ALT
++	return 0;
++#else
+ 	if (!current->migration_pending)
+ 		return 0;
+ 
+@@ -1686,6 +1689,7 @@ static inline int osnoise_migration_pending(void)
+ 	mutex_unlock(&interface_lock);
+ 
+ 	return 1;
++#endif
+ }
+ 
+ /*
 diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
 index 1469dd8075fa..803527a0e48a 100644
 --- a/kernel/trace/trace_selftest.c


^ permalink raw reply related	[flat|nested] 82+ messages in thread
* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2025-01-17 13:18 Mike Pagano
  0 siblings, 0 replies; 82+ messages in thread
From: Mike Pagano @ 2025-01-17 13:18 UTC (permalink / raw
  To: gentoo-commits

commit:     0ac150344b6ac5bfe1c306054599ae5cb28d7d74
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Jan 17 13:18:02 2025 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Jan 17 13:18:02 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=0ac15034

Linux patch 6.12.10

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1009_linux-6.12.10.patch | 6476 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 6480 insertions(+)

diff --git a/0000_README b/0000_README
index 06b9cb3f..20574d29 100644
--- a/0000_README
+++ b/0000_README
@@ -79,6 +79,10 @@ Patch:  1008_linux-6.12.9.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.12.9
 
+Patch:  1009_linux-6.12.10.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.12.10
+
 Patch:  1510_fs-enable-link-security-restrictions-by-default.patch
 From:   http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch/
 Desc:   Enable link security restrictions by default.

diff --git a/1009_linux-6.12.10.patch b/1009_linux-6.12.10.patch
new file mode 100644
index 00000000..af53d00b
--- /dev/null
+++ b/1009_linux-6.12.10.patch
@@ -0,0 +1,6476 @@
+diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst
+index 6d02168d78bed6..2cb58daf3089ba 100644
+--- a/Documentation/admin-guide/cgroup-v2.rst
++++ b/Documentation/admin-guide/cgroup-v2.rst
+@@ -2954,7 +2954,7 @@ following two functions.
+ 	a queue (device) has been associated with the bio and
+ 	before submission.
+ 
+-  wbc_account_cgroup_owner(@wbc, @page, @bytes)
++  wbc_account_cgroup_owner(@wbc, @folio, @bytes)
+ 	Should be called for each data segment being written out.
+ 	While this function doesn't care exactly when it's called
+ 	during the writeback session, it's the easiest and most
+diff --git a/Makefile b/Makefile
+index 80151f53d8ee0f..233e9e88e402e7 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 12
+-SUBLEVEL = 9
++SUBLEVEL = 10
+ EXTRAVERSION =
+ NAME = Baby Opossum Posse
+ 
+diff --git a/arch/arm/boot/dts/nxp/imx/imxrt1050.dtsi b/arch/arm/boot/dts/nxp/imx/imxrt1050.dtsi
+index dd714d235d5f6a..b0bad0d1ba36f4 100644
+--- a/arch/arm/boot/dts/nxp/imx/imxrt1050.dtsi
++++ b/arch/arm/boot/dts/nxp/imx/imxrt1050.dtsi
+@@ -87,7 +87,7 @@ usdhc1: mmc@402c0000 {
+ 			reg = <0x402c0000 0x4000>;
+ 			interrupts = <110>;
+ 			clocks = <&clks IMXRT1050_CLK_IPG_PDOF>,
+-				<&clks IMXRT1050_CLK_OSC>,
++				<&clks IMXRT1050_CLK_AHB_PODF>,
+ 				<&clks IMXRT1050_CLK_USDHC1>;
+ 			clock-names = "ipg", "ahb", "per";
+ 			bus-width = <4>;
+diff --git a/arch/arm64/boot/dts/freescale/imx95.dtsi b/arch/arm64/boot/dts/freescale/imx95.dtsi
+index 03661e76550f4d..40cbb071f265cf 100644
+--- a/arch/arm64/boot/dts/freescale/imx95.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx95.dtsi
+@@ -1609,7 +1609,7 @@ pcie1_ep: pcie-ep@4c380000 {
+ 
+ 		netcmix_blk_ctrl: syscon@4c810000 {
+ 			compatible = "nxp,imx95-netcmix-blk-ctrl", "syscon";
+-			reg = <0x0 0x4c810000 0x0 0x10000>;
++			reg = <0x0 0x4c810000 0x0 0x8>;
+ 			#clock-cells = <1>;
+ 			clocks = <&scmi_clk IMX95_CLK_BUSNETCMIX>;
+ 			assigned-clocks = <&scmi_clk IMX95_CLK_BUSNETCMIX>;
+diff --git a/arch/arm64/boot/dts/qcom/sa8775p.dtsi b/arch/arm64/boot/dts/qcom/sa8775p.dtsi
+index e8dbc8d820a64f..8a21448c0fa845 100644
+--- a/arch/arm64/boot/dts/qcom/sa8775p.dtsi
++++ b/arch/arm64/boot/dts/qcom/sa8775p.dtsi
+@@ -1940,6 +1940,7 @@ tpdm@4003000 {
+ 
+ 			qcom,cmb-element-bits = <32>;
+ 			qcom,cmb-msrs-num = <32>;
++			status = "disabled";
+ 
+ 			out-ports {
+ 				port {
+@@ -5587,7 +5588,7 @@ pcie0_ep: pcie-ep@1c00000 {
+ 		      <0x0 0x40000000 0x0 0xf20>,
+ 		      <0x0 0x40000f20 0x0 0xa8>,
+ 		      <0x0 0x40001000 0x0 0x4000>,
+-		      <0x0 0x40200000 0x0 0x100000>,
++		      <0x0 0x40200000 0x0 0x1fe00000>,
+ 		      <0x0 0x01c03000 0x0 0x1000>,
+ 		      <0x0 0x40005000 0x0 0x2000>;
+ 		reg-names = "parf", "dbi", "elbi", "atu", "addr_space",
+@@ -5744,7 +5745,7 @@ pcie1_ep: pcie-ep@1c10000 {
+ 		      <0x0 0x60000000 0x0 0xf20>,
+ 		      <0x0 0x60000f20 0x0 0xa8>,
+ 		      <0x0 0x60001000 0x0 0x4000>,
+-		      <0x0 0x60200000 0x0 0x100000>,
++		      <0x0 0x60200000 0x0 0x1fe00000>,
+ 		      <0x0 0x01c13000 0x0 0x1000>,
+ 		      <0x0 0x60005000 0x0 0x2000>;
+ 		reg-names = "parf", "dbi", "elbi", "atu", "addr_space",
+diff --git a/arch/arm64/boot/dts/qcom/x1e80100.dtsi b/arch/arm64/boot/dts/qcom/x1e80100.dtsi
+index 914f9cb3aca215..a97ceff939d882 100644
+--- a/arch/arm64/boot/dts/qcom/x1e80100.dtsi
++++ b/arch/arm64/boot/dts/qcom/x1e80100.dtsi
+@@ -2925,7 +2925,7 @@ pcie6a: pci@1bf8000 {
+ 			#address-cells = <3>;
+ 			#size-cells = <2>;
+ 			ranges = <0x01000000 0x0 0x00000000 0x0 0x70200000 0x0 0x100000>,
+-				 <0x02000000 0x0 0x70300000 0x0 0x70300000 0x0 0x1d00000>;
++				 <0x02000000 0x0 0x70300000 0x0 0x70300000 0x0 0x3d00000>;
+ 			bus-range = <0x00 0xff>;
+ 
+ 			dma-coherent;
+diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
+index c01a4cad48f30e..d16a13d6442f88 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
+@@ -333,6 +333,7 @@ power: power-controller {
+ 
+ 			power-domain@RK3328_PD_HEVC {
+ 				reg = <RK3328_PD_HEVC>;
++				clocks = <&cru SCLK_VENC_CORE>;
+ 				#power-domain-cells = <0>;
+ 			};
+ 			power-domain@RK3328_PD_VIDEO {
+diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
+index 32d308a3355fd4..febf820d505837 100644
+--- a/arch/riscv/include/asm/page.h
++++ b/arch/riscv/include/asm/page.h
+@@ -124,6 +124,7 @@ struct kernel_mapping {
+ 
+ extern struct kernel_mapping kernel_map;
+ extern phys_addr_t phys_ram_base;
++extern unsigned long vmemmap_start_pfn;
+ 
+ #define is_kernel_mapping(x)	\
+ 	((x) >= kernel_map.virt_addr && (x) < (kernel_map.virt_addr + kernel_map.size))
+diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
+index e79f15293492d5..c0866ada5bbc49 100644
+--- a/arch/riscv/include/asm/pgtable.h
++++ b/arch/riscv/include/asm/pgtable.h
+@@ -87,7 +87,7 @@
+  * Define vmemmap for pfn_to_page & page_to_pfn calls. Needed if kernel
+  * is configured with CONFIG_SPARSEMEM_VMEMMAP enabled.
+  */
+-#define vmemmap		((struct page *)VMEMMAP_START - (phys_ram_base >> PAGE_SHIFT))
++#define vmemmap		((struct page *)VMEMMAP_START - vmemmap_start_pfn)
+ 
+ #define PCI_IO_SIZE      SZ_16M
+ #define PCI_IO_END       VMEMMAP_START
+diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h
+index 98f631b051dba8..9be38b05f4adff 100644
+--- a/arch/riscv/include/asm/sbi.h
++++ b/arch/riscv/include/asm/sbi.h
+@@ -158,6 +158,7 @@ struct riscv_pmu_snapshot_data {
+ };
+ 
+ #define RISCV_PMU_RAW_EVENT_MASK GENMASK_ULL(47, 0)
++#define RISCV_PMU_PLAT_FW_EVENT_MASK GENMASK_ULL(61, 0)
+ #define RISCV_PMU_RAW_EVENT_IDX 0x20000
+ #define RISCV_PLAT_FW_EVENT	0xFFFF
+ 
+diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
+index c200d329d4bdbe..33a5a9f2a0d4e1 100644
+--- a/arch/riscv/kernel/entry.S
++++ b/arch/riscv/kernel/entry.S
+@@ -23,21 +23,21 @@
+ 	REG_S 	a0, TASK_TI_A0(tp)
+ 	csrr 	a0, CSR_CAUSE
+ 	/* Exclude IRQs */
+-	blt  	a0, zero, _new_vmalloc_restore_context_a0
++	blt  	a0, zero, .Lnew_vmalloc_restore_context_a0
+ 
+ 	REG_S 	a1, TASK_TI_A1(tp)
+ 	/* Only check new_vmalloc if we are in page/protection fault */
+ 	li   	a1, EXC_LOAD_PAGE_FAULT
+-	beq  	a0, a1, _new_vmalloc_kernel_address
++	beq  	a0, a1, .Lnew_vmalloc_kernel_address
+ 	li   	a1, EXC_STORE_PAGE_FAULT
+-	beq  	a0, a1, _new_vmalloc_kernel_address
++	beq  	a0, a1, .Lnew_vmalloc_kernel_address
+ 	li   	a1, EXC_INST_PAGE_FAULT
+-	bne  	a0, a1, _new_vmalloc_restore_context_a1
++	bne  	a0, a1, .Lnew_vmalloc_restore_context_a1
+ 
+-_new_vmalloc_kernel_address:
++.Lnew_vmalloc_kernel_address:
+ 	/* Is it a kernel address? */
+ 	csrr 	a0, CSR_TVAL
+-	bge 	a0, zero, _new_vmalloc_restore_context_a1
++	bge 	a0, zero, .Lnew_vmalloc_restore_context_a1
+ 
+ 	/* Check if a new vmalloc mapping appeared that could explain the trap */
+ 	REG_S	a2, TASK_TI_A2(tp)
+@@ -69,7 +69,7 @@ _new_vmalloc_kernel_address:
+ 	/* Check the value of new_vmalloc for this cpu */
+ 	REG_L	a2, 0(a0)
+ 	and	a2, a2, a1
+-	beq	a2, zero, _new_vmalloc_restore_context
++	beq	a2, zero, .Lnew_vmalloc_restore_context
+ 
+ 	/* Atomically reset the current cpu bit in new_vmalloc */
+ 	amoxor.d	a0, a1, (a0)
+@@ -83,11 +83,11 @@ _new_vmalloc_kernel_address:
+ 	csrw	CSR_SCRATCH, x0
+ 	sret
+ 
+-_new_vmalloc_restore_context:
++.Lnew_vmalloc_restore_context:
+ 	REG_L 	a2, TASK_TI_A2(tp)
+-_new_vmalloc_restore_context_a1:
++.Lnew_vmalloc_restore_context_a1:
+ 	REG_L 	a1, TASK_TI_A1(tp)
+-_new_vmalloc_restore_context_a0:
++.Lnew_vmalloc_restore_context_a0:
+ 	REG_L	a0, TASK_TI_A0(tp)
+ .endm
+ 
+@@ -278,6 +278,7 @@ SYM_CODE_START_NOALIGN(ret_from_exception)
+ #else
+ 	sret
+ #endif
++SYM_INNER_LABEL(ret_from_exception_end, SYM_L_GLOBAL)
+ SYM_CODE_END(ret_from_exception)
+ ASM_NOKPROBE(ret_from_exception)
+ 
+diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c
+index 1cd461f3d8726d..47d0ebeec93c23 100644
+--- a/arch/riscv/kernel/module.c
++++ b/arch/riscv/kernel/module.c
+@@ -23,7 +23,7 @@ struct used_bucket {
+ 
+ struct relocation_head {
+ 	struct hlist_node node;
+-	struct list_head *rel_entry;
++	struct list_head rel_entry;
+ 	void *location;
+ };
+ 
+@@ -634,7 +634,7 @@ process_accumulated_relocations(struct module *me,
+ 			location = rel_head_iter->location;
+ 			list_for_each_entry_safe(rel_entry_iter,
+ 						 rel_entry_iter_tmp,
+-						 rel_head_iter->rel_entry,
++						 &rel_head_iter->rel_entry,
+ 						 head) {
+ 				curr_type = rel_entry_iter->type;
+ 				reloc_handlers[curr_type].reloc_handler(
+@@ -704,16 +704,7 @@ static int add_relocation_to_accumulate(struct module *me, int type,
+ 			return -ENOMEM;
+ 		}
+ 
+-		rel_head->rel_entry =
+-			kmalloc(sizeof(struct list_head), GFP_KERNEL);
+-
+-		if (!rel_head->rel_entry) {
+-			kfree(entry);
+-			kfree(rel_head);
+-			return -ENOMEM;
+-		}
+-
+-		INIT_LIST_HEAD(rel_head->rel_entry);
++		INIT_LIST_HEAD(&rel_head->rel_entry);
+ 		rel_head->location = location;
+ 		INIT_HLIST_NODE(&rel_head->node);
+ 		if (!current_head->first) {
+@@ -722,7 +713,6 @@ static int add_relocation_to_accumulate(struct module *me, int type,
+ 
+ 			if (!bucket) {
+ 				kfree(entry);
+-				kfree(rel_head->rel_entry);
+ 				kfree(rel_head);
+ 				return -ENOMEM;
+ 			}
+@@ -735,7 +725,7 @@ static int add_relocation_to_accumulate(struct module *me, int type,
+ 	}
+ 
+ 	/* Add relocation to head of discovered rel_head */
+-	list_add_tail(&entry->head, rel_head->rel_entry);
++	list_add_tail(&entry->head, &rel_head->rel_entry);
+ 
+ 	return 0;
+ }
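
The riscv module fix above embeds the relocation list head directly in
struct relocation_head instead of allocating it separately, which removes
one allocation-failure path and the leak-prone ordering between freeing the
head and freeing its list. The embedded-head idiom, sketched:

    #include <linux/list.h>
    #include <linux/slab.h>

    struct relocation_head {
            struct hlist_node node;
            struct list_head rel_entry;     /* embedded, not a pointer */
            void *location;
    };

    static struct relocation_head *rel_head_alloc(void *location)
    {
            struct relocation_head *rh = kmalloc(sizeof(*rh), GFP_KERNEL);

            if (!rh)
                    return NULL;
            INIT_LIST_HEAD(&rh->rel_entry); /* no second allocation */
            rh->location = location;
            return rh;
    }
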
+diff --git a/arch/riscv/kernel/probes/kprobes.c b/arch/riscv/kernel/probes/kprobes.c
+index 474a6521365783..d2dacea1aedd9e 100644
+--- a/arch/riscv/kernel/probes/kprobes.c
++++ b/arch/riscv/kernel/probes/kprobes.c
+@@ -30,7 +30,7 @@ static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
+ 	p->ainsn.api.restore = (unsigned long)p->addr + len;
+ 
+ 	patch_text_nosync(p->ainsn.api.insn, &p->opcode, len);
+-	patch_text_nosync(p->ainsn.api.insn + len, &insn, GET_INSN_LENGTH(insn));
++	patch_text_nosync((void *)p->ainsn.api.insn + len, &insn, GET_INSN_LENGTH(insn));
+ }
+ 
+ static void __kprobes arch_prepare_simulate(struct kprobe *p)
+diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c
+index 153a2db4c5fa14..d4355c770c36ac 100644
+--- a/arch/riscv/kernel/stacktrace.c
++++ b/arch/riscv/kernel/stacktrace.c
+@@ -17,6 +17,7 @@
+ #ifdef CONFIG_FRAME_POINTER
+ 
+ extern asmlinkage void handle_exception(void);
++extern unsigned long ret_from_exception_end;
+ 
+ static inline int fp_is_valid(unsigned long fp, unsigned long sp)
+ {
+@@ -71,7 +72,8 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
+ 			fp = frame->fp;
+ 			pc = ftrace_graph_ret_addr(current, &graph_idx, frame->ra,
+ 						   &frame->ra);
+-			if (pc == (unsigned long)handle_exception) {
++			if (pc >= (unsigned long)handle_exception &&
++			    pc < (unsigned long)&ret_from_exception_end) {
+ 				if (unlikely(!__kernel_text_address(pc) || !fn(arg, pc)))
+ 					break;
+ 
+diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
+index 51ebfd23e00764..8ff8e8b36524b7 100644
+--- a/arch/riscv/kernel/traps.c
++++ b/arch/riscv/kernel/traps.c
+@@ -35,7 +35,7 @@
+ 
+ int show_unhandled_signals = 1;
+ 
+-static DEFINE_SPINLOCK(die_lock);
++static DEFINE_RAW_SPINLOCK(die_lock);
+ 
+ static int copy_code(struct pt_regs *regs, u16 *val, const u16 *insns)
+ {
+@@ -81,7 +81,7 @@ void die(struct pt_regs *regs, const char *str)
+ 
+ 	oops_enter();
+ 
+-	spin_lock_irqsave(&die_lock, flags);
++	raw_spin_lock_irqsave(&die_lock, flags);
+ 	console_verbose();
+ 	bust_spinlocks(1);
+ 
+@@ -100,7 +100,7 @@ void die(struct pt_regs *regs, const char *str)
+ 
+ 	bust_spinlocks(0);
+ 	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
+-	spin_unlock_irqrestore(&die_lock, flags);
++	raw_spin_unlock_irqrestore(&die_lock, flags);
+ 	oops_exit();
+ 
+ 	if (in_interrupt())
+diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
+index fc53ce748c8049..8d167e09f1fea5 100644
+--- a/arch/riscv/mm/init.c
++++ b/arch/riscv/mm/init.c
+@@ -33,6 +33,7 @@
+ #include <asm/pgtable.h>
+ #include <asm/sections.h>
+ #include <asm/soc.h>
++#include <asm/sparsemem.h>
+ #include <asm/tlbflush.h>
+ 
+ #include "../kernel/head.h"
+@@ -62,6 +63,13 @@ EXPORT_SYMBOL(pgtable_l5_enabled);
+ phys_addr_t phys_ram_base __ro_after_init;
+ EXPORT_SYMBOL(phys_ram_base);
+ 
++#ifdef CONFIG_SPARSEMEM_VMEMMAP
++#define VMEMMAP_ADDR_ALIGN	(1ULL << SECTION_SIZE_BITS)
++
++unsigned long vmemmap_start_pfn __ro_after_init;
++EXPORT_SYMBOL(vmemmap_start_pfn);
++#endif
++
+ unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
+ 							__page_aligned_bss;
+ EXPORT_SYMBOL(empty_zero_page);
+@@ -240,8 +248,12 @@ static void __init setup_bootmem(void)
+ 	 * Make sure we align the start of the memory on a PMD boundary so that
+ 	 * at worst, we map the linear mapping with PMD mappings.
+ 	 */
+-	if (!IS_ENABLED(CONFIG_XIP_KERNEL))
++	if (!IS_ENABLED(CONFIG_XIP_KERNEL)) {
+ 		phys_ram_base = memblock_start_of_DRAM() & PMD_MASK;
++#ifdef CONFIG_SPARSEMEM_VMEMMAP
++		vmemmap_start_pfn = round_down(phys_ram_base, VMEMMAP_ADDR_ALIGN) >> PAGE_SHIFT;
++#endif
++	}
+ 
+ 	/*
+ 	 * In 64-bit, any use of __va/__pa before this point is wrong as we
+@@ -1101,6 +1113,9 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
+ 	kernel_map.xiprom_sz = (uintptr_t)(&_exiprom) - (uintptr_t)(&_xiprom);
+ 
+ 	phys_ram_base = CONFIG_PHYS_RAM_BASE;
++#ifdef CONFIG_SPARSEMEM_VMEMMAP
++	vmemmap_start_pfn = round_down(phys_ram_base, VMEMMAP_ADDR_ALIGN) >> PAGE_SHIFT;
++#endif
+ 	kernel_map.phys_addr = (uintptr_t)CONFIG_PHYS_RAM_BASE;
+ 	kernel_map.size = (uintptr_t)(&_end) - (uintptr_t)(&_start);
+ 
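
The mm/init.c change introduces vmemmap_start_pfn because, with
SPARSEMEM_VMEMMAP, the struct-page array is laid out in section-sized
units: if DRAM does not start on a section boundary, offsetting vmemmap by
the raw phys_ram_base PFN makes pfn_to_page() land outside the mapped
array. Rounding the base down to the section alignment fixes the
arithmetic. A hedged numeric sketch (assuming riscv's 4 KiB pages and
27-bit sections):

    #define PAGE_SHIFT         12
    #define SECTION_SIZE_BITS  27                      /* 128 MiB sections */
    #define VMEMMAP_ADDR_ALIGN (1ULL << SECTION_SIZE_BITS)

    static unsigned long vmemmap_base_pfn(unsigned long long phys_ram_base)
    {
            /* e.g. DRAM at 0x80200000 rounds down to 0x80000000 */
            return (unsigned long)((phys_ram_base &
                    ~(VMEMMAP_ADDR_ALIGN - 1)) >> PAGE_SHIFT);
    }
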
+diff --git a/arch/x86/kernel/fpu/regset.c b/arch/x86/kernel/fpu/regset.c
+index 6bc1eb2a21bd92..887b0b8e21e364 100644
+--- a/arch/x86/kernel/fpu/regset.c
++++ b/arch/x86/kernel/fpu/regset.c
+@@ -190,7 +190,8 @@ int ssp_get(struct task_struct *target, const struct user_regset *regset,
+ 	struct fpu *fpu = &target->thread.fpu;
+ 	struct cet_user_state *cetregs;
+ 
+-	if (!cpu_feature_enabled(X86_FEATURE_USER_SHSTK))
++	if (!cpu_feature_enabled(X86_FEATURE_USER_SHSTK) ||
++	    !ssp_active(target, regset))
+ 		return -ENODEV;
+ 
+ 	sync_fpstate(fpu);
+diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
+index 95dd7b79593565..cad16c163611b5 100644
+--- a/block/bfq-iosched.c
++++ b/block/bfq-iosched.c
+@@ -6844,16 +6844,24 @@ static struct bfq_queue *bfq_waker_bfqq(struct bfq_queue *bfqq)
+ 		if (new_bfqq == waker_bfqq) {
+ 			/*
+ 			 * If waker_bfqq is in the merge chain, and current
+-			 * is the only procress.
++			 * is the only process, waker_bfqq can be freed.
+ 			 */
+ 			if (bfqq_process_refs(waker_bfqq) == 1)
+ 				return NULL;
+-			break;
++
++			return waker_bfqq;
+ 		}
+ 
+ 		new_bfqq = new_bfqq->new_bfqq;
+ 	}
+ 
++	/*
++	 * If waker_bfqq is not in the merge chain, and its process reference
++	 * is 0, waker_bfqq can be freed.
++	 */
++	if (bfqq_process_refs(waker_bfqq) == 0)
++		return NULL;
++
+ 	return waker_bfqq;
+ }
+ 
+diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
+index 821867de43bea3..d27a3bf96f80d8 100644
+--- a/drivers/acpi/resource.c
++++ b/drivers/acpi/resource.c
+@@ -440,6 +440,13 @@ static const struct dmi_system_id irq1_level_low_skip_override[] = {
+ 			DMI_MATCH(DMI_BOARD_NAME, "S5602ZA"),
+ 		},
+ 	},
++	{
++		/* Asus Vivobook X1504VAP */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++			DMI_MATCH(DMI_BOARD_NAME, "X1504VAP"),
++		},
++	},
+ 	{
+ 		/* Asus Vivobook X1704VAP */
+ 		.matches = {
+@@ -646,6 +653,17 @@ static const struct dmi_system_id irq1_edge_low_force_override[] = {
+ 			DMI_MATCH(DMI_BOARD_NAME, "GMxHGxx"),
+ 		},
+ 	},
++	{
++		/*
++		 * TongFang GM5HG0A in case of the SKIKK Vanaheim relabel the
++		 * board-name is changed, so check OEM strings instead. Note
++		 * OEM string matches are always exact matches.
++		 * https://bugzilla.kernel.org/show_bug.cgi?id=219614
++		 */
++		.matches = {
++			DMI_EXACT_MATCH(DMI_OEM_STRING, "GM5HG0A"),
++		},
++	},
+ 	{ }
+ };
+ 
+diff --git a/drivers/base/topology.c b/drivers/base/topology.c
+index 89f98be5c5b991..d293cbd253e4f9 100644
+--- a/drivers/base/topology.c
++++ b/drivers/base/topology.c
+@@ -27,9 +27,17 @@ static ssize_t name##_read(struct file *file, struct kobject *kobj,		\
+ 			   loff_t off, size_t count)				\
+ {										\
+ 	struct device *dev = kobj_to_dev(kobj);                                 \
++	cpumask_var_t mask;							\
++	ssize_t n;								\
+ 										\
+-	return cpumap_print_bitmask_to_buf(buf, topology_##mask(dev->id),	\
+-					   off, count);                         \
++	if (!alloc_cpumask_var(&mask, GFP_KERNEL))				\
++		return -ENOMEM;							\
++										\
++	cpumask_copy(mask, topology_##mask(dev->id));				\
++	n = cpumap_print_bitmask_to_buf(buf, mask, off, count);			\
++	free_cpumask_var(mask);							\
++										\
++	return n;								\
+ }										\
+ 										\
+ static ssize_t name##_list_read(struct file *file, struct kobject *kobj,	\
+@@ -37,9 +45,17 @@ static ssize_t name##_list_read(struct file *file, struct kobject *kobj,	\
+ 				loff_t off, size_t count)			\
+ {										\
+ 	struct device *dev = kobj_to_dev(kobj);					\
++	cpumask_var_t mask;							\
++	ssize_t n;								\
++										\
++	if (!alloc_cpumask_var(&mask, GFP_KERNEL))				\
++		return -ENOMEM;							\
++										\
++	cpumask_copy(mask, topology_##mask(dev->id));				\
++	n = cpumap_print_list_to_buf(buf, mask, off, count);			\
++	free_cpumask_var(mask);							\
+ 										\
+-	return cpumap_print_list_to_buf(buf, topology_##mask(dev->id),		\
+-					off, count);				\
++	return n;								\
+ }
+ 
+ define_id_show_func(physical_package_id, "%d");
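
The topology sysfs change above formats from a private copy of the mask
rather than the live one. These handlers are invoked repeatedly with
increasing offsets as userspace reads the attribute in chunks, and printing
straight from a mask that CPU hotplug can rewrite concurrently risks torn
output; the copy makes each call self-consistent. The pattern, sketched:

    static ssize_t print_mask_snapshot(char *buf, const struct cpumask *live,
                                       loff_t off, size_t count)
    {
            cpumask_var_t snap;
            ssize_t n;

            if (!alloc_cpumask_var(&snap, GFP_KERNEL))
                    return -ENOMEM;
            cpumask_copy(snap, live);       /* one coherent copy */
            n = cpumap_print_bitmask_to_buf(buf, snap, off, count);
            free_cpumask_var(snap);
            return n;
    }
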
+diff --git a/drivers/bluetooth/btmtk.c b/drivers/bluetooth/btmtk.c
+index 85e99641eaae02..af487abe9932aa 100644
+--- a/drivers/bluetooth/btmtk.c
++++ b/drivers/bluetooth/btmtk.c
+@@ -1472,10 +1472,15 @@ EXPORT_SYMBOL_GPL(btmtk_usb_setup);
+ 
+ int btmtk_usb_shutdown(struct hci_dev *hdev)
+ {
++	struct btmtk_data *data = hci_get_priv(hdev);
+ 	struct btmtk_hci_wmt_params wmt_params;
+ 	u8 param = 0;
+ 	int err;
+ 
++	err = usb_autopm_get_interface(data->intf);
++	if (err < 0)
++		return err;
++
+ 	/* Disable the device */
+ 	wmt_params.op = BTMTK_WMT_FUNC_CTRL;
+ 	wmt_params.flag = 0;
+@@ -1486,9 +1491,11 @@ int btmtk_usb_shutdown(struct hci_dev *hdev)
+ 	err = btmtk_usb_hci_wmt_sync(hdev, &wmt_params);
+ 	if (err < 0) {
+ 		bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
++		usb_autopm_put_interface(data->intf);
+ 		return err;
+ 	}
+ 
++	usb_autopm_put_interface(data->intf);
+ 	return 0;
+ }
+ EXPORT_SYMBOL_GPL(btmtk_usb_shutdown);
+diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c
+index 5ea0d23e88c02b..a028984f27829c 100644
+--- a/drivers/bluetooth/btnxpuart.c
++++ b/drivers/bluetooth/btnxpuart.c
+@@ -1336,6 +1336,7 @@ static void btnxpuart_tx_work(struct work_struct *work)
+ 
+ 	while ((skb = nxp_dequeue(nxpdev))) {
+ 		len = serdev_device_write_buf(serdev, skb->data, skb->len);
++		serdev_device_wait_until_sent(serdev, 0);
+ 		hdev->stat.byte_tx += len;
+ 
+ 		skb_pull(skb, len);
+diff --git a/drivers/cpuidle/cpuidle-riscv-sbi.c b/drivers/cpuidle/cpuidle-riscv-sbi.c
+index d228b4d18d5600..77a1fc668ae27b 100644
+--- a/drivers/cpuidle/cpuidle-riscv-sbi.c
++++ b/drivers/cpuidle/cpuidle-riscv-sbi.c
+@@ -500,12 +500,12 @@ static int sbi_cpuidle_probe(struct platform_device *pdev)
+ 	int cpu, ret;
+ 	struct cpuidle_driver *drv;
+ 	struct cpuidle_device *dev;
+-	struct device_node *np, *pds_node;
++	struct device_node *pds_node;
+ 
+ 	/* Detect OSI support based on CPU DT nodes */
+ 	sbi_cpuidle_use_osi = true;
+ 	for_each_possible_cpu(cpu) {
+-		np = of_cpu_device_node_get(cpu);
++		struct device_node *np __free(device_node) = of_cpu_device_node_get(cpu);
+ 		if (np &&
+ 		    of_property_present(np, "power-domains") &&
+ 		    of_property_present(np, "power-domain-names")) {
+diff --git a/drivers/gpio/gpio-loongson-64bit.c b/drivers/gpio/gpio-loongson-64bit.c
+index 6749d4dd6d6496..7f4d78fd800e7e 100644
+--- a/drivers/gpio/gpio-loongson-64bit.c
++++ b/drivers/gpio/gpio-loongson-64bit.c
+@@ -237,9 +237,9 @@ static const struct loongson_gpio_chip_data loongson_gpio_ls2k2000_data1 = {
+ static const struct loongson_gpio_chip_data loongson_gpio_ls2k2000_data2 = {
+ 	.label = "ls2k2000_gpio",
+ 	.mode = BIT_CTRL_MODE,
+-	.conf_offset = 0x84,
+-	.in_offset = 0x88,
+-	.out_offset = 0x80,
++	.conf_offset = 0x4,
++	.in_offset = 0x8,
++	.out_offset = 0x0,
+ };
+ 
+ static const struct loongson_gpio_chip_data loongson_gpio_ls3a5000_data = {
+diff --git a/drivers/gpio/gpio-virtuser.c b/drivers/gpio/gpio-virtuser.c
+index 91b6352c957cf9..d6244f0d3bc752 100644
+--- a/drivers/gpio/gpio-virtuser.c
++++ b/drivers/gpio/gpio-virtuser.c
+@@ -1410,7 +1410,7 @@ gpio_virtuser_make_lookup_table(struct gpio_virtuser_device *dev)
+ 	size_t num_entries = gpio_virtuser_get_lookup_count(dev);
+ 	struct gpio_virtuser_lookup_entry *entry;
+ 	struct gpio_virtuser_lookup *lookup;
+-	unsigned int i = 0;
++	unsigned int i = 0, idx;
+ 
+ 	lockdep_assert_held(&dev->lock);
+ 
+@@ -1424,12 +1424,12 @@ gpio_virtuser_make_lookup_table(struct gpio_virtuser_device *dev)
+ 		return -ENOMEM;
+ 
+ 	list_for_each_entry(lookup, &dev->lookup_list, siblings) {
++		idx = 0;
+ 		list_for_each_entry(entry, &lookup->entry_list, siblings) {
+-			table->table[i] =
++			table->table[i++] =
+ 				GPIO_LOOKUP_IDX(entry->key,
+ 						entry->offset < 0 ? U16_MAX : entry->offset,
+-						lookup->con_id, i, entry->flags);
+-			i++;
++						lookup->con_id, idx++, entry->flags);
+ 		}
+ 	}
+ 
+@@ -1439,6 +1439,15 @@ gpio_virtuser_make_lookup_table(struct gpio_virtuser_device *dev)
+ 	return 0;
+ }
+ 
++static void
++gpio_virtuser_remove_lookup_table(struct gpio_virtuser_device *dev)
++{
++	gpiod_remove_lookup_table(dev->lookup_table);
++	kfree(dev->lookup_table->dev_id);
++	kfree(dev->lookup_table);
++	dev->lookup_table = NULL;
++}
++
+ static struct fwnode_handle *
+ gpio_virtuser_make_device_swnode(struct gpio_virtuser_device *dev)
+ {
+@@ -1487,10 +1496,8 @@ gpio_virtuser_device_activate(struct gpio_virtuser_device *dev)
+ 	pdevinfo.fwnode = swnode;
+ 
+ 	ret = gpio_virtuser_make_lookup_table(dev);
+-	if (ret) {
+-		fwnode_remove_software_node(swnode);
+-		return ret;
+-	}
++	if (ret)
++		goto err_remove_swnode;
+ 
+ 	reinit_completion(&dev->probe_completion);
+ 	dev->driver_bound = false;
+@@ -1498,23 +1505,31 @@ gpio_virtuser_device_activate(struct gpio_virtuser_device *dev)
+ 
+ 	pdev = platform_device_register_full(&pdevinfo);
+ 	if (IS_ERR(pdev)) {
++		ret = PTR_ERR(pdev);
+ 		bus_unregister_notifier(&platform_bus_type, &dev->bus_notifier);
+-		fwnode_remove_software_node(swnode);
+-		return PTR_ERR(pdev);
++		goto err_remove_lookup_table;
+ 	}
+ 
+ 	wait_for_completion(&dev->probe_completion);
+ 	bus_unregister_notifier(&platform_bus_type, &dev->bus_notifier);
+ 
+ 	if (!dev->driver_bound) {
+-		platform_device_unregister(pdev);
+-		fwnode_remove_software_node(swnode);
+-		return -ENXIO;
++		ret = -ENXIO;
++		goto err_unregister_pdev;
+ 	}
+ 
+ 	dev->pdev = pdev;
+ 
+ 	return 0;
++
++err_unregister_pdev:
++	platform_device_unregister(pdev);
++err_remove_lookup_table:
++	gpio_virtuser_remove_lookup_table(dev);
++err_remove_swnode:
++	fwnode_remove_software_node(swnode);
++
++	return ret;
+ }
+ 
+ static void
+@@ -1526,10 +1541,9 @@ gpio_virtuser_device_deactivate(struct gpio_virtuser_device *dev)
+ 
+ 	swnode = dev_fwnode(&dev->pdev->dev);
+ 	platform_device_unregister(dev->pdev);
++	gpio_virtuser_remove_lookup_table(dev);
+ 	fwnode_remove_software_node(swnode);
+ 	dev->pdev = NULL;
+-	gpiod_remove_lookup_table(dev->lookup_table);
+-	kfree(dev->lookup_table);
+ }
+ 
+ static ssize_t
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+index 7d26a962f811cf..ff5e52025266cd 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+@@ -567,7 +567,6 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
+ 		else
+ 			remaining_size -= size;
+ 	}
+-	mutex_unlock(&mgr->lock);
+ 
+ 	if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS && adjust_dcc_size) {
+ 		struct drm_buddy_block *dcc_block;
+@@ -584,6 +583,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
+ 				     (u64)vres->base.size,
+ 				     &vres->blocks);
+ 	}
++	mutex_unlock(&mgr->lock);
+ 
+ 	vres->base.start = 0;
+ 	size = max_t(u64, amdgpu_vram_mgr_blocks_size(&vres->blocks),
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c
+index 312dfa84f29f84..a8abc309180137 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c
+@@ -350,10 +350,27 @@ int kfd_dbg_set_mes_debug_mode(struct kfd_process_device *pdd, bool sq_trap_en)
+ {
+ 	uint32_t spi_dbg_cntl = pdd->spi_dbg_override | pdd->spi_dbg_launch_mode;
+ 	uint32_t flags = pdd->process->dbg_flags;
++	struct amdgpu_device *adev = pdd->dev->adev;
++	int r;
+ 
+ 	if (!kfd_dbg_is_per_vmid_supported(pdd->dev))
+ 		return 0;
+ 
++	if (!pdd->proc_ctx_cpu_ptr) {
++			r = amdgpu_amdkfd_alloc_gtt_mem(adev,
++				AMDGPU_MES_PROC_CTX_SIZE,
++				&pdd->proc_ctx_bo,
++				&pdd->proc_ctx_gpu_addr,
++				&pdd->proc_ctx_cpu_ptr,
++				false);
++		if (r) {
++			dev_err(adev->dev,
++			"failed to allocate process context bo\n");
++			return r;
++		}
++		memset(pdd->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);
++	}
++
+ 	return amdgpu_mes_set_shader_debugger(pdd->dev->adev, pdd->proc_ctx_gpu_addr, spi_dbg_cntl,
+ 						pdd->watch_points, flags, sq_trap_en);
+ }
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+index 3139987b82b100..264bd764f6f27d 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+@@ -1160,7 +1160,8 @@ static void kfd_process_wq_release(struct work_struct *work)
+ 	 */
+ 	synchronize_rcu();
+ 	ef = rcu_access_pointer(p->ef);
+-	dma_fence_signal(ef);
++	if (ef)
++		dma_fence_signal(ef);
+ 
+ 	kfd_process_remove_sysfs(p);
+ 
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index ad3a3aa72b51f3..ea403fece8392c 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -8393,16 +8393,6 @@ static void manage_dm_interrupts(struct amdgpu_device *adev,
+ 				 struct amdgpu_crtc *acrtc,
+ 				 struct dm_crtc_state *acrtc_state)
+ {
+-	/*
+-	 * We have no guarantee that the frontend index maps to the same
+-	 * backend index - some even map to more than one.
+-	 *
+-	 * TODO: Use a different interrupt or check DC itself for the mapping.
+-	 */
+-	int irq_type =
+-		amdgpu_display_crtc_idx_to_irq_type(
+-			adev,
+-			acrtc->crtc_id);
+ 	struct drm_vblank_crtc_config config = {0};
+ 	struct dc_crtc_timing *timing;
+ 	int offdelay;
+@@ -8428,28 +8418,7 @@ static void manage_dm_interrupts(struct amdgpu_device *adev,
+ 
+ 		drm_crtc_vblank_on_config(&acrtc->base,
+ 					  &config);
+-
+-		amdgpu_irq_get(
+-			adev,
+-			&adev->pageflip_irq,
+-			irq_type);
+-#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+-		amdgpu_irq_get(
+-			adev,
+-			&adev->vline0_irq,
+-			irq_type);
+-#endif
+ 	} else {
+-#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+-		amdgpu_irq_put(
+-			adev,
+-			&adev->vline0_irq,
+-			irq_type);
+-#endif
+-		amdgpu_irq_put(
+-			adev,
+-			&adev->pageflip_irq,
+-			irq_type);
+ 		drm_crtc_vblank_off(&acrtc->base);
+ 	}
+ }
+@@ -11146,8 +11115,8 @@ dm_get_plane_scale(struct drm_plane_state *plane_state,
+ 	int plane_src_w, plane_src_h;
+ 
+ 	dm_get_oriented_plane_size(plane_state, &plane_src_w, &plane_src_h);
+-	*out_plane_scale_w = plane_state->crtc_w * 1000 / plane_src_w;
+-	*out_plane_scale_h = plane_state->crtc_h * 1000 / plane_src_h;
++	*out_plane_scale_w = plane_src_w ? plane_state->crtc_w * 1000 / plane_src_w : 0;
++	*out_plane_scale_h = plane_src_h ? plane_state->crtc_h * 1000 / plane_src_h : 0;
+ }
+ 
+ /*
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index 9f570d447c2099..6d4ee8fe615c38 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -4421,7 +4421,7 @@ static bool commit_minimal_transition_based_on_current_context(struct dc *dc,
+ 	struct pipe_split_policy_backup policy;
+ 	struct dc_state *intermediate_context;
+ 	struct dc_state *old_current_state = dc->current_state;
+-	struct dc_surface_update srf_updates[MAX_SURFACE_NUM] = {0};
++	struct dc_surface_update srf_updates[MAX_SURFACES] = {0};
+ 	int surface_count;
+ 
+ 	/*
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
+index e006f816ff2f74..1b2cce127981d9 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_state.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
+@@ -483,9 +483,9 @@ bool dc_state_add_plane(
+ 	if (stream_status == NULL) {
+ 		dm_error("Existing stream not found; failed to attach surface!\n");
+ 		goto out;
+-	} else if (stream_status->plane_count == MAX_SURFACE_NUM) {
++	} else if (stream_status->plane_count == MAX_SURFACES) {
+ 		dm_error("Surface: can not attach plane_state %p! Maximum is: %d\n",
+-				plane_state, MAX_SURFACE_NUM);
++				plane_state, MAX_SURFACES);
+ 		goto out;
+ 	} else if (!otg_master_pipe) {
+ 		goto out;
+@@ -600,7 +600,7 @@ bool dc_state_rem_all_planes_for_stream(
+ {
+ 	int i, old_plane_count;
+ 	struct dc_stream_status *stream_status = NULL;
+-	struct dc_plane_state *del_planes[MAX_SURFACE_NUM] = { 0 };
++	struct dc_plane_state *del_planes[MAX_SURFACES] = { 0 };
+ 
+ 	for (i = 0; i < state->stream_count; i++)
+ 		if (state->streams[i] == stream) {
+@@ -875,7 +875,7 @@ bool dc_state_rem_all_phantom_planes_for_stream(
+ {
+ 	int i, old_plane_count;
+ 	struct dc_stream_status *stream_status = NULL;
+-	struct dc_plane_state *del_planes[MAX_SURFACE_NUM] = { 0 };
++	struct dc_plane_state *del_planes[MAX_SURFACES] = { 0 };
+ 
+ 	for (i = 0; i < state->stream_count; i++)
+ 		if (state->streams[i] == phantom_stream) {
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index 7c163aa7e8bd2d..a4f6ff7155c2a0 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -57,7 +57,7 @@ struct dmub_notification;
+ 
+ #define DC_VER "3.2.301"
+ 
+-#define MAX_SURFACES 3
++#define MAX_SURFACES 4
+ #define MAX_PLANES 6
+ #define MAX_STREAMS 6
+ #define MIN_VIEWPORT_SIZE 12
+@@ -1390,7 +1390,7 @@ struct dc_scratch_space {
+ 	 * store current value in plane states so we can still recover
+ 	 * a valid current state during dc update.
+ 	 */
+-	struct dc_plane_state plane_states[MAX_SURFACE_NUM];
++	struct dc_plane_state plane_states[MAX_SURFACES];
+ 
+ 	struct dc_stream_state stream_state;
+ };
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
+index 14ea47eda0c873..8b9af1a6a03162 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
+@@ -56,7 +56,7 @@ struct dc_stream_status {
+ 	int plane_count;
+ 	int audio_inst;
+ 	struct timing_sync_info timing_sync_info;
+-	struct dc_plane_state *plane_states[MAX_SURFACE_NUM];
++	struct dc_plane_state *plane_states[MAX_SURFACES];
+ 	bool is_abm_supported;
+ 	struct mall_stream_config mall_stream_config;
+ 	bool fpo_in_use;
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
+index 6d7989b751e2ce..c8bdbbba44ef9d 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
+@@ -76,7 +76,6 @@ struct dc_perf_trace {
+ 	unsigned long last_entry_write;
+ };
+ 
+-#define MAX_SURFACE_NUM 6
+ #define NUM_PIXEL_FORMATS 10
+ 
+ enum tiling_mode {
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
+index 072bd053960594..6b2ab4ec2b5ffe 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
++++ b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
+@@ -66,11 +66,15 @@ static inline double dml_max5(double a, double b, double c, double d, double e)
+ 
+ static inline double dml_ceil(double a, double granularity)
+ {
++	if (granularity == 0)
++		return 0;
+ 	return (double) dcn_bw_ceil2(a, granularity);
+ }
+ 
+ static inline double dml_floor(double a, double granularity)
+ {
++	if (granularity == 0)
++		return 0;
+ 	return (double) dcn_bw_floor2(a, granularity);
+ }
+ 
+@@ -114,11 +118,15 @@ static inline double dml_ceil_2(double f)
+ 
+ static inline double dml_ceil_ex(double x, double granularity)
+ {
++	if (granularity == 0)
++		return 0;
+ 	return (double) dcn_bw_ceil2(x, granularity);
+ }
+ 
+ static inline double dml_floor_ex(double x, double granularity)
+ {
++	if (granularity == 0)
++		return 0;
+ 	return (double) dcn_bw_floor2(x, granularity);
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c
+index 3d29169dd6bbf0..6b3b8803e0aee2 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c
++++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c
+@@ -813,7 +813,7 @@ static bool remove_all_phantom_planes_for_stream(struct dml2_context *ctx, struc
+ {
+ 	int i, old_plane_count;
+ 	struct dc_stream_status *stream_status = NULL;
+-	struct dc_plane_state *del_planes[MAX_SURFACE_NUM] = { 0 };
++	struct dc_plane_state *del_planes[MAX_SURFACES] = { 0 };
+ 
+ 	for (i = 0; i < context->stream_count; i++)
+ 			if (context->streams[i] == stream) {
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+index e58220a7ee2f70..30178dde6d49fc 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
++++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+@@ -302,5 +302,7 @@ int smu_v13_0_set_wbrf_exclusion_ranges(struct smu_context *smu,
+ int smu_v13_0_get_boot_freq_by_index(struct smu_context *smu,
+ 				     enum smu_clk_type clk_type,
+ 				     uint32_t *value);
++
++void smu_v13_0_interrupt_work(struct smu_context *smu);
+ #endif
+ #endif
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+index e17466cc19522d..2024a85fa11bd5 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+@@ -1320,11 +1320,11 @@ static int smu_v13_0_set_irq_state(struct amdgpu_device *adev,
+ 	return 0;
+ }
+ 
+-static int smu_v13_0_ack_ac_dc_interrupt(struct smu_context *smu)
++void smu_v13_0_interrupt_work(struct smu_context *smu)
+ {
+-	return smu_cmn_send_smc_msg(smu,
+-				    SMU_MSG_ReenableAcDcInterrupt,
+-				    NULL);
++	smu_cmn_send_smc_msg(smu,
++			     SMU_MSG_ReenableAcDcInterrupt,
++			     NULL);
+ }
+ 
+ #define THM_11_0__SRCID__THM_DIG_THERM_L2H		0		/* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH  */
+@@ -1377,12 +1377,12 @@ static int smu_v13_0_irq_process(struct amdgpu_device *adev,
+ 			switch (ctxid) {
+ 			case SMU_IH_INTERRUPT_CONTEXT_ID_AC:
+ 				dev_dbg(adev->dev, "Switched to AC mode!\n");
+-				smu_v13_0_ack_ac_dc_interrupt(smu);
++				schedule_work(&smu->interrupt_work);
+ 				adev->pm.ac_power = true;
+ 				break;
+ 			case SMU_IH_INTERRUPT_CONTEXT_ID_DC:
+ 				dev_dbg(adev->dev, "Switched to DC mode!\n");
+-				smu_v13_0_ack_ac_dc_interrupt(smu);
++				schedule_work(&smu->interrupt_work);
+ 				adev->pm.ac_power = false;
+ 				break;
+ 			case SMU_IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING:
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+index a9373968807164..cd2cf0ffc0f5cb 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+@@ -3126,6 +3126,7 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
+ 	.is_asic_wbrf_supported = smu_v13_0_0_wbrf_support_check,
+ 	.enable_uclk_shadow = smu_v13_0_enable_uclk_shadow,
+ 	.set_wbrf_exclusion_ranges = smu_v13_0_set_wbrf_exclusion_ranges,
++	.interrupt_work = smu_v13_0_interrupt_work,
+ };
+ 
+ void smu_v13_0_0_set_ppt_funcs(struct smu_context *smu)
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+index 1aedfafa507f7e..7c753d795287d9 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+@@ -2704,6 +2704,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
+ 	.is_asic_wbrf_supported = smu_v13_0_7_wbrf_support_check,
+ 	.enable_uclk_shadow = smu_v13_0_enable_uclk_shadow,
+ 	.set_wbrf_exclusion_ranges = smu_v13_0_set_wbrf_exclusion_ranges,
++	.interrupt_work = smu_v13_0_interrupt_work,
+ };
+ 
+ void smu_v13_0_7_set_ppt_funcs(struct smu_context *smu)
+diff --git a/drivers/gpu/drm/mediatek/Kconfig b/drivers/gpu/drm/mediatek/Kconfig
+index 417ac8c9af4194..a749c01199d40e 100644
+--- a/drivers/gpu/drm/mediatek/Kconfig
++++ b/drivers/gpu/drm/mediatek/Kconfig
+@@ -13,9 +13,6 @@ config DRM_MEDIATEK
+ 	select DRM_BRIDGE_CONNECTOR
+ 	select DRM_MIPI_DSI
+ 	select DRM_PANEL
+-	select MEMORY
+-	select MTK_SMI
+-	select PHY_MTK_MIPI_DSI
+ 	select VIDEOMODE_HELPERS
+ 	help
+ 	  Choose this option if you have a Mediatek SoCs.
+@@ -26,7 +23,6 @@ config DRM_MEDIATEK
+ config DRM_MEDIATEK_DP
+ 	tristate "DRM DPTX Support for MediaTek SoCs"
+ 	depends on DRM_MEDIATEK
+-	select PHY_MTK_DP
+ 	select DRM_DISPLAY_HELPER
+ 	select DRM_DISPLAY_DP_HELPER
+ 	select DRM_DISPLAY_DP_AUX_BUS
+@@ -37,6 +33,5 @@ config DRM_MEDIATEK_HDMI
+ 	tristate "DRM HDMI Support for Mediatek SoCs"
+ 	depends on DRM_MEDIATEK
+ 	select SND_SOC_HDMI_CODEC if SND_SOC
+-	select PHY_MTK_HDMI
+ 	help
+ 	  DRM/KMS HDMI driver for Mediatek SoCs
+diff --git a/drivers/gpu/drm/mediatek/mtk_crtc.c b/drivers/gpu/drm/mediatek/mtk_crtc.c
+index eb0e1233ad0435..5674f5707cca83 100644
+--- a/drivers/gpu/drm/mediatek/mtk_crtc.c
++++ b/drivers/gpu/drm/mediatek/mtk_crtc.c
+@@ -112,6 +112,11 @@ static void mtk_drm_finish_page_flip(struct mtk_crtc *mtk_crtc)
+ 
+ 	drm_crtc_handle_vblank(&mtk_crtc->base);
+ 
++#if IS_REACHABLE(CONFIG_MTK_CMDQ)
++	if (mtk_crtc->cmdq_client.chan)
++		return;
++#endif
++
+ 	spin_lock_irqsave(&mtk_crtc->config_lock, flags);
+ 	if (!mtk_crtc->config_updating && mtk_crtc->pending_needs_vblank) {
+ 		mtk_crtc_finish_page_flip(mtk_crtc);
+@@ -284,10 +289,8 @@ static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg)
+ 	state = to_mtk_crtc_state(mtk_crtc->base.state);
+ 
+ 	spin_lock_irqsave(&mtk_crtc->config_lock, flags);
+-	if (mtk_crtc->config_updating) {
+-		spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);
++	if (mtk_crtc->config_updating)
+ 		goto ddp_cmdq_cb_out;
+-	}
+ 
+ 	state->pending_config = false;
+ 
+@@ -315,10 +318,15 @@ static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg)
+ 		mtk_crtc->pending_async_planes = false;
+ 	}
+ 
+-	spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);
+-
+ ddp_cmdq_cb_out:
+ 
++	if (mtk_crtc->pending_needs_vblank) {
++		mtk_crtc_finish_page_flip(mtk_crtc);
++		mtk_crtc->pending_needs_vblank = false;
++	}
++
++	spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);
++
+ 	mtk_crtc->cmdq_vblank_cnt = 0;
+ 	wake_up(&mtk_crtc->cb_blocking_queue);
+ }
+@@ -606,13 +614,18 @@ static void mtk_crtc_update_config(struct mtk_crtc *mtk_crtc, bool needs_vblank)
+ 		 */
+ 		mtk_crtc->cmdq_vblank_cnt = 3;
+ 
++		spin_lock_irqsave(&mtk_crtc->config_lock, flags);
++		mtk_crtc->config_updating = false;
++		spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);
++
+ 		mbox_send_message(mtk_crtc->cmdq_client.chan, cmdq_handle);
+ 		mbox_client_txdone(mtk_crtc->cmdq_client.chan, 0);
+ 	}
+-#endif
++#else
+ 	spin_lock_irqsave(&mtk_crtc->config_lock, flags);
+ 	mtk_crtc->config_updating = false;
+ 	spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);
++#endif
+ 
+ 	mutex_unlock(&mtk_crtc->hw_lock);
+ }
+diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+index e0c0bb01f65ae0..19b0d508398198 100644
+--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
++++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+@@ -460,6 +460,29 @@ static unsigned int mtk_ovl_fmt_convert(struct mtk_disp_ovl *ovl,
+ 	}
+ }
+ 
++static void mtk_ovl_afbc_layer_config(struct mtk_disp_ovl *ovl,
++				      unsigned int idx,
++				      struct mtk_plane_pending_state *pending,
++				      struct cmdq_pkt *cmdq_pkt)
++{
++	unsigned int pitch_msb = pending->pitch >> 16;
++	unsigned int hdr_pitch = pending->hdr_pitch;
++	unsigned int hdr_addr = pending->hdr_addr;
++
++	if (pending->modifier != DRM_FORMAT_MOD_LINEAR) {
++		mtk_ddp_write_relaxed(cmdq_pkt, hdr_addr, &ovl->cmdq_reg, ovl->regs,
++				      DISP_REG_OVL_HDR_ADDR(ovl, idx));
++		mtk_ddp_write_relaxed(cmdq_pkt,
++				      OVL_PITCH_MSB_2ND_SUBBUF | pitch_msb,
++				      &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_PITCH_MSB(idx));
++		mtk_ddp_write_relaxed(cmdq_pkt, hdr_pitch, &ovl->cmdq_reg, ovl->regs,
++				      DISP_REG_OVL_HDR_PITCH(ovl, idx));
++	} else {
++		mtk_ddp_write_relaxed(cmdq_pkt, pitch_msb,
++				      &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_PITCH_MSB(idx));
++	}
++}
++
+ void mtk_ovl_layer_config(struct device *dev, unsigned int idx,
+ 			  struct mtk_plane_state *state,
+ 			  struct cmdq_pkt *cmdq_pkt)
+@@ -467,25 +490,14 @@ void mtk_ovl_layer_config(struct device *dev, unsigned int idx,
+ 	struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
+ 	struct mtk_plane_pending_state *pending = &state->pending;
+ 	unsigned int addr = pending->addr;
+-	unsigned int hdr_addr = pending->hdr_addr;
+-	unsigned int pitch = pending->pitch;
+-	unsigned int hdr_pitch = pending->hdr_pitch;
++	unsigned int pitch_lsb = pending->pitch & GENMASK(15, 0);
+ 	unsigned int fmt = pending->format;
++	unsigned int rotation = pending->rotation;
+ 	unsigned int offset = (pending->y << 16) | pending->x;
+ 	unsigned int src_size = (pending->height << 16) | pending->width;
+ 	unsigned int blend_mode = state->base.pixel_blend_mode;
+ 	unsigned int ignore_pixel_alpha = 0;
+ 	unsigned int con;
+-	bool is_afbc = pending->modifier != DRM_FORMAT_MOD_LINEAR;
+-	union overlay_pitch {
+-		struct split_pitch {
+-			u16 lsb;
+-			u16 msb;
+-		} split_pitch;
+-		u32 pitch;
+-	} overlay_pitch;
+-
+-	overlay_pitch.pitch = pitch;
+ 
+ 	if (!pending->enable) {
+ 		mtk_ovl_layer_off(dev, idx, cmdq_pkt);
+@@ -513,22 +525,30 @@ void mtk_ovl_layer_config(struct device *dev, unsigned int idx,
+ 			ignore_pixel_alpha = OVL_CONST_BLEND;
+ 	}
+ 
+-	if (pending->rotation & DRM_MODE_REFLECT_Y) {
++	/*
++	 * Treat rotate 180 as flip x + flip y, and XOR the original rotation value
++	 * to flip x + flip y to support both in the same time.
++	 */
++	if (rotation & DRM_MODE_ROTATE_180)
++		rotation ^= DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y;
++
++	if (rotation & DRM_MODE_REFLECT_Y) {
+ 		con |= OVL_CON_VIRT_FLIP;
+ 		addr += (pending->height - 1) * pending->pitch;
+ 	}
+ 
+-	if (pending->rotation & DRM_MODE_REFLECT_X) {
++	if (rotation & DRM_MODE_REFLECT_X) {
+ 		con |= OVL_CON_HORZ_FLIP;
+ 		addr += pending->pitch - 1;
+ 	}
+ 
+ 	if (ovl->data->supports_afbc)
+-		mtk_ovl_set_afbc(ovl, cmdq_pkt, idx, is_afbc);
++		mtk_ovl_set_afbc(ovl, cmdq_pkt, idx,
++				 pending->modifier != DRM_FORMAT_MOD_LINEAR);
+ 
+ 	mtk_ddp_write_relaxed(cmdq_pkt, con, &ovl->cmdq_reg, ovl->regs,
+ 			      DISP_REG_OVL_CON(idx));
+-	mtk_ddp_write_relaxed(cmdq_pkt, overlay_pitch.split_pitch.lsb | ignore_pixel_alpha,
++	mtk_ddp_write_relaxed(cmdq_pkt, pitch_lsb | ignore_pixel_alpha,
+ 			      &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_PITCH(idx));
+ 	mtk_ddp_write_relaxed(cmdq_pkt, src_size, &ovl->cmdq_reg, ovl->regs,
+ 			      DISP_REG_OVL_SRC_SIZE(idx));
+@@ -537,19 +557,8 @@ void mtk_ovl_layer_config(struct device *dev, unsigned int idx,
+ 	mtk_ddp_write_relaxed(cmdq_pkt, addr, &ovl->cmdq_reg, ovl->regs,
+ 			      DISP_REG_OVL_ADDR(ovl, idx));
+ 
+-	if (is_afbc) {
+-		mtk_ddp_write_relaxed(cmdq_pkt, hdr_addr, &ovl->cmdq_reg, ovl->regs,
+-				      DISP_REG_OVL_HDR_ADDR(ovl, idx));
+-		mtk_ddp_write_relaxed(cmdq_pkt,
+-				      OVL_PITCH_MSB_2ND_SUBBUF | overlay_pitch.split_pitch.msb,
+-				      &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_PITCH_MSB(idx));
+-		mtk_ddp_write_relaxed(cmdq_pkt, hdr_pitch, &ovl->cmdq_reg, ovl->regs,
+-				      DISP_REG_OVL_HDR_PITCH(ovl, idx));
+-	} else {
+-		mtk_ddp_write_relaxed(cmdq_pkt,
+-				      overlay_pitch.split_pitch.msb,
+-				      &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_PITCH_MSB(idx));
+-	}
++	if (ovl->data->supports_afbc)
++		mtk_ovl_afbc_layer_config(ovl, idx, pending, cmdq_pkt);
+ 
+ 	mtk_ovl_set_bit_depth(dev, idx, fmt, cmdq_pkt);
+ 	mtk_ovl_layer_on(dev, idx, cmdq_pkt);
+diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c
+index f2bee617f063a7..cad65ea851edc7 100644
+--- a/drivers/gpu/drm/mediatek/mtk_dp.c
++++ b/drivers/gpu/drm/mediatek/mtk_dp.c
+@@ -543,18 +543,16 @@ static int mtk_dp_set_color_format(struct mtk_dp *mtk_dp,
+ 				   enum dp_pixelformat color_format)
+ {
+ 	u32 val;
+-
+-	/* update MISC0 */
+-	mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3034,
+-			   color_format << DP_TEST_COLOR_FORMAT_SHIFT,
+-			   DP_TEST_COLOR_FORMAT_MASK);
++	u32 misc0_color;
+ 
+ 	switch (color_format) {
+ 	case DP_PIXELFORMAT_YUV422:
+ 		val = PIXEL_ENCODE_FORMAT_DP_ENC0_P0_YCBCR422;
++		misc0_color = DP_COLOR_FORMAT_YCbCr422;
+ 		break;
+ 	case DP_PIXELFORMAT_RGB:
+ 		val = PIXEL_ENCODE_FORMAT_DP_ENC0_P0_RGB;
++		misc0_color = DP_COLOR_FORMAT_RGB;
+ 		break;
+ 	default:
+ 		drm_warn(mtk_dp->drm_dev, "Unsupported color format: %d\n",
+@@ -562,6 +560,11 @@ static int mtk_dp_set_color_format(struct mtk_dp *mtk_dp,
+ 		return -EINVAL;
+ 	}
+ 
++	/* update MISC0 */
++	mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3034,
++			   misc0_color,
++			   DP_TEST_COLOR_FORMAT_MASK);
++
+ 	mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_303C,
+ 			   val, PIXEL_ENCODE_FORMAT_DP_ENC0_P0_MASK);
+ 	return 0;
+@@ -2100,7 +2103,6 @@ static enum drm_connector_status mtk_dp_bdg_detect(struct drm_bridge *bridge)
+ 	struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge);
+ 	enum drm_connector_status ret = connector_status_disconnected;
+ 	bool enabled = mtk_dp->enabled;
+-	u8 sink_count = 0;
+ 
+ 	if (!mtk_dp->train_info.cable_plugged_in)
+ 		return ret;
+@@ -2115,8 +2117,8 @@ static enum drm_connector_status mtk_dp_bdg_detect(struct drm_bridge *bridge)
+ 	 * function, we just need to check the HPD connection to check
+ 	 * whether we connect to a sink device.
+ 	 */
+-	drm_dp_dpcd_readb(&mtk_dp->aux, DP_SINK_COUNT, &sink_count);
+-	if (DP_GET_SINK_COUNT(sink_count))
++
++	if (drm_dp_read_sink_count(&mtk_dp->aux) > 0)
+ 		ret = connector_status_connected;
+ 
+ 	if (!enabled)
+@@ -2408,12 +2410,19 @@ mtk_dp_bridge_mode_valid(struct drm_bridge *bridge,
+ {
+ 	struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge);
+ 	u32 bpp = info->color_formats & DRM_COLOR_FORMAT_YCBCR422 ? 16 : 24;
+-	u32 rate = min_t(u32, drm_dp_max_link_rate(mtk_dp->rx_cap) *
+-			      drm_dp_max_lane_count(mtk_dp->rx_cap),
+-			 drm_dp_bw_code_to_link_rate(mtk_dp->max_linkrate) *
+-			 mtk_dp->max_lanes);
++	u32 lane_count_min = mtk_dp->train_info.lane_count;
++	u32 rate = drm_dp_bw_code_to_link_rate(mtk_dp->train_info.link_rate) *
++		   lane_count_min;
+ 
+-	if (rate < mode->clock * bpp / 8)
++	/*
++	 *FEC overhead is approximately 2.4% from DP 1.4a spec 2.2.1.4.2.
++	 *The down-spread amplitude shall either be disabled (0.0%) or up
++	 *to 0.5% from 1.4a 3.5.2.6. Add up to approximately 3% total overhead.
++	 *
++	 *Because rate is already divided by 10,
++	 *mode->clock does not need to be multiplied by 10
++	 */
++	if ((rate * 97 / 100) < (mode->clock * bpp / 8))
+ 		return MODE_CLOCK_HIGH;
+ 
+ 	return MODE_OK;
+@@ -2454,10 +2463,9 @@ static u32 *mtk_dp_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
+ 	struct drm_display_mode *mode = &crtc_state->adjusted_mode;
+ 	struct drm_display_info *display_info =
+ 		&conn_state->connector->display_info;
+-	u32 rate = min_t(u32, drm_dp_max_link_rate(mtk_dp->rx_cap) *
+-			      drm_dp_max_lane_count(mtk_dp->rx_cap),
+-			 drm_dp_bw_code_to_link_rate(mtk_dp->max_linkrate) *
+-			 mtk_dp->max_lanes);
++	u32 lane_count_min = mtk_dp->train_info.lane_count;
++	u32 rate = drm_dp_bw_code_to_link_rate(mtk_dp->train_info.link_rate) *
++		   lane_count_min;
+ 
+ 	*num_input_fmts = 0;
+ 
+@@ -2466,8 +2474,8 @@ static u32 *mtk_dp_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
+ 	 * datarate of YUV422 and sink device supports YUV422, we output YUV422
+ 	 * format. Use this condition, we can support more resolution.
+ 	 */
+-	if ((rate < (mode->clock * 24 / 8)) &&
+-	    (rate > (mode->clock * 16 / 8)) &&
++	if (((rate * 97 / 100) < (mode->clock * 24 / 8)) &&
++	    ((rate * 97 / 100) > (mode->clock * 16 / 8)) &&
+ 	    (display_info->color_formats & DRM_COLOR_FORMAT_YCBCR422)) {
+ 		input_fmts = kcalloc(1, sizeof(*input_fmts), GFP_KERNEL);
+ 		if (!input_fmts)
+diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+index 2c1cb335d8623f..4e93fd075e03cc 100644
+--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
++++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+@@ -673,6 +673,8 @@ static int mtk_drm_bind(struct device *dev)
+ err_free:
+ 	private->drm = NULL;
+ 	drm_dev_put(drm);
++	for (i = 0; i < private->data->mmsys_dev_num; i++)
++		private->all_drm_private[i]->drm = NULL;
+ 	return ret;
+ }
+ 
+diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
+index eeec641cab60db..b9b7fd08b7d7e9 100644
+--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
++++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
+@@ -139,11 +139,11 @@
+ #define CLK_HS_POST			GENMASK(15, 8)
+ #define CLK_HS_EXIT			GENMASK(23, 16)
+ 
+-#define DSI_VM_CMD_CON		0x130
++/* DSI_VM_CMD_CON */
+ #define VM_CMD_EN			BIT(0)
+ #define TS_VFP_EN			BIT(5)
+ 
+-#define DSI_SHADOW_DEBUG	0x190U
++/* DSI_SHADOW_DEBUG */
+ #define FORCE_COMMIT			BIT(0)
+ #define BYPASS_SHADOW			BIT(1)
+ 
+@@ -187,6 +187,8 @@ struct phy;
+ 
+ struct mtk_dsi_driver_data {
+ 	const u32 reg_cmdq_off;
++	const u32 reg_vm_cmd_off;
++	const u32 reg_shadow_dbg_off;
+ 	bool has_shadow_ctl;
+ 	bool has_size_ctl;
+ 	bool cmdq_long_packet_ctl;
+@@ -246,23 +248,22 @@ static void mtk_dsi_phy_timconfig(struct mtk_dsi *dsi)
+ 	u32 data_rate_mhz = DIV_ROUND_UP(dsi->data_rate, HZ_PER_MHZ);
+ 	struct mtk_phy_timing *timing = &dsi->phy_timing;
+ 
+-	timing->lpx = (80 * data_rate_mhz / (8 * 1000)) + 1;
+-	timing->da_hs_prepare = (59 * data_rate_mhz + 4 * 1000) / 8000 + 1;
+-	timing->da_hs_zero = (163 * data_rate_mhz + 11 * 1000) / 8000 + 1 -
++	timing->lpx = (60 * data_rate_mhz / (8 * 1000)) + 1;
++	timing->da_hs_prepare = (80 * data_rate_mhz + 4 * 1000) / 8000;
++	timing->da_hs_zero = (170 * data_rate_mhz + 10 * 1000) / 8000 + 1 -
+ 			     timing->da_hs_prepare;
+-	timing->da_hs_trail = (78 * data_rate_mhz + 7 * 1000) / 8000 + 1;
++	timing->da_hs_trail = timing->da_hs_prepare + 1;
+ 
+-	timing->ta_go = 4 * timing->lpx;
+-	timing->ta_sure = 3 * timing->lpx / 2;
+-	timing->ta_get = 5 * timing->lpx;
+-	timing->da_hs_exit = (118 * data_rate_mhz / (8 * 1000)) + 1;
++	timing->ta_go = 4 * timing->lpx - 2;
++	timing->ta_sure = timing->lpx + 2;
++	timing->ta_get = 4 * timing->lpx;
++	timing->da_hs_exit = 2 * timing->lpx + 1;
+ 
+-	timing->clk_hs_prepare = (57 * data_rate_mhz / (8 * 1000)) + 1;
+-	timing->clk_hs_post = (65 * data_rate_mhz + 53 * 1000) / 8000 + 1;
+-	timing->clk_hs_trail = (78 * data_rate_mhz + 7 * 1000) / 8000 + 1;
+-	timing->clk_hs_zero = (330 * data_rate_mhz / (8 * 1000)) + 1 -
+-			      timing->clk_hs_prepare;
+-	timing->clk_hs_exit = (118 * data_rate_mhz / (8 * 1000)) + 1;
++	timing->clk_hs_prepare = 70 * data_rate_mhz / (8 * 1000);
++	timing->clk_hs_post = timing->clk_hs_prepare + 8;
++	timing->clk_hs_trail = timing->clk_hs_prepare;
++	timing->clk_hs_zero = timing->clk_hs_trail * 4;
++	timing->clk_hs_exit = 2 * timing->clk_hs_trail;
+ 
+ 	timcon0 = FIELD_PREP(LPX, timing->lpx) |
+ 		  FIELD_PREP(HS_PREP, timing->da_hs_prepare) |
+@@ -367,8 +368,8 @@ static void mtk_dsi_set_mode(struct mtk_dsi *dsi)
+ 
+ static void mtk_dsi_set_vm_cmd(struct mtk_dsi *dsi)
+ {
+-	mtk_dsi_mask(dsi, DSI_VM_CMD_CON, VM_CMD_EN, VM_CMD_EN);
+-	mtk_dsi_mask(dsi, DSI_VM_CMD_CON, TS_VFP_EN, TS_VFP_EN);
++	mtk_dsi_mask(dsi, dsi->driver_data->reg_vm_cmd_off, VM_CMD_EN, VM_CMD_EN);
++	mtk_dsi_mask(dsi, dsi->driver_data->reg_vm_cmd_off, TS_VFP_EN, TS_VFP_EN);
+ }
+ 
+ static void mtk_dsi_rxtx_control(struct mtk_dsi *dsi)
+@@ -714,7 +715,7 @@ static int mtk_dsi_poweron(struct mtk_dsi *dsi)
+ 
+ 	if (dsi->driver_data->has_shadow_ctl)
+ 		writel(FORCE_COMMIT | BYPASS_SHADOW,
+-		       dsi->regs + DSI_SHADOW_DEBUG);
++		       dsi->regs + dsi->driver_data->reg_shadow_dbg_off);
+ 
+ 	mtk_dsi_reset_engine(dsi);
+ 	mtk_dsi_phy_timconfig(dsi);
+@@ -1255,26 +1256,36 @@ static void mtk_dsi_remove(struct platform_device *pdev)
+ 
+ static const struct mtk_dsi_driver_data mt8173_dsi_driver_data = {
+ 	.reg_cmdq_off = 0x200,
++	.reg_vm_cmd_off = 0x130,
++	.reg_shadow_dbg_off = 0x190
+ };
+ 
+ static const struct mtk_dsi_driver_data mt2701_dsi_driver_data = {
+ 	.reg_cmdq_off = 0x180,
++	.reg_vm_cmd_off = 0x130,
++	.reg_shadow_dbg_off = 0x190
+ };
+ 
+ static const struct mtk_dsi_driver_data mt8183_dsi_driver_data = {
+ 	.reg_cmdq_off = 0x200,
++	.reg_vm_cmd_off = 0x130,
++	.reg_shadow_dbg_off = 0x190,
+ 	.has_shadow_ctl = true,
+ 	.has_size_ctl = true,
+ };
+ 
+ static const struct mtk_dsi_driver_data mt8186_dsi_driver_data = {
+ 	.reg_cmdq_off = 0xd00,
++	.reg_vm_cmd_off = 0x200,
++	.reg_shadow_dbg_off = 0xc00,
+ 	.has_shadow_ctl = true,
+ 	.has_size_ctl = true,
+ };
+ 
+ static const struct mtk_dsi_driver_data mt8188_dsi_driver_data = {
+ 	.reg_cmdq_off = 0xd00,
++	.reg_vm_cmd_off = 0x200,
++	.reg_shadow_dbg_off = 0xc00,
+ 	.has_shadow_ctl = true,
+ 	.has_size_ctl = true,
+ 	.cmdq_long_packet_ctl = true,
+diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
+index d5fd6a089b7ccc..b940688c361356 100644
+--- a/drivers/gpu/drm/xe/xe_gt.c
++++ b/drivers/gpu/drm/xe/xe_gt.c
+@@ -386,6 +386,10 @@ int xe_gt_init_early(struct xe_gt *gt)
+ 	xe_force_wake_init_gt(gt, gt_to_fw(gt));
+ 	spin_lock_init(&gt->global_invl_lock);
+ 
++	err = xe_gt_tlb_invalidation_init_early(gt);
++	if (err)
++		return err;
++
+ 	return 0;
+ }
+ 
+@@ -585,10 +589,6 @@ int xe_gt_init(struct xe_gt *gt)
+ 		xe_hw_fence_irq_init(&gt->fence_irq[i]);
+ 	}
+ 
+-	err = xe_gt_tlb_invalidation_init(gt);
+-	if (err)
+-		return err;
+-
+ 	err = xe_gt_pagefault_init(gt);
+ 	if (err)
+ 		return err;
+diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
+index 7e385940df0863..ace1fe831a7b72 100644
+--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
++++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
+@@ -106,7 +106,7 @@ static void xe_gt_tlb_fence_timeout(struct work_struct *work)
+ }
+ 
+ /**
+- * xe_gt_tlb_invalidation_init - Initialize GT TLB invalidation state
++ * xe_gt_tlb_invalidation_init_early - Initialize GT TLB invalidation state
+  * @gt: graphics tile
+  *
+  * Initialize GT TLB invalidation state, purely software initialization, should
+@@ -114,7 +114,7 @@ static void xe_gt_tlb_fence_timeout(struct work_struct *work)
+  *
+  * Return: 0 on success, negative error code on error.
+  */
+-int xe_gt_tlb_invalidation_init(struct xe_gt *gt)
++int xe_gt_tlb_invalidation_init_early(struct xe_gt *gt)
+ {
+ 	gt->tlb_invalidation.seqno = 1;
+ 	INIT_LIST_HEAD(&gt->tlb_invalidation.pending_fences);
+diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
+index 00b1c6c01e8d95..672acfcdf0d70d 100644
+--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
++++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
+@@ -14,7 +14,8 @@ struct xe_gt;
+ struct xe_guc;
+ struct xe_vma;
+ 
+-int xe_gt_tlb_invalidation_init(struct xe_gt *gt);
++int xe_gt_tlb_invalidation_init_early(struct xe_gt *gt);
++
+ void xe_gt_tlb_invalidation_reset(struct xe_gt *gt);
+ int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt);
+ int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
+diff --git a/drivers/hwmon/drivetemp.c b/drivers/hwmon/drivetemp.c
+index 6bdd21aa005ab8..2a4ec55ddb47ed 100644
+--- a/drivers/hwmon/drivetemp.c
++++ b/drivers/hwmon/drivetemp.c
+@@ -165,6 +165,7 @@ static int drivetemp_scsi_command(struct drivetemp_data *st,
+ {
+ 	u8 scsi_cmd[MAX_COMMAND_SIZE];
+ 	enum req_op op;
++	int err;
+ 
+ 	memset(scsi_cmd, 0, sizeof(scsi_cmd));
+ 	scsi_cmd[0] = ATA_16;
+@@ -192,8 +193,11 @@ static int drivetemp_scsi_command(struct drivetemp_data *st,
+ 	scsi_cmd[12] = lba_high;
+ 	scsi_cmd[14] = ata_command;
+ 
+-	return scsi_execute_cmd(st->sdev, scsi_cmd, op, st->smartdata,
+-				ATA_SECT_SIZE, HZ, 5, NULL);
++	err = scsi_execute_cmd(st->sdev, scsi_cmd, op, st->smartdata,
++			       ATA_SECT_SIZE, HZ, 5, NULL);
++	if (err > 0)
++		err = -EIO;
++	return err;
+ }
+ 
+ static int drivetemp_ata_command(struct drivetemp_data *st, u8 feature,
+diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c
+index b79c48d46cccf8..8d94bc2b1cac35 100644
+--- a/drivers/iio/adc/ad7124.c
++++ b/drivers/iio/adc/ad7124.c
+@@ -917,6 +917,9 @@ static int ad7124_setup(struct ad7124_state *st)
+ 		 * set all channels to this default value.
+ 		 */
+ 		ad7124_set_channel_odr(st, i, 10);
++
++		/* Disable all channels to prevent unintended conversions. */
++		ad_sd_write_reg(&st->sd, AD7124_CHANNEL(i), 2, 0);
+ 	}
+ 
+ 	ret = ad_sd_write_reg(&st->sd, AD7124_ADC_CONTROL, 2, st->adc_control);
+diff --git a/drivers/iio/adc/ad7173.c b/drivers/iio/adc/ad7173.c
+index 0702ec71aa2933..5a65be00dd190f 100644
+--- a/drivers/iio/adc/ad7173.c
++++ b/drivers/iio/adc/ad7173.c
+@@ -198,6 +198,7 @@ struct ad7173_channel {
+ 
+ struct ad7173_state {
+ 	struct ad_sigma_delta sd;
++	struct ad_sigma_delta_info sigma_delta_info;
+ 	const struct ad7173_device_info *info;
+ 	struct ad7173_channel *channels;
+ 	struct regulator_bulk_data regulators[3];
+@@ -733,7 +734,7 @@ static int ad7173_disable_one(struct ad_sigma_delta *sd, unsigned int chan)
+ 	return ad_sd_write_reg(sd, AD7173_REG_CH(chan), 2, 0);
+ }
+ 
+-static struct ad_sigma_delta_info ad7173_sigma_delta_info = {
++static const struct ad_sigma_delta_info ad7173_sigma_delta_info = {
+ 	.set_channel = ad7173_set_channel,
+ 	.append_status = ad7173_append_status,
+ 	.disable_all = ad7173_disable_all,
+@@ -1371,7 +1372,7 @@ static int ad7173_fw_parse_device_config(struct iio_dev *indio_dev)
+ 	if (ret < 0)
+ 		return dev_err_probe(dev, ret, "Interrupt 'rdy' is required\n");
+ 
+-	ad7173_sigma_delta_info.irq_line = ret;
++	st->sigma_delta_info.irq_line = ret;
+ 
+ 	return ad7173_fw_parse_channel_config(indio_dev);
+ }
+@@ -1404,8 +1405,9 @@ static int ad7173_probe(struct spi_device *spi)
+ 	spi->mode = SPI_MODE_3;
+ 	spi_setup(spi);
+ 
+-	ad7173_sigma_delta_info.num_slots = st->info->num_configs;
+-	ret = ad_sd_init(&st->sd, indio_dev, spi, &ad7173_sigma_delta_info);
++	st->sigma_delta_info = ad7173_sigma_delta_info;
++	st->sigma_delta_info.num_slots = st->info->num_configs;
++	ret = ad_sd_init(&st->sd, indio_dev, spi, &st->sigma_delta_info);
+ 	if (ret)
+ 		return ret;
+ 
+diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
+index 9c39acff17e658..6ab0e88f6895af 100644
+--- a/drivers/iio/adc/at91_adc.c
++++ b/drivers/iio/adc/at91_adc.c
+@@ -979,7 +979,7 @@ static int at91_ts_register(struct iio_dev *idev,
+ 	return ret;
+ 
+ err:
+-	input_free_device(st->ts_input);
++	input_free_device(input);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/iio/adc/rockchip_saradc.c b/drivers/iio/adc/rockchip_saradc.c
+index 240cfa391674e7..dfd47a6e1f4a1b 100644
+--- a/drivers/iio/adc/rockchip_saradc.c
++++ b/drivers/iio/adc/rockchip_saradc.c
+@@ -368,6 +368,8 @@ static irqreturn_t rockchip_saradc_trigger_handler(int irq, void *p)
+ 	int ret;
+ 	int i, j = 0;
+ 
++	memset(&data, 0, sizeof(data));
++
+ 	mutex_lock(&info->lock);
+ 
+ 	iio_for_each_active_channel(i_dev, i) {
+diff --git a/drivers/iio/adc/ti-ads1119.c b/drivers/iio/adc/ti-ads1119.c
+index 1c760637514928..6637cb6a6dda4a 100644
+--- a/drivers/iio/adc/ti-ads1119.c
++++ b/drivers/iio/adc/ti-ads1119.c
+@@ -500,12 +500,14 @@ static irqreturn_t ads1119_trigger_handler(int irq, void *private)
+ 	struct iio_dev *indio_dev = pf->indio_dev;
+ 	struct ads1119_state *st = iio_priv(indio_dev);
+ 	struct {
+-		unsigned int sample;
++		s16 sample;
+ 		s64 timestamp __aligned(8);
+ 	} scan;
+ 	unsigned int index;
+ 	int ret;
+ 
++	memset(&scan, 0, sizeof(scan));
++
+ 	if (!iio_trigger_using_own(indio_dev)) {
+ 		index = find_first_bit(indio_dev->active_scan_mask,
+ 				       iio_get_masklength(indio_dev));
+diff --git a/drivers/iio/adc/ti-ads124s08.c b/drivers/iio/adc/ti-ads124s08.c
+index 425b48d8986f52..f452f57f11c956 100644
+--- a/drivers/iio/adc/ti-ads124s08.c
++++ b/drivers/iio/adc/ti-ads124s08.c
+@@ -183,9 +183,9 @@ static int ads124s_reset(struct iio_dev *indio_dev)
+ 	struct ads124s_private *priv = iio_priv(indio_dev);
+ 
+ 	if (priv->reset_gpio) {
+-		gpiod_set_value(priv->reset_gpio, 0);
++		gpiod_set_value_cansleep(priv->reset_gpio, 0);
+ 		udelay(200);
+-		gpiod_set_value(priv->reset_gpio, 1);
++		gpiod_set_value_cansleep(priv->reset_gpio, 1);
+ 	} else {
+ 		return ads124s_write_cmd(indio_dev, ADS124S08_CMD_RESET);
+ 	}
+diff --git a/drivers/iio/adc/ti-ads1298.c b/drivers/iio/adc/ti-ads1298.c
+index 0f9f75baaebbf7..d00cd169e8dfd5 100644
+--- a/drivers/iio/adc/ti-ads1298.c
++++ b/drivers/iio/adc/ti-ads1298.c
+@@ -613,6 +613,8 @@ static int ads1298_init(struct iio_dev *indio_dev)
+ 	}
+ 	indio_dev->name = devm_kasprintf(dev, GFP_KERNEL, "ads129%u%s",
+ 					 indio_dev->num_channels, suffix);
++	if (!indio_dev->name)
++		return -ENOMEM;
+ 
+ 	/* Enable internal test signal, double amplitude, double frequency */
+ 	ret = regmap_write(priv->regmap, ADS1298_REG_CONFIG2,
+diff --git a/drivers/iio/adc/ti-ads8688.c b/drivers/iio/adc/ti-ads8688.c
+index 9b1814f1965a37..a31658b760a4ae 100644
+--- a/drivers/iio/adc/ti-ads8688.c
++++ b/drivers/iio/adc/ti-ads8688.c
+@@ -381,7 +381,7 @@ static irqreturn_t ads8688_trigger_handler(int irq, void *p)
+ 	struct iio_poll_func *pf = p;
+ 	struct iio_dev *indio_dev = pf->indio_dev;
+ 	/* Ensure naturally aligned timestamp */
+-	u16 buffer[ADS8688_MAX_CHANNELS + sizeof(s64)/sizeof(u16)] __aligned(8);
++	u16 buffer[ADS8688_MAX_CHANNELS + sizeof(s64)/sizeof(u16)] __aligned(8) = { };
+ 	int i, j = 0;
+ 
+ 	iio_for_each_active_channel(indio_dev, i) {
+diff --git a/drivers/iio/dummy/iio_simple_dummy_buffer.c b/drivers/iio/dummy/iio_simple_dummy_buffer.c
+index 4ca3f1aaff9996..288880346707a2 100644
+--- a/drivers/iio/dummy/iio_simple_dummy_buffer.c
++++ b/drivers/iio/dummy/iio_simple_dummy_buffer.c
+@@ -48,7 +48,7 @@ static irqreturn_t iio_simple_dummy_trigger_h(int irq, void *p)
+ 	int i = 0, j;
+ 	u16 *data;
+ 
+-	data = kmalloc(indio_dev->scan_bytes, GFP_KERNEL);
++	data = kzalloc(indio_dev->scan_bytes, GFP_KERNEL);
+ 	if (!data)
+ 		goto done;
+ 
+diff --git a/drivers/iio/gyro/fxas21002c_core.c b/drivers/iio/gyro/fxas21002c_core.c
+index c28d17ca6f5ee0..aabc5e2d788d15 100644
+--- a/drivers/iio/gyro/fxas21002c_core.c
++++ b/drivers/iio/gyro/fxas21002c_core.c
+@@ -730,14 +730,21 @@ static irqreturn_t fxas21002c_trigger_handler(int irq, void *p)
+ 	int ret;
+ 
+ 	mutex_lock(&data->lock);
++	ret = fxas21002c_pm_get(data);
++	if (ret < 0)
++		goto out_unlock;
++
+ 	ret = regmap_bulk_read(data->regmap, FXAS21002C_REG_OUT_X_MSB,
+ 			       data->buffer, CHANNEL_SCAN_MAX * sizeof(s16));
+ 	if (ret < 0)
+-		goto out_unlock;
++		goto out_pm_put;
+ 
+ 	iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
+ 					   data->timestamp);
+ 
++out_pm_put:
++	fxas21002c_pm_put(data);
++
+ out_unlock:
+ 	mutex_unlock(&data->lock);
+ 
+diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600.h b/drivers/iio/imu/inv_icm42600/inv_icm42600.h
+index 3a07e43e4cf154..18787a43477b89 100644
+--- a/drivers/iio/imu/inv_icm42600/inv_icm42600.h
++++ b/drivers/iio/imu/inv_icm42600/inv_icm42600.h
+@@ -403,6 +403,7 @@ struct inv_icm42600_sensor_state {
+ typedef int (*inv_icm42600_bus_setup)(struct inv_icm42600_state *);
+ 
+ extern const struct regmap_config inv_icm42600_regmap_config;
++extern const struct regmap_config inv_icm42600_spi_regmap_config;
+ extern const struct dev_pm_ops inv_icm42600_pm_ops;
+ 
+ const struct iio_mount_matrix *
+diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
+index c3924cc6190ee4..a0bed49c3ba674 100644
+--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
++++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
+@@ -87,6 +87,21 @@ const struct regmap_config inv_icm42600_regmap_config = {
+ };
+ EXPORT_SYMBOL_NS_GPL(inv_icm42600_regmap_config, IIO_ICM42600);
+ 
++/* define specific regmap for SPI not supporting burst write */
++const struct regmap_config inv_icm42600_spi_regmap_config = {
++	.name = "inv_icm42600",
++	.reg_bits = 8,
++	.val_bits = 8,
++	.max_register = 0x4FFF,
++	.ranges = inv_icm42600_regmap_ranges,
++	.num_ranges = ARRAY_SIZE(inv_icm42600_regmap_ranges),
++	.volatile_table = inv_icm42600_regmap_volatile_accesses,
++	.rd_noinc_table = inv_icm42600_regmap_rd_noinc_accesses,
++	.cache_type = REGCACHE_RBTREE,
++	.use_single_write = true,
++};
++EXPORT_SYMBOL_NS_GPL(inv_icm42600_spi_regmap_config, IIO_ICM42600);
++
+ struct inv_icm42600_hw {
+ 	uint8_t whoami;
+ 	const char *name;
+@@ -822,6 +837,8 @@ static int inv_icm42600_suspend(struct device *dev)
+ static int inv_icm42600_resume(struct device *dev)
+ {
+ 	struct inv_icm42600_state *st = dev_get_drvdata(dev);
++	struct inv_icm42600_sensor_state *gyro_st = iio_priv(st->indio_gyro);
++	struct inv_icm42600_sensor_state *accel_st = iio_priv(st->indio_accel);
+ 	int ret;
+ 
+ 	mutex_lock(&st->lock);
+@@ -842,9 +859,12 @@ static int inv_icm42600_resume(struct device *dev)
+ 		goto out_unlock;
+ 
+ 	/* restore FIFO data streaming */
+-	if (st->fifo.on)
++	if (st->fifo.on) {
++		inv_sensors_timestamp_reset(&gyro_st->ts);
++		inv_sensors_timestamp_reset(&accel_st->ts);
+ 		ret = regmap_write(st->map, INV_ICM42600_REG_FIFO_CONFIG,
+ 				   INV_ICM42600_FIFO_CONFIG_STREAM);
++	}
+ 
+ out_unlock:
+ 	mutex_unlock(&st->lock);
+diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_spi.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_spi.c
+index eae5ff7a3cc102..36fe8d94ec1cc6 100644
+--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_spi.c
++++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_spi.c
+@@ -59,7 +59,8 @@ static int inv_icm42600_probe(struct spi_device *spi)
+ 		return -EINVAL;
+ 	chip = (uintptr_t)match;
+ 
+-	regmap = devm_regmap_init_spi(spi, &inv_icm42600_regmap_config);
++	/* use SPI specific regmap */
++	regmap = devm_regmap_init_spi(spi, &inv_icm42600_spi_regmap_config);
+ 	if (IS_ERR(regmap))
+ 		return PTR_ERR(regmap);
+ 
+diff --git a/drivers/iio/imu/kmx61.c b/drivers/iio/imu/kmx61.c
+index c61c012e25bbaa..53773418610f75 100644
+--- a/drivers/iio/imu/kmx61.c
++++ b/drivers/iio/imu/kmx61.c
+@@ -1192,7 +1192,7 @@ static irqreturn_t kmx61_trigger_handler(int irq, void *p)
+ 	struct kmx61_data *data = kmx61_get_data(indio_dev);
+ 	int bit, ret, i = 0;
+ 	u8 base;
+-	s16 buffer[8];
++	s16 buffer[8] = { };
+ 
+ 	if (indio_dev == data->acc_indio_dev)
+ 		base = KMX61_ACC_XOUT_L;
+diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
+index 3305ebbdbc0787..1155487f7aeac8 100644
+--- a/drivers/iio/inkern.c
++++ b/drivers/iio/inkern.c
+@@ -499,7 +499,7 @@ struct iio_channel *iio_channel_get_all(struct device *dev)
+ 	return_ptr(chans);
+ 
+ error_free_chans:
+-	for (i = 0; i < nummaps; i++)
++	for (i = 0; i < mapind; i++)
+ 		iio_device_put(chans[i].indio_dev);
+ 	return ERR_PTR(ret);
+ }
+diff --git a/drivers/iio/light/bh1745.c b/drivers/iio/light/bh1745.c
+index 2e458e9d5d8530..a025e279df0747 100644
+--- a/drivers/iio/light/bh1745.c
++++ b/drivers/iio/light/bh1745.c
+@@ -750,6 +750,8 @@ static irqreturn_t bh1745_trigger_handler(int interrupt, void *p)
+ 	int i;
+ 	int j = 0;
+ 
++	memset(&scan, 0, sizeof(scan));
++
+ 	iio_for_each_active_channel(indio_dev, i) {
+ 		ret = regmap_bulk_read(data->regmap, BH1745_RED_LSB + 2 * i,
+ 				       &value, 2);
+diff --git a/drivers/iio/light/vcnl4035.c b/drivers/iio/light/vcnl4035.c
+index 337a1332c2c64a..67c94be0201897 100644
+--- a/drivers/iio/light/vcnl4035.c
++++ b/drivers/iio/light/vcnl4035.c
+@@ -105,7 +105,7 @@ static irqreturn_t vcnl4035_trigger_consumer_handler(int irq, void *p)
+ 	struct iio_dev *indio_dev = pf->indio_dev;
+ 	struct vcnl4035_data *data = iio_priv(indio_dev);
+ 	/* Ensure naturally aligned timestamp */
+-	u8 buffer[ALIGN(sizeof(u16), sizeof(s64)) + sizeof(s64)]  __aligned(8);
++	u8 buffer[ALIGN(sizeof(u16), sizeof(s64)) + sizeof(s64)]  __aligned(8) = { };
+ 	int ret;
+ 
+ 	ret = regmap_read(data->regmap, VCNL4035_ALS_DATA, (int *)buffer);
+diff --git a/drivers/iio/pressure/zpa2326.c b/drivers/iio/pressure/zpa2326.c
+index 950f8dee2b26b7..b4c6c7c4725694 100644
+--- a/drivers/iio/pressure/zpa2326.c
++++ b/drivers/iio/pressure/zpa2326.c
+@@ -586,6 +586,8 @@ static int zpa2326_fill_sample_buffer(struct iio_dev               *indio_dev,
+ 	}   sample;
+ 	int err;
+ 
++	memset(&sample, 0, sizeof(sample));
++
+ 	if (test_bit(0, indio_dev->active_scan_mask)) {
+ 		/* Get current pressure from hardware FIFO. */
+ 		err = zpa2326_dequeue_pressure(indio_dev, &sample.pressure);
+diff --git a/drivers/md/dm-ebs-target.c b/drivers/md/dm-ebs-target.c
+index ec5db1478b2fce..18ae45dcbfb28b 100644
+--- a/drivers/md/dm-ebs-target.c
++++ b/drivers/md/dm-ebs-target.c
+@@ -442,7 +442,7 @@ static int ebs_iterate_devices(struct dm_target *ti,
+ static struct target_type ebs_target = {
+ 	.name		 = "ebs",
+ 	.version	 = {1, 0, 1},
+-	.features	 = DM_TARGET_PASSES_INTEGRITY,
++	.features	 = 0,
+ 	.module		 = THIS_MODULE,
+ 	.ctr		 = ebs_ctr,
+ 	.dtr		 = ebs_dtr,
+diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
+index c9f47d0cccf9bb..872bb59f547055 100644
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -2332,10 +2332,9 @@ static struct thin_c *get_first_thin(struct pool *pool)
+ 	struct thin_c *tc = NULL;
+ 
+ 	rcu_read_lock();
+-	if (!list_empty(&pool->active_thins)) {
+-		tc = list_entry_rcu(pool->active_thins.next, struct thin_c, list);
++	tc = list_first_or_null_rcu(&pool->active_thins, struct thin_c, list);
++	if (tc)
+ 		thin_get(tc);
+-	}
+ 	rcu_read_unlock();
+ 
+ 	return tc;
+diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
+index 62b1a44b8dd2e7..6bd9848518d477 100644
+--- a/drivers/md/dm-verity-fec.c
++++ b/drivers/md/dm-verity-fec.c
+@@ -60,15 +60,19 @@ static int fec_decode_rs8(struct dm_verity *v, struct dm_verity_fec_io *fio,
+  * to the data block. Caller is responsible for releasing buf.
+  */
+ static u8 *fec_read_parity(struct dm_verity *v, u64 rsb, int index,
+-			   unsigned int *offset, struct dm_buffer **buf,
+-			   unsigned short ioprio)
++			   unsigned int *offset, unsigned int par_buf_offset,
++			   struct dm_buffer **buf, unsigned short ioprio)
+ {
+ 	u64 position, block, rem;
+ 	u8 *res;
+ 
++	/* We have already part of parity bytes read, skip to the next block */
++	if (par_buf_offset)
++		index++;
++
+ 	position = (index + rsb) * v->fec->roots;
+ 	block = div64_u64_rem(position, v->fec->io_size, &rem);
+-	*offset = (unsigned int)rem;
++	*offset = par_buf_offset ? 0 : (unsigned int)rem;
+ 
+ 	res = dm_bufio_read_with_ioprio(v->fec->bufio, block, buf, ioprio);
+ 	if (IS_ERR(res)) {
+@@ -128,11 +132,12 @@ static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_io *io,
+ {
+ 	int r, corrected = 0, res;
+ 	struct dm_buffer *buf;
+-	unsigned int n, i, offset;
+-	u8 *par, *block;
++	unsigned int n, i, offset, par_buf_offset = 0;
++	u8 *par, *block, par_buf[DM_VERITY_FEC_RSM - DM_VERITY_FEC_MIN_RSN];
+ 	struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
+ 
+-	par = fec_read_parity(v, rsb, block_offset, &offset, &buf, bio_prio(bio));
++	par = fec_read_parity(v, rsb, block_offset, &offset,
++			      par_buf_offset, &buf, bio_prio(bio));
+ 	if (IS_ERR(par))
+ 		return PTR_ERR(par);
+ 
+@@ -142,7 +147,8 @@ static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_io *io,
+ 	 */
+ 	fec_for_each_buffer_rs_block(fio, n, i) {
+ 		block = fec_buffer_rs_block(v, fio, n, i);
+-		res = fec_decode_rs8(v, fio, block, &par[offset], neras);
++		memcpy(&par_buf[par_buf_offset], &par[offset], v->fec->roots - par_buf_offset);
++		res = fec_decode_rs8(v, fio, block, par_buf, neras);
+ 		if (res < 0) {
+ 			r = res;
+ 			goto error;
+@@ -155,12 +161,21 @@ static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_io *io,
+ 		if (block_offset >= 1 << v->data_dev_block_bits)
+ 			goto done;
+ 
+-		/* read the next block when we run out of parity bytes */
+-		offset += v->fec->roots;
++		/* Read the next block when we run out of parity bytes */
++		offset += (v->fec->roots - par_buf_offset);
++		/* Check if parity bytes are split between blocks */
++		if (offset < v->fec->io_size && (offset + v->fec->roots) > v->fec->io_size) {
++			par_buf_offset = v->fec->io_size - offset;
++			memcpy(par_buf, &par[offset], par_buf_offset);
++			offset += par_buf_offset;
++		} else
++			par_buf_offset = 0;
++
+ 		if (offset >= v->fec->io_size) {
+ 			dm_bufio_release(buf);
+ 
+-			par = fec_read_parity(v, rsb, block_offset, &offset, &buf, bio_prio(bio));
++			par = fec_read_parity(v, rsb, block_offset, &offset,
++					      par_buf_offset, &buf, bio_prio(bio));
+ 			if (IS_ERR(par))
+ 				return PTR_ERR(par);
+ 		}
+@@ -724,10 +739,7 @@ int verity_fec_ctr(struct dm_verity *v)
+ 		return -E2BIG;
+ 	}
+ 
+-	if ((f->roots << SECTOR_SHIFT) & ((1 << v->data_dev_block_bits) - 1))
+-		f->io_size = 1 << v->data_dev_block_bits;
+-	else
+-		f->io_size = v->fec->roots << SECTOR_SHIFT;
++	f->io_size = 1 << v->data_dev_block_bits;
+ 
+ 	f->bufio = dm_bufio_client_create(f->dev->bdev,
+ 					  f->io_size,
+diff --git a/drivers/md/persistent-data/dm-array.c b/drivers/md/persistent-data/dm-array.c
+index 157c9bd2fed741..8f8792e5580639 100644
+--- a/drivers/md/persistent-data/dm-array.c
++++ b/drivers/md/persistent-data/dm-array.c
+@@ -917,23 +917,27 @@ static int load_ablock(struct dm_array_cursor *c)
+ 	if (c->block)
+ 		unlock_ablock(c->info, c->block);
+ 
+-	c->block = NULL;
+-	c->ab = NULL;
+ 	c->index = 0;
+ 
+ 	r = dm_btree_cursor_get_value(&c->cursor, &key, &value_le);
+ 	if (r) {
+ 		DMERR("dm_btree_cursor_get_value failed");
+-		dm_btree_cursor_end(&c->cursor);
++		goto out;
+ 
+ 	} else {
+ 		r = get_ablock(c->info, le64_to_cpu(value_le), &c->block, &c->ab);
+ 		if (r) {
+ 			DMERR("get_ablock failed");
+-			dm_btree_cursor_end(&c->cursor);
++			goto out;
+ 		}
+ 	}
+ 
++	return 0;
++
++out:
++	dm_btree_cursor_end(&c->cursor);
++	c->block = NULL;
++	c->ab = NULL;
+ 	return r;
+ }
+ 
+@@ -956,10 +960,10 @@ EXPORT_SYMBOL_GPL(dm_array_cursor_begin);
+ 
+ void dm_array_cursor_end(struct dm_array_cursor *c)
+ {
+-	if (c->block) {
++	if (c->block)
+ 		unlock_ablock(c->info, c->block);
+-		dm_btree_cursor_end(&c->cursor);
+-	}
++
++	dm_btree_cursor_end(&c->cursor);
+ }
+ EXPORT_SYMBOL_GPL(dm_array_cursor_end);
+ 
+@@ -999,6 +1003,7 @@ int dm_array_cursor_skip(struct dm_array_cursor *c, uint32_t count)
+ 		}
+ 
+ 		count -= remaining;
++		c->index += (remaining - 1);
+ 		r = dm_array_cursor_next(c);
+ 
+ 	} while (!r);
+diff --git a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c
+index e616e3ec2b42fd..3c1359d8d4e692 100644
+--- a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c
++++ b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c
+@@ -148,7 +148,7 @@ static int pci1xxxx_gpio_set_config(struct gpio_chip *gpio, unsigned int offset,
+ 		pci1xxx_assign_bit(priv->reg_base, OPENDRAIN_OFFSET(offset), (offset % 32), true);
+ 		break;
+ 	default:
+-		ret = -EOPNOTSUPP;
++		ret = -ENOTSUPP;
+ 		break;
+ 	}
+ 	spin_unlock_irqrestore(&priv->lock, flags);
+@@ -277,7 +277,7 @@ static irqreturn_t pci1xxxx_gpio_irq_handler(int irq, void *dev_id)
+ 			writel(BIT(bit), priv->reg_base + INTR_STATUS_OFFSET(gpiobank));
+ 			spin_unlock_irqrestore(&priv->lock, flags);
+ 			irq = irq_find_mapping(gc->irq.domain, (bit + (gpiobank * 32)));
+-			generic_handle_irq(irq);
++			handle_nested_irq(irq);
+ 		}
+ 	}
+ 	spin_lock_irqsave(&priv->lock, flags);
+diff --git a/drivers/net/ethernet/amd/pds_core/devlink.c b/drivers/net/ethernet/amd/pds_core/devlink.c
+index 2681889162a25e..44971e71991ff5 100644
+--- a/drivers/net/ethernet/amd/pds_core/devlink.c
++++ b/drivers/net/ethernet/amd/pds_core/devlink.c
+@@ -118,7 +118,7 @@ int pdsc_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
+ 	if (err && err != -EIO)
+ 		return err;
+ 
+-	listlen = fw_list.num_fw_slots;
++	listlen = min(fw_list.num_fw_slots, ARRAY_SIZE(fw_list.fw_names));
+ 	for (i = 0; i < listlen; i++) {
+ 		if (i < ARRAY_SIZE(fw_slotnames))
+ 			strscpy(buf, fw_slotnames[i], sizeof(buf));
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index dafc5a4039cd2c..c255445e97f3c5 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -2826,6 +2826,13 @@ static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
+ 	return 0;
+ }
+ 
++static bool bnxt_vnic_is_active(struct bnxt *bp)
++{
++	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
++
++	return vnic->fw_vnic_id != INVALID_HW_RING_ID && vnic->mru > 0;
++}
++
+ static irqreturn_t bnxt_msix(int irq, void *dev_instance)
+ {
+ 	struct bnxt_napi *bnapi = dev_instance;
+@@ -3093,7 +3100,7 @@ static int bnxt_poll(struct napi_struct *napi, int budget)
+ 			break;
+ 		}
+ 	}
+-	if (bp->flags & BNXT_FLAG_DIM) {
++	if ((bp->flags & BNXT_FLAG_DIM) && bnxt_vnic_is_active(bp)) {
+ 		struct dim_sample dim_sample = {};
+ 
+ 		dim_update_sample(cpr->event_ctr,
+@@ -3224,7 +3231,7 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget)
+ poll_done:
+ 	cpr_rx = &cpr->cp_ring_arr[0];
+ 	if (cpr_rx->cp_ring_type == BNXT_NQ_HDL_TYPE_RX &&
+-	    (bp->flags & BNXT_FLAG_DIM)) {
++	    (bp->flags & BNXT_FLAG_DIM) && bnxt_vnic_is_active(bp)) {
+ 		struct dim_sample dim_sample = {};
+ 
+ 		dim_update_sample(cpr->event_ctr,
+@@ -7116,6 +7123,26 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
+ 	return rc;
+ }
+ 
++static void bnxt_cancel_dim(struct bnxt *bp)
++{
++	int i;
++
++	/* DIM work is initialized in bnxt_enable_napi().  Proceed only
++	 * if NAPI is enabled.
++	 */
++	if (!bp->bnapi || test_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
++		return;
++
++	/* Make sure NAPI sees that the VNIC is disabled */
++	synchronize_net();
++	for (i = 0; i < bp->rx_nr_rings; i++) {
++		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
++		struct bnxt_napi *bnapi = rxr->bnapi;
++
++		cancel_work_sync(&bnapi->cp_ring.dim.work);
++	}
++}
++
+ static int hwrm_ring_free_send_msg(struct bnxt *bp,
+ 				   struct bnxt_ring_struct *ring,
+ 				   u32 ring_type, int cmpl_ring_id)
+@@ -7216,6 +7243,7 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
+ 		}
+ 	}
+ 
++	bnxt_cancel_dim(bp);
+ 	for (i = 0; i < bp->rx_nr_rings; i++) {
+ 		bnxt_hwrm_rx_ring_free(bp, &bp->rx_ring[i], close_path);
+ 		bnxt_hwrm_rx_agg_ring_free(bp, &bp->rx_ring[i], close_path);
+@@ -11012,8 +11040,6 @@ static void bnxt_disable_napi(struct bnxt *bp)
+ 		if (bnapi->in_reset)
+ 			cpr->sw_stats->rx.rx_resets++;
+ 		napi_disable(&bnapi->napi);
+-		if (bnapi->rx_ring)
+-			cancel_work_sync(&cpr->dim.work);
+ 	}
+ }
+ 
+@@ -15269,8 +15295,10 @@ static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
+ 		bnxt_hwrm_vnic_update(bp, vnic,
+ 				      VNIC_UPDATE_REQ_ENABLES_MRU_VALID);
+ 	}
+-
++	/* Make sure NAPI sees that the VNIC is disabled */
++	synchronize_net();
+ 	rxr = &bp->rx_ring[idx];
++	cancel_work_sync(&rxr->bnapi->cp_ring.dim.work);
+ 	bnxt_hwrm_rx_ring_free(bp, rxr, false);
+ 	bnxt_hwrm_rx_agg_ring_free(bp, rxr, false);
+ 	rxr->rx_next_cons = 0;
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+index fdd6356f21efb3..546d9a3d7efea7 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+@@ -208,7 +208,7 @@ int bnxt_send_msg(struct bnxt_en_dev *edev,
+ 
+ 	rc = hwrm_req_replace(bp, req, fw_msg->msg, fw_msg->msg_len);
+ 	if (rc)
+-		return rc;
++		goto drop_req;
+ 
+ 	hwrm_req_timeout(bp, req, fw_msg->timeout);
+ 	resp = hwrm_req_hold(bp, req);
+@@ -220,6 +220,7 @@ int bnxt_send_msg(struct bnxt_en_dev *edev,
+ 
+ 		memcpy(fw_msg->resp, resp, resp_len);
+ 	}
++drop_req:
+ 	hwrm_req_drop(bp, req);
+ 	return rc;
+ }
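
The bnxt_ulp fix is the classic goto-unwind shape: once the request is held, every exit path, including the hwrm_req_replace() failure, must pass through the drop label. A small userspace sketch of the pattern, using malloc/free in place of hwrm_req_hold()/hwrm_req_drop():

    #include <stdlib.h>
    #include <string.h>

    /* Returns 0 on success, -1 on failure; req is released on every path. */
    static int send_msg(const char *msg, size_t len)
    {
            char *req = malloc(len);
            int rc = -1;

            if (!req)
                    return -1;
            if (len == 0)
                    goto drop_req;          /* early error: still drop req */
            memcpy(req, msg, len);
            rc = 0;                         /* pretend the send succeeded */
    drop_req:
            free(req);
            return rc;
    }

    int main(void)
    {
            int ok = send_msg("hello", 5);  /* 0 */
            int err = send_msg("", 0);      /* -1, with no leak */

            return (ok == 0 && err != 0) ? 0 : 1;
    }
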
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+index fb3933fbb8425e..757c6484f53515 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+@@ -1799,7 +1799,10 @@ void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid,
+ 	struct adapter *adap = container_of(t, struct adapter, tids);
+ 	struct sk_buff *skb;
+ 
+-	WARN_ON(tid_out_of_range(&adap->tids, tid));
++	if (tid_out_of_range(&adap->tids, tid)) {
++		dev_err(adap->pdev_dev, "tid %d out of range\n", tid);
++		return;
++	}
+ 
+ 	if (t->tid_tab[tid - adap->tids.tid_base]) {
+ 		t->tid_tab[tid - adap->tids.tid_base] = NULL;
+diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
+index d404819ebc9b3f..f985a3cf2b11fa 100644
+--- a/drivers/net/ethernet/google/gve/gve_main.c
++++ b/drivers/net/ethernet/google/gve/gve_main.c
+@@ -2224,14 +2224,18 @@ static void gve_service_task(struct work_struct *work)
+ 
+ static void gve_set_netdev_xdp_features(struct gve_priv *priv)
+ {
++	xdp_features_t xdp_features;
++
+ 	if (priv->queue_format == GVE_GQI_QPL_FORMAT) {
+-		priv->dev->xdp_features = NETDEV_XDP_ACT_BASIC;
+-		priv->dev->xdp_features |= NETDEV_XDP_ACT_REDIRECT;
+-		priv->dev->xdp_features |= NETDEV_XDP_ACT_NDO_XMIT;
+-		priv->dev->xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY;
++		xdp_features = NETDEV_XDP_ACT_BASIC;
++		xdp_features |= NETDEV_XDP_ACT_REDIRECT;
++		xdp_features |= NETDEV_XDP_ACT_NDO_XMIT;
++		xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY;
+ 	} else {
+-		priv->dev->xdp_features = 0;
++		xdp_features = 0;
+ 	}
++
++	xdp_set_features_flag(priv->dev, xdp_features);
+ }
+ 
+ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
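
The gve change collects the XDP feature bits in a local variable and publishes them once through xdp_set_features_flag(), rather than mutating priv->dev->xdp_features field by field, so observers see a single coherent update. A sketch of the accumulate-then-publish idea, with an invented stand-in for the notifying setter:

    #include <stdint.h>
    #include <stdio.h>

    #define ACT_BASIC    (1u << 0)
    #define ACT_REDIRECT (1u << 1)
    #define ACT_NDO_XMIT (1u << 2)

    static uint32_t dev_features;

    /* Stand-in for xdp_set_features_flag(): one store, one notification. */
    static void set_features(uint32_t features)
    {
            dev_features = features;
            printf("notify: features now 0x%x\n", features);
    }

    int main(void)
    {
            uint32_t features = 0;
            int supported = 1;

            if (supported) {
                    features = ACT_BASIC;
                    features |= ACT_REDIRECT;
                    features |= ACT_NDO_XMIT;
            }
            set_features(features);         /* single visible transition */
            return 0;
    }
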
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+index 27dbe367f3d355..d873523e84f271 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+@@ -916,9 +916,6 @@ struct hnae3_handle {
+ 
+ 	u8 netdev_flags;
+ 	struct dentry *hnae3_dbgfs;
+-	/* protects concurrent contention between debugfs commands */
+-	struct mutex dbgfs_lock;
+-	char **dbgfs_buf;
+ 
+ 	/* Network interface message level enabled bits */
+ 	u32 msg_enable;
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+index 807eb3bbb11c04..9bbece25552b17 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+@@ -1260,69 +1260,55 @@ static int hns3_dbg_read_cmd(struct hns3_dbg_data *dbg_data,
+ static ssize_t hns3_dbg_read(struct file *filp, char __user *buffer,
+ 			     size_t count, loff_t *ppos)
+ {
+-	struct hns3_dbg_data *dbg_data = filp->private_data;
++	char *buf = filp->private_data;
++
++	return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
++}
++
++static int hns3_dbg_open(struct inode *inode, struct file *filp)
++{
++	struct hns3_dbg_data *dbg_data = inode->i_private;
+ 	struct hnae3_handle *handle = dbg_data->handle;
+ 	struct hns3_nic_priv *priv = handle->priv;
+-	ssize_t size = 0;
+-	char **save_buf;
+-	char *read_buf;
+ 	u32 index;
++	char *buf;
+ 	int ret;
+ 
++	if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
++	    test_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
++		return -EBUSY;
++
+ 	ret = hns3_dbg_get_cmd_index(dbg_data, &index);
+ 	if (ret)
+ 		return ret;
+ 
+-	mutex_lock(&handle->dbgfs_lock);
+-	save_buf = &handle->dbgfs_buf[index];
+-
+-	if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
+-	    test_bit(HNS3_NIC_STATE_RESETTING, &priv->state)) {
+-		ret = -EBUSY;
+-		goto out;
+-	}
+-
+-	if (*save_buf) {
+-		read_buf = *save_buf;
+-	} else {
+-		read_buf = kvzalloc(hns3_dbg_cmd[index].buf_len, GFP_KERNEL);
+-		if (!read_buf) {
+-			ret = -ENOMEM;
+-			goto out;
+-		}
+-
+-		/* save the buffer addr until the last read operation */
+-		*save_buf = read_buf;
+-
+-		/* get data ready for the first time to read */
+-		ret = hns3_dbg_read_cmd(dbg_data, hns3_dbg_cmd[index].cmd,
+-					read_buf, hns3_dbg_cmd[index].buf_len);
+-		if (ret)
+-			goto out;
+-	}
++	buf = kvzalloc(hns3_dbg_cmd[index].buf_len, GFP_KERNEL);
++	if (!buf)
++		return -ENOMEM;
+ 
+-	size = simple_read_from_buffer(buffer, count, ppos, read_buf,
+-				       strlen(read_buf));
+-	if (size > 0) {
+-		mutex_unlock(&handle->dbgfs_lock);
+-		return size;
++	ret = hns3_dbg_read_cmd(dbg_data, hns3_dbg_cmd[index].cmd,
++				buf, hns3_dbg_cmd[index].buf_len);
++	if (ret) {
++		kvfree(buf);
++		return ret;
+ 	}
+ 
+-out:
+-	/* free the buffer for the last read operation */
+-	if (*save_buf) {
+-		kvfree(*save_buf);
+-		*save_buf = NULL;
+-	}
++	filp->private_data = buf;
++	return 0;
++}
+ 
+-	mutex_unlock(&handle->dbgfs_lock);
+-	return ret;
++static int hns3_dbg_release(struct inode *inode, struct file *filp)
++{
++	kvfree(filp->private_data);
++	filp->private_data = NULL;
++	return 0;
+ }
+ 
+ static const struct file_operations hns3_dbg_fops = {
+ 	.owner = THIS_MODULE,
+-	.open  = simple_open,
++	.open  = hns3_dbg_open,
+ 	.read  = hns3_dbg_read,
++	.release = hns3_dbg_release,
+ };
+ 
+ static int hns3_dbg_bd_file_init(struct hnae3_handle *handle, u32 cmd)
+@@ -1379,13 +1365,6 @@ int hns3_dbg_init(struct hnae3_handle *handle)
+ 	int ret;
+ 	u32 i;
+ 
+-	handle->dbgfs_buf = devm_kcalloc(&handle->pdev->dev,
+-					 ARRAY_SIZE(hns3_dbg_cmd),
+-					 sizeof(*handle->dbgfs_buf),
+-					 GFP_KERNEL);
+-	if (!handle->dbgfs_buf)
+-		return -ENOMEM;
+-
+ 	hns3_dbg_dentry[HNS3_DBG_DENTRY_COMMON].dentry =
+ 				debugfs_create_dir(name, hns3_dbgfs_root);
+ 	handle->hnae3_dbgfs = hns3_dbg_dentry[HNS3_DBG_DENTRY_COMMON].dentry;
+@@ -1395,8 +1374,6 @@ int hns3_dbg_init(struct hnae3_handle *handle)
+ 			debugfs_create_dir(hns3_dbg_dentry[i].name,
+ 					   handle->hnae3_dbgfs);
+ 
+-	mutex_init(&handle->dbgfs_lock);
+-
+ 	for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd); i++) {
+ 		if ((hns3_dbg_cmd[i].cmd == HNAE3_DBG_CMD_TM_NODES &&
+ 		     ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) ||
+@@ -1425,24 +1402,13 @@ int hns3_dbg_init(struct hnae3_handle *handle)
+ out:
+ 	debugfs_remove_recursive(handle->hnae3_dbgfs);
+ 	handle->hnae3_dbgfs = NULL;
+-	mutex_destroy(&handle->dbgfs_lock);
+ 	return ret;
+ }
+ 
+ void hns3_dbg_uninit(struct hnae3_handle *handle)
+ {
+-	u32 i;
+-
+ 	debugfs_remove_recursive(handle->hnae3_dbgfs);
+ 	handle->hnae3_dbgfs = NULL;
+-
+-	for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd); i++)
+-		if (handle->dbgfs_buf[i]) {
+-			kvfree(handle->dbgfs_buf[i]);
+-			handle->dbgfs_buf[i] = NULL;
+-		}
+-
+-	mutex_destroy(&handle->dbgfs_lock);
+ }
+ 
+ void hns3_dbg_register_debugfs(const char *debugfs_dir_name)
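
The hns3 debugfs rework trades a shared, mutex-guarded cache (and its devm-allocated pointer table) for a per-open snapshot: open() allocates and fills the buffer, read() serves it from the caller's offset, release() frees it, so concurrent readers never race. A compact userspace sketch of that lifecycle, assuming simplified stand-ins for the file_operations hooks:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct handle { char *buf; };       /* plays filp->private_data */

    static int dbg_open(struct handle *h)
    {
            static const char snapshot[] = "queue stats: 42\n";

            h->buf = malloc(sizeof(snapshot));
            if (!h->buf)
                    return -1;
            memcpy(h->buf, snapshot, sizeof(snapshot)); /* filled once, at open */
            return 0;
    }

    /* Copy out from the snapshot at *ppos, like simple_read_from_buffer(). */
    static size_t dbg_read(struct handle *h, char *dst, size_t count, long *ppos)
    {
            size_t len = strlen(h->buf);
            size_t n;

            if ((size_t)*ppos >= len)
                    return 0;
            n = len - (size_t)*ppos;
            if (n > count)
                    n = count;
            memcpy(dst, h->buf + *ppos, n);
            *ppos += (long)n;
            return n;
    }

    static void dbg_release(struct handle *h)
    {
            free(h->buf);               /* freed exactly once, at close */
            h->buf = NULL;
    }

    int main(void)
    {
            struct handle h;
            char out[8];
            long pos = 0;
            size_t n;

            if (dbg_open(&h))
                    return 1;
            while ((n = dbg_read(&h, out, sizeof(out), &pos)) > 0)
                    fwrite(out, 1, n, stdout);
            dbg_release(&h);
            return 0;
    }
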
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+index 4cbc4d069a1f36..73825b6bd485d1 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -2452,7 +2452,6 @@ static int hns3_nic_set_features(struct net_device *netdev,
+ 			return ret;
+ 	}
+ 
+-	netdev->features = features;
+ 	return 0;
+ }
+ 
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+index bd86efd92a5a7d..9a67fe0554a52b 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -6,6 +6,7 @@
+ #include <linux/etherdevice.h>
+ #include <linux/init.h>
+ #include <linux/interrupt.h>
++#include <linux/irq.h>
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/netdevice.h>
+@@ -3584,6 +3585,17 @@ static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
+ 	return ret;
+ }
+ 
++static void hclge_set_reset_pending(struct hclge_dev *hdev,
++				    enum hnae3_reset_type reset_type)
++{
++	/* When an incorrect reset type is requested, the get_reset_level
++	 * function returns the HNAE3_NONE_RESET flag. As a result, this
++	 * type does not need to be set pending.
++	 */
++	if (reset_type != HNAE3_NONE_RESET)
++		set_bit(reset_type, &hdev->reset_pending);
++}
++
+ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
+ {
+ 	u32 cmdq_src_reg, msix_src_reg, hw_err_src_reg;
+@@ -3604,7 +3616,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
+ 	 */
+ 	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
+ 		dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
+-		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
++		hclge_set_reset_pending(hdev, HNAE3_IMP_RESET);
+ 		set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
+ 		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
+ 		hdev->rst_stats.imp_rst_cnt++;
+@@ -3614,7 +3626,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
+ 	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
+ 		dev_info(&hdev->pdev->dev, "global reset interrupt\n");
+ 		set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
+-		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
++		hclge_set_reset_pending(hdev, HNAE3_GLOBAL_RESET);
+ 		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
+ 		hdev->rst_stats.global_rst_cnt++;
+ 		return HCLGE_VECTOR0_EVENT_RST;
+@@ -3769,7 +3781,7 @@ static int hclge_misc_irq_init(struct hclge_dev *hdev)
+ 	snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
+ 		 HCLGE_NAME, pci_name(hdev->pdev));
+ 	ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
+-			  0, hdev->misc_vector.name, hdev);
++			  IRQF_NO_AUTOEN, hdev->misc_vector.name, hdev);
+ 	if (ret) {
+ 		hclge_free_vector(hdev, 0);
+ 		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
+@@ -4062,7 +4074,7 @@ static void hclge_do_reset(struct hclge_dev *hdev)
+ 	case HNAE3_FUNC_RESET:
+ 		dev_info(&pdev->dev, "PF reset requested\n");
+ 		/* schedule again to check later */
+-		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
++		hclge_set_reset_pending(hdev, HNAE3_FUNC_RESET);
+ 		hclge_reset_task_schedule(hdev);
+ 		break;
+ 	default:
+@@ -4096,6 +4108,8 @@ static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
+ 		clear_bit(HNAE3_FLR_RESET, addr);
+ 	}
+ 
++	clear_bit(HNAE3_NONE_RESET, addr);
++
+ 	if (hdev->reset_type != HNAE3_NONE_RESET &&
+ 	    rst_level < hdev->reset_type)
+ 		return HNAE3_NONE_RESET;
+@@ -4237,7 +4251,7 @@ static bool hclge_reset_err_handle(struct hclge_dev *hdev)
+ 		return false;
+ 	} else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
+ 		hdev->rst_stats.reset_fail_cnt++;
+-		set_bit(hdev->reset_type, &hdev->reset_pending);
++		hclge_set_reset_pending(hdev, hdev->reset_type);
+ 		dev_info(&hdev->pdev->dev,
+ 			 "re-schedule reset task(%u)\n",
+ 			 hdev->rst_stats.reset_fail_cnt);
+@@ -4480,8 +4494,20 @@ static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
+ static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
+ 					enum hnae3_reset_type rst_type)
+ {
++#define HCLGE_SUPPORT_RESET_TYPE \
++	(BIT(HNAE3_FLR_RESET) | BIT(HNAE3_FUNC_RESET) | \
++	BIT(HNAE3_GLOBAL_RESET) | BIT(HNAE3_IMP_RESET))
++
+ 	struct hclge_dev *hdev = ae_dev->priv;
+ 
++	if (!(BIT(rst_type) & HCLGE_SUPPORT_RESET_TYPE)) {
++		/* To prevent reset triggered by hclge_reset_event */
++		set_bit(HNAE3_NONE_RESET, &hdev->default_reset_request);
++		dev_warn(&hdev->pdev->dev, "unsupported reset type %d\n",
++			 rst_type);
++		return;
++	}
++
+ 	set_bit(rst_type, &hdev->default_reset_request);
+ }
+ 
+@@ -11891,9 +11917,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
+ 
+ 	hclge_init_rxd_adv_layout(hdev);
+ 
+-	/* Enable MISC vector(vector0) */
+-	hclge_enable_vector(&hdev->misc_vector, true);
+-
+ 	ret = hclge_init_wol(hdev);
+ 	if (ret)
+ 		dev_warn(&pdev->dev,
+@@ -11906,6 +11929,10 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
+ 	hclge_state_init(hdev);
+ 	hdev->last_reset_time = jiffies;
+ 
++	/* Enable MISC vector(vector0) */
++	enable_irq(hdev->misc_vector.vector_irq);
++	hclge_enable_vector(&hdev->misc_vector, true);
++
+ 	dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
+ 		 HCLGE_DRIVER_NAME);
+ 
+@@ -12311,7 +12338,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
+ 
+ 	/* Disable MISC vector(vector0) */
+ 	hclge_enable_vector(&hdev->misc_vector, false);
+-	synchronize_irq(hdev->misc_vector.vector_irq);
++	disable_irq(hdev->misc_vector.vector_irq);
+ 
+ 	/* Disable all hw interrupts */
+ 	hclge_config_mac_tnl_int(hdev, false);
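
The PF reset-handling changes funnel every reset_pending update through a helper that drops the HNAE3_NONE_RESET sentinel, and reject unsupported types against a fixed mask before queuing. A standalone sketch of that filter, with an invented enum in place of the hnae3 reset types:

    #include <stdio.h>

    enum reset_type { RST_NONE, RST_FUNC, RST_GLOBAL, RST_IMP };

    #define SUPPORTED_RESETS \
            ((1u << RST_FUNC) | (1u << RST_GLOBAL) | (1u << RST_IMP))

    static unsigned long reset_pending;

    /* NONE is a sentinel produced on bad input; never queue it. */
    static void set_reset_pending(enum reset_type type)
    {
            if (type != RST_NONE)
                    reset_pending |= 1u << type;
    }

    static void request_reset(enum reset_type type)
    {
            if (!((1u << type) & SUPPORTED_RESETS)) {
                    fprintf(stderr, "unsupported reset type %d\n", type);
                    set_reset_pending(RST_NONE);    /* filtered out, no-op */
                    return;
            }
            set_reset_pending(type);
    }

    int main(void)
    {
            request_reset(RST_NONE);    /* rejected by the mask */
            request_reset(RST_FUNC);    /* queued */
            printf("pending mask: 0x%lx\n", reset_pending); /* 0x2 */
            return 0;
    }
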
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
+index 5505caea88e981..bab16c2191b2f0 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
+@@ -58,6 +58,9 @@ bool hclge_ptp_set_tx_info(struct hnae3_handle *handle, struct sk_buff *skb)
+ 	struct hclge_dev *hdev = vport->back;
+ 	struct hclge_ptp *ptp = hdev->ptp;
+ 
++	if (!ptp)
++		return false;
++
+ 	if (!test_bit(HCLGE_PTP_FLAG_TX_EN, &ptp->flags) ||
+ 	    test_and_set_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state)) {
+ 		ptp->tx_skipped++;
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c
+index 43c1c18fa81f8d..8c057192aae6e1 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c
+@@ -510,9 +510,9 @@ static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
+ static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
+ 			      struct hnae3_knic_private_info *kinfo)
+ {
+-#define HCLGE_RING_REG_OFFSET		0x200
+ #define HCLGE_RING_INT_REG_OFFSET	0x4
+ 
++	struct hnae3_queue *tqp;
+ 	int i, j, reg_num;
+ 	int data_num_sum;
+ 	u32 *reg = data;
+@@ -533,10 +533,11 @@ static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
+ 	reg_num = ARRAY_SIZE(ring_reg_addr_list);
+ 	for (j = 0; j < kinfo->num_tqps; j++) {
+ 		reg += hclge_reg_get_tlv(HCLGE_REG_TAG_RING, reg_num, reg);
++		tqp = kinfo->tqp[j];
+ 		for (i = 0; i < reg_num; i++)
+-			*reg++ = hclge_read_dev(&hdev->hw,
+-						ring_reg_addr_list[i] +
+-						HCLGE_RING_REG_OFFSET * j);
++			*reg++ = readl_relaxed(tqp->io_base -
++					       HCLGE_TQP_REG_OFFSET +
++					       ring_reg_addr_list[i]);
+ 	}
+ 	data_num_sum += (reg_num + HCLGE_REG_TLV_SPACE) * kinfo->num_tqps;
+ 
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+index 094a7c7b55921f..d47bd8d6145f97 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+@@ -1395,6 +1395,17 @@ static int hclgevf_notify_roce_client(struct hclgevf_dev *hdev,
+ 	return ret;
+ }
+ 
++static void hclgevf_set_reset_pending(struct hclgevf_dev *hdev,
++				      enum hnae3_reset_type reset_type)
++{
++	/* When an incorrect reset type is requested, the get_reset_level
++	 * function returns the HNAE3_NONE_RESET flag. As a result, this
++	 * type does not need to be set pending.
++	 */
++	if (reset_type != HNAE3_NONE_RESET)
++		set_bit(reset_type, &hdev->reset_pending);
++}
++
+ static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
+ {
+ #define HCLGEVF_RESET_WAIT_US	20000
+@@ -1544,7 +1555,7 @@ static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev)
+ 		hdev->rst_stats.rst_fail_cnt);
+ 
+ 	if (hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT)
+-		set_bit(hdev->reset_type, &hdev->reset_pending);
++		hclgevf_set_reset_pending(hdev, hdev->reset_type);
+ 
+ 	if (hclgevf_is_reset_pending(hdev)) {
+ 		set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
+@@ -1664,6 +1675,8 @@ static enum hnae3_reset_type hclgevf_get_reset_level(unsigned long *addr)
+ 		clear_bit(HNAE3_FLR_RESET, addr);
+ 	}
+ 
++	clear_bit(HNAE3_NONE_RESET, addr);
++
+ 	return rst_level;
+ }
+ 
+@@ -1673,14 +1686,15 @@ static void hclgevf_reset_event(struct pci_dev *pdev,
+ 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+ 	struct hclgevf_dev *hdev = ae_dev->priv;
+ 
+-	dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");
+-
+ 	if (hdev->default_reset_request)
+ 		hdev->reset_level =
+ 			hclgevf_get_reset_level(&hdev->default_reset_request);
+ 	else
+ 		hdev->reset_level = HNAE3_VF_FUNC_RESET;
+ 
++	dev_info(&hdev->pdev->dev, "received reset request from VF enet, reset level is %d\n",
++		 hdev->reset_level);
++
+ 	/* reset of this VF requested */
+ 	set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
+ 	hclgevf_reset_task_schedule(hdev);
+@@ -1691,8 +1705,20 @@ static void hclgevf_reset_event(struct pci_dev *pdev,
+ static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
+ 					  enum hnae3_reset_type rst_type)
+ {
++#define HCLGEVF_SUPPORT_RESET_TYPE \
++	(BIT(HNAE3_VF_RESET) | BIT(HNAE3_VF_FUNC_RESET) | \
++	BIT(HNAE3_VF_PF_FUNC_RESET) | BIT(HNAE3_VF_FULL_RESET) | \
++	BIT(HNAE3_FLR_RESET) | BIT(HNAE3_VF_EXP_RESET))
++
+ 	struct hclgevf_dev *hdev = ae_dev->priv;
+ 
++	if (!(BIT(rst_type) & HCLGEVF_SUPPORT_RESET_TYPE)) {
++		/* To prevent reset triggered by hclge_reset_event */
++		set_bit(HNAE3_NONE_RESET, &hdev->default_reset_request);
++		dev_info(&hdev->pdev->dev, "unsupported reset type %d\n",
++			 rst_type);
++		return;
++	}
+ 	set_bit(rst_type, &hdev->default_reset_request);
+ }
+ 
+@@ -1849,14 +1875,14 @@ static void hclgevf_reset_service_task(struct hclgevf_dev *hdev)
+ 		 */
+ 		if (hdev->reset_attempts > HCLGEVF_MAX_RESET_ATTEMPTS_CNT) {
+ 			/* prepare for full reset of stack + pcie interface */
+-			set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending);
++			hclgevf_set_reset_pending(hdev, HNAE3_VF_FULL_RESET);
+ 
+ 			/* "defer" schedule the reset task again */
+ 			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
+ 		} else {
+ 			hdev->reset_attempts++;
+ 
+-			set_bit(hdev->reset_level, &hdev->reset_pending);
++			hclgevf_set_reset_pending(hdev, hdev->reset_level);
+ 			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
+ 		}
+ 		hclgevf_reset_task_schedule(hdev);
+@@ -1979,7 +2005,7 @@ static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
+ 		rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
+ 		dev_info(&hdev->pdev->dev,
+ 			 "receive reset interrupt 0x%x!\n", rst_ing_reg);
+-		set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
++		hclgevf_set_reset_pending(hdev, HNAE3_VF_RESET);
+ 		set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
+ 		set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
+ 		*clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B);
+@@ -2289,6 +2315,8 @@ static void hclgevf_state_init(struct hclgevf_dev *hdev)
+ 	clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
+ 
+ 	INIT_DELAYED_WORK(&hdev->service_task, hclgevf_service_task);
++	/* timer needs to be initialized before misc irq */
++	timer_setup(&hdev->reset_timer, hclgevf_reset_timer, 0);
+ 
+ 	mutex_init(&hdev->mbx_resp.mbx_mutex);
+ 	sema_init(&hdev->reset_sem, 1);
+@@ -2988,7 +3016,6 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
+ 		 HCLGEVF_DRIVER_NAME);
+ 
+ 	hclgevf_task_schedule(hdev, round_jiffies_relative(HZ));
+-	timer_setup(&hdev->reset_timer, hclgevf_reset_timer, 0);
+ 
+ 	return 0;
+ 
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c
+index 6db415d8b9176c..7d9d9dbc75603a 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c
+@@ -123,10 +123,10 @@ int hclgevf_get_regs_len(struct hnae3_handle *handle)
+ void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
+ 		      void *data)
+ {
+-#define HCLGEVF_RING_REG_OFFSET		0x200
+ #define HCLGEVF_RING_INT_REG_OFFSET	0x4
+ 
+ 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
++	struct hnae3_queue *tqp;
+ 	int i, j, reg_um;
+ 	u32 *reg = data;
+ 
+@@ -147,10 +147,11 @@ void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
+ 	reg_um = ARRAY_SIZE(ring_reg_addr_list);
+ 	for (j = 0; j < hdev->num_tqps; j++) {
+ 		reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_RING, reg_um, reg);
++		tqp = &hdev->htqp[j].q;
+ 		for (i = 0; i < reg_um; i++)
+-			*reg++ = hclgevf_read_dev(&hdev->hw,
+-						  ring_reg_addr_list[i] +
+-						  HCLGEVF_RING_REG_OFFSET * j);
++			*reg++ = readl_relaxed(tqp->io_base -
++					       HCLGEVF_TQP_REG_OFFSET +
++					       ring_reg_addr_list[i]);
+ 	}
+ 
+ 	reg_um = ARRAY_SIZE(tqp_intr_reg_addr_list);
+diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+index 0be1a98d7cc1b5..79a6edd0be0ec4 100644
+--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
++++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+@@ -2238,6 +2238,8 @@ struct ice_aqc_get_pkg_info_resp {
+ 	struct ice_aqc_get_pkg_info pkg_info[];
+ };
+ 
++#define ICE_AQC_GET_CGU_MAX_PHASE_ADJ	GENMASK(30, 0)
++
+ /* Get CGU abilities command response data structure (indirect 0x0C61) */
+ struct ice_aqc_get_cgu_abilities {
+ 	u8 num_inputs;
+diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.c b/drivers/net/ethernet/intel/ice/ice_dpll.c
+index d5ad6d84007c21..38e151c7ea2362 100644
+--- a/drivers/net/ethernet/intel/ice/ice_dpll.c
++++ b/drivers/net/ethernet/intel/ice/ice_dpll.c
+@@ -2064,6 +2064,18 @@ static int ice_dpll_init_worker(struct ice_pf *pf)
+ 	return 0;
+ }
+ 
++/**
++ * ice_dpll_phase_range_set - initialize phase adjust range helper
++ * @range: pointer to phase adjust range struct to be initialized
++ * @phase_adj: a value to be used as min(-)/max(+) boundary
++ */
++static void ice_dpll_phase_range_set(struct dpll_pin_phase_adjust_range *range,
++				     u32 phase_adj)
++{
++	range->min = -phase_adj;
++	range->max = phase_adj;
++}
++
+ /**
+  * ice_dpll_init_info_pins_generic - initializes generic pins info
+  * @pf: board private structure
+@@ -2105,8 +2117,8 @@ static int ice_dpll_init_info_pins_generic(struct ice_pf *pf, bool input)
+ 	for (i = 0; i < pin_num; i++) {
+ 		pins[i].idx = i;
+ 		pins[i].prop.board_label = labels[i];
+-		pins[i].prop.phase_range.min = phase_adj_max;
+-		pins[i].prop.phase_range.max = -phase_adj_max;
++		ice_dpll_phase_range_set(&pins[i].prop.phase_range,
++					 phase_adj_max);
+ 		pins[i].prop.capabilities = cap;
+ 		pins[i].pf = pf;
+ 		ret = ice_dpll_pin_state_update(pf, &pins[i], pin_type, NULL);
+@@ -2152,6 +2164,7 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf,
+ 	struct ice_hw *hw = &pf->hw;
+ 	struct ice_dpll_pin *pins;
+ 	unsigned long caps;
++	u32 phase_adj_max;
+ 	u8 freq_supp_num;
+ 	bool input;
+ 
+@@ -2159,11 +2172,13 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf,
+ 	case ICE_DPLL_PIN_TYPE_INPUT:
+ 		pins = pf->dplls.inputs;
+ 		num_pins = pf->dplls.num_inputs;
++		phase_adj_max = pf->dplls.input_phase_adj_max;
+ 		input = true;
+ 		break;
+ 	case ICE_DPLL_PIN_TYPE_OUTPUT:
+ 		pins = pf->dplls.outputs;
+ 		num_pins = pf->dplls.num_outputs;
++		phase_adj_max = pf->dplls.output_phase_adj_max;
+ 		input = false;
+ 		break;
+ 	default:
+@@ -2188,19 +2203,13 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf,
+ 				return ret;
+ 			caps |= (DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE |
+ 				 DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE);
+-			pins[i].prop.phase_range.min =
+-				pf->dplls.input_phase_adj_max;
+-			pins[i].prop.phase_range.max =
+-				-pf->dplls.input_phase_adj_max;
+ 		} else {
+-			pins[i].prop.phase_range.min =
+-				pf->dplls.output_phase_adj_max;
+-			pins[i].prop.phase_range.max =
+-				-pf->dplls.output_phase_adj_max;
+ 			ret = ice_cgu_get_output_pin_state_caps(hw, i, &caps);
+ 			if (ret)
+ 				return ret;
+ 		}
++		ice_dpll_phase_range_set(&pins[i].prop.phase_range,
++					 phase_adj_max);
+ 		pins[i].prop.capabilities = caps;
+ 		ret = ice_dpll_pin_state_update(pf, &pins[i], pin_type, NULL);
+ 		if (ret)
+@@ -2308,8 +2317,10 @@ static int ice_dpll_init_info(struct ice_pf *pf, bool cgu)
+ 	dp->dpll_idx = abilities.pps_dpll_idx;
+ 	d->num_inputs = abilities.num_inputs;
+ 	d->num_outputs = abilities.num_outputs;
+-	d->input_phase_adj_max = le32_to_cpu(abilities.max_in_phase_adj);
+-	d->output_phase_adj_max = le32_to_cpu(abilities.max_out_phase_adj);
++	d->input_phase_adj_max = le32_to_cpu(abilities.max_in_phase_adj) &
++		ICE_AQC_GET_CGU_MAX_PHASE_ADJ;
++	d->output_phase_adj_max = le32_to_cpu(abilities.max_out_phase_adj) &
++		ICE_AQC_GET_CGU_MAX_PHASE_ADJ;
+ 
+ 	alloc_size = sizeof(*d->inputs) * d->num_inputs;
+ 	d->inputs = kzalloc(alloc_size, GFP_KERNEL);
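
The ice_dpll fix masks the reserved top bit out of the firmware-reported phase-adjust maximum (GENMASK(30, 0)) and builds a symmetric range with min = -max, correcting the swapped signs in the old assignments. A small sketch of both steps together, with the mask expanded by hand:

    #include <stdint.h>
    #include <stdio.h>

    #define CGU_MAX_PHASE_ADJ 0x7fffffffu   /* GENMASK(30, 0) expanded */

    struct phase_range { long long min, max; };

    /* Symmetric range around zero: min is the negated bound. */
    static void phase_range_set(struct phase_range *r, uint32_t adj)
    {
            r->min = -(long long)adj;
            r->max = adj;
    }

    int main(void)
    {
            uint32_t raw = 0x80000400u;     /* reported value, reserved bit set */
            struct phase_range r;

            phase_range_set(&r, raw & CGU_MAX_PHASE_ADJ);
            printf("range: [%lld, %lld]\n", r.min, r.max);  /* [-1024, 1024] */
            return 0;
    }
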
+diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
+index e6980b94a6c1d6..3005dd252a1026 100644
+--- a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
++++ b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
+@@ -761,9 +761,9 @@ const struct ice_vernier_info_e82x e822_vernier[NUM_ICE_PTP_LNK_SPD] = {
+ 		/* rx_desk_rsgb_par */
+ 		644531250, /* 644.53125 MHz Reed Solomon gearbox */
+ 		/* tx_desk_rsgb_pcs */
+-		644531250, /* 644.53125 MHz Reed Solomon gearbox */
++		390625000, /* 390.625 MHz Reed Solomon gearbox */
+ 		/* rx_desk_rsgb_pcs */
+-		644531250, /* 644.53125 MHz Reed Solomon gearbox */
++		390625000, /* 390.625 MHz Reed Solomon gearbox */
+ 		/* tx_fixed_delay */
+ 		1620,
+ 		/* pmd_adj_divisor */
+diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c
+index 9fae8bdec2a7c8..1613b562d17c52 100644
+--- a/drivers/net/ethernet/intel/igc/igc_base.c
++++ b/drivers/net/ethernet/intel/igc/igc_base.c
+@@ -68,6 +68,10 @@ static s32 igc_init_nvm_params_base(struct igc_hw *hw)
+ 	u32 eecd = rd32(IGC_EECD);
+ 	u16 size;
+ 
++	/* failed to read reg and got all F's */
++	if (!(~eecd))
++		return -ENXIO;
++
+ 	size = FIELD_GET(IGC_EECD_SIZE_EX_MASK, eecd);
+ 
+ 	/* Added to a constant, "size" becomes the left-shift value
+@@ -221,6 +225,8 @@ static s32 igc_get_invariants_base(struct igc_hw *hw)
+ 
+ 	/* NVM initialization */
+ 	ret_val = igc_init_nvm_params_base(hw);
++	if (ret_val)
++		goto out;
+ 	switch (hw->mac.type) {
+ 	case igc_i225:
+ 		ret_val = igc_init_nvm_params_i225(hw);
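
The igc fix screens for a dead MMIO read before parsing the register: a failed PCI read typically returns all ones, so ~value == 0 is a cheap liveness check. A one-function sketch of the test; the field offset below is illustrative, not the real EECD layout:

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Reject the all-F's pattern a dead PCI device returns on MMIO reads. */
    static int parse_eecd(uint32_t eecd, unsigned int *size)
    {
            if (!(~eecd))
                    return -ENXIO;
            *size = (eecd >> 11) & 0xf;     /* field extract */
            return 0;
    }

    int main(void)
    {
            unsigned int size = 0;
            int ret;

            printf("dead: %d\n", parse_eecd(0xffffffffu, &size)); /* -ENXIO */
            ret = parse_eecd(0x00001800u, &size);
            printf("live: %d (size %u)\n", ret, size);            /* 0, size 3 */
            return 0;
    }
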
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+index 6bd8a18e3af3a1..e733b81e18a21a 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+@@ -1013,6 +1013,7 @@ static void cmd_work_handler(struct work_struct *work)
+ 				complete(&ent->done);
+ 			}
+ 			up(&cmd->vars.sem);
++			complete(&ent->slotted);
+ 			return;
+ 		}
+ 	} else {
+diff --git a/drivers/net/ethernet/realtek/rtase/rtase_main.c b/drivers/net/ethernet/realtek/rtase/rtase_main.c
+index 1bfe5ef40c522d..14ffd45e9a25a7 100644
+--- a/drivers/net/ethernet/realtek/rtase/rtase_main.c
++++ b/drivers/net/ethernet/realtek/rtase/rtase_main.c
+@@ -1827,7 +1827,7 @@ static int rtase_alloc_msix(struct pci_dev *pdev, struct rtase_private *tp)
+ 
+ 	for (i = 0; i < tp->int_nums; i++) {
+ 		irq = pci_irq_vector(pdev, i);
+-		if (!irq) {
++		if (irq < 0) {
+ 			pci_disable_msix(pdev);
+ 			return irq;
+ 		}
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c
+index 6fdd94c8919ec2..2996bcdea9a28e 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c
+@@ -1,4 +1,5 @@
+ // SPDX-License-Identifier: GPL-2.0-only
++#include <linux/iommu.h>
+ #include <linux/platform_device.h>
+ #include <linux/of.h>
+ #include <linux/module.h>
+@@ -19,6 +20,8 @@ struct tegra_mgbe {
+ 	struct reset_control *rst_mac;
+ 	struct reset_control *rst_pcs;
+ 
++	u32 iommu_sid;
++
+ 	void __iomem *hv;
+ 	void __iomem *regs;
+ 	void __iomem *xpcs;
+@@ -50,7 +53,6 @@ struct tegra_mgbe {
+ #define MGBE_WRAP_COMMON_INTR_ENABLE	0x8704
+ #define MAC_SBD_INTR			BIT(2)
+ #define MGBE_WRAP_AXI_ASID0_CTRL	0x8400
+-#define MGBE_SID			0x6
+ 
+ static int __maybe_unused tegra_mgbe_suspend(struct device *dev)
+ {
+@@ -84,7 +86,7 @@ static int __maybe_unused tegra_mgbe_resume(struct device *dev)
+ 	writel(MAC_SBD_INTR, mgbe->regs + MGBE_WRAP_COMMON_INTR_ENABLE);
+ 
+ 	/* Program SID */
+-	writel(MGBE_SID, mgbe->hv + MGBE_WRAP_AXI_ASID0_CTRL);
++	writel(mgbe->iommu_sid, mgbe->hv + MGBE_WRAP_AXI_ASID0_CTRL);
+ 
+ 	value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_STATUS);
+ 	if ((value & XPCS_WRAP_UPHY_STATUS_TX_P_UP) == 0) {
+@@ -241,6 +243,12 @@ static int tegra_mgbe_probe(struct platform_device *pdev)
+ 	if (IS_ERR(mgbe->xpcs))
+ 		return PTR_ERR(mgbe->xpcs);
+ 
++	/* get controller's stream id from iommu property in device tree */
++	if (!tegra_dev_iommu_get_stream_id(mgbe->dev, &mgbe->iommu_sid)) {
++		dev_err(mgbe->dev, "failed to get iommu stream id\n");
++		return -EINVAL;
++	}
++
+ 	res.addr = mgbe->regs;
+ 	res.irq = irq;
+ 
+@@ -346,7 +354,7 @@ static int tegra_mgbe_probe(struct platform_device *pdev)
+ 	writel(MAC_SBD_INTR, mgbe->regs + MGBE_WRAP_COMMON_INTR_ENABLE);
+ 
+ 	/* Program SID */
+-	writel(MGBE_SID, mgbe->hv + MGBE_WRAP_AXI_ASID0_CTRL);
++	writel(mgbe->iommu_sid, mgbe->hv + MGBE_WRAP_AXI_ASID0_CTRL);
+ 
+ 	plat->flags |= STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP;
+ 
+diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
+index 1bf9c38e412562..deaf670c160ebf 100644
+--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c
++++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
+@@ -334,27 +334,25 @@ int wx_host_interface_command(struct wx *wx, u32 *buffer,
+ 	status = read_poll_timeout(rd32, hicr, hicr & WX_MNG_MBOX_CTL_FWRDY, 1000,
+ 				   timeout * 1000, false, wx, WX_MNG_MBOX_CTL);
+ 
++	buf[0] = rd32(wx, WX_MNG_MBOX);
++	if ((buf[0] & 0xff0000) >> 16 == 0x80) {
++		wx_err(wx, "Unknown FW command: 0x%x\n", buffer[0] & 0xff);
++		status = -EINVAL;
++		goto rel_out;
++	}
++
+ 	/* Check command completion */
+ 	if (status) {
+-		wx_dbg(wx, "Command has failed with no status valid.\n");
+-
+-		buf[0] = rd32(wx, WX_MNG_MBOX);
+-		if ((buffer[0] & 0xff) != (~buf[0] >> 24)) {
+-			status = -EINVAL;
+-			goto rel_out;
+-		}
+-		if ((buf[0] & 0xff0000) >> 16 == 0x80) {
+-			wx_dbg(wx, "It's unknown cmd.\n");
+-			status = -EINVAL;
+-			goto rel_out;
+-		}
+-
++		wx_err(wx, "Command has failed with no status valid.\n");
+ 		wx_dbg(wx, "write value:\n");
+ 		for (i = 0; i < dword_len; i++)
+ 			wx_dbg(wx, "%x ", buffer[i]);
+ 		wx_dbg(wx, "read value:\n");
+ 		for (i = 0; i < dword_len; i++)
+ 			wx_dbg(wx, "%x ", buf[i]);
++		wx_dbg(wx, "\ncheck: %x %x\n", buffer[0] & 0xff, ~buf[0] >> 24);
++
++		goto rel_out;
+ 	}
+ 
+ 	if (!return_data)
+diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
+index e685a7f946f0f8..753215ebc67c70 100644
+--- a/drivers/net/ieee802154/ca8210.c
++++ b/drivers/net/ieee802154/ca8210.c
+@@ -3072,7 +3072,11 @@ static int ca8210_probe(struct spi_device *spi_device)
+ 	spi_set_drvdata(priv->spi, priv);
+ 	if (IS_ENABLED(CONFIG_IEEE802154_CA8210_DEBUGFS)) {
+ 		cascoda_api_upstream = ca8210_test_int_driver_write;
+-		ca8210_test_interface_init(priv);
++		ret = ca8210_test_interface_init(priv);
++		if (ret) {
++			dev_crit(&spi_device->dev, "ca8210_test_interface_init failed\n");
++			goto error;
++		}
+ 	} else {
+ 		cascoda_api_upstream = NULL;
+ 	}
+diff --git a/drivers/net/mctp/mctp-i3c.c b/drivers/net/mctp/mctp-i3c.c
+index 1bc87a0626860f..ee9d562f0817cf 100644
+--- a/drivers/net/mctp/mctp-i3c.c
++++ b/drivers/net/mctp/mctp-i3c.c
+@@ -125,6 +125,8 @@ static int mctp_i3c_read(struct mctp_i3c_device *mi)
+ 
+ 	xfer.data.in = skb_put(skb, mi->mrl);
+ 
++	/* Make sure netif_rx() is called in the same order as the i3c reads. */
++	mutex_lock(&mi->lock);
+ 	rc = i3c_device_do_priv_xfers(mi->i3c, &xfer, 1);
+ 	if (rc < 0)
+ 		goto err;
+@@ -166,8 +168,10 @@ static int mctp_i3c_read(struct mctp_i3c_device *mi)
+ 		stats->rx_dropped++;
+ 	}
+ 
++	mutex_unlock(&mi->lock);
+ 	return 0;
+ err:
++	mutex_unlock(&mi->lock);
+ 	kfree_skb(skb);
+ 	return rc;
+ }
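
The mctp-i3c fix widens the existing lock so the bus read and the netif_rx() hand-off form one indivisible step, keeping delivery order equal to bus order across concurrent readers. A pthread sketch of the same shape (build with -pthread): the read+deliver pairs may interleave between threads, but a pair never splits:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int bus_seq;                 /* order packets leave the "bus" */

    static void *rx_worker(void *arg)
    {
            (void)arg;
            for (int i = 0; i < 3; i++) {
                    pthread_mutex_lock(&lock);
                    int seq = bus_seq++;                /* the "i3c read" */
                    printf("deliver packet %d\n", seq); /* the "netif_rx()" */
                    pthread_mutex_unlock(&lock);        /* pair never splits */
            }
            return NULL;
    }

    int main(void)
    {
            pthread_t a, b;

            pthread_create(&a, NULL, rx_worker, NULL);
            pthread_create(&b, NULL, rx_worker, NULL);
            pthread_join(a, NULL);
            pthread_join(b, NULL);
            return 0;                   /* always prints 0..5 in order */
    }
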
+diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c
+index 1aa303f76cc7af..da3651d329069c 100644
+--- a/drivers/perf/riscv_pmu_sbi.c
++++ b/drivers/perf/riscv_pmu_sbi.c
+@@ -507,8 +507,7 @@ static int pmu_sbi_event_map(struct perf_event *event, u64 *econfig)
+ {
+ 	u32 type = event->attr.type;
+ 	u64 config = event->attr.config;
+-	u64 raw_config_val;
+-	int ret;
++	int ret = -ENOENT;
+ 
+ 	/*
+ 	 * Ensure we are finished checking standard hardware events for
+@@ -528,21 +527,20 @@ static int pmu_sbi_event_map(struct perf_event *event, u64 *econfig)
+ 	case PERF_TYPE_RAW:
+ 		/*
+ 		 * As per SBI specification, the upper 16 bits must be unused
+-		 * for a raw event.
++		 * for a hardware raw event.
+ 		 * Bits 63:62 are used to distinguish between raw events
+ 		 * 00 - Hardware raw event
+ 		 * 10 - SBI firmware events
+ 		 * 11 - Risc-V platform specific firmware event
+ 		 */
+-		raw_config_val = config & RISCV_PMU_RAW_EVENT_MASK;
++
+ 		switch (config >> 62) {
+ 		case 0:
+ 			ret = RISCV_PMU_RAW_EVENT_IDX;
+-			*econfig = raw_config_val;
++			*econfig = config & RISCV_PMU_RAW_EVENT_MASK;
+ 			break;
+ 		case 2:
+-			ret = (raw_config_val & 0xFFFF) |
+-				(SBI_PMU_EVENT_TYPE_FW << 16);
++			ret = (config & 0xFFFF) | (SBI_PMU_EVENT_TYPE_FW << 16);
+ 			break;
+ 		case 3:
+ 			/*
+@@ -551,12 +549,13 @@ static int pmu_sbi_event_map(struct perf_event *event, u64 *econfig)
+ 			 * Event data - raw event encoding
+ 			 */
+ 			ret = SBI_PMU_EVENT_TYPE_FW << 16 | RISCV_PLAT_FW_EVENT;
+-			*econfig = raw_config_val;
++			*econfig = config & RISCV_PMU_PLAT_FW_EVENT_MASK;
++			break;
++		default:
+ 			break;
+ 		}
+ 		break;
+ 	default:
+-		ret = -ENOENT;
+ 		break;
+ 	}
+ 
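
The riscv_pmu_sbi rework dispatches on bits 63:62 of event->attr.config and defaults the result to -ENOENT, so reserved encodings fall through cleanly instead of leaking a stale value. A standalone sketch of the decode; the masks and the firmware type constant are placeholders patterned on the hunk's comments, not spec citations:

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    #define HW_RAW_MASK  0x0000ffffffffffffULL  /* upper 16 bits unused */
    #define FW_TYPE_BITS (0xfULL << 16)          /* placeholder type tag */

    /* Classify a raw perf config by bits 63:62; -ENOENT for anything else. */
    static long long map_raw_event(uint64_t config, uint64_t *econfig)
    {
            long long ret = -ENOENT;

            switch (config >> 62) {
            case 0:                         /* hardware raw event */
                    ret = 0;
                    *econfig = config & HW_RAW_MASK;
                    break;
            case 2:                         /* SBI firmware event */
                    ret = (config & 0xffff) | FW_TYPE_BITS;
                    break;
            default:                        /* reserved: keep -ENOENT */
                    break;
            }
            return ret;
    }

    int main(void)
    {
            uint64_t ec = 0;

            printf("%lld\n", map_raw_event(0x12ULL, &ec));    /* 0 */
            printf("%lld\n", map_raw_event(1ULL << 62, &ec)); /* -ENOENT */
            return 0;
    }
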
+diff --git a/drivers/platform/x86/amd/pmc/pmc.c b/drivers/platform/x86/amd/pmc/pmc.c
+index 5669f94c3d06bf..4d3acfe849bf4e 100644
+--- a/drivers/platform/x86/amd/pmc/pmc.c
++++ b/drivers/platform/x86/amd/pmc/pmc.c
+@@ -947,6 +947,10 @@ static int amd_pmc_suspend_handler(struct device *dev)
+ {
+ 	struct amd_pmc_dev *pdev = dev_get_drvdata(dev);
+ 
++	/*
++	 * Must be called only from the same set of dev_pm_ops handlers
++	 * from which i8042_pm_suspend() is called: currently just .suspend.
++	 */
+ 	if (pdev->disable_8042_wakeup && !disable_workarounds) {
+ 		int rc = amd_pmc_wa_irq1(pdev);
+ 
+@@ -959,7 +963,9 @@ static int amd_pmc_suspend_handler(struct device *dev)
+ 	return 0;
+ }
+ 
+-static DEFINE_SIMPLE_DEV_PM_OPS(amd_pmc_pm, amd_pmc_suspend_handler, NULL);
++static const struct dev_pm_ops amd_pmc_pm = {
++	.suspend = amd_pmc_suspend_handler,
++};
+ 
+ static const struct pci_device_id pmc_pci_ids[] = {
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_PS) },
+diff --git a/drivers/platform/x86/intel/pmc/core_ssram.c b/drivers/platform/x86/intel/pmc/core_ssram.c
+index 8504154b649f47..927f58dc73e324 100644
+--- a/drivers/platform/x86/intel/pmc/core_ssram.c
++++ b/drivers/platform/x86/intel/pmc/core_ssram.c
+@@ -269,8 +269,12 @@ pmc_core_ssram_get_pmc(struct pmc_dev *pmcdev, int pmc_idx, u32 offset)
+ 		/*
+ 		 * The secondary PMC BARS (which are behind hidden PCI devices)
+ 		 * are read from fixed offsets in MMIO of the primary PMC BAR.
++		 * If a device is not present, the value will be 0.
+ 		 */
+ 		ssram_base = get_base(tmp_ssram, offset);
++		if (!ssram_base)
++			return 0;
++
+ 		ssram = ioremap(ssram_base, SSRAM_HDR_SIZE);
+ 		if (!ssram)
+ 			return -ENOMEM;
+diff --git a/drivers/staging/iio/frequency/ad9832.c b/drivers/staging/iio/frequency/ad9832.c
+index 492612e8f8bad5..140ee4f9c137f5 100644
+--- a/drivers/staging/iio/frequency/ad9832.c
++++ b/drivers/staging/iio/frequency/ad9832.c
+@@ -158,7 +158,7 @@ static int ad9832_write_frequency(struct ad9832_state *st,
+ static int ad9832_write_phase(struct ad9832_state *st,
+ 			      unsigned long addr, unsigned long phase)
+ {
+-	if (phase > BIT(AD9832_PHASE_BITS))
++	if (phase >= BIT(AD9832_PHASE_BITS))
+ 		return -EINVAL;
+ 
+ 	st->phase_data[0] = cpu_to_be16((AD9832_CMD_PHA8BITSW << CMD_SHIFT) |
+diff --git a/drivers/staging/iio/frequency/ad9834.c b/drivers/staging/iio/frequency/ad9834.c
+index 47e7d7e6d92089..6e99e008c5f432 100644
+--- a/drivers/staging/iio/frequency/ad9834.c
++++ b/drivers/staging/iio/frequency/ad9834.c
+@@ -131,7 +131,7 @@ static int ad9834_write_frequency(struct ad9834_state *st,
+ static int ad9834_write_phase(struct ad9834_state *st,
+ 			      unsigned long addr, unsigned long phase)
+ {
+-	if (phase > BIT(AD9834_PHASE_BITS))
++	if (phase >= BIT(AD9834_PHASE_BITS))
+ 		return -EINVAL;
+ 	st->data = cpu_to_be16(addr | phase);
+ 
+diff --git a/drivers/thermal/thermal_of.c b/drivers/thermal/thermal_of.c
+index 07e09897165f34..5d3d8ce672cd51 100644
+--- a/drivers/thermal/thermal_of.c
++++ b/drivers/thermal/thermal_of.c
+@@ -176,6 +176,7 @@ static struct device_node *of_thermal_zone_find(struct device_node *sensor, int
+ 				goto out;
+ 			}
+ 
++			of_node_put(sensor_specs.np);
+ 			if ((sensor == sensor_specs.np) && id == (sensor_specs.args_count ?
+ 								  sensor_specs.args[0] : 0)) {
+ 				pr_debug("sensor %pOFn id=%d belongs to %pOFn\n", sensor, id, child);
+diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
+index 5f9f06911795cc..68baf75bdadc42 100644
+--- a/drivers/tty/serial/8250/8250_core.c
++++ b/drivers/tty/serial/8250/8250_core.c
+@@ -812,6 +812,9 @@ int serial8250_register_8250_port(const struct uart_8250_port *up)
+ 			uart->dl_write = up->dl_write;
+ 
+ 		if (uart->port.type != PORT_8250_CIR) {
++			if (uart_console_registered(&uart->port))
++				pm_runtime_get_sync(uart->port.dev);
++
+ 			if (serial8250_isa_config != NULL)
+ 				serial8250_isa_config(0, &uart->port,
+ 						&uart->capabilities);
+diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
+index e1e7bc04c57920..f5199fdecff278 100644
+--- a/drivers/tty/serial/stm32-usart.c
++++ b/drivers/tty/serial/stm32-usart.c
+@@ -1051,14 +1051,14 @@ static void stm32_usart_break_ctl(struct uart_port *port, int break_state)
+ 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+ 	unsigned long flags;
+ 
+-	spin_lock_irqsave(&port->lock, flags);
++	uart_port_lock_irqsave(port, &flags);
+ 
+ 	if (break_state)
+ 		stm32_usart_set_bits(port, ofs->rqr, USART_RQR_SBKRQ);
+ 	else
+ 		stm32_usart_clr_bits(port, ofs->rqr, USART_RQR_SBKRQ);
+ 
+-	spin_unlock_irqrestore(&port->lock, flags);
++	uart_port_unlock_irqrestore(port, flags);
+ }
+ 
+ static int stm32_usart_startup(struct uart_port *port)
+diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h
+index 9ffd94ddf8c7ce..786f20ef22386b 100644
+--- a/drivers/ufs/core/ufshcd-priv.h
++++ b/drivers/ufs/core/ufshcd-priv.h
+@@ -237,12 +237,6 @@ static inline void ufshcd_vops_config_scaling_param(struct ufs_hba *hba,
+ 		hba->vops->config_scaling_param(hba, p, data);
+ }
+ 
+-static inline void ufshcd_vops_reinit_notify(struct ufs_hba *hba)
+-{
+-	if (hba->vops && hba->vops->reinit_notify)
+-		hba->vops->reinit_notify(hba);
+-}
+-
+ static inline int ufshcd_vops_mcq_config_resource(struct ufs_hba *hba)
+ {
+ 	if (hba->vops && hba->vops->mcq_config_resource)
+diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
+index bc13133efaa508..05b936ad353be7 100644
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -8881,7 +8881,6 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
+ 		ufshcd_device_reset(hba);
+ 		ufs_put_device_desc(hba);
+ 		ufshcd_hba_stop(hba);
+-		ufshcd_vops_reinit_notify(hba);
+ 		ret = ufshcd_hba_enable(hba);
+ 		if (ret) {
+ 			dev_err(hba->dev, "Host controller enable failed\n");
+diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
+index 91127fb171864f..989692fb91083f 100644
+--- a/drivers/ufs/host/ufs-qcom.c
++++ b/drivers/ufs/host/ufs-qcom.c
+@@ -368,6 +368,11 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
+ 	if (ret)
+ 		return ret;
+ 
++	if (phy->power_count) {
++		phy_power_off(phy);
++		phy_exit(phy);
++	}
++
+ 	/* phy initialization - calibrate the phy */
+ 	ret = phy_init(phy);
+ 	if (ret) {
+@@ -1562,13 +1567,6 @@ static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
+ }
+ #endif
+ 
+-static void ufs_qcom_reinit_notify(struct ufs_hba *hba)
+-{
+-	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+-
+-	phy_power_off(host->generic_phy);
+-}
+-
+ /* Resources */
+ static const struct ufshcd_res_info ufs_res_info[RES_MAX] = {
+ 	{.name = "ufs_mem",},
+@@ -1807,7 +1805,6 @@ static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
+ 	.device_reset		= ufs_qcom_device_reset,
+ 	.config_scaling_param = ufs_qcom_config_scaling_param,
+ 	.program_key		= ufs_qcom_ice_program_key,
+-	.reinit_notify		= ufs_qcom_reinit_notify,
+ 	.mcq_config_resource	= ufs_qcom_mcq_config_resource,
+ 	.get_hba_mac		= ufs_qcom_get_hba_mac,
+ 	.op_runtime_config	= ufs_qcom_op_runtime_config,
+diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
+index 17b3ac2ac8a1e8..46d1a4428b9a98 100644
+--- a/drivers/usb/chipidea/ci_hdrc_imx.c
++++ b/drivers/usb/chipidea/ci_hdrc_imx.c
+@@ -370,25 +370,29 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
+ 		data->pinctrl = devm_pinctrl_get(dev);
+ 		if (PTR_ERR(data->pinctrl) == -ENODEV)
+ 			data->pinctrl = NULL;
+-		else if (IS_ERR(data->pinctrl))
+-			return dev_err_probe(dev, PTR_ERR(data->pinctrl),
++		else if (IS_ERR(data->pinctrl)) {
++			ret = dev_err_probe(dev, PTR_ERR(data->pinctrl),
+ 					     "pinctrl get failed\n");
++			goto err_put;
++		}
+ 
+ 		data->hsic_pad_regulator =
+ 				devm_regulator_get_optional(dev, "hsic");
+ 		if (PTR_ERR(data->hsic_pad_regulator) == -ENODEV) {
+ 			/* no pad regulator is needed */
+ 			data->hsic_pad_regulator = NULL;
+-		} else if (IS_ERR(data->hsic_pad_regulator))
+-			return dev_err_probe(dev, PTR_ERR(data->hsic_pad_regulator),
++		} else if (IS_ERR(data->hsic_pad_regulator)) {
++			ret = dev_err_probe(dev, PTR_ERR(data->hsic_pad_regulator),
+ 					     "Get HSIC pad regulator error\n");
++			goto err_put;
++		}
+ 
+ 		if (data->hsic_pad_regulator) {
+ 			ret = regulator_enable(data->hsic_pad_regulator);
+ 			if (ret) {
+ 				dev_err(dev,
+ 					"Failed to enable HSIC pad regulator\n");
+-				return ret;
++				goto err_put;
+ 			}
+ 		}
+ 	}
+@@ -402,13 +406,14 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
+ 			dev_err(dev,
+ 				"pinctrl_hsic_idle lookup failed, err=%ld\n",
+ 					PTR_ERR(pinctrl_hsic_idle));
+-			return PTR_ERR(pinctrl_hsic_idle);
++			ret = PTR_ERR(pinctrl_hsic_idle);
++			goto err_put;
+ 		}
+ 
+ 		ret = pinctrl_select_state(data->pinctrl, pinctrl_hsic_idle);
+ 		if (ret) {
+ 			dev_err(dev, "hsic_idle select failed, err=%d\n", ret);
+-			return ret;
++			goto err_put;
+ 		}
+ 
+ 		data->pinctrl_hsic_active = pinctrl_lookup_state(data->pinctrl,
+@@ -417,7 +422,8 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
+ 			dev_err(dev,
+ 				"pinctrl_hsic_active lookup failed, err=%ld\n",
+ 					PTR_ERR(data->pinctrl_hsic_active));
+-			return PTR_ERR(data->pinctrl_hsic_active);
++			ret = PTR_ERR(data->pinctrl_hsic_active);
++			goto err_put;
+ 		}
+ 	}
+ 
+@@ -527,6 +533,8 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
+ 	if (pdata.flags & CI_HDRC_PMQOS)
+ 		cpu_latency_qos_remove_request(&data->pm_qos_req);
+ 	data->ci_pdev = NULL;
++err_put:
++	put_device(data->usbmisc_data->dev);
+ 	return ret;
+ }
+ 
+@@ -551,6 +559,7 @@ static void ci_hdrc_imx_remove(struct platform_device *pdev)
+ 		if (data->hsic_pad_regulator)
+ 			regulator_disable(data->hsic_pad_regulator);
+ 	}
++	put_device(data->usbmisc_data->dev);
+ }
+ 
+ static void ci_hdrc_imx_shutdown(struct platform_device *pdev)
+diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
+index 5a2e43331064eb..ff1a941fd2ede4 100644
+--- a/drivers/usb/class/usblp.c
++++ b/drivers/usb/class/usblp.c
+@@ -1337,11 +1337,12 @@ static int usblp_set_protocol(struct usblp *usblp, int protocol)
+ 	if (protocol < USBLP_FIRST_PROTOCOL || protocol > USBLP_LAST_PROTOCOL)
+ 		return -EINVAL;
+ 
++	alts = usblp->protocol[protocol].alt_setting;
++	if (alts < 0)
++		return -EINVAL;
++
+ 	/* Don't unnecessarily set the interface if there's a single alt. */
+ 	if (usblp->intf->num_altsetting > 1) {
+-		alts = usblp->protocol[protocol].alt_setting;
+-		if (alts < 0)
+-			return -EINVAL;
+ 		r = usb_set_interface(usblp->dev, usblp->ifnum, alts);
+ 		if (r < 0) {
+ 			printk(KERN_ERR "usblp: can't set desired altsetting %d on interface %d\n",
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 4b93c0bd1d4bcc..21ac9b464696f5 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -2663,13 +2663,13 @@ int usb_new_device(struct usb_device *udev)
+ 		err = sysfs_create_link(&udev->dev.kobj,
+ 				&port_dev->dev.kobj, "port");
+ 		if (err)
+-			goto fail;
++			goto out_del_dev;
+ 
+ 		err = sysfs_create_link(&port_dev->dev.kobj,
+ 				&udev->dev.kobj, "device");
+ 		if (err) {
+ 			sysfs_remove_link(&udev->dev.kobj, "port");
+-			goto fail;
++			goto out_del_dev;
+ 		}
+ 
+ 		if (!test_and_set_bit(port1, hub->child_usage_bits))
+@@ -2683,6 +2683,8 @@ int usb_new_device(struct usb_device *udev)
+ 	pm_runtime_put_sync_autosuspend(&udev->dev);
+ 	return err;
+ 
++out_del_dev:
++	device_del(&udev->dev);
+ fail:
+ 	usb_set_device_state(udev, USB_STATE_NOTATTACHED);
+ 	pm_runtime_disable(&udev->dev);
+diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c
+index e7da2fca11a48c..c92fb648a1c4c0 100644
+--- a/drivers/usb/core/port.c
++++ b/drivers/usb/core/port.c
+@@ -452,10 +452,11 @@ static int usb_port_runtime_suspend(struct device *dev)
+ static void usb_port_shutdown(struct device *dev)
+ {
+ 	struct usb_port *port_dev = to_usb_port(dev);
++	struct usb_device *udev = port_dev->child;
+ 
+-	if (port_dev->child) {
+-		usb_disable_usb2_hardware_lpm(port_dev->child);
+-		usb_unlocked_disable_lpm(port_dev->child);
++	if (udev && !udev->port_is_suspended) {
++		usb_disable_usb2_hardware_lpm(udev);
++		usb_unlocked_disable_lpm(udev);
+ 	}
+ }
+ 
+diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
+index 0b9ba338b2654c..0e91a227507fff 100644
+--- a/drivers/usb/dwc3/core.h
++++ b/drivers/usb/dwc3/core.h
+@@ -464,6 +464,7 @@
+ #define DWC3_DCTL_TRGTULST_SS_INACT	(DWC3_DCTL_TRGTULST(6))
+ 
+ /* These apply for core versions 1.94a and later */
++#define DWC3_DCTL_NYET_THRES_MASK	(0xf << 20)
+ #define DWC3_DCTL_NYET_THRES(n)		(((n) & 0xf) << 20)
+ 
+ #define DWC3_DCTL_KEEP_CONNECT		BIT(19)
+diff --git a/drivers/usb/dwc3/dwc3-am62.c b/drivers/usb/dwc3/dwc3-am62.c
+index fad151e78fd669..538185a4d1b4fb 100644
+--- a/drivers/usb/dwc3/dwc3-am62.c
++++ b/drivers/usb/dwc3/dwc3-am62.c
+@@ -309,6 +309,7 @@ static void dwc3_ti_remove(struct platform_device *pdev)
+ 
+ 	pm_runtime_put_sync(dev);
+ 	pm_runtime_disable(dev);
++	pm_runtime_dont_use_autosuspend(dev);
+ 	pm_runtime_set_suspended(dev);
+ }
+ 
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index 56744b11e67cb9..a5d75d7d0a8707 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -4208,8 +4208,10 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
+ 		WARN_ONCE(DWC3_VER_IS_PRIOR(DWC3, 240A) && dwc->has_lpm_erratum,
+ 				"LPM Erratum not available on dwc3 revisions < 2.40a\n");
+ 
+-		if (dwc->has_lpm_erratum && !DWC3_VER_IS_PRIOR(DWC3, 240A))
++		if (dwc->has_lpm_erratum && !DWC3_VER_IS_PRIOR(DWC3, 240A)) {
++			reg &= ~DWC3_DCTL_NYET_THRES_MASK;
+ 			reg |= DWC3_DCTL_NYET_THRES(dwc->lpm_nyet_threshold);
++		}
+ 
+ 		dwc3_gadget_dctl_write_safe(dwc, reg);
+ 	} else {
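
The dwc3 fix is a textbook read-modify-write of a multi-bit register field: clear the whole 4-bit NYET threshold field first, then OR in the new value; otherwise stale bits from a previous setting survive. A tiny sketch with the shift and width taken from the DWC3_DCTL macros in the hunk:

    #include <stdint.h>
    #include <stdio.h>

    #define NYET_THRES_MASK (0xfu << 20)
    #define NYET_THRES(n)   (((n) & 0xfu) << 20)

    /* Read-modify-write: clear the whole field, then set the new value. */
    static uint32_t set_nyet_thres(uint32_t reg, unsigned int n)
    {
            reg &= ~NYET_THRES_MASK;
            reg |= NYET_THRES(n);
            return reg;
    }

    int main(void)
    {
            uint32_t reg = NYET_THRES(0xf);                 /* stale field: 0xf */

            reg = set_nyet_thres(reg, 0x2);
            printf("field = 0x%x\n", (reg & NYET_THRES_MASK) >> 20); /* 0x2 */
            return 0;
    }
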
+diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
+index 566ff0b1282a82..76521555e3c14c 100644
+--- a/drivers/usb/gadget/Kconfig
++++ b/drivers/usb/gadget/Kconfig
+@@ -211,6 +211,8 @@ config USB_F_MIDI
+ 
+ config USB_F_MIDI2
+ 	tristate
++	select SND_UMP
++	select SND_UMP_LEGACY_RAWMIDI
+ 
+ config USB_F_HID
+ 	tristate
+@@ -445,8 +447,6 @@ config USB_CONFIGFS_F_MIDI2
+ 	depends on USB_CONFIGFS
+ 	depends on SND
+ 	select USB_LIBCOMPOSITE
+-	select SND_UMP
+-	select SND_UMP_LEGACY_RAWMIDI
+ 	select USB_F_MIDI2
+ 	help
+ 	  The MIDI 2.0 function driver provides the generic emulated
+diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
+index c82a6a0fba93dd..29390d573e2346 100644
+--- a/drivers/usb/gadget/configfs.c
++++ b/drivers/usb/gadget/configfs.c
+@@ -827,11 +827,15 @@ static ssize_t gadget_string_s_store(struct config_item *item, const char *page,
+ {
+ 	struct gadget_string *string = to_gadget_string(item);
+ 	int size = min(sizeof(string->string), len + 1);
++	ssize_t cpy_len;
+ 
+ 	if (len > USB_MAX_STRING_LEN)
+ 		return -EINVAL;
+ 
+-	return strscpy(string->string, page, size);
++	cpy_len = strscpy(string->string, page, size);
++	if (cpy_len > 0 && string->string[cpy_len - 1] == '\n')
++		string->string[cpy_len - 1] = 0;
++	return len;
+ }
+ CONFIGFS_ATTR(gadget_string_, s);
+ 
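
The configfs fix does two things: it strips the trailing newline that echo appends, and it returns len instead of the copied length so the VFS treats the write as fully consumed. A userspace sketch, with a local bounded-copy helper standing in for the kernel's strscpy():

    #include <stdio.h>
    #include <string.h>
    #include <sys/types.h>

    /* Bounded copy returning chars copied (sans NUL), like strscpy(). */
    static ssize_t copy_bounded(char *dst, const char *src, size_t size)
    {
            size_t n = strlen(src);

            if (n >= size)
                    n = size - 1;
            memcpy(dst, src, n);
            dst[n] = '\0';
            return (ssize_t)n;
    }

    static ssize_t store_string(char *field, size_t field_sz,
                                const char *page, size_t len)
    {
            ssize_t cpy_len = copy_bounded(field, page, field_sz);

            if (cpy_len > 0 && field[cpy_len - 1] == '\n')
                    field[cpy_len - 1] = '\0';  /* drop echo's newline */
            return (ssize_t)len;                /* consume the whole write */
    }

    int main(void)
    {
            char s[32];

            store_string(s, sizeof(s), "serial123\n", 10);
            printf("[%s]\n", s);                /* [serial123] */
            return 0;
    }
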
+diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
+index 2920f8000bbd83..92c883440e02cd 100644
+--- a/drivers/usb/gadget/function/f_fs.c
++++ b/drivers/usb/gadget/function/f_fs.c
+@@ -2285,7 +2285,7 @@ static int functionfs_bind(struct ffs_data *ffs, struct usb_composite_dev *cdev)
+ 	struct usb_gadget_strings **lang;
+ 	int first_id;
+ 
+-	if (WARN_ON(ffs->state != FFS_ACTIVE
++	if ((ffs->state != FFS_ACTIVE
+ 		 || test_and_set_bit(FFS_FL_BOUND, &ffs->flags)))
+ 		return -EBADFD;
+ 
+diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
+index ce5b77f8919026..9b324821c93bd0 100644
+--- a/drivers/usb/gadget/function/f_uac2.c
++++ b/drivers/usb/gadget/function/f_uac2.c
+@@ -1185,6 +1185,7 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
+ 		uac2->as_in_alt = 0;
+ 	}
+ 
++	std_ac_if_desc.bNumEndpoints = 0;
+ 	if (FUOUT_EN(uac2_opts) || FUIN_EN(uac2_opts)) {
+ 		uac2->int_ep = usb_ep_autoconfig(gadget, &fs_ep_int_desc);
+ 		if (!uac2->int_ep) {
+diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
+index 53d9fc41acc522..bc143a86c2ddf0 100644
+--- a/drivers/usb/gadget/function/u_serial.c
++++ b/drivers/usb/gadget/function/u_serial.c
+@@ -1420,6 +1420,10 @@ void gserial_disconnect(struct gserial *gser)
+ 	/* REVISIT as above: how best to track this? */
+ 	port->port_line_coding = gser->port_line_coding;
+ 
++	/* disable endpoints, aborting down any active I/O */
++	usb_ep_disable(gser->out);
++	usb_ep_disable(gser->in);
++
+ 	port->port_usb = NULL;
+ 	gser->ioport = NULL;
+ 	if (port->port.count > 0) {
+@@ -1431,10 +1435,6 @@ void gserial_disconnect(struct gserial *gser)
+ 	spin_unlock(&port->port_lock);
+ 	spin_unlock_irqrestore(&serial_port_lock, flags);
+ 
+-	/* disable endpoints, aborting down any active I/O */
+-	usb_ep_disable(gser->out);
+-	usb_ep_disable(gser->in);
+-
+ 	/* finally, free any unused/unusable I/O buffers */
+ 	spin_lock_irqsave(&port->port_lock, flags);
+ 	if (port->port.count == 0)
+diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
+index ecaa75718e5926..e6660472501e4d 100644
+--- a/drivers/usb/host/xhci-plat.c
++++ b/drivers/usb/host/xhci-plat.c
+@@ -290,7 +290,8 @@ int xhci_plat_probe(struct platform_device *pdev, struct device *sysdev, const s
+ 
+ 	hcd->tpl_support = of_usb_host_tpl_support(sysdev->of_node);
+ 
+-	if (priv && (priv->quirks & XHCI_SKIP_PHY_INIT))
++	if ((priv && (priv->quirks & XHCI_SKIP_PHY_INIT)) ||
++	    (xhci->quirks & XHCI_SKIP_PHY_INIT))
+ 		hcd->skip_phy_initialization = 1;
+ 
+ 	if (priv && (priv->quirks & XHCI_SG_TRB_CACHE_SIZE_QUIRK))
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index c24101f0a07ad1..9960ac2b10b719 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -223,6 +223,7 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */
+ 	{ USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
+ 	{ USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
++	{ USB_DEVICE(0x1B93, 0x1013) }, /* Phoenix Contact UPS Device */
+ 	{ USB_DEVICE(0x1BA4, 0x0002) },	/* Silicon Labs 358x factory default */
+ 	{ USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */
+ 	{ USB_DEVICE(0x1D6F, 0x0010) }, /* Seluxit ApS RF Dongle */
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 64317b390d2285..1e2ae0c6c41c79 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -621,7 +621,7 @@ static void option_instat_callback(struct urb *urb);
+ 
+ /* MeiG Smart Technology products */
+ #define MEIGSMART_VENDOR_ID			0x2dee
+-/* MeiG Smart SRM825L based on Qualcomm 315 */
++/* MeiG Smart SRM815/SRM825L based on Qualcomm 315 */
+ #define MEIGSMART_PRODUCT_SRM825L		0x4d22
+ /* MeiG Smart SLM320 based on UNISOC UIS8910 */
+ #define MEIGSMART_PRODUCT_SLM320		0x4d41
+@@ -2405,6 +2405,7 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, LUAT_PRODUCT_AIR720U, 0xff, 0, 0) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SLM320, 0xff, 0, 0) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SLM770A, 0xff, 0, 0) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SRM825L, 0xff, 0, 0) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SRM825L, 0xff, 0xff, 0x30) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SRM825L, 0xff, 0xff, 0x40) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SRM825L, 0xff, 0xff, 0x60) },
+@@ -2412,6 +2413,7 @@ static const struct usb_device_id option_ids[] = {
+ 	  .driver_info = NCTRL(1) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x1bbb, 0x0640, 0xff),			/* TCL IK512 ECM */
+ 	  .driver_info = NCTRL(3) },
++	{ USB_DEVICE_INTERFACE_CLASS(0x2949, 0x8700, 0xff) },			/* Neoway N723-EA */
+ 	{ } /* Terminating entry */
+ };
+ MODULE_DEVICE_TABLE(usb, option_ids);
+diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
+index e5ad23d86833d5..54f0b1c83317cd 100644
+--- a/drivers/usb/storage/unusual_devs.h
++++ b/drivers/usb/storage/unusual_devs.h
+@@ -255,6 +255,13 @@ UNUSUAL_DEV(  0x0421, 0x06aa, 0x1110, 0x1110,
+ 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ 		US_FL_MAX_SECTORS_64 ),
+ 
++/* Added by Lubomir Rintel <lkundrak@v3.sk>, a very fine chap */
++UNUSUAL_DEV(  0x0421, 0x06c2, 0x0000, 0x0406,
++		"Nokia",
++		"Nokia 208",
++		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++		US_FL_MAX_SECTORS_64 ),
++
+ #ifdef NO_SDDR09
+ UNUSUAL_DEV(  0x0436, 0x0005, 0x0100, 0x0100,
+ 		"Microtech",
+diff --git a/drivers/usb/typec/tcpm/maxim_contaminant.c b/drivers/usb/typec/tcpm/maxim_contaminant.c
+index 22163d8f9eb07e..0cdda06592fd3c 100644
+--- a/drivers/usb/typec/tcpm/maxim_contaminant.c
++++ b/drivers/usb/typec/tcpm/maxim_contaminant.c
+@@ -135,7 +135,7 @@ static int max_contaminant_read_resistance_kohm(struct max_tcpci_chip *chip,
+ 
+ 		mv = max_contaminant_read_adc_mv(chip, channel, sleep_msec, raw, true);
+ 		if (mv < 0)
+-			return ret;
++			return mv;
+ 
+ 		/* OVP enable */
+ 		ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, CCOVPDIS, 0);
+@@ -157,7 +157,7 @@ static int max_contaminant_read_resistance_kohm(struct max_tcpci_chip *chip,
+ 
+ 	mv = max_contaminant_read_adc_mv(chip, channel, sleep_msec, raw, true);
+ 	if (mv < 0)
+-		return ret;
++		return mv;
+ 	/* Disable current source */
+ 	ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, SBURPCTRL, 0);
+ 	if (ret < 0)
+diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c
+index ed32583829bec2..24a6a4354df8ba 100644
+--- a/drivers/usb/typec/tcpm/tcpci.c
++++ b/drivers/usb/typec/tcpm/tcpci.c
+@@ -700,7 +700,7 @@ static int tcpci_init(struct tcpc_dev *tcpc)
+ 
+ 	tcpci->alert_mask = reg;
+ 
+-	return tcpci_write16(tcpci, TCPC_ALERT_MASK, reg);
++	return 0;
+ }
+ 
+ irqreturn_t tcpci_irq(struct tcpci *tcpci)
+@@ -923,22 +923,27 @@ static int tcpci_probe(struct i2c_client *client)
+ 
+ 	chip->data.set_orientation = err;
+ 
++	chip->tcpci = tcpci_register_port(&client->dev, &chip->data);
++	if (IS_ERR(chip->tcpci))
++		return PTR_ERR(chip->tcpci);
++
+ 	err = devm_request_threaded_irq(&client->dev, client->irq, NULL,
+ 					_tcpci_irq,
+ 					IRQF_SHARED | IRQF_ONESHOT,
+ 					dev_name(&client->dev), chip);
+ 	if (err < 0)
+-		return err;
++		goto unregister_port;
+ 
+-	/*
+-	 * Disable irq while registering port. If irq is configured as an edge
+-	 * irq this allow to keep track and process the irq as soon as it is enabled.
+-	 */
+-	disable_irq(client->irq);
+-	chip->tcpci = tcpci_register_port(&client->dev, &chip->data);
+-	enable_irq(client->irq);
++	/* Enable chip interrupts as the last step */
++	err = tcpci_write16(chip->tcpci, TCPC_ALERT_MASK, chip->tcpci->alert_mask);
++	if (err < 0)
++		goto unregister_port;
+ 
+-	return PTR_ERR_OR_ZERO(chip->tcpci);
++	return 0;
++
++unregister_port:
++	tcpci_unregister_port(chip->tcpci);
++	return err;
+ }
+ 
+ static void tcpci_remove(struct i2c_client *client)
+diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c
+index fcb8e61136cfd7..740171f24ef9fa 100644
+--- a/drivers/usb/typec/ucsi/ucsi_ccg.c
++++ b/drivers/usb/typec/ucsi/ucsi_ccg.c
+@@ -646,7 +646,7 @@ static int ucsi_ccg_sync_control(struct ucsi *ucsi, u64 command)
+ 			UCSI_CMD_CONNECTOR_MASK;
+ 		if (con_index == 0) {
+ 			ret = -EINVAL;
+-			goto unlock;
++			goto err_put;
+ 		}
+ 		con = &uc->ucsi->connector[con_index - 1];
+ 		ucsi_ccg_update_set_new_cam_cmd(uc, con, &command);
+@@ -654,8 +654,8 @@ static int ucsi_ccg_sync_control(struct ucsi *ucsi, u64 command)
+ 
+ 	ret = ucsi_sync_control_common(ucsi, command);
+ 
++err_put:
+ 	pm_runtime_put_sync(uc->dev);
+-unlock:
+ 	mutex_unlock(&uc->lock);
+ 
+ 	return ret;
+diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c
+index 1ab58da9f38a6e..1a4ed5a357d360 100644
+--- a/drivers/vfio/pci/vfio_pci_core.c
++++ b/drivers/vfio/pci/vfio_pci_core.c
+@@ -1661,14 +1661,15 @@ static vm_fault_t vfio_pci_mmap_huge_fault(struct vm_fault *vmf,
+ 	unsigned long pfn, pgoff = vmf->pgoff - vma->vm_pgoff;
+ 	vm_fault_t ret = VM_FAULT_SIGBUS;
+ 
+-	if (order && (vmf->address & ((PAGE_SIZE << order) - 1) ||
++	pfn = vma_to_pfn(vma) + pgoff;
++
++	if (order && (pfn & ((1 << order) - 1) ||
++		      vmf->address & ((PAGE_SIZE << order) - 1) ||
+ 		      vmf->address + (PAGE_SIZE << order) > vma->vm_end)) {
+ 		ret = VM_FAULT_FALLBACK;
+ 		goto out;
+ 	}
+ 
+-	pfn = vma_to_pfn(vma);
+-
+ 	down_read(&vdev->memory_lock);
+ 
+ 	if (vdev->pm_runtime_engaged || !__vfio_pci_memory_enabled(vdev))
+@@ -1676,18 +1677,18 @@ static vm_fault_t vfio_pci_mmap_huge_fault(struct vm_fault *vmf,
+ 
+ 	switch (order) {
+ 	case 0:
+-		ret = vmf_insert_pfn(vma, vmf->address, pfn + pgoff);
++		ret = vmf_insert_pfn(vma, vmf->address, pfn);
+ 		break;
+ #ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
+ 	case PMD_ORDER:
+-		ret = vmf_insert_pfn_pmd(vmf, __pfn_to_pfn_t(pfn + pgoff,
+-							     PFN_DEV), false);
++		ret = vmf_insert_pfn_pmd(vmf,
++					 __pfn_to_pfn_t(pfn, PFN_DEV), false);
+ 		break;
+ #endif
+ #ifdef CONFIG_ARCH_SUPPORTS_PUD_PFNMAP
+ 	case PUD_ORDER:
+-		ret = vmf_insert_pfn_pud(vmf, __pfn_to_pfn_t(pfn + pgoff,
+-							     PFN_DEV), false);
++		ret = vmf_insert_pfn_pud(vmf,
++					 __pfn_to_pfn_t(pfn, PFN_DEV), false);
+ 		break;
+ #endif
+ 	default:
+diff --git a/fs/afs/afs.h b/fs/afs/afs.h
+index b488072aee87ae..ec3db00bd0813c 100644
+--- a/fs/afs/afs.h
++++ b/fs/afs/afs.h
+@@ -10,7 +10,7 @@
+ 
+ #include <linux/in.h>
+ 
+-#define AFS_MAXCELLNAME		256  	/* Maximum length of a cell name */
++#define AFS_MAXCELLNAME		253  	/* Maximum length of a cell name (DNS limited) */
+ #define AFS_MAXVOLNAME		64  	/* Maximum length of a volume name */
+ #define AFS_MAXNSERVERS		8   	/* Maximum servers in a basic volume record */
+ #define AFS_NMAXNSERVERS	13  	/* Maximum servers in a N/U-class volume record */
+diff --git a/fs/afs/afs_vl.h b/fs/afs/afs_vl.h
+index a06296c8827d42..b835e25a2c02d3 100644
+--- a/fs/afs/afs_vl.h
++++ b/fs/afs/afs_vl.h
+@@ -13,6 +13,7 @@
+ #define AFS_VL_PORT		7003	/* volume location service port */
+ #define VL_SERVICE		52	/* RxRPC service ID for the Volume Location service */
+ #define YFS_VL_SERVICE		2503	/* Service ID for AuriStor upgraded VL service */
++#define YFS_VL_MAXCELLNAME	256  	/* Maximum length of a cell name in YFS protocol */
+ 
+ enum AFSVL_Operations {
+ 	VLGETENTRYBYID		= 503,	/* AFS Get VLDB entry by ID */
+diff --git a/fs/afs/vl_alias.c b/fs/afs/vl_alias.c
+index 9f36e14f1c2d24..f9e76b604f31b9 100644
+--- a/fs/afs/vl_alias.c
++++ b/fs/afs/vl_alias.c
+@@ -253,6 +253,7 @@ static char *afs_vl_get_cell_name(struct afs_cell *cell, struct key *key)
+ static int yfs_check_canonical_cell_name(struct afs_cell *cell, struct key *key)
+ {
+ 	struct afs_cell *master;
++	size_t name_len;
+ 	char *cell_name;
+ 
+ 	cell_name = afs_vl_get_cell_name(cell, key);
+@@ -264,8 +265,11 @@ static int yfs_check_canonical_cell_name(struct afs_cell *cell, struct key *key)
+ 		return 0;
+ 	}
+ 
+-	master = afs_lookup_cell(cell->net, cell_name, strlen(cell_name),
+-				 NULL, false);
++	name_len = strlen(cell_name);
++	if (!name_len || name_len > AFS_MAXCELLNAME)
++		master = ERR_PTR(-EOPNOTSUPP);
++	else
++		master = afs_lookup_cell(cell->net, cell_name, name_len, NULL, false);
+ 	kfree(cell_name);
+ 	if (IS_ERR(master))
+ 		return PTR_ERR(master);
+diff --git a/fs/afs/vlclient.c b/fs/afs/vlclient.c
+index cac75f89b64ad1..55dd0fc5aad7bf 100644
+--- a/fs/afs/vlclient.c
++++ b/fs/afs/vlclient.c
+@@ -697,7 +697,7 @@ static int afs_deliver_yfsvl_get_cell_name(struct afs_call *call)
+ 			return ret;
+ 
+ 		namesz = ntohl(call->tmp);
+-		if (namesz > AFS_MAXCELLNAME)
++		if (namesz > YFS_VL_MAXCELLNAME)
+ 			return afs_protocol_error(call, afs_eproto_cellname_len);
+ 		paddedsz = (namesz + 3) & ~3;
+ 		call->count = namesz;
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index 872cca54cc6ce4..42c9899d9241c9 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -786,7 +786,7 @@ static void submit_extent_folio(struct btrfs_bio_ctrl *bio_ctrl,
+ 		}
+ 
+ 		if (bio_ctrl->wbc)
+-			wbc_account_cgroup_owner(bio_ctrl->wbc, &folio->page,
++			wbc_account_cgroup_owner(bio_ctrl->wbc, folio,
+ 						 len);
+ 
+ 		size -= len;
+@@ -1708,7 +1708,7 @@ static noinline_for_stack void write_one_eb(struct extent_buffer *eb,
+ 		ret = bio_add_folio(&bbio->bio, folio, eb->len,
+ 				    eb->start - folio_pos(folio));
+ 		ASSERT(ret);
+-		wbc_account_cgroup_owner(wbc, folio_page(folio, 0), eb->len);
++		wbc_account_cgroup_owner(wbc, folio, eb->len);
+ 		folio_unlock(folio);
+ 	} else {
+ 		int num_folios = num_extent_folios(eb);
+@@ -1722,8 +1722,7 @@ static noinline_for_stack void write_one_eb(struct extent_buffer *eb,
+ 			folio_start_writeback(folio);
+ 			ret = bio_add_folio(&bbio->bio, folio, eb->folio_size, 0);
+ 			ASSERT(ret);
+-			wbc_account_cgroup_owner(wbc, folio_page(folio, 0),
+-						 eb->folio_size);
++			wbc_account_cgroup_owner(wbc, folio, eb->folio_size);
+ 			wbc->nr_to_write -= folio_nr_pages(folio);
+ 			folio_unlock(folio);
+ 		}
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index b5cfb85af937fc..a3c861b2a6d25d 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -1729,7 +1729,7 @@ static bool run_delalloc_compressed(struct btrfs_inode *inode,
+ 			 * need full accuracy.  Just account the whole thing
+ 			 * against the first page.
+ 			 */
+-			wbc_account_cgroup_owner(wbc, &locked_folio->page,
++			wbc_account_cgroup_owner(wbc, locked_folio,
+ 						 cur_end - start);
+ 			async_chunk[i].locked_folio = locked_folio;
+ 			locked_folio = NULL;
+diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
+index 3a34274280746c..c73a41b1ad5607 100644
+--- a/fs/btrfs/scrub.c
++++ b/fs/btrfs/scrub.c
+@@ -1541,6 +1541,10 @@ static int scrub_find_fill_first_stripe(struct btrfs_block_group *bg,
+ 	u64 extent_gen;
+ 	int ret;
+ 
++	if (unlikely(!extent_root)) {
++		btrfs_err(fs_info, "no valid extent root for scrub");
++		return -EUCLEAN;
++	}
+ 	memset(stripe->sectors, 0, sizeof(struct scrub_sector_verification) *
+ 				   stripe->nr_sectors);
+ 	scrub_stripe_reset_bitmaps(stripe);
+diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
+index 100abc00b794ca..03958d1a53b0eb 100644
+--- a/fs/btrfs/zlib.c
++++ b/fs/btrfs/zlib.c
+@@ -174,10 +174,10 @@ int zlib_compress_folios(struct list_head *ws, struct address_space *mapping,
+ 					copy_page(workspace->buf + i * PAGE_SIZE,
+ 						  data_in);
+ 					start += PAGE_SIZE;
+-					workspace->strm.avail_in =
+-						(in_buf_folios << PAGE_SHIFT);
+ 				}
+ 				workspace->strm.next_in = workspace->buf;
++				workspace->strm.avail_in = min(bytes_left,
++							       in_buf_folios << PAGE_SHIFT);
+ 			} else {
+ 				unsigned int pg_off;
+ 				unsigned int cur_len;
+diff --git a/fs/buffer.c b/fs/buffer.c
+index 1fc9a50def0b51..32bd0f4c422360 100644
+--- a/fs/buffer.c
++++ b/fs/buffer.c
+@@ -2803,7 +2803,7 @@ static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
+ 	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
+ 	bio->bi_write_hint = write_hint;
+ 
+-	__bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
++	bio_add_folio_nofail(bio, bh->b_folio, bh->b_size, bh_offset(bh));
+ 
+ 	bio->bi_end_io = end_bio_bh_io_sync;
+ 	bio->bi_private = bh;
+@@ -2813,7 +2813,7 @@ static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
+ 
+ 	if (wbc) {
+ 		wbc_init_bio(wbc, bio);
+-		wbc_account_cgroup_owner(wbc, bh->b_page, bh->b_size);
++		wbc_account_cgroup_owner(wbc, bh->b_folio, bh->b_size);
+ 	}
+ 
+ 	submit_bio(bio);
+diff --git a/fs/exfat/dir.c b/fs/exfat/dir.c
+index 7446bf09a04a8f..9d8848872fe8ac 100644
+--- a/fs/exfat/dir.c
++++ b/fs/exfat/dir.c
+@@ -125,7 +125,7 @@ static int exfat_readdir(struct inode *inode, loff_t *cpos, struct exfat_dir_ent
+ 			type = exfat_get_entry_type(ep);
+ 			if (type == TYPE_UNUSED) {
+ 				brelse(bh);
+-				break;
++				goto out;
+ 			}
+ 
+ 			if (type != TYPE_FILE && type != TYPE_DIR) {
+@@ -189,6 +189,7 @@ static int exfat_readdir(struct inode *inode, loff_t *cpos, struct exfat_dir_ent
+ 		}
+ 	}
+ 
++out:
+ 	dir_entry->namebuf.lfn[0] = '\0';
+ 	*cpos = EXFAT_DEN_TO_B(dentry);
+ 	return 0;
+diff --git a/fs/exfat/fatent.c b/fs/exfat/fatent.c
+index 773c320d68f3f2..9e5492ac409b07 100644
+--- a/fs/exfat/fatent.c
++++ b/fs/exfat/fatent.c
+@@ -216,6 +216,16 @@ static int __exfat_free_cluster(struct inode *inode, struct exfat_chain *p_chain
+ 
+ 			if (err)
+ 				goto dec_used_clus;
++
++			if (num_clusters >= sbi->num_clusters - EXFAT_FIRST_CLUSTER) {
++				/*
++				 * The cluster chain includes a loop; scan the
++				 * bitmap to get the number of used clusters.
++				 */
++				exfat_count_used_clusters(sb, &sbi->used_clusters);
++
++				return 0;
++			}
+ 		} while (clu != EXFAT_EOF_CLUSTER);
+ 	}
+ 
+diff --git a/fs/exfat/file.c b/fs/exfat/file.c
+index fb38769c3e39d1..05b51e7217838f 100644
+--- a/fs/exfat/file.c
++++ b/fs/exfat/file.c
+@@ -545,6 +545,7 @@ static int exfat_extend_valid_size(struct file *file, loff_t new_valid_size)
+ 	while (pos < new_valid_size) {
+ 		u32 len;
+ 		struct folio *folio;
++		unsigned long off;
+ 
+ 		len = PAGE_SIZE - (pos & (PAGE_SIZE - 1));
+ 		if (pos + len > new_valid_size)
+@@ -554,6 +555,9 @@ static int exfat_extend_valid_size(struct file *file, loff_t new_valid_size)
+ 		if (err)
+ 			goto out;
+ 
++		off = offset_in_folio(folio, pos);
++		folio_zero_new_buffers(folio, off, off + len);
++
+ 		err = ops->write_end(file, mapping, pos, len, len, folio, NULL);
+ 		if (err < 0)
+ 			goto out;
+@@ -563,6 +567,8 @@ static int exfat_extend_valid_size(struct file *file, loff_t new_valid_size)
+ 		cond_resched();
+ 	}
+ 
++	return 0;
++
+ out:
+ 	return err;
+ }
+diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
+index ad5543866d2152..b7b9261fec3b50 100644
+--- a/fs/ext4/page-io.c
++++ b/fs/ext4/page-io.c
+@@ -421,7 +421,7 @@ static void io_submit_add_bh(struct ext4_io_submit *io,
+ 		io_submit_init_bio(io, bh);
+ 	if (!bio_add_folio(io->io_bio, io_folio, bh->b_size, bh_offset(bh)))
+ 		goto submit_and_retry;
+-	wbc_account_cgroup_owner(io->io_wbc, &folio->page, bh->b_size);
++	wbc_account_cgroup_owner(io->io_wbc, folio, bh->b_size);
+ 	io->io_next_block++;
+ }
+ 
+diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
+index da0960d496ae09..1b0050b8421d88 100644
+--- a/fs/f2fs/data.c
++++ b/fs/f2fs/data.c
+@@ -711,7 +711,8 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
+ 	}
+ 
+ 	if (fio->io_wbc && !is_read_io(fio->op))
+-		wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE);
++		wbc_account_cgroup_owner(fio->io_wbc, page_folio(fio->page),
++					 PAGE_SIZE);
+ 
+ 	inc_page_count(fio->sbi, is_read_io(fio->op) ?
+ 			__read_io_type(page) : WB_DATA_TYPE(fio->page, false));
+@@ -911,7 +912,8 @@ int f2fs_merge_page_bio(struct f2fs_io_info *fio)
+ 	}
+ 
+ 	if (fio->io_wbc)
+-		wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE);
++		wbc_account_cgroup_owner(fio->io_wbc, page_folio(fio->page),
++					 PAGE_SIZE);
+ 
+ 	inc_page_count(fio->sbi, WB_DATA_TYPE(page, false));
+ 
+@@ -1011,7 +1013,8 @@ void f2fs_submit_page_write(struct f2fs_io_info *fio)
+ 	}
+ 
+ 	if (fio->io_wbc)
+-		wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE);
++		wbc_account_cgroup_owner(fio->io_wbc, page_folio(fio->page),
++					 PAGE_SIZE);
+ 
+ 	io->last_block_in_bio = fio->new_blkaddr;
+ 
+diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
+index d8bec3c1bb1fa7..2391b09f4cedec 100644
+--- a/fs/fs-writeback.c
++++ b/fs/fs-writeback.c
+@@ -890,17 +890,16 @@ EXPORT_SYMBOL_GPL(wbc_detach_inode);
+ /**
+  * wbc_account_cgroup_owner - account writeback to update inode cgroup ownership
+  * @wbc: writeback_control of the writeback in progress
+- * @page: page being written out
++ * @folio: folio being written out
+  * @bytes: number of bytes being written out
+  *
+- * @bytes from @page are about to written out during the writeback
++ * @bytes from @folio are about to be written out during the writeback
+  * controlled by @wbc.  Keep the book for foreign inode detection.  See
+  * wbc_detach_inode().
+  */
+-void wbc_account_cgroup_owner(struct writeback_control *wbc, struct page *page,
++void wbc_account_cgroup_owner(struct writeback_control *wbc, struct folio *folio,
+ 			      size_t bytes)
+ {
+-	struct folio *folio;
+ 	struct cgroup_subsys_state *css;
+ 	int id;
+ 
+@@ -913,7 +912,6 @@ void wbc_account_cgroup_owner(struct writeback_control *wbc, struct page *page,
+ 	if (!wbc->wb || wbc->no_cgroup_owner)
+ 		return;
+ 
+-	folio = page_folio(page);
+ 	css = mem_cgroup_css_from_folio(folio);
+ 	/* dead cgroups shouldn't contribute to inode ownership arbitration */
+ 	if (!(css->flags & CSS_ONLINE))
+diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
+index 54104dd48af7c9..2e62e62c07f836 100644
+--- a/fs/fuse/dir.c
++++ b/fs/fuse/dir.c
+@@ -1680,6 +1680,8 @@ static int fuse_dir_open(struct inode *inode, struct file *file)
+ 		 */
+ 		if (ff->open_flags & (FOPEN_STREAM | FOPEN_NONSEEKABLE))
+ 			nonseekable_open(inode, file);
++		if (!(ff->open_flags & FOPEN_KEEP_CACHE))
++			invalidate_inode_pages2(inode->i_mapping);
+ 	}
+ 
+ 	return err;
+diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
+index ef0b68bccbb612..25d1ede6bb0eb0 100644
+--- a/fs/iomap/buffered-io.c
++++ b/fs/iomap/buffered-io.c
+@@ -1764,7 +1764,8 @@ static bool iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t pos)
+  */
+ static int iomap_add_to_ioend(struct iomap_writepage_ctx *wpc,
+ 		struct writeback_control *wbc, struct folio *folio,
+-		struct inode *inode, loff_t pos, unsigned len)
++		struct inode *inode, loff_t pos, loff_t end_pos,
++		unsigned len)
+ {
+ 	struct iomap_folio_state *ifs = folio->private;
+ 	size_t poff = offset_in_folio(folio, pos);
+@@ -1783,15 +1784,60 @@ static int iomap_add_to_ioend(struct iomap_writepage_ctx *wpc,
+ 
+ 	if (ifs)
+ 		atomic_add(len, &ifs->write_bytes_pending);
++
++	/*
++	 * Clamp io_offset and io_size to the incore EOF so that ondisk
++	 * file size updates in the ioend completion are byte-accurate.
++	 * This avoids recovering files with zeroed tail regions when
++	 * writeback races with appending writes:
++	 *
++	 *    Thread 1:                  Thread 2:
++	 *    ------------               -----------
++	 *    write [A, A+B]
++	 *    update inode size to A+B
++	 *    submit I/O [A, A+BS]
++	 *                               write [A+B, A+B+C]
++	 *                               update inode size to A+B+C
++	 *    <I/O completes, updates disk size to min(A+B+C, A+BS)>
++	 *    <power failure>
++	 *
++	 *  After reboot:
++	 *    1) with A+B+C < A+BS, the file has zero padding in range
++	 *       [A+B, A+B+C]
++	 *
++	 *    |<     Block Size (BS)   >|
++	 *    |DDDDDDDDDDDD0000000000000|
++	 *    ^           ^        ^
++	 *    A          A+B     A+B+C
++	 *                       (EOF)
++	 *
++	 *    2) with A+B+C > A+BS, the file has zero padding in range
++	 *       [A+B, A+BS]
++	 *
++	 *    |<     Block Size (BS)   >|<     Block Size (BS)    >|
++	 *    |DDDDDDDDDDDD0000000000000|00000000000000000000000000|
++	 *    ^           ^             ^           ^
++	 *    A          A+B           A+BS       A+B+C
++	 *                             (EOF)
++	 *
++	 *    D = Valid Data
++	 *    0 = Zero Padding
++	 *
++	 * Note that this defeats the ability to chain the ioends of
++	 * appending writes.
++	 */
+ 	wpc->ioend->io_size += len;
+-	wbc_account_cgroup_owner(wbc, &folio->page, len);
++	if (wpc->ioend->io_offset + wpc->ioend->io_size > end_pos)
++		wpc->ioend->io_size = end_pos - wpc->ioend->io_offset;
++
++	wbc_account_cgroup_owner(wbc, folio, len);
+ 	return 0;
+ }
+ 
+ static int iomap_writepage_map_blocks(struct iomap_writepage_ctx *wpc,
+ 		struct writeback_control *wbc, struct folio *folio,
+-		struct inode *inode, u64 pos, unsigned dirty_len,
+-		unsigned *count)
++		struct inode *inode, u64 pos, u64 end_pos,
++		unsigned dirty_len, unsigned *count)
+ {
+ 	int error;
+ 
+@@ -1816,7 +1862,7 @@ static int iomap_writepage_map_blocks(struct iomap_writepage_ctx *wpc,
+ 			break;
+ 		default:
+ 			error = iomap_add_to_ioend(wpc, wbc, folio, inode, pos,
+-					map_len);
++					end_pos, map_len);
+ 			if (!error)
+ 				(*count)++;
+ 			break;
+@@ -1887,11 +1933,11 @@ static bool iomap_writepage_handle_eof(struct folio *folio, struct inode *inode,
+ 		 *    remaining memory is zeroed when mapped, and writes to that
+ 		 *    region are not written out to the file.
+ 		 *
+-		 * Also adjust the writeback range to skip all blocks entirely
+-		 * beyond i_size.
++		 * Also adjust the end_pos to the end of file and skip writeback
++		 * for all blocks entirely beyond i_size.
+ 		 */
+ 		folio_zero_segment(folio, poff, folio_size(folio));
+-		*end_pos = round_up(isize, i_blocksize(inode));
++		*end_pos = isize;
+ 	}
+ 
+ 	return true;
+@@ -1904,6 +1950,7 @@ static int iomap_writepage_map(struct iomap_writepage_ctx *wpc,
+ 	struct inode *inode = folio->mapping->host;
+ 	u64 pos = folio_pos(folio);
+ 	u64 end_pos = pos + folio_size(folio);
++	u64 end_aligned = 0;
+ 	unsigned count = 0;
+ 	int error = 0;
+ 	u32 rlen;
+@@ -1945,9 +1992,10 @@ static int iomap_writepage_map(struct iomap_writepage_ctx *wpc,
+ 	/*
+ 	 * Walk through the folio to find dirty areas to write back.
+ 	 */
+-	while ((rlen = iomap_find_dirty_range(folio, &pos, end_pos))) {
++	end_aligned = round_up(end_pos, i_blocksize(inode));
++	while ((rlen = iomap_find_dirty_range(folio, &pos, end_aligned))) {
+ 		error = iomap_writepage_map_blocks(wpc, wbc, folio, inode,
+-				pos, rlen, &count);
++				pos, end_pos, rlen, &count);
+ 		if (error)
+ 			break;
+ 		pos += rlen;
+diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
+index 4305a1ac808a60..f95cf272a1b500 100644
+--- a/fs/jbd2/commit.c
++++ b/fs/jbd2/commit.c
+@@ -776,9 +776,9 @@ void jbd2_journal_commit_transaction(journal_t *journal)
+ 	/*
+ 	 * If the journal is not located on the file system device,
+ 	 * then we must flush the file system device before we issue
+-	 * the commit record
++	 * the commit record and update the journal tail sequence.
+ 	 */
+-	if (commit_transaction->t_need_data_flush &&
++	if ((commit_transaction->t_need_data_flush || update_tail) &&
+ 	    (journal->j_fs_dev != journal->j_dev) &&
+ 	    (journal->j_flags & JBD2_BARRIER))
+ 		blkdev_issue_flush(journal->j_fs_dev);
+diff --git a/fs/jbd2/revoke.c b/fs/jbd2/revoke.c
+index 4556e468902449..ce63d5fde9c3a8 100644
+--- a/fs/jbd2/revoke.c
++++ b/fs/jbd2/revoke.c
+@@ -654,7 +654,7 @@ static void flush_descriptor(journal_t *journal,
+ 	set_buffer_jwrite(descriptor);
+ 	BUFFER_TRACE(descriptor, "write");
+ 	set_buffer_dirty(descriptor);
+-	write_dirty_buffer(descriptor, REQ_SYNC);
++	write_dirty_buffer(descriptor, JBD2_JOURNAL_REQ_FLAGS);
+ }
+ #endif
+ 
+diff --git a/fs/mount.h b/fs/mount.h
+index 185fc56afc1333..179f690a0c7223 100644
+--- a/fs/mount.h
++++ b/fs/mount.h
+@@ -38,6 +38,7 @@ struct mount {
+ 	struct dentry *mnt_mountpoint;
+ 	struct vfsmount mnt;
+ 	union {
++		struct rb_node mnt_node; /* node in the ns->mounts rbtree */
+ 		struct rcu_head mnt_rcu;
+ 		struct llist_node mnt_llist;
+ 	};
+@@ -51,10 +52,7 @@ struct mount {
+ 	struct list_head mnt_child;	/* and going through their mnt_child */
+ 	struct list_head mnt_instance;	/* mount instance on sb->s_mounts */
+ 	const char *mnt_devname;	/* Name of device e.g. /dev/dsk/hda1 */
+-	union {
+-		struct rb_node mnt_node;	/* Under ns->mounts */
+-		struct list_head mnt_list;
+-	};
++	struct list_head mnt_list;
+ 	struct list_head mnt_expire;	/* link in fs-specific expiry list */
+ 	struct list_head mnt_share;	/* circular list of shared mounts */
+ 	struct list_head mnt_slave_list;/* list of slave mounts */
+@@ -145,11 +143,16 @@ static inline bool is_anon_ns(struct mnt_namespace *ns)
+ 	return ns->seq == 0;
+ }
+ 
++static inline bool mnt_ns_attached(const struct mount *mnt)
++{
++	return !RB_EMPTY_NODE(&mnt->mnt_node);
++}
++
+ static inline void move_from_ns(struct mount *mnt, struct list_head *dt_list)
+ {
+-	WARN_ON(!(mnt->mnt.mnt_flags & MNT_ONRB));
+-	mnt->mnt.mnt_flags &= ~MNT_ONRB;
++	WARN_ON(!mnt_ns_attached(mnt));
+ 	rb_erase(&mnt->mnt_node, &mnt->mnt_ns->mounts);
++	RB_CLEAR_NODE(&mnt->mnt_node);
+ 	list_add_tail(&mnt->mnt_list, dt_list);
+ }
+ 
+diff --git a/fs/mpage.c b/fs/mpage.c
+index b5b5ddf9d513d4..82aecf37274379 100644
+--- a/fs/mpage.c
++++ b/fs/mpage.c
+@@ -606,7 +606,7 @@ static int __mpage_writepage(struct folio *folio, struct writeback_control *wbc,
+ 	 * the confused fail path above (OOM) will be very confused when
+ 	 * it finds all bh marked clean (i.e. it will not write anything)
+ 	 */
+-	wbc_account_cgroup_owner(wbc, &folio->page, folio_size(folio));
++	wbc_account_cgroup_owner(wbc, folio, folio_size(folio));
+ 	length = first_unmapped << blkbits;
+ 	if (!bio_add_folio(bio, folio, length, 0)) {
+ 		bio = mpage_bio_submit_write(bio);
+diff --git a/fs/namespace.c b/fs/namespace.c
+index d26f5e6d2ca35f..5ea644b679add5 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -344,6 +344,7 @@ static struct mount *alloc_vfsmnt(const char *name)
+ 		INIT_HLIST_NODE(&mnt->mnt_mp_list);
+ 		INIT_LIST_HEAD(&mnt->mnt_umounting);
+ 		INIT_HLIST_HEAD(&mnt->mnt_stuck_children);
++		RB_CLEAR_NODE(&mnt->mnt_node);
+ 		mnt->mnt.mnt_idmap = &nop_mnt_idmap;
+ 	}
+ 	return mnt;
+@@ -1124,7 +1125,7 @@ static void mnt_add_to_ns(struct mnt_namespace *ns, struct mount *mnt)
+ 	struct rb_node **link = &ns->mounts.rb_node;
+ 	struct rb_node *parent = NULL;
+ 
+-	WARN_ON(mnt->mnt.mnt_flags & MNT_ONRB);
++	WARN_ON(mnt_ns_attached(mnt));
+ 	mnt->mnt_ns = ns;
+ 	while (*link) {
+ 		parent = *link;
+@@ -1135,7 +1136,6 @@ static void mnt_add_to_ns(struct mnt_namespace *ns, struct mount *mnt)
+ 	}
+ 	rb_link_node(&mnt->mnt_node, parent, link);
+ 	rb_insert_color(&mnt->mnt_node, &ns->mounts);
+-	mnt->mnt.mnt_flags |= MNT_ONRB;
+ }
+ 
+ /*
+@@ -1305,7 +1305,7 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root,
+ 	}
+ 
+ 	mnt->mnt.mnt_flags = old->mnt.mnt_flags;
+-	mnt->mnt.mnt_flags &= ~(MNT_WRITE_HOLD|MNT_MARKED|MNT_INTERNAL|MNT_ONRB);
++	mnt->mnt.mnt_flags &= ~(MNT_WRITE_HOLD|MNT_MARKED|MNT_INTERNAL);
+ 
+ 	atomic_inc(&sb->s_active);
+ 	mnt->mnt.mnt_idmap = mnt_idmap_get(mnt_idmap(&old->mnt));
+@@ -1763,7 +1763,7 @@ static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
+ 	/* Gather the mounts to umount */
+ 	for (p = mnt; p; p = next_mnt(p, mnt)) {
+ 		p->mnt.mnt_flags |= MNT_UMOUNT;
+-		if (p->mnt.mnt_flags & MNT_ONRB)
++		if (mnt_ns_attached(p))
+ 			move_from_ns(p, &tmp_list);
+ 		else
+ 			list_move(&p->mnt_list, &tmp_list);
+@@ -1912,16 +1912,14 @@ static int do_umount(struct mount *mnt, int flags)
+ 
+ 	event++;
+ 	if (flags & MNT_DETACH) {
+-		if (mnt->mnt.mnt_flags & MNT_ONRB ||
+-		    !list_empty(&mnt->mnt_list))
++		if (mnt_ns_attached(mnt) || !list_empty(&mnt->mnt_list))
+ 			umount_tree(mnt, UMOUNT_PROPAGATE);
+ 		retval = 0;
+ 	} else {
+ 		shrink_submounts(mnt);
+ 		retval = -EBUSY;
+ 		if (!propagate_mount_busy(mnt, 2)) {
+-			if (mnt->mnt.mnt_flags & MNT_ONRB ||
+-			    !list_empty(&mnt->mnt_list))
++			if (mnt_ns_attached(mnt) || !list_empty(&mnt->mnt_list))
+ 				umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
+ 			retval = 0;
+ 		}
+@@ -2055,9 +2053,15 @@ SYSCALL_DEFINE1(oldumount, char __user *, name)
+ 
+ static bool is_mnt_ns_file(struct dentry *dentry)
+ {
++	struct ns_common *ns;
++
+ 	/* Is this a proxy for a mount namespace? */
+-	return dentry->d_op == &ns_dentry_operations &&
+-	       dentry->d_fsdata == &mntns_operations;
++	if (dentry->d_op != &ns_dentry_operations)
++		return false;
++
++	ns = d_inode(dentry)->i_private;
++
++	return ns->ops == &mntns_operations;
+ }
+ 
+ struct ns_common *from_mnt_ns(struct mnt_namespace *mnt)
+diff --git a/fs/netfs/buffered_read.c b/fs/netfs/buffered_read.c
+index af46a598f4d7c7..2dd2260352dbf8 100644
+--- a/fs/netfs/buffered_read.c
++++ b/fs/netfs/buffered_read.c
+@@ -275,22 +275,14 @@ static void netfs_read_to_pagecache(struct netfs_io_request *rreq)
+ 			netfs_stat(&netfs_n_rh_download);
+ 			if (rreq->netfs_ops->prepare_read) {
+ 				ret = rreq->netfs_ops->prepare_read(subreq);
+-				if (ret < 0) {
+-					atomic_dec(&rreq->nr_outstanding);
+-					netfs_put_subrequest(subreq, false,
+-							     netfs_sreq_trace_put_cancel);
+-					break;
+-				}
++				if (ret < 0)
++					goto prep_failed;
+ 				trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);
+ 			}
+ 
+ 			slice = netfs_prepare_read_iterator(subreq);
+-			if (slice < 0) {
+-				atomic_dec(&rreq->nr_outstanding);
+-				netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_cancel);
+-				ret = slice;
+-				break;
+-			}
++			if (slice < 0)
++				goto prep_iter_failed;
+ 
+ 			rreq->netfs_ops->issue_read(subreq);
+ 			goto done;
+@@ -302,6 +294,8 @@ static void netfs_read_to_pagecache(struct netfs_io_request *rreq)
+ 			trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
+ 			netfs_stat(&netfs_n_rh_zero);
+ 			slice = netfs_prepare_read_iterator(subreq);
++			if (slice < 0)
++				goto prep_iter_failed;
+ 			__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
+ 			netfs_read_subreq_terminated(subreq, 0, false);
+ 			goto done;
+@@ -310,6 +304,8 @@ static void netfs_read_to_pagecache(struct netfs_io_request *rreq)
+ 		if (source == NETFS_READ_FROM_CACHE) {
+ 			trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
+ 			slice = netfs_prepare_read_iterator(subreq);
++			if (slice < 0)
++				goto prep_iter_failed;
+ 			netfs_read_cache_to_pagecache(rreq, subreq);
+ 			goto done;
+ 		}
+@@ -318,6 +314,14 @@ static void netfs_read_to_pagecache(struct netfs_io_request *rreq)
+ 		WARN_ON_ONCE(1);
+ 		break;
+ 
++	prep_iter_failed:
++		ret = slice;
++	prep_failed:
++		subreq->error = ret;
++		atomic_dec(&rreq->nr_outstanding);
++		netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_cancel);
++		break;
++
+ 	done:
+ 		size -= slice;
+ 		start += slice;
+diff --git a/fs/netfs/direct_write.c b/fs/netfs/direct_write.c
+index 88f2adfab75e92..26cf9c94deebb3 100644
+--- a/fs/netfs/direct_write.c
++++ b/fs/netfs/direct_write.c
+@@ -67,7 +67,7 @@ ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *
+ 		 * allocate a sufficiently large bvec array and may shorten the
+ 		 * request.
+ 		 */
+-		if (async || user_backed_iter(iter)) {
++		if (user_backed_iter(iter)) {
+ 			n = netfs_extract_user_iter(iter, len, &wreq->iter, 0);
+ 			if (n < 0) {
+ 				ret = n;
+@@ -77,6 +77,11 @@ ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *
+ 			wreq->direct_bv_count = n;
+ 			wreq->direct_bv_unpin = iov_iter_extract_will_pin(iter);
+ 		} else {
++			/* If this is a kernel-generated async DIO request,
++			 * assume that any resources the iterator points to
++			 * (e.g. a bio_vec array) will persist till the end of
++			 * the op.
++			 */
+ 			wreq->iter = *iter;
+ 		}
+ 
+diff --git a/fs/netfs/read_collect.c b/fs/netfs/read_collect.c
+index 3cbb289535a85a..e70eb4ea21c038 100644
+--- a/fs/netfs/read_collect.c
++++ b/fs/netfs/read_collect.c
+@@ -62,10 +62,14 @@ static void netfs_unlock_read_folio(struct netfs_io_subrequest *subreq,
+ 		} else {
+ 			trace_netfs_folio(folio, netfs_folio_trace_read_done);
+ 		}
++
++		folioq_clear(folioq, slot);
+ 	} else {
+ 		// TODO: Use of PG_private_2 is deprecated.
+ 		if (test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags))
+ 			netfs_pgpriv2_mark_copy_to_cache(subreq, rreq, folioq, slot);
++		else
++			folioq_clear(folioq, slot);
+ 	}
+ 
+ 	if (!test_bit(NETFS_RREQ_DONT_UNLOCK_FOLIOS, &rreq->flags)) {
+@@ -77,8 +81,6 @@ static void netfs_unlock_read_folio(struct netfs_io_subrequest *subreq,
+ 			folio_unlock(folio);
+ 		}
+ 	}
+-
+-	folioq_clear(folioq, slot);
+ }
+ 
+ /*
+@@ -378,8 +380,7 @@ static void netfs_rreq_assess(struct netfs_io_request *rreq)
+ 	task_io_account_read(rreq->transferred);
+ 
+ 	trace_netfs_rreq(rreq, netfs_rreq_trace_wake_ip);
+-	clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
+-	wake_up_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS);
++	clear_and_wake_up_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
+ 
+ 	trace_netfs_rreq(rreq, netfs_rreq_trace_done);
+ 	netfs_clear_subrequests(rreq, false);
+diff --git a/fs/netfs/read_pgpriv2.c b/fs/netfs/read_pgpriv2.c
+index ba5af89d37fae5..54d5004fec1826 100644
+--- a/fs/netfs/read_pgpriv2.c
++++ b/fs/netfs/read_pgpriv2.c
+@@ -170,6 +170,10 @@ void netfs_pgpriv2_write_to_the_cache(struct netfs_io_request *rreq)
+ 
+ 	trace_netfs_write(wreq, netfs_write_trace_copy_to_cache);
+ 	netfs_stat(&netfs_n_wh_copy_to_cache);
++	if (!wreq->io_streams[1].avail) {
++		netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
++		goto couldnt_start;
++	}
+ 
+ 	for (;;) {
+ 		error = netfs_pgpriv2_copy_folio(wreq, folio);
+diff --git a/fs/netfs/read_retry.c b/fs/netfs/read_retry.c
+index 0350592ea8047d..48fb0303f7eee0 100644
+--- a/fs/netfs/read_retry.c
++++ b/fs/netfs/read_retry.c
+@@ -49,7 +49,7 @@ static void netfs_retry_read_subrequests(struct netfs_io_request *rreq)
+ 	 * up to the first permanently failed one.
+ 	 */
+ 	if (!rreq->netfs_ops->prepare_read &&
+-	    !test_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags)) {
++	    !rreq->cache_resources.ops) {
+ 		struct netfs_io_subrequest *subreq;
+ 
+ 		list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
+@@ -149,7 +149,8 @@ static void netfs_retry_read_subrequests(struct netfs_io_request *rreq)
+ 			BUG_ON(!len);
+ 
+ 			/* Renegotiate max_len (rsize) */
+-			if (rreq->netfs_ops->prepare_read(subreq) < 0) {
++			if (rreq->netfs_ops->prepare_read &&
++			    rreq->netfs_ops->prepare_read(subreq) < 0) {
+ 				trace_netfs_sreq(subreq, netfs_sreq_trace_reprep_failed);
+ 				__set_bit(NETFS_SREQ_FAILED, &subreq->flags);
+ 			}
+diff --git a/fs/netfs/write_collect.c b/fs/netfs/write_collect.c
+index 1d438be2e1b4b8..82290c92ba7a29 100644
+--- a/fs/netfs/write_collect.c
++++ b/fs/netfs/write_collect.c
+@@ -501,8 +501,7 @@ static void netfs_collect_write_results(struct netfs_io_request *wreq)
+ 		goto need_retry;
+ 	if ((notes & MADE_PROGRESS) && test_bit(NETFS_RREQ_PAUSE, &wreq->flags)) {
+ 		trace_netfs_rreq(wreq, netfs_rreq_trace_unpause);
+-		clear_bit_unlock(NETFS_RREQ_PAUSE, &wreq->flags);
+-		wake_up_bit(&wreq->flags, NETFS_RREQ_PAUSE);
++		clear_and_wake_up_bit(NETFS_RREQ_PAUSE, &wreq->flags);
+ 	}
+ 
+ 	if (notes & NEED_REASSESS) {
+@@ -605,8 +604,7 @@ void netfs_write_collection_worker(struct work_struct *work)
+ 
+ 	_debug("finished");
+ 	trace_netfs_rreq(wreq, netfs_rreq_trace_wake_ip);
+-	clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &wreq->flags);
+-	wake_up_bit(&wreq->flags, NETFS_RREQ_IN_PROGRESS);
++	clear_and_wake_up_bit(NETFS_RREQ_IN_PROGRESS, &wreq->flags);
+ 
+ 	if (wreq->iocb) {
+ 		size_t written = min(wreq->transferred, wreq->len);
+@@ -714,8 +712,7 @@ void netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error,
+ 
+ 	trace_netfs_sreq(subreq, netfs_sreq_trace_terminated);
+ 
+-	clear_bit_unlock(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
+-	wake_up_bit(&subreq->flags, NETFS_SREQ_IN_PROGRESS);
++	clear_and_wake_up_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
+ 
+ 	/* If we are at the head of the queue, wake up the collector,
+ 	 * transferring a ref to it if we were the ones to do so.
+diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c
+index 810269ee0a50e6..d49e4ce279994f 100644
+--- a/fs/nfs/fscache.c
++++ b/fs/nfs/fscache.c
+@@ -263,6 +263,12 @@ int nfs_netfs_readahead(struct readahead_control *ractl)
+ static atomic_t nfs_netfs_debug_id;
+ static int nfs_netfs_init_request(struct netfs_io_request *rreq, struct file *file)
+ {
++	if (!file) {
++		if (WARN_ON_ONCE(rreq->origin != NETFS_PGPRIV2_COPY_TO_CACHE))
++			return -EIO;
++		return 0;
++	}
++
+ 	rreq->netfs_priv = get_nfs_open_context(nfs_file_open_context(file));
+ 	rreq->debug_id = atomic_inc_return(&nfs_netfs_debug_id);
+ 	/* [DEPRECATED] Use PG_private_2 to mark folio being written to the cache. */
+@@ -274,7 +280,8 @@ static int nfs_netfs_init_request(struct netfs_io_request *rreq, struct file *fi
+ 
+ static void nfs_netfs_free_request(struct netfs_io_request *rreq)
+ {
+-	put_nfs_open_context(rreq->netfs_priv);
++	if (rreq->netfs_priv)
++		put_nfs_open_context(rreq->netfs_priv);
+ }
+ 
+ static struct nfs_netfs_io_data *nfs_netfs_alloc(struct netfs_io_subrequest *sreq)
+diff --git a/fs/notify/fdinfo.c b/fs/notify/fdinfo.c
+index dec553034027e0..e933f9c65d904a 100644
+--- a/fs/notify/fdinfo.c
++++ b/fs/notify/fdinfo.c
+@@ -47,10 +47,8 @@ static void show_mark_fhandle(struct seq_file *m, struct inode *inode)
+ 	size = f->handle_bytes >> 2;
+ 
+ 	ret = exportfs_encode_fid(inode, (struct fid *)f->f_handle, &size);
+-	if ((ret == FILEID_INVALID) || (ret < 0)) {
+-		WARN_ONCE(1, "Can't encode file handler for inotify: %d\n", ret);
++	if ((ret == FILEID_INVALID) || (ret < 0))
+ 		return;
+-	}
+ 
+ 	f->handle_type = ret;
+ 	f->handle_bytes = size * sizeof(u32);
+diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
+index 2ed6ad641a2069..b2c78621da44a4 100644
+--- a/fs/overlayfs/copy_up.c
++++ b/fs/overlayfs/copy_up.c
+@@ -416,13 +416,13 @@ int ovl_set_attr(struct ovl_fs *ofs, struct dentry *upperdentry,
+ 	return err;
+ }
+ 
+-struct ovl_fh *ovl_encode_real_fh(struct ovl_fs *ofs, struct dentry *real,
++struct ovl_fh *ovl_encode_real_fh(struct ovl_fs *ofs, struct inode *realinode,
+ 				  bool is_upper)
+ {
+ 	struct ovl_fh *fh;
+ 	int fh_type, dwords;
+ 	int buflen = MAX_HANDLE_SZ;
+-	uuid_t *uuid = &real->d_sb->s_uuid;
++	uuid_t *uuid = &realinode->i_sb->s_uuid;
+ 	int err;
+ 
+ 	/* Make sure the real fid stays 32bit aligned */
+@@ -439,13 +439,13 @@ struct ovl_fh *ovl_encode_real_fh(struct ovl_fs *ofs, struct dentry *real,
+ 	 * the price or reconnecting the dentry.
+ 	 */
+ 	dwords = buflen >> 2;
+-	fh_type = exportfs_encode_fh(real, (void *)fh->fb.fid, &dwords, 0);
++	fh_type = exportfs_encode_inode_fh(realinode, (void *)fh->fb.fid,
++					   &dwords, NULL, 0);
+ 	buflen = (dwords << 2);
+ 
+ 	err = -EIO;
+-	if (WARN_ON(fh_type < 0) ||
+-	    WARN_ON(buflen > MAX_HANDLE_SZ) ||
+-	    WARN_ON(fh_type == FILEID_INVALID))
++	if (fh_type < 0 || fh_type == FILEID_INVALID ||
++	    WARN_ON(buflen > MAX_HANDLE_SZ))
+ 		goto out_err;
+ 
+ 	fh->fb.version = OVL_FH_VERSION;
+@@ -481,7 +481,7 @@ struct ovl_fh *ovl_get_origin_fh(struct ovl_fs *ofs, struct dentry *origin)
+ 	if (!ovl_can_decode_fh(origin->d_sb))
+ 		return NULL;
+ 
+-	return ovl_encode_real_fh(ofs, origin, false);
++	return ovl_encode_real_fh(ofs, d_inode(origin), false);
+ }
+ 
+ int ovl_set_origin_fh(struct ovl_fs *ofs, const struct ovl_fh *fh,
+@@ -506,7 +506,7 @@ static int ovl_set_upper_fh(struct ovl_fs *ofs, struct dentry *upper,
+ 	const struct ovl_fh *fh;
+ 	int err;
+ 
+-	fh = ovl_encode_real_fh(ofs, upper, true);
++	fh = ovl_encode_real_fh(ofs, d_inode(upper), true);
+ 	if (IS_ERR(fh))
+ 		return PTR_ERR(fh);
+ 
+diff --git a/fs/overlayfs/export.c b/fs/overlayfs/export.c
+index 5868cb2229552f..444aeeccb6daf9 100644
+--- a/fs/overlayfs/export.c
++++ b/fs/overlayfs/export.c
+@@ -176,35 +176,37 @@ static int ovl_connect_layer(struct dentry *dentry)
+  *
+  * Return 0 for upper file handle, > 0 for lower file handle or < 0 on error.
+  */
+-static int ovl_check_encode_origin(struct dentry *dentry)
++static int ovl_check_encode_origin(struct inode *inode)
+ {
+-	struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
++	struct ovl_fs *ofs = OVL_FS(inode->i_sb);
+ 	bool decodable = ofs->config.nfs_export;
++	struct dentry *dentry;
++	int err;
+ 
+ 	/* No upper layer? */
+ 	if (!ovl_upper_mnt(ofs))
+ 		return 1;
+ 
+ 	/* Lower file handle for non-upper non-decodable */
+-	if (!ovl_dentry_upper(dentry) && !decodable)
++	if (!ovl_inode_upper(inode) && !decodable)
+ 		return 1;
+ 
+ 	/* Upper file handle for pure upper */
+-	if (!ovl_dentry_lower(dentry))
++	if (!ovl_inode_lower(inode))
+ 		return 0;
+ 
+ 	/*
+ 	 * Root is never indexed, so if there's an upper layer, encode upper for
+ 	 * root.
+ 	 */
+-	if (dentry == dentry->d_sb->s_root)
++	if (inode == d_inode(inode->i_sb->s_root))
+ 		return 0;
+ 
+ 	/*
+ 	 * Upper decodable file handle for non-indexed upper.
+ 	 */
+-	if (ovl_dentry_upper(dentry) && decodable &&
+-	    !ovl_test_flag(OVL_INDEX, d_inode(dentry)))
++	if (ovl_inode_upper(inode) && decodable &&
++	    !ovl_test_flag(OVL_INDEX, inode))
+ 		return 0;
+ 
+ 	/*
+@@ -213,14 +215,23 @@ static int ovl_check_encode_origin(struct dentry *dentry)
+ 	 * ovl_connect_layer() will try to make origin's layer "connected" by
+ 	 * copying up a "connectable" ancestor.
+ 	 */
+-	if (d_is_dir(dentry) && decodable)
+-		return ovl_connect_layer(dentry);
++	if (!decodable || !S_ISDIR(inode->i_mode))
++		return 1;
++
++	dentry = d_find_any_alias(inode);
++	if (!dentry)
++		return -ENOENT;
++
++	err = ovl_connect_layer(dentry);
++	dput(dentry);
++	if (err < 0)
++		return err;
+ 
+ 	/* Lower file handle for indexed and non-upper dir/non-dir */
+ 	return 1;
+ }
+ 
+-static int ovl_dentry_to_fid(struct ovl_fs *ofs, struct dentry *dentry,
++static int ovl_dentry_to_fid(struct ovl_fs *ofs, struct inode *inode,
+ 			     u32 *fid, int buflen)
+ {
+ 	struct ovl_fh *fh = NULL;
+@@ -231,13 +242,13 @@ static int ovl_dentry_to_fid(struct ovl_fs *ofs, struct dentry *dentry,
+ 	 * Check if we should encode a lower or upper file handle and maybe
+ 	 * copy up an ancestor to make lower file handle connectable.
+ 	 */
+-	err = enc_lower = ovl_check_encode_origin(dentry);
++	err = enc_lower = ovl_check_encode_origin(inode);
+ 	if (enc_lower < 0)
+ 		goto fail;
+ 
+ 	/* Encode an upper or lower file handle */
+-	fh = ovl_encode_real_fh(ofs, enc_lower ? ovl_dentry_lower(dentry) :
+-				ovl_dentry_upper(dentry), !enc_lower);
++	fh = ovl_encode_real_fh(ofs, enc_lower ? ovl_inode_lower(inode) :
++				ovl_inode_upper(inode), !enc_lower);
+ 	if (IS_ERR(fh))
+ 		return PTR_ERR(fh);
+ 
+@@ -251,8 +262,8 @@ static int ovl_dentry_to_fid(struct ovl_fs *ofs, struct dentry *dentry,
+ 	return err;
+ 
+ fail:
+-	pr_warn_ratelimited("failed to encode file handle (%pd2, err=%i)\n",
+-			    dentry, err);
++	pr_warn_ratelimited("failed to encode file handle (ino=%lu, err=%i)\n",
++			    inode->i_ino, err);
+ 	goto out;
+ }
+ 
+@@ -260,19 +271,13 @@ static int ovl_encode_fh(struct inode *inode, u32 *fid, int *max_len,
+ 			 struct inode *parent)
+ {
+ 	struct ovl_fs *ofs = OVL_FS(inode->i_sb);
+-	struct dentry *dentry;
+ 	int bytes, buflen = *max_len << 2;
+ 
+ 	/* TODO: encode connectable file handles */
+ 	if (parent)
+ 		return FILEID_INVALID;
+ 
+-	dentry = d_find_any_alias(inode);
+-	if (!dentry)
+-		return FILEID_INVALID;
+-
+-	bytes = ovl_dentry_to_fid(ofs, dentry, fid, buflen);
+-	dput(dentry);
++	bytes = ovl_dentry_to_fid(ofs, inode, fid, buflen);
+ 	if (bytes <= 0)
+ 		return FILEID_INVALID;
+ 
+diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c
+index 5764f91d283e70..42b73ae5ba01be 100644
+--- a/fs/overlayfs/namei.c
++++ b/fs/overlayfs/namei.c
+@@ -542,7 +542,7 @@ int ovl_verify_origin_xattr(struct ovl_fs *ofs, struct dentry *dentry,
+ 	struct ovl_fh *fh;
+ 	int err;
+ 
+-	fh = ovl_encode_real_fh(ofs, real, is_upper);
++	fh = ovl_encode_real_fh(ofs, d_inode(real), is_upper);
+ 	err = PTR_ERR(fh);
+ 	if (IS_ERR(fh)) {
+ 		fh = NULL;
+@@ -738,7 +738,7 @@ int ovl_get_index_name(struct ovl_fs *ofs, struct dentry *origin,
+ 	struct ovl_fh *fh;
+ 	int err;
+ 
+-	fh = ovl_encode_real_fh(ofs, origin, false);
++	fh = ovl_encode_real_fh(ofs, d_inode(origin), false);
+ 	if (IS_ERR(fh))
+ 		return PTR_ERR(fh);
+ 
+diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
+index 0bfe35da4b7b7a..844874b4a91a94 100644
+--- a/fs/overlayfs/overlayfs.h
++++ b/fs/overlayfs/overlayfs.h
+@@ -869,7 +869,7 @@ int ovl_copy_up_with_data(struct dentry *dentry);
+ int ovl_maybe_copy_up(struct dentry *dentry, int flags);
+ int ovl_copy_xattr(struct super_block *sb, const struct path *path, struct dentry *new);
+ int ovl_set_attr(struct ovl_fs *ofs, struct dentry *upper, struct kstat *stat);
+-struct ovl_fh *ovl_encode_real_fh(struct ovl_fs *ofs, struct dentry *real,
++struct ovl_fh *ovl_encode_real_fh(struct ovl_fs *ofs, struct inode *realinode,
+ 				  bool is_upper);
+ struct ovl_fh *ovl_get_origin_fh(struct ovl_fs *ofs, struct dentry *origin);
+ int ovl_set_origin_fh(struct ovl_fs *ofs, const struct ovl_fh *fh,
+diff --git a/fs/smb/client/namespace.c b/fs/smb/client/namespace.c
+index 0f788031b7405f..e3f9213131c467 100644
+--- a/fs/smb/client/namespace.c
++++ b/fs/smb/client/namespace.c
+@@ -196,11 +196,28 @@ static struct vfsmount *cifs_do_automount(struct path *path)
+ 	struct smb3_fs_context tmp;
+ 	char *full_path;
+ 	struct vfsmount *mnt;
++	struct cifs_sb_info *mntpt_sb;
++	struct cifs_ses *ses;
+ 
+ 	if (IS_ROOT(mntpt))
+ 		return ERR_PTR(-ESTALE);
+ 
+-	cur_ctx = CIFS_SB(mntpt->d_sb)->ctx;
++	mntpt_sb = CIFS_SB(mntpt->d_sb);
++	ses = cifs_sb_master_tcon(mntpt_sb)->ses;
++	cur_ctx = mntpt_sb->ctx;
++
++	/*
++	 * At this point, the root session should be in the mntpt sb. We should
++	 * bring the sb context passwords in sync with the root session's
++	 * passwords. This would help prevent unnecessary retries and password
++	 * swaps for automounts.
++	 */
++	mutex_lock(&ses->session_mutex);
++	rc = smb3_sync_session_ctx_passwords(mntpt_sb, ses);
++	mutex_unlock(&ses->session_mutex);
++
++	if (rc)
++		return ERR_PTR(rc);
+ 
+ 	fc = fs_context_for_submount(path->mnt->mnt_sb->s_type, mntpt);
+ 	if (IS_ERR(fc))
+diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
+index 04ffc5b158c3bf..c763a2f7df6640 100644
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -695,6 +695,9 @@ void smb2_send_interim_resp(struct ksmbd_work *work, __le32 status)
+ 	struct smb2_hdr *rsp_hdr;
+ 	struct ksmbd_work *in_work = ksmbd_alloc_work_struct();
+ 
++	if (!in_work)
++		return;
++
+ 	if (allocate_interim_rsp_buf(in_work)) {
+ 		pr_err("smb_allocate_rsp_buf failed!\n");
+ 		ksmbd_free_work_struct(in_work);
+@@ -3985,6 +3988,26 @@ static int smb2_populate_readdir_entry(struct ksmbd_conn *conn, int info_level,
+ 		posix_info->DeviceId = cpu_to_le32(ksmbd_kstat->kstat->rdev);
+ 		posix_info->HardLinks = cpu_to_le32(ksmbd_kstat->kstat->nlink);
+ 		posix_info->Mode = cpu_to_le32(ksmbd_kstat->kstat->mode & 0777);
++		switch (ksmbd_kstat->kstat->mode & S_IFMT) {
++		case S_IFDIR:
++			posix_info->Mode |= cpu_to_le32(POSIX_TYPE_DIR << POSIX_FILETYPE_SHIFT);
++			break;
++		case S_IFLNK:
++			posix_info->Mode |= cpu_to_le32(POSIX_TYPE_SYMLINK << POSIX_FILETYPE_SHIFT);
++			break;
++		case S_IFCHR:
++			posix_info->Mode |= cpu_to_le32(POSIX_TYPE_CHARDEV << POSIX_FILETYPE_SHIFT);
++			break;
++		case S_IFBLK:
++			posix_info->Mode |= cpu_to_le32(POSIX_TYPE_BLKDEV << POSIX_FILETYPE_SHIFT);
++			break;
++		case S_IFIFO:
++			posix_info->Mode |= cpu_to_le32(POSIX_TYPE_FIFO << POSIX_FILETYPE_SHIFT);
++			break;
++		case S_IFSOCK:
++			posix_info->Mode |= cpu_to_le32(POSIX_TYPE_SOCKET << POSIX_FILETYPE_SHIFT);
++		}
++
+ 		posix_info->Inode = cpu_to_le64(ksmbd_kstat->kstat->ino);
+ 		posix_info->DosAttributes =
+ 			S_ISDIR(ksmbd_kstat->kstat->mode) ?
+@@ -5173,6 +5196,26 @@ static int find_file_posix_info(struct smb2_query_info_rsp *rsp,
+ 	file_info->AllocationSize = cpu_to_le64(stat.blocks << 9);
+ 	file_info->HardLinks = cpu_to_le32(stat.nlink);
+ 	file_info->Mode = cpu_to_le32(stat.mode & 0777);
++	switch (stat.mode & S_IFMT) {
++	case S_IFDIR:
++		file_info->Mode |= cpu_to_le32(POSIX_TYPE_DIR << POSIX_FILETYPE_SHIFT);
++		break;
++	case S_IFLNK:
++		file_info->Mode |= cpu_to_le32(POSIX_TYPE_SYMLINK << POSIX_FILETYPE_SHIFT);
++		break;
++	case S_IFCHR:
++		file_info->Mode |= cpu_to_le32(POSIX_TYPE_CHARDEV << POSIX_FILETYPE_SHIFT);
++		break;
++	case S_IFBLK:
++		file_info->Mode |= cpu_to_le32(POSIX_TYPE_BLKDEV << POSIX_FILETYPE_SHIFT);
++		break;
++	case S_IFIFO:
++		file_info->Mode |= cpu_to_le32(POSIX_TYPE_FIFO << POSIX_FILETYPE_SHIFT);
++		break;
++	case S_IFSOCK:
++		file_info->Mode |= cpu_to_le32(POSIX_TYPE_SOCKET << POSIX_FILETYPE_SHIFT);
++	}
++
+ 	file_info->DeviceId = cpu_to_le32(stat.rdev);
+ 
+ 	/*
+diff --git a/fs/smb/server/smb2pdu.h b/fs/smb/server/smb2pdu.h
+index 649dacf7e8c493..17a0b18a8406b3 100644
+--- a/fs/smb/server/smb2pdu.h
++++ b/fs/smb/server/smb2pdu.h
+@@ -502,4 +502,14 @@ static inline void *smb2_get_msg(void *buf)
+ 	return buf + 4;
+ }
+ 
++#define POSIX_TYPE_FILE		0
++#define POSIX_TYPE_DIR		1
++#define POSIX_TYPE_SYMLINK	2
++#define POSIX_TYPE_CHARDEV	3
++#define POSIX_TYPE_BLKDEV	4
++#define POSIX_TYPE_FIFO		5
++#define POSIX_TYPE_SOCKET	6
++
++#define POSIX_FILETYPE_SHIFT	12
++
+ #endif	/* _SMB2PDU_H */
+diff --git a/fs/smb/server/vfs.c b/fs/smb/server/vfs.c
+index 7cbd580120d129..ee825971abd9ab 100644
+--- a/fs/smb/server/vfs.c
++++ b/fs/smb/server/vfs.c
+@@ -1264,6 +1264,8 @@ int ksmbd_vfs_kern_path_locked(struct ksmbd_work *work, char *name,
+ 					      filepath,
+ 					      flags,
+ 					      path);
++			if (!is_last)
++				next[0] = '/';
+ 			if (err)
+ 				goto out2;
+ 			else if (is_last)
+@@ -1271,7 +1273,6 @@ int ksmbd_vfs_kern_path_locked(struct ksmbd_work *work, char *name,
+ 			path_put(parent_path);
+ 			*parent_path = *path;
+ 
+-			next[0] = '/';
+ 			remain_len -= filename_len + 1;
+ 		}
+ 
+diff --git a/include/linux/bus/stm32_firewall_device.h b/include/linux/bus/stm32_firewall_device.h
+index 18e0a2fc3816ac..5178b72bc92098 100644
+--- a/include/linux/bus/stm32_firewall_device.h
++++ b/include/linux/bus/stm32_firewall_device.h
+@@ -115,7 +115,7 @@ void stm32_firewall_release_access_by_id(struct stm32_firewall *firewall, u32 su
+ #else /* CONFIG_STM32_FIREWALL */
+ 
+ int stm32_firewall_get_firewall(struct device_node *np, struct stm32_firewall *firewall,
+-				unsigned int nb_firewall);
++				unsigned int nb_firewall)
+ {
+ 	return -ENODEV;
+ }
+diff --git a/include/linux/iomap.h b/include/linux/iomap.h
+index f61407e3b12192..d204dcd35063d7 100644
+--- a/include/linux/iomap.h
++++ b/include/linux/iomap.h
+@@ -330,7 +330,7 @@ struct iomap_ioend {
+ 	u16			io_type;
+ 	u16			io_flags;	/* IOMAP_F_* */
+ 	struct inode		*io_inode;	/* file being written to */
+-	size_t			io_size;	/* size of the extent */
++	size_t			io_size;	/* size of data within eof */
+ 	loff_t			io_offset;	/* offset in the file */
+ 	sector_t		io_sector;	/* start sector of ioend */
+ 	struct bio		io_bio;		/* MUST BE LAST! */
+diff --git a/include/linux/mount.h b/include/linux/mount.h
+index c34c18b4e8f36f..04213d8ef8376d 100644
+--- a/include/linux/mount.h
++++ b/include/linux/mount.h
+@@ -50,7 +50,7 @@ struct path;
+ #define MNT_ATIME_MASK (MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME )
+ 
+ #define MNT_INTERNAL_FLAGS (MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL | \
+-			    MNT_DOOMED | MNT_SYNC_UMOUNT | MNT_MARKED | MNT_ONRB)
++			    MNT_DOOMED | MNT_SYNC_UMOUNT | MNT_MARKED)
+ 
+ #define MNT_INTERNAL	0x4000
+ 
+@@ -64,7 +64,6 @@ struct path;
+ #define MNT_SYNC_UMOUNT		0x2000000
+ #define MNT_MARKED		0x4000000
+ #define MNT_UMOUNT		0x8000000
+-#define MNT_ONRB		0x10000000
+ 
+ struct vfsmount {
+ 	struct dentry *mnt_root;	/* root of the mounted tree */
+diff --git a/include/linux/netfs.h b/include/linux/netfs.h
+index 5eaceef41e6cac..474481ee8b7c29 100644
+--- a/include/linux/netfs.h
++++ b/include/linux/netfs.h
+@@ -269,7 +269,6 @@ struct netfs_io_request {
+ 	size_t			prev_donated;	/* Fallback for subreq->prev_donated */
+ 	refcount_t		ref;
+ 	unsigned long		flags;
+-#define NETFS_RREQ_COPY_TO_CACHE	1	/* Need to write to the cache */
+ #define NETFS_RREQ_NO_UNLOCK_FOLIO	2	/* Don't unlock no_unlock_folio on completion */
+ #define NETFS_RREQ_DONT_UNLOCK_FOLIOS	3	/* Don't unlock the folios on completion */
+ #define NETFS_RREQ_FAILED		4	/* The request failed */
+diff --git a/include/linux/writeback.h b/include/linux/writeback.h
+index d6db822e4bb30c..641a057e041329 100644
+--- a/include/linux/writeback.h
++++ b/include/linux/writeback.h
+@@ -217,7 +217,7 @@ void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
+ 				 struct inode *inode)
+ 	__releases(&inode->i_lock);
+ void wbc_detach_inode(struct writeback_control *wbc);
+-void wbc_account_cgroup_owner(struct writeback_control *wbc, struct page *page,
++void wbc_account_cgroup_owner(struct writeback_control *wbc, struct folio *folio,
+ 			      size_t bytes);
+ int cgroup_writeback_by_id(u64 bdi_id, int memcg_id,
+ 			   enum wb_reason reason, struct wb_completion *done);
+@@ -324,7 +324,7 @@ static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio)
+ }
+ 
+ static inline void wbc_account_cgroup_owner(struct writeback_control *wbc,
+-					    struct page *page, size_t bytes)
++					    struct folio *folio, size_t bytes)
+ {
+ }
+ 
+diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
+index c0deaafebfdc0b..4bd93571e6c1b5 100644
+--- a/include/net/inet_connection_sock.h
++++ b/include/net/inet_connection_sock.h
+@@ -281,7 +281,7 @@ static inline int inet_csk_reqsk_queue_len(const struct sock *sk)
+ 
+ static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk)
+ {
+-	return inet_csk_reqsk_queue_len(sk) >= READ_ONCE(sk->sk_max_ack_backlog);
++	return inet_csk_reqsk_queue_len(sk) > READ_ONCE(sk->sk_max_ack_backlog);
+ }
+ 
+ bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req);
+diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
+index 8932ec5bd7c029..20c5374e922ef5 100644
+--- a/include/ufs/ufshcd.h
++++ b/include/ufs/ufshcd.h
+@@ -329,7 +329,6 @@ struct ufs_pwr_mode_info {
+  * @program_key: program or evict an inline encryption key
+  * @fill_crypto_prdt: initialize crypto-related fields in the PRDT
+  * @event_notify: called to notify important events
+- * @reinit_notify: called to notify reinit of UFSHCD during max gear switch
+  * @mcq_config_resource: called to configure MCQ platform resources
+  * @get_hba_mac: reports maximum number of outstanding commands supported by
+  *	the controller. Should be implemented for UFSHCI 4.0 or later
+@@ -381,7 +380,6 @@ struct ufs_hba_variant_ops {
+ 				    void *prdt, unsigned int num_segments);
+ 	void	(*event_notify)(struct ufs_hba *hba,
+ 				enum ufs_event_type evt, void *data);
+-	void	(*reinit_notify)(struct ufs_hba *);
+ 	int	(*mcq_config_resource)(struct ufs_hba *hba);
+ 	int	(*get_hba_mac)(struct ufs_hba *hba);
+ 	int	(*op_runtime_config)(struct ufs_hba *hba);
+diff --git a/io_uring/eventfd.c b/io_uring/eventfd.c
+index e37fddd5d9ce8e..ffc4bd17d0786c 100644
+--- a/io_uring/eventfd.c
++++ b/io_uring/eventfd.c
+@@ -38,7 +38,7 @@ static void io_eventfd_do_signal(struct rcu_head *rcu)
+ 	eventfd_signal_mask(ev_fd->cq_ev_fd, EPOLL_URING_WAKE);
+ 
+ 	if (refcount_dec_and_test(&ev_fd->refs))
+-		io_eventfd_free(rcu);
++		call_rcu(&ev_fd->rcu, io_eventfd_free);
+ }
+ 
+ void io_eventfd_signal(struct io_ring_ctx *ctx)
+diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
+index 9849da128364af..21f1bcba2f52b5 100644
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -1244,10 +1244,7 @@ static void io_req_normal_work_add(struct io_kiocb *req)
+ 
+ 	/* SQPOLL doesn't need the task_work added, it'll run it itself */
+ 	if (ctx->flags & IORING_SETUP_SQPOLL) {
+-		struct io_sq_data *sqd = ctx->sq_data;
+-
+-		if (sqd->thread)
+-			__set_notify_signal(sqd->thread);
++		__set_notify_signal(req->task);
+ 		return;
+ 	}
+ 
+diff --git a/io_uring/sqpoll.c b/io_uring/sqpoll.c
+index 1cfcc735b8e38e..5bc54c6df20fd6 100644
+--- a/io_uring/sqpoll.c
++++ b/io_uring/sqpoll.c
+@@ -275,8 +275,12 @@ static int io_sq_thread(void *data)
+ 	DEFINE_WAIT(wait);
+ 
+ 	/* offload context creation failed, just exit */
+-	if (!current->io_uring)
++	if (!current->io_uring) {
++		mutex_lock(&sqd->lock);
++		sqd->thread = NULL;
++		mutex_unlock(&sqd->lock);
+ 		goto err_out;
++	}
+ 
+ 	snprintf(buf, sizeof(buf), "iou-sqp-%d", sqd->task_pid);
+ 	set_task_comm(current, buf);
+diff --git a/io_uring/timeout.c b/io_uring/timeout.c
+index 9973876d91b0ef..21c4bfea79f1c9 100644
+--- a/io_uring/timeout.c
++++ b/io_uring/timeout.c
+@@ -409,10 +409,12 @@ static int io_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
+ 
+ 	timeout->off = 0; /* noseq */
+ 	data = req->async_data;
++	data->ts = *ts;
++
+ 	list_add_tail(&timeout->list, &ctx->timeout_list);
+ 	hrtimer_init(&data->timer, io_timeout_get_clock(data), mode);
+ 	data->timer.function = io_timeout_fn;
+-	hrtimer_start(&data->timer, timespec64_to_ktime(*ts), mode);
++	hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), mode);
+ 	return 0;
+ }
+ 
+diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
+index a4dd285cdf39b7..24ece85fd3b126 100644
+--- a/kernel/cgroup/cpuset.c
++++ b/kernel/cgroup/cpuset.c
+@@ -862,7 +862,15 @@ static int generate_sched_domains(cpumask_var_t **domains,
+ 	 */
+ 	if (cgrpv2) {
+ 		for (i = 0; i < ndoms; i++) {
+-			cpumask_copy(doms[i], csa[i]->effective_cpus);
++			/*
++			 * The top cpuset may contain some boot time isolated
++			 * CPUs that need to be excluded from the sched domain.
++			 */
++			if (csa[i] == &top_cpuset)
++				cpumask_and(doms[i], csa[i]->effective_cpus,
++					    housekeeping_cpumask(HK_TYPE_DOMAIN));
++			else
++				cpumask_copy(doms[i], csa[i]->effective_cpus);
+ 			if (dattr)
+ 				dattr[i] = SD_ATTR_INIT;
+ 		}
+@@ -3102,29 +3110,6 @@ ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
+ 	int retval = -ENODEV;
+ 
+ 	buf = strstrip(buf);
+-
+-	/*
+-	 * CPU or memory hotunplug may leave @cs w/o any execution
+-	 * resources, in which case the hotplug code asynchronously updates
+-	 * configuration and transfers all tasks to the nearest ancestor
+-	 * which can execute.
+-	 *
+-	 * As writes to "cpus" or "mems" may restore @cs's execution
+-	 * resources, wait for the previously scheduled operations before
+-	 * proceeding, so that we don't end up keep removing tasks added
+-	 * after execution capability is restored.
+-	 *
+-	 * cpuset_handle_hotplug may call back into cgroup core asynchronously
+-	 * via cgroup_transfer_tasks() and waiting for it from a cgroupfs
+-	 * operation like this one can lead to a deadlock through kernfs
+-	 * active_ref protection.  Let's break the protection.  Losing the
+-	 * protection is okay as we check whether @cs is online after
+-	 * grabbing cpuset_mutex anyway.  This only happens on the legacy
+-	 * hierarchies.
+-	 */
+-	css_get(&cs->css);
+-	kernfs_break_active_protection(of->kn);
+-
+ 	cpus_read_lock();
+ 	mutex_lock(&cpuset_mutex);
+ 	if (!is_cpuset_online(cs))
+@@ -3155,8 +3140,6 @@ ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
+ out_unlock:
+ 	mutex_unlock(&cpuset_mutex);
+ 	cpus_read_unlock();
+-	kernfs_unbreak_active_protection(of->kn);
+-	css_put(&cs->css);
+ 	flush_workqueue(cpuset_migrate_mm_wq);
+ 	return retval ?: nbytes;
+ }
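The first cpuset hunk masks the top cpuset's effective CPUs with the housekeeping set, so boot-time isolated CPUs never enter a scheduling domain. Reduced to 64-bit masks in user space, with made-up mask values, the arithmetic looks like this:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t effective_cpus = 0xff;     /* CPUs 0-7 in the top cpuset */
        uint64_t housekeeping   = ~0x0cULL; /* CPUs 2-3 isolated at boot */

        /* top cpuset: drop boot-time isolated CPUs from the sched domain */
        uint64_t dom = effective_cpus & housekeeping;

        printf("domain mask: %#llx\n", (unsigned long long)dom); /* 0xf3 */
        return 0;
}
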
+diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
+index 40f915f893e2ed..f928a67a07d29a 100644
+--- a/kernel/sched/ext.c
++++ b/kernel/sched/ext.c
+@@ -2917,7 +2917,7 @@ static void put_prev_task_scx(struct rq *rq, struct task_struct *p,
+ 		 */
+ 		if (p->scx.slice && !scx_rq_bypassing(rq)) {
+ 			dispatch_enqueue(&rq->scx.local_dsq, p, SCX_ENQ_HEAD);
+-			return;
++			goto switch_class;
+ 		}
+ 
+ 		/*
+@@ -2934,6 +2934,7 @@ static void put_prev_task_scx(struct rq *rq, struct task_struct *p,
+ 		}
+ 	}
+ 
++switch_class:
+ 	if (next && next->sched_class != &ext_sched_class)
+ 		switch_class(rq, next);
+ }
+@@ -3239,16 +3240,8 @@ static void reset_idle_masks(void)
+ 	cpumask_copy(idle_masks.smt, cpu_online_mask);
+ }
+ 
+-void __scx_update_idle(struct rq *rq, bool idle)
++static void update_builtin_idle(int cpu, bool idle)
+ {
+-	int cpu = cpu_of(rq);
+-
+-	if (SCX_HAS_OP(update_idle) && !scx_rq_bypassing(rq)) {
+-		SCX_CALL_OP(SCX_KF_REST, update_idle, cpu_of(rq), idle);
+-		if (!static_branch_unlikely(&scx_builtin_idle_enabled))
+-			return;
+-	}
+-
+ 	if (idle)
+ 		cpumask_set_cpu(cpu, idle_masks.cpu);
+ 	else
+@@ -3275,6 +3268,57 @@ void __scx_update_idle(struct rq *rq, bool idle)
+ #endif
+ }
+ 
++/*
++ * Update the idle state of a CPU to @idle.
++ *
++ * If @do_notify is true, ops.update_idle() is invoked to notify the scx
++ * scheduler of an actual idle state transition (idle to busy or vice
++ * versa). If @do_notify is false, only the idle state in the idle masks is
++ * refreshed without invoking ops.update_idle().
++ *
++ * This distinction is necessary, because an idle CPU can be "reserved" and
++ * awakened via scx_bpf_pick_idle_cpu() + scx_bpf_kick_cpu(), marking it as
++ * busy even if no tasks are dispatched. In this case, the CPU may return
++ * to idle without a true state transition. Refreshing the idle masks
++ * without invoking ops.update_idle() ensures accurate idle state tracking
++ * while avoiding unnecessary updates and maintaining balanced state
++ * transitions.
++ */
++void __scx_update_idle(struct rq *rq, bool idle, bool do_notify)
++{
++	int cpu = cpu_of(rq);
++
++	lockdep_assert_rq_held(rq);
++
++	/*
++	 * Trigger ops.update_idle() only when transitioning from a task to
++	 * the idle thread and vice versa.
++	 *
++	 * Idle transitions are indicated by do_notify being set to true,
++	 * managed by put_prev_task_idle()/set_next_task_idle().
++	 */
++	if (SCX_HAS_OP(update_idle) && do_notify && !scx_rq_bypassing(rq))
++		SCX_CALL_OP(SCX_KF_REST, update_idle, cpu_of(rq), idle);
++
++	/*
++	 * Update the idle masks:
++	 * - for real idle transitions (do_notify == true)
++	 * - for idle-to-idle transitions (indicated by the previous task
++	 *   being the idle thread, managed by pick_task_idle())
++	 *
++	 * Skip updating idle masks if the previous task is not the idle
++	 * thread, since set_next_task_idle() has already handled it when
++	 * transitioning from a task to the idle thread (calling this
++	 * function with do_notify == true).
++	 *
++	 * In this way we can avoid updating the idle masks twice,
++	 * unnecessarily.
++	 */
++	if (static_branch_likely(&scx_builtin_idle_enabled))
++		if (do_notify || is_idle_task(rq->curr))
++			update_builtin_idle(cpu, idle);
++}
++
+ static void handle_hotplug(struct rq *rq, bool online)
+ {
+ 	int cpu = cpu_of(rq);
+@@ -4348,10 +4392,9 @@ static void scx_ops_bypass(bool bypass)
+ 	 */
+ 	for_each_possible_cpu(cpu) {
+ 		struct rq *rq = cpu_rq(cpu);
+-		struct rq_flags rf;
+ 		struct task_struct *p, *n;
+ 
+-		rq_lock(rq, &rf);
++		raw_spin_rq_lock(rq);
+ 
+ 		if (bypass) {
+ 			WARN_ON_ONCE(rq->scx.flags & SCX_RQ_BYPASSING);
+@@ -4367,7 +4410,7 @@ static void scx_ops_bypass(bool bypass)
+ 		 * sees scx_rq_bypassing() before moving tasks to SCX.
+ 		 */
+ 		if (!scx_enabled()) {
+-			rq_unlock(rq, &rf);
++			raw_spin_rq_unlock(rq);
+ 			continue;
+ 		}
+ 
+@@ -4387,10 +4430,11 @@ static void scx_ops_bypass(bool bypass)
+ 			sched_enq_and_set_task(&ctx);
+ 		}
+ 
+-		rq_unlock(rq, &rf);
+-
+ 		/* resched to restore ticks and idle state */
+-		resched_cpu(cpu);
++		if (cpu_online(cpu) || cpu == smp_processor_id())
++			resched_curr(rq);
++
++		raw_spin_rq_unlock(rq);
+ 	}
+ unlock:
+ 	raw_spin_unlock_irqrestore(&__scx_ops_bypass_lock, flags);
+diff --git a/kernel/sched/ext.h b/kernel/sched/ext.h
+index b1675bb59fc461..4d022d17ac7dd6 100644
+--- a/kernel/sched/ext.h
++++ b/kernel/sched/ext.h
+@@ -57,15 +57,15 @@ static inline void init_sched_ext_class(void) {}
+ #endif	/* CONFIG_SCHED_CLASS_EXT */
+ 
+ #if defined(CONFIG_SCHED_CLASS_EXT) && defined(CONFIG_SMP)
+-void __scx_update_idle(struct rq *rq, bool idle);
++void __scx_update_idle(struct rq *rq, bool idle, bool do_notify);
+ 
+-static inline void scx_update_idle(struct rq *rq, bool idle)
++static inline void scx_update_idle(struct rq *rq, bool idle, bool do_notify)
+ {
+ 	if (scx_enabled())
+-		__scx_update_idle(rq, idle);
++		__scx_update_idle(rq, idle, do_notify);
+ }
+ #else
+-static inline void scx_update_idle(struct rq *rq, bool idle) {}
++static inline void scx_update_idle(struct rq *rq, bool idle, bool do_notify) {}
+ #endif
+ 
+ #ifdef CONFIG_CGROUP_SCHED
+diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
+index d2f096bb274c3f..53bb9193c537a8 100644
+--- a/kernel/sched/idle.c
++++ b/kernel/sched/idle.c
+@@ -453,19 +453,20 @@ static void wakeup_preempt_idle(struct rq *rq, struct task_struct *p, int flags)
+ static void put_prev_task_idle(struct rq *rq, struct task_struct *prev, struct task_struct *next)
+ {
+ 	dl_server_update_idle_time(rq, prev);
+-	scx_update_idle(rq, false);
++	scx_update_idle(rq, false, true);
+ }
+ 
+ static void set_next_task_idle(struct rq *rq, struct task_struct *next, bool first)
+ {
+ 	update_idle_core(rq);
+-	scx_update_idle(rq, true);
++	scx_update_idle(rq, true, true);
+ 	schedstat_inc(rq->sched_goidle);
+ 	next->se.exec_start = rq_clock_task(rq);
+ }
+ 
+ struct task_struct *pick_task_idle(struct rq *rq)
+ {
++	scx_update_idle(rq, true, false);
+ 	return rq->idle;
+ }
+ 
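The sched_ext hunks split __scx_update_idle() into notification and bookkeeping, so an idle-to-idle re-pick refreshes the idle masks without firing ops.update_idle() again. A rough user-space sketch of that split; the callback and mask are stand-ins, and the kernel version additionally skips the mask refresh unless rq->curr is the idle task:

#include <stdbool.h>
#include <stdio.h>

static unsigned long idle_mask;         /* one bit per CPU */

static void ops_update_idle(int cpu, bool idle) /* BPF callback stand-in */
{
        printf("notify scheduler: cpu%d -> %s\n", cpu, idle ? "idle" : "busy");
}

static void update_idle(int cpu, bool idle, bool do_notify)
{
        if (do_notify)                  /* only real task<->idle transitions */
                ops_update_idle(cpu, idle);

        if (idle)                       /* the mask is refreshed either way */
                idle_mask |= 1UL << cpu;
        else
                idle_mask &= ~(1UL << cpu);
}

int main(void)
{
        update_idle(1, true, true);     /* task -> idle: notify + mask */
        update_idle(1, true, false);    /* idle re-pick: mask only */
        update_idle(1, false, true);    /* idle -> task: notify + mask */
        printf("idle mask: %#lx\n", idle_mask);
        return 0;
}
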
+diff --git a/net/802/psnap.c b/net/802/psnap.c
+index fca9d454905fe3..389df460c8c4b9 100644
+--- a/net/802/psnap.c
++++ b/net/802/psnap.c
+@@ -55,11 +55,11 @@ static int snap_rcv(struct sk_buff *skb, struct net_device *dev,
+ 		goto drop;
+ 
+ 	rcu_read_lock();
+-	proto = find_snap_client(skb_transport_header(skb));
++	proto = find_snap_client(skb->data);
+ 	if (proto) {
+ 		/* Pass the frame on. */
+-		skb->transport_header += 5;
+ 		skb_pull_rcsum(skb, 5);
++		skb_reset_transport_header(skb);
+ 		rc = proto->rcvfunc(skb, dev, &snap_packet_type, orig_dev);
+ 	}
+ 	rcu_read_unlock();
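The psnap hunk reads the SNAP header through skb->data and replaces manual transport_header arithmetic with skb_pull_rcsum() plus skb_reset_transport_header(). A toy sketch of the same pull-then-reset sequence on a plain buffer; the frame bytes are made up:

#include <stdio.h>
#include <string.h>

struct fake_skb {
        unsigned char *data;            /* current front of the packet */
        unsigned char *transport;       /* transport header pointer */
};

int main(void)
{
        unsigned char frame[] = { 0x00, 0x00, 0x00, 0x08, 0x06, /* SNAP hdr */
                                  0xde, 0xad };                 /* payload */
        struct fake_skb skb = { frame, NULL };
        unsigned char oui_proto[5];

        memcpy(oui_proto, skb.data, 5); /* find_snap_client(skb->data) */
        skb.data += 5;                  /* skb_pull_rcsum(skb, 5) */
        skb.transport = skb.data;       /* skb_reset_transport_header(skb) */

        printf("payload starts at %02x\n", skb.transport[0]);  /* de */
        return 0;
}
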
+diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
+index c86f4e42e69cab..7b2b04d6b85630 100644
+--- a/net/bluetooth/hci_sync.c
++++ b/net/bluetooth/hci_sync.c
+@@ -1031,9 +1031,9 @@ static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
+ 
+ static int hci_set_random_addr_sync(struct hci_dev *hdev, bdaddr_t *rpa)
+ {
+-	/* If we're advertising or initiating an LE connection we can't
+-	 * go ahead and change the random address at this time. This is
+-	 * because the eventual initiator address used for the
++	/* If a random_addr has been set we're advertising or initiating an LE
++	 * connection we can't go ahead and change the random address at this
++	 * time. This is because the eventual initiator address used for the
+ 	 * subsequently created connection will be undefined (some
+ 	 * controllers use the new address and others the one we had
+ 	 * when the operation started).
+@@ -1041,8 +1041,9 @@ static int hci_set_random_addr_sync(struct hci_dev *hdev, bdaddr_t *rpa)
+ 	 * In this kind of scenario skip the update and let the random
+ 	 * address be updated at the next cycle.
+ 	 */
+-	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
+-	    hci_lookup_le_connect(hdev)) {
++	if (bacmp(&hdev->random_addr, BDADDR_ANY) &&
++	    (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
++	    hci_lookup_le_connect(hdev))) {
+ 		bt_dev_dbg(hdev, "Deferring random address update");
+ 		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
+ 		return 0;
+diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
+index 2343e15f8938ec..7dc315c1658e7d 100644
+--- a/net/bluetooth/mgmt.c
++++ b/net/bluetooth/mgmt.c
+@@ -7596,6 +7596,24 @@ static void device_added(struct sock *sk, struct hci_dev *hdev,
+ 	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
+ }
+ 
++static void add_device_complete(struct hci_dev *hdev, void *data, int err)
++{
++	struct mgmt_pending_cmd *cmd = data;
++	struct mgmt_cp_add_device *cp = cmd->param;
++
++	if (!err) {
++		device_added(cmd->sk, hdev, &cp->addr.bdaddr, cp->addr.type,
++			     cp->action);
++		device_flags_changed(NULL, hdev, &cp->addr.bdaddr,
++				     cp->addr.type, hdev->conn_flags,
++				     PTR_UINT(cmd->user_data));
++	}
++
++	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_ADD_DEVICE,
++			  mgmt_status(err), &cp->addr, sizeof(cp->addr));
++	mgmt_pending_free(cmd);
++}
++
+ static int add_device_sync(struct hci_dev *hdev, void *data)
+ {
+ 	return hci_update_passive_scan_sync(hdev);
+@@ -7604,6 +7622,7 @@ static int add_device_sync(struct hci_dev *hdev, void *data)
+ static int add_device(struct sock *sk, struct hci_dev *hdev,
+ 		      void *data, u16 len)
+ {
++	struct mgmt_pending_cmd *cmd;
+ 	struct mgmt_cp_add_device *cp = data;
+ 	u8 auto_conn, addr_type;
+ 	struct hci_conn_params *params;
+@@ -7684,9 +7703,24 @@ static int add_device(struct sock *sk, struct hci_dev *hdev,
+ 			current_flags = params->flags;
+ 	}
+ 
+-	err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
+-	if (err < 0)
++	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
++	if (!cmd) {
++		err = -ENOMEM;
+ 		goto unlock;
++	}
++
++	cmd->user_data = UINT_PTR(current_flags);
++
++	err = hci_cmd_sync_queue(hdev, add_device_sync, cmd,
++				 add_device_complete);
++	if (err < 0) {
++		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
++					MGMT_STATUS_FAILED, &cp->addr,
++					sizeof(cp->addr));
++		mgmt_pending_free(cmd);
++	}
++
++	goto unlock;
+ 
+ added:
+ 	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
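The mgmt hunks convert MGMT_OP_ADD_DEVICE to the queued-command-with-completion shape: the pending command carries the flags in user_data, the completion callback emits the events and frees the command, and a queueing failure is reported and freed by the caller. A sketch of that ownership rule with invented names:

#include <stdio.h>
#include <stdlib.h>

struct cmd {
        int flags;                      /* stands in for cmd->user_data */
        void (*complete)(struct cmd *, int err);
};

static void add_device_complete(struct cmd *cmd, int err)
{
        if (!err)
                printf("device added, flags=%d\n", cmd->flags);
        printf("reply to caller: %d\n", err);
        free(cmd);                      /* completion owns the context */
}

static int queue_cmd(struct cmd *cmd)
{
        cmd->complete(cmd, 0);          /* pretend the work ran and succeeded */
        return 0;
}

int main(void)
{
        struct cmd *cmd = malloc(sizeof(*cmd));

        if (!cmd)
                return 1;
        cmd->flags = 42;
        cmd->complete = add_device_complete;

        if (queue_cmd(cmd) < 0) {       /* queue failure: report + free here */
                printf("reply to caller: failed\n");
                free(cmd);
        }
        return 0;
}
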
+diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
+index af80d599c33715..21a5b5535ebceb 100644
+--- a/net/bluetooth/rfcomm/tty.c
++++ b/net/bluetooth/rfcomm/tty.c
+@@ -201,14 +201,14 @@ static ssize_t address_show(struct device *tty_dev,
+ 			    struct device_attribute *attr, char *buf)
+ {
+ 	struct rfcomm_dev *dev = dev_get_drvdata(tty_dev);
+-	return sprintf(buf, "%pMR\n", &dev->dst);
++	return sysfs_emit(buf, "%pMR\n", &dev->dst);
+ }
+ 
+ static ssize_t channel_show(struct device *tty_dev,
+ 			    struct device_attribute *attr, char *buf)
+ {
+ 	struct rfcomm_dev *dev = dev_get_drvdata(tty_dev);
+-	return sprintf(buf, "%d\n", dev->channel);
++	return sysfs_emit(buf, "%d\n", dev->channel);
+ }
+ 
+ static DEVICE_ATTR_RO(address);
+diff --git a/net/core/dev.c b/net/core/dev.c
+index f3fa8353d262b0..1867a6a8d76da9 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -753,6 +753,36 @@ int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr,
+ }
+ EXPORT_SYMBOL_GPL(dev_fill_forward_path);
+ 
++/* must be called under rcu_read_lock(), as we dont take a reference */
++static struct napi_struct *napi_by_id(unsigned int napi_id)
++{
++	unsigned int hash = napi_id % HASH_SIZE(napi_hash);
++	struct napi_struct *napi;
++
++	hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
++		if (napi->napi_id == napi_id)
++			return napi;
++
++	return NULL;
++}
++
++/* must be called under rcu_read_lock(), as we dont take a reference */
++struct napi_struct *netdev_napi_by_id(struct net *net, unsigned int napi_id)
++{
++	struct napi_struct *napi;
++
++	napi = napi_by_id(napi_id);
++	if (!napi)
++		return NULL;
++
++	if (WARN_ON_ONCE(!napi->dev))
++		return NULL;
++	if (!net_eq(net, dev_net(napi->dev)))
++		return NULL;
++
++	return napi;
++}
++
+ /**
+  *	__dev_get_by_name	- find a device by its name
+  *	@net: the applicable net namespace
+@@ -6291,19 +6321,6 @@ bool napi_complete_done(struct napi_struct *n, int work_done)
+ }
+ EXPORT_SYMBOL(napi_complete_done);
+ 
+-/* must be called under rcu_read_lock(), as we dont take a reference */
+-struct napi_struct *napi_by_id(unsigned int napi_id)
+-{
+-	unsigned int hash = napi_id % HASH_SIZE(napi_hash);
+-	struct napi_struct *napi;
+-
+-	hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
+-		if (napi->napi_id == napi_id)
+-			return napi;
+-
+-	return NULL;
+-}
+-
+ static void skb_defer_free_flush(struct softnet_data *sd)
+ {
+ 	struct sk_buff *skb, *next;
+diff --git a/net/core/dev.h b/net/core/dev.h
+index 5654325c5b710c..2e3bb7669984a6 100644
+--- a/net/core/dev.h
++++ b/net/core/dev.h
+@@ -22,6 +22,8 @@ struct sd_flow_limit {
+ 
+ extern int netdev_flow_limit_table_len;
+ 
++struct napi_struct *netdev_napi_by_id(struct net *net, unsigned int napi_id);
++
+ #ifdef CONFIG_PROC_FS
+ int __init dev_proc_init(void);
+ #else
+@@ -146,7 +148,6 @@ void xdp_do_check_flushed(struct napi_struct *napi);
+ static inline void xdp_do_check_flushed(struct napi_struct *napi) { }
+ #endif
+ 
+-struct napi_struct *napi_by_id(unsigned int napi_id);
+ void kick_defer_list_purge(struct softnet_data *sd, unsigned int cpu);
+ 
+ #define XMIT_RECURSION_LIMIT	8
+diff --git a/net/core/link_watch.c b/net/core/link_watch.c
+index 1b4d39e3808427..cb04ef2b9807c9 100644
+--- a/net/core/link_watch.c
++++ b/net/core/link_watch.c
+@@ -42,14 +42,18 @@ static unsigned int default_operstate(const struct net_device *dev)
+ 	 * first check whether lower is indeed the source of its down state.
+ 	 */
+ 	if (!netif_carrier_ok(dev)) {
+-		int iflink = dev_get_iflink(dev);
+ 		struct net_device *peer;
++		int iflink;
+ 
+ 		/* If called from netdev_run_todo()/linkwatch_sync_dev(),
+ 		 * dev_net(dev) can be already freed, and RTNL is not held.
+ 		 */
+-		if (dev->reg_state == NETREG_UNREGISTERED ||
+-		    iflink == dev->ifindex)
++		if (dev->reg_state <= NETREG_REGISTERED)
++			iflink = dev_get_iflink(dev);
++		else
++			iflink = dev->ifindex;
++
++		if (iflink == dev->ifindex)
+ 			return IF_OPER_DOWN;
+ 
+ 		ASSERT_RTNL();
+diff --git a/net/core/netdev-genl.c b/net/core/netdev-genl.c
+index d58270b48cb2cf..ad426b3a03b526 100644
+--- a/net/core/netdev-genl.c
++++ b/net/core/netdev-genl.c
+@@ -164,8 +164,6 @@ netdev_nl_napi_fill_one(struct sk_buff *rsp, struct napi_struct *napi,
+ 	void *hdr;
+ 	pid_t pid;
+ 
+-	if (WARN_ON_ONCE(!napi->dev))
+-		return -EINVAL;
+ 	if (!(napi->dev->flags & IFF_UP))
+ 		return 0;
+ 
+@@ -173,8 +171,7 @@ netdev_nl_napi_fill_one(struct sk_buff *rsp, struct napi_struct *napi,
+ 	if (!hdr)
+ 		return -EMSGSIZE;
+ 
+-	if (napi->napi_id >= MIN_NAPI_ID &&
+-	    nla_put_u32(rsp, NETDEV_A_NAPI_ID, napi->napi_id))
++	if (nla_put_u32(rsp, NETDEV_A_NAPI_ID, napi->napi_id))
+ 		goto nla_put_failure;
+ 
+ 	if (nla_put_u32(rsp, NETDEV_A_NAPI_IFINDEX, napi->dev->ifindex))
+@@ -217,7 +214,7 @@ int netdev_nl_napi_get_doit(struct sk_buff *skb, struct genl_info *info)
+ 	rtnl_lock();
+ 	rcu_read_lock();
+ 
+-	napi = napi_by_id(napi_id);
++	napi = netdev_napi_by_id(genl_info_net(info), napi_id);
+ 	if (napi) {
+ 		err = netdev_nl_napi_fill_one(rsp, napi, info);
+ 	} else {
+@@ -254,6 +251,8 @@ netdev_nl_napi_dump_one(struct net_device *netdev, struct sk_buff *rsp,
+ 		return err;
+ 
+ 	list_for_each_entry(napi, &netdev->napi_list, dev_list) {
++		if (napi->napi_id < MIN_NAPI_ID)
++			continue;
+ 		if (ctx->napi_id && napi->napi_id >= ctx->napi_id)
+ 			continue;
+ 
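Moving napi_by_id() out of the shared header and wrapping it in netdev_napi_by_id() adds an ownership check: a hit in the global hash is only handed back if the NAPI's device lives in the caller's netns, which is what the netdev-genl doit above now relies on. A toy version of the lookup-then-check shape; the table and the netns tag are invented stand-ins (the real code walks an RCU hlist chain per bucket):

#include <stdio.h>

#define HASHSZ 8

struct entry { unsigned int id; int net; };

static struct entry table[HASHSZ] = {
        [3 % HASHSZ] = { .id = 3, .net = 1 },
};

static struct entry *lookup(unsigned int id, int net)
{
        struct entry *e = &table[id % HASHSZ];

        if (e->id != id)
                return NULL;            /* not hashed */
        if (e->net != net)
                return NULL;            /* belongs to another namespace */
        return e;
}

int main(void)
{
        printf("net 1: %s\n", lookup(3, 1) ? "found" : "miss");
        printf("net 2: %s\n", lookup(3, 2) ? "found" : "miss");
        return 0;
}
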
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index a7cd433a54c9ae..bcc2f1e090c7db 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -896,7 +896,7 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb,
+ 	sock_net_set(ctl_sk, net);
+ 	if (sk) {
+ 		ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
+-				   inet_twsk(sk)->tw_mark : sk->sk_mark;
++				   inet_twsk(sk)->tw_mark : READ_ONCE(sk->sk_mark);
+ 		ctl_sk->sk_priority = (sk->sk_state == TCP_TIME_WAIT) ?
+ 				   inet_twsk(sk)->tw_priority : READ_ONCE(sk->sk_priority);
+ 		transmit_time = tcp_transmit_time(sk);
+diff --git a/net/mptcp/ctrl.c b/net/mptcp/ctrl.c
+index 38d8121331d4a9..b0dd008e2114bc 100644
+--- a/net/mptcp/ctrl.c
++++ b/net/mptcp/ctrl.c
+@@ -102,16 +102,15 @@ static void mptcp_pernet_set_defaults(struct mptcp_pernet *pernet)
+ }
+ 
+ #ifdef CONFIG_SYSCTL
+-static int mptcp_set_scheduler(const struct net *net, const char *name)
++static int mptcp_set_scheduler(char *scheduler, const char *name)
+ {
+-	struct mptcp_pernet *pernet = mptcp_get_pernet(net);
+ 	struct mptcp_sched_ops *sched;
+ 	int ret = 0;
+ 
+ 	rcu_read_lock();
+ 	sched = mptcp_sched_find(name);
+ 	if (sched)
+-		strscpy(pernet->scheduler, name, MPTCP_SCHED_NAME_MAX);
++		strscpy(scheduler, name, MPTCP_SCHED_NAME_MAX);
+ 	else
+ 		ret = -ENOENT;
+ 	rcu_read_unlock();
+@@ -122,7 +121,7 @@ static int mptcp_set_scheduler(const struct net *net, const char *name)
+ static int proc_scheduler(const struct ctl_table *ctl, int write,
+ 			  void *buffer, size_t *lenp, loff_t *ppos)
+ {
+-	const struct net *net = current->nsproxy->net_ns;
++	char (*scheduler)[MPTCP_SCHED_NAME_MAX] = ctl->data;
+ 	char val[MPTCP_SCHED_NAME_MAX];
+ 	struct ctl_table tbl = {
+ 		.data = val,
+@@ -130,11 +129,11 @@ static int proc_scheduler(const struct ctl_table *ctl, int write,
+ 	};
+ 	int ret;
+ 
+-	strscpy(val, mptcp_get_scheduler(net), MPTCP_SCHED_NAME_MAX);
++	strscpy(val, *scheduler, MPTCP_SCHED_NAME_MAX);
+ 
+ 	ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
+ 	if (write && ret == 0)
+-		ret = mptcp_set_scheduler(net, val);
++		ret = mptcp_set_scheduler(*scheduler, val);
+ 
+ 	return ret;
+ }
+@@ -161,7 +160,9 @@ static int proc_blackhole_detect_timeout(const struct ctl_table *table,
+ 					 int write, void *buffer, size_t *lenp,
+ 					 loff_t *ppos)
+ {
+-	struct mptcp_pernet *pernet = mptcp_get_pernet(current->nsproxy->net_ns);
++	struct mptcp_pernet *pernet = container_of(table->data,
++						   struct mptcp_pernet,
++						   blackhole_timeout);
+ 	int ret;
+ 
+ 	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+@@ -228,7 +229,7 @@ static struct ctl_table mptcp_sysctl_table[] = {
+ 	{
+ 		.procname = "available_schedulers",
+ 		.maxlen	= MPTCP_SCHED_BUF_MAX,
+-		.mode = 0644,
++		.mode = 0444,
+ 		.proc_handler = proc_available_schedulers,
+ 	},
+ 	{
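The mptcp sysctl handlers stop reading current->nsproxy->net_ns and instead recover their per-netns state from ctl->data, either directly or via container_of() (the rds and sctp hunks further down make the same conversion). A self-contained user-space sketch of the container_of() recovery, with an invented struct and values:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct pernet_like {
        int blackhole_timeout;
        char scheduler[16];
};

static void handler(void *data)     /* data == &pernet->blackhole_timeout */
{
        struct pernet_like *pernet =
                container_of(data, struct pernet_like, blackhole_timeout);

        printf("scheduler=%s timeout=%d\n",
               pernet->scheduler, pernet->blackhole_timeout);
}

int main(void)
{
        struct pernet_like pernet = { 3600, "default" };

        handler(&pernet.blackhole_timeout);
        return 0;
}
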
+diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
+index 9db3e2b0b1c347..456446d7af200e 100644
+--- a/net/netfilter/nf_conntrack_core.c
++++ b/net/netfilter/nf_conntrack_core.c
+@@ -2517,12 +2517,15 @@ void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
+ 	struct hlist_nulls_head *hash;
+ 	unsigned int nr_slots, i;
+ 
+-	if (*sizep > (UINT_MAX / sizeof(struct hlist_nulls_head)))
++	if (*sizep > (INT_MAX / sizeof(struct hlist_nulls_head)))
+ 		return NULL;
+ 
+ 	BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
+ 	nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));
+ 
++	if (nr_slots > (INT_MAX / sizeof(struct hlist_nulls_head)))
++		return NULL;
++
+ 	hash = kvcalloc(nr_slots, sizeof(struct hlist_nulls_head), GFP_KERNEL);
+ 
+ 	if (hash && nulls)
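nf_ct_alloc_hashtable() now bounds the slot count against INT_MAX divided by the element size both before and after rounding up, so the multiplication inside the allocator cannot overflow. The guard in isolation, with arbitrary sizes:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

static void *alloc_table(size_t nr_slots, size_t slot_size)
{
        /* nr_slots * slot_size would overflow: refuse up front */
        if (nr_slots > INT_MAX / slot_size)
                return NULL;
        return calloc(nr_slots, slot_size);
}

int main(void)
{
        printf("sane: %p\n", alloc_table(1024, 8));
        printf("huge: %p\n", alloc_table((size_t)INT_MAX, 8)); /* NULL */
        return 0;
}
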
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 0c5ff4afc37022..42dc8cc721ff7b 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -8565,6 +8565,7 @@ static void nft_unregister_flowtable_hook(struct net *net,
+ }
+ 
+ static void __nft_unregister_flowtable_net_hooks(struct net *net,
++						 struct nft_flowtable *flowtable,
+ 						 struct list_head *hook_list,
+ 					         bool release_netdev)
+ {
+@@ -8572,6 +8573,8 @@ static void __nft_unregister_flowtable_net_hooks(struct net *net,
+ 
+ 	list_for_each_entry_safe(hook, next, hook_list, list) {
+ 		nf_unregister_net_hook(net, &hook->ops);
++		flowtable->data.type->setup(&flowtable->data, hook->ops.dev,
++					    FLOW_BLOCK_UNBIND);
+ 		if (release_netdev) {
+ 			list_del(&hook->list);
+ 			kfree_rcu(hook, rcu);
+@@ -8580,9 +8583,10 @@ static void __nft_unregister_flowtable_net_hooks(struct net *net,
+ }
+ 
+ static void nft_unregister_flowtable_net_hooks(struct net *net,
++					       struct nft_flowtable *flowtable,
+ 					       struct list_head *hook_list)
+ {
+-	__nft_unregister_flowtable_net_hooks(net, hook_list, false);
++	__nft_unregister_flowtable_net_hooks(net, flowtable, hook_list, false);
+ }
+ 
+ static int nft_register_flowtable_net_hooks(struct net *net,
+@@ -9223,8 +9227,6 @@ static void nf_tables_flowtable_destroy(struct nft_flowtable *flowtable)
+ 
+ 	flowtable->data.type->free(&flowtable->data);
+ 	list_for_each_entry_safe(hook, next, &flowtable->hook_list, list) {
+-		flowtable->data.type->setup(&flowtable->data, hook->ops.dev,
+-					    FLOW_BLOCK_UNBIND);
+ 		list_del_rcu(&hook->list);
+ 		kfree_rcu(hook, rcu);
+ 	}
+@@ -10622,6 +10624,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
+ 							   &nft_trans_flowtable_hooks(trans),
+ 							   trans->msg_type);
+ 				nft_unregister_flowtable_net_hooks(net,
++								   nft_trans_flowtable(trans),
+ 								   &nft_trans_flowtable_hooks(trans));
+ 			} else {
+ 				list_del_rcu(&nft_trans_flowtable(trans)->list);
+@@ -10630,6 +10633,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
+ 							   NULL,
+ 							   trans->msg_type);
+ 				nft_unregister_flowtable_net_hooks(net,
++						nft_trans_flowtable(trans),
+ 						&nft_trans_flowtable(trans)->hook_list);
+ 			}
+ 			break;
+@@ -10901,11 +10905,13 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
+ 		case NFT_MSG_NEWFLOWTABLE:
+ 			if (nft_trans_flowtable_update(trans)) {
+ 				nft_unregister_flowtable_net_hooks(net,
++						nft_trans_flowtable(trans),
+ 						&nft_trans_flowtable_hooks(trans));
+ 			} else {
+ 				nft_use_dec_restore(&table->use);
+ 				list_del_rcu(&nft_trans_flowtable(trans)->list);
+ 				nft_unregister_flowtable_net_hooks(net,
++						nft_trans_flowtable(trans),
+ 						&nft_trans_flowtable(trans)->hook_list);
+ 			}
+ 			break;
+@@ -11498,7 +11504,8 @@ static void __nft_release_hook(struct net *net, struct nft_table *table)
+ 	list_for_each_entry(chain, &table->chains, list)
+ 		__nf_tables_unregister_hook(net, table, chain, true);
+ 	list_for_each_entry(flowtable, &table->flowtables, list)
+-		__nft_unregister_flowtable_net_hooks(net, &flowtable->hook_list,
++		__nft_unregister_flowtable_net_hooks(net, flowtable,
++						     &flowtable->hook_list,
+ 						     true);
+ }
+ 
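The nf_tables hunks move the FLOW_BLOCK_UNBIND call from flowtable destroy into hook unregistration, so every bound hook is unbound exactly where it is torn down rather than later at object teardown. A sketch of that register/unregister pairing with invented names:

#include <stdio.h>

static void block_bind(const char *dev)   { printf("bind %s\n", dev); }
static void block_unbind(const char *dev) { printf("unbind %s\n", dev); }

static void register_hook(const char *dev)
{
        printf("hook on %s\n", dev);
        block_bind(dev);
}

static void unregister_hook(const char *dev)
{
        printf("unhook %s\n", dev);
        block_unbind(dev);      /* paired here, not in the destructor */
}

int main(void)
{
        register_hook("eth0");
        unregister_hook("eth0"); /* bind/unbind counts stay balanced */
        return 0;
}
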
+diff --git a/net/rds/tcp.c b/net/rds/tcp.c
+index 351ac1747224a3..0581c53e651704 100644
+--- a/net/rds/tcp.c
++++ b/net/rds/tcp.c
+@@ -61,8 +61,10 @@ static atomic_t rds_tcp_unloading = ATOMIC_INIT(0);
+ 
+ static struct kmem_cache *rds_tcp_conn_slab;
+ 
+-static int rds_tcp_skbuf_handler(const struct ctl_table *ctl, int write,
+-				 void *buffer, size_t *lenp, loff_t *fpos);
++static int rds_tcp_sndbuf_handler(const struct ctl_table *ctl, int write,
++				  void *buffer, size_t *lenp, loff_t *fpos);
++static int rds_tcp_rcvbuf_handler(const struct ctl_table *ctl, int write,
++				  void *buffer, size_t *lenp, loff_t *fpos);
+ 
+ static int rds_tcp_min_sndbuf = SOCK_MIN_SNDBUF;
+ static int rds_tcp_min_rcvbuf = SOCK_MIN_RCVBUF;
+@@ -74,7 +76,7 @@ static struct ctl_table rds_tcp_sysctl_table[] = {
+ 		/* data is per-net pointer */
+ 		.maxlen         = sizeof(int),
+ 		.mode           = 0644,
+-		.proc_handler   = rds_tcp_skbuf_handler,
++		.proc_handler   = rds_tcp_sndbuf_handler,
+ 		.extra1		= &rds_tcp_min_sndbuf,
+ 	},
+ #define	RDS_TCP_RCVBUF	1
+@@ -83,7 +85,7 @@ static struct ctl_table rds_tcp_sysctl_table[] = {
+ 		/* data is per-net pointer */
+ 		.maxlen         = sizeof(int),
+ 		.mode           = 0644,
+-		.proc_handler   = rds_tcp_skbuf_handler,
++		.proc_handler   = rds_tcp_rcvbuf_handler,
+ 		.extra1		= &rds_tcp_min_rcvbuf,
+ 	},
+ };
+@@ -682,10 +684,10 @@ static void rds_tcp_sysctl_reset(struct net *net)
+ 	spin_unlock_irq(&rds_tcp_conn_lock);
+ }
+ 
+-static int rds_tcp_skbuf_handler(const struct ctl_table *ctl, int write,
++static int rds_tcp_skbuf_handler(struct rds_tcp_net *rtn,
++				 const struct ctl_table *ctl, int write,
+ 				 void *buffer, size_t *lenp, loff_t *fpos)
+ {
+-	struct net *net = current->nsproxy->net_ns;
+ 	int err;
+ 
+ 	err = proc_dointvec_minmax(ctl, write, buffer, lenp, fpos);
+@@ -694,11 +696,34 @@ static int rds_tcp_skbuf_handler(const struct ctl_table *ctl, int write,
+ 			*(int *)(ctl->extra1));
+ 		return err;
+ 	}
+-	if (write)
++
++	if (write && rtn->rds_tcp_listen_sock && rtn->rds_tcp_listen_sock->sk) {
++		struct net *net = sock_net(rtn->rds_tcp_listen_sock->sk);
++
+ 		rds_tcp_sysctl_reset(net);
++	}
++
+ 	return 0;
+ }
+ 
++static int rds_tcp_sndbuf_handler(const struct ctl_table *ctl, int write,
++				  void *buffer, size_t *lenp, loff_t *fpos)
++{
++	struct rds_tcp_net *rtn = container_of(ctl->data, struct rds_tcp_net,
++					       sndbuf_size);
++
++	return rds_tcp_skbuf_handler(rtn, ctl, write, buffer, lenp, fpos);
++}
++
++static int rds_tcp_rcvbuf_handler(const struct ctl_table *ctl, int write,
++				  void *buffer, size_t *lenp, loff_t *fpos)
++{
++	struct rds_tcp_net *rtn = container_of(ctl->data, struct rds_tcp_net,
++					       rcvbuf_size);
++
++	return rds_tcp_skbuf_handler(rtn, ctl, write, buffer, lenp, fpos);
++}
++
+ static void rds_tcp_exit(void)
+ {
+ 	rds_tcp_set_unloading();
+diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
+index 5502998aace741..5c2580a07530e4 100644
+--- a/net/sched/cls_flow.c
++++ b/net/sched/cls_flow.c
+@@ -356,7 +356,8 @@ static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = {
+ 	[TCA_FLOW_KEYS]		= { .type = NLA_U32 },
+ 	[TCA_FLOW_MODE]		= { .type = NLA_U32 },
+ 	[TCA_FLOW_BASECLASS]	= { .type = NLA_U32 },
+-	[TCA_FLOW_RSHIFT]	= { .type = NLA_U32 },
++	[TCA_FLOW_RSHIFT]	= NLA_POLICY_MAX(NLA_U32,
++						 31 /* BITS_PER_U32 - 1 */),
+ 	[TCA_FLOW_ADDEND]	= { .type = NLA_U32 },
+ 	[TCA_FLOW_MASK]		= { .type = NLA_U32 },
+ 	[TCA_FLOW_XOR]		= { .type = NLA_U32 },
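Capping TCA_FLOW_RSHIFT at 31 in the policy keeps a later right shift of a 32-bit hash inside the defined range; shifting by the type width or more is undefined behaviour in C. A sketch of the equivalent validation:

#include <stdint.h>
#include <stdio.h>

static int set_rshift(uint32_t val, uint32_t *rshift)
{
        if (val > 31)           /* NLA_POLICY_MAX(NLA_U32, 31) equivalent */
                return -1;
        *rshift = val;
        return 0;
}

int main(void)
{
        uint32_t rshift;

        if (set_rshift(40, &rshift) < 0)
                printf("rejected: shift width >= 32 would be UB\n");
        if (set_rshift(4, &rshift) == 0)
                printf("hash >> %u is well defined\n", rshift);
        return 0;
}
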
+diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
+index 8d8b2db4653c0c..2c2e2a67f3b244 100644
+--- a/net/sched/sch_cake.c
++++ b/net/sched/sch_cake.c
+@@ -627,6 +627,63 @@ static bool cake_ddst(int flow_mode)
+ 	return (flow_mode & CAKE_FLOW_DUAL_DST) == CAKE_FLOW_DUAL_DST;
+ }
+ 
++static void cake_dec_srchost_bulk_flow_count(struct cake_tin_data *q,
++					     struct cake_flow *flow,
++					     int flow_mode)
++{
++	if (likely(cake_dsrc(flow_mode) &&
++		   q->hosts[flow->srchost].srchost_bulk_flow_count))
++		q->hosts[flow->srchost].srchost_bulk_flow_count--;
++}
++
++static void cake_inc_srchost_bulk_flow_count(struct cake_tin_data *q,
++					     struct cake_flow *flow,
++					     int flow_mode)
++{
++	if (likely(cake_dsrc(flow_mode) &&
++		   q->hosts[flow->srchost].srchost_bulk_flow_count < CAKE_QUEUES))
++		q->hosts[flow->srchost].srchost_bulk_flow_count++;
++}
++
++static void cake_dec_dsthost_bulk_flow_count(struct cake_tin_data *q,
++					     struct cake_flow *flow,
++					     int flow_mode)
++{
++	if (likely(cake_ddst(flow_mode) &&
++		   q->hosts[flow->dsthost].dsthost_bulk_flow_count))
++		q->hosts[flow->dsthost].dsthost_bulk_flow_count--;
++}
++
++static void cake_inc_dsthost_bulk_flow_count(struct cake_tin_data *q,
++					     struct cake_flow *flow,
++					     int flow_mode)
++{
++	if (likely(cake_ddst(flow_mode) &&
++		   q->hosts[flow->dsthost].dsthost_bulk_flow_count < CAKE_QUEUES))
++		q->hosts[flow->dsthost].dsthost_bulk_flow_count++;
++}
++
++static u16 cake_get_flow_quantum(struct cake_tin_data *q,
++				 struct cake_flow *flow,
++				 int flow_mode)
++{
++	u16 host_load = 1;
++
++	if (cake_dsrc(flow_mode))
++		host_load = max(host_load,
++				q->hosts[flow->srchost].srchost_bulk_flow_count);
++
++	if (cake_ddst(flow_mode))
++		host_load = max(host_load,
++				q->hosts[flow->dsthost].dsthost_bulk_flow_count);
++
++	/* The get_random_u16() is a way to apply dithering to avoid
++	 * accumulating roundoff errors
++	 */
++	return (q->flow_quantum * quantum_div[host_load] +
++		get_random_u16()) >> 16;
++}
++
+ static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
+ 		     int flow_mode, u16 flow_override, u16 host_override)
+ {
+@@ -773,10 +830,8 @@ static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
+ 		allocate_dst = cake_ddst(flow_mode);
+ 
+ 		if (q->flows[outer_hash + k].set == CAKE_SET_BULK) {
+-			if (allocate_src)
+-				q->hosts[q->flows[reduced_hash].srchost].srchost_bulk_flow_count--;
+-			if (allocate_dst)
+-				q->hosts[q->flows[reduced_hash].dsthost].dsthost_bulk_flow_count--;
++			cake_dec_srchost_bulk_flow_count(q, &q->flows[outer_hash + k], flow_mode);
++			cake_dec_dsthost_bulk_flow_count(q, &q->flows[outer_hash + k], flow_mode);
+ 		}
+ found:
+ 		/* reserve queue for future packets in same flow */
+@@ -801,9 +856,10 @@ static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
+ 			q->hosts[outer_hash + k].srchost_tag = srchost_hash;
+ found_src:
+ 			srchost_idx = outer_hash + k;
+-			if (q->flows[reduced_hash].set == CAKE_SET_BULK)
+-				q->hosts[srchost_idx].srchost_bulk_flow_count++;
+ 			q->flows[reduced_hash].srchost = srchost_idx;
++
++			if (q->flows[reduced_hash].set == CAKE_SET_BULK)
++				cake_inc_srchost_bulk_flow_count(q, &q->flows[reduced_hash], flow_mode);
+ 		}
+ 
+ 		if (allocate_dst) {
+@@ -824,9 +880,10 @@ static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
+ 			q->hosts[outer_hash + k].dsthost_tag = dsthost_hash;
+ found_dst:
+ 			dsthost_idx = outer_hash + k;
+-			if (q->flows[reduced_hash].set == CAKE_SET_BULK)
+-				q->hosts[dsthost_idx].dsthost_bulk_flow_count++;
+ 			q->flows[reduced_hash].dsthost = dsthost_idx;
++
++			if (q->flows[reduced_hash].set == CAKE_SET_BULK)
++				cake_inc_dsthost_bulk_flow_count(q, &q->flows[reduced_hash], flow_mode);
+ 		}
+ 	}
+ 
+@@ -1839,10 +1896,6 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ 
+ 	/* flowchain */
+ 	if (!flow->set || flow->set == CAKE_SET_DECAYING) {
+-		struct cake_host *srchost = &b->hosts[flow->srchost];
+-		struct cake_host *dsthost = &b->hosts[flow->dsthost];
+-		u16 host_load = 1;
+-
+ 		if (!flow->set) {
+ 			list_add_tail(&flow->flowchain, &b->new_flows);
+ 		} else {
+@@ -1852,18 +1905,8 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ 		flow->set = CAKE_SET_SPARSE;
+ 		b->sparse_flow_count++;
+ 
+-		if (cake_dsrc(q->flow_mode))
+-			host_load = max(host_load, srchost->srchost_bulk_flow_count);
+-
+-		if (cake_ddst(q->flow_mode))
+-			host_load = max(host_load, dsthost->dsthost_bulk_flow_count);
+-
+-		flow->deficit = (b->flow_quantum *
+-				 quantum_div[host_load]) >> 16;
++		flow->deficit = cake_get_flow_quantum(b, flow, q->flow_mode);
+ 	} else if (flow->set == CAKE_SET_SPARSE_WAIT) {
+-		struct cake_host *srchost = &b->hosts[flow->srchost];
+-		struct cake_host *dsthost = &b->hosts[flow->dsthost];
+-
+ 		/* this flow was empty, accounted as a sparse flow, but actually
+ 		 * in the bulk rotation.
+ 		 */
+@@ -1871,12 +1914,8 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ 		b->sparse_flow_count--;
+ 		b->bulk_flow_count++;
+ 
+-		if (cake_dsrc(q->flow_mode))
+-			srchost->srchost_bulk_flow_count++;
+-
+-		if (cake_ddst(q->flow_mode))
+-			dsthost->dsthost_bulk_flow_count++;
+-
++		cake_inc_srchost_bulk_flow_count(b, flow, q->flow_mode);
++		cake_inc_dsthost_bulk_flow_count(b, flow, q->flow_mode);
+ 	}
+ 
+ 	if (q->buffer_used > q->buffer_max_used)
+@@ -1933,13 +1972,11 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
+ {
+ 	struct cake_sched_data *q = qdisc_priv(sch);
+ 	struct cake_tin_data *b = &q->tins[q->cur_tin];
+-	struct cake_host *srchost, *dsthost;
+ 	ktime_t now = ktime_get();
+ 	struct cake_flow *flow;
+ 	struct list_head *head;
+ 	bool first_flow = true;
+ 	struct sk_buff *skb;
+-	u16 host_load;
+ 	u64 delay;
+ 	u32 len;
+ 
+@@ -2039,11 +2076,6 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
+ 	q->cur_flow = flow - b->flows;
+ 	first_flow = false;
+ 
+-	/* triple isolation (modified DRR++) */
+-	srchost = &b->hosts[flow->srchost];
+-	dsthost = &b->hosts[flow->dsthost];
+-	host_load = 1;
+-
+ 	/* flow isolation (DRR++) */
+ 	if (flow->deficit <= 0) {
+ 		/* Keep all flows with deficits out of the sparse and decaying
+@@ -2055,11 +2087,8 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
+ 				b->sparse_flow_count--;
+ 				b->bulk_flow_count++;
+ 
+-				if (cake_dsrc(q->flow_mode))
+-					srchost->srchost_bulk_flow_count++;
+-
+-				if (cake_ddst(q->flow_mode))
+-					dsthost->dsthost_bulk_flow_count++;
++				cake_inc_srchost_bulk_flow_count(b, flow, q->flow_mode);
++				cake_inc_dsthost_bulk_flow_count(b, flow, q->flow_mode);
+ 
+ 				flow->set = CAKE_SET_BULK;
+ 			} else {
+@@ -2071,19 +2100,7 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
+ 			}
+ 		}
+ 
+-		if (cake_dsrc(q->flow_mode))
+-			host_load = max(host_load, srchost->srchost_bulk_flow_count);
+-
+-		if (cake_ddst(q->flow_mode))
+-			host_load = max(host_load, dsthost->dsthost_bulk_flow_count);
+-
+-		WARN_ON(host_load > CAKE_QUEUES);
+-
+-		/* The get_random_u16() is a way to apply dithering to avoid
+-		 * accumulating roundoff errors
+-		 */
+-		flow->deficit += (b->flow_quantum * quantum_div[host_load] +
+-				  get_random_u16()) >> 16;
++		flow->deficit += cake_get_flow_quantum(b, flow, q->flow_mode);
+ 		list_move_tail(&flow->flowchain, &b->old_flows);
+ 
+ 		goto retry;
+@@ -2107,11 +2124,8 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
+ 				if (flow->set == CAKE_SET_BULK) {
+ 					b->bulk_flow_count--;
+ 
+-					if (cake_dsrc(q->flow_mode))
+-						srchost->srchost_bulk_flow_count--;
+-
+-					if (cake_ddst(q->flow_mode))
+-						dsthost->dsthost_bulk_flow_count--;
++					cake_dec_srchost_bulk_flow_count(b, flow, q->flow_mode);
++					cake_dec_dsthost_bulk_flow_count(b, flow, q->flow_mode);
+ 
+ 					b->decaying_flow_count++;
+ 				} else if (flow->set == CAKE_SET_SPARSE ||
+@@ -2129,12 +2143,8 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
+ 				else if (flow->set == CAKE_SET_BULK) {
+ 					b->bulk_flow_count--;
+ 
+-					if (cake_dsrc(q->flow_mode))
+-						srchost->srchost_bulk_flow_count--;
+-
+-					if (cake_ddst(q->flow_mode))
+-						dsthost->dsthost_bulk_flow_count--;
+-
++					cake_dec_srchost_bulk_flow_count(b, flow, q->flow_mode);
++					cake_dec_dsthost_bulk_flow_count(b, flow, q->flow_mode);
+ 				} else
+ 					b->decaying_flow_count--;
+ 
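cake_get_flow_quantum() computes a per-host fair share in 16.16 fixed point: quantum_div[] is a reciprocal table (assumed here to be 65535/host_load) and the added get_random_u16() dithers the truncation so the long-run average share is exact. A user-space rendering of the math with an arbitrary quantum and rand() as a crude stand-in for get_random_u16():

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        uint32_t flow_quantum = 1514;           /* bytes per round */
        uint16_t quantum_div_3 = 65535 / 3;     /* host_load == 3 */
        uint64_t acc = 0;

        for (int i = 0; i < 100000; i++) {
                uint16_t rnd = rand() & 0xffff;
                acc += (flow_quantum * quantum_div_3 + rnd) >> 16;
        }
        /* averages to ~1514/3 = 504.67 instead of truncating to 504 */
        printf("mean quantum: %.2f\n", acc / 100000.0);
        return 0;
}

The new inc/dec helpers also clamp srchost_bulk_flow_count and dsthost_bulk_flow_count to [0, CAKE_QUEUES], which replaces the old WARN_ON(host_load > CAKE_QUEUES) with counters that cannot go out of range in the first place.
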
+diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
+index e5a5af343c4c98..8e1e97be4df79f 100644
+--- a/net/sctp/sysctl.c
++++ b/net/sctp/sysctl.c
+@@ -387,7 +387,8 @@ static struct ctl_table sctp_net_table[] = {
+ static int proc_sctp_do_hmac_alg(const struct ctl_table *ctl, int write,
+ 				 void *buffer, size_t *lenp, loff_t *ppos)
+ {
+-	struct net *net = current->nsproxy->net_ns;
++	struct net *net = container_of(ctl->data, struct net,
++				       sctp.sctp_hmac_alg);
+ 	struct ctl_table tbl;
+ 	bool changed = false;
+ 	char *none = "none";
+@@ -432,7 +433,7 @@ static int proc_sctp_do_hmac_alg(const struct ctl_table *ctl, int write,
+ static int proc_sctp_do_rto_min(const struct ctl_table *ctl, int write,
+ 				void *buffer, size_t *lenp, loff_t *ppos)
+ {
+-	struct net *net = current->nsproxy->net_ns;
++	struct net *net = container_of(ctl->data, struct net, sctp.rto_min);
+ 	unsigned int min = *(unsigned int *) ctl->extra1;
+ 	unsigned int max = *(unsigned int *) ctl->extra2;
+ 	struct ctl_table tbl;
+@@ -460,7 +461,7 @@ static int proc_sctp_do_rto_min(const struct ctl_table *ctl, int write,
+ static int proc_sctp_do_rto_max(const struct ctl_table *ctl, int write,
+ 				void *buffer, size_t *lenp, loff_t *ppos)
+ {
+-	struct net *net = current->nsproxy->net_ns;
++	struct net *net = container_of(ctl->data, struct net, sctp.rto_max);
+ 	unsigned int min = *(unsigned int *) ctl->extra1;
+ 	unsigned int max = *(unsigned int *) ctl->extra2;
+ 	struct ctl_table tbl;
+@@ -498,7 +499,7 @@ static int proc_sctp_do_alpha_beta(const struct ctl_table *ctl, int write,
+ static int proc_sctp_do_auth(const struct ctl_table *ctl, int write,
+ 			     void *buffer, size_t *lenp, loff_t *ppos)
+ {
+-	struct net *net = current->nsproxy->net_ns;
++	struct net *net = container_of(ctl->data, struct net, sctp.auth_enable);
+ 	struct ctl_table tbl;
+ 	int new_value, ret;
+ 
+@@ -527,7 +528,7 @@ static int proc_sctp_do_auth(const struct ctl_table *ctl, int write,
+ static int proc_sctp_do_udp_port(const struct ctl_table *ctl, int write,
+ 				 void *buffer, size_t *lenp, loff_t *ppos)
+ {
+-	struct net *net = current->nsproxy->net_ns;
++	struct net *net = container_of(ctl->data, struct net, sctp.udp_port);
+ 	unsigned int min = *(unsigned int *)ctl->extra1;
+ 	unsigned int max = *(unsigned int *)ctl->extra2;
+ 	struct ctl_table tbl;
+@@ -568,7 +569,8 @@ static int proc_sctp_do_udp_port(const struct ctl_table *ctl, int write,
+ static int proc_sctp_do_probe_interval(const struct ctl_table *ctl, int write,
+ 				       void *buffer, size_t *lenp, loff_t *ppos)
+ {
+-	struct net *net = current->nsproxy->net_ns;
++	struct net *net = container_of(ctl->data, struct net,
++				       sctp.probe_interval);
+ 	struct ctl_table tbl;
+ 	int ret, new_value;
+ 
+diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
+index bbf26cc4f6ee26..7bcc9b4408a2c7 100644
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -458,7 +458,7 @@ int tls_tx_records(struct sock *sk, int flags)
+ 
+ tx_err:
+ 	if (rc < 0 && rc != -EAGAIN)
+-		tls_err_abort(sk, -EBADMSG);
++		tls_err_abort(sk, rc);
+ 
+ 	return rc;
+ }
+diff --git a/sound/soc/codecs/rt722-sdca.c b/sound/soc/codecs/rt722-sdca.c
+index f9f7512ca36087..9a0747c4bdeac0 100644
+--- a/sound/soc/codecs/rt722-sdca.c
++++ b/sound/soc/codecs/rt722-sdca.c
+@@ -1467,13 +1467,18 @@ static void rt722_sdca_jack_preset(struct rt722_sdca_priv *rt722)
+ 		0x008d);
+ 	/* check HP calibration FSM status */
+ 	for (loop_check = 0; loop_check < chk_cnt; loop_check++) {
++		usleep_range(10000, 11000);
+ 		ret = rt722_sdca_index_read(rt722, RT722_VENDOR_CALI,
+ 			RT722_DAC_DC_CALI_CTL3, &calib_status);
+-		if (ret < 0 || loop_check == chk_cnt)
++		if (ret < 0)
+ 			dev_dbg(&rt722->slave->dev, "calibration failed!, ret=%d\n", ret);
+ 		if ((calib_status & 0x0040) == 0x0)
+ 			break;
+ 	}
++
++	if (loop_check == chk_cnt)
++		dev_dbg(&rt722->slave->dev, "%s, calibration time-out!\n", __func__);
++
+ 	/* Set ADC09 power entity floating control */
+ 	rt722_sdca_index_write(rt722, RT722_VENDOR_HDA_CTL, RT722_ADC0A_08_PDE_FLOAT_CTL,
+ 		0x2a12);
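The rt722 hunk fixes a timeout test that could never fire: loop_check == chk_cnt was evaluated inside a loop bounded by loop_check < chk_cnt. Moving the check after the loop, and sleeping before each read, gives the usual poll-with-timeout shape, sketched here with a faked status register:

#include <stdio.h>
#include <time.h>

#define CHK_CNT 100

static int read_status(void)
{
        static int busy_passes = 3;     /* busy for the first three reads */
        return busy_passes-- > 0 ? 0x0040 : 0;  /* bit 6 = still busy */
}

int main(void)
{
        struct timespec ts = { 0, 10 * 1000 * 1000 }; /* ~10ms per pass */
        int loop;

        for (loop = 0; loop < CHK_CNT; loop++) {
                nanosleep(&ts, NULL);
                if ((read_status() & 0x0040) == 0)
                        break;          /* calibration done */
        }
        if (loop == CHK_CNT)            /* only reachable without a break */
                printf("calibration timed out\n");
        else
                printf("calibrated after %d polls\n", loop + 1);
        return 0;
}
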
+diff --git a/sound/soc/mediatek/common/mtk-afe-platform-driver.c b/sound/soc/mediatek/common/mtk-afe-platform-driver.c
+index 9b72b2a7ae917e..6b633058394140 100644
+--- a/sound/soc/mediatek/common/mtk-afe-platform-driver.c
++++ b/sound/soc/mediatek/common/mtk-afe-platform-driver.c
+@@ -120,8 +120,8 @@ int mtk_afe_pcm_new(struct snd_soc_component *component,
+ 	struct mtk_base_afe *afe = snd_soc_component_get_drvdata(component);
+ 
+ 	size = afe->mtk_afe_hardware->buffer_bytes_max;
+-	snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_DEV,
+-				       afe->dev, size, size);
++	snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_DEV, afe->dev, 0, size);
++
+ 	return 0;
+ }
+ EXPORT_SYMBOL_GPL(mtk_afe_pcm_new);
+diff --git a/tools/testing/selftests/alsa/Makefile b/tools/testing/selftests/alsa/Makefile
+index 944279160fed26..8dab90ad22bb27 100644
+--- a/tools/testing/selftests/alsa/Makefile
++++ b/tools/testing/selftests/alsa/Makefile
+@@ -27,5 +27,5 @@ include ../lib.mk
+ $(OUTPUT)/libatest.so: conf.c alsa-local.h
+ 	$(CC) $(CFLAGS) -shared -fPIC $< $(LDLIBS) -o $@
+ 
+-$(OUTPUT)/%: %.c $(TEST_GEN_PROGS_EXTENDED) alsa-local.h
++$(OUTPUT)/%: %.c $(OUTPUT)/libatest.so alsa-local.h
+ 	$(CC) $(CFLAGS) $< $(LDLIBS) -latest -o $@
+diff --git a/tools/testing/selftests/cgroup/test_cpuset_prs.sh b/tools/testing/selftests/cgroup/test_cpuset_prs.sh
+index 03c1bdaed2c3c5..400a696a0d212e 100755
+--- a/tools/testing/selftests/cgroup/test_cpuset_prs.sh
++++ b/tools/testing/selftests/cgroup/test_cpuset_prs.sh
+@@ -86,15 +86,15 @@ echo "" > test/cpuset.cpus
+ 
+ #
+ # If isolated CPUs have been reserved at boot time (as shown in
+-# cpuset.cpus.isolated), these isolated CPUs should be outside of CPUs 0-7
++# cpuset.cpus.isolated), these isolated CPUs should be outside of CPUs 0-8
+ # that will be used by this script for testing purpose. If not, some of
+-# the tests may fail incorrectly. These isolated CPUs will also be removed
+-# before being compared with the expected results.
++# the tests may fail incorrectly. These pre-isolated CPUs should stay in
++# an isolated state throughout the testing process for now.
+ #
+ BOOT_ISOLCPUS=$(cat $CGROUP2/cpuset.cpus.isolated)
+ if [[ -n "$BOOT_ISOLCPUS" ]]
+ then
+-	[[ $(echo $BOOT_ISOLCPUS | sed -e "s/[,-].*//") -le 7 ]] &&
++	[[ $(echo $BOOT_ISOLCPUS | sed -e "s/[,-].*//") -le 8 ]] &&
+ 		skip_test "Pre-isolated CPUs ($BOOT_ISOLCPUS) overlap CPUs to be tested"
+ 	echo "Pre-isolated CPUs: $BOOT_ISOLCPUS"
+ fi
+@@ -683,15 +683,19 @@ check_isolcpus()
+ 		EXPECT_VAL2=$EXPECT_VAL
+ 	fi
+ 
++	#
++	# Appending pre-isolated CPUs
++	# Even though CPU #8 isn't used for testing, it can't be pre-isolated
++	# to make appending those CPUs easier.
++	#
++	[[ -n "$BOOT_ISOLCPUS" ]] && {
++		EXPECT_VAL=${EXPECT_VAL:+${EXPECT_VAL},}${BOOT_ISOLCPUS}
++		EXPECT_VAL2=${EXPECT_VAL2:+${EXPECT_VAL2},}${BOOT_ISOLCPUS}
++	}
++
+ 	#
+ 	# Check cpuset.cpus.isolated cpumask
+ 	#
+-	if [[ -z "$BOOT_ISOLCPUS" ]]
+-	then
+-		ISOLCPUS=$(cat $ISCPUS)
+-	else
+-		ISOLCPUS=$(cat $ISCPUS | sed -e "s/,*$BOOT_ISOLCPUS//")
+-	fi
+ 	[[ "$EXPECT_VAL2" != "$ISOLCPUS" ]] && {
+ 		# Take a 50ms pause and try again
+ 		pause 0.05
+@@ -731,8 +735,6 @@ check_isolcpus()
+ 		fi
+ 	done
+ 	[[ "$ISOLCPUS" = *- ]] && ISOLCPUS=${ISOLCPUS}$LASTISOLCPU
+-	[[ -n "BOOT_ISOLCPUS" ]] &&
+-		ISOLCPUS=$(echo $ISOLCPUS | sed -e "s/,*$BOOT_ISOLCPUS//")
+ 
+ 	[[ "$EXPECT_VAL" = "$ISOLCPUS" ]]
+ }
+@@ -836,8 +838,11 @@ run_state_test()
+ 		# if available
+ 		[[ -n "$ICPUS" ]] && {
+ 			check_isolcpus $ICPUS
+-			[[ $? -ne 0 ]] && test_fail $I "isolated CPU" \
+-				"Expect $ICPUS, get $ISOLCPUS instead"
++			[[ $? -ne 0 ]] && {
++				[[ -n "$BOOT_ISOLCPUS" ]] && ICPUS=${ICPUS},${BOOT_ISOLCPUS}
++				test_fail $I "isolated CPU" \
++					"Expect $ICPUS, get $ISOLCPUS instead"
++			}
+ 		}
+ 		reset_cgroup_states
+ 		#


* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2025-01-09 13:51 Mike Pagano
  0 siblings, 0 replies; 82+ messages in thread
From: Mike Pagano @ 2025-01-09 13:51 UTC (permalink / raw
  To: gentoo-commits

commit:     dce11bba7397f8cff2d315b9195b222824bbeed4
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Jan  9 13:51:24 2025 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Jan  9 13:51:24 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=dce11bba

Linux patch 6.12.9

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1008_linux-6.12.9.patch | 6461 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 6465 insertions(+)

diff --git a/0000_README b/0000_README
index 483a9fde..29d9187b 100644
--- a/0000_README
+++ b/0000_README
@@ -75,6 +75,10 @@ Patch:  1007_linux-6.12.8.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.12.8
 
+Patch:  1008_linux-6.12.9.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.12.9
+
 Patch:  1510_fs-enable-link-security-restrictions-by-default.patch
 From:   http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch/
 Desc:   Enable link security restrictions by default.

diff --git a/1008_linux-6.12.9.patch b/1008_linux-6.12.9.patch
new file mode 100644
index 00000000..9db1b6b3
--- /dev/null
+++ b/1008_linux-6.12.9.patch
@@ -0,0 +1,6461 @@
+diff --git a/Documentation/admin-guide/laptops/thinkpad-acpi.rst b/Documentation/admin-guide/laptops/thinkpad-acpi.rst
+index 7f674a6cfa8a7b..4ab0fef7d440d1 100644
+--- a/Documentation/admin-guide/laptops/thinkpad-acpi.rst
++++ b/Documentation/admin-guide/laptops/thinkpad-acpi.rst
+@@ -445,8 +445,10 @@ event	code	Key		Notes
+ 0x1008	0x07	FN+F8		IBM: toggle screen expand
+ 				Lenovo: configure UltraNav,
+ 				or toggle screen expand.
+-				On newer platforms (2024+)
+-				replaced by 0x131f (see below)
++				On 2024 platforms replaced by
++				0x131f (see below) and on newer
++				platforms (2025 +) keycode is
++				replaced by 0x1401 (see below).
+ 
+ 0x1009	0x08	FN+F9		-
+ 
+@@ -506,9 +508,11 @@ event	code	Key		Notes
+ 
+ 0x1019	0x18	unknown
+ 
+-0x131f	...	FN+F8	        Platform Mode change.
++0x131f	...	FN+F8		Platform Mode change (2024 systems).
+ 				Implemented in driver.
+ 
++0x1401	...	FN+F8		Platform Mode change (2025 + systems).
++				Implemented in driver.
+ ...	...	...
+ 
+ 0x1020	0x1F	unknown
+diff --git a/Documentation/devicetree/bindings/display/bridge/adi,adv7533.yaml b/Documentation/devicetree/bindings/display/bridge/adi,adv7533.yaml
+index df20a3c9c74479..ec89115c74e4d3 100644
+--- a/Documentation/devicetree/bindings/display/bridge/adi,adv7533.yaml
++++ b/Documentation/devicetree/bindings/display/bridge/adi,adv7533.yaml
+@@ -90,7 +90,7 @@ properties:
+   adi,dsi-lanes:
+     description: Number of DSI data lanes connected to the DSI host.
+     $ref: /schemas/types.yaml#/definitions/uint32
+-    enum: [ 1, 2, 3, 4 ]
++    enum: [ 2, 3, 4 ]
+ 
+   "#sound-dai-cells":
+     const: 0
+diff --git a/Makefile b/Makefile
+index 8a10105c2539cf..80151f53d8ee0f 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 12
+-SUBLEVEL = 8
++SUBLEVEL = 9
+ EXTRAVERSION =
+ NAME = Baby Opossum Posse
+ 
+diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
+index 5b248814204147..69c6e71fa1e6ba 100644
+--- a/arch/arc/Kconfig
++++ b/arch/arc/Kconfig
+@@ -297,7 +297,6 @@ config ARC_PAGE_SIZE_16K
+ config ARC_PAGE_SIZE_4K
+ 	bool "4KB"
+ 	select HAVE_PAGE_SIZE_4KB
+-	depends on ARC_MMU_V3 || ARC_MMU_V4
+ 
+ endchoice
+ 
+@@ -474,7 +473,8 @@ config HIGHMEM
+ 
+ config ARC_HAS_PAE40
+ 	bool "Support for the 40-bit Physical Address Extension"
+-	depends on ISA_ARCV2
++	depends on MMU_V4
++	depends on !ARC_PAGE_SIZE_4K
+ 	select HIGHMEM
+ 	select PHYS_ADDR_T_64BIT
+ 	help
+diff --git a/arch/arc/Makefile b/arch/arc/Makefile
+index 2390dd042e3636..fb98478ed1ab09 100644
+--- a/arch/arc/Makefile
++++ b/arch/arc/Makefile
+@@ -6,7 +6,7 @@
+ KBUILD_DEFCONFIG := haps_hs_smp_defconfig
+ 
+ ifeq ($(CROSS_COMPILE),)
+-CROSS_COMPILE := $(call cc-cross-prefix, arc-linux- arceb-linux-)
++CROSS_COMPILE := $(call cc-cross-prefix, arc-linux- arceb-linux- arc-linux-gnu-)
+ endif
+ 
+ cflags-y	+= -fno-common -pipe -fno-builtin -mmedium-calls -D__linux__
+diff --git a/arch/arc/include/asm/cmpxchg.h b/arch/arc/include/asm/cmpxchg.h
+index 58045c89834045..76f43db0890fcd 100644
+--- a/arch/arc/include/asm/cmpxchg.h
++++ b/arch/arc/include/asm/cmpxchg.h
+@@ -48,7 +48,7 @@
+ 									\
+ 	switch(sizeof((_p_))) {						\
+ 	case 1:								\
+-		_prev_ = (__typeof__(*(ptr)))cmpxchg_emu_u8((volatile u8 *)_p_, (uintptr_t)_o_, (uintptr_t)_n_);	\
++		_prev_ = (__typeof__(*(ptr)))cmpxchg_emu_u8((volatile u8 *__force)_p_, (uintptr_t)_o_, (uintptr_t)_n_);	\
+ 		break;							\
+ 	case 4:								\
+ 		_prev_ = __cmpxchg(_p_, _o_, _n_);			\
+diff --git a/arch/arc/net/bpf_jit_arcv2.c b/arch/arc/net/bpf_jit_arcv2.c
+index 4458e409ca0a84..6d989b6d88c69b 100644
+--- a/arch/arc/net/bpf_jit_arcv2.c
++++ b/arch/arc/net/bpf_jit_arcv2.c
+@@ -2916,7 +2916,7 @@ bool check_jmp_32(u32 curr_off, u32 targ_off, u8 cond)
+ 	addendum = (cond == ARC_CC_AL) ? 0 : INSN_len_normal;
+ 	disp = get_displacement(curr_off + addendum, targ_off);
+ 
+-	if (ARC_CC_AL)
++	if (cond == ARC_CC_AL)
+ 		return is_valid_far_disp(disp);
+ 	else
+ 		return is_valid_near_disp(disp);
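The bpf_jit hunk repairs if (ARC_CC_AL), which tests a nonzero constant and therefore always took the far-displacement branch regardless of the actual condition code. A minimal reproduction of the bug class, with invented condition codes:

#include <stdio.h>

enum cc { CC_NE = 1, CC_AL = 2 };

static const char *pick(enum cc cond)
{
        (void)cond;
        if (CC_AL)              /* BUG: nonzero constant, always taken */
                return "far";
        return "near";
}

static const char *pick_fixed(enum cc cond)
{
        if (cond == CC_AL)      /* compare the variable, as intended */
                return "far";
        return "near";
}

int main(void)
{
        printf("buggy: CC_NE -> %s\n", pick(CC_NE));        /* far (wrong) */
        printf("fixed: CC_NE -> %s\n", pick_fixed(CC_NE));  /* near */
        return 0;
}
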
+diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
+index 28b4312f25631c..f558be868a50b6 100644
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -7067,6 +7067,7 @@ __init int intel_pmu_init(void)
+ 
+ 	case INTEL_METEORLAKE:
+ 	case INTEL_METEORLAKE_L:
++	case INTEL_ARROWLAKE_U:
+ 		intel_pmu_init_hybrid(hybrid_big_small);
+ 
+ 		x86_pmu.pebs_latency_data = cmt_latency_data;
+diff --git a/block/blk.h b/block/blk.h
+index 88fab6a81701ed..1426f9c281973e 100644
+--- a/block/blk.h
++++ b/block/blk.h
+@@ -469,11 +469,6 @@ static inline bool bio_zone_write_plugging(struct bio *bio)
+ {
+ 	return bio_flagged(bio, BIO_ZONE_WRITE_PLUGGING);
+ }
+-static inline bool bio_is_zone_append(struct bio *bio)
+-{
+-	return bio_op(bio) == REQ_OP_ZONE_APPEND ||
+-		bio_flagged(bio, BIO_EMULATES_ZONE_APPEND);
+-}
+ void blk_zone_write_plug_bio_merged(struct bio *bio);
+ void blk_zone_write_plug_init_request(struct request *rq);
+ static inline void blk_zone_update_request_bio(struct request *rq,
+@@ -522,10 +517,6 @@ static inline bool bio_zone_write_plugging(struct bio *bio)
+ {
+ 	return false;
+ }
+-static inline bool bio_is_zone_append(struct bio *bio)
+-{
+-	return false;
+-}
+ static inline void blk_zone_write_plug_bio_merged(struct bio *bio)
+ {
+ }
+diff --git a/drivers/clk/imx/clk-imx8mp-audiomix.c b/drivers/clk/imx/clk-imx8mp-audiomix.c
+index b2cb157703c57f..c409fc7e061869 100644
+--- a/drivers/clk/imx/clk-imx8mp-audiomix.c
++++ b/drivers/clk/imx/clk-imx8mp-audiomix.c
+@@ -278,7 +278,8 @@ static int clk_imx8mp_audiomix_reset_controller_register(struct device *dev,
+ 
+ #else /* !CONFIG_RESET_CONTROLLER */
+ 
+-static int clk_imx8mp_audiomix_reset_controller_register(struct clk_imx8mp_audiomix_priv *priv)
++static int clk_imx8mp_audiomix_reset_controller_register(struct device *dev,
++							 struct clk_imx8mp_audiomix_priv *priv)
+ {
+ 	return 0;
+ }
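The imx8mp-audiomix hunk fixes a stub whose parameter list had drifted from the real function, a breakage that only bites in builds with CONFIG_RESET_CONTROLLER disabled. A sketch of keeping a config-gated stub in signature lockstep; the names are invented:

#include <stdio.h>

struct device;                  /* opaque, as in a kernel header */
struct priv;

#ifdef CONFIG_FEATURE
int register_reset(struct device *dev, struct priv *priv); /* real version */
#else
/* the stub must mirror the real function's parameter list exactly */
static inline int register_reset(struct device *dev, struct priv *priv)
{
        (void)dev;
        (void)priv;
        return 0;
}
#endif

int main(void)
{
        printf("stub returns %d\n", register_reset(NULL, NULL));
        return 0;
}
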
+diff --git a/drivers/clk/thead/clk-th1520-ap.c b/drivers/clk/thead/clk-th1520-ap.c
+index 17e32ae08720cb..1015fab9525157 100644
+--- a/drivers/clk/thead/clk-th1520-ap.c
++++ b/drivers/clk/thead/clk-th1520-ap.c
+@@ -779,6 +779,13 @@ static struct ccu_div dpu1_clk = {
+ 	},
+ };
+ 
++static CLK_FIXED_FACTOR_HW(emmc_sdio_ref_clk, "emmc-sdio-ref",
++			   &video_pll_clk.common.hw, 4, 1, 0);
++
++static const struct clk_parent_data emmc_sdio_ref_clk_pd[] = {
++	{ .hw = &emmc_sdio_ref_clk.hw },
++};
++
+ static CCU_GATE(CLK_BROM, brom_clk, "brom", ahb2_cpusys_hclk_pd, 0x100, BIT(4), 0);
+ static CCU_GATE(CLK_BMU, bmu_clk, "bmu", axi4_cpusys2_aclk_pd, 0x100, BIT(5), 0);
+ static CCU_GATE(CLK_AON2CPU_A2X, aon2cpu_a2x_clk, "aon2cpu-a2x", axi4_cpusys2_aclk_pd,
+@@ -798,7 +805,7 @@ static CCU_GATE(CLK_PERISYS_APB4_HCLK, perisys_apb4_hclk, "perisys-apb4-hclk", p
+ 		0x150, BIT(12), 0);
+ static CCU_GATE(CLK_NPU_AXI, npu_axi_clk, "npu-axi", axi_aclk_pd, 0x1c8, BIT(5), 0);
+ static CCU_GATE(CLK_CPU2VP, cpu2vp_clk, "cpu2vp", axi_aclk_pd, 0x1e0, BIT(13), 0);
+-static CCU_GATE(CLK_EMMC_SDIO, emmc_sdio_clk, "emmc-sdio", video_pll_clk_pd, 0x204, BIT(30), 0);
++static CCU_GATE(CLK_EMMC_SDIO, emmc_sdio_clk, "emmc-sdio", emmc_sdio_ref_clk_pd, 0x204, BIT(30), 0);
+ static CCU_GATE(CLK_GMAC1, gmac1_clk, "gmac1", gmac_pll_clk_pd, 0x204, BIT(26), 0);
+ static CCU_GATE(CLK_PADCTRL1, padctrl1_clk, "padctrl1", perisys_apb_pclk_pd, 0x204, BIT(24), 0);
+ static CCU_GATE(CLK_DSMART, dsmart_clk, "dsmart", perisys_apb_pclk_pd, 0x204, BIT(23), 0);
+@@ -1059,6 +1066,10 @@ static int th1520_clk_probe(struct platform_device *pdev)
+ 		return ret;
+ 	priv->hws[CLK_PLL_GMAC_100M] = &gmac_pll_clk_100m.hw;
+ 
++	ret = devm_clk_hw_register(dev, &emmc_sdio_ref_clk.hw);
++	if (ret)
++		return ret;
++
+ 	ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, priv);
+ 	if (ret)
+ 		return ret;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 51904906545e59..45e28726e148e9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -3721,8 +3721,12 @@ static int amdgpu_device_ip_resume_phase3(struct amdgpu_device *adev)
+ 			continue;
+ 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) {
+ 			r = adev->ip_blocks[i].version->funcs->resume(adev);
+-			if (r)
++			if (r) {
++				DRM_ERROR("resume of IP block <%s> failed %d\n",
++					  adev->ip_blocks[i].version->funcs->name, r);
+ 				return r;
++			}
++			adev->ip_blocks[i].status.hw = true;
+ 		}
+ 	}
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+index c100845409f794..ffdb966c4127ee 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+@@ -45,6 +45,8 @@ MODULE_FIRMWARE("amdgpu/gc_9_4_3_mec.bin");
+ MODULE_FIRMWARE("amdgpu/gc_9_4_4_mec.bin");
+ MODULE_FIRMWARE("amdgpu/gc_9_4_3_rlc.bin");
+ MODULE_FIRMWARE("amdgpu/gc_9_4_4_rlc.bin");
++MODULE_FIRMWARE("amdgpu/gc_9_4_3_sjt_mec.bin");
++MODULE_FIRMWARE("amdgpu/gc_9_4_4_sjt_mec.bin");
+ 
+ #define GFX9_MEC_HPD_SIZE 4096
+ #define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
+@@ -574,8 +576,12 @@ static int gfx_v9_4_3_init_cp_compute_microcode(struct amdgpu_device *adev,
+ {
+ 	int err;
+ 
+-	err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
+-				   "amdgpu/%s_mec.bin", chip_name);
++	if (amdgpu_sriov_vf(adev))
++		err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
++				"amdgpu/%s_sjt_mec.bin", chip_name);
++	else
++		err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
++				"amdgpu/%s_mec.bin", chip_name);
+ 	if (err)
+ 		goto out;
+ 	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+index 8ee3d07ffbdfa2..f31e9fbf634a0f 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+@@ -306,7 +306,7 @@ svm_migrate_copy_to_vram(struct kfd_node *node, struct svm_range *prange,
+ 		spage = migrate_pfn_to_page(migrate->src[i]);
+ 		if (spage && !is_zone_device_page(spage)) {
+ 			src[i] = dma_map_page(dev, spage, 0, PAGE_SIZE,
+-					      DMA_TO_DEVICE);
++					      DMA_BIDIRECTIONAL);
+ 			r = dma_mapping_error(dev, src[i]);
+ 			if (r) {
+ 				dev_err(dev, "%s: fail %d dma_map_page\n",
+@@ -630,7 +630,7 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
+ 			goto out_oom;
+ 		}
+ 
+-		dst[i] = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_FROM_DEVICE);
++		dst[i] = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ 		r = dma_mapping_error(dev, dst[i]);
+ 		if (r) {
+ 			dev_err(adev->dev, "%s: fail %d dma_map_page\n", __func__, r);
+diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
+index 61f4a38e7d2bf6..8f786592143b6c 100644
+--- a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
++++ b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
+@@ -153,7 +153,16 @@ static int adv7511_hdmi_hw_params(struct device *dev, void *data,
+ 			   ADV7511_AUDIO_CFG3_LEN_MASK, len);
+ 	regmap_update_bits(adv7511->regmap, ADV7511_REG_I2C_FREQ_ID_CFG,
+ 			   ADV7511_I2C_FREQ_ID_CFG_RATE_MASK, rate << 4);
+-	regmap_write(adv7511->regmap, 0x73, 0x1);
++
++	/* send current Audio infoframe values while updating */
++	regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE,
++			   BIT(5), BIT(5));
++
++	regmap_write(adv7511->regmap, ADV7511_REG_AUDIO_INFOFRAME(0), 0x1);
++
++	/* use Audio infoframe updated info */
++	regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE,
++			   BIT(5), 0);
+ 
+ 	return 0;
+ }
+@@ -184,8 +193,9 @@ static int audio_startup(struct device *dev, void *data)
+ 	regmap_update_bits(adv7511->regmap, ADV7511_REG_GC(0),
+ 				BIT(7) | BIT(6), BIT(7));
+ 	/* use Audio infoframe updated info */
+-	regmap_update_bits(adv7511->regmap, ADV7511_REG_GC(1),
++	regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE,
+ 				BIT(5), 0);
++
+ 	/* enable SPDIF receiver */
+ 	if (adv7511->audio_source == ADV7511_AUDIO_SOURCE_SPDIF)
+ 		regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CONFIG,
+diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+index eb5919b382635e..a13b3d8ab6ac60 100644
+--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
++++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+@@ -1241,8 +1241,10 @@ static int adv7511_probe(struct i2c_client *i2c)
+ 		return ret;
+ 
+ 	ret = adv7511_init_regulators(adv7511);
+-	if (ret)
+-		return dev_err_probe(dev, ret, "failed to init regulators\n");
++	if (ret) {
++		dev_err_probe(dev, ret, "failed to init regulators\n");
++		goto err_of_node_put;
++	}
+ 
+ 	/*
+ 	 * The power down GPIO is optional. If present, toggle it from active to
+@@ -1363,6 +1365,8 @@ static int adv7511_probe(struct i2c_client *i2c)
+ 	i2c_unregister_device(adv7511->i2c_edid);
+ uninit_regulators:
+ 	adv7511_uninit_regulators(adv7511);
++err_of_node_put:
++	of_node_put(adv7511->host_node);
+ 
+ 	return ret;
+ }
+@@ -1371,6 +1375,8 @@ static void adv7511_remove(struct i2c_client *i2c)
+ {
+ 	struct adv7511 *adv7511 = i2c_get_clientdata(i2c);
+ 
++	of_node_put(adv7511->host_node);
++
+ 	adv7511_uninit_regulators(adv7511);
+ 
+ 	drm_bridge_remove(&adv7511->bridge);
+diff --git a/drivers/gpu/drm/bridge/adv7511/adv7533.c b/drivers/gpu/drm/bridge/adv7511/adv7533.c
+index 4481489aaf5ebf..122ad91e8a3293 100644
+--- a/drivers/gpu/drm/bridge/adv7511/adv7533.c
++++ b/drivers/gpu/drm/bridge/adv7511/adv7533.c
+@@ -172,7 +172,7 @@ int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv)
+ 
+ 	of_property_read_u32(np, "adi,dsi-lanes", &num_lanes);
+ 
+-	if (num_lanes < 1 || num_lanes > 4)
++	if (num_lanes < 2 || num_lanes > 4)
+ 		return -EINVAL;
+ 
+ 	adv->num_dsi_lanes = num_lanes;
+@@ -181,8 +181,6 @@ int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv)
+ 	if (!adv->host_node)
+ 		return -ENODEV;
+ 
+-	of_node_put(adv->host_node);
+-
+ 	adv->use_timing_gen = !of_property_read_bool(np,
+ 						"adi,disable-timing-generator");
+ 
+diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
+index 4a6c3040ca15ef..f11309efff3398 100644
+--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.c
++++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
+@@ -2084,14 +2084,6 @@ static void intel_c10_pll_program(struct drm_i915_private *i915,
+ 		      0, C10_VDR_CTRL_MSGBUS_ACCESS,
+ 		      MB_WRITE_COMMITTED);
+ 
+-	/* Custom width needs to be programmed to 0 for both the phy lanes */
+-	intel_cx0_rmw(encoder, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CUSTOM_WIDTH,
+-		      C10_VDR_CUSTOM_WIDTH_MASK, C10_VDR_CUSTOM_WIDTH_8_10,
+-		      MB_WRITE_COMMITTED);
+-	intel_cx0_rmw(encoder, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CONTROL(1),
+-		      0, C10_VDR_CTRL_UPDATE_CFG,
+-		      MB_WRITE_COMMITTED);
+-
+ 	/* Program the pll values only for the master lane */
+ 	for (i = 0; i < ARRAY_SIZE(pll_state->pll); i++)
+ 		intel_cx0_write(encoder, INTEL_CX0_LANE0, PHY_C10_VDR_PLL(i),
+@@ -2101,6 +2093,10 @@ static void intel_c10_pll_program(struct drm_i915_private *i915,
+ 	intel_cx0_write(encoder, INTEL_CX0_LANE0, PHY_C10_VDR_CMN(0), pll_state->cmn, MB_WRITE_COMMITTED);
+ 	intel_cx0_write(encoder, INTEL_CX0_LANE0, PHY_C10_VDR_TX(0), pll_state->tx, MB_WRITE_COMMITTED);
+ 
++	/* Custom width needs to be programmed to 0 for both the phy lanes */
++	intel_cx0_rmw(encoder, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CUSTOM_WIDTH,
++		      C10_VDR_CUSTOM_WIDTH_MASK, C10_VDR_CUSTOM_WIDTH_8_10,
++		      MB_WRITE_COMMITTED);
+ 	intel_cx0_rmw(encoder, INTEL_CX0_LANE0, PHY_C10_VDR_CONTROL(1),
+ 		      0, C10_VDR_CTRL_MASTER_LANE | C10_VDR_CTRL_UPDATE_CFG,
+ 		      MB_WRITE_COMMITTED);
+diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
+index c864d101faf941..9378d5901c4939 100644
+--- a/drivers/gpu/drm/i915/gt/intel_rc6.c
++++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
+@@ -133,7 +133,7 @@ static void gen11_rc6_enable(struct intel_rc6 *rc6)
+ 			GEN9_MEDIA_PG_ENABLE |
+ 			GEN11_MEDIA_SAMPLER_PG_ENABLE;
+ 
+-	if (GRAPHICS_VER(gt->i915) >= 12) {
++	if (GRAPHICS_VER(gt->i915) >= 12 && !IS_DG1(gt->i915)) {
+ 		for (i = 0; i < I915_MAX_VCS; i++)
+ 			if (HAS_ENGINE(gt, _VCS(i)))
+ 				pg_enable |= (VDN_HCP_POWERGATE_ENABLE(i) |
+diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
+index 2a093540354e89..84e327b569252f 100644
+--- a/drivers/gpu/drm/xe/xe_bo.c
++++ b/drivers/gpu/drm/xe/xe_bo.c
+@@ -722,7 +722,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
+ 	    new_mem->mem_type == XE_PL_SYSTEM) {
+ 		long timeout = dma_resv_wait_timeout(ttm_bo->base.resv,
+ 						     DMA_RESV_USAGE_BOOKKEEP,
+-						     true,
++						     false,
+ 						     MAX_SCHEDULE_TIMEOUT);
+ 		if (timeout < 0) {
+ 			ret = timeout;
+@@ -846,8 +846,16 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
+ 
+ out:
+ 	if ((!ttm_bo->resource || ttm_bo->resource->mem_type == XE_PL_SYSTEM) &&
+-	    ttm_bo->ttm)
++	    ttm_bo->ttm) {
++		long timeout = dma_resv_wait_timeout(ttm_bo->base.resv,
++						     DMA_RESV_USAGE_KERNEL,
++						     false,
++						     MAX_SCHEDULE_TIMEOUT);
++		if (timeout < 0)
++			ret = timeout;
++
+ 		xe_tt_unmap_sg(ttm_bo->ttm);
++	}
+ 
+ 	return ret;
+ }
+diff --git a/drivers/gpu/drm/xe/xe_devcoredump.c b/drivers/gpu/drm/xe/xe_devcoredump.c
+index c18e463092afa5..85aa3ab0da3b87 100644
+--- a/drivers/gpu/drm/xe/xe_devcoredump.c
++++ b/drivers/gpu/drm/xe/xe_devcoredump.c
+@@ -104,7 +104,11 @@ static ssize_t __xe_devcoredump_read(char *buffer, size_t count,
+ 	drm_puts(&p, "\n**** GuC CT ****\n");
+ 	xe_guc_ct_snapshot_print(ss->ct, &p);
+ 
+-	drm_puts(&p, "\n**** Contexts ****\n");
++	/*
++	 * Don't add a new section header here because the mesa debug decoder
++	 * tool expects the context information to be in the 'GuC CT' section.
++	 */
++	/* drm_puts(&p, "\n**** Contexts ****\n"); */
+ 	xe_guc_exec_queue_snapshot_print(ss->ge, &p);
+ 
+ 	drm_puts(&p, "\n**** Job ****\n");
+@@ -358,6 +362,15 @@ void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix,
+ 	char buff[ASCII85_BUFSZ], *line_buff;
+ 	size_t line_pos = 0;
+ 
++	/*
++	 * Splitting blobs across multiple lines is not compatible with the mesa
++	 * debug decoder tool. Note that even dropping the explicit '\n' below
++	 * doesn't help because the GuC log is so big some underlying implementation
++	 * still splits the lines at 512K characters. So just bail completely for
++	 * the moment.
++	 */
++	return;
++
+ #define DMESG_MAX_LINE_LEN	800
+ #define MIN_SPACE		(ASCII85_BUFSZ + 2)		/* 85 + "\n\0" */
+ 
+diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
+index fd0f3b3c9101d4..268cd3123be9d9 100644
+--- a/drivers/gpu/drm/xe/xe_exec_queue.c
++++ b/drivers/gpu/drm/xe/xe_exec_queue.c
+@@ -8,6 +8,7 @@
+ #include <linux/nospec.h>
+ 
+ #include <drm/drm_device.h>
++#include <drm/drm_drv.h>
+ #include <drm/drm_file.h>
+ #include <uapi/drm/xe_drm.h>
+ 
+@@ -762,9 +763,11 @@ bool xe_exec_queue_is_idle(struct xe_exec_queue *q)
+  */
+ void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
+ {
++	struct xe_device *xe = gt_to_xe(q->gt);
+ 	struct xe_file *xef;
+ 	struct xe_lrc *lrc;
+ 	u32 old_ts, new_ts;
++	int idx;
+ 
+ 	/*
+ 	 * Jobs that are run during driver load may use an exec_queue, but are
+@@ -774,6 +777,10 @@ void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
+ 	if (!q->vm || !q->vm->xef)
+ 		return;
+ 
++	/* Synchronize with unbind while holding the xe file open */
++	if (!drm_dev_enter(&xe->drm, &idx))
++		return;
++
+ 	xef = q->vm->xef;
+ 
+ 	/*
+@@ -787,6 +794,8 @@ void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
+ 	lrc = q->lrc[0];
+ 	new_ts = xe_lrc_update_timestamp(lrc, &old_ts);
+ 	xef->run_ticks[q->class] += (new_ts - old_ts) * q->width;
++
++	drm_dev_exit(idx);
+ }
+ 
+ /**
+diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
+index afdb477ecf833d..c9ed996b9cb0c3 100644
+--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
++++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
+@@ -2038,7 +2038,7 @@ static int pf_validate_vf_config(struct xe_gt *gt, unsigned int vfid)
+ 	valid_any = valid_any || (valid_ggtt && is_primary);
+ 
+ 	if (IS_DGFX(xe)) {
+-		bool valid_lmem = pf_get_vf_config_ggtt(primary_gt, vfid);
++		bool valid_lmem = pf_get_vf_config_lmem(primary_gt, vfid);
+ 
+ 		valid_any = valid_any || (valid_lmem && is_primary);
+ 		valid_all = valid_all && valid_lmem;
+diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
+index 64ace0b968f07f..91db10515d7472 100644
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -690,6 +690,7 @@ cma_validate_port(struct ib_device *device, u32 port,
+ 	int bound_if_index = dev_addr->bound_dev_if;
+ 	int dev_type = dev_addr->dev_type;
+ 	struct net_device *ndev = NULL;
++	struct net_device *pdev = NULL;
+ 
+ 	if (!rdma_dev_access_netns(device, id_priv->id.route.addr.dev_addr.net))
+ 		goto out;
+@@ -714,6 +715,21 @@ cma_validate_port(struct ib_device *device, u32 port,
+ 
+ 		rcu_read_lock();
+ 		ndev = rcu_dereference(sgid_attr->ndev);
++		if (ndev->ifindex != bound_if_index) {
++			pdev = dev_get_by_index_rcu(dev_addr->net, bound_if_index);
++			if (pdev) {
++				if (is_vlan_dev(pdev)) {
++					pdev = vlan_dev_real_dev(pdev);
++					if (ndev->ifindex == pdev->ifindex)
++						bound_if_index = pdev->ifindex;
++				}
++				if (is_vlan_dev(ndev)) {
++					pdev = vlan_dev_real_dev(ndev);
++					if (bound_if_index == pdev->ifindex)
++						bound_if_index = ndev->ifindex;
++				}
++			}
++		}
+ 		if (!net_eq(dev_net(ndev), dev_addr->net) ||
+ 		    ndev->ifindex != bound_if_index) {
+ 			rdma_put_gid_attr(sgid_attr);
+diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
+index 7dc8e2ec62cc8b..f121899863034a 100644
+--- a/drivers/infiniband/core/nldev.c
++++ b/drivers/infiniband/core/nldev.c
+@@ -2802,8 +2802,8 @@ int rdma_nl_notify_event(struct ib_device *device, u32 port_num,
+ 			  enum rdma_nl_notify_event_type type)
+ {
+ 	struct sk_buff *skb;
++	int ret = -EMSGSIZE;
+ 	struct net *net;
+-	int ret = 0;
+ 	void *nlh;
+ 
+ 	net = read_pnet(&device->coredev.rdma_net);
+diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
+index a4cce360df2178..edef79daed3fa8 100644
+--- a/drivers/infiniband/core/uverbs_cmd.c
++++ b/drivers/infiniband/core/uverbs_cmd.c
+@@ -161,7 +161,7 @@ static const void __user *uverbs_request_next_ptr(struct uverbs_req_iter *iter,
+ {
+ 	const void __user *res = iter->cur;
+ 
+-	if (iter->cur + len > iter->end)
++	if (len > iter->end - iter->cur)
+ 		return (void __force __user *)ERR_PTR(-ENOSPC);
+ 	iter->cur += len;
+ 	return res;
+@@ -2010,11 +2010,13 @@ static int ib_uverbs_post_send(struct uverbs_attr_bundle *attrs)
+ 	ret = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
+ 	if (ret)
+ 		return ret;
+-	wqes = uverbs_request_next_ptr(&iter, cmd.wqe_size * cmd.wr_count);
++	wqes = uverbs_request_next_ptr(&iter, size_mul(cmd.wqe_size,
++						       cmd.wr_count));
+ 	if (IS_ERR(wqes))
+ 		return PTR_ERR(wqes);
+-	sgls = uverbs_request_next_ptr(
+-		&iter, cmd.sge_count * sizeof(struct ib_uverbs_sge));
++	sgls = uverbs_request_next_ptr(&iter,
++				       size_mul(cmd.sge_count,
++						sizeof(struct ib_uverbs_sge)));
+ 	if (IS_ERR(sgls))
+ 		return PTR_ERR(sgls);
+ 	ret = uverbs_request_finish(&iter);
+@@ -2200,11 +2202,11 @@ ib_uverbs_unmarshall_recv(struct uverbs_req_iter *iter, u32 wr_count,
+ 	if (wqe_size < sizeof(struct ib_uverbs_recv_wr))
+ 		return ERR_PTR(-EINVAL);
+ 
+-	wqes = uverbs_request_next_ptr(iter, wqe_size * wr_count);
++	wqes = uverbs_request_next_ptr(iter, size_mul(wqe_size, wr_count));
+ 	if (IS_ERR(wqes))
+ 		return ERR_CAST(wqes);
+-	sgls = uverbs_request_next_ptr(
+-		iter, sge_count * sizeof(struct ib_uverbs_sge));
++	sgls = uverbs_request_next_ptr(iter, size_mul(sge_count,
++						      sizeof(struct ib_uverbs_sge)));
+ 	if (IS_ERR(sgls))
+ 		return ERR_CAST(sgls);
+ 	ret = uverbs_request_finish(iter);
+diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+index 160096792224b1..b20cffcc3e7d2d 100644
+--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
++++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+@@ -156,7 +156,7 @@ int bnxt_re_query_device(struct ib_device *ibdev,
+ 
+ 	ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
+ 	ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
+-	ib_attr->hw_ver = rdev->en_dev->pdev->subsystem_device;
++	ib_attr->hw_ver = rdev->en_dev->pdev->revision;
+ 	ib_attr->max_qp = dev_attr->max_qp;
+ 	ib_attr->max_qp_wr = dev_attr->max_qp_wqes;
+ 	ib_attr->device_cap_flags =
+@@ -2107,18 +2107,20 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
+ 		}
+ 	}
+ 
+-	if (qp_attr_mask & IB_QP_PATH_MTU) {
+-		qp->qplib_qp.modify_flags |=
+-				CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
+-		qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu);
+-		qp->qplib_qp.mtu = ib_mtu_enum_to_int(qp_attr->path_mtu);
+-	} else if (qp_attr->qp_state == IB_QPS_RTR) {
+-		qp->qplib_qp.modify_flags |=
+-			CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
+-		qp->qplib_qp.path_mtu =
+-			__from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu));
+-		qp->qplib_qp.mtu =
+-			ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
++	if (qp_attr->qp_state == IB_QPS_RTR) {
++		enum ib_mtu qpmtu;
++
++		qpmtu = iboe_get_mtu(rdev->netdev->mtu);
++		if (qp_attr_mask & IB_QP_PATH_MTU) {
++			if (ib_mtu_enum_to_int(qp_attr->path_mtu) >
++			    ib_mtu_enum_to_int(qpmtu))
++				return -EINVAL;
++			qpmtu = qp_attr->path_mtu;
++		}
++
++		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
++		qp->qplib_qp.path_mtu = __from_ib_mtu(qpmtu);
++		qp->qplib_qp.mtu = ib_mtu_enum_to_int(qpmtu);
+ 	}
+ 
+ 	if (qp_attr_mask & IB_QP_TIMEOUT) {
+@@ -2763,7 +2765,8 @@ static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
+ 		wr = wr->next;
+ 	}
+ 	bnxt_qplib_post_send_db(&qp->qplib_qp);
+-	bnxt_ud_qp_hw_stall_workaround(qp);
++	if (!bnxt_qplib_is_chip_gen_p5_p7(qp->rdev->chip_ctx))
++		bnxt_ud_qp_hw_stall_workaround(qp);
+ 	spin_unlock_irqrestore(&qp->sq_lock, flags);
+ 	return rc;
+ }
+@@ -2875,7 +2878,8 @@ int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr,
+ 		wr = wr->next;
+ 	}
+ 	bnxt_qplib_post_send_db(&qp->qplib_qp);
+-	bnxt_ud_qp_hw_stall_workaround(qp);
++	if (!bnxt_qplib_is_chip_gen_p5_p7(qp->rdev->chip_ctx))
++		bnxt_ud_qp_hw_stall_workaround(qp);
+ 	spin_unlock_irqrestore(&qp->sq_lock, flags);
+ 
+ 	return rc;
+diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
+index 2ac8ddbed576f5..8abd1b723f8ff5 100644
+--- a/drivers/infiniband/hw/bnxt_re/main.c
++++ b/drivers/infiniband/hw/bnxt_re/main.c
+@@ -1435,11 +1435,8 @@ static bool bnxt_re_is_qp1_or_shadow_qp(struct bnxt_re_dev *rdev,
+ 
+ static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev)
+ {
+-	int mask = IB_QP_STATE;
+-	struct ib_qp_attr qp_attr;
+ 	struct bnxt_re_qp *qp;
+ 
+-	qp_attr.qp_state = IB_QPS_ERR;
+ 	mutex_lock(&rdev->qp_lock);
+ 	list_for_each_entry(qp, &rdev->qp_list, list) {
+ 		/* Modify the state of all QPs except QP1/Shadow QP */
+@@ -1447,12 +1444,9 @@ static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev)
+ 			if (qp->qplib_qp.state !=
+ 			    CMDQ_MODIFY_QP_NEW_STATE_RESET &&
+ 			    qp->qplib_qp.state !=
+-			    CMDQ_MODIFY_QP_NEW_STATE_ERR) {
++			    CMDQ_MODIFY_QP_NEW_STATE_ERR)
+ 				bnxt_re_dispatch_event(&rdev->ibdev, &qp->ib_qp,
+ 						       1, IB_EVENT_QP_FATAL);
+-				bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, mask,
+-						  NULL);
+-			}
+ 		}
+ 	}
+ 	mutex_unlock(&rdev->qp_lock);
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+index 7ad83566ab0f41..828e2f9808012b 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+@@ -658,13 +658,6 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
+ 	rc = bnxt_qplib_alloc_init_hwq(&srq->hwq, &hwq_attr);
+ 	if (rc)
+ 		return rc;
+-
+-	srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
+-			   GFP_KERNEL);
+-	if (!srq->swq) {
+-		rc = -ENOMEM;
+-		goto fail;
+-	}
+ 	srq->dbinfo.flags = 0;
+ 	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+ 				 CMDQ_BASE_OPCODE_CREATE_SRQ,
+@@ -693,9 +686,17 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
+ 	spin_lock_init(&srq->lock);
+ 	srq->start_idx = 0;
+ 	srq->last_idx = srq->hwq.max_elements - 1;
+-	for (idx = 0; idx < srq->hwq.max_elements; idx++)
+-		srq->swq[idx].next_idx = idx + 1;
+-	srq->swq[srq->last_idx].next_idx = -1;
++	if (!srq->hwq.is_user) {
++		srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
++				   GFP_KERNEL);
++		if (!srq->swq) {
++			rc = -ENOMEM;
++			goto fail;
++		}
++		for (idx = 0; idx < srq->hwq.max_elements; idx++)
++			srq->swq[idx].next_idx = idx + 1;
++		srq->swq[srq->last_idx].next_idx = -1;
++	}
+ 
+ 	srq->id = le32_to_cpu(resp.xid);
+ 	srq->dbinfo.hwq = &srq->hwq;
+@@ -999,9 +1000,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
+ 	u32 tbl_indx;
+ 	u16 nsge;
+ 
+-	if (res->dattr)
+-		qp->is_host_msn_tbl = _is_host_msn_table(res->dattr->dev_cap_flags2);
+-
++	qp->is_host_msn_tbl = _is_host_msn_table(res->dattr->dev_cap_flags2);
+ 	sq->dbinfo.flags = 0;
+ 	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+ 				 CMDQ_BASE_OPCODE_CREATE_QP,
+@@ -1033,7 +1032,12 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
+ 				    : 0;
+ 	/* Update msn tbl size */
+ 	if (qp->is_host_msn_tbl && psn_sz) {
+-		hwq_attr.aux_depth = roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
++		if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
++			hwq_attr.aux_depth =
++				roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
++		else
++			hwq_attr.aux_depth =
++				roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode)) / 2;
+ 		qp->msn_tbl_sz = hwq_attr.aux_depth;
+ 		qp->msn = 0;
+ 	}
+@@ -1043,13 +1047,14 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
+ 	if (rc)
+ 		return rc;
+ 
+-	rc = bnxt_qplib_alloc_init_swq(sq);
+-	if (rc)
+-		goto fail_sq;
+-
+-	if (psn_sz)
+-		bnxt_qplib_init_psn_ptr(qp, psn_sz);
++	if (!sq->hwq.is_user) {
++		rc = bnxt_qplib_alloc_init_swq(sq);
++		if (rc)
++			goto fail_sq;
+ 
++		if (psn_sz)
++			bnxt_qplib_init_psn_ptr(qp, psn_sz);
++	}
+ 	req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
+ 	pbl = &sq->hwq.pbl[PBL_LVL_0];
+ 	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
+@@ -1075,9 +1080,11 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
+ 		rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
+ 		if (rc)
+ 			goto sq_swq;
+-		rc = bnxt_qplib_alloc_init_swq(rq);
+-		if (rc)
+-			goto fail_rq;
++		if (!rq->hwq.is_user) {
++			rc = bnxt_qplib_alloc_init_swq(rq);
++			if (rc)
++				goto fail_rq;
++		}
+ 
+ 		req.rq_size = cpu_to_le32(rq->max_wqe);
+ 		pbl = &rq->hwq.pbl[PBL_LVL_0];
+@@ -1173,9 +1180,11 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
+ 		rq->dbinfo.db = qp->dpi->dbr;
+ 		rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
+ 	}
++	spin_lock_bh(&rcfw->tbl_lock);
+ 	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
+ 	rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
+ 	rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;
++	spin_unlock_bh(&rcfw->tbl_lock);
+ 
+ 	return 0;
+ fail:
+@@ -2596,10 +2605,12 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
+ 			bnxt_qplib_add_flush_qp(qp);
+ 		} else {
+ 			/* Before we complete, do WA 9060 */
+-			if (do_wa9060(qp, cq, cq_cons, sq->swq_last,
+-				      cqe_sq_cons)) {
+-				*lib_qp = qp;
+-				goto out;
++			if (!bnxt_qplib_is_chip_gen_p5_p7(qp->cctx)) {
++				if (do_wa9060(qp, cq, cq_cons, sq->swq_last,
++					      cqe_sq_cons)) {
++					*lib_qp = qp;
++					goto out;
++				}
+ 			}
+ 			if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
+ 				cqe->status = CQ_REQ_STATUS_OK;
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+index f55958e5fddb4a..d8c71c024613bf 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
++++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+@@ -114,7 +114,6 @@ struct bnxt_qplib_sge {
+ 	u32				size;
+ };
+ 
+-#define BNXT_QPLIB_QP_MAX_SGL	6
+ struct bnxt_qplib_swq {
+ 	u64				wr_id;
+ 	int				next_idx;
+@@ -154,7 +153,7 @@ struct bnxt_qplib_swqe {
+ #define BNXT_QPLIB_SWQE_FLAGS_UC_FENCE			BIT(2)
+ #define BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT		BIT(3)
+ #define BNXT_QPLIB_SWQE_FLAGS_INLINE			BIT(4)
+-	struct bnxt_qplib_sge		sg_list[BNXT_QPLIB_QP_MAX_SGL];
++	struct bnxt_qplib_sge		sg_list[BNXT_VAR_MAX_SGE];
+ 	int				num_sge;
+ 	/* Max inline data is 96 bytes */
+ 	u32				inline_len;
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+index e82bd37158ad6c..7a099580ca8bff 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+@@ -424,7 +424,8 @@ static int __send_message_basic_sanity(struct bnxt_qplib_rcfw *rcfw,
+ 
+ 	/* Prevent posting if f/w is not in a state to process */
+ 	if (test_bit(ERR_DEVICE_DETACHED, &rcfw->cmdq.flags))
+-		return bnxt_qplib_map_rc(opcode);
++		return -ENXIO;
++
+ 	if (test_bit(FIRMWARE_STALL_DETECTED, &cmdq->flags))
+ 		return -ETIMEDOUT;
+ 
+@@ -493,7 +494,7 @@ static int __bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
+ 
+ 	rc = __send_message_basic_sanity(rcfw, msg, opcode);
+ 	if (rc)
+-		return rc;
++		return rc == -ENXIO ? bnxt_qplib_map_rc(opcode) : rc;
+ 
+ 	rc = __send_message(rcfw, msg, opcode);
+ 	if (rc)
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+index e29fbbdab9fd68..3cca7b1395f6a7 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+@@ -129,12 +129,18 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
+ 	attr->max_qp_init_rd_atom =
+ 		sb->max_qp_init_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ?
+ 		BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_init_rd_atom;
+-	attr->max_qp_wqes = le16_to_cpu(sb->max_qp_wr);
+-	/*
+-	 * 128 WQEs needs to be reserved for the HW (8916). Prevent
+-	 * reporting the max number
+-	 */
+-	attr->max_qp_wqes -= BNXT_QPLIB_RESERVED_QP_WRS + 1;
++	attr->max_qp_wqes = le16_to_cpu(sb->max_qp_wr) - 1;
++	if (!bnxt_qplib_is_chip_gen_p5_p7(rcfw->res->cctx)) {
++		/*
++		 * 128 WQEs needs to be reserved for the HW (8916). Prevent
++		 * reporting the max number on legacy devices
++		 */
++		attr->max_qp_wqes -= BNXT_QPLIB_RESERVED_QP_WRS + 1;
++	}
++
++	/* Adjust for max_qp_wqes for variable wqe */
++	if (cctx->modes.wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
++		attr->max_qp_wqes = BNXT_VAR_MAX_WQE - 1;
+ 
+ 	attr->max_qp_sges = cctx->modes.wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE ?
+ 			    min_t(u32, sb->max_sge_var_wqe, BNXT_VAR_MAX_SGE) : 6;
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
+index f84521be3bea4a..605562122ecce2 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
+@@ -931,6 +931,7 @@ struct hns_roce_hem_item {
+ 	size_t count; /* max ba numbers */
+ 	int start; /* start buf offset in this hem */
+ 	int end; /* end buf offset in this hem */
++	bool exist_bt;
+ };
+ 
+ /* All HEM items are linked in a tree structure */
+@@ -959,6 +960,7 @@ hem_list_alloc_item(struct hns_roce_dev *hr_dev, int start, int end, int count,
+ 		}
+ 	}
+ 
++	hem->exist_bt = exist_bt;
+ 	hem->count = count;
+ 	hem->start = start;
+ 	hem->end = end;
+@@ -969,22 +971,22 @@ hem_list_alloc_item(struct hns_roce_dev *hr_dev, int start, int end, int count,
+ }
+ 
+ static void hem_list_free_item(struct hns_roce_dev *hr_dev,
+-			       struct hns_roce_hem_item *hem, bool exist_bt)
++			       struct hns_roce_hem_item *hem)
+ {
+-	if (exist_bt)
++	if (hem->exist_bt)
+ 		dma_free_coherent(hr_dev->dev, hem->count * BA_BYTE_LEN,
+ 				  hem->addr, hem->dma_addr);
+ 	kfree(hem);
+ }
+ 
+ static void hem_list_free_all(struct hns_roce_dev *hr_dev,
+-			      struct list_head *head, bool exist_bt)
++			      struct list_head *head)
+ {
+ 	struct hns_roce_hem_item *hem, *temp_hem;
+ 
+ 	list_for_each_entry_safe(hem, temp_hem, head, list) {
+ 		list_del(&hem->list);
+-		hem_list_free_item(hr_dev, hem, exist_bt);
++		hem_list_free_item(hr_dev, hem);
+ 	}
+ }
+ 
+@@ -1084,6 +1086,10 @@ int hns_roce_hem_list_calc_root_ba(const struct hns_roce_buf_region *regions,
+ 
+ 	for (i = 0; i < region_cnt; i++) {
+ 		r = (struct hns_roce_buf_region *)&regions[i];
++		/* when r->hopnum = 0, the region should not occupy root_ba. */
++		if (!r->hopnum)
++			continue;
++
+ 		if (r->hopnum > 1) {
+ 			step = hem_list_calc_ba_range(r->hopnum, 1, unit);
+ 			if (step > 0)
+@@ -1177,7 +1183,7 @@ static int hem_list_alloc_mid_bt(struct hns_roce_dev *hr_dev,
+ 
+ err_exit:
+ 	for (level = 1; level < hopnum; level++)
+-		hem_list_free_all(hr_dev, &temp_list[level], true);
++		hem_list_free_all(hr_dev, &temp_list[level]);
+ 
+ 	return ret;
+ }
+@@ -1218,16 +1224,26 @@ static int alloc_fake_root_bt(struct hns_roce_dev *hr_dev, void *cpu_base,
+ {
+ 	struct hns_roce_hem_item *hem;
+ 
++	/* This is on the has_mtt branch, if r->hopnum
++	 * is 0, there is no root_ba to reuse for the
++	 * region's fake hem, so a dma_alloc request is
++	 * necessary here.
++	 */
+ 	hem = hem_list_alloc_item(hr_dev, r->offset, r->offset + r->count - 1,
+-				  r->count, false);
++				  r->count, !r->hopnum);
+ 	if (!hem)
+ 		return -ENOMEM;
+ 
+-	hem_list_assign_bt(hem, cpu_base, phy_base);
++	/* The root_ba can be reused only when r->hopnum > 0. */
++	if (r->hopnum)
++		hem_list_assign_bt(hem, cpu_base, phy_base);
+ 	list_add(&hem->list, branch_head);
+ 	list_add(&hem->sibling, leaf_head);
+ 
+-	return r->count;
++	/* If r->hopnum == 0, 0 is returned,
++	 * so that the root_bt entry is not occupied.
++	 */
++	return r->hopnum ? r->count : 0;
+ }
+ 
+ static int setup_middle_bt(struct hns_roce_dev *hr_dev, void *cpu_base,
+@@ -1271,7 +1287,7 @@ setup_root_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem_list *hem_list,
+ 		return -ENOMEM;
+ 
+ 	total = 0;
+-	for (i = 0; i < region_cnt && total < max_ba_num; i++) {
++	for (i = 0; i < region_cnt && total <= max_ba_num; i++) {
+ 		r = &regions[i];
+ 		if (!r->count)
+ 			continue;
+@@ -1337,9 +1353,9 @@ static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev,
+ 			     region_cnt);
+ 	if (ret) {
+ 		for (i = 0; i < region_cnt; i++)
+-			hem_list_free_all(hr_dev, &head.branch[i], false);
++			hem_list_free_all(hr_dev, &head.branch[i]);
+ 
+-		hem_list_free_all(hr_dev, &head.root, true);
++		hem_list_free_all(hr_dev, &head.root);
+ 	}
+ 
+ 	return ret;
+@@ -1402,10 +1418,9 @@ void hns_roce_hem_list_release(struct hns_roce_dev *hr_dev,
+ 
+ 	for (i = 0; i < HNS_ROCE_MAX_BT_REGION; i++)
+ 		for (j = 0; j < HNS_ROCE_MAX_BT_LEVEL; j++)
+-			hem_list_free_all(hr_dev, &hem_list->mid_bt[i][j],
+-					  j != 0);
++			hem_list_free_all(hr_dev, &hem_list->mid_bt[i][j]);
+ 
+-	hem_list_free_all(hr_dev, &hem_list->root_bt, true);
++	hem_list_free_all(hr_dev, &hem_list->root_bt);
+ 	INIT_LIST_HEAD(&hem_list->btm_bt);
+ 	hem_list->root_ba = 0;
+ }
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+index 697b17cca02e71..0144e7210d05a1 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -468,7 +468,7 @@ static inline int set_ud_wqe(struct hns_roce_qp *qp,
+ 	valid_num_sge = calc_wr_sge_num(wr, &msg_len);
+ 
+ 	ret = set_ud_opcode(ud_sq_wqe, wr);
+-	if (WARN_ON(ret))
++	if (WARN_ON_ONCE(ret))
+ 		return ret;
+ 
+ 	ud_sq_wqe->msg_len = cpu_to_le32(msg_len);
+@@ -572,7 +572,7 @@ static inline int set_rc_wqe(struct hns_roce_qp *qp,
+ 	rc_sq_wqe->msg_len = cpu_to_le32(msg_len);
+ 
+ 	ret = set_rc_opcode(hr_dev, rc_sq_wqe, wr);
+-	if (WARN_ON(ret))
++	if (WARN_ON_ONCE(ret))
+ 		return ret;
+ 
+ 	hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SO,
+@@ -670,6 +670,10 @@ static void write_dwqe(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
+ #define HNS_ROCE_SL_SHIFT 2
+ 	struct hns_roce_v2_rc_send_wqe *rc_sq_wqe = wqe;
+ 
++	if (unlikely(qp->state == IB_QPS_ERR)) {
++		flush_cqe(hr_dev, qp);
++		return;
++	}
+ 	/* All kinds of DirectWQE have the same header field layout */
+ 	hr_reg_enable(rc_sq_wqe, RC_SEND_WQE_FLAG);
+ 	hr_reg_write(rc_sq_wqe, RC_SEND_WQE_DB_SL_L, qp->sl);
+@@ -5619,6 +5623,9 @@ static void put_dip_ctx_idx(struct hns_roce_dev *hr_dev,
+ {
+ 	struct hns_roce_dip *hr_dip = hr_qp->dip;
+ 
++	if (!hr_dip)
++		return;
++
+ 	xa_lock(&hr_dev->qp_table.dip_xa);
+ 
+ 	hr_dip->qp_cnt--;
+diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
+index bf30b3a65a9ba9..55b9283bfc6f03 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
++++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
+@@ -814,11 +814,6 @@ int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
+ 	for (i = 0, mapped_cnt = 0; i < mtr->hem_cfg.region_count &&
+ 	     mapped_cnt < page_cnt; i++) {
+ 		r = &mtr->hem_cfg.region[i];
+-		/* if hopnum is 0, no need to map pages in this region */
+-		if (!r->hopnum) {
+-			mapped_cnt += r->count;
+-			continue;
+-		}
+ 
+ 		if (r->offset + r->count > page_cnt) {
+ 			ret = -EINVAL;
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index ac20ab3bbabf47..8c47cb4edd0a0a 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -2831,7 +2831,7 @@ static int mlx5_ib_get_plane_num(struct mlx5_core_dev *mdev, u8 *num_plane)
+ 	int err;
+ 
+ 	*num_plane = 0;
+-	if (!MLX5_CAP_GEN(mdev, ib_virt))
++	if (!MLX5_CAP_GEN(mdev, ib_virt) || !MLX5_CAP_GEN_2(mdev, multiplane))
+ 		return 0;
+ 
+ 	err = mlx5_query_hca_vport_context(mdev, 0, 1, 0, &vport_ctx);
+@@ -3631,7 +3631,8 @@ static int mlx5_ib_init_multiport_master(struct mlx5_ib_dev *dev)
+ 		list_for_each_entry(mpi, &mlx5_ib_unaffiliated_port_list,
+ 				    list) {
+ 			if (dev->sys_image_guid == mpi->sys_image_guid &&
+-			    (mlx5_core_native_port_num(mpi->mdev) - 1) == i) {
++			    (mlx5_core_native_port_num(mpi->mdev) - 1) == i &&
++			    mlx5_core_same_coredev_type(dev->mdev, mpi->mdev)) {
+ 				bound = mlx5_ib_bind_slave_port(dev, mpi);
+ 			}
+ 
+@@ -4776,7 +4777,8 @@ static int mlx5r_mp_probe(struct auxiliary_device *adev,
+ 
+ 	mutex_lock(&mlx5_ib_multiport_mutex);
+ 	list_for_each_entry(dev, &mlx5_ib_dev_list, ib_dev_list) {
+-		if (dev->sys_image_guid == mpi->sys_image_guid)
++		if (dev->sys_image_guid == mpi->sys_image_guid &&
++		    mlx5_core_same_coredev_type(dev->mdev, mpi->mdev))
+ 			bound = mlx5_ib_bind_slave_port(dev, mpi);
+ 
+ 		if (bound) {
+diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
+index 255677bc12b2ab..1ba4a0c8726aed 100644
+--- a/drivers/infiniband/sw/rxe/rxe.c
++++ b/drivers/infiniband/sw/rxe/rxe.c
+@@ -40,6 +40,8 @@ void rxe_dealloc(struct ib_device *ib_dev)
+ /* initialize rxe device parameters */
+ static void rxe_init_device_param(struct rxe_dev *rxe)
+ {
++	struct net_device *ndev;
++
+ 	rxe->max_inline_data			= RXE_MAX_INLINE_DATA;
+ 
+ 	rxe->attr.vendor_id			= RXE_VENDOR_ID;
+@@ -71,8 +73,15 @@ static void rxe_init_device_param(struct rxe_dev *rxe)
+ 	rxe->attr.max_fast_reg_page_list_len	= RXE_MAX_FMR_PAGE_LIST_LEN;
+ 	rxe->attr.max_pkeys			= RXE_MAX_PKEYS;
+ 	rxe->attr.local_ca_ack_delay		= RXE_LOCAL_CA_ACK_DELAY;
++
++	ndev = rxe_ib_device_get_netdev(&rxe->ib_dev);
++	if (!ndev)
++		return;
++
+ 	addrconf_addr_eui48((unsigned char *)&rxe->attr.sys_image_guid,
+-			rxe->ndev->dev_addr);
++			ndev->dev_addr);
++
++	dev_put(ndev);
+ 
+ 	rxe->max_ucontext			= RXE_MAX_UCONTEXT;
+ }
+@@ -109,10 +118,15 @@ static void rxe_init_port_param(struct rxe_port *port)
+ static void rxe_init_ports(struct rxe_dev *rxe)
+ {
+ 	struct rxe_port *port = &rxe->port;
++	struct net_device *ndev;
+ 
+ 	rxe_init_port_param(port);
++	ndev = rxe_ib_device_get_netdev(&rxe->ib_dev);
++	if (!ndev)
++		return;
+ 	addrconf_addr_eui48((unsigned char *)&port->port_guid,
+-			    rxe->ndev->dev_addr);
++			    ndev->dev_addr);
++	dev_put(ndev);
+ 	spin_lock_init(&port->port_lock);
+ }
+ 
+@@ -167,12 +181,13 @@ void rxe_set_mtu(struct rxe_dev *rxe, unsigned int ndev_mtu)
+ /* called by ifc layer to create new rxe device.
+  * The caller should allocate memory for rxe by calling ib_alloc_device.
+  */
+-int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name)
++int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name,
++			struct net_device *ndev)
+ {
+ 	rxe_init(rxe);
+ 	rxe_set_mtu(rxe, mtu);
+ 
+-	return rxe_register_device(rxe, ibdev_name);
++	return rxe_register_device(rxe, ibdev_name, ndev);
+ }
+ 
+ static int rxe_newlink(const char *ibdev_name, struct net_device *ndev)
+diff --git a/drivers/infiniband/sw/rxe/rxe.h b/drivers/infiniband/sw/rxe/rxe.h
+index d8fb2c7af30a7e..fe7f9706673255 100644
+--- a/drivers/infiniband/sw/rxe/rxe.h
++++ b/drivers/infiniband/sw/rxe/rxe.h
+@@ -139,7 +139,8 @@ enum resp_states {
+ 
+ void rxe_set_mtu(struct rxe_dev *rxe, unsigned int dev_mtu);
+ 
+-int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name);
++int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name,
++			struct net_device *ndev);
+ 
+ void rxe_rcv(struct sk_buff *skb);
+ 
+diff --git a/drivers/infiniband/sw/rxe/rxe_mcast.c b/drivers/infiniband/sw/rxe/rxe_mcast.c
+index 86cc2e18a7fdaf..07ff47bae31df9 100644
+--- a/drivers/infiniband/sw/rxe/rxe_mcast.c
++++ b/drivers/infiniband/sw/rxe/rxe_mcast.c
+@@ -31,10 +31,19 @@
+ static int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid)
+ {
+ 	unsigned char ll_addr[ETH_ALEN];
++	struct net_device *ndev;
++	int ret;
++
++	ndev = rxe_ib_device_get_netdev(&rxe->ib_dev);
++	if (!ndev)
++		return -ENODEV;
+ 
+ 	ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr);
+ 
+-	return dev_mc_add(rxe->ndev, ll_addr);
++	ret = dev_mc_add(ndev, ll_addr);
++	dev_put(ndev);
++
++	return ret;
+ }
+ 
+ /**
+@@ -47,10 +56,19 @@ static int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid)
+ static int rxe_mcast_del(struct rxe_dev *rxe, union ib_gid *mgid)
+ {
+ 	unsigned char ll_addr[ETH_ALEN];
++	struct net_device *ndev;
++	int ret;
++
++	ndev = rxe_ib_device_get_netdev(&rxe->ib_dev);
++	if (!ndev)
++		return -ENODEV;
+ 
+ 	ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr);
+ 
+-	return dev_mc_del(rxe->ndev, ll_addr);
++	ret = dev_mc_del(ndev, ll_addr);
++	dev_put(ndev);
++
++	return ret;
+ }
+ 
+ /**
+diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
+index 75d1407db52d4d..8cc64ceeb3569b 100644
+--- a/drivers/infiniband/sw/rxe/rxe_net.c
++++ b/drivers/infiniband/sw/rxe/rxe_net.c
+@@ -524,7 +524,16 @@ struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
+  */
+ const char *rxe_parent_name(struct rxe_dev *rxe, unsigned int port_num)
+ {
+-	return rxe->ndev->name;
++	struct net_device *ndev;
++	char *ndev_name;
++
++	ndev = rxe_ib_device_get_netdev(&rxe->ib_dev);
++	if (!ndev)
++		return NULL;
++	ndev_name = ndev->name;
++	dev_put(ndev);
++
++	return ndev_name;
+ }
+ 
+ int rxe_net_add(const char *ibdev_name, struct net_device *ndev)
+@@ -536,10 +545,9 @@ int rxe_net_add(const char *ibdev_name, struct net_device *ndev)
+ 	if (!rxe)
+ 		return -ENOMEM;
+ 
+-	rxe->ndev = ndev;
+ 	ib_mark_name_assigned_by_user(&rxe->ib_dev);
+ 
+-	err = rxe_add(rxe, ndev->mtu, ibdev_name);
++	err = rxe_add(rxe, ndev->mtu, ibdev_name, ndev);
+ 	if (err) {
+ 		ib_dealloc_device(&rxe->ib_dev);
+ 		return err;
+@@ -587,10 +595,18 @@ void rxe_port_down(struct rxe_dev *rxe)
+ 
+ void rxe_set_port_state(struct rxe_dev *rxe)
+ {
+-	if (netif_running(rxe->ndev) && netif_carrier_ok(rxe->ndev))
++	struct net_device *ndev;
++
++	ndev = rxe_ib_device_get_netdev(&rxe->ib_dev);
++	if (!ndev)
++		return;
++
++	if (netif_running(ndev) && netif_carrier_ok(ndev))
+ 		rxe_port_up(rxe);
+ 	else
+ 		rxe_port_down(rxe);
++
++	dev_put(ndev);
+ }
+ 
+ static int rxe_notify(struct notifier_block *not_blk,
+diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
+index 5c18f7e342f294..8a5fc20fd18692 100644
+--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
++++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
+@@ -41,6 +41,7 @@ static int rxe_query_port(struct ib_device *ibdev,
+ 			  u32 port_num, struct ib_port_attr *attr)
+ {
+ 	struct rxe_dev *rxe = to_rdev(ibdev);
++	struct net_device *ndev;
+ 	int err, ret;
+ 
+ 	if (port_num != 1) {
+@@ -49,6 +50,12 @@ static int rxe_query_port(struct ib_device *ibdev,
+ 		goto err_out;
+ 	}
+ 
++	ndev = rxe_ib_device_get_netdev(ibdev);
++	if (!ndev) {
++		err = -ENODEV;
++		goto err_out;
++	}
++
+ 	memcpy(attr, &rxe->port.attr, sizeof(*attr));
+ 
+ 	mutex_lock(&rxe->usdev_lock);
+@@ -57,13 +64,14 @@ static int rxe_query_port(struct ib_device *ibdev,
+ 
+ 	if (attr->state == IB_PORT_ACTIVE)
+ 		attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
+-	else if (dev_get_flags(rxe->ndev) & IFF_UP)
++	else if (dev_get_flags(ndev) & IFF_UP)
+ 		attr->phys_state = IB_PORT_PHYS_STATE_POLLING;
+ 	else
+ 		attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
+ 
+ 	mutex_unlock(&rxe->usdev_lock);
+ 
++	dev_put(ndev);
+ 	return ret;
+ 
+ err_out:
+@@ -1425,9 +1433,16 @@ static const struct attribute_group rxe_attr_group = {
+ static int rxe_enable_driver(struct ib_device *ib_dev)
+ {
+ 	struct rxe_dev *rxe = container_of(ib_dev, struct rxe_dev, ib_dev);
++	struct net_device *ndev;
++
++	ndev = rxe_ib_device_get_netdev(ib_dev);
++	if (!ndev)
++		return -ENODEV;
+ 
+ 	rxe_set_port_state(rxe);
+-	dev_info(&rxe->ib_dev.dev, "added %s\n", netdev_name(rxe->ndev));
++	dev_info(&rxe->ib_dev.dev, "added %s\n", netdev_name(ndev));
++
++	dev_put(ndev);
+ 	return 0;
+ }
+ 
+@@ -1495,7 +1510,8 @@ static const struct ib_device_ops rxe_dev_ops = {
+ 	INIT_RDMA_OBJ_SIZE(ib_mw, rxe_mw, ibmw),
+ };
+ 
+-int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
++int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name,
++						struct net_device *ndev)
+ {
+ 	int err;
+ 	struct ib_device *dev = &rxe->ib_dev;
+@@ -1507,13 +1523,13 @@ int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
+ 	dev->num_comp_vectors = num_possible_cpus();
+ 	dev->local_dma_lkey = 0;
+ 	addrconf_addr_eui48((unsigned char *)&dev->node_guid,
+-			    rxe->ndev->dev_addr);
++			    ndev->dev_addr);
+ 
+ 	dev->uverbs_cmd_mask |= BIT_ULL(IB_USER_VERBS_CMD_POST_SEND) |
+ 				BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ);
+ 
+ 	ib_set_device_ops(dev, &rxe_dev_ops);
+-	err = ib_device_set_netdev(&rxe->ib_dev, rxe->ndev, 1);
++	err = ib_device_set_netdev(&rxe->ib_dev, ndev, 1);
+ 	if (err)
+ 		return err;
+ 
+diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
+index 3c1354f82283e6..6573ceec0ef583 100644
+--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
++++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
+@@ -370,6 +370,7 @@ struct rxe_port {
+ 	u32			qp_gsi_index;
+ };
+ 
++#define	RXE_PORT	1
+ struct rxe_dev {
+ 	struct ib_device	ib_dev;
+ 	struct ib_device_attr	attr;
+@@ -377,8 +378,6 @@ struct rxe_dev {
+ 	int			max_inline_data;
+ 	struct mutex	usdev_lock;
+ 
+-	struct net_device	*ndev;
+-
+ 	struct rxe_pool		uc_pool;
+ 	struct rxe_pool		pd_pool;
+ 	struct rxe_pool		ah_pool;
+@@ -406,6 +405,11 @@ struct rxe_dev {
+ 	struct crypto_shash	*tfm;
+ };
+ 
++static inline struct net_device *rxe_ib_device_get_netdev(struct ib_device *dev)
++{
++	return ib_device_get_netdev(dev, RXE_PORT);
++}
++
+ static inline void rxe_counter_inc(struct rxe_dev *rxe, enum rxe_counters index)
+ {
+ 	atomic64_inc(&rxe->stats_counters[index]);
+@@ -471,6 +475,7 @@ static inline struct rxe_pd *rxe_mw_pd(struct rxe_mw *mw)
+ 	return to_rpd(mw->ibmw.pd);
+ }
+ 
+-int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name);
++int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name,
++						struct net_device *ndev);
+ 
+ #endif /* RXE_VERBS_H */
+diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h
+index 86d4d6a2170e17..ea5eee50dc39d0 100644
+--- a/drivers/infiniband/sw/siw/siw.h
++++ b/drivers/infiniband/sw/siw/siw.h
+@@ -46,6 +46,9 @@
+  */
+ #define SIW_IRQ_MAXBURST_SQ_ACTIVE 4
+ 
++/* There is always only a port 1 per siw device */
++#define SIW_PORT 1
++
+ struct siw_dev_cap {
+ 	int max_qp;
+ 	int max_qp_wr;
+@@ -69,16 +72,12 @@ struct siw_pd {
+ 
+ struct siw_device {
+ 	struct ib_device base_dev;
+-	struct net_device *netdev;
+ 	struct siw_dev_cap attrs;
+ 
+ 	u32 vendor_part_id;
+ 	int numa_node;
+ 	char raw_gid[ETH_ALEN];
+ 
+-	/* physical port state (only one port per device) */
+-	enum ib_port_state state;
+-
+ 	spinlock_t lock;
+ 
+ 	struct xarray qp_xa;
+diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c
+index 86323918a570eb..708b13993fdfd3 100644
+--- a/drivers/infiniband/sw/siw/siw_cm.c
++++ b/drivers/infiniband/sw/siw/siw_cm.c
+@@ -1759,6 +1759,7 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
+ {
+ 	struct socket *s;
+ 	struct siw_cep *cep = NULL;
++	struct net_device *ndev = NULL;
+ 	struct siw_device *sdev = to_siw_dev(id->device);
+ 	int addr_family = id->local_addr.ss_family;
+ 	int rv = 0;
+@@ -1779,9 +1780,15 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
+ 		struct sockaddr_in *laddr = &to_sockaddr_in(id->local_addr);
+ 
+ 		/* For wildcard addr, limit binding to current device only */
+-		if (ipv4_is_zeronet(laddr->sin_addr.s_addr))
+-			s->sk->sk_bound_dev_if = sdev->netdev->ifindex;
+-
++		if (ipv4_is_zeronet(laddr->sin_addr.s_addr)) {
++			ndev = ib_device_get_netdev(id->device, SIW_PORT);
++			if (ndev) {
++				s->sk->sk_bound_dev_if = ndev->ifindex;
++			} else {
++				rv = -ENODEV;
++				goto error;
++			}
++		}
+ 		rv = s->ops->bind(s, (struct sockaddr *)laddr,
+ 				  sizeof(struct sockaddr_in));
+ 	} else {
+@@ -1797,9 +1804,15 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
+ 		}
+ 
+ 		/* For wildcard addr, limit binding to current device only */
+-		if (ipv6_addr_any(&laddr->sin6_addr))
+-			s->sk->sk_bound_dev_if = sdev->netdev->ifindex;
+-
++		if (ipv6_addr_any(&laddr->sin6_addr)) {
++			ndev = ib_device_get_netdev(id->device, SIW_PORT);
++			if (ndev) {
++				s->sk->sk_bound_dev_if = ndev->ifindex;
++			} else {
++				rv = -ENODEV;
++				goto error;
++			}
++		}
+ 		rv = s->ops->bind(s, (struct sockaddr *)laddr,
+ 				  sizeof(struct sockaddr_in6));
+ 	}
+@@ -1860,6 +1873,7 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
+ 	}
+ 	list_add_tail(&cep->listenq, (struct list_head *)id->provider_data);
+ 	cep->state = SIW_EPSTATE_LISTENING;
++	dev_put(ndev);
+ 
+ 	siw_dbg(id->device, "Listen at laddr %pISp\n", &id->local_addr);
+ 
+@@ -1879,6 +1893,7 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
+ 		siw_cep_set_free_and_put(cep);
+ 	}
+ 	sock_release(s);
++	dev_put(ndev);
+ 
+ 	return rv;
+ }
+diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c
+index 17abef48abcd22..14d3103aee6f8a 100644
+--- a/drivers/infiniband/sw/siw/siw_main.c
++++ b/drivers/infiniband/sw/siw/siw_main.c
+@@ -287,7 +287,6 @@ static struct siw_device *siw_device_create(struct net_device *netdev)
+ 		return NULL;
+ 
+ 	base_dev = &sdev->base_dev;
+-	sdev->netdev = netdev;
+ 
+ 	if (netdev->addr_len) {
+ 		memcpy(sdev->raw_gid, netdev->dev_addr,
+@@ -381,12 +380,10 @@ static int siw_netdev_event(struct notifier_block *nb, unsigned long event,
+ 
+ 	switch (event) {
+ 	case NETDEV_UP:
+-		sdev->state = IB_PORT_ACTIVE;
+ 		siw_port_event(sdev, 1, IB_EVENT_PORT_ACTIVE);
+ 		break;
+ 
+ 	case NETDEV_DOWN:
+-		sdev->state = IB_PORT_DOWN;
+ 		siw_port_event(sdev, 1, IB_EVENT_PORT_ERR);
+ 		break;
+ 
+@@ -407,12 +404,8 @@ static int siw_netdev_event(struct notifier_block *nb, unsigned long event,
+ 		siw_port_event(sdev, 1, IB_EVENT_LID_CHANGE);
+ 		break;
+ 	/*
+-	 * Todo: Below netdev events are currently not handled.
++	 * All other events are not handled
+ 	 */
+-	case NETDEV_CHANGEMTU:
+-	case NETDEV_CHANGE:
+-		break;
+-
+ 	default:
+ 		break;
+ 	}
+@@ -442,12 +435,6 @@ static int siw_newlink(const char *basedev_name, struct net_device *netdev)
+ 	sdev = siw_device_create(netdev);
+ 	if (sdev) {
+ 		dev_dbg(&netdev->dev, "siw: new device\n");
+-
+-		if (netif_running(netdev) && netif_carrier_ok(netdev))
+-			sdev->state = IB_PORT_ACTIVE;
+-		else
+-			sdev->state = IB_PORT_DOWN;
+-
+ 		ib_mark_name_assigned_by_user(&sdev->base_dev);
+ 		rv = siw_device_register(sdev, basedev_name);
+ 		if (rv)
+diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
+index 986666c19378a1..7ca0297d68a4a7 100644
+--- a/drivers/infiniband/sw/siw/siw_verbs.c
++++ b/drivers/infiniband/sw/siw/siw_verbs.c
+@@ -171,21 +171,29 @@ int siw_query_device(struct ib_device *base_dev, struct ib_device_attr *attr,
+ int siw_query_port(struct ib_device *base_dev, u32 port,
+ 		   struct ib_port_attr *attr)
+ {
+-	struct siw_device *sdev = to_siw_dev(base_dev);
++	struct net_device *ndev;
+ 	int rv;
+ 
+ 	memset(attr, 0, sizeof(*attr));
+ 
+ 	rv = ib_get_eth_speed(base_dev, port, &attr->active_speed,
+ 			 &attr->active_width);
++	if (rv)
++		return rv;
++
++	ndev = ib_device_get_netdev(base_dev, SIW_PORT);
++	if (!ndev)
++		return -ENODEV;
++
+ 	attr->gid_tbl_len = 1;
+ 	attr->max_msg_sz = -1;
+-	attr->max_mtu = ib_mtu_int_to_enum(sdev->netdev->mtu);
+-	attr->active_mtu = ib_mtu_int_to_enum(sdev->netdev->mtu);
+-	attr->phys_state = sdev->state == IB_PORT_ACTIVE ?
++	attr->max_mtu = ib_mtu_int_to_enum(ndev->max_mtu);
++	attr->active_mtu = ib_mtu_int_to_enum(READ_ONCE(ndev->mtu));
++	attr->phys_state = (netif_running(ndev) && netif_carrier_ok(ndev)) ?
+ 		IB_PORT_PHYS_STATE_LINK_UP : IB_PORT_PHYS_STATE_DISABLED;
++	attr->state = attr->phys_state == IB_PORT_PHYS_STATE_LINK_UP ?
++		IB_PORT_ACTIVE : IB_PORT_DOWN;
+ 	attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_DEVICE_MGMT_SUP;
+-	attr->state = sdev->state;
+ 	/*
+ 	 * All zero
+ 	 *
+@@ -199,6 +207,7 @@ int siw_query_port(struct ib_device *base_dev, u32 port,
+ 	 * attr->subnet_timeout = 0;
+ 	 * attr->init_type_repy = 0;
+ 	 */
++	dev_put(ndev);
+ 	return rv;
+ }
+ 
+@@ -505,21 +514,24 @@ int siw_query_qp(struct ib_qp *base_qp, struct ib_qp_attr *qp_attr,
+ 		 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
+ {
+ 	struct siw_qp *qp;
+-	struct siw_device *sdev;
++	struct net_device *ndev;
+ 
+-	if (base_qp && qp_attr && qp_init_attr) {
++	if (base_qp && qp_attr && qp_init_attr)
+ 		qp = to_siw_qp(base_qp);
+-		sdev = to_siw_dev(base_qp->device);
+-	} else {
++	else
+ 		return -EINVAL;
+-	}
++
++	ndev = ib_device_get_netdev(base_qp->device, SIW_PORT);
++	if (!ndev)
++		return -ENODEV;
++
+ 	qp_attr->qp_state = siw_qp_state_to_ib_qp_state[qp->attrs.state];
+ 	qp_attr->cap.max_inline_data = SIW_MAX_INLINE;
+ 	qp_attr->cap.max_send_wr = qp->attrs.sq_size;
+ 	qp_attr->cap.max_send_sge = qp->attrs.sq_max_sges;
+ 	qp_attr->cap.max_recv_wr = qp->attrs.rq_size;
+ 	qp_attr->cap.max_recv_sge = qp->attrs.rq_max_sges;
+-	qp_attr->path_mtu = ib_mtu_int_to_enum(sdev->netdev->mtu);
++	qp_attr->path_mtu = ib_mtu_int_to_enum(READ_ONCE(ndev->mtu));
+ 	qp_attr->max_rd_atomic = qp->attrs.irq_size;
+ 	qp_attr->max_dest_rd_atomic = qp->attrs.orq_size;
+ 
+@@ -534,6 +546,7 @@ int siw_query_qp(struct ib_qp *base_qp, struct ib_qp_attr *qp_attr,
+ 
+ 	qp_init_attr->cap = qp_attr->cap;
+ 
++	dev_put(ndev);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+index e83d956478521d..ef4abdea3c2d2e 100644
+--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c
++++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+@@ -349,6 +349,7 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
+ 	struct rtrs_srv_mr *srv_mr;
+ 	bool need_inval = false;
+ 	enum ib_send_flags flags;
++	struct ib_sge list;
+ 	u32 imm;
+ 	int err;
+ 
+@@ -401,7 +402,6 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
+ 	imm = rtrs_to_io_rsp_imm(id->msg_id, errno, need_inval);
+ 	imm_wr.wr.next = NULL;
+ 	if (always_invalidate) {
+-		struct ib_sge list;
+ 		struct rtrs_msg_rkey_rsp *msg;
+ 
+ 		srv_mr = &srv_path->mrs[id->msg_id];
+diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
+index 3be7bd8cd8cdeb..32abc2916b40ff 100644
+--- a/drivers/irqchip/irq-gic.c
++++ b/drivers/irqchip/irq-gic.c
+@@ -64,7 +64,7 @@ static void gic_check_cpu_features(void)
+ 
+ union gic_base {
+ 	void __iomem *common_base;
+-	void __percpu * __iomem *percpu_base;
++	void __iomem * __percpu *percpu_base;
+ };
+ 
+ struct gic_chip_data {
+diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
+index e113b99a3eab59..8716004fcf6c90 100644
+--- a/drivers/mmc/host/sdhci-msm.c
++++ b/drivers/mmc/host/sdhci-msm.c
+@@ -1867,20 +1867,20 @@ static int sdhci_msm_program_key(struct cqhci_host *cq_host,
+ 	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ 	union cqhci_crypto_cap_entry cap;
+ 
++	if (!(cfg->config_enable & CQHCI_CRYPTO_CONFIGURATION_ENABLE))
++		return qcom_ice_evict_key(msm_host->ice, slot);
++
+ 	/* Only AES-256-XTS has been tested so far. */
+ 	cap = cq_host->crypto_cap_array[cfg->crypto_cap_idx];
+ 	if (cap.algorithm_id != CQHCI_CRYPTO_ALG_AES_XTS ||
+ 		cap.key_size != CQHCI_CRYPTO_KEY_SIZE_256)
+ 		return -EINVAL;
+ 
+-	if (cfg->config_enable & CQHCI_CRYPTO_CONFIGURATION_ENABLE)
+-		return qcom_ice_program_key(msm_host->ice,
+-					    QCOM_ICE_CRYPTO_ALG_AES_XTS,
+-					    QCOM_ICE_CRYPTO_KEY_SIZE_256,
+-					    cfg->crypto_key,
+-					    cfg->data_unit_size, slot);
+-	else
+-		return qcom_ice_evict_key(msm_host->ice, slot);
++	return qcom_ice_program_key(msm_host->ice,
++				    QCOM_ICE_CRYPTO_ALG_AES_XTS,
++				    QCOM_ICE_CRYPTO_KEY_SIZE_256,
++				    cfg->crypto_key,
++				    cfg->data_unit_size, slot);
+ }
+ 
+ #else /* CONFIG_MMC_CRYPTO */
+diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
+index 0ba658a72d8fea..22556d339d6ea5 100644
+--- a/drivers/net/dsa/microchip/ksz9477.c
++++ b/drivers/net/dsa/microchip/ksz9477.c
+@@ -2,7 +2,7 @@
+ /*
+  * Microchip KSZ9477 switch driver main logic
+  *
+- * Copyright (C) 2017-2019 Microchip Technology Inc.
++ * Copyright (C) 2017-2024 Microchip Technology Inc.
+  */
+ 
+ #include <linux/kernel.h>
+@@ -983,26 +983,51 @@ void ksz9477_get_caps(struct ksz_device *dev, int port,
+ int ksz9477_set_ageing_time(struct ksz_device *dev, unsigned int msecs)
+ {
+ 	u32 secs = msecs / 1000;
+-	u8 value;
+-	u8 data;
++	u8 data, mult, value;
++	u32 max_val;
+ 	int ret;
+ 
+-	value = FIELD_GET(SW_AGE_PERIOD_7_0_M, secs);
++#define MAX_TIMER_VAL	((1 << 8) - 1)
+ 
+-	ret = ksz_write8(dev, REG_SW_LUE_CTRL_3, value);
+-	if (ret < 0)
+-		return ret;
++	/* The aging timer comprises a 3-bit multiplier and an 8-bit second
++	 * value.  Either of them cannot be zero.  The maximum timer is then
++	 * 7 * 255 = 1785 seconds.
++	 */
++	if (!secs)
++		secs = 1;
+ 
+-	data = FIELD_GET(SW_AGE_PERIOD_10_8_M, secs);
++	/* Return error if too large. */
++	else if (secs > 7 * MAX_TIMER_VAL)
++		return -EINVAL;
+ 
+ 	ret = ksz_read8(dev, REG_SW_LUE_CTRL_0, &value);
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	value &= ~SW_AGE_CNT_M;
+-	value |= FIELD_PREP(SW_AGE_CNT_M, data);
++	/* Check whether there is need to update the multiplier. */
++	mult = FIELD_GET(SW_AGE_CNT_M, value);
++	max_val = MAX_TIMER_VAL;
++	if (mult > 0) {
++		/* Try to use the same multiplier already in the register as
++		 * the hardware default uses multiplier 4 and 75 seconds for
++		 * 300 seconds.
++		 */
++		max_val = DIV_ROUND_UP(secs, mult);
++		if (max_val > MAX_TIMER_VAL || max_val * mult != secs)
++			max_val = MAX_TIMER_VAL;
++	}
++
++	data = DIV_ROUND_UP(secs, max_val);
++	if (mult != data) {
++		value &= ~SW_AGE_CNT_M;
++		value |= FIELD_PREP(SW_AGE_CNT_M, data);
++		ret = ksz_write8(dev, REG_SW_LUE_CTRL_0, value);
++		if (ret < 0)
++			return ret;
++	}
+ 
+-	return ksz_write8(dev, REG_SW_LUE_CTRL_0, value);
++	value = DIV_ROUND_UP(secs, data);
++	return ksz_write8(dev, REG_SW_LUE_CTRL_3, value);
+ }
+ 
+ void ksz9477_port_queue_split(struct ksz_device *dev, int port)
+diff --git a/drivers/net/dsa/microchip/ksz9477_reg.h b/drivers/net/dsa/microchip/ksz9477_reg.h
+index 04235c22bf40e4..ff579920078ee3 100644
+--- a/drivers/net/dsa/microchip/ksz9477_reg.h
++++ b/drivers/net/dsa/microchip/ksz9477_reg.h
+@@ -2,7 +2,7 @@
+ /*
+  * Microchip KSZ9477 register definitions
+  *
+- * Copyright (C) 2017-2018 Microchip Technology Inc.
++ * Copyright (C) 2017-2024 Microchip Technology Inc.
+  */
+ 
+ #ifndef __KSZ9477_REGS_H
+@@ -165,8 +165,6 @@
+ #define SW_VLAN_ENABLE			BIT(7)
+ #define SW_DROP_INVALID_VID		BIT(6)
+ #define SW_AGE_CNT_M			GENMASK(5, 3)
+-#define SW_AGE_CNT_S			3
+-#define SW_AGE_PERIOD_10_8_M		GENMASK(10, 8)
+ #define SW_RESV_MCAST_ENABLE		BIT(2)
+ #define SW_HASH_OPTION_M		0x03
+ #define SW_HASH_OPTION_CRC		1
+diff --git a/drivers/net/dsa/microchip/lan937x_main.c b/drivers/net/dsa/microchip/lan937x_main.c
+index 824d9309a3d35e..7fe127a075de31 100644
+--- a/drivers/net/dsa/microchip/lan937x_main.c
++++ b/drivers/net/dsa/microchip/lan937x_main.c
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0
+ /* Microchip LAN937X switch driver main logic
+- * Copyright (C) 2019-2022 Microchip Technology Inc.
++ * Copyright (C) 2019-2024 Microchip Technology Inc.
+  */
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+@@ -260,10 +260,66 @@ int lan937x_change_mtu(struct ksz_device *dev, int port, int new_mtu)
+ 
+ int lan937x_set_ageing_time(struct ksz_device *dev, unsigned int msecs)
+ {
+-	u32 secs = msecs / 1000;
+-	u32 value;
++	u8 data, mult, value8;
++	bool in_msec = false;
++	u32 max_val, value;
++	u32 secs = msecs;
+ 	int ret;
+ 
++#define MAX_TIMER_VAL	((1 << 20) - 1)
++
++	/* The aging timer comprises a 3-bit multiplier and a 20-bit second
++	 * value.  Either of them cannot be zero.  The maximum timer is then
++	 * 7 * 1048575 = 7340025 seconds.  As this value is too large for
++	 * practical use it can be interpreted as microseconds, making the
++	 * maximum timer 7340 seconds with finer control.  This allows for
++	 * maximum 122 minutes compared to 29 minutes in KSZ9477 switch.
++	 */
++	if (msecs % 1000)
++		in_msec = true;
++	else
++		secs /= 1000;
++	if (!secs)
++		secs = 1;
++
++	/* Return error if too large. */
++	else if (secs > 7 * MAX_TIMER_VAL)
++		return -EINVAL;
++
++	/* Configure how to interpret the number value. */
++	ret = ksz_rmw8(dev, REG_SW_LUE_CTRL_2, SW_AGE_CNT_IN_MICROSEC,
++		       in_msec ? SW_AGE_CNT_IN_MICROSEC : 0);
++	if (ret < 0)
++		return ret;
++
++	ret = ksz_read8(dev, REG_SW_LUE_CTRL_0, &value8);
++	if (ret < 0)
++		return ret;
++
++	/* Check whether there is need to update the multiplier. */
++	mult = FIELD_GET(SW_AGE_CNT_M, value8);
++	max_val = MAX_TIMER_VAL;
++	if (mult > 0) {
++		/* Try to use the same multiplier already in the register as
++		 * the hardware default uses multiplier 4 and 75 seconds for
++		 * 300 seconds.
++		 */
++		max_val = DIV_ROUND_UP(secs, mult);
++		if (max_val > MAX_TIMER_VAL || max_val * mult != secs)
++			max_val = MAX_TIMER_VAL;
++	}
++
++	data = DIV_ROUND_UP(secs, max_val);
++	if (mult != data) {
++		value8 &= ~SW_AGE_CNT_M;
++		value8 |= FIELD_PREP(SW_AGE_CNT_M, data);
++		ret = ksz_write8(dev, REG_SW_LUE_CTRL_0, value8);
++		if (ret < 0)
++			return ret;
++	}
++
++	secs = DIV_ROUND_UP(secs, data);
++
+ 	value = FIELD_GET(SW_AGE_PERIOD_7_0_M, secs);
+ 
+ 	ret = ksz_write8(dev, REG_SW_AGE_PERIOD__1, value);
+diff --git a/drivers/net/dsa/microchip/lan937x_reg.h b/drivers/net/dsa/microchip/lan937x_reg.h
+index 2f22a9d01de36b..35269f74a314b4 100644
+--- a/drivers/net/dsa/microchip/lan937x_reg.h
++++ b/drivers/net/dsa/microchip/lan937x_reg.h
+@@ -1,6 +1,6 @@
+ /* SPDX-License-Identifier: GPL-2.0 */
+ /* Microchip LAN937X switch register definitions
+- * Copyright (C) 2019-2021 Microchip Technology Inc.
++ * Copyright (C) 2019-2024 Microchip Technology Inc.
+  */
+ #ifndef __LAN937X_REG_H
+ #define __LAN937X_REG_H
+@@ -52,8 +52,7 @@
+ 
+ #define SW_VLAN_ENABLE			BIT(7)
+ #define SW_DROP_INVALID_VID		BIT(6)
+-#define SW_AGE_CNT_M			0x7
+-#define SW_AGE_CNT_S			3
++#define SW_AGE_CNT_M			GENMASK(5, 3)
+ #define SW_RESV_MCAST_ENABLE		BIT(2)
+ 
+ #define REG_SW_LUE_CTRL_1		0x0311
+@@ -66,6 +65,10 @@
+ #define SW_FAST_AGING			BIT(1)
+ #define SW_LINK_AUTO_AGING		BIT(0)
+ 
++#define REG_SW_LUE_CTRL_2		0x0312
++
++#define SW_AGE_CNT_IN_MICROSEC		BIT(7)
++
+ #define REG_SW_AGE_PERIOD__1		0x0313
+ #define SW_AGE_PERIOD_7_0_M		GENMASK(7, 0)
+ 
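The register header change encodes the field position in the mask itself: GENMASK(5, 3) replaces the separate mask/shift pair, so FIELD_GET()/FIELD_PREP() can derive the shift from the mask. A rough userspace demo, with simplified stand-ins for the linux/bits.h and linux/bitfield.h macros:

/* Build: cc -o mask mask.c && ./mask */
#include <stdio.h>

#define GENMASK(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))
/* (m) & ~((m) << 1) isolates the lowest set bit of the mask. */
#define FIELD_GET(m, v)	(((v) & (m)) / ((m) & ~((m) << 1)))
#define FIELD_PREP(m, v) (((v) * ((m) & ~((m) << 1))) & (m))

#define SW_AGE_CNT_M	GENMASK(5, 3)

int main(void)
{
	unsigned int reg;

	reg = FIELD_PREP(SW_AGE_CNT_M, 4);	/* multiplier 4 into bits 5:3 */
	printf("reg = 0x%02x, mult = %u\n", reg, FIELD_GET(SW_AGE_CNT_M, reg));
	return 0;
}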
+diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
+index 0a68b526e4a821..2b784ced06451f 100644
+--- a/drivers/net/ethernet/broadcom/bcmsysport.c
++++ b/drivers/net/ethernet/broadcom/bcmsysport.c
+@@ -1967,7 +1967,11 @@ static int bcm_sysport_open(struct net_device *dev)
+ 	unsigned int i;
+ 	int ret;
+ 
+-	clk_prepare_enable(priv->clk);
++	ret = clk_prepare_enable(priv->clk);
++	if (ret) {
++		netdev_err(dev, "could not enable priv clock\n");
++		return ret;
++	}
+ 
+ 	/* Reset UniMAC */
+ 	umac_reset(priv);
+@@ -2625,7 +2629,11 @@ static int bcm_sysport_probe(struct platform_device *pdev)
+ 		goto err_deregister_notifier;
+ 	}
+ 
+-	clk_prepare_enable(priv->clk);
++	ret = clk_prepare_enable(priv->clk);
++	if (ret) {
++		dev_err(&pdev->dev, "could not enable priv clock\n");
++		goto err_deregister_netdev;
++	}
+ 
+ 	priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
+ 	dev_info(&pdev->dev,
+@@ -2639,6 +2647,8 @@ static int bcm_sysport_probe(struct platform_device *pdev)
+ 
+ 	return 0;
+ 
++err_deregister_netdev:
++	unregister_netdev(dev);
+ err_deregister_notifier:
+ 	unregister_netdevice_notifier(&priv->netdev_notifier);
+ err_deregister_fixed_link:
+@@ -2808,7 +2818,12 @@ static int __maybe_unused bcm_sysport_resume(struct device *d)
+ 	if (!netif_running(dev))
+ 		return 0;
+ 
+-	clk_prepare_enable(priv->clk);
++	ret = clk_prepare_enable(priv->clk);
++	if (ret) {
++		netdev_err(dev, "could not enable priv clock\n");
++		return ret;
++	}
++
+ 	if (priv->wolopts)
+ 		clk_disable_unprepare(priv->wol_clk);
+ 
+diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
+index 301fa1ea4f5167..95471cfcff420a 100644
+--- a/drivers/net/ethernet/google/gve/gve.h
++++ b/drivers/net/ethernet/google/gve/gve.h
+@@ -1134,6 +1134,7 @@ int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx,
+ void gve_xdp_tx_flush(struct gve_priv *priv, u32 xdp_qid);
+ bool gve_tx_poll(struct gve_notify_block *block, int budget);
+ bool gve_xdp_poll(struct gve_notify_block *block, int budget);
++int gve_xsk_tx_poll(struct gve_notify_block *block, int budget);
+ int gve_tx_alloc_rings_gqi(struct gve_priv *priv,
+ 			   struct gve_tx_alloc_rings_cfg *cfg);
+ void gve_tx_free_rings_gqi(struct gve_priv *priv,
+diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
+index 661566db68c860..d404819ebc9b3f 100644
+--- a/drivers/net/ethernet/google/gve/gve_main.c
++++ b/drivers/net/ethernet/google/gve/gve_main.c
+@@ -333,6 +333,14 @@ int gve_napi_poll(struct napi_struct *napi, int budget)
+ 
+ 	if (block->rx) {
+ 		work_done = gve_rx_poll(block, budget);
++
++		/* Poll XSK TX as part of RX NAPI. Set up re-poll based on max of
++		 * TX and RX work done.
++		 */
++		if (priv->xdp_prog)
++			work_done = max_t(int, work_done,
++					  gve_xsk_tx_poll(block, budget));
++
+ 		reschedule |= work_done == budget;
+ 	}
+ 
+@@ -922,11 +930,13 @@ static void gve_init_sync_stats(struct gve_priv *priv)
+ static void gve_tx_get_curr_alloc_cfg(struct gve_priv *priv,
+ 				      struct gve_tx_alloc_rings_cfg *cfg)
+ {
++	int num_xdp_queues = priv->xdp_prog ? priv->rx_cfg.num_queues : 0;
++
+ 	cfg->qcfg = &priv->tx_cfg;
+ 	cfg->raw_addressing = !gve_is_qpl(priv);
+ 	cfg->ring_size = priv->tx_desc_cnt;
+ 	cfg->start_idx = 0;
+-	cfg->num_rings = gve_num_tx_queues(priv);
++	cfg->num_rings = priv->tx_cfg.num_queues + num_xdp_queues;
+ 	cfg->tx = priv->tx;
+ }
+ 
+@@ -1623,8 +1633,8 @@ static int gve_xsk_pool_enable(struct net_device *dev,
+ 	if (err)
+ 		return err;
+ 
+-	/* If XDP prog is not installed, return */
+-	if (!priv->xdp_prog)
++	/* If XDP prog is not installed or interface is down, return. */
++	if (!priv->xdp_prog || !netif_running(dev))
+ 		return 0;
+ 
+ 	rx = &priv->rx[qid];
+@@ -1669,21 +1679,16 @@ static int gve_xsk_pool_disable(struct net_device *dev,
+ 	if (qid >= priv->rx_cfg.num_queues)
+ 		return -EINVAL;
+ 
+-	/* If XDP prog is not installed, unmap DMA and return */
+-	if (!priv->xdp_prog)
+-		goto done;
+-
+-	tx_qid = gve_xdp_tx_queue_id(priv, qid);
+-	if (!netif_running(dev)) {
+-		priv->rx[qid].xsk_pool = NULL;
+-		xdp_rxq_info_unreg(&priv->rx[qid].xsk_rxq);
+-		priv->tx[tx_qid].xsk_pool = NULL;
++	/* If XDP prog is not installed or interface is down, unmap DMA and
++	 * return.
++	 */
++	if (!priv->xdp_prog || !netif_running(dev))
+ 		goto done;
+-	}
+ 
+ 	napi_rx = &priv->ntfy_blocks[priv->rx[qid].ntfy_id].napi;
+ 	napi_disable(napi_rx); /* make sure current rx poll is done */
+ 
++	tx_qid = gve_xdp_tx_queue_id(priv, qid);
+ 	napi_tx = &priv->ntfy_blocks[priv->tx[tx_qid].ntfy_id].napi;
+ 	napi_disable(napi_tx); /* make sure current tx poll is done */
+ 
+@@ -1709,24 +1714,20 @@ static int gve_xsk_pool_disable(struct net_device *dev,
+ static int gve_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
+ {
+ 	struct gve_priv *priv = netdev_priv(dev);
+-	int tx_queue_id = gve_xdp_tx_queue_id(priv, queue_id);
++	struct napi_struct *napi;
++
++	if (!gve_get_napi_enabled(priv))
++		return -ENETDOWN;
+ 
+ 	if (queue_id >= priv->rx_cfg.num_queues || !priv->xdp_prog)
+ 		return -EINVAL;
+ 
+-	if (flags & XDP_WAKEUP_TX) {
+-		struct gve_tx_ring *tx = &priv->tx[tx_queue_id];
+-		struct napi_struct *napi =
+-			&priv->ntfy_blocks[tx->ntfy_id].napi;
+-
+-		if (!napi_if_scheduled_mark_missed(napi)) {
+-			/* Call local_bh_enable to trigger SoftIRQ processing */
+-			local_bh_disable();
+-			napi_schedule(napi);
+-			local_bh_enable();
+-		}
+-
+-		tx->xdp_xsk_wakeup++;
++	napi = &priv->ntfy_blocks[gve_rx_idx_to_ntfy(priv, queue_id)].napi;
++	if (!napi_if_scheduled_mark_missed(napi)) {
++		/* Call local_bh_enable to trigger SoftIRQ processing */
++		local_bh_disable();
++		napi_schedule(napi);
++		local_bh_enable();
+ 	}
+ 
+ 	return 0;
+@@ -1837,6 +1838,7 @@ int gve_adjust_queues(struct gve_priv *priv,
+ {
+ 	struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
+ 	struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
++	int num_xdp_queues;
+ 	int err;
+ 
+ 	gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
+@@ -1847,6 +1849,10 @@ int gve_adjust_queues(struct gve_priv *priv,
+ 	rx_alloc_cfg.qcfg = &new_rx_config;
+ 	tx_alloc_cfg.num_rings = new_tx_config.num_queues;
+ 
++	/* Add dedicated XDP TX queues if enabled. */
++	num_xdp_queues = priv->xdp_prog ? new_rx_config.num_queues : 0;
++	tx_alloc_cfg.num_rings += num_xdp_queues;
++
+ 	if (netif_running(priv->dev)) {
+ 		err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
+ 		return err;
+@@ -1891,6 +1897,9 @@ static void gve_turndown(struct gve_priv *priv)
+ 
+ 	gve_clear_napi_enabled(priv);
+ 	gve_clear_report_stats(priv);
++
++	/* Make sure that all traffic is finished processing. */
++	synchronize_net();
+ }
+ 
+ static void gve_turnup(struct gve_priv *priv)
+diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
+index e7fb7d6d283df1..4350ebd9c2bd9e 100644
+--- a/drivers/net/ethernet/google/gve/gve_tx.c
++++ b/drivers/net/ethernet/google/gve/gve_tx.c
+@@ -206,7 +206,10 @@ void gve_tx_stop_ring_gqi(struct gve_priv *priv, int idx)
+ 		return;
+ 
+ 	gve_remove_napi(priv, ntfy_idx);
+-	gve_clean_tx_done(priv, tx, priv->tx_desc_cnt, false);
++	if (tx->q_num < priv->tx_cfg.num_queues)
++		gve_clean_tx_done(priv, tx, priv->tx_desc_cnt, false);
++	else
++		gve_clean_xdp_done(priv, tx, priv->tx_desc_cnt);
+ 	netdev_tx_reset_queue(tx->netdev_txq);
+ 	gve_tx_remove_from_block(priv, idx);
+ }
+@@ -834,9 +837,12 @@ int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
+ 	struct gve_tx_ring *tx;
+ 	int i, err = 0, qid;
+ 
+-	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
++	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK) || !priv->xdp_prog)
+ 		return -EINVAL;
+ 
++	if (!gve_get_napi_enabled(priv))
++		return -ENETDOWN;
++
+ 	qid = gve_xdp_tx_queue_id(priv,
+ 				  smp_processor_id() % priv->num_xdp_queues);
+ 
+@@ -975,33 +981,41 @@ static int gve_xsk_tx(struct gve_priv *priv, struct gve_tx_ring *tx,
+ 	return sent;
+ }
+ 
++int gve_xsk_tx_poll(struct gve_notify_block *rx_block, int budget)
++{
++	struct gve_rx_ring *rx = rx_block->rx;
++	struct gve_priv *priv = rx->gve;
++	struct gve_tx_ring *tx;
++	int sent = 0;
++
++	tx = &priv->tx[gve_xdp_tx_queue_id(priv, rx->q_num)];
++	if (tx->xsk_pool) {
++		sent = gve_xsk_tx(priv, tx, budget);
++
++		u64_stats_update_begin(&tx->statss);
++		tx->xdp_xsk_sent += sent;
++		u64_stats_update_end(&tx->statss);
++		if (xsk_uses_need_wakeup(tx->xsk_pool))
++			xsk_set_tx_need_wakeup(tx->xsk_pool);
++	}
++
++	return sent;
++}
++
+ bool gve_xdp_poll(struct gve_notify_block *block, int budget)
+ {
+ 	struct gve_priv *priv = block->priv;
+ 	struct gve_tx_ring *tx = block->tx;
+ 	u32 nic_done;
+-	bool repoll;
+ 	u32 to_do;
+ 
+ 	/* Find out how much work there is to be done */
+ 	nic_done = gve_tx_load_event_counter(priv, tx);
+ 	to_do = min_t(u32, (nic_done - tx->done), budget);
+ 	gve_clean_xdp_done(priv, tx, to_do);
+-	repoll = nic_done != tx->done;
+-
+-	if (tx->xsk_pool) {
+-		int sent = gve_xsk_tx(priv, tx, budget);
+-
+-		u64_stats_update_begin(&tx->statss);
+-		tx->xdp_xsk_sent += sent;
+-		u64_stats_update_end(&tx->statss);
+-		repoll |= (sent == budget);
+-		if (xsk_uses_need_wakeup(tx->xsk_pool))
+-			xsk_set_tx_need_wakeup(tx->xsk_pool);
+-	}
+ 
+ 	/* If we still have work we want to repoll */
+-	return repoll;
++	return nic_done != tx->done;
+ }
+ 
+ bool gve_tx_poll(struct gve_notify_block *block, int budget)
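With this change the XSK TX queue is serviced from the RX NAPI context, and the re-poll decision uses the larger of the RX and XSK TX work counts, so neither side can starve the other within one budget. A compact userspace model of the budget handling (the poll stubs are placeholders, not driver code):

#include <stdio.h>

static int gve_rx_poll_stub(int budget)     { (void)budget; return 7; }
static int gve_xsk_tx_poll_stub(int budget) { (void)budget; return 10; }

static int napi_poll(int budget, int xdp_installed)
{
	int work_done = gve_rx_poll_stub(budget);

	/* XSK TX shares the RX NAPI; take the max of both work counts. */
	if (xdp_installed) {
		int tx_done = gve_xsk_tx_poll_stub(budget);

		if (tx_done > work_done)
			work_done = tx_done;
	}

	/* Hitting the budget means there may be more work: re-poll. */
	return work_done == budget;
}

int main(void)
{
	printf("reschedule = %d\n", napi_poll(10, 1));
	return 0;
}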
+diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
+index 9e80899546d996..83b9905666e24f 100644
+--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
++++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
+@@ -2708,9 +2708,15 @@ static struct platform_device *port_platdev[3];
+ 
+ static void mv643xx_eth_shared_of_remove(void)
+ {
++	struct mv643xx_eth_platform_data *pd;
+ 	int n;
+ 
+ 	for (n = 0; n < 3; n++) {
++		if (!port_platdev[n])
++			continue;
++		pd = dev_get_platdata(&port_platdev[n]->dev);
++		if (pd)
++			of_node_put(pd->phy_node);
+ 		platform_device_del(port_platdev[n]);
+ 		port_platdev[n] = NULL;
+ 	}
+@@ -2773,8 +2779,10 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
+ 	}
+ 
+ 	ppdev = platform_device_alloc(MV643XX_ETH_NAME, dev_num);
+-	if (!ppdev)
+-		return -ENOMEM;
++	if (!ppdev) {
++		ret = -ENOMEM;
++		goto put_err;
++	}
+ 	ppdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ 	ppdev->dev.of_node = pnp;
+ 
+@@ -2796,6 +2804,8 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
+ 
+ port_err:
+ 	platform_device_put(ppdev);
++put_err:
++	of_node_put(ppd.phy_node);
+ 	return ret;
+ }
+ 
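Both mv643xx hunks pair every reference taken by of_parse_phandle() on the phy_node with exactly one of_node_put(), covering the early error path and shared-device removal. A tiny refcount model of the invariant being restored:

#include <assert.h>
#include <stdio.h>

struct node { int refcount; };

static void node_get(struct node *n) { n->refcount++; }

static void node_put(struct node *n)
{
	assert(n->refcount > 0);	/* a double put would trip here */
	n->refcount--;
}

int main(void)
{
	struct node phy = { .refcount = 1 };	/* base reference */

	node_get(&phy);		/* of_parse_phandle() during port setup */
	/* ... later: probe error path, or shared-device removal ... */
	node_put(&phy);		/* the of_node_put() the hunks add */

	printf("refcount back to %d\n", phy.refcount);
	return 0;
}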
+diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
+index a7a16eac189134..52d99908d0e9d3 100644
+--- a/drivers/net/ethernet/marvell/sky2.c
++++ b/drivers/net/ethernet/marvell/sky2.c
+@@ -130,6 +130,7 @@ static const struct pci_device_id sky2_id_table[] = {
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436C) }, /* 88E8072 */
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436D) }, /* 88E8055 */
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4370) }, /* 88E8075 */
++	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4373) }, /* 88E8075 */
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4380) }, /* 88E8057 */
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4381) }, /* 88E8059 */
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4382) }, /* 88E8079 */
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
+index cc9bcc42003242..6ab02f3fc29123 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
+@@ -339,9 +339,13 @@ static int mlx5e_macsec_init_sa_fs(struct macsec_context *ctx,
+ {
+ 	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
+ 	struct mlx5_macsec_fs *macsec_fs = priv->mdev->macsec_fs;
++	const struct macsec_tx_sc *tx_sc = &ctx->secy->tx_sc;
+ 	struct mlx5_macsec_rule_attrs rule_attrs;
+ 	union mlx5_macsec_rule *macsec_rule;
+ 
++	if (is_tx && tx_sc->encoding_sa != sa->assoc_num)
++		return 0;
++
+ 	rule_attrs.macsec_obj_id = sa->macsec_obj_id;
+ 	rule_attrs.sci = sa->sci;
+ 	rule_attrs.assoc_num = sa->assoc_num;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index c14bef83d84d0f..62b8a7c1c6b54a 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -6510,8 +6510,23 @@ static void _mlx5e_remove(struct auxiliary_device *adev)
+ 
+ 	mlx5_core_uplink_netdev_set(mdev, NULL);
+ 	mlx5e_dcbnl_delete_app(priv);
+-	unregister_netdev(priv->netdev);
+-	_mlx5e_suspend(adev, false);
++	/* When unloading the driver, the netdev is still in the
++	 * registered state if it came from legacy mode. If it came
++	 * from switchdev mode, it was already unregistered before
++	 * changing to the NIC profile.
++	 */
++	if (priv->netdev->reg_state == NETREG_REGISTERED) {
++		unregister_netdev(priv->netdev);
++		_mlx5e_suspend(adev, false);
++	} else {
++		struct mlx5_core_dev *pos;
++		int i;
++
++		if (test_bit(MLX5E_STATE_DESTROYING, &priv->state))
++			mlx5_sd_for_each_dev(i, mdev, pos)
++				mlx5e_destroy_mdev_resources(pos);
++		else
++			_mlx5e_suspend(adev, true);
++	}
+ 	/* Avoid cleanup if profile rollback failed. */
+ 	if (priv->profile)
+ 		priv->profile->cleanup(priv);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+index 92094bf60d5986..0657d107653577 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+@@ -1508,6 +1508,21 @@ mlx5e_vport_uplink_rep_unload(struct mlx5e_rep_priv *rpriv)
+ 
+ 	priv = netdev_priv(netdev);
+ 
++	/* This bit is set when devlink is used to change the eswitch mode
++	 * from switchdev to legacy. As we need to keep the uplink netdev
++	 * ifindex, we detach the uplink representor profile and attach the
++	 * NIC profile only. The netdev will be unregistered later, when the
++	 * NIC auxiliary driver is unloaded for this case.
++	 * We explicitly block a devlink eswitch mode change if any IPsec
++	 * rules are offloaded, but we can't block other cases, such as
++	 * driver unload and devlink reload. We have to unregister the netdev
++	 * before the profile change for those cases, to avoid a resource
++	 * leak: the offloaded rules would otherwise have no chance to be
++	 * unoffloaded before the cleanup triggered by detaching the uplink
++	 * representor profile.
++	 */
++	if (!(priv->mdev->priv.flags & MLX5_PRIV_FLAGS_SWITCH_LEGACY))
++		unregister_netdev(netdev);
++
+ 	mlx5e_netdev_attach_nic_profile(priv);
+ }
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
+index 5a0047bdcb5105..ed977ae75fab89 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
+@@ -150,11 +150,11 @@ void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev)
+ 	unsigned long i;
+ 	int err;
+ 
+-	xa_for_each(&esw->offloads.vport_reps, i, rep) {
+-		rpriv = rep->rep_data[REP_ETH].priv;
+-		if (!rpriv || !rpriv->netdev)
++	mlx5_esw_for_each_rep(esw, i, rep) {
++		if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
+ 			continue;
+ 
++		rpriv = rep->rep_data[REP_ETH].priv;
+ 		rhashtable_walk_enter(&rpriv->tc_ht, &iter);
+ 		rhashtable_walk_start(&iter);
+ 		while ((flow = rhashtable_walk_next(&iter)) != NULL) {
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+index f44b4c7ebcfd73..48fd0400ffd4ec 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+@@ -716,6 +716,9 @@ void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);
+ 			  MLX5_CAP_GEN_2((esw->dev), ec_vf_vport_base) +\
+ 			  (last) - 1)
+ 
++#define mlx5_esw_for_each_rep(esw, i, rep) \
++	xa_for_each(&((esw)->offloads.vport_reps), i, rep)
++
+ struct mlx5_eswitch *__must_check
+ mlx5_devlink_eswitch_get(struct devlink *devlink);
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+index 8cf61ae8b89d24..3950b1d4b3d8e5 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+@@ -53,9 +53,6 @@
+ #include "lag/lag.h"
+ #include "en/tc/post_meter.h"
+ 
+-#define mlx5_esw_for_each_rep(esw, i, rep) \
+-	xa_for_each(&((esw)->offloads.vport_reps), i, rep)
+-
+ /* There are two match-all miss flows, one for unicast dst mac and
+  * one for multicast.
+  */
+@@ -3762,6 +3759,8 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
+ 	esw->eswitch_operation_in_progress = true;
+ 	up_write(&esw->mode_lock);
+ 
++	if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
++		esw->dev->priv.flags |= MLX5_PRIV_FLAGS_SWITCH_LEGACY;
+ 	mlx5_eswitch_disable_locked(esw);
+ 	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) {
+ 		if (mlx5_devlink_trap_get_num_active(esw->dev)) {
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
+index 6fa06ba2d34653..f57c84e5128bc7 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
+@@ -1067,7 +1067,6 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
+ 	int inlen, err, eqn;
+ 	void *cqc, *in;
+ 	__be64 *pas;
+-	int vector;
+ 	u32 i;
+ 
+ 	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
+@@ -1096,8 +1095,7 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
+ 	if (!in)
+ 		goto err_cqwq;
+ 
+-	vector = raw_smp_processor_id() % mlx5_comp_vectors_max(mdev);
+-	err = mlx5_comp_eqn_get(mdev, vector, &eqn);
++	err = mlx5_comp_eqn_get(mdev, 0, &eqn);
+ 	if (err) {
+ 		kvfree(in);
+ 		goto err_cqwq;
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+index 4b5fd71c897ddb..32d2e61f2b8238 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+@@ -423,8 +423,7 @@ mlxsw_sp_span_gretap4_route(const struct net_device *to_dev,
+ 
+ 	parms = mlxsw_sp_ipip_netdev_parms4(to_dev);
+ 	ip_tunnel_init_flow(&fl4, parms.iph.protocol, *daddrp, *saddrp,
+-			    0, 0, dev_net(to_dev), parms.link, tun->fwmark, 0,
+-			    0);
++			    0, 0, tun->net, parms.link, tun->fwmark, 0, 0);
+ 
+ 	rt = ip_route_output_key(tun->net, &fl4);
+ 	if (IS_ERR(rt))
+diff --git a/drivers/net/ethernet/sfc/tc_conntrack.c b/drivers/net/ethernet/sfc/tc_conntrack.c
+index d90206f27161e4..c0603f54cec3ad 100644
+--- a/drivers/net/ethernet/sfc/tc_conntrack.c
++++ b/drivers/net/ethernet/sfc/tc_conntrack.c
+@@ -16,7 +16,7 @@ static int efx_tc_flow_block(enum tc_setup_type type, void *type_data,
+ 			     void *cb_priv);
+ 
+ static const struct rhashtable_params efx_tc_ct_zone_ht_params = {
+-	.key_len	= offsetof(struct efx_tc_ct_zone, linkage),
++	.key_len	= sizeof_field(struct efx_tc_ct_zone, zone),
+ 	.key_offset	= 0,
+ 	.head_offset	= offsetof(struct efx_tc_ct_zone, linkage),
+ };
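The key_len fix matters because offsetof(struct efx_tc_ct_zone, linkage) covers every byte before the linkage member, including compiler padding, whereas sizeof_field(..., zone) hashes only the intended key. A standalone demo of the two lengths (the struct layout is illustrative, not the exact driver definition):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define sizeof_field(t, m)	sizeof(((t *)0)->m)

/* Illustrative layout, not the exact driver struct. */
struct ct_zone_demo {
	uint16_t zone;		/* the intended hash key */
	/* the compiler typically inserts 6 bytes of padding here */
	void *linkage;		/* rhashtable linkage follows */
};

int main(void)
{
	printf("old key_len (offsetof linkage)  = %zu\n",
	       offsetof(struct ct_zone_demo, linkage));
	printf("new key_len (sizeof_field zone) = %zu\n",
	       sizeof_field(struct ct_zone_demo, zone));
	return 0;
}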
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+index ad868e8d195d59..aaf008bdbbcd46 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+@@ -405,22 +405,6 @@ static int stmmac_of_get_mac_mode(struct device_node *np)
+ 	return -ENODEV;
+ }
+ 
+-/**
+- * stmmac_remove_config_dt - undo the effects of stmmac_probe_config_dt()
+- * @pdev: platform_device structure
+- * @plat: driver data platform structure
+- *
+- * Release resources claimed by stmmac_probe_config_dt().
+- */
+-static void stmmac_remove_config_dt(struct platform_device *pdev,
+-				    struct plat_stmmacenet_data *plat)
+-{
+-	clk_disable_unprepare(plat->stmmac_clk);
+-	clk_disable_unprepare(plat->pclk);
+-	of_node_put(plat->phy_node);
+-	of_node_put(plat->mdio_node);
+-}
+-
+ /**
+  * stmmac_probe_config_dt - parse device-tree driver parameters
+  * @pdev: platform_device structure
+@@ -490,8 +474,10 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
+ 		dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n");
+ 
+ 	rc = stmmac_mdio_setup(plat, np, &pdev->dev);
+-	if (rc)
+-		return ERR_PTR(rc);
++	if (rc) {
++		ret = ERR_PTR(rc);
++		goto error_put_phy;
++	}
+ 
+ 	of_property_read_u32(np, "tx-fifo-depth", &plat->tx_fifo_size);
+ 
+@@ -580,8 +566,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
+ 	dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg),
+ 			       GFP_KERNEL);
+ 	if (!dma_cfg) {
+-		stmmac_remove_config_dt(pdev, plat);
+-		return ERR_PTR(-ENOMEM);
++		ret = ERR_PTR(-ENOMEM);
++		goto error_put_mdio;
+ 	}
+ 	plat->dma_cfg = dma_cfg;
+ 
+@@ -609,8 +595,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
+ 
+ 	rc = stmmac_mtl_setup(pdev, plat);
+ 	if (rc) {
+-		stmmac_remove_config_dt(pdev, plat);
+-		return ERR_PTR(rc);
++		ret = ERR_PTR(rc);
++		goto error_put_mdio;
+ 	}
+ 
+ 	/* clock setup */
+@@ -662,6 +648,10 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
+ 	clk_disable_unprepare(plat->pclk);
+ error_pclk_get:
+ 	clk_disable_unprepare(plat->stmmac_clk);
++error_put_mdio:
++	of_node_put(plat->mdio_node);
++error_put_phy:
++	of_node_put(plat->phy_node);
+ 
+ 	return ret;
+ }
+@@ -670,16 +660,17 @@ static void devm_stmmac_remove_config_dt(void *data)
+ {
+ 	struct plat_stmmacenet_data *plat = data;
+ 
+-	/* Platform data argument is unused */
+-	stmmac_remove_config_dt(NULL, plat);
++	clk_disable_unprepare(plat->stmmac_clk);
++	clk_disable_unprepare(plat->pclk);
++	of_node_put(plat->mdio_node);
++	of_node_put(plat->phy_node);
+ }
+ 
+ /**
+  * devm_stmmac_probe_config_dt
+  * @pdev: platform_device structure
+  * @mac: MAC address to use
+- * Description: Devres variant of stmmac_probe_config_dt(). Does not require
+- * the user to call stmmac_remove_config_dt() at driver detach.
++ * Description: Devres variant of stmmac_probe_config_dt().
+  */
+ struct plat_stmmacenet_data *
+ devm_stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
+diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+index ba6db61dd227c4..dfca13b82bdce2 100644
+--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
++++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+@@ -3525,7 +3525,7 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
+ 	init_completion(&common->tdown_complete);
+ 	common->tx_ch_num = AM65_CPSW_DEFAULT_TX_CHNS;
+ 	common->rx_ch_num_flows = AM65_CPSW_DEFAULT_RX_CHN_FLOWS;
+-	common->pf_p0_rx_ptype_rrobin = false;
++	common->pf_p0_rx_ptype_rrobin = true;
+ 	common->default_vlan = 1;
+ 
+ 	common->ports = devm_kcalloc(dev, common->port_num,
+diff --git a/drivers/net/ethernet/ti/icssg/icss_iep.c b/drivers/net/ethernet/ti/icssg/icss_iep.c
+index 5d6d1cf78e93f2..768578c0d9587d 100644
+--- a/drivers/net/ethernet/ti/icssg/icss_iep.c
++++ b/drivers/net/ethernet/ti/icssg/icss_iep.c
+@@ -215,6 +215,9 @@ static void icss_iep_enable_shadow_mode(struct icss_iep *iep)
+ 	for (cmp = IEP_MIN_CMP; cmp < IEP_MAX_CMP; cmp++) {
+ 		regmap_update_bits(iep->map, ICSS_IEP_CMP_STAT_REG,
+ 				   IEP_CMP_STATUS(cmp), IEP_CMP_STATUS(cmp));
++
++		regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
++				   IEP_CMP_CFG_CMP_EN(cmp), 0);
+ 	}
+ 
+ 	/* enable reset counter on CMP0 event */
+@@ -780,6 +783,11 @@ int icss_iep_exit(struct icss_iep *iep)
+ 	}
+ 	icss_iep_disable(iep);
+ 
++	if (iep->pps_enabled)
++		icss_iep_pps_enable(iep, false);
++	else if (iep->perout_enabled)
++		icss_iep_perout_enable(iep, NULL, false);
++
+ 	return 0;
+ }
+ EXPORT_SYMBOL_GPL(icss_iep_exit);
+diff --git a/drivers/net/ethernet/ti/icssg/icssg_common.c b/drivers/net/ethernet/ti/icssg/icssg_common.c
+index fdebeb2f84e00c..74f0f200a89d4f 100644
+--- a/drivers/net/ethernet/ti/icssg/icssg_common.c
++++ b/drivers/net/ethernet/ti/icssg/icssg_common.c
+@@ -855,31 +855,6 @@ irqreturn_t prueth_rx_irq(int irq, void *dev_id)
+ }
+ EXPORT_SYMBOL_GPL(prueth_rx_irq);
+ 
+-void prueth_emac_stop(struct prueth_emac *emac)
+-{
+-	struct prueth *prueth = emac->prueth;
+-	int slice;
+-
+-	switch (emac->port_id) {
+-	case PRUETH_PORT_MII0:
+-		slice = ICSS_SLICE0;
+-		break;
+-	case PRUETH_PORT_MII1:
+-		slice = ICSS_SLICE1;
+-		break;
+-	default:
+-		netdev_err(emac->ndev, "invalid port\n");
+-		return;
+-	}
+-
+-	emac->fw_running = 0;
+-	if (!emac->is_sr1)
+-		rproc_shutdown(prueth->txpru[slice]);
+-	rproc_shutdown(prueth->rtu[slice]);
+-	rproc_shutdown(prueth->pru[slice]);
+-}
+-EXPORT_SYMBOL_GPL(prueth_emac_stop);
+-
+ void prueth_cleanup_tx_ts(struct prueth_emac *emac)
+ {
+ 	int i;
+diff --git a/drivers/net/ethernet/ti/icssg/icssg_config.c b/drivers/net/ethernet/ti/icssg/icssg_config.c
+index 5d2491c2943a8b..ddfd1c02a88544 100644
+--- a/drivers/net/ethernet/ti/icssg/icssg_config.c
++++ b/drivers/net/ethernet/ti/icssg/icssg_config.c
+@@ -397,7 +397,7 @@ static int prueth_emac_buffer_setup(struct prueth_emac *emac)
+ 	return 0;
+ }
+ 
+-static void icssg_init_emac_mode(struct prueth *prueth)
++void icssg_init_emac_mode(struct prueth *prueth)
+ {
+ 	/* When the device is configured as a bridge and it is being brought
+ 	 * back to the emac mode, the host mac address has to be set as 0.
+@@ -406,9 +406,6 @@ static void icssg_init_emac_mode(struct prueth *prueth)
+ 	int i;
+ 	u8 mac[ETH_ALEN] = { 0 };
+ 
+-	if (prueth->emacs_initialized)
+-		return;
+-
+ 	/* Set VLAN TABLE address base */
+ 	regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1, SMEM_VLAN_OFFSET_MASK,
+ 			   addr <<  SMEM_VLAN_OFFSET);
+@@ -423,15 +420,13 @@ static void icssg_init_emac_mode(struct prueth *prueth)
+ 	/* Clear host MAC address */
+ 	icssg_class_set_host_mac_addr(prueth->miig_rt, mac);
+ }
++EXPORT_SYMBOL_GPL(icssg_init_emac_mode);
+ 
+-static void icssg_init_fw_offload_mode(struct prueth *prueth)
++void icssg_init_fw_offload_mode(struct prueth *prueth)
+ {
+ 	u32 addr = prueth->shram.pa + EMAC_ICSSG_SWITCH_DEFAULT_VLAN_TABLE_OFFSET;
+ 	int i;
+ 
+-	if (prueth->emacs_initialized)
+-		return;
+-
+ 	/* Set VLAN TABLE address base */
+ 	regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1, SMEM_VLAN_OFFSET_MASK,
+ 			   addr <<  SMEM_VLAN_OFFSET);
+@@ -448,6 +443,7 @@ static void icssg_init_fw_offload_mode(struct prueth *prueth)
+ 		icssg_class_set_host_mac_addr(prueth->miig_rt, prueth->hw_bridge_dev->dev_addr);
+ 	icssg_set_pvid(prueth, prueth->default_vlan, PRUETH_PORT_HOST);
+ }
++EXPORT_SYMBOL_GPL(icssg_init_fw_offload_mode);
+ 
+ int icssg_config(struct prueth *prueth, struct prueth_emac *emac, int slice)
+ {
+@@ -455,11 +451,6 @@ int icssg_config(struct prueth *prueth, struct prueth_emac *emac, int slice)
+ 	struct icssg_flow_cfg __iomem *flow_cfg;
+ 	int ret;
+ 
+-	if (prueth->is_switch_mode || prueth->is_hsr_offload_mode)
+-		icssg_init_fw_offload_mode(prueth);
+-	else
+-		icssg_init_emac_mode(prueth);
+-
+ 	memset_io(config, 0, TAS_GATE_MASK_LIST0);
+ 	icssg_miig_queues_init(prueth, slice);
+ 
+@@ -786,3 +777,27 @@ void icssg_set_pvid(struct prueth *prueth, u8 vid, u8 port)
+ 		writel(pvid, prueth->shram.va + EMAC_ICSSG_SWITCH_PORT0_DEFAULT_VLAN_OFFSET);
+ }
+ EXPORT_SYMBOL_GPL(icssg_set_pvid);
++
++int emac_fdb_flow_id_updated(struct prueth_emac *emac)
++{
++	struct mgmt_cmd_rsp fdb_cmd_rsp = { 0 };
++	int slice = prueth_emac_slice(emac);
++	struct mgmt_cmd fdb_cmd = { 0 };
++	int ret;
++
++	fdb_cmd.header = ICSSG_FW_MGMT_CMD_HEADER;
++	fdb_cmd.type   = ICSSG_FW_MGMT_FDB_CMD_TYPE_RX_FLOW;
++	fdb_cmd.seqnum = ++(emac->prueth->icssg_hwcmdseq);
++	fdb_cmd.param  = 0;
++
++	fdb_cmd.param |= (slice << 4);
++	fdb_cmd.cmd_args[0] = 0;
++
++	ret = icssg_send_fdb_msg(emac, &fdb_cmd, &fdb_cmd_rsp);
++	if (ret)
++		return ret;
++
++	WARN_ON(fdb_cmd.seqnum != fdb_cmd_rsp.seqnum);
++	return fdb_cmd_rsp.status == 1 ? 0 : -EINVAL;
++}
++EXPORT_SYMBOL_GPL(emac_fdb_flow_id_updated);
+diff --git a/drivers/net/ethernet/ti/icssg/icssg_config.h b/drivers/net/ethernet/ti/icssg/icssg_config.h
+index 92c2deaa306835..c884e9fa099e6f 100644
+--- a/drivers/net/ethernet/ti/icssg/icssg_config.h
++++ b/drivers/net/ethernet/ti/icssg/icssg_config.h
+@@ -55,6 +55,7 @@ struct icssg_rxq_ctx {
+ #define ICSSG_FW_MGMT_FDB_CMD_TYPE	0x03
+ #define ICSSG_FW_MGMT_CMD_TYPE		0x04
+ #define ICSSG_FW_MGMT_PKT		0x80000000
++#define ICSSG_FW_MGMT_FDB_CMD_TYPE_RX_FLOW	0x05
+ 
+ struct icssg_r30_cmd {
+ 	u32 cmd[4];
+diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+index fe2fd1bfc904db..cb11635a8d1209 100644
+--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
++++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+@@ -164,11 +164,26 @@ static struct icssg_firmwares icssg_emac_firmwares[] = {
+ 	}
+ };
+ 
+-static int prueth_emac_start(struct prueth *prueth, struct prueth_emac *emac)
++static int prueth_start(struct rproc *rproc, const char *fw_name)
++{
++	int ret;
++
++	ret = rproc_set_firmware(rproc, fw_name);
++	if (ret)
++		return ret;
++	return rproc_boot(rproc);
++}
++
++static void prueth_shutdown(struct rproc *rproc)
++{
++	rproc_shutdown(rproc);
++}
++
++static int prueth_emac_start(struct prueth *prueth)
+ {
+ 	struct icssg_firmwares *firmwares;
+ 	struct device *dev = prueth->dev;
+-	int slice, ret;
++	int ret, slice;
+ 
+ 	if (prueth->is_switch_mode)
+ 		firmwares = icssg_switch_firmwares;
+@@ -177,49 +192,126 @@ static int prueth_emac_start(struct prueth *prueth, struct prueth_emac *emac)
+ 	else
+ 		firmwares = icssg_emac_firmwares;
+ 
+-	slice = prueth_emac_slice(emac);
+-	if (slice < 0) {
+-		netdev_err(emac->ndev, "invalid port\n");
+-		return -EINVAL;
++	for (slice = 0; slice < PRUETH_NUM_MACS; slice++) {
++		ret = prueth_start(prueth->pru[slice], firmwares[slice].pru);
++		if (ret) {
++			dev_err(dev, "failed to boot PRU%d: %d\n", slice, ret);
++			goto unwind_slices;
++		}
++
++		ret = prueth_start(prueth->rtu[slice], firmwares[slice].rtu);
++		if (ret) {
++			dev_err(dev, "failed to boot RTU%d: %d\n", slice, ret);
++			rproc_shutdown(prueth->pru[slice]);
++			goto unwind_slices;
++		}
++
++		ret = prueth_start(prueth->txpru[slice], firmwares[slice].txpru);
++		if (ret) {
++			dev_err(dev, "failed to boot TX_PRU%d: %d\n", slice, ret);
++			rproc_shutdown(prueth->rtu[slice]);
++			rproc_shutdown(prueth->pru[slice]);
++			goto unwind_slices;
++		}
+ 	}
+ 
+-	ret = icssg_config(prueth, emac, slice);
+-	if (ret)
+-		return ret;
++	return 0;
+ 
+-	ret = rproc_set_firmware(prueth->pru[slice], firmwares[slice].pru);
+-	ret = rproc_boot(prueth->pru[slice]);
+-	if (ret) {
+-		dev_err(dev, "failed to boot PRU%d: %d\n", slice, ret);
+-		return -EINVAL;
++unwind_slices:
++	while (--slice >= 0) {
++		prueth_shutdown(prueth->txpru[slice]);
++		prueth_shutdown(prueth->rtu[slice]);
++		prueth_shutdown(prueth->pru[slice]);
+ 	}
+ 
+-	ret = rproc_set_firmware(prueth->rtu[slice], firmwares[slice].rtu);
+-	ret = rproc_boot(prueth->rtu[slice]);
+-	if (ret) {
+-		dev_err(dev, "failed to boot RTU%d: %d\n", slice, ret);
+-		goto halt_pru;
++	return ret;
++}
++
++static void prueth_emac_stop(struct prueth *prueth)
++{
++	int slice;
++
++	for (slice = 0; slice < PRUETH_NUM_MACS; slice++) {
++		prueth_shutdown(prueth->txpru[slice]);
++		prueth_shutdown(prueth->rtu[slice]);
++		prueth_shutdown(prueth->pru[slice]);
+ 	}
++}
++
++static int prueth_emac_common_start(struct prueth *prueth)
++{
++	struct prueth_emac *emac;
++	int ret = 0;
++	int slice;
++
++	if (!prueth->emac[ICSS_SLICE0] && !prueth->emac[ICSS_SLICE1])
++		return -EINVAL;
++
++	/* clear SMEM and MSMC settings for all slices */
++	memset_io(prueth->msmcram.va, 0, prueth->msmcram.size);
++	memset_io(prueth->shram.va, 0, ICSSG_CONFIG_OFFSET_SLICE1 * PRUETH_NUM_MACS);
++
++	icssg_class_default(prueth->miig_rt, ICSS_SLICE0, 0, false);
++	icssg_class_default(prueth->miig_rt, ICSS_SLICE1, 0, false);
++
++	if (prueth->is_switch_mode || prueth->is_hsr_offload_mode)
++		icssg_init_fw_offload_mode(prueth);
++	else
++		icssg_init_emac_mode(prueth);
++
++	for (slice = 0; slice < PRUETH_NUM_MACS; slice++) {
++		emac = prueth->emac[slice];
++		if (!emac)
++			continue;
++		ret = icssg_config(prueth, emac, slice);
++		if (ret)
++			goto disable_class;
++	}
++
++	ret = prueth_emac_start(prueth);
++	if (ret)
++		goto disable_class;
+ 
+-	ret = rproc_set_firmware(prueth->txpru[slice], firmwares[slice].txpru);
+-	ret = rproc_boot(prueth->txpru[slice]);
++	emac = prueth->emac[ICSS_SLICE0] ? prueth->emac[ICSS_SLICE0] :
++	       prueth->emac[ICSS_SLICE1];
++	ret = icss_iep_init(emac->iep, &prueth_iep_clockops,
++			    emac, IEP_DEFAULT_CYCLE_TIME_NS);
+ 	if (ret) {
+-		dev_err(dev, "failed to boot TX_PRU%d: %d\n", slice, ret);
+-		goto halt_rtu;
++		dev_err(prueth->dev, "Failed to initialize IEP module\n");
++		goto stop_pruss;
+ 	}
+ 
+-	emac->fw_running = 1;
+ 	return 0;
+ 
+-halt_rtu:
+-	rproc_shutdown(prueth->rtu[slice]);
++stop_pruss:
++	prueth_emac_stop(prueth);
+ 
+-halt_pru:
+-	rproc_shutdown(prueth->pru[slice]);
++disable_class:
++	icssg_class_disable(prueth->miig_rt, ICSS_SLICE0);
++	icssg_class_disable(prueth->miig_rt, ICSS_SLICE1);
+ 
+ 	return ret;
+ }
+ 
++static int prueth_emac_common_stop(struct prueth *prueth)
++{
++	struct prueth_emac *emac;
++
++	if (!prueth->emac[ICSS_SLICE0] && !prueth->emac[ICSS_SLICE1])
++		return -EINVAL;
++
++	icssg_class_disable(prueth->miig_rt, ICSS_SLICE0);
++	icssg_class_disable(prueth->miig_rt, ICSS_SLICE1);
++
++	prueth_emac_stop(prueth);
++
++	emac = prueth->emac[ICSS_SLICE0] ? prueth->emac[ICSS_SLICE0] :
++	       prueth->emac[ICSS_SLICE1];
++	icss_iep_exit(emac->iep);
++
++	return 0;
++}
++
+ /* called back by the PHY layer if there is a change in link state of the hw port */
+ static void emac_adjust_link(struct net_device *ndev)
+ {
+@@ -374,9 +466,6 @@ static void prueth_iep_settime(void *clockops_data, u64 ns)
+ 	u32 cycletime;
+ 	int timeout;
+ 
+-	if (!emac->fw_running)
+-		return;
+-
+ 	sc_descp = emac->prueth->shram.va + TIMESYNC_FW_WC_SETCLOCK_DESC_OFFSET;
+ 
+ 	cycletime = IEP_DEFAULT_CYCLE_TIME_NS;
+@@ -543,23 +632,17 @@ static int emac_ndo_open(struct net_device *ndev)
+ {
+ 	struct prueth_emac *emac = netdev_priv(ndev);
+ 	int ret, i, num_data_chn = emac->tx_ch_num;
++	struct icssg_flow_cfg __iomem *flow_cfg;
+ 	struct prueth *prueth = emac->prueth;
+ 	int slice = prueth_emac_slice(emac);
+ 	struct device *dev = prueth->dev;
+ 	int max_rx_flows;
+ 	int rx_flow;
+ 
+-	/* clear SMEM and MSMC settings for all slices */
+-	if (!prueth->emacs_initialized) {
+-		memset_io(prueth->msmcram.va, 0, prueth->msmcram.size);
+-		memset_io(prueth->shram.va, 0, ICSSG_CONFIG_OFFSET_SLICE1 * PRUETH_NUM_MACS);
+-	}
+-
+ 	/* set h/w MAC as user might have re-configured */
+ 	ether_addr_copy(emac->mac_addr, ndev->dev_addr);
+ 
+ 	icssg_class_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr);
+-	icssg_class_default(prueth->miig_rt, slice, 0, false);
+ 	icssg_ft1_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr);
+ 
+ 	/* Notify the stack of the actual queue counts. */
+@@ -597,18 +680,23 @@ static int emac_ndo_open(struct net_device *ndev)
+ 		goto cleanup_napi;
+ 	}
+ 
+-	/* reset and start PRU firmware */
+-	ret = prueth_emac_start(prueth, emac);
+-	if (ret)
+-		goto free_rx_irq;
++	if (!prueth->emacs_initialized) {
++		ret = prueth_emac_common_start(prueth);
++		if (ret)
++			goto free_rx_irq;
++	}
+ 
+-	icssg_mii_update_mtu(prueth->mii_rt, slice, ndev->max_mtu);
++	flow_cfg = emac->dram.va + ICSSG_CONFIG_OFFSET + PSI_L_REGULAR_FLOW_ID_BASE_OFFSET;
++	writew(emac->rx_flow_id_base, &flow_cfg->rx_base_flow);
++	ret = emac_fdb_flow_id_updated(emac);
+ 
+-	if (!prueth->emacs_initialized) {
+-		ret = icss_iep_init(emac->iep, &prueth_iep_clockops,
+-				    emac, IEP_DEFAULT_CYCLE_TIME_NS);
++	if (ret) {
++		netdev_err(ndev, "Failed to update Rx Flow ID %d", ret);
++		goto stop;
+ 	}
+ 
++	icssg_mii_update_mtu(prueth->mii_rt, slice, ndev->max_mtu);
++
+ 	ret = request_threaded_irq(emac->tx_ts_irq, NULL, prueth_tx_ts_irq,
+ 				   IRQF_ONESHOT, dev_name(dev), emac);
+ 	if (ret)
+@@ -653,7 +741,8 @@ static int emac_ndo_open(struct net_device *ndev)
+ free_tx_ts_irq:
+ 	free_irq(emac->tx_ts_irq, emac);
+ stop:
+-	prueth_emac_stop(emac);
++	if (!prueth->emacs_initialized)
++		prueth_emac_common_stop(prueth);
+ free_rx_irq:
+ 	free_irq(emac->rx_chns.irq[rx_flow], emac);
+ cleanup_napi:
+@@ -689,8 +778,6 @@ static int emac_ndo_stop(struct net_device *ndev)
+ 	if (ndev->phydev)
+ 		phy_stop(ndev->phydev);
+ 
+-	icssg_class_disable(prueth->miig_rt, prueth_emac_slice(emac));
+-
+ 	if (emac->prueth->is_hsr_offload_mode)
+ 		__dev_mc_unsync(ndev, icssg_prueth_hsr_del_mcast);
+ 	else
+@@ -728,11 +815,9 @@ static int emac_ndo_stop(struct net_device *ndev)
+ 	/* Destroying the queued work in ndo_stop() */
+ 	cancel_delayed_work_sync(&emac->stats_work);
+ 
+-	if (prueth->emacs_initialized == 1)
+-		icss_iep_exit(emac->iep);
+-
+ 	/* stop PRUs */
+-	prueth_emac_stop(emac);
++	if (prueth->emacs_initialized == 1)
++		prueth_emac_common_stop(prueth);
+ 
+ 	free_irq(emac->tx_ts_irq, emac);
+ 
+@@ -1010,10 +1095,11 @@ static void prueth_offload_fwd_mark_update(struct prueth *prueth)
+ 	}
+ }
+ 
+-static void prueth_emac_restart(struct prueth *prueth)
++static int prueth_emac_restart(struct prueth *prueth)
+ {
+ 	struct prueth_emac *emac0 = prueth->emac[PRUETH_MAC0];
+ 	struct prueth_emac *emac1 = prueth->emac[PRUETH_MAC1];
++	int ret;
+ 
+ 	/* Detach the net_device for both PRUeth ports */
+ 	if (netif_running(emac0->ndev))
+@@ -1022,36 +1108,46 @@ static void prueth_emac_restart(struct prueth *prueth)
+ 		netif_device_detach(emac1->ndev);
+ 
+ 	/* Disable both PRUeth ports */
+-	icssg_set_port_state(emac0, ICSSG_EMAC_PORT_DISABLE);
+-	icssg_set_port_state(emac1, ICSSG_EMAC_PORT_DISABLE);
++	ret = icssg_set_port_state(emac0, ICSSG_EMAC_PORT_DISABLE);
++	ret |= icssg_set_port_state(emac1, ICSSG_EMAC_PORT_DISABLE);
++	if (ret)
++		return ret;
+ 
+ 	/* Stop both pru cores for both PRUeth ports */
+-	prueth_emac_stop(emac0);
+-	prueth->emacs_initialized--;
+-	prueth_emac_stop(emac1);
+-	prueth->emacs_initialized--;
++	ret = prueth_emac_common_stop(prueth);
++	if (ret) {
++		dev_err(prueth->dev, "Failed to stop the firmwares");
++		return ret;
++	}
+ 
+ 	/* Start both pru cores for both PRUeth ports */
+-	prueth_emac_start(prueth, emac0);
+-	prueth->emacs_initialized++;
+-	prueth_emac_start(prueth, emac1);
+-	prueth->emacs_initialized++;
++	ret = prueth_emac_common_start(prueth);
++	if (ret) {
++		dev_err(prueth->dev, "Failed to start the firmwares");
++		return ret;
++	}
+ 
+ 	/* Enable forwarding for both PRUeth ports */
+-	icssg_set_port_state(emac0, ICSSG_EMAC_PORT_FORWARD);
+-	icssg_set_port_state(emac1, ICSSG_EMAC_PORT_FORWARD);
++	ret = icssg_set_port_state(emac0, ICSSG_EMAC_PORT_FORWARD);
++	ret |= icssg_set_port_state(emac1, ICSSG_EMAC_PORT_FORWARD);
+ 
++	/* Attach net_device for both PRUeth ports */
+ 	netif_device_attach(emac0->ndev);
+ 	netif_device_attach(emac1->ndev);
++
++	return ret;
+ }
+ 
+ static void icssg_change_mode(struct prueth *prueth)
+ {
+ 	struct prueth_emac *emac;
+-	int mac;
++	int mac, ret;
+ 
+-	prueth_emac_restart(prueth);
++	ret = prueth_emac_restart(prueth);
++	if (ret) {
++		dev_err(prueth->dev, "Failed to restart the firmwares, aborting the process");
++		return;
++	}
+ 
+ 	for (mac = PRUETH_MAC0; mac < PRUETH_NUM_MACS; mac++) {
+ 		emac = prueth->emac[mac];
+@@ -1130,13 +1226,18 @@ static void prueth_netdevice_port_unlink(struct net_device *ndev)
+ {
+ 	struct prueth_emac *emac = netdev_priv(ndev);
+ 	struct prueth *prueth = emac->prueth;
++	int ret;
+ 
+ 	prueth->br_members &= ~BIT(emac->port_id);
+ 
+ 	if (prueth->is_switch_mode) {
+ 		prueth->is_switch_mode = false;
+ 		emac->port_vlan = 0;
+-		prueth_emac_restart(prueth);
++		ret = prueth_emac_restart(prueth);
++		if (ret) {
++			dev_err(prueth->dev, "Failed to restart the firmwares, aborting the process");
++			return;
++		}
+ 	}
+ 
+ 	prueth_offload_fwd_mark_update(prueth);
+@@ -1185,6 +1286,7 @@ static void prueth_hsr_port_unlink(struct net_device *ndev)
+ 	struct prueth *prueth = emac->prueth;
+ 	struct prueth_emac *emac0;
+ 	struct prueth_emac *emac1;
++	int ret;
+ 
+ 	emac0 = prueth->emac[PRUETH_MAC0];
+ 	emac1 = prueth->emac[PRUETH_MAC1];
+@@ -1195,7 +1297,11 @@ static void prueth_hsr_port_unlink(struct net_device *ndev)
+ 		emac0->port_vlan = 0;
+ 		emac1->port_vlan = 0;
+ 		prueth->hsr_dev = NULL;
+-		prueth_emac_restart(prueth);
++		ret = prueth_emac_restart(prueth);
++		if (ret) {
++			dev_err(prueth->dev, "Failed to restart the firmwares, aborting the process");
++			return;
++		}
+ 		netdev_dbg(ndev, "Disabling HSR Offload mode\n");
+ 	}
+ }
+@@ -1370,13 +1476,10 @@ static int prueth_probe(struct platform_device *pdev)
+ 		prueth->pa_stats = NULL;
+ 	}
+ 
+-	if (eth0_node) {
++	if (eth0_node || eth1_node) {
+ 		ret = prueth_get_cores(prueth, ICSS_SLICE0, false);
+ 		if (ret)
+ 			goto put_cores;
+-	}
+-
+-	if (eth1_node) {
+ 		ret = prueth_get_cores(prueth, ICSS_SLICE1, false);
+ 		if (ret)
+ 			goto put_cores;
+@@ -1575,14 +1678,12 @@ static int prueth_probe(struct platform_device *pdev)
+ 	pruss_put(prueth->pruss);
+ 
+ put_cores:
+-	if (eth1_node) {
+-		prueth_put_cores(prueth, ICSS_SLICE1);
+-		of_node_put(eth1_node);
+-	}
+-
+-	if (eth0_node) {
++	if (eth0_node || eth1_node) {
+ 		prueth_put_cores(prueth, ICSS_SLICE0);
+ 		of_node_put(eth0_node);
++
++		prueth_put_cores(prueth, ICSS_SLICE1);
++		of_node_put(eth1_node);
+ 	}
+ 
+ 	return ret;
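prueth_emac_start() above boots the PRU, RTU and TX_PRU cores per slice and, on any failure, unwinds with "while (--slice >= 0)" so that only fully started slices are shut down. A standalone model of that unwind shape (the boot/shutdown stubs are placeholders):

#include <stdio.h>

#define NUM_SLICES 2

static int boot_slice(int s)      { return s == 1 ? -1 : 0; /* slice 1 fails */ }
static void shutdown_slice(int s) { printf("shutdown slice %d\n", s); }

static int start_all(void)
{
	int slice, ret;

	for (slice = 0; slice < NUM_SLICES; slice++) {
		ret = boot_slice(slice);
		if (ret)
			goto unwind;
	}
	return 0;

unwind:
	while (--slice >= 0)	/* only the fully started slices */
		shutdown_slice(slice);
	return ret;
}

int main(void)
{
	return start_all() ? 1 : 0;
}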
+diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.h b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
+index f5c1d473e9f991..5473315ea20406 100644
+--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.h
++++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
+@@ -140,7 +140,6 @@ struct prueth_rx_chn {
+ /* data for each emac port */
+ struct prueth_emac {
+ 	bool is_sr1;
+-	bool fw_running;
+ 	struct prueth *prueth;
+ 	struct net_device *ndev;
+ 	u8 mac_addr[6];
+@@ -361,6 +360,8 @@ int icssg_set_port_state(struct prueth_emac *emac,
+ 			 enum icssg_port_state_cmd state);
+ void icssg_config_set_speed(struct prueth_emac *emac);
+ void icssg_config_half_duplex(struct prueth_emac *emac);
++void icssg_init_emac_mode(struct prueth *prueth);
++void icssg_init_fw_offload_mode(struct prueth *prueth);
+ 
+ /* Buffer queue helpers */
+ int icssg_queue_pop(struct prueth *prueth, u8 queue);
+@@ -377,6 +378,7 @@ void icssg_vtbl_modify(struct prueth_emac *emac, u8 vid, u8 port_mask,
+ 		       u8 untag_mask, bool add);
+ u16 icssg_get_pvid(struct prueth_emac *emac);
+ void icssg_set_pvid(struct prueth *prueth, u8 vid, u8 port);
++int emac_fdb_flow_id_updated(struct prueth_emac *emac);
+ #define prueth_napi_to_tx_chn(pnapi) \
+ 	container_of(pnapi, struct prueth_tx_chn, napi_tx)
+ 
+@@ -407,7 +409,6 @@ void emac_rx_timestamp(struct prueth_emac *emac,
+ 		       struct sk_buff *skb, u32 *psdata);
+ enum netdev_tx icssg_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev);
+ irqreturn_t prueth_rx_irq(int irq, void *dev_id);
+-void prueth_emac_stop(struct prueth_emac *emac);
+ void prueth_cleanup_tx_ts(struct prueth_emac *emac);
+ int icssg_napi_rx_poll(struct napi_struct *napi_rx, int budget);
+ int prueth_prepare_rx_chan(struct prueth_emac *emac,
+diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c b/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
+index 292f04d29f4f7b..f88cdc8f012f12 100644
+--- a/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
++++ b/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
+@@ -440,7 +440,6 @@ static int prueth_emac_start(struct prueth *prueth, struct prueth_emac *emac)
+ 		goto halt_pru;
+ 	}
+ 
+-	emac->fw_running = 1;
+ 	return 0;
+ 
+ halt_pru:
+@@ -449,6 +448,29 @@ static int prueth_emac_start(struct prueth *prueth, struct prueth_emac *emac)
+ 	return ret;
+ }
+ 
++static void prueth_emac_stop(struct prueth_emac *emac)
++{
++	struct prueth *prueth = emac->prueth;
++	int slice;
++
++	switch (emac->port_id) {
++	case PRUETH_PORT_MII0:
++		slice = ICSS_SLICE0;
++		break;
++	case PRUETH_PORT_MII1:
++		slice = ICSS_SLICE1;
++		break;
++	default:
++		netdev_err(emac->ndev, "invalid port\n");
++		return;
++	}
++
++	if (!emac->is_sr1)
++		rproc_shutdown(prueth->txpru[slice]);
++	rproc_shutdown(prueth->rtu[slice]);
++	rproc_shutdown(prueth->pru[slice]);
++}
++
+ /**
+  * emac_ndo_open - EMAC device open
+  * @ndev: network adapter device
+diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
+index 65b0a3115e14cd..64926240b0071d 100644
+--- a/drivers/net/phy/micrel.c
++++ b/drivers/net/phy/micrel.c
+@@ -432,10 +432,12 @@ struct kszphy_ptp_priv {
+ struct kszphy_priv {
+ 	struct kszphy_ptp_priv ptp_priv;
+ 	const struct kszphy_type *type;
++	struct clk *clk;
+ 	int led_mode;
+ 	u16 vct_ctrl1000;
+ 	bool rmii_ref_clk_sel;
+ 	bool rmii_ref_clk_sel_val;
++	bool clk_enable;
+ 	u64 stats[ARRAY_SIZE(kszphy_hw_stats)];
+ };
+ 
+@@ -2052,6 +2054,46 @@ static void kszphy_get_stats(struct phy_device *phydev,
+ 		data[i] = kszphy_get_stat(phydev, i);
+ }
+ 
++static void kszphy_enable_clk(struct phy_device *phydev)
++{
++	struct kszphy_priv *priv = phydev->priv;
++
++	if (!priv->clk_enable && priv->clk) {
++		clk_prepare_enable(priv->clk);
++		priv->clk_enable = true;
++	}
++}
++
++static void kszphy_disable_clk(struct phy_device *phydev)
++{
++	struct kszphy_priv *priv = phydev->priv;
++
++	if (priv->clk_enable && priv->clk) {
++		clk_disable_unprepare(priv->clk);
++		priv->clk_enable = false;
++	}
++}
++
++static int kszphy_generic_resume(struct phy_device *phydev)
++{
++	kszphy_enable_clk(phydev);
++
++	return genphy_resume(phydev);
++}
++
++static int kszphy_generic_suspend(struct phy_device *phydev)
++{
++	int ret;
++
++	ret = genphy_suspend(phydev);
++	if (ret)
++		return ret;
++
++	kszphy_disable_clk(phydev);
++
++	return 0;
++}
++
+ static int kszphy_suspend(struct phy_device *phydev)
+ {
+ 	/* Disable PHY Interrupts */
+@@ -2061,7 +2103,7 @@ static int kszphy_suspend(struct phy_device *phydev)
+ 			phydev->drv->config_intr(phydev);
+ 	}
+ 
+-	return genphy_suspend(phydev);
++	return kszphy_generic_suspend(phydev);
+ }
+ 
+ static void kszphy_parse_led_mode(struct phy_device *phydev)
+@@ -2092,7 +2134,9 @@ static int kszphy_resume(struct phy_device *phydev)
+ {
+ 	int ret;
+ 
+-	genphy_resume(phydev);
++	ret = kszphy_generic_resume(phydev);
++	if (ret)
++		return ret;
+ 
+ 	/* After switching from power-down to normal mode, an internal global
+ 	 * reset is automatically generated. Wait a minimum of 1 ms before
+@@ -2114,6 +2158,24 @@ static int kszphy_resume(struct phy_device *phydev)
+ 	return 0;
+ }
+ 
++/* Because of errata DS80000700A (receiver error following software
++ * power down), the suspend and resume callbacks only disable and
++ * enable the external RMII reference clock.
++ */
++static int ksz8041_resume(struct phy_device *phydev)
++{
++	kszphy_enable_clk(phydev);
++
++	return 0;
++}
++
++static int ksz8041_suspend(struct phy_device *phydev)
++{
++	kszphy_disable_clk(phydev);
++
++	return 0;
++}
++
+ static int ksz9477_resume(struct phy_device *phydev)
+ {
+ 	int ret;
+@@ -2161,7 +2223,10 @@ static int ksz8061_resume(struct phy_device *phydev)
+ 	if (!(ret & BMCR_PDOWN))
+ 		return 0;
+ 
+-	genphy_resume(phydev);
++	ret = kszphy_generic_resume(phydev);
++	if (ret)
++		return ret;
++
+ 	usleep_range(1000, 2000);
+ 
+ 	/* Re-program the value after chip is reset. */
+@@ -2179,6 +2244,11 @@ static int ksz8061_resume(struct phy_device *phydev)
+ 	return 0;
+ }
+ 
++static int ksz8061_suspend(struct phy_device *phydev)
++{
++	return kszphy_suspend(phydev);
++}
++
+ static int kszphy_probe(struct phy_device *phydev)
+ {
+ 	const struct kszphy_type *type = phydev->drv->driver_data;
+@@ -2219,10 +2289,14 @@ static int kszphy_probe(struct phy_device *phydev)
+ 	} else if (!clk) {
+ 		/* unnamed clock from the generic ethernet-phy binding */
+ 		clk = devm_clk_get_optional_enabled(&phydev->mdio.dev, NULL);
+-		if (IS_ERR(clk))
+-			return PTR_ERR(clk);
+ 	}
+ 
++	if (IS_ERR(clk))
++		return PTR_ERR(clk);
++
++	clk_disable_unprepare(clk);
++	priv->clk = clk;
++
+ 	if (ksz8041_fiber_mode(phydev))
+ 		phydev->port = PORT_FIBRE;
+ 
+@@ -5292,6 +5366,21 @@ static int lan8841_probe(struct phy_device *phydev)
+ 	return 0;
+ }
+ 
++static int lan8804_resume(struct phy_device *phydev)
++{
++	return kszphy_resume(phydev);
++}
++
++static int lan8804_suspend(struct phy_device *phydev)
++{
++	return kszphy_generic_suspend(phydev);
++}
++
++static int lan8841_resume(struct phy_device *phydev)
++{
++	return kszphy_generic_resume(phydev);
++}
++
+ static int lan8841_suspend(struct phy_device *phydev)
+ {
+ 	struct kszphy_priv *priv = phydev->priv;
+@@ -5300,7 +5389,7 @@ static int lan8841_suspend(struct phy_device *phydev)
+ 	if (ptp_priv->ptp_clock)
+ 		ptp_cancel_worker_sync(ptp_priv->ptp_clock);
+ 
+-	return genphy_suspend(phydev);
++	return kszphy_generic_suspend(phydev);
+ }
+ 
+ static struct phy_driver ksphy_driver[] = {
+@@ -5360,9 +5449,8 @@ static struct phy_driver ksphy_driver[] = {
+ 	.get_sset_count = kszphy_get_sset_count,
+ 	.get_strings	= kszphy_get_strings,
+ 	.get_stats	= kszphy_get_stats,
+-	/* No suspend/resume callbacks because of errata DS80000700A,
+-	 * receiver error following software power down.
+-	 */
++	.suspend	= ksz8041_suspend,
++	.resume		= ksz8041_resume,
+ }, {
+ 	.phy_id		= PHY_ID_KSZ8041RNLI,
+ 	.phy_id_mask	= MICREL_PHY_ID_MASK,
+@@ -5438,7 +5526,7 @@ static struct phy_driver ksphy_driver[] = {
+ 	.soft_reset	= genphy_soft_reset,
+ 	.config_intr	= kszphy_config_intr,
+ 	.handle_interrupt = kszphy_handle_interrupt,
+-	.suspend	= kszphy_suspend,
++	.suspend	= ksz8061_suspend,
+ 	.resume		= ksz8061_resume,
+ }, {
+ 	.phy_id		= PHY_ID_KSZ9021,
+@@ -5509,8 +5597,8 @@ static struct phy_driver ksphy_driver[] = {
+ 	.get_sset_count	= kszphy_get_sset_count,
+ 	.get_strings	= kszphy_get_strings,
+ 	.get_stats	= kszphy_get_stats,
+-	.suspend	= genphy_suspend,
+-	.resume		= kszphy_resume,
++	.suspend	= lan8804_suspend,
++	.resume		= lan8804_resume,
+ 	.config_intr	= lan8804_config_intr,
+ 	.handle_interrupt = lan8804_handle_interrupt,
+ }, {
+@@ -5528,7 +5616,7 @@ static struct phy_driver ksphy_driver[] = {
+ 	.get_strings	= kszphy_get_strings,
+ 	.get_stats	= kszphy_get_stats,
+ 	.suspend	= lan8841_suspend,
+-	.resume		= genphy_resume,
++	.resume		= lan8841_resume,
+ 	.cable_test_start	= lan8814_cable_test_start,
+ 	.cable_test_get_status	= ksz886x_cable_test_get_status,
+ }, {
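The micrel hunks gate the PHY's external reference clock across suspend/resume and track the state in a clk_enable flag so the enable/disable calls stay balanced even if a callback runs twice. A userspace model of the guard (the clk calls are stand-ins for the kernel clk API):

#include <stdbool.h>
#include <stdio.h>

struct phy {
	bool have_clk;
	bool clk_enable;	/* mirrors the new kszphy_priv flag */
};

static void clk_on_stub(void)  { puts("clk on"); }
static void clk_off_stub(void) { puts("clk off"); }

static void phy_enable_clk(struct phy *p)
{
	if (!p->clk_enable && p->have_clk) {
		clk_on_stub();
		p->clk_enable = true;
	}
}

static void phy_disable_clk(struct phy *p)
{
	if (p->clk_enable && p->have_clk) {
		clk_off_stub();
		p->clk_enable = false;
	}
}

int main(void)
{
	struct phy p = { .have_clk = true };

	phy_enable_clk(&p);
	phy_enable_clk(&p);	/* second call is a no-op */
	phy_disable_clk(&p);
	phy_disable_clk(&p);	/* still balanced: also a no-op */
	return 0;
}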
+diff --git a/drivers/net/pse-pd/tps23881.c b/drivers/net/pse-pd/tps23881.c
+index 5c4e88be46ee33..8797ca1a8a219c 100644
+--- a/drivers/net/pse-pd/tps23881.c
++++ b/drivers/net/pse-pd/tps23881.c
+@@ -64,15 +64,11 @@ static int tps23881_pi_enable(struct pse_controller_dev *pcdev, int id)
+ 	if (id >= TPS23881_MAX_CHANS)
+ 		return -ERANGE;
+ 
+-	ret = i2c_smbus_read_word_data(client, TPS23881_REG_PW_STATUS);
+-	if (ret < 0)
+-		return ret;
+-
+ 	chan = priv->port[id].chan[0];
+ 	if (chan < 4)
+-		val = (u16)(ret | BIT(chan));
++		val = BIT(chan);
+ 	else
+-		val = (u16)(ret | BIT(chan + 4));
++		val = BIT(chan + 4);
+ 
+ 	if (priv->port[id].is_4p) {
+ 		chan = priv->port[id].chan[1];
+@@ -100,15 +96,11 @@ static int tps23881_pi_disable(struct pse_controller_dev *pcdev, int id)
+ 	if (id >= TPS23881_MAX_CHANS)
+ 		return -ERANGE;
+ 
+-	ret = i2c_smbus_read_word_data(client, TPS23881_REG_PW_STATUS);
+-	if (ret < 0)
+-		return ret;
+-
+ 	chan = priv->port[id].chan[0];
+ 	if (chan < 4)
+-		val = (u16)(ret | BIT(chan + 4));
++		val = BIT(chan + 4);
+ 	else
+-		val = (u16)(ret | BIT(chan + 8));
++		val = BIT(chan + 8);
+ 
+ 	if (priv->port[id].is_4p) {
+ 		chan = priv->port[id].chan[1];
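The tps23881 fix stops OR-ing the read-back power status into the on/off push-button register: those bits act as self-clearing commands, so writing back every already-set status bit would issue commands for unrelated channels. A tiny before/after model of the value written:

#include <stdio.h>

#define BIT(n) (1u << (n))

int main(void)
{
	unsigned int status = BIT(0) | BIT(2);	/* channels 0 and 2 are on */
	unsigned int chan = 1;

	printf("old write: 0x%02x (re-commands ch0/ch2 too)\n",
	       status | BIT(chan));
	printf("new write: 0x%02x (only ch1)\n", BIT(chan));
	return 0;
}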
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 0c011d8f5d4db2..9fe7f704a2f7b8 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1365,6 +1365,9 @@ static const struct usb_device_id products[] = {
+ 	{QMI_QUIRK_SET_DTR(0x1bc7, 0x10a0, 0)}, /* Telit FN920C04 */
+ 	{QMI_QUIRK_SET_DTR(0x1bc7, 0x10a4, 0)}, /* Telit FN920C04 */
+ 	{QMI_QUIRK_SET_DTR(0x1bc7, 0x10a9, 0)}, /* Telit FN920C04 */
++	{QMI_QUIRK_SET_DTR(0x1bc7, 0x10c0, 0)}, /* Telit FE910C04 */
++	{QMI_QUIRK_SET_DTR(0x1bc7, 0x10c4, 0)}, /* Telit FE910C04 */
++	{QMI_QUIRK_SET_DTR(0x1bc7, 0x10c8, 0)}, /* Telit FE910C04 */
+ 	{QMI_FIXED_INTF(0x1bc7, 0x1100, 3)},	/* Telit ME910 */
+ 	{QMI_FIXED_INTF(0x1bc7, 0x1101, 3)},	/* Telit ME910 dual modem */
+ 	{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)},	/* Telit LE920 */
+diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
+index fa1be8c54d3c1a..c18c6e933f478e 100644
+--- a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
++++ b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
+@@ -161,6 +161,7 @@ const struct iwl_cfg_trans_params iwl_gl_trans_cfg = {
+ 
+ const char iwl_bz_name[] = "Intel(R) TBD Bz device";
+ const char iwl_fm_name[] = "Intel(R) Wi-Fi 7 BE201 320MHz";
++const char iwl_wh_name[] = "Intel(R) Wi-Fi 7 BE211 320MHz";
+ const char iwl_gl_name[] = "Intel(R) Wi-Fi 7 BE200 320MHz";
+ const char iwl_mtp_name[] = "Intel(R) Wi-Fi 7 BE202 160MHz";
+ 
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+index 34c91deca57b1b..17721bb47e2511 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+@@ -545,6 +545,7 @@ extern const char iwl_ax231_name[];
+ extern const char iwl_ax411_name[];
+ extern const char iwl_bz_name[];
+ extern const char iwl_fm_name[];
++extern const char iwl_wh_name[];
+ extern const char iwl_gl_name[];
+ extern const char iwl_mtp_name[];
+ extern const char iwl_sc_name[];
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+index 1a814eb6743e80..6a4300c01d41d1 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+@@ -2871,6 +2871,7 @@ static void iwl_mvm_query_set_freqs(struct iwl_mvm *mvm,
+ 				    int idx)
+ {
+ 	int i;
++	int n_channels = 0;
+ 
+ 	if (fw_has_api(&mvm->fw->ucode_capa,
+ 		       IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS)) {
+@@ -2879,7 +2880,7 @@ static void iwl_mvm_query_set_freqs(struct iwl_mvm *mvm,
+ 
+ 		for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN * 8; i++)
+ 			if (matches[idx].matching_channels[i / 8] & (BIT(i % 8)))
+-				match->channels[match->n_channels++] =
++				match->channels[n_channels++] =
+ 					mvm->nd_channels[i]->center_freq;
+ 	} else {
+ 		struct iwl_scan_offload_profile_match_v1 *matches =
+@@ -2887,9 +2888,11 @@ static void iwl_mvm_query_set_freqs(struct iwl_mvm *mvm,
+ 
+ 		for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN_V1 * 8; i++)
+ 			if (matches[idx].matching_channels[i / 8] & (BIT(i % 8)))
+-				match->channels[match->n_channels++] =
++				match->channels[n_channels++] =
+ 					mvm->nd_channels[i]->center_freq;
+ 	}
++	/* We may have ended up with fewer channels than we allocated. */
++	match->n_channels = n_channels;
+ }
+ 
+ /**
+@@ -2970,6 +2973,8 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
+ 			     GFP_KERNEL);
+ 	if (!net_detect || !n_matches)
+ 		goto out_report_nd;
++	net_detect->n_matches = n_matches;
++	n_matches = 0;
+ 
+ 	for_each_set_bit(i, &matched_profiles, mvm->n_nd_match_sets) {
+ 		struct cfg80211_wowlan_nd_match *match;
+@@ -2983,8 +2988,9 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
+ 				GFP_KERNEL);
+ 		if (!match)
+ 			goto out_report_nd;
++		match->n_channels = n_channels;
+ 
+-		net_detect->matches[net_detect->n_matches++] = match;
++		net_detect->matches[n_matches++] = match;
+ 
+ 		/* We inverted the order of the SSIDs in the scan
+ 		 * request, so invert the index here.
+@@ -2999,6 +3005,8 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
+ 
+ 		iwl_mvm_query_set_freqs(mvm, d3_data->nd_results, match, i);
+ 	}
++	/* We may have fewer matches than we allocated. */
++	net_detect->n_matches = n_matches;
+ 
+ out_report_nd:
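The d3 fix counts populated entries in a local variable and commits the final value to the public n_matches/n_channels fields only once, so an allocation failure mid-loop can no longer leave the counts larger than the number of entries actually filled in. A minimal model of the count-locally-then-publish pattern:

#include <stdio.h>

#define ALLOCATED 8

int main(void)
{
	int channels[ALLOCATED];
	int n_channels = 0;	/* local counter, not the public field */
	int i, published;

	for (i = 0; i < ALLOCATED; i++) {
		if (i == 5)	/* simulate a mid-loop failure */
			break;
		channels[n_channels++] = 2412 + 5 * i;	/* fake freqs */
	}
	published = n_channels;	/* commit only what was really filled */

	printf("allocated %d, published %d, last %d MHz\n",
	       ALLOCATED, published, channels[published - 1]);
	return 0;
}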
+ 	wakeup.net_detect = net_detect;
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+index 805fb249a0c6a2..8fb2aa28224212 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+@@ -1106,19 +1106,54 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = {
+ 		      iwlax210_2ax_cfg_so_jf_b0, iwl9462_name),
+ 
+ /* Bz */
+-/* FIXME: need to change the naming according to the actual CRF */
+ 	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ 		      IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
++		      IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
+ 		      IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
++		      iwl_cfg_bz, iwl_ax201_name),
++
++	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
++		      IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
++		      IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY,
++		      IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
++		      iwl_cfg_bz, iwl_ax211_name),
++
++	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
++		      IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
++		      IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
+ 		      IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ 		      iwl_cfg_bz, iwl_fm_name),
+ 
++	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
++		      IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
++		      IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY,
++		      IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
++		      iwl_cfg_bz, iwl_wh_name),
++
+ 	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ 		      IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY,
++		      IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
+ 		      IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
++		      iwl_cfg_bz, iwl_ax201_name),
++
++	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
++		      IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY,
++		      IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY,
++		      IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
++		      iwl_cfg_bz, iwl_ax211_name),
++
++	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
++		      IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY,
++		      IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
+ 		      IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ 		      iwl_cfg_bz, iwl_fm_name),
+ 
++	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
++		      IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY,
++		      IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY,
++		      IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
++		      iwl_cfg_bz, iwl_wh_name),
++
+ /* Ga (Gl) */
+ 	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ 		      IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY,
+diff --git a/drivers/net/wwan/iosm/iosm_ipc_mmio.c b/drivers/net/wwan/iosm/iosm_ipc_mmio.c
+index 63eb08c43c0517..6764c13530b9bd 100644
+--- a/drivers/net/wwan/iosm/iosm_ipc_mmio.c
++++ b/drivers/net/wwan/iosm/iosm_ipc_mmio.c
+@@ -104,7 +104,7 @@ struct iosm_mmio *ipc_mmio_init(void __iomem *mmio, struct device *dev)
+ 			break;
+ 
+ 		msleep(20);
+-	} while (retries-- > 0);
++	} while (--retries > 0);
+ 
+ 	if (!retries) {
+ 		dev_err(ipc_mmio->dev, "invalid exec stage %X", stage);
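+
+The one-character fix matters for the check that follows: with the
+post-decrement form the counter exhausts at -1, so "if (!retries)" never
+fires and the invalid-stage error is silently swallowed. In miniature:
+
+	int post = 3, pre = 3;
+
+	while (post-- > 0)	/* tests 3,2,1,0; body runs 3 times */
+		;
+	while (--pre > 0)	/* tests 2,1,0; body runs 2 times */
+		;
+	/* post == -1 (a "!post" test misses), pre == 0 (a "!pre" test hits) */
+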
+diff --git a/drivers/net/wwan/t7xx/t7xx_state_monitor.c b/drivers/net/wwan/t7xx/t7xx_state_monitor.c
+index 3931c7a13f5ab2..cbdbb91e8381fc 100644
+--- a/drivers/net/wwan/t7xx/t7xx_state_monitor.c
++++ b/drivers/net/wwan/t7xx/t7xx_state_monitor.c
+@@ -104,14 +104,21 @@ void t7xx_fsm_broadcast_state(struct t7xx_fsm_ctl *ctl, enum md_state state)
+ 	fsm_state_notify(ctl->md, state);
+ }
+ 
++static void fsm_release_command(struct kref *ref)
++{
++	struct t7xx_fsm_command *cmd = container_of(ref, typeof(*cmd), refcnt);
++
++	kfree(cmd);
++}
++
+ static void fsm_finish_command(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd, int result)
+ {
+ 	if (cmd->flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) {
+-		*cmd->ret = result;
+-		complete_all(cmd->done);
++		cmd->result = result;
++		complete_all(&cmd->done);
+ 	}
+ 
+-	kfree(cmd);
++	kref_put(&cmd->refcnt, fsm_release_command);
+ }
+ 
+ static void fsm_del_kf_event(struct t7xx_fsm_event *event)
+@@ -475,7 +482,6 @@ static int fsm_main_thread(void *data)
+ 
+ int t7xx_fsm_append_cmd(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_cmd_state cmd_id, unsigned int flag)
+ {
+-	DECLARE_COMPLETION_ONSTACK(done);
+ 	struct t7xx_fsm_command *cmd;
+ 	unsigned long flags;
+ 	int ret;
+@@ -487,11 +493,13 @@ int t7xx_fsm_append_cmd(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_cmd_state cmd_id
+ 	INIT_LIST_HEAD(&cmd->entry);
+ 	cmd->cmd_id = cmd_id;
+ 	cmd->flag = flag;
++	kref_init(&cmd->refcnt);
+ 	if (flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) {
+-		cmd->done = &done;
+-		cmd->ret = &ret;
++		init_completion(&cmd->done);
++		kref_get(&cmd->refcnt);
+ 	}
+ 
++	kref_get(&cmd->refcnt);
+ 	spin_lock_irqsave(&ctl->command_lock, flags);
+ 	list_add_tail(&cmd->entry, &ctl->command_queue);
+ 	spin_unlock_irqrestore(&ctl->command_lock, flags);
+@@ -501,11 +509,11 @@ int t7xx_fsm_append_cmd(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_cmd_state cmd_id
+ 	if (flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) {
+ 		unsigned long wait_ret;
+ 
+-		wait_ret = wait_for_completion_timeout(&done,
++		wait_ret = wait_for_completion_timeout(&cmd->done,
+ 						       msecs_to_jiffies(FSM_CMD_TIMEOUT_MS));
+-		if (!wait_ret)
+-			return -ETIMEDOUT;
+ 
++		ret = wait_ret ? cmd->result : -ETIMEDOUT;
++		kref_put(&cmd->refcnt, fsm_release_command);
+ 		return ret;
+ 	}
+ 
+diff --git a/drivers/net/wwan/t7xx/t7xx_state_monitor.h b/drivers/net/wwan/t7xx/t7xx_state_monitor.h
+index 7b0a9baf488c18..6e0601bb752e51 100644
+--- a/drivers/net/wwan/t7xx/t7xx_state_monitor.h
++++ b/drivers/net/wwan/t7xx/t7xx_state_monitor.h
+@@ -110,8 +110,9 @@ struct t7xx_fsm_command {
+ 	struct list_head	entry;
+ 	enum t7xx_fsm_cmd_state	cmd_id;
+ 	unsigned int		flag;
+-	struct completion	*done;
+-	int			*ret;
++	struct completion	done;
++	int			result;
++	struct kref		refcnt;
+ };
+ 
+ struct t7xx_fsm_notifier {
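+
+The change above closes a use-after-free: the completion used to live on the
+waiter's stack, so a command completed after the waiter had timed out and
+returned wrote into freed stack memory. Embedding the completion in the
+command and reference-counting the command gives each side its own
+reference, and whichever kref_put() runs last frees it. The bare idiom,
+with invented names:
+
+	struct my_cmd {
+		struct kref refcnt;
+		struct completion done;
+		int result;
+	};
+
+	static void my_cmd_release(struct kref *ref)
+	{
+		kfree(container_of(ref, struct my_cmd, refcnt));
+	}
+
+	/* kref_init() supplies the first reference; take one extra per
+	 * independent user (the queue, the waiter) with kref_get(), and
+	 * balance every reference with kref_put(&cmd->refcnt,
+	 * my_cmd_release). A waiter that times out may return at once --
+	 * the worker still holds a live object to complete_all() against. */
+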
+diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
+index 093cb423f536be..61bba5513de05a 100644
+--- a/drivers/nvme/host/nvme.h
++++ b/drivers/nvme/host/nvme.h
+@@ -173,6 +173,11 @@ enum nvme_quirks {
+ 	 * MSI (but not MSI-X) interrupts are broken and never fire.
+ 	 */
+ 	NVME_QUIRK_BROKEN_MSI			= (1 << 21),
++
++	/*
++	 * Align dma pool segment size to 512 bytes
++	 */
++	NVME_QUIRK_DMAPOOL_ALIGN_512		= (1 << 22),
+ };
+ 
+ /*
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index 55af3dfbc2607b..76b3f7b396c86b 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -2690,15 +2690,20 @@ static int nvme_disable_prepare_reset(struct nvme_dev *dev, bool shutdown)
+ 
+ static int nvme_setup_prp_pools(struct nvme_dev *dev)
+ {
++	size_t small_align = 256;
++
+ 	dev->prp_page_pool = dma_pool_create("prp list page", dev->dev,
+ 						NVME_CTRL_PAGE_SIZE,
+ 						NVME_CTRL_PAGE_SIZE, 0);
+ 	if (!dev->prp_page_pool)
+ 		return -ENOMEM;
+ 
++	if (dev->ctrl.quirks & NVME_QUIRK_DMAPOOL_ALIGN_512)
++		small_align = 512;
++
+ 	/* Optimisation for I/Os between 4k and 128k */
+ 	dev->prp_small_pool = dma_pool_create("prp list 256", dev->dev,
+-						256, 256, 0);
++						256, small_align, 0);
+ 	if (!dev->prp_small_pool) {
+ 		dma_pool_destroy(dev->prp_page_pool);
+ 		return -ENOMEM;
+@@ -3446,7 +3451,7 @@ static const struct pci_device_id nvme_id_table[] = {
+ 	{ PCI_VDEVICE(REDHAT, 0x0010),	/* Qemu emulated controller */
+ 		.driver_data = NVME_QUIRK_BOGUS_NID, },
+ 	{ PCI_DEVICE(0x1217, 0x8760), /* O2 Micro 64GB Steam Deck */
+-		.driver_data = NVME_QUIRK_QDEPTH_ONE },
++		.driver_data = NVME_QUIRK_DMAPOOL_ALIGN_512, },
+ 	{ PCI_DEVICE(0x126f, 0x2262),	/* Silicon Motion generic */
+ 		.driver_data = NVME_QUIRK_NO_DEEPEST_PS |
+ 				NVME_QUIRK_BOGUS_NID, },
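+
+For reference, dma_pool_create() takes (name, dev, size, align, boundary),
+so the quirk only widens the alignment argument: allocations from the small
+PRP-list pool stay 256 bytes long but now start on a 512-byte boundary,
+which the affected O2 Micro controller appears to require:
+
+	pool = dma_pool_create("prp list 256", dev, 256 /* size */,
+			       512 /* align */, 0 /* no boundary limit */);
+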
+diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
+index 685e89b35d330d..cfbab198693b03 100644
+--- a/drivers/nvme/target/configfs.c
++++ b/drivers/nvme/target/configfs.c
+@@ -2227,12 +2227,17 @@ static ssize_t nvmet_root_discovery_nqn_store(struct config_item *item,
+ 		const char *page, size_t count)
+ {
+ 	struct list_head *entry;
++	char *old_nqn, *new_nqn;
+ 	size_t len;
+ 
+ 	len = strcspn(page, "\n");
+ 	if (!len || len > NVMF_NQN_FIELD_LEN - 1)
+ 		return -EINVAL;
+ 
++	new_nqn = kstrndup(page, len, GFP_KERNEL);
++	if (!new_nqn)
++		return -ENOMEM;
++
+ 	down_write(&nvmet_config_sem);
+ 	list_for_each(entry, &nvmet_subsystems_group.cg_children) {
+ 		struct config_item *item =
+@@ -2241,13 +2246,15 @@ static ssize_t nvmet_root_discovery_nqn_store(struct config_item *item,
+ 		if (!strncmp(config_item_name(item), page, len)) {
+ 			pr_err("duplicate NQN %s\n", config_item_name(item));
+ 			up_write(&nvmet_config_sem);
++			kfree(new_nqn);
+ 			return -EINVAL;
+ 		}
+ 	}
+-	memset(nvmet_disc_subsys->subsysnqn, 0, NVMF_NQN_FIELD_LEN);
+-	memcpy(nvmet_disc_subsys->subsysnqn, page, len);
++	old_nqn = nvmet_disc_subsys->subsysnqn;
++	nvmet_disc_subsys->subsysnqn = new_nqn;
+ 	up_write(&nvmet_config_sem);
+ 
++	kfree(old_nqn);
+ 	return len;
+ }
+ 
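+
+The rewrite follows the allocate-copy-swap-free discipline: build the new
+string before taking the lock, swap the pointer while holding it, free the
+old copy afterwards, so readers always observe a complete NQN and the
+allocation that can sleep happens outside the critical section. The same
+shape in portable C (a sketch, not nvmet code):
+
+	#define _GNU_SOURCE
+	#include <pthread.h>
+	#include <stdlib.h>
+	#include <string.h>
+
+	static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
+	static char *nqn;
+
+	static int set_nqn(const char *page, size_t len)
+	{
+		char *old, *copy = strndup(page, len);	/* allocate first */
+
+		if (!copy)
+			return -1;
+		pthread_rwlock_wrlock(&lock);
+		old = nqn;
+		nqn = copy;	/* readers under rdlock see old or new, whole */
+		pthread_rwlock_unlock(&lock);
+		free(old);	/* release the old string outside the lock */
+		return 0;
+	}
+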
+diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
+index 737d0ae3d0b662..f384c72d955452 100644
+--- a/drivers/pinctrl/pinctrl-mcp23s08.c
++++ b/drivers/pinctrl/pinctrl-mcp23s08.c
+@@ -86,6 +86,7 @@ const struct regmap_config mcp23x08_regmap = {
+ 	.num_reg_defaults = ARRAY_SIZE(mcp23x08_defaults),
+ 	.cache_type = REGCACHE_FLAT,
+ 	.max_register = MCP_OLAT,
++	.disable_locking = true, /* mcp->lock protects the regmap */
+ };
+ EXPORT_SYMBOL_GPL(mcp23x08_regmap);
+ 
+@@ -132,6 +133,7 @@ const struct regmap_config mcp23x17_regmap = {
+ 	.num_reg_defaults = ARRAY_SIZE(mcp23x17_defaults),
+ 	.cache_type = REGCACHE_FLAT,
+ 	.val_format_endian = REGMAP_ENDIAN_LITTLE,
++	.disable_locking = true, /* mcp->lock protects the regmap */
+ };
+ EXPORT_SYMBOL_GPL(mcp23x17_regmap);
+ 
+@@ -228,7 +230,9 @@ static int mcp_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
+ 
+ 	switch (param) {
+ 	case PIN_CONFIG_BIAS_PULL_UP:
++		mutex_lock(&mcp->lock);
+ 		ret = mcp_read(mcp, MCP_GPPU, &data);
++		mutex_unlock(&mcp->lock);
+ 		if (ret < 0)
+ 			return ret;
+ 		status = (data & BIT(pin)) ? 1 : 0;
+@@ -257,7 +261,9 @@ static int mcp_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
+ 
+ 		switch (param) {
+ 		case PIN_CONFIG_BIAS_PULL_UP:
++			mutex_lock(&mcp->lock);
+ 			ret = mcp_set_bit(mcp, MCP_GPPU, pin, arg);
++			mutex_unlock(&mcp->lock);
+ 			break;
+ 		default:
+ 			dev_dbg(mcp->dev, "Invalid config param %04x\n", param);
+diff --git a/drivers/platform/x86/hp/hp-wmi.c b/drivers/platform/x86/hp/hp-wmi.c
+index 8c05e0dd2a218e..3ba9c43d5516ae 100644
+--- a/drivers/platform/x86/hp/hp-wmi.c
++++ b/drivers/platform/x86/hp/hp-wmi.c
+@@ -64,7 +64,7 @@ static const char * const omen_thermal_profile_boards[] = {
+ 	"874A", "8603", "8604", "8748", "886B", "886C", "878A", "878B", "878C",
+ 	"88C8", "88CB", "8786", "8787", "8788", "88D1", "88D2", "88F4", "88FD",
+ 	"88F5", "88F6", "88F7", "88FE", "88FF", "8900", "8901", "8902", "8912",
+-	"8917", "8918", "8949", "894A", "89EB", "8BAD", "8A42"
++	"8917", "8918", "8949", "894A", "89EB", "8BAD", "8A42", "8A15"
+ };
+ 
+ /* DMI Board names of Omen laptops that are specifically set to be thermal
+@@ -80,7 +80,7 @@ static const char * const omen_thermal_profile_force_v0_boards[] = {
+  * "balanced" when reaching zero.
+  */
+ static const char * const omen_timed_thermal_profile_boards[] = {
+-	"8BAD", "8A42"
++	"8BAD", "8A42", "8A15"
+ };
+ 
+ /* DMI Board names of Victus laptops */
+diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c
+index 9d70146fd7420a..1a09f2dfb7bca0 100644
+--- a/drivers/platform/x86/mlx-platform.c
++++ b/drivers/platform/x86/mlx-platform.c
+@@ -6237,6 +6237,7 @@ mlxplat_pci_fpga_device_init(unsigned int device, const char *res_name, struct p
+ fail_pci_request_regions:
+ 	pci_disable_device(pci_dev);
+ fail_pci_enable_device:
++	pci_dev_put(pci_dev);
+ 	return err;
+ }
+ 
+@@ -6247,6 +6248,7 @@ mlxplat_pci_fpga_device_exit(struct pci_dev *pci_bridge,
+ 	iounmap(pci_bridge_addr);
+ 	pci_release_regions(pci_bridge);
+ 	pci_disable_device(pci_bridge);
++	pci_dev_put(pci_bridge);
+ }
+ 
+ static int
+diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
+index 6371a9f765c139..2cfb2ac3f465aa 100644
+--- a/drivers/platform/x86/thinkpad_acpi.c
++++ b/drivers/platform/x86/thinkpad_acpi.c
+@@ -184,7 +184,8 @@ enum tpacpi_hkey_event_t {
+ 						   */
+ 	TP_HKEY_EV_AMT_TOGGLE		= 0x131a, /* Toggle AMT on/off */
+ 	TP_HKEY_EV_DOUBLETAP_TOGGLE	= 0x131c, /* Toggle trackpoint doubletap on/off */
+-	TP_HKEY_EV_PROFILE_TOGGLE	= 0x131f, /* Toggle platform profile */
++	TP_HKEY_EV_PROFILE_TOGGLE	= 0x131f, /* Toggle platform profile in 2024 systems */
++	TP_HKEY_EV_PROFILE_TOGGLE2	= 0x1401, /* Toggle platform profile in 2025 + systems */
+ 
+ 	/* Reasons for waking up from S3/S4 */
+ 	TP_HKEY_EV_WKUP_S3_UNDOCK	= 0x2304, /* undock requested, S3 */
+@@ -11200,6 +11201,7 @@ static bool tpacpi_driver_event(const unsigned int hkey_event)
+ 		tp_features.trackpoint_doubletap = !tp_features.trackpoint_doubletap;
+ 		return true;
+ 	case TP_HKEY_EV_PROFILE_TOGGLE:
++	case TP_HKEY_EV_PROFILE_TOGGLE2:
+ 		platform_profile_cycle();
+ 		return true;
+ 	}
+diff --git a/drivers/pmdomain/core.c b/drivers/pmdomain/core.c
+index 778ff187ac59e6..88819659df83a2 100644
+--- a/drivers/pmdomain/core.c
++++ b/drivers/pmdomain/core.c
+@@ -2141,6 +2141,11 @@ static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
+ 	return 0;
+ }
+ 
++static void genpd_provider_release(struct device *dev)
++{
++	/* nothing to be done here */
++}
++
+ static int genpd_alloc_data(struct generic_pm_domain *genpd)
+ {
+ 	struct genpd_governor_data *gd = NULL;
+@@ -2172,6 +2177,7 @@ static int genpd_alloc_data(struct generic_pm_domain *genpd)
+ 
+ 	genpd->gd = gd;
+ 	device_initialize(&genpd->dev);
++	genpd->dev.release = genpd_provider_release;
+ 
+ 	if (!genpd_is_dev_name_fw(genpd)) {
+ 		dev_set_name(&genpd->dev, "%s", genpd->name);
+diff --git a/drivers/pmdomain/imx/gpcv2.c b/drivers/pmdomain/imx/gpcv2.c
+index 3f0e6960f47fc2..e03c2cb39a6936 100644
+--- a/drivers/pmdomain/imx/gpcv2.c
++++ b/drivers/pmdomain/imx/gpcv2.c
+@@ -1458,12 +1458,12 @@ static int imx_gpcv2_probe(struct platform_device *pdev)
+ 		.max_register   = SZ_4K,
+ 	};
+ 	struct device *dev = &pdev->dev;
+-	struct device_node *pgc_np;
++	struct device_node *pgc_np __free(device_node) =
++		of_get_child_by_name(dev->of_node, "pgc");
+ 	struct regmap *regmap;
+ 	void __iomem *base;
+ 	int ret;
+ 
+-	pgc_np = of_get_child_by_name(dev->of_node, "pgc");
+ 	if (!pgc_np) {
+ 		dev_err(dev, "No power domains specified in DT\n");
+ 		return -EINVAL;
+diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
+index 1755ca026f08ff..73b1edd0531b43 100644
+--- a/drivers/spi/spi-cadence-quadspi.c
++++ b/drivers/spi/spi-cadence-quadspi.c
+@@ -43,6 +43,7 @@ static_assert(CQSPI_MAX_CHIPSELECT <= SPI_CS_CNT_MAX);
+ #define CQSPI_SLOW_SRAM		BIT(4)
+ #define CQSPI_NEEDS_APB_AHB_HAZARD_WAR	BIT(5)
+ #define CQSPI_RD_NO_IRQ			BIT(6)
++#define CQSPI_DISABLE_STIG_MODE		BIT(7)
+ 
+ /* Capabilities */
+ #define CQSPI_SUPPORTS_OCTAL		BIT(0)
+@@ -103,6 +104,7 @@ struct cqspi_st {
+ 	bool			apb_ahb_hazard;
+ 
+ 	bool			is_jh7110; /* Flag for StarFive JH7110 SoC */
++	bool			disable_stig_mode;
+ 
+ 	const struct cqspi_driver_platdata *ddata;
+ };
+@@ -1416,7 +1418,8 @@ static int cqspi_mem_process(struct spi_mem *mem, const struct spi_mem_op *op)
+ 	 * reads, prefer STIG mode for such small reads.
+ 	 */
+ 		if (!op->addr.nbytes ||
+-		    op->data.nbytes <= CQSPI_STIG_DATA_LEN_MAX)
++		    (op->data.nbytes <= CQSPI_STIG_DATA_LEN_MAX &&
++		     !cqspi->disable_stig_mode))
+ 			return cqspi_command_read(f_pdata, op);
+ 
+ 		return cqspi_read(f_pdata, op);
+@@ -1880,6 +1883,8 @@ static int cqspi_probe(struct platform_device *pdev)
+ 			if (ret)
+ 				goto probe_reset_failed;
+ 		}
++		if (ddata->quirks & CQSPI_DISABLE_STIG_MODE)
++			cqspi->disable_stig_mode = true;
+ 
+ 		if (of_device_is_compatible(pdev->dev.of_node,
+ 					    "xlnx,versal-ospi-1.0")) {
+@@ -2043,7 +2048,8 @@ static const struct cqspi_driver_platdata intel_lgm_qspi = {
+ static const struct cqspi_driver_platdata socfpga_qspi = {
+ 	.quirks = CQSPI_DISABLE_DAC_MODE
+ 			| CQSPI_NO_SUPPORT_WR_COMPLETION
+-			| CQSPI_SLOW_SRAM,
++			| CQSPI_SLOW_SRAM
++			| CQSPI_DISABLE_STIG_MODE,
+ };
+ 
+ static const struct cqspi_driver_platdata versal_ospi = {
+diff --git a/fs/btrfs/bio.c b/fs/btrfs/bio.c
+index 3d2376caedfa68..5fd0b39d8c703b 100644
+--- a/fs/btrfs/bio.c
++++ b/fs/btrfs/bio.c
+@@ -81,6 +81,9 @@ static struct btrfs_bio *btrfs_split_bio(struct btrfs_fs_info *fs_info,
+ 
+ 	bio = bio_split(&orig_bbio->bio, map_length >> SECTOR_SHIFT, GFP_NOFS,
+ 			&btrfs_clone_bioset);
++	if (IS_ERR(bio))
++		return ERR_CAST(bio);
++
+ 	bbio = btrfs_bio(bio);
+ 	btrfs_bio_init(bbio, fs_info, NULL, orig_bbio);
+ 	bbio->inode = orig_bbio->inode;
+@@ -355,7 +358,7 @@ static void btrfs_simple_end_io(struct bio *bio)
+ 		INIT_WORK(&bbio->end_io_work, btrfs_end_bio_work);
+ 		queue_work(btrfs_end_io_wq(fs_info, bio), &bbio->end_io_work);
+ 	} else {
+-		if (bio_op(bio) == REQ_OP_ZONE_APPEND && !bio->bi_status)
++		if (bio_is_zone_append(bio) && !bio->bi_status)
+ 			btrfs_record_physical_zoned(bbio);
+ 		btrfs_bio_end_io(bbio, bbio->bio.bi_status);
+ 	}
+@@ -398,7 +401,7 @@ static void btrfs_orig_write_end_io(struct bio *bio)
+ 	else
+ 		bio->bi_status = BLK_STS_OK;
+ 
+-	if (bio_op(bio) == REQ_OP_ZONE_APPEND && !bio->bi_status)
++	if (bio_is_zone_append(bio) && !bio->bi_status)
+ 		stripe->physical = bio->bi_iter.bi_sector << SECTOR_SHIFT;
+ 
+ 	btrfs_bio_end_io(bbio, bbio->bio.bi_status);
+@@ -412,7 +415,7 @@ static void btrfs_clone_write_end_io(struct bio *bio)
+ 	if (bio->bi_status) {
+ 		atomic_inc(&stripe->bioc->error);
+ 		btrfs_log_dev_io_error(bio, stripe->dev);
+-	} else if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
++	} else if (bio_is_zone_append(bio)) {
+ 		stripe->physical = bio->bi_iter.bi_sector << SECTOR_SHIFT;
+ 	}
+ 
+@@ -684,7 +687,8 @@ static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num)
+ 				&bioc, &smap, &mirror_num);
+ 	if (error) {
+ 		ret = errno_to_blk_status(error);
+-		goto fail;
++		btrfs_bio_counter_dec(fs_info);
++		goto end_bbio;
+ 	}
+ 
+ 	map_length = min(map_length, length);
+@@ -692,7 +696,15 @@ static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num)
+ 		map_length = btrfs_append_map_length(bbio, map_length);
+ 
+ 	if (map_length < length) {
+-		bbio = btrfs_split_bio(fs_info, bbio, map_length);
++		struct btrfs_bio *split;
++
++		split = btrfs_split_bio(fs_info, bbio, map_length);
++		if (IS_ERR(split)) {
++			ret = errno_to_blk_status(PTR_ERR(split));
++			btrfs_bio_counter_dec(fs_info);
++			goto end_bbio;
++		}
++		bbio = split;
+ 		bio = &bbio->bio;
+ 	}
+ 
+@@ -766,6 +778,7 @@ static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num)
+ 
+ 		btrfs_bio_end_io(remaining, ret);
+ 	}
++end_bbio:
+ 	btrfs_bio_end_io(bbio, ret);
+ 	/* Do not submit another chunk */
+ 	return true;
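+
+With this series bio_split() can report failure through the returned
+pointer itself, which is why the helper and its caller both grow IS_ERR()
+checks. The <linux/err.h> idiom in outline (hypothetical producer/consumer
+shapes):
+
+	if (!resource)
+		return ERR_PTR(-ENOMEM);	/* producer: errno rides in the pointer */
+
+	p = get_resource();			/* consumer: test before any dereference */
+	if (IS_ERR(p))
+		return ERR_CAST(p);		/* forward as a differently-typed pointer,
+						 * or decode with PTR_ERR(p) -- the hunk
+						 * does errno_to_blk_status(PTR_ERR(split)) */
+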
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 43b7b331b2da36..563f106774e592 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -4264,6 +4264,15 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
+ 	 * already the cleaner, but below we run all pending delayed iputs.
+ 	 */
+ 	btrfs_flush_workqueue(fs_info->fixup_workers);
++	/*
++	 * Similar case here, we have to wait for delalloc workers before we
++	 * proceed below and stop the cleaner kthread, otherwise we trigger a
++	 * use-after-free on the cleaner kthread task_struct when a delalloc
++	 * worker running submit_compressed_extents() adds a delayed iput, which
++	 * does a wake up on the cleaner kthread, which was already freed below
++	 * when we call kthread_stop().
++	 */
++	btrfs_flush_workqueue(fs_info->delalloc_workers);
+ 
+ 	/*
+ 	 * After we parked the cleaner kthread, ordered extents may have
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 4b3e256e0d0b88..b5cfb85af937fc 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -10056,6 +10056,11 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
+ 			bsi.block_start = physical_block_start;
+ 			bsi.block_len = len;
+ 		}
++
++		if (fatal_signal_pending(current)) {
++			ret = -EINTR;
++			goto out;
++		}
+ 	}
+ 
+ 	if (bsi.block_len)
+diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c
+index 2b0daced98ebb4..3404e7a30c330c 100644
+--- a/fs/ocfs2/quota_global.c
++++ b/fs/ocfs2/quota_global.c
+@@ -893,7 +893,7 @@ static int ocfs2_get_next_id(struct super_block *sb, struct kqid *qid)
+ 	int status = 0;
+ 
+ 	trace_ocfs2_get_next_id(from_kqid(&init_user_ns, *qid), type);
+-	if (!sb_has_quota_loaded(sb, type)) {
++	if (!sb_has_quota_active(sb, type)) {
+ 		status = -ESRCH;
+ 		goto out;
+ 	}
+diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c
+index 73d3367c533b8a..2956d888c13145 100644
+--- a/fs/ocfs2/quota_local.c
++++ b/fs/ocfs2/quota_local.c
+@@ -867,6 +867,7 @@ static int ocfs2_local_free_info(struct super_block *sb, int type)
+ 	brelse(oinfo->dqi_libh);
+ 	brelse(oinfo->dqi_lqi_bh);
+ 	kfree(oinfo);
++	info->dqi_priv = NULL;
+ 	return status;
+ }
+ 
+diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
+index 7eb010de39fe26..536b7dc4538182 100644
+--- a/fs/proc/task_mmu.c
++++ b/fs/proc/task_mmu.c
+@@ -1810,7 +1810,7 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
+ 		}
+ 
+ 		for (; addr != end; addr += PAGE_SIZE, idx++) {
+-			unsigned long cur_flags = flags;
++			u64 cur_flags = flags;
+ 			pagemap_entry_t pme;
+ 
+ 			if (folio && (flags & PM_PRESENT) &&
+diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c
+index bf909c2f6b963b..0ceebde38f9fe0 100644
+--- a/fs/smb/client/cifsfs.c
++++ b/fs/smb/client/cifsfs.c
+@@ -2018,6 +2018,7 @@ exit_cifs(void)
+ 	destroy_workqueue(decrypt_wq);
+ 	destroy_workqueue(fileinfo_put_wq);
+ 	destroy_workqueue(serverclose_wq);
++	destroy_workqueue(cfid_put_wq);
+ 	destroy_workqueue(cifsiod_wq);
+ 	cifs_proc_clean();
+ }
+diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
+index 7d01dd313351f7..04ffc5b158c3bf 100644
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -4224,6 +4224,7 @@ static bool __query_dir(struct dir_context *ctx, const char *name, int namlen,
+ 	/* dot and dotdot entries are already reserved */
+ 	if (!strcmp(".", name) || !strcmp("..", name))
+ 		return true;
++	d_info->num_scan++;
+ 	if (ksmbd_share_veto_filename(priv->work->tcon->share_conf, name))
+ 		return true;
+ 	if (!match_pattern(name, namlen, priv->search_pattern))
+@@ -4384,8 +4385,17 @@ int smb2_query_dir(struct ksmbd_work *work)
+ 	query_dir_private.info_level		= req->FileInformationClass;
+ 	dir_fp->readdir_data.private		= &query_dir_private;
+ 	set_ctx_actor(&dir_fp->readdir_data.ctx, __query_dir);
+-
++again:
++	d_info.num_scan = 0;
+ 	rc = iterate_dir(dir_fp->filp, &dir_fp->readdir_data.ctx);
++	/*
++	 * num_entry can be 0 if the directory iteration stops before reaching
++	 * the end of the directory and no file matches the search
++	 * pattern.
++	 */
++	if (rc >= 0 && !d_info.num_entry && d_info.num_scan &&
++	    d_info.out_buf_len > 0)
++		goto again;
+ 	/*
+ 	 * req->OutputBufferLength is too small to contain even one entry.
+ 	 * In this case, it immediately returns OutputBufferLength 0 to client.
+@@ -6006,15 +6016,13 @@ static int set_file_basic_info(struct ksmbd_file *fp,
+ 		attrs.ia_valid |= (ATTR_ATIME | ATTR_ATIME_SET);
+ 	}
+ 
+-	attrs.ia_valid |= ATTR_CTIME;
+ 	if (file_info->ChangeTime)
+-		attrs.ia_ctime = ksmbd_NTtimeToUnix(file_info->ChangeTime);
+-	else
+-		attrs.ia_ctime = inode_get_ctime(inode);
++		inode_set_ctime_to_ts(inode,
++				ksmbd_NTtimeToUnix(file_info->ChangeTime));
+ 
+ 	if (file_info->LastWriteTime) {
+ 		attrs.ia_mtime = ksmbd_NTtimeToUnix(file_info->LastWriteTime);
+-		attrs.ia_valid |= (ATTR_MTIME | ATTR_MTIME_SET);
++		attrs.ia_valid |= (ATTR_MTIME | ATTR_MTIME_SET | ATTR_CTIME);
+ 	}
+ 
+ 	if (file_info->Attributes) {
+@@ -6056,8 +6064,6 @@ static int set_file_basic_info(struct ksmbd_file *fp,
+ 			return -EACCES;
+ 
+ 		inode_lock(inode);
+-		inode_set_ctime_to_ts(inode, attrs.ia_ctime);
+-		attrs.ia_valid &= ~ATTR_CTIME;
+ 		rc = notify_change(idmap, dentry, &attrs, NULL);
+ 		inode_unlock(inode);
+ 	}
+diff --git a/fs/smb/server/vfs.h b/fs/smb/server/vfs.h
+index cb76f4b5bafe8c..06903024a2d88b 100644
+--- a/fs/smb/server/vfs.h
++++ b/fs/smb/server/vfs.h
+@@ -43,6 +43,7 @@ struct ksmbd_dir_info {
+ 	char		*rptr;
+ 	int		name_len;
+ 	int		out_buf_len;
++	int		num_scan;
+ 	int		num_entry;
+ 	int		data_count;
+ 	int		last_entry_offset;
+diff --git a/include/linux/bio.h b/include/linux/bio.h
+index faceadb040f9ac..66b7620a1b5333 100644
+--- a/include/linux/bio.h
++++ b/include/linux/bio.h
+@@ -677,6 +677,23 @@ static inline void bio_clear_polled(struct bio *bio)
+ 	bio->bi_opf &= ~REQ_POLLED;
+ }
+ 
++/**
++ * bio_is_zone_append - is this a zone append bio?
++ * @bio:	bio to check
++ *
++ * Check if @bio is a zone append operation.  Core block layer code and end_io
++ * handlers must use this instead of an open-coded REQ_OP_ZONE_APPEND check
++ * because the block layer can rewrite REQ_OP_ZONE_APPEND to REQ_OP_WRITE if
++ * it is not natively supported.
++ */
++static inline bool bio_is_zone_append(struct bio *bio)
++{
++	if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED))
++		return false;
++	return bio_op(bio) == REQ_OP_ZONE_APPEND ||
++		bio_flagged(bio, BIO_EMULATES_ZONE_APPEND);
++}
++
+ struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
+ 		unsigned int nr_pages, blk_opf_t opf, gfp_t gfp);
+ struct bio *bio_chain_and_submit(struct bio *prev, struct bio *new);
+diff --git a/include/linux/filter.h b/include/linux/filter.h
+index 7d7578a8eac10b..5118caf8aa1c70 100644
+--- a/include/linux/filter.h
++++ b/include/linux/filter.h
+@@ -1121,7 +1121,7 @@ bool bpf_jit_supports_arena(void);
+ bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena);
+ u64 bpf_arch_uaddress_limit(void);
+ void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie);
+-bool bpf_helper_changes_pkt_data(void *func);
++bool bpf_helper_changes_pkt_data(enum bpf_func_id func_id);
+ 
+ static inline bool bpf_dump_raw_ok(const struct cred *cred)
+ {
+diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
+index c1645c86eed969..d65b5d71b93bf8 100644
+--- a/include/linux/if_vlan.h
++++ b/include/linux/if_vlan.h
+@@ -585,13 +585,16 @@ static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
+  * vlan_get_protocol - get protocol EtherType.
+  * @skb: skbuff to query
+  * @type: first vlan protocol
++ * @mac_offset: offset of the MAC header within the skb
+  * @depth: buffer to store length of eth and vlan tags in bytes
+  *
+  * Returns the EtherType of the packet, regardless of whether it is
+  * vlan encapsulated (normal or hardware accelerated) or not.
+  */
+-static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type,
+-					 int *depth)
++static inline __be16 __vlan_get_protocol_offset(const struct sk_buff *skb,
++						__be16 type,
++						int mac_offset,
++						int *depth)
+ {
+ 	unsigned int vlan_depth = skb->mac_len, parse_depth = VLAN_MAX_DEPTH;
+ 
+@@ -610,7 +613,8 @@ static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type,
+ 		do {
+ 			struct vlan_hdr vhdr, *vh;
+ 
+-			vh = skb_header_pointer(skb, vlan_depth, sizeof(vhdr), &vhdr);
++			vh = skb_header_pointer(skb, mac_offset + vlan_depth,
++						sizeof(vhdr), &vhdr);
+ 			if (unlikely(!vh || !--parse_depth))
+ 				return 0;
+ 
+@@ -625,6 +629,12 @@ static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type,
+ 	return type;
+ }
+ 
++static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type,
++					 int *depth)
++{
++	return __vlan_get_protocol_offset(skb, type, 0, depth);
++}
++
+ /**
+  * vlan_get_protocol - get protocol EtherType.
+  * @skb: skbuff to query
+diff --git a/include/linux/memfd.h b/include/linux/memfd.h
+index 3f2cf339ceafd9..d437e30708502e 100644
+--- a/include/linux/memfd.h
++++ b/include/linux/memfd.h
+@@ -7,6 +7,7 @@
+ #ifdef CONFIG_MEMFD_CREATE
+ extern long memfd_fcntl(struct file *file, unsigned int cmd, unsigned int arg);
+ struct folio *memfd_alloc_folio(struct file *memfd, pgoff_t idx);
++unsigned int *memfd_file_seals_ptr(struct file *file);
+ #else
+ static inline long memfd_fcntl(struct file *f, unsigned int c, unsigned int a)
+ {
+@@ -16,6 +17,19 @@ static inline struct folio *memfd_alloc_folio(struct file *memfd, pgoff_t idx)
+ {
+ 	return ERR_PTR(-EINVAL);
+ }
++
++static inline unsigned int *memfd_file_seals_ptr(struct file *file)
++{
++	return NULL;
++}
+ #endif
+ 
++/* Retrieve memfd seals associated with the file, if any. */
++static inline unsigned int memfd_file_seals(struct file *file)
++{
++	unsigned int *sealsp = memfd_file_seals_ptr(file);
++
++	return sealsp ? *sealsp : 0;
++}
++
+ #endif /* __LINUX_MEMFD_H */
+diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
+index e23c692a34c702..82c7056e27599e 100644
+--- a/include/linux/mlx5/driver.h
++++ b/include/linux/mlx5/driver.h
+@@ -555,6 +555,7 @@ enum {
+ 	 * creation/deletion on drivers rescan. Unset during device attach.
+ 	 */
+ 	MLX5_PRIV_FLAGS_DETACH = 1 << 2,
++	MLX5_PRIV_FLAGS_SWITCH_LEGACY = 1 << 3,
+ };
+ 
+ struct mlx5_adev {
+@@ -1233,6 +1234,12 @@ static inline bool mlx5_core_is_vf(const struct mlx5_core_dev *dev)
+ 	return dev->coredev_type == MLX5_COREDEV_VF;
+ }
+ 
++static inline bool mlx5_core_same_coredev_type(const struct mlx5_core_dev *dev1,
++					       const struct mlx5_core_dev *dev2)
++{
++	return dev1->coredev_type == dev2->coredev_type;
++}
++
+ static inline bool mlx5_core_is_ecpf(const struct mlx5_core_dev *dev)
+ {
+ 	return dev->caps.embedded_cpu;
+diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
+index 96d369112bfa03..512e25c416ae29 100644
+--- a/include/linux/mlx5/mlx5_ifc.h
++++ b/include/linux/mlx5/mlx5_ifc.h
+@@ -2113,7 +2113,9 @@ struct mlx5_ifc_cmd_hca_cap_2_bits {
+ 	u8	   migration_in_chunks[0x1];
+ 	u8	   reserved_at_d1[0x1];
+ 	u8	   sf_eq_usage[0x1];
+-	u8	   reserved_at_d3[0xd];
++	u8	   reserved_at_d3[0x5];
++	u8	   multiplane[0x1];
++	u8	   reserved_at_d9[0x7];
+ 
+ 	u8	   cross_vhca_object_to_object_supported[0x20];
+ 
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index 61fff5d34ed532..8617adc6becd1f 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -3100,6 +3100,7 @@ static inline bool pagetable_pmd_ctor(struct ptdesc *ptdesc)
+ 	if (!pmd_ptlock_init(ptdesc))
+ 		return false;
+ 	__folio_set_pgtable(folio);
++	ptdesc_pmd_pts_init(ptdesc);
+ 	lruvec_stat_add_folio(folio, NR_PAGETABLE);
+ 	return true;
+ }
+@@ -4079,6 +4080,37 @@ void mem_dump_obj(void *object);
+ static inline void mem_dump_obj(void *object) {}
+ #endif
+ 
++static inline bool is_write_sealed(int seals)
++{
++	return seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE);
++}
++
++/**
++ * is_readonly_sealed - Checks whether write-sealed but mapped read-only,
++ *                      in which case writes should be disallowed going
++ *                      forwards.
++ * @seals: the seals to check
++ * @vm_flags: the VMA flags to check
++ *
++ * Returns whether readonly sealed, in which case writes should be disallowed
++ * going forward.
++ */
++static inline bool is_readonly_sealed(int seals, vm_flags_t vm_flags)
++{
++	/*
++	 * Since an F_SEAL_[FUTURE_]WRITE sealed memfd can be mapped as
++	 * MAP_SHARED and read-only, take care to not allow mprotect to
++	 * revert protections on such mappings. Do this only for shared
++	 * mappings. For private mappings, don't need to mask
++	 * VM_MAYWRITE as we still want them to be COW-writable.
++	 */
++	if (is_write_sealed(seals) &&
++	    ((vm_flags & (VM_SHARED | VM_WRITE)) == VM_SHARED))
++		return true;
++
++	return false;
++}
++
+ /**
+  * seal_check_write - Check for F_SEAL_WRITE or F_SEAL_FUTURE_WRITE flags and
+  *                    handle them.
+@@ -4090,24 +4122,15 @@ static inline void mem_dump_obj(void *object) {}
+  */
+ static inline int seal_check_write(int seals, struct vm_area_struct *vma)
+ {
+-	if (seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
+-		/*
+-		 * New PROT_WRITE and MAP_SHARED mmaps are not allowed when
+-		 * write seals are active.
+-		 */
+-		if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
+-			return -EPERM;
+-
+-		/*
+-		 * Since an F_SEAL_[FUTURE_]WRITE sealed memfd can be mapped as
+-		 * MAP_SHARED and read-only, take care to not allow mprotect to
+-		 * revert protections on such mappings. Do this only for shared
+-		 * mappings. For private mappings, don't need to mask
+-		 * VM_MAYWRITE as we still want them to be COW-writable.
+-		 */
+-		if (vma->vm_flags & VM_SHARED)
+-			vm_flags_clear(vma, VM_MAYWRITE);
+-	}
++	if (!is_write_sealed(seals))
++		return 0;
++
++	/*
++	 * New PROT_WRITE and MAP_SHARED mmaps are not allowed when
++	 * write seals are active.
++	 */
++	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
++		return -EPERM;
+ 
+ 	return 0;
+ }
+diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
+index 6e3bdf8e38bcae..6894de506b364f 100644
+--- a/include/linux/mm_types.h
++++ b/include/linux/mm_types.h
+@@ -445,6 +445,7 @@ FOLIO_MATCH(compound_head, _head_2a);
+  * @pt_index:         Used for s390 gmap.
+  * @pt_mm:            Used for x86 pgds.
+  * @pt_frag_refcount: For fragmented page table tracking. Powerpc only.
++ * @pt_share_count:   Used for HugeTLB PMD page table share count.
+  * @_pt_pad_2:        Padding to ensure proper alignment.
+  * @ptl:              Lock for the page table.
+  * @__page_type:      Same as page->page_type. Unused for page tables.
+@@ -471,6 +472,9 @@ struct ptdesc {
+ 		pgoff_t pt_index;
+ 		struct mm_struct *pt_mm;
+ 		atomic_t pt_frag_refcount;
++#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
++		atomic_t pt_share_count;
++#endif
+ 	};
+ 
+ 	union {
+@@ -516,6 +520,32 @@ static_assert(sizeof(struct ptdesc) <= sizeof(struct page));
+ 	const struct page *:		(const struct ptdesc *)(p),	\
+ 	struct page *:			(struct ptdesc *)(p)))
+ 
++#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
++static inline void ptdesc_pmd_pts_init(struct ptdesc *ptdesc)
++{
++	atomic_set(&ptdesc->pt_share_count, 0);
++}
++
++static inline void ptdesc_pmd_pts_inc(struct ptdesc *ptdesc)
++{
++	atomic_inc(&ptdesc->pt_share_count);
++}
++
++static inline void ptdesc_pmd_pts_dec(struct ptdesc *ptdesc)
++{
++	atomic_dec(&ptdesc->pt_share_count);
++}
++
++static inline int ptdesc_pmd_pts_count(struct ptdesc *ptdesc)
++{
++	return atomic_read(&ptdesc->pt_share_count);
++}
++#else
++static inline void ptdesc_pmd_pts_init(struct ptdesc *ptdesc)
++{
++}
++#endif
++
+ /*
+  * Used for sizing the vmemmap region on some architectures
+  */
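+
+Until now hugetlb PMD sharing borrowed the page refcount of the PMD table
+page (see the mm/hugetlb.c hunks later in this patch); a dedicated counter
+makes "how many VMAs share this table" explicit instead of being inferred
+from page_count(). Lifecycle under CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING,
+roughly:
+
+	ptdesc_pmd_pts_init(ptdesc);	/* at ctor time: share count = 0 */
+	ptdesc_pmd_pts_inc(ptdesc);	/* another VMA reuses the table  */
+	ptdesc_pmd_pts_dec(ptdesc);	/* one sharer unmaps             */
+	/* huge_pmd_unshare(): a count of 0 now means "not shared", with
+	 * no guessing about what other page references might exist. */
+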
+diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
+index c95f7e6ba25514..ba7b52584770d7 100644
+--- a/include/net/bluetooth/hci_core.h
++++ b/include/net/bluetooth/hci_core.h
+@@ -804,7 +804,6 @@ struct hci_conn_params {
+ extern struct list_head hci_dev_list;
+ extern struct list_head hci_cb_list;
+ extern rwlock_t hci_dev_list_lock;
+-extern struct mutex hci_cb_list_lock;
+ 
+ #define hci_dev_set_flag(hdev, nr)             set_bit((nr), (hdev)->dev_flags)
+ #define hci_dev_clear_flag(hdev, nr)           clear_bit((nr), (hdev)->dev_flags)
+@@ -2007,24 +2006,47 @@ struct hci_cb {
+ 
+ 	char *name;
+ 
++	bool (*match)		(struct hci_conn *conn);
+ 	void (*connect_cfm)	(struct hci_conn *conn, __u8 status);
+ 	void (*disconn_cfm)	(struct hci_conn *conn, __u8 status);
+ 	void (*security_cfm)	(struct hci_conn *conn, __u8 status,
+-								__u8 encrypt);
++				 __u8 encrypt);
+ 	void (*key_change_cfm)	(struct hci_conn *conn, __u8 status);
+ 	void (*role_switch_cfm)	(struct hci_conn *conn, __u8 status, __u8 role);
+ };
+ 
++static inline void hci_cb_lookup(struct hci_conn *conn, struct list_head *list)
++{
++	struct hci_cb *cb, *cpy;
++
++	rcu_read_lock();
++	list_for_each_entry_rcu(cb, &hci_cb_list, list) {
++		if (cb->match && cb->match(conn)) {
++			cpy = kmalloc(sizeof(*cpy), GFP_ATOMIC);
++			if (!cpy)
++				break;
++
++			*cpy = *cb;
++			INIT_LIST_HEAD(&cpy->list);
++			list_add_rcu(&cpy->list, list);
++		}
++	}
++	rcu_read_unlock();
++}
++
+ static inline void hci_connect_cfm(struct hci_conn *conn, __u8 status)
+ {
+-	struct hci_cb *cb;
++	struct list_head list;
++	struct hci_cb *cb, *tmp;
++
++	INIT_LIST_HEAD(&list);
++	hci_cb_lookup(conn, &list);
+ 
+-	mutex_lock(&hci_cb_list_lock);
+-	list_for_each_entry(cb, &hci_cb_list, list) {
++	list_for_each_entry_safe(cb, tmp, &list, list) {
+ 		if (cb->connect_cfm)
+ 			cb->connect_cfm(conn, status);
++		kfree(cb);
+ 	}
+-	mutex_unlock(&hci_cb_list_lock);
+ 
+ 	if (conn->connect_cfm_cb)
+ 		conn->connect_cfm_cb(conn, status);
+@@ -2032,43 +2054,55 @@ static inline void hci_connect_cfm(struct hci_conn *conn, __u8 status)
+ 
+ static inline void hci_disconn_cfm(struct hci_conn *conn, __u8 reason)
+ {
+-	struct hci_cb *cb;
++	struct list_head list;
++	struct hci_cb *cb, *tmp;
++
++	INIT_LIST_HEAD(&list);
++	hci_cb_lookup(conn, &list);
+ 
+-	mutex_lock(&hci_cb_list_lock);
+-	list_for_each_entry(cb, &hci_cb_list, list) {
++	list_for_each_entry_safe(cb, tmp, &list, list) {
+ 		if (cb->disconn_cfm)
+ 			cb->disconn_cfm(conn, reason);
++		kfree(cb);
+ 	}
+-	mutex_unlock(&hci_cb_list_lock);
+ 
+ 	if (conn->disconn_cfm_cb)
+ 		conn->disconn_cfm_cb(conn, reason);
+ }
+ 
+-static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status)
++static inline void hci_security_cfm(struct hci_conn *conn, __u8 status,
++				    __u8 encrypt)
+ {
+-	struct hci_cb *cb;
+-	__u8 encrypt;
+-
+-	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
+-		return;
++	struct list_head list;
++	struct hci_cb *cb, *tmp;
+ 
+-	encrypt = test_bit(HCI_CONN_ENCRYPT, &conn->flags) ? 0x01 : 0x00;
++	INIT_LIST_HEAD(&list);
++	hci_cb_lookup(conn, &list);
+ 
+-	mutex_lock(&hci_cb_list_lock);
+-	list_for_each_entry(cb, &hci_cb_list, list) {
++	list_for_each_entry_safe(cb, tmp, &list, list) {
+ 		if (cb->security_cfm)
+ 			cb->security_cfm(conn, status, encrypt);
++		kfree(cb);
+ 	}
+-	mutex_unlock(&hci_cb_list_lock);
+ 
+ 	if (conn->security_cfm_cb)
+ 		conn->security_cfm_cb(conn, status);
+ }
+ 
++static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status)
++{
++	__u8 encrypt;
++
++	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
++		return;
++
++	encrypt = test_bit(HCI_CONN_ENCRYPT, &conn->flags) ? 0x01 : 0x00;
++
++	hci_security_cfm(conn, status, encrypt);
++}
++
+ static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status)
+ {
+-	struct hci_cb *cb;
+ 	__u8 encrypt;
+ 
+ 	if (conn->state == BT_CONFIG) {
+@@ -2095,40 +2129,38 @@ static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status)
+ 			conn->sec_level = conn->pending_sec_level;
+ 	}
+ 
+-	mutex_lock(&hci_cb_list_lock);
+-	list_for_each_entry(cb, &hci_cb_list, list) {
+-		if (cb->security_cfm)
+-			cb->security_cfm(conn, status, encrypt);
+-	}
+-	mutex_unlock(&hci_cb_list_lock);
+-
+-	if (conn->security_cfm_cb)
+-		conn->security_cfm_cb(conn, status);
++	hci_security_cfm(conn, status, encrypt);
+ }
+ 
+ static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status)
+ {
+-	struct hci_cb *cb;
++	struct list_head list;
++	struct hci_cb *cb, *tmp;
++
++	INIT_LIST_HEAD(&list);
++	hci_cb_lookup(conn, &list);
+ 
+-	mutex_lock(&hci_cb_list_lock);
+-	list_for_each_entry(cb, &hci_cb_list, list) {
++	list_for_each_entry_safe(cb, tmp, &list, list) {
+ 		if (cb->key_change_cfm)
+ 			cb->key_change_cfm(conn, status);
++		kfree(cb);
+ 	}
+-	mutex_unlock(&hci_cb_list_lock);
+ }
+ 
+ static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status,
+ 								__u8 role)
+ {
+-	struct hci_cb *cb;
++	struct list_head list;
++	struct hci_cb *cb, *tmp;
++
++	INIT_LIST_HEAD(&list);
++	hci_cb_lookup(conn, &list);
+ 
+-	mutex_lock(&hci_cb_list_lock);
+-	list_for_each_entry(cb, &hci_cb_list, list) {
++	list_for_each_entry_safe(cb, tmp, &list, list) {
+ 		if (cb->role_switch_cfm)
+ 			cb->role_switch_cfm(conn, status, role);
++		kfree(cb);
+ 	}
+-	mutex_unlock(&hci_cb_list_lock);
+ }
+ 
+ static inline bool hci_bdaddr_is_rpa(bdaddr_t *bdaddr, u8 addr_type)
+diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
+index 91ae20cb76485b..471c353d32a4a5 100644
+--- a/include/net/netfilter/nf_tables.h
++++ b/include/net/netfilter/nf_tables.h
+@@ -733,15 +733,18 @@ struct nft_set_ext_tmpl {
+ /**
+  *	struct nft_set_ext - set extensions
+  *
+- *	@genmask: generation mask
++ *	@genmask: generation mask, but also flags (see NFT_SET_ELEM_DEAD_BIT)
+  *	@offset: offsets of individual extension types
+  *	@data: beginning of extension data
++ *
++ *	This structure must be aligned to word size, otherwise atomic bitops
++ *	on genmask field can cause alignment failure on some archs.
+  */
+ struct nft_set_ext {
+ 	u8	genmask;
+ 	u8	offset[NFT_SET_EXT_NUM];
+ 	char	data[];
+-};
++} __aligned(BITS_PER_LONG / 8);
+ 
+ static inline void nft_set_ext_prepare(struct nft_set_ext_tmpl *tmpl)
+ {
+diff --git a/include/sound/cs35l56.h b/include/sound/cs35l56.h
+index 94e8185c4795fe..3dc7a1551ac350 100644
+--- a/include/sound/cs35l56.h
++++ b/include/sound/cs35l56.h
+@@ -271,12 +271,6 @@ struct cs35l56_base {
+ 	struct gpio_desc *reset_gpio;
+ };
+ 
+-/* Temporary to avoid a build break with the HDA driver */
+-static inline int cs35l56_force_sync_asp1_registers_from_cache(struct cs35l56_base *cs35l56_base)
+-{
+-	return 0;
+-}
+-
+ static inline bool cs35l56_is_otp_register(unsigned int reg)
+ {
+ 	return (reg >> 16) == 3;
+diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
+index d407576ddfb782..eec5eb7de8430e 100644
+--- a/io_uring/kbuf.c
++++ b/io_uring/kbuf.c
+@@ -139,6 +139,7 @@ static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
+ 	struct io_uring_buf_ring *br = bl->buf_ring;
+ 	__u16 tail, head = bl->head;
+ 	struct io_uring_buf *buf;
++	void __user *ret;
+ 
+ 	tail = smp_load_acquire(&br->tail);
+ 	if (unlikely(tail == head))
+@@ -153,6 +154,7 @@ static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
+ 	req->flags |= REQ_F_BUFFER_RING | REQ_F_BUFFERS_COMMIT;
+ 	req->buf_list = bl;
+ 	req->buf_index = buf->bid;
++	ret = u64_to_user_ptr(buf->addr);
+ 
+ 	if (issue_flags & IO_URING_F_UNLOCKED || !io_file_can_poll(req)) {
+ 		/*
+@@ -168,7 +170,7 @@ static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
+ 		io_kbuf_commit(req, bl, *len, 1);
+ 		req->buf_list = NULL;
+ 	}
+-	return u64_to_user_ptr(buf->addr);
++	return ret;
+ }
+ 
+ void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
+diff --git a/io_uring/net.c b/io_uring/net.c
+index 18507658a921d7..7f549be9abd1e6 100644
+--- a/io_uring/net.c
++++ b/io_uring/net.c
+@@ -748,6 +748,7 @@ static int io_recvmsg_prep_setup(struct io_kiocb *req)
+ 	if (req->opcode == IORING_OP_RECV) {
+ 		kmsg->msg.msg_name = NULL;
+ 		kmsg->msg.msg_namelen = 0;
++		kmsg->msg.msg_inq = 0;
+ 		kmsg->msg.msg_control = NULL;
+ 		kmsg->msg.msg_get_inq = 1;
+ 		kmsg->msg.msg_controllen = 0;
+diff --git a/io_uring/rw.c b/io_uring/rw.c
+index 155938f1009313..39ad25d16ed404 100644
+--- a/io_uring/rw.c
++++ b/io_uring/rw.c
+@@ -979,6 +979,8 @@ int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
+ 		io_kbuf_recycle(req, issue_flags);
+ 		if (ret < 0)
+ 			req_set_fail(req);
++	} else if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
++		cflags = io_put_kbuf(req, ret, issue_flags);
+ 	} else {
+ 		/*
+ 		 * Any successful return value will keep the multishot read
+diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
+index 233ea78f8f1bd9..2b9c8c168a0ba3 100644
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -539,6 +539,8 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
+ 
+ int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt)
+ {
++	int err;
++
+ 	/* Branch offsets can't overflow when program is shrinking, no need
+ 	 * to call bpf_adj_branches(..., true) here
+ 	 */
+@@ -546,7 +548,9 @@ int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt)
+ 		sizeof(struct bpf_insn) * (prog->len - off - cnt));
+ 	prog->len -= cnt;
+ 
+-	return WARN_ON_ONCE(bpf_adj_branches(prog, off, off + cnt, off, false));
++	err = bpf_adj_branches(prog, off, off + cnt, off, false);
++	WARN_ON_ONCE(err);
++	return err;
+ }
+ 
+ static void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
+@@ -2936,7 +2940,7 @@ void __weak bpf_jit_compile(struct bpf_prog *prog)
+ {
+ }
+ 
+-bool __weak bpf_helper_changes_pkt_data(void *func)
++bool __weak bpf_helper_changes_pkt_data(enum bpf_func_id func_id)
+ {
+ 	return false;
+ }
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 767f1cb8c27e17..a0cab0d0252fab 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -10476,7 +10476,7 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
+ 	}
+ 
+ 	/* With LD_ABS/IND some JITs save/restore skb from r1. */
+-	changes_data = bpf_helper_changes_pkt_data(fn->func);
++	changes_data = bpf_helper_changes_pkt_data(func_id);
+ 	if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) {
+ 		verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n",
+ 			func_id_name(func_id), func_id);
+diff --git a/kernel/kcov.c b/kernel/kcov.c
+index 28a6be6e64fdd7..187ba1b80bda16 100644
+--- a/kernel/kcov.c
++++ b/kernel/kcov.c
+@@ -166,7 +166,7 @@ static void kcov_remote_area_put(struct kcov_remote_area *area,
+  * Unlike in_serving_softirq(), this function returns false when called during
+  * a hardirq or an NMI that happened in the softirq context.
+  */
+-static inline bool in_softirq_really(void)
++static __always_inline bool in_softirq_really(void)
+ {
+ 	return in_serving_softirq() && !in_hardirq() && !in_nmi();
+ }
+diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
+index 79bb18651cdb8b..40f915f893e2ed 100644
+--- a/kernel/sched/ext.c
++++ b/kernel/sched/ext.c
+@@ -4367,7 +4367,7 @@ static void scx_ops_bypass(bool bypass)
+ 		 * sees scx_rq_bypassing() before moving tasks to SCX.
+ 		 */
+ 		if (!scx_enabled()) {
+-			rq_unlock_irqrestore(rq, &rf);
++			rq_unlock(rq, &rf);
+ 			continue;
+ 		}
+ 
+@@ -6637,7 +6637,7 @@ __bpf_kfunc int bpf_iter_scx_dsq_new(struct bpf_iter_scx_dsq *it, u64 dsq_id,
+ 		return -ENOENT;
+ 
+ 	INIT_LIST_HEAD(&kit->cursor.node);
+-	kit->cursor.flags |= SCX_DSQ_LNODE_ITER_CURSOR | flags;
++	kit->cursor.flags = SCX_DSQ_LNODE_ITER_CURSOR | flags;
+ 	kit->cursor.priv = READ_ONCE(kit->dsq->seq);
+ 
+ 	return 0;
+diff --git a/kernel/trace/fgraph.c b/kernel/trace/fgraph.c
+index 72bcbfad53db04..c12335499ec91e 100644
+--- a/kernel/trace/fgraph.c
++++ b/kernel/trace/fgraph.c
+@@ -802,7 +802,7 @@ static unsigned long __ftrace_return_to_handler(struct fgraph_ret_regs *ret_regs
+ #endif
+ 	{
+ 		for_each_set_bit(i, &bitmap, sizeof(bitmap) * BITS_PER_BYTE) {
+-			struct fgraph_ops *gops = fgraph_array[i];
++			struct fgraph_ops *gops = READ_ONCE(fgraph_array[i]);
+ 
+ 			if (gops == &fgraph_stub)
+ 				continue;
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 3dd3b97d8049ae..cd9dbfb3038330 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -883,16 +883,13 @@ static void profile_graph_return(struct ftrace_graph_ret *trace,
+ }
+ 
+ static struct fgraph_ops fprofiler_ops = {
+-	.ops = {
+-		.flags = FTRACE_OPS_FL_INITIALIZED,
+-		INIT_OPS_HASH(fprofiler_ops.ops)
+-	},
+ 	.entryfunc = &profile_graph_entry,
+ 	.retfunc = &profile_graph_return,
+ };
+ 
+ static int register_ftrace_profiler(void)
+ {
++	ftrace_ops_set_global_filter(&fprofiler_ops.ops);
+ 	return register_ftrace_graph(&fprofiler_ops);
+ }
+ 
+@@ -903,12 +900,11 @@ static void unregister_ftrace_profiler(void)
+ #else
+ static struct ftrace_ops ftrace_profile_ops __read_mostly = {
+ 	.func		= function_profile_call,
+-	.flags		= FTRACE_OPS_FL_INITIALIZED,
+-	INIT_OPS_HASH(ftrace_profile_ops)
+ };
+ 
+ static int register_ftrace_profiler(void)
+ {
++	ftrace_ops_set_global_filter(&ftrace_profile_ops);
+ 	return register_ftrace_function(&ftrace_profile_ops);
+ }
+ 
+diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
+index 7149cd6fd4795e..ea9b44847ce6b7 100644
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -364,6 +364,18 @@ static bool process_string(const char *fmt, int len, struct trace_event_call *ca
+ 		s = r + 1;
+ 	} while (s < e);
+ 
++	/*
++	 * Check for arrays. If the argument has: foo[REC->val]
++	 * then it is very likely that foo is an array of strings
++	 * that are safe to use.
++	 */
++	r = strstr(s, "[");
++	if (r && r < e) {
++		r = strstr(r, "REC->");
++		if (r && r < e)
++			return true;
++	}
++
+ 	/*
+ 	 * If there's any strings in the argument consider this arg OK as it
+ 	 * could be: REC->field ? "foo" : "bar" and we don't want to get into
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index 9949ffad8df09d..cee65cb4310816 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -3680,23 +3680,27 @@ void workqueue_softirq_dead(unsigned int cpu)
+  * check_flush_dependency - check for flush dependency sanity
+  * @target_wq: workqueue being flushed
+  * @target_work: work item being flushed (NULL for workqueue flushes)
++ * @from_cancel: are we called from the work cancel path
+  *
+  * %current is trying to flush the whole @target_wq or @target_work on it.
+- * If @target_wq doesn't have %WQ_MEM_RECLAIM, verify that %current is not
+- * reclaiming memory or running on a workqueue which doesn't have
+- * %WQ_MEM_RECLAIM as that can break forward-progress guarantee leading to
+- * a deadlock.
++ * If this is not the cancel path (which implies work being flushed is either
++ * already running, or will not run at all), check if @target_wq doesn't have
++ * %WQ_MEM_RECLAIM and verify that %current is not reclaiming memory or running
++ * on a workqueue which doesn't have %WQ_MEM_RECLAIM as that can break forward-
++ * progress guarantee leading to a deadlock.
+  */
+ static void check_flush_dependency(struct workqueue_struct *target_wq,
+-				   struct work_struct *target_work)
++				   struct work_struct *target_work,
++				   bool from_cancel)
+ {
+-	work_func_t target_func = target_work ? target_work->func : NULL;
++	work_func_t target_func;
+ 	struct worker *worker;
+ 
+-	if (target_wq->flags & WQ_MEM_RECLAIM)
++	if (from_cancel || target_wq->flags & WQ_MEM_RECLAIM)
+ 		return;
+ 
+ 	worker = current_wq_worker();
++	target_func = target_work ? target_work->func : NULL;
+ 
+ 	WARN_ONCE(current->flags & PF_MEMALLOC,
+ 		  "workqueue: PF_MEMALLOC task %d(%s) is flushing !WQ_MEM_RECLAIM %s:%ps",
+@@ -3966,7 +3970,7 @@ void __flush_workqueue(struct workqueue_struct *wq)
+ 		list_add_tail(&this_flusher.list, &wq->flusher_overflow);
+ 	}
+ 
+-	check_flush_dependency(wq, NULL);
++	check_flush_dependency(wq, NULL, false);
+ 
+ 	mutex_unlock(&wq->mutex);
+ 
+@@ -4141,7 +4145,7 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
+ 	}
+ 
+ 	wq = pwq->wq;
+-	check_flush_dependency(wq, work);
++	check_flush_dependency(wq, work, from_cancel);
+ 
+ 	insert_wq_barrier(pwq, barr, work, worker);
+ 	raw_spin_unlock_irq(&pool->lock);
+@@ -5627,6 +5631,7 @@ static void wq_adjust_max_active(struct workqueue_struct *wq)
+ 	} while (activated);
+ }
+ 
++__printf(1, 0)
+ static struct workqueue_struct *__alloc_workqueue(const char *fmt,
+ 						  unsigned int flags,
+ 						  int max_active, va_list args)
+diff --git a/lib/maple_tree.c b/lib/maple_tree.c
+index 8d83e217271967..0cbe913634be4b 100644
+--- a/lib/maple_tree.c
++++ b/lib/maple_tree.c
+@@ -4367,6 +4367,7 @@ int mas_alloc_cyclic(struct ma_state *mas, unsigned long *startp,
+ 		ret = 1;
+ 	}
+ 	if (ret < 0 && range_lo > min) {
++		mas_reset(mas);
+ 		ret = mas_empty_area(mas, min, range_hi, 1);
+ 		if (ret == 0)
+ 			ret = 1;
+diff --git a/mm/damon/core.c b/mm/damon/core.c
+index 511c3f61ab44c4..54f4dd8d549f06 100644
+--- a/mm/damon/core.c
++++ b/mm/damon/core.c
+@@ -868,6 +868,11 @@ static int damon_commit_schemes(struct damon_ctx *dst, struct damon_ctx *src)
+ 				NUMA_NO_NODE);
+ 		if (!new_scheme)
+ 			return -ENOMEM;
++		err = damos_commit(new_scheme, src_scheme);
++		if (err) {
++			damon_destroy_scheme(new_scheme);
++			return err;
++		}
+ 		damon_add_scheme(dst, new_scheme);
+ 	}
+ 	return 0;
+@@ -961,8 +966,11 @@ static int damon_commit_targets(
+ 			return -ENOMEM;
+ 		err = damon_commit_target(new_target, false,
+ 				src_target, damon_target_has_pid(src));
+-		if (err)
++		if (err) {
++			damon_destroy_target(new_target);
+ 			return err;
++		}
++		damon_add_target(dst, new_target);
+ 	}
+ 	return 0;
+ }
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 5dc57b74a8fe9a..2fa87b9ecec6c7 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -7200,7 +7200,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
+ 			spte = hugetlb_walk(svma, saddr,
+ 					    vma_mmu_pagesize(svma));
+ 			if (spte) {
+-				get_page(virt_to_page(spte));
++				ptdesc_pmd_pts_inc(virt_to_ptdesc(spte));
+ 				break;
+ 			}
+ 		}
+@@ -7215,7 +7215,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
+ 				(pmd_t *)((unsigned long)spte & PAGE_MASK));
+ 		mm_inc_nr_pmds(mm);
+ 	} else {
+-		put_page(virt_to_page(spte));
++		ptdesc_pmd_pts_dec(virt_to_ptdesc(spte));
+ 	}
+ 	spin_unlock(&mm->page_table_lock);
+ out:
+@@ -7227,10 +7227,6 @@ pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
+ /*
+  * unmap huge page backed by shared pte.
+  *
+- * Hugetlb pte page is ref counted at the time of mapping.  If pte is shared
+- * indicated by page_count > 1, unmap is achieved by clearing pud and
+- * decrementing the ref count. If count == 1, the pte page is not shared.
+- *
+  * Called with page table lock held.
+  *
+  * returns: 1 successfully unmapped a shared pte page
+@@ -7239,18 +7235,20 @@ pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
+ int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
+ 					unsigned long addr, pte_t *ptep)
+ {
++	unsigned long sz = huge_page_size(hstate_vma(vma));
+ 	pgd_t *pgd = pgd_offset(mm, addr);
+ 	p4d_t *p4d = p4d_offset(pgd, addr);
+ 	pud_t *pud = pud_offset(p4d, addr);
+ 
+ 	i_mmap_assert_write_locked(vma->vm_file->f_mapping);
+ 	hugetlb_vma_assert_locked(vma);
+-	BUG_ON(page_count(virt_to_page(ptep)) == 0);
+-	if (page_count(virt_to_page(ptep)) == 1)
++	if (sz != PMD_SIZE)
++		return 0;
++	if (!ptdesc_pmd_pts_count(virt_to_ptdesc(ptep)))
+ 		return 0;
+ 
+ 	pud_clear(pud);
+-	put_page(virt_to_page(ptep));
++	ptdesc_pmd_pts_dec(virt_to_ptdesc(ptep));
+ 	mm_dec_nr_pmds(mm);
+ 	return 1;
+ }
+diff --git a/mm/kmemleak.c b/mm/kmemleak.c
+index 0400f5e8ac60de..74f5f4c51ab8c8 100644
+--- a/mm/kmemleak.c
++++ b/mm/kmemleak.c
+@@ -373,7 +373,7 @@ static void print_unreferenced(struct seq_file *seq,
+ 
+ 	for (i = 0; i < nr_entries; i++) {
+ 		void *ptr = (void *)entries[i];
+-		warn_or_seq_printf(seq, "    [<%pK>] %pS\n", ptr, ptr);
++		warn_or_seq_printf(seq, "    %pS\n", ptr);
+ 	}
+ }
+ 
+diff --git a/mm/memfd.c b/mm/memfd.c
+index c17c3ea701a17e..35a370d75c9ad7 100644
+--- a/mm/memfd.c
++++ b/mm/memfd.c
+@@ -170,7 +170,7 @@ static int memfd_wait_for_pins(struct address_space *mapping)
+ 	return error;
+ }
+ 
+-static unsigned int *memfd_file_seals_ptr(struct file *file)
++unsigned int *memfd_file_seals_ptr(struct file *file)
+ {
+ 	if (shmem_file(file))
+ 		return &SHMEM_I(file_inode(file))->seals;
+diff --git a/mm/mmap.c b/mm/mmap.c
+index 7fb4c1e97175f9..6183805f6f9e6e 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -47,6 +47,7 @@
+ #include <linux/oom.h>
+ #include <linux/sched/mm.h>
+ #include <linux/ksm.h>
++#include <linux/memfd.h>
+ 
+ #include <linux/uaccess.h>
+ #include <asm/cacheflush.h>
+@@ -368,6 +369,7 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
+ 
+ 	if (file) {
+ 		struct inode *inode = file_inode(file);
++		unsigned int seals = memfd_file_seals(file);
+ 		unsigned long flags_mask;
+ 
+ 		if (!file_mmap_ok(file, inode, pgoff, len))
+@@ -408,6 +410,8 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
+ 			vm_flags |= VM_SHARED | VM_MAYSHARE;
+ 			if (!(file->f_mode & FMODE_WRITE))
+ 				vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
++			else if (is_readonly_sealed(seals, vm_flags))
++				vm_flags &= ~VM_MAYWRITE;
+ 			fallthrough;
+ 		case MAP_PRIVATE:
+ 			if (!(file->f_mode & FMODE_READ))
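+
+Together with the mm.h helpers earlier in this patch, this restores mapping
+a write-sealed memfd read-only: only writable shared mappings are refused,
+and a read-only shared mapping loses VM_MAYWRITE so it can never be
+mprotect()ed writable afterwards. A userspace sketch of the intended
+behaviour (error handling elided):
+
+	#define _GNU_SOURCE
+	#include <fcntl.h>
+	#include <sys/mman.h>
+	#include <unistd.h>
+
+	int fd = memfd_create("demo", MFD_ALLOW_SEALING);
+
+	ftruncate(fd, 4096);
+	fcntl(fd, F_ADD_SEALS, F_SEAL_WRITE);
+
+	/* still refused: writable shared mapping of a write-sealed memfd */
+	mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+
+	/* works again with this fix: read-only shared mapping -- and since
+	 * VM_MAYWRITE is cleared, mprotect(..., PROT_WRITE) keeps failing */
+	mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
+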
+diff --git a/mm/readahead.c b/mm/readahead.c
+index 99fdb2b5b56862..bf79275060f3be 100644
+--- a/mm/readahead.c
++++ b/mm/readahead.c
+@@ -641,7 +641,11 @@ void page_cache_async_ra(struct readahead_control *ractl,
+ 			1UL << order);
+ 	if (index == expected) {
+ 		ra->start += ra->size;
+-		ra->size = get_next_ra_size(ra, max_pages);
++		/*
++		 * In the case of MADV_HUGEPAGE, the actual size might exceed
++		 * the readahead window.
++		 */
++		ra->size = max(ra->size, get_next_ra_size(ra, max_pages));
+ 		ra->async_size = ra->size;
+ 		goto readit;
+ 	}
+diff --git a/mm/shmem.c b/mm/shmem.c
+index b03ced0c3d4858..dd4eb11c84b59e 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -1527,7 +1527,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
+ 			    !shmem_falloc->waitq &&
+ 			    index >= shmem_falloc->start &&
+ 			    index < shmem_falloc->next)
+-				shmem_falloc->nr_unswapped++;
++				shmem_falloc->nr_unswapped += nr_pages;
+ 			else
+ 				shmem_falloc = NULL;
+ 			spin_unlock(&inode->i_lock);
+@@ -1664,6 +1664,7 @@ unsigned long shmem_allowable_huge_orders(struct inode *inode,
+ 	unsigned long mask = READ_ONCE(huge_shmem_orders_always);
+ 	unsigned long within_size_orders = READ_ONCE(huge_shmem_orders_within_size);
+ 	unsigned long vm_flags = vma ? vma->vm_flags : 0;
++	pgoff_t aligned_index;
+ 	bool global_huge;
+ 	loff_t i_size;
+ 	int order;
+@@ -1698,9 +1699,9 @@ unsigned long shmem_allowable_huge_orders(struct inode *inode,
+ 	/* Allow mTHP that will be fully within i_size. */
+ 	order = highest_order(within_size_orders);
+ 	while (within_size_orders) {
+-		index = round_up(index + 1, order);
++		aligned_index = round_up(index + 1, 1 << order);
+ 		i_size = round_up(i_size_read(inode), PAGE_SIZE);
+-		if (i_size >> PAGE_SHIFT >= index) {
++		if (i_size >> PAGE_SHIFT >= aligned_index) {
+ 			mask |= within_size_orders;
+ 			break;
+ 		}
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index 28ba2b06fc7dc2..67a680e4b484d7 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -374,7 +374,14 @@ unsigned long zone_reclaimable_pages(struct zone *zone)
+ 	if (can_reclaim_anon_pages(NULL, zone_to_nid(zone), NULL))
+ 		nr += zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_ANON) +
+ 			zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_ANON);
+-
++	/*
++	 * If there are no reclaimable file-backed or anonymous pages,
++	 * ensure zones with sufficient free pages are not skipped.
++	 * This prevents zones like DMA32 from being ignored in reclaim
++	 * scenarios where they can still help alleviate memory pressure.
++	 */
++	if (nr == 0)
++		nr = zone_page_state_snapshot(zone, NR_FREE_PAGES);
+ 	return nr;
+ }
+ 
+diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
+index 72439764186ed2..b5553c08e73162 100644
+--- a/net/bluetooth/hci_core.c
++++ b/net/bluetooth/hci_core.c
+@@ -57,7 +57,6 @@ DEFINE_RWLOCK(hci_dev_list_lock);
+ 
+ /* HCI callback list */
+ LIST_HEAD(hci_cb_list);
+-DEFINE_MUTEX(hci_cb_list_lock);
+ 
+ /* HCI ID Numbering */
+ static DEFINE_IDA(hci_index_ida);
+@@ -2993,9 +2992,7 @@ int hci_register_cb(struct hci_cb *cb)
+ {
+ 	BT_DBG("%p name %s", cb, cb->name);
+ 
+-	mutex_lock(&hci_cb_list_lock);
+-	list_add_tail(&cb->list, &hci_cb_list);
+-	mutex_unlock(&hci_cb_list_lock);
++	list_add_tail_rcu(&cb->list, &hci_cb_list);
+ 
+ 	return 0;
+ }
+@@ -3005,9 +3002,8 @@ int hci_unregister_cb(struct hci_cb *cb)
+ {
+ 	BT_DBG("%p name %s", cb, cb->name);
+ 
+-	mutex_lock(&hci_cb_list_lock);
+-	list_del(&cb->list);
+-	mutex_unlock(&hci_cb_list_lock);
++	list_del_rcu(&cb->list);
++	synchronize_rcu();
+ 
+ 	return 0;
+ }
+diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c
+index 644b606743e212..bda2f2da7d7311 100644
+--- a/net/bluetooth/iso.c
++++ b/net/bluetooth/iso.c
+@@ -2137,6 +2137,11 @@ int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags)
+ 	return HCI_LM_ACCEPT;
+ }
+ 
++static bool iso_match(struct hci_conn *hcon)
++{
++	return hcon->type == ISO_LINK || hcon->type == LE_LINK;
++}
++
+ static void iso_connect_cfm(struct hci_conn *hcon, __u8 status)
+ {
+ 	if (hcon->type != ISO_LINK) {
+@@ -2318,6 +2323,7 @@ void iso_recv(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
+ 
+ static struct hci_cb iso_cb = {
+ 	.name		= "ISO",
++	.match		= iso_match,
+ 	.connect_cfm	= iso_connect_cfm,
+ 	.disconn_cfm	= iso_disconn_cfm,
+ };
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index 6544c1ed714344..27b4c4a2ba1fdd 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -7217,6 +7217,11 @@ static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
+ 	return NULL;
+ }
+ 
++static bool l2cap_match(struct hci_conn *hcon)
++{
++	return hcon->type == ACL_LINK || hcon->type == LE_LINK;
++}
++
+ static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
+ {
+ 	struct hci_dev *hdev = hcon->hdev;
+@@ -7224,9 +7229,6 @@ static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
+ 	struct l2cap_chan *pchan;
+ 	u8 dst_type;
+ 
+-	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
+-		return;
+-
+ 	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
+ 
+ 	if (status) {
+@@ -7291,9 +7293,6 @@ int l2cap_disconn_ind(struct hci_conn *hcon)
+ 
+ static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
+ {
+-	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
+-		return;
+-
+ 	BT_DBG("hcon %p reason %d", hcon, reason);
+ 
+ 	l2cap_conn_del(hcon, bt_to_errno(reason));
+@@ -7572,6 +7571,7 @@ void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
+ 
+ static struct hci_cb l2cap_cb = {
+ 	.name		= "L2CAP",
++	.match		= l2cap_match,
+ 	.connect_cfm	= l2cap_connect_cfm,
+ 	.disconn_cfm	= l2cap_disconn_cfm,
+ 	.security_cfm	= l2cap_security_cfm,
+diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
+index ad5177e3a69b77..4c56ca5a216c6f 100644
+--- a/net/bluetooth/rfcomm/core.c
++++ b/net/bluetooth/rfcomm/core.c
+@@ -2134,6 +2134,11 @@ static int rfcomm_run(void *unused)
+ 	return 0;
+ }
+ 
++static bool rfcomm_match(struct hci_conn *hcon)
++{
++	return hcon->type == ACL_LINK;
++}
++
+ static void rfcomm_security_cfm(struct hci_conn *conn, u8 status, u8 encrypt)
+ {
+ 	struct rfcomm_session *s;
+@@ -2180,6 +2185,7 @@ static void rfcomm_security_cfm(struct hci_conn *conn, u8 status, u8 encrypt)
+ 
+ static struct hci_cb rfcomm_cb = {
+ 	.name		= "RFCOMM",
++	.match		= rfcomm_match,
+ 	.security_cfm	= rfcomm_security_cfm
+ };
+ 
+diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
+index b872a2ca3ff38b..071c404c790af9 100644
+--- a/net/bluetooth/sco.c
++++ b/net/bluetooth/sco.c
+@@ -1355,11 +1355,13 @@ int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags)
+ 	return lm;
+ }
+ 
+-static void sco_connect_cfm(struct hci_conn *hcon, __u8 status)
++static bool sco_match(struct hci_conn *hcon)
+ {
+-	if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK)
+-		return;
++	return hcon->type == SCO_LINK || hcon->type == ESCO_LINK;
++}
+ 
++static void sco_connect_cfm(struct hci_conn *hcon, __u8 status)
++{
+ 	BT_DBG("hcon %p bdaddr %pMR status %u", hcon, &hcon->dst, status);
+ 
+ 	if (!status) {
+@@ -1374,9 +1376,6 @@ static void sco_connect_cfm(struct hci_conn *hcon, __u8 status)
+ 
+ static void sco_disconn_cfm(struct hci_conn *hcon, __u8 reason)
+ {
+-	if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK)
+-		return;
+-
+ 	BT_DBG("hcon %p reason %d", hcon, reason);
+ 
+ 	sco_conn_del(hcon, bt_to_errno(reason));
+@@ -1402,6 +1401,7 @@ void sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb)
+ 
+ static struct hci_cb sco_cb = {
+ 	.name		= "SCO",
++	.match		= sco_match,
+ 	.connect_cfm	= sco_connect_cfm,
+ 	.disconn_cfm	= sco_disconn_cfm,
+ };
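
The ISO, L2CAP, RFCOMM and SCO hunks above all apply one pattern: each profile registers a .match() predicate so the HCI core (now iterating the callback list under RCU after the hci_core.c conversion) can filter connections once, which lets the per-callback link-type checks be deleted. A self-contained sketch of that dispatch shape, with invented types:

    #include <stdio.h>

    enum link_type { ACL_LINK, SCO_LINK, LE_LINK };

    struct conn { enum link_type type; };

    struct cb {
        const char *name;
        int  (*match)(const struct conn *c);   /* new filter hook */
        void (*connect_cfm)(const struct conn *c, int status);
    };

    static int sco_match(const struct conn *c)
    {
        return c->type == SCO_LINK;
    }

    static void sco_connect_cfm(const struct conn *c, int status)
    {
        /* no per-callback type check needed any more */
        printf("SCO connect, status %d\n", status);
    }

    static const struct cb sco_cb = { "SCO", sco_match, sco_connect_cfm };

    static void dispatch(const struct cb *cb, const struct conn *c, int status)
    {
        if (cb->match && !cb->match(c))   /* core filters once, up front */
            return;
        if (cb->connect_cfm)
            cb->connect_cfm(c, status);
    }

    int main(void)
    {
        struct conn le = { LE_LINK }, sco = { SCO_LINK };

        dispatch(&sco_cb, &le, 0);   /* filtered out */
        dispatch(&sco_cb, &sco, 0);  /* delivered */
        return 0;
    }
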
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 8453e14d301b63..f3fa8353d262b0 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3640,8 +3640,10 @@ int skb_csum_hwoffload_help(struct sk_buff *skb,
+ 
+ 	if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
+ 		if (vlan_get_protocol(skb) == htons(ETH_P_IPV6) &&
+-		    skb_network_header_len(skb) != sizeof(struct ipv6hdr))
++		    skb_network_header_len(skb) != sizeof(struct ipv6hdr) &&
++		    !ipv6_has_hopopt_jumbo(skb))
+ 			goto sw_checksum;
++
+ 		switch (skb->csum_offset) {
+ 		case offsetof(struct tcphdr, check):
+ 		case offsetof(struct udphdr, check):
+diff --git a/net/core/filter.c b/net/core/filter.c
+index 55495063621d6c..54a53fae9e98f5 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -7918,42 +7918,37 @@ static const struct bpf_func_proto bpf_tcp_raw_check_syncookie_ipv6_proto = {
+ 
+ #endif /* CONFIG_INET */
+ 
+-bool bpf_helper_changes_pkt_data(void *func)
+-{
+-	if (func == bpf_skb_vlan_push ||
+-	    func == bpf_skb_vlan_pop ||
+-	    func == bpf_skb_store_bytes ||
+-	    func == bpf_skb_change_proto ||
+-	    func == bpf_skb_change_head ||
+-	    func == sk_skb_change_head ||
+-	    func == bpf_skb_change_tail ||
+-	    func == sk_skb_change_tail ||
+-	    func == bpf_skb_adjust_room ||
+-	    func == sk_skb_adjust_room ||
+-	    func == bpf_skb_pull_data ||
+-	    func == sk_skb_pull_data ||
+-	    func == bpf_clone_redirect ||
+-	    func == bpf_l3_csum_replace ||
+-	    func == bpf_l4_csum_replace ||
+-	    func == bpf_xdp_adjust_head ||
+-	    func == bpf_xdp_adjust_meta ||
+-	    func == bpf_msg_pull_data ||
+-	    func == bpf_msg_push_data ||
+-	    func == bpf_msg_pop_data ||
+-	    func == bpf_xdp_adjust_tail ||
+-#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
+-	    func == bpf_lwt_seg6_store_bytes ||
+-	    func == bpf_lwt_seg6_adjust_srh ||
+-	    func == bpf_lwt_seg6_action ||
+-#endif
+-#ifdef CONFIG_INET
+-	    func == bpf_sock_ops_store_hdr_opt ||
+-#endif
+-	    func == bpf_lwt_in_push_encap ||
+-	    func == bpf_lwt_xmit_push_encap)
++bool bpf_helper_changes_pkt_data(enum bpf_func_id func_id)
++{
++	switch (func_id) {
++	case BPF_FUNC_clone_redirect:
++	case BPF_FUNC_l3_csum_replace:
++	case BPF_FUNC_l4_csum_replace:
++	case BPF_FUNC_lwt_push_encap:
++	case BPF_FUNC_lwt_seg6_action:
++	case BPF_FUNC_lwt_seg6_adjust_srh:
++	case BPF_FUNC_lwt_seg6_store_bytes:
++	case BPF_FUNC_msg_pop_data:
++	case BPF_FUNC_msg_pull_data:
++	case BPF_FUNC_msg_push_data:
++	case BPF_FUNC_skb_adjust_room:
++	case BPF_FUNC_skb_change_head:
++	case BPF_FUNC_skb_change_proto:
++	case BPF_FUNC_skb_change_tail:
++	case BPF_FUNC_skb_pull_data:
++	case BPF_FUNC_skb_store_bytes:
++	case BPF_FUNC_skb_vlan_pop:
++	case BPF_FUNC_skb_vlan_push:
++	case BPF_FUNC_store_hdr_opt:
++	case BPF_FUNC_xdp_adjust_head:
++	case BPF_FUNC_xdp_adjust_meta:
++	case BPF_FUNC_xdp_adjust_tail:
++	/* tail-called program could call any of the above */
++	case BPF_FUNC_tail_call:
+ 		return true;
+-
+-	return false;
++	default:
++		return false;
++	}
+ }
+ 
+ const struct bpf_func_proto bpf_event_output_data_proto __weak;
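
Switching from function-pointer comparisons to a switch on enum bpf_func_id also lets the list name BPF_FUNC_tail_call, conservatively treating any tail call as potentially data-changing. The predicate's shape, reduced to a toy enum:

    #include <stdbool.h>
    #include <stdio.h>

    enum func_id { FUNC_MAP_LOOKUP, FUNC_SKB_PULL_DATA, FUNC_TAIL_CALL };

    static bool changes_pkt_data(enum func_id id)
    {
        switch (id) {
        case FUNC_SKB_PULL_DATA:
        /* a tail-called program could itself rewrite packet data */
        case FUNC_TAIL_CALL:
            return true;
        default:
            return false;
        }
    }

    int main(void)
    {
        printf("%d %d %d\n",
               changes_pkt_data(FUNC_MAP_LOOKUP),
               changes_pkt_data(FUNC_SKB_PULL_DATA),
               changes_pkt_data(FUNC_TAIL_CALL));
        return 0;
    }
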
+diff --git a/net/core/netdev-genl.c b/net/core/netdev-genl.c
+index 7ce22f40db5b04..d58270b48cb2cf 100644
+--- a/net/core/netdev-genl.c
++++ b/net/core/netdev-genl.c
+@@ -228,8 +228,12 @@ int netdev_nl_napi_get_doit(struct sk_buff *skb, struct genl_info *info)
+ 	rcu_read_unlock();
+ 	rtnl_unlock();
+ 
+-	if (err)
++	if (err) {
++		goto err_free_msg;
++	} else if (!rsp->len) {
++		err = -ENOENT;
+ 		goto err_free_msg;
++	}
+ 
+ 	return genlmsg_reply(rsp, info);
+ 
+diff --git a/net/core/sock.c b/net/core/sock.c
+index da50df485090ff..a83f64a1d96a29 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -1300,7 +1300,10 @@ int sk_setsockopt(struct sock *sk, int level, int optname,
+ 		sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
+ 		break;
+ 	case SO_REUSEPORT:
+-		sk->sk_reuseport = valbool;
++		if (valbool && !sk_is_inet(sk))
++			ret = -EOPNOTSUPP;
++		else
++			sk->sk_reuseport = valbool;
+ 		break;
+ 	case SO_DONTROUTE:
+ 		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
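
With this change SO_REUSEPORT is refused with EOPNOTSUPP outside inet sockets rather than silently accepted. A quick userspace probe — the printed result depends on whether the running kernel carries the patch:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <unistd.h>

    static void try_reuseport(int domain, const char *name)
    {
        int one = 1;
        int fd = socket(domain, SOCK_STREAM, 0);

        if (fd < 0) { perror("socket"); return; }
        if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one)) < 0)
            printf("%s: SO_REUSEPORT failed: %s\n", name, strerror(errno));
        else
            printf("%s: SO_REUSEPORT ok\n", name);
        close(fd);
    }

    int main(void)
    {
        try_reuseport(AF_INET, "AF_INET");
        try_reuseport(AF_UNIX, "AF_UNIX");  /* rejected on patched kernels */
        return 0;
    }
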
+diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
+index 25505f9b724c33..09b73acf037ae2 100644
+--- a/net/ipv4/ip_tunnel.c
++++ b/net/ipv4/ip_tunnel.c
+@@ -294,7 +294,7 @@ static int ip_tunnel_bind_dev(struct net_device *dev)
+ 
+ 		ip_tunnel_init_flow(&fl4, iph->protocol, iph->daddr,
+ 				    iph->saddr, tunnel->parms.o_key,
+-				    iph->tos & INET_DSCP_MASK, dev_net(dev),
++				    iph->tos & INET_DSCP_MASK, tunnel->net,
+ 				    tunnel->parms.link, tunnel->fwmark, 0, 0);
+ 		rt = ip_route_output_key(tunnel->net, &fl4);
+ 
+@@ -611,7 +611,7 @@ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
+ 	}
+ 	ip_tunnel_init_flow(&fl4, proto, key->u.ipv4.dst, key->u.ipv4.src,
+ 			    tunnel_id_to_key32(key->tun_id),
+-			    tos & INET_DSCP_MASK, dev_net(dev), 0, skb->mark,
++			    tos & INET_DSCP_MASK, tunnel->net, 0, skb->mark,
+ 			    skb_get_hash(skb), key->flow_flags);
+ 
+ 	if (!tunnel_hlen)
+@@ -774,7 +774,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
+ 
+ 	ip_tunnel_init_flow(&fl4, protocol, dst, tnl_params->saddr,
+ 			    tunnel->parms.o_key, tos & INET_DSCP_MASK,
+-			    dev_net(dev), READ_ONCE(tunnel->parms.link),
++			    tunnel->net, READ_ONCE(tunnel->parms.link),
+ 			    tunnel->fwmark, skb_get_hash(skb), 0);
+ 
+ 	if (ip_tunnel_encap(skb, &tunnel->encap, &protocol, &fl4) < 0)
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 2d844e1f867f0a..2d43b29da15e20 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -7328,6 +7328,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
+ 			if (unlikely(!inet_csk_reqsk_queue_hash_add(sk, req,
+ 								    req->timeout))) {
+ 				reqsk_free(req);
++				dst_release(dst);
+ 				return 0;
+ 			}
+ 
+diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c
+index 534a4498e280d7..fff09f5a796a75 100644
+--- a/net/ipv6/ila/ila_xlat.c
++++ b/net/ipv6/ila/ila_xlat.c
+@@ -200,6 +200,8 @@ static const struct nf_hook_ops ila_nf_hook_ops[] = {
+ 	},
+ };
+ 
++static DEFINE_MUTEX(ila_mutex);
++
+ static int ila_add_mapping(struct net *net, struct ila_xlat_params *xp)
+ {
+ 	struct ila_net *ilan = net_generic(net, ila_net_id);
+@@ -207,16 +209,20 @@ static int ila_add_mapping(struct net *net, struct ila_xlat_params *xp)
+ 	spinlock_t *lock = ila_get_lock(ilan, xp->ip.locator_match);
+ 	int err = 0, order;
+ 
+-	if (!ilan->xlat.hooks_registered) {
++	if (!READ_ONCE(ilan->xlat.hooks_registered)) {
+ 		/* We defer registering net hooks in the namespace until the
+ 		 * first mapping is added.
+ 		 */
+-		err = nf_register_net_hooks(net, ila_nf_hook_ops,
+-					    ARRAY_SIZE(ila_nf_hook_ops));
++		mutex_lock(&ila_mutex);
++		if (!ilan->xlat.hooks_registered) {
++			err = nf_register_net_hooks(net, ila_nf_hook_ops,
++						ARRAY_SIZE(ila_nf_hook_ops));
++			if (!err)
++				WRITE_ONCE(ilan->xlat.hooks_registered, true);
++		}
++		mutex_unlock(&ila_mutex);
+ 		if (err)
+ 			return err;
+-
+-		ilan->xlat.hooks_registered = true;
+ 	}
+ 
+ 	ila = kzalloc(sizeof(*ila), GFP_KERNEL);
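
The ila change is a textbook double-checked locking conversion: an unlocked READ_ONCE() fast path, a re-check under the new ila_mutex, and a WRITE_ONCE() only after registration succeeds. The same pattern in portable userspace C, using C11 atomics in place of READ_ONCE/WRITE_ONCE:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
    static atomic_bool hooks_registered;

    static int register_hooks_once(void)
    {
        int err = 0;

        /* cheap lock-free fast path */
        if (!atomic_load_explicit(&hooks_registered, memory_order_acquire)) {
            pthread_mutex_lock(&init_lock);
            /* re-check under the lock: another thread may have won */
            if (!atomic_load_explicit(&hooks_registered,
                                      memory_order_relaxed)) {
                err = 0; /* the one-time registration would happen here */
                if (!err)
                    atomic_store_explicit(&hooks_registered, 1,
                                          memory_order_release);
            }
            pthread_mutex_unlock(&init_lock);
        }
        return err;
    }

    int main(void)
    {
        printf("first call:  %d\n", register_hooks_once());
        printf("second call: %d\n", register_hooks_once());
        return 0;
    }
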
+diff --git a/net/llc/llc_input.c b/net/llc/llc_input.c
+index 51bccfb00a9cd9..61b0159b2fbee6 100644
+--- a/net/llc/llc_input.c
++++ b/net/llc/llc_input.c
+@@ -124,8 +124,8 @@ static inline int llc_fixup_skb(struct sk_buff *skb)
+ 	if (unlikely(!pskb_may_pull(skb, llc_len)))
+ 		return 0;
+ 
+-	skb->transport_header += llc_len;
+ 	skb_pull(skb, llc_len);
++	skb_reset_transport_header(skb);
+ 	if (skb->protocol == htons(ETH_P_802_2)) {
+ 		__be16 pdulen;
+ 		s32 data_size;
+diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
+index 1b1bf044378d48..f11fd360b422dd 100644
+--- a/net/mac80211/cfg.c
++++ b/net/mac80211/cfg.c
+@@ -4992,10 +4992,16 @@ static void ieee80211_del_intf_link(struct wiphy *wiphy,
+ 				    unsigned int link_id)
+ {
+ 	struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
++	u16 new_links = wdev->valid_links & ~BIT(link_id);
+ 
+ 	lockdep_assert_wiphy(sdata->local->hw.wiphy);
+ 
+-	ieee80211_vif_set_links(sdata, wdev->valid_links, 0);
++	/* During the link teardown process, certain functions require the
++	 * link_id to remain in the valid_links bitmap. Therefore, instead
++	 * of removing the link_id from the bitmap, pass a masked value so
++	 * that the link_id appears to no longer exist.
++	 */
++	ieee80211_vif_set_links(sdata, new_links, 0);
+ }
+ 
+ static int
+diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
+index 640239f4425b16..50eb1d8cd43deb 100644
+--- a/net/mac80211/mesh.c
++++ b/net/mac80211/mesh.c
+@@ -1157,14 +1157,14 @@ void ieee80211_mbss_info_change_notify(struct ieee80211_sub_if_data *sdata,
+ 				       u64 changed)
+ {
+ 	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+-	unsigned long bits = changed;
++	unsigned long bits[] = { BITMAP_FROM_U64(changed) };
+ 	u32 bit;
+ 
+-	if (!bits)
++	if (!changed)
+ 		return;
+ 
+ 	/* if we race with running work, worst case this work becomes a noop */
+-	for_each_set_bit(bit, &bits, sizeof(changed) * BITS_PER_BYTE)
++	for_each_set_bit(bit, bits, sizeof(changed) * BITS_PER_BYTE)
+ 		set_bit(bit, ifmsh->mbss_changed);
+ 	set_bit(MESH_WORK_MBSS_CHANGED, &ifmsh->wrkq_flags);
+ 	wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work);
+diff --git a/net/mac80211/util.c b/net/mac80211/util.c
+index b4814e97cf7422..38c30e4ddda98c 100644
+--- a/net/mac80211/util.c
++++ b/net/mac80211/util.c
+@@ -1825,6 +1825,9 @@ int ieee80211_reconfig(struct ieee80211_local *local)
+ 			WARN(1, "Hardware became unavailable upon resume. This could be a software issue prior to suspend or a hardware issue.\n");
+ 		else
+ 			WARN(1, "Hardware became unavailable during restart.\n");
++		ieee80211_wake_queues_by_reason(hw, IEEE80211_MAX_QUEUE_MAP,
++						IEEE80211_QUEUE_STOP_REASON_SUSPEND,
++						false);
+ 		ieee80211_handle_reconfig_failure(local);
+ 		return res;
+ 	}
+diff --git a/net/mptcp/options.c b/net/mptcp/options.c
+index 1603b3702e2207..a62bc874bf1e17 100644
+--- a/net/mptcp/options.c
++++ b/net/mptcp/options.c
+@@ -667,8 +667,15 @@ static bool mptcp_established_options_add_addr(struct sock *sk, struct sk_buff *
+ 		    &echo, &drop_other_suboptions))
+ 		return false;
+ 
++	/*
++	 * Later on, mptcp_write_options() will enforce mutual exclusion with
++	 * DSS; bail out if such an option is set and we can't drop it.
++	 */
+ 	if (drop_other_suboptions)
+ 		remaining += opt_size;
++	else if (opts->suboptions & OPTION_MPTCP_DSS)
++		return false;
++
+ 	len = mptcp_add_addr_len(opts->addr.family, echo, !!opts->addr.port);
+ 	if (remaining < len)
+ 		return false;
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index 8a8e8fee337f5e..4b9d850ce85a25 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -528,13 +528,13 @@ static void mptcp_send_ack(struct mptcp_sock *msk)
+ 		mptcp_subflow_send_ack(mptcp_subflow_tcp_sock(subflow));
+ }
+ 
+-static void mptcp_subflow_cleanup_rbuf(struct sock *ssk)
++static void mptcp_subflow_cleanup_rbuf(struct sock *ssk, int copied)
+ {
+ 	bool slow;
+ 
+ 	slow = lock_sock_fast(ssk);
+ 	if (tcp_can_send_ack(ssk))
+-		tcp_cleanup_rbuf(ssk, 1);
++		tcp_cleanup_rbuf(ssk, copied);
+ 	unlock_sock_fast(ssk, slow);
+ }
+ 
+@@ -551,7 +551,7 @@ static bool mptcp_subflow_could_cleanup(const struct sock *ssk, bool rx_empty)
+ 			      (ICSK_ACK_PUSHED2 | ICSK_ACK_PUSHED)));
+ }
+ 
+-static void mptcp_cleanup_rbuf(struct mptcp_sock *msk)
++static void mptcp_cleanup_rbuf(struct mptcp_sock *msk, int copied)
+ {
+ 	int old_space = READ_ONCE(msk->old_wspace);
+ 	struct mptcp_subflow_context *subflow;
+@@ -559,14 +559,14 @@ static void mptcp_cleanup_rbuf(struct mptcp_sock *msk)
+ 	int space =  __mptcp_space(sk);
+ 	bool cleanup, rx_empty;
+ 
+-	cleanup = (space > 0) && (space >= (old_space << 1));
+-	rx_empty = !__mptcp_rmem(sk);
++	cleanup = (space > 0) && (space >= (old_space << 1)) && copied;
++	rx_empty = !__mptcp_rmem(sk) && copied;
+ 
+ 	mptcp_for_each_subflow(msk, subflow) {
+ 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+ 
+ 		if (cleanup || mptcp_subflow_could_cleanup(ssk, rx_empty))
+-			mptcp_subflow_cleanup_rbuf(ssk);
++			mptcp_subflow_cleanup_rbuf(ssk, copied);
+ 	}
+ }
+ 
+@@ -1939,6 +1939,8 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 	goto out;
+ }
+ 
++static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied);
++
+ static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk,
+ 				struct msghdr *msg,
+ 				size_t len, int flags,
+@@ -1992,6 +1994,7 @@ static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk,
+ 			break;
+ 	}
+ 
++	mptcp_rcv_space_adjust(msk, copied);
+ 	return copied;
+ }
+ 
+@@ -2217,9 +2220,6 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ 
+ 		copied += bytes_read;
+ 
+-		/* be sure to advertise window change */
+-		mptcp_cleanup_rbuf(msk);
+-
+ 		if (skb_queue_empty(&msk->receive_queue) && __mptcp_move_skbs(msk))
+ 			continue;
+ 
+@@ -2268,7 +2268,7 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ 		}
+ 
+ 		pr_debug("block timeout %ld\n", timeo);
+-		mptcp_rcv_space_adjust(msk, copied);
++		mptcp_cleanup_rbuf(msk, copied);
+ 		err = sk_wait_data(sk, &timeo, NULL);
+ 		if (err < 0) {
+ 			err = copied ? : err;
+@@ -2276,7 +2276,7 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ 		}
+ 	}
+ 
+-	mptcp_rcv_space_adjust(msk, copied);
++	mptcp_cleanup_rbuf(msk, copied);
+ 
+ out_err:
+ 	if (cmsg_flags && copied >= 0) {
+diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
+index 2b5e246b8d9a7a..b94cb2ffbaf8fa 100644
+--- a/net/netrom/nr_route.c
++++ b/net/netrom/nr_route.c
+@@ -754,6 +754,12 @@ int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25)
+ 	int ret;
+ 	struct sk_buff *skbn;
+ 
++	/*
++	 * Reject malformed packets early. Check that it contains at least 2
++	 * addresses and 1 byte more for Time-To-Live
++	 */
++	if (skb->len < 2 * sizeof(ax25_address) + 1)
++		return 0;
+ 
+ 	nr_src  = (ax25_address *)(skb->data + 0);
+ 	nr_dest = (ax25_address *)(skb->data + 7);
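
The netrom fix is a bounds check before the fixed-offset parsing that follows it. A self-contained sketch with the same layout (two 7-byte AX.25 addresses plus a TTL byte); names are illustrative:

    #include <stddef.h>
    #include <stdio.h>

    #define AX25_ADDR_LEN 7

    static int route_frame(const unsigned char *data, size_t len)
    {
        if (len < 2 * AX25_ADDR_LEN + 1)  /* src + dest + TTL byte */
            return 0;                     /* drop malformed frame */

        const unsigned char *src  = data + 0;
        const unsigned char *dest = data + AX25_ADDR_LEN;
        unsigned char ttl = data[2 * AX25_ADDR_LEN];

        (void)src; (void)dest;
        printf("ttl=%u\n", ttl);
        return 1;
    }

    int main(void)
    {
        unsigned char short_frame[4] = {0};
        unsigned char ok_frame[15] = { [14] = 5 };

        printf("short: %d\n", route_frame(short_frame, sizeof(short_frame)));
        printf("ok:    %d\n", route_frame(ok_frame, sizeof(ok_frame)));
        return 0;
    }
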
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 97774bd4b6cb11..f3cecb3e4bcb18 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -538,10 +538,8 @@ static void *packet_current_frame(struct packet_sock *po,
+ 	return packet_lookup_frame(po, rb, rb->head, status);
+ }
+ 
+-static u16 vlan_get_tci(struct sk_buff *skb, struct net_device *dev)
++static u16 vlan_get_tci(const struct sk_buff *skb, struct net_device *dev)
+ {
+-	u8 *skb_orig_data = skb->data;
+-	int skb_orig_len = skb->len;
+ 	struct vlan_hdr vhdr, *vh;
+ 	unsigned int header_len;
+ 
+@@ -562,33 +560,21 @@ static u16 vlan_get_tci(struct sk_buff *skb, struct net_device *dev)
+ 	else
+ 		return 0;
+ 
+-	skb_push(skb, skb->data - skb_mac_header(skb));
+-	vh = skb_header_pointer(skb, header_len, sizeof(vhdr), &vhdr);
+-	if (skb_orig_data != skb->data) {
+-		skb->data = skb_orig_data;
+-		skb->len = skb_orig_len;
+-	}
++	vh = skb_header_pointer(skb, skb_mac_offset(skb) + header_len,
++				sizeof(vhdr), &vhdr);
+ 	if (unlikely(!vh))
+ 		return 0;
+ 
+ 	return ntohs(vh->h_vlan_TCI);
+ }
+ 
+-static __be16 vlan_get_protocol_dgram(struct sk_buff *skb)
++static __be16 vlan_get_protocol_dgram(const struct sk_buff *skb)
+ {
+ 	__be16 proto = skb->protocol;
+ 
+-	if (unlikely(eth_type_vlan(proto))) {
+-		u8 *skb_orig_data = skb->data;
+-		int skb_orig_len = skb->len;
+-
+-		skb_push(skb, skb->data - skb_mac_header(skb));
+-		proto = __vlan_get_protocol(skb, proto, NULL);
+-		if (skb_orig_data != skb->data) {
+-			skb->data = skb_orig_data;
+-			skb->len = skb_orig_len;
+-		}
+-	}
++	if (unlikely(eth_type_vlan(proto)))
++		proto = __vlan_get_protocol_offset(skb, proto,
++						   skb_mac_offset(skb), NULL);
+ 
+ 	return proto;
+ }
+diff --git a/net/sctp/associola.c b/net/sctp/associola.c
+index c45c192b787873..0b0794f164cf2e 100644
+--- a/net/sctp/associola.c
++++ b/net/sctp/associola.c
+@@ -137,7 +137,8 @@ static struct sctp_association *sctp_association_init(
+ 		= 5 * asoc->rto_max;
+ 
+ 	asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
+-	asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] = sp->autoclose * HZ;
++	asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] =
++		(unsigned long)sp->autoclose * HZ;
+ 
+ 	/* Initializes the timers */
+ 	for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
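
The sctp cast matters because the multiplication must happen in a wide type, not merely be assigned to one. A small demonstration (assumes LP64, where unsigned long is 64-bit; HZ stands in for the kernel constant):

    #include <stdio.h>

    #define HZ 1000

    int main(void)
    {
        unsigned int autoclose = 4294968;      /* just past UINT_MAX / HZ */
        unsigned long wrong = autoclose * HZ;  /* wraps in 32-bit math */
        unsigned long right = (unsigned long)autoclose * HZ;

        printf("wrong: %lu\n", wrong);   /* 704 */
        printf("right: %lu\n", right);   /* 4294968000 */
        return 0;
    }
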
+diff --git a/net/wireless/util.c b/net/wireless/util.c
+index f49b55724f8341..18585b1416c662 100644
+--- a/net/wireless/util.c
++++ b/net/wireless/util.c
+@@ -2843,10 +2843,9 @@ void cfg80211_remove_link(struct wireless_dev *wdev, unsigned int link_id)
+ 		break;
+ 	}
+ 
+-	wdev->valid_links &= ~BIT(link_id);
+-
+ 	rdev_del_intf_link(rdev, wdev, link_id);
+ 
++	wdev->valid_links &= ~BIT(link_id);
+ 	eth_zero_addr(wdev->links[link_id].addr);
+ }
+ 
+diff --git a/scripts/mksysmap b/scripts/mksysmap
+index c12723a0465562..3accbdb269ac70 100755
+--- a/scripts/mksysmap
++++ b/scripts/mksysmap
+@@ -26,7 +26,7 @@
+ #  (do not forget a space before each pattern)
+ 
+ # local symbols for ARM, MIPS, etc.
+-/ \\$/d
++/ \$/d
+ 
+ # local labels, .LBB, .Ltmpxxx, .L__unnamed_xx, .LASANPC, etc.
+ / \.L/d
+@@ -39,7 +39,7 @@
+ / __pi_\.L/d
+ 
+ # arm64 local symbols in non-VHE KVM namespace
+-/ __kvm_nvhe_\\$/d
++/ __kvm_nvhe_\$/d
+ / __kvm_nvhe_\.L/d
+ 
+ # lld arm/aarch64/mips thunks
+diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
+index 634e40748287c0..721e0e9f17cada 100644
+--- a/scripts/mod/file2alias.c
++++ b/scripts/mod/file2alias.c
+@@ -742,7 +742,7 @@ static void do_input(char *alias,
+ 
+ 	for (i = min / BITS_PER_LONG; i < max / BITS_PER_LONG + 1; i++)
+ 		arr[i] = TO_NATIVE(arr[i]);
+-	for (i = min; i < max; i++)
++	for (i = min; i <= max; i++)
+ 		if (arr[i / BITS_PER_LONG] & (1ULL << (i%BITS_PER_LONG)))
+ 			sprintf(alias + strlen(alias), "%X,*", i);
+ }
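
The file2alias fix is a plain off-by-one: max is an inclusive bound, so the loop needs <= or the final event code is silently skipped. Reduced to its essence:

    #include <stdio.h>

    int main(void)
    {
        int min = 0, max = 5, i;

        printf("exclusive:");
        for (i = min; i < max; i++)    /* never emits bit 5 */
            printf(" %d", i);
        printf("\ninclusive:");
        for (i = min; i <= max; i++)   /* the fixed form */
            printf(" %d", i);
        printf("\n");
        return 0;
    }
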
+diff --git a/scripts/package/PKGBUILD b/scripts/package/PKGBUILD
+index f83493838cf96a..dca706617adc76 100644
+--- a/scripts/package/PKGBUILD
++++ b/scripts/package/PKGBUILD
+@@ -103,7 +103,7 @@ _package-headers() {
+ 
+ _package-api-headers() {
+ 	pkgdesc="Kernel headers sanitized for use in userspace"
+-	provides=(linux-api-headers)
++	provides=(linux-api-headers="${pkgver}")
+ 	conflicts=(linux-api-headers)
+ 
+ 	_prologue
+diff --git a/scripts/sorttable.h b/scripts/sorttable.h
+index 7bd0184380d3b9..a7c5445baf0027 100644
+--- a/scripts/sorttable.h
++++ b/scripts/sorttable.h
+@@ -110,7 +110,7 @@ static inline unsigned long orc_ip(const int *ip)
+ 
+ static int orc_sort_cmp(const void *_a, const void *_b)
+ {
+-	struct orc_entry *orc_a;
++	struct orc_entry *orc_a, *orc_b;
+ 	const int *a = g_orc_ip_table + *(int *)_a;
+ 	const int *b = g_orc_ip_table + *(int *)_b;
+ 	unsigned long a_val = orc_ip(a);
+@@ -128,6 +128,9 @@ static int orc_sort_cmp(const void *_a, const void *_b)
+ 	 * whitelisted .o files which didn't get objtool generation.
+ 	 */
+ 	orc_a = g_orc_table + (a - g_orc_ip_table);
++	orc_b = g_orc_table + (b - g_orc_ip_table);
++	if (orc_a->type == ORC_TYPE_UNDEFINED && orc_b->type == ORC_TYPE_UNDEFINED)
++		return 0;
+ 	return orc_a->type == ORC_TYPE_UNDEFINED ? -1 : 1;
+ }
+ 
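
Sort comparators must be symmetric: cmp(a, b) and cmp(b, a) have to agree, and the old code returned -1 for two UNDEFINED entries regardless of argument order. A collapsed illustration of the fixed comparator against libc qsort(), where 0 stands in for ORC_TYPE_UNDEFINED (the real comparator orders by instruction address first):

    #include <stdio.h>
    #include <stdlib.h>

    static int fixed_cmp(const void *pa, const void *pb)
    {
        int a = *(const int *)pa, b = *(const int *)pb;

        if (a == 0 && b == 0)
            return 0;                  /* the missing equality case */
        return a == 0 ? -1 : (b == 0 ? 1 : 0);
    }

    int main(void)
    {
        int v[] = { 3, 0, 7, 0, 1 };
        size_t i, n = sizeof(v) / sizeof(v[0]);

        qsort(v, n, sizeof(v[0]), fixed_cmp);  /* zeros sort first */
        for (i = 0; i < n; i++)
            printf("%d ", v[i]);
        printf("\n");
        return 0;
    }
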
+diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
+index a9830fbfc5c66c..88850405ded929 100644
+--- a/security/selinux/ss/services.c
++++ b/security/selinux/ss/services.c
+@@ -955,7 +955,10 @@ void services_compute_xperms_decision(struct extended_perms_decision *xpermd,
+ 					xpermd->driver))
+ 			return;
+ 	} else {
+-		BUG();
++		pr_warn_once(
++			"SELinux: unknown extended permission (%u) will be ignored\n",
++			node->datum.u.xperms->specified);
++		return;
+ 	}
+ 
+ 	if (node->key.specified == AVTAB_XPERMS_ALLOWED) {
+@@ -992,7 +995,8 @@ void services_compute_xperms_decision(struct extended_perms_decision *xpermd,
+ 					node->datum.u.xperms->perms.p[i];
+ 		}
+ 	} else {
+-		BUG();
++		pr_warn_once("SELinux: unknown specified key (%u)\n",
++			     node->key.specified);
+ 	}
+ }
+ 
+diff --git a/sound/core/seq/oss/seq_oss_synth.c b/sound/core/seq/oss/seq_oss_synth.c
+index e3394919daa09a..51ee4c00a84310 100644
+--- a/sound/core/seq/oss/seq_oss_synth.c
++++ b/sound/core/seq/oss/seq_oss_synth.c
+@@ -66,6 +66,7 @@ static struct seq_oss_synth midi_synth_dev = {
+ };
+ 
+ static DEFINE_SPINLOCK(register_lock);
++static DEFINE_MUTEX(sysex_mutex);
+ 
+ /*
+  * prototypes
+@@ -497,6 +498,7 @@ snd_seq_oss_synth_sysex(struct seq_oss_devinfo *dp, int dev, unsigned char *buf,
+ 	if (!info)
+ 		return -ENXIO;
+ 
++	guard(mutex)(&sysex_mutex);
+ 	sysex = info->sysex;
+ 	if (sysex == NULL) {
+ 		sysex = kzalloc(sizeof(*sysex), GFP_KERNEL);
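
guard(mutex)(&sysex_mutex) releases the lock automatically on every return path; it is built on the compiler's cleanup attribute, which the following userspace approximation uses directly (macro and helper names invented, GCC/Clang only):

    #include <pthread.h>
    #include <stdio.h>

    static void unlock_cleanup(pthread_mutex_t **m)
    {
        pthread_mutex_unlock(*m);
    }

    #define guard_mutex(m) \
        pthread_mutex_t *_guard __attribute__((cleanup(unlock_cleanup))) = (m); \
        pthread_mutex_lock(_guard)

    static pthread_mutex_t sysex_mutex = PTHREAD_MUTEX_INITIALIZER;

    static int do_sysex(void)
    {
        guard_mutex(&sysex_mutex);
        puts("critical section");
        return 0;   /* unlock runs automatically on every return path */
    }

    int main(void)
    {
        return do_sysex();
    }
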
+diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
+index 3930e2f9082f42..77b6ac9b5c11bc 100644
+--- a/sound/core/seq/seq_clientmgr.c
++++ b/sound/core/seq/seq_clientmgr.c
+@@ -1275,10 +1275,16 @@ static int snd_seq_ioctl_set_client_info(struct snd_seq_client *client,
+ 	if (client->type != client_info->type)
+ 		return -EINVAL;
+ 
+-	/* check validity of midi_version field */
+-	if (client->user_pversion >= SNDRV_PROTOCOL_VERSION(1, 0, 3) &&
+-	    client_info->midi_version > SNDRV_SEQ_CLIENT_UMP_MIDI_2_0)
+-		return -EINVAL;
++	if (client->user_pversion >= SNDRV_PROTOCOL_VERSION(1, 0, 3)) {
++		/* check validity of midi_version field */
++		if (client_info->midi_version > SNDRV_SEQ_CLIENT_UMP_MIDI_2_0)
++			return -EINVAL;
++
++		/* check if UMP is supported in kernel */
++		if (!IS_ENABLED(CONFIG_SND_SEQ_UMP) &&
++		    client_info->midi_version > 0)
++			return -EINVAL;
++	}
+ 
+ 	/* fill the info fields */
+ 	if (client_info->name[0])
+diff --git a/sound/core/ump.c b/sound/core/ump.c
+index bd26bb2210cbd4..abc537d54b7312 100644
+--- a/sound/core/ump.c
++++ b/sound/core/ump.c
+@@ -1244,7 +1244,7 @@ static int fill_legacy_mapping(struct snd_ump_endpoint *ump)
+ 
+ 	num = 0;
+ 	for (i = 0; i < SNDRV_UMP_MAX_GROUPS; i++)
+-		if ((group_maps & (1U << i)) && ump->groups[i].valid)
++		if (group_maps & (1U << i))
+ 			ump->legacy_mapping[num++] = i;
+ 
+ 	return num;
+diff --git a/sound/pci/hda/cs35l56_hda.c b/sound/pci/hda/cs35l56_hda.c
+index e3ac0e23ae3211..7baf3b506eefec 100644
+--- a/sound/pci/hda/cs35l56_hda.c
++++ b/sound/pci/hda/cs35l56_hda.c
+@@ -151,10 +151,6 @@ static int cs35l56_hda_runtime_resume(struct device *dev)
+ 		}
+ 	}
+ 
+-	ret = cs35l56_force_sync_asp1_registers_from_cache(&cs35l56->base);
+-	if (ret)
+-		goto err;
+-
+ 	return 0;
+ 
+ err:
+@@ -1059,9 +1055,6 @@ int cs35l56_hda_common_probe(struct cs35l56_hda *cs35l56, int hid, int id)
+ 
+ 	regmap_multi_reg_write(cs35l56->base.regmap, cs35l56_hda_dai_config,
+ 			       ARRAY_SIZE(cs35l56_hda_dai_config));
+-	ret = cs35l56_force_sync_asp1_registers_from_cache(&cs35l56->base);
+-	if (ret)
+-		goto dsp_err;
+ 
+ 	/*
+ 	 * By default only enable one ASP1TXn, where n=amplifier index,
+@@ -1087,7 +1080,6 @@ int cs35l56_hda_common_probe(struct cs35l56_hda *cs35l56, int hid, int id)
+ 
+ pm_err:
+ 	pm_runtime_disable(cs35l56->base.dev);
+-dsp_err:
+ 	cs_dsp_remove(&cs35l56->cs_dsp);
+ err:
+ 	gpiod_set_value_cansleep(cs35l56->base.reset_gpio, 0);
+diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
+index e4673a71551a3b..d40197fb5fbd58 100644
+--- a/sound/pci/hda/patch_ca0132.c
++++ b/sound/pci/hda/patch_ca0132.c
+@@ -1134,7 +1134,6 @@ struct ca0132_spec {
+ 
+ 	struct hda_codec *codec;
+ 	struct delayed_work unsol_hp_work;
+-	int quirk;
+ 
+ #ifdef ENABLE_TUNING_CONTROLS
+ 	long cur_ctl_vals[TUNING_CTLS_COUNT];
+@@ -1166,7 +1165,6 @@ struct ca0132_spec {
+  * CA0132 quirks table
+  */
+ enum {
+-	QUIRK_NONE,
+ 	QUIRK_ALIENWARE,
+ 	QUIRK_ALIENWARE_M17XR4,
+ 	QUIRK_SBZ,
+@@ -1176,10 +1174,11 @@ enum {
+ 	QUIRK_R3D,
+ 	QUIRK_AE5,
+ 	QUIRK_AE7,
++	QUIRK_NONE = HDA_FIXUP_ID_NOT_SET,
+ };
+ 
+ #ifdef CONFIG_PCI
+-#define ca0132_quirk(spec)		((spec)->quirk)
++#define ca0132_quirk(spec)		((spec)->codec->fixup_id)
+ #define ca0132_use_pci_mmio(spec)	((spec)->use_pci_mmio)
+ #define ca0132_use_alt_functions(spec)	((spec)->use_alt_functions)
+ #define ca0132_use_alt_controls(spec)	((spec)->use_alt_controls)
+@@ -1293,7 +1292,7 @@ static const struct hda_pintbl ae7_pincfgs[] = {
+ 	{}
+ };
+ 
+-static const struct snd_pci_quirk ca0132_quirks[] = {
++static const struct hda_quirk ca0132_quirks[] = {
+ 	SND_PCI_QUIRK(0x1028, 0x057b, "Alienware M17x R4", QUIRK_ALIENWARE_M17XR4),
+ 	SND_PCI_QUIRK(0x1028, 0x0685, "Alienware 15 2015", QUIRK_ALIENWARE),
+ 	SND_PCI_QUIRK(0x1028, 0x0688, "Alienware 17 2015", QUIRK_ALIENWARE),
+@@ -1316,6 +1315,19 @@ static const struct snd_pci_quirk ca0132_quirks[] = {
+ 	{}
+ };
+ 
++static const struct hda_model_fixup ca0132_quirk_models[] = {
++	{ .id = QUIRK_ALIENWARE, .name = "alienware" },
++	{ .id = QUIRK_ALIENWARE_M17XR4, .name = "alienware-m17xr4" },
++	{ .id = QUIRK_SBZ, .name = "sbz" },
++	{ .id = QUIRK_ZXR, .name = "zxr" },
++	{ .id = QUIRK_ZXR_DBPRO, .name = "zxr-dbpro" },
++	{ .id = QUIRK_R3DI, .name = "r3di" },
++	{ .id = QUIRK_R3D, .name = "r3d" },
++	{ .id = QUIRK_AE5, .name = "ae5" },
++	{ .id = QUIRK_AE7, .name = "ae7" },
++	{}
++};
++
+ /* Output selection quirk info structures. */
+ #define MAX_QUIRK_MMIO_GPIO_SET_VALS 3
+ #define MAX_QUIRK_SCP_SET_VALS 2
+@@ -9957,17 +9969,15 @@ static int ca0132_prepare_verbs(struct hda_codec *codec)
+  */
+ static void sbz_detect_quirk(struct hda_codec *codec)
+ {
+-	struct ca0132_spec *spec = codec->spec;
+-
+ 	switch (codec->core.subsystem_id) {
+ 	case 0x11020033:
+-		spec->quirk = QUIRK_ZXR;
++		codec->fixup_id = QUIRK_ZXR;
+ 		break;
+ 	case 0x1102003f:
+-		spec->quirk = QUIRK_ZXR_DBPRO;
++		codec->fixup_id = QUIRK_ZXR_DBPRO;
+ 		break;
+ 	default:
+-		spec->quirk = QUIRK_SBZ;
++		codec->fixup_id = QUIRK_SBZ;
+ 		break;
+ 	}
+ }
+@@ -9976,7 +9986,6 @@ static int patch_ca0132(struct hda_codec *codec)
+ {
+ 	struct ca0132_spec *spec;
+ 	int err;
+-	const struct snd_pci_quirk *quirk;
+ 
+ 	codec_dbg(codec, "patch_ca0132\n");
+ 
+@@ -9987,11 +9996,7 @@ static int patch_ca0132(struct hda_codec *codec)
+ 	spec->codec = codec;
+ 
+ 	/* Detect codec quirk */
+-	quirk = snd_pci_quirk_lookup(codec->bus->pci, ca0132_quirks);
+-	if (quirk)
+-		spec->quirk = quirk->value;
+-	else
+-		spec->quirk = QUIRK_NONE;
++	snd_hda_pick_fixup(codec, ca0132_quirk_models, ca0132_quirks, NULL);
+ 	if (ca0132_quirk(spec) == QUIRK_SBZ)
+ 		sbz_detect_quirk(codec);
+ 
+@@ -10068,7 +10073,7 @@ static int patch_ca0132(struct hda_codec *codec)
+ 		spec->mem_base = pci_iomap(codec->bus->pci, 2, 0xC20);
+ 		if (spec->mem_base == NULL) {
+ 			codec_warn(codec, "pci_iomap failed! Setting quirk to QUIRK_NONE.");
+-			spec->quirk = QUIRK_NONE;
++			codec->fixup_id = QUIRK_NONE;
+ 		}
+ 	}
+ #endif
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 192fc75b51e6db..3ed82f98e2de9e 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -7704,6 +7704,7 @@ enum {
+ 	ALC274_FIXUP_HP_MIC,
+ 	ALC274_FIXUP_HP_HEADSET_MIC,
+ 	ALC274_FIXUP_HP_ENVY_GPIO,
++	ALC274_FIXUP_ASUS_ZEN_AIO_27,
+ 	ALC256_FIXUP_ASUS_HPE,
+ 	ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK,
+ 	ALC287_FIXUP_HP_GPIO_LED,
+@@ -9505,6 +9506,26 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = alc274_fixup_hp_envy_gpio,
+ 	},
++	[ALC274_FIXUP_ASUS_ZEN_AIO_27] = {
++		.type = HDA_FIXUP_VERBS,
++		.v.verbs = (const struct hda_verb[]) {
++			{ 0x20, AC_VERB_SET_COEF_INDEX, 0x10 },
++			{ 0x20, AC_VERB_SET_PROC_COEF, 0xc420 },
++			{ 0x20, AC_VERB_SET_COEF_INDEX, 0x40 },
++			{ 0x20, AC_VERB_SET_PROC_COEF, 0x8800 },
++			{ 0x20, AC_VERB_SET_COEF_INDEX, 0x49 },
++			{ 0x20, AC_VERB_SET_PROC_COEF, 0x0249 },
++			{ 0x20, AC_VERB_SET_COEF_INDEX, 0x4a },
++			{ 0x20, AC_VERB_SET_PROC_COEF, 0x202b },
++			{ 0x20, AC_VERB_SET_COEF_INDEX, 0x62 },
++			{ 0x20, AC_VERB_SET_PROC_COEF, 0xa007 },
++			{ 0x20, AC_VERB_SET_COEF_INDEX, 0x6b },
++			{ 0x20, AC_VERB_SET_PROC_COEF, 0x5060 },
++			{}
++		},
++		.chained = true,
++		.chain_id = ALC2XX_FIXUP_HEADSET_MIC,
++	},
+ 	[ALC256_FIXUP_ASUS_HPE] = {
+ 		.type = HDA_FIXUP_VERBS,
+ 		.v.verbs = (const struct hda_verb[]) {
+@@ -10615,6 +10636,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1043, 0x1f62, "ASUS UX7602ZM", ALC245_FIXUP_CS35L41_SPI_2),
+ 	SND_PCI_QUIRK(0x1043, 0x1f92, "ASUS ROG Flow X16", ALC289_FIXUP_ASUS_GA401),
+ 	SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2),
++	SND_PCI_QUIRK(0x1043, 0x31d0, "ASUS Zen AIO 27 Z272SD_A272SD", ALC274_FIXUP_ASUS_ZEN_AIO_27),
+ 	SND_PCI_QUIRK(0x1043, 0x3a20, "ASUS G614JZR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS),
+ 	SND_PCI_QUIRK(0x1043, 0x3a30, "ASUS G814JVR/JIR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS),
+ 	SND_PCI_QUIRK(0x1043, 0x3a40, "ASUS G814JZR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS),
+@@ -10971,6 +10993,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0xf111, 0x0001, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0xf111, 0x0006, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0xf111, 0x0009, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0xf111, 0x000c, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE),
+ 
+ #if 0
+ 	/* Below is a quirk table taken from the old code.
+@@ -11162,6 +11185,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
+ 	{.id = ALC255_FIXUP_ACER_HEADPHONE_AND_MIC, .name = "alc255-acer-headphone-and-mic"},
+ 	{.id = ALC285_FIXUP_HP_GPIO_AMP_INIT, .name = "alc285-hp-amp-init"},
+ 	{.id = ALC236_FIXUP_LENOVO_INV_DMIC, .name = "alc236-fixup-lenovo-inv-mic"},
++	{.id = ALC2XX_FIXUP_HEADSET_MIC, .name = "alc2xx-fixup-headset-mic"},
+ 	{}
+ };
+ #define ALC225_STANDARD_PINS \
+diff --git a/sound/soc/generic/audio-graph-card2.c b/sound/soc/generic/audio-graph-card2.c
+index 93eee40cec760c..63837e25965956 100644
+--- a/sound/soc/generic/audio-graph-card2.c
++++ b/sound/soc/generic/audio-graph-card2.c
+@@ -779,7 +779,7 @@ static void graph_link_init(struct simple_util_priv *priv,
+ 	of_node_get(port_codec);
+ 	if (graph_lnk_is_multi(port_codec)) {
+ 		ep_codec = graph_get_next_multi_ep(&port_codec);
+-		of_node_put(port_cpu);
++		of_node_put(port_codec);
+ 		port_codec = ep_to_port(ep_codec);
+ 	} else {
+ 		ep_codec = port_to_endpoint(port_codec);
+diff --git a/sound/usb/format.c b/sound/usb/format.c
+index 0cbf1d4fbe6edd..6049d957694ca6 100644
+--- a/sound/usb/format.c
++++ b/sound/usb/format.c
+@@ -60,6 +60,8 @@ static u64 parse_audio_format_i_type(struct snd_usb_audio *chip,
+ 			pcm_formats |= SNDRV_PCM_FMTBIT_SPECIAL;
+ 			/* flag potentially raw DSD capable altsettings */
+ 			fp->dsd_raw = true;
++			/* clear special format bit to avoid "unsupported format" msg below */
++			format &= ~UAC2_FORMAT_TYPE_I_RAW_DATA;
+ 		}
+ 
+ 		format <<= 1;
+@@ -71,8 +73,11 @@ static u64 parse_audio_format_i_type(struct snd_usb_audio *chip,
+ 		sample_width = as->bBitResolution;
+ 		sample_bytes = as->bSubslotSize;
+ 
+-		if (format & UAC3_FORMAT_TYPE_I_RAW_DATA)
++		if (format & UAC3_FORMAT_TYPE_I_RAW_DATA) {
+ 			pcm_formats |= SNDRV_PCM_FMTBIT_SPECIAL;
++			/* clear special format bit to avoid "unsupported format" msg below */
++			format &= ~UAC3_FORMAT_TYPE_I_RAW_DATA;
++		}
+ 
+ 		format <<= 1;
+ 		break;
+diff --git a/sound/usb/mixer_us16x08.c b/sound/usb/mixer_us16x08.c
+index 6eb7d93b358d99..20ac32635f1f50 100644
+--- a/sound/usb/mixer_us16x08.c
++++ b/sound/usb/mixer_us16x08.c
+@@ -687,7 +687,7 @@ static int snd_us16x08_meter_get(struct snd_kcontrol *kcontrol,
+ 	struct usb_mixer_elem_info *elem = kcontrol->private_data;
+ 	struct snd_usb_audio *chip = elem->head.mixer->chip;
+ 	struct snd_us16x08_meter_store *store = elem->private_data;
+-	u8 meter_urb[64];
++	u8 meter_urb[64] = {0};
+ 
+ 	switch (kcontrol->private_value) {
+ 	case 0: {
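
meter_urb[64] = {0} zero-fills the stack buffer so bytes the device never writes back cannot leak stale stack contents to the mixer control reader. Demonstrated in isolation:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        unsigned char meter_urb[64] = {0};  /* every element zeroed */

        /* pretend the device only filled the first 16 bytes */
        memset(meter_urb, 0xab, 16);

        /* bytes 16..63 are guaranteed 0, not stale stack data */
        printf("%u %u\n", meter_urb[0], meter_urb[63]);
        return 0;
    }
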
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index a0767de7f1b7ed..8ba0aff8be2ec2 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -2325,6 +2325,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ 		   QUIRK_FLAG_DSD_RAW),
+ 	DEVICE_FLG(0x2522, 0x0007, /* LH Labs Geek Out HD Audio 1V5 */
+ 		   QUIRK_FLAG_SET_IFACE_FIRST),
++	DEVICE_FLG(0x262a, 0x9302, /* ddHiFi TC44C */
++		   QUIRK_FLAG_DSD_RAW),
+ 	DEVICE_FLG(0x2708, 0x0002, /* Audient iD14 */
+ 		   QUIRK_FLAG_IGNORE_CTL_ERROR),
+ 	DEVICE_FLG(0x2912, 0x30c8, /* Audioengine D1 */
+diff --git a/tools/sched_ext/scx_central.c b/tools/sched_ext/scx_central.c
+index 21deea320bd785..e938156ed0a0d0 100644
+--- a/tools/sched_ext/scx_central.c
++++ b/tools/sched_ext/scx_central.c
+@@ -97,7 +97,7 @@ int main(int argc, char **argv)
+ 	SCX_BUG_ON(!cpuset, "Failed to allocate cpuset");
+ 	CPU_ZERO(cpuset);
+ 	CPU_SET(skel->rodata->central_cpu, cpuset);
+-	SCX_BUG_ON(sched_setaffinity(0, sizeof(cpuset), cpuset),
++	SCX_BUG_ON(sched_setaffinity(0, sizeof(*cpuset), cpuset),
+ 		   "Failed to affinitize to central CPU %d (max %d)",
+ 		   skel->rodata->central_cpu, skel->rodata->nr_cpu_ids - 1);
+ 	CPU_FREE(cpuset);
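
The selftest bug is the classic sizeof-of-a-pointer mistake: sizeof(cpuset) is the pointer size, so sched_setaffinity() only ever saw a 64-CPU mask. A demonstration (Linux, needs _GNU_SOURCE):

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    int main(void)
    {
        cpu_set_t *cpuset = CPU_ALLOC(1024);

        if (!cpuset)
            return 1;
        printf("sizeof(cpuset)  = %zu\n", sizeof(cpuset));   /* pointer: 8 */
        printf("sizeof(*cpuset) = %zu\n", sizeof(*cpuset));  /* cpu_set_t */
        printf("CPU_ALLOC_SIZE  = %zu\n", CPU_ALLOC_SIZE(1024));
        CPU_FREE(cpuset);
        return 0;
    }
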
+diff --git a/tools/testing/selftests/bpf/progs/tc_bpf2bpf.c b/tools/testing/selftests/bpf/progs/tc_bpf2bpf.c
+index 8a0632c37839a3..79f5087dade224 100644
+--- a/tools/testing/selftests/bpf/progs/tc_bpf2bpf.c
++++ b/tools/testing/selftests/bpf/progs/tc_bpf2bpf.c
+@@ -10,6 +10,8 @@ int subprog(struct __sk_buff *skb)
+ 	int ret = 1;
+ 
+ 	__sink(ret);
++	/* let verifier know that 'subprog_tc' can change pointers to skb->data */
++	bpf_skb_change_proto(skb, 0, 0);
+ 	return ret;
+ }
+ 
+diff --git a/tools/testing/selftests/net/forwarding/local_termination.sh b/tools/testing/selftests/net/forwarding/local_termination.sh
+index c35548767756d0..ecd34f364125cb 100755
+--- a/tools/testing/selftests/net/forwarding/local_termination.sh
++++ b/tools/testing/selftests/net/forwarding/local_termination.sh
+@@ -7,7 +7,6 @@ ALL_TESTS="standalone vlan_unaware_bridge vlan_aware_bridge test_vlan \
+ NUM_NETIFS=2
+ PING_COUNT=1
+ REQUIRE_MTOOLS=yes
+-REQUIRE_MZ=no
+ 
+ source lib.sh
+ 


* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2025-01-02 12:31 Mike Pagano
  0 siblings, 0 replies; 82+ messages in thread
From: Mike Pagano @ 2025-01-02 12:31 UTC (permalink / raw
  To: gentoo-commits

commit:     dd372ef01fb36fbfcde98200d805cbd936e6afc8
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Jan  2 12:26:21 2025 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Jan  2 12:26:21 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=dd372ef0

Linux patch 6.12.8

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1007_linux-6.12.8.patch | 4586 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 4590 insertions(+)

diff --git a/0000_README b/0000_README
index 6961ab2e..483a9fde 100644
--- a/0000_README
+++ b/0000_README
@@ -71,6 +71,10 @@ Patch:  1006_linux-6.12.7.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.12.7
 
+Patch:  1007_linux-6.12.8.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.12.8
+
 Patch:  1510_fs-enable-link-security-restrictions-by-default.patch
 From:   http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch/
 Desc:   Enable link security restrictions by default.

diff --git a/1007_linux-6.12.8.patch b/1007_linux-6.12.8.patch
new file mode 100644
index 00000000..6c1c3893
--- /dev/null
+++ b/1007_linux-6.12.8.patch
@@ -0,0 +1,4586 @@
+diff --git a/Documentation/arch/arm64/silicon-errata.rst b/Documentation/arch/arm64/silicon-errata.rst
+index 77db10e944f039..b42fea07c5cec8 100644
+--- a/Documentation/arch/arm64/silicon-errata.rst
++++ b/Documentation/arch/arm64/silicon-errata.rst
+@@ -255,8 +255,9 @@ stable kernels.
+ +----------------+-----------------+-----------------+-----------------------------+
+ | Hisilicon      | Hip08 SMMU PMCG | #162001800      | N/A                         |
+ +----------------+-----------------+-----------------+-----------------------------+
+-| Hisilicon      | Hip{08,09,10,10C| #162001900      | N/A                         |
+-|                | ,11} SMMU PMCG  |                 |                             |
++| Hisilicon      | Hip{08,09,09A,10| #162001900      | N/A                         |
++|                | ,10C,11}        |                 |                             |
++|                | SMMU PMCG       |                 |                             |
+ +----------------+-----------------+-----------------+-----------------------------+
+ | Hisilicon      | Hip09           | #162100801      | HISILICON_ERRATUM_162100801 |
+ +----------------+-----------------+-----------------+-----------------------------+
+diff --git a/Documentation/devicetree/bindings/sound/realtek,rt5645.yaml b/Documentation/devicetree/bindings/sound/realtek,rt5645.yaml
+index 13f09f1bc8003a..0a698798c22be2 100644
+--- a/Documentation/devicetree/bindings/sound/realtek,rt5645.yaml
++++ b/Documentation/devicetree/bindings/sound/realtek,rt5645.yaml
+@@ -51,7 +51,7 @@ properties:
+     description: Power supply for AVDD, providing 1.8V.
+ 
+   cpvdd-supply:
+-    description: Power supply for CPVDD, providing 3.5V.
++    description: Power supply for CPVDD, providing 1.8V.
+ 
+   hp-detect-gpios:
+     description: 
+diff --git a/Makefile b/Makefile
+index 685a57f6c8d279..8a10105c2539cf 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 12
+-SUBLEVEL = 7
++SUBLEVEL = 8
+ EXTRAVERSION =
+ NAME = Baby Opossum Posse
+ 
+diff --git a/arch/arm64/boot/dts/broadcom/bcm2712.dtsi b/arch/arm64/boot/dts/broadcom/bcm2712.dtsi
+index 6e5a984c1d4ea1..26a29e5e5078d5 100644
+--- a/arch/arm64/boot/dts/broadcom/bcm2712.dtsi
++++ b/arch/arm64/boot/dts/broadcom/bcm2712.dtsi
+@@ -67,7 +67,7 @@ cpu0: cpu@0 {
+ 			l2_cache_l0: l2-cache-l0 {
+ 				compatible = "cache";
+ 				cache-size = <0x80000>;
+-				cache-line-size = <128>;
++				cache-line-size = <64>;
+ 				cache-sets = <1024>; //512KiB(size)/64(line-size)=8192ways/8-way set
+ 				cache-level = <2>;
+ 				cache-unified;
+@@ -91,7 +91,7 @@ cpu1: cpu@1 {
+ 			l2_cache_l1: l2-cache-l1 {
+ 				compatible = "cache";
+ 				cache-size = <0x80000>;
+-				cache-line-size = <128>;
++				cache-line-size = <64>;
+ 				cache-sets = <1024>; //512KiB(size)/64(line-size)=8192ways/8-way set
+ 				cache-level = <2>;
+ 				cache-unified;
+@@ -115,7 +115,7 @@ cpu2: cpu@2 {
+ 			l2_cache_l2: l2-cache-l2 {
+ 				compatible = "cache";
+ 				cache-size = <0x80000>;
+-				cache-line-size = <128>;
++				cache-line-size = <64>;
+ 				cache-sets = <1024>; //512KiB(size)/64(line-size)=8192ways/8-way set
+ 				cache-level = <2>;
+ 				cache-unified;
+@@ -139,7 +139,7 @@ cpu3: cpu@3 {
+ 			l2_cache_l3: l2-cache-l3 {
+ 				compatible = "cache";
+ 				cache-size = <0x80000>;
+-				cache-line-size = <128>;
++				cache-line-size = <64>;
+ 				cache-sets = <1024>; //512KiB(size)/64(line-size)=8192ways/8-way set
+ 				cache-level = <2>;
+ 				cache-unified;
+diff --git a/arch/loongarch/include/asm/inst.h b/arch/loongarch/include/asm/inst.h
+index 944482063f14e3..3089785ca97e78 100644
+--- a/arch/loongarch/include/asm/inst.h
++++ b/arch/loongarch/include/asm/inst.h
+@@ -683,7 +683,17 @@ DEF_EMIT_REG2I16_FORMAT(blt, blt_op)
+ DEF_EMIT_REG2I16_FORMAT(bge, bge_op)
+ DEF_EMIT_REG2I16_FORMAT(bltu, bltu_op)
+ DEF_EMIT_REG2I16_FORMAT(bgeu, bgeu_op)
+-DEF_EMIT_REG2I16_FORMAT(jirl, jirl_op)
++
++static inline void emit_jirl(union loongarch_instruction *insn,
++			     enum loongarch_gpr rd,
++			     enum loongarch_gpr rj,
++			     int offset)
++{
++	insn->reg2i16_format.opcode = jirl_op;
++	insn->reg2i16_format.immediate = offset;
++	insn->reg2i16_format.rd = rd;
++	insn->reg2i16_format.rj = rj;
++}
+ 
+ #define DEF_EMIT_REG2BSTRD_FORMAT(NAME, OP)				\
+ static inline void emit_##NAME(union loongarch_instruction *insn,	\
+diff --git a/arch/loongarch/kernel/efi.c b/arch/loongarch/kernel/efi.c
+index 2bf86aeda874c7..de21e72759eebc 100644
+--- a/arch/loongarch/kernel/efi.c
++++ b/arch/loongarch/kernel/efi.c
+@@ -95,7 +95,7 @@ static void __init init_screen_info(void)
+ 	memset(si, 0, sizeof(*si));
+ 	early_memunmap(si, sizeof(*si));
+ 
+-	memblock_reserve(screen_info.lfb_base, screen_info.lfb_size);
++	memblock_reserve(__screen_info_lfb_base(&screen_info), screen_info.lfb_size);
+ }
+ 
+ void __init efi_init(void)
+diff --git a/arch/loongarch/kernel/inst.c b/arch/loongarch/kernel/inst.c
+index 3050329556d118..14d7d700bcb98f 100644
+--- a/arch/loongarch/kernel/inst.c
++++ b/arch/loongarch/kernel/inst.c
+@@ -332,7 +332,7 @@ u32 larch_insn_gen_jirl(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm)
+ 		return INSN_BREAK;
+ 	}
+ 
+-	emit_jirl(&insn, rj, rd, imm >> 2);
++	emit_jirl(&insn, rd, rj, imm >> 2);
+ 
+ 	return insn.word;
+ }
+diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c
+index dd350cba1252f9..ea357a3edc0943 100644
+--- a/arch/loongarch/net/bpf_jit.c
++++ b/arch/loongarch/net/bpf_jit.c
+@@ -181,13 +181,13 @@ static void __build_epilogue(struct jit_ctx *ctx, bool is_tail_call)
+ 		/* Set return value */
+ 		emit_insn(ctx, addiw, LOONGARCH_GPR_A0, regmap[BPF_REG_0], 0);
+ 		/* Return to the caller */
+-		emit_insn(ctx, jirl, LOONGARCH_GPR_RA, LOONGARCH_GPR_ZERO, 0);
++		emit_insn(ctx, jirl, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_RA, 0);
+ 	} else {
+ 		/*
+ 		 * Call the next bpf prog and skip the first instruction
+ 		 * of TCC initialization.
+ 		 */
+-		emit_insn(ctx, jirl, LOONGARCH_GPR_T3, LOONGARCH_GPR_ZERO, 1);
++		emit_insn(ctx, jirl, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_T3, 1);
+ 	}
+ }
+ 
+@@ -904,7 +904,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
+ 			return ret;
+ 
+ 		move_addr(ctx, t1, func_addr);
+-		emit_insn(ctx, jirl, t1, LOONGARCH_GPR_RA, 0);
++		emit_insn(ctx, jirl, LOONGARCH_GPR_RA, t1, 0);
+ 		move_reg(ctx, regmap[BPF_REG_0], LOONGARCH_GPR_A0);
+ 		break;
+ 
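
The emit_jirl() fix swaps rd/rj at every call site so arguments match the encoding order. One defensive alternative, shown here as a hypothetical userspace encoder, is designated initializers, which make operand order explicit (the bit layout below is a placeholder, not the real LoongArch reg2i16 format):

    #include <stdint.h>
    #include <stdio.h>

    struct reg2i16 {
        uint32_t opcode;
        int32_t  immediate;
        uint32_t rd, rj;
    };

    /* placeholder packing for illustration only */
    static uint32_t encode(struct reg2i16 f)
    {
        return f.opcode << 26 | ((uint32_t)f.immediate & 0xffff) << 10 |
               f.rj << 5 | f.rd;
    }

    int main(void)
    {
        /* named fields leave no doubt which register is which */
        uint32_t w = encode((struct reg2i16){
            .opcode = 0x13, .rd = 1, .rj = 4, .immediate = 0 });

        printf("0x%08x\n", (unsigned)w);
        return 0;
    }
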
+diff --git a/arch/powerpc/platforms/book3s/vas-api.c b/arch/powerpc/platforms/book3s/vas-api.c
+index f381b177ea06ad..0b6365d85d1171 100644
+--- a/arch/powerpc/platforms/book3s/vas-api.c
++++ b/arch/powerpc/platforms/book3s/vas-api.c
+@@ -464,7 +464,43 @@ static vm_fault_t vas_mmap_fault(struct vm_fault *vmf)
+ 	return VM_FAULT_SIGBUS;
+ }
+ 
++/*
++ * During mmap() paste address, mapping VMA is saved in VAS window
++ * struct which is used to unmap during migration if the window is
++ * still open. But the user space can remove this mapping with
++ * munmap() before closing the window and the VMA address will
++ * be invalid. Set VAS window VMA to NULL in this function which
++ * is called before VMA free.
++ */
++static void vas_mmap_close(struct vm_area_struct *vma)
++{
++	struct file *fp = vma->vm_file;
++	struct coproc_instance *cp_inst = fp->private_data;
++	struct vas_window *txwin;
++
++	/* Should not happen */
++	if (!cp_inst || !cp_inst->txwin) {
++		pr_err("No attached VAS window for the paste address mmap\n");
++		return;
++	}
++
++	txwin = cp_inst->txwin;
++	/*
++	 * task_ref.vma is set in coproc_mmap() during mmap paste
++	 * address. So it has to be the same VMA that is getting freed.
++	 */
++	if (WARN_ON(txwin->task_ref.vma != vma)) {
++		pr_err("Invalid paste address mmaping\n");
++		return;
++	}
++
++	mutex_lock(&txwin->task_ref.mmap_mutex);
++	txwin->task_ref.vma = NULL;
++	mutex_unlock(&txwin->task_ref.mmap_mutex);
++}
++
+ static const struct vm_operations_struct vas_vm_ops = {
++	.close = vas_mmap_close,
+ 	.fault = vas_mmap_fault,
+ };
+ 
+diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
+index d879478db3f572..28b4312f25631c 100644
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -429,6 +429,16 @@ static struct event_constraint intel_lnc_event_constraints[] = {
+ 	EVENT_CONSTRAINT_END
+ };
+ 
++static struct extra_reg intel_lnc_extra_regs[] __read_mostly = {
++	INTEL_UEVENT_EXTRA_REG(0x012a, MSR_OFFCORE_RSP_0, 0xfffffffffffull, RSP_0),
++	INTEL_UEVENT_EXTRA_REG(0x012b, MSR_OFFCORE_RSP_1, 0xfffffffffffull, RSP_1),
++	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
++	INTEL_UEVENT_EXTRA_REG(0x02c6, MSR_PEBS_FRONTEND, 0x9, FE),
++	INTEL_UEVENT_EXTRA_REG(0x03c6, MSR_PEBS_FRONTEND, 0x7fff1f, FE),
++	INTEL_UEVENT_EXTRA_REG(0x40ad, MSR_PEBS_FRONTEND, 0xf, FE),
++	INTEL_UEVENT_EXTRA_REG(0x04c2, MSR_PEBS_FRONTEND, 0x8, FE),
++	EVENT_EXTRA_END
++};
+ 
+ EVENT_ATTR_STR(mem-loads,	mem_ld_nhm,	"event=0x0b,umask=0x10,ldlat=3");
+ EVENT_ATTR_STR(mem-loads,	mem_ld_snb,	"event=0xcd,umask=0x1,ldlat=3");
+@@ -6344,7 +6354,7 @@ static __always_inline void intel_pmu_init_lnc(struct pmu *pmu)
+ 	intel_pmu_init_glc(pmu);
+ 	hybrid(pmu, event_constraints) = intel_lnc_event_constraints;
+ 	hybrid(pmu, pebs_constraints) = intel_lnc_pebs_event_constraints;
+-	hybrid(pmu, extra_regs) = intel_rwc_extra_regs;
++	hybrid(pmu, extra_regs) = intel_lnc_extra_regs;
+ }
+ 
+ static __always_inline void intel_pmu_init_skt(struct pmu *pmu)
+diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
+index 6188650707ab27..19a9fd974e3e1d 100644
+--- a/arch/x86/events/intel/ds.c
++++ b/arch/x86/events/intel/ds.c
+@@ -2496,6 +2496,7 @@ void __init intel_ds_init(void)
+ 			x86_pmu.large_pebs_flags |= PERF_SAMPLE_TIME;
+ 			break;
+ 
++		case 6:
+ 		case 5:
+ 			x86_pmu.pebs_ept = 1;
+ 			fallthrough;
+diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
+index d98fac56768469..e7aba7349231d1 100644
+--- a/arch/x86/events/intel/uncore.c
++++ b/arch/x86/events/intel/uncore.c
+@@ -1910,6 +1910,7 @@ static const struct x86_cpu_id intel_uncore_match[] __initconst = {
+ 	X86_MATCH_VFM(INTEL_ATOM_GRACEMONT,	&adl_uncore_init),
+ 	X86_MATCH_VFM(INTEL_ATOM_CRESTMONT_X,	&gnr_uncore_init),
+ 	X86_MATCH_VFM(INTEL_ATOM_CRESTMONT,	&gnr_uncore_init),
++	X86_MATCH_VFM(INTEL_ATOM_DARKMONT_X,	&gnr_uncore_init),
+ 	{},
+ };
+ MODULE_DEVICE_TABLE(x86cpu, intel_uncore_match);
+diff --git a/arch/x86/kernel/cet.c b/arch/x86/kernel/cet.c
+index d2c732a34e5d90..303bf74d175b30 100644
+--- a/arch/x86/kernel/cet.c
++++ b/arch/x86/kernel/cet.c
+@@ -81,6 +81,34 @@ static void do_user_cp_fault(struct pt_regs *regs, unsigned long error_code)
+ 
+ static __ro_after_init bool ibt_fatal = true;
+ 
++/*
++ * By definition, all missing-ENDBRANCH #CPs are a result of WFE && !ENDBR.
++ *
++ * For the kernel IBT no ENDBR selftest where #CPs are deliberately triggered,
++ * the WFE state of the interrupted context needs to be cleared to let execution
++ * continue.  Otherwise when the CPU resumes from the instruction that just
++ * caused the previous #CP, another missing-ENDBRANCH #CP is raised and the CPU
++ * enters a dead loop.
++ *
++ * This is not a problem with IDT because it doesn't preserve WFE and IRET doesn't
++ * set WFE.  But FRED provides space on the entry stack (in an expanded CS area)
++ * to save and restore the WFE state, thus the WFE state is no longer clobbered,
++ * so software must clear it.
++ */
++static void ibt_clear_fred_wfe(struct pt_regs *regs)
++{
++	/*
++	 * No need to do any FRED checks.
++	 *
++	 * For IDT event delivery, the high-order 48 bits of CS are pushed
++	 * as 0s into the stack, and later IRET ignores these bits.
++	 *
++	 * For FRED, a test to check if fred_cs.wfe is set would be dropped
++	 * by compilers.
++	 */
++	regs->fred_cs.wfe = 0;
++}
++
+ static void do_kernel_cp_fault(struct pt_regs *regs, unsigned long error_code)
+ {
+ 	if ((error_code & CP_EC) != CP_ENDBR) {
+@@ -90,6 +118,7 @@ static void do_kernel_cp_fault(struct pt_regs *regs, unsigned long error_code)
+ 
+ 	if (unlikely(regs->ip == (unsigned long)&ibt_selftest_noendbr)) {
+ 		regs->ax = 0;
++		ibt_clear_fred_wfe(regs);
+ 		return;
+ 	}
+ 
+@@ -97,6 +126,7 @@ static void do_kernel_cp_fault(struct pt_regs *regs, unsigned long error_code)
+ 	if (!ibt_fatal) {
+ 		printk(KERN_DEFAULT CUT_HERE);
+ 		__warn(__FILE__, __LINE__, (void *)regs->ip, TAINT_WARN, regs, NULL);
++		ibt_clear_fred_wfe(regs);
+ 		return;
+ 	}
+ 	BUG();
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index d5995021815ddf..4e76651e786d19 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -3903,16 +3903,11 @@ static int blk_mq_init_hctx(struct request_queue *q,
+ {
+ 	hctx->queue_num = hctx_idx;
+ 
+-	if (!(hctx->flags & BLK_MQ_F_STACKING))
+-		cpuhp_state_add_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
+-				&hctx->cpuhp_online);
+-	cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
+-
+ 	hctx->tags = set->tags[hctx_idx];
+ 
+ 	if (set->ops->init_hctx &&
+ 	    set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
+-		goto unregister_cpu_notifier;
++		goto fail;
+ 
+ 	if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx,
+ 				hctx->numa_node))
+@@ -3921,6 +3916,11 @@ static int blk_mq_init_hctx(struct request_queue *q,
+ 	if (xa_insert(&q->hctx_table, hctx_idx, hctx, GFP_KERNEL))
+ 		goto exit_flush_rq;
+ 
++	if (!(hctx->flags & BLK_MQ_F_STACKING))
++		cpuhp_state_add_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
++				&hctx->cpuhp_online);
++	cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
++
+ 	return 0;
+ 
+  exit_flush_rq:
+@@ -3929,8 +3929,7 @@ static int blk_mq_init_hctx(struct request_queue *q,
+  exit_hctx:
+ 	if (set->ops->exit_hctx)
+ 		set->ops->exit_hctx(hctx, hctx_idx);
+- unregister_cpu_notifier:
+-	blk_mq_remove_cpuhp(hctx);
++ fail:
+ 	return -1;
+ }
+ 
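
The blk-mq change is an ordering fix: the CPU-hotplug notifiers are now registered only after every step that can fail, so no error path has to unregister them. A standalone sketch of the pattern, with all names hypothetical:

    #include <stdio.h>

    /* Hypothetical fallible setup steps and a teardown counterpart. */
    static int  init_driver(void)  { return 0; }
    static int  insert_table(void) { return 0; }
    static void exit_driver(void)  { }
    /* Once registered, the notifier is visible to other CPUs, so it is
     * done last, after nothing below it can fail. */
    static void register_notifier(void) { }

    static int init_hctx(void)
    {
    	if (init_driver())
    		goto fail;
    	if (insert_table())
    		goto err_exit_driver;

    	register_notifier();	/* infallible tail: no unwind needed */
    	return 0;

    err_exit_driver:
    	exit_driver();
    fail:
    	return -1;
    }

    int main(void)
    {
    	return init_hctx() ? 1 : 0;
    }
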
+diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
+index 4c745a26226b27..bf3be532e0895d 100644
+--- a/drivers/acpi/arm64/iort.c
++++ b/drivers/acpi/arm64/iort.c
+@@ -1703,6 +1703,8 @@ static struct acpi_platform_list pmcg_plat_info[] __initdata = {
+ 	/* HiSilicon Hip09 Platform */
+ 	{"HISI  ", "HIP09   ", 0, ACPI_SIG_IORT, greater_than_or_equal,
+ 	 "Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09},
++	{"HISI  ", "HIP09A  ", 0, ACPI_SIG_IORT, greater_than_or_equal,
++	 "Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09},
+ 	/* HiSilicon Hip10/11 Platform uses the same SMMU IP with Hip09 */
+ 	{"HISI  ", "HIP10   ", 0, ACPI_SIG_IORT, greater_than_or_equal,
+ 	 "Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09},
+diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
+index e3e2afc2c83c6b..5962ea1230a17e 100644
+--- a/drivers/base/regmap/regmap.c
++++ b/drivers/base/regmap/regmap.c
+@@ -1063,13 +1063,13 @@ struct regmap *__regmap_init(struct device *dev,
+ 
+ 		/* Sanity check */
+ 		if (range_cfg->range_max < range_cfg->range_min) {
+-			dev_err(map->dev, "Invalid range %d: %d < %d\n", i,
++			dev_err(map->dev, "Invalid range %d: %u < %u\n", i,
+ 				range_cfg->range_max, range_cfg->range_min);
+ 			goto err_range;
+ 		}
+ 
+ 		if (range_cfg->range_max > map->max_register) {
+-			dev_err(map->dev, "Invalid range %d: %d > %d\n", i,
++			dev_err(map->dev, "Invalid range %d: %u > %u\n", i,
+ 				range_cfg->range_max, map->max_register);
+ 			goto err_range;
+ 		}
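
The regmap change swaps %d for %u because range_min, range_max and max_register are unsigned; printing them with %d would show negative garbage once a value exceeds INT_MAX. A two-line demonstration:

    #include <stdio.h>

    int main(void)
    {
    	unsigned int range_max = 3000000000u;	/* above INT_MAX */

    	printf("%%d prints %d\n", range_max);	/* misrendered as negative */
    	printf("%%u prints %u\n", range_max);	/* the actual value */
    	return 0;
    }
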
+diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
+index 90bc605ff6c299..458ac54e7b201e 100644
+--- a/drivers/block/ublk_drv.c
++++ b/drivers/block/ublk_drv.c
+@@ -1599,6 +1599,21 @@ static void ublk_unquiesce_dev(struct ublk_device *ub)
+ 	blk_mq_kick_requeue_list(ub->ub_disk->queue);
+ }
+ 
++static struct gendisk *ublk_detach_disk(struct ublk_device *ub)
++{
++	struct gendisk *disk;
++
++	/* Sync with ublk_abort_queue() by holding the lock */
++	spin_lock(&ub->lock);
++	disk = ub->ub_disk;
++	ub->dev_info.state = UBLK_S_DEV_DEAD;
++	ub->dev_info.ublksrv_pid = -1;
++	ub->ub_disk = NULL;
++	spin_unlock(&ub->lock);
++
++	return disk;
++}
++
+ static void ublk_stop_dev(struct ublk_device *ub)
+ {
+ 	struct gendisk *disk;
+@@ -1612,14 +1627,7 @@ static void ublk_stop_dev(struct ublk_device *ub)
+ 		ublk_unquiesce_dev(ub);
+ 	}
+ 	del_gendisk(ub->ub_disk);
+-
+-	/* Sync with ublk_abort_queue() by holding the lock */
+-	spin_lock(&ub->lock);
+-	disk = ub->ub_disk;
+-	ub->dev_info.state = UBLK_S_DEV_DEAD;
+-	ub->dev_info.ublksrv_pid = -1;
+-	ub->ub_disk = NULL;
+-	spin_unlock(&ub->lock);
++	disk = ublk_detach_disk(ub);
+ 	put_disk(disk);
+  unlock:
+ 	mutex_unlock(&ub->mutex);
+@@ -2295,7 +2303,7 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)
+ 
+ out_put_cdev:
+ 	if (ret) {
+-		ub->dev_info.state = UBLK_S_DEV_DEAD;
++		ublk_detach_disk(ub);
+ 		ublk_put_device(ub);
+ 	}
+ 	if (ret)
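
The ublk fix extracts the lock-protected detach sequence into ublk_detach_disk() so the normal stop path and the start-failure path leave the device in an identical dead state. A standalone pthread sketch of that shape; the device model and names are hypothetical:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct dev {
    	pthread_mutex_t lock;
    	int dead;
    	char *disk;	/* resource owned while live */
    };

    /* Shared helper: mark dead and detach under the lock, handing the
     * resource back so callers free it outside the critical section. */
    static char *detach_disk(struct dev *d)
    {
    	char *disk;

    	pthread_mutex_lock(&d->lock);
    	d->dead = 1;
    	disk = d->disk;
    	d->disk = NULL;
    	pthread_mutex_unlock(&d->lock);
    	return disk;
    }

    int main(void)
    {
    	struct dev d = { PTHREAD_MUTEX_INITIALIZER, 0, strdup("disk0") };

    	free(detach_disk(&d));	/* stop path */
    	free(detach_disk(&d));	/* error path: safe, disk already NULL */
    	printf("dead=%d\n", d.dead);
    	return 0;
    }
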
+diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
+index 43c96b73a7118f..0e50b65e1dbf5a 100644
+--- a/drivers/block/virtio_blk.c
++++ b/drivers/block/virtio_blk.c
+@@ -1587,9 +1587,12 @@ static void virtblk_remove(struct virtio_device *vdev)
+ static int virtblk_freeze(struct virtio_device *vdev)
+ {
+ 	struct virtio_blk *vblk = vdev->priv;
++	struct request_queue *q = vblk->disk->queue;
+ 
+ 	/* Ensure no requests in virtqueues before deleting vqs. */
+-	blk_mq_freeze_queue(vblk->disk->queue);
++	blk_mq_freeze_queue(q);
++	blk_mq_quiesce_queue_nowait(q);
++	blk_mq_unfreeze_queue(q);
+ 
+ 	/* Ensure we don't receive any more interrupts */
+ 	virtio_reset_device(vdev);
+@@ -1613,8 +1616,8 @@ static int virtblk_restore(struct virtio_device *vdev)
+ 		return ret;
+ 
+ 	virtio_device_ready(vdev);
++	blk_mq_unquiesce_queue(vblk->disk->queue);
+ 
+-	blk_mq_unfreeze_queue(vblk->disk->queue);
+ 	return 0;
+ }
+ #endif
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 11755cb1eb1635..0c85c981a8334a 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -870,6 +870,7 @@ struct btusb_data {
+ 
+ 	int (*suspend)(struct hci_dev *hdev);
+ 	int (*resume)(struct hci_dev *hdev);
++	int (*disconnect)(struct hci_dev *hdev);
+ 
+ 	int oob_wake_irq;   /* irq for out-of-band wake-on-bt */
+ 	unsigned cmd_timeout_cnt;
+@@ -2643,11 +2644,11 @@ static void btusb_mtk_claim_iso_intf(struct btusb_data *data)
+ 	init_usb_anchor(&btmtk_data->isopkt_anchor);
+ }
+ 
+-static void btusb_mtk_release_iso_intf(struct btusb_data *data)
++static void btusb_mtk_release_iso_intf(struct hci_dev *hdev)
+ {
+-	struct btmtk_data *btmtk_data = hci_get_priv(data->hdev);
++	struct btmtk_data *btmtk_data = hci_get_priv(hdev);
+ 
+-	if (btmtk_data->isopkt_intf) {
++	if (test_bit(BTMTK_ISOPKT_OVER_INTR, &btmtk_data->flags)) {
+ 		usb_kill_anchored_urbs(&btmtk_data->isopkt_anchor);
+ 		clear_bit(BTMTK_ISOPKT_RUNNING, &btmtk_data->flags);
+ 
+@@ -2661,6 +2662,16 @@ static void btusb_mtk_release_iso_intf(struct btusb_data *data)
+ 	clear_bit(BTMTK_ISOPKT_OVER_INTR, &btmtk_data->flags);
+ }
+ 
++static int btusb_mtk_disconnect(struct hci_dev *hdev)
++{
++	/* This function describes the specific additional steps taken by MediaTek
++	 * when Bluetooth usb driver's disconnect function is called.
++	 */
++	btusb_mtk_release_iso_intf(hdev);
++
++	return 0;
++}
++
+ static int btusb_mtk_reset(struct hci_dev *hdev, void *rst_data)
+ {
+ 	struct btusb_data *data = hci_get_drvdata(hdev);
+@@ -2677,8 +2688,8 @@ static int btusb_mtk_reset(struct hci_dev *hdev, void *rst_data)
+ 	if (err < 0)
+ 		return err;
+ 
+-	if (test_bit(BTMTK_ISOPKT_RUNNING, &btmtk_data->flags))
+-		btusb_mtk_release_iso_intf(data);
++	/* Release MediaTek ISO data interface */
++	btusb_mtk_release_iso_intf(hdev);
+ 
+ 	btusb_stop_traffic(data);
+ 	usb_kill_anchored_urbs(&data->tx_anchor);
+@@ -2723,22 +2734,24 @@ static int btusb_mtk_setup(struct hci_dev *hdev)
+ 	btmtk_data->reset_sync = btusb_mtk_reset;
+ 
+ 	/* Claim ISO data interface and endpoint */
+-	btmtk_data->isopkt_intf = usb_ifnum_to_if(data->udev, MTK_ISO_IFNUM);
+-	if (btmtk_data->isopkt_intf)
++	if (!test_bit(BTMTK_ISOPKT_OVER_INTR, &btmtk_data->flags)) {
++		btmtk_data->isopkt_intf = usb_ifnum_to_if(data->udev, MTK_ISO_IFNUM);
+ 		btusb_mtk_claim_iso_intf(data);
++	}
+ 
+ 	return btmtk_usb_setup(hdev);
+ }
+ 
+ static int btusb_mtk_shutdown(struct hci_dev *hdev)
+ {
+-	struct btusb_data *data = hci_get_drvdata(hdev);
+-	struct btmtk_data *btmtk_data = hci_get_priv(hdev);
++	int ret;
+ 
+-	if (test_bit(BTMTK_ISOPKT_RUNNING, &btmtk_data->flags))
+-		btusb_mtk_release_iso_intf(data);
++	ret = btmtk_usb_shutdown(hdev);
+ 
+-	return btmtk_usb_shutdown(hdev);
++	/* Release MediaTek iso interface after shutdown */
++	btusb_mtk_release_iso_intf(hdev);
++
++	return ret;
+ }
+ 
+ #ifdef CONFIG_PM
+@@ -3850,6 +3863,7 @@ static int btusb_probe(struct usb_interface *intf,
+ 		data->recv_acl = btmtk_usb_recv_acl;
+ 		data->suspend = btmtk_usb_suspend;
+ 		data->resume = btmtk_usb_resume;
++		data->disconnect = btusb_mtk_disconnect;
+ 	}
+ 
+ 	if (id->driver_info & BTUSB_SWAVE) {
+@@ -4040,6 +4054,9 @@ static void btusb_disconnect(struct usb_interface *intf)
+ 	if (data->diag)
+ 		usb_set_intfdata(data->diag, NULL);
+ 
++	if (data->disconnect)
++		data->disconnect(hdev);
++
+ 	hci_unregister_dev(hdev);
+ 
+ 	if (intf == data->intf) {
+diff --git a/drivers/dma/amd/qdma/qdma.c b/drivers/dma/amd/qdma/qdma.c
+index b0a1f3ad851b1e..4761fa25501561 100644
+--- a/drivers/dma/amd/qdma/qdma.c
++++ b/drivers/dma/amd/qdma/qdma.c
+@@ -7,9 +7,9 @@
+ #include <linux/bitfield.h>
+ #include <linux/bitops.h>
+ #include <linux/dmaengine.h>
++#include <linux/dma-mapping.h>
+ #include <linux/module.h>
+ #include <linux/mod_devicetable.h>
+-#include <linux/dma-map-ops.h>
+ #include <linux/platform_device.h>
+ #include <linux/platform_data/amd_qdma.h>
+ #include <linux/regmap.h>
+@@ -492,18 +492,9 @@ static int qdma_device_verify(struct qdma_device *qdev)
+ 
+ static int qdma_device_setup(struct qdma_device *qdev)
+ {
+-	struct device *dev = &qdev->pdev->dev;
+ 	u32 ring_sz = QDMA_DEFAULT_RING_SIZE;
+ 	int ret = 0;
+ 
+-	while (dev && get_dma_ops(dev))
+-		dev = dev->parent;
+-	if (!dev) {
+-		qdma_err(qdev, "dma device not found");
+-		return -EINVAL;
+-	}
+-	set_dma_ops(&qdev->pdev->dev, get_dma_ops(dev));
+-
+ 	ret = qdma_setup_fmap_context(qdev);
+ 	if (ret) {
+ 		qdma_err(qdev, "Failed setup fmap context");
+@@ -548,11 +539,12 @@ static void qdma_free_queue_resources(struct dma_chan *chan)
+ {
+ 	struct qdma_queue *queue = to_qdma_queue(chan);
+ 	struct qdma_device *qdev = queue->qdev;
+-	struct device *dev = qdev->dma_dev.dev;
++	struct qdma_platdata *pdata;
+ 
+ 	qdma_clear_queue_context(queue);
+ 	vchan_free_chan_resources(&queue->vchan);
+-	dma_free_coherent(dev, queue->ring_size * QDMA_MM_DESC_SIZE,
++	pdata = dev_get_platdata(&qdev->pdev->dev);
++	dma_free_coherent(pdata->dma_dev, queue->ring_size * QDMA_MM_DESC_SIZE,
+ 			  queue->desc_base, queue->dma_desc_base);
+ }
+ 
+@@ -565,6 +557,7 @@ static int qdma_alloc_queue_resources(struct dma_chan *chan)
+ 	struct qdma_queue *queue = to_qdma_queue(chan);
+ 	struct qdma_device *qdev = queue->qdev;
+ 	struct qdma_ctxt_sw_desc desc;
++	struct qdma_platdata *pdata;
+ 	size_t size;
+ 	int ret;
+ 
+@@ -572,8 +565,9 @@ static int qdma_alloc_queue_resources(struct dma_chan *chan)
+ 	if (ret)
+ 		return ret;
+ 
++	pdata = dev_get_platdata(&qdev->pdev->dev);
+ 	size = queue->ring_size * QDMA_MM_DESC_SIZE;
+-	queue->desc_base = dma_alloc_coherent(qdev->dma_dev.dev, size,
++	queue->desc_base = dma_alloc_coherent(pdata->dma_dev, size,
+ 					      &queue->dma_desc_base,
+ 					      GFP_KERNEL);
+ 	if (!queue->desc_base) {
+@@ -588,7 +582,7 @@ static int qdma_alloc_queue_resources(struct dma_chan *chan)
+ 	if (ret) {
+ 		qdma_err(qdev, "Failed to setup SW desc ctxt for %s",
+ 			 chan->name);
+-		dma_free_coherent(qdev->dma_dev.dev, size, queue->desc_base,
++		dma_free_coherent(pdata->dma_dev, size, queue->desc_base,
+ 				  queue->dma_desc_base);
+ 		return ret;
+ 	}
+@@ -948,8 +942,9 @@ static int qdma_init_error_irq(struct qdma_device *qdev)
+ 
+ static int qdmam_alloc_qintr_rings(struct qdma_device *qdev)
+ {
+-	u32 ctxt[QDMA_CTXT_REGMAP_LEN];
++	struct qdma_platdata *pdata = dev_get_platdata(&qdev->pdev->dev);
+ 	struct device *dev = &qdev->pdev->dev;
++	u32 ctxt[QDMA_CTXT_REGMAP_LEN];
+ 	struct qdma_intr_ring *ring;
+ 	struct qdma_ctxt_intr intr_ctxt;
+ 	u32 vector;
+@@ -969,7 +964,8 @@ static int qdmam_alloc_qintr_rings(struct qdma_device *qdev)
+ 		ring->msix_id = qdev->err_irq_idx + i + 1;
+ 		ring->ridx = i;
+ 		ring->color = 1;
+-		ring->base = dmam_alloc_coherent(dev, QDMA_INTR_RING_SIZE,
++		ring->base = dmam_alloc_coherent(pdata->dma_dev,
++						 QDMA_INTR_RING_SIZE,
+ 						 &ring->dev_base, GFP_KERNEL);
+ 		if (!ring->base) {
+ 			qdma_err(qdev, "Failed to alloc intr ring %d", i);
+diff --git a/drivers/dma/apple-admac.c b/drivers/dma/apple-admac.c
+index 9588773dd2eb67..037ec38730cf98 100644
+--- a/drivers/dma/apple-admac.c
++++ b/drivers/dma/apple-admac.c
+@@ -153,6 +153,8 @@ static int admac_alloc_sram_carveout(struct admac_data *ad,
+ {
+ 	struct admac_sram *sram;
+ 	int i, ret = 0, nblocks;
++	ad->txcache.size = readl_relaxed(ad->base + REG_TX_SRAM_SIZE);
++	ad->rxcache.size = readl_relaxed(ad->base + REG_RX_SRAM_SIZE);
+ 
+ 	if (dir == DMA_MEM_TO_DEV)
+ 		sram = &ad->txcache;
+@@ -912,12 +914,7 @@ static int admac_probe(struct platform_device *pdev)
+ 		goto free_irq;
+ 	}
+ 
+-	ad->txcache.size = readl_relaxed(ad->base + REG_TX_SRAM_SIZE);
+-	ad->rxcache.size = readl_relaxed(ad->base + REG_RX_SRAM_SIZE);
+-
+ 	dev_info(&pdev->dev, "Audio DMA Controller\n");
+-	dev_info(&pdev->dev, "imprint %x TX cache %u RX cache %u\n",
+-		 readl_relaxed(ad->base + REG_IMPRINT), ad->txcache.size, ad->rxcache.size);
+ 
+ 	return 0;
+ 
+diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
+index 299396121e6dc5..e847ad66dc0b49 100644
+--- a/drivers/dma/at_xdmac.c
++++ b/drivers/dma/at_xdmac.c
+@@ -1363,6 +1363,8 @@ at_xdmac_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
+ 		return NULL;
+ 
+ 	desc = at_xdmac_memset_create_desc(chan, atchan, dest, len, value);
++	if (!desc)
++		return NULL;
+ 	list_add_tail(&desc->desc_node, &desc->descs_list);
+ 
+ 	desc->tx_dma_desc.cookie = -EBUSY;
+diff --git a/drivers/dma/dw/acpi.c b/drivers/dma/dw/acpi.c
+index c510c109d2c3ad..b6452fffa657ad 100644
+--- a/drivers/dma/dw/acpi.c
++++ b/drivers/dma/dw/acpi.c
+@@ -8,13 +8,15 @@
+ 
+ static bool dw_dma_acpi_filter(struct dma_chan *chan, void *param)
+ {
++	struct dw_dma *dw = to_dw_dma(chan->device);
++	struct dw_dma_chip_pdata *data = dev_get_drvdata(dw->dma.dev);
+ 	struct acpi_dma_spec *dma_spec = param;
+ 	struct dw_dma_slave slave = {
+ 		.dma_dev = dma_spec->dev,
+ 		.src_id = dma_spec->slave_id,
+ 		.dst_id = dma_spec->slave_id,
+-		.m_master = 0,
+-		.p_master = 1,
++		.m_master = data->m_master,
++		.p_master = data->p_master,
+ 	};
+ 
+ 	return dw_dma_filter(chan, &slave);
+diff --git a/drivers/dma/dw/internal.h b/drivers/dma/dw/internal.h
+index 563ce73488db32..f1bd06a20cd611 100644
+--- a/drivers/dma/dw/internal.h
++++ b/drivers/dma/dw/internal.h
+@@ -51,11 +51,15 @@ struct dw_dma_chip_pdata {
+ 	int (*probe)(struct dw_dma_chip *chip);
+ 	int (*remove)(struct dw_dma_chip *chip);
+ 	struct dw_dma_chip *chip;
++	u8 m_master;
++	u8 p_master;
+ };
+ 
+ static __maybe_unused const struct dw_dma_chip_pdata dw_dma_chip_pdata = {
+ 	.probe = dw_dma_probe,
+ 	.remove = dw_dma_remove,
++	.m_master = 0,
++	.p_master = 1,
+ };
+ 
+ static const struct dw_dma_platform_data idma32_pdata = {
+@@ -72,6 +76,8 @@ static __maybe_unused const struct dw_dma_chip_pdata idma32_chip_pdata = {
+ 	.pdata = &idma32_pdata,
+ 	.probe = idma32_dma_probe,
+ 	.remove = idma32_dma_remove,
++	.m_master = 0,
++	.p_master = 0,
+ };
+ 
+ static const struct dw_dma_platform_data xbar_pdata = {
+@@ -88,6 +94,8 @@ static __maybe_unused const struct dw_dma_chip_pdata xbar_chip_pdata = {
+ 	.pdata = &xbar_pdata,
+ 	.probe = idma32_dma_probe,
+ 	.remove = idma32_dma_remove,
++	.m_master = 0,
++	.p_master = 0,
+ };
+ 
+ #endif /* _DMA_DW_INTERNAL_H */
+diff --git a/drivers/dma/dw/pci.c b/drivers/dma/dw/pci.c
+index ad2d4d012cf729..e8a0eb81726a56 100644
+--- a/drivers/dma/dw/pci.c
++++ b/drivers/dma/dw/pci.c
+@@ -56,10 +56,10 @@ static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
+ 	if (ret)
+ 		return ret;
+ 
+-	dw_dma_acpi_controller_register(chip->dw);
+-
+ 	pci_set_drvdata(pdev, data);
+ 
++	dw_dma_acpi_controller_register(chip->dw);
++
+ 	return 0;
+ }
+ 
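
Together with the dw/acpi.c hunk above, which makes the filter callback read dev_get_drvdata(), this reorder matters: the driver data must be set before the ACPI controller is registered, because registration makes the callback reachable. A standalone sketch of the hazard, names hypothetical:

    #include <stdio.h>

    static int *drvdata;	/* stands in for dev_set/get_drvdata() */

    /* Registration can invoke the callback right away, or from another
     * context; either way the callback must already find valid drvdata. */
    static void register_controller(void (*filter)(void))
    {
    	filter();
    }

    static void acpi_filter(void)
    {
    	printf("filter sees %d\n", drvdata ? *drvdata : -1);
    }

    int main(void)
    {
    	int pdata = 42;

    	drvdata = &pdata;			/* set drvdata first ... */
    	register_controller(acpi_filter);	/* ... then expose callbacks */
    	return 0;
    }
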
+diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h
+index ce37e1ee9c462d..fe8f103d4a6378 100644
+--- a/drivers/dma/fsl-edma-common.h
++++ b/drivers/dma/fsl-edma-common.h
+@@ -166,6 +166,7 @@ struct fsl_edma_chan {
+ 	struct work_struct		issue_worker;
+ 	struct platform_device		*pdev;
+ 	struct device			*pd_dev;
++	struct device_link		*pd_dev_link;
+ 	u32				srcid;
+ 	struct clk			*clk;
+ 	int                             priority;
+diff --git a/drivers/dma/fsl-edma-main.c b/drivers/dma/fsl-edma-main.c
+index f9f1eda792546e..70cb7fda757a94 100644
+--- a/drivers/dma/fsl-edma-main.c
++++ b/drivers/dma/fsl-edma-main.c
+@@ -417,10 +417,33 @@ static const struct of_device_id fsl_edma_dt_ids[] = {
+ };
+ MODULE_DEVICE_TABLE(of, fsl_edma_dt_ids);
+ 
++static void fsl_edma3_detach_pd(struct fsl_edma_engine *fsl_edma)
++{
++	struct fsl_edma_chan *fsl_chan;
++	int i;
++
++	for (i = 0; i < fsl_edma->n_chans; i++) {
++		if (fsl_edma->chan_masked & BIT(i))
++			continue;
++		fsl_chan = &fsl_edma->chans[i];
++		if (fsl_chan->pd_dev_link)
++			device_link_del(fsl_chan->pd_dev_link);
++		if (fsl_chan->pd_dev) {
++			dev_pm_domain_detach(fsl_chan->pd_dev, false);
++			pm_runtime_dont_use_autosuspend(fsl_chan->pd_dev);
++			pm_runtime_set_suspended(fsl_chan->pd_dev);
++		}
++	}
++}
++
++static void devm_fsl_edma3_detach_pd(void *data)
++{
++	fsl_edma3_detach_pd(data);
++}
++
+ static int fsl_edma3_attach_pd(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
+ {
+ 	struct fsl_edma_chan *fsl_chan;
+-	struct device_link *link;
+ 	struct device *pd_chan;
+ 	struct device *dev;
+ 	int i;
+@@ -436,15 +459,16 @@ static int fsl_edma3_attach_pd(struct platform_device *pdev, struct fsl_edma_eng
+ 		pd_chan = dev_pm_domain_attach_by_id(dev, i);
+ 		if (IS_ERR_OR_NULL(pd_chan)) {
+ 			dev_err(dev, "Failed attach pd %d\n", i);
+-			return -EINVAL;
++			goto detach;
+ 		}
+ 
+-		link = device_link_add(dev, pd_chan, DL_FLAG_STATELESS |
++		fsl_chan->pd_dev_link = device_link_add(dev, pd_chan, DL_FLAG_STATELESS |
+ 					     DL_FLAG_PM_RUNTIME |
+ 					     DL_FLAG_RPM_ACTIVE);
+-		if (!link) {
++		if (!fsl_chan->pd_dev_link) {
+ 			dev_err(dev, "Failed to add device_link to %d\n", i);
+-			return -EINVAL;
++			dev_pm_domain_detach(pd_chan, false);
++			goto detach;
+ 		}
+ 
+ 		fsl_chan->pd_dev = pd_chan;
+@@ -455,6 +479,10 @@ static int fsl_edma3_attach_pd(struct platform_device *pdev, struct fsl_edma_eng
+ 	}
+ 
+ 	return 0;
++
++detach:
++	fsl_edma3_detach_pd(fsl_edma);
++	return -EINVAL;
+ }
+ 
+ static int fsl_edma_probe(struct platform_device *pdev)
+@@ -544,6 +572,9 @@ static int fsl_edma_probe(struct platform_device *pdev)
+ 		ret = fsl_edma3_attach_pd(pdev, fsl_edma);
+ 		if (ret)
+ 			return ret;
++		ret = devm_add_action_or_reset(&pdev->dev, devm_fsl_edma3_detach_pd, fsl_edma);
++		if (ret)
++			return ret;
+ 	}
+ 
+ 	if (drvdata->flags & FSL_EDMA_DRV_TCD64)
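
The eDMA fix pairs the power-domain attach with devm_add_action_or_reset(), so the detach runs automatically both when a later probe step fails and when the driver is unbound. A minimal sketch of the idiom, assuming only the standard devm API; the driver names are hypothetical and this is not compilable on its own:

    static void devm_my_detach(void *data)
    {
    	my_detach_all(data);	/* hypothetical teardown helper */
    }

    static int my_probe(struct platform_device *pdev)
    {
    	struct my_state *st;
    	int ret;

    	st = my_attach_all(pdev);	/* hypothetical setup helper */
    	if (IS_ERR(st))
    		return PTR_ERR(st);

    	/*
    	 * If this registration itself fails, the action runs immediately
    	 * ("or_reset"); otherwise devm runs it on probe failure or unbind,
    	 * so no explicit error label is needed from here on.
    	 */
    	ret = devm_add_action_or_reset(&pdev->dev, devm_my_detach, st);
    	if (ret)
    		return ret;

    	return 0;
    }
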
+diff --git a/drivers/dma/ls2x-apb-dma.c b/drivers/dma/ls2x-apb-dma.c
+index 9652e86667224b..b4f18be6294574 100644
+--- a/drivers/dma/ls2x-apb-dma.c
++++ b/drivers/dma/ls2x-apb-dma.c
+@@ -31,7 +31,7 @@
+ #define LDMA_ASK_VALID		BIT(2)
+ #define LDMA_START		BIT(3) /* DMA start operation */
+ #define LDMA_STOP		BIT(4) /* DMA stop operation */
+-#define LDMA_CONFIG_MASK	GENMASK(4, 0) /* DMA controller config bits mask */
++#define LDMA_CONFIG_MASK	GENMASK_ULL(4, 0) /* DMA controller config bits mask */
+ 
+ /* Bitfields in ndesc_addr field of HW descriptor */
+ #define LDMA_DESC_EN		BIT(0) /*1: The next descriptor is valid */
+diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
+index 43efce77bb5770..40b76b40bc30c2 100644
+--- a/drivers/dma/mv_xor.c
++++ b/drivers/dma/mv_xor.c
+@@ -1388,6 +1388,7 @@ static int mv_xor_probe(struct platform_device *pdev)
+ 			irq = irq_of_parse_and_map(np, 0);
+ 			if (!irq) {
+ 				ret = -ENODEV;
++				of_node_put(np);
+ 				goto err_channel_add;
+ 			}
+ 
+@@ -1396,6 +1397,7 @@ static int mv_xor_probe(struct platform_device *pdev)
+ 			if (IS_ERR(chan)) {
+ 				ret = PTR_ERR(chan);
+ 				irq_dispose_mapping(irq);
++				of_node_put(np);
+ 				goto err_channel_add;
+ 			}
+ 
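
The mv_xor change plugs a reference leak: for_each_child_of_node() holds a reference on the current child for each iteration, and it is dropped automatically only when the loop advances or finishes, so any early exit must call of_node_put() by hand. A hedged sketch of the rule; setup_child() is a placeholder:

    static int walk_children(struct device_node *parent)
    {
    	struct device_node *np;
    	int err;

    	for_each_child_of_node(parent, np) {
    		err = setup_child(np);	/* hypothetical per-child setup */
    		if (err) {
    			/* Early exit: drop the iterator's reference. */
    			of_node_put(np);
    			return err;
    		}
    	}
    	/* Normal termination: the iterator already dropped it. */
    	return 0;
    }
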
+diff --git a/drivers/dma/tegra186-gpc-dma.c b/drivers/dma/tegra186-gpc-dma.c
+index 3642508e88bb22..adca05ee98c922 100644
+--- a/drivers/dma/tegra186-gpc-dma.c
++++ b/drivers/dma/tegra186-gpc-dma.c
+@@ -231,6 +231,7 @@ struct tegra_dma_channel {
+ 	bool config_init;
+ 	char name[30];
+ 	enum dma_transfer_direction sid_dir;
++	enum dma_status status;
+ 	int id;
+ 	int irq;
+ 	int slave_id;
+@@ -393,6 +394,8 @@ static int tegra_dma_pause(struct tegra_dma_channel *tdc)
+ 		tegra_dma_dump_chan_regs(tdc);
+ 	}
+ 
++	tdc->status = DMA_PAUSED;
++
+ 	return ret;
+ }
+ 
+@@ -419,6 +422,8 @@ static void tegra_dma_resume(struct tegra_dma_channel *tdc)
+ 	val = tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSRE);
+ 	val &= ~TEGRA_GPCDMA_CHAN_CSRE_PAUSE;
+ 	tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSRE, val);
++
++	tdc->status = DMA_IN_PROGRESS;
+ }
+ 
+ static int tegra_dma_device_resume(struct dma_chan *dc)
+@@ -544,6 +549,7 @@ static void tegra_dma_xfer_complete(struct tegra_dma_channel *tdc)
+ 
+ 	tegra_dma_sid_free(tdc);
+ 	tdc->dma_desc = NULL;
++	tdc->status = DMA_COMPLETE;
+ }
+ 
+ static void tegra_dma_chan_decode_error(struct tegra_dma_channel *tdc,
+@@ -716,6 +722,7 @@ static int tegra_dma_terminate_all(struct dma_chan *dc)
+ 		tdc->dma_desc = NULL;
+ 	}
+ 
++	tdc->status = DMA_COMPLETE;
+ 	tegra_dma_sid_free(tdc);
+ 	vchan_get_all_descriptors(&tdc->vc, &head);
+ 	spin_unlock_irqrestore(&tdc->vc.lock, flags);
+@@ -769,6 +776,9 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
+ 	if (ret == DMA_COMPLETE)
+ 		return ret;
+ 
++	if (tdc->status == DMA_PAUSED)
++		ret = DMA_PAUSED;
++
+ 	spin_lock_irqsave(&tdc->vc.lock, flags);
+ 	vd = vchan_find_desc(&tdc->vc, cookie);
+ 	if (vd) {
+diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
+index bcf3a33123be1c..f0c6d50d8c3345 100644
+--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
+@@ -4108,9 +4108,10 @@ static void drm_dp_mst_up_req_work(struct work_struct *work)
+ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
+ {
+ 	struct drm_dp_pending_up_req *up_req;
++	struct drm_dp_mst_branch *mst_primary;
+ 
+ 	if (!drm_dp_get_one_sb_msg(mgr, true, NULL))
+-		goto out;
++		goto out_clear_reply;
+ 
+ 	if (!mgr->up_req_recv.have_eomt)
+ 		return 0;
+@@ -4128,10 +4129,19 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
+ 		drm_dbg_kms(mgr->dev, "Received unknown up req type, ignoring: %x\n",
+ 			    up_req->msg.req_type);
+ 		kfree(up_req);
+-		goto out;
++		goto out_clear_reply;
++	}
++
++	mutex_lock(&mgr->lock);
++	mst_primary = mgr->mst_primary;
++	if (!mst_primary || !drm_dp_mst_topology_try_get_mstb(mst_primary)) {
++		mutex_unlock(&mgr->lock);
++		kfree(up_req);
++		goto out_clear_reply;
+ 	}
++	mutex_unlock(&mgr->lock);
+ 
+-	drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, up_req->msg.req_type,
++	drm_dp_send_up_ack_reply(mgr, mst_primary, up_req->msg.req_type,
+ 				 false);
+ 
+ 	if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
+@@ -4148,13 +4158,13 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
+ 			    conn_stat->peer_device_type);
+ 
+ 		mutex_lock(&mgr->probe_lock);
+-		handle_csn = mgr->mst_primary->link_address_sent;
++		handle_csn = mst_primary->link_address_sent;
+ 		mutex_unlock(&mgr->probe_lock);
+ 
+ 		if (!handle_csn) {
+ 			drm_dbg_kms(mgr->dev, "Got CSN before finish topology probing. Skip it.");
+ 			kfree(up_req);
+-			goto out;
++			goto out_put_primary;
+ 		}
+ 	} else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
+ 		const struct drm_dp_resource_status_notify *res_stat =
+@@ -4171,7 +4181,9 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
+ 	mutex_unlock(&mgr->up_req_lock);
+ 	queue_work(system_long_wq, &mgr->up_req_work);
+ 
+-out:
++out_put_primary:
++	drm_dp_mst_topology_put_mstb(mst_primary);
++out_clear_reply:
+ 	memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/xe/xe_devcoredump.c b/drivers/gpu/drm/xe/xe_devcoredump.c
+index 5221ee3f12149b..c18e463092afa5 100644
+--- a/drivers/gpu/drm/xe/xe_devcoredump.c
++++ b/drivers/gpu/drm/xe/xe_devcoredump.c
+@@ -20,6 +20,7 @@
+ #include "xe_guc_ct.h"
+ #include "xe_guc_submit.h"
+ #include "xe_hw_engine.h"
++#include "xe_pm.h"
+ #include "xe_sched_job.h"
+ #include "xe_vm.h"
+ 
+@@ -143,31 +144,6 @@ static void xe_devcoredump_snapshot_free(struct xe_devcoredump_snapshot *ss)
+ 	ss->vm = NULL;
+ }
+ 
+-static void xe_devcoredump_deferred_snap_work(struct work_struct *work)
+-{
+-	struct xe_devcoredump_snapshot *ss = container_of(work, typeof(*ss), work);
+-	struct xe_devcoredump *coredump = container_of(ss, typeof(*coredump), snapshot);
+-	unsigned int fw_ref;
+-
+-	/* keep going if fw fails as we still want to save the memory and SW data */
+-	fw_ref = xe_force_wake_get(gt_to_fw(ss->gt), XE_FORCEWAKE_ALL);
+-	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL))
+-		xe_gt_info(ss->gt, "failed to get forcewake for coredump capture\n");
+-	xe_vm_snapshot_capture_delayed(ss->vm);
+-	xe_guc_exec_queue_snapshot_capture_delayed(ss->ge);
+-	xe_force_wake_put(gt_to_fw(ss->gt), fw_ref);
+-
+-	/* Calculate devcoredump size */
+-	ss->read.size = __xe_devcoredump_read(NULL, INT_MAX, coredump);
+-
+-	ss->read.buffer = kvmalloc(ss->read.size, GFP_USER);
+-	if (!ss->read.buffer)
+-		return;
+-
+-	__xe_devcoredump_read(ss->read.buffer, ss->read.size, coredump);
+-	xe_devcoredump_snapshot_free(ss);
+-}
+-
+ static ssize_t xe_devcoredump_read(char *buffer, loff_t offset,
+ 				   size_t count, void *data, size_t datalen)
+ {
+@@ -216,6 +192,45 @@ static void xe_devcoredump_free(void *data)
+ 		 "Xe device coredump has been deleted.\n");
+ }
+ 
++static void xe_devcoredump_deferred_snap_work(struct work_struct *work)
++{
++	struct xe_devcoredump_snapshot *ss = container_of(work, typeof(*ss), work);
++	struct xe_devcoredump *coredump = container_of(ss, typeof(*coredump), snapshot);
++	struct xe_device *xe = coredump_to_xe(coredump);
++	unsigned int fw_ref;
++
++	/*
++	 * NB: Despite passing a GFP_ flags parameter here, more allocations are done
++	 * internally using GFP_KERNEL explicitly. Hence this call must be in the worker
++	 * thread and not in the initial capture call.
++	 */
++	dev_coredumpm_timeout(gt_to_xe(ss->gt)->drm.dev, THIS_MODULE, coredump, 0, GFP_KERNEL,
++			      xe_devcoredump_read, xe_devcoredump_free,
++			      XE_COREDUMP_TIMEOUT_JIFFIES);
++
++	xe_pm_runtime_get(xe);
++
++	/* keep going if fw fails as we still want to save the memory and SW data */
++	fw_ref = xe_force_wake_get(gt_to_fw(ss->gt), XE_FORCEWAKE_ALL);
++	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL))
++		xe_gt_info(ss->gt, "failed to get forcewake for coredump capture\n");
++	xe_vm_snapshot_capture_delayed(ss->vm);
++	xe_guc_exec_queue_snapshot_capture_delayed(ss->ge);
++	xe_force_wake_put(gt_to_fw(ss->gt), fw_ref);
++
++	xe_pm_runtime_put(xe);
++
++	/* Calculate devcoredump size */
++	ss->read.size = __xe_devcoredump_read(NULL, INT_MAX, coredump);
++
++	ss->read.buffer = kvmalloc(ss->read.size, GFP_USER);
++	if (!ss->read.buffer)
++		return;
++
++	__xe_devcoredump_read(ss->read.buffer, ss->read.size, coredump);
++	xe_devcoredump_snapshot_free(ss);
++}
++
+ static void devcoredump_snapshot(struct xe_devcoredump *coredump,
+ 				 struct xe_sched_job *job)
+ {
+@@ -299,10 +314,6 @@ void xe_devcoredump(struct xe_sched_job *job)
+ 	drm_info(&xe->drm, "Xe device coredump has been created\n");
+ 	drm_info(&xe->drm, "Check your /sys/class/drm/card%d/device/devcoredump/data\n",
+ 		 xe->drm.primary->index);
+-
+-	dev_coredumpm_timeout(xe->drm.dev, THIS_MODULE, coredump, 0, GFP_KERNEL,
+-			      xe_devcoredump_read, xe_devcoredump_free,
+-			      XE_COREDUMP_TIMEOUT_JIFFIES);
+ }
+ 
+ static void xe_driver_devcoredump_fini(void *arg)
+diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
+index 98539313cbc970..c5224d43eea45e 100644
+--- a/drivers/i2c/busses/i2c-imx.c
++++ b/drivers/i2c/busses/i2c-imx.c
+@@ -282,6 +282,7 @@ static const struct of_device_id i2c_imx_dt_ids[] = {
+ 	{ .compatible = "fsl,imx6sll-i2c", .data = &imx6_i2c_hwdata, },
+ 	{ .compatible = "fsl,imx6sx-i2c", .data = &imx6_i2c_hwdata, },
+ 	{ .compatible = "fsl,imx6ul-i2c", .data = &imx6_i2c_hwdata, },
++	{ .compatible = "fsl,imx7d-i2c", .data = &imx6_i2c_hwdata, },
+ 	{ .compatible = "fsl,imx7s-i2c", .data = &imx6_i2c_hwdata, },
+ 	{ .compatible = "fsl,imx8mm-i2c", .data = &imx6_i2c_hwdata, },
+ 	{ .compatible = "fsl,imx8mn-i2c", .data = &imx6_i2c_hwdata, },
+diff --git a/drivers/i2c/busses/i2c-microchip-corei2c.c b/drivers/i2c/busses/i2c-microchip-corei2c.c
+index 0b0a1c4d17caef..b0a51695138ad0 100644
+--- a/drivers/i2c/busses/i2c-microchip-corei2c.c
++++ b/drivers/i2c/busses/i2c-microchip-corei2c.c
+@@ -93,27 +93,35 @@
+  * @base:		pointer to register struct
+  * @dev:		device reference
+  * @i2c_clk:		clock reference for i2c input clock
++ * @msg_queue:		pointer to the messages requiring sending
+  * @buf:		pointer to msg buffer for easier use
+  * @msg_complete:	xfer completion object
+  * @adapter:		core i2c abstraction
+  * @msg_err:		error code for completed message
+  * @bus_clk_rate:	current i2c bus clock rate
+  * @isr_status:		cached copy of local ISR status
++ * @total_num:		total number of messages to be sent/received
++ * @current_num:	index of the current message being sent/received
+  * @msg_len:		number of bytes transferred in msg
+  * @addr:		address of the current slave
++ * @restart_needed:	whether or not a repeated start is required after current message
+  */
+ struct mchp_corei2c_dev {
+ 	void __iomem *base;
+ 	struct device *dev;
+ 	struct clk *i2c_clk;
++	struct i2c_msg *msg_queue;
+ 	u8 *buf;
+ 	struct completion msg_complete;
+ 	struct i2c_adapter adapter;
+ 	int msg_err;
++	int total_num;
++	int current_num;
+ 	u32 bus_clk_rate;
+ 	u32 isr_status;
+ 	u16 msg_len;
+ 	u8 addr;
++	bool restart_needed;
+ };
+ 
+ static void mchp_corei2c_core_disable(struct mchp_corei2c_dev *idev)
+@@ -222,6 +230,47 @@ static int mchp_corei2c_fill_tx(struct mchp_corei2c_dev *idev)
+ 	return 0;
+ }
+ 
++static void mchp_corei2c_next_msg(struct mchp_corei2c_dev *idev)
++{
++	struct i2c_msg *this_msg;
++	u8 ctrl;
++
++	if (idev->current_num >= idev->total_num) {
++		complete(&idev->msg_complete);
++		return;
++	}
++
++	/*
++	 * If there's been an error, the isr needs to return control
++	 * to the "main" part of the driver, so as not to keep sending
++	 * messages once it completes and clears the SI bit.
++	 */
++	if (idev->msg_err) {
++		complete(&idev->msg_complete);
++		return;
++	}
++
++	this_msg = idev->msg_queue++;
++
++	if (idev->current_num < (idev->total_num - 1)) {
++		struct i2c_msg *next_msg = idev->msg_queue;
++
++		idev->restart_needed = next_msg->flags & I2C_M_RD;
++	} else {
++		idev->restart_needed = false;
++	}
++
++	idev->addr = i2c_8bit_addr_from_msg(this_msg);
++	idev->msg_len = this_msg->len;
++	idev->buf = this_msg->buf;
++
++	ctrl = readb(idev->base + CORE_I2C_CTRL);
++	ctrl |= CTRL_STA;
++	writeb(ctrl, idev->base + CORE_I2C_CTRL);
++
++	idev->current_num++;
++}
++
+ static irqreturn_t mchp_corei2c_handle_isr(struct mchp_corei2c_dev *idev)
+ {
+ 	u32 status = idev->isr_status;
+@@ -238,8 +287,6 @@ static irqreturn_t mchp_corei2c_handle_isr(struct mchp_corei2c_dev *idev)
+ 		ctrl &= ~CTRL_STA;
+ 		writeb(idev->addr, idev->base + CORE_I2C_DATA);
+ 		writeb(ctrl, idev->base + CORE_I2C_CTRL);
+-		if (idev->msg_len == 0)
+-			finished = true;
+ 		break;
+ 	case STATUS_M_ARB_LOST:
+ 		idev->msg_err = -EAGAIN;
+@@ -247,10 +294,14 @@ static irqreturn_t mchp_corei2c_handle_isr(struct mchp_corei2c_dev *idev)
+ 		break;
+ 	case STATUS_M_SLAW_ACK:
+ 	case STATUS_M_TX_DATA_ACK:
+-		if (idev->msg_len > 0)
++		if (idev->msg_len > 0) {
+ 			mchp_corei2c_fill_tx(idev);
+-		else
+-			last_byte = true;
++		} else {
++			if (idev->restart_needed)
++				finished = true;
++			else
++				last_byte = true;
++		}
+ 		break;
+ 	case STATUS_M_TX_DATA_NACK:
+ 	case STATUS_M_SLAR_NACK:
+@@ -287,7 +338,7 @@ static irqreturn_t mchp_corei2c_handle_isr(struct mchp_corei2c_dev *idev)
+ 		mchp_corei2c_stop(idev);
+ 
+ 	if (last_byte || finished)
+-		complete(&idev->msg_complete);
++		mchp_corei2c_next_msg(idev);
+ 
+ 	return IRQ_HANDLED;
+ }
+@@ -311,21 +362,48 @@ static irqreturn_t mchp_corei2c_isr(int irq, void *_dev)
+ 	return ret;
+ }
+ 
+-static int mchp_corei2c_xfer_msg(struct mchp_corei2c_dev *idev,
+-				 struct i2c_msg *msg)
++static int mchp_corei2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
++			     int num)
+ {
+-	u8 ctrl;
++	struct mchp_corei2c_dev *idev = i2c_get_adapdata(adap);
++	struct i2c_msg *this_msg = msgs;
+ 	unsigned long time_left;
++	u8 ctrl;
++
++	mchp_corei2c_core_enable(idev);
++
++	/*
++	 * The isr controls the flow of a transfer; this queue information
++	 * needs to be saved somewhere the isr can access it.
++	 */
++	idev->restart_needed = false;
++	idev->msg_queue = msgs;
++	idev->total_num = num;
++	idev->current_num = 0;
+ 
+-	idev->addr = i2c_8bit_addr_from_msg(msg);
+-	idev->msg_len = msg->len;
+-	idev->buf = msg->buf;
++	/*
++	 * But the first entry to the isr is triggered by the start in this
++	 * function, so the first message needs to be "dequeued".
++	 */
++	idev->addr = i2c_8bit_addr_from_msg(this_msg);
++	idev->msg_len = this_msg->len;
++	idev->buf = this_msg->buf;
+ 	idev->msg_err = 0;
+ 
+-	reinit_completion(&idev->msg_complete);
++	if (idev->total_num > 1) {
++		struct i2c_msg *next_msg = msgs + 1;
+ 
+-	mchp_corei2c_core_enable(idev);
++		idev->restart_needed = next_msg->flags & I2C_M_RD;
++	}
+ 
++	idev->current_num++;
++	idev->msg_queue++;
++
++	reinit_completion(&idev->msg_complete);
++
++	/*
++	 * Send the first start to pass control to the isr
++	 */
+ 	ctrl = readb(idev->base + CORE_I2C_CTRL);
+ 	ctrl |= CTRL_STA;
+ 	writeb(ctrl, idev->base + CORE_I2C_CTRL);
+@@ -335,20 +413,8 @@ static int mchp_corei2c_xfer_msg(struct mchp_corei2c_dev *idev,
+ 	if (!time_left)
+ 		return -ETIMEDOUT;
+ 
+-	return idev->msg_err;
+-}
+-
+-static int mchp_corei2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
+-			     int num)
+-{
+-	struct mchp_corei2c_dev *idev = i2c_get_adapdata(adap);
+-	int i, ret;
+-
+-	for (i = 0; i < num; i++) {
+-		ret = mchp_corei2c_xfer_msg(idev, msgs++);
+-		if (ret)
+-			return ret;
+-	}
++	if (idev->msg_err)
++		return idev->msg_err;
+ 
+ 	return num;
+ }
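
The rewritten xfer path lets the interrupt handler walk the whole message queue and emit a repeated START before each read (restart_needed), instead of completing one message at a time. The same write-then-read shape is what a user-space caller requests through the I2C_RDWR ioctl; a sketch assuming a hypothetical device at address 0x50 on /dev/i2c-0:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/i2c.h>
    #include <linux/i2c-dev.h>

    int main(void)
    {
    	unsigned char reg = 0x10, val = 0;
    	struct i2c_msg msgs[2] = {
    		{ .addr = 0x50, .flags = 0,        .len = 1, .buf = &reg },
    		/* I2C_M_RD on the second message is what obliges the
    		 * controller to issue a repeated START between the two. */
    		{ .addr = 0x50, .flags = I2C_M_RD, .len = 1, .buf = &val },
    	};
    	struct i2c_rdwr_ioctl_data xfer = { .msgs = msgs, .nmsgs = 2 };
    	int fd = open("/dev/i2c-0", O_RDWR);	/* bus number is an assumption */

    	if (fd < 0 || ioctl(fd, I2C_RDWR, &xfer) < 0) {
    		perror("i2c transfer");
    		return 1;
    	}
    	printf("reg 0x%02x = 0x%02x\n", reg, val);
    	close(fd);
    	return 0;
    }
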
+diff --git a/drivers/media/dvb-frontends/dib3000mb.c b/drivers/media/dvb-frontends/dib3000mb.c
+index c598b2a6332565..7c452ddd9e40fa 100644
+--- a/drivers/media/dvb-frontends/dib3000mb.c
++++ b/drivers/media/dvb-frontends/dib3000mb.c
+@@ -51,7 +51,7 @@ MODULE_PARM_DESC(debug, "set debugging level (1=info,2=xfer,4=setfe,8=getfe (|-a
+ static int dib3000_read_reg(struct dib3000_state *state, u16 reg)
+ {
+ 	u8 wb[] = { ((reg >> 8) | 0x80) & 0xff, reg & 0xff };
+-	u8 rb[2];
++	u8 rb[2] = {};
+ 	struct i2c_msg msg[] = {
+ 		{ .addr = state->config.demod_address, .flags = 0,        .buf = wb, .len = 2 },
+ 		{ .addr = state->config.demod_address, .flags = I2C_M_RD, .buf = rb, .len = 2 },
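
The dib3000mb one-liner zero-initializes the read buffer so that, when the I2C transfer fails and never fills rb, the value assembled from it is a well-defined 0 rather than stack garbage. The principle in isolation:

    #include <stdint.h>
    #include <stdio.h>

    /* Placeholder for a transfer that fails without touching the buffer. */
    static int i2c_transfer_stub(uint8_t *buf, int len)
    {
    	(void)buf; (void)len;
    	return -1;
    }

    int main(void)
    {
    	uint8_t rb[2] = { 0 };	/* same effect as the patch's "= {}" */

    	if (i2c_transfer_stub(rb, sizeof(rb)) < 0)
    		fprintf(stderr, "transfer failed; buffer stays zeroed\n");

    	printf("reg = 0x%04x\n", (rb[0] << 8) | rb[1]);
    	return 0;
    }
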
+diff --git a/drivers/mtd/nand/raw/arasan-nand-controller.c b/drivers/mtd/nand/raw/arasan-nand-controller.c
+index 5436ec4a8fde42..a52a9f5a75e021 100644
+--- a/drivers/mtd/nand/raw/arasan-nand-controller.c
++++ b/drivers/mtd/nand/raw/arasan-nand-controller.c
+@@ -1409,8 +1409,8 @@ static int anfc_parse_cs(struct arasan_nfc *nfc)
+ 	 * case, the "not" chosen CS is assigned to nfc->spare_cs and selected
+ 	 * whenever a GPIO CS must be asserted.
+ 	 */
+-	if (nfc->cs_array && nfc->ncs > 2) {
+-		if (!nfc->cs_array[0] && !nfc->cs_array[1]) {
++	if (nfc->cs_array) {
++		if (nfc->ncs > 2 && !nfc->cs_array[0] && !nfc->cs_array[1]) {
+ 			dev_err(nfc->dev,
+ 				"Assign a single native CS when using GPIOs\n");
+ 			return -EINVAL;
+@@ -1478,8 +1478,15 @@ static int anfc_probe(struct platform_device *pdev)
+ 
+ static void anfc_remove(struct platform_device *pdev)
+ {
++	int i;
+ 	struct arasan_nfc *nfc = platform_get_drvdata(pdev);
+ 
++	for (i = 0; i < nfc->ncs; i++) {
++		if (nfc->cs_array[i]) {
++			gpiod_put(nfc->cs_array[i]);
++		}
++	}
++
+ 	anfc_chips_cleanup(nfc);
+ }
+ 
+diff --git a/drivers/mtd/nand/raw/atmel/pmecc.c b/drivers/mtd/nand/raw/atmel/pmecc.c
+index a22aab4ed4e8ab..3c7dee1be21df1 100644
+--- a/drivers/mtd/nand/raw/atmel/pmecc.c
++++ b/drivers/mtd/nand/raw/atmel/pmecc.c
+@@ -380,10 +380,8 @@ atmel_pmecc_create_user(struct atmel_pmecc *pmecc,
+ 	user->delta = user->dmu + req->ecc.strength + 1;
+ 
+ 	gf_tables = atmel_pmecc_get_gf_tables(req);
+-	if (IS_ERR(gf_tables)) {
+-		kfree(user);
++	if (IS_ERR(gf_tables))
+ 		return ERR_CAST(gf_tables);
+-	}
+ 
+ 	user->gf_tables = gf_tables;
+ 
+diff --git a/drivers/mtd/nand/raw/diskonchip.c b/drivers/mtd/nand/raw/diskonchip.c
+index 8db7fc42457111..70d6c2250f32c8 100644
+--- a/drivers/mtd/nand/raw/diskonchip.c
++++ b/drivers/mtd/nand/raw/diskonchip.c
+@@ -1098,7 +1098,7 @@ static inline int __init inftl_partscan(struct mtd_info *mtd, struct mtd_partiti
+ 		    (i == 0) && (ip->firstUnit > 0)) {
+ 			parts[0].name = " DiskOnChip IPL / Media Header partition";
+ 			parts[0].offset = 0;
+-			parts[0].size = mtd->erasesize * ip->firstUnit;
++			parts[0].size = (uint64_t)mtd->erasesize * ip->firstUnit;
+ 			numparts = 1;
+ 		}
+ 
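
The diskonchip cast fixes a classic width bug: erasesize and firstUnit are both 32-bit, so their product is computed in 32 bits and silently truncated before being stored into the 64-bit size field; widening one operand first makes the multiplication 64-bit. Demonstration:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	uint32_t erasesize  = 1u << 20;	/* 1 MiB eraseblocks */
    	uint32_t first_unit = 8192;	/* 8 GiB worth of blocks */

    	uint64_t wrong = erasesize * first_unit;	    /* 32-bit product, wraps to 0 */
    	uint64_t right = (uint64_t)erasesize * first_unit;  /* widened first */

    	printf("wrong = %" PRIu64 "\nright = %" PRIu64 "\n", wrong, right);
    	return 0;
    }
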
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+index e95ffe3035473c..c70da7281551a2 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+@@ -1074,12 +1074,13 @@ int iwl_trans_read_config32(struct iwl_trans *trans, u32 ofs,
+ void iwl_trans_debugfs_cleanup(struct iwl_trans *trans);
+ #endif
+ 
+-#define iwl_trans_read_mem_bytes(trans, addr, buf, bufsize)		      \
+-	do {								      \
+-		if (__builtin_constant_p(bufsize))			      \
+-			BUILD_BUG_ON((bufsize) % sizeof(u32));		      \
+-		iwl_trans_read_mem(trans, addr, buf, (bufsize) / sizeof(u32));\
+-	} while (0)
++#define iwl_trans_read_mem_bytes(trans, addr, buf, bufsize)	\
++	({							\
++		if (__builtin_constant_p(bufsize))		\
++			BUILD_BUG_ON((bufsize) % sizeof(u32));	\
++		iwl_trans_read_mem(trans, addr, buf,		\
++				   (bufsize) / sizeof(u32));	\
++	})
+ 
+ int iwl_trans_write_imr_mem(struct iwl_trans *trans, u32 dst_addr,
+ 			    u64 src_addr, u32 byte_cnt);
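
The macro rewrite swaps do { } while (0) for a GNU statement expression so that iwl_trans_read_mem_bytes() yields the return value of iwl_trans_read_mem(); the d3.c hunk below depends on being able to check that result. A standalone illustration of the construct (a GCC/Clang extension), with read_mem() as a stub:

    #include <stdio.h>

    static int read_mem(int addr)
    {
    	return addr < 0 ? -22 : 0;	/* -EINVAL-style failure */
    }

    /* A statement expression evaluates to its final expression, so the
     * macro can both perform work and hand its caller an error code. */
    #define read_mem_checked(addr)			\
    	({					\
    		int __ret = read_mem(addr);	\
    		__ret;				\
    	})

    int main(void)
    {
    	int ret = read_mem_checked(-1);

    	if (ret)
    		fprintf(stderr, "read failed: %d\n", ret);
    	return ret ? 1 : 0;
    }
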
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+index 244ca8cab9d1a2..1a814eb6743e80 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+@@ -3032,13 +3032,18 @@ static bool iwl_mvm_rt_status(struct iwl_trans *trans, u32 base, u32 *err_id)
+ 		/* cf. struct iwl_error_event_table */
+ 		u32 valid;
+ 		__le32 err_id;
+-	} err_info;
++	} err_info = {};
++	int ret;
+ 
+ 	if (!base)
+ 		return false;
+ 
+-	iwl_trans_read_mem_bytes(trans, base,
+-				 &err_info, sizeof(err_info));
++	ret = iwl_trans_read_mem_bytes(trans, base,
++				       &err_info, sizeof(err_info));
++
++	if (ret)
++		return true;
++
+ 	if (err_info.valid && err_id)
+ 		*err_id = le32_to_cpu(err_info.err_id);
+ 
+@@ -3635,22 +3640,31 @@ int iwl_mvm_fast_resume(struct iwl_mvm *mvm)
+ 	iwl_fw_dbg_read_d3_debug_data(&mvm->fwrt);
+ 
+ 	if (iwl_mvm_check_rt_status(mvm, NULL)) {
++		IWL_ERR(mvm,
++			"iwl_mvm_check_rt_status failed, device is gone during suspend\n");
+ 		set_bit(STATUS_FW_ERROR, &mvm->trans->status);
+ 		iwl_mvm_dump_nic_error_log(mvm);
+ 		iwl_dbg_tlv_time_point(&mvm->fwrt,
+ 				       IWL_FW_INI_TIME_POINT_FW_ASSERT, NULL);
+ 		iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert,
+ 					false, 0);
+-		return -ENODEV;
++		mvm->trans->state = IWL_TRANS_NO_FW;
++		ret = -ENODEV;
++
++		goto out;
+ 	}
+ 	ret = iwl_mvm_d3_notif_wait(mvm, &d3_data);
++
++	if (ret) {
++		IWL_ERR(mvm, "Couldn't get the d3 notif %d\n", ret);
++		mvm->trans->state = IWL_TRANS_NO_FW;
++	}
++
++out:
+ 	clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
+ 	mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
+ 	mvm->fast_resume = false;
+ 
+-	if (ret)
+-		IWL_ERR(mvm, "Couldn't get the d3 notif %d\n", ret);
+-
+ 	return ret;
+ }
+ 
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+index 3b9943eb69341e..d19b3bd0866bda 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+@@ -1643,6 +1643,8 @@ int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
+ out:
+ 	if (*status == IWL_D3_STATUS_ALIVE)
+ 		ret = iwl_pcie_d3_handshake(trans, false);
++	else
++		trans->state = IWL_TRANS_NO_FW;
+ 
+ 	return ret;
+ }
+diff --git a/drivers/pci/msi/irqdomain.c b/drivers/pci/msi/irqdomain.c
+index 569125726b3e19..d7ba8795d60f81 100644
+--- a/drivers/pci/msi/irqdomain.c
++++ b/drivers/pci/msi/irqdomain.c
+@@ -350,8 +350,11 @@ bool pci_msi_domain_supports(struct pci_dev *pdev, unsigned int feature_mask,
+ 
+ 	domain = dev_get_msi_domain(&pdev->dev);
+ 
+-	if (!domain || !irq_domain_is_hierarchy(domain))
+-		return mode == ALLOW_LEGACY;
++	if (!domain || !irq_domain_is_hierarchy(domain)) {
++		if (IS_ENABLED(CONFIG_PCI_MSI_ARCH_FALLBACKS))
++			return mode == ALLOW_LEGACY;
++		return false;
++	}
+ 
+ 	if (!irq_domain_is_msi_parent(domain)) {
+ 		/*
+diff --git a/drivers/pci/msi/msi.c b/drivers/pci/msi/msi.c
+index 3a45879d85db96..2f647cac4cae34 100644
+--- a/drivers/pci/msi/msi.c
++++ b/drivers/pci/msi/msi.c
+@@ -433,6 +433,10 @@ int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
+ 	if (WARN_ON_ONCE(dev->msi_enabled))
+ 		return -EINVAL;
+ 
++	/* Test for the availability of MSI support */
++	if (!pci_msi_domain_supports(dev, 0, ALLOW_LEGACY))
++		return -ENOTSUPP;
++
+ 	nvec = pci_msi_vec_count(dev);
+ 	if (nvec < 0)
+ 		return nvec;
+diff --git a/drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c b/drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c
+index 950b7ae1d1a838..dc452610934add 100644
+--- a/drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c
++++ b/drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c
+@@ -325,6 +325,12 @@ static void usb_init_common_7216(struct brcm_usb_init_params *params)
+ 	void __iomem *ctrl = params->regs[BRCM_REGS_CTRL];
+ 
+ 	USB_CTRL_UNSET(ctrl, USB_PM, XHC_S2_CLK_SWITCH_EN);
++
++	/*
++	 * The PHY might be in a bad state if it is already powered
++	 * up. Toggle the power just in case.
++	 */
++	USB_CTRL_SET(ctrl, USB_PM, USB_PWRDN);
+ 	USB_CTRL_UNSET(ctrl, USB_PM, USB_PWRDN);
+ 
+ 	/* 1 millisecond - for USB clocks to settle down */
+diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
+index f053b525ccffab..413f76e2d1744d 100644
+--- a/drivers/phy/phy-core.c
++++ b/drivers/phy/phy-core.c
+@@ -145,8 +145,10 @@ static struct phy_provider *of_phy_provider_lookup(struct device_node *node)
+ 			return phy_provider;
+ 
+ 		for_each_child_of_node(phy_provider->children, child)
+-			if (child == node)
++			if (child == node) {
++				of_node_put(child);
+ 				return phy_provider;
++			}
+ 	}
+ 
+ 	return ERR_PTR(-EPROBE_DEFER);
+@@ -629,8 +631,10 @@ static struct phy *_of_phy_get(struct device_node *np, int index)
+ 		return ERR_PTR(-ENODEV);
+ 
+ 	/* This phy type handled by the usb-phy subsystem for now */
+-	if (of_device_is_compatible(args.np, "usb-nop-xceiv"))
+-		return ERR_PTR(-ENODEV);
++	if (of_device_is_compatible(args.np, "usb-nop-xceiv")) {
++		phy = ERR_PTR(-ENODEV);
++		goto out_put_node;
++	}
+ 
+ 	mutex_lock(&phy_provider_mutex);
+ 	phy_provider = of_phy_provider_lookup(args.np);
+@@ -652,6 +656,7 @@ static struct phy *_of_phy_get(struct device_node *np, int index)
+ 
+ out_unlock:
+ 	mutex_unlock(&phy_provider_mutex);
++out_put_node:
+ 	of_node_put(args.np);
+ 
+ 	return phy;
+@@ -737,7 +742,7 @@ void devm_phy_put(struct device *dev, struct phy *phy)
+ 	if (!phy)
+ 		return;
+ 
+-	r = devres_destroy(dev, devm_phy_release, devm_phy_match, phy);
++	r = devres_release(dev, devm_phy_release, devm_phy_match, phy);
+ 	dev_WARN_ONCE(dev, r, "couldn't find PHY resource\n");
+ }
+ EXPORT_SYMBOL_GPL(devm_phy_put);
+@@ -1121,7 +1126,7 @@ void devm_phy_destroy(struct device *dev, struct phy *phy)
+ {
+ 	int r;
+ 
+-	r = devres_destroy(dev, devm_phy_consume, devm_phy_match, phy);
++	r = devres_release(dev, devm_phy_consume, devm_phy_match, phy);
+ 	dev_WARN_ONCE(dev, r, "couldn't find PHY resource\n");
+ }
+ EXPORT_SYMBOL_GPL(devm_phy_destroy);
+@@ -1259,12 +1264,12 @@ EXPORT_SYMBOL_GPL(of_phy_provider_unregister);
+  * of_phy_provider_unregister to unregister the phy provider.
+  */
+ void devm_of_phy_provider_unregister(struct device *dev,
+-	struct phy_provider *phy_provider)
++				     struct phy_provider *phy_provider)
+ {
+ 	int r;
+ 
+-	r = devres_destroy(dev, devm_phy_provider_release, devm_phy_match,
+-		phy_provider);
++	r = devres_release(dev, devm_phy_provider_release, devm_phy_match,
++			   phy_provider);
+ 	dev_WARN_ONCE(dev, r, "couldn't find PHY provider device resource\n");
+ }
+ EXPORT_SYMBOL_GPL(devm_of_phy_provider_unregister);
+diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
+index 1246d3bc8b92f8..8e2cd2c178d6b2 100644
+--- a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
++++ b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
+@@ -1008,7 +1008,7 @@ static const struct qmp_phy_init_tbl sc8280xp_usb3_uniphy_rx_tbl[] = {
+ 	QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f),
+ 	QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff),
+ 	QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f),
+-	QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SO_GAIN, 0x0a),
++	QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FO_GAIN, 0x0a),
+ 	QMP_PHY_INIT_CFG(QSERDES_V5_RX_VGA_CAL_CNTRL1, 0x54),
+ 	QMP_PHY_INIT_CFG(QSERDES_V5_RX_VGA_CAL_CNTRL2, 0x0f),
+ 	QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
+diff --git a/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c b/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
+index 0a9989e41237f1..2eb3329ca23f67 100644
+--- a/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
++++ b/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
+@@ -309,7 +309,7 @@ static int rockchip_combphy_parse_dt(struct device *dev, struct rockchip_combphy
+ 
+ 	priv->ext_refclk = device_property_present(dev, "rockchip,ext-refclk");
+ 
+-	priv->phy_rst = devm_reset_control_array_get_exclusive(dev);
++	priv->phy_rst = devm_reset_control_get(dev, "phy");
+ 	if (IS_ERR(priv->phy_rst))
+ 		return dev_err_probe(dev, PTR_ERR(priv->phy_rst), "failed to get phy reset\n");
+ 
+diff --git a/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c b/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c
+index 9f084697dd05ce..69c3ec0938f74f 100644
+--- a/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c
++++ b/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c
+@@ -1116,6 +1116,8 @@ static int rk_hdptx_phy_probe(struct platform_device *pdev)
+ 		return dev_err_probe(dev, PTR_ERR(hdptx->grf),
+ 				     "Could not get GRF syscon\n");
+ 
++	platform_set_drvdata(pdev, hdptx);
++
+ 	ret = devm_pm_runtime_enable(dev);
+ 	if (ret)
+ 		return dev_err_probe(dev, ret, "Failed to enable runtime PM\n");
+@@ -1125,7 +1127,6 @@ static int rk_hdptx_phy_probe(struct platform_device *pdev)
+ 		return dev_err_probe(dev, PTR_ERR(hdptx->phy),
+ 				     "Failed to create HDMI PHY\n");
+ 
+-	platform_set_drvdata(pdev, hdptx);
+ 	phy_set_drvdata(hdptx->phy, hdptx);
+ 	phy_set_bus_width(hdptx->phy, 8);
+ 
+diff --git a/drivers/platform/chrome/cros_ec_lpc.c b/drivers/platform/chrome/cros_ec_lpc.c
+index c784119ab5dc0c..626e2635e3da70 100644
+--- a/drivers/platform/chrome/cros_ec_lpc.c
++++ b/drivers/platform/chrome/cros_ec_lpc.c
+@@ -707,7 +707,7 @@ static const struct dmi_system_id cros_ec_lpc_dmi_table[] __initconst = {
+ 		/* Framework Laptop (12th Gen Intel Core) */
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "Framework"),
+-			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "12th Gen Intel Core"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Laptop (12th Gen Intel Core)"),
+ 		},
+ 		.driver_data = (void *)&framework_laptop_mec_lpc_driver_data,
+ 	},
+@@ -715,7 +715,7 @@ static const struct dmi_system_id cros_ec_lpc_dmi_table[] __initconst = {
+ 		/* Framework Laptop (13th Gen Intel Core) */
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "Framework"),
+-			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "13th Gen Intel Core"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Laptop (13th Gen Intel Core)"),
+ 		},
+ 		.driver_data = (void *)&framework_laptop_mec_lpc_driver_data,
+ 	},
+diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
+index ef04d396f61c77..a5933980ade3d6 100644
+--- a/drivers/platform/x86/asus-nb-wmi.c
++++ b/drivers/platform/x86/asus-nb-wmi.c
+@@ -623,6 +623,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
+ 	{ KE_KEY, 0xC4, { KEY_KBDILLUMUP } },
+ 	{ KE_KEY, 0xC5, { KEY_KBDILLUMDOWN } },
+ 	{ KE_IGNORE, 0xC6, },  /* Ambient Light Sensor notification */
++	{ KE_IGNORE, 0xCF, },	/* AC mode */
+ 	{ KE_KEY, 0xFA, { KEY_PROG2 } },           /* Lid flip action */
+ 	{ KE_KEY, 0xBD, { KEY_PROG2 } },           /* Lid flip action on ROG xflow laptops */
+ 	{ KE_END, 0},
+diff --git a/drivers/power/supply/bq24190_charger.c b/drivers/power/supply/bq24190_charger.c
+index 2b393eb5c2820e..c47f32f152e602 100644
+--- a/drivers/power/supply/bq24190_charger.c
++++ b/drivers/power/supply/bq24190_charger.c
+@@ -567,6 +567,7 @@ static int bq24190_set_otg_vbus(struct bq24190_dev_info *bdi, bool enable)
+ 
+ static int bq24296_set_otg_vbus(struct bq24190_dev_info *bdi, bool enable)
+ {
++	union power_supply_propval val = { .intval = bdi->charge_type };
+ 	int ret;
+ 
+ 	ret = pm_runtime_resume_and_get(bdi->dev);
+@@ -587,13 +588,18 @@ static int bq24296_set_otg_vbus(struct bq24190_dev_info *bdi, bool enable)
+ 
+ 		ret = bq24190_write_mask(bdi, BQ24190_REG_POC,
+ 					 BQ24296_REG_POC_OTG_CONFIG_MASK,
+-					 BQ24296_REG_POC_CHG_CONFIG_SHIFT,
++					 BQ24296_REG_POC_OTG_CONFIG_SHIFT,
+ 					 BQ24296_REG_POC_OTG_CONFIG_OTG);
+-	} else
++	} else {
+ 		ret = bq24190_write_mask(bdi, BQ24190_REG_POC,
+ 					 BQ24296_REG_POC_OTG_CONFIG_MASK,
+-					 BQ24296_REG_POC_CHG_CONFIG_SHIFT,
++					 BQ24296_REG_POC_OTG_CONFIG_SHIFT,
+ 					 BQ24296_REG_POC_OTG_CONFIG_DISABLE);
++		if (ret < 0)
++			goto out;
++
++		ret = bq24190_charger_set_charge_type(bdi, &val);
++	}
+ 
+ out:
+ 	pm_runtime_mark_last_busy(bdi->dev);
+diff --git a/drivers/power/supply/cros_charge-control.c b/drivers/power/supply/cros_charge-control.c
+index 17c53591ce197d..9b0a7500296b4d 100644
+--- a/drivers/power/supply/cros_charge-control.c
++++ b/drivers/power/supply/cros_charge-control.c
+@@ -7,8 +7,10 @@
+ #include <acpi/battery.h>
+ #include <linux/container_of.h>
+ #include <linux/dmi.h>
++#include <linux/lockdep.h>
+ #include <linux/mod_devicetable.h>
+ #include <linux/module.h>
++#include <linux/mutex.h>
+ #include <linux/platform_data/cros_ec_commands.h>
+ #include <linux/platform_data/cros_ec_proto.h>
+ #include <linux/platform_device.h>
+@@ -49,6 +51,7 @@ struct cros_chctl_priv {
+ 	struct attribute *attributes[_CROS_CHCTL_ATTR_COUNT];
+ 	struct attribute_group group;
+ 
++	struct mutex lock; /* protects fields below and cros_ec */
+ 	enum power_supply_charge_behaviour current_behaviour;
+ 	u8 current_start_threshold, current_end_threshold;
+ };
+@@ -85,6 +88,8 @@ static int cros_chctl_configure_ec(struct cros_chctl_priv *priv)
+ {
+ 	struct ec_params_charge_control req = {};
+ 
++	lockdep_assert_held(&priv->lock);
++
+ 	req.cmd = EC_CHARGE_CONTROL_CMD_SET;
+ 
+ 	switch (priv->current_behaviour) {
+@@ -134,11 +139,15 @@ static ssize_t cros_chctl_store_threshold(struct device *dev, struct cros_chctl_
+ 		return -EINVAL;
+ 
+ 	if (is_end_threshold) {
+-		if (val <= priv->current_start_threshold)
++		/* Start threshold is not exposed, use fixed value */
++		if (priv->cmd_version == 2)
++			priv->current_start_threshold = val == 100 ? 0 : val;
++
++		if (val < priv->current_start_threshold)
+ 			return -EINVAL;
+ 		priv->current_end_threshold = val;
+ 	} else {
+-		if (val >= priv->current_end_threshold)
++		if (val > priv->current_end_threshold)
+ 			return -EINVAL;
+ 		priv->current_start_threshold = val;
+ 	}
+@@ -159,6 +168,7 @@ static ssize_t charge_control_start_threshold_show(struct device *dev,
+ 	struct cros_chctl_priv *priv = cros_chctl_attr_to_priv(&attr->attr,
+ 							       CROS_CHCTL_ATTR_START_THRESHOLD);
+ 
++	guard(mutex)(&priv->lock);
+ 	return sysfs_emit(buf, "%u\n", (unsigned int)priv->current_start_threshold);
+ }
+ 
+@@ -169,6 +179,7 @@ static ssize_t charge_control_start_threshold_store(struct device *dev,
+ 	struct cros_chctl_priv *priv = cros_chctl_attr_to_priv(&attr->attr,
+ 							       CROS_CHCTL_ATTR_START_THRESHOLD);
+ 
++	guard(mutex)(&priv->lock);
+ 	return cros_chctl_store_threshold(dev, priv, 0, buf, count);
+ }
+ 
+@@ -178,6 +189,7 @@ static ssize_t charge_control_end_threshold_show(struct device *dev, struct devi
+ 	struct cros_chctl_priv *priv = cros_chctl_attr_to_priv(&attr->attr,
+ 							       CROS_CHCTL_ATTR_END_THRESHOLD);
+ 
++	guard(mutex)(&priv->lock);
+ 	return sysfs_emit(buf, "%u\n", (unsigned int)priv->current_end_threshold);
+ }
+ 
+@@ -187,6 +199,7 @@ static ssize_t charge_control_end_threshold_store(struct device *dev, struct dev
+ 	struct cros_chctl_priv *priv = cros_chctl_attr_to_priv(&attr->attr,
+ 							       CROS_CHCTL_ATTR_END_THRESHOLD);
+ 
++	guard(mutex)(&priv->lock);
+ 	return cros_chctl_store_threshold(dev, priv, 1, buf, count);
+ }
+ 
+@@ -195,6 +208,7 @@ static ssize_t charge_behaviour_show(struct device *dev, struct device_attribute
+ 	struct cros_chctl_priv *priv = cros_chctl_attr_to_priv(&attr->attr,
+ 							       CROS_CHCTL_ATTR_CHARGE_BEHAVIOUR);
+ 
++	guard(mutex)(&priv->lock);
+ 	return power_supply_charge_behaviour_show(dev, EC_CHARGE_CONTROL_BEHAVIOURS,
+ 						  priv->current_behaviour, buf);
+ }
+@@ -210,6 +224,7 @@ static ssize_t charge_behaviour_store(struct device *dev, struct device_attribut
+ 	if (ret < 0)
+ 		return ret;
+ 
++	guard(mutex)(&priv->lock);
+ 	priv->current_behaviour = ret;
+ 
+ 	ret = cros_chctl_configure_ec(priv);
+@@ -223,12 +238,10 @@ static umode_t cros_chtl_attr_is_visible(struct kobject *kobj, struct attribute
+ {
+ 	struct cros_chctl_priv *priv = cros_chctl_attr_to_priv(attr, n);
+ 
+-	if (priv->cmd_version < 2) {
+-		if (n == CROS_CHCTL_ATTR_START_THRESHOLD)
+-			return 0;
+-		if (n == CROS_CHCTL_ATTR_END_THRESHOLD)
+-			return 0;
+-	}
++	if (n == CROS_CHCTL_ATTR_START_THRESHOLD && priv->cmd_version < 3)
++		return 0;
++	else if (n == CROS_CHCTL_ATTR_END_THRESHOLD && priv->cmd_version < 2)
++		return 0;
+ 
+ 	return attr->mode;
+ }
+@@ -290,6 +303,10 @@ static int cros_chctl_probe(struct platform_device *pdev)
+ 	if (!priv)
+ 		return -ENOMEM;
+ 
++	ret = devm_mutex_init(dev, &priv->lock);
++	if (ret)
++		return ret;
++
+ 	ret = cros_ec_get_cmd_versions(cros_ec, EC_CMD_CHARGE_CONTROL);
+ 	if (ret < 0)
+ 		return ret;
+@@ -327,7 +344,8 @@ static int cros_chctl_probe(struct platform_device *pdev)
+ 	priv->current_end_threshold = 100;
+ 
+ 	/* Bring EC into well-known state */
+-	ret = cros_chctl_configure_ec(priv);
++	scoped_guard(mutex, &priv->lock)
++		ret = cros_chctl_configure_ec(priv);
+ 	if (ret < 0)
+ 		return ret;
+ 
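
The charge-control fix serializes the sysfs handlers with the cleanup.h helpers: guard(mutex)(...) holds the lock until the enclosing scope ends, and scoped_guard(mutex, ...) bounds it to a single statement, so early returns can never leak the lock. A minimal sketch of the two forms; the shared state is hypothetical and this is kernel-only code, not standalone:

    #include <linux/cleanup.h>
    #include <linux/mutex.h>

    static DEFINE_MUTEX(state_lock);
    static int state;	/* hypothetical shared state */

    static int read_state(void)
    {
    	guard(mutex)(&state_lock);	/* dropped automatically at return */
    	return state;
    }

    static void write_state(int v)
    {
    	scoped_guard(mutex, &state_lock)
    		state = v;		/* held only for this statement */
    }
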
+diff --git a/drivers/power/supply/gpio-charger.c b/drivers/power/supply/gpio-charger.c
+index 68212b39785bea..6139f736ecbe4f 100644
+--- a/drivers/power/supply/gpio-charger.c
++++ b/drivers/power/supply/gpio-charger.c
+@@ -67,6 +67,14 @@ static int set_charge_current_limit(struct gpio_charger *gpio_charger, int val)
+ 		if (gpio_charger->current_limit_map[i].limit_ua <= val)
+ 			break;
+ 	}
++
++	/*
++	 * If a valid charge current limit isn't found, default to smallest
++	 * current limitation for safety reasons.
++	 */
++	if (i >= gpio_charger->current_limit_map_size)
++		i = gpio_charger->current_limit_map_size - 1;
++
+ 	mapping = gpio_charger->current_limit_map[i];
+ 
+ 	for (i = 0; i < ndescs; i++) {
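
The gpio-charger table is ordered from largest to smallest limit and the loop picks the first entry not above the request; when the request undercuts every entry the index walks off the end, so the fix clamps it to the last, smallest mapping as the safe default. The lookup in isolation, with made-up table values:

    #include <stddef.h>
    #include <stdio.h>

    struct mapping { int limit_ua; unsigned int gpios; };

    /* Ordered high to low, as the driver requires. */
    static const struct mapping map[] = {
    	{ 1500000, 0x3 }, { 500000, 0x1 }, { 100000, 0x0 },
    };

    static const struct mapping *pick_limit(int val)
    {
    	size_t i, n = sizeof(map) / sizeof(map[0]);

    	for (i = 0; i < n; i++)
    		if (map[i].limit_ua <= val)
    			break;

    	/* Request below every entry: fall back to the smallest limit
    	 * instead of indexing past the end of the array. */
    	if (i >= n)
    		i = n - 1;
    	return &map[i];
    }

    int main(void)
    {
    	printf("50000 uA request -> %d uA limit\n",
    	       pick_limit(50000)->limit_ua);
    	return 0;
    }
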
+diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
+index 8e75e2e279a40a..50f1dcb6d58460 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_base.c
++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
+@@ -8907,8 +8907,11 @@ megasas_aen_polling(struct work_struct *work)
+ 						   (ld_target_id / MEGASAS_MAX_DEV_PER_CHANNEL),
+ 						   (ld_target_id % MEGASAS_MAX_DEV_PER_CHANNEL),
+ 						   0);
+-			if (sdev1)
++			if (sdev1) {
++				mutex_unlock(&instance->reset_mutex);
+ 				megasas_remove_scsi_device(sdev1);
++				mutex_lock(&instance->reset_mutex);
++			}
+ 
+ 			event_type = SCAN_VD_CHANNEL;
+ 			break;
+diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h
+index 81bb408ce56d8f..1e715fd65a7d4b 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr.h
++++ b/drivers/scsi/mpi3mr/mpi3mr.h
+@@ -134,8 +134,6 @@ extern atomic64_t event_counter;
+ 
+ #define MPI3MR_WATCHDOG_INTERVAL		1000 /* in milli seconds */
+ 
+-#define MPI3MR_DEFAULT_CFG_PAGE_SZ		1024 /* in bytes */
+-
+ #define MPI3MR_RESET_TOPOLOGY_SETTLE_TIME	10
+ 
+ #define MPI3MR_SCMD_TIMEOUT    (60 * HZ)
+@@ -1133,9 +1131,6 @@ struct scmd_priv {
+  * @io_throttle_low: I/O size to stop throttle in 512b blocks
+  * @num_io_throttle_group: Maximum number of throttle groups
+  * @throttle_groups: Pointer to throttle group info structures
+- * @cfg_page: Default memory for configuration pages
+- * @cfg_page_dma: Configuration page DMA address
+- * @cfg_page_sz: Default configuration page memory size
+  * @sas_transport_enabled: SAS transport enabled or not
+  * @scsi_device_channel: Channel ID for SCSI devices
+  * @transport_cmds: Command tracker for SAS transport commands
+@@ -1332,10 +1327,6 @@ struct mpi3mr_ioc {
+ 	u16 num_io_throttle_group;
+ 	struct mpi3mr_throttle_group_info *throttle_groups;
+ 
+-	void *cfg_page;
+-	dma_addr_t cfg_page_dma;
+-	u16 cfg_page_sz;
+-
+ 	u8 sas_transport_enabled;
+ 	u8 scsi_device_channel;
+ 	struct mpi3mr_drv_cmd transport_cmds;
+diff --git a/drivers/scsi/mpi3mr/mpi3mr_app.c b/drivers/scsi/mpi3mr/mpi3mr_app.c
+index 01f035f9330e4b..10b8e4dc64f8b0 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr_app.c
++++ b/drivers/scsi/mpi3mr/mpi3mr_app.c
+@@ -2329,6 +2329,15 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job)
+ 	if (!mrioc)
+ 		return -ENODEV;
+ 
++	if (mutex_lock_interruptible(&mrioc->bsg_cmds.mutex))
++		return -ERESTARTSYS;
++
++	if (mrioc->bsg_cmds.state & MPI3MR_CMD_PENDING) {
++		dprint_bsg_err(mrioc, "%s: command is in use\n", __func__);
++		mutex_unlock(&mrioc->bsg_cmds.mutex);
++		return -EAGAIN;
++	}
++
+ 	if (!mrioc->ioctl_sges_allocated) {
+ 		dprint_bsg_err(mrioc, "%s: DMA memory was not allocated\n",
+ 			       __func__);
+@@ -2339,13 +2348,16 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job)
+ 		karg->timeout = MPI3MR_APP_DEFAULT_TIMEOUT;
+ 
+ 	mpi_req = kzalloc(MPI3MR_ADMIN_REQ_FRAME_SZ, GFP_KERNEL);
+-	if (!mpi_req)
++	if (!mpi_req) {
++		mutex_unlock(&mrioc->bsg_cmds.mutex);
+ 		return -ENOMEM;
++	}
+ 	mpi_header = (struct mpi3_request_header *)mpi_req;
+ 
+ 	bufcnt = karg->buf_entry_list.num_of_entries;
+ 	drv_bufs = kzalloc((sizeof(*drv_bufs) * bufcnt), GFP_KERNEL);
+ 	if (!drv_bufs) {
++		mutex_unlock(&mrioc->bsg_cmds.mutex);
+ 		rval = -ENOMEM;
+ 		goto out;
+ 	}
+@@ -2353,6 +2365,7 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job)
+ 	dout_buf = kzalloc(job->request_payload.payload_len,
+ 				      GFP_KERNEL);
+ 	if (!dout_buf) {
++		mutex_unlock(&mrioc->bsg_cmds.mutex);
+ 		rval = -ENOMEM;
+ 		goto out;
+ 	}
+@@ -2360,6 +2373,7 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job)
+ 	din_buf = kzalloc(job->reply_payload.payload_len,
+ 				     GFP_KERNEL);
+ 	if (!din_buf) {
++		mutex_unlock(&mrioc->bsg_cmds.mutex);
+ 		rval = -ENOMEM;
+ 		goto out;
+ 	}
+@@ -2435,6 +2449,7 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job)
+ 					(mpi_msg_size > MPI3MR_ADMIN_REQ_FRAME_SZ)) {
+ 				dprint_bsg_err(mrioc, "%s: invalid MPI message size\n",
+ 					__func__);
++				mutex_unlock(&mrioc->bsg_cmds.mutex);
+ 				rval = -EINVAL;
+ 				goto out;
+ 			}
+@@ -2447,6 +2462,7 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job)
+ 		if (invalid_be) {
+ 			dprint_bsg_err(mrioc, "%s: invalid buffer entries passed\n",
+ 				__func__);
++			mutex_unlock(&mrioc->bsg_cmds.mutex);
+ 			rval = -EINVAL;
+ 			goto out;
+ 		}
+@@ -2454,12 +2470,14 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job)
+ 		if (sgl_dout_iter > (dout_buf + job->request_payload.payload_len)) {
+ 			dprint_bsg_err(mrioc, "%s: data_out buffer length mismatch\n",
+ 				       __func__);
++			mutex_unlock(&mrioc->bsg_cmds.mutex);
+ 			rval = -EINVAL;
+ 			goto out;
+ 		}
+ 		if (sgl_din_iter > (din_buf + job->reply_payload.payload_len)) {
+ 			dprint_bsg_err(mrioc, "%s: data_in buffer length mismatch\n",
+ 				       __func__);
++			mutex_unlock(&mrioc->bsg_cmds.mutex);
+ 			rval = -EINVAL;
+ 			goto out;
+ 		}
+@@ -2472,6 +2490,7 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job)
+ 		dprint_bsg_err(mrioc, "%s:%d: invalid data transfer size passed for function 0x%x din_size = %d, dout_size = %d\n",
+ 			       __func__, __LINE__, mpi_header->function, din_size,
+ 			       dout_size);
++		mutex_unlock(&mrioc->bsg_cmds.mutex);
+ 		rval = -EINVAL;
+ 		goto out;
+ 	}
+@@ -2480,6 +2499,7 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job)
+ 		dprint_bsg_err(mrioc,
+ 		    "%s:%d: invalid data transfer size passed for function 0x%x din_size=%d\n",
+ 		    __func__, __LINE__, mpi_header->function, din_size);
++		mutex_unlock(&mrioc->bsg_cmds.mutex);
+ 		rval = -EINVAL;
+ 		goto out;
+ 	}
+@@ -2487,6 +2507,7 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job)
+ 		dprint_bsg_err(mrioc,
+ 		    "%s:%d: invalid data transfer size passed for function 0x%x dout_size = %d\n",
+ 		    __func__, __LINE__, mpi_header->function, dout_size);
++		mutex_unlock(&mrioc->bsg_cmds.mutex);
+ 		rval = -EINVAL;
+ 		goto out;
+ 	}
+@@ -2497,6 +2518,7 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job)
+ 			dprint_bsg_err(mrioc, "%s:%d: invalid message size passed:%d:%d:%d:%d\n",
+ 				       __func__, __LINE__, din_cnt, dout_cnt, din_size,
+ 			    dout_size);
++			mutex_unlock(&mrioc->bsg_cmds.mutex);
+ 			rval = -EINVAL;
+ 			goto out;
+ 		}
+@@ -2544,6 +2566,7 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job)
+ 				continue;
+ 			if (mpi3mr_map_data_buffer_dma(mrioc, drv_buf_iter, desc_count)) {
+ 				rval = -ENOMEM;
++				mutex_unlock(&mrioc->bsg_cmds.mutex);
+ 				dprint_bsg_err(mrioc, "%s:%d: mapping data buffers failed\n",
+ 					       __func__, __LINE__);
+ 			goto out;
+@@ -2556,20 +2579,11 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job)
+ 		sense_buff_k = kzalloc(erbsz, GFP_KERNEL);
+ 		if (!sense_buff_k) {
+ 			rval = -ENOMEM;
++			mutex_unlock(&mrioc->bsg_cmds.mutex);
+ 			goto out;
+ 		}
+ 	}
+ 
+-	if (mutex_lock_interruptible(&mrioc->bsg_cmds.mutex)) {
+-		rval = -ERESTARTSYS;
+-		goto out;
+-	}
+-	if (mrioc->bsg_cmds.state & MPI3MR_CMD_PENDING) {
+-		rval = -EAGAIN;
+-		dprint_bsg_err(mrioc, "%s: command is in use\n", __func__);
+-		mutex_unlock(&mrioc->bsg_cmds.mutex);
+-		goto out;
+-	}
+ 	if (mrioc->unrecoverable) {
+ 		dprint_bsg_err(mrioc, "%s: unrecoverable controller\n",
+ 		    __func__);
+diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c
+index f1ab76351bd81e..5ed31fe57474a3 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr_fw.c
++++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c
+@@ -1035,6 +1035,36 @@ static const char *mpi3mr_reset_type_name(u16 reset_type)
+ 	return name;
+ }
+ 
++/**
++ * mpi3mr_is_fault_recoverable - Read fault code and decide
++ * whether the controller can be recovered
++ * @mrioc: Adapter instance reference
++ * Return: true if fault is recoverable, false otherwise.
++ */
++static inline bool mpi3mr_is_fault_recoverable(struct mpi3mr_ioc *mrioc)
++{
++	u32 fault;
++
++	fault = (readl(&mrioc->sysif_regs->fault) &
++		      MPI3_SYSIF_FAULT_CODE_MASK);
++
++	switch (fault) {
++	case MPI3_SYSIF_FAULT_CODE_COMPLETE_RESET_NEEDED:
++	case MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED:
++		ioc_warn(mrioc,
++		    "controller requires system power cycle, marking controller as unrecoverable\n");
++		return false;
++	case MPI3_SYSIF_FAULT_CODE_INSUFFICIENT_PCI_SLOT_POWER:
++		ioc_warn(mrioc,
++		    "controller faulted due to insufficient power,\n"
++		    " try connecting it to a different slot\n");
++		return false;
++	default:
++		break;
++	}
++	return true;
++}
++
+ /**
+  * mpi3mr_print_fault_info - Display fault information
+  * @mrioc: Adapter instance reference
+@@ -1373,6 +1403,11 @@ static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc)
+ 	ioc_info(mrioc, "ioc_status(0x%08x), ioc_config(0x%08x), ioc_info(0x%016llx) at the bringup\n",
+ 	    ioc_status, ioc_config, base_info);
+ 
++	if (!mpi3mr_is_fault_recoverable(mrioc)) {
++		mrioc->unrecoverable = 1;
++		goto out_device_not_present;
++	}
++
+ 	/*The timeout value is in 2sec unit, changing it to seconds*/
+ 	mrioc->ready_timeout =
+ 	    ((base_info & MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_MASK) >>
+@@ -2734,6 +2769,11 @@ static void mpi3mr_watchdog_work(struct work_struct *work)
+ 	mpi3mr_print_fault_info(mrioc);
+ 	mrioc->diagsave_timeout = 0;
+ 
++	if (!mpi3mr_is_fault_recoverable(mrioc)) {
++		mrioc->unrecoverable = 1;
++		goto schedule_work;
++	}
++
+ 	switch (trigger_data.fault) {
+ 	case MPI3_SYSIF_FAULT_CODE_COMPLETE_RESET_NEEDED:
+ 	case MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED:
+@@ -4186,17 +4226,6 @@ int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc)
+ 	mpi3mr_read_tsu_interval(mrioc);
+ 	mpi3mr_print_ioc_info(mrioc);
+ 
+-	if (!mrioc->cfg_page) {
+-		dprint_init(mrioc, "allocating config page buffers\n");
+-		mrioc->cfg_page_sz = MPI3MR_DEFAULT_CFG_PAGE_SZ;
+-		mrioc->cfg_page = dma_alloc_coherent(&mrioc->pdev->dev,
+-		    mrioc->cfg_page_sz, &mrioc->cfg_page_dma, GFP_KERNEL);
+-		if (!mrioc->cfg_page) {
+-			retval = -1;
+-			goto out_failed_noretry;
+-		}
+-	}
+-
+ 	dprint_init(mrioc, "allocating host diag buffers\n");
+ 	mpi3mr_alloc_diag_bufs(mrioc);
+ 
+@@ -4768,11 +4797,7 @@ void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
+ 		    mrioc->admin_req_base, mrioc->admin_req_dma);
+ 		mrioc->admin_req_base = NULL;
+ 	}
+-	if (mrioc->cfg_page) {
+-		dma_free_coherent(&mrioc->pdev->dev, mrioc->cfg_page_sz,
+-		    mrioc->cfg_page, mrioc->cfg_page_dma);
+-		mrioc->cfg_page = NULL;
+-	}
++
+ 	if (mrioc->pel_seqnum_virt) {
+ 		dma_free_coherent(&mrioc->pdev->dev, mrioc->pel_seqnum_sz,
+ 		    mrioc->pel_seqnum_virt, mrioc->pel_seqnum_dma);
+@@ -5392,55 +5417,6 @@ int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
+ 	return retval;
+ }
+ 
+-
+-/**
+- * mpi3mr_free_config_dma_memory - free memory for config page
+- * @mrioc: Adapter instance reference
+- * @mem_desc: memory descriptor structure
+- *
+- * Check whether the size of the buffer specified by the memory
+- * descriptor is greater than the default page size if so then
+- * free the memory pointed by the descriptor.
+- *
+- * Return: Nothing.
+- */
+-static void mpi3mr_free_config_dma_memory(struct mpi3mr_ioc *mrioc,
+-	struct dma_memory_desc *mem_desc)
+-{
+-	if ((mem_desc->size > mrioc->cfg_page_sz) && mem_desc->addr) {
+-		dma_free_coherent(&mrioc->pdev->dev, mem_desc->size,
+-		    mem_desc->addr, mem_desc->dma_addr);
+-		mem_desc->addr = NULL;
+-	}
+-}
+-
+-/**
+- * mpi3mr_alloc_config_dma_memory - Alloc memory for config page
+- * @mrioc: Adapter instance reference
+- * @mem_desc: Memory descriptor to hold dma memory info
+- *
+- * This function allocates new dmaable memory or provides the
+- * default config page dmaable memory based on the memory size
+- * described by the descriptor.
+- *
+- * Return: 0 on success, non-zero on failure.
+- */
+-static int mpi3mr_alloc_config_dma_memory(struct mpi3mr_ioc *mrioc,
+-	struct dma_memory_desc *mem_desc)
+-{
+-	if (mem_desc->size > mrioc->cfg_page_sz) {
+-		mem_desc->addr = dma_alloc_coherent(&mrioc->pdev->dev,
+-		    mem_desc->size, &mem_desc->dma_addr, GFP_KERNEL);
+-		if (!mem_desc->addr)
+-			return -ENOMEM;
+-	} else {
+-		mem_desc->addr = mrioc->cfg_page;
+-		mem_desc->dma_addr = mrioc->cfg_page_dma;
+-		memset(mem_desc->addr, 0, mrioc->cfg_page_sz);
+-	}
+-	return 0;
+-}
+-
+ /**
+  * mpi3mr_post_cfg_req - Issue config requests and wait
+  * @mrioc: Adapter instance reference
+@@ -5596,8 +5572,12 @@ static int mpi3mr_process_cfg_req(struct mpi3mr_ioc *mrioc,
+ 		cfg_req->page_length = cfg_hdr->page_length;
+ 		cfg_req->page_version = cfg_hdr->page_version;
+ 	}
+-	if (mpi3mr_alloc_config_dma_memory(mrioc, &mem_desc))
+-		goto out;
++
++	mem_desc.addr = dma_alloc_coherent(&mrioc->pdev->dev,
++		mem_desc.size, &mem_desc.dma_addr, GFP_KERNEL);
++
++	if (!mem_desc.addr)
++		return retval;
+ 
+ 	mpi3mr_add_sg_single(&cfg_req->sgl, sgl_flags, mem_desc.size,
+ 	    mem_desc.dma_addr);
+@@ -5626,7 +5606,12 @@ static int mpi3mr_process_cfg_req(struct mpi3mr_ioc *mrioc,
+ 	}
+ 
+ out:
+-	mpi3mr_free_config_dma_memory(mrioc, &mem_desc);
++	if (mem_desc.addr) {
++		dma_free_coherent(&mrioc->pdev->dev, mem_desc.size,
++			mem_desc.addr, mem_desc.dma_addr);
++		mem_desc.addr = NULL;
++	}
++
+ 	return retval;
+ }
+ 
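
The mpi3mr_fw.c changes above drop the preallocated default config page in
favour of allocating a DMA buffer per request and freeing it on the shared
exit path. A standalone sketch of that allocate-per-call lifecycle, with
calloc()/free() standing in for dma_alloc_coherent()/dma_free_coherent() and
an invented size:

#include <stdio.h>
#include <stdlib.h>

struct mem_desc {
	void *addr;
	size_t size;
};

static int process_cfg_req(size_t size)
{
	struct mem_desc mem = { .size = size };
	int retval = -1;

	/* allocate fresh for each request (dma_alloc_coherent() analogue) */
	mem.addr = calloc(1, mem.size);
	if (!mem.addr)
		return retval;

	/* ... post the request and copy the page out of mem.addr ... */
	retval = 0;

	/* shared exit path frees unconditionally, as in the hunk above */
	if (mem.addr) {
		free(mem.addr);		/* dma_free_coherent() analogue */
		mem.addr = NULL;
	}
	return retval;
}

int main(void)
{
	printf("%d\n", process_cfg_req(1024));
	return 0;
}
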
+diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
+index 5f2f67acf8bf31..1bef88130d0c06 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr_os.c
++++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
+@@ -5215,7 +5215,7 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	}
+ 
+ 	mrioc = shost_priv(shost);
+-	retval = ida_alloc_range(&mrioc_ida, 1, U8_MAX, GFP_KERNEL);
++	retval = ida_alloc_range(&mrioc_ida, 0, U8_MAX, GFP_KERNEL);
+ 	if (retval < 0)
+ 		goto id_alloc_failed;
+ 	mrioc->id = (u8)retval;
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
+index ed5046593fdab6..16ac2267c71e19 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
+@@ -7041,11 +7041,12 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
+ 	int i;
+ 	u8 failed;
+ 	__le32 *mfp;
++	int ret_val;
+ 
+ 	/* make sure doorbell is not in use */
+ 	if ((ioc->base_readl_ext_retry(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
+ 		ioc_err(ioc, "doorbell is in use (line=%d)\n", __LINE__);
+-		return -EFAULT;
++		goto doorbell_diag_reset;
+ 	}
+ 
+ 	/* clear pending doorbell interrupts from previous state changes */
+@@ -7135,6 +7136,10 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
+ 			    le32_to_cpu(mfp[i]));
+ 	}
+ 	return 0;
++
++doorbell_diag_reset:
++	ret_val = _base_diag_reset(ioc);
++	return ret_val;
+ }
+ 
+ /**
+diff --git a/drivers/scsi/qla1280.h b/drivers/scsi/qla1280.h
+index d309e2ca14deb3..dea2290b37d4d7 100644
+--- a/drivers/scsi/qla1280.h
++++ b/drivers/scsi/qla1280.h
+@@ -116,12 +116,12 @@ struct device_reg {
+ 	uint16_t id_h;		/* ID high */
+ 	uint16_t cfg_0;		/* Configuration 0 */
+ #define ISP_CFG0_HWMSK   0x000f	/* Hardware revision mask */
+-#define ISP_CFG0_1020    BIT_0	/* ISP1020 */
+-#define ISP_CFG0_1020A	 BIT_1	/* ISP1020A */
+-#define ISP_CFG0_1040	 BIT_2	/* ISP1040 */
+-#define ISP_CFG0_1040A	 BIT_3	/* ISP1040A */
+-#define ISP_CFG0_1040B	 BIT_4	/* ISP1040B */
+-#define ISP_CFG0_1040C	 BIT_5	/* ISP1040C */
++#define ISP_CFG0_1020	 1	/* ISP1020 */
++#define ISP_CFG0_1020A	 2	/* ISP1020A */
++#define ISP_CFG0_1040	 3	/* ISP1040 */
++#define ISP_CFG0_1040A	 4	/* ISP1040A */
++#define ISP_CFG0_1040B	 5	/* ISP1040B */
++#define ISP_CFG0_1040C	 6	/* ISP1040C */
+ 	uint16_t cfg_1;		/* Configuration 1 */
+ #define ISP_CFG1_F128    BIT_6  /* 128-byte FIFO threshold */
+ #define ISP_CFG1_F64     BIT_4|BIT_5 /* 128-byte FIFO threshold */
+diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
+index 7ceb982040a5df..d0b55c1fa908a5 100644
+--- a/drivers/scsi/storvsc_drv.c
++++ b/drivers/scsi/storvsc_drv.c
+@@ -149,6 +149,8 @@ struct hv_fc_wwn_packet {
+ */
+ static int vmstor_proto_version;
+ 
++static bool hv_dev_is_fc(struct hv_device *hv_dev);
++
+ #define STORVSC_LOGGING_NONE	0
+ #define STORVSC_LOGGING_ERROR	1
+ #define STORVSC_LOGGING_WARN	2
+@@ -1138,6 +1140,7 @@ static void storvsc_on_io_completion(struct storvsc_device *stor_device,
+ 	 * not correctly handle:
+ 	 * INQUIRY command with page code parameter set to 0x80
+ 	 * MODE_SENSE command with cmd[2] == 0x1c
++	 * MAINTENANCE_IN is not supported by HyperV FC passthrough
+ 	 *
+ 	 * Setup srb and scsi status so this won't be fatal.
+ 	 * We do this so we can distinguish truly fatal failures
+@@ -1145,7 +1148,9 @@ static void storvsc_on_io_completion(struct storvsc_device *stor_device,
+ 	 */
+ 
+ 	if ((stor_pkt->vm_srb.cdb[0] == INQUIRY) ||
+-	   (stor_pkt->vm_srb.cdb[0] == MODE_SENSE)) {
++	   (stor_pkt->vm_srb.cdb[0] == MODE_SENSE) ||
++	   (stor_pkt->vm_srb.cdb[0] == MAINTENANCE_IN &&
++	   hv_dev_is_fc(device))) {
+ 		vstor_packet->vm_srb.scsi_status = 0;
+ 		vstor_packet->vm_srb.srb_status = SRB_STATUS_SUCCESS;
+ 	}
+diff --git a/drivers/spi/spi-intel-pci.c b/drivers/spi/spi-intel-pci.c
+index 4337ca51d7aa21..5c0dec90eec1df 100644
+--- a/drivers/spi/spi-intel-pci.c
++++ b/drivers/spi/spi-intel-pci.c
+@@ -86,6 +86,8 @@ static const struct pci_device_id intel_spi_pci_ids[] = {
+ 	{ PCI_VDEVICE(INTEL, 0xa324), (unsigned long)&cnl_info },
+ 	{ PCI_VDEVICE(INTEL, 0xa3a4), (unsigned long)&cnl_info },
+ 	{ PCI_VDEVICE(INTEL, 0xa823), (unsigned long)&cnl_info },
++	{ PCI_VDEVICE(INTEL, 0xe323), (unsigned long)&cnl_info },
++	{ PCI_VDEVICE(INTEL, 0xe423), (unsigned long)&cnl_info },
+ 	{ },
+ };
+ MODULE_DEVICE_TABLE(pci, intel_spi_pci_ids);
+diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
+index 2c043817c66a88..4a2f84c4d22e5f 100644
+--- a/drivers/spi/spi-omap2-mcspi.c
++++ b/drivers/spi/spi-omap2-mcspi.c
+@@ -1561,10 +1561,10 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	mcspi->ref_clk = devm_clk_get_optional_enabled(&pdev->dev, NULL);
+-	if (mcspi->ref_clk)
+-		mcspi->ref_clk_hz = clk_get_rate(mcspi->ref_clk);
+-	else
++	if (IS_ERR(mcspi->ref_clk))
+ 		mcspi->ref_clk_hz = OMAP2_MCSPI_MAX_FREQ;
++	else
++		mcspi->ref_clk_hz = clk_get_rate(mcspi->ref_clk);
+ 	ctlr->max_speed_hz = mcspi->ref_clk_hz;
+ 	ctlr->min_speed_hz = mcspi->ref_clk_hz >> 15;
+ 
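
The spi-omap2-mcspi fix above hinges on the kernel's ERR_PTR convention:
devm_clk_get_optional_enabled() reports failure through an error value
encoded in the pointer itself, so the old plain truth test treated an error
pointer as a valid clock. A minimal userspace sketch of the convention
(MAX_ERRNO and the helpers mirror <linux/err.h>; the -ENOENT value is
illustrative):

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

int main(void)
{
	void *clk = ERR_PTR(-ENOENT);	/* what a failed *_get() returns */

	if (clk)			/* old test: error pointer looks valid */
		printf("plain NULL check: \"valid\"\n");
	if (IS_ERR(clk))		/* fixed test: recognized as an error */
		printf("IS_ERR check: error %ld\n", PTR_ERR(clk));
	return 0;
}
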
+diff --git a/drivers/virt/coco/tdx-guest/tdx-guest.c b/drivers/virt/coco/tdx-guest/tdx-guest.c
+index d7db6c824e13de..224e7dde9cdee8 100644
+--- a/drivers/virt/coco/tdx-guest/tdx-guest.c
++++ b/drivers/virt/coco/tdx-guest/tdx-guest.c
+@@ -124,10 +124,8 @@ static void *alloc_quote_buf(void)
+ 	if (!addr)
+ 		return NULL;
+ 
+-	if (set_memory_decrypted((unsigned long)addr, count)) {
+-		free_pages_exact(addr, len);
++	if (set_memory_decrypted((unsigned long)addr, count))
+ 		return NULL;
+-	}
+ 
+ 	return addr;
+ }
+diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
+index 94c96bcfefe347..0b59c669c26d35 100644
+--- a/drivers/watchdog/Kconfig
++++ b/drivers/watchdog/Kconfig
+@@ -549,6 +549,7 @@ config S3C2410_WATCHDOG
+ 	tristate "S3C6410/S5Pv210/Exynos Watchdog"
+ 	depends on ARCH_S3C64XX || ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
+ 	select WATCHDOG_CORE
++	select MFD_SYSCON if ARCH_EXYNOS
+ 	help
+ 	  Watchdog timer block in the Samsung S3C64xx, S5Pv210 and Exynos
+ 	  SoCs. This will reboot the system when the timer expires with
+diff --git a/drivers/watchdog/it87_wdt.c b/drivers/watchdog/it87_wdt.c
+index 3e8c15138eddad..1a5a0a2c3f2e37 100644
+--- a/drivers/watchdog/it87_wdt.c
++++ b/drivers/watchdog/it87_wdt.c
+@@ -20,6 +20,8 @@
+ 
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+ 
++#include <linux/bits.h>
++#include <linux/dmi.h>
+ #include <linux/init.h>
+ #include <linux/io.h>
+ #include <linux/kernel.h>
+@@ -40,6 +42,7 @@
+ #define VAL		0x2f
+ 
+ /* Logical device Numbers LDN */
++#define EC		0x04
+ #define GPIO		0x07
+ 
+ /* Configuration Registers and Functions */
+@@ -73,6 +76,12 @@
+ #define IT8784_ID	0x8784
+ #define IT8786_ID	0x8786
+ 
++/* Environment Controller Configuration Registers LDN=0x04 */
++#define SCR1		0xfa
++
++/* Environment Controller Bits SCR1 */
++#define WDT_PWRGD	0x20
++
+ /* GPIO Configuration Registers LDN=0x07 */
+ #define WDTCTRL		0x71
+ #define WDTCFG		0x72
+@@ -240,6 +249,21 @@ static int wdt_set_timeout(struct watchdog_device *wdd, unsigned int t)
+ 	return ret;
+ }
+ 
++enum {
++	IT87_WDT_OUTPUT_THROUGH_PWRGD	= BIT(0),
++};
++
++static const struct dmi_system_id it87_quirks[] = {
++	{
++		/* Qotom Q30900P (IT8786) */
++		.matches = {
++			DMI_EXACT_MATCH(DMI_BOARD_NAME, "QCML04"),
++		},
++		.driver_data = (void *)IT87_WDT_OUTPUT_THROUGH_PWRGD,
++	},
++	{}
++};
++
+ static const struct watchdog_info ident = {
+ 	.options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING,
+ 	.firmware_version = 1,
+@@ -261,8 +285,10 @@ static struct watchdog_device wdt_dev = {
+ 
+ static int __init it87_wdt_init(void)
+ {
++	const struct dmi_system_id *dmi_id;
+ 	u8  chip_rev;
+ 	u8 ctrl;
++	int quirks = 0;
+ 	int rc;
+ 
+ 	rc = superio_enter();
+@@ -273,6 +299,10 @@ static int __init it87_wdt_init(void)
+ 	chip_rev  = superio_inb(CHIPREV) & 0x0f;
+ 	superio_exit();
+ 
++	dmi_id = dmi_first_match(it87_quirks);
++	if (dmi_id)
++		quirks = (long)dmi_id->driver_data;
++
+ 	switch (chip_type) {
+ 	case IT8702_ID:
+ 		max_units = 255;
+@@ -333,6 +363,15 @@ static int __init it87_wdt_init(void)
+ 		superio_outb(0x00, WDTCTRL);
+ 	}
+ 
++	if (quirks & IT87_WDT_OUTPUT_THROUGH_PWRGD) {
++		superio_select(EC);
++		ctrl = superio_inb(SCR1);
++		if (!(ctrl & WDT_PWRGD)) {
++			ctrl |= WDT_PWRGD;
++			superio_outb(ctrl, SCR1);
++		}
++	}
++
+ 	superio_exit();
+ 
+ 	if (timeout < 1 || timeout > max_units * 60) {
+diff --git a/drivers/watchdog/mtk_wdt.c b/drivers/watchdog/mtk_wdt.c
+index e2d7a57d6ea2e7..91d110646e16f7 100644
+--- a/drivers/watchdog/mtk_wdt.c
++++ b/drivers/watchdog/mtk_wdt.c
+@@ -10,6 +10,7 @@
+  */
+ 
+ #include <dt-bindings/reset/mt2712-resets.h>
++#include <dt-bindings/reset/mediatek,mt6735-wdt.h>
+ #include <dt-bindings/reset/mediatek,mt6795-resets.h>
+ #include <dt-bindings/reset/mt7986-resets.h>
+ #include <dt-bindings/reset/mt8183-resets.h>
+@@ -87,6 +88,10 @@ static const struct mtk_wdt_data mt2712_data = {
+ 	.toprgu_sw_rst_num = MT2712_TOPRGU_SW_RST_NUM,
+ };
+ 
++static const struct mtk_wdt_data mt6735_data = {
++	.toprgu_sw_rst_num = MT6735_TOPRGU_RST_NUM,
++};
++
+ static const struct mtk_wdt_data mt6795_data = {
+ 	.toprgu_sw_rst_num = MT6795_TOPRGU_SW_RST_NUM,
+ };
+@@ -489,6 +494,7 @@ static int mtk_wdt_resume(struct device *dev)
+ static const struct of_device_id mtk_wdt_dt_ids[] = {
+ 	{ .compatible = "mediatek,mt2712-wdt", .data = &mt2712_data },
+ 	{ .compatible = "mediatek,mt6589-wdt" },
++	{ .compatible = "mediatek,mt6735-wdt", .data = &mt6735_data },
+ 	{ .compatible = "mediatek,mt6795-wdt", .data = &mt6795_data },
+ 	{ .compatible = "mediatek,mt7986-wdt", .data = &mt7986_data },
+ 	{ .compatible = "mediatek,mt7988-wdt", .data = &mt7988_data },
+diff --git a/drivers/watchdog/rzg2l_wdt.c b/drivers/watchdog/rzg2l_wdt.c
+index 2a35f890a2883a..11bbe48160ec9c 100644
+--- a/drivers/watchdog/rzg2l_wdt.c
++++ b/drivers/watchdog/rzg2l_wdt.c
+@@ -12,6 +12,7 @@
+ #include <linux/module.h>
+ #include <linux/of.h>
+ #include <linux/platform_device.h>
++#include <linux/pm_domain.h>
+ #include <linux/pm_runtime.h>
+ #include <linux/reset.h>
+ #include <linux/units.h>
+@@ -166,8 +167,22 @@ static int rzg2l_wdt_restart(struct watchdog_device *wdev,
+ 	struct rzg2l_wdt_priv *priv = watchdog_get_drvdata(wdev);
+ 	int ret;
+ 
+-	clk_prepare_enable(priv->pclk);
+-	clk_prepare_enable(priv->osc_clk);
++	 * In case of RZ/G3S the watchdog device may be part of an IRQ safe power
++	 * domain that is currently powered off. In this case we need to power
++	 * it on before accessing registers. Along with this the clocks will be
++	 * enabled. We don't undo the pm_runtime_resume_and_get() as the device
++	 * needs to be on for the reboot to happen.
++	 *
++	 * For the rest of the SoCs, which do not register a watchdog IRQ safe
++	 * power domain, it is safe to call pm_runtime_resume_and_get() as the
++	 * irq_safe_dev_in_sleep_domain() call in genpd_runtime_resume()
++	 * returns a non-zero value and the genpd_lock() is avoided; thus, no
++	 * invalid wait context is reported by lockdep.
++	 * will be no invalid wait context reported by lockdep.
++	 */
++	ret = pm_runtime_resume_and_get(wdev->parent);
++	if (ret)
++		return ret;
+ 
+ 	if (priv->devtype == WDT_RZG2L) {
+ 		ret = reset_control_deassert(priv->rstc);
+@@ -275,6 +290,7 @@ static int rzg2l_wdt_probe(struct platform_device *pdev)
+ 
+ 	priv->devtype = (uintptr_t)of_device_get_match_data(dev);
+ 
++	pm_runtime_irq_safe(&pdev->dev);
+ 	pm_runtime_enable(&pdev->dev);
+ 
+ 	priv->wdev.info = &rzg2l_wdt_ident;
+diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
+index 686cf544d0ae7a..349d30462c8c0c 100644
+--- a/drivers/watchdog/s3c2410_wdt.c
++++ b/drivers/watchdog/s3c2410_wdt.c
+@@ -24,9 +24,9 @@
+ #include <linux/slab.h>
+ #include <linux/err.h>
+ #include <linux/of.h>
++#include <linux/mfd/syscon.h>
+ #include <linux/regmap.h>
+ #include <linux/delay.h>
+-#include <linux/soc/samsung/exynos-pmu.h>
+ 
+ #define S3C2410_WTCON		0x00
+ #define S3C2410_WTDAT		0x04
+@@ -699,11 +699,11 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
+ 		return ret;
+ 
+ 	if (wdt->drv_data->quirks & QUIRKS_HAVE_PMUREG) {
+-		wdt->pmureg = exynos_get_pmu_regmap_by_phandle(dev->of_node,
+-						 "samsung,syscon-phandle");
++		wdt->pmureg = syscon_regmap_lookup_by_phandle(dev->of_node,
++						"samsung,syscon-phandle");
+ 		if (IS_ERR(wdt->pmureg))
+ 			return dev_err_probe(dev, PTR_ERR(wdt->pmureg),
+-					     "PMU regmap lookup failed.\n");
++					     "syscon regmap lookup failed.\n");
+ 	}
+ 
+ 	wdt_irq = platform_get_irq(pdev, 0);
+diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
+index 9c05cab473f577..29c16459740112 100644
+--- a/fs/btrfs/ctree.c
++++ b/fs/btrfs/ctree.c
+@@ -654,6 +654,8 @@ int btrfs_force_cow_block(struct btrfs_trans_handle *trans,
+ 			goto error_unlock_cow;
+ 		}
+ 	}
++
++	trace_btrfs_cow_block(root, buf, cow);
+ 	if (unlock_orig)
+ 		btrfs_tree_unlock(buf);
+ 	free_extent_buffer_stale(buf);
+@@ -710,7 +712,6 @@ int btrfs_cow_block(struct btrfs_trans_handle *trans,
+ {
+ 	struct btrfs_fs_info *fs_info = root->fs_info;
+ 	u64 search_start;
+-	int ret;
+ 
+ 	if (unlikely(test_bit(BTRFS_ROOT_DELETING, &root->state))) {
+ 		btrfs_abort_transaction(trans, -EUCLEAN);
+@@ -751,12 +752,8 @@ int btrfs_cow_block(struct btrfs_trans_handle *trans,
+ 	 * Also We don't care about the error, as it's handled internally.
+ 	 */
+ 	btrfs_qgroup_trace_subtree_after_cow(trans, root, buf);
+-	ret = btrfs_force_cow_block(trans, root, buf, parent, parent_slot,
+-				    cow_ret, search_start, 0, nest);
+-
+-	trace_btrfs_cow_block(root, buf, *cow_ret);
+-
+-	return ret;
++	return btrfs_force_cow_block(trans, root, buf, parent, parent_slot,
++				     cow_ret, search_start, 0, nest);
+ }
+ ALLOW_ERROR_INJECTION(btrfs_cow_block, ERRNO);
+ 
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 58ffe78132d9d6..4b3e256e0d0b88 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -7117,6 +7117,8 @@ noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
+ 			ret = -EAGAIN;
+ 			goto out;
+ 		}
++
++		cond_resched();
+ 	}
+ 
+ 	if (file_extent)
+@@ -9780,15 +9782,25 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
+ 	struct btrfs_fs_info *fs_info = root->fs_info;
+ 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
+ 	struct extent_state *cached_state = NULL;
+-	struct extent_map *em = NULL;
+ 	struct btrfs_chunk_map *map = NULL;
+ 	struct btrfs_device *device = NULL;
+ 	struct btrfs_swap_info bsi = {
+ 		.lowest_ppage = (sector_t)-1ULL,
+ 	};
++	struct btrfs_backref_share_check_ctx *backref_ctx = NULL;
++	struct btrfs_path *path = NULL;
+ 	int ret = 0;
+ 	u64 isize;
+-	u64 start;
++	u64 prev_extent_end = 0;
++
++	/*
++	 * Acquire the inode's mmap lock to prevent races with memory mapped
++	 * we lock the extent range further below. The inode was already locked
++	 * higher up in the call chain.
++	 * up in the call chain.
++	 */
++	btrfs_assert_inode_locked(BTRFS_I(inode));
++	down_write(&BTRFS_I(inode)->i_mmap_lock);
+ 
+ 	/*
+ 	 * If the swap file was just created, make sure delalloc is done. If the
+@@ -9797,22 +9809,32 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
+ 	 */
+ 	ret = btrfs_wait_ordered_range(BTRFS_I(inode), 0, (u64)-1);
+ 	if (ret)
+-		return ret;
++		goto out_unlock_mmap;
+ 
+ 	/*
+ 	 * The inode is locked, so these flags won't change after we check them.
+ 	 */
+ 	if (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS) {
+ 		btrfs_warn(fs_info, "swapfile must not be compressed");
+-		return -EINVAL;
++		ret = -EINVAL;
++		goto out_unlock_mmap;
+ 	}
+ 	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)) {
+ 		btrfs_warn(fs_info, "swapfile must not be copy-on-write");
+-		return -EINVAL;
++		ret = -EINVAL;
++		goto out_unlock_mmap;
+ 	}
+ 	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
+ 		btrfs_warn(fs_info, "swapfile must not be checksummed");
+-		return -EINVAL;
++		ret = -EINVAL;
++		goto out_unlock_mmap;
++	}
++
++	path = btrfs_alloc_path();
++	backref_ctx = btrfs_alloc_backref_share_check_ctx();
++	if (!path || !backref_ctx) {
++		ret = -ENOMEM;
++		goto out_unlock_mmap;
+ 	}
+ 
+ 	/*
+@@ -9827,7 +9849,8 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
+ 	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_SWAP_ACTIVATE)) {
+ 		btrfs_warn(fs_info,
+ 	   "cannot activate swapfile while exclusive operation is running");
+-		return -EBUSY;
++		ret = -EBUSY;
++		goto out_unlock_mmap;
+ 	}
+ 
+ 	/*
+@@ -9841,7 +9864,8 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
+ 		btrfs_exclop_finish(fs_info);
+ 		btrfs_warn(fs_info,
+ 	   "cannot activate swapfile because snapshot creation is in progress");
+-		return -EINVAL;
++		ret = -EINVAL;
++		goto out_unlock_mmap;
+ 	}
+ 	/*
+ 	 * Snapshots can create extents which require COW even if NODATACOW is
+@@ -9862,7 +9886,8 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
+ 		btrfs_warn(fs_info,
+ 		"cannot activate swapfile because subvolume %llu is being deleted",
+ 			btrfs_root_id(root));
+-		return -EPERM;
++		ret = -EPERM;
++		goto out_unlock_mmap;
+ 	}
+ 	atomic_inc(&root->nr_swapfiles);
+ 	spin_unlock(&root->root_item_lock);
+@@ -9870,24 +9895,39 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
+ 	isize = ALIGN_DOWN(inode->i_size, fs_info->sectorsize);
+ 
+ 	lock_extent(io_tree, 0, isize - 1, &cached_state);
+-	start = 0;
+-	while (start < isize) {
+-		u64 logical_block_start, physical_block_start;
++	while (prev_extent_end < isize) {
++		struct btrfs_key key;
++		struct extent_buffer *leaf;
++		struct btrfs_file_extent_item *ei;
+ 		struct btrfs_block_group *bg;
+-		u64 len = isize - start;
++		u64 logical_block_start;
++		u64 physical_block_start;
++		u64 extent_gen;
++		u64 disk_bytenr;
++		u64 len;
+ 
+-		em = btrfs_get_extent(BTRFS_I(inode), NULL, start, len);
+-		if (IS_ERR(em)) {
+-			ret = PTR_ERR(em);
++		key.objectid = btrfs_ino(BTRFS_I(inode));
++		key.type = BTRFS_EXTENT_DATA_KEY;
++		key.offset = prev_extent_end;
++
++		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
++		if (ret < 0)
+ 			goto out;
+-		}
+ 
+-		if (em->disk_bytenr == EXTENT_MAP_HOLE) {
++		/*
++		 * If the key is not found, it means we have an implicit hole
++		 * (NO_HOLES is enabled).
++		 */
++		if (ret > 0) {
+ 			btrfs_warn(fs_info, "swapfile must not have holes");
+ 			ret = -EINVAL;
+ 			goto out;
+ 		}
+-		if (em->disk_bytenr == EXTENT_MAP_INLINE) {
++
++		leaf = path->nodes[0];
++		ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
++
++		if (btrfs_file_extent_type(leaf, ei) == BTRFS_FILE_EXTENT_INLINE) {
+ 			/*
+ 			 * It's unlikely we'll ever actually find ourselves
+ 			 * here, as a file small enough to fit inline won't be
+@@ -9899,23 +9939,45 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
+ 			ret = -EINVAL;
+ 			goto out;
+ 		}
+-		if (extent_map_is_compressed(em)) {
++
++		if (btrfs_file_extent_compression(leaf, ei) != BTRFS_COMPRESS_NONE) {
+ 			btrfs_warn(fs_info, "swapfile must not be compressed");
+ 			ret = -EINVAL;
+ 			goto out;
+ 		}
+ 
+-		logical_block_start = extent_map_block_start(em) + (start - em->start);
+-		len = min(len, em->len - (start - em->start));
+-		free_extent_map(em);
+-		em = NULL;
++		disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, ei);
++		if (disk_bytenr == 0) {
++			btrfs_warn(fs_info, "swapfile must not have holes");
++			ret = -EINVAL;
++			goto out;
++		}
++
++		logical_block_start = disk_bytenr + btrfs_file_extent_offset(leaf, ei);
++		extent_gen = btrfs_file_extent_generation(leaf, ei);
++		prev_extent_end = btrfs_file_extent_end(path);
++
++		if (prev_extent_end > isize)
++			len = isize - key.offset;
++		else
++			len = btrfs_file_extent_num_bytes(leaf, ei);
++
++		backref_ctx->curr_leaf_bytenr = leaf->start;
++
++		/*
++		 * Don't need the path anymore, release to avoid deadlocks when
++		 * calling btrfs_is_data_extent_shared() because when joining a
++		 * transaction it can block waiting for the current one's commit
++		 * which in turn may be trying to lock the same leaf to flush
++		 * delayed items for example.
++		 */
++		btrfs_release_path(path);
+ 
+-		ret = can_nocow_extent(inode, start, &len, NULL, false, true);
++		ret = btrfs_is_data_extent_shared(BTRFS_I(inode), disk_bytenr,
++						  extent_gen, backref_ctx);
+ 		if (ret < 0) {
+ 			goto out;
+-		} else if (ret) {
+-			ret = 0;
+-		} else {
++		} else if (ret > 0) {
+ 			btrfs_warn(fs_info,
+ 				   "swapfile must not be copy-on-write");
+ 			ret = -EINVAL;
+@@ -9950,7 +10012,6 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
+ 
+ 		physical_block_start = (map->stripes[0].physical +
+ 					(logical_block_start - map->start));
+-		len = min(len, map->chunk_len - (logical_block_start - map->start));
+ 		btrfs_free_chunk_map(map);
+ 		map = NULL;
+ 
+@@ -9991,20 +10052,16 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
+ 				if (ret)
+ 					goto out;
+ 			}
+-			bsi.start = start;
++			bsi.start = key.offset;
+ 			bsi.block_start = physical_block_start;
+ 			bsi.block_len = len;
+ 		}
+-
+-		start += len;
+ 	}
+ 
+ 	if (bsi.block_len)
+ 		ret = btrfs_add_swap_extent(sis, &bsi);
+ 
+ out:
+-	if (!IS_ERR_OR_NULL(em))
+-		free_extent_map(em);
+ 	if (!IS_ERR_OR_NULL(map))
+ 		btrfs_free_chunk_map(map);
+ 
+@@ -10017,6 +10074,10 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
+ 
+ 	btrfs_exclop_finish(fs_info);
+ 
++out_unlock_mmap:
++	up_write(&BTRFS_I(inode)->i_mmap_lock);
++	btrfs_free_backref_share_ctx(backref_ctx);
++	btrfs_free_path(path);
+ 	if (ret)
+ 		return ret;
+ 
+diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
+index a0e8deca87a7a6..e70ed857fc743b 100644
+--- a/fs/btrfs/qgroup.c
++++ b/fs/btrfs/qgroup.c
+@@ -1122,6 +1122,7 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info,
+ 	fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON;
+ 	if (simple) {
+ 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE;
++		btrfs_set_fs_incompat(fs_info, SIMPLE_QUOTA);
+ 		btrfs_set_qgroup_status_enable_gen(leaf, ptr, trans->transid);
+ 	} else {
+ 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
+@@ -1255,8 +1256,6 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info,
+ 	spin_lock(&fs_info->qgroup_lock);
+ 	fs_info->quota_root = quota_root;
+ 	set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
+-	if (simple)
+-		btrfs_set_fs_incompat(fs_info, SIMPLE_QUOTA);
+ 	spin_unlock(&fs_info->qgroup_lock);
+ 
+ 	/* Skip rescan for simple qgroups. */
+diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
+index f3834f8d26b456..adcbdc970f9ea4 100644
+--- a/fs/btrfs/relocation.c
++++ b/fs/btrfs/relocation.c
+@@ -2902,6 +2902,7 @@ static int relocate_one_folio(struct reloc_control *rc,
+ 	const bool use_rst = btrfs_need_stripe_tree_update(fs_info, rc->block_group->flags);
+ 
+ 	ASSERT(index <= last_index);
++again:
+ 	folio = filemap_lock_folio(inode->i_mapping, index);
+ 	if (IS_ERR(folio)) {
+ 
+@@ -2937,6 +2938,11 @@ static int relocate_one_folio(struct reloc_control *rc,
+ 			ret = -EIO;
+ 			goto release_folio;
+ 		}
++		if (folio->mapping != inode->i_mapping) {
++			folio_unlock(folio);
++			folio_put(folio);
++			goto again;
++		}
+ 	}
+ 
+ 	/*
+diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
+index 0cb11dcd10cd4b..b1015f383f75ef 100644
+--- a/fs/btrfs/send.c
++++ b/fs/btrfs/send.c
+@@ -5291,6 +5291,7 @@ static int put_file_data(struct send_ctx *sctx, u64 offset, u32 len)
+ 		unsigned cur_len = min_t(unsigned, len,
+ 					 PAGE_SIZE - pg_offset);
+ 
++again:
+ 		folio = filemap_lock_folio(mapping, index);
+ 		if (IS_ERR(folio)) {
+ 			page_cache_sync_readahead(mapping,
+@@ -5323,6 +5324,11 @@ static int put_file_data(struct send_ctx *sctx, u64 offset, u32 len)
+ 				ret = -EIO;
+ 				break;
+ 			}
++			if (folio->mapping != mapping) {
++				folio_unlock(folio);
++				folio_put(folio);
++				goto again;
++			}
+ 		}
+ 
+ 		memcpy_from_folio(sctx->send_buf + sctx->send_size, folio,
+diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
+index 03926ad467c919..5912d505776660 100644
+--- a/fs/btrfs/sysfs.c
++++ b/fs/btrfs/sysfs.c
+@@ -1118,7 +1118,7 @@ static ssize_t btrfs_nodesize_show(struct kobject *kobj,
+ {
+ 	struct btrfs_fs_info *fs_info = to_fs_info(kobj);
+ 
+-	return sysfs_emit(buf, "%u\n", fs_info->super_copy->nodesize);
++	return sysfs_emit(buf, "%u\n", fs_info->nodesize);
+ }
+ 
+ BTRFS_ATTR(, nodesize, btrfs_nodesize_show);
+@@ -1128,7 +1128,7 @@ static ssize_t btrfs_sectorsize_show(struct kobject *kobj,
+ {
+ 	struct btrfs_fs_info *fs_info = to_fs_info(kobj);
+ 
+-	return sysfs_emit(buf, "%u\n", fs_info->super_copy->sectorsize);
++	return sysfs_emit(buf, "%u\n", fs_info->sectorsize);
+ }
+ 
+ BTRFS_ATTR(, sectorsize, btrfs_sectorsize_show);
+@@ -1180,7 +1180,7 @@ static ssize_t btrfs_clone_alignment_show(struct kobject *kobj,
+ {
+ 	struct btrfs_fs_info *fs_info = to_fs_info(kobj);
+ 
+-	return sysfs_emit(buf, "%u\n", fs_info->super_copy->sectorsize);
++	return sysfs_emit(buf, "%u\n", fs_info->sectorsize);
+ }
+ 
+ BTRFS_ATTR(, clone_alignment, btrfs_clone_alignment_show);
+diff --git a/fs/ceph/file.c b/fs/ceph/file.c
+index 67468d88f13908..851d70200c6b8f 100644
+--- a/fs/ceph/file.c
++++ b/fs/ceph/file.c
+@@ -1552,7 +1552,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
+ 		}
+ 
+ 		op = &req->r_ops[0];
+-		if (sparse) {
++		if (!write && sparse) {
+ 			extent_cnt = __ceph_sparse_read_ext_count(inode, size);
+ 			ret = ceph_alloc_sparse_ext_map(op, extent_cnt);
+ 			if (ret) {
+diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
+index 6d0455973d641e..49aede376d8668 100644
+--- a/fs/nfsd/export.c
++++ b/fs/nfsd/export.c
+@@ -40,24 +40,15 @@
+ #define	EXPKEY_HASHMAX		(1 << EXPKEY_HASHBITS)
+ #define	EXPKEY_HASHMASK		(EXPKEY_HASHMAX -1)
+ 
+-static void expkey_put_work(struct work_struct *work)
++static void expkey_put(struct kref *ref)
+ {
+-	struct svc_expkey *key =
+-		container_of(to_rcu_work(work), struct svc_expkey, ek_rcu_work);
++	struct svc_expkey *key = container_of(ref, struct svc_expkey, h.ref);
+ 
+ 	if (test_bit(CACHE_VALID, &key->h.flags) &&
+ 	    !test_bit(CACHE_NEGATIVE, &key->h.flags))
+ 		path_put(&key->ek_path);
+ 	auth_domain_put(key->ek_client);
+-	kfree(key);
+-}
+-
+-static void expkey_put(struct kref *ref)
+-{
+-	struct svc_expkey *key = container_of(ref, struct svc_expkey, h.ref);
+-
+-	INIT_RCU_WORK(&key->ek_rcu_work, expkey_put_work);
+-	queue_rcu_work(system_wq, &key->ek_rcu_work);
++	kfree_rcu(key, ek_rcu);
+ }
+ 
+ static int expkey_upcall(struct cache_detail *cd, struct cache_head *h)
+@@ -364,26 +355,16 @@ static void export_stats_destroy(struct export_stats *stats)
+ 					    EXP_STATS_COUNTERS_NUM);
+ }
+ 
+-static void svc_export_put_work(struct work_struct *work)
++static void svc_export_put(struct kref *ref)
+ {
+-	struct svc_export *exp =
+-		container_of(to_rcu_work(work), struct svc_export, ex_rcu_work);
+-
++	struct svc_export *exp = container_of(ref, struct svc_export, h.ref);
+ 	path_put(&exp->ex_path);
+ 	auth_domain_put(exp->ex_client);
+ 	nfsd4_fslocs_free(&exp->ex_fslocs);
+ 	export_stats_destroy(exp->ex_stats);
+ 	kfree(exp->ex_stats);
+ 	kfree(exp->ex_uuid);
+-	kfree(exp);
+-}
+-
+-static void svc_export_put(struct kref *ref)
+-{
+-	struct svc_export *exp = container_of(ref, struct svc_export, h.ref);
+-
+-	INIT_RCU_WORK(&exp->ex_rcu_work, svc_export_put_work);
+-	queue_rcu_work(system_wq, &exp->ex_rcu_work);
++	kfree_rcu(exp, ex_rcu);
+ }
+ 
+ static int svc_export_upcall(struct cache_detail *cd, struct cache_head *h)
+diff --git a/fs/nfsd/export.h b/fs/nfsd/export.h
+index 081afb68681e14..3794ae253a7016 100644
+--- a/fs/nfsd/export.h
++++ b/fs/nfsd/export.h
+@@ -75,7 +75,7 @@ struct svc_export {
+ 	u32			ex_layout_types;
+ 	struct nfsd4_deviceid_map *ex_devid_map;
+ 	struct cache_detail	*cd;
+-	struct rcu_work		ex_rcu_work;
++	struct rcu_head		ex_rcu;
+ 	unsigned long		ex_xprtsec_modes;
+ 	struct export_stats	*ex_stats;
+ };
+@@ -92,7 +92,7 @@ struct svc_expkey {
+ 	u32			ek_fsid[6];
+ 
+ 	struct path		ek_path;
+-	struct rcu_work		ek_rcu_work;
++	struct rcu_head		ek_rcu;
+ };
+ 
+ #define EX_ISSYNC(exp)		(!((exp)->ex_flags & NFSEXP_ASYNC))
+diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
+index b8cbb15560040f..de076365254978 100644
+--- a/fs/nfsd/nfs4callback.c
++++ b/fs/nfsd/nfs4callback.c
+@@ -1058,7 +1058,7 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c
+ 		args.authflavor = clp->cl_cred.cr_flavor;
+ 		clp->cl_cb_ident = conn->cb_ident;
+ 	} else {
+-		if (!conn->cb_xprt)
++		if (!conn->cb_xprt || !ses)
+ 			return -EINVAL;
+ 		clp->cl_cb_session = ses;
+ 		args.bc_xprt = conn->cb_xprt;
+@@ -1461,8 +1461,6 @@ static void nfsd4_process_cb_update(struct nfsd4_callback *cb)
+ 		ses = c->cn_session;
+ 	}
+ 	spin_unlock(&clp->cl_lock);
+-	if (!c)
+-		return;
+ 
+ 	err = setup_callback_client(clp, &conn, ses);
+ 	if (err) {
+diff --git a/fs/smb/client/Kconfig b/fs/smb/client/Kconfig
+index 2aff6d1395ce39..9f05f94e265a6d 100644
+--- a/fs/smb/client/Kconfig
++++ b/fs/smb/client/Kconfig
+@@ -2,7 +2,6 @@
+ config CIFS
+ 	tristate "SMB3 and CIFS support (advanced network filesystem)"
+ 	depends on INET
+-	select NETFS_SUPPORT
+ 	select NLS
+ 	select NLS_UCS2_UTILS
+ 	select CRYPTO
+diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
+index d1bd69cbfe09a5..4750505465ae63 100644
+--- a/fs/smb/client/smb2pdu.c
++++ b/fs/smb/client/smb2pdu.c
+@@ -4855,6 +4855,8 @@ smb2_writev_callback(struct mid_q_entry *mid)
+ 		if (written > wdata->subreq.len)
+ 			written &= 0xFFFF;
+ 
++		cifs_stats_bytes_written(tcon, written);
++
+ 		if (written < wdata->subreq.len)
+ 			wdata->result = -ENOSPC;
+ 		else
+@@ -5171,6 +5173,7 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
+ 		cifs_dbg(VFS, "Send error in write = %d\n", rc);
+ 	} else {
+ 		*nbytes = le32_to_cpu(rsp->DataLength);
++		cifs_stats_bytes_written(io_parms->tcon, *nbytes);
+ 		trace_smb3_write_done(0, 0, xid,
+ 				      req->PersistentFileId,
+ 				      io_parms->tcon->tid,
+diff --git a/fs/smb/server/smb_common.c b/fs/smb/server/smb_common.c
+index 75b4eb856d32f7..af8e24163bf261 100644
+--- a/fs/smb/server/smb_common.c
++++ b/fs/smb/server/smb_common.c
+@@ -18,8 +18,8 @@
+ #include "mgmt/share_config.h"
+ 
+ /* for shortname implementation */
+-static const char basechars[43] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ_-!@#$%";
+-#define MANGLE_BASE (sizeof(basechars) / sizeof(char) - 1)
++static const char *basechars = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ_-!@#$%";
++#define MANGLE_BASE (strlen(basechars) - 1)
+ #define MAGIC_CHAR '~'
+ #define PERIOD '.'
+ #define mangle(V) ((char)(basechars[(V) % MANGLE_BASE]))
+diff --git a/fs/udf/namei.c b/fs/udf/namei.c
+index 78a603129dd583..2cb49b6b07168a 100644
+--- a/fs/udf/namei.c
++++ b/fs/udf/namei.c
+@@ -517,7 +517,11 @@ static int udf_rmdir(struct inode *dir, struct dentry *dentry)
+ 			 inode->i_nlink);
+ 	clear_nlink(inode);
+ 	inode->i_size = 0;
+-	inode_dec_link_count(dir);
++	if (dir->i_nlink >= 3)
++		inode_dec_link_count(dir);
++	else
++		udf_warn(inode->i_sb, "parent dir link count too low (%u)\n",
++			 dir->i_nlink);
+ 	udf_add_fid_counter(dir->i_sb, true, -1);
+ 	inode_set_mtime_to_ts(dir,
+ 			      inode_set_ctime_to_ts(dir, inode_set_ctime_current(inode)));
+@@ -787,8 +791,18 @@ static int udf_rename(struct mnt_idmap *idmap, struct inode *old_dir,
+ 			retval = -ENOTEMPTY;
+ 			if (!empty_dir(new_inode))
+ 				goto out_oiter;
++			retval = -EFSCORRUPTED;
++			if (new_inode->i_nlink != 2)
++				goto out_oiter;
+ 		}
++		retval = -EFSCORRUPTED;
++		if (old_dir->i_nlink < 3)
++			goto out_oiter;
+ 		is_dir = true;
++	} else if (new_inode) {
++		retval = -EFSCORRUPTED;
++		if (new_inode->i_nlink < 1)
++			goto out_oiter;
+ 	}
+ 	if (is_dir && old_dir != new_dir) {
+ 		retval = udf_fiiter_find_entry(old_inode, &dotdot_name,
+diff --git a/include/linux/platform_data/amd_qdma.h b/include/linux/platform_data/amd_qdma.h
+index 576d952f97edd4..967a6ef31cf982 100644
+--- a/include/linux/platform_data/amd_qdma.h
++++ b/include/linux/platform_data/amd_qdma.h
+@@ -26,11 +26,13 @@ struct dma_slave_map;
+  * @max_mm_channels: Maximum number of MM DMA channels in each direction
+  * @device_map: DMA slave map
+  * @irq_index: The index of first IRQ
++ * @dma_dev: The device pointer for dma operations
+  */
+ struct qdma_platdata {
+ 	u32			max_mm_channels;
+ 	u32			irq_index;
+ 	struct dma_slave_map	*device_map;
++	struct device		*dma_dev;
+ };
+ 
+ #endif /* _PLATDATA_AMD_QDMA_H */
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index c14446c6164d72..02eaf84c8626f4 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1633,8 +1633,9 @@ static inline unsigned int __task_state_index(unsigned int tsk_state,
+ 	 * We're lying here, but rather than expose a completely new task state
+ 	 * to userspace, we can make this appear as if the task has gone through
+ 	 * a regular rt_mutex_lock() call.
++	 * Report frozen tasks as uninterruptible.
+ 	 */
+-	if (tsk_state & TASK_RTLOCK_WAIT)
++	if ((tsk_state & TASK_RTLOCK_WAIT) || (tsk_state & TASK_FROZEN))
+ 		state = TASK_UNINTERRUPTIBLE;
+ 
+ 	return fls(state);
+diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
+index d9b03e0746e7a4..2cbe0c22a32f3c 100644
+--- a/include/linux/skmsg.h
++++ b/include/linux/skmsg.h
+@@ -317,17 +317,22 @@ static inline void sock_drop(struct sock *sk, struct sk_buff *skb)
+ 	kfree_skb(skb);
+ }
+ 
+-static inline void sk_psock_queue_msg(struct sk_psock *psock,
++static inline bool sk_psock_queue_msg(struct sk_psock *psock,
+ 				      struct sk_msg *msg)
+ {
++	bool ret;
++
+ 	spin_lock_bh(&psock->ingress_lock);
+-	if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
++	if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
+ 		list_add_tail(&msg->list, &psock->ingress_msg);
+-	else {
++		ret = true;
++	} else {
+ 		sk_msg_free(psock->sk, msg);
+ 		kfree(msg);
++		ret = false;
+ 	}
+ 	spin_unlock_bh(&psock->ingress_lock);
++	return ret;
+ }
+ 
+ static inline struct sk_msg *sk_psock_dequeue_msg(struct sk_psock *psock)
+diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
+index 4df2ff81d3dea5..77769ff5054441 100644
+--- a/include/linux/trace_events.h
++++ b/include/linux/trace_events.h
+@@ -379,7 +379,7 @@ struct trace_event_call {
+ 	struct list_head	list;
+ 	struct trace_event_class *class;
+ 	union {
+-		char			*name;
++		const char		*name;
+ 		/* Set TRACE_EVENT_FL_TRACEPOINT flag when using "tp" */
+ 		struct tracepoint	*tp;
+ 	};
+diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
+index d2761bf8ff32c9..9f3a04345b8606 100644
+--- a/include/linux/vmstat.h
++++ b/include/linux/vmstat.h
+@@ -515,7 +515,7 @@ static inline const char *node_stat_name(enum node_stat_item item)
+ 
+ static inline const char *lru_list_name(enum lru_list lru)
+ {
+-	return node_stat_name(NR_LRU_BASE + lru) + 3; // skip "nr_"
++	return node_stat_name(NR_LRU_BASE + (enum node_stat_item)lru) + 3; // skip "nr_"
+ }
+ 
+ #if defined(CONFIG_VM_EVENT_COUNTERS) || defined(CONFIG_MEMCG)
+diff --git a/include/net/sock.h b/include/net/sock.h
+index f29c1444893875..fa055cf1785efd 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -1521,7 +1521,7 @@ static inline bool sk_wmem_schedule(struct sock *sk, int size)
+ }
+ 
+ static inline bool
+-sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size)
++__sk_rmem_schedule(struct sock *sk, int size, bool pfmemalloc)
+ {
+ 	int delta;
+ 
+@@ -1529,7 +1529,13 @@ sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size)
+ 		return true;
+ 	delta = size - sk->sk_forward_alloc;
+ 	return delta <= 0 || __sk_mem_schedule(sk, delta, SK_MEM_RECV) ||
+-		skb_pfmemalloc(skb);
++	       pfmemalloc;
++}
++
++static inline bool
++sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size)
++{
++	return __sk_rmem_schedule(sk, size, skb_pfmemalloc(skb));
+ }
+ 
+ static inline int sk_unused_reserved_mem(const struct sock *sk)
+diff --git a/include/uapi/linux/stddef.h b/include/uapi/linux/stddef.h
+index 58154117d9b090..a6fce46aeb37c9 100644
+--- a/include/uapi/linux/stddef.h
++++ b/include/uapi/linux/stddef.h
+@@ -8,6 +8,13 @@
+ #define __always_inline inline
+ #endif
+ 
++/* Not all C++ standards support type declarations inside an anonymous union */
++#ifndef __cplusplus
++#define __struct_group_tag(TAG)		TAG
++#else
++#define __struct_group_tag(TAG)
++#endif
++
+ /**
+  * __struct_group() - Create a mirrored named and anonyomous struct
+  *
+@@ -20,13 +27,13 @@
+  * and size: one anonymous and one named. The former's members can be used
+  * normally without sub-struct naming, and the latter can be used to
+  * reason about the start, end, and size of the group of struct members.
+- * The named struct can also be explicitly tagged for layer reuse, as well
+- * as both having struct attributes appended.
++ * The named struct can also be explicitly tagged for layer reuse (C only),
++ * as well as both having struct attributes appended.
+  */
+ #define __struct_group(TAG, NAME, ATTRS, MEMBERS...) \
+ 	union { \
+ 		struct { MEMBERS } ATTRS; \
+-		struct TAG { MEMBERS } ATTRS NAME; \
++		struct __struct_group_tag(TAG) { MEMBERS } ATTRS NAME; \
+ 	} ATTRS
+ 
+ #ifdef __cplusplus
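
The effect of the stddef.h change above can be seen in a small
self-contained program: compiled as C the inner struct keeps its tag
(usable for layer reuse), while a C++ build simply drops the tag, since C++
rejects type declarations inside an anonymous union. The packet/pkt_hdr
names here are invented for illustration:

#include <stdio.h>

#ifndef __cplusplus
#define __struct_group_tag(TAG)	TAG
#else
#define __struct_group_tag(TAG)
#endif

#define __struct_group(TAG, NAME, ATTRS, MEMBERS...) \
	union { \
		struct { MEMBERS } ATTRS; \
		struct __struct_group_tag(TAG) { MEMBERS } ATTRS NAME; \
	} ATTRS

struct packet {
	__struct_group(pkt_hdr, hdr, /* no attrs */,
		int type;
		int len;
	);
	char payload[16];
};

int main(void)
{
	struct packet p = { .type = 1, .len = 2 };

	/* same storage is reachable anonymously or via the named group */
	printf("%d %d %zu\n", p.hdr.type, p.len, sizeof(p.hdr));
	return 0;
}
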
+diff --git a/io_uring/sqpoll.c b/io_uring/sqpoll.c
+index a26593979887f3..1cfcc735b8e38e 100644
+--- a/io_uring/sqpoll.c
++++ b/io_uring/sqpoll.c
+@@ -412,6 +412,7 @@ void io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
+ __cold int io_sq_offload_create(struct io_ring_ctx *ctx,
+ 				struct io_uring_params *p)
+ {
++	struct task_struct *task_to_put = NULL;
+ 	int ret;
+ 
+ 	/* Retain compatibility with failing for an invalid attach attempt */
+@@ -492,6 +493,7 @@ __cold int io_sq_offload_create(struct io_ring_ctx *ctx,
+ 		}
+ 
+ 		sqd->thread = tsk;
++		task_to_put = get_task_struct(tsk);
+ 		ret = io_uring_alloc_task_context(tsk, ctx);
+ 		wake_up_new_task(tsk);
+ 		if (ret)
+@@ -502,11 +504,15 @@ __cold int io_sq_offload_create(struct io_ring_ctx *ctx,
+ 		goto err;
+ 	}
+ 
++	if (task_to_put)
++		put_task_struct(task_to_put);
+ 	return 0;
+ err_sqpoll:
+ 	complete(&ctx->sq_data->exited);
+ err:
+ 	io_sq_thread_finish(ctx);
++	if (task_to_put)
++		put_task_struct(task_to_put);
+ 	return ret;
+ }
+ 
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 4c486a0bfcc4d8..767f1cb8c27e17 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -7868,7 +7868,7 @@ static int process_dynptr_func(struct bpf_verifier_env *env, int regno, int insn
+ 	if (reg->type != PTR_TO_STACK && reg->type != CONST_PTR_TO_DYNPTR) {
+ 		verbose(env,
+ 			"arg#%d expected pointer to stack or const struct bpf_dynptr\n",
+-			regno);
++			regno - 1);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -7922,7 +7922,7 @@ static int process_dynptr_func(struct bpf_verifier_env *env, int regno, int insn
+ 		if (!is_dynptr_reg_valid_init(env, reg)) {
+ 			verbose(env,
+ 				"Expected an initialized dynptr as arg #%d\n",
+-				regno);
++				regno - 1);
+ 			return -EINVAL;
+ 		}
+ 
+@@ -7930,7 +7930,7 @@ static int process_dynptr_func(struct bpf_verifier_env *env, int regno, int insn
+ 		if (!is_dynptr_type_expected(env, reg, arg_type & ~MEM_RDONLY)) {
+ 			verbose(env,
+ 				"Expected a dynptr of type %s as arg #%d\n",
+-				dynptr_type_str(arg_to_dynptr_type(arg_type)), regno);
++				dynptr_type_str(arg_to_dynptr_type(arg_type)), regno - 1);
+ 			return -EINVAL;
+ 		}
+ 
+@@ -7999,7 +7999,7 @@ static int process_iter_arg(struct bpf_verifier_env *env, int regno, int insn_id
+ 	 */
+ 	btf_id = btf_check_iter_arg(meta->btf, meta->func_proto, regno - 1);
+ 	if (btf_id < 0) {
+-		verbose(env, "expected valid iter pointer as arg #%d\n", regno);
++		verbose(env, "expected valid iter pointer as arg #%d\n", regno - 1);
+ 		return -EINVAL;
+ 	}
+ 	t = btf_type_by_id(meta->btf, btf_id);
+@@ -8009,7 +8009,7 @@ static int process_iter_arg(struct bpf_verifier_env *env, int regno, int insn_id
+ 		/* bpf_iter_<type>_new() expects pointer to uninit iter state */
+ 		if (!is_iter_reg_valid_uninit(env, reg, nr_slots)) {
+ 			verbose(env, "expected uninitialized iter_%s as arg #%d\n",
+-				iter_type_str(meta->btf, btf_id), regno);
++				iter_type_str(meta->btf, btf_id), regno - 1);
+ 			return -EINVAL;
+ 		}
+ 
+@@ -8033,7 +8033,7 @@ static int process_iter_arg(struct bpf_verifier_env *env, int regno, int insn_id
+ 			break;
+ 		case -EINVAL:
+ 			verbose(env, "expected an initialized iter_%s as arg #%d\n",
+-				iter_type_str(meta->btf, btf_id), regno);
++				iter_type_str(meta->btf, btf_id), regno - 1);
+ 			return err;
+ 		case -EPROTO:
+ 			verbose(env, "expected an RCU CS when using %s\n", meta->func_name);
+@@ -21085,11 +21085,15 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
+ 			 * changed in some incompatible and hard to support
+ 			 * way, it's fine to back out this inlining logic
+ 			 */
++#ifdef CONFIG_SMP
+ 			insn_buf[0] = BPF_MOV32_IMM(BPF_REG_0, (u32)(unsigned long)&pcpu_hot.cpu_number);
+ 			insn_buf[1] = BPF_MOV64_PERCPU_REG(BPF_REG_0, BPF_REG_0);
+ 			insn_buf[2] = BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0);
+ 			cnt = 3;
+-
++#else
++			insn_buf[0] = BPF_ALU32_REG(BPF_XOR, BPF_REG_0, BPF_REG_0);
++			cnt = 1;
++#endif
+ 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
+ 			if (!new_prog)
+ 				return -ENOMEM;
+diff --git a/kernel/fork.c b/kernel/fork.c
+index ce8be55e5e04b3..e192bdbc9adebb 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -640,11 +640,8 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
+ 	LIST_HEAD(uf);
+ 	VMA_ITERATOR(vmi, mm, 0);
+ 
+-	uprobe_start_dup_mmap();
+-	if (mmap_write_lock_killable(oldmm)) {
+-		retval = -EINTR;
+-		goto fail_uprobe_end;
+-	}
++	if (mmap_write_lock_killable(oldmm))
++		return -EINTR;
+ 	flush_cache_dup_mm(oldmm);
+ 	uprobe_dup_mmap(oldmm, mm);
+ 	/*
+@@ -783,8 +780,6 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
+ 		dup_userfaultfd_complete(&uf);
+ 	else
+ 		dup_userfaultfd_fail(&uf);
+-fail_uprobe_end:
+-	uprobe_end_dup_mmap();
+ 	return retval;
+ 
+ fail_nomem_anon_vma_fork:
+@@ -1692,9 +1687,11 @@ static struct mm_struct *dup_mm(struct task_struct *tsk,
+ 	if (!mm_init(mm, tsk, mm->user_ns))
+ 		goto fail_nomem;
+ 
++	uprobe_start_dup_mmap();
+ 	err = dup_mmap(mm, oldmm);
+ 	if (err)
+ 		goto free_pt;
++	uprobe_end_dup_mmap();
+ 
+ 	mm->hiwater_rss = get_mm_rss(mm);
+ 	mm->hiwater_vm = mm->total_vm;
+@@ -1709,6 +1706,8 @@ static struct mm_struct *dup_mm(struct task_struct *tsk,
+ 	mm->binfmt = NULL;
+ 	mm_init_owner(mm, NULL);
+ 	mmput(mm);
++	if (err)
++		uprobe_end_dup_mmap();
+ 
+ fail_nomem:
+ 	return NULL;
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 35515192aa0fda..b04990385a6a87 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -5111,6 +5111,9 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
+ 	cpumask_var_t tracing_cpumask_new;
+ 	int err;
+ 
++	if (count == 0 || count > KMALLOC_MAX_SIZE)
++		return -EINVAL;
++
+ 	if (!zalloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
+ 		return -ENOMEM;
+ 
+diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
+index 263fac44d3ca32..935a886af40c90 100644
+--- a/kernel/trace/trace_kprobe.c
++++ b/kernel/trace/trace_kprobe.c
+@@ -725,7 +725,7 @@ static int trace_kprobe_module_callback(struct notifier_block *nb,
+ 
+ static struct notifier_block trace_kprobe_module_nb = {
+ 	.notifier_call = trace_kprobe_module_callback,
+-	.priority = 1	/* Invoked after kprobe module callback */
++	.priority = 2	/* Invoked after kprobe and jump_label module callback */
+ };
+ static int trace_kprobe_register_module_notifier(void)
+ {
+diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
+index 9d078b37fe0b9b..abac770bc0b4c7 100644
+--- a/net/ceph/osd_client.c
++++ b/net/ceph/osd_client.c
+@@ -1173,6 +1173,8 @@ EXPORT_SYMBOL(ceph_osdc_new_request);
+ 
+ int __ceph_alloc_sparse_ext_map(struct ceph_osd_req_op *op, int cnt)
+ {
++	WARN_ON(op->op != CEPH_OSD_OP_SPARSE_READ);
++
+ 	op->extent.sparse_ext_cnt = cnt;
+ 	op->extent.sparse_ext = kmalloc_array(cnt,
+ 					      sizeof(*op->extent.sparse_ext),
+diff --git a/net/core/filter.c b/net/core/filter.c
+index 9a459213d283f1..55495063621d6c 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -3751,13 +3751,22 @@ static const struct bpf_func_proto bpf_skb_adjust_room_proto = {
+ 
+ static u32 __bpf_skb_min_len(const struct sk_buff *skb)
+ {
+-	u32 min_len = skb_network_offset(skb);
++	int offset = skb_network_offset(skb);
++	u32 min_len = 0;
+ 
+-	if (skb_transport_header_was_set(skb))
+-		min_len = skb_transport_offset(skb);
+-	if (skb->ip_summed == CHECKSUM_PARTIAL)
+-		min_len = skb_checksum_start_offset(skb) +
+-			  skb->csum_offset + sizeof(__sum16);
++	if (offset > 0)
++		min_len = offset;
++	if (skb_transport_header_was_set(skb)) {
++		offset = skb_transport_offset(skb);
++		if (offset > 0)
++			min_len = offset;
++	}
++	if (skb->ip_summed == CHECKSUM_PARTIAL) {
++		offset = skb_checksum_start_offset(skb) +
++			 skb->csum_offset + sizeof(__sum16);
++		if (offset > 0)
++			min_len = offset;
++	}
+ 	return min_len;
+ }
+ 
+diff --git a/net/core/skmsg.c b/net/core/skmsg.c
+index e90fbab703b2db..8ad7e6755fd642 100644
+--- a/net/core/skmsg.c
++++ b/net/core/skmsg.c
+@@ -445,8 +445,10 @@ int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
+ 			if (likely(!peek)) {
+ 				sge->offset += copy;
+ 				sge->length -= copy;
+-				if (!msg_rx->skb)
++				if (!msg_rx->skb) {
+ 					sk_mem_uncharge(sk, copy);
++					atomic_sub(copy, &sk->sk_rmem_alloc);
++				}
+ 				msg_rx->sg.size -= copy;
+ 
+ 				if (!sge->length) {
+@@ -772,6 +774,8 @@ static void __sk_psock_purge_ingress_msg(struct sk_psock *psock)
+ 
+ 	list_for_each_entry_safe(msg, tmp, &psock->ingress_msg, list) {
+ 		list_del(&msg->list);
++		if (!msg->skb)
++			atomic_sub(msg->sg.size, &psock->sk->sk_rmem_alloc);
+ 		sk_msg_free(psock->sk, msg);
+ 		kfree(msg);
+ 	}
+diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
+index 99cef92e6290cf..392678ae80f4ed 100644
+--- a/net/ipv4/tcp_bpf.c
++++ b/net/ipv4/tcp_bpf.c
+@@ -49,13 +49,14 @@ static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock,
+ 		sge = sk_msg_elem(msg, i);
+ 		size = (apply && apply_bytes < sge->length) ?
+ 			apply_bytes : sge->length;
+-		if (!sk_wmem_schedule(sk, size)) {
++		if (!__sk_rmem_schedule(sk, size, false)) {
+ 			if (!copied)
+ 				ret = -ENOMEM;
+ 			break;
+ 		}
+ 
+ 		sk_mem_charge(sk, size);
++		atomic_add(size, &sk->sk_rmem_alloc);
+ 		sk_msg_xfer(tmp, msg, i, size);
+ 		copied += size;
+ 		if (sge->length)
+@@ -74,7 +75,8 @@ static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock,
+ 
+ 	if (!ret) {
+ 		msg->sg.start = i;
+-		sk_psock_queue_msg(psock, tmp);
++		if (!sk_psock_queue_msg(psock, tmp))
++			atomic_sub(copied, &sk->sk_rmem_alloc);
+ 		sk_psock_data_ready(sk, psock);
+ 	} else {
+ 		sk_msg_free(sk, tmp);
+diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c
+index 13b71069ae1874..b3853583d2ae1c 100644
+--- a/sound/core/memalloc.c
++++ b/sound/core/memalloc.c
+@@ -505,7 +505,7 @@ static void *snd_dma_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
+ 	if (!p)
+ 		return NULL;
+ 	dmab->addr = dma_map_single(dmab->dev.dev, p, size, DMA_BIDIRECTIONAL);
+-	if (dmab->addr == DMA_MAPPING_ERROR) {
++	if (dma_mapping_error(dmab->dev.dev, dmab->addr)) {
+ 		do_free_pages(dmab->area, size, true);
+ 		return NULL;
+ 	}
+diff --git a/sound/core/ump.c b/sound/core/ump.c
+index 8d37f237f83b2e..bd26bb2210cbd4 100644
+--- a/sound/core/ump.c
++++ b/sound/core/ump.c
+@@ -37,6 +37,7 @@ static int process_legacy_output(struct snd_ump_endpoint *ump,
+ 				 u32 *buffer, int count);
+ static void process_legacy_input(struct snd_ump_endpoint *ump, const u32 *src,
+ 				 int words);
++static void update_legacy_names(struct snd_ump_endpoint *ump);
+ #else
+ static inline int process_legacy_output(struct snd_ump_endpoint *ump,
+ 					u32 *buffer, int count)
+@@ -47,6 +48,9 @@ static inline void process_legacy_input(struct snd_ump_endpoint *ump,
+ 					const u32 *src, int words)
+ {
+ }
++static inline void update_legacy_names(struct snd_ump_endpoint *ump)
++{
++}
+ #endif
+ 
+ static const struct snd_rawmidi_global_ops snd_ump_rawmidi_ops = {
+@@ -861,6 +865,7 @@ static int ump_handle_fb_info_msg(struct snd_ump_endpoint *ump,
+ 		fill_fb_info(ump, &fb->info, buf);
+ 		if (ump->parsed) {
+ 			snd_ump_update_group_attrs(ump);
++			update_legacy_names(ump);
+ 			seq_notify_fb_change(ump, fb);
+ 		}
+ 	}
+@@ -893,6 +898,7 @@ static int ump_handle_fb_name_msg(struct snd_ump_endpoint *ump,
+ 	/* notify the FB name update to sequencer, too */
+ 	if (ret > 0 && ump->parsed) {
+ 		snd_ump_update_group_attrs(ump);
++		update_legacy_names(ump);
+ 		seq_notify_fb_change(ump, fb);
+ 	}
+ 	return ret;
+@@ -1087,6 +1093,8 @@ static int snd_ump_legacy_open(struct snd_rawmidi_substream *substream)
+ 	guard(mutex)(&ump->open_mutex);
+ 	if (ump->legacy_substreams[dir][group])
+ 		return -EBUSY;
++	if (!ump->groups[group].active)
++		return -ENODEV;
+ 	if (dir == SNDRV_RAWMIDI_STREAM_OUTPUT) {
+ 		if (!ump->legacy_out_opens) {
+ 			err = snd_rawmidi_kernel_open(&ump->core, 0,
+@@ -1254,11 +1262,20 @@ static void fill_substream_names(struct snd_ump_endpoint *ump,
+ 		name = ump->groups[idx].name;
+ 		if (!*name)
+ 			name = ump->info.name;
+-		snprintf(s->name, sizeof(s->name), "Group %d (%.16s)",
+-			 idx + 1, name);
++		scnprintf(s->name, sizeof(s->name), "Group %d (%.16s)%s",
++			  idx + 1, name,
++			  ump->groups[idx].active ? "" : " [Inactive]");
+ 	}
+ }
+ 
++static void update_legacy_names(struct snd_ump_endpoint *ump)
++{
++	struct snd_rawmidi *rmidi = ump->legacy_rmidi;
++
++	fill_substream_names(ump, rmidi, SNDRV_RAWMIDI_STREAM_INPUT);
++	fill_substream_names(ump, rmidi, SNDRV_RAWMIDI_STREAM_OUTPUT);
++}
++
+ int snd_ump_attach_legacy_rawmidi(struct snd_ump_endpoint *ump,
+ 				  char *id, int device)
+ {
+@@ -1295,10 +1312,7 @@ int snd_ump_attach_legacy_rawmidi(struct snd_ump_endpoint *ump,
+ 	rmidi->ops = &snd_ump_legacy_ops;
+ 	rmidi->private_data = ump;
+ 	ump->legacy_rmidi = rmidi;
+-	if (input)
+-		fill_substream_names(ump, rmidi, SNDRV_RAWMIDI_STREAM_INPUT);
+-	if (output)
+-		fill_substream_names(ump, rmidi, SNDRV_RAWMIDI_STREAM_OUTPUT);
++	update_legacy_names(ump);
+ 
+ 	ump_dbg(ump, "Created a legacy rawmidi #%d (%s)\n", device, id);
+ 	return 0;
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index 2e9f817b948eb3..538c37a78a56f7 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -307,6 +307,7 @@ enum {
+ 	CXT_FIXUP_HP_MIC_NO_PRESENCE,
+ 	CXT_PINCFG_SWS_JS201D,
+ 	CXT_PINCFG_TOP_SPEAKER,
++	CXT_FIXUP_HP_A_U,
+ };
+ 
+ /* for hda_fixup_thinkpad_acpi() */
+@@ -774,6 +775,18 @@ static void cxt_setup_mute_led(struct hda_codec *codec,
+ 	}
+ }
+ 
++static void cxt_setup_gpio_unmute(struct hda_codec *codec,
++				  unsigned int gpio_mute_mask)
++{
++	if (gpio_mute_mask) {
++		// set gpio data to 0.
++		snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_DATA, 0);
++		snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_MASK, gpio_mute_mask);
++		snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_DIRECTION, gpio_mute_mask);
++		snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_STICKY_MASK, 0);
++	}
++}
++
+ static void cxt_fixup_mute_led_gpio(struct hda_codec *codec,
+ 				const struct hda_fixup *fix, int action)
+ {
+@@ -788,6 +801,15 @@ static void cxt_fixup_hp_zbook_mute_led(struct hda_codec *codec,
+ 		cxt_setup_mute_led(codec, 0x10, 0x20);
+ }
+ 
++static void cxt_fixup_hp_a_u(struct hda_codec *codec,
++			     const struct hda_fixup *fix, int action)
++{
++	// Init vers in BIOS mute the spk/hp by set gpio high to avoid pop noise,
++	// so need to unmute once by clearing the gpio data when runs into the system.
++	if (action == HDA_FIXUP_ACT_INIT)
++		cxt_setup_gpio_unmute(codec, 0x2);
++}
++
+ /* ThinkPad X200 & co with cxt5051 */
+ static const struct hda_pintbl cxt_pincfg_lenovo_x200[] = {
+ 	{ 0x16, 0x042140ff }, /* HP (seq# overridden) */
+@@ -998,6 +1020,10 @@ static const struct hda_fixup cxt_fixups[] = {
+ 			{ }
+ 		},
+ 	},
++	[CXT_FIXUP_HP_A_U] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = cxt_fixup_hp_a_u,
++	},
+ };
+ 
+ static const struct hda_quirk cxt5045_fixups[] = {
+@@ -1072,6 +1098,7 @@ static const struct hda_quirk cxt5066_fixups[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8457, "HP Z2 G4 mini", CXT_FIXUP_HP_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x103c, 0x8458, "HP Z2 G4 mini premium", CXT_FIXUP_HP_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN),
++	SND_PCI_QUIRK(0x14f1, 0x0252, "MBX-Z60MR100", CXT_FIXUP_HP_A_U),
+ 	SND_PCI_QUIRK(0x14f1, 0x0265, "SWS JS201D", CXT_PINCFG_SWS_JS201D),
+ 	SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT_FIXUP_OLPC_XO),
+ 	SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410),
+@@ -1117,6 +1144,7 @@ static const struct hda_model_fixup cxt5066_fixup_models[] = {
+ 	{ .id = CXT_PINCFG_LENOVO_NOTEBOOK, .name = "lenovo-20149" },
+ 	{ .id = CXT_PINCFG_SWS_JS201D, .name = "sws-js201d" },
+ 	{ .id = CXT_PINCFG_TOP_SPEAKER, .name = "sirius-top-speaker" },
++	{ .id = CXT_FIXUP_HP_A_U, .name = "HP-U-support" },
+ 	{}
+ };
+ 
+diff --git a/sound/sh/sh_dac_audio.c b/sound/sh/sh_dac_audio.c
+index e7b6ce7bd086bd..1c1c14708f0181 100644
+--- a/sound/sh/sh_dac_audio.c
++++ b/sound/sh/sh_dac_audio.c
+@@ -163,7 +163,7 @@ static int snd_sh_dac_pcm_copy(struct snd_pcm_substream *substream,
+ 	/* channel is not used (interleaved data) */
+ 	struct snd_sh_dac *chip = snd_pcm_substream_chip(substream);
+ 
+-	if (copy_from_iter_toio(chip->data_buffer + pos, src, count))
++	if (copy_from_iter(chip->data_buffer + pos, count, src) != count)
+ 		return -EFAULT;
+ 	chip->buffer_end = chip->data_buffer + pos + count;
+ 
+@@ -182,7 +182,7 @@ static int snd_sh_dac_pcm_silence(struct snd_pcm_substream *substream,
+ 	/* channel is not used (interleaved data) */
+ 	struct snd_sh_dac *chip = snd_pcm_substream_chip(substream);
+ 
+-	memset_io(chip->data_buffer + pos, 0, count);
++	memset(chip->data_buffer + pos, 0, count);
+ 	chip->buffer_end = chip->data_buffer + pos + count;
+ 
+ 	if (chip->empty) {
+@@ -211,7 +211,6 @@ static const struct snd_pcm_ops snd_sh_dac_pcm_ops = {
+ 	.pointer	= snd_sh_dac_pcm_pointer,
+ 	.copy		= snd_sh_dac_pcm_copy,
+ 	.fill_silence	= snd_sh_dac_pcm_silence,
+-	.mmap		= snd_pcm_lib_mmap_iomem,
+ };
+ 
+ static int snd_sh_dac_pcm(struct snd_sh_dac *chip, int device)
+diff --git a/sound/soc/amd/ps/pci-ps.c b/sound/soc/amd/ps/pci-ps.c
+index c72d666d51bdf4..5c4a0be7a78892 100644
+--- a/sound/soc/amd/ps/pci-ps.c
++++ b/sound/soc/amd/ps/pci-ps.c
+@@ -375,11 +375,18 @@ static int get_acp63_device_config(struct pci_dev *pci, struct acp63_dev_data *a
+ {
+ 	struct acpi_device *pdm_dev;
+ 	const union acpi_object *obj;
++	acpi_handle handle;
++	acpi_integer dmic_status;
+ 	u32 config;
+ 	bool is_dmic_dev = false;
+ 	bool is_sdw_dev = false;
++	bool wov_en, dmic_en;
+ 	int ret;
+ 
++	/* IF WOV entry not found, enable dmic based on acp-audio-device-type entry*/
++	wov_en = true;
++	dmic_en = false;
++
+ 	config = readl(acp_data->acp63_base + ACP_PIN_CONFIG);
+ 	switch (config) {
+ 	case ACP_CONFIG_4:
+@@ -412,10 +419,18 @@ static int get_acp63_device_config(struct pci_dev *pci, struct acp63_dev_data *a
+ 			if (!acpi_dev_get_property(pdm_dev, "acp-audio-device-type",
+ 						   ACPI_TYPE_INTEGER, &obj) &&
+ 						   obj->integer.value == ACP_DMIC_DEV)
+-				is_dmic_dev = true;
++				dmic_en = true;
+ 		}
++
++		handle = ACPI_HANDLE(&pci->dev);
++		ret = acpi_evaluate_integer(handle, "_WOV", NULL, &dmic_status);
++		if (!ACPI_FAILURE(ret))
++			wov_en = dmic_status;
+ 	}
+ 
++	if (dmic_en && wov_en)
++		is_dmic_dev = true;
++
+ 	if (acp_data->is_sdw_config) {
+ 		ret = acp_scan_sdw_devices(&pci->dev, ACP63_SDW_ADDR);
+ 		if (!ret && acp_data->info.link_mask)
+diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
+index db57292c00ca1e..41042259f2b26e 100644
+--- a/sound/soc/intel/boards/sof_sdw.c
++++ b/sound/soc/intel/boards/sof_sdw.c
+@@ -608,7 +608,7 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
+ 		.callback = sof_sdw_quirk_cb,
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+-			DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "233C")
++			DMI_MATCH(DMI_PRODUCT_NAME, "21QB")
+ 		},
+ 		/* Note this quirk excludes the CODEC mic */
+ 		.driver_data = (void *)(SOC_SDW_CODEC_MIC),
+@@ -617,9 +617,26 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
+ 		.callback = sof_sdw_quirk_cb,
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+-			DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "233B")
++			DMI_MATCH(DMI_PRODUCT_NAME, "21QA")
+ 		},
+-		.driver_data = (void *)(SOC_SDW_SIDECAR_AMPS),
++		/* Note this quirk excludes the CODEC mic */
++		.driver_data = (void *)(SOC_SDW_CODEC_MIC),
++	},
++	{
++		.callback = sof_sdw_quirk_cb,
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "21Q6")
++		},
++		.driver_data = (void *)(SOC_SDW_SIDECAR_AMPS | SOC_SDW_CODEC_MIC),
++	},
++	{
++		.callback = sof_sdw_quirk_cb,
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "21Q7")
++		},
++		.driver_data = (void *)(SOC_SDW_SIDECAR_AMPS | SOC_SDW_CODEC_MIC),
+ 	},
+ 
+ 	/* ArrowLake devices */
+diff --git a/sound/soc/sof/intel/hda-dai.c b/sound/soc/sof/intel/hda-dai.c
+index ac505c7ad34295..82f46ecd94301e 100644
+--- a/sound/soc/sof/intel/hda-dai.c
++++ b/sound/soc/sof/intel/hda-dai.c
+@@ -103,8 +103,10 @@ hda_dai_get_ops(struct snd_pcm_substream *substream, struct snd_soc_dai *cpu_dai
+ 	return sdai->platform_private;
+ }
+ 
+-int hda_link_dma_cleanup(struct snd_pcm_substream *substream, struct hdac_ext_stream *hext_stream,
+-			 struct snd_soc_dai *cpu_dai)
++static int
++hda_link_dma_cleanup(struct snd_pcm_substream *substream,
++		     struct hdac_ext_stream *hext_stream,
++		     struct snd_soc_dai *cpu_dai, bool release)
+ {
+ 	const struct hda_dai_widget_dma_ops *ops = hda_dai_get_ops(substream, cpu_dai);
+ 	struct sof_intel_hda_stream *hda_stream;
+@@ -128,6 +130,17 @@ int hda_link_dma_cleanup(struct snd_pcm_substream *substream, struct hdac_ext_st
+ 		snd_hdac_ext_bus_link_clear_stream_id(hlink, stream_tag);
+ 	}
+ 
++	if (!release) {
++		/*
++		 * Force stream reconfiguration without releasing the channel on
++		 * subsequent stream restart (without free), including LinkDMA
++		 * reset.
++		 * The stream is released via hda_dai_hw_free()
++		 */
++		hext_stream->link_prepared = 0;
++		return 0;
++	}
++
+ 	if (ops->release_hext_stream)
+ 		ops->release_hext_stream(sdev, cpu_dai, substream);
+ 
+@@ -211,7 +224,7 @@ static int __maybe_unused hda_dai_hw_free(struct snd_pcm_substream *substream,
+ 	if (!hext_stream)
+ 		return 0;
+ 
+-	return hda_link_dma_cleanup(substream, hext_stream, cpu_dai);
++	return hda_link_dma_cleanup(substream, hext_stream, cpu_dai, true);
+ }
+ 
+ static int __maybe_unused hda_dai_hw_params_data(struct snd_pcm_substream *substream,
+@@ -304,7 +317,8 @@ static int __maybe_unused hda_dai_trigger(struct snd_pcm_substream *substream, i
+ 	switch (cmd) {
+ 	case SNDRV_PCM_TRIGGER_STOP:
+ 	case SNDRV_PCM_TRIGGER_SUSPEND:
+-		ret = hda_link_dma_cleanup(substream, hext_stream, dai);
++		ret = hda_link_dma_cleanup(substream, hext_stream, dai,
++					   cmd == SNDRV_PCM_TRIGGER_STOP ? false : true);
+ 		if (ret < 0) {
+ 			dev_err(sdev->dev, "%s: failed to clean up link DMA\n", __func__);
+ 			return ret;
+@@ -656,8 +670,7 @@ static int hda_dai_suspend(struct hdac_bus *bus)
+ 			}
+ 
+ 			ret = hda_link_dma_cleanup(hext_stream->link_substream,
+-						   hext_stream,
+-						   cpu_dai);
++						   hext_stream, cpu_dai, true);
+ 			if (ret < 0)
+ 				return ret;
+ 		}
+diff --git a/sound/soc/sof/intel/hda.h b/sound/soc/sof/intel/hda.h
+index b74a472435b5d2..4a4a0b55f0bc60 100644
+--- a/sound/soc/sof/intel/hda.h
++++ b/sound/soc/sof/intel/hda.h
+@@ -1028,8 +1028,6 @@ const struct hda_dai_widget_dma_ops *
+ hda_select_dai_widget_ops(struct snd_sof_dev *sdev, struct snd_sof_widget *swidget);
+ int hda_dai_config(struct snd_soc_dapm_widget *w, unsigned int flags,
+ 		   struct snd_sof_dai_config_data *data);
+-int hda_link_dma_cleanup(struct snd_pcm_substream *substream, struct hdac_ext_stream *hext_stream,
+-			 struct snd_soc_dai *cpu_dai);
+ 
+ static inline struct snd_sof_dev *widget_to_sdev(struct snd_soc_dapm_widget *w)
+ {
+diff --git a/tools/include/uapi/linux/stddef.h b/tools/include/uapi/linux/stddef.h
+index bb6ea517efb511..c53cde425406b7 100644
+--- a/tools/include/uapi/linux/stddef.h
++++ b/tools/include/uapi/linux/stddef.h
+@@ -8,6 +8,13 @@
+ #define __always_inline __inline__
+ #endif
+ 
++/* Not all C++ standards support type declarations inside an anonymous union */
++#ifndef __cplusplus
++#define __struct_group_tag(TAG)		TAG
++#else
++#define __struct_group_tag(TAG)
++#endif
++
+ /**
+  * __struct_group() - Create a mirrored named and anonyomous struct
+  *
+@@ -20,14 +27,14 @@
+  * and size: one anonymous and one named. The former's members can be used
+  * normally without sub-struct naming, and the latter can be used to
+  * reason about the start, end, and size of the group of struct members.
+- * The named struct can also be explicitly tagged for layer reuse, as well
+- * as both having struct attributes appended.
++ * The named struct can also be explicitly tagged for layer reuse (C only),
++ * as well as both having struct attributes appended.
+  */
+ #define __struct_group(TAG, NAME, ATTRS, MEMBERS...) \
+ 	union { \
+ 		struct { MEMBERS } ATTRS; \
+-		struct TAG { MEMBERS } ATTRS NAME; \
+-	}
++		struct __struct_group_tag(TAG) { MEMBERS } ATTRS NAME; \
++	} ATTRS
+ 
+ /**
+  * __DECLARE_FLEX_ARRAY() - Declare a flexible array usable in a union
+diff --git a/tools/objtool/noreturns.h b/tools/objtool/noreturns.h
+index e7da92489167e9..f98dc0e1c99c4a 100644
+--- a/tools/objtool/noreturns.h
++++ b/tools/objtool/noreturns.h
+@@ -20,6 +20,7 @@ NORETURN(__x64_sys_exit_group)
+ NORETURN(arch_cpu_idle_dead)
+ NORETURN(bch2_trans_in_restart_error)
+ NORETURN(bch2_trans_restart_error)
++NORETURN(bch2_trans_unlocked_error)
+ NORETURN(cpu_bringup_and_idle)
+ NORETURN(cpu_startup_entry)
+ NORETURN(do_exit)
+diff --git a/tools/testing/selftests/bpf/progs/dynptr_fail.c b/tools/testing/selftests/bpf/progs/dynptr_fail.c
+index 8f36c9de759152..dfd817d0348c47 100644
+--- a/tools/testing/selftests/bpf/progs/dynptr_fail.c
++++ b/tools/testing/selftests/bpf/progs/dynptr_fail.c
+@@ -149,7 +149,7 @@ int ringbuf_release_uninit_dynptr(void *ctx)
+ 
+ /* A dynptr can't be used after it has been invalidated */
+ SEC("?raw_tp")
+-__failure __msg("Expected an initialized dynptr as arg #3")
++__failure __msg("Expected an initialized dynptr as arg #2")
+ int use_after_invalid(void *ctx)
+ {
+ 	struct bpf_dynptr ptr;
+@@ -428,7 +428,7 @@ int invalid_helper2(void *ctx)
+ 
+ /* A bpf_dynptr is invalidated if it's been written into */
+ SEC("?raw_tp")
+-__failure __msg("Expected an initialized dynptr as arg #1")
++__failure __msg("Expected an initialized dynptr as arg #0")
+ int invalid_write1(void *ctx)
+ {
+ 	struct bpf_dynptr ptr;
+@@ -1407,7 +1407,7 @@ int invalid_slice_rdwr_rdonly(struct __sk_buff *skb)
+ 
+ /* bpf_dynptr_adjust can only be called on initialized dynptrs */
+ SEC("?raw_tp")
+-__failure __msg("Expected an initialized dynptr as arg #1")
++__failure __msg("Expected an initialized dynptr as arg #0")
+ int dynptr_adjust_invalid(void *ctx)
+ {
+ 	struct bpf_dynptr ptr = {};
+@@ -1420,7 +1420,7 @@ int dynptr_adjust_invalid(void *ctx)
+ 
+ /* bpf_dynptr_is_null can only be called on initialized dynptrs */
+ SEC("?raw_tp")
+-__failure __msg("Expected an initialized dynptr as arg #1")
++__failure __msg("Expected an initialized dynptr as arg #0")
+ int dynptr_is_null_invalid(void *ctx)
+ {
+ 	struct bpf_dynptr ptr = {};
+@@ -1433,7 +1433,7 @@ int dynptr_is_null_invalid(void *ctx)
+ 
+ /* bpf_dynptr_is_rdonly can only be called on initialized dynptrs */
+ SEC("?raw_tp")
+-__failure __msg("Expected an initialized dynptr as arg #1")
++__failure __msg("Expected an initialized dynptr as arg #0")
+ int dynptr_is_rdonly_invalid(void *ctx)
+ {
+ 	struct bpf_dynptr ptr = {};
+@@ -1446,7 +1446,7 @@ int dynptr_is_rdonly_invalid(void *ctx)
+ 
+ /* bpf_dynptr_size can only be called on initialized dynptrs */
+ SEC("?raw_tp")
+-__failure __msg("Expected an initialized dynptr as arg #1")
++__failure __msg("Expected an initialized dynptr as arg #0")
+ int dynptr_size_invalid(void *ctx)
+ {
+ 	struct bpf_dynptr ptr = {};
+@@ -1459,7 +1459,7 @@ int dynptr_size_invalid(void *ctx)
+ 
+ /* Only initialized dynptrs can be cloned */
+ SEC("?raw_tp")
+-__failure __msg("Expected an initialized dynptr as arg #1")
++__failure __msg("Expected an initialized dynptr as arg #0")
+ int clone_invalid1(void *ctx)
+ {
+ 	struct bpf_dynptr ptr1 = {};
+@@ -1493,7 +1493,7 @@ int clone_invalid2(struct xdp_md *xdp)
+ 
+ /* Invalidating a dynptr should invalidate its clones */
+ SEC("?raw_tp")
+-__failure __msg("Expected an initialized dynptr as arg #3")
++__failure __msg("Expected an initialized dynptr as arg #2")
+ int clone_invalidate1(void *ctx)
+ {
+ 	struct bpf_dynptr clone;
+@@ -1514,7 +1514,7 @@ int clone_invalidate1(void *ctx)
+ 
+ /* Invalidating a dynptr should invalidate its parent */
+ SEC("?raw_tp")
+-__failure __msg("Expected an initialized dynptr as arg #3")
++__failure __msg("Expected an initialized dynptr as arg #2")
+ int clone_invalidate2(void *ctx)
+ {
+ 	struct bpf_dynptr ptr;
+@@ -1535,7 +1535,7 @@ int clone_invalidate2(void *ctx)
+ 
+ /* Invalidating a dynptr should invalidate its siblings */
+ SEC("?raw_tp")
+-__failure __msg("Expected an initialized dynptr as arg #3")
++__failure __msg("Expected an initialized dynptr as arg #2")
+ int clone_invalidate3(void *ctx)
+ {
+ 	struct bpf_dynptr ptr;
+@@ -1723,7 +1723,7 @@ __noinline long global_call_bpf_dynptr(const struct bpf_dynptr *dynptr)
+ }
+ 
+ SEC("?raw_tp")
+-__failure __msg("arg#1 expected pointer to stack or const struct bpf_dynptr")
++__failure __msg("arg#0 expected pointer to stack or const struct bpf_dynptr")
+ int test_dynptr_reg_type(void *ctx)
+ {
+ 	struct task_struct *current = NULL;
+diff --git a/tools/testing/selftests/bpf/progs/iters_state_safety.c b/tools/testing/selftests/bpf/progs/iters_state_safety.c
+index d47e59aba6de35..f41257eadbb258 100644
+--- a/tools/testing/selftests/bpf/progs/iters_state_safety.c
++++ b/tools/testing/selftests/bpf/progs/iters_state_safety.c
+@@ -73,7 +73,7 @@ int create_and_forget_to_destroy_fail(void *ctx)
+ }
+ 
+ SEC("?raw_tp")
+-__failure __msg("expected an initialized iter_num as arg #1")
++__failure __msg("expected an initialized iter_num as arg #0")
+ int destroy_without_creating_fail(void *ctx)
+ {
+ 	/* init with zeros to stop verifier complaining about uninit stack */
+@@ -91,7 +91,7 @@ int destroy_without_creating_fail(void *ctx)
+ }
+ 
+ SEC("?raw_tp")
+-__failure __msg("expected an initialized iter_num as arg #1")
++__failure __msg("expected an initialized iter_num as arg #0")
+ int compromise_iter_w_direct_write_fail(void *ctx)
+ {
+ 	struct bpf_iter_num iter;
+@@ -143,7 +143,7 @@ int compromise_iter_w_direct_write_and_skip_destroy_fail(void *ctx)
+ }
+ 
+ SEC("?raw_tp")
+-__failure __msg("expected an initialized iter_num as arg #1")
++__failure __msg("expected an initialized iter_num as arg #0")
+ int compromise_iter_w_helper_write_fail(void *ctx)
+ {
+ 	struct bpf_iter_num iter;
+@@ -230,7 +230,7 @@ int valid_stack_reuse(void *ctx)
+ }
+ 
+ SEC("?raw_tp")
+-__failure __msg("expected uninitialized iter_num as arg #1")
++__failure __msg("expected uninitialized iter_num as arg #0")
+ int double_create_fail(void *ctx)
+ {
+ 	struct bpf_iter_num iter;
+@@ -258,7 +258,7 @@ int double_create_fail(void *ctx)
+ }
+ 
+ SEC("?raw_tp")
+-__failure __msg("expected an initialized iter_num as arg #1")
++__failure __msg("expected an initialized iter_num as arg #0")
+ int double_destroy_fail(void *ctx)
+ {
+ 	struct bpf_iter_num iter;
+@@ -284,7 +284,7 @@ int double_destroy_fail(void *ctx)
+ }
+ 
+ SEC("?raw_tp")
+-__failure __msg("expected an initialized iter_num as arg #1")
++__failure __msg("expected an initialized iter_num as arg #0")
+ int next_without_new_fail(void *ctx)
+ {
+ 	struct bpf_iter_num iter;
+@@ -305,7 +305,7 @@ int next_without_new_fail(void *ctx)
+ }
+ 
+ SEC("?raw_tp")
+-__failure __msg("expected an initialized iter_num as arg #1")
++__failure __msg("expected an initialized iter_num as arg #0")
+ int next_after_destroy_fail(void *ctx)
+ {
+ 	struct bpf_iter_num iter;
+diff --git a/tools/testing/selftests/bpf/progs/iters_testmod_seq.c b/tools/testing/selftests/bpf/progs/iters_testmod_seq.c
+index 4a176e6aede897..6543d5b6e0a976 100644
+--- a/tools/testing/selftests/bpf/progs/iters_testmod_seq.c
++++ b/tools/testing/selftests/bpf/progs/iters_testmod_seq.c
+@@ -79,7 +79,7 @@ int testmod_seq_truncated(const void *ctx)
+ 
+ SEC("?raw_tp")
+ __failure
+-__msg("expected an initialized iter_testmod_seq as arg #2")
++__msg("expected an initialized iter_testmod_seq as arg #1")
+ int testmod_seq_getter_before_bad(const void *ctx)
+ {
+ 	struct bpf_iter_testmod_seq it;
+@@ -89,7 +89,7 @@ int testmod_seq_getter_before_bad(const void *ctx)
+ 
+ SEC("?raw_tp")
+ __failure
+-__msg("expected an initialized iter_testmod_seq as arg #2")
++__msg("expected an initialized iter_testmod_seq as arg #1")
+ int testmod_seq_getter_after_bad(const void *ctx)
+ {
+ 	struct bpf_iter_testmod_seq it;
+diff --git a/tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c b/tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c
+index e68667aec6a652..cd4d752bd089ca 100644
+--- a/tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c
++++ b/tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c
+@@ -45,7 +45,7 @@ int BPF_PROG(not_valid_dynptr, int cmd, union bpf_attr *attr, unsigned int size)
+ }
+ 
+ SEC("?lsm.s/bpf")
+-__failure __msg("arg#1 expected pointer to stack or const struct bpf_dynptr")
++__failure __msg("arg#0 expected pointer to stack or const struct bpf_dynptr")
+ int BPF_PROG(not_ptr_to_stack, int cmd, union bpf_attr *attr, unsigned int size)
+ {
+ 	unsigned long val = 0;
+diff --git a/tools/testing/selftests/bpf/progs/verifier_bits_iter.c b/tools/testing/selftests/bpf/progs/verifier_bits_iter.c
+index a7a6ae6c162fe0..8bcddadfc4daed 100644
+--- a/tools/testing/selftests/bpf/progs/verifier_bits_iter.c
++++ b/tools/testing/selftests/bpf/progs/verifier_bits_iter.c
+@@ -32,7 +32,7 @@ int BPF_PROG(no_destroy, struct bpf_iter_meta *meta, struct cgroup *cgrp)
+ 
+ SEC("iter/cgroup")
+ __description("uninitialized iter in ->next()")
+-__failure __msg("expected an initialized iter_bits as arg #1")
++__failure __msg("expected an initialized iter_bits as arg #0")
+ int BPF_PROG(next_uninit, struct bpf_iter_meta *meta, struct cgroup *cgrp)
+ {
+ 	struct bpf_iter_bits it = {};
+@@ -43,7 +43,7 @@ int BPF_PROG(next_uninit, struct bpf_iter_meta *meta, struct cgroup *cgrp)
+ 
+ SEC("iter/cgroup")
+ __description("uninitialized iter in ->destroy()")
+-__failure __msg("expected an initialized iter_bits as arg #1")
++__failure __msg("expected an initialized iter_bits as arg #0")
+ int BPF_PROG(destroy_uninit, struct bpf_iter_meta *meta, struct cgroup *cgrp)
+ {
+ 	struct bpf_iter_bits it = {};
+diff --git a/tools/testing/selftests/bpf/trace_helpers.c b/tools/testing/selftests/bpf/trace_helpers.c
+index 2d742fdac6b977..81943c6254e6bc 100644
+--- a/tools/testing/selftests/bpf/trace_helpers.c
++++ b/tools/testing/selftests/bpf/trace_helpers.c
+@@ -293,6 +293,10 @@ static int procmap_query(int fd, const void *addr, __u32 query_flags, size_t *st
+ 	return 0;
+ }
+ #else
++# ifndef PROCMAP_QUERY_VMA_EXECUTABLE
++#  define PROCMAP_QUERY_VMA_EXECUTABLE 0x04
++# endif
++
+ static int procmap_query(int fd, const void *addr, __u32 query_flags, size_t *start, size_t *offset, int *flags)
+ {
+ 	return -EOPNOTSUPP;
+diff --git a/tools/tracing/rtla/src/timerlat_hist.c b/tools/tracing/rtla/src/timerlat_hist.c
+index ae55cd79128336..2cc3ffcbc983d3 100644
+--- a/tools/tracing/rtla/src/timerlat_hist.c
++++ b/tools/tracing/rtla/src/timerlat_hist.c
+@@ -280,6 +280,21 @@ static void timerlat_hist_header(struct osnoise_tool *tool)
+ 	trace_seq_reset(s);
+ }
+ 
++/*
++ * format_summary_value - format a line of summary value (min, max or avg)
++ * of hist data
++ */
++static void format_summary_value(struct trace_seq *seq,
++				 int count,
++				 unsigned long long val,
++				 bool avg)
++{
++	if (count)
++		trace_seq_printf(seq, "%9llu ", avg ? val / count : val);
++	else
++		trace_seq_printf(seq, "%9c ", '-');
++}
++
+ /*
+  * timerlat_print_summary - print the summary of the hist data to the output
+  */
+@@ -327,29 +342,23 @@ timerlat_print_summary(struct timerlat_hist_params *params,
+ 		if (!data->hist[cpu].irq_count && !data->hist[cpu].thread_count)
+ 			continue;
+ 
+-		if (!params->no_irq) {
+-			if (data->hist[cpu].irq_count)
+-				trace_seq_printf(trace->seq, "%9llu ",
+-						data->hist[cpu].min_irq);
+-			else
+-				trace_seq_printf(trace->seq, "        - ");
+-		}
++		if (!params->no_irq)
++			format_summary_value(trace->seq,
++					     data->hist[cpu].irq_count,
++					     data->hist[cpu].min_irq,
++					     false);
+ 
+-		if (!params->no_thread) {
+-			if (data->hist[cpu].thread_count)
+-				trace_seq_printf(trace->seq, "%9llu ",
+-						data->hist[cpu].min_thread);
+-			else
+-				trace_seq_printf(trace->seq, "        - ");
+-		}
++		if (!params->no_thread)
++			format_summary_value(trace->seq,
++					     data->hist[cpu].thread_count,
++					     data->hist[cpu].min_thread,
++					     false);
+ 
+-		if (params->user_hist) {
+-			if (data->hist[cpu].user_count)
+-				trace_seq_printf(trace->seq, "%9llu ",
+-						data->hist[cpu].min_user);
+-			else
+-				trace_seq_printf(trace->seq, "        - ");
+-		}
++		if (params->user_hist)
++			format_summary_value(trace->seq,
++					     data->hist[cpu].user_count,
++					     data->hist[cpu].min_user,
++					     false);
+ 	}
+ 	trace_seq_printf(trace->seq, "\n");
+ 
+@@ -363,29 +372,23 @@ timerlat_print_summary(struct timerlat_hist_params *params,
+ 		if (!data->hist[cpu].irq_count && !data->hist[cpu].thread_count)
+ 			continue;
+ 
+-		if (!params->no_irq) {
+-			if (data->hist[cpu].irq_count)
+-				trace_seq_printf(trace->seq, "%9llu ",
+-						 data->hist[cpu].sum_irq / data->hist[cpu].irq_count);
+-			else
+-				trace_seq_printf(trace->seq, "        - ");
+-		}
++		if (!params->no_irq)
++			format_summary_value(trace->seq,
++					     data->hist[cpu].irq_count,
++					     data->hist[cpu].sum_irq,
++					     true);
+ 
+-		if (!params->no_thread) {
+-			if (data->hist[cpu].thread_count)
+-				trace_seq_printf(trace->seq, "%9llu ",
+-						 data->hist[cpu].sum_thread / data->hist[cpu].thread_count);
+-			else
+-				trace_seq_printf(trace->seq, "        - ");
+-		}
++		if (!params->no_thread)
++			format_summary_value(trace->seq,
++					     data->hist[cpu].thread_count,
++					     data->hist[cpu].sum_thread,
++					     true);
+ 
+-		if (params->user_hist) {
+-			if (data->hist[cpu].user_count)
+-				trace_seq_printf(trace->seq, "%9llu ",
+-						 data->hist[cpu].sum_user / data->hist[cpu].user_count);
+-			else
+-				trace_seq_printf(trace->seq, "        - ");
+-		}
++		if (params->user_hist)
++			format_summary_value(trace->seq,
++					     data->hist[cpu].user_count,
++					     data->hist[cpu].sum_user,
++					     true);
+ 	}
+ 	trace_seq_printf(trace->seq, "\n");
+ 
+@@ -399,29 +402,23 @@ timerlat_print_summary(struct timerlat_hist_params *params,
+ 		if (!data->hist[cpu].irq_count && !data->hist[cpu].thread_count)
+ 			continue;
+ 
+-		if (!params->no_irq) {
+-			if (data->hist[cpu].irq_count)
+-				trace_seq_printf(trace->seq, "%9llu ",
+-						 data->hist[cpu].max_irq);
+-			else
+-				trace_seq_printf(trace->seq, "        - ");
+-		}
++		if (!params->no_irq)
++			format_summary_value(trace->seq,
++					     data->hist[cpu].irq_count,
++					     data->hist[cpu].max_irq,
++					     false);
+ 
+-		if (!params->no_thread) {
+-			if (data->hist[cpu].thread_count)
+-				trace_seq_printf(trace->seq, "%9llu ",
+-						data->hist[cpu].max_thread);
+-			else
+-				trace_seq_printf(trace->seq, "        - ");
+-		}
++		if (!params->no_thread)
++			format_summary_value(trace->seq,
++					     data->hist[cpu].thread_count,
++					     data->hist[cpu].max_thread,
++					     false);
+ 
+-		if (params->user_hist) {
+-			if (data->hist[cpu].user_count)
+-				trace_seq_printf(trace->seq, "%9llu ",
+-						data->hist[cpu].max_user);
+-			else
+-				trace_seq_printf(trace->seq, "        - ");
+-		}
++		if (params->user_hist)
++			format_summary_value(trace->seq,
++					     data->hist[cpu].user_count,
++					     data->hist[cpu].max_user,
++					     false);
+ 	}
+ 	trace_seq_printf(trace->seq, "\n");
+ 	trace_seq_do_printf(trace->seq);
+@@ -505,16 +502,22 @@ timerlat_print_stats_all(struct timerlat_hist_params *params,
+ 		trace_seq_printf(trace->seq, "min:  ");
+ 
+ 	if (!params->no_irq)
+-		trace_seq_printf(trace->seq, "%9llu ",
+-				 sum.min_irq);
++		format_summary_value(trace->seq,
++				     sum.irq_count,
++				     sum.min_irq,
++				     false);
+ 
+ 	if (!params->no_thread)
+-		trace_seq_printf(trace->seq, "%9llu ",
+-				 sum.min_thread);
++		format_summary_value(trace->seq,
++				     sum.thread_count,
++				     sum.min_thread,
++				     false);
+ 
+ 	if (params->user_hist)
+-		trace_seq_printf(trace->seq, "%9llu ",
+-				 sum.min_user);
++		format_summary_value(trace->seq,
++				     sum.user_count,
++				     sum.min_user,
++				     false);
+ 
+ 	trace_seq_printf(trace->seq, "\n");
+ 
+@@ -522,16 +525,22 @@ timerlat_print_stats_all(struct timerlat_hist_params *params,
+ 		trace_seq_printf(trace->seq, "avg:  ");
+ 
+ 	if (!params->no_irq)
+-		trace_seq_printf(trace->seq, "%9llu ",
+-				 sum.sum_irq / sum.irq_count);
++		format_summary_value(trace->seq,
++				     sum.irq_count,
++				     sum.sum_irq,
++				     true);
+ 
+ 	if (!params->no_thread)
+-		trace_seq_printf(trace->seq, "%9llu ",
+-				 sum.sum_thread / sum.thread_count);
++		format_summary_value(trace->seq,
++				     sum.thread_count,
++				     sum.sum_thread,
++				     true);
+ 
+ 	if (params->user_hist)
+-		trace_seq_printf(trace->seq, "%9llu ",
+-				 sum.sum_user / sum.user_count);
++		format_summary_value(trace->seq,
++				     sum.user_count,
++				     sum.sum_user,
++				     true);
+ 
+ 	trace_seq_printf(trace->seq, "\n");
+ 
+@@ -539,16 +548,22 @@ timerlat_print_stats_all(struct timerlat_hist_params *params,
+ 		trace_seq_printf(trace->seq, "max:  ");
+ 
+ 	if (!params->no_irq)
+-		trace_seq_printf(trace->seq, "%9llu ",
+-				 sum.max_irq);
++		format_summary_value(trace->seq,
++				     sum.irq_count,
++				     sum.max_irq,
++				     false);
+ 
+ 	if (!params->no_thread)
+-		trace_seq_printf(trace->seq, "%9llu ",
+-				 sum.max_thread);
++		format_summary_value(trace->seq,
++				     sum.thread_count,
++				     sum.max_thread,
++				     false);
+ 
+ 	if (params->user_hist)
+-		trace_seq_printf(trace->seq, "%9llu ",
+-				 sum.max_user);
++		format_summary_value(trace->seq,
++				     sum.user_count,
++				     sum.max_user,
++				     false);
+ 
+ 	trace_seq_printf(trace->seq, "\n");
+ 	trace_seq_do_printf(trace->seq);


^ permalink raw reply related	[flat|nested] 82+ messages in thread
* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2024-12-27 14:08 Mike Pagano
  0 siblings, 0 replies; 82+ messages in thread
From: Mike Pagano @ 2024-12-27 14:08 UTC (permalink / raw
  To: gentoo-commits

commit:     671fd61e3eafab207b759f2bca79a6eda9cf710a
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Dec 27 14:07:47 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Dec 27 14:07:47 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=671fd61e

Linux patch 6.12.7

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1006_linux-6.12.7.patch | 6443 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 6447 insertions(+)

diff --git a/0000_README b/0000_README
index 1bb8df77..6961ab2e 100644
--- a/0000_README
+++ b/0000_README
@@ -67,6 +67,10 @@ Patch:  1005_linux-6.12.6.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.12.6
 
+Patch:  1006_linux-6.12.7.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.12.7
+
 Patch:  1510_fs-enable-link-security-restrictions-by-default.patch
 From:   http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch/
 Desc:   Enable link security restrictions by default.

diff --git a/1006_linux-6.12.7.patch b/1006_linux-6.12.7.patch
new file mode 100644
index 00000000..17157109
--- /dev/null
+++ b/1006_linux-6.12.7.patch
@@ -0,0 +1,6443 @@
+diff --git a/Makefile b/Makefile
+index c10952585c14b0..685a57f6c8d279 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 12
+-SUBLEVEL = 6
++SUBLEVEL = 7
+ EXTRAVERSION =
+ NAME = Baby Opossum Posse
+ 
+diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
+index fbed433283c9b9..42791971f75887 100644
+--- a/arch/arm64/kvm/sys_regs.c
++++ b/arch/arm64/kvm/sys_regs.c
+@@ -2503,7 +2503,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
+ 	ID_WRITABLE(ID_AA64MMFR0_EL1, ~(ID_AA64MMFR0_EL1_RES0 |
+ 					ID_AA64MMFR0_EL1_TGRAN4_2 |
+ 					ID_AA64MMFR0_EL1_TGRAN64_2 |
+-					ID_AA64MMFR0_EL1_TGRAN16_2)),
++					ID_AA64MMFR0_EL1_TGRAN16_2 |
++					ID_AA64MMFR0_EL1_ASIDBITS)),
+ 	ID_WRITABLE(ID_AA64MMFR1_EL1, ~(ID_AA64MMFR1_EL1_RES0 |
+ 					ID_AA64MMFR1_EL1_HCX |
+ 					ID_AA64MMFR1_EL1_TWED |
+diff --git a/arch/hexagon/Makefile b/arch/hexagon/Makefile
+index 92d005958dfb23..ff172cbe5881a0 100644
+--- a/arch/hexagon/Makefile
++++ b/arch/hexagon/Makefile
+@@ -32,3 +32,9 @@ KBUILD_LDFLAGS += $(ldflags-y)
+ TIR_NAME := r19
+ KBUILD_CFLAGS += -ffixed-$(TIR_NAME) -DTHREADINFO_REG=$(TIR_NAME) -D__linux__
+ KBUILD_AFLAGS += -DTHREADINFO_REG=$(TIR_NAME)
++
++# Disable HexagonConstExtenders pass for LLVM versions prior to 19.1.0
++# https://github.com/llvm/llvm-project/issues/99714
++ifneq ($(call clang-min-version, 190100),y)
++KBUILD_CFLAGS += -mllvm -hexagon-cext=false
++endif
+diff --git a/arch/riscv/kvm/aia.c b/arch/riscv/kvm/aia.c
+index 2967d305c44278..9f3b527596ded8 100644
+--- a/arch/riscv/kvm/aia.c
++++ b/arch/riscv/kvm/aia.c
+@@ -552,7 +552,7 @@ void kvm_riscv_aia_enable(void)
+ 	csr_set(CSR_HIE, BIT(IRQ_S_GEXT));
+ 	/* Enable IRQ filtering for overflow interrupt only if sscofpmf is present */
+ 	if (__riscv_isa_extension_available(NULL, RISCV_ISA_EXT_SSCOFPMF))
+-		csr_write(CSR_HVIEN, BIT(IRQ_PMU_OVF));
++		csr_set(CSR_HVIEN, BIT(IRQ_PMU_OVF));
+ }
+ 
+ void kvm_riscv_aia_disable(void)
+diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c
+index c8f149ad77e584..c2ee0745f59edc 100644
+--- a/arch/s390/boot/startup.c
++++ b/arch/s390/boot/startup.c
+@@ -231,6 +231,8 @@ static unsigned long get_vmem_size(unsigned long identity_size,
+ 	vsize = round_up(SZ_2G + max_mappable, rte_size) +
+ 		round_up(vmemmap_size, rte_size) +
+ 		FIXMAP_SIZE + MODULES_LEN + KASLR_LEN;
++	if (IS_ENABLED(CONFIG_KMSAN))
++		vsize += MODULES_LEN * 2;
+ 	return size_add(vsize, vmalloc_size);
+ }
+ 
+diff --git a/arch/s390/boot/vmem.c b/arch/s390/boot/vmem.c
+index 145035f84a0e3e..3fa28db2fe59f4 100644
+--- a/arch/s390/boot/vmem.c
++++ b/arch/s390/boot/vmem.c
+@@ -306,7 +306,7 @@ static void pgtable_pte_populate(pmd_t *pmd, unsigned long addr, unsigned long e
+ 			pages++;
+ 		}
+ 	}
+-	if (mode == POPULATE_DIRECT)
++	if (mode == POPULATE_IDENTITY)
+ 		update_page_count(PG_DIRECT_MAP_4K, pages);
+ }
+ 
+@@ -339,7 +339,7 @@ static void pgtable_pmd_populate(pud_t *pud, unsigned long addr, unsigned long e
+ 		}
+ 		pgtable_pte_populate(pmd, addr, next, mode);
+ 	}
+-	if (mode == POPULATE_DIRECT)
++	if (mode == POPULATE_IDENTITY)
+ 		update_page_count(PG_DIRECT_MAP_1M, pages);
+ }
+ 
+@@ -372,7 +372,7 @@ static void pgtable_pud_populate(p4d_t *p4d, unsigned long addr, unsigned long e
+ 		}
+ 		pgtable_pmd_populate(pud, addr, next, mode);
+ 	}
+-	if (mode == POPULATE_DIRECT)
++	if (mode == POPULATE_IDENTITY)
+ 		update_page_count(PG_DIRECT_MAP_2G, pages);
+ }
+ 
+diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
+index f17bb7bf939242..5fa203f4bc6b80 100644
+--- a/arch/s390/kernel/ipl.c
++++ b/arch/s390/kernel/ipl.c
+@@ -270,7 +270,7 @@ static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj,	\
+ 	if (len >= sizeof(_value))					\
+ 		return -E2BIG;						\
+ 	len = strscpy(_value, buf, sizeof(_value));			\
+-	if (len < 0)							\
++	if ((ssize_t)len < 0)						\
+ 		return len;						\
+ 	strim(_value);							\
+ 	return len;							\
+diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
+index d18078834dedac..dc12fe5ef3caa9 100644
+--- a/arch/x86/kernel/cpu/mshyperv.c
++++ b/arch/x86/kernel/cpu/mshyperv.c
+@@ -223,6 +223,63 @@ static void hv_machine_crash_shutdown(struct pt_regs *regs)
+ 	hyperv_cleanup();
+ }
+ #endif /* CONFIG_CRASH_DUMP */
++
++static u64 hv_ref_counter_at_suspend;
++static void (*old_save_sched_clock_state)(void);
++static void (*old_restore_sched_clock_state)(void);
++
++/*
++ * Hyper-V clock counter resets during hibernation. Save and restore clock
++ * offset during suspend/resume, while also considering the time passed
++ * before suspend. This is to make sure that sched_clock using hv tsc page
++ * based clocksource, proceeds from where it left off during suspend and
++ * it shows correct time for the timestamps of kernel messages after resume.
++ */
++static void save_hv_clock_tsc_state(void)
++{
++	hv_ref_counter_at_suspend = hv_read_reference_counter();
++}
++
++static void restore_hv_clock_tsc_state(void)
++{
++	/*
++	 * Adjust the offsets used by hv tsc clocksource to
++	 * account for the time spent before hibernation.
++	 * adjusted value = reference counter (time) at suspend
++	 *                - reference counter (time) now.
++	 */
++	hv_adj_sched_clock_offset(hv_ref_counter_at_suspend - hv_read_reference_counter());
++}
++
++/*
++ * Functions to override save_sched_clock_state and restore_sched_clock_state
++ * functions of x86_platform. The Hyper-V clock counter is reset during
++ * suspend-resume and the offset used to measure time needs to be
++ * corrected, post resume.
++ */
++static void hv_save_sched_clock_state(void)
++{
++	old_save_sched_clock_state();
++	save_hv_clock_tsc_state();
++}
++
++static void hv_restore_sched_clock_state(void)
++{
++	restore_hv_clock_tsc_state();
++	old_restore_sched_clock_state();
++}
++
++static void __init x86_setup_ops_for_tsc_pg_clock(void)
++{
++	if (!(ms_hyperv.features & HV_MSR_REFERENCE_TSC_AVAILABLE))
++		return;
++
++	old_save_sched_clock_state = x86_platform.save_sched_clock_state;
++	x86_platform.save_sched_clock_state = hv_save_sched_clock_state;
++
++	old_restore_sched_clock_state = x86_platform.restore_sched_clock_state;
++	x86_platform.restore_sched_clock_state = hv_restore_sched_clock_state;
++}
+ #endif /* CONFIG_HYPERV */
+ 
+ static uint32_t  __init ms_hyperv_platform(void)
+@@ -579,6 +636,7 @@ static void __init ms_hyperv_init_platform(void)
+ 
+ 	/* Register Hyper-V specific clocksource */
+ 	hv_init_clocksource();
++	x86_setup_ops_for_tsc_pg_clock();
+ 	hv_vtl_init_platform();
+ #endif
+ 	/*
+diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
+index 41786b834b1635..83bfecd1a6e40c 100644
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -36,6 +36,26 @@
+ u32 kvm_cpu_caps[NR_KVM_CPU_CAPS] __read_mostly;
+ EXPORT_SYMBOL_GPL(kvm_cpu_caps);
+ 
++struct cpuid_xstate_sizes {
++	u32 eax;
++	u32 ebx;
++	u32 ecx;
++};
++
++static struct cpuid_xstate_sizes xstate_sizes[XFEATURE_MAX] __ro_after_init;
++
++void __init kvm_init_xstate_sizes(void)
++{
++	u32 ign;
++	int i;
++
++	for (i = XFEATURE_YMM; i < ARRAY_SIZE(xstate_sizes); i++) {
++		struct cpuid_xstate_sizes *xs = &xstate_sizes[i];
++
++		cpuid_count(0xD, i, &xs->eax, &xs->ebx, &xs->ecx, &ign);
++	}
++}
++
+ u32 xstate_required_size(u64 xstate_bv, bool compacted)
+ {
+ 	int feature_bit = 0;
+@@ -44,14 +64,15 @@ u32 xstate_required_size(u64 xstate_bv, bool compacted)
+ 	xstate_bv &= XFEATURE_MASK_EXTEND;
+ 	while (xstate_bv) {
+ 		if (xstate_bv & 0x1) {
+-		        u32 eax, ebx, ecx, edx, offset;
+-		        cpuid_count(0xD, feature_bit, &eax, &ebx, &ecx, &edx);
++			struct cpuid_xstate_sizes *xs = &xstate_sizes[feature_bit];
++			u32 offset;
++
+ 			/* ECX[1]: 64B alignment in compacted form */
+ 			if (compacted)
+-				offset = (ecx & 0x2) ? ALIGN(ret, 64) : ret;
++				offset = (xs->ecx & 0x2) ? ALIGN(ret, 64) : ret;
+ 			else
+-				offset = ebx;
+-			ret = max(ret, offset + eax);
++				offset = xs->ebx;
++			ret = max(ret, offset + xs->eax);
+ 		}
+ 
+ 		xstate_bv >>= 1;
+diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
+index 41697cca354e6b..ad479cfb91bc7b 100644
+--- a/arch/x86/kvm/cpuid.h
++++ b/arch/x86/kvm/cpuid.h
+@@ -32,6 +32,7 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
+ bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
+ 	       u32 *ecx, u32 *edx, bool exact_only);
+ 
++void __init kvm_init_xstate_sizes(void);
+ u32 xstate_required_size(u64 xstate_bv, bool compacted);
+ 
+ int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu);
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
+index 9df3e1e5ae81a1..4543dd6bcab2cb 100644
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -3199,15 +3199,6 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
+ 		if (data & ~supported_de_cfg)
+ 			return 1;
+ 
+-		/*
+-		 * Don't let the guest change the host-programmed value.  The
+-		 * MSR is very model specific, i.e. contains multiple bits that
+-		 * are completely unknown to KVM, and the one bit known to KVM
+-		 * is simply a reflection of hardware capabilities.
+-		 */
+-		if (!msr->host_initiated && data != svm->msr_decfg)
+-			return 1;
+-
+ 		svm->msr_decfg = data;
+ 		break;
+ 	}
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 83fe0a78146fc1..b49e2eb4893080 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -9991,7 +9991,7 @@ static int complete_hypercall_exit(struct kvm_vcpu *vcpu)
+ {
+ 	u64 ret = vcpu->run->hypercall.ret;
+ 
+-	if (!is_64_bit_mode(vcpu))
++	if (!is_64_bit_hypercall(vcpu))
+ 		ret = (u32)ret;
+ 	kvm_rax_write(vcpu, ret);
+ 	++vcpu->stat.hypercalls;
+@@ -14010,6 +14010,8 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_rmp_fault);
+ 
+ static int __init kvm_x86_init(void)
+ {
++	kvm_init_xstate_sizes();
++
+ 	kvm_mmu_x86_module_init();
+ 	mitigate_smt_rsb &= boot_cpu_has_bug(X86_BUG_SMT_RSB) && cpu_smt_possible();
+ 	return 0;
+diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
+index cd5ea6eaa76b09..156e9bb07abf1a 100644
+--- a/block/blk-mq-sysfs.c
++++ b/block/blk-mq-sysfs.c
+@@ -275,13 +275,15 @@ void blk_mq_sysfs_unregister_hctxs(struct request_queue *q)
+ 	struct blk_mq_hw_ctx *hctx;
+ 	unsigned long i;
+ 
+-	lockdep_assert_held(&q->sysfs_dir_lock);
+-
++	mutex_lock(&q->sysfs_dir_lock);
+ 	if (!q->mq_sysfs_init_done)
+-		return;
++		goto unlock;
+ 
+ 	queue_for_each_hw_ctx(q, hctx, i)
+ 		blk_mq_unregister_hctx(hctx);
++
++unlock:
++	mutex_unlock(&q->sysfs_dir_lock);
+ }
+ 
+ int blk_mq_sysfs_register_hctxs(struct request_queue *q)
+@@ -290,10 +292,9 @@ int blk_mq_sysfs_register_hctxs(struct request_queue *q)
+ 	unsigned long i;
+ 	int ret = 0;
+ 
+-	lockdep_assert_held(&q->sysfs_dir_lock);
+-
++	mutex_lock(&q->sysfs_dir_lock);
+ 	if (!q->mq_sysfs_init_done)
+-		return ret;
++		goto unlock;
+ 
+ 	queue_for_each_hw_ctx(q, hctx, i) {
+ 		ret = blk_mq_register_hctx(hctx);
+@@ -301,5 +302,8 @@ int blk_mq_sysfs_register_hctxs(struct request_queue *q)
+ 			break;
+ 	}
+ 
++unlock:
++	mutex_unlock(&q->sysfs_dir_lock);
++
+ 	return ret;
+ }
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index cc1b3202383840..d5995021815ddf 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -4421,6 +4421,15 @@ struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q,
+ }
+ EXPORT_SYMBOL(blk_mq_alloc_disk_for_queue);
+ 
++/*
++ * Only hctx removed from cpuhp list can be reused
++ */
++static bool blk_mq_hctx_is_reusable(struct blk_mq_hw_ctx *hctx)
++{
++	return hlist_unhashed(&hctx->cpuhp_online) &&
++		hlist_unhashed(&hctx->cpuhp_dead);
++}
++
+ static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx(
+ 		struct blk_mq_tag_set *set, struct request_queue *q,
+ 		int hctx_idx, int node)
+@@ -4430,7 +4439,7 @@ static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx(
+ 	/* reuse dead hctx first */
+ 	spin_lock(&q->unused_hctx_lock);
+ 	list_for_each_entry(tmp, &q->unused_hctx_list, hctx_list) {
+-		if (tmp->numa_node == node) {
++		if (tmp->numa_node == node && blk_mq_hctx_is_reusable(tmp)) {
+ 			hctx = tmp;
+ 			break;
+ 		}
+@@ -4462,8 +4471,7 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
+ 	unsigned long i, j;
+ 
+ 	/* protect against switching io scheduler  */
+-	lockdep_assert_held(&q->sysfs_lock);
+-
++	mutex_lock(&q->sysfs_lock);
+ 	for (i = 0; i < set->nr_hw_queues; i++) {
+ 		int old_node;
+ 		int node = blk_mq_get_hctx_node(set, i);
+@@ -4496,6 +4504,7 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
+ 
+ 	xa_for_each_start(&q->hctx_table, j, hctx, j)
+ 		blk_mq_exit_hctx(q, set, hctx, j);
++	mutex_unlock(&q->sysfs_lock);
+ 
+ 	/* unregister cpuhp callbacks for exited hctxs */
+ 	blk_mq_remove_hw_queues_cpuhp(q);
+@@ -4527,14 +4536,10 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
+ 
+ 	xa_init(&q->hctx_table);
+ 
+-	mutex_lock(&q->sysfs_lock);
+-
+ 	blk_mq_realloc_hw_ctxs(set, q);
+ 	if (!q->nr_hw_queues)
+ 		goto err_hctxs;
+ 
+-	mutex_unlock(&q->sysfs_lock);
+-
+ 	INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
+ 	blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
+ 
+@@ -4553,7 +4558,6 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
+ 	return 0;
+ 
+ err_hctxs:
+-	mutex_unlock(&q->sysfs_lock);
+ 	blk_mq_release(q);
+ err_exit:
+ 	q->mq_ops = NULL;
+@@ -4934,12 +4938,12 @@ static bool blk_mq_elv_switch_none(struct list_head *head,
+ 		return false;
+ 
+ 	/* q->elevator needs protection from ->sysfs_lock */
+-	lockdep_assert_held(&q->sysfs_lock);
++	mutex_lock(&q->sysfs_lock);
+ 
+ 	/* the check has to be done with holding sysfs_lock */
+ 	if (!q->elevator) {
+ 		kfree(qe);
+-		goto out;
++		goto unlock;
+ 	}
+ 
+ 	INIT_LIST_HEAD(&qe->node);
+@@ -4949,7 +4953,9 @@ static bool blk_mq_elv_switch_none(struct list_head *head,
+ 	__elevator_get(qe->type);
+ 	list_add(&qe->node, head);
+ 	elevator_disable(q);
+-out:
++unlock:
++	mutex_unlock(&q->sysfs_lock);
++
+ 	return true;
+ }
+ 
+@@ -4978,9 +4984,11 @@ static void blk_mq_elv_switch_back(struct list_head *head,
+ 	list_del(&qe->node);
+ 	kfree(qe);
+ 
++	mutex_lock(&q->sysfs_lock);
+ 	elevator_switch(q, t);
+ 	/* drop the reference acquired in blk_mq_elv_switch_none */
+ 	elevator_put(t);
++	mutex_unlock(&q->sysfs_lock);
+ }
+ 
+ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
+@@ -5000,11 +5008,8 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
+ 	if (set->nr_maps == 1 && nr_hw_queues == set->nr_hw_queues)
+ 		return;
+ 
+-	list_for_each_entry(q, &set->tag_list, tag_set_list) {
+-		mutex_lock(&q->sysfs_dir_lock);
+-		mutex_lock(&q->sysfs_lock);
++	list_for_each_entry(q, &set->tag_list, tag_set_list)
+ 		blk_mq_freeze_queue(q);
+-	}
+ 	/*
+ 	 * Switch IO scheduler to 'none', cleaning up the data associated
+ 	 * with the previous scheduler. We will switch back once we are done
+@@ -5060,11 +5065,8 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
+ 	list_for_each_entry(q, &set->tag_list, tag_set_list)
+ 		blk_mq_elv_switch_back(&head, q);
+ 
+-	list_for_each_entry(q, &set->tag_list, tag_set_list) {
++	list_for_each_entry(q, &set->tag_list, tag_set_list)
+ 		blk_mq_unfreeze_queue(q);
+-		mutex_unlock(&q->sysfs_lock);
+-		mutex_unlock(&q->sysfs_dir_lock);
+-	}
+ 
+ 	/* Free the excess tags when nr_hw_queues shrink. */
+ 	for (i = set->nr_hw_queues; i < prev_nr_hw_queues; i++)
+diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
+index 42c2cb97d778af..207577145c54f4 100644
+--- a/block/blk-sysfs.c
++++ b/block/blk-sysfs.c
+@@ -690,11 +690,11 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
+ 			return res;
+ 	}
+ 
+-	mutex_lock(&q->sysfs_lock);
+ 	blk_mq_freeze_queue(q);
++	mutex_lock(&q->sysfs_lock);
+ 	res = entry->store(disk, page, length);
+-	blk_mq_unfreeze_queue(q);
+ 	mutex_unlock(&q->sysfs_lock);
++	blk_mq_unfreeze_queue(q);
+ 	return res;
+ }
+ 
+diff --git a/drivers/accel/ivpu/ivpu_gem.c b/drivers/accel/ivpu/ivpu_gem.c
+index 1b409dbd332d80..c8daffd90f3001 100644
+--- a/drivers/accel/ivpu/ivpu_gem.c
++++ b/drivers/accel/ivpu/ivpu_gem.c
+@@ -406,7 +406,7 @@ static void ivpu_bo_print_info(struct ivpu_bo *bo, struct drm_printer *p)
+ 	mutex_lock(&bo->lock);
+ 
+ 	drm_printf(p, "%-9p %-3u 0x%-12llx %-10lu 0x%-8x %-4u",
+-		   bo, bo->ctx->id, bo->vpu_addr, bo->base.base.size,
++		   bo, bo->ctx ? bo->ctx->id : 0, bo->vpu_addr, bo->base.base.size,
+ 		   bo->flags, kref_read(&bo->base.base.refcount));
+ 
+ 	if (bo->base.pages)
+diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c
+index 59d3170f5e3541..10b7ae0f866c98 100644
+--- a/drivers/accel/ivpu/ivpu_pm.c
++++ b/drivers/accel/ivpu/ivpu_pm.c
+@@ -364,6 +364,7 @@ void ivpu_pm_init(struct ivpu_device *vdev)
+ 
+ 	pm_runtime_use_autosuspend(dev);
+ 	pm_runtime_set_autosuspend_delay(dev, delay);
++	pm_runtime_set_active(dev);
+ 
+ 	ivpu_dbg(vdev, PM, "Autosuspend delay = %d\n", delay);
+ }
+@@ -378,7 +379,6 @@ void ivpu_pm_enable(struct ivpu_device *vdev)
+ {
+ 	struct device *dev = vdev->drm.dev;
+ 
+-	pm_runtime_set_active(dev);
+ 	pm_runtime_allow(dev);
+ 	pm_runtime_mark_last_busy(dev);
+ 	pm_runtime_put_autosuspend(dev);
+diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
+index d0432b1707ceb6..bf83a104086cce 100644
+--- a/drivers/block/zram/zram_drv.c
++++ b/drivers/block/zram/zram_drv.c
+@@ -524,6 +524,12 @@ static ssize_t backing_dev_store(struct device *dev,
+ 	}
+ 
+ 	nr_pages = i_size_read(inode) >> PAGE_SHIFT;
++	/* Refuse to use zero sized device (also prevents self reference) */
++	if (!nr_pages) {
++		err = -EINVAL;
++		goto out;
++	}
++
+ 	bitmap_sz = BITS_TO_LONGS(nr_pages) * sizeof(long);
+ 	bitmap = kvzalloc(bitmap_sz, GFP_KERNEL);
+ 	if (!bitmap) {
+@@ -1319,12 +1325,16 @@ static void zram_meta_free(struct zram *zram, u64 disksize)
+ 	size_t num_pages = disksize >> PAGE_SHIFT;
+ 	size_t index;
+ 
++	if (!zram->table)
++		return;
++
+ 	/* Free all pages that are still in this zram device */
+ 	for (index = 0; index < num_pages; index++)
+ 		zram_free_page(zram, index);
+ 
+ 	zs_destroy_pool(zram->mem_pool);
+ 	vfree(zram->table);
++	zram->table = NULL;
+ }
+ 
+ static bool zram_meta_alloc(struct zram *zram, u64 disksize)
+@@ -2165,11 +2175,6 @@ static void zram_reset_device(struct zram *zram)
+ 
+ 	zram->limit_pages = 0;
+ 
+-	if (!init_done(zram)) {
+-		up_write(&zram->init_lock);
+-		return;
+-	}
+-
+ 	set_capacity_and_notify(zram->disk, 0);
+ 	part_stat_set_all(zram->disk->part0, 0);
+ 
+diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c
+index 99177835cadec4..b39dee7b93af04 100644
+--- a/drivers/clocksource/hyperv_timer.c
++++ b/drivers/clocksource/hyperv_timer.c
+@@ -27,7 +27,8 @@
+ #include <asm/mshyperv.h>
+ 
+ static struct clock_event_device __percpu *hv_clock_event;
+-static u64 hv_sched_clock_offset __ro_after_init;
++/* Note: offset can hold negative values after hibernation. */
++static u64 hv_sched_clock_offset __read_mostly;
+ 
+ /*
+  * If false, we're using the old mechanism for stimer0 interrupts
+@@ -470,6 +471,17 @@ static void resume_hv_clock_tsc(struct clocksource *arg)
+ 	hv_set_msr(HV_MSR_REFERENCE_TSC, tsc_msr.as_uint64);
+ }
+ 
++/*
++ * Called during resume from hibernation, from overridden
++ * x86_platform.restore_sched_clock_state routine. This is to adjust offsets
++ * used to calculate time for hv tsc page based sched_clock, to account for
++ * time spent before hibernation.
++ */
++void hv_adj_sched_clock_offset(u64 offset)
++{
++	hv_sched_clock_offset -= offset;
++}
++
+ #ifdef HAVE_VDSO_CLOCKMODE_HVCLOCK
+ static int hv_cs_enable(struct clocksource *cs)
+ {
+diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
+index dff618c708dc68..a0d6e8d7f42c8a 100644
+--- a/drivers/cxl/core/region.c
++++ b/drivers/cxl/core/region.c
+@@ -1295,6 +1295,7 @@ static int cxl_port_setup_targets(struct cxl_port *port,
+ 	struct cxl_region_params *p = &cxlr->params;
+ 	struct cxl_decoder *cxld = cxl_rr->decoder;
+ 	struct cxl_switch_decoder *cxlsd;
++	struct cxl_port *iter = port;
+ 	u16 eig, peig;
+ 	u8 eiw, peiw;
+ 
+@@ -1311,16 +1312,26 @@ static int cxl_port_setup_targets(struct cxl_port *port,
+ 
+ 	cxlsd = to_cxl_switch_decoder(&cxld->dev);
+ 	if (cxl_rr->nr_targets_set) {
+-		int i, distance;
++		int i, distance = 1;
++		struct cxl_region_ref *cxl_rr_iter;
+ 
+ 		/*
+-		 * Passthrough decoders impose no distance requirements between
+-		 * peers
++		 * The "distance" between peer downstream ports represents which
++		 * endpoint positions in the region interleave a given port can
++		 * host.
++		 *
++		 * For example, at the root of a hierarchy the distance is
++		 * always 1 as every index targets a different host-bridge. At
++		 * each subsequent switch level those ports map every Nth region
++		 * position where N is the width of the switch == distance.
+ 		 */
+-		if (cxl_rr->nr_targets == 1)
+-			distance = 0;
+-		else
+-			distance = p->nr_targets / cxl_rr->nr_targets;
++		do {
++			cxl_rr_iter = cxl_rr_load(iter, cxlr);
++			distance *= cxl_rr_iter->nr_targets;
++			iter = to_cxl_port(iter->dev.parent);
++		} while (!is_cxl_root(iter));
++		distance *= cxlrd->cxlsd.cxld.interleave_ways;
++
+ 		for (i = 0; i < cxl_rr->nr_targets_set; i++)
+ 			if (ep->dport == cxlsd->target[i]) {
+ 				rc = check_last_peer(cxled, ep, cxl_rr,
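The reworked distance calculation multiplies the interleave width of every switch level between this port and the CXL root, then scales by the root decoder's interleave ways. A standalone sketch of that accumulation over toy data (names and widths are illustrative, not the driver's structures):

#include <stdio.h>

/* Toy model: widths[0] is the current switch level, walking up toward
 * the root; root_iw is the root decoder's interleave ways. The result
 * says a downstream port hosts every Nth endpoint position. */
static int region_distance(const int *widths, int levels, int root_iw)
{
	int distance = 1;

	for (int i = 0; i < levels; i++)
		distance *= widths[i];

	return distance * root_iw;
}

int main(void)
{
	int widths[] = { 2, 2 };	/* two switch levels, 2-way each */

	/* 2 * 2 * 2 (root interleave) = every 8th region position */
	printf("distance = %d\n", region_distance(widths, 2, 2));
	return 0;
}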
+diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
+index 188412d45e0d26..6e553b5752b1dd 100644
+--- a/drivers/cxl/pci.c
++++ b/drivers/cxl/pci.c
+@@ -942,8 +942,7 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	if (rc)
+ 		return rc;
+ 
+-	rc = cxl_pci_ras_unmask(pdev);
+-	if (rc)
++	if (cxl_pci_ras_unmask(pdev))
+ 		dev_dbg(&pdev->dev, "No RAS reporting unmasked\n");
+ 
+ 	pci_save_state(pdev);
+diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
+index 8892bc701a662d..afb8c1c5010735 100644
+--- a/drivers/dma-buf/dma-buf.c
++++ b/drivers/dma-buf/dma-buf.c
+@@ -60,7 +60,7 @@ static void __dma_buf_debugfs_list_add(struct dma_buf *dmabuf)
+ {
+ }
+ 
+-static void __dma_buf_debugfs_list_del(struct file *file)
++static void __dma_buf_debugfs_list_del(struct dma_buf *dmabuf)
+ {
+ }
+ #endif
+diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
+index a3638ccc15f571..5e836e4e5b449a 100644
+--- a/drivers/dma-buf/udmabuf.c
++++ b/drivers/dma-buf/udmabuf.c
+@@ -256,15 +256,12 @@ static const struct dma_buf_ops udmabuf_ops = {
+ };
+ 
+ #define SEALS_WANTED (F_SEAL_SHRINK)
+-#define SEALS_DENIED (F_SEAL_WRITE)
++#define SEALS_DENIED (F_SEAL_WRITE|F_SEAL_FUTURE_WRITE)
+ 
+ static int check_memfd_seals(struct file *memfd)
+ {
+ 	int seals;
+ 
+-	if (!memfd)
+-		return -EBADFD;
+-
+ 	if (!shmem_file(memfd) && !is_file_hugepages(memfd))
+ 		return -EBADFD;
+ 
+@@ -279,12 +276,10 @@ static int check_memfd_seals(struct file *memfd)
+ 	return 0;
+ }
+ 
+-static int export_udmabuf(struct udmabuf *ubuf,
+-			  struct miscdevice *device,
+-			  u32 flags)
++static struct dma_buf *export_udmabuf(struct udmabuf *ubuf,
++				      struct miscdevice *device)
+ {
+ 	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+-	struct dma_buf *buf;
+ 
+ 	ubuf->device = device;
+ 	exp_info.ops  = &udmabuf_ops;
+@@ -292,24 +287,72 @@ static int export_udmabuf(struct udmabuf *ubuf,
+ 	exp_info.priv = ubuf;
+ 	exp_info.flags = O_RDWR;
+ 
+-	buf = dma_buf_export(&exp_info);
+-	if (IS_ERR(buf))
+-		return PTR_ERR(buf);
++	return dma_buf_export(&exp_info);
++}
++
++static long udmabuf_pin_folios(struct udmabuf *ubuf, struct file *memfd,
++			       loff_t start, loff_t size)
++{
++	pgoff_t pgoff, pgcnt, upgcnt = ubuf->pagecount;
++	struct folio **folios = NULL;
++	u32 cur_folio, cur_pgcnt;
++	long nr_folios;
++	long ret = 0;
++	loff_t end;
++
++	pgcnt = size >> PAGE_SHIFT;
++	folios = kvmalloc_array(pgcnt, sizeof(*folios), GFP_KERNEL);
++	if (!folios)
++		return -ENOMEM;
++
++	end = start + (pgcnt << PAGE_SHIFT) - 1;
++	nr_folios = memfd_pin_folios(memfd, start, end, folios, pgcnt, &pgoff);
++	if (nr_folios <= 0) {
++		ret = nr_folios ? nr_folios : -EINVAL;
++		goto end;
++	}
+ 
+-	return dma_buf_fd(buf, flags);
++	cur_pgcnt = 0;
++	for (cur_folio = 0; cur_folio < nr_folios; ++cur_folio) {
++		pgoff_t subpgoff = pgoff;
++		size_t fsize = folio_size(folios[cur_folio]);
++
++		ret = add_to_unpin_list(&ubuf->unpin_list, folios[cur_folio]);
++		if (ret < 0)
++			goto end;
++
++		for (; subpgoff < fsize; subpgoff += PAGE_SIZE) {
++			ubuf->folios[upgcnt] = folios[cur_folio];
++			ubuf->offsets[upgcnt] = subpgoff;
++			++upgcnt;
++
++			if (++cur_pgcnt >= pgcnt)
++				goto end;
++		}
++
++		/*
++		 * In a given range, only the first subpage of the first folio
++		 * has an offset, which is returned by memfd_pin_folios().
++		 * The first subpages of other folios (in the range) have an
++		 * offset of 0.
++		 */
++		pgoff = 0;
++	}
++end:
++	ubuf->pagecount = upgcnt;
++	kvfree(folios);
++	return ret;
+ }
+ 
+ static long udmabuf_create(struct miscdevice *device,
+ 			   struct udmabuf_create_list *head,
+ 			   struct udmabuf_create_item *list)
+ {
+-	pgoff_t pgoff, pgcnt, pglimit, pgbuf = 0;
+-	long nr_folios, ret = -EINVAL;
+-	struct file *memfd = NULL;
+-	struct folio **folios;
++	pgoff_t pgcnt = 0, pglimit;
+ 	struct udmabuf *ubuf;
+-	u32 i, j, k, flags;
+-	loff_t end;
++	struct dma_buf *dmabuf;
++	long ret = -EINVAL;
++	u32 i, flags;
+ 
+ 	ubuf = kzalloc(sizeof(*ubuf), GFP_KERNEL);
+ 	if (!ubuf)
+@@ -318,93 +361,76 @@ static long udmabuf_create(struct miscdevice *device,
+ 	INIT_LIST_HEAD(&ubuf->unpin_list);
+ 	pglimit = (size_limit_mb * 1024 * 1024) >> PAGE_SHIFT;
+ 	for (i = 0; i < head->count; i++) {
+-		if (!IS_ALIGNED(list[i].offset, PAGE_SIZE))
++		if (!PAGE_ALIGNED(list[i].offset))
+ 			goto err;
+-		if (!IS_ALIGNED(list[i].size, PAGE_SIZE))
++		if (!PAGE_ALIGNED(list[i].size))
+ 			goto err;
+-		ubuf->pagecount += list[i].size >> PAGE_SHIFT;
+-		if (ubuf->pagecount > pglimit)
++
++		pgcnt += list[i].size >> PAGE_SHIFT;
++		if (pgcnt > pglimit)
+ 			goto err;
+ 	}
+ 
+-	if (!ubuf->pagecount)
++	if (!pgcnt)
+ 		goto err;
+ 
+-	ubuf->folios = kvmalloc_array(ubuf->pagecount, sizeof(*ubuf->folios),
+-				      GFP_KERNEL);
++	ubuf->folios = kvmalloc_array(pgcnt, sizeof(*ubuf->folios), GFP_KERNEL);
+ 	if (!ubuf->folios) {
+ 		ret = -ENOMEM;
+ 		goto err;
+ 	}
+-	ubuf->offsets = kvcalloc(ubuf->pagecount, sizeof(*ubuf->offsets),
+-				 GFP_KERNEL);
++
++	ubuf->offsets = kvcalloc(pgcnt, sizeof(*ubuf->offsets), GFP_KERNEL);
+ 	if (!ubuf->offsets) {
+ 		ret = -ENOMEM;
+ 		goto err;
+ 	}
+ 
+-	pgbuf = 0;
+ 	for (i = 0; i < head->count; i++) {
+-		memfd = fget(list[i].memfd);
+-		ret = check_memfd_seals(memfd);
+-		if (ret < 0)
+-			goto err;
+-
+-		pgcnt = list[i].size >> PAGE_SHIFT;
+-		folios = kvmalloc_array(pgcnt, sizeof(*folios), GFP_KERNEL);
+-		if (!folios) {
+-			ret = -ENOMEM;
+-			goto err;
+-		}
++		struct file *memfd = fget(list[i].memfd);
+ 
+-		end = list[i].offset + (pgcnt << PAGE_SHIFT) - 1;
+-		ret = memfd_pin_folios(memfd, list[i].offset, end,
+-				       folios, pgcnt, &pgoff);
+-		if (ret <= 0) {
+-			kvfree(folios);
+-			if (!ret)
+-				ret = -EINVAL;
++		if (!memfd) {
++			ret = -EBADFD;
+ 			goto err;
+ 		}
+ 
+-		nr_folios = ret;
+-		pgoff >>= PAGE_SHIFT;
+-		for (j = 0, k = 0; j < pgcnt; j++) {
+-			ubuf->folios[pgbuf] = folios[k];
+-			ubuf->offsets[pgbuf] = pgoff << PAGE_SHIFT;
+-
+-			if (j == 0 || ubuf->folios[pgbuf-1] != folios[k]) {
+-				ret = add_to_unpin_list(&ubuf->unpin_list,
+-							folios[k]);
+-				if (ret < 0) {
+-					kfree(folios);
+-					goto err;
+-				}
+-			}
+-
+-			pgbuf++;
+-			if (++pgoff == folio_nr_pages(folios[k])) {
+-				pgoff = 0;
+-				if (++k == nr_folios)
+-					break;
+-			}
+-		}
++		/*
++		 * Take the inode lock to protect against concurrent
++		 * memfd_add_seals(), which takes this lock in write mode.
++		 */
++		inode_lock_shared(file_inode(memfd));
++		ret = check_memfd_seals(memfd);
++		if (ret)
++			goto out_unlock;
+ 
+-		kvfree(folios);
++		ret = udmabuf_pin_folios(ubuf, memfd, list[i].offset,
++					 list[i].size);
++out_unlock:
++		inode_unlock_shared(file_inode(memfd));
+ 		fput(memfd);
+-		memfd = NULL;
++		if (ret)
++			goto err;
+ 	}
+ 
+ 	flags = head->flags & UDMABUF_FLAGS_CLOEXEC ? O_CLOEXEC : 0;
+-	ret = export_udmabuf(ubuf, device, flags);
+-	if (ret < 0)
++	dmabuf = export_udmabuf(ubuf, device);
++	if (IS_ERR(dmabuf)) {
++		ret = PTR_ERR(dmabuf);
+ 		goto err;
++	}
++	/*
++	 * Ownership of ubuf is held by the dmabuf from here.
++	 * If the following dma_buf_fd() fails, dma_buf_put() cleans up both the
++	 * dmabuf and the ubuf (through udmabuf_ops.release).
++	 */
++
++	ret = dma_buf_fd(dmabuf, flags);
++	if (ret < 0)
++		dma_buf_put(dmabuf);
+ 
+ 	return ret;
+ 
+ err:
+-	if (memfd)
+-		fput(memfd);
+ 	unpin_all_folios(&ubuf->unpin_list);
+ 	kvfree(ubuf->offsets);
+ 	kvfree(ubuf->folios);
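The new udmabuf_pin_folios() helper flattens pinned folios into one (folio, offset) entry per PAGE_SIZE chunk, with only the first folio starting at a nonzero offset. A userspace-style sketch of that flattening with a toy folio type (assumed sizes, not the kernel structures):

#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE 4096UL

struct folio { size_t size; };	/* toy stand-in for struct folio */

/* Emit one (folio, offset) pair per page until pgcnt pages are mapped.
 * Only the first folio starts at a nonzero offset (as reported by
 * memfd_pin_folios()); later folios start at offset 0. */
static size_t flatten(struct folio **folios, size_t nr_folios,
		      size_t first_off, size_t pgcnt)
{
	size_t off = first_off, done = 0;

	for (size_t f = 0; f < nr_folios && done < pgcnt; f++) {
		for (; off < folios[f]->size && done < pgcnt;
		     off += PAGE_SIZE, done++)
			printf("page %zu -> folio %zu offset %zu\n",
			       done, f, off);
		off = 0;	/* subsequent folios start at offset 0 */
	}
	return done;
}

int main(void)
{
	struct folio a = { 4 * PAGE_SIZE }, b = { 2 * PAGE_SIZE };
	struct folio *folios[] = { &a, &b };

	/* start one page into the first folio, map 4 pages total */
	flatten(folios, 2, PAGE_SIZE, 4);
	return 0;
}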
+diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
+index ddfbdb66b794d7..5d356b7c45897c 100644
+--- a/drivers/edac/amd64_edac.c
++++ b/drivers/edac/amd64_edac.c
+@@ -3362,36 +3362,24 @@ static bool dct_ecc_enabled(struct amd64_pvt *pvt)
+ 
+ static bool umc_ecc_enabled(struct amd64_pvt *pvt)
+ {
+-	u8 umc_en_mask = 0, ecc_en_mask = 0;
+-	u16 nid = pvt->mc_node_id;
+ 	struct amd64_umc *umc;
+-	u8 ecc_en = 0, i;
++	bool ecc_en = false;
++	int i;
+ 
++	/* Check whether at least one UMC is enabled: */
+ 	for_each_umc(i) {
+ 		umc = &pvt->umc[i];
+ 
+-		/* Only check enabled UMCs. */
+-		if (!(umc->sdp_ctrl & UMC_SDP_INIT))
+-			continue;
+-
+-		umc_en_mask |= BIT(i);
+-
+-		if (umc->umc_cap_hi & UMC_ECC_ENABLED)
+-			ecc_en_mask |= BIT(i);
++		if (umc->sdp_ctrl & UMC_SDP_INIT &&
++		    umc->umc_cap_hi & UMC_ECC_ENABLED) {
++			ecc_en = true;
++			break;
++		}
+ 	}
+ 
+-	/* Check whether at least one UMC is enabled: */
+-	if (umc_en_mask)
+-		ecc_en = umc_en_mask == ecc_en_mask;
+-	else
+-		edac_dbg(0, "Node %d: No enabled UMCs.\n", nid);
+-
+-	edac_dbg(3, "Node %d: DRAM ECC %s.\n", nid, (ecc_en ? "enabled" : "disabled"));
++	edac_dbg(3, "Node %d: DRAM ECC %s.\n", pvt->mc_node_id, (ecc_en ? "enabled" : "disabled"));
+ 
+-	if (!ecc_en)
+-		return false;
+-	else
+-		return true;
++	return ecc_en;
+ }
+ 
+ static inline void
+diff --git a/drivers/firmware/arm_ffa/bus.c b/drivers/firmware/arm_ffa/bus.c
+index eb17d03b66fec9..dfda5ffc14db72 100644
+--- a/drivers/firmware/arm_ffa/bus.c
++++ b/drivers/firmware/arm_ffa/bus.c
+@@ -187,13 +187,18 @@ bool ffa_device_is_valid(struct ffa_device *ffa_dev)
+ 	return valid;
+ }
+ 
+-struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id,
+-				       const struct ffa_ops *ops)
++struct ffa_device *
++ffa_device_register(const struct ffa_partition_info *part_info,
++		    const struct ffa_ops *ops)
+ {
+ 	int id, ret;
++	uuid_t uuid;
+ 	struct device *dev;
+ 	struct ffa_device *ffa_dev;
+ 
++	if (!part_info)
++		return NULL;
++
+ 	id = ida_alloc_min(&ffa_bus_id, 1, GFP_KERNEL);
+ 	if (id < 0)
+ 		return NULL;
+@@ -210,9 +215,11 @@ struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id,
+ 	dev_set_name(&ffa_dev->dev, "arm-ffa-%d", id);
+ 
+ 	ffa_dev->id = id;
+-	ffa_dev->vm_id = vm_id;
++	ffa_dev->vm_id = part_info->id;
++	ffa_dev->properties = part_info->properties;
+ 	ffa_dev->ops = ops;
+-	uuid_copy(&ffa_dev->uuid, uuid);
++	import_uuid(&uuid, (u8 *)part_info->uuid);
++	uuid_copy(&ffa_dev->uuid, &uuid);
+ 
+ 	ret = device_register(&ffa_dev->dev);
+ 	if (ret) {
+diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c
+index b14cbdae94e82b..2c2ec3c35f1561 100644
+--- a/drivers/firmware/arm_ffa/driver.c
++++ b/drivers/firmware/arm_ffa/driver.c
+@@ -1387,7 +1387,6 @@ static struct notifier_block ffa_bus_nb = {
+ static int ffa_setup_partitions(void)
+ {
+ 	int count, idx, ret;
+-	uuid_t uuid;
+ 	struct ffa_device *ffa_dev;
+ 	struct ffa_dev_part_info *info;
+ 	struct ffa_partition_info *pbuf, *tpbuf;
+@@ -1406,23 +1405,19 @@ static int ffa_setup_partitions(void)
+ 
+ 	xa_init(&drv_info->partition_info);
+ 	for (idx = 0, tpbuf = pbuf; idx < count; idx++, tpbuf++) {
+-		import_uuid(&uuid, (u8 *)tpbuf->uuid);
+-
+ 		/* Note that if the UUID is uuid_null, that will require
+ 		 * ffa_bus_notifier() to find the UUID of this partition id
+ 		 * with the help of ffa_device_match_uuid(). FF-A v1.1 and above
+ 		 * provides UUID here for each partition as part of the
+ 		 * discovery API and the same is passed.
+ 		 */
+-		ffa_dev = ffa_device_register(&uuid, tpbuf->id, &ffa_drv_ops);
++		ffa_dev = ffa_device_register(tpbuf, &ffa_drv_ops);
+ 		if (!ffa_dev) {
+ 			pr_err("%s: failed to register partition ID 0x%x\n",
+ 			       __func__, tpbuf->id);
+ 			continue;
+ 		}
+ 
+-		ffa_dev->properties = tpbuf->properties;
+-
+ 		if (drv_info->version > FFA_VERSION_1_0 &&
+ 		    !(tpbuf->properties & FFA_PARTITION_AARCH64_EXEC))
+ 			ffa_mode_32bit_set(ffa_dev);
+diff --git a/drivers/firmware/arm_scmi/vendors/imx/Kconfig b/drivers/firmware/arm_scmi/vendors/imx/Kconfig
+index 2883ed24a84d65..a01bf5e47301d2 100644
+--- a/drivers/firmware/arm_scmi/vendors/imx/Kconfig
++++ b/drivers/firmware/arm_scmi/vendors/imx/Kconfig
+@@ -15,6 +15,7 @@ config IMX_SCMI_BBM_EXT
+ config IMX_SCMI_MISC_EXT
+ 	tristate "i.MX SCMI MISC EXTENSION"
+ 	depends on ARM_SCMI_PROTOCOL || (COMPILE_TEST && OF)
++	depends on IMX_SCMI_MISC_DRV
+ 	default y if ARCH_MXC
+ 	help
+ 	  This enables i.MX System MISC control logic such as gpio expander
+diff --git a/drivers/firmware/imx/Kconfig b/drivers/firmware/imx/Kconfig
+index 477d3f32d99a6b..907cd149c40a8b 100644
+--- a/drivers/firmware/imx/Kconfig
++++ b/drivers/firmware/imx/Kconfig
+@@ -25,7 +25,6 @@ config IMX_SCU
+ 
+ config IMX_SCMI_MISC_DRV
+ 	tristate "IMX SCMI MISC Protocol driver"
+-	depends on IMX_SCMI_MISC_EXT || COMPILE_TEST
+ 	default y if ARCH_MXC
+ 	help
+ 	  The System Controller Management Interface firmware (SCMI FW) is
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
+index 5ac59b62020cf2..18b3b1aaa1d3b7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
+@@ -345,11 +345,10 @@ void amdgpu_coredump(struct amdgpu_device *adev, bool skip_vram_check,
+ 	coredump->skip_vram_check = skip_vram_check;
+ 	coredump->reset_vram_lost = vram_lost;
+ 
+-	if (job && job->vm) {
+-		struct amdgpu_vm *vm = job->vm;
++	if (job && job->pasid) {
+ 		struct amdgpu_task_info *ti;
+ 
+-		ti = amdgpu_vm_get_task_info_vm(vm);
++		ti = amdgpu_vm_get_task_info_pasid(adev, job->pasid);
+ 		if (ti) {
+ 			coredump->reset_task_info = *ti;
+ 			amdgpu_vm_put_task_info(ti);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+index 16f2605ac50b99..1ce20a19be8ba9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+@@ -253,7 +253,6 @@ void amdgpu_job_set_resources(struct amdgpu_job *job, struct amdgpu_bo *gds,
+ 
+ void amdgpu_job_free_resources(struct amdgpu_job *job)
+ {
+-	struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
+ 	struct dma_fence *f;
+ 	unsigned i;
+ 
+@@ -266,7 +265,7 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
+ 		f = NULL;
+ 
+ 	for (i = 0; i < job->num_ibs; ++i)
+-		amdgpu_ib_free(ring->adev, &job->ibs[i], f);
++		amdgpu_ib_free(NULL, &job->ibs[i], f);
+ }
+ 
+ static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 8d2562d0f143c7..73e02141a6e215 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -1260,10 +1260,9 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
+ 	 * next command submission.
+ 	 */
+ 	if (amdgpu_vm_is_bo_always_valid(vm, bo)) {
+-		uint32_t mem_type = bo->tbo.resource->mem_type;
+-
+-		if (!(bo->preferred_domains &
+-		      amdgpu_mem_type_to_domain(mem_type)))
++		if (bo->tbo.resource &&
++		    !(bo->preferred_domains &
++		      amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type)))
+ 			amdgpu_vm_bo_evicted(&bo_va->base);
+ 		else
+ 			amdgpu_vm_bo_idle(&bo_va->base);
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
+index 47b47d21f46447..6c19626ec59e9d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
+@@ -4105,7 +4105,7 @@ static int gfx_v12_0_set_clockgating_state(void *handle,
+ 	if (amdgpu_sriov_vf(adev))
+ 		return 0;
+ 
+-	switch (adev->ip_versions[GC_HWIP][0]) {
++	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ 	case IP_VERSION(12, 0, 0):
+ 	case IP_VERSION(12, 0, 1):
+ 		gfx_v12_0_update_gfx_clock_gating(adev,
+diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c
+index 0fbc3be81f140f..f2ab5001b49249 100644
+--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c
+@@ -108,7 +108,7 @@ mmhub_v4_1_0_print_l2_protection_fault_status(struct amdgpu_device *adev,
+ 	dev_err(adev->dev,
+ 		"MMVM_L2_PROTECTION_FAULT_STATUS_LO32:0x%08X\n",
+ 		status);
+-	switch (adev->ip_versions[MMHUB_HWIP][0]) {
++	switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
+ 	case IP_VERSION(4, 1, 0):
+ 		mmhub_cid = mmhub_client_ids_v4_1_0[cid][rw];
+ 		break;
+diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
+index b1b57dcc5a7370..d1032e9992b49c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
+@@ -271,8 +271,19 @@ const struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg = {
+ 	.ref_and_mask_sdma1 = GPU_HDP_FLUSH_DONE__SDMA1_MASK,
+ };
+ 
++#define regRCC_DEV0_EPF6_STRAP4                                                                         0xd304
++#define regRCC_DEV0_EPF6_STRAP4_BASE_IDX                                                                5
++
+ static void nbio_v7_0_init_registers(struct amdgpu_device *adev)
+ {
++	uint32_t data;
++
++	switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
++	case IP_VERSION(2, 5, 0):
++		data = RREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF6_STRAP4) & ~BIT(23);
++		WREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF6_STRAP4, data);
++		break;
++	}
+ }
+ 
+ #define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
+diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c
+index 814ab59fdd4a3a..41421da63a0846 100644
+--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c
++++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c
+@@ -275,7 +275,7 @@ static void nbio_v7_11_init_registers(struct amdgpu_device *adev)
+ 	if (def != data)
+ 		WREG32_SOC15(NBIO, 0, regBIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3, data);
+ 
+-	switch (adev->ip_versions[NBIO_HWIP][0]) {
++	switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
+ 	case IP_VERSION(7, 11, 0):
+ 	case IP_VERSION(7, 11, 1):
+ 	case IP_VERSION(7, 11, 2):
+diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c
+index 1ac730328516ff..3fb6d2aa7e3b39 100644
+--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c
++++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c
+@@ -247,7 +247,7 @@ static void nbio_v7_7_init_registers(struct amdgpu_device *adev)
+ 	if (def != data)
+ 		WREG32_SOC15(NBIO, 0, regBIF0_PCIE_MST_CTRL_3, data);
+ 
+-	switch (adev->ip_versions[NBIO_HWIP][0]) {
++	switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
+ 	case IP_VERSION(7, 7, 0):
+ 		data = RREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF5_STRAP4) & ~BIT(23);
+ 		WREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF5_STRAP4, data);
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
+index b22fb7eafcd3f2..9ec53431f2c32d 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
+@@ -2108,7 +2108,7 @@ static int smu_v14_0_2_enable_gfx_features(struct smu_context *smu)
+ {
+ 	struct amdgpu_device *adev = smu->adev;
+ 
+-	if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(14, 0, 2))
++	if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 2))
+ 		return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnableAllSmuFeatures,
+ 										   FEATURE_PWR_GFX, NULL);
+ 	else
+diff --git a/drivers/gpu/drm/display/drm_dp_tunnel.c b/drivers/gpu/drm/display/drm_dp_tunnel.c
+index 48b2df120086c9..90fe07a89260e2 100644
+--- a/drivers/gpu/drm/display/drm_dp_tunnel.c
++++ b/drivers/gpu/drm/display/drm_dp_tunnel.c
+@@ -1896,8 +1896,8 @@ static void destroy_mgr(struct drm_dp_tunnel_mgr *mgr)
+  *
+  * Creates a DP tunnel manager for @dev.
+  *
+- * Returns a pointer to the tunnel manager if created successfully or NULL in
+- * case of an error.
++ * Returns a pointer to the tunnel manager if created successfully or error
++ * pointer in case of failure.
+  */
+ struct drm_dp_tunnel_mgr *
+ drm_dp_tunnel_mgr_create(struct drm_device *dev, int max_group_count)
+@@ -1907,7 +1907,7 @@ drm_dp_tunnel_mgr_create(struct drm_device *dev, int max_group_count)
+ 
+ 	mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
+ 	if (!mgr)
+-		return NULL;
++		return ERR_PTR(-ENOMEM);
+ 
+ 	mgr->dev = dev;
+ 	init_waitqueue_head(&mgr->bw_req_queue);
+@@ -1916,7 +1916,7 @@ drm_dp_tunnel_mgr_create(struct drm_device *dev, int max_group_count)
+ 	if (!mgr->groups) {
+ 		kfree(mgr);
+ 
+-		return NULL;
++		return ERR_PTR(-ENOMEM);
+ 	}
+ 
+ #ifdef CONFIG_DRM_DISPLAY_DP_TUNNEL_STATE_DEBUG
+@@ -1927,7 +1927,7 @@ drm_dp_tunnel_mgr_create(struct drm_device *dev, int max_group_count)
+ 		if (!init_group(mgr, &mgr->groups[i])) {
+ 			destroy_mgr(mgr);
+ 
+-			return NULL;
++			return ERR_PTR(-ENOMEM);
+ 		}
+ 
+ 		mgr->group_count++;
+diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
+index 6ba167a3346134..71573b85d9242e 100644
+--- a/drivers/gpu/drm/drm_modes.c
++++ b/drivers/gpu/drm/drm_modes.c
+@@ -1287,14 +1287,11 @@ EXPORT_SYMBOL(drm_mode_set_name);
+  */
+ int drm_mode_vrefresh(const struct drm_display_mode *mode)
+ {
+-	unsigned int num, den;
++	unsigned int num = 1, den = 1;
+ 
+ 	if (mode->htotal == 0 || mode->vtotal == 0)
+ 		return 0;
+ 
+-	num = mode->clock;
+-	den = mode->htotal * mode->vtotal;
+-
+ 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ 		num *= 2;
+ 	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+@@ -1302,6 +1299,12 @@ int drm_mode_vrefresh(const struct drm_display_mode *mode)
+ 	if (mode->vscan > 1)
+ 		den *= mode->vscan;
+ 
++	if (check_mul_overflow(mode->clock, num, &num))
++		return 0;
++
++	if (check_mul_overflow(mode->htotal * mode->vtotal, den, &den))
++		return 0;
++
+ 	return DIV_ROUND_CLOSEST_ULL(mul_u32_u32(num, 1000), den);
+ }
+ EXPORT_SYMBOL(drm_mode_vrefresh);
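The vrefresh fix replaces unchecked multiplications with check_mul_overflow(), returning 0 for pathological modes instead of a wrapped rate. A minimal sketch of the same guard built on the GCC/Clang builtin that backs the kernel helper (toy parameters, not the DRM API):

#include <stdio.h>

/* vrefresh in Hz = clock(kHz) * 1000 / (htotal * vtotal), with the
 * interlace/doublescan/vscan factors folded into num and den first.
 * Returns 0 if a multiplication overflows unsigned int, mirroring the
 * fixed drm_mode_vrefresh(). */
static unsigned int vrefresh(unsigned int clock, unsigned int htotal,
			     unsigned int vtotal, unsigned int num,
			     unsigned int den)
{
	if (__builtin_mul_overflow(clock, num, &num))
		return 0;
	if (__builtin_mul_overflow(htotal * vtotal, den, &den))
		return 0;

	/* round-to-closest division in 64-bit, as the kernel does */
	return (unsigned int)(((unsigned long long)num * 1000 + den / 2) / den);
}

int main(void)
{
	/* 148500 kHz over 2200x1125 total: classic 1080p60 timing */
	printf("%u Hz\n", vrefresh(148500, 2200, 1125, 1, 1));

	/* absurd interlaced mode: overflow is caught, 0 is returned */
	printf("%u Hz\n", vrefresh(0xffffffff, 70000, 70000, 2, 1));
	return 0;
}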
+diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
+index ba55c059063dbb..fe1f85e5dda330 100644
+--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
++++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
+@@ -343,6 +343,11 @@ struct intel_engine_guc_stats {
+ 	 * @start_gt_clk: GT clock time of last idle to active transition.
+ 	 */
+ 	u64 start_gt_clk;
++
++	/**
++	 * @total: The last value of total returned
++	 */
++	u64 total;
+ };
+ 
+ union intel_engine_tlb_inv_reg {
+diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+index ed979847187f53..ee12ee0ed41871 100644
+--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
++++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+@@ -1243,6 +1243,21 @@ static void __get_engine_usage_record(struct intel_engine_cs *engine,
+ 	} while (++i < 6);
+ }
+ 
++static void __set_engine_usage_record(struct intel_engine_cs *engine,
++				      u32 last_in, u32 id, u32 total)
++{
++	struct iosys_map rec_map = intel_guc_engine_usage_record_map(engine);
++
++#define record_write(map_, field_, val_) \
++	iosys_map_wr_field(map_, 0, struct guc_engine_usage_record, field_, val_)
++
++	record_write(&rec_map, last_switch_in_stamp, last_in);
++	record_write(&rec_map, current_context_index, id);
++	record_write(&rec_map, total_runtime, total);
++
++#undef record_write
++}
++
+ static void guc_update_engine_gt_clks(struct intel_engine_cs *engine)
+ {
+ 	struct intel_engine_guc_stats *stats = &engine->stats.guc;
+@@ -1363,9 +1378,12 @@ static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now)
+ 		total += intel_gt_clock_interval_to_ns(gt, clk);
+ 	}
+ 
++	if (total > stats->total)
++		stats->total = total;
++
+ 	spin_unlock_irqrestore(&guc->timestamp.lock, flags);
+ 
+-	return ns_to_ktime(total);
++	return ns_to_ktime(stats->total);
+ }
+ 
+ static void guc_enable_busyness_worker(struct intel_guc *guc)
+@@ -1431,8 +1449,21 @@ static void __reset_guc_busyness_stats(struct intel_guc *guc)
+ 
+ 	guc_update_pm_timestamp(guc, &unused);
+ 	for_each_engine(engine, gt, id) {
++		struct intel_engine_guc_stats *stats = &engine->stats.guc;
++
+ 		guc_update_engine_gt_clks(engine);
+-		engine->stats.guc.prev_total = 0;
++
++		/*
++		 * If resetting a running context, accumulate the active
++		 * time as well since there will be no context switch.
++		 */
++		if (stats->running) {
++			u64 clk = guc->timestamp.gt_stamp - stats->start_gt_clk;
++
++			stats->total_gt_clks += clk;
++		}
++		stats->prev_total = 0;
++		stats->running = 0;
+ 	}
+ 
+ 	spin_unlock_irqrestore(&guc->timestamp.lock, flags);
+@@ -1543,6 +1574,9 @@ static void guc_timestamp_ping(struct work_struct *wrk)
+ 
+ static int guc_action_enable_usage_stats(struct intel_guc *guc)
+ {
++	struct intel_gt *gt = guc_to_gt(guc);
++	struct intel_engine_cs *engine;
++	enum intel_engine_id id;
+ 	u32 offset = intel_guc_engine_usage_offset(guc);
+ 	u32 action[] = {
+ 		INTEL_GUC_ACTION_SET_ENG_UTIL_BUFF,
+@@ -1550,6 +1584,9 @@ static int guc_action_enable_usage_stats(struct intel_guc *guc)
+ 		0,
+ 	};
+ 
++	for_each_engine(engine, gt, id)
++		__set_engine_usage_record(engine, 0, 0xffffffff, 0);
++
+ 	return intel_guc_send(guc, action, ARRAY_SIZE(action));
+ }
+ 
+diff --git a/drivers/gpu/drm/panel/panel-himax-hx83102.c b/drivers/gpu/drm/panel/panel-himax-hx83102.c
+index 8b48bba181316c..3644a7544b935d 100644
+--- a/drivers/gpu/drm/panel/panel-himax-hx83102.c
++++ b/drivers/gpu/drm/panel/panel-himax-hx83102.c
+@@ -565,6 +565,8 @@ static int hx83102_get_modes(struct drm_panel *panel,
+ 	struct drm_display_mode *mode;
+ 
+ 	mode = drm_mode_duplicate(connector->dev, m);
++	if (!mode)
++		return -ENOMEM;
+ 
+ 	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ 	drm_mode_set_name(mode);
+diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35950.c b/drivers/gpu/drm/panel/panel-novatek-nt35950.c
+index b036208f93560e..08b22b592ab045 100644
+--- a/drivers/gpu/drm/panel/panel-novatek-nt35950.c
++++ b/drivers/gpu/drm/panel/panel-novatek-nt35950.c
+@@ -481,9 +481,9 @@ static int nt35950_probe(struct mipi_dsi_device *dsi)
+ 			return dev_err_probe(dev, -EPROBE_DEFER, "Cannot get secondary DSI host\n");
+ 
+ 		nt->dsi[1] = mipi_dsi_device_register_full(dsi_r_host, info);
+-		if (!nt->dsi[1]) {
++		if (IS_ERR(nt->dsi[1])) {
+ 			dev_err(dev, "Cannot get secondary DSI node\n");
+-			return -ENODEV;
++			return PTR_ERR(nt->dsi[1]);
+ 		}
+ 		num_dsis++;
+ 	}
+diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7701.c b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
+index eef03d04e0cd2d..1f72ef7ca74c93 100644
+--- a/drivers/gpu/drm/panel/panel-sitronix-st7701.c
++++ b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
+@@ -1177,6 +1177,7 @@ static int st7701_probe(struct device *dev, int connector_type)
+ 		return dev_err_probe(dev, ret, "Failed to get orientation\n");
+ 
+ 	drm_panel_init(&st7701->panel, dev, &st7701_funcs, connector_type);
++	st7701->panel.prepare_prev_first = true;
+ 
+ 	/**
+ 	 * Once sleep out has been issued, ST7701 IC required to wait 120ms
+diff --git a/drivers/gpu/drm/panel/panel-synaptics-r63353.c b/drivers/gpu/drm/panel/panel-synaptics-r63353.c
+index 169c629746c714..17349825543fe6 100644
+--- a/drivers/gpu/drm/panel/panel-synaptics-r63353.c
++++ b/drivers/gpu/drm/panel/panel-synaptics-r63353.c
+@@ -325,7 +325,7 @@ static void r63353_panel_shutdown(struct mipi_dsi_device *dsi)
+ {
+ 	struct r63353_panel *rpanel = mipi_dsi_get_drvdata(dsi);
+ 
+-	r63353_panel_unprepare(&rpanel->base);
++	drm_panel_unprepare(&rpanel->base);
+ }
+ 
+ static const struct r63353_desc sharp_ls068b3sx02_data = {
+diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
+index d35b60c0611486..77017d9518267c 100644
+--- a/drivers/hv/hv_kvp.c
++++ b/drivers/hv/hv_kvp.c
+@@ -767,6 +767,12 @@ hv_kvp_init(struct hv_util_service *srv)
+ 	 */
+ 	kvp_transaction.state = HVUTIL_DEVICE_INIT;
+ 
++	return 0;
++}
++
++int
++hv_kvp_init_transport(void)
++{
+ 	hvt = hvutil_transport_init(kvp_devname, CN_KVP_IDX, CN_KVP_VAL,
+ 				    kvp_on_msg, kvp_on_reset);
+ 	if (!hvt)
+diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c
+index 0d2184be169125..397f4c8fa46c31 100644
+--- a/drivers/hv/hv_snapshot.c
++++ b/drivers/hv/hv_snapshot.c
+@@ -388,6 +388,12 @@ hv_vss_init(struct hv_util_service *srv)
+ 	 */
+ 	vss_transaction.state = HVUTIL_DEVICE_INIT;
+ 
++	return 0;
++}
++
++int
++hv_vss_init_transport(void)
++{
+ 	hvt = hvutil_transport_init(vss_devname, CN_VSS_IDX, CN_VSS_VAL,
+ 				    vss_on_msg, vss_on_reset);
+ 	if (!hvt) {
+diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
+index c4f525325790fa..3d9360fd909acc 100644
+--- a/drivers/hv/hv_util.c
++++ b/drivers/hv/hv_util.c
+@@ -141,6 +141,7 @@ static struct hv_util_service util_heartbeat = {
+ static struct hv_util_service util_kvp = {
+ 	.util_cb = hv_kvp_onchannelcallback,
+ 	.util_init = hv_kvp_init,
++	.util_init_transport = hv_kvp_init_transport,
+ 	.util_pre_suspend = hv_kvp_pre_suspend,
+ 	.util_pre_resume = hv_kvp_pre_resume,
+ 	.util_deinit = hv_kvp_deinit,
+@@ -149,6 +150,7 @@ static struct hv_util_service util_kvp = {
+ static struct hv_util_service util_vss = {
+ 	.util_cb = hv_vss_onchannelcallback,
+ 	.util_init = hv_vss_init,
++	.util_init_transport = hv_vss_init_transport,
+ 	.util_pre_suspend = hv_vss_pre_suspend,
+ 	.util_pre_resume = hv_vss_pre_resume,
+ 	.util_deinit = hv_vss_deinit,
+@@ -613,6 +615,13 @@ static int util_probe(struct hv_device *dev,
+ 	if (ret)
+ 		goto error;
+ 
++	if (srv->util_init_transport) {
++		ret = srv->util_init_transport();
++		if (ret) {
++			vmbus_close(dev->channel);
++			goto error;
++		}
++	}
+ 	return 0;
+ 
+ error:
+diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
+index d2856023d53c9a..52cb744b4d7fde 100644
+--- a/drivers/hv/hyperv_vmbus.h
++++ b/drivers/hv/hyperv_vmbus.h
+@@ -370,12 +370,14 @@ void vmbus_on_event(unsigned long data);
+ void vmbus_on_msg_dpc(unsigned long data);
+ 
+ int hv_kvp_init(struct hv_util_service *srv);
++int hv_kvp_init_transport(void);
+ void hv_kvp_deinit(void);
+ int hv_kvp_pre_suspend(void);
+ int hv_kvp_pre_resume(void);
+ void hv_kvp_onchannelcallback(void *context);
+ 
+ int hv_vss_init(struct hv_util_service *srv);
++int hv_vss_init_transport(void);
+ void hv_vss_deinit(void);
+ int hv_vss_pre_suspend(void);
+ int hv_vss_pre_resume(void);
+diff --git a/drivers/hwmon/tmp513.c b/drivers/hwmon/tmp513.c
+index 926d28cd3fab55..1c2cb12071b808 100644
+--- a/drivers/hwmon/tmp513.c
++++ b/drivers/hwmon/tmp513.c
+@@ -182,7 +182,7 @@ struct tmp51x_data {
+ 	struct regmap *regmap;
+ };
+ 
+-// Set the shift based on the gain 8=4, 4=3, 2=2, 1=1
++// Set the shift based on the gain: 8 -> 1, 4 -> 2, 2 -> 3, 1 -> 4
+ static inline u8 tmp51x_get_pga_shift(struct tmp51x_data *data)
+ {
+ 	return 5 - ffs(data->pga_gain);
+@@ -204,7 +204,9 @@ static int tmp51x_get_value(struct tmp51x_data *data, u8 reg, u8 pos,
+ 		 * 2's complement number shifted by one to four depending
+ 		 * on the pga gain setting. 1lsb = 10uV
+ 		 */
+-		*val = sign_extend32(regval, 17 - tmp51x_get_pga_shift(data));
++		*val = sign_extend32(regval,
++				     reg == TMP51X_SHUNT_CURRENT_RESULT ?
++				     16 - tmp51x_get_pga_shift(data) : 15);
+ 		*val = DIV_ROUND_CLOSEST(*val * 10 * MILLI, data->shunt_uohms);
+ 		break;
+ 	case TMP51X_BUS_VOLTAGE_RESULT:
+@@ -220,7 +222,7 @@ static int tmp51x_get_value(struct tmp51x_data *data, u8 reg, u8 pos,
+ 		break;
+ 	case TMP51X_BUS_CURRENT_RESULT:
+ 		// Current = (ShuntVoltage * CalibrationRegister) / 4096
+-		*val = sign_extend32(regval, 16) * data->curr_lsb_ua;
++		*val = sign_extend32(regval, 15) * (long)data->curr_lsb_ua;
+ 		*val = DIV_ROUND_CLOSEST(*val, MILLI);
+ 		break;
+ 	case TMP51X_LOCAL_TEMP_RESULT:
+@@ -232,7 +234,7 @@ static int tmp51x_get_value(struct tmp51x_data *data, u8 reg, u8 pos,
+ 	case TMP51X_REMOTE_TEMP_LIMIT_2:
+ 	case TMP513_REMOTE_TEMP_LIMIT_3:
+ 		// 1lsb = 0.0625 degrees centigrade
+-		*val = sign_extend32(regval, 16) >> TMP51X_TEMP_SHIFT;
++		*val = sign_extend32(regval, 15) >> TMP51X_TEMP_SHIFT;
+ 		*val = DIV_ROUND_CLOSEST(*val * 625, 10);
+ 		break;
+ 	case TMP51X_N_FACTOR_AND_HYST_1:
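These fixes sign-extend from the register's real sign bit: bit 15 for plain 16-bit results, or lower for shunt readings shifted by the PGA gain, where 5 - ffs(gain) maps gains 8/4/2/1 to shifts 1/2/3/4. A standalone sketch of sign_extend32() semantics and the gain-to-shift mapping (a userspace re-implementation assumed to match the kernel helper):

#include <stdio.h>
#include <stdint.h>
#include <strings.h>	/* ffs() */

/* Same semantics as the kernel's sign_extend32(): treat bit 'index'
 * as the sign bit and extend it upward. */
static int32_t sign_extend32(uint32_t value, int index)
{
	uint8_t shift = 31 - index;

	return (int32_t)(value << shift) >> shift;
}

int main(void)
{
	/* gain -> shift: 8 -> 1, 4 -> 2, 2 -> 3, 1 -> 4 */
	for (int gain = 8; gain >= 1; gain /= 2)
		printf("gain %d -> shift %d\n", gain, 5 - ffs(gain));

	/* 16-bit temperature register: bit 15 is the sign bit. The old
	 * sign_extend32(regval, 16) wrongly treated 0x8000 as positive. */
	printf("0x8000 as s16: %d\n", sign_extend32(0x8000, 15));
	return 0;
}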
+diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c
+index 1dafadda73af3a..135300f3b53428 100644
+--- a/drivers/i2c/busses/i2c-pnx.c
++++ b/drivers/i2c/busses/i2c-pnx.c
+@@ -95,7 +95,7 @@ enum {
+ 
+ static inline int wait_timeout(struct i2c_pnx_algo_data *data)
+ {
+-	long timeout = data->timeout;
++	long timeout = jiffies_to_msecs(data->timeout);
+ 	while (timeout > 0 &&
+ 			(ioread32(I2C_REG_STS(data)) & mstatus_active)) {
+ 		mdelay(1);
+@@ -106,7 +106,7 @@ static inline int wait_timeout(struct i2c_pnx_algo_data *data)
+ 
+ static inline int wait_reset(struct i2c_pnx_algo_data *data)
+ {
+-	long timeout = data->timeout;
++	long timeout = jiffies_to_msecs(data->timeout);
+ 	while (timeout > 0 &&
+ 			(ioread32(I2C_REG_CTL(data)) & mcntrl_reset)) {
+ 		mdelay(1);
+diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c
+index c7f3a4c0247023..2c982199782f9b 100644
+--- a/drivers/i2c/busses/i2c-riic.c
++++ b/drivers/i2c/busses/i2c-riic.c
+@@ -352,7 +352,7 @@ static int riic_init_hw(struct riic_dev *riic)
+ 		if (brl <= (0x1F + 3))
+ 			break;
+ 
+-		total_ticks /= 2;
++		total_ticks = DIV_ROUND_UP(total_ticks, 2);
+ 		rate /= 2;
+ 	}
+ 
+diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
+index 8b6159f4cdafa4..b0bfb61539c202 100644
+--- a/drivers/irqchip/irq-gic-v3.c
++++ b/drivers/irqchip/irq-gic-v3.c
+@@ -161,7 +161,22 @@ static bool cpus_have_group0 __ro_after_init;
+ 
+ static void __init gic_prio_init(void)
+ {
+-	cpus_have_security_disabled = gic_dist_security_disabled();
++	bool ds;
++
++	ds = gic_dist_security_disabled();
++	if (!ds) {
++		u32 val;
++
++		val = readl_relaxed(gic_data.dist_base + GICD_CTLR);
++		val |= GICD_CTLR_DS;
++		writel_relaxed(val, gic_data.dist_base + GICD_CTLR);
++
++		ds = gic_dist_security_disabled();
++		if (ds)
++			pr_warn("Broken GIC integration, security disabled\n");
++	}
++
++	cpus_have_security_disabled = ds;
+ 	cpus_have_group0 = gic_has_group0();
+ 
+ 	/*
+diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
+index 813bc20cfb5a6c..6e62415de2e5ec 100644
+--- a/drivers/mmc/host/mtk-sd.c
++++ b/drivers/mmc/host/mtk-sd.c
+@@ -2924,6 +2924,7 @@ static int msdc_drv_probe(struct platform_device *pdev)
+ 	msdc_gate_clock(host);
+ 	platform_set_drvdata(pdev, NULL);
+ release_mem:
++	device_init_wakeup(&pdev->dev, false);
+ 	if (host->dma.gpd)
+ 		dma_free_coherent(&pdev->dev,
+ 			2 * sizeof(struct mt_gpdma_desc),
+@@ -2957,6 +2958,7 @@ static void msdc_drv_remove(struct platform_device *pdev)
+ 			host->dma.gpd, host->dma.gpd_addr);
+ 	dma_free_coherent(&pdev->dev, MAX_BD_NUM * sizeof(struct mt_bdma_desc),
+ 			  host->dma.bd, host->dma.bd_addr);
++	device_init_wakeup(&pdev->dev, false);
+ }
+ 
+ static void msdc_save_reg(struct msdc_host *host)
+diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
+index 1ad0a6b3a2eb77..7b6b82bec8556c 100644
+--- a/drivers/mmc/host/sdhci-tegra.c
++++ b/drivers/mmc/host/sdhci-tegra.c
+@@ -1525,7 +1525,6 @@ static const struct sdhci_pltfm_data sdhci_tegra186_pdata = {
+ 	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
+ 		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
+ 		  SDHCI_QUIRK_NO_HISPD_BIT |
+-		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
+ 		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
+ 	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
+ 		   SDHCI_QUIRK2_ISSUE_CMD_DAT_RESET_TOGETHER,
+diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
+index 533bcb77c9f934..97cd8bbf2e32a9 100644
+--- a/drivers/net/can/m_can/m_can.c
++++ b/drivers/net/can/m_can/m_can.c
+@@ -1220,20 +1220,32 @@ static void m_can_coalescing_update(struct m_can_classdev *cdev, u32 ir)
+ static int m_can_interrupt_handler(struct m_can_classdev *cdev)
+ {
+ 	struct net_device *dev = cdev->net;
+-	u32 ir;
++	u32 ir = 0, ir_read;
+ 	int ret;
+ 
+ 	if (pm_runtime_suspended(cdev->dev))
+ 		return IRQ_NONE;
+ 
+-	ir = m_can_read(cdev, M_CAN_IR);
++	/* The m_can controller signals its interrupt status as a level, but
++	 * depending on the integration, the CPU may interpret the signal as
++	 * edge-triggered (for example with m_can_pci). For these
++	 * edge-triggered integrations, we must observe that IR is 0 at least
++	 * once to be sure that the next interrupt will generate an edge.
++	 */
++	while ((ir_read = m_can_read(cdev, M_CAN_IR)) != 0) {
++		ir |= ir_read;
++
++		/* ACK all irqs */
++		m_can_write(cdev, M_CAN_IR, ir);
++
++		if (!cdev->irq_edge_triggered)
++			break;
++	}
++
+ 	m_can_coalescing_update(cdev, ir);
+ 	if (!ir)
+ 		return IRQ_NONE;
+ 
+-	/* ACK all irqs */
+-	m_can_write(cdev, M_CAN_IR, ir);
+-
+ 	if (cdev->ops->clear_interrupts)
+ 		cdev->ops->clear_interrupts(cdev);
+ 
+@@ -1695,6 +1707,14 @@ static int m_can_dev_setup(struct m_can_classdev *cdev)
+ 		return -EINVAL;
+ 	}
+ 
++	/* Write the INIT bit, in case no hardware reset has happened before
++	 * the probe (for example, it was observed that the Intel Elkhart Lake
++	 * SoCs do not properly reset the CAN controllers on reboot)
++	 */
++	err = m_can_cccr_update_bits(cdev, CCCR_INIT, CCCR_INIT);
++	if (err)
++		return err;
++
+ 	if (!cdev->is_peripheral)
+ 		netif_napi_add(dev, &cdev->napi, m_can_poll);
+ 
+@@ -1746,11 +1766,7 @@ static int m_can_dev_setup(struct m_can_classdev *cdev)
+ 		return -EINVAL;
+ 	}
+ 
+-	/* Forcing standby mode should be redundant, as the chip should be in
+-	 * standby after a reset. Write the INIT bit anyways, should the chip
+-	 * be configured by previous stage.
+-	 */
+-	return m_can_cccr_update_bits(cdev, CCCR_INIT, CCCR_INIT);
++	return 0;
+ }
+ 
+ static void m_can_stop(struct net_device *dev)
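For edge-triggered integrations the handler must keep reading and acknowledging IR until it observes 0; otherwise a cause bit that latches while the handler runs would never produce a new edge and the line would stall. A sketch of that drain-until-zero pattern against a fake status register (toy names, not the m_can register interface):

#include <stdio.h>
#include <stdint.h>

/* Fake interrupt-status register: reading returns pending bits, and
 * writing a mask acknowledges (clears) those bits. */
static uint32_t fake_ir = 0x5;

static uint32_t read_ir(void)  { return fake_ir; }
static void ack_ir(uint32_t m) { fake_ir &= ~m; }

/* Drain-until-zero: for edge-triggered lines, keep reading and acking
 * until the register reads 0, so the next event produces a new edge. */
static uint32_t handle_irq(int edge_triggered)
{
	uint32_t ir = 0, ir_read;

	while ((ir_read = read_ir()) != 0) {
		ir |= ir_read;
		ack_ir(ir_read);

		/* Simulate a bit that latches while we were handling. */
		if (ir == 0x5)
			fake_ir |= 0x8;

		if (!edge_triggered)
			break;
	}
	return ir;	/* accumulated causes to service */
}

int main(void)
{
	/* 0xd: the late bit 0x8 is picked up instead of being lost */
	printf("handled 0x%x\n", handle_irq(1));
	return 0;
}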
+diff --git a/drivers/net/can/m_can/m_can.h b/drivers/net/can/m_can/m_can.h
+index 92b2bd8628e6b3..ef39e8e527ab67 100644
+--- a/drivers/net/can/m_can/m_can.h
++++ b/drivers/net/can/m_can/m_can.h
+@@ -99,6 +99,7 @@ struct m_can_classdev {
+ 	int pm_clock_support;
+ 	int pm_wake_source;
+ 	int is_peripheral;
++	bool irq_edge_triggered;
+ 
+ 	// Cached M_CAN_IE register content
+ 	u32 active_interrupts;
+diff --git a/drivers/net/can/m_can/m_can_pci.c b/drivers/net/can/m_can/m_can_pci.c
+index d72fe771dfc7aa..9ad7419f88f830 100644
+--- a/drivers/net/can/m_can/m_can_pci.c
++++ b/drivers/net/can/m_can/m_can_pci.c
+@@ -127,6 +127,7 @@ static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
+ 	mcan_class->pm_clock_support = 1;
+ 	mcan_class->pm_wake_source = 0;
+ 	mcan_class->can.clock.freq = id->driver_data;
++	mcan_class->irq_edge_triggered = true;
+ 	mcan_class->ops = &m_can_pci_ops;
+ 
+ 	pci_set_drvdata(pci, mcan_class);
+diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c
+index 77425c7a32dbf8..78f7862ca00669 100644
+--- a/drivers/net/ethernet/broadcom/bgmac-platform.c
++++ b/drivers/net/ethernet/broadcom/bgmac-platform.c
+@@ -171,6 +171,7 @@ static int platform_phy_connect(struct bgmac *bgmac)
+ static int bgmac_probe(struct platform_device *pdev)
+ {
+ 	struct device_node *np = pdev->dev.of_node;
++	struct device_node *phy_node;
+ 	struct bgmac *bgmac;
+ 	struct resource *regs;
+ 	int ret;
+@@ -236,7 +237,9 @@ static int bgmac_probe(struct platform_device *pdev)
+ 	bgmac->cco_ctl_maskset = platform_bgmac_cco_ctl_maskset;
+ 	bgmac->get_bus_clock = platform_bgmac_get_bus_clock;
+ 	bgmac->cmn_maskset32 = platform_bgmac_cmn_maskset32;
+-	if (of_parse_phandle(np, "phy-handle", 0)) {
++	phy_node = of_parse_phandle(np, "phy-handle", 0);
++	if (phy_node) {
++		of_node_put(phy_node);
+ 		bgmac->phy_connect = platform_phy_connect;
+ 	} else {
+ 		bgmac->phy_connect = bgmac_phy_connect_direct;
+diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c
+index 455a54708be440..a83e7d3c2485bd 100644
+--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c
++++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c
+@@ -346,8 +346,9 @@ static struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl,
+ 	 * driver. Once the driver synthesizes cpl_pass_accept_req, the skb will go
+ 	 * through the regular cpl_pass_accept_req processing in TOM.
+ 	 */
+-	skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req)
+-			- pktshift, GFP_ATOMIC);
++	skb = alloc_skb(size_add(gl->tot_len,
++				 sizeof(struct cpl_pass_accept_req)) -
++			pktshift, GFP_ATOMIC);
+ 	if (unlikely(!skb))
+ 		return NULL;
+ 	__skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req)
+diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
+index 890f213da8d180..ae1f523d6841b5 100644
+--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
++++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
+@@ -172,6 +172,7 @@ static int create_txqs(struct hinic_dev *nic_dev)
+ 	hinic_sq_dbgfs_uninit(nic_dev);
+ 
+ 	devm_kfree(&netdev->dev, nic_dev->txqs);
++	nic_dev->txqs = NULL;
+ 	return err;
+ }
+ 
+@@ -268,6 +269,7 @@ static int create_rxqs(struct hinic_dev *nic_dev)
+ 	hinic_rq_dbgfs_uninit(nic_dev);
+ 
+ 	devm_kfree(&netdev->dev, nic_dev->rxqs);
++	nic_dev->rxqs = NULL;
+ 	return err;
+ }
+ 
+diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
+index 3d72aa7b130503..ef93df52088710 100644
+--- a/drivers/net/ethernet/mscc/ocelot.c
++++ b/drivers/net/ethernet/mscc/ocelot.c
+@@ -1432,7 +1432,7 @@ void ocelot_ifh_set_basic(void *ifh, struct ocelot *ocelot, int port,
+ 
+ 	memset(ifh, 0, OCELOT_TAG_LEN);
+ 	ocelot_ifh_set_bypass(ifh, 1);
+-	ocelot_ifh_set_src(ifh, BIT_ULL(ocelot->num_phys_ports));
++	ocelot_ifh_set_src(ifh, ocelot->num_phys_ports);
+ 	ocelot_ifh_set_dest(ifh, BIT_ULL(port));
+ 	ocelot_ifh_set_qos_class(ifh, qos_class);
+ 	ocelot_ifh_set_tag_type(ifh, tag_type);
+diff --git a/drivers/net/ethernet/oa_tc6.c b/drivers/net/ethernet/oa_tc6.c
+index f9c0dcd965c2e7..db200e4ec284d7 100644
+--- a/drivers/net/ethernet/oa_tc6.c
++++ b/drivers/net/ethernet/oa_tc6.c
+@@ -113,6 +113,7 @@ struct oa_tc6 {
+ 	struct mii_bus *mdiobus;
+ 	struct spi_device *spi;
+ 	struct mutex spi_ctrl_lock; /* Protects spi control transfer */
++	spinlock_t tx_skb_lock; /* Protects tx skb handling */
+ 	void *spi_ctrl_tx_buf;
+ 	void *spi_ctrl_rx_buf;
+ 	void *spi_data_tx_buf;
+@@ -1004,8 +1005,10 @@ static u16 oa_tc6_prepare_spi_tx_buf_for_tx_skbs(struct oa_tc6 *tc6)
+ 	for (used_tx_credits = 0; used_tx_credits < tc6->tx_credits;
+ 	     used_tx_credits++) {
+ 		if (!tc6->ongoing_tx_skb) {
++			spin_lock_bh(&tc6->tx_skb_lock);
+ 			tc6->ongoing_tx_skb = tc6->waiting_tx_skb;
+ 			tc6->waiting_tx_skb = NULL;
++			spin_unlock_bh(&tc6->tx_skb_lock);
+ 		}
+ 		if (!tc6->ongoing_tx_skb)
+ 			break;
+@@ -1111,8 +1114,9 @@ static int oa_tc6_spi_thread_handler(void *data)
+ 		/* This kthread will be woken up if there is a tx skb or mac-phy
+ 		 * interrupt to perform spi transfer with tx chunks.
+ 		 */
+-		wait_event_interruptible(tc6->spi_wq, tc6->waiting_tx_skb ||
+-					 tc6->int_flag ||
++		wait_event_interruptible(tc6->spi_wq, tc6->int_flag ||
++					 (tc6->waiting_tx_skb &&
++					 tc6->tx_credits) ||
+ 					 kthread_should_stop());
+ 
+ 		if (kthread_should_stop())
+@@ -1209,7 +1213,9 @@ netdev_tx_t oa_tc6_start_xmit(struct oa_tc6 *tc6, struct sk_buff *skb)
+ 		return NETDEV_TX_OK;
+ 	}
+ 
++	spin_lock_bh(&tc6->tx_skb_lock);
+ 	tc6->waiting_tx_skb = skb;
++	spin_unlock_bh(&tc6->tx_skb_lock);
+ 
+ 	/* Wake spi kthread to perform spi transfer */
+ 	wake_up_interruptible(&tc6->spi_wq);
+@@ -1239,6 +1245,7 @@ struct oa_tc6 *oa_tc6_init(struct spi_device *spi, struct net_device *netdev)
+ 	tc6->netdev = netdev;
+ 	SET_NETDEV_DEV(netdev, &spi->dev);
+ 	mutex_init(&tc6->spi_ctrl_lock);
++	spin_lock_init(&tc6->tx_skb_lock);
+ 
+ 	/* Set the SPI controller to pump at realtime priority */
+ 	tc6->spi->rt = true;
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
+index 9e42d599840ded..57edcde9e6f8c6 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c
++++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
+@@ -277,7 +277,10 @@ void ionic_dev_teardown(struct ionic *ionic)
+ 	idev->phy_cmb_pages = 0;
+ 	idev->cmb_npages = 0;
+ 
+-	destroy_workqueue(ionic->wq);
++	if (ionic->wq) {
++		destroy_workqueue(ionic->wq);
++		ionic->wq = NULL;
++	}
+ 	mutex_destroy(&idev->cmb_inuse_lock);
+ }
+ 
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+index dda22fa4448cff..9b7f78b6cdb1e3 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
++++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+@@ -961,8 +961,8 @@ static int ionic_get_module_eeprom(struct net_device *netdev,
+ 	len = min_t(u32, sizeof(xcvr->sprom), ee->len);
+ 
+ 	do {
+-		memcpy(data, xcvr->sprom, len);
+-		memcpy(tbuf, xcvr->sprom, len);
++		memcpy(data, &xcvr->sprom[ee->offset], len);
++		memcpy(tbuf, &xcvr->sprom[ee->offset], len);
+ 
+ 		/* Let's make sure we got a consistent copy */
+ 		if (!memcmp(data, tbuf, len))
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+index 40496587b2b318..3d3f936779f7d9 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
++++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+@@ -3869,8 +3869,8 @@ int ionic_lif_register(struct ionic_lif *lif)
+ 	/* only register LIF0 for now */
+ 	err = register_netdev(lif->netdev);
+ 	if (err) {
+-		dev_err(lif->ionic->dev, "Cannot register net device, aborting\n");
+-		ionic_lif_unregister_phc(lif);
++		dev_err(lif->ionic->dev, "Cannot register net device: %d, aborting\n", err);
++		ionic_lif_unregister(lif);
+ 		return err;
+ 	}
+ 
+diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c
+index 09117110e3dd2a..f86fcecb91a8bd 100644
+--- a/drivers/net/ethernet/renesas/rswitch.c
++++ b/drivers/net/ethernet/renesas/rswitch.c
+@@ -547,7 +547,6 @@ static int rswitch_gwca_ts_queue_alloc(struct rswitch_private *priv)
+ 	desc = &gq->ts_ring[gq->ring_size];
+ 	desc->desc.die_dt = DT_LINKFIX;
+ 	rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
+-	INIT_LIST_HEAD(&priv->gwca.ts_info_list);
+ 
+ 	return 0;
+ }
+@@ -1003,9 +1002,10 @@ static int rswitch_gwca_request_irqs(struct rswitch_private *priv)
+ static void rswitch_ts(struct rswitch_private *priv)
+ {
+ 	struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
+-	struct rswitch_gwca_ts_info *ts_info, *ts_info2;
+ 	struct skb_shared_hwtstamps shhwtstamps;
+ 	struct rswitch_ts_desc *desc;
++	struct rswitch_device *rdev;
++	struct sk_buff *ts_skb;
+ 	struct timespec64 ts;
+ 	unsigned int num;
+ 	u32 tag, port;
+@@ -1015,23 +1015,28 @@ static void rswitch_ts(struct rswitch_private *priv)
+ 		dma_rmb();
+ 
+ 		port = TS_DESC_DPN(__le32_to_cpu(desc->desc.dptrl));
+-		tag = TS_DESC_TSUN(__le32_to_cpu(desc->desc.dptrl));
+-
+-		list_for_each_entry_safe(ts_info, ts_info2, &priv->gwca.ts_info_list, list) {
+-			if (!(ts_info->port == port && ts_info->tag == tag))
+-				continue;
+-
+-			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+-			ts.tv_sec = __le32_to_cpu(desc->ts_sec);
+-			ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff));
+-			shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
+-			skb_tstamp_tx(ts_info->skb, &shhwtstamps);
+-			dev_consume_skb_irq(ts_info->skb);
+-			list_del(&ts_info->list);
+-			kfree(ts_info);
+-			break;
+-		}
++		if (unlikely(port >= RSWITCH_NUM_PORTS))
++			goto next;
++		rdev = priv->rdev[port];
+ 
++		tag = TS_DESC_TSUN(__le32_to_cpu(desc->desc.dptrl));
++		if (unlikely(tag >= TS_TAGS_PER_PORT))
++			goto next;
++		ts_skb = xchg(&rdev->ts_skb[tag], NULL);
++		smp_mb(); /* order rdev->ts_skb[] read before bitmap update */
++		clear_bit(tag, rdev->ts_skb_used);
++
++		if (unlikely(!ts_skb))
++			goto next;
++
++		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
++		ts.tv_sec = __le32_to_cpu(desc->ts_sec);
++		ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff));
++		shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
++		skb_tstamp_tx(ts_skb, &shhwtstamps);
++		dev_consume_skb_irq(ts_skb);
++
++next:
+ 		gq->cur = rswitch_next_queue_index(gq, true, 1);
+ 		desc = &gq->ts_ring[gq->cur];
+ 	}
+@@ -1576,8 +1581,9 @@ static int rswitch_open(struct net_device *ndev)
+ static int rswitch_stop(struct net_device *ndev)
+ {
+ 	struct rswitch_device *rdev = netdev_priv(ndev);
+-	struct rswitch_gwca_ts_info *ts_info, *ts_info2;
++	struct sk_buff *ts_skb;
+ 	unsigned long flags;
++	unsigned int tag;
+ 
+ 	netif_tx_stop_all_queues(ndev);
+ 
+@@ -1594,12 +1600,13 @@ static int rswitch_stop(struct net_device *ndev)
+ 	if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
+ 		iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDID);
+ 
+-	list_for_each_entry_safe(ts_info, ts_info2, &rdev->priv->gwca.ts_info_list, list) {
+-		if (ts_info->port != rdev->port)
+-			continue;
+-		dev_kfree_skb_irq(ts_info->skb);
+-		list_del(&ts_info->list);
+-		kfree(ts_info);
++	for (tag = find_first_bit(rdev->ts_skb_used, TS_TAGS_PER_PORT);
++	     tag < TS_TAGS_PER_PORT;
++	     tag = find_next_bit(rdev->ts_skb_used, TS_TAGS_PER_PORT, tag + 1)) {
++		ts_skb = xchg(&rdev->ts_skb[tag], NULL);
++		clear_bit(tag, rdev->ts_skb_used);
++		if (ts_skb)
++			dev_kfree_skb(ts_skb);
+ 	}
+ 
+ 	return 0;
+@@ -1612,20 +1619,17 @@ static bool rswitch_ext_desc_set_info1(struct rswitch_device *rdev,
+ 	desc->info1 = cpu_to_le64(INFO1_DV(BIT(rdev->etha->index)) |
+ 				  INFO1_IPV(GWCA_IPV_NUM) | INFO1_FMT);
+ 	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+-		struct rswitch_gwca_ts_info *ts_info;
++		unsigned int tag;
+ 
+-		ts_info = kzalloc(sizeof(*ts_info), GFP_ATOMIC);
+-		if (!ts_info)
++		tag = find_first_zero_bit(rdev->ts_skb_used, TS_TAGS_PER_PORT);
++		if (tag == TS_TAGS_PER_PORT)
+ 			return false;
++		smp_mb(); /* order bitmap read before rdev->ts_skb[] write */
++		rdev->ts_skb[tag] = skb_get(skb);
++		set_bit(tag, rdev->ts_skb_used);
+ 
+ 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+-		rdev->ts_tag++;
+-		desc->info1 |= cpu_to_le64(INFO1_TSUN(rdev->ts_tag) | INFO1_TXC);
+-
+-		ts_info->skb = skb_get(skb);
+-		ts_info->port = rdev->port;
+-		ts_info->tag = rdev->ts_tag;
+-		list_add_tail(&ts_info->list, &rdev->priv->gwca.ts_info_list);
++		desc->info1 |= cpu_to_le64(INFO1_TSUN(tag) | INFO1_TXC);
+ 
+ 		skb_tx_timestamp(skb);
+ 	}
+diff --git a/drivers/net/ethernet/renesas/rswitch.h b/drivers/net/ethernet/renesas/rswitch.h
+index e020800dcc570e..d8d4ed7d7f8b6a 100644
+--- a/drivers/net/ethernet/renesas/rswitch.h
++++ b/drivers/net/ethernet/renesas/rswitch.h
+@@ -972,14 +972,6 @@ struct rswitch_gwca_queue {
+ 	};
+ };
+ 
+-struct rswitch_gwca_ts_info {
+-	struct sk_buff *skb;
+-	struct list_head list;
+-
+-	int port;
+-	u8 tag;
+-};
+-
+ #define RSWITCH_NUM_IRQ_REGS	(RSWITCH_MAX_NUM_QUEUES / BITS_PER_TYPE(u32))
+ struct rswitch_gwca {
+ 	unsigned int index;
+@@ -989,7 +981,6 @@ struct rswitch_gwca {
+ 	struct rswitch_gwca_queue *queues;
+ 	int num_queues;
+ 	struct rswitch_gwca_queue ts_queue;
+-	struct list_head ts_info_list;
+ 	DECLARE_BITMAP(used, RSWITCH_MAX_NUM_QUEUES);
+ 	u32 tx_irq_bits[RSWITCH_NUM_IRQ_REGS];
+ 	u32 rx_irq_bits[RSWITCH_NUM_IRQ_REGS];
+@@ -997,6 +988,7 @@ struct rswitch_gwca {
+ };
+ 
+ #define NUM_QUEUES_PER_NDEV	2
++#define TS_TAGS_PER_PORT	256
+ struct rswitch_device {
+ 	struct rswitch_private *priv;
+ 	struct net_device *ndev;
+@@ -1004,7 +996,8 @@ struct rswitch_device {
+ 	void __iomem *addr;
+ 	struct rswitch_gwca_queue *tx_queue;
+ 	struct rswitch_gwca_queue *rx_queue;
+-	u8 ts_tag;
++	struct sk_buff *ts_skb[TS_TAGS_PER_PORT];
++	DECLARE_BITMAP(ts_skb_used, TS_TAGS_PER_PORT);
+ 	bool disabled;
+ 
+ 	int port;
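The timestamp rework swaps the kmalloc'd ts_info list for a fixed array of TS_TAGS_PER_PORT skb slots plus a used bitmap; completion and teardown paths both claim a slot's skb with xchg(), so only one of them ever frees it. A compact userspace sketch of that slot protocol using C11 atomics (toy types and a plain unsigned bitmap, not the kernel primitives):

#include <stdio.h>
#include <stdatomic.h>

#define NSLOTS 8	/* kernel uses TS_TAGS_PER_PORT = 256 */

static _Atomic(const char *) slot[NSLOTS];
static unsigned int used;	/* toy bitmap; kernel uses set_bit/clear_bit */

/* Transmit side: find a free tag, publish the skb, mark it used. */
static int claim_tag(const char *skb)
{
	for (int tag = 0; tag < NSLOTS; tag++) {
		if (!(used & (1u << tag))) {
			atomic_store(&slot[tag], skb);
			used |= 1u << tag;
			return tag;
		}
	}
	return -1;	/* all tags in flight */
}

/* Completion side: atomically take ownership, then release the tag.
 * Only one of the racing completion/teardown paths wins the exchange. */
static const char *complete_tag(int tag)
{
	const char *skb = atomic_exchange(&slot[tag], NULL);

	used &= ~(1u << tag);
	return skb;
}

int main(void)
{
	int tag = claim_tag("skb-A");
	const char *again;

	printf("tag %d -> %s\n", tag, complete_tag(tag));
	again = complete_tag(tag);
	printf("again -> %s\n", again ? again : "(already taken)");
	return 0;
}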
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 766213ee82c16e..cf7b59b8cc64b3 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -4220,8 +4220,8 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
+ 	struct stmmac_txq_stats *txq_stats;
+ 	struct stmmac_tx_queue *tx_q;
+ 	u32 pay_len, mss, queue;
++	dma_addr_t tso_des, des;
+ 	u8 proto_hdr_len, hdr;
+-	dma_addr_t des;
+ 	bool set_ic;
+ 	int i;
+ 
+@@ -4317,14 +4317,15 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
+ 
+ 		/* If needed take extra descriptors to fill the remaining payload */
+ 		tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
++		tso_des = des;
+ 	} else {
+ 		stmmac_set_desc_addr(priv, first, des);
+ 		tmp_pay_len = pay_len;
+-		des += proto_hdr_len;
++		tso_des = des + proto_hdr_len;
+ 		pay_len = 0;
+ 	}
+ 
+-	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
++	stmmac_tso_allocator(priv, tso_des, tmp_pay_len, (nfrags == 0), queue);
+ 
+ 	/* In case two or more DMA transmit descriptors are allocated for this
+ 	 * non-paged SKB data, the DMA buffer address should be saved to
+diff --git a/drivers/net/mdio/fwnode_mdio.c b/drivers/net/mdio/fwnode_mdio.c
+index b156493d708415..aea0f03575689a 100644
+--- a/drivers/net/mdio/fwnode_mdio.c
++++ b/drivers/net/mdio/fwnode_mdio.c
+@@ -40,6 +40,7 @@ fwnode_find_pse_control(struct fwnode_handle *fwnode)
+ static struct mii_timestamper *
+ fwnode_find_mii_timestamper(struct fwnode_handle *fwnode)
+ {
++	struct mii_timestamper *mii_ts;
+ 	struct of_phandle_args arg;
+ 	int err;
+ 
+@@ -53,10 +54,16 @@ fwnode_find_mii_timestamper(struct fwnode_handle *fwnode)
+ 	else if (err)
+ 		return ERR_PTR(err);
+ 
+-	if (arg.args_count != 1)
+-		return ERR_PTR(-EINVAL);
++	if (arg.args_count != 1) {
++		mii_ts = ERR_PTR(-EINVAL);
++		goto put_node;
++	}
++
++	mii_ts = register_mii_timestamper(arg.np, arg.args[0]);
+ 
+-	return register_mii_timestamper(arg.np, arg.args[0]);
++put_node:
++	of_node_put(arg.np);
++	return mii_ts;
+ }
+ 
+ int fwnode_mdiobus_phy_device_register(struct mii_bus *mdio,
+diff --git a/drivers/net/netdevsim/health.c b/drivers/net/netdevsim/health.c
+index 70e8bdf34be900..688f05316b5e10 100644
+--- a/drivers/net/netdevsim/health.c
++++ b/drivers/net/netdevsim/health.c
+@@ -149,6 +149,8 @@ static ssize_t nsim_dev_health_break_write(struct file *file,
+ 	char *break_msg;
+ 	int err;
+ 
++	if (count == 0 || count > PAGE_SIZE)
++		return -EINVAL;
+ 	break_msg = memdup_user_nul(data, count);
+ 	if (IS_ERR(break_msg))
+ 		return PTR_ERR(break_msg);
+diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
+index 017a6102be0a22..1b29d1d794a201 100644
+--- a/drivers/net/netdevsim/netdev.c
++++ b/drivers/net/netdevsim/netdev.c
+@@ -596,10 +596,10 @@ nsim_pp_hold_write(struct file *file, const char __user *data,
+ 		page_pool_put_full_page(ns->page->pp, ns->page, false);
+ 		ns->page = NULL;
+ 	}
+-	rtnl_unlock();
+ 
+ exit:
+-	return count;
++	rtnl_unlock();
++	return ret;
+ }
+ 
+ static const struct file_operations nsim_pp_hold_fops = {
+diff --git a/drivers/net/team/team_core.c b/drivers/net/team/team_core.c
+index 6ace5a74cddb57..1c85dda83825d8 100644
+--- a/drivers/net/team/team_core.c
++++ b/drivers/net/team/team_core.c
+@@ -998,9 +998,13 @@ static void __team_compute_features(struct team *team)
+ 	unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
+ 					IFF_XMIT_DST_RELEASE_PERM;
+ 
++	rcu_read_lock();
++	if (list_empty(&team->port_list))
++		goto done;
++
+ 	vlan_features = netdev_base_features(vlan_features);
++	enc_features = netdev_base_features(enc_features);
+ 
+-	rcu_read_lock();
+ 	list_for_each_entry_rcu(port, &team->port_list, list) {
+ 		vlan_features = netdev_increment_features(vlan_features,
+ 					port->dev->vlan_features,
+@@ -1010,11 +1014,11 @@ static void __team_compute_features(struct team *team)
+ 						  port->dev->hw_enc_features,
+ 						  TEAM_ENC_FEATURES);
+ 
+-
+ 		dst_release_flag &= port->dev->priv_flags;
+ 		if (port->dev->hard_header_len > max_hard_header_len)
+ 			max_hard_header_len = port->dev->hard_header_len;
+ 	}
++done:
+ 	rcu_read_unlock();
+ 
+ 	team->dev->vlan_features = vlan_features;
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 9a0f6eb3201661..03fe9e3ee7af15 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -1481,7 +1481,7 @@ static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile,
+ 	skb->truesize += skb->data_len;
+ 
+ 	for (i = 1; i < it->nr_segs; i++) {
+-		const struct iovec *iov = iter_iov(it);
++		const struct iovec *iov = iter_iov(it) + i;
+ 		size_t fragsz = iov->iov_len;
+ 		struct page *page;
+ 		void *frag;
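iter_iov() returns a pointer to the iterator's first iovec, not to the current segment, so the loop above has to index explicitly; without the + i, every iteration re-read segment 0 and sized its fragment from the wrong entry. The access pattern, isolated:

	/* Sketch: visit each segment of an ITER_IOVEC iterator. */
	for (i = 1; i < it->nr_segs; i++) {
		const struct iovec *iov = iter_iov(it) + i;	/* segment i */
		size_t fragsz = iov->iov_len;
		/* ... */
	}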
+diff --git a/drivers/of/address.c b/drivers/of/address.c
+index 286f0c161e332f..a565b8c91da593 100644
+--- a/drivers/of/address.c
++++ b/drivers/of/address.c
+@@ -455,7 +455,8 @@ static int of_translate_one(struct device_node *parent, struct of_bus *bus,
+ 	}
+ 	if (ranges == NULL || rlen == 0) {
+ 		offset = of_read_number(addr, na);
+-		memset(addr, 0, pna * 4);
++		/* set address to zero, pass flags through */
++		memset(addr + pbus->flag_cells, 0, (pna - pbus->flag_cells) * 4);
+ 		pr_debug("empty ranges; 1:1 translation\n");
+ 		goto finish;
+ 	}
+@@ -615,7 +616,7 @@ struct device_node *__of_get_dma_parent(const struct device_node *np)
+ 	if (ret < 0)
+ 		return of_get_parent(np);
+ 
+-	return of_node_get(args.np);
++	return args.np;
+ }
+ #endif
+ 
+diff --git a/drivers/of/base.c b/drivers/of/base.c
+index 20603d3c9931b8..63161d0f72b4e8 100644
+--- a/drivers/of/base.c
++++ b/drivers/of/base.c
+@@ -1455,8 +1455,10 @@ int of_parse_phandle_with_args_map(const struct device_node *np,
+ 			map_len--;
+ 
+ 			/* Check if not found */
+-			if (!new)
++			if (!new) {
++				ret = -EINVAL;
+ 				goto put;
++			}
+ 
+ 			if (!of_device_is_available(new))
+ 				match = 0;
+@@ -1466,17 +1468,20 @@ int of_parse_phandle_with_args_map(const struct device_node *np,
+ 				goto put;
+ 
+ 			/* Check for malformed properties */
+-			if (WARN_ON(new_size > MAX_PHANDLE_ARGS))
+-				goto put;
+-			if (map_len < new_size)
++			if (WARN_ON(new_size > MAX_PHANDLE_ARGS) ||
++			    map_len < new_size) {
++				ret = -EINVAL;
+ 				goto put;
++			}
+ 
+ 			/* Move forward by new node's #<list>-cells amount */
+ 			map += new_size;
+ 			map_len -= new_size;
+ 		}
+-		if (!match)
++		if (!match) {
++			ret = -ENOENT;
+ 			goto put;
++		}
+ 
+ 		/* Get the <list>-map-pass-thru property (optional) */
+ 		pass = of_get_property(cur, pass_name, NULL);
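Before these hunks, every failure inside the map-walking loop funneled to the put: label with whatever value happened to be in ret; the explicit assignments let callers distinguish a malformed map (-EINVAL) from a map with no matching entry (-ENOENT). The idiom in a self-contained sketch (the boolean flags are illustrative only):

	static int example_lookup(bool malformed, bool matched)
	{
		int ret = 0;

		if (malformed) {
			ret = -EINVAL;	/* property cannot be parsed */
			goto put;
		}
		if (!matched) {
			ret = -ENOENT;	/* parsed fine, nothing matched */
			goto put;
		}
		/* success-path work */
	put:
		/* shared cleanup (of_node_put() etc.) runs for every outcome */
		return ret;
	}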
+diff --git a/drivers/of/irq.c b/drivers/of/irq.c
+index a494f56a0d0ee4..1fb329c0a55b8c 100644
+--- a/drivers/of/irq.c
++++ b/drivers/of/irq.c
+@@ -111,6 +111,7 @@ const __be32 *of_irq_parse_imap_parent(const __be32 *imap, int len, struct of_ph
+ 	else
+ 		np = of_find_node_by_phandle(be32_to_cpup(imap));
+ 	imap++;
++	len--;
+ 
+ 	/* Check if not found */
+ 	if (!np) {
+@@ -354,6 +355,7 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar
+ 		return of_irq_parse_oldworld(device, index, out_irq);
+ 
+ 	/* Get the reg property (if any) */
++	addr_len = 0;
+ 	addr = of_get_property(device, "reg", &addr_len);
+ 
+ 	/* Prevent out-of-bounds read in case of longer interrupt parent address size */
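of_get_property() stores a length through its third argument only when the property exists, so addr_len must be initialized first; otherwise the bounds clamp that follows would read an uninitialized stack variable whenever a node has no "reg" property. The safe shape:

	int addr_len = 0;	/* stays well defined even if "reg" is absent */
	const __be32 *addr;

	addr = of_get_property(device, "reg", &addr_len);
	/* addr may be NULL here, but addr_len is always meaningful */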
+diff --git a/drivers/of/property.c b/drivers/of/property.c
+index 11b922fde7af16..7bd8390f2fba5e 100644
+--- a/drivers/of/property.c
++++ b/drivers/of/property.c
+@@ -1213,7 +1213,6 @@ DEFINE_SIMPLE_PROP(iommus, "iommus", "#iommu-cells")
+ DEFINE_SIMPLE_PROP(mboxes, "mboxes", "#mbox-cells")
+ DEFINE_SIMPLE_PROP(io_channels, "io-channels", "#io-channel-cells")
+ DEFINE_SIMPLE_PROP(io_backends, "io-backends", "#io-backend-cells")
+-DEFINE_SIMPLE_PROP(interrupt_parent, "interrupt-parent", NULL)
+ DEFINE_SIMPLE_PROP(dmas, "dmas", "#dma-cells")
+ DEFINE_SIMPLE_PROP(power_domains, "power-domains", "#power-domain-cells")
+ DEFINE_SIMPLE_PROP(hwlocks, "hwlocks", "#hwlock-cells")
+@@ -1359,7 +1358,6 @@ static const struct supplier_bindings of_supplier_bindings[] = {
+ 	{ .parse_prop = parse_mboxes, },
+ 	{ .parse_prop = parse_io_channels, },
+ 	{ .parse_prop = parse_io_backends, },
+-	{ .parse_prop = parse_interrupt_parent, },
+ 	{ .parse_prop = parse_dmas, .optional = true, },
+ 	{ .parse_prop = parse_power_domains, },
+ 	{ .parse_prop = parse_hwlocks, },
+diff --git a/drivers/platform/x86/p2sb.c b/drivers/platform/x86/p2sb.c
+index 31f38309b389ab..c56650b9ff9628 100644
+--- a/drivers/platform/x86/p2sb.c
++++ b/drivers/platform/x86/p2sb.c
+@@ -42,6 +42,7 @@ struct p2sb_res_cache {
+ };
+ 
+ static struct p2sb_res_cache p2sb_resources[NR_P2SB_RES_CACHE];
++static bool p2sb_hidden_by_bios;
+ 
+ static void p2sb_get_devfn(unsigned int *devfn)
+ {
+@@ -96,6 +97,12 @@ static void p2sb_scan_and_cache_devfn(struct pci_bus *bus, unsigned int devfn)
+ 
+ static int p2sb_scan_and_cache(struct pci_bus *bus, unsigned int devfn)
+ {
++	/*
++	 * The BIOS prevents the P2SB device from being enumerated by the PCI
++	 * subsystem, so we need to unhide and hide it back to lookup the BAR.
++	 */
++	pci_bus_write_config_dword(bus, devfn, P2SBC, 0);
++
+ 	/* Scan the P2SB device and cache its BAR0 */
+ 	p2sb_scan_and_cache_devfn(bus, devfn);
+ 
+@@ -103,6 +110,8 @@ static int p2sb_scan_and_cache(struct pci_bus *bus, unsigned int devfn)
+ 	if (devfn == P2SB_DEVFN_GOLDMONT)
+ 		p2sb_scan_and_cache_devfn(bus, SPI_DEVFN_GOLDMONT);
+ 
++	pci_bus_write_config_dword(bus, devfn, P2SBC, P2SBC_HIDE);
++
+ 	if (!p2sb_valid_resource(&p2sb_resources[PCI_FUNC(devfn)].res))
+ 		return -ENOENT;
+ 
+@@ -128,7 +137,7 @@ static int p2sb_cache_resources(void)
+ 	u32 value = P2SBC_HIDE;
+ 	struct pci_bus *bus;
+ 	u16 class;
+-	int ret;
++	int ret = 0;
+ 
+ 	/* Get devfn for P2SB device itself */
+ 	p2sb_get_devfn(&devfn_p2sb);
+@@ -151,22 +160,53 @@ static int p2sb_cache_resources(void)
+ 	 */
+ 	pci_lock_rescan_remove();
+ 
++	pci_bus_read_config_dword(bus, devfn_p2sb, P2SBC, &value);
++	p2sb_hidden_by_bios = value & P2SBC_HIDE;
++
+ 	/*
+-	 * The BIOS prevents the P2SB device from being enumerated by the PCI
+-	 * subsystem, so we need to unhide and hide it back to lookup the BAR.
+-	 * Unhide the P2SB device here, if needed.
++	 * If the BIOS does not hide the P2SB device, then its resources
++	 * are accessible. Cache them only if the P2SB device is hidden.
+ 	 */
+-	pci_bus_read_config_dword(bus, devfn_p2sb, P2SBC, &value);
+-	if (value & P2SBC_HIDE)
+-		pci_bus_write_config_dword(bus, devfn_p2sb, P2SBC, 0);
++	if (p2sb_hidden_by_bios)
++		ret = p2sb_scan_and_cache(bus, devfn_p2sb);
+ 
+-	ret = p2sb_scan_and_cache(bus, devfn_p2sb);
++	pci_unlock_rescan_remove();
+ 
+-	/* Hide the P2SB device, if it was hidden */
+-	if (value & P2SBC_HIDE)
+-		pci_bus_write_config_dword(bus, devfn_p2sb, P2SBC, P2SBC_HIDE);
++	return ret;
++}
+ 
+-	pci_unlock_rescan_remove();
++static int p2sb_read_from_cache(struct pci_bus *bus, unsigned int devfn,
++				struct resource *mem)
++{
++	struct p2sb_res_cache *cache = &p2sb_resources[PCI_FUNC(devfn)];
++
++	if (cache->bus_dev_id != bus->dev.id)
++		return -ENODEV;
++
++	if (!p2sb_valid_resource(&cache->res))
++		return -ENOENT;
++
++	memcpy(mem, &cache->res, sizeof(*mem));
++
++	return 0;
++}
++
++static int p2sb_read_from_dev(struct pci_bus *bus, unsigned int devfn,
++			      struct resource *mem)
++{
++	struct pci_dev *pdev;
++	int ret = 0;
++
++	pdev = pci_get_slot(bus, devfn);
++	if (!pdev)
++		return -ENODEV;
++
++	if (p2sb_valid_resource(pci_resource_n(pdev, 0)))
++		p2sb_read_bar0(pdev, mem);
++	else
++		ret = -ENOENT;
++
++	pci_dev_put(pdev);
+ 
+ 	return ret;
+ }
+@@ -187,8 +227,6 @@ static int p2sb_cache_resources(void)
+  */
+ int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
+ {
+-	struct p2sb_res_cache *cache;
+-
+ 	bus = p2sb_get_bus(bus);
+ 	if (!bus)
+ 		return -ENODEV;
+@@ -196,15 +234,10 @@ int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
+ 	if (!devfn)
+ 		p2sb_get_devfn(&devfn);
+ 
+-	cache = &p2sb_resources[PCI_FUNC(devfn)];
+-	if (cache->bus_dev_id != bus->dev.id)
+-		return -ENODEV;
++	if (p2sb_hidden_by_bios)
++		return p2sb_read_from_cache(bus, devfn, mem);
+ 
+-	if (!p2sb_valid_resource(&cache->res))
+-		return -ENOENT;
+-
+-	memcpy(mem, &cache->res, sizeof(*mem));
+-	return 0;
++	return p2sb_read_from_dev(bus, devfn, mem);
+ }
+ EXPORT_SYMBOL_GPL(p2sb_bar);
+ 
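The new p2sb_read_from_dev() helper also illustrates the pci_get_slot() contract: the returned pci_dev carries a device reference that must be dropped with pci_dev_put() on every exit, including the -ENOENT path, which is why both outcomes funnel through a single ret. In miniature:

	pdev = pci_get_slot(bus, devfn);	/* takes a device reference */
	if (!pdev)
		return -ENODEV;
	if (p2sb_valid_resource(pci_resource_n(pdev, 0)))
		p2sb_read_bar0(pdev, mem);
	else
		ret = -ENOENT;
	pci_dev_put(pdev);			/* always balance the get */
	return ret;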
+diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
+index 7af2642b97cb81..7c42e303740af2 100644
+--- a/drivers/thunderbolt/nhi.c
++++ b/drivers/thunderbolt/nhi.c
+@@ -1520,6 +1520,14 @@ static struct pci_device_id nhi_ids[] = {
+ 	  .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_LNL_NHI1),
+ 	  .driver_data = (kernel_ulong_t)&icl_nhi_ops },
++	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_PTL_M_NHI0),
++	  .driver_data = (kernel_ulong_t)&icl_nhi_ops },
++	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_PTL_M_NHI1),
++	  .driver_data = (kernel_ulong_t)&icl_nhi_ops },
++	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_PTL_P_NHI0),
++	  .driver_data = (kernel_ulong_t)&icl_nhi_ops },
++	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_PTL_P_NHI1),
++	  .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_80G_NHI) },
+ 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_40G_NHI) },
+ 
+diff --git a/drivers/thunderbolt/nhi.h b/drivers/thunderbolt/nhi.h
+index 7a07c7c1a9c2c6..16744f25a9a069 100644
+--- a/drivers/thunderbolt/nhi.h
++++ b/drivers/thunderbolt/nhi.h
+@@ -92,6 +92,10 @@ extern const struct tb_nhi_ops icl_nhi_ops;
+ #define PCI_DEVICE_ID_INTEL_RPL_NHI1			0xa76d
+ #define PCI_DEVICE_ID_INTEL_LNL_NHI0			0xa833
+ #define PCI_DEVICE_ID_INTEL_LNL_NHI1			0xa834
++#define PCI_DEVICE_ID_INTEL_PTL_M_NHI0			0xe333
++#define PCI_DEVICE_ID_INTEL_PTL_M_NHI1			0xe334
++#define PCI_DEVICE_ID_INTEL_PTL_P_NHI0			0xe433
++#define PCI_DEVICE_ID_INTEL_PTL_P_NHI1			0xe434
+ 
+ #define PCI_CLASS_SERIAL_USB_USB4			0x0c0340
+ 
+diff --git a/drivers/thunderbolt/retimer.c b/drivers/thunderbolt/retimer.c
+index 89d2919d0193e8..eeb64433ebbca0 100644
+--- a/drivers/thunderbolt/retimer.c
++++ b/drivers/thunderbolt/retimer.c
+@@ -103,6 +103,7 @@ static int tb_retimer_nvm_add(struct tb_retimer *rt)
+ 
+ err_nvm:
+ 	dev_dbg(&rt->dev, "NVM upgrade disabled\n");
++	rt->no_nvm_upgrade = true;
+ 	if (!IS_ERR(nvm))
+ 		tb_nvm_free(nvm);
+ 
+@@ -182,8 +183,6 @@ static ssize_t nvm_authenticate_show(struct device *dev,
+ 
+ 	if (!rt->nvm)
+ 		ret = -EAGAIN;
+-	else if (rt->no_nvm_upgrade)
+-		ret = -EOPNOTSUPP;
+ 	else
+ 		ret = sysfs_emit(buf, "%#x\n", rt->auth_status);
+ 
+@@ -323,8 +322,6 @@ static ssize_t nvm_version_show(struct device *dev,
+ 
+ 	if (!rt->nvm)
+ 		ret = -EAGAIN;
+-	else if (rt->no_nvm_upgrade)
+-		ret = -EOPNOTSUPP;
+ 	else
+ 		ret = sysfs_emit(buf, "%x.%x\n", rt->nvm->major, rt->nvm->minor);
+ 
+@@ -342,6 +339,19 @@ static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
+ }
+ static DEVICE_ATTR_RO(vendor);
+ 
++static umode_t retimer_is_visible(struct kobject *kobj, struct attribute *attr,
++				  int n)
++{
++	struct device *dev = kobj_to_dev(kobj);
++	struct tb_retimer *rt = tb_to_retimer(dev);
++
++	if (attr == &dev_attr_nvm_authenticate.attr ||
++	    attr == &dev_attr_nvm_version.attr)
++		return rt->no_nvm_upgrade ? 0 : attr->mode;
++
++	return attr->mode;
++}
++
+ static struct attribute *retimer_attrs[] = {
+ 	&dev_attr_device.attr,
+ 	&dev_attr_nvm_authenticate.attr,
+@@ -351,6 +361,7 @@ static struct attribute *retimer_attrs[] = {
+ };
+ 
+ static const struct attribute_group retimer_group = {
++	.is_visible = retimer_is_visible,
+ 	.attrs = retimer_attrs,
+ };
+ 
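Moving the check out of the show() methods and into an is_visible() callback is the conventional sysfs pattern: attributes that can never work on a given device are hidden outright instead of existing and returning -EOPNOTSUPP forever. A minimal sketch of the callback shape, where example_supported() and example_attrs are placeholders:

	static umode_t example_is_visible(struct kobject *kobj,
					  struct attribute *attr, int n)
	{
		struct device *dev = kobj_to_dev(kobj);

		/* return 0 to hide the file, attr->mode to expose it */
		if (attr == &dev_attr_nvm_version.attr && !example_supported(dev))
			return 0;
		return attr->mode;
	}

	static const struct attribute_group example_group = {
		.is_visible	= example_is_visible,
		.attrs		= example_attrs,
	};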
+diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
+index 4f777788e9179c..a7c6919fbf9788 100644
+--- a/drivers/thunderbolt/tb.c
++++ b/drivers/thunderbolt/tb.c
+@@ -2059,6 +2059,37 @@ static void tb_exit_redrive(struct tb_port *port)
+ 	}
+ }
+ 
++static void tb_switch_enter_redrive(struct tb_switch *sw)
++{
++	struct tb_port *port;
++
++	tb_switch_for_each_port(sw, port)
++		tb_enter_redrive(port);
++}
++
++/*
++ * Called during system and runtime suspend to forcefully exit redrive
++ * mode without querying whether the resource is available.
++ */
++static void tb_switch_exit_redrive(struct tb_switch *sw)
++{
++	struct tb_port *port;
++
++	if (!(sw->quirks & QUIRK_KEEP_POWER_IN_DP_REDRIVE))
++		return;
++
++	tb_switch_for_each_port(sw, port) {
++		if (!tb_port_is_dpin(port))
++			continue;
++
++		if (port->redrive) {
++			port->redrive = false;
++			pm_runtime_put(&sw->dev);
++			tb_port_dbg(port, "exit redrive mode\n");
++		}
++	}
++}
++
+ static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
+ {
+ 	struct tb_port *in, *out;
+@@ -2909,6 +2940,7 @@ static int tb_start(struct tb *tb, bool reset)
+ 	tb_create_usb3_tunnels(tb->root_switch);
+ 	/* Add DP IN resources for the root switch */
+ 	tb_add_dp_resources(tb->root_switch);
++	tb_switch_enter_redrive(tb->root_switch);
+ 	/* Make the discovered switches available to the userspace */
+ 	device_for_each_child(&tb->root_switch->dev, NULL,
+ 			      tb_scan_finalize_switch);
+@@ -2924,6 +2956,7 @@ static int tb_suspend_noirq(struct tb *tb)
+ 
+ 	tb_dbg(tb, "suspending...\n");
+ 	tb_disconnect_and_release_dp(tb);
++	tb_switch_exit_redrive(tb->root_switch);
+ 	tb_switch_suspend(tb->root_switch, false);
+ 	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
+ 	tb_dbg(tb, "suspend finished\n");
+@@ -3016,6 +3049,7 @@ static int tb_resume_noirq(struct tb *tb)
+ 		tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
+ 		msleep(100);
+ 	}
++	tb_switch_enter_redrive(tb->root_switch);
+ 	 /* Allow tb_handle_hotplug to progress events */
+ 	tcm->hotplug_active = true;
+ 	tb_dbg(tb, "resume finished\n");
+@@ -3079,6 +3113,12 @@ static int tb_runtime_suspend(struct tb *tb)
+ 	struct tb_cm *tcm = tb_priv(tb);
+ 
+ 	mutex_lock(&tb->lock);
++	/*
++	 * The below call only releases DP resources to allow exiting and
++	 * re-entering redrive mode.
++	 */
++	tb_disconnect_and_release_dp(tb);
++	tb_switch_exit_redrive(tb->root_switch);
+ 	tb_switch_suspend(tb->root_switch, true);
+ 	tcm->hotplug_active = false;
+ 	mutex_unlock(&tb->lock);
+@@ -3110,6 +3150,7 @@ static int tb_runtime_resume(struct tb *tb)
+ 	tb_restore_children(tb->root_switch);
+ 	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
+ 		tb_tunnel_restart(tunnel);
++	tb_switch_enter_redrive(tb->root_switch);
+ 	tcm->hotplug_active = true;
+ 	mutex_unlock(&tb->lock);
+ 
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index f318864732f2db..b267dae14d3904 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -1192,8 +1192,6 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
+ 			 * Keep retrying until the EP starts and stops again, on
+ 			 * chips where this is known to help. Wait for 100ms.
+ 			 */
+-			if (!(xhci->quirks & XHCI_NEC_HOST))
+-				break;
+ 			if (time_is_before_jiffies(ep->stop_time + msecs_to_jiffies(100)))
+ 				break;
+ 			fallthrough;
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 9ba5584061c8c4..64317b390d2285 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -625,6 +625,8 @@ static void option_instat_callback(struct urb *urb);
+ #define MEIGSMART_PRODUCT_SRM825L		0x4d22
+ /* MeiG Smart SLM320 based on UNISOC UIS8910 */
+ #define MEIGSMART_PRODUCT_SLM320		0x4d41
++/* MeiG Smart SLM770A based on ASR1803 */
++#define MEIGSMART_PRODUCT_SLM770A		0x4d57
+ 
+ /* Device flags */
+ 
+@@ -1395,6 +1397,12 @@ static const struct usb_device_id option_ids[] = {
+ 	  .driver_info = RSVD(0) | NCTRL(2) | RSVD(3) | RSVD(4) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10aa, 0xff),	/* Telit FN920C04 (MBIM) */
+ 	  .driver_info = NCTRL(3) | RSVD(4) | RSVD(5) },
++	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10c0, 0xff),	/* Telit FE910C04 (rmnet) */
++	  .driver_info = RSVD(0) | NCTRL(3) },
++	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10c4, 0xff),	/* Telit FE910C04 (rmnet) */
++	  .driver_info = RSVD(0) | NCTRL(3) },
++	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10c8, 0xff),	/* Telit FE910C04 (rmnet) */
++	  .driver_info = RSVD(0) | NCTRL(2) | RSVD(3) | RSVD(4) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
+ 	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
+@@ -2247,6 +2255,8 @@ static const struct usb_device_id option_ids[] = {
+ 	  .driver_info = NCTRL(2) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x7127, 0xff, 0x00, 0x00),
+ 	  .driver_info = NCTRL(2) | NCTRL(3) | NCTRL(4) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x7129, 0xff, 0x00, 0x00),        /* MediaTek T7XX  */
++	  .driver_info = NCTRL(2) | NCTRL(3) | NCTRL(4) },
+ 	{ USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) },
+ 	{ USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MPL200),
+ 	  .driver_info = RSVD(1) | RSVD(4) },
+@@ -2375,6 +2385,18 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0116, 0xff, 0xff, 0x30) },	/* NetPrisma LCUK54-WWD for Global EDU */
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0116, 0xff, 0x00, 0x40) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0116, 0xff, 0xff, 0x40) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x010a, 0xff, 0xff, 0x30) },	/* NetPrisma LCUK54-WRD for WWAN Ready */
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x010a, 0xff, 0x00, 0x40) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x010a, 0xff, 0xff, 0x40) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x010b, 0xff, 0xff, 0x30) },	/* NetPrisma LCUK54-WWD for WWAN Ready */
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x010b, 0xff, 0x00, 0x40) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x010b, 0xff, 0xff, 0x40) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x010c, 0xff, 0xff, 0x30) },	/* NetPrisma LCUK54-WRD for WWAN Ready */
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x010c, 0xff, 0x00, 0x40) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x010c, 0xff, 0xff, 0x40) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x010d, 0xff, 0xff, 0x30) },	/* NetPrisma LCUK54-WWD for WWAN Ready */
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x010d, 0xff, 0x00, 0x40) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x010d, 0xff, 0xff, 0x40) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(OPPO_VENDOR_ID, OPPO_PRODUCT_R11, 0xff, 0xff, 0x30) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x30) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x40) },
+@@ -2382,9 +2404,14 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, TOZED_PRODUCT_LT70C, 0xff, 0, 0) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, LUAT_PRODUCT_AIR720U, 0xff, 0, 0) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SLM320, 0xff, 0, 0) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SLM770A, 0xff, 0, 0) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SRM825L, 0xff, 0xff, 0x30) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SRM825L, 0xff, 0xff, 0x40) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SRM825L, 0xff, 0xff, 0x60) },
++	{ USB_DEVICE_INTERFACE_CLASS(0x1bbb, 0x0530, 0xff),			/* TCL IK512 MBIM */
++	  .driver_info = NCTRL(1) },
++	{ USB_DEVICE_INTERFACE_CLASS(0x1bbb, 0x0640, 0xff),			/* TCL IK512 ECM */
++	  .driver_info = NCTRL(3) },
+ 	{ } /* Terminating entry */
+ };
+ MODULE_DEVICE_TABLE(usb, option_ids);
+diff --git a/fs/btrfs/bio.c b/fs/btrfs/bio.c
+index 7e0f9600b80c43..3d2376caedfa68 100644
+--- a/fs/btrfs/bio.c
++++ b/fs/btrfs/bio.c
+@@ -649,8 +649,14 @@ static u64 btrfs_append_map_length(struct btrfs_bio *bbio, u64 map_length)
+ 	map_length = min(map_length, bbio->fs_info->max_zone_append_size);
+ 	sector_offset = bio_split_rw_at(&bbio->bio, &bbio->fs_info->limits,
+ 					&nr_segs, map_length);
+-	if (sector_offset)
+-		return sector_offset << SECTOR_SHIFT;
++	if (sector_offset) {
++		/*
++		 * bio_split_rw_at() could split at a size smaller than our
++		 * sectorsize and thus cause unaligned I/Os.  Fix that by
++		 * always rounding down to the nearest boundary.
++		 */
++		return ALIGN_DOWN(sector_offset << SECTOR_SHIFT, bbio->fs_info->sectorsize);
++	}
+ 	return map_length;
+ }
+ 
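For power-of-two alignments, ALIGN_DOWN(x, a) reduces to clearing the low bits of x, so whatever split offset bio_split_rw_at() picks is pulled back to the previous sectorsize boundary. A standalone userspace check of the arithmetic (equivalent to the kernel macro when a is a power of two):

	#include <stdio.h>

	#define ALIGN_DOWN(x, a)  ((x) & ~((unsigned long long)(a) - 1))

	int main(void)
	{
		unsigned long long sectorsize = 4096;
		unsigned long long split = 10000;	/* hypothetical split offset */

		/* 10000 is not 4K-aligned; rounds down to 8192 */
		printf("%llu\n", ALIGN_DOWN(split, sectorsize));
		return 0;
	}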
+diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
+index 317a3712270fc0..2034d371083331 100644
+--- a/fs/btrfs/ctree.h
++++ b/fs/btrfs/ctree.h
+@@ -370,6 +370,25 @@ static inline void btrfs_set_root_last_trans(struct btrfs_root *root, u64 transi
+ 	WRITE_ONCE(root->last_trans, transid);
+ }
+ 
++/*
++ * Return the generation this root started with.
++ *
++ * Every normal root is created with root->root_key.offset set to its
++ * originating generation.  If it is a snapshot it is the generation when the
++ * snapshot was created.
++ *
++ * However, for TREE_RELOC roots, root_key.offset is the objectid of the owning
++ * tree root.  Thankfully we copy the root item of the owning tree root, which
++ * has its last_snapshot set to what we would have root_key.offset set to, so
++ * return that if this is a TREE_RELOC root.
++ */
++static inline u64 btrfs_root_origin_generation(const struct btrfs_root *root)
++{
++	if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID)
++		return btrfs_root_last_snapshot(&root->root_item);
++	return root->root_key.offset;
++}
++
+ /*
+  * Structure that conveys information about an extent that is going to replace
+  * all the extents in a file range.
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index b43a8611aca5c6..f3e93ba7ec97fa 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -5308,7 +5308,7 @@ static bool visit_node_for_delete(struct btrfs_root *root, struct walk_control *
+ 	 * reference to it.
+ 	 */
+ 	generation = btrfs_node_ptr_generation(eb, slot);
+-	if (!wc->update_ref || generation <= root->root_key.offset)
++	if (!wc->update_ref || generation <= btrfs_root_origin_generation(root))
+ 		return false;
+ 
+ 	/*
+@@ -5363,7 +5363,7 @@ static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
+ 			goto reada;
+ 
+ 		if (wc->stage == UPDATE_BACKREF &&
+-		    generation <= root->root_key.offset)
++		    generation <= btrfs_root_origin_generation(root))
+ 			continue;
+ 
+ 		/* We don't lock the tree block, it's OK to be racy here */
+@@ -5706,7 +5706,7 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
+ 	 * for the subtree
+ 	 */
+ 	if (wc->stage == UPDATE_BACKREF &&
+-	    generation <= root->root_key.offset) {
++	    generation <= btrfs_root_origin_generation(root)) {
+ 		wc->lookup_info = 1;
+ 		return 1;
+ 	}
+diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
+index 7b50263723bc1a..ffa5b83d3a4a3a 100644
+--- a/fs/btrfs/tree-checker.c
++++ b/fs/btrfs/tree-checker.c
+@@ -1527,6 +1527,11 @@ static int check_extent_item(struct extent_buffer *leaf,
+ 					   dref_offset, fs_info->sectorsize);
+ 				return -EUCLEAN;
+ 			}
++			if (unlikely(btrfs_extent_data_ref_count(leaf, dref) == 0)) {
++				extent_err(leaf, slot,
++			"invalid data ref count, should have non-zero value");
++				return -EUCLEAN;
++			}
+ 			inline_refs += btrfs_extent_data_ref_count(leaf, dref);
+ 			break;
+ 		/* Contains parent bytenr and ref count */
+@@ -1539,6 +1544,11 @@ static int check_extent_item(struct extent_buffer *leaf,
+ 					   inline_offset, fs_info->sectorsize);
+ 				return -EUCLEAN;
+ 			}
++			if (unlikely(btrfs_shared_data_ref_count(leaf, sref) == 0)) {
++				extent_err(leaf, slot,
++			"invalid shared data ref count, should have non-zero value");
++				return -EUCLEAN;
++			}
+ 			inline_refs += btrfs_shared_data_ref_count(leaf, sref);
+ 			break;
+ 		case BTRFS_EXTENT_OWNER_REF_KEY:
+@@ -1611,8 +1621,18 @@ static int check_simple_keyed_refs(struct extent_buffer *leaf,
+ {
+ 	u32 expect_item_size = 0;
+ 
+-	if (key->type == BTRFS_SHARED_DATA_REF_KEY)
++	if (key->type == BTRFS_SHARED_DATA_REF_KEY) {
++		struct btrfs_shared_data_ref *sref;
++
++		sref = btrfs_item_ptr(leaf, slot, struct btrfs_shared_data_ref);
++		if (unlikely(btrfs_shared_data_ref_count(leaf, sref) == 0)) {
++			extent_err(leaf, slot,
++		"invalid shared data backref count, should have non-zero value");
++			return -EUCLEAN;
++		}
++
+ 		expect_item_size = sizeof(struct btrfs_shared_data_ref);
++	}
+ 
+ 	if (unlikely(btrfs_item_size(leaf, slot) != expect_item_size)) {
+ 		generic_err(leaf, slot,
+@@ -1689,6 +1709,11 @@ static int check_extent_data_ref(struct extent_buffer *leaf,
+ 				   offset, leaf->fs_info->sectorsize);
+ 			return -EUCLEAN;
+ 		}
++		if (unlikely(btrfs_extent_data_ref_count(leaf, dref) == 0)) {
++			extent_err(leaf, slot,
++	"invalid extent data backref count, should have non-zero value");
++			return -EUCLEAN;
++		}
+ 	}
+ 	return 0;
+ }
+diff --git a/fs/ceph/file.c b/fs/ceph/file.c
+index 4b8d59ebda0092..67468d88f13908 100644
+--- a/fs/ceph/file.c
++++ b/fs/ceph/file.c
+@@ -1066,7 +1066,7 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
+ 	if (ceph_inode_is_shutdown(inode))
+ 		return -EIO;
+ 
+-	if (!len)
++	if (!len || !i_size)
+ 		return 0;
+ 	/*
+ 	 * flush any page cache pages in this range.  this
+@@ -1086,7 +1086,7 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
+ 		int num_pages;
+ 		size_t page_off;
+ 		bool more;
+-		int idx;
++		int idx = 0;
+ 		size_t left;
+ 		struct ceph_osd_req_op *op;
+ 		u64 read_off = off;
+@@ -1116,6 +1116,16 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
+ 			len = read_off + read_len - off;
+ 		more = len < iov_iter_count(to);
+ 
++		op = &req->r_ops[0];
++		if (sparse) {
++			extent_cnt = __ceph_sparse_read_ext_count(inode, read_len);
++			ret = ceph_alloc_sparse_ext_map(op, extent_cnt);
++			if (ret) {
++				ceph_osdc_put_request(req);
++				break;
++			}
++		}
++
+ 		num_pages = calc_pages_for(read_off, read_len);
+ 		page_off = offset_in_page(off);
+ 		pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
+@@ -1127,17 +1137,7 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
+ 
+ 		osd_req_op_extent_osd_data_pages(req, 0, pages, read_len,
+ 						 offset_in_page(read_off),
+-						 false, false);
+-
+-		op = &req->r_ops[0];
+-		if (sparse) {
+-			extent_cnt = __ceph_sparse_read_ext_count(inode, read_len);
+-			ret = ceph_alloc_sparse_ext_map(op, extent_cnt);
+-			if (ret) {
+-				ceph_osdc_put_request(req);
+-				break;
+-			}
+-		}
++						 false, true);
+ 
+ 		ceph_osdc_start_request(osdc, req);
+ 		ret = ceph_osdc_wait_request(osdc, req);
+@@ -1160,7 +1160,14 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
+ 		else if (ret == -ENOENT)
+ 			ret = 0;
+ 
+-		if (ret > 0 && IS_ENCRYPTED(inode)) {
++		if (ret < 0) {
++			ceph_osdc_put_request(req);
++			if (ret == -EBLOCKLISTED)
++				fsc->blocklisted = true;
++			break;
++		}
++
++		if (IS_ENCRYPTED(inode)) {
+ 			int fret;
+ 
+ 			fret = ceph_fscrypt_decrypt_extents(inode, pages,
+@@ -1186,10 +1193,8 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
+ 			ret = min_t(ssize_t, fret, len);
+ 		}
+ 
+-		ceph_osdc_put_request(req);
+-
+ 		/* Short read but not EOF? Zero out the remainder. */
+-		if (ret >= 0 && ret < len && (off + ret < i_size)) {
++		if (ret < len && (off + ret < i_size)) {
+ 			int zlen = min(len - ret, i_size - off - ret);
+ 			int zoff = page_off + ret;
+ 
+@@ -1199,13 +1204,11 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
+ 			ret += zlen;
+ 		}
+ 
+-		idx = 0;
+-		if (ret <= 0)
+-			left = 0;
+-		else if (off + ret > i_size)
+-			left = i_size - off;
++		if (off + ret > i_size)
++			left = (i_size > off) ? i_size - off : 0;
+ 		else
+ 			left = ret;
++
+ 		while (left > 0) {
+ 			size_t plen, copied;
+ 
+@@ -1221,13 +1224,8 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
+ 				break;
+ 			}
+ 		}
+-		ceph_release_page_vector(pages, num_pages);
+ 
+-		if (ret < 0) {
+-			if (ret == -EBLOCKLISTED)
+-				fsc->blocklisted = true;
+-			break;
+-		}
++		ceph_osdc_put_request(req);
+ 
+ 		if (off >= i_size || !more)
+ 			break;
+@@ -1553,6 +1551,16 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
+ 			break;
+ 		}
+ 
++		op = &req->r_ops[0];
++		if (sparse) {
++			extent_cnt = __ceph_sparse_read_ext_count(inode, size);
++			ret = ceph_alloc_sparse_ext_map(op, extent_cnt);
++			if (ret) {
++				ceph_osdc_put_request(req);
++				break;
++			}
++		}
++
+ 		len = iter_get_bvecs_alloc(iter, size, &bvecs, &num_pages);
+ 		if (len < 0) {
+ 			ceph_osdc_put_request(req);
+@@ -1562,6 +1570,8 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
+ 		if (len != size)
+ 			osd_req_op_extent_update(req, 0, len);
+ 
++		osd_req_op_extent_osd_data_bvecs(req, 0, bvecs, num_pages, len);
++
+ 		/*
+ 		 * To simplify error handling, allow AIO when IO within i_size
+ 		 * or IO can be satisfied by single OSD request.
+@@ -1593,17 +1603,6 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
+ 			req->r_mtime = mtime;
+ 		}
+ 
+-		osd_req_op_extent_osd_data_bvecs(req, 0, bvecs, num_pages, len);
+-		op = &req->r_ops[0];
+-		if (sparse) {
+-			extent_cnt = __ceph_sparse_read_ext_count(inode, size);
+-			ret = ceph_alloc_sparse_ext_map(op, extent_cnt);
+-			if (ret) {
+-				ceph_osdc_put_request(req);
+-				break;
+-			}
+-		}
+-
+ 		if (aio_req) {
+ 			aio_req->total_len += len;
+ 			aio_req->num_reqs++;
+diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
+index cf92b75745e2a5..f48242262b2177 100644
+--- a/fs/ceph/mds_client.c
++++ b/fs/ceph/mds_client.c
+@@ -2808,12 +2808,11 @@ char *ceph_mdsc_build_path(struct ceph_mds_client *mdsc, struct dentry *dentry,
+ 
+ 	if (pos < 0) {
+ 		/*
+-		 * A rename didn't occur, but somehow we didn't end up where
+-		 * we thought we would. Throw a warning and try again.
++		 * The path is longer than PATH_MAX and this function
++		 * cannot ever succeed.  Creating paths that long is
++		 * possible with Ceph, but Linux cannot use them.
+ 		 */
+-		pr_warn_client(cl, "did not end path lookup where expected (pos = %d)\n",
+-			       pos);
+-		goto retry;
++		return ERR_PTR(-ENAMETOOLONG);
+ 	}
+ 
+ 	*pbase = base;
+diff --git a/fs/ceph/super.c b/fs/ceph/super.c
+index 86480e5a215e51..c235f9a60394c2 100644
+--- a/fs/ceph/super.c
++++ b/fs/ceph/super.c
+@@ -431,6 +431,8 @@ static int ceph_parse_mount_param(struct fs_context *fc,
+ 
+ 	switch (token) {
+ 	case Opt_snapdirname:
++		if (strlen(param->string) > NAME_MAX)
++			return invalfc(fc, "snapdirname too long");
+ 		kfree(fsopt->snapdir_name);
+ 		fsopt->snapdir_name = param->string;
+ 		param->string = NULL;
+diff --git a/fs/efivarfs/inode.c b/fs/efivarfs/inode.c
+index 586446e02ef72d..ec23da8405ff8e 100644
+--- a/fs/efivarfs/inode.c
++++ b/fs/efivarfs/inode.c
+@@ -51,7 +51,7 @@ struct inode *efivarfs_get_inode(struct super_block *sb,
+  *
+  *	VariableName-12345678-1234-1234-1234-1234567891bc
+  */
+-bool efivarfs_valid_name(const char *str, int len)
++static bool efivarfs_valid_name(const char *str, int len)
+ {
+ 	const char *s = str + len - EFI_VARIABLE_GUID_LEN;
+ 
+diff --git a/fs/efivarfs/internal.h b/fs/efivarfs/internal.h
+index d71d2e08422f09..74f0602a9e016c 100644
+--- a/fs/efivarfs/internal.h
++++ b/fs/efivarfs/internal.h
+@@ -60,7 +60,6 @@ bool efivar_variable_is_removable(efi_guid_t vendor, const char *name,
+ 
+ extern const struct file_operations efivarfs_file_operations;
+ extern const struct inode_operations efivarfs_dir_inode_operations;
+-extern bool efivarfs_valid_name(const char *str, int len);
+ extern struct inode *efivarfs_get_inode(struct super_block *sb,
+ 			const struct inode *dir, int mode, dev_t dev,
+ 			bool is_removable);
+diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c
+index a929f1b613be84..beba15673be8d3 100644
+--- a/fs/efivarfs/super.c
++++ b/fs/efivarfs/super.c
+@@ -144,9 +144,6 @@ static int efivarfs_d_hash(const struct dentry *dentry, struct qstr *qstr)
+ 	const unsigned char *s = qstr->name;
+ 	unsigned int len = qstr->len;
+ 
+-	if (!efivarfs_valid_name(s, len))
+-		return -EINVAL;
+-
+ 	while (len-- > EFI_VARIABLE_GUID_LEN)
+ 		hash = partial_name_hash(*s++, hash);
+ 
+diff --git a/fs/erofs/data.c b/fs/erofs/data.c
+index fa51437e1d99d9..722151d3fee8b4 100644
+--- a/fs/erofs/data.c
++++ b/fs/erofs/data.c
+@@ -63,10 +63,10 @@ void erofs_init_metabuf(struct erofs_buf *buf, struct super_block *sb)
+ 
+ 	buf->file = NULL;
+ 	if (erofs_is_fileio_mode(sbi)) {
+-		buf->file = sbi->fdev;		/* some fs like FUSE needs it */
++		buf->file = sbi->dif0.file;	/* some fs like FUSE needs it */
+ 		buf->mapping = buf->file->f_mapping;
+ 	} else if (erofs_is_fscache_mode(sb))
+-		buf->mapping = sbi->s_fscache->inode->i_mapping;
++		buf->mapping = sbi->dif0.fscache->inode->i_mapping;
+ 	else
+ 		buf->mapping = sb->s_bdev->bd_mapping;
+ }
+@@ -186,19 +186,13 @@ int erofs_map_blocks(struct inode *inode, struct erofs_map_blocks *map)
+ }
+ 
+ static void erofs_fill_from_devinfo(struct erofs_map_dev *map,
+-				    struct erofs_device_info *dif)
++		struct super_block *sb, struct erofs_device_info *dif)
+ {
++	map->m_sb = sb;
++	map->m_dif = dif;
+ 	map->m_bdev = NULL;
+-	map->m_fp = NULL;
+-	if (dif->file) {
+-		if (S_ISBLK(file_inode(dif->file)->i_mode))
+-			map->m_bdev = file_bdev(dif->file);
+-		else
+-			map->m_fp = dif->file;
+-	}
+-	map->m_daxdev = dif->dax_dev;
+-	map->m_dax_part_off = dif->dax_part_off;
+-	map->m_fscache = dif->fscache;
++	if (dif->file && S_ISBLK(file_inode(dif->file)->i_mode))
++		map->m_bdev = file_bdev(dif->file);
+ }
+ 
+ int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map)
+@@ -208,12 +202,8 @@ int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map)
+ 	erofs_off_t startoff, length;
+ 	int id;
+ 
+-	map->m_bdev = sb->s_bdev;
+-	map->m_daxdev = EROFS_SB(sb)->dax_dev;
+-	map->m_dax_part_off = EROFS_SB(sb)->dax_part_off;
+-	map->m_fscache = EROFS_SB(sb)->s_fscache;
+-	map->m_fp = EROFS_SB(sb)->fdev;
+-
++	erofs_fill_from_devinfo(map, sb, &EROFS_SB(sb)->dif0);
++	map->m_bdev = sb->s_bdev;	/* use s_bdev for the primary device */
+ 	if (map->m_deviceid) {
+ 		down_read(&devs->rwsem);
+ 		dif = idr_find(&devs->tree, map->m_deviceid - 1);
+@@ -226,7 +216,7 @@ int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map)
+ 			up_read(&devs->rwsem);
+ 			return 0;
+ 		}
+-		erofs_fill_from_devinfo(map, dif);
++		erofs_fill_from_devinfo(map, sb, dif);
+ 		up_read(&devs->rwsem);
+ 	} else if (devs->extra_devices && !devs->flatdev) {
+ 		down_read(&devs->rwsem);
+@@ -239,7 +229,7 @@ int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map)
+ 			if (map->m_pa >= startoff &&
+ 			    map->m_pa < startoff + length) {
+ 				map->m_pa -= startoff;
+-				erofs_fill_from_devinfo(map, dif);
++				erofs_fill_from_devinfo(map, sb, dif);
+ 				break;
+ 			}
+ 		}
+@@ -309,7 +299,7 @@ static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
+ 
+ 	iomap->offset = map.m_la;
+ 	if (flags & IOMAP_DAX)
+-		iomap->dax_dev = mdev.m_daxdev;
++		iomap->dax_dev = mdev.m_dif->dax_dev;
+ 	else
+ 		iomap->bdev = mdev.m_bdev;
+ 	iomap->length = map.m_llen;
+@@ -338,7 +328,7 @@ static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
+ 		iomap->type = IOMAP_MAPPED;
+ 		iomap->addr = mdev.m_pa;
+ 		if (flags & IOMAP_DAX)
+-			iomap->addr += mdev.m_dax_part_off;
++			iomap->addr += mdev.m_dif->dax_part_off;
+ 	}
+ 	return 0;
+ }
+diff --git a/fs/erofs/fileio.c b/fs/erofs/fileio.c
+index 3af96b1e2c2aa8..33f8539dda4aeb 100644
+--- a/fs/erofs/fileio.c
++++ b/fs/erofs/fileio.c
+@@ -9,6 +9,7 @@ struct erofs_fileio_rq {
+ 	struct bio_vec bvecs[BIO_MAX_VECS];
+ 	struct bio bio;
+ 	struct kiocb iocb;
++	struct super_block *sb;
+ };
+ 
+ struct erofs_fileio {
+@@ -52,8 +53,9 @@ static void erofs_fileio_rq_submit(struct erofs_fileio_rq *rq)
+ 	rq->iocb.ki_pos = rq->bio.bi_iter.bi_sector << SECTOR_SHIFT;
+ 	rq->iocb.ki_ioprio = get_current_ioprio();
+ 	rq->iocb.ki_complete = erofs_fileio_ki_complete;
+-	rq->iocb.ki_flags = (rq->iocb.ki_filp->f_mode & FMODE_CAN_ODIRECT) ?
+-				IOCB_DIRECT : 0;
++	if (test_opt(&EROFS_SB(rq->sb)->opt, DIRECT_IO) &&
++	    rq->iocb.ki_filp->f_mode & FMODE_CAN_ODIRECT)
++		rq->iocb.ki_flags = IOCB_DIRECT;
+ 	iov_iter_bvec(&iter, ITER_DEST, rq->bvecs, rq->bio.bi_vcnt,
+ 		      rq->bio.bi_iter.bi_size);
+ 	ret = vfs_iocb_iter_read(rq->iocb.ki_filp, &rq->iocb, &iter);
+@@ -67,7 +69,8 @@ static struct erofs_fileio_rq *erofs_fileio_rq_alloc(struct erofs_map_dev *mdev)
+ 					     GFP_KERNEL | __GFP_NOFAIL);
+ 
+ 	bio_init(&rq->bio, NULL, rq->bvecs, BIO_MAX_VECS, REQ_OP_READ);
+-	rq->iocb.ki_filp = mdev->m_fp;
++	rq->iocb.ki_filp = mdev->m_dif->file;
++	rq->sb = mdev->m_sb;
+ 	return rq;
+ }
+ 
+diff --git a/fs/erofs/fscache.c b/fs/erofs/fscache.c
+index fda16eedafb578..ce3d8737df85d4 100644
+--- a/fs/erofs/fscache.c
++++ b/fs/erofs/fscache.c
+@@ -198,7 +198,7 @@ struct bio *erofs_fscache_bio_alloc(struct erofs_map_dev *mdev)
+ 
+ 	io = kmalloc(sizeof(*io), GFP_KERNEL | __GFP_NOFAIL);
+ 	bio_init(&io->bio, NULL, io->bvecs, BIO_MAX_VECS, REQ_OP_READ);
+-	io->io.private = mdev->m_fscache->cookie;
++	io->io.private = mdev->m_dif->fscache->cookie;
+ 	io->io.end_io = erofs_fscache_bio_endio;
+ 	refcount_set(&io->io.ref, 1);
+ 	return &io->bio;
+@@ -316,7 +316,7 @@ static int erofs_fscache_data_read_slice(struct erofs_fscache_rq *req)
+ 	if (!io)
+ 		return -ENOMEM;
+ 	iov_iter_xarray(&io->iter, ITER_DEST, &mapping->i_pages, pos, count);
+-	ret = erofs_fscache_read_io_async(mdev.m_fscache->cookie,
++	ret = erofs_fscache_read_io_async(mdev.m_dif->fscache->cookie,
+ 			mdev.m_pa + (pos - map.m_la), io);
+ 	erofs_fscache_req_io_put(io);
+ 
+@@ -657,7 +657,7 @@ int erofs_fscache_register_fs(struct super_block *sb)
+ 	if (IS_ERR(fscache))
+ 		return PTR_ERR(fscache);
+ 
+-	sbi->s_fscache = fscache;
++	sbi->dif0.fscache = fscache;
+ 	return 0;
+ }
+ 
+@@ -665,14 +665,14 @@ void erofs_fscache_unregister_fs(struct super_block *sb)
+ {
+ 	struct erofs_sb_info *sbi = EROFS_SB(sb);
+ 
+-	erofs_fscache_unregister_cookie(sbi->s_fscache);
++	erofs_fscache_unregister_cookie(sbi->dif0.fscache);
+ 
+ 	if (sbi->domain)
+ 		erofs_fscache_domain_put(sbi->domain);
+ 	else
+ 		fscache_relinquish_volume(sbi->volume, NULL, false);
+ 
+-	sbi->s_fscache = NULL;
++	sbi->dif0.fscache = NULL;
+ 	sbi->volume = NULL;
+ 	sbi->domain = NULL;
+ }
+diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h
+index 9b03c8f323a762..77e785a6dfa7ff 100644
+--- a/fs/erofs/internal.h
++++ b/fs/erofs/internal.h
+@@ -113,6 +113,7 @@ struct erofs_xattr_prefix_item {
+ };
+ 
+ struct erofs_sb_info {
++	struct erofs_device_info dif0;
+ 	struct erofs_mount_opts opt;	/* options */
+ #ifdef CONFIG_EROFS_FS_ZIP
+ 	/* list for all registered superblocks, mainly for shrinker */
+@@ -130,13 +131,9 @@ struct erofs_sb_info {
+ 
+ 	struct erofs_sb_lz4_info lz4;
+ #endif	/* CONFIG_EROFS_FS_ZIP */
+-	struct file *fdev;
+ 	struct inode *packed_inode;
+ 	struct erofs_dev_context *devs;
+-	struct dax_device *dax_dev;
+-	u64 dax_part_off;
+ 	u64 total_blocks;
+-	u32 primarydevice_blocks;
+ 
+ 	u32 meta_blkaddr;
+ #ifdef CONFIG_EROFS_FS_XATTR
+@@ -172,7 +169,6 @@ struct erofs_sb_info {
+ 
+ 	/* fscache support */
+ 	struct fscache_volume *volume;
+-	struct erofs_fscache *s_fscache;
+ 	struct erofs_domain *domain;
+ 	char *fsid;
+ 	char *domain_id;
+@@ -186,6 +182,7 @@ struct erofs_sb_info {
+ #define EROFS_MOUNT_POSIX_ACL		0x00000020
+ #define EROFS_MOUNT_DAX_ALWAYS		0x00000040
+ #define EROFS_MOUNT_DAX_NEVER		0x00000080
++#define EROFS_MOUNT_DIRECT_IO		0x00000100
+ 
+ #define clear_opt(opt, option)	((opt)->mount_opt &= ~EROFS_MOUNT_##option)
+ #define set_opt(opt, option)	((opt)->mount_opt |= EROFS_MOUNT_##option)
+@@ -193,7 +190,7 @@ struct erofs_sb_info {
+ 
+ static inline bool erofs_is_fileio_mode(struct erofs_sb_info *sbi)
+ {
+-	return IS_ENABLED(CONFIG_EROFS_FS_BACKED_BY_FILE) && sbi->fdev;
++	return IS_ENABLED(CONFIG_EROFS_FS_BACKED_BY_FILE) && sbi->dif0.file;
+ }
+ 
+ static inline bool erofs_is_fscache_mode(struct super_block *sb)
+@@ -370,11 +367,9 @@ enum {
+ };
+ 
+ struct erofs_map_dev {
+-	struct erofs_fscache *m_fscache;
++	struct super_block *m_sb;
++	struct erofs_device_info *m_dif;
+ 	struct block_device *m_bdev;
+-	struct dax_device *m_daxdev;
+-	struct file *m_fp;
+-	u64 m_dax_part_off;
+ 
+ 	erofs_off_t m_pa;
+ 	unsigned int m_deviceid;
+diff --git a/fs/erofs/super.c b/fs/erofs/super.c
+index 2dd7d819572f40..5b279977c9d5d6 100644
+--- a/fs/erofs/super.c
++++ b/fs/erofs/super.c
+@@ -218,7 +218,7 @@ static int erofs_scan_devices(struct super_block *sb,
+ 	struct erofs_device_info *dif;
+ 	int id, err = 0;
+ 
+-	sbi->total_blocks = sbi->primarydevice_blocks;
++	sbi->total_blocks = sbi->dif0.blocks;
+ 	if (!erofs_sb_has_device_table(sbi))
+ 		ondisk_extradevs = 0;
+ 	else
+@@ -322,7 +322,7 @@ static int erofs_read_superblock(struct super_block *sb)
+ 			  sbi->sb_size);
+ 		goto out;
+ 	}
+-	sbi->primarydevice_blocks = le32_to_cpu(dsb->blocks);
++	sbi->dif0.blocks = le32_to_cpu(dsb->blocks);
+ 	sbi->meta_blkaddr = le32_to_cpu(dsb->meta_blkaddr);
+ #ifdef CONFIG_EROFS_FS_XATTR
+ 	sbi->xattr_blkaddr = le32_to_cpu(dsb->xattr_blkaddr);
+@@ -379,14 +379,8 @@ static void erofs_default_options(struct erofs_sb_info *sbi)
+ }
+ 
+ enum {
+-	Opt_user_xattr,
+-	Opt_acl,
+-	Opt_cache_strategy,
+-	Opt_dax,
+-	Opt_dax_enum,
+-	Opt_device,
+-	Opt_fsid,
+-	Opt_domain_id,
++	Opt_user_xattr, Opt_acl, Opt_cache_strategy, Opt_dax, Opt_dax_enum,
++	Opt_device, Opt_fsid, Opt_domain_id, Opt_directio,
+ 	Opt_err
+ };
+ 
+@@ -413,6 +407,7 @@ static const struct fs_parameter_spec erofs_fs_parameters[] = {
+ 	fsparam_string("device",	Opt_device),
+ 	fsparam_string("fsid",		Opt_fsid),
+ 	fsparam_string("domain_id",	Opt_domain_id),
++	fsparam_flag_no("directio",	Opt_directio),
+ 	{}
+ };
+ 
+@@ -526,6 +521,16 @@ static int erofs_fc_parse_param(struct fs_context *fc,
+ 		errorfc(fc, "%s option not supported", erofs_fs_parameters[opt].name);
+ 		break;
+ #endif
++	case Opt_directio:
++#ifdef CONFIG_EROFS_FS_BACKED_BY_FILE
++		if (result.boolean)
++			set_opt(&sbi->opt, DIRECT_IO);
++		else
++			clear_opt(&sbi->opt, DIRECT_IO);
++#else
++		errorfc(fc, "%s option not supported", erofs_fs_parameters[opt].name);
++#endif
++		break;
+ 	default:
+ 		return -ENOPARAM;
+ 	}
+@@ -617,9 +622,8 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
+ 			return -EINVAL;
+ 		}
+ 
+-		sbi->dax_dev = fs_dax_get_by_bdev(sb->s_bdev,
+-						  &sbi->dax_part_off,
+-						  NULL, NULL);
++		sbi->dif0.dax_dev = fs_dax_get_by_bdev(sb->s_bdev,
++				&sbi->dif0.dax_part_off, NULL, NULL);
+ 	}
+ 
+ 	err = erofs_read_superblock(sb);
+@@ -642,7 +646,7 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
+ 	}
+ 
+ 	if (test_opt(&sbi->opt, DAX_ALWAYS)) {
+-		if (!sbi->dax_dev) {
++		if (!sbi->dif0.dax_dev) {
+ 			errorfc(fc, "DAX unsupported by block device. Turning off DAX.");
+ 			clear_opt(&sbi->opt, DAX_ALWAYS);
+ 		} else if (sbi->blkszbits != PAGE_SHIFT) {
+@@ -718,16 +722,18 @@ static int erofs_fc_get_tree(struct fs_context *fc)
+ 			GET_TREE_BDEV_QUIET_LOOKUP : 0);
+ #ifdef CONFIG_EROFS_FS_BACKED_BY_FILE
+ 	if (ret == -ENOTBLK) {
++		struct file *file;
++
+ 		if (!fc->source)
+ 			return invalf(fc, "No source specified");
+-		sbi->fdev = filp_open(fc->source, O_RDONLY | O_LARGEFILE, 0);
+-		if (IS_ERR(sbi->fdev))
+-			return PTR_ERR(sbi->fdev);
++		file = filp_open(fc->source, O_RDONLY | O_LARGEFILE, 0);
++		if (IS_ERR(file))
++			return PTR_ERR(file);
++		sbi->dif0.file = file;
+ 
+-		if (S_ISREG(file_inode(sbi->fdev)->i_mode) &&
+-		    sbi->fdev->f_mapping->a_ops->read_folio)
++		if (S_ISREG(file_inode(sbi->dif0.file)->i_mode) &&
++		    sbi->dif0.file->f_mapping->a_ops->read_folio)
+ 			return get_tree_nodev(fc, erofs_fc_fill_super);
+-		fput(sbi->fdev);
+ 	}
+ #endif
+ 	return ret;
+@@ -778,19 +784,24 @@ static void erofs_free_dev_context(struct erofs_dev_context *devs)
+ 	kfree(devs);
+ }
+ 
+-static void erofs_fc_free(struct fs_context *fc)
++static void erofs_sb_free(struct erofs_sb_info *sbi)
+ {
+-	struct erofs_sb_info *sbi = fc->s_fs_info;
+-
+-	if (!sbi)
+-		return;
+-
+ 	erofs_free_dev_context(sbi->devs);
+ 	kfree(sbi->fsid);
+ 	kfree(sbi->domain_id);
++	if (sbi->dif0.file)
++		fput(sbi->dif0.file);
+ 	kfree(sbi);
+ }
+ 
++static void erofs_fc_free(struct fs_context *fc)
++{
++	struct erofs_sb_info *sbi = fc->s_fs_info;
++
++	if (sbi) /* free here if an error occurs before transferring to sb */
++		erofs_sb_free(sbi);
++}
++
+ static const struct fs_context_operations erofs_context_ops = {
+ 	.parse_param	= erofs_fc_parse_param,
+ 	.get_tree       = erofs_fc_get_tree,
+@@ -824,19 +835,14 @@ static void erofs_kill_sb(struct super_block *sb)
+ {
+ 	struct erofs_sb_info *sbi = EROFS_SB(sb);
+ 
+-	if ((IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) && sbi->fsid) || sbi->fdev)
++	if ((IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) && sbi->fsid) ||
++	    sbi->dif0.file)
+ 		kill_anon_super(sb);
+ 	else
+ 		kill_block_super(sb);
+-
+-	erofs_free_dev_context(sbi->devs);
+-	fs_put_dax(sbi->dax_dev, NULL);
++	fs_put_dax(sbi->dif0.dax_dev, NULL);
+ 	erofs_fscache_unregister_fs(sb);
+-	kfree(sbi->fsid);
+-	kfree(sbi->domain_id);
+-	if (sbi->fdev)
+-		fput(sbi->fdev);
+-	kfree(sbi);
++	erofs_sb_free(sbi);
+ 	sb->s_fs_info = NULL;
+ }
+ 
+@@ -962,6 +968,8 @@ static int erofs_show_options(struct seq_file *seq, struct dentry *root)
+ 		seq_puts(seq, ",dax=always");
+ 	if (test_opt(opt, DAX_NEVER))
+ 		seq_puts(seq, ",dax=never");
++	if (erofs_is_fileio_mode(sbi) && test_opt(opt, DIRECT_IO))
++		seq_puts(seq, ",directio");
+ #ifdef CONFIG_EROFS_FS_ONDEMAND
+ 	if (sbi->fsid)
+ 		seq_printf(seq, ",fsid=%s", sbi->fsid);
+diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
+index a569ff9dfd0442..1a00f061798a3c 100644
+--- a/fs/erofs/zdata.c
++++ b/fs/erofs/zdata.c
+@@ -1679,9 +1679,9 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
+ 			erofs_fscache_submit_bio(bio);
+ 		else
+ 			submit_bio(bio);
+-		if (memstall)
+-			psi_memstall_leave(&pflags);
+ 	}
++	if (memstall)
++		psi_memstall_leave(&pflags);
+ 
+ 	/*
+ 	 * although background is preferred, no one is pending for submission.
+diff --git a/fs/eventpoll.c b/fs/eventpoll.c
+index 90fbab6b6f0363..1a06e462b6efba 100644
+--- a/fs/eventpoll.c
++++ b/fs/eventpoll.c
+@@ -1373,7 +1373,10 @@ static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, v
+ 				break;
+ 			}
+ 		}
+-		wake_up(&ep->wq);
++		if (sync)
++			wake_up_sync(&ep->wq);
++		else
++			wake_up(&ep->wq);
+ 	}
+ 	if (waitqueue_active(&ep->poll_wait))
+ 		pwake++;
+diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
+index 5cf327337e2276..c0856585bb6386 100644
+--- a/fs/hugetlbfs/inode.c
++++ b/fs/hugetlbfs/inode.c
+@@ -893,7 +893,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
+ 			error = PTR_ERR(folio);
+ 			goto out;
+ 		}
+-		folio_zero_user(folio, ALIGN_DOWN(addr, hpage_size));
++		folio_zero_user(folio, addr);
+ 		__folio_mark_uptodate(folio);
+ 		error = hugetlb_add_to_page_cache(folio, mapping, index);
+ 		if (unlikely(error)) {
+diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
+index 0d16b383a45262..5f582713bf05eb 100644
+--- a/fs/nfs/pnfs.c
++++ b/fs/nfs/pnfs.c
+@@ -1308,7 +1308,7 @@ pnfs_prepare_layoutreturn(struct pnfs_layout_hdr *lo,
+ 		enum pnfs_iomode *iomode)
+ {
+ 	/* Serialise LAYOUTGET/LAYOUTRETURN */
+-	if (atomic_read(&lo->plh_outstanding) != 0)
++	if (atomic_read(&lo->plh_outstanding) != 0 && lo->plh_return_seq == 0)
+ 		return false;
+ 	if (test_and_set_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags))
+ 		return false;
+diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c
+index 501ad7be5174cb..54a3fa0cf67edb 100644
+--- a/fs/nilfs2/btnode.c
++++ b/fs/nilfs2/btnode.c
+@@ -35,6 +35,7 @@ void nilfs_init_btnc_inode(struct inode *btnc_inode)
+ 	ii->i_flags = 0;
+ 	memset(&ii->i_bmap_data, 0, sizeof(struct nilfs_bmap));
+ 	mapping_set_gfp_mask(btnc_inode->i_mapping, GFP_NOFS);
++	btnc_inode->i_mapping->a_ops = &nilfs_buffer_cache_aops;
+ }
+ 
+ void nilfs_btnode_cache_clear(struct address_space *btnc)
+diff --git a/fs/nilfs2/gcinode.c b/fs/nilfs2/gcinode.c
+index ace22253fed0f2..2dbb15767df16e 100644
+--- a/fs/nilfs2/gcinode.c
++++ b/fs/nilfs2/gcinode.c
+@@ -163,7 +163,7 @@ int nilfs_init_gcinode(struct inode *inode)
+ 
+ 	inode->i_mode = S_IFREG;
+ 	mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
+-	inode->i_mapping->a_ops = &empty_aops;
++	inode->i_mapping->a_ops = &nilfs_buffer_cache_aops;
+ 
+ 	ii->i_flags = 0;
+ 	nilfs_bmap_init_gc(ii->i_bmap);
+diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
+index be6acf6e2bfc59..aaca34ec678f26 100644
+--- a/fs/nilfs2/inode.c
++++ b/fs/nilfs2/inode.c
+@@ -307,6 +307,10 @@ const struct address_space_operations nilfs_aops = {
+ 	.is_partially_uptodate  = block_is_partially_uptodate,
+ };
+ 
++const struct address_space_operations nilfs_buffer_cache_aops = {
++	.invalidate_folio	= block_invalidate_folio,
++};
++
+ static int nilfs_insert_inode_locked(struct inode *inode,
+ 				     struct nilfs_root *root,
+ 				     unsigned long ino)
+@@ -575,8 +579,14 @@ struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root,
+ 	inode = nilfs_iget_locked(sb, root, ino);
+ 	if (unlikely(!inode))
+ 		return ERR_PTR(-ENOMEM);
+-	if (!(inode->i_state & I_NEW))
++
++	if (!(inode->i_state & I_NEW)) {
++		if (!inode->i_nlink) {
++			iput(inode);
++			return ERR_PTR(-ESTALE);
++		}
+ 		return inode;
++	}
+ 
+ 	err = __nilfs_read_inode(sb, root, ino, inode);
+ 	if (unlikely(err)) {
+@@ -706,6 +716,7 @@ struct inode *nilfs_iget_for_shadow(struct inode *inode)
+ 	NILFS_I(s_inode)->i_flags = 0;
+ 	memset(NILFS_I(s_inode)->i_bmap, 0, sizeof(struct nilfs_bmap));
+ 	mapping_set_gfp_mask(s_inode->i_mapping, GFP_NOFS);
++	s_inode->i_mapping->a_ops = &nilfs_buffer_cache_aops;
+ 
+ 	err = nilfs_attach_btree_node_cache(s_inode);
+ 	if (unlikely(err)) {
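Refusing to return a cached inode with i_nlink == 0 is the usual hardening against on-disk corruption where a directory entry still points at a deleted inode; without it, lookup resurrects the dead inode and later operations compound the damage. Sketched against the generic iget API rather than the nilfs wrappers:

	struct inode *inode = iget_locked(sb, ino);

	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW)) {
		if (!inode->i_nlink) {	/* cached but already unlinked */
			iput(inode);
			return ERR_PTR(-ESTALE);
		}
		return inode;
	}
	/* I_NEW: read the on-disk inode, then unlock_new_inode() */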
+diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c
+index 9b108052d9f71f..1d836a5540f3b1 100644
+--- a/fs/nilfs2/namei.c
++++ b/fs/nilfs2/namei.c
+@@ -67,6 +67,11 @@ nilfs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
+ 		inode = NULL;
+ 	} else {
+ 		inode = nilfs_iget(dir->i_sb, NILFS_I(dir)->i_root, ino);
++		if (inode == ERR_PTR(-ESTALE)) {
++			nilfs_error(dir->i_sb,
++					"deleted inode referenced: %lu", ino);
++			return ERR_PTR(-EIO);
++		}
+ 	}
+ 
+ 	return d_splice_alias(inode, dentry);
+diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h
+index 45d03826eaf157..dff241c53fc583 100644
+--- a/fs/nilfs2/nilfs.h
++++ b/fs/nilfs2/nilfs.h
+@@ -401,6 +401,7 @@ extern const struct file_operations nilfs_dir_operations;
+ extern const struct inode_operations nilfs_file_inode_operations;
+ extern const struct file_operations nilfs_file_operations;
+ extern const struct address_space_operations nilfs_aops;
++extern const struct address_space_operations nilfs_buffer_cache_aops;
+ extern const struct inode_operations nilfs_dir_inode_operations;
+ extern const struct inode_operations nilfs_special_inode_operations;
+ extern const struct inode_operations nilfs_symlink_inode_operations;
+diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
+index 5df34561c551c6..d1aa04a5af1b1c 100644
+--- a/fs/ocfs2/localalloc.c
++++ b/fs/ocfs2/localalloc.c
+@@ -971,9 +971,9 @@ static int ocfs2_sync_local_to_main(struct ocfs2_super *osb,
+ 	start = count = 0;
+ 	left = le32_to_cpu(alloc->id1.bitmap1.i_total);
+ 
+-	while ((bit_off = ocfs2_find_next_zero_bit(bitmap, left, start)) <
+-	       left) {
+-		if (bit_off == start) {
++	while (1) {
++		bit_off = ocfs2_find_next_zero_bit(bitmap, left, start);
++		if ((bit_off < left) && (bit_off == start)) {
+ 			count++;
+ 			start++;
+ 			continue;
+@@ -998,6 +998,8 @@ static int ocfs2_sync_local_to_main(struct ocfs2_super *osb,
+ 			}
+ 		}
+ 
++		if (bit_off >= left)
++			break;
+ 		count = 1;
+ 		start = bit_off + 1;
+ 	}
+diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
+index feff3324d39c6d..fe40152b915d82 100644
+--- a/fs/smb/client/connect.c
++++ b/fs/smb/client/connect.c
+@@ -987,9 +987,13 @@ clean_demultiplex_info(struct TCP_Server_Info *server)
+ 	msleep(125);
+ 	if (cifs_rdma_enabled(server))
+ 		smbd_destroy(server);
++
+ 	if (server->ssocket) {
+ 		sock_release(server->ssocket);
+ 		server->ssocket = NULL;
++
++		/* Release netns reference for the socket. */
++		put_net(cifs_net_ns(server));
+ 	}
+ 
+ 	if (!list_empty(&server->pending_mid_q)) {
+@@ -1037,6 +1041,7 @@ clean_demultiplex_info(struct TCP_Server_Info *server)
+ 		 */
+ 	}
+ 
++	/* Release netns reference for this server. */
+ 	put_net(cifs_net_ns(server));
+ 	kfree(server->leaf_fullpath);
+ 	kfree(server);
+@@ -1713,6 +1718,8 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx,
+ 
+ 	tcp_ses->ops = ctx->ops;
+ 	tcp_ses->vals = ctx->vals;
++
++	/* Grab netns reference for this server. */
+ 	cifs_set_net_ns(tcp_ses, get_net(current->nsproxy->net_ns));
+ 
+ 	tcp_ses->conn_id = atomic_inc_return(&tcpSesNextId);
+@@ -1844,6 +1851,7 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx,
+ out_err_crypto_release:
+ 	cifs_crypto_secmech_release(tcp_ses);
+ 
++	/* Release netns reference for this server. */
+ 	put_net(cifs_net_ns(tcp_ses));
+ 
+ out_err:
+@@ -1852,8 +1860,10 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx,
+ 			cifs_put_tcp_session(tcp_ses->primary_server, false);
+ 		kfree(tcp_ses->hostname);
+ 		kfree(tcp_ses->leaf_fullpath);
+-		if (tcp_ses->ssocket)
++		if (tcp_ses->ssocket) {
+ 			sock_release(tcp_ses->ssocket);
++			put_net(cifs_net_ns(tcp_ses));
++		}
+ 		kfree(tcp_ses);
+ 	}
+ 	return ERR_PTR(rc);
+@@ -3111,20 +3121,20 @@ generic_ip_connect(struct TCP_Server_Info *server)
+ 		socket = server->ssocket;
+ 	} else {
+ 		struct net *net = cifs_net_ns(server);
+-		struct sock *sk;
+ 
+-		rc = __sock_create(net, sfamily, SOCK_STREAM,
+-				   IPPROTO_TCP, &server->ssocket, 1);
++		rc = sock_create_kern(net, sfamily, SOCK_STREAM, IPPROTO_TCP, &server->ssocket);
+ 		if (rc < 0) {
+ 			cifs_server_dbg(VFS, "Error %d creating socket\n", rc);
+ 			return rc;
+ 		}
+ 
+-		sk = server->ssocket->sk;
+-		__netns_tracker_free(net, &sk->ns_tracker, false);
+-		sk->sk_net_refcnt = 1;
+-		get_net_track(net, &sk->ns_tracker, GFP_KERNEL);
+-		sock_inuse_add(net, 1);
++		/*
++		 * Grab netns reference for the socket.
++		 *
++		 * It'll be released here, on error, or in clean_demultiplex_info() upon server
++		 * teardown.
++		 */
++		get_net(net);
+ 
+ 		/* BB other socket options to set KEEPALIVE, NODELAY? */
+ 		cifs_dbg(FYI, "Socket created\n");
+@@ -3138,8 +3148,10 @@ generic_ip_connect(struct TCP_Server_Info *server)
+ 	}
+ 
+ 	rc = bind_socket(server);
+-	if (rc < 0)
++	if (rc < 0) {
++		put_net(cifs_net_ns(server));
+ 		return rc;
++	}
+ 
+ 	/*
+ 	 * Eventually check for other socket options to change from
+@@ -3176,6 +3188,7 @@ generic_ip_connect(struct TCP_Server_Info *server)
+ 	if (rc < 0) {
+ 		cifs_dbg(FYI, "Error %d connecting to server\n", rc);
+ 		trace_smb3_connect_err(server->hostname, server->conn_id, &server->dstaddr, rc);
++		put_net(cifs_net_ns(server));
+ 		sock_release(socket);
+ 		server->ssocket = NULL;
+ 		return rc;
+@@ -3184,6 +3197,9 @@ generic_ip_connect(struct TCP_Server_Info *server)
+ 	if (sport == htons(RFC1001_PORT))
+ 		rc = ip_rfc1001_connect(server);
+ 
++	if (rc < 0)
++		put_net(cifs_net_ns(server));
++
+ 	return rc;
+ }
+ 
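The connect.c rework replaces the open-coded sk->ns_tracker manipulation with plain reference ownership: get_net() once for the server at creation time and once more for each socket, with a matching put_net() wherever the corresponding object is torn down (the generic_ip_connect() error paths, socket release, and final server cleanup). The rule in miniature, assuming the helper names used above:

	/* one reference per long-lived user of the namespace */
	cifs_set_net_ns(server, get_net(current->nsproxy->net_ns));
	/* ... */
	rc = sock_create_kern(net, sfamily, SOCK_STREAM, IPPROTO_TCP,
			      &server->ssocket);
	if (rc < 0)
		return rc;
	get_net(net);			/* the socket's own reference */
	/* ... on teardown, mirror each acquisition ... */
	sock_release(server->ssocket);
	put_net(cifs_net_ns(server));	/* drop the socket's reference */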
+diff --git a/fs/smb/server/connection.c b/fs/smb/server/connection.c
+index e6a72f75ab94ba..bf45822db5d589 100644
+--- a/fs/smb/server/connection.c
++++ b/fs/smb/server/connection.c
+@@ -70,7 +70,6 @@ struct ksmbd_conn *ksmbd_conn_alloc(void)
+ 	atomic_set(&conn->req_running, 0);
+ 	atomic_set(&conn->r_count, 0);
+ 	atomic_set(&conn->refcnt, 1);
+-	atomic_set(&conn->mux_smb_requests, 0);
+ 	conn->total_credits = 1;
+ 	conn->outstanding_credits = 0;
+ 
+@@ -120,8 +119,8 @@ void ksmbd_conn_enqueue_request(struct ksmbd_work *work)
+ 	if (conn->ops->get_cmd_val(work) != SMB2_CANCEL_HE)
+ 		requests_queue = &conn->requests;
+ 
++	atomic_inc(&conn->req_running);
+ 	if (requests_queue) {
+-		atomic_inc(&conn->req_running);
+ 		spin_lock(&conn->request_lock);
+ 		list_add_tail(&work->request_entry, requests_queue);
+ 		spin_unlock(&conn->request_lock);
+@@ -132,11 +131,14 @@ void ksmbd_conn_try_dequeue_request(struct ksmbd_work *work)
+ {
+ 	struct ksmbd_conn *conn = work->conn;
+ 
++	atomic_dec(&conn->req_running);
++	if (waitqueue_active(&conn->req_running_q))
++		wake_up(&conn->req_running_q);
++
+ 	if (list_empty(&work->request_entry) &&
+ 	    list_empty(&work->async_request_entry))
+ 		return;
+ 
+-	atomic_dec(&conn->req_running);
+ 	spin_lock(&conn->request_lock);
+ 	list_del_init(&work->request_entry);
+ 	spin_unlock(&conn->request_lock);
+@@ -308,7 +310,7 @@ int ksmbd_conn_handler_loop(void *p)
+ {
+ 	struct ksmbd_conn *conn = (struct ksmbd_conn *)p;
+ 	struct ksmbd_transport *t = conn->transport;
+-	unsigned int pdu_size, max_allowed_pdu_size;
++	unsigned int pdu_size, max_allowed_pdu_size, max_req;
+ 	char hdr_buf[4] = {0,};
+ 	int size;
+ 
+@@ -318,6 +320,7 @@ int ksmbd_conn_handler_loop(void *p)
+ 	if (t->ops->prepare && t->ops->prepare(t))
+ 		goto out;
+ 
++	max_req = server_conf.max_inflight_req;
+ 	conn->last_active = jiffies;
+ 	set_freezable();
+ 	while (ksmbd_conn_alive(conn)) {
+@@ -327,6 +330,13 @@ int ksmbd_conn_handler_loop(void *p)
+ 		kvfree(conn->request_buf);
+ 		conn->request_buf = NULL;
+ 
++recheck:
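++		/*
++		 * Throttle: wait until the number of in-flight requests
++		 * drops below max_req before reading the next PDU.
++		 */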
++		if (atomic_read(&conn->req_running) + 1 > max_req) {
++			wait_event_interruptible(conn->req_running_q,
++				atomic_read(&conn->req_running) < max_req);
++			goto recheck;
++		}
++
+ 		size = t->ops->read(t, hdr_buf, sizeof(hdr_buf), -1);
+ 		if (size != sizeof(hdr_buf))
+ 			break;
+diff --git a/fs/smb/server/connection.h b/fs/smb/server/connection.h
+index 8ddd5a3c7bafb6..b379ae4fdcdffa 100644
+--- a/fs/smb/server/connection.h
++++ b/fs/smb/server/connection.h
+@@ -107,7 +107,6 @@ struct ksmbd_conn {
+ 	__le16				signing_algorithm;
+ 	bool				binding;
+ 	atomic_t			refcnt;
+-	atomic_t			mux_smb_requests;
+ };
+ 
+ struct ksmbd_conn_ops {
+diff --git a/fs/smb/server/server.c b/fs/smb/server/server.c
+index 698af37e988d7b..d146b0e7c3a9dd 100644
+--- a/fs/smb/server/server.c
++++ b/fs/smb/server/server.c
+@@ -270,7 +270,6 @@ static void handle_ksmbd_work(struct work_struct *wk)
+ 
+ 	ksmbd_conn_try_dequeue_request(work);
+ 	ksmbd_free_work_struct(work);
+-	atomic_dec(&conn->mux_smb_requests);
+ 	/*
+ 	 * Checking waitqueue to dropping pending requests on
+ 	 * disconnection. waitqueue_active is safe because it
+@@ -300,11 +299,6 @@ static int queue_ksmbd_work(struct ksmbd_conn *conn)
+ 	if (err)
+ 		return 0;
+ 
+-	if (atomic_inc_return(&conn->mux_smb_requests) >= conn->vals->max_credits) {
+-		atomic_dec_return(&conn->mux_smb_requests);
+-		return -ENOSPC;
+-	}
+-
+ 	work = ksmbd_alloc_work_struct();
+ 	if (!work) {
+ 		pr_err("allocation for work failed\n");
+@@ -367,6 +361,7 @@ static int server_conf_init(void)
+ 	server_conf.auth_mechs |= KSMBD_AUTH_KRB5 |
+ 				KSMBD_AUTH_MSKRB5;
+ #endif
++	server_conf.max_inflight_req = SMB2_MAX_CREDITS;
+ 	return 0;
+ }
+ 
+diff --git a/fs/smb/server/server.h b/fs/smb/server/server.h
+index 4fc529335271f7..94187628ff089f 100644
+--- a/fs/smb/server/server.h
++++ b/fs/smb/server/server.h
+@@ -42,6 +42,7 @@ struct ksmbd_server_config {
+ 	struct smb_sid		domain_sid;
+ 	unsigned int		auth_mechs;
+ 	unsigned int		max_connections;
++	unsigned int		max_inflight_req;
+ 
+ 	char			*conf[SERVER_CONF_WORK_GROUP + 1];
+ 	struct task_struct	*dh_task;
+diff --git a/fs/smb/server/transport_ipc.c b/fs/smb/server/transport_ipc.c
+index 2f27afb695f62e..6de351cc2b60e0 100644
+--- a/fs/smb/server/transport_ipc.c
++++ b/fs/smb/server/transport_ipc.c
+@@ -319,8 +319,11 @@ static int ipc_server_config_on_startup(struct ksmbd_startup_request *req)
+ 		init_smb2_max_write_size(req->smb2_max_write);
+ 	if (req->smb2_max_trans)
+ 		init_smb2_max_trans_size(req->smb2_max_trans);
+-	if (req->smb2_max_credits)
++	if (req->smb2_max_credits) {
+ 		init_smb2_max_credits(req->smb2_max_credits);
++		server_conf.max_inflight_req =
++			req->smb2_max_credits;
++	}
+ 	if (req->smbd_max_io_size)
+ 		init_smbd_max_io_size(req->smbd_max_io_size);
+ 
+diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c
+index 271855227514cb..6258527315f28b 100644
+--- a/fs/xfs/libxfs/xfs_ialloc.c
++++ b/fs/xfs/libxfs/xfs_ialloc.c
+@@ -855,7 +855,8 @@ xfs_ialloc_ag_alloc(
+ 		 * the end of the AG.
+ 		 */
+ 		args.min_agbno = args.mp->m_sb.sb_inoalignmt;
+-		args.max_agbno = round_down(args.mp->m_sb.sb_agblocks,
++		args.max_agbno = round_down(xfs_ag_block_count(args.mp,
++							pag->pag_agno),
+ 					    args.mp->m_sb.sb_inoalignmt) -
+ 				 igeo->ialloc_blks;
+ 
+@@ -2332,9 +2333,9 @@ xfs_difree(
+ 		return -EINVAL;
+ 	}
+ 	agbno = XFS_AGINO_TO_AGBNO(mp, agino);
+-	if (agbno >= mp->m_sb.sb_agblocks)  {
+-		xfs_warn(mp, "%s: agbno >= mp->m_sb.sb_agblocks (%d >= %d).",
+-			__func__, agbno, mp->m_sb.sb_agblocks);
++	if (agbno >= xfs_ag_block_count(mp, pag->pag_agno)) {
++		xfs_warn(mp, "%s: agbno >= xfs_ag_block_count (%d >= %d).",
++			__func__, agbno, xfs_ag_block_count(mp, pag->pag_agno));
+ 		ASSERT(0);
+ 		return -EINVAL;
+ 	}
+@@ -2457,7 +2458,7 @@ xfs_imap(
+ 	 */
+ 	agino = XFS_INO_TO_AGINO(mp, ino);
+ 	agbno = XFS_AGINO_TO_AGBNO(mp, agino);
+-	if (agbno >= mp->m_sb.sb_agblocks ||
++	if (agbno >= xfs_ag_block_count(mp, pag->pag_agno) ||
+ 	    ino != XFS_AGINO_TO_INO(mp, pag->pag_agno, agino)) {
+ 		error = -EINVAL;
+ #ifdef DEBUG
+@@ -2467,11 +2468,12 @@ xfs_imap(
+ 		 */
+ 		if (flags & XFS_IGET_UNTRUSTED)
+ 			return error;
+-		if (agbno >= mp->m_sb.sb_agblocks) {
++		if (agbno >= xfs_ag_block_count(mp, pag->pag_agno)) {
+ 			xfs_alert(mp,
+ 		"%s: agbno (0x%llx) >= mp->m_sb.sb_agblocks (0x%lx)",
+ 				__func__, (unsigned long long)agbno,
+-				(unsigned long)mp->m_sb.sb_agblocks);
++				(unsigned long)xfs_ag_block_count(mp,
++							pag->pag_agno));
+ 		}
+ 		if (ino != XFS_AGINO_TO_INO(mp, pag->pag_agno, agino)) {
+ 			xfs_alert(mp,
+diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
+index 02ebcbc4882f5b..e27b63281d012a 100644
+--- a/fs/xfs/libxfs/xfs_sb.c
++++ b/fs/xfs/libxfs/xfs_sb.c
+@@ -391,6 +391,21 @@ xfs_validate_sb_common(
+ 					 sbp->sb_inoalignmt, align);
+ 				return -EINVAL;
+ 			}
++
++			if (sbp->sb_spino_align &&
++			    (sbp->sb_spino_align > sbp->sb_inoalignmt ||
++			     (sbp->sb_inoalignmt % sbp->sb_spino_align) != 0)) {
++				xfs_warn(mp,
++"Sparse inode alignment (%u) is invalid, must be integer factor of (%u).",
++					sbp->sb_spino_align,
++					sbp->sb_inoalignmt);
++				return -EINVAL;
++			}
++		} else if (sbp->sb_spino_align) {
++			xfs_warn(mp,
++				"Sparse inode alignment (%u) should be zero.",
++				sbp->sb_spino_align);
++			return -EINVAL;
+ 		}
+ 	} else if (sbp->sb_qflags & (XFS_PQUOTA_ENFD | XFS_GQUOTA_ENFD |
+ 				XFS_PQUOTA_CHKD | XFS_GQUOTA_CHKD)) {
+diff --git a/fs/xfs/scrub/agheader.c b/fs/xfs/scrub/agheader.c
+index da30f926cbe66d..0f2f1852d58fe7 100644
+--- a/fs/xfs/scrub/agheader.c
++++ b/fs/xfs/scrub/agheader.c
+@@ -59,6 +59,30 @@ xchk_superblock_xref(
+ 	/* scrub teardown will take care of sc->sa for us */
+ }
+ 
++/*
++ * Calculate the ondisk superblock size in bytes given the feature set of the
++ * mounted filesystem (aka the primary sb).  This is subtly different from
++ * the logic in xfs_repair, which computes the size of a secondary sb given the
++ * featureset listed in the secondary sb.
++ */
++STATIC size_t
++xchk_superblock_ondisk_size(
++	struct xfs_mount	*mp)
++{
++	if (xfs_has_metauuid(mp))
++		return offsetofend(struct xfs_dsb, sb_meta_uuid);
++	if (xfs_has_crc(mp))
++		return offsetofend(struct xfs_dsb, sb_lsn);
++	if (xfs_sb_version_hasmorebits(&mp->m_sb))
++		return offsetofend(struct xfs_dsb, sb_bad_features2);
++	if (xfs_has_logv2(mp))
++		return offsetofend(struct xfs_dsb, sb_logsunit);
++	if (xfs_has_sector(mp))
++		return offsetofend(struct xfs_dsb, sb_logsectsize);
++	/* only support dirv2 or more recent */
++	return offsetofend(struct xfs_dsb, sb_dirblklog);
++}
++
+ /*
+  * Scrub the filesystem superblock.
+  *
+@@ -75,6 +99,7 @@ xchk_superblock(
+ 	struct xfs_buf		*bp;
+ 	struct xfs_dsb		*sb;
+ 	struct xfs_perag	*pag;
++	size_t			sblen;
+ 	xfs_agnumber_t		agno;
+ 	uint32_t		v2_ok;
+ 	__be32			features_mask;
+@@ -350,8 +375,8 @@ xchk_superblock(
+ 	}
+ 
+ 	/* Everything else must be zero. */
+-	if (memchr_inv(sb + 1, 0,
+-			BBTOB(bp->b_length) - sizeof(struct xfs_dsb)))
++	sblen = xchk_superblock_ondisk_size(mp);
++	if (memchr_inv((char *)sb + sblen, 0, BBTOB(bp->b_length) - sblen))
+ 		xchk_block_set_corrupt(sc, bp);
+ 
+ 	xchk_superblock_xref(sc, bp);
+diff --git a/fs/xfs/xfs_fsmap.c b/fs/xfs/xfs_fsmap.c
+index ae18ab86e608b5..8712b891defbc7 100644
+--- a/fs/xfs/xfs_fsmap.c
++++ b/fs/xfs/xfs_fsmap.c
+@@ -162,7 +162,8 @@ struct xfs_getfsmap_info {
+ 	xfs_daddr_t		next_daddr;	/* next daddr we expect */
+ 	/* daddr of low fsmap key when we're using the rtbitmap */
+ 	xfs_daddr_t		low_daddr;
+-	xfs_daddr_t		end_daddr;	/* daddr of high fsmap key */
++	/* daddr of high fsmap key, or the last daddr on the device */
++	xfs_daddr_t		end_daddr;
+ 	u64			missing_owner;	/* owner of holes */
+ 	u32			dev;		/* device id */
+ 	/*
+@@ -306,7 +307,7 @@ xfs_getfsmap_helper(
+ 	 * Note that if the btree query found a mapping, there won't be a gap.
+ 	 */
+ 	if (info->last && info->end_daddr != XFS_BUF_DADDR_NULL)
+-		rec_daddr = info->end_daddr;
++		rec_daddr = info->end_daddr + 1;
+ 
+ 	/* Are we just counting mappings? */
+ 	if (info->head->fmh_count == 0) {
+@@ -898,7 +899,10 @@ xfs_getfsmap(
+ 	struct xfs_trans		*tp = NULL;
+ 	struct xfs_fsmap		dkeys[2];	/* per-dev keys */
+ 	struct xfs_getfsmap_dev		handlers[XFS_GETFSMAP_DEVS];
+-	struct xfs_getfsmap_info	info = { NULL };
++	struct xfs_getfsmap_info	info = {
++		.fsmap_recs		= fsmap_recs,
++		.head			= head,
++	};
+ 	bool				use_rmap;
+ 	int				i;
+ 	int				error = 0;
+@@ -963,9 +967,6 @@ xfs_getfsmap(
+ 
+ 	info.next_daddr = head->fmh_keys[0].fmr_physical +
+ 			  head->fmh_keys[0].fmr_length;
+-	info.end_daddr = XFS_BUF_DADDR_NULL;
+-	info.fsmap_recs = fsmap_recs;
+-	info.head = head;
+ 
+ 	/* For each device we support... */
+ 	for (i = 0; i < XFS_GETFSMAP_DEVS; i++) {
+@@ -978,17 +979,23 @@ xfs_getfsmap(
+ 			break;
+ 
+ 		/*
+-		 * If this device number matches the high key, we have
+-		 * to pass the high key to the handler to limit the
+-		 * query results.  If the device number exceeds the
+-		 * low key, zero out the low key so that we get
+-		 * everything from the beginning.
++		 * If this device number matches the high key, we have to pass
++		 * the high key to the handler to limit the query results, and
++		 * set the end_daddr so that we can synthesize records at the
++		 * end of the query range or device.
+ 		 */
+ 		if (handlers[i].dev == head->fmh_keys[1].fmr_device) {
+ 			dkeys[1] = head->fmh_keys[1];
+ 			info.end_daddr = min(handlers[i].nr_sectors - 1,
+ 					     dkeys[1].fmr_physical);
++		} else {
++			info.end_daddr = handlers[i].nr_sectors - 1;
+ 		}
++
++		/*
++		 * If the device number exceeds the low key, zero out the low
++		 * key so that we get everything from the beginning.
++		 */
+ 		if (handlers[i].dev > head->fmh_keys[0].fmr_device)
+ 			memset(&dkeys[0], 0, sizeof(struct xfs_fsmap));
+ 
+diff --git a/include/clocksource/hyperv_timer.h b/include/clocksource/hyperv_timer.h
+index 6cdc873ac907f5..aa5233b1eba970 100644
+--- a/include/clocksource/hyperv_timer.h
++++ b/include/clocksource/hyperv_timer.h
+@@ -38,6 +38,8 @@ extern void hv_remap_tsc_clocksource(void);
+ extern unsigned long hv_get_tsc_pfn(void);
+ extern struct ms_hyperv_tsc_page *hv_get_tsc_page(void);
+ 
++extern void hv_adj_sched_clock_offset(u64 offset);
++
+ static __always_inline bool
+ hv_read_tsc_page_tsc(const struct ms_hyperv_tsc_page *tsc_pg,
+ 		     u64 *cur_tsc, u64 *time)
+diff --git a/include/linux/alloc_tag.h b/include/linux/alloc_tag.h
+index 941deffc590dfd..6073a8f13c413c 100644
+--- a/include/linux/alloc_tag.h
++++ b/include/linux/alloc_tag.h
+@@ -48,7 +48,12 @@ static inline void set_codetag_empty(union codetag_ref *ref)
+ #else /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
+ 
+ static inline bool is_codetag_empty(union codetag_ref *ref) { return false; }
+-static inline void set_codetag_empty(union codetag_ref *ref) {}
++
++static inline void set_codetag_empty(union codetag_ref *ref)
++{
++	if (ref)
++		ref->ct = NULL;
++}
+ 
+ #endif /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
+ 
+diff --git a/include/linux/arm_ffa.h b/include/linux/arm_ffa.h
+index a28e2a6a13d05a..74169dd0f65948 100644
+--- a/include/linux/arm_ffa.h
++++ b/include/linux/arm_ffa.h
+@@ -166,9 +166,12 @@ static inline void *ffa_dev_get_drvdata(struct ffa_device *fdev)
+ 	return dev_get_drvdata(&fdev->dev);
+ }
+ 
++struct ffa_partition_info;
++
+ #if IS_REACHABLE(CONFIG_ARM_FFA_TRANSPORT)
+-struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id,
+-				       const struct ffa_ops *ops);
++struct ffa_device *
++ffa_device_register(const struct ffa_partition_info *part_info,
++		    const struct ffa_ops *ops);
+ void ffa_device_unregister(struct ffa_device *ffa_dev);
+ int ffa_driver_register(struct ffa_driver *driver, struct module *owner,
+ 			const char *mod_name);
+@@ -176,9 +179,9 @@ void ffa_driver_unregister(struct ffa_driver *driver);
+ bool ffa_device_is_valid(struct ffa_device *ffa_dev);
+ 
+ #else
+-static inline
+-struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id,
+-				       const struct ffa_ops *ops)
++static inline struct ffa_device *
++ffa_device_register(const struct ffa_partition_info *part_info,
++		    const struct ffa_ops *ops)
+ {
+ 	return NULL;
+ }
+diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
+index 22c22fb9104214..02a226bcf0edc9 100644
+--- a/include/linux/hyperv.h
++++ b/include/linux/hyperv.h
+@@ -1559,6 +1559,7 @@ struct hv_util_service {
+ 	void *channel;
+ 	void (*util_cb)(void *);
+ 	int (*util_init)(struct hv_util_service *);
++	int (*util_init_transport)(void);
+ 	void (*util_deinit)(void);
+ 	int (*util_pre_suspend)(void);
+ 	int (*util_pre_resume)(void);
+diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h
+index e123d5e17b5261..85fe4e6b275c7d 100644
+--- a/include/linux/io_uring.h
++++ b/include/linux/io_uring.h
+@@ -15,10 +15,8 @@ bool io_is_uring_fops(struct file *file);
+ 
+ static inline void io_uring_files_cancel(void)
+ {
+-	if (current->io_uring) {
+-		io_uring_unreg_ringfd();
++	if (current->io_uring)
+ 		__io_uring_cancel(false);
+-	}
+ }
+ static inline void io_uring_task_cancel(void)
+ {
+diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
+index 74aa9fbbdae70b..48c66b84668281 100644
+--- a/include/linux/page-flags.h
++++ b/include/linux/page-flags.h
+@@ -860,18 +860,10 @@ static inline void ClearPageCompound(struct page *page)
+ 	ClearPageHead(page);
+ }
+ FOLIO_FLAG(large_rmappable, FOLIO_SECOND_PAGE)
+-FOLIO_TEST_FLAG(partially_mapped, FOLIO_SECOND_PAGE)
+-/*
+- * PG_partially_mapped is protected by deferred_split split_queue_lock,
+- * so its safe to use non-atomic set/clear.
+- */
+-__FOLIO_SET_FLAG(partially_mapped, FOLIO_SECOND_PAGE)
+-__FOLIO_CLEAR_FLAG(partially_mapped, FOLIO_SECOND_PAGE)
++FOLIO_FLAG(partially_mapped, FOLIO_SECOND_PAGE)
+ #else
+ FOLIO_FLAG_FALSE(large_rmappable)
+-FOLIO_TEST_FLAG_FALSE(partially_mapped)
+-__FOLIO_SET_FLAG_NOOP(partially_mapped)
+-__FOLIO_CLEAR_FLAG_NOOP(partially_mapped)
++FOLIO_FLAG_FALSE(partially_mapped)
+ #endif
+ 
+ #define PG_head_mask ((1UL << PG_head))
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index bb343136ddd05d..c14446c6164d72 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -656,6 +656,12 @@ struct sched_dl_entity {
+ 	 * @dl_defer_armed tells if the deferrable server is waiting
+ 	 * for the replenishment timer to activate it.
+ 	 *
++	 * @dl_server_active tells if the dlserver is active (started).
++	 * dlserver is started on the first cfs enqueue on an idle runqueue
++	 * and is stopped when a dequeue results in 0 cfs tasks on the
++	 * runqueue. In other words, dlserver is active only when the CPU's
++	 * runqueue has at least one cfs task.
++	 *
+ 	 * @dl_defer_running tells if the deferrable server is actually
+ 	 * running, skipping the defer phase.
+ 	 */
+@@ -664,6 +670,7 @@ struct sched_dl_entity {
+ 	unsigned int			dl_non_contending : 1;
+ 	unsigned int			dl_overrun	  : 1;
+ 	unsigned int			dl_server         : 1;
++	unsigned int			dl_server_active  : 1;
+ 	unsigned int			dl_defer	  : 1;
+ 	unsigned int			dl_defer_armed	  : 1;
+ 	unsigned int			dl_defer_running  : 1;
+diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
+index 42bedcddd5113e..4df2ff81d3dea5 100644
+--- a/include/linux/trace_events.h
++++ b/include/linux/trace_events.h
+@@ -285,7 +285,8 @@ struct trace_event_fields {
+ 			const char *name;
+ 			const int  size;
+ 			const int  align;
+-			const int  is_signed;
++			const unsigned int is_signed:1;
++			unsigned int needs_test:1;
+ 			const int  filter_type;
+ 			const int  len;
+ 		};
+@@ -337,6 +338,7 @@ enum {
+ 	TRACE_EVENT_FL_EPROBE_BIT,
+ 	TRACE_EVENT_FL_FPROBE_BIT,
+ 	TRACE_EVENT_FL_CUSTOM_BIT,
++	TRACE_EVENT_FL_TEST_STR_BIT,
+ };
+ 
+ /*
+@@ -354,6 +356,7 @@ enum {
+ *  CUSTOM        - Event is a custom event (to be attached to an existing tracepoint)
+  *                   This is set when the custom event has not been attached
+  *                   to a tracepoint yet, then it is cleared when it is.
++ *  TEST_STR      - The event has a "%s" that points to a string outside the event
+  */
+ enum {
+ 	TRACE_EVENT_FL_FILTERED		= (1 << TRACE_EVENT_FL_FILTERED_BIT),
+@@ -367,6 +370,7 @@ enum {
+ 	TRACE_EVENT_FL_EPROBE		= (1 << TRACE_EVENT_FL_EPROBE_BIT),
+ 	TRACE_EVENT_FL_FPROBE		= (1 << TRACE_EVENT_FL_FPROBE_BIT),
+ 	TRACE_EVENT_FL_CUSTOM		= (1 << TRACE_EVENT_FL_CUSTOM_BIT),
++	TRACE_EVENT_FL_TEST_STR		= (1 << TRACE_EVENT_FL_TEST_STR_BIT),
+ };
+ 
+ #define TRACE_EVENT_FL_UKPROBE (TRACE_EVENT_FL_KPROBE | TRACE_EVENT_FL_UPROBE)
+diff --git a/include/linux/wait.h b/include/linux/wait.h
+index 8aa3372f21a080..2b322a9b88a2bd 100644
+--- a/include/linux/wait.h
++++ b/include/linux/wait.h
+@@ -221,6 +221,7 @@ void __wake_up_pollfree(struct wait_queue_head *wq_head);
+ #define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
+ #define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL, 1)
+ #define wake_up_all_locked(x)		__wake_up_locked((x), TASK_NORMAL, 0)
++#define wake_up_sync(x)			__wake_up_sync(x, TASK_NORMAL)
+ 
+ #define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
+ #define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
+diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
+index b2736e3491b862..9849da128364af 100644
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -515,7 +515,11 @@ static void io_queue_iowq(struct io_kiocb *req)
+ 	struct io_uring_task *tctx = req->task->io_uring;
+ 
+ 	BUG_ON(!tctx);
+-	BUG_ON(!tctx->io_wq);
++
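++	/*
++	 * task_work may run after the task has started exiting, at which
++	 * point its io-wq may already be gone; cancel instead of queueing.
++	 */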
++	if ((current->flags & PF_KTHREAD) || !tctx->io_wq) {
++		io_req_task_queue_fail(req, -ECANCELED);
++		return;
++	}
+ 
+ 	/* init ->work of the whole link before punting */
+ 	io_prep_async_link(req);
+@@ -3230,6 +3234,7 @@ __cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd)
+ 
+ void __io_uring_cancel(bool cancel_all)
+ {
++	io_uring_unreg_ringfd();
+ 	io_uring_cancel_generic(cancel_all, NULL);
+ }
+ 
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 6cc12777bb11ab..d07dc87787dff3 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -1300,7 +1300,7 @@ bool sched_can_stop_tick(struct rq *rq)
+ 	if (scx_enabled() && !scx_can_stop_tick(rq))
+ 		return false;
+ 
+-	if (rq->cfs.nr_running > 1)
++	if (rq->cfs.h_nr_running > 1)
+ 		return false;
+ 
+ 	/*
+diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
+index fc6f41ac33eb13..a17c23b53049cc 100644
+--- a/kernel/sched/deadline.c
++++ b/kernel/sched/deadline.c
+@@ -1647,6 +1647,7 @@ void dl_server_start(struct sched_dl_entity *dl_se)
+ 	if (!dl_se->dl_runtime)
+ 		return;
+ 
++	dl_se->dl_server_active = 1;
+ 	enqueue_dl_entity(dl_se, ENQUEUE_WAKEUP);
+ 	if (!dl_task(dl_se->rq->curr) || dl_entity_preempt(dl_se, &rq->curr->dl))
+ 		resched_curr(dl_se->rq);
+@@ -1661,6 +1662,7 @@ void dl_server_stop(struct sched_dl_entity *dl_se)
+ 	hrtimer_try_to_cancel(&dl_se->dl_timer);
+ 	dl_se->dl_defer_armed = 0;
+ 	dl_se->dl_throttled = 0;
++	dl_se->dl_server_active = 0;
+ }
+ 
+ void dl_server_init(struct sched_dl_entity *dl_se, struct rq *rq,
+@@ -2420,8 +2422,10 @@ static struct task_struct *__pick_task_dl(struct rq *rq)
+ 	if (dl_server(dl_se)) {
+ 		p = dl_se->server_pick_task(dl_se);
+ 		if (!p) {
+-			dl_se->dl_yielded = 1;
+-			update_curr_dl_se(rq, dl_se, 0);
++			if (dl_server_active(dl_se)) {
++				dl_se->dl_yielded = 1;
++				update_curr_dl_se(rq, dl_se, 0);
++			}
+ 			goto again;
+ 		}
+ 		rq->dl_server = dl_se;
+diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
+index f4035c7a0fa1df..82b165bf48c423 100644
+--- a/kernel/sched/debug.c
++++ b/kernel/sched/debug.c
+@@ -844,6 +844,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
+ 	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread", SPLIT_NS(spread));
+ 	SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
+ 	SEQ_printf(m, "  .%-30s: %d\n", "h_nr_running", cfs_rq->h_nr_running);
++	SEQ_printf(m, "  .%-30s: %d\n", "h_nr_delayed", cfs_rq->h_nr_delayed);
+ 	SEQ_printf(m, "  .%-30s: %d\n", "idle_nr_running",
+ 			cfs_rq->idle_nr_running);
+ 	SEQ_printf(m, "  .%-30s: %d\n", "idle_h_nr_running",
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 782ce70ebd1b08..1ca96c99872f08 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -1159,8 +1159,6 @@ static inline void update_curr_task(struct task_struct *p, s64 delta_exec)
+ 	trace_sched_stat_runtime(p, delta_exec);
+ 	account_group_exec_runtime(p, delta_exec);
+ 	cgroup_account_cputime(p, delta_exec);
+-	if (p->dl_server)
+-		dl_server_update(p->dl_server, delta_exec);
+ }
+ 
+ static inline bool did_preempt_short(struct cfs_rq *cfs_rq, struct sched_entity *curr)
+@@ -1237,11 +1235,16 @@ static void update_curr(struct cfs_rq *cfs_rq)
+ 		update_curr_task(p, delta_exec);
+ 
+ 		/*
+-		 * Any fair task that runs outside of fair_server should
+-		 * account against fair_server such that it can account for
+-		 * this time and possibly avoid running this period.
++		 * If the fair_server is active, we need to account for the
++		 * fair_server time whether or not the task is running on
++		 * behalf of the fair_server:
++		 *  - If the task is running on behalf of fair_server, we need
++		 *    to limit its time based on the assigned runtime.
++		 *  - Fair task that runs outside of fair_server should account
++		 *    against fair_server such that it can account for this time
++		 *    and possibly avoid running this period.
+ 		 */
+-		if (p->dl_server != &rq->fair_server)
++		if (dl_server_active(&rq->fair_server))
+ 			dl_server_update(&rq->fair_server, delta_exec);
+ 	}
+ 
+@@ -5471,9 +5474,33 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ 
+ static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
+ 
+-static inline void finish_delayed_dequeue_entity(struct sched_entity *se)
++static void set_delayed(struct sched_entity *se)
++{
++	se->sched_delayed = 1;
++	for_each_sched_entity(se) {
++		struct cfs_rq *cfs_rq = cfs_rq_of(se);
++
++		cfs_rq->h_nr_delayed++;
++		if (cfs_rq_throttled(cfs_rq))
++			break;
++	}
++}
++
++static void clear_delayed(struct sched_entity *se)
+ {
+ 	se->sched_delayed = 0;
++	for_each_sched_entity(se) {
++		struct cfs_rq *cfs_rq = cfs_rq_of(se);
++
++		cfs_rq->h_nr_delayed--;
++		if (cfs_rq_throttled(cfs_rq))
++			break;
++	}
++}
++
++static inline void finish_delayed_dequeue_entity(struct sched_entity *se)
++{
++	clear_delayed(se);
+ 	if (sched_feat(DELAY_ZERO) && se->vlag > 0)
+ 		se->vlag = 0;
+ }
+@@ -5484,6 +5511,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+ 	bool sleep = flags & DEQUEUE_SLEEP;
+ 
+ 	update_curr(cfs_rq);
++	clear_buddies(cfs_rq, se);
+ 
+ 	if (flags & DEQUEUE_DELAYED) {
+ 		SCHED_WARN_ON(!se->sched_delayed);
+@@ -5500,10 +5528,8 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+ 
+ 		if (sched_feat(DELAY_DEQUEUE) && delay &&
+ 		    !entity_eligible(cfs_rq, se)) {
+-			if (cfs_rq->next == se)
+-				cfs_rq->next = NULL;
+ 			update_load_avg(cfs_rq, se, 0);
+-			se->sched_delayed = 1;
++			set_delayed(se);
+ 			return false;
+ 		}
+ 	}
+@@ -5526,8 +5552,6 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+ 
+ 	update_stats_dequeue_fair(cfs_rq, se, flags);
+ 
+-	clear_buddies(cfs_rq, se);
+-
+ 	update_entity_lag(cfs_rq, se);
+ 	if (sched_feat(PLACE_REL_DEADLINE) && !sleep) {
+ 		se->deadline -= se->vruntime;
+@@ -5923,7 +5947,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
+ 	struct rq *rq = rq_of(cfs_rq);
+ 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
+ 	struct sched_entity *se;
+-	long task_delta, idle_task_delta, dequeue = 1;
++	long task_delta, idle_task_delta, delayed_delta, dequeue = 1;
+ 	long rq_h_nr_running = rq->cfs.h_nr_running;
+ 
+ 	raw_spin_lock(&cfs_b->lock);
+@@ -5956,6 +5980,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
+ 
+ 	task_delta = cfs_rq->h_nr_running;
+ 	idle_task_delta = cfs_rq->idle_h_nr_running;
++	delayed_delta = cfs_rq->h_nr_delayed;
+ 	for_each_sched_entity(se) {
+ 		struct cfs_rq *qcfs_rq = cfs_rq_of(se);
+ 		int flags;
+@@ -5979,6 +6004,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
+ 
+ 		qcfs_rq->h_nr_running -= task_delta;
+ 		qcfs_rq->idle_h_nr_running -= idle_task_delta;
++		qcfs_rq->h_nr_delayed -= delayed_delta;
+ 
+ 		if (qcfs_rq->load.weight) {
+ 			/* Avoid re-evaluating load for this entity: */
+@@ -6001,6 +6027,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
+ 
+ 		qcfs_rq->h_nr_running -= task_delta;
+ 		qcfs_rq->idle_h_nr_running -= idle_task_delta;
++		qcfs_rq->h_nr_delayed -= delayed_delta;
+ 	}
+ 
+ 	/* At this point se is NULL and we are at root level*/
+@@ -6026,7 +6053,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
+ 	struct rq *rq = rq_of(cfs_rq);
+ 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
+ 	struct sched_entity *se;
+-	long task_delta, idle_task_delta;
++	long task_delta, idle_task_delta, delayed_delta;
+ 	long rq_h_nr_running = rq->cfs.h_nr_running;
+ 
+ 	se = cfs_rq->tg->se[cpu_of(rq)];
+@@ -6062,6 +6089,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
+ 
+ 	task_delta = cfs_rq->h_nr_running;
+ 	idle_task_delta = cfs_rq->idle_h_nr_running;
++	delayed_delta = cfs_rq->h_nr_delayed;
+ 	for_each_sched_entity(se) {
+ 		struct cfs_rq *qcfs_rq = cfs_rq_of(se);
+ 
+@@ -6079,6 +6107,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
+ 
+ 		qcfs_rq->h_nr_running += task_delta;
+ 		qcfs_rq->idle_h_nr_running += idle_task_delta;
++		qcfs_rq->h_nr_delayed += delayed_delta;
+ 
+ 		/* end evaluation on encountering a throttled cfs_rq */
+ 		if (cfs_rq_throttled(qcfs_rq))
+@@ -6096,6 +6125,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
+ 
+ 		qcfs_rq->h_nr_running += task_delta;
+ 		qcfs_rq->idle_h_nr_running += idle_task_delta;
++		qcfs_rq->h_nr_delayed += delayed_delta;
+ 
+ 		/* end evaluation on encountering a throttled cfs_rq */
+ 		if (cfs_rq_throttled(qcfs_rq))
+@@ -6949,7 +6979,7 @@ requeue_delayed_entity(struct sched_entity *se)
+ 	}
+ 
+ 	update_load_avg(cfs_rq, se, 0);
+-	se->sched_delayed = 0;
++	clear_delayed(se);
+ }
+ 
+ /*
+@@ -6963,6 +6993,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+ 	struct cfs_rq *cfs_rq;
+ 	struct sched_entity *se = &p->se;
+ 	int idle_h_nr_running = task_has_idle_policy(p);
++	int h_nr_delayed = 0;
+ 	int task_new = !(flags & ENQUEUE_WAKEUP);
+ 	int rq_h_nr_running = rq->cfs.h_nr_running;
+ 	u64 slice = 0;
+@@ -6989,6 +7020,9 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+ 	if (p->in_iowait)
+ 		cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT);
+ 
++	if (task_new)
++		h_nr_delayed = !!se->sched_delayed;
++
+ 	for_each_sched_entity(se) {
+ 		if (se->on_rq) {
+ 			if (se->sched_delayed)
+@@ -7011,6 +7045,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+ 
+ 		cfs_rq->h_nr_running++;
+ 		cfs_rq->idle_h_nr_running += idle_h_nr_running;
++		cfs_rq->h_nr_delayed += h_nr_delayed;
+ 
+ 		if (cfs_rq_is_idle(cfs_rq))
+ 			idle_h_nr_running = 1;
+@@ -7034,6 +7069,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+ 
+ 		cfs_rq->h_nr_running++;
+ 		cfs_rq->idle_h_nr_running += idle_h_nr_running;
++		cfs_rq->h_nr_delayed += h_nr_delayed;
+ 
+ 		if (cfs_rq_is_idle(cfs_rq))
+ 			idle_h_nr_running = 1;
+@@ -7096,6 +7132,7 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
+ 	struct task_struct *p = NULL;
+ 	int idle_h_nr_running = 0;
+ 	int h_nr_running = 0;
++	int h_nr_delayed = 0;
+ 	struct cfs_rq *cfs_rq;
+ 	u64 slice = 0;
+ 
+@@ -7103,6 +7140,8 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
+ 		p = task_of(se);
+ 		h_nr_running = 1;
+ 		idle_h_nr_running = task_has_idle_policy(p);
++		if (!task_sleep && !task_delayed)
++			h_nr_delayed = !!se->sched_delayed;
+ 	} else {
+ 		cfs_rq = group_cfs_rq(se);
+ 		slice = cfs_rq_min_slice(cfs_rq);
+@@ -7120,6 +7159,7 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
+ 
+ 		cfs_rq->h_nr_running -= h_nr_running;
+ 		cfs_rq->idle_h_nr_running -= idle_h_nr_running;
++		cfs_rq->h_nr_delayed -= h_nr_delayed;
+ 
+ 		if (cfs_rq_is_idle(cfs_rq))
+ 			idle_h_nr_running = h_nr_running;
+@@ -7158,6 +7198,7 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
+ 
+ 		cfs_rq->h_nr_running -= h_nr_running;
+ 		cfs_rq->idle_h_nr_running -= idle_h_nr_running;
++		cfs_rq->h_nr_delayed -= h_nr_delayed;
+ 
+ 		if (cfs_rq_is_idle(cfs_rq))
+ 			idle_h_nr_running = h_nr_running;
+@@ -8786,7 +8827,7 @@ static void check_preempt_wakeup_fair(struct rq *rq, struct task_struct *p, int
+ 	if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
+ 		return;
+ 
+-	if (sched_feat(NEXT_BUDDY) && !(wake_flags & WF_FORK)) {
++	if (sched_feat(NEXT_BUDDY) && !(wake_flags & WF_FORK) && !pse->sched_delayed) {
+ 		set_next_buddy(pse);
+ 	}
+ 
+diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
+index a9c65d97b3cac6..171a802420a10a 100644
+--- a/kernel/sched/pelt.c
++++ b/kernel/sched/pelt.c
+@@ -321,7 +321,7 @@ int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq)
+ {
+ 	if (___update_load_sum(now, &cfs_rq->avg,
+ 				scale_load_down(cfs_rq->load.weight),
+-				cfs_rq->h_nr_running,
++				cfs_rq->h_nr_running - cfs_rq->h_nr_delayed,
+ 				cfs_rq->curr != NULL)) {
+ 
+ 		___update_load_avg(&cfs_rq->avg, 1);
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index c03b3d7b320e9c..f2ef520513c4a2 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -398,6 +398,11 @@ extern void __dl_server_attach_root(struct sched_dl_entity *dl_se, struct rq *rq
+ extern int dl_server_apply_params(struct sched_dl_entity *dl_se,
+ 		    u64 runtime, u64 period, bool init);
+ 
++static inline bool dl_server_active(struct sched_dl_entity *dl_se)
++{
++	return dl_se->dl_server_active;
++}
++
+ #ifdef CONFIG_CGROUP_SCHED
+ 
+ extern struct list_head task_groups;
+@@ -649,6 +654,7 @@ struct cfs_rq {
+ 	unsigned int		h_nr_running;      /* SCHED_{NORMAL,BATCH,IDLE} */
+ 	unsigned int		idle_nr_running;   /* SCHED_IDLE */
+ 	unsigned int		idle_h_nr_running; /* SCHED_IDLE */
++	unsigned int		h_nr_delayed;
+ 
+ 	s64			avg_vruntime;
+ 	u64			avg_load;
+@@ -898,8 +904,11 @@ struct dl_rq {
+ 
+ static inline void se_update_runnable(struct sched_entity *se)
+ {
+-	if (!entity_is_task(se))
+-		se->runnable_weight = se->my_q->h_nr_running;
++	if (!entity_is_task(se)) {
++		struct cfs_rq *cfs_rq = se->my_q;
++
++		se->runnable_weight = cfs_rq->h_nr_running - cfs_rq->h_nr_delayed;
++	}
+ }
+ 
+ static inline long se_runnable(struct sched_entity *se)
+diff --git a/kernel/trace/fgraph.c b/kernel/trace/fgraph.c
+index 69e226a48daa92..72bcbfad53db04 100644
+--- a/kernel/trace/fgraph.c
++++ b/kernel/trace/fgraph.c
+@@ -1160,7 +1160,7 @@ void fgraph_update_pid_func(void)
+ static int start_graph_tracing(void)
+ {
+ 	unsigned long **ret_stack_list;
+-	int ret;
++	int ret, cpu;
+ 
+ 	ret_stack_list = kcalloc(FTRACE_RETSTACK_ALLOC_SIZE,
+ 				 sizeof(*ret_stack_list), GFP_KERNEL);
+@@ -1168,6 +1168,12 @@ static int start_graph_tracing(void)
+ 	if (!ret_stack_list)
+ 		return -ENOMEM;
+ 
++	/* The cpu_boot init_task->ret_stack will never be freed */
++	for_each_online_cpu(cpu) {
++		if (!idle_task(cpu)->ret_stack)
++			ftrace_graph_init_idle_task(idle_task(cpu), cpu);
++	}
++
+ 	do {
+ 		ret = alloc_retstack_tasklist(ret_stack_list);
+ 	} while (ret == -EAGAIN);
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 366eb4c4f28e57..703978b2d557d7 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -7019,7 +7019,11 @@ static int __rb_map_vma(struct ring_buffer_per_cpu *cpu_buffer,
+ 	lockdep_assert_held(&cpu_buffer->mapping_lock);
+ 
+ 	nr_subbufs = cpu_buffer->nr_pages + 1; /* + reader-subbuf */
+-	nr_pages = ((nr_subbufs + 1) << subbuf_order) - pgoff; /* + meta-page */
++	nr_pages = ((nr_subbufs + 1) << subbuf_order); /* + meta-page */
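++	/* An out-of-range pgoff would underflow nr_pages below; reject it. */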
++	if (nr_pages <= pgoff)
++		return -EINVAL;
++
++	nr_pages -= pgoff;
+ 
+ 	nr_vma_pages = vma_pages(vma);
+ 	if (!nr_vma_pages || nr_vma_pages > nr_pages)
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 17d2ffde0bb604..35515192aa0fda 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -3635,17 +3635,12 @@ char *trace_iter_expand_format(struct trace_iterator *iter)
+ }
+ 
+ /* Returns true if the string is safe to dereference from an event */
+-static bool trace_safe_str(struct trace_iterator *iter, const char *str,
+-			   bool star, int len)
++static bool trace_safe_str(struct trace_iterator *iter, const char *str)
+ {
+ 	unsigned long addr = (unsigned long)str;
+ 	struct trace_event *trace_event;
+ 	struct trace_event_call *event;
+ 
+-	/* Ignore strings with no length */
+-	if (star && !len)
+-		return true;
+-
+ 	/* OK if part of the event data */
+ 	if ((addr >= (unsigned long)iter->ent) &&
+ 	    (addr < (unsigned long)iter->ent + iter->ent_size))
+@@ -3685,181 +3680,69 @@ static bool trace_safe_str(struct trace_iterator *iter, const char *str,
+ 	return false;
+ }
+ 
+-static DEFINE_STATIC_KEY_FALSE(trace_no_verify);
+-
+-static int test_can_verify_check(const char *fmt, ...)
+-{
+-	char buf[16];
+-	va_list ap;
+-	int ret;
+-
+-	/*
+-	 * The verifier is dependent on vsnprintf() modifies the va_list
+-	 * passed to it, where it is sent as a reference. Some architectures
+-	 * (like x86_32) passes it by value, which means that vsnprintf()
+-	 * does not modify the va_list passed to it, and the verifier
+-	 * would then need to be able to understand all the values that
+-	 * vsnprintf can use. If it is passed by value, then the verifier
+-	 * is disabled.
+-	 */
+-	va_start(ap, fmt);
+-	vsnprintf(buf, 16, "%d", ap);
+-	ret = va_arg(ap, int);
+-	va_end(ap);
+-
+-	return ret;
+-}
+-
+-static void test_can_verify(void)
+-{
+-	if (!test_can_verify_check("%d %d", 0, 1)) {
+-		pr_info("trace event string verifier disabled\n");
+-		static_branch_inc(&trace_no_verify);
+-	}
+-}
+-
+ /**
+- * trace_check_vprintf - Check dereferenced strings while writing to the seq buffer
++ * ignore_event - Check dereferenced fields while writing to the seq buffer
+  * @iter: The iterator that holds the seq buffer and the event being printed
+- * @fmt: The format used to print the event
+- * @ap: The va_list holding the data to print from @fmt.
+  *
+- * This writes the data into the @iter->seq buffer using the data from
+- * @fmt and @ap. If the format has a %s, then the source of the string
+- * is examined to make sure it is safe to print, otherwise it will
+- * warn and print "[UNSAFE MEMORY]" in place of the dereferenced string
+- * pointer.
++ * At boot up, test_event_printk() will flag any event that dereferences
++ * a string with "%s" that does exist in the ring buffer. It may still
++ * be valid, as the string may point to a static string in the kernel
++ * rodata that never gets freed. But if the string pointer is pointing
++ * to something that was allocated, there's a chance that it can be freed
++ * by the time the user reads the trace. This would cause a bad memory
++ * access by the kernel and possibly crash the system.
++ *
++ * This function will check if the event has any fields flagged as needing
++ * to be checked at runtime and perform those checks.
++ *
++ * If it is found that a field is unsafe, it will write into the @iter->seq
++ * a message stating what was found to be unsafe.
++ *
++ * @return: true if the event is unsafe and should be ignored,
++ *          false otherwise.
+  */
+-void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
+-			 va_list ap)
++bool ignore_event(struct trace_iterator *iter)
+ {
+-	long text_delta = 0;
+-	long data_delta = 0;
+-	const char *p = fmt;
+-	const char *str;
+-	bool good;
+-	int i, j;
++	struct ftrace_event_field *field;
++	struct trace_event *trace_event;
++	struct trace_event_call *event;
++	struct list_head *head;
++	struct trace_seq *seq;
++	const void *ptr;
+ 
+-	if (WARN_ON_ONCE(!fmt))
+-		return;
++	trace_event = ftrace_find_event(iter->ent->type);
+ 
+-	if (static_branch_unlikely(&trace_no_verify))
+-		goto print;
++	seq = &iter->seq;
+ 
+-	/*
+-	 * When the kernel is booted with the tp_printk command line
+-	 * parameter, trace events go directly through to printk().
+-	 * It also is checked by this function, but it does not
+-	 * have an associated trace_array (tr) for it.
+-	 */
+-	if (iter->tr) {
+-		text_delta = iter->tr->text_delta;
+-		data_delta = iter->tr->data_delta;
++	if (!trace_event) {
++		trace_seq_printf(seq, "EVENT ID %d NOT FOUND?\n", iter->ent->type);
++		return true;
+ 	}
+ 
+-	/* Don't bother checking when doing a ftrace_dump() */
+-	if (iter->fmt == static_fmt_buf)
+-		goto print;
+-
+-	while (*p) {
+-		bool star = false;
+-		int len = 0;
+-
+-		j = 0;
+-
+-		/*
+-		 * We only care about %s and variants
+-		 * as well as %p[sS] if delta is non-zero
+-		 */
+-		for (i = 0; p[i]; i++) {
+-			if (i + 1 >= iter->fmt_size) {
+-				/*
+-				 * If we can't expand the copy buffer,
+-				 * just print it.
+-				 */
+-				if (!trace_iter_expand_format(iter))
+-					goto print;
+-			}
+-
+-			if (p[i] == '\\' && p[i+1]) {
+-				i++;
+-				continue;
+-			}
+-			if (p[i] == '%') {
+-				/* Need to test cases like %08.*s */
+-				for (j = 1; p[i+j]; j++) {
+-					if (isdigit(p[i+j]) ||
+-					    p[i+j] == '.')
+-						continue;
+-					if (p[i+j] == '*') {
+-						star = true;
+-						continue;
+-					}
+-					break;
+-				}
+-				if (p[i+j] == 's')
+-					break;
+-
+-				if (text_delta && p[i+1] == 'p' &&
+-				    ((p[i+2] == 's' || p[i+2] == 'S')))
+-					break;
+-
+-				star = false;
+-			}
+-			j = 0;
+-		}
+-		/* If no %s found then just print normally */
+-		if (!p[i])
+-			break;
+-
+-		/* Copy up to the %s, and print that */
+-		strncpy(iter->fmt, p, i);
+-		iter->fmt[i] = '\0';
+-		trace_seq_vprintf(&iter->seq, iter->fmt, ap);
++	event = container_of(trace_event, struct trace_event_call, event);
++	if (!(event->flags & TRACE_EVENT_FL_TEST_STR))
++		return false;
+ 
+-		/* Add delta to %pS pointers */
+-		if (p[i+1] == 'p') {
+-			unsigned long addr;
+-			char fmt[4];
++	head = trace_get_fields(event);
++	if (!head) {
++		trace_seq_printf(seq, "FIELDS FOR EVENT '%s' NOT FOUND?\n",
++				 trace_event_name(event));
++		return true;
++	}
+ 
+-			fmt[0] = '%';
+-			fmt[1] = 'p';
+-			fmt[2] = p[i+2]; /* Either %ps or %pS */
+-			fmt[3] = '\0';
++	/* Offsets are from the iter->ent that points to the raw event */
++	ptr = iter->ent;
+ 
+-			addr = va_arg(ap, unsigned long);
+-			addr += text_delta;
+-			trace_seq_printf(&iter->seq, fmt, (void *)addr);
++	list_for_each_entry(field, head, link) {
++		const char *str;
++		bool good;
+ 
+-			p += i + 3;
++		if (!field->needs_test)
+ 			continue;
+-		}
+ 
+-		/*
+-		 * If iter->seq is full, the above call no longer guarantees
+-		 * that ap is in sync with fmt processing, and further calls
+-		 * to va_arg() can return wrong positional arguments.
+-		 *
+-		 * Ensure that ap is no longer used in this case.
+-		 */
+-		if (iter->seq.full) {
+-			p = "";
+-			break;
+-		}
++		str = *(const char **)(ptr + field->offset);
+ 
+-		if (star)
+-			len = va_arg(ap, int);
+-
+-		/* The ap now points to the string data of the %s */
+-		str = va_arg(ap, const char *);
+-
+-		good = trace_safe_str(iter, str, star, len);
+-
+-		/* Could be from the last boot */
+-		if (data_delta && !good) {
+-			str += data_delta;
+-			good = trace_safe_str(iter, str, star, len);
+-		}
++		good = trace_safe_str(iter, str);
+ 
+ 		/*
+ 		 * If you hit this warning, it is likely that the
+@@ -3870,44 +3753,14 @@ void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
+ 		 * instead. See samples/trace_events/trace-events-sample.h
+ 		 * for reference.
+ 		 */
+-		if (WARN_ONCE(!good, "fmt: '%s' current_buffer: '%s'",
+-			      fmt, seq_buf_str(&iter->seq.seq))) {
+-			int ret;
+-
+-			/* Try to safely read the string */
+-			if (star) {
+-				if (len + 1 > iter->fmt_size)
+-					len = iter->fmt_size - 1;
+-				if (len < 0)
+-					len = 0;
+-				ret = copy_from_kernel_nofault(iter->fmt, str, len);
+-				iter->fmt[len] = 0;
+-				star = false;
+-			} else {
+-				ret = strncpy_from_kernel_nofault(iter->fmt, str,
+-								  iter->fmt_size);
+-			}
+-			if (ret < 0)
+-				trace_seq_printf(&iter->seq, "(0x%px)", str);
+-			else
+-				trace_seq_printf(&iter->seq, "(0x%px:%s)",
+-						 str, iter->fmt);
+-			str = "[UNSAFE-MEMORY]";
+-			strcpy(iter->fmt, "%s");
+-		} else {
+-			strncpy(iter->fmt, p + i, j + 1);
+-			iter->fmt[j+1] = '\0';
++		if (WARN_ONCE(!good, "event '%s' has unsafe pointer field '%s'",
++			      trace_event_name(event), field->name)) {
++			trace_seq_printf(seq, "EVENT %s: HAS UNSAFE POINTER FIELD '%s'\n",
++					 trace_event_name(event), field->name);
++			return true;
+ 		}
+-		if (star)
+-			trace_seq_printf(&iter->seq, iter->fmt, len, str);
+-		else
+-			trace_seq_printf(&iter->seq, iter->fmt, str);
+-
+-		p += i + j + 1;
+ 	}
+- print:
+-	if (*p)
+-		trace_seq_vprintf(&iter->seq, p, ap);
++	return false;
+ }
+ 
+ const char *trace_event_format(struct trace_iterator *iter, const char *fmt)
+@@ -4377,6 +4230,15 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
+ 	if (event) {
+ 		if (tr->trace_flags & TRACE_ITER_FIELDS)
+ 			return print_event_fields(iter, event);
++		/*
++		 * For TRACE_EVENT() events, the print_fmt is not
++		 * safe to use if the array has delta offsets.
++		 * Force printing via the fields.
++		 */
++		if ((tr->text_delta || tr->data_delta) &&
++		    event->type > __TRACE_LAST_TYPE)
++			return print_event_fields(iter, event);
++
+ 		return event->funcs->trace(iter, sym_flags, event);
+ 	}
+ 
+@@ -10794,8 +10656,6 @@ __init static int tracer_alloc_buffers(void)
+ 
+ 	register_snapshot_cmd();
+ 
+-	test_can_verify();
+-
+ 	return 0;
+ 
+ out_free_pipe_cpumask:
+diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
+index 30d6675c78cfe1..04ea327198ba80 100644
+--- a/kernel/trace/trace.h
++++ b/kernel/trace/trace.h
+@@ -664,9 +664,8 @@ void trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
+ 
+ bool trace_is_tracepoint_string(const char *str);
+ const char *trace_event_format(struct trace_iterator *iter, const char *fmt);
+-void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
+-			 va_list ap) __printf(2, 0);
+ char *trace_iter_expand_format(struct trace_iterator *iter);
++bool ignore_event(struct trace_iterator *iter);
+ 
+ int trace_empty(struct trace_iterator *iter);
+ 
+@@ -1402,7 +1401,8 @@ struct ftrace_event_field {
+ 	int			filter_type;
+ 	int			offset;
+ 	int			size;
+-	int			is_signed;
++	unsigned int		is_signed:1;
++	unsigned int		needs_test:1;
+ 	int			len;
+ };
+ 
+diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
+index 7266ec2a4eea00..7149cd6fd4795e 100644
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -82,7 +82,7 @@ static int system_refcount_dec(struct event_subsystem *system)
+ 	}
+ 
+ static struct ftrace_event_field *
+-__find_event_field(struct list_head *head, char *name)
++__find_event_field(struct list_head *head, const char *name)
+ {
+ 	struct ftrace_event_field *field;
+ 
+@@ -114,7 +114,8 @@ trace_find_event_field(struct trace_event_call *call, char *name)
+ 
+ static int __trace_define_field(struct list_head *head, const char *type,
+ 				const char *name, int offset, int size,
+-				int is_signed, int filter_type, int len)
++				int is_signed, int filter_type, int len,
++				int need_test)
+ {
+ 	struct ftrace_event_field *field;
+ 
+@@ -133,6 +134,7 @@ static int __trace_define_field(struct list_head *head, const char *type,
+ 	field->offset = offset;
+ 	field->size = size;
+ 	field->is_signed = is_signed;
++	field->needs_test = need_test;
+ 	field->len = len;
+ 
+ 	list_add(&field->link, head);
+@@ -151,13 +153,13 @@ int trace_define_field(struct trace_event_call *call, const char *type,
+ 
+ 	head = trace_get_fields(call);
+ 	return __trace_define_field(head, type, name, offset, size,
+-				    is_signed, filter_type, 0);
++				    is_signed, filter_type, 0, 0);
+ }
+ EXPORT_SYMBOL_GPL(trace_define_field);
+ 
+ static int trace_define_field_ext(struct trace_event_call *call, const char *type,
+ 		       const char *name, int offset, int size, int is_signed,
+-		       int filter_type, int len)
++		       int filter_type, int len, int need_test)
+ {
+ 	struct list_head *head;
+ 
+@@ -166,13 +168,13 @@ static int trace_define_field_ext(struct trace_event_call *call, const char *typ
+ 
+ 	head = trace_get_fields(call);
+ 	return __trace_define_field(head, type, name, offset, size,
+-				    is_signed, filter_type, len);
++				    is_signed, filter_type, len, need_test);
+ }
+ 
+ #define __generic_field(type, item, filter_type)			\
+ 	ret = __trace_define_field(&ftrace_generic_fields, #type,	\
+ 				   #item, 0, 0, is_signed_type(type),	\
+-				   filter_type, 0);			\
++				   filter_type, 0, 0);			\
+ 	if (ret)							\
+ 		return ret;
+ 
+@@ -181,7 +183,8 @@ static int trace_define_field_ext(struct trace_event_call *call, const char *typ
+ 				   "common_" #item,			\
+ 				   offsetof(typeof(ent), item),		\
+ 				   sizeof(ent.item),			\
+-				   is_signed_type(type), FILTER_OTHER, 0);	\
++				   is_signed_type(type), FILTER_OTHER,	\
++				   0, 0);				\
+ 	if (ret)							\
+ 		return ret;
+ 
+@@ -244,19 +247,16 @@ int trace_event_get_offsets(struct trace_event_call *call)
+ 	return tail->offset + tail->size;
+ }
+ 
+-/*
+- * Check if the referenced field is an array and return true,
+- * as arrays are OK to dereference.
+- */
+-static bool test_field(const char *fmt, struct trace_event_call *call)
++
++static struct trace_event_fields *find_event_field(const char *fmt,
++						   struct trace_event_call *call)
+ {
+ 	struct trace_event_fields *field = call->class->fields_array;
+-	const char *array_descriptor;
+ 	const char *p = fmt;
+ 	int len;
+ 
+ 	if (!(len = str_has_prefix(fmt, "REC->")))
+-		return false;
++		return NULL;
+ 	fmt += len;
+ 	for (p = fmt; *p; p++) {
+ 		if (!isalnum(*p) && *p != '_')
+@@ -265,16 +265,129 @@ static bool test_field(const char *fmt, struct trace_event_call *call)
+ 	len = p - fmt;
+ 
+ 	for (; field->type; field++) {
+-		if (strncmp(field->name, fmt, len) ||
+-		    field->name[len])
++		if (strncmp(field->name, fmt, len) || field->name[len])
+ 			continue;
+-		array_descriptor = strchr(field->type, '[');
+-		/* This is an array and is OK to dereference. */
+-		return array_descriptor != NULL;
++
++		return field;
++	}
++	return NULL;
++}
++
++/*
++ * Check if the referenced field is an array and return true,
++ * as arrays are OK to dereference.
++ */
++static bool test_field(const char *fmt, struct trace_event_call *call)
++{
++	struct trace_event_fields *field;
++
++	field = find_event_field(fmt, call);
++	if (!field)
++		return false;
++
++	/* This is an array and is OK to dereference. */
++	return strchr(field->type, '[') != NULL;
++}
++
++/* Look for a string within an argument */
++static bool find_print_string(const char *arg, const char *str, const char *end)
++{
++	const char *r;
++
++	r = strstr(arg, str);
++	return r && r < end;
++}
++
++/* Return true if the argument pointer is safe */
++static bool process_pointer(const char *fmt, int len, struct trace_event_call *call)
++{
++	const char *r, *e, *a;
++
++	e = fmt + len;
++
++	/* Find the REC-> in the argument */
++	r = strstr(fmt, "REC->");
++	if (r && r < e) {
++		/*
++		 * Addresses of events on the buffer, or an array on the buffer,
++		 * are OK to dereference. There are ways to fool this, but
++		 * this is to catch common mistakes, not malicious code.
++		 */
++		a = strchr(fmt, '&');
++		if ((a && (a < r)) || test_field(r, call))
++			return true;
++	} else if (find_print_string(fmt, "__get_dynamic_array(", e)) {
++		return true;
++	} else if (find_print_string(fmt, "__get_rel_dynamic_array(", e)) {
++		return true;
++	} else if (find_print_string(fmt, "__get_dynamic_array_len(", e)) {
++		return true;
++	} else if (find_print_string(fmt, "__get_rel_dynamic_array_len(", e)) {
++		return true;
++	} else if (find_print_string(fmt, "__get_sockaddr(", e)) {
++		return true;
++	} else if (find_print_string(fmt, "__get_rel_sockaddr(", e)) {
++		return true;
+ 	}
+ 	return false;
+ }
+ 
++/* Return true if the string is safe */
++static bool process_string(const char *fmt, int len, struct trace_event_call *call)
++{
++	struct trace_event_fields *field;
++	const char *r, *e, *s;
++
++	e = fmt + len;
++
++	/*
++	 * There are several helper functions that return strings.
++	 * If the argument contains a function, then assume its field is valid.
++	 * It is considered that the argument has a function if it has:
++	 * The argument is considered to contain a function if it has:
++	 *   an alphanumeric character or '_' before a parenthesis.
++	s = fmt;
++	do {
++		r = strstr(s, "(");
++		if (!r || r >= e)
++			break;
++		for (int i = 1; r - i >= s; i++) {
++			char ch = *(r - i);
++			if (isspace(ch))
++				continue;
++			if (isalnum(ch) || ch == '_')
++				return true;
++			/* Anything else, this isn't a function */
++			break;
++		}
++		/* A function could be wrapped in parentheses, try the next one */
++		s = r + 1;
++	} while (s < e);
++
++	/*
++	 * If there's any strings in the argument consider this arg OK as it
++	 * could be: REC->field ? "foo" : "bar" and we don't want to get into
++	 * verifying that logic here.
++	 */
++	if (find_print_string(fmt, "\"", e))
++		return true;
++
++	/* Dereferenced strings are also valid like any other pointer */
++	if (process_pointer(fmt, len, call))
++		return true;
++
++	/* Make sure the field is found */
++	field = find_event_field(fmt, call);
++	if (!field)
++		return false;
++
++	/* Test this field's string before printing the event */
++	call->flags |= TRACE_EVENT_FL_TEST_STR;
++	field->needs_test = 1;
++
++	return true;
++}
++
+ /*
+  * Examine the print fmt of the event looking for unsafe dereference
+  * pointers using %p* that could be recorded in the trace event and
+@@ -284,13 +397,14 @@ static bool test_field(const char *fmt, struct trace_event_call *call)
+ static void test_event_printk(struct trace_event_call *call)
+ {
+ 	u64 dereference_flags = 0;
++	u64 string_flags = 0;
+ 	bool first = true;
+-	const char *fmt, *c, *r, *a;
++	const char *fmt;
+ 	int parens = 0;
+ 	char in_quote = 0;
+ 	int start_arg = 0;
+ 	int arg = 0;
+-	int i;
++	int i, e;
+ 
+ 	fmt = call->print_fmt;
+ 
+@@ -374,8 +488,16 @@ static void test_event_printk(struct trace_event_call *call)
+ 						star = true;
+ 						continue;
+ 					}
+-					if ((fmt[i + j] == 's') && star)
+-						arg++;
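++					/* Flag this "%s" argument so its string is validated below. */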
++					if ((fmt[i + j] == 's')) {
++						if (star)
++							arg++;
++						if (WARN_ONCE(arg == 63,
++							      "Too many args for event: %s",
++							      trace_event_name(call)))
++							return;
++						dereference_flags |= 1ULL << arg;
++						string_flags |= 1ULL << arg;
++					}
+ 					break;
+ 				}
+ 				break;
+@@ -403,42 +525,47 @@ static void test_event_printk(struct trace_event_call *call)
+ 		case ',':
+ 			if (in_quote || parens)
+ 				continue;
++			e = i;
+ 			i++;
+ 			while (isspace(fmt[i]))
+ 				i++;
+-			start_arg = i;
+-			if (!(dereference_flags & (1ULL << arg)))
+-				goto next_arg;
+ 
+-			/* Find the REC-> in the argument */
+-			c = strchr(fmt + i, ',');
+-			r = strstr(fmt + i, "REC->");
+-			if (r && (!c || r < c)) {
+-				/*
+-				 * Addresses of events on the buffer,
+-				 * or an array on the buffer is
+-				 * OK to dereference.
+-				 * There's ways to fool this, but
+-				 * this is to catch common mistakes,
+-				 * not malicious code.
+-				 */
+-				a = strchr(fmt + i, '&');
+-				if ((a && (a < r)) || test_field(r, call))
++			/*
++			 * If start_arg is zero, then this is the start of the
++			 * first argument. The processing of the argument happens
++			 * when the end of the argument is found, as it needs to
++			 * handle parentheses and such.
++			 */
++			if (!start_arg) {
++				start_arg = i;
++				/* Balance out the i++ in the for loop */
++				i--;
++				continue;
++			}
++
++			if (dereference_flags & (1ULL << arg)) {
++				if (string_flags & (1ULL << arg)) {
++					if (process_string(fmt + start_arg, e - start_arg, call))
++						dereference_flags &= ~(1ULL << arg);
++				} else if (process_pointer(fmt + start_arg, e - start_arg, call))
+ 					dereference_flags &= ~(1ULL << arg);
+-			} else if ((r = strstr(fmt + i, "__get_dynamic_array(")) &&
+-				   (!c || r < c)) {
+-				dereference_flags &= ~(1ULL << arg);
+-			} else if ((r = strstr(fmt + i, "__get_sockaddr(")) &&
+-				   (!c || r < c)) {
+-				dereference_flags &= ~(1ULL << arg);
+ 			}
+ 
+-		next_arg:
+-			i--;
++			start_arg = i;
+ 			arg++;
++			/* Balance out the i++ in the for loop */
++			i--;
+ 		}
+ 	}
+ 
++	if (dereference_flags & (1ULL << arg)) {
++		if (string_flags & (1ULL << arg)) {
++			if (process_string(fmt + start_arg, i - start_arg, call))
++				dereference_flags &= ~(1ULL << arg);
++		} else if (process_pointer(fmt + start_arg, i - start_arg, call))
++			dereference_flags &= ~(1ULL << arg);
++	}
++
+ 	/*
+ 	 * If you triggered the below warning, the trace event reported
+ 	 * uses an unsafe dereference pointer %p*. As the data stored
+@@ -2471,7 +2598,7 @@ event_define_fields(struct trace_event_call *call)
+ 			ret = trace_define_field_ext(call, field->type, field->name,
+ 						 offset, field->size,
+ 						 field->is_signed, field->filter_type,
+-						 field->len);
++						 field->len, field->needs_test);
+ 			if (WARN_ON_ONCE(ret)) {
+ 				pr_err("error code is %d\n", ret);
+ 				break;
+diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
+index c14573e5a90337..6e7090e8bf3097 100644
+--- a/kernel/trace/trace_output.c
++++ b/kernel/trace/trace_output.c
+@@ -317,10 +317,14 @@ EXPORT_SYMBOL(trace_raw_output_prep);
+ 
+ void trace_event_printf(struct trace_iterator *iter, const char *fmt, ...)
+ {
++	struct trace_seq *s = &iter->seq;
+ 	va_list ap;
+ 
++	if (ignore_event(iter))
++		return;
++
+ 	va_start(ap, fmt);
+-	trace_check_vprintf(iter, trace_event_format(iter, fmt), ap);
++	trace_seq_vprintf(s, trace_event_format(iter, fmt), ap);
+ 	va_end(ap);
+ }
+ EXPORT_SYMBOL(trace_event_printf);
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index 5734d5d5060f32..7e0f72cd9fd4a0 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -3503,7 +3503,7 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
+ 		    !list_empty(&folio->_deferred_list)) {
+ 			ds_queue->split_queue_len--;
+ 			if (folio_test_partially_mapped(folio)) {
+-				__folio_clear_partially_mapped(folio);
++				folio_clear_partially_mapped(folio);
+ 				mod_mthp_stat(folio_order(folio),
+ 					      MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
+ 			}
+@@ -3615,7 +3615,7 @@ bool __folio_unqueue_deferred_split(struct folio *folio)
+ 	if (!list_empty(&folio->_deferred_list)) {
+ 		ds_queue->split_queue_len--;
+ 		if (folio_test_partially_mapped(folio)) {
+-			__folio_clear_partially_mapped(folio);
++			folio_clear_partially_mapped(folio);
+ 			mod_mthp_stat(folio_order(folio),
+ 				      MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
+ 		}
+@@ -3659,7 +3659,7 @@ void deferred_split_folio(struct folio *folio, bool partially_mapped)
+ 	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
+ 	if (partially_mapped) {
+ 		if (!folio_test_partially_mapped(folio)) {
+-			__folio_set_partially_mapped(folio);
++			folio_set_partially_mapped(folio);
+ 			if (folio_test_pmd_mappable(folio))
+ 				count_vm_event(THP_DEFERRED_SPLIT_PAGE);
+ 			count_mthp_stat(folio_order(folio), MTHP_STAT_SPLIT_DEFERRED);
+@@ -3752,7 +3752,7 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,
+ 		} else {
+ 			/* We lost race with folio_put() */
+ 			if (folio_test_partially_mapped(folio)) {
+-				__folio_clear_partially_mapped(folio);
++				folio_clear_partially_mapped(folio);
+ 				mod_mthp_stat(folio_order(folio),
+ 					      MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
+ 			}
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 190fa05635f4a9..5dc57b74a8fe9a 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -5333,7 +5333,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
+ 					break;
+ 				}
+ 				ret = copy_user_large_folio(new_folio, pte_folio,
+-						ALIGN_DOWN(addr, sz), dst_vma);
++							    addr, dst_vma);
+ 				folio_put(pte_folio);
+ 				if (ret) {
+ 					folio_put(new_folio);
+@@ -6632,8 +6632,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
+ 			*foliop = NULL;
+ 			goto out;
+ 		}
+-		ret = copy_user_large_folio(folio, *foliop,
+-					    ALIGN_DOWN(dst_addr, size), dst_vma);
++		ret = copy_user_large_folio(folio, *foliop, dst_addr, dst_vma);
+ 		folio_put(*foliop);
+ 		*foliop = NULL;
+ 		if (ret) {
+diff --git a/mm/memory.c b/mm/memory.c
+index bdf77a3ec47bc2..d322ddfe679167 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -6780,9 +6780,10 @@ static inline int process_huge_page(
+ 	return 0;
+ }
+ 
+-static void clear_gigantic_page(struct folio *folio, unsigned long addr,
++static void clear_gigantic_page(struct folio *folio, unsigned long addr_hint,
+ 				unsigned int nr_pages)
+ {
++	unsigned long addr = ALIGN_DOWN(addr_hint, folio_size(folio));
+ 	int i;
+ 
+ 	might_sleep();
+@@ -6816,13 +6817,14 @@ void folio_zero_user(struct folio *folio, unsigned long addr_hint)
+ }
+ 
+ static int copy_user_gigantic_page(struct folio *dst, struct folio *src,
+-				   unsigned long addr,
++				   unsigned long addr_hint,
+ 				   struct vm_area_struct *vma,
+ 				   unsigned int nr_pages)
+ {
+-	int i;
++	unsigned long addr = ALIGN_DOWN(addr_hint, folio_size(dst));
+ 	struct page *dst_page;
+ 	struct page *src_page;
++	int i;
+ 
+ 	for (i = 0; i < nr_pages; i++) {
+ 		dst_page = folio_page(dst, i);
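
After this change the callers pass the raw faulting address and the helpers derive the folio base themselves via ALIGN_DOWN(addr_hint, folio_size(folio)), keeping the alignment rule in one place. The alignment step itself, as a runnable sketch with a local macro:

#include <assert.h>
#include <stdio.h>

#define ALIGN_DOWN(x, a) ((x) & ~((a) - 1)) /* a must be a power of two */

int main(void)
{
    unsigned long folio_size = 1UL << 21;       /* e.g. a 2 MiB folio */
    unsigned long addr_hint = 0x7f0000123456UL; /* fault address inside it */
    unsigned long addr = ALIGN_DOWN(addr_hint, folio_size);

    assert(addr % folio_size == 0);
    assert(addr <= addr_hint && addr_hint < addr + folio_size);
    printf("base %#lx for hint %#lx\n", addr, addr_hint);
    return 0;
}
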
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index b6958333054d06..de65e8b4f75f21 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -1238,13 +1238,15 @@ static void split_large_buddy(struct zone *zone, struct page *page,
+ 	if (order > pageblock_order)
+ 		order = pageblock_order;
+ 
+-	while (pfn != end) {
++	do {
+ 		int mt = get_pfnblock_migratetype(page, pfn);
+ 
+ 		__free_one_page(page, pfn, zone, order, mt, fpi);
+ 		pfn += 1 << order;
++		if (pfn == end)
++			break;
+ 		page = pfn_to_page(pfn);
+-	}
++	} while (1);
+ }
+ 
+ static void free_one_page(struct zone *zone, struct page *page,
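
The while-to-do/while rewrite above is about never evaluating pfn_to_page(end): the loop now breaks as soon as pfn reaches end, before translating it into a page pointer that may not exist. The same shape on an ordinary array (idx_to_ptr() is a toy stand-in for pfn_to_page()):

#include <stdio.h>

#define N 8
static int table[N];

static int *idx_to_ptr(int idx)
{
    /* stands in for pfn_to_page(): only valid for idx < N */
    return &table[idx];
}

int main(void)
{
    int step = 2, idx = 0;
    int *p = idx_to_ptr(idx);

    do {
        *p = idx;     /* "free" this block */
        idx += step;
        if (idx == N)
            break;    /* never call idx_to_ptr(N) */
        p = idx_to_ptr(idx);
    } while (1);

    for (idx = 0; idx < N; idx += step)
        printf("block at %d\n", table[idx]);
    return 0;
}
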
+diff --git a/mm/shmem.c b/mm/shmem.c
+index 568bb290bdce3e..b03ced0c3d4858 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -779,6 +779,14 @@ static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
+ }
+ #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+ 
++static void shmem_update_stats(struct folio *folio, int nr_pages)
++{
++	if (folio_test_pmd_mappable(folio))
++		__lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, nr_pages);
++	__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr_pages);
++	__lruvec_stat_mod_folio(folio, NR_SHMEM, nr_pages);
++}
++
+ /*
+  * Somewhat like filemap_add_folio, but error if expected item has gone.
+  */
+@@ -813,10 +821,7 @@ static int shmem_add_to_page_cache(struct folio *folio,
+ 		xas_store(&xas, folio);
+ 		if (xas_error(&xas))
+ 			goto unlock;
+-		if (folio_test_pmd_mappable(folio))
+-			__lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, nr);
+-		__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr);
+-		__lruvec_stat_mod_folio(folio, NR_SHMEM, nr);
++		shmem_update_stats(folio, nr);
+ 		mapping->nrpages += nr;
+ unlock:
+ 		xas_unlock_irq(&xas);
+@@ -844,8 +849,7 @@ static void shmem_delete_from_page_cache(struct folio *folio, void *radswap)
+ 	error = shmem_replace_entry(mapping, folio->index, folio, radswap);
+ 	folio->mapping = NULL;
+ 	mapping->nrpages -= nr;
+-	__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
+-	__lruvec_stat_mod_folio(folio, NR_SHMEM, -nr);
++	shmem_update_stats(folio, -nr);
+ 	xa_unlock_irq(&mapping->i_pages);
+ 	folio_put_refs(folio, nr);
+ 	BUG_ON(error);
+@@ -1944,10 +1948,8 @@ static int shmem_replace_folio(struct folio **foliop, gfp_t gfp,
+ 	}
+ 	if (!error) {
+ 		mem_cgroup_replace_folio(old, new);
+-		__lruvec_stat_mod_folio(new, NR_FILE_PAGES, nr_pages);
+-		__lruvec_stat_mod_folio(new, NR_SHMEM, nr_pages);
+-		__lruvec_stat_mod_folio(old, NR_FILE_PAGES, -nr_pages);
+-		__lruvec_stat_mod_folio(old, NR_SHMEM, -nr_pages);
++		shmem_update_stats(new, nr_pages);
++		shmem_update_stats(old, -nr_pages);
+ 	}
+ 	xa_unlock_irq(&swap_mapping->i_pages);
+ 
+diff --git a/mm/vmalloc.c b/mm/vmalloc.c
+index 0161cb4391e1d1..3f9255dfacb0c1 100644
+--- a/mm/vmalloc.c
++++ b/mm/vmalloc.c
+@@ -3369,7 +3369,8 @@ void vfree(const void *addr)
+ 		struct page *page = vm->pages[i];
+ 
+ 		BUG_ON(!page);
+-		mod_memcg_page_state(page, MEMCG_VMALLOC, -1);
++		if (!(vm->flags & VM_MAP_PUT_PAGES))
++			mod_memcg_page_state(page, MEMCG_VMALLOC, -1);
+ 		/*
+ 		 * High-order allocs for huge vmallocs are split, so
+ 		 * can be freed as an array of order-0 allocations
+@@ -3377,7 +3378,8 @@ void vfree(const void *addr)
+ 		__free_page(page);
+ 		cond_resched();
+ 	}
+-	atomic_long_sub(vm->nr_pages, &nr_vmalloc_pages);
++	if (!(vm->flags & VM_MAP_PUT_PAGES))
++		atomic_long_sub(vm->nr_pages, &nr_vmalloc_pages);
+ 	kvfree(vm->pages);
+ 	kfree(vm);
+ }
+diff --git a/net/core/netdev-genl.c b/net/core/netdev-genl.c
+index d2baa1af9df09e..7ce22f40db5b04 100644
+--- a/net/core/netdev-genl.c
++++ b/net/core/netdev-genl.c
+@@ -359,10 +359,10 @@ static int
+ netdev_nl_queue_fill(struct sk_buff *rsp, struct net_device *netdev, u32 q_idx,
+ 		     u32 q_type, const struct genl_info *info)
+ {
+-	int err = 0;
++	int err;
+ 
+ 	if (!(netdev->flags & IFF_UP))
+-		return err;
++		return -ENOENT;
+ 
+ 	err = netdev_nl_queue_validate(netdev, q_idx, q_type);
+ 	if (err)
+@@ -417,24 +417,21 @@ netdev_nl_queue_dump_one(struct net_device *netdev, struct sk_buff *rsp,
+ 			 struct netdev_nl_dump_ctx *ctx)
+ {
+ 	int err = 0;
+-	int i;
+ 
+ 	if (!(netdev->flags & IFF_UP))
+ 		return err;
+ 
+-	for (i = ctx->rxq_idx; i < netdev->real_num_rx_queues;) {
+-		err = netdev_nl_queue_fill_one(rsp, netdev, i,
++	for (; ctx->rxq_idx < netdev->real_num_rx_queues; ctx->rxq_idx++) {
++		err = netdev_nl_queue_fill_one(rsp, netdev, ctx->rxq_idx,
+ 					       NETDEV_QUEUE_TYPE_RX, info);
+ 		if (err)
+ 			return err;
+-		ctx->rxq_idx = i++;
+ 	}
+-	for (i = ctx->txq_idx; i < netdev->real_num_tx_queues;) {
+-		err = netdev_nl_queue_fill_one(rsp, netdev, i,
++	for (; ctx->txq_idx < netdev->real_num_tx_queues; ctx->txq_idx++) {
++		err = netdev_nl_queue_fill_one(rsp, netdev, ctx->txq_idx,
+ 					       NETDEV_QUEUE_TYPE_TX, info);
+ 		if (err)
+ 			return err;
+-		ctx->txq_idx = i++;
+ 	}
+ 
+ 	return err;
+@@ -600,7 +597,7 @@ netdev_nl_stats_by_queue(struct net_device *netdev, struct sk_buff *rsp,
+ 					    i, info);
+ 		if (err)
+ 			return err;
+-		ctx->rxq_idx = i++;
++		ctx->rxq_idx = ++i;
+ 	}
+ 	i = ctx->txq_idx;
+ 	while (ops->get_queue_stats_tx && i < netdev->real_num_tx_queues) {
+@@ -608,7 +605,7 @@ netdev_nl_stats_by_queue(struct net_device *netdev, struct sk_buff *rsp,
+ 					    i, info);
+ 		if (err)
+ 			return err;
+-		ctx->txq_idx = i++;
++		ctx->txq_idx = ++i;
+ 	}
+ 
+ 	ctx->rxq_idx = 0;
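
Both netdev-genl fixes above are the same post-increment slip: saving "i++" into the resume cursor records the queue that was just dumped, so a continued dump emits it twice, while "++i" records the next one. Isolated:

#include <stdio.h>

int main(void)
{
    int i, resume;

    i = 3;
    resume = i++; /* resume == 3: queue 3 would be dumped again */
    printf("post-increment resumes at %d\n", resume);

    i = 3;
    resume = ++i; /* resume == 4: continue after queue 3 */
    printf("pre-increment resumes at %d\n", resume);
    return 0;
}
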
+diff --git a/net/dsa/tag.h b/net/dsa/tag.h
+index d5707870906bc9..5d80ddad4ff6b1 100644
+--- a/net/dsa/tag.h
++++ b/net/dsa/tag.h
+@@ -138,9 +138,10 @@ static inline void dsa_software_untag_vlan_unaware_bridge(struct sk_buff *skb,
+  * dsa_software_vlan_untag: Software VLAN untagging in DSA receive path
+  * @skb: Pointer to socket buffer (packet)
+  *
+- * Receive path method for switches which cannot avoid tagging all packets
+- * towards the CPU port. Called when ds->untag_bridge_pvid (legacy) or
+- * ds->untag_vlan_aware_bridge_pvid is set to true.
++ * Receive path method for switches which send some packets as VLAN-tagged
++ * towards the CPU port (generally from VLAN-aware bridge ports) even when the
++ * packet was not tagged on the wire. Called when ds->untag_bridge_pvid
++ * (legacy) or ds->untag_vlan_aware_bridge_pvid is set to true.
+  *
+  * As a side effect of this method, any VLAN tag from the skb head is moved
+  * to hwaccel.
+@@ -149,14 +150,19 @@ static inline struct sk_buff *dsa_software_vlan_untag(struct sk_buff *skb)
+ {
+ 	struct dsa_port *dp = dsa_user_to_port(skb->dev);
+ 	struct net_device *br = dsa_port_bridge_dev_get(dp);
+-	u16 vid;
++	u16 vid, proto;
++	int err;
+ 
+ 	/* software untagging for standalone ports not yet necessary */
+ 	if (!br)
+ 		return skb;
+ 
++	err = br_vlan_get_proto(br, &proto);
++	if (err)
++		return skb;
++
+ 	/* Move VLAN tag from data to hwaccel */
+-	if (!skb_vlan_tag_present(skb)) {
++	if (!skb_vlan_tag_present(skb) && skb->protocol == htons(proto)) {
+ 		skb = skb_vlan_untag(skb);
+ 		if (!skb)
+ 			return NULL;
+diff --git a/net/mctp/route.c b/net/mctp/route.c
+index 597e9cf5aa6444..3f2bd65ff5e3c9 100644
+--- a/net/mctp/route.c
++++ b/net/mctp/route.c
+@@ -374,8 +374,13 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
+ 	msk = NULL;
+ 	rc = -EINVAL;
+ 
+-	/* we may be receiving a locally-routed packet; drop source sk
+-	 * accounting
++	/* We may be receiving a locally-routed packet; drop source sk
++	 * accounting.
++	 *
++	 * From here, we will either queue the skb - either to a frag_queue, or
++	 * to a receiving socket. When that succeeds, we clear the skb pointer;
++	 * a non-NULL skb on exit will be otherwise unowned, and hence
++	 * kfree_skb()-ed.
+ 	 */
+ 	skb_orphan(skb);
+ 
+@@ -434,7 +439,9 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
+ 		 * pending key.
+ 		 */
+ 		if (flags & MCTP_HDR_FLAG_EOM) {
+-			sock_queue_rcv_skb(&msk->sk, skb);
++			rc = sock_queue_rcv_skb(&msk->sk, skb);
++			if (!rc)
++				skb = NULL;
+ 			if (key) {
+ 				/* we've hit a pending reassembly; not much we
+ 				 * can do but drop it
+@@ -443,7 +450,6 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
+ 						   MCTP_TRACE_KEY_REPLIED);
+ 				key = NULL;
+ 			}
+-			rc = 0;
+ 			goto out_unlock;
+ 		}
+ 
+@@ -470,8 +476,10 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
+ 			 * this function.
+ 			 */
+ 			rc = mctp_key_add(key, msk);
+-			if (!rc)
++			if (!rc) {
+ 				trace_mctp_key_acquire(key);
++				skb = NULL;
++			}
+ 
+ 			/* we don't need to release key->lock on exit, so
+ 			 * clean up here and suppress the unlock via
+@@ -489,6 +497,8 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
+ 				key = NULL;
+ 			} else {
+ 				rc = mctp_frag_queue(key, skb);
++				if (!rc)
++					skb = NULL;
+ 			}
+ 		}
+ 
+@@ -503,12 +513,19 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
+ 		else
+ 			rc = mctp_frag_queue(key, skb);
+ 
++		if (rc)
++			goto out_unlock;
++
++		/* we've queued; the queue owns the skb now */
++		skb = NULL;
++
+ 		/* end of message? deliver to socket, and we're done with
+ 		 * the reassembly/response key
+ 		 */
+-		if (!rc && flags & MCTP_HDR_FLAG_EOM) {
+-			sock_queue_rcv_skb(key->sk, key->reasm_head);
+-			key->reasm_head = NULL;
++		if (flags & MCTP_HDR_FLAG_EOM) {
++			rc = sock_queue_rcv_skb(key->sk, key->reasm_head);
++			if (!rc)
++				key->reasm_head = NULL;
+ 			__mctp_key_done_in(key, net, f, MCTP_TRACE_KEY_REPLIED);
+ 			key = NULL;
+ 		}
+@@ -527,8 +544,7 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
+ 	if (any_key)
+ 		mctp_key_unref(any_key);
+ out:
+-	if (rc)
+-		kfree_skb(skb);
++	kfree_skb(skb);
+ 	return rc;
+ }
+ 
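
Worth spelling out the ownership convention the mctp hunks above converge on: every successful hand-off (frag queue or socket queue) clears the local skb pointer, and the single kfree_skb(skb) at the out label then frees only what nobody accepted. A userspace model of the same rule, with a toy try_queue() standing in for the real queueing calls:

#include <stdio.h>
#include <stdlib.h>

static int try_queue(int should_fail, void *buf)
{
    return should_fail ? -1 : 0; /* 0 means the queue now owns buf */
}

static int input(int fail)
{
    void *buf = malloc(64);
    int rc;

    if (!buf)
        return -1;

    rc = try_queue(fail, buf);
    if (!rc)
        buf = NULL; /* handed off: we must not free it */

    /* ... more processing could go here ... */

    free(buf); /* no-op when ownership was transferred */
    return rc;
}

int main(void)
{
    printf("delivered: rc=%d\n", input(0));
    printf("rejected:  rc=%d\n", input(1));
    return 0;
}
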
+diff --git a/net/mctp/test/route-test.c b/net/mctp/test/route-test.c
+index 8551dab1d1e698..17165b86ce22d4 100644
+--- a/net/mctp/test/route-test.c
++++ b/net/mctp/test/route-test.c
+@@ -837,6 +837,90 @@ static void mctp_test_route_input_multiple_nets_key(struct kunit *test)
+ 	mctp_test_route_input_multiple_nets_key_fini(test, &t2);
+ }
+ 
++/* Input route to socket, using a single-packet message, where sock delivery
++ * fails. Ensure we're handling the failure appropriately.
++ */
++static void mctp_test_route_input_sk_fail_single(struct kunit *test)
++{
++	const struct mctp_hdr hdr = RX_HDR(1, 10, 8, FL_S | FL_E | FL_TO);
++	struct mctp_test_route *rt;
++	struct mctp_test_dev *dev;
++	struct socket *sock;
++	struct sk_buff *skb;
++	int rc;
++
++	__mctp_route_test_init(test, &dev, &rt, &sock, MCTP_NET_ANY);
++
++	/* No rcvbuf space, so delivery should fail. __sock_set_rcvbuf will
++	 * clamp the minimum to SOCK_MIN_RCVBUF, so we open-code this.
++	 */
++	lock_sock(sock->sk);
++	WRITE_ONCE(sock->sk->sk_rcvbuf, 0);
++	release_sock(sock->sk);
++
++	skb = mctp_test_create_skb(&hdr, 10);
++	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb);
++	skb_get(skb);
++
++	mctp_test_skb_set_dev(skb, dev);
++
++	/* do route input, which should fail */
++	rc = mctp_route_input(&rt->rt, skb);
++	KUNIT_EXPECT_NE(test, rc, 0);
++
++	/* we should hold the only reference to skb */
++	KUNIT_EXPECT_EQ(test, refcount_read(&skb->users), 1);
++	kfree_skb(skb);
++
++	__mctp_route_test_fini(test, dev, rt, sock);
++}
++
++/* Input route to socket, using a fragmented message, where sock delivery fails.
++ */
++static void mctp_test_route_input_sk_fail_frag(struct kunit *test)
++{
++	const struct mctp_hdr hdrs[2] = { RX_FRAG(FL_S, 0), RX_FRAG(FL_E, 1) };
++	struct mctp_test_route *rt;
++	struct mctp_test_dev *dev;
++	struct sk_buff *skbs[2];
++	struct socket *sock;
++	unsigned int i;
++	int rc;
++
++	__mctp_route_test_init(test, &dev, &rt, &sock, MCTP_NET_ANY);
++
++	lock_sock(sock->sk);
++	WRITE_ONCE(sock->sk->sk_rcvbuf, 0);
++	release_sock(sock->sk);
++
++	for (i = 0; i < ARRAY_SIZE(skbs); i++) {
++		skbs[i] = mctp_test_create_skb(&hdrs[i], 10);
++		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skbs[i]);
++		skb_get(skbs[i]);
++
++		mctp_test_skb_set_dev(skbs[i], dev);
++	}
++
++	/* first route input should succeed, we're only queueing to the
++	 * frag list
++	 */
++	rc = mctp_route_input(&rt->rt, skbs[0]);
++	KUNIT_EXPECT_EQ(test, rc, 0);
++
++	/* final route input should fail to deliver to the socket */
++	rc = mctp_route_input(&rt->rt, skbs[1]);
++	KUNIT_EXPECT_NE(test, rc, 0);
++
++	/* we should hold the only reference to both skbs */
++	KUNIT_EXPECT_EQ(test, refcount_read(&skbs[0]->users), 1);
++	kfree_skb(skbs[0]);
++
++	KUNIT_EXPECT_EQ(test, refcount_read(&skbs[1]->users), 1);
++	kfree_skb(skbs[1]);
++
++	__mctp_route_test_fini(test, dev, rt, sock);
++}
++
+ #if IS_ENABLED(CONFIG_MCTP_FLOWS)
+ 
+ static void mctp_test_flow_init(struct kunit *test,
+@@ -1053,6 +1137,8 @@ static struct kunit_case mctp_test_cases[] = {
+ 			 mctp_route_input_sk_reasm_gen_params),
+ 	KUNIT_CASE_PARAM(mctp_test_route_input_sk_keys,
+ 			 mctp_route_input_sk_keys_gen_params),
++	KUNIT_CASE(mctp_test_route_input_sk_fail_single),
++	KUNIT_CASE(mctp_test_route_input_sk_fail_frag),
+ 	KUNIT_CASE(mctp_test_route_input_multiple_nets_bind),
+ 	KUNIT_CASE(mctp_test_route_input_multiple_nets_key),
+ 	KUNIT_CASE(mctp_test_packet_flow),
+diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
+index bfae7066936bb9..db794fe1300e69 100644
+--- a/net/netfilter/ipset/ip_set_list_set.c
++++ b/net/netfilter/ipset/ip_set_list_set.c
+@@ -611,6 +611,8 @@ init_list_set(struct net *net, struct ip_set *set, u32 size)
+ 	return true;
+ }
+ 
++static struct lock_class_key list_set_lockdep_key;
++
+ static int
+ list_set_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
+ 		u32 flags)
+@@ -627,6 +629,7 @@ list_set_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
+ 	if (size < IP_SET_LIST_MIN_SIZE)
+ 		size = IP_SET_LIST_MIN_SIZE;
+ 
++	lockdep_set_class(&set->lock, &list_set_lockdep_key);
+ 	set->variant = &set_variant;
+ 	set->dsize = ip_set_elem_len(set, tb, sizeof(struct set_elem),
+ 				     __alignof__(struct set_elem));
+diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
+index 98d7dbe3d78760..c0289f83f96df8 100644
+--- a/net/netfilter/ipvs/ip_vs_conn.c
++++ b/net/netfilter/ipvs/ip_vs_conn.c
+@@ -1495,8 +1495,8 @@ int __init ip_vs_conn_init(void)
+ 	max_avail -= 2;		/* ~4 in hash row */
+ 	max_avail -= 1;		/* IPVS up to 1/2 of mem */
+ 	max_avail -= order_base_2(sizeof(struct ip_vs_conn));
+-	max = clamp(max, min, max_avail);
+-	ip_vs_conn_tab_bits = clamp_val(ip_vs_conn_tab_bits, min, max);
++	max = clamp(max_avail, min, max);
++	ip_vs_conn_tab_bits = clamp(ip_vs_conn_tab_bits, min, max);
+ 	ip_vs_conn_tab_size = 1 << ip_vs_conn_tab_bits;
+ 	ip_vs_conn_tab_mask = ip_vs_conn_tab_size - 1;
+ 
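
The ip_vs change is purely about clamp()'s argument order: clamp(val, lo, hi) limits val to [lo, hi], so the value being limited must come first. A small standalone demonstration (local macro, illustrative numbers) of how swapping the operands can even drive the result below the minimum:

#include <stdio.h>

#define clamp(val, lo, hi) \
    ((val) < (lo) ? (lo) : ((val) > (hi) ? (hi) : (val)))

int main(void)
{
    int min = 8, max = 20, max_avail = 5;

    /* intended: limit what memory allows to [min, max] */
    printf("fixed: %d\n", clamp(max_avail, min, max)); /* 8, never below min */

    /* old call limited the wrong operand, with lo > hi */
    printf("buggy: %d\n", clamp(max, min, max_avail)); /* 5, below min */
    return 0;
}
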
+diff --git a/net/psample/psample.c b/net/psample/psample.c
+index a0ddae8a65f917..25f92ba0840c67 100644
+--- a/net/psample/psample.c
++++ b/net/psample/psample.c
+@@ -393,7 +393,9 @@ void psample_sample_packet(struct psample_group *group,
+ 		   nla_total_size_64bit(sizeof(u64)) +	/* timestamp */
+ 		   nla_total_size(sizeof(u16)) +	/* protocol */
+ 		   (md->user_cookie_len ?
+-		    nla_total_size(md->user_cookie_len) : 0); /* user cookie */
++		    nla_total_size(md->user_cookie_len) : 0) + /* user cookie */
++		   (md->rate_as_probability ?
++		    nla_total_size(0) : 0);	/* rate as probability */
+ 
+ #ifdef CONFIG_INET
+ 	tun_info = skb_tunnel_info(skb);
+@@ -498,8 +500,9 @@ void psample_sample_packet(struct psample_group *group,
+ 		    md->user_cookie))
+ 		goto error;
+ 
+-	if (md->rate_as_probability)
+-		nla_put_flag(nl_skb, PSAMPLE_ATTR_SAMPLE_PROBABILITY);
++	if (md->rate_as_probability &&
++	    nla_put_flag(nl_skb, PSAMPLE_ATTR_SAMPLE_PROBABILITY))
++		goto error;
+ 
+ 	genlmsg_end(nl_skb, data);
+ 	genlmsg_multicast_netns(&psample_nl_family, group->net, nl_skb, 0,
+diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
+index f2f9b75008bb05..8d8b2db4653c0c 100644
+--- a/net/sched/sch_cake.c
++++ b/net/sched/sch_cake.c
+@@ -1525,7 +1525,6 @@ static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free)
+ 	b->backlogs[idx]    -= len;
+ 	b->tin_backlog      -= len;
+ 	sch->qstats.backlog -= len;
+-	qdisc_tree_reduce_backlog(sch, 1, len);
+ 
+ 	flow->dropped++;
+ 	b->tin_dropped++;
+@@ -1536,6 +1535,7 @@ static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free)
+ 
+ 	__qdisc_drop(skb, to_free);
+ 	sch->q.qlen--;
++	qdisc_tree_reduce_backlog(sch, 1, len);
+ 
+ 	cake_heapify(q, 0);
+ 
+diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
+index 91072010923d18..757b89292e7e6f 100644
+--- a/net/sched/sch_choke.c
++++ b/net/sched/sch_choke.c
+@@ -123,10 +123,10 @@ static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx,
+ 	if (idx == q->tail)
+ 		choke_zap_tail_holes(q);
+ 
++	--sch->q.qlen;
+ 	qdisc_qstats_backlog_dec(sch, skb);
+ 	qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb));
+ 	qdisc_drop(skb, sch, to_free);
+-	--sch->q.qlen;
+ }
+ 
+ struct choke_skb_cb {
+diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
+index 9e6c69d18581ce..6cc7b846cff1bb 100644
+--- a/net/smc/af_smc.c
++++ b/net/smc/af_smc.c
+@@ -2032,6 +2032,8 @@ static int smc_listen_prfx_check(struct smc_sock *new_smc,
+ 	if (pclc->hdr.typev1 == SMC_TYPE_N)
+ 		return 0;
+ 	pclc_prfx = smc_clc_proposal_get_prefix(pclc);
++	if (!pclc_prfx)
++		return -EPROTO;
+ 	if (smc_clc_prfx_match(newclcsock, pclc_prfx))
+ 		return SMC_CLC_DECL_DIFFPREFIX;
+ 
+@@ -2145,6 +2147,8 @@ static void smc_find_ism_v2_device_serv(struct smc_sock *new_smc,
+ 	pclc_smcd = smc_get_clc_msg_smcd(pclc);
+ 	smc_v2_ext = smc_get_clc_v2_ext(pclc);
+ 	smcd_v2_ext = smc_get_clc_smcd_v2_ext(smc_v2_ext);
++	if (!pclc_smcd || !smc_v2_ext || !smcd_v2_ext)
++		goto not_found;
+ 
+ 	mutex_lock(&smcd_dev_list.mutex);
+ 	if (pclc_smcd->ism.chid) {
+@@ -2221,7 +2225,9 @@ static void smc_find_ism_v1_device_serv(struct smc_sock *new_smc,
+ 	int rc = 0;
+ 
+ 	/* check if ISM V1 is available */
+-	if (!(ini->smcd_version & SMC_V1) || !smcd_indicated(ini->smc_type_v1))
++	if (!(ini->smcd_version & SMC_V1) ||
++	    !smcd_indicated(ini->smc_type_v1) ||
++	    !pclc_smcd)
+ 		goto not_found;
+ 	ini->is_smcd = true; /* prepare ISM check */
+ 	ini->ism_peer_gid[0].gid = ntohll(pclc_smcd->ism.gid);
+@@ -2272,7 +2278,8 @@ static void smc_find_rdma_v2_device_serv(struct smc_sock *new_smc,
+ 		goto not_found;
+ 
+ 	smc_v2_ext = smc_get_clc_v2_ext(pclc);
+-	if (!smc_clc_match_eid(ini->negotiated_eid, smc_v2_ext, NULL, NULL))
++	if (!smc_v2_ext ||
++	    !smc_clc_match_eid(ini->negotiated_eid, smc_v2_ext, NULL, NULL))
+ 		goto not_found;
+ 
+ 	/* prepare RDMA check */
+@@ -2881,6 +2888,13 @@ __poll_t smc_poll(struct file *file, struct socket *sock,
+ 			} else {
+ 				sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
+ 				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
++
++				if (sk->sk_state != SMC_INIT) {
++					/* Race breaker the same way as tcp_poll(). */
++					smp_mb__after_atomic();
++					if (atomic_read(&smc->conn.sndbuf_space))
++						mask |= EPOLLOUT | EPOLLWRNORM;
++				}
+ 			}
+ 			if (atomic_read(&smc->conn.bytes_to_rcv))
+ 				mask |= EPOLLIN | EPOLLRDNORM;
+diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
+index 33fa787c28ebb2..521f5df80e10ca 100644
+--- a/net/smc/smc_clc.c
++++ b/net/smc/smc_clc.c
+@@ -352,8 +352,11 @@ static bool smc_clc_msg_prop_valid(struct smc_clc_msg_proposal *pclc)
+ 	struct smc_clc_msg_hdr *hdr = &pclc->hdr;
+ 	struct smc_clc_v2_extension *v2_ext;
+ 
+-	v2_ext = smc_get_clc_v2_ext(pclc);
+ 	pclc_prfx = smc_clc_proposal_get_prefix(pclc);
++	if (!pclc_prfx ||
++	    pclc_prfx->ipv6_prefixes_cnt > SMC_CLC_MAX_V6_PREFIX)
++		return false;
++
+ 	if (hdr->version == SMC_V1) {
+ 		if (hdr->typev1 == SMC_TYPE_N)
+ 			return false;
+@@ -365,6 +368,13 @@ static bool smc_clc_msg_prop_valid(struct smc_clc_msg_proposal *pclc)
+ 			sizeof(struct smc_clc_msg_trail))
+ 			return false;
+ 	} else {
++		v2_ext = smc_get_clc_v2_ext(pclc);
++		if ((hdr->typev2 != SMC_TYPE_N &&
++		     (!v2_ext || v2_ext->hdr.eid_cnt > SMC_CLC_MAX_UEID)) ||
++		    (smcd_indicated(hdr->typev2) &&
++		     v2_ext->hdr.ism_gid_cnt > SMCD_CLC_MAX_V2_GID_ENTRIES))
++			return false;
++
+ 		if (ntohs(hdr->length) !=
+ 			sizeof(*pclc) +
+ 			sizeof(struct smc_clc_msg_smcd) +
+@@ -764,6 +774,11 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
+ 						SMC_CLC_RECV_BUF_LEN : datlen;
+ 		iov_iter_kvec(&msg.msg_iter, ITER_DEST, &vec, 1, recvlen);
+ 		len = sock_recvmsg(smc->clcsock, &msg, krflags);
++		if (len < recvlen) {
++			smc->sk.sk_err = EPROTO;
++			reason_code = -EPROTO;
++			goto out;
++		}
+ 		datlen -= len;
+ 	}
+ 	if (clcm->type == SMC_CLC_DECLINE) {
+diff --git a/net/smc/smc_clc.h b/net/smc/smc_clc.h
+index 5625fda2960b03..1a7676227f16c5 100644
+--- a/net/smc/smc_clc.h
++++ b/net/smc/smc_clc.h
+@@ -336,8 +336,12 @@ struct smc_clc_msg_decline_v2 {	/* clc decline message */
+ static inline struct smc_clc_msg_proposal_prefix *
+ smc_clc_proposal_get_prefix(struct smc_clc_msg_proposal *pclc)
+ {
++	u16 offset = ntohs(pclc->iparea_offset);
++
++	if (offset > sizeof(struct smc_clc_msg_smcd))
++		return NULL;
+ 	return (struct smc_clc_msg_proposal_prefix *)
+-	       ((u8 *)pclc + sizeof(*pclc) + ntohs(pclc->iparea_offset));
++	       ((u8 *)pclc + sizeof(*pclc) + offset);
+ }
+ 
+ static inline bool smcr_indicated(int smc_type)
+@@ -376,8 +380,14 @@ static inline struct smc_clc_v2_extension *
+ smc_get_clc_v2_ext(struct smc_clc_msg_proposal *prop)
+ {
+ 	struct smc_clc_msg_smcd *prop_smcd = smc_get_clc_msg_smcd(prop);
++	u16 max_offset;
+ 
+-	if (!prop_smcd || !ntohs(prop_smcd->v2_ext_offset))
++	max_offset = offsetof(struct smc_clc_msg_proposal_area, pclc_v2_ext) -
++		     offsetof(struct smc_clc_msg_proposal_area, pclc_smcd) -
++		     offsetofend(struct smc_clc_msg_smcd, v2_ext_offset);
++
++	if (!prop_smcd || !ntohs(prop_smcd->v2_ext_offset) ||
++	    ntohs(prop_smcd->v2_ext_offset) > max_offset)
+ 		return NULL;
+ 
+ 	return (struct smc_clc_v2_extension *)
+@@ -390,9 +400,15 @@ smc_get_clc_v2_ext(struct smc_clc_msg_proposal *prop)
+ static inline struct smc_clc_smcd_v2_extension *
+ smc_get_clc_smcd_v2_ext(struct smc_clc_v2_extension *prop_v2ext)
+ {
++	u16 max_offset = offsetof(struct smc_clc_msg_proposal_area, pclc_smcd_v2_ext) -
++		offsetof(struct smc_clc_msg_proposal_area, pclc_v2_ext) -
++		offsetof(struct smc_clc_v2_extension, hdr) -
++		offsetofend(struct smc_clnt_opts_area_hdr, smcd_v2_ext_offset);
++
+ 	if (!prop_v2ext)
+ 		return NULL;
+-	if (!ntohs(prop_v2ext->hdr.smcd_v2_ext_offset))
++	if (!ntohs(prop_v2ext->hdr.smcd_v2_ext_offset) ||
++	    ntohs(prop_v2ext->hdr.smcd_v2_ext_offset) > max_offset)
+ 		return NULL;
+ 
+ 	return (struct smc_clc_smcd_v2_extension *)
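
The common thread in the smc_clc.h hunks above is validating a peer-supplied offset against the largest value the containing structure can legally hold before using it in pointer arithmetic. A generic, runnable sketch of that check (toy message layout, not the SMC wire format):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct msg {
    uint16_t ext_offset; /* attacker-controlled */
    unsigned char body[64];
};

static const unsigned char *get_ext(const struct msg *m)
{
    size_t max_offset = sizeof(m->body) - 16; /* extension needs 16 bytes */

    if (m->ext_offset > max_offset)
        return NULL; /* reject instead of reading out of bounds */
    return m->body + m->ext_offset;
}

int main(void)
{
    struct msg m;
    const unsigned char *p;

    memset(&m, 0, sizeof(m));
    m.ext_offset = 200; /* bogus value from the wire */
    p = get_ext(&m);
    printf("offset 200: %s\n", p ? "accepted" : "rejected");
    m.ext_offset = 8;
    p = get_ext(&m);
    printf("offset 8:   %s\n", p ? "accepted" : "rejected");
    return 0;
}
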
+diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
+index 4e694860ece4ac..68515a41d776c4 100644
+--- a/net/smc/smc_core.c
++++ b/net/smc/smc_core.c
+@@ -1818,7 +1818,9 @@ void smcr_link_down_cond_sched(struct smc_link *lnk)
+ {
+ 	if (smc_link_downing(&lnk->state)) {
+ 		trace_smcr_link_down(lnk, __builtin_return_address(0));
+-		schedule_work(&lnk->link_down_wrk);
++		smcr_link_hold(lnk); /* smcr_link_put in link_down_wrk */
++		if (!schedule_work(&lnk->link_down_wrk))
++			smcr_link_put(lnk);
+ 	}
+ }
+ 
+@@ -1850,11 +1852,14 @@ static void smc_link_down_work(struct work_struct *work)
+ 	struct smc_link_group *lgr = link->lgr;
+ 
+ 	if (list_empty(&lgr->list))
+-		return;
++		goto out;
+ 	wake_up_all(&lgr->llc_msg_waiter);
+ 	down_write(&lgr->llc_conf_mutex);
+ 	smcr_link_down(link);
+ 	up_write(&lgr->llc_conf_mutex);
++
++out:
++	smcr_link_put(link); /* smcr_link_hold by schedulers of link_down_work */
+ }
+ 
+ static int smc_vlan_by_tcpsk_walk(struct net_device *lower_dev,
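
The smc_core.c hunk pairs a reference grab with schedule_work() and drops the reference again when schedule_work() reports the work was already queued, so the worker's single put always balances exactly one get. A userspace model of that accounting (plain counter instead of kref, single-threaded purely for illustration):

#include <stdbool.h>
#include <stdio.h>

static int refs = 1;
static bool queued;

static void get(void) { refs++; }
static void put(void) { refs--; }

static bool schedule_work(void)
{
    if (queued)
        return false; /* already pending: no new execution */
    queued = true;
    return true;
}

static void worker(void)
{
    queued = false;
    /* ... do the work ... */
    put(); /* pairs with the get() of whoever scheduled us */
}

int main(void)
{
    get();
    if (!schedule_work())
        put();
    get();
    if (!schedule_work())
        put(); /* second schedule: already queued, give the ref back */

    worker();
    printf("refs = %d (back to the initial 1)\n", refs);
    return 0;
}
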
+diff --git a/sound/soc/fsl/Kconfig b/sound/soc/fsl/Kconfig
+index e283751abfefe8..678540b7828059 100644
+--- a/sound/soc/fsl/Kconfig
++++ b/sound/soc/fsl/Kconfig
+@@ -29,6 +29,7 @@ config SND_SOC_FSL_SAI
+ config SND_SOC_FSL_MQS
+ 	tristate "Medium Quality Sound (MQS) module support"
+ 	depends on SND_SOC_FSL_SAI
++	depends on IMX_SCMI_MISC_DRV || !IMX_SCMI_MISC_DRV
+ 	select REGMAP_MMIO
+ 	help
+ 	  Say Y if you want to add Medium Quality Sound (MQS)
+diff --git a/tools/hv/hv_fcopy_uio_daemon.c b/tools/hv/hv_fcopy_uio_daemon.c
+index 7a00f3066a9807..12743d7f164f0d 100644
+--- a/tools/hv/hv_fcopy_uio_daemon.c
++++ b/tools/hv/hv_fcopy_uio_daemon.c
+@@ -35,8 +35,6 @@
+ #define WIN8_SRV_MINOR		1
+ #define WIN8_SRV_VERSION	(WIN8_SRV_MAJOR << 16 | WIN8_SRV_MINOR)
+ 
+-#define MAX_FOLDER_NAME		15
+-#define MAX_PATH_LEN		15
+ #define FCOPY_UIO		"/sys/bus/vmbus/devices/eb765408-105f-49b6-b4aa-c123b64d17d4/uio"
+ 
+ #define FCOPY_VER_COUNT		1
+@@ -51,7 +49,7 @@ static const int fw_versions[] = {
+ 
+ #define HV_RING_SIZE		0x4000 /* 16KB ring buffer size */
+ 
+-unsigned char desc[HV_RING_SIZE];
++static unsigned char desc[HV_RING_SIZE];
+ 
+ static int target_fd;
+ static char target_fname[PATH_MAX];
+@@ -409,8 +407,8 @@ int main(int argc, char *argv[])
+ 	struct vmbus_br txbr, rxbr;
+ 	void *ring;
+ 	uint32_t len = HV_RING_SIZE;
+-	char uio_name[MAX_FOLDER_NAME] = {0};
+-	char uio_dev_path[MAX_PATH_LEN] = {0};
++	char uio_name[NAME_MAX] = {0};
++	char uio_dev_path[PATH_MAX] = {0};
+ 
+ 	static struct option long_options[] = {
+ 		{"help",	no_argument,	   0,  'h' },
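
The daemon fix above replaces two ad-hoc 15-byte constants with the system limits. A minimal sketch of the sized-buffer pattern, assuming Linux where <limits.h> provides NAME_MAX and PATH_MAX:

#include <limits.h> /* PATH_MAX, NAME_MAX on Linux */
#include <stdio.h>

int main(void)
{
    char uio_name[NAME_MAX] = "uio0";
    char uio_dev_path[PATH_MAX];
    int n = snprintf(uio_dev_path, sizeof(uio_dev_path), "/dev/%s", uio_name);

    if (n < 0 || (size_t)n >= sizeof(uio_dev_path))
        return 1; /* would have been truncated */
    printf("%s\n", uio_dev_path);
    return 0;
}
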
+diff --git a/tools/hv/hv_set_ifconfig.sh b/tools/hv/hv_set_ifconfig.sh
+index 440a91b35823bf..2f8baed2b8f796 100755
+--- a/tools/hv/hv_set_ifconfig.sh
++++ b/tools/hv/hv_set_ifconfig.sh
+@@ -81,7 +81,7 @@ echo "ONBOOT=yes" >> $1
+ 
+ cp $1 /etc/sysconfig/network-scripts/
+ 
+-chmod 600 $2
++umask 0177
+ interface=$(echo $2 | awk -F - '{ print $2 }')
+ filename="${2##*/}"
+ 
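
The shell change swaps a create-then-chmod sequence for setting the umask up front, so the file never exists with permissive bits. The same idea in C (the demo path under /tmp is illustrative):

#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
    umask(0177); /* strip group/other bits at creation time */

    int fd = open("/tmp/ifcfg-demo", O_CREAT | O_WRONLY | O_TRUNC, 0666);
    if (fd < 0)
        return 1;
    /* effective mode is 0666 & ~0177 = 0600 from the very first instant;
     * create-then-chmod would leave a window with 0666 visible */
    close(fd);

    struct stat st;
    if (stat("/tmp/ifcfg-demo", &st) == 0)
        printf("mode: %04o\n", st.st_mode & 07777);
    return 0;
}
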
+diff --git a/tools/net/ynl/lib/ynl.py b/tools/net/ynl/lib/ynl.py
+index c22c22bf2cb7d1..a3f741fed0a343 100644
+--- a/tools/net/ynl/lib/ynl.py
++++ b/tools/net/ynl/lib/ynl.py
+@@ -553,10 +553,10 @@ class YnlFamily(SpecFamily):
+         if attr["type"] == 'nest':
+             nl_type |= Netlink.NLA_F_NESTED
+             attr_payload = b''
+-            sub_attrs = SpaceAttrs(self.attr_sets[space], value, search_attrs)
++            sub_space = attr['nested-attributes']
++            sub_attrs = SpaceAttrs(self.attr_sets[sub_space], value, search_attrs)
+             for subname, subvalue in value.items():
+-                attr_payload += self._add_attr(attr['nested-attributes'],
+-                                               subname, subvalue, sub_attrs)
++                attr_payload += self._add_attr(sub_space, subname, subvalue, sub_attrs)
+         elif attr["type"] == 'flag':
+             if not value:
+                 # If value is absent or false then skip attribute creation.
+diff --git a/tools/testing/selftests/bpf/sdt.h b/tools/testing/selftests/bpf/sdt.h
+index ca0162b4dc5752..1fcfa5160231de 100644
+--- a/tools/testing/selftests/bpf/sdt.h
++++ b/tools/testing/selftests/bpf/sdt.h
+@@ -102,6 +102,8 @@
+ # define STAP_SDT_ARG_CONSTRAINT        nZr
+ # elif defined __arm__
+ # define STAP_SDT_ARG_CONSTRAINT        g
++# elif defined __loongarch__
++# define STAP_SDT_ARG_CONSTRAINT        nmr
+ # else
+ # define STAP_SDT_ARG_CONSTRAINT        nor
+ # endif
+diff --git a/tools/testing/selftests/memfd/memfd_test.c b/tools/testing/selftests/memfd/memfd_test.c
+index 95af2d78fd318c..0a0b5551602808 100644
+--- a/tools/testing/selftests/memfd/memfd_test.c
++++ b/tools/testing/selftests/memfd/memfd_test.c
+@@ -9,6 +9,7 @@
+ #include <fcntl.h>
+ #include <linux/memfd.h>
+ #include <sched.h>
++#include <stdbool.h>
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include <signal.h>
+@@ -1557,6 +1558,11 @@ static void test_share_fork(char *banner, char *b_suffix)
+ 	close(fd);
+ }
+ 
++static bool pid_ns_supported(void)
++{
++	return access("/proc/self/ns/pid", F_OK) == 0;
++}
++
+ int main(int argc, char **argv)
+ {
+ 	pid_t pid;
+@@ -1591,8 +1597,12 @@ int main(int argc, char **argv)
+ 	test_seal_grow();
+ 	test_seal_resize();
+ 
+-	test_sysctl_simple();
+-	test_sysctl_nested();
++	if (pid_ns_supported()) {
++		test_sysctl_simple();
++		test_sysctl_nested();
++	} else {
++		printf("PID namespaces are not supported; skipping sysctl tests\n");
++	}
+ 
+ 	test_share_dup("SHARE-DUP", "");
+ 	test_share_mmap("SHARE-MMAP", "");
+diff --git a/tools/testing/selftests/net/openvswitch/openvswitch.sh b/tools/testing/selftests/net/openvswitch/openvswitch.sh
+index cc0bfae2bafa1b..960e1ab4dd04b1 100755
+--- a/tools/testing/selftests/net/openvswitch/openvswitch.sh
++++ b/tools/testing/selftests/net/openvswitch/openvswitch.sh
+@@ -171,8 +171,10 @@ ovs_add_netns_and_veths () {
+ 		ovs_add_if "$1" "$2" "$4" -u || return 1
+ 	fi
+ 
+-	[ $TRACING -eq 1 ] && ovs_netns_spawn_daemon "$1" "$ns" \
+-			tcpdump -i any -s 65535
++	if [ $TRACING -eq 1 ]; then
++		ovs_netns_spawn_daemon "$1" "$3" tcpdump -l -i any -s 6553
++		ovs_wait grep -q "listening on any" ${ovs_dir}/stderr
++	fi
+ 
+ 	return 0
+ }


* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2024-12-19 18:07 Mike Pagano
  0 siblings, 0 replies; 82+ messages in thread
From: Mike Pagano @ 2024-12-19 18:07 UTC (permalink / raw
  To: gentoo-commits

commit:     1d0712601fc0cfb16d2abc9bf8d0e34c43b6afe4
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Dec 19 18:07:10 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Dec 19 18:07:10 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=1d071260

Linux patch 6.12.6

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1005_linux-6.12.6.patch | 8402 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 8406 insertions(+)

diff --git a/0000_README b/0000_README
index a2c9782d..1bb8df77 100644
--- a/0000_README
+++ b/0000_README
@@ -63,6 +63,10 @@ Patch:  1004_linux-6.12.5.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.12.5
 
+Patch:  1005_linux-6.12.6.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.12.6
+
 Patch:  1510_fs-enable-link-security-restrictions-by-default.patch
 From:   http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch/
 Desc:   Enable link security restrictions by default.

diff --git a/1005_linux-6.12.6.patch b/1005_linux-6.12.6.patch
new file mode 100644
index 00000000..e9bbd96e
--- /dev/null
+++ b/1005_linux-6.12.6.patch
@@ -0,0 +1,8402 @@
+diff --git a/Documentation/networking/ip-sysctl.rst b/Documentation/networking/ip-sysctl.rst
+index eacf8983e23074..dcbb6f6caf6de3 100644
+--- a/Documentation/networking/ip-sysctl.rst
++++ b/Documentation/networking/ip-sysctl.rst
+@@ -2170,6 +2170,12 @@ nexthop_compat_mode - BOOLEAN
+ 	understands the new API, this sysctl can be disabled to achieve full
+ 	performance benefits of the new API by disabling the nexthop expansion
+ 	and extraneous notifications.
++
++	Note that as a backward-compatible mode, dumping of modern features
++	might be incomplete or wrong. For example, resilient groups will not be
++	shown as such, but rather as just a list of next hops. Also weights that
++	do not fit into 8 bits will show incorrectly.
++
+ 	Default: true (backward compat mode)
+ 
+ fib_notify_on_flag_change - INTEGER
+diff --git a/Documentation/power/runtime_pm.rst b/Documentation/power/runtime_pm.rst
+index 53d1996460abfc..12f429359a823e 100644
+--- a/Documentation/power/runtime_pm.rst
++++ b/Documentation/power/runtime_pm.rst
+@@ -347,7 +347,9 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
+ 
+   `int pm_runtime_resume_and_get(struct device *dev);`
+     - run pm_runtime_resume(dev) and if successful, increment the device's
+-      usage counter; return the result of pm_runtime_resume
++      usage counter; returns 0 on success (whether or not the device's
++      runtime PM status was already 'active') or the error code from
++      pm_runtime_resume() on failure.
+ 
+   `int pm_request_idle(struct device *dev);`
+     - submit a request to execute the subsystem-level idle callback for the
+diff --git a/Makefile b/Makefile
+index f158bfe6407ac9..c10952585c14b0 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 12
+-SUBLEVEL = 5
++SUBLEVEL = 6
+ EXTRAVERSION =
+ NAME = Baby Opossum Posse
+ 
+diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
+index ff8c4e1b847ed4..fbed433283c9b9 100644
+--- a/arch/arm64/kvm/sys_regs.c
++++ b/arch/arm64/kvm/sys_regs.c
+@@ -1535,6 +1535,7 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
+ 		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTEX);
+ 		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_DF2);
+ 		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_PFAR);
++		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MPAM_frac);
+ 		break;
+ 	case SYS_ID_AA64PFR2_EL1:
+ 		/* We only expose FPMR */
+@@ -1724,6 +1725,13 @@ static u64 read_sanitised_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
+ 
+ 	val &= ~ID_AA64PFR0_EL1_AMU_MASK;
+ 
++	/*
++	 * MPAM is disabled by default as KVM also needs a set of PARTID to
++	 * program the MPAMVPMx_EL2 PARTID remapping registers with. But some
++	 * older kernels let the guest see the ID bit.
++	 */
++	val &= ~ID_AA64PFR0_EL1_MPAM_MASK;
++
+ 	return val;
+ }
+ 
+@@ -1834,6 +1842,42 @@ static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
+ 	return set_id_reg(vcpu, rd, val);
+ }
+ 
++static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
++			       const struct sys_reg_desc *rd, u64 user_val)
++{
++	u64 hw_val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
++	u64 mpam_mask = ID_AA64PFR0_EL1_MPAM_MASK;
++
++	/*
++	 * Commit 011e5f5bf529f ("arm64/cpufeature: Add remaining feature bits
++	 * in ID_AA64PFR0 register") exposed the MPAM field of AA64PFR0_EL1 to
++	 * guests, but didn't add trap handling. KVM doesn't support MPAM and
++	 * always returns an UNDEF for these registers. The guest must see 0
++	 * for this field.
++	 *
++	 * But KVM must also accept values from user-space that were provided
++	 * by KVM. On CPUs that support MPAM, permit user-space to write
++	 * the sanitizied value to ID_AA64PFR0_EL1.MPAM, but ignore this field.
++	 */
++	if ((hw_val & mpam_mask) == (user_val & mpam_mask))
++		user_val &= ~ID_AA64PFR0_EL1_MPAM_MASK;
++
++	return set_id_reg(vcpu, rd, user_val);
++}
++
++static int set_id_aa64pfr1_el1(struct kvm_vcpu *vcpu,
++			       const struct sys_reg_desc *rd, u64 user_val)
++{
++	u64 hw_val = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1);
++	u64 mpam_mask = ID_AA64PFR1_EL1_MPAM_frac_MASK;
++
++	/* See set_id_aa64pfr0_el1 for comment about MPAM */
++	if ((hw_val & mpam_mask) == (user_val & mpam_mask))
++		user_val &= ~ID_AA64PFR1_EL1_MPAM_frac_MASK;
++
++	return set_id_reg(vcpu, rd, user_val);
++}
++
+ /*
+  * cpufeature ID register user accessors
+  *
+@@ -2377,7 +2421,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
+ 	{ SYS_DESC(SYS_ID_AA64PFR0_EL1),
+ 	  .access = access_id_reg,
+ 	  .get_user = get_id_reg,
+-	  .set_user = set_id_reg,
++	  .set_user = set_id_aa64pfr0_el1,
+ 	  .reset = read_sanitised_id_aa64pfr0_el1,
+ 	  .val = ~(ID_AA64PFR0_EL1_AMU |
+ 		   ID_AA64PFR0_EL1_MPAM |
+@@ -2385,7 +2429,12 @@ static const struct sys_reg_desc sys_reg_descs[] = {
+ 		   ID_AA64PFR0_EL1_RAS |
+ 		   ID_AA64PFR0_EL1_AdvSIMD |
+ 		   ID_AA64PFR0_EL1_FP), },
+-	ID_WRITABLE(ID_AA64PFR1_EL1, ~(ID_AA64PFR1_EL1_PFAR |
++	{ SYS_DESC(SYS_ID_AA64PFR1_EL1),
++	  .access	= access_id_reg,
++	  .get_user	= get_id_reg,
++	  .set_user	= set_id_aa64pfr1_el1,
++	  .reset	= kvm_read_sanitised_id_reg,
++	  .val		=	     ~(ID_AA64PFR1_EL1_PFAR |
+ 				       ID_AA64PFR1_EL1_DF2 |
+ 				       ID_AA64PFR1_EL1_MTEX |
+ 				       ID_AA64PFR1_EL1_THE |
+@@ -2397,7 +2446,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
+ 				       ID_AA64PFR1_EL1_RES0 |
+ 				       ID_AA64PFR1_EL1_MPAM_frac |
+ 				       ID_AA64PFR1_EL1_RAS_frac |
+-				       ID_AA64PFR1_EL1_MTE)),
++				       ID_AA64PFR1_EL1_MTE), },
+ 	ID_WRITABLE(ID_AA64PFR2_EL1, ID_AA64PFR2_EL1_FPMR),
+ 	ID_UNALLOCATED(4,3),
+ 	ID_WRITABLE(ID_AA64ZFR0_EL1, ~ID_AA64ZFR0_EL1_RES0),
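
The arm64 hunks above follow a migration-compatibility rule: a field KVM cannot virtualize must read as zero, yet a userspace write is still accepted if it merely echoes the sanitised host value, then discarded. A toy standalone version of that accept-but-ignore check (made-up register layout, not the real ID-register encoding):

#include <stdint.h>
#include <stdio.h>

#define FIELD_MASK 0x0f00u

static uint32_t hw_val = 0x0200; /* host supports the feature */

static int set_reg(uint32_t *reg, uint32_t user_val)
{
    /* writing the sanitised host value is tolerated, then ignored */
    if ((hw_val & FIELD_MASK) == (user_val & FIELD_MASK))
        user_val &= ~FIELD_MASK;

    if (user_val & FIELD_MASK)
        return -1; /* any other nonzero value is rejected */
    *reg = user_val;
    return 0;
}

int main(void)
{
    uint32_t reg = 0;

    printf("echo host value: %d\n", set_reg(&reg, 0x0200)); /* 0  */
    printf("other value:     %d\n", set_reg(&reg, 0x0100)); /* -1 */
    printf("zero:            %d\n", set_reg(&reg, 0x0000)); /* 0  */
    return 0;
}
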
+diff --git a/arch/riscv/include/asm/kfence.h b/arch/riscv/include/asm/kfence.h
+index 7388edd88986f9..d08bf7fb3aee61 100644
+--- a/arch/riscv/include/asm/kfence.h
++++ b/arch/riscv/include/asm/kfence.h
+@@ -22,7 +22,9 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect)
+ 	else
+ 		set_pte(pte, __pte(pte_val(ptep_get(pte)) | _PAGE_PRESENT));
+ 
+-	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
++	preempt_disable();
++	local_flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
++	preempt_enable();
+ 
+ 	return true;
+ }
+diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
+index 26c886db4fb3d1..2b3c152d3c91f5 100644
+--- a/arch/riscv/kernel/setup.c
++++ b/arch/riscv/kernel/setup.c
+@@ -227,7 +227,7 @@ static void __init init_resources(void)
+ static void __init parse_dtb(void)
+ {
+ 	/* Early scan of device tree from init memory */
+-	if (early_init_dt_scan(dtb_early_va, __pa(dtb_early_va))) {
++	if (early_init_dt_scan(dtb_early_va, dtb_early_pa)) {
+ 		const char *name = of_flat_dt_get_machine_name();
+ 
+ 		if (name) {
+diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
+index 0e8c20adcd98df..fc53ce748c8049 100644
+--- a/arch/riscv/mm/init.c
++++ b/arch/riscv/mm/init.c
+@@ -1566,7 +1566,7 @@ static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd)
+ 	pmd_clear(pmd);
+ }
+ 
+-static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud)
++static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud, bool is_vmemmap)
+ {
+ 	struct page *page = pud_page(*pud);
+ 	struct ptdesc *ptdesc = page_ptdesc(page);
+@@ -1579,7 +1579,8 @@ static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud)
+ 			return;
+ 	}
+ 
+-	pagetable_pmd_dtor(ptdesc);
++	if (!is_vmemmap)
++		pagetable_pmd_dtor(ptdesc);
+ 	if (PageReserved(page))
+ 		free_reserved_page(page);
+ 	else
+@@ -1703,7 +1704,7 @@ static void __meminit remove_pud_mapping(pud_t *pud_base, unsigned long addr, un
+ 		remove_pmd_mapping(pmd_base, addr, next, is_vmemmap, altmap);
+ 
+ 		if (pgtable_l4_enabled)
+-			free_pmd_table(pmd_base, pudp);
++			free_pmd_table(pmd_base, pudp, is_vmemmap);
+ 	}
+ }
+ 
+diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
+index fa5ea65de0d0fa..6188650707ab27 100644
+--- a/arch/x86/events/intel/ds.c
++++ b/arch/x86/events/intel/ds.c
+@@ -1468,7 +1468,7 @@ void intel_pmu_pebs_enable(struct perf_event *event)
+ 			 * hence we need to drain when changing said
+ 			 * size.
+ 			 */
+-			intel_pmu_drain_large_pebs(cpuc);
++			intel_pmu_drain_pebs_buffer();
+ 			adaptive_pebs_record_size_update();
+ 			wrmsrl(MSR_PEBS_DATA_CFG, pebs_data_cfg);
+ 			cpuc->active_pebs_data_cfg = pebs_data_cfg;
+diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
+index 4a686f0e5dbf6d..2d776635aa539e 100644
+--- a/arch/x86/include/asm/processor.h
++++ b/arch/x86/include/asm/processor.h
+@@ -212,6 +212,8 @@ static inline unsigned long long l1tf_pfn_limit(void)
+ 	return BIT_ULL(boot_cpu_data.x86_cache_bits - 1 - PAGE_SHIFT);
+ }
+ 
++void init_cpu_devs(void);
++void get_cpu_vendor(struct cpuinfo_x86 *c);
+ extern void early_cpu_init(void);
+ extern void identify_secondary_cpu(struct cpuinfo_x86 *);
+ extern void print_cpu_info(struct cpuinfo_x86 *);
+diff --git a/arch/x86/include/asm/static_call.h b/arch/x86/include/asm/static_call.h
+index 125c407e2abe6d..41502bd2afd646 100644
+--- a/arch/x86/include/asm/static_call.h
++++ b/arch/x86/include/asm/static_call.h
+@@ -65,4 +65,19 @@
+ 
+ extern bool __static_call_fixup(void *tramp, u8 op, void *dest);
+ 
++extern void __static_call_update_early(void *tramp, void *func);
++
++#define static_call_update_early(name, _func)				\
++({									\
++	typeof(&STATIC_CALL_TRAMP(name)) __F = (_func);			\
++	if (static_call_initialized) {					\
++		__static_call_update(&STATIC_CALL_KEY(name),		\
++				     STATIC_CALL_TRAMP_ADDR(name), __F);\
++	} else {							\
++		WRITE_ONCE(STATIC_CALL_KEY(name).func, _func);		\
++		__static_call_update_early(STATIC_CALL_TRAMP_ADDR(name),\
++					   __F);			\
++	}								\
++})
++
+ #endif /* _ASM_STATIC_CALL_H */
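
static_call_update_early() exists because very early in boot the static-call machinery is not initialized yet, so the trampoline is patched directly instead of going through __static_call_update(). A rough userspace analogue using a plain function pointer (no text patching, purely to show the two update paths):

#include <stdio.h>

static int initialized;

static void real_a(void) { puts("a"); }
static void real_b(void) { puts("b"); }

static void (*target)(void) = real_a; /* stands in for the trampoline */

static void managed_update(void (*f)(void))
{
    /* the real path would take locks, patch text, sync cores ... */
    target = f;
}

static void update(void (*f)(void))
{
    if (initialized)
        managed_update(f);
    else
        target = f; /* early boot: plain write, nothing else runs yet */
}

int main(void)
{
    update(real_b); /* "early" update */
    target();
    initialized = 1;
    update(real_a); /* managed update */
    target();
    return 0;
}
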
+diff --git a/arch/x86/include/asm/sync_core.h b/arch/x86/include/asm/sync_core.h
+index ab7382f92aff27..96bda43538ee70 100644
+--- a/arch/x86/include/asm/sync_core.h
++++ b/arch/x86/include/asm/sync_core.h
+@@ -8,7 +8,7 @@
+ #include <asm/special_insns.h>
+ 
+ #ifdef CONFIG_X86_32
+-static inline void iret_to_self(void)
++static __always_inline void iret_to_self(void)
+ {
+ 	asm volatile (
+ 		"pushfl\n\t"
+@@ -19,7 +19,7 @@ static inline void iret_to_self(void)
+ 		: ASM_CALL_CONSTRAINT : : "memory");
+ }
+ #else
+-static inline void iret_to_self(void)
++static __always_inline void iret_to_self(void)
+ {
+ 	unsigned int tmp;
+ 
+@@ -55,7 +55,7 @@ static inline void iret_to_self(void)
+  * Like all of Linux's memory ordering operations, this is a
+  * compiler barrier as well.
+  */
+-static inline void sync_core(void)
++static __always_inline void sync_core(void)
+ {
+ 	/*
+ 	 * The SERIALIZE instruction is the most straightforward way to
+diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
+index a2dd24947eb85a..97771b9d33af30 100644
+--- a/arch/x86/include/asm/xen/hypercall.h
++++ b/arch/x86/include/asm/xen/hypercall.h
+@@ -39,9 +39,11 @@
+ #include <linux/string.h>
+ #include <linux/types.h>
+ #include <linux/pgtable.h>
++#include <linux/instrumentation.h>
+ 
+ #include <trace/events/xen.h>
+ 
++#include <asm/alternative.h>
+ #include <asm/page.h>
+ #include <asm/smap.h>
+ #include <asm/nospec-branch.h>
+@@ -86,11 +88,20 @@ struct xen_dm_op_buf;
+  * there aren't more than 5 arguments...)
+  */
+ 
+-extern struct { char _entry[32]; } hypercall_page[];
++void xen_hypercall_func(void);
++DECLARE_STATIC_CALL(xen_hypercall, xen_hypercall_func);
+ 
+-#define __HYPERCALL		"call hypercall_page+%c[offset]"
+-#define __HYPERCALL_ENTRY(x)						\
+-	[offset] "i" (__HYPERVISOR_##x * sizeof(hypercall_page[0]))
++#ifdef MODULE
++#define __ADDRESSABLE_xen_hypercall
++#else
++#define __ADDRESSABLE_xen_hypercall __ADDRESSABLE_ASM_STR(__SCK__xen_hypercall)
++#endif
++
++#define __HYPERCALL					\
++	__ADDRESSABLE_xen_hypercall			\
++	"call __SCT__xen_hypercall"
++
++#define __HYPERCALL_ENTRY(x)	"a" (x)
+ 
+ #ifdef CONFIG_X86_32
+ #define __HYPERCALL_RETREG	"eax"
+@@ -148,7 +159,7 @@ extern struct { char _entry[32]; } hypercall_page[];
+ 	__HYPERCALL_0ARG();						\
+ 	asm volatile (__HYPERCALL					\
+ 		      : __HYPERCALL_0PARAM				\
+-		      : __HYPERCALL_ENTRY(name)				\
++		      : __HYPERCALL_ENTRY(__HYPERVISOR_ ## name)	\
+ 		      : __HYPERCALL_CLOBBER0);				\
+ 	(type)__res;							\
+ })
+@@ -159,7 +170,7 @@ extern struct { char _entry[32]; } hypercall_page[];
+ 	__HYPERCALL_1ARG(a1);						\
+ 	asm volatile (__HYPERCALL					\
+ 		      : __HYPERCALL_1PARAM				\
+-		      : __HYPERCALL_ENTRY(name)				\
++		      : __HYPERCALL_ENTRY(__HYPERVISOR_ ## name)	\
+ 		      : __HYPERCALL_CLOBBER1);				\
+ 	(type)__res;							\
+ })
+@@ -170,7 +181,7 @@ extern struct { char _entry[32]; } hypercall_page[];
+ 	__HYPERCALL_2ARG(a1, a2);					\
+ 	asm volatile (__HYPERCALL					\
+ 		      : __HYPERCALL_2PARAM				\
+-		      : __HYPERCALL_ENTRY(name)				\
++		      : __HYPERCALL_ENTRY(__HYPERVISOR_ ## name)	\
+ 		      : __HYPERCALL_CLOBBER2);				\
+ 	(type)__res;							\
+ })
+@@ -181,7 +192,7 @@ extern struct { char _entry[32]; } hypercall_page[];
+ 	__HYPERCALL_3ARG(a1, a2, a3);					\
+ 	asm volatile (__HYPERCALL					\
+ 		      : __HYPERCALL_3PARAM				\
+-		      : __HYPERCALL_ENTRY(name)				\
++		      : __HYPERCALL_ENTRY(__HYPERVISOR_ ## name)	\
+ 		      : __HYPERCALL_CLOBBER3);				\
+ 	(type)__res;							\
+ })
+@@ -192,7 +203,7 @@ extern struct { char _entry[32]; } hypercall_page[];
+ 	__HYPERCALL_4ARG(a1, a2, a3, a4);				\
+ 	asm volatile (__HYPERCALL					\
+ 		      : __HYPERCALL_4PARAM				\
+-		      : __HYPERCALL_ENTRY(name)				\
++		      : __HYPERCALL_ENTRY(__HYPERVISOR_ ## name)	\
+ 		      : __HYPERCALL_CLOBBER4);				\
+ 	(type)__res;							\
+ })
+@@ -206,12 +217,9 @@ xen_single_call(unsigned int call,
+ 	__HYPERCALL_DECLS;
+ 	__HYPERCALL_5ARG(a1, a2, a3, a4, a5);
+ 
+-	if (call >= PAGE_SIZE / sizeof(hypercall_page[0]))
+-		return -EINVAL;
+-
+-	asm volatile(CALL_NOSPEC
++	asm volatile(__HYPERCALL
+ 		     : __HYPERCALL_5PARAM
+-		     : [thunk_target] "a" (&hypercall_page[call])
++		     : __HYPERCALL_ENTRY(call)
+ 		     : __HYPERCALL_CLOBBER5);
+ 
+ 	return (long)__res;
+diff --git a/arch/x86/kernel/callthunks.c b/arch/x86/kernel/callthunks.c
+index 4656474567533b..f17d166078823c 100644
+--- a/arch/x86/kernel/callthunks.c
++++ b/arch/x86/kernel/callthunks.c
+@@ -142,11 +142,6 @@ static bool skip_addr(void *dest)
+ 	if (dest >= (void *)relocate_kernel &&
+ 	    dest < (void*)relocate_kernel + KEXEC_CONTROL_CODE_MAX_SIZE)
+ 		return true;
+-#endif
+-#ifdef CONFIG_XEN
+-	if (dest >= (void *)hypercall_page &&
+-	    dest < (void*)hypercall_page + PAGE_SIZE)
+-		return true;
+ #endif
+ 	return false;
+ }
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index b17bcf9b67eed4..f439763f45ae6f 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -868,7 +868,7 @@ static void cpu_detect_tlb(struct cpuinfo_x86 *c)
+ 		tlb_lld_4m[ENTRIES], tlb_lld_1g[ENTRIES]);
+ }
+ 
+-static void get_cpu_vendor(struct cpuinfo_x86 *c)
++void get_cpu_vendor(struct cpuinfo_x86 *c)
+ {
+ 	char *v = c->x86_vendor_id;
+ 	int i;
+@@ -1652,15 +1652,11 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
+ 	detect_nopl();
+ }
+ 
+-void __init early_cpu_init(void)
++void __init init_cpu_devs(void)
+ {
+ 	const struct cpu_dev *const *cdev;
+ 	int count = 0;
+ 
+-#ifdef CONFIG_PROCESSOR_SELECT
+-	pr_info("KERNEL supported cpus:\n");
+-#endif
+-
+ 	for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
+ 		const struct cpu_dev *cpudev = *cdev;
+ 
+@@ -1668,20 +1664,30 @@ void __init early_cpu_init(void)
+ 			break;
+ 		cpu_devs[count] = cpudev;
+ 		count++;
++	}
++}
+ 
++void __init early_cpu_init(void)
++{
+ #ifdef CONFIG_PROCESSOR_SELECT
+-		{
+-			unsigned int j;
+-
+-			for (j = 0; j < 2; j++) {
+-				if (!cpudev->c_ident[j])
+-					continue;
+-				pr_info("  %s %s\n", cpudev->c_vendor,
+-					cpudev->c_ident[j]);
+-			}
+-		}
++	unsigned int i, j;
++
++	pr_info("KERNEL supported cpus:\n");
+ #endif
++
++	init_cpu_devs();
++
++#ifdef CONFIG_PROCESSOR_SELECT
++	for (i = 0; i < X86_VENDOR_NUM && cpu_devs[i]; i++) {
++		for (j = 0; j < 2; j++) {
++			if (!cpu_devs[i]->c_ident[j])
++				continue;
++			pr_info("  %s %s\n", cpu_devs[i]->c_vendor,
++				cpu_devs[i]->c_ident[j]);
++		}
+ 	}
++#endif
++
+ 	early_identify_cpu(&boot_cpu_data);
+ }
+ 
+diff --git a/arch/x86/kernel/static_call.c b/arch/x86/kernel/static_call.c
+index 4eefaac64c6cba..9eed0c144dad51 100644
+--- a/arch/x86/kernel/static_call.c
++++ b/arch/x86/kernel/static_call.c
+@@ -172,6 +172,15 @@ void arch_static_call_transform(void *site, void *tramp, void *func, bool tail)
+ }
+ EXPORT_SYMBOL_GPL(arch_static_call_transform);
+ 
++noinstr void __static_call_update_early(void *tramp, void *func)
++{
++	BUG_ON(system_state != SYSTEM_BOOTING);
++	BUG_ON(!early_boot_irqs_disabled);
++	BUG_ON(static_call_initialized);
++	__text_gen_insn(tramp, JMP32_INSN_OPCODE, tramp, func, JMP32_INSN_SIZE);
++	sync_core();
++}
++
+ #ifdef CONFIG_MITIGATION_RETHUNK
+ /*
+  * This is called by apply_returns() to fix up static call trampolines,
+diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
+index 84e5adbd0925cb..b4f3784f27e956 100644
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -2,6 +2,7 @@
+ 
+ #include <linux/console.h>
+ #include <linux/cpu.h>
++#include <linux/instrumentation.h>
+ #include <linux/kexec.h>
+ #include <linux/memblock.h>
+ #include <linux/slab.h>
+@@ -21,7 +22,8 @@
+ 
+ #include "xen-ops.h"
+ 
+-EXPORT_SYMBOL_GPL(hypercall_page);
++DEFINE_STATIC_CALL(xen_hypercall, xen_hypercall_hvm);
++EXPORT_STATIC_CALL_TRAMP(xen_hypercall);
+ 
+ /*
+  * Pointer to the xen_vcpu_info structure or
+@@ -68,6 +70,66 @@ EXPORT_SYMBOL(xen_start_flags);
+  */
+ struct shared_info *HYPERVISOR_shared_info = &xen_dummy_shared_info;
+ 
++static __ref void xen_get_vendor(void)
++{
++	init_cpu_devs();
++	cpu_detect(&boot_cpu_data);
++	get_cpu_vendor(&boot_cpu_data);
++}
++
++void xen_hypercall_setfunc(void)
++{
++	if (static_call_query(xen_hypercall) != xen_hypercall_hvm)
++		return;
++
++	if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
++	     boot_cpu_data.x86_vendor == X86_VENDOR_HYGON))
++		static_call_update(xen_hypercall, xen_hypercall_amd);
++	else
++		static_call_update(xen_hypercall, xen_hypercall_intel);
++}
++
++/*
++ * Evaluate processor vendor in order to select the correct hypercall
++ * function for HVM/PVH guests.
++ * Might be called very early in boot before vendor has been set by
++ * early_cpu_init().
++ */
++noinstr void *__xen_hypercall_setfunc(void)
++{
++	void (*func)(void);
++
++	/*
++	 * Xen is supported only on CPUs with CPUID, so testing for
++	 * X86_FEATURE_CPUID is a test for early_cpu_init() having been
++	 * run.
++	 *
++	 * Note that __xen_hypercall_setfunc() is noinstr only due to a nasty
++	 * dependency chain: it is being called via the xen_hypercall static
++	 * call when running as a PVH or HVM guest. Hypercalls need to be
++ * noinstr due to PV guests using hypercalls in noinstr code. So we
++ * can safely tag the function body as "instrumentation ok", since
++	 * the PV guest requirement is not of interest here (xen_get_vendor()
++	 * calls noinstr functions, and static_call_update_early() might do
++	 * so, too).
++	 */
++	instrumentation_begin();
++
++	if (!boot_cpu_has(X86_FEATURE_CPUID))
++		xen_get_vendor();
++
++	if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
++	     boot_cpu_data.x86_vendor == X86_VENDOR_HYGON))
++		func = xen_hypercall_amd;
++	else
++		func = xen_hypercall_intel;
++
++	static_call_update_early(xen_hypercall, func);
++
++	instrumentation_end();
++
++	return func;
++}
++
+ static int xen_cpu_up_online(unsigned int cpu)
+ {
+ 	xen_init_lock_cpu(cpu);
+diff --git a/arch/x86/xen/enlighten_hvm.c b/arch/x86/xen/enlighten_hvm.c
+index 24d2957a4726d8..fe57ff85d004ba 100644
+--- a/arch/x86/xen/enlighten_hvm.c
++++ b/arch/x86/xen/enlighten_hvm.c
+@@ -106,15 +106,8 @@ static void __init init_hvm_pv_info(void)
+ 	/* PVH set up hypercall page in xen_prepare_pvh(). */
+ 	if (xen_pvh_domain())
+ 		pv_info.name = "Xen PVH";
+-	else {
+-		u64 pfn;
+-		uint32_t msr;
+-
++	else
+ 		pv_info.name = "Xen HVM";
+-		msr = cpuid_ebx(base + 2);
+-		pfn = __pa(hypercall_page);
+-		wrmsr_safe(msr, (u32)pfn, (u32)(pfn >> 32));
+-	}
+ 
+ 	xen_setup_features();
+ 
+@@ -300,6 +293,10 @@ static uint32_t __init xen_platform_hvm(void)
+ 	if (xen_pv_domain())
+ 		return 0;
+ 
++	/* Set correct hypercall function. */
++	if (xen_domain)
++		xen_hypercall_setfunc();
++
+ 	if (xen_pvh_domain() && nopv) {
+ 		/* Guest booting via the Xen-PVH boot entry goes here */
+ 		pr_info("\"nopv\" parameter is ignored in PVH guest\n");
+diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
+index d6818c6cafda16..a8eb7e0c473cf6 100644
+--- a/arch/x86/xen/enlighten_pv.c
++++ b/arch/x86/xen/enlighten_pv.c
+@@ -1341,6 +1341,9 @@ asmlinkage __visible void __init xen_start_kernel(struct start_info *si)
+ 
+ 	xen_domain_type = XEN_PV_DOMAIN;
+ 	xen_start_flags = xen_start_info->flags;
++	/* Interrupts are guaranteed to be off initially. */
++	early_boot_irqs_disabled = true;
++	static_call_update_early(xen_hypercall, xen_hypercall_pv);
+ 
+ 	xen_setup_features();
+ 
+@@ -1431,7 +1434,6 @@ asmlinkage __visible void __init xen_start_kernel(struct start_info *si)
+ 	WARN_ON(xen_cpuhp_setup(xen_cpu_up_prepare_pv, xen_cpu_dead_pv));
+ 
+ 	local_irq_disable();
+-	early_boot_irqs_disabled = true;
+ 
+ 	xen_raw_console_write("mapping kernel into physical memory\n");
+ 	xen_setup_kernel_pagetable((pgd_t *)xen_start_info->pt_base,
+diff --git a/arch/x86/xen/enlighten_pvh.c b/arch/x86/xen/enlighten_pvh.c
+index bf68c329fc013e..0e3d930bcb89e8 100644
+--- a/arch/x86/xen/enlighten_pvh.c
++++ b/arch/x86/xen/enlighten_pvh.c
+@@ -129,17 +129,10 @@ static void __init pvh_arch_setup(void)
+ 
+ void __init xen_pvh_init(struct boot_params *boot_params)
+ {
+-	u32 msr;
+-	u64 pfn;
+-
+ 	xen_pvh = 1;
+ 	xen_domain_type = XEN_HVM_DOMAIN;
+ 	xen_start_flags = pvh_start_info.flags;
+ 
+-	msr = cpuid_ebx(xen_cpuid_base() + 2);
+-	pfn = __pa(hypercall_page);
+-	wrmsr_safe(msr, (u32)pfn, (u32)(pfn >> 32));
+-
+ 	x86_init.oem.arch_setup = pvh_arch_setup;
+ 	x86_init.oem.banner = xen_banner;
+ 
+diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S
+index 83189cf5cdce93..b518f36d1ca2e7 100644
+--- a/arch/x86/xen/xen-asm.S
++++ b/arch/x86/xen/xen-asm.S
+@@ -20,9 +20,32 @@
+ 
+ #include <linux/init.h>
+ #include <linux/linkage.h>
++#include <linux/objtool.h>
+ #include <../entry/calling.h>
+ 
+ .pushsection .noinstr.text, "ax"
++/*
++ * PV hypercall interface to the hypervisor.
++ *
++ * Called via inline asm(), so better preserve %rcx and %r11.
++ *
++ * Input:
++ *	%eax: hypercall number
++ *	%rdi, %rsi, %rdx, %r10, %r8: args 1..5 for the hypercall
++ * Output: %rax
++ */
++SYM_FUNC_START(xen_hypercall_pv)
++	ANNOTATE_NOENDBR
++	push %rcx
++	push %r11
++	UNWIND_HINT_SAVE
++	syscall
++	UNWIND_HINT_RESTORE
++	pop %r11
++	pop %rcx
++	RET
++SYM_FUNC_END(xen_hypercall_pv)
++
+ /*
+  * Disabling events is simply a matter of making the event mask
+  * non-zero.
+@@ -176,7 +199,6 @@ SYM_CODE_START(xen_early_idt_handler_array)
+ SYM_CODE_END(xen_early_idt_handler_array)
+ 	__FINIT
+ 
+-hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32
+ /*
+  * Xen64 iret frame:
+  *
+@@ -186,17 +208,28 @@ hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32
+  *	cs
+  *	rip		<-- standard iret frame
+  *
+- *	flags
++ *	flags		<-- xen_iret must push from here on
+  *
+- *	rcx		}
+- *	r11		}<-- pushed by hypercall page
+- * rsp->rax		}
++ *	rcx
++ *	r11
++ * rsp->rax
+  */
++.macro xen_hypercall_iret
++	pushq $0	/* Flags */
++	push %rcx
++	push %r11
++	push %rax
++	mov  $__HYPERVISOR_iret, %eax
++	syscall		/* Do the IRET. */
++#ifdef CONFIG_MITIGATION_SLS
++	int3
++#endif
++.endm
++
+ SYM_CODE_START(xen_iret)
+ 	UNWIND_HINT_UNDEFINED
+ 	ANNOTATE_NOENDBR
+-	pushq $0
+-	jmp hypercall_iret
++	xen_hypercall_iret
+ SYM_CODE_END(xen_iret)
+ 
+ /*
+@@ -301,8 +334,7 @@ SYM_CODE_START(xen_entry_SYSENTER_compat)
+ 	ENDBR
+ 	lea 16(%rsp), %rsp	/* strip %rcx, %r11 */
+ 	mov $-ENOSYS, %rax
+-	pushq $0
+-	jmp hypercall_iret
++	xen_hypercall_iret
+ SYM_CODE_END(xen_entry_SYSENTER_compat)
+ SYM_CODE_END(xen_entry_SYSCALL_compat)
+ 
+diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
+index 758bcd47b72d32..721a57700a3b05 100644
+--- a/arch/x86/xen/xen-head.S
++++ b/arch/x86/xen/xen-head.S
+@@ -6,9 +6,11 @@
+ 
+ #include <linux/elfnote.h>
+ #include <linux/init.h>
++#include <linux/instrumentation.h>
+ 
+ #include <asm/boot.h>
+ #include <asm/asm.h>
++#include <asm/frame.h>
+ #include <asm/msr.h>
+ #include <asm/page_types.h>
+ #include <asm/percpu.h>
+@@ -20,28 +22,6 @@
+ #include <xen/interface/xen-mca.h>
+ #include <asm/xen/interface.h>
+ 
+-.pushsection .noinstr.text, "ax"
+-	.balign PAGE_SIZE
+-SYM_CODE_START(hypercall_page)
+-	.rept (PAGE_SIZE / 32)
+-		UNWIND_HINT_FUNC
+-		ANNOTATE_NOENDBR
+-		ANNOTATE_UNRET_SAFE
+-		ret
+-		/*
+-		 * Xen will write the hypercall page, and sort out ENDBR.
+-		 */
+-		.skip 31, 0xcc
+-	.endr
+-
+-#define HYPERCALL(n) \
+-	.equ xen_hypercall_##n, hypercall_page + __HYPERVISOR_##n * 32; \
+-	.type xen_hypercall_##n, @function; .size xen_hypercall_##n, 32
+-#include <asm/xen-hypercalls.h>
+-#undef HYPERCALL
+-SYM_CODE_END(hypercall_page)
+-.popsection
+-
+ #ifdef CONFIG_XEN_PV
+ 	__INIT
+ SYM_CODE_START(startup_xen)
+@@ -87,6 +67,87 @@ SYM_CODE_END(xen_cpu_bringup_again)
+ #endif
+ #endif
+ 
++	.pushsection .noinstr.text, "ax"
++/*
++ * Xen hypercall interface to the hypervisor.
++ *
++ * Input:
++ *     %eax: hypercall number
++ *   32-bit:
++ *     %ebx, %ecx, %edx, %esi, %edi: args 1..5 for the hypercall
++ *   64-bit:
++ *     %rdi, %rsi, %rdx, %r10, %r8: args 1..5 for the hypercall
++ * Output: %[er]ax
++ */
++SYM_FUNC_START(xen_hypercall_hvm)
++	ENDBR
++	FRAME_BEGIN
++	/* Save all relevant registers (caller save and arguments). */
++#ifdef CONFIG_X86_32
++	push %eax
++	push %ebx
++	push %ecx
++	push %edx
++	push %esi
++	push %edi
++#else
++	push %rax
++	push %rcx
++	push %rdx
++	push %rdi
++	push %rsi
++	push %r11
++	push %r10
++	push %r9
++	push %r8
++#ifdef CONFIG_FRAME_POINTER
++	pushq $0	/* Dummy push for stack alignment. */
++#endif
++#endif
++	/* Set the vendor specific function. */
++	call __xen_hypercall_setfunc
++	/* Set ZF = 1 if AMD, restore saved registers. */
++#ifdef CONFIG_X86_32
++	lea xen_hypercall_amd, %ebx
++	cmp %eax, %ebx
++	pop %edi
++	pop %esi
++	pop %edx
++	pop %ecx
++	pop %ebx
++	pop %eax
++#else
++	lea xen_hypercall_amd(%rip), %rbx
++	cmp %rax, %rbx
++#ifdef CONFIG_FRAME_POINTER
++	pop %rax	/* Dummy pop. */
++#endif
++	pop %r8
++	pop %r9
++	pop %r10
++	pop %r11
++	pop %rsi
++	pop %rdi
++	pop %rdx
++	pop %rcx
++	pop %rax
++#endif
++	/* Use correct hypercall function. */
++	jz xen_hypercall_amd
++	jmp xen_hypercall_intel
++SYM_FUNC_END(xen_hypercall_hvm)
++
++SYM_FUNC_START(xen_hypercall_amd)
++	vmmcall
++	RET
++SYM_FUNC_END(xen_hypercall_amd)
++
++SYM_FUNC_START(xen_hypercall_intel)
++	vmcall
++	RET
++SYM_FUNC_END(xen_hypercall_intel)
++	.popsection
++
+ 	ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS,       .asciz "linux")
+ 	ELFNOTE(Xen, XEN_ELFNOTE_GUEST_VERSION,  .asciz "2.6")
+ 	ELFNOTE(Xen, XEN_ELFNOTE_XEN_VERSION,    .asciz "xen-3.0")
+@@ -115,7 +176,6 @@ SYM_CODE_END(xen_cpu_bringup_again)
+ #else
+ # define FEATURES_DOM0 0
+ #endif
+-	ELFNOTE(Xen, XEN_ELFNOTE_HYPERCALL_PAGE, _ASM_PTR hypercall_page)
+ 	ELFNOTE(Xen, XEN_ELFNOTE_SUPPORTED_FEATURES,
+ 		.long FEATURES_PV | FEATURES_PVH | FEATURES_DOM0)
+ 	ELFNOTE(Xen, XEN_ELFNOTE_LOADER,         .asciz "generic")
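The xen_hypercall_hvm trampoline above resolves the vendor-specific hypercall instruction (VMMCALL on AMD, VMCALL on Intel) once, on the first call, and routes every later call straight to the resolved variant. A minimal userspace C sketch of that resolve-on-first-use dispatch, with a plain function pointer standing in for the kernel's patched call site and cpu_is_amd() as a stand-in for the vendor probe done by __xen_hypercall_setfunc():

#include <stdio.h>

static long hypercall_amd(long nr)   { /* real code: VMMCALL */ return nr; }
static long hypercall_intel(long nr) { /* real code: VMCALL  */ return nr; }

static long hypercall_init(long nr);
static long (*hypercall)(long) = hypercall_init;

static int cpu_is_amd(void) { return 1; }	/* assumption for the demo */

static long hypercall_init(long nr)
{
	/* Resolve the vendor variant once, then retry through it. */
	hypercall = cpu_is_amd() ? hypercall_amd : hypercall_intel;
	return hypercall(nr);
}

int main(void)
{
	printf("%ld\n", hypercall(1));	/* first call resolves the target */
	printf("%ld\n", hypercall(2));	/* later calls dispatch directly */
	return 0;
}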
+diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
+index e1b782e823e6b4..63c13a2ccf556a 100644
+--- a/arch/x86/xen/xen-ops.h
++++ b/arch/x86/xen/xen-ops.h
+@@ -326,4 +326,13 @@ static inline void xen_smp_intr_free_pv(unsigned int cpu) {}
+ static inline void xen_smp_count_cpus(void) { }
+ #endif /* CONFIG_SMP */
+ 
++#ifdef CONFIG_XEN_PV
++void xen_hypercall_pv(void);
++#endif
++void xen_hypercall_hvm(void);
++void xen_hypercall_amd(void);
++void xen_hypercall_intel(void);
++void xen_hypercall_setfunc(void);
++void *__xen_hypercall_setfunc(void);
++
+ #endif /* XEN_OPS_H */
+diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
+index e68c725cf8d975..45a395862fbc88 100644
+--- a/block/blk-cgroup.c
++++ b/block/blk-cgroup.c
+@@ -1324,10 +1324,14 @@ void blkcg_unpin_online(struct cgroup_subsys_state *blkcg_css)
+ 	struct blkcg *blkcg = css_to_blkcg(blkcg_css);
+ 
+ 	do {
++		struct blkcg *parent;
++
+ 		if (!refcount_dec_and_test(&blkcg->online_pin))
+ 			break;
++
++		parent = blkcg_parent(blkcg);
+ 		blkcg_destroy_blkgs(blkcg);
+-		blkcg = blkcg_parent(blkcg);
++		blkcg = parent;
+ 	} while (blkcg);
+ }
+ 
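The blkcg_unpin_online() hunk above is a use-after-free fix: blkcg_destroy_blkgs() can release the last reference on blkcg, so the walk must capture the parent pointer before that call rather than after. A compilable sketch of the ordering rule with stand-in types, not the cgroup API:

#include <stdlib.h>

struct node {
	struct node *parent;
	int refcnt;
};

static void release_chain(struct node *n)
{
	while (n) {
		struct node *parent;

		if (--n->refcnt > 0)
			break;

		parent = n->parent;	/* read before n can go away */
		free(n);		/* models blkcg_destroy_blkgs() dropping n */
		n = parent;		/* safe: freed memory is never touched */
	}
}

int main(void)
{
	struct node *root = calloc(1, sizeof(*root));
	struct node *leaf = calloc(1, sizeof(*leaf));

	root->refcnt = 1;
	leaf->refcnt = 1;
	leaf->parent = root;
	release_chain(leaf);
	return 0;
}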
+diff --git a/block/blk-iocost.c b/block/blk-iocost.c
+index 384aa15e8260bd..a5894ec9696e7e 100644
+--- a/block/blk-iocost.c
++++ b/block/blk-iocost.c
+@@ -1098,7 +1098,14 @@ static void __propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
+ 		inuse = DIV64_U64_ROUND_UP(active * iocg->child_inuse_sum,
+ 					   iocg->child_active_sum);
+ 	} else {
+-		inuse = clamp_t(u32, inuse, 1, active);
++		/*
++		 * It may be tempting to turn this into a clamp expression with
++		 * a lower limit of 1 but active may be 0, which cannot be used
++		 * as an upper limit in that situation. This expression allows
++		 * active to clamp inuse unless it is 0, in which case inuse
++		 * becomes 1.
++		 */
++		inuse = min(inuse, active) ?: 1;
+ 	}
+ 
+ 	iocg->last_inuse = iocg->inuse;
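In numbers, the problem the new comment above describes: clamp_t(u32, inuse, 1, active) is undefined when active == 0 because the lower bound exceeds the upper bound, whereas min(inuse, active) ?: 1 clamps to active when possible and falls back to 1. A small self-checking sketch with the GNU ?: idiom spelled out as plain C:

#include <assert.h>

static unsigned int min_u32(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

static unsigned int fixed_inuse(unsigned int inuse, unsigned int active)
{
	unsigned int v = min_u32(inuse, active);

	return v ? v : 1;	/* the "?:" fallback spelled out */
}

int main(void)
{
	assert(fixed_inuse(5, 3) == 3);	/* clamped by active */
	assert(fixed_inuse(0, 3) == 1);	/* floor of 1 */
	assert(fixed_inuse(7, 0) == 1);	/* active == 0 no longer misbehaves */
	return 0;
}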
+diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
+index 156e9bb07abf1a..cd5ea6eaa76b09 100644
+--- a/block/blk-mq-sysfs.c
++++ b/block/blk-mq-sysfs.c
+@@ -275,15 +275,13 @@ void blk_mq_sysfs_unregister_hctxs(struct request_queue *q)
+ 	struct blk_mq_hw_ctx *hctx;
+ 	unsigned long i;
+ 
+-	mutex_lock(&q->sysfs_dir_lock);
++	lockdep_assert_held(&q->sysfs_dir_lock);
++
+ 	if (!q->mq_sysfs_init_done)
+-		goto unlock;
++		return;
+ 
+ 	queue_for_each_hw_ctx(q, hctx, i)
+ 		blk_mq_unregister_hctx(hctx);
+-
+-unlock:
+-	mutex_unlock(&q->sysfs_dir_lock);
+ }
+ 
+ int blk_mq_sysfs_register_hctxs(struct request_queue *q)
+@@ -292,9 +290,10 @@ int blk_mq_sysfs_register_hctxs(struct request_queue *q)
+ 	unsigned long i;
+ 	int ret = 0;
+ 
+-	mutex_lock(&q->sysfs_dir_lock);
++	lockdep_assert_held(&q->sysfs_dir_lock);
++
+ 	if (!q->mq_sysfs_init_done)
+-		goto unlock;
++		return ret;
+ 
+ 	queue_for_each_hw_ctx(q, hctx, i) {
+ 		ret = blk_mq_register_hctx(hctx);
+@@ -302,8 +301,5 @@ int blk_mq_sysfs_register_hctxs(struct request_queue *q)
+ 			break;
+ 	}
+ 
+-unlock:
+-	mutex_unlock(&q->sysfs_dir_lock);
+-
+ 	return ret;
+ }
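The two helpers above no longer take q->sysfs_dir_lock themselves; they assert that the caller holds it, so a caller can run unregister and re-register as one critical section. A userspace sketch of that inverted locking contract, with a flag modelling lockdep's held-lock tracking and pthread stand-ins for the kernel mutex API:

#include <assert.h>
#include <pthread.h>

static pthread_mutex_t dir_lock = PTHREAD_MUTEX_INITIALIZER;
static int dir_lock_held;	/* models lockdep's owner tracking */

static void helper(void)
{
	assert(dir_lock_held);	/* models lockdep_assert_held() */
	/* ... touch sysfs state ... */
}

int main(void)
{
	pthread_mutex_lock(&dir_lock);
	dir_lock_held = 1;
	helper();		/* both helpers now share one section */
	helper();
	dir_lock_held = 0;
	pthread_mutex_unlock(&dir_lock);
	return 0;
}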
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index b4fba7b398e5bc..cc1b3202383840 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -43,6 +43,7 @@
+ 
+ static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);
+ static DEFINE_PER_CPU(call_single_data_t, blk_cpu_csd);
++static DEFINE_MUTEX(blk_mq_cpuhp_lock);
+ 
+ static void blk_mq_insert_request(struct request *rq, blk_insert_t flags);
+ static void blk_mq_request_bypass_insert(struct request *rq,
+@@ -3740,13 +3741,91 @@ static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
+ 	return 0;
+ }
+ 
+-static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
++static void __blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
+ {
+-	if (!(hctx->flags & BLK_MQ_F_STACKING))
++	lockdep_assert_held(&blk_mq_cpuhp_lock);
++
++	if (!(hctx->flags & BLK_MQ_F_STACKING) &&
++	    !hlist_unhashed(&hctx->cpuhp_online)) {
+ 		cpuhp_state_remove_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
+ 						    &hctx->cpuhp_online);
+-	cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
+-					    &hctx->cpuhp_dead);
++		INIT_HLIST_NODE(&hctx->cpuhp_online);
++	}
++
++	if (!hlist_unhashed(&hctx->cpuhp_dead)) {
++		cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
++						    &hctx->cpuhp_dead);
++		INIT_HLIST_NODE(&hctx->cpuhp_dead);
++	}
++}
++
++static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
++{
++	mutex_lock(&blk_mq_cpuhp_lock);
++	__blk_mq_remove_cpuhp(hctx);
++	mutex_unlock(&blk_mq_cpuhp_lock);
++}
++
++static void __blk_mq_add_cpuhp(struct blk_mq_hw_ctx *hctx)
++{
++	lockdep_assert_held(&blk_mq_cpuhp_lock);
++
++	if (!(hctx->flags & BLK_MQ_F_STACKING) &&
++	    hlist_unhashed(&hctx->cpuhp_online))
++		cpuhp_state_add_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
++				&hctx->cpuhp_online);
++
++	if (hlist_unhashed(&hctx->cpuhp_dead))
++		cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD,
++				&hctx->cpuhp_dead);
++}
++
++static void __blk_mq_remove_cpuhp_list(struct list_head *head)
++{
++	struct blk_mq_hw_ctx *hctx;
++
++	lockdep_assert_held(&blk_mq_cpuhp_lock);
++
++	list_for_each_entry(hctx, head, hctx_list)
++		__blk_mq_remove_cpuhp(hctx);
++}
++
++/*
++ * Unregister cpuhp callbacks from exited hw queues
++ *
++ * Safe to call if this `request_queue` is live
++ */
++static void blk_mq_remove_hw_queues_cpuhp(struct request_queue *q)
++{
++	LIST_HEAD(hctx_list);
++
++	spin_lock(&q->unused_hctx_lock);
++	list_splice_init(&q->unused_hctx_list, &hctx_list);
++	spin_unlock(&q->unused_hctx_lock);
++
++	mutex_lock(&blk_mq_cpuhp_lock);
++	__blk_mq_remove_cpuhp_list(&hctx_list);
++	mutex_unlock(&blk_mq_cpuhp_lock);
++
++	spin_lock(&q->unused_hctx_lock);
++	list_splice(&hctx_list, &q->unused_hctx_list);
++	spin_unlock(&q->unused_hctx_lock);
++}
++
++/*
++ * Register cpuhp callbacks from all hw queues
++ *
++ * Safe to call if this `request_queue` is live
++ */
++static void blk_mq_add_hw_queues_cpuhp(struct request_queue *q)
++{
++	struct blk_mq_hw_ctx *hctx;
++	unsigned long i;
++
++	mutex_lock(&blk_mq_cpuhp_lock);
++	queue_for_each_hw_ctx(q, hctx, i)
++		__blk_mq_add_cpuhp(hctx);
++	mutex_unlock(&blk_mq_cpuhp_lock);
+ }
+ 
+ /*
+@@ -3797,8 +3876,6 @@ static void blk_mq_exit_hctx(struct request_queue *q,
+ 	if (set->ops->exit_hctx)
+ 		set->ops->exit_hctx(hctx, hctx_idx);
+ 
+-	blk_mq_remove_cpuhp(hctx);
+-
+ 	xa_erase(&q->hctx_table, hctx_idx);
+ 
+ 	spin_lock(&q->unused_hctx_lock);
+@@ -3815,6 +3892,7 @@ static void blk_mq_exit_hw_queues(struct request_queue *q,
+ 	queue_for_each_hw_ctx(q, hctx, i) {
+ 		if (i == nr_queue)
+ 			break;
++		blk_mq_remove_cpuhp(hctx);
+ 		blk_mq_exit_hctx(q, set, hctx, i);
+ 	}
+ }
+@@ -3878,6 +3956,8 @@ blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
+ 	INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
+ 	spin_lock_init(&hctx->lock);
+ 	INIT_LIST_HEAD(&hctx->dispatch);
++	INIT_HLIST_NODE(&hctx->cpuhp_dead);
++	INIT_HLIST_NODE(&hctx->cpuhp_online);
+ 	hctx->queue = q;
+ 	hctx->flags = set->flags & ~BLK_MQ_F_TAG_QUEUE_SHARED;
+ 
+@@ -4382,7 +4462,8 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
+ 	unsigned long i, j;
+ 
+ 	/* protect against switching io scheduler  */
+-	mutex_lock(&q->sysfs_lock);
++	lockdep_assert_held(&q->sysfs_lock);
++
+ 	for (i = 0; i < set->nr_hw_queues; i++) {
+ 		int old_node;
+ 		int node = blk_mq_get_hctx_node(set, i);
+@@ -4415,7 +4496,12 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
+ 
+ 	xa_for_each_start(&q->hctx_table, j, hctx, j)
+ 		blk_mq_exit_hctx(q, set, hctx, j);
+-	mutex_unlock(&q->sysfs_lock);
++
++	/* unregister cpuhp callbacks for exited hctxs */
++	blk_mq_remove_hw_queues_cpuhp(q);
++
++	/* register cpuhp for new initialized hctxs */
++	blk_mq_add_hw_queues_cpuhp(q);
+ }
+ 
+ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
+@@ -4441,10 +4527,14 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
+ 
+ 	xa_init(&q->hctx_table);
+ 
++	mutex_lock(&q->sysfs_lock);
++
+ 	blk_mq_realloc_hw_ctxs(set, q);
+ 	if (!q->nr_hw_queues)
+ 		goto err_hctxs;
+ 
++	mutex_unlock(&q->sysfs_lock);
++
+ 	INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
+ 	blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
+ 
+@@ -4463,6 +4553,7 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
+ 	return 0;
+ 
+ err_hctxs:
++	mutex_unlock(&q->sysfs_lock);
+ 	blk_mq_release(q);
+ err_exit:
+ 	q->mq_ops = NULL;
+@@ -4843,12 +4934,12 @@ static bool blk_mq_elv_switch_none(struct list_head *head,
+ 		return false;
+ 
+ 	/* q->elevator needs protection from ->sysfs_lock */
+-	mutex_lock(&q->sysfs_lock);
++	lockdep_assert_held(&q->sysfs_lock);
+ 
+ 	/* the check has to be done with holding sysfs_lock */
+ 	if (!q->elevator) {
+ 		kfree(qe);
+-		goto unlock;
++		goto out;
+ 	}
+ 
+ 	INIT_LIST_HEAD(&qe->node);
+@@ -4858,9 +4949,7 @@ static bool blk_mq_elv_switch_none(struct list_head *head,
+ 	__elevator_get(qe->type);
+ 	list_add(&qe->node, head);
+ 	elevator_disable(q);
+-unlock:
+-	mutex_unlock(&q->sysfs_lock);
+-
++out:
+ 	return true;
+ }
+ 
+@@ -4889,11 +4978,9 @@ static void blk_mq_elv_switch_back(struct list_head *head,
+ 	list_del(&qe->node);
+ 	kfree(qe);
+ 
+-	mutex_lock(&q->sysfs_lock);
+ 	elevator_switch(q, t);
+ 	/* drop the reference acquired in blk_mq_elv_switch_none */
+ 	elevator_put(t);
+-	mutex_unlock(&q->sysfs_lock);
+ }
+ 
+ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
+@@ -4913,8 +5000,11 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
+ 	if (set->nr_maps == 1 && nr_hw_queues == set->nr_hw_queues)
+ 		return;
+ 
+-	list_for_each_entry(q, &set->tag_list, tag_set_list)
++	list_for_each_entry(q, &set->tag_list, tag_set_list) {
++		mutex_lock(&q->sysfs_dir_lock);
++		mutex_lock(&q->sysfs_lock);
+ 		blk_mq_freeze_queue(q);
++	}
+ 	/*
+ 	 * Switch IO scheduler to 'none', cleaning up the data associated
+ 	 * with the previous scheduler. We will switch back once we are done
+@@ -4970,8 +5060,11 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
+ 	list_for_each_entry(q, &set->tag_list, tag_set_list)
+ 		blk_mq_elv_switch_back(&head, q);
+ 
+-	list_for_each_entry(q, &set->tag_list, tag_set_list)
++	list_for_each_entry(q, &set->tag_list, tag_set_list) {
+ 		blk_mq_unfreeze_queue(q);
++		mutex_unlock(&q->sysfs_lock);
++		mutex_unlock(&q->sysfs_dir_lock);
++	}
+ 
+ 	/* Free the excess tags when nr_hw_queues shrink. */
+ 	for (i = set->nr_hw_queues; i < prev_nr_hw_queues; i++)
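Much of the blk-mq rework above hinges on hlist_unhashed(): cpuhp nodes are initialised to the unhashed state, and the add/remove helpers only act when the node state says there is work to do, making both operations idempotent under blk_mq_cpuhp_lock. A reduced sketch of that state machine, with a boolean standing in for the hlist node:

#include <stdbool.h>
#include <stdio.h>

struct node { bool hashed; };

static void node_init(struct node *n) { n->hashed = false; }
static bool unhashed(struct node *n)  { return !n->hashed; }

static void add_cb(struct node *n)
{
	if (unhashed(n)) {	/* register only once */
		n->hashed = true;
		puts("registered");
	}
}

static void remove_cb(struct node *n)
{
	if (!unhashed(n)) {	/* unregister only if registered */
		n->hashed = false;
		node_init(n);	/* mark unhashed again, as the patch does */
		puts("unregistered");
	}
}

int main(void)
{
	struct node n;

	node_init(&n);
	add_cb(&n);
	add_cb(&n);	/* no-op */
	remove_cb(&n);
	remove_cb(&n);	/* no-op */
	return 0;
}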
+diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
+index 207577145c54f4..42c2cb97d778af 100644
+--- a/block/blk-sysfs.c
++++ b/block/blk-sysfs.c
+@@ -690,11 +690,11 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
+ 			return res;
+ 	}
+ 
+-	blk_mq_freeze_queue(q);
+ 	mutex_lock(&q->sysfs_lock);
++	blk_mq_freeze_queue(q);
+ 	res = entry->store(disk, page, length);
+-	mutex_unlock(&q->sysfs_lock);
+ 	blk_mq_unfreeze_queue(q);
++	mutex_unlock(&q->sysfs_lock);
+ 	return res;
+ }
+ 
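The swap above establishes one nesting rule for queue_attr_store(): the queue freeze now always happens inside q->sysfs_lock, matching the order used by __blk_mq_update_nr_hw_queues() in the same patch; mixing the two orders between concurrent paths is a classic ABBA deadlock. A pthread sketch of the resulting order (the freeze is modelled as a plain mutex here, which it is not in the kernel):

#include <pthread.h>

static pthread_mutex_t sysfs_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t freeze     = PTHREAD_MUTEX_INITIALIZER; /* models queue freeze */

static void attr_store(void)
{
	pthread_mutex_lock(&sysfs_lock);	/* outer lock first */
	pthread_mutex_lock(&freeze);		/* freeze nests inside */
	/* ... entry->store() ... */
	pthread_mutex_unlock(&freeze);
	pthread_mutex_unlock(&sysfs_lock);
}

int main(void)
{
	attr_store();
	return 0;
}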
+diff --git a/block/blk-zoned.c b/block/blk-zoned.c
+index 0b1184176ce77a..767bcbce74facb 100644
+--- a/block/blk-zoned.c
++++ b/block/blk-zoned.c
+@@ -18,7 +18,7 @@
+ #include <linux/vmalloc.h>
+ #include <linux/sched/mm.h>
+ #include <linux/spinlock.h>
+-#include <linux/atomic.h>
++#include <linux/refcount.h>
+ #include <linux/mempool.h>
+ 
+ #include "blk.h"
+@@ -41,7 +41,6 @@ static const char *const zone_cond_name[] = {
+ /*
+  * Per-zone write plug.
+  * @node: hlist_node structure for managing the plug using a hash table.
+- * @link: To list the plug in the zone write plug error list of the disk.
+  * @ref: Zone write plug reference counter. A zone write plug reference is
+  *       always at least 1 when the plug is hashed in the disk plug hash table.
+  *       The reference is incremented whenever a new BIO needing plugging is
+@@ -63,8 +62,7 @@ static const char *const zone_cond_name[] = {
+  */
+ struct blk_zone_wplug {
+ 	struct hlist_node	node;
+-	struct list_head	link;
+-	atomic_t		ref;
++	refcount_t		ref;
+ 	spinlock_t		lock;
+ 	unsigned int		flags;
+ 	unsigned int		zone_no;
+@@ -80,8 +78,8 @@ struct blk_zone_wplug {
+  *  - BLK_ZONE_WPLUG_PLUGGED: Indicates that the zone write plug is plugged,
+  *    that is, that write BIOs are being throttled due to a write BIO already
+  *    being executed or the zone write plug bio list is not empty.
+- *  - BLK_ZONE_WPLUG_ERROR: Indicates that a write error happened which will be
+- *    recovered with a report zone to update the zone write pointer offset.
++ *  - BLK_ZONE_WPLUG_NEED_WP_UPDATE: Indicates that we lost track of a zone
++ *    write pointer offset and need to update it.
+  *  - BLK_ZONE_WPLUG_UNHASHED: Indicates that the zone write plug was removed
+  *    from the disk hash table and that the initial reference to the zone
+  *    write plug set when the plug was first added to the hash table has been
+@@ -91,11 +89,9 @@ struct blk_zone_wplug {
+  *    freed once all remaining references from BIOs or functions are dropped.
+  */
+ #define BLK_ZONE_WPLUG_PLUGGED		(1U << 0)
+-#define BLK_ZONE_WPLUG_ERROR		(1U << 1)
++#define BLK_ZONE_WPLUG_NEED_WP_UPDATE	(1U << 1)
+ #define BLK_ZONE_WPLUG_UNHASHED		(1U << 2)
+ 
+-#define BLK_ZONE_WPLUG_BUSY	(BLK_ZONE_WPLUG_PLUGGED | BLK_ZONE_WPLUG_ERROR)
+-
+ /**
+  * blk_zone_cond_str - Return string XXX in BLK_ZONE_COND_XXX.
+  * @zone_cond: BLK_ZONE_COND_XXX.
+@@ -115,6 +111,30 @@ const char *blk_zone_cond_str(enum blk_zone_cond zone_cond)
+ }
+ EXPORT_SYMBOL_GPL(blk_zone_cond_str);
+ 
++struct disk_report_zones_cb_args {
++	struct gendisk	*disk;
++	report_zones_cb	user_cb;
++	void		*user_data;
++};
++
++static void disk_zone_wplug_sync_wp_offset(struct gendisk *disk,
++					   struct blk_zone *zone);
++
++static int disk_report_zones_cb(struct blk_zone *zone, unsigned int idx,
++				void *data)
++{
++	struct disk_report_zones_cb_args *args = data;
++	struct gendisk *disk = args->disk;
++
++	if (disk->zone_wplugs_hash)
++		disk_zone_wplug_sync_wp_offset(disk, zone);
++
++	if (!args->user_cb)
++		return 0;
++
++	return args->user_cb(zone, idx, args->user_data);
++}
++
+ /**
+  * blkdev_report_zones - Get zones information
+  * @bdev:	Target block device
+@@ -139,6 +159,11 @@ int blkdev_report_zones(struct block_device *bdev, sector_t sector,
+ {
+ 	struct gendisk *disk = bdev->bd_disk;
+ 	sector_t capacity = get_capacity(disk);
++	struct disk_report_zones_cb_args args = {
++		.disk = disk,
++		.user_cb = cb,
++		.user_data = data,
++	};
+ 
+ 	if (!bdev_is_zoned(bdev) || WARN_ON_ONCE(!disk->fops->report_zones))
+ 		return -EOPNOTSUPP;
+@@ -146,7 +171,8 @@ int blkdev_report_zones(struct block_device *bdev, sector_t sector,
+ 	if (!nr_zones || sector >= capacity)
+ 		return 0;
+ 
+-	return disk->fops->report_zones(disk, sector, nr_zones, cb, data);
++	return disk->fops->report_zones(disk, sector, nr_zones,
++					disk_report_zones_cb, &args);
+ }
+ EXPORT_SYMBOL_GPL(blkdev_report_zones);
+ 
+@@ -417,7 +443,7 @@ static struct blk_zone_wplug *disk_get_zone_wplug(struct gendisk *disk,
+ 
+ 	hlist_for_each_entry_rcu(zwplug, &disk->zone_wplugs_hash[idx], node) {
+ 		if (zwplug->zone_no == zno &&
+-		    atomic_inc_not_zero(&zwplug->ref)) {
++		    refcount_inc_not_zero(&zwplug->ref)) {
+ 			rcu_read_unlock();
+ 			return zwplug;
+ 		}
+@@ -438,9 +464,9 @@ static void disk_free_zone_wplug_rcu(struct rcu_head *rcu_head)
+ 
+ static inline void disk_put_zone_wplug(struct blk_zone_wplug *zwplug)
+ {
+-	if (atomic_dec_and_test(&zwplug->ref)) {
++	if (refcount_dec_and_test(&zwplug->ref)) {
+ 		WARN_ON_ONCE(!bio_list_empty(&zwplug->bio_list));
+-		WARN_ON_ONCE(!list_empty(&zwplug->link));
++		WARN_ON_ONCE(zwplug->flags & BLK_ZONE_WPLUG_PLUGGED);
+ 		WARN_ON_ONCE(!(zwplug->flags & BLK_ZONE_WPLUG_UNHASHED));
+ 
+ 		call_rcu(&zwplug->rcu_head, disk_free_zone_wplug_rcu);
+@@ -454,8 +480,8 @@ static inline bool disk_should_remove_zone_wplug(struct gendisk *disk,
+ 	if (zwplug->flags & BLK_ZONE_WPLUG_UNHASHED)
+ 		return false;
+ 
+-	/* If the zone write plug is still busy, it cannot be removed. */
+-	if (zwplug->flags & BLK_ZONE_WPLUG_BUSY)
++	/* If the zone write plug is still plugged, it cannot be removed. */
++	if (zwplug->flags & BLK_ZONE_WPLUG_PLUGGED)
+ 		return false;
+ 
+ 	/*
+@@ -469,7 +495,7 @@ static inline bool disk_should_remove_zone_wplug(struct gendisk *disk,
+ 	 * taken when the plug was allocated and another reference taken by the
+ 	 * caller context).
+ 	 */
+-	if (atomic_read(&zwplug->ref) > 2)
++	if (refcount_read(&zwplug->ref) > 2)
+ 		return false;
+ 
+ 	/* We can remove zone write plugs for zones that are empty or full. */
+@@ -538,12 +564,11 @@ static struct blk_zone_wplug *disk_get_and_lock_zone_wplug(struct gendisk *disk,
+ 		return NULL;
+ 
+ 	INIT_HLIST_NODE(&zwplug->node);
+-	INIT_LIST_HEAD(&zwplug->link);
+-	atomic_set(&zwplug->ref, 2);
++	refcount_set(&zwplug->ref, 2);
+ 	spin_lock_init(&zwplug->lock);
+ 	zwplug->flags = 0;
+ 	zwplug->zone_no = zno;
+-	zwplug->wp_offset = sector & (disk->queue->limits.chunk_sectors - 1);
++	zwplug->wp_offset = bdev_offset_from_zone_start(disk->part0, sector);
+ 	bio_list_init(&zwplug->bio_list);
+ 	INIT_WORK(&zwplug->bio_work, blk_zone_wplug_bio_work);
+ 	zwplug->disk = disk;
+@@ -587,124 +612,81 @@ static void disk_zone_wplug_abort(struct blk_zone_wplug *zwplug)
+ }
+ 
+ /*
+- * Abort (fail) all plugged BIOs of a zone write plug that are not aligned
+- * with the assumed write pointer location of the zone when the BIO will
+- * be unplugged.
++ * Set a zone write plug write pointer offset to the specified value.
++ * This aborts all plugged BIOs, which is fine as this function is called for
++ * a zone reset operation, a zone finish operation or if the zone needs a wp
++ * update from a report zone after a write error.
+  */
+-static void disk_zone_wplug_abort_unaligned(struct gendisk *disk,
+-					    struct blk_zone_wplug *zwplug)
+-{
+-	unsigned int wp_offset = zwplug->wp_offset;
+-	struct bio_list bl = BIO_EMPTY_LIST;
+-	struct bio *bio;
+-
+-	while ((bio = bio_list_pop(&zwplug->bio_list))) {
+-		if (disk_zone_is_full(disk, zwplug->zone_no, wp_offset) ||
+-		    (bio_op(bio) != REQ_OP_ZONE_APPEND &&
+-		     bio_offset_from_zone_start(bio) != wp_offset)) {
+-			blk_zone_wplug_bio_io_error(zwplug, bio);
+-			continue;
+-		}
+-
+-		wp_offset += bio_sectors(bio);
+-		bio_list_add(&bl, bio);
+-	}
+-
+-	bio_list_merge(&zwplug->bio_list, &bl);
+-}
+-
+-static inline void disk_zone_wplug_set_error(struct gendisk *disk,
+-					     struct blk_zone_wplug *zwplug)
++static void disk_zone_wplug_set_wp_offset(struct gendisk *disk,
++					  struct blk_zone_wplug *zwplug,
++					  unsigned int wp_offset)
+ {
+-	unsigned long flags;
++	lockdep_assert_held(&zwplug->lock);
+ 
+-	if (zwplug->flags & BLK_ZONE_WPLUG_ERROR)
+-		return;
++	/* Update the zone write pointer and abort all plugged BIOs. */
++	zwplug->flags &= ~BLK_ZONE_WPLUG_NEED_WP_UPDATE;
++	zwplug->wp_offset = wp_offset;
++	disk_zone_wplug_abort(zwplug);
+ 
+ 	/*
+-	 * At this point, we already have a reference on the zone write plug.
+-	 * However, since we are going to add the plug to the disk zone write
+-	 * plugs work list, increase its reference count. This reference will
+-	 * be dropped in disk_zone_wplugs_work() once the error state is
+-	 * handled, or in disk_zone_wplug_clear_error() if the zone is reset or
+-	 * finished.
++	 * The zone write plug now has no BIO plugged: remove it from the
++	 * hash table so that it cannot be seen. The plug will be freed
++	 * when the last reference is dropped.
+ 	 */
+-	zwplug->flags |= BLK_ZONE_WPLUG_ERROR;
+-	atomic_inc(&zwplug->ref);
+-
+-	spin_lock_irqsave(&disk->zone_wplugs_lock, flags);
+-	list_add_tail(&zwplug->link, &disk->zone_wplugs_err_list);
+-	spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags);
++	if (disk_should_remove_zone_wplug(disk, zwplug))
++		disk_remove_zone_wplug(disk, zwplug);
+ }
+ 
+-static inline void disk_zone_wplug_clear_error(struct gendisk *disk,
+-					       struct blk_zone_wplug *zwplug)
++static unsigned int blk_zone_wp_offset(struct blk_zone *zone)
+ {
+-	unsigned long flags;
+-
+-	if (!(zwplug->flags & BLK_ZONE_WPLUG_ERROR))
+-		return;
+-
+-	/*
+-	 * We are racing with the error handling work which drops the reference
+-	 * on the zone write plug after handling the error state. So remove the
+-	 * plug from the error list and drop its reference count only if the
+-	 * error handling has not yet started, that is, if the zone write plug
+-	 * is still listed.
+-	 */
+-	spin_lock_irqsave(&disk->zone_wplugs_lock, flags);
+-	if (!list_empty(&zwplug->link)) {
+-		list_del_init(&zwplug->link);
+-		zwplug->flags &= ~BLK_ZONE_WPLUG_ERROR;
+-		disk_put_zone_wplug(zwplug);
++	switch (zone->cond) {
++	case BLK_ZONE_COND_IMP_OPEN:
++	case BLK_ZONE_COND_EXP_OPEN:
++	case BLK_ZONE_COND_CLOSED:
++		return zone->wp - zone->start;
++	case BLK_ZONE_COND_FULL:
++		return zone->len;
++	case BLK_ZONE_COND_EMPTY:
++		return 0;
++	case BLK_ZONE_COND_NOT_WP:
++	case BLK_ZONE_COND_OFFLINE:
++	case BLK_ZONE_COND_READONLY:
++	default:
++		/*
++		 * Conventional, offline and read-only zones do not have a valid
++		 * write pointer.
++		 */
++		return UINT_MAX;
+ 	}
+-	spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags);
+ }
+ 
+-/*
+- * Set a zone write plug write pointer offset to either 0 (zone reset case)
+- * or to the zone size (zone finish case). This aborts all plugged BIOs, which
+- * is fine to do as doing a zone reset or zone finish while writes are in-flight
+- * is a mistake from the user which will most likely cause all plugged BIOs to
+- * fail anyway.
+- */
+-static void disk_zone_wplug_set_wp_offset(struct gendisk *disk,
+-					  struct blk_zone_wplug *zwplug,
+-					  unsigned int wp_offset)
++static void disk_zone_wplug_sync_wp_offset(struct gendisk *disk,
++					   struct blk_zone *zone)
+ {
++	struct blk_zone_wplug *zwplug;
+ 	unsigned long flags;
+ 
+-	spin_lock_irqsave(&zwplug->lock, flags);
+-
+-	/*
+-	 * Make sure that a BIO completion or another zone reset or finish
+-	 * operation has not already removed the plug from the hash table.
+-	 */
+-	if (zwplug->flags & BLK_ZONE_WPLUG_UNHASHED) {
+-		spin_unlock_irqrestore(&zwplug->lock, flags);
++	zwplug = disk_get_zone_wplug(disk, zone->start);
++	if (!zwplug)
+ 		return;
+-	}
+ 
+-	/* Update the zone write pointer and abort all plugged BIOs. */
+-	zwplug->wp_offset = wp_offset;
+-	disk_zone_wplug_abort(zwplug);
++	spin_lock_irqsave(&zwplug->lock, flags);
++	if (zwplug->flags & BLK_ZONE_WPLUG_NEED_WP_UPDATE)
++		disk_zone_wplug_set_wp_offset(disk, zwplug,
++					      blk_zone_wp_offset(zone));
++	spin_unlock_irqrestore(&zwplug->lock, flags);
+ 
+-	/*
+-	 * Updating the write pointer offset puts back the zone
+-	 * in a good state. So clear the error flag and decrement the
+-	 * error count if we were in error state.
+-	 */
+-	disk_zone_wplug_clear_error(disk, zwplug);
++	disk_put_zone_wplug(zwplug);
++}
+ 
+-	/*
+-	 * The zone write plug now has no BIO plugged: remove it from the
+-	 * hash table so that it cannot be seen. The plug will be freed
+-	 * when the last reference is dropped.
+-	 */
+-	if (disk_should_remove_zone_wplug(disk, zwplug))
+-		disk_remove_zone_wplug(disk, zwplug);
++static int disk_zone_sync_wp_offset(struct gendisk *disk, sector_t sector)
++{
++	struct disk_report_zones_cb_args args = {
++		.disk = disk,
++	};
+ 
+-	spin_unlock_irqrestore(&zwplug->lock, flags);
++	return disk->fops->report_zones(disk, sector, 1,
++					disk_report_zones_cb, &args);
+ }
+ 
+ static bool blk_zone_wplug_handle_reset_or_finish(struct bio *bio,
+@@ -713,6 +695,7 @@ static bool blk_zone_wplug_handle_reset_or_finish(struct bio *bio,
+ 	struct gendisk *disk = bio->bi_bdev->bd_disk;
+ 	sector_t sector = bio->bi_iter.bi_sector;
+ 	struct blk_zone_wplug *zwplug;
++	unsigned long flags;
+ 
+ 	/* Conventional zones cannot be reset nor finished. */
+ 	if (disk_zone_is_conv(disk, sector)) {
+@@ -720,6 +703,15 @@ static bool blk_zone_wplug_handle_reset_or_finish(struct bio *bio,
+ 		return true;
+ 	}
+ 
++	/*
++	 * No-wait reset or finish BIOs do not make much sense as the callers
++	 * issue these as blocking operations in most cases. To avoid issues with
++	 * the BIO execution potentially failing with BLK_STS_AGAIN, warn about
++	 * REQ_NOWAIT being set and ignore that flag.
++	 */
++	if (WARN_ON_ONCE(bio->bi_opf & REQ_NOWAIT))
++		bio->bi_opf &= ~REQ_NOWAIT;
++
+ 	/*
+ 	 * If we have a zone write plug, set its write pointer offset to 0
+ 	 * (reset case) or to the zone size (finish case). This will abort all
+@@ -729,7 +721,9 @@ static bool blk_zone_wplug_handle_reset_or_finish(struct bio *bio,
+ 	 */
+ 	zwplug = disk_get_zone_wplug(disk, sector);
+ 	if (zwplug) {
++		spin_lock_irqsave(&zwplug->lock, flags);
+ 		disk_zone_wplug_set_wp_offset(disk, zwplug, wp_offset);
++		spin_unlock_irqrestore(&zwplug->lock, flags);
+ 		disk_put_zone_wplug(zwplug);
+ 	}
+ 
+@@ -740,6 +734,7 @@ static bool blk_zone_wplug_handle_reset_all(struct bio *bio)
+ {
+ 	struct gendisk *disk = bio->bi_bdev->bd_disk;
+ 	struct blk_zone_wplug *zwplug;
++	unsigned long flags;
+ 	sector_t sector;
+ 
+ 	/*
+@@ -751,7 +746,9 @@ static bool blk_zone_wplug_handle_reset_all(struct bio *bio)
+ 	     sector += disk->queue->limits.chunk_sectors) {
+ 		zwplug = disk_get_zone_wplug(disk, sector);
+ 		if (zwplug) {
++			spin_lock_irqsave(&zwplug->lock, flags);
+ 			disk_zone_wplug_set_wp_offset(disk, zwplug, 0);
++			spin_unlock_irqrestore(&zwplug->lock, flags);
+ 			disk_put_zone_wplug(zwplug);
+ 		}
+ 	}
+@@ -759,9 +756,25 @@ static bool blk_zone_wplug_handle_reset_all(struct bio *bio)
+ 	return false;
+ }
+ 
+-static inline void blk_zone_wplug_add_bio(struct blk_zone_wplug *zwplug,
+-					  struct bio *bio, unsigned int nr_segs)
++static void disk_zone_wplug_schedule_bio_work(struct gendisk *disk,
++					      struct blk_zone_wplug *zwplug)
+ {
++	/*
++	 * Take a reference on the zone write plug and schedule the submission
++	 * of the next plugged BIO. blk_zone_wplug_bio_work() will release the
++	 * reference we take here.
++	 */
++	WARN_ON_ONCE(!(zwplug->flags & BLK_ZONE_WPLUG_PLUGGED));
++	refcount_inc(&zwplug->ref);
++	queue_work(disk->zone_wplugs_wq, &zwplug->bio_work);
++}
++
++static inline void disk_zone_wplug_add_bio(struct gendisk *disk,
++				struct blk_zone_wplug *zwplug,
++				struct bio *bio, unsigned int nr_segs)
++{
++	bool schedule_bio_work = false;
++
+ 	/*
+ 	 * Grab an extra reference on the BIO request queue usage counter.
+ 	 * This reference will be reused to submit a request for the BIO for
+@@ -777,6 +790,16 @@ static inline void blk_zone_wplug_add_bio(struct blk_zone_wplug *zwplug,
+ 	 */
+ 	bio_clear_polled(bio);
+ 
++	/*
++	 * REQ_NOWAIT BIOs are always handled using the zone write plug BIO
++	 * work, which can block. So clear the REQ_NOWAIT flag and schedule the
++	 * work if this is the first BIO we are plugging.
++	 */
++	if (bio->bi_opf & REQ_NOWAIT) {
++		schedule_bio_work = !(zwplug->flags & BLK_ZONE_WPLUG_PLUGGED);
++		bio->bi_opf &= ~REQ_NOWAIT;
++	}
++
+ 	/*
+ 	 * Reuse the poll cookie field to store the number of segments when
+ 	 * split to the hardware limits.
+@@ -790,6 +813,11 @@ static inline void blk_zone_wplug_add_bio(struct blk_zone_wplug *zwplug,
+ 	 * at the tail of the list to preserve the sequential write order.
+ 	 */
+ 	bio_list_add(&zwplug->bio_list, bio);
++
++	zwplug->flags |= BLK_ZONE_WPLUG_PLUGGED;
++
++	if (schedule_bio_work)
++		disk_zone_wplug_schedule_bio_work(disk, zwplug);
+ }
+ 
+ /*
+@@ -902,13 +930,23 @@ static bool blk_zone_wplug_prepare_bio(struct blk_zone_wplug *zwplug,
+ {
+ 	struct gendisk *disk = bio->bi_bdev->bd_disk;
+ 
++	/*
++	 * If we lost track of the zone write pointer due to a write error,
++	 * the user must either execute a report zones, reset the zone or finish
++	 * the zone to recover a reliable write pointer position. Fail BIOs if the
++	 * user did not do that as we cannot handle emulated zone append
++	 * otherwise.
++	 */
++	if (zwplug->flags & BLK_ZONE_WPLUG_NEED_WP_UPDATE)
++		return false;
++
+ 	/*
+ 	 * Check that the user is not attempting to write to a full zone.
+ 	 * We know such BIO will fail, and that would potentially overflow our
+ 	 * write pointer offset beyond the end of the zone.
+ 	 */
+ 	if (disk_zone_wplug_is_full(disk, zwplug))
+-		goto err;
++		return false;
+ 
+ 	if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
+ 		/*
+@@ -927,24 +965,18 @@ static bool blk_zone_wplug_prepare_bio(struct blk_zone_wplug *zwplug,
+ 		bio_set_flag(bio, BIO_EMULATES_ZONE_APPEND);
+ 	} else {
+ 		/*
+-		 * Check for non-sequential writes early because we avoid a
+-		 * whole lot of error handling trouble if we don't send it off
+-		 * to the driver.
++		 * Check for non-sequential writes early as we know that BIOs
++		 * with a start sector not unaligned to the zone write pointer
++		 * will fail.
+ 		 */
+ 		if (bio_offset_from_zone_start(bio) != zwplug->wp_offset)
+-			goto err;
++			return false;
+ 	}
+ 
+ 	/* Advance the zone write pointer offset. */
+ 	zwplug->wp_offset += bio_sectors(bio);
+ 
+ 	return true;
+-
+-err:
+-	/* We detected an invalid write BIO: schedule error recovery. */
+-	disk_zone_wplug_set_error(disk, zwplug);
+-	kblockd_schedule_work(&disk->zone_wplugs_work);
+-	return false;
+ }
+ 
+ static bool blk_zone_wplug_handle_write(struct bio *bio, unsigned int nr_segs)
+@@ -983,7 +1015,10 @@ static bool blk_zone_wplug_handle_write(struct bio *bio, unsigned int nr_segs)
+ 
+ 	zwplug = disk_get_and_lock_zone_wplug(disk, sector, gfp_mask, &flags);
+ 	if (!zwplug) {
+-		bio_io_error(bio);
++		if (bio->bi_opf & REQ_NOWAIT)
++			bio_wouldblock_error(bio);
++		else
++			bio_io_error(bio);
+ 		return true;
+ 	}
+ 
+@@ -991,18 +1026,20 @@ static bool blk_zone_wplug_handle_write(struct bio *bio, unsigned int nr_segs)
+ 	bio_set_flag(bio, BIO_ZONE_WRITE_PLUGGING);
+ 
+ 	/*
+-	 * If the zone is already plugged or has a pending error, add the BIO
+-	 * to the plug BIO list. Otherwise, plug and let the BIO execute.
++	 * If the zone is already plugged, add the BIO to the plug BIO list.
++	 * Do the same for REQ_NOWAIT BIOs to ensure that we will not see a
++	 * BLK_STS_AGAIN failure if we let the BIO execute.
++	 * Otherwise, plug and let the BIO execute.
+ 	 */
+-	if (zwplug->flags & BLK_ZONE_WPLUG_BUSY)
++	if ((zwplug->flags & BLK_ZONE_WPLUG_PLUGGED) ||
++	    (bio->bi_opf & REQ_NOWAIT))
+ 		goto plug;
+ 
+-	/*
+-	 * If an error is detected when preparing the BIO, add it to the BIO
+-	 * list so that error recovery can deal with it.
+-	 */
+-	if (!blk_zone_wplug_prepare_bio(zwplug, bio))
+-		goto plug;
++	if (!blk_zone_wplug_prepare_bio(zwplug, bio)) {
++		spin_unlock_irqrestore(&zwplug->lock, flags);
++		bio_io_error(bio);
++		return true;
++	}
+ 
+ 	zwplug->flags |= BLK_ZONE_WPLUG_PLUGGED;
+ 
+@@ -1011,8 +1048,7 @@ static bool blk_zone_wplug_handle_write(struct bio *bio, unsigned int nr_segs)
+ 	return false;
+ 
+ plug:
+-	zwplug->flags |= BLK_ZONE_WPLUG_PLUGGED;
+-	blk_zone_wplug_add_bio(zwplug, bio, nr_segs);
++	disk_zone_wplug_add_bio(disk, zwplug, bio, nr_segs);
+ 
+ 	spin_unlock_irqrestore(&zwplug->lock, flags);
+ 
+@@ -1096,19 +1132,6 @@ bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs)
+ }
+ EXPORT_SYMBOL_GPL(blk_zone_plug_bio);
+ 
+-static void disk_zone_wplug_schedule_bio_work(struct gendisk *disk,
+-					      struct blk_zone_wplug *zwplug)
+-{
+-	/*
+-	 * Take a reference on the zone write plug and schedule the submission
+-	 * of the next plugged BIO. blk_zone_wplug_bio_work() will release the
+-	 * reference we take here.
+-	 */
+-	WARN_ON_ONCE(!(zwplug->flags & BLK_ZONE_WPLUG_PLUGGED));
+-	atomic_inc(&zwplug->ref);
+-	queue_work(disk->zone_wplugs_wq, &zwplug->bio_work);
+-}
+-
+ static void disk_zone_wplug_unplug_bio(struct gendisk *disk,
+ 				       struct blk_zone_wplug *zwplug)
+ {
+@@ -1116,16 +1139,6 @@ static void disk_zone_wplug_unplug_bio(struct gendisk *disk,
+ 
+ 	spin_lock_irqsave(&zwplug->lock, flags);
+ 
+-	/*
+-	 * If we had an error, schedule error recovery. The recovery work
+-	 * will restart submission of plugged BIOs.
+-	 */
+-	if (zwplug->flags & BLK_ZONE_WPLUG_ERROR) {
+-		spin_unlock_irqrestore(&zwplug->lock, flags);
+-		kblockd_schedule_work(&disk->zone_wplugs_work);
+-		return;
+-	}
+-
+ 	/* Schedule submission of the next plugged BIO if we have one. */
+ 	if (!bio_list_empty(&zwplug->bio_list)) {
+ 		disk_zone_wplug_schedule_bio_work(disk, zwplug);
+@@ -1168,12 +1181,13 @@ void blk_zone_write_plug_bio_endio(struct bio *bio)
+ 	}
+ 
+ 	/*
+-	 * If the BIO failed, mark the plug as having an error to trigger
+-	 * recovery.
++	 * If the BIO failed, abort all plugged BIOs and mark the plug as
++	 * needing a write pointer update.
+ 	 */
+ 	if (bio->bi_status != BLK_STS_OK) {
+ 		spin_lock_irqsave(&zwplug->lock, flags);
+-		disk_zone_wplug_set_error(disk, zwplug);
++		disk_zone_wplug_abort(zwplug);
++		zwplug->flags |= BLK_ZONE_WPLUG_NEED_WP_UPDATE;
+ 		spin_unlock_irqrestore(&zwplug->lock, flags);
+ 	}
+ 
+@@ -1229,6 +1243,7 @@ static void blk_zone_wplug_bio_work(struct work_struct *work)
+ 	 */
+ 	spin_lock_irqsave(&zwplug->lock, flags);
+ 
++again:
+ 	bio = bio_list_pop(&zwplug->bio_list);
+ 	if (!bio) {
+ 		zwplug->flags &= ~BLK_ZONE_WPLUG_PLUGGED;
+@@ -1237,10 +1252,8 @@ static void blk_zone_wplug_bio_work(struct work_struct *work)
+ 	}
+ 
+ 	if (!blk_zone_wplug_prepare_bio(zwplug, bio)) {
+-		/* Error recovery will decide what to do with the BIO. */
+-		bio_list_add_head(&zwplug->bio_list, bio);
+-		spin_unlock_irqrestore(&zwplug->lock, flags);
+-		goto put_zwplug;
++		blk_zone_wplug_bio_io_error(zwplug, bio);
++		goto again;
+ 	}
+ 
+ 	spin_unlock_irqrestore(&zwplug->lock, flags);
+@@ -1262,120 +1275,6 @@ static void blk_zone_wplug_bio_work(struct work_struct *work)
+ 	disk_put_zone_wplug(zwplug);
+ }
+ 
+-static unsigned int blk_zone_wp_offset(struct blk_zone *zone)
+-{
+-	switch (zone->cond) {
+-	case BLK_ZONE_COND_IMP_OPEN:
+-	case BLK_ZONE_COND_EXP_OPEN:
+-	case BLK_ZONE_COND_CLOSED:
+-		return zone->wp - zone->start;
+-	case BLK_ZONE_COND_FULL:
+-		return zone->len;
+-	case BLK_ZONE_COND_EMPTY:
+-		return 0;
+-	case BLK_ZONE_COND_NOT_WP:
+-	case BLK_ZONE_COND_OFFLINE:
+-	case BLK_ZONE_COND_READONLY:
+-	default:
+-		/*
+-		 * Conventional, offline and read-only zones do not have a valid
+-		 * write pointer.
+-		 */
+-		return UINT_MAX;
+-	}
+-}
+-
+-static int blk_zone_wplug_report_zone_cb(struct blk_zone *zone,
+-					 unsigned int idx, void *data)
+-{
+-	struct blk_zone *zonep = data;
+-
+-	*zonep = *zone;
+-	return 0;
+-}
+-
+-static void disk_zone_wplug_handle_error(struct gendisk *disk,
+-					 struct blk_zone_wplug *zwplug)
+-{
+-	sector_t zone_start_sector =
+-		bdev_zone_sectors(disk->part0) * zwplug->zone_no;
+-	unsigned int noio_flag;
+-	struct blk_zone zone;
+-	unsigned long flags;
+-	int ret;
+-
+-	/* Get the current zone information from the device. */
+-	noio_flag = memalloc_noio_save();
+-	ret = disk->fops->report_zones(disk, zone_start_sector, 1,
+-				       blk_zone_wplug_report_zone_cb, &zone);
+-	memalloc_noio_restore(noio_flag);
+-
+-	spin_lock_irqsave(&zwplug->lock, flags);
+-
+-	/*
+-	 * A zone reset or finish may have cleared the error already. In such
+-	 * case, do nothing as the report zones may have seen the "old" write
+-	 * pointer value before the reset/finish operation completed.
+-	 */
+-	if (!(zwplug->flags & BLK_ZONE_WPLUG_ERROR))
+-		goto unlock;
+-
+-	zwplug->flags &= ~BLK_ZONE_WPLUG_ERROR;
+-
+-	if (ret != 1) {
+-		/*
+-		 * We failed to get the zone information, meaning that something
+-		 * is likely really wrong with the device. Abort all remaining
+-		 * plugged BIOs as otherwise we could endup waiting forever on
+-		 * plugged BIOs to complete if there is a queue freeze on-going.
+-		 */
+-		disk_zone_wplug_abort(zwplug);
+-		goto unplug;
+-	}
+-
+-	/* Update the zone write pointer offset. */
+-	zwplug->wp_offset = blk_zone_wp_offset(&zone);
+-	disk_zone_wplug_abort_unaligned(disk, zwplug);
+-
+-	/* Restart BIO submission if we still have any BIO left. */
+-	if (!bio_list_empty(&zwplug->bio_list)) {
+-		disk_zone_wplug_schedule_bio_work(disk, zwplug);
+-		goto unlock;
+-	}
+-
+-unplug:
+-	zwplug->flags &= ~BLK_ZONE_WPLUG_PLUGGED;
+-	if (disk_should_remove_zone_wplug(disk, zwplug))
+-		disk_remove_zone_wplug(disk, zwplug);
+-
+-unlock:
+-	spin_unlock_irqrestore(&zwplug->lock, flags);
+-}
+-
+-static void disk_zone_wplugs_work(struct work_struct *work)
+-{
+-	struct gendisk *disk =
+-		container_of(work, struct gendisk, zone_wplugs_work);
+-	struct blk_zone_wplug *zwplug;
+-	unsigned long flags;
+-
+-	spin_lock_irqsave(&disk->zone_wplugs_lock, flags);
+-
+-	while (!list_empty(&disk->zone_wplugs_err_list)) {
+-		zwplug = list_first_entry(&disk->zone_wplugs_err_list,
+-					  struct blk_zone_wplug, link);
+-		list_del_init(&zwplug->link);
+-		spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags);
+-
+-		disk_zone_wplug_handle_error(disk, zwplug);
+-		disk_put_zone_wplug(zwplug);
+-
+-		spin_lock_irqsave(&disk->zone_wplugs_lock, flags);
+-	}
+-
+-	spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags);
+-}
+-
+ static inline unsigned int disk_zone_wplugs_hash_size(struct gendisk *disk)
+ {
+ 	return 1U << disk->zone_wplugs_hash_bits;
+@@ -1384,8 +1283,6 @@ static inline unsigned int disk_zone_wplugs_hash_size(struct gendisk *disk)
+ void disk_init_zone_resources(struct gendisk *disk)
+ {
+ 	spin_lock_init(&disk->zone_wplugs_lock);
+-	INIT_LIST_HEAD(&disk->zone_wplugs_err_list);
+-	INIT_WORK(&disk->zone_wplugs_work, disk_zone_wplugs_work);
+ }
+ 
+ /*
+@@ -1450,7 +1347,7 @@ static void disk_destroy_zone_wplugs_hash_table(struct gendisk *disk)
+ 		while (!hlist_empty(&disk->zone_wplugs_hash[i])) {
+ 			zwplug = hlist_entry(disk->zone_wplugs_hash[i].first,
+ 					     struct blk_zone_wplug, node);
+-			atomic_inc(&zwplug->ref);
++			refcount_inc(&zwplug->ref);
+ 			disk_remove_zone_wplug(disk, zwplug);
+ 			disk_put_zone_wplug(zwplug);
+ 		}
+@@ -1484,8 +1381,6 @@ void disk_free_zone_resources(struct gendisk *disk)
+ 	if (!disk->zone_wplugs_pool)
+ 		return;
+ 
+-	cancel_work_sync(&disk->zone_wplugs_work);
+-
+ 	if (disk->zone_wplugs_wq) {
+ 		destroy_workqueue(disk->zone_wplugs_wq);
+ 		disk->zone_wplugs_wq = NULL;
+@@ -1682,6 +1577,8 @@ static int blk_revalidate_seq_zone(struct blk_zone *zone, unsigned int idx,
+ 	if (!disk->zone_wplugs_hash)
+ 		return 0;
+ 
++	disk_zone_wplug_sync_wp_offset(disk, zone);
++
+ 	wp_offset = blk_zone_wp_offset(zone);
+ 	if (!wp_offset || wp_offset >= zone->capacity)
+ 		return 0;
+@@ -1818,6 +1715,7 @@ int blk_revalidate_disk_zones(struct gendisk *disk)
+ 		memalloc_noio_restore(noio_flag);
+ 		return ret;
+ 	}
++
+ 	ret = disk->fops->report_zones(disk, 0, UINT_MAX,
+ 				       blk_revalidate_zone_cb, &args);
+ 	if (!ret) {
+@@ -1854,6 +1752,48 @@ int blk_revalidate_disk_zones(struct gendisk *disk)
+ }
+ EXPORT_SYMBOL_GPL(blk_revalidate_disk_zones);
+ 
++/**
++ * blk_zone_issue_zeroout - zero-fill a block range in a zone
++ * @bdev:	blockdev to write
++ * @sector:	start sector
++ * @nr_sects:	number of sectors to write
++ * @gfp_mask:	memory allocation flags (for bio_alloc)
++ *
++ * Description:
++ *  Zero-fill a block range in a zone (@sector must be equal to the zone write
++ *  pointer), handling potential errors due to the (initially unknown) lack of
++ *  hardware offload (See blkdev_issue_zeroout()).
++ */
++int blk_zone_issue_zeroout(struct block_device *bdev, sector_t sector,
++			   sector_t nr_sects, gfp_t gfp_mask)
++{
++	int ret;
++
++	if (WARN_ON_ONCE(!bdev_is_zoned(bdev)))
++		return -EIO;
++
++	ret = blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask,
++				   BLKDEV_ZERO_NOFALLBACK);
++	if (ret != -EOPNOTSUPP)
++		return ret;
++
++	/*
++	 * The failed call to blkdev_issue_zeroout() advanced the zone write
++	 * pointer. Undo this using a report zone to update the zone write
++	 * pointer to the correct current value.
++	 */
++	ret = disk_zone_sync_wp_offset(bdev->bd_disk, sector);
++	if (ret != 1)
++		return ret < 0 ? ret : -EIO;
++
++	/*
++	 * Retry without BLKDEV_ZERO_NOFALLBACK to force the fallback to a
++	 * regular write with zero-pages.
++	 */
++	return blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask, 0);
++}
++EXPORT_SYMBOL_GPL(blk_zone_issue_zeroout);
++
+ #ifdef CONFIG_BLK_DEBUG_FS
+ 
+ int queue_zone_wplugs_show(void *data, struct seq_file *m)
+@@ -1876,7 +1816,7 @@ int queue_zone_wplugs_show(void *data, struct seq_file *m)
+ 			spin_lock_irqsave(&zwplug->lock, flags);
+ 			zwp_zone_no = zwplug->zone_no;
+ 			zwp_flags = zwplug->flags;
+-			zwp_ref = atomic_read(&zwplug->ref);
++			zwp_ref = refcount_read(&zwplug->ref);
+ 			zwp_wp_offset = zwplug->wp_offset;
+ 			zwp_bio_list_size = bio_list_size(&zwplug->bio_list);
+ 			spin_unlock_irqrestore(&zwplug->lock, flags);
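The atomic_t to refcount_t switch running through the file above is more than a rename: refcount_t saturates instead of wrapping on overflow, and refcount_inc_not_zero() only takes a reference while the object is still live, which is exactly what disk_get_zone_wplug() relies on during RCU lookup. A C11-atomics sketch of the inc-not-zero semantics:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool inc_not_zero(atomic_int *ref)
{
	int old = atomic_load(ref);

	while (old != 0) {	/* never resurrect a dying object */
		if (atomic_compare_exchange_weak(ref, &old, old + 1))
			return true;
	}
	return false;
}

int main(void)
{
	atomic_int ref = 1;

	printf("%d\n", inc_not_zero(&ref));	/* 1: lookup succeeds */
	atomic_store(&ref, 0);
	printf("%d\n", inc_not_zero(&ref));	/* 0: object is being freed */
	return 0;
}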
+diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
+index 95f78383bbdba1..bff2d099f4691e 100644
+--- a/drivers/acpi/acpica/evxfregn.c
++++ b/drivers/acpi/acpica/evxfregn.c
+@@ -232,8 +232,6 @@ acpi_remove_address_space_handler(acpi_handle device,
+ 
+ 			/* Now we can delete the handler object */
+ 
+-			acpi_os_release_mutex(handler_obj->address_space.
+-					      context_mutex);
+ 			acpi_ut_remove_reference(handler_obj);
+ 			goto unlock_and_exit;
+ 		}
+diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
+index 5429ec9ef06f06..a5d47819b3a4e2 100644
+--- a/drivers/acpi/nfit/core.c
++++ b/drivers/acpi/nfit/core.c
+@@ -454,8 +454,13 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
+ 	if (cmd_rc)
+ 		*cmd_rc = -EINVAL;
+ 
+-	if (cmd == ND_CMD_CALL)
++	if (cmd == ND_CMD_CALL) {
++		if (!buf || buf_len < sizeof(*call_pkg))
++			return -EINVAL;
++
+ 		call_pkg = buf;
++	}
++
+ 	func = cmd_to_func(nfit_mem, cmd, call_pkg, &family);
+ 	if (func < 0)
+ 		return func;
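The acpi_nfit_ctl() hunk above adds the standard guard for reinterpreting a caller-supplied buffer: validate the pointer and the length against the target structure size before casting. A minimal sketch of the rule, with a stand-in package type rather than the real nd_cmd_pkg layout:

#include <stddef.h>

struct call_pkg { unsigned int family; unsigned int func; };

/* Returns -1 (models -EINVAL) unless buf can safely back a call_pkg. */
static int handle_call(void *buf, size_t buf_len)
{
	struct call_pkg *pkg;

	if (!buf || buf_len < sizeof(*pkg))
		return -1;

	pkg = buf;
	return (int)pkg->func;
}

int main(void)
{
	struct call_pkg pkg = { .family = 0, .func = 7 };

	return handle_call(&pkg, sizeof(pkg)) == 7 ? 0 : 1;
}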
+diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
+index 7fe842dae1ec05..821867de43bea3 100644
+--- a/drivers/acpi/resource.c
++++ b/drivers/acpi/resource.c
+@@ -250,6 +250,9 @@ static bool acpi_decode_space(struct resource_win *win,
+ 	switch (addr->resource_type) {
+ 	case ACPI_MEMORY_RANGE:
+ 		acpi_dev_memresource_flags(res, len, wp);
++
++		if (addr->info.mem.caching == ACPI_PREFETCHABLE_MEMORY)
++			res->flags |= IORESOURCE_PREFETCH;
+ 		break;
+ 	case ACPI_IO_RANGE:
+ 		acpi_dev_ioresource_flags(res, len, iodec,
+@@ -265,9 +268,6 @@ static bool acpi_decode_space(struct resource_win *win,
+ 	if (addr->producer_consumer == ACPI_PRODUCER)
+ 		res->flags |= IORESOURCE_WINDOW;
+ 
+-	if (addr->info.mem.caching == ACPI_PREFETCHABLE_MEMORY)
+-		res->flags |= IORESOURCE_PREFETCH;
+-
+ 	return !(res->flags & IORESOURCE_DISABLED);
+ }
+ 
+diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c
+index 63ef7bb073ce03..596c6d294da906 100644
+--- a/drivers/ata/sata_highbank.c
++++ b/drivers/ata/sata_highbank.c
+@@ -348,6 +348,7 @@ static int highbank_initialize_phys(struct device *dev, void __iomem *addr)
+ 			phy_nodes[phy] = phy_data.np;
+ 			cphy_base[phy] = of_iomap(phy_nodes[phy], 0);
+ 			if (cphy_base[phy] == NULL) {
++				of_node_put(phy_data.np);
+ 				return 0;
+ 			}
+ 			phy_count += 1;
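The one-line sata_highbank fix above applies the OF reference-counting rule: phandle iterators return each node with a reference held, so every early exit has to drop it with of_node_put(). A refcount sketch of the balanced error path, with get/put helpers as stand-ins for the OF API:

#include <stdio.h>

struct ref { int count; };

static void get(struct ref *r) { r->count++; }
static void put(struct ref *r) { r->count--; }

/* The iterator hands back r with one reference taken; every exit path
 * must pair it with put(), including the error return added above. */
static int probe_one(struct ref *r, int map_ok)
{
	get(r);			/* models of_parse_phandle_with_args() */
	if (!map_ok) {
		put(r);		/* the of_node_put() that was missing */
		return -1;
	}
	/* keep the reference while the mapping is in use */
	return 0;
}

int main(void)
{
	struct ref r = { 0 };

	probe_one(&r, 0);
	printf("refs after failed probe: %d\n", r.count);	/* 0: balanced */
	return 0;
}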
+diff --git a/drivers/bluetooth/btmtk.c b/drivers/bluetooth/btmtk.c
+index 480e4adba9faa6..85e99641eaae02 100644
+--- a/drivers/bluetooth/btmtk.c
++++ b/drivers/bluetooth/btmtk.c
+@@ -395,6 +395,7 @@ int btmtk_process_coredump(struct hci_dev *hdev, struct sk_buff *skb)
+ {
+ 	struct btmtk_data *data = hci_get_priv(hdev);
+ 	int err;
++	bool complete = false;
+ 
+ 	if (!IS_ENABLED(CONFIG_DEV_COREDUMP)) {
+ 		kfree_skb(skb);
+@@ -416,19 +417,22 @@ int btmtk_process_coredump(struct hci_dev *hdev, struct sk_buff *skb)
+ 		fallthrough;
+ 	case HCI_DEVCOREDUMP_ACTIVE:
+ 	default:
++		/* Mediatek coredump data would be more than MTK_COREDUMP_NUM */
++		if (data->cd_info.cnt >= MTK_COREDUMP_NUM &&
++		    skb->len > MTK_COREDUMP_END_LEN)
++			if (!memcmp((char *)&skb->data[skb->len - MTK_COREDUMP_END_LEN],
++				    MTK_COREDUMP_END, MTK_COREDUMP_END_LEN - 1))
++				complete = true;
++
+ 		err = hci_devcd_append(hdev, skb);
+ 		if (err < 0)
+ 			break;
+ 		data->cd_info.cnt++;
+ 
+-		/* Mediatek coredump data would be more than MTK_COREDUMP_NUM */
+-		if (data->cd_info.cnt > MTK_COREDUMP_NUM &&
+-		    skb->len > MTK_COREDUMP_END_LEN)
+-			if (!memcmp((char *)&skb->data[skb->len - MTK_COREDUMP_END_LEN],
+-				    MTK_COREDUMP_END, MTK_COREDUMP_END_LEN - 1)) {
+-				bt_dev_info(hdev, "Mediatek coredump end");
+-				hci_devcd_complete(hdev);
+-			}
++		if (complete) {
++			bt_dev_info(hdev, "Mediatek coredump end");
++			hci_devcd_complete(hdev);
++		}
+ 
+ 		break;
+ 	}
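The btmtk rework above changes two things: the end-marker test now runs before hci_devcd_append() consumes the skb, and the count comparison becomes >= so the fragment carrying the marker itself qualifies. A sketch of the decide-then-consume ordering with plain buffers; the marker string and lengths are illustrative, not the MTK_COREDUMP_* values:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define END_MARKER "coredump end"

static bool is_last_fragment(const char *buf, size_t len, int cnt, int min_cnt)
{
	size_t mlen = strlen(END_MARKER);

	return cnt >= min_cnt && len > mlen &&
	       !memcmp(buf + len - mlen, END_MARKER, mlen);
}

int main(void)
{
	const char frag[] = "....coredump end";

	/* Decide completion first, then hand the fragment off. */
	bool complete = is_last_fragment(frag, sizeof(frag) - 1, 10, 10);

	printf("complete=%d\n", complete);
	return 0;
}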
+diff --git a/drivers/clk/clk-en7523.c b/drivers/clk/clk-en7523.c
+index bc21b292144926..62a62eaba2aad8 100644
+--- a/drivers/clk/clk-en7523.c
++++ b/drivers/clk/clk-en7523.c
+@@ -92,6 +92,7 @@ static const u32 slic_base[] = { 100000000, 3125000 };
+ static const u32 npu_base[] = { 333000000, 400000000, 500000000 };
+ /* EN7581 */
+ static const u32 emi7581_base[] = { 540000000, 480000000, 400000000, 300000000 };
++static const u32 bus7581_base[] = { 600000000, 540000000 };
+ static const u32 npu7581_base[] = { 800000000, 750000000, 720000000, 600000000 };
+ static const u32 crypto_base[] = { 540000000, 480000000 };
+ 
+@@ -227,8 +228,8 @@ static const struct en_clk_desc en7581_base_clks[] = {
+ 		.base_reg = REG_BUS_CLK_DIV_SEL,
+ 		.base_bits = 1,
+ 		.base_shift = 8,
+-		.base_values = bus_base,
+-		.n_base_values = ARRAY_SIZE(bus_base),
++		.base_values = bus7581_base,
++		.n_base_values = ARRAY_SIZE(bus7581_base),
+ 
+ 		.div_bits = 3,
+ 		.div_shift = 0,
+diff --git a/drivers/crypto/hisilicon/debugfs.c b/drivers/crypto/hisilicon/debugfs.c
+index 1b9b7bccdeff08..45e130b901eb5e 100644
+--- a/drivers/crypto/hisilicon/debugfs.c
++++ b/drivers/crypto/hisilicon/debugfs.c
+@@ -192,7 +192,7 @@ static int qm_sqc_dump(struct hisi_qm *qm, char *s, char *name)
+ 
+ 	down_read(&qm->qps_lock);
+ 	if (qm->sqc) {
+-		memcpy(&sqc, qm->sqc + qp_id * sizeof(struct qm_sqc), sizeof(struct qm_sqc));
++		memcpy(&sqc, qm->sqc + qp_id, sizeof(struct qm_sqc));
+ 		sqc.base_h = cpu_to_le32(QM_XQC_ADDR_MASK);
+ 		sqc.base_l = cpu_to_le32(QM_XQC_ADDR_MASK);
+ 		dump_show(qm, &sqc, sizeof(struct qm_sqc), "SOFT SQC");
+@@ -229,7 +229,7 @@ static int qm_cqc_dump(struct hisi_qm *qm, char *s, char *name)
+ 
+ 	down_read(&qm->qps_lock);
+ 	if (qm->cqc) {
+-		memcpy(&cqc, qm->cqc + qp_id * sizeof(struct qm_cqc), sizeof(struct qm_cqc));
++		memcpy(&cqc, qm->cqc + qp_id, sizeof(struct qm_cqc));
+ 		cqc.base_h = cpu_to_le32(QM_XQC_ADDR_MASK);
+ 		cqc.base_l = cpu_to_le32(QM_XQC_ADDR_MASK);
+ 		dump_show(qm, &cqc, sizeof(struct qm_cqc), "SOFT CQC");
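Both hisilicon hunks above fix the same C pointer-scaling bug: qm->sqc and qm->cqc are typed struct pointers, so adding qp_id already advances by whole elements, and the extra * sizeof(...) factor indexed the dump far past the intended queue context. A standalone demonstration of the double scaling:

#include <stdio.h>

struct entry { char payload[32]; };

int main(void)
{
	unsigned int qp_id = 2;

	/* Correct: pointer arithmetic on a typed pointer is in elements. */
	size_t ok_index  = qp_id;

	/* Buggy: the byte offset is scaled by the element size again. */
	size_t bad_index = qp_id * sizeof(struct entry);

	printf("wanted entry %zu, buggy code read entry %zu (%zu bytes off)\n",
	       ok_index, bad_index,
	       (bad_index - ok_index) * sizeof(struct entry));
	return 0;
}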
+diff --git a/drivers/gpio/gpio-graniterapids.c b/drivers/gpio/gpio-graniterapids.c
+index f2e911a3d2ca02..ad6a045fd3d2d2 100644
+--- a/drivers/gpio/gpio-graniterapids.c
++++ b/drivers/gpio/gpio-graniterapids.c
+@@ -32,12 +32,14 @@
+ #define GNR_PINS_PER_REG 32
+ #define GNR_NUM_REGS DIV_ROUND_UP(GNR_NUM_PINS, GNR_PINS_PER_REG)
+ 
+-#define GNR_CFG_BAR		0x00
++#define GNR_CFG_PADBAR		0x00
+ #define GNR_CFG_LOCK_OFFSET	0x04
+-#define GNR_GPI_STATUS_OFFSET	0x20
++#define GNR_GPI_STATUS_OFFSET	0x14
+ #define GNR_GPI_ENABLE_OFFSET	0x24
+ 
+-#define GNR_CFG_DW_RX_MASK	GENMASK(25, 22)
++#define GNR_CFG_DW_HOSTSW_MODE	BIT(27)
++#define GNR_CFG_DW_RX_MASK	GENMASK(23, 22)
++#define GNR_CFG_DW_INTSEL_MASK	GENMASK(21, 14)
+ #define GNR_CFG_DW_RX_DISABLE	FIELD_PREP(GNR_CFG_DW_RX_MASK, 2)
+ #define GNR_CFG_DW_RX_EDGE	FIELD_PREP(GNR_CFG_DW_RX_MASK, 1)
+ #define GNR_CFG_DW_RX_LEVEL	FIELD_PREP(GNR_CFG_DW_RX_MASK, 0)
+@@ -50,6 +52,7 @@
+  * struct gnr_gpio - Intel Granite Rapids-D vGPIO driver state
+  * @gc: GPIO controller interface
+  * @reg_base: base address of the GPIO registers
++ * @pad_base: base address of the vGPIO pad configuration registers
+  * @ro_bitmap: bitmap of read-only pins
+  * @lock: guard the registers
+  * @pad_backup: backup of the register state for suspend
+@@ -57,6 +60,7 @@
+ struct gnr_gpio {
+ 	struct gpio_chip gc;
+ 	void __iomem *reg_base;
++	void __iomem *pad_base;
+ 	DECLARE_BITMAP(ro_bitmap, GNR_NUM_PINS);
+ 	raw_spinlock_t lock;
+ 	u32 pad_backup[];
+@@ -65,7 +69,7 @@ struct gnr_gpio {
+ static void __iomem *gnr_gpio_get_padcfg_addr(const struct gnr_gpio *priv,
+ 					      unsigned int gpio)
+ {
+-	return priv->reg_base + gpio * sizeof(u32);
++	return priv->pad_base + gpio * sizeof(u32);
+ }
+ 
+ static int gnr_gpio_configure_line(struct gpio_chip *gc, unsigned int gpio,
+@@ -88,6 +92,20 @@ static int gnr_gpio_configure_line(struct gpio_chip *gc, unsigned int gpio,
+ 	return 0;
+ }
+ 
++static int gnr_gpio_request(struct gpio_chip *gc, unsigned int gpio)
++{
++	struct gnr_gpio *priv = gpiochip_get_data(gc);
++	u32 dw;
++
++	dw = readl(gnr_gpio_get_padcfg_addr(priv, gpio));
++	if (!(dw & GNR_CFG_DW_HOSTSW_MODE)) {
++		dev_warn(gc->parent, "GPIO %u is not owned by host", gpio);
++		return -EBUSY;
++	}
++
++	return 0;
++}
++
+ static int gnr_gpio_get(struct gpio_chip *gc, unsigned int gpio)
+ {
+ 	const struct gnr_gpio *priv = gpiochip_get_data(gc);
+@@ -139,6 +157,7 @@ static int gnr_gpio_direction_output(struct gpio_chip *gc, unsigned int gpio, in
+ 
+ static const struct gpio_chip gnr_gpio_chip = {
+ 	.owner		  = THIS_MODULE,
++	.request	  = gnr_gpio_request,
+ 	.get		  = gnr_gpio_get,
+ 	.set		  = gnr_gpio_set,
+ 	.get_direction    = gnr_gpio_get_direction,
+@@ -166,7 +185,7 @@ static void gnr_gpio_irq_ack(struct irq_data *d)
+ 	guard(raw_spinlock_irqsave)(&priv->lock);
+ 
+ 	reg = readl(addr);
+-	reg &= ~BIT(bit_idx);
++	reg |= BIT(bit_idx);
+ 	writel(reg, addr);
+ }
+ 
+@@ -209,10 +228,18 @@ static void gnr_gpio_irq_unmask(struct irq_data *d)
+ static int gnr_gpio_irq_set_type(struct irq_data *d, unsigned int type)
+ {
+ 	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+-	irq_hw_number_t pin = irqd_to_hwirq(d);
+-	u32 mask = GNR_CFG_DW_RX_MASK;
++	struct gnr_gpio *priv = gpiochip_get_data(gc);
++	irq_hw_number_t hwirq = irqd_to_hwirq(d);
++	u32 reg;
+ 	u32 set;
+ 
++	/* Allow interrupts only if Interrupt Select field is non-zero */
++	reg = readl(gnr_gpio_get_padcfg_addr(priv, hwirq));
++	if (!(reg & GNR_CFG_DW_INTSEL_MASK)) {
++		dev_dbg(gc->parent, "GPIO %lu cannot be used as IRQ", hwirq);
++		return -EPERM;
++	}
++
+ 	/* Falling edge and level low triggers not supported by the GPIO controller */
+ 	switch (type) {
+ 	case IRQ_TYPE_NONE:
+@@ -230,10 +257,11 @@ static int gnr_gpio_irq_set_type(struct irq_data *d, unsigned int type)
+ 		return -EINVAL;
+ 	}
+ 
+-	return gnr_gpio_configure_line(gc, pin, mask, set);
++	return gnr_gpio_configure_line(gc, hwirq, GNR_CFG_DW_RX_MASK, set);
+ }
+ 
+ static const struct irq_chip gnr_gpio_irq_chip = {
++	.name		= "gpio-graniterapids",
+ 	.irq_ack	= gnr_gpio_irq_ack,
+ 	.irq_mask	= gnr_gpio_irq_mask,
+ 	.irq_unmask	= gnr_gpio_irq_unmask,
+@@ -291,6 +319,7 @@ static int gnr_gpio_probe(struct platform_device *pdev)
+ 	struct gnr_gpio *priv;
+ 	void __iomem *regs;
+ 	int irq, ret;
++	u32 offset;
+ 
+ 	priv = devm_kzalloc(dev, struct_size(priv, pad_backup, num_backup_pins), GFP_KERNEL);
+ 	if (!priv)
+@@ -302,6 +331,10 @@ static int gnr_gpio_probe(struct platform_device *pdev)
+ 	if (IS_ERR(regs))
+ 		return PTR_ERR(regs);
+ 
++	priv->reg_base = regs;
++	offset = readl(priv->reg_base + GNR_CFG_PADBAR);
++	priv->pad_base = priv->reg_base + offset;
++
+ 	irq = platform_get_irq(pdev, 0);
+ 	if (irq < 0)
+ 		return irq;
+@@ -311,8 +344,6 @@ static int gnr_gpio_probe(struct platform_device *pdev)
+ 	if (ret)
+ 		return dev_err_probe(dev, ret, "failed to request interrupt\n");
+ 
+-	priv->reg_base = regs + readl(regs + GNR_CFG_BAR);
+-
+ 	gnr_gpio_init_pin_ro_bits(dev, priv->reg_base + GNR_CFG_LOCK_OFFSET,
+ 				  priv->ro_bitmap);
+ 
+@@ -324,7 +355,6 @@ static int gnr_gpio_probe(struct platform_device *pdev)
+ 
+ 	girq = &priv->gc.irq;
+ 	gpio_irq_chip_set_chip(girq, &gnr_gpio_irq_chip);
+-	girq->chip->name	= dev_name(dev);
+ 	girq->parent_handler	= NULL;
+ 	girq->num_parents	= 0;
+ 	girq->parents		= NULL;
+diff --git a/drivers/gpio/gpio-ljca.c b/drivers/gpio/gpio-ljca.c
+index dfec9fbfc7a9bd..c2a9b425397441 100644
+--- a/drivers/gpio/gpio-ljca.c
++++ b/drivers/gpio/gpio-ljca.c
+@@ -82,9 +82,9 @@ static int ljca_gpio_config(struct ljca_gpio_dev *ljca_gpio, u8 gpio_id,
+ 	int ret;
+ 
+ 	mutex_lock(&ljca_gpio->trans_lock);
++	packet->num = 1;
+ 	packet->item[0].index = gpio_id;
+ 	packet->item[0].value = config | ljca_gpio->connect_mode[gpio_id];
+-	packet->num = 1;
+ 
+ 	ret = ljca_transfer(ljca_gpio->ljca, LJCA_GPIO_CONFIG, (u8 *)packet,
+ 			    struct_size(packet, item, packet->num), NULL, 0);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index d891ab779ca7f5..5df21529b3b13e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -1801,13 +1801,18 @@ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
+ 	if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->exec.ticket)
+ 		return -EINVAL;
+ 
++	/* Make sure VRAM is allocated contiguously */
+ 	(*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
+-	amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
+-	for (i = 0; i < (*bo)->placement.num_placement; i++)
+-		(*bo)->placements[i].flags |= TTM_PL_FLAG_CONTIGUOUS;
+-	r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
+-	if (r)
+-		return r;
++	if ((*bo)->tbo.resource->mem_type == TTM_PL_VRAM &&
++	    !((*bo)->tbo.resource->placement & TTM_PL_FLAG_CONTIGUOUS)) {
++
++		amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
++		for (i = 0; i < (*bo)->placement.num_placement; i++)
++			(*bo)->placements[i].flags |= TTM_PL_FLAG_CONTIGUOUS;
++		r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
++		if (r)
++			return r;
++	}
+ 
+ 	return amdgpu_ttm_alloc_gart(&(*bo)->tbo);
+ }
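[Aside: the reworked amdgpu_cs_find_mapping() only forces a move when the BO sits in VRAM without the contiguous flag set. The condition in isolation, as a compilable sketch with stand-in types — the TTM constants here are illustrative values, not the kernel's:]

    #include <stdbool.h>
    #include <stdint.h>

    #define TTM_PL_VRAM		2		/* illustrative value */
    #define TTM_PL_FLAG_CONTIGUOUS	(1u << 0)	/* illustrative value */

    struct res_stub {
    	uint32_t mem_type;
    	uint32_t placement;
    };

    /* Re-validation is needed only when the BO is in VRAM but not yet
     * contiguous; a BO that is already contiguous (or not in VRAM) is
     * left alone, avoiding a pointless and possibly failing move. */
    static bool needs_contiguous_revalidate(const struct res_stub *res)
    {
    	return res->mem_type == TTM_PL_VRAM &&
    	       !(res->placement & TTM_PL_FLAG_CONTIGUOUS);
    }

    int main(void)
    {
    	struct res_stub in_gtt = { 1, 0 };
    	struct res_stub vram_frag = { TTM_PL_VRAM, 0 };

    	return (!needs_contiguous_revalidate(&in_gtt) &&
    		needs_contiguous_revalidate(&vram_frag)) ? 0 : 1;
    }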
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+index 31fd30dcd593ba..65bb26215e867a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+@@ -551,6 +551,8 @@ static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *abo)
+ 	for (i = 0; i < abo->placement.num_placement; ++i) {
+ 		abo->placements[i].fpfn = 0 >> PAGE_SHIFT;
+ 		abo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
++		if (abo->placements[i].mem_type == TTM_PL_VRAM)
++			abo->placements[i].flags |= TTM_PL_FLAG_CONTIGUOUS;
+ 	}
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 6005280f5f38f0..8d2562d0f143c7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -674,12 +674,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
+ 	pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
+ 		ring->funcs->emit_wreg;
+ 
+-	if (adev->gfx.enable_cleaner_shader &&
+-	    ring->funcs->emit_cleaner_shader &&
+-	    job->enforce_isolation)
+-		ring->funcs->emit_cleaner_shader(ring);
+-
+-	if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync)
++	if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync &&
++	    !(job->enforce_isolation && !job->vmid))
+ 		return 0;
+ 
+ 	amdgpu_ring_ib_begin(ring);
+@@ -690,6 +686,11 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
+ 	if (need_pipe_sync)
+ 		amdgpu_ring_emit_pipeline_sync(ring);
+ 
++	if (adev->gfx.enable_cleaner_shader &&
++	    ring->funcs->emit_cleaner_shader &&
++	    job->enforce_isolation)
++		ring->funcs->emit_cleaner_shader(ring);
++
+ 	if (vm_flush_needed) {
+ 		trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
+ 		amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
+diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+index 6068b784dc6938..9a30b8c10838c1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+@@ -1289,7 +1289,7 @@ static int uvd_v7_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
+ 					   struct amdgpu_job *job,
+ 					   struct amdgpu_ib *ib)
+ {
+-	struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
++	struct amdgpu_ring *ring = amdgpu_job_ring(job);
+ 	unsigned i;
+ 
+ 	/* No patching necessary for the first instance */
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+index 8de61cc524c943..d2993594c848ad 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+@@ -1422,6 +1422,7 @@ int kfd_parse_crat_table(void *crat_image, struct list_head *device_list,
+ 
+ 
+ static int kfd_fill_gpu_cache_info_from_gfx_config(struct kfd_dev *kdev,
++						   bool cache_line_size_missing,
+ 						   struct kfd_gpu_cache_info *pcache_info)
+ {
+ 	struct amdgpu_device *adev = kdev->adev;
+@@ -1436,6 +1437,8 @@ static int kfd_fill_gpu_cache_info_from_gfx_config(struct kfd_dev *kdev,
+ 					CRAT_CACHE_FLAGS_SIMD_CACHE);
+ 		pcache_info[i].num_cu_shared = adev->gfx.config.gc_num_tcp_per_wpg / 2;
+ 		pcache_info[i].cache_line_size = adev->gfx.config.gc_tcp_cache_line_size;
++		if (cache_line_size_missing && !pcache_info[i].cache_line_size)
++			pcache_info[i].cache_line_size = 128;
+ 		i++;
+ 	}
+ 	/* Scalar L1 Instruction Cache per SQC */
+@@ -1448,6 +1451,8 @@ static int kfd_fill_gpu_cache_info_from_gfx_config(struct kfd_dev *kdev,
+ 					CRAT_CACHE_FLAGS_SIMD_CACHE);
+ 		pcache_info[i].num_cu_shared = adev->gfx.config.gc_num_sqc_per_wgp * 2;
+ 		pcache_info[i].cache_line_size = adev->gfx.config.gc_instruction_cache_line_size;
++		if (cache_line_size_missing && !pcache_info[i].cache_line_size)
++			pcache_info[i].cache_line_size = 128;
+ 		i++;
+ 	}
+ 	/* Scalar L1 Data Cache per SQC */
+@@ -1459,6 +1464,8 @@ static int kfd_fill_gpu_cache_info_from_gfx_config(struct kfd_dev *kdev,
+ 					CRAT_CACHE_FLAGS_SIMD_CACHE);
+ 		pcache_info[i].num_cu_shared = adev->gfx.config.gc_num_sqc_per_wgp * 2;
+ 		pcache_info[i].cache_line_size = adev->gfx.config.gc_scalar_data_cache_line_size;
++		if (cache_line_size_missing && !pcache_info[i].cache_line_size)
++			pcache_info[i].cache_line_size = 64;
+ 		i++;
+ 	}
+ 	/* GL1 Data Cache per SA */
+@@ -1471,7 +1478,8 @@ static int kfd_fill_gpu_cache_info_from_gfx_config(struct kfd_dev *kdev,
+ 					CRAT_CACHE_FLAGS_DATA_CACHE |
+ 					CRAT_CACHE_FLAGS_SIMD_CACHE);
+ 		pcache_info[i].num_cu_shared = adev->gfx.config.max_cu_per_sh;
+-		pcache_info[i].cache_line_size = 0;
++		if (cache_line_size_missing)
++			pcache_info[i].cache_line_size = 128;
+ 		i++;
+ 	}
+ 	/* L2 Data Cache per GPU (Total Tex Cache) */
+@@ -1483,6 +1491,8 @@ static int kfd_fill_gpu_cache_info_from_gfx_config(struct kfd_dev *kdev,
+ 					CRAT_CACHE_FLAGS_SIMD_CACHE);
+ 		pcache_info[i].num_cu_shared = adev->gfx.config.max_cu_per_sh;
+ 		pcache_info[i].cache_line_size = adev->gfx.config.gc_tcc_cache_line_size;
++		if (cache_line_size_missing && !pcache_info[i].cache_line_size)
++			pcache_info[i].cache_line_size = 128;
+ 		i++;
+ 	}
+ 	/* L3 Data Cache per GPU */
+@@ -1493,7 +1503,7 @@ static int kfd_fill_gpu_cache_info_from_gfx_config(struct kfd_dev *kdev,
+ 					CRAT_CACHE_FLAGS_DATA_CACHE |
+ 					CRAT_CACHE_FLAGS_SIMD_CACHE);
+ 		pcache_info[i].num_cu_shared = adev->gfx.config.max_cu_per_sh;
+-		pcache_info[i].cache_line_size = 0;
++		pcache_info[i].cache_line_size = 64;
+ 		i++;
+ 	}
+ 	return i;
+@@ -1568,6 +1578,7 @@ static int kfd_fill_gpu_cache_info_from_gfx_config_v2(struct kfd_dev *kdev,
+ int kfd_get_gpu_cache_info(struct kfd_node *kdev, struct kfd_gpu_cache_info **pcache_info)
+ {
+ 	int num_of_cache_types = 0;
++	bool cache_line_size_missing = false;
+ 
+ 	switch (kdev->adev->asic_type) {
+ 	case CHIP_KAVERI:
+@@ -1691,10 +1702,17 @@ int kfd_get_gpu_cache_info(struct kfd_node *kdev, struct kfd_gpu_cache_info **pc
+ 		case IP_VERSION(11, 5, 0):
+ 		case IP_VERSION(11, 5, 1):
+ 		case IP_VERSION(11, 5, 2):
++			/* Cacheline size is not available in IP discovery for gc11.
++			 * Use kfd_fill_gpu_cache_info_from_gfx_config() to hard-code it.
++			 */
++			cache_line_size_missing = true;
++			fallthrough;
+ 		case IP_VERSION(12, 0, 0):
+ 		case IP_VERSION(12, 0, 1):
+ 			num_of_cache_types =
+-				kfd_fill_gpu_cache_info_from_gfx_config(kdev->kfd, *pcache_info);
++				kfd_fill_gpu_cache_info_from_gfx_config(kdev->kfd,
++									cache_line_size_missing,
++									*pcache_info);
+ 			break;
+ 		default:
+ 			*pcache_info = dummy_cache_info;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+index 648f40091aa395..f5b3ed20e891b3 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+@@ -205,6 +205,21 @@ static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q,
+ 	if (!down_read_trylock(&adev->reset_domain->sem))
+ 		return -EIO;
+ 
++	if (!pdd->proc_ctx_cpu_ptr) {
++		r = amdgpu_amdkfd_alloc_gtt_mem(adev,
++				AMDGPU_MES_PROC_CTX_SIZE,
++				&pdd->proc_ctx_bo,
++				&pdd->proc_ctx_gpu_addr,
++				&pdd->proc_ctx_cpu_ptr,
++				false);
++		if (r) {
++			dev_err(adev->dev,
++				"failed to allocate process context bo\n");
++			return r;
++		}
++		memset(pdd->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);
++	}
++
+ 	memset(&queue_input, 0x0, sizeof(struct mes_add_queue_input));
+ 	queue_input.process_id = qpd->pqm->process->pasid;
+ 	queue_input.page_table_base_addr =  qpd->page_table_base;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+index ff34bb1ac9db79..3139987b82b100 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+@@ -1076,7 +1076,8 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
+ 
+ 		kfd_free_process_doorbells(pdd->dev->kfd, pdd);
+ 
+-		if (pdd->dev->kfd->shared_resources.enable_mes)
++		if (pdd->dev->kfd->shared_resources.enable_mes &&
++			pdd->proc_ctx_cpu_ptr)
+ 			amdgpu_amdkfd_free_gtt_mem(pdd->dev->adev,
+ 						   &pdd->proc_ctx_bo);
+ 		/*
+@@ -1610,7 +1611,6 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev,
+ 							struct kfd_process *p)
+ {
+ 	struct kfd_process_device *pdd = NULL;
+-	int retval = 0;
+ 
+ 	if (WARN_ON_ONCE(p->n_pdds >= MAX_GPU_INSTANCE))
+ 		return NULL;
+@@ -1634,21 +1634,6 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev,
+ 	pdd->user_gpu_id = dev->id;
+ 	atomic64_set(&pdd->evict_duration_counter, 0);
+ 
+-	if (dev->kfd->shared_resources.enable_mes) {
+-		retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev,
+-						AMDGPU_MES_PROC_CTX_SIZE,
+-						&pdd->proc_ctx_bo,
+-						&pdd->proc_ctx_gpu_addr,
+-						&pdd->proc_ctx_cpu_ptr,
+-						false);
+-		if (retval) {
+-			dev_err(dev->adev->dev,
+-				"failed to allocate process context bo\n");
+-			goto err_free_pdd;
+-		}
+-		memset(pdd->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);
+-	}
+-
+ 	p->pdds[p->n_pdds++] = pdd;
+ 	if (kfd_dbg_is_per_vmid_supported(pdd->dev))
+ 		pdd->spi_dbg_override = pdd->dev->kfd2kgd->disable_debug_trap(
+@@ -1660,10 +1645,6 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev,
+ 	idr_init(&pdd->alloc_idr);
+ 
+ 	return pdd;
+-
+-err_free_pdd:
+-	kfree(pdd);
+-	return NULL;
+ }
+ 
+ /**
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+index 01b960b152743d..ead4317a21680b 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+@@ -212,13 +212,17 @@ static void pqm_clean_queue_resource(struct process_queue_manager *pqm,
+ void pqm_uninit(struct process_queue_manager *pqm)
+ {
+ 	struct process_queue_node *pqn, *next;
+-	struct kfd_process_device *pdd;
+ 
+ 	list_for_each_entry_safe(pqn, next, &pqm->queues, process_queue_list) {
+ 		if (pqn->q) {
+-			pdd = kfd_get_process_device_data(pqn->q->device, pqm->process);
+-			kfd_queue_unref_bo_vas(pdd, &pqn->q->properties);
+-			kfd_queue_release_buffers(pdd, &pqn->q->properties);
++			struct kfd_process_device *pdd = kfd_get_process_device_data(pqn->q->device,
++										     pqm->process);
++			if (pdd) {
++				kfd_queue_unref_bo_vas(pdd, &pqn->q->properties);
++				kfd_queue_release_buffers(pdd, &pqn->q->properties);
++			} else {
++				WARN_ON(!pdd);
++			}
+ 			pqm_clean_queue_resource(pqm, pqn);
+ 		}
+ 
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+index d0e6d051e9cf9f..1aedfafa507f7e 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+@@ -2717,4 +2717,5 @@ void smu_v13_0_7_set_ppt_funcs(struct smu_context *smu)
+ 	smu->workload_map = smu_v13_0_7_workload_map;
+ 	smu->smc_driver_if_version = SMU13_0_7_DRIVER_IF_VERSION;
+ 	smu_v13_0_set_smu_mailbox_registers(smu);
++	smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
+ }
+diff --git a/drivers/gpu/drm/drm_panic_qr.rs b/drivers/gpu/drm/drm_panic_qr.rs
+index 1ef56cb07dfbd2..447740d79d3d2e 100644
+--- a/drivers/gpu/drm/drm_panic_qr.rs
++++ b/drivers/gpu/drm/drm_panic_qr.rs
+@@ -929,7 +929,6 @@ fn draw_all(&mut self, data: impl Iterator<Item = u8>) {
+ /// * `tmp` must be valid for reading and writing for `tmp_size` bytes.
+ ///
+ /// They must remain valid for the duration of the function call.
+-
+ #[no_mangle]
+ pub unsafe extern "C" fn drm_panic_qr_generate(
+     url: *const i8,
+diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
+index 5d701f48351b96..ec55cb651d4498 100644
+--- a/drivers/gpu/drm/i915/display/intel_color.c
++++ b/drivers/gpu/drm/i915/display/intel_color.c
+@@ -1333,19 +1333,29 @@ static void ilk_load_lut_8(const struct intel_crtc_state *crtc_state,
+ 	lut = blob->data;
+ 
+ 	/*
+-	 * DSB fails to correctly load the legacy LUT
+-	 * unless we either write each entry twice,
+-	 * or use non-posted writes
++	 * DSB fails to correctly load the legacy LUT unless
++	 * we either write each entry twice when using posted
++	 * writes, or we use non-posted writes.
++	 *
++	 * If palette anti-collision is active during LUT
++	 * register writes:
++	 * - posted writes simply get dropped and thus the LUT
++	 *   contents may not be correctly updated
++	 * - non-posted writes are blocked and thus the LUT
++	 *   contents are always correct, but simultaneous CPU
++	 *   MMIO access will start to fail
++	 *
++	 * Choose the lesser of two evils and use posted writes.
++	 * Using posted writes is also faster, even when having
++	 * to write each register twice.
+ 	 */
+-	if (crtc_state->dsb_color_vblank)
+-		intel_dsb_nonpost_start(crtc_state->dsb_color_vblank);
+-
+-	for (i = 0; i < 256; i++)
++	for (i = 0; i < 256; i++) {
+ 		ilk_lut_write(crtc_state, LGC_PALETTE(pipe, i),
+ 			      i9xx_lut_8(&lut[i]));
+-
+-	if (crtc_state->dsb_color_vblank)
+-		intel_dsb_nonpost_end(crtc_state->dsb_color_vblank);
++		if (crtc_state->dsb_color_vblank)
++			ilk_lut_write(crtc_state, LGC_PALETTE(pipe, i),
++				      i9xx_lut_8(&lut[i]));
++	}
+ }
+ 
+ static void ilk_load_lut_10(const struct intel_crtc_state *crtc_state,
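[Aside: the double-write workaround above can be modeled in a few lines — if palette anti-collision drops a posted write, the repeated write still lands, so the LUT converges either way. A behavioral sketch, not the i915 code:]

    #include <stdint.h>

    static uint32_t palette[256];
    static int drop_next;	/* models anti-collision eating one posted write */

    static void lut_write(int idx, uint32_t val)
    {
    	if (drop_next) {
    		drop_next = 0;	/* the dropped posted write is simply lost */
    		return;
    	}
    	palette[idx] = val;
    }

    /* On the DSB (posted-write) path each entry is written twice; a lost
     * first write is covered by the second, so no entry stays stale. */
    static void load_lut_8(const uint32_t *lut, int using_dsb)
    {
    	for (int i = 0; i < 256; i++) {
    		lut_write(i, lut[i]);
    		if (using_dsb)
    			lut_write(i, lut[i]);
    	}
    }

    int main(void)
    {
    	uint32_t lut[256];

    	for (int i = 0; i < 256; i++)
    		lut[i] = i + 1;

    	drop_next = 1;		/* lose the very first write */
    	load_lut_8(lut, 1);
    	return palette[0] == lut[0] ? 0 : 1;
    }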
+diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
+index 6469b9bcf2ec44..082ac72c757a9f 100644
+--- a/drivers/gpu/drm/i915/i915_gpu_error.c
++++ b/drivers/gpu/drm/i915/i915_gpu_error.c
+@@ -1652,9 +1652,21 @@ capture_engine(struct intel_engine_cs *engine,
+ 		return NULL;
+ 
+ 	intel_engine_get_hung_entity(engine, &ce, &rq);
+-	if (rq && !i915_request_started(rq))
+-		drm_info(&engine->gt->i915->drm, "Got hung context on %s with active request %lld:%lld [0x%04X] not yet started\n",
+-			 engine->name, rq->fence.context, rq->fence.seqno, ce->guc_id.id);
++	if (rq && !i915_request_started(rq)) {
++		/*
++		 * We also want to know the guc_id of the context, but if
++		 * we don't have the context reference, then skip printing
++		 * it.
++		 */
++		if (ce)
++			drm_info(&engine->gt->i915->drm,
++				 "Got hung context on %s with active request %lld:%lld [0x%04X] not yet started\n",
++				 engine->name, rq->fence.context, rq->fence.seqno, ce->guc_id.id);
++		else
++			drm_info(&engine->gt->i915->drm,
++				 "Got hung context on %s with active request %lld:%lld not yet started\n",
++				 engine->name, rq->fence.context, rq->fence.seqno);
++	}
+ 
+ 	if (rq) {
+ 		capture = intel_engine_coredump_add_request(ee, rq, ATOMIC_MAYFAIL);
+diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
+index 762127dd56c538..70a854557e6ec5 100644
+--- a/drivers/gpu/drm/i915/i915_scheduler.c
++++ b/drivers/gpu/drm/i915/i915_scheduler.c
+@@ -506,6 +506,6 @@ int __init i915_scheduler_module_init(void)
+ 	return 0;
+ 
+ err_priorities:
+-	kmem_cache_destroy(slab_priorities);
++	kmem_cache_destroy(slab_dependencies);
+ 	return -ENOMEM;
+ }
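[Aside: the one-line scheduler fix is the classic unwind-ordering rule — when creating the second object fails, the error label must tear down the first (already created), never the second. Sketched generically, with malloc/free standing in for the slab caches:]

    #include <stdlib.h>

    static void *cache_a, *cache_b;	/* stand-ins for the two slab caches */

    static int module_init_sketch(void)
    {
    	cache_a = malloc(16);		/* slab_dependencies analogue */
    	if (!cache_a)
    		return -1;

    	cache_b = malloc(16);		/* slab_priorities analogue */
    	if (!cache_b) {
    		free(cache_a);		/* the fix: destroy A here, not B */
    		cache_a = NULL;
    		return -1;
    	}
    	return 0;
    }

    int main(void)
    {
    	return module_init_sketch();
    }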
+diff --git a/drivers/gpu/drm/xe/tests/xe_migrate.c b/drivers/gpu/drm/xe/tests/xe_migrate.c
+index 1a192a2a941b69..3bbdb362d6f0dc 100644
+--- a/drivers/gpu/drm/xe/tests/xe_migrate.c
++++ b/drivers/gpu/drm/xe/tests/xe_migrate.c
+@@ -224,8 +224,8 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
+ 				    XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+ 				    XE_BO_FLAG_PINNED);
+ 	if (IS_ERR(tiny)) {
+-		KUNIT_FAIL(test, "Failed to allocate fake pt: %li\n",
+-			   PTR_ERR(pt));
++		KUNIT_FAIL(test, "Failed to allocate tiny fake pt: %li\n",
++			   PTR_ERR(tiny));
+ 		goto free_pt;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
+index 9d82ea30f4df23..7e385940df0863 100644
+--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
++++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
+@@ -65,6 +65,14 @@ invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fe
+ 	__invalidation_fence_signal(xe, fence);
+ }
+ 
++void xe_gt_tlb_invalidation_fence_signal(struct xe_gt_tlb_invalidation_fence *fence)
++{
++	if (WARN_ON_ONCE(!fence->gt))
++		return;
++
++	__invalidation_fence_signal(gt_to_xe(fence->gt), fence);
++}
++
+ static void xe_gt_tlb_fence_timeout(struct work_struct *work)
+ {
+ 	struct xe_gt *gt = container_of(work, struct xe_gt,
+diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
+index f430d5797af701..00b1c6c01e8d95 100644
+--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
++++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
+@@ -28,6 +28,7 @@ int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len);
+ void xe_gt_tlb_invalidation_fence_init(struct xe_gt *gt,
+ 				       struct xe_gt_tlb_invalidation_fence *fence,
+ 				       bool stack);
++void xe_gt_tlb_invalidation_fence_signal(struct xe_gt_tlb_invalidation_fence *fence);
+ 
+ static inline void
+ xe_gt_tlb_invalidation_fence_wait(struct xe_gt_tlb_invalidation_fence *fence)
+diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
+index f27f579f4d85aa..797576690356f2 100644
+--- a/drivers/gpu/drm/xe/xe_pt.c
++++ b/drivers/gpu/drm/xe/xe_pt.c
+@@ -1333,8 +1333,7 @@ static void invalidation_fence_cb(struct dma_fence *fence,
+ 		queue_work(system_wq, &ifence->work);
+ 	} else {
+ 		ifence->base.base.error = ifence->fence->error;
+-		dma_fence_signal(&ifence->base.base);
+-		dma_fence_put(&ifence->base.base);
++		xe_gt_tlb_invalidation_fence_signal(&ifence->base);
+ 	}
+ 	dma_fence_put(ifence->fence);
+ }
+diff --git a/drivers/gpu/drm/xe/xe_reg_sr.c b/drivers/gpu/drm/xe/xe_reg_sr.c
+index 440ac572f6e5ef..52969c0909659d 100644
+--- a/drivers/gpu/drm/xe/xe_reg_sr.c
++++ b/drivers/gpu/drm/xe/xe_reg_sr.c
+@@ -26,46 +26,27 @@
+ #include "xe_reg_whitelist.h"
+ #include "xe_rtp_types.h"
+ 
+-#define XE_REG_SR_GROW_STEP_DEFAULT	16
+-
+ static void reg_sr_fini(struct drm_device *drm, void *arg)
+ {
+ 	struct xe_reg_sr *sr = arg;
++	struct xe_reg_sr_entry *entry;
++	unsigned long reg;
++
++	xa_for_each(&sr->xa, reg, entry)
++		kfree(entry);
+ 
+ 	xa_destroy(&sr->xa);
+-	kfree(sr->pool.arr);
+-	memset(&sr->pool, 0, sizeof(sr->pool));
+ }
+ 
+ int xe_reg_sr_init(struct xe_reg_sr *sr, const char *name, struct xe_device *xe)
+ {
+ 	xa_init(&sr->xa);
+-	memset(&sr->pool, 0, sizeof(sr->pool));
+-	sr->pool.grow_step = XE_REG_SR_GROW_STEP_DEFAULT;
+ 	sr->name = name;
+ 
+ 	return drmm_add_action_or_reset(&xe->drm, reg_sr_fini, sr);
+ }
+ EXPORT_SYMBOL_IF_KUNIT(xe_reg_sr_init);
+ 
+-static struct xe_reg_sr_entry *alloc_entry(struct xe_reg_sr *sr)
+-{
+-	if (sr->pool.used == sr->pool.allocated) {
+-		struct xe_reg_sr_entry *arr;
+-
+-		arr = krealloc_array(sr->pool.arr,
+-				     ALIGN(sr->pool.allocated + 1, sr->pool.grow_step),
+-				     sizeof(*arr), GFP_KERNEL);
+-		if (!arr)
+-			return NULL;
+-
+-		sr->pool.arr = arr;
+-		sr->pool.allocated += sr->pool.grow_step;
+-	}
+-
+-	return &sr->pool.arr[sr->pool.used++];
+-}
+-
+ static bool compatible_entries(const struct xe_reg_sr_entry *e1,
+ 			       const struct xe_reg_sr_entry *e2)
+ {
+@@ -111,7 +92,7 @@ int xe_reg_sr_add(struct xe_reg_sr *sr,
+ 		return 0;
+ 	}
+ 
+-	pentry = alloc_entry(sr);
++	pentry = kmalloc(sizeof(*pentry), GFP_KERNEL);
+ 	if (!pentry) {
+ 		ret = -ENOMEM;
+ 		goto fail;
+diff --git a/drivers/gpu/drm/xe/xe_reg_sr_types.h b/drivers/gpu/drm/xe/xe_reg_sr_types.h
+index ad48a52b824a18..ebe11f237fa26d 100644
+--- a/drivers/gpu/drm/xe/xe_reg_sr_types.h
++++ b/drivers/gpu/drm/xe/xe_reg_sr_types.h
+@@ -20,12 +20,6 @@ struct xe_reg_sr_entry {
+ };
+ 
+ struct xe_reg_sr {
+-	struct {
+-		struct xe_reg_sr_entry *arr;
+-		unsigned int used;
+-		unsigned int allocated;
+-		unsigned int grow_step;
+-	} pool;
+ 	struct xarray xa;
+ 	const char *name;
+ 
+diff --git a/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c b/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
+index c8ec74f089f3d6..6e41ddaa24d636 100644
+--- a/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
++++ b/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
+@@ -339,7 +339,7 @@ tegra241_cmdqv_get_cmdq(struct arm_smmu_device *smmu,
+ 	 * one CPU at a time can enter the process, while the others
+ 	 * will be spinning at the same lock.
+ 	 */
+-	lidx = smp_processor_id() % cmdqv->num_lvcmdqs_per_vintf;
++	lidx = raw_smp_processor_id() % cmdqv->num_lvcmdqs_per_vintf;
+ 	vcmdq = vintf->lvcmdqs[lidx];
+ 	if (!vcmdq || !READ_ONCE(vcmdq->enabled))
+ 		return NULL;
+diff --git a/drivers/iommu/intel/cache.c b/drivers/iommu/intel/cache.c
+index e5b89f728ad3b2..09694cca8752df 100644
+--- a/drivers/iommu/intel/cache.c
++++ b/drivers/iommu/intel/cache.c
+@@ -105,12 +105,35 @@ static void cache_tag_unassign(struct dmar_domain *domain, u16 did,
+ 	spin_unlock_irqrestore(&domain->cache_lock, flags);
+ }
+ 
++/* domain->qi_batch will be freed in the iommu_free_domain() path. */
++static int domain_qi_batch_alloc(struct dmar_domain *domain)
++{
++	unsigned long flags;
++	int ret = 0;
++
++	spin_lock_irqsave(&domain->cache_lock, flags);
++	if (domain->qi_batch)
++		goto out_unlock;
++
++	domain->qi_batch = kzalloc(sizeof(*domain->qi_batch), GFP_ATOMIC);
++	if (!domain->qi_batch)
++		ret = -ENOMEM;
++out_unlock:
++	spin_unlock_irqrestore(&domain->cache_lock, flags);
++
++	return ret;
++}
++
+ static int __cache_tag_assign_domain(struct dmar_domain *domain, u16 did,
+ 				     struct device *dev, ioasid_t pasid)
+ {
+ 	struct device_domain_info *info = dev_iommu_priv_get(dev);
+ 	int ret;
+ 
++	ret = domain_qi_batch_alloc(domain);
++	if (ret)
++		return ret;
++
+ 	ret = cache_tag_assign(domain, did, dev, pasid, CACHE_TAG_IOTLB);
+ 	if (ret || !info->ats_enabled)
+ 		return ret;
+@@ -139,6 +162,10 @@ static int __cache_tag_assign_parent_domain(struct dmar_domain *domain, u16 did,
+ 	struct device_domain_info *info = dev_iommu_priv_get(dev);
+ 	int ret;
+ 
++	ret = domain_qi_batch_alloc(domain);
++	if (ret)
++		return ret;
++
+ 	ret = cache_tag_assign(domain, did, dev, pasid, CACHE_TAG_NESTING_IOTLB);
+ 	if (ret || !info->ats_enabled)
+ 		return ret;
+@@ -190,13 +217,6 @@ int cache_tag_assign_domain(struct dmar_domain *domain,
+ 	u16 did = domain_get_id_for_dev(domain, dev);
+ 	int ret;
+ 
+-	/* domain->qi_bach will be freed in iommu_free_domain() path. */
+-	if (!domain->qi_batch) {
+-		domain->qi_batch = kzalloc(sizeof(*domain->qi_batch), GFP_KERNEL);
+-		if (!domain->qi_batch)
+-			return -ENOMEM;
+-	}
+-
+ 	ret = __cache_tag_assign_domain(domain, did, dev, pasid);
+ 	if (ret || domain->domain.type != IOMMU_DOMAIN_NESTED)
+ 		return ret;
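[Aside: the cache.c change moves the qi_batch allocation into a helper that checks and publishes under the same lock, with GFP_ATOMIC so the allocation cannot sleep while the spinlock is held. The pattern, sketched in user space with a mutex standing in for the spinlock:]

    #include <pthread.h>
    #include <stdlib.h>

    struct domain_stub {
    	pthread_mutex_t lock;	/* stands in for domain->cache_lock */
    	void *qi_batch;
    };

    /* Check-and-publish under one lock: two racing callers can no longer
     * both observe a NULL batch and both allocate, which the old code
     * (checking outside the lock) allowed. */
    static int qi_batch_alloc(struct domain_stub *d, size_t sz)
    {
    	int ret = 0;

    	pthread_mutex_lock(&d->lock);
    	if (!d->qi_batch) {
    		d->qi_batch = calloc(1, sz);
    		if (!d->qi_batch)
    			ret = -1;	/* -ENOMEM */
    	}
    	pthread_mutex_unlock(&d->lock);
    	return ret;
    }

    int main(void)
    {
    	struct domain_stub d = { PTHREAD_MUTEX_INITIALIZER, NULL };

    	return qi_batch_alloc(&d, 64);
    }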
+diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
+index a167d59101ae2e..cc23cfcdeb2d59 100644
+--- a/drivers/iommu/intel/iommu.c
++++ b/drivers/iommu/intel/iommu.c
+@@ -3372,6 +3372,9 @@ void device_block_translation(struct device *dev)
+ 	struct intel_iommu *iommu = info->iommu;
+ 	unsigned long flags;
+ 
++	if (info->domain)
++		cache_tag_unassign_domain(info->domain, dev, IOMMU_NO_PASID);
++
+ 	iommu_disable_pci_caps(info);
+ 	if (!dev_is_real_dma_subdevice(dev)) {
+ 		if (sm_supported(iommu))
+@@ -3388,7 +3391,6 @@ void device_block_translation(struct device *dev)
+ 	list_del(&info->link);
+ 	spin_unlock_irqrestore(&info->domain->lock, flags);
+ 
+-	cache_tag_unassign_domain(info->domain, dev, IOMMU_NO_PASID);
+ 	domain_detach_iommu(info->domain, iommu);
+ 	info->domain = NULL;
+ }
+diff --git a/drivers/md/dm-zoned-reclaim.c b/drivers/md/dm-zoned-reclaim.c
+index d58db9a27e6cfd..76e2c686854871 100644
+--- a/drivers/md/dm-zoned-reclaim.c
++++ b/drivers/md/dm-zoned-reclaim.c
+@@ -76,9 +76,9 @@ static int dmz_reclaim_align_wp(struct dmz_reclaim *zrc, struct dm_zone *zone,
+ 	 * pointer and the requested position.
+ 	 */
+ 	nr_blocks = block - wp_block;
+-	ret = blkdev_issue_zeroout(dev->bdev,
+-				   dmz_start_sect(zmd, zone) + dmz_blk2sect(wp_block),
+-				   dmz_blk2sect(nr_blocks), GFP_NOIO, 0);
++	ret = blk_zone_issue_zeroout(dev->bdev,
++			dmz_start_sect(zmd, zone) + dmz_blk2sect(wp_block),
++			dmz_blk2sect(nr_blocks), GFP_NOIO);
+ 	if (ret) {
+ 		dmz_dev_err(dev,
+ 			    "Align zone %u wp %llu to %llu (wp+%u) blocks failed %d",
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 15e0f14d0d49de..4d73abae503d1e 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -1520,9 +1520,7 @@ static netdev_features_t bond_fix_features(struct net_device *dev,
+ 	struct slave *slave;
+ 
+ 	mask = features;
+-
+-	features &= ~NETIF_F_ONE_FOR_ALL;
+-	features |= NETIF_F_ALL_FOR_ALL;
++	features = netdev_base_features(features);
+ 
+ 	bond_for_each_slave(bond, slave, iter) {
+ 		features = netdev_increment_features(features,
+@@ -1536,6 +1534,7 @@ static netdev_features_t bond_fix_features(struct net_device *dev,
+ 
+ #define BOND_VLAN_FEATURES	(NETIF_F_HW_CSUM | NETIF_F_SG | \
+ 				 NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE | \
++				 NETIF_F_GSO_ENCAP_ALL | \
+ 				 NETIF_F_HIGHDMA | NETIF_F_LRO)
+ 
+ #define BOND_ENC_FEATURES	(NETIF_F_HW_CSUM | NETIF_F_SG | \
+@@ -1564,8 +1563,9 @@ static void bond_compute_features(struct bonding *bond)
+ 
+ 	if (!bond_has_slaves(bond))
+ 		goto done;
+-	vlan_features &= NETIF_F_ALL_FOR_ALL;
+-	mpls_features &= NETIF_F_ALL_FOR_ALL;
++
++	vlan_features = netdev_base_features(vlan_features);
++	mpls_features = netdev_base_features(mpls_features);
+ 
+ 	bond_for_each_slave(bond, slave, iter) {
+ 		vlan_features = netdev_increment_features(vlan_features,
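[Aside: the bonding hunks replace open-coded feature masking with netdev_base_features(). Judging from the removed lines, the helper reduces to the following — illustrative NETIF_F_* values only; the real masks live in include/linux/netdev_features.h:]

    #include <stdint.h>

    #define NETIF_F_ONE_FOR_ALL	0x0f00u	/* illustrative value */
    #define NETIF_F_ALL_FOR_ALL	0x00f0u	/* illustrative value */

    /* Clear the one-for-all bits and seed the all-for-all bits before
     * folding in each slave's feature set with netdev_increment_features(). */
    static uint64_t base_features(uint64_t features)
    {
    	features &= ~(uint64_t)NETIF_F_ONE_FOR_ALL;
    	features |= NETIF_F_ALL_FOR_ALL;
    	return features;
    }

    int main(void)
    {
    	return base_features(0x0f00u) == 0x00f0u ? 0 : 1;
    }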
+diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
+index 5290f5ad98f392..bf26cd0abf6dd9 100644
+--- a/drivers/net/dsa/microchip/ksz_common.c
++++ b/drivers/net/dsa/microchip/ksz_common.c
+@@ -1098,10 +1098,9 @@ static const struct regmap_range ksz9896_valid_regs[] = {
+ 	regmap_reg_range(0x1030, 0x1030),
+ 	regmap_reg_range(0x1100, 0x1115),
+ 	regmap_reg_range(0x111a, 0x111f),
+-	regmap_reg_range(0x1122, 0x1127),
+-	regmap_reg_range(0x112a, 0x112b),
+-	regmap_reg_range(0x1136, 0x1139),
+-	regmap_reg_range(0x113e, 0x113f),
++	regmap_reg_range(0x1120, 0x112b),
++	regmap_reg_range(0x1134, 0x113b),
++	regmap_reg_range(0x113c, 0x113f),
+ 	regmap_reg_range(0x1400, 0x1401),
+ 	regmap_reg_range(0x1403, 0x1403),
+ 	regmap_reg_range(0x1410, 0x1417),
+@@ -1128,10 +1127,9 @@ static const struct regmap_range ksz9896_valid_regs[] = {
+ 	regmap_reg_range(0x2030, 0x2030),
+ 	regmap_reg_range(0x2100, 0x2115),
+ 	regmap_reg_range(0x211a, 0x211f),
+-	regmap_reg_range(0x2122, 0x2127),
+-	regmap_reg_range(0x212a, 0x212b),
+-	regmap_reg_range(0x2136, 0x2139),
+-	regmap_reg_range(0x213e, 0x213f),
++	regmap_reg_range(0x2120, 0x212b),
++	regmap_reg_range(0x2134, 0x213b),
++	regmap_reg_range(0x213c, 0x213f),
+ 	regmap_reg_range(0x2400, 0x2401),
+ 	regmap_reg_range(0x2403, 0x2403),
+ 	regmap_reg_range(0x2410, 0x2417),
+@@ -1158,10 +1156,9 @@ static const struct regmap_range ksz9896_valid_regs[] = {
+ 	regmap_reg_range(0x3030, 0x3030),
+ 	regmap_reg_range(0x3100, 0x3115),
+ 	regmap_reg_range(0x311a, 0x311f),
+-	regmap_reg_range(0x3122, 0x3127),
+-	regmap_reg_range(0x312a, 0x312b),
+-	regmap_reg_range(0x3136, 0x3139),
+-	regmap_reg_range(0x313e, 0x313f),
++	regmap_reg_range(0x3120, 0x312b),
++	regmap_reg_range(0x3134, 0x313b),
++	regmap_reg_range(0x313c, 0x313f),
+ 	regmap_reg_range(0x3400, 0x3401),
+ 	regmap_reg_range(0x3403, 0x3403),
+ 	regmap_reg_range(0x3410, 0x3417),
+@@ -1188,10 +1185,9 @@ static const struct regmap_range ksz9896_valid_regs[] = {
+ 	regmap_reg_range(0x4030, 0x4030),
+ 	regmap_reg_range(0x4100, 0x4115),
+ 	regmap_reg_range(0x411a, 0x411f),
+-	regmap_reg_range(0x4122, 0x4127),
+-	regmap_reg_range(0x412a, 0x412b),
+-	regmap_reg_range(0x4136, 0x4139),
+-	regmap_reg_range(0x413e, 0x413f),
++	regmap_reg_range(0x4120, 0x412b),
++	regmap_reg_range(0x4134, 0x413b),
++	regmap_reg_range(0x413c, 0x413f),
+ 	regmap_reg_range(0x4400, 0x4401),
+ 	regmap_reg_range(0x4403, 0x4403),
+ 	regmap_reg_range(0x4410, 0x4417),
+@@ -1218,10 +1214,9 @@ static const struct regmap_range ksz9896_valid_regs[] = {
+ 	regmap_reg_range(0x5030, 0x5030),
+ 	regmap_reg_range(0x5100, 0x5115),
+ 	regmap_reg_range(0x511a, 0x511f),
+-	regmap_reg_range(0x5122, 0x5127),
+-	regmap_reg_range(0x512a, 0x512b),
+-	regmap_reg_range(0x5136, 0x5139),
+-	regmap_reg_range(0x513e, 0x513f),
++	regmap_reg_range(0x5120, 0x512b),
++	regmap_reg_range(0x5134, 0x513b),
++	regmap_reg_range(0x513c, 0x513f),
+ 	regmap_reg_range(0x5400, 0x5401),
+ 	regmap_reg_range(0x5403, 0x5403),
+ 	regmap_reg_range(0x5410, 0x5417),
+@@ -1248,10 +1243,9 @@ static const struct regmap_range ksz9896_valid_regs[] = {
+ 	regmap_reg_range(0x6030, 0x6030),
+ 	regmap_reg_range(0x6100, 0x6115),
+ 	regmap_reg_range(0x611a, 0x611f),
+-	regmap_reg_range(0x6122, 0x6127),
+-	regmap_reg_range(0x612a, 0x612b),
+-	regmap_reg_range(0x6136, 0x6139),
+-	regmap_reg_range(0x613e, 0x613f),
++	regmap_reg_range(0x6120, 0x612b),
++	regmap_reg_range(0x6134, 0x613b),
++	regmap_reg_range(0x613c, 0x613f),
+ 	regmap_reg_range(0x6300, 0x6301),
+ 	regmap_reg_range(0x6400, 0x6401),
+ 	regmap_reg_range(0x6403, 0x6403),
+diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
+index 0102a82e88cc61..940f1b71226d64 100644
+--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
++++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
+@@ -24,7 +24,7 @@
+ #define VSC9959_NUM_PORTS		6
+ 
+ #define VSC9959_TAS_GCL_ENTRY_MAX	63
+-#define VSC9959_TAS_MIN_GATE_LEN_NS	33
++#define VSC9959_TAS_MIN_GATE_LEN_NS	35
+ #define VSC9959_VCAP_POLICER_BASE	63
+ #define VSC9959_VCAP_POLICER_MAX	383
+ #define VSC9959_SWITCH_PCI_BAR		4
+@@ -1056,11 +1056,15 @@ static void vsc9959_mdio_bus_free(struct ocelot *ocelot)
+ 	mdiobus_free(felix->imdio);
+ }
+ 
+-/* The switch considers any frame (regardless of size) as eligible for
+- * transmission if the traffic class gate is open for at least 33 ns.
++/* The switch considers any frame (regardless of size) as eligible
++ * for transmission if the traffic class gate is open for at least
++ * VSC9959_TAS_MIN_GATE_LEN_NS.
++ *
+  * Overruns are prevented by cropping an interval at the end of the gate time
+- * slot for which egress scheduling is blocked, but we need to still keep 33 ns
+- * available for one packet to be transmitted, otherwise the port tc will hang.
++ * slot for which egress scheduling is blocked, but we need to still keep
++ * VSC9959_TAS_MIN_GATE_LEN_NS available for one packet to be transmitted,
++ * otherwise the port tc will hang.
++ *
+  * This function returns the size of a gate interval that remains available for
+  * setting the guard band, after reserving the space for one egress frame.
+  */
+@@ -1303,7 +1307,8 @@ static void vsc9959_tas_guard_bands_update(struct ocelot *ocelot, int port)
+ 			 * per-tc static guard band lengths, so it reduces the
+ 			 * useful gate interval length. Therefore, be careful
+ 			 * to calculate a guard band (and therefore max_sdu)
+-			 * that still leaves 33 ns available in the time slot.
++			 * that still leaves VSC9959_TAS_MIN_GATE_LEN_NS
++			 * available in the time slot.
+ 			 */
+ 			max_sdu = div_u64(remaining_gate_len_ps, picos_per_byte);
+ 			/* A TC gate may be completely closed, which is a
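[Aside: a worked instance of the guard-band arithmetic referenced in the comments above — the gate time left after reserving VSC9959_TAS_MIN_GATE_LEN_NS bounds the largest frame (max_sdu) that may still start transmitting:]

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t max_sdu_bytes(uint64_t remaining_gate_len_ps,
    			      uint64_t picos_per_byte)
    {
    	return remaining_gate_len_ps / picos_per_byte;
    }

    int main(void)
    {
    	/* At 1 Gbps (8000 ps per byte) a 100 us remaining gate allows
    	 * 100000000 / 8000 = 12500 bytes, comfortably above an MTU-sized
    	 * frame; a near-minimum gate clamps max_sdu toward zero. */
    	printf("%llu\n",
    	       (unsigned long long)max_sdu_bytes(100000000ULL, 8000ULL));
    	return 0;
    }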
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 3d9ee91e1f8be0..dafc5a4039cd2c 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -1518,7 +1518,7 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
+ 		if (TPA_START_IS_IPV6(tpa_start1))
+ 			tpa_info->gso_type = SKB_GSO_TCPV6;
+ 		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
+-		else if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP &&
++		else if (!BNXT_CHIP_P4_PLUS(bp) &&
+ 			 TPA_START_HASH_TYPE(tpa_start) == 3)
+ 			tpa_info->gso_type = SKB_GSO_TCPV6;
+ 		tpa_info->rss_hash =
+@@ -2212,15 +2212,13 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+ 		if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
+ 			type = bnxt_rss_ext_op(bp, rxcmp);
+ 		} else {
+-			u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
++			u32 itypes = RX_CMP_ITYPES(rxcmp);
+ 
+-			/* RSS profiles 1 and 3 with extract code 0 for inner
+-			 * 4-tuple
+-			 */
+-			if (hash_type != 1 && hash_type != 3)
+-				type = PKT_HASH_TYPE_L3;
+-			else
++			if (itypes == RX_CMP_FLAGS_ITYPE_TCP ||
++			    itypes == RX_CMP_FLAGS_ITYPE_UDP)
+ 				type = PKT_HASH_TYPE_L4;
++			else
++				type = PKT_HASH_TYPE_L3;
+ 		}
+ 		skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
+ 	}
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+index 69231e85140b2e..9e05704d94450e 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+@@ -267,6 +267,9 @@ struct rx_cmp {
+ 	(((le32_to_cpu((rxcmp)->rx_cmp_misc_v1) & RX_CMP_RSS_HASH_TYPE) >>\
+ 	  RX_CMP_RSS_HASH_TYPE_SHIFT) & RSS_PROFILE_ID_MASK)
+ 
++#define RX_CMP_ITYPES(rxcmp)					\
++	(le32_to_cpu((rxcmp)->rx_cmp_len_flags_type) & RX_CMP_FLAGS_ITYPES_MASK)
++
+ #define RX_CMP_V3_HASH_TYPE_LEGACY(rxcmp)				\
+ 	((le32_to_cpu((rxcmp)->rx_cmp_misc_v1) & RX_CMP_V3_RSS_EXT_OP_LEGACY) >>\
+ 	 RX_CMP_V3_RSS_EXT_OP_LEGACY_SHIFT)
+@@ -378,7 +381,7 @@ struct rx_agg_cmp {
+ 	u32 rx_agg_cmp_opaque;
+ 	__le32 rx_agg_cmp_v;
+ 	#define RX_AGG_CMP_V					(1 << 0)
+-	#define RX_AGG_CMP_AGG_ID				(0xffff << 16)
++	#define RX_AGG_CMP_AGG_ID				(0x0fff << 16)
+ 	 #define RX_AGG_CMP_AGG_ID_SHIFT			 16
+ 	__le32 rx_agg_cmp_unused;
+ };
+@@ -416,7 +419,7 @@ struct rx_tpa_start_cmp {
+ 	 #define RX_TPA_START_CMP_V3_RSS_HASH_TYPE_SHIFT	 7
+ 	#define RX_TPA_START_CMP_AGG_ID				(0x7f << 25)
+ 	 #define RX_TPA_START_CMP_AGG_ID_SHIFT			 25
+-	#define RX_TPA_START_CMP_AGG_ID_P5			(0xffff << 16)
++	#define RX_TPA_START_CMP_AGG_ID_P5			(0x0fff << 16)
+ 	 #define RX_TPA_START_CMP_AGG_ID_SHIFT_P5		 16
+ 	#define RX_TPA_START_CMP_METADATA1			(0xf << 28)
+ 	 #define RX_TPA_START_CMP_METADATA1_SHIFT		 28
+@@ -540,7 +543,7 @@ struct rx_tpa_end_cmp {
+ 	 #define RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT		 16
+ 	#define RX_TPA_END_CMP_AGG_ID				(0x7f << 25)
+ 	 #define RX_TPA_END_CMP_AGG_ID_SHIFT			 25
+-	#define RX_TPA_END_CMP_AGG_ID_P5			(0xffff << 16)
++	#define RX_TPA_END_CMP_AGG_ID_P5			(0x0fff << 16)
+ 	 #define RX_TPA_END_CMP_AGG_ID_SHIFT_P5			 16
+ 
+ 	__le32 rx_tpa_end_cmp_tsdelta;
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+index bbf7641a0fc799..7e13cd69f68a1f 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+@@ -2077,7 +2077,7 @@ void t4_idma_monitor(struct adapter *adapter,
+ 		     struct sge_idma_monitor_state *idma,
+ 		     int hz, int ticks);
+ int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf,
+-		      unsigned int naddr, u8 *addr);
++		      u8 start, unsigned int naddr, u8 *addr);
+ void t4_tp_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
+ 		    u32 start_index, bool sleep_ok);
+ void t4_tp_tm_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+index 2418645c882373..fb3933fbb8425e 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+@@ -3246,7 +3246,7 @@ static int cxgb4_mgmt_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
+ 
+ 	dev_info(pi->adapter->pdev_dev,
+ 		 "Setting MAC %pM on VF %d\n", mac, vf);
+-	ret = t4_set_vf_mac_acl(adap, vf + 1, 1, mac);
++	ret = t4_set_vf_mac_acl(adap, vf + 1, pi->lport, 1, mac);
+ 	if (!ret)
+ 		ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, mac);
+ 	return ret;
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+index 76de55306c4d01..175bf9b1305888 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+@@ -10215,11 +10215,12 @@ int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
+  *	t4_set_vf_mac_acl - Set MAC address for the specified VF
+  *	@adapter: The adapter
+  *	@vf: one of the VFs instantiated by the specified PF
++ *	@start: The start port id associated with specified VF
+  *	@naddr: the number of MAC addresses
+  *	@addr: the MAC address(es) to be set to the specified VF
+  */
+ int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf,
+-		      unsigned int naddr, u8 *addr)
++		      u8 start, unsigned int naddr, u8 *addr)
+ {
+ 	struct fw_acl_mac_cmd cmd;
+ 
+@@ -10234,7 +10235,7 @@ int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf,
+ 	cmd.en_to_len16 = cpu_to_be32((unsigned int)FW_LEN16(cmd));
+ 	cmd.nmac = naddr;
+ 
+-	switch (adapter->pf) {
++	switch (start) {
+ 	case 3:
+ 		memcpy(cmd.macaddr3, addr, sizeof(cmd.macaddr3));
+ 		break;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
+index 3d74109f82300e..49f22cad92bfd0 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
+@@ -297,7 +297,9 @@ dr_domain_add_vport_cap(struct mlx5dr_domain *dmn, u16 vport)
+ 	if (ret) {
+ 		mlx5dr_dbg(dmn, "Couldn't insert new vport into xarray (%d)\n", ret);
+ 		kvfree(vport_caps);
+-		return ERR_PTR(ret);
++		if (ret == -EBUSY)
++			return ERR_PTR(-EBUSY);
++		return NULL;
+ 	}
+ 
+ 	return vport_caps;
+diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
+index b64c814eac11e8..0c4c75b3682faa 100644
+--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
++++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
+@@ -693,12 +693,11 @@ static int sparx5_start(struct sparx5 *sparx5)
+ 	err = -ENXIO;
+ 	if (sparx5->fdma_irq >= 0) {
+ 		if (GCB_CHIP_ID_REV_ID_GET(sparx5->chip_id) > 0)
+-			err = devm_request_threaded_irq(sparx5->dev,
+-							sparx5->fdma_irq,
+-							NULL,
+-							sparx5_fdma_handler,
+-							IRQF_ONESHOT,
+-							"sparx5-fdma", sparx5);
++			err = devm_request_irq(sparx5->dev,
++					       sparx5->fdma_irq,
++					       sparx5_fdma_handler,
++					       0,
++					       "sparx5-fdma", sparx5);
+ 		if (!err)
+ 			err = sparx5_fdma_start(sparx5);
+ 		if (err)
+diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
+index 062e486c002cf6..672508efce5c29 100644
+--- a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
++++ b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
+@@ -1119,7 +1119,7 @@ int sparx5_port_init(struct sparx5 *sparx5,
+ 	spx5_inst_rmw(DEV10G_MAC_MAXLEN_CFG_MAX_LEN_SET(ETH_MAXLEN),
+ 		      DEV10G_MAC_MAXLEN_CFG_MAX_LEN,
+ 		      devinst,
+-		      DEV10G_MAC_ENA_CFG(0));
++		      DEV10G_MAC_MAXLEN_CFG(0));
+ 
+ 	/* Handle Signal Detect in 10G PCS */
+ 	spx5_inst_wr(PCS10G_BR_PCS_SD_CFG_SD_POL_SET(sd_pol) |
+diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
+index ca4ed58f1206dd..0c2ba2fa88c466 100644
+--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
++++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
+@@ -1315,7 +1315,7 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
+ 				   GFP_KERNEL);
+ 	if (!gc->irq_contexts) {
+ 		err = -ENOMEM;
+-		goto free_irq_vector;
++		goto free_irq_array;
+ 	}
+ 
+ 	for (i = 0; i < nvec; i++) {
+@@ -1372,6 +1372,7 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
+ 	gc->max_num_msix = nvec;
+ 	gc->num_msix_usable = nvec;
+ 	cpus_read_unlock();
++	kfree(irqs);
+ 	return 0;
+ 
+ free_irq:
+@@ -1384,8 +1385,9 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
+ 	}
+ 
+ 	kfree(gc->irq_contexts);
+-	kfree(irqs);
+ 	gc->irq_contexts = NULL;
++free_irq_array:
++	kfree(irqs);
+ free_irq_vector:
+ 	cpus_read_unlock();
+ 	pci_free_irq_vectors(pdev);
+diff --git a/drivers/net/ethernet/mscc/ocelot_ptp.c b/drivers/net/ethernet/mscc/ocelot_ptp.c
+index e172638b060102..808ce8e68d3937 100644
+--- a/drivers/net/ethernet/mscc/ocelot_ptp.c
++++ b/drivers/net/ethernet/mscc/ocelot_ptp.c
+@@ -14,6 +14,8 @@
+ #include <soc/mscc/ocelot.h>
+ #include "ocelot.h"
+ 
++#define OCELOT_PTP_TX_TSTAMP_TIMEOUT		(5 * HZ)
++
+ int ocelot_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts)
+ {
+ 	struct ocelot *ocelot = container_of(ptp, struct ocelot, ptp_info);
+@@ -495,6 +497,28 @@ static int ocelot_traps_to_ptp_rx_filter(unsigned int proto)
+ 	return HWTSTAMP_FILTER_NONE;
+ }
+ 
++static int ocelot_ptp_tx_type_to_cmd(int tx_type, int *ptp_cmd)
++{
++	switch (tx_type) {
++	case HWTSTAMP_TX_ON:
++		*ptp_cmd = IFH_REW_OP_TWO_STEP_PTP;
++		break;
++	case HWTSTAMP_TX_ONESTEP_SYNC:
++		/* IFH_REW_OP_ONE_STEP_PTP updates the correctionField,
++		 * but what we need to update is the originTimestamp.
++		 */
++		*ptp_cmd = IFH_REW_OP_ORIGIN_PTP;
++		break;
++	case HWTSTAMP_TX_OFF:
++		*ptp_cmd = 0;
++		break;
++	default:
++		return -ERANGE;
++	}
++
++	return 0;
++}
++
+ int ocelot_hwstamp_get(struct ocelot *ocelot, int port, struct ifreq *ifr)
+ {
+ 	struct ocelot_port *ocelot_port = ocelot->ports[port];
+@@ -521,30 +545,19 @@ EXPORT_SYMBOL(ocelot_hwstamp_get);
+ int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr)
+ {
+ 	struct ocelot_port *ocelot_port = ocelot->ports[port];
++	int ptp_cmd, old_ptp_cmd = ocelot_port->ptp_cmd;
+ 	bool l2 = false, l4 = false;
+ 	struct hwtstamp_config cfg;
++	bool old_l2, old_l4;
+ 	int err;
+ 
+ 	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
+ 		return -EFAULT;
+ 
+ 	/* Tx type sanity check */
+-	switch (cfg.tx_type) {
+-	case HWTSTAMP_TX_ON:
+-		ocelot_port->ptp_cmd = IFH_REW_OP_TWO_STEP_PTP;
+-		break;
+-	case HWTSTAMP_TX_ONESTEP_SYNC:
+-		/* IFH_REW_OP_ONE_STEP_PTP updates the correctional field, we
+-		 * need to update the origin time.
+-		 */
+-		ocelot_port->ptp_cmd = IFH_REW_OP_ORIGIN_PTP;
+-		break;
+-	case HWTSTAMP_TX_OFF:
+-		ocelot_port->ptp_cmd = 0;
+-		break;
+-	default:
+-		return -ERANGE;
+-	}
++	err = ocelot_ptp_tx_type_to_cmd(cfg.tx_type, &ptp_cmd);
++	if (err)
++		return err;
+ 
+ 	switch (cfg.rx_filter) {
+ 	case HWTSTAMP_FILTER_NONE:
+@@ -569,13 +582,27 @@ int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr)
+ 		return -ERANGE;
+ 	}
+ 
++	old_l2 = ocelot_port->trap_proto & OCELOT_PROTO_PTP_L2;
++	old_l4 = ocelot_port->trap_proto & OCELOT_PROTO_PTP_L4;
++
+ 	err = ocelot_setup_ptp_traps(ocelot, port, l2, l4);
+ 	if (err)
+ 		return err;
+ 
++	ocelot_port->ptp_cmd = ptp_cmd;
++
+ 	cfg.rx_filter = ocelot_traps_to_ptp_rx_filter(ocelot_port->trap_proto);
+ 
+-	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
++	if (copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg))) {
++		err = -EFAULT;
++		goto out_restore_ptp_traps;
++	}
++
++	return 0;
++out_restore_ptp_traps:
++	ocelot_setup_ptp_traps(ocelot, port, old_l2, old_l4);
++	ocelot_port->ptp_cmd = old_ptp_cmd;
++	return err;
+ }
+ EXPORT_SYMBOL(ocelot_hwstamp_set);
+ 
+@@ -603,34 +630,87 @@ int ocelot_get_ts_info(struct ocelot *ocelot, int port,
+ }
+ EXPORT_SYMBOL(ocelot_get_ts_info);
+ 
+-static int ocelot_port_add_txtstamp_skb(struct ocelot *ocelot, int port,
+-					struct sk_buff *clone)
++static struct sk_buff *ocelot_port_dequeue_ptp_tx_skb(struct ocelot *ocelot,
++						      int port, u8 ts_id,
++						      u32 seqid)
+ {
+ 	struct ocelot_port *ocelot_port = ocelot->ports[port];
+-	unsigned long flags;
++	struct sk_buff *skb, *skb_tmp, *skb_match = NULL;
++	struct ptp_header *hdr;
+ 
+-	spin_lock_irqsave(&ocelot->ts_id_lock, flags);
++	spin_lock(&ocelot->ts_id_lock);
+ 
+-	if (ocelot_port->ptp_skbs_in_flight == OCELOT_MAX_PTP_ID ||
+-	    ocelot->ptp_skbs_in_flight == OCELOT_PTP_FIFO_SIZE) {
+-		spin_unlock_irqrestore(&ocelot->ts_id_lock, flags);
+-		return -EBUSY;
++	skb_queue_walk_safe(&ocelot_port->tx_skbs, skb, skb_tmp) {
++		if (OCELOT_SKB_CB(skb)->ts_id != ts_id)
++			continue;
++
++		/* Check that the timestamp ID is for the expected PTP
++		 * sequenceId. We don't have to test ptp_parse_header() against
++		 * NULL, because we've pre-validated the packet's ptp_class.
++		 */
++		hdr = ptp_parse_header(skb, OCELOT_SKB_CB(skb)->ptp_class);
++		if (seqid != ntohs(hdr->sequence_id))
++			continue;
++
++		__skb_unlink(skb, &ocelot_port->tx_skbs);
++		ocelot->ptp_skbs_in_flight--;
++		skb_match = skb;
++		break;
+ 	}
+ 
+-	skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS;
+-	/* Store timestamp ID in OCELOT_SKB_CB(clone)->ts_id */
+-	OCELOT_SKB_CB(clone)->ts_id = ocelot_port->ts_id;
++	spin_unlock(&ocelot->ts_id_lock);
+ 
+-	ocelot_port->ts_id++;
+-	if (ocelot_port->ts_id == OCELOT_MAX_PTP_ID)
+-		ocelot_port->ts_id = 0;
++	return skb_match;
++}
++
++static int ocelot_port_queue_ptp_tx_skb(struct ocelot *ocelot, int port,
++					struct sk_buff *clone)
++{
++	struct ocelot_port *ocelot_port = ocelot->ports[port];
++	DECLARE_BITMAP(ts_id_in_flight, OCELOT_MAX_PTP_ID);
++	struct sk_buff *skb, *skb_tmp;
++	unsigned long n;
++
++	spin_lock(&ocelot->ts_id_lock);
++
++	/* To get a better chance of acquiring a timestamp ID, first flush
++	 * the stale packets still waiting in the TX timestamping queue;
++	 * they are probably lost.
++	 */
++	skb_queue_walk_safe(&ocelot_port->tx_skbs, skb, skb_tmp) {
++		if (time_before(OCELOT_SKB_CB(skb)->ptp_tx_time +
++				OCELOT_PTP_TX_TSTAMP_TIMEOUT, jiffies)) {
++			dev_warn_ratelimited(ocelot->dev,
++					     "port %d invalidating stale timestamp ID %u which seems lost\n",
++					     port, OCELOT_SKB_CB(skb)->ts_id);
++			__skb_unlink(skb, &ocelot_port->tx_skbs);
++			kfree_skb(skb);
++			ocelot->ptp_skbs_in_flight--;
++		} else {
++			__set_bit(OCELOT_SKB_CB(skb)->ts_id, ts_id_in_flight);
++		}
++	}
++
++	if (ocelot->ptp_skbs_in_flight == OCELOT_PTP_FIFO_SIZE) {
++		spin_unlock(&ocelot->ts_id_lock);
++		return -EBUSY;
++	}
++
++	n = find_first_zero_bit(ts_id_in_flight, OCELOT_MAX_PTP_ID);
++	if (n == OCELOT_MAX_PTP_ID) {
++		spin_unlock(&ocelot->ts_id_lock);
++		return -EBUSY;
++	}
+ 
+-	ocelot_port->ptp_skbs_in_flight++;
++	/* Found an available timestamp ID, use it */
++	OCELOT_SKB_CB(clone)->ts_id = n;
++	OCELOT_SKB_CB(clone)->ptp_tx_time = jiffies;
+ 	ocelot->ptp_skbs_in_flight++;
++	__skb_queue_tail(&ocelot_port->tx_skbs, clone);
+ 
+-	skb_queue_tail(&ocelot_port->tx_skbs, clone);
++	spin_unlock(&ocelot->ts_id_lock);
+ 
+-	spin_unlock_irqrestore(&ocelot->ts_id_lock, flags);
++	dev_dbg_ratelimited(ocelot->dev, "port %d timestamp id %lu\n", port, n);
+ 
+ 	return 0;
+ }
+@@ -687,10 +767,14 @@ int ocelot_port_txtstamp_request(struct ocelot *ocelot, int port,
+ 		if (!(*clone))
+ 			return -ENOMEM;
+ 
+-		err = ocelot_port_add_txtstamp_skb(ocelot, port, *clone);
+-		if (err)
++		/* Store timestamp ID in OCELOT_SKB_CB(clone)->ts_id */
++		err = ocelot_port_queue_ptp_tx_skb(ocelot, port, *clone);
++		if (err) {
++			kfree_skb(*clone);
+ 			return err;
++		}
+ 
++		skb_shinfo(*clone)->tx_flags |= SKBTX_IN_PROGRESS;
+ 		OCELOT_SKB_CB(skb)->ptp_cmd = ptp_cmd;
+ 		OCELOT_SKB_CB(*clone)->ptp_class = ptp_class;
+ 	}
+@@ -726,28 +810,15 @@ static void ocelot_get_hwtimestamp(struct ocelot *ocelot,
+ 	spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
+ }
+ 
+-static bool ocelot_validate_ptp_skb(struct sk_buff *clone, u16 seqid)
+-{
+-	struct ptp_header *hdr;
+-
+-	hdr = ptp_parse_header(clone, OCELOT_SKB_CB(clone)->ptp_class);
+-	if (WARN_ON(!hdr))
+-		return false;
+-
+-	return seqid == ntohs(hdr->sequence_id);
+-}
+-
+ void ocelot_get_txtstamp(struct ocelot *ocelot)
+ {
+ 	int budget = OCELOT_PTP_QUEUE_SZ;
+ 
+ 	while (budget--) {
+-		struct sk_buff *skb, *skb_tmp, *skb_match = NULL;
+ 		struct skb_shared_hwtstamps shhwtstamps;
+ 		u32 val, id, seqid, txport;
+-		struct ocelot_port *port;
++		struct sk_buff *skb_match;
+ 		struct timespec64 ts;
+-		unsigned long flags;
+ 
+ 		val = ocelot_read(ocelot, SYS_PTP_STATUS);
+ 
+@@ -762,36 +833,14 @@ void ocelot_get_txtstamp(struct ocelot *ocelot)
+ 		txport = SYS_PTP_STATUS_PTP_MESS_TXPORT_X(val);
+ 		seqid = SYS_PTP_STATUS_PTP_MESS_SEQ_ID(val);
+ 
+-		port = ocelot->ports[txport];
+-
+-		spin_lock(&ocelot->ts_id_lock);
+-		port->ptp_skbs_in_flight--;
+-		ocelot->ptp_skbs_in_flight--;
+-		spin_unlock(&ocelot->ts_id_lock);
+-
+ 		/* Retrieve its associated skb */
+-try_again:
+-		spin_lock_irqsave(&port->tx_skbs.lock, flags);
+-
+-		skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) {
+-			if (OCELOT_SKB_CB(skb)->ts_id != id)
+-				continue;
+-			__skb_unlink(skb, &port->tx_skbs);
+-			skb_match = skb;
+-			break;
+-		}
+-
+-		spin_unlock_irqrestore(&port->tx_skbs.lock, flags);
+-
+-		if (WARN_ON(!skb_match))
+-			continue;
+-
+-		if (!ocelot_validate_ptp_skb(skb_match, seqid)) {
+-			dev_err_ratelimited(ocelot->dev,
+-					    "port %d received stale TX timestamp for seqid %d, discarding\n",
+-					    txport, seqid);
+-			dev_kfree_skb_any(skb);
+-			goto try_again;
++		skb_match = ocelot_port_dequeue_ptp_tx_skb(ocelot, txport, id,
++							   seqid);
++		if (!skb_match) {
++			dev_warn_ratelimited(ocelot->dev,
++					     "port %d received TX timestamp (seqid %d, ts id %u) for packet previously declared stale\n",
++					     txport, seqid, id);
++			goto next_ts;
+ 		}
+ 
+ 		/* Get the h/w timestamp */
+@@ -802,7 +851,7 @@ void ocelot_get_txtstamp(struct ocelot *ocelot)
+ 		shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
+ 		skb_complete_tx_timestamp(skb_match, &shhwtstamps);
+ 
+-		/* Next ts */
++next_ts:
+ 		ocelot_write(ocelot, SYS_PTP_NXT_PTP_NXT, SYS_PTP_NXT);
+ 	}
+ }
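[Aside: the rework above replaces the free-running per-port timestamp counter with ID reuse — the new ID is the first bit not set in a bitmap built from the skbs still queued for a TX timestamp, stale entries having just been flushed under the same lock. The core allocation step, modeled over a plain 64-bit word:]

    #include <stdint.h>

    #define MAX_PTP_ID	63	/* stand-in for OCELOT_MAX_PTP_ID */

    static int alloc_ts_id(uint64_t in_flight)
    {
    	for (int n = 0; n < MAX_PTP_ID; n++)
    		if (!(in_flight & (1ull << n)))
    			return n;	/* first free timestamp ID */
    	return -1;			/* -EBUSY: every ID in flight */
    }

    int main(void)
    {
    	return alloc_ts_id(0x7) == 3 ? 0 : 1;	/* IDs 0..2 busy -> 3 */
    }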
+diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
+index 8f7ce6b51a1c9b..6b4b40c6e1fe00 100644
+--- a/drivers/net/ethernet/qualcomm/qca_spi.c
++++ b/drivers/net/ethernet/qualcomm/qca_spi.c
+@@ -53,7 +53,7 @@ MODULE_PARM_DESC(qcaspi_burst_len, "Number of data bytes per burst. Use 1-5000."
+ 
+ #define QCASPI_PLUGGABLE_MIN 0
+ #define QCASPI_PLUGGABLE_MAX 1
+-static int qcaspi_pluggable = QCASPI_PLUGGABLE_MIN;
++static int qcaspi_pluggable = QCASPI_PLUGGABLE_MAX;
+ module_param(qcaspi_pluggable, int, 0);
+ MODULE_PARM_DESC(qcaspi_pluggable, "Pluggable SPI connection (yes/no).");
+ 
+@@ -812,7 +812,6 @@ qcaspi_netdev_init(struct net_device *dev)
+ 
+ 	dev->mtu = QCAFRM_MAX_MTU;
+ 	dev->type = ARPHRD_ETHER;
+-	qca->clkspeed = qcaspi_clkspeed;
+ 	qca->burst_len = qcaspi_burst_len;
+ 	qca->spi_thread = NULL;
+ 	qca->buffer_size = (QCAFRM_MAX_MTU + VLAN_ETH_HLEN + QCAFRM_HEADER_LEN +
+@@ -903,17 +902,15 @@ qca_spi_probe(struct spi_device *spi)
+ 	legacy_mode = of_property_read_bool(spi->dev.of_node,
+ 					    "qca,legacy-mode");
+ 
+-	if (qcaspi_clkspeed == 0) {
+-		if (spi->max_speed_hz)
+-			qcaspi_clkspeed = spi->max_speed_hz;
+-		else
+-			qcaspi_clkspeed = QCASPI_CLK_SPEED;
+-	}
++	if (qcaspi_clkspeed)
++		spi->max_speed_hz = qcaspi_clkspeed;
++	else if (!spi->max_speed_hz)
++		spi->max_speed_hz = QCASPI_CLK_SPEED;
+ 
+-	if ((qcaspi_clkspeed < QCASPI_CLK_SPEED_MIN) ||
+-	    (qcaspi_clkspeed > QCASPI_CLK_SPEED_MAX)) {
+-		dev_err(&spi->dev, "Invalid clkspeed: %d\n",
+-			qcaspi_clkspeed);
++	if (spi->max_speed_hz < QCASPI_CLK_SPEED_MIN ||
++	    spi->max_speed_hz > QCASPI_CLK_SPEED_MAX) {
++		dev_err(&spi->dev, "Invalid clkspeed: %u\n",
++			spi->max_speed_hz);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -938,14 +935,13 @@ qca_spi_probe(struct spi_device *spi)
+ 		return -EINVAL;
+ 	}
+ 
+-	dev_info(&spi->dev, "ver=%s, clkspeed=%d, burst_len=%d, pluggable=%d\n",
++	dev_info(&spi->dev, "ver=%s, clkspeed=%u, burst_len=%d, pluggable=%d\n",
+ 		 QCASPI_DRV_VERSION,
+-		 qcaspi_clkspeed,
++		 spi->max_speed_hz,
+ 		 qcaspi_burst_len,
+ 		 qcaspi_pluggable);
+ 
+ 	spi->mode = SPI_MODE_3;
+-	spi->max_speed_hz = qcaspi_clkspeed;
+ 	if (spi_setup(spi) < 0) {
+ 		dev_err(&spi->dev, "Unable to setup SPI device\n");
+ 		return -EFAULT;
+diff --git a/drivers/net/ethernet/qualcomm/qca_spi.h b/drivers/net/ethernet/qualcomm/qca_spi.h
+index 8f4808695e8206..0831cefc58b898 100644
+--- a/drivers/net/ethernet/qualcomm/qca_spi.h
++++ b/drivers/net/ethernet/qualcomm/qca_spi.h
+@@ -89,7 +89,6 @@ struct qcaspi {
+ #endif
+ 
+ 	/* user configurable options */
+-	u32 clkspeed;
+ 	u8 legacy_mode;
+ 	u16 burst_len;
+ };
+diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c
+index b80aa27a7214d4..09117110e3dd2a 100644
+--- a/drivers/net/ethernet/renesas/rswitch.c
++++ b/drivers/net/ethernet/renesas/rswitch.c
+@@ -862,13 +862,10 @@ static void rswitch_tx_free(struct net_device *ndev)
+ 	struct rswitch_ext_desc *desc;
+ 	struct sk_buff *skb;
+ 
+-	for (; rswitch_get_num_cur_queues(gq) > 0;
+-	     gq->dirty = rswitch_next_queue_index(gq, false, 1)) {
+-		desc = &gq->tx_ring[gq->dirty];
+-		if ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY)
+-			break;
+-
++	desc = &gq->tx_ring[gq->dirty];
++	while ((desc->desc.die_dt & DT_MASK) == DT_FEMPTY) {
+ 		dma_rmb();
++
+ 		skb = gq->skbs[gq->dirty];
+ 		if (skb) {
+ 			rdev->ndev->stats.tx_packets++;
+@@ -879,7 +876,10 @@ static void rswitch_tx_free(struct net_device *ndev)
+ 			dev_kfree_skb_any(gq->skbs[gq->dirty]);
+ 			gq->skbs[gq->dirty] = NULL;
+ 		}
++
+ 		desc->desc.die_dt = DT_EEMPTY;
++		gq->dirty = rswitch_next_queue_index(gq, false, 1);
++		desc = &gq->tx_ring[gq->dirty];
+ 	}
+ }
+ 
+@@ -908,8 +908,10 @@ static int rswitch_poll(struct napi_struct *napi, int budget)
+ 
+ 	if (napi_complete_done(napi, budget - quota)) {
+ 		spin_lock_irqsave(&priv->lock, flags);
+-		rswitch_enadis_data_irq(priv, rdev->tx_queue->index, true);
+-		rswitch_enadis_data_irq(priv, rdev->rx_queue->index, true);
++		if (test_bit(rdev->port, priv->opened_ports)) {
++			rswitch_enadis_data_irq(priv, rdev->tx_queue->index, true);
++			rswitch_enadis_data_irq(priv, rdev->rx_queue->index, true);
++		}
+ 		spin_unlock_irqrestore(&priv->lock, flags);
+ 	}
+ 
+@@ -1114,25 +1116,40 @@ static int rswitch_etha_wait_link_verification(struct rswitch_etha *etha)
+ 
+ static void rswitch_rmac_setting(struct rswitch_etha *etha, const u8 *mac)
+ {
+-	u32 val;
++	u32 pis, lsc;
+ 
+ 	rswitch_etha_write_mac_address(etha, mac);
+ 
++	switch (etha->phy_interface) {
++	case PHY_INTERFACE_MODE_SGMII:
++		pis = MPIC_PIS_GMII;
++		break;
++	case PHY_INTERFACE_MODE_USXGMII:
++	case PHY_INTERFACE_MODE_5GBASER:
++		pis = MPIC_PIS_XGMII;
++		break;
++	default:
++		pis = FIELD_GET(MPIC_PIS, ioread32(etha->addr + MPIC));
++		break;
++	}
++
+ 	switch (etha->speed) {
+ 	case 100:
+-		val = MPIC_LSC_100M;
++		lsc = MPIC_LSC_100M;
+ 		break;
+ 	case 1000:
+-		val = MPIC_LSC_1G;
++		lsc = MPIC_LSC_1G;
+ 		break;
+ 	case 2500:
+-		val = MPIC_LSC_2_5G;
++		lsc = MPIC_LSC_2_5G;
+ 		break;
+ 	default:
+-		return;
++		lsc = FIELD_GET(MPIC_LSC, ioread32(etha->addr + MPIC));
++		break;
+ 	}
+ 
+-	iowrite32(MPIC_PIS_GMII | val, etha->addr + MPIC);
++	rswitch_modify(etha->addr, MPIC, MPIC_PIS | MPIC_LSC,
++		       FIELD_PREP(MPIC_PIS, pis) | FIELD_PREP(MPIC_LSC, lsc));
+ }
+ 
+ static void rswitch_etha_enable_mii(struct rswitch_etha *etha)
+@@ -1538,20 +1555,20 @@ static int rswitch_open(struct net_device *ndev)
+ 	struct rswitch_device *rdev = netdev_priv(ndev);
+ 	unsigned long flags;
+ 
+-	phy_start(ndev->phydev);
++	if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
++		iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDIE);
+ 
+ 	napi_enable(&rdev->napi);
+-	netif_start_queue(ndev);
+ 
+ 	spin_lock_irqsave(&rdev->priv->lock, flags);
++	bitmap_set(rdev->priv->opened_ports, rdev->port, 1);
+ 	rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, true);
+ 	rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, true);
+ 	spin_unlock_irqrestore(&rdev->priv->lock, flags);
+ 
+-	if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
+-		iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDIE);
++	phy_start(ndev->phydev);
+ 
+-	bitmap_set(rdev->priv->opened_ports, rdev->port, 1);
++	netif_start_queue(ndev);
+ 
+ 	return 0;
+ };
+@@ -1563,7 +1580,16 @@ static int rswitch_stop(struct net_device *ndev)
+ 	unsigned long flags;
+ 
+ 	netif_tx_stop_all_queues(ndev);
++
++	phy_stop(ndev->phydev);
++
++	spin_lock_irqsave(&rdev->priv->lock, flags);
++	rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false);
++	rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false);
+ 	bitmap_clear(rdev->priv->opened_ports, rdev->port, 1);
++	spin_unlock_irqrestore(&rdev->priv->lock, flags);
++
++	napi_disable(&rdev->napi);
+ 
+ 	if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
+ 		iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDID);
+@@ -1576,14 +1602,6 @@ static int rswitch_stop(struct net_device *ndev)
+ 		kfree(ts_info);
+ 	}
+ 
+-	spin_lock_irqsave(&rdev->priv->lock, flags);
+-	rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false);
+-	rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false);
+-	spin_unlock_irqrestore(&rdev->priv->lock, flags);
+-
+-	phy_stop(ndev->phydev);
+-	napi_disable(&rdev->napi);
+-
+ 	return 0;
+ };
+ 
+@@ -1681,8 +1699,11 @@ static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *nd
+ 	if (dma_mapping_error(ndev->dev.parent, dma_addr_orig))
+ 		goto err_kfree;
+ 
+-	gq->skbs[gq->cur] = skb;
+-	gq->unmap_addrs[gq->cur] = dma_addr_orig;
++	/* Store the skb at the last descriptor to avoid freeing it before the hardware completes the send */
++	gq->skbs[(gq->cur + nr_desc - 1) % gq->ring_size] = skb;
++	gq->unmap_addrs[(gq->cur + nr_desc - 1) % gq->ring_size] = dma_addr_orig;
++
++	dma_wmb();
+ 
+ 	/* DT_FSTART should be set at last. So, this is reverse order. */
+ 	for (i = nr_desc; i-- > 0; ) {
+@@ -1694,14 +1715,13 @@ static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *nd
+ 			goto err_unmap;
+ 	}
+ 
+-	wmb();	/* gq->cur must be incremented after die_dt was set */
+-
+ 	gq->cur = rswitch_next_queue_index(gq, true, nr_desc);
+ 	rswitch_modify(rdev->addr, GWTRC(gq->index), 0, BIT(gq->index % 32));
+ 
+ 	return ret;
+ 
+ err_unmap:
++	gq->skbs[(gq->cur + nr_desc - 1) % gq->ring_size] = NULL;
+ 	dma_unmap_single(ndev->dev.parent, dma_addr_orig, skb->len, DMA_TO_DEVICE);
+ 
+ err_kfree:
+@@ -1889,7 +1909,6 @@ static int rswitch_device_alloc(struct rswitch_private *priv, unsigned int index
+ 	rdev->np_port = rswitch_get_port_node(rdev);
+ 	rdev->disabled = !rdev->np_port;
+ 	err = of_get_ethdev_address(rdev->np_port, ndev);
+-	of_node_put(rdev->np_port);
+ 	if (err) {
+ 		if (is_valid_ether_addr(rdev->etha->mac_addr))
+ 			eth_hw_addr_set(ndev, rdev->etha->mac_addr);
+@@ -1919,6 +1938,7 @@ static int rswitch_device_alloc(struct rswitch_private *priv, unsigned int index
+ 
+ out_rxdmac:
+ out_get_params:
++	of_node_put(rdev->np_port);
+ 	netif_napi_del(&rdev->napi);
+ 	free_netdev(ndev);
+ 
+@@ -1932,6 +1952,7 @@ static void rswitch_device_free(struct rswitch_private *priv, unsigned int index
+ 
+ 	rswitch_txdmac_free(ndev);
+ 	rswitch_rxdmac_free(ndev);
++	of_node_put(rdev->np_port);
+ 	netif_napi_del(&rdev->napi);
+ 	free_netdev(ndev);
+ }
+diff --git a/drivers/net/ethernet/renesas/rswitch.h b/drivers/net/ethernet/renesas/rswitch.h
+index 72e3ff596d3183..e020800dcc570e 100644
+--- a/drivers/net/ethernet/renesas/rswitch.h
++++ b/drivers/net/ethernet/renesas/rswitch.h
+@@ -724,13 +724,13 @@ enum rswitch_etha_mode {
+ 
+ #define EAVCC_VEM_SC_TAG	(0x3 << 16)
+ 
+-#define MPIC_PIS_MII		0x00
+-#define MPIC_PIS_GMII		0x02
+-#define MPIC_PIS_XGMII		0x04
+-#define MPIC_LSC_SHIFT		3
+-#define MPIC_LSC_100M		(1 << MPIC_LSC_SHIFT)
+-#define MPIC_LSC_1G		(2 << MPIC_LSC_SHIFT)
+-#define MPIC_LSC_2_5G		(3 << MPIC_LSC_SHIFT)
++#define MPIC_PIS		GENMASK(2, 0)
++#define MPIC_PIS_GMII		2
++#define MPIC_PIS_XGMII		4
++#define MPIC_LSC		GENMASK(5, 3)
++#define MPIC_LSC_100M		1
++#define MPIC_LSC_1G		2
++#define MPIC_LSC_2_5G		3
+ 
+ #define MDIO_READ_C45		0x03
+ #define MDIO_WRITE_C45		0x01
+diff --git a/drivers/net/team/team_core.c b/drivers/net/team/team_core.c
+index 18191d5a8bd4d3..6ace5a74cddb57 100644
+--- a/drivers/net/team/team_core.c
++++ b/drivers/net/team/team_core.c
+@@ -983,7 +983,8 @@ static void team_port_disable(struct team *team,
+ 
+ #define TEAM_VLAN_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
+ 			    NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE | \
+-			    NETIF_F_HIGHDMA | NETIF_F_LRO)
++			    NETIF_F_HIGHDMA | NETIF_F_LRO | \
++			    NETIF_F_GSO_ENCAP_ALL)
+ 
+ #define TEAM_ENC_FEATURES	(NETIF_F_HW_CSUM | NETIF_F_SG | \
+ 				 NETIF_F_RXCSUM | NETIF_F_GSO_SOFTWARE)
+@@ -991,13 +992,14 @@ static void team_port_disable(struct team *team,
+ static void __team_compute_features(struct team *team)
+ {
+ 	struct team_port *port;
+-	netdev_features_t vlan_features = TEAM_VLAN_FEATURES &
+-					  NETIF_F_ALL_FOR_ALL;
++	netdev_features_t vlan_features = TEAM_VLAN_FEATURES;
+ 	netdev_features_t enc_features  = TEAM_ENC_FEATURES;
+ 	unsigned short max_hard_header_len = ETH_HLEN;
+ 	unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
+ 					IFF_XMIT_DST_RELEASE_PERM;
+ 
++	vlan_features = netdev_base_features(vlan_features);
++
+ 	rcu_read_lock();
+ 	list_for_each_entry_rcu(port, &team->port_list, list) {
+ 		vlan_features = netdev_increment_features(vlan_features,
+@@ -2012,8 +2014,7 @@ static netdev_features_t team_fix_features(struct net_device *dev,
+ 	netdev_features_t mask;
+ 
+ 	mask = features;
+-	features &= ~NETIF_F_ONE_FOR_ALL;
+-	features |= NETIF_F_ALL_FOR_ALL;
++	features = netdev_base_features(features);
+ 
+ 	rcu_read_lock();
+ 	list_for_each_entry_rcu(port, &team->port_list, list) {
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index c897afef0b414c..60027b439021b8 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -502,6 +502,7 @@ struct virtio_net_common_hdr {
+ };
+ 
+ static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
++static void virtnet_sq_free_unused_buf_done(struct virtqueue *vq);
+ static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
+ 			       struct net_device *dev,
+ 			       unsigned int *xdp_xmit,
+@@ -2898,7 +2899,6 @@ static int virtnet_enable_queue_pair(struct virtnet_info *vi, int qp_index)
+ 	if (err < 0)
+ 		goto err_xdp_reg_mem_model;
+ 
+-	netdev_tx_reset_queue(netdev_get_tx_queue(vi->dev, qp_index));
+ 	virtnet_napi_enable(vi->rq[qp_index].vq, &vi->rq[qp_index].napi);
+ 	virtnet_napi_tx_enable(vi, vi->sq[qp_index].vq, &vi->sq[qp_index].napi);
+ 
+@@ -3166,7 +3166,7 @@ static int virtnet_rx_resize(struct virtnet_info *vi,
+ 
+ 	virtnet_rx_pause(vi, rq);
+ 
+-	err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_unmap_free_buf);
++	err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_unmap_free_buf, NULL);
+ 	if (err)
+ 		netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err);
+ 
+@@ -3229,7 +3229,8 @@ static int virtnet_tx_resize(struct virtnet_info *vi, struct send_queue *sq,
+ 
+ 	virtnet_tx_pause(vi, sq);
+ 
+-	err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf);
++	err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf,
++			       virtnet_sq_free_unused_buf_done);
+ 	if (err)
+ 		netdev_err(vi->dev, "resize tx fail: tx queue index: %d err: %d\n", qindex, err);
+ 
+@@ -5997,6 +5998,14 @@ static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf)
+ 		xdp_return_frame(ptr_to_xdp(buf));
+ }
+ 
++static void virtnet_sq_free_unused_buf_done(struct virtqueue *vq)
++{
++	struct virtnet_info *vi = vq->vdev->priv;
++	int i = vq2txq(vq);
++
++	netdev_tx_reset_queue(netdev_get_tx_queue(vi->dev, i));
++}
++
+ static void free_unused_bufs(struct virtnet_info *vi)
+ {
+ 	void *buf;
+@@ -6728,11 +6737,20 @@ static int virtnet_probe(struct virtio_device *vdev)
+ 
+ static void remove_vq_common(struct virtnet_info *vi)
+ {
++	int i;
++
+ 	virtio_reset_device(vi->vdev);
+ 
+ 	/* Free unused buffers in both send and recv, if any. */
+ 	free_unused_bufs(vi);
+ 
++	/*
++	 * As a rule of thumb, netdev_tx_reset_queue() should follow any
++	 * skb freeing that is not followed by netdev_tx_completed_queue().
++	 */
++	for (i = 0; i < vi->max_queue_pairs; i++)
++		netdev_tx_reset_queue(netdev_get_tx_queue(vi->dev, i));
++
+ 	free_receive_bufs(vi);
+ 
+ 	free_receive_page_frags(vi);
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+index a7a10e716e6517..e96ddaeeeeff52 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+@@ -1967,7 +1967,7 @@ void iwl_mvm_channel_switch_error_notif(struct iwl_mvm *mvm,
+ 	if (csa_err_mask & (CS_ERR_COUNT_ERROR |
+ 			    CS_ERR_LONG_DELAY_AFTER_CS |
+ 			    CS_ERR_TX_BLOCK_TIMER_EXPIRED))
+-		ieee80211_channel_switch_disconnect(vif, true);
++		ieee80211_channel_switch_disconnect(vif);
+ 	rcu_read_unlock();
+ }
+ 
+diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
+index 4265c1cd0ff716..63fe51d0e64db3 100644
+--- a/drivers/net/xen-netfront.c
++++ b/drivers/net/xen-netfront.c
+@@ -867,7 +867,7 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
+ static int xennet_close(struct net_device *dev)
+ {
+ 	struct netfront_info *np = netdev_priv(dev);
+-	unsigned int num_queues = dev->real_num_tx_queues;
++	unsigned int num_queues = np->queues ? dev->real_num_tx_queues : 0;
+ 	unsigned int i;
+ 	struct netfront_queue *queue;
+ 	netif_tx_stop_all_queues(np->netdev);
+@@ -882,6 +882,9 @@ static void xennet_destroy_queues(struct netfront_info *info)
+ {
+ 	unsigned int i;
+ 
++	if (!info->queues)
++		return;
++
+ 	for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
+ 		struct netfront_queue *queue = &info->queues[i];
+ 
+diff --git a/drivers/ptp/ptp_kvm_x86.c b/drivers/ptp/ptp_kvm_x86.c
+index 617c8d6706d3d0..6cea4fe39bcfe4 100644
+--- a/drivers/ptp/ptp_kvm_x86.c
++++ b/drivers/ptp/ptp_kvm_x86.c
+@@ -26,7 +26,7 @@ int kvm_arch_ptp_init(void)
+ 	long ret;
+ 
+ 	if (!kvm_para_available())
+-		return -ENODEV;
++		return -EOPNOTSUPP;
+ 
+ 	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
+ 		p = alloc_page(GFP_KERNEL | __GFP_ZERO);
+@@ -46,14 +46,14 @@ int kvm_arch_ptp_init(void)
+ 
+ 	clock_pair_gpa = slow_virt_to_phys(clock_pair);
+ 	if (!pvclock_get_pvti_cpu0_va()) {
+-		ret = -ENODEV;
++		ret = -EOPNOTSUPP;
+ 		goto err;
+ 	}
+ 
+ 	ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING, clock_pair_gpa,
+ 			     KVM_CLOCK_PAIRING_WALLCLOCK);
+ 	if (ret == -KVM_ENOSYS) {
+-		ret = -ENODEV;
++		ret = -EOPNOTSUPP;
+ 		goto err;
+ 	}
+ 
+diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
+index a8e91d9d028b89..945d2917b91bac 100644
+--- a/drivers/regulator/axp20x-regulator.c
++++ b/drivers/regulator/axp20x-regulator.c
+@@ -371,8 +371,8 @@
+ 		.ops		= &axp20x_ops,					\
+ 	}
+ 
+-#define AXP_DESC(_family, _id, _match, _supply, _min, _max, _step, _vreg,	\
+-		 _vmask, _ereg, _emask) 					\
++#define AXP_DESC_DELAY(_family, _id, _match, _supply, _min, _max, _step, _vreg,	\
++		 _vmask, _ereg, _emask, _ramp_delay) 				\
+ 	[_family##_##_id] = {							\
+ 		.name		= (_match),					\
+ 		.supply_name	= (_supply),					\
+@@ -388,9 +388,15 @@
+ 		.vsel_mask	= (_vmask),					\
+ 		.enable_reg	= (_ereg),					\
+ 		.enable_mask	= (_emask),					\
++		.ramp_delay = (_ramp_delay),					\
+ 		.ops		= &axp20x_ops,					\
+ 	}
+ 
++#define AXP_DESC(_family, _id, _match, _supply, _min, _max, _step, _vreg,	\
++		 _vmask, _ereg, _emask) 					\
++	AXP_DESC_DELAY(_family, _id, _match, _supply, _min, _max, _step, _vreg,	\
++		 _vmask, _ereg, _emask, 0)
++
+ #define AXP_DESC_SW(_family, _id, _match, _supply, _ereg, _emask)		\
+ 	[_family##_##_id] = {							\
+ 		.name		= (_match),					\
+@@ -419,8 +425,8 @@
+ 		.ops		= &axp20x_ops_fixed				\
+ 	}
+ 
+-#define AXP_DESC_RANGES(_family, _id, _match, _supply, _ranges, _n_voltages,	\
+-			_vreg, _vmask, _ereg, _emask)				\
++#define AXP_DESC_RANGES_DELAY(_family, _id, _match, _supply, _ranges, _n_voltages,	\
++			_vreg, _vmask, _ereg, _emask, _ramp_delay)	\
+ 	[_family##_##_id] = {							\
+ 		.name		= (_match),					\
+ 		.supply_name	= (_supply),					\
+@@ -436,9 +442,15 @@
+ 		.enable_mask	= (_emask),					\
+ 		.linear_ranges	= (_ranges),					\
+ 		.n_linear_ranges = ARRAY_SIZE(_ranges),				\
++		.ramp_delay = (_ramp_delay),					\
+ 		.ops		= &axp20x_ops_range,				\
+ 	}
+ 
++#define AXP_DESC_RANGES(_family, _id, _match, _supply, _ranges, _n_voltages,	\
++			_vreg, _vmask, _ereg, _emask)				\
++	AXP_DESC_RANGES_DELAY(_family, _id, _match, _supply, _ranges,		\
++		 _n_voltages, _vreg, _vmask, _ereg, _emask, 0)
++
+ static const int axp209_dcdc2_ldo3_slew_rates[] = {
+ 	1600,
+ 	 800,
+@@ -781,21 +793,21 @@ static const struct linear_range axp717_dcdc3_ranges[] = {
+ };
+ 
+ static const struct regulator_desc axp717_regulators[] = {
+-	AXP_DESC_RANGES(AXP717, DCDC1, "dcdc1", "vin1",
++	AXP_DESC_RANGES_DELAY(AXP717, DCDC1, "dcdc1", "vin1",
+ 			axp717_dcdc1_ranges, AXP717_DCDC1_NUM_VOLTAGES,
+ 			AXP717_DCDC1_CONTROL, AXP717_DCDC_V_OUT_MASK,
+-			AXP717_DCDC_OUTPUT_CONTROL, BIT(0)),
+-	AXP_DESC_RANGES(AXP717, DCDC2, "dcdc2", "vin2",
++			AXP717_DCDC_OUTPUT_CONTROL, BIT(0), 640),
++	AXP_DESC_RANGES_DELAY(AXP717, DCDC2, "dcdc2", "vin2",
+ 			axp717_dcdc2_ranges, AXP717_DCDC2_NUM_VOLTAGES,
+ 			AXP717_DCDC2_CONTROL, AXP717_DCDC_V_OUT_MASK,
+-			AXP717_DCDC_OUTPUT_CONTROL, BIT(1)),
+-	AXP_DESC_RANGES(AXP717, DCDC3, "dcdc3", "vin3",
++			AXP717_DCDC_OUTPUT_CONTROL, BIT(1), 640),
++	AXP_DESC_RANGES_DELAY(AXP717, DCDC3, "dcdc3", "vin3",
+ 			axp717_dcdc3_ranges, AXP717_DCDC3_NUM_VOLTAGES,
+ 			AXP717_DCDC3_CONTROL, AXP717_DCDC_V_OUT_MASK,
+-			AXP717_DCDC_OUTPUT_CONTROL, BIT(2)),
+-	AXP_DESC(AXP717, DCDC4, "dcdc4", "vin4", 1000, 3700, 100,
++			AXP717_DCDC_OUTPUT_CONTROL, BIT(2), 640),
++	AXP_DESC_DELAY(AXP717, DCDC4, "dcdc4", "vin4", 1000, 3700, 100,
+ 		 AXP717_DCDC4_CONTROL, AXP717_DCDC_V_OUT_MASK,
+-		 AXP717_DCDC_OUTPUT_CONTROL, BIT(3)),
++		 AXP717_DCDC_OUTPUT_CONTROL, BIT(3), 6400),
+ 	AXP_DESC(AXP717, ALDO1, "aldo1", "aldoin", 500, 3500, 100,
+ 		 AXP717_ALDO1_CONTROL, AXP717_LDO_V_OUT_MASK,
+ 		 AXP717_LDO0_OUTPUT_CONTROL, BIT(0)),
+diff --git a/drivers/spi/spi-aspeed-smc.c b/drivers/spi/spi-aspeed-smc.c
+index bbd417c55e7f56..b0e3f307b28353 100644
+--- a/drivers/spi/spi-aspeed-smc.c
++++ b/drivers/spi/spi-aspeed-smc.c
+@@ -239,7 +239,7 @@ static ssize_t aspeed_spi_read_user(struct aspeed_spi_chip *chip,
+ 
+ 	ret = aspeed_spi_send_cmd_addr(chip, op->addr.nbytes, offset, op->cmd.opcode);
+ 	if (ret < 0)
+-		return ret;
++		goto stop_user;
+ 
+ 	if (op->dummy.buswidth && op->dummy.nbytes) {
+ 		for (i = 0; i < op->dummy.nbytes / op->dummy.buswidth; i++)
+@@ -249,8 +249,9 @@ static ssize_t aspeed_spi_read_user(struct aspeed_spi_chip *chip,
+ 	aspeed_spi_set_io_mode(chip, io_mode);
+ 
+ 	aspeed_spi_read_from_ahb(buf, chip->ahb_base, len);
++stop_user:
+ 	aspeed_spi_stop_user(chip);
+-	return 0;
++	return ret;
+ }
+ 
+ static ssize_t aspeed_spi_write_user(struct aspeed_spi_chip *chip,
+@@ -261,10 +262,11 @@ static ssize_t aspeed_spi_write_user(struct aspeed_spi_chip *chip,
+ 	aspeed_spi_start_user(chip);
+ 	ret = aspeed_spi_send_cmd_addr(chip, op->addr.nbytes, op->addr.val, op->cmd.opcode);
+ 	if (ret < 0)
+-		return ret;
++		goto stop_user;
+ 	aspeed_spi_write_to_ahb(chip->ahb_base, op->data.buf.out, op->data.nbytes);
++stop_user:
+ 	aspeed_spi_stop_user(chip);
+-	return 0;
++	return ret;
+ }
+ 
+ /* support for 1-1-1, 1-1-2 or 1-1-4 */
+diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
+index 0bb33c43b1b46e..40a64a598a7495 100644
+--- a/drivers/spi/spi-rockchip.c
++++ b/drivers/spi/spi-rockchip.c
+@@ -241,6 +241,20 @@ static void rockchip_spi_set_cs(struct spi_device *spi, bool enable)
+ 	struct spi_controller *ctlr = spi->controller;
+ 	struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
+ 	bool cs_asserted = spi->mode & SPI_CS_HIGH ? enable : !enable;
++	bool cs_actual;
++
++	/*
++	 * The SPI subsystem tries to avoid no-op calls that would break the
++	 * PM refcount below, but it can't do so the first time a device is
++	 * used. To detect this case, read the current CS state here and bail
++	 * out early for no-ops.
++	 */
++	if (spi_get_csgpiod(spi, 0))
++		cs_actual = !!(readl_relaxed(rs->regs + ROCKCHIP_SPI_SER) & 1);
++	else
++		cs_actual = !!(readl_relaxed(rs->regs + ROCKCHIP_SPI_SER) &
++			       BIT(spi_get_chipselect(spi, 0)));
++	if (unlikely(cs_actual == cs_asserted))
++		return;
+ 
+ 	if (cs_asserted) {
+ 		/* Keep things powered as long as CS is asserted */
+diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
+index b80e9a528e17ff..bdf17eafd3598d 100644
+--- a/drivers/tty/serial/sh-sci.c
++++ b/drivers/tty/serial/sh-sci.c
+@@ -157,6 +157,7 @@ struct sci_port {
+ 
+ 	bool has_rtscts;
+ 	bool autorts;
++	bool tx_occurred;
+ };
+ 
+ #define SCI_NPORTS CONFIG_SERIAL_SH_SCI_NR_UARTS
+@@ -850,6 +851,7 @@ static void sci_transmit_chars(struct uart_port *port)
+ {
+ 	struct tty_port *tport = &port->state->port;
+ 	unsigned int stopped = uart_tx_stopped(port);
++	struct sci_port *s = to_sci_port(port);
+ 	unsigned short status;
+ 	unsigned short ctrl;
+ 	int count;
+@@ -885,6 +887,7 @@ static void sci_transmit_chars(struct uart_port *port)
+ 		}
+ 
+ 		sci_serial_out(port, SCxTDR, c);
++		s->tx_occurred = true;
+ 
+ 		port->icount.tx++;
+ 	} while (--count > 0);
+@@ -1241,6 +1244,8 @@ static void sci_dma_tx_complete(void *arg)
+ 	if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS)
+ 		uart_write_wakeup(port);
+ 
++	s->tx_occurred = true;
++
+ 	if (!kfifo_is_empty(&tport->xmit_fifo)) {
+ 		s->cookie_tx = 0;
+ 		schedule_work(&s->work_tx);
+@@ -1731,6 +1736,19 @@ static void sci_flush_buffer(struct uart_port *port)
+ 		s->cookie_tx = -EINVAL;
+ 	}
+ }
++
++static void sci_dma_check_tx_occurred(struct sci_port *s)
++{
++	struct dma_tx_state state;
++	enum dma_status status;
++
++	if (!s->chan_tx)
++		return;
++
++	status = dmaengine_tx_status(s->chan_tx, s->cookie_tx, &state);
++	if (status == DMA_COMPLETE || status == DMA_IN_PROGRESS)
++		s->tx_occurred = true;
++}
+ #else /* !CONFIG_SERIAL_SH_SCI_DMA */
+ static inline void sci_request_dma(struct uart_port *port)
+ {
+@@ -1740,6 +1758,10 @@ static inline void sci_free_dma(struct uart_port *port)
+ {
+ }
+ 
++static void sci_dma_check_tx_occurred(struct sci_port *s)
++{
++}
++
+ #define sci_flush_buffer	NULL
+ #endif /* !CONFIG_SERIAL_SH_SCI_DMA */
+ 
+@@ -2076,6 +2098,12 @@ static unsigned int sci_tx_empty(struct uart_port *port)
+ {
+ 	unsigned short status = sci_serial_in(port, SCxSR);
+ 	unsigned short in_tx_fifo = sci_txfill(port);
++	struct sci_port *s = to_sci_port(port);
++
++	sci_dma_check_tx_occurred(s);
++
++	if (!s->tx_occurred)
++		return TIOCSER_TEMT;
+ 
+ 	return (status & SCxSR_TEND(port)) && !in_tx_fifo ? TIOCSER_TEMT : 0;
+ }
+@@ -2247,6 +2275,7 @@ static int sci_startup(struct uart_port *port)
+ 
+ 	dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
+ 
++	s->tx_occurred = false;
+ 	sci_request_dma(port);
+ 
+ 	ret = sci_request_irq(s);
+diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
+index cfebe4a1af9e84..bc13133efaa508 100644
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -5566,6 +5566,7 @@ void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
+ 
+ 	lrbp = &hba->lrb[task_tag];
+ 	lrbp->compl_time_stamp = ktime_get();
++	lrbp->compl_time_stamp_local_clock = local_clock();
+ 	cmd = lrbp->cmd;
+ 	if (cmd) {
+ 		if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
+diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
+index 500dc35e64774d..0b2490347b9fe7 100644
+--- a/drivers/usb/core/hcd.c
++++ b/drivers/usb/core/hcd.c
+@@ -2794,8 +2794,14 @@ int usb_add_hcd(struct usb_hcd *hcd,
+ 	int retval;
+ 	struct usb_device *rhdev;
+ 	struct usb_hcd *shared_hcd;
++	int skip_phy_initialization;
+ 
+-	if (!hcd->skip_phy_initialization) {
++	if (usb_hcd_is_primary_hcd(hcd))
++		skip_phy_initialization = hcd->skip_phy_initialization;
++	else
++		skip_phy_initialization = hcd->primary_hcd->skip_phy_initialization;
++
++	if (!skip_phy_initialization) {
+ 		if (usb_hcd_is_primary_hcd(hcd)) {
+ 			hcd->phy_roothub = usb_phy_roothub_alloc(hcd->self.sysdev);
+ 			if (IS_ERR(hcd->phy_roothub))
+diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
+index cb54390e7de488..8c3941ecaaf5d4 100644
+--- a/drivers/usb/dwc2/hcd.c
++++ b/drivers/usb/dwc2/hcd.c
+@@ -3546,11 +3546,9 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
+ 			port_status |= USB_PORT_STAT_C_OVERCURRENT << 16;
+ 		}
+ 
+-		if (!hsotg->flags.b.port_connect_status) {
++		if (dwc2_is_device_mode(hsotg)) {
+ 			/*
+-			 * The port is disconnected, which means the core is
+-			 * either in device mode or it soon will be. Just
+-			 * return 0's for the remainder of the port status
++			 * Just return 0's for the remainder of the port status
+ 			 * since the port register can't be read if the core
+ 			 * is in device mode.
+ 			 */
+@@ -3620,13 +3618,11 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
+ 		if (wvalue != USB_PORT_FEAT_TEST && (!windex || windex > 1))
+ 			goto error;
+ 
+-		if (!hsotg->flags.b.port_connect_status) {
++		if (dwc2_is_device_mode(hsotg)) {
+ 			/*
+-			 * The port is disconnected, which means the core is
+-			 * either in device mode or it soon will be. Just
+-			 * return without doing anything since the port
+-			 * register can't be written if the core is in device
+-			 * mode.
++			 * Just return without doing anything since the port
++			 * register can't be written if the core is in device
++			 * mode.
+ 			 */
+ 			break;
+ 		}
+@@ -4349,7 +4345,7 @@ static int _dwc2_hcd_suspend(struct usb_hcd *hcd)
+ 	if (hsotg->bus_suspended)
+ 		goto skip_power_saving;
+ 
+-	if (hsotg->flags.b.port_connect_status == 0)
++	if (!(dwc2_read_hprt0(hsotg) & HPRT0_CONNSTS))
+ 		goto skip_power_saving;
+ 
+ 	switch (hsotg->params.power_down) {
+@@ -4431,6 +4427,7 @@ static int _dwc2_hcd_resume(struct usb_hcd *hcd)
+ 	 * Power Down mode.
+ 	 */
+ 	if (hprt0 & HPRT0_CONNSTS) {
++		set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+ 		hsotg->lx_state = DWC2_L0;
+ 		goto unlock;
+ 	}
+diff --git a/drivers/usb/dwc3/dwc3-imx8mp.c b/drivers/usb/dwc3/dwc3-imx8mp.c
+index 64c0cd1995aa06..e99faf014c78a6 100644
+--- a/drivers/usb/dwc3/dwc3-imx8mp.c
++++ b/drivers/usb/dwc3/dwc3-imx8mp.c
+@@ -129,6 +129,16 @@ static void dwc3_imx8mp_wakeup_disable(struct dwc3_imx8mp *dwc3_imx)
+ 	writel(val, dwc3_imx->hsio_blk_base + USB_WAKEUP_CTRL);
+ }
+ 
++static const struct property_entry dwc3_imx8mp_properties[] = {
++	PROPERTY_ENTRY_BOOL("xhci-missing-cas-quirk"),
++	PROPERTY_ENTRY_BOOL("xhci-skip-phy-init-quirk"),
++	{},
++};
++
++static const struct software_node dwc3_imx8mp_swnode = {
++	.properties = dwc3_imx8mp_properties,
++};
++
+ static irqreturn_t dwc3_imx8mp_interrupt(int irq, void *_dwc3_imx)
+ {
+ 	struct dwc3_imx8mp	*dwc3_imx = _dwc3_imx;
+@@ -148,17 +158,6 @@ static irqreturn_t dwc3_imx8mp_interrupt(int irq, void *_dwc3_imx)
+ 	return IRQ_HANDLED;
+ }
+ 
+-static int dwc3_imx8mp_set_software_node(struct device *dev)
+-{
+-	struct property_entry props[3] = { 0 };
+-	int prop_idx = 0;
+-
+-	props[prop_idx++] = PROPERTY_ENTRY_BOOL("xhci-missing-cas-quirk");
+-	props[prop_idx++] = PROPERTY_ENTRY_BOOL("xhci-skip-phy-init-quirk");
+-
+-	return device_create_managed_software_node(dev, props, NULL);
+-}
+-
+ static int dwc3_imx8mp_probe(struct platform_device *pdev)
+ {
+ 	struct device		*dev = &pdev->dev;
+@@ -221,17 +220,17 @@ static int dwc3_imx8mp_probe(struct platform_device *pdev)
+ 	if (err < 0)
+ 		goto disable_rpm;
+ 
+-	err = dwc3_imx8mp_set_software_node(dev);
++	err = device_add_software_node(dev, &dwc3_imx8mp_swnode);
+ 	if (err) {
+ 		err = -ENODEV;
+-		dev_err(dev, "failed to create software node\n");
++		dev_err(dev, "failed to add software node\n");
+ 		goto disable_rpm;
+ 	}
+ 
+ 	err = of_platform_populate(node, NULL, NULL, dev);
+ 	if (err) {
+ 		dev_err(&pdev->dev, "failed to create dwc3 core\n");
+-		goto disable_rpm;
++		goto remove_swnode;
+ 	}
+ 
+ 	dwc3_imx->dwc3 = of_find_device_by_node(dwc3_np);
+@@ -255,6 +254,8 @@ static int dwc3_imx8mp_probe(struct platform_device *pdev)
+ 
+ depopulate:
+ 	of_platform_depopulate(dev);
++remove_swnode:
++	device_remove_software_node(dev);
+ disable_rpm:
+ 	pm_runtime_disable(dev);
+ 	pm_runtime_put_noidle(dev);
+@@ -268,6 +269,7 @@ static void dwc3_imx8mp_remove(struct platform_device *pdev)
+ 
+ 	pm_runtime_get_sync(dev);
+ 	of_platform_depopulate(dev);
++	device_remove_software_node(dev);
+ 
+ 	pm_runtime_disable(dev);
+ 	pm_runtime_put_noidle(dev);
+diff --git a/drivers/usb/dwc3/dwc3-xilinx.c b/drivers/usb/dwc3/dwc3-xilinx.c
+index b5e5be424ce997..96c87dc4757f22 100644
+--- a/drivers/usb/dwc3/dwc3-xilinx.c
++++ b/drivers/usb/dwc3/dwc3-xilinx.c
+@@ -121,8 +121,11 @@ static int dwc3_xlnx_init_zynqmp(struct dwc3_xlnx *priv_data)
+ 	 * in use but the usb3-phy entry is missing from the device tree.
+ 	 * Therefore, skip these operations in this case.
+ 	 */
+-	if (!priv_data->usb3_phy)
++	if (!priv_data->usb3_phy) {
++		/* Deselect the PIPE Clock Select bit in the FPD PIPE Clock register */
++		writel(PIPE_CLK_DESELECT, priv_data->regs + XLNX_USB_FPD_PIPE_CLK);
+ 		goto skip_usb3_phy;
++	}
+ 
+ 	crst = devm_reset_control_get_exclusive(dev, "usb_crst");
+ 	if (IS_ERR(crst)) {
+diff --git a/drivers/usb/gadget/function/f_midi2.c b/drivers/usb/gadget/function/f_midi2.c
+index 8285df9ed6fd78..8c9d0074db588b 100644
+--- a/drivers/usb/gadget/function/f_midi2.c
++++ b/drivers/usb/gadget/function/f_midi2.c
+@@ -1593,7 +1593,11 @@ static int f_midi2_create_card(struct f_midi2 *midi2)
+ 			fb->info.midi_ci_version = b->midi_ci_version;
+ 			fb->info.ui_hint = reverse_dir(b->ui_hint);
+ 			fb->info.sysex8_streams = b->sysex8_streams;
+-			fb->info.flags |= b->is_midi1;
++			if (b->is_midi1 < 2)
++				fb->info.flags |= b->is_midi1;
++			else
++				fb->info.flags |= SNDRV_UMP_BLOCK_IS_MIDI1 |
++					SNDRV_UMP_BLOCK_IS_LOWSPEED;
+ 			strscpy(fb->info.name, ump_fb_name(b),
+ 				sizeof(fb->info.name));
+ 		}
+diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
+index 0a8c05b2746b4e..53d9fc41acc522 100644
+--- a/drivers/usb/gadget/function/u_serial.c
++++ b/drivers/usb/gadget/function/u_serial.c
+@@ -579,9 +579,12 @@ static int gs_start_io(struct gs_port *port)
+ 		 * we didn't in gs_start_tx() */
+ 		tty_wakeup(port->port.tty);
+ 	} else {
+-		gs_free_requests(ep, head, &port->read_allocated);
+-		gs_free_requests(port->port_usb->in, &port->write_pool,
+-			&port->write_allocated);
++		/* Free reqs only if we are still connected */
++		if (port->port_usb) {
++			gs_free_requests(ep, head, &port->read_allocated);
++			gs_free_requests(port->port_usb->in, &port->write_pool,
++				&port->write_allocated);
++		}
+ 		status = -EIO;
+ 	}
+ 
+diff --git a/drivers/usb/host/ehci-sh.c b/drivers/usb/host/ehci-sh.c
+index d31d9506e41ab0..7c2b2339e674dd 100644
+--- a/drivers/usb/host/ehci-sh.c
++++ b/drivers/usb/host/ehci-sh.c
+@@ -119,8 +119,12 @@ static int ehci_hcd_sh_probe(struct platform_device *pdev)
+ 	if (IS_ERR(priv->iclk))
+ 		priv->iclk = NULL;
+ 
+-	clk_enable(priv->fclk);
+-	clk_enable(priv->iclk);
++	ret = clk_enable(priv->fclk);
++	if (ret)
++		goto fail_request_resource;
++	ret = clk_enable(priv->iclk);
++	if (ret)
++		goto fail_iclk;
+ 
+ 	ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
+ 	if (ret != 0) {
+@@ -136,6 +140,7 @@ static int ehci_hcd_sh_probe(struct platform_device *pdev)
+ 
+ fail_add_hcd:
+ 	clk_disable(priv->iclk);
++fail_iclk:
+ 	clk_disable(priv->fclk);
+ 
+ fail_request_resource:
+diff --git a/drivers/usb/host/max3421-hcd.c b/drivers/usb/host/max3421-hcd.c
+index 9fe4f48b18980c..0881fdd1823e0b 100644
+--- a/drivers/usb/host/max3421-hcd.c
++++ b/drivers/usb/host/max3421-hcd.c
+@@ -779,11 +779,17 @@ max3421_check_unlink(struct usb_hcd *hcd)
+ 				retval = 1;
+ 				dev_dbg(&spi->dev, "%s: URB %p unlinked=%d",
+ 					__func__, urb, urb->unlinked);
+-				usb_hcd_unlink_urb_from_ep(hcd, urb);
+-				spin_unlock_irqrestore(&max3421_hcd->lock,
+-						       flags);
+-				usb_hcd_giveback_urb(hcd, urb, 0);
+-				spin_lock_irqsave(&max3421_hcd->lock, flags);
++				if (urb == max3421_hcd->curr_urb) {
++					max3421_hcd->urb_done = 1;
++					max3421_hcd->hien &= ~(BIT(MAX3421_HI_HXFRDN_BIT) |
++							       BIT(MAX3421_HI_RCVDAV_BIT));
++				} else {
++					usb_hcd_unlink_urb_from_ep(hcd, urb);
++					spin_unlock_irqrestore(&max3421_hcd->lock,
++							       flags);
++					usb_hcd_giveback_urb(hcd, urb, 0);
++					spin_lock_irqsave(&max3421_hcd->lock, flags);
++				}
+ 			}
+ 		}
+ 	}
+diff --git a/drivers/usb/misc/onboard_usb_dev.c b/drivers/usb/misc/onboard_usb_dev.c
+index 75dfdca04ff1c2..27b0a6e182678b 100644
+--- a/drivers/usb/misc/onboard_usb_dev.c
++++ b/drivers/usb/misc/onboard_usb_dev.c
+@@ -407,8 +407,10 @@ static int onboard_dev_probe(struct platform_device *pdev)
+ 		}
+ 
+ 		if (of_device_is_compatible(pdev->dev.of_node, "usb424,2744") ||
+-		    of_device_is_compatible(pdev->dev.of_node, "usb424,5744"))
++		    of_device_is_compatible(pdev->dev.of_node, "usb424,5744")) {
+ 			err = onboard_dev_5744_i2c_init(client);
++			onboard_dev->always_powered_in_suspend = true;
++		}
+ 
+ 		put_device(&client->dev);
+ 		if (err < 0)
+diff --git a/drivers/usb/typec/anx7411.c b/drivers/usb/typec/anx7411.c
+index d1e7c487ddfbb5..0ae0a5ee3fae07 100644
+--- a/drivers/usb/typec/anx7411.c
++++ b/drivers/usb/typec/anx7411.c
+@@ -290,6 +290,8 @@ struct anx7411_data {
+ 	struct power_supply *psy;
+ 	struct power_supply_desc psy_desc;
+ 	struct device *dev;
++	struct fwnode_handle *switch_node;
++	struct fwnode_handle *mux_node;
+ };
+ 
+ static u8 snk_identity[] = {
+@@ -1021,6 +1023,16 @@ static void anx7411_port_unregister_altmodes(struct typec_altmode **adev)
+ 		}
+ }
+ 
++static void anx7411_port_unregister(struct typec_params *typecp)
++{
++	fwnode_handle_put(typecp->caps.fwnode);
++	anx7411_port_unregister_altmodes(typecp->port_amode);
++	if (typecp->port)
++		typec_unregister_port(typecp->port);
++	if (typecp->role_sw)
++		usb_role_switch_put(typecp->role_sw);
++}
++
+ static int anx7411_usb_mux_set(struct typec_mux_dev *mux,
+ 			       struct typec_mux_state *state)
+ {
+@@ -1089,6 +1101,7 @@ static void anx7411_unregister_mux(struct anx7411_data *ctx)
+ 	if (ctx->typec.typec_mux) {
+ 		typec_mux_unregister(ctx->typec.typec_mux);
+ 		ctx->typec.typec_mux = NULL;
++		fwnode_handle_put(ctx->mux_node);
+ 	}
+ }
+ 
+@@ -1097,6 +1110,7 @@ static void anx7411_unregister_switch(struct anx7411_data *ctx)
+ 	if (ctx->typec.typec_switch) {
+ 		typec_switch_unregister(ctx->typec.typec_switch);
+ 		ctx->typec.typec_switch = NULL;
++		fwnode_handle_put(ctx->switch_node);
+ 	}
+ }
+ 
+@@ -1104,28 +1118,29 @@ static int anx7411_typec_switch_probe(struct anx7411_data *ctx,
+ 				      struct device *dev)
+ {
+ 	int ret;
+-	struct device_node *node;
+ 
+-	node = of_get_child_by_name(dev->of_node, "orientation_switch");
+-	if (!node)
++	ctx->switch_node = device_get_named_child_node(dev, "orientation_switch");
++	if (!ctx->switch_node)
+ 		return 0;
+ 
+-	ret = anx7411_register_switch(ctx, dev, &node->fwnode);
++	ret = anx7411_register_switch(ctx, dev, ctx->switch_node);
+ 	if (ret) {
+ 		dev_err(dev, "failed register switch");
++		fwnode_handle_put(ctx->switch_node);
+ 		return ret;
+ 	}
+ 
+-	node = of_get_child_by_name(dev->of_node, "mode_switch");
+-	if (!node) {
++	ctx->mux_node = device_get_named_child_node(dev, "mode_switch");
++	if (!ctx->mux_node) {
+ 		dev_err(dev, "no typec mux exist");
+ 		ret = -ENODEV;
+ 		goto unregister_switch;
+ 	}
+ 
+-	ret = anx7411_register_mux(ctx, dev, &node->fwnode);
++	ret = anx7411_register_mux(ctx, dev, ctx->mux_node);
+ 	if (ret) {
+ 		dev_err(dev, "failed register mode switch");
++		fwnode_handle_put(ctx->mux_node);
+ 		ret = -ENODEV;
+ 		goto unregister_switch;
+ 	}
+@@ -1154,34 +1169,34 @@ static int anx7411_typec_port_probe(struct anx7411_data *ctx,
+ 	ret = fwnode_property_read_string(fwnode, "power-role", &buf);
+ 	if (ret) {
+ 		dev_err(dev, "power-role not found: %d\n", ret);
+-		return ret;
++		goto put_fwnode;
+ 	}
+ 
+ 	ret = typec_find_port_power_role(buf);
+ 	if (ret < 0)
+-		return ret;
++		goto put_fwnode;
+ 	cap->type = ret;
+ 
+ 	ret = fwnode_property_read_string(fwnode, "data-role", &buf);
+ 	if (ret) {
+ 		dev_err(dev, "data-role not found: %d\n", ret);
+-		return ret;
++		goto put_fwnode;
+ 	}
+ 
+ 	ret = typec_find_port_data_role(buf);
+ 	if (ret < 0)
+-		return ret;
++		goto put_fwnode;
+ 	cap->data = ret;
+ 
+ 	ret = fwnode_property_read_string(fwnode, "try-power-role", &buf);
+ 	if (ret) {
+ 		dev_err(dev, "try-power-role not found: %d\n", ret);
+-		return ret;
++		goto put_fwnode;
+ 	}
+ 
+ 	ret = typec_find_power_role(buf);
+ 	if (ret < 0)
+-		return ret;
++		goto put_fwnode;
+ 	cap->prefer_role = ret;
+ 
+ 	/* Get source pdos */
+@@ -1193,7 +1208,7 @@ static int anx7411_typec_port_probe(struct anx7411_data *ctx,
+ 						     typecp->src_pdo_nr);
+ 		if (ret < 0) {
+ 			dev_err(dev, "source cap validate failed: %d\n", ret);
+-			return -EINVAL;
++			goto put_fwnode;
+ 		}
+ 
+ 		typecp->caps_flags |= HAS_SOURCE_CAP;
+@@ -1207,7 +1222,7 @@ static int anx7411_typec_port_probe(struct anx7411_data *ctx,
+ 						     typecp->sink_pdo_nr);
+ 		if (ret < 0) {
+ 			dev_err(dev, "sink cap validate failed: %d\n", ret);
+-			return -EINVAL;
++			goto put_fwnode;
+ 		}
+ 
+ 		for (i = 0; i < typecp->sink_pdo_nr; i++) {
+@@ -1251,13 +1266,21 @@ static int anx7411_typec_port_probe(struct anx7411_data *ctx,
+ 		ret = PTR_ERR(ctx->typec.port);
+ 		ctx->typec.port = NULL;
+ 		dev_err(dev, "Failed to register type c port %d\n", ret);
+-		return ret;
++		goto put_usb_role_switch;
+ 	}
+ 
+ 	typec_port_register_altmodes(ctx->typec.port, NULL, ctx,
+ 				     ctx->typec.port_amode,
+ 				     MAX_ALTMODE);
+ 	return 0;
++
++put_usb_role_switch:
++	if (ctx->typec.role_sw)
++		usb_role_switch_put(ctx->typec.role_sw);
++put_fwnode:
++	fwnode_handle_put(fwnode);
++
++	return ret;
+ }
+ 
+ static int anx7411_typec_check_connection(struct anx7411_data *ctx)
+@@ -1523,8 +1546,7 @@ static int anx7411_i2c_probe(struct i2c_client *client)
+ 	destroy_workqueue(plat->workqueue);
+ 
+ free_typec_port:
+-	typec_unregister_port(plat->typec.port);
+-	anx7411_port_unregister_altmodes(plat->typec.port_amode);
++	anx7411_port_unregister(&plat->typec);
+ 
+ free_typec_switch:
+ 	anx7411_unregister_switch(plat);
+@@ -1548,17 +1570,11 @@ static void anx7411_i2c_remove(struct i2c_client *client)
+ 
+ 	i2c_unregister_device(plat->spi_client);
+ 
+-	if (plat->typec.role_sw)
+-		usb_role_switch_put(plat->typec.role_sw);
+-
+ 	anx7411_unregister_mux(plat);
+ 
+ 	anx7411_unregister_switch(plat);
+ 
+-	if (plat->typec.port)
+-		typec_unregister_port(plat->typec.port);
+-
+-	anx7411_port_unregister_altmodes(plat->typec.port_amode);
++	anx7411_port_unregister(&plat->typec);
+ }
+ 
+ static const struct i2c_device_id anx7411_id[] = {
+diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
+index e0f3925e401b3d..7a3f0f5af38fdb 100644
+--- a/drivers/usb/typec/ucsi/ucsi.c
++++ b/drivers/usb/typec/ucsi/ucsi.c
+@@ -46,11 +46,11 @@ void ucsi_notify_common(struct ucsi *ucsi, u32 cci)
+ 		ucsi_connector_change(ucsi, UCSI_CCI_CONNECTOR(cci));
+ 
+ 	if (cci & UCSI_CCI_ACK_COMPLETE &&
+-	    test_bit(ACK_PENDING, &ucsi->flags))
++	    test_and_clear_bit(ACK_PENDING, &ucsi->flags))
+ 		complete(&ucsi->complete);
+ 
+ 	if (cci & UCSI_CCI_COMMAND_COMPLETE &&
+-	    test_bit(COMMAND_PENDING, &ucsi->flags))
++	    test_and_clear_bit(COMMAND_PENDING, &ucsi->flags))
+ 		complete(&ucsi->complete);
+ }
+ EXPORT_SYMBOL_GPL(ucsi_notify_common);
+@@ -65,6 +65,8 @@ int ucsi_sync_control_common(struct ucsi *ucsi, u64 command)
+ 	else
+ 		set_bit(COMMAND_PENDING, &ucsi->flags);
+ 
++	reinit_completion(&ucsi->complete);
++
+ 	ret = ucsi->ops->async_control(ucsi, command);
+ 	if (ret)
+ 		goto out_clear_bit;
+diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
+index 98374ed7c57723..0112742e4504b9 100644
+--- a/drivers/virtio/virtio_ring.c
++++ b/drivers/virtio/virtio_ring.c
+@@ -2716,6 +2716,7 @@ EXPORT_SYMBOL_GPL(vring_create_virtqueue_dma);
+  * @_vq: the struct virtqueue we're talking about.
+  * @num: new ring num
+  * @recycle: callback to recycle unused buffers
++ * @recycle_done: callback invoked once all unused buffers have been recycled
+  *
+  * When it is really necessary to create a new vring, it will set the current vq
+  * into the reset state. Then call the passed callback to recycle the buffer
+@@ -2736,7 +2737,8 @@ EXPORT_SYMBOL_GPL(vring_create_virtqueue_dma);
+  *
+  */
+ int virtqueue_resize(struct virtqueue *_vq, u32 num,
+-		     void (*recycle)(struct virtqueue *vq, void *buf))
++		     void (*recycle)(struct virtqueue *vq, void *buf),
++		     void (*recycle_done)(struct virtqueue *vq))
+ {
+ 	struct vring_virtqueue *vq = to_vvq(_vq);
+ 	int err;
+@@ -2753,6 +2755,8 @@ int virtqueue_resize(struct virtqueue *_vq, u32 num,
+ 	err = virtqueue_disable_and_recycle(_vq, recycle);
+ 	if (err)
+ 		return err;
++	if (recycle_done)
++		recycle_done(_vq);
+ 
+ 	if (vq->packed_ring)
+ 		err = virtqueue_resize_packed(_vq, num);
+diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c
+index b35fe1075503e1..fafc07e38663ca 100644
+--- a/fs/smb/client/inode.c
++++ b/fs/smb/client/inode.c
+@@ -1925,6 +1925,7 @@ int cifs_unlink(struct inode *dir, struct dentry *dentry)
+ 		goto unlink_out;
+ 	}
+ 
++	netfs_wait_for_outstanding_io(inode);
+ 	cifs_close_deferred_file_under_dentry(tcon, full_path);
+ #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+ 	if (cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
+@@ -2442,8 +2443,10 @@ cifs_rename2(struct mnt_idmap *idmap, struct inode *source_dir,
+ 	}
+ 
+ 	cifs_close_deferred_file_under_dentry(tcon, from_name);
+-	if (d_inode(target_dentry) != NULL)
++	if (d_inode(target_dentry) != NULL) {
++		netfs_wait_for_outstanding_io(d_inode(target_dentry));
+ 		cifs_close_deferred_file_under_dentry(tcon, to_name);
++	}
+ 
+ 	rc = cifs_do_rename(xid, source_dentry, from_name, target_dentry,
+ 			    to_name);
+diff --git a/fs/smb/server/auth.c b/fs/smb/server/auth.c
+index 611716bc8f27c1..8892177e500f19 100644
+--- a/fs/smb/server/auth.c
++++ b/fs/smb/server/auth.c
+@@ -1016,6 +1016,8 @@ static int ksmbd_get_encryption_key(struct ksmbd_work *work, __u64 ses_id,
+ 
+ 	ses_enc_key = enc ? sess->smb3encryptionkey :
+ 		sess->smb3decryptionkey;
++	if (enc)
++		ksmbd_user_session_get(sess);
+ 	memcpy(key, ses_enc_key, SMB3_ENC_DEC_KEY_SIZE);
+ 
+ 	return 0;
+diff --git a/fs/smb/server/mgmt/user_session.c b/fs/smb/server/mgmt/user_session.c
+index ad02fe555fda7e..d960ddcbba1657 100644
+--- a/fs/smb/server/mgmt/user_session.c
++++ b/fs/smb/server/mgmt/user_session.c
+@@ -263,8 +263,10 @@ struct ksmbd_session *ksmbd_session_lookup(struct ksmbd_conn *conn,
+ 
+ 	down_read(&conn->session_lock);
+ 	sess = xa_load(&conn->sessions, id);
+-	if (sess)
++	if (sess) {
+ 		sess->last_active = jiffies;
++		ksmbd_user_session_get(sess);
++	}
+ 	up_read(&conn->session_lock);
+ 	return sess;
+ }
+@@ -275,6 +277,8 @@ struct ksmbd_session *ksmbd_session_lookup_slowpath(unsigned long long id)
+ 
+ 	down_read(&sessions_table_lock);
+ 	sess = __session_lookup(id);
++	if (sess)
++		ksmbd_user_session_get(sess);
+ 	up_read(&sessions_table_lock);
+ 
+ 	return sess;
+diff --git a/fs/smb/server/server.c b/fs/smb/server/server.c
+index c8cc6fa6fc3ebb..698af37e988d7b 100644
+--- a/fs/smb/server/server.c
++++ b/fs/smb/server/server.c
+@@ -241,14 +241,14 @@ static void __handle_ksmbd_work(struct ksmbd_work *work,
+ 	if (work->tcon)
+ 		ksmbd_tree_connect_put(work->tcon);
+ 	smb3_preauth_hash_rsp(work);
+-	if (work->sess)
+-		ksmbd_user_session_put(work->sess);
+ 	if (work->sess && work->sess->enc && work->encrypted &&
+ 	    conn->ops->encrypt_resp) {
+ 		rc = conn->ops->encrypt_resp(work);
+ 		if (rc < 0)
+ 			conn->ops->set_rsp_status(work, STATUS_DATA_ERROR);
+ 	}
++	if (work->sess)
++		ksmbd_user_session_put(work->sess);
+ 
+ 	ksmbd_conn_write(work);
+ }
+diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
+index d0836d710f1814..7d01dd313351f7 100644
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -67,8 +67,10 @@ static inline bool check_session_id(struct ksmbd_conn *conn, u64 id)
+ 		return false;
+ 
+ 	sess = ksmbd_session_lookup_all(conn, id);
+-	if (sess)
++	if (sess) {
++		ksmbd_user_session_put(sess);
+ 		return true;
++	}
+ 	pr_err("Invalid user session id: %llu\n", id);
+ 	return false;
+ }
+@@ -605,10 +607,8 @@ int smb2_check_user_session(struct ksmbd_work *work)
+ 
+ 	/* Check for validity of user session */
+ 	work->sess = ksmbd_session_lookup_all(conn, sess_id);
+-	if (work->sess) {
+-		ksmbd_user_session_get(work->sess);
++	if (work->sess)
+ 		return 1;
+-	}
+ 	ksmbd_debug(SMB, "Invalid user session, Uid %llu\n", sess_id);
+ 	return -ENOENT;
+ }
+@@ -1701,29 +1701,35 @@ int smb2_sess_setup(struct ksmbd_work *work)
+ 
+ 		if (conn->dialect != sess->dialect) {
+ 			rc = -EINVAL;
++			ksmbd_user_session_put(sess);
+ 			goto out_err;
+ 		}
+ 
+ 		if (!(req->hdr.Flags & SMB2_FLAGS_SIGNED)) {
+ 			rc = -EINVAL;
++			ksmbd_user_session_put(sess);
+ 			goto out_err;
+ 		}
+ 
+ 		if (strncmp(conn->ClientGUID, sess->ClientGUID,
+ 			    SMB2_CLIENT_GUID_SIZE)) {
+ 			rc = -ENOENT;
++			ksmbd_user_session_put(sess);
+ 			goto out_err;
+ 		}
+ 
+ 		if (sess->state == SMB2_SESSION_IN_PROGRESS) {
+ 			rc = -EACCES;
++			ksmbd_user_session_put(sess);
+ 			goto out_err;
+ 		}
+ 
+ 		if (sess->state == SMB2_SESSION_EXPIRED) {
+ 			rc = -EFAULT;
++			ksmbd_user_session_put(sess);
+ 			goto out_err;
+ 		}
++		ksmbd_user_session_put(sess);
+ 
+ 		if (ksmbd_conn_need_reconnect(conn)) {
+ 			rc = -EFAULT;
+@@ -1731,7 +1737,8 @@ int smb2_sess_setup(struct ksmbd_work *work)
+ 			goto out_err;
+ 		}
+ 
+-		if (ksmbd_session_lookup(conn, sess_id)) {
++		sess = ksmbd_session_lookup(conn, sess_id);
++		if (!sess) {
+ 			rc = -EACCES;
+ 			goto out_err;
+ 		}
+@@ -1742,7 +1749,6 @@ int smb2_sess_setup(struct ksmbd_work *work)
+ 		}
+ 
+ 		conn->binding = true;
+-		ksmbd_user_session_get(sess);
+ 	} else if ((conn->dialect < SMB30_PROT_ID ||
+ 		    server_conf.flags & KSMBD_GLOBAL_FLAG_SMB3_MULTICHANNEL) &&
+ 		   (req->Flags & SMB2_SESSION_REQ_FLAG_BINDING)) {
+@@ -1769,7 +1775,6 @@ int smb2_sess_setup(struct ksmbd_work *work)
+ 		}
+ 
+ 		conn->binding = false;
+-		ksmbd_user_session_get(sess);
+ 	}
+ 	work->sess = sess;
+ 
+@@ -2195,9 +2200,9 @@ int smb2_tree_disconnect(struct ksmbd_work *work)
+ int smb2_session_logoff(struct ksmbd_work *work)
+ {
+ 	struct ksmbd_conn *conn = work->conn;
++	struct ksmbd_session *sess = work->sess;
+ 	struct smb2_logoff_req *req;
+ 	struct smb2_logoff_rsp *rsp;
+-	struct ksmbd_session *sess;
+ 	u64 sess_id;
+ 	int err;
+ 
+@@ -2219,11 +2224,6 @@ int smb2_session_logoff(struct ksmbd_work *work)
+ 	ksmbd_close_session_fds(work);
+ 	ksmbd_conn_wait_idle(conn);
+ 
+-	/*
+-	 * Re-lookup session to validate if session is deleted
+-	 * while waiting request complete
+-	 */
+-	sess = ksmbd_session_lookup_all(conn, sess_id);
+ 	if (ksmbd_tree_conn_session_logoff(sess)) {
+ 		ksmbd_debug(SMB, "Invalid tid %d\n", req->hdr.Id.SyncId.TreeId);
+ 		rsp->hdr.Status = STATUS_NETWORK_NAME_DELETED;
+@@ -8962,6 +8962,7 @@ int smb3_decrypt_req(struct ksmbd_work *work)
+ 		       le64_to_cpu(tr_hdr->SessionId));
+ 		return -ECONNABORTED;
+ 	}
++	ksmbd_user_session_put(sess);
+ 
+ 	iov[0].iov_base = buf;
+ 	iov[0].iov_len = sizeof(struct smb2_transform_hdr) + 4;
+diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c
+index a5c4af148853f8..134d87b3489aa4 100644
+--- a/fs/xfs/libxfs/xfs_btree.c
++++ b/fs/xfs/libxfs/xfs_btree.c
+@@ -3569,14 +3569,31 @@ xfs_btree_insrec(
+ 	xfs_btree_log_block(cur, bp, XFS_BB_NUMRECS);
+ 
+ 	/*
+-	 * If we just inserted into a new tree block, we have to
+-	 * recalculate nkey here because nkey is out of date.
++	 * Update btree keys to reflect the newly added record or keyptr.
++	 * There are three cases here to be aware of.  Normally, all we have to
++	 * do is walk towards the root, updating keys as necessary.
+ 	 *
+-	 * Otherwise we're just updating an existing block (having shoved
+-	 * some records into the new tree block), so use the regular key
+-	 * update mechanism.
++	 * If the caller had us target a full block for the insertion, we dealt
++	 * with that by calling the _make_block_unfull function.  If the
++	 * "make unfull" function splits the block, it'll hand us back the key
++	 * and pointer of the new block.  We haven't yet added the new block to
++	 * the next level up, so if we decide to add the new record to the new
++	 * block (xfs_buf_daddr(bp) != old_bn), we have to update the caller's
++	 * pointer so that the caller adds the new block with the correct key.
++	 *
++	 * However, there is a third possibility-- if the selected block is the
++	 * root block of an inode-rooted btree and cannot be expanded further,
++	 * the "make unfull" function moves the root block contents to a new
++	 * block and updates the root block to point to the new block.  In this
++	 * case, no block pointer is passed back because the block has already
++	 * been added to the btree.  In this case, we need to use the regular
++	 * key update function, just like the first case.  This is critical for
++	 * overlapping btrees, because the high key must be updated to reflect
++	 * the entire tree, not just the subtree accessible through the first
++	 * child of the root (which is now two levels down from the root).
+ 	 */
+-	if (bp && xfs_buf_daddr(bp) != old_bn) {
++	if (!xfs_btree_ptr_is_null(cur, &nptr) &&
++	    bp && xfs_buf_daddr(bp) != old_bn) {
+ 		xfs_btree_get_keys(cur, block, lkey);
+ 	} else if (xfs_btree_needs_key_update(cur, optr)) {
+ 		error = xfs_btree_update_keys(cur, level);
+@@ -5156,7 +5173,7 @@ xfs_btree_count_blocks_helper(
+ 	int			level,
+ 	void			*data)
+ {
+-	xfs_extlen_t		*blocks = data;
++	xfs_filblks_t		*blocks = data;
+ 	(*blocks)++;
+ 
+ 	return 0;
+@@ -5166,7 +5183,7 @@ xfs_btree_count_blocks_helper(
+ int
+ xfs_btree_count_blocks(
+ 	struct xfs_btree_cur	*cur,
+-	xfs_extlen_t		*blocks)
++	xfs_filblks_t		*blocks)
+ {
+ 	*blocks = 0;
+ 	return xfs_btree_visit_blocks(cur, xfs_btree_count_blocks_helper,
+diff --git a/fs/xfs/libxfs/xfs_btree.h b/fs/xfs/libxfs/xfs_btree.h
+index 10b7ddc3b2b34e..91e0b6dac31ec6 100644
+--- a/fs/xfs/libxfs/xfs_btree.h
++++ b/fs/xfs/libxfs/xfs_btree.h
+@@ -485,7 +485,7 @@ typedef int (*xfs_btree_visit_blocks_fn)(struct xfs_btree_cur *cur, int level,
+ int xfs_btree_visit_blocks(struct xfs_btree_cur *cur,
+ 		xfs_btree_visit_blocks_fn fn, unsigned int flags, void *data);
+ 
+-int xfs_btree_count_blocks(struct xfs_btree_cur *cur, xfs_extlen_t *blocks);
++int xfs_btree_count_blocks(struct xfs_btree_cur *cur, xfs_filblks_t *blocks);
+ 
+ union xfs_btree_rec *xfs_btree_rec_addr(struct xfs_btree_cur *cur, int n,
+ 		struct xfs_btree_block *block);
+diff --git a/fs/xfs/libxfs/xfs_ialloc_btree.c b/fs/xfs/libxfs/xfs_ialloc_btree.c
+index 401b42d52af686..6aa43f3fc68e03 100644
+--- a/fs/xfs/libxfs/xfs_ialloc_btree.c
++++ b/fs/xfs/libxfs/xfs_ialloc_btree.c
+@@ -743,6 +743,7 @@ xfs_finobt_count_blocks(
+ {
+ 	struct xfs_buf		*agbp = NULL;
+ 	struct xfs_btree_cur	*cur;
++	xfs_filblks_t		blocks;
+ 	int			error;
+ 
+ 	error = xfs_ialloc_read_agi(pag, tp, 0, &agbp);
+@@ -750,9 +751,10 @@ xfs_finobt_count_blocks(
+ 		return error;
+ 
+ 	cur = xfs_finobt_init_cursor(pag, tp, agbp);
+-	error = xfs_btree_count_blocks(cur, tree_blocks);
++	error = xfs_btree_count_blocks(cur, &blocks);
+ 	xfs_btree_del_cursor(cur, error);
+ 	xfs_trans_brelse(tp, agbp);
++	*tree_blocks = blocks;
+ 
+ 	return error;
+ }
+diff --git a/fs/xfs/libxfs/xfs_symlink_remote.c b/fs/xfs/libxfs/xfs_symlink_remote.c
+index f228127a88ff26..fb47a76ead18c2 100644
+--- a/fs/xfs/libxfs/xfs_symlink_remote.c
++++ b/fs/xfs/libxfs/xfs_symlink_remote.c
+@@ -92,8 +92,10 @@ xfs_symlink_verify(
+ 	struct xfs_mount	*mp = bp->b_mount;
+ 	struct xfs_dsymlink_hdr	*dsl = bp->b_addr;
+ 
++	/* no verification of non-crc buffers */
+ 	if (!xfs_has_crc(mp))
+-		return __this_address;
++		return NULL;
++
+ 	if (!xfs_verify_magic(bp, dsl->sl_magic))
+ 		return __this_address;
+ 	if (!uuid_equal(&dsl->sl_uuid, &mp->m_sb.sb_meta_uuid))
+diff --git a/fs/xfs/scrub/agheader.c b/fs/xfs/scrub/agheader.c
+index f8e5b67128d25a..da30f926cbe66d 100644
+--- a/fs/xfs/scrub/agheader.c
++++ b/fs/xfs/scrub/agheader.c
+@@ -434,7 +434,7 @@ xchk_agf_xref_btreeblks(
+ {
+ 	struct xfs_agf		*agf = sc->sa.agf_bp->b_addr;
+ 	struct xfs_mount	*mp = sc->mp;
+-	xfs_agblock_t		blocks;
++	xfs_filblks_t		blocks;
+ 	xfs_agblock_t		btreeblks;
+ 	int			error;
+ 
+@@ -483,7 +483,7 @@ xchk_agf_xref_refcblks(
+ 	struct xfs_scrub	*sc)
+ {
+ 	struct xfs_agf		*agf = sc->sa.agf_bp->b_addr;
+-	xfs_agblock_t		blocks;
++	xfs_filblks_t		blocks;
+ 	int			error;
+ 
+ 	if (!sc->sa.refc_cur)
+@@ -816,7 +816,7 @@ xchk_agi_xref_fiblocks(
+ 	struct xfs_scrub	*sc)
+ {
+ 	struct xfs_agi		*agi = sc->sa.agi_bp->b_addr;
+-	xfs_agblock_t		blocks;
++	xfs_filblks_t		blocks;
+ 	int			error = 0;
+ 
+ 	if (!xfs_has_inobtcounts(sc->mp))
+diff --git a/fs/xfs/scrub/agheader_repair.c b/fs/xfs/scrub/agheader_repair.c
+index 2f98d90d7fd66d..69b003259784fe 100644
+--- a/fs/xfs/scrub/agheader_repair.c
++++ b/fs/xfs/scrub/agheader_repair.c
+@@ -256,7 +256,7 @@ xrep_agf_calc_from_btrees(
+ 	struct xfs_agf		*agf = agf_bp->b_addr;
+ 	struct xfs_mount	*mp = sc->mp;
+ 	xfs_agblock_t		btreeblks;
+-	xfs_agblock_t		blocks;
++	xfs_filblks_t		blocks;
+ 	int			error;
+ 
+ 	/* Update the AGF counters from the bnobt. */
+@@ -946,7 +946,7 @@ xrep_agi_calc_from_btrees(
+ 	if (error)
+ 		goto err;
+ 	if (xfs_has_inobtcounts(mp)) {
+-		xfs_agblock_t	blocks;
++		xfs_filblks_t	blocks;
+ 
+ 		error = xfs_btree_count_blocks(cur, &blocks);
+ 		if (error)
+@@ -959,7 +959,7 @@ xrep_agi_calc_from_btrees(
+ 	agi->agi_freecount = cpu_to_be32(freecount);
+ 
+ 	if (xfs_has_finobt(mp) && xfs_has_inobtcounts(mp)) {
+-		xfs_agblock_t	blocks;
++		xfs_filblks_t	blocks;
+ 
+ 		cur = xfs_finobt_init_cursor(sc->sa.pag, sc->tp, agi_bp);
+ 		error = xfs_btree_count_blocks(cur, &blocks);
+diff --git a/fs/xfs/scrub/fscounters.c b/fs/xfs/scrub/fscounters.c
+index 1d3e98346933e1..454f17595c9c9e 100644
+--- a/fs/xfs/scrub/fscounters.c
++++ b/fs/xfs/scrub/fscounters.c
+@@ -261,7 +261,7 @@ xchk_fscount_btreeblks(
+ 	struct xchk_fscounters	*fsc,
+ 	xfs_agnumber_t		agno)
+ {
+-	xfs_extlen_t		blocks;
++	xfs_filblks_t		blocks;
+ 	int			error;
+ 
+ 	error = xchk_ag_init_existing(sc, agno, &sc->sa);
+diff --git a/fs/xfs/scrub/ialloc.c b/fs/xfs/scrub/ialloc.c
+index 750d7b0cd25a78..a59c44e5903a45 100644
+--- a/fs/xfs/scrub/ialloc.c
++++ b/fs/xfs/scrub/ialloc.c
+@@ -652,8 +652,8 @@ xchk_iallocbt_xref_rmap_btreeblks(
+ 	struct xfs_scrub	*sc)
+ {
+ 	xfs_filblks_t		blocks;
+-	xfs_extlen_t		inobt_blocks = 0;
+-	xfs_extlen_t		finobt_blocks = 0;
++	xfs_filblks_t		inobt_blocks = 0;
++	xfs_filblks_t		finobt_blocks = 0;
+ 	int			error;
+ 
+ 	if (!sc->sa.ino_cur || !sc->sa.rmap_cur ||
+diff --git a/fs/xfs/scrub/refcount.c b/fs/xfs/scrub/refcount.c
+index d0c7d4a29c0feb..cccf39d917a09c 100644
+--- a/fs/xfs/scrub/refcount.c
++++ b/fs/xfs/scrub/refcount.c
+@@ -490,7 +490,7 @@ xchk_refcount_xref_rmap(
+ 	struct xfs_scrub	*sc,
+ 	xfs_filblks_t		cow_blocks)
+ {
+-	xfs_extlen_t		refcbt_blocks = 0;
++	xfs_filblks_t		refcbt_blocks = 0;
+ 	xfs_filblks_t		blocks;
+ 	int			error;
+ 
+diff --git a/fs/xfs/scrub/symlink_repair.c b/fs/xfs/scrub/symlink_repair.c
+index d015a86ef460fb..953ce7be78dc2f 100644
+--- a/fs/xfs/scrub/symlink_repair.c
++++ b/fs/xfs/scrub/symlink_repair.c
+@@ -36,6 +36,7 @@
+ #include "scrub/tempfile.h"
+ #include "scrub/tempexch.h"
+ #include "scrub/reap.h"
++#include "scrub/health.h"
+ 
+ /*
+  * Symbolic Link Repair
+@@ -233,7 +234,7 @@ xrep_symlink_salvage(
+ 	 * target zapped flag.
+ 	 */
+ 	if (buflen == 0) {
+-		sc->sick_mask |= XFS_SICK_INO_SYMLINK_ZAPPED;
++		xchk_mark_healthy_if_clean(sc, XFS_SICK_INO_SYMLINK_ZAPPED);
+ 		sprintf(target_buf, DUMMY_TARGET);
+ 	}
+ 
+diff --git a/fs/xfs/scrub/trace.h b/fs/xfs/scrub/trace.h
+index c886d5d0eb021a..da773fee8638af 100644
+--- a/fs/xfs/scrub/trace.h
++++ b/fs/xfs/scrub/trace.h
+@@ -601,7 +601,7 @@ TRACE_EVENT(xchk_ifork_btree_op_error,
+ 	TP_fast_assign(
+ 		xfs_fsblock_t fsbno = xchk_btree_cur_fsbno(cur, level);
+ 		__entry->dev = sc->mp->m_super->s_dev;
+-		__entry->ino = sc->ip->i_ino;
++		__entry->ino = cur->bc_ino.ip->i_ino;
+ 		__entry->whichfork = cur->bc_ino.whichfork;
+ 		__entry->type = sc->sm->sm_type;
+ 		__assign_str(name);
+diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
+index edaf193dbd5ccc..95f8a09f96ae20 100644
+--- a/fs/xfs/xfs_bmap_util.c
++++ b/fs/xfs/xfs_bmap_util.c
+@@ -111,7 +111,7 @@ xfs_bmap_count_blocks(
+ 	struct xfs_mount	*mp = ip->i_mount;
+ 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
+ 	struct xfs_btree_cur	*cur;
+-	xfs_extlen_t		btblocks = 0;
++	xfs_filblks_t		btblocks = 0;
+ 	int			error;
+ 
+ 	*nextents = 0;
+diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
+index b19916b11fd563..aba54e3c583661 100644
+--- a/fs/xfs/xfs_file.c
++++ b/fs/xfs/xfs_file.c
+@@ -1228,6 +1228,14 @@ xfs_file_remap_range(
+ 	xfs_iunlock2_remapping(src, dest);
+ 	if (ret)
+ 		trace_xfs_reflink_remap_range_error(dest, ret, _RET_IP_);
++	/*
++	 * If the caller did not set CAN_SHORTEN, then it is not prepared to
++	 * handle partial results -- either the whole remap succeeds, or we
++	 * must say why it did not.  In this case, any error should be returned
++	 * to the caller.
++	 */
++	if (ret && remapped < len && !(remap_flags & REMAP_FILE_CAN_SHORTEN))
++		return ret;
+ 	return remapped > 0 ? remapped : ret;
+ }
+ 
+diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
+index 3a2005a1e673dc..8caa55b8167467 100644
+--- a/fs/xfs/xfs_rtalloc.c
++++ b/fs/xfs/xfs_rtalloc.c
+@@ -1295,7 +1295,7 @@ xfs_rtallocate(
+ 	 * For an allocation to an empty file at offset 0, pick an extent that
+ 	 * will space things out in the rt area.
+ 	 */
+-	if (bno_hint)
++	if (bno_hint != NULLFSBLOCK)
+ 		start = xfs_rtb_to_rtx(args.mp, bno_hint);
+ 	else if (initial_user_data)
+ 		start = xfs_rtpick_extent(args.mp, tp, maxlen);
+diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
+index bdf3704dc30118..30e03342287a94 100644
+--- a/fs/xfs/xfs_trans.c
++++ b/fs/xfs/xfs_trans.c
+@@ -834,13 +834,6 @@ __xfs_trans_commit(
+ 
+ 	trace_xfs_trans_commit(tp, _RET_IP_);
+ 
+-	error = xfs_trans_run_precommits(tp);
+-	if (error) {
+-		if (tp->t_flags & XFS_TRANS_PERM_LOG_RES)
+-			xfs_defer_cancel(tp);
+-		goto out_unreserve;
+-	}
+-
+ 	/*
+ 	 * Finish deferred items on final commit. Only permanent transactions
+ 	 * should ever have deferred ops.
+@@ -851,13 +844,12 @@ __xfs_trans_commit(
+ 		error = xfs_defer_finish_noroll(&tp);
+ 		if (error)
+ 			goto out_unreserve;
+-
+-		/* Run precommits from final tx in defer chain. */
+-		error = xfs_trans_run_precommits(tp);
+-		if (error)
+-			goto out_unreserve;
+ 	}
+ 
++	error = xfs_trans_run_precommits(tp);
++	if (error)
++		goto out_unreserve;
++
+ 	/*
+ 	 * If there is nothing to be logged by the transaction,
+ 	 * then unlock all of the items associated with the
+@@ -1382,5 +1374,8 @@ xfs_trans_alloc_dir(
+ 
+ out_cancel:
+ 	xfs_trans_cancel(tp);
++	xfs_iunlock(dp, XFS_ILOCK_EXCL);
++	if (dp != ip)
++		xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ 	return error;
+ }
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index 6b4bc85f4999ba..b7f327ce797e5b 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -200,8 +200,6 @@ struct gendisk {
+ 	spinlock_t              zone_wplugs_lock;
+ 	struct mempool_s	*zone_wplugs_pool;
+ 	struct hlist_head       *zone_wplugs_hash;
+-	struct list_head        zone_wplugs_err_list;
+-	struct work_struct	zone_wplugs_work;
+ 	struct workqueue_struct *zone_wplugs_wq;
+ #endif /* CONFIG_BLK_DEV_ZONED */
+ 
+@@ -1386,6 +1384,9 @@ static inline bool bdev_is_zone_start(struct block_device *bdev,
+ 	return bdev_offset_from_zone_start(bdev, sector) == 0;
+ }
+ 
++int blk_zone_issue_zeroout(struct block_device *bdev, sector_t sector,
++			   sector_t nr_sects, gfp_t gfp_mask);
++
+ static inline int queue_dma_alignment(const struct request_queue *q)
+ {
+ 	return q->limits.dma_alignment;
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index cbe2350912460b..a7af13f550e0d4 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -2157,26 +2157,25 @@ bpf_prog_run_array(const struct bpf_prog_array *array,
+  * rcu-protected dynamically sized maps.
+  */
+ static __always_inline u32
+-bpf_prog_run_array_uprobe(const struct bpf_prog_array __rcu *array_rcu,
++bpf_prog_run_array_uprobe(const struct bpf_prog_array *array,
+ 			  const void *ctx, bpf_prog_run_fn run_prog)
+ {
+ 	const struct bpf_prog_array_item *item;
+ 	const struct bpf_prog *prog;
+-	const struct bpf_prog_array *array;
+ 	struct bpf_run_ctx *old_run_ctx;
+ 	struct bpf_trace_run_ctx run_ctx;
+ 	u32 ret = 1;
+ 
+ 	might_fault();
++	RCU_LOCKDEP_WARN(!rcu_read_lock_trace_held(), "no rcu lock held");
++
++	if (unlikely(!array))
++		return ret;
+ 
+-	rcu_read_lock_trace();
+ 	migrate_disable();
+ 
+ 	run_ctx.is_uprobe = true;
+ 
+-	array = rcu_dereference_check(array_rcu, rcu_read_lock_trace_held());
+-	if (unlikely(!array))
+-		goto out;
+ 	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
+ 	item = &array->items[0];
+ 	while ((prog = READ_ONCE(item->prog))) {
+@@ -2191,9 +2190,7 @@ bpf_prog_run_array_uprobe(const struct bpf_prog_array __rcu *array_rcu,
+ 			rcu_read_unlock();
+ 	}
+ 	bpf_reset_run_ctx(old_run_ctx);
+-out:
+ 	migrate_enable();
+-	rcu_read_unlock_trace();
+ 	return ret;
+ }
+ 
+@@ -3471,10 +3468,4 @@ static inline bool bpf_is_subprog(const struct bpf_prog *prog)
+ 	return prog->aux->func_idx != 0;
+ }
+ 
+-static inline bool bpf_prog_is_raw_tp(const struct bpf_prog *prog)
+-{
+-	return prog->type == BPF_PROG_TYPE_TRACING &&
+-	       prog->expected_attach_type == BPF_TRACE_RAW_TP;
+-}
+-
+ #endif /* _LINUX_BPF_H */
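bpf_prog_run_array_uprobe() now expects the caller to enter the tasks-trace RCU read section and dereference the array itself; the helper only asserts the lock. The caller-side pattern (it mirrors the trace_uprobe.c hunk later in this patch):

	rcu_read_lock_trace();
	array = rcu_dereference_check(call->prog_array,
				      rcu_read_lock_trace_held());
	ret = bpf_prog_run_array_uprobe(array, regs, bpf_prog_run);
	rcu_read_unlock_trace();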
+diff --git a/include/linux/compiler.h b/include/linux/compiler.h
+index 4d4e23b6e3e761..2d962dade9faee 100644
+--- a/include/linux/compiler.h
++++ b/include/linux/compiler.h
+@@ -216,28 +216,43 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
+ 
+ #endif /* __KERNEL__ */
+ 
++/**
++ * offset_to_ptr - convert a relative memory offset to an absolute pointer
++ * @off:	the address of the 32-bit offset value
++ */
++static inline void *offset_to_ptr(const int *off)
++{
++	return (void *)((unsigned long)off + *off);
++}
++
++#endif /* __ASSEMBLY__ */
++
++#ifdef CONFIG_64BIT
++#define ARCH_SEL(a,b) a
++#else
++#define ARCH_SEL(a,b) b
++#endif
++
+ /*
+  * Force the compiler to emit 'sym' as a symbol, so that we can reference
+  * it from inline assembler. Necessary in case 'sym' could be inlined
+  * otherwise, or eliminated entirely due to lack of references that are
+  * visible to the compiler.
+  */
+-#define ___ADDRESSABLE(sym, __attrs) \
+-	static void * __used __attrs \
++#define ___ADDRESSABLE(sym, __attrs)						\
++	static void * __used __attrs						\
+ 	__UNIQUE_ID(__PASTE(__addressable_,sym)) = (void *)(uintptr_t)&sym;
++
+ #define __ADDRESSABLE(sym) \
+ 	___ADDRESSABLE(sym, __section(".discard.addressable"))
+ 
+-/**
+- * offset_to_ptr - convert a relative memory offset to an absolute pointer
+- * @off:	the address of the 32-bit offset value
+- */
+-static inline void *offset_to_ptr(const int *off)
+-{
+-	return (void *)((unsigned long)off + *off);
+-}
++#define __ADDRESSABLE_ASM(sym)						\
++	.pushsection .discard.addressable,"aw";				\
++	.align ARCH_SEL(8,4);						\
++	ARCH_SEL(.quad, .long) __stringify(sym);			\
++	.popsection;
+ 
+-#endif /* __ASSEMBLY__ */
++#define __ADDRESSABLE_ASM_STR(sym) __stringify(__ADDRESSABLE_ASM(sym))
+ 
+ /* &a[0] degrades to a pointer: a different type from an array */
+ #define __must_be_array(a)	BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
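offset_to_ptr() is only relocated here; the new piece is __ADDRESSABLE_ASM{,_STR}, which lets assembly record a symbol in .discard.addressable just as C code does with __ADDRESSABLE(). A hypothetical use site that keeps a function alive for asm-level references:

	static void target_fn(void) { }

	static inline void keep_target_fn(void)
	{
		/* emits a pointer-sized .discard.addressable entry */
		asm volatile (__ADDRESSABLE_ASM_STR(target_fn));
	}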
+diff --git a/include/linux/dsa/ocelot.h b/include/linux/dsa/ocelot.h
+index 6fbfbde68a37c3..620a3260fc0802 100644
+--- a/include/linux/dsa/ocelot.h
++++ b/include/linux/dsa/ocelot.h
+@@ -15,6 +15,7 @@
+ struct ocelot_skb_cb {
+ 	struct sk_buff *clone;
+ 	unsigned int ptp_class; /* valid only for clones */
++	unsigned long ptp_tx_time; /* valid only for clones */
+ 	u32 tstamp_lo;
+ 	u8 ptp_cmd;
+ 	u8 ts_id;
+diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
+index 66e7d26b70a4fe..11be70a7929f28 100644
+--- a/include/linux/netdev_features.h
++++ b/include/linux/netdev_features.h
+@@ -253,4 +253,11 @@ static inline int find_next_netdev_feature(u64 feature, unsigned long start)
+ 				 NETIF_F_GSO_UDP_TUNNEL |		\
+ 				 NETIF_F_GSO_UDP_TUNNEL_CSUM)
+ 
++static inline netdev_features_t netdev_base_features(netdev_features_t features)
++{
++	features &= ~NETIF_F_ONE_FOR_ALL;
++	features |= NETIF_F_ALL_FOR_ALL;
++	return features;
++}
++
+ #endif	/* _LINUX_NETDEV_FEATURES_H */
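netdev_base_features() builds a sane starting point for intersecting features across lower devices: ONE_FOR_ALL bits are cleared until some lower device actually provides them, while ALL_FOR_ALL bits start set and survive only if every lower device keeps them. A hypothetical aggregation loop in a bonding/team-style master (list and mask are assumptions):

	netdev_features_t features = netdev_base_features(master->features);

	list_for_each_entry(lower, &lowers, list)
		features = netdev_increment_features(features,
						     lower->features,
						     netdev_base_features(mask));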
+diff --git a/include/linux/static_call.h b/include/linux/static_call.h
+index 141e6b176a1b30..78a77a4ae0ea87 100644
+--- a/include/linux/static_call.h
++++ b/include/linux/static_call.h
+@@ -160,6 +160,8 @@ extern void arch_static_call_transform(void *site, void *tramp, void *func, bool
+ 
+ #ifdef CONFIG_HAVE_STATIC_CALL_INLINE
+ 
++extern int static_call_initialized;
++
+ extern int __init static_call_init(void);
+ 
+ extern void static_call_force_reinit(void);
+@@ -225,6 +227,8 @@ extern long __static_call_return0(void);
+ 
+ #elif defined(CONFIG_HAVE_STATIC_CALL)
+ 
++#define static_call_initialized 0
++
+ static inline int static_call_init(void) { return 0; }
+ 
+ #define DEFINE_STATIC_CALL(name, _func)					\
+@@ -281,6 +285,8 @@ extern long __static_call_return0(void);
+ 
+ #else /* Generic implementation */
+ 
++#define static_call_initialized 0
++
+ static inline int static_call_init(void) { return 0; }
+ 
+ static inline long __static_call_return0(void)
+diff --git a/include/linux/virtio.h b/include/linux/virtio.h
+index 306137a15d0753..73c8922e69e095 100644
+--- a/include/linux/virtio.h
++++ b/include/linux/virtio.h
+@@ -100,7 +100,8 @@ dma_addr_t virtqueue_get_avail_addr(const struct virtqueue *vq);
+ dma_addr_t virtqueue_get_used_addr(const struct virtqueue *vq);
+ 
+ int virtqueue_resize(struct virtqueue *vq, u32 num,
+-		     void (*recycle)(struct virtqueue *vq, void *buf));
++		     void (*recycle)(struct virtqueue *vq, void *buf),
++		     void (*recycle_done)(struct virtqueue *vq));
+ int virtqueue_reset(struct virtqueue *vq,
+ 		    void (*recycle)(struct virtqueue *vq, void *buf));
+ 
+diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
+index f66bc85c6411dd..435250c72d5684 100644
+--- a/include/net/bluetooth/bluetooth.h
++++ b/include/net/bluetooth/bluetooth.h
+@@ -123,6 +123,7 @@ struct bt_voice {
+ 
+ #define BT_VOICE_TRANSPARENT			0x0003
+ #define BT_VOICE_CVSD_16BIT			0x0060
++#define BT_VOICE_TRANSPARENT_16BIT		0x0063
+ 
+ #define BT_SNDMTU		12
+ #define BT_RCVMTU		13
+@@ -590,15 +591,6 @@ static inline struct sk_buff *bt_skb_sendmmsg(struct sock *sk,
+ 	return skb;
+ }
+ 
+-static inline int bt_copy_from_sockptr(void *dst, size_t dst_size,
+-				       sockptr_t src, size_t src_size)
+-{
+-	if (dst_size > src_size)
+-		return -EINVAL;
+-
+-	return copy_from_sockptr(dst, src, dst_size);
+-}
+-
+ int bt_to_errno(u16 code);
+ __u8 bt_status(int err);
+ 
+diff --git a/include/net/lapb.h b/include/net/lapb.h
+index 124ee122f2c8f8..6c07420644e45a 100644
+--- a/include/net/lapb.h
++++ b/include/net/lapb.h
+@@ -4,7 +4,7 @@
+ #include <linux/lapb.h>
+ #include <linux/refcount.h>
+ 
+-#define	LAPB_HEADER_LEN	20		/* LAPB over Ethernet + a bit more */
++#define	LAPB_HEADER_LEN MAX_HEADER		/* LAPB over Ethernet + a bit more */
+ 
+ #define	LAPB_ACK_PENDING_CONDITION	0x01
+ #define	LAPB_REJECT_CONDITION		0x02
+diff --git a/include/net/mac80211.h b/include/net/mac80211.h
+index 333e0fae6796c8..5b712582f9a9ce 100644
+--- a/include/net/mac80211.h
++++ b/include/net/mac80211.h
+@@ -6770,14 +6770,12 @@ void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success,
+ /**
+  * ieee80211_channel_switch_disconnect - disconnect due to channel switch error
+  * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+- * @block_tx: if %true, do not send deauth frame.
+  *
+  * Instruct mac80211 to disconnect due to a channel switch error. The channel
+  * switch can request to block the tx and so, we need to make sure we do not send
+  * a deauth frame in this case.
+  */
+-void ieee80211_channel_switch_disconnect(struct ieee80211_vif *vif,
+-					 bool block_tx);
++void ieee80211_channel_switch_disconnect(struct ieee80211_vif *vif);
+ 
+ /**
+  * ieee80211_request_smps - request SM PS transition
+diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
+index e67b483cc8bbb8..9398c8f4995368 100644
+--- a/include/net/net_namespace.h
++++ b/include/net/net_namespace.h
+@@ -80,6 +80,7 @@ struct net {
+ 						 * or to unregister pernet ops
+ 						 * (pernet_ops_rwsem write locked).
+ 						 */
++	struct llist_node	defer_free_list;
+ 	struct llist_node	cleanup_list;	/* namespaces on death row */
+ 
+ #ifdef CONFIG_KEYS
+diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
+index 066a3ea33b12e9..91ae20cb76485b 100644
+--- a/include/net/netfilter/nf_tables.h
++++ b/include/net/netfilter/nf_tables.h
+@@ -1103,7 +1103,6 @@ struct nft_rule_blob {
+  *	@name: name of the chain
+  *	@udlen: user data length
+  *	@udata: user data in the chain
+- *	@rcu_head: rcu head for deferred release
+  *	@blob_next: rule blob pointer to the next in the chain
+  */
+ struct nft_chain {
+@@ -1121,7 +1120,6 @@ struct nft_chain {
+ 	char				*name;
+ 	u16				udlen;
+ 	u8				*udata;
+-	struct rcu_head			rcu_head;
+ 
+ 	/* Only used during control plane commit phase: */
+ 	struct nft_rule_blob		*blob_next;
+@@ -1265,7 +1263,6 @@ static inline void nft_use_inc_restore(u32 *use)
+  *	@sets: sets in the table
+  *	@objects: stateful objects in the table
+  *	@flowtables: flow tables in the table
+- *	@net: netnamespace this table belongs to
+  *	@hgenerator: handle generator state
+  *	@handle: table handle
+  *	@use: number of chain references to this table
+@@ -1285,7 +1282,6 @@ struct nft_table {
+ 	struct list_head		sets;
+ 	struct list_head		objects;
+ 	struct list_head		flowtables;
+-	possible_net_t			net;
+ 	u64				hgenerator;
+ 	u64				handle;
+ 	u32				use;
+diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h
+index 462c653e101746..2db9ae0575b609 100644
+--- a/include/soc/mscc/ocelot.h
++++ b/include/soc/mscc/ocelot.h
+@@ -778,7 +778,6 @@ struct ocelot_port {
+ 
+ 	phy_interface_t			phy_mode;
+ 
+-	unsigned int			ptp_skbs_in_flight;
+ 	struct sk_buff_head		tx_skbs;
+ 
+ 	unsigned int			trap_proto;
+@@ -786,7 +785,6 @@ struct ocelot_port {
+ 	u16				mrp_ring_id;
+ 
+ 	u8				ptp_cmd;
+-	u8				ts_id;
+ 
+ 	u8				index;
+ 
+diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
+index 346826e3c933da..41d20b7199c4af 100644
+--- a/kernel/bpf/btf.c
++++ b/kernel/bpf/btf.c
+@@ -6415,6 +6415,101 @@ int btf_ctx_arg_offset(const struct btf *btf, const struct btf_type *func_proto,
+ 	return off;
+ }
+ 
++struct bpf_raw_tp_null_args {
++	const char *func;
++	u64 mask;
++};
++
++static const struct bpf_raw_tp_null_args raw_tp_null_args[] = {
++	/* sched */
++	{ "sched_pi_setprio", 0x10 },
++	/* ... from sched_numa_pair_template event class */
++	{ "sched_stick_numa", 0x100 },
++	{ "sched_swap_numa", 0x100 },
++	/* afs */
++	{ "afs_make_fs_call", 0x10 },
++	{ "afs_make_fs_calli", 0x10 },
++	{ "afs_make_fs_call1", 0x10 },
++	{ "afs_make_fs_call2", 0x10 },
++	{ "afs_protocol_error", 0x1 },
++	{ "afs_flock_ev", 0x10 },
++	/* cachefiles */
++	{ "cachefiles_lookup", 0x1 | 0x200 },
++	{ "cachefiles_unlink", 0x1 },
++	{ "cachefiles_rename", 0x1 },
++	{ "cachefiles_prep_read", 0x1 },
++	{ "cachefiles_mark_active", 0x1 },
++	{ "cachefiles_mark_failed", 0x1 },
++	{ "cachefiles_mark_inactive", 0x1 },
++	{ "cachefiles_vfs_error", 0x1 },
++	{ "cachefiles_io_error", 0x1 },
++	{ "cachefiles_ondemand_open", 0x1 },
++	{ "cachefiles_ondemand_copen", 0x1 },
++	{ "cachefiles_ondemand_close", 0x1 },
++	{ "cachefiles_ondemand_read", 0x1 },
++	{ "cachefiles_ondemand_cread", 0x1 },
++	{ "cachefiles_ondemand_fd_write", 0x1 },
++	{ "cachefiles_ondemand_fd_release", 0x1 },
++	/* ext4, from ext4__mballoc event class */
++	{ "ext4_mballoc_discard", 0x10 },
++	{ "ext4_mballoc_free", 0x10 },
++	/* fib */
++	{ "fib_table_lookup", 0x100 },
++	/* filelock */
++	/* ... from filelock_lock event class */
++	{ "posix_lock_inode", 0x10 },
++	{ "fcntl_setlk", 0x10 },
++	{ "locks_remove_posix", 0x10 },
++	{ "flock_lock_inode", 0x10 },
++	/* ... from filelock_lease event class */
++	{ "break_lease_noblock", 0x10 },
++	{ "break_lease_block", 0x10 },
++	{ "break_lease_unblock", 0x10 },
++	{ "generic_delete_lease", 0x10 },
++	{ "time_out_leases", 0x10 },
++	/* host1x */
++	{ "host1x_cdma_push_gather", 0x10000 },
++	/* huge_memory */
++	{ "mm_khugepaged_scan_pmd", 0x10 },
++	{ "mm_collapse_huge_page_isolate", 0x1 },
++	{ "mm_khugepaged_scan_file", 0x10 },
++	{ "mm_khugepaged_collapse_file", 0x10 },
++	/* kmem */
++	{ "mm_page_alloc", 0x1 },
++	{ "mm_page_pcpu_drain", 0x1 },
++	/* .. from mm_page event class */
++	{ "mm_page_alloc_zone_locked", 0x1 },
++	/* netfs */
++	{ "netfs_failure", 0x10 },
++	/* power */
++	{ "device_pm_callback_start", 0x10 },
++	/* qdisc */
++	{ "qdisc_dequeue", 0x1000 },
++	/* rxrpc */
++	{ "rxrpc_recvdata", 0x1 },
++	{ "rxrpc_resend", 0x10 },
++	/* sunrpc */
++	{ "xs_stream_read_data", 0x1 },
++	/* ... from xprt_cong_event event class */
++	{ "xprt_reserve_cong", 0x10 },
++	{ "xprt_release_cong", 0x10 },
++	{ "xprt_get_cong", 0x10 },
++	{ "xprt_put_cong", 0x10 },
++	/* tcp */
++	{ "tcp_send_reset", 0x11 },
++	/* tegra_apb_dma */
++	{ "tegra_dma_tx_status", 0x100 },
++	/* timer_migration */
++	{ "tmigr_update_events", 0x1 },
++	/* writeback, from writeback_folio_template event class */
++	{ "writeback_dirty_folio", 0x10 },
++	{ "folio_wait_writeback", 0x10 },
++	/* rdma */
++	{ "mr_integ_alloc", 0x2000 },
++	/* bpf_testmod */
++	{ "bpf_testmod_test_read", 0x0 },
++};
++
+ bool btf_ctx_access(int off, int size, enum bpf_access_type type,
+ 		    const struct bpf_prog *prog,
+ 		    struct bpf_insn_access_aux *info)
+@@ -6425,6 +6520,7 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type,
+ 	const char *tname = prog->aux->attach_func_name;
+ 	struct bpf_verifier_log *log = info->log;
+ 	const struct btf_param *args;
++	bool ptr_err_raw_tp = false;
+ 	const char *tag_value;
+ 	u32 nr_args, arg;
+ 	int i, ret;
+@@ -6519,6 +6615,12 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type,
+ 		return false;
+ 	}
+ 
++	if (size != sizeof(u64)) {
++		bpf_log(log, "func '%s' size %d must be 8\n",
++			tname, size);
++		return false;
++	}
++
+ 	/* check for PTR_TO_RDONLY_BUF_OR_NULL or PTR_TO_RDWR_BUF_OR_NULL */
+ 	for (i = 0; i < prog->aux->ctx_arg_info_size; i++) {
+ 		const struct bpf_ctx_arg_aux *ctx_arg_info = &prog->aux->ctx_arg_info[i];
+@@ -6564,12 +6666,42 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type,
+ 	if (prog_args_trusted(prog))
+ 		info->reg_type |= PTR_TRUSTED;
+ 
+-	/* Raw tracepoint arguments always get marked as maybe NULL */
+-	if (bpf_prog_is_raw_tp(prog))
+-		info->reg_type |= PTR_MAYBE_NULL;
+-	else if (btf_param_match_suffix(btf, &args[arg], "__nullable"))
++	if (btf_param_match_suffix(btf, &args[arg], "__nullable"))
+ 		info->reg_type |= PTR_MAYBE_NULL;
+ 
++	if (prog->expected_attach_type == BPF_TRACE_RAW_TP) {
++		struct btf *btf = prog->aux->attach_btf;
++		const struct btf_type *t;
++		const char *tname;
++
++		/* BTF lookups cannot fail, return false on error */
++		t = btf_type_by_id(btf, prog->aux->attach_btf_id);
++		if (!t)
++			return false;
++		tname = btf_name_by_offset(btf, t->name_off);
++		if (!tname)
++			return false;
++		/* Checked by bpf_check_attach_target */
++		tname += sizeof("btf_trace_") - 1;
++		for (i = 0; i < ARRAY_SIZE(raw_tp_null_args); i++) {
++			/* Is this a func with potential NULL args? */
++			if (strcmp(tname, raw_tp_null_args[i].func))
++				continue;
++			if (raw_tp_null_args[i].mask & (0x1 << (arg * 4)))
++				info->reg_type |= PTR_MAYBE_NULL;
++			/* Is the current arg IS_ERR? */
++			if (raw_tp_null_args[i].mask & (0x2 << (arg * 4)))
++				ptr_err_raw_tp = true;
++			break;
++		}
++		/* If we don't know NULL-ness specification and the tracepoint
++		 * is coming from a loadable module, be conservative and mark
++		 * argument as PTR_MAYBE_NULL.
++		 */
++		if (i == ARRAY_SIZE(raw_tp_null_args) && btf_is_module(btf))
++			info->reg_type |= PTR_MAYBE_NULL;
++	}
++
+ 	if (tgt_prog) {
+ 		enum bpf_prog_type tgt_type;
+ 
+@@ -6614,6 +6746,15 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type,
+ 	bpf_log(log, "func '%s' arg%d has btf_id %d type %s '%s'\n",
+ 		tname, arg, info->btf_id, btf_type_str(t),
+ 		__btf_name_by_offset(btf, t->name_off));
++
++	/* Perform all checks on the validity of type for this argument, but if
++	 * we know it can be IS_ERR at runtime, scrub pointer type and mark as
++	 * scalar.
++	 */
++	if (ptr_err_raw_tp) {
++		bpf_log(log, "marking pointer arg%d as scalar as it may encode error", arg);
++		info->reg_type = SCALAR_VALUE;
++	}
+ 	return true;
+ }
+ EXPORT_SYMBOL_GPL(btf_ctx_access);
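How the masks above decode, derived from the checks in this hunk: each tracepoint argument owns a 4-bit nibble; bit 0 marks "may be NULL" and bit 1 marks "may be an ERR_PTR". For example "tcp_send_reset" (0x11) flags args 0 and 1 as possibly NULL, and "cachefiles_lookup" (0x1 | 0x200) flags arg0 as possibly NULL and arg2 as possibly IS_ERR. Decoder sketch:

	static bool arg_may_be_null(u64 mask, int arg)
	{
		return mask & (0x1ULL << (arg * 4));
	}

	static bool arg_may_be_err(u64 mask, int arg)
	{
		return mask & (0x2ULL << (arg * 4));
	}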
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index b2008076df9c26..4c486a0bfcc4d8 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -418,25 +418,6 @@ static struct btf_record *reg_btf_record(const struct bpf_reg_state *reg)
+ 	return rec;
+ }
+ 
+-static bool mask_raw_tp_reg_cond(const struct bpf_verifier_env *env, struct bpf_reg_state *reg) {
+-	return reg->type == (PTR_TO_BTF_ID | PTR_TRUSTED | PTR_MAYBE_NULL) &&
+-	       bpf_prog_is_raw_tp(env->prog) && !reg->ref_obj_id;
+-}
+-
+-static bool mask_raw_tp_reg(const struct bpf_verifier_env *env, struct bpf_reg_state *reg)
+-{
+-	if (!mask_raw_tp_reg_cond(env, reg))
+-		return false;
+-	reg->type &= ~PTR_MAYBE_NULL;
+-	return true;
+-}
+-
+-static void unmask_raw_tp_reg(struct bpf_reg_state *reg, bool result)
+-{
+-	if (result)
+-		reg->type |= PTR_MAYBE_NULL;
+-}
+-
+ static bool subprog_is_global(const struct bpf_verifier_env *env, int subprog)
+ {
+ 	struct bpf_func_info_aux *aux = env->prog->aux->func_info_aux;
+@@ -6618,7 +6599,6 @@ static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
+ 	const char *field_name = NULL;
+ 	enum bpf_type_flag flag = 0;
+ 	u32 btf_id = 0;
+-	bool mask;
+ 	int ret;
+ 
+ 	if (!env->allow_ptr_leaks) {
+@@ -6690,21 +6670,7 @@ static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
+ 
+ 	if (ret < 0)
+ 		return ret;
+-	/* For raw_tp progs, we allow dereference of PTR_MAYBE_NULL
+-	 * trusted PTR_TO_BTF_ID, these are the ones that are possibly
+-	 * arguments to the raw_tp. Since internal checks in for trusted
+-	 * reg in check_ptr_to_btf_access would consider PTR_MAYBE_NULL
+-	 * modifier as problematic, mask it out temporarily for the
+-	 * check. Don't apply this to pointers with ref_obj_id > 0, as
+-	 * those won't be raw_tp args.
+-	 *
+-	 * We may end up applying this relaxation to other trusted
+-	 * PTR_TO_BTF_ID with maybe null flag, since we cannot
+-	 * distinguish PTR_MAYBE_NULL tagged for arguments vs normal
+-	 * tagging, but that should expand allowed behavior, and not
+-	 * cause regression for existing behavior.
+-	 */
+-	mask = mask_raw_tp_reg(env, reg);
++
+ 	if (ret != PTR_TO_BTF_ID) {
+ 		/* just mark; */
+ 
+@@ -6765,13 +6731,8 @@ static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
+ 		clear_trusted_flags(&flag);
+ 	}
+ 
+-	if (atype == BPF_READ && value_regno >= 0) {
++	if (atype == BPF_READ && value_regno >= 0)
+ 		mark_btf_ld_reg(env, regs, value_regno, ret, reg->btf, btf_id, flag);
+-		/* We've assigned a new type to regno, so don't undo masking. */
+-		if (regno == value_regno)
+-			mask = false;
+-	}
+-	unmask_raw_tp_reg(reg, mask);
+ 
+ 	return 0;
+ }
+@@ -7146,7 +7107,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
+ 		if (!err && t == BPF_READ && value_regno >= 0)
+ 			mark_reg_unknown(env, regs, value_regno);
+ 	} else if (base_type(reg->type) == PTR_TO_BTF_ID &&
+-		   (mask_raw_tp_reg_cond(env, reg) || !type_may_be_null(reg->type))) {
++		   !type_may_be_null(reg->type)) {
+ 		err = check_ptr_to_btf_access(env, regs, regno, off, size, t,
+ 					      value_regno);
+ 	} else if (reg->type == CONST_PTR_TO_MAP) {
+@@ -8844,7 +8805,6 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
+ 	enum bpf_reg_type type = reg->type;
+ 	u32 *arg_btf_id = NULL;
+ 	int err = 0;
+-	bool mask;
+ 
+ 	if (arg_type == ARG_DONTCARE)
+ 		return 0;
+@@ -8885,11 +8845,11 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
+ 	    base_type(arg_type) == ARG_PTR_TO_SPIN_LOCK)
+ 		arg_btf_id = fn->arg_btf_id[arg];
+ 
+-	mask = mask_raw_tp_reg(env, reg);
+ 	err = check_reg_type(env, regno, arg_type, arg_btf_id, meta);
++	if (err)
++		return err;
+ 
+-	err = err ?: check_func_arg_reg_off(env, reg, regno, arg_type);
+-	unmask_raw_tp_reg(reg, mask);
++	err = check_func_arg_reg_off(env, reg, regno, arg_type);
+ 	if (err)
+ 		return err;
+ 
+@@ -9684,17 +9644,14 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog,
+ 				return ret;
+ 		} else if (base_type(arg->arg_type) == ARG_PTR_TO_BTF_ID) {
+ 			struct bpf_call_arg_meta meta;
+-			bool mask;
+ 			int err;
+ 
+ 			if (register_is_null(reg) && type_may_be_null(arg->arg_type))
+ 				continue;
+ 
+ 			memset(&meta, 0, sizeof(meta)); /* leave func_id as zero */
+-			mask = mask_raw_tp_reg(env, reg);
+ 			err = check_reg_type(env, regno, arg->arg_type, &arg->btf_id, &meta);
+ 			err = err ?: check_func_arg_reg_off(env, reg, regno, arg->arg_type);
+-			unmask_raw_tp_reg(reg, mask);
+ 			if (err)
+ 				return err;
+ 		} else {
+@@ -12009,7 +11966,6 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
+ 		enum bpf_arg_type arg_type = ARG_DONTCARE;
+ 		u32 regno = i + 1, ref_id, type_size;
+ 		bool is_ret_buf_sz = false;
+-		bool mask = false;
+ 		int kf_arg_type;
+ 
+ 		t = btf_type_skip_modifiers(btf, args[i].type, NULL);
+@@ -12068,15 +12024,12 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
+ 			return -EINVAL;
+ 		}
+ 
+-		mask = mask_raw_tp_reg(env, reg);
+ 		if ((is_kfunc_trusted_args(meta) || is_kfunc_rcu(meta)) &&
+ 		    (register_is_null(reg) || type_may_be_null(reg->type)) &&
+ 			!is_kfunc_arg_nullable(meta->btf, &args[i])) {
+ 			verbose(env, "Possibly NULL pointer passed to trusted arg%d\n", i);
+-			unmask_raw_tp_reg(reg, mask);
+ 			return -EACCES;
+ 		}
+-		unmask_raw_tp_reg(reg, mask);
+ 
+ 		if (reg->ref_obj_id) {
+ 			if (is_kfunc_release(meta) && meta->ref_obj_id) {
+@@ -12134,24 +12087,16 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
+ 			if (!is_kfunc_trusted_args(meta) && !is_kfunc_rcu(meta))
+ 				break;
+ 
+-			/* Allow passing maybe NULL raw_tp arguments to
+-			 * kfuncs for compatibility. Don't apply this to
+-			 * arguments with ref_obj_id > 0.
+-			 */
+-			mask = mask_raw_tp_reg(env, reg);
+ 			if (!is_trusted_reg(reg)) {
+ 				if (!is_kfunc_rcu(meta)) {
+ 					verbose(env, "R%d must be referenced or trusted\n", regno);
+-					unmask_raw_tp_reg(reg, mask);
+ 					return -EINVAL;
+ 				}
+ 				if (!is_rcu_reg(reg)) {
+ 					verbose(env, "R%d must be a rcu pointer\n", regno);
+-					unmask_raw_tp_reg(reg, mask);
+ 					return -EINVAL;
+ 				}
+ 			}
+-			unmask_raw_tp_reg(reg, mask);
+ 			fallthrough;
+ 		case KF_ARG_PTR_TO_CTX:
+ 		case KF_ARG_PTR_TO_DYNPTR:
+@@ -12174,9 +12119,7 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
+ 
+ 		if (is_kfunc_release(meta) && reg->ref_obj_id)
+ 			arg_type |= OBJ_RELEASE;
+-		mask = mask_raw_tp_reg(env, reg);
+ 		ret = check_func_arg_reg_off(env, reg, regno, arg_type);
+-		unmask_raw_tp_reg(reg, mask);
+ 		if (ret < 0)
+ 			return ret;
+ 
+@@ -12353,7 +12296,6 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
+ 			ref_tname = btf_name_by_offset(btf, ref_t->name_off);
+ 			fallthrough;
+ 		case KF_ARG_PTR_TO_BTF_ID:
+-			mask = mask_raw_tp_reg(env, reg);
+ 			/* Only base_type is checked, further checks are done here */
+ 			if ((base_type(reg->type) != PTR_TO_BTF_ID ||
+ 			     (bpf_type_has_unsafe_modifiers(reg->type) && !is_rcu_reg(reg))) &&
+@@ -12362,11 +12304,9 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
+ 				verbose(env, "expected %s or socket\n",
+ 					reg_type_str(env, base_type(reg->type) |
+ 							  (type_flag(reg->type) & BPF_REG_TRUSTED_MODIFIERS)));
+-				unmask_raw_tp_reg(reg, mask);
+ 				return -EINVAL;
+ 			}
+ 			ret = process_kf_arg_ptr_to_btf_id(env, reg, ref_t, ref_tname, ref_id, meta, i);
+-			unmask_raw_tp_reg(reg, mask);
+ 			if (ret < 0)
+ 				return ret;
+ 			break;
+@@ -13336,7 +13276,7 @@ static int sanitize_check_bounds(struct bpf_verifier_env *env,
+  */
+ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
+ 				   struct bpf_insn *insn,
+-				   struct bpf_reg_state *ptr_reg,
++				   const struct bpf_reg_state *ptr_reg,
+ 				   const struct bpf_reg_state *off_reg)
+ {
+ 	struct bpf_verifier_state *vstate = env->cur_state;
+@@ -13350,7 +13290,6 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
+ 	struct bpf_sanitize_info info = {};
+ 	u8 opcode = BPF_OP(insn->code);
+ 	u32 dst = insn->dst_reg;
+-	bool mask;
+ 	int ret;
+ 
+ 	dst_reg = &regs[dst];
+@@ -13377,14 +13316,11 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
+ 		return -EACCES;
+ 	}
+ 
+-	mask = mask_raw_tp_reg(env, ptr_reg);
+ 	if (ptr_reg->type & PTR_MAYBE_NULL) {
+ 		verbose(env, "R%d pointer arithmetic on %s prohibited, null-check it first\n",
+ 			dst, reg_type_str(env, ptr_reg->type));
+-		unmask_raw_tp_reg(ptr_reg, mask);
+ 		return -EACCES;
+ 	}
+-	unmask_raw_tp_reg(ptr_reg, mask);
+ 
+ 	switch (base_type(ptr_reg->type)) {
+ 	case PTR_TO_CTX:
+@@ -19934,7 +19870,6 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
+ 		 * for this case.
+ 		 */
+ 		case PTR_TO_BTF_ID | MEM_ALLOC | PTR_UNTRUSTED:
+-		case PTR_TO_BTF_ID | PTR_TRUSTED | PTR_MAYBE_NULL:
+ 			if (type == BPF_READ) {
+ 				if (BPF_MODE(insn->code) == BPF_MEM)
+ 					insn->code = BPF_LDX | BPF_PROBE_MEM |
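With NULL-ness now table-driven in btf.c, only listed raw_tp arguments keep PTR_MAYBE_NULL, so the verifier's temporary mask/unmask shims can all be deleted. From the program author's side, a hypothetical libbpf-style program (argument names follow the tracepoint prototype; assumes the usual vmlinux.h and <bpf/bpf_tracing.h> includes):

	SEC("raw_tp/sched_pi_setprio")
	int BPF_PROG(on_setprio, struct task_struct *tsk,
		     struct task_struct *pi_task)
	{
		if (!pi_task)		/* mask 0x10: arg1 may be NULL */
			return 0;
		/* tsk needs no check; pi_task is non-NULL from here on */
		return 0;
	}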
+diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
+index 40a1ad4493b4d9..fc6f41ac33eb13 100644
+--- a/kernel/sched/deadline.c
++++ b/kernel/sched/deadline.c
+@@ -781,7 +781,7 @@ static inline void replenish_dl_new_period(struct sched_dl_entity *dl_se,
+ 	 * If it is a deferred reservation, and the server
+ 	 * is not handling an starvation case, defer it.
+ 	 */
+-	if (dl_se->dl_defer & !dl_se->dl_defer_running) {
++	if (dl_se->dl_defer && !dl_se->dl_defer_running) {
+ 		dl_se->dl_throttled = 1;
+ 		dl_se->dl_defer_armed = 1;
+ 	}
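The one-character fix swaps bitwise & for logical &&. For the current one-bit fields the truth tables happen to coincide, but the bitwise form silently breaks the moment either operand can be wider than one bit. Standalone demonstration:

	#include <stdio.h>

	int main(void)
	{
		unsigned int a = 2, b = 0;	/* hypothetical wider values */

		/* bitwise: 2 & !0 == 2 & 1 == 0; logical: 2 && !0 == 1 */
		printf("%u %u\n", a & !b, a && !b);	/* prints "0 1" */
		return 0;
	}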
+diff --git a/kernel/static_call_inline.c b/kernel/static_call_inline.c
+index 5259cda486d058..bb7d066a7c3979 100644
+--- a/kernel/static_call_inline.c
++++ b/kernel/static_call_inline.c
+@@ -15,7 +15,7 @@ extern struct static_call_site __start_static_call_sites[],
+ extern struct static_call_tramp_key __start_static_call_tramp_key[],
+ 				    __stop_static_call_tramp_key[];
+ 
+-static int static_call_initialized;
++int static_call_initialized;
+ 
+ /*
+  * Must be called before early_initcall() to be effective.
+diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
+index 792dc35414a3c3..50881898e758d8 100644
+--- a/kernel/trace/bpf_trace.c
++++ b/kernel/trace/bpf_trace.c
+@@ -2215,6 +2215,9 @@ void perf_event_detach_bpf_prog(struct perf_event *event)
+ 		goto unlock;
+ 
+ 	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
++	if (!old_array)
++		goto put;
++
+ 	ret = bpf_prog_array_copy(old_array, event->prog, NULL, 0, &new_array);
+ 	if (ret < 0) {
+ 		bpf_prog_array_delete_safe(old_array, event->prog);
+@@ -2223,6 +2226,14 @@ void perf_event_detach_bpf_prog(struct perf_event *event)
+ 		bpf_prog_array_free_sleepable(old_array);
+ 	}
+ 
++put:
++	/*
++	 * It could be that the bpf_prog is not sleepable (and will be freed
++	 * via normal RCU), but is called from a point that supports sleepable
++	 * programs and uses tasks-trace-RCU.
++	 */
++	synchronize_rcu_tasks_trace();
++
+ 	bpf_prog_put(event->prog);
+ 	event->prog = NULL;
+ 
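Timeline of the race the added synchronize_rcu_tasks_trace() closes (CPU assignment hypothetical):

	/*
	 *   CPU0 (uprobe fires)                CPU1 (detach)
	 *   rcu_read_lock_trace()
	 *   array = rcu_dereference(...)       old_array found empty/NULL
	 *   run event->prog                    bpf_prog_put(event->prog)
	 *                                      -> prog freed without waiting
	 *   ... use-after-free ...
	 *
	 * Waiting for a tasks-trace grace period before the final put makes
	 * CPU1 block until CPU0 leaves its read-side section.
	 */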
+diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
+index b30fc8fcd0956a..b085a8a164ea03 100644
+--- a/kernel/trace/trace_uprobe.c
++++ b/kernel/trace/trace_uprobe.c
+@@ -1400,9 +1400,13 @@ static void __uprobe_perf_func(struct trace_uprobe *tu,
+ 
+ #ifdef CONFIG_BPF_EVENTS
+ 	if (bpf_prog_array_valid(call)) {
++		const struct bpf_prog_array *array;
+ 		u32 ret;
+ 
+-		ret = bpf_prog_run_array_uprobe(call->prog_array, regs, bpf_prog_run);
++		rcu_read_lock_trace();
++		array = rcu_dereference_check(call->prog_array, rcu_read_lock_trace_held());
++		ret = bpf_prog_run_array_uprobe(array, regs, bpf_prog_run);
++		rcu_read_unlock_trace();
+ 		if (!ret)
+ 			return;
+ 	}
+diff --git a/mm/slub.c b/mm/slub.c
+index 15ba89fef89a1f..b9447a955f6112 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -2199,9 +2199,24 @@ bool memcg_slab_post_charge(void *p, gfp_t flags)
+ 
+ 	folio = virt_to_folio(p);
+ 	if (!folio_test_slab(folio)) {
+-		return folio_memcg_kmem(folio) ||
+-			(__memcg_kmem_charge_page(folio_page(folio, 0), flags,
+-						  folio_order(folio)) == 0);
++		int size;
++
++		if (folio_memcg_kmem(folio))
++			return true;
++
++		if (__memcg_kmem_charge_page(folio_page(folio, 0), flags,
++					     folio_order(folio)))
++			return false;
++
++		/*
++		 * This folio has already been accounted in the global stats but
++		 * not in the memcg stats. So, subtract from the global and use
++		 * the interface which adds to both global and memcg stats.
++		 */
++		size = folio_size(folio);
++		node_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B, -size);
++		lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B, size);
++		return true;
+ 	}
+ 
+ 	slab = folio_slab(folio);
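Accounting sketch for the hunk above: the large-kmalloc folio was already charged to the global NR_SLAB_UNRECLAIMABLE_B counter at allocation, so charging it again through the memcg-aware helper would double-count globally. The fix nets out per counter as:

	/*
	 * global: -size (node_stat_mod_folio) + size (lruvec_stat_mod_folio)
	 *         = counted once
	 * memcg:  +size (lruvec_stat_mod_folio)
	 *         = counted once
	 */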
+diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
+index 2243cec18ecc86..53dea8ae96e477 100644
+--- a/net/batman-adv/translation-table.c
++++ b/net/batman-adv/translation-table.c
+@@ -990,16 +990,25 @@ static void batadv_tt_tvlv_container_update(struct batadv_priv *bat_priv)
+ 	int tt_diff_len, tt_change_len = 0;
+ 	int tt_diff_entries_num = 0;
+ 	int tt_diff_entries_count = 0;
++	bool drop_changes = false;
++	size_t tt_extra_len = 0;
+ 	u16 tvlv_len;
+ 
+ 	tt_diff_entries_num = atomic_read(&bat_priv->tt.local_changes);
+ 	tt_diff_len = batadv_tt_len(tt_diff_entries_num);
+ 
+ 	/* if we have too many changes for one packet don't send any
+-	 * and wait for the tt table request which will be fragmented
++	 * and wait for the tt table request so we can reply with the full
++	 * (fragmented) table.
++	 *
++	 * The local change history should still be cleaned up so the next
++	 * TT round can start again with a clean state.
+ 	 */
+-	if (tt_diff_len > bat_priv->soft_iface->mtu)
++	if (tt_diff_len > bat_priv->soft_iface->mtu) {
+ 		tt_diff_len = 0;
++		tt_diff_entries_num = 0;
++		drop_changes = true;
++	}
+ 
+ 	tvlv_len = batadv_tt_prepare_tvlv_local_data(bat_priv, &tt_data,
+ 						     &tt_change, &tt_diff_len);
+@@ -1008,7 +1017,7 @@ static void batadv_tt_tvlv_container_update(struct batadv_priv *bat_priv)
+ 
+ 	tt_data->flags = BATADV_TT_OGM_DIFF;
+ 
+-	if (tt_diff_len == 0)
++	if (!drop_changes && tt_diff_len == 0)
+ 		goto container_register;
+ 
+ 	spin_lock_bh(&bat_priv->tt.changes_list_lock);
+@@ -1027,6 +1036,9 @@ static void batadv_tt_tvlv_container_update(struct batadv_priv *bat_priv)
+ 	}
+ 	spin_unlock_bh(&bat_priv->tt.changes_list_lock);
+ 
++	tt_extra_len = batadv_tt_len(tt_diff_entries_num -
++				     tt_diff_entries_count);
++
+ 	/* Keep the buffer for possible tt_request */
+ 	spin_lock_bh(&bat_priv->tt.last_changeset_lock);
+ 	kfree(bat_priv->tt.last_changeset);
+@@ -1035,6 +1047,7 @@ static void batadv_tt_tvlv_container_update(struct batadv_priv *bat_priv)
+ 	tt_change_len = batadv_tt_len(tt_diff_entries_count);
+ 	/* check whether this new OGM has no changes due to size problems */
+ 	if (tt_diff_entries_count > 0) {
++		tt_diff_len -= tt_extra_len;
+ 		/* if kmalloc() fails we will reply with the full table
+ 		 * instead of providing the diff
+ 		 */
+@@ -1047,6 +1060,8 @@ static void batadv_tt_tvlv_container_update(struct batadv_priv *bat_priv)
+ 	}
+ 	spin_unlock_bh(&bat_priv->tt.last_changeset_lock);
+ 
++	/* Remove extra packet space for OGM */
++	tvlv_len -= tt_extra_len;
+ container_register:
+ 	batadv_tvlv_container_register(bat_priv, BATADV_TVLV_TT, 1, tt_data,
+ 				       tvlv_len);
+@@ -2747,14 +2762,16 @@ static bool batadv_tt_global_valid(const void *entry_ptr,
+  *
+  * Fills the tvlv buff with the tt entries from the specified hash. If valid_cb
+  * is not provided then this becomes a no-op.
++ *
++ * Return: Remaining unused length in tvlv_buff.
+  */
+-static void batadv_tt_tvlv_generate(struct batadv_priv *bat_priv,
+-				    struct batadv_hashtable *hash,
+-				    void *tvlv_buff, u16 tt_len,
+-				    bool (*valid_cb)(const void *,
+-						     const void *,
+-						     u8 *flags),
+-				    void *cb_data)
++static u16 batadv_tt_tvlv_generate(struct batadv_priv *bat_priv,
++				   struct batadv_hashtable *hash,
++				   void *tvlv_buff, u16 tt_len,
++				   bool (*valid_cb)(const void *,
++						    const void *,
++						    u8 *flags),
++				   void *cb_data)
+ {
+ 	struct batadv_tt_common_entry *tt_common_entry;
+ 	struct batadv_tvlv_tt_change *tt_change;
+@@ -2768,7 +2785,7 @@ static void batadv_tt_tvlv_generate(struct batadv_priv *bat_priv,
+ 	tt_change = tvlv_buff;
+ 
+ 	if (!valid_cb)
+-		return;
++		return tt_len;
+ 
+ 	rcu_read_lock();
+ 	for (i = 0; i < hash->size; i++) {
+@@ -2794,6 +2811,8 @@ static void batadv_tt_tvlv_generate(struct batadv_priv *bat_priv,
+ 		}
+ 	}
+ 	rcu_read_unlock();
++
++	return batadv_tt_len(tt_tot - tt_num_entries);
+ }
+ 
+ /**
+@@ -3069,10 +3088,11 @@ static bool batadv_send_other_tt_response(struct batadv_priv *bat_priv,
+ 			goto out;
+ 
+ 		/* fill the rest of the tvlv with the real TT entries */
+-		batadv_tt_tvlv_generate(bat_priv, bat_priv->tt.global_hash,
+-					tt_change, tt_len,
+-					batadv_tt_global_valid,
+-					req_dst_orig_node);
++		tvlv_len -= batadv_tt_tvlv_generate(bat_priv,
++						    bat_priv->tt.global_hash,
++						    tt_change, tt_len,
++						    batadv_tt_global_valid,
++						    req_dst_orig_node);
+ 	}
+ 
+ 	/* Don't send the response, if larger than fragmented packet. */
+@@ -3196,9 +3216,11 @@ static bool batadv_send_my_tt_response(struct batadv_priv *bat_priv,
+ 			goto out;
+ 
+ 		/* fill the rest of the tvlv with the real TT entries */
+-		batadv_tt_tvlv_generate(bat_priv, bat_priv->tt.local_hash,
+-					tt_change, tt_len,
+-					batadv_tt_local_valid, NULL);
++		tvlv_len -= batadv_tt_tvlv_generate(bat_priv,
++						    bat_priv->tt.local_hash,
++						    tt_change, tt_len,
++						    batadv_tt_local_valid,
++						    NULL);
+ 	}
+ 
+ 	tvlv_tt_data->flags = BATADV_TT_RESPONSE;
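Worked example of the new length bookkeeping (numbers hypothetical): suppose 10 local changes are pending but only 7 survive filtering into the buffer. Then:

	tt_extra_len  = batadv_tt_len(10 - 7);	/* space reserved but unused */
	tt_diff_len  -= tt_extra_len;		/* matches what is stored    */
	tvlv_len     -= tt_extra_len;		/* OGM carries no padding    */

The same idea drives the new batadv_tt_tvlv_generate() return value: it reports the unused remainder so both response paths can shrink tvlv_len accordingly.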
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index 2b5ba8acd1d84a..388d46c6a043d4 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -6872,38 +6872,27 @@ static void hci_le_create_big_complete_evt(struct hci_dev *hdev, void *data,
+ 		return;
+ 
+ 	hci_dev_lock(hdev);
+-	rcu_read_lock();
+ 
+ 	/* Connect all BISes that are bound to the BIG */
+-	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
+-		if (bacmp(&conn->dst, BDADDR_ANY) ||
+-		    conn->type != ISO_LINK ||
+-		    conn->iso_qos.bcast.big != ev->handle)
++	while ((conn = hci_conn_hash_lookup_big_state(hdev, ev->handle,
++						      BT_BOUND))) {
++		if (ev->status) {
++			hci_connect_cfm(conn, ev->status);
++			hci_conn_del(conn);
+ 			continue;
++		}
+ 
+ 		if (hci_conn_set_handle(conn,
+ 					__le16_to_cpu(ev->bis_handle[i++])))
+ 			continue;
+ 
+-		if (!ev->status) {
+-			conn->state = BT_CONNECTED;
+-			set_bit(HCI_CONN_BIG_CREATED, &conn->flags);
+-			rcu_read_unlock();
+-			hci_debugfs_create_conn(conn);
+-			hci_conn_add_sysfs(conn);
+-			hci_iso_setup_path(conn);
+-			rcu_read_lock();
+-			continue;
+-		}
+-
+-		hci_connect_cfm(conn, ev->status);
+-		rcu_read_unlock();
+-		hci_conn_del(conn);
+-		rcu_read_lock();
++		conn->state = BT_CONNECTED;
++		set_bit(HCI_CONN_BIG_CREATED, &conn->flags);
++		hci_debugfs_create_conn(conn);
++		hci_conn_add_sysfs(conn);
++		hci_iso_setup_path(conn);
+ 	}
+ 
+-	rcu_read_unlock();
+-
+ 	if (!ev->status && !i)
+ 		/* If no BISes have been connected for the BIG,
+ 		 * terminate. This is in case all bound connections
+diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
+index 2272e1849ebd89..022b86797acdc5 100644
+--- a/net/bluetooth/hci_sock.c
++++ b/net/bluetooth/hci_sock.c
+@@ -1926,7 +1926,7 @@ static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
+ }
+ 
+ static int hci_sock_setsockopt_old(struct socket *sock, int level, int optname,
+-				   sockptr_t optval, unsigned int len)
++				   sockptr_t optval, unsigned int optlen)
+ {
+ 	struct hci_ufilter uf = { .opcode = 0 };
+ 	struct sock *sk = sock->sk;
+@@ -1943,7 +1943,7 @@ static int hci_sock_setsockopt_old(struct socket *sock, int level, int optname,
+ 
+ 	switch (optname) {
+ 	case HCI_DATA_DIR:
+-		err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, len);
++		err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen);
+ 		if (err)
+ 			break;
+ 
+@@ -1954,7 +1954,7 @@ static int hci_sock_setsockopt_old(struct socket *sock, int level, int optname,
+ 		break;
+ 
+ 	case HCI_TIME_STAMP:
+-		err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, len);
++		err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen);
+ 		if (err)
+ 			break;
+ 
+@@ -1974,7 +1974,7 @@ static int hci_sock_setsockopt_old(struct socket *sock, int level, int optname,
+ 			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
+ 		}
+ 
+-		err = bt_copy_from_sockptr(&uf, sizeof(uf), optval, len);
++		err = copy_safe_from_sockptr(&uf, sizeof(uf), optval, optlen);
+ 		if (err)
+ 			break;
+ 
+@@ -2005,7 +2005,7 @@ static int hci_sock_setsockopt_old(struct socket *sock, int level, int optname,
+ }
+ 
+ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
+-			       sockptr_t optval, unsigned int len)
++			       sockptr_t optval, unsigned int optlen)
+ {
+ 	struct sock *sk = sock->sk;
+ 	int err = 0;
+@@ -2015,7 +2015,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
+ 
+ 	if (level == SOL_HCI)
+ 		return hci_sock_setsockopt_old(sock, level, optname, optval,
+-					       len);
++					       optlen);
+ 
+ 	if (level != SOL_BLUETOOTH)
+ 		return -ENOPROTOOPT;
+@@ -2035,7 +2035,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
+ 			goto done;
+ 		}
+ 
+-		err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, len);
++		err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen);
+ 		if (err)
+ 			break;
+ 
+diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c
+index 5e2d9758bd3c1c..644b606743e212 100644
+--- a/net/bluetooth/iso.c
++++ b/net/bluetooth/iso.c
+@@ -1129,6 +1129,7 @@ static int iso_listen_bis(struct sock *sk)
+ 		return -EHOSTUNREACH;
+ 
+ 	hci_dev_lock(hdev);
++	lock_sock(sk);
+ 
+ 	/* Fail if user set invalid QoS */
+ 	if (iso_pi(sk)->qos_user_set && !check_bcast_qos(&iso_pi(sk)->qos)) {
+@@ -1158,10 +1159,10 @@ static int iso_listen_bis(struct sock *sk)
+ 		goto unlock;
+ 	}
+ 
+-	hci_dev_put(hdev);
+-
+ unlock:
++	release_sock(sk);
+ 	hci_dev_unlock(hdev);
++	hci_dev_put(hdev);
+ 	return err;
+ }
+ 
+@@ -1188,6 +1189,7 @@ static int iso_sock_listen(struct socket *sock, int backlog)
+ 
+ 	BT_DBG("sk %p backlog %d", sk, backlog);
+ 
++	sock_hold(sk);
+ 	lock_sock(sk);
+ 
+ 	if (sk->sk_state != BT_BOUND) {
+@@ -1200,10 +1202,16 @@ static int iso_sock_listen(struct socket *sock, int backlog)
+ 		goto done;
+ 	}
+ 
+-	if (!bacmp(&iso_pi(sk)->dst, BDADDR_ANY))
++	if (!bacmp(&iso_pi(sk)->dst, BDADDR_ANY)) {
+ 		err = iso_listen_cis(sk);
+-	else
++	} else {
++		/* Drop sock lock to avoid potential
++		 * deadlock with the hdev lock.
++		 */
++		release_sock(sk);
+ 		err = iso_listen_bis(sk);
++		lock_sock(sk);
++	}
+ 
+ 	if (err)
+ 		goto done;
+@@ -1215,6 +1223,7 @@ static int iso_sock_listen(struct socket *sock, int backlog)
+ 
+ done:
+ 	release_sock(sk);
++	sock_put(sk);
+ 	return err;
+ }
+ 
+@@ -1226,7 +1235,11 @@ static int iso_sock_accept(struct socket *sock, struct socket *newsock,
+ 	long timeo;
+ 	int err = 0;
+ 
+-	lock_sock(sk);
++	/* Use explicit nested locking to avoid lockdep warnings generated
++	 * because the parent socket and the child socket are locked on the
++	 * same thread.
++	 */
++	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
+ 
+ 	timeo = sock_rcvtimeo(sk, arg->flags & O_NONBLOCK);
+ 
+@@ -1257,7 +1270,7 @@ static int iso_sock_accept(struct socket *sock, struct socket *newsock,
+ 		release_sock(sk);
+ 
+ 		timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
+-		lock_sock(sk);
++		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
+ 	}
+ 	remove_wait_queue(sk_sleep(sk), &wait);
+ 
+@@ -1398,6 +1411,7 @@ static void iso_conn_big_sync(struct sock *sk)
+ 	 * change.
+ 	 */
+ 	hci_dev_lock(hdev);
++	lock_sock(sk);
+ 
+ 	if (!test_and_set_bit(BT_SK_BIG_SYNC, &iso_pi(sk)->flags)) {
+ 		err = hci_le_big_create_sync(hdev, iso_pi(sk)->conn->hcon,
+@@ -1410,6 +1424,7 @@ static void iso_conn_big_sync(struct sock *sk)
+ 				   err);
+ 	}
+ 
++	release_sock(sk);
+ 	hci_dev_unlock(hdev);
+ }
+ 
+@@ -1418,39 +1433,57 @@ static int iso_sock_recvmsg(struct socket *sock, struct msghdr *msg,
+ {
+ 	struct sock *sk = sock->sk;
+ 	struct iso_pinfo *pi = iso_pi(sk);
++	bool early_ret = false;
++	int err = 0;
+ 
+ 	BT_DBG("sk %p", sk);
+ 
+ 	if (test_and_clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
++		sock_hold(sk);
+ 		lock_sock(sk);
++
+ 		switch (sk->sk_state) {
+ 		case BT_CONNECT2:
+ 			if (test_bit(BT_SK_PA_SYNC, &pi->flags)) {
++				release_sock(sk);
+ 				iso_conn_big_sync(sk);
++				lock_sock(sk);
++
+ 				sk->sk_state = BT_LISTEN;
+ 			} else {
+ 				iso_conn_defer_accept(pi->conn->hcon);
+ 				sk->sk_state = BT_CONFIG;
+ 			}
+-			release_sock(sk);
+-			return 0;
++
++			early_ret = true;
++			break;
+ 		case BT_CONNECTED:
+ 			if (test_bit(BT_SK_PA_SYNC, &iso_pi(sk)->flags)) {
++				release_sock(sk);
+ 				iso_conn_big_sync(sk);
++				lock_sock(sk);
++
+ 				sk->sk_state = BT_LISTEN;
+-				release_sock(sk);
+-				return 0;
++				early_ret = true;
+ 			}
+ 
+-			release_sock(sk);
+ 			break;
+ 		case BT_CONNECT:
+ 			release_sock(sk);
+-			return iso_connect_cis(sk);
++			err = iso_connect_cis(sk);
++			lock_sock(sk);
++
++			early_ret = true;
++			break;
+ 		default:
+-			release_sock(sk);
+ 			break;
+ 		}
++
++		release_sock(sk);
++		sock_put(sk);
++
++		if (early_ret)
++			return err;
+ 	}
+ 
+ 	return bt_sock_recvmsg(sock, msg, len, flags);
+@@ -1566,7 +1599,7 @@ static int iso_sock_setsockopt(struct socket *sock, int level, int optname,
+ 			break;
+ 		}
+ 
+-		err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen);
++		err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen);
+ 		if (err)
+ 			break;
+ 
+@@ -1577,7 +1610,7 @@ static int iso_sock_setsockopt(struct socket *sock, int level, int optname,
+ 		break;
+ 
+ 	case BT_PKT_STATUS:
+-		err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen);
++		err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen);
+ 		if (err)
+ 			break;
+ 
+@@ -1596,7 +1629,7 @@ static int iso_sock_setsockopt(struct socket *sock, int level, int optname,
+ 			break;
+ 		}
+ 
+-		err = bt_copy_from_sockptr(&qos, sizeof(qos), optval, optlen);
++		err = copy_safe_from_sockptr(&qos, sizeof(qos), optval, optlen);
+ 		if (err)
+ 			break;
+ 
+@@ -1617,8 +1650,8 @@ static int iso_sock_setsockopt(struct socket *sock, int level, int optname,
+ 			break;
+ 		}
+ 
+-		err = bt_copy_from_sockptr(iso_pi(sk)->base, optlen, optval,
+-					   optlen);
++		err = copy_safe_from_sockptr(iso_pi(sk)->base, optlen, optval,
++					     optlen);
+ 		if (err)
+ 			break;
+ 
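Common thread of the iso.c hunks: the hdev lock is the outer lock and the socket lock the inner one, so any path holding sk that needs hdev must drop sk first. The listen path shows the shape:

	/* sk lock held on entry (sketch of the fixed ordering) */
	release_sock(sk);
	err = iso_listen_bis(sk);	/* takes hdev, then sk internally */
	lock_sock(sk);

The added sock_hold()/sock_put() pairs keep the socket alive across the windows where its lock is dropped.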
+diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
+index 18e89e764f3b42..3d2553dcdb1b3c 100644
+--- a/net/bluetooth/l2cap_sock.c
++++ b/net/bluetooth/l2cap_sock.c
+@@ -755,7 +755,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
+ 		opts.max_tx   = chan->max_tx;
+ 		opts.txwin_size = chan->tx_win;
+ 
+-		err = bt_copy_from_sockptr(&opts, sizeof(opts), optval, optlen);
++		err = copy_safe_from_sockptr(&opts, sizeof(opts), optval,
++					     optlen);
+ 		if (err)
+ 			break;
+ 
+@@ -800,7 +801,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
+ 		break;
+ 
+ 	case L2CAP_LM:
+-		err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen);
++		err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen);
+ 		if (err)
+ 			break;
+ 
+@@ -909,7 +910,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
+ 
+ 		sec.level = BT_SECURITY_LOW;
+ 
+-		err = bt_copy_from_sockptr(&sec, sizeof(sec), optval, optlen);
++		err = copy_safe_from_sockptr(&sec, sizeof(sec), optval, optlen);
+ 		if (err)
+ 			break;
+ 
+@@ -956,7 +957,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
+ 			break;
+ 		}
+ 
+-		err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen);
++		err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen);
+ 		if (err)
+ 			break;
+ 
+@@ -970,7 +971,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
+ 		break;
+ 
+ 	case BT_FLUSHABLE:
+-		err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen);
++		err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen);
+ 		if (err)
+ 			break;
+ 
+@@ -1004,7 +1005,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
+ 
+ 		pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
+ 
+-		err = bt_copy_from_sockptr(&pwr, sizeof(pwr), optval, optlen);
++		err = copy_safe_from_sockptr(&pwr, sizeof(pwr), optval, optlen);
+ 		if (err)
+ 			break;
+ 
+@@ -1015,7 +1016,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
+ 		break;
+ 
+ 	case BT_CHANNEL_POLICY:
+-		err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen);
++		err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen);
+ 		if (err)
+ 			break;
+ 
+@@ -1046,7 +1047,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
+ 			break;
+ 		}
+ 
+-		err = bt_copy_from_sockptr(&mtu, sizeof(mtu), optval, optlen);
++		err = copy_safe_from_sockptr(&mtu, sizeof(mtu), optval, optlen);
+ 		if (err)
+ 			break;
+ 
+@@ -1076,7 +1077,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
+ 			break;
+ 		}
+ 
+-		err = bt_copy_from_sockptr(&mode, sizeof(mode), optval, optlen);
++		err = copy_safe_from_sockptr(&mode, sizeof(mode), optval,
++					     optlen);
+ 		if (err)
+ 			break;
+ 
+diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
+index 40766f8119ed9c..913402806fa0d4 100644
+--- a/net/bluetooth/rfcomm/sock.c
++++ b/net/bluetooth/rfcomm/sock.c
+@@ -629,10 +629,9 @@ static int rfcomm_sock_setsockopt_old(struct socket *sock, int optname,
+ 
+ 	switch (optname) {
+ 	case RFCOMM_LM:
+-		if (bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen)) {
+-			err = -EFAULT;
++		err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen);
++		if (err)
+ 			break;
+-		}
+ 
+ 		if (opt & RFCOMM_LM_FIPS) {
+ 			err = -EINVAL;
+@@ -685,7 +684,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname,
+ 
+ 		sec.level = BT_SECURITY_LOW;
+ 
+-		err = bt_copy_from_sockptr(&sec, sizeof(sec), optval, optlen);
++		err = copy_safe_from_sockptr(&sec, sizeof(sec), optval, optlen);
+ 		if (err)
+ 			break;
+ 
+@@ -703,7 +702,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname,
+ 			break;
+ 		}
+ 
+-		err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen);
++		err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen);
+ 		if (err)
+ 			break;
+ 
+diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
+index 1c7252a3686694..b872a2ca3ff38b 100644
+--- a/net/bluetooth/sco.c
++++ b/net/bluetooth/sco.c
+@@ -267,10 +267,13 @@ static int sco_connect(struct sock *sk)
+ 	else
+ 		type = SCO_LINK;
+ 
+-	if (sco_pi(sk)->setting == BT_VOICE_TRANSPARENT &&
+-	    (!lmp_transp_capable(hdev) || !lmp_esco_capable(hdev))) {
+-		err = -EOPNOTSUPP;
+-		goto unlock;
++	switch (sco_pi(sk)->setting & SCO_AIRMODE_MASK) {
++	case SCO_AIRMODE_TRANSP:
++		if (!lmp_transp_capable(hdev) || !lmp_esco_capable(hdev)) {
++			err = -EOPNOTSUPP;
++			goto unlock;
++		}
++		break;
+ 	}
+ 
+ 	hcon = hci_connect_sco(hdev, type, &sco_pi(sk)->dst,
+@@ -853,7 +856,7 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname,
+ 			break;
+ 		}
+ 
+-		err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen);
++		err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen);
+ 		if (err)
+ 			break;
+ 
+@@ -872,18 +875,11 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname,
+ 
+ 		voice.setting = sco_pi(sk)->setting;
+ 
+-		err = bt_copy_from_sockptr(&voice, sizeof(voice), optval,
+-					   optlen);
++		err = copy_safe_from_sockptr(&voice, sizeof(voice), optval,
++					     optlen);
+ 		if (err)
+ 			break;
+ 
+-		/* Explicitly check for these values */
+-		if (voice.setting != BT_VOICE_TRANSPARENT &&
+-		    voice.setting != BT_VOICE_CVSD_16BIT) {
+-			err = -EINVAL;
+-			break;
+-		}
+-
+ 		sco_pi(sk)->setting = voice.setting;
+ 		hdev = hci_get_route(&sco_pi(sk)->dst, &sco_pi(sk)->src,
+ 				     BDADDR_BREDR);
+@@ -891,14 +887,19 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname,
+ 			err = -EBADFD;
+ 			break;
+ 		}
+-		if (enhanced_sync_conn_capable(hdev) &&
+-		    voice.setting == BT_VOICE_TRANSPARENT)
+-			sco_pi(sk)->codec.id = BT_CODEC_TRANSPARENT;
++
++		switch (sco_pi(sk)->setting & SCO_AIRMODE_MASK) {
++		case SCO_AIRMODE_TRANSP:
++			if (enhanced_sync_conn_capable(hdev))
++				sco_pi(sk)->codec.id = BT_CODEC_TRANSPARENT;
++			break;
++		}
++
+ 		hci_dev_put(hdev);
+ 		break;
+ 
+ 	case BT_PKT_STATUS:
+-		err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen);
++		err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen);
+ 		if (err)
+ 			break;
+ 
+@@ -941,7 +942,8 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname,
+ 			break;
+ 		}
+ 
+-		err = bt_copy_from_sockptr(buffer, optlen, optval, optlen);
++		err = copy_struct_from_sockptr(buffer, sizeof(buffer), optval,
++					       optlen);
+ 		if (err) {
+ 			hci_dev_put(hdev);
+ 			break;
+diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
+index e39479f1c9a486..70fea7c1a4b0a4 100644
+--- a/net/core/net_namespace.c
++++ b/net/core/net_namespace.c
+@@ -443,6 +443,21 @@ static struct net *net_alloc(void)
+ 	goto out;
+ }
+ 
++static LLIST_HEAD(defer_free_list);
++
++static void net_complete_free(void)
++{
++	struct llist_node *kill_list;
++	struct net *net, *next;
++
++	/* Get the list of namespaces to free from last round. */
++	kill_list = llist_del_all(&defer_free_list);
++
++	llist_for_each_entry_safe(net, next, kill_list, defer_free_list)
++		kmem_cache_free(net_cachep, net);
++
++}
++
+ static void net_free(struct net *net)
+ {
+ 	if (refcount_dec_and_test(&net->passive)) {
+@@ -451,7 +466,8 @@ static void net_free(struct net *net)
+ 		/* There should not be any trackers left there. */
+ 		ref_tracker_dir_exit(&net->notrefcnt_tracker);
+ 
+-		kmem_cache_free(net_cachep, net);
++		/* Wait for an extra rcu_barrier() before final free. */
++		llist_add(&net->defer_free_list, &defer_free_list);
+ 	}
+ }
+ 
+@@ -636,6 +652,8 @@ static void cleanup_net(struct work_struct *work)
+ 	 */
+ 	rcu_barrier();
+ 
++	net_complete_free();
++
+ 	/* Finally it is safe to free my network namespace structure */
+ 	list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
+ 		list_del_init(&net->exit_list);
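The shape of the two-phase free introduced here: round N's net_free() queues the struct on a lockless llist instead of freeing it, and the next cleanup_net() run reclaims that backlog only after another rcu_barrier(), guaranteeing a full extra RCU grace period between the final unpublish and kmem_cache_free():

	/* round N   */ llist_add(&net->defer_free_list, &defer_free_list);
	/* round N+1 */ rcu_barrier(); net_complete_free();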
+diff --git a/net/core/sock_map.c b/net/core/sock_map.c
+index 78347d7d25ef31..f1b9b3958792cd 100644
+--- a/net/core/sock_map.c
++++ b/net/core/sock_map.c
+@@ -159,6 +159,7 @@ static void sock_map_del_link(struct sock *sk,
+ 				verdict_stop = true;
+ 			list_del(&link->list);
+ 			sk_psock_free_link(link);
++			break;
+ 		}
+ 	}
+ 	spin_unlock_bh(&psock->link_lock);
+@@ -411,12 +412,11 @@ static void *sock_map_lookup_sys(struct bpf_map *map, void *key)
+ static int __sock_map_delete(struct bpf_stab *stab, struct sock *sk_test,
+ 			     struct sock **psk)
+ {
+-	struct sock *sk;
++	struct sock *sk = NULL;
+ 	int err = 0;
+ 
+ 	spin_lock_bh(&stab->lock);
+-	sk = *psk;
+-	if (!sk_test || sk_test == sk)
++	if (!sk_test || sk_test == *psk)
+ 		sk = xchg(psk, NULL);
+ 
+ 	if (likely(sk))
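Sketch of the stale-read hazard addressed in __sock_map_delete(): the old code loaded *psk into sk before the comparison, so when sk_test did not match, sk still held whatever happened to be in the slot at load time rather than a socket this call actually removed. Initializing sk to NULL and assigning only from xchg() ties the later `if (likely(sk))` cleanup to the slot that was really cleared.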
+diff --git a/net/dsa/tag_ocelot_8021q.c b/net/dsa/tag_ocelot_8021q.c
+index 8e8b1bef6af69d..11ea8cfd62661c 100644
+--- a/net/dsa/tag_ocelot_8021q.c
++++ b/net/dsa/tag_ocelot_8021q.c
+@@ -79,7 +79,7 @@ static struct sk_buff *ocelot_xmit(struct sk_buff *skb,
+ static struct sk_buff *ocelot_rcv(struct sk_buff *skb,
+ 				  struct net_device *netdev)
+ {
+-	int src_port, switch_id;
++	int src_port = -1, switch_id = -1;
+ 
+ 	dsa_8021q_rcv(skb, &src_port, &switch_id, NULL, NULL);
+ 
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 68804fd01dafc4..8efc58716ce969 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -883,8 +883,10 @@ static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
+ 		unsigned int size;
+ 
+ 		if (mptcp_syn_options(sk, skb, &size, &opts->mptcp)) {
+-			opts->options |= OPTION_MPTCP;
+-			remaining -= size;
++			if (remaining >= size) {
++				opts->options |= OPTION_MPTCP;
++				remaining -= size;
++			}
+ 		}
+ 	}
+ 
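Worked example for the SYN option budget: option space is capped at 40 bytes, and earlier options (e.g. MD5/AO, timestamps, window scale, SACK-permitted) may already have consumed most of it. The old code subtracted the MPTCP option size unconditionally, underflowing the unsigned budget when the option no longer fit; now it is simply skipped for that SYN:

	if (remaining >= size) {	/* only claim space that exists */
		opts->options |= OPTION_MPTCP;
		remaining -= size;
	}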
+diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
+index 6dfc61a9acd4a5..1b1bf044378d48 100644
+--- a/net/mac80211/cfg.c
++++ b/net/mac80211/cfg.c
+@@ -1061,13 +1061,13 @@ ieee80211_copy_mbssid_beacon(u8 *pos, struct cfg80211_mbssid_elems *dst,
+ {
+ 	int i, offset = 0;
+ 
++	dst->cnt = src->cnt;
+ 	for (i = 0; i < src->cnt; i++) {
+ 		memcpy(pos + offset, src->elem[i].data, src->elem[i].len);
+ 		dst->elem[i].len = src->elem[i].len;
+ 		dst->elem[i].data = pos + offset;
+ 		offset += dst->elem[i].len;
+ 	}
+-	dst->cnt = src->cnt;
+ 
+ 	return offset;
+ }
+@@ -1911,6 +1911,8 @@ static int sta_link_apply_parameters(struct ieee80211_local *local,
+ 						    params->eht_capa_len,
+ 						    link_sta);
+ 
++	ieee80211_sta_init_nss(link_sta);
++
+ 	if (params->opmode_notif_used) {
+ 		/* returned value is only needed for rc update, but the
+ 		 * rc isn't initialized here yet, so ignore it
+@@ -1920,8 +1922,6 @@ static int sta_link_apply_parameters(struct ieee80211_local *local,
+ 					      sband->band);
+ 	}
+ 
+-	ieee80211_sta_init_nss(link_sta);
+-
+ 	return 0;
+ }
+ 
+@@ -3674,13 +3674,12 @@ void ieee80211_csa_finish(struct ieee80211_vif *vif, unsigned int link_id)
+ }
+ EXPORT_SYMBOL(ieee80211_csa_finish);
+ 
+-void ieee80211_channel_switch_disconnect(struct ieee80211_vif *vif, bool block_tx)
++void ieee80211_channel_switch_disconnect(struct ieee80211_vif *vif)
+ {
+ 	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+ 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+ 	struct ieee80211_local *local = sdata->local;
+ 
+-	sdata->csa_blocked_queues = block_tx;
+ 	sdata_info(sdata, "channel switch failed, disconnecting\n");
+ 	wiphy_work_queue(local->hw.wiphy, &ifmgd->csa_connection_drop_work);
+ }
+diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
+index 3d3c9139ff5e45..7a0242e937d364 100644
+--- a/net/mac80211/ieee80211_i.h
++++ b/net/mac80211/ieee80211_i.h
+@@ -1106,8 +1106,6 @@ struct ieee80211_sub_if_data {
+ 
+ 	unsigned long state;
+ 
+-	bool csa_blocked_queues;
+-
+ 	char name[IFNAMSIZ];
+ 
+ 	struct ieee80211_fragment_cache frags;
+@@ -2411,17 +2409,13 @@ void ieee80211_send_4addr_nullfunc(struct ieee80211_local *local,
+ 				   struct ieee80211_sub_if_data *sdata);
+ void ieee80211_sta_tx_notify(struct ieee80211_sub_if_data *sdata,
+ 			     struct ieee80211_hdr *hdr, bool ack, u16 tx_time);
+-
++unsigned int
++ieee80211_get_vif_queues(struct ieee80211_local *local,
++			 struct ieee80211_sub_if_data *sdata);
+ void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw,
+ 				     unsigned long queues,
+ 				     enum queue_stop_reason reason,
+ 				     bool refcounted);
+-void ieee80211_stop_vif_queues(struct ieee80211_local *local,
+-			       struct ieee80211_sub_if_data *sdata,
+-			       enum queue_stop_reason reason);
+-void ieee80211_wake_vif_queues(struct ieee80211_local *local,
+-			       struct ieee80211_sub_if_data *sdata,
+-			       enum queue_stop_reason reason);
+ void ieee80211_stop_queues_by_reason(struct ieee80211_hw *hw,
+ 				     unsigned long queues,
+ 				     enum queue_stop_reason reason,
+@@ -2432,6 +2426,43 @@ void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue,
+ void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue,
+ 				    enum queue_stop_reason reason,
+ 				    bool refcounted);
++static inline void
++ieee80211_stop_vif_queues(struct ieee80211_local *local,
++			  struct ieee80211_sub_if_data *sdata,
++			  enum queue_stop_reason reason)
++{
++	ieee80211_stop_queues_by_reason(&local->hw,
++					ieee80211_get_vif_queues(local, sdata),
++					reason, true);
++}
++
++static inline void
++ieee80211_wake_vif_queues(struct ieee80211_local *local,
++			  struct ieee80211_sub_if_data *sdata,
++			  enum queue_stop_reason reason)
++{
++	ieee80211_wake_queues_by_reason(&local->hw,
++					ieee80211_get_vif_queues(local, sdata),
++					reason, true);
++}
++static inline void
++ieee80211_stop_vif_queues_norefcount(struct ieee80211_local *local,
++				     struct ieee80211_sub_if_data *sdata,
++				     enum queue_stop_reason reason)
++{
++	ieee80211_stop_queues_by_reason(&local->hw,
++					ieee80211_get_vif_queues(local, sdata),
++					reason, false);
++}
++static inline void
++ieee80211_wake_vif_queues_norefcount(struct ieee80211_local *local,
++				     struct ieee80211_sub_if_data *sdata,
++				     enum queue_stop_reason reason)
++{
++	ieee80211_wake_queues_by_reason(&local->hw,
++					ieee80211_get_vif_queues(local, sdata),
++					reason, false);
++}
+ void ieee80211_add_pending_skb(struct ieee80211_local *local,
+ 			       struct sk_buff *skb);
+ void ieee80211_add_pending_skbs(struct ieee80211_local *local,
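
The ieee80211_i.h hunk above turns the per-vif stop/wake helpers into inline wrappers and adds _norefcount variants, which differ only in the final refcounted flag passed to ieee80211_{stop,wake}_queues_by_reason(). The point of the non-refcounted form is idempotence: stopping twice and waking once still leaves the queue running. A toy model of the two modes, standalone and not mac80211 code:

#include <stdbool.h>
#include <stdio.h>

struct queue {
	int refs;	/* per-reason refcount */
	bool stopped;
};

static void stop(struct queue *q, bool refcounted)
{
	if (refcounted)
		q->refs++;
	q->stopped = true;
}

static void wake(struct queue *q, bool refcounted)
{
	if (refcounted && q->refs > 0 && --q->refs > 0)
		return;		/* other holders still need it stopped */
	q->stopped = false;
}

int main(void)
{
	struct queue q = { 0, false };

	/* non-refcounted: stopping twice and waking once is safe */
	stop(&q, false);
	stop(&q, false);
	wake(&q, false);
	printf("stopped=%d refs=%d\n", q.stopped, q.refs);	/* stopped=0 refs=0 */
	return 0;
}
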
+diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
+index 6ef0990d3d296a..af9055252e6dfa 100644
+--- a/net/mac80211/iface.c
++++ b/net/mac80211/iface.c
+@@ -2364,18 +2364,14 @@ void ieee80211_vif_block_queues_csa(struct ieee80211_sub_if_data *sdata)
+ 	if (ieee80211_hw_check(&local->hw, HANDLES_QUIET_CSA))
+ 		return;
+ 
+-	ieee80211_stop_vif_queues(local, sdata,
+-				  IEEE80211_QUEUE_STOP_REASON_CSA);
+-	sdata->csa_blocked_queues = true;
++	ieee80211_stop_vif_queues_norefcount(local, sdata,
++					     IEEE80211_QUEUE_STOP_REASON_CSA);
+ }
+ 
+ void ieee80211_vif_unblock_queues_csa(struct ieee80211_sub_if_data *sdata)
+ {
+ 	struct ieee80211_local *local = sdata->local;
+ 
+-	if (sdata->csa_blocked_queues) {
+-		ieee80211_wake_vif_queues(local, sdata,
+-					  IEEE80211_QUEUE_STOP_REASON_CSA);
+-		sdata->csa_blocked_queues = false;
+-	}
++	ieee80211_wake_vif_queues_norefcount(local, sdata,
++					     IEEE80211_QUEUE_STOP_REASON_CSA);
+ }
+diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
+index 0303972c23e4cb..111066928b963c 100644
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -2636,8 +2636,6 @@ ieee80211_sta_process_chanswitch(struct ieee80211_link_data *link,
+ 	 */
+ 	link->conf->csa_active = true;
+ 	link->u.mgd.csa.blocked_tx = csa_ie.mode;
+-	sdata->csa_blocked_queues =
+-		csa_ie.mode && !ieee80211_hw_check(&local->hw, HANDLES_QUIET_CSA);
+ 
+ 	wiphy_work_queue(sdata->local->hw.wiphy,
+ 			 &ifmgd->csa_connection_drop_work);
+diff --git a/net/mac80211/util.c b/net/mac80211/util.c
+index f94faa86ba8a35..b4814e97cf7422 100644
+--- a/net/mac80211/util.c
++++ b/net/mac80211/util.c
+@@ -657,7 +657,7 @@ void ieee80211_wake_queues(struct ieee80211_hw *hw)
+ }
+ EXPORT_SYMBOL(ieee80211_wake_queues);
+ 
+-static unsigned int
++unsigned int
+ ieee80211_get_vif_queues(struct ieee80211_local *local,
+ 			 struct ieee80211_sub_if_data *sdata)
+ {
+@@ -669,7 +669,8 @@ ieee80211_get_vif_queues(struct ieee80211_local *local,
+ 		queues = 0;
+ 
+ 		for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
+-			queues |= BIT(sdata->vif.hw_queue[ac]);
++			if (sdata->vif.hw_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
++				queues |= BIT(sdata->vif.hw_queue[ac]);
+ 		if (sdata->vif.cab_queue != IEEE80211_INVAL_HW_QUEUE)
+ 			queues |= BIT(sdata->vif.cab_queue);
+ 	} else {
+@@ -724,24 +725,6 @@ void ieee80211_flush_queues(struct ieee80211_local *local,
+ 	__ieee80211_flush_queues(local, sdata, 0, drop);
+ }
+ 
+-void ieee80211_stop_vif_queues(struct ieee80211_local *local,
+-			       struct ieee80211_sub_if_data *sdata,
+-			       enum queue_stop_reason reason)
+-{
+-	ieee80211_stop_queues_by_reason(&local->hw,
+-					ieee80211_get_vif_queues(local, sdata),
+-					reason, true);
+-}
+-
+-void ieee80211_wake_vif_queues(struct ieee80211_local *local,
+-			       struct ieee80211_sub_if_data *sdata,
+-			       enum queue_stop_reason reason)
+-{
+-	ieee80211_wake_queues_by_reason(&local->hw,
+-					ieee80211_get_vif_queues(local, sdata),
+-					reason, true);
+-}
+-
+ static void __iterate_interfaces(struct ieee80211_local *local,
+ 				 u32 iter_flags,
+ 				 void (*iterator)(void *data, u8 *mac,
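
In the util.c hunk above, ieee80211_get_vif_queues() now skips ACs whose hw_queue is still unset; without the check, BIT(0xff) would shift far past the width of the bitmap, which is undefined behaviour. The same filter in miniature; the constant's value mirrors mac80211's IEEE80211_INVAL_HW_QUEUE but is assumed here:

#include <stdio.h>

#define NUM_ACS		4
#define INVAL_HW_QUEUE	0xff

int main(void)
{
	unsigned char hw_queue[NUM_ACS] = { 0, 1, INVAL_HW_QUEUE, 3 };
	unsigned int queues = 0;
	int ac;

	for (ac = 0; ac < NUM_ACS; ac++)
		if (hw_queue[ac] != INVAL_HW_QUEUE)	/* skip the sentinel */
			queues |= 1u << hw_queue[ac];

	printf("queues bitmap: 0x%x\n", queues);	/* 0xb; no out-of-range shift */
	return 0;
}
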
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 4a137afaf0b87e..0c5ff4afc37022 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -1495,7 +1495,6 @@ static int nf_tables_newtable(struct sk_buff *skb, const struct nfnl_info *info,
+ 	INIT_LIST_HEAD(&table->sets);
+ 	INIT_LIST_HEAD(&table->objects);
+ 	INIT_LIST_HEAD(&table->flowtables);
+-	write_pnet(&table->net, net);
+ 	table->family = family;
+ 	table->flags = flags;
+ 	table->handle = ++nft_net->table_handle;
+@@ -3884,8 +3883,11 @@ void nf_tables_rule_destroy(const struct nft_ctx *ctx, struct nft_rule *rule)
+ 	kfree(rule);
+ }
+ 
++/* can only be used if rule is no longer visible to dumps */
+ static void nf_tables_rule_release(const struct nft_ctx *ctx, struct nft_rule *rule)
+ {
++	lockdep_commit_lock_is_held(ctx->net);
++
+ 	nft_rule_expr_deactivate(ctx, rule, NFT_TRANS_RELEASE);
+ 	nf_tables_rule_destroy(ctx, rule);
+ }
+@@ -5650,6 +5652,8 @@ void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set,
+ 			      struct nft_set_binding *binding,
+ 			      enum nft_trans_phase phase)
+ {
++	lockdep_commit_lock_is_held(ctx->net);
++
+ 	switch (phase) {
+ 	case NFT_TRANS_PREPARE_ERROR:
+ 		nft_set_trans_unbind(ctx, set);
+@@ -11456,19 +11460,6 @@ static void __nft_release_basechain_now(struct nft_ctx *ctx)
+ 	nf_tables_chain_destroy(ctx->chain);
+ }
+ 
+-static void nft_release_basechain_rcu(struct rcu_head *head)
+-{
+-	struct nft_chain *chain = container_of(head, struct nft_chain, rcu_head);
+-	struct nft_ctx ctx = {
+-		.family	= chain->table->family,
+-		.chain	= chain,
+-		.net	= read_pnet(&chain->table->net),
+-	};
+-
+-	__nft_release_basechain_now(&ctx);
+-	put_net(ctx.net);
+-}
+-
+ int __nft_release_basechain(struct nft_ctx *ctx)
+ {
+ 	struct nft_rule *rule;
+@@ -11483,11 +11474,18 @@ int __nft_release_basechain(struct nft_ctx *ctx)
+ 	nft_chain_del(ctx->chain);
+ 	nft_use_dec(&ctx->table->use);
+ 
+-	if (maybe_get_net(ctx->net))
+-		call_rcu(&ctx->chain->rcu_head, nft_release_basechain_rcu);
+-	else
++	if (!maybe_get_net(ctx->net)) {
+ 		__nft_release_basechain_now(ctx);
++		return 0;
++	}
++
++	/* wait for ruleset dumps to complete.  Owning chain is no longer in
++	 * lists, so new dumps can't find any of these rules anymore.
++	 */
++	synchronize_rcu();
+ 
++	__nft_release_basechain_now(ctx);
++	put_net(ctx->net);
+ 	return 0;
+ }
+ EXPORT_SYMBOL_GPL(__nft_release_basechain);
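
The nf_tables change above replaces the call_rcu() detour with a plain synchronize_rcu(): once the chain is unlinked from all lists, waiting out one grace period guarantees no ruleset dump still holds a reference, after which teardown can run synchronously. A userspace sketch of the unlink/wait/free ordering using liburcu; the library and the plain pointer store are assumptions for illustration (the kernel uses its in-kernel RCU API), build with gcc sketch.c -lurcu:

#include <urcu.h>
#include <stdio.h>
#include <stdlib.h>

struct rule { int id; };

static struct rule *chain;	/* stands in for the RCU-protected ruleset */

static void release_chain(void)
{
	struct rule *old = chain;

	chain = NULL;		/* unlink: new readers find nothing */
	synchronize_rcu();	/* wait for in-flight "dumps" to finish */
	free(old);		/* no reader can still see it */
}

int main(void)
{
	rcu_register_thread();

	chain = malloc(sizeof(*chain));
	chain->id = 1;

	rcu_read_lock();	/* a dump briefly holds a read-side lock */
	rcu_read_unlock();

	release_chain();
	printf("chain freed after one grace period\n");

	rcu_unregister_thread();
	return 0;
}
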
+diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c
+index f8b25b6f5da736..9869ef3c2ab378 100644
+--- a/net/netfilter/xt_IDLETIMER.c
++++ b/net/netfilter/xt_IDLETIMER.c
+@@ -409,21 +409,23 @@ static void idletimer_tg_destroy(const struct xt_tgdtor_param *par)
+ 
+ 	mutex_lock(&list_mutex);
+ 
+-	if (--info->timer->refcnt == 0) {
+-		pr_debug("deleting timer %s\n", info->label);
+-
+-		list_del(&info->timer->entry);
+-		timer_shutdown_sync(&info->timer->timer);
+-		cancel_work_sync(&info->timer->work);
+-		sysfs_remove_file(idletimer_tg_kobj, &info->timer->attr.attr);
+-		kfree(info->timer->attr.attr.name);
+-		kfree(info->timer);
+-	} else {
++	if (--info->timer->refcnt > 0) {
+ 		pr_debug("decreased refcnt of timer %s to %u\n",
+ 			 info->label, info->timer->refcnt);
++		mutex_unlock(&list_mutex);
++		return;
+ 	}
+ 
++	pr_debug("deleting timer %s\n", info->label);
++
++	list_del(&info->timer->entry);
+ 	mutex_unlock(&list_mutex);
++
++	timer_shutdown_sync(&info->timer->timer);
++	cancel_work_sync(&info->timer->work);
++	sysfs_remove_file(idletimer_tg_kobj, &info->timer->attr.attr);
++	kfree(info->timer->attr.attr.name);
++	kfree(info->timer);
+ }
+ 
+ static void idletimer_tg_destroy_v1(const struct xt_tgdtor_param *par)
+@@ -434,25 +436,27 @@ static void idletimer_tg_destroy_v1(const struct xt_tgdtor_param *par)
+ 
+ 	mutex_lock(&list_mutex);
+ 
+-	if (--info->timer->refcnt == 0) {
+-		pr_debug("deleting timer %s\n", info->label);
+-
+-		list_del(&info->timer->entry);
+-		if (info->timer->timer_type & XT_IDLETIMER_ALARM) {
+-			alarm_cancel(&info->timer->alarm);
+-		} else {
+-			timer_shutdown_sync(&info->timer->timer);
+-		}
+-		cancel_work_sync(&info->timer->work);
+-		sysfs_remove_file(idletimer_tg_kobj, &info->timer->attr.attr);
+-		kfree(info->timer->attr.attr.name);
+-		kfree(info->timer);
+-	} else {
++	if (--info->timer->refcnt > 0) {
+ 		pr_debug("decreased refcnt of timer %s to %u\n",
+ 			 info->label, info->timer->refcnt);
++		mutex_unlock(&list_mutex);
++		return;
+ 	}
+ 
++	pr_debug("deleting timer %s\n", info->label);
++
++	list_del(&info->timer->entry);
+ 	mutex_unlock(&list_mutex);
++
++	if (info->timer->timer_type & XT_IDLETIMER_ALARM) {
++		alarm_cancel(&info->timer->alarm);
++	} else {
++		timer_shutdown_sync(&info->timer->timer);
++	}
++	cancel_work_sync(&info->timer->work);
++	sysfs_remove_file(idletimer_tg_kobj, &info->timer->attr.attr);
++	kfree(info->timer->attr.attr.name);
++	kfree(info->timer);
+ }
+ 
+ 
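
Both idletimer destroy paths above are restructured into the same shape: hold list_mutex only long enough to drop the refcount and unlink the timer, then release it before the blocking timer_shutdown_sync()/cancel_work_sync() calls, which could otherwise deadlock against a callback that takes the mutex itself. The shape in miniature, with a pthread mutex standing in for list_mutex and sync_cancel() for the blocking teardown calls:

#include <pthread.h>
#include <stdlib.h>

struct timer_obj {
	struct timer_obj *next;
	int refcnt;
};

static pthread_mutex_t list_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct timer_obj *timer_list;

static void list_del(struct timer_obj *t)
{
	struct timer_obj **p;

	for (p = &timer_list; *p; p = &(*p)->next)
		if (*p == t) {
			*p = t->next;
			return;
		}
}

/* Stand-in for timer_shutdown_sync()/cancel_work_sync(): may block on a
 * callback that itself takes list_mutex, so it must run unlocked.
 */
static void sync_cancel(struct timer_obj *t) { (void)t; }

static void destroy(struct timer_obj *t)
{
	pthread_mutex_lock(&list_mutex);
	if (--t->refcnt > 0) {
		pthread_mutex_unlock(&list_mutex);
		return;
	}
	list_del(t);			/* unlink while holding the lock */
	pthread_mutex_unlock(&list_mutex);

	sync_cancel(t);			/* blocking teardown, lock released */
	free(t);
}

int main(void)
{
	struct timer_obj *t = calloc(1, sizeof(*t));

	t->refcnt = 1;
	t->next = timer_list;
	timer_list = t;
	destroy(t);
	return 0;
}
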
+diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
+index 39382ee1e33108..3b519adc01259f 100644
+--- a/net/sched/sch_netem.c
++++ b/net/sched/sch_netem.c
+@@ -78,6 +78,8 @@ struct netem_sched_data {
+ 	struct sk_buff	*t_head;
+ 	struct sk_buff	*t_tail;
+ 
++	u32 t_len;
++
+ 	/* optional qdisc for classful handling (NULL at netem init) */
+ 	struct Qdisc	*qdisc;
+ 
+@@ -382,6 +384,7 @@ static void tfifo_reset(struct Qdisc *sch)
+ 	rtnl_kfree_skbs(q->t_head, q->t_tail);
+ 	q->t_head = NULL;
+ 	q->t_tail = NULL;
++	q->t_len = 0;
+ }
+ 
+ static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
+@@ -411,6 +414,7 @@ static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
+ 		rb_link_node(&nskb->rbnode, parent, p);
+ 		rb_insert_color(&nskb->rbnode, &q->t_root);
+ 	}
++	q->t_len++;
+ 	sch->q.qlen++;
+ }
+ 
+@@ -517,7 +521,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ 			1<<get_random_u32_below(8);
+ 	}
+ 
+-	if (unlikely(sch->q.qlen >= sch->limit)) {
++	if (unlikely(q->t_len >= sch->limit)) {
+ 		/* re-link segs, so that qdisc_drop_all() frees them all */
+ 		skb->next = segs;
+ 		qdisc_drop_all(skb, sch, to_free);
+@@ -701,8 +705,8 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
+ tfifo_dequeue:
+ 	skb = __qdisc_dequeue_head(&sch->q);
+ 	if (skb) {
+-		qdisc_qstats_backlog_dec(sch, skb);
+ deliver:
++		qdisc_qstats_backlog_dec(sch, skb);
+ 		qdisc_bstats_update(sch, skb);
+ 		return skb;
+ 	}
+@@ -718,8 +722,7 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
+ 
+ 		if (time_to_send <= now && q->slot.slot_next <= now) {
+ 			netem_erase_head(q, skb);
+-			sch->q.qlen--;
+-			qdisc_qstats_backlog_dec(sch, skb);
++			q->t_len--;
+ 			skb->next = NULL;
+ 			skb->prev = NULL;
+ 			/* skb->dev shares skb->rbnode area,
+@@ -746,16 +749,21 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
+ 					if (net_xmit_drop_count(err))
+ 						qdisc_qstats_drop(sch);
+ 					qdisc_tree_reduce_backlog(sch, 1, pkt_len);
++					sch->qstats.backlog -= pkt_len;
++					sch->q.qlen--;
+ 				}
+ 				goto tfifo_dequeue;
+ 			}
++			sch->q.qlen--;
+ 			goto deliver;
+ 		}
+ 
+ 		if (q->qdisc) {
+ 			skb = q->qdisc->ops->dequeue(q->qdisc);
+-			if (skb)
++			if (skb) {
++				sch->q.qlen--;
+ 				goto deliver;
++			}
+ 		}
+ 
+ 		qdisc_watchdog_schedule_ns(&q->watchdog,
+@@ -765,8 +773,10 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
+ 
+ 	if (q->qdisc) {
+ 		skb = q->qdisc->ops->dequeue(q->qdisc);
+-		if (skb)
++		if (skb) {
++			sch->q.qlen--;
+ 			goto deliver;
++		}
+ 	}
+ 	return NULL;
+ }
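
The netem change above splits the bookkeeping in two: sch->q.qlen keeps counting every packet the qdisc owns, while the new t_len counts only the internal time-sorted queue, so the limit check no longer rejects packets merely because some are parked in an optional child qdisc. A toy model of the two counters; the names loosely mirror the patch:

#include <stdio.h>

struct toy_netem {
	unsigned int t_len;	/* packets in the internal tfifo */
	unsigned int qlen;	/* all packets owned by this qdisc */
	unsigned int child_len;	/* packets parked in the child qdisc */
	unsigned int limit;
};

static int enqueue(struct toy_netem *q)
{
	if (q->t_len >= q->limit)	/* the limit applies to the tfifo only */
		return -1;
	q->t_len++;
	q->qlen++;
	return 0;
}

static void move_to_child(struct toy_netem *q)
{
	q->t_len--;		/* left the tfifo... */
	q->child_len++;		/* ...but still owned, so qlen is unchanged */
}

static void dequeue_from_child(struct toy_netem *q)
{
	q->child_len--;
	q->qlen--;
}

int main(void)
{
	struct toy_netem q = { .limit = 2 };

	enqueue(&q);
	enqueue(&q);
	move_to_child(&q);
	printf("t_len=%u qlen=%u (room in tfifo again: %s)\n",
	       q.t_len, q.qlen, q.t_len < q.limit ? "yes" : "no");
	dequeue_from_child(&q);
	return 0;
}
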
+diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
+index b7e25e7e9933b6..108a4cc2e00107 100644
+--- a/net/tipc/udp_media.c
++++ b/net/tipc/udp_media.c
+@@ -807,6 +807,7 @@ static void cleanup_bearer(struct work_struct *work)
+ {
+ 	struct udp_bearer *ub = container_of(work, struct udp_bearer, work);
+ 	struct udp_replicast *rcast, *tmp;
++	struct tipc_net *tn;
+ 
+ 	list_for_each_entry_safe(rcast, tmp, &ub->rcast.list, list) {
+ 		dst_cache_destroy(&rcast->dst_cache);
+@@ -814,10 +815,14 @@ static void cleanup_bearer(struct work_struct *work)
+ 		kfree_rcu(rcast, rcu);
+ 	}
+ 
++	tn = tipc_net(sock_net(ub->ubsock->sk));
++
+ 	dst_cache_destroy(&ub->rcast.dst_cache);
+ 	udp_tunnel_sock_release(ub->ubsock);
++
++	/* Note: could use a call_rcu() to avoid another synchronize_net() */
+ 	synchronize_net();
+-	atomic_dec(&tipc_net(sock_net(ub->ubsock->sk))->wq_count);
++	atomic_dec(&tn->wq_count);
+ 	kfree(ub);
+ }
+ 
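
The TIPC hunk above is an ordering fix: sock_net(ub->ubsock->sk) must be read before udp_tunnel_sock_release() frees the socket, so the pointer is saved into a local first. The same save-before-release shape in a standalone sketch; the types and helpers are stand-ins, not the real TIPC or UDP-tunnel API:

#include <stdio.h>
#include <stdlib.h>

struct net { int wq_count; };
struct sock { struct net *net; };

static void sock_release(struct sock *sk)
{
	free(sk);	/* after this, touching sk->net is a use-after-free */
}

static void cleanup(struct sock *sk)
{
	struct net *n = sk->net;	/* grab it while sk is still alive */

	sock_release(sk);
	n->wq_count--;			/* safe: n was saved first */
}

int main(void)
{
	struct net net = { .wq_count = 1 };
	struct sock *sk = malloc(sizeof(*sk));

	sk->net = &net;
	cleanup(sk);
	printf("wq_count=%d\n", net.wq_count);
	return 0;
}
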
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 001ccc55ef0f93..6b176230044397 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -2313,6 +2313,7 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
+ 		fds_sent = true;
+ 
+ 		if (unlikely(msg->msg_flags & MSG_SPLICE_PAGES)) {
++			skb->ip_summed = CHECKSUM_UNNECESSARY;
+ 			err = skb_splice_from_iter(skb, &msg->msg_iter, size,
+ 						   sk->sk_allocation);
+ 			if (err < 0) {
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index 9b1b9dc5a7eb2a..1e78f575fb5630 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -814,7 +814,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
+ 	[NL80211_ATTR_MLO_LINKS] =
+ 		NLA_POLICY_NESTED_ARRAY(nl80211_policy),
+ 	[NL80211_ATTR_MLO_LINK_ID] =
+-		NLA_POLICY_RANGE(NLA_U8, 0, IEEE80211_MLD_MAX_NUM_LINKS),
++		NLA_POLICY_RANGE(NLA_U8, 0, IEEE80211_MLD_MAX_NUM_LINKS - 1),
+ 	[NL80211_ATTR_MLD_ADDR] = NLA_POLICY_EXACT_LEN(ETH_ALEN),
+ 	[NL80211_ATTR_MLO_SUPPORT] = { .type = NLA_FLAG },
+ 	[NL80211_ATTR_MAX_NUM_AKM_SUITES] = { .type = NLA_REJECT },
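
The nl80211 one-liner above is a classic inclusive-range off-by-one: with IEEE80211_MLD_MAX_NUM_LINKS possible links the valid IDs run from 0 through MAX - 1, so an inclusive policy range must use MAX - 1 as its upper bound. Spelled out, with the constant's value assumed for illustration:

#include <stdbool.h>
#include <stdio.h>

#define MLD_MAX_NUM_LINKS 15	/* illustrative value */

static bool link_id_valid(unsigned int id)
{
	return id <= MLD_MAX_NUM_LINKS - 1;	/* not "<= MAX" */
}

int main(void)
{
	printf("id 14: %s\n", link_id_valid(14) ? "ok" : "rejected");
	printf("id 15: %s\n", link_id_valid(15) ? "ok" : "rejected");
	return 0;
}
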
+diff --git a/net/wireless/sme.c b/net/wireless/sme.c
+index 431da30817a6f6..26817160008766 100644
+--- a/net/wireless/sme.c
++++ b/net/wireless/sme.c
+@@ -83,6 +83,7 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev)
+ 	if (!request)
+ 		return -ENOMEM;
+ 
++	request->n_channels = n_channels;
+ 	if (wdev->conn->params.channel) {
+ 		enum nl80211_band band = wdev->conn->params.channel->band;
+ 		struct ieee80211_supported_band *sband =
+diff --git a/rust/Makefile b/rust/Makefile
+index b5e0a73b78f3e5..9f59baacaf7730 100644
+--- a/rust/Makefile
++++ b/rust/Makefile
+@@ -267,9 +267,22 @@ endif
+ 
+ bindgen_c_flags_final = $(bindgen_c_flags_lto) -D__BINDGEN__
+ 
++# Each `bindgen` release may upgrade the list of Rust target versions. By
++# default, the highest stable release in their list is used. Thus we need to set
++# a `--rust-target` to avoid future `bindgen` releases emitting code that
++# `rustc` may not understand. On top of that, `bindgen` does not support passing
++# an unknown Rust target version.
++#
++# Therefore, the Rust target for `bindgen` can be only as high as the minimum
++# Rust version the kernel supports and only as high as the greatest stable Rust
++# target supported by the minimum `bindgen` version the kernel supports (that
++# is, if we do not test the actual `rustc`/`bindgen` versions running).
++#
++# Starting with `bindgen` 0.71.0, we will be able to set any future Rust version
++# instead, i.e. we will be able to set here our minimum supported Rust version.
+ quiet_cmd_bindgen = BINDGEN $@
+       cmd_bindgen = \
+-	$(BINDGEN) $< $(bindgen_target_flags) \
++	$(BINDGEN) $< $(bindgen_target_flags) --rust-target 1.68 \
+ 		--use-core --with-derive-default --ctypes-prefix core::ffi --no-layout-tests \
+ 		--no-debug '.*' --enable-function-attribute-detection \
+ 		-o $@ -- $(bindgen_c_flags_final) -DMODULE \
+diff --git a/sound/core/control_led.c b/sound/core/control_led.c
+index 65a1ebe877768f..e33dfcf863cf13 100644
+--- a/sound/core/control_led.c
++++ b/sound/core/control_led.c
+@@ -668,10 +668,16 @@ static void snd_ctl_led_sysfs_add(struct snd_card *card)
+ 			goto cerr;
+ 		led->cards[card->number] = led_card;
+ 		snprintf(link_name, sizeof(link_name), "led-%s", led->name);
+-		WARN(sysfs_create_link(&card->ctl_dev->kobj, &led_card->dev.kobj, link_name),
+-			"can't create symlink to controlC%i device\n", card->number);
+-		WARN(sysfs_create_link(&led_card->dev.kobj, &card->card_dev.kobj, "card"),
+-			"can't create symlink to card%i\n", card->number);
++		if (sysfs_create_link(&card->ctl_dev->kobj, &led_card->dev.kobj,
++				      link_name))
++			dev_err(card->dev,
++				"%s: can't create symlink to controlC%i device\n",
++				 __func__, card->number);
++		if (sysfs_create_link(&led_card->dev.kobj, &card->card_dev.kobj,
++				      "card"))
++			dev_err(card->dev,
++				"%s: can't create symlink to card%i\n",
++				__func__, card->number);
+ 
+ 		continue;
+ cerr:
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 973671e0cdb09d..192fc75b51e6db 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -10127,6 +10127,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1025, 0x1430, "Acer TravelMate B311R-31", ALC256_FIXUP_ACER_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1025, 0x1466, "Acer Aspire A515-56", ALC255_FIXUP_ACER_HEADPHONE_AND_MIC),
+ 	SND_PCI_QUIRK(0x1025, 0x1534, "Acer Predator PH315-54", ALC255_FIXUP_ACER_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1025, 0x159c, "Acer Nitro 5 AN515-58", ALC2XX_FIXUP_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1025, 0x169a, "Acer Swift SFG16", ALC256_FIXUP_ACER_SFG16_MICMUTE_LED),
+ 	SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
+ 	SND_PCI_QUIRK(0x1028, 0x053c, "Dell Latitude E5430", ALC292_FIXUP_DELL_E7X),
+diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
+index e38c5885dadfbc..ecf57a6cb7c37d 100644
+--- a/sound/soc/amd/yc/acp6x-mach.c
++++ b/sound/soc/amd/yc/acp6x-mach.c
+@@ -578,14 +578,19 @@ static int acp6x_probe(struct platform_device *pdev)
+ 
+ 	handle = ACPI_HANDLE(pdev->dev.parent);
+ 	ret = acpi_evaluate_integer(handle, "_WOV", NULL, &dmic_status);
+-	if (!ACPI_FAILURE(ret))
++	if (!ACPI_FAILURE(ret)) {
+ 		wov_en = dmic_status;
++		if (!wov_en)
++			return -ENODEV;
++	} else {
++		/* In case of ACPI method read failure, jump to check_dmi_entry */
++		goto check_dmi_entry;
++	}
+ 
+-	if (is_dmic_enable && wov_en)
++	if (is_dmic_enable)
+ 		platform_set_drvdata(pdev, &acp6x_card);
+-	else
+-		return 0;
+ 
++check_dmi_entry:
+ 	/* check for any DMI overrides */
+ 	dmi_id = dmi_first_match(yc_acp_quirk_table);
+ 	if (dmi_id)
+diff --git a/sound/soc/codecs/tas2781-i2c.c b/sound/soc/codecs/tas2781-i2c.c
+index 12d093437ba9b6..1b2f55030c3961 100644
+--- a/sound/soc/codecs/tas2781-i2c.c
++++ b/sound/soc/codecs/tas2781-i2c.c
+@@ -370,7 +370,7 @@ static void sngl_calib_start(struct tasdevice_priv *tas_priv, int i,
+ 			tasdevice_dev_read(tas_priv, i, p[j].reg,
+ 				(int *)&p[j].val[0]);
+ 		} else {
+-			switch (p[j].reg) {
++			switch (tas2781_cali_start_reg[j].reg) {
+ 			case 0: {
+ 				if (!reg[0])
+ 					continue;
+diff --git a/sound/soc/fsl/fsl_spdif.c b/sound/soc/fsl/fsl_spdif.c
+index b6ff04f7138a2c..ee946e0d3f4969 100644
+--- a/sound/soc/fsl/fsl_spdif.c
++++ b/sound/soc/fsl/fsl_spdif.c
+@@ -1204,7 +1204,7 @@ static struct snd_kcontrol_new fsl_spdif_ctrls[] = {
+ 	},
+ 	/* DPLL lock info get controller */
+ 	{
+-		.iface = SNDRV_CTL_ELEM_IFACE_PCM,
++		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ 		.name = RX_SAMPLE_RATE_KCONTROL,
+ 		.access = SNDRV_CTL_ELEM_ACCESS_READ |
+ 			SNDRV_CTL_ELEM_ACCESS_VOLATILE,
+diff --git a/sound/soc/fsl/fsl_xcvr.c b/sound/soc/fsl/fsl_xcvr.c
+index beede7344efd63..4341269eb97780 100644
+--- a/sound/soc/fsl/fsl_xcvr.c
++++ b/sound/soc/fsl/fsl_xcvr.c
+@@ -169,7 +169,7 @@ static int fsl_xcvr_capds_put(struct snd_kcontrol *kcontrol,
+ }
+ 
+ static struct snd_kcontrol_new fsl_xcvr_earc_capds_kctl = {
+-	.iface = SNDRV_CTL_ELEM_IFACE_PCM,
++	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ 	.name = "Capabilities Data Structure",
+ 	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ 	.info = fsl_xcvr_type_capds_bytes_info,
+diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
+index a58842a8c8a641..db57292c00ca1e 100644
+--- a/sound/soc/intel/boards/sof_sdw.c
++++ b/sound/soc/intel/boards/sof_sdw.c
+@@ -1003,8 +1003,12 @@ static int sof_card_dai_links_create(struct snd_soc_card *card)
+ 		return ret;
+ 	}
+ 
+-	/* One per DAI link, worst case is a DAI link for every endpoint */
+-	sof_dais = kcalloc(num_ends, sizeof(*sof_dais), GFP_KERNEL);
++	/*
++	 * One per DAI link; worst case is a DAI link for every endpoint. Also
++	 * add one extra entry to act as a terminator so that code can iterate
++	 * until it hits an uninitialised DAI.
++	 */
++	sof_dais = kcalloc(num_ends + 1, sizeof(*sof_dais), GFP_KERNEL);
+ 	if (!sof_dais)
+ 		return -ENOMEM;
+ 
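
The sof_sdw fix above is the sentinel-terminator idiom: allocate one extra zeroed element so consumers can walk the array until they hit an uninitialised entry, instead of carrying a separate count everywhere. In miniature:

#include <stdio.h>
#include <stdlib.h>

struct dai {
	const char *name;	/* NULL in the zeroed terminator entry */
};

int main(void)
{
	int num_ends = 2;
	/* one per endpoint, plus a zeroed terminator */
	struct dai *dais = calloc(num_ends + 1, sizeof(*dais));

	if (!dais)
		return 1;
	dais[0].name = "dai0";
	dais[1].name = "dai1";

	for (struct dai *d = dais; d->name; d++)	/* stops at terminator */
		printf("%s\n", d->name);

	free(dais);
	return 0;
}
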
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 00101875d9a8d5..a0767de7f1b7ed 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -2179,6 +2179,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ 		   QUIRK_FLAG_CTL_MSG_DELAY_1M | QUIRK_FLAG_MIC_RES_384),
+ 	DEVICE_FLG(0x046d, 0x09a4, /* Logitech QuickCam E 3500 */
+ 		   QUIRK_FLAG_CTL_MSG_DELAY_1M | QUIRK_FLAG_IGNORE_CTL_ERROR),
++	DEVICE_FLG(0x0499, 0x1506, /* Yamaha THR5 */
++		   QUIRK_FLAG_GENERIC_IMPLICIT_FB),
+ 	DEVICE_FLG(0x0499, 0x1509, /* Steinberg UR22 */
+ 		   QUIRK_FLAG_GENERIC_IMPLICIT_FB),
+ 	DEVICE_FLG(0x0499, 0x3108, /* Yamaha YIT-W12TX */
+diff --git a/tools/lib/perf/evlist.c b/tools/lib/perf/evlist.c
+index c6d67fc9e57ef0..83c43dc13313cc 100644
+--- a/tools/lib/perf/evlist.c
++++ b/tools/lib/perf/evlist.c
+@@ -47,6 +47,20 @@ static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
+ 		 */
+ 		perf_cpu_map__put(evsel->cpus);
+ 		evsel->cpus = perf_cpu_map__intersect(evlist->user_requested_cpus, evsel->own_cpus);
++
++		/*
++		 * Empty cpu lists would eventually get opened as "any" so remove
++		 * genuinely empty ones before they're opened in the wrong place.
++		 */
++		if (perf_cpu_map__is_empty(evsel->cpus)) {
++			struct perf_evsel *next = perf_evlist__next(evlist, evsel);
++
++			perf_evlist__remove(evlist, evsel);
++			/* Keep idx contiguous */
++			if (next)
++				list_for_each_entry_from(next, &evlist->entries, node)
++					next->idx--;
++		}
+ 	} else if (!evsel->own_cpus || evlist->has_user_cpus ||
+ 		(!evsel->requires_cpu && perf_cpu_map__has_any_cpu(evlist->user_requested_cpus))) {
+ 		/*
+@@ -80,11 +94,11 @@ static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
+ 
+ static void perf_evlist__propagate_maps(struct perf_evlist *evlist)
+ {
+-	struct perf_evsel *evsel;
++	struct perf_evsel *evsel, *n;
+ 
+ 	evlist->needs_map_propagation = true;
+ 
+-	perf_evlist__for_each_evsel(evlist, evsel)
++	list_for_each_entry_safe(evsel, n, &evlist->entries, node)
+ 		__perf_evlist__propagate_maps(evlist, evsel);
+ }
+ 
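
The evlist change above switches the caller to a deletion-safe iterator because __perf_evlist__propagate_maps() may now remove the current element; the _safe variant caches the next node before the loop body runs. The idiom reduced to plain C:

#include <stdio.h>
#include <stdlib.h>

struct node {
	int val;
	struct node *next;
};

/* Deletion-safe traversal: grab 'next' before the body may free 'cur'. */
static void drop_evens(struct node **head)
{
	struct node **link = head;
	struct node *cur, *next;

	for (cur = *head; cur; cur = next) {
		next = cur->next;		/* cached, like _safe iterators */
		if (cur->val % 2 == 0) {
			*link = next;
			free(cur);
		} else {
			link = &cur->next;
		}
	}
}

int main(void)
{
	struct node *head = NULL, **tail = &head;

	for (int i = 1; i <= 4; i++) {
		*tail = calloc(1, sizeof(**tail));
		(*tail)->val = i;
		tail = &(*tail)->next;
	}
	drop_evens(&head);
	for (struct node *n = head; n; n = n->next)
		printf("%d ", n->val);		/* 1 3 */
	printf("\n");
	return 0;
}
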
+diff --git a/tools/objtool/check.c b/tools/objtool/check.c
+index 6604f5d038aadf..f0d8796b984a80 100644
+--- a/tools/objtool/check.c
++++ b/tools/objtool/check.c
+@@ -3820,9 +3820,12 @@ static int validate_branch(struct objtool_file *file, struct symbol *func,
+ 			break;
+ 
+ 		case INSN_CONTEXT_SWITCH:
+-			if (func && (!next_insn || !next_insn->hint)) {
+-				WARN_INSN(insn, "unsupported instruction in callable function");
+-				return 1;
++			if (func) {
++				if (!next_insn || !next_insn->hint) {
++					WARN_INSN(insn, "unsupported instruction in callable function");
++					return 1;
++				}
++				break;
+ 			}
+ 			return 0;
+ 
+diff --git a/tools/perf/builtin-ftrace.c b/tools/perf/builtin-ftrace.c
+index 272d3c70810e7d..a56cf8b0a7d405 100644
+--- a/tools/perf/builtin-ftrace.c
++++ b/tools/perf/builtin-ftrace.c
+@@ -1151,8 +1151,9 @@ static int cmp_profile_data(const void *a, const void *b)
+ 
+ 	if (v1 > v2)
+ 		return -1;
+-	else
++	if (v1 < v2)
+ 		return 1;
++	return 0;
+ }
+ 
+ static void print_profile_result(struct perf_ftrace *ftrace)
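
The builtin-ftrace fix above is about comparator correctness: returning 1 when the keys are equal makes the function claim both a < b and b < a, violating the total order that qsort-style sorters rely on and risking unstable or corrupted results. The fixed three-way shape, standalone:

#include <stdio.h>
#include <stdlib.h>

/* Descending three-way comparator: must return 0 for equal keys. */
static int cmp_desc(const void *a, const void *b)
{
	unsigned long long v1 = *(const unsigned long long *)a;
	unsigned long long v2 = *(const unsigned long long *)b;

	if (v1 > v2)
		return -1;
	if (v1 < v2)
		return 1;
	return 0;
}

int main(void)
{
	unsigned long long vals[] = { 3, 7, 7, 1 };

	qsort(vals, 4, sizeof(vals[0]), cmp_desc);
	for (int i = 0; i < 4; i++)
		printf("%llu ", vals[i]);	/* 7 7 3 1 */
	printf("\n");
	return 0;
}
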
+diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
+index 8982f68e7230cd..e763e8d99a4367 100644
+--- a/tools/perf/util/build-id.c
++++ b/tools/perf/util/build-id.c
+@@ -277,7 +277,7 @@ static int write_buildid(const char *name, size_t name_len, struct build_id *bid
+ 	struct perf_record_header_build_id b;
+ 	size_t len;
+ 
+-	len = sizeof(b) + name_len + 1;
++	len = name_len + 1;
+ 	len = PERF_ALIGN(len, sizeof(u64));
+ 
+ 	memset(&b, 0, sizeof(b));
+@@ -286,7 +286,7 @@ static int write_buildid(const char *name, size_t name_len, struct build_id *bid
+ 	misc |= PERF_RECORD_MISC_BUILD_ID_SIZE;
+ 	b.pid = pid;
+ 	b.header.misc = misc;
+-	b.header.size = len;
++	b.header.size = sizeof(b) + len;
+ 
+ 	err = do_write(fd, &b, sizeof(b));
+ 	if (err < 0)
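
The build-id fix above separates two quantities the old code conflated: the u64 alignment applies to the variable-length name payload only, and header.size is then the fixed header plus that padded payload. Aligning sizeof(b) + name_len + 1 as one blob only happens to agree when the header length is itself 8-byte aligned. Spelled out, with an assumed non-aligned header size:

#include <stdio.h>
#include <string.h>

#define ALIGN8(x) (((x) + 7ULL) & ~7ULL)

int main(void)
{
	const char *name = "/lib/libc.so.6";	/* 14 chars */
	size_t hdr = 36;		/* assumed: fixed header, not 8-aligned */
	size_t name_len = strlen(name);

	size_t payload = ALIGN8(name_len + 1);	/* pad name + NUL to u64 */
	size_t record = hdr + payload;		/* bytes actually written */

	size_t buggy = ALIGN8(hdr + name_len + 1);	/* old computation */

	printf("payload=%zu record=%zu (buggy header.size=%zu)\n",
	       payload, record, buggy);		/* 16, 52, 56: a mismatch */
	return 0;
}
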
+diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
+index 4f0ac998b0ccfd..27d5345d2b307a 100644
+--- a/tools/perf/util/machine.c
++++ b/tools/perf/util/machine.c
+@@ -134,6 +134,8 @@ struct machine *machine__new_host(void)
+ 
+ 		if (machine__create_kernel_maps(machine) < 0)
+ 			goto out_delete;
++
++		machine->env = &perf_env;
+ 	}
+ 
+ 	return machine;
+diff --git a/tools/testing/selftests/arm64/abi/syscall-abi-asm.S b/tools/testing/selftests/arm64/abi/syscall-abi-asm.S
+index df3230fdac3958..66ab2e0bae5fd0 100644
+--- a/tools/testing/selftests/arm64/abi/syscall-abi-asm.S
++++ b/tools/testing/selftests/arm64/abi/syscall-abi-asm.S
+@@ -81,32 +81,31 @@ do_syscall:
+ 	stp	x27, x28, [sp, #96]
+ 
+ 	// Set SVCR if we're doing SME
+-	cbz	x1, 1f
++	cbz	x1, load_gpr
+ 	adrp	x2, svcr_in
+ 	ldr	x2, [x2, :lo12:svcr_in]
+ 	msr	S3_3_C4_C2_2, x2
+-1:
+ 
+ 	// Load ZA and ZT0 if enabled - uses x12 as scratch due to SME LDR
+-	tbz	x2, #SVCR_ZA_SHIFT, 1f
++	tbz	x2, #SVCR_ZA_SHIFT, load_gpr
+ 	mov	w12, #0
+ 	ldr	x2, =za_in
+-2:	_ldr_za 12, 2
++1:	_ldr_za 12, 2
+ 	add	x2, x2, x1
+ 	add	x12, x12, #1
+ 	cmp	x1, x12
+-	bne	2b
++	bne	1b
+ 
+ 	// ZT0
+ 	mrs	x2, S3_0_C0_C4_5	// ID_AA64SMFR0_EL1
+ 	ubfx	x2, x2, #ID_AA64SMFR0_EL1_SMEver_SHIFT, \
+ 			 #ID_AA64SMFR0_EL1_SMEver_WIDTH
+-	cbz	x2, 1f
++	cbz	x2, load_gpr
+ 	adrp	x2, zt_in
+ 	add	x2, x2, :lo12:zt_in
+ 	_ldr_zt 2
+-1:
+ 
++load_gpr:
+ 	// Load GPRs x8-x28, and save our SP/FP for later comparison
+ 	ldr	x2, =gpr_in
+ 	add	x2, x2, #64
+@@ -125,9 +124,9 @@ do_syscall:
+ 	str	x30, [x2], #8		// LR
+ 
+ 	// Load FPRs if we're doing neither SVE nor streaming SVE
+-	cbnz	x0, 1f
++	cbnz	x0, check_sve_in
+ 	ldr	x2, =svcr_in
+-	tbnz	x2, #SVCR_SM_SHIFT, 1f
++	tbnz	x2, #SVCR_SM_SHIFT, check_sve_in
+ 
+ 	ldr	x2, =fpr_in
+ 	ldp	q0, q1, [x2]
+@@ -148,8 +147,8 @@ do_syscall:
+ 	ldp	q30, q31, [x2, #16 * 30]
+ 
+ 	b	2f
+-1:
+ 
++check_sve_in:
+ 	// Load the SVE registers if we're doing SVE/SME
+ 
+ 	ldr	x2, =z_in
+@@ -256,32 +255,31 @@ do_syscall:
+ 	stp	q30, q31, [x2, #16 * 30]
+ 
+ 	// Save SVCR if we're doing SME
+-	cbz	x1, 1f
++	cbz	x1, check_sve_out
+ 	mrs	x2, S3_3_C4_C2_2
+ 	adrp	x3, svcr_out
+ 	str	x2, [x3, :lo12:svcr_out]
+-1:
+ 
+ 	// Save ZA if it's enabled - uses x12 as scratch due to SME STR
+-	tbz	x2, #SVCR_ZA_SHIFT, 1f
++	tbz	x2, #SVCR_ZA_SHIFT, check_sve_out
+ 	mov	w12, #0
+ 	ldr	x2, =za_out
+-2:	_str_za 12, 2
++1:	_str_za 12, 2
+ 	add	x2, x2, x1
+ 	add	x12, x12, #1
+ 	cmp	x1, x12
+-	bne	2b
++	bne	1b
+ 
+ 	// ZT0
+ 	mrs	x2, S3_0_C0_C4_5	// ID_AA64SMFR0_EL1
+ 	ubfx	x2, x2, #ID_AA64SMFR0_EL1_SMEver_SHIFT, \
+ 			#ID_AA64SMFR0_EL1_SMEver_WIDTH
+-	cbz	x2, 1f
++	cbz	x2, check_sve_out
+ 	adrp	x2, zt_out
+ 	add	x2, x2, :lo12:zt_out
+ 	_str_zt 2
+-1:
+ 
++check_sve_out:
+ 	// Save the SVE state if we have some
+ 	cbz	x0, 1f
+ 
+diff --git a/tools/testing/selftests/bpf/progs/test_tp_btf_nullable.c b/tools/testing/selftests/bpf/progs/test_tp_btf_nullable.c
+index 5aaf2b065f86c2..bba3e37f749b86 100644
+--- a/tools/testing/selftests/bpf/progs/test_tp_btf_nullable.c
++++ b/tools/testing/selftests/bpf/progs/test_tp_btf_nullable.c
+@@ -7,11 +7,7 @@
+ #include "bpf_misc.h"
+ 
+ SEC("tp_btf/bpf_testmod_test_nullable_bare")
+-/* This used to be a failure test, but raw_tp nullable arguments can now
+- * directly be dereferenced, whether they have nullable annotation or not,
+- * and don't need to be explicitly checked.
+- */
+-__success
++__failure __msg("R1 invalid mem access 'trusted_ptr_or_null_'")
+ int BPF_PROG(handle_tp_btf_nullable_bare1, struct bpf_testmod_test_read_ctx *nullable_ctx)
+ {
+ 	return nullable_ctx->len;
+diff --git a/tools/testing/selftests/bpf/progs/verifier_btf_ctx_access.c b/tools/testing/selftests/bpf/progs/verifier_btf_ctx_access.c
+index a570e48b917acc..bfc3bf18fed4fe 100644
+--- a/tools/testing/selftests/bpf/progs/verifier_btf_ctx_access.c
++++ b/tools/testing/selftests/bpf/progs/verifier_btf_ctx_access.c
+@@ -11,7 +11,7 @@ __success __retval(0)
+ __naked void btf_ctx_access_accept(void)
+ {
+ 	asm volatile ("					\
+-	r2 = *(u32*)(r1 + 8);		/* load 2nd argument value (int pointer) */\
++	r2 = *(u64 *)(r1 + 8);		/* load 2nd argument value (int pointer) */\
+ 	r0 = 0;						\
+ 	exit;						\
+ "	::: __clobber_all);
+@@ -23,7 +23,7 @@ __success __retval(0)
+ __naked void ctx_access_u32_pointer_accept(void)
+ {
+ 	asm volatile ("					\
+-	r2 = *(u32*)(r1 + 0);		/* load 1nd argument value (u32 pointer) */\
++	r2 = *(u64 *)(r1 + 0);		/* load 1nd argument value (u32 pointer) */\
+ 	r0 = 0;						\
+ 	exit;						\
+ "	::: __clobber_all);
+diff --git a/tools/testing/selftests/bpf/progs/verifier_d_path.c b/tools/testing/selftests/bpf/progs/verifier_d_path.c
+index ec79cbcfde91ef..87e51a215558fd 100644
+--- a/tools/testing/selftests/bpf/progs/verifier_d_path.c
++++ b/tools/testing/selftests/bpf/progs/verifier_d_path.c
+@@ -11,7 +11,7 @@ __success __retval(0)
+ __naked void d_path_accept(void)
+ {
+ 	asm volatile ("					\
+-	r1 = *(u32*)(r1 + 0);				\
++	r1 = *(u64 *)(r1 + 0);				\
+ 	r2 = r10;					\
+ 	r2 += -8;					\
+ 	r6 = 0;						\
+@@ -31,7 +31,7 @@ __failure __msg("helper call is not allowed in probe")
+ __naked void d_path_reject(void)
+ {
+ 	asm volatile ("					\
+-	r1 = *(u32*)(r1 + 0);				\
++	r1 = *(u64 *)(r1 + 0);				\
+ 	r2 = r10;					\
+ 	r2 += -8;					\
+ 	r6 = 0;						\
+diff --git a/tools/testing/selftests/drivers/net/mlxsw/sharedbuffer.sh b/tools/testing/selftests/drivers/net/mlxsw/sharedbuffer.sh
+index 0c47faff9274b1..c068e6c2a580ea 100755
+--- a/tools/testing/selftests/drivers/net/mlxsw/sharedbuffer.sh
++++ b/tools/testing/selftests/drivers/net/mlxsw/sharedbuffer.sh
+@@ -22,20 +22,34 @@ SB_ITC=0
+ h1_create()
+ {
+ 	simple_if_init $h1 192.0.1.1/24
++	tc qdisc add dev $h1 clsact
++
++	# Add an egress filter on $h1 to guarantee that the packet sent
++	# will be the only packet passed to the device.
++	tc filter add dev $h1 egress pref 2 handle 102 matchall action drop
+ }
+ 
+ h1_destroy()
+ {
++	tc filter del dev $h1 egress pref 2 handle 102 matchall action drop
++	tc qdisc del dev $h1 clsact
+ 	simple_if_fini $h1 192.0.1.1/24
+ }
+ 
+ h2_create()
+ {
+ 	simple_if_init $h2 192.0.1.2/24
++	tc qdisc add dev $h2 clsact
++
++	# Add an egress filter on $h2 to guarantee that the packet sent
++	# will be the only packet passed to the device.
++	tc filter add dev $h2 egress pref 1 handle 101 matchall action drop
+ }
+ 
+ h2_destroy()
+ {
++	tc filter del dev $h2 egress pref 1 handle 101 matchall action drop
++	tc qdisc del dev $h2 clsact
+ 	simple_if_fini $h2 192.0.1.2/24
+ }
+ 
+@@ -101,6 +115,11 @@ port_pool_test()
+ 	local exp_max_occ=$(devlink_cell_size_get)
+ 	local max_occ
+ 
++	tc filter add dev $h1 egress protocol ip pref 1 handle 101 flower \
++		src_mac $h1mac dst_mac $h2mac \
++		src_ip 192.0.1.1 dst_ip 192.0.1.2 \
++		action pass
++
+ 	devlink sb occupancy clearmax $DEVLINK_DEV
+ 
+ 	$MZ $h1 -c 1 -p 10 -a $h1mac -b $h2mac -A 192.0.1.1 -B 192.0.1.2 \
+@@ -108,11 +127,6 @@ port_pool_test()
+ 
+ 	devlink sb occupancy snapshot $DEVLINK_DEV
+ 
+-	RET=0
+-	max_occ=$(sb_occ_pool_check $dl_port1 $SB_POOL_ING $exp_max_occ)
+-	check_err $? "Expected iPool($SB_POOL_ING) max occupancy to be $exp_max_occ, but got $max_occ"
+-	log_test "physical port's($h1) ingress pool"
+-
+ 	RET=0
+ 	max_occ=$(sb_occ_pool_check $dl_port2 $SB_POOL_ING $exp_max_occ)
+ 	check_err $? "Expected iPool($SB_POOL_ING) max occupancy to be $exp_max_occ, but got $max_occ"
+@@ -122,6 +136,11 @@ port_pool_test()
+ 	max_occ=$(sb_occ_pool_check $cpu_dl_port $SB_POOL_EGR_CPU $exp_max_occ)
+ 	check_err $? "Expected ePool($SB_POOL_EGR_CPU) max occupancy to be $exp_max_occ, but got $max_occ"
+ 	log_test "CPU port's egress pool"
++
++	tc filter del dev $h1 egress protocol ip pref 1 handle 101 flower \
++		src_mac $h1mac dst_mac $h2mac \
++		src_ip 192.0.1.1 dst_ip 192.0.1.2 \
++		action pass
+ }
+ 
+ port_tc_ip_test()
+@@ -129,6 +148,11 @@ port_tc_ip_test()
+ 	local exp_max_occ=$(devlink_cell_size_get)
+ 	local max_occ
+ 
++	tc filter add dev $h1 egress protocol ip pref 1 handle 101 flower \
++		src_mac $h1mac dst_mac $h2mac \
++		src_ip 192.0.1.1 dst_ip 192.0.1.2 \
++		action pass
++
+ 	devlink sb occupancy clearmax $DEVLINK_DEV
+ 
+ 	$MZ $h1 -c 1 -p 10 -a $h1mac -b $h2mac -A 192.0.1.1 -B 192.0.1.2 \
+@@ -136,11 +160,6 @@ port_tc_ip_test()
+ 
+ 	devlink sb occupancy snapshot $DEVLINK_DEV
+ 
+-	RET=0
+-	max_occ=$(sb_occ_itc_check $dl_port2 $SB_ITC $exp_max_occ)
+-	check_err $? "Expected ingress TC($SB_ITC) max occupancy to be $exp_max_occ, but got $max_occ"
+-	log_test "physical port's($h1) ingress TC - IP packet"
+-
+ 	RET=0
+ 	max_occ=$(sb_occ_itc_check $dl_port2 $SB_ITC $exp_max_occ)
+ 	check_err $? "Expected ingress TC($SB_ITC) max occupancy to be $exp_max_occ, but got $max_occ"
+@@ -150,6 +169,11 @@ port_tc_ip_test()
+ 	max_occ=$(sb_occ_etc_check $cpu_dl_port $SB_ITC_CPU_IP $exp_max_occ)
+ 	check_err $? "Expected egress TC($SB_ITC_CPU_IP) max occupancy to be $exp_max_occ, but got $max_occ"
+ 	log_test "CPU port's egress TC - IP packet"
++
++	tc filter del dev $h1 egress protocol ip pref 1 handle 101 flower \
++		src_mac $h1mac dst_mac $h2mac \
++		src_ip 192.0.1.1 dst_ip 192.0.1.2 \
++		action pass
+ }
+ 
+ port_tc_arp_test()
+@@ -157,17 +181,15 @@ port_tc_arp_test()
+ 	local exp_max_occ=$(devlink_cell_size_get)
+ 	local max_occ
+ 
++	tc filter add dev $h1 egress protocol arp pref 1 handle 101 flower \
++		src_mac $h1mac action pass
++
+ 	devlink sb occupancy clearmax $DEVLINK_DEV
+ 
+ 	$MZ $h1 -c 1 -p 10 -a $h1mac -A 192.0.1.1 -t arp -q
+ 
+ 	devlink sb occupancy snapshot $DEVLINK_DEV
+ 
+-	RET=0
+-	max_occ=$(sb_occ_itc_check $dl_port2 $SB_ITC $exp_max_occ)
+-	check_err $? "Expected ingress TC($SB_ITC) max occupancy to be $exp_max_occ, but got $max_occ"
+-	log_test "physical port's($h1) ingress TC - ARP packet"
+-
+ 	RET=0
+ 	max_occ=$(sb_occ_itc_check $dl_port2 $SB_ITC $exp_max_occ)
+ 	check_err $? "Expected ingress TC($SB_ITC) max occupancy to be $exp_max_occ, but got $max_occ"
+@@ -177,6 +199,9 @@ port_tc_arp_test()
+ 	max_occ=$(sb_occ_etc_check $cpu_dl_port $SB_ITC_CPU_ARP $exp_max_occ)
+ 	check_err $? "Expected egress TC($SB_ITC_IP2ME) max occupancy to be $exp_max_occ, but got $max_occ"
+ 	log_test "CPU port's egress TC - ARP packet"
++
++	tc filter del dev $h1 egress protocol arp pref 1 handle 101 flower \
++		src_mac $h1mac action pass
+ }
+ 
+ setup_prepare()
+diff --git a/tools/testing/selftests/net/netfilter/rpath.sh b/tools/testing/selftests/net/netfilter/rpath.sh
+index 4485fd7675ed7e..86ec4e68594dc3 100755
+--- a/tools/testing/selftests/net/netfilter/rpath.sh
++++ b/tools/testing/selftests/net/netfilter/rpath.sh
+@@ -61,9 +61,20 @@ ip -net "$ns2" a a 192.168.42.1/24 dev d0
+ ip -net "$ns1" a a fec0:42::2/64 dev v0 nodad
+ ip -net "$ns2" a a fec0:42::1/64 dev d0 nodad
+ 
++# avoid neighbor lookups and enable martian IPv6 pings
++ns2_hwaddr=$(ip -net "$ns2" link show dev v0 | \
++	     sed -n 's, *link/ether \([^ ]*\) .*,\1,p')
++ns1_hwaddr=$(ip -net "$ns1" link show dev v0 | \
++	     sed -n 's, *link/ether \([^ ]*\) .*,\1,p')
++ip -net "$ns1" neigh add fec0:42::1 lladdr "$ns2_hwaddr" nud permanent dev v0
++ip -net "$ns1" neigh add fec0:23::1 lladdr "$ns2_hwaddr" nud permanent dev v0
++ip -net "$ns2" neigh add fec0:42::2 lladdr "$ns1_hwaddr" nud permanent dev d0
++ip -net "$ns2" neigh add fec0:23::2 lladdr "$ns1_hwaddr" nud permanent dev v0
++
+ # firewall matches to test
+ [ -n "$iptables" ] && {
+ 	common='-t raw -A PREROUTING -s 192.168.0.0/16'
++	common+=' -p icmp --icmp-type echo-request'
+ 	if ! ip netns exec "$ns2" "$iptables" $common -m rpfilter;then
+ 		echo "Cannot add rpfilter rule"
+ 		exit $ksft_skip
+@@ -72,6 +83,7 @@ ip -net "$ns2" a a fec0:42::1/64 dev d0 nodad
+ }
+ [ -n "$ip6tables" ] && {
+ 	common='-t raw -A PREROUTING -s fec0::/16'
++	common+=' -p icmpv6 --icmpv6-type echo-request'
+ 	if ! ip netns exec "$ns2" "$ip6tables" $common -m rpfilter;then
+ 		echo "Cannot add rpfilter rule"
+ 		exit $ksft_skip
+@@ -82,8 +94,10 @@ ip -net "$ns2" a a fec0:42::1/64 dev d0 nodad
+ table inet t {
+ 	chain c {
+ 		type filter hook prerouting priority raw;
+-		ip saddr 192.168.0.0/16 fib saddr . iif oif exists counter
+-		ip6 saddr fec0::/16 fib saddr . iif oif exists counter
++		ip saddr 192.168.0.0/16 icmp type echo-request \
++			fib saddr . iif oif exists counter
++		ip6 saddr fec0::/16 icmpv6 type echo-request \
++			fib saddr . iif oif exists counter
+ 	}
+ }
+ EOF


* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2024-12-15  0:02 Mike Pagano
  0 siblings, 0 replies; 82+ messages in thread
From: Mike Pagano @ 2024-12-15  0:02 UTC (permalink / raw
  To: gentoo-commits

commit:     1142e63b91589e751e9f9e537c19cc52f96c790d
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Dec 15 00:02:06 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Dec 15 00:02:06 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=1142e63b

Remove redundant patches

Removed
1740_x86-pkeys-change-caller-of-update_pkru_in_sigframe.patch
1741_x86-pkeys-ensure-updated-pkru-value-is-xrstor-d.patch

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                        |   8 --
 ...-change-caller-of-update_pkru_in_sigframe.patch | 107 ---------------------
 ...eys-ensure-updated-pkru-value-is-xrstor-d.patch |  96 ------------------
 3 files changed, 211 deletions(-)

diff --git a/0000_README b/0000_README
index 81c02320..a2c9782d 100644
--- a/0000_README
+++ b/0000_README
@@ -75,14 +75,6 @@ Patch:  1730_parisc-Disable-prctl.patch
 From:   https://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux.git
 Desc:   prctl: Temporarily disable prctl(PR_SET_MDWE) on parisc
 
-Patch:  1740_x86-pkeys-change-caller-of-update_pkru_in_sigframe.patch
-From:   https://git.kernel.org/
-Desc:   x86/pkeys: Change caller of update_pkru_in_sigframe()
-
-Patch:  1741_x86-pkeys-ensure-updated-pkru-value-is-xrstor-d.patch
-From:   https://git.kernel.org/
-Desc:   x86/pkeys: Ensure updated PKRU value is XRSTOR'd
-
 Patch:  2000_BT-Check-key-sizes-only-if-Secure-Simple-Pairing-enabled.patch
 From:   https://lore.kernel.org/linux-bluetooth/20190522070540.48895-1-marcel@holtmann.org/raw
 Desc:   Bluetooth: Check key sizes only when Secure Simple Pairing is enabled. See bug #686758

diff --git a/1740_x86-pkeys-change-caller-of-update_pkru_in_sigframe.patch b/1740_x86-pkeys-change-caller-of-update_pkru_in_sigframe.patch
deleted file mode 100644
index 3a1fbd82..00000000
--- a/1740_x86-pkeys-change-caller-of-update_pkru_in_sigframe.patch
+++ /dev/null
@@ -1,107 +0,0 @@
-From 5683d0ce8fb46f36315a2b508f90ec6221cda018 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Tue, 19 Nov 2024 17:45:19 +0000
-Subject: x86/pkeys: Change caller of update_pkru_in_sigframe()
-
-From: Aruna Ramakrishna <aruna.ramakrishna@oracle.com>
-
-[ Upstream commit 6a1853bdf17874392476b552398df261f75503e0 ]
-
-update_pkru_in_sigframe() will shortly need some information which
-is only available inside xsave_to_user_sigframe(). Move
-update_pkru_in_sigframe() inside the other function to make it
-easier to provide it that information.
-
-No functional changes.
-
-Signed-off-by: Aruna Ramakrishna <aruna.ramakrishna@oracle.com>
-Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
-Link: https://lore.kernel.org/all/20241119174520.3987538-2-aruna.ramakrishna%40oracle.com
-Stable-dep-of: ae6012d72fa6 ("x86/pkeys: Ensure updated PKRU value is XRSTOR'd")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- arch/x86/kernel/fpu/signal.c | 20 ++------------------
- arch/x86/kernel/fpu/xstate.h | 15 ++++++++++++++-
- 2 files changed, 16 insertions(+), 19 deletions(-)
-
-diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
-index 1065ab995305c..8f62e0666dea5 100644
---- a/arch/x86/kernel/fpu/signal.c
-+++ b/arch/x86/kernel/fpu/signal.c
-@@ -63,16 +63,6 @@ static inline bool check_xstate_in_sigframe(struct fxregs_state __user *fxbuf,
- 	return true;
- }
- 
--/*
-- * Update the value of PKRU register that was already pushed onto the signal frame.
-- */
--static inline int update_pkru_in_sigframe(struct xregs_state __user *buf, u32 pkru)
--{
--	if (unlikely(!cpu_feature_enabled(X86_FEATURE_OSPKE)))
--		return 0;
--	return __put_user(pkru, (unsigned int __user *)get_xsave_addr_user(buf, XFEATURE_PKRU));
--}
--
- /*
-  * Signal frame handlers.
-  */
-@@ -168,14 +158,8 @@ static inline bool save_xstate_epilog(void __user *buf, int ia32_frame,
- 
- static inline int copy_fpregs_to_sigframe(struct xregs_state __user *buf, u32 pkru)
- {
--	int err = 0;
--
--	if (use_xsave()) {
--		err = xsave_to_user_sigframe(buf);
--		if (!err)
--			err = update_pkru_in_sigframe(buf, pkru);
--		return err;
--	}
-+	if (use_xsave())
-+		return xsave_to_user_sigframe(buf, pkru);
- 
- 	if (use_fxsr())
- 		return fxsave_to_user_sigframe((struct fxregs_state __user *) buf);
-diff --git a/arch/x86/kernel/fpu/xstate.h b/arch/x86/kernel/fpu/xstate.h
-index 0b86a5002c846..6b2924fbe5b8d 100644
---- a/arch/x86/kernel/fpu/xstate.h
-+++ b/arch/x86/kernel/fpu/xstate.h
-@@ -69,6 +69,16 @@ static inline u64 xfeatures_mask_independent(void)
- 	return fpu_kernel_cfg.independent_features;
- }
- 
-+/*
-+ * Update the value of PKRU register that was already pushed onto the signal frame.
-+ */
-+static inline int update_pkru_in_sigframe(struct xregs_state __user *buf, u32 pkru)
-+{
-+	if (unlikely(!cpu_feature_enabled(X86_FEATURE_OSPKE)))
-+		return 0;
-+	return __put_user(pkru, (unsigned int __user *)get_xsave_addr_user(buf, XFEATURE_PKRU));
-+}
-+
- /* XSAVE/XRSTOR wrapper functions */
- 
- #ifdef CONFIG_X86_64
-@@ -256,7 +266,7 @@ static inline u64 xfeatures_need_sigframe_write(void)
-  * The caller has to zero buf::header before calling this because XSAVE*
-  * does not touch the reserved fields in the header.
-  */
--static inline int xsave_to_user_sigframe(struct xregs_state __user *buf)
-+static inline int xsave_to_user_sigframe(struct xregs_state __user *buf, u32 pkru)
- {
- 	/*
- 	 * Include the features which are not xsaved/rstored by the kernel
-@@ -281,6 +291,9 @@ static inline int xsave_to_user_sigframe(struct xregs_state __user *buf)
- 	XSTATE_OP(XSAVE, buf, lmask, hmask, err);
- 	clac();
- 
-+	if (!err)
-+		err = update_pkru_in_sigframe(buf, pkru);
-+
- 	return err;
- }
- 
--- 
-2.43.0
-

diff --git a/1741_x86-pkeys-ensure-updated-pkru-value-is-xrstor-d.patch b/1741_x86-pkeys-ensure-updated-pkru-value-is-xrstor-d.patch
deleted file mode 100644
index 11b1f768..00000000
--- a/1741_x86-pkeys-ensure-updated-pkru-value-is-xrstor-d.patch
+++ /dev/null
@@ -1,96 +0,0 @@
-From 24fedf2768fd57e0d767137044c4f7493357b325 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Tue, 19 Nov 2024 17:45:20 +0000
-Subject: x86/pkeys: Ensure updated PKRU value is XRSTOR'd
-
-From: Aruna Ramakrishna <aruna.ramakrishna@oracle.com>
-
-[ Upstream commit ae6012d72fa60c9ff92de5bac7a8021a47458e5b ]
-
-When XSTATE_BV[i] is 0, and XRSTOR attempts to restore state component
-'i' it ignores any value in the XSAVE buffer and instead restores the
-state component's init value.
-
-This means that if XSAVE writes XSTATE_BV[PKRU]=0 then XRSTOR will
-ignore the value that update_pkru_in_sigframe() writes to the XSAVE buffer.
-
-XSTATE_BV[PKRU] only gets written as 0 if PKRU is in its init state. On
-Intel CPUs, basically never happens because the kernel usually
-overwrites the init value (aside: this is why we didn't notice this bug
-until now). But on AMD, the init tracker is more aggressive and will
-track PKRU as being in its init state upon any wrpkru(0x0).
-Unfortunately, sig_prepare_pkru() does just that: wrpkru(0x0).
-
-This writes XSTATE_BV[PKRU]=0 which makes XRSTOR ignore the PKRU value
-in the sigframe.
-
-To fix this, always overwrite the sigframe XSTATE_BV with a value that
-has XSTATE_BV[PKRU]==1.  This ensures that XRSTOR will not ignore what
-update_pkru_in_sigframe() wrote.
-
-The problematic sequence of events is something like this:
-
-Userspace does:
-	* wrpkru(0xffff0000) (or whatever)
-	* Hardware sets: XINUSE[PKRU]=1
-Signal happens, kernel is entered:
-	* sig_prepare_pkru() => wrpkru(0x00000000)
-	* Hardware sets: XINUSE[PKRU]=0 (aggressive AMD init tracker)
-	* XSAVE writes most of XSAVE buffer, including
-	  XSTATE_BV[PKRU]=XINUSE[PKRU]=0
-	* update_pkru_in_sigframe() overwrites PKRU in XSAVE buffer
-... signal handling
-	* XRSTOR sees XSTATE_BV[PKRU]==0, ignores just-written value
-	  from update_pkru_in_sigframe()
-
-Fixes: 70044df250d0 ("x86/pkeys: Update PKRU to enable all pkeys before XSAVE")
-Suggested-by: Rudi Horn <rudi.horn@oracle.com>
-Signed-off-by: Aruna Ramakrishna <aruna.ramakrishna@oracle.com>
-Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
-Acked-by: Dave Hansen <dave.hansen@linux.intel.com>
-Link: https://lore.kernel.org/all/20241119174520.3987538-3-aruna.ramakrishna%40oracle.com
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- arch/x86/kernel/fpu/xstate.h | 16 ++++++++++++++--
- 1 file changed, 14 insertions(+), 2 deletions(-)
-
-diff --git a/arch/x86/kernel/fpu/xstate.h b/arch/x86/kernel/fpu/xstate.h
-index 6b2924fbe5b8d..aa16f1a1bbcf1 100644
---- a/arch/x86/kernel/fpu/xstate.h
-+++ b/arch/x86/kernel/fpu/xstate.h
-@@ -72,10 +72,22 @@ static inline u64 xfeatures_mask_independent(void)
- /*
-  * Update the value of PKRU register that was already pushed onto the signal frame.
-  */
--static inline int update_pkru_in_sigframe(struct xregs_state __user *buf, u32 pkru)
-+static inline int update_pkru_in_sigframe(struct xregs_state __user *buf, u64 mask, u32 pkru)
- {
-+	u64 xstate_bv;
-+	int err;
-+
- 	if (unlikely(!cpu_feature_enabled(X86_FEATURE_OSPKE)))
- 		return 0;
-+
-+	/* Mark PKRU as in-use so that it is restored correctly. */
-+	xstate_bv = (mask & xfeatures_in_use()) | XFEATURE_MASK_PKRU;
-+
-+	err =  __put_user(xstate_bv, &buf->header.xfeatures);
-+	if (err)
-+		return err;
-+
-+	/* Update PKRU value in the userspace xsave buffer. */
- 	return __put_user(pkru, (unsigned int __user *)get_xsave_addr_user(buf, XFEATURE_PKRU));
- }
- 
-@@ -292,7 +304,7 @@ static inline int xsave_to_user_sigframe(struct xregs_state __user *buf, u32 pkr
- 	clac();
- 
- 	if (!err)
--		err = update_pkru_in_sigframe(buf, pkru);
-+		err = update_pkru_in_sigframe(buf, mask, pkru);
- 
- 	return err;
- }
--- 
-2.43.0
-


* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2024-12-14 23:59 Mike Pagano
  0 siblings, 0 replies; 82+ messages in thread
From: Mike Pagano @ 2024-12-14 23:59 UTC (permalink / raw
  To: gentoo-commits

commit:     19cecaf31ceabc39e8291a5e852adf5e8f726445
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Dec 14 23:59:03 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Dec 14 23:59:03 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=19cecaf3

Remove redundant patch

Removed:
2700_drm-display-GCC15.patch

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                  |  4 ----
 2700_drm-display-GCC15.patch | 52 --------------------------------------------
 2 files changed, 56 deletions(-)

diff --git a/0000_README b/0000_README
index 6429d035..81c02320 100644
--- a/0000_README
+++ b/0000_README
@@ -87,10 +87,6 @@ Patch:  2000_BT-Check-key-sizes-only-if-Secure-Simple-Pairing-enabled.patch
 From:   https://lore.kernel.org/linux-bluetooth/20190522070540.48895-1-marcel@holtmann.org/raw
 Desc:   Bluetooth: Check key sizes only when Secure Simple Pairing is enabled. See bug #686758
 
-Patch:  2700_drm-display-GCC15.patch
-From:   https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
-Desc:   drm/display: Fix building with GCC 15
-
 Patch:  2901_tools-lib-subcmd-compile-fix.patch
 From:   https://lore.kernel.org/all/20240731085217.94928-1-michael.weiss@aisec.fraunhofer.de/
 Desc:   tools lib subcmd: Fixed uninitialized use of variable in parse-options

diff --git a/2700_drm-display-GCC15.patch b/2700_drm-display-GCC15.patch
deleted file mode 100644
index 0be775ea..00000000
--- a/2700_drm-display-GCC15.patch
+++ /dev/null
@@ -1,52 +0,0 @@
-From a500f3751d3c861be7e4463c933cf467240cca5d Mon Sep 17 00:00:00 2001
-From: Brahmajit Das <brahmajit.xyz@gmail.com>
-Date: Wed, 2 Oct 2024 14:53:11 +0530
-Subject: drm/display: Fix building with GCC 15
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-GCC 15 enables -Werror=unterminated-string-initialization by default.
-This results in the following build error
-
-drivers/gpu/drm/display/drm_dp_dual_mode_helper.c: In function ‘is_hdmi_adaptor’:
-drivers/gpu/drm/display/drm_dp_dual_mode_helper.c:164:17: error: initializer-string for array of
- ‘char’ is too long [-Werror=unterminated-string-initialization]
-  164 |                 "DP-HDMI ADAPTOR\x04";
-      |                 ^~~~~~~~~~~~~~~~~~~~~
-
-After discussion with Ville, the fix was to increase the size of
-dp_dual_mode_hdmi_id array by one, so that it can accommodate the NULL
-line character. This should let us build the kernel with GCC 15.
-
-Signed-off-by: Brahmajit Das <brahmajit.xyz@gmail.com>
-Reviewed-by: Jani Nikula <jani.nikula@intel.com>
-Link: https://patchwork.freedesktop.org/patch/msgid/20241002092311.942822-1-brahmajit.xyz@gmail.com
-Signed-off-by: Jani Nikula <jani.nikula@intel.com>
----
- drivers/gpu/drm/display/drm_dp_dual_mode_helper.c | 4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
-(limited to 'drivers/gpu/drm/display/drm_dp_dual_mode_helper.c')
-
-diff --git a/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c
-index 14a2a8473682b0..c491e3203bf11c 100644
---- a/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c
-+++ b/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c
-@@ -160,11 +160,11 @@ EXPORT_SYMBOL(drm_dp_dual_mode_write);
- 
- static bool is_hdmi_adaptor(const char hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN])
- {
--	static const char dp_dual_mode_hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN] =
-+	static const char dp_dual_mode_hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN + 1] =
- 		"DP-HDMI ADAPTOR\x04";
- 
- 	return memcmp(hdmi_id, dp_dual_mode_hdmi_id,
--		      sizeof(dp_dual_mode_hdmi_id)) == 0;
-+		      DP_DUAL_MODE_HDMI_ID_LEN) == 0;
- }
- 
- static bool is_type1_adaptor(uint8_t adaptor_id)
--- 
-cgit 1.2.3-korg
-


* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2024-12-14 23:47 Mike Pagano
  0 siblings, 0 replies; 82+ messages in thread
From: Mike Pagano @ 2024-12-14 23:47 UTC (permalink / raw
  To: gentoo-commits

commit:     4c68b8a5598beeb003b0f59ae33deba3b220de9a
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Dec 14 23:47:32 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Dec 14 23:47:32 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=4c68b8a5

Linux patch 6.12.5

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |     4 +
 1004_linux-6.12.5.patch | 33366 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 33370 insertions(+)

diff --git a/0000_README b/0000_README
index 81375872..6429d035 100644
--- a/0000_README
+++ b/0000_README
@@ -59,6 +59,10 @@ Patch:  1003_linux-6.12.4.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.12.4
 
+Patch:  1004_linux-6.12.5.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.12.5
+
 Patch:  1510_fs-enable-link-security-restrictions-by-default.patch
 From:   http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch/
 Desc:   Enable link security restrictions by default.

diff --git a/1004_linux-6.12.5.patch b/1004_linux-6.12.5.patch
new file mode 100644
index 00000000..6347cd6c
--- /dev/null
+++ b/1004_linux-6.12.5.patch
@@ -0,0 +1,33366 @@
+diff --git a/Documentation/ABI/testing/sysfs-bus-pci b/Documentation/ABI/testing/sysfs-bus-pci
+index 7f63c7e9777358..5da6a14dc326bd 100644
+--- a/Documentation/ABI/testing/sysfs-bus-pci
++++ b/Documentation/ABI/testing/sysfs-bus-pci
+@@ -163,6 +163,17 @@ Description:
+ 		will be present in sysfs.  Writing 1 to this file
+ 		will perform reset.
+ 
++What:		/sys/bus/pci/devices/.../reset_subordinate
++Date:		October 2024
++Contact:	linux-pci@vger.kernel.org
++Description:
++		This is visible only for bridge devices. If you want to reset
++		all devices attached through the subordinate bus of a specific
++		bridge device, writing 1 to this will try to do it.  This will
++		affect all devices attached to the system through this bridge
++		similar to writing 1 to their individual "reset" file, so use
++		with caution.
++
+ What:		/sys/bus/pci/devices/.../vpd
+ Date:		February 2008
+ Contact:	Ben Hutchings <bwh@kernel.org>
+diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs
+index 513296bb6f297f..3e1630c70d8ae7 100644
+--- a/Documentation/ABI/testing/sysfs-fs-f2fs
++++ b/Documentation/ABI/testing/sysfs-fs-f2fs
+@@ -822,3 +822,9 @@ Description:	It controls the valid block ratio threshold not to trigger excessiv
+ 		for zoned devices. The initial value of it is 95(%). F2FS will stop the
+ 		background GC thread from initiating GC for sections having valid blocks
+ 		exceeding the ratio.
++
++What:		/sys/fs/f2fs/<disk>/max_read_extent_count
++Date:		November 2024
++Contact:	"Chao Yu" <chao@kernel.org>
++Description:	It controls max read extent count for per-inode, the value of threshold
++		is 10240 by default.
+diff --git a/Documentation/accel/qaic/aic080.rst b/Documentation/accel/qaic/aic080.rst
+new file mode 100644
+index 00000000000000..d563771ea6ce48
+--- /dev/null
++++ b/Documentation/accel/qaic/aic080.rst
+@@ -0,0 +1,14 @@
++.. SPDX-License-Identifier: GPL-2.0-only
++
++===============================
++ Qualcomm Cloud AI 80 (AIC080)
++===============================
++
++Overview
++========
++
++The Qualcomm Cloud AI 80/AIC080 family of products is a derivative of AIC100.
++The number of NSPs and clock rates are reduced to fit within resource
++constrained solutions. The PCIe Product ID is 0xa080.
++
++As a derivative product, all AIC100 documentation applies.
+diff --git a/Documentation/accel/qaic/index.rst b/Documentation/accel/qaic/index.rst
+index ad19b88d1a669e..967b9dd8baceac 100644
+--- a/Documentation/accel/qaic/index.rst
++++ b/Documentation/accel/qaic/index.rst
+@@ -10,4 +10,5 @@ accelerator cards.
+ .. toctree::
+ 
+    qaic
++   aic080
+    aic100
+diff --git a/Documentation/arch/arm64/silicon-errata.rst b/Documentation/arch/arm64/silicon-errata.rst
+index 65bfab1b186146..77db10e944f039 100644
+--- a/Documentation/arch/arm64/silicon-errata.rst
++++ b/Documentation/arch/arm64/silicon-errata.rst
+@@ -258,6 +258,8 @@ stable kernels.
+ | Hisilicon      | Hip{08,09,10,10C| #162001900      | N/A                         |
+ |                | ,11} SMMU PMCG  |                 |                             |
+ +----------------+-----------------+-----------------+-----------------------------+
++| Hisilicon      | Hip09           | #162100801      | HISILICON_ERRATUM_162100801 |
+++----------------+-----------------+-----------------+-----------------------------+
+ +----------------+-----------------+-----------------+-----------------------------+
+ | Qualcomm Tech. | Kryo/Falkor v1  | E1003           | QCOM_FALKOR_ERRATUM_1003    |
+ +----------------+-----------------+-----------------+-----------------------------+
+diff --git a/Documentation/i2c/busses/i2c-i801.rst b/Documentation/i2c/busses/i2c-i801.rst
+index c840b597912c87..47e8ac5b7099f7 100644
+--- a/Documentation/i2c/busses/i2c-i801.rst
++++ b/Documentation/i2c/busses/i2c-i801.rst
+@@ -49,6 +49,7 @@ Supported adapters:
+   * Intel Meteor Lake (SOC and PCH)
+   * Intel Birch Stream (SOC)
+   * Intel Arrow Lake (SOC)
++  * Intel Panther Lake (SOC)
+ 
+    Datasheets: Publicly available at the Intel website
+ 
+diff --git a/Documentation/netlink/specs/ethtool.yaml b/Documentation/netlink/specs/ethtool.yaml
+index 6a050d755b9cb4..f6c5d8214c7e98 100644
+--- a/Documentation/netlink/specs/ethtool.yaml
++++ b/Documentation/netlink/specs/ethtool.yaml
+@@ -96,7 +96,12 @@ attribute-sets:
+         name: bits
+         type: nest
+         nested-attributes: bitset-bits
+-
++      -
++        name: value
++        type: binary
++      -
++        name: mask
++        type: binary
+   -
+     name: string
+     attributes:
+diff --git a/Makefile b/Makefile
+index 87dc2f81086021..f158bfe6407ac9 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 12
+-SUBLEVEL = 4
++SUBLEVEL = 5
+ EXTRAVERSION =
+ NAME = Baby Opossum Posse
+ 
+@@ -456,6 +456,7 @@ export rust_common_flags := --edition=2021 \
+ 			    -Wclippy::mut_mut \
+ 			    -Wclippy::needless_bitwise_bool \
+ 			    -Wclippy::needless_continue \
++			    -Aclippy::needless_lifetimes \
+ 			    -Wclippy::no_mangle_with_rust_abi \
+ 			    -Wclippy::dbg_macro
+ 
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index 22f8a7bca6d21c..a11a7a42edbfb5 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -1232,6 +1232,17 @@ config HISILICON_ERRATUM_161600802
+ 
+ 	  If unsure, say Y.
+ 
++config HISILICON_ERRATUM_162100801
++	bool "Hip09 162100801 erratum support"
++	default y
++	help
++	  When enabling GICv4.1 in hip09, VMAPP will fail to clear some caches
++	  during the unmapping operation, which can cause some vSGIs to be lost.
++	  To fix the issue, invalidate the related vPE cache through GICR_INVALLR
++	  after VMOVP.
++
++	  If unsure, say Y.
++
+ config QCOM_FALKOR_ERRATUM_1003
+ 	bool "Falkor E1003: Incorrect translation due to ASID change"
+ 	default y
+diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
+index b756578aeaeea1..1559a239137f32 100644
+--- a/arch/arm64/kernel/ptrace.c
++++ b/arch/arm64/kernel/ptrace.c
+@@ -719,6 +719,8 @@ static int fpmr_set(struct task_struct *target, const struct user_regset *regset
+ 	if (!system_supports_fpmr())
+ 		return -EINVAL;
+ 
++	fpmr = target->thread.uw.fpmr;
++
+ 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpmr, 0, count);
+ 	if (ret)
+ 		return ret;
+@@ -1418,7 +1420,7 @@ static int tagged_addr_ctrl_get(struct task_struct *target,
+ {
+ 	long ctrl = get_tagged_addr_ctrl(target);
+ 
+-	if (IS_ERR_VALUE(ctrl))
++	if (WARN_ON_ONCE(IS_ERR_VALUE(ctrl)))
+ 		return ctrl;
+ 
+ 	return membuf_write(&to, &ctrl, sizeof(ctrl));
+@@ -1432,6 +1434,10 @@ static int tagged_addr_ctrl_set(struct task_struct *target, const struct
+ 	int ret;
+ 	long ctrl;
+ 
++	ctrl = get_tagged_addr_ctrl(target);
++	if (WARN_ON_ONCE(IS_ERR_VALUE(ctrl)))
++		return ctrl;
++
+ 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl, 0, -1);
+ 	if (ret)
+ 		return ret;
+@@ -1463,6 +1469,8 @@ static int poe_set(struct task_struct *target, const struct
+ 	if (!system_supports_poe())
+ 		return -EINVAL;
+ 
++	ctrl = target->thread.por_el0;
++
+ 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl, 0, -1);
+ 	if (ret)
+ 		return ret;
+diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
+index 188197590fc9ce..b2ac062463273f 100644
+--- a/arch/arm64/mm/context.c
++++ b/arch/arm64/mm/context.c
+@@ -32,9 +32,9 @@ static unsigned long nr_pinned_asids;
+ static unsigned long *pinned_asid_map;
+ 
+ #define ASID_MASK		(~GENMASK(asid_bits - 1, 0))
+-#define ASID_FIRST_VERSION	(1UL << asid_bits)
++#define ASID_FIRST_VERSION	(1UL << 16)
+ 
+-#define NUM_USER_ASIDS		ASID_FIRST_VERSION
++#define NUM_USER_ASIDS		(1UL << asid_bits)
+ #define ctxid2asid(asid)	((asid) & ~ASID_MASK)
+ #define asid2ctxid(asid, genid)	((asid) | (genid))
+ 
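
The two context.c hunks above decouple the asid/generation packing: the
generation tag now always lives at bit 16 and above, while only the low
asid_bits carry the hardware ASID. A minimal userspace sketch of that packing,
assuming a hypothetical asid_bits of 8 (illustrative only, not kernel code):

#include <assert.h>
#include <stdio.h>

#define ASID_BITS		8UL
#define ASID_MASK		(~((1UL << ASID_BITS) - 1))
#define ASID_FIRST_VERSION	(1UL << 16)	/* generation step, fixed */
#define NUM_USER_ASIDS		(1UL << ASID_BITS)

static unsigned long ctxid2asid(unsigned long ctxid)
{
	return ctxid & ~ASID_MASK;	/* low asid_bits only */
}

static unsigned long asid2ctxid(unsigned long asid, unsigned long gen)
{
	return asid | gen;
}

int main(void)
{
	unsigned long gen = ASID_FIRST_VERSION;		/* generation 1 */
	unsigned long ctxid = asid2ctxid(42, gen);

	assert(ctxid2asid(ctxid) == 42);
	/* bumping the generation leaves the ASID untouched */
	assert(ctxid2asid(asid2ctxid(42, gen + ASID_FIRST_VERSION)) == 42);
	printf("ctxid=%#lx asid=%lu\n", ctxid, ctxid2asid(ctxid));
	return 0;
}
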
+diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
+index 27a32ff15412aa..93ba66de160ce4 100644
+--- a/arch/arm64/mm/init.c
++++ b/arch/arm64/mm/init.c
+@@ -116,15 +116,6 @@ static void __init arch_reserve_crashkernel(void)
+ 
+ static phys_addr_t __init max_zone_phys(phys_addr_t zone_limit)
+ {
+-	/**
+-	 * Information we get from firmware (e.g. DT dma-ranges) describe DMA
+-	 * bus constraints. Devices using DMA might have their own limitations.
+-	 * Some of them rely on DMA zone in low 32-bit memory. Keep low RAM
+-	 * DMA zone on platforms that have RAM there.
+-	 */
+-	if (memblock_start_of_DRAM() < U32_MAX)
+-		zone_limit = min(zone_limit, U32_MAX);
+-
+ 	return min(zone_limit, memblock_end_of_DRAM() - 1) + 1;
+ }
+ 
+@@ -140,6 +131,14 @@ static void __init zone_sizes_init(void)
+ 	acpi_zone_dma_limit = acpi_iort_dma_get_max_cpu_address();
+ 	dt_zone_dma_limit = of_dma_get_max_cpu_address(NULL);
+ 	zone_dma_limit = min(dt_zone_dma_limit, acpi_zone_dma_limit);
++	/*
++	 * Information we get from firmware (e.g. DT dma-ranges) describe DMA
++	 * bus constraints. Devices using DMA might have their own limitations.
++	 * Some of them rely on DMA zone in low 32-bit memory. Keep low RAM
++	 * DMA zone on platforms that have RAM there.
++	 */
++	if (memblock_start_of_DRAM() < U32_MAX)
++		zone_dma_limit = min(zone_dma_limit, U32_MAX);
+ 	arm64_dma_phys_limit = max_zone_phys(zone_dma_limit);
+ 	max_zone_pfns[ZONE_DMA] = PFN_DOWN(arm64_dma_phys_limit);
+ #endif
+diff --git a/arch/loongarch/include/asm/hugetlb.h b/arch/loongarch/include/asm/hugetlb.h
+index 5da32c00d483fb..376c0708e2979b 100644
+--- a/arch/loongarch/include/asm/hugetlb.h
++++ b/arch/loongarch/include/asm/hugetlb.h
+@@ -29,6 +29,16 @@ static inline int prepare_hugepage_range(struct file *file,
+ 	return 0;
+ }
+ 
++#define __HAVE_ARCH_HUGE_PTE_CLEAR
++static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
++				  pte_t *ptep, unsigned long sz)
++{
++	pte_t clear;
++
++	pte_val(clear) = (unsigned long)invalid_pte_table;
++	set_pte_at(mm, addr, ptep, clear);
++}
++
+ #define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
+ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+ 					    unsigned long addr, pte_t *ptep)
+diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c
+index 174734a23d0ac8..9d53eca66fcc70 100644
+--- a/arch/loongarch/kvm/vcpu.c
++++ b/arch/loongarch/kvm/vcpu.c
+@@ -240,7 +240,7 @@ static void kvm_late_check_requests(struct kvm_vcpu *vcpu)
+  */
+ static int kvm_enter_guest_check(struct kvm_vcpu *vcpu)
+ {
+-	int ret;
++	int idx, ret;
+ 
+ 	/*
+ 	 * Check conditions before entering the guest
+@@ -249,7 +249,9 @@ static int kvm_enter_guest_check(struct kvm_vcpu *vcpu)
+ 	if (ret < 0)
+ 		return ret;
+ 
++	idx = srcu_read_lock(&vcpu->kvm->srcu);
+ 	ret = kvm_check_requests(vcpu);
++	srcu_read_unlock(&vcpu->kvm->srcu, idx);
+ 
+ 	return ret;
+ }
+diff --git a/arch/loongarch/mm/tlb.c b/arch/loongarch/mm/tlb.c
+index 5ac9beb5f0935e..3b427b319db21d 100644
+--- a/arch/loongarch/mm/tlb.c
++++ b/arch/loongarch/mm/tlb.c
+@@ -289,7 +289,7 @@ static void setup_tlb_handler(int cpu)
+ 		/* Avoid lockdep warning */
+ 		rcutree_report_cpu_starting(cpu);
+ 
+-#ifdef CONFIG_NUMA
++#if defined(CONFIG_NUMA) && !defined(CONFIG_PREEMPT_RT)
+ 		vec_sz = sizeof(exception_handlers);
+ 
+ 		if (pcpu_handlers[cpu])
+diff --git a/arch/mips/boot/dts/loongson/ls7a-pch.dtsi b/arch/mips/boot/dts/loongson/ls7a-pch.dtsi
+index cce9428afc41fc..ee71045883e7e7 100644
+--- a/arch/mips/boot/dts/loongson/ls7a-pch.dtsi
++++ b/arch/mips/boot/dts/loongson/ls7a-pch.dtsi
+@@ -70,7 +70,6 @@ pci@1a000000 {
+ 			device_type = "pci";
+ 			#address-cells = <3>;
+ 			#size-cells = <2>;
+-			#interrupt-cells = <2>;
+ 			msi-parent = <&msi>;
+ 
+ 			reg = <0 0x1a000000 0 0x02000000>,
+@@ -234,7 +233,7 @@ phy1: ethernet-phy@1 {
+ 				};
+ 			};
+ 
+-			pci_bridge@9,0 {
++			pcie@9,0 {
+ 				compatible = "pci0014,7a19.1",
+ 						   "pci0014,7a19",
+ 						   "pciclass060400",
+@@ -244,12 +243,16 @@ pci_bridge@9,0 {
+ 				interrupts = <32 IRQ_TYPE_LEVEL_HIGH>;
+ 				interrupt-parent = <&pic>;
+ 
++				#address-cells = <3>;
++				#size-cells = <2>;
++				device_type = "pci";
+ 				#interrupt-cells = <1>;
+ 				interrupt-map-mask = <0 0 0 0>;
+ 				interrupt-map = <0 0 0 0 &pic 32 IRQ_TYPE_LEVEL_HIGH>;
++				ranges;
+ 			};
+ 
+-			pci_bridge@a,0 {
++			pcie@a,0 {
+ 				compatible = "pci0014,7a09.1",
+ 						   "pci0014,7a09",
+ 						   "pciclass060400",
+@@ -259,12 +262,16 @@ pci_bridge@a,0 {
+ 				interrupts = <33 IRQ_TYPE_LEVEL_HIGH>;
+ 				interrupt-parent = <&pic>;
+ 
++				#address-cells = <3>;
++				#size-cells = <2>;
++				device_type = "pci";
+ 				#interrupt-cells = <1>;
+ 				interrupt-map-mask = <0 0 0 0>;
+ 				interrupt-map = <0 0 0 0 &pic 33 IRQ_TYPE_LEVEL_HIGH>;
++				ranges;
+ 			};
+ 
+-			pci_bridge@b,0 {
++			pcie@b,0 {
+ 				compatible = "pci0014,7a09.1",
+ 						   "pci0014,7a09",
+ 						   "pciclass060400",
+@@ -274,12 +281,16 @@ pci_bridge@b,0 {
+ 				interrupts = <34 IRQ_TYPE_LEVEL_HIGH>;
+ 				interrupt-parent = <&pic>;
+ 
++				#address-cells = <3>;
++				#size-cells = <2>;
++				device_type = "pci";
+ 				#interrupt-cells = <1>;
+ 				interrupt-map-mask = <0 0 0 0>;
+ 				interrupt-map = <0 0 0 0 &pic 34 IRQ_TYPE_LEVEL_HIGH>;
++				ranges;
+ 			};
+ 
+-			pci_bridge@c,0 {
++			pcie@c,0 {
+ 				compatible = "pci0014,7a09.1",
+ 						   "pci0014,7a09",
+ 						   "pciclass060400",
+@@ -289,12 +300,16 @@ pci_bridge@c,0 {
+ 				interrupts = <35 IRQ_TYPE_LEVEL_HIGH>;
+ 				interrupt-parent = <&pic>;
+ 
++				#address-cells = <3>;
++				#size-cells = <2>;
++				device_type = "pci";
+ 				#interrupt-cells = <1>;
+ 				interrupt-map-mask = <0 0 0 0>;
+ 				interrupt-map = <0 0 0 0 &pic 35 IRQ_TYPE_LEVEL_HIGH>;
++				ranges;
+ 			};
+ 
+-			pci_bridge@d,0 {
++			pcie@d,0 {
+ 				compatible = "pci0014,7a19.1",
+ 						   "pci0014,7a19",
+ 						   "pciclass060400",
+@@ -304,12 +319,16 @@ pci_bridge@d,0 {
+ 				interrupts = <36 IRQ_TYPE_LEVEL_HIGH>;
+ 				interrupt-parent = <&pic>;
+ 
++				#address-cells = <3>;
++				#size-cells = <2>;
++				device_type = "pci";
+ 				#interrupt-cells = <1>;
+ 				interrupt-map-mask = <0 0 0 0>;
+ 				interrupt-map = <0 0 0 0 &pic 36 IRQ_TYPE_LEVEL_HIGH>;
++				ranges;
+ 			};
+ 
+-			pci_bridge@e,0 {
++			pcie@e,0 {
+ 				compatible = "pci0014,7a09.1",
+ 						   "pci0014,7a09",
+ 						   "pciclass060400",
+@@ -319,12 +338,16 @@ pci_bridge@e,0 {
+ 				interrupts = <37 IRQ_TYPE_LEVEL_HIGH>;
+ 				interrupt-parent = <&pic>;
+ 
++				#address-cells = <3>;
++				#size-cells = <2>;
++				device_type = "pci";
+ 				#interrupt-cells = <1>;
+ 				interrupt-map-mask = <0 0 0 0>;
+ 				interrupt-map = <0 0 0 0 &pic 37 IRQ_TYPE_LEVEL_HIGH>;
++				ranges;
+ 			};
+ 
+-			pci_bridge@f,0 {
++			pcie@f,0 {
+ 				compatible = "pci0014,7a29.1",
+ 						   "pci0014,7a29",
+ 						   "pciclass060400",
+@@ -334,12 +357,16 @@ pci_bridge@f,0 {
+ 				interrupts = <40 IRQ_TYPE_LEVEL_HIGH>;
+ 				interrupt-parent = <&pic>;
+ 
++				#address-cells = <3>;
++				#size-cells = <2>;
++				device_type = "pci";
+ 				#interrupt-cells = <1>;
+ 				interrupt-map-mask = <0 0 0 0>;
+ 				interrupt-map = <0 0 0 0 &pic 40 IRQ_TYPE_LEVEL_HIGH>;
++				ranges;
+ 			};
+ 
+-			pci_bridge@10,0 {
++			pcie@10,0 {
+ 				compatible = "pci0014,7a19.1",
+ 						   "pci0014,7a19",
+ 						   "pciclass060400",
+@@ -349,12 +376,16 @@ pci_bridge@10,0 {
+ 				interrupts = <41 IRQ_TYPE_LEVEL_HIGH>;
+ 				interrupt-parent = <&pic>;
+ 
++				#address-cells = <3>;
++				#size-cells = <2>;
++				device_type = "pci";
+ 				#interrupt-cells = <1>;
+ 				interrupt-map-mask = <0 0 0 0>;
+ 				interrupt-map = <0 0 0 0 &pic 41 IRQ_TYPE_LEVEL_HIGH>;
++				ranges;
+ 			};
+ 
+-			pci_bridge@11,0 {
++			pcie@11,0 {
+ 				compatible = "pci0014,7a29.1",
+ 						   "pci0014,7a29",
+ 						   "pciclass060400",
+@@ -364,12 +395,16 @@ pci_bridge@11,0 {
+ 				interrupts = <42 IRQ_TYPE_LEVEL_HIGH>;
+ 				interrupt-parent = <&pic>;
+ 
++				#address-cells = <3>;
++				#size-cells = <2>;
++				device_type = "pci";
+ 				#interrupt-cells = <1>;
+ 				interrupt-map-mask = <0 0 0 0>;
+ 				interrupt-map = <0 0 0 0 &pic 42 IRQ_TYPE_LEVEL_HIGH>;
++				ranges;
+ 			};
+ 
+-			pci_bridge@12,0 {
++			pcie@12,0 {
+ 				compatible = "pci0014,7a19.1",
+ 						   "pci0014,7a19",
+ 						   "pciclass060400",
+@@ -379,12 +414,16 @@ pci_bridge@12,0 {
+ 				interrupts = <43 IRQ_TYPE_LEVEL_HIGH>;
+ 				interrupt-parent = <&pic>;
+ 
++				#address-cells = <3>;
++				#size-cells = <2>;
++				device_type = "pci";
+ 				#interrupt-cells = <1>;
+ 				interrupt-map-mask = <0 0 0 0>;
+ 				interrupt-map = <0 0 0 0 &pic 43 IRQ_TYPE_LEVEL_HIGH>;
++				ranges;
+ 			};
+ 
+-			pci_bridge@13,0 {
++			pcie@13,0 {
+ 				compatible = "pci0014,7a29.1",
+ 						   "pci0014,7a29",
+ 						   "pciclass060400",
+@@ -394,12 +433,16 @@ pci_bridge@13,0 {
+ 				interrupts = <38 IRQ_TYPE_LEVEL_HIGH>;
+ 				interrupt-parent = <&pic>;
+ 
++				#address-cells = <3>;
++				#size-cells = <2>;
++				device_type = "pci";
+ 				#interrupt-cells = <1>;
+ 				interrupt-map-mask = <0 0 0 0>;
+ 				interrupt-map = <0 0 0 0 &pic 38 IRQ_TYPE_LEVEL_HIGH>;
++				ranges;
+ 			};
+ 
+-			pci_bridge@14,0 {
++			pcie@14,0 {
+ 				compatible = "pci0014,7a19.1",
+ 						   "pci0014,7a19",
+ 						   "pciclass060400",
+@@ -409,9 +452,13 @@ pci_bridge@14,0 {
+ 				interrupts = <39 IRQ_TYPE_LEVEL_HIGH>;
+ 				interrupt-parent = <&pic>;
+ 
++				#address-cells = <3>;
++				#size-cells = <2>;
++				device_type = "pci";
+ 				#interrupt-cells = <1>;
+ 				interrupt-map-mask = <0 0 0 0>;
+ 				interrupt-map = <0 0 0 0 &pic 39 IRQ_TYPE_LEVEL_HIGH>;
++				ranges;
+ 			};
+ 		};
+ 
+diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
+index fbb68fc28ed3a5..935568d68196d0 100644
+--- a/arch/powerpc/kernel/prom_init.c
++++ b/arch/powerpc/kernel/prom_init.c
+@@ -2932,7 +2932,7 @@ static void __init fixup_device_tree_chrp(void)
+ #endif
+ 
+ #if defined(CONFIG_PPC64) && defined(CONFIG_PPC_PMAC)
+-static void __init fixup_device_tree_pmac(void)
++static void __init fixup_device_tree_pmac64(void)
+ {
+ 	phandle u3, i2c, mpic;
+ 	u32 u3_rev;
+@@ -2972,7 +2972,31 @@ static void __init fixup_device_tree_pmac(void)
+ 		     &parent, sizeof(parent));
+ }
+ #else
+-#define fixup_device_tree_pmac()
++#define fixup_device_tree_pmac64()
++#endif
++
++#ifdef CONFIG_PPC_PMAC
++static void __init fixup_device_tree_pmac(void)
++{
++	__be32 val = 1;
++	char type[8];
++	phandle node;
++
++	// Some pmacs are missing #size-cells on escc nodes
++	for (node = 0; prom_next_node(&node); ) {
++		type[0] = '\0';
++		prom_getprop(node, "device_type", type, sizeof(type));
++		if (prom_strcmp(type, "escc"))
++			continue;
++
++		if (prom_getproplen(node, "#size-cells") != PROM_ERROR)
++			continue;
++
++		prom_setprop(node, NULL, "#size-cells", &val, sizeof(val));
++	}
++}
++#else
++static inline void fixup_device_tree_pmac(void) { }
+ #endif
+ 
+ #ifdef CONFIG_PPC_EFIKA
+@@ -3197,6 +3221,7 @@ static void __init fixup_device_tree(void)
+ 	fixup_device_tree_maple_memory_controller();
+ 	fixup_device_tree_chrp();
+ 	fixup_device_tree_pmac();
++	fixup_device_tree_pmac64();
+ 	fixup_device_tree_efika();
+ 	fixup_device_tree_pasemi();
+ }
+diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig
+index 2341393cfac1ae..26c01b9e3434c4 100644
+--- a/arch/riscv/configs/defconfig
++++ b/arch/riscv/configs/defconfig
+@@ -301,7 +301,6 @@ CONFIG_DEBUG_MEMORY_INIT=y
+ CONFIG_DEBUG_PER_CPU_MAPS=y
+ CONFIG_SOFTLOCKUP_DETECTOR=y
+ CONFIG_WQ_WATCHDOG=y
+-CONFIG_DEBUG_TIMEKEEPING=y
+ CONFIG_DEBUG_RT_MUTEXES=y
+ CONFIG_DEBUG_SPINLOCK=y
+ CONFIG_DEBUG_MUTEXES=y
+diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
+index 30b20ce9a70033..83789e39d1d5e5 100644
+--- a/arch/s390/include/asm/pci.h
++++ b/arch/s390/include/asm/pci.h
+@@ -106,9 +106,10 @@ struct zpci_bus {
+ 	struct list_head	resources;
+ 	struct list_head	bus_next;
+ 	struct resource		bus_resource;
+-	int			pchid;
++	int			topo;		/* TID if topo_is_tid, PCHID otherwise */
+ 	int			domain_nr;
+-	bool			multifunction;
++	u8			multifunction	: 1;
++	u8			topo_is_tid	: 1;
+ 	enum pci_bus_speed	max_bus_speed;
+ };
+ 
+@@ -129,6 +130,8 @@ struct zpci_dev {
+ 	u16		vfn;		/* virtual function number */
+ 	u16		pchid;		/* physical channel ID */
+ 	u16		maxstbl;	/* Maximum store block size */
++	u16		rid;		/* RID as supplied by firmware */
++	u16		tid;		/* Topology for which RID is valid */
+ 	u8		pfgid;		/* function group ID */
+ 	u8		pft;		/* pci function type */
+ 	u8		port;
+@@ -139,7 +142,8 @@ struct zpci_dev {
+ 	u8		is_physfn	: 1;
+ 	u8		util_str_avail	: 1;
+ 	u8		irqs_registered	: 1;
+-	u8		reserved	: 2;
++	u8		tid_avail	: 1;
++	u8		reserved	: 1;
+ 	unsigned int	devfn;		/* DEVFN part of the RID*/
+ 
+ 	u8 pfip[CLP_PFIP_NR_SEGMENTS];	/* pci function internal path */
+@@ -210,12 +214,14 @@ extern struct airq_iv *zpci_aif_sbv;
+ ----------------------------------------------------------------------------- */
+ /* Base stuff */
+ struct zpci_dev *zpci_create_device(u32 fid, u32 fh, enum zpci_state state);
++int zpci_add_device(struct zpci_dev *zdev);
+ int zpci_enable_device(struct zpci_dev *);
+ int zpci_disable_device(struct zpci_dev *);
+ int zpci_scan_configured_device(struct zpci_dev *zdev, u32 fh);
+ int zpci_deconfigure_device(struct zpci_dev *zdev);
+ void zpci_device_reserved(struct zpci_dev *zdev);
+ bool zpci_is_device_configured(struct zpci_dev *zdev);
++int zpci_scan_devices(void);
+ 
+ int zpci_hot_reset_device(struct zpci_dev *zdev);
+ int zpci_register_ioat(struct zpci_dev *, u8, u64, u64, u64, u8 *);
+@@ -225,7 +231,7 @@ void zpci_update_fh(struct zpci_dev *zdev, u32 fh);
+ 
+ /* CLP */
+ int clp_setup_writeback_mio(void);
+-int clp_scan_pci_devices(void);
++int clp_scan_pci_devices(struct list_head *scan_list);
+ int clp_query_pci_fn(struct zpci_dev *zdev);
+ int clp_enable_fh(struct zpci_dev *zdev, u32 *fh, u8 nr_dma_as);
+ int clp_disable_fh(struct zpci_dev *zdev, u32 *fh);
+diff --git a/arch/s390/include/asm/pci_clp.h b/arch/s390/include/asm/pci_clp.h
+index f0c677ddd27060..14afb9ce91f3bc 100644
+--- a/arch/s390/include/asm/pci_clp.h
++++ b/arch/s390/include/asm/pci_clp.h
+@@ -110,7 +110,8 @@ struct clp_req_query_pci {
+ struct clp_rsp_query_pci {
+ 	struct clp_rsp_hdr hdr;
+ 	u16 vfn;			/* virtual fn number */
+-	u16			:  3;
++	u16			:  2;
++	u16 tid_avail		:  1;
+ 	u16 rid_avail		:  1;
+ 	u16 is_physfn		:  1;
+ 	u16 reserved1		:  1;
+@@ -130,8 +131,9 @@ struct clp_rsp_query_pci {
+ 	u64 edma;			/* end dma as */
+ #define ZPCI_RID_MASK_DEVFN 0x00ff
+ 	u16 rid;			/* BUS/DEVFN PCI address */
+-	u16 reserved0;
+-	u32 reserved[10];
++	u32 reserved0;
++	u16 tid;
++	u32 reserved[9];
+ 	u32 uid;			/* user defined id */
+ 	u8 util_str[CLP_UTIL_STR_LEN];	/* utility string */
+ 	u32 reserved2[16];
+diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
+index 3317f4878eaa70..331e0654d61d78 100644
+--- a/arch/s390/kernel/perf_cpum_sf.c
++++ b/arch/s390/kernel/perf_cpum_sf.c
+@@ -1780,7 +1780,9 @@ static void cpumsf_pmu_stop(struct perf_event *event, int flags)
+ 	event->hw.state |= PERF_HES_STOPPED;
+ 
+ 	if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) {
+-		hw_perf_event_update(event, 1);
++		/* CPU hotplug off removes SDBs. No samples to extract. */
++		if (cpuhw->flags & PMU_F_RESERVED)
++			hw_perf_event_update(event, 1);
+ 		event->hw.state |= PERF_HES_UPTODATE;
+ 	}
+ 	perf_pmu_enable(event->pmu);
+diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
+index 635fd8f2acbaa2..88f72745fa59e1 100644
+--- a/arch/s390/pci/pci.c
++++ b/arch/s390/pci/pci.c
+@@ -29,6 +29,7 @@
+ #include <linux/pci.h>
+ #include <linux/printk.h>
+ #include <linux/lockdep.h>
++#include <linux/list_sort.h>
+ 
+ #include <asm/isc.h>
+ #include <asm/airq.h>
+@@ -778,8 +779,9 @@ int zpci_hot_reset_device(struct zpci_dev *zdev)
+  * @fh: Current Function Handle of the device to be created
+  * @state: Initial state after creation either Standby or Configured
+  *
+- * Creates a new zpci device and adds it to its, possibly newly created, zbus
+- * as well as zpci_list.
++ * Allocates a new struct zpci_dev and queries the platform for its details.
++ * If successful, the device can subsequently be added to the zPCI subsystem
++ * using zpci_add_device().
+  *
+  * Returns: the zdev on success or an error pointer otherwise
+  */
+@@ -788,7 +790,6 @@ struct zpci_dev *zpci_create_device(u32 fid, u32 fh, enum zpci_state state)
+ 	struct zpci_dev *zdev;
+ 	int rc;
+ 
+-	zpci_dbg(1, "add fid:%x, fh:%x, c:%d\n", fid, fh, state);
+ 	zdev = kzalloc(sizeof(*zdev), GFP_KERNEL);
+ 	if (!zdev)
+ 		return ERR_PTR(-ENOMEM);
+@@ -803,11 +804,34 @@ struct zpci_dev *zpci_create_device(u32 fid, u32 fh, enum zpci_state state)
+ 		goto error;
+ 	zdev->state =  state;
+ 
+-	kref_init(&zdev->kref);
+ 	mutex_init(&zdev->state_lock);
+ 	mutex_init(&zdev->fmb_lock);
+ 	mutex_init(&zdev->kzdev_lock);
+ 
++	return zdev;
++
++error:
++	zpci_dbg(0, "crt fid:%x, rc:%d\n", fid, rc);
++	kfree(zdev);
++	return ERR_PTR(rc);
++}
++
++/**
++ * zpci_add_device() - Add a previously created zPCI device to the zPCI subsystem
++ * @zdev: The zPCI device to be added
++ *
++ * A struct zpci_dev is added to the zPCI subsystem and to a virtual PCI bus,
++ * creating a new one as necessary. A hotplug slot is created and events start
++ * to be handled. If successful, zpci_zdev_get() and zpci_zdev_put() must be used
++ * from this point on. If adding the struct zpci_dev fails, it should be freed.
++ *
++ * Return: 0 on success, or an error code otherwise
++ */
++int zpci_add_device(struct zpci_dev *zdev)
++{
++	int rc;
++
++	zpci_dbg(1, "add fid:%x, fh:%x, c:%d\n", zdev->fid, zdev->fh, zdev->state);
+ 	rc = zpci_init_iommu(zdev);
+ 	if (rc)
+ 		goto error;
+@@ -816,18 +840,17 @@ struct zpci_dev *zpci_create_device(u32 fid, u32 fh, enum zpci_state state)
+ 	if (rc)
+ 		goto error_destroy_iommu;
+ 
++	kref_init(&zdev->kref);
+ 	spin_lock(&zpci_list_lock);
+ 	list_add_tail(&zdev->entry, &zpci_list);
+ 	spin_unlock(&zpci_list_lock);
+-
+-	return zdev;
++	return 0;
+ 
+ error_destroy_iommu:
+ 	zpci_destroy_iommu(zdev);
+ error:
+-	zpci_dbg(0, "add fid:%x, rc:%d\n", fid, rc);
+-	kfree(zdev);
+-	return ERR_PTR(rc);
++	zpci_dbg(0, "add fid:%x, rc:%d\n", zdev->fid, rc);
++	return rc;
+ }
+ 
+ bool zpci_is_device_configured(struct zpci_dev *zdev)
+@@ -1069,6 +1092,50 @@ bool zpci_is_enabled(void)
+ 	return s390_pci_initialized;
+ }
+ 
++static int zpci_cmp_rid(void *priv, const struct list_head *a,
++			const struct list_head *b)
++{
++	struct zpci_dev *za = container_of(a, struct zpci_dev, entry);
++	struct zpci_dev *zb = container_of(b, struct zpci_dev, entry);
++
++	/*
++	 * PCI functions without RID available maintain original order
++	 * between themselves but sort before those with RID.
++	 */
++	if (za->rid == zb->rid)
++		return za->rid_available > zb->rid_available;
++	/*
++	 * PCI functions with RID sort by RID ascending.
++	 */
++	return za->rid > zb->rid;
++}
++
++static void zpci_add_devices(struct list_head *scan_list)
++{
++	struct zpci_dev *zdev, *tmp;
++
++	list_sort(NULL, scan_list, &zpci_cmp_rid);
++	list_for_each_entry_safe(zdev, tmp, scan_list, entry) {
++		list_del_init(&zdev->entry);
++		if (zpci_add_device(zdev))
++			kfree(zdev);
++	}
++}
++
++int zpci_scan_devices(void)
++{
++	LIST_HEAD(scan_list);
++	int rc;
++
++	rc = clp_scan_pci_devices(&scan_list);
++	if (rc)
++		return rc;
++
++	zpci_add_devices(&scan_list);
++	zpci_bus_scan_busses();
++	return 0;
++}
++
+ static int __init pci_base_init(void)
+ {
+ 	int rc;
+@@ -1098,10 +1165,9 @@ static int __init pci_base_init(void)
+ 	if (rc)
+ 		goto out_irq;
+ 
+-	rc = clp_scan_pci_devices();
++	rc = zpci_scan_devices();
+ 	if (rc)
+ 		goto out_find;
+-	zpci_bus_scan_busses();
+ 
+ 	s390_pci_initialized = 1;
+ 	return 0;
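
list_sort() used by zpci_add_devices() above is a stable merge sort, which is
what lets functions without a RID keep their original relative order while
sorting ahead of functions that have one. A self-contained userspace sketch of
the same comparator semantics (struct and values are made up for illustration;
a stable insertion sort stands in for list_sort()):

#include <stdbool.h>
#include <stdio.h>

struct dev { unsigned int rid; bool rid_available; const char *name; };

/* same rule as zpci_cmp_rid(): > 0 means a sorts after b */
static int cmp_rid(const struct dev *a, const struct dev *b)
{
	if (a->rid == b->rid)
		return a->rid_available > b->rid_available;
	return a->rid > b->rid;		/* ascending by RID */
}

static void stable_sort(struct dev *v, int n)
{
	for (int i = 1; i < n; i++) {
		struct dev key = v[i];
		int j = i - 1;

		while (j >= 0 && cmp_rid(&v[j], &key) > 0) {
			v[j + 1] = v[j];
			j--;
		}
		v[j + 1] = key;
	}
}

int main(void)
{
	struct dev v[] = {
		{ 0x20, true, "b" }, { 0, false, "x" },
		{ 0x10, true, "a" }, { 0, false, "y" },
	};

	stable_sort(v, 4);
	for (int i = 0; i < 4; i++)
		printf("%s ", v[i].name);	/* prints: x y a b */
	printf("\n");
	return 0;
}
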
+diff --git a/arch/s390/pci/pci_bus.c b/arch/s390/pci/pci_bus.c
+index daa5d7450c7d38..1b74a000ff6459 100644
+--- a/arch/s390/pci/pci_bus.c
++++ b/arch/s390/pci/pci_bus.c
+@@ -168,9 +168,16 @@ void zpci_bus_scan_busses(void)
+ 	mutex_unlock(&zbus_list_lock);
+ }
+ 
++static bool zpci_bus_is_multifunction_root(struct zpci_dev *zdev)
++{
++	return !s390_pci_no_rid && zdev->rid_available &&
++		zpci_is_device_configured(zdev) &&
++		!zdev->vfn;
++}
++
+ /* zpci_bus_create_pci_bus - Create the PCI bus associated with this zbus
+  * @zbus: the zbus holding the zdevices
+- * @fr: PCI root function that will determine the bus's domain, and bus speeed
++ * @fr: PCI root function that will determine the bus's domain, and bus speed
+  * @ops: the pci operations
+  *
+  * The PCI function @fr determines the domain (its UID), multifunction property
+@@ -188,7 +195,7 @@ static int zpci_bus_create_pci_bus(struct zpci_bus *zbus, struct zpci_dev *fr, s
+ 		return domain;
+ 
+ 	zbus->domain_nr = domain;
+-	zbus->multifunction = fr->rid_available;
++	zbus->multifunction = zpci_bus_is_multifunction_root(fr);
+ 	zbus->max_bus_speed = fr->max_bus_speed;
+ 
+ 	/*
+@@ -232,13 +239,15 @@ static void zpci_bus_put(struct zpci_bus *zbus)
+ 	kref_put(&zbus->kref, zpci_bus_release);
+ }
+ 
+-static struct zpci_bus *zpci_bus_get(int pchid)
++static struct zpci_bus *zpci_bus_get(int topo, bool topo_is_tid)
+ {
+ 	struct zpci_bus *zbus;
+ 
+ 	mutex_lock(&zbus_list_lock);
+ 	list_for_each_entry(zbus, &zbus_list, bus_next) {
+-		if (pchid == zbus->pchid) {
++		if (!zbus->multifunction)
++			continue;
++		if (topo_is_tid == zbus->topo_is_tid && topo == zbus->topo) {
+ 			kref_get(&zbus->kref);
+ 			goto out_unlock;
+ 		}
+@@ -249,7 +258,7 @@ static struct zpci_bus *zpci_bus_get(int pchid)
+ 	return zbus;
+ }
+ 
+-static struct zpci_bus *zpci_bus_alloc(int pchid)
++static struct zpci_bus *zpci_bus_alloc(int topo, bool topo_is_tid)
+ {
+ 	struct zpci_bus *zbus;
+ 
+@@ -257,7 +266,8 @@ static struct zpci_bus *zpci_bus_alloc(int pchid)
+ 	if (!zbus)
+ 		return NULL;
+ 
+-	zbus->pchid = pchid;
++	zbus->topo = topo;
++	zbus->topo_is_tid = topo_is_tid;
+ 	INIT_LIST_HEAD(&zbus->bus_next);
+ 	mutex_lock(&zbus_list_lock);
+ 	list_add_tail(&zbus->bus_next, &zbus_list);
+@@ -292,19 +302,22 @@ static int zpci_bus_add_device(struct zpci_bus *zbus, struct zpci_dev *zdev)
+ {
+ 	int rc = -EINVAL;
+ 
++	if (zbus->multifunction) {
++		if (!zdev->rid_available) {
++			WARN_ONCE(1, "rid_available not set for multifunction\n");
++			return rc;
++		}
++		zdev->devfn = zdev->rid & ZPCI_RID_MASK_DEVFN;
++	}
++
+ 	if (zbus->function[zdev->devfn]) {
+ 		pr_err("devfn %04x is already assigned\n", zdev->devfn);
+ 		return rc;
+ 	}
+-
+ 	zdev->zbus = zbus;
+ 	zbus->function[zdev->devfn] = zdev;
+ 	zpci_nb_devices++;
+ 
+-	if (zbus->multifunction && !zdev->rid_available) {
+-		WARN_ONCE(1, "rid_available not set for multifunction\n");
+-		goto error;
+-	}
+ 	rc = zpci_init_slot(zdev);
+ 	if (rc)
+ 		goto error;
+@@ -321,8 +334,9 @@ static int zpci_bus_add_device(struct zpci_bus *zbus, struct zpci_dev *zdev)
+ 
+ int zpci_bus_device_register(struct zpci_dev *zdev, struct pci_ops *ops)
+ {
++	bool topo_is_tid = zdev->tid_avail;
+ 	struct zpci_bus *zbus = NULL;
+-	int rc = -EBADF;
++	int topo, rc = -EBADF;
+ 
+ 	if (zpci_nb_devices == ZPCI_NR_DEVICES) {
+ 		pr_warn("Adding PCI function %08x failed because the configured limit of %d is reached\n",
+@@ -330,14 +344,10 @@ int zpci_bus_device_register(struct zpci_dev *zdev, struct pci_ops *ops)
+ 		return -ENOSPC;
+ 	}
+ 
+-	if (zdev->devfn >= ZPCI_FUNCTIONS_PER_BUS)
+-		return -EINVAL;
+-
+-	if (!s390_pci_no_rid && zdev->rid_available)
+-		zbus = zpci_bus_get(zdev->pchid);
+-
++	topo = topo_is_tid ? zdev->tid : zdev->pchid;
++	zbus = zpci_bus_get(topo, topo_is_tid);
+ 	if (!zbus) {
+-		zbus = zpci_bus_alloc(zdev->pchid);
++		zbus = zpci_bus_alloc(topo, topo_is_tid);
+ 		if (!zbus)
+ 			return -ENOMEM;
+ 	}
+diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
+index 6f55a59a087115..74dac6da03d5bb 100644
+--- a/arch/s390/pci/pci_clp.c
++++ b/arch/s390/pci/pci_clp.c
+@@ -164,10 +164,13 @@ static int clp_store_query_pci_fn(struct zpci_dev *zdev,
+ 	zdev->port = response->port;
+ 	zdev->uid = response->uid;
+ 	zdev->fmb_length = sizeof(u32) * response->fmb_len;
+-	zdev->rid_available = response->rid_avail;
+ 	zdev->is_physfn = response->is_physfn;
+-	if (!s390_pci_no_rid && zdev->rid_available)
+-		zdev->devfn = response->rid & ZPCI_RID_MASK_DEVFN;
++	zdev->rid_available = response->rid_avail;
++	if (zdev->rid_available)
++		zdev->rid = response->rid;
++	zdev->tid_avail = response->tid_avail;
++	if (zdev->tid_avail)
++		zdev->tid = response->tid;
+ 
+ 	memcpy(zdev->pfip, response->pfip, sizeof(zdev->pfip));
+ 	if (response->util_str_avail) {
+@@ -407,6 +410,7 @@ static int clp_find_pci(struct clp_req_rsp_list_pci *rrb, u32 fid,
+ 
+ static void __clp_add(struct clp_fh_list_entry *entry, void *data)
+ {
++	struct list_head *scan_list = data;
+ 	struct zpci_dev *zdev;
+ 
+ 	if (!entry->vendor_id)
+@@ -417,10 +421,11 @@ static void __clp_add(struct clp_fh_list_entry *entry, void *data)
+ 		zpci_zdev_put(zdev);
+ 		return;
+ 	}
+-	zpci_create_device(entry->fid, entry->fh, entry->config_state);
++	zdev = zpci_create_device(entry->fid, entry->fh, entry->config_state);
++	list_add_tail(&zdev->entry, scan_list);
+ }
+ 
+-int clp_scan_pci_devices(void)
++int clp_scan_pci_devices(struct list_head *scan_list)
+ {
+ 	struct clp_req_rsp_list_pci *rrb;
+ 	int rc;
+@@ -429,7 +434,7 @@ int clp_scan_pci_devices(void)
+ 	if (!rrb)
+ 		return -ENOMEM;
+ 
+-	rc = clp_list_pci(rrb, NULL, __clp_add);
++	rc = clp_list_pci(rrb, scan_list, __clp_add);
+ 
+ 	clp_free_block(rrb);
+ 	return rc;
+diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c
+index d4f19d33914cbc..7f7b732b3f3efa 100644
+--- a/arch/s390/pci/pci_event.c
++++ b/arch/s390/pci/pci_event.c
+@@ -340,6 +340,10 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
+ 			zdev = zpci_create_device(ccdf->fid, ccdf->fh, ZPCI_FN_STATE_CONFIGURED);
+ 			if (IS_ERR(zdev))
+ 				break;
++			if (zpci_add_device(zdev)) {
++				kfree(zdev);
++				break;
++			}
+ 		} else {
+ 			/* the configuration request may be stale */
+ 			if (zdev->state != ZPCI_FN_STATE_STANDBY)
+@@ -349,10 +353,17 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
+ 		zpci_scan_configured_device(zdev, ccdf->fh);
+ 		break;
+ 	case 0x0302: /* Reserved -> Standby */
+-		if (!zdev)
+-			zpci_create_device(ccdf->fid, ccdf->fh, ZPCI_FN_STATE_STANDBY);
+-		else
++		if (!zdev) {
++			zdev = zpci_create_device(ccdf->fid, ccdf->fh, ZPCI_FN_STATE_STANDBY);
++			if (IS_ERR(zdev))
++				break;
++			if (zpci_add_device(zdev)) {
++				kfree(zdev);
++				break;
++			}
++		} else {
+ 			zpci_update_fh(zdev, ccdf->fh);
++		}
+ 		break;
+ 	case 0x0303: /* Deconfiguration requested */
+ 		if (zdev) {
+@@ -381,7 +392,7 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
+ 		break;
+ 	case 0x0306: /* 0x308 or 0x302 for multiple devices */
+ 		zpci_remove_reserved_devices();
+-		clp_scan_pci_devices();
++		zpci_scan_devices();
+ 		break;
+ 	case 0x0308: /* Standby -> Reserved */
+ 		if (!zdev)
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index 7b9a7e8f39acc8..171be04eca1f5d 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -145,7 +145,6 @@ config X86
+ 	select ARCH_HAS_PARANOID_L1D_FLUSH
+ 	select BUILDTIME_TABLE_SORT
+ 	select CLKEVT_I8253
+-	select CLOCKSOURCE_VALIDATE_LAST_CYCLE
+ 	select CLOCKSOURCE_WATCHDOG
+ 	# Word-size accesses may read uninitialized data past the trailing \0
+ 	# in strings and cause false KMSAN reports.
+diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
+index 920e3a640caddd..b4a1a2576510e0 100644
+--- a/arch/x86/events/amd/core.c
++++ b/arch/x86/events/amd/core.c
+@@ -943,11 +943,12 @@ static int amd_pmu_v2_snapshot_branch_stack(struct perf_branch_entry *entries, u
+ static int amd_pmu_v2_handle_irq(struct pt_regs *regs)
+ {
+ 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
++	static atomic64_t status_warned = ATOMIC64_INIT(0);
++	u64 reserved, status, mask, new_bits, prev_bits;
+ 	struct perf_sample_data data;
+ 	struct hw_perf_event *hwc;
+ 	struct perf_event *event;
+ 	int handled = 0, idx;
+-	u64 reserved, status, mask;
+ 	bool pmu_enabled;
+ 
+ 	/*
+@@ -1012,7 +1013,12 @@ static int amd_pmu_v2_handle_irq(struct pt_regs *regs)
+ 	 * the corresponding PMCs are expected to be inactive according to the
+ 	 * active_mask
+ 	 */
+-	WARN_ON(status > 0);
++	if (status > 0) {
++		prev_bits = atomic64_fetch_or(status, &status_warned);
++		// A new bit was set for the very first time.
++		new_bits = status & ~prev_bits;
++		WARN(new_bits, "New overflows for inactive PMCs: %llx\n", new_bits);
++	}
+ 
+ 	/* Clear overflow and freeze bits */
+ 	amd_pmu_ack_global_status(~status);
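
The status check above is a warn-once-per-bit pattern: an atomic fetch-or
records every overflow bit ever observed, and the warning fires only for bits
seen for the first time, so a stuck counter cannot flood the log. A standalone
sketch of the same idea using C11 atomics (illustrative only):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t status_warned;

static void check_status(uint64_t status)
{
	/* record the bits and fetch what had been recorded before */
	uint64_t prev = atomic_fetch_or(&status_warned, status);
	uint64_t new_bits = status & ~prev;

	if (new_bits)
		printf("WARN: new overflows for inactive PMCs: %#llx\n",
		       (unsigned long long)new_bits);
}

int main(void)
{
	check_status(0x5);	/* warns: bits 0 and 2 are new */
	check_status(0x5);	/* silent: nothing new */
	check_status(0x7);	/* warns: only bit 1 is new */
	return 0;
}
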
+diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
+index 6f82e75b61494e..4b804531b03c3c 100644
+--- a/arch/x86/include/asm/pgtable_types.h
++++ b/arch/x86/include/asm/pgtable_types.h
+@@ -36,10 +36,12 @@
+ #define _PAGE_BIT_DEVMAP	_PAGE_BIT_SOFTW4
+ 
+ #ifdef CONFIG_X86_64
+-#define _PAGE_BIT_SAVED_DIRTY	_PAGE_BIT_SOFTW5 /* Saved Dirty bit */
++#define _PAGE_BIT_SAVED_DIRTY	_PAGE_BIT_SOFTW5 /* Saved Dirty bit (leaf) */
++#define _PAGE_BIT_NOPTISHADOW	_PAGE_BIT_SOFTW5 /* No PTI shadow (root PGD) */
+ #else
+ /* Shared with _PAGE_BIT_UFFD_WP which is not supported on 32 bit */
+-#define _PAGE_BIT_SAVED_DIRTY	_PAGE_BIT_SOFTW2 /* Saved Dirty bit */
++#define _PAGE_BIT_SAVED_DIRTY	_PAGE_BIT_SOFTW2 /* Saved Dirty bit (leaf) */
++#define _PAGE_BIT_NOPTISHADOW	_PAGE_BIT_SOFTW2 /* No PTI shadow (root PGD) */
+ #endif
+ 
+ /* If _PAGE_BIT_PRESENT is clear, we use these: */
+@@ -139,6 +141,8 @@
+ 
+ #define _PAGE_PROTNONE	(_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)
+ 
++#define _PAGE_NOPTISHADOW (_AT(pteval_t, 1) << _PAGE_BIT_NOPTISHADOW)
++
+ /*
+  * Set of bits not changed in pte_modify.  The pte's
+  * protection key is treated like _PAGE_RW, for
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index d8408aafeed988..79d2e17f6582e9 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -1065,7 +1065,7 @@ static void init_amd(struct cpuinfo_x86 *c)
+ 	 */
+ 	if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
+ 	    cpu_has(c, X86_FEATURE_AUTOIBRS))
+-		WARN_ON_ONCE(msr_set_bit(MSR_EFER, _EFER_AUTOIBRS));
++		WARN_ON_ONCE(msr_set_bit(MSR_EFER, _EFER_AUTOIBRS) < 0);
+ 
+ 	/* AMD CPUs don't need fencing after x2APIC/TSC_DEADLINE MSR writes. */
+ 	clear_cpu_cap(c, X86_FEATURE_APIC_MSRS_FENCE);
+diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c
+index 392d09c936d60c..e6fa03ed9172c0 100644
+--- a/arch/x86/kernel/cpu/cacheinfo.c
++++ b/arch/x86/kernel/cpu/cacheinfo.c
+@@ -178,8 +178,6 @@ struct _cpuid4_info_regs {
+ 	struct amd_northbridge *nb;
+ };
+ 
+-static unsigned short num_cache_leaves;
+-
+ /* AMD doesn't have CPUID4. Emulate it here to report the same
+    information to the user.  This makes some assumptions about the machine:
+    L2 not shared, no SMT etc. that is currently true on AMD CPUs.
+@@ -717,20 +715,23 @@ void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c)
+ 
+ void init_amd_cacheinfo(struct cpuinfo_x86 *c)
+ {
++	struct cpu_cacheinfo *ci = get_cpu_cacheinfo(c->cpu_index);
+ 
+ 	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
+-		num_cache_leaves = find_num_cache_leaves(c);
++		ci->num_leaves = find_num_cache_leaves(c);
+ 	} else if (c->extended_cpuid_level >= 0x80000006) {
+ 		if (cpuid_edx(0x80000006) & 0xf000)
+-			num_cache_leaves = 4;
++			ci->num_leaves = 4;
+ 		else
+-			num_cache_leaves = 3;
++			ci->num_leaves = 3;
+ 	}
+ }
+ 
+ void init_hygon_cacheinfo(struct cpuinfo_x86 *c)
+ {
+-	num_cache_leaves = find_num_cache_leaves(c);
++	struct cpu_cacheinfo *ci = get_cpu_cacheinfo(c->cpu_index);
++
++	ci->num_leaves = find_num_cache_leaves(c);
+ }
+ 
+ void init_intel_cacheinfo(struct cpuinfo_x86 *c)
+@@ -740,21 +741,21 @@ void init_intel_cacheinfo(struct cpuinfo_x86 *c)
+ 	unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
+ 	unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
+ 	unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
++	struct cpu_cacheinfo *ci = get_cpu_cacheinfo(c->cpu_index);
+ 
+ 	if (c->cpuid_level > 3) {
+-		static int is_initialized;
+-
+-		if (is_initialized == 0) {
+-			/* Init num_cache_leaves from boot CPU */
+-			num_cache_leaves = find_num_cache_leaves(c);
+-			is_initialized++;
+-		}
++		/*
++		 * There should be at least one leaf. A non-zero value means
++		 * that the number of leaves has been initialized.
++		 */
++		if (!ci->num_leaves)
++			ci->num_leaves = find_num_cache_leaves(c);
+ 
+ 		/*
+ 		 * Whenever possible use cpuid(4), deterministic cache
+ 		 * parameters cpuid leaf to find the cache details
+ 		 */
+-		for (i = 0; i < num_cache_leaves; i++) {
++		for (i = 0; i < ci->num_leaves; i++) {
+ 			struct _cpuid4_info_regs this_leaf = {};
+ 			int retval;
+ 
+@@ -790,14 +791,14 @@ void init_intel_cacheinfo(struct cpuinfo_x86 *c)
+ 	 * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
+ 	 * trace cache
+ 	 */
+-	if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
++	if ((!ci->num_leaves || c->x86 == 15) && c->cpuid_level > 1) {
+ 		/* supports eax=2  call */
+ 		int j, n;
+ 		unsigned int regs[4];
+ 		unsigned char *dp = (unsigned char *)regs;
+ 		int only_trace = 0;
+ 
+-		if (num_cache_leaves != 0 && c->x86 == 15)
++		if (ci->num_leaves && c->x86 == 15)
+ 			only_trace = 1;
+ 
+ 		/* Number of times to iterate */
+@@ -991,14 +992,12 @@ static void ci_leaf_init(struct cacheinfo *this_leaf,
+ 
+ int init_cache_level(unsigned int cpu)
+ {
+-	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
++	struct cpu_cacheinfo *ci = get_cpu_cacheinfo(cpu);
+ 
+-	if (!num_cache_leaves)
++	/* There should be at least one leaf. */
++	if (!ci->num_leaves)
+ 		return -ENOENT;
+-	if (!this_cpu_ci)
+-		return -EINVAL;
+-	this_cpu_ci->num_levels = 3;
+-	this_cpu_ci->num_leaves = num_cache_leaves;
++
+ 	return 0;
+ }
+ 
+diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
+index e7656cbef68d54..4b5f3d0521517a 100644
+--- a/arch/x86/kernel/cpu/intel.c
++++ b/arch/x86/kernel/cpu/intel.c
+@@ -586,7 +586,9 @@ static void init_intel(struct cpuinfo_x86 *c)
+ 	     c->x86_vfm == INTEL_WESTMERE_EX))
+ 		set_cpu_bug(c, X86_BUG_CLFLUSH_MONITOR);
+ 
+-	if (boot_cpu_has(X86_FEATURE_MWAIT) && c->x86_vfm == INTEL_ATOM_GOLDMONT)
++	if (boot_cpu_has(X86_FEATURE_MWAIT) &&
++	    (c->x86_vfm == INTEL_ATOM_GOLDMONT ||
++	     c->x86_vfm == INTEL_LUNARLAKE_M))
+ 		set_cpu_bug(c, X86_BUG_MONITOR);
+ 
+ #ifdef CONFIG_X86_64
+diff --git a/arch/x86/kernel/cpu/topology.c b/arch/x86/kernel/cpu/topology.c
+index 621a151ccf7d0a..b2e313ea17bf6f 100644
+--- a/arch/x86/kernel/cpu/topology.c
++++ b/arch/x86/kernel/cpu/topology.c
+@@ -428,8 +428,8 @@ void __init topology_apply_cmdline_limits_early(void)
+ {
+ 	unsigned int possible = nr_cpu_ids;
+ 
+-	/* 'maxcpus=0' 'nosmp' 'nolapic' 'disableapic' 'noapic' */
+-	if (!setup_max_cpus || ioapic_is_disabled || apic_is_disabled)
++	/* 'maxcpus=0' 'nosmp' 'nolapic' 'disableapic' */
++	if (!setup_max_cpus || apic_is_disabled)
+ 		possible = 1;
+ 
+ 	/* 'possible_cpus=N' */
+@@ -443,7 +443,7 @@ void __init topology_apply_cmdline_limits_early(void)
+ 
+ static __init bool restrict_to_up(void)
+ {
+-	if (!smp_found_config || ioapic_is_disabled)
++	if (!smp_found_config)
+ 		return true;
+ 	/*
+ 	 * XEN PV is special as it does not advertise the local APIC
+diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
+index 1065ab995305cd..8f62e0666dea51 100644
+--- a/arch/x86/kernel/fpu/signal.c
++++ b/arch/x86/kernel/fpu/signal.c
+@@ -63,16 +63,6 @@ static inline bool check_xstate_in_sigframe(struct fxregs_state __user *fxbuf,
+ 	return true;
+ }
+ 
+-/*
+- * Update the value of PKRU register that was already pushed onto the signal frame.
+- */
+-static inline int update_pkru_in_sigframe(struct xregs_state __user *buf, u32 pkru)
+-{
+-	if (unlikely(!cpu_feature_enabled(X86_FEATURE_OSPKE)))
+-		return 0;
+-	return __put_user(pkru, (unsigned int __user *)get_xsave_addr_user(buf, XFEATURE_PKRU));
+-}
+-
+ /*
+  * Signal frame handlers.
+  */
+@@ -168,14 +158,8 @@ static inline bool save_xstate_epilog(void __user *buf, int ia32_frame,
+ 
+ static inline int copy_fpregs_to_sigframe(struct xregs_state __user *buf, u32 pkru)
+ {
+-	int err = 0;
+-
+-	if (use_xsave()) {
+-		err = xsave_to_user_sigframe(buf);
+-		if (!err)
+-			err = update_pkru_in_sigframe(buf, pkru);
+-		return err;
+-	}
++	if (use_xsave())
++		return xsave_to_user_sigframe(buf, pkru);
+ 
+ 	if (use_fxsr())
+ 		return fxsave_to_user_sigframe((struct fxregs_state __user *) buf);
+diff --git a/arch/x86/kernel/fpu/xstate.h b/arch/x86/kernel/fpu/xstate.h
+index 0b86a5002c846d..aa16f1a1bbcf17 100644
+--- a/arch/x86/kernel/fpu/xstate.h
++++ b/arch/x86/kernel/fpu/xstate.h
+@@ -69,6 +69,28 @@ static inline u64 xfeatures_mask_independent(void)
+ 	return fpu_kernel_cfg.independent_features;
+ }
+ 
++/*
++ * Update the value of PKRU register that was already pushed onto the signal frame.
++ */
++static inline int update_pkru_in_sigframe(struct xregs_state __user *buf, u64 mask, u32 pkru)
++{
++	u64 xstate_bv;
++	int err;
++
++	if (unlikely(!cpu_feature_enabled(X86_FEATURE_OSPKE)))
++		return 0;
++
++	/* Mark PKRU as in-use so that it is restored correctly. */
++	xstate_bv = (mask & xfeatures_in_use()) | XFEATURE_MASK_PKRU;
++
++	err = __put_user(xstate_bv, &buf->header.xfeatures);
++	if (err)
++		return err;
++
++	/* Update PKRU value in the userspace xsave buffer. */
++	return __put_user(pkru, (unsigned int __user *)get_xsave_addr_user(buf, XFEATURE_PKRU));
++}
++
+ /* XSAVE/XRSTOR wrapper functions */
+ 
+ #ifdef CONFIG_X86_64
+@@ -256,7 +278,7 @@ static inline u64 xfeatures_need_sigframe_write(void)
+  * The caller has to zero buf::header before calling this because XSAVE*
+  * does not touch the reserved fields in the header.
+  */
+-static inline int xsave_to_user_sigframe(struct xregs_state __user *buf)
++static inline int xsave_to_user_sigframe(struct xregs_state __user *buf, u32 pkru)
+ {
+ 	/*
+ 	 * Include the features which are not xsaved/rstored by the kernel
+@@ -281,6 +303,9 @@ static inline int xsave_to_user_sigframe(struct xregs_state __user *buf)
+ 	XSTATE_OP(XSAVE, buf, lmask, hmask, err);
+ 	clac();
+ 
++	if (!err)
++		err = update_pkru_in_sigframe(buf, mask, pkru);
++
+ 	return err;
+ }
+ 
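
The xstate.h change above makes the signal frame mark PKRU as in-use in
xstate_bv in addition to writing its value; without that bit, XRSTOR would
treat PKRU as being in its init state and discard the value the kernel just
stored. The essential bit math as a tiny standalone program (PKRU is xstate
component 9; the sample mask values are made up):

#include <stdint.h>
#include <stdio.h>

#define XFEATURE_MASK_PKRU	(1ULL << 9)

static uint64_t sigframe_xstate_bv(uint64_t mask, uint64_t in_use)
{
	/* keep only features XSAVE actually wrote, then force PKRU on,
	 * since its value is written into the buffer by hand */
	return (mask & in_use) | XFEATURE_MASK_PKRU;
}

int main(void)
{
	uint64_t mask = 0x7ULL | XFEATURE_MASK_PKRU;	/* requested */
	uint64_t in_use = 0x3ULL;			/* tracked as live */

	/* prints 0x203: FP+SSE kept, PKRU forced on */
	printf("xstate_bv = %#llx\n",
	       (unsigned long long)sigframe_xstate_bv(mask, in_use));
	return 0;
}
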
+diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
+index e9e88c342f752e..540443d699e3c2 100644
+--- a/arch/x86/kernel/relocate_kernel_64.S
++++ b/arch/x86/kernel/relocate_kernel_64.S
+@@ -13,6 +13,7 @@
+ #include <asm/pgtable_types.h>
+ #include <asm/nospec-branch.h>
+ #include <asm/unwind_hints.h>
++#include <asm/asm-offsets.h>
+ 
+ /*
+  * Must be relocatable PIC code callable as a C function, in particular
+@@ -242,6 +243,13 @@ SYM_CODE_START_LOCAL_NOALIGN(virtual_mapped)
+ 	movq	CR0(%r8), %r8
+ 	movq	%rax, %cr3
+ 	movq	%r8, %cr0
++
++#ifdef CONFIG_KEXEC_JUMP
++	/* Saved in save_processor_state. */
++	movq    $saved_context, %rax
++	lgdt    saved_context_gdt_desc(%rax)
++#endif
++
+ 	movq	%rbp, %rax
+ 
+ 	popf
+diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
+index 3e353ed1f76736..1b4438e24814b4 100644
+--- a/arch/x86/kvm/mmu/mmu.c
++++ b/arch/x86/kvm/mmu/mmu.c
+@@ -4580,6 +4580,7 @@ static bool is_page_fault_stale(struct kvm_vcpu *vcpu,
+ 
+ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
+ {
++	kvm_pfn_t orig_pfn;
+ 	int r;
+ 
+ 	/* Dummy roots are used only for shadowing bad guest roots. */
+@@ -4601,6 +4602,8 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
+ 	if (r != RET_PF_CONTINUE)
+ 		return r;
+ 
++	orig_pfn = fault->pfn;
++
+ 	r = RET_PF_RETRY;
+ 	write_lock(&vcpu->kvm->mmu_lock);
+ 
+@@ -4615,7 +4618,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
+ 
+ out_unlock:
+ 	write_unlock(&vcpu->kvm->mmu_lock);
+-	kvm_release_pfn_clean(fault->pfn);
++	kvm_release_pfn_clean(orig_pfn);
+ 	return r;
+ }
+ 
+@@ -4675,6 +4678,7 @@ EXPORT_SYMBOL_GPL(kvm_handle_page_fault);
+ static int kvm_tdp_mmu_page_fault(struct kvm_vcpu *vcpu,
+ 				  struct kvm_page_fault *fault)
+ {
++	kvm_pfn_t orig_pfn;
+ 	int r;
+ 
+ 	if (page_fault_handle_page_track(vcpu, fault))
+@@ -4692,6 +4696,8 @@ static int kvm_tdp_mmu_page_fault(struct kvm_vcpu *vcpu,
+ 	if (r != RET_PF_CONTINUE)
+ 		return r;
+ 
++	orig_pfn = fault->pfn;
++
+ 	r = RET_PF_RETRY;
+ 	read_lock(&vcpu->kvm->mmu_lock);
+ 
+@@ -4702,7 +4708,7 @@ static int kvm_tdp_mmu_page_fault(struct kvm_vcpu *vcpu,
+ 
+ out_unlock:
+ 	read_unlock(&vcpu->kvm->mmu_lock);
+-	kvm_release_pfn_clean(fault->pfn);
++	kvm_release_pfn_clean(orig_pfn);
+ 	return r;
+ }
+ #endif
+diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
+index ae7d39ff2d07f0..b08017683920f0 100644
+--- a/arch/x86/kvm/mmu/paging_tmpl.h
++++ b/arch/x86/kvm/mmu/paging_tmpl.h
+@@ -778,6 +778,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
+ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
+ {
+ 	struct guest_walker walker;
++	kvm_pfn_t orig_pfn;
+ 	int r;
+ 
+ 	WARN_ON_ONCE(fault->is_tdp);
+@@ -836,6 +837,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
+ 			walker.pte_access &= ~ACC_EXEC_MASK;
+ 	}
+ 
++	orig_pfn = fault->pfn;
++
+ 	r = RET_PF_RETRY;
+ 	write_lock(&vcpu->kvm->mmu_lock);
+ 
+@@ -849,7 +852,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
+ 
+ out_unlock:
+ 	write_unlock(&vcpu->kvm->mmu_lock);
+-	kvm_release_pfn_clean(fault->pfn);
++	kvm_release_pfn_clean(orig_pfn);
+ 	return r;
+ }
+ 
+diff --git a/arch/x86/mm/ident_map.c b/arch/x86/mm/ident_map.c
+index 437e96fb497734..5ab7bd2f1983c1 100644
+--- a/arch/x86/mm/ident_map.c
++++ b/arch/x86/mm/ident_map.c
+@@ -174,7 +174,7 @@ static int ident_p4d_init(struct x86_mapping_info *info, p4d_t *p4d_page,
+ 		if (result)
+ 			return result;
+ 
+-		set_p4d(p4d, __p4d(__pa(pud) | info->kernpg_flag));
++		set_p4d(p4d, __p4d(__pa(pud) | info->kernpg_flag | _PAGE_NOPTISHADOW));
+ 	}
+ 
+ 	return 0;
+@@ -218,14 +218,14 @@ int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
+ 		if (result)
+ 			return result;
+ 		if (pgtable_l5_enabled()) {
+-			set_pgd(pgd, __pgd(__pa(p4d) | info->kernpg_flag));
++			set_pgd(pgd, __pgd(__pa(p4d) | info->kernpg_flag | _PAGE_NOPTISHADOW));
+ 		} else {
+ 			/*
+ 			 * With p4d folded, pgd is equal to p4d.
+ 			 * The pgd entry has to point to the pud page table in this case.
+ 			 */
+ 			pud_t *pud = pud_offset(p4d, 0);
+-			set_pgd(pgd, __pgd(__pa(pud) | info->kernpg_flag));
++			set_pgd(pgd, __pgd(__pa(pud) | info->kernpg_flag | _PAGE_NOPTISHADOW));
+ 		}
+ 	}
+ 
+diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
+index 851ec8f1363a8b..5f0d579932c688 100644
+--- a/arch/x86/mm/pti.c
++++ b/arch/x86/mm/pti.c
+@@ -132,7 +132,7 @@ pgd_t __pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
+ 	 * Top-level entries added to init_mm's usermode pgd after boot
+ 	 * will not be automatically propagated to other mms.
+ 	 */
+-	if (!pgdp_maps_userspace(pgdp))
++	if (!pgdp_maps_userspace(pgdp) || (pgd.pgd & _PAGE_NOPTISHADOW))
+ 		return pgd;
+ 
+ 	/*
+diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
+index 55c4b07ec1f631..0c316bae1726ee 100644
+--- a/arch/x86/pci/acpi.c
++++ b/arch/x86/pci/acpi.c
+@@ -250,6 +250,125 @@ void __init pci_acpi_crs_quirks(void)
+ 		pr_info("Please notify linux-pci@vger.kernel.org so future kernels can do this automatically\n");
+ }
+ 
++/*
++ * Check if pdev is part of a PCIe switch that is directly below the
++ * specified bridge.
++ */
++static bool pcie_switch_directly_under(struct pci_dev *bridge,
++				       struct pci_dev *pdev)
++{
++	struct pci_dev *parent = pci_upstream_bridge(pdev);
++
++	/* If the device doesn't have a parent, it's not under anything */
++	if (!parent)
++		return false;
++
++	/*
++	 * If the device has a PCIe type, check if it is below the
++	 * corresponding PCIe switch components (if applicable). Then check
++	 * if its upstream port is directly beneath the specified bridge.
++	 */
++	switch (pci_pcie_type(pdev)) {
++	case PCI_EXP_TYPE_UPSTREAM:
++		return parent == bridge;
++
++	case PCI_EXP_TYPE_DOWNSTREAM:
++		if (pci_pcie_type(parent) != PCI_EXP_TYPE_UPSTREAM)
++			return false;
++		parent = pci_upstream_bridge(parent);
++		return parent == bridge;
++
++	case PCI_EXP_TYPE_ENDPOINT:
++		if (pci_pcie_type(parent) != PCI_EXP_TYPE_DOWNSTREAM)
++			return false;
++		parent = pci_upstream_bridge(parent);
++		if (!parent || pci_pcie_type(parent) != PCI_EXP_TYPE_UPSTREAM)
++			return false;
++		parent = pci_upstream_bridge(parent);
++		return parent == bridge;
++	}
++
++	return false;
++}
++
++static bool pcie_has_usb4_host_interface(struct pci_dev *pdev)
++{
++	struct fwnode_handle *fwnode;
++
++	/*
++	 * For USB4, the tunneled PCIe Root or Downstream Ports are marked
++	 * with the "usb4-host-interface" ACPI property, so we look for
++	 * that first. This should cover most cases.
++	 */
++	fwnode = fwnode_find_reference(dev_fwnode(&pdev->dev),
++				       "usb4-host-interface", 0);
++	if (!IS_ERR(fwnode)) {
++		fwnode_handle_put(fwnode);
++		return true;
++	}
++
++	/*
++	 * Any integrated Thunderbolt 3/4 PCIe Root Ports from Intel
++	 * before Alder Lake do not have the "usb4-host-interface"
++	 * property so we use their PCI IDs instead. All these are
++	 * tunneled. This list is not expected to grow.
++	 */
++	if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
++		switch (pdev->device) {
++		/* Ice Lake Thunderbolt 3 PCIe Root Ports */
++		case 0x8a1d:
++		case 0x8a1f:
++		case 0x8a21:
++		case 0x8a23:
++		/* Tiger Lake-LP Thunderbolt 4 PCIe Root Ports */
++		case 0x9a23:
++		case 0x9a25:
++		case 0x9a27:
++		case 0x9a29:
++		/* Tiger Lake-H Thunderbolt 4 PCIe Root Ports */
++		case 0x9a2b:
++		case 0x9a2d:
++		case 0x9a2f:
++		case 0x9a31:
++			return true;
++		}
++	}
++
++	return false;
++}
++
++bool arch_pci_dev_is_removable(struct pci_dev *pdev)
++{
++	struct pci_dev *parent, *root;
++
++	/* pdev without a parent or Root Port is never tunneled */
++	parent = pci_upstream_bridge(pdev);
++	if (!parent)
++		return false;
++	root = pcie_find_root_port(pdev);
++	if (!root)
++		return false;
++
++	/* Internal PCIe devices are not tunneled */
++	if (!root->external_facing)
++		return false;
++
++	/* Anything directly behind a "usb4-host-interface" is tunneled */
++	if (pcie_has_usb4_host_interface(parent))
++		return true;
++
++	/*
++	 * Check if this is a discrete Thunderbolt/USB4 controller that is
++	 * directly behind the non-USB4 PCIe Root Port marked as
++	 * "ExternalFacingPort". Those are not behind a PCIe tunnel.
++	 */
++	if (pcie_switch_directly_under(root, pdev))
++		return false;
++
++	/* PCIe devices after the discrete chip are tunneled */
++	return true;
++}
++
+ #ifdef	CONFIG_PCI_MMCONFIG
+ static int check_segment(u16 seg, struct device *dev, char *estr)
+ {
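
arch_pci_dev_is_removable() above only treats devices as non-tunneled when the
whole discrete Thunderbolt/USB4 switch hangs directly off the external-facing
Root Port, which is what pcie_switch_directly_under() verifies by walking up
the bridge chain. A toy sketch of that upstream walk (port types and fields
are simplified stand-ins for the real struct pci_dev):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

enum port_type { ROOT_PORT, UPSTREAM, DOWNSTREAM, ENDPOINT };

struct dev { enum port_type type; struct dev *parent; };

static bool directly_under(const struct dev *bridge, const struct dev *pdev)
{
	const struct dev *p = pdev->parent;

	if (!p)
		return false;

	switch (pdev->type) {
	case UPSTREAM:			/* bridge -> upstream */
		return p == bridge;
	case DOWNSTREAM:		/* bridge -> upstream -> downstream */
		if (p->type != UPSTREAM)
			return false;
		return p->parent == bridge;
	case ENDPOINT:			/* one level further down */
		if (p->type != DOWNSTREAM)
			return false;
		p = p->parent;
		if (!p || p->type != UPSTREAM)
			return false;
		return p->parent == bridge;
	default:
		return false;
	}
}

int main(void)
{
	struct dev root = { ROOT_PORT, NULL };
	struct dev up   = { UPSTREAM, &root };
	struct dev down = { DOWNSTREAM, &up };
	struct dev ep   = { ENDPOINT, &down };

	/* prints: 1 1 1 */
	printf("%d %d %d\n", directly_under(&root, &up),
	       directly_under(&root, &down), directly_under(&root, &ep));
	return 0;
}
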
+diff --git a/block/blk-zoned.c b/block/blk-zoned.c
+index 95e517723db3e4..0b1184176ce77a 100644
+--- a/block/blk-zoned.c
++++ b/block/blk-zoned.c
+@@ -350,9 +350,15 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode,
+ 
+ static inline bool disk_zone_is_conv(struct gendisk *disk, sector_t sector)
+ {
+-	if (!disk->conv_zones_bitmap)
+-		return false;
+-	return test_bit(disk_zone_no(disk, sector), disk->conv_zones_bitmap);
++	unsigned long *bitmap;
++	bool is_conv;
++
++	rcu_read_lock();
++	bitmap = rcu_dereference(disk->conv_zones_bitmap);
++	is_conv = bitmap && test_bit(disk_zone_no(disk, sector), bitmap);
++	rcu_read_unlock();
++
++	return is_conv;
+ }
+ 
+ static bool disk_zone_is_last(struct gendisk *disk, struct blk_zone *zone)
+@@ -1455,6 +1461,24 @@ static void disk_destroy_zone_wplugs_hash_table(struct gendisk *disk)
+ 	disk->zone_wplugs_hash_bits = 0;
+ }
+ 
++static unsigned int disk_set_conv_zones_bitmap(struct gendisk *disk,
++					       unsigned long *bitmap)
++{
++	unsigned int nr_conv_zones = 0;
++	unsigned long flags;
++
++	spin_lock_irqsave(&disk->zone_wplugs_lock, flags);
++	if (bitmap)
++		nr_conv_zones = bitmap_weight(bitmap, disk->nr_zones);
++	bitmap = rcu_replace_pointer(disk->conv_zones_bitmap, bitmap,
++				     lockdep_is_held(&disk->zone_wplugs_lock));
++	spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags);
++
++	kfree_rcu_mightsleep(bitmap);
++
++	return nr_conv_zones;
++}
++
+ void disk_free_zone_resources(struct gendisk *disk)
+ {
+ 	if (!disk->zone_wplugs_pool)
+@@ -1478,8 +1502,7 @@ void disk_free_zone_resources(struct gendisk *disk)
+ 	mempool_destroy(disk->zone_wplugs_pool);
+ 	disk->zone_wplugs_pool = NULL;
+ 
+-	bitmap_free(disk->conv_zones_bitmap);
+-	disk->conv_zones_bitmap = NULL;
++	disk_set_conv_zones_bitmap(disk, NULL);
+ 	disk->zone_capacity = 0;
+ 	disk->last_zone_capacity = 0;
+ 	disk->nr_zones = 0;
+@@ -1538,7 +1561,7 @@ static int disk_update_zone_resources(struct gendisk *disk,
+ 				      struct blk_revalidate_zone_args *args)
+ {
+ 	struct request_queue *q = disk->queue;
+-	unsigned int nr_seq_zones, nr_conv_zones = 0;
++	unsigned int nr_seq_zones, nr_conv_zones;
+ 	unsigned int pool_size;
+ 	struct queue_limits lim;
+ 	int ret;
+@@ -1546,10 +1569,8 @@ static int disk_update_zone_resources(struct gendisk *disk,
+ 	disk->nr_zones = args->nr_zones;
+ 	disk->zone_capacity = args->zone_capacity;
+ 	disk->last_zone_capacity = args->last_zone_capacity;
+-	swap(disk->conv_zones_bitmap, args->conv_zones_bitmap);
+-	if (disk->conv_zones_bitmap)
+-		nr_conv_zones = bitmap_weight(disk->conv_zones_bitmap,
+-					      disk->nr_zones);
++	nr_conv_zones =
++		disk_set_conv_zones_bitmap(disk, args->conv_zones_bitmap);
+ 	if (nr_conv_zones >= disk->nr_zones) {
+ 		pr_warn("%s: Invalid number of conventional zones %u / %u\n",
+ 			disk->disk_name, nr_conv_zones, disk->nr_zones);
+@@ -1829,8 +1850,6 @@ int blk_revalidate_disk_zones(struct gendisk *disk)
+ 		blk_mq_unfreeze_queue(q);
+ 	}
+ 
+-	kfree(args.conv_zones_bitmap);
+-
+ 	return ret;
+ }
+ EXPORT_SYMBOL_GPL(blk_revalidate_disk_zones);
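
The blk-zoned hunks above turn the conventional-zones bitmap into an
RCU-published pointer so that the lockless reader in disk_zone_is_conv() can
never race with revalidation freeing the old bitmap. A rough userspace sketch
of the publish/consume shape, with C11 atomics standing in for
rcu_dereference()/rcu_replace_pointer(); the grace period that makes the free
safe against concurrent readers is the part only RCU provides:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static _Atomic(unsigned long *) conv_zones_bitmap;

static bool zone_is_conv(unsigned int zno)
{
	/* reader: one acquire load, then use the snapshot */
	unsigned long *bm = atomic_load_explicit(&conv_zones_bitmap,
						 memory_order_acquire);

	if (!bm)
		return false;
	return (bm[zno / (8 * sizeof(long))] >>
		(zno % (8 * sizeof(long)))) & 1UL;
}

static void set_bitmap(unsigned long *newbm)
{
	/* writer: publish the new bitmap, then retire the old one;
	 * the kernel defers the free with kfree_rcu_mightsleep() */
	unsigned long *old = atomic_exchange_explicit(&conv_zones_bitmap,
						      newbm,
						      memory_order_release);
	free(old);
}

int main(void)
{
	unsigned long *bm = calloc(1, sizeof(*bm));

	*bm = 1UL << 3;			/* zone 3 is conventional */
	set_bitmap(bm);
	printf("%d %d\n", zone_is_conv(3), zone_is_conv(4));	/* 1 0 */
	set_bitmap(NULL);
	return 0;
}
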
+diff --git a/crypto/ecdsa.c b/crypto/ecdsa.c
+index d5a10959ec281c..80ef16ae6a40b4 100644
+--- a/crypto/ecdsa.c
++++ b/crypto/ecdsa.c
+@@ -36,29 +36,24 @@ static int ecdsa_get_signature_rs(u64 *dest, size_t hdrlen, unsigned char tag,
+ 				  const void *value, size_t vlen, unsigned int ndigits)
+ {
+ 	size_t bufsize = ndigits * sizeof(u64);
+-	ssize_t diff = vlen - bufsize;
+ 	const char *d = value;
+ 
+-	if (!value || !vlen)
++	if (!value || !vlen || vlen > bufsize + 1)
+ 		return -EINVAL;
+ 
+-	/* diff = 0: 'value' has exacly the right size
+-	 * diff > 0: 'value' has too many bytes; one leading zero is allowed that
+-	 *           makes the value a positive integer; error on more
+-	 * diff < 0: 'value' is missing leading zeros
++	/*
++	 * vlen may be 1 byte larger than bufsize due to a leading zero byte
++	 * (necessary if the most significant bit of the integer is set).
+ 	 */
+-	if (diff > 0) {
++	if (vlen > bufsize) {
+ 		/* skip over leading zeros that make 'value' a positive int */
+ 		if (*d == 0) {
+ 			vlen -= 1;
+-			diff--;
+ 			d++;
+-		}
+-		if (diff)
++		} else {
+ 			return -EINVAL;
++		}
+ 	}
+-	if (-diff >= bufsize)
+-		return -EINVAL;
+ 
+ 	ecc_digits_from_bytes(d, vlen, dest, ndigits);
+ 
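
The ecdsa change above replaces the signed 'diff' bookkeeping with a direct
bound: the DER INTEGER may span at most bufsize value bytes plus one leading
zero pad byte, which is required when the most significant bit of the value
is set. A standalone sketch of that check (helper name and signature are made
up for illustration):

#include <stddef.h>
#include <stdio.h>

static int check_der_int(const unsigned char *d, size_t vlen, size_t bufsize,
			 const unsigned char **out, size_t *outlen)
{
	if (!d || !vlen || vlen > bufsize + 1)
		return -1;

	if (vlen > bufsize) {		/* vlen == bufsize + 1 here */
		if (*d != 0)
			return -1;	/* oversized with no pad byte */
		d++;
		vlen--;
	}
	/* vlen <= bufsize: shorter values simply lack leading zeros */
	*out = d;
	*outlen = vlen;
	return 0;
}

int main(void)
{
	const unsigned char padded[] = { 0x00, 0x80, 0x01 };	/* top bit set */
	const unsigned char *v;
	size_t n;

	if (!check_der_int(padded, sizeof(padded), 2, &v, &n))
		printf("value bytes: %zu (first 0x%02x)\n", n, v[0]);
	return 0;
}
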
+diff --git a/drivers/accel/qaic/qaic_drv.c b/drivers/accel/qaic/qaic_drv.c
+index bf10156c334e71..f139c564eadf9f 100644
+--- a/drivers/accel/qaic/qaic_drv.c
++++ b/drivers/accel/qaic/qaic_drv.c
+@@ -34,6 +34,7 @@
+ 
+ MODULE_IMPORT_NS(DMA_BUF);
+ 
++#define PCI_DEV_AIC080			0xa080
+ #define PCI_DEV_AIC100			0xa100
+ #define QAIC_NAME			"qaic"
+ #define QAIC_DESC			"Qualcomm Cloud AI Accelerators"
+@@ -365,7 +366,7 @@ static struct qaic_device *create_qdev(struct pci_dev *pdev, const struct pci_de
+ 		return NULL;
+ 
+ 	qdev->dev_state = QAIC_OFFLINE;
+-	if (id->device == PCI_DEV_AIC100) {
++	if (id->device == PCI_DEV_AIC080 || id->device == PCI_DEV_AIC100) {
+ 		qdev->num_dbc = 16;
+ 		qdev->dbc = devm_kcalloc(dev, qdev->num_dbc, sizeof(*qdev->dbc), GFP_KERNEL);
+ 		if (!qdev->dbc)
+@@ -607,6 +608,7 @@ static struct mhi_driver qaic_mhi_driver = {
+ };
+ 
+ static const struct pci_device_id qaic_ids[] = {
++	{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, PCI_DEV_AIC080), },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, PCI_DEV_AIC100), },
+ 	{ }
+ };
+diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
+index 015bd8e66c1cf8..d507d5e084354b 100644
+--- a/drivers/acpi/video_detect.c
++++ b/drivers/acpi/video_detect.c
+@@ -549,6 +549,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
+ 		DMI_MATCH(DMI_PRODUCT_NAME, "iMac12,2"),
+ 		},
+ 	},
++	{
++	 .callback = video_detect_force_native,
++	 /* Apple MacBook Air 7,2 */
++	 .matches = {
++		DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
++		DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir7,2"),
++		},
++	},
+ 	{
+ 	 .callback = video_detect_force_native,
+ 	 /* Apple MacBook Air 9,1 */
+@@ -565,6 +573,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
+ 		DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro9,2"),
+ 		},
+ 	},
++	{
++	 .callback = video_detect_force_native,
++	 /* Apple MacBook Pro 11,2 */
++	 .matches = {
++		DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
++		DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro11,2"),
++		},
++	},
+ 	{
+ 	 /* https://bugzilla.redhat.com/show_bug.cgi?id=1217249 */
+ 	 .callback = video_detect_force_native,
+diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c
+index 6af546b21574f9..cb45ef5240dab6 100644
+--- a/drivers/acpi/x86/utils.c
++++ b/drivers/acpi/x86/utils.c
+@@ -12,6 +12,7 @@
+ 
+ #include <linux/acpi.h>
+ #include <linux/dmi.h>
++#include <linux/pci.h>
+ #include <linux/platform_device.h>
+ #include <asm/cpu_device_id.h>
+ #include <asm/intel-family.h>
+@@ -295,6 +296,7 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
+ 	/*
+ 	 * 2. Devices which also have the skip i2c/serdev quirks and which
+ 	 *    need the x86-android-tablets module to properly work.
++	 *    Sorted alphabetically.
+ 	 */
+ #if IS_ENABLED(CONFIG_X86_ANDROID_TABLETS)
+ 	{
+@@ -308,6 +310,19 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
+ 					ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
+ 	},
+ 	{
++		/* Acer Iconia One 8 A1-840 (non FHD version) */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "BayTrail"),
++			/* Above strings are too generic, also match on BIOS date */
++			DMI_MATCH(DMI_BIOS_DATE, "04/01/2014"),
++		},
++		.driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
++					ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
++					ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
++	},
++	{
++		/* Asus ME176C tablet */
+ 		.matches = {
+ 			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ 			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ME176C"),
+@@ -318,23 +333,24 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
+ 					ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
+ 	},
+ 	{
+-		/* Lenovo Yoga Book X90F/L */
++		/* Asus TF103C transformer 2-in-1 */
+ 		.matches = {
+-			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
+-			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
+-			DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "YETI-11"),
++			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "TF103C"),
+ 		},
+ 		.driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
+-					ACPI_QUIRK_UART1_SKIP |
+ 					ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
+ 					ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
+ 	},
+ 	{
++		/* Lenovo Yoga Book X90F/L */
+ 		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "TF103C"),
++			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "YETI-11"),
+ 		},
+ 		.driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
++					ACPI_QUIRK_UART1_SKIP |
+ 					ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
+ 					ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
+ 	},
+@@ -391,6 +407,19 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
+ 		.driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
+ 					ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
+ 	},
++	{
++		/* Vexia Edu Atla 10 tablet 9V version */
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
++			DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
++			/* Above strings are too generic, also match on BIOS date */
++			DMI_MATCH(DMI_BIOS_DATE, "08/25/2014"),
++		},
++		.driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
++					ACPI_QUIRK_UART1_SKIP |
++					ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
++					ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
++	},
+ 	{
+ 		/* Whitelabel (sold as various brands) TM800A550L */
+ 		.matches = {
+@@ -411,6 +440,7 @@ static const struct acpi_device_id i2c_acpi_known_good_ids[] = {
+ 	{ "10EC5640", 0 }, /* RealTek ALC5640 audio codec */
+ 	{ "10EC5651", 0 }, /* RealTek ALC5651 audio codec */
+ 	{ "INT33F4", 0 },  /* X-Powers AXP288 PMIC */
++	{ "INT33F5", 0 },  /* TI Dollar Cove PMIC */
+ 	{ "INT33FD", 0 },  /* Intel Crystal Cove PMIC */
+ 	{ "INT34D3", 0 },  /* Intel Whiskey Cove PMIC */
+ 	{ "NPCE69A", 0 },  /* Asus Transformer keyboard dock */
+@@ -439,18 +469,35 @@ static int acpi_dmi_skip_serdev_enumeration(struct device *controller_parent, bo
+ 	struct acpi_device *adev = ACPI_COMPANION(controller_parent);
+ 	const struct dmi_system_id *dmi_id;
+ 	long quirks = 0;
+-	u64 uid;
+-	int ret;
++	u64 uid = 0;
+ 
+-	ret = acpi_dev_uid_to_integer(adev, &uid);
+-	if (ret)
++	dmi_id = dmi_first_match(acpi_quirk_skip_dmi_ids);
++	if (!dmi_id)
+ 		return 0;
+ 
+-	dmi_id = dmi_first_match(acpi_quirk_skip_dmi_ids);
+-	if (dmi_id)
+-		quirks = (unsigned long)dmi_id->driver_data;
++	quirks = (unsigned long)dmi_id->driver_data;
++
++	/* uid is left at 0 on errors and 0 is not a valid UART UID */
++	acpi_dev_uid_to_integer(adev, &uid);
++
++	/* For PCI UARTs without a UID */
++	if (!uid && dev_is_pci(controller_parent)) {
++		struct pci_dev *pdev = to_pci_dev(controller_parent);
++
++		/*
++		 * Devfn values for PCI UARTs on Bay Trail SoCs, which are
++		 * the only devices where this fallback is necessary.
++		 */
++		if (pdev->devfn == PCI_DEVFN(0x1e, 3))
++			uid = 1;
++		else if (pdev->devfn == PCI_DEVFN(0x1e, 4))
++			uid = 2;
++	}
++
++	if (!uid)
++		return 0;
+ 
+-	if (!dev_is_platform(controller_parent)) {
++	if (!dev_is_platform(controller_parent) && !dev_is_pci(controller_parent)) {
+ 		/* PNP enumerated UARTs */
+ 		if ((quirks & ACPI_QUIRK_PNP_UART1_SKIP) && uid == 1)
+ 			*skip = true;
+@@ -505,7 +552,7 @@ int acpi_quirk_skip_serdev_enumeration(struct device *controller_parent, bool *s
+ 	 * Set skip to true so that the tty core creates a serdev ctrl device.
+ 	 * The backlight driver will manually create the serdev client device.
+ 	 */
+-	if (acpi_dev_hid_match(adev, "DELL0501")) {
++	if (adev && acpi_dev_hid_match(adev, "DELL0501")) {
+ 		*skip = true;
+ 		/*
+ 		 * Create a platform dev for dell-uart-backlight to bind to.
+diff --git a/drivers/base/arch_numa.c b/drivers/base/arch_numa.c
+index e1870167642658..c99f2ab105e5b7 100644
+--- a/drivers/base/arch_numa.c
++++ b/drivers/base/arch_numa.c
+@@ -208,6 +208,10 @@ static int __init numa_register_nodes(void)
+ {
+ 	int nid;
+ 
++	/* Check the validity of the memblock/node mapping */
++	if (!memblock_validate_numa_coverage(0))
++		return -EINVAL;
++
+ 	/* Finally register nodes. */
+ 	for_each_node_mask(nid, numa_nodes_parsed) {
+ 		unsigned long start_pfn, end_pfn;
+diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
+index 7a7609298e18bd..89410127089b93 100644
+--- a/drivers/base/cacheinfo.c
++++ b/drivers/base/cacheinfo.c
+@@ -58,7 +58,7 @@ bool last_level_cache_is_valid(unsigned int cpu)
+ {
+ 	struct cacheinfo *llc;
+ 
+-	if (!cache_leaves(cpu))
++	if (!cache_leaves(cpu) || !per_cpu_cacheinfo(cpu))
+ 		return false;
+ 
+ 	llc = per_cpu_cacheinfo_idx(cpu, cache_leaves(cpu) - 1);
+@@ -463,11 +463,9 @@ int __weak populate_cache_leaves(unsigned int cpu)
+ 	return -ENOENT;
+ }
+ 
+-static inline
+-int allocate_cache_info(int cpu)
++static inline int allocate_cache_info(int cpu)
+ {
+-	per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
+-					 sizeof(struct cacheinfo), GFP_ATOMIC);
++	per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu), sizeof(struct cacheinfo), GFP_ATOMIC);
+ 	if (!per_cpu_cacheinfo(cpu)) {
+ 		cache_leaves(cpu) = 0;
+ 		return -ENOMEM;
+@@ -539,7 +537,11 @@ static inline int init_level_allocate_ci(unsigned int cpu)
+ 	 */
+ 	ci_cacheinfo(cpu)->early_ci_levels = false;
+ 
+-	if (cache_leaves(cpu) <= early_leaves)
++	/*
++	 * Some architectures (e.g., x86) do not use early initialization.
++	 * Allocate memory now in such case.
++	 */
++	if (cache_leaves(cpu) <= early_leaves && per_cpu_cacheinfo(cpu))
+ 		return 0;
+ 
+ 	kfree(per_cpu_cacheinfo(cpu));
+diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
+index 83acccdc100897..bdb450436cbc53 100644
+--- a/drivers/base/regmap/internal.h
++++ b/drivers/base/regmap/internal.h
+@@ -59,6 +59,7 @@ struct regmap {
+ 			unsigned long raw_spinlock_flags;
+ 		};
+ 	};
++	struct lock_class_key *lock_key;
+ 	regmap_lock lock;
+ 	regmap_unlock unlock;
+ 	void *lock_arg; /* This is passed to lock/unlock functions */
+diff --git a/drivers/base/regmap/regcache-maple.c b/drivers/base/regmap/regcache-maple.c
+index 8d27d3653ea3e7..23da7b31d71534 100644
+--- a/drivers/base/regmap/regcache-maple.c
++++ b/drivers/base/regmap/regcache-maple.c
+@@ -355,6 +355,9 @@ static int regcache_maple_init(struct regmap *map)
+ 
+ 	mt_init(mt);
+ 
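++	/*
++	 * When the tree uses its internal lock, give that lock the class key
++	 * passed in at regmap init, at subclass 1 so lockdep keeps it
++	 * distinct from the regmap's own lock.
++	 */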
++	if (!mt_external_lock(mt) && map->lock_key)
++		lockdep_set_class_and_subclass(&mt->ma_lock, map->lock_key, 1);
++
+ 	if (!map->num_reg_defaults)
+ 		return 0;
+ 
+diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
+index 4ded93687c1f0a..e3e2afc2c83c6b 100644
+--- a/drivers/base/regmap/regmap.c
++++ b/drivers/base/regmap/regmap.c
+@@ -598,6 +598,17 @@ int regmap_attach_dev(struct device *dev, struct regmap *map,
+ }
+ EXPORT_SYMBOL_GPL(regmap_attach_dev);
+ 
++static int dev_get_regmap_match(struct device *dev, void *res, void *data);
++
++static int regmap_detach_dev(struct device *dev, struct regmap *map)
++{
++	if (!dev)
++		return 0;
++
++	return devres_release(dev, dev_get_regmap_release,
++			      dev_get_regmap_match, (void *)map->name);
++}
++
+ static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus,
+ 					const struct regmap_config *config)
+ {
+@@ -745,6 +756,7 @@ struct regmap *__regmap_init(struct device *dev,
+ 						   lock_key, lock_name);
+ 		}
+ 		map->lock_arg = map;
++		map->lock_key = lock_key;
+ 	}
+ 
+ 	/*
+@@ -1444,6 +1456,7 @@ void regmap_exit(struct regmap *map)
+ {
+ 	struct regmap_async *async;
+ 
++	regmap_detach_dev(map->dev, map);
+ 	regcache_exit(map);
+ 
+ 	regmap_debugfs_exit(map);
+diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
+index d6a1ba969266a4..d0432b1707ceb6 100644
+--- a/drivers/block/zram/zram_drv.c
++++ b/drivers/block/zram/zram_drv.c
+@@ -298,17 +298,30 @@ static void mark_idle(struct zram *zram, ktime_t cutoff)
+ 		/*
+ 		 * Do not mark ZRAM_UNDER_WB slot as ZRAM_IDLE to close race.
+ 		 * See the comment in writeback_store.
++		 *
++		 * Also do not mark ZRAM_SAME slots as ZRAM_IDLE, because no
++		 * post-processing (recompress, writeback) happens to the
++		 * ZRAM_SAME slot.
++		 *
++		 * And ZRAM_WB slots simply cannot be ZRAM_IDLE.
+ 		 */
+ 		zram_slot_lock(zram, index);
+-		if (zram_allocated(zram, index) &&
+-				!zram_test_flag(zram, index, ZRAM_UNDER_WB)) {
++		if (!zram_allocated(zram, index) ||
++		    zram_test_flag(zram, index, ZRAM_WB) ||
++		    zram_test_flag(zram, index, ZRAM_UNDER_WB) ||
++		    zram_test_flag(zram, index, ZRAM_SAME)) {
++			zram_slot_unlock(zram, index);
++			continue;
++		}
++
+ #ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
+-			is_idle = !cutoff || ktime_after(cutoff,
+-							 zram->table[index].ac_time);
++		is_idle = !cutoff ||
++			ktime_after(cutoff, zram->table[index].ac_time);
+ #endif
+-			if (is_idle)
+-				zram_set_flag(zram, index, ZRAM_IDLE);
+-		}
++		if (is_idle)
++			zram_set_flag(zram, index, ZRAM_IDLE);
++		else
++			zram_clear_flag(zram, index, ZRAM_IDLE);
+ 		zram_slot_unlock(zram, index);
+ 	}
+ }
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 4ccaddb46ddd81..11755cb1eb1635 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -524,6 +524,8 @@ static const struct usb_device_id quirks_table[] = {
+ 						     BTUSB_WIDEBAND_SPEECH },
+ 	{ USB_DEVICE(0x13d3, 0x3591), .driver_info = BTUSB_REALTEK |
+ 						     BTUSB_WIDEBAND_SPEECH },
++	{ USB_DEVICE(0x0489, 0xe123), .driver_info = BTUSB_REALTEK |
++						     BTUSB_WIDEBAND_SPEECH },
+ 	{ USB_DEVICE(0x0489, 0xe125), .driver_info = BTUSB_REALTEK |
+ 						     BTUSB_WIDEBAND_SPEECH },
+ 
+@@ -563,6 +565,16 @@ static const struct usb_device_id quirks_table[] = {
+ 	{ USB_DEVICE(0x043e, 0x3109), .driver_info = BTUSB_MEDIATEK |
+ 						     BTUSB_WIDEBAND_SPEECH },
+ 
++	/* Additional MediaTek MT7920 Bluetooth devices */
++	{ USB_DEVICE(0x0489, 0xe134), .driver_info = BTUSB_MEDIATEK |
++						     BTUSB_WIDEBAND_SPEECH },
++	{ USB_DEVICE(0x13d3, 0x3620), .driver_info = BTUSB_MEDIATEK |
++						     BTUSB_WIDEBAND_SPEECH },
++	{ USB_DEVICE(0x13d3, 0x3621), .driver_info = BTUSB_MEDIATEK |
++						     BTUSB_WIDEBAND_SPEECH },
++	{ USB_DEVICE(0x13d3, 0x3622), .driver_info = BTUSB_MEDIATEK |
++						     BTUSB_WIDEBAND_SPEECH },
++
+ 	/* Additional MediaTek MT7921 Bluetooth devices */
+ 	{ USB_DEVICE(0x0489, 0xe0c8), .driver_info = BTUSB_MEDIATEK |
+ 						     BTUSB_WIDEBAND_SPEECH },
+@@ -630,12 +642,24 @@ static const struct usb_device_id quirks_table[] = {
+ 						     BTUSB_WIDEBAND_SPEECH },
+ 
+ 	/* Additional MediaTek MT7925 Bluetooth devices */
++	{ USB_DEVICE(0x0489, 0xe111), .driver_info = BTUSB_MEDIATEK |
++						     BTUSB_WIDEBAND_SPEECH },
+ 	{ USB_DEVICE(0x0489, 0xe113), .driver_info = BTUSB_MEDIATEK |
+ 						     BTUSB_WIDEBAND_SPEECH },
+ 	{ USB_DEVICE(0x0489, 0xe118), .driver_info = BTUSB_MEDIATEK |
+ 						     BTUSB_WIDEBAND_SPEECH },
+ 	{ USB_DEVICE(0x0489, 0xe11e), .driver_info = BTUSB_MEDIATEK |
+ 						     BTUSB_WIDEBAND_SPEECH },
++	{ USB_DEVICE(0x0489, 0xe124), .driver_info = BTUSB_MEDIATEK |
++						     BTUSB_WIDEBAND_SPEECH },
++	{ USB_DEVICE(0x0489, 0xe139), .driver_info = BTUSB_MEDIATEK |
++						     BTUSB_WIDEBAND_SPEECH },
++	{ USB_DEVICE(0x0489, 0xe14f), .driver_info = BTUSB_MEDIATEK |
++						     BTUSB_WIDEBAND_SPEECH },
++	{ USB_DEVICE(0x0489, 0xe150), .driver_info = BTUSB_MEDIATEK |
++						     BTUSB_WIDEBAND_SPEECH },
++	{ USB_DEVICE(0x0489, 0xe151), .driver_info = BTUSB_MEDIATEK |
++						     BTUSB_WIDEBAND_SPEECH },
+ 	{ USB_DEVICE(0x13d3, 0x3602), .driver_info = BTUSB_MEDIATEK |
+ 						     BTUSB_WIDEBAND_SPEECH },
+ 	{ USB_DEVICE(0x13d3, 0x3603), .driver_info = BTUSB_MEDIATEK |
+@@ -3897,6 +3921,8 @@ static int btusb_probe(struct usb_interface *intf,
+ 		set_bit(HCI_QUIRK_BROKEN_SET_RPA_TIMEOUT, &hdev->quirks);
+ 		set_bit(HCI_QUIRK_BROKEN_EXT_SCAN, &hdev->quirks);
+ 		set_bit(HCI_QUIRK_BROKEN_READ_ENC_KEY_SIZE, &hdev->quirks);
++		set_bit(HCI_QUIRK_BROKEN_EXT_CREATE_CONN, &hdev->quirks);
++		set_bit(HCI_QUIRK_BROKEN_WRITE_AUTH_PAYLOAD_TIMEOUT, &hdev->quirks);
+ 	}
+ 
+ 	if (!reset)
+diff --git a/drivers/clk/clk-en7523.c b/drivers/clk/clk-en7523.c
+index fdd8ea989ed24a..bc21b292144926 100644
+--- a/drivers/clk/clk-en7523.c
++++ b/drivers/clk/clk-en7523.c
+@@ -508,6 +508,8 @@ static void en7523_register_clocks(struct device *dev, struct clk_hw_onecell_dat
+ 	u32 rate;
+ 	int i;
+ 
++	clk_data->num = EN7523_NUM_CLOCKS;
++
+ 	for (i = 0; i < ARRAY_SIZE(en7523_base_clks); i++) {
+ 		const struct en_clk_desc *desc = &en7523_base_clks[i];
+ 		u32 reg = desc->div_reg ? desc->div_reg : desc->base_reg;
+@@ -529,8 +531,6 @@ static void en7523_register_clocks(struct device *dev, struct clk_hw_onecell_dat
+ 
+ 	hw = en7523_register_pcie_clk(dev, np_base);
+ 	clk_data->hws[EN7523_CLK_PCIE] = hw;
+-
+-	clk_data->num = EN7523_NUM_CLOCKS;
+ }
+ 
+ static int en7523_clk_hw_init(struct platform_device *pdev,
+diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
+index 4444dafa4e3dfa..9ba675f229b144 100644
+--- a/drivers/clk/qcom/Kconfig
++++ b/drivers/clk/qcom/Kconfig
+@@ -959,10 +959,10 @@ config SM_DISPCC_8450
+ config SM_DISPCC_8550
+ 	tristate "SM8550 Display Clock Controller"
+ 	depends on ARM64 || COMPILE_TEST
+-	depends on SM_GCC_8550 || SM_GCC_8650
++	depends on SM_GCC_8550 || SM_GCC_8650 || SAR_GCC_2130P
+ 	help
+ 	  Support for the display clock controller on Qualcomm Technologies, Inc
+-	  SM8550 or SM8650 devices.
++	  SAR2130P, SM8550 or SM8650 devices.
+ 	  Say Y if you want to support display devices and functionality such as
+ 	  splash screen.
+ 
+diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
+index be9bee6ab65f6e..49687512184b92 100644
+--- a/drivers/clk/qcom/clk-alpha-pll.c
++++ b/drivers/clk/qcom/clk-alpha-pll.c
+@@ -267,6 +267,17 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
+ 		[PLL_OFF_OPMODE] = 0x30,
+ 		[PLL_OFF_STATUS] = 0x3c,
+ 	},
++	[CLK_ALPHA_PLL_TYPE_NSS_HUAYRA] =  {
++		[PLL_OFF_L_VAL] = 0x04,
++		[PLL_OFF_ALPHA_VAL] = 0x08,
++		[PLL_OFF_TEST_CTL] = 0x0c,
++		[PLL_OFF_TEST_CTL_U] = 0x10,
++		[PLL_OFF_USER_CTL] = 0x14,
++		[PLL_OFF_CONFIG_CTL] = 0x18,
++		[PLL_OFF_CONFIG_CTL_U] = 0x1c,
++		[PLL_OFF_STATUS] = 0x20,
++	},
++
+ };
+ EXPORT_SYMBOL_GPL(clk_alpha_pll_regs);
+ 
+diff --git a/drivers/clk/qcom/clk-alpha-pll.h b/drivers/clk/qcom/clk-alpha-pll.h
+index 55eca04b23a1fc..c6d1b8429f951a 100644
+--- a/drivers/clk/qcom/clk-alpha-pll.h
++++ b/drivers/clk/qcom/clk-alpha-pll.h
+@@ -32,6 +32,7 @@ enum {
+ 	CLK_ALPHA_PLL_TYPE_BRAMMO_EVO,
+ 	CLK_ALPHA_PLL_TYPE_STROMER,
+ 	CLK_ALPHA_PLL_TYPE_STROMER_PLUS,
++	CLK_ALPHA_PLL_TYPE_NSS_HUAYRA,
+ 	CLK_ALPHA_PLL_TYPE_MAX,
+ };
+ 
+diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h
+index 8e0f3372dc7a83..80f1f4fcd52a68 100644
+--- a/drivers/clk/qcom/clk-rcg.h
++++ b/drivers/clk/qcom/clk-rcg.h
+@@ -198,6 +198,7 @@ extern const struct clk_ops clk_byte2_ops;
+ extern const struct clk_ops clk_pixel_ops;
+ extern const struct clk_ops clk_gfx3d_ops;
+ extern const struct clk_ops clk_rcg2_shared_ops;
++extern const struct clk_ops clk_rcg2_shared_floor_ops;
+ extern const struct clk_ops clk_rcg2_shared_no_init_park_ops;
+ extern const struct clk_ops clk_dp_ops;
+ 
+diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
+index bf26c5448f0067..bf6406f5279a4c 100644
+--- a/drivers/clk/qcom/clk-rcg2.c
++++ b/drivers/clk/qcom/clk-rcg2.c
+@@ -1186,15 +1186,23 @@ clk_rcg2_shared_force_enable_clear(struct clk_hw *hw, const struct freq_tbl *f)
+ 	return clk_rcg2_clear_force_enable(hw);
+ }
+ 
+-static int clk_rcg2_shared_set_rate(struct clk_hw *hw, unsigned long rate,
+-				    unsigned long parent_rate)
++static int __clk_rcg2_shared_set_rate(struct clk_hw *hw, unsigned long rate,
++				      unsigned long parent_rate,
++				      enum freq_policy policy)
+ {
+ 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ 	const struct freq_tbl *f;
+ 
+-	f = qcom_find_freq(rcg->freq_tbl, rate);
+-	if (!f)
++	switch (policy) {
++	case FLOOR:
++		f = qcom_find_freq_floor(rcg->freq_tbl, rate);
++		break;
++	case CEIL:
++		f = qcom_find_freq(rcg->freq_tbl, rate);
++		break;
++	default:
+ 		return -EINVAL;
++	}
+ 
+ 	/*
+ 	 * In case clock is disabled, update the M, N and D registers, cache
+@@ -1207,10 +1215,28 @@ static int clk_rcg2_shared_set_rate(struct clk_hw *hw, unsigned long rate,
+ 	return clk_rcg2_shared_force_enable_clear(hw, f);
+ }
+ 
++static int clk_rcg2_shared_set_rate(struct clk_hw *hw, unsigned long rate,
++				    unsigned long parent_rate)
++{
++	return __clk_rcg2_shared_set_rate(hw, rate, parent_rate, CEIL);
++}
++
+ static int clk_rcg2_shared_set_rate_and_parent(struct clk_hw *hw,
+ 		unsigned long rate, unsigned long parent_rate, u8 index)
+ {
+-	return clk_rcg2_shared_set_rate(hw, rate, parent_rate);
++	return __clk_rcg2_shared_set_rate(hw, rate, parent_rate, CEIL);
++}
++
++static int clk_rcg2_shared_set_floor_rate(struct clk_hw *hw, unsigned long rate,
++					  unsigned long parent_rate)
++{
++	return __clk_rcg2_shared_set_rate(hw, rate, parent_rate, FLOOR);
++}
++
++static int clk_rcg2_shared_set_floor_rate_and_parent(struct clk_hw *hw,
++		unsigned long rate, unsigned long parent_rate, u8 index)
++{
++	return __clk_rcg2_shared_set_rate(hw, rate, parent_rate, FLOOR);
+ }
+ 
+ static int clk_rcg2_shared_enable(struct clk_hw *hw)
+@@ -1348,6 +1374,18 @@ const struct clk_ops clk_rcg2_shared_ops = {
+ };
+ EXPORT_SYMBOL_GPL(clk_rcg2_shared_ops);
+ 
++const struct clk_ops clk_rcg2_shared_floor_ops = {
++	.enable = clk_rcg2_shared_enable,
++	.disable = clk_rcg2_shared_disable,
++	.get_parent = clk_rcg2_shared_get_parent,
++	.set_parent = clk_rcg2_shared_set_parent,
++	.recalc_rate = clk_rcg2_shared_recalc_rate,
++	.determine_rate = clk_rcg2_determine_floor_rate,
++	.set_rate = clk_rcg2_shared_set_floor_rate,
++	.set_rate_and_parent = clk_rcg2_shared_set_floor_rate_and_parent,
++};
++EXPORT_SYMBOL_GPL(clk_rcg2_shared_floor_ops);
++
+ static int clk_rcg2_shared_no_init_park(struct clk_hw *hw)
+ {
+ 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c
+index 4acde937114af3..eefc322ce36798 100644
+--- a/drivers/clk/qcom/clk-rpmh.c
++++ b/drivers/clk/qcom/clk-rpmh.c
+@@ -389,6 +389,18 @@ DEFINE_CLK_RPMH_BCM(ipa, "IP0");
+ DEFINE_CLK_RPMH_BCM(pka, "PKA0");
+ DEFINE_CLK_RPMH_BCM(qpic_clk, "QP0");
+ 
++static struct clk_hw *sar2130p_rpmh_clocks[] = {
++	[RPMH_CXO_CLK]		= &clk_rpmh_bi_tcxo_div1.hw,
++	[RPMH_CXO_CLK_A]	= &clk_rpmh_bi_tcxo_div1_ao.hw,
++	[RPMH_RF_CLK1]		= &clk_rpmh_rf_clk1_a.hw,
++	[RPMH_RF_CLK1_A]	= &clk_rpmh_rf_clk1_a_ao.hw,
++};
++
++static const struct clk_rpmh_desc clk_rpmh_sar2130p = {
++	.clks = sar2130p_rpmh_clocks,
++	.num_clks = ARRAY_SIZE(sar2130p_rpmh_clocks),
++};
++
+ static struct clk_hw *sdm845_rpmh_clocks[] = {
+ 	[RPMH_CXO_CLK]		= &clk_rpmh_bi_tcxo_div2.hw,
+ 	[RPMH_CXO_CLK_A]	= &clk_rpmh_bi_tcxo_div2_ao.hw,
+@@ -880,6 +892,7 @@ static int clk_rpmh_probe(struct platform_device *pdev)
+ static const struct of_device_id clk_rpmh_match_table[] = {
+ 	{ .compatible = "qcom,qdu1000-rpmh-clk", .data = &clk_rpmh_qdu1000},
+ 	{ .compatible = "qcom,sa8775p-rpmh-clk", .data = &clk_rpmh_sa8775p},
++	{ .compatible = "qcom,sar2130p-rpmh-clk", .data = &clk_rpmh_sar2130p},
+ 	{ .compatible = "qcom,sc7180-rpmh-clk", .data = &clk_rpmh_sc7180},
+ 	{ .compatible = "qcom,sc8180x-rpmh-clk", .data = &clk_rpmh_sc8180x},
+ 	{ .compatible = "qcom,sc8280xp-rpmh-clk", .data = &clk_rpmh_sc8280xp},
+diff --git a/drivers/clk/qcom/dispcc-sm8550.c b/drivers/clk/qcom/dispcc-sm8550.c
+index 7f9021ca0ecb0e..e41d4104d77021 100644
+--- a/drivers/clk/qcom/dispcc-sm8550.c
++++ b/drivers/clk/qcom/dispcc-sm8550.c
+@@ -75,7 +75,7 @@ static struct pll_vco lucid_ole_vco[] = {
+ 	{ 249600000, 2000000000, 0 },
+ };
+ 
+-static const struct alpha_pll_config disp_cc_pll0_config = {
++static struct alpha_pll_config disp_cc_pll0_config = {
+ 	.l = 0xd,
+ 	.alpha = 0x6492,
+ 	.config_ctl_val = 0x20485699,
+@@ -106,7 +106,7 @@ static struct clk_alpha_pll disp_cc_pll0 = {
+ 	},
+ };
+ 
+-static const struct alpha_pll_config disp_cc_pll1_config = {
++static struct alpha_pll_config disp_cc_pll1_config = {
+ 	.l = 0x1f,
+ 	.alpha = 0x4000,
+ 	.config_ctl_val = 0x20485699,
+@@ -594,6 +594,13 @@ static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src[] = {
+ 	{ }
+ };
+ 
++static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src_sar2130p[] = {
++	F(200000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
++	F(325000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
++	F(514000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
++	{ }
++};
++
+ static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src_sm8650[] = {
+ 	F(19200000, P_BI_TCXO, 1, 0, 0),
+ 	F(85714286, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+@@ -1750,6 +1757,7 @@ static struct qcom_cc_desc disp_cc_sm8550_desc = {
+ };
+ 
+ static const struct of_device_id disp_cc_sm8550_match_table[] = {
++	{ .compatible = "qcom,sar2130p-dispcc" },
+ 	{ .compatible = "qcom,sm8550-dispcc" },
+ 	{ .compatible = "qcom,sm8650-dispcc" },
+ 	{ }
+@@ -1780,6 +1788,12 @@ static int disp_cc_sm8550_probe(struct platform_device *pdev)
+ 		disp_cc_mdss_mdp_clk_src.freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src_sm8650;
+ 		disp_cc_mdss_dptx1_usb_router_link_intf_clk.clkr.hw.init->parent_hws[0] =
+ 			&disp_cc_mdss_dptx1_link_div_clk_src.clkr.hw;
++	} else if (of_device_is_compatible(pdev->dev.of_node, "qcom,sar2130p-dispcc")) {
++		disp_cc_pll0_config.l = 0x1f;
++		disp_cc_pll0_config.alpha = 0x4000;
++		disp_cc_pll0_config.user_ctl_val = 0x1;
++		disp_cc_pll1_config.user_ctl_val = 0x1;
++		disp_cc_mdss_mdp_clk_src.freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src_sar2130p;
+ 	}
+ 
+ 	clk_lucid_ole_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
+diff --git a/drivers/clk/qcom/tcsrcc-sm8550.c b/drivers/clk/qcom/tcsrcc-sm8550.c
+index e5e8f2e82b949d..41d73f92a000ab 100644
+--- a/drivers/clk/qcom/tcsrcc-sm8550.c
++++ b/drivers/clk/qcom/tcsrcc-sm8550.c
+@@ -129,6 +129,13 @@ static struct clk_branch tcsr_usb3_clkref_en = {
+ 	},
+ };
+ 
++static struct clk_regmap *tcsr_cc_sar2130p_clocks[] = {
++	[TCSR_PCIE_0_CLKREF_EN] = &tcsr_pcie_0_clkref_en.clkr,
++	[TCSR_PCIE_1_CLKREF_EN] = &tcsr_pcie_1_clkref_en.clkr,
++	[TCSR_USB2_CLKREF_EN] = &tcsr_usb2_clkref_en.clkr,
++	[TCSR_USB3_CLKREF_EN] = &tcsr_usb3_clkref_en.clkr,
++};
++
+ static struct clk_regmap *tcsr_cc_sm8550_clocks[] = {
+ 	[TCSR_PCIE_0_CLKREF_EN] = &tcsr_pcie_0_clkref_en.clkr,
+ 	[TCSR_PCIE_1_CLKREF_EN] = &tcsr_pcie_1_clkref_en.clkr,
+@@ -146,6 +153,12 @@ static const struct regmap_config tcsr_cc_sm8550_regmap_config = {
+ 	.fast_io = true,
+ };
+ 
++static const struct qcom_cc_desc tcsr_cc_sar2130p_desc = {
++	.config = &tcsr_cc_sm8550_regmap_config,
++	.clks = tcsr_cc_sar2130p_clocks,
++	.num_clks = ARRAY_SIZE(tcsr_cc_sar2130p_clocks),
++};
++
+ static const struct qcom_cc_desc tcsr_cc_sm8550_desc = {
+ 	.config = &tcsr_cc_sm8550_regmap_config,
+ 	.clks = tcsr_cc_sm8550_clocks,
+@@ -153,7 +166,8 @@ static const struct qcom_cc_desc tcsr_cc_sm8550_desc = {
+ };
+ 
+ static const struct of_device_id tcsr_cc_sm8550_match_table[] = {
+-	{ .compatible = "qcom,sm8550-tcsr" },
++	{ .compatible = "qcom,sar2130p-tcsr", .data = &tcsr_cc_sar2130p_desc },
++	{ .compatible = "qcom,sm8550-tcsr", .data = &tcsr_cc_sm8550_desc },
+ 	{ }
+ };
+ MODULE_DEVICE_TABLE(of, tcsr_cc_sm8550_match_table);
+@@ -162,7 +176,7 @@ static int tcsr_cc_sm8550_probe(struct platform_device *pdev)
+ {
+ 	struct regmap *regmap;
+ 
+-	regmap = qcom_cc_map(pdev, &tcsr_cc_sm8550_desc);
++	regmap = qcom_cc_map(pdev, of_device_get_match_data(&pdev->dev));
+ 	if (IS_ERR(regmap))
+ 		return PTR_ERR(regmap);
+ 
+diff --git a/drivers/dma-buf/dma-fence-array.c b/drivers/dma-buf/dma-fence-array.c
+index 8a08ffde31e758..6657d4b30af9dc 100644
+--- a/drivers/dma-buf/dma-fence-array.c
++++ b/drivers/dma-buf/dma-fence-array.c
+@@ -103,10 +103,36 @@ static bool dma_fence_array_enable_signaling(struct dma_fence *fence)
+ static bool dma_fence_array_signaled(struct dma_fence *fence)
+ {
+ 	struct dma_fence_array *array = to_dma_fence_array(fence);
++	int num_pending;
++	unsigned int i;
+ 
+-	if (atomic_read(&array->num_pending) > 0)
++	/*
++	 * We need to read num_pending before checking the enable_signal bit
++	 * to avoid racing with the enable_signaling() implementation, which
++	 * might decrement the counter, and cause a partial check.
++	 * atomic_read_acquire() pairs with atomic_dec_and_test() in
++	 * dma_fence_array_enable_signaling()
++	 *
++	 * The !--num_pending check is here to account for the any_signaled case:
++	 * if we race with enable_signaling(), the num_pending check in the
++	 * signaling-enabled branch might be outdated (num_pending might have
++	 * been decremented), but that's fine. The user will get the right
++	 * value when testing again later.
++	 */
++	num_pending = atomic_read_acquire(&array->num_pending);
++	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &array->base.flags)) {
++		if (num_pending <= 0)
++			goto signal;
+ 		return false;
++	}
++
++	for (i = 0; i < array->num_fences; ++i) {
++		if (dma_fence_is_signaled(array->fences[i]) && !--num_pending)
++			goto signal;
++	}
++	return false;
+ 
++signal:
+ 	dma_fence_array_clear_pending_error(array);
+ 	return true;
+ }
+diff --git a/drivers/dma-buf/dma-fence-unwrap.c b/drivers/dma-buf/dma-fence-unwrap.c
+index 628af51c81af3d..6345062731f153 100644
+--- a/drivers/dma-buf/dma-fence-unwrap.c
++++ b/drivers/dma-buf/dma-fence-unwrap.c
+@@ -12,6 +12,7 @@
+ #include <linux/dma-fence-chain.h>
+ #include <linux/dma-fence-unwrap.h>
+ #include <linux/slab.h>
++#include <linux/sort.h>
+ 
+ /* Internal helper to start new array iteration, don't use directly */
+ static struct dma_fence *
+@@ -59,6 +60,25 @@ struct dma_fence *dma_fence_unwrap_next(struct dma_fence_unwrap *cursor)
+ }
+ EXPORT_SYMBOL_GPL(dma_fence_unwrap_next);
+ 
++
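++/*
++ * Order fences by context; within the same context the more recent fence
++ * (per dma_fence_is_later()) sorts first, so the merge loop below can keep
++ * only the newest fence for each context.
++ */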
++static int fence_cmp(const void *_a, const void *_b)
++{
++	struct dma_fence *a = *(struct dma_fence **)_a;
++	struct dma_fence *b = *(struct dma_fence **)_b;
++
++	if (a->context < b->context)
++		return -1;
++	else if (a->context > b->context)
++		return 1;
++
++	if (dma_fence_is_later(b, a))
++		return 1;
++	else if (dma_fence_is_later(a, b))
++		return -1;
++
++	return 0;
++}
++
+ /* Implementation for the dma_fence_merge() macro, don't use directly */
+ struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
+ 					   struct dma_fence **fences,
+@@ -67,8 +87,7 @@ struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
+ 	struct dma_fence_array *result;
+ 	struct dma_fence *tmp, **array;
+ 	ktime_t timestamp;
+-	unsigned int i;
+-	size_t count;
++	int i, j, count;
+ 
+ 	count = 0;
+ 	timestamp = ns_to_ktime(0);
+@@ -96,78 +115,55 @@ struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
+ 	if (!array)
+ 		return NULL;
+ 
+-	/*
+-	 * This trashes the input fence array and uses it as position for the
+-	 * following merge loop. This works because the dma_fence_merge()
+-	 * wrapper macro is creating this temporary array on the stack together
+-	 * with the iterators.
+-	 */
+-	for (i = 0; i < num_fences; ++i)
+-		fences[i] = dma_fence_unwrap_first(fences[i], &iter[i]);
+-
+ 	count = 0;
+-	do {
+-		unsigned int sel;
+-
+-restart:
+-		tmp = NULL;
+-		for (i = 0; i < num_fences; ++i) {
+-			struct dma_fence *next;
+-
+-			while (fences[i] && dma_fence_is_signaled(fences[i]))
+-				fences[i] = dma_fence_unwrap_next(&iter[i]);
+-
+-			next = fences[i];
+-			if (!next)
+-				continue;
+-
+-			/*
+-			 * We can't guarantee that inpute fences are ordered by
+-			 * context, but it is still quite likely when this
+-			 * function is used multiple times. So attempt to order
+-			 * the fences by context as we pass over them and merge
+-			 * fences with the same context.
+-			 */
+-			if (!tmp || tmp->context > next->context) {
+-				tmp = next;
+-				sel = i;
+-
+-			} else if (tmp->context < next->context) {
+-				continue;
+-
+-			} else if (dma_fence_is_later(tmp, next)) {
+-				fences[i] = dma_fence_unwrap_next(&iter[i]);
+-				goto restart;
++	for (i = 0; i < num_fences; ++i) {
++		dma_fence_unwrap_for_each(tmp, &iter[i], fences[i]) {
++			if (!dma_fence_is_signaled(tmp)) {
++				array[count++] = dma_fence_get(tmp);
+ 			} else {
+-				fences[sel] = dma_fence_unwrap_next(&iter[sel]);
+-				goto restart;
++				ktime_t t = dma_fence_timestamp(tmp);
++
++				if (ktime_after(t, timestamp))
++					timestamp = t;
+ 			}
+ 		}
++	}
+ 
+-		if (tmp) {
+-			array[count++] = dma_fence_get(tmp);
+-			fences[sel] = dma_fence_unwrap_next(&iter[sel]);
+-		}
+-	} while (tmp);
++	if (count == 0 || count == 1)
++		goto return_fastpath;
+ 
+-	if (count == 0) {
+-		tmp = dma_fence_allocate_private_stub(ktime_get());
+-		goto return_tmp;
+-	}
++	sort(array, count, sizeof(*array), fence_cmp, NULL);
+ 
+-	if (count == 1) {
+-		tmp = array[0];
+-		goto return_tmp;
++	/*
++	 * Only keep the most recent fence for each context.
++	 */
++	j = 0;
++	for (i = 1; i < count; i++) {
++		if (array[i]->context == array[j]->context)
++			dma_fence_put(array[i]);
++		else
++			array[++j] = array[i];
+ 	}
+-
+-	result = dma_fence_array_create(count, array,
+-					dma_fence_context_alloc(1),
+-					1, false);
+-	if (!result) {
+-		tmp = NULL;
+-		goto return_tmp;
++	count = ++j;
++
++	if (count > 1) {
++		result = dma_fence_array_create(count, array,
++						dma_fence_context_alloc(1),
++						1, false);
++		if (!result) {
++			for (i = 0; i < count; i++)
++				dma_fence_put(array[i]);
++			tmp = NULL;
++			goto return_tmp;
++		}
++		return &result->base;
+ 	}
+-	return &result->base;
++
++return_fastpath:
++	if (count == 0)
++		tmp = dma_fence_allocate_private_stub(timestamp);
++	else
++		tmp = array[0];
+ 
+ return_tmp:
+ 	kfree(array);
+diff --git a/drivers/firmware/qcom/qcom_scm.c b/drivers/firmware/qcom/qcom_scm.c
+index 2e4260ba5f793c..14afd68664a911 100644
+--- a/drivers/firmware/qcom/qcom_scm.c
++++ b/drivers/firmware/qcom/qcom_scm.c
+@@ -1742,9 +1742,11 @@ EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_send);
+  + any potential issues with this, only allow validated machines for now.
+  */
+ static const struct of_device_id qcom_scm_qseecom_allowlist[] __maybe_unused = {
++	{ .compatible = "dell,xps13-9345" },
+ 	{ .compatible = "lenovo,flex-5g" },
+ 	{ .compatible = "lenovo,thinkpad-t14s" },
+ 	{ .compatible = "lenovo,thinkpad-x13s", },
++	{ .compatible = "lenovo,yoga-slim7x" },
+ 	{ .compatible = "microsoft,romulus13", },
+ 	{ .compatible = "microsoft,romulus15", },
+ 	{ .compatible = "qcom,sc8180x-primus" },
+diff --git a/drivers/gpio/gpio-grgpio.c b/drivers/gpio/gpio-grgpio.c
+index 017c7170eb57c4..620793740c6681 100644
+--- a/drivers/gpio/gpio-grgpio.c
++++ b/drivers/gpio/gpio-grgpio.c
+@@ -328,6 +328,7 @@ static const struct irq_domain_ops grgpio_irq_domain_ops = {
+ static int grgpio_probe(struct platform_device *ofdev)
+ {
+ 	struct device_node *np = ofdev->dev.of_node;
++	struct device *dev = &ofdev->dev;
+ 	void  __iomem *regs;
+ 	struct gpio_chip *gc;
+ 	struct grgpio_priv *priv;
+@@ -337,7 +338,7 @@ static int grgpio_probe(struct platform_device *ofdev)
+ 	int size;
+ 	int i;
+ 
+-	priv = devm_kzalloc(&ofdev->dev, sizeof(*priv), GFP_KERNEL);
++	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ 	if (!priv)
+ 		return -ENOMEM;
+ 
+@@ -346,28 +347,31 @@ static int grgpio_probe(struct platform_device *ofdev)
+ 		return PTR_ERR(regs);
+ 
+ 	gc = &priv->gc;
+-	err = bgpio_init(gc, &ofdev->dev, 4, regs + GRGPIO_DATA,
++	err = bgpio_init(gc, dev, 4, regs + GRGPIO_DATA,
+ 			 regs + GRGPIO_OUTPUT, NULL, regs + GRGPIO_DIR, NULL,
+ 			 BGPIOF_BIG_ENDIAN_BYTE_ORDER);
+ 	if (err) {
+-		dev_err(&ofdev->dev, "bgpio_init() failed\n");
++		dev_err(dev, "bgpio_init() failed\n");
+ 		return err;
+ 	}
+ 
+ 	priv->regs = regs;
+ 	priv->imask = gc->read_reg(regs + GRGPIO_IMASK);
+-	priv->dev = &ofdev->dev;
++	priv->dev = dev;
+ 
+ 	gc->owner = THIS_MODULE;
+ 	gc->to_irq = grgpio_to_irq;
+-	gc->label = devm_kasprintf(&ofdev->dev, GFP_KERNEL, "%pOF", np);
++	gc->label = devm_kasprintf(dev, GFP_KERNEL, "%pOF", np);
++	if (!gc->label)
++		return -ENOMEM;
++
+ 	gc->base = -1;
+ 
+ 	err = of_property_read_u32(np, "nbits", &prop);
+ 	if (err || prop <= 0 || prop > GRGPIO_MAX_NGPIO) {
+ 		gc->ngpio = GRGPIO_MAX_NGPIO;
+-		dev_dbg(&ofdev->dev,
+-			"No or invalid nbits property: assume %d\n", gc->ngpio);
++		dev_dbg(dev, "No or invalid nbits property: assume %d\n",
++			gc->ngpio);
+ 	} else {
+ 		gc->ngpio = prop;
+ 	}
+@@ -379,7 +383,7 @@ static int grgpio_probe(struct platform_device *ofdev)
+ 	irqmap = (s32 *)of_get_property(np, "irqmap", &size);
+ 	if (irqmap) {
+ 		if (size < gc->ngpio) {
+-			dev_err(&ofdev->dev,
++			dev_err(dev,
+ 				"irqmap shorter than ngpio (%d < %d)\n",
+ 				size, gc->ngpio);
+ 			return -EINVAL;
+@@ -389,7 +393,7 @@ static int grgpio_probe(struct platform_device *ofdev)
+ 						     &grgpio_irq_domain_ops,
+ 						     priv);
+ 		if (!priv->domain) {
+-			dev_err(&ofdev->dev, "Could not add irq domain\n");
++			dev_err(dev, "Could not add irq domain\n");
+ 			return -EINVAL;
+ 		}
+ 
+@@ -419,13 +423,13 @@ static int grgpio_probe(struct platform_device *ofdev)
+ 
+ 	err = gpiochip_add_data(gc, priv);
+ 	if (err) {
+-		dev_err(&ofdev->dev, "Could not add gpiochip\n");
++		dev_err(dev, "Could not add gpiochip\n");
+ 		if (priv->domain)
+ 			irq_domain_remove(priv->domain);
+ 		return err;
+ 	}
+ 
+-	dev_info(&ofdev->dev, "regs=0x%p, base=%d, ngpio=%d, irqs=%s\n",
++	dev_info(dev, "regs=0x%p, base=%d, ngpio=%d, irqs=%s\n",
+ 		 priv->regs, gc->base, gc->ngpio, priv->domain ? "on" : "off");
+ 
+ 	return 0;
+diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
+index 2b02655abb56ea..44372f8647d51a 100644
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -14,6 +14,7 @@
+ #include <linux/idr.h>
+ #include <linux/interrupt.h>
+ #include <linux/irq.h>
++#include <linux/irqdesc.h>
+ #include <linux/kernel.h>
+ #include <linux/list.h>
+ #include <linux/lockdep.h>
+@@ -713,6 +714,45 @@ bool gpiochip_line_is_valid(const struct gpio_chip *gc,
+ }
+ EXPORT_SYMBOL_GPL(gpiochip_line_is_valid);
+ 
++static void gpiod_free_irqs(struct gpio_desc *desc)
++{
++	int irq = gpiod_to_irq(desc);
++	struct irq_desc *irqd = irq_to_desc(irq);
++	void *cookie;
++
++	for (;;) {
++		/*
++		 * Make sure the action doesn't go away while we're
++		 * dereferencing it. Retrieve and store the cookie value.
++		 * If the irq is freed after we release the lock, that's
++		 * alright - the underlying maple tree lookup will return NULL
++		 * and nothing will happen in free_irq().
++		 */
++		scoped_guard(mutex, &irqd->request_mutex) {
++			if (!irq_desc_has_action(irqd))
++				return;
++
++			cookie = irqd->action->dev_id;
++		}
++
++		free_irq(irq, cookie);
++	}
++}
++
++/*
++ * The chip is going away but there may be users who had requested interrupts
++ * on its GPIO lines who have no idea about its removal and have no way of
++ * being notified about it. We need to free any interrupts still in use here or
++ * we'll leak memory and resources (like procfs files).
++ */
++static void gpiochip_free_remaining_irqs(struct gpio_chip *gc)
++{
++	struct gpio_desc *desc;
++
++	for_each_gpio_desc_with_flag(gc, desc, FLAG_USED_AS_IRQ)
++		gpiod_free_irqs(desc);
++}
++
+ static void gpiodev_release(struct device *dev)
+ {
+ 	struct gpio_device *gdev = to_gpio_device(dev);
+@@ -1125,6 +1165,7 @@ void gpiochip_remove(struct gpio_chip *gc)
+ 	/* FIXME: should the legacy sysfs handling be moved to gpio_device? */
+ 	gpiochip_sysfs_unregister(gdev);
+ 	gpiochip_free_hogs(gc);
++	gpiochip_free_remaining_irqs(gc);
+ 
+ 	scoped_guard(mutex, &gpio_devices_lock)
+ 		list_del_rcu(&gdev->list);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+index 7dd55ed57c1d97..b8d4e07d2043ed 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+@@ -800,6 +800,7 @@ int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
+ 		return -EIO;
+ 	}
+ 
++	kfree(info);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 1f08cb88d51be5..51904906545e59 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -3666,7 +3666,7 @@ static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
+  *
+  * @adev: amdgpu_device pointer
+  *
+- * First resume function for hardware IPs.  The list of all the hardware
++ * Second resume function for hardware IPs.  The list of all the hardware
+  * IPs that make up the asic is walked and the resume callbacks are run for
+  * all blocks except COMMON, GMC, and IH.  resume puts the hardware into a
+  * functional state after a suspend and updates the software state as
+@@ -3684,6 +3684,7 @@ static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
+ 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
+ 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
+ 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
++		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE ||
+ 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
+ 			continue;
+ 		r = adev->ip_blocks[i].version->funcs->resume(adev);
+@@ -3698,6 +3699,36 @@ static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
+ 	return 0;
+ }
+ 
++/**
++ * amdgpu_device_ip_resume_phase3 - run resume for hardware IPs
++ *
++ * @adev: amdgpu_device pointer
++ *
++ * Third resume function for hardware IPs.  The list of all the hardware
++ * IPs that make up the asic is walked and the resume callbacks are run for
++ * all DCE.  resume puts the hardware into a functional state after a suspend
++ * and updates the software state as necessary.  This function is also used
++ * for restoring the GPU after a GPU reset.
++ *
++ * Returns 0 on success, negative error code on failure.
++ */
++static int amdgpu_device_ip_resume_phase3(struct amdgpu_device *adev)
++{
++	int i, r;
++
++	for (i = 0; i < adev->num_ip_blocks; i++) {
++		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
++			continue;
++		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) {
++			r = adev->ip_blocks[i].version->funcs->resume(adev);
++			if (r)
++				return r;
++		}
++	}
++
++	return 0;
++}
++
+ /**
+  * amdgpu_device_ip_resume - run resume for hardware IPs
+  *
+@@ -3727,6 +3758,13 @@ static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
+ 	if (adev->mman.buffer_funcs_ring->sched.ready)
+ 		amdgpu_ttm_set_buffer_funcs_status(adev, true);
+ 
++	if (r)
++		return r;
++
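++	/* Bring the fence driver hw state back up before phase 3 resumes DCE. */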
++	amdgpu_fence_driver_hw_init(adev);
++
++	r = amdgpu_device_ip_resume_phase3(adev);
++
+ 	return r;
+ }
+ 
+@@ -4809,7 +4847,6 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
+ 		dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
+ 		goto exit;
+ 	}
+-	amdgpu_fence_driver_hw_init(adev);
+ 
+ 	if (!adev->in_s0ix) {
+ 		r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
+@@ -5431,6 +5468,10 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
+ 				if (tmp_adev->mman.buffer_funcs_ring->sched.ready)
+ 					amdgpu_ttm_set_buffer_funcs_status(tmp_adev, true);
+ 
++				r = amdgpu_device_ip_resume_phase3(tmp_adev);
++				if (r)
++					goto out;
++
+ 				if (vram_lost)
+ 					amdgpu_device_fill_reset_magic(tmp_adev);
+ 
+@@ -6344,6 +6385,9 @@ bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
+ 	struct amdgpu_device *adev = drm_to_adev(dev);
+ 	int r;
+ 
++	if (amdgpu_sriov_vf(adev))
++		return false;
++
+ 	r = pci_save_state(pdev);
+ 	if (!r) {
+ 		kfree(adev->pci_state);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+index 74adb983ab03e0..9f922ec50ea2dc 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -812,7 +812,7 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev,
+ 	/* Map SG to device */
+ 	r = dma_map_sgtable(adev->dev, ttm->sg, direction, 0);
+ 	if (r)
+-		goto release_sg;
++		goto release_sg_table;
+ 
+ 	/* convert SG to linear array of pages and dma addresses */
+ 	drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
+@@ -820,6 +820,8 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev,
+ 
+ 	return 0;
+ 
++release_sg_table:
++	sg_free_table(ttm->sg);
+ release_sg:
+ 	kfree(ttm->sg);
+ 	ttm->sg = NULL;
+@@ -1849,6 +1851,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
+ 
+ 	mutex_init(&adev->mman.gtt_window_lock);
+ 
++	dma_set_max_seg_size(adev->dev, UINT_MAX);
+ 	/* No others user of address space so set it to 0 */
+ 	r = ttm_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev,
+ 			       adev_to_drm(adev)->anon_inode->i_mapping,
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index 785a343a95f0ff..e7cd51c95141e1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -2223,6 +2223,18 @@ static int gfx_v9_0_sw_init(void *handle)
+ 	}
+ 
+ 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
++	case IP_VERSION(9, 4, 2):
++		adev->gfx.cleaner_shader_ptr = gfx_9_4_2_cleaner_shader_hex;
++		adev->gfx.cleaner_shader_size = sizeof(gfx_9_4_2_cleaner_shader_hex);
++		if (adev->gfx.mec_fw_version >= 88) {
++			adev->gfx.enable_cleaner_shader = true;
++			r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
++			if (r) {
++				adev->gfx.enable_cleaner_shader = false;
++				dev_err(adev->dev, "Failed to initialize cleaner shader\n");
++			}
++		}
++		break;
+ 	default:
+ 		adev->gfx.enable_cleaner_shader = false;
+ 		break;
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0_cleaner_shader.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0_cleaner_shader.h
+index 36c0292b511067..0b6bd09b752993 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0_cleaner_shader.h
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0_cleaner_shader.h
+@@ -1,6 +1,6 @@
+ /* SPDX-License-Identifier: MIT */
+ /*
+- * Copyright 2018 Advanced Micro Devices, Inc.
++ * Copyright 2024 Advanced Micro Devices, Inc.
+  *
+  * Permission is hereby granted, free of charge, to any person obtaining a
+  * copy of this software and associated documentation files (the "Software"),
+@@ -24,3 +24,45 @@
+ static const u32 __maybe_unused gfx_9_0_cleaner_shader_hex[] = {
+ 	/* Add the cleaner shader code here */
+ };
++
++/* Define the cleaner shader gfx_9_4_2 */
++static const u32 gfx_9_4_2_cleaner_shader_hex[] = {
++	0xbf068100, 0xbf84003b,
++	0xbf8a0000, 0xb07c0000,
++	0xbe8200ff, 0x00000078,
++	0xbf110802, 0x7e000280,
++	0x7e020280, 0x7e040280,
++	0x7e060280, 0x7e080280,
++	0x7e0a0280, 0x7e0c0280,
++	0x7e0e0280, 0x80828802,
++	0xbe803202, 0xbf84fff5,
++	0xbf9c0000, 0xbe8200ff,
++	0x80000000, 0x86020102,
++	0xbf840011, 0xbefe00c1,
++	0xbeff00c1, 0xd28c0001,
++	0x0001007f, 0xd28d0001,
++	0x0002027e, 0x10020288,
++	0xbe8200bf, 0xbefc00c1,
++	0xd89c2000, 0x00020201,
++	0xd89c6040, 0x00040401,
++	0x320202ff, 0x00000400,
++	0x80828102, 0xbf84fff8,
++	0xbefc00ff, 0x0000005c,
++	0xbf800000, 0xbe802c80,
++	0xbe812c80, 0xbe822c80,
++	0xbe832c80, 0x80fc847c,
++	0xbf84fffa, 0xbee60080,
++	0xbee70080, 0xbeea0180,
++	0xbeec0180, 0xbeee0180,
++	0xbef00180, 0xbef20180,
++	0xbef40180, 0xbef60180,
++	0xbef80180, 0xbefa0180,
++	0xbf810000, 0xbf8d0001,
++	0xbefc00ff, 0x0000005c,
++	0xbf800000, 0xbe802c80,
++	0xbe812c80, 0xbe822c80,
++	0xbe832c80, 0x80fc847c,
++	0xbf84fffa, 0xbee60080,
++	0xbee70080, 0xbeea01ff,
++	0x000000ee, 0xbf810000,
++};
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2_cleaner_shader.asm b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2_cleaner_shader.asm
+new file mode 100644
+index 00000000000000..35b8cf9070bd98
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2_cleaner_shader.asm
+@@ -0,0 +1,153 @@
++/* SPDX-License-Identifier: MIT */
++/*
++ * Copyright 2024 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++// This shader cleans LDS, SGPRs and VGPRs. It is the first 64 Dwords or 256 bytes of the 192-Dword cleaner shader.
++// To turn this shader program on for compilation, change this to main and the lower shader main to main_1.
++
++// MI200 : Clear SGPRs, VGPRs and LDS
++//   Uses two kernels launched separately:
++//   1. Clean VGPRs, LDS, and lower SGPRs
++//        Launches one workgroup per CU, each workgroup with 4x wave64 per SIMD in the CU
++//        Waves are "wave64" and have 128 VGPRs each, which uses all 512 VGPRs per SIMD
++//        Waves in the workgroup share the 64KB of LDS
++//        Each wave clears SGPRs 0 - 95. Because there are 4 waves/SIMD, this is physical SGPRs 0-383
++//        Each wave clears 128 VGPRs, so all 512 in the SIMD
++//        The first wave of the workgroup clears its 64KB of LDS
++//        The shader starts with "S_BARRIER" to ensure SPI has launched all waves of the workgroup
++//          before any wave in the workgroup could end.  Without this, it is possible not all SGPRs get cleared.
++//    2. Clean remaining SGPRs
++//        Launches a workgroup with 24 waves per workgroup, yielding 6 waves per SIMD in each CU
++//        Waves are allocating 96 SGPRs
++//          CP sets up SPI_RESOURCE_RESERVE_* registers to prevent these waves from allocating SGPRs 0-223.
++//          As such, these 6 waves per SIMD are allocated physical SGPRs 224-799
++//        Barriers do not work for >16 waves per workgroup, so we cannot start with S_BARRIER
++//          Instead, the shader starts with an S_SETHALT 1. Once all waves are launched CP will send unhalt command
++//        The shader then clears all SGPRs allocated to it, cleaning out physical SGPRs 224-799
++
++shader main
++  asic(MI200)
++  type(CS)
++  wave_size(64)
++// Note: original source code from SQ team
++
++//   (theoretical fastest = ~512 clks vgpr + 1536 lds + ~128 sgpr = 2176 clks)
++
++  s_cmp_eq_u32 s0, 1                                // If bit0 of sgpr0 is set, clear VGPRs and LDS, as FW set COMPUTE_USER_DATA_3
++  s_cbranch_scc0  label_0023                        // Clean VGPRs and LDS if sgpr0 of wave is set, scc = (s0 == 1)
++  S_BARRIER
++
++  s_movk_i32    m0, 0x0000
++  s_mov_b32     s2, 0x00000078  // Loop 128/8=16 times  (loop unrolled for performance)
++  //
++  // CLEAR VGPRs
++  //
++  s_set_gpr_idx_on  s2, 0x8    // enable Dest VGPR indexing
++label_0005:
++  v_mov_b32     v0, 0
++  v_mov_b32     v1, 0
++  v_mov_b32     v2, 0
++  v_mov_b32     v3, 0
++  v_mov_b32     v4, 0
++  v_mov_b32     v5, 0
++  v_mov_b32     v6, 0
++  v_mov_b32     v7, 0
++  s_sub_u32     s2, s2, 8
++  s_set_gpr_idx_idx  s2
++  s_cbranch_scc0  label_0005
++  s_set_gpr_idx_off
++
++  //
++  //
++
++  s_mov_b32     s2, 0x80000000                      // Bit31 is first_wave
++  s_and_b32     s2, s2, s1                          // sgpr0 has tg_size (first_wave) term as in ucode only COMPUTE_PGM_RSRC2.tg_size_en is set
++  s_cbranch_scc0  label_clean_sgpr_1                // Clean LDS if it's the first wave of the ThreadGroup/WorkGroup
++  // CLEAR LDS
++  //
++  s_mov_b32 exec_lo, 0xffffffff
++  s_mov_b32 exec_hi, 0xffffffff
++  v_mbcnt_lo_u32_b32  v1, exec_hi, 0          // Set V1 to thread-ID (0..63)
++  v_mbcnt_hi_u32_b32  v1, exec_lo, v1         // Set V1 to thread-ID (0..63)
++  v_mul_u32_u24  v1, 0x00000008, v1           // * 8, so each thread is a double-dword address (8 bytes)
++  s_mov_b32     s2, 0x00000003f               // 64 loop iterations
++  s_mov_b32     m0, 0xffffffff
++  // Clear all of LDS space
++  // Each FirstWave of WorkGroup clears 64kbyte block
++
++label_001F:
++  ds_write2_b64  v1, v[2:3], v[2:3] offset1:32
++  ds_write2_b64  v1, v[4:5], v[4:5] offset0:64 offset1:96
++  v_add_co_u32     v1, vcc, 0x00000400, v1
++  s_sub_u32     s2, s2, 1
++  s_cbranch_scc0  label_001F
++  //
++  // CLEAR SGPRs
++  //
++label_clean_sgpr_1:
++  s_mov_b32     m0, 0x0000005c   // Loop 96/4=24 times  (loop unrolled for performance)
++  s_nop 0
++label_sgpr_loop:
++  s_movreld_b32     s0, 0
++  s_movreld_b32     s1, 0
++  s_movreld_b32     s2, 0
++  s_movreld_b32     s3, 0
++  s_sub_u32         m0, m0, 4
++  s_cbranch_scc0  label_sgpr_loop
++
++  //clear vcc, flat scratch
++  s_mov_b32 flat_scratch_lo, 0   //clear  flat scratch lo SGPR
++  s_mov_b32 flat_scratch_hi, 0   //clear  flat scratch hi SGPR
++  s_mov_b64 vcc, 0               //clear vcc
++  s_mov_b64 ttmp0, 0             //Clear ttmp0 and ttmp1
++  s_mov_b64 ttmp2, 0             //Clear ttmp2 and ttmp3
++  s_mov_b64 ttmp4, 0             //Clear ttmp4 and ttmp5
++  s_mov_b64 ttmp6, 0             //Clear ttmp6 and ttmp7
++  s_mov_b64 ttmp8, 0             //Clear ttmp8 and ttmp9
++  s_mov_b64 ttmp10, 0            //Clear ttmp10 and ttmp11
++  s_mov_b64 ttmp12, 0            //Clear ttmp12 and ttmp13
++  s_mov_b64 ttmp14, 0            //Clear ttmp14 and ttmp15
++s_endpgm
++
++label_0023:
++
++  s_sethalt 1
++
++  s_mov_b32     m0, 0x0000005c   // Loop 96/4=24 times  (loop unrolled for performance)
++  s_nop 0
++label_sgpr_loop1:
++
++  s_movreld_b32     s0, 0
++  s_movreld_b32     s1, 0
++  s_movreld_b32     s2, 0
++  s_movreld_b32     s3, 0
++  s_sub_u32         m0, m0, 4
++  s_cbranch_scc0  label_sgpr_loop1
++
++  //clear vcc, flat scratch
++  s_mov_b32 flat_scratch_lo, 0   //clear  flat scratch lo SGPR
++  s_mov_b32 flat_scratch_hi, 0   //clear  flat scratch hi SGPR
++  s_mov_b64 vcc, 0xee            //clear vcc
++
++s_endpgm
++end
++
+diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
+index e019249883fb2f..194026e9be3331 100644
+--- a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
+@@ -40,10 +40,12 @@
+ static void hdp_v4_0_flush_hdp(struct amdgpu_device *adev,
+ 				struct amdgpu_ring *ring)
+ {
+-	if (!ring || !ring->funcs->emit_wreg)
++	if (!ring || !ring->funcs->emit_wreg) {
+ 		WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
+-	else
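++		/* Post a read back to make sure the write reached the HDP registers. */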
++		RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
++	} else {
+ 		amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
++	}
+ }
+ 
+ static void hdp_v4_0_invalidate_hdp(struct amdgpu_device *adev,
+@@ -54,11 +56,13 @@ static void hdp_v4_0_invalidate_hdp(struct amdgpu_device *adev,
+ 	    amdgpu_ip_version(adev, HDP_HWIP, 0) == IP_VERSION(4, 4, 5))
+ 		return;
+ 
+-	if (!ring || !ring->funcs->emit_wreg)
++	if (!ring || !ring->funcs->emit_wreg) {
+ 		WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
+-	else
++		RREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE);
++	} else {
+ 		amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
+ 			HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
++	}
+ }
+ 
+ static void hdp_v4_0_query_ras_error_count(struct amdgpu_device *adev,
+diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c
+index ed7facacf2fe30..d3962d46908811 100644
+--- a/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c
+@@ -31,10 +31,12 @@
+ static void hdp_v5_0_flush_hdp(struct amdgpu_device *adev,
+ 				struct amdgpu_ring *ring)
+ {
+-	if (!ring || !ring->funcs->emit_wreg)
++	if (!ring || !ring->funcs->emit_wreg) {
+ 		WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
+-	else
++		RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
++	} else {
+ 		amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
++	}
+ }
+ 
+ static void hdp_v5_0_invalidate_hdp(struct amdgpu_device *adev,
+@@ -42,6 +44,7 @@ static void hdp_v5_0_invalidate_hdp(struct amdgpu_device *adev,
+ {
+ 	if (!ring || !ring->funcs->emit_wreg) {
+ 		WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
++		RREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE);
+ 	} else {
+ 		amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
+ 					HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
+diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c b/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c
+index 29c3484ae1f166..f52552c5fa27b6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c
++++ b/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c
+@@ -31,13 +31,15 @@
+ static void hdp_v5_2_flush_hdp(struct amdgpu_device *adev,
+ 				struct amdgpu_ring *ring)
+ {
+-	if (!ring || !ring->funcs->emit_wreg)
++	if (!ring || !ring->funcs->emit_wreg) {
+ 		WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2,
+ 			0);
+-	else
++		RREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
++	} else {
+ 		amdgpu_ring_emit_wreg(ring,
+ 			(adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2,
+ 			0);
++	}
+ }
+ 
+ static void hdp_v5_2_update_mem_power_gating(struct amdgpu_device *adev,
+diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c
+index 33736d361dd0bc..6948fe9956ce47 100644
+--- a/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c
+@@ -34,10 +34,12 @@
+ static void hdp_v6_0_flush_hdp(struct amdgpu_device *adev,
+ 				struct amdgpu_ring *ring)
+ {
+-	if (!ring || !ring->funcs->emit_wreg)
++	if (!ring || !ring->funcs->emit_wreg) {
+ 		WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
+-	else
++		RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
++	} else {
+ 		amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
++	}
+ }
+ 
+ static void hdp_v6_0_update_clock_gating(struct amdgpu_device *adev,
+diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c
+index 1c99bb09e2a129..63820329f67eb6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c
+@@ -31,10 +31,12 @@
+ static void hdp_v7_0_flush_hdp(struct amdgpu_device *adev,
+ 				struct amdgpu_ring *ring)
+ {
+-	if (!ring || !ring->funcs->emit_wreg)
++	if (!ring || !ring->funcs->emit_wreg) {
+ 		WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
+-	else
++		RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
++	} else {
+ 		amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
++	}
+ }
+ 
+ static void hdp_v7_0_update_clock_gating(struct amdgpu_device *adev,
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
+index 0fda703363004f..6fca2915ea8fd5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
+@@ -116,6 +116,20 @@ static int vcn_v4_0_3_early_init(void *handle)
+ 	return amdgpu_vcn_early_init(adev);
+ }
+ 
++static int vcn_v4_0_3_fw_shared_init(struct amdgpu_device *adev, int inst_idx)
++{
++	struct amdgpu_vcn4_fw_shared *fw_shared;
++
++	fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
++	fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
++	fw_shared->sq.is_enabled = 1;
++
++	if (amdgpu_vcnfw_log)
++		amdgpu_vcn_fwlog_init(&adev->vcn.inst[inst_idx]);
++
++	return 0;
++}
++
+ /**
+  * vcn_v4_0_3_sw_init - sw init for VCN block
+  *
+@@ -148,8 +162,6 @@ static int vcn_v4_0_3_sw_init(void *handle)
+ 		return r;
+ 
+ 	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+-		volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+-
+ 		vcn_inst = GET_INST(VCN, i);
+ 
+ 		ring = &adev->vcn.inst[i].ring_enc[0];
+@@ -172,12 +184,7 @@ static int vcn_v4_0_3_sw_init(void *handle)
+ 		if (r)
+ 			return r;
+ 
+-		fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
+-		fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
+-		fw_shared->sq.is_enabled = true;
+-
+-		if (amdgpu_vcnfw_log)
+-			amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
++		vcn_v4_0_3_fw_shared_init(adev, i);
+ 	}
+ 
+ 	if (amdgpu_sriov_vf(adev)) {
+@@ -273,6 +280,8 @@ static int vcn_v4_0_3_hw_init(void *handle)
+ 		}
+ 	} else {
+ 		for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
++			struct amdgpu_vcn4_fw_shared *fw_shared;
++
+ 			vcn_inst = GET_INST(VCN, i);
+ 			ring = &adev->vcn.inst[i].ring_enc[0];
+ 
+@@ -296,6 +305,11 @@ static int vcn_v4_0_3_hw_init(void *handle)
+ 					regVCN_RB1_DB_CTRL);
+ 			}
+ 
++			/* Re-init fw_shared when RAS fatal error occurred */
++			fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
++			if (!fw_shared->sq.is_enabled)
++				vcn_v4_0_3_fw_shared_init(adev, i);
++
+ 			r = amdgpu_ring_test_helper(ring);
+ 			if (r)
+ 				return r;
+diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
+index ac439f0565e357..16f5561fb86ec5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
++++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
+@@ -114,6 +114,33 @@ static int vega20_ih_toggle_ring_interrupts(struct amdgpu_device *adev,
+ 	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 1 : 0));
+ 	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_GPU_TS_ENABLE, 1);
+ 
++	if (enable) {
++		/* Unset the CLEAR_OVERFLOW bit to make sure the next step
++		 * is switching the bit from 0 to 1
++		 */
++		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
++		if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
++			if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp))
++				return -ETIMEDOUT;
++		} else {
++			WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
++		}
++
++		/* Clear RB_OVERFLOW bit */
++		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
++		if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
++			if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp))
++				return -ETIMEDOUT;
++		} else {
++			WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
++		}
++
++		/* Unset the CLEAR_OVERFLOW bit immediately so new overflows
++		 * can be detected.
++		 */
++		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
++	}
++
+ 	/* enable_intr field is only valid in ring0 */
+ 	if (ih == &adev->irq.ih)
+ 		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 1 : 0));
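The write sequence above pulses WPTR_OVERFLOW_CLEAR from 0 to 1 and back, on the assumption stated in the hunk's comments that the overflow flag is cleared by the rising edge of that bit. A condensed model follows; in the real code the final clear is only folded into tmp and written out later in the function, and the bit position here is made up:

#include <stdint.h>

#define WPTR_OVERFLOW_CLEAR  (1u << 31)   /* illustrative bit position only */

/* The volatile pointer stands in for WREG32_NO_KIQ()/psp_reg_program(). */
static void pulse_wptr_overflow_clear(volatile uint32_t *ih_rb_cntl,
                                      uint32_t tmp)
{
    tmp &= ~WPTR_OVERFLOW_CLEAR;  /* guarantee the next write is a 0->1 edge */
    *ih_rb_cntl = tmp;

    tmp |= WPTR_OVERFLOW_CLEAR;   /* rising edge clears RB_OVERFLOW */
    *ih_rb_cntl = tmp;

    tmp &= ~WPTR_OVERFLOW_CLEAR;  /* drop the bit so new overflows latch again */
    *ih_rb_cntl = tmp;
}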
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+index 48caecf7e72ed1..8de61cc524c943 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+@@ -1509,6 +1509,8 @@ static int kfd_fill_gpu_cache_info_from_gfx_config_v2(struct kfd_dev *kdev,
+ 	if (adev->gfx.config.gc_tcp_size_per_cu) {
+ 		pcache_info[i].cache_size = adev->gfx.config.gc_tcp_size_per_cu;
+ 		pcache_info[i].cache_level = 1;
++		/* Cacheline size not available in IP discovery for gc943,gc944 */
++		pcache_info[i].cache_line_size = 128;
+ 		pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED |
+ 					CRAT_CACHE_FLAGS_DATA_CACHE |
+ 					CRAT_CACHE_FLAGS_SIMD_CACHE);
+@@ -1520,6 +1522,7 @@ static int kfd_fill_gpu_cache_info_from_gfx_config_v2(struct kfd_dev *kdev,
+ 		pcache_info[i].cache_size =
+ 			adev->gfx.config.gc_l1_instruction_cache_size_per_sqc;
+ 		pcache_info[i].cache_level = 1;
++		pcache_info[i].cache_line_size = 64;
+ 		pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED |
+ 					CRAT_CACHE_FLAGS_INST_CACHE |
+ 					CRAT_CACHE_FLAGS_SIMD_CACHE);
+@@ -1530,6 +1533,7 @@ static int kfd_fill_gpu_cache_info_from_gfx_config_v2(struct kfd_dev *kdev,
+ 	if (adev->gfx.config.gc_l1_data_cache_size_per_sqc) {
+ 		pcache_info[i].cache_size = adev->gfx.config.gc_l1_data_cache_size_per_sqc;
+ 		pcache_info[i].cache_level = 1;
++		pcache_info[i].cache_line_size = 64;
+ 		pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED |
+ 					CRAT_CACHE_FLAGS_DATA_CACHE |
+ 					CRAT_CACHE_FLAGS_SIMD_CACHE);
+@@ -1540,6 +1544,7 @@ static int kfd_fill_gpu_cache_info_from_gfx_config_v2(struct kfd_dev *kdev,
+ 	if (adev->gfx.config.gc_tcc_size) {
+ 		pcache_info[i].cache_size = adev->gfx.config.gc_tcc_size;
+ 		pcache_info[i].cache_level = 2;
++		pcache_info[i].cache_line_size = 128;
+ 		pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED |
+ 					CRAT_CACHE_FLAGS_DATA_CACHE |
+ 					CRAT_CACHE_FLAGS_SIMD_CACHE);
+@@ -1550,6 +1555,7 @@ static int kfd_fill_gpu_cache_info_from_gfx_config_v2(struct kfd_dev *kdev,
+ 	if (adev->gmc.mall_size) {
+ 		pcache_info[i].cache_size = adev->gmc.mall_size / 1024;
+ 		pcache_info[i].cache_level = 3;
++		pcache_info[i].cache_line_size = 64;
+ 		pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED |
+ 					CRAT_CACHE_FLAGS_DATA_CACHE |
+ 					CRAT_CACHE_FLAGS_SIMD_CACHE);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+index fad1c8f2bc8334..b05be24531e187 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+@@ -235,6 +235,9 @@ static void kfd_device_info_init(struct kfd_dev *kfd,
+ 			 */
+ 			kfd->device_info.needs_pci_atomics = true;
+ 			kfd->device_info.no_atomic_fw_version = kfd->adev->gfx.rs64_enable ? 509 : 0;
++		} else if (gc_version < IP_VERSION(13, 0, 0)) {
++			kfd->device_info.needs_pci_atomics = true;
++			kfd->device_info.no_atomic_fw_version = 2090;
+ 		} else {
+ 			kfd->device_info.needs_pci_atomics = true;
+ 		}
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 24fbde7dd1c425..ad3a3aa72b51f3 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -1910,7 +1910,11 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
+ 		else
+ 			init_data.flags.gpu_vm_support = (amdgpu_sg_display != 0);
+ 	} else {
+-		init_data.flags.gpu_vm_support = (amdgpu_sg_display != 0) && (adev->flags & AMD_IS_APU);
++		if (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(2, 0, 3))
++			init_data.flags.gpu_vm_support = (amdgpu_sg_display == 1);
++		else
++			init_data.flags.gpu_vm_support =
++				(amdgpu_sg_display != 0) && (adev->flags & AMD_IS_APU);
+ 	}
+ 
+ 	adev->mode_info.gpu_vm_support = init_data.flags.gpu_vm_support;
+@@ -7337,10 +7341,15 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
+ 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
+ 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
+ 	enum dc_status dc_result = DC_OK;
++	uint8_t bpc_limit = 6;
+ 
+ 	if (!dm_state)
+ 		return NULL;
+ 
++	if (aconnector->dc_link->connector_signal == SIGNAL_TYPE_HDMI_TYPE_A ||
++	    aconnector->dc_link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
++		bpc_limit = 8;
++
+ 	do {
+ 		stream = create_stream_for_sink(connector, drm_mode,
+ 						dm_state, old_stream,
+@@ -7361,11 +7370,12 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
+ 			dc_result = dm_validate_stream_and_context(adev->dm.dc, stream);
+ 
+ 		if (dc_result != DC_OK) {
+-			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
++			DRM_DEBUG_KMS("Mode %dx%d (clk %d) pixel_encoding:%s color_depth:%s failed validation -- %s\n",
+ 				      drm_mode->hdisplay,
+ 				      drm_mode->vdisplay,
+ 				      drm_mode->clock,
+-				      dc_result,
++				      dc_pixel_encoding_to_str(stream->timing.pixel_encoding),
++				      dc_color_depth_to_str(stream->timing.display_color_depth),
+ 				      dc_status_to_str(dc_result));
+ 
+ 			dc_stream_release(stream);
+@@ -7373,10 +7383,13 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
+ 			requested_bpc -= 2; /* lower bpc to retry validation */
+ 		}
+ 
+-	} while (stream == NULL && requested_bpc >= 6);
++	} while (stream == NULL && requested_bpc >= bpc_limit);
+ 
+-	if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
+-		DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
++	if ((dc_result == DC_FAIL_ENC_VALIDATE ||
++	     dc_result == DC_EXCEED_DONGLE_CAP) &&
++	     !aconnector->force_yuv420_output) {
++		DRM_DEBUG_KMS("%s:%d Retry forcing yuv420 encoding\n",
++				     __func__, __LINE__);
+ 
+ 		aconnector->force_yuv420_output = true;
+ 		stream = create_validate_stream_for_sink(aconnector, drm_mode,
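The change above has two parts: the retry loop now stops at 8 bpc for HDMI sinks and DP-to-HDMI converter dongles instead of descending to 6 bpc, and a DC_EXCEED_DONGLE_CAP failure now also triggers the YCbCr420 fallback. A sketch of the descent, with a hypothetical validate() callback standing in for the DC validation calls:

#include <stdbool.h>

static int pick_highest_valid_bpc(int requested_bpc, bool hdmi_like,
                                  bool (*validate)(int bpc))
{
    int bpc_limit = hdmi_like ? 8 : 6;   /* HDMI paths cannot go below 8 */

    while (requested_bpc >= bpc_limit) {
        if (validate(requested_bpc))
            return requested_bpc;
        requested_bpc -= 2;      /* lower bpc to retry validation */
    }
    return -1;                   /* caller falls back to forcing YCbCr420 */
}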
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
+index b46a3afe48ca7c..3bd0d46c170109 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
+@@ -132,6 +132,8 @@ static void dcn35_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *
+ 	for (i = 0; i < dc->res_pool->pipe_count; ++i) {
+ 		struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ 		struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
++		struct clk_mgr_internal *clk_mgr_internal = TO_CLK_MGR_INTERNAL(clk_mgr_base);
++		struct dccg *dccg = clk_mgr_internal->dccg;
+ 		struct pipe_ctx *pipe = safe_to_lower
+ 			? &context->res_ctx.pipe_ctx[i]
+ 			: &dc->current_state->res_ctx.pipe_ctx[i];
+@@ -148,8 +150,13 @@ static void dcn35_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *
+ 		new_pipe->stream_res.stream_enc &&
+ 		new_pipe->stream_res.stream_enc->funcs->is_fifo_enabled &&
+ 		new_pipe->stream_res.stream_enc->funcs->is_fifo_enabled(new_pipe->stream_res.stream_enc);
+-		if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal) ||
+-			!pipe->stream->link_enc) && !stream_changed_otg_dig_on) {
++		bool has_active_hpo = dccg->ctx->dc->link_srv->dp_is_128b_132b_signal(old_pipe) && dccg->ctx->dc->link_srv->dp_is_128b_132b_signal(new_pipe);
++
++		if (!has_active_hpo && !dccg->ctx->dc->link_srv->dp_is_128b_132b_signal(pipe) &&
++					(pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal) ||
++					!pipe->stream->link_enc) && !stream_changed_otg_dig_on)) {
++
++
+ 			/* This w/a should not trigger when we have a dig active */
+ 			if (disable) {
+ 				if (pipe->stream_res.tg && pipe->stream_res.tg->funcs->immediate_disable_crtc)
+@@ -257,11 +264,11 @@ static void dcn35_notify_host_router_bw(struct clk_mgr *clk_mgr_base, struct dc_
+ 	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ 	uint32_t host_router_bw_kbps[MAX_HOST_ROUTERS_NUM] = { 0 };
+ 	int i;
+-
+ 	for (i = 0; i < context->stream_count; ++i) {
+ 		const struct dc_stream_state *stream = context->streams[i];
+ 		const struct dc_link *link = stream->link;
+-		uint8_t lowest_dpia_index = 0, hr_index = 0;
++		uint8_t lowest_dpia_index = 0;
++		unsigned int hr_index = 0;
+ 
+ 		if (!link)
+ 			continue;
+@@ -271,6 +278,8 @@ static void dcn35_notify_host_router_bw(struct clk_mgr *clk_mgr_base, struct dc_
+ 			continue;
+ 
+ 		hr_index = (link->link_index - lowest_dpia_index) / 2;
++		if (hr_index >= MAX_HOST_ROUTERS_NUM)
++			continue;
+ 		host_router_bw_kbps[hr_index] += dc_bandwidth_in_kbps_from_timing(
+ 			&stream->timing, dc_link_get_highest_encoding_format(link));
+ 	}
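Two fixes land in dcn35_notify_host_router_bw: hr_index is widened from uint8_t to unsigned int so it cannot wrap before being checked, and an index past the end of host_router_bw_kbps is now skipped rather than written. A minimal model of the guarded accumulation; the array size here is assumed, the real constant lives in the DC headers:

#include <stdint.h>

#define MAX_HOST_ROUTERS_NUM 4   /* assumed value for illustration */

static void account_host_router_bw(uint32_t hr_bw_kbps[MAX_HOST_ROUTERS_NUM],
                                   unsigned int link_index,
                                   unsigned int lowest_dpia_index,
                                   uint32_t bw_kbps)
{
    unsigned int hr_index = (link_index - lowest_dpia_index) / 2;

    if (hr_index >= MAX_HOST_ROUTERS_NUM)
        return;                          /* previously an out-of-bounds += */
    hr_bw_kbps[hr_index] += bw_kbps;
}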
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index a6911bb2cf0c6c..9f570d447c2099 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -6006,3 +6006,21 @@ struct dc_power_profile dc_get_power_profile_for_dc_state(const struct dc_state
+ 
+ 	return profile;
+ }
++
++/*
++ **********************************************************************************
++ * dc_get_det_buffer_size_from_state() - extracts detile buffer size from dc state
++ *
++ * Called when DM wants to log detile buffer size from dc_state
++ *
++ **********************************************************************************
++ */
++unsigned int dc_get_det_buffer_size_from_state(const struct dc_state *context)
++{
++	struct dc *dc = context->clk_mgr->ctx->dc;
++
++	if (dc->res_pool->funcs->get_det_buffer_size)
++		return dc->res_pool->funcs->get_det_buffer_size(context);
++	else
++		return 0;
++}
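A hypothetical caller, to show how a DM layer might consume the new hook; since the helper returns 0 when the resource pool has no get_det_buffer_size callback, the log can simply be skipped in that case:

/* Sketch only: assumes the usual DC headers and drm_print.h. */
void log_det_buffer_size(const struct dc_state *context)
{
    unsigned int det_kbytes = dc_get_det_buffer_size_from_state(context);

    if (det_kbytes)
        DRM_DEBUG_DRIVER("DET buffer size: %u KB\n", det_kbytes);
}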
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
+index 801cdbc8117d9b..e255c204b7e855 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
+@@ -434,3 +434,43 @@ char *dc_status_to_str(enum dc_status status)
+ 
+ 	return "Unexpected status error";
+ }
++
++char *dc_pixel_encoding_to_str(enum dc_pixel_encoding pixel_encoding)
++{
++	switch (pixel_encoding) {
++	case PIXEL_ENCODING_RGB:
++		return "RGB";
++	case PIXEL_ENCODING_YCBCR422:
++		return "YUV422";
++	case PIXEL_ENCODING_YCBCR444:
++		return "YUV444";
++	case PIXEL_ENCODING_YCBCR420:
++		return "YUV420";
++	default:
++		return "Unknown";
++	}
++}
++
++char *dc_color_depth_to_str(enum dc_color_depth color_depth)
++{
++	switch (color_depth) {
++	case COLOR_DEPTH_666:
++		return "6-bpc";
++	case COLOR_DEPTH_888:
++		return "8-bpc";
++	case COLOR_DEPTH_101010:
++		return "10-bpc";
++	case COLOR_DEPTH_121212:
++		return "12-bpc";
++	case COLOR_DEPTH_141414:
++		return "14-bpc";
++	case COLOR_DEPTH_161616:
++		return "16-bpc";
++	case COLOR_DEPTH_999:
++		return "9-bpc";
++	case COLOR_DEPTH_111111:
++		return "11-bpc";
++	default:
++		return "Unknown";
++	}
++}
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+index c7599c40d4be38..d915020a429582 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+@@ -765,25 +765,6 @@ static inline void get_vp_scan_direction(
+ 		*flip_horz_scan_dir = !*flip_horz_scan_dir;
+ }
+ 
+-/*
+- * This is a preliminary vp size calculation to allow us to check taps support.
+- * The result is completely overridden afterwards.
+- */
+-static void calculate_viewport_size(struct pipe_ctx *pipe_ctx)
+-{
+-	struct scaler_data *data = &pipe_ctx->plane_res.scl_data;
+-
+-	data->viewport.width = dc_fixpt_ceil(dc_fixpt_mul_int(data->ratios.horz, data->recout.width));
+-	data->viewport.height = dc_fixpt_ceil(dc_fixpt_mul_int(data->ratios.vert, data->recout.height));
+-	data->viewport_c.width = dc_fixpt_ceil(dc_fixpt_mul_int(data->ratios.horz_c, data->recout.width));
+-	data->viewport_c.height = dc_fixpt_ceil(dc_fixpt_mul_int(data->ratios.vert_c, data->recout.height));
+-	if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
+-			pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) {
+-		swap(data->viewport.width, data->viewport.height);
+-		swap(data->viewport_c.width, data->viewport_c.height);
+-	}
+-}
+-
+ static struct rect intersect_rec(const struct rect *r0, const struct rect *r1)
+ {
+ 	struct rect rec;
+@@ -1468,6 +1449,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
+ 	const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+ 	struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
+ 	const struct rect odm_slice_src = resource_get_odm_slice_src_rect(pipe_ctx);
++	struct scaling_taps temp = {0};
+ 	bool res = false;
+ 
+ 	DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
+@@ -1519,14 +1501,16 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
+ 		res = spl_calculate_scaler_params(spl_in, spl_out);
+ 		// Convert respective out params from SPL to scaler data
+ 		translate_SPL_out_params_to_pipe_ctx(pipe_ctx, spl_out);
++
++		/* Ignore scaler failure if pipe context plane is phantom plane */
++		if (!res && plane_state->is_phantom)
++			res = true;
+ 	} else {
+ #endif
+ 	/* depends on h_active */
+ 	calculate_recout(pipe_ctx);
+ 	/* depends on pixel format */
+ 	calculate_scaling_ratios(pipe_ctx);
+-	/* depends on scaling ratios and recout, does not calculate offset yet */
+-	calculate_viewport_size(pipe_ctx);
+ 
+ 	/*
+ 	 * LB calculations depend on vp size, h/v_active and scaling ratios
+@@ -1547,6 +1531,24 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
+ 
+ 	pipe_ctx->plane_res.scl_data.lb_params.alpha_en = plane_state->per_pixel_alpha;
+ 
++	// get TAP value with 100x100 dummy data for max scaling quality, override
++	// if a new scaling quality is required
++	pipe_ctx->plane_res.scl_data.viewport.width = 100;
++	pipe_ctx->plane_res.scl_data.viewport.height = 100;
++	pipe_ctx->plane_res.scl_data.viewport_c.width = 100;
++	pipe_ctx->plane_res.scl_data.viewport_c.height = 100;
++	if (pipe_ctx->plane_res.xfm != NULL)
++		res = pipe_ctx->plane_res.xfm->funcs->transform_get_optimal_number_of_taps(
++				pipe_ctx->plane_res.xfm, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality);
++
++	if (pipe_ctx->plane_res.dpp != NULL)
++		res = pipe_ctx->plane_res.dpp->funcs->dpp_get_optimal_number_of_taps(
++				pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality);
++
++	temp = pipe_ctx->plane_res.scl_data.taps;
++
++	calculate_inits_and_viewports(pipe_ctx);
++
+ 	if (pipe_ctx->plane_res.xfm != NULL)
+ 		res = pipe_ctx->plane_res.xfm->funcs->transform_get_optimal_number_of_taps(
+ 				pipe_ctx->plane_res.xfm, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality);
+@@ -1573,11 +1575,14 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
+ 					&plane_state->scaling_quality);
+ 	}
+ 
+-	/*
+-	 * Depends on recout, scaling ratios, h_active and taps
+-	 * May need to re-check lb size after this in some obscure scenario
+-	 */
+-	if (res)
++	/* Ignore scaler failure if pipe context plane is phantom plane */
++	if (!res && plane_state->is_phantom)
++		res = true;
++
++	if (res && (pipe_ctx->plane_res.scl_data.taps.v_taps != temp.v_taps ||
++		pipe_ctx->plane_res.scl_data.taps.h_taps != temp.h_taps ||
++		pipe_ctx->plane_res.scl_data.taps.v_taps_c != temp.v_taps_c ||
++		pipe_ctx->plane_res.scl_data.taps.h_taps_c != temp.h_taps_c))
+ 		calculate_inits_and_viewports(pipe_ctx);
+ 
+ 	/*
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+index 9a406d74c0dd76..3d93efdc1026dd 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+@@ -819,12 +819,12 @@ void dc_stream_log(const struct dc *dc, const struct dc_stream_state *stream)
+ 			stream->dst.height,
+ 			stream->output_color_space);
+ 	DC_LOG_DC(
+-			"\tpix_clk_khz: %d, h_total: %d, v_total: %d, pixelencoder:%d, displaycolorDepth:%d\n",
++			"\tpix_clk_khz: %d, h_total: %d, v_total: %d, pixel_encoding:%s, color_depth:%s\n",
+ 			stream->timing.pix_clk_100hz / 10,
+ 			stream->timing.h_total,
+ 			stream->timing.v_total,
+-			stream->timing.pixel_encoding,
+-			stream->timing.display_color_depth);
++			dc_pixel_encoding_to_str(stream->timing.pixel_encoding),
++			dc_color_depth_to_str(stream->timing.display_color_depth));
+ 	DC_LOG_DC(
+ 			"\tlink: %d\n",
+ 			stream->link->link_index);
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index 3992ad73165bc6..7c163aa7e8bd2d 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -285,6 +285,7 @@ struct dc_caps {
+ 	uint16_t subvp_vertical_int_margin_us;
+ 	bool seamless_odm;
+ 	uint32_t max_v_total;
++	bool vtotal_limited_by_fp2;
+ 	uint32_t max_disp_clock_khz_at_vmin;
+ 	uint8_t subvp_drr_vblank_start_margin_us;
+ 	bool cursor_not_scaled;
+@@ -2543,6 +2544,8 @@ struct dc_power_profile {
+ 
+ struct dc_power_profile dc_get_power_profile_for_dc_state(const struct dc_state *context);
+ 
++unsigned int dc_get_det_buffer_size_from_state(const struct dc_state *context);
++
+ /* DSC Interfaces */
+ #include "dc_dsc.h"
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+index 1e7de0f03290a3..ec5009f411eb0d 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
++++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+@@ -1294,6 +1294,8 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
+ 
+ 		memset(&new_signals, 0, sizeof(new_signals));
+ 
++		new_signals.bits.allow_idle = 1; /* always set */
++
+ 		if (dc->config.disable_ips == DMUB_IPS_ENABLE ||
+ 		    dc->config.disable_ips == DMUB_IPS_DISABLE_DYNAMIC) {
+ 			new_signals.bits.allow_pg = 1;
+@@ -1389,7 +1391,7 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
+ 		 */
+ 		dc_dmub_srv->needs_idle_wake = false;
+ 
+-		if (prev_driver_signals.bits.allow_ips2 &&
++		if ((prev_driver_signals.bits.allow_ips2 || prev_driver_signals.all == 0) &&
+ 		    (!dc->debug.optimize_ips_handshake ||
+ 		     ips_fw->signals.bits.ips2_commit || !ips_fw->signals.bits.in_idle)) {
+ 			DC_LOG_IPS(
+@@ -1450,7 +1452,7 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
+ 		}
+ 
+ 		dc_dmub_srv_notify_idle(dc, false);
+-		if (prev_driver_signals.bits.allow_ips1) {
++		if (prev_driver_signals.bits.allow_ips1 || prev_driver_signals.all == 0) {
+ 			DC_LOG_IPS(
+ 				"wait for IPS1 commit clear (ips1_commit=%u ips2_commit=%u)",
+ 				ips_fw->signals.bits.ips1_commit,
+diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c
+index 5b343f745cf333..ae81451a3a725c 100644
+--- a/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c
++++ b/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c
+@@ -83,6 +83,15 @@ void enc314_disable_fifo(struct stream_encoder *enc)
+ 	REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, 0);
+ }
+ 
++static bool enc314_is_fifo_enabled(struct stream_encoder *enc)
++{
++	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
++	uint32_t reset_val;
++
++	REG_GET(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, &reset_val);
++	return (reset_val != 0);
++}
++
+ void enc314_dp_set_odm_combine(
+ 	struct stream_encoder *enc,
+ 	bool odm_combine)
+@@ -468,6 +477,7 @@ static const struct stream_encoder_funcs dcn314_str_enc_funcs = {
+ 
+ 	.enable_fifo = enc314_enable_fifo,
+ 	.disable_fifo = enc314_disable_fifo,
++	.is_fifo_enabled = enc314_is_fifo_enabled,
+ 	.set_input_mode = enc314_set_dig_input_mode,
+ };
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
+index d851c081e3768a..8dabb1ac0b684d 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
++++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
+@@ -1222,6 +1222,7 @@ static dml_bool_t CalculatePrefetchSchedule(struct display_mode_lib_scratch_st *
+ 	s->dst_y_prefetch_oto = s->Tvm_oto_lines + 2 * s->Tr0_oto_lines + s->Lsw_oto;
+ 
+ 	s->dst_y_prefetch_equ = p->VStartup - (*p->TSetup + dml_max(p->TWait + p->TCalc, *p->Tdmdl)) / s->LineTime - (*p->DSTYAfterScaler + (dml_float_t) *p->DSTXAfterScaler / (dml_float_t)p->myPipe->HTotal);
++	s->dst_y_prefetch_equ = dml_min(s->dst_y_prefetch_equ, 63.75); // limit to the reg limit of U6.2 for DST_Y_PREFETCH
+ 
+ #ifdef __DML_VBA_DEBUG__
+ 	dml_print("DML::%s: HTotal = %u\n", __func__, p->myPipe->HTotal);
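The 63.75 constant follows from the register format: DST_Y_PREFETCH is programmed as U6.2 fixed point (6 integer bits, 2 fraction bits), whose largest encodable value is 0xFF / 4 = 63.75. A small standalone illustration of the clamp and encoding:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    double dst_y_prefetch = 70.0;            /* would overflow the field */

    if (dst_y_prefetch > 63.75)
        dst_y_prefetch = 63.75;              /* the dml_min() clamp above */

    uint8_t reg = (uint8_t)(dst_y_prefetch * 4.0); /* encode as U6.2 */
    printf("encoded=0x%02x decoded=%.2f\n", reg, reg / 4.0);
    return 0;
}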
+diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
+index 8697eac1e1f7e1..8dee0d397e0322 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
++++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
+@@ -339,11 +339,22 @@ void dml21_apply_soc_bb_overrides(struct dml2_initialize_instance_in_out *dml_in
+ 	// }
+ }
+ 
++static unsigned int calc_max_hardware_v_total(const struct dc_stream_state *stream)
++{
++	unsigned int max_hw_v_total = stream->ctx->dc->caps.max_v_total;
++
++	if (stream->ctx->dc->caps.vtotal_limited_by_fp2) {
++		max_hw_v_total -= stream->timing.v_front_porch + 1;
++	}
++
++	return max_hw_v_total;
++}
++
+ static void populate_dml21_timing_config_from_stream_state(struct dml2_timing_cfg *timing,
+ 		struct dc_stream_state *stream,
+ 		struct dml2_context *dml_ctx)
+ {
+-	unsigned int hblank_start, vblank_start;
++	unsigned int hblank_start, vblank_start, min_hardware_refresh_in_uhz;
+ 
+ 	timing->h_active = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right;
+ 	timing->v_active = stream->timing.v_addressable + stream->timing.v_border_bottom + stream->timing.v_border_top;
+@@ -371,11 +382,23 @@ static void populate_dml21_timing_config_from_stream_state(struct dml2_timing_cf
+ 		- stream->timing.v_border_top - stream->timing.v_border_bottom;
+ 
+ 	timing->drr_config.enabled = stream->ignore_msa_timing_param;
+-	timing->drr_config.min_refresh_uhz = stream->timing.min_refresh_in_uhz;
+ 	timing->drr_config.drr_active_variable = stream->vrr_active_variable;
+ 	timing->drr_config.drr_active_fixed = stream->vrr_active_fixed;
+ 	timing->drr_config.disallowed = !stream->allow_freesync;
+ 
++	/* limit min refresh rate to DC cap */
++	min_hardware_refresh_in_uhz = stream->timing.min_refresh_in_uhz;
++	if (stream->ctx->dc->caps.max_v_total != 0) {
++		min_hardware_refresh_in_uhz = div64_u64((stream->timing.pix_clk_100hz * 100000000ULL),
++				(stream->timing.h_total * (long long)calc_max_hardware_v_total(stream)));
++	}
++
++	if (stream->timing.min_refresh_in_uhz > min_hardware_refresh_in_uhz) {
++		timing->drr_config.min_refresh_uhz = stream->timing.min_refresh_in_uhz;
++	} else {
++		timing->drr_config.min_refresh_uhz = min_hardware_refresh_in_uhz;
++	}
++
+ 	if (dml_ctx->config.callbacks.get_max_flickerless_instant_vtotal_increase &&
+ 			stream->ctx->dc->config.enable_fpo_flicker_detection == 1)
+ 		timing->drr_config.max_instant_vtotal_delta = dml_ctx->config.callbacks.get_max_flickerless_instant_vtotal_increase(stream, false);
+@@ -859,7 +882,7 @@ static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dm
+ 	plane->immediate_flip = plane_state->flip_immediate;
+ 
+ 	plane->composition.rect_out_height_spans_vactive =
+-		plane_state->dst_rect.height >= stream->timing.v_addressable &&
++		plane_state->dst_rect.height >= stream->src.height &&
+ 		stream->dst.height >= stream->timing.v_addressable;
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
+index 4e93eeedfc1bbd..efcc1a6b364c27 100644
+--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
+@@ -355,6 +355,20 @@ void dcn314_calculate_pix_rate_divider(
+ 	}
+ }
+ 
++static bool dcn314_is_pipe_dig_fifo_on(struct pipe_ctx *pipe)
++{
++	return pipe && pipe->stream
++		// Check dig's otg instance.
++		&& pipe->stream_res.stream_enc
++		&& pipe->stream_res.stream_enc->funcs->dig_source_otg
++		&& pipe->stream_res.tg->inst == pipe->stream_res.stream_enc->funcs->dig_source_otg(pipe->stream_res.stream_enc)
++		&& pipe->stream->link && pipe->stream->link->link_enc
++		&& pipe->stream->link->link_enc->funcs->is_dig_enabled
++		&& pipe->stream->link->link_enc->funcs->is_dig_enabled(pipe->stream->link->link_enc)
++		&& pipe->stream_res.stream_enc->funcs->is_fifo_enabled
++		&& pipe->stream_res.stream_enc->funcs->is_fifo_enabled(pipe->stream_res.stream_enc);
++}
++
+ void dcn314_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_state *context, unsigned int current_pipe_idx)
+ {
+ 	unsigned int i;
+@@ -371,7 +385,11 @@ void dcn314_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc
+ 		if (pipe->top_pipe || pipe->prev_odm_pipe)
+ 			continue;
+ 
+-		if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))) {
++		if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal)) &&
++			!pipe->stream->apply_seamless_boot_optimization &&
++			!pipe->stream->apply_edp_fast_boot_optimization) {
++			if (dcn314_is_pipe_dig_fifo_on(pipe))
++				continue;
+ 			pipe->stream_res.tg->funcs->disable_crtc(pipe->stream_res.tg);
+ 			reset_sync_context_for_pipe(dc, context, i);
+ 			otg_disabled[i] = true;
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_status.h b/drivers/gpu/drm/amd/display/dc/inc/core_status.h
+index fa5edd03d00439..b5afd8c3103dba 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/core_status.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/core_status.h
+@@ -60,5 +60,7 @@ enum dc_status {
+ };
+ 
+ char *dc_status_to_str(enum dc_status status);
++char *dc_pixel_encoding_to_str(enum dc_pixel_encoding pixel_encoding);
++char *dc_color_depth_to_str(enum dc_color_depth color_depth);
+ 
+ #endif /* _CORE_STATUS_H_ */
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+index bfb8b8502d2026..e1e3142cdc00ac 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+@@ -215,6 +215,7 @@ struct resource_funcs {
+ 
+ 	void (*get_panel_config_defaults)(struct dc_panel_config *panel_config);
+ 	void (*build_pipe_pix_clk_params)(struct pipe_ctx *pipe_ctx);
++	unsigned int (*get_det_buffer_size)(const struct dc_state *context);
+ };
+ 
+ struct audio_support{
+diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
+index eea2b3b307cd5f..45e4de8d5cff8d 100644
+--- a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
+@@ -1511,6 +1511,7 @@ bool dcn20_split_stream_for_odm(
+ 
+ 	if (prev_odm_pipe->plane_state) {
+ 		struct scaler_data *sd = &prev_odm_pipe->plane_res.scl_data;
++		struct output_pixel_processor *opp = next_odm_pipe->stream_res.opp;
+ 		int new_width;
+ 
+ 		/* HACTIVE halved for odm combine */
+@@ -1544,7 +1545,28 @@ bool dcn20_split_stream_for_odm(
+ 		sd->viewport_c.x += dc_fixpt_floor(dc_fixpt_mul_int(
+ 				sd->ratios.horz_c, sd->h_active - sd->recout.x));
+ 		sd->recout.x = 0;
++
++		/*
++		 * When ODM is used in YCbCr422 or 420 colour space, a split screen
++		 * is seen with the previous calculations since the extra left
++		 * edge pixel is accounted for in fmt but not in viewport.
++		 *
++		 * The adjustments below fix the split by accounting for the
++		 * extra left edge pixel in the recout and viewport as well.
++		 */
++		if (opp && opp->funcs->opp_get_left_edge_extra_pixel_count
++				&& opp->funcs->opp_get_left_edge_extra_pixel_count(
++					opp, next_odm_pipe->stream->timing.pixel_encoding,
++					resource_is_pipe_type(next_odm_pipe, OTG_MASTER)) == 1) {
++			sd->h_active += 1;
++			sd->recout.width += 1;
++			sd->viewport.x -= dc_fixpt_ceil(dc_fixpt_mul_int(sd->ratios.horz, 1));
++			sd->viewport_c.x -= dc_fixpt_ceil(dc_fixpt_mul_int(sd->ratios.horz, 1));
++			sd->viewport_c.width += dc_fixpt_ceil(dc_fixpt_mul_int(sd->ratios.horz, 1));
++			sd->viewport.width += dc_fixpt_ceil(dc_fixpt_mul_int(sd->ratios.horz, 1));
++		}
+ 	}
++
+ 	if (!next_odm_pipe->top_pipe)
+ 		next_odm_pipe->stream_res.opp = pool->opps[next_odm_pipe->pipe_idx];
+ 	else
+@@ -2133,6 +2155,7 @@ bool dcn20_fast_validate_bw(
+ 			ASSERT(0);
+ 		}
+ 	}
++
+ 	/* Actual dsc count per stream dsc validation*/
+ 	if (!dcn20_validate_dsc(dc, context)) {
+ 		context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states] =
+diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
+index 347e6aaea582fb..14b28841657d21 100644
+--- a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
+@@ -1298,7 +1298,7 @@ static struct link_encoder *dcn21_link_encoder_create(
+ 		kzalloc(sizeof(struct dcn21_link_encoder), GFP_KERNEL);
+ 	int link_regs_id;
+ 
+-	if (!enc21)
++	if (!enc21 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
+ 		return NULL;
+ 
+ 	link_regs_id =
+diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
+index 5040a4c6ed1862..75cc84473a577e 100644
+--- a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
+@@ -2354,6 +2354,7 @@ static bool dcn30_resource_construct(
+ 
+ 	dc->caps.dp_hdmi21_pcon_support = true;
+ 	dc->caps.max_v_total = (1 << 15) - 1;
++	dc->caps.vtotal_limited_by_fp2 = true;
+ 
+ 	/* read VBIOS LTTPR caps */
+ 	{
+diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
+index 5791b5cc287529..320b040d591d1e 100644
+--- a/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
+@@ -1234,6 +1234,7 @@ static bool dcn302_resource_construct(
+ 	dc->caps.extended_aux_timeout_support = true;
+ 	dc->caps.dmcub_support = true;
+ 	dc->caps.max_v_total = (1 << 15) - 1;
++	dc->caps.vtotal_limited_by_fp2 = true;
+ 
+ 	/* Color pipeline capabilities */
+ 	dc->caps.color.dpp.dcn_arch = 1;
+diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
+index 63f0f882c8610c..297cf4b5600dae 100644
+--- a/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
+@@ -1179,6 +1179,7 @@ static bool dcn303_resource_construct(
+ 	dc->caps.extended_aux_timeout_support = true;
+ 	dc->caps.dmcub_support = true;
+ 	dc->caps.max_v_total = (1 << 15) - 1;
++	dc->caps.vtotal_limited_by_fp2 = true;
+ 
+ 	/* Color pipeline capabilities */
+ 	dc->caps.color.dpp.dcn_arch = 1;
+diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
+index ac8cb20e2e3b64..80386f698ae4de 100644
+--- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
+@@ -1721,6 +1721,12 @@ int dcn31_populate_dml_pipes_from_context(
+ 	return pipe_cnt;
+ }
+ 
++unsigned int dcn31_get_det_buffer_size(
++	const struct dc_state *context)
++{
++	return context->bw_ctx.dml.ip.det_buffer_size_kbytes;
++}
++
+ void dcn31_calculate_wm_and_dlg(
+ 		struct dc *dc, struct dc_state *context,
+ 		display_e2e_pipe_params_st *pipes,
+@@ -1843,6 +1849,7 @@ static struct resource_funcs dcn31_res_pool_funcs = {
+ 	.update_bw_bounding_box = dcn31_update_bw_bounding_box,
+ 	.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
+ 	.get_panel_config_defaults = dcn31_get_panel_config_defaults,
++	.get_det_buffer_size = dcn31_get_det_buffer_size,
+ };
+ 
+ static struct clock_source *dcn30_clock_source_create(
+diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h
+index 901436591ed45c..551ad912f7bea8 100644
+--- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h
++++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h
+@@ -63,6 +63,9 @@ struct resource_pool *dcn31_create_resource_pool(
+ 		const struct dc_init_data *init_data,
+ 		struct dc *dc);
+ 
++unsigned int dcn31_get_det_buffer_size(
++	const struct dc_state *context);
++
+ /*temp: B0 specific before switch to dcn313 headers*/
+ #ifndef regPHYPLLF_PIXCLK_RESYNC_CNTL
+ #define regPHYPLLF_PIXCLK_RESYNC_CNTL 0x007e
+diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
+index 169924d0a8393e..01d95108ce662b 100644
+--- a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
+@@ -1778,6 +1778,7 @@ static struct resource_funcs dcn314_res_pool_funcs = {
+ 	.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
+ 	.get_panel_config_defaults = dcn314_get_panel_config_defaults,
+ 	.get_preferred_eng_id_dpia = dcn314_get_preferred_eng_id_dpia,
++	.get_det_buffer_size = dcn31_get_det_buffer_size,
+ };
+ 
+ static struct clock_source *dcn30_clock_source_create(
+diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
+index 3f4b9dba411244..f2ce687c0e03ca 100644
+--- a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
+@@ -1840,6 +1840,7 @@ static struct resource_funcs dcn315_res_pool_funcs = {
+ 	.update_bw_bounding_box = dcn315_update_bw_bounding_box,
+ 	.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
+ 	.get_panel_config_defaults = dcn315_get_panel_config_defaults,
++	.get_det_buffer_size = dcn31_get_det_buffer_size,
+ };
+ 
+ static bool dcn315_resource_construct(
+diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c
+index 5fd52c5fcee458..af82e13029c9e4 100644
+--- a/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c
+@@ -1720,6 +1720,7 @@ static struct resource_funcs dcn316_res_pool_funcs = {
+ 	.update_bw_bounding_box = dcn316_update_bw_bounding_box,
+ 	.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
+ 	.get_panel_config_defaults = dcn316_get_panel_config_defaults,
++	.get_det_buffer_size = dcn31_get_det_buffer_size,
+ };
+ 
+ static bool dcn316_resource_construct(
+diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
+index a124ad9bd108c8..6b889c8be0ca3f 100644
+--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
+@@ -2186,6 +2186,7 @@ static bool dcn32_resource_construct(
+ 	dc->caps.dmcub_support = true;
+ 	dc->caps.seamless_odm = true;
+ 	dc->caps.max_v_total = (1 << 15) - 1;
++	dc->caps.vtotal_limited_by_fp2 = true;
+ 
+ 	/* Color pipeline capabilities */
+ 	dc->caps.color.dpp.dcn_arch = 1;
+diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
+index 827a94f84f1001..74113c578bac40 100644
+--- a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
+@@ -1743,6 +1743,7 @@ static bool dcn321_resource_construct(
+ 	dc->caps.extended_aux_timeout_support = true;
+ 	dc->caps.dmcub_support = true;
+ 	dc->caps.max_v_total = (1 << 15) - 1;
++	dc->caps.vtotal_limited_by_fp2 = true;
+ 
+ 	/* Color pipeline capabilities */
+ 	dc->caps.color.dpp.dcn_arch = 1;
+diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
+index 893a9d9ee870df..d0c4693c12241b 100644
+--- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
+@@ -1779,6 +1779,7 @@ static struct resource_funcs dcn35_res_pool_funcs = {
+ 	.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
+ 	.get_panel_config_defaults = dcn35_get_panel_config_defaults,
+ 	.get_preferred_eng_id_dpia = dcn35_get_preferred_eng_id_dpia,
++	.get_det_buffer_size = dcn31_get_det_buffer_size,
+ };
+ 
+ static bool dcn35_resource_construct(
+@@ -1850,6 +1851,7 @@ static bool dcn35_resource_construct(
+ 	dc->caps.zstate_support = true;
+ 	dc->caps.ips_support = true;
+ 	dc->caps.max_v_total = (1 << 15) - 1;
++	dc->caps.vtotal_limited_by_fp2 = true;
+ 
+ 	/* Color pipeline capabilities */
+ 	dc->caps.color.dpp.dcn_arch = 1;
+diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
+index 70abd32ce2ad18..575c0aa12229cf 100644
+--- a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
+@@ -1758,6 +1758,7 @@ static struct resource_funcs dcn351_res_pool_funcs = {
+ 	.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
+ 	.get_panel_config_defaults = dcn35_get_panel_config_defaults,
+ 	.get_preferred_eng_id_dpia = dcn351_get_preferred_eng_id_dpia,
++	.get_det_buffer_size = dcn31_get_det_buffer_size,
+ };
+ 
+ static bool dcn351_resource_construct(
+@@ -1829,6 +1830,7 @@ static bool dcn351_resource_construct(
+ 	dc->caps.zstate_support = true;
+ 	dc->caps.ips_support = true;
+ 	dc->caps.max_v_total = (1 << 15) - 1;
++	dc->caps.vtotal_limited_by_fp2 = true;
+ 
+ 	/* Color pipeline capabilities */
+ 	dc->caps.color.dpp.dcn_arch = 1;
+diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
+index 9d56fbdcd06afd..4aa975418fb18d 100644
+--- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
+@@ -1826,6 +1826,7 @@ static bool dcn401_resource_construct(
+ 	dc->caps.extended_aux_timeout_support = true;
+ 	dc->caps.dmcub_support = true;
+ 	dc->caps.max_v_total = (1 << 15) - 1;
++	dc->caps.vtotal_limited_by_fp2 = true;
+ 
+ 	if (ASICREV_IS_GC_12_0_1_A0(dc->ctx->asic_id.hw_internal_rev))
+ 		dc->caps.dcc_plane_width_limit = 7680;
+diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+index ebcf68bfae2b32..7835100b37c41e 100644
+--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
++++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+@@ -747,7 +747,8 @@ union dmub_shared_state_ips_driver_signals {
+ 		uint32_t allow_ips1 : 1; /**< 1 is IPS1 is allowed */
+ 		uint32_t allow_ips2 : 1; /**< 1 is IPS1 is allowed */
+ 		uint32_t allow_z10 : 1; /**< 1 if Z10 is allowed */
+-		uint32_t reserved_bits : 28; /**< Reversed bits */
++		uint32_t allow_idle : 1; /**< 1 if driver is allowing idle */
++		uint32_t reserved_bits : 27; /**< Reserved bits */
+ 	} bits;
+ 	uint32_t all;
+ };
+diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+index bbd259cea4f4f6..ab62a76d48cf76 100644
+--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
++++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+@@ -121,6 +121,17 @@ static unsigned int calc_duration_in_us_from_v_total(
+ 	return duration_in_us;
+ }
+ 
++static unsigned int calc_max_hardware_v_total(const struct dc_stream_state *stream)
++{
++	unsigned int max_hw_v_total = stream->ctx->dc->caps.max_v_total;
++
++	if (stream->ctx->dc->caps.vtotal_limited_by_fp2) {
++		max_hw_v_total -= stream->timing.v_front_porch + 1;
++	}
++
++	return max_hw_v_total;
++}
++
+ unsigned int mod_freesync_calc_v_total_from_refresh(
+ 		const struct dc_stream_state *stream,
+ 		unsigned int refresh_in_uhz)
+@@ -1002,7 +1013,7 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
+ 
+ 	if (stream->ctx->dc->caps.max_v_total != 0 && stream->timing.h_total != 0) {
+ 		min_hardware_refresh_in_uhz = div64_u64((stream->timing.pix_clk_100hz * 100000000ULL),
+-			(stream->timing.h_total * (long long)stream->ctx->dc->caps.max_v_total));
++			(stream->timing.h_total * (long long)calc_max_hardware_v_total(stream)));
+ 	}
+ 	/* Limit minimum refresh rate to what can be supported by hardware */
+ 	min_refresh_in_uhz = min_hardware_refresh_in_uhz > in_config->min_refresh_in_uhz ?
+diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+index d5d6ab484e5add..0fa6fbee197899 100644
+--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
++++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+@@ -1409,7 +1409,11 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
+  * create a custom set of heuristics, write a string of numbers to the file
+  * starting with the number of the custom profile along with a setting
+  * for each heuristic parameter.  Due to differences across asic families
+- * the heuristic parameters vary from family to family.
++ * the heuristic parameters vary from family to family. Additionally,
++ * you can apply the custom heuristics to different clock domains. Each
++ * clock domain is considered a distinct operation, so if you modify the
++ * gfxclk heuristics and then the memclk heuristics, all of the
++ * custom heuristics will be retained until you switch to another profile.
+  *
+  */
+ 
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+index 32bdeac2676b5c..0c0b9aa44dfa3a 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+@@ -72,6 +72,10 @@ static int smu_set_power_limit(void *handle, uint32_t limit);
+ static int smu_set_fan_speed_rpm(void *handle, uint32_t speed);
+ static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);
+ static int smu_set_mp1_state(void *handle, enum pp_mp1_state mp1_state);
++static void smu_power_profile_mode_get(struct smu_context *smu,
++				       enum PP_SMC_POWER_PROFILE profile_mode);
++static void smu_power_profile_mode_put(struct smu_context *smu,
++				       enum PP_SMC_POWER_PROFILE profile_mode);
+ 
+ static int smu_sys_get_pp_feature_mask(void *handle,
+ 				       char *buf)
+@@ -1257,35 +1261,19 @@ static int smu_sw_init(void *handle)
+ 	INIT_WORK(&smu->interrupt_work, smu_interrupt_work_fn);
+ 	atomic64_set(&smu->throttle_int_counter, 0);
+ 	smu->watermarks_bitmap = 0;
+-	smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
+-	smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
+ 
+ 	atomic_set(&smu->smu_power.power_gate.vcn_gated, 1);
+ 	atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);
+ 	atomic_set(&smu->smu_power.power_gate.vpe_gated, 1);
+ 	atomic_set(&smu->smu_power.power_gate.umsch_mm_gated, 1);
+ 
+-	smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
+-	smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
+-	smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
+-	smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
+-	smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
+-	smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
+-	smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;
+-
+ 	if (smu->is_apu ||
+ 	    !smu_is_workload_profile_available(smu, PP_SMC_POWER_PROFILE_FULLSCREEN3D))
+-		smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
++		smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
+ 	else
+-		smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D];
+-
+-	smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
+-	smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
+-	smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
+-	smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
+-	smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
+-	smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
+-	smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
++		smu->power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
++	smu_power_profile_mode_get(smu, smu->power_profile_mode);
++
+ 	smu->display_config = &adev->pm.pm_display_cfg;
+ 
+ 	smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
+@@ -1338,6 +1326,11 @@ static int smu_sw_fini(void *handle)
+ 		return ret;
+ 	}
+ 
++	if (smu->custom_profile_params) {
++		kfree(smu->custom_profile_params);
++		smu->custom_profile_params = NULL;
++	}
++
+ 	smu_fini_microcode(smu);
+ 
+ 	return 0;
+@@ -2117,6 +2110,9 @@ static int smu_suspend(void *handle)
+ 	if (!ret)
+ 		adev->gfx.gfx_off_entrycount = count;
+ 
++	/* clear this on suspend so it will get reprogrammed on resume */
++	smu->workload_mask = 0;
++
+ 	return 0;
+ }
+ 
+@@ -2229,25 +2225,49 @@ static int smu_enable_umd_pstate(void *handle,
+ }
+ 
+ static int smu_bump_power_profile_mode(struct smu_context *smu,
+-					   long *param,
+-					   uint32_t param_size)
++				       long *custom_params,
++				       u32 custom_params_max_idx)
+ {
+-	int ret = 0;
++	u32 workload_mask = 0;
++	int i, ret = 0;
++
++	for (i = 0; i < PP_SMC_POWER_PROFILE_COUNT; i++) {
++		if (smu->workload_refcount[i])
++			workload_mask |= 1 << i;
++	}
++
++	if (smu->workload_mask == workload_mask)
++		return 0;
+ 
+ 	if (smu->ppt_funcs->set_power_profile_mode)
+-		ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size);
++		ret = smu->ppt_funcs->set_power_profile_mode(smu, workload_mask,
++							     custom_params,
++							     custom_params_max_idx);
++
++	if (!ret)
++		smu->workload_mask = workload_mask;
+ 
+ 	return ret;
+ }
+ 
++static void smu_power_profile_mode_get(struct smu_context *smu,
++				       enum PP_SMC_POWER_PROFILE profile_mode)
++{
++	smu->workload_refcount[profile_mode]++;
++}
++
++static void smu_power_profile_mode_put(struct smu_context *smu,
++				       enum PP_SMC_POWER_PROFILE profile_mode)
++{
++	if (smu->workload_refcount[profile_mode])
++		smu->workload_refcount[profile_mode]--;
++}
++
+ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
+ 					  enum amd_dpm_forced_level level,
+-					  bool skip_display_settings,
+-					  bool init)
++					  bool skip_display_settings)
+ {
+ 	int ret = 0;
+-	int index = 0;
+-	long workload[1];
+ 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+ 
+ 	if (!skip_display_settings) {
+@@ -2284,14 +2304,8 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
+ 	}
+ 
+ 	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
+-		smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
+-		index = fls(smu->workload_mask);
+-		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
+-		workload[0] = smu->workload_setting[index];
+-
+-		if (init || smu->power_profile_mode != workload[0])
+-			smu_bump_power_profile_mode(smu, workload, 0);
+-	}
++	    smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
++		smu_bump_power_profile_mode(smu, NULL, 0);
+ 
+ 	return ret;
+ }
+@@ -2310,13 +2324,13 @@ static int smu_handle_task(struct smu_context *smu,
+ 		ret = smu_pre_display_config_changed(smu);
+ 		if (ret)
+ 			return ret;
+-		ret = smu_adjust_power_state_dynamic(smu, level, false, false);
++		ret = smu_adjust_power_state_dynamic(smu, level, false);
+ 		break;
+ 	case AMD_PP_TASK_COMPLETE_INIT:
+-		ret = smu_adjust_power_state_dynamic(smu, level, true, true);
++		ret = smu_adjust_power_state_dynamic(smu, level, true);
+ 		break;
+ 	case AMD_PP_TASK_READJUST_POWER_STATE:
+-		ret = smu_adjust_power_state_dynamic(smu, level, true, false);
++		ret = smu_adjust_power_state_dynamic(smu, level, true);
+ 		break;
+ 	default:
+ 		break;
+@@ -2338,12 +2352,11 @@ static int smu_handle_dpm_task(void *handle,
+ 
+ static int smu_switch_power_profile(void *handle,
+ 				    enum PP_SMC_POWER_PROFILE type,
+-				    bool en)
++				    bool enable)
+ {
+ 	struct smu_context *smu = handle;
+ 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+-	long workload[1];
+-	uint32_t index;
++	int ret;
+ 
+ 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ 		return -EOPNOTSUPP;
+@@ -2351,21 +2364,21 @@ static int smu_switch_power_profile(void *handle,
+ 	if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
+ 		return -EINVAL;
+ 
+-	if (!en) {
+-		smu->workload_mask &= ~(1 << smu->workload_prority[type]);
+-		index = fls(smu->workload_mask);
+-		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
+-		workload[0] = smu->workload_setting[index];
+-	} else {
+-		smu->workload_mask |= (1 << smu->workload_prority[type]);
+-		index = fls(smu->workload_mask);
+-		index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
+-		workload[0] = smu->workload_setting[index];
+-	}
+-
+ 	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
+-		smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
+-		smu_bump_power_profile_mode(smu, workload, 0);
++	    smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
++		if (enable)
++			smu_power_profile_mode_get(smu, type);
++		else
++			smu_power_profile_mode_put(smu, type);
++		ret = smu_bump_power_profile_mode(smu, NULL, 0);
++		if (ret) {
++			if (enable)
++				smu_power_profile_mode_put(smu, type);
++			else
++				smu_power_profile_mode_get(smu, type);
++			return ret;
++		}
++	}
+ 
+ 	return 0;
+ }
+@@ -3053,12 +3066,35 @@ static int smu_set_power_profile_mode(void *handle,
+ 				      uint32_t param_size)
+ {
+ 	struct smu_context *smu = handle;
++	bool custom = false;
++	int ret = 0;
+ 
+ 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
+ 	    !smu->ppt_funcs->set_power_profile_mode)
+ 		return -EOPNOTSUPP;
+ 
+-	return smu_bump_power_profile_mode(smu, param, param_size);
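++	/* the requested profile mode is passed as the last element of @param */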
++	if (param[param_size] == PP_SMC_POWER_PROFILE_CUSTOM) {
++		custom = true;
++		/* clear frontend mask so custom changes propagate */
++		smu->workload_mask = 0;
++	}
++
++	if ((param[param_size] != smu->power_profile_mode) || custom) {
++		/* clear the old user preference */
++		smu_power_profile_mode_put(smu, smu->power_profile_mode);
++		/* set the new user preference */
++		smu_power_profile_mode_get(smu, param[param_size]);
++		ret = smu_bump_power_profile_mode(smu,
++						  custom ? param : NULL,
++						  custom ? param_size : 0);
++		if (ret)
++			smu_power_profile_mode_put(smu, param[param_size]);
++		else
++			/* store the user's preference */
++			smu->power_profile_mode = param[param_size];
++	}
++
++	return ret;
+ }
+ 
+ static int smu_get_fan_control_mode(void *handle, u32 *fan_mode)
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+index b44a185d07e84c..2b8a18ce25d943 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
++++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+@@ -556,11 +556,13 @@ struct smu_context {
+ 	uint32_t hard_min_uclk_req_from_dal;
+ 	bool disable_uclk_switch;
+ 
++	/* ASIC-agnostic workload mask */
+ 	uint32_t workload_mask;
+-	uint32_t workload_prority[WORKLOAD_POLICY_MAX];
+-	uint32_t workload_setting[WORKLOAD_POLICY_MAX];
++	/* default/user workload preference */
+ 	uint32_t power_profile_mode;
+-	uint32_t default_power_profile_mode;
++	uint32_t workload_refcount[PP_SMC_POWER_PROFILE_COUNT];
++	/* backend-specific custom workload settings */
++	long *custom_profile_params;
+ 	bool pm_enabled;
+ 	bool is_apu;
+ 
+@@ -731,9 +733,12 @@ struct pptable_funcs {
+ 	 * @set_power_profile_mode: Set a power profile mode. Also used to
+ 	 *                          create/set custom power profile modes.
+ 	 * &input: Power profile mode parameters.
+-	 * &size: Size of &input.
++	 * &workload_mask: mask of workloads to enable
++	 * &custom_params: custom profile parameters
++	 * &custom_params_max_idx: max valid idx into custom_params
+ 	 */
+-	int (*set_power_profile_mode)(struct smu_context *smu, long *input, uint32_t size);
++	int (*set_power_profile_mode)(struct smu_context *smu, u32 workload_mask,
++				      long *custom_params, u32 custom_params_max_idx);
+ 
+ 	/**
+ 	 * @dpm_set_vcn_enable: Enable/disable VCN engine dynamic power
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+index d52512f5f1bd9d..fc1297fecc62e0 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+@@ -1445,98 +1445,120 @@ static int arcturus_get_power_profile_mode(struct smu_context *smu,
+ 	return size;
+ }
+ 
+-static int arcturus_set_power_profile_mode(struct smu_context *smu,
+-					   long *input,
+-					   uint32_t size)
++#define ARCTURUS_CUSTOM_PARAMS_COUNT 10
++#define ARCTURUS_CUSTOM_PARAMS_CLOCK_COUNT 2
++#define ARCTURUS_CUSTOM_PARAMS_SIZE (ARCTURUS_CUSTOM_PARAMS_CLOCK_COUNT * ARCTURUS_CUSTOM_PARAMS_COUNT * sizeof(long))
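++/* one row of ARCTURUS_CUSTOM_PARAMS_COUNT longs per clock type; entry 0 flags the row as set */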
++
++static int arcturus_set_power_profile_mode_coeff(struct smu_context *smu,
++						 long *input)
+ {
+ 	DpmActivityMonitorCoeffInt_t activity_monitor;
+-	int workload_type = 0;
+-	uint32_t profile_mode = input[size];
+-	int ret = 0;
++	int ret, idx;
+ 
+-	if (profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
+-		dev_err(smu->adev->dev, "Invalid power profile mode %d\n", profile_mode);
+-		return -EINVAL;
++	ret = smu_cmn_update_table(smu,
++				   SMU_TABLE_ACTIVITY_MONITOR_COEFF,
++				   WORKLOAD_PPLIB_CUSTOM_BIT,
++				   (void *)(&activity_monitor),
++				   false);
++	if (ret) {
++		dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
++		return ret;
+ 	}
+ 
++	idx = 0 * ARCTURUS_CUSTOM_PARAMS_COUNT;
++	if (input[idx]) {
++		/* Gfxclk */
++		activity_monitor.Gfx_FPS = input[idx + 1];
++		activity_monitor.Gfx_UseRlcBusy = input[idx + 2];
++		activity_monitor.Gfx_MinActiveFreqType = input[idx + 3];
++		activity_monitor.Gfx_MinActiveFreq = input[idx + 4];
++		activity_monitor.Gfx_BoosterFreqType = input[idx + 5];
++		activity_monitor.Gfx_BoosterFreq = input[idx + 6];
++		activity_monitor.Gfx_PD_Data_limit_c = input[idx + 7];
++		activity_monitor.Gfx_PD_Data_error_coeff = input[idx + 8];
++		activity_monitor.Gfx_PD_Data_error_rate_coeff = input[idx + 9];
++	}
++	idx = 1 * ARCTURUS_CUSTOM_PARAMS_COUNT;
++	if (input[idx]) {
++		/* Uclk */
++		activity_monitor.Mem_FPS = input[idx + 1];
++		activity_monitor.Mem_UseRlcBusy = input[idx + 2];
++		activity_monitor.Mem_MinActiveFreqType = input[idx + 3];
++		activity_monitor.Mem_MinActiveFreq = input[idx + 4];
++		activity_monitor.Mem_BoosterFreqType = input[idx + 5];
++		activity_monitor.Mem_BoosterFreq = input[idx + 6];
++		activity_monitor.Mem_PD_Data_limit_c = input[idx + 7];
++		activity_monitor.Mem_PD_Data_error_coeff = input[idx + 8];
++		activity_monitor.Mem_PD_Data_error_rate_coeff = input[idx + 9];
++	}
+ 
+-	if ((profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) &&
+-	     (smu->smc_fw_version >= 0x360d00)) {
+-		if (size != 10)
+-			return -EINVAL;
++	ret = smu_cmn_update_table(smu,
++				   SMU_TABLE_ACTIVITY_MONITOR_COEFF,
++				   WORKLOAD_PPLIB_CUSTOM_BIT,
++				   (void *)(&activity_monitor),
++				   true);
++	if (ret) {
++		dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
++		return ret;
++	}
+ 
+-		ret = smu_cmn_update_table(smu,
+-				       SMU_TABLE_ACTIVITY_MONITOR_COEFF,
+-				       WORKLOAD_PPLIB_CUSTOM_BIT,
+-				       (void *)(&activity_monitor),
+-				       false);
+-		if (ret) {
+-			dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
+-			return ret;
+-		}
++	return ret;
++}
+ 
+-		switch (input[0]) {
+-		case 0: /* Gfxclk */
+-			activity_monitor.Gfx_FPS = input[1];
+-			activity_monitor.Gfx_UseRlcBusy = input[2];
+-			activity_monitor.Gfx_MinActiveFreqType = input[3];
+-			activity_monitor.Gfx_MinActiveFreq = input[4];
+-			activity_monitor.Gfx_BoosterFreqType = input[5];
+-			activity_monitor.Gfx_BoosterFreq = input[6];
+-			activity_monitor.Gfx_PD_Data_limit_c = input[7];
+-			activity_monitor.Gfx_PD_Data_error_coeff = input[8];
+-			activity_monitor.Gfx_PD_Data_error_rate_coeff = input[9];
+-			break;
+-		case 1: /* Uclk */
+-			activity_monitor.Mem_FPS = input[1];
+-			activity_monitor.Mem_UseRlcBusy = input[2];
+-			activity_monitor.Mem_MinActiveFreqType = input[3];
+-			activity_monitor.Mem_MinActiveFreq = input[4];
+-			activity_monitor.Mem_BoosterFreqType = input[5];
+-			activity_monitor.Mem_BoosterFreq = input[6];
+-			activity_monitor.Mem_PD_Data_limit_c = input[7];
+-			activity_monitor.Mem_PD_Data_error_coeff = input[8];
+-			activity_monitor.Mem_PD_Data_error_rate_coeff = input[9];
+-			break;
+-		default:
++static int arcturus_set_power_profile_mode(struct smu_context *smu,
++					   u32 workload_mask,
++					   long *custom_params,
++					   u32 custom_params_max_idx)
++{
++	u32 backend_workload_mask = 0;
++	int ret, idx = -1, i;
++
++	smu_cmn_get_backend_workload_mask(smu, workload_mask,
++					  &backend_workload_mask);
++
++	if (workload_mask & (1 << PP_SMC_POWER_PROFILE_CUSTOM)) {
++		if (smu->smc_fw_version < 0x360d00)
+ 			return -EINVAL;
++		if (!smu->custom_profile_params) {
++			smu->custom_profile_params =
++				kzalloc(ARCTURUS_CUSTOM_PARAMS_SIZE, GFP_KERNEL);
++			if (!smu->custom_profile_params)
++				return -ENOMEM;
+ 		}
+-
+-		ret = smu_cmn_update_table(smu,
+-				       SMU_TABLE_ACTIVITY_MONITOR_COEFF,
+-				       WORKLOAD_PPLIB_CUSTOM_BIT,
+-				       (void *)(&activity_monitor),
+-				       true);
++		if (custom_params && custom_params_max_idx) {
++			if (custom_params_max_idx != ARCTURUS_CUSTOM_PARAMS_COUNT)
++				return -EINVAL;
++			if (custom_params[0] >= ARCTURUS_CUSTOM_PARAMS_CLOCK_COUNT)
++				return -EINVAL;
++			idx = custom_params[0] * ARCTURUS_CUSTOM_PARAMS_COUNT;
++			smu->custom_profile_params[idx] = 1;
++			for (i = 1; i < custom_params_max_idx; i++)
++				smu->custom_profile_params[idx + i] = custom_params[i];
++		}
++		ret = arcturus_set_power_profile_mode_coeff(smu,
++							    smu->custom_profile_params);
+ 		if (ret) {
+-			dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
++			if (idx != -1)
++				smu->custom_profile_params[idx] = 0;
+ 			return ret;
+ 		}
+-	}
+-
+-	/*
+-	 * Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT
+-	 * Not all profile modes are supported on arcturus.
+-	 */
+-	workload_type = smu_cmn_to_asic_specific_index(smu,
+-						       CMN2ASIC_MAPPING_WORKLOAD,
+-						       profile_mode);
+-	if (workload_type < 0) {
+-		dev_dbg(smu->adev->dev, "Unsupported power profile mode %d on arcturus\n", profile_mode);
+-		return -EINVAL;
++	} else if (smu->custom_profile_params) {
++		memset(smu->custom_profile_params, 0, ARCTURUS_CUSTOM_PARAMS_SIZE);
+ 	}
+ 
+ 	ret = smu_cmn_send_smc_msg_with_param(smu,
+-					  SMU_MSG_SetWorkloadMask,
+-					  1 << workload_type,
+-					  NULL);
++					      SMU_MSG_SetWorkloadMask,
++					      backend_workload_mask,
++					      NULL);
+ 	if (ret) {
+-		dev_err(smu->adev->dev, "Fail to set workload type %d\n", workload_type);
++		dev_err(smu->adev->dev, "Failed to set workload mask 0x%08x\n",
++			workload_mask);
++		if (idx != -1)
++			smu->custom_profile_params[idx] = 0;
+ 		return ret;
+ 	}
+ 
+-	smu->power_profile_mode = profile_mode;
+-
+-	return 0;
++	return ret;
+ }
+ 
+ static int arcturus_set_performance_level(struct smu_context *smu,
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+index 16af1a329621f1..27c1892b2c7493 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+@@ -2004,87 +2004,122 @@ static int navi10_get_power_profile_mode(struct smu_context *smu, char *buf)
+ 	return size;
+ }
+ 
+-static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
++#define NAVI10_CUSTOM_PARAMS_COUNT 10
++#define NAVI10_CUSTOM_PARAMS_CLOCKS_COUNT 3
++#define NAVI10_CUSTOM_PARAMS_SIZE (NAVI10_CUSTOM_PARAMS_CLOCKS_COUNT * NAVI10_CUSTOM_PARAMS_COUNT * sizeof(long))
++
++static int navi10_set_power_profile_mode_coeff(struct smu_context *smu,
++					       long *input)
+ {
+ 	DpmActivityMonitorCoeffInt_t activity_monitor;
+-	int workload_type, ret = 0;
++	int ret, idx;
+ 
+-	smu->power_profile_mode = input[size];
++	ret = smu_cmn_update_table(smu,
++				   SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
++				   (void *)(&activity_monitor), false);
++	if (ret) {
++		dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
++		return ret;
++	}
+ 
+-	if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
+-		dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode);
+-		return -EINVAL;
++	idx = 0 * NAVI10_CUSTOM_PARAMS_COUNT;
++	if (input[idx]) {
++		/* Gfxclk */
++		activity_monitor.Gfx_FPS = input[idx + 1];
++		activity_monitor.Gfx_MinFreqStep = input[idx + 2];
++		activity_monitor.Gfx_MinActiveFreqType = input[idx + 3];
++		activity_monitor.Gfx_MinActiveFreq = input[idx + 4];
++		activity_monitor.Gfx_BoosterFreqType = input[idx + 5];
++		activity_monitor.Gfx_BoosterFreq = input[idx + 6];
++		activity_monitor.Gfx_PD_Data_limit_c = input[idx + 7];
++		activity_monitor.Gfx_PD_Data_error_coeff = input[idx + 8];
++		activity_monitor.Gfx_PD_Data_error_rate_coeff = input[idx + 9];
++	}
++	idx = 1 * NAVI10_CUSTOM_PARAMS_COUNT;
++	if (input[idx]) {
++		/* Socclk */
++		activity_monitor.Soc_FPS = input[idx + 1];
++		activity_monitor.Soc_MinFreqStep = input[idx + 2];
++		activity_monitor.Soc_MinActiveFreqType = input[idx + 3];
++		activity_monitor.Soc_MinActiveFreq = input[idx + 4];
++		activity_monitor.Soc_BoosterFreqType = input[idx + 5];
++		activity_monitor.Soc_BoosterFreq = input[idx + 6];
++		activity_monitor.Soc_PD_Data_limit_c = input[idx + 7];
++		activity_monitor.Soc_PD_Data_error_coeff = input[idx + 8];
++		activity_monitor.Soc_PD_Data_error_rate_coeff = input[idx + 9];
++	}
++	idx = 2 * NAVI10_CUSTOM_PARAMS_COUNT;
++	if (input[idx]) {
++		/* Memclk */
++		activity_monitor.Mem_FPS = input[idx + 1];
++		activity_monitor.Mem_MinFreqStep = input[idx + 2];
++		activity_monitor.Mem_MinActiveFreqType = input[idx + 3];
++		activity_monitor.Mem_MinActiveFreq = input[idx + 4];
++		activity_monitor.Mem_BoosterFreqType = input[idx + 5];
++		activity_monitor.Mem_BoosterFreq = input[idx + 6];
++		activity_monitor.Mem_PD_Data_limit_c = input[idx + 7];
++		activity_monitor.Mem_PD_Data_error_coeff = input[idx + 8];
++		activity_monitor.Mem_PD_Data_error_rate_coeff = input[idx + 9];
++	}
++
++	ret = smu_cmn_update_table(smu,
++				   SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
++				   (void *)(&activity_monitor), true);
++	if (ret) {
++		dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
++		return ret;
+ 	}
+ 
+-	if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
+-		if (size != 10)
+-			return -EINVAL;
++	return ret;
++}
+ 
+-		ret = smu_cmn_update_table(smu,
+-				       SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
+-				       (void *)(&activity_monitor), false);
+-		if (ret) {
+-			dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
+-			return ret;
+-		}
++static int navi10_set_power_profile_mode(struct smu_context *smu,
++					 u32 workload_mask,
++					 long *custom_params,
++					 u32 custom_params_max_idx)
++{
++	u32 backend_workload_mask = 0;
++	int ret, idx = -1, i;
+ 
+-		switch (input[0]) {
+-		case 0: /* Gfxclk */
+-			activity_monitor.Gfx_FPS = input[1];
+-			activity_monitor.Gfx_MinFreqStep = input[2];
+-			activity_monitor.Gfx_MinActiveFreqType = input[3];
+-			activity_monitor.Gfx_MinActiveFreq = input[4];
+-			activity_monitor.Gfx_BoosterFreqType = input[5];
+-			activity_monitor.Gfx_BoosterFreq = input[6];
+-			activity_monitor.Gfx_PD_Data_limit_c = input[7];
+-			activity_monitor.Gfx_PD_Data_error_coeff = input[8];
+-			activity_monitor.Gfx_PD_Data_error_rate_coeff = input[9];
+-			break;
+-		case 1: /* Socclk */
+-			activity_monitor.Soc_FPS = input[1];
+-			activity_monitor.Soc_MinFreqStep = input[2];
+-			activity_monitor.Soc_MinActiveFreqType = input[3];
+-			activity_monitor.Soc_MinActiveFreq = input[4];
+-			activity_monitor.Soc_BoosterFreqType = input[5];
+-			activity_monitor.Soc_BoosterFreq = input[6];
+-			activity_monitor.Soc_PD_Data_limit_c = input[7];
+-			activity_monitor.Soc_PD_Data_error_coeff = input[8];
+-			activity_monitor.Soc_PD_Data_error_rate_coeff = input[9];
+-			break;
+-		case 2: /* Memclk */
+-			activity_monitor.Mem_FPS = input[1];
+-			activity_monitor.Mem_MinFreqStep = input[2];
+-			activity_monitor.Mem_MinActiveFreqType = input[3];
+-			activity_monitor.Mem_MinActiveFreq = input[4];
+-			activity_monitor.Mem_BoosterFreqType = input[5];
+-			activity_monitor.Mem_BoosterFreq = input[6];
+-			activity_monitor.Mem_PD_Data_limit_c = input[7];
+-			activity_monitor.Mem_PD_Data_error_coeff = input[8];
+-			activity_monitor.Mem_PD_Data_error_rate_coeff = input[9];
+-			break;
+-		default:
+-			return -EINVAL;
+-		}
++	smu_cmn_get_backend_workload_mask(smu, workload_mask,
++					  &backend_workload_mask);
+ 
+-		ret = smu_cmn_update_table(smu,
+-				       SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
+-				       (void *)(&activity_monitor), true);
++	if (workload_mask & (1 << PP_SMC_POWER_PROFILE_CUSTOM)) {
++		if (!smu->custom_profile_params) {
++			smu->custom_profile_params = kzalloc(NAVI10_CUSTOM_PARAMS_SIZE, GFP_KERNEL);
++			if (!smu->custom_profile_params)
++				return -ENOMEM;
++		}
++		if (custom_params && custom_params_max_idx) {
++			if (custom_params_max_idx != NAVI10_CUSTOM_PARAMS_COUNT)
++				return -EINVAL;
++			if (custom_params[0] >= NAVI10_CUSTOM_PARAMS_CLOCKS_COUNT)
++				return -EINVAL;
++			idx = custom_params[0] * NAVI10_CUSTOM_PARAMS_COUNT;
++			smu->custom_profile_params[idx] = 1;
++			for (i = 1; i < custom_params_max_idx; i++)
++				smu->custom_profile_params[idx + i] = custom_params[i];
++		}
++		ret = navi10_set_power_profile_mode_coeff(smu,
++							  smu->custom_profile_params);
+ 		if (ret) {
+-			dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
++			if (idx != -1)
++				smu->custom_profile_params[idx] = 0;
+ 			return ret;
+ 		}
++	} else if (smu->custom_profile_params) {
++		memset(smu->custom_profile_params, 0, NAVI10_CUSTOM_PARAMS_SIZE);
+ 	}
+ 
+-	/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
+-	workload_type = smu_cmn_to_asic_specific_index(smu,
+-						       CMN2ASIC_MAPPING_WORKLOAD,
+-						       smu->power_profile_mode);
+-	if (workload_type < 0)
+-		return -EINVAL;
+ 	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
+-				    1 << workload_type, NULL);
+-	if (ret)
+-		dev_err(smu->adev->dev, "[%s] Failed to set work load mask!", __func__);
++					      backend_workload_mask, NULL);
++	if (ret) {
++		dev_err(smu->adev->dev, "Failed to set workload mask 0x%08x\n",
++			workload_mask);
++		if (idx != -1)
++			smu->custom_profile_params[idx] = 0;
++		return ret;
++	}
+ 
+ 	return ret;
+ }
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+index 9c3c48297cba03..1af90990d05c8f 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+@@ -1706,90 +1706,126 @@ static int sienna_cichlid_get_power_profile_mode(struct smu_context *smu, char *
+ 	return size;
+ }
+ 
+-static int sienna_cichlid_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
++#define SIENNA_CICHLID_CUSTOM_PARAMS_COUNT 10
++#define SIENNA_CICHLID_CUSTOM_PARAMS_CLOCK_COUNT 3
++#define SIENNA_CICHLID_CUSTOM_PARAMS_SIZE (SIENNA_CICHLID_CUSTOM_PARAMS_CLOCK_COUNT * SIENNA_CICHLID_CUSTOM_PARAMS_COUNT * sizeof(long))
++
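++	/* a reply whose request type differs from the pending tx is stale or corrupted */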
++static int sienna_cichlid_set_power_profile_mode_coeff(struct smu_context *smu,
++						       long *input)
+ {
+ 
+ 	DpmActivityMonitorCoeffIntExternal_t activity_monitor_external;
+ 	DpmActivityMonitorCoeffInt_t *activity_monitor =
+ 		&(activity_monitor_external.DpmActivityMonitorCoeffInt);
+-	int workload_type, ret = 0;
++	int ret, idx;
+ 
+-	smu->power_profile_mode = input[size];
++	ret = smu_cmn_update_table(smu,
++				   SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
++				   (void *)(&activity_monitor_external), false);
++	if (ret) {
++		dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
++		return ret;
++	}
+ 
+-	if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
+-		dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode);
+-		return -EINVAL;
++	idx = 0 * SIENNA_CICHLID_CUSTOM_PARAMS_COUNT;
++	if (input[idx]) {
++		/* Gfxclk */
++		activity_monitor->Gfx_FPS = input[idx + 1];
++		activity_monitor->Gfx_MinFreqStep = input[idx + 2];
++		activity_monitor->Gfx_MinActiveFreqType = input[idx + 3];
++		activity_monitor->Gfx_MinActiveFreq = input[idx + 4];
++		activity_monitor->Gfx_BoosterFreqType = input[idx + 5];
++		activity_monitor->Gfx_BoosterFreq = input[idx + 6];
++		activity_monitor->Gfx_PD_Data_limit_c = input[idx + 7];
++		activity_monitor->Gfx_PD_Data_error_coeff = input[idx + 8];
++		activity_monitor->Gfx_PD_Data_error_rate_coeff = input[idx + 9];
++	}
++	idx = 1 * SIENNA_CICHLID_CUSTOM_PARAMS_COUNT;
++	if (input[idx]) {
++		/* Fclk */
++		activity_monitor->Fclk_FPS = input[idx + 1];
++		activity_monitor->Fclk_MinFreqStep = input[idx + 2];
++		activity_monitor->Fclk_MinActiveFreqType = input[idx + 3];
++		activity_monitor->Fclk_MinActiveFreq = input[idx + 4];
++		activity_monitor->Fclk_BoosterFreqType = input[idx + 5];
++		activity_monitor->Fclk_BoosterFreq = input[idx + 6];
++		activity_monitor->Fclk_PD_Data_limit_c = input[idx + 7];
++		activity_monitor->Fclk_PD_Data_error_coeff = input[idx + 8];
++		activity_monitor->Fclk_PD_Data_error_rate_coeff = input[idx + 9];
++	}
++	idx = 2 * SIENNA_CICHLID_CUSTOM_PARAMS_COUNT;
++	if (input[idx]) {
++		/* Memclk */
++		activity_monitor->Mem_FPS = input[idx + 1];
++		activity_monitor->Mem_MinFreqStep = input[idx + 2];
++		activity_monitor->Mem_MinActiveFreqType = input[idx + 3];
++		activity_monitor->Mem_MinActiveFreq = input[idx + 4];
++		activity_monitor->Mem_BoosterFreqType = input[idx + 5];
++		activity_monitor->Mem_BoosterFreq = input[idx + 6];
++		activity_monitor->Mem_PD_Data_limit_c = input[idx + 7];
++		activity_monitor->Mem_PD_Data_error_coeff = input[idx + 8];
++		activity_monitor->Mem_PD_Data_error_rate_coeff = input[idx + 9];
+ 	}
+ 
+-	if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
+-		if (size != 10)
+-			return -EINVAL;
++	ret = smu_cmn_update_table(smu,
++				   SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
++				   (void *)(&activity_monitor_external), true);
++	if (ret) {
++		dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
++		return ret;
++	}
+ 
+-		ret = smu_cmn_update_table(smu,
+-				       SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
+-				       (void *)(&activity_monitor_external), false);
+-		if (ret) {
+-			dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
+-			return ret;
+-		}
++	return ret;
++}
+ 
+-		switch (input[0]) {
+-		case 0: /* Gfxclk */
+-			activity_monitor->Gfx_FPS = input[1];
+-			activity_monitor->Gfx_MinFreqStep = input[2];
+-			activity_monitor->Gfx_MinActiveFreqType = input[3];
+-			activity_monitor->Gfx_MinActiveFreq = input[4];
+-			activity_monitor->Gfx_BoosterFreqType = input[5];
+-			activity_monitor->Gfx_BoosterFreq = input[6];
+-			activity_monitor->Gfx_PD_Data_limit_c = input[7];
+-			activity_monitor->Gfx_PD_Data_error_coeff = input[8];
+-			activity_monitor->Gfx_PD_Data_error_rate_coeff = input[9];
+-			break;
+-		case 1: /* Socclk */
+-			activity_monitor->Fclk_FPS = input[1];
+-			activity_monitor->Fclk_MinFreqStep = input[2];
+-			activity_monitor->Fclk_MinActiveFreqType = input[3];
+-			activity_monitor->Fclk_MinActiveFreq = input[4];
+-			activity_monitor->Fclk_BoosterFreqType = input[5];
+-			activity_monitor->Fclk_BoosterFreq = input[6];
+-			activity_monitor->Fclk_PD_Data_limit_c = input[7];
+-			activity_monitor->Fclk_PD_Data_error_coeff = input[8];
+-			activity_monitor->Fclk_PD_Data_error_rate_coeff = input[9];
+-			break;
+-		case 2: /* Memclk */
+-			activity_monitor->Mem_FPS = input[1];
+-			activity_monitor->Mem_MinFreqStep = input[2];
+-			activity_monitor->Mem_MinActiveFreqType = input[3];
+-			activity_monitor->Mem_MinActiveFreq = input[4];
+-			activity_monitor->Mem_BoosterFreqType = input[5];
+-			activity_monitor->Mem_BoosterFreq = input[6];
+-			activity_monitor->Mem_PD_Data_limit_c = input[7];
+-			activity_monitor->Mem_PD_Data_error_coeff = input[8];
+-			activity_monitor->Mem_PD_Data_error_rate_coeff = input[9];
+-			break;
+-		default:
+-			return -EINVAL;
++static int sienna_cichlid_set_power_profile_mode(struct smu_context *smu,
++						 u32 workload_mask,
++						 long *custom_params,
++						 u32 custom_params_max_idx)
++{
++	u32 backend_workload_mask = 0;
++	int ret, idx = -1, i;
++
++	smu_cmn_get_backend_workload_mask(smu, workload_mask,
++					  &backend_workload_mask);
++
++	if (workload_mask & (1 << PP_SMC_POWER_PROFILE_CUSTOM)) {
++		if (!smu->custom_profile_params) {
++			smu->custom_profile_params =
++				kzalloc(SIENNA_CICHLID_CUSTOM_PARAMS_SIZE, GFP_KERNEL);
++			if (!smu->custom_profile_params)
++				return -ENOMEM;
+ 		}
+-
+-		ret = smu_cmn_update_table(smu,
+-				       SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
+-				       (void *)(&activity_monitor_external), true);
++		if (custom_params && custom_params_max_idx) {
++			if (custom_params_max_idx != SIENNA_CICHLID_CUSTOM_PARAMS_COUNT)
++				return -EINVAL;
++			if (custom_params[0] >= SIENNA_CICHLID_CUSTOM_PARAMS_CLOCK_COUNT)
++				return -EINVAL;
++			idx = custom_params[0] * SIENNA_CICHLID_CUSTOM_PARAMS_COUNT;
++			smu->custom_profile_params[idx] = 1;
++			for (i = 1; i < custom_params_max_idx; i++)
++				smu->custom_profile_params[idx + i] = custom_params[i];
++		}
++		ret = sienna_cichlid_set_power_profile_mode_coeff(smu,
++								  smu->custom_profile_params);
+ 		if (ret) {
+-			dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
++			if (idx != -1)
++				smu->custom_profile_params[idx] = 0;
+ 			return ret;
+ 		}
++	} else if (smu->custom_profile_params) {
++		memset(smu->custom_profile_params, 0, SIENNA_CICHLID_CUSTOM_PARAMS_SIZE);
+ 	}
+ 
+-	/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
+-	workload_type = smu_cmn_to_asic_specific_index(smu,
+-						       CMN2ASIC_MAPPING_WORKLOAD,
+-						       smu->power_profile_mode);
+-	if (workload_type < 0)
+-		return -EINVAL;
+ 	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
+-				    1 << workload_type, NULL);
+-	if (ret)
+-		dev_err(smu->adev->dev, "[%s] Failed to set work load mask!", __func__);
++					      backend_workload_mask, NULL);
++	if (ret) {
++		dev_err(smu->adev->dev, "Failed to set workload mask 0x%08x\n",
++			workload_mask);
++		if (idx != -1)
++			smu->custom_profile_params[idx] = 0;
++		return ret;
++	}
+ 
+ 	return ret;
+ }
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+index 1fe020f1f4dbe2..9bca748ac2e947 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+@@ -1054,42 +1054,27 @@ static int vangogh_get_power_profile_mode(struct smu_context *smu,
+ 	return size;
+ }
+ 
+-static int vangogh_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
++static int vangogh_set_power_profile_mode(struct smu_context *smu,
++					  u32 workload_mask,
++					  long *custom_params,
++					  u32 custom_params_max_idx)
+ {
+-	int workload_type, ret;
+-	uint32_t profile_mode = input[size];
++	u32 backend_workload_mask = 0;
++	int ret;
+ 
+-	if (profile_mode >= PP_SMC_POWER_PROFILE_COUNT) {
+-		dev_err(smu->adev->dev, "Invalid power profile mode %d\n", profile_mode);
+-		return -EINVAL;
+-	}
+-
+-	if (profile_mode == PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT ||
+-			profile_mode == PP_SMC_POWER_PROFILE_POWERSAVING)
+-		return 0;
+-
+-	/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
+-	workload_type = smu_cmn_to_asic_specific_index(smu,
+-						       CMN2ASIC_MAPPING_WORKLOAD,
+-						       profile_mode);
+-	if (workload_type < 0) {
+-		dev_dbg(smu->adev->dev, "Unsupported power profile mode %d on VANGOGH\n",
+-					profile_mode);
+-		return -EINVAL;
+-	}
++	smu_cmn_get_backend_workload_mask(smu, workload_mask,
++					  &backend_workload_mask);
+ 
+ 	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify,
+-				    1 << workload_type,
+-				    NULL);
++					      backend_workload_mask,
++					      NULL);
+ 	if (ret) {
+-		dev_err_once(smu->adev->dev, "Fail to set workload type %d\n",
+-					workload_type);
++		dev_err_once(smu->adev->dev, "Failed to set workload mask 0x%08x\n",
++			     workload_mask);
+ 		return ret;
+ 	}
+ 
+-	smu->power_profile_mode = profile_mode;
+-
+-	return 0;
++	return ret;
+ }
+ 
+ static int vangogh_set_soft_freq_limited_range(struct smu_context *smu,
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
+index cc0504b063fa3a..1a8a42b176e520 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
+@@ -862,44 +862,27 @@ static int renoir_force_clk_levels(struct smu_context *smu,
+ 	return ret;
+ }
+ 
+-static int renoir_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
++static int renoir_set_power_profile_mode(struct smu_context *smu,
++					 u32 workload_mask,
++					 long *custom_params,
++					 u32 custom_params_max_idx)
+ {
+-	int workload_type, ret;
+-	uint32_t profile_mode = input[size];
++	int ret;
++	u32 backend_workload_mask = 0;
+ 
+-	if (profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
+-		dev_err(smu->adev->dev, "Invalid power profile mode %d\n", profile_mode);
+-		return -EINVAL;
+-	}
+-
+-	if (profile_mode == PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT ||
+-			profile_mode == PP_SMC_POWER_PROFILE_POWERSAVING)
+-		return 0;
+-
+-	/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
+-	workload_type = smu_cmn_to_asic_specific_index(smu,
+-						       CMN2ASIC_MAPPING_WORKLOAD,
+-						       profile_mode);
+-	if (workload_type < 0) {
+-		/*
+-		 * TODO: If some case need switch to powersave/default power mode
+-		 * then can consider enter WORKLOAD_COMPUTE/WORKLOAD_CUSTOM for power saving.
+-		 */
+-		dev_dbg(smu->adev->dev, "Unsupported power profile mode %d on RENOIR\n", profile_mode);
+-		return -EINVAL;
+-	}
++	smu_cmn_get_backend_workload_mask(smu, workload_mask,
++					  &backend_workload_mask);
+ 
+ 	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify,
+-				    1 << workload_type,
+-				    NULL);
++					      backend_workload_mask,
++					      NULL);
+ 	if (ret) {
+-		dev_err_once(smu->adev->dev, "Fail to set workload type %d\n", workload_type);
++		dev_err_once(smu->adev->dev, "Failed to set workload mask 0x%08x\n",
++			     workload_mask);
+ 		return ret;
+ 	}
+ 
+-	smu->power_profile_mode = profile_mode;
+-
+-	return 0;
++	return ret;
+ }
+ 
+ static int renoir_set_peak_clock_by_device(struct smu_context *smu)
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+index d53e162dcd8de2..a9373968807164 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+@@ -2477,82 +2477,76 @@ static int smu_v13_0_0_get_power_profile_mode(struct smu_context *smu,
+ 	return size;
+ }
+ 
+-static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
+-					      long *input,
+-					      uint32_t size)
++#define SMU_13_0_0_CUSTOM_PARAMS_COUNT 9
++#define SMU_13_0_0_CUSTOM_PARAMS_CLOCK_COUNT 2
++#define SMU_13_0_0_CUSTOM_PARAMS_SIZE (SMU_13_0_0_CUSTOM_PARAMS_CLOCK_COUNT * SMU_13_0_0_CUSTOM_PARAMS_COUNT * sizeof(long))
++
++static int smu_v13_0_0_set_power_profile_mode_coeff(struct smu_context *smu,
++						    long *input)
+ {
+ 	DpmActivityMonitorCoeffIntExternal_t activity_monitor_external;
+ 	DpmActivityMonitorCoeffInt_t *activity_monitor =
+ 		&(activity_monitor_external.DpmActivityMonitorCoeffInt);
+-	int workload_type, ret = 0;
+-	u32 workload_mask, selected_workload_mask;
+-
+-	smu->power_profile_mode = input[size];
++	int ret, idx;
+ 
+-	if (smu->power_profile_mode >= PP_SMC_POWER_PROFILE_COUNT) {
+-		dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode);
+-		return -EINVAL;
++	ret = smu_cmn_update_table(smu,
++				   SMU_TABLE_ACTIVITY_MONITOR_COEFF,
++				   WORKLOAD_PPLIB_CUSTOM_BIT,
++				   (void *)(&activity_monitor_external),
++				   false);
++	if (ret) {
++		dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
++		return ret;
+ 	}
+ 
+-	if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
+-		if (size != 9)
+-			return -EINVAL;
+-
+-		ret = smu_cmn_update_table(smu,
+-					   SMU_TABLE_ACTIVITY_MONITOR_COEFF,
+-					   WORKLOAD_PPLIB_CUSTOM_BIT,
+-					   (void *)(&activity_monitor_external),
+-					   false);
+-		if (ret) {
+-			dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
+-			return ret;
+-		}
+-
+-		switch (input[0]) {
+-		case 0: /* Gfxclk */
+-			activity_monitor->Gfx_FPS = input[1];
+-			activity_monitor->Gfx_MinActiveFreqType = input[2];
+-			activity_monitor->Gfx_MinActiveFreq = input[3];
+-			activity_monitor->Gfx_BoosterFreqType = input[4];
+-			activity_monitor->Gfx_BoosterFreq = input[5];
+-			activity_monitor->Gfx_PD_Data_limit_c = input[6];
+-			activity_monitor->Gfx_PD_Data_error_coeff = input[7];
+-			activity_monitor->Gfx_PD_Data_error_rate_coeff = input[8];
+-			break;
+-		case 1: /* Fclk */
+-			activity_monitor->Fclk_FPS = input[1];
+-			activity_monitor->Fclk_MinActiveFreqType = input[2];
+-			activity_monitor->Fclk_MinActiveFreq = input[3];
+-			activity_monitor->Fclk_BoosterFreqType = input[4];
+-			activity_monitor->Fclk_BoosterFreq = input[5];
+-			activity_monitor->Fclk_PD_Data_limit_c = input[6];
+-			activity_monitor->Fclk_PD_Data_error_coeff = input[7];
+-			activity_monitor->Fclk_PD_Data_error_rate_coeff = input[8];
+-			break;
+-		default:
+-			return -EINVAL;
+-		}
++	idx = 0 * SMU_13_0_0_CUSTOM_PARAMS_COUNT;
++	if (input[idx]) {
++		/* Gfxclk */
++		activity_monitor->Gfx_FPS = input[idx + 1];
++		activity_monitor->Gfx_MinActiveFreqType = input[idx + 2];
++		activity_monitor->Gfx_MinActiveFreq = input[idx + 3];
++		activity_monitor->Gfx_BoosterFreqType = input[idx + 4];
++		activity_monitor->Gfx_BoosterFreq = input[idx + 5];
++		activity_monitor->Gfx_PD_Data_limit_c = input[idx + 6];
++		activity_monitor->Gfx_PD_Data_error_coeff = input[idx + 7];
++		activity_monitor->Gfx_PD_Data_error_rate_coeff = input[idx + 8];
++	}
++	idx = 1 * SMU_13_0_0_CUSTOM_PARAMS_COUNT;
++	if (input[idx]) {
++		/* Fclk */
++		activity_monitor->Fclk_FPS = input[idx + 1];
++		activity_monitor->Fclk_MinActiveFreqType = input[idx + 2];
++		activity_monitor->Fclk_MinActiveFreq = input[idx + 3];
++		activity_monitor->Fclk_BoosterFreqType = input[idx + 4];
++		activity_monitor->Fclk_BoosterFreq = input[idx + 5];
++		activity_monitor->Fclk_PD_Data_limit_c = input[idx + 6];
++		activity_monitor->Fclk_PD_Data_error_coeff = input[idx + 7];
++		activity_monitor->Fclk_PD_Data_error_rate_coeff = input[idx + 8];
++	}
+ 
+-		ret = smu_cmn_update_table(smu,
+-					   SMU_TABLE_ACTIVITY_MONITOR_COEFF,
+-					   WORKLOAD_PPLIB_CUSTOM_BIT,
+-					   (void *)(&activity_monitor_external),
+-					   true);
+-		if (ret) {
+-			dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
+-			return ret;
+-		}
++	ret = smu_cmn_update_table(smu,
++				   SMU_TABLE_ACTIVITY_MONITOR_COEFF,
++				   WORKLOAD_PPLIB_CUSTOM_BIT,
++				   (void *)(&activity_monitor_external),
++				   true);
++	if (ret) {
++		dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
++		return ret;
+ 	}
+ 
+-	/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
+-	workload_type = smu_cmn_to_asic_specific_index(smu,
+-						       CMN2ASIC_MAPPING_WORKLOAD,
+-						       smu->power_profile_mode);
++	return ret;
++}
+ 
+-	if (workload_type < 0)
+-		return -EINVAL;
++static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
++					      u32 workload_mask,
++					      long *custom_params,
++					      u32 custom_params_max_idx)
++{
++	u32 backend_workload_mask = 0;
++	int workload_type, ret, idx = -1, i;
+ 
+-	selected_workload_mask = workload_mask = 1 << workload_type;
++	smu_cmn_get_backend_workload_mask(smu, workload_mask,
++					  &backend_workload_mask);
+ 
+ 	/* Add optimizations for SMU13.0.0/10.  Reuse the power saving profile */
+ 	if ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0) &&
+@@ -2564,15 +2558,48 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
+ 							       CMN2ASIC_MAPPING_WORKLOAD,
+ 							       PP_SMC_POWER_PROFILE_POWERSAVING);
+ 		if (workload_type >= 0)
+-			workload_mask |= 1 << workload_type;
++			backend_workload_mask |= 1 << workload_type;
++	}
++
++	if (workload_mask & (1 << PP_SMC_POWER_PROFILE_CUSTOM)) {
++		if (!smu->custom_profile_params) {
++			smu->custom_profile_params =
++				kzalloc(SMU_13_0_0_CUSTOM_PARAMS_SIZE, GFP_KERNEL);
++			if (!smu->custom_profile_params)
++				return -ENOMEM;
++		}
++		if (custom_params && custom_params_max_idx) {
++			if (custom_params_max_idx != SMU_13_0_0_CUSTOM_PARAMS_COUNT)
++				return -EINVAL;
++			if (custom_params[0] >= SMU_13_0_0_CUSTOM_PARAMS_CLOCK_COUNT)
++				return -EINVAL;
++			idx = custom_params[0] * SMU_13_0_0_CUSTOM_PARAMS_COUNT;
++			smu->custom_profile_params[idx] = 1;
++			for (i = 1; i < custom_params_max_idx; i++)
++				smu->custom_profile_params[idx + i] = custom_params[i];
++		}
++		ret = smu_v13_0_0_set_power_profile_mode_coeff(smu,
++							       smu->custom_profile_params);
++		if (ret) {
++			if (idx != -1)
++				smu->custom_profile_params[idx] = 0;
++			return ret;
++		}
++	} else if (smu->custom_profile_params) {
++		memset(smu->custom_profile_params, 0, SMU_13_0_0_CUSTOM_PARAMS_SIZE);
+ 	}
+ 
+ 	ret = smu_cmn_send_smc_msg_with_param(smu,
+-					       SMU_MSG_SetWorkloadMask,
+-					       workload_mask,
+-					       NULL);
+-	if (!ret)
+-		smu->workload_mask = selected_workload_mask;
++					      SMU_MSG_SetWorkloadMask,
++					      backend_workload_mask,
++					      NULL);
++	if (ret) {
++		dev_err(smu->adev->dev, "Failed to set workload mask 0x%08x\n",
++			workload_mask);
++		if (idx != -1)
++			smu->custom_profile_params[idx] = 0;
++		return ret;
++	}
+ 
+ 	return ret;
+ }
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+index ceaf4572db2527..d0e6d051e9cf9f 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+@@ -2436,78 +2436,110 @@ do {													\
+ 	return result;
+ }
+ 
+-static int smu_v13_0_7_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
++#define SMU_13_0_7_CUSTOM_PARAMS_COUNT 8
++#define SMU_13_0_7_CUSTOM_PARAMS_CLOCK_COUNT 2
++#define SMU_13_0_7_CUSTOM_PARAMS_SIZE (SMU_13_0_7_CUSTOM_PARAMS_CLOCK_COUNT * SMU_13_0_7_CUSTOM_PARAMS_COUNT * sizeof(long))
++
++static int smu_v13_0_7_set_power_profile_mode_coeff(struct smu_context *smu,
++						    long *input)
+ {
+ 
+ 	DpmActivityMonitorCoeffIntExternal_t activity_monitor_external;
+ 	DpmActivityMonitorCoeffInt_t *activity_monitor =
+ 		&(activity_monitor_external.DpmActivityMonitorCoeffInt);
+-	int workload_type, ret = 0;
++	int ret, idx;
+ 
+-	smu->power_profile_mode = input[size];
++	ret = smu_cmn_update_table(smu,
++				   SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
++				   (void *)(&activity_monitor_external), false);
++	if (ret) {
++		dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
++		return ret;
++	}
+ 
+-	if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_WINDOW3D) {
+-		dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode);
+-		return -EINVAL;
++	idx = 0 * SMU_13_0_7_CUSTOM_PARAMS_COUNT;
++	if (input[idx]) {
++		/* Gfxclk */
++		activity_monitor->Gfx_ActiveHystLimit = input[idx + 1];
++		activity_monitor->Gfx_IdleHystLimit = input[idx + 2];
++		activity_monitor->Gfx_FPS = input[idx + 3];
++		activity_monitor->Gfx_MinActiveFreqType = input[idx + 4];
++		activity_monitor->Gfx_BoosterFreqType = input[idx + 5];
++		activity_monitor->Gfx_MinActiveFreq = input[idx + 6];
++		activity_monitor->Gfx_BoosterFreq = input[idx + 7];
++	}
++	idx = 1 * SMU_13_0_7_CUSTOM_PARAMS_COUNT;
++	if (input[idx]) {
++		/* Fclk */
++		activity_monitor->Fclk_ActiveHystLimit = input[idx + 1];
++		activity_monitor->Fclk_IdleHystLimit = input[idx + 2];
++		activity_monitor->Fclk_FPS = input[idx + 3];
++		activity_monitor->Fclk_MinActiveFreqType = input[idx + 4];
++		activity_monitor->Fclk_BoosterFreqType = input[idx + 5];
++		activity_monitor->Fclk_MinActiveFreq = input[idx + 6];
++		activity_monitor->Fclk_BoosterFreq = input[idx + 7];
+ 	}
+ 
+-	if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
+-		if (size != 8)
+-			return -EINVAL;
++	ret = smu_cmn_update_table(smu,
++				   SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
++				   (void *)(&activity_monitor_external), true);
++	if (ret) {
++		dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
++		return ret;
++	}
+ 
+-		ret = smu_cmn_update_table(smu,
+-				       SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
+-				       (void *)(&activity_monitor_external), false);
+-		if (ret) {
+-			dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
+-			return ret;
+-		}
++	return ret;
++}
+ 
+-		switch (input[0]) {
+-		case 0: /* Gfxclk */
+-			activity_monitor->Gfx_ActiveHystLimit = input[1];
+-			activity_monitor->Gfx_IdleHystLimit = input[2];
+-			activity_monitor->Gfx_FPS = input[3];
+-			activity_monitor->Gfx_MinActiveFreqType = input[4];
+-			activity_monitor->Gfx_BoosterFreqType = input[5];
+-			activity_monitor->Gfx_MinActiveFreq = input[6];
+-			activity_monitor->Gfx_BoosterFreq = input[7];
+-			break;
+-		case 1: /* Fclk */
+-			activity_monitor->Fclk_ActiveHystLimit = input[1];
+-			activity_monitor->Fclk_IdleHystLimit = input[2];
+-			activity_monitor->Fclk_FPS = input[3];
+-			activity_monitor->Fclk_MinActiveFreqType = input[4];
+-			activity_monitor->Fclk_BoosterFreqType = input[5];
+-			activity_monitor->Fclk_MinActiveFreq = input[6];
+-			activity_monitor->Fclk_BoosterFreq = input[7];
+-			break;
+-		default:
+-			return -EINVAL;
++static int smu_v13_0_7_set_power_profile_mode(struct smu_context *smu,
++					      u32 workload_mask,
++					      long *custom_params,
++					      u32 custom_params_max_idx)
++{
++	u32 backend_workload_mask = 0;
++	int ret, idx = -1, i;
++
++	smu_cmn_get_backend_workload_mask(smu, workload_mask,
++					  &backend_workload_mask);
++
++	if (workload_mask & (1 << PP_SMC_POWER_PROFILE_CUSTOM)) {
++		if (!smu->custom_profile_params) {
++			smu->custom_profile_params =
++				kzalloc(SMU_13_0_7_CUSTOM_PARAMS_SIZE, GFP_KERNEL);
++			if (!smu->custom_profile_params)
++				return -ENOMEM;
+ 		}
+-
+-		ret = smu_cmn_update_table(smu,
+-				       SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
+-				       (void *)(&activity_monitor_external), true);
++		if (custom_params && custom_params_max_idx) {
++			if (custom_params_max_idx != SMU_13_0_7_CUSTOM_PARAMS_COUNT)
++				return -EINVAL;
++			if (custom_params[0] >= SMU_13_0_7_CUSTOM_PARAMS_CLOCK_COUNT)
++				return -EINVAL;
++			idx = custom_params[0] * SMU_13_0_7_CUSTOM_PARAMS_COUNT;
++			smu->custom_profile_params[idx] = 1;
++			for (i = 1; i < custom_params_max_idx; i++)
++				smu->custom_profile_params[idx + i] = custom_params[i];
++		}
++		ret = smu_v13_0_7_set_power_profile_mode_coeff(smu,
++							       smu->custom_profile_params);
+ 		if (ret) {
+-			dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
++			if (idx != -1)
++				smu->custom_profile_params[idx] = 0;
+ 			return ret;
+ 		}
++	} else if (smu->custom_profile_params) {
++		memset(smu->custom_profile_params, 0, SMU_13_0_7_CUSTOM_PARAMS_SIZE);
+ 	}
+ 
+-	/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
+-	workload_type = smu_cmn_to_asic_specific_index(smu,
+-						       CMN2ASIC_MAPPING_WORKLOAD,
+-						       smu->power_profile_mode);
+-	if (workload_type < 0)
+-		return -EINVAL;
+ 	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
+-				    1 << workload_type, NULL);
++					      backend_workload_mask, NULL);
+ 
+-	if (ret)
+-		dev_err(smu->adev->dev, "[%s] Failed to set work load mask!", __func__);
+-	else
+-		smu->workload_mask = (1 << workload_type);
++	if (ret) {
++		dev_err(smu->adev->dev, "Failed to set workload mask 0x%08x\n",
++			workload_mask);
++		if (idx != -1)
++			smu->custom_profile_params[idx] = 0;
++		return ret;
++	}
+ 
+ 	return ret;
+ }
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
+index 82aef8626afa97..b22fb7eafcd3f2 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
+@@ -1751,90 +1751,120 @@ static int smu_v14_0_2_get_power_profile_mode(struct smu_context *smu,
+ 	return size;
+ }
+ 
+-static int smu_v14_0_2_set_power_profile_mode(struct smu_context *smu,
+-					      long *input,
+-					      uint32_t size)
++#define SMU_14_0_2_CUSTOM_PARAMS_COUNT 9
++#define SMU_14_0_2_CUSTOM_PARAMS_CLOCK_COUNT 2
++#define SMU_14_0_2_CUSTOM_PARAMS_SIZE (SMU_14_0_2_CUSTOM_PARAMS_CLOCK_COUNT * SMU_14_0_2_CUSTOM_PARAMS_COUNT * sizeof(long))
++
++static int smu_v14_0_2_set_power_profile_mode_coeff(struct smu_context *smu,
++						    long *input)
+ {
+ 	DpmActivityMonitorCoeffIntExternal_t activity_monitor_external;
+ 	DpmActivityMonitorCoeffInt_t *activity_monitor =
+ 		&(activity_monitor_external.DpmActivityMonitorCoeffInt);
+-	int workload_type, ret = 0;
+-	uint32_t current_profile_mode = smu->power_profile_mode;
+-	smu->power_profile_mode = input[size];
++	int ret, idx;
+ 
+-	if (smu->power_profile_mode >= PP_SMC_POWER_PROFILE_COUNT) {
+-		dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode);
+-		return -EINVAL;
++	ret = smu_cmn_update_table(smu,
++				   SMU_TABLE_ACTIVITY_MONITOR_COEFF,
++				   WORKLOAD_PPLIB_CUSTOM_BIT,
++				   (void *)(&activity_monitor_external),
++				   false);
++	if (ret) {
++		dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
++		return ret;
+ 	}
+ 
+-	if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
+-		if (size != 9)
+-			return -EINVAL;
++	idx = 0 * SMU_14_0_2_CUSTOM_PARAMS_COUNT;
++	if (input[idx]) {
++		/* Gfxclk */
++		activity_monitor->Gfx_FPS = input[idx + 1];
++		activity_monitor->Gfx_MinActiveFreqType = input[idx + 2];
++		activity_monitor->Gfx_MinActiveFreq = input[idx + 3];
++		activity_monitor->Gfx_BoosterFreqType = input[idx + 4];
++		activity_monitor->Gfx_BoosterFreq = input[idx + 5];
++		activity_monitor->Gfx_PD_Data_limit_c = input[idx + 6];
++		activity_monitor->Gfx_PD_Data_error_coeff = input[idx + 7];
++		activity_monitor->Gfx_PD_Data_error_rate_coeff = input[idx + 8];
++	}
++	idx = 1 * SMU_14_0_2_CUSTOM_PARAMS_COUNT;
++	if (input[idx]) {
++		/* Fclk */
++		activity_monitor->Fclk_FPS = input[idx + 1];
++		activity_monitor->Fclk_MinActiveFreqType = input[idx + 2];
++		activity_monitor->Fclk_MinActiveFreq = input[idx + 3];
++		activity_monitor->Fclk_BoosterFreqType = input[idx + 4];
++		activity_monitor->Fclk_BoosterFreq = input[idx + 5];
++		activity_monitor->Fclk_PD_Data_limit_c = input[idx + 6];
++		activity_monitor->Fclk_PD_Data_error_coeff = input[idx + 7];
++		activity_monitor->Fclk_PD_Data_error_rate_coeff = input[idx + 8];
++	}
+ 
+-		ret = smu_cmn_update_table(smu,
+-					   SMU_TABLE_ACTIVITY_MONITOR_COEFF,
+-					   WORKLOAD_PPLIB_CUSTOM_BIT,
+-					   (void *)(&activity_monitor_external),
+-					   false);
+-		if (ret) {
+-			dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
+-			return ret;
+-		}
++	ret = smu_cmn_update_table(smu,
++				   SMU_TABLE_ACTIVITY_MONITOR_COEFF,
++				   WORKLOAD_PPLIB_CUSTOM_BIT,
++				   (void *)(&activity_monitor_external),
++				   true);
++	if (ret) {
++		dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
++		return ret;
++	}
+ 
+-		switch (input[0]) {
+-		case 0: /* Gfxclk */
+-			activity_monitor->Gfx_FPS = input[1];
+-			activity_monitor->Gfx_MinActiveFreqType = input[2];
+-			activity_monitor->Gfx_MinActiveFreq = input[3];
+-			activity_monitor->Gfx_BoosterFreqType = input[4];
+-			activity_monitor->Gfx_BoosterFreq = input[5];
+-			activity_monitor->Gfx_PD_Data_limit_c = input[6];
+-			activity_monitor->Gfx_PD_Data_error_coeff = input[7];
+-			activity_monitor->Gfx_PD_Data_error_rate_coeff = input[8];
+-			break;
+-		case 1: /* Fclk */
+-			activity_monitor->Fclk_FPS = input[1];
+-			activity_monitor->Fclk_MinActiveFreqType = input[2];
+-			activity_monitor->Fclk_MinActiveFreq = input[3];
+-			activity_monitor->Fclk_BoosterFreqType = input[4];
+-			activity_monitor->Fclk_BoosterFreq = input[5];
+-			activity_monitor->Fclk_PD_Data_limit_c = input[6];
+-			activity_monitor->Fclk_PD_Data_error_coeff = input[7];
+-			activity_monitor->Fclk_PD_Data_error_rate_coeff = input[8];
+-			break;
+-		default:
+-			return -EINVAL;
+-		}
++	return ret;
++}
+ 
+-		ret = smu_cmn_update_table(smu,
+-					   SMU_TABLE_ACTIVITY_MONITOR_COEFF,
+-					   WORKLOAD_PPLIB_CUSTOM_BIT,
+-					   (void *)(&activity_monitor_external),
+-					   true);
+-		if (ret) {
+-			dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
+-			return ret;
+-		}
+-	}
++static int smu_v14_0_2_set_power_profile_mode(struct smu_context *smu,
++					      u32 workload_mask,
++					      long *custom_params,
++					      u32 custom_params_max_idx)
++{
++	u32 backend_workload_mask = 0;
++	int ret, idx = -1, i;
++
++	smu_cmn_get_backend_workload_mask(smu, workload_mask,
++					  &backend_workload_mask);
+ 
+-	if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE)
++	/* disable deep sleep if compute is enabled */
++	if (workload_mask & (1 << PP_SMC_POWER_PROFILE_COMPUTE))
+ 		smu_v14_0_deep_sleep_control(smu, false);
+-	else if (current_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE)
++	else
+ 		smu_v14_0_deep_sleep_control(smu, true);
+ 
+-	/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
+-	workload_type = smu_cmn_to_asic_specific_index(smu,
+-						       CMN2ASIC_MAPPING_WORKLOAD,
+-						       smu->power_profile_mode);
+-	if (workload_type < 0)
+-		return -EINVAL;
++	if (workload_mask & (1 << PP_SMC_POWER_PROFILE_CUSTOM)) {
++		if (!smu->custom_profile_params) {
++			smu->custom_profile_params =
++				kzalloc(SMU_14_0_2_CUSTOM_PARAMS_SIZE, GFP_KERNEL);
++			if (!smu->custom_profile_params)
++				return -ENOMEM;
++		}
++		if (custom_params && custom_params_max_idx) {
++			if (custom_params_max_idx != SMU_14_0_2_CUSTOM_PARAMS_COUNT)
++				return -EINVAL;
++			if (custom_params[0] >= SMU_14_0_2_CUSTOM_PARAMS_CLOCK_COUNT)
++				return -EINVAL;
++			idx = custom_params[0] * SMU_14_0_2_CUSTOM_PARAMS_COUNT;
++			smu->custom_profile_params[idx] = 1;
++			for (i = 1; i < custom_params_max_idx; i++)
++				smu->custom_profile_params[idx + i] = custom_params[i];
++		}
++		ret = smu_v14_0_2_set_power_profile_mode_coeff(smu,
++							       smu->custom_profile_params);
++		if (ret) {
++			if (idx != -1)
++				smu->custom_profile_params[idx] = 0;
++			return ret;
++		}
++	} else if (smu->custom_profile_params) {
++		memset(smu->custom_profile_params, 0, SMU_14_0_2_CUSTOM_PARAMS_SIZE);
++	}
+ 
+-	ret = smu_cmn_send_smc_msg_with_param(smu,
+-					       SMU_MSG_SetWorkloadMask,
+-					       1 << workload_type,
+-					       NULL);
+-	if (!ret)
+-		smu->workload_mask = 1 << workload_type;
++	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
++					      backend_workload_mask, NULL);
++	if (ret) {
++		dev_err(smu->adev->dev, "Failed to set workload mask 0x%08x\n",
++			workload_mask);
++		if (idx != -1)
++			smu->custom_profile_params[idx] = 0;
++		return ret;
++	}
+ 
+ 	return ret;
+ }
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+index 91ad434bcdaeb4..0d71db7be325da 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+@@ -1215,3 +1215,28 @@ void smu_cmn_generic_plpd_policy_desc(struct smu_dpm_policy *policy)
+ {
+ 	policy->desc = &xgmi_plpd_policy_desc;
+ }
++
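++/* Translate the generic PP_SMC_POWER_PROFILE_* bitmask into the bitmask the
++ * firmware backend expects; profiles with no ASIC mapping are skipped. */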
++void smu_cmn_get_backend_workload_mask(struct smu_context *smu,
++				       u32 workload_mask,
++				       u32 *backend_workload_mask)
++{
++	int workload_type;
++	u32 profile_mode;
++
++	*backend_workload_mask = 0;
++
++	for (profile_mode = 0; profile_mode < PP_SMC_POWER_PROFILE_COUNT; profile_mode++) {
++		if (!(workload_mask & (1 << profile_mode)))
++			continue;
++
++		/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
++		workload_type = smu_cmn_to_asic_specific_index(smu,
++							       CMN2ASIC_MAPPING_WORKLOAD,
++							       profile_mode);
++
++		if (workload_type < 0)
++			continue;
++
++		*backend_workload_mask |= 1 << workload_type;
++	}
++}
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
+index 1de685defe85b1..a020277dec3e96 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
+@@ -147,5 +147,9 @@ bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev);
+ void smu_cmn_generic_soc_policy_desc(struct smu_dpm_policy *policy);
+ void smu_cmn_generic_plpd_policy_desc(struct smu_dpm_policy *policy);
+ 
++void smu_cmn_get_backend_workload_mask(struct smu_context *smu,
++				       u32 workload_mask,
++				       u32 *backend_workload_mask);
++
+ #endif
+ #endif
+diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c
+index 65b57de20203f5..008d86cc562af7 100644
+--- a/drivers/gpu/drm/bridge/ite-it6505.c
++++ b/drivers/gpu/drm/bridge/ite-it6505.c
+@@ -3507,6 +3507,7 @@ static const struct of_device_id it6505_of_match[] = {
+ 	{ .compatible = "ite,it6505" },
+ 	{ }
+ };
++MODULE_DEVICE_TABLE(of, it6505_of_match);
+ 
+ static struct i2c_driver it6505_i2c_driver = {
+ 	.driver = {
+diff --git a/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c
+index 14a2a8473682b0..c491e3203bf11c 100644
+--- a/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c
++++ b/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c
+@@ -160,11 +160,11 @@ EXPORT_SYMBOL(drm_dp_dual_mode_write);
+ 
+ static bool is_hdmi_adaptor(const char hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN])
+ {
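++	/* the +1 leaves room for the string literal's terminating NUL */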
+-	static const char dp_dual_mode_hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN] =
++	static const char dp_dual_mode_hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN + 1] =
+ 		"DP-HDMI ADAPTOR\x04";
+ 
+ 	return memcmp(hdmi_id, dp_dual_mode_hdmi_id,
+-		      sizeof(dp_dual_mode_hdmi_id)) == 0;
++		      DP_DUAL_MODE_HDMI_ID_LEN) == 0;
+ }
+ 
+ static bool is_type1_adaptor(uint8_t adaptor_id)
+diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
+index ac90118b9e7a81..bcf3a33123be1c 100644
+--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
+@@ -320,6 +320,9 @@ static bool drm_dp_decode_sideband_msg_hdr(const struct drm_dp_mst_topology_mgr
+ 	hdr->broadcast = (buf[idx] >> 7) & 0x1;
+ 	hdr->path_msg = (buf[idx] >> 6) & 0x1;
+ 	hdr->msg_len = buf[idx] & 0x3f;
++	if (hdr->msg_len < 1)		/* min space for body CRC */
++		return false;
++
+ 	idx++;
+ 	hdr->somt = (buf[idx] >> 7) & 0x1;
+ 	hdr->eomt = (buf[idx] >> 6) & 0x1;
+@@ -3697,8 +3700,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
+ 		ret = 0;
+ 		mgr->payload_id_table_cleared = false;
+ 
+-		memset(&mgr->down_rep_recv, 0, sizeof(mgr->down_rep_recv));
+-		memset(&mgr->up_req_recv, 0, sizeof(mgr->up_req_recv));
++		mgr->reset_rx_state = true;
+ 	}
+ 
+ out_unlock:
+@@ -3856,6 +3858,11 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
+ }
+ EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
+ 
++static void reset_msg_rx_state(struct drm_dp_sideband_msg_rx *msg)
++{
++	memset(msg, 0, sizeof(*msg));
++}
++
+ static bool
+ drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up,
+ 		      struct drm_dp_mst_branch **mstb)
+@@ -3934,6 +3941,34 @@ drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up,
+ 	return true;
+ }
+ 
++static int get_msg_request_type(u8 data)
++{
++	return data & 0x7f;
++}
++
++static bool verify_rx_request_type(struct drm_dp_mst_topology_mgr *mgr,
++				   const struct drm_dp_sideband_msg_tx *txmsg,
++				   const struct drm_dp_sideband_msg_rx *rxmsg)
++{
++	const struct drm_dp_sideband_msg_hdr *hdr = &rxmsg->initial_hdr;
++	const struct drm_dp_mst_branch *mstb = txmsg->dst;
++	int tx_req_type = get_msg_request_type(txmsg->msg[0]);
++	int rx_req_type = get_msg_request_type(rxmsg->msg[0]);
++	char rad_str[64];
++
++	if (tx_req_type == rx_req_type)
++		return true;
++
++	drm_dp_mst_rad_to_str(mstb->rad, mstb->lct, rad_str, sizeof(rad_str));
++	drm_dbg_kms(mgr->dev,
++		    "Got unexpected MST reply, mstb: %p seqno: %d lct: %d rad: %s rx_req_type: %s (%02x) != tx_req_type: %s (%02x)\n",
++		    mstb, hdr->seqno, mstb->lct, rad_str,
++		    drm_dp_mst_req_type_str(rx_req_type), rx_req_type,
++		    drm_dp_mst_req_type_str(tx_req_type), tx_req_type);
++
++	return false;
++}
++
+ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
+ {
+ 	struct drm_dp_sideband_msg_tx *txmsg;
+@@ -3963,6 +3998,9 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
+ 		goto out_clear_reply;
+ 	}
+ 
++	if (!verify_rx_request_type(mgr, txmsg, msg))
++		goto out_clear_reply;
++
+ 	drm_dp_sideband_parse_reply(mgr, msg, &txmsg->reply);
+ 
+ 	if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
+@@ -4138,6 +4176,17 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
+ 	return 0;
+ }
+ 
++static void update_msg_rx_state(struct drm_dp_mst_topology_mgr *mgr)
++{
++	mutex_lock(&mgr->lock);
++	if (mgr->reset_rx_state) {
++		mgr->reset_rx_state = false;
++		reset_msg_rx_state(&mgr->down_rep_recv);
++		reset_msg_rx_state(&mgr->up_req_recv);
++	}
++	mutex_unlock(&mgr->lock);
++}
++
+ /**
+  * drm_dp_mst_hpd_irq_handle_event() - MST hotplug IRQ handle MST event
+  * @mgr: manager to notify irq for.
+@@ -4172,6 +4221,8 @@ int drm_dp_mst_hpd_irq_handle_event(struct drm_dp_mst_topology_mgr *mgr, const u
+ 		*handled = true;
+ 	}
+ 
++	update_msg_rx_state(mgr);
++
+ 	if (esi[1] & DP_DOWN_REP_MSG_RDY) {
+ 		ret = drm_dp_mst_handle_down_rep(mgr);
+ 		*handled = true;
+diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
+index 2d84d7ea1ab7a0..4a73821b81f6fd 100644
+--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
++++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
+@@ -184,6 +184,12 @@ static const struct dmi_system_id orientation_data[] = {
+ 		  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T103HAF"),
+ 		},
+ 		.driver_data = (void *)&lcd800x1280_rightside_up,
++	}, {	/* AYA NEO AYANEO 2 */
++		.matches = {
++		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYANEO"),
++		  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "AYANEO 2"),
++		},
++		.driver_data = (void *)&lcd1200x1920_rightside_up,
+ 	}, {	/* AYA NEO 2021 */
+ 		.matches = {
+ 		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYADEVICE"),
+@@ -196,6 +202,18 @@ static const struct dmi_system_id orientation_data[] = {
+ 		  DMI_MATCH(DMI_PRODUCT_NAME, "AIR"),
+ 		},
+ 		.driver_data = (void *)&lcd1080x1920_leftside_up,
++	}, {	/* AYA NEO Founder */
++		.matches = {
++		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYA NEO"),
++		  DMI_MATCH(DMI_PRODUCT_NAME, "AYA NEO Founder"),
++		},
++		.driver_data = (void *)&lcd800x1280_rightside_up,
++	}, {	/* AYA NEO GEEK */
++		.matches = {
++		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYANEO"),
++		  DMI_MATCH(DMI_PRODUCT_NAME, "GEEK"),
++		},
++		.driver_data = (void *)&lcd800x1280_rightside_up,
+ 	}, {	/* AYA NEO NEXT */
+ 		.matches = {
+ 		  DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
+diff --git a/drivers/gpu/drm/drm_panic.c b/drivers/gpu/drm/drm_panic.c
+index 74412b7bf936c2..0a9ecc1380d2a4 100644
+--- a/drivers/gpu/drm/drm_panic.c
++++ b/drivers/gpu/drm/drm_panic.c
+@@ -209,6 +209,14 @@ static u32 convert_xrgb8888_to_argb2101010(u32 pix)
+ 	return GENMASK(31, 30) /* set alpha bits */ | pix | ((pix >> 8) & 0x00300C03);
+ }
+ 
++static u32 convert_xrgb8888_to_abgr2101010(u32 pix)
++{
++	pix = ((pix & 0x00FF0000) >> 14) |
++	      ((pix & 0x0000FF00) << 4) |
++	      ((pix & 0x000000FF) << 22);
++	return GENMASK(31, 30) /* set alpha bits */ | pix | ((pix >> 8) & 0x00300C03);
++}
++
+ /*
+  * convert_from_xrgb8888 - convert one pixel from xrgb8888 to the desired format
+  * @color: input color, in xrgb8888 format
+@@ -242,6 +250,8 @@ static u32 convert_from_xrgb8888(u32 color, u32 format)
+ 		return convert_xrgb8888_to_xrgb2101010(color);
+ 	case DRM_FORMAT_ARGB2101010:
+ 		return convert_xrgb8888_to_argb2101010(color);
++	case DRM_FORMAT_ABGR2101010:
++		return convert_xrgb8888_to_abgr2101010(color);
+ 	default:
+ 		WARN_ONCE(1, "Can't convert to %p4cc\n", &format);
+ 		return 0;
+diff --git a/drivers/gpu/drm/mcde/mcde_drv.c b/drivers/gpu/drm/mcde/mcde_drv.c
+index 10c06440c7e73e..f1bb38f4e67349 100644
+--- a/drivers/gpu/drm/mcde/mcde_drv.c
++++ b/drivers/gpu/drm/mcde/mcde_drv.c
+@@ -473,6 +473,7 @@ static const struct of_device_id mcde_of_match[] = {
+ 	},
+ 	{},
+ };
++MODULE_DEVICE_TABLE(of, mcde_of_match);
+ 
+ static struct platform_driver mcde_driver = {
+ 	.driver = {
+diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
+index 86735430462fa6..06381c62820975 100644
+--- a/drivers/gpu/drm/panel/panel-simple.c
++++ b/drivers/gpu/drm/panel/panel-simple.c
+@@ -4565,6 +4565,31 @@ static const struct panel_desc yes_optoelectronics_ytc700tlag_05_201c = {
+ 	.connector_type = DRM_MODE_CONNECTOR_LVDS,
+ };
+ 
++static const struct drm_display_mode mchp_ac69t88a_mode = {
++	.clock = 25000,
++	.hdisplay = 800,
++	.hsync_start = 800 + 88,
++	.hsync_end = 800 + 88 + 5,
++	.htotal = 800 + 88 + 5 + 40,
++	.vdisplay = 480,
++	.vsync_start = 480 + 23,
++	.vsync_end = 480 + 23 + 5,
++	.vtotal = 480 + 23 + 5 + 1,
++};
++
++static const struct panel_desc mchp_ac69t88a = {
++	.modes = &mchp_ac69t88a_mode,
++	.num_modes = 1,
++	.bpc = 8,
++	.size = {
++		.width = 108,
++		.height = 65,
++	},
++	.bus_flags = DRM_BUS_FLAG_DE_HIGH,
++	.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA,
++	.connector_type = DRM_MODE_CONNECTOR_LVDS,
++};
++
+ static const struct drm_display_mode arm_rtsm_mode[] = {
+ 	{
+ 		.clock = 65000,
+@@ -5048,6 +5073,9 @@ static const struct of_device_id platform_of_match[] = {
+ 	}, {
+ 		.compatible = "yes-optoelectronics,ytc700tlag-05-201c",
+ 		.data = &yes_optoelectronics_ytc700tlag_05_201c,
++	}, {
++		.compatible = "microchip,ac69t88a",
++		.data = &mchp_ac69t88a,
+ 	}, {
+ 		/* Must be the last entry */
+ 		.compatible = "panel-dpi",
+diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
+index 1b2d31c4d77caa..ac77d1246b9453 100644
+--- a/drivers/gpu/drm/radeon/r600_cs.c
++++ b/drivers/gpu/drm/radeon/r600_cs.c
+@@ -2104,7 +2104,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
+ 				return -EINVAL;
+ 			}
+ 
+-			offset = radeon_get_ib_value(p, idx+1) << 8;
++			offset = (u64)radeon_get_ib_value(p, idx+1) << 8;
+ 			if (offset != track->vgt_strmout_bo_offset[idx_value]) {
+ 				DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo offset does not match: 0x%llx, 0x%x\n",
+ 					  offset, track->vgt_strmout_bo_offset[idx_value]);
+diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
+index e97c6c60bc96ef..416590ea0dc3d6 100644
+--- a/drivers/gpu/drm/scheduler/sched_main.c
++++ b/drivers/gpu/drm/scheduler/sched_main.c
+@@ -803,6 +803,14 @@ int drm_sched_job_init(struct drm_sched_job *job,
+ 		return -EINVAL;
+ 	}
+ 
++	/*
+	 * We don't know for sure how the user has allocated the struct. Thus,
+	 * zero it so that disallowed (i.e., too early) use of pointers that
+	 * this function does not set is guaranteed to lead to a NULL pointer
+	 * dereference instead of UB.
++	 */
++	memset(job, 0, sizeof(*job));
++
+ 	job->entity = entity;
+ 	job->credits = credits;
+ 	job->s_fence = drm_sched_fence_alloc(entity, owner);
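
The memset added above is a defensive-initialization idiom: the caller-provided struct may contain stack garbage, and zeroing it first turns any premature use of a not-yet-set pointer into a deterministic NULL dereference rather than undefined behavior. A minimal sketch of the idiom, with hypothetical names:

#include <string.h>

struct job {
	void *entity;
	void *fence;	/* set later, by a separate "arm" step */
};

static int job_init(struct job *job, void *entity)
{
	/*
	 * Zero everything first: if a caller dereferences job->fence
	 * before it is armed, it crashes on NULL instead of reading
	 * whatever the stack happened to contain.
	 */
	memset(job, 0, sizeof(*job));
	job->entity = entity;
	return 0;
}

int main(void)
{
	struct job j;

	return job_init(&j, (void *)&j);
}
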
+diff --git a/drivers/gpu/drm/sti/sti_mixer.c b/drivers/gpu/drm/sti/sti_mixer.c
+index 7e5f14646625b4..06c1b81912f79f 100644
+--- a/drivers/gpu/drm/sti/sti_mixer.c
++++ b/drivers/gpu/drm/sti/sti_mixer.c
+@@ -137,7 +137,7 @@ static void mixer_dbg_crb(struct seq_file *s, int val)
+ 	}
+ }
+ 
+-static void mixer_dbg_mxn(struct seq_file *s, void *addr)
++static void mixer_dbg_mxn(struct seq_file *s, void __iomem *addr)
+ {
+ 	int i;
+ 
+diff --git a/drivers/gpu/drm/v3d/v3d_perfmon.c b/drivers/gpu/drm/v3d/v3d_perfmon.c
+index 00cd081d787327..6ee56cbd3f1bfc 100644
+--- a/drivers/gpu/drm/v3d/v3d_perfmon.c
++++ b/drivers/gpu/drm/v3d/v3d_perfmon.c
+@@ -254,9 +254,9 @@ void v3d_perfmon_start(struct v3d_dev *v3d, struct v3d_perfmon *perfmon)
+ 		V3D_CORE_WRITE(0, V3D_V4_PCTR_0_SRC_X(source), channel);
+ 	}
+ 
++	V3D_CORE_WRITE(0, V3D_V4_PCTR_0_EN, mask);
+ 	V3D_CORE_WRITE(0, V3D_V4_PCTR_0_CLR, mask);
+ 	V3D_CORE_WRITE(0, V3D_PCTR_0_OVERFLOW, mask);
+-	V3D_CORE_WRITE(0, V3D_V4_PCTR_0_EN, mask);
+ 
+ 	v3d->active_perfmon = perfmon;
+ }
+diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
+index 2d7d3e90f3be44..7e0a5ea7ab859a 100644
+--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
++++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
+@@ -1924,7 +1924,7 @@ static int vc4_hdmi_audio_startup(struct device *dev, void *data)
+ 	}
+ 
+ 	if (!vc4_hdmi_audio_can_stream(vc4_hdmi)) {
+-		ret = -ENODEV;
++		ret = -ENOTSUPP;
+ 		goto out_dev_exit;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c
+index 863539e1f7e04b..c389e82463bfdb 100644
+--- a/drivers/gpu/drm/vc4/vc4_hvs.c
++++ b/drivers/gpu/drm/vc4/vc4_hvs.c
+@@ -963,6 +963,17 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
+ 			      SCALER_DISPCTRL_SCLEIRQ);
+ 
+ 
++	/* Set AXI panic mode.
++	 * VC4 panics when < 2 lines in FIFO.
++	 * VC5 panics when less than 1 line in the FIFO.
++	 */
++	dispctrl &= ~(SCALER_DISPCTRL_PANIC0_MASK |
++		      SCALER_DISPCTRL_PANIC1_MASK |
++		      SCALER_DISPCTRL_PANIC2_MASK);
++	dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_PANIC0);
++	dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_PANIC1);
++	dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_PANIC2);
++
+ 	/* Set AXI panic mode.
+ 	 * VC4 panics when < 2 lines in FIFO.
+ 	 * VC5 panics when less than 1 line in the FIFO.
+diff --git a/drivers/gpu/drm/xe/regs/xe_engine_regs.h b/drivers/gpu/drm/xe/regs/xe_engine_regs.h
+index 81b71903675e0d..7c78496e6213cc 100644
+--- a/drivers/gpu/drm/xe/regs/xe_engine_regs.h
++++ b/drivers/gpu/drm/xe/regs/xe_engine_regs.h
+@@ -186,6 +186,7 @@
+ 
+ #define VDBOX_CGCTL3F10(base)			XE_REG((base) + 0x3f10)
+ #define   IECPUNIT_CLKGATE_DIS			REG_BIT(22)
++#define   RAMDFTUNIT_CLKGATE_DIS		REG_BIT(9)
+ 
+ #define VDBOX_CGCTL3F18(base)			XE_REG((base) + 0x3f18)
+ #define   ALNUNIT_CLKGATE_DIS			REG_BIT(13)
+diff --git a/drivers/gpu/drm/xe/regs/xe_gt_regs.h b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
+index bd604b9f08e4fa..5404de2aea5457 100644
+--- a/drivers/gpu/drm/xe/regs/xe_gt_regs.h
++++ b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
+@@ -286,6 +286,9 @@
+ #define   GAMTLBVEBOX0_CLKGATE_DIS		REG_BIT(16)
+ #define   LTCDD_CLKGATE_DIS			REG_BIT(10)
+ 
++#define UNSLCGCTL9454				XE_REG(0x9454)
++#define   LSCFE_CLKGATE_DIS			REG_BIT(4)
++
+ #define XEHP_SLICE_UNIT_LEVEL_CLKGATE		XE_REG_MCR(0x94d4)
+ #define   L3_CR2X_CLKGATE_DIS			REG_BIT(17)
+ #define   L3_CLKGATE_DIS			REG_BIT(16)
+diff --git a/drivers/gpu/drm/xe/xe_devcoredump.c b/drivers/gpu/drm/xe/xe_devcoredump.c
+index bdb76e834e4c36..5221ee3f12149b 100644
+--- a/drivers/gpu/drm/xe/xe_devcoredump.c
++++ b/drivers/gpu/drm/xe/xe_devcoredump.c
+@@ -6,6 +6,7 @@
+ #include "xe_devcoredump.h"
+ #include "xe_devcoredump_types.h"
+ 
++#include <linux/ascii85.h>
+ #include <linux/devcoredump.h>
+ #include <generated/utsrelease.h>
+ 
+@@ -85,9 +86,9 @@ static ssize_t __xe_devcoredump_read(char *buffer, size_t count,
+ 
+ 	p = drm_coredump_printer(&iter);
+ 
+-	drm_printf(&p, "**** Xe Device Coredump ****\n");
+-	drm_printf(&p, "kernel: " UTS_RELEASE "\n");
+-	drm_printf(&p, "module: " KBUILD_MODNAME "\n");
++	drm_puts(&p, "**** Xe Device Coredump ****\n");
++	drm_puts(&p, "kernel: " UTS_RELEASE "\n");
++	drm_puts(&p, "module: " KBUILD_MODNAME "\n");
+ 
+ 	ts = ktime_to_timespec64(ss->snapshot_time);
+ 	drm_printf(&p, "Snapshot time: %lld.%09ld\n", ts.tv_sec, ts.tv_nsec);
+@@ -96,20 +97,25 @@ static ssize_t __xe_devcoredump_read(char *buffer, size_t count,
+ 	drm_printf(&p, "Process: %s\n", ss->process_name);
+ 	xe_device_snapshot_print(xe, &p);
+ 
+-	drm_printf(&p, "\n**** GuC CT ****\n");
+-	xe_guc_ct_snapshot_print(coredump->snapshot.ct, &p);
+-	xe_guc_exec_queue_snapshot_print(coredump->snapshot.ge, &p);
++	drm_printf(&p, "\n**** GT #%d ****\n", ss->gt->info.id);
++	drm_printf(&p, "\tTile: %d\n", ss->gt->tile->id);
+ 
+-	drm_printf(&p, "\n**** Job ****\n");
+-	xe_sched_job_snapshot_print(coredump->snapshot.job, &p);
++	drm_puts(&p, "\n**** GuC CT ****\n");
++	xe_guc_ct_snapshot_print(ss->ct, &p);
+ 
+-	drm_printf(&p, "\n**** HW Engines ****\n");
++	drm_puts(&p, "\n**** Contexts ****\n");
++	xe_guc_exec_queue_snapshot_print(ss->ge, &p);
++
++	drm_puts(&p, "\n**** Job ****\n");
++	xe_sched_job_snapshot_print(ss->job, &p);
++
++	drm_puts(&p, "\n**** HW Engines ****\n");
+ 	for (i = 0; i < XE_NUM_HW_ENGINES; i++)
+-		if (coredump->snapshot.hwe[i])
+-			xe_hw_engine_snapshot_print(coredump->snapshot.hwe[i],
+-						    &p);
+-	drm_printf(&p, "\n**** VM state ****\n");
+-	xe_vm_snapshot_print(coredump->snapshot.vm, &p);
++		if (ss->hwe[i])
++			xe_hw_engine_snapshot_print(ss->hwe[i], &p);
++
++	drm_puts(&p, "\n**** VM state ****\n");
++	xe_vm_snapshot_print(ss->vm, &p);
+ 
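
The two scan loops above implement the policy the comment describes: prefer addresses that are both free and not on any device's wish list, and only then fall back to any free address. A compact sketch of that two-pass selection, with hypothetical names and an 8-slot table:

#include <stdbool.h>
#include <stdio.h>

#define NSLOTS 8

static int get_free_addr(const bool used[NSLOTS], const bool desired[NSLOTS])
{
	/* pass 1: free and not desired by anyone */
	for (int a = 0; a < NSLOTS; a++)
		if (!used[a] && !desired[a])
			return a;

	/* pass 2: any free slot, desired or not */
	for (int a = 0; a < NSLOTS; a++)
		if (!used[a])
			return a;

	return -1;
}

int main(void)
{
	bool used[NSLOTS] = { true, false };	/* slot 0 taken */
	bool desired[NSLOTS] = { false, true };	/* slot 1 reserved as init_dyn_addr */

	printf("%d\n", get_free_addr(used, desired));	/* prints 2 */
	return 0;
}
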
+ 	return count - iter.remain;
+ }
+@@ -141,13 +147,15 @@ static void xe_devcoredump_deferred_snap_work(struct work_struct *work)
+ {
+ 	struct xe_devcoredump_snapshot *ss = container_of(work, typeof(*ss), work);
+ 	struct xe_devcoredump *coredump = container_of(ss, typeof(*coredump), snapshot);
++	unsigned int fw_ref;
+ 
+ 	/* keep going if fw fails as we still want to save the memory and SW data */
+-	if (xe_force_wake_get(gt_to_fw(ss->gt), XE_FORCEWAKE_ALL))
++	fw_ref = xe_force_wake_get(gt_to_fw(ss->gt), XE_FORCEWAKE_ALL);
++	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL))
+ 		xe_gt_info(ss->gt, "failed to get forcewake for coredump capture\n");
+ 	xe_vm_snapshot_capture_delayed(ss->vm);
+ 	xe_guc_exec_queue_snapshot_capture_delayed(ss->ge);
+-	xe_force_wake_put(gt_to_fw(ss->gt), XE_FORCEWAKE_ALL);
++	xe_force_wake_put(gt_to_fw(ss->gt), fw_ref);
+ 
+ 	/* Calculate devcoredump size */
+ 	ss->read.size = __xe_devcoredump_read(NULL, INT_MAX, coredump);
+@@ -220,8 +228,9 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump,
+ 	u32 width_mask = (0x1 << q->width) - 1;
+ 	const char *process_name = "no process";
+ 
+-	int i;
++	unsigned int fw_ref;
+ 	bool cookie;
++	int i;
+ 
+ 	ss->snapshot_time = ktime_get_real();
+ 	ss->boot_time = ktime_get_boottime();
+@@ -244,26 +253,25 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump,
+ 	}
+ 
+ 	/* keep going if fw fails as we still want to save the memory and SW data */
+-	if (xe_force_wake_get(gt_to_fw(q->gt), XE_FORCEWAKE_ALL))
+-		xe_gt_info(ss->gt, "failed to get forcewake for coredump capture\n");
++	fw_ref = xe_force_wake_get(gt_to_fw(q->gt), XE_FORCEWAKE_ALL);
+ 
+-	coredump->snapshot.ct = xe_guc_ct_snapshot_capture(&guc->ct, true);
+-	coredump->snapshot.ge = xe_guc_exec_queue_snapshot_capture(q);
+-	coredump->snapshot.job = xe_sched_job_snapshot_capture(job);
+-	coredump->snapshot.vm = xe_vm_snapshot_capture(q->vm);
++	ss->ct = xe_guc_ct_snapshot_capture(&guc->ct, true);
++	ss->ge = xe_guc_exec_queue_snapshot_capture(q);
++	ss->job = xe_sched_job_snapshot_capture(job);
++	ss->vm = xe_vm_snapshot_capture(q->vm);
+ 
+ 	for_each_hw_engine(hwe, q->gt, id) {
+ 		if (hwe->class != q->hwe->class ||
+ 		    !(BIT(hwe->logical_instance) & adj_logical_mask)) {
+-			coredump->snapshot.hwe[id] = NULL;
++			ss->hwe[id] = NULL;
+ 			continue;
+ 		}
+-		coredump->snapshot.hwe[id] = xe_hw_engine_snapshot_capture(hwe);
++		ss->hwe[id] = xe_hw_engine_snapshot_capture(hwe);
+ 	}
+ 
+ 	queue_work(system_unbound_wq, &ss->work);
+ 
+-	xe_force_wake_put(gt_to_fw(q->gt), XE_FORCEWAKE_ALL);
++	xe_force_wake_put(gt_to_fw(q->gt), fw_ref);
+ 	dma_fence_end_signalling(cookie);
+ }
+ 
+@@ -310,3 +318,89 @@ int xe_devcoredump_init(struct xe_device *xe)
+ }
+ 
+ #endif
++
++/**
++ * xe_print_blob_ascii85 - print a BLOB to some useful location in ASCII85
++ *
+ * The output is split into multiple lines because some print targets, e.g.
+ * dmesg, cannot handle arbitrarily long lines. Note also that printing to
+ * dmesg in piecemeal fashion is not possible: each separate call to drm_puts()
+ * has a line-feed automatically added! Therefore, the entire output line must
+ * be constructed in a local buffer first, then printed in one atomic output call.
++ *
++ * There is also a scheduler yield call to prevent the 'task has been stuck for
++ * 120s' kernel hang check feature from firing when printing to a slow target
++ * such as dmesg over a serial port.
++ *
++ * TODO: Add compression prior to the ASCII85 encoding to shrink huge buffers down.
++ *
++ * @p: the printer object to output to
++ * @prefix: optional prefix to add to output string
++ * @blob: the Binary Large OBject to dump out
++ * @offset: offset in bytes to skip from the front of the BLOB, must be a multiple of sizeof(u32)
++ * @size: the size in bytes of the BLOB, must be a multiple of sizeof(u32)
++ */
++void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix,
++			   const void *blob, size_t offset, size_t size)
++{
++	const u32 *blob32 = (const u32 *)blob;
++	char buff[ASCII85_BUFSZ], *line_buff;
++	size_t line_pos = 0;
++
++#define DMESG_MAX_LINE_LEN	800
++#define MIN_SPACE		(ASCII85_BUFSZ + 2)		/* 85 + "\n\0" */
++
++	if (size & 3)
++		drm_printf(p, "Size not word aligned: %zu", size);
++	if (offset & 3)
++		drm_printf(p, "Offset not word aligned: %zu", size);
++
++	line_buff = kzalloc(DMESG_MAX_LINE_LEN, GFP_KERNEL);
++	if (IS_ERR_OR_NULL(line_buff)) {
++		drm_printf(p, "Failed to allocate line buffer: %pe", line_buff);
++		return;
++	}
++
++	blob32 += offset / sizeof(*blob32);
++	size /= sizeof(*blob32);
++
++	if (prefix) {
++		strscpy(line_buff, prefix, DMESG_MAX_LINE_LEN - MIN_SPACE - 2);
++		line_pos = strlen(line_buff);
++
++		line_buff[line_pos++] = ':';
++		line_buff[line_pos++] = ' ';
++	}
++
++	while (size--) {
++		u32 val = *(blob32++);
++
++		strscpy(line_buff + line_pos, ascii85_encode(val, buff),
++			DMESG_MAX_LINE_LEN - line_pos);
++		line_pos += strlen(line_buff + line_pos);
++
++		if ((line_pos + MIN_SPACE) >= DMESG_MAX_LINE_LEN) {
++			line_buff[line_pos++] = '\n';
++			line_buff[line_pos++] = 0;
++
++			drm_puts(p, line_buff);
++
++			line_pos = 0;
++
++			/* Prevent 'stuck thread' time out errors */
++			cond_resched();
++		}
++	}
++
++	if (line_pos) {
++		line_buff[line_pos++] = '\n';
++		line_buff[line_pos++] = 0;
++
++		drm_puts(p, line_buff);
++	}
++
++	kfree(line_buff);
++
++#undef MIN_SPACE
++#undef DMESG_MAX_LINE_LEN
++}
+diff --git a/drivers/gpu/drm/xe/xe_devcoredump.h b/drivers/gpu/drm/xe/xe_devcoredump.h
+index e2fa65ce093226..a4eebc285fc837 100644
+--- a/drivers/gpu/drm/xe/xe_devcoredump.h
++++ b/drivers/gpu/drm/xe/xe_devcoredump.h
+@@ -6,6 +6,9 @@
+ #ifndef _XE_DEVCOREDUMP_H_
+ #define _XE_DEVCOREDUMP_H_
+ 
++#include <linux/types.h>
++
++struct drm_printer;
+ struct xe_device;
+ struct xe_sched_job;
+ 
+@@ -23,4 +26,7 @@ static inline int xe_devcoredump_init(struct xe_device *xe)
+ }
+ #endif
+ 
++void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix,
++			   const void *blob, size_t offset, size_t size);
++
+ #endif
+diff --git a/drivers/gpu/drm/xe/xe_devcoredump_types.h b/drivers/gpu/drm/xe/xe_devcoredump_types.h
+index 440d05d77a5af8..3cc2f095fdfbd1 100644
+--- a/drivers/gpu/drm/xe/xe_devcoredump_types.h
++++ b/drivers/gpu/drm/xe/xe_devcoredump_types.h
+@@ -37,7 +37,8 @@ struct xe_devcoredump_snapshot {
+ 	/* GuC snapshots */
+ 	/** @ct: GuC CT snapshot */
+ 	struct xe_guc_ct_snapshot *ct;
+-	/** @ge: Guc Engine snapshot */
++
++	/** @ge: GuC Submission Engine snapshot */
+ 	struct xe_guc_submit_exec_queue_snapshot *ge;
+ 
+ 	/** @hwe: HW Engine snapshot array */
+diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
+index a1987b554a8d2a..bb85208cf1a94c 100644
+--- a/drivers/gpu/drm/xe/xe_device.c
++++ b/drivers/gpu/drm/xe/xe_device.c
+@@ -919,6 +919,7 @@ void xe_device_snapshot_print(struct xe_device *xe, struct drm_printer *p)
+ 
+ 	for_each_gt(gt, xe, id) {
+ 		drm_printf(p, "GT id: %u\n", id);
++		drm_printf(p, "\tTile: %u\n", gt->tile->id);
+ 		drm_printf(p, "\tType: %s\n",
+ 			   gt->info.type == XE_GT_TYPE_MAIN ? "main" : "media");
+ 		drm_printf(p, "\tIP ver: %u.%u.%u\n",
+diff --git a/drivers/gpu/drm/xe/xe_force_wake.h b/drivers/gpu/drm/xe/xe_force_wake.h
+index a2577672f4e3e6..1608a55edc846e 100644
+--- a/drivers/gpu/drm/xe/xe_force_wake.h
++++ b/drivers/gpu/drm/xe/xe_force_wake.h
+@@ -46,4 +46,20 @@ xe_force_wake_assert_held(struct xe_force_wake *fw,
+ 	xe_gt_assert(fw->gt, fw->awake_domains & domain);
+ }
+ 
++/**
++ * xe_force_wake_ref_has_domain - verifies if the domains are in fw_ref
+ * @fw_ref: the force_wake reference
+ * @domain: forcewake domain to verify
++ *
++ * This function confirms whether the @fw_ref includes a reference to the
++ * specified @domain.
++ *
++ * Return: true if domain is refcounted.
++ */
++static inline bool
++xe_force_wake_ref_has_domain(unsigned int fw_ref, enum xe_force_wake_domains domain)
++{
++	return fw_ref & domain;
++}
++
+ #endif
+diff --git a/drivers/gpu/drm/xe/xe_gt_topology.c b/drivers/gpu/drm/xe/xe_gt_topology.c
+index 0662f71c6ede78..3e113422b88de2 100644
+--- a/drivers/gpu/drm/xe/xe_gt_topology.c
++++ b/drivers/gpu/drm/xe/xe_gt_topology.c
+@@ -5,6 +5,7 @@
+ 
+ #include "xe_gt_topology.h"
+ 
++#include <generated/xe_wa_oob.h>
+ #include <linux/bitmap.h>
+ #include <linux/compiler.h>
+ 
+@@ -12,6 +13,7 @@
+ #include "xe_assert.h"
+ #include "xe_gt.h"
+ #include "xe_mmio.h"
++#include "xe_wa.h"
+ 
+ static void
+ load_dss_mask(struct xe_gt *gt, xe_dss_mask_t mask, int numregs, ...)
+@@ -129,6 +131,18 @@ load_l3_bank_mask(struct xe_gt *gt, xe_l3_bank_mask_t l3_bank_mask)
+ 	struct xe_device *xe = gt_to_xe(gt);
+ 	u32 fuse3 = xe_mmio_read32(gt, MIRROR_FUSE3);
+ 
++	/*
++	 * PTL platforms with media version 30.00 do not provide proper values
++	 * for the media GT's L3 bank registers.  Skip the readout since we
++	 * don't have any way to obtain real values.
++	 *
++	 * This may get re-described as an official workaround in the future,
++	 * but there's no tracking number assigned yet so we use a custom
++	 * OOB workaround descriptor.
++	 */
++	if (XE_WA(gt, no_media_l3))
++		return;
++
+ 	if (GRAPHICS_VER(xe) >= 20) {
+ 		xe_l3_bank_mask_t per_node = {};
+ 		u32 meml3_en = REG_FIELD_GET(XE2_NODE_ENABLE_MASK, fuse3);
+diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
+index 9c505d3517cd1a..cd6a5f09d631e4 100644
+--- a/drivers/gpu/drm/xe/xe_guc_ct.c
++++ b/drivers/gpu/drm/xe/xe_guc_ct.c
+@@ -906,6 +906,24 @@ static int guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len,
+ 		}
+ 	}
+ 
++	/*
++	 * Occasionally it is seen that the G2H worker starts running after a delay of more than
++	 * a second even after being queued and activated by the Linux workqueue subsystem. This
+	 * leads to a G2H timeout error. The root cause of the issue lies with the scheduling
+	 * latency of the Lunarlake hybrid CPU. The issue disappears if the Lunarlake atom cores
+	 * are disabled in the BIOS, but that is beyond the control of the xe KMD.
++	 *
++	 * TODO: Drop this change once workqueue scheduling delay issue is fixed on LNL Hybrid CPU.
++	 */
++	if (!ret) {
++		flush_work(&ct->g2h_worker);
++		if (g2h_fence.done) {
++			xe_gt_warn(gt, "G2H fence %u, action %04x, done\n",
++				   g2h_fence.seqno, action[0]);
++			ret = 1;
++		}
++	}
++
+ 	/*
+ 	 * Ensure we serialize with completion side to prevent UAF with fence going out of scope on
+ 	 * the stack, since we have no clue if it will fire after the timeout before we can erase
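
The hunk above retries recovery rather than failing immediately: on timeout it synchronously flushes the delayed worker and re-checks the fence's done flag, so a late-but-successful completion is reported as success. A minimal sketch of that pattern, with hypothetical names standing in for flush_work() and the G2H fence:

#include <stdbool.h>
#include <stdio.h>

struct fence { bool done; };

/* stands in for the queued G2H worker */
static void worker(struct fence *f) { f->done = true; }

static int wait_result(struct fence *f, bool timed_out)
{
	if (!timed_out)
		return 1;		/* completed normally */

	worker(f);			/* stands in for flush_work() */
	if (f->done) {
		fprintf(stderr, "late completion, not a real timeout\n");
		return 1;		/* success after all */
	}
	return -1;			/* genuine timeout */
}

int main(void)
{
	struct fence f = { .done = false };

	return wait_result(&f, true) == 1 ? 0 : 1;
}
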
+diff --git a/drivers/gpu/drm/xe/xe_guc_log.c b/drivers/gpu/drm/xe/xe_guc_log.c
+index a37ee341942844..be47780ec2a7e7 100644
+--- a/drivers/gpu/drm/xe/xe_guc_log.c
++++ b/drivers/gpu/drm/xe/xe_guc_log.c
+@@ -6,9 +6,12 @@
+ #include "xe_guc_log.h"
+ 
+ #include <drm/drm_managed.h>
++#include <linux/vmalloc.h>
+ 
+ #include "xe_bo.h"
++#include "xe_devcoredump.h"
+ #include "xe_gt.h"
++#include "xe_gt_printk.h"
+ #include "xe_map.h"
+ #include "xe_module.h"
+ 
+@@ -49,32 +52,35 @@ static size_t guc_log_size(void)
+ 		CAPTURE_BUFFER_SIZE;
+ }
+ 
++/**
++ * xe_guc_log_print - dump a copy of the GuC log to some useful location
++ * @log: GuC log structure
++ * @p: the printer object to output to
++ */
+ void xe_guc_log_print(struct xe_guc_log *log, struct drm_printer *p)
+ {
+ 	struct xe_device *xe = log_to_xe(log);
+ 	size_t size;
+-	int i, j;
++	void *copy;
+ 
+-	xe_assert(xe, log->bo);
++	if (!log->bo) {
++		drm_puts(p, "GuC log buffer not allocated");
++		return;
++	}
+ 
+ 	size = log->bo->size;
+ 
+-#define DW_PER_READ		128
+-	xe_assert(xe, !(size % (DW_PER_READ * sizeof(u32))));
+-	for (i = 0; i < size / sizeof(u32); i += DW_PER_READ) {
+-		u32 read[DW_PER_READ];
+-
+-		xe_map_memcpy_from(xe, read, &log->bo->vmap, i * sizeof(u32),
+-				   DW_PER_READ * sizeof(u32));
+-#define DW_PER_PRINT		4
+-		for (j = 0; j < DW_PER_READ / DW_PER_PRINT; ++j) {
+-			u32 *print = read + j * DW_PER_PRINT;
+-
+-			drm_printf(p, "0x%08x 0x%08x 0x%08x 0x%08x\n",
+-				   *(print + 0), *(print + 1),
+-				   *(print + 2), *(print + 3));
+-		}
++	copy = vmalloc(size);
++	if (!copy) {
++		drm_printf(p, "Failed to allocate %zu", size);
++		return;
+ 	}
++
++	xe_map_memcpy_from(xe, copy, &log->bo->vmap, 0, size);
++
++	xe_print_blob_ascii85(p, "Log data", copy, 0, size);
++
++	vfree(copy);
+ }
+ 
+ int xe_guc_log_init(struct xe_guc_log *log)
+diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
+index 2927745d689549..fed23304e4da58 100644
+--- a/drivers/gpu/drm/xe/xe_guc_submit.c
++++ b/drivers/gpu/drm/xe/xe_guc_submit.c
+@@ -2193,7 +2193,7 @@ xe_guc_exec_queue_snapshot_print(struct xe_guc_submit_exec_queue_snapshot *snaps
+ 	if (!snapshot)
+ 		return;
+ 
+-	drm_printf(p, "\nGuC ID: %d\n", snapshot->guc.id);
++	drm_printf(p, "GuC ID: %d\n", snapshot->guc.id);
+ 	drm_printf(p, "\tName: %s\n", snapshot->name);
+ 	drm_printf(p, "\tClass: %d\n", snapshot->class);
+ 	drm_printf(p, "\tLogical mask: 0x%x\n", snapshot->logical_mask);
+diff --git a/drivers/gpu/drm/xe/xe_hw_engine.c b/drivers/gpu/drm/xe/xe_hw_engine.c
+index c9c3beb3ce8d06..547919e8ce9e45 100644
+--- a/drivers/gpu/drm/xe/xe_hw_engine.c
++++ b/drivers/gpu/drm/xe/xe_hw_engine.c
+@@ -1053,7 +1053,6 @@ void xe_hw_engine_snapshot_print(struct xe_hw_engine_snapshot *snapshot,
+ 	if (snapshot->hwe->class == XE_ENGINE_CLASS_COMPUTE)
+ 		drm_printf(p, "\tRCU_MODE: 0x%08x\n",
+ 			   snapshot->reg.rcu_mode);
+-	drm_puts(p, "\n");
+ }
+ 
+ /**
+diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c
+index 5e962e72c97ea6..025d649434673d 100644
+--- a/drivers/gpu/drm/xe/xe_pci.c
++++ b/drivers/gpu/drm/xe/xe_pci.c
+@@ -383,10 +383,12 @@ static const struct pci_device_id pciidlist[] = {
+ 	XE_ADLS_IDS(INTEL_VGA_DEVICE, &adl_s_desc),
+ 	XE_ADLP_IDS(INTEL_VGA_DEVICE, &adl_p_desc),
+ 	XE_ADLN_IDS(INTEL_VGA_DEVICE, &adl_n_desc),
++	XE_RPLU_IDS(INTEL_VGA_DEVICE, &adl_p_desc),
+ 	XE_RPLP_IDS(INTEL_VGA_DEVICE, &adl_p_desc),
+ 	XE_RPLS_IDS(INTEL_VGA_DEVICE, &adl_s_desc),
+ 	XE_DG1_IDS(INTEL_VGA_DEVICE, &dg1_desc),
+ 	XE_ATS_M_IDS(INTEL_VGA_DEVICE, &ats_m_desc),
++	XE_ARL_IDS(INTEL_VGA_DEVICE, &mtl_desc),
+ 	XE_DG2_IDS(INTEL_VGA_DEVICE, &dg2_desc),
+ 	XE_MTL_IDS(INTEL_VGA_DEVICE, &mtl_desc),
+ 	XE_LNL_IDS(INTEL_VGA_DEVICE, &lnl_desc),
+diff --git a/drivers/gpu/drm/xe/xe_query.c b/drivers/gpu/drm/xe/xe_query.c
+index 848da8e68c7a83..1c96375bd7df75 100644
+--- a/drivers/gpu/drm/xe/xe_query.c
++++ b/drivers/gpu/drm/xe/xe_query.c
+@@ -9,6 +9,7 @@
+ #include <linux/sched/clock.h>
+ 
+ #include <drm/ttm/ttm_placement.h>
++#include <generated/xe_wa_oob.h>
+ #include <uapi/drm/xe_drm.h>
+ 
+ #include "regs/xe_engine_regs.h"
+@@ -23,6 +24,7 @@
+ #include "xe_macros.h"
+ #include "xe_mmio.h"
+ #include "xe_ttm_vram_mgr.h"
++#include "xe_wa.h"
+ 
+ static const u16 xe_to_user_engine_class[] = {
+ 	[XE_ENGINE_CLASS_RENDER] = DRM_XE_ENGINE_CLASS_RENDER,
+@@ -458,12 +460,23 @@ static int query_hwconfig(struct xe_device *xe,
+ 
+ static size_t calc_topo_query_size(struct xe_device *xe)
+ {
+-	return xe->info.gt_count *
+-		(4 * sizeof(struct drm_xe_query_topology_mask) +
+-		 sizeof_field(struct xe_gt, fuse_topo.g_dss_mask) +
+-		 sizeof_field(struct xe_gt, fuse_topo.c_dss_mask) +
+-		 sizeof_field(struct xe_gt, fuse_topo.l3_bank_mask) +
+-		 sizeof_field(struct xe_gt, fuse_topo.eu_mask_per_dss));
++	struct xe_gt *gt;
++	size_t query_size = 0;
++	int id;
++
++	for_each_gt(gt, xe, id) {
++		query_size += 3 * sizeof(struct drm_xe_query_topology_mask) +
++			sizeof_field(struct xe_gt, fuse_topo.g_dss_mask) +
++			sizeof_field(struct xe_gt, fuse_topo.c_dss_mask) +
++			sizeof_field(struct xe_gt, fuse_topo.eu_mask_per_dss);
++
++		/* L3bank mask may not be available for some GTs */
++		if (!XE_WA(gt, no_media_l3))
++			query_size += sizeof(struct drm_xe_query_topology_mask) +
++				sizeof_field(struct xe_gt, fuse_topo.l3_bank_mask);
++	}
++
++	return query_size;
+ }
+ 
+ static int copy_mask(void __user **ptr,
+@@ -516,11 +529,18 @@ static int query_gt_topology(struct xe_device *xe,
+ 		if (err)
+ 			return err;
+ 
+-		topo.type = DRM_XE_TOPO_L3_BANK;
+-		err = copy_mask(&query_ptr, &topo, gt->fuse_topo.l3_bank_mask,
+-				sizeof(gt->fuse_topo.l3_bank_mask));
+-		if (err)
+-			return err;
++		/*
++		 * If the kernel doesn't have a way to obtain a correct L3bank
++		 * mask, then it's better to omit L3 from the query rather than
++		 * reporting bogus or zeroed information to userspace.
++		 */
++		if (!XE_WA(gt, no_media_l3)) {
++			topo.type = DRM_XE_TOPO_L3_BANK;
++			err = copy_mask(&query_ptr, &topo, gt->fuse_topo.l3_bank_mask,
++					sizeof(gt->fuse_topo.l3_bank_mask));
++			if (err)
++				return err;
++		}
+ 
+ 		topo.type = gt->fuse_topo.eu_type == XE_GT_EU_TYPE_SIMD16 ?
+ 			DRM_XE_TOPO_SIMD16_EU_PER_DSS :
+diff --git a/drivers/gpu/drm/xe/xe_wa.c b/drivers/gpu/drm/xe/xe_wa.c
+index 353936a0f877de..37e592b2bf062a 100644
+--- a/drivers/gpu/drm/xe/xe_wa.c
++++ b/drivers/gpu/drm/xe/xe_wa.c
+@@ -251,6 +251,34 @@ static const struct xe_rtp_entry_sr gt_was[] = {
+ 	  XE_RTP_ENTRY_FLAG(FOREACH_ENGINE),
+ 	},
+ 
++	/* Xe3_LPG */
++
++	{ XE_RTP_NAME("14021871409"),
++	  XE_RTP_RULES(GRAPHICS_VERSION(3000), GRAPHICS_STEP(A0, B0)),
++	  XE_RTP_ACTIONS(SET(UNSLCGCTL9454, LSCFE_CLKGATE_DIS))
++	},
++
++	/* Xe3_LPM */
++
++	{ XE_RTP_NAME("16021867713"),
++	  XE_RTP_RULES(MEDIA_VERSION(3000),
++		       ENGINE_CLASS(VIDEO_DECODE)),
++	  XE_RTP_ACTIONS(SET(VDBOX_CGCTL3F1C(0), MFXPIPE_CLKGATE_DIS)),
++	  XE_RTP_ENTRY_FLAG(FOREACH_ENGINE),
++	},
++	{ XE_RTP_NAME("16021865536"),
++	  XE_RTP_RULES(MEDIA_VERSION(3000),
++		       ENGINE_CLASS(VIDEO_DECODE)),
++	  XE_RTP_ACTIONS(SET(VDBOX_CGCTL3F10(0), IECPUNIT_CLKGATE_DIS)),
++	  XE_RTP_ENTRY_FLAG(FOREACH_ENGINE),
++	},
++	{ XE_RTP_NAME("14021486841"),
++	  XE_RTP_RULES(MEDIA_VERSION(3000), MEDIA_STEP(A0, B0),
++		       ENGINE_CLASS(VIDEO_DECODE)),
++	  XE_RTP_ACTIONS(SET(VDBOX_CGCTL3F10(0), RAMDFTUNIT_CLKGATE_DIS)),
++	  XE_RTP_ENTRY_FLAG(FOREACH_ENGINE),
++	},
++
+ 	{}
+ };
+ 
+@@ -567,6 +595,13 @@ static const struct xe_rtp_entry_sr engine_was[] = {
+ 			     XE_RTP_ACTION_FLAG(ENGINE_BASE)))
+ 	},
+ 
++	/* Xe3_LPG */
++
++	{ XE_RTP_NAME("14021402888"),
++	  XE_RTP_RULES(GRAPHICS_VERSION_RANGE(3000, 3001), FUNC(xe_rtp_match_first_render_or_compute)),
++	  XE_RTP_ACTIONS(SET(HALF_SLICE_CHICKEN7, CLEAR_OPTIMIZATION_DISABLE))
++	},
++
+ 	{}
+ };
+ 
+@@ -742,6 +777,18 @@ static const struct xe_rtp_entry_sr lrc_was[] = {
+ 	  XE_RTP_ACTIONS(SET(CHICKEN_RASTER_1, DIS_CLIP_NEGATIVE_BOUNDING_BOX))
+ 	},
+ 
++	/* Xe3_LPG */
++	{ XE_RTP_NAME("14021490052"),
++	  XE_RTP_RULES(GRAPHICS_VERSION(3000), GRAPHICS_STEP(A0, B0),
++		       ENGINE_CLASS(RENDER)),
++	  XE_RTP_ACTIONS(SET(FF_MODE,
++			     DIS_MESH_PARTIAL_AUTOSTRIP |
++			     DIS_MESH_AUTOSTRIP),
++			 SET(VFLSKPD,
++			     DIS_PARTIAL_AUTOSTRIP |
++			     DIS_AUTOSTRIP))
++	},
++
+ 	{}
+ };
+ 
+diff --git a/drivers/gpu/drm/xe/xe_wa_oob.rules b/drivers/gpu/drm/xe/xe_wa_oob.rules
+index 920ca506014661..264d6e116499ce 100644
+--- a/drivers/gpu/drm/xe/xe_wa_oob.rules
++++ b/drivers/gpu/drm/xe/xe_wa_oob.rules
+@@ -33,7 +33,9 @@
+ 		GRAPHICS_VERSION(2004)
+ 22019338487	MEDIA_VERSION(2000)
+ 		GRAPHICS_VERSION(2001)
++		MEDIA_VERSION(3000), MEDIA_STEP(A0, B0)
+ 22019338487_display	PLATFORM(LUNARLAKE)
+ 16023588340	GRAPHICS_VERSION(2001)
+ 14019789679	GRAPHICS_VERSION(1255)
+ 		GRAPHICS_VERSION_RANGE(1270, 2004)
++no_media_l3	MEDIA_VERSION(3000)
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index 582fd234eec789..935ccc38d12958 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -2674,9 +2674,10 @@ static bool hid_check_device_match(struct hid_device *hdev,
+ 	/*
+ 	 * hid-generic implements .match(), so we must be dealing with a
+ 	 * different HID driver here, and can simply check if
+-	 * hid_ignore_special_drivers is set or not.
++	 * hid_ignore_special_drivers or HID_QUIRK_IGNORE_SPECIAL_DRIVER
++	 * are set or not.
+ 	 */
+-	return !hid_ignore_special_drivers;
++	return !hid_ignore_special_drivers && !(hdev->quirks & HID_QUIRK_IGNORE_SPECIAL_DRIVER);
+ }
+ 
+ static int __hid_device_probe(struct hid_device *hdev, struct hid_driver *hdrv)
+diff --git a/drivers/hid/hid-generic.c b/drivers/hid/hid-generic.c
+index d2439399fb357a..9e04c6d0fcc874 100644
+--- a/drivers/hid/hid-generic.c
++++ b/drivers/hid/hid-generic.c
+@@ -40,6 +40,9 @@ static bool hid_generic_match(struct hid_device *hdev,
+ 	if (ignore_special_driver)
+ 		return true;
+ 
++	if (hdev->quirks & HID_QUIRK_IGNORE_SPECIAL_DRIVER)
++		return true;
++
+ 	if (hdev->quirks & HID_QUIRK_HAVE_SPECIAL_DRIVER)
+ 		return false;
+ 
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 92cff3f2658cf5..0f23be98c56e22 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -94,6 +94,7 @@
+ #define USB_DEVICE_ID_APPLE_MAGICMOUSE2	0x0269
+ #define USB_DEVICE_ID_APPLE_MAGICTRACKPAD	0x030e
+ #define USB_DEVICE_ID_APPLE_MAGICTRACKPAD2	0x0265
++#define USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC	0x0324
+ #define USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI	0x020e
+ #define USB_DEVICE_ID_APPLE_FOUNTAIN_ISO	0x020f
+ #define USB_DEVICE_ID_APPLE_GEYSER_ANSI	0x0214
+diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
+index 8a73b59e0827b9..ec110dea87726d 100644
+--- a/drivers/hid/hid-magicmouse.c
++++ b/drivers/hid/hid-magicmouse.c
+@@ -227,7 +227,9 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda
+ 		touch_minor = tdata[4];
+ 		state = tdata[7] & TOUCH_STATE_MASK;
+ 		down = state != TOUCH_STATE_NONE;
+-	} else if (input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) {
++	} else if (input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 ||
++		   input->id.product ==
++			   USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) {
+ 		id = tdata[8] & 0xf;
+ 		x = (tdata[1] << 27 | tdata[0] << 19) >> 19;
+ 		y = -((tdata[3] << 30 | tdata[2] << 22 | tdata[1] << 14) >> 19);
+@@ -259,8 +261,9 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda
+ 	/* If requested, emulate a scroll wheel by detecting small
+ 	 * vertical touch motions.
+ 	 */
+-	if (emulate_scroll_wheel && (input->id.product !=
+-			USB_DEVICE_ID_APPLE_MAGICTRACKPAD2)) {
++	if (emulate_scroll_wheel &&
++	    input->id.product != USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 &&
++	    input->id.product != USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) {
+ 		unsigned long now = jiffies;
+ 		int step_x = msc->touches[id].scroll_x - x;
+ 		int step_y = msc->touches[id].scroll_y - y;
+@@ -359,7 +362,9 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda
+ 		input_report_abs(input, ABS_MT_POSITION_X, x);
+ 		input_report_abs(input, ABS_MT_POSITION_Y, y);
+ 
+-		if (input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2)
++		if (input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 ||
++		    input->id.product ==
++			    USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC)
+ 			input_report_abs(input, ABS_MT_PRESSURE, pressure);
+ 
+ 		if (report_undeciphered) {
+@@ -367,7 +372,9 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda
+ 			    input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2)
+ 				input_event(input, EV_MSC, MSC_RAW, tdata[7]);
+ 			else if (input->id.product !=
+-					USB_DEVICE_ID_APPLE_MAGICTRACKPAD2)
++					 USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 &&
++				 input->id.product !=
++					 USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC)
+ 				input_event(input, EV_MSC, MSC_RAW, tdata[8]);
+ 		}
+ 	}
+@@ -493,7 +500,9 @@ static int magicmouse_raw_event(struct hid_device *hdev,
+ 		magicmouse_emit_buttons(msc, clicks & 3);
+ 		input_report_rel(input, REL_X, x);
+ 		input_report_rel(input, REL_Y, y);
+-	} else if (input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) {
++	} else if (input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 ||
++		   input->id.product ==
++			   USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) {
+ 		input_mt_sync_frame(input);
+ 		input_report_key(input, BTN_MOUSE, clicks & 1);
+ 	} else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
+@@ -545,7 +554,9 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd
+ 			__set_bit(REL_WHEEL_HI_RES, input->relbit);
+ 			__set_bit(REL_HWHEEL_HI_RES, input->relbit);
+ 		}
+-	} else if (input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) {
++	} else if (input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 ||
++		   input->id.product ==
++			   USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) {
+ 		/* If the trackpad has been connected to a Mac, the name is
+ 		 * automatically personalized, e.g., "José Expósito's Trackpad".
+ 		 * When connected through Bluetooth, the personalized name is
+@@ -621,7 +632,9 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd
+ 				  MOUSE_RES_X);
+ 		input_abs_set_res(input, ABS_MT_POSITION_Y,
+ 				  MOUSE_RES_Y);
+-	} else if (input->id.product ==  USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) {
++	} else if (input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 ||
++		   input->id.product ==
++			   USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) {
+ 		input_set_abs_params(input, ABS_MT_PRESSURE, 0, 253, 0, 0);
+ 		input_set_abs_params(input, ABS_PRESSURE, 0, 253, 0, 0);
+ 		input_set_abs_params(input, ABS_MT_ORIENTATION, -3, 4, 0, 0);
+@@ -660,7 +673,8 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd
+ 	input_set_events_per_packet(input, 60);
+ 
+ 	if (report_undeciphered &&
+-	    input->id.product != USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) {
++	    input->id.product != USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 &&
++	    input->id.product != USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) {
+ 		__set_bit(EV_MSC, input->evbit);
+ 		__set_bit(MSC_RAW, input->mscbit);
+ 	}
+@@ -685,7 +699,9 @@ static int magicmouse_input_mapping(struct hid_device *hdev,
+ 
+ 	/* Magic Trackpad does not give relative data after switching to MT */
+ 	if ((hi->input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD ||
+-	     hi->input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) &&
++	     hi->input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 ||
++	     hi->input->id.product ==
++		     USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) &&
+ 	    field->flags & HID_MAIN_ITEM_RELATIVE)
+ 		return -1;
+ 
+@@ -721,7 +737,8 @@ static int magicmouse_enable_multitouch(struct hid_device *hdev)
+ 	int ret;
+ 	int feature_size;
+ 
+-	if (hdev->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) {
++	if (hdev->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 ||
++	    hdev->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) {
+ 		if (hdev->vendor == BT_VENDOR_ID_APPLE) {
+ 			feature_size = sizeof(feature_mt_trackpad2_bt);
+ 			feature = feature_mt_trackpad2_bt;
+@@ -766,7 +783,8 @@ static int magicmouse_fetch_battery(struct hid_device *hdev)
+ 
+ 	if (!hdev->battery || hdev->vendor != USB_VENDOR_ID_APPLE ||
+ 	    (hdev->product != USB_DEVICE_ID_APPLE_MAGICMOUSE2 &&
+-	     hdev->product != USB_DEVICE_ID_APPLE_MAGICTRACKPAD2))
++	     hdev->product != USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 &&
++	     hdev->product != USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC))
+ 		return -1;
+ 
+ 	report_enum = &hdev->report_enum[hdev->battery_report_type];
+@@ -835,7 +853,9 @@ static int magicmouse_probe(struct hid_device *hdev,
+ 
+ 	if (id->vendor == USB_VENDOR_ID_APPLE &&
+ 	    (id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2 ||
+-	     (id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 && hdev->type != HID_TYPE_USBMOUSE)))
++	     ((id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 ||
++	       id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) &&
++	      hdev->type != HID_TYPE_USBMOUSE)))
+ 		return 0;
+ 
+ 	if (!msc->input) {
+@@ -850,7 +870,8 @@ static int magicmouse_probe(struct hid_device *hdev,
+ 	else if (id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2)
+ 		report = hid_register_report(hdev, HID_INPUT_REPORT,
+ 			MOUSE2_REPORT_ID, 0);
+-	else if (id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) {
++	else if (id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 ||
++		 id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) {
+ 		if (id->vendor == BT_VENDOR_ID_APPLE)
+ 			report = hid_register_report(hdev, HID_INPUT_REPORT,
+ 				TRACKPAD2_BT_REPORT_ID, 0);
+@@ -920,7 +941,8 @@ static const __u8 *magicmouse_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ 	 */
+ 	if (hdev->vendor == USB_VENDOR_ID_APPLE &&
+ 	    (hdev->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2 ||
+-	     hdev->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) &&
++	     hdev->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 ||
++	     hdev->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) &&
+ 	    *rsize == 83 && rdesc[46] == 0x84 && rdesc[58] == 0x85) {
+ 		hid_info(hdev,
+ 			 "fixing up magicmouse battery report descriptor\n");
+@@ -951,6 +973,10 @@ static const struct hid_device_id magic_mice[] = {
+ 		USB_DEVICE_ID_APPLE_MAGICTRACKPAD2), .driver_data = 0 },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE,
+ 		USB_DEVICE_ID_APPLE_MAGICTRACKPAD2), .driver_data = 0 },
++	{ HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE,
++		USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC), .driver_data = 0 },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE,
++		USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC), .driver_data = 0 },
+ 	{ }
+ };
+ MODULE_DEVICE_TABLE(hid, magic_mice);
+diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
+index 43664a24176fca..4e87380d3edd6b 100644
+--- a/drivers/hid/i2c-hid/i2c-hid-core.c
++++ b/drivers/hid/i2c-hid/i2c-hid-core.c
+@@ -414,7 +414,19 @@ static int i2c_hid_set_power(struct i2c_hid *ihid, int power_state)
+ 
+ 	i2c_hid_dbg(ihid, "%s\n", __func__);
+ 
++	/*
++	 * Some STM-based devices need 400µs after a rising clock edge to wake
++	 * from deep sleep, in which case the first request will fail due to
++	 * the address not being acknowledged. Try after a short sleep to see
++	 * if the device came alive on the bus. Certain Weida Tech devices also
++	 * need this.
++	 */
+ 	ret = i2c_hid_set_power_command(ihid, power_state);
++	if (ret && power_state == I2C_HID_PWR_ON) {
++		usleep_range(400, 500);
++		ret = i2c_hid_set_power_command(ihid, I2C_HID_PWR_ON);
++	}
++
+ 	if (ret)
+ 		dev_err(&ihid->client->dev,
+ 			"failed to change power setting.\n");
+@@ -976,14 +988,6 @@ static int i2c_hid_core_resume(struct i2c_hid *ihid)
+ 
+ 	enable_irq(client->irq);
+ 
+-	/* Make sure the device is awake on the bus */
+-	ret = i2c_hid_probe_address(ihid);
+-	if (ret < 0) {
+-		dev_err(&client->dev, "nothing at address after resume: %d\n",
+-			ret);
+-		return -ENXIO;
+-	}
+-
+ 	/* On Goodix 27c6:0d42 wait extra time before device wakeup.
+ 	 * It's not clear why but if we send wakeup too early, the device will
+ 	 * never trigger input interrupts.
+diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
+index 2bc45b24075c3f..9843b52bd017a0 100644
+--- a/drivers/hid/wacom_sys.c
++++ b/drivers/hid/wacom_sys.c
+@@ -2241,7 +2241,8 @@ static void wacom_update_name(struct wacom *wacom, const char *suffix)
+ 		if (hid_is_usb(wacom->hdev)) {
+ 			struct usb_interface *intf = to_usb_interface(wacom->hdev->dev.parent);
+ 			struct usb_device *dev = interface_to_usbdev(intf);
+-			product_name = dev->product;
++			if (dev->product != NULL)
++				product_name = dev->product;
+ 		}
+ 
+ 		if (wacom->hdev->bus == BUS_I2C) {
+diff --git a/drivers/hwmon/nct6775-platform.c b/drivers/hwmon/nct6775-platform.c
+index 096f1daa8f2bcf..1218a3b449a801 100644
+--- a/drivers/hwmon/nct6775-platform.c
++++ b/drivers/hwmon/nct6775-platform.c
+@@ -1350,6 +1350,8 @@ static const char * const asus_msi_boards[] = {
+ 	"Pro H610M-CT D4",
+ 	"Pro H610T D4",
+ 	"Pro Q670M-C",
++	"Pro WS 600M-CL",
++	"Pro WS 665-ACE",
+ 	"Pro WS W680-ACE",
+ 	"Pro WS W680-ACE IPMI",
+ 	"Pro WS W790-ACE",
+diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
+index 6b3ba7e5723aa1..2254abda5c46c9 100644
+--- a/drivers/i2c/busses/Kconfig
++++ b/drivers/i2c/busses/Kconfig
+@@ -160,6 +160,7 @@ config I2C_I801
+ 	    Meteor Lake (SOC and PCH)
+ 	    Birch Stream (SOC)
+ 	    Arrow Lake (SOC)
++	    Panther Lake (SOC)
+ 
+ 	  This driver can also be built as a module.  If so, the module
+ 	  will be called i2c-i801.
+diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
+index 299fe9d3afab0a..75dab01d43a750 100644
+--- a/drivers/i2c/busses/i2c-i801.c
++++ b/drivers/i2c/busses/i2c-i801.c
+@@ -81,6 +81,8 @@
+  * Meteor Lake PCH-S (PCH)	0x7f23	32	hard	yes	yes	yes
+  * Birch Stream (SOC)		0x5796	32	hard	yes	yes	yes
+  * Arrow Lake-H (SOC)		0x7722	32	hard	yes	yes	yes
++ * Panther Lake-H (SOC)		0xe322	32	hard	yes	yes	yes
++ * Panther Lake-P (SOC)		0xe422	32	hard	yes	yes	yes
+  *
+  * Features supported by this driver:
+  * Software PEC				no
+@@ -261,6 +263,8 @@
+ #define PCI_DEVICE_ID_INTEL_CANNONLAKE_H_SMBUS		0xa323
+ #define PCI_DEVICE_ID_INTEL_COMETLAKE_V_SMBUS		0xa3a3
+ #define PCI_DEVICE_ID_INTEL_METEOR_LAKE_SOC_S_SMBUS	0xae22
++#define PCI_DEVICE_ID_INTEL_PANTHER_LAKE_H_SMBUS	0xe322
++#define PCI_DEVICE_ID_INTEL_PANTHER_LAKE_P_SMBUS	0xe422
+ 
+ struct i801_mux_config {
+ 	char *gpio_chip;
+@@ -1055,6 +1059,8 @@ static const struct pci_device_id i801_ids[] = {
+ 	{ PCI_DEVICE_DATA(INTEL, METEOR_LAKE_PCH_S_SMBUS,	FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ 	{ PCI_DEVICE_DATA(INTEL, BIRCH_STREAM_SMBUS,		FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ 	{ PCI_DEVICE_DATA(INTEL, ARROW_LAKE_H_SMBUS,		FEATURES_ICH5 | FEATURE_TCO_CNL) },
++	{ PCI_DEVICE_DATA(INTEL, PANTHER_LAKE_H_SMBUS,		FEATURES_ICH5 | FEATURE_TCO_CNL) },
++	{ PCI_DEVICE_DATA(INTEL, PANTHER_LAKE_P_SMBUS,		FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ 	{ 0, }
+ };
+ 
+diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
+index da83c49223b33e..42310c9a00c2d1 100644
+--- a/drivers/i3c/master.c
++++ b/drivers/i3c/master.c
+@@ -282,7 +282,8 @@ static int i3c_device_uevent(const struct device *dev, struct kobj_uevent_env *e
+ 	struct i3c_device_info devinfo;
+ 	u16 manuf, part, ext;
+ 
+-	i3c_device_get_info(i3cdev, &devinfo);
++	if (i3cdev->desc)
++		devinfo = i3cdev->desc->info;
+ 	manuf = I3C_PID_MANUF_ID(devinfo.pid);
+ 	part = I3C_PID_PART_ID(devinfo.pid);
+ 	ext = I3C_PID_EXTRA_INFO(devinfo.pid);
+@@ -345,10 +346,10 @@ const struct bus_type i3c_bus_type = {
+ EXPORT_SYMBOL_GPL(i3c_bus_type);
+ 
+ static enum i3c_addr_slot_status
+-i3c_bus_get_addr_slot_status(struct i3c_bus *bus, u16 addr)
++i3c_bus_get_addr_slot_status_mask(struct i3c_bus *bus, u16 addr, u32 mask)
+ {
+ 	unsigned long status;
+-	int bitpos = addr * 2;
++	int bitpos = addr * I3C_ADDR_SLOT_STATUS_BITS;
+ 
+ 	if (addr > I2C_MAX_ADDR)
+ 		return I3C_ADDR_SLOT_RSVD;
+@@ -356,22 +357,33 @@ i3c_bus_get_addr_slot_status(struct i3c_bus *bus, u16 addr)
+ 	status = bus->addrslots[bitpos / BITS_PER_LONG];
+ 	status >>= bitpos % BITS_PER_LONG;
+ 
+-	return status & I3C_ADDR_SLOT_STATUS_MASK;
++	return status & mask;
+ }
+ 
+-static void i3c_bus_set_addr_slot_status(struct i3c_bus *bus, u16 addr,
+-					 enum i3c_addr_slot_status status)
++static enum i3c_addr_slot_status
++i3c_bus_get_addr_slot_status(struct i3c_bus *bus, u16 addr)
++{
++	return i3c_bus_get_addr_slot_status_mask(bus, addr, I3C_ADDR_SLOT_STATUS_MASK);
++}
++
++static void i3c_bus_set_addr_slot_status_mask(struct i3c_bus *bus, u16 addr,
++					      enum i3c_addr_slot_status status, u32 mask)
+ {
+-	int bitpos = addr * 2;
++	int bitpos = addr * I3C_ADDR_SLOT_STATUS_BITS;
+ 	unsigned long *ptr;
+ 
+ 	if (addr > I2C_MAX_ADDR)
+ 		return;
+ 
+ 	ptr = bus->addrslots + (bitpos / BITS_PER_LONG);
+-	*ptr &= ~((unsigned long)I3C_ADDR_SLOT_STATUS_MASK <<
+-						(bitpos % BITS_PER_LONG));
+-	*ptr |= (unsigned long)status << (bitpos % BITS_PER_LONG);
++	*ptr &= ~((unsigned long)mask << (bitpos % BITS_PER_LONG));
++	*ptr |= ((unsigned long)status & mask) << (bitpos % BITS_PER_LONG);
++}
++
++static void i3c_bus_set_addr_slot_status(struct i3c_bus *bus, u16 addr,
++					 enum i3c_addr_slot_status status)
++{
++	i3c_bus_set_addr_slot_status_mask(bus, addr, status, I3C_ADDR_SLOT_STATUS_MASK);
+ }
+ 
+ static bool i3c_bus_dev_addr_is_avail(struct i3c_bus *bus, u8 addr)
+@@ -383,13 +395,44 @@ static bool i3c_bus_dev_addr_is_avail(struct i3c_bus *bus, u8 addr)
+ 	return status == I3C_ADDR_SLOT_FREE;
+ }
+ 
++/*
++ * ┌────┬─────────────┬───┬─────────┬───┐
++ * │S/Sr│ 7'h7E RnW=0 │ACK│ ENTDAA  │ T ├────┐
++ * └────┴─────────────┴───┴─────────┴───┘    │
++ * ┌─────────────────────────────────────────┘
++ * │  ┌──┬─────────────┬───┬─────────────────┬────────────────┬───┬─────────┐
++ * └─►│Sr│7'h7E RnW=1  │ACK│48bit UID BCR DCR│Assign 7bit Addr│PAR│ ACK/NACK│
++ *    └──┴─────────────┴───┴─────────────────┴────────────────┴───┴─────────┘
++ * Some master controllers (such as HCI) need to prepare the entire above transaction before
++ * sending it out to the I3C bus. This means that a 7-bit dynamic address needs to be allocated
++ * before knowing the target device's UID information.
++ *
++ * However, some I3C targets may request specific addresses (called as "init_dyn_addr"), which is
++ * typically specified by the DT-'s assigned-address property. Lower addresses having higher IBI
++ * priority. If it is available, i3c_bus_get_free_addr() preferably return a free address that is
++ * not in the list of desired addresses (called as "init_dyn_addr"). This allows the device with
++ * the "init_dyn_addr" to switch to its "init_dyn_addr" when it hot-joins the I3C bus. Otherwise,
++ * if the "init_dyn_addr" is already in use by another I3C device, the target device will not be
++ * able to switch to its desired address.
++ *
++ * If the previous step fails, fallback returning one of the remaining unassigned address,
++ * regardless of its state in the desired list.
++ */
+ static int i3c_bus_get_free_addr(struct i3c_bus *bus, u8 start_addr)
+ {
+ 	enum i3c_addr_slot_status status;
+ 	u8 addr;
+ 
+ 	for (addr = start_addr; addr < I3C_MAX_ADDR; addr++) {
+-		status = i3c_bus_get_addr_slot_status(bus, addr);
++		status = i3c_bus_get_addr_slot_status_mask(bus, addr,
++							   I3C_ADDR_SLOT_EXT_STATUS_MASK);
++		if (status == I3C_ADDR_SLOT_FREE)
++			return addr;
++	}
++
++	for (addr = start_addr; addr < I3C_MAX_ADDR; addr++) {
++		status = i3c_bus_get_addr_slot_status_mask(bus, addr,
++							   I3C_ADDR_SLOT_STATUS_MASK);
+ 		if (status == I3C_ADDR_SLOT_FREE)
+ 			return addr;
+ 	}
+@@ -1506,16 +1549,9 @@ static int i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
+ 				       u8 old_dyn_addr)
+ {
+ 	struct i3c_master_controller *master = i3c_dev_get_master(dev);
+-	enum i3c_addr_slot_status status;
+ 	int ret;
+ 
+-	if (dev->info.dyn_addr != old_dyn_addr &&
+-	    (!dev->boardinfo ||
+-	     dev->info.dyn_addr != dev->boardinfo->init_dyn_addr)) {
+-		status = i3c_bus_get_addr_slot_status(&master->bus,
+-						      dev->info.dyn_addr);
+-		if (status != I3C_ADDR_SLOT_FREE)
+-			return -EBUSY;
++	if (dev->info.dyn_addr != old_dyn_addr) {
+ 		i3c_bus_set_addr_slot_status(&master->bus,
+ 					     dev->info.dyn_addr,
+ 					     I3C_ADDR_SLOT_I3C_DEV);
+@@ -1918,9 +1954,11 @@ static int i3c_master_bus_init(struct i3c_master_controller *master)
+ 			goto err_rstdaa;
+ 		}
+ 
+-		i3c_bus_set_addr_slot_status(&master->bus,
+-					     i3cboardinfo->init_dyn_addr,
+-					     I3C_ADDR_SLOT_I3C_DEV);
++		/* Do not mark as occupied until real device exist in bus */
++		i3c_bus_set_addr_slot_status_mask(&master->bus,
++						  i3cboardinfo->init_dyn_addr,
++						  I3C_ADDR_SLOT_EXT_DESIRED,
++						  I3C_ADDR_SLOT_EXT_STATUS_MASK);
+ 
+ 		/*
+ 		 * Only try to create/attach devices that have a static
+@@ -2088,7 +2126,8 @@ int i3c_master_add_i3c_dev_locked(struct i3c_master_controller *master,
+ 	else
+ 		expected_dyn_addr = newdev->info.dyn_addr;
+ 
+-	if (newdev->info.dyn_addr != expected_dyn_addr) {
++	if (newdev->info.dyn_addr != expected_dyn_addr &&
++	    i3c_bus_get_addr_slot_status(&master->bus, expected_dyn_addr) == I3C_ADDR_SLOT_FREE) {
+ 		/*
+ 		 * Try to apply the expected dynamic address. If it fails, keep
+ 		 * the address assigned by the master.
+diff --git a/drivers/i3c/master/mipi-i3c-hci/dma.c b/drivers/i3c/master/mipi-i3c-hci/dma.c
+index a918e96b21fddc..13adc584009429 100644
+--- a/drivers/i3c/master/mipi-i3c-hci/dma.c
++++ b/drivers/i3c/master/mipi-i3c-hci/dma.c
+@@ -159,10 +159,10 @@ static void hci_dma_cleanup(struct i3c_hci *hci)
+ 	for (i = 0; i < rings->total; i++) {
+ 		rh = &rings->headers[i];
+ 
++		rh_reg_write(INTR_SIGNAL_ENABLE, 0);
+ 		rh_reg_write(RING_CONTROL, 0);
+ 		rh_reg_write(CR_SETUP, 0);
+ 		rh_reg_write(IBI_SETUP, 0);
+-		rh_reg_write(INTR_SIGNAL_ENABLE, 0);
+ 
+ 		if (rh->xfer)
+ 			dma_free_coherent(&hci->master.dev,
+diff --git a/drivers/iio/adc/ad7192.c b/drivers/iio/adc/ad7192.c
+index 7042ddfdfc03ee..955e9eff0099e5 100644
+--- a/drivers/iio/adc/ad7192.c
++++ b/drivers/iio/adc/ad7192.c
+@@ -1394,6 +1394,9 @@ static int ad7192_probe(struct spi_device *spi)
+ 	st->int_vref_mv = ret == -ENODEV ? avdd_mv : ret / MILLI;
+ 
+ 	st->chip_info = spi_get_device_match_data(spi);
++	if (!st->chip_info)
++		return -ENODEV;
++
+ 	indio_dev->name = st->chip_info->name;
+ 	indio_dev->modes = INDIO_DIRECT_MODE;
+ 	indio_dev->info = st->chip_info->info;
+diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c
+index 8c516ede911619..640a5d3aa2c6e7 100644
+--- a/drivers/iio/light/ltr501.c
++++ b/drivers/iio/light/ltr501.c
+@@ -1613,6 +1613,8 @@ static const struct acpi_device_id ltr_acpi_match[] = {
+ 	{ "LTER0501", ltr501 },
+ 	{ "LTER0559", ltr559 },
+ 	{ "LTER0301", ltr301 },
++	/* https://www.catalog.update.microsoft.com/Search.aspx?q=lter0303 */
++	{ "LTER0303", ltr303 },
+ 	{ },
+ };
+ MODULE_DEVICE_TABLE(acpi, ltr_acpi_match);
+diff --git a/drivers/iio/magnetometer/af8133j.c b/drivers/iio/magnetometer/af8133j.c
+index d81d89af6283b7..acd291f3e7924c 100644
+--- a/drivers/iio/magnetometer/af8133j.c
++++ b/drivers/iio/magnetometer/af8133j.c
+@@ -312,10 +312,11 @@ static int af8133j_set_scale(struct af8133j_data *data,
+ 	 * When suspended, just store the new range to data->range to be
+ 	 * applied later during power up.
+ 	 */
+-	if (!pm_runtime_status_suspended(dev))
++	if (!pm_runtime_status_suspended(dev)) {
+ 		scoped_guard(mutex, &data->mutex)
+ 			ret = regmap_write(data->regmap,
+ 					   AF8133J_REG_RANGE, range);
++	}
+ 
+ 	pm_runtime_enable(dev);
+ 
+diff --git a/drivers/iio/magnetometer/yamaha-yas530.c b/drivers/iio/magnetometer/yamaha-yas530.c
+index 65011a8598d332..c55a38650c0d47 100644
+--- a/drivers/iio/magnetometer/yamaha-yas530.c
++++ b/drivers/iio/magnetometer/yamaha-yas530.c
+@@ -372,6 +372,7 @@ static int yas537_measure(struct yas5xx *yas5xx, u16 *t, u16 *x, u16 *y1, u16 *y
+ 	u8 data[8];
+ 	u16 xy1y2[3];
+ 	s32 h[3], s[3];
++	int half_range = BIT(13);
+ 	int i, ret;
+ 
+ 	mutex_lock(&yas5xx->lock);
+@@ -406,13 +407,13 @@ static int yas537_measure(struct yas5xx *yas5xx, u16 *t, u16 *x, u16 *y1, u16 *y
+ 	/* The second version of YAS537 needs to include calibration coefficients */
+ 	if (yas5xx->version == YAS537_VERSION_1) {
+ 		for (i = 0; i < 3; i++)
+-			s[i] = xy1y2[i] - BIT(13);
+-		h[0] = (c->k *   (128 * s[0] + c->a2 * s[1] + c->a3 * s[2])) / BIT(13);
+-		h[1] = (c->k * (c->a4 * s[0] + c->a5 * s[1] + c->a6 * s[2])) / BIT(13);
+-		h[2] = (c->k * (c->a7 * s[0] + c->a8 * s[1] + c->a9 * s[2])) / BIT(13);
++			s[i] = xy1y2[i] - half_range;
++		h[0] = (c->k *   (128 * s[0] + c->a2 * s[1] + c->a3 * s[2])) / half_range;
++		h[1] = (c->k * (c->a4 * s[0] + c->a5 * s[1] + c->a6 * s[2])) / half_range;
++		h[2] = (c->k * (c->a7 * s[0] + c->a8 * s[1] + c->a9 * s[2])) / half_range;
+ 		for (i = 0; i < 3; i++) {
+-			clamp_val(h[i], -BIT(13), BIT(13) - 1);
+-			xy1y2[i] = h[i] + BIT(13);
++			h[i] = clamp(h[i], -half_range, half_range - 1);
++			xy1y2[i] = h[i] + half_range;
+ 		}
+ 	}
+ 
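
The functional part of this hunk is the assignment: the original code called clamp_val(h[i], ...) and discarded the return value, which made the clamp a no-op. A small standalone illustration of the pitfall, using a local clamp macro as a stand-in for the kernel's:

#include <stdio.h>

#define clamp(v, lo, hi) ((v) < (lo) ? (lo) : (v) > (hi) ? (hi) : (v))

int main(void)
{
	int h = 9000, half_range = 1 << 13;	/* valid range: -8192..8191 */

	clamp(h, -half_range, half_range - 1);		/* result discarded: h is still 9000 */
	h = clamp(h, -half_range, half_range - 1);	/* correct: h becomes 8191 */
	printf("%d\n", h);
	return 0;
}
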
+diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c
+index 804b788f3f167d..f3399087859fd1 100644
+--- a/drivers/iommu/amd/io_pgtable.c
++++ b/drivers/iommu/amd/io_pgtable.c
+@@ -118,6 +118,7 @@ static void free_sub_pt(u64 *root, int mode, struct list_head *freelist)
+  */
+ static bool increase_address_space(struct amd_io_pgtable *pgtable,
+ 				   unsigned long address,
++				   unsigned int page_size_level,
+ 				   gfp_t gfp)
+ {
+ 	struct io_pgtable_cfg *cfg = &pgtable->pgtbl.cfg;
+@@ -133,7 +134,8 @@ static bool increase_address_space(struct amd_io_pgtable *pgtable,
+ 
+ 	spin_lock_irqsave(&domain->lock, flags);
+ 
+-	if (address <= PM_LEVEL_SIZE(pgtable->mode))
++	if (address <= PM_LEVEL_SIZE(pgtable->mode) &&
++	    pgtable->mode - 1 >= page_size_level)
+ 		goto out;
+ 
+ 	ret = false;
+@@ -163,18 +165,21 @@ static u64 *alloc_pte(struct amd_io_pgtable *pgtable,
+ 		      gfp_t gfp,
+ 		      bool *updated)
+ {
++	unsigned long last_addr = address + (page_size - 1);
+ 	struct io_pgtable_cfg *cfg = &pgtable->pgtbl.cfg;
+ 	int level, end_lvl;
+ 	u64 *pte, *page;
+ 
+ 	BUG_ON(!is_power_of_2(page_size));
+ 
+-	while (address > PM_LEVEL_SIZE(pgtable->mode)) {
++	while (last_addr > PM_LEVEL_SIZE(pgtable->mode) ||
++	       pgtable->mode - 1 < PAGE_SIZE_LEVEL(page_size)) {
+ 		/*
+ 		 * Return an error if there is no memory to update the
+ 		 * page-table.
+ 		 */
+-		if (!increase_address_space(pgtable, address, gfp))
++		if (!increase_address_space(pgtable, last_addr,
++					    PAGE_SIZE_LEVEL(page_size), gfp))
+ 			return NULL;
+ 	}
+ 
+diff --git a/drivers/iommu/iommufd/fault.c b/drivers/iommu/iommufd/fault.c
+index e590973ce5cfa2..b8393a8c075396 100644
+--- a/drivers/iommu/iommufd/fault.c
++++ b/drivers/iommu/iommufd/fault.c
+@@ -415,8 +415,6 @@ int iommufd_fault_alloc(struct iommufd_ucmd *ucmd)
+ 	put_unused_fd(fdno);
+ out_fput:
+ 	fput(filep);
+-	refcount_dec(&fault->obj.users);
+-	iommufd_ctx_put(fault->ictx);
+ out_abort:
+ 	iommufd_object_abort_and_destroy(ucmd->ictx, &fault->obj);
+ 
+diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
+index d82bcab233a1b0..66ce15027f28d7 100644
+--- a/drivers/irqchip/Kconfig
++++ b/drivers/irqchip/Kconfig
+@@ -407,7 +407,7 @@ config PARTITION_PERCPU
+ config STM32MP_EXTI
+ 	tristate "STM32MP extended interrupts and event controller"
+ 	depends on (ARCH_STM32 && !ARM_SINGLE_ARMV7M) || COMPILE_TEST
+-	default y
++	default ARCH_STM32 && !ARM_SINGLE_ARMV7M
+ 	select IRQ_DOMAIN_HIERARCHY
+ 	select GENERIC_IRQ_CHIP
+ 	help
+diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
+index 52f625e07658cb..d9b6ec844cdda0 100644
+--- a/drivers/irqchip/irq-gic-v3-its.c
++++ b/drivers/irqchip/irq-gic-v3-its.c
+@@ -44,6 +44,7 @@
+ #define ITS_FLAGS_WORKAROUND_CAVIUM_22375	(1ULL << 1)
+ #define ITS_FLAGS_WORKAROUND_CAVIUM_23144	(1ULL << 2)
+ #define ITS_FLAGS_FORCE_NON_SHAREABLE		(1ULL << 3)
++#define ITS_FLAGS_WORKAROUND_HISILICON_162100801	(1ULL << 4)
+ 
+ #define RD_LOCAL_LPI_ENABLED                    BIT(0)
+ #define RD_LOCAL_PENDTABLE_PREALLOCATED         BIT(1)
+@@ -61,6 +62,7 @@ static u32 lpi_id_bits;
+ #define LPI_PENDBASE_SZ		ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K)
+ 
+ static u8 __ro_after_init lpi_prop_prio;
++static struct its_node *find_4_1_its(void);
+ 
+ /*
+  * Collection structure - just an ID, and a redistributor address to
+@@ -3797,6 +3799,20 @@ static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to)
+ 	raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
+ }
+ 
++static void its_vpe_4_1_invall_locked(int cpu, struct its_vpe *vpe)
++{
++	void __iomem *rdbase;
++	u64 val;
++
++	val  = GICR_INVALLR_V;
++	val |= FIELD_PREP(GICR_INVALLR_VPEID, vpe->vpe_id);
++
++	guard(raw_spinlock)(&gic_data_rdist_cpu(cpu)->rd_lock);
++	rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
++	gic_write_lpir(val, rdbase + GICR_INVALLR);
++	wait_for_syncr(rdbase);
++}
++
+ static int its_vpe_set_affinity(struct irq_data *d,
+ 				const struct cpumask *mask_val,
+ 				bool force)
+@@ -3804,6 +3820,7 @@ static int its_vpe_set_affinity(struct irq_data *d,
+ 	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+ 	unsigned int from, cpu = nr_cpu_ids;
+ 	struct cpumask *table_mask;
++	struct its_node *its;
+ 	unsigned long flags;
+ 
+ 	/*
+@@ -3866,6 +3883,11 @@ static int its_vpe_set_affinity(struct irq_data *d,
+ 	vpe->col_idx = cpu;
+ 
+ 	its_send_vmovp(vpe);
++
++	its = find_4_1_its();
++	if (its && its->flags & ITS_FLAGS_WORKAROUND_HISILICON_162100801)
++		its_vpe_4_1_invall_locked(cpu, vpe);
++
+ 	its_vpe_db_proxy_move(vpe, from, cpu);
+ 
+ out:
+@@ -4173,22 +4195,12 @@ static void its_vpe_4_1_deschedule(struct its_vpe *vpe,
+ 
+ static void its_vpe_4_1_invall(struct its_vpe *vpe)
+ {
+-	void __iomem *rdbase;
+ 	unsigned long flags;
+-	u64 val;
+ 	int cpu;
+ 
+-	val  = GICR_INVALLR_V;
+-	val |= FIELD_PREP(GICR_INVALLR_VPEID, vpe->vpe_id);
+-
+ 	/* Target the redistributor this vPE is currently known on */
+ 	cpu = vpe_to_cpuid_lock(vpe, &flags);
+-	raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
+-	rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
+-	gic_write_lpir(val, rdbase + GICR_INVALLR);
+-
+-	wait_for_syncr(rdbase);
+-	raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
++	its_vpe_4_1_invall_locked(cpu, vpe);
+ 	vpe_to_cpuid_unlock(vpe, flags);
+ }
+ 
+@@ -4781,6 +4793,14 @@ static bool its_set_non_coherent(void *data)
+ 	return true;
+ }
+ 
++static bool __maybe_unused its_enable_quirk_hip09_162100801(void *data)
++{
++	struct its_node *its = data;
++
++	its->flags |= ITS_FLAGS_WORKAROUND_HISILICON_162100801;
++	return true;
++}
++
+ static const struct gic_quirk its_quirks[] = {
+ #ifdef CONFIG_CAVIUM_ERRATUM_22375
+ 	{
+@@ -4827,6 +4847,14 @@ static const struct gic_quirk its_quirks[] = {
+ 		.init	= its_enable_quirk_hip07_161600802,
+ 	},
+ #endif
++#ifdef CONFIG_HISILICON_ERRATUM_162100801
++	{
++		.desc	= "ITS: Hip09 erratum 162100801",
++		.iidr	= 0x00051736,
++		.mask	= 0xffffffff,
++		.init	= its_enable_quirk_hip09_162100801,
++	},
++#endif
+ #ifdef CONFIG_ROCKCHIP_ERRATUM_3588001
+ 	{
+ 		.desc   = "ITS: Rockchip erratum RK3588001",
+diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
+index 06b97fd49ad9a2..f69f4e928d6143 100644
+--- a/drivers/leds/led-class.c
++++ b/drivers/leds/led-class.c
+@@ -29,11 +29,14 @@ static ssize_t brightness_show(struct device *dev,
+ 		struct device_attribute *attr, char *buf)
+ {
+ 	struct led_classdev *led_cdev = dev_get_drvdata(dev);
++	unsigned int brightness;
+ 
+-	/* no lock needed for this */
++	mutex_lock(&led_cdev->led_access);
+ 	led_update_brightness(led_cdev);
++	brightness = led_cdev->brightness;
++	mutex_unlock(&led_cdev->led_access);
+ 
+-	return sprintf(buf, "%u\n", led_cdev->brightness);
++	return sprintf(buf, "%u\n", brightness);
+ }
+ 
+ static ssize_t brightness_store(struct device *dev,
+@@ -70,8 +73,13 @@ static ssize_t max_brightness_show(struct device *dev,
+ 		struct device_attribute *attr, char *buf)
+ {
+ 	struct led_classdev *led_cdev = dev_get_drvdata(dev);
++	unsigned int max_brightness;
++
++	mutex_lock(&led_cdev->led_access);
++	max_brightness = led_cdev->max_brightness;
++	mutex_unlock(&led_cdev->led_access);
+ 
+-	return sprintf(buf, "%u\n", led_cdev->max_brightness);
++	return sprintf(buf, "%u\n", max_brightness);
+ }
+ static DEVICE_ATTR_RO(max_brightness);
+ 
+diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
+index 94885e411085ad..82102a4c5d6883 100644
+--- a/drivers/mailbox/pcc.c
++++ b/drivers/mailbox/pcc.c
+@@ -269,6 +269,35 @@ static bool pcc_mbox_cmd_complete_check(struct pcc_chan_info *pchan)
+ 	return !!val;
+ }
+ 
++static void check_and_ack(struct pcc_chan_info *pchan, struct mbox_chan *chan)
++{
++	struct acpi_pcct_ext_pcc_shared_memory pcc_hdr;
++
++	if (pchan->type != ACPI_PCCT_TYPE_EXT_PCC_SLAVE_SUBSPACE)
++		return;
++	/* If the memory region has not been mapped, we cannot
++	 * determine if we need to send the message, but we still
++	 * need to set the cmd_update flag before returning.
++	 */
++	if (pchan->chan.shmem == NULL) {
++		pcc_chan_reg_read_modify_write(&pchan->cmd_update);
++		return;
++	}
++	memcpy_fromio(&pcc_hdr, pchan->chan.shmem,
++		      sizeof(struct acpi_pcct_ext_pcc_shared_memory));
++	/*
++	 * The PCC slave subspace channel needs to set the command complete bit
++	 * after processing a message. If the PCC_ACK_FLAG is set, it should also
++	 * ring the doorbell.
++	 *
++	 * The PCC master subspace channel clears chan_in_use to free the channel.
++	 */
++	if (le32_to_cpup(&pcc_hdr.flags) & PCC_ACK_FLAG_MASK)
++		pcc_send_data(chan, NULL);
++	else
++		pcc_chan_reg_read_modify_write(&pchan->cmd_update);
++}
++
+ /**
+  * pcc_mbox_irq - PCC mailbox interrupt handler
+  * @irq:	interrupt number
+@@ -306,14 +335,7 @@ static irqreturn_t pcc_mbox_irq(int irq, void *p)
+ 
+ 	mbox_chan_received_data(chan, NULL);
+ 
+-	/*
+-	 * The PCC slave subspace channel needs to set the command complete bit
+-	 * and ring doorbell after processing message.
+-	 *
+-	 * The PCC master subspace channel clears chan_in_use to free channel.
+-	 */
+-	if (pchan->type == ACPI_PCCT_TYPE_EXT_PCC_SLAVE_SUBSPACE)
+-		pcc_send_data(chan, NULL);
++	check_and_ack(pchan, chan);
+ 	pchan->chan_in_use = false;
+ 
+ 	return IRQ_HANDLED;
+@@ -365,14 +387,37 @@ EXPORT_SYMBOL_GPL(pcc_mbox_request_channel);
+ void pcc_mbox_free_channel(struct pcc_mbox_chan *pchan)
+ {
+ 	struct mbox_chan *chan = pchan->mchan;
++	struct pcc_chan_info *pchan_info;
++	struct pcc_mbox_chan *pcc_mbox_chan;
+ 
+ 	if (!chan || !chan->cl)
+ 		return;
++	pchan_info = chan->con_priv;
++	pcc_mbox_chan = &pchan_info->chan;
++	if (pcc_mbox_chan->shmem) {
++		iounmap(pcc_mbox_chan->shmem);
++		pcc_mbox_chan->shmem = NULL;
++	}
+ 
+ 	mbox_free_channel(chan);
+ }
+ EXPORT_SYMBOL_GPL(pcc_mbox_free_channel);
+ 
++int pcc_mbox_ioremap(struct mbox_chan *chan)
++{
++	struct pcc_chan_info *pchan_info;
++	struct pcc_mbox_chan *pcc_mbox_chan;
++
++	if (!chan || !chan->cl)
++		return -1;
++	pchan_info = chan->con_priv;
++	pcc_mbox_chan = &pchan_info->chan;
++	pcc_mbox_chan->shmem = ioremap(pcc_mbox_chan->shmem_base_addr,
++				       pcc_mbox_chan->shmem_size);
++	return 0;
++}
++EXPORT_SYMBOL_GPL(pcc_mbox_ioremap);
++
+ /**
+  * pcc_send_data - Called from Mailbox Controller code. Used
+  *		here only to ring the channel doorbell. The PCC client
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index e7abfdd77c3b66..e42f1400cea9d7 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -1718,7 +1718,7 @@ static CLOSURE_CALLBACK(cache_set_flush)
+ 	if (!IS_ERR_OR_NULL(c->gc_thread))
+ 		kthread_stop(c->gc_thread);
+ 
+-	if (!IS_ERR(c->root))
++	if (!IS_ERR_OR_NULL(c->root))
+ 		list_add(&c->root->list, &c->btree_cache);
+ 
+ 	/*
+diff --git a/drivers/media/pci/intel/ipu6/Kconfig b/drivers/media/pci/intel/ipu6/Kconfig
+index a4537818a58c05..cd1c545293574a 100644
+--- a/drivers/media/pci/intel/ipu6/Kconfig
++++ b/drivers/media/pci/intel/ipu6/Kconfig
+@@ -8,7 +8,7 @@ config VIDEO_INTEL_IPU6
+ 	select IOMMU_IOVA
+ 	select VIDEO_V4L2_SUBDEV_API
+ 	select MEDIA_CONTROLLER
+-	select VIDEOBUF2_DMA_CONTIG
++	select VIDEOBUF2_DMA_SG
+ 	select V4L2_FWNODE
+ 	help
+ 	  This is the 6th Gen Intel Image Processing Unit, found in Intel SoCs
+diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c b/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c
+index 03dbb0e0ea7957..bbb66b56ee88c9 100644
+--- a/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c
++++ b/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c
+@@ -13,17 +13,48 @@
+ 
+ #include <media/media-entity.h>
+ #include <media/v4l2-subdev.h>
+-#include <media/videobuf2-dma-contig.h>
++#include <media/videobuf2-dma-sg.h>
+ #include <media/videobuf2-v4l2.h>
+ 
+ #include "ipu6-bus.h"
++#include "ipu6-dma.h"
+ #include "ipu6-fw-isys.h"
+ #include "ipu6-isys.h"
+ #include "ipu6-isys-video.h"
+ 
+-static int queue_setup(struct vb2_queue *q, unsigned int *num_buffers,
+-		       unsigned int *num_planes, unsigned int sizes[],
+-		       struct device *alloc_devs[])
++static int ipu6_isys_buf_init(struct vb2_buffer *vb)
++{
++	struct ipu6_isys *isys = vb2_get_drv_priv(vb->vb2_queue);
++	struct sg_table *sg = vb2_dma_sg_plane_desc(vb, 0);
++	struct vb2_v4l2_buffer *vvb = to_vb2_v4l2_buffer(vb);
++	struct ipu6_isys_video_buffer *ivb =
++		vb2_buffer_to_ipu6_isys_video_buffer(vvb);
++	int ret;
++
++	ret = ipu6_dma_map_sgtable(isys->adev, sg, DMA_TO_DEVICE, 0);
++	if (ret)
++		return ret;
++
++	ivb->dma_addr = sg_dma_address(sg->sgl);
++
++	return 0;
++}
++
++static void ipu6_isys_buf_cleanup(struct vb2_buffer *vb)
++{
++	struct ipu6_isys *isys = vb2_get_drv_priv(vb->vb2_queue);
++	struct sg_table *sg = vb2_dma_sg_plane_desc(vb, 0);
++	struct vb2_v4l2_buffer *vvb = to_vb2_v4l2_buffer(vb);
++	struct ipu6_isys_video_buffer *ivb =
++		vb2_buffer_to_ipu6_isys_video_buffer(vvb);
++
++	ivb->dma_addr = 0;
++	ipu6_dma_unmap_sgtable(isys->adev, sg, DMA_TO_DEVICE, 0);
++}
++
++static int ipu6_isys_queue_setup(struct vb2_queue *q, unsigned int *num_buffers,
++				 unsigned int *num_planes, unsigned int sizes[],
++				 struct device *alloc_devs[])
+ {
+ 	struct ipu6_isys_queue *aq = vb2_queue_to_isys_queue(q);
+ 	struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq);
+@@ -207,9 +238,11 @@ ipu6_isys_buf_to_fw_frame_buf_pin(struct vb2_buffer *vb,
+ 				  struct ipu6_fw_isys_frame_buff_set_abi *set)
+ {
+ 	struct ipu6_isys_queue *aq = vb2_queue_to_isys_queue(vb->vb2_queue);
++	struct vb2_v4l2_buffer *vvb = to_vb2_v4l2_buffer(vb);
++	struct ipu6_isys_video_buffer *ivb =
++		vb2_buffer_to_ipu6_isys_video_buffer(vvb);
+ 
+-	set->output_pins[aq->fw_output].addr =
+-		vb2_dma_contig_plane_dma_addr(vb, 0);
++	set->output_pins[aq->fw_output].addr = ivb->dma_addr;
+ 	set->output_pins[aq->fw_output].out_buf_id = vb->index + 1;
+ }
+ 
+@@ -332,7 +365,7 @@ static void buf_queue(struct vb2_buffer *vb)
+ 
+ 	dev_dbg(dev, "queue buffer %u for %s\n", vb->index, av->vdev.name);
+ 
+-	dma = vb2_dma_contig_plane_dma_addr(vb, 0);
++	dma = ivb->dma_addr;
+ 	dev_dbg(dev, "iova: iova %pad\n", &dma);
+ 
+ 	spin_lock_irqsave(&aq->lock, flags);
+@@ -724,10 +757,14 @@ void ipu6_isys_queue_buf_ready(struct ipu6_isys_stream *stream,
+ 	}
+ 
+ 	list_for_each_entry_reverse(ib, &aq->active, head) {
++		struct ipu6_isys_video_buffer *ivb;
++		struct vb2_v4l2_buffer *vvb;
+ 		dma_addr_t addr;
+ 
+ 		vb = ipu6_isys_buffer_to_vb2_buffer(ib);
+-		addr = vb2_dma_contig_plane_dma_addr(vb, 0);
++		vvb = to_vb2_v4l2_buffer(vb);
++		ivb = vb2_buffer_to_ipu6_isys_video_buffer(vvb);
++		addr = ivb->dma_addr;
+ 
+ 		if (info->pin.addr != addr) {
+ 			if (first)
+@@ -766,10 +803,12 @@ void ipu6_isys_queue_buf_ready(struct ipu6_isys_stream *stream,
+ }
+ 
+ static const struct vb2_ops ipu6_isys_queue_ops = {
+-	.queue_setup = queue_setup,
++	.queue_setup = ipu6_isys_queue_setup,
+ 	.wait_prepare = vb2_ops_wait_prepare,
+ 	.wait_finish = vb2_ops_wait_finish,
++	.buf_init = ipu6_isys_buf_init,
+ 	.buf_prepare = ipu6_isys_buf_prepare,
++	.buf_cleanup = ipu6_isys_buf_cleanup,
+ 	.start_streaming = start_streaming,
+ 	.stop_streaming = stop_streaming,
+ 	.buf_queue = buf_queue,
+@@ -779,16 +818,17 @@ int ipu6_isys_queue_init(struct ipu6_isys_queue *aq)
+ {
+ 	struct ipu6_isys *isys = ipu6_isys_queue_to_video(aq)->isys;
+ 	struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq);
++	struct ipu6_bus_device *adev = isys->adev;
+ 	int ret;
+ 
+ 	/* no support for userptr */
+ 	if (!aq->vbq.io_modes)
+ 		aq->vbq.io_modes = VB2_MMAP | VB2_DMABUF;
+ 
+-	aq->vbq.drv_priv = aq;
++	aq->vbq.drv_priv = isys;
+ 	aq->vbq.ops = &ipu6_isys_queue_ops;
+ 	aq->vbq.lock = &av->mutex;
+-	aq->vbq.mem_ops = &vb2_dma_contig_memops;
++	aq->vbq.mem_ops = &vb2_dma_sg_memops;
+ 	aq->vbq.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ 	aq->vbq.min_queued_buffers = 1;
+ 	aq->vbq.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+@@ -797,8 +837,8 @@ int ipu6_isys_queue_init(struct ipu6_isys_queue *aq)
+ 	if (ret)
+ 		return ret;
+ 
+-	aq->dev = &isys->adev->auxdev.dev;
+-	aq->vbq.dev = &isys->adev->auxdev.dev;
++	aq->dev = &adev->auxdev.dev;
++	aq->vbq.dev = &adev->isp->pdev->dev;
+ 	spin_lock_init(&aq->lock);
+ 	INIT_LIST_HEAD(&aq->active);
+ 	INIT_LIST_HEAD(&aq->incoming);
+diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys-queue.h b/drivers/media/pci/intel/ipu6/ipu6-isys-queue.h
+index 95cfd4869d9356..fe8fc796a58f5d 100644
+--- a/drivers/media/pci/intel/ipu6/ipu6-isys-queue.h
++++ b/drivers/media/pci/intel/ipu6/ipu6-isys-queue.h
+@@ -38,6 +38,7 @@ struct ipu6_isys_buffer {
+ struct ipu6_isys_video_buffer {
+ 	struct vb2_v4l2_buffer vb_v4l2;
+ 	struct ipu6_isys_buffer ib;
++	dma_addr_t dma_addr;
+ };
+ 
+ #define IPU6_ISYS_BUFFER_LIST_FL_INCOMING	BIT(0)
+diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys.c b/drivers/media/pci/intel/ipu6/ipu6-isys.c
+index c4aff2e2009bab..c85e056cb904b2 100644
+--- a/drivers/media/pci/intel/ipu6/ipu6-isys.c
++++ b/drivers/media/pci/intel/ipu6/ipu6-isys.c
+@@ -34,6 +34,7 @@
+ 
+ #include "ipu6-bus.h"
+ #include "ipu6-cpd.h"
++#include "ipu6-dma.h"
+ #include "ipu6-isys.h"
+ #include "ipu6-isys-csi2.h"
+ #include "ipu6-mmu.h"
+@@ -933,29 +934,27 @@ static const struct dev_pm_ops isys_pm_ops = {
+ 
+ static void free_fw_msg_bufs(struct ipu6_isys *isys)
+ {
+-	struct device *dev = &isys->adev->auxdev.dev;
+ 	struct isys_fw_msgs *fwmsg, *safe;
+ 
+ 	list_for_each_entry_safe(fwmsg, safe, &isys->framebuflist, head)
+-		dma_free_attrs(dev, sizeof(struct isys_fw_msgs), fwmsg,
+-			       fwmsg->dma_addr, 0);
++		ipu6_dma_free(isys->adev, sizeof(struct isys_fw_msgs), fwmsg,
++			      fwmsg->dma_addr, 0);
+ 
+ 	list_for_each_entry_safe(fwmsg, safe, &isys->framebuflist_fw, head)
+-		dma_free_attrs(dev, sizeof(struct isys_fw_msgs), fwmsg,
+-			       fwmsg->dma_addr, 0);
++		ipu6_dma_free(isys->adev, sizeof(struct isys_fw_msgs), fwmsg,
++			      fwmsg->dma_addr, 0);
+ }
+ 
+ static int alloc_fw_msg_bufs(struct ipu6_isys *isys, int amount)
+ {
+-	struct device *dev = &isys->adev->auxdev.dev;
+ 	struct isys_fw_msgs *addr;
+ 	dma_addr_t dma_addr;
+ 	unsigned long flags;
+ 	unsigned int i;
+ 
+ 	for (i = 0; i < amount; i++) {
+-		addr = dma_alloc_attrs(dev, sizeof(struct isys_fw_msgs),
+-				       &dma_addr, GFP_KERNEL, 0);
++		addr = ipu6_dma_alloc(isys->adev, sizeof(*addr),
++				      &dma_addr, GFP_KERNEL, 0);
+ 		if (!addr)
+ 			break;
+ 		addr->dma_addr = dma_addr;
+@@ -974,8 +973,8 @@ static int alloc_fw_msg_bufs(struct ipu6_isys *isys, int amount)
+ 					struct isys_fw_msgs, head);
+ 		list_del(&addr->head);
+ 		spin_unlock_irqrestore(&isys->listlock, flags);
+-		dma_free_attrs(dev, sizeof(struct isys_fw_msgs), addr,
+-			       addr->dma_addr, 0);
++		ipu6_dma_free(isys->adev, sizeof(struct isys_fw_msgs), addr,
++			      addr->dma_addr, 0);
+ 		spin_lock_irqsave(&isys->listlock, flags);
+ 	}
+ 	spin_unlock_irqrestore(&isys->listlock, flags);
+diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c
+index 92efe6c1f47bae..bda729b42d05fe 100644
+--- a/drivers/media/usb/cx231xx/cx231xx-cards.c
++++ b/drivers/media/usb/cx231xx/cx231xx-cards.c
+@@ -994,6 +994,8 @@ const unsigned int cx231xx_bcount = ARRAY_SIZE(cx231xx_boards);
+ 
+ /* table of devices that work with this driver */
+ struct usb_device_id cx231xx_id_table[] = {
++	{USB_DEVICE(0x1D19, 0x6108),
++	.driver_info = CX231XX_BOARD_PV_XCAPTURE_USB},
+ 	{USB_DEVICE(0x1D19, 0x6109),
+ 	.driver_info = CX231XX_BOARD_PV_XCAPTURE_USB},
+ 	{USB_DEVICE(0x0572, 0x5A3C),
+diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
+index 675be4858366f0..9f38a9b23c0181 100644
+--- a/drivers/media/usb/uvc/uvc_driver.c
++++ b/drivers/media/usb/uvc/uvc_driver.c
+@@ -2474,9 +2474,22 @@ static const struct uvc_device_info uvc_quirk_force_y8 = {
+  * The Logitech cameras listed below have their interface class set to
+  * VENDOR_SPEC because they don't announce themselves as UVC devices, even
+  * though they are compliant.
++ *
++ * Sort these by vendor/product ID.
+  */
+ static const struct usb_device_id uvc_ids[] = {
+ 	/* Quanta ACER HD User Facing */
++	{ .match_flags		= USB_DEVICE_ID_MATCH_DEVICE
++				| USB_DEVICE_ID_MATCH_INT_INFO,
++	  .idVendor		= 0x0408,
++	  .idProduct		= 0x4033,
++	  .bInterfaceClass	= USB_CLASS_VIDEO,
++	  .bInterfaceSubClass	= 1,
++	  .bInterfaceProtocol	= UVC_PC_PROTOCOL_15,
++	  .driver_info		= (kernel_ulong_t)&(const struct uvc_device_info){
++		.uvc_version = 0x010a,
++	  } },
++	/* Quanta ACER HD User Facing */
+ 	{ .match_flags		= USB_DEVICE_ID_MATCH_DEVICE
+ 				| USB_DEVICE_ID_MATCH_INT_INFO,
+ 	  .idVendor		= 0x0408,
+@@ -3010,6 +3023,15 @@ static const struct usb_device_id uvc_ids[] = {
+ 	  .bInterfaceProtocol	= 0,
+ 	  .driver_info		= UVC_INFO_QUIRK(UVC_QUIRK_PROBE_MINMAX
+ 					| UVC_QUIRK_IGNORE_SELECTOR_UNIT) },
++	/* NXP Semiconductors IR VIDEO */
++	{ .match_flags		= USB_DEVICE_ID_MATCH_DEVICE
++				| USB_DEVICE_ID_MATCH_INT_INFO,
++	  .idVendor		= 0x1fc9,
++	  .idProduct		= 0x009b,
++	  .bInterfaceClass	= USB_CLASS_VIDEO,
++	  .bInterfaceSubClass	= 1,
++	  .bInterfaceProtocol	= 0,
++	  .driver_info		= (kernel_ulong_t)&uvc_quirk_probe_minmax },
+ 	/* Oculus VR Positional Tracker DK2 */
+ 	{ .match_flags		= USB_DEVICE_ID_MATCH_DEVICE
+ 				| USB_DEVICE_ID_MATCH_INT_INFO,
+@@ -3118,6 +3140,15 @@ static const struct usb_device_id uvc_ids[] = {
+ 	  .bInterfaceSubClass	= 1,
+ 	  .bInterfaceProtocol	= 0,
+ 	  .driver_info		= UVC_INFO_META(V4L2_META_FMT_D4XX) },
++	/* Intel D421 Depth Module */
++	{ .match_flags		= USB_DEVICE_ID_MATCH_DEVICE
++				| USB_DEVICE_ID_MATCH_INT_INFO,
++	  .idVendor		= 0x8086,
++	  .idProduct		= 0x1155,
++	  .bInterfaceClass	= USB_CLASS_VIDEO,
++	  .bInterfaceSubClass	= 1,
++	  .bInterfaceProtocol	= 0,
++	  .driver_info		= UVC_INFO_META(V4L2_META_FMT_D4XX) },
+ 	/* Generic USB Video Class */
+ 	{ USB_INTERFACE_INFO(USB_CLASS_VIDEO, 1, UVC_PC_PROTOCOL_UNDEFINED) },
+ 	{ USB_INTERFACE_INFO(USB_CLASS_VIDEO, 1, UVC_PC_PROTOCOL_15) },
+diff --git a/drivers/misc/eeprom/eeprom_93cx6.c b/drivers/misc/eeprom/eeprom_93cx6.c
+index 9627294fe3e951..4c9827fe921731 100644
+--- a/drivers/misc/eeprom/eeprom_93cx6.c
++++ b/drivers/misc/eeprom/eeprom_93cx6.c
+@@ -186,6 +186,11 @@ void eeprom_93cx6_read(struct eeprom_93cx6 *eeprom, const u8 word,
+ 	eeprom_93cx6_write_bits(eeprom, command,
+ 		PCI_EEPROM_WIDTH_OPCODE + eeprom->width);
+ 
++	if (has_quirk_extra_read_cycle(eeprom)) {
++		eeprom_93cx6_pulse_high(eeprom);
++		eeprom_93cx6_pulse_low(eeprom);
++	}
++
+ 	/*
+ 	 * Read the requested 16 bits.
+ 	 */
+@@ -252,6 +257,11 @@ void eeprom_93cx6_readb(struct eeprom_93cx6 *eeprom, const u8 byte,
+ 	eeprom_93cx6_write_bits(eeprom, command,
+ 		PCI_EEPROM_WIDTH_OPCODE + eeprom->width + 1);
+ 
++	if (has_quirk_extra_read_cycle(eeprom)) {
++		eeprom_93cx6_pulse_high(eeprom);
++		eeprom_93cx6_pulse_low(eeprom);
++	}
++
+ 	/*
+ 	 * Read the requested 8 bits.
+ 	 */
+diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
+index ef06a4d5d65bb2..1d08009f2bd83f 100644
+--- a/drivers/mmc/core/block.c
++++ b/drivers/mmc/core/block.c
+@@ -50,6 +50,7 @@
+ #include <linux/mmc/sd.h>
+ 
+ #include <linux/uaccess.h>
++#include <linux/unaligned.h>
+ 
+ #include "queue.h"
+ #include "block.h"
+@@ -993,11 +994,12 @@ static int mmc_sd_num_wr_blocks(struct mmc_card *card, u32 *written_blocks)
+ 	int err;
+ 	u32 result;
+ 	__be32 *blocks;
++	u8 resp_sz = mmc_card_ult_capacity(card) ? 8 : 4;
++	unsigned int noio_flag;
+ 
+ 	struct mmc_request mrq = {};
+ 	struct mmc_command cmd = {};
+ 	struct mmc_data data = {};
+-
+ 	struct scatterlist sg;
+ 
+ 	err = mmc_app_cmd(card->host, card);
+@@ -1008,7 +1010,7 @@ static int mmc_sd_num_wr_blocks(struct mmc_card *card, u32 *written_blocks)
+ 	cmd.arg = 0;
+ 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+ 
+-	data.blksz = 4;
++	data.blksz = resp_sz;
+ 	data.blocks = 1;
+ 	data.flags = MMC_DATA_READ;
+ 	data.sg = &sg;
+@@ -1018,15 +1020,29 @@ static int mmc_sd_num_wr_blocks(struct mmc_card *card, u32 *written_blocks)
+ 	mrq.cmd = &cmd;
+ 	mrq.data = &data;
+ 
+-	blocks = kmalloc(4, GFP_KERNEL);
++	noio_flag = memalloc_noio_save();
++	blocks = kmalloc(resp_sz, GFP_KERNEL);
++	memalloc_noio_restore(noio_flag);
+ 	if (!blocks)
+ 		return -ENOMEM;
+ 
+-	sg_init_one(&sg, blocks, 4);
++	sg_init_one(&sg, blocks, resp_sz);
+ 
+ 	mmc_wait_for_req(card->host, &mrq);
+ 
+-	result = ntohl(*blocks);
++	if (mmc_card_ult_capacity(card)) {
++		/*
++		 * Normally, ACMD22 returns the number of written sectors as
++		 * u32. SDUC, however, returns it as u64.  This is not a
++		 * superfluous requirement, because SDUC writes may exceed 2TB.
++		 * For Linux mmc, however, the previous write operation cannot
++		 * exceed the block layer limits, thus just make room
++		 * for a u64 and cast the response back to u32.
++		 */
++		result = clamp_val(get_unaligned_be64(blocks), 0, UINT_MAX);
++	} else {
++		result = ntohl(*blocks);
++	}
+ 	kfree(blocks);
+ 
+ 	if (cmd.error || data.error)
+diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
+index 0ddaee0eae54f0..4f3a26676ccb86 100644
+--- a/drivers/mmc/core/bus.c
++++ b/drivers/mmc/core/bus.c
+@@ -149,6 +149,8 @@ static void mmc_bus_shutdown(struct device *dev)
+ 	if (dev->driver && drv->shutdown)
+ 		drv->shutdown(card);
+ 
++	__mmc_stop_host(host);
++
+ 	if (host->bus_ops->shutdown) {
+ 		ret = host->bus_ops->shutdown(host);
+ 		if (ret)
+@@ -321,7 +323,9 @@ int mmc_add_card(struct mmc_card *card)
+ 	case MMC_TYPE_SD:
+ 		type = "SD";
+ 		if (mmc_card_blockaddr(card)) {
+-			if (mmc_card_ext_capacity(card))
++			if (mmc_card_ult_capacity(card))
++				type = "SDUC";
++			else if (mmc_card_ext_capacity(card))
+ 				type = "SDXC";
+ 			else
+ 				type = "SDHC";
+diff --git a/drivers/mmc/core/card.h b/drivers/mmc/core/card.h
+index b7754a1b8d9788..3205feb1e8ff6a 100644
+--- a/drivers/mmc/core/card.h
++++ b/drivers/mmc/core/card.h
+@@ -23,6 +23,7 @@
+ #define MMC_CARD_SDXC		(1<<3)		/* card is SDXC */
+ #define MMC_CARD_REMOVED	(1<<4)		/* card has been removed */
+ #define MMC_STATE_SUSPENDED	(1<<5)		/* card is suspended */
++#define MMC_CARD_SDUC		(1<<6)		/* card is SDUC */
+ 
+ #define mmc_card_present(c)	((c)->state & MMC_STATE_PRESENT)
+ #define mmc_card_readonly(c)	((c)->state & MMC_STATE_READONLY)
+@@ -30,11 +31,13 @@
+ #define mmc_card_ext_capacity(c) ((c)->state & MMC_CARD_SDXC)
+ #define mmc_card_removed(c)	((c) && ((c)->state & MMC_CARD_REMOVED))
+ #define mmc_card_suspended(c)	((c)->state & MMC_STATE_SUSPENDED)
++#define mmc_card_ult_capacity(c) ((c)->state & MMC_CARD_SDUC)
+ 
+ #define mmc_card_set_present(c)	((c)->state |= MMC_STATE_PRESENT)
+ #define mmc_card_set_readonly(c) ((c)->state |= MMC_STATE_READONLY)
+ #define mmc_card_set_blockaddr(c) ((c)->state |= MMC_STATE_BLOCKADDR)
+ #define mmc_card_set_ext_capacity(c) ((c)->state |= MMC_CARD_SDXC)
++#define mmc_card_set_ult_capacity(c) ((c)->state |= MMC_CARD_SDUC)
+ #define mmc_card_set_removed(c) ((c)->state |= MMC_CARD_REMOVED)
+ #define mmc_card_set_suspended(c) ((c)->state |= MMC_STATE_SUSPENDED)
+ #define mmc_card_clr_suspended(c) ((c)->state &= ~MMC_STATE_SUSPENDED)
+@@ -82,6 +85,7 @@ struct mmc_fixup {
+ #define CID_MANFID_SANDISK_SD   0x3
+ #define CID_MANFID_ATP          0x9
+ #define CID_MANFID_TOSHIBA      0x11
++#define CID_MANFID_GIGASTONE    0x12
+ #define CID_MANFID_MICRON       0x13
+ #define CID_MANFID_SAMSUNG      0x15
+ #define CID_MANFID_APACER       0x27
+@@ -284,4 +288,10 @@ static inline int mmc_card_broken_cache_flush(const struct mmc_card *c)
+ {
+ 	return c->quirks & MMC_QUIRK_BROKEN_CACHE_FLUSH;
+ }
++
++static inline int mmc_card_broken_sd_poweroff_notify(const struct mmc_card *c)
++{
++	return c->quirks & MMC_QUIRK_BROKEN_SD_POWEROFF_NOTIFY;
++}
++
+ #endif
+diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
+index d6c819dd68ed47..327029f5c59b79 100644
+--- a/drivers/mmc/core/core.c
++++ b/drivers/mmc/core/core.c
+@@ -2296,6 +2296,9 @@ void mmc_start_host(struct mmc_host *host)
+ 
+ void __mmc_stop_host(struct mmc_host *host)
+ {
++	if (host->rescan_disable)
++		return;
++
+ 	if (host->slot.cd_irq >= 0) {
+ 		mmc_gpio_set_cd_wake(host, false);
+ 		disable_irq(host->slot.cd_irq);
+diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h
+index 92905fc46436dd..89b512905be140 100644
+--- a/drivers/mmc/core/quirks.h
++++ b/drivers/mmc/core/quirks.h
+@@ -25,6 +25,15 @@ static const struct mmc_fixup __maybe_unused mmc_sd_fixups[] = {
+ 		   0, -1ull, SDIO_ANY_ID, SDIO_ANY_ID, add_quirk_sd,
+ 		   MMC_QUIRK_BROKEN_SD_CACHE, EXT_CSD_REV_ANY),
+ 
++	/*
++	 * GIGASTONE Gaming Plus microSD cards manufactured in 02/2022 never
++	 * clear the Flush Cache bit nor set the Poweroff Notification Ready bit.
++	 */
++	_FIXUP_EXT("ASTC", CID_MANFID_GIGASTONE, 0x3456, 2022, 2,
++		   0, -1ull, SDIO_ANY_ID, SDIO_ANY_ID, add_quirk_sd,
++		   MMC_QUIRK_BROKEN_SD_CACHE | MMC_QUIRK_BROKEN_SD_POWEROFF_NOTIFY,
++		   EXT_CSD_REV_ANY),
++
+ 	END_FIXUP
+ };
+ 
+diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
+index 12fe282bea77ef..63915541c0e494 100644
+--- a/drivers/mmc/core/sd.c
++++ b/drivers/mmc/core/sd.c
+@@ -100,7 +100,7 @@ void mmc_decode_cid(struct mmc_card *card)
+ /*
+  * Given a 128-bit response, decode to our card CSD structure.
+  */
+-static int mmc_decode_csd(struct mmc_card *card)
++static int mmc_decode_csd(struct mmc_card *card, bool is_sduc)
+ {
+ 	struct mmc_csd *csd = &card->csd;
+ 	unsigned int e, m, csd_struct;
+@@ -144,9 +144,10 @@ static int mmc_decode_csd(struct mmc_card *card)
+ 			mmc_card_set_readonly(card);
+ 		break;
+ 	case 1:
++	case 2:
+ 		/*
+-		 * This is a block-addressed SDHC or SDXC card. Most
+-		 * interesting fields are unused and have fixed
++		 * This is a block-addressed SDHC, SDXC or SDUC card.
++		 * Most interesting fields are unused and have fixed
+ 		 * values. To avoid getting tripped by buggy cards,
+ 		 * we assume those fixed values ourselves.
+ 		 */
+@@ -159,14 +160,19 @@ static int mmc_decode_csd(struct mmc_card *card)
+ 		e = unstuff_bits(resp, 96, 3);
+ 		csd->max_dtr	  = tran_exp[e] * tran_mant[m];
+ 		csd->cmdclass	  = unstuff_bits(resp, 84, 12);
+-		csd->c_size	  = unstuff_bits(resp, 48, 22);
+ 
+-		/* SDXC cards have a minimum C_SIZE of 0x00FFFF */
+-		if (csd->c_size >= 0xFFFF)
++		if (csd_struct == 1)
++			m = unstuff_bits(resp, 48, 22);
++		else
++			m = unstuff_bits(resp, 48, 28);
++		csd->c_size = m;
++
++		if (csd->c_size >= 0x400000 && is_sduc)
++			mmc_card_set_ult_capacity(card);
++		else if (csd->c_size >= 0xFFFF)
+ 			mmc_card_set_ext_capacity(card);
+ 
+-		m = unstuff_bits(resp, 48, 22);
+-		csd->capacity     = (1 + m) << 10;
++		csd->capacity     = (1 + (typeof(sector_t))m) << 10;
+ 
+ 		csd->read_blkbits = 9;
+ 		csd->read_partial = 0;
+@@ -876,7 +882,7 @@ int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid, u32 *rocr)
+ 	return err;
+ }
+ 
+-int mmc_sd_get_csd(struct mmc_card *card)
++int mmc_sd_get_csd(struct mmc_card *card, bool is_sduc)
+ {
+ 	int err;
+ 
+@@ -887,7 +893,7 @@ int mmc_sd_get_csd(struct mmc_card *card)
+ 	if (err)
+ 		return err;
+ 
+-	err = mmc_decode_csd(card);
++	err = mmc_decode_csd(card, is_sduc);
+ 	if (err)
+ 		return err;
+ 
+@@ -1107,7 +1113,7 @@ static int sd_parse_ext_reg_power(struct mmc_card *card, u8 fno, u8 page,
+ 	card->ext_power.rev = reg_buf[0] & 0xf;
+ 
+ 	/* Power Off Notification support at bit 4. */
+-	if (reg_buf[1] & BIT(4))
++	if ((reg_buf[1] & BIT(4)) && !mmc_card_broken_sd_poweroff_notify(card))
+ 		card->ext_power.feature_support |= SD_EXT_POWER_OFF_NOTIFY;
+ 
+ 	/* Power Sustenance support at bit 5. */
+@@ -1442,7 +1448,7 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
+ 	}
+ 
+ 	if (!oldcard) {
+-		err = mmc_sd_get_csd(card);
++		err = mmc_sd_get_csd(card, false);
+ 		if (err)
+ 			goto free_card;
+ 
+diff --git a/drivers/mmc/core/sd.h b/drivers/mmc/core/sd.h
+index fe6dd46927a423..7e8beface2ca61 100644
+--- a/drivers/mmc/core/sd.h
++++ b/drivers/mmc/core/sd.h
+@@ -10,7 +10,7 @@ struct mmc_host;
+ struct mmc_card;
+ 
+ int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid, u32 *rocr);
+-int mmc_sd_get_csd(struct mmc_card *card);
++int mmc_sd_get_csd(struct mmc_card *card, bool is_sduc);
+ void mmc_decode_cid(struct mmc_card *card);
+ int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card,
+ 	bool reinit);
+diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
+index 4fb247fde5c080..9566837c9848e6 100644
+--- a/drivers/mmc/core/sdio.c
++++ b/drivers/mmc/core/sdio.c
+@@ -769,7 +769,7 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
+ 	 * Read CSD, before selecting the card
+ 	 */
+ 	if (!oldcard && mmc_card_sd_combo(card)) {
+-		err = mmc_sd_get_csd(card);
++		err = mmc_sd_get_csd(card, false);
+ 		if (err)
+ 			goto remove;
+ 
+diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
+index 89018b6c97b9a7..813bc20cfb5a6c 100644
+--- a/drivers/mmc/host/mtk-sd.c
++++ b/drivers/mmc/host/mtk-sd.c
+@@ -2736,20 +2736,18 @@ static int msdc_drv_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	/* Allocate MMC host for this device */
+-	mmc = mmc_alloc_host(sizeof(struct msdc_host), &pdev->dev);
++	mmc = devm_mmc_alloc_host(&pdev->dev, sizeof(struct msdc_host));
+ 	if (!mmc)
+ 		return -ENOMEM;
+ 
+ 	host = mmc_priv(mmc);
+ 	ret = mmc_of_parse(mmc);
+ 	if (ret)
+-		goto host_free;
++		return ret;
+ 
+ 	host->base = devm_platform_ioremap_resource(pdev, 0);
+-	if (IS_ERR(host->base)) {
+-		ret = PTR_ERR(host->base);
+-		goto host_free;
+-	}
++	if (IS_ERR(host->base))
++		return PTR_ERR(host->base);
+ 
+ 	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ 	if (res) {
+@@ -2760,53 +2758,45 @@ static int msdc_drv_probe(struct platform_device *pdev)
+ 
+ 	ret = mmc_regulator_get_supply(mmc);
+ 	if (ret)
+-		goto host_free;
++		return ret;
+ 
+ 	ret = msdc_of_clock_parse(pdev, host);
+ 	if (ret)
+-		goto host_free;
++		return ret;
+ 
+ 	host->reset = devm_reset_control_get_optional_exclusive(&pdev->dev,
+ 								"hrst");
+-	if (IS_ERR(host->reset)) {
+-		ret = PTR_ERR(host->reset);
+-		goto host_free;
+-	}
++	if (IS_ERR(host->reset))
++		return PTR_ERR(host->reset);
+ 
+ 	/* only eMMC has crypto property */
+ 	if (!(mmc->caps2 & MMC_CAP2_NO_MMC)) {
+ 		host->crypto_clk = devm_clk_get_optional(&pdev->dev, "crypto");
+ 		if (IS_ERR(host->crypto_clk))
+-			host->crypto_clk = NULL;
+-		else
++			return PTR_ERR(host->crypto_clk);
++		else if (host->crypto_clk)
+ 			mmc->caps2 |= MMC_CAP2_CRYPTO;
+ 	}
+ 
+ 	host->irq = platform_get_irq(pdev, 0);
+-	if (host->irq < 0) {
+-		ret = host->irq;
+-		goto host_free;
+-	}
++	if (host->irq < 0)
++		return host->irq;
+ 
+ 	host->pinctrl = devm_pinctrl_get(&pdev->dev);
+-	if (IS_ERR(host->pinctrl)) {
+-		ret = PTR_ERR(host->pinctrl);
+-		dev_err(&pdev->dev, "Cannot find pinctrl!\n");
+-		goto host_free;
+-	}
++	if (IS_ERR(host->pinctrl))
++		return dev_err_probe(&pdev->dev, PTR_ERR(host->pinctrl),
++				     "Cannot find pinctrl");
+ 
+ 	host->pins_default = pinctrl_lookup_state(host->pinctrl, "default");
+ 	if (IS_ERR(host->pins_default)) {
+-		ret = PTR_ERR(host->pins_default);
+ 		dev_err(&pdev->dev, "Cannot find pinctrl default!\n");
+-		goto host_free;
++		return PTR_ERR(host->pins_default);
+ 	}
+ 
+ 	host->pins_uhs = pinctrl_lookup_state(host->pinctrl, "state_uhs");
+ 	if (IS_ERR(host->pins_uhs)) {
+-		ret = PTR_ERR(host->pins_uhs);
+ 		dev_err(&pdev->dev, "Cannot find pinctrl uhs!\n");
+-		goto host_free;
++		return PTR_ERR(host->pins_uhs);
+ 	}
+ 
+ 	/* Support for SDIO eint irq ? */
+@@ -2885,7 +2875,7 @@ static int msdc_drv_probe(struct platform_device *pdev)
+ 	ret = msdc_ungate_clock(host);
+ 	if (ret) {
+ 		dev_err(&pdev->dev, "Cannot ungate clocks!\n");
+-		goto release_mem;
++		goto release_clk;
+ 	}
+ 	msdc_init_hw(host);
+ 
+@@ -2895,14 +2885,14 @@ static int msdc_drv_probe(struct platform_device *pdev)
+ 					     GFP_KERNEL);
+ 		if (!host->cq_host) {
+ 			ret = -ENOMEM;
+-			goto host_free;
++			goto release;
+ 		}
+ 		host->cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
+ 		host->cq_host->mmio = host->base + 0x800;
+ 		host->cq_host->ops = &msdc_cmdq_ops;
+ 		ret = cqhci_init(host->cq_host, mmc, true);
+ 		if (ret)
+-			goto host_free;
++			goto release;
+ 		mmc->max_segs = 128;
+ 		/* cqhci 16bit length */
+ 		/* 0 size, means 65536 so we don't have to -1 here */
+@@ -2929,9 +2919,10 @@ static int msdc_drv_probe(struct platform_device *pdev)
+ end:
+ 	pm_runtime_disable(host->dev);
+ release:
+-	platform_set_drvdata(pdev, NULL);
+ 	msdc_deinit_hw(host);
++release_clk:
+ 	msdc_gate_clock(host);
++	platform_set_drvdata(pdev, NULL);
+ release_mem:
+ 	if (host->dma.gpd)
+ 		dma_free_coherent(&pdev->dev,
+@@ -2939,11 +2930,8 @@ static int msdc_drv_probe(struct platform_device *pdev)
+ 			host->dma.gpd, host->dma.gpd_addr);
+ 	if (host->dma.bd)
+ 		dma_free_coherent(&pdev->dev,
+-			MAX_BD_NUM * sizeof(struct mt_bdma_desc),
+-			host->dma.bd, host->dma.bd_addr);
+-host_free:
+-	mmc_free_host(mmc);
+-
++				  MAX_BD_NUM * sizeof(struct mt_bdma_desc),
++				  host->dma.bd, host->dma.bd_addr);
+ 	return ret;
+ }
+ 
+@@ -2968,9 +2956,7 @@ static void msdc_drv_remove(struct platform_device *pdev)
+ 			2 * sizeof(struct mt_gpdma_desc),
+ 			host->dma.gpd, host->dma.gpd_addr);
+ 	dma_free_coherent(&pdev->dev, MAX_BD_NUM * sizeof(struct mt_bdma_desc),
+-			host->dma.bd, host->dma.bd_addr);
+-
+-	mmc_free_host(mmc);
++			  host->dma.bd, host->dma.bd_addr);
+ }
+ 
+ static void msdc_save_reg(struct msdc_host *host)
+diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
+index 8f0bc6dca2b040..ef3a44f2dff16d 100644
+--- a/drivers/mmc/host/sdhci-esdhc-imx.c
++++ b/drivers/mmc/host/sdhci-esdhc-imx.c
+@@ -238,6 +238,7 @@ struct esdhc_platform_data {
+ 
+ struct esdhc_soc_data {
+ 	u32 flags;
++	u32 quirks;
+ };
+ 
+ static const struct esdhc_soc_data esdhc_imx25_data = {
+@@ -309,10 +310,12 @@ static struct esdhc_soc_data usdhc_imx7ulp_data = {
+ 			| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
+ 			| ESDHC_FLAG_PMQOS | ESDHC_FLAG_HS400
+ 			| ESDHC_FLAG_STATE_LOST_IN_LPMODE,
++	.quirks = SDHCI_QUIRK_NO_LED,
+ };
+ static struct esdhc_soc_data usdhc_imxrt1050_data = {
+ 	.flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
+ 			| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200,
++	.quirks = SDHCI_QUIRK_NO_LED,
+ };
+ 
+ static struct esdhc_soc_data usdhc_imx8qxp_data = {
+@@ -321,6 +324,7 @@ static struct esdhc_soc_data usdhc_imx8qxp_data = {
+ 			| ESDHC_FLAG_HS400 | ESDHC_FLAG_HS400_ES
+ 			| ESDHC_FLAG_STATE_LOST_IN_LPMODE
+ 			| ESDHC_FLAG_CLK_RATE_LOST_IN_PM_RUNTIME,
++	.quirks = SDHCI_QUIRK_NO_LED,
+ };
+ 
+ static struct esdhc_soc_data usdhc_imx8mm_data = {
+@@ -328,6 +332,7 @@ static struct esdhc_soc_data usdhc_imx8mm_data = {
+ 			| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
+ 			| ESDHC_FLAG_HS400 | ESDHC_FLAG_HS400_ES
+ 			| ESDHC_FLAG_STATE_LOST_IN_LPMODE,
++	.quirks = SDHCI_QUIRK_NO_LED,
+ };
+ 
+ struct pltfm_imx_data {
+@@ -1687,6 +1692,7 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
+ 
+ 	imx_data->socdata = device_get_match_data(&pdev->dev);
+ 
++	host->quirks |= imx_data->socdata->quirks;
+ 	if (imx_data->socdata->flags & ESDHC_FLAG_PMQOS)
+ 		cpu_latency_qos_add_request(&imx_data->pm_qos_req, 0);
+ 
+diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
+index ed45ed0bdafd96..2e2e15e2d8fb8b 100644
+--- a/drivers/mmc/host/sdhci-pci-core.c
++++ b/drivers/mmc/host/sdhci-pci-core.c
+@@ -21,6 +21,7 @@
+ #include <linux/io.h>
+ #include <linux/iopoll.h>
+ #include <linux/gpio.h>
++#include <linux/gpio/machine.h>
+ #include <linux/pm_runtime.h>
+ #include <linux/pm_qos.h>
+ #include <linux/debugfs.h>
+@@ -1235,6 +1236,29 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = {
+ 	.priv_size	= sizeof(struct intel_host),
+ };
+ 
++/* DMI quirks for devices with missing or broken CD GPIO info */
++static const struct gpiod_lookup_table vexia_edu_atla10_cd_gpios = {
++	.dev_id = "0000:00:12.0",
++	.table = {
++		GPIO_LOOKUP("INT33FC:00", 38, "cd", GPIO_ACTIVE_HIGH),
++		{ }
++	},
++};
++
++static const struct dmi_system_id sdhci_intel_byt_cd_gpio_override[] = {
++	{
++		/* Vexia Edu Atla 10 tablet 9V version */
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
++			DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
++			/* Above strings are too generic, also match on BIOS date */
++			DMI_MATCH(DMI_BIOS_DATE, "08/25/2014"),
++		},
++		.driver_data = (void *)&vexia_edu_atla10_cd_gpios,
++	},
++	{ }
++};
++
+ static const struct sdhci_pci_fixes sdhci_intel_byt_sd = {
+ #ifdef CONFIG_PM_SLEEP
+ 	.resume		= byt_resume,
+@@ -1253,6 +1277,7 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_sd = {
+ 	.add_host	= byt_add_host,
+ 	.remove_slot	= byt_remove_slot,
+ 	.ops		= &sdhci_intel_byt_ops,
++	.cd_gpio_override = sdhci_intel_byt_cd_gpio_override,
+ 	.priv_size	= sizeof(struct intel_host),
+ };
+ 
+@@ -2054,6 +2079,42 @@ static const struct dev_pm_ops sdhci_pci_pm_ops = {
+  *                                                                           *
+ \*****************************************************************************/
+ 
++static struct gpiod_lookup_table *sdhci_pci_add_gpio_lookup_table(
++	struct sdhci_pci_chip *chip)
++{
++	struct gpiod_lookup_table *cd_gpio_lookup_table;
++	const struct dmi_system_id *dmi_id = NULL;
++	size_t count;
++
++	if (chip->fixes && chip->fixes->cd_gpio_override)
++		dmi_id = dmi_first_match(chip->fixes->cd_gpio_override);
++
++	if (!dmi_id)
++		return NULL;
++
++	cd_gpio_lookup_table = dmi_id->driver_data;
++	for (count = 0; cd_gpio_lookup_table->table[count].key; count++)
++		;
++
++	cd_gpio_lookup_table = kmemdup(dmi_id->driver_data,
++				       /* count + 1 terminating entry */
++				       struct_size(cd_gpio_lookup_table, table, count + 1),
++				       GFP_KERNEL);
++	if (!cd_gpio_lookup_table)
++		return ERR_PTR(-ENOMEM);
++
++	gpiod_add_lookup_table(cd_gpio_lookup_table);
++	return cd_gpio_lookup_table;
++}
++
++static void sdhci_pci_remove_gpio_lookup_table(struct gpiod_lookup_table *lookup_table)
++{
++	if (lookup_table) {
++		gpiod_remove_lookup_table(lookup_table);
++		kfree(lookup_table);
++	}
++}
++
+ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
+ 	struct pci_dev *pdev, struct sdhci_pci_chip *chip, int first_bar,
+ 	int slotno)
+@@ -2129,8 +2190,19 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
+ 		device_init_wakeup(&pdev->dev, true);
+ 
+ 	if (slot->cd_idx >= 0) {
++		struct gpiod_lookup_table *cd_gpio_lookup_table;
++
++		cd_gpio_lookup_table = sdhci_pci_add_gpio_lookup_table(chip);
++		if (IS_ERR(cd_gpio_lookup_table)) {
++			ret = PTR_ERR(cd_gpio_lookup_table);
++			goto remove;
++		}
++
+ 		ret = mmc_gpiod_request_cd(host->mmc, "cd", slot->cd_idx,
+ 					   slot->cd_override_level, 0);
++
++		sdhci_pci_remove_gpio_lookup_table(cd_gpio_lookup_table);
++
+ 		if (ret && ret != -EPROBE_DEFER)
+ 			ret = mmc_gpiod_request_cd(host->mmc, NULL,
+ 						   slot->cd_idx,
+diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
+index 153704f812edc8..4973fa8592175e 100644
+--- a/drivers/mmc/host/sdhci-pci.h
++++ b/drivers/mmc/host/sdhci-pci.h
+@@ -156,6 +156,7 @@ struct sdhci_pci_fixes {
+ #endif
+ 
+ 	const struct sdhci_ops	*ops;
++	const struct dmi_system_id *cd_gpio_override;
+ 	size_t			priv_size;
+ };
+ 
+diff --git a/drivers/net/can/c_can/c_can_main.c b/drivers/net/can/c_can/c_can_main.c
+index 511615dc334196..cc371d0c9f3c76 100644
+--- a/drivers/net/can/c_can/c_can_main.c
++++ b/drivers/net/can/c_can/c_can_main.c
+@@ -1014,49 +1014,57 @@ static int c_can_handle_bus_err(struct net_device *dev,
+ 
+ 	/* propagate the error condition to the CAN stack */
+ 	skb = alloc_can_err_skb(dev, &cf);
+-	if (unlikely(!skb))
+-		return 0;
+ 
+ 	/* check for 'last error code' which tells us the
+ 	 * type of the last error to occur on the CAN bus
+ 	 */
+-	cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
++	if (likely(skb))
++		cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+ 
+ 	switch (lec_type) {
+ 	case LEC_STUFF_ERROR:
+ 		netdev_dbg(dev, "stuff error\n");
+-		cf->data[2] |= CAN_ERR_PROT_STUFF;
++		if (likely(skb))
++			cf->data[2] |= CAN_ERR_PROT_STUFF;
+ 		stats->rx_errors++;
+ 		break;
+ 	case LEC_FORM_ERROR:
+ 		netdev_dbg(dev, "form error\n");
+-		cf->data[2] |= CAN_ERR_PROT_FORM;
++		if (likely(skb))
++			cf->data[2] |= CAN_ERR_PROT_FORM;
+ 		stats->rx_errors++;
+ 		break;
+ 	case LEC_ACK_ERROR:
+ 		netdev_dbg(dev, "ack error\n");
+-		cf->data[3] = CAN_ERR_PROT_LOC_ACK;
++		if (likely(skb))
++			cf->data[3] = CAN_ERR_PROT_LOC_ACK;
+ 		stats->tx_errors++;
+ 		break;
+ 	case LEC_BIT1_ERROR:
+ 		netdev_dbg(dev, "bit1 error\n");
+-		cf->data[2] |= CAN_ERR_PROT_BIT1;
++		if (likely(skb))
++			cf->data[2] |= CAN_ERR_PROT_BIT1;
+ 		stats->tx_errors++;
+ 		break;
+ 	case LEC_BIT0_ERROR:
+ 		netdev_dbg(dev, "bit0 error\n");
+-		cf->data[2] |= CAN_ERR_PROT_BIT0;
++		if (likely(skb))
++			cf->data[2] |= CAN_ERR_PROT_BIT0;
+ 		stats->tx_errors++;
+ 		break;
+ 	case LEC_CRC_ERROR:
+ 		netdev_dbg(dev, "CRC error\n");
+-		cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
++		if (likely(skb))
++			cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
+ 		stats->rx_errors++;
+ 		break;
+ 	default:
+ 		break;
+ 	}
+ 
++	if (unlikely(!skb))
++		return 0;
++
+ 	netif_receive_skb(skb);
+ 	return 1;
+ }
+diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c
+index 6792c14fd7eb00..681643ab37804e 100644
+--- a/drivers/net/can/dev/dev.c
++++ b/drivers/net/can/dev/dev.c
+@@ -468,7 +468,7 @@ static int can_set_termination(struct net_device *ndev, u16 term)
+ 	else
+ 		set = 0;
+ 
+-	gpiod_set_value(priv->termination_gpio, set);
++	gpiod_set_value_cansleep(priv->termination_gpio, set);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c
+index d32b10900d2f62..c86b57d47085fd 100644
+--- a/drivers/net/can/ifi_canfd/ifi_canfd.c
++++ b/drivers/net/can/ifi_canfd/ifi_canfd.c
+@@ -390,36 +390,55 @@ static int ifi_canfd_handle_lec_err(struct net_device *ndev)
+ 		return 0;
+ 
+ 	priv->can.can_stats.bus_error++;
+-	stats->rx_errors++;
+ 
+ 	/* Propagate the error condition to the CAN stack. */
+ 	skb = alloc_can_err_skb(ndev, &cf);
+-	if (unlikely(!skb))
+-		return 0;
+ 
+ 	/* Read the error counter register and check for new errors. */
+-	cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
++	if (likely(skb))
++		cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+ 
+-	if (errctr & IFI_CANFD_ERROR_CTR_OVERLOAD_FIRST)
+-		cf->data[2] |= CAN_ERR_PROT_OVERLOAD;
++	if (errctr & IFI_CANFD_ERROR_CTR_OVERLOAD_FIRST) {
++		stats->rx_errors++;
++		if (likely(skb))
++			cf->data[2] |= CAN_ERR_PROT_OVERLOAD;
++	}
+ 
+-	if (errctr & IFI_CANFD_ERROR_CTR_ACK_ERROR_FIRST)
+-		cf->data[3] = CAN_ERR_PROT_LOC_ACK;
++	if (errctr & IFI_CANFD_ERROR_CTR_ACK_ERROR_FIRST) {
++		stats->tx_errors++;
++		if (likely(skb))
++			cf->data[3] = CAN_ERR_PROT_LOC_ACK;
++	}
+ 
+-	if (errctr & IFI_CANFD_ERROR_CTR_BIT0_ERROR_FIRST)
+-		cf->data[2] |= CAN_ERR_PROT_BIT0;
++	if (errctr & IFI_CANFD_ERROR_CTR_BIT0_ERROR_FIRST) {
++		stats->tx_errors++;
++		if (likely(skb))
++			cf->data[2] |= CAN_ERR_PROT_BIT0;
++	}
+ 
+-	if (errctr & IFI_CANFD_ERROR_CTR_BIT1_ERROR_FIRST)
+-		cf->data[2] |= CAN_ERR_PROT_BIT1;
++	if (errctr & IFI_CANFD_ERROR_CTR_BIT1_ERROR_FIRST) {
++		stats->tx_errors++;
++		if (likely(skb))
++			cf->data[2] |= CAN_ERR_PROT_BIT1;
++	}
+ 
+-	if (errctr & IFI_CANFD_ERROR_CTR_STUFF_ERROR_FIRST)
+-		cf->data[2] |= CAN_ERR_PROT_STUFF;
++	if (errctr & IFI_CANFD_ERROR_CTR_STUFF_ERROR_FIRST) {
++		stats->rx_errors++;
++		if (likely(skb))
++			cf->data[2] |= CAN_ERR_PROT_STUFF;
++	}
+ 
+-	if (errctr & IFI_CANFD_ERROR_CTR_CRC_ERROR_FIRST)
+-		cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
++	if (errctr & IFI_CANFD_ERROR_CTR_CRC_ERROR_FIRST) {
++		stats->rx_errors++;
++		if (likely(skb))
++			cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
++	}
+ 
+-	if (errctr & IFI_CANFD_ERROR_CTR_FORM_ERROR_FIRST)
+-		cf->data[2] |= CAN_ERR_PROT_FORM;
++	if (errctr & IFI_CANFD_ERROR_CTR_FORM_ERROR_FIRST) {
++		stats->rx_errors++;
++		if (likely(skb))
++			cf->data[2] |= CAN_ERR_PROT_FORM;
++	}
+ 
+ 	/* Reset the error counter, ack the IRQ and re-enable the counter. */
+ 	writel(IFI_CANFD_ERROR_CTR_ER_RESET, priv->base + IFI_CANFD_ERROR_CTR);
+@@ -427,6 +446,9 @@ static int ifi_canfd_handle_lec_err(struct net_device *ndev)
+ 	       priv->base + IFI_CANFD_INTERRUPT);
+ 	writel(IFI_CANFD_ERROR_CTR_ER_ENABLE, priv->base + IFI_CANFD_ERROR_CTR);
+ 
++	if (unlikely(!skb))
++		return 0;
++
+ 	netif_receive_skb(skb);
+ 
+ 	return 1;
+diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
+index 16e9e7d7527d97..533bcb77c9f934 100644
+--- a/drivers/net/can/m_can/m_can.c
++++ b/drivers/net/can/m_can/m_can.c
+@@ -695,47 +695,60 @@ static int m_can_handle_lec_err(struct net_device *dev,
+ 	u32 timestamp = 0;
+ 
+ 	cdev->can.can_stats.bus_error++;
+-	stats->rx_errors++;
+ 
+ 	/* propagate the error condition to the CAN stack */
+ 	skb = alloc_can_err_skb(dev, &cf);
+-	if (unlikely(!skb))
+-		return 0;
+ 
+ 	/* check for 'last error code' which tells us the
+ 	 * type of the last error to occur on the CAN bus
+ 	 */
+-	cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
++	if (likely(skb))
++		cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+ 
+ 	switch (lec_type) {
+ 	case LEC_STUFF_ERROR:
+ 		netdev_dbg(dev, "stuff error\n");
+-		cf->data[2] |= CAN_ERR_PROT_STUFF;
++		stats->rx_errors++;
++		if (likely(skb))
++			cf->data[2] |= CAN_ERR_PROT_STUFF;
+ 		break;
+ 	case LEC_FORM_ERROR:
+ 		netdev_dbg(dev, "form error\n");
+-		cf->data[2] |= CAN_ERR_PROT_FORM;
++		stats->rx_errors++;
++		if (likely(skb))
++			cf->data[2] |= CAN_ERR_PROT_FORM;
+ 		break;
+ 	case LEC_ACK_ERROR:
+ 		netdev_dbg(dev, "ack error\n");
+-		cf->data[3] = CAN_ERR_PROT_LOC_ACK;
++		stats->tx_errors++;
++		if (likely(skb))
++			cf->data[3] = CAN_ERR_PROT_LOC_ACK;
+ 		break;
+ 	case LEC_BIT1_ERROR:
+ 		netdev_dbg(dev, "bit1 error\n");
+-		cf->data[2] |= CAN_ERR_PROT_BIT1;
++		stats->tx_errors++;
++		if (likely(skb))
++			cf->data[2] |= CAN_ERR_PROT_BIT1;
+ 		break;
+ 	case LEC_BIT0_ERROR:
+ 		netdev_dbg(dev, "bit0 error\n");
+-		cf->data[2] |= CAN_ERR_PROT_BIT0;
++		stats->tx_errors++;
++		if (likely(skb))
++			cf->data[2] |= CAN_ERR_PROT_BIT0;
+ 		break;
+ 	case LEC_CRC_ERROR:
+ 		netdev_dbg(dev, "CRC error\n");
+-		cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
++		stats->rx_errors++;
++		if (likely(skb))
++			cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
+ 		break;
+ 	default:
+ 		break;
+ 	}
+ 
++	if (unlikely(!skb))
++		return 0;
++
+ 	if (cdev->is_peripheral)
+ 		timestamp = m_can_get_timestamp(cdev);
+ 
+diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
+index ddb3247948ad2f..4d245857ef1cec 100644
+--- a/drivers/net/can/sja1000/sja1000.c
++++ b/drivers/net/can/sja1000/sja1000.c
+@@ -416,8 +416,6 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
+ 	int ret = 0;
+ 
+ 	skb = alloc_can_err_skb(dev, &cf);
+-	if (skb == NULL)
+-		return -ENOMEM;
+ 
+ 	txerr = priv->read_reg(priv, SJA1000_TXERR);
+ 	rxerr = priv->read_reg(priv, SJA1000_RXERR);
+@@ -425,8 +423,11 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
+ 	if (isrc & IRQ_DOI) {
+ 		/* data overrun interrupt */
+ 		netdev_dbg(dev, "data overrun interrupt\n");
+-		cf->can_id |= CAN_ERR_CRTL;
+-		cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
++		if (skb) {
++			cf->can_id |= CAN_ERR_CRTL;
++			cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
++		}
++
+ 		stats->rx_over_errors++;
+ 		stats->rx_errors++;
+ 		sja1000_write_cmdreg(priv, CMD_CDO);	/* clear bit */
+@@ -452,7 +453,7 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
+ 		else
+ 			state = CAN_STATE_ERROR_ACTIVE;
+ 	}
+-	if (state != CAN_STATE_BUS_OFF) {
++	if (state != CAN_STATE_BUS_OFF && skb) {
+ 		cf->can_id |= CAN_ERR_CNT;
+ 		cf->data[6] = txerr;
+ 		cf->data[7] = rxerr;
+@@ -460,33 +461,38 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
+ 	if (isrc & IRQ_BEI) {
+ 		/* bus error interrupt */
+ 		priv->can.can_stats.bus_error++;
+-		stats->rx_errors++;
+ 
+ 		ecc = priv->read_reg(priv, SJA1000_ECC);
++		if (skb) {
++			cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+ 
+-		cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+-
+-		/* set error type */
+-		switch (ecc & ECC_MASK) {
+-		case ECC_BIT:
+-			cf->data[2] |= CAN_ERR_PROT_BIT;
+-			break;
+-		case ECC_FORM:
+-			cf->data[2] |= CAN_ERR_PROT_FORM;
+-			break;
+-		case ECC_STUFF:
+-			cf->data[2] |= CAN_ERR_PROT_STUFF;
+-			break;
+-		default:
+-			break;
+-		}
++			/* set error type */
++			switch (ecc & ECC_MASK) {
++			case ECC_BIT:
++				cf->data[2] |= CAN_ERR_PROT_BIT;
++				break;
++			case ECC_FORM:
++				cf->data[2] |= CAN_ERR_PROT_FORM;
++				break;
++			case ECC_STUFF:
++				cf->data[2] |= CAN_ERR_PROT_STUFF;
++				break;
++			default:
++				break;
++			}
+ 
+-		/* set error location */
+-		cf->data[3] = ecc & ECC_SEG;
++			/* set error location */
++			cf->data[3] = ecc & ECC_SEG;
++		}
+ 
+ 		/* Error occurred during transmission? */
+-		if ((ecc & ECC_DIR) == 0)
+-			cf->data[2] |= CAN_ERR_PROT_TX;
++		if ((ecc & ECC_DIR) == 0) {
++			stats->tx_errors++;
++			if (skb)
++				cf->data[2] |= CAN_ERR_PROT_TX;
++		} else {
++			stats->rx_errors++;
++		}
+ 	}
+ 	if (isrc & IRQ_EPI) {
+ 		/* error passive interrupt */
+@@ -502,8 +508,10 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
+ 		netdev_dbg(dev, "arbitration lost interrupt\n");
+ 		alc = priv->read_reg(priv, SJA1000_ALC);
+ 		priv->can.can_stats.arbitration_lost++;
+-		cf->can_id |= CAN_ERR_LOSTARB;
+-		cf->data[0] = alc & 0x1f;
++		if (skb) {
++			cf->can_id |= CAN_ERR_LOSTARB;
++			cf->data[0] = alc & 0x1f;
++		}
+ 	}
+ 
+ 	if (state != priv->can.state) {
+@@ -516,6 +524,9 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
+ 			can_bus_off(dev);
+ 	}
+ 
++	if (!skb)
++		return -ENOMEM;
++
+ 	netif_rx(skb);
+ 
+ 	return ret;
+diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c
+index 148d974ebb2107..1b9501ee10deb5 100644
+--- a/drivers/net/can/spi/hi311x.c
++++ b/drivers/net/can/spi/hi311x.c
+@@ -671,9 +671,9 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id)
+ 			tx_state = txerr >= rxerr ? new_state : 0;
+ 			rx_state = txerr <= rxerr ? new_state : 0;
+ 			can_change_state(net, cf, tx_state, rx_state);
+-			netif_rx(skb);
+ 
+ 			if (new_state == CAN_STATE_BUS_OFF) {
++				netif_rx(skb);
+ 				can_bus_off(net);
+ 				if (priv->can.restart_ms == 0) {
+ 					priv->force_quit = 1;
+@@ -684,6 +684,7 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id)
+ 				cf->can_id |= CAN_ERR_CNT;
+ 				cf->data[6] = txerr;
+ 				cf->data[7] = rxerr;
++				netif_rx(skb);
+ 			}
+ 		}
+ 
+@@ -696,27 +697,38 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id)
+ 			/* Check for protocol errors */
+ 			if (eflag & HI3110_ERR_PROTOCOL_MASK) {
+ 				skb = alloc_can_err_skb(net, &cf);
+-				if (!skb)
+-					break;
++				if (skb)
++					cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+ 
+-				cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+ 				priv->can.can_stats.bus_error++;
+-				priv->net->stats.rx_errors++;
+-				if (eflag & HI3110_ERR_BITERR)
+-					cf->data[2] |= CAN_ERR_PROT_BIT;
+-				else if (eflag & HI3110_ERR_FRMERR)
+-					cf->data[2] |= CAN_ERR_PROT_FORM;
+-				else if (eflag & HI3110_ERR_STUFERR)
+-					cf->data[2] |= CAN_ERR_PROT_STUFF;
+-				else if (eflag & HI3110_ERR_CRCERR)
+-					cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ;
+-				else if (eflag & HI3110_ERR_ACKERR)
+-					cf->data[3] |= CAN_ERR_PROT_LOC_ACK;
+-
+-				cf->data[6] = hi3110_read(spi, HI3110_READ_TEC);
+-				cf->data[7] = hi3110_read(spi, HI3110_READ_REC);
++				if (eflag & HI3110_ERR_BITERR) {
++					priv->net->stats.tx_errors++;
++					if (skb)
++						cf->data[2] |= CAN_ERR_PROT_BIT;
++				} else if (eflag & HI3110_ERR_FRMERR) {
++					priv->net->stats.rx_errors++;
++					if (skb)
++						cf->data[2] |= CAN_ERR_PROT_FORM;
++				} else if (eflag & HI3110_ERR_STUFERR) {
++					priv->net->stats.rx_errors++;
++					if (skb)
++						cf->data[2] |= CAN_ERR_PROT_STUFF;
++				} else if (eflag & HI3110_ERR_CRCERR) {
++					priv->net->stats.rx_errors++;
++					if (skb)
++						cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ;
++				} else if (eflag & HI3110_ERR_ACKERR) {
++					priv->net->stats.tx_errors++;
++					if (skb)
++						cf->data[3] |= CAN_ERR_PROT_LOC_ACK;
++				}
++
+ 				netdev_dbg(priv->net, "Bus Error\n");
+-				netif_rx(skb);
++				if (skb) {
++					cf->data[6] = hi3110_read(spi, HI3110_READ_TEC);
++					cf->data[7] = hi3110_read(spi, HI3110_READ_REC);
++					netif_rx(skb);
++				}
+ 			}
+ 		}
+ 
+diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c
+index d3ac865933fdf6..e94321849fd7e6 100644
+--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c
++++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c
+@@ -21,6 +21,11 @@ static inline bool mcp251xfd_tx_fifo_sta_empty(u32 fifo_sta)
+ 	return fifo_sta & MCP251XFD_REG_FIFOSTA_TFERFFIF;
+ }
+ 
++static inline bool mcp251xfd_tx_fifo_sta_less_than_half_full(u32 fifo_sta)
++{
++	return fifo_sta & MCP251XFD_REG_FIFOSTA_TFHRFHIF;
++}
++
+ static inline int
+ mcp251xfd_tef_tail_get_from_chip(const struct mcp251xfd_priv *priv,
+ 				 u8 *tef_tail)
+@@ -147,7 +152,29 @@ mcp251xfd_get_tef_len(struct mcp251xfd_priv *priv, u8 *len_p)
+ 	BUILD_BUG_ON(sizeof(tx_ring->obj_num) != sizeof(len));
+ 
+ 	len = (chip_tx_tail << shift) - (tail << shift);
+-	*len_p = len >> shift;
++	len >>= shift;
++
++	/* According to mcp2518fd erratum DS80000789E, item 6, the
++	 * FIFOCI bits of a FIFOSTA register (here the TX-FIFO tail
++	 * index) might be corrupted.
++	 *
++	 * However, here it seems the bit indicating that the TX-FIFO
++	 * is empty (MCP251XFD_REG_FIFOSTA_TFERFFIF) is not correct,
++	 * while the TX-FIFO tail index is.
++	 *
++	 * We assume the TX-FIFO is empty, i.e. all pending CAN frames
++	 * have been sent, if:
++	 * - Chip's head and tail index are equal (len == 0).
++	 * - The TX-FIFO is less than half full.
++	 *   (The TX-FIFO empty case has already been checked at the
++	 *    beginning of this function.)
++	 * - No free buffers in the TX ring.
++	 */
++	if (len == 0 && mcp251xfd_tx_fifo_sta_less_than_half_full(fifo_sta) &&
++	    mcp251xfd_get_tx_free(tx_ring) == 0)
++		len = tx_ring->obj_num;
++
++	*len_p = len;
+ 
+ 	return 0;
+ }
+diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c
+index 360158c295d348..4311c1f0eafd8d 100644
+--- a/drivers/net/can/sun4i_can.c
++++ b/drivers/net/can/sun4i_can.c
+@@ -579,11 +579,9 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status)
+ 		/* bus error interrupt */
+ 		netdev_dbg(dev, "bus error interrupt\n");
+ 		priv->can.can_stats.bus_error++;
+-		stats->rx_errors++;
++		ecc = readl(priv->base + SUN4I_REG_STA_ADDR);
+ 
+ 		if (likely(skb)) {
+-			ecc = readl(priv->base + SUN4I_REG_STA_ADDR);
+-
+ 			cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+ 
+ 			switch (ecc & SUN4I_STA_MASK_ERR) {
+@@ -601,9 +599,15 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status)
+ 					       >> 16;
+ 				break;
+ 			}
+-			/* error occurred during transmission? */
+-			if ((ecc & SUN4I_STA_ERR_DIR) == 0)
++		}
++
++		/* error occurred during transmission? */
++		if ((ecc & SUN4I_STA_ERR_DIR) == 0) {
++			if (likely(skb))
+ 				cf->data[2] |= CAN_ERR_PROT_TX;
++			stats->tx_errors++;
++		} else {
++			stats->rx_errors++;
+ 		}
+ 	}
+ 	if (isrc & SUN4I_INT_ERR_PASSIVE) {
+@@ -629,10 +633,10 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status)
+ 		tx_state = txerr >= rxerr ? state : 0;
+ 		rx_state = txerr <= rxerr ? state : 0;
+ 
+-		if (likely(skb))
+-			can_change_state(dev, cf, tx_state, rx_state);
+-		else
+-			priv->can.state = state;
++		/* The skb allocation might fail, but can_change_state()
++		 * handles cf == NULL.
++		 */
++		can_change_state(dev, cf, tx_state, rx_state);
+ 		if (state == CAN_STATE_BUS_OFF)
+ 			can_bus_off(dev);
+ 	}
+diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
+index 050c0b49938a42..5355bac4dccbe0 100644
+--- a/drivers/net/can/usb/ems_usb.c
++++ b/drivers/net/can/usb/ems_usb.c
+@@ -335,15 +335,14 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg)
+ 	struct net_device_stats *stats = &dev->netdev->stats;
+ 
+ 	skb = alloc_can_err_skb(dev->netdev, &cf);
+-	if (skb == NULL)
+-		return;
+ 
+ 	if (msg->type == CPC_MSG_TYPE_CAN_STATE) {
+ 		u8 state = msg->msg.can_state;
+ 
+ 		if (state & SJA1000_SR_BS) {
+ 			dev->can.state = CAN_STATE_BUS_OFF;
+-			cf->can_id |= CAN_ERR_BUSOFF;
++			if (skb)
++				cf->can_id |= CAN_ERR_BUSOFF;
+ 
+ 			dev->can.can_stats.bus_off++;
+ 			can_bus_off(dev->netdev);
+@@ -361,44 +360,53 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg)
+ 
+ 		/* bus error interrupt */
+ 		dev->can.can_stats.bus_error++;
+-		stats->rx_errors++;
+ 
+-		cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
++		if (skb) {
++			cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+ 
+-		switch (ecc & SJA1000_ECC_MASK) {
+-		case SJA1000_ECC_BIT:
+-			cf->data[2] |= CAN_ERR_PROT_BIT;
+-			break;
+-		case SJA1000_ECC_FORM:
+-			cf->data[2] |= CAN_ERR_PROT_FORM;
+-			break;
+-		case SJA1000_ECC_STUFF:
+-			cf->data[2] |= CAN_ERR_PROT_STUFF;
+-			break;
+-		default:
+-			cf->data[3] = ecc & SJA1000_ECC_SEG;
+-			break;
++			switch (ecc & SJA1000_ECC_MASK) {
++			case SJA1000_ECC_BIT:
++				cf->data[2] |= CAN_ERR_PROT_BIT;
++				break;
++			case SJA1000_ECC_FORM:
++				cf->data[2] |= CAN_ERR_PROT_FORM;
++				break;
++			case SJA1000_ECC_STUFF:
++				cf->data[2] |= CAN_ERR_PROT_STUFF;
++				break;
++			default:
++				cf->data[3] = ecc & SJA1000_ECC_SEG;
++				break;
++			}
+ 		}
+ 
+ 		/* Error occurred during transmission? */
+-		if ((ecc & SJA1000_ECC_DIR) == 0)
+-			cf->data[2] |= CAN_ERR_PROT_TX;
++		if ((ecc & SJA1000_ECC_DIR) == 0) {
++			stats->tx_errors++;
++			if (skb)
++				cf->data[2] |= CAN_ERR_PROT_TX;
++		} else {
++			stats->rx_errors++;
++		}
+ 
+-		if (dev->can.state == CAN_STATE_ERROR_WARNING ||
+-		    dev->can.state == CAN_STATE_ERROR_PASSIVE) {
++		if (skb && (dev->can.state == CAN_STATE_ERROR_WARNING ||
++			    dev->can.state == CAN_STATE_ERROR_PASSIVE)) {
+ 			cf->can_id |= CAN_ERR_CRTL;
+ 			cf->data[1] = (txerr > rxerr) ?
+ 			    CAN_ERR_CRTL_TX_PASSIVE : CAN_ERR_CRTL_RX_PASSIVE;
+ 		}
+ 	} else if (msg->type == CPC_MSG_TYPE_OVERRUN) {
+-		cf->can_id |= CAN_ERR_CRTL;
+-		cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
++		if (skb) {
++			cf->can_id |= CAN_ERR_CRTL;
++			cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
++		}
+ 
+ 		stats->rx_over_errors++;
+ 		stats->rx_errors++;
+ 	}
+ 
+-	netif_rx(skb);
++	if (skb)
++		netif_rx(skb);
+ }
+ 
+ /*
+diff --git a/drivers/net/can/usb/f81604.c b/drivers/net/can/usb/f81604.c
+index bc0c8903fe7794..e0cfa1460b0b83 100644
+--- a/drivers/net/can/usb/f81604.c
++++ b/drivers/net/can/usb/f81604.c
+@@ -526,7 +526,6 @@ static void f81604_handle_can_bus_errors(struct f81604_port_priv *priv,
+ 		netdev_dbg(netdev, "bus error interrupt\n");
+ 
+ 		priv->can.can_stats.bus_error++;
+-		stats->rx_errors++;
+ 
+ 		if (skb) {
+ 			cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+@@ -548,10 +547,15 @@ static void f81604_handle_can_bus_errors(struct f81604_port_priv *priv,
+ 
+ 			/* set error location */
+ 			cf->data[3] = data->ecc & F81604_SJA1000_ECC_SEG;
++		}
+ 
+-			/* Error occurred during transmission? */
+-			if ((data->ecc & F81604_SJA1000_ECC_DIR) == 0)
++		/* Error occurred during transmission? */
++		if ((data->ecc & F81604_SJA1000_ECC_DIR) == 0) {
++			stats->tx_errors++;
++			if (skb)
+ 				cf->data[2] |= CAN_ERR_PROT_TX;
++		} else {
++			stats->rx_errors++;
+ 		}
+ 
+ 		set_bit(F81604_CLEAR_ECC, &priv->clear_flags);
+diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
+index bc86e9b329fd10..b6f4de375df75d 100644
+--- a/drivers/net/can/usb/gs_usb.c
++++ b/drivers/net/can/usb/gs_usb.c
+@@ -43,9 +43,6 @@
+ #define USB_XYLANTA_SAINT3_VENDOR_ID 0x16d0
+ #define USB_XYLANTA_SAINT3_PRODUCT_ID 0x0f30
+ 
+-#define GS_USB_ENDPOINT_IN 1
+-#define GS_USB_ENDPOINT_OUT 2
+-
+ /* Timestamp 32 bit timer runs at 1 MHz (1 µs tick). Worker accounts
+  * for timer overflow (will be after ~71 minutes)
+  */
+@@ -336,6 +333,9 @@ struct gs_usb {
+ 
+ 	unsigned int hf_size_rx;
+ 	u8 active_channels;
++
++	unsigned int pipe_in;
++	unsigned int pipe_out;
+ };
+ 
+ /* 'allocate' a tx context.
+@@ -687,7 +687,7 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
+ 
+ resubmit_urb:
+ 	usb_fill_bulk_urb(urb, parent->udev,
+-			  usb_rcvbulkpipe(parent->udev, GS_USB_ENDPOINT_IN),
++			  parent->pipe_in,
+ 			  hf, dev->parent->hf_size_rx,
+ 			  gs_usb_receive_bulk_callback, parent);
+ 
+@@ -819,7 +819,7 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
+ 	}
+ 
+ 	usb_fill_bulk_urb(urb, dev->udev,
+-			  usb_sndbulkpipe(dev->udev, GS_USB_ENDPOINT_OUT),
++			  dev->parent->pipe_out,
+ 			  hf, dev->hf_size_tx,
+ 			  gs_usb_xmit_callback, txc);
+ 
+@@ -925,8 +925,7 @@ static int gs_can_open(struct net_device *netdev)
+ 			/* fill, anchor, and submit rx urb */
+ 			usb_fill_bulk_urb(urb,
+ 					  dev->udev,
+-					  usb_rcvbulkpipe(dev->udev,
+-							  GS_USB_ENDPOINT_IN),
++					  dev->parent->pipe_in,
+ 					  buf,
+ 					  dev->parent->hf_size_rx,
+ 					  gs_usb_receive_bulk_callback, parent);
+@@ -1413,6 +1412,7 @@ static int gs_usb_probe(struct usb_interface *intf,
+ 			const struct usb_device_id *id)
+ {
+ 	struct usb_device *udev = interface_to_usbdev(intf);
++	struct usb_endpoint_descriptor *ep_in, *ep_out;
+ 	struct gs_host_frame *hf;
+ 	struct gs_usb *parent;
+ 	struct gs_host_config hconf = {
+@@ -1422,6 +1422,13 @@ static int gs_usb_probe(struct usb_interface *intf,
+ 	unsigned int icount, i;
+ 	int rc;
+ 
++	rc = usb_find_common_endpoints(intf->cur_altsetting,
++				       &ep_in, &ep_out, NULL, NULL);
++	if (rc) {
++		dev_err(&intf->dev, "Required endpoints not found\n");
++		return rc;
++	}
++
+ 	/* send host config */
+ 	rc = usb_control_msg_send(udev, 0,
+ 				  GS_USB_BREQ_HOST_FORMAT,
+@@ -1466,6 +1473,10 @@ static int gs_usb_probe(struct usb_interface *intf,
+ 	usb_set_intfdata(intf, parent);
+ 	parent->udev = udev;
+ 
++	/* store the detected endpoints */
++	parent->pipe_in = usb_rcvbulkpipe(parent->udev, ep_in->bEndpointAddress);
++	parent->pipe_out = usb_sndbulkpipe(parent->udev, ep_out->bEndpointAddress);
++
+ 	for (i = 0; i < icount; i++) {
+ 		unsigned int hf_size_rx = 0;
+ 
+diff --git a/drivers/net/dsa/qca/qca8k-8xxx.c b/drivers/net/dsa/qca/qca8k-8xxx.c
+index f8d8c70642c4ff..59b4a7240b5832 100644
+--- a/drivers/net/dsa/qca/qca8k-8xxx.c
++++ b/drivers/net/dsa/qca/qca8k-8xxx.c
+@@ -673,7 +673,7 @@ qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
+ 	 * We therefore need to lock the MDIO bus onto which the switch is
+ 	 * connected.
+ 	 */
+-	mutex_lock(&priv->bus->mdio_lock);
++	mutex_lock_nested(&priv->bus->mdio_lock, MDIO_MUTEX_NESTED);
+ 
+ 	/* Actually start the request:
+ 	 * 1. Send mdio master packet
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+index 20ba14eb87e00b..b901ecb57f2552 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+@@ -1193,10 +1193,14 @@ static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
+ 		}
+ 	}
+ 
+-	if (fltr->base.flags & BNXT_ACT_DROP)
++	if (fltr->base.flags & BNXT_ACT_DROP) {
+ 		fs->ring_cookie = RX_CLS_FLOW_DISC;
+-	else
++	} else if (fltr->base.flags & BNXT_ACT_RSS_CTX) {
++		fs->flow_type |= FLOW_RSS;
++		cmd->rss_context = fltr->base.fw_vnic_id;
++	} else {
+ 		fs->ring_cookie = fltr->base.rxq;
++	}
+ 	rc = 0;
+ 
+ fltr_err:
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
+index c09370eab319b2..16a7908c79f703 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc.c
++++ b/drivers/net/ethernet/freescale/enetc/enetc.c
+@@ -28,6 +28,9 @@ EXPORT_SYMBOL_GPL(enetc_port_mac_wr);
+ static void enetc_change_preemptible_tcs(struct enetc_ndev_priv *priv,
+ 					 u8 preemptible_tcs)
+ {
++	if (!(priv->si->hw_features & ENETC_SI_F_QBU))
++		return;
++
+ 	priv->preemptible_tcs = preemptible_tcs;
+ 	enetc_mm_commit_preemptible_tcs(priv);
+ }
+diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
+index 39689826cc8ffc..ce253aac5344cc 100644
+--- a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
++++ b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
+@@ -94,7 +94,7 @@ static int mpc52xx_fec_mdio_probe(struct platform_device *of)
+ 		goto out_free;
+ 	}
+ 
+-	snprintf(bus->id, MII_BUS_ID_SIZE, "%x", res.start);
++	snprintf(bus->id, MII_BUS_ID_SIZE, "%pa", &res.start);
+ 	bus->priv = priv;
+ 
+ 	bus->parent = dev;
+diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
+index d96028f01770cf..fb416d60dcd727 100644
+--- a/drivers/net/ethernet/freescale/fman/fman.c
++++ b/drivers/net/ethernet/freescale/fman/fman.c
+@@ -24,7 +24,6 @@
+ 
+ /* General defines */
+ #define FMAN_LIODN_TBL			64	/* size of LIODN table */
+-#define MAX_NUM_OF_MACS			10
+ #define FM_NUM_OF_FMAN_CTRL_EVENT_REGS	4
+ #define BASE_RX_PORTID			0x08
+ #define BASE_TX_PORTID			0x28
+diff --git a/drivers/net/ethernet/freescale/fman/fman.h b/drivers/net/ethernet/freescale/fman/fman.h
+index 2ea575a46675b0..74eb62eba0d7ff 100644
+--- a/drivers/net/ethernet/freescale/fman/fman.h
++++ b/drivers/net/ethernet/freescale/fman/fman.h
+@@ -74,6 +74,9 @@
+ #define BM_MAX_NUM_OF_POOLS		64 /* Buffers pools */
+ #define FMAN_PORT_MAX_EXT_POOLS_NUM	8  /* External BM pools per Rx port */
+ 
++/* General defines */
++#define MAX_NUM_OF_MACS			10
++
+ struct fman; /* FMan data */
+ 
+ /* Enum for defining port types */
+diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
+index 11da139082e1bf..1916a2ac48b9f1 100644
+--- a/drivers/net/ethernet/freescale/fman/mac.c
++++ b/drivers/net/ethernet/freescale/fman/mac.c
+@@ -259,6 +259,11 @@ static int mac_probe(struct platform_device *_of_dev)
+ 		err = -EINVAL;
+ 		goto _return_dev_put;
+ 	}
++	if (val >= MAX_NUM_OF_MACS) {
++		dev_err(dev, "cell-index value is too big for %pOF\n", mac_node);
++		err = -EINVAL;
++		goto _return_dev_put;
++	}
+ 	priv->cell_index = (u8)val;
+ 
+ 	/* Get the MAC address */
+diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
+index 2e210a00355843..249b482e32d3bd 100644
+--- a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
++++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
+@@ -123,7 +123,7 @@ static int fs_mii_bitbang_init(struct mii_bus *bus, struct device_node *np)
+ 	 * we get is an int, and the odds of multiple bitbang mdio buses
+ 	 * is low enough that it's not worth going too crazy.
+ 	 */
+-	snprintf(bus->id, MII_BUS_ID_SIZE, "%x", res.start);
++	snprintf(bus->id, MII_BUS_ID_SIZE, "%pa", &res.start);
+ 
+ 	data = of_get_property(np, "fsl,mdio-pin", &len);
+ 	if (!data || len != 4)
+diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
+index 009716a12a26af..f1324e25b2af1c 100644
+--- a/drivers/net/ethernet/intel/ice/ice_common.c
++++ b/drivers/net/ethernet/intel/ice/ice_common.c
+@@ -542,7 +542,8 @@ ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd,
+ /**
+  * ice_find_netlist_node
+  * @hw: pointer to the hw struct
+- * @node_type_ctx: type of netlist node to look for
++ * @node_type: type of netlist node to look for
++ * @ctx: context of the search
+  * @node_part_number: node part number to look for
+  * @node_handle: output parameter if node found - optional
+  *
+@@ -552,10 +553,12 @@ ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd,
+  * valid if the function returns zero, and should be ignored on any non-zero
+  * return value.
+  *
+- * Returns: 0 if the node is found, -ENOENT if no handle was found, and
+- * a negative error code on failure to access the AQ.
++ * Return:
++ * * 0 if the node is found,
++ * * -ENOENT if no handle was found,
++ * * negative error code on failure to access the AQ.
+  */
+-static int ice_find_netlist_node(struct ice_hw *hw, u8 node_type_ctx,
++static int ice_find_netlist_node(struct ice_hw *hw, u8 node_type, u8 ctx,
+ 				 u8 node_part_number, u16 *node_handle)
+ {
+ 	u8 idx;
+@@ -566,8 +569,8 @@ static int ice_find_netlist_node(struct ice_hw *hw, u8 node_type_ctx,
+ 		int status;
+ 
+ 		cmd.addr.topo_params.node_type_ctx =
+-			FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M,
+-				   node_type_ctx);
++			FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M, node_type) |
++			FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M, ctx);
+ 		cmd.addr.topo_params.index = idx;
+ 
+ 		status = ice_aq_get_netlist_node(hw, &cmd,
+@@ -2726,9 +2729,11 @@ bool ice_is_pf_c827(struct ice_hw *hw)
+  */
+ bool ice_is_phy_rclk_in_netlist(struct ice_hw *hw)
+ {
+-	if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL,
++	if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY,
++				  ICE_AQC_LINK_TOPO_NODE_CTX_PORT,
+ 				  ICE_AQC_GET_LINK_TOPO_NODE_NR_C827, NULL) &&
+-	    ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL,
++	    ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY,
++				  ICE_AQC_LINK_TOPO_NODE_CTX_PORT,
+ 				  ICE_AQC_GET_LINK_TOPO_NODE_NR_E822_PHY, NULL))
+ 		return false;
+ 
+@@ -2744,6 +2749,7 @@ bool ice_is_phy_rclk_in_netlist(struct ice_hw *hw)
+ bool ice_is_clock_mux_in_netlist(struct ice_hw *hw)
+ {
+ 	if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_MUX,
++				  ICE_AQC_LINK_TOPO_NODE_CTX_GLOBAL,
+ 				  ICE_AQC_GET_LINK_TOPO_NODE_NR_GEN_CLK_MUX,
+ 				  NULL))
+ 		return false;
+@@ -2764,12 +2770,14 @@ bool ice_is_clock_mux_in_netlist(struct ice_hw *hw)
+ bool ice_is_cgu_in_netlist(struct ice_hw *hw)
+ {
+ 	if (!ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL,
++				   ICE_AQC_LINK_TOPO_NODE_CTX_GLOBAL,
+ 				   ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032,
+ 				   NULL)) {
+ 		hw->cgu_part_number = ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032;
+ 		return true;
+ 	} else if (!ice_find_netlist_node(hw,
+ 					  ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL,
++					  ICE_AQC_LINK_TOPO_NODE_CTX_GLOBAL,
+ 					  ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384,
+ 					  NULL)) {
+ 		hw->cgu_part_number = ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384;
+@@ -2788,6 +2796,7 @@ bool ice_is_cgu_in_netlist(struct ice_hw *hw)
+ bool ice_is_gps_in_netlist(struct ice_hw *hw)
+ {
+ 	if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_GPS,
++				  ICE_AQC_LINK_TOPO_NODE_CTX_GLOBAL,
+ 				  ICE_AQC_GET_LINK_TOPO_NODE_NR_GEN_GPS, NULL))
+ 		return false;
+ 
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index b1e7727b8677f9..8f2e758c394277 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -6361,10 +6361,12 @@ ice_set_vlan_filtering_features(struct ice_vsi *vsi, netdev_features_t features)
+ 	int err = 0;
+ 
+ 	/* support Single VLAN Mode (SVM) and Double VLAN Mode (DVM) by checking
+-	 * if either bit is set
++	 * if either bit is set. In switchdev mode Rx filtering should never be
++	 * enabled.
+ 	 */
+-	if (features &
+-	    (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER))
++	if ((features &
++	     (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)) &&
++	     !ice_is_eswitch_mode_switchdev(vsi->back))
+ 		err = vlan_ops->ena_rx_filtering(vsi);
+ 	else
+ 		err = vlan_ops->dis_rx_filtering(vsi);
+diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
+index ec8db830ac73ae..3816e45b6ab44a 100644
+--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
++++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
+@@ -1495,7 +1495,8 @@ static int ice_read_ptp_tstamp_eth56g(struct ice_hw *hw, u8 port, u8 idx,
+ 	 * lower 8 bits in the low register, and the upper 32 bits in the high
+ 	 * register.
+ 	 */
+-	*tstamp = ((u64)hi) << TS_PHY_HIGH_S | ((u64)lo & TS_PHY_LOW_M);
++	*tstamp = FIELD_PREP(TS_PHY_HIGH_M, hi) |
++		  FIELD_PREP(TS_PHY_LOW_M, lo);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
+index 6cedc1a906afb6..4c8b8457134427 100644
+--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
++++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
+@@ -663,9 +663,8 @@ static inline u64 ice_get_base_incval(struct ice_hw *hw)
+ #define TS_HIGH_M			0xFF
+ #define TS_HIGH_S			32
+ 
+-#define TS_PHY_LOW_M			0xFF
+-#define TS_PHY_HIGH_M			0xFFFFFFFF
+-#define TS_PHY_HIGH_S			8
++#define TS_PHY_LOW_M			GENMASK(7, 0)
++#define TS_PHY_HIGH_M			GENMASK_ULL(39, 8)
+ 
+ #define BYTES_PER_IDX_ADDR_L_U		8
+ #define BYTES_PER_IDX_ADDR_L		4
+diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+index d4e6f0e104872d..60d15b3e6e2faa 100644
+--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
++++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+@@ -2448,6 +2448,7 @@ static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q,
+ 			 * rest of the packet.
+ 			 */
+ 			tx_buf->type = LIBETH_SQE_EMPTY;
++			idpf_tx_buf_compl_tag(tx_buf) = params->compl_tag;
+ 
+ 			/* Adjust the DMA offset and the remaining size of the
+ 			 * fragment.  On the first iteration of this loop,
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
+index f1d0881687233e..18284a838e2424 100644
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -637,6 +637,10 @@ static int __init igb_init_module(void)
+ 	dca_register_notify(&dca_notifier);
+ #endif
+ 	ret = pci_register_driver(&igb_driver);
++#ifdef CONFIG_IGB_DCA
++	if (ret)
++		dca_unregister_notify(&dca_notifier);
++#endif
+ 	return ret;
+ }
+ 
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+index 6493abf189de5e..6639069ad52834 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+@@ -194,6 +194,8 @@ u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg);
+ 	dev_err(&adapter->pdev->dev, format, ## arg)
+ #define e_dev_notice(format, arg...) \
+ 	dev_notice(&adapter->pdev->dev, format, ## arg)
++#define e_dbg(msglvl, format, arg...) \
++	netif_dbg(adapter, msglvl, adapter->netdev, format, ## arg)
+ #define e_info(msglvl, format, arg...) \
+ 	netif_info(adapter, msglvl, adapter->netdev, format, ## arg)
+ #define e_err(msglvl, format, arg...) \
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+index 14aa2ca51f70ec..81179c60af4e01 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+@@ -40,7 +40,7 @@
+ #define IXGBE_SFF_1GBASESX_CAPABLE		0x1
+ #define IXGBE_SFF_1GBASELX_CAPABLE		0x2
+ #define IXGBE_SFF_1GBASET_CAPABLE		0x8
+-#define IXGBE_SFF_BASEBX10_CAPABLE		0x64
++#define IXGBE_SFF_BASEBX10_CAPABLE		0x40
+ #define IXGBE_SFF_10GBASESR_CAPABLE		0x10
+ #define IXGBE_SFF_10GBASELR_CAPABLE		0x20
+ #define IXGBE_SFF_SOFT_RS_SELECT_MASK		0x8
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+index e71715f5da2287..20415c1238ef8d 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+@@ -1047,7 +1047,7 @@ static int ixgbe_negotiate_vf_api(struct ixgbe_adapter *adapter,
+ 		break;
+ 	}
+ 
+-	e_info(drv, "VF %d requested invalid api version %u\n", vf, api);
++	e_dbg(drv, "VF %d requested unsupported api version %u\n", vf, api);
+ 
+ 	return -1;
+ }
+diff --git a/drivers/net/ethernet/intel/ixgbevf/ipsec.c b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
+index 66cf17f1940820..f804b35d79c726 100644
+--- a/drivers/net/ethernet/intel/ixgbevf/ipsec.c
++++ b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
+@@ -629,7 +629,6 @@ void ixgbevf_init_ipsec_offload(struct ixgbevf_adapter *adapter)
+ 
+ 	switch (adapter->hw.api_version) {
+ 	case ixgbe_mbox_api_14:
+-	case ixgbe_mbox_api_15:
+ 		break;
+ 	default:
+ 		return;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
+index 878cbdbf5ec8b4..e7e01f3298efb0 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
+@@ -5,6 +5,7 @@
+ #include <net/nexthop.h>
+ #include <net/ip_tunnels.h>
+ #include "tc_tun_encap.h"
++#include "fs_core.h"
+ #include "en_tc.h"
+ #include "tc_tun.h"
+ #include "rep/tc.h"
+@@ -24,10 +25,18 @@ static int mlx5e_set_int_port_tunnel(struct mlx5e_priv *priv,
+ 
+ 	route_dev = dev_get_by_index(dev_net(e->out_dev), e->route_dev_ifindex);
+ 
+-	if (!route_dev || !netif_is_ovs_master(route_dev) ||
+-	    attr->parse_attr->filter_dev == e->out_dev)
++	if (!route_dev || !netif_is_ovs_master(route_dev))
+ 		goto out;
+ 
++	if (priv->mdev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_DMFS &&
++	    mlx5e_eswitch_uplink_rep(attr->parse_attr->filter_dev) &&
++	    (attr->esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP)) {
++		mlx5_core_warn(priv->mdev,
++			       "Matching on external port with encap + fwd to table actions is not allowed for firmware steering\n");
++		err = -EINVAL;
++		goto out;
++	}
++
+ 	err = mlx5e_set_fwd_to_int_port_actions(priv, attr, e->route_dev_ifindex,
+ 						MLX5E_TC_INT_PORT_EGRESS,
+ 						&attr->action, out_index);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 13a3fa8dc0cb09..c14bef83d84d0f 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -2652,11 +2652,11 @@ void mlx5e_trigger_napi_sched(struct napi_struct *napi)
+ 
+ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
+ 			      struct mlx5e_params *params,
+-			      struct mlx5e_channel_param *cparam,
+ 			      struct xsk_buff_pool *xsk_pool,
+ 			      struct mlx5e_channel **cp)
+ {
+ 	struct net_device *netdev = priv->netdev;
++	struct mlx5e_channel_param *cparam;
+ 	struct mlx5_core_dev *mdev;
+ 	struct mlx5e_xsk_param xsk;
+ 	struct mlx5e_channel *c;
+@@ -2678,8 +2678,15 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
+ 		return err;
+ 
+ 	c = kvzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
+-	if (!c)
+-		return -ENOMEM;
++	cparam = kvzalloc(sizeof(*cparam), GFP_KERNEL);
++	if (!c || !cparam) {
++		err = -ENOMEM;
++		goto err_free;
++	}
++
++	err = mlx5e_build_channel_param(mdev, params, cparam);
++	if (err)
++		goto err_free;
+ 
+ 	c->priv     = priv;
+ 	c->mdev     = mdev;
+@@ -2713,6 +2720,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
+ 
+ 	*cp = c;
+ 
++	kvfree(cparam);
+ 	return 0;
+ 
+ err_close_queues:
+@@ -2721,6 +2729,8 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
+ err_napi_del:
+ 	netif_napi_del(&c->napi);
+ 
++err_free:
++	kvfree(cparam);
+ 	kvfree(c);
+ 
+ 	return err;
+@@ -2779,20 +2789,14 @@ static void mlx5e_close_channel(struct mlx5e_channel *c)
+ int mlx5e_open_channels(struct mlx5e_priv *priv,
+ 			struct mlx5e_channels *chs)
+ {
+-	struct mlx5e_channel_param *cparam;
+ 	int err = -ENOMEM;
+ 	int i;
+ 
+ 	chs->num = chs->params.num_channels;
+ 
+ 	chs->c = kcalloc(chs->num, sizeof(struct mlx5e_channel *), GFP_KERNEL);
+-	cparam = kvzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL);
+-	if (!chs->c || !cparam)
+-		goto err_free;
+-
+-	err = mlx5e_build_channel_param(priv->mdev, &chs->params, cparam);
+-	if (err)
+-		goto err_free;
++	if (!chs->c)
++		goto err_out;
+ 
+ 	for (i = 0; i < chs->num; i++) {
+ 		struct xsk_buff_pool *xsk_pool = NULL;
+@@ -2800,7 +2804,7 @@ int mlx5e_open_channels(struct mlx5e_priv *priv,
+ 		if (chs->params.xdp_prog)
+ 			xsk_pool = mlx5e_xsk_get_pool(&chs->params, chs->params.xsk, i);
+ 
+-		err = mlx5e_open_channel(priv, i, &chs->params, cparam, xsk_pool, &chs->c[i]);
++		err = mlx5e_open_channel(priv, i, &chs->params, xsk_pool, &chs->c[i]);
+ 		if (err)
+ 			goto err_close_channels;
+ 	}
+@@ -2818,7 +2822,6 @@ int mlx5e_open_channels(struct mlx5e_priv *priv,
+ 	}
+ 
+ 	mlx5e_health_channels_update(priv);
+-	kvfree(cparam);
+ 	return 0;
+ 
+ err_close_ptp:
+@@ -2829,9 +2832,8 @@ int mlx5e_open_channels(struct mlx5e_priv *priv,
+ 	for (i--; i >= 0; i--)
+ 		mlx5e_close_channel(chs->c[i]);
+ 
+-err_free:
+ 	kfree(chs->c);
+-	kvfree(cparam);
++err_out:
+ 	chs->num = 0;
+ 	return err;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+index 6e4f8aaf8d2f21..2eabfcc247c6ae 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+@@ -3698,6 +3698,7 @@ void mlx5_fs_core_free(struct mlx5_core_dev *dev)
+ int mlx5_fs_core_alloc(struct mlx5_core_dev *dev)
+ {
+ 	struct mlx5_flow_steering *steering;
++	char name[80];
+ 	int err = 0;
+ 
+ 	err = mlx5_init_fc_stats(dev);
+@@ -3722,10 +3723,12 @@ int mlx5_fs_core_alloc(struct mlx5_core_dev *dev)
+ 	else
+ 		steering->mode = MLX5_FLOW_STEERING_MODE_DMFS;
+ 
+-	steering->fgs_cache = kmem_cache_create("mlx5_fs_fgs",
++	snprintf(name, sizeof(name), "%s-mlx5_fs_fgs", dev_name(dev->device));
++	steering->fgs_cache = kmem_cache_create(name,
+ 						sizeof(struct mlx5_flow_group), 0,
+ 						0, NULL);
+-	steering->ftes_cache = kmem_cache_create("mlx5_fs_ftes", sizeof(struct fs_fte), 0,
++	snprintf(name, sizeof(name), "%s-mlx5_fs_ftes", dev_name(dev->device));
++	steering->ftes_cache = kmem_cache_create(name, sizeof(struct fs_fte), 0,
+ 						 0, NULL);
+ 	if (!steering->ftes_cache || !steering->fgs_cache) {
+ 		err = -ENOMEM;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.c
+index 601fad5fc54a39..ee4058bafe119b 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.c
+@@ -39,6 +39,8 @@ bool mlx5hws_bwc_match_params_is_complex(struct mlx5hws_context *ctx,
+ 		} else {
+ 			mlx5hws_err(ctx, "Failed to calculate matcher definer layout\n");
+ 		}
++	} else {
++		kfree(mt->fc);
+ 	}
+ 
+ 	mlx5hws_match_template_destroy(mt);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.c
+index 6d443e6ee8d9e9..08be034bd1e16d 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.c
+@@ -990,6 +990,7 @@ static int hws_bwc_send_queues_init(struct mlx5hws_context *ctx)
+ 	for (i = 0; i < bwc_queues; i++) {
+ 		mutex_init(&ctx->bwc_send_queue_locks[i]);
+ 		lockdep_register_key(ctx->bwc_lock_class_keys + i);
++		lockdep_set_class(ctx->bwc_send_queue_locks + i, ctx->bwc_lock_class_keys + i);
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
+index 947500f8ed7142..7aa1a462a1035b 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
+@@ -67,7 +67,7 @@ static bool mlxsw_afk_blocks_check(struct mlxsw_afk *mlxsw_afk)
+ 
+ 		for (j = 0; j < block->instances_count; j++) {
+ 			const struct mlxsw_afk_element_info *elinfo;
+-			struct mlxsw_afk_element_inst *elinst;
++			const struct mlxsw_afk_element_inst *elinst;
+ 
+ 			elinst = &block->instances[j];
+ 			elinfo = &mlxsw_afk_element_infos[elinst->element];
+@@ -154,7 +154,7 @@ static void mlxsw_afk_picker_count_hits(struct mlxsw_afk *mlxsw_afk,
+ 		const struct mlxsw_afk_block *block = &mlxsw_afk->blocks[i];
+ 
+ 		for (j = 0; j < block->instances_count; j++) {
+-			struct mlxsw_afk_element_inst *elinst;
++			const struct mlxsw_afk_element_inst *elinst;
+ 
+ 			elinst = &block->instances[j];
+ 			if (elinst->element == element) {
+@@ -386,7 +386,7 @@ mlxsw_afk_block_elinst_get(const struct mlxsw_afk_block *block,
+ 	int i;
+ 
+ 	for (i = 0; i < block->instances_count; i++) {
+-		struct mlxsw_afk_element_inst *elinst;
++		const struct mlxsw_afk_element_inst *elinst;
+ 
+ 		elinst = &block->instances[i];
+ 		if (elinst->element == element)
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
+index 98a05598178b3b..5aa1afb3f2ca81 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
++++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
+@@ -117,7 +117,7 @@ struct mlxsw_afk_element_inst { /* element instance in actual block */
+ 
+ struct mlxsw_afk_block {
+ 	u16 encoding; /* block ID */
+-	struct mlxsw_afk_element_inst *instances;
++	const struct mlxsw_afk_element_inst *instances;
+ 	unsigned int instances_count;
+ 	bool high_entropy;
+ };
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
+index eaad7860560271..1850a975b38044 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
+@@ -7,7 +7,7 @@
+ #include "item.h"
+ #include "core_acl_flex_keys.h"
+ 
+-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_dmac[] = {
++static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_dmac[] = {
+ 	MLXSW_AFK_ELEMENT_INST_BUF(DMAC_32_47, 0x00, 2),
+ 	MLXSW_AFK_ELEMENT_INST_BUF(DMAC_0_31, 0x02, 4),
+ 	MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 13, 3),
+@@ -15,7 +15,7 @@ static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_dmac[] = {
+ 	MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16),
+ };
+ 
+-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac[] = {
++static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac[] = {
+ 	MLXSW_AFK_ELEMENT_INST_BUF(SMAC_32_47, 0x00, 2),
+ 	MLXSW_AFK_ELEMENT_INST_BUF(SMAC_0_31, 0x02, 4),
+ 	MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 13, 3),
+@@ -23,27 +23,27 @@ static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac[] = {
+ 	MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16),
+ };
+ 
+-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac_ex[] = {
++static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac_ex[] = {
+ 	MLXSW_AFK_ELEMENT_INST_BUF(SMAC_32_47, 0x02, 2),
+ 	MLXSW_AFK_ELEMENT_INST_BUF(SMAC_0_31, 0x04, 4),
+ 	MLXSW_AFK_ELEMENT_INST_U32(ETHERTYPE, 0x0C, 0, 16),
+ };
+ 
+-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_sip[] = {
++static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_sip[] = {
+ 	MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_0_31, 0x00, 4),
+ 	MLXSW_AFK_ELEMENT_INST_U32(L4_PORT_RANGE, 0x04, 16, 16),
+ 	MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8),
+ 	MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16),
+ };
+ 
+-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_dip[] = {
++static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_dip[] = {
+ 	MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_0_31, 0x00, 4),
+ 	MLXSW_AFK_ELEMENT_INST_U32(L4_PORT_RANGE, 0x04, 16, 16),
+ 	MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8),
+ 	MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16),
+ };
+ 
+-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4[] = {
++static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4[] = {
+ 	MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_0_31, 0x00, 4),
+ 	MLXSW_AFK_ELEMENT_INST_U32(IP_ECN, 0x04, 4, 2),
+ 	MLXSW_AFK_ELEMENT_INST_U32(IP_TTL_, 0x04, 24, 8),
+@@ -51,35 +51,35 @@ static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4[] = {
+ 	MLXSW_AFK_ELEMENT_INST_U32(TCP_FLAGS, 0x08, 8, 9), /* TCP_CONTROL+TCP_ECN */
+ };
+ 
+-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_ex[] = {
++static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_ex[] = {
+ 	MLXSW_AFK_ELEMENT_INST_U32(VID, 0x00, 0, 12),
+ 	MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 29, 3),
+ 	MLXSW_AFK_ELEMENT_INST_U32(SRC_L4_PORT, 0x08, 0, 16),
+ 	MLXSW_AFK_ELEMENT_INST_U32(DST_L4_PORT, 0x0C, 0, 16),
+ };
+ 
+-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_dip[] = {
++static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_dip[] = {
+ 	MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_32_63, 0x00, 4),
+ 	MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_0_31, 0x04, 4),
+ };
+ 
+-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_ex1[] = {
++static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_ex1[] = {
+ 	MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_96_127, 0x00, 4),
+ 	MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_64_95, 0x04, 4),
+ 	MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8),
+ };
+ 
+-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_sip[] = {
++static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_sip[] = {
+ 	MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_32_63, 0x00, 4),
+ 	MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_0_31, 0x04, 4),
+ };
+ 
+-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_sip_ex[] = {
++static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_sip_ex[] = {
+ 	MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_96_127, 0x00, 4),
+ 	MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_64_95, 0x04, 4),
+ };
+ 
+-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_packet_type[] = {
++static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_packet_type[] = {
+ 	MLXSW_AFK_ELEMENT_INST_U32(ETHERTYPE, 0x00, 0, 16),
+ };
+ 
+@@ -124,90 +124,90 @@ const struct mlxsw_afk_ops mlxsw_sp1_afk_ops = {
+ 	.clear_block	= mlxsw_sp1_afk_clear_block,
+ };
+ 
+-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_0[] = {
++static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_0[] = {
+ 	MLXSW_AFK_ELEMENT_INST_U32(FDB_MISS, 0x00, 3, 1),
+ 	MLXSW_AFK_ELEMENT_INST_BUF(DMAC_0_31, 0x04, 4),
+ };
+ 
+-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_1[] = {
++static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_1[] = {
+ 	MLXSW_AFK_ELEMENT_INST_U32(FDB_MISS, 0x00, 3, 1),
+ 	MLXSW_AFK_ELEMENT_INST_BUF(SMAC_0_31, 0x04, 4),
+ };
+ 
+-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_2[] = {
++static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_2[] = {
+ 	MLXSW_AFK_ELEMENT_INST_BUF(SMAC_32_47, 0x04, 2),
+ 	MLXSW_AFK_ELEMENT_INST_BUF(DMAC_32_47, 0x06, 2),
+ };
+ 
+-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_3[] = {
++static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_3[] = {
+ 	MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x00, 0, 3),
+ 	MLXSW_AFK_ELEMENT_INST_U32(VID, 0x04, 16, 12),
+ 	MLXSW_AFK_ELEMENT_INST_BUF(DMAC_32_47, 0x06, 2),
+ };
+ 
+-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_4[] = {
++static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_4[] = {
+ 	MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x00, 0, 3),
+ 	MLXSW_AFK_ELEMENT_INST_U32(VID, 0x04, 16, 12),
+ 	MLXSW_AFK_ELEMENT_INST_U32(ETHERTYPE, 0x04, 0, 16),
+ };
+ 
+-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_5[] = {
++static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_5[] = {
+ 	MLXSW_AFK_ELEMENT_INST_U32(VID, 0x04, 16, 12),
+ 	MLXSW_AFK_ELEMENT_INST_EXT_U32(SRC_SYS_PORT, 0x04, 0, 8, -1, true), /* RX_ACL_SYSTEM_PORT */
+ };
+ 
+-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_0[] = {
++static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_0[] = {
+ 	MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_0_31, 0x04, 4),
+ };
+ 
+-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_1[] = {
++static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_1[] = {
+ 	MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_0_31, 0x04, 4),
+ };
+ 
+-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_2[] = {
++static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_2[] = {
+ 	MLXSW_AFK_ELEMENT_INST_U32(IP_DSCP, 0x04, 0, 6),
+ 	MLXSW_AFK_ELEMENT_INST_U32(IP_ECN, 0x04, 6, 2),
+ 	MLXSW_AFK_ELEMENT_INST_U32(IP_TTL_, 0x04, 8, 8),
+ 	MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x04, 16, 8),
+ };
+ 
+-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_5[] = {
++static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_5[] = {
+ 	MLXSW_AFK_ELEMENT_INST_EXT_U32(VIRT_ROUTER, 0x04, 20, 11, 0, true),
+ };
+ 
+-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_0[] = {
++static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_0[] = {
+ 	MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_0_3, 0x00, 0, 4),
+ 	MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_32_63, 0x04, 4),
+ };
+ 
+-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_1[] = {
++static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_1[] = {
+ 	MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_4_7, 0x00, 0, 4),
+ 	MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_64_95, 0x04, 4),
+ };
+ 
+-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_2[] = {
++static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_2[] = {
+ 	MLXSW_AFK_ELEMENT_INST_EXT_U32(VIRT_ROUTER_MSB, 0x00, 0, 3, 0, true),
+ 	MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_96_127, 0x04, 4),
+ };
+ 
+-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_3[] = {
++static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_3[] = {
+ 	MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_32_63, 0x04, 4),
+ };
+ 
+-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_4[] = {
++static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_4[] = {
+ 	MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_64_95, 0x04, 4),
+ };
+ 
+-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_5[] = {
++static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_5[] = {
+ 	MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_96_127, 0x04, 4),
+ };
+ 
+-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l4_0[] = {
++static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l4_0[] = {
+ 	MLXSW_AFK_ELEMENT_INST_U32(SRC_L4_PORT, 0x04, 16, 16),
+ 	MLXSW_AFK_ELEMENT_INST_U32(DST_L4_PORT, 0x04, 0, 16),
+ };
+ 
+-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l4_2[] = {
++static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l4_2[] = {
+ 	MLXSW_AFK_ELEMENT_INST_U32(TCP_FLAGS, 0x04, 16, 9), /* TCP_CONTROL + TCP_ECN */
+ 	MLXSW_AFK_ELEMENT_INST_U32(L4_PORT_RANGE, 0x04, 0, 16),
+ };
+@@ -319,16 +319,20 @@ const struct mlxsw_afk_ops mlxsw_sp2_afk_ops = {
+ 	.clear_block	= mlxsw_sp2_afk_clear_block,
+ };
+ 
+-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_5b[] = {
++static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_5b[] = {
+ 	MLXSW_AFK_ELEMENT_INST_U32(VID, 0x04, 18, 12),
+ 	MLXSW_AFK_ELEMENT_INST_EXT_U32(SRC_SYS_PORT, 0x04, 0, 9, -1, true), /* RX_ACL_SYSTEM_PORT */
+ };
+ 
+-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_5b[] = {
++static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_1b[] = {
++	MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_0_31, 0x04, 4),
++};
++
++static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_5b[] = {
+ 	MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER, 0x04, 20, 12),
+ };
+ 
+-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_2b[] = {
++static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_2b[] = {
+ 	MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_MSB, 0x00, 0, 4),
+ 	MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_96_127, 0x04, 4),
+ };
+@@ -341,7 +345,7 @@ static const struct mlxsw_afk_block mlxsw_sp4_afk_blocks[] = {
+ 	MLXSW_AFK_BLOCK(0x14, mlxsw_sp_afk_element_info_mac_4),
+ 	MLXSW_AFK_BLOCK_HIGH_ENTROPY(0x1A, mlxsw_sp_afk_element_info_mac_5b),
+ 	MLXSW_AFK_BLOCK_HIGH_ENTROPY(0x38, mlxsw_sp_afk_element_info_ipv4_0),
+-	MLXSW_AFK_BLOCK_HIGH_ENTROPY(0x39, mlxsw_sp_afk_element_info_ipv4_1),
++	MLXSW_AFK_BLOCK_HIGH_ENTROPY(0x3F, mlxsw_sp_afk_element_info_ipv4_1b),
+ 	MLXSW_AFK_BLOCK(0x3A, mlxsw_sp_afk_element_info_ipv4_2),
+ 	MLXSW_AFK_BLOCK(0x36, mlxsw_sp_afk_element_info_ipv4_5b),
+ 	MLXSW_AFK_BLOCK(0x40, mlxsw_sp_afk_element_info_ipv6_0),
+diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
+index c47266d1c7c279..b2d206dec70c8a 100644
+--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
++++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
+@@ -2439,6 +2439,7 @@ void mana_query_gf_stats(struct mana_port_context *apc)
+ 
+ 	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_GF_STAT,
+ 			     sizeof(req), sizeof(resp));
++	req.hdr.resp.msg_version = GDMA_MESSAGE_V2;
+ 	req.req_stats = STATISTICS_FLAGS_RX_DISCARDS_NO_WQE |
+ 			STATISTICS_FLAGS_RX_ERRORS_VPORT_DISABLED |
+ 			STATISTICS_FLAGS_HC_RX_BYTES |
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+index 16e6bd4661433f..6218d9c2685546 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+@@ -3314,7 +3314,9 @@ int qed_mcp_bist_nvm_get_num_images(struct qed_hwfn *p_hwfn,
+ 	if (rc)
+ 		return rc;
+ 
+-	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK))
++	if (((rsp & FW_MSG_CODE_MASK) == FW_MSG_CODE_UNSUPPORTED))
++		rc = -EOPNOTSUPP;
++	else if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK))
+ 		rc = -EINVAL;
+ 
+ 	return rc;
+diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
+index 713a89bb21e93b..5ed2818bac257c 100644
+--- a/drivers/net/ethernet/realtek/r8169_main.c
++++ b/drivers/net/ethernet/realtek/r8169_main.c
+@@ -4233,8 +4233,8 @@ static unsigned int rtl8125_quirk_udp_padto(struct rtl8169_private *tp,
+ {
+ 	unsigned int padto = 0, len = skb->len;
+ 
+-	if (rtl_is_8125(tp) && len < 128 + RTL_MIN_PATCH_LEN &&
+-	    rtl_skb_is_udp(skb) && skb_transport_header_was_set(skb)) {
++	if (len < 128 + RTL_MIN_PATCH_LEN && rtl_skb_is_udp(skb) &&
++	    skb_transport_header_was_set(skb)) {
+ 		unsigned int trans_data_len = skb_tail_pointer(skb) -
+ 					      skb_transport_header(skb);
+ 
+@@ -4258,9 +4258,15 @@ static unsigned int rtl8125_quirk_udp_padto(struct rtl8169_private *tp,
+ static unsigned int rtl_quirk_packet_padto(struct rtl8169_private *tp,
+ 					   struct sk_buff *skb)
+ {
+-	unsigned int padto;
++	unsigned int padto = 0;
+ 
+-	padto = rtl8125_quirk_udp_padto(tp, skb);
++	switch (tp->mac_version) {
++	case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_63:
++		padto = rtl8125_quirk_udp_padto(tp, skb);
++		break;
++	default:
++		break;
++	}
+ 
+ 	switch (tp->mac_version) {
+ 	case RTL_GIGA_MAC_VER_34:
+diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
+index 84fa911c78db55..fe0bf1d3217af2 100644
+--- a/drivers/net/ethernet/rocker/rocker_main.c
++++ b/drivers/net/ethernet/rocker/rocker_main.c
+@@ -2502,7 +2502,7 @@ static void rocker_carrier_init(const struct rocker_port *rocker_port)
+ 	u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS);
+ 	bool link_up;
+ 
+-	link_up = link_status & (1 << rocker_port->pport);
++	link_up = link_status & (1ULL << rocker_port->pport);
+ 	if (link_up)
+ 		netif_carrier_on(rocker_port->dev);
+ 	else
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
+index 93a78fd0737b6c..28fff6cab812e4 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
+@@ -44,6 +44,7 @@
+ #define GMAC_MDIO_DATA			0x00000204
+ #define GMAC_GPIO_STATUS		0x0000020C
+ #define GMAC_ARP_ADDR			0x00000210
++#define GMAC_EXT_CFG1			0x00000238
+ #define GMAC_ADDR_HIGH(reg)		(0x300 + reg * 8)
+ #define GMAC_ADDR_LOW(reg)		(0x304 + reg * 8)
+ #define GMAC_L3L4_CTRL(reg)		(0x900 + (reg) * 0x30)
+@@ -284,6 +285,10 @@ enum power_event {
+ #define GMAC_HW_FEAT_DVLAN		BIT(5)
+ #define GMAC_HW_FEAT_NRVF		GENMASK(2, 0)
+ 
++/* MAC extended config 1 */
++#define GMAC_CONFIG1_SAVE_EN		BIT(24)
++#define GMAC_CONFIG1_SPLM(v)		FIELD_PREP(GENMASK(9, 8), v)
++
+ /* GMAC GPIO Status reg */
+ #define GMAC_GPO0			BIT(16)
+ #define GMAC_GPO1			BIT(17)
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
+index 77b35abc6f6fa4..22a044d93e172f 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
+@@ -534,6 +534,11 @@ static void dwmac4_enable_sph(struct stmmac_priv *priv, void __iomem *ioaddr,
+ 	value |= GMAC_CONFIG_HDSMS_256; /* Segment max 256 bytes */
+ 	writel(value, ioaddr + GMAC_EXT_CONFIG);
+ 
++	value = readl(ioaddr + GMAC_EXT_CFG1);
++	value |= GMAC_CONFIG1_SPLM(1); /* Split mode set to L2OFST */
++	value |= GMAC_CONFIG1_SAVE_EN; /* Enable Split AV mode */
++	writel(value, ioaddr + GMAC_EXT_CFG1);
++
+ 	value = readl(ioaddr + DMA_CHAN_CONTROL(dwmac4_addrs, chan));
+ 	if (en)
+ 		value |= DMA_CONTROL_SPH;
+diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
+index 7f611c74eb629b..ba15a0a4ce629e 100644
+--- a/drivers/net/geneve.c
++++ b/drivers/net/geneve.c
+@@ -895,7 +895,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
+ 		if (geneve->cfg.df == GENEVE_DF_SET) {
+ 			df = htons(IP_DF);
+ 		} else if (geneve->cfg.df == GENEVE_DF_INHERIT) {
+-			struct ethhdr *eth = eth_hdr(skb);
++			struct ethhdr *eth = skb_eth_hdr(skb);
+ 
+ 			if (ntohs(eth->h_proto) == ETH_P_IPV6) {
+ 				df = htons(IP_DF);
+diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c
+index d3273bc0da4a1f..691969a4910f2b 100644
+--- a/drivers/net/phy/microchip.c
++++ b/drivers/net/phy/microchip.c
+@@ -351,6 +351,22 @@ static int lan88xx_config_aneg(struct phy_device *phydev)
+ static void lan88xx_link_change_notify(struct phy_device *phydev)
+ {
+ 	int temp;
++	int ret;
++
++	/* Reset PHY to ensure MII_LPA provides up-to-date information. This
++	 * issue is reproducible only after parallel detection, as described
++	 * in IEEE 802.3-2022, Section 28.2.3.1 ("Parallel detection function"),
++	 * where the link partner does not support auto-negotiation.
++	 */
++	if (phydev->state == PHY_NOLINK) {
++		ret = phy_init_hw(phydev);
++		if (ret < 0)
++			goto link_change_notify_failed;
++
++		ret = _phy_start_aneg(phydev);
++		if (ret < 0)
++			goto link_change_notify_failed;
++	}
+ 
+ 	/* At forced 100 F/H mode, chip may fail to set mode correctly
+ 	 * when cable is switched between long(~50+m) and short one.
+@@ -377,6 +393,11 @@ static void lan88xx_link_change_notify(struct phy_device *phydev)
+ 		temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
+ 		phy_write(phydev, LAN88XX_INT_MASK, temp);
+ 	}
++
++	return;
++
++link_change_notify_failed:
++	phydev_err(phydev, "Link change process failed %pe\n", ERR_PTR(ret));
+ }
+ 
+ /**
+diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
+index a5684ef5884bda..dcec92625cf651 100644
+--- a/drivers/net/phy/sfp.c
++++ b/drivers/net/phy/sfp.c
+@@ -466,7 +466,8 @@ static void sfp_quirk_ubnt_uf_instant(const struct sfp_eeprom_id *id,
+ static const struct sfp_quirk sfp_quirks[] = {
+ 	// Alcatel Lucent G-010S-P can operate at 2500base-X, but incorrectly
+ 	// report 2500MBd NRZ in their EEPROM
+-	SFP_QUIRK_M("ALCATELLUCENT", "G010SP", sfp_quirk_2500basex),
++	SFP_QUIRK("ALCATELLUCENT", "G010SP", sfp_quirk_2500basex,
++		  sfp_fixup_ignore_tx_fault),
+ 
+ 	// Alcatel Lucent G-010S-A can operate at 2500base-X, but report 3.2GBd
+ 	// NRZ in their EEPROM
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index 53a038fcbe991d..c897afef0b414c 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -946,9 +946,6 @@ static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp)
+ 	void *buf, *head;
+ 	dma_addr_t addr;
+ 
+-	if (unlikely(!skb_page_frag_refill(size, alloc_frag, gfp)))
+-		return NULL;
+-
+ 	head = page_address(alloc_frag->page);
+ 
+ 	if (rq->do_dma) {
+@@ -2443,6 +2440,9 @@ static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
+ 	len = SKB_DATA_ALIGN(len) +
+ 	      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ 
++	if (unlikely(!skb_page_frag_refill(len, &rq->alloc_frag, gfp)))
++		return -ENOMEM;
++
+ 	buf = virtnet_rq_alloc(rq, len, gfp);
+ 	if (unlikely(!buf))
+ 		return -ENOMEM;
+@@ -2545,6 +2545,12 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi,
+ 	 */
+ 	len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room);
+ 
++	if (unlikely(!skb_page_frag_refill(len + room, alloc_frag, gfp)))
++		return -ENOMEM;
++
++	if (!alloc_frag->offset && len + room + sizeof(struct virtnet_rq_dma) > alloc_frag->size)
++		len -= sizeof(struct virtnet_rq_dma);
++
+ 	buf = virtnet_rq_alloc(rq, len + room, gfp);
+ 	if (unlikely(!buf))
+ 		return -ENOMEM;
+diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c
+index 08a6f36a6be9cb..6805357ee29e6d 100644
+--- a/drivers/net/wireless/ath/ath10k/sdio.c
++++ b/drivers/net/wireless/ath/ath10k/sdio.c
+@@ -3,7 +3,7 @@
+  * Copyright (c) 2004-2011 Atheros Communications Inc.
+  * Copyright (c) 2011-2012,2017 Qualcomm Atheros, Inc.
+  * Copyright (c) 2016-2017 Erik Stromdahl <erik.stromdahl@gmail.com>
+- * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+  */
+ 
+ #include <linux/module.h>
+@@ -2648,9 +2648,9 @@ static void ath10k_sdio_remove(struct sdio_func *func)
+ 
+ 	netif_napi_del(&ar->napi);
+ 
+-	ath10k_core_destroy(ar);
+-
+ 	destroy_workqueue(ar_sdio->workqueue);
++
++	ath10k_core_destroy(ar);
+ }
+ 
+ static const struct sdio_device_id ath10k_sdio_devices[] = {
+diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c
+index 6d0784a21558ea..8946141aa0dce6 100644
+--- a/drivers/net/wireless/ath/ath12k/mac.c
++++ b/drivers/net/wireless/ath/ath12k/mac.c
+@@ -8186,9 +8186,9 @@ ath12k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
+ 				    arvif->vdev_id, ret);
+ 			goto out;
+ 		}
+-		ieee80211_iterate_stations_atomic(hw,
+-						  ath12k_mac_disable_peer_fixed_rate,
+-						  arvif);
++		ieee80211_iterate_stations_mtx(hw,
++					       ath12k_mac_disable_peer_fixed_rate,
++					       arvif);
+ 	} else if (ath12k_mac_bitrate_mask_get_single_nss(ar, band, mask,
+ 							  &single_nss)) {
+ 		rate = WMI_FIXED_RATE_NONE;
+@@ -8233,16 +8233,16 @@ ath12k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
+ 			goto out;
+ 		}
+ 
+-		ieee80211_iterate_stations_atomic(hw,
+-						  ath12k_mac_disable_peer_fixed_rate,
+-						  arvif);
++		ieee80211_iterate_stations_mtx(hw,
++					       ath12k_mac_disable_peer_fixed_rate,
++					       arvif);
+ 
+ 		mutex_lock(&ar->conf_mutex);
+ 
+ 		arvif->bitrate_mask = *mask;
+-		ieee80211_iterate_stations_atomic(hw,
+-						  ath12k_mac_set_bitrate_mask_iter,
+-						  arvif);
++		ieee80211_iterate_stations_mtx(hw,
++					       ath12k_mac_set_bitrate_mask_iter,
++					       arvif);
+ 
+ 		mutex_unlock(&ar->conf_mutex);
+ 	}
+diff --git a/drivers/net/wireless/ath/ath5k/pci.c b/drivers/net/wireless/ath/ath5k/pci.c
+index b51fce5ae26020..f5ca2fe0d07490 100644
+--- a/drivers/net/wireless/ath/ath5k/pci.c
++++ b/drivers/net/wireless/ath/ath5k/pci.c
+@@ -46,6 +46,8 @@ static const struct pci_device_id ath5k_pci_id_table[] = {
+ 	{ PCI_VDEVICE(ATHEROS, 0x001b) }, /* 5413 Eagle */
+ 	{ PCI_VDEVICE(ATHEROS, 0x001c) }, /* PCI-E cards */
+ 	{ PCI_VDEVICE(ATHEROS, 0x001d) }, /* 2417 Nala */
++	{ PCI_VDEVICE(ATHEROS, 0xff16) }, /* Gigaset SX76[23] AR241[34]A */
++	{ PCI_VDEVICE(ATHEROS, 0xff1a) }, /* Arcadyan ARV45XX AR2417 */
+ 	{ PCI_VDEVICE(ATHEROS, 0xff1b) }, /* AR5BXB63 */
+ 	{ 0 }
+ };
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+index d35262335eaf79..8a1e3376424487 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+@@ -770,7 +770,7 @@ void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev)
+ 
+ 	nents = max_t(uint, BRCMF_DEFAULT_RXGLOM_SIZE,
+ 		      sdiodev->settings->bus.sdio.txglomsz);
+-	nents += (nents >> 4) + 1;
++	nents *= 2;
+ 
+ 	WARN_ON(nents > sdiodev->max_segment_count);
+ 
+diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_rx.c b/drivers/net/wireless/intel/ipw2x00/libipw_rx.c
+index 48d6870bbf4e25..9a97ab9b89ae8b 100644
+--- a/drivers/net/wireless/intel/ipw2x00/libipw_rx.c
++++ b/drivers/net/wireless/intel/ipw2x00/libipw_rx.c
+@@ -870,8 +870,8 @@ void libipw_rx_any(struct libipw_device *ieee,
+ 	switch (ieee->iw_mode) {
+ 	case IW_MODE_ADHOC:
+ 		/* our BSS and not from/to DS */
+-		if (ether_addr_equal(hdr->addr3, ieee->bssid))
+-		if ((fc & (IEEE80211_FCTL_TODS+IEEE80211_FCTL_FROMDS)) == 0) {
++		if (ether_addr_equal(hdr->addr3, ieee->bssid) &&
++		    ((fc & (IEEE80211_FCTL_TODS + IEEE80211_FCTL_FROMDS)) == 0)) {
+ 			/* promisc: get all */
+ 			if (ieee->dev->flags & IFF_PROMISC)
+ 				is_packet_for_us = 1;
+@@ -885,8 +885,8 @@ void libipw_rx_any(struct libipw_device *ieee,
+ 		break;
+ 	case IW_MODE_INFRA:
+ 		/* our BSS (== from our AP) and from DS */
+-		if (ether_addr_equal(hdr->addr2, ieee->bssid))
+-		if ((fc & (IEEE80211_FCTL_TODS+IEEE80211_FCTL_FROMDS)) == IEEE80211_FCTL_FROMDS) {
++		if (ether_addr_equal(hdr->addr2, ieee->bssid) &&
++		    ((fc & (IEEE80211_FCTL_TODS + IEEE80211_FCTL_FROMDS)) == IEEE80211_FCTL_FROMDS)) {
+ 			/* promisc: get all */
+ 			if (ieee->dev->flags & IFF_PROMISC)
+ 				is_packet_for_us = 1;
+diff --git a/drivers/net/wireless/realtek/rtw88/sdio.c b/drivers/net/wireless/realtek/rtw88/sdio.c
+index 21d0754dd7f6ac..b67e551fcee3ef 100644
+--- a/drivers/net/wireless/realtek/rtw88/sdio.c
++++ b/drivers/net/wireless/realtek/rtw88/sdio.c
+@@ -1297,12 +1297,12 @@ static void rtw_sdio_deinit_tx(struct rtw_dev *rtwdev)
+ 	struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
+ 	int i;
+ 
+-	for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++)
+-		skb_queue_purge(&rtwsdio->tx_queue[i]);
+-
+ 	flush_workqueue(rtwsdio->txwq);
+ 	destroy_workqueue(rtwsdio->txwq);
+ 	kfree(rtwsdio->tx_handler_data);
++
++	for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++)
++		ieee80211_purge_tx_queue(rtwdev->hw, &rtwsdio->tx_queue[i]);
+ }
+ 
+ int rtw_sdio_probe(struct sdio_func *sdio_func,
+diff --git a/drivers/net/wireless/realtek/rtw88/usb.c b/drivers/net/wireless/realtek/rtw88/usb.c
+index b17a429bcd2994..07695294767acb 100644
+--- a/drivers/net/wireless/realtek/rtw88/usb.c
++++ b/drivers/net/wireless/realtek/rtw88/usb.c
+@@ -423,10 +423,11 @@ static void rtw_usb_tx_handler(struct work_struct *work)
+ 
+ static void rtw_usb_tx_queue_purge(struct rtw_usb *rtwusb)
+ {
++	struct rtw_dev *rtwdev = rtwusb->rtwdev;
+ 	int i;
+ 
+ 	for (i = 0; i < ARRAY_SIZE(rtwusb->tx_queue); i++)
+-		skb_queue_purge(&rtwusb->tx_queue[i]);
++		ieee80211_purge_tx_queue(rtwdev->hw, &rtwusb->tx_queue[i]);
+ }
+ 
+ static void rtw_usb_write_port_complete(struct urb *urb)
+@@ -888,9 +889,9 @@ static void rtw_usb_deinit_tx(struct rtw_dev *rtwdev)
+ {
+ 	struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
+ 
+-	rtw_usb_tx_queue_purge(rtwusb);
+ 	flush_workqueue(rtwusb->txwq);
+ 	destroy_workqueue(rtwusb->txwq);
++	rtw_usb_tx_queue_purge(rtwusb);
+ }
+ 
+ static int rtw_usb_intf_init(struct rtw_dev *rtwdev,
+diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c
+index 13a7c39ceb6f55..e6bceef691e9be 100644
+--- a/drivers/net/wireless/realtek/rtw89/fw.c
++++ b/drivers/net/wireless/realtek/rtw89/fw.c
+@@ -6074,6 +6074,9 @@ static int rtw89_update_6ghz_rnr_chan(struct rtw89_dev *rtwdev,
+ 
+ 		skb = ieee80211_probereq_get(rtwdev->hw, rtwvif_link->mac_addr,
+ 					     NULL, 0, req->ie_len);
++		if (!skb)
++			return -ENOMEM;
++
+ 		skb_put_data(skb, ies->ies[NL80211_BAND_6GHZ], ies->len[NL80211_BAND_6GHZ]);
+ 		skb_put_data(skb, ies->common_ies, ies->common_ie_len);
+ 		hdr = (struct ieee80211_hdr *)skb->data;
+diff --git a/drivers/nvdimm/dax_devs.c b/drivers/nvdimm/dax_devs.c
+index 6b4922de30477e..37b743acbb7bad 100644
+--- a/drivers/nvdimm/dax_devs.c
++++ b/drivers/nvdimm/dax_devs.c
+@@ -106,12 +106,12 @@ int nd_dax_probe(struct device *dev, struct nd_namespace_common *ndns)
+ 
+ 	nvdimm_bus_lock(&ndns->dev);
+ 	nd_dax = nd_dax_alloc(nd_region);
+-	nd_pfn = &nd_dax->nd_pfn;
+-	dax_dev = nd_pfn_devinit(nd_pfn, ndns);
++	dax_dev = nd_dax_devinit(nd_dax, ndns);
+ 	nvdimm_bus_unlock(&ndns->dev);
+ 	if (!dax_dev)
+ 		return -ENOMEM;
+ 	pfn_sb = devm_kmalloc(dev, sizeof(*pfn_sb), GFP_KERNEL);
++	nd_pfn = &nd_dax->nd_pfn;
+ 	nd_pfn->pfn_sb = pfn_sb;
+ 	rc = nd_pfn_validate(nd_pfn, DAX_SIG);
+ 	dev_dbg(dev, "dax: %s\n", rc == 0 ? dev_name(dax_dev) : "<none>");
+diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
+index 2dbb1dca17b534..5ca06e9a2d2925 100644
+--- a/drivers/nvdimm/nd.h
++++ b/drivers/nvdimm/nd.h
+@@ -600,6 +600,13 @@ struct nd_dax *to_nd_dax(struct device *dev);
+ int nd_dax_probe(struct device *dev, struct nd_namespace_common *ndns);
+ bool is_nd_dax(const struct device *dev);
+ struct device *nd_dax_create(struct nd_region *nd_region);
++static inline struct device *nd_dax_devinit(struct nd_dax *nd_dax,
++					    struct nd_namespace_common *ndns)
++{
++	if (!nd_dax)
++		return NULL;
++	return nd_pfn_devinit(&nd_dax->nd_pfn, ndns);
++}
+ #else
+ static inline int nd_dax_probe(struct device *dev,
+ 		struct nd_namespace_common *ndns)
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index f0d4c6f3cb0555..249914b90dbfa7 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -1303,9 +1303,10 @@ static void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl)
+ 	queue_delayed_work(nvme_wq, &ctrl->ka_work, delay);
+ }
+ 
+-static void nvme_keep_alive_finish(struct request *rq,
+-		blk_status_t status, struct nvme_ctrl *ctrl)
++static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
++						 blk_status_t status)
+ {
++	struct nvme_ctrl *ctrl = rq->end_io_data;
+ 	unsigned long rtt = jiffies - (rq->deadline - rq->timeout);
+ 	unsigned long delay = nvme_keep_alive_work_period(ctrl);
+ 	enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
+@@ -1322,17 +1323,20 @@ static void nvme_keep_alive_finish(struct request *rq,
+ 		delay = 0;
+ 	}
+ 
++	blk_mq_free_request(rq);
++
+ 	if (status) {
+ 		dev_err(ctrl->device,
+ 			"failed nvme_keep_alive_end_io error=%d\n",
+ 				status);
+-		return;
++		return RQ_END_IO_NONE;
+ 	}
+ 
+ 	ctrl->ka_last_check_time = jiffies;
+ 	ctrl->comp_seen = false;
+ 	if (state == NVME_CTRL_LIVE || state == NVME_CTRL_CONNECTING)
+ 		queue_delayed_work(nvme_wq, &ctrl->ka_work, delay);
++	return RQ_END_IO_NONE;
+ }
+ 
+ static void nvme_keep_alive_work(struct work_struct *work)
+@@ -1341,7 +1345,6 @@ static void nvme_keep_alive_work(struct work_struct *work)
+ 			struct nvme_ctrl, ka_work);
+ 	bool comp_seen = ctrl->comp_seen;
+ 	struct request *rq;
+-	blk_status_t status;
+ 
+ 	ctrl->ka_last_check_time = jiffies;
+ 
+@@ -1364,9 +1367,9 @@ static void nvme_keep_alive_work(struct work_struct *work)
+ 	nvme_init_request(rq, &ctrl->ka_cmd);
+ 
+ 	rq->timeout = ctrl->kato * HZ;
+-	status = blk_execute_rq(rq, false);
+-	nvme_keep_alive_finish(rq, status, ctrl);
+-	blk_mq_free_request(rq);
++	rq->end_io = nvme_keep_alive_end_io;
++	rq->end_io_data = ctrl;
++	blk_execute_rq_nowait(rq, false);
+ }
+ 
+ static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
+@@ -2064,7 +2067,8 @@ static bool nvme_update_disk_info(struct nvme_ns *ns, struct nvme_id_ns *id,
+ 	lim->physical_block_size = min(phys_bs, atomic_bs);
+ 	lim->io_min = phys_bs;
+ 	lim->io_opt = io_opt;
+-	if (ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
++	if ((ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES) &&
++	    (ns->ctrl->oncs & NVME_CTRL_ONCS_DSM))
+ 		lim->max_write_zeroes_sectors = UINT_MAX;
+ 	else
+ 		lim->max_write_zeroes_sectors = ns->ctrl->max_zeroes_sectors;
+@@ -3250,8 +3254,9 @@ static int nvme_check_ctrl_fabric_info(struct nvme_ctrl *ctrl, struct nvme_id_ct
+ 	}
+ 
+ 	if (!ctrl->maxcmd) {
+-		dev_err(ctrl->device, "Maximum outstanding commands is 0\n");
+-		return -EINVAL;
++		dev_warn(ctrl->device,
++			"Firmware bug: maximum outstanding commands is 0\n");
++		ctrl->maxcmd = ctrl->sqsize + 1;
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
+index 24a2759798d01e..913e6e5a80705f 100644
+--- a/drivers/nvme/host/rdma.c
++++ b/drivers/nvme/host/rdma.c
+@@ -1091,13 +1091,7 @@ static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
+ 	}
+ destroy_admin:
+ 	nvme_stop_keep_alive(&ctrl->ctrl);
+-	nvme_quiesce_admin_queue(&ctrl->ctrl);
+-	blk_sync_queue(ctrl->ctrl.admin_q);
+-	nvme_rdma_stop_queue(&ctrl->queues[0]);
+-	nvme_cancel_admin_tagset(&ctrl->ctrl);
+-	if (new)
+-		nvme_remove_admin_tag_set(&ctrl->ctrl);
+-	nvme_rdma_destroy_admin_queue(ctrl);
++	nvme_rdma_teardown_admin_queue(ctrl, new);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
+index 3e416af2659f19..55abfe5e1d2548 100644
+--- a/drivers/nvme/host/tcp.c
++++ b/drivers/nvme/host/tcp.c
+@@ -2278,7 +2278,7 @@ static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
+ 	}
+ destroy_admin:
+ 	nvme_stop_keep_alive(ctrl);
+-	nvme_tcp_teardown_admin_queue(ctrl, false);
++	nvme_tcp_teardown_admin_queue(ctrl, new);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
+index b5447228696dc4..6483e1874477ef 100644
+--- a/drivers/pci/controller/dwc/pcie-qcom.c
++++ b/drivers/pci/controller/dwc/pcie-qcom.c
+@@ -1830,6 +1830,7 @@ static const struct of_device_id qcom_pcie_match[] = {
+ 	{ .compatible = "qcom,pcie-ipq8064-v2", .data = &cfg_2_1_0 },
+ 	{ .compatible = "qcom,pcie-ipq8074", .data = &cfg_2_3_3 },
+ 	{ .compatible = "qcom,pcie-ipq8074-gen3", .data = &cfg_2_9_0 },
++	{ .compatible = "qcom,pcie-ipq9574", .data = &cfg_2_9_0 },
+ 	{ .compatible = "qcom,pcie-msm8996", .data = &cfg_2_3_2 },
+ 	{ .compatible = "qcom,pcie-qcs404", .data = &cfg_2_4_0 },
+ 	{ .compatible = "qcom,pcie-sa8540p", .data = &cfg_sc8280xp },
+diff --git a/drivers/pci/controller/plda/pcie-starfive.c b/drivers/pci/controller/plda/pcie-starfive.c
+index c9933ecf683382..0564fdce47c2a3 100644
+--- a/drivers/pci/controller/plda/pcie-starfive.c
++++ b/drivers/pci/controller/plda/pcie-starfive.c
+@@ -404,6 +404,9 @@ static int starfive_pcie_probe(struct platform_device *pdev)
+ 	if (ret)
+ 		return ret;
+ 
++	pm_runtime_enable(&pdev->dev);
++	pm_runtime_get_sync(&pdev->dev);
++
+ 	plda->host_ops = &sf_host_ops;
+ 	plda->num_events = PLDA_MAX_EVENT_NUM;
+ 	/* mask doorbell event */
+@@ -413,11 +416,12 @@ static int starfive_pcie_probe(struct platform_device *pdev)
+ 	plda->events_bitmap <<= PLDA_NUM_DMA_EVENTS;
+ 	ret = plda_pcie_host_init(&pcie->plda, &starfive_pcie_ops,
+ 				  &stf_pcie_event);
+-	if (ret)
++	if (ret) {
++		pm_runtime_put_sync(&pdev->dev);
++		pm_runtime_disable(&pdev->dev);
+ 		return ret;
++	}
+ 
+-	pm_runtime_enable(&pdev->dev);
+-	pm_runtime_get_sync(&pdev->dev);
+ 	platform_set_drvdata(pdev, pcie);
+ 
+ 	return 0;
+diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
+index 264a180403a0ec..9d9596947350f5 100644
+--- a/drivers/pci/controller/vmd.c
++++ b/drivers/pci/controller/vmd.c
+@@ -740,11 +740,9 @@ static int vmd_pm_enable_quirk(struct pci_dev *pdev, void *userdata)
+ 	if (!(features & VMD_FEAT_BIOS_PM_QUIRK))
+ 		return 0;
+ 
+-	pci_enable_link_state_locked(pdev, PCIE_LINK_STATE_ALL);
+-
+ 	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_LTR);
+ 	if (!pos)
+-		return 0;
++		goto out_state_change;
+ 
+ 	/*
+ 	 * Skip if the max snoop LTR is non-zero, indicating BIOS has set it
+@@ -752,7 +750,7 @@ static int vmd_pm_enable_quirk(struct pci_dev *pdev, void *userdata)
+ 	 */
+ 	pci_read_config_dword(pdev, pos + PCI_LTR_MAX_SNOOP_LAT, &ltr_reg);
+ 	if (!!(ltr_reg & (PCI_LTR_VALUE_MASK | PCI_LTR_SCALE_MASK)))
+-		return 0;
++		goto out_state_change;
+ 
+ 	/*
+ 	 * Set the default values to the maximum required by the platform to
+@@ -764,6 +762,13 @@ static int vmd_pm_enable_quirk(struct pci_dev *pdev, void *userdata)
+ 	pci_write_config_dword(pdev, pos + PCI_LTR_MAX_SNOOP_LAT, ltr_reg);
+ 	pci_info(pdev, "VMD: Default LTR value set by driver\n");
+ 
++out_state_change:
++	/*
++	 * Ensure devices are in D0 before enabling PCI-PM L1 PM Substates, per
++	 * PCIe r6.0, sec 5.5.4.
++	 */
++	pci_set_power_state_locked(pdev, PCI_D0);
++	pci_enable_link_state_locked(pdev, PCIE_LINK_STATE_ALL);
+ 	return 0;
+ }
+ 
+@@ -1100,6 +1105,10 @@ static const struct pci_device_id vmd_ids[] = {
+ 		.driver_data = VMD_FEATS_CLIENT,},
+ 	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_VMD_9A0B),
+ 		.driver_data = VMD_FEATS_CLIENT,},
++	{PCI_VDEVICE(INTEL, 0xb60b),
++		.driver_data = VMD_FEATS_CLIENT,},
++	{PCI_VDEVICE(INTEL, 0xb06f),
++		.driver_data = VMD_FEATS_CLIENT,},
+ 	{0,}
+ };
+ MODULE_DEVICE_TABLE(pci, vmd_ids);
+diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
+index 5d0f4db1cab786..3e5a117f5b5d60 100644
+--- a/drivers/pci/pci-sysfs.c
++++ b/drivers/pci/pci-sysfs.c
+@@ -521,6 +521,31 @@ static ssize_t bus_rescan_store(struct device *dev,
+ static struct device_attribute dev_attr_bus_rescan = __ATTR(rescan, 0200, NULL,
+ 							    bus_rescan_store);
+ 
++static ssize_t reset_subordinate_store(struct device *dev,
++				struct device_attribute *attr,
++				const char *buf, size_t count)
++{
++	struct pci_dev *pdev = to_pci_dev(dev);
++	struct pci_bus *bus = pdev->subordinate;
++	unsigned long val;
++
++	if (!capable(CAP_SYS_ADMIN))
++		return -EPERM;
++
++	if (kstrtoul(buf, 0, &val) < 0)
++		return -EINVAL;
++
++	if (val) {
++		int ret = __pci_reset_bus(bus);
++
++		if (ret)
++			return ret;
++	}
++
++	return count;
++}
++static DEVICE_ATTR_WO(reset_subordinate);
++
+ #if defined(CONFIG_PM) && defined(CONFIG_ACPI)
+ static ssize_t d3cold_allowed_store(struct device *dev,
+ 				    struct device_attribute *attr,
+@@ -625,6 +650,7 @@ static struct attribute *pci_dev_attrs[] = {
+ static struct attribute *pci_bridge_attrs[] = {
+ 	&dev_attr_subordinate_bus_number.attr,
+ 	&dev_attr_secondary_bus_number.attr,
++	&dev_attr_reset_subordinate.attr,
+ 	NULL,
+ };
+ 
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 08f170fd3efb3e..dd3c6dcb47ae4a 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -5885,7 +5885,7 @@ EXPORT_SYMBOL_GPL(pci_probe_reset_bus);
+  *
+  * Same as above except return -EAGAIN if the bus cannot be locked
+  */
+-static int __pci_reset_bus(struct pci_bus *bus)
++int __pci_reset_bus(struct pci_bus *bus)
+ {
+ 	int rc;
+ 
+diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
+index 14d00ce45bfa95..1cdc2c9547a7e1 100644
+--- a/drivers/pci/pci.h
++++ b/drivers/pci/pci.h
+@@ -104,6 +104,7 @@ bool pci_reset_supported(struct pci_dev *dev);
+ void pci_init_reset_methods(struct pci_dev *dev);
+ int pci_bridge_secondary_bus_reset(struct pci_dev *dev);
+ int pci_bus_error_reset(struct pci_dev *dev);
++int __pci_reset_bus(struct pci_bus *bus);
+ 
+ struct pci_cap_saved_data {
+ 	u16		cap_nr;
+diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
+index f1615805f5b078..ebb0c1d5cae255 100644
+--- a/drivers/pci/probe.c
++++ b/drivers/pci/probe.c
+@@ -1633,23 +1633,33 @@ static void set_pcie_thunderbolt(struct pci_dev *dev)
+ 
+ static void set_pcie_untrusted(struct pci_dev *dev)
+ {
+-	struct pci_dev *parent;
++	struct pci_dev *parent = pci_upstream_bridge(dev);
+ 
++	if (!parent)
++		return;
+ 	/*
+-	 * If the upstream bridge is untrusted we treat this device
++	 * If the upstream bridge is untrusted we treat this device as
+ 	 * untrusted as well.
+ 	 */
+-	parent = pci_upstream_bridge(dev);
+-	if (parent && (parent->untrusted || parent->external_facing))
++	if (parent->untrusted) {
++		dev->untrusted = true;
++		return;
++	}
++
++	if (arch_pci_dev_is_removable(dev)) {
++		pci_dbg(dev, "marking as untrusted\n");
+ 		dev->untrusted = true;
++	}
+ }
+ 
+ static void pci_set_removable(struct pci_dev *dev)
+ {
+ 	struct pci_dev *parent = pci_upstream_bridge(dev);
+ 
++	if (!parent)
++		return;
+ 	/*
+-	 * We (only) consider everything downstream from an external_facing
++	 * We (only) consider everything tunneled below an external_facing
+ 	 * device to be removable by the user. We're mainly concerned with
+ 	 * consumer platforms with user accessible thunderbolt ports that are
+ 	 * vulnerable to DMA attacks, and we expect those ports to be marked by
+@@ -1659,9 +1669,15 @@ static void pci_set_removable(struct pci_dev *dev)
+ 	 * accessible to user / may not be removed by end user, and thus not
+ 	 * exposed as "removable" to userspace.
+ 	 */
+-	if (parent &&
+-	    (parent->external_facing || dev_is_removable(&parent->dev)))
++	if (dev_is_removable(&parent->dev)) {
++		dev_set_removable(&dev->dev, DEVICE_REMOVABLE);
++		return;
++	}
++
++	if (arch_pci_dev_is_removable(dev)) {
++		pci_dbg(dev, "marking as removable\n");
+ 		dev_set_removable(&dev->dev, DEVICE_REMOVABLE);
++	}
+ }
+ 
+ /**
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index dccb60c1d9cc3d..8103bc24a54ea4 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -4996,18 +4996,21 @@ static int pci_quirk_brcm_acs(struct pci_dev *dev, u16 acs_flags)
+ }
+ 
+ /*
+- * Wangxun 10G/1G NICs have no ACS capability, and on multi-function
+- * devices, peer-to-peer transactions are not be used between the functions.
+- * So add an ACS quirk for below devices to isolate functions.
++ * Wangxun 40G/25G/10G/1G NICs have no ACS capability, but on
++ * multi-function devices, the hardware isolates the functions by
++ * directing all peer-to-peer traffic upstream as though PCI_ACS_RR and
++ * PCI_ACS_CR were set.
+  * SFxxx 1G NICs(em).
+  * RP1000/RP2000 10G NICs(sp).
++ * FF5xxx 40G/25G/10G NICs(aml).
+  */
+ static int  pci_quirk_wangxun_nic_acs(struct pci_dev *dev, u16 acs_flags)
+ {
+ 	switch (dev->device) {
+-	case 0x0100 ... 0x010F:
+-	case 0x1001:
+-	case 0x2001:
++	case 0x0100 ... 0x010F: /* EM */
++	case 0x1001: case 0x2001: /* SP */
++	case 0x5010: case 0x5025: case 0x5040: /* AML */
++	case 0x5110: case 0x5125: case 0x5140: /* AML */
+ 		return pci_acs_ctrl_enabled(acs_flags,
+ 			PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
+ 	}
+diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
+index 4061890a174835..b3eec63c00ba04 100644
+--- a/drivers/pinctrl/core.c
++++ b/drivers/pinctrl/core.c
+@@ -220,6 +220,9 @@ static int pinctrl_register_one_pin(struct pinctrl_dev *pctldev,
+ 
+ 	/* Set owner */
+ 	pindesc->pctldev = pctldev;
++#ifdef CONFIG_PINMUX
++	mutex_init(&pindesc->mux_lock);
++#endif
+ 
+ 	/* Copy basic pin info */
+ 	if (pin->name) {
+diff --git a/drivers/pinctrl/core.h b/drivers/pinctrl/core.h
+index 4e07707d2435bd..d6c24978e7081a 100644
+--- a/drivers/pinctrl/core.h
++++ b/drivers/pinctrl/core.h
+@@ -177,6 +177,7 @@ struct pin_desc {
+ 	const char *mux_owner;
+ 	const struct pinctrl_setting_mux *mux_setting;
+ 	const char *gpio_owner;
++	struct mutex mux_lock;
+ #endif
+ };
+ 
+diff --git a/drivers/pinctrl/freescale/Kconfig b/drivers/pinctrl/freescale/Kconfig
+index 3b59d71890045b..139bc0fb8a9dbf 100644
+--- a/drivers/pinctrl/freescale/Kconfig
++++ b/drivers/pinctrl/freescale/Kconfig
+@@ -20,7 +20,7 @@ config PINCTRL_IMX_SCMI
+ 
+ config PINCTRL_IMX_SCU
+ 	tristate
+-	depends on IMX_SCU
++	depends on IMX_SCU || COMPILE_TEST
+ 	select PINCTRL_IMX
+ 
+ config PINCTRL_IMX1_CORE
+diff --git a/drivers/pinctrl/pinmux.c b/drivers/pinctrl/pinmux.c
+index 02033ea1c64384..0743190da59e81 100644
+--- a/drivers/pinctrl/pinmux.c
++++ b/drivers/pinctrl/pinmux.c
+@@ -14,6 +14,7 @@
+ 
+ #include <linux/array_size.h>
+ #include <linux/ctype.h>
++#include <linux/cleanup.h>
+ #include <linux/debugfs.h>
+ #include <linux/device.h>
+ #include <linux/err.h>
+@@ -93,6 +94,7 @@ bool pinmux_can_be_used_for_gpio(struct pinctrl_dev *pctldev, unsigned int pin)
+ 	if (!desc || !ops)
+ 		return true;
+ 
++	guard(mutex)(&desc->mux_lock);
+ 	if (ops->strict && desc->mux_usecount)
+ 		return false;
+ 
+@@ -127,29 +129,31 @@ static int pin_request(struct pinctrl_dev *pctldev,
+ 	dev_dbg(pctldev->dev, "request pin %d (%s) for %s\n",
+ 		pin, desc->name, owner);
+ 
+-	if ((!gpio_range || ops->strict) &&
+-	    desc->mux_usecount && strcmp(desc->mux_owner, owner)) {
+-		dev_err(pctldev->dev,
+-			"pin %s already requested by %s; cannot claim for %s\n",
+-			desc->name, desc->mux_owner, owner);
+-		goto out;
+-	}
++	scoped_guard(mutex, &desc->mux_lock) {
++		if ((!gpio_range || ops->strict) &&
++		    desc->mux_usecount && strcmp(desc->mux_owner, owner)) {
++			dev_err(pctldev->dev,
++				"pin %s already requested by %s; cannot claim for %s\n",
++				desc->name, desc->mux_owner, owner);
++			goto out;
++		}
+ 
+-	if ((gpio_range || ops->strict) && desc->gpio_owner) {
+-		dev_err(pctldev->dev,
+-			"pin %s already requested by %s; cannot claim for %s\n",
+-			desc->name, desc->gpio_owner, owner);
+-		goto out;
+-	}
++		if ((gpio_range || ops->strict) && desc->gpio_owner) {
++			dev_err(pctldev->dev,
++				"pin %s already requested by %s; cannot claim for %s\n",
++				desc->name, desc->gpio_owner, owner);
++			goto out;
++		}
+ 
+-	if (gpio_range) {
+-		desc->gpio_owner = owner;
+-	} else {
+-		desc->mux_usecount++;
+-		if (desc->mux_usecount > 1)
+-			return 0;
++		if (gpio_range) {
++			desc->gpio_owner = owner;
++		} else {
++			desc->mux_usecount++;
++			if (desc->mux_usecount > 1)
++				return 0;
+ 
+-		desc->mux_owner = owner;
++			desc->mux_owner = owner;
++		}
+ 	}
+ 
+ 	/* Let each pin increase references to this module */
+@@ -178,12 +182,14 @@ static int pin_request(struct pinctrl_dev *pctldev,
+ 
+ out_free_pin:
+ 	if (status) {
+-		if (gpio_range) {
+-			desc->gpio_owner = NULL;
+-		} else {
+-			desc->mux_usecount--;
+-			if (!desc->mux_usecount)
+-				desc->mux_owner = NULL;
++		scoped_guard(mutex, &desc->mux_lock) {
++			if (gpio_range) {
++				desc->gpio_owner = NULL;
++			} else {
++				desc->mux_usecount--;
++				if (!desc->mux_usecount)
++					desc->mux_owner = NULL;
++			}
+ 		}
+ 	}
+ out:
+@@ -219,15 +225,17 @@ static const char *pin_free(struct pinctrl_dev *pctldev, int pin,
+ 		return NULL;
+ 	}
+ 
+-	if (!gpio_range) {
+-		/*
+-		 * A pin should not be freed more times than allocated.
+-		 */
+-		if (WARN_ON(!desc->mux_usecount))
+-			return NULL;
+-		desc->mux_usecount--;
+-		if (desc->mux_usecount)
+-			return NULL;
++	scoped_guard(mutex, &desc->mux_lock) {
++		if (!gpio_range) {
++			/*
++			 * A pin should not be freed more times than allocated.
++			 */
++			if (WARN_ON(!desc->mux_usecount))
++				return NULL;
++			desc->mux_usecount--;
++			if (desc->mux_usecount)
++				return NULL;
++		}
+ 	}
+ 
+ 	/*
+@@ -239,13 +247,15 @@ static const char *pin_free(struct pinctrl_dev *pctldev, int pin,
+ 	else if (ops->free)
+ 		ops->free(pctldev, pin);
+ 
+-	if (gpio_range) {
+-		owner = desc->gpio_owner;
+-		desc->gpio_owner = NULL;
+-	} else {
+-		owner = desc->mux_owner;
+-		desc->mux_owner = NULL;
+-		desc->mux_setting = NULL;
++	scoped_guard(mutex, &desc->mux_lock) {
++		if (gpio_range) {
++			owner = desc->gpio_owner;
++			desc->gpio_owner = NULL;
++		} else {
++			owner = desc->mux_owner;
++			desc->mux_owner = NULL;
++			desc->mux_setting = NULL;
++		}
+ 	}
+ 
+ 	module_put(pctldev->owner);
+@@ -458,7 +468,8 @@ int pinmux_enable_setting(const struct pinctrl_setting *setting)
+ 				 pins[i]);
+ 			continue;
+ 		}
+-		desc->mux_setting = &(setting->data.mux);
++		scoped_guard(mutex, &desc->mux_lock)
++			desc->mux_setting = &(setting->data.mux);
+ 	}
+ 
+ 	ret = ops->set_mux(pctldev, setting->data.mux.func,
+@@ -472,8 +483,10 @@ int pinmux_enable_setting(const struct pinctrl_setting *setting)
+ err_set_mux:
+ 	for (i = 0; i < num_pins; i++) {
+ 		desc = pin_desc_get(pctldev, pins[i]);
+-		if (desc)
+-			desc->mux_setting = NULL;
++		if (desc) {
++			scoped_guard(mutex, &desc->mux_lock)
++				desc->mux_setting = NULL;
++		}
+ 	}
+ err_pin_request:
+ 	/* On error release all taken pins */
+@@ -492,6 +505,7 @@ void pinmux_disable_setting(const struct pinctrl_setting *setting)
+ 	unsigned int num_pins = 0;
+ 	int i;
+ 	struct pin_desc *desc;
++	bool is_equal;
+ 
+ 	if (pctlops->get_group_pins)
+ 		ret = pctlops->get_group_pins(pctldev, setting->data.mux.group,
+@@ -517,7 +531,10 @@ void pinmux_disable_setting(const struct pinctrl_setting *setting)
+ 				 pins[i]);
+ 			continue;
+ 		}
+-		if (desc->mux_setting == &(setting->data.mux)) {
++		scoped_guard(mutex, &desc->mux_lock)
++			is_equal = (desc->mux_setting == &(setting->data.mux));
++
++		if (is_equal) {
+ 			pin_free(pctldev, pins[i], NULL);
+ 		} else {
+ 			const char *gname;
+@@ -608,40 +625,42 @@ static int pinmux_pins_show(struct seq_file *s, void *what)
+ 		if (desc == NULL)
+ 			continue;
+ 
+-		if (desc->mux_owner &&
+-		    !strcmp(desc->mux_owner, pinctrl_dev_get_name(pctldev)))
+-			is_hog = true;
+-
+-		if (pmxops->strict) {
+-			if (desc->mux_owner)
+-				seq_printf(s, "pin %d (%s): device %s%s",
+-					   pin, desc->name, desc->mux_owner,
++		scoped_guard(mutex, &desc->mux_lock) {
++			if (desc->mux_owner &&
++			    !strcmp(desc->mux_owner, pinctrl_dev_get_name(pctldev)))
++				is_hog = true;
++
++			if (pmxops->strict) {
++				if (desc->mux_owner)
++					seq_printf(s, "pin %d (%s): device %s%s",
++						   pin, desc->name, desc->mux_owner,
++						   is_hog ? " (HOG)" : "");
++				else if (desc->gpio_owner)
++					seq_printf(s, "pin %d (%s): GPIO %s",
++						   pin, desc->name, desc->gpio_owner);
++				else
++					seq_printf(s, "pin %d (%s): UNCLAIMED",
++						   pin, desc->name);
++			} else {
++				/* For non-strict controllers */
++				seq_printf(s, "pin %d (%s): %s %s%s", pin, desc->name,
++					   desc->mux_owner ? desc->mux_owner
++					   : "(MUX UNCLAIMED)",
++					   desc->gpio_owner ? desc->gpio_owner
++					   : "(GPIO UNCLAIMED)",
+ 					   is_hog ? " (HOG)" : "");
+-			else if (desc->gpio_owner)
+-				seq_printf(s, "pin %d (%s): GPIO %s",
+-					   pin, desc->name, desc->gpio_owner);
++			}
++
++			/* If mux: print function+group claiming the pin */
++			if (desc->mux_setting)
++				seq_printf(s, " function %s group %s\n",
++					   pmxops->get_function_name(pctldev,
++						desc->mux_setting->func),
++					   pctlops->get_group_name(pctldev,
++						desc->mux_setting->group));
+ 			else
+-				seq_printf(s, "pin %d (%s): UNCLAIMED",
+-					   pin, desc->name);
+-		} else {
+-			/* For non-strict controllers */
+-			seq_printf(s, "pin %d (%s): %s %s%s", pin, desc->name,
+-				   desc->mux_owner ? desc->mux_owner
+-				   : "(MUX UNCLAIMED)",
+-				   desc->gpio_owner ? desc->gpio_owner
+-				   : "(GPIO UNCLAIMED)",
+-				   is_hog ? " (HOG)" : "");
++				seq_putc(s, '\n');
+ 		}
+-
+-		/* If mux: print function+group claiming the pin */
+-		if (desc->mux_setting)
+-			seq_printf(s, " function %s group %s\n",
+-				   pmxops->get_function_name(pctldev,
+-					desc->mux_setting->func),
+-				   pctlops->get_group_name(pctldev,
+-					desc->mux_setting->group));
+-		else
+-			seq_putc(s, '\n');
+ 	}
+ 
+ 	mutex_unlock(&pctldev->mutex);
+diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+index a0eb4e01b3a755..1b7eecff3ffa43 100644
+--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
++++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+@@ -1226,6 +1226,8 @@ static const struct of_device_id pmic_gpio_of_match[] = {
+ 	{ .compatible = "qcom,pm8550ve-gpio", .data = (void *) 8 },
+ 	{ .compatible = "qcom,pm8550vs-gpio", .data = (void *) 6 },
+ 	{ .compatible = "qcom,pm8916-gpio", .data = (void *) 4 },
++	/* pm8937 has 8 GPIOs with holes on 3, 4 and 6 */
++	{ .compatible = "qcom,pm8937-gpio", .data = (void *) 8 },
+ 	{ .compatible = "qcom,pm8941-gpio", .data = (void *) 36 },
+ 	/* pm8950 has 8 GPIOs with holes on 3 */
+ 	{ .compatible = "qcom,pm8950-gpio", .data = (void *) 8 },
+diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
+index d16ece90d926cf..5fa04e7c1d5c4d 100644
+--- a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
++++ b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
+@@ -983,6 +983,7 @@ static const struct of_device_id pmic_mpp_of_match[] = {
+ 	{ .compatible = "qcom,pm8226-mpp", .data = (void *) 8 },
+ 	{ .compatible = "qcom,pm8841-mpp", .data = (void *) 4 },
+ 	{ .compatible = "qcom,pm8916-mpp", .data = (void *) 4 },
++	{ .compatible = "qcom,pm8937-mpp", .data = (void *) 4 },
+ 	{ .compatible = "qcom,pm8941-mpp", .data = (void *) 8 },
+ 	{ .compatible = "qcom,pm8950-mpp", .data = (void *) 4 },
+ 	{ .compatible = "qcom,pmi8950-mpp", .data = (void *) 4 },
+diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
+index 89f5f44857d555..1101e5b2488e52 100644
+--- a/drivers/platform/x86/asus-wmi.c
++++ b/drivers/platform/x86/asus-wmi.c
+@@ -3696,7 +3696,6 @@ static int asus_wmi_custom_fan_curve_init(struct asus_wmi *asus)
+ /* Throttle thermal policy ****************************************************/
+ static int throttle_thermal_policy_write(struct asus_wmi *asus)
+ {
+-	u32 retval;
+ 	u8 value;
+ 	int err;
+ 
+@@ -3718,8 +3717,8 @@ static int throttle_thermal_policy_write(struct asus_wmi *asus)
+ 		value = asus->throttle_thermal_policy_mode;
+ 	}
+ 
+-	err = asus_wmi_set_devstate(asus->throttle_thermal_policy_dev,
+-				    value, &retval);
++	/* Some machines do not return an error code as a result, so we ignore it */
++	err = asus_wmi_set_devstate(asus->throttle_thermal_policy_dev, value, NULL);
+ 
+ 	sysfs_notify(&asus->platform_device->dev.kobj, NULL,
+ 			"throttle_thermal_policy");
+@@ -3729,12 +3728,6 @@ static int throttle_thermal_policy_write(struct asus_wmi *asus)
+ 		return err;
+ 	}
+ 
+-	if (retval != 1) {
+-		pr_warn("Failed to set throttle thermal policy (retval): 0x%x\n",
+-			retval);
+-		return -EIO;
+-	}
+-
+ 	/* Must set to disabled if mode is toggled */
+ 	if (asus->cpu_fan_curve_available)
+ 		asus->custom_fan_curves[FAN_CURVE_DEV_CPU].enabled = false;
+diff --git a/drivers/pmdomain/core.c b/drivers/pmdomain/core.c
+index 29ad510e881c39..778ff187ac59e6 100644
+--- a/drivers/pmdomain/core.c
++++ b/drivers/pmdomain/core.c
+@@ -2171,8 +2171,24 @@ static int genpd_alloc_data(struct generic_pm_domain *genpd)
+ 	}
+ 
+ 	genpd->gd = gd;
+-	return 0;
++	device_initialize(&genpd->dev);
++
++	if (!genpd_is_dev_name_fw(genpd)) {
++		dev_set_name(&genpd->dev, "%s", genpd->name);
++	} else {
++		ret = ida_alloc(&genpd_ida, GFP_KERNEL);
++		if (ret < 0)
++			goto put;
+ 
++		genpd->device_id = ret;
++		dev_set_name(&genpd->dev, "%s_%u", genpd->name, genpd->device_id);
++	}
++
++	return 0;
++put:
++	put_device(&genpd->dev);
++	if (genpd->free_states == genpd_free_default_power_state)
++		kfree(genpd->states);
+ free:
+ 	if (genpd_is_cpu_domain(genpd))
+ 		free_cpumask_var(genpd->cpus);
+@@ -2182,6 +2198,9 @@ static int genpd_alloc_data(struct generic_pm_domain *genpd)
+ 
+ static void genpd_free_data(struct generic_pm_domain *genpd)
+ {
++	put_device(&genpd->dev);
++	if (genpd->device_id != -ENXIO)
++		ida_free(&genpd_ida, genpd->device_id);
+ 	if (genpd_is_cpu_domain(genpd))
+ 		free_cpumask_var(genpd->cpus);
+ 	if (genpd->free_states)
+@@ -2270,20 +2289,6 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
+ 	if (ret)
+ 		return ret;
+ 
+-	device_initialize(&genpd->dev);
+-
+-	if (!genpd_is_dev_name_fw(genpd)) {
+-		dev_set_name(&genpd->dev, "%s", genpd->name);
+-	} else {
+-		ret = ida_alloc(&genpd_ida, GFP_KERNEL);
+-		if (ret < 0) {
+-			put_device(&genpd->dev);
+-			return ret;
+-		}
+-		genpd->device_id = ret;
+-		dev_set_name(&genpd->dev, "%s_%u", genpd->name, genpd->device_id);
+-	}
+-
+ 	mutex_lock(&gpd_list_lock);
+ 	list_add(&genpd->gpd_list_node, &gpd_list);
+ 	mutex_unlock(&gpd_list_lock);
+@@ -2324,8 +2329,6 @@ static int genpd_remove(struct generic_pm_domain *genpd)
+ 	genpd_unlock(genpd);
+ 	genpd_debug_remove(genpd);
+ 	cancel_work_sync(&genpd->power_off_work);
+-	if (genpd->device_id != -ENXIO)
+-		ida_free(&genpd_ida, genpd->device_id);
+ 	genpd_free_data(genpd);
+ 
+ 	pr_debug("%s: removed %s\n", __func__, dev_name(&genpd->dev));
+diff --git a/drivers/pmdomain/imx/gpcv2.c b/drivers/pmdomain/imx/gpcv2.c
+index 963d61c5af6d5e..3f0e6960f47fc2 100644
+--- a/drivers/pmdomain/imx/gpcv2.c
++++ b/drivers/pmdomain/imx/gpcv2.c
+@@ -403,7 +403,7 @@ static int imx_pgc_power_up(struct generic_pm_domain *genpd)
+ 		 * already reaches target before udelay()
+ 		 */
+ 		regmap_read_bypassed(domain->regmap, domain->regs->hsk, &reg_val);
+-		udelay(5);
++		udelay(10);
+ 	}
+ 
+ 	/* Disable reset clocks for all devices in the domain */
+diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
+index c56cd0f63909a2..77a36e7bddd54e 100644
+--- a/drivers/ptp/ptp_clock.c
++++ b/drivers/ptp/ptp_clock.c
+@@ -150,7 +150,8 @@ static int ptp_clock_adjtime(struct posix_clock *pc, struct __kernel_timex *tx)
+ 		if (ppb > ops->max_adj || ppb < -ops->max_adj)
+ 			return -ERANGE;
+ 		err = ops->adjfine(ops, tx->freq);
+-		ptp->dialed_frequency = tx->freq;
++		if (!err)
++			ptp->dialed_frequency = tx->freq;
+ 	} else if (tx->modes & ADJ_OFFSET) {
+ 		if (ops->adjphase) {
+ 			s32 max_phase_adj = ops->getmaxphase(ops);
+diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c
+index 6c343b4b9d15a8..7870722b6ee21c 100644
+--- a/drivers/regulator/qcom-rpmh-regulator.c
++++ b/drivers/regulator/qcom-rpmh-regulator.c
+@@ -843,26 +843,15 @@ static const struct rpmh_vreg_hw_data pmic5_ftsmps520 = {
+ 	.of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode,
+ };
+ 
+-static const struct rpmh_vreg_hw_data pmic5_ftsmps525_lv = {
++static const struct rpmh_vreg_hw_data pmic5_ftsmps525 = {
+ 	.regulator_type = VRM,
+ 	.ops = &rpmh_regulator_vrm_ops,
+ 	.voltage_ranges = (struct linear_range[]) {
+ 		REGULATOR_LINEAR_RANGE(300000, 0, 267, 4000),
++		REGULATOR_LINEAR_RANGE(1376000, 268, 438, 8000),
+ 	},
+-	.n_linear_ranges = 1,
+-	.n_voltages = 268,
+-	.pmic_mode_map = pmic_mode_map_pmic5_smps,
+-	.of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode,
+-};
+-
+-static const struct rpmh_vreg_hw_data pmic5_ftsmps525_mv = {
+-	.regulator_type = VRM,
+-	.ops = &rpmh_regulator_vrm_ops,
+-	.voltage_ranges = (struct linear_range[]) {
+-		REGULATOR_LINEAR_RANGE(600000, 0, 267, 8000),
+-	},
+-	.n_linear_ranges = 1,
+-	.n_voltages = 268,
++	.n_linear_ranges = 2,
++	.n_voltages = 439,
+ 	.pmic_mode_map = pmic_mode_map_pmic5_smps,
+ 	.of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode,
+ };
+@@ -1190,12 +1179,12 @@ static const struct rpmh_vreg_init_data pm8550_vreg_data[] = {
+ };
+ 
+ static const struct rpmh_vreg_init_data pm8550vs_vreg_data[] = {
+-	RPMH_VREG("smps1",  "smp%s1",  &pmic5_ftsmps525_lv, "vdd-s1"),
+-	RPMH_VREG("smps2",  "smp%s2",  &pmic5_ftsmps525_lv, "vdd-s2"),
+-	RPMH_VREG("smps3",  "smp%s3",  &pmic5_ftsmps525_lv, "vdd-s3"),
+-	RPMH_VREG("smps4",  "smp%s4",  &pmic5_ftsmps525_lv, "vdd-s4"),
+-	RPMH_VREG("smps5",  "smp%s5",  &pmic5_ftsmps525_lv, "vdd-s5"),
+-	RPMH_VREG("smps6",  "smp%s6",  &pmic5_ftsmps525_mv, "vdd-s6"),
++	RPMH_VREG("smps1",  "smp%s1",  &pmic5_ftsmps525, "vdd-s1"),
++	RPMH_VREG("smps2",  "smp%s2",  &pmic5_ftsmps525, "vdd-s2"),
++	RPMH_VREG("smps3",  "smp%s3",  &pmic5_ftsmps525, "vdd-s3"),
++	RPMH_VREG("smps4",  "smp%s4",  &pmic5_ftsmps525, "vdd-s4"),
++	RPMH_VREG("smps5",  "smp%s5",  &pmic5_ftsmps525, "vdd-s5"),
++	RPMH_VREG("smps6",  "smp%s6",  &pmic5_ftsmps525, "vdd-s6"),
+ 	RPMH_VREG("ldo1",   "ldo%s1",  &pmic5_nldo515,   "vdd-l1"),
+ 	RPMH_VREG("ldo2",   "ldo%s2",  &pmic5_nldo515,   "vdd-l2"),
+ 	RPMH_VREG("ldo3",   "ldo%s3",  &pmic5_nldo515,   "vdd-l3"),
+@@ -1203,14 +1192,14 @@ static const struct rpmh_vreg_init_data pm8550vs_vreg_data[] = {
+ };
+ 
+ static const struct rpmh_vreg_init_data pm8550ve_vreg_data[] = {
+-	RPMH_VREG("smps1", "smp%s1", &pmic5_ftsmps525_lv, "vdd-s1"),
+-	RPMH_VREG("smps2", "smp%s2", &pmic5_ftsmps525_lv, "vdd-s2"),
+-	RPMH_VREG("smps3", "smp%s3", &pmic5_ftsmps525_lv, "vdd-s3"),
+-	RPMH_VREG("smps4", "smp%s4", &pmic5_ftsmps525_mv, "vdd-s4"),
+-	RPMH_VREG("smps5", "smp%s5", &pmic5_ftsmps525_lv, "vdd-s5"),
+-	RPMH_VREG("smps6", "smp%s6", &pmic5_ftsmps525_lv, "vdd-s6"),
+-	RPMH_VREG("smps7", "smp%s7", &pmic5_ftsmps525_lv, "vdd-s7"),
+-	RPMH_VREG("smps8", "smp%s8", &pmic5_ftsmps525_lv, "vdd-s8"),
++	RPMH_VREG("smps1", "smp%s1", &pmic5_ftsmps525, "vdd-s1"),
++	RPMH_VREG("smps2", "smp%s2", &pmic5_ftsmps525, "vdd-s2"),
++	RPMH_VREG("smps3", "smp%s3", &pmic5_ftsmps525, "vdd-s3"),
++	RPMH_VREG("smps4", "smp%s4", &pmic5_ftsmps525, "vdd-s4"),
++	RPMH_VREG("smps5", "smp%s5", &pmic5_ftsmps525, "vdd-s5"),
++	RPMH_VREG("smps6", "smp%s6", &pmic5_ftsmps525, "vdd-s6"),
++	RPMH_VREG("smps7", "smp%s7", &pmic5_ftsmps525, "vdd-s7"),
++	RPMH_VREG("smps8", "smp%s8", &pmic5_ftsmps525, "vdd-s8"),
+ 	RPMH_VREG("ldo1",  "ldo%s1", &pmic5_nldo515,   "vdd-l1"),
+ 	RPMH_VREG("ldo2",  "ldo%s2", &pmic5_nldo515,   "vdd-l2"),
+ 	RPMH_VREG("ldo3",  "ldo%s3", &pmic5_nldo515,   "vdd-l3"),
+@@ -1218,14 +1207,14 @@ static const struct rpmh_vreg_init_data pm8550ve_vreg_data[] = {
+ };
+ 
+ static const struct rpmh_vreg_init_data pmc8380_vreg_data[] = {
+-	RPMH_VREG("smps1", "smp%s1", &pmic5_ftsmps525_lv, "vdd-s1"),
+-	RPMH_VREG("smps2", "smp%s2", &pmic5_ftsmps525_lv, "vdd-s2"),
+-	RPMH_VREG("smps3", "smp%s3", &pmic5_ftsmps525_lv, "vdd-s3"),
+-	RPMH_VREG("smps4", "smp%s4", &pmic5_ftsmps525_mv, "vdd-s4"),
+-	RPMH_VREG("smps5", "smp%s5", &pmic5_ftsmps525_lv, "vdd-s5"),
+-	RPMH_VREG("smps6", "smp%s6", &pmic5_ftsmps525_lv, "vdd-s6"),
+-	RPMH_VREG("smps7", "smp%s7", &pmic5_ftsmps525_lv, "vdd-s7"),
+-	RPMH_VREG("smps8", "smp%s8", &pmic5_ftsmps525_lv, "vdd-s8"),
++	RPMH_VREG("smps1", "smp%s1", &pmic5_ftsmps525, "vdd-s1"),
++	RPMH_VREG("smps2", "smp%s2", &pmic5_ftsmps525, "vdd-s2"),
++	RPMH_VREG("smps3", "smp%s3", &pmic5_ftsmps525, "vdd-s3"),
++	RPMH_VREG("smps4", "smp%s4", &pmic5_ftsmps525, "vdd-s4"),
++	RPMH_VREG("smps5", "smp%s5", &pmic5_ftsmps525, "vdd-s5"),
++	RPMH_VREG("smps6", "smp%s6", &pmic5_ftsmps525, "vdd-s6"),
++	RPMH_VREG("smps7", "smp%s7", &pmic5_ftsmps525, "vdd-s7"),
++	RPMH_VREG("smps8", "smp%s8", &pmic5_ftsmps525, "vdd-s8"),
+ 	RPMH_VREG("ldo1",  "ldo%s1", &pmic5_nldo515,   "vdd-l1"),
+ 	RPMH_VREG("ldo2",  "ldo%s2", &pmic5_nldo515,   "vdd-l2"),
+ 	RPMH_VREG("ldo3",  "ldo%s3", &pmic5_nldo515,   "vdd-l3"),
+@@ -1409,16 +1398,16 @@ static const struct rpmh_vreg_init_data pmx65_vreg_data[] = {
+ };
+ 
+ static const struct rpmh_vreg_init_data pmx75_vreg_data[] = {
+-	RPMH_VREG("smps1",   "smp%s1",    &pmic5_ftsmps525_lv, "vdd-s1"),
+-	RPMH_VREG("smps2",   "smp%s2",    &pmic5_ftsmps525_lv, "vdd-s2"),
+-	RPMH_VREG("smps3",   "smp%s3",    &pmic5_ftsmps525_lv, "vdd-s3"),
+-	RPMH_VREG("smps4",   "smp%s4",    &pmic5_ftsmps525_mv, "vdd-s4"),
+-	RPMH_VREG("smps5",   "smp%s5",    &pmic5_ftsmps525_lv, "vdd-s5"),
+-	RPMH_VREG("smps6",   "smp%s6",    &pmic5_ftsmps525_lv, "vdd-s6"),
+-	RPMH_VREG("smps7",   "smp%s7",    &pmic5_ftsmps525_lv, "vdd-s7"),
+-	RPMH_VREG("smps8",   "smp%s8",    &pmic5_ftsmps525_lv, "vdd-s8"),
+-	RPMH_VREG("smps9",   "smp%s9",    &pmic5_ftsmps525_lv, "vdd-s9"),
+-	RPMH_VREG("smps10",  "smp%s10",   &pmic5_ftsmps525_lv, "vdd-s10"),
++	RPMH_VREG("smps1",   "smp%s1",    &pmic5_ftsmps525, "vdd-s1"),
++	RPMH_VREG("smps2",   "smp%s2",    &pmic5_ftsmps525, "vdd-s2"),
++	RPMH_VREG("smps3",   "smp%s3",    &pmic5_ftsmps525, "vdd-s3"),
++	RPMH_VREG("smps4",   "smp%s4",    &pmic5_ftsmps525, "vdd-s4"),
++	RPMH_VREG("smps5",   "smp%s5",    &pmic5_ftsmps525, "vdd-s5"),
++	RPMH_VREG("smps6",   "smp%s6",    &pmic5_ftsmps525, "vdd-s6"),
++	RPMH_VREG("smps7",   "smp%s7",    &pmic5_ftsmps525, "vdd-s7"),
++	RPMH_VREG("smps8",   "smp%s8",    &pmic5_ftsmps525, "vdd-s8"),
++	RPMH_VREG("smps9",   "smp%s9",    &pmic5_ftsmps525, "vdd-s9"),
++	RPMH_VREG("smps10",  "smp%s10",   &pmic5_ftsmps525, "vdd-s10"),
+ 	RPMH_VREG("ldo1",    "ldo%s1",    &pmic5_nldo515,   "vdd-l1"),
+ 	RPMH_VREG("ldo2",    "ldo%s2",    &pmic5_nldo515,   "vdd-l2-18"),
+ 	RPMH_VREG("ldo3",    "ldo%s3",    &pmic5_nldo515,   "vdd-l3"),
+diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c
+index 793b1d274be33a..1a2d08ec9de9ef 100644
+--- a/drivers/remoteproc/qcom_q6v5_pas.c
++++ b/drivers/remoteproc/qcom_q6v5_pas.c
+@@ -1433,6 +1433,7 @@ static const struct of_device_id adsp_of_match[] = {
+ 	{ .compatible = "qcom,sa8775p-cdsp1-pas", .data = &sa8775p_cdsp1_resource},
+ 	{ .compatible = "qcom,sa8775p-gpdsp0-pas", .data = &sa8775p_gpdsp0_resource},
+ 	{ .compatible = "qcom,sa8775p-gpdsp1-pas", .data = &sa8775p_gpdsp1_resource},
++	{ .compatible = "qcom,sar2130p-adsp-pas", .data = &sm8350_adsp_resource},
+ 	{ .compatible = "qcom,sc7180-adsp-pas", .data = &sm8250_adsp_resource},
+ 	{ .compatible = "qcom,sc7180-mpss-pas", .data = &mpss_resource_init},
+ 	{ .compatible = "qcom,sc7280-adsp-pas", .data = &sm8350_adsp_resource},
+diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
+index 35dca2accbb8df..5849d2970bba45 100644
+--- a/drivers/rtc/rtc-cmos.c
++++ b/drivers/rtc/rtc-cmos.c
+@@ -645,18 +645,17 @@ static int cmos_nvram_read(void *priv, unsigned int off, void *val,
+ 	unsigned char *buf = val;
+ 
+ 	off += NVRAM_OFFSET;
+-	spin_lock_irq(&rtc_lock);
+-	for (; count; count--, off++) {
++	for (; count; count--, off++, buf++) {
++		guard(spinlock_irq)(&rtc_lock);
+ 		if (off < 128)
+-			*buf++ = CMOS_READ(off);
++			*buf = CMOS_READ(off);
+ 		else if (can_bank2)
+-			*buf++ = cmos_read_bank2(off);
++			*buf = cmos_read_bank2(off);
+ 		else
+-			break;
++			return -EIO;
+ 	}
+-	spin_unlock_irq(&rtc_lock);
+ 
+-	return count ? -EIO : 0;
++	return 0;
+ }
+ 
+ static int cmos_nvram_write(void *priv, unsigned int off, void *val,
+@@ -671,23 +670,23 @@ static int cmos_nvram_write(void *priv, unsigned int off, void *val,
+ 	 * NVRAM to update, updating checksums is also part of its job.
+ 	 */
+ 	off += NVRAM_OFFSET;
+-	spin_lock_irq(&rtc_lock);
+-	for (; count; count--, off++) {
++	for (; count; count--, off++, buf++) {
+ 		/* don't trash RTC registers */
+ 		if (off == cmos->day_alrm
+ 				|| off == cmos->mon_alrm
+ 				|| off == cmos->century)
+-			buf++;
+-		else if (off < 128)
+-			CMOS_WRITE(*buf++, off);
++			continue;
++
++		guard(spinlock_irq)(&rtc_lock);
++		if (off < 128)
++			CMOS_WRITE(*buf, off);
+ 		else if (can_bank2)
+-			cmos_write_bank2(*buf++, off);
++			cmos_write_bank2(*buf, off);
+ 		else
+-			break;
++			return -EIO;
+ 	}
+-	spin_unlock_irq(&rtc_lock);
+ 
+-	return count ? -EIO : 0;
++	return 0;
+ }
+ 
+ /*----------------------------------------------------------------*/
+diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+index 4cd3a3eab6f1c4..cd394d8c9f07f0 100644
+--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
++++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+@@ -2493,6 +2493,7 @@ static int complete_v3_hw(struct hisi_sas_cq *cq)
+ 	/* update rd_point */
+ 	cq->rd_point = rd_point;
+ 	hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point);
++	cond_resched();
+ 
+ 	return completed;
+ }
+@@ -3550,6 +3551,11 @@ debugfs_to_reg_name_v3_hw(int off, int base_off,
+ 	return NULL;
+ }
+ 
++static bool debugfs_dump_is_generated_v3_hw(void *p)
++{
++	return p ? true : false;
++}
++
+ static void debugfs_print_reg_v3_hw(u32 *regs_val, struct seq_file *s,
+ 				    const struct hisi_sas_debugfs_reg *reg)
+ {
+@@ -3575,6 +3581,9 @@ static int debugfs_global_v3_hw_show(struct seq_file *s, void *p)
+ {
+ 	struct hisi_sas_debugfs_regs *global = s->private;
+ 
++	if (!debugfs_dump_is_generated_v3_hw(global->data))
++		return -EPERM;
++
+ 	debugfs_print_reg_v3_hw(global->data, s,
+ 				&debugfs_global_reg);
+ 
+@@ -3586,6 +3595,9 @@ static int debugfs_axi_v3_hw_show(struct seq_file *s, void *p)
+ {
+ 	struct hisi_sas_debugfs_regs *axi = s->private;
+ 
++	if (!debugfs_dump_is_generated_v3_hw(axi->data))
++		return -EPERM;
++
+ 	debugfs_print_reg_v3_hw(axi->data, s,
+ 				&debugfs_axi_reg);
+ 
+@@ -3597,6 +3609,9 @@ static int debugfs_ras_v3_hw_show(struct seq_file *s, void *p)
+ {
+ 	struct hisi_sas_debugfs_regs *ras = s->private;
+ 
++	if (!debugfs_dump_is_generated_v3_hw(ras->data))
++		return -EPERM;
++
+ 	debugfs_print_reg_v3_hw(ras->data, s,
+ 				&debugfs_ras_reg);
+ 
+@@ -3609,6 +3624,9 @@ static int debugfs_port_v3_hw_show(struct seq_file *s, void *p)
+ 	struct hisi_sas_debugfs_port *port = s->private;
+ 	const struct hisi_sas_debugfs_reg *reg_port = &debugfs_port_reg;
+ 
++	if (!debugfs_dump_is_generated_v3_hw(port->data))
++		return -EPERM;
++
+ 	debugfs_print_reg_v3_hw(port->data, s, reg_port);
+ 
+ 	return 0;
+@@ -3664,6 +3682,9 @@ static int debugfs_cq_v3_hw_show(struct seq_file *s, void *p)
+ 	struct hisi_sas_debugfs_cq *debugfs_cq = s->private;
+ 	int slot;
+ 
++	if (!debugfs_dump_is_generated_v3_hw(debugfs_cq->complete_hdr))
++		return -EPERM;
++
+ 	for (slot = 0; slot < HISI_SAS_QUEUE_SLOTS; slot++)
+ 		debugfs_cq_show_slot_v3_hw(s, slot, debugfs_cq);
+ 
+@@ -3685,8 +3706,12 @@ static void debugfs_dq_show_slot_v3_hw(struct seq_file *s, int slot,
+ 
+ static int debugfs_dq_v3_hw_show(struct seq_file *s, void *p)
+ {
++	struct hisi_sas_debugfs_dq *debugfs_dq = s->private;
+ 	int slot;
+ 
++	if (!debugfs_dump_is_generated_v3_hw(debugfs_dq->hdr))
++		return -EPERM;
++
+ 	for (slot = 0; slot < HISI_SAS_QUEUE_SLOTS; slot++)
+ 		debugfs_dq_show_slot_v3_hw(s, slot, s->private);
+ 
+@@ -3700,6 +3725,9 @@ static int debugfs_iost_v3_hw_show(struct seq_file *s, void *p)
+ 	struct hisi_sas_iost *iost = debugfs_iost->iost;
+ 	int i, max_command_entries = HISI_SAS_MAX_COMMANDS;
+ 
++	if (!debugfs_dump_is_generated_v3_hw(iost))
++		return -EPERM;
++
+ 	for (i = 0; i < max_command_entries; i++, iost++) {
+ 		__le64 *data = &iost->qw0;
+ 
+@@ -3719,6 +3747,9 @@ static int debugfs_iost_cache_v3_hw_show(struct seq_file *s, void *p)
+ 	int i, tab_idx;
+ 	__le64 *iost;
+ 
++	if (!debugfs_dump_is_generated_v3_hw(iost_cache))
++		return -EPERM;
++
+ 	for (i = 0; i < HISI_SAS_IOST_ITCT_CACHE_NUM; i++, iost_cache++) {
+ 		/*
+ 		 * Data struct of IOST cache:
+@@ -3742,6 +3773,9 @@ static int debugfs_itct_v3_hw_show(struct seq_file *s, void *p)
+ 	struct hisi_sas_debugfs_itct *debugfs_itct = s->private;
+ 	struct hisi_sas_itct *itct = debugfs_itct->itct;
+ 
++	if (!debugfs_dump_is_generated_v3_hw(itct))
++		return -EPERM;
++
+ 	for (i = 0; i < HISI_SAS_MAX_ITCT_ENTRIES; i++, itct++) {
+ 		__le64 *data = &itct->qw0;
+ 
+@@ -3761,6 +3795,9 @@ static int debugfs_itct_cache_v3_hw_show(struct seq_file *s, void *p)
+ 	int i, tab_idx;
+ 	__le64 *itct;
+ 
++	if (!debugfs_dump_is_generated_v3_hw(itct_cache))
++		return -EPERM;
++
+ 	for (i = 0; i < HISI_SAS_IOST_ITCT_CACHE_NUM; i++, itct_cache++) {
+ 		/*
+ 		 * Data struct of ITCT cache:
+@@ -3778,10 +3815,9 @@ static int debugfs_itct_cache_v3_hw_show(struct seq_file *s, void *p)
+ }
+ DEFINE_SHOW_ATTRIBUTE(debugfs_itct_cache_v3_hw);
+ 
+-static void debugfs_create_files_v3_hw(struct hisi_hba *hisi_hba)
++static void debugfs_create_files_v3_hw(struct hisi_hba *hisi_hba, int index)
+ {
+ 	u64 *debugfs_timestamp;
+-	int dump_index = hisi_hba->debugfs_dump_index;
+ 	struct dentry *dump_dentry;
+ 	struct dentry *dentry;
+ 	char name[256];
+@@ -3789,17 +3825,17 @@ static void debugfs_create_files_v3_hw(struct hisi_hba *hisi_hba)
+ 	int c;
+ 	int d;
+ 
+-	snprintf(name, 256, "%d", dump_index);
++	snprintf(name, 256, "%d", index);
+ 
+ 	dump_dentry = debugfs_create_dir(name, hisi_hba->debugfs_dump_dentry);
+ 
+-	debugfs_timestamp = &hisi_hba->debugfs_timestamp[dump_index];
++	debugfs_timestamp = &hisi_hba->debugfs_timestamp[index];
+ 
+ 	debugfs_create_u64("timestamp", 0400, dump_dentry,
+ 			   debugfs_timestamp);
+ 
+ 	debugfs_create_file("global", 0400, dump_dentry,
+-			    &hisi_hba->debugfs_regs[dump_index][DEBUGFS_GLOBAL],
++			    &hisi_hba->debugfs_regs[index][DEBUGFS_GLOBAL],
+ 			    &debugfs_global_v3_hw_fops);
+ 
+ 	/* Create port dir and files */
+@@ -3808,7 +3844,7 @@ static void debugfs_create_files_v3_hw(struct hisi_hba *hisi_hba)
+ 		snprintf(name, 256, "%d", p);
+ 
+ 		debugfs_create_file(name, 0400, dentry,
+-				    &hisi_hba->debugfs_port_reg[dump_index][p],
++				    &hisi_hba->debugfs_port_reg[index][p],
+ 				    &debugfs_port_v3_hw_fops);
+ 	}
+ 
+@@ -3818,7 +3854,7 @@ static void debugfs_create_files_v3_hw(struct hisi_hba *hisi_hba)
+ 		snprintf(name, 256, "%d", c);
+ 
+ 		debugfs_create_file(name, 0400, dentry,
+-				    &hisi_hba->debugfs_cq[dump_index][c],
++				    &hisi_hba->debugfs_cq[index][c],
+ 				    &debugfs_cq_v3_hw_fops);
+ 	}
+ 
+@@ -3828,32 +3864,32 @@ static void debugfs_create_files_v3_hw(struct hisi_hba *hisi_hba)
+ 		snprintf(name, 256, "%d", d);
+ 
+ 		debugfs_create_file(name, 0400, dentry,
+-				    &hisi_hba->debugfs_dq[dump_index][d],
++				    &hisi_hba->debugfs_dq[index][d],
+ 				    &debugfs_dq_v3_hw_fops);
+ 	}
+ 
+ 	debugfs_create_file("iost", 0400, dump_dentry,
+-			    &hisi_hba->debugfs_iost[dump_index],
++			    &hisi_hba->debugfs_iost[index],
+ 			    &debugfs_iost_v3_hw_fops);
+ 
+ 	debugfs_create_file("iost_cache", 0400, dump_dentry,
+-			    &hisi_hba->debugfs_iost_cache[dump_index],
++			    &hisi_hba->debugfs_iost_cache[index],
+ 			    &debugfs_iost_cache_v3_hw_fops);
+ 
+ 	debugfs_create_file("itct", 0400, dump_dentry,
+-			    &hisi_hba->debugfs_itct[dump_index],
++			    &hisi_hba->debugfs_itct[index],
+ 			    &debugfs_itct_v3_hw_fops);
+ 
+ 	debugfs_create_file("itct_cache", 0400, dump_dentry,
+-			    &hisi_hba->debugfs_itct_cache[dump_index],
++			    &hisi_hba->debugfs_itct_cache[index],
+ 			    &debugfs_itct_cache_v3_hw_fops);
+ 
+ 	debugfs_create_file("axi", 0400, dump_dentry,
+-			    &hisi_hba->debugfs_regs[dump_index][DEBUGFS_AXI],
++			    &hisi_hba->debugfs_regs[index][DEBUGFS_AXI],
+ 			    &debugfs_axi_v3_hw_fops);
+ 
+ 	debugfs_create_file("ras", 0400, dump_dentry,
+-			    &hisi_hba->debugfs_regs[dump_index][DEBUGFS_RAS],
++			    &hisi_hba->debugfs_regs[index][DEBUGFS_RAS],
+ 			    &debugfs_ras_v3_hw_fops);
+ }
+ 
+@@ -4516,22 +4552,34 @@ static void debugfs_release_v3_hw(struct hisi_hba *hisi_hba, int dump_index)
+ 	int i;
+ 
+ 	devm_kfree(dev, hisi_hba->debugfs_iost_cache[dump_index].cache);
++	hisi_hba->debugfs_iost_cache[dump_index].cache = NULL;
+ 	devm_kfree(dev, hisi_hba->debugfs_itct_cache[dump_index].cache);
++	hisi_hba->debugfs_itct_cache[dump_index].cache = NULL;
+ 	devm_kfree(dev, hisi_hba->debugfs_iost[dump_index].iost);
++	hisi_hba->debugfs_iost[dump_index].iost = NULL;
+ 	devm_kfree(dev, hisi_hba->debugfs_itct[dump_index].itct);
++	hisi_hba->debugfs_itct[dump_index].itct = NULL;
+ 
+-	for (i = 0; i < hisi_hba->queue_count; i++)
++	for (i = 0; i < hisi_hba->queue_count; i++) {
+ 		devm_kfree(dev, hisi_hba->debugfs_dq[dump_index][i].hdr);
++		hisi_hba->debugfs_dq[dump_index][i].hdr = NULL;
++	}
+ 
+-	for (i = 0; i < hisi_hba->queue_count; i++)
++	for (i = 0; i < hisi_hba->queue_count; i++) {
+ 		devm_kfree(dev,
+ 			   hisi_hba->debugfs_cq[dump_index][i].complete_hdr);
++		hisi_hba->debugfs_cq[dump_index][i].complete_hdr = NULL;
++	}
+ 
+-	for (i = 0; i < DEBUGFS_REGS_NUM; i++)
++	for (i = 0; i < DEBUGFS_REGS_NUM; i++) {
+ 		devm_kfree(dev, hisi_hba->debugfs_regs[dump_index][i].data);
++		hisi_hba->debugfs_regs[dump_index][i].data = NULL;
++	}
+ 
+-	for (i = 0; i < hisi_hba->n_phy; i++)
++	for (i = 0; i < hisi_hba->n_phy; i++) {
+ 		devm_kfree(dev, hisi_hba->debugfs_port_reg[dump_index][i].data);
++		hisi_hba->debugfs_port_reg[dump_index][i].data = NULL;
++	}
+ }
+ 
+ static const struct hisi_sas_debugfs_reg *debugfs_reg_array_v3_hw[DEBUGFS_REGS_NUM] = {
+@@ -4658,8 +4706,6 @@ static int debugfs_snapshot_regs_v3_hw(struct hisi_hba *hisi_hba)
+ 	debugfs_snapshot_itct_reg_v3_hw(hisi_hba);
+ 	debugfs_snapshot_iost_reg_v3_hw(hisi_hba);
+ 
+-	debugfs_create_files_v3_hw(hisi_hba);
+-
+ 	debugfs_snapshot_restore_v3_hw(hisi_hba);
+ 	hisi_hba->debugfs_dump_index++;
+ 
+@@ -4743,6 +4789,17 @@ static void debugfs_bist_init_v3_hw(struct hisi_hba *hisi_hba)
+ 	hisi_hba->debugfs_bist_linkrate = SAS_LINK_RATE_1_5_GBPS;
+ }
+ 
++static void debugfs_dump_init_v3_hw(struct hisi_hba *hisi_hba)
++{
++	int i;
++
++	hisi_hba->debugfs_dump_dentry =
++			debugfs_create_dir("dump", hisi_hba->debugfs_dir);
++
++	for (i = 0; i < hisi_sas_debugfs_dump_count; i++)
++		debugfs_create_files_v3_hw(hisi_hba, i);
++}
++
+ static void debugfs_exit_v3_hw(struct hisi_hba *hisi_hba)
+ {
+ 	debugfs_remove_recursive(hisi_hba->debugfs_dir);
+@@ -4763,8 +4820,7 @@ static void debugfs_init_v3_hw(struct hisi_hba *hisi_hba)
+ 	/* create bist structures */
+ 	debugfs_bist_init_v3_hw(hisi_hba);
+ 
+-	hisi_hba->debugfs_dump_dentry =
+-			debugfs_create_dir("dump", hisi_hba->debugfs_dir);
++	debugfs_dump_init_v3_hw(hisi_hba);
+ 
+ 	debugfs_phy_down_cnt_init_v3_hw(hisi_hba);
+ 	debugfs_fifo_init_v3_hw(hisi_hba);
+diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
+index 134bc96dd13400..ce3a1f42713dd8 100644
+--- a/drivers/scsi/lpfc/lpfc_ct.c
++++ b/drivers/scsi/lpfc/lpfc_ct.c
+@@ -2226,6 +2226,11 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ 		ulp_status, ulp_word4, latt);
+ 
+ 	if (latt || ulp_status) {
++		lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY,
++				 "0229 FDMI cmd %04x failed, latt = %d "
++				 "ulp_status: (x%x/x%x), sli_flag x%x\n",
++				 be16_to_cpu(fdmi_cmd), latt, ulp_status,
++				 ulp_word4, phba->sli.sli_flag);
+ 
+ 		/* Look for a retryable error */
+ 		if (ulp_status == IOSTAT_LOCAL_REJECT) {
+@@ -2234,8 +2239,16 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ 			case IOERR_SLI_DOWN:
+ 				/* Driver aborted this IO.  No retry as error
+ 				 * is likely Offline->Online or some adapter
+-				 * error.  Recovery will try again.
++				 * error.  Recovery will try again, but if port
++				 * is not active there's no point to continue
++				 * issuing follow up FDMI commands.
+ 				 */
++				if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
++					free_ndlp = cmdiocb->ndlp;
++					lpfc_ct_free_iocb(phba, cmdiocb);
++					lpfc_nlp_put(free_ndlp);
++					return;
++				}
+ 				break;
+ 			case IOERR_ABORT_IN_PROGRESS:
+ 			case IOERR_SEQUENCE_TIMEOUT:
+@@ -2256,12 +2269,6 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ 				break;
+ 			}
+ 		}
+-
+-		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+-				 "0229 FDMI cmd %04x latt = %d "
+-				 "ulp_status: x%x, rid x%x\n",
+-				 be16_to_cpu(fdmi_cmd), latt, ulp_status,
+-				 ulp_word4);
+ 	}
+ 
+ 	free_ndlp = cmdiocb->ndlp;
+diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
+index 9241075f72fa4b..6e8d8a96c54fb3 100644
+--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
++++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
+@@ -155,6 +155,7 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
+ 	struct lpfc_hba   *phba;
+ 	struct lpfc_work_evt *evtp;
+ 	unsigned long iflags;
++	bool nvme_reg = false;
+ 
+ 	ndlp = ((struct lpfc_rport_data *)rport->dd_data)->pnode;
+ 	if (!ndlp)
+@@ -177,38 +178,49 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
+ 	/* Don't schedule a worker thread event if the vport is going down. */
+ 	if (test_bit(FC_UNLOADING, &vport->load_flag) ||
+ 	    !test_bit(HBA_SETUP, &phba->hba_flag)) {
++
+ 		spin_lock_irqsave(&ndlp->lock, iflags);
+ 		ndlp->rport = NULL;
+ 
++		if (ndlp->fc4_xpt_flags & NVME_XPT_REGD)
++			nvme_reg = true;
++
+ 		/* The scsi_transport is done with the rport so lpfc cannot
+-		 * call to unregister. Remove the scsi transport reference
+-		 * and clean up the SCSI transport node details.
++		 * call to unregister.
+ 		 */
+-		if (ndlp->fc4_xpt_flags & (NLP_XPT_REGD | SCSI_XPT_REGD)) {
++		if (ndlp->fc4_xpt_flags & SCSI_XPT_REGD) {
+ 			ndlp->fc4_xpt_flags &= ~SCSI_XPT_REGD;
+ 
+-			/* NVME transport-registered rports need the
+-			 * NLP_XPT_REGD flag to complete an unregister.
++			/* If NLP_XPT_REGD was cleared in lpfc_nlp_unreg_node,
++			 * unregister calls were made to the scsi and nvme
++			 * transports and refcnt was already decremented. Clear
++			 * the NLP_XPT_REGD flag only if the NVME Rport is
++			 * confirmed unregistered.
+ 			 */
+-			if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD))
++			if (!nvme_reg && ndlp->fc4_xpt_flags & NLP_XPT_REGD) {
+ 				ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD;
++				spin_unlock_irqrestore(&ndlp->lock, iflags);
++				lpfc_nlp_put(ndlp); /* may free ndlp */
++			} else {
++				spin_unlock_irqrestore(&ndlp->lock, iflags);
++			}
++		} else {
+ 			spin_unlock_irqrestore(&ndlp->lock, iflags);
+-			lpfc_nlp_put(ndlp);
+-			spin_lock_irqsave(&ndlp->lock, iflags);
+ 		}
+ 
++		spin_lock_irqsave(&ndlp->lock, iflags);
++
+ 		/* Only 1 thread can drop the initial node reference.  If
+ 		 * another thread has set NLP_DROPPED, this thread is done.
+ 		 */
+-		if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD) &&
+-		    !(ndlp->nlp_flag & NLP_DROPPED)) {
+-			ndlp->nlp_flag |= NLP_DROPPED;
++		if (nvme_reg || (ndlp->nlp_flag & NLP_DROPPED)) {
+ 			spin_unlock_irqrestore(&ndlp->lock, iflags);
+-			lpfc_nlp_put(ndlp);
+ 			return;
+ 		}
+ 
++		ndlp->nlp_flag |= NLP_DROPPED;
+ 		spin_unlock_irqrestore(&ndlp->lock, iflags);
++		lpfc_nlp_put(ndlp);
+ 		return;
+ 	}
+ 
+diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
+index 0dd451009b0791..a3658ef1141b26 100644
+--- a/drivers/scsi/lpfc/lpfc_init.c
++++ b/drivers/scsi/lpfc/lpfc_init.c
+@@ -13518,6 +13518,8 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
+ 	/* Disable FW logging to host memory */
+ 	lpfc_ras_stop_fwlog(phba);
+ 
++	lpfc_sli4_queue_unset(phba);
++
+ 	/* Reset SLI4 HBA FCoE function */
+ 	lpfc_pci_function_reset(phba);
+ 
+diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
+index 2ec6e55771b45a..6748fba48a07ed 100644
+--- a/drivers/scsi/lpfc/lpfc_sli.c
++++ b/drivers/scsi/lpfc/lpfc_sli.c
+@@ -5291,6 +5291,8 @@ lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
+ 			"0296 Restart HBA Data: x%x x%x\n",
+ 			phba->pport->port_state, psli->sli_flag);
+ 
++	lpfc_sli4_queue_unset(phba);
++
+ 	rc = lpfc_sli4_brdreset(phba);
+ 	if (rc) {
+ 		phba->link_state = LPFC_HBA_ERROR;
+@@ -17625,6 +17627,9 @@ lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
+ 	if (!eq)
+ 		return -ENODEV;
+ 
++	if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))
++		goto list_remove;
++
+ 	mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
+ 	if (!mbox)
+ 		return -ENOMEM;
+@@ -17651,10 +17656,12 @@ lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
+ 				shdr_status, shdr_add_status, rc);
+ 		status = -ENXIO;
+ 	}
++	mempool_free(mbox, eq->phba->mbox_mem_pool);
+ 
++list_remove:
+ 	/* Remove eq from any list */
+ 	list_del_init(&eq->list);
+-	mempool_free(mbox, eq->phba->mbox_mem_pool);
++
+ 	return status;
+ }
+ 
+@@ -17682,6 +17689,10 @@ lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
+ 	/* sanity check on queue memory */
+ 	if (!cq)
+ 		return -ENODEV;
++
++	if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))
++		goto list_remove;
++
+ 	mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
+ 	if (!mbox)
+ 		return -ENOMEM;
+@@ -17707,9 +17718,11 @@ lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
+ 				shdr_status, shdr_add_status, rc);
+ 		status = -ENXIO;
+ 	}
++	mempool_free(mbox, cq->phba->mbox_mem_pool);
++
++list_remove:
+ 	/* Remove cq from any list */
+ 	list_del_init(&cq->list);
+-	mempool_free(mbox, cq->phba->mbox_mem_pool);
+ 	return status;
+ }
+ 
+@@ -17737,6 +17750,10 @@ lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
+ 	/* sanity check on queue memory */
+ 	if (!mq)
+ 		return -ENODEV;
++
++	if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))
++		goto list_remove;
++
+ 	mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
+ 	if (!mbox)
+ 		return -ENOMEM;
+@@ -17762,9 +17779,11 @@ lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
+ 				shdr_status, shdr_add_status, rc);
+ 		status = -ENXIO;
+ 	}
++	mempool_free(mbox, mq->phba->mbox_mem_pool);
++
++list_remove:
+ 	/* Remove mq from any list */
+ 	list_del_init(&mq->list);
+-	mempool_free(mbox, mq->phba->mbox_mem_pool);
+ 	return status;
+ }
+ 
+@@ -17792,6 +17811,10 @@ lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
+ 	/* sanity check on queue memory */
+ 	if (!wq)
+ 		return -ENODEV;
++
++	if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))
++		goto list_remove;
++
+ 	mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
+ 	if (!mbox)
+ 		return -ENOMEM;
+@@ -17816,11 +17839,13 @@ lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
+ 				shdr_status, shdr_add_status, rc);
+ 		status = -ENXIO;
+ 	}
++	mempool_free(mbox, wq->phba->mbox_mem_pool);
++
++list_remove:
+ 	/* Remove wq from any list */
+ 	list_del_init(&wq->list);
+ 	kfree(wq->pring);
+ 	wq->pring = NULL;
+-	mempool_free(mbox, wq->phba->mbox_mem_pool);
+ 	return status;
+ }
+ 
+@@ -17850,6 +17875,10 @@ lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
+ 	/* sanity check on queue memory */
+ 	if (!hrq || !drq)
+ 		return -ENODEV;
++
++	if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))
++		goto list_remove;
++
+ 	mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
+ 	if (!mbox)
+ 		return -ENOMEM;
+@@ -17890,9 +17919,11 @@ lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
+ 				shdr_status, shdr_add_status, rc);
+ 		status = -ENXIO;
+ 	}
++	mempool_free(mbox, hrq->phba->mbox_mem_pool);
++
++list_remove:
+ 	list_del_init(&hrq->list);
+ 	list_del_init(&drq->list);
+-	mempool_free(mbox, hrq->phba->mbox_mem_pool);
+ 	return status;
+ }
+ 
+diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
+index 2810608acd963a..e6ece30c43486c 100644
+--- a/drivers/scsi/qla2xxx/qla_attr.c
++++ b/drivers/scsi/qla2xxx/qla_attr.c
+@@ -3304,6 +3304,7 @@ struct fc_function_template qla2xxx_transport_vport_functions = {
+ 	.show_host_node_name = 1,
+ 	.show_host_port_name = 1,
+ 	.show_host_supported_classes = 1,
++	.show_host_supported_speeds = 1,
+ 
+ 	.get_host_port_id = qla2x00_get_host_port_id,
+ 	.show_host_port_id = 1,
+diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
+index 52dc9604f56746..10431a67d202bb 100644
+--- a/drivers/scsi/qla2xxx/qla_bsg.c
++++ b/drivers/scsi/qla2xxx/qla_bsg.c
+@@ -24,6 +24,7 @@ void qla2x00_bsg_job_done(srb_t *sp, int res)
+ {
+ 	struct bsg_job *bsg_job = sp->u.bsg_job;
+ 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
++	struct completion *comp = sp->comp;
+ 
+ 	ql_dbg(ql_dbg_user, sp->vha, 0x7009,
+ 	    "%s: sp hdl %x, result=%x bsg ptr %p\n",
+@@ -35,6 +36,9 @@ void qla2x00_bsg_job_done(srb_t *sp, int res)
+ 	bsg_reply->result = res;
+ 	bsg_job_done(bsg_job, bsg_reply->result,
+ 		       bsg_reply->reply_payload_rcv_len);
++
++	if (comp)
++		complete(comp);
+ }
+ 
+ void qla2x00_bsg_sp_free(srb_t *sp)
+@@ -490,16 +494,6 @@ qla2x00_process_ct(struct bsg_job *bsg_job)
+ 		goto done;
+ 	}
+ 
+-	if ((req_sg_cnt !=  bsg_job->request_payload.sg_cnt) ||
+-	    (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
+-		ql_log(ql_log_warn, vha, 0x7011,
+-		    "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
+-		    "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
+-		    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
+-		rval = -EAGAIN;
+-		goto done_unmap_sg;
+-	}
+-
+ 	if (!vha->flags.online) {
+ 		ql_log(ql_log_warn, vha, 0x7012,
+ 		    "Host is not online.\n");
+@@ -3061,7 +3055,7 @@ qla24xx_bsg_request(struct bsg_job *bsg_job)
+ 
+ static bool qla_bsg_found(struct qla_qpair *qpair, struct bsg_job *bsg_job)
+ {
+-	bool found = false;
++	bool found, do_bsg_done;
+ 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ 	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
+ 	struct qla_hw_data *ha = vha->hw;
+@@ -3069,6 +3063,11 @@ static bool qla_bsg_found(struct qla_qpair *qpair, struct bsg_job *bsg_job)
+ 	int cnt;
+ 	unsigned long flags;
+ 	struct req_que *req;
++	int rval;
++	DECLARE_COMPLETION_ONSTACK(comp);
++	uint32_t ratov_j;
++
++	found = do_bsg_done = false;
+ 
+ 	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
+ 	req = qpair->req;
+@@ -3080,42 +3079,104 @@ static bool qla_bsg_found(struct qla_qpair *qpair, struct bsg_job *bsg_job)
+ 		     sp->type == SRB_ELS_CMD_HST ||
+ 		     sp->type == SRB_ELS_CMD_HST_NOLOGIN) &&
+ 		    sp->u.bsg_job == bsg_job) {
+-			req->outstanding_cmds[cnt] = NULL;
+-			spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+-
+-			if (!ha->flags.eeh_busy && ha->isp_ops->abort_command(sp)) {
+-				ql_log(ql_log_warn, vha, 0x7089,
+-						"mbx abort_command failed.\n");
+-				bsg_reply->result = -EIO;
+-			} else {
+-				ql_dbg(ql_dbg_user, vha, 0x708a,
+-						"mbx abort_command success.\n");
+-				bsg_reply->result = 0;
+-			}
+-			/* ref: INIT */
+-			kref_put(&sp->cmd_kref, qla2x00_sp_release);
+ 
+ 			found = true;
+-			goto done;
++			sp->comp = &comp;
++			break;
+ 		}
+ 	}
+ 	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+ 
+-done:
+-	return found;
++	if (!found)
++		return false;
++
++	if (ha->flags.eeh_busy) {
++		/* skip over abort.  EEH handling will return the bsg. Wait for it */
++		rval = QLA_SUCCESS;
++		ql_dbg(ql_dbg_user, vha, 0x802c,
++			"eeh encounter. bsg %p sp=%p handle=%x \n",
++			bsg_job, sp, sp->handle);
++	} else {
++		rval = ha->isp_ops->abort_command(sp);
++		ql_dbg(ql_dbg_user, vha, 0x802c,
++			"Aborting bsg %p sp=%p handle=%x rval=%x\n",
++			bsg_job, sp, sp->handle, rval);
++	}
++
++	switch (rval) {
++	case QLA_SUCCESS:
++		/* Wait for the command completion. */
++		ratov_j = ha->r_a_tov / 10 * 4 * 1000;
++		ratov_j = msecs_to_jiffies(ratov_j);
++
++		if (!wait_for_completion_timeout(&comp, ratov_j)) {
++			ql_log(ql_log_info, vha, 0x7089,
++				"bsg abort timeout.  bsg=%p sp=%p handle %#x .\n",
++				bsg_job, sp, sp->handle);
++
++			do_bsg_done = true;
++		} else {
++			/* fw had returned the bsg */
++			ql_dbg(ql_dbg_user, vha, 0x708a,
++				"bsg abort success. bsg %p sp=%p handle=%#x\n",
++				bsg_job, sp, sp->handle);
++			do_bsg_done = false;
++		}
++		break;
++	default:
++		ql_log(ql_log_info, vha, 0x704f,
++			"bsg abort fail.  bsg=%p sp=%p rval=%x.\n",
++			bsg_job, sp, rval);
++
++		do_bsg_done = true;
++		break;
++	}
++
++	if (!do_bsg_done)
++		return true;
++
++	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
++	/*
++	 * recheck to make sure it's still the same bsg_job due to
++	 * qp_lock_ptr was released earlier.
++	 */
++	if (req->outstanding_cmds[cnt] &&
++	    req->outstanding_cmds[cnt]->u.bsg_job != bsg_job) {
++		/* fw had returned the bsg */
++		spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
++		return true;
++	}
++	req->outstanding_cmds[cnt] = NULL;
++	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
++
++	/* ref: INIT */
++	sp->comp = NULL;
++	kref_put(&sp->cmd_kref, qla2x00_sp_release);
++	bsg_reply->result = -ENXIO;
++	bsg_reply->reply_payload_rcv_len = 0;
++
++	ql_dbg(ql_dbg_user, vha, 0x7051,
++	       "%s bsg_job_done : bsg %p result %#x sp %p.\n",
++	       __func__, bsg_job, bsg_reply->result, sp);
++
++	bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len);
++
++	return true;
+ }
+ 
+ int
+ qla24xx_bsg_timeout(struct bsg_job *bsg_job)
+ {
+-	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
++	struct fc_bsg_request *bsg_request = bsg_job->request;
+ 	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
+ 	struct qla_hw_data *ha = vha->hw;
+ 	int i;
+ 	struct qla_qpair *qpair;
+ 
+-	ql_log(ql_log_info, vha, 0x708b, "%s CMD timeout. bsg ptr %p.\n",
+-	    __func__, bsg_job);
++	ql_log(ql_log_info, vha, 0x708b,
++	       "%s CMD timeout. bsg ptr %p msgcode %x vendor cmd %x\n",
++	       __func__, bsg_job, bsg_request->msgcode,
++	       bsg_request->rqst_data.h_vendor.vendor_cmd[0]);
+ 
+ 	if (qla2x00_isp_reg_stat(ha)) {
+ 		ql_log(ql_log_info, vha, 0x9007,
+@@ -3136,7 +3197,6 @@ qla24xx_bsg_timeout(struct bsg_job *bsg_job)
+ 	}
+ 
+ 	ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
+-	bsg_reply->result = -ENXIO;
+ 
+ done:
+ 	return 0;
+diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
+index 76703f2706b8e3..79879c4743e6dc 100644
+--- a/drivers/scsi/qla2xxx/qla_mid.c
++++ b/drivers/scsi/qla2xxx/qla_mid.c
+@@ -506,6 +506,7 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
+ 		return(NULL);
+ 	}
+ 
++	vha->irq_offset = QLA_BASE_VECTORS;
+ 	host = vha->host;
+ 	fc_vport->dd_data = vha;
+ 	/* New host info */
+diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
+index 7f980e6141c282..7ab717ed72327e 100644
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -6902,12 +6902,15 @@ qla2x00_do_dpc(void *data)
+ 	set_user_nice(current, MIN_NICE);
+ 
+ 	set_current_state(TASK_INTERRUPTIBLE);
+-	while (!kthread_should_stop()) {
++	while (1) {
+ 		ql_dbg(ql_dbg_dpc, base_vha, 0x4000,
+ 		    "DPC handler sleeping.\n");
+ 
+ 		schedule();
+ 
++		if (kthread_should_stop())
++			break;
++
+ 		if (test_and_clear_bit(DO_EEH_RECOVERY, &base_vha->dpc_flags))
+ 			qla_pci_set_eeh_busy(base_vha);
+ 
+@@ -6920,15 +6923,16 @@ qla2x00_do_dpc(void *data)
+ 			goto end_loop;
+ 		}
+ 
++		if (test_bit(UNLOADING, &base_vha->dpc_flags))
++			/* don't do any work. Wait to be terminated by kthread_stop */
++			goto end_loop;
++
+ 		ha->dpc_active = 1;
+ 
+ 		ql_dbg(ql_dbg_dpc + ql_dbg_verbose, base_vha, 0x4001,
+ 		    "DPC handler waking up, dpc_flags=0x%lx.\n",
+ 		    base_vha->dpc_flags);
+ 
+-		if (test_bit(UNLOADING, &base_vha->dpc_flags))
+-			break;
+-
+ 		if (IS_P3P_TYPE(ha)) {
+ 			if (IS_QLA8044(ha)) {
+ 				if (test_and_clear_bit(ISP_UNRECOVERABLE,
+@@ -7241,9 +7245,6 @@ qla2x00_do_dpc(void *data)
+ 	 */
+ 	ha->dpc_active = 0;
+ 
+-	/* Cleanup any residual CTX SRBs. */
+-	qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
+-
+ 	return 0;
+ }
+ 
+diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
+index b52513eeeafa75..680ba180a67252 100644
+--- a/drivers/scsi/scsi_debug.c
++++ b/drivers/scsi/scsi_debug.c
+@@ -6447,7 +6447,7 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
+ 	}
+ 	sd_dp = &sqcp->sd_dp;
+ 
+-	if (polled)
++	if (polled || (ndelay > 0 && ndelay < INCLUSIVE_TIMING_MAX_NS))
+ 		ns_from_boot = ktime_get_boottime_ns();
+ 
+ 	/* one of the resp_*() response functions is called here */
+diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
+index 84334ab39c8107..94127868bedf8a 100644
+--- a/drivers/scsi/sg.c
++++ b/drivers/scsi/sg.c
+@@ -386,7 +386,6 @@ sg_release(struct inode *inode, struct file *filp)
+ 	SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_release\n"));
+ 
+ 	mutex_lock(&sdp->open_rel_lock);
+-	kref_put(&sfp->f_ref, sg_remove_sfp);
+ 	sdp->open_cnt--;
+ 
+ 	/* possibly many open()s waiting on exlude clearing, start many;
+@@ -398,6 +397,7 @@ sg_release(struct inode *inode, struct file *filp)
+ 		wake_up_interruptible(&sdp->open_wait);
+ 	}
+ 	mutex_unlock(&sdp->open_rel_lock);
++	kref_put(&sfp->f_ref, sg_remove_sfp);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
+index beb88f25dbb993..c9038284bc893d 100644
+--- a/drivers/scsi/st.c
++++ b/drivers/scsi/st.c
+@@ -3506,6 +3506,7 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
+ 	int i, cmd_nr, cmd_type, bt;
+ 	int retval = 0;
+ 	unsigned int blk;
++	bool cmd_mtiocget;
+ 	struct scsi_tape *STp = file->private_data;
+ 	struct st_modedef *STm;
+ 	struct st_partstat *STps;
+@@ -3619,6 +3620,7 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
+ 			 */
+ 			if (mtc.mt_op != MTREW &&
+ 			    mtc.mt_op != MTOFFL &&
++			    mtc.mt_op != MTLOAD &&
+ 			    mtc.mt_op != MTRETEN &&
+ 			    mtc.mt_op != MTERASE &&
+ 			    mtc.mt_op != MTSEEK &&
+@@ -3732,17 +3734,28 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
+ 		goto out;
+ 	}
+ 
++	cmd_mtiocget = cmd_type == _IOC_TYPE(MTIOCGET) && cmd_nr == _IOC_NR(MTIOCGET);
++
+ 	if ((i = flush_buffer(STp, 0)) < 0) {
+-		retval = i;
+-		goto out;
+-	}
+-	if (STp->can_partitions &&
+-	    (i = switch_partition(STp)) < 0) {
+-		retval = i;
+-		goto out;
++		if (cmd_mtiocget && STp->pos_unknown) {
++			/* flush fails -> modify status accordingly */
++			reset_state(STp);
++			STp->pos_unknown = 1;
++		} else { /* return error */
++			retval = i;
++			goto out;
++		}
++	} else { /* flush_buffer succeeds */
++		if (STp->can_partitions) {
++			i = switch_partition(STp);
++			if (i < 0) {
++				retval = i;
++				goto out;
++			}
++		}
+ 	}
+ 
+-	if (cmd_type == _IOC_TYPE(MTIOCGET) && cmd_nr == _IOC_NR(MTIOCGET)) {
++	if (cmd_mtiocget) {
+ 		struct mtget mt_status;
+ 
+ 		if (_IOC_SIZE(cmd_in) != sizeof(struct mtget)) {
+@@ -3756,7 +3769,7 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
+ 		    ((STp->density << MT_ST_DENSITY_SHIFT) & MT_ST_DENSITY_MASK);
+ 		mt_status.mt_blkno = STps->drv_block;
+ 		mt_status.mt_fileno = STps->drv_file;
+-		if (STp->block_size != 0) {
++		if (STp->block_size != 0 && mt_status.mt_blkno >= 0) {
+ 			if (STps->rw == ST_WRITING)
+ 				mt_status.mt_blkno +=
+ 				    (STp->buffer)->buffer_bytes / STp->block_size;
+diff --git a/drivers/soc/imx/soc-imx8m.c b/drivers/soc/imx/soc-imx8m.c
+index fe111bae38c8e1..5ea8887828c064 100644
+--- a/drivers/soc/imx/soc-imx8m.c
++++ b/drivers/soc/imx/soc-imx8m.c
+@@ -30,7 +30,7 @@
+ 
+ struct imx8_soc_data {
+ 	char *name;
+-	u32 (*soc_revision)(void);
++	int (*soc_revision)(u32 *socrev);
+ };
+ 
+ static u64 soc_uid;
+@@ -51,24 +51,29 @@ static u32 imx8mq_soc_revision_from_atf(void)
+ static inline u32 imx8mq_soc_revision_from_atf(void) { return 0; };
+ #endif
+ 
+-static u32 __init imx8mq_soc_revision(void)
++static int imx8mq_soc_revision(u32 *socrev)
+ {
+ 	struct device_node *np;
+ 	void __iomem *ocotp_base;
+ 	u32 magic;
+ 	u32 rev;
+ 	struct clk *clk;
++	int ret;
+ 
+ 	np = of_find_compatible_node(NULL, NULL, "fsl,imx8mq-ocotp");
+ 	if (!np)
+-		return 0;
++		return -EINVAL;
+ 
+ 	ocotp_base = of_iomap(np, 0);
+-	WARN_ON(!ocotp_base);
++	if (!ocotp_base) {
++		ret = -EINVAL;
++		goto err_iomap;
++	}
++
+ 	clk = of_clk_get_by_name(np, NULL);
+ 	if (IS_ERR(clk)) {
+-		WARN_ON(IS_ERR(clk));
+-		return 0;
++		ret = PTR_ERR(clk);
++		goto err_clk;
+ 	}
+ 
+ 	clk_prepare_enable(clk);
+@@ -88,32 +93,45 @@ static u32 __init imx8mq_soc_revision(void)
+ 	soc_uid <<= 32;
+ 	soc_uid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW);
+ 
++	*socrev = rev;
++
+ 	clk_disable_unprepare(clk);
+ 	clk_put(clk);
+ 	iounmap(ocotp_base);
+ 	of_node_put(np);
+ 
+-	return rev;
++	return 0;
++
++err_clk:
++	iounmap(ocotp_base);
++err_iomap:
++	of_node_put(np);
++	return ret;
+ }
+ 
+-static void __init imx8mm_soc_uid(void)
++static int imx8mm_soc_uid(void)
+ {
+ 	void __iomem *ocotp_base;
+ 	struct device_node *np;
+ 	struct clk *clk;
++	int ret = 0;
+ 	u32 offset = of_machine_is_compatible("fsl,imx8mp") ?
+ 		     IMX8MP_OCOTP_UID_OFFSET : 0;
+ 
+ 	np = of_find_compatible_node(NULL, NULL, "fsl,imx8mm-ocotp");
+ 	if (!np)
+-		return;
++		return -EINVAL;
+ 
+ 	ocotp_base = of_iomap(np, 0);
+-	WARN_ON(!ocotp_base);
++	if (!ocotp_base) {
++		ret = -EINVAL;
++		goto err_iomap;
++	}
++
+ 	clk = of_clk_get_by_name(np, NULL);
+ 	if (IS_ERR(clk)) {
+-		WARN_ON(IS_ERR(clk));
+-		return;
++		ret = PTR_ERR(clk);
++		goto err_clk;
+ 	}
+ 
+ 	clk_prepare_enable(clk);
+@@ -124,31 +142,41 @@ static void __init imx8mm_soc_uid(void)
+ 
+ 	clk_disable_unprepare(clk);
+ 	clk_put(clk);
++
++err_clk:
+ 	iounmap(ocotp_base);
++err_iomap:
+ 	of_node_put(np);
++
++	return ret;
+ }
+ 
+-static u32 __init imx8mm_soc_revision(void)
++static int imx8mm_soc_revision(u32 *socrev)
+ {
+ 	struct device_node *np;
+ 	void __iomem *anatop_base;
+-	u32 rev;
++	int ret;
+ 
+ 	np = of_find_compatible_node(NULL, NULL, "fsl,imx8mm-anatop");
+ 	if (!np)
+-		return 0;
++		return -EINVAL;
+ 
+ 	anatop_base = of_iomap(np, 0);
+-	WARN_ON(!anatop_base);
++	if (!anatop_base) {
++		ret = -EINVAL;
++		goto err_iomap;
++	}
+ 
+-	rev = readl_relaxed(anatop_base + ANADIG_DIGPROG_IMX8MM);
++	*socrev = readl_relaxed(anatop_base + ANADIG_DIGPROG_IMX8MM);
+ 
+ 	iounmap(anatop_base);
+ 	of_node_put(np);
+ 
+-	imx8mm_soc_uid();
++	return imx8mm_soc_uid();
+ 
+-	return rev;
++err_iomap:
++	of_node_put(np);
++	return ret;
+ }
+ 
+ static const struct imx8_soc_data imx8mq_soc_data = {
+@@ -184,7 +212,7 @@ static __maybe_unused const struct of_device_id imx8_soc_match[] = {
+ 	kasprintf(GFP_KERNEL, "%d.%d", (soc_rev >> 4) & 0xf,  soc_rev & 0xf) : \
+ 	"unknown"
+ 
+-static int __init imx8_soc_init(void)
++static int imx8m_soc_probe(struct platform_device *pdev)
+ {
+ 	struct soc_device_attribute *soc_dev_attr;
+ 	struct soc_device *soc_dev;
+@@ -212,8 +240,11 @@ static int __init imx8_soc_init(void)
+ 	data = id->data;
+ 	if (data) {
+ 		soc_dev_attr->soc_id = data->name;
+-		if (data->soc_revision)
+-			soc_rev = data->soc_revision();
++		if (data->soc_revision) {
++			ret = data->soc_revision(&soc_rev);
++			if (ret)
++				goto free_soc;
++		}
+ 	}
+ 
+ 	soc_dev_attr->revision = imx8_revision(soc_rev);
+@@ -251,6 +282,38 @@ static int __init imx8_soc_init(void)
+ 	kfree(soc_dev_attr);
+ 	return ret;
+ }
++
++static struct platform_driver imx8m_soc_driver = {
++	.probe = imx8m_soc_probe,
++	.driver = {
++		.name = "imx8m-soc",
++	},
++};
++
++static int __init imx8_soc_init(void)
++{
++	struct platform_device *pdev;
++	int ret;
++
++	/* No match means this is non-i.MX8M hardware, do nothing. */
++	if (!of_match_node(imx8_soc_match, of_root))
++		return 0;
++
++	ret = platform_driver_register(&imx8m_soc_driver);
++	if (ret) {
++		pr_err("Failed to register imx8m-soc platform driver: %d\n", ret);
++		return ret;
++	}
++
++	pdev = platform_device_register_simple("imx8m-soc", -1, NULL, 0);
++	if (IS_ERR(pdev)) {
++		pr_err("Failed to register imx8m-soc platform device: %ld\n", PTR_ERR(pdev));
++		platform_driver_unregister(&imx8m_soc_driver);
++		return PTR_ERR(pdev);
++	}
++
++	return 0;
++}
+ device_initcall(imx8_soc_init);
+ MODULE_DESCRIPTION("NXP i.MX8M SoC driver");
+ MODULE_LICENSE("GPL");
+diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c
+index 28bcc65e91beb3..a470285f54a875 100644
+--- a/drivers/soc/qcom/llcc-qcom.c
++++ b/drivers/soc/qcom/llcc-qcom.c
+@@ -153,325 +153,2431 @@ enum llcc_reg_offset {
+ };
+ 
+ static const struct llcc_slice_config sa8775p_data[] =  {
+-	{LLCC_CPUSS,    1, 2048, 1, 0, 0x00FF, 0x0, 0, 0, 0, 1, 1, 0, 0},
+-	{LLCC_VIDSC0,   2, 512, 3, 1, 0x00FF, 0x0, 0, 0, 0, 1, 0, 0, 0},
+-	{LLCC_CPUSS1,   3, 1024, 1, 1, 0x00FF, 0x0, 0, 0, 0, 1, 0, 0, 0},
+-	{LLCC_CPUHWT,   5, 512, 1, 1, 0x00FF, 0x0, 0, 0, 0, 1, 0, 0, 0},
+-	{LLCC_AUDIO,    6, 1024, 1, 1, 0x00FF, 0x0, 0, 0, 0, 0, 0, 0, 0},
+-	{LLCC_CMPT,     10, 4096, 1, 1, 0x00FF, 0x0, 0, 0, 0, 1, 0, 0, 0},
+-	{LLCC_GPUHTW,   11, 1024, 1, 1, 0x00FF, 0x0, 0, 0, 0, 1, 0, 0, 0},
+-	{LLCC_GPU,      12, 1024, 1, 1, 0x00FF, 0x0, 0, 0, 0, 1, 0, 1, 0},
+-	{LLCC_MMUHWT,   13, 1024, 1, 1, 0x00FF, 0x0, 0, 0, 0, 0, 1, 0, 0},
+-	{LLCC_CMPTDMA,  15, 1024, 1, 1, 0x00FF, 0x0, 0, 0, 0, 1, 0, 0, 0},
+-	{LLCC_DISP,     16, 4096, 2, 1, 0x00FF, 0x0, 0, 0, 0, 1, 0, 0, 0},
+-	{LLCC_VIDFW,    17, 3072, 1, 0, 0x00FF, 0x0, 0, 0, 0, 1, 0, 0, 0},
+-	{LLCC_AUDHW,    22, 1024, 1, 1, 0x00FF, 0x0, 0, 0, 0, 0, 0, 0, 0},
+-	{LLCC_CVP,      28, 256, 3, 1, 0x00FF, 0x0, 0, 0, 0, 1, 0, 0, 0},
+-	{LLCC_APTCM,    30, 1024, 3, 1, 0x0, 0xF0, 1, 0, 0, 1, 0, 0, 0},
+-	{LLCC_WRCACHE,    31, 512, 1, 1, 0x00FF, 0x0, 0, 0, 0, 0, 1, 0, 0},
++	{
++		.usecase_id = LLCC_CPUSS,
++		.slice_id = 1,
++		.max_cap = 2048,
++		.priority = 1,
++		.bonus_ways = 0xff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++		.activate_on_init = true,
++	}, {
++		.usecase_id = LLCC_VIDSC0,
++		.slice_id = 2,
++		.max_cap = 512,
++		.priority = 3,
++		.fixed_size = true,
++		.bonus_ways = 0xff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_CPUSS1,
++		.slice_id = 3,
++		.max_cap = 1024,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_CPUHWT,
++		.slice_id = 5,
++		.max_cap = 512,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_AUDIO,
++		.slice_id = 6,
++		.max_cap = 1024,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xff,
++		.cache_mode = 0,
++	}, {
++		.usecase_id = LLCC_CMPT,
++		.slice_id = 10,
++		.max_cap = 4096,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_GPUHTW,
++		.slice_id = 11,
++		.max_cap = 1024,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_GPU,
++		.slice_id = 12,
++		.max_cap = 1024,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++		.write_scid_en = true,
++	}, {
++		.usecase_id = LLCC_MMUHWT,
++		.slice_id = 13,
++		.max_cap = 1024,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xff,
++		.cache_mode = 0,
++		.activate_on_init = true,
++	}, {
++		.usecase_id = LLCC_CMPTDMA,
++		.slice_id = 15,
++		.max_cap = 1024,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_DISP,
++		.slice_id = 16,
++		.max_cap = 4096,
++		.priority = 2,
++		.fixed_size = true,
++		.bonus_ways = 0xff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_VIDFW,
++		.slice_id = 17,
++		.max_cap = 3072,
++		.priority = 1,
++		.bonus_ways = 0xff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_AUDHW,
++		.slice_id = 22,
++		.max_cap = 1024,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xff,
++		.cache_mode = 0,
++	}, {
++		.usecase_id = LLCC_CVP,
++		.slice_id = 28,
++		.max_cap = 256,
++		.priority = 3,
++		.fixed_size = true,
++		.bonus_ways = 0xff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_APTCM,
++		.slice_id = 30,
++		.max_cap = 1024,
++		.priority = 3,
++		.fixed_size = true,
++		.res_ways = 0xf0,
++		.cache_mode = 1,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_WRCACHE,
++		.slice_id = 31,
++		.max_cap = 512,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xff,
++		.cache_mode = 0,
++		.activate_on_init = true,
++	},
+ };
+ 
+ static const struct llcc_slice_config sc7180_data[] =  {
+-	{ LLCC_CPUSS,    1,  256, 1, 0, 0xf, 0x0, 0, 0, 0, 1, 1 },
+-	{ LLCC_MDM,      8,  128, 1, 0, 0xf, 0x0, 0, 0, 0, 1, 0 },
+-	{ LLCC_GPUHTW,   11, 128, 1, 0, 0xf, 0x0, 0, 0, 0, 1, 0 },
+-	{ LLCC_GPU,      12, 128, 1, 0, 0xf, 0x0, 0, 0, 0, 1, 0 },
++	{
++		.usecase_id = LLCC_CPUSS,
++		.slice_id = 1,
++		.max_cap = 256,
++		.priority = 1,
++		.bonus_ways = 0xf,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++		.activate_on_init = true,
++	}, {
++		.usecase_id = LLCC_MDM,
++		.slice_id = 8,
++		.max_cap = 128,
++		.priority = 1,
++		.bonus_ways = 0xf,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_GPUHTW,
++		.slice_id = 11,
++		.max_cap = 128,
++		.priority = 1,
++		.bonus_ways = 0xf,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_GPU,
++		.slice_id = 12,
++		.max_cap = 128,
++		.priority = 1,
++		.bonus_ways = 0xf,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	},
+ };
+ 
+ static const struct llcc_slice_config sc7280_data[] =  {
+-	{ LLCC_CPUSS,    1,  768, 1, 0, 0x3f, 0x0, 0, 0, 0, 1, 1, 0},
+-	{ LLCC_MDMHPGRW, 7,  512, 2, 1, 0x3f, 0x0, 0, 0, 0, 1, 0, 0},
+-	{ LLCC_CMPT,     10, 768, 1, 1, 0x3f, 0x0, 0, 0, 0, 1, 0, 0},
+-	{ LLCC_GPUHTW,   11, 256, 1, 1, 0x3f, 0x0, 0, 0, 0, 1, 0, 0},
+-	{ LLCC_GPU,      12, 512, 1, 0, 0x3f, 0x0, 0, 0, 0, 1, 0, 0},
+-	{ LLCC_MMUHWT,   13, 256, 1, 1, 0x3f, 0x0, 0, 0, 0, 0, 1, 0},
+-	{ LLCC_MDMPNG,   21, 768, 0, 1, 0x3f, 0x0, 0, 0, 0, 1, 0, 0},
+-	{ LLCC_WLHW,     24, 256, 1, 1, 0x3f, 0x0, 0, 0, 0, 1, 0, 0},
+-	{ LLCC_MODPE,    29, 64,  1, 1, 0x3f, 0x0, 0, 0, 0, 1, 0, 0},
++	{
++		.usecase_id = LLCC_CPUSS,
++		.slice_id = 1,
++		.max_cap = 768,
++		.priority = 1,
++		.bonus_ways = 0x3f,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++		.activate_on_init = true,
++	}, {
++		.usecase_id = LLCC_MDMHPGRW,
++		.slice_id = 7,
++		.max_cap = 512,
++		.priority = 2,
++		.fixed_size = true,
++		.bonus_ways = 0x3f,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_CMPT,
++		.slice_id = 10,
++		.max_cap = 768,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0x3f,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_GPUHTW,
++		.slice_id = 11,
++		.max_cap = 256,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0x3f,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_GPU,
++		.slice_id = 12,
++		.max_cap = 512,
++		.priority = 1,
++		.bonus_ways = 0x3f,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_MMUHWT,
++		.slice_id = 13,
++		.max_cap = 256,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0x3f,
++		.cache_mode = 0,
++		.activate_on_init = true,
++	}, {
++		.usecase_id = LLCC_MDMPNG,
++		.slice_id = 21,
++		.max_cap = 768,
++		.priority = 0,
++		.fixed_size = true,
++		.bonus_ways = 0x3f,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_WLHW,
++		.slice_id = 24,
++		.max_cap = 256,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0x3f,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_MODPE,
++		.slice_id = 29,
++		.max_cap = 64,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0x3f,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	},
+ };
+ 
+ static const struct llcc_slice_config sc8180x_data[] = {
+-	{ LLCC_CPUSS,    1, 6144,  1, 1, 0xfff, 0x0,   0, 0, 0, 1, 1 },
+-	{ LLCC_VIDSC0,   2, 512,   2, 1, 0xfff, 0x0,   0, 0, 0, 1, 0 },
+-	{ LLCC_VIDSC1,   3, 512,   2, 1, 0xfff, 0x0,   0, 0, 0, 1, 0 },
+-	{ LLCC_AUDIO,    6, 1024,  1, 1, 0xfff, 0x0,   0, 0, 0, 1, 0 },
+-	{ LLCC_MDMHPGRW, 7, 3072,  1, 1, 0x3ff, 0xc00, 0, 0, 0, 1, 0 },
+-	{ LLCC_MDM,      8, 3072,  1, 1, 0xfff, 0x0,   0, 0, 0, 1, 0 },
+-	{ LLCC_MODHW,    9, 1024,  1, 1, 0xfff, 0x0,   0, 0, 0, 1, 0 },
+-	{ LLCC_CMPT,     10, 6144, 1, 1, 0xfff, 0x0,   0, 0, 0, 1, 0 },
+-	{ LLCC_GPUHTW,   11, 1024, 1, 1, 0xfff, 0x0,   0, 0, 0, 1, 0 },
+-	{ LLCC_GPU,      12, 5120, 1, 1, 0xfff, 0x0,   0, 0, 0, 1, 0 },
+-	{ LLCC_MMUHWT,   13, 1024, 1, 1, 0xfff, 0x0,   0, 0, 0, 0, 1 },
+-	{ LLCC_CMPTDMA,  15, 6144, 1, 1, 0xfff, 0x0,   0, 0, 0, 1, 0 },
+-	{ LLCC_DISP,     16, 6144, 1, 1, 0xfff, 0x0,   0, 0, 0, 1, 0 },
+-	{ LLCC_VIDFW,    17, 1024, 1, 1, 0xfff, 0x0,   0, 0, 0, 1, 0 },
+-	{ LLCC_MDMHPFX,  20, 1024, 2, 1, 0xfff, 0x0,   0, 0, 0, 1, 0 },
+-	{ LLCC_MDMPNG,   21, 1024, 0, 1, 0xc,   0x0,   0, 0, 0, 1, 0 },
+-	{ LLCC_AUDHW,    22, 1024, 1, 1, 0xfff, 0x0,   0, 0, 0, 1, 0 },
+-	{ LLCC_NPU,      23, 6144, 1, 1, 0xfff, 0x0,   0, 0, 0, 1, 0 },
+-	{ LLCC_WLHW,     24, 6144, 1, 1, 0xfff, 0x0,   0, 0, 0, 1, 0 },
+-	{ LLCC_MODPE,    29, 512,  1, 1, 0xc,   0x0,   0, 0, 0, 1, 0 },
+-	{ LLCC_APTCM,    30, 512,  3, 1, 0x0,   0x1,   1, 0, 0, 1, 0 },
+-	{ LLCC_WRCACHE,  31, 128,  1, 1, 0xfff, 0x0,   0, 0, 0, 0, 0 },
++	{
++		.usecase_id = LLCC_CPUSS,
++		.slice_id = 1,
++		.max_cap = 6144,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++		.activate_on_init = true,
++	}, {
++		.usecase_id = LLCC_VIDSC0,
++		.slice_id = 2,
++		.max_cap = 512,
++		.priority = 2,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_VIDSC1,
++		.slice_id = 3,
++		.max_cap = 512,
++		.priority = 2,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_AUDIO,
++		.slice_id = 6,
++		.max_cap = 1024,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_MDMHPGRW,
++		.slice_id = 7,
++		.max_cap = 3072,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0x3ff,
++		.res_ways = 0xc00,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_MDM,
++		.slice_id = 8,
++		.max_cap = 3072,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_MODHW,
++		.slice_id = 9,
++		.max_cap = 1024,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_CMPT,
++		.slice_id = 10,
++		.max_cap = 6144,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_GPUHTW,
++		.slice_id = 11,
++		.max_cap = 1024,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_GPU,
++		.slice_id = 12,
++		.max_cap = 5120,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_MMUHWT,
++		.slice_id = 13,
++		.max_cap = 1024,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.activate_on_init = true,
++	}, {
++		.usecase_id = LLCC_CMPTDMA,
++		.slice_id = 15,
++		.max_cap = 6144,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_DISP,
++		.slice_id = 16,
++		.max_cap = 6144,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_VIDFW,
++		.slice_id = 17,
++		.max_cap = 1024,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_MDMHPFX,
++		.slice_id = 20,
++		.max_cap = 1024,
++		.priority = 2,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_MDMPNG,
++		.slice_id = 21,
++		.max_cap = 1024,
++		.priority = 0,
++		.fixed_size = true,
++		.bonus_ways = 0xc,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_AUDHW,
++		.slice_id = 22,
++		.max_cap = 1024,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_NPU,
++		.slice_id = 23,
++		.max_cap = 6144,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_WLHW,
++		.slice_id = 24,
++		.max_cap = 6144,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_MODPE,
++		.slice_id = 29,
++		.max_cap = 512,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xc,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_APTCM,
++		.slice_id = 30,
++		.max_cap = 512,
++		.priority = 3,
++		.fixed_size = true,
++		.res_ways = 0x1,
++		.cache_mode = 1,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_WRCACHE,
++		.slice_id = 31,
++		.max_cap = 128,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++	},
+ };
+ 
+ static const struct llcc_slice_config sc8280xp_data[] = {
+-	{ LLCC_CPUSS,    1,  6144, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 1, 0 },
+-	{ LLCC_VIDSC0,   2,  512,  3, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+-	{ LLCC_AUDIO,    6,  1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 0, 0 },
+-	{ LLCC_CMPT,     10, 6144, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 0, 0 },
+-	{ LLCC_GPUHTW,   11, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+-	{ LLCC_GPU,      12, 4096, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 1 },
+-	{ LLCC_MMUHWT,   13, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+-	{ LLCC_DISP,     16, 6144, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+-	{ LLCC_AUDHW,    22, 2048, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+-	{ LLCC_ECC,      26, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+-	{ LLCC_CVP,      28, 512,  3, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+-	{ LLCC_APTCM,    30, 1024, 3, 1, 0x0,   0x1, 1, 0, 0, 1, 0, 0 },
+-	{ LLCC_WRCACHE,  31, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+-	{ LLCC_CVPFW,    17, 512,  1, 0, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+-	{ LLCC_CPUSS1,   3, 2048, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+-	{ LLCC_CPUHWT,   5, 512,  1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
++	{
++		.usecase_id = LLCC_CPUSS,
++		.slice_id = 1,
++		.max_cap = 6144,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++		.activate_on_init = true,
++	}, {
++		.usecase_id = LLCC_VIDSC0,
++		.slice_id = 2,
++		.max_cap = 512,
++		.priority = 3,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_AUDIO,
++		.slice_id = 6,
++		.max_cap = 1024,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++	}, {
++		.usecase_id = LLCC_CMPT,
++		.slice_id = 10,
++		.max_cap = 6144,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++	}, {
++		.usecase_id = LLCC_GPUHTW,
++		.slice_id = 11,
++		.max_cap = 1024,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_GPU,
++		.slice_id = 12,
++		.max_cap = 4096,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++		.write_scid_en = true,
++	}, {
++		.usecase_id = LLCC_MMUHWT,
++		.slice_id = 13,
++		.max_cap = 1024,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.activate_on_init = true,
++	}, {
++		.usecase_id = LLCC_DISP,
++		.slice_id = 16,
++		.max_cap = 6144,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_AUDHW,
++		.slice_id = 22,
++		.max_cap = 2048,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_ECC,
++		.slice_id = 26,
++		.max_cap = 1024,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_CVP,
++		.slice_id = 28,
++		.max_cap = 512,
++		.priority = 3,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_APTCM,
++		.slice_id = 30,
++		.max_cap = 1024,
++		.priority = 3,
++		.fixed_size = true,
++		.res_ways = 0x1,
++		.cache_mode = 1,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_WRCACHE,
++		.slice_id = 31,
++		.max_cap = 1024,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.activate_on_init = true,
++	}, {
++		.usecase_id = LLCC_CVPFW,
++		.slice_id = 17,
++		.max_cap = 512,
++		.priority = 1,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_CPUSS1,
++		.slice_id = 3,
++		.max_cap = 2048,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_CPUHWT,
++		.slice_id = 5,
++		.max_cap = 512,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.activate_on_init = true,
++	},
+ };
+ 
+-static const struct llcc_slice_config sdm845_data[] =  {
+-	{ LLCC_CPUSS,    1,  2816, 1, 0, 0xffc, 0x2,   0, 0, 1, 1, 1 },
+-	{ LLCC_VIDSC0,   2,  512,  2, 1, 0x0,   0x0f0, 0, 0, 1, 1, 0 },
+-	{ LLCC_VIDSC1,   3,  512,  2, 1, 0x0,   0x0f0, 0, 0, 1, 1, 0 },
+-	{ LLCC_ROTATOR,  4,  563,  2, 1, 0x0,   0x00e, 2, 0, 1, 1, 0 },
+-	{ LLCC_VOICE,    5,  2816, 1, 0, 0xffc, 0x2,   0, 0, 1, 1, 0 },
+-	{ LLCC_AUDIO,    6,  2816, 1, 0, 0xffc, 0x2,   0, 0, 1, 1, 0 },
+-	{ LLCC_MDMHPGRW, 7,  1024, 2, 0, 0xfc,  0xf00, 0, 0, 1, 1, 0 },
+-	{ LLCC_MDM,      8,  2816, 1, 0, 0xffc, 0x2,   0, 0, 1, 1, 0 },
+-	{ LLCC_CMPT,     10, 2816, 1, 0, 0xffc, 0x2,   0, 0, 1, 1, 0 },
+-	{ LLCC_GPUHTW,   11, 512,  1, 1, 0xc,   0x0,   0, 0, 1, 1, 0 },
+-	{ LLCC_GPU,      12, 2304, 1, 0, 0xff0, 0x2,   0, 0, 1, 1, 0 },
+-	{ LLCC_MMUHWT,   13, 256,  2, 0, 0x0,   0x1,   0, 0, 1, 0, 1 },
+-	{ LLCC_CMPTDMA,  15, 2816, 1, 0, 0xffc, 0x2,   0, 0, 1, 1, 0 },
+-	{ LLCC_DISP,     16, 2816, 1, 0, 0xffc, 0x2,   0, 0, 1, 1, 0 },
+-	{ LLCC_VIDFW,    17, 2816, 1, 0, 0xffc, 0x2,   0, 0, 1, 1, 0 },
+-	{ LLCC_MDMHPFX,  20, 1024, 2, 1, 0x0,   0xf00, 0, 0, 1, 1, 0 },
+-	{ LLCC_MDMPNG,   21, 1024, 0, 1, 0x1e,  0x0,   0, 0, 1, 1, 0 },
+-	{ LLCC_AUDHW,    22, 1024, 1, 1, 0xffc, 0x2,   0, 0, 1, 1, 0 },
++static const struct llcc_slice_config sdm845_data[] =  {{
++		.usecase_id = LLCC_CPUSS,
++		.slice_id = 1,
++		.max_cap = 2816,
++		.priority = 1,
++		.bonus_ways = 0xffc,
++		.res_ways = 0x2,
++		.cache_mode = 0,
++		.dis_cap_alloc = true,
++		.retain_on_pc = true,
++		.activate_on_init = true,
++	}, {
++		.usecase_id = LLCC_VIDSC0,
++		.slice_id = 2,
++		.max_cap = 512,
++		.priority = 2,
++		.fixed_size = true,
++		.res_ways = 0xf0,
++		.cache_mode = 0,
++		.dis_cap_alloc = true,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_VIDSC1,
++		.slice_id = 3,
++		.max_cap = 512,
++		.priority = 2,
++		.fixed_size = true,
++		.res_ways = 0xf0,
++		.cache_mode = 0,
++		.dis_cap_alloc = true,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_ROTATOR,
++		.slice_id = 4,
++		.max_cap = 563,
++		.priority = 2,
++		.fixed_size = true,
++		.res_ways = 0xe,
++		.cache_mode = 2,
++		.dis_cap_alloc = true,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_VOICE,
++		.slice_id = 5,
++		.max_cap = 2816,
++		.priority = 1,
++		.bonus_ways = 0xffc,
++		.res_ways = 0x2,
++		.cache_mode = 0,
++		.dis_cap_alloc = true,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_AUDIO,
++		.slice_id = 6,
++		.max_cap = 2816,
++		.priority = 1,
++		.bonus_ways = 0xffc,
++		.res_ways = 0x2,
++		.cache_mode = 0,
++		.dis_cap_alloc = true,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_MDMHPGRW,
++		.slice_id = 7,
++		.max_cap = 1024,
++		.priority = 2,
++		.bonus_ways = 0xfc,
++		.res_ways = 0xf00,
++		.cache_mode = 0,
++		.dis_cap_alloc = true,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_MDM,
++		.slice_id = 8,
++		.max_cap = 2816,
++		.priority = 1,
++		.bonus_ways = 0xffc,
++		.res_ways = 0x2,
++		.cache_mode = 0,
++		.dis_cap_alloc = true,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_CMPT,
++		.slice_id = 10,
++		.max_cap = 2816,
++		.priority = 1,
++		.bonus_ways = 0xffc,
++		.res_ways = 0x2,
++		.cache_mode = 0,
++		.dis_cap_alloc = true,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_GPUHTW,
++		.slice_id = 11,
++		.max_cap = 512,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xc,
++		.cache_mode = 0,
++		.dis_cap_alloc = true,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_GPU,
++		.slice_id = 12,
++		.max_cap = 2304,
++		.priority = 1,
++		.bonus_ways = 0xff0,
++		.res_ways = 0x2,
++		.cache_mode = 0,
++		.dis_cap_alloc = true,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_MMUHWT,
++		.slice_id = 13,
++		.max_cap = 256,
++		.priority = 2,
++		.res_ways = 0x1,
++		.cache_mode = 0,
++		.dis_cap_alloc = true,
++		.activate_on_init = true,
++	}, {
++		.usecase_id = LLCC_CMPTDMA,
++		.slice_id = 15,
++		.max_cap = 2816,
++		.priority = 1,
++		.bonus_ways = 0xffc,
++		.res_ways = 0x2,
++		.cache_mode = 0,
++		.dis_cap_alloc = true,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_DISP,
++		.slice_id = 16,
++		.max_cap = 2816,
++		.priority = 1,
++		.bonus_ways = 0xffc,
++		.res_ways = 0x2,
++		.cache_mode = 0,
++		.dis_cap_alloc = true,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_VIDFW,
++		.slice_id = 17,
++		.max_cap = 2816,
++		.priority = 1,
++		.bonus_ways = 0xffc,
++		.res_ways = 0x2,
++		.cache_mode = 0,
++		.dis_cap_alloc = true,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_MDMHPFX,
++		.slice_id = 20,
++		.max_cap = 1024,
++		.priority = 2,
++		.fixed_size = true,
++		.res_ways = 0xf00,
++		.cache_mode = 0,
++		.dis_cap_alloc = true,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_MDMPNG,
++		.slice_id = 21,
++		.max_cap = 1024,
++		.priority = 0,
++		.fixed_size = true,
++		.bonus_ways = 0x1e,
++		.cache_mode = 0,
++		.dis_cap_alloc = true,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_AUDHW,
++		.slice_id = 22,
++		.max_cap = 1024,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xffc,
++		.res_ways = 0x2,
++		.cache_mode = 0,
++		.dis_cap_alloc = true,
++		.retain_on_pc = true,
++	},
+ };
+ 
+ static const struct llcc_slice_config sm6350_data[] =  {
+-	{ LLCC_CPUSS,    1,  768, 1, 0, 0xFFF, 0x0, 0, 0, 0, 0, 1, 1 },
+-	{ LLCC_MDM,      8,  512, 2, 0, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0 },
+-	{ LLCC_GPUHTW,   11, 256, 1, 0, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0 },
+-	{ LLCC_GPU,      12, 512, 1, 0, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0 },
+-	{ LLCC_MDMPNG,   21, 768, 0, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0 },
+-	{ LLCC_NPU,      23, 768, 1, 0, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0 },
+-	{ LLCC_MODPE,    29,  64, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0 },
++	{
++		.usecase_id = LLCC_CPUSS,
++		.slice_id = 1,
++		.max_cap = 768,
++		.priority = 1,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.activate_on_init = true,
++		.write_scid_en = true,
++	}, {
++		.usecase_id = LLCC_MDM,
++		.slice_id = 8,
++		.max_cap = 512,
++		.priority = 2,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.activate_on_init = true,
++	}, {
++		.usecase_id = LLCC_GPUHTW,
++		.slice_id = 11,
++		.max_cap = 256,
++		.priority = 1,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.activate_on_init = true,
++	}, {
++		.usecase_id = LLCC_GPU,
++		.slice_id = 12,
++		.max_cap = 512,
++		.priority = 1,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.activate_on_init = true,
++	}, {
++		.usecase_id = LLCC_MDMPNG,
++		.slice_id = 21,
++		.max_cap = 768,
++		.priority = 0,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.activate_on_init = true,
++	}, {
++		.usecase_id = LLCC_NPU,
++		.slice_id = 23,
++		.max_cap = 768,
++		.priority = 1,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.activate_on_init = true,
++	}, {
++		.usecase_id = LLCC_MODPE,
++		.slice_id = 29,
++		.max_cap = 64,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.activate_on_init = true,
++	},
+ };
+ 
+ static const struct llcc_slice_config sm7150_data[] =  {
+-	{ LLCC_CPUSS,    1,  512, 1, 0, 0xF, 0x0, 0, 0, 0, 1, 1 },
+-	{ LLCC_MDM,      8,  128, 2, 0, 0xF, 0x0, 0, 0, 0, 1, 0 },
+-	{ LLCC_GPUHTW,   11, 256, 1, 1, 0xF, 0x0, 0, 0, 0, 1, 0 },
+-	{ LLCC_GPU,      12, 256, 1, 1, 0xF, 0x0, 0, 0, 0, 1, 0 },
+-	{ LLCC_NPU,      23, 512, 1, 0, 0xF, 0x0, 0, 0, 0, 1, 0 },
++	{
++		.usecase_id = LLCC_CPUSS,
++		.slice_id = 1,
++		.max_cap = 512,
++		.priority = 1,
++		.bonus_ways = 0xf,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++		.activate_on_init = true,
++	}, {
++		.usecase_id = LLCC_MDM,
++		.slice_id = 8,
++		.max_cap = 128,
++		.priority = 2,
++		.bonus_ways = 0xf,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_GPUHTW,
++		.slice_id = 11,
++		.max_cap = 256,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xf,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_GPU,
++		.slice_id = 12,
++		.max_cap = 256,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xf,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_NPU,
++		.slice_id = 23,
++		.max_cap = 512,
++		.priority = 1,
++		.bonus_ways = 0xf,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	},
+ };
+ 
+ static const struct llcc_slice_config sm8150_data[] =  {
+-	{  LLCC_CPUSS,    1, 3072, 1, 1, 0xFFF, 0x0,   0, 0, 0, 1, 1 },
+-	{  LLCC_VIDSC0,   2, 512,  2, 1, 0xFFF, 0x0,   0, 0, 0, 1, 0 },
+-	{  LLCC_VIDSC1,   3, 512,  2, 1, 0xFFF, 0x0,   0, 0, 0, 1, 0 },
+-	{  LLCC_AUDIO,    6, 1024, 1, 1, 0xFFF, 0x0,   0, 0, 0, 1, 0 },
+-	{  LLCC_MDMHPGRW, 7, 3072, 1, 0, 0xFF,  0xF00, 0, 0, 0, 1, 0 },
+-	{  LLCC_MDM,      8, 3072, 1, 1, 0xFFF, 0x0,   0, 0, 0, 1, 0 },
+-	{  LLCC_MODHW,    9, 1024, 1, 1, 0xFFF, 0x0,   0, 0, 0, 1, 0 },
+-	{  LLCC_CMPT,    10, 3072, 1, 1, 0xFFF, 0x0,   0, 0, 0, 1, 0 },
+-	{  LLCC_GPUHTW , 11, 512,  1, 1, 0xFFF, 0x0,   0, 0, 0, 1, 0 },
+-	{  LLCC_GPU,     12, 2560, 1, 1, 0xFFF, 0x0,   0, 0, 0, 1, 0 },
+-	{  LLCC_MMUHWT,  13, 1024, 1, 1, 0xFFF, 0x0,   0, 0, 0, 0, 1 },
+-	{  LLCC_CMPTDMA, 15, 3072, 1, 1, 0xFFF, 0x0,   0, 0, 0, 1, 0 },
+-	{  LLCC_DISP,    16, 3072, 1, 1, 0xFFF, 0x0,   0, 0, 0, 1, 0 },
+-	{  LLCC_MDMHPFX, 20, 1024, 2, 1, 0xFFF, 0x0,   0, 0, 0, 1, 0 },
+-	{  LLCC_MDMHPFX, 21, 1024, 0, 1, 0xF,   0x0,   0, 0, 0, 1, 0 },
+-	{  LLCC_AUDHW,   22, 1024, 1, 1, 0xFFF, 0x0,   0, 0, 0, 1, 0 },
+-	{  LLCC_NPU,     23, 3072, 1, 1, 0xFFF, 0x0,   0, 0, 0, 1, 0 },
+-	{  LLCC_WLHW,    24, 3072, 1, 1, 0xFFF, 0x0,   0, 0, 0, 1, 0 },
+-	{  LLCC_MODPE,   29, 256,  1, 1, 0xF,   0x0,   0, 0, 0, 1, 0 },
+-	{  LLCC_APTCM,   30, 256,  3, 1, 0x0,   0x1,   1, 0, 0, 1, 0 },
+-	{  LLCC_WRCACHE, 31, 128,  1, 1, 0xFFF, 0x0,   0, 0, 0, 0, 0 },
++	{
++		.usecase_id = LLCC_CPUSS,
++		.slice_id = 1,
++		.max_cap = 3072,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++		.activate_on_init = true,
++	}, {
++		.usecase_id = LLCC_VIDSC0,
++		.slice_id = 2,
++		.max_cap = 512,
++		.priority = 2,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_VIDSC1,
++		.slice_id = 3,
++		.max_cap = 512,
++		.priority = 2,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_AUDIO,
++		.slice_id = 6,
++		.max_cap = 1024,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_MDMHPGRW,
++		.slice_id = 7,
++		.max_cap = 3072,
++		.priority = 1,
++		.bonus_ways = 0xff,
++		.res_ways = 0xf00,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_MDM,
++		.slice_id = 8,
++		.max_cap = 3072,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_MODHW,
++		.slice_id = 9,
++		.max_cap = 1024,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_CMPT,
++		.slice_id = 10,
++		.max_cap = 3072,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_GPUHTW,
++		.slice_id = 11,
++		.max_cap = 512,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_GPU,
++		.slice_id = 12,
++		.max_cap = 2560,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_MMUHWT,
++		.slice_id = 13,
++		.max_cap = 1024,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.activate_on_init = true,
++	}, {
++		.usecase_id = LLCC_CMPTDMA,
++		.slice_id = 15,
++		.max_cap = 3072,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_DISP,
++		.slice_id = 16,
++		.max_cap = 3072,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_MDMHPFX,
++		.slice_id = 20,
++		.max_cap = 1024,
++		.priority = 2,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_MDMHPFX,
++		.slice_id = 21,
++		.max_cap = 1024,
++		.priority = 0,
++		.fixed_size = true,
++		.bonus_ways = 0xf,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_AUDHW,
++		.slice_id = 22,
++		.max_cap = 1024,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_NPU,
++		.slice_id = 23,
++		.max_cap = 3072,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_WLHW,
++		.slice_id = 24,
++		.max_cap = 3072,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_MODPE,
++		.slice_id = 29,
++		.max_cap = 256,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xf,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_APTCM,
++		.slice_id = 30,
++		.max_cap = 256,
++		.priority = 3,
++		.fixed_size = true,
++		.res_ways = 0x1,
++		.cache_mode = 1,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_WRCACHE,
++		.slice_id = 31,
++		.max_cap = 128,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++	},
+ };
+ 
+ static const struct llcc_slice_config sm8250_data[] =  {
+-	{ LLCC_CPUSS,    1, 3072, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 1, 0 },
+-	{ LLCC_VIDSC0,   2, 512,  3, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+-	{ LLCC_AUDIO,    6, 1024, 1, 0, 0xfff, 0x0, 0, 0, 0, 0, 0, 0 },
+-	{ LLCC_CMPT,    10, 1024, 1, 0, 0xfff, 0x0, 0, 0, 0, 0, 0, 0 },
+-	{ LLCC_GPUHTW,  11, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+-	{ LLCC_GPU,     12, 1024, 1, 0, 0xfff, 0x0, 0, 0, 0, 1, 0, 1 },
+-	{ LLCC_MMUHWT,  13, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+-	{ LLCC_CMPTDMA, 15, 1024, 1, 0, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+-	{ LLCC_DISP,    16, 3072, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+-	{ LLCC_VIDFW,   17, 512,  1, 0, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+-	{ LLCC_AUDHW,   22, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+-	{ LLCC_NPU,     23, 3072, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+-	{ LLCC_WLHW,    24, 1024, 1, 0, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+-	{ LLCC_CVP,     28, 256,  3, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+-	{ LLCC_APTCM,   30, 128,  3, 0, 0x0,   0x3, 1, 0, 0, 1, 0, 0 },
+-	{ LLCC_WRCACHE, 31, 256,  1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
++	{
++		.usecase_id = LLCC_CPUSS,
++		.slice_id = 1,
++		.max_cap = 3072,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++		.activate_on_init = true,
++	}, {
++		.usecase_id = LLCC_VIDSC0,
++		.slice_id = 2,
++		.max_cap = 512,
++		.priority = 3,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_AUDIO,
++		.slice_id = 6,
++		.max_cap = 1024,
++		.priority = 1,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++	}, {
++		.usecase_id = LLCC_CMPT,
++		.slice_id = 10,
++		.max_cap = 1024,
++		.priority = 1,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++	}, {
++		.usecase_id = LLCC_GPUHTW,
++		.slice_id = 11,
++		.max_cap = 1024,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_GPU,
++		.slice_id = 12,
++		.max_cap = 1024,
++		.priority = 1,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++		.write_scid_en = true,
++	}, {
++		.usecase_id = LLCC_MMUHWT,
++		.slice_id = 13,
++		.max_cap = 1024,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.activate_on_init = true,
++	}, {
++		.usecase_id = LLCC_CMPTDMA,
++		.slice_id = 15,
++		.max_cap = 1024,
++		.priority = 1,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_DISP,
++		.slice_id = 16,
++		.max_cap = 3072,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_VIDFW,
++		.slice_id = 17,
++		.max_cap = 512,
++		.priority = 1,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_AUDHW,
++		.slice_id = 22,
++		.max_cap = 1024,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_NPU,
++		.slice_id = 23,
++		.max_cap = 3072,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_WLHW,
++		.slice_id = 24,
++		.max_cap = 1024,
++		.priority = 1,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_CVP,
++		.slice_id = 28,
++		.max_cap = 256,
++		.priority = 3,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_APTCM,
++		.slice_id = 30,
++		.max_cap = 128,
++		.priority = 3,
++		.res_ways = 0x3,
++		.cache_mode = 1,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_WRCACHE,
++		.slice_id = 31,
++		.max_cap = 256,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.activate_on_init = true,
++	},
+ };
+ 
+ static const struct llcc_slice_config sm8350_data[] =  {
+-	{ LLCC_CPUSS,    1, 3072,  1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 1 },
+-	{ LLCC_VIDSC0,   2, 512,   3, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+-	{ LLCC_AUDIO,    6, 1024,  1, 1, 0xfff, 0x0, 0, 0, 0, 0, 0, 0 },
+-	{ LLCC_MDMHPGRW, 7, 1024,  3, 0, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+-	{ LLCC_MODHW,    9, 1024,  1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+-	{ LLCC_CMPT,     10, 3072, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+-	{ LLCC_GPUHTW,   11, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+-	{ LLCC_GPU,      12, 1024, 1, 0, 0xfff, 0x0, 0, 0, 0, 1, 1, 0 },
+-	{ LLCC_MMUHWT,   13, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 0, 1 },
+-	{ LLCC_DISP,     16, 3072, 2, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+-	{ LLCC_MDMPNG,   21, 1024, 0, 1, 0xf,   0x0, 0, 0, 0, 0, 1, 0 },
+-	{ LLCC_AUDHW,    22, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+-	{ LLCC_CVP,      28, 512,  3, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+-	{ LLCC_MODPE,    29, 256,  1, 1, 0xf,   0x0, 0, 0, 0, 0, 1, 0 },
+-	{ LLCC_APTCM,    30, 1024, 3, 1, 0x0,   0x1, 1, 0, 0, 0, 1, 0 },
+-	{ LLCC_WRCACHE,  31, 512,  1, 1, 0xfff, 0x0, 0, 0, 0, 0, 0, 1 },
+-	{ LLCC_CVPFW,    17, 512,  1, 0, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+-	{ LLCC_CPUSS1,   3, 1024,  1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+-	{ LLCC_CPUHWT,   5, 512,   1, 1, 0xfff, 0x0, 0, 0, 0, 0, 0, 1 },
++	{
++		.usecase_id = LLCC_CPUSS,
++		.slice_id = 1,
++		.max_cap = 3072,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.activate_on_init = true,
++		.write_scid_en = true,
++	}, {
++		.usecase_id = LLCC_VIDSC0,
++		.slice_id = 2,
++		.max_cap = 512,
++		.priority = 3,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.activate_on_init = true,
++	}, {
++		.usecase_id = LLCC_AUDIO,
++		.slice_id = 6,
++		.max_cap = 1024,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++	}, {
++		.usecase_id = LLCC_MDMHPGRW,
++		.slice_id = 7,
++		.max_cap = 1024,
++		.priority = 3,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.activate_on_init = true,
++	}, {
++		.usecase_id = LLCC_MODHW,
++		.slice_id = 9,
++		.max_cap = 1024,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.activate_on_init = true,
++	}, {
++		.usecase_id = LLCC_CMPT,
++		.slice_id = 10,
++		.max_cap = 3072,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.activate_on_init = true,
++	}, {
++		.usecase_id = LLCC_GPUHTW,
++		.slice_id = 11,
++		.max_cap = 1024,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.activate_on_init = true,
++	}, {
++		.usecase_id = LLCC_GPU,
++		.slice_id = 12,
++		.max_cap = 1024,
++		.priority = 1,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++		.activate_on_init = true,
++	}, {
++		.usecase_id = LLCC_MMUHWT,
++		.slice_id = 13,
++		.max_cap = 1024,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.write_scid_en = true,
++	}, {
++		.usecase_id = LLCC_DISP,
++		.slice_id = 16,
++		.max_cap = 3072,
++		.priority = 2,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.activate_on_init = true,
++	}, {
++		.usecase_id = LLCC_MDMPNG,
++		.slice_id = 21,
++		.max_cap = 1024,
++		.priority = 0,
++		.fixed_size = true,
++		.bonus_ways = 0xf,
++		.cache_mode = 0,
++		.activate_on_init = true,
++	}, {
++		.usecase_id = LLCC_AUDHW,
++		.slice_id = 22,
++		.max_cap = 1024,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.activate_on_init = true,
++	}, {
++		.usecase_id = LLCC_CVP,
++		.slice_id = 28,
++		.max_cap = 512,
++		.priority = 3,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.activate_on_init = true,
++	}, {
++		.usecase_id = LLCC_MODPE,
++		.slice_id = 29,
++		.max_cap = 256,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xf,
++		.cache_mode = 0,
++		.activate_on_init = true,
++	}, {
++		.usecase_id = LLCC_APTCM,
++		.slice_id = 30,
++		.max_cap = 1024,
++		.priority = 3,
++		.fixed_size = true,
++		.res_ways = 0x1,
++		.cache_mode = 1,
++		.activate_on_init = true,
++	}, {
++		.usecase_id = LLCC_WRCACHE,
++		.slice_id = 31,
++		.max_cap = 512,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.write_scid_en = true,
++	}, {
++		.usecase_id = LLCC_CVPFW,
++		.slice_id = 17,
++		.max_cap = 512,
++		.priority = 1,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.activate_on_init = true,
++	}, {
++		.usecase_id = LLCC_CPUSS1,
++		.slice_id = 3,
++		.max_cap = 1024,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.activate_on_init = true,
++	}, {
++		.usecase_id = LLCC_CPUHWT,
++		.slice_id = 5,
++		.max_cap = 512,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.write_scid_en = true,
++	},
+ };
+ 
+ static const struct llcc_slice_config sm8450_data[] =  {
+-	{LLCC_CPUSS,     1, 3072, 1, 0, 0xFFFF, 0x0,   0, 0, 0, 1, 1, 0, 0 },
+-	{LLCC_VIDSC0,    2,  512, 3, 1, 0xFFFF, 0x0,   0, 0, 0, 1, 0, 0, 0 },
+-	{LLCC_AUDIO,     6, 1024, 1, 1, 0xFFFF, 0x0,   0, 0, 0, 0, 0, 0, 0 },
+-	{LLCC_MDMHPGRW,  7, 1024, 3, 0, 0xFFFF, 0x0,   0, 0, 0, 1, 0, 0, 0 },
+-	{LLCC_MODHW,     9, 1024, 1, 1, 0xFFFF, 0x0,   0, 0, 0, 1, 0, 0, 0 },
+-	{LLCC_CMPT,     10, 4096, 1, 1, 0xFFFF, 0x0,   0, 0, 0, 1, 0, 0, 0 },
+-	{LLCC_GPUHTW,   11,  512, 1, 1, 0xFFFF, 0x0,   0, 0, 0, 1, 0, 0, 0 },
+-	{LLCC_GPU,      12, 2048, 1, 1, 0xFFFF, 0x0,   0, 0, 0, 1, 0, 1, 0 },
+-	{LLCC_MMUHWT,   13,  768, 1, 1, 0xFFFF, 0x0,   0, 0, 0, 0, 1, 0, 0 },
+-	{LLCC_DISP,     16, 4096, 2, 1, 0xFFFF, 0x0,   0, 0, 0, 1, 0, 0, 0 },
+-	{LLCC_MDMPNG,   21, 1024, 1, 1, 0xF000, 0x0,   0, 0, 0, 1, 0, 0, 0 },
+-	{LLCC_AUDHW,    22, 1024, 1, 1, 0xFFFF, 0x0,   0, 0, 0, 0, 0, 0, 0 },
+-	{LLCC_CVP,      28,  256, 3, 1, 0xFFFF, 0x0,   0, 0, 0, 1, 0, 0, 0 },
+-	{LLCC_MODPE,    29,   64, 1, 1, 0xF000, 0x0,   0, 0, 0, 1, 0, 0, 0 },
+-	{LLCC_APTCM,    30, 1024, 3, 1, 0x0,    0xF0,  1, 0, 0, 1, 0, 0, 0 },
+-	{LLCC_WRCACHE,  31,  512, 1, 1, 0xFFFF, 0x0,   0, 0, 0, 0, 1, 0, 0 },
+-	{LLCC_CVPFW,    17,  512, 1, 1, 0xFFFF, 0x0,   0, 0, 0, 1, 0, 0, 0 },
+-	{LLCC_CPUSS1,    3, 1024, 1, 1, 0xFFFF, 0x0,   0, 0, 0, 1, 0, 0, 0 },
+-	{LLCC_CAMEXP0,   4,  256, 3, 1, 0xFFFF, 0x0,   0, 0, 0, 1, 0, 0, 0 },
+-	{LLCC_CPUMTE,   23,  256, 1, 1, 0x0FFF, 0x0,   0, 0, 0, 0, 1, 0, 0 },
+-	{LLCC_CPUHWT,    5,  512, 1, 1, 0xFFFF, 0x0,   0, 0, 0, 1, 1, 0, 0 },
+-	{LLCC_CAMEXP1,  27,  256, 3, 1, 0xFFFF, 0x0,   0, 0, 0, 1, 0, 0, 0 },
+-	{LLCC_AENPU,     8, 2048, 1, 1, 0xFFFF, 0x0,   0, 0, 0, 0, 0, 0, 0 },
++	{
++		.usecase_id = LLCC_CPUSS,
++		.slice_id = 1,
++		.max_cap = 3072,
++		.priority = 1,
++		.bonus_ways = 0xffff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++		.activate_on_init = true,
++	}, {
++		.usecase_id = LLCC_VIDSC0,
++		.slice_id = 2,
++		.max_cap = 512,
++		.priority = 3,
++		.fixed_size = true,
++		.bonus_ways = 0xffff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_AUDIO,
++		.slice_id = 6,
++		.max_cap = 1024,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xffff,
++		.cache_mode = 0,
++	}, {
++		.usecase_id = LLCC_MDMHPGRW,
++		.slice_id = 7,
++		.max_cap = 1024,
++		.priority = 3,
++		.bonus_ways = 0xffff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_MODHW,
++		.slice_id = 9,
++		.max_cap = 1024,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xffff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_CMPT,
++		.slice_id = 10,
++		.max_cap = 4096,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xffff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_GPUHTW,
++		.slice_id = 11,
++		.max_cap = 512,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xffff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_GPU,
++		.slice_id = 12,
++		.max_cap = 2048,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xffff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++		.write_scid_en = true,
++	}, {
++		.usecase_id = LLCC_MMUHWT,
++		.slice_id = 13,
++		.max_cap = 768,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xffff,
++		.cache_mode = 0,
++		.activate_on_init = true,
++	}, {
++		.usecase_id = LLCC_DISP,
++		.slice_id = 16,
++		.max_cap = 4096,
++		.priority = 2,
++		.fixed_size = true,
++		.bonus_ways = 0xffff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_MDMPNG,
++		.slice_id = 21,
++		.max_cap = 1024,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xf000,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_AUDHW,
++		.slice_id = 22,
++		.max_cap = 1024,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xffff,
++		.cache_mode = 0,
++	}, {
++		.usecase_id = LLCC_CVP,
++		.slice_id = 28,
++		.max_cap = 256,
++		.priority = 3,
++		.fixed_size = true,
++		.bonus_ways = 0xffff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_MODPE,
++		.slice_id = 29,
++		.max_cap = 64,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xf000,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_APTCM,
++		.slice_id = 30,
++		.max_cap = 1024,
++		.priority = 3,
++		.fixed_size = true,
++		.res_ways = 0xf0,
++		.cache_mode = 1,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_WRCACHE,
++		.slice_id = 31,
++		.max_cap = 512,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xffff,
++		.cache_mode = 0,
++		.activate_on_init = true,
++	}, {
++		.usecase_id = LLCC_CVPFW,
++		.slice_id = 17,
++		.max_cap = 512,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xffff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_CPUSS1,
++		.slice_id = 3,
++		.max_cap = 1024,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xffff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_CAMEXP0,
++		.slice_id = 4,
++		.max_cap = 256,
++		.priority = 3,
++		.fixed_size = true,
++		.bonus_ways = 0xffff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_CPUMTE,
++		.slice_id = 23,
++		.max_cap = 256,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.activate_on_init = true,
++	}, {
++		.usecase_id = LLCC_CPUHWT,
++		.slice_id = 5,
++		.max_cap = 512,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xffff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++		.activate_on_init = true,
++	}, {
++		.usecase_id = LLCC_CAMEXP1,
++		.slice_id = 27,
++		.max_cap = 256,
++		.priority = 3,
++		.fixed_size = true,
++		.bonus_ways = 0xffff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_AENPU,
++		.slice_id = 8,
++		.max_cap = 2048,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xffff,
++		.cache_mode = 0,
++	},
+ };
+ 
+ static const struct llcc_slice_config sm8550_data[] =  {
+-	{LLCC_CPUSS,     1, 5120, 1, 0, 0xFFFFFF, 0x0,   0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+-	{LLCC_VIDSC0,    2,  512, 4, 1, 0xFFFFFF, 0x0,   0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+-	{LLCC_AUDIO,     6, 1024, 1, 1, 0xFFFFFF, 0x0,   0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+-	{LLCC_MDMHPGRW, 25, 1024, 4, 0, 0xFFFFFF, 0x0,   0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+-	{LLCC_MODHW,    26, 1024, 1, 1, 0xFFFFFF, 0x0,   0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+-	{LLCC_CMPT,     10, 4096, 1, 1, 0xFFFFFF, 0x0,   0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+-	{LLCC_GPUHTW,   11,  512, 1, 1, 0xFFFFFF, 0x0,   0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+-	{LLCC_GPU,       9, 3096, 1, 0, 0xFFFFFF, 0x0,   0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, },
+-	{LLCC_MMUHWT,   18,  768, 1, 1, 0xFFFFFF, 0x0,   0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+-	{LLCC_DISP,     16, 6144, 1, 1, 0xFFFFFF, 0x0,   2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+-	{LLCC_MDMPNG,   27, 1024, 0, 1, 0xF00000, 0x0,   0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+-	{LLCC_AUDHW,    22, 1024, 1, 1, 0xFFFFFF, 0x0,   0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+-	{LLCC_CVP,       8,  256, 4, 1, 0xFFFFFF, 0x0,   0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+-	{LLCC_MODPE,    29,   64, 1, 1, 0xF00000, 0x0,   0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, },
+-	{LLCC_WRCACHE,  31,  512, 1, 1, 0xFFFFFF, 0x0,   0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+-	{LLCC_CAMEXP0,   4,  256, 4, 1,      0xF, 0x0,   0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+-	{LLCC_CPUHWT,    5,  512, 1, 1, 0xFFFFFF, 0x0,   0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+-	{LLCC_CAMEXP1,   7, 3200, 3, 1, 0xFFFFF0, 0x0,   2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+-	{LLCC_CMPTHCP,  17,  256, 4, 1, 0xFFFFFF, 0x0,   0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+-	{LLCC_LCPDARE,  30,  128, 4, 1, 0xFFFFFF, 0x0,   0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, },
+-	{LLCC_AENPU,     3, 3072, 1, 1, 0xFE01FF, 0x0,   2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+-	{LLCC_ISLAND1,  12, 1792, 7, 1,   0xFE00, 0x0,   0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+-	{LLCC_ISLAND4,  15,  256, 7, 1,  0x10000, 0x0,   0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+-	{LLCC_CAMEXP2,  19, 3200, 3, 1, 0xFFFFF0, 0x0,   2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+-	{LLCC_CAMEXP3,  20, 3200, 2, 1, 0xFFFFF0, 0x0,   2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+-	{LLCC_CAMEXP4,  21, 3200, 2, 1, 0xFFFFF0, 0x0,   2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+-	{LLCC_DISP_WB,  23, 1024, 4, 1, 0xFFFFFF, 0x0,   0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+-	{LLCC_DISP_1,   24, 6144, 1, 1, 0xFFFFFF, 0x0,   2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+-	{LLCC_VIDVSP,   28,  256, 4, 1, 0xFFFFFF, 0x0,   0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
++	{
++		.usecase_id = LLCC_CPUSS,
++		.slice_id = 1,
++		.max_cap = 5120,
++		.priority = 1,
++		.bonus_ways = 0xffffff,
++		.cache_mode = 0,
++		.activate_on_init = true,
++		.write_scid_en = true,
++	}, {
++		.usecase_id = LLCC_VIDSC0,
++		.slice_id = 2,
++		.max_cap = 512,
++		.priority = 4,
++		.fixed_size = true,
++		.bonus_ways = 0xffffff,
++		.cache_mode = 0,
++	}, {
++		.usecase_id = LLCC_AUDIO,
++		.slice_id = 6,
++		.max_cap = 1024,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xffffff,
++		.cache_mode = 0,
++	}, {
++		.usecase_id = LLCC_MDMHPGRW,
++		.slice_id = 25,
++		.max_cap = 1024,
++		.priority = 4,
++		.bonus_ways = 0xffffff,
++		.cache_mode = 0,
++	}, {
++		.usecase_id = LLCC_MODHW,
++		.slice_id = 26,
++		.max_cap = 1024,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xffffff,
++		.cache_mode = 0,
++	}, {
++		.usecase_id = LLCC_CMPT,
++		.slice_id = 10,
++		.max_cap = 4096,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xffffff,
++		.cache_mode = 0,
++	}, {
++		.usecase_id = LLCC_GPUHTW,
++		.slice_id = 11,
++		.max_cap = 512,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xffffff,
++		.cache_mode = 0,
++	}, {
++		.usecase_id = LLCC_GPU,
++		.slice_id = 9,
++		.max_cap = 3096,
++		.priority = 1,
++		.bonus_ways = 0xffffff,
++		.cache_mode = 0,
++		.write_scid_en = true,
++		.write_scid_cacheable_en = true,
++	}, {
++		.usecase_id = LLCC_MMUHWT,
++		.slice_id = 18,
++		.max_cap = 768,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xffffff,
++		.cache_mode = 0,
++		.activate_on_init = true,
++	}, {
++		.usecase_id = LLCC_DISP,
++		.slice_id = 16,
++		.max_cap = 6144,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xffffff,
++		.cache_mode = 2,
++	}, {
++		.usecase_id = LLCC_MDMPNG,
++		.slice_id = 27,
++		.max_cap = 1024,
++		.priority = 0,
++		.fixed_size = true,
++		.bonus_ways = 0xf00000,
++		.cache_mode = 0,
++	}, {
++		.usecase_id = LLCC_AUDHW,
++		.slice_id = 22,
++		.max_cap = 1024,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xffffff,
++		.cache_mode = 0,
++	}, {
++		.usecase_id = LLCC_CVP,
++		.slice_id = 8,
++		.max_cap = 256,
++		.priority = 4,
++		.fixed_size = true,
++		.bonus_ways = 0xffffff,
++		.cache_mode = 0,
++	}, {
++		.usecase_id = LLCC_MODPE,
++		.slice_id = 29,
++		.max_cap = 64,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xf00000,
++		.cache_mode = 0,
++		.alloc_oneway_en = true,
++		.vict_prio = true,
++	}, {
++		.usecase_id = LLCC_WRCACHE,
++		.slice_id = 31,
++		.max_cap = 512,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xffffff,
++		.cache_mode = 0,
++		.activate_on_init = true,
++	}, {
++		.usecase_id = LLCC_CAMEXP0,
++		.slice_id = 4,
++		.max_cap = 256,
++		.priority = 4,
++		.fixed_size = true,
++		.bonus_ways = 0xf,
++		.cache_mode = 0,
++	}, {
++		.usecase_id = LLCC_CPUHWT,
++		.slice_id = 5,
++		.max_cap = 512,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xffffff,
++		.cache_mode = 0,
++		.activate_on_init = true,
++	}, {
++		.usecase_id = LLCC_CAMEXP1,
++		.slice_id = 7,
++		.max_cap = 3200,
++		.priority = 3,
++		.fixed_size = true,
++		.bonus_ways = 0xfffff0,
++		.cache_mode = 2,
++	}, {
++		.usecase_id = LLCC_CMPTHCP,
++		.slice_id = 17,
++		.max_cap = 256,
++		.priority = 4,
++		.fixed_size = true,
++		.bonus_ways = 0xffffff,
++		.cache_mode = 0,
++	}, {
++		.usecase_id = LLCC_LCPDARE,
++		.slice_id = 30,
++		.max_cap = 128,
++		.priority = 4,
++		.fixed_size = true,
++		.bonus_ways = 0xffffff,
++		.cache_mode = 0,
++		.activate_on_init = true,
++		.alloc_oneway_en = true,
++		.vict_prio = true,
++	}, {
++		.usecase_id = LLCC_AENPU,
++		.slice_id = 3,
++		.max_cap = 3072,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfe01ff,
++		.cache_mode = 2,
++	}, {
++		.usecase_id = LLCC_ISLAND1,
++		.slice_id = 12,
++		.max_cap = 1792,
++		.priority = 7,
++		.fixed_size = true,
++		.bonus_ways = 0xfe00,
++		.cache_mode = 0,
++	}, {
++		.usecase_id = LLCC_ISLAND4,
++		.slice_id = 15,
++		.max_cap = 256,
++		.priority = 7,
++		.fixed_size = true,
++		.bonus_ways = 0x10000,
++		.cache_mode = 0,
++	}, {
++		.usecase_id = LLCC_CAMEXP2,
++		.slice_id = 19,
++		.max_cap = 3200,
++		.priority = 3,
++		.fixed_size = true,
++		.bonus_ways = 0xfffff0,
++		.cache_mode = 2,
++	}, {
++		.usecase_id = LLCC_CAMEXP3,
++		.slice_id = 20,
++		.max_cap = 3200,
++		.priority = 2,
++		.fixed_size = true,
++		.bonus_ways = 0xfffff0,
++		.cache_mode = 2,
++	}, {
++		.usecase_id = LLCC_CAMEXP4,
++		.slice_id = 21,
++		.max_cap = 3200,
++		.priority = 2,
++		.fixed_size = true,
++		.bonus_ways = 0xfffff0,
++		.cache_mode = 2,
++	}, {
++		.usecase_id = LLCC_DISP_WB,
++		.slice_id = 23,
++		.max_cap = 1024,
++		.priority = 4,
++		.fixed_size = true,
++		.bonus_ways = 0xffffff,
++		.cache_mode = 0,
++	}, {
++		.usecase_id = LLCC_DISP_1,
++		.slice_id = 24,
++		.max_cap = 6144,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xffffff,
++		.cache_mode = 2,
++	}, {
++		.usecase_id = LLCC_VIDVSP,
++		.slice_id = 28,
++		.max_cap = 256,
++		.priority = 4,
++		.fixed_size = true,
++		.bonus_ways = 0xffffff,
++		.cache_mode = 0,
++	},
+ };
+ 
+ static const struct llcc_slice_config sm8650_data[] = {
+-	{LLCC_CPUSS,     1, 5120, 1, 0, 0xFFFFFF, 0x0,      0, 0x0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0},
+-	{LLCC_VIDSC0,    2,  512, 3, 1, 0xFFFFFF, 0x0,      0, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+-	{LLCC_AUDIO,     6,  512, 1, 1, 0xFFFFFF, 0x0,      0, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+-	{LLCC_MDMHPGRW, 25, 1024, 3, 0, 0xFFFFFF, 0x0,      0, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+-	{LLCC_MODHW,    26, 1024, 1, 1, 0xFFFFFF, 0x0,      0, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+-	{LLCC_CMPT,     10, 4096, 1, 1, 0xFFFFFF, 0x0,      0, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+-	{LLCC_GPUHTW,   11,  512, 1, 1, 0xFFFFFF, 0x0,      0, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+-	{LLCC_GPU,       9, 3096, 1, 0, 0xFFFFFF, 0x0,      0, 0x0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0},
+-	{LLCC_MMUHWT,   18,  768, 1, 1, 0xFFFFFF, 0x0,      0, 0x0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+-	{LLCC_DISP,     16, 6144, 1, 1, 0xFFFFFF, 0x0,      2, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+-	{LLCC_MDMHPFX,  24, 1024, 3, 1, 0xFFFFFF, 0x0,      0, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+-	{LLCC_MDMPNG,   27, 1024, 0, 1, 0x000000, 0x0,      0, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+-	{LLCC_AUDHW,    22, 1024, 1, 1, 0xFFFFFF, 0x0,      0, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+-	{LLCC_CVP,       8,  256, 3, 1, 0xFFFFFF, 0x0,      0, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+-	{LLCC_MODPE,    29,  128, 1, 1, 0xF00000, 0x0,      0, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0},
+-	{LLCC_WRCACHE,  31,  512, 1, 1, 0xFFFFFF, 0x0,      0, 0x0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+-	{LLCC_CAMEXP0,   4,  256, 3, 1,      0xF, 0x0,      0, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+-	{LLCC_CAMEXP1,   7, 3200, 3, 1, 0xFFFFF0, 0x0,      2, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+-	{LLCC_CMPTHCP,  17,  256, 3, 1, 0xFFFFFF, 0x0,      0, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+-	{LLCC_LCPDARE,  30,  128, 3, 1, 0xFFFFFF, 0x0,      0, 0x0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0},
+-	{LLCC_AENPU,     3, 3072, 1, 1, 0xFFFFFF, 0x0,      2, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+-	{LLCC_ISLAND1,  12, 5888, 7, 1,      0x0, 0x7FFFFF, 0, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+-	{LLCC_DISP_WB,  23, 1024, 3, 1, 0xFFFFFF, 0x0,      0, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+-	{LLCC_VIDVSP,   28,  256, 3, 1, 0xFFFFFF, 0x0,      0, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
++	{
++		.usecase_id = LLCC_CPUSS,
++		.slice_id = 1,
++		.max_cap = 5120,
++		.priority = 1,
++		.bonus_ways = 0xffffff,
++		.cache_mode = 0,
++		.activate_on_init = true,
++		.stale_en = true,
++	}, {
++		.usecase_id = LLCC_VIDSC0,
++		.slice_id = 2,
++		.max_cap = 512,
++		.priority = 3,
++		.fixed_size = true,
++		.bonus_ways = 0xffffff,
++		.cache_mode = 0,
++	}, {
++		.usecase_id = LLCC_AUDIO,
++		.slice_id = 6,
++		.max_cap = 512,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xffffff,
++		.cache_mode = 0,
++	}, {
++		.usecase_id = LLCC_MDMHPGRW,
++		.slice_id = 25,
++		.max_cap = 1024,
++		.priority = 3,
++		.bonus_ways = 0xffffff,
++		.cache_mode = 0,
++	}, {
++		.usecase_id = LLCC_MODHW,
++		.slice_id = 26,
++		.max_cap = 1024,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xffffff,
++		.cache_mode = 0,
++	}, {
++		.usecase_id = LLCC_CMPT,
++		.slice_id = 10,
++		.max_cap = 4096,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xffffff,
++		.cache_mode = 0,
++	}, {
++		.usecase_id = LLCC_GPUHTW,
++		.slice_id = 11,
++		.max_cap = 512,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xffffff,
++		.cache_mode = 0,
++	}, {
++		.usecase_id = LLCC_GPU,
++		.slice_id = 9,
++		.max_cap = 3096,
++		.priority = 1,
++		.bonus_ways = 0xffffff,
++		.cache_mode = 0,
++		.write_scid_en = true,
++		.write_scid_cacheable_en = true,
++	}, {
++		.usecase_id = LLCC_MMUHWT,
++		.slice_id = 18,
++		.max_cap = 768,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xffffff,
++		.cache_mode = 0,
++		.activate_on_init = true,
++	}, {
++		.usecase_id = LLCC_DISP,
++		.slice_id = 16,
++		.max_cap = 6144,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xffffff,
++		.cache_mode = 2,
++	}, {
++		.usecase_id = LLCC_MDMHPFX,
++		.slice_id = 24,
++		.max_cap = 1024,
++		.priority = 3,
++		.fixed_size = true,
++		.bonus_ways = 0xffffff,
++		.cache_mode = 0,
++	}, {
++		.usecase_id = LLCC_MDMPNG,
++		.slice_id = 27,
++		.max_cap = 1024,
++		.priority = 0,
++		.fixed_size = true,
++		.cache_mode = 0,
++	}, {
++		.usecase_id = LLCC_AUDHW,
++		.slice_id = 22,
++		.max_cap = 1024,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xffffff,
++		.cache_mode = 0,
++	}, {
++		.usecase_id = LLCC_CVP,
++		.slice_id = 8,
++		.max_cap = 256,
++		.priority = 3,
++		.fixed_size = true,
++		.bonus_ways = 0xffffff,
++		.cache_mode = 0,
++	}, {
++		.usecase_id = LLCC_MODPE,
++		.slice_id = 29,
++		.max_cap = 128,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xf00000,
++		.cache_mode = 0,
++		.alloc_oneway_en = true,
++	}, {
++		.usecase_id = LLCC_WRCACHE,
++		.slice_id = 31,
++		.max_cap = 512,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xffffff,
++		.cache_mode = 0,
++		.activate_on_init = true,
++	}, {
++		.usecase_id = LLCC_CAMEXP0,
++		.slice_id = 4,
++		.max_cap = 256,
++		.priority = 3,
++		.fixed_size = true,
++		.bonus_ways = 0xf,
++		.cache_mode = 0,
++	}, {
++		.usecase_id = LLCC_CAMEXP1,
++		.slice_id = 7,
++		.max_cap = 3200,
++		.priority = 3,
++		.fixed_size = true,
++		.bonus_ways = 0xfffff0,
++		.cache_mode = 2,
++	}, {
++		.usecase_id = LLCC_CMPTHCP,
++		.slice_id = 17,
++		.max_cap = 256,
++		.priority = 3,
++		.fixed_size = true,
++		.bonus_ways = 0xffffff,
++		.cache_mode = 0,
++	}, {
++		.usecase_id = LLCC_LCPDARE,
++		.slice_id = 30,
++		.max_cap = 128,
++		.priority = 3,
++		.fixed_size = true,
++		.bonus_ways = 0xffffff,
++		.cache_mode = 0,
++		.activate_on_init = true,
++		.alloc_oneway_en = true,
++	}, {
++		.usecase_id = LLCC_AENPU,
++		.slice_id = 3,
++		.max_cap = 3072,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xffffff,
++		.cache_mode = 2,
++	}, {
++		.usecase_id = LLCC_ISLAND1,
++		.slice_id = 12,
++		.max_cap = 5888,
++		.priority = 7,
++		.fixed_size = true,
++		.res_ways = 0x7fffff,
++		.cache_mode = 0,
++	}, {
++		.usecase_id = LLCC_DISP_WB,
++		.slice_id = 23,
++		.max_cap = 1024,
++		.priority = 3,
++		.fixed_size = true,
++		.bonus_ways = 0xffffff,
++		.cache_mode = 0,
++	}, {
++		.usecase_id = LLCC_VIDVSP,
++		.slice_id = 28,
++		.max_cap = 256,
++		.priority = 3,
++		.fixed_size = true,
++		.bonus_ways = 0xffffff,
++		.cache_mode = 0,
++	},
+ };
+ 
+ static const struct llcc_slice_config qdu1000_data_2ch[] = {
+-	{ LLCC_MDMHPGRW, 7, 512, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0, 0 },
+-	{ LLCC_MODHW,    9, 256, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0, 0 },
+-	{ LLCC_MDMPNG,  21, 256, 0, 1, 0x3,   0x0, 0, 0, 0, 1, 0, 0, 0 },
+-	{ LLCC_ECC,     26, 512, 3, 1, 0xffc, 0x0, 0, 0, 0, 0, 1, 0, 0 },
+-	{ LLCC_MODPE,   29, 256, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0, 0 },
+-	{ LLCC_APTCM,   30, 256, 3, 1, 0x0,   0xc, 1, 0, 0, 1, 0, 0, 0 },
+-	{ LLCC_WRCACHE, 31, 128, 1, 1, 0x3,   0x0, 0, 0, 0, 0, 1, 0, 0 },
++	{
++		.usecase_id = LLCC_MDMHPGRW,
++		.slice_id = 7,
++		.max_cap = 512,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_MODHW,
++		.slice_id = 9,
++		.max_cap = 256,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_MDMPNG,
++		.slice_id = 21,
++		.max_cap = 256,
++		.priority = 0,
++		.fixed_size = true,
++		.bonus_ways = 0x3,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_ECC,
++		.slice_id = 26,
++		.max_cap = 512,
++		.priority = 3,
++		.fixed_size = true,
++		.bonus_ways = 0xffc,
++		.cache_mode = 0,
++		.activate_on_init = true,
++	}, {
++		.usecase_id = LLCC_MODPE,
++		.slice_id = 29,
++		.max_cap = 256,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_APTCM,
++		.slice_id = 30,
++		.max_cap = 256,
++		.priority = 3,
++		.fixed_size = true,
++		.res_ways = 0xc,
++		.cache_mode = 1,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_WRCACHE,
++		.slice_id = 31,
++		.max_cap = 128,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0x3,
++		.cache_mode = 0,
++		.activate_on_init = true,
++	},
+ };
+ 
+ static const struct llcc_slice_config qdu1000_data_4ch[] = {
+-	{ LLCC_MDMHPGRW, 7, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0, 0 },
+-	{ LLCC_MODHW,    9, 512,  1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0, 0 },
+-	{ LLCC_MDMPNG,  21, 512,  0, 1, 0x3,   0x0, 0, 0, 0, 1, 0, 0, 0 },
+-	{ LLCC_ECC,     26, 1024, 3, 1, 0xffc, 0x0, 0, 0, 0, 0, 1, 0, 0 },
+-	{ LLCC_MODPE,   29, 512,  1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0, 0 },
+-	{ LLCC_APTCM,   30, 512,  3, 1, 0x0,   0xc, 1, 0, 0, 1, 0, 0, 0 },
+-	{ LLCC_WRCACHE, 31, 256,  1, 1, 0x3,   0x0, 0, 0, 0, 0, 1, 0, 0 },
++	{
++		.usecase_id = LLCC_MDMHPGRW,
++		.slice_id = 7,
++		.max_cap = 1024,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_MODHW,
++		.slice_id = 9,
++		.max_cap = 512,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_MDMPNG,
++		.slice_id = 21,
++		.max_cap = 512,
++		.priority = 0,
++		.fixed_size = true,
++		.bonus_ways = 0x3,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_ECC,
++		.slice_id = 26,
++		.max_cap = 1024,
++		.priority = 3,
++		.fixed_size = true,
++		.bonus_ways = 0xffc,
++		.cache_mode = 0,
++		.activate_on_init = true,
++	}, {
++		.usecase_id = LLCC_MODPE,
++		.slice_id = 29,
++		.max_cap = 512,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_APTCM,
++		.slice_id = 30,
++		.max_cap = 512,
++		.priority = 3,
++		.fixed_size = true,
++		.res_ways = 0xc,
++		.cache_mode = 1,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_WRCACHE,
++		.slice_id = 31,
++		.max_cap = 256,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0x3,
++		.cache_mode = 0,
++		.activate_on_init = true,
++	},
+ };
+ 
+ static const struct llcc_slice_config qdu1000_data_8ch[] = {
+-	{ LLCC_MDMHPGRW, 7, 2048, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0, 0 },
+-	{ LLCC_MODHW,    9, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0, 0 },
+-	{ LLCC_MDMPNG,  21, 1024, 0, 1, 0x3,   0x0, 0, 0, 0, 1, 0, 0, 0 },
+-	{ LLCC_ECC,     26, 2048, 3, 1, 0xffc, 0x0, 0, 0, 0, 0, 1, 0, 0 },
+-	{ LLCC_MODPE,   29, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0, 0 },
+-	{ LLCC_APTCM,   30, 1024, 3, 1, 0x0,   0xc, 1, 0, 0, 1, 0, 0, 0 },
+-	{ LLCC_WRCACHE, 31, 512,  1, 1, 0x3,   0x0, 0, 0, 0, 0, 1, 0, 0 },
++	{
++		.usecase_id = LLCC_MDMHPGRW,
++		.slice_id = 7,
++		.max_cap = 2048,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_MODHW,
++		.slice_id = 9,
++		.max_cap = 1024,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_MDMPNG,
++		.slice_id = 21,
++		.max_cap = 1024,
++		.priority = 0,
++		.fixed_size = true,
++		.bonus_ways = 0x3,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_ECC,
++		.slice_id = 26,
++		.max_cap = 2048,
++		.priority = 3,
++		.fixed_size = true,
++		.bonus_ways = 0xffc,
++		.cache_mode = 0,
++		.activate_on_init = true,
++	}, {
++		.usecase_id = LLCC_MODPE,
++		.slice_id = 29,
++		.max_cap = 1024,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_APTCM,
++		.slice_id = 30,
++		.max_cap = 1024,
++		.priority = 3,
++		.fixed_size = true,
++		.res_ways = 0xc,
++		.cache_mode = 1,
++		.retain_on_pc = true,
++	}, {
++		.usecase_id = LLCC_WRCACHE,
++		.slice_id = 31,
++		.max_cap = 512,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0x3,
++		.cache_mode = 0,
++		.activate_on_init = true,
++	},
+ };
+ 
+ static const struct llcc_slice_config x1e80100_data[] = {
+-	{LLCC_CPUSS,	 1, 6144, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+-	{LLCC_VIDSC0,	 2,  512, 4, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+-	{LLCC_AUDIO,	 6, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+-	{LLCC_CMPT,     10, 6144, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+-	{LLCC_GPUHTW,   11,  512, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+-	{LLCC_GPU,       9, 4608, 1, 0, 0xFFF, 0x0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0},
+-	{LLCC_MMUHWT,   18,  512, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+-	{LLCC_AUDHW,    22, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+-	{LLCC_CVP,       8,  512, 4, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+-	{LLCC_WRCACHE,  31, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+-	{LLCC_CAMEXP0,   4,  256, 4, 1,   0x3, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+-	{LLCC_CAMEXP1,   7, 3072, 3, 1, 0xFFC, 0x0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+-	{LLCC_LCPDARE,  30,  512, 3, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0},
+-	{LLCC_AENPU,     3, 3072, 1, 1, 0xFFF, 0x0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+-	{LLCC_ISLAND1,  12, 2048, 7, 1,   0x0, 0xF, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+-	{LLCC_CAMEXP2,  19, 3072, 3, 1, 0xFFC, 0x0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+-	{LLCC_CAMEXP3,  20, 3072, 2, 1, 0xFFC, 0x0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+-	{LLCC_CAMEXP4,  21, 3072, 2, 1, 0xFFC, 0x0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
++	{
++		.usecase_id = LLCC_CPUSS,
++		.slice_id = 1,
++		.max_cap = 6144,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.activate_on_init = true,
++	}, {
++		.usecase_id = LLCC_VIDSC0,
++		.slice_id = 2,
++		.max_cap = 512,
++		.priority = 4,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++	}, {
++		.usecase_id = LLCC_AUDIO,
++		.slice_id = 6,
++		.max_cap = 1024,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++	}, {
++		.usecase_id = LLCC_CMPT,
++		.slice_id = 10,
++		.max_cap = 6144,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++	}, {
++		.usecase_id = LLCC_GPUHTW,
++		.slice_id = 11,
++		.max_cap = 512,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++	}, {
++		.usecase_id = LLCC_GPU,
++		.slice_id = 9,
++		.max_cap = 4608,
++		.priority = 1,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.write_scid_en = true,
++		.write_scid_cacheable_en = true,
++		.stale_en = true,
++	}, {
++		.usecase_id = LLCC_MMUHWT,
++		.slice_id = 18,
++		.max_cap = 512,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.activate_on_init = true,
++	}, {
++		.usecase_id = LLCC_AUDHW,
++		.slice_id = 22,
++		.max_cap = 1024,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++	}, {
++		.usecase_id = LLCC_CVP,
++		.slice_id = 8,
++		.max_cap = 512,
++		.priority = 4,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++	}, {
++		.usecase_id = LLCC_WRCACHE,
++		.slice_id = 31,
++		.max_cap = 1024,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++	}, {
++		.usecase_id = LLCC_CAMEXP0,
++		.slice_id = 4,
++		.max_cap = 256,
++		.priority = 4,
++		.fixed_size = true,
++		.bonus_ways = 0x3,
++		.cache_mode = 0,
++	}, {
++		.usecase_id = LLCC_CAMEXP1,
++		.slice_id = 7,
++		.max_cap = 3072,
++		.priority = 3,
++		.fixed_size = true,
++		.bonus_ways = 0xffc,
++		.cache_mode = 2,
++	}, {
++		.usecase_id = LLCC_LCPDARE,
++		.slice_id = 30,
++		.max_cap = 512,
++		.priority = 3,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 0,
++		.activate_on_init = true,
++		.alloc_oneway_en = true,
++	}, {
++		.usecase_id = LLCC_AENPU,
++		.slice_id = 3,
++		.max_cap = 3072,
++		.priority = 1,
++		.fixed_size = true,
++		.bonus_ways = 0xfff,
++		.cache_mode = 2,
++	}, {
++		.usecase_id = LLCC_ISLAND1,
++		.slice_id = 12,
++		.max_cap = 2048,
++		.priority = 7,
++		.fixed_size = true,
++		.res_ways = 0xf,
++		.cache_mode = 0,
++	}, {
++		.usecase_id = LLCC_CAMEXP2,
++		.slice_id = 19,
++		.max_cap = 3072,
++		.priority = 3,
++		.fixed_size = true,
++		.bonus_ways = 0xffc,
++		.cache_mode = 2,
++	}, {
++		.usecase_id = LLCC_CAMEXP3,
++		.slice_id = 20,
++		.max_cap = 3072,
++		.priority = 2,
++		.fixed_size = true,
++		.bonus_ways = 0xffc,
++		.cache_mode = 2,
++	}, {
++		.usecase_id = LLCC_CAMEXP4,
++		.slice_id = 21,
++		.max_cap = 3072,
++		.priority = 2,
++		.fixed_size = true,
++		.bonus_ways = 0xffc,
++		.cache_mode = 2,
++	},
+ };
+ 
+ static const struct llcc_edac_reg_offset llcc_v1_edac_reg_offset = {
+diff --git a/drivers/soc/qcom/qcom_pd_mapper.c b/drivers/soc/qcom/qcom_pd_mapper.c
+index c940f4da28ed5c..6e30f08761aa43 100644
+--- a/drivers/soc/qcom/qcom_pd_mapper.c
++++ b/drivers/soc/qcom/qcom_pd_mapper.c
+@@ -540,6 +540,7 @@ static const struct of_device_id qcom_pdm_domains[] __maybe_unused = {
+ 	{ .compatible = "qcom,msm8996", .data = msm8996_domains, },
+ 	{ .compatible = "qcom,msm8998", .data = msm8998_domains, },
+ 	{ .compatible = "qcom,qcm2290", .data = qcm2290_domains, },
++	{ .compatible = "qcom,qcm6490", .data = sc7280_domains, },
+ 	{ .compatible = "qcom,qcs404", .data = qcs404_domains, },
+ 	{ .compatible = "qcom,sc7180", .data = sc7180_domains, },
+ 	{ .compatible = "qcom,sc7280", .data = sc7280_domains, },
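
The one-line qcom_pd_mapper hunk maps the new "qcom,qcm6490" compatible onto the existing sc7280 domain table instead of duplicating it; the same .data-sharing idea recurs in the 8250_dw and tsens hunks further down. A hedged sketch of the pattern with illustrative names (demo_domains and the vendor strings are not the driver's real symbols):

    #include <linux/mod_devicetable.h>
    #include <linux/of.h>

    static const char * const demo_domains[] = { "adsp", "cdsp", NULL };

    static const struct of_device_id demo_matches[] = {
            /* Two SoCs that expose the same firmware domains point
             * .data at one shared table rather than copying it. */
            { .compatible = "vendor,soc-a", .data = demo_domains },
            { .compatible = "vendor,soc-b", .data = demo_domains },
            { /* sentinel */ }
    };

At probe time, of_device_get_match_data() hands back whichever table matched, so supporting another SoC costs a single table entry.
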
+diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
+index 9573b8fa4fbfc6..29b9676fe43d89 100644
+--- a/drivers/spi/spi-fsl-lpspi.c
++++ b/drivers/spi/spi-fsl-lpspi.c
+@@ -315,9 +315,10 @@ static void fsl_lpspi_set_watermark(struct fsl_lpspi_data *fsl_lpspi)
+ static int fsl_lpspi_set_bitrate(struct fsl_lpspi_data *fsl_lpspi)
+ {
+ 	struct lpspi_config config = fsl_lpspi->config;
+-	unsigned int perclk_rate, scldiv, div;
++	unsigned int perclk_rate, div;
+ 	u8 prescale_max;
+ 	u8 prescale;
++	int scldiv;
+ 
+ 	perclk_rate = clk_get_rate(fsl_lpspi->clk_per);
+ 	prescale_max = fsl_lpspi->devtype_data->prescale_max;
+@@ -338,13 +339,13 @@ static int fsl_lpspi_set_bitrate(struct fsl_lpspi_data *fsl_lpspi)
+ 
+ 	for (prescale = 0; prescale <= prescale_max; prescale++) {
+ 		scldiv = div / (1 << prescale) - 2;
+-		if (scldiv < 256) {
++		if (scldiv >= 0 && scldiv < 256) {
+ 			fsl_lpspi->config.prescale = prescale;
+ 			break;
+ 		}
+ 	}
+ 
+-	if (scldiv >= 256)
++	if (scldiv < 0 || scldiv >= 256)
+ 		return -EINVAL;
+ 
+ 	writel(scldiv | (scldiv << 8) | ((scldiv >> 1) << 16),
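
The spi-fsl-lpspi fix hinges on signedness: div / (1 << prescale) - 2 can legitimately go below zero, but with scldiv declared unsigned int the result wrapped to a huge positive value, and the range checks could no longer tell an underflowed divider from an oversized one. A freestanding demonstration of the hazard (ordinary user-space C, independent of the driver):

    #include <stdio.h>

    int main(void)
    {
            unsigned int u = 1 / (1 << 0) - 2; /* wraps to 4294967295 */
            int s          = 1 / (1 << 0) - 2; /* stays -1 */

            /* The old guard only asked "scldiv < 256", which a wrapped
             * value fails for the wrong reason; the new guard can
             * reject negative results explicitly. */
            printf("unsigned: %u  (valid: %d)\n", u, u < 256);
            printf("signed:   %d  (valid: %d)\n", s, s >= 0 && s < 256);
            return 0;
    }
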
+diff --git a/drivers/spi/spi-mpc52xx.c b/drivers/spi/spi-mpc52xx.c
+index d5ac60c135c20a..159f359d7501aa 100644
+--- a/drivers/spi/spi-mpc52xx.c
++++ b/drivers/spi/spi-mpc52xx.c
+@@ -520,6 +520,7 @@ static void mpc52xx_spi_remove(struct platform_device *op)
+ 	struct mpc52xx_spi *ms = spi_controller_get_devdata(host);
+ 	int i;
+ 
++	cancel_work_sync(&ms->work);
+ 	free_irq(ms->irq0, ms);
+ 	free_irq(ms->irq1, ms);
+ 
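
The mpc52xx change applies the standard teardown-ordering rule: cancel asynchronously queued work before releasing the resources its handler touches, otherwise a work item still in flight can run against freed IRQs or device state. A minimal sketch of the pattern with demo names (not the driver's own):

    #include <linux/interrupt.h>
    #include <linux/workqueue.h>

    struct demo {
            struct work_struct work;
            unsigned int irq;
    };

    static void demo_remove(struct demo *d)
    {
            /* Wait for queued or running work to finish first ... */
            cancel_work_sync(&d->work);
            /* ... and only then tear down what the handler uses. */
            free_irq(d->irq, d);
    }
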
+diff --git a/drivers/thermal/qcom/tsens-v1.c b/drivers/thermal/qcom/tsens-v1.c
+index dc1c4ae2d8b01b..1a7874676f68e4 100644
+--- a/drivers/thermal/qcom/tsens-v1.c
++++ b/drivers/thermal/qcom/tsens-v1.c
+@@ -162,28 +162,35 @@ struct tsens_plat_data data_tsens_v1 = {
+ 	.fields	= tsens_v1_regfields,
+ };
+ 
+-static const struct tsens_ops ops_8956 = {
+-	.init		= init_8956,
++static const struct tsens_ops ops_common = {
++	.init		= init_common,
+ 	.calibrate	= tsens_calibrate_common,
+ 	.get_temp	= get_temp_tsens_valid,
+ };
+ 
+-struct tsens_plat_data data_8956 = {
++struct tsens_plat_data data_8937 = {
+ 	.num_sensors	= 11,
+-	.ops		= &ops_8956,
++	.ops		= &ops_common,
+ 	.feat		= &tsens_v1_feat,
+ 	.fields		= tsens_v1_regfields,
+ };
+ 
+-static const struct tsens_ops ops_8976 = {
+-	.init		= init_common,
++static const struct tsens_ops ops_8956 = {
++	.init		= init_8956,
+ 	.calibrate	= tsens_calibrate_common,
+ 	.get_temp	= get_temp_tsens_valid,
+ };
+ 
++struct tsens_plat_data data_8956 = {
++	.num_sensors	= 11,
++	.ops		= &ops_8956,
++	.feat		= &tsens_v1_feat,
++	.fields		= tsens_v1_regfields,
++};
++
+ struct tsens_plat_data data_8976 = {
+ 	.num_sensors	= 11,
+-	.ops		= &ops_8976,
++	.ops		= &ops_common,
+ 	.feat		= &tsens_v1_feat,
+ 	.fields		= tsens_v1_regfields,
+ };
+diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c
+index 0b4421bf478544..d2db804692f01d 100644
+--- a/drivers/thermal/qcom/tsens.c
++++ b/drivers/thermal/qcom/tsens.c
+@@ -1119,6 +1119,9 @@ static const struct of_device_id tsens_table[] = {
+ 	}, {
+ 		.compatible = "qcom,msm8916-tsens",
+ 		.data = &data_8916,
++	}, {
++		.compatible = "qcom,msm8937-tsens",
++		.data = &data_8937,
+ 	}, {
+ 		.compatible = "qcom,msm8939-tsens",
+ 		.data = &data_8939,
+diff --git a/drivers/thermal/qcom/tsens.h b/drivers/thermal/qcom/tsens.h
+index cab39de045b100..7b36a0318fa6a0 100644
+--- a/drivers/thermal/qcom/tsens.h
++++ b/drivers/thermal/qcom/tsens.h
+@@ -647,7 +647,7 @@ extern struct tsens_plat_data data_8960;
+ extern struct tsens_plat_data data_8226, data_8909, data_8916, data_8939, data_8974, data_9607;
+ 
+ /* TSENS v1 targets */
+-extern struct tsens_plat_data data_tsens_v1, data_8976, data_8956;
++extern struct tsens_plat_data data_tsens_v1, data_8937, data_8976, data_8956;
+ 
+ /* TSENS v2 targets */
+ extern struct tsens_plat_data data_8996, data_ipq8074, data_tsens_v2;
+diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
+index ab9e7f20426025..51894c93c8a313 100644
+--- a/drivers/tty/serial/8250/8250_dw.c
++++ b/drivers/tty/serial/8250/8250_dw.c
+@@ -750,7 +750,7 @@ static const struct dw8250_platform_data dw8250_renesas_rzn1_data = {
+ 	.quirks = DW_UART_QUIRK_CPR_VALUE | DW_UART_QUIRK_IS_DMA_FC,
+ };
+ 
+-static const struct dw8250_platform_data dw8250_starfive_jh7100_data = {
++static const struct dw8250_platform_data dw8250_skip_set_rate_data = {
+ 	.usr_reg = DW_UART_USR,
+ 	.quirks = DW_UART_QUIRK_SKIP_SET_RATE,
+ };
+@@ -760,7 +760,8 @@ static const struct of_device_id dw8250_of_match[] = {
+ 	{ .compatible = "cavium,octeon-3860-uart", .data = &dw8250_octeon_3860_data },
+ 	{ .compatible = "marvell,armada-38x-uart", .data = &dw8250_armada_38x_data },
+ 	{ .compatible = "renesas,rzn1-uart", .data = &dw8250_renesas_rzn1_data },
+-	{ .compatible = "starfive,jh7100-uart", .data = &dw8250_starfive_jh7100_data },
++	{ .compatible = "sophgo,sg2044-uart", .data = &dw8250_skip_set_rate_data },
++	{ .compatible = "starfive,jh7100-uart", .data = &dw8250_skip_set_rate_data },
+ 	{ /* Sentinel */ }
+ };
+ MODULE_DEVICE_TABLE(of, dw8250_of_match);
+diff --git a/drivers/ufs/core/ufs-sysfs.c b/drivers/ufs/core/ufs-sysfs.c
+index 265f21133b633e..796e37a1d859f2 100644
+--- a/drivers/ufs/core/ufs-sysfs.c
++++ b/drivers/ufs/core/ufs-sysfs.c
+@@ -670,6 +670,9 @@ static ssize_t read_req_latency_avg_show(struct device *dev,
+ 	struct ufs_hba *hba = dev_get_drvdata(dev);
+ 	struct ufs_hba_monitor *m = &hba->monitor;
+ 
++	if (!m->nr_req[READ])
++		return sysfs_emit(buf, "0\n");
++
+ 	return sysfs_emit(buf, "%llu\n", div_u64(ktime_to_us(m->lat_sum[READ]),
+ 						 m->nr_req[READ]));
+ }
+@@ -737,6 +740,9 @@ static ssize_t write_req_latency_avg_show(struct device *dev,
+ 	struct ufs_hba *hba = dev_get_drvdata(dev);
+ 	struct ufs_hba_monitor *m = &hba->monitor;
+ 
++	if (!m->nr_req[WRITE])
++		return sysfs_emit(buf, "0\n");
++
+ 	return sysfs_emit(buf, "%llu\n", div_u64(ktime_to_us(m->lat_sum[WRITE]),
+ 						 m->nr_req[WRITE]));
+ }
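
Both sysfs hunks guard the average-latency computation against a zero request count: until the first monitored request completes, nr_req is 0 and div_u64() would divide by zero, so the attribute now reports 0 instead. The shape of the guard as a standalone sketch (plain C, simplified types):

    #include <stdint.h>
    #include <stdio.h>

    /* Returns 0 when no requests have been observed yet, rather
     * than dividing by zero. */
    static uint64_t avg_latency_us(uint64_t lat_sum_us, uint32_t nr_req)
    {
            if (!nr_req)
                    return 0;
            return lat_sum_us / nr_req;
    }

    int main(void)
    {
            printf("%llu\n", (unsigned long long)avg_latency_us(0, 0));
            printf("%llu\n", (unsigned long long)avg_latency_us(300, 3));
            return 0;
    }
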
+diff --git a/drivers/ufs/core/ufs_bsg.c b/drivers/ufs/core/ufs_bsg.c
+index 433d0480391ea6..6c09d97ae00658 100644
+--- a/drivers/ufs/core/ufs_bsg.c
++++ b/drivers/ufs/core/ufs_bsg.c
+@@ -170,7 +170,7 @@ static int ufs_bsg_request(struct bsg_job *job)
+ 		break;
+ 	case UPIU_TRANSACTION_UIC_CMD:
+ 		memcpy(&uc, &bsg_request->upiu_req.uc, UIC_CMD_SIZE);
+-		ret = ufshcd_send_uic_cmd(hba, &uc);
++		ret = ufshcd_send_bsg_uic_cmd(hba, &uc);
+ 		if (ret)
+ 			dev_err(hba->dev, "send uic cmd: error code %d\n", ret);
+ 
+diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h
+index 7aea8fbaeee882..9ffd94ddf8c7ce 100644
+--- a/drivers/ufs/core/ufshcd-priv.h
++++ b/drivers/ufs/core/ufshcd-priv.h
+@@ -84,6 +84,7 @@ int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
+ 			    u8 **buf, bool ascii);
+ 
+ int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd);
++int ufshcd_send_bsg_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd);
+ 
+ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
+ 			     struct utp_upiu_req *req_upiu,
+diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
+index abbe7135a97787..cfebe4a1af9e84 100644
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -2411,8 +2411,6 @@ static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
+ 	int err;
+ 
+ 	hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
+-	if (hba->quirks & UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS)
+-		hba->capabilities &= ~MASK_64_ADDRESSING_SUPPORT;
+ 
+ 	/* nutrs and nutmrs are 0 based values */
+ 	hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS_SDB) + 1;
+@@ -2551,13 +2549,11 @@ ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
+  * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
+  * @hba: per adapter instance
+  * @uic_cmd: UIC command
+- * @completion: initialize the completion only if this is set to true
+  *
+  * Return: 0 only if success.
+  */
+ static int
+-__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
+-		      bool completion)
++__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
+ {
+ 	lockdep_assert_held(&hba->uic_cmd_mutex);
+ 
+@@ -2567,8 +2563,7 @@ __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
+ 		return -EIO;
+ 	}
+ 
+-	if (completion)
+-		init_completion(&uic_cmd->done);
++	init_completion(&uic_cmd->done);
+ 
+ 	uic_cmd->cmd_active = 1;
+ 	ufshcd_dispatch_uic_cmd(hba, uic_cmd);
+@@ -2594,7 +2589,7 @@ int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
+ 	mutex_lock(&hba->uic_cmd_mutex);
+ 	ufshcd_add_delay_before_dme_cmd(hba);
+ 
+-	ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
++	ret = __ufshcd_send_uic_cmd(hba, uic_cmd);
+ 	if (!ret)
+ 		ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
+ 
+@@ -4288,7 +4283,7 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
+ 		reenable_intr = true;
+ 	}
+ 	spin_unlock_irqrestore(hba->host->host_lock, flags);
+-	ret = __ufshcd_send_uic_cmd(hba, cmd, false);
++	ret = __ufshcd_send_uic_cmd(hba, cmd);
+ 	if (ret) {
+ 		dev_err(hba->dev,
+ 			"pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
+@@ -4343,6 +4338,42 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
+ 	return ret;
+ }
+ 
++/**
++ * ufshcd_send_bsg_uic_cmd - Send UIC commands requested via BSG layer and retrieve the result
++ * @hba: per adapter instance
++ * @uic_cmd: UIC command
++ *
++ * Return: 0 only if success.
++ */
++int ufshcd_send_bsg_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
++{
++	int ret;
++
++	if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD)
++		return 0;
++
++	ufshcd_hold(hba);
++
++	if (uic_cmd->argument1 == UIC_ARG_MIB(PA_PWRMODE) &&
++	    uic_cmd->command == UIC_CMD_DME_SET) {
++		ret = ufshcd_uic_pwr_ctrl(hba, uic_cmd);
++		goto out;
++	}
++
++	mutex_lock(&hba->uic_cmd_mutex);
++	ufshcd_add_delay_before_dme_cmd(hba);
++
++	ret = __ufshcd_send_uic_cmd(hba, uic_cmd);
++	if (!ret)
++		ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
++
++	mutex_unlock(&hba->uic_cmd_mutex);
++
++out:
++	ufshcd_release(hba);
++	return ret;
++}
++
+ /**
+  * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
+  *				using DME_SET primitives.
+@@ -4651,9 +4682,6 @@ static int ufshcd_change_power_mode(struct ufs_hba *hba,
+ 		dev_err(hba->dev,
+ 			"%s: power mode change failed %d\n", __func__, ret);
+ 	} else {
+-		ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
+-								pwr_mode);
+-
+ 		memcpy(&hba->pwr_info, pwr_mode,
+ 			sizeof(struct ufs_pa_layer_attr));
+ 	}
+@@ -4682,6 +4710,10 @@ int ufshcd_config_pwr_mode(struct ufs_hba *hba,
+ 
+ 	ret = ufshcd_change_power_mode(hba, &final_params);
+ 
++	if (!ret)
++		ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
++					&final_params);
++
+ 	return ret;
+ }
+ EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode);
+@@ -10231,6 +10263,7 @@ void ufshcd_remove(struct ufs_hba *hba)
+ 	ufs_hwmon_remove(hba);
+ 	ufs_bsg_remove(hba);
+ 	ufs_sysfs_remove_nodes(hba->dev);
++	cancel_delayed_work_sync(&hba->ufs_rtc_update_work);
+ 	blk_mq_destroy_queue(hba->tmf_queue);
+ 	blk_put_queue(hba->tmf_queue);
+ 	blk_mq_free_tag_set(&hba->tmf_tag_set);
+@@ -10309,6 +10342,8 @@ EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
+  */
+ static int ufshcd_set_dma_mask(struct ufs_hba *hba)
+ {
++	if (hba->vops && hba->vops->set_dma_mask)
++		return hba->vops->set_dma_mask(hba);
+ 	if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
+ 		if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
+ 			return 0;
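
The new ufshcd_send_bsg_uic_cmd() exists because one UIC command is special: a DME_SET of PA_PWRMODE arriving from user space through BSG must take the full power-mode-change path (ufshcd_uic_pwr_ctrl(), which waits for the UIC completion interrupt), while every other UIC command keeps using the plain send-and-wait pair under uic_cmd_mutex. The routing decision, reduced to a hedged sketch (do_power_mode_change() and send_and_wait() are stand-ins, not kernel symbols):

    /* Sketch of the dispatch rule only; the real function also wraps
     * everything in ufshcd_hold()/ufshcd_release() and takes
     * uic_cmd_mutex around the generic path. */
    static int send_bsg_uic(struct uic_command *cmd)
    {
            if (cmd->command == UIC_CMD_DME_SET &&
                cmd->argument1 == UIC_ARG_MIB(PA_PWRMODE))
                    return do_power_mode_change(cmd); /* special path */

            return send_and_wait(cmd);                /* generic path */
    }
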
+diff --git a/drivers/ufs/host/cdns-pltfrm.c b/drivers/ufs/host/cdns-pltfrm.c
+index 66811d8d1929c1..b31aa84111511b 100644
+--- a/drivers/ufs/host/cdns-pltfrm.c
++++ b/drivers/ufs/host/cdns-pltfrm.c
+@@ -307,9 +307,7 @@ static int cdns_ufs_pltfrm_probe(struct platform_device *pdev)
+  */
+ static void cdns_ufs_pltfrm_remove(struct platform_device *pdev)
+ {
+-	struct ufs_hba *hba =  platform_get_drvdata(pdev);
+-
+-	ufshcd_remove(hba);
++	ufshcd_pltfrm_remove(pdev);
+ }
+ 
+ static const struct dev_pm_ops cdns_ufs_dev_pm_ops = {
+diff --git a/drivers/ufs/host/tc-dwc-g210-pltfrm.c b/drivers/ufs/host/tc-dwc-g210-pltfrm.c
+index a3877592604d5d..c6f8565ede21a1 100644
+--- a/drivers/ufs/host/tc-dwc-g210-pltfrm.c
++++ b/drivers/ufs/host/tc-dwc-g210-pltfrm.c
+@@ -76,10 +76,7 @@ static int tc_dwc_g210_pltfm_probe(struct platform_device *pdev)
+  */
+ static void tc_dwc_g210_pltfm_remove(struct platform_device *pdev)
+ {
+-	struct ufs_hba *hba =  platform_get_drvdata(pdev);
+-
+-	pm_runtime_get_sync(&(pdev)->dev);
+-	ufshcd_remove(hba);
++	ufshcd_pltfrm_remove(pdev);
+ }
+ 
+ static const struct dev_pm_ops tc_dwc_g210_pltfm_pm_ops = {
+diff --git a/drivers/ufs/host/ufs-exynos.c b/drivers/ufs/host/ufs-exynos.c
+index fb550a7c16b34b..98505c68103d0e 100644
+--- a/drivers/ufs/host/ufs-exynos.c
++++ b/drivers/ufs/host/ufs-exynos.c
+@@ -1963,8 +1963,7 @@ static void exynos_ufs_remove(struct platform_device *pdev)
+ 	struct ufs_hba *hba =  platform_get_drvdata(pdev);
+ 	struct exynos_ufs *ufs = ufshcd_get_variant(hba);
+ 
+-	pm_runtime_get_sync(&(pdev)->dev);
+-	ufshcd_remove(hba);
++	ufshcd_pltfrm_remove(pdev);
+ 
+ 	phy_power_off(ufs->phy);
+ 	phy_exit(ufs->phy);
+diff --git a/drivers/ufs/host/ufs-hisi.c b/drivers/ufs/host/ufs-hisi.c
+index 5ee73ff052512b..501609521b2609 100644
+--- a/drivers/ufs/host/ufs-hisi.c
++++ b/drivers/ufs/host/ufs-hisi.c
+@@ -576,9 +576,7 @@ static int ufs_hisi_probe(struct platform_device *pdev)
+ 
+ static void ufs_hisi_remove(struct platform_device *pdev)
+ {
+-	struct ufs_hba *hba =  platform_get_drvdata(pdev);
+-
+-	ufshcd_remove(hba);
++	ufshcd_pltfrm_remove(pdev);
+ }
+ 
+ static const struct dev_pm_ops ufs_hisi_pm_ops = {
+diff --git a/drivers/ufs/host/ufs-mediatek.c b/drivers/ufs/host/ufs-mediatek.c
+index 9a5919434c4e0d..c834d38921b6cb 100644
+--- a/drivers/ufs/host/ufs-mediatek.c
++++ b/drivers/ufs/host/ufs-mediatek.c
+@@ -1869,10 +1869,7 @@ static int ufs_mtk_probe(struct platform_device *pdev)
+  */
+ static void ufs_mtk_remove(struct platform_device *pdev)
+ {
+-	struct ufs_hba *hba =  platform_get_drvdata(pdev);
+-
+-	pm_runtime_get_sync(&(pdev)->dev);
+-	ufshcd_remove(hba);
++	ufshcd_pltfrm_remove(pdev);
+ }
+ 
+ #ifdef CONFIG_PM_SLEEP
+diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
+index ecdfff2456e31d..91127fb171864f 100644
+--- a/drivers/ufs/host/ufs-qcom.c
++++ b/drivers/ufs/host/ufs-qcom.c
+@@ -1843,10 +1843,11 @@ static int ufs_qcom_probe(struct platform_device *pdev)
+ static void ufs_qcom_remove(struct platform_device *pdev)
+ {
+ 	struct ufs_hba *hba =  platform_get_drvdata(pdev);
++	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ 
+-	pm_runtime_get_sync(&(pdev)->dev);
+-	ufshcd_remove(hba);
+-	platform_device_msi_free_irqs_all(hba->dev);
++	ufshcd_pltfrm_remove(pdev);
++	if (host->esi_enabled)
++		platform_device_msi_free_irqs_all(hba->dev);
+ }
+ 
+ static const struct of_device_id ufs_qcom_of_match[] __maybe_unused = {
+diff --git a/drivers/ufs/host/ufs-renesas.c b/drivers/ufs/host/ufs-renesas.c
+index 8711e5cbc9680a..21a64b34397d8c 100644
+--- a/drivers/ufs/host/ufs-renesas.c
++++ b/drivers/ufs/host/ufs-renesas.c
+@@ -7,6 +7,7 @@
+ 
+ #include <linux/clk.h>
+ #include <linux/delay.h>
++#include <linux/dma-mapping.h>
+ #include <linux/err.h>
+ #include <linux/iopoll.h>
+ #include <linux/kernel.h>
+@@ -364,14 +365,20 @@ static int ufs_renesas_init(struct ufs_hba *hba)
+ 		return -ENOMEM;
+ 	ufshcd_set_variant(hba, priv);
+ 
+-	hba->quirks |= UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS | UFSHCD_QUIRK_HIBERN_FASTAUTO;
++	hba->quirks |= UFSHCD_QUIRK_HIBERN_FASTAUTO;
+ 
+ 	return 0;
+ }
+ 
++static int ufs_renesas_set_dma_mask(struct ufs_hba *hba)
++{
++	return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
++}
++
+ static const struct ufs_hba_variant_ops ufs_renesas_vops = {
+ 	.name		= "renesas",
+ 	.init		= ufs_renesas_init,
++	.set_dma_mask	= ufs_renesas_set_dma_mask,
+ 	.setup_clocks	= ufs_renesas_setup_clocks,
+ 	.hce_enable_notify = ufs_renesas_hce_enable_notify,
+ 	.dbg_register_dump = ufs_renesas_dbg_register_dump,
+@@ -390,9 +397,7 @@ static int ufs_renesas_probe(struct platform_device *pdev)
+ 
+ static void ufs_renesas_remove(struct platform_device *pdev)
+ {
+-	struct ufs_hba *hba = platform_get_drvdata(pdev);
+-
+-	ufshcd_remove(hba);
++	ufshcd_pltfrm_remove(pdev);
+ }
+ 
+ static struct platform_driver ufs_renesas_platform = {
+diff --git a/drivers/ufs/host/ufs-sprd.c b/drivers/ufs/host/ufs-sprd.c
+index d8b165908809d6..d220978c2d8c8a 100644
+--- a/drivers/ufs/host/ufs-sprd.c
++++ b/drivers/ufs/host/ufs-sprd.c
+@@ -427,10 +427,7 @@ static int ufs_sprd_probe(struct platform_device *pdev)
+ 
+ static void ufs_sprd_remove(struct platform_device *pdev)
+ {
+-	struct ufs_hba *hba =  platform_get_drvdata(pdev);
+-
+-	pm_runtime_get_sync(&(pdev)->dev);
+-	ufshcd_remove(hba);
++	ufshcd_pltfrm_remove(pdev);
+ }
+ 
+ static const struct dev_pm_ops ufs_sprd_pm_ops = {
+diff --git a/drivers/ufs/host/ufshcd-pltfrm.c b/drivers/ufs/host/ufshcd-pltfrm.c
+index 1f4f30d6cb4234..505572d4fa878c 100644
+--- a/drivers/ufs/host/ufshcd-pltfrm.c
++++ b/drivers/ufs/host/ufshcd-pltfrm.c
+@@ -524,6 +524,22 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
+ }
+ EXPORT_SYMBOL_GPL(ufshcd_pltfrm_init);
+ 
++/**
++ * ufshcd_pltfrm_remove - Remove ufshcd platform
++ * @pdev: pointer to Platform device handle
++ */
++void ufshcd_pltfrm_remove(struct platform_device *pdev)
++{
++	struct ufs_hba *hba =  platform_get_drvdata(pdev);
++
++	pm_runtime_get_sync(&pdev->dev);
++	ufshcd_remove(hba);
++	ufshcd_dealloc_host(hba);
++	pm_runtime_disable(&pdev->dev);
++	pm_runtime_put_noidle(&pdev->dev);
++}
++EXPORT_SYMBOL_GPL(ufshcd_pltfrm_remove);
++
+ MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
+ MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
+ MODULE_DESCRIPTION("UFS host controller Platform bus based glue driver");
+diff --git a/drivers/ufs/host/ufshcd-pltfrm.h b/drivers/ufs/host/ufshcd-pltfrm.h
+index df387be5216bd4..3017f8e8f93c67 100644
+--- a/drivers/ufs/host/ufshcd-pltfrm.h
++++ b/drivers/ufs/host/ufshcd-pltfrm.h
+@@ -31,6 +31,7 @@ int ufshcd_negotiate_pwr_params(const struct ufs_host_params *host_params,
+ void ufshcd_init_host_params(struct ufs_host_params *host_params);
+ int ufshcd_pltfrm_init(struct platform_device *pdev,
+ 		       const struct ufs_hba_variant_ops *vops);
++void ufshcd_pltfrm_remove(struct platform_device *pdev);
+ int ufshcd_populate_vreg(struct device *dev, const char *name,
+ 			 struct ufs_vreg **out_vreg, bool skip_current);
+ 
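
Every glue-driver hunk above collapses its open-coded .remove body into this one helper, which also supplies the steps several drivers were missing (the runtime-PM resume before ufshcd_remove(), host deallocation, and rebalancing the runtime-PM counters). After these hunks a platform driver's remove path shrinks to a single call, roughly:

    static void demo_ufs_remove(struct platform_device *pdev)
    {
            /* Resume, ufshcd_remove(), dealloc the host, and put the
             * runtime-PM counters back, all in one shared place. */
            ufshcd_pltfrm_remove(pdev);
    }

(demo_ufs_remove is an illustrative name; the actual callers are the cdns, tc-dwc, exynos, hisi, mediatek, qcom, renesas and sprd remove functions patched above.)
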
+diff --git a/drivers/usb/chipidea/ci.h b/drivers/usb/chipidea/ci.h
+index 2a38e1eb65466c..97437de52ef681 100644
+--- a/drivers/usb/chipidea/ci.h
++++ b/drivers/usb/chipidea/ci.h
+@@ -25,6 +25,7 @@
+ #define TD_PAGE_COUNT      5
+ #define CI_HDRC_PAGE_SIZE  4096ul /* page size for TD's */
+ #define ENDPT_MAX          32
++#define CI_MAX_REQ_SIZE	(4 * CI_HDRC_PAGE_SIZE)
+ #define CI_MAX_BUF_SIZE	(TD_PAGE_COUNT * CI_HDRC_PAGE_SIZE)
+ 
+ /******************************************************************************
+@@ -260,6 +261,7 @@ struct ci_hdrc {
+ 	bool				b_sess_valid_event;
+ 	bool				imx28_write_fix;
+ 	bool				has_portsc_pec_bug;
++	bool				has_short_pkt_limit;
+ 	bool				supports_runtime_pm;
+ 	bool				in_lpm;
+ 	bool				wakeup_int;
+diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
+index c64ab0e07ea030..17b3ac2ac8a1e8 100644
+--- a/drivers/usb/chipidea/ci_hdrc_imx.c
++++ b/drivers/usb/chipidea/ci_hdrc_imx.c
+@@ -342,6 +342,7 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
+ 	struct ci_hdrc_platform_data pdata = {
+ 		.name		= dev_name(&pdev->dev),
+ 		.capoffset	= DEF_CAPOFFSET,
++		.flags		= CI_HDRC_HAS_SHORT_PKT_LIMIT,
+ 		.notify_event	= ci_hdrc_imx_notify_event,
+ 	};
+ 	int ret;
+diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
+index 835bf2428dc6ec..5aa16dbfc289ce 100644
+--- a/drivers/usb/chipidea/core.c
++++ b/drivers/usb/chipidea/core.c
+@@ -1076,6 +1076,8 @@ static int ci_hdrc_probe(struct platform_device *pdev)
+ 		CI_HDRC_SUPPORTS_RUNTIME_PM);
+ 	ci->has_portsc_pec_bug = !!(ci->platdata->flags &
+ 		CI_HDRC_HAS_PORTSC_PEC_MISSED);
++	ci->has_short_pkt_limit = !!(ci->platdata->flags &
++		CI_HDRC_HAS_SHORT_PKT_LIMIT);
+ 	platform_set_drvdata(pdev, ci);
+ 
+ 	ret = hw_device_init(ci, base);
+diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
+index 69ef3cd8d4f836..fd6032874bf33a 100644
+--- a/drivers/usb/chipidea/udc.c
++++ b/drivers/usb/chipidea/udc.c
+@@ -10,6 +10,7 @@
+ #include <linux/delay.h>
+ #include <linux/device.h>
+ #include <linux/dmapool.h>
++#include <linux/dma-direct.h>
+ #include <linux/err.h>
+ #include <linux/irqreturn.h>
+ #include <linux/kernel.h>
+@@ -540,6 +541,126 @@ static int prepare_td_for_sg(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
+ 	return ret;
+ }
+ 
++/*
++ * Verify that the scatterlist is usable by iterating over each sg entry.
++ * Returns the index of the first invalid entry (less than num_sgs), or
++ * num_sgs if every entry is valid.
++ */
++static int sglist_get_invalid_entry(struct device *dma_dev, u8 dir,
++			struct usb_request *req)
++{
++	int i;
++	struct scatterlist *s = req->sg;
++
++	if (req->num_sgs == 1)
++		return 1;
++
++	dir = dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
++
++	for (i = 0; i < req->num_sgs; i++, s = sg_next(s)) {
++		/* Only a small sg (generally the last one) may be bounced.
++		 * If that happens, we can't ensure the address is still
++		 * page-aligned after the DMA mapping.
++		 */
++		if (dma_kmalloc_needs_bounce(dma_dev, s->length, dir))
++			break;
++
++		/* Make sure each sg start address (except first sg) is
++		 * page-aligned and end address (except last sg) is also
++		 * page-aligned.
++		 */
++		if (i == 0) {
++			if (!IS_ALIGNED(s->offset + s->length,
++						CI_HDRC_PAGE_SIZE))
++				break;
++		} else {
++			if (s->offset)
++				break;
++			if (!sg_is_last(s) && !IS_ALIGNED(s->length,
++						CI_HDRC_PAGE_SIZE))
++				break;
++		}
++	}
++
++	return i;
++}
++
++static int sglist_do_bounce(struct ci_hw_req *hwreq, int index,
++			bool copy, unsigned int *bounced)
++{
++	void *buf;
++	int i, ret, nents, num_sgs;
++	unsigned int rest, rounded;
++	struct scatterlist *sg, *src, *dst;
++
++	nents = index + 1;
++	ret = sg_alloc_table(&hwreq->sgt, nents, GFP_KERNEL);
++	if (ret)
++		return ret;
++
++	sg = src = hwreq->req.sg;
++	num_sgs = hwreq->req.num_sgs;
++	rest = hwreq->req.length;
++	dst = hwreq->sgt.sgl;
++
++	for (i = 0; i < index; i++) {
++		memcpy(dst, src, sizeof(*src));
++		rest -= src->length;
++		src = sg_next(src);
++		dst = sg_next(dst);
++	}
++
++	/* create one bounce buffer */
++	rounded = round_up(rest, CI_HDRC_PAGE_SIZE);
++	buf = kmalloc(rounded, GFP_KERNEL);
++	if (!buf) {
++		sg_free_table(&hwreq->sgt);
++		return -ENOMEM;
++	}
++
++	sg_set_buf(dst, buf, rounded);
++
++	hwreq->req.sg = hwreq->sgt.sgl;
++	hwreq->req.num_sgs = nents;
++	hwreq->sgt.sgl = sg;
++	hwreq->sgt.nents = num_sgs;
++
++	if (copy)
++		sg_copy_to_buffer(src, num_sgs - index, buf, rest);
++
++	*bounced = rest;
++
++	return 0;
++}
++
++static void sglist_do_debounce(struct ci_hw_req *hwreq, bool copy)
++{
++	void *buf;
++	int i, nents, num_sgs;
++	struct scatterlist *sg, *src, *dst;
++
++	sg = hwreq->req.sg;
++	num_sgs = hwreq->req.num_sgs;
++	src = sg_last(sg, num_sgs);
++	buf = sg_virt(src);
++
++	if (copy) {
++		dst = hwreq->sgt.sgl;
++		for (i = 0; i < num_sgs - 1; i++)
++			dst = sg_next(dst);
++
++		nents = hwreq->sgt.nents - num_sgs + 1;
++		sg_copy_from_buffer(dst, nents, buf, sg_dma_len(src));
++	}
++
++	hwreq->req.sg = hwreq->sgt.sgl;
++	hwreq->req.num_sgs = hwreq->sgt.nents;
++	hwreq->sgt.sgl = sg;
++	hwreq->sgt.nents = num_sgs;
++
++	kfree(buf);
++	sg_free_table(&hwreq->sgt);
++}
++
+ /**
+  * _hardware_enqueue: configures a request at hardware level
+  * @hwep:   endpoint
+@@ -552,6 +673,8 @@ static int _hardware_enqueue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
+ 	struct ci_hdrc *ci = hwep->ci;
+ 	int ret = 0;
+ 	struct td_node *firstnode, *lastnode;
++	unsigned int bounced_size;
++	struct scatterlist *sg;
+ 
+ 	/* don't queue twice */
+ 	if (hwreq->req.status == -EALREADY)
+@@ -559,11 +682,29 @@ static int _hardware_enqueue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
+ 
+ 	hwreq->req.status = -EALREADY;
+ 
++	if (hwreq->req.num_sgs && hwreq->req.length &&
++		ci->has_short_pkt_limit) {
++		ret = sglist_get_invalid_entry(ci->dev->parent, hwep->dir,
++					&hwreq->req);
++		if (ret < hwreq->req.num_sgs) {
++			ret = sglist_do_bounce(hwreq, ret, hwep->dir == TX,
++					&bounced_size);
++			if (ret)
++				return ret;
++		}
++	}
++
+ 	ret = usb_gadget_map_request_by_dev(ci->dev->parent,
+ 					    &hwreq->req, hwep->dir);
+ 	if (ret)
+ 		return ret;
+ 
++	if (hwreq->sgt.sgl) {
++		/* We've mapped a bigger buffer, now recover the actual size */
++		sg = sg_last(hwreq->req.sg, hwreq->req.num_sgs);
++		sg_dma_len(sg) = min(sg_dma_len(sg), bounced_size);
++	}
++
+ 	if (hwreq->req.num_mapped_sgs)
+ 		ret = prepare_td_for_sg(hwep, hwreq);
+ 	else
+@@ -733,6 +874,10 @@ static int _hardware_dequeue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
+ 	usb_gadget_unmap_request_by_dev(hwep->ci->dev->parent,
+ 					&hwreq->req, hwep->dir);
+ 
++	/* sglist bounced */
++	if (hwreq->sgt.sgl)
++		sglist_do_debounce(hwreq, hwep->dir == RX);
++
+ 	hwreq->req.actual += actual;
+ 
+ 	if (hwreq->req.status)
+@@ -960,6 +1105,12 @@ static int _ep_queue(struct usb_ep *ep, struct usb_request *req,
+ 		return -EMSGSIZE;
+ 	}
+ 
++	if (ci->has_short_pkt_limit &&
++		hwreq->req.length > CI_MAX_REQ_SIZE) {
++		dev_err(hwep->ci->dev, "request length too big (max 16KB)\n");
++		return -EMSGSIZE;
++	}
++
+ 	/* first nuke then test link, e.g. previous status has not sent */
+ 	if (!list_empty(&hwreq->queue)) {
+ 		dev_err(hwep->ci->dev, "request already in queue\n");
+@@ -1574,6 +1725,9 @@ static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
+ 
+ 	usb_gadget_unmap_request(&hwep->ci->gadget, req, hwep->dir);
+ 
++	if (hwreq->sgt.sgl)
++		sglist_do_debounce(hwreq, false);
++
+ 	req->status = -ECONNRESET;
+ 
+ 	if (hwreq->req.complete != NULL) {
+@@ -2063,7 +2217,7 @@ static irqreturn_t udc_irq(struct ci_hdrc *ci)
+ 			}
+ 		}
+ 
+-		if (USBi_UI  & intr)
++		if ((USBi_UI | USBi_UEI) & intr)
+ 			isr_tr_complete_handler(ci);
+ 
+ 		if ((USBi_SLI & intr) && !(ci->suspended)) {
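The controller's TDs address buffers in 4 KiB pages, so a multi-entry scatterlist is only usable as-is when every boundary between entries falls on a page boundary. A minimal standalone model of the check performed by sglist_get_invalid_entry() above (the dma_kmalloc_needs_bounce() test is omitted, and the struct and function names here are invented for the sketch):

#include <stdio.h>

#define CI_PAGE_SIZE 4096u

struct sg_desc { unsigned int offset, length; };

/* Returns the index of the first entry the controller cannot chain,
 * or nsgs if the list is usable without bouncing. */
static unsigned int first_invalid_sg(const struct sg_desc *sg,
				     unsigned int nsgs)
{
	unsigned int i;

	if (nsgs == 1)
		return 1;

	for (i = 0; i < nsgs; i++) {
		if (i == 0) {
			/* the first entry must end on a page boundary */
			if ((sg[i].offset + sg[i].length) % CI_PAGE_SIZE)
				break;
		} else {
			/* later entries must start on a page boundary... */
			if (sg[i].offset)
				break;
			/* ...and all but the last must also end on one */
			if (i != nsgs - 1 && sg[i].length % CI_PAGE_SIZE)
				break;
		}
	}
	return i;
}

int main(void)
{
	struct sg_desc ok[]  = { { 512, 3584 }, { 0, 4096 }, { 0, 100 } };
	struct sg_desc bad[] = { { 512, 1000 }, { 0, 4096 } };

	printf("%u\n", first_invalid_sg(ok, 3));  /* 3: all entries valid  */
	printf("%u\n", first_invalid_sg(bad, 2)); /* 0: first ends mid-page */
	return 0;
}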
+diff --git a/drivers/usb/chipidea/udc.h b/drivers/usb/chipidea/udc.h
+index 5193df1e18c75b..c8a47389a46bbb 100644
+--- a/drivers/usb/chipidea/udc.h
++++ b/drivers/usb/chipidea/udc.h
+@@ -69,11 +69,13 @@ struct td_node {
+  * @req: request structure for gadget drivers
+  * @queue: link to QH list
+  * @tds: link to TD list
++ * @sgt: holds the original sglist while the request is bounced
+  */
+ struct ci_hw_req {
+ 	struct usb_request	req;
+ 	struct list_head	queue;
+ 	struct list_head	tds;
++	struct sg_table		sgt;
+ };
+ 
+ #ifdef CONFIG_USB_CHIPIDEA_UDC
+diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c
+index 7a5dff8d9cc6c3..accf15ff1306a2 100644
+--- a/drivers/usb/typec/ucsi/ucsi_acpi.c
++++ b/drivers/usb/typec/ucsi/ucsi_acpi.c
+@@ -61,9 +61,11 @@ static int ucsi_acpi_read_cci(struct ucsi *ucsi, u32 *cci)
+ 	struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
+ 	int ret;
+ 
+-	ret = ucsi_acpi_dsm(ua, UCSI_DSM_FUNC_READ);
+-	if (ret)
+-		return ret;
++	if (UCSI_COMMAND(ua->cmd) == UCSI_PPM_RESET) {
++		ret = ucsi_acpi_dsm(ua, UCSI_DSM_FUNC_READ);
++		if (ret)
++			return ret;
++	}
+ 
+ 	memcpy(cci, ua->base + UCSI_CCI, sizeof(*cci));
+ 
+@@ -73,11 +75,6 @@ static int ucsi_acpi_read_cci(struct ucsi *ucsi, u32 *cci)
+ static int ucsi_acpi_read_message_in(struct ucsi *ucsi, void *val, size_t val_len)
+ {
+ 	struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
+-	int ret;
+-
+-	ret = ucsi_acpi_dsm(ua, UCSI_DSM_FUNC_READ);
+-	if (ret)
+-		return ret;
+ 
+ 	memcpy(val, ua->base + UCSI_MESSAGE_IN, val_len);
+ 
+@@ -102,42 +99,6 @@ static const struct ucsi_operations ucsi_acpi_ops = {
+ 	.async_control = ucsi_acpi_async_control
+ };
+ 
+-static int
+-ucsi_zenbook_read_cci(struct ucsi *ucsi, u32 *cci)
+-{
+-	struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
+-	int ret;
+-
+-	if (UCSI_COMMAND(ua->cmd) == UCSI_PPM_RESET) {
+-		ret = ucsi_acpi_dsm(ua, UCSI_DSM_FUNC_READ);
+-		if (ret)
+-			return ret;
+-	}
+-
+-	memcpy(cci, ua->base + UCSI_CCI, sizeof(*cci));
+-
+-	return 0;
+-}
+-
+-static int
+-ucsi_zenbook_read_message_in(struct ucsi *ucsi, void *val, size_t val_len)
+-{
+-	struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
+-
+-	/* UCSI_MESSAGE_IN is never read for PPM_RESET, return stored data */
+-	memcpy(val, ua->base + UCSI_MESSAGE_IN, val_len);
+-
+-	return 0;
+-}
+-
+-static const struct ucsi_operations ucsi_zenbook_ops = {
+-	.read_version = ucsi_acpi_read_version,
+-	.read_cci = ucsi_zenbook_read_cci,
+-	.read_message_in = ucsi_zenbook_read_message_in,
+-	.sync_control = ucsi_sync_control_common,
+-	.async_control = ucsi_acpi_async_control
+-};
+-
+ static int ucsi_gram_read_message_in(struct ucsi *ucsi, void *val, size_t val_len)
+ {
+ 	u16 bogus_change = UCSI_CONSTAT_POWER_LEVEL_CHANGE |
+@@ -190,13 +151,6 @@ static const struct ucsi_operations ucsi_gram_ops = {
+ };
+ 
+ static const struct dmi_system_id ucsi_acpi_quirks[] = {
+-	{
+-		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX325UA_UM325UA"),
+-		},
+-		.driver_data = (void *)&ucsi_zenbook_ops,
+-	},
+ 	{
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"),
+diff --git a/drivers/usb/typec/ucsi/ucsi_glink.c b/drivers/usb/typec/ucsi/ucsi_glink.c
+index f7000d383a4e62..9b6cb76e632807 100644
+--- a/drivers/usb/typec/ucsi/ucsi_glink.c
++++ b/drivers/usb/typec/ucsi/ucsi_glink.c
+@@ -172,12 +172,12 @@ static int pmic_glink_ucsi_async_control(struct ucsi *__ucsi, u64 command)
+ static void pmic_glink_ucsi_update_connector(struct ucsi_connector *con)
+ {
+ 	struct pmic_glink_ucsi *ucsi = ucsi_get_drvdata(con->ucsi);
+-	int i;
+ 
+-	for (i = 0; i < PMIC_GLINK_MAX_PORTS; i++) {
+-		if (ucsi->port_orientation[i])
+-			con->typec_cap.orientation_aware = true;
+-	}
++	if (con->num > PMIC_GLINK_MAX_PORTS ||
++	    !ucsi->port_orientation[con->num - 1])
++		return;
++
++	con->typec_cap.orientation_aware = true;
+ }
+ 
+ static void pmic_glink_ucsi_connector_status(struct ucsi_connector *con)
+diff --git a/drivers/vfio/pci/mlx5/cmd.c b/drivers/vfio/pci/mlx5/cmd.c
+index 7527e277c89897..eb7387ee6ebd10 100644
+--- a/drivers/vfio/pci/mlx5/cmd.c
++++ b/drivers/vfio/pci/mlx5/cmd.c
+@@ -1517,7 +1517,8 @@ int mlx5vf_start_page_tracker(struct vfio_device *vdev,
+ 	struct mlx5_vhca_qp *host_qp;
+ 	struct mlx5_vhca_qp *fw_qp;
+ 	struct mlx5_core_dev *mdev;
+-	u32 max_msg_size = PAGE_SIZE;
++	u32 log_max_msg_size;
++	u32 max_msg_size;
+ 	u64 rq_size = SZ_2M;
+ 	u32 max_recv_wr;
+ 	int err;
+@@ -1534,6 +1535,12 @@ int mlx5vf_start_page_tracker(struct vfio_device *vdev,
+ 	}
+ 
+ 	mdev = mvdev->mdev;
++	log_max_msg_size = MLX5_CAP_ADV_VIRTUALIZATION(mdev, pg_track_log_max_msg_size);
++	max_msg_size = (1ULL << log_max_msg_size);
++	/* The RQ must hold at least 4 WQEs/messages for successful QP creation */
++	if (rq_size < 4 * max_msg_size)
++		rq_size = 4 * max_msg_size;
++
+ 	memset(tracker, 0, sizeof(*tracker));
+ 	tracker->uar = mlx5_get_uars_page(mdev);
+ 	if (IS_ERR(tracker->uar)) {
+@@ -1623,25 +1630,41 @@ set_report_output(u32 size, int index, struct mlx5_vhca_qp *qp,
+ {
+ 	u32 entry_size = MLX5_ST_SZ_BYTES(page_track_report_entry);
+ 	u32 nent = size / entry_size;
++	u32 nent_in_page;
++	u32 nent_to_set;
+ 	struct page *page;
++	u32 page_offset;
++	u32 page_index;
++	u32 buf_offset;
++	void *kaddr;
+ 	u64 addr;
+ 	u64 *buf;
+ 	int i;
+ 
+-	if (WARN_ON(index >= qp->recv_buf.npages ||
++	buf_offset = index * qp->max_msg_size;
++	if (WARN_ON(buf_offset + size >= qp->recv_buf.npages * PAGE_SIZE ||
+ 		    (nent > qp->max_msg_size / entry_size)))
+ 		return;
+ 
+-	page = qp->recv_buf.page_list[index];
+-	buf = kmap_local_page(page);
+-	for (i = 0; i < nent; i++) {
+-		addr = MLX5_GET(page_track_report_entry, buf + i,
+-				dirty_address_low);
+-		addr |= (u64)MLX5_GET(page_track_report_entry, buf + i,
+-				      dirty_address_high) << 32;
+-		iova_bitmap_set(dirty, addr, qp->tracked_page_size);
+-	}
+-	kunmap_local(buf);
++	do {
++		page_index = buf_offset / PAGE_SIZE;
++		page_offset = buf_offset % PAGE_SIZE;
++		nent_in_page = (PAGE_SIZE - page_offset) / entry_size;
++		page = qp->recv_buf.page_list[page_index];
++		kaddr = kmap_local_page(page);
++		buf = kaddr + page_offset;
++		nent_to_set = min(nent, nent_in_page);
++		for (i = 0; i < nent_to_set; i++) {
++			addr = MLX5_GET(page_track_report_entry, buf + i,
++					dirty_address_low);
++			addr |= (u64)MLX5_GET(page_track_report_entry, buf + i,
++					      dirty_address_high) << 32;
++			iova_bitmap_set(dirty, addr, qp->tracked_page_size);
++		}
++		kunmap_local(kaddr);
++		buf_offset += (nent_to_set * entry_size);
++		nent -= nent_to_set;
++	} while (nent);
+ }
+ 
+ static void
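The old code indexed page_list[] directly with the message index, which is only correct while one message fits in one page. Once the RQ is sized from pg_track_log_max_msg_size, a report message can span several pages, so it is now consumed with explicit offset arithmetic. A standalone sketch of that arithmetic (page, entry and message sizes here are illustrative):

#include <stdio.h>

#define PAGE_SZ   4096u
#define ENTRY_SZ  16u   /* illustrative size of one report entry */

/* Walk the report that starts at message slot `index`, page by page,
 * mirroring the do/while loop in the reworked set_report_output(). */
static void walk_report(unsigned int index, unsigned int max_msg_size,
			unsigned int size)
{
	unsigned int nent = size / ENTRY_SZ;
	unsigned int buf_offset = index * max_msg_size;

	while (nent) {
		unsigned int page_index = buf_offset / PAGE_SZ;
		unsigned int page_offset = buf_offset % PAGE_SZ;
		unsigned int nent_in_page = (PAGE_SZ - page_offset) / ENTRY_SZ;
		unsigned int todo = nent < nent_in_page ? nent : nent_in_page;

		printf("page %u, offset %u: %u entries\n",
		       page_index, page_offset, todo);
		buf_offset += todo * ENTRY_SZ;
		nent -= todo;
	}
}

int main(void)
{
	/* a 6000-byte report in slot 1, with an 8 KiB max message size:
	 * the entries land in pages 2 and 3, not in page_list[1] */
	walk_report(1, 8192, 6000);
	return 0;
}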
+diff --git a/drivers/virt/coco/pkvm-guest/arm-pkvm-guest.c b/drivers/virt/coco/pkvm-guest/arm-pkvm-guest.c
+index 56a3859dda8a15..4230b817a80bd8 100644
+--- a/drivers/virt/coco/pkvm-guest/arm-pkvm-guest.c
++++ b/drivers/virt/coco/pkvm-guest/arm-pkvm-guest.c
+@@ -87,12 +87,8 @@ static int mmio_guard_ioremap_hook(phys_addr_t phys, size_t size,
+ 
+ 	while (phys < end) {
+ 		const int func_id = ARM_SMCCC_VENDOR_HYP_KVM_MMIO_GUARD_FUNC_ID;
+-		int err;
+-
+-		err = arm_smccc_do_one_page(func_id, phys);
+-		if (err)
+-			return err;
+ 
++		WARN_ON_ONCE(arm_smccc_do_one_page(func_id, phys));
+ 		phys += PAGE_SIZE;
+ 	}
+ 
+diff --git a/drivers/watchdog/apple_wdt.c b/drivers/watchdog/apple_wdt.c
+index d4f739932f0be8..62dabf223d9096 100644
+--- a/drivers/watchdog/apple_wdt.c
++++ b/drivers/watchdog/apple_wdt.c
+@@ -130,7 +130,7 @@ static int apple_wdt_restart(struct watchdog_device *wdd, unsigned long mode,
+ 	 * can take up to ~20-25ms until the SoC is actually reset. Just wait
+ 	 * 50ms here to be safe.
+ 	 */
+-	(void)readl_relaxed(wdt->regs + APPLE_WDT_WD1_CUR_TIME);
++	(void)readl(wdt->regs + APPLE_WDT_WD1_CUR_TIME);
+ 	mdelay(50);
+ 
+ 	return 0;
+diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
+index 35b358bcf94ce6..f01ed38aba6751 100644
+--- a/drivers/watchdog/iTCO_wdt.c
++++ b/drivers/watchdog/iTCO_wdt.c
+@@ -82,6 +82,13 @@
+ #define TCO2_CNT(p)	(TCOBASE(p) + 0x0a) /* TCO2 Control Register	*/
+ #define TCOv2_TMR(p)	(TCOBASE(p) + 0x12) /* TCOv2 Timer Initial Value*/
+ 
++/*
++ * NMI_NOW is bit 8 of the TCO1_CNT register.
++ * It is read/write, but the bit has no effect on the hardware.
++ */
++#define NMI_NOW		BIT(8)
++
+ /* internal variables */
+ struct iTCO_wdt_private {
+ 	struct watchdog_device wddev;
+@@ -219,13 +226,23 @@ static int update_no_reboot_bit_cnt(void *priv, bool set)
+ 	struct iTCO_wdt_private *p = priv;
+ 	u16 val, newval;
+ 
+-	val = inw(TCO1_CNT(p));
++	/*
++	 * Writing 1 back to NMI_NOW in the TCO1_CNT register inverts
++	 * the bit, so the read-back comparison below would fail
++	 * spuriously.
++	 *
++	 * Mask NMI_NOW out of the values read from TCO1_CNT so that the
++	 * following write cannot invert it and the comparison stays
++	 * meaningful.
++	 */
++	val = inw(TCO1_CNT(p)) & ~NMI_NOW;
+ 	if (set)
+ 		val |= BIT(0);
+ 	else
+ 		val &= ~BIT(0);
+ 	outw(val, TCO1_CNT(p));
+-	newval = inw(TCO1_CNT(p));
++	newval = inw(TCO1_CNT(p)) & ~NMI_NOW;
+ 
+ 	/* make sure the update is successful */
+ 	return val != newval ? -EIO : 0;
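A toy model of why the masking matters, under the assumed quirk described in the comment (writing 1 to NMI_NOW inverts the stored bit): the unmasked read-modify-write makes the verification compare fail even though the no-reboot bit was updated correctly, while the masked variant from the patch compares cleanly.

#include <stdint.h>
#include <stdio.h>

#define NMI_NOW (1u << 8)

/* Toy register: writing 1 to NMI_NOW inverts it instead of storing it. */
static uint16_t reg;

static uint16_t inw_model(void) { return reg; }

static void outw_model(uint16_t val)
{
	uint16_t nmi = reg & NMI_NOW;

	if (val & NMI_NOW)
		nmi ^= NMI_NOW;
	reg = (val & ~NMI_NOW) | nmi;
}

int main(void)
{
	uint16_t val, newval;

	/* naive read-modify-write: the verification compare fails */
	reg = NMI_NOW | 0x01;             /* NMI_NOW happens to read 1 */
	val = inw_model() & ~(uint16_t)1; /* clear the no-reboot bit   */
	outw_model(val);
	newval = inw_model();
	printf("naive:  %s\n", val != newval ? "spurious -EIO" : "ok");

	/* masked variant from the patch: NMI_NOW masked on both reads */
	reg = NMI_NOW | 0x01;
	val = (inw_model() & ~NMI_NOW) & ~(uint16_t)1;
	outw_model(val);
	newval = inw_model() & ~NMI_NOW;
	printf("masked: %s\n", val != newval ? "spurious -EIO" : "ok");
	return 0;
}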
+diff --git a/drivers/watchdog/mtk_wdt.c b/drivers/watchdog/mtk_wdt.c
+index c35f85ce8d69cc..e2d7a57d6ea2e7 100644
+--- a/drivers/watchdog/mtk_wdt.c
++++ b/drivers/watchdog/mtk_wdt.c
+@@ -225,9 +225,15 @@ static int mtk_wdt_restart(struct watchdog_device *wdt_dev,
+ {
+ 	struct mtk_wdt_dev *mtk_wdt = watchdog_get_drvdata(wdt_dev);
+ 	void __iomem *wdt_base;
++	u32 reg;
+ 
+ 	wdt_base = mtk_wdt->wdt_base;
+ 
++	/* Enable reset in order to issue a system reset instead of an IRQ */
++	reg = readl(wdt_base + WDT_MODE);
++	reg &= ~WDT_MODE_IRQ_EN;
++	writel(reg | WDT_MODE_KEY, wdt_base + WDT_MODE);
++
+ 	while (1) {
+ 		writel(WDT_SWRST_KEY, wdt_base + WDT_SWRST);
+ 		mdelay(5);
+diff --git a/drivers/watchdog/rti_wdt.c b/drivers/watchdog/rti_wdt.c
+index 4895a69015a8ea..563d842014dfba 100644
+--- a/drivers/watchdog/rti_wdt.c
++++ b/drivers/watchdog/rti_wdt.c
+@@ -61,7 +61,7 @@
+ 
+ #define MAX_HW_ERROR		250
+ 
+-static int heartbeat = DEFAULT_HEARTBEAT;
++static int heartbeat;
+ 
+ /*
+  * struct to hold data for each WDT device
+@@ -252,6 +252,7 @@ static int rti_wdt_probe(struct platform_device *pdev)
+ 	wdd->min_timeout = 1;
+ 	wdd->max_hw_heartbeat_ms = (WDT_PRELOAD_MAX << WDT_PRELOAD_SHIFT) /
+ 		wdt->freq * 1000;
++	wdd->timeout = DEFAULT_HEARTBEAT;
+ 	wdd->parent = dev;
+ 
+ 	watchdog_set_drvdata(wdd, wdt);
+diff --git a/drivers/watchdog/xilinx_wwdt.c b/drivers/watchdog/xilinx_wwdt.c
+index d271e2e8d6e271..3d2a156f718009 100644
+--- a/drivers/watchdog/xilinx_wwdt.c
++++ b/drivers/watchdog/xilinx_wwdt.c
+@@ -2,7 +2,7 @@
+ /*
+  * Window watchdog device driver for Xilinx Versal WWDT
+  *
+- * Copyright (C) 2022 - 2023, Advanced Micro Devices, Inc.
++ * Copyright (C) 2022 - 2024, Advanced Micro Devices, Inc.
+  */
+ 
+ #include <linux/clk.h>
+@@ -36,6 +36,12 @@
+ 
+ #define XWWDT_CLOSE_WINDOW_PERCENT	50
+ 
++/* Maximum count value of each 32-bit window */
++#define XWWDT_MAX_COUNT_WINDOW		GENMASK(31, 0)
++
++/* Maximum count value of closed and open window combined */
++#define XWWDT_MAX_COUNT_WINDOW_COMBINED	GENMASK_ULL(32, 1)
++
+ static int wwdt_timeout;
+ static int closed_window_percent;
+ 
+@@ -54,6 +60,8 @@ MODULE_PARM_DESC(closed_window_percent,
+  * @xilinx_wwdt_wdd: watchdog device structure
+  * @freq: source clock frequency of WWDT
+  * @close_percent: Closed window percent
++ * @closed_timeout: Closed window timeout in ticks
++ * @open_timeout: Open window timeout in ticks
+  */
+ struct xwwdt_device {
+ 	void __iomem *base;
+@@ -61,27 +69,22 @@ struct xwwdt_device {
+ 	struct watchdog_device xilinx_wwdt_wdd;
+ 	unsigned long freq;
+ 	u32 close_percent;
++	u64 closed_timeout;
++	u64 open_timeout;
+ };
+ 
+ static int xilinx_wwdt_start(struct watchdog_device *wdd)
+ {
+ 	struct xwwdt_device *xdev = watchdog_get_drvdata(wdd);
+ 	struct watchdog_device *xilinx_wwdt_wdd = &xdev->xilinx_wwdt_wdd;
+-	u64 time_out, closed_timeout, open_timeout;
+ 	u32 control_status_reg;
+ 
+-	/* Calculate timeout count */
+-	time_out = xdev->freq * wdd->timeout;
+-	closed_timeout = div_u64(time_out * xdev->close_percent, 100);
+-	open_timeout = time_out - closed_timeout;
+-	wdd->min_hw_heartbeat_ms = xdev->close_percent * 10 * wdd->timeout;
+-
+ 	spin_lock(&xdev->spinlock);
+ 
+ 	iowrite32(XWWDT_MWR_MASK, xdev->base + XWWDT_MWR_OFFSET);
+ 	iowrite32(~(u32)XWWDT_ESR_WEN_MASK, xdev->base + XWWDT_ESR_OFFSET);
+-	iowrite32((u32)closed_timeout, xdev->base + XWWDT_FWR_OFFSET);
+-	iowrite32((u32)open_timeout, xdev->base + XWWDT_SWR_OFFSET);
++	iowrite32((u32)xdev->closed_timeout, xdev->base + XWWDT_FWR_OFFSET);
++	iowrite32((u32)xdev->open_timeout, xdev->base + XWWDT_SWR_OFFSET);
+ 
+ 	/* Enable the window watchdog timer */
+ 	control_status_reg = ioread32(xdev->base + XWWDT_ESR_OFFSET);
+@@ -133,7 +136,12 @@ static int xwwdt_probe(struct platform_device *pdev)
+ 	struct watchdog_device *xilinx_wwdt_wdd;
+ 	struct device *dev = &pdev->dev;
+ 	struct xwwdt_device *xdev;
++	u64 max_per_window_ms;
++	u64 min_per_window_ms;
++	u64 timeout_count;
+ 	struct clk *clk;
++	u32 timeout_ms;
++	u64 ms_count;
+ 	int ret;
+ 
+ 	xdev = devm_kzalloc(dev, sizeof(*xdev), GFP_KERNEL);
+@@ -154,12 +162,13 @@ static int xwwdt_probe(struct platform_device *pdev)
+ 		return PTR_ERR(clk);
+ 
+ 	xdev->freq = clk_get_rate(clk);
+-	if (!xdev->freq)
++	if (xdev->freq < 1000000)
+ 		return -EINVAL;
+ 
+ 	xilinx_wwdt_wdd->min_timeout = XWWDT_MIN_TIMEOUT;
+ 	xilinx_wwdt_wdd->timeout = XWWDT_DEFAULT_TIMEOUT;
+-	xilinx_wwdt_wdd->max_hw_heartbeat_ms = 1000 * xilinx_wwdt_wdd->timeout;
++	xilinx_wwdt_wdd->max_hw_heartbeat_ms =
++		div64_u64(XWWDT_MAX_COUNT_WINDOW_COMBINED, xdev->freq) * 1000;
+ 
+ 	if (closed_window_percent == 0 || closed_window_percent >= 100)
+ 		xdev->close_percent = XWWDT_CLOSE_WINDOW_PERCENT;
+@@ -167,6 +176,48 @@ static int xwwdt_probe(struct platform_device *pdev)
+ 		xdev->close_percent = closed_window_percent;
+ 
+ 	watchdog_init_timeout(xilinx_wwdt_wdd, wwdt_timeout, &pdev->dev);
++
++	/* Calculate ticks for 1 milli-second */
++	ms_count = div_u64(xdev->freq, 1000);
++	timeout_ms = xilinx_wwdt_wdd->timeout * 1000;
++	timeout_count = timeout_ms * ms_count;
++
++	if (timeout_ms > xilinx_wwdt_wdd->max_hw_heartbeat_ms) {
++		/*
++		 * To avoid restricting pings until the minimum hardware
++		 * heartbeat has elapsed, rely solely on the open window
++		 * and set the minimum hardware heartbeat to 0.
++		 */
++		xdev->closed_timeout = 0;
++		xdev->open_timeout = XWWDT_MAX_COUNT_WINDOW;
++		xilinx_wwdt_wdd->min_hw_heartbeat_ms = 0;
++		xilinx_wwdt_wdd->max_hw_heartbeat_ms = xilinx_wwdt_wdd->max_hw_heartbeat_ms / 2;
++	} else {
++		xdev->closed_timeout  = div64_u64(timeout_count * xdev->close_percent, 100);
++		xilinx_wwdt_wdd->min_hw_heartbeat_ms =
++			div64_u64(timeout_ms * xdev->close_percent, 100);
++
++		if (timeout_ms > xilinx_wwdt_wdd->max_hw_heartbeat_ms / 2) {
++			max_per_window_ms = xilinx_wwdt_wdd->max_hw_heartbeat_ms / 2;
++			min_per_window_ms = timeout_ms - max_per_window_ms;
++
++			if (xilinx_wwdt_wdd->min_hw_heartbeat_ms > max_per_window_ms) {
++				dev_info(xilinx_wwdt_wdd->parent,
++					 "Closed window cannot be set to %d%%. Using maximum supported value.\n",
++					xdev->close_percent);
++				xdev->closed_timeout = max_per_window_ms * ms_count;
++				xilinx_wwdt_wdd->min_hw_heartbeat_ms = max_per_window_ms;
++			} else if (xilinx_wwdt_wdd->min_hw_heartbeat_ms < min_per_window_ms) {
++				dev_info(xilinx_wwdt_wdd->parent,
++					 "Closed window cannot be set to %d%%. Using minimum supported value.\n",
++					xdev->close_percent);
++				xdev->closed_timeout = min_per_window_ms * ms_count;
++				xilinx_wwdt_wdd->min_hw_heartbeat_ms = min_per_window_ms;
++			}
++		}
++		xdev->open_timeout = timeout_count - xdev->closed_timeout;
++	}
++
+ 	spin_lock_init(&xdev->spinlock);
+ 	watchdog_set_drvdata(xilinx_wwdt_wdd, xdev);
+ 	watchdog_set_nowayout(xilinx_wwdt_wdd, 1);
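The probe now precomputes both window values in ticks instead of redoing the math on every start. A standalone sketch of the non-clamped branch (clock rate, timeout and close percentage are example values; the clamping against half of max_hw_heartbeat_ms is left out):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t freq = 50000000;   /* 50 MHz source clock (example) */
	uint64_t timeout_s = 40;
	uint64_t close_percent = 50;

	uint64_t ms_count = freq / 1000;           /* ticks per 1 ms */
	uint64_t timeout_ms = timeout_s * 1000;
	uint64_t timeout_count = timeout_ms * ms_count;

	/* split the total timeout into closed and open windows */
	uint64_t closed_ticks = timeout_count * close_percent / 100;
	uint64_t open_ticks = timeout_count - closed_ticks;
	uint64_t min_hw_heartbeat_ms = timeout_ms * close_percent / 100;

	printf("closed window: %llu ticks\n",
	       (unsigned long long)closed_ticks);
	printf("open window:   %llu ticks\n",
	       (unsigned long long)open_ticks);
	printf("min heartbeat: %llu ms\n",
	       (unsigned long long)min_hw_heartbeat_ms);
	return 0;
}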
+diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
+index 83d5cdd77f293e..604399e59a3d10 100644
+--- a/fs/btrfs/dev-replace.c
++++ b/fs/btrfs/dev-replace.c
+@@ -641,6 +641,7 @@ static int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
+ 		return ret;
+ 
+ 	down_write(&dev_replace->rwsem);
++	dev_replace->replace_task = current;
+ 	switch (dev_replace->replace_state) {
+ 	case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED:
+ 	case BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED:
+@@ -994,6 +995,7 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
+ 	list_add(&tgt_device->dev_alloc_list, &fs_devices->alloc_list);
+ 	fs_devices->rw_devices++;
+ 
++	dev_replace->replace_task = NULL;
+ 	up_write(&dev_replace->rwsem);
+ 	btrfs_rm_dev_replace_blocked(fs_info);
+ 
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index b11bfe68dd65fb..43b7b331b2da36 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -3202,8 +3202,7 @@ int btrfs_check_features(struct btrfs_fs_info *fs_info, bool is_rw_mount)
+ 	return 0;
+ }
+ 
+-int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_devices,
+-		      const char *options)
++int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_devices)
+ {
+ 	u32 sectorsize;
+ 	u32 nodesize;
+diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
+index 99af64d3f27781..127e31e0834709 100644
+--- a/fs/btrfs/disk-io.h
++++ b/fs/btrfs/disk-io.h
+@@ -52,8 +52,7 @@ struct extent_buffer *btrfs_find_create_tree_block(
+ int btrfs_start_pre_rw_mount(struct btrfs_fs_info *fs_info);
+ int btrfs_check_super_csum(struct btrfs_fs_info *fs_info,
+ 			   const struct btrfs_super_block *disk_sb);
+-int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_devices,
+-		      const char *options);
++int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_devices);
+ void __cold close_ctree(struct btrfs_fs_info *fs_info);
+ int btrfs_validate_super(const struct btrfs_fs_info *fs_info,
+ 			 const struct btrfs_super_block *sb, int mirror_num);
+diff --git a/fs/btrfs/fs.h b/fs/btrfs/fs.h
+index 79f64e383eddf8..cbfb225858a59f 100644
+--- a/fs/btrfs/fs.h
++++ b/fs/btrfs/fs.h
+@@ -317,6 +317,8 @@ struct btrfs_dev_replace {
+ 
+ 	struct percpu_counter bio_counter;
+ 	wait_queue_head_t replace_wait;
++
++	struct task_struct *replace_task;
+ };
+ 
+ /*
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index d067db2619713f..58ffe78132d9d6 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -9857,6 +9857,7 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
+ 	if (btrfs_root_dead(root)) {
+ 		spin_unlock(&root->root_item_lock);
+ 
++		btrfs_drew_write_unlock(&root->snapshot_lock);
+ 		btrfs_exclop_finish(fs_info);
+ 		btrfs_warn(fs_info,
+ 		"cannot activate swapfile because subvolume %llu is being deleted",
+diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
+index c64d0713412231..8292e488d3d777 100644
+--- a/fs/btrfs/super.c
++++ b/fs/btrfs/super.c
+@@ -946,8 +946,7 @@ static int get_default_subvol_objectid(struct btrfs_fs_info *fs_info, u64 *objec
+ }
+ 
+ static int btrfs_fill_super(struct super_block *sb,
+-			    struct btrfs_fs_devices *fs_devices,
+-			    void *data)
++			    struct btrfs_fs_devices *fs_devices)
+ {
+ 	struct inode *inode;
+ 	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
+@@ -971,7 +970,7 @@ static int btrfs_fill_super(struct super_block *sb,
+ 		return err;
+ 	}
+ 
+-	err = open_ctree(sb, fs_devices, (char *)data);
++	err = open_ctree(sb, fs_devices);
+ 	if (err) {
+ 		btrfs_err(fs_info, "open_ctree failed");
+ 		return err;
+@@ -1887,18 +1886,21 @@ static int btrfs_get_tree_super(struct fs_context *fc)
+ 
+ 	if (sb->s_root) {
+ 		btrfs_close_devices(fs_devices);
+-		if ((fc->sb_flags ^ sb->s_flags) & SB_RDONLY)
+-			ret = -EBUSY;
++		/*
++		 * At this stage we may have RO flag mismatch between
++		 * fc->sb_flags and sb->s_flags.  Caller should detect such
++		 * mismatch and reconfigure with sb->s_umount rwsem held if
++		 * needed.
++		 */
+ 	} else {
+ 		snprintf(sb->s_id, sizeof(sb->s_id), "%pg", bdev);
+ 		shrinker_debugfs_rename(sb->s_shrink, "sb-btrfs:%s", sb->s_id);
+ 		btrfs_sb(sb)->bdev_holder = &btrfs_fs_type;
+-		ret = btrfs_fill_super(sb, fs_devices, NULL);
+-	}
+-
+-	if (ret) {
+-		deactivate_locked_super(sb);
+-		return ret;
++		ret = btrfs_fill_super(sb, fs_devices);
++		if (ret) {
++			deactivate_locked_super(sb);
++			return ret;
++		}
+ 	}
+ 
+ 	btrfs_clear_oneshot_options(fs_info);
+@@ -1984,39 +1986,18 @@ static int btrfs_get_tree_super(struct fs_context *fc)
+  * btrfs or not, setting the whole super block RO.  To make per-subvolume mounting
+  * work with different options work we need to keep backward compatibility.
+  */
+-static struct vfsmount *btrfs_reconfigure_for_mount(struct fs_context *fc)
++static int btrfs_reconfigure_for_mount(struct fs_context *fc, struct vfsmount *mnt)
+ {
+-	struct vfsmount *mnt;
+-	int ret;
+-	const bool ro2rw = !(fc->sb_flags & SB_RDONLY);
+-
+-	/*
+-	 * We got an EBUSY because our SB_RDONLY flag didn't match the existing
+-	 * super block, so invert our setting here and retry the mount so we
+-	 * can get our vfsmount.
+-	 */
+-	if (ro2rw)
+-		fc->sb_flags |= SB_RDONLY;
+-	else
+-		fc->sb_flags &= ~SB_RDONLY;
+-
+-	mnt = fc_mount(fc);
+-	if (IS_ERR(mnt))
+-		return mnt;
++	int ret = 0;
+ 
+-	if (!ro2rw)
+-		return mnt;
++	if (fc->sb_flags & SB_RDONLY)
++		return ret;
+ 
+-	/* We need to convert to rw, call reconfigure. */
+-	fc->sb_flags &= ~SB_RDONLY;
+ 	down_write(&mnt->mnt_sb->s_umount);
+-	ret = btrfs_reconfigure(fc);
++	if (!(fc->sb_flags & SB_RDONLY) && (mnt->mnt_sb->s_flags & SB_RDONLY))
++		ret = btrfs_reconfigure(fc);
+ 	up_write(&mnt->mnt_sb->s_umount);
+-	if (ret) {
+-		mntput(mnt);
+-		return ERR_PTR(ret);
+-	}
+-	return mnt;
++	return ret;
+ }
+ 
+ static int btrfs_get_tree_subvol(struct fs_context *fc)
+@@ -2026,6 +2007,7 @@ static int btrfs_get_tree_subvol(struct fs_context *fc)
+ 	struct fs_context *dup_fc;
+ 	struct dentry *dentry;
+ 	struct vfsmount *mnt;
++	int ret = 0;
+ 
+ 	/*
+ 	 * Setup a dummy root and fs_info for test/set super.  This is because
+@@ -2068,11 +2050,16 @@ static int btrfs_get_tree_subvol(struct fs_context *fc)
+ 	fc->security = NULL;
+ 
+ 	mnt = fc_mount(dup_fc);
+-	if (PTR_ERR_OR_ZERO(mnt) == -EBUSY)
+-		mnt = btrfs_reconfigure_for_mount(dup_fc);
+-	put_fs_context(dup_fc);
+-	if (IS_ERR(mnt))
++	if (IS_ERR(mnt)) {
++		put_fs_context(dup_fc);
+ 		return PTR_ERR(mnt);
++	}
++	ret = btrfs_reconfigure_for_mount(dup_fc, mnt);
++	put_fs_context(dup_fc);
++	if (ret) {
++		mntput(mnt);
++		return ret;
++	}
+ 
+ 	/*
+	 * This frees ->subvol_name, because if it isn't set we have to
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index eb51b609190fb5..0c4d14c59ebec5 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -732,6 +732,114 @@ const u8 *btrfs_sb_fsid_ptr(const struct btrfs_super_block *sb)
+ 	return has_metadata_uuid ? sb->metadata_uuid : sb->fsid;
+ }
+ 
++/*
++ * Callers can pass in unusual soft links, such as "/proc/self/fd/<fd>",
++ * which may point at a block device.
++ *
++ * Such names should never be recorded as the device path.  Check that
++ * the path itself (without following symlinks) resolves to a name
++ * under "/dev/".
++ */
++static bool is_good_dev_path(const char *dev_path)
++{
++	struct path path = { .mnt = NULL, .dentry = NULL };
++	char *path_buf = NULL;
++	char *resolved_path;
++	bool is_good = false;
++	int ret;
++
++	if (!dev_path)
++		goto out;
++
++	path_buf = kmalloc(PATH_MAX, GFP_KERNEL);
++	if (!path_buf)
++		goto out;
++
++	/*
++	 * Do not follow soft link, just check if the original path is inside
++	 * "/dev/".
++	 */
++	ret = kern_path(dev_path, 0, &path);
++	if (ret)
++		goto out;
++	resolved_path = d_path(&path, path_buf, PATH_MAX);
++	if (IS_ERR(resolved_path))
++		goto out;
++	if (strncmp(resolved_path, "/dev/", strlen("/dev/")))
++		goto out;
++	is_good = true;
++out:
++	kfree(path_buf);
++	path_put(&path);
++	return is_good;
++}
++
++static int get_canonical_dev_path(const char *dev_path, char *canonical)
++{
++	struct path path = { .mnt = NULL, .dentry = NULL };
++	char *path_buf = NULL;
++	char *resolved_path;
++	int ret;
++
++	if (!dev_path) {
++		ret = -EINVAL;
++		goto out;
++	}
++
++	path_buf = kmalloc(PATH_MAX, GFP_KERNEL);
++	if (!path_buf) {
++		ret = -ENOMEM;
++		goto out;
++	}
++
++	ret = kern_path(dev_path, LOOKUP_FOLLOW, &path);
++	if (ret)
++		goto out;
++	resolved_path = d_path(&path, path_buf, PATH_MAX);
++	ret = strscpy(canonical, resolved_path, PATH_MAX);
++out:
++	kfree(path_buf);
++	path_put(&path);
++	return ret;
++}
++
++static bool is_same_device(struct btrfs_device *device, const char *new_path)
++{
++	struct path old = { .mnt = NULL, .dentry = NULL };
++	struct path new = { .mnt = NULL, .dentry = NULL };
++	char *old_path = NULL;
++	bool is_same = false;
++	int ret;
++
++	if (!device->name)
++		goto out;
++
++	old_path = kzalloc(PATH_MAX, GFP_NOFS);
++	if (!old_path)
++		goto out;
++
++	rcu_read_lock();
++	ret = strscpy(old_path, rcu_str_deref(device->name), PATH_MAX);
++	rcu_read_unlock();
++	if (ret < 0)
++		goto out;
++
++	ret = kern_path(old_path, LOOKUP_FOLLOW, &old);
++	if (ret)
++		goto out;
++	ret = kern_path(new_path, LOOKUP_FOLLOW, &new);
++	if (ret)
++		goto out;
++	if (path_equal(&old, &new))
++		is_same = true;
++out:
++	kfree(old_path);
++	path_put(&old);
++	path_put(&new);
++	return is_same;
++}
++
+ /*
+  * Add new device to list of registered devices
+  *
+@@ -852,7 +960,7 @@ static noinline struct btrfs_device *device_list_add(const char *path,
+ 				MAJOR(path_devt), MINOR(path_devt),
+ 				current->comm, task_pid_nr(current));
+ 
+-	} else if (!device->name || strcmp(device->name->str, path)) {
++	} else if (!device->name || !is_same_device(device, path)) {
+ 		/*
+ 		 * When FS is already mounted.
+ 		 * 1. If you are here and if the device->name is NULL that
+@@ -1383,12 +1491,23 @@ struct btrfs_device *btrfs_scan_one_device(const char *path, blk_mode_t flags,
+ 	bool new_device_added = false;
+ 	struct btrfs_device *device = NULL;
+ 	struct file *bdev_file;
++	char *canonical_path = NULL;
+ 	u64 bytenr;
+ 	dev_t devt;
+ 	int ret;
+ 
+ 	lockdep_assert_held(&uuid_mutex);
+ 
++	if (!is_good_dev_path(path)) {
++		canonical_path = kmalloc(PATH_MAX, GFP_KERNEL);
++		if (canonical_path) {
++			ret = get_canonical_dev_path(path, canonical_path);
++			if (ret < 0) {
++				kfree(canonical_path);
++				canonical_path = NULL;
++			}
++		}
++	}
+ 	/*
+ 	 * Avoid an exclusive open here, as the systemd-udev may initiate the
+ 	 * device scan which may race with the user's mount or mkfs command,
+@@ -1433,7 +1552,8 @@ struct btrfs_device *btrfs_scan_one_device(const char *path, blk_mode_t flags,
+ 		goto free_disk_super;
+ 	}
+ 
+-	device = device_list_add(path, disk_super, &new_device_added);
++	device = device_list_add(canonical_path ? : path, disk_super,
++				 &new_device_added);
+ 	if (!IS_ERR(device) && new_device_added)
+ 		btrfs_free_stale_devices(device->devt, device);
+ 
+@@ -1442,6 +1562,7 @@ struct btrfs_device *btrfs_scan_one_device(const char *path, blk_mode_t flags,
+ 
+ error_bdev_put:
+ 	fput(bdev_file);
++	kfree(canonical_path);
+ 
+ 	return device;
+ }
+@@ -2721,8 +2842,6 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
+ 	set_blocksize(device->bdev_file, BTRFS_BDEV_BLOCKSIZE);
+ 
+ 	if (seeding_dev) {
+-		btrfs_clear_sb_rdonly(sb);
+-
+ 		/* GFP_KERNEL allocation must not be under device_list_mutex */
+ 		seed_devices = btrfs_init_sprout(fs_info);
+ 		if (IS_ERR(seed_devices)) {
+@@ -2865,8 +2984,6 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
+ 	mutex_unlock(&fs_info->chunk_mutex);
+ 	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
+ error_trans:
+-	if (seeding_dev)
+-		btrfs_set_sb_rdonly(sb);
+ 	if (trans)
+ 		btrfs_end_transaction(trans);
+ error_free_zone:
+@@ -6481,13 +6598,15 @@ int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
+ 	max_len = btrfs_max_io_len(map, map_offset, &io_geom);
+ 	*length = min_t(u64, map->chunk_len - map_offset, max_len);
+ 
+-	down_read(&dev_replace->rwsem);
++	if (dev_replace->replace_task != current)
++		down_read(&dev_replace->rwsem);
++
+ 	dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
+ 	/*
+ 	 * Hold the semaphore for read during the whole operation, write is
+ 	 * requested at commit time but must wait.
+ 	 */
+-	if (!dev_replace_is_ongoing)
++	if (!dev_replace_is_ongoing && dev_replace->replace_task != current)
+ 		up_read(&dev_replace->rwsem);
+ 
+ 	switch (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
+@@ -6627,7 +6746,7 @@ int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
+ 	bioc->mirror_num = io_geom.mirror_num;
+ 
+ out:
+-	if (dev_replace_is_ongoing) {
++	if (dev_replace_is_ongoing && dev_replace->replace_task != current) {
+ 		lockdep_assert_held(&dev_replace->rwsem);
+ 		/* Unlock and let waiting writers proceed */
+ 		up_read(&dev_replace->rwsem);
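A userspace analogue of the idea behind is_good_dev_path()/get_canonical_dev_path(): if the name handed in does not itself live under /dev/, resolve it and register the canonical path, so that a later scan of /dev/sda1 matches a device first registered via something like /proc/self/fd/3. (The kernel checks the dentry path via kern_path() + d_path(); realpath() merely stands in for that here.)

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(int argc, char **argv)
{
	char resolved[PATH_MAX];

	if (argc != 2)
		return 1;

	if (strncmp(argv[1], "/dev/", strlen("/dev/")) == 0) {
		/* already a /dev/ name: keep it as-is */
		printf("good path, register as-is: %s\n", argv[1]);
	} else if (realpath(argv[1], resolved)) {
		/* odd name: register the resolved path instead */
		printf("odd path %s, register canonical name: %s\n",
		       argv[1], resolved);
	} else {
		perror("realpath");
		return 1;
	}
	return 0;
}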
+diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
+index 865dc70a9dfc47..dddedaef5e93dd 100644
+--- a/fs/dlm/lock.c
++++ b/fs/dlm/lock.c
+@@ -2861,16 +2861,14 @@ static int validate_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
+ 	case -EINVAL:
+ 		/* annoy the user because dlm usage is wrong */
+ 		WARN_ON(1);
+-		log_error(ls, "%s %d %x %x %x %d %d %s", __func__,
++		log_error(ls, "%s %d %x %x %x %d %d", __func__,
+ 			  rv, lkb->lkb_id, dlm_iflags_val(lkb), args->flags,
+-			  lkb->lkb_status, lkb->lkb_wait_type,
+-			  lkb->lkb_resource->res_name);
++			  lkb->lkb_status, lkb->lkb_wait_type);
+ 		break;
+ 	default:
+-		log_debug(ls, "%s %d %x %x %x %d %d %s", __func__,
++		log_debug(ls, "%s %d %x %x %x %d %d", __func__,
+ 			  rv, lkb->lkb_id, dlm_iflags_val(lkb), args->flags,
+-			  lkb->lkb_status, lkb->lkb_wait_type,
+-			  lkb->lkb_resource->res_name);
++			  lkb->lkb_status, lkb->lkb_wait_type);
+ 		break;
+ 	}
+ 
+diff --git a/fs/eventpoll.c b/fs/eventpoll.c
+index 1ae4542f0bd88b..90fbab6b6f0363 100644
+--- a/fs/eventpoll.c
++++ b/fs/eventpoll.c
+@@ -823,7 +823,8 @@ static bool __ep_remove(struct eventpoll *ep, struct epitem *epi, bool force)
+ 	to_free = NULL;
+ 	head = file->f_ep;
+ 	if (head->first == &epi->fllink && !epi->fllink.next) {
+-		file->f_ep = NULL;
++		/* See eventpoll_release() for details. */
++		WRITE_ONCE(file->f_ep, NULL);
+ 		if (!is_file_epoll(file)) {
+ 			struct epitems_head *v;
+ 			v = container_of(head, struct epitems_head, epitems);
+@@ -1603,7 +1604,8 @@ static int attach_epitem(struct file *file, struct epitem *epi)
+ 			spin_unlock(&file->f_lock);
+ 			goto allocate;
+ 		}
+-		file->f_ep = head;
++		/* See eventpoll_release() for details. */
++		WRITE_ONCE(file->f_ep, head);
+ 		to_free = NULL;
+ 	}
+ 	hlist_add_head_rcu(&epi->fllink, file->f_ep);
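Both stores to file->f_ep become WRITE_ONCE because, per the comments the patch adds, eventpoll_release() inspects the pointer without holding f_lock; annotating both sides keeps the racy load and store single, untorn accesses. A rough userspace analogue using C11 relaxed atomics (the kernel's actual mechanism is READ_ONCE/WRITE_ONCE, not C11 atomics):

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct epitems_head { int dummy; };

struct file_like {
	_Atomic(struct epitems_head *) f_ep; /* stand-in for file->f_ep */
};

/* writer side, called under the lock: publish or clear the head */
static void set_head(struct file_like *f, struct epitems_head *h)
{
	atomic_store_explicit(&f->f_ep, h, memory_order_relaxed);
}

/* lockless fast path, as in eventpoll_release(): one untorn load */
static int has_epitems(struct file_like *f)
{
	return atomic_load_explicit(&f->f_ep, memory_order_relaxed) != NULL;
}

int main(void)
{
	struct file_like f = { NULL };
	struct epitems_head head = { 0 };

	set_head(&f, &head);
	printf("%d\n", has_epitems(&f)); /* 1 */
	set_head(&f, NULL);
	printf("%d\n", has_epitems(&f)); /* 0 */
	return 0;
}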
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 88f98dc4402753..60909af2d4a537 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -4482,7 +4482,7 @@ static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset,
+ 	int depth = 0;
+ 	struct ext4_map_blocks map;
+ 	unsigned int credits;
+-	loff_t epos;
++	loff_t epos, old_size = i_size_read(inode);
+ 
+ 	BUG_ON(!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS));
+ 	map.m_lblk = offset;
+@@ -4541,6 +4541,11 @@ static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset,
+ 			if (ext4_update_inode_size(inode, epos) & 0x1)
+ 				inode_set_mtime_to_ts(inode,
+ 						      inode_get_ctime(inode));
++			if (epos > old_size) {
++				pagecache_isize_extended(inode, old_size, epos);
++				ext4_zero_partial_blocks(handle, inode,
++						     old_size, epos - old_size);
++			}
+ 		}
+ 		ret2 = ext4_mark_inode_dirty(handle, inode);
+ 		ext4_update_inode_fsync_trans(handle, inode, 1);
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 99d09cd9c6a37e..67a5b937f5a92d 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -1307,8 +1307,10 @@ static int ext4_write_end(struct file *file,
+ 	folio_unlock(folio);
+ 	folio_put(folio);
+ 
+-	if (old_size < pos && !verity)
++	if (old_size < pos && !verity) {
+ 		pagecache_isize_extended(inode, old_size, pos);
++		ext4_zero_partial_blocks(handle, inode, old_size, pos - old_size);
++	}
+ 	/*
+ 	 * Don't mark the inode dirty under folio lock. First, it unnecessarily
+ 	 * makes the holding time of folio lock longer. Second, it forces lock
+@@ -1423,8 +1425,10 @@ static int ext4_journalled_write_end(struct file *file,
+ 	folio_unlock(folio);
+ 	folio_put(folio);
+ 
+-	if (old_size < pos && !verity)
++	if (old_size < pos && !verity) {
+ 		pagecache_isize_extended(inode, old_size, pos);
++		ext4_zero_partial_blocks(handle, inode, old_size, pos - old_size);
++	}
+ 
+ 	if (size_changed) {
+ 		ret2 = ext4_mark_inode_dirty(handle, inode);
+@@ -2985,7 +2989,8 @@ static int ext4_da_do_write_end(struct address_space *mapping,
+ 	struct inode *inode = mapping->host;
+ 	loff_t old_size = inode->i_size;
+ 	bool disksize_changed = false;
+-	loff_t new_i_size;
++	loff_t new_i_size, zero_len = 0;
++	handle_t *handle;
+ 
+ 	if (unlikely(!folio_buffers(folio))) {
+ 		folio_unlock(folio);
+@@ -3029,18 +3034,21 @@ static int ext4_da_do_write_end(struct address_space *mapping,
+ 	folio_unlock(folio);
+ 	folio_put(folio);
+ 
+-	if (old_size < pos)
++	if (pos > old_size) {
+ 		pagecache_isize_extended(inode, old_size, pos);
++		zero_len = pos - old_size;
++	}
+ 
+-	if (disksize_changed) {
+-		handle_t *handle;
++	if (!disksize_changed && !zero_len)
++		return copied;
+ 
+-		handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
+-		if (IS_ERR(handle))
+-			return PTR_ERR(handle);
+-		ext4_mark_inode_dirty(handle, inode);
+-		ext4_journal_stop(handle);
+-	}
++	handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
++	if (IS_ERR(handle))
++		return PTR_ERR(handle);
++	if (zero_len)
++		ext4_zero_partial_blocks(handle, inode, old_size, zero_len);
++	ext4_mark_inode_dirty(handle, inode);
++	ext4_journal_stop(handle);
+ 
+ 	return copied;
+ }
+@@ -5426,6 +5434,14 @@ int ext4_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
+ 		}
+ 
+ 		if (attr->ia_size != inode->i_size) {
++			/* attach jbd2 jinode for EOF folio tail zeroing */
++			if (attr->ia_size & (inode->i_sb->s_blocksize - 1) ||
++			    oldsize & (inode->i_sb->s_blocksize - 1)) {
++				error = ext4_inode_attach_jinode(inode);
++				if (error)
++					goto err_out;
++			}
++
+ 			handle = ext4_journal_start(inode, EXT4_HT_INODE, 3);
+ 			if (IS_ERR(handle)) {
+ 				error = PTR_ERR(handle);
+@@ -5436,12 +5452,17 @@ int ext4_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
+ 				orphan = 1;
+ 			}
+ 			/*
+-			 * Update c/mtime on truncate up, ext4_truncate() will
+-			 * update c/mtime in shrink case below
++			 * Update c/mtime and tail zero the EOF folio on
++			 * truncate up. ext4_truncate() handles the shrink case
++			 * below.
+ 			 */
+-			if (!shrink)
++			if (!shrink) {
+ 				inode_set_mtime_to_ts(inode,
+ 						      inode_set_ctime_current(inode));
++				if (oldsize & (inode->i_sb->s_blocksize - 1))
++					ext4_block_truncate_page(handle,
++							inode->i_mapping, oldsize);
++			}
+ 
+ 			if (shrink)
+ 				ext4_fc_track_range(handle, inode,
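The common thread in these ext4 hunks: whenever i_size moves up (the write-end paths, the delalloc write-end, fallocate and truncate-up), the bytes between the old and new EOF are explicitly zeroed so stale data in the old tail block cannot become readable. A sketch of the range involved, assuming a 4 KiB block size for the example:

#include <stdio.h>

int main(void)
{
	unsigned long long blocksize = 4096;
	unsigned long long old_size = 10000, pos = 20000;

	if (pos > old_size) {
		/* the span ext4_zero_partial_blocks() is asked to clear */
		printf("zero range: [%llu, %llu), len %llu\n",
		       old_size, pos, pos - old_size);
		if (old_size % blocksize)
			printf("old EOF block %llu had %llu stale tail bytes\n",
			       old_size / blocksize,
			       blocksize - old_size % blocksize);
	}
	return 0;
}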
+diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
+index 9efe4c00d75bb3..da0960d496ae09 100644
+--- a/fs/f2fs/data.c
++++ b/fs/f2fs/data.c
+@@ -1819,16 +1819,6 @@ bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len)
+ 	return true;
+ }
+ 
+-static inline u64 bytes_to_blks(struct inode *inode, u64 bytes)
+-{
+-	return (bytes >> inode->i_blkbits);
+-}
+-
+-static inline u64 blks_to_bytes(struct inode *inode, u64 blks)
+-{
+-	return (blks << inode->i_blkbits);
+-}
+-
+ static int f2fs_xattr_fiemap(struct inode *inode,
+ 				struct fiemap_extent_info *fieinfo)
+ {
+@@ -1854,7 +1844,7 @@ static int f2fs_xattr_fiemap(struct inode *inode,
+ 			return err;
+ 		}
+ 
+-		phys = blks_to_bytes(inode, ni.blk_addr);
++		phys = F2FS_BLK_TO_BYTES(ni.blk_addr);
+ 		offset = offsetof(struct f2fs_inode, i_addr) +
+ 					sizeof(__le32) * (DEF_ADDRS_PER_INODE -
+ 					get_inline_xattr_addrs(inode));
+@@ -1886,7 +1876,7 @@ static int f2fs_xattr_fiemap(struct inode *inode,
+ 			return err;
+ 		}
+ 
+-		phys = blks_to_bytes(inode, ni.blk_addr);
++		phys = F2FS_BLK_TO_BYTES(ni.blk_addr);
+ 		len = inode->i_sb->s_blocksize;
+ 
+ 		f2fs_put_page(page, 1);
+@@ -1906,7 +1896,7 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+ 		u64 start, u64 len)
+ {
+ 	struct f2fs_map_blocks map;
+-	sector_t start_blk, last_blk;
++	sector_t start_blk, last_blk, blk_len, max_len;
+ 	pgoff_t next_pgofs;
+ 	u64 logical = 0, phys = 0, size = 0;
+ 	u32 flags = 0;
+@@ -1948,16 +1938,15 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+ 			goto out;
+ 	}
+ 
+-	if (bytes_to_blks(inode, len) == 0)
+-		len = blks_to_bytes(inode, 1);
+-
+-	start_blk = bytes_to_blks(inode, start);
+-	last_blk = bytes_to_blks(inode, start + len - 1);
++	start_blk = F2FS_BYTES_TO_BLK(start);
++	last_blk = F2FS_BYTES_TO_BLK(start + len - 1);
++	blk_len = last_blk - start_blk + 1;
++	max_len = F2FS_BYTES_TO_BLK(maxbytes) - start_blk;
+ 
+ next:
+ 	memset(&map, 0, sizeof(map));
+ 	map.m_lblk = start_blk;
+-	map.m_len = bytes_to_blks(inode, len);
++	map.m_len = blk_len;
+ 	map.m_next_pgofs = &next_pgofs;
+ 	map.m_seg_type = NO_CHECK_TYPE;
+ 
+@@ -1974,12 +1963,23 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+ 	if (!compr_cluster && !(map.m_flags & F2FS_MAP_FLAGS)) {
+ 		start_blk = next_pgofs;
+ 
+-		if (blks_to_bytes(inode, start_blk) < maxbytes)
++		if (F2FS_BLK_TO_BYTES(start_blk) < maxbytes)
+ 			goto prep_next;
+ 
+ 		flags |= FIEMAP_EXTENT_LAST;
+ 	}
+ 
++	/*
++	 * The current extent may cross the boundary of the inquiry;
++	 * increase the length and query again.
++	 */
++	if (!compr_cluster && (map.m_flags & F2FS_MAP_MAPPED) &&
++				map.m_lblk + map.m_len - 1 == last_blk &&
++				blk_len != max_len) {
++		blk_len = max_len;
++		goto next;
++	}
++
+ 	compr_appended = false;
+ 	/* In a case of compressed cluster, append this to the last extent */
+ 	if (compr_cluster && ((map.m_flags & F2FS_MAP_DELALLOC) ||
+@@ -2011,14 +2011,14 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+ 	} else if (compr_appended) {
+ 		unsigned int appended_blks = cluster_size -
+ 						count_in_cluster + 1;
+-		size += blks_to_bytes(inode, appended_blks);
++		size += F2FS_BLK_TO_BYTES(appended_blks);
+ 		start_blk += appended_blks;
+ 		compr_cluster = false;
+ 	} else {
+-		logical = blks_to_bytes(inode, start_blk);
++		logical = F2FS_BLK_TO_BYTES(start_blk);
+ 		phys = __is_valid_data_blkaddr(map.m_pblk) ?
+-			blks_to_bytes(inode, map.m_pblk) : 0;
+-		size = blks_to_bytes(inode, map.m_len);
++			F2FS_BLK_TO_BYTES(map.m_pblk) : 0;
++		size = F2FS_BLK_TO_BYTES(map.m_len);
+ 		flags = 0;
+ 
+ 		if (compr_cluster) {
+@@ -2026,13 +2026,13 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+ 			count_in_cluster += map.m_len;
+ 			if (count_in_cluster == cluster_size) {
+ 				compr_cluster = false;
+-				size += blks_to_bytes(inode, 1);
++				size += F2FS_BLKSIZE;
+ 			}
+ 		} else if (map.m_flags & F2FS_MAP_DELALLOC) {
+ 			flags = FIEMAP_EXTENT_UNWRITTEN;
+ 		}
+ 
+-		start_blk += bytes_to_blks(inode, size);
++		start_blk += F2FS_BYTES_TO_BLK(size);
+ 	}
+ 
+ prep_next:
+@@ -2070,7 +2070,7 @@ static int f2fs_read_single_page(struct inode *inode, struct folio *folio,
+ 					struct readahead_control *rac)
+ {
+ 	struct bio *bio = *bio_ret;
+-	const unsigned blocksize = blks_to_bytes(inode, 1);
++	const unsigned int blocksize = F2FS_BLKSIZE;
+ 	sector_t block_in_file;
+ 	sector_t last_block;
+ 	sector_t last_block_in_file;
+@@ -2080,8 +2080,8 @@ static int f2fs_read_single_page(struct inode *inode, struct folio *folio,
+ 
+ 	block_in_file = (sector_t)index;
+ 	last_block = block_in_file + nr_pages;
+-	last_block_in_file = bytes_to_blks(inode,
+-			f2fs_readpage_limit(inode) + blocksize - 1);
++	last_block_in_file = F2FS_BYTES_TO_BLK(f2fs_readpage_limit(inode) +
++							blocksize - 1);
+ 	if (last_block > last_block_in_file)
+ 		last_block = last_block_in_file;
+ 
+@@ -2181,7 +2181,7 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
+ 	struct bio *bio = *bio_ret;
+ 	unsigned int start_idx = cc->cluster_idx << cc->log_cluster_size;
+ 	sector_t last_block_in_file;
+-	const unsigned blocksize = blks_to_bytes(inode, 1);
++	const unsigned int blocksize = F2FS_BLKSIZE;
+ 	struct decompress_io_ctx *dic = NULL;
+ 	struct extent_info ei = {};
+ 	bool from_dnode = true;
+@@ -2190,8 +2190,8 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
+ 
+ 	f2fs_bug_on(sbi, f2fs_cluster_is_empty(cc));
+ 
+-	last_block_in_file = bytes_to_blks(inode,
+-			f2fs_readpage_limit(inode) + blocksize - 1);
++	last_block_in_file = F2FS_BYTES_TO_BLK(f2fs_readpage_limit(inode) +
++							blocksize - 1);
+ 
+ 	/* get rid of pages beyond EOF */
+ 	for (i = 0; i < cc->cluster_size; i++) {
+@@ -3952,7 +3952,7 @@ static int check_swap_activate(struct swap_info_struct *sis,
+ 	 * to be very smart.
+ 	 */
+ 	cur_lblock = 0;
+-	last_lblock = bytes_to_blks(inode, i_size_read(inode));
++	last_lblock = F2FS_BYTES_TO_BLK(i_size_read(inode));
+ 
+ 	while (cur_lblock < last_lblock && cur_lblock < sis->max) {
+ 		struct f2fs_map_blocks map;
+@@ -4195,8 +4195,8 @@ static int f2fs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
+ 	pgoff_t next_pgofs = 0;
+ 	int err;
+ 
+-	map.m_lblk = bytes_to_blks(inode, offset);
+-	map.m_len = bytes_to_blks(inode, offset + length - 1) - map.m_lblk + 1;
++	map.m_lblk = F2FS_BYTES_TO_BLK(offset);
++	map.m_len = F2FS_BYTES_TO_BLK(offset + length - 1) - map.m_lblk + 1;
+ 	map.m_next_pgofs = &next_pgofs;
+ 	map.m_seg_type = f2fs_rw_hint_to_seg_type(F2FS_I_SB(inode),
+ 						inode->i_write_hint);
+@@ -4207,7 +4207,7 @@ static int f2fs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
+ 	if (err)
+ 		return err;
+ 
+-	iomap->offset = blks_to_bytes(inode, map.m_lblk);
++	iomap->offset = F2FS_BLK_TO_BYTES(map.m_lblk);
+ 
+ 	/*
+ 	 * When inline encryption is enabled, sometimes I/O to an encrypted file
+@@ -4227,21 +4227,21 @@ static int f2fs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
+ 		if (WARN_ON_ONCE(map.m_pblk == NEW_ADDR))
+ 			return -EINVAL;
+ 
+-		iomap->length = blks_to_bytes(inode, map.m_len);
++		iomap->length = F2FS_BLK_TO_BYTES(map.m_len);
+ 		iomap->type = IOMAP_MAPPED;
+ 		iomap->flags |= IOMAP_F_MERGED;
+ 		iomap->bdev = map.m_bdev;
+-		iomap->addr = blks_to_bytes(inode, map.m_pblk);
++		iomap->addr = F2FS_BLK_TO_BYTES(map.m_pblk);
+ 	} else {
+ 		if (flags & IOMAP_WRITE)
+ 			return -ENOTBLK;
+ 
+ 		if (map.m_pblk == NULL_ADDR) {
+-			iomap->length = blks_to_bytes(inode, next_pgofs) -
+-								iomap->offset;
++			iomap->length = F2FS_BLK_TO_BYTES(next_pgofs) -
++							iomap->offset;
+ 			iomap->type = IOMAP_HOLE;
+ 		} else if (map.m_pblk == NEW_ADDR) {
+-			iomap->length = blks_to_bytes(inode, map.m_len);
++			iomap->length = F2FS_BLK_TO_BYTES(map.m_len);
+ 			iomap->type = IOMAP_UNWRITTEN;
+ 		} else {
+ 			f2fs_bug_on(F2FS_I_SB(inode), 1);
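These conversions replace the per-inode bytes_to_blks()/blks_to_bytes() helpers with the filesystem-wide macros, which is valid because f2fs uses a fixed 4 KiB block size. A standalone check of the fiemap block-range math (the macro definitions are reproduced here as assumptions matching the f2fs constants):

#include <stdio.h>

#define F2FS_BLKSIZE_BITS 12
#define F2FS_BYTES_TO_BLK(bytes) ((bytes) >> F2FS_BLKSIZE_BITS)
#define F2FS_BLK_TO_BYTES(blk)   ((blk) << F2FS_BLKSIZE_BITS)

int main(void)
{
	unsigned long long start = 5000, len = 10000;
	unsigned long long start_blk = F2FS_BYTES_TO_BLK(start);
	unsigned long long last_blk = F2FS_BYTES_TO_BLK(start + len - 1);

	/* a [5000, 15000) byte query touches blocks 1..3 */
	printf("blocks %llu..%llu (%llu blocks)\n",
	       start_blk, last_blk, last_blk - start_blk + 1);
	return 0;
}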
+diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
+index 62ac440d94168a..fb09c8e9bc5732 100644
+--- a/fs/f2fs/extent_cache.c
++++ b/fs/f2fs/extent_cache.c
+@@ -346,21 +346,22 @@ static struct extent_tree *__grab_extent_tree(struct inode *inode,
+ }
+ 
+ static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
+-					struct extent_tree *et)
++				struct extent_tree *et, unsigned int nr_shrink)
+ {
+ 	struct rb_node *node, *next;
+ 	struct extent_node *en;
+-	unsigned int count = atomic_read(&et->node_cnt);
++	unsigned int count;
+ 
+ 	node = rb_first_cached(&et->root);
+-	while (node) {
++
++	for (count = 0; node && count < nr_shrink; count++) {
+ 		next = rb_next(node);
+ 		en = rb_entry(node, struct extent_node, rb_node);
+ 		__release_extent_node(sbi, et, en);
+ 		node = next;
+ 	}
+ 
+-	return count - atomic_read(&et->node_cnt);
++	return count;
+ }
+ 
+ static void __drop_largest_extent(struct extent_tree *et,
+@@ -579,6 +580,30 @@ static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
+ 	return en;
+ }
+ 
++static unsigned int __destroy_extent_node(struct inode *inode,
++					enum extent_type type)
++{
++	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
++	struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
++	unsigned int nr_shrink = type == EX_READ ?
++				READ_EXTENT_CACHE_SHRINK_NUMBER :
++				AGE_EXTENT_CACHE_SHRINK_NUMBER;
++	unsigned int node_cnt = 0;
++
++	if (!et || !atomic_read(&et->node_cnt))
++		return 0;
++
++	while (atomic_read(&et->node_cnt)) {
++		write_lock(&et->lock);
++		node_cnt += __free_extent_tree(sbi, et, nr_shrink);
++		write_unlock(&et->lock);
++	}
++
++	f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
++
++	return node_cnt;
++}
++
+ static void __update_extent_tree_range(struct inode *inode,
+ 			struct extent_info *tei, enum extent_type type)
+ {
+@@ -649,7 +674,9 @@ static void __update_extent_tree_range(struct inode *inode,
+ 		}
+ 
+ 		if (end < org_end && (type != EX_READ ||
+-				org_end - end >= F2FS_MIN_EXTENT_LEN)) {
++			(org_end - end >= F2FS_MIN_EXTENT_LEN &&
++			atomic_read(&et->node_cnt) <
++					sbi->max_read_extent_count))) {
+ 			if (parts) {
+ 				__set_extent_info(&ei,
+ 					end, org_end - end,
+@@ -717,9 +744,6 @@ static void __update_extent_tree_range(struct inode *inode,
+ 		}
+ 	}
+ 
+-	if (is_inode_flag_set(inode, FI_NO_EXTENT))
+-		__free_extent_tree(sbi, et);
+-
+ 	if (et->largest_updated) {
+ 		et->largest_updated = false;
+ 		updated = true;
+@@ -737,6 +761,9 @@ static void __update_extent_tree_range(struct inode *inode,
+ out_read_extent_cache:
+ 	write_unlock(&et->lock);
+ 
++	if (is_inode_flag_set(inode, FI_NO_EXTENT))
++		__destroy_extent_node(inode, EX_READ);
++
+ 	if (updated)
+ 		f2fs_mark_inode_dirty_sync(inode, true);
+ }
+@@ -899,10 +926,14 @@ static unsigned int __shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink
+ 	list_for_each_entry_safe(et, next, &eti->zombie_list, list) {
+ 		if (atomic_read(&et->node_cnt)) {
+ 			write_lock(&et->lock);
+-			node_cnt += __free_extent_tree(sbi, et);
++			node_cnt += __free_extent_tree(sbi, et,
++					nr_shrink - node_cnt - tree_cnt);
+ 			write_unlock(&et->lock);
+ 		}
+-		f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
++
++		if (atomic_read(&et->node_cnt))
++			goto unlock_out;
++
+ 		list_del_init(&et->list);
+ 		radix_tree_delete(&eti->extent_tree_root, et->ino);
+ 		kmem_cache_free(extent_tree_slab, et);
+@@ -1041,23 +1072,6 @@ unsigned int f2fs_shrink_age_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink
+ 	return __shrink_extent_tree(sbi, nr_shrink, EX_BLOCK_AGE);
+ }
+ 
+-static unsigned int __destroy_extent_node(struct inode *inode,
+-					enum extent_type type)
+-{
+-	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+-	struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
+-	unsigned int node_cnt = 0;
+-
+-	if (!et || !atomic_read(&et->node_cnt))
+-		return 0;
+-
+-	write_lock(&et->lock);
+-	node_cnt = __free_extent_tree(sbi, et);
+-	write_unlock(&et->lock);
+-
+-	return node_cnt;
+-}
+-
+ void f2fs_destroy_extent_node(struct inode *inode)
+ {
+ 	__destroy_extent_node(inode, EX_READ);
+@@ -1066,7 +1080,6 @@ void f2fs_destroy_extent_node(struct inode *inode)
+ 
+ static void __drop_extent_tree(struct inode *inode, enum extent_type type)
+ {
+-	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ 	struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
+ 	bool updated = false;
+ 
+@@ -1074,7 +1087,6 @@ static void __drop_extent_tree(struct inode *inode, enum extent_type type)
+ 		return;
+ 
+ 	write_lock(&et->lock);
+-	__free_extent_tree(sbi, et);
+ 	if (type == EX_READ) {
+ 		set_inode_flag(inode, FI_NO_EXTENT);
+ 		if (et->largest.len) {
+@@ -1083,6 +1095,9 @@ static void __drop_extent_tree(struct inode *inode, enum extent_type type)
+ 		}
+ 	}
+ 	write_unlock(&et->lock);
++
++	__destroy_extent_node(inode, type);
++
+ 	if (updated)
+ 		f2fs_mark_inode_dirty_sync(inode, true);
+ }
+@@ -1156,6 +1171,7 @@ void f2fs_init_extent_cache_info(struct f2fs_sb_info *sbi)
+ 	sbi->hot_data_age_threshold = DEF_HOT_DATA_AGE_THRESHOLD;
+ 	sbi->warm_data_age_threshold = DEF_WARM_DATA_AGE_THRESHOLD;
+ 	sbi->last_age_weight = LAST_AGE_WEIGHT;
++	sbi->max_read_extent_count = DEF_MAX_READ_EXTENT_COUNT;
+ }
+ 
+ int __init f2fs_create_extent_cache(void)
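Teardown previously freed the whole extent tree under a single write_lock; __destroy_extent_node() now loops in bounded batches (READ_EXTENT_CACHE_SHRINK_NUMBER / AGE_EXTENT_CACHE_SHRINK_NUMBER), releasing the lock between rounds so a huge tree cannot pin it. A toy model of that batching (node count and batch size are illustrative):

#include <stdio.h>

static unsigned int node_cnt = 25000;  /* pretend tree size */

static unsigned int free_batch(unsigned int nr_shrink)
{
	unsigned int n = node_cnt < nr_shrink ? node_cnt : nr_shrink;

	node_cnt -= n;
	return n;
}

int main(void)
{
	unsigned int freed = 0, batches = 0;

	while (node_cnt) {
		/* write_lock(&et->lock); */
		freed += free_batch(128);
		/* write_unlock(&et->lock); */
		batches++;
	}
	printf("freed %u nodes in %u batches\n", freed, batches);
	return 0;
}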
+diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
+index 93a5e1c24e566e..cec3dd205b3df8 100644
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -634,6 +634,9 @@ enum {
+ #define DEF_HOT_DATA_AGE_THRESHOLD	262144
+ #define DEF_WARM_DATA_AGE_THRESHOLD	2621440
+ 
++/* default max read extent count per inode */
++#define DEF_MAX_READ_EXTENT_COUNT	10240
++
+ /* extent cache type */
+ enum extent_type {
+ 	EX_READ,
+@@ -1619,6 +1622,7 @@ struct f2fs_sb_info {
+ 	/* for extent tree cache */
+ 	struct extent_tree_info extent_tree[NR_EXTENT_CACHES];
+ 	atomic64_t allocated_data_blocks;	/* for block age extent_cache */
++	unsigned int max_read_extent_count;	/* max read extent count per inode */
+ 
+ 	/* The threshold used for hot and warm data separation */
+ 	unsigned int hot_data_age_threshold;
+diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
+index 1ed86df343a5d1..10780e37fc7b68 100644
+--- a/fs/f2fs/inode.c
++++ b/fs/f2fs/inode.c
+@@ -775,8 +775,10 @@ int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
+ 		!is_inode_flag_set(inode, FI_DIRTY_INODE))
+ 		return 0;
+ 
+-	if (!f2fs_is_checkpoint_ready(sbi))
++	if (!f2fs_is_checkpoint_ready(sbi)) {
++		f2fs_mark_inode_dirty_sync(inode, true);
+ 		return -ENOSPC;
++	}
+ 
+ 	/*
+ 	 * We need to balance fs here to prevent from producing dirty node pages
+diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
+index af36c6d6542b8c..4d7b9fd6ef31ab 100644
+--- a/fs/f2fs/node.c
++++ b/fs/f2fs/node.c
+@@ -1341,7 +1341,12 @@ struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs)
+ 		err = -EFSCORRUPTED;
+ 		dec_valid_node_count(sbi, dn->inode, !ofs);
+ 		set_sbi_flag(sbi, SBI_NEED_FSCK);
+-		f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
++		f2fs_warn_ratelimited(sbi,
++			"f2fs_new_node_page: inconsistent nat entry, "
++			"ino:%u, nid:%u, blkaddr:%u, ver:%u, flag:%u",
++			new_ni.ino, new_ni.nid, new_ni.blk_addr,
++			new_ni.version, new_ni.flag);
++		f2fs_handle_error(sbi, ERROR_INCONSISTENT_NAT);
+ 		goto fail;
+ 	}
+ #endif
+diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
+index c56e8c87393523..d9a44f03e558bf 100644
+--- a/fs/f2fs/sysfs.c
++++ b/fs/f2fs/sysfs.c
+@@ -789,6 +789,13 @@ static ssize_t __sbi_store(struct f2fs_attr *a,
+ 		return count;
+ 	}
+ 
++	if (!strcmp(a->attr.name, "max_read_extent_count")) {
++		if (t > UINT_MAX)
++			return -EINVAL;
++		*ui = (unsigned int)t;
++		return count;
++	}
++
+ 	if (!strcmp(a->attr.name, "ipu_policy")) {
+ 		if (t >= BIT(F2FS_IPU_MAX))
+ 			return -EINVAL;
+@@ -1054,6 +1061,8 @@ F2FS_SBI_GENERAL_RW_ATTR(revoked_atomic_block);
+ F2FS_SBI_GENERAL_RW_ATTR(hot_data_age_threshold);
+ F2FS_SBI_GENERAL_RW_ATTR(warm_data_age_threshold);
+ F2FS_SBI_GENERAL_RW_ATTR(last_age_weight);
++/* read extent cache */
++F2FS_SBI_GENERAL_RW_ATTR(max_read_extent_count);
+ #ifdef CONFIG_BLK_DEV_ZONED
+ F2FS_SBI_GENERAL_RO_ATTR(unusable_blocks_per_sec);
+ F2FS_SBI_GENERAL_RW_ATTR(blkzone_alloc_policy);
+@@ -1244,6 +1253,7 @@ static struct attribute *f2fs_attrs[] = {
+ 	ATTR_LIST(hot_data_age_threshold),
+ 	ATTR_LIST(warm_data_age_threshold),
+ 	ATTR_LIST(last_age_weight),
++	ATTR_LIST(max_read_extent_count),
+ 	NULL,
+ };
+ ATTRIBUTE_GROUPS(f2fs);
+diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
+index e22c1edc32b39e..b9cef63c78717f 100644
+--- a/fs/gfs2/super.c
++++ b/fs/gfs2/super.c
+@@ -1537,11 +1537,13 @@ static struct inode *gfs2_alloc_inode(struct super_block *sb)
+ 	if (!ip)
+ 		return NULL;
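++	/* Hypothetical annotation: reset fields that may hold stale values from a previously freed inode. */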
+ 	ip->i_no_addr = 0;
++	ip->i_no_formal_ino = 0;
+ 	ip->i_flags = 0;
+ 	ip->i_gl = NULL;
+ 	gfs2_holder_mark_uninitialized(&ip->i_iopen_gh);
+ 	memset(&ip->i_res, 0, sizeof(ip->i_res));
+ 	RB_CLEAR_NODE(&ip->i_res.rs_node);
++	ip->i_diskflags = 0;
+ 	ip->i_rahead = 0;
+ 	return &ip->i_inode;
+ }
+diff --git a/fs/jffs2/compr_rtime.c b/fs/jffs2/compr_rtime.c
+index 79e771ab624f47..3bd9d2f3bece20 100644
+--- a/fs/jffs2/compr_rtime.c
++++ b/fs/jffs2/compr_rtime.c
+@@ -95,6 +95,9 @@ static int jffs2_rtime_decompress(unsigned char *data_in,
+ 
+ 		positions[value]=outpos;
+ 		if (repeat) {
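++			/* Never write past the end of the output buffer. */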
++			if ((outpos + repeat) > destlen) {
++				return 1;
++			}
+ 			if (backoffs + repeat >= outpos) {
+ 				while(repeat) {
+ 					cpage_out[outpos++] = cpage_out[backoffs++];
+diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
+index 3ab410059dc202..f9009e4f9ffd89 100644
+--- a/fs/jfs/jfs_dmap.c
++++ b/fs/jfs/jfs_dmap.c
+@@ -1820,6 +1820,9 @@ dbAllocCtl(struct bmap * bmp, s64 nblocks, int l2nb, s64 blkno, s64 * results)
+ 			return -EIO;
+ 		dp = (struct dmap *) mp->data;
+ 
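++		/* A negative budmin indicates a corrupted on-disk dmap tree. */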
++		if (dp->tree.budmin < 0)
++			return -EIO;
++
+ 		/* try to allocate the blocks.
+ 		 */
+ 		rc = dbAllocDmapLev(bmp, dp, (int) nblocks, l2nb, results);
+@@ -2888,6 +2891,9 @@ static void dbAdjTree(dmtree_t *tp, int leafno, int newval, bool is_ctl)
+ 	/* bubble the new value up the tree as required.
+ 	 */
+ 	for (k = 0; k < le32_to_cpu(tp->dmt_height); k++) {
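++		/* Stop once we reach the top of the tree; index 0 has no parent. */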
++		if (lp == 0)
++			break;
++
+ 		/* get the index of the first leaf of the 4 leaf
+ 		 * group containing the specified leaf (leafno).
+ 		 */
+diff --git a/fs/jfs/jfs_dtree.c b/fs/jfs/jfs_dtree.c
+index 5d3127ca68a42d..8f85177f284b5a 100644
+--- a/fs/jfs/jfs_dtree.c
++++ b/fs/jfs/jfs_dtree.c
+@@ -2891,6 +2891,14 @@ int jfs_readdir(struct file *file, struct dir_context *ctx)
+ 		stbl = DT_GETSTBL(p);
+ 
+ 		for (i = index; i < p->header.nextindex; i++) {
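++			/* stbl[] entries index p->slot[], which holds at most 128 slots. */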
++			if (stbl[i] < 0 || stbl[i] > 127) {
++				jfs_err("JFS: Invalid stbl[%d] = %d for inode %ld, block = %lld",
++					i, stbl[i], (long)ip->i_ino, (long long)bn);
++				free_page(dirent_buf);
++				DT_PUTPAGE(mp);
++				return -EIO;
++			}
++
+ 			d = (struct ldtentry *) & p->slot[stbl[i]];
+ 
+ 			if (((long) jfs_dirent + d->namlen + 1) >
+@@ -3086,6 +3094,13 @@ static int dtReadFirst(struct inode *ip, struct btstack * btstack)
+ 
+ 		/* get the leftmost entry */
+ 		stbl = DT_GETSTBL(p);
++
++		if (stbl[0] < 0 || stbl[0] > 127) {
++			DT_PUTPAGE(mp);
++			jfs_error(ip->i_sb, "stbl[0] out of bounds\n");
++			return -EIO;
++		}
++
+ 		xd = (pxd_t *) & p->slot[stbl[0]];
+ 
+ 		/* get the child page block address */
+diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c
+index a8602729586ab7..f61c58fbf117d3 100644
+--- a/fs/nilfs2/dir.c
++++ b/fs/nilfs2/dir.c
+@@ -70,7 +70,7 @@ static inline unsigned int nilfs_chunk_size(struct inode *inode)
+  */
+ static unsigned int nilfs_last_byte(struct inode *inode, unsigned long page_nr)
+ {
+-	unsigned int last_byte = inode->i_size;
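++	/* i_size can exceed 32 bits; keep this arithmetic in 64 bits to avoid truncation. */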
++	u64 last_byte = inode->i_size;
+ 
+ 	last_byte -= page_nr << PAGE_SHIFT;
+ 	if (last_byte > PAGE_SIZE)
+diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
+index 9644bc72e4573b..8e2d43fc6f7c1f 100644
+--- a/fs/notify/fanotify/fanotify_user.c
++++ b/fs/notify/fanotify/fanotify_user.c
+@@ -266,13 +266,6 @@ static int create_fd(struct fsnotify_group *group, const struct path *path,
+ 			       group->fanotify_data.f_flags | __FMODE_NONOTIFY,
+ 			       current_cred());
+ 	if (IS_ERR(new_file)) {
+-		/*
+-		 * we still send an event even if we can't open the file.  this
+-		 * can happen when say tasks are gone and we try to open their
+-		 * /proc files or we try to open a WRONLY file like in sysfs
+-		 * we just send the errno to userspace since there isn't much
+-		 * else we can do.
+-		 */
+ 		put_unused_fd(client_fd);
+ 		client_fd = PTR_ERR(new_file);
+ 	} else {
+@@ -663,7 +656,7 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
+ 	unsigned int info_mode = FAN_GROUP_FLAG(group, FANOTIFY_INFO_MODES);
+ 	unsigned int pidfd_mode = info_mode & FAN_REPORT_PIDFD;
+ 	struct file *f = NULL, *pidfd_file = NULL;
+-	int ret, pidfd = FAN_NOPIDFD, fd = FAN_NOFD;
++	int ret, pidfd = -ESRCH, fd = -EBADF;
+ 
+ 	pr_debug("%s: group=%p event=%p\n", __func__, group, event);
+ 
+@@ -691,10 +684,39 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
+ 	if (!FAN_GROUP_FLAG(group, FANOTIFY_UNPRIV) &&
+ 	    path && path->mnt && path->dentry) {
+ 		fd = create_fd(group, path, &f);
+-		if (fd < 0)
+-			return fd;
++		/*
++		 * Opening an fd from dentry can fail for several reasons.
++		 * For example, when tasks are gone and we try to open their
++		 * /proc files or we try to open a WRONLY file like in sysfs
++		 * or when trying to open a file that was deleted on the
++		 * remote network server.
++		 *
++		 * For a group with FAN_REPORT_FD_ERROR, we will send the
++		 * event with the error instead of the open fd; otherwise
++		 * userspace may not get the error at all.
++		 * In any case, userspace will not know which file failed to
++		 * open, so add a debug print for further investigation.
++		 */
++		if (fd < 0) {
++			pr_debug("fanotify: create_fd(%pd2) failed err=%d\n",
++				 path->dentry, fd);
++			if (!FAN_GROUP_FLAG(group, FAN_REPORT_FD_ERROR)) {
++				/*
++				 * Historically, we've handled EOPENSTALE in a
++				 * special way and silently dropped such
++				 * events. Now we have to keep it to maintain
++				 * backward compatibility...
++				 */
++				if (fd == -EOPENSTALE)
++					fd = 0;
++				return fd;
++			}
++		}
+ 	}
+-	metadata.fd = fd;
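++	/* Without FAN_REPORT_FD_ERROR, keep the old ABI and report failures as FAN_NOFD. */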
++	if (FAN_GROUP_FLAG(group, FAN_REPORT_FD_ERROR))
++		metadata.fd = fd;
++	else
++		metadata.fd = fd >= 0 ? fd : FAN_NOFD;
+ 
+ 	if (pidfd_mode) {
+ 		/*
+@@ -709,18 +731,16 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
+ 		 * The PIDTYPE_TGID check for an event->pid is performed
+ 		 * preemptively in an attempt to catch out cases where the event
+ 		 * listener reads events after the event generating process has
+-		 * already terminated. Report FAN_NOPIDFD to the event listener
+-		 * in those cases, with all other pidfd creation errors being
+-		 * reported as FAN_EPIDFD.
++		 * already terminated.  Depending on the FAN_REPORT_FD_ERROR flag,
++		 * report either -ESRCH or FAN_NOPIDFD to the event listener in
++		 * those cases, with all other pidfd creation errors reported as
++		 * the error code itself or as FAN_EPIDFD.
+ 		 */
+-		if (metadata.pid == 0 ||
+-		    !pid_has_task(event->pid, PIDTYPE_TGID)) {
+-			pidfd = FAN_NOPIDFD;
+-		} else {
++		if (metadata.pid && pid_has_task(event->pid, PIDTYPE_TGID))
+ 			pidfd = pidfd_prepare(event->pid, 0, &pidfd_file);
+-			if (pidfd < 0)
+-				pidfd = FAN_EPIDFD;
+-		}
++
++		if (!FAN_GROUP_FLAG(group, FAN_REPORT_FD_ERROR) && pidfd < 0)
++			pidfd = pidfd == -ESRCH ? FAN_NOPIDFD : FAN_EPIDFD;
+ 	}
+ 
+ 	ret = -EFAULT;
+@@ -737,9 +757,6 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
+ 	buf += FAN_EVENT_METADATA_LEN;
+ 	count -= FAN_EVENT_METADATA_LEN;
+ 
+-	if (fanotify_is_perm_event(event->mask))
+-		FANOTIFY_PERM(event)->fd = fd;
+-
+ 	if (info_mode) {
+ 		ret = copy_info_records_to_user(event, info, info_mode, pidfd,
+ 						buf, count);
+@@ -753,15 +770,18 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
+ 	if (pidfd_file)
+ 		fd_install(pidfd, pidfd_file);
+ 
++	if (fanotify_is_perm_event(event->mask))
++		FANOTIFY_PERM(event)->fd = fd;
++
+ 	return metadata.event_len;
+ 
+ out_close_fd:
+-	if (fd != FAN_NOFD) {
++	if (f) {
+ 		put_unused_fd(fd);
+ 		fput(f);
+ 	}
+ 
+-	if (pidfd >= 0) {
++	if (pidfd_file) {
+ 		put_unused_fd(pidfd);
+ 		fput(pidfd_file);
+ 	}
+@@ -828,15 +848,6 @@ static ssize_t fanotify_read(struct file *file, char __user *buf,
+ 		}
+ 
+ 		ret = copy_event_to_user(group, event, buf, count);
+-		if (unlikely(ret == -EOPENSTALE)) {
+-			/*
+-			 * We cannot report events with stale fd so drop it.
+-			 * Setting ret to 0 will continue the event loop and
+-			 * do the right thing if there are no more events to
+-			 * read (i.e. return bytes read, -EAGAIN or wait).
+-			 */
+-			ret = 0;
+-		}
+ 
+ 		/*
+ 		 * Permission events get queued to wait for response.  Other
+@@ -845,7 +856,7 @@ static ssize_t fanotify_read(struct file *file, char __user *buf,
+ 		if (!fanotify_is_perm_event(event->mask)) {
+ 			fsnotify_destroy_event(group, &event->fse);
+ 		} else {
+-			if (ret <= 0) {
++			if (ret <= 0 || FANOTIFY_PERM(event)->fd < 0) {
+ 				spin_lock(&group->notification_lock);
+ 				finish_permission_event(group,
+ 					FANOTIFY_PERM(event), FAN_DENY, NULL);
+@@ -1954,7 +1965,7 @@ static int __init fanotify_user_setup(void)
+ 				     FANOTIFY_DEFAULT_MAX_USER_MARKS);
+ 
+ 	BUILD_BUG_ON(FANOTIFY_INIT_FLAGS & FANOTIFY_INTERNAL_GROUP_FLAGS);
+-	BUILD_BUG_ON(HWEIGHT32(FANOTIFY_INIT_FLAGS) != 12);
++	BUILD_BUG_ON(HWEIGHT32(FANOTIFY_INIT_FLAGS) != 13);
+ 	BUILD_BUG_ON(HWEIGHT32(FANOTIFY_MARK_FLAGS) != 11);
+ 
+ 	fanotify_mark_cache = KMEM_CACHE(fanotify_mark,
+diff --git a/fs/ntfs3/attrib.c b/fs/ntfs3/attrib.c
+index 0763202d00c992..8d789b017fa9b6 100644
+--- a/fs/ntfs3/attrib.c
++++ b/fs/ntfs3/attrib.c
+@@ -977,7 +977,7 @@ int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
+ 
+ 	/* Check for compressed frame. */
+ 	err = attr_is_frame_compressed(ni, attr_b, vcn >> NTFS_LZNT_CUNIT,
+-				       &hint);
++				       &hint, run);
+ 	if (err)
+ 		goto out;
+ 
+@@ -1521,16 +1521,16 @@ int attr_wof_frame_info(struct ntfs_inode *ni, struct ATTRIB *attr,
+  * attr_is_frame_compressed - Used to detect compressed frame.
+  *
+  * attr - base (primary) attribute segment.
++ * run  - run to use, usually == &ni->file.run.
+  * Only base segments contain valid 'attr->nres.c_unit'
+  */
+ int attr_is_frame_compressed(struct ntfs_inode *ni, struct ATTRIB *attr,
+-			     CLST frame, CLST *clst_data)
++			     CLST frame, CLST *clst_data, struct runs_tree *run)
+ {
+ 	int err;
+ 	u32 clst_frame;
+ 	CLST clen, lcn, vcn, alen, slen, vcn_next;
+ 	size_t idx;
+-	struct runs_tree *run;
+ 
+ 	*clst_data = 0;
+ 
+@@ -1542,7 +1542,6 @@ int attr_is_frame_compressed(struct ntfs_inode *ni, struct ATTRIB *attr,
+ 
+ 	clst_frame = 1u << attr->nres.c_unit;
+ 	vcn = frame * clst_frame;
+-	run = &ni->file.run;
+ 
+ 	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
+ 		err = attr_load_runs_vcn(ni, attr->type, attr_name(attr),
+@@ -1678,7 +1677,7 @@ int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
+ 	if (err)
+ 		goto out;
+ 
+-	err = attr_is_frame_compressed(ni, attr_b, frame, &clst_data);
++	err = attr_is_frame_compressed(ni, attr_b, frame, &clst_data, run);
+ 	if (err)
+ 		goto out;
+ 
+diff --git a/fs/ntfs3/frecord.c b/fs/ntfs3/frecord.c
+index 41c7ffad279016..c33e818b3164cd 100644
+--- a/fs/ntfs3/frecord.c
++++ b/fs/ntfs3/frecord.c
+@@ -1900,46 +1900,6 @@ enum REPARSE_SIGN ni_parse_reparse(struct ntfs_inode *ni, struct ATTRIB *attr,
+ 	return REPARSE_LINK;
+ }
+ 
+-/*
+- * fiemap_fill_next_extent_k - a copy of fiemap_fill_next_extent
+- * but it uses 'fe_k' instead of fieinfo->fi_extents_start
+- */
+-static int fiemap_fill_next_extent_k(struct fiemap_extent_info *fieinfo,
+-				     struct fiemap_extent *fe_k, u64 logical,
+-				     u64 phys, u64 len, u32 flags)
+-{
+-	struct fiemap_extent extent;
+-
+-	/* only count the extents */
+-	if (fieinfo->fi_extents_max == 0) {
+-		fieinfo->fi_extents_mapped++;
+-		return (flags & FIEMAP_EXTENT_LAST) ? 1 : 0;
+-	}
+-
+-	if (fieinfo->fi_extents_mapped >= fieinfo->fi_extents_max)
+-		return 1;
+-
+-	if (flags & FIEMAP_EXTENT_DELALLOC)
+-		flags |= FIEMAP_EXTENT_UNKNOWN;
+-	if (flags & FIEMAP_EXTENT_DATA_ENCRYPTED)
+-		flags |= FIEMAP_EXTENT_ENCODED;
+-	if (flags & (FIEMAP_EXTENT_DATA_TAIL | FIEMAP_EXTENT_DATA_INLINE))
+-		flags |= FIEMAP_EXTENT_NOT_ALIGNED;
+-
+-	memset(&extent, 0, sizeof(extent));
+-	extent.fe_logical = logical;
+-	extent.fe_physical = phys;
+-	extent.fe_length = len;
+-	extent.fe_flags = flags;
+-
+-	memcpy(fe_k + fieinfo->fi_extents_mapped, &extent, sizeof(extent));
+-
+-	fieinfo->fi_extents_mapped++;
+-	if (fieinfo->fi_extents_mapped == fieinfo->fi_extents_max)
+-		return 1;
+-	return (flags & FIEMAP_EXTENT_LAST) ? 1 : 0;
+-}
+-
+ /*
+  * ni_fiemap - Helper for file_fiemap().
+  *
+@@ -1950,11 +1910,9 @@ int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo,
+ 	      __u64 vbo, __u64 len)
+ {
+ 	int err = 0;
+-	struct fiemap_extent *fe_k = NULL;
+ 	struct ntfs_sb_info *sbi = ni->mi.sbi;
+ 	u8 cluster_bits = sbi->cluster_bits;
+-	struct runs_tree *run;
+-	struct rw_semaphore *run_lock;
++	struct runs_tree run;
+ 	struct ATTRIB *attr;
+ 	CLST vcn = vbo >> cluster_bits;
+ 	CLST lcn, clen;
+@@ -1965,13 +1923,11 @@ int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo,
+ 	u32 flags;
+ 	bool ok;
+ 
++	run_init(&run);
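++	/* A local run table avoids juggling the per-inode run locks while filling extents. */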
+ 	if (S_ISDIR(ni->vfs_inode.i_mode)) {
+-		run = &ni->dir.alloc_run;
+ 		attr = ni_find_attr(ni, NULL, NULL, ATTR_ALLOC, I30_NAME,
+ 				    ARRAY_SIZE(I30_NAME), NULL, NULL);
+-		run_lock = &ni->dir.run_lock;
+ 	} else {
+-		run = &ni->file.run;
+ 		attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL,
+ 				    NULL);
+ 		if (!attr) {
+@@ -1986,7 +1942,6 @@ int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo,
+ 				"fiemap is not supported for compressed file (cp -r)");
+ 			goto out;
+ 		}
+-		run_lock = &ni->file.run_lock;
+ 	}
+ 
+ 	if (!attr || !attr->non_res) {
+@@ -1998,51 +1953,33 @@ int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo,
+ 		goto out;
+ 	}
+ 
+-	/*
+-	 * To avoid lock problems replace pointer to user memory by pointer to kernel memory.
+-	 */
+-	fe_k = kmalloc_array(fieinfo->fi_extents_max,
+-			     sizeof(struct fiemap_extent),
+-			     GFP_NOFS | __GFP_ZERO);
+-	if (!fe_k) {
+-		err = -ENOMEM;
+-		goto out;
+-	}
+-
+ 	end = vbo + len;
+ 	alloc_size = le64_to_cpu(attr->nres.alloc_size);
+ 	if (end > alloc_size)
+ 		end = alloc_size;
+ 
+-	down_read(run_lock);
+ 
+ 	while (vbo < end) {
+ 		if (idx == -1) {
+-			ok = run_lookup_entry(run, vcn, &lcn, &clen, &idx);
++			ok = run_lookup_entry(&run, vcn, &lcn, &clen, &idx);
+ 		} else {
+ 			CLST vcn_next = vcn;
+ 
+-			ok = run_get_entry(run, ++idx, &vcn, &lcn, &clen) &&
++			ok = run_get_entry(&run, ++idx, &vcn, &lcn, &clen) &&
+ 			     vcn == vcn_next;
+ 			if (!ok)
+ 				vcn = vcn_next;
+ 		}
+ 
+ 		if (!ok) {
+-			up_read(run_lock);
+-			down_write(run_lock);
+-
+ 			err = attr_load_runs_vcn(ni, attr->type,
+ 						 attr_name(attr),
+-						 attr->name_len, run, vcn);
+-
+-			up_write(run_lock);
+-			down_read(run_lock);
++						 attr->name_len, &run, vcn);
+ 
+ 			if (err)
+ 				break;
+ 
+-			ok = run_lookup_entry(run, vcn, &lcn, &clen, &idx);
++			ok = run_lookup_entry(&run, vcn, &lcn, &clen, &idx);
+ 
+ 			if (!ok) {
+ 				err = -EINVAL;
+@@ -2067,8 +2004,9 @@ int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo,
+ 		} else if (is_attr_compressed(attr)) {
+ 			CLST clst_data;
+ 
+-			err = attr_is_frame_compressed(
+-				ni, attr, vcn >> attr->nres.c_unit, &clst_data);
++			err = attr_is_frame_compressed(ni, attr,
++						       vcn >> attr->nres.c_unit,
++						       &clst_data, &run);
+ 			if (err)
+ 				break;
+ 			if (clst_data < NTFS_LZNT_CLUSTERS)
+@@ -2097,8 +2035,8 @@ int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo,
+ 			if (vbo + dlen >= end)
+ 				flags |= FIEMAP_EXTENT_LAST;
+ 
+-			err = fiemap_fill_next_extent_k(fieinfo, fe_k, vbo, lbo,
+-							dlen, flags);
++			err = fiemap_fill_next_extent(fieinfo, vbo, lbo, dlen,
++						      flags);
+ 
+ 			if (err < 0)
+ 				break;
+@@ -2119,8 +2057,7 @@ int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo,
+ 		if (vbo + bytes >= end)
+ 			flags |= FIEMAP_EXTENT_LAST;
+ 
+-		err = fiemap_fill_next_extent_k(fieinfo, fe_k, vbo, lbo, bytes,
+-						flags);
++		err = fiemap_fill_next_extent(fieinfo, vbo, lbo, bytes, flags);
+ 		if (err < 0)
+ 			break;
+ 		if (err == 1) {
+@@ -2131,19 +2068,8 @@ int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo,
+ 		vbo += bytes;
+ 	}
+ 
+-	up_read(run_lock);
+-
+-	/*
+-	 * Copy to user memory out of lock
+-	 */
+-	if (copy_to_user(fieinfo->fi_extents_start, fe_k,
+-			 fieinfo->fi_extents_max *
+-				 sizeof(struct fiemap_extent))) {
+-		err = -EFAULT;
+-	}
+-
+ out:
+-	kfree(fe_k);
++	run_close(&run);
+ 	return err;
+ }
+ 
+@@ -2672,7 +2598,8 @@ int ni_read_frame(struct ntfs_inode *ni, u64 frame_vbo, struct page **pages,
+ 		down_write(&ni->file.run_lock);
+ 		run_truncate_around(run, le64_to_cpu(attr->nres.svcn));
+ 		frame = frame_vbo >> (cluster_bits + NTFS_LZNT_CUNIT);
+-		err = attr_is_frame_compressed(ni, attr, frame, &clst_data);
++		err = attr_is_frame_compressed(ni, attr, frame, &clst_data,
++					       run);
+ 		up_write(&ni->file.run_lock);
+ 		if (err)
+ 			goto out1;
+diff --git a/fs/ntfs3/ntfs_fs.h b/fs/ntfs3/ntfs_fs.h
+index 26e1e1379c04e2..cd8e8374bb5a0a 100644
+--- a/fs/ntfs3/ntfs_fs.h
++++ b/fs/ntfs3/ntfs_fs.h
+@@ -446,7 +446,8 @@ int attr_wof_frame_info(struct ntfs_inode *ni, struct ATTRIB *attr,
+ 			struct runs_tree *run, u64 frame, u64 frames,
+ 			u8 frame_bits, u32 *ondisk_size, u64 *vbo_data);
+ int attr_is_frame_compressed(struct ntfs_inode *ni, struct ATTRIB *attr,
+-			     CLST frame, CLST *clst_data);
++			     CLST frame, CLST *clst_data,
++			     struct runs_tree *run);
+ int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
+ 			u64 new_valid);
+ int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes);
+diff --git a/fs/ntfs3/run.c b/fs/ntfs3/run.c
+index 58e988cd80490d..48566dff0dc92b 100644
+--- a/fs/ntfs3/run.c
++++ b/fs/ntfs3/run.c
+@@ -1055,8 +1055,8 @@ int run_unpack_ex(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
+ {
+ 	int ret, err;
+ 	CLST next_vcn, lcn, len;
+-	size_t index;
+-	bool ok;
++	size_t index, done;
++	bool ok, zone;
+ 	struct wnd_bitmap *wnd;
+ 
+ 	ret = run_unpack(run, sbi, ino, svcn, evcn, vcn, run_buf, run_buf_size);
+@@ -1087,8 +1087,9 @@ int run_unpack_ex(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
+ 			continue;
+ 
+ 		down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
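++		/* True when [lcn, lcn + len) overlaps the reserved MFT zone. */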
++		zone = max(wnd->zone_bit, lcn) < min(wnd->zone_end, lcn + len);
+ 		/* Check for free blocks. */
+-		ok = wnd_is_used(wnd, lcn, len);
++		ok = !zone && wnd_is_used(wnd, lcn, len);
+ 		up_read(&wnd->rw_lock);
+ 		if (ok)
+ 			continue;
+@@ -1096,14 +1097,33 @@ int run_unpack_ex(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
+ 		/* Looks like volume is corrupted. */
+ 		ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
+ 
+-		if (down_write_trylock(&wnd->rw_lock)) {
+-			/* Mark all zero bits as used in range [lcn, lcn+len). */
+-			size_t done;
+-			err = wnd_set_used_safe(wnd, lcn, len, &done);
+-			up_write(&wnd->rw_lock);
+-			if (err)
+-				return err;
++		if (!down_write_trylock(&wnd->rw_lock))
++			continue;
++
++		if (zone) {
++			/*
++			 * Range [lcn, lcn + len) intersects with the zone.
++			 * To avoid dealing with the zone here, just turn it off.
++			 */
++			wnd_zone_set(wnd, 0, 0);
++		}
++
++		/* Mark all zero bits as used in range [lcn, lcn+len). */
++		err = wnd_set_used_safe(wnd, lcn, len, &done);
++		if (zone) {
++			/* Restore the zone; refreshing it requires the MFT run lock. */
++			struct rw_semaphore *lock;
++			lock = is_mounted(sbi) ? &sbi->mft.ni->file.run_lock :
++						 NULL;
++			if (lock)
++				down_read(lock);
++			ntfs_refresh_zone(sbi);
++			if (lock)
++				up_read(lock);
+ 		}
++		up_write(&wnd->rw_lock);
++		if (err)
++			return err;
+ 	}
+ 
+ 	return ret;
+diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
+index 60df52e4c1f878..764ecbd5ad41dd 100644
+--- a/fs/ocfs2/dlmglue.c
++++ b/fs/ocfs2/dlmglue.c
+@@ -3110,6 +3110,7 @@ static void *ocfs2_dlm_seq_next(struct seq_file *m, void *v, loff_t *pos)
+ 	struct ocfs2_lock_res *iter = v;
+ 	struct ocfs2_lock_res *dummy = &priv->p_iter_res;
+ 
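++	/* Advance *pos so the seq_file iterator always makes progress. */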
++	(*pos)++;
+ 	spin_lock(&ocfs2_dlm_tracking_lock);
+ 	iter = ocfs2_dlm_next_res(iter, priv);
+ 	list_del_init(&dummy->l_debug_list);
+diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
+index 8ac42ea81a17bd..5df34561c551c6 100644
+--- a/fs/ocfs2/localalloc.c
++++ b/fs/ocfs2/localalloc.c
+@@ -1002,25 +1002,6 @@ static int ocfs2_sync_local_to_main(struct ocfs2_super *osb,
+ 		start = bit_off + 1;
+ 	}
+ 
+-	/* clear the contiguous bits until the end boundary */
+-	if (count) {
+-		blkno = la_start_blk +
+-			ocfs2_clusters_to_blocks(osb->sb,
+-					start - count);
+-
+-		trace_ocfs2_sync_local_to_main_free(
+-				count, start - count,
+-				(unsigned long long)la_start_blk,
+-				(unsigned long long)blkno);
+-
+-		status = ocfs2_release_clusters(handle,
+-				main_bm_inode,
+-				main_bm_bh, blkno,
+-				count);
+-		if (status < 0)
+-			mlog_errno(status);
+-	}
+-
+ bail:
+ 	if (status)
+ 		mlog_errno(status);
+diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
+index 59c92353151a85..5550f8afa43802 100644
+--- a/fs/ocfs2/namei.c
++++ b/fs/ocfs2/namei.c
+@@ -200,8 +200,10 @@ static struct inode *ocfs2_get_init_inode(struct inode *dir, umode_t mode)
+ 	mode = mode_strip_sgid(&nop_mnt_idmap, dir, mode);
+ 	inode_init_owner(&nop_mnt_idmap, inode, dir, mode);
+ 	status = dquot_initialize(inode);
+-	if (status)
++	if (status) {
++		iput(inode);
+ 		return ERR_PTR(status);
++	}
+ 
+ 	return inode;
+ }
+diff --git a/fs/smb/client/cifsproto.h b/fs/smb/client/cifsproto.h
+index 0c6468844c4b54..a697e53ccee2be 100644
+--- a/fs/smb/client/cifsproto.h
++++ b/fs/smb/client/cifsproto.h
+@@ -677,6 +677,7 @@ int __cifs_sfu_make_node(unsigned int xid, struct inode *inode,
+ int cifs_sfu_make_node(unsigned int xid, struct inode *inode,
+ 		       struct dentry *dentry, struct cifs_tcon *tcon,
+ 		       const char *full_path, umode_t mode, dev_t dev);
++umode_t wire_mode_to_posix(u32 wire, bool is_dir);
+ 
+ #ifdef CONFIG_CIFS_DFS_UPCALL
+ static inline int get_dfs_path(const unsigned int xid, struct cifs_ses *ses,
+diff --git a/fs/smb/client/cifssmb.c b/fs/smb/client/cifssmb.c
+index c6f15dbe860a41..0eae60731c20c0 100644
+--- a/fs/smb/client/cifssmb.c
++++ b/fs/smb/client/cifssmb.c
+@@ -5406,7 +5406,7 @@ CIFSSMBSetPathInfo(const unsigned int xid, struct cifs_tcon *tcon,
+ 	param_offset = offsetof(struct smb_com_transaction2_spi_req,
+ 				InformationLevel) - 4;
+ 	offset = param_offset + params;
+-	data_offset = (char *) (&pSMB->hdr.Protocol) + offset;
++	data_offset = (char *)pSMB + offsetof(typeof(*pSMB), hdr.Protocol) + offset;
+ 	pSMB->ParameterOffset = cpu_to_le16(param_offset);
+ 	pSMB->DataOffset = cpu_to_le16(offset);
+ 	pSMB->SetupCount = 1;
+diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
+index a94c538ff86368..feff3324d39c6d 100644
+--- a/fs/smb/client/connect.c
++++ b/fs/smb/client/connect.c
+@@ -2512,9 +2512,6 @@ cifs_put_tcon(struct cifs_tcon *tcon, enum smb3_tcon_ref_trace trace)
+ 
+ 	list_del_init(&tcon->tcon_list);
+ 	tcon->status = TID_EXITING;
+-#ifdef CONFIG_CIFS_DFS_UPCALL
+-	list_replace_init(&tcon->dfs_ses_list, &ses_list);
+-#endif
+ 	spin_unlock(&tcon->tc_lock);
+ 	spin_unlock(&cifs_tcp_ses_lock);
+ 
+@@ -2522,6 +2519,7 @@ cifs_put_tcon(struct cifs_tcon *tcon, enum smb3_tcon_ref_trace trace)
+ 	cancel_delayed_work_sync(&tcon->query_interfaces);
+ #ifdef CONFIG_CIFS_DFS_UPCALL
+ 	cancel_delayed_work_sync(&tcon->dfs_cache_work);
++	list_replace_init(&tcon->dfs_ses_list, &ses_list);
+ #endif
+ 
+ 	if (tcon->use_witness) {
+diff --git a/fs/smb/client/dfs.c b/fs/smb/client/dfs.c
+index 3f6077c68d68aa..c35953843373ea 100644
+--- a/fs/smb/client/dfs.c
++++ b/fs/smb/client/dfs.c
+@@ -321,49 +321,6 @@ int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx)
+ 	return rc;
+ }
+ 
+-/* Update dfs referral path of superblock */
+-static int update_server_fullpath(struct TCP_Server_Info *server, struct cifs_sb_info *cifs_sb,
+-				  const char *target)
+-{
+-	int rc = 0;
+-	size_t len = strlen(target);
+-	char *refpath, *npath;
+-
+-	if (unlikely(len < 2 || *target != '\\'))
+-		return -EINVAL;
+-
+-	if (target[1] == '\\') {
+-		len += 1;
+-		refpath = kmalloc(len, GFP_KERNEL);
+-		if (!refpath)
+-			return -ENOMEM;
+-
+-		scnprintf(refpath, len, "%s", target);
+-	} else {
+-		len += sizeof("\\");
+-		refpath = kmalloc(len, GFP_KERNEL);
+-		if (!refpath)
+-			return -ENOMEM;
+-
+-		scnprintf(refpath, len, "\\%s", target);
+-	}
+-
+-	npath = dfs_cache_canonical_path(refpath, cifs_sb->local_nls, cifs_remap(cifs_sb));
+-	kfree(refpath);
+-
+-	if (IS_ERR(npath)) {
+-		rc = PTR_ERR(npath);
+-	} else {
+-		mutex_lock(&server->refpath_lock);
+-		spin_lock(&server->srv_lock);
+-		kfree(server->leaf_fullpath);
+-		server->leaf_fullpath = npath;
+-		spin_unlock(&server->srv_lock);
+-		mutex_unlock(&server->refpath_lock);
+-	}
+-	return rc;
+-}
+-
+ static int target_share_matches_server(struct TCP_Server_Info *server, char *share,
+ 				       bool *target_match)
+ {
+@@ -388,77 +345,22 @@ static int target_share_matches_server(struct TCP_Server_Info *server, char *sha
+ 	return rc;
+ }
+ 
+-static void __tree_connect_ipc(const unsigned int xid, char *tree,
+-			       struct cifs_sb_info *cifs_sb,
+-			       struct cifs_ses *ses)
+-{
+-	struct TCP_Server_Info *server = ses->server;
+-	struct cifs_tcon *tcon = ses->tcon_ipc;
+-	int rc;
+-
+-	spin_lock(&ses->ses_lock);
+-	spin_lock(&ses->chan_lock);
+-	if (cifs_chan_needs_reconnect(ses, server) ||
+-	    ses->ses_status != SES_GOOD) {
+-		spin_unlock(&ses->chan_lock);
+-		spin_unlock(&ses->ses_lock);
+-		cifs_server_dbg(FYI, "%s: skipping ipc reconnect due to disconnected ses\n",
+-				__func__);
+-		return;
+-	}
+-	spin_unlock(&ses->chan_lock);
+-	spin_unlock(&ses->ses_lock);
+-
+-	cifs_server_lock(server);
+-	scnprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$", server->hostname);
+-	cifs_server_unlock(server);
+-
+-	rc = server->ops->tree_connect(xid, ses, tree, tcon,
+-				       cifs_sb->local_nls);
+-	cifs_server_dbg(FYI, "%s: tree_reconnect %s: %d\n", __func__, tree, rc);
+-	spin_lock(&tcon->tc_lock);
+-	if (rc) {
+-		tcon->status = TID_NEED_TCON;
+-	} else {
+-		tcon->status = TID_GOOD;
+-		tcon->need_reconnect = false;
+-	}
+-	spin_unlock(&tcon->tc_lock);
+-}
+-
+-static void tree_connect_ipc(const unsigned int xid, char *tree,
+-			     struct cifs_sb_info *cifs_sb,
+-			     struct cifs_tcon *tcon)
+-{
+-	struct cifs_ses *ses = tcon->ses;
+-
+-	__tree_connect_ipc(xid, tree, cifs_sb, ses);
+-	__tree_connect_ipc(xid, tree, cifs_sb, CIFS_DFS_ROOT_SES(ses));
+-}
+-
+-static int __tree_connect_dfs_target(const unsigned int xid, struct cifs_tcon *tcon,
+-				     struct cifs_sb_info *cifs_sb, char *tree, bool islink,
+-				     struct dfs_cache_tgt_list *tl)
++static int tree_connect_dfs_target(const unsigned int xid,
++				   struct cifs_tcon *tcon,
++				   struct cifs_sb_info *cifs_sb,
++				   char *tree, bool islink,
++				   struct dfs_cache_tgt_list *tl)
+ {
+-	int rc;
++	const struct smb_version_operations *ops = tcon->ses->server->ops;
+ 	struct TCP_Server_Info *server = tcon->ses->server;
+-	const struct smb_version_operations *ops = server->ops;
+-	struct cifs_ses *root_ses = CIFS_DFS_ROOT_SES(tcon->ses);
+-	char *share = NULL, *prefix = NULL;
+ 	struct dfs_cache_tgt_iterator *tit;
++	char *share = NULL, *prefix = NULL;
+ 	bool target_match;
+-
+-	tit = dfs_cache_get_tgt_iterator(tl);
+-	if (!tit) {
+-		rc = -ENOENT;
+-		goto out;
+-	}
++	int rc = -ENOENT;
+ 
+ 	/* Try to tree connect to all dfs targets */
+-	for (; tit; tit = dfs_cache_get_next_tgt(tl, tit)) {
+-		const char *target = dfs_cache_get_tgt_name(tit);
+-		DFS_CACHE_TGT_LIST(ntl);
+-
++	for (tit = dfs_cache_get_tgt_iterator(tl);
++	     tit; tit = dfs_cache_get_next_tgt(tl, tit)) {
+ 		kfree(share);
+ 		kfree(prefix);
+ 		share = prefix = NULL;
+@@ -479,69 +381,16 @@ static int __tree_connect_dfs_target(const unsigned int xid, struct cifs_tcon *t
+ 		}
+ 
+ 		dfs_cache_noreq_update_tgthint(server->leaf_fullpath + 1, tit);
+-		tree_connect_ipc(xid, tree, cifs_sb, tcon);
+-
+ 		scnprintf(tree, MAX_TREE_SIZE, "\\%s", share);
+-		if (!islink) {
+-			rc = ops->tree_connect(xid, tcon->ses, tree, tcon, cifs_sb->local_nls);
+-			break;
+-		}
+-
+-		/*
+-		 * If no dfs referrals were returned from link target, then just do a TREE_CONNECT
+-		 * to it.  Otherwise, cache the dfs referral and then mark current tcp ses for
+-		 * reconnect so either the demultiplex thread or the echo worker will reconnect to
+-		 * newly resolved target.
+-		 */
+-		if (dfs_cache_find(xid, root_ses, cifs_sb->local_nls, cifs_remap(cifs_sb), target,
+-				   NULL, &ntl)) {
+-			rc = ops->tree_connect(xid, tcon->ses, tree, tcon, cifs_sb->local_nls);
+-			if (rc)
+-				continue;
+-
++		rc = ops->tree_connect(xid, tcon->ses, tree,
++				       tcon, tcon->ses->local_nls);
++		if (islink && !rc && cifs_sb)
+ 			rc = cifs_update_super_prepath(cifs_sb, prefix);
+-		} else {
+-			/* Target is another dfs share */
+-			rc = update_server_fullpath(server, cifs_sb, target);
+-			dfs_cache_free_tgts(tl);
+-
+-			if (!rc) {
+-				rc = -EREMOTE;
+-				list_replace_init(&ntl.tl_list, &tl->tl_list);
+-			} else
+-				dfs_cache_free_tgts(&ntl);
+-		}
+ 		break;
+ 	}
+ 
+-out:
+ 	kfree(share);
+ 	kfree(prefix);
+-
+-	return rc;
+-}
+-
+-static int tree_connect_dfs_target(const unsigned int xid, struct cifs_tcon *tcon,
+-				   struct cifs_sb_info *cifs_sb, char *tree, bool islink,
+-				   struct dfs_cache_tgt_list *tl)
+-{
+-	int rc;
+-	int num_links = 0;
+-	struct TCP_Server_Info *server = tcon->ses->server;
+-	char *old_fullpath = server->leaf_fullpath;
+-
+-	do {
+-		rc = __tree_connect_dfs_target(xid, tcon, cifs_sb, tree, islink, tl);
+-		if (!rc || rc != -EREMOTE)
+-			break;
+-	} while (rc = -ELOOP, ++num_links < MAX_NESTED_LINKS);
+-	/*
+-	 * If we couldn't tree connect to any targets from last referral path, then
+-	 * retry it from newly resolved dfs referral.
+-	 */
+-	if (rc && server->leaf_fullpath != old_fullpath)
+-		cifs_signal_cifsd_for_reconnect(server, true);
+-
+ 	dfs_cache_free_tgts(tl);
+ 	return rc;
+ }
+@@ -596,14 +445,11 @@ int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const stru
+ 	if (!IS_ERR(sb))
+ 		cifs_sb = CIFS_SB(sb);
+ 
+-	/*
+-	 * Tree connect to last share in @tcon->tree_name whether dfs super or
+-	 * cached dfs referral was not found.
+-	 */
+-	if (!cifs_sb || !server->leaf_fullpath ||
++	/* Tree connect to last share in @tcon->tree_name if no DFS referral */
++	if (!server->leaf_fullpath ||
+ 	    dfs_cache_noreq_find(server->leaf_fullpath + 1, &ref, &tl)) {
+-		rc = ops->tree_connect(xid, tcon->ses, tcon->tree_name, tcon,
+-				       cifs_sb ? cifs_sb->local_nls : nlsc);
++		rc = ops->tree_connect(xid, tcon->ses, tcon->tree_name,
++				       tcon, tcon->ses->local_nls);
+ 		goto out;
+ 	}
+ 
+diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c
+index 6d567b16998119..b35fe1075503e1 100644
+--- a/fs/smb/client/inode.c
++++ b/fs/smb/client/inode.c
+@@ -724,6 +724,88 @@ static int cifs_sfu_mode(struct cifs_fattr *fattr, const unsigned char *path,
+ #endif
+ }
+ 
++#define POSIX_TYPE_FILE    0
++#define POSIX_TYPE_DIR     1
++#define POSIX_TYPE_SYMLINK 2
++#define POSIX_TYPE_CHARDEV 3
++#define POSIX_TYPE_BLKDEV  4
++#define POSIX_TYPE_FIFO    5
++#define POSIX_TYPE_SOCKET  6
++
++#define POSIX_X_OTH      0000001
++#define POSIX_W_OTH      0000002
++#define POSIX_R_OTH      0000004
++#define POSIX_X_GRP      0000010
++#define POSIX_W_GRP      0000020
++#define POSIX_R_GRP      0000040
++#define POSIX_X_USR      0000100
++#define POSIX_W_USR      0000200
++#define POSIX_R_USR      0000400
++#define POSIX_STICKY     0001000
++#define POSIX_SET_GID    0002000
++#define POSIX_SET_UID    0004000
++
++#define POSIX_OTH_MASK      0000007
++#define POSIX_GRP_MASK      0000070
++#define POSIX_USR_MASK      0000700
++#define POSIX_PERM_MASK     0000777
++#define POSIX_FILETYPE_MASK 0070000
++
++#define POSIX_FILETYPE_SHIFT 12
++
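++/* Translate SMB3.1.1 POSIX-extension wire permission bits into VFS S_I* bits. */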
++static u32 wire_perms_to_posix(u32 wire)
++{
++	u32 mode = 0;
++
++	mode |= (wire & POSIX_X_OTH) ? S_IXOTH : 0;
++	mode |= (wire & POSIX_W_OTH) ? S_IWOTH : 0;
++	mode |= (wire & POSIX_R_OTH) ? S_IROTH : 0;
++	mode |= (wire & POSIX_X_GRP) ? S_IXGRP : 0;
++	mode |= (wire & POSIX_W_GRP) ? S_IWGRP : 0;
++	mode |= (wire & POSIX_R_GRP) ? S_IRGRP : 0;
++	mode |= (wire & POSIX_X_USR) ? S_IXUSR : 0;
++	mode |= (wire & POSIX_W_USR) ? S_IWUSR : 0;
++	mode |= (wire & POSIX_R_USR) ? S_IRUSR : 0;
++	mode |= (wire & POSIX_STICKY) ? S_ISVTX : 0;
++	mode |= (wire & POSIX_SET_GID) ? S_ISGID : 0;
++	mode |= (wire & POSIX_SET_UID) ? S_ISUID : 0;
++
++	return mode;
++}
++
++static u32 posix_filetypes[] = {
++	S_IFREG,
++	S_IFDIR,
++	S_IFLNK,
++	S_IFCHR,
++	S_IFBLK,
++	S_IFIFO,
++	S_IFSOCK
++};
++
++static u32 wire_filetype_to_posix(u32 wire_type)
++{
++	if (wire_type >= ARRAY_SIZE(posix_filetypes)) {
++		pr_warn("Unexpected type %u", wire_type);
++		return 0;
++	}
++	return posix_filetypes[wire_type];
++}
++
++umode_t wire_mode_to_posix(u32 wire, bool is_dir)
++{
++	u32 wire_type;
++	u32 mode;
++
++	wire_type = (wire & POSIX_FILETYPE_MASK) >> POSIX_FILETYPE_SHIFT;
++	/* older servers do not set POSIX file type in the mode field in the response */
++	if ((wire_type == 0) && is_dir)
++		mode = wire_perms_to_posix(wire) | S_IFDIR;
++	else
++		mode = (wire_perms_to_posix(wire) | wire_filetype_to_posix(wire_type));
++	return (umode_t)mode;
++}
++
+ /* Fill a cifs_fattr struct with info from POSIX info struct */
+ static void smb311_posix_info_to_fattr(struct cifs_fattr *fattr,
+ 				       struct cifs_open_info_data *data,
+@@ -760,20 +842,14 @@ static void smb311_posix_info_to_fattr(struct cifs_fattr *fattr,
+ 	fattr->cf_bytes = le64_to_cpu(info->AllocationSize);
+ 	fattr->cf_createtime = le64_to_cpu(info->CreationTime);
+ 	fattr->cf_nlink = le32_to_cpu(info->HardLinks);
+-	fattr->cf_mode = (umode_t) le32_to_cpu(info->Mode);
++	fattr->cf_mode = wire_mode_to_posix(le32_to_cpu(info->Mode),
++					    fattr->cf_cifsattrs & ATTR_DIRECTORY);
+ 
+ 	if (cifs_open_data_reparse(data) &&
+ 	    cifs_reparse_point_to_fattr(cifs_sb, fattr, data))
+ 		goto out_reparse;
+ 
+-	fattr->cf_mode &= ~S_IFMT;
+-	if (fattr->cf_cifsattrs & ATTR_DIRECTORY) {
+-		fattr->cf_mode |= S_IFDIR;
+-		fattr->cf_dtype = DT_DIR;
+-	} else { /* file */
+-		fattr->cf_mode |= S_IFREG;
+-		fattr->cf_dtype = DT_REG;
+-	}
++	fattr->cf_dtype = S_DT(fattr->cf_mode);
+ 
+ out_reparse:
+ 	if (S_ISLNK(fattr->cf_mode)) {
+diff --git a/fs/smb/client/readdir.c b/fs/smb/client/readdir.c
+index b3a8f9c6fcff6f..273358d20a46c9 100644
+--- a/fs/smb/client/readdir.c
++++ b/fs/smb/client/readdir.c
+@@ -71,6 +71,8 @@ cifs_prime_dcache(struct dentry *parent, struct qstr *name,
+ 	struct inode *inode;
+ 	struct super_block *sb = parent->d_sb;
+ 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
++	bool posix = cifs_sb_master_tcon(cifs_sb)->posix_extensions;
++	bool reparse_need_reval = false;
+ 	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
+ 	int rc;
+ 
+@@ -85,7 +87,21 @@ cifs_prime_dcache(struct dentry *parent, struct qstr *name,
+ 		 * this spares us an invalidation.
+ 		 */
+ retry:
+-		if ((fattr->cf_cifsattrs & ATTR_REPARSE) ||
++		if (posix) {
++			switch (fattr->cf_mode & S_IFMT) {
++			case S_IFLNK:
++			case S_IFBLK:
++			case S_IFCHR:
++				reparse_need_reval = true;
++				break;
++			default:
++				break;
++			}
++		} else if (fattr->cf_cifsattrs & ATTR_REPARSE) {
++			reparse_need_reval = true;
++		}
++
++		if (reparse_need_reval ||
+ 		    (fattr->cf_flags & CIFS_FATTR_NEED_REVAL))
+ 			return;
+ 
+@@ -241,31 +257,29 @@ cifs_posix_to_fattr(struct cifs_fattr *fattr, struct smb2_posix_info *info,
+ 	fattr->cf_nlink = le32_to_cpu(info->HardLinks);
+ 	fattr->cf_cifsattrs = le32_to_cpu(info->DosAttributes);
+ 
+-	/*
+-	 * Since we set the inode type below we need to mask off
+-	 * to avoid strange results if bits set above.
+-	 * XXX: why not make server&client use the type bits?
+-	 */
+-	fattr->cf_mode = le32_to_cpu(info->Mode) & ~S_IFMT;
++	if (fattr->cf_cifsattrs & ATTR_REPARSE)
++		fattr->cf_cifstag = le32_to_cpu(info->ReparseTag);
++
++	/* The Mode field in the response can now include the file type as well */
++	fattr->cf_mode = wire_mode_to_posix(le32_to_cpu(info->Mode),
++					    fattr->cf_cifsattrs & ATTR_DIRECTORY);
++	fattr->cf_dtype = S_DT(le32_to_cpu(info->Mode));
++
++	switch (fattr->cf_mode & S_IFMT) {
++	case S_IFLNK:
++	case S_IFBLK:
++	case S_IFCHR:
++		fattr->cf_flags |= CIFS_FATTR_NEED_REVAL;
++		break;
++	default:
++		break;
++	}
+ 
+ 	cifs_dbg(FYI, "posix fattr: dev %d, reparse %d, mode %o\n",
+ 		 le32_to_cpu(info->DeviceId),
+ 		 le32_to_cpu(info->ReparseTag),
+ 		 le32_to_cpu(info->Mode));
+ 
+-	if (fattr->cf_cifsattrs & ATTR_DIRECTORY) {
+-		fattr->cf_mode |= S_IFDIR;
+-		fattr->cf_dtype = DT_DIR;
+-	} else {
+-		/*
+-		 * mark anything that is not a dir as regular
+-		 * file. special files should have the REPARSE
+-		 * attribute and will be marked as needing revaluation
+-		 */
+-		fattr->cf_mode |= S_IFREG;
+-		fattr->cf_dtype = DT_REG;
+-	}
+-
+ 	sid_to_id(cifs_sb, &parsed.owner, fattr, SIDOWNER);
+ 	sid_to_id(cifs_sb, &parsed.group, fattr, SIDGROUP);
+ }
+diff --git a/fs/smb/client/reparse.c b/fs/smb/client/reparse.c
+index f74d0a86f44a4e..d3abb99cc99094 100644
+--- a/fs/smb/client/reparse.c
++++ b/fs/smb/client/reparse.c
+@@ -730,44 +730,60 @@ static void wsl_to_fattr(struct cifs_open_info_data *data,
+ 	fattr->cf_dtype = S_DT(fattr->cf_mode);
+ }
+ 
+-bool cifs_reparse_point_to_fattr(struct cifs_sb_info *cifs_sb,
+-				 struct cifs_fattr *fattr,
+-				 struct cifs_open_info_data *data)
++static bool posix_reparse_to_fattr(struct cifs_sb_info *cifs_sb,
++				   struct cifs_fattr *fattr,
++				   struct cifs_open_info_data *data)
+ {
+ 	struct reparse_posix_data *buf = data->reparse.posix;
+-	u32 tag = data->reparse.tag;
+ 
+-	if (tag == IO_REPARSE_TAG_NFS && buf) {
+-		if (le16_to_cpu(buf->ReparseDataLength) < sizeof(buf->InodeType))
++
++	if (buf == NULL)
++		return true;
++
++	if (le16_to_cpu(buf->ReparseDataLength) < sizeof(buf->InodeType)) {
++		WARN_ON_ONCE(1);
++		return false;
++	}
++
++	switch (le64_to_cpu(buf->InodeType)) {
++	case NFS_SPECFILE_CHR:
++		if (le16_to_cpu(buf->ReparseDataLength) != sizeof(buf->InodeType) + 8) {
++			WARN_ON_ONCE(1);
+ 			return false;
+-		switch (le64_to_cpu(buf->InodeType)) {
+-		case NFS_SPECFILE_CHR:
+-			if (le16_to_cpu(buf->ReparseDataLength) != sizeof(buf->InodeType) + 8)
+-				return false;
+-			fattr->cf_mode |= S_IFCHR;
+-			fattr->cf_rdev = reparse_mkdev(buf->DataBuffer);
+-			break;
+-		case NFS_SPECFILE_BLK:
+-			if (le16_to_cpu(buf->ReparseDataLength) != sizeof(buf->InodeType) + 8)
+-				return false;
+-			fattr->cf_mode |= S_IFBLK;
+-			fattr->cf_rdev = reparse_mkdev(buf->DataBuffer);
+-			break;
+-		case NFS_SPECFILE_FIFO:
+-			fattr->cf_mode |= S_IFIFO;
+-			break;
+-		case NFS_SPECFILE_SOCK:
+-			fattr->cf_mode |= S_IFSOCK;
+-			break;
+-		case NFS_SPECFILE_LNK:
+-			fattr->cf_mode |= S_IFLNK;
+-			break;
+-		default:
++		}
++		fattr->cf_mode |= S_IFCHR;
++		fattr->cf_rdev = reparse_mkdev(buf->DataBuffer);
++		break;
++	case NFS_SPECFILE_BLK:
++		if (le16_to_cpu(buf->ReparseDataLength) != sizeof(buf->InodeType) + 8) {
+ 			WARN_ON_ONCE(1);
+ 			return false;
+ 		}
+-		goto out;
++		fattr->cf_mode |= S_IFBLK;
++		fattr->cf_rdev = reparse_mkdev(buf->DataBuffer);
++		break;
++	case NFS_SPECFILE_FIFO:
++		fattr->cf_mode |= S_IFIFO;
++		break;
++	case NFS_SPECFILE_SOCK:
++		fattr->cf_mode |= S_IFSOCK;
++		break;
++	case NFS_SPECFILE_LNK:
++		fattr->cf_mode |= S_IFLNK;
++		break;
++	default:
++		WARN_ON_ONCE(1);
++		return false;
+ 	}
++	return true;
++}
++
++bool cifs_reparse_point_to_fattr(struct cifs_sb_info *cifs_sb,
++				 struct cifs_fattr *fattr,
++				 struct cifs_open_info_data *data)
++{
++	u32 tag = data->reparse.tag;
++	bool ok;
+ 
+ 	switch (tag) {
+ 	case IO_REPARSE_TAG_INTERNAL:
+@@ -787,15 +803,19 @@ bool cifs_reparse_point_to_fattr(struct cifs_sb_info *cifs_sb,
+ 	case IO_REPARSE_TAG_LX_BLK:
+ 		wsl_to_fattr(data, cifs_sb, tag, fattr);
+ 		break;
++	case IO_REPARSE_TAG_NFS:
++		ok = posix_reparse_to_fattr(cifs_sb, fattr, data);
++		if (!ok)
++			return false;
++		break;
+ 	case 0: /* SMB1 symlink */
+ 	case IO_REPARSE_TAG_SYMLINK:
+-	case IO_REPARSE_TAG_NFS:
+ 		fattr->cf_mode |= S_IFLNK;
+ 		break;
+ 	default:
+ 		return false;
+ 	}
+-out:
++
+ 	fattr->cf_dtype = S_DT(fattr->cf_mode);
+ 	return true;
+ }
+diff --git a/fs/smb/client/smb2inode.c b/fs/smb/client/smb2inode.c
+index a188908914fe8f..a55f0044d30bde 100644
+--- a/fs/smb/client/smb2inode.c
++++ b/fs/smb/client/smb2inode.c
+@@ -943,7 +943,8 @@ int smb2_query_path_info(const unsigned int xid,
+ 		if (rc || !data->reparse_point)
+ 			goto out;
+ 
+-		cmds[num_cmds++] = SMB2_OP_QUERY_WSL_EA;
++		if (!tcon->posix_extensions)
++			cmds[num_cmds++] = SMB2_OP_QUERY_WSL_EA;
+ 		/*
+ 		 * Skip SMB2_OP_GET_REPARSE if symlink already parsed in create
+ 		 * response.
+diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
+index 599118aed20539..d0836d710f1814 100644
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -6651,6 +6651,10 @@ int smb2_read(struct ksmbd_work *work)
+ 	}
+ 
+ 	offset = le64_to_cpu(req->Offset);
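++	/* The wire offset is u64 but is stored in a signed loff_t; reject negative values. */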
++	if (offset < 0) {
++		err = -EINVAL;
++		goto out;
++	}
+ 	length = le32_to_cpu(req->Length);
+ 	mincount = le32_to_cpu(req->MinimumCount);
+ 
+@@ -6864,6 +6868,8 @@ int smb2_write(struct ksmbd_work *work)
+ 	}
+ 
+ 	offset = le64_to_cpu(req->Offset);
++	if (offset < 0)
++		return -EINVAL;
+ 	length = le32_to_cpu(req->Length);
+ 
+ 	if (req->Channel == SMB2_CHANNEL_RDMA_V1 ||
+diff --git a/fs/unicode/mkutf8data.c b/fs/unicode/mkutf8data.c
+index b2bd08250c7a09..77b685db827511 100644
+--- a/fs/unicode/mkutf8data.c
++++ b/fs/unicode/mkutf8data.c
+@@ -2230,6 +2230,75 @@ static void nfdicf_init(void)
+ 		file_fail(fold_name);
+ }
+ 
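++/*
++ * Give each Default_Ignorable_Code_Point an empty NFDI/NFDICF decomposition
++ * so that normalization drops it.
++ */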
++static void ignore_init(void)
++{
++	FILE *file;
++	unsigned int unichar;
++	unsigned int first;
++	unsigned int last;
++	unsigned int *um;
++	int count;
++	int ret;
++
++	if (verbose > 0)
++		printf("Parsing %s\n", prop_name);
++	file = fopen(prop_name, "r");
++	if (!file)
++		open_fail(prop_name, errno);
++	assert(file);
++	count = 0;
++	while (fgets(line, LINESIZE, file)) {
++		ret = sscanf(line, "%X..%X ; %s # ", &first, &last, buf0);
++		if (ret == 3) {
++			if (strcmp(buf0, "Default_Ignorable_Code_Point"))
++				continue;
++			if (!utf32valid(first) || !utf32valid(last))
++				line_fail(prop_name, line);
++			for (unichar = first; unichar <= last; unichar++) {
++				free(unicode_data[unichar].utf32nfdi);
++				um = malloc(sizeof(unsigned int));
++				*um = 0;
++				unicode_data[unichar].utf32nfdi = um;
++				free(unicode_data[unichar].utf32nfdicf);
++				um = malloc(sizeof(unsigned int));
++				*um = 0;
++				unicode_data[unichar].utf32nfdicf = um;
++				count++;
++			}
++			if (verbose > 1)
++				printf(" %X..%X Default_Ignorable_Code_Point\n",
++					first, last);
++			continue;
++		}
++		ret = sscanf(line, "%X ; %s # ", &unichar, buf0);
++		if (ret == 2) {
++			if (strcmp(buf0, "Default_Ignorable_Code_Point"))
++				continue;
++			if (!utf32valid(unichar))
++				line_fail(prop_name, line);
++			free(unicode_data[unichar].utf32nfdi);
++			um = malloc(sizeof(unsigned int));
++			*um = 0;
++			unicode_data[unichar].utf32nfdi = um;
++			free(unicode_data[unichar].utf32nfdicf);
++			um = malloc(sizeof(unsigned int));
++			*um = 0;
++			unicode_data[unichar].utf32nfdicf = um;
++			if (verbose > 1)
++				printf(" %X Default_Ignorable_Code_Point\n",
++					unichar);
++			count++;
++			continue;
++		}
++	}
++	fclose(file);
++
++	if (verbose > 0)
++		printf("Found %d entries\n", count);
++	if (count == 0)
++		file_fail(prop_name);
++}
++
+ static void corrections_init(void)
+ {
+ 	FILE *file;
+@@ -3342,6 +3411,7 @@ int main(int argc, char *argv[])
+ 	ccc_init();
+ 	nfdi_init();
+ 	nfdicf_init();
++	ignore_init();
+ 	corrections_init();
+ 	hangul_decompose();
+ 	nfdi_decompose();
+diff --git a/fs/unicode/utf8data.c_shipped b/fs/unicode/utf8data.c_shipped
+index ac2da4ba2dc0f9..dafa5fed761d83 100644
+--- a/fs/unicode/utf8data.c_shipped
++++ b/fs/unicode/utf8data.c_shipped
+@@ -82,58 +82,58 @@ static const struct utf8data utf8nfdidata[] = {
+ 	{ 0xc0100, 20736 }
+ };
+ 
+-static const unsigned char utf8data[64080] = {
++static const unsigned char utf8data[64256] = {
+ 	/* nfdicf_30100 */
+-	0xd7,0x07,0x66,0x84,0x0c,0x01,0x00,0xc6,0xd5,0x16,0xe4,0x96,0x1a,0xe3,0x60,0x15,
+-	0xe2,0x49,0x0e,0xc1,0xe0,0x4b,0x0d,0xcf,0x86,0x65,0x2d,0x0d,0x01,0x00,0xd4,0xb8,
+-	0xd3,0x27,0xe2,0x03,0xa3,0xe1,0xcb,0x35,0xe0,0x29,0x22,0xcf,0x86,0xc5,0xe4,0xfa,
+-	0x6c,0xe3,0x45,0x68,0xe2,0xdb,0x65,0xe1,0x0e,0x65,0xe0,0xd3,0x64,0xcf,0x86,0xe5,
+-	0x98,0x64,0x64,0x7b,0x64,0x0b,0x00,0xd2,0x0e,0xe1,0xb3,0x3c,0xe0,0x34,0xa3,0xcf,
+-	0x86,0xcf,0x06,0x01,0x00,0xd1,0x0c,0xe0,0x98,0xa8,0xcf,0x86,0xcf,0x06,0x02,0xff,
++	0xd7,0x07,0x66,0x84,0x0c,0x01,0x00,0xc6,0xd5,0x16,0xe4,0x99,0x1a,0xe3,0x63,0x15,
++	0xe2,0x4c,0x0e,0xc1,0xe0,0x4e,0x0d,0xcf,0x86,0x65,0x2d,0x0d,0x01,0x00,0xd4,0xb8,
++	0xd3,0x27,0xe2,0x89,0xa3,0xe1,0xce,0x35,0xe0,0x2c,0x22,0xcf,0x86,0xc5,0xe4,0x15,
++	0x6d,0xe3,0x60,0x68,0xe2,0xf6,0x65,0xe1,0x29,0x65,0xe0,0xee,0x64,0xcf,0x86,0xe5,
++	0xb3,0x64,0x64,0x96,0x64,0x0b,0x00,0xd2,0x0e,0xe1,0xb5,0x3c,0xe0,0xba,0xa3,0xcf,
++	0x86,0xcf,0x06,0x01,0x00,0xd1,0x0c,0xe0,0x1e,0xa9,0xcf,0x86,0xcf,0x06,0x02,0xff,
+ 	0xff,0xd0,0x08,0xcf,0x86,0xcf,0x06,0x01,0x00,0xcf,0x86,0xd5,0x06,0xcf,0x06,0x01,
+-	0x00,0xe4,0xdf,0x45,0xe3,0x39,0x45,0xd2,0x06,0xcf,0x06,0x01,0x00,0xe1,0x01,0xad,
+-	0xd0,0x21,0xcf,0x86,0xe5,0xfb,0xa9,0xe4,0x7a,0xa9,0xe3,0x39,0xa9,0xe2,0x18,0xa9,
+-	0xe1,0x07,0xa9,0x10,0x08,0x01,0xff,0xe8,0xb1,0x88,0x00,0x01,0xff,0xe6,0x9b,0xb4,
+-	0x00,0xcf,0x86,0xe5,0xdd,0xab,0xd4,0x19,0xe3,0x1c,0xab,0xe2,0xfb,0xaa,0xe1,0xea,
+-	0xaa,0x10,0x08,0x01,0xff,0xe9,0xb9,0xbf,0x00,0x01,0xff,0xe8,0xab,0x96,0x00,0xe3,
+-	0x83,0xab,0xe2,0x62,0xab,0xe1,0x51,0xab,0x10,0x08,0x01,0xff,0xe7,0xb8,0xb7,0x00,
+-	0x01,0xff,0xe9,0x9b,0xbb,0x00,0x83,0xe2,0x68,0xf9,0xe1,0x52,0xf6,0xe0,0xcf,0xf4,
+-	0xcf,0x86,0xd5,0x31,0xc4,0xe3,0x51,0x4e,0xe2,0xf2,0x4c,0xe1,0x09,0xcc,0xe0,0x99,
+-	0x4b,0xcf,0x86,0xe5,0x8b,0x49,0xe4,0xac,0x46,0xe3,0x76,0xbc,0xe2,0xcd,0xbb,0xe1,
+-	0xa8,0xbb,0xe0,0x81,0xbb,0xcf,0x86,0xe5,0x4e,0xbb,0x94,0x07,0x63,0x39,0xbb,0x07,
+-	0x00,0x07,0x00,0xe4,0x3b,0xf4,0xd3,0x08,0xcf,0x86,0xcf,0x06,0x05,0x00,0xd2,0x0b,
+-	0xe1,0x4a,0xe1,0xcf,0x86,0xcf,0x06,0x05,0x00,0xd1,0x0e,0xe0,0x39,0xe2,0xcf,0x86,
+-	0xe5,0xfe,0xe1,0xcf,0x06,0x11,0x00,0xd0,0x0b,0xcf,0x86,0xe5,0x39,0xe2,0xcf,0x06,
+-	0x13,0x00,0xcf,0x86,0xd5,0x06,0xcf,0x06,0x00,0x00,0xe4,0xd4,0xf3,0xe3,0xbd,0xf2,
+-	0xd2,0xa0,0xe1,0x73,0xe6,0xd0,0x21,0xcf,0x86,0xe5,0x74,0xe3,0xe4,0xf0,0xe2,0xe3,
+-	0xae,0xe2,0xe2,0x8d,0xe2,0xe1,0x7b,0xe2,0x10,0x08,0x05,0xff,0xe4,0xb8,0xbd,0x00,
+-	0x05,0xff,0xe4,0xb8,0xb8,0x00,0xcf,0x86,0xd5,0x1c,0xe4,0xd0,0xe4,0xe3,0x8f,0xe4,
+-	0xe2,0x6e,0xe4,0xe1,0x5d,0xe4,0x10,0x08,0x05,0xff,0xe5,0x92,0xa2,0x00,0x05,0xff,
+-	0xe5,0x93,0xb6,0x00,0xd4,0x34,0xd3,0x18,0xe2,0x57,0xe5,0xe1,0x46,0xe5,0x10,0x09,
+-	0x05,0xff,0xf0,0xa1,0x9a,0xa8,0x00,0x05,0xff,0xf0,0xa1,0x9b,0xaa,0x00,0xe2,0x77,
+-	0xe5,0x91,0x11,0x10,0x09,0x05,0xff,0xf0,0xa1,0x8d,0xaa,0x00,0x05,0xff,0xe5,0xac,
+-	0x88,0x00,0x05,0xff,0xe5,0xac,0xbe,0x00,0xe3,0xbd,0xe5,0xd2,0x14,0xe1,0x8c,0xe5,
++	0x00,0xe4,0xe1,0x45,0xe3,0x3b,0x45,0xd2,0x06,0xcf,0x06,0x01,0x00,0xe1,0x87,0xad,
++	0xd0,0x21,0xcf,0x86,0xe5,0x81,0xaa,0xe4,0x00,0xaa,0xe3,0xbf,0xa9,0xe2,0x9e,0xa9,
++	0xe1,0x8d,0xa9,0x10,0x08,0x01,0xff,0xe8,0xb1,0x88,0x00,0x01,0xff,0xe6,0x9b,0xb4,
++	0x00,0xcf,0x86,0xe5,0x63,0xac,0xd4,0x19,0xe3,0xa2,0xab,0xe2,0x81,0xab,0xe1,0x70,
++	0xab,0x10,0x08,0x01,0xff,0xe9,0xb9,0xbf,0x00,0x01,0xff,0xe8,0xab,0x96,0x00,0xe3,
++	0x09,0xac,0xe2,0xe8,0xab,0xe1,0xd7,0xab,0x10,0x08,0x01,0xff,0xe7,0xb8,0xb7,0x00,
++	0x01,0xff,0xe9,0x9b,0xbb,0x00,0x83,0xe2,0x19,0xfa,0xe1,0xf2,0xf6,0xe0,0x6f,0xf5,
++	0xcf,0x86,0xd5,0x31,0xc4,0xe3,0x54,0x4e,0xe2,0xf5,0x4c,0xe1,0xa4,0xcc,0xe0,0x9c,
++	0x4b,0xcf,0x86,0xe5,0x8e,0x49,0xe4,0xaf,0x46,0xe3,0x11,0xbd,0xe2,0x68,0xbc,0xe1,
++	0x43,0xbc,0xe0,0x1c,0xbc,0xcf,0x86,0xe5,0xe9,0xbb,0x94,0x07,0x63,0xd4,0xbb,0x07,
++	0x00,0x07,0x00,0xe4,0xdb,0xf4,0xd3,0x08,0xcf,0x86,0xcf,0x06,0x05,0x00,0xd2,0x0b,
++	0xe1,0xea,0xe1,0xcf,0x86,0xcf,0x06,0x05,0x00,0xd1,0x0e,0xe0,0xd9,0xe2,0xcf,0x86,
++	0xe5,0x9e,0xe2,0xcf,0x06,0x11,0x00,0xd0,0x0b,0xcf,0x86,0xe5,0xd9,0xe2,0xcf,0x06,
++	0x13,0x00,0xcf,0x86,0xd5,0x06,0xcf,0x06,0x00,0x00,0xe4,0x74,0xf4,0xe3,0x5d,0xf3,
++	0xd2,0xa0,0xe1,0x13,0xe7,0xd0,0x21,0xcf,0x86,0xe5,0x14,0xe4,0xe4,0x90,0xe3,0xe3,
++	0x4e,0xe3,0xe2,0x2d,0xe3,0xe1,0x1b,0xe3,0x10,0x08,0x05,0xff,0xe4,0xb8,0xbd,0x00,
++	0x05,0xff,0xe4,0xb8,0xb8,0x00,0xcf,0x86,0xd5,0x1c,0xe4,0x70,0xe5,0xe3,0x2f,0xe5,
++	0xe2,0x0e,0xe5,0xe1,0xfd,0xe4,0x10,0x08,0x05,0xff,0xe5,0x92,0xa2,0x00,0x05,0xff,
++	0xe5,0x93,0xb6,0x00,0xd4,0x34,0xd3,0x18,0xe2,0xf7,0xe5,0xe1,0xe6,0xe5,0x10,0x09,
++	0x05,0xff,0xf0,0xa1,0x9a,0xa8,0x00,0x05,0xff,0xf0,0xa1,0x9b,0xaa,0x00,0xe2,0x17,
++	0xe6,0x91,0x11,0x10,0x09,0x05,0xff,0xf0,0xa1,0x8d,0xaa,0x00,0x05,0xff,0xe5,0xac,
++	0x88,0x00,0x05,0xff,0xe5,0xac,0xbe,0x00,0xe3,0x5d,0xe6,0xd2,0x14,0xe1,0x2c,0xe6,
+ 	0x10,0x08,0x05,0xff,0xe5,0xaf,0xb3,0x00,0x05,0xff,0xf0,0xa1,0xac,0x98,0x00,0xe1,
+-	0x98,0xe5,0x10,0x08,0x05,0xff,0xe5,0xbc,0xb3,0x00,0x05,0xff,0xe5,0xb0,0xa2,0x00,
+-	0xd1,0xd5,0xd0,0x6a,0xcf,0x86,0xe5,0xed,0xea,0xd4,0x19,0xe3,0x26,0xea,0xe2,0x04,
+-	0xea,0xe1,0xf3,0xe9,0x10,0x08,0x05,0xff,0xe6,0xb4,0xbe,0x00,0x05,0xff,0xe6,0xb5,
+-	0xb7,0x00,0xd3,0x18,0xe2,0x70,0xea,0xe1,0x5f,0xea,0x10,0x09,0x05,0xff,0xf0,0xa3,
+-	0xbd,0x9e,0x00,0x05,0xff,0xf0,0xa3,0xbe,0x8e,0x00,0xd2,0x13,0xe1,0x88,0xea,0x10,
++	0x38,0xe6,0x10,0x08,0x05,0xff,0xe5,0xbc,0xb3,0x00,0x05,0xff,0xe5,0xb0,0xa2,0x00,
++	0xd1,0xd5,0xd0,0x6a,0xcf,0x86,0xe5,0x8d,0xeb,0xd4,0x19,0xe3,0xc6,0xea,0xe2,0xa4,
++	0xea,0xe1,0x93,0xea,0x10,0x08,0x05,0xff,0xe6,0xb4,0xbe,0x00,0x05,0xff,0xe6,0xb5,
++	0xb7,0x00,0xd3,0x18,0xe2,0x10,0xeb,0xe1,0xff,0xea,0x10,0x09,0x05,0xff,0xf0,0xa3,
++	0xbd,0x9e,0x00,0x05,0xff,0xf0,0xa3,0xbe,0x8e,0x00,0xd2,0x13,0xe1,0x28,0xeb,0x10,
+ 	0x08,0x05,0xff,0xe7,0x81,0xbd,0x00,0x05,0xff,0xe7,0x81,0xb7,0x00,0xd1,0x11,0x10,
+ 	0x08,0x05,0xff,0xe7,0x85,0x85,0x00,0x05,0xff,0xf0,0xa4,0x89,0xa3,0x00,0x10,0x08,
+-	0x05,0xff,0xe7,0x86,0x9c,0x00,0x05,0xff,0xe4,0x8e,0xab,0x00,0xcf,0x86,0xe5,0x8a,
+-	0xec,0xd4,0x1a,0xe3,0xc2,0xeb,0xe2,0xa8,0xeb,0xe1,0x95,0xeb,0x10,0x08,0x05,0xff,
+-	0xe7,0x9b,0xb4,0x00,0x05,0xff,0xf0,0xa5,0x83,0xb3,0x00,0xd3,0x16,0xe2,0x0a,0xec,
+-	0xe1,0xf8,0xeb,0x10,0x08,0x05,0xff,0xe7,0xa3,0x8c,0x00,0x05,0xff,0xe4,0x83,0xa3,
+-	0x00,0xd2,0x13,0xe1,0x26,0xec,0x10,0x08,0x05,0xff,0xe4,0x84,0xaf,0x00,0x05,0xff,
++	0x05,0xff,0xe7,0x86,0x9c,0x00,0x05,0xff,0xe4,0x8e,0xab,0x00,0xcf,0x86,0xe5,0x2a,
++	0xed,0xd4,0x1a,0xe3,0x62,0xec,0xe2,0x48,0xec,0xe1,0x35,0xec,0x10,0x08,0x05,0xff,
++	0xe7,0x9b,0xb4,0x00,0x05,0xff,0xf0,0xa5,0x83,0xb3,0x00,0xd3,0x16,0xe2,0xaa,0xec,
++	0xe1,0x98,0xec,0x10,0x08,0x05,0xff,0xe7,0xa3,0x8c,0x00,0x05,0xff,0xe4,0x83,0xa3,
++	0x00,0xd2,0x13,0xe1,0xc6,0xec,0x10,0x08,0x05,0xff,0xe4,0x84,0xaf,0x00,0x05,0xff,
+ 	0xe7,0xa9,0x80,0x00,0xd1,0x12,0x10,0x09,0x05,0xff,0xf0,0xa5,0xa5,0xbc,0x00,0x05,
+ 	0xff,0xf0,0xa5,0xaa,0xa7,0x00,0x10,0x09,0x05,0xff,0xf0,0xa5,0xaa,0xa7,0x00,0x05,
+-	0xff,0xe7,0xaa,0xae,0x00,0xe0,0x3c,0xef,0xcf,0x86,0xd5,0x1d,0xe4,0xb1,0xed,0xe3,
+-	0x6d,0xed,0xe2,0x4b,0xed,0xe1,0x3a,0xed,0x10,0x09,0x05,0xff,0xf0,0xa3,0x8d,0x9f,
+-	0x00,0x05,0xff,0xe4,0x8f,0x95,0x00,0xd4,0x19,0xe3,0x58,0xee,0xe2,0x34,0xee,0xe1,
+-	0x23,0xee,0x10,0x08,0x05,0xff,0xe8,0x8d,0x93,0x00,0x05,0xff,0xe8,0x8f,0x8a,0x00,
+-	0xd3,0x18,0xe2,0xa3,0xee,0xe1,0x92,0xee,0x10,0x09,0x05,0xff,0xf0,0xa6,0xbe,0xb1,
+-	0x00,0x05,0xff,0xf0,0xa7,0x83,0x92,0x00,0xd2,0x13,0xe1,0xbb,0xee,0x10,0x08,0x05,
++	0xff,0xe7,0xaa,0xae,0x00,0xe0,0xdc,0xef,0xcf,0x86,0xd5,0x1d,0xe4,0x51,0xee,0xe3,
++	0x0d,0xee,0xe2,0xeb,0xed,0xe1,0xda,0xed,0x10,0x09,0x05,0xff,0xf0,0xa3,0x8d,0x9f,
++	0x00,0x05,0xff,0xe4,0x8f,0x95,0x00,0xd4,0x19,0xe3,0xf8,0xee,0xe2,0xd4,0xee,0xe1,
++	0xc3,0xee,0x10,0x08,0x05,0xff,0xe8,0x8d,0x93,0x00,0x05,0xff,0xe8,0x8f,0x8a,0x00,
++	0xd3,0x18,0xe2,0x43,0xef,0xe1,0x32,0xef,0x10,0x09,0x05,0xff,0xf0,0xa6,0xbe,0xb1,
++	0x00,0x05,0xff,0xf0,0xa7,0x83,0x92,0x00,0xd2,0x13,0xe1,0x5b,0xef,0x10,0x08,0x05,
+ 	0xff,0xe8,0x9a,0x88,0x00,0x05,0xff,0xe8,0x9c,0x8e,0x00,0xd1,0x10,0x10,0x08,0x05,
+ 	0xff,0xe8,0x9c,0xa8,0x00,0x05,0xff,0xe8,0x9d,0xab,0x00,0x10,0x08,0x05,0xff,0xe8,
+ 	0x9e,0x86,0x00,0x05,0xff,0xe4,0xb5,0x97,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+@@ -141,152 +141,152 @@ static const unsigned char utf8data[64080] = {
+ 	0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 	0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 	/* nfdi_30100 */
+-	0x57,0x04,0x01,0x00,0xc6,0xd5,0x13,0xe4,0xa8,0x59,0xe3,0xe2,0x54,0xe2,0x5b,0x4f,
+-	0xc1,0xe0,0x87,0x4d,0xcf,0x06,0x01,0x00,0xd4,0xb8,0xd3,0x27,0xe2,0x89,0x9f,0xe1,
+-	0x91,0x8d,0xe0,0x21,0x71,0xcf,0x86,0xc5,0xe4,0x80,0x69,0xe3,0xcb,0x64,0xe2,0x61,
+-	0x62,0xe1,0x94,0x61,0xe0,0x59,0x61,0xcf,0x86,0xe5,0x1e,0x61,0x64,0x01,0x61,0x0b,
+-	0x00,0xd2,0x0e,0xe1,0x3f,0xa0,0xe0,0xba,0x9f,0xcf,0x86,0xcf,0x06,0x01,0x00,0xd1,
+-	0x0c,0xe0,0x1e,0xa5,0xcf,0x86,0xcf,0x06,0x02,0xff,0xff,0xd0,0x08,0xcf,0x86,0xcf,
+-	0x06,0x01,0x00,0xcf,0x86,0xd5,0x06,0xcf,0x06,0x01,0x00,0xe4,0x1b,0xb6,0xe3,0x95,
+-	0xad,0xd2,0x06,0xcf,0x06,0x01,0x00,0xe1,0x87,0xa9,0xd0,0x21,0xcf,0x86,0xe5,0x81,
+-	0xa6,0xe4,0x00,0xa6,0xe3,0xbf,0xa5,0xe2,0x9e,0xa5,0xe1,0x8d,0xa5,0x10,0x08,0x01,
+-	0xff,0xe8,0xb1,0x88,0x00,0x01,0xff,0xe6,0x9b,0xb4,0x00,0xcf,0x86,0xe5,0x63,0xa8,
+-	0xd4,0x19,0xe3,0xa2,0xa7,0xe2,0x81,0xa7,0xe1,0x70,0xa7,0x10,0x08,0x01,0xff,0xe9,
+-	0xb9,0xbf,0x00,0x01,0xff,0xe8,0xab,0x96,0x00,0xe3,0x09,0xa8,0xe2,0xe8,0xa7,0xe1,
+-	0xd7,0xa7,0x10,0x08,0x01,0xff,0xe7,0xb8,0xb7,0x00,0x01,0xff,0xe9,0x9b,0xbb,0x00,
+-	0x83,0xe2,0xee,0xf5,0xe1,0xd8,0xf2,0xe0,0x55,0xf1,0xcf,0x86,0xd5,0x31,0xc4,0xe3,
+-	0xd5,0xcb,0xe2,0xae,0xc9,0xe1,0x8f,0xc8,0xe0,0x1f,0xbf,0xcf,0x86,0xe5,0x12,0xbb,
+-	0xe4,0x0b,0xba,0xe3,0xfc,0xb8,0xe2,0x53,0xb8,0xe1,0x2e,0xb8,0xe0,0x07,0xb8,0xcf,
+-	0x86,0xe5,0xd4,0xb7,0x94,0x07,0x63,0xbf,0xb7,0x07,0x00,0x07,0x00,0xe4,0xc1,0xf0,
+-	0xd3,0x08,0xcf,0x86,0xcf,0x06,0x05,0x00,0xd2,0x0b,0xe1,0xd0,0xdd,0xcf,0x86,0xcf,
+-	0x06,0x05,0x00,0xd1,0x0e,0xe0,0xbf,0xde,0xcf,0x86,0xe5,0x84,0xde,0xcf,0x06,0x11,
+-	0x00,0xd0,0x0b,0xcf,0x86,0xe5,0xbf,0xde,0xcf,0x06,0x13,0x00,0xcf,0x86,0xd5,0x06,
+-	0xcf,0x06,0x00,0x00,0xe4,0x5a,0xf0,0xe3,0x43,0xef,0xd2,0xa0,0xe1,0xf9,0xe2,0xd0,
+-	0x21,0xcf,0x86,0xe5,0xfa,0xdf,0xe4,0x76,0xdf,0xe3,0x34,0xdf,0xe2,0x13,0xdf,0xe1,
+-	0x01,0xdf,0x10,0x08,0x05,0xff,0xe4,0xb8,0xbd,0x00,0x05,0xff,0xe4,0xb8,0xb8,0x00,
+-	0xcf,0x86,0xd5,0x1c,0xe4,0x56,0xe1,0xe3,0x15,0xe1,0xe2,0xf4,0xe0,0xe1,0xe3,0xe0,
+-	0x10,0x08,0x05,0xff,0xe5,0x92,0xa2,0x00,0x05,0xff,0xe5,0x93,0xb6,0x00,0xd4,0x34,
+-	0xd3,0x18,0xe2,0xdd,0xe1,0xe1,0xcc,0xe1,0x10,0x09,0x05,0xff,0xf0,0xa1,0x9a,0xa8,
+-	0x00,0x05,0xff,0xf0,0xa1,0x9b,0xaa,0x00,0xe2,0xfd,0xe1,0x91,0x11,0x10,0x09,0x05,
+-	0xff,0xf0,0xa1,0x8d,0xaa,0x00,0x05,0xff,0xe5,0xac,0x88,0x00,0x05,0xff,0xe5,0xac,
+-	0xbe,0x00,0xe3,0x43,0xe2,0xd2,0x14,0xe1,0x12,0xe2,0x10,0x08,0x05,0xff,0xe5,0xaf,
+-	0xb3,0x00,0x05,0xff,0xf0,0xa1,0xac,0x98,0x00,0xe1,0x1e,0xe2,0x10,0x08,0x05,0xff,
+-	0xe5,0xbc,0xb3,0x00,0x05,0xff,0xe5,0xb0,0xa2,0x00,0xd1,0xd5,0xd0,0x6a,0xcf,0x86,
+-	0xe5,0x73,0xe7,0xd4,0x19,0xe3,0xac,0xe6,0xe2,0x8a,0xe6,0xe1,0x79,0xe6,0x10,0x08,
+-	0x05,0xff,0xe6,0xb4,0xbe,0x00,0x05,0xff,0xe6,0xb5,0xb7,0x00,0xd3,0x18,0xe2,0xf6,
+-	0xe6,0xe1,0xe5,0xe6,0x10,0x09,0x05,0xff,0xf0,0xa3,0xbd,0x9e,0x00,0x05,0xff,0xf0,
+-	0xa3,0xbe,0x8e,0x00,0xd2,0x13,0xe1,0x0e,0xe7,0x10,0x08,0x05,0xff,0xe7,0x81,0xbd,
+-	0x00,0x05,0xff,0xe7,0x81,0xb7,0x00,0xd1,0x11,0x10,0x08,0x05,0xff,0xe7,0x85,0x85,
+-	0x00,0x05,0xff,0xf0,0xa4,0x89,0xa3,0x00,0x10,0x08,0x05,0xff,0xe7,0x86,0x9c,0x00,
+-	0x05,0xff,0xe4,0x8e,0xab,0x00,0xcf,0x86,0xe5,0x10,0xe9,0xd4,0x1a,0xe3,0x48,0xe8,
+-	0xe2,0x2e,0xe8,0xe1,0x1b,0xe8,0x10,0x08,0x05,0xff,0xe7,0x9b,0xb4,0x00,0x05,0xff,
+-	0xf0,0xa5,0x83,0xb3,0x00,0xd3,0x16,0xe2,0x90,0xe8,0xe1,0x7e,0xe8,0x10,0x08,0x05,
+-	0xff,0xe7,0xa3,0x8c,0x00,0x05,0xff,0xe4,0x83,0xa3,0x00,0xd2,0x13,0xe1,0xac,0xe8,
+-	0x10,0x08,0x05,0xff,0xe4,0x84,0xaf,0x00,0x05,0xff,0xe7,0xa9,0x80,0x00,0xd1,0x12,
+-	0x10,0x09,0x05,0xff,0xf0,0xa5,0xa5,0xbc,0x00,0x05,0xff,0xf0,0xa5,0xaa,0xa7,0x00,
+-	0x10,0x09,0x05,0xff,0xf0,0xa5,0xaa,0xa7,0x00,0x05,0xff,0xe7,0xaa,0xae,0x00,0xe0,
+-	0xc2,0xeb,0xcf,0x86,0xd5,0x1d,0xe4,0x37,0xea,0xe3,0xf3,0xe9,0xe2,0xd1,0xe9,0xe1,
+-	0xc0,0xe9,0x10,0x09,0x05,0xff,0xf0,0xa3,0x8d,0x9f,0x00,0x05,0xff,0xe4,0x8f,0x95,
+-	0x00,0xd4,0x19,0xe3,0xde,0xea,0xe2,0xba,0xea,0xe1,0xa9,0xea,0x10,0x08,0x05,0xff,
+-	0xe8,0x8d,0x93,0x00,0x05,0xff,0xe8,0x8f,0x8a,0x00,0xd3,0x18,0xe2,0x29,0xeb,0xe1,
+-	0x18,0xeb,0x10,0x09,0x05,0xff,0xf0,0xa6,0xbe,0xb1,0x00,0x05,0xff,0xf0,0xa7,0x83,
+-	0x92,0x00,0xd2,0x13,0xe1,0x41,0xeb,0x10,0x08,0x05,0xff,0xe8,0x9a,0x88,0x00,0x05,
+-	0xff,0xe8,0x9c,0x8e,0x00,0xd1,0x10,0x10,0x08,0x05,0xff,0xe8,0x9c,0xa8,0x00,0x05,
+-	0xff,0xe8,0x9d,0xab,0x00,0x10,0x08,0x05,0xff,0xe8,0x9e,0x86,0x00,0x05,0xff,0xe4,
+-	0xb5,0x97,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++	0x57,0x04,0x01,0x00,0xc6,0xd5,0x16,0xe4,0xc2,0x59,0xe3,0xfb,0x54,0xe2,0x74,0x4f,
++	0xc1,0xe0,0xa0,0x4d,0xcf,0x86,0x65,0x84,0x4d,0x01,0x00,0xd4,0xb8,0xd3,0x27,0xe2,
++	0x0c,0xa0,0xe1,0xdf,0x8d,0xe0,0x39,0x71,0xcf,0x86,0xc5,0xe4,0x98,0x69,0xe3,0xe3,
++	0x64,0xe2,0x79,0x62,0xe1,0xac,0x61,0xe0,0x71,0x61,0xcf,0x86,0xe5,0x36,0x61,0x64,
++	0x19,0x61,0x0b,0x00,0xd2,0x0e,0xe1,0xc2,0xa0,0xe0,0x3d,0xa0,0xcf,0x86,0xcf,0x06,
++	0x01,0x00,0xd1,0x0c,0xe0,0xa1,0xa5,0xcf,0x86,0xcf,0x06,0x02,0xff,0xff,0xd0,0x08,
++	0xcf,0x86,0xcf,0x06,0x01,0x00,0xcf,0x86,0xd5,0x06,0xcf,0x06,0x01,0x00,0xe4,0x9e,
++	0xb6,0xe3,0x18,0xae,0xd2,0x06,0xcf,0x06,0x01,0x00,0xe1,0x0a,0xaa,0xd0,0x21,0xcf,
++	0x86,0xe5,0x04,0xa7,0xe4,0x83,0xa6,0xe3,0x42,0xa6,0xe2,0x21,0xa6,0xe1,0x10,0xa6,
++	0x10,0x08,0x01,0xff,0xe8,0xb1,0x88,0x00,0x01,0xff,0xe6,0x9b,0xb4,0x00,0xcf,0x86,
++	0xe5,0xe6,0xa8,0xd4,0x19,0xe3,0x25,0xa8,0xe2,0x04,0xa8,0xe1,0xf3,0xa7,0x10,0x08,
++	0x01,0xff,0xe9,0xb9,0xbf,0x00,0x01,0xff,0xe8,0xab,0x96,0x00,0xe3,0x8c,0xa8,0xe2,
++	0x6b,0xa8,0xe1,0x5a,0xa8,0x10,0x08,0x01,0xff,0xe7,0xb8,0xb7,0x00,0x01,0xff,0xe9,
++	0x9b,0xbb,0x00,0x83,0xe2,0x9c,0xf6,0xe1,0x75,0xf3,0xe0,0xf2,0xf1,0xcf,0x86,0xd5,
++	0x31,0xc4,0xe3,0x6d,0xcc,0xe2,0x46,0xca,0xe1,0x27,0xc9,0xe0,0xb7,0xbf,0xcf,0x86,
++	0xe5,0xaa,0xbb,0xe4,0xa3,0xba,0xe3,0x94,0xb9,0xe2,0xeb,0xb8,0xe1,0xc6,0xb8,0xe0,
++	0x9f,0xb8,0xcf,0x86,0xe5,0x6c,0xb8,0x94,0x07,0x63,0x57,0xb8,0x07,0x00,0x07,0x00,
++	0xe4,0x5e,0xf1,0xd3,0x08,0xcf,0x86,0xcf,0x06,0x05,0x00,0xd2,0x0b,0xe1,0x6d,0xde,
++	0xcf,0x86,0xcf,0x06,0x05,0x00,0xd1,0x0e,0xe0,0x5c,0xdf,0xcf,0x86,0xe5,0x21,0xdf,
++	0xcf,0x06,0x11,0x00,0xd0,0x0b,0xcf,0x86,0xe5,0x5c,0xdf,0xcf,0x06,0x13,0x00,0xcf,
++	0x86,0xd5,0x06,0xcf,0x06,0x00,0x00,0xe4,0xf7,0xf0,0xe3,0xe0,0xef,0xd2,0xa0,0xe1,
++	0x96,0xe3,0xd0,0x21,0xcf,0x86,0xe5,0x97,0xe0,0xe4,0x13,0xe0,0xe3,0xd1,0xdf,0xe2,
++	0xb0,0xdf,0xe1,0x9e,0xdf,0x10,0x08,0x05,0xff,0xe4,0xb8,0xbd,0x00,0x05,0xff,0xe4,
++	0xb8,0xb8,0x00,0xcf,0x86,0xd5,0x1c,0xe4,0xf3,0xe1,0xe3,0xb2,0xe1,0xe2,0x91,0xe1,
++	0xe1,0x80,0xe1,0x10,0x08,0x05,0xff,0xe5,0x92,0xa2,0x00,0x05,0xff,0xe5,0x93,0xb6,
++	0x00,0xd4,0x34,0xd3,0x18,0xe2,0x7a,0xe2,0xe1,0x69,0xe2,0x10,0x09,0x05,0xff,0xf0,
++	0xa1,0x9a,0xa8,0x00,0x05,0xff,0xf0,0xa1,0x9b,0xaa,0x00,0xe2,0x9a,0xe2,0x91,0x11,
++	0x10,0x09,0x05,0xff,0xf0,0xa1,0x8d,0xaa,0x00,0x05,0xff,0xe5,0xac,0x88,0x00,0x05,
++	0xff,0xe5,0xac,0xbe,0x00,0xe3,0xe0,0xe2,0xd2,0x14,0xe1,0xaf,0xe2,0x10,0x08,0x05,
++	0xff,0xe5,0xaf,0xb3,0x00,0x05,0xff,0xf0,0xa1,0xac,0x98,0x00,0xe1,0xbb,0xe2,0x10,
++	0x08,0x05,0xff,0xe5,0xbc,0xb3,0x00,0x05,0xff,0xe5,0xb0,0xa2,0x00,0xd1,0xd5,0xd0,
++	0x6a,0xcf,0x86,0xe5,0x10,0xe8,0xd4,0x19,0xe3,0x49,0xe7,0xe2,0x27,0xe7,0xe1,0x16,
++	0xe7,0x10,0x08,0x05,0xff,0xe6,0xb4,0xbe,0x00,0x05,0xff,0xe6,0xb5,0xb7,0x00,0xd3,
++	0x18,0xe2,0x93,0xe7,0xe1,0x82,0xe7,0x10,0x09,0x05,0xff,0xf0,0xa3,0xbd,0x9e,0x00,
++	0x05,0xff,0xf0,0xa3,0xbe,0x8e,0x00,0xd2,0x13,0xe1,0xab,0xe7,0x10,0x08,0x05,0xff,
++	0xe7,0x81,0xbd,0x00,0x05,0xff,0xe7,0x81,0xb7,0x00,0xd1,0x11,0x10,0x08,0x05,0xff,
++	0xe7,0x85,0x85,0x00,0x05,0xff,0xf0,0xa4,0x89,0xa3,0x00,0x10,0x08,0x05,0xff,0xe7,
++	0x86,0x9c,0x00,0x05,0xff,0xe4,0x8e,0xab,0x00,0xcf,0x86,0xe5,0xad,0xe9,0xd4,0x1a,
++	0xe3,0xe5,0xe8,0xe2,0xcb,0xe8,0xe1,0xb8,0xe8,0x10,0x08,0x05,0xff,0xe7,0x9b,0xb4,
++	0x00,0x05,0xff,0xf0,0xa5,0x83,0xb3,0x00,0xd3,0x16,0xe2,0x2d,0xe9,0xe1,0x1b,0xe9,
++	0x10,0x08,0x05,0xff,0xe7,0xa3,0x8c,0x00,0x05,0xff,0xe4,0x83,0xa3,0x00,0xd2,0x13,
++	0xe1,0x49,0xe9,0x10,0x08,0x05,0xff,0xe4,0x84,0xaf,0x00,0x05,0xff,0xe7,0xa9,0x80,
++	0x00,0xd1,0x12,0x10,0x09,0x05,0xff,0xf0,0xa5,0xa5,0xbc,0x00,0x05,0xff,0xf0,0xa5,
++	0xaa,0xa7,0x00,0x10,0x09,0x05,0xff,0xf0,0xa5,0xaa,0xa7,0x00,0x05,0xff,0xe7,0xaa,
++	0xae,0x00,0xe0,0x5f,0xec,0xcf,0x86,0xd5,0x1d,0xe4,0xd4,0xea,0xe3,0x90,0xea,0xe2,
++	0x6e,0xea,0xe1,0x5d,0xea,0x10,0x09,0x05,0xff,0xf0,0xa3,0x8d,0x9f,0x00,0x05,0xff,
++	0xe4,0x8f,0x95,0x00,0xd4,0x19,0xe3,0x7b,0xeb,0xe2,0x57,0xeb,0xe1,0x46,0xeb,0x10,
++	0x08,0x05,0xff,0xe8,0x8d,0x93,0x00,0x05,0xff,0xe8,0x8f,0x8a,0x00,0xd3,0x18,0xe2,
++	0xc6,0xeb,0xe1,0xb5,0xeb,0x10,0x09,0x05,0xff,0xf0,0xa6,0xbe,0xb1,0x00,0x05,0xff,
++	0xf0,0xa7,0x83,0x92,0x00,0xd2,0x13,0xe1,0xde,0xeb,0x10,0x08,0x05,0xff,0xe8,0x9a,
++	0x88,0x00,0x05,0xff,0xe8,0x9c,0x8e,0x00,0xd1,0x10,0x10,0x08,0x05,0xff,0xe8,0x9c,
++	0xa8,0x00,0x05,0xff,0xe8,0x9d,0xab,0x00,0x10,0x08,0x05,0xff,0xe8,0x9e,0x86,0x00,
++	0x05,0xff,0xe4,0xb5,0x97,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 	0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 	0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 	0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 	/* nfdicf_30200 */
+-	0xd7,0x07,0x66,0x84,0x05,0x01,0x00,0xc6,0xd5,0x16,0xe4,0x96,0x13,0xe3,0x60,0x0e,
+-	0xe2,0x49,0x07,0xc1,0xe0,0x4b,0x06,0xcf,0x86,0x65,0x2d,0x06,0x01,0x00,0xd4,0x2a,
+-	0xe3,0xce,0x35,0xe2,0x02,0x9c,0xe1,0xca,0x2e,0xe0,0x28,0x1b,0xcf,0x86,0xc5,0xe4,
+-	0xf9,0x65,0xe3,0x44,0x61,0xe2,0xda,0x5e,0xe1,0x0d,0x5e,0xe0,0xd2,0x5d,0xcf,0x86,
+-	0xe5,0x97,0x5d,0x64,0x7a,0x5d,0x0b,0x00,0x83,0xe2,0xf6,0xf2,0xe1,0xe0,0xef,0xe0,
+-	0x5d,0xee,0xcf,0x86,0xd5,0x31,0xc4,0xe3,0xdf,0x47,0xe2,0x80,0x46,0xe1,0x97,0xc5,
+-	0xe0,0x27,0x45,0xcf,0x86,0xe5,0x19,0x43,0xe4,0x3a,0x40,0xe3,0x04,0xb6,0xe2,0x5b,
+-	0xb5,0xe1,0x36,0xb5,0xe0,0x0f,0xb5,0xcf,0x86,0xe5,0xdc,0xb4,0x94,0x07,0x63,0xc7,
+-	0xb4,0x07,0x00,0x07,0x00,0xe4,0xc9,0xed,0xd3,0x08,0xcf,0x86,0xcf,0x06,0x05,0x00,
+-	0xd2,0x0b,0xe1,0xd8,0xda,0xcf,0x86,0xcf,0x06,0x05,0x00,0xd1,0x0e,0xe0,0xc7,0xdb,
+-	0xcf,0x86,0xe5,0x8c,0xdb,0xcf,0x06,0x11,0x00,0xd0,0x0b,0xcf,0x86,0xe5,0xc7,0xdb,
+-	0xcf,0x06,0x13,0x00,0xcf,0x86,0xd5,0x06,0xcf,0x06,0x00,0x00,0xe4,0x62,0xed,0xe3,
+-	0x4b,0xec,0xd2,0xa0,0xe1,0x01,0xe0,0xd0,0x21,0xcf,0x86,0xe5,0x02,0xdd,0xe4,0x7e,
+-	0xdc,0xe3,0x3c,0xdc,0xe2,0x1b,0xdc,0xe1,0x09,0xdc,0x10,0x08,0x05,0xff,0xe4,0xb8,
+-	0xbd,0x00,0x05,0xff,0xe4,0xb8,0xb8,0x00,0xcf,0x86,0xd5,0x1c,0xe4,0x5e,0xde,0xe3,
+-	0x1d,0xde,0xe2,0xfc,0xdd,0xe1,0xeb,0xdd,0x10,0x08,0x05,0xff,0xe5,0x92,0xa2,0x00,
+-	0x05,0xff,0xe5,0x93,0xb6,0x00,0xd4,0x34,0xd3,0x18,0xe2,0xe5,0xde,0xe1,0xd4,0xde,
++	0xd7,0x07,0x66,0x84,0x05,0x01,0x00,0xc6,0xd5,0x16,0xe4,0x99,0x13,0xe3,0x63,0x0e,
++	0xe2,0x4c,0x07,0xc1,0xe0,0x4e,0x06,0xcf,0x86,0x65,0x2d,0x06,0x01,0x00,0xd4,0x2a,
++	0xe3,0xd0,0x35,0xe2,0x88,0x9c,0xe1,0xcd,0x2e,0xe0,0x2b,0x1b,0xcf,0x86,0xc5,0xe4,
++	0x14,0x66,0xe3,0x5f,0x61,0xe2,0xf5,0x5e,0xe1,0x28,0x5e,0xe0,0xed,0x5d,0xcf,0x86,
++	0xe5,0xb2,0x5d,0x64,0x95,0x5d,0x0b,0x00,0x83,0xe2,0xa7,0xf3,0xe1,0x80,0xf0,0xe0,
++	0xfd,0xee,0xcf,0x86,0xd5,0x31,0xc4,0xe3,0xe2,0x47,0xe2,0x83,0x46,0xe1,0x32,0xc6,
++	0xe0,0x2a,0x45,0xcf,0x86,0xe5,0x1c,0x43,0xe4,0x3d,0x40,0xe3,0x9f,0xb6,0xe2,0xf6,
++	0xb5,0xe1,0xd1,0xb5,0xe0,0xaa,0xb5,0xcf,0x86,0xe5,0x77,0xb5,0x94,0x07,0x63,0x62,
++	0xb5,0x07,0x00,0x07,0x00,0xe4,0x69,0xee,0xd3,0x08,0xcf,0x86,0xcf,0x06,0x05,0x00,
++	0xd2,0x0b,0xe1,0x78,0xdb,0xcf,0x86,0xcf,0x06,0x05,0x00,0xd1,0x0e,0xe0,0x67,0xdc,
++	0xcf,0x86,0xe5,0x2c,0xdc,0xcf,0x06,0x11,0x00,0xd0,0x0b,0xcf,0x86,0xe5,0x67,0xdc,
++	0xcf,0x06,0x13,0x00,0xcf,0x86,0xd5,0x06,0xcf,0x06,0x00,0x00,0xe4,0x02,0xee,0xe3,
++	0xeb,0xec,0xd2,0xa0,0xe1,0xa1,0xe0,0xd0,0x21,0xcf,0x86,0xe5,0xa2,0xdd,0xe4,0x1e,
++	0xdd,0xe3,0xdc,0xdc,0xe2,0xbb,0xdc,0xe1,0xa9,0xdc,0x10,0x08,0x05,0xff,0xe4,0xb8,
++	0xbd,0x00,0x05,0xff,0xe4,0xb8,0xb8,0x00,0xcf,0x86,0xd5,0x1c,0xe4,0xfe,0xde,0xe3,
++	0xbd,0xde,0xe2,0x9c,0xde,0xe1,0x8b,0xde,0x10,0x08,0x05,0xff,0xe5,0x92,0xa2,0x00,
++	0x05,0xff,0xe5,0x93,0xb6,0x00,0xd4,0x34,0xd3,0x18,0xe2,0x85,0xdf,0xe1,0x74,0xdf,
+ 	0x10,0x09,0x05,0xff,0xf0,0xa1,0x9a,0xa8,0x00,0x05,0xff,0xf0,0xa1,0x9b,0xaa,0x00,
+-	0xe2,0x05,0xdf,0x91,0x11,0x10,0x09,0x05,0xff,0xf0,0xa1,0x8d,0xaa,0x00,0x05,0xff,
+-	0xe5,0xac,0x88,0x00,0x05,0xff,0xe5,0xac,0xbe,0x00,0xe3,0x4b,0xdf,0xd2,0x14,0xe1,
+-	0x1a,0xdf,0x10,0x08,0x05,0xff,0xe5,0xaf,0xb3,0x00,0x05,0xff,0xf0,0xa1,0xac,0x98,
+-	0x00,0xe1,0x26,0xdf,0x10,0x08,0x05,0xff,0xe5,0xbc,0xb3,0x00,0x05,0xff,0xe5,0xb0,
+-	0xa2,0x00,0xd1,0xd5,0xd0,0x6a,0xcf,0x86,0xe5,0x7b,0xe4,0xd4,0x19,0xe3,0xb4,0xe3,
+-	0xe2,0x92,0xe3,0xe1,0x81,0xe3,0x10,0x08,0x05,0xff,0xe6,0xb4,0xbe,0x00,0x05,0xff,
+-	0xe6,0xb5,0xb7,0x00,0xd3,0x18,0xe2,0xfe,0xe3,0xe1,0xed,0xe3,0x10,0x09,0x05,0xff,
+-	0xf0,0xa3,0xbd,0x9e,0x00,0x05,0xff,0xf0,0xa3,0xbe,0x8e,0x00,0xd2,0x13,0xe1,0x16,
++	0xe2,0xa5,0xdf,0x91,0x11,0x10,0x09,0x05,0xff,0xf0,0xa1,0x8d,0xaa,0x00,0x05,0xff,
++	0xe5,0xac,0x88,0x00,0x05,0xff,0xe5,0xac,0xbe,0x00,0xe3,0xeb,0xdf,0xd2,0x14,0xe1,
++	0xba,0xdf,0x10,0x08,0x05,0xff,0xe5,0xaf,0xb3,0x00,0x05,0xff,0xf0,0xa1,0xac,0x98,
++	0x00,0xe1,0xc6,0xdf,0x10,0x08,0x05,0xff,0xe5,0xbc,0xb3,0x00,0x05,0xff,0xe5,0xb0,
++	0xa2,0x00,0xd1,0xd5,0xd0,0x6a,0xcf,0x86,0xe5,0x1b,0xe5,0xd4,0x19,0xe3,0x54,0xe4,
++	0xe2,0x32,0xe4,0xe1,0x21,0xe4,0x10,0x08,0x05,0xff,0xe6,0xb4,0xbe,0x00,0x05,0xff,
++	0xe6,0xb5,0xb7,0x00,0xd3,0x18,0xe2,0x9e,0xe4,0xe1,0x8d,0xe4,0x10,0x09,0x05,0xff,
++	0xf0,0xa3,0xbd,0x9e,0x00,0x05,0xff,0xf0,0xa3,0xbe,0x8e,0x00,0xd2,0x13,0xe1,0xb6,
+ 	0xe4,0x10,0x08,0x05,0xff,0xe7,0x81,0xbd,0x00,0x05,0xff,0xe7,0x81,0xb7,0x00,0xd1,
+ 	0x11,0x10,0x08,0x05,0xff,0xe7,0x85,0x85,0x00,0x05,0xff,0xf0,0xa4,0x89,0xa3,0x00,
+ 	0x10,0x08,0x05,0xff,0xe7,0x86,0x9c,0x00,0x05,0xff,0xe4,0x8e,0xab,0x00,0xcf,0x86,
+-	0xe5,0x18,0xe6,0xd4,0x1a,0xe3,0x50,0xe5,0xe2,0x36,0xe5,0xe1,0x23,0xe5,0x10,0x08,
++	0xe5,0xb8,0xe6,0xd4,0x1a,0xe3,0xf0,0xe5,0xe2,0xd6,0xe5,0xe1,0xc3,0xe5,0x10,0x08,
+ 	0x05,0xff,0xe7,0x9b,0xb4,0x00,0x05,0xff,0xf0,0xa5,0x83,0xb3,0x00,0xd3,0x16,0xe2,
+-	0x98,0xe5,0xe1,0x86,0xe5,0x10,0x08,0x05,0xff,0xe7,0xa3,0x8c,0x00,0x05,0xff,0xe4,
+-	0x83,0xa3,0x00,0xd2,0x13,0xe1,0xb4,0xe5,0x10,0x08,0x05,0xff,0xe4,0x84,0xaf,0x00,
++	0x38,0xe6,0xe1,0x26,0xe6,0x10,0x08,0x05,0xff,0xe7,0xa3,0x8c,0x00,0x05,0xff,0xe4,
++	0x83,0xa3,0x00,0xd2,0x13,0xe1,0x54,0xe6,0x10,0x08,0x05,0xff,0xe4,0x84,0xaf,0x00,
+ 	0x05,0xff,0xe7,0xa9,0x80,0x00,0xd1,0x12,0x10,0x09,0x05,0xff,0xf0,0xa5,0xa5,0xbc,
+ 	0x00,0x05,0xff,0xf0,0xa5,0xaa,0xa7,0x00,0x10,0x09,0x05,0xff,0xf0,0xa5,0xaa,0xa7,
+-	0x00,0x05,0xff,0xe7,0xaa,0xae,0x00,0xe0,0xca,0xe8,0xcf,0x86,0xd5,0x1d,0xe4,0x3f,
+-	0xe7,0xe3,0xfb,0xe6,0xe2,0xd9,0xe6,0xe1,0xc8,0xe6,0x10,0x09,0x05,0xff,0xf0,0xa3,
+-	0x8d,0x9f,0x00,0x05,0xff,0xe4,0x8f,0x95,0x00,0xd4,0x19,0xe3,0xe6,0xe7,0xe2,0xc2,
+-	0xe7,0xe1,0xb1,0xe7,0x10,0x08,0x05,0xff,0xe8,0x8d,0x93,0x00,0x05,0xff,0xe8,0x8f,
+-	0x8a,0x00,0xd3,0x18,0xe2,0x31,0xe8,0xe1,0x20,0xe8,0x10,0x09,0x05,0xff,0xf0,0xa6,
+-	0xbe,0xb1,0x00,0x05,0xff,0xf0,0xa7,0x83,0x92,0x00,0xd2,0x13,0xe1,0x49,0xe8,0x10,
++	0x00,0x05,0xff,0xe7,0xaa,0xae,0x00,0xe0,0x6a,0xe9,0xcf,0x86,0xd5,0x1d,0xe4,0xdf,
++	0xe7,0xe3,0x9b,0xe7,0xe2,0x79,0xe7,0xe1,0x68,0xe7,0x10,0x09,0x05,0xff,0xf0,0xa3,
++	0x8d,0x9f,0x00,0x05,0xff,0xe4,0x8f,0x95,0x00,0xd4,0x19,0xe3,0x86,0xe8,0xe2,0x62,
++	0xe8,0xe1,0x51,0xe8,0x10,0x08,0x05,0xff,0xe8,0x8d,0x93,0x00,0x05,0xff,0xe8,0x8f,
++	0x8a,0x00,0xd3,0x18,0xe2,0xd1,0xe8,0xe1,0xc0,0xe8,0x10,0x09,0x05,0xff,0xf0,0xa6,
++	0xbe,0xb1,0x00,0x05,0xff,0xf0,0xa7,0x83,0x92,0x00,0xd2,0x13,0xe1,0xe9,0xe8,0x10,
+ 	0x08,0x05,0xff,0xe8,0x9a,0x88,0x00,0x05,0xff,0xe8,0x9c,0x8e,0x00,0xd1,0x10,0x10,
+ 	0x08,0x05,0xff,0xe8,0x9c,0xa8,0x00,0x05,0xff,0xe8,0x9d,0xab,0x00,0x10,0x08,0x05,
+ 	0xff,0xe8,0x9e,0x86,0x00,0x05,0xff,0xe4,0xb5,0x97,0x00,0x00,0x00,0x00,0x00,0x00,
+ 	/* nfdi_30200 */
+-	0x57,0x04,0x01,0x00,0xc6,0xd5,0x13,0xe4,0x68,0x53,0xe3,0xa2,0x4e,0xe2,0x1b,0x49,
+-	0xc1,0xe0,0x47,0x47,0xcf,0x06,0x01,0x00,0xd4,0x2a,0xe3,0x99,0x99,0xe2,0x48,0x99,
+-	0xe1,0x50,0x87,0xe0,0xe0,0x6a,0xcf,0x86,0xc5,0xe4,0x3f,0x63,0xe3,0x8a,0x5e,0xe2,
+-	0x20,0x5c,0xe1,0x53,0x5b,0xe0,0x18,0x5b,0xcf,0x86,0xe5,0xdd,0x5a,0x64,0xc0,0x5a,
+-	0x0b,0x00,0x83,0xe2,0x3c,0xf0,0xe1,0x26,0xed,0xe0,0xa3,0xeb,0xcf,0x86,0xd5,0x31,
+-	0xc4,0xe3,0x23,0xc6,0xe2,0xfc,0xc3,0xe1,0xdd,0xc2,0xe0,0x6d,0xb9,0xcf,0x86,0xe5,
+-	0x60,0xb5,0xe4,0x59,0xb4,0xe3,0x4a,0xb3,0xe2,0xa1,0xb2,0xe1,0x7c,0xb2,0xe0,0x55,
+-	0xb2,0xcf,0x86,0xe5,0x22,0xb2,0x94,0x07,0x63,0x0d,0xb2,0x07,0x00,0x07,0x00,0xe4,
+-	0x0f,0xeb,0xd3,0x08,0xcf,0x86,0xcf,0x06,0x05,0x00,0xd2,0x0b,0xe1,0x1e,0xd8,0xcf,
+-	0x86,0xcf,0x06,0x05,0x00,0xd1,0x0e,0xe0,0x0d,0xd9,0xcf,0x86,0xe5,0xd2,0xd8,0xcf,
+-	0x06,0x11,0x00,0xd0,0x0b,0xcf,0x86,0xe5,0x0d,0xd9,0xcf,0x06,0x13,0x00,0xcf,0x86,
+-	0xd5,0x06,0xcf,0x06,0x00,0x00,0xe4,0xa8,0xea,0xe3,0x91,0xe9,0xd2,0xa0,0xe1,0x47,
+-	0xdd,0xd0,0x21,0xcf,0x86,0xe5,0x48,0xda,0xe4,0xc4,0xd9,0xe3,0x82,0xd9,0xe2,0x61,
+-	0xd9,0xe1,0x4f,0xd9,0x10,0x08,0x05,0xff,0xe4,0xb8,0xbd,0x00,0x05,0xff,0xe4,0xb8,
+-	0xb8,0x00,0xcf,0x86,0xd5,0x1c,0xe4,0xa4,0xdb,0xe3,0x63,0xdb,0xe2,0x42,0xdb,0xe1,
+-	0x31,0xdb,0x10,0x08,0x05,0xff,0xe5,0x92,0xa2,0x00,0x05,0xff,0xe5,0x93,0xb6,0x00,
+-	0xd4,0x34,0xd3,0x18,0xe2,0x2b,0xdc,0xe1,0x1a,0xdc,0x10,0x09,0x05,0xff,0xf0,0xa1,
+-	0x9a,0xa8,0x00,0x05,0xff,0xf0,0xa1,0x9b,0xaa,0x00,0xe2,0x4b,0xdc,0x91,0x11,0x10,
+-	0x09,0x05,0xff,0xf0,0xa1,0x8d,0xaa,0x00,0x05,0xff,0xe5,0xac,0x88,0x00,0x05,0xff,
+-	0xe5,0xac,0xbe,0x00,0xe3,0x91,0xdc,0xd2,0x14,0xe1,0x60,0xdc,0x10,0x08,0x05,0xff,
+-	0xe5,0xaf,0xb3,0x00,0x05,0xff,0xf0,0xa1,0xac,0x98,0x00,0xe1,0x6c,0xdc,0x10,0x08,
+-	0x05,0xff,0xe5,0xbc,0xb3,0x00,0x05,0xff,0xe5,0xb0,0xa2,0x00,0xd1,0xd5,0xd0,0x6a,
+-	0xcf,0x86,0xe5,0xc1,0xe1,0xd4,0x19,0xe3,0xfa,0xe0,0xe2,0xd8,0xe0,0xe1,0xc7,0xe0,
+-	0x10,0x08,0x05,0xff,0xe6,0xb4,0xbe,0x00,0x05,0xff,0xe6,0xb5,0xb7,0x00,0xd3,0x18,
+-	0xe2,0x44,0xe1,0xe1,0x33,0xe1,0x10,0x09,0x05,0xff,0xf0,0xa3,0xbd,0x9e,0x00,0x05,
+-	0xff,0xf0,0xa3,0xbe,0x8e,0x00,0xd2,0x13,0xe1,0x5c,0xe1,0x10,0x08,0x05,0xff,0xe7,
+-	0x81,0xbd,0x00,0x05,0xff,0xe7,0x81,0xb7,0x00,0xd1,0x11,0x10,0x08,0x05,0xff,0xe7,
+-	0x85,0x85,0x00,0x05,0xff,0xf0,0xa4,0x89,0xa3,0x00,0x10,0x08,0x05,0xff,0xe7,0x86,
+-	0x9c,0x00,0x05,0xff,0xe4,0x8e,0xab,0x00,0xcf,0x86,0xe5,0x5e,0xe3,0xd4,0x1a,0xe3,
+-	0x96,0xe2,0xe2,0x7c,0xe2,0xe1,0x69,0xe2,0x10,0x08,0x05,0xff,0xe7,0x9b,0xb4,0x00,
+-	0x05,0xff,0xf0,0xa5,0x83,0xb3,0x00,0xd3,0x16,0xe2,0xde,0xe2,0xe1,0xcc,0xe2,0x10,
+-	0x08,0x05,0xff,0xe7,0xa3,0x8c,0x00,0x05,0xff,0xe4,0x83,0xa3,0x00,0xd2,0x13,0xe1,
+-	0xfa,0xe2,0x10,0x08,0x05,0xff,0xe4,0x84,0xaf,0x00,0x05,0xff,0xe7,0xa9,0x80,0x00,
+-	0xd1,0x12,0x10,0x09,0x05,0xff,0xf0,0xa5,0xa5,0xbc,0x00,0x05,0xff,0xf0,0xa5,0xaa,
+-	0xa7,0x00,0x10,0x09,0x05,0xff,0xf0,0xa5,0xaa,0xa7,0x00,0x05,0xff,0xe7,0xaa,0xae,
+-	0x00,0xe0,0x10,0xe6,0xcf,0x86,0xd5,0x1d,0xe4,0x85,0xe4,0xe3,0x41,0xe4,0xe2,0x1f,
+-	0xe4,0xe1,0x0e,0xe4,0x10,0x09,0x05,0xff,0xf0,0xa3,0x8d,0x9f,0x00,0x05,0xff,0xe4,
+-	0x8f,0x95,0x00,0xd4,0x19,0xe3,0x2c,0xe5,0xe2,0x08,0xe5,0xe1,0xf7,0xe4,0x10,0x08,
+-	0x05,0xff,0xe8,0x8d,0x93,0x00,0x05,0xff,0xe8,0x8f,0x8a,0x00,0xd3,0x18,0xe2,0x77,
+-	0xe5,0xe1,0x66,0xe5,0x10,0x09,0x05,0xff,0xf0,0xa6,0xbe,0xb1,0x00,0x05,0xff,0xf0,
+-	0xa7,0x83,0x92,0x00,0xd2,0x13,0xe1,0x8f,0xe5,0x10,0x08,0x05,0xff,0xe8,0x9a,0x88,
+-	0x00,0x05,0xff,0xe8,0x9c,0x8e,0x00,0xd1,0x10,0x10,0x08,0x05,0xff,0xe8,0x9c,0xa8,
+-	0x00,0x05,0xff,0xe8,0x9d,0xab,0x00,0x10,0x08,0x05,0xff,0xe8,0x9e,0x86,0x00,0x05,
+-	0xff,0xe4,0xb5,0x97,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++	0x57,0x04,0x01,0x00,0xc6,0xd5,0x16,0xe4,0x82,0x53,0xe3,0xbb,0x4e,0xe2,0x34,0x49,
++	0xc1,0xe0,0x60,0x47,0xcf,0x86,0x65,0x44,0x47,0x01,0x00,0xd4,0x2a,0xe3,0x1c,0x9a,
++	0xe2,0xcb,0x99,0xe1,0x9e,0x87,0xe0,0xf8,0x6a,0xcf,0x86,0xc5,0xe4,0x57,0x63,0xe3,
++	0xa2,0x5e,0xe2,0x38,0x5c,0xe1,0x6b,0x5b,0xe0,0x30,0x5b,0xcf,0x86,0xe5,0xf5,0x5a,
++	0x64,0xd8,0x5a,0x0b,0x00,0x83,0xe2,0xea,0xf0,0xe1,0xc3,0xed,0xe0,0x40,0xec,0xcf,
++	0x86,0xd5,0x31,0xc4,0xe3,0xbb,0xc6,0xe2,0x94,0xc4,0xe1,0x75,0xc3,0xe0,0x05,0xba,
++	0xcf,0x86,0xe5,0xf8,0xb5,0xe4,0xf1,0xb4,0xe3,0xe2,0xb3,0xe2,0x39,0xb3,0xe1,0x14,
++	0xb3,0xe0,0xed,0xb2,0xcf,0x86,0xe5,0xba,0xb2,0x94,0x07,0x63,0xa5,0xb2,0x07,0x00,
++	0x07,0x00,0xe4,0xac,0xeb,0xd3,0x08,0xcf,0x86,0xcf,0x06,0x05,0x00,0xd2,0x0b,0xe1,
++	0xbb,0xd8,0xcf,0x86,0xcf,0x06,0x05,0x00,0xd1,0x0e,0xe0,0xaa,0xd9,0xcf,0x86,0xe5,
++	0x6f,0xd9,0xcf,0x06,0x11,0x00,0xd0,0x0b,0xcf,0x86,0xe5,0xaa,0xd9,0xcf,0x06,0x13,
++	0x00,0xcf,0x86,0xd5,0x06,0xcf,0x06,0x00,0x00,0xe4,0x45,0xeb,0xe3,0x2e,0xea,0xd2,
++	0xa0,0xe1,0xe4,0xdd,0xd0,0x21,0xcf,0x86,0xe5,0xe5,0xda,0xe4,0x61,0xda,0xe3,0x1f,
++	0xda,0xe2,0xfe,0xd9,0xe1,0xec,0xd9,0x10,0x08,0x05,0xff,0xe4,0xb8,0xbd,0x00,0x05,
++	0xff,0xe4,0xb8,0xb8,0x00,0xcf,0x86,0xd5,0x1c,0xe4,0x41,0xdc,0xe3,0x00,0xdc,0xe2,
++	0xdf,0xdb,0xe1,0xce,0xdb,0x10,0x08,0x05,0xff,0xe5,0x92,0xa2,0x00,0x05,0xff,0xe5,
++	0x93,0xb6,0x00,0xd4,0x34,0xd3,0x18,0xe2,0xc8,0xdc,0xe1,0xb7,0xdc,0x10,0x09,0x05,
++	0xff,0xf0,0xa1,0x9a,0xa8,0x00,0x05,0xff,0xf0,0xa1,0x9b,0xaa,0x00,0xe2,0xe8,0xdc,
++	0x91,0x11,0x10,0x09,0x05,0xff,0xf0,0xa1,0x8d,0xaa,0x00,0x05,0xff,0xe5,0xac,0x88,
++	0x00,0x05,0xff,0xe5,0xac,0xbe,0x00,0xe3,0x2e,0xdd,0xd2,0x14,0xe1,0xfd,0xdc,0x10,
++	0x08,0x05,0xff,0xe5,0xaf,0xb3,0x00,0x05,0xff,0xf0,0xa1,0xac,0x98,0x00,0xe1,0x09,
++	0xdd,0x10,0x08,0x05,0xff,0xe5,0xbc,0xb3,0x00,0x05,0xff,0xe5,0xb0,0xa2,0x00,0xd1,
++	0xd5,0xd0,0x6a,0xcf,0x86,0xe5,0x5e,0xe2,0xd4,0x19,0xe3,0x97,0xe1,0xe2,0x75,0xe1,
++	0xe1,0x64,0xe1,0x10,0x08,0x05,0xff,0xe6,0xb4,0xbe,0x00,0x05,0xff,0xe6,0xb5,0xb7,
++	0x00,0xd3,0x18,0xe2,0xe1,0xe1,0xe1,0xd0,0xe1,0x10,0x09,0x05,0xff,0xf0,0xa3,0xbd,
++	0x9e,0x00,0x05,0xff,0xf0,0xa3,0xbe,0x8e,0x00,0xd2,0x13,0xe1,0xf9,0xe1,0x10,0x08,
++	0x05,0xff,0xe7,0x81,0xbd,0x00,0x05,0xff,0xe7,0x81,0xb7,0x00,0xd1,0x11,0x10,0x08,
++	0x05,0xff,0xe7,0x85,0x85,0x00,0x05,0xff,0xf0,0xa4,0x89,0xa3,0x00,0x10,0x08,0x05,
++	0xff,0xe7,0x86,0x9c,0x00,0x05,0xff,0xe4,0x8e,0xab,0x00,0xcf,0x86,0xe5,0xfb,0xe3,
++	0xd4,0x1a,0xe3,0x33,0xe3,0xe2,0x19,0xe3,0xe1,0x06,0xe3,0x10,0x08,0x05,0xff,0xe7,
++	0x9b,0xb4,0x00,0x05,0xff,0xf0,0xa5,0x83,0xb3,0x00,0xd3,0x16,0xe2,0x7b,0xe3,0xe1,
++	0x69,0xe3,0x10,0x08,0x05,0xff,0xe7,0xa3,0x8c,0x00,0x05,0xff,0xe4,0x83,0xa3,0x00,
++	0xd2,0x13,0xe1,0x97,0xe3,0x10,0x08,0x05,0xff,0xe4,0x84,0xaf,0x00,0x05,0xff,0xe7,
++	0xa9,0x80,0x00,0xd1,0x12,0x10,0x09,0x05,0xff,0xf0,0xa5,0xa5,0xbc,0x00,0x05,0xff,
++	0xf0,0xa5,0xaa,0xa7,0x00,0x10,0x09,0x05,0xff,0xf0,0xa5,0xaa,0xa7,0x00,0x05,0xff,
++	0xe7,0xaa,0xae,0x00,0xe0,0xad,0xe6,0xcf,0x86,0xd5,0x1d,0xe4,0x22,0xe5,0xe3,0xde,
++	0xe4,0xe2,0xbc,0xe4,0xe1,0xab,0xe4,0x10,0x09,0x05,0xff,0xf0,0xa3,0x8d,0x9f,0x00,
++	0x05,0xff,0xe4,0x8f,0x95,0x00,0xd4,0x19,0xe3,0xc9,0xe5,0xe2,0xa5,0xe5,0xe1,0x94,
++	0xe5,0x10,0x08,0x05,0xff,0xe8,0x8d,0x93,0x00,0x05,0xff,0xe8,0x8f,0x8a,0x00,0xd3,
++	0x18,0xe2,0x14,0xe6,0xe1,0x03,0xe6,0x10,0x09,0x05,0xff,0xf0,0xa6,0xbe,0xb1,0x00,
++	0x05,0xff,0xf0,0xa7,0x83,0x92,0x00,0xd2,0x13,0xe1,0x2c,0xe6,0x10,0x08,0x05,0xff,
++	0xe8,0x9a,0x88,0x00,0x05,0xff,0xe8,0x9c,0x8e,0x00,0xd1,0x10,0x10,0x08,0x05,0xff,
++	0xe8,0x9c,0xa8,0x00,0x05,0xff,0xe8,0x9d,0xab,0x00,0x10,0x08,0x05,0xff,0xe8,0x9e,
++	0x86,0x00,0x05,0xff,0xe4,0xb5,0x97,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 	/* nfdicf_c0100 */
+ 	0xd7,0xb0,0x56,0x04,0x01,0x00,0x95,0xa8,0xd4,0x5e,0xd3,0x2e,0xd2,0x16,0xd1,0x0a,
+ 	0x10,0x04,0x01,0x00,0x01,0xff,0x61,0x00,0x10,0x06,0x01,0xff,0x62,0x00,0x01,0xff,
+@@ -299,3174 +299,3184 @@ static const unsigned char utf8data[64080] = {
+ 	0xd1,0x0c,0x10,0x06,0x01,0xff,0x74,0x00,0x01,0xff,0x75,0x00,0x10,0x06,0x01,0xff,
+ 	0x76,0x00,0x01,0xff,0x77,0x00,0x92,0x16,0xd1,0x0c,0x10,0x06,0x01,0xff,0x78,0x00,
+ 	0x01,0xff,0x79,0x00,0x10,0x06,0x01,0xff,0x7a,0x00,0x01,0x00,0x01,0x00,0x01,0x00,
+-	0xc6,0xe5,0xf6,0x14,0xe4,0x6c,0x0d,0xe3,0x36,0x08,0xe2,0x1f,0x01,0xc1,0xd0,0x21,
+-	0xcf,0x86,0x55,0x04,0x01,0x00,0x54,0x04,0x01,0x00,0x93,0x13,0x52,0x04,0x01,0x00,
+-	0x91,0x0b,0x10,0x04,0x01,0x00,0x01,0xff,0xce,0xbc,0x00,0x01,0x00,0x01,0x00,0xcf,
+-	0x86,0xe5,0x9d,0x44,0xd4,0x7f,0xd3,0x3f,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,
+-	0x61,0xcc,0x80,0x00,0x01,0xff,0x61,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x61,0xcc,
+-	0x82,0x00,0x01,0xff,0x61,0xcc,0x83,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x61,0xcc,
+-	0x88,0x00,0x01,0xff,0x61,0xcc,0x8a,0x00,0x10,0x07,0x01,0xff,0xc3,0xa6,0x00,0x01,
+-	0xff,0x63,0xcc,0xa7,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x65,0xcc,0x80,
+-	0x00,0x01,0xff,0x65,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x65,0xcc,0x82,0x00,0x01,
+-	0xff,0x65,0xcc,0x88,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x69,0xcc,0x80,0x00,0x01,
+-	0xff,0x69,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x69,0xcc,0x82,0x00,0x01,0xff,0x69,
+-	0xcc,0x88,0x00,0xd3,0x3b,0xd2,0x1f,0xd1,0x0f,0x10,0x07,0x01,0xff,0xc3,0xb0,0x00,
+-	0x01,0xff,0x6e,0xcc,0x83,0x00,0x10,0x08,0x01,0xff,0x6f,0xcc,0x80,0x00,0x01,0xff,
+-	0x6f,0xcc,0x81,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x6f,0xcc,0x82,0x00,0x01,0xff,
+-	0x6f,0xcc,0x83,0x00,0x10,0x08,0x01,0xff,0x6f,0xcc,0x88,0x00,0x01,0x00,0xd2,0x1f,
+-	0xd1,0x0f,0x10,0x07,0x01,0xff,0xc3,0xb8,0x00,0x01,0xff,0x75,0xcc,0x80,0x00,0x10,
+-	0x08,0x01,0xff,0x75,0xcc,0x81,0x00,0x01,0xff,0x75,0xcc,0x82,0x00,0xd1,0x10,0x10,
+-	0x08,0x01,0xff,0x75,0xcc,0x88,0x00,0x01,0xff,0x79,0xcc,0x81,0x00,0x10,0x07,0x01,
+-	0xff,0xc3,0xbe,0x00,0x01,0xff,0x73,0x73,0x00,0xe1,0xd4,0x03,0xe0,0xeb,0x01,0xcf,
+-	0x86,0xd5,0xfb,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x61,
+-	0xcc,0x84,0x00,0x01,0xff,0x61,0xcc,0x84,0x00,0x10,0x08,0x01,0xff,0x61,0xcc,0x86,
+-	0x00,0x01,0xff,0x61,0xcc,0x86,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x61,0xcc,0xa8,
+-	0x00,0x01,0xff,0x61,0xcc,0xa8,0x00,0x10,0x08,0x01,0xff,0x63,0xcc,0x81,0x00,0x01,
+-	0xff,0x63,0xcc,0x81,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x63,0xcc,0x82,
+-	0x00,0x01,0xff,0x63,0xcc,0x82,0x00,0x10,0x08,0x01,0xff,0x63,0xcc,0x87,0x00,0x01,
+-	0xff,0x63,0xcc,0x87,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x63,0xcc,0x8c,0x00,0x01,
+-	0xff,0x63,0xcc,0x8c,0x00,0x10,0x08,0x01,0xff,0x64,0xcc,0x8c,0x00,0x01,0xff,0x64,
+-	0xcc,0x8c,0x00,0xd3,0x3b,0xd2,0x1b,0xd1,0x0b,0x10,0x07,0x01,0xff,0xc4,0x91,0x00,
+-	0x01,0x00,0x10,0x08,0x01,0xff,0x65,0xcc,0x84,0x00,0x01,0xff,0x65,0xcc,0x84,0x00,
+-	0xd1,0x10,0x10,0x08,0x01,0xff,0x65,0xcc,0x86,0x00,0x01,0xff,0x65,0xcc,0x86,0x00,
+-	0x10,0x08,0x01,0xff,0x65,0xcc,0x87,0x00,0x01,0xff,0x65,0xcc,0x87,0x00,0xd2,0x20,
+-	0xd1,0x10,0x10,0x08,0x01,0xff,0x65,0xcc,0xa8,0x00,0x01,0xff,0x65,0xcc,0xa8,0x00,
+-	0x10,0x08,0x01,0xff,0x65,0xcc,0x8c,0x00,0x01,0xff,0x65,0xcc,0x8c,0x00,0xd1,0x10,
+-	0x10,0x08,0x01,0xff,0x67,0xcc,0x82,0x00,0x01,0xff,0x67,0xcc,0x82,0x00,0x10,0x08,
+-	0x01,0xff,0x67,0xcc,0x86,0x00,0x01,0xff,0x67,0xcc,0x86,0x00,0xd4,0x7b,0xd3,0x3b,
+-	0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x67,0xcc,0x87,0x00,0x01,0xff,0x67,0xcc,
+-	0x87,0x00,0x10,0x08,0x01,0xff,0x67,0xcc,0xa7,0x00,0x01,0xff,0x67,0xcc,0xa7,0x00,
+-	0xd1,0x10,0x10,0x08,0x01,0xff,0x68,0xcc,0x82,0x00,0x01,0xff,0x68,0xcc,0x82,0x00,
+-	0x10,0x07,0x01,0xff,0xc4,0xa7,0x00,0x01,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,
+-	0xff,0x69,0xcc,0x83,0x00,0x01,0xff,0x69,0xcc,0x83,0x00,0x10,0x08,0x01,0xff,0x69,
+-	0xcc,0x84,0x00,0x01,0xff,0x69,0xcc,0x84,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x69,
+-	0xcc,0x86,0x00,0x01,0xff,0x69,0xcc,0x86,0x00,0x10,0x08,0x01,0xff,0x69,0xcc,0xa8,
+-	0x00,0x01,0xff,0x69,0xcc,0xa8,0x00,0xd3,0x37,0xd2,0x17,0xd1,0x0c,0x10,0x08,0x01,
+-	0xff,0x69,0xcc,0x87,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xc4,0xb3,0x00,0x01,0x00,
+-	0xd1,0x10,0x10,0x08,0x01,0xff,0x6a,0xcc,0x82,0x00,0x01,0xff,0x6a,0xcc,0x82,0x00,
+-	0x10,0x08,0x01,0xff,0x6b,0xcc,0xa7,0x00,0x01,0xff,0x6b,0xcc,0xa7,0x00,0xd2,0x1c,
+-	0xd1,0x0c,0x10,0x04,0x01,0x00,0x01,0xff,0x6c,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,
+-	0x6c,0xcc,0x81,0x00,0x01,0xff,0x6c,0xcc,0xa7,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,
+-	0x6c,0xcc,0xa7,0x00,0x01,0xff,0x6c,0xcc,0x8c,0x00,0x10,0x08,0x01,0xff,0x6c,0xcc,
+-	0x8c,0x00,0x01,0xff,0xc5,0x80,0x00,0xcf,0x86,0xd5,0xed,0xd4,0x72,0xd3,0x37,0xd2,
+-	0x17,0xd1,0x0b,0x10,0x04,0x01,0x00,0x01,0xff,0xc5,0x82,0x00,0x10,0x04,0x01,0x00,
+-	0x01,0xff,0x6e,0xcc,0x81,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x6e,0xcc,0x81,0x00,
+-	0x01,0xff,0x6e,0xcc,0xa7,0x00,0x10,0x08,0x01,0xff,0x6e,0xcc,0xa7,0x00,0x01,0xff,
+-	0x6e,0xcc,0x8c,0x00,0xd2,0x1b,0xd1,0x10,0x10,0x08,0x01,0xff,0x6e,0xcc,0x8c,0x00,
+-	0x01,0xff,0xca,0xbc,0x6e,0x00,0x10,0x07,0x01,0xff,0xc5,0x8b,0x00,0x01,0x00,0xd1,
+-	0x10,0x10,0x08,0x01,0xff,0x6f,0xcc,0x84,0x00,0x01,0xff,0x6f,0xcc,0x84,0x00,0x10,
+-	0x08,0x01,0xff,0x6f,0xcc,0x86,0x00,0x01,0xff,0x6f,0xcc,0x86,0x00,0xd3,0x3b,0xd2,
+-	0x1b,0xd1,0x10,0x10,0x08,0x01,0xff,0x6f,0xcc,0x8b,0x00,0x01,0xff,0x6f,0xcc,0x8b,
+-	0x00,0x10,0x07,0x01,0xff,0xc5,0x93,0x00,0x01,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,
+-	0x72,0xcc,0x81,0x00,0x01,0xff,0x72,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x72,0xcc,
+-	0xa7,0x00,0x01,0xff,0x72,0xcc,0xa7,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,
+-	0x72,0xcc,0x8c,0x00,0x01,0xff,0x72,0xcc,0x8c,0x00,0x10,0x08,0x01,0xff,0x73,0xcc,
+-	0x81,0x00,0x01,0xff,0x73,0xcc,0x81,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x73,0xcc,
+-	0x82,0x00,0x01,0xff,0x73,0xcc,0x82,0x00,0x10,0x08,0x01,0xff,0x73,0xcc,0xa7,0x00,
+-	0x01,0xff,0x73,0xcc,0xa7,0x00,0xd4,0x7b,0xd3,0x3b,0xd2,0x20,0xd1,0x10,0x10,0x08,
+-	0x01,0xff,0x73,0xcc,0x8c,0x00,0x01,0xff,0x73,0xcc,0x8c,0x00,0x10,0x08,0x01,0xff,
+-	0x74,0xcc,0xa7,0x00,0x01,0xff,0x74,0xcc,0xa7,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,
+-	0x74,0xcc,0x8c,0x00,0x01,0xff,0x74,0xcc,0x8c,0x00,0x10,0x07,0x01,0xff,0xc5,0xa7,
+-	0x00,0x01,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x75,0xcc,0x83,0x00,0x01,
+-	0xff,0x75,0xcc,0x83,0x00,0x10,0x08,0x01,0xff,0x75,0xcc,0x84,0x00,0x01,0xff,0x75,
+-	0xcc,0x84,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x75,0xcc,0x86,0x00,0x01,0xff,0x75,
+-	0xcc,0x86,0x00,0x10,0x08,0x01,0xff,0x75,0xcc,0x8a,0x00,0x01,0xff,0x75,0xcc,0x8a,
+-	0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x75,0xcc,0x8b,0x00,0x01,
+-	0xff,0x75,0xcc,0x8b,0x00,0x10,0x08,0x01,0xff,0x75,0xcc,0xa8,0x00,0x01,0xff,0x75,
+-	0xcc,0xa8,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x77,0xcc,0x82,0x00,0x01,0xff,0x77,
+-	0xcc,0x82,0x00,0x10,0x08,0x01,0xff,0x79,0xcc,0x82,0x00,0x01,0xff,0x79,0xcc,0x82,
+-	0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x79,0xcc,0x88,0x00,0x01,0xff,0x7a,
+-	0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x7a,0xcc,0x81,0x00,0x01,0xff,0x7a,0xcc,0x87,
+-	0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x7a,0xcc,0x87,0x00,0x01,0xff,0x7a,0xcc,0x8c,
+-	0x00,0x10,0x08,0x01,0xff,0x7a,0xcc,0x8c,0x00,0x01,0xff,0x73,0x00,0xe0,0x65,0x01,
+-	0xcf,0x86,0xd5,0xb4,0xd4,0x5a,0xd3,0x2f,0xd2,0x16,0xd1,0x0b,0x10,0x04,0x01,0x00,
+-	0x01,0xff,0xc9,0x93,0x00,0x10,0x07,0x01,0xff,0xc6,0x83,0x00,0x01,0x00,0xd1,0x0b,
+-	0x10,0x07,0x01,0xff,0xc6,0x85,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xc9,0x94,0x00,
+-	0x01,0xff,0xc6,0x88,0x00,0xd2,0x19,0xd1,0x0b,0x10,0x04,0x01,0x00,0x01,0xff,0xc9,
+-	0x96,0x00,0x10,0x07,0x01,0xff,0xc9,0x97,0x00,0x01,0xff,0xc6,0x8c,0x00,0x51,0x04,
+-	0x01,0x00,0x10,0x07,0x01,0xff,0xc7,0x9d,0x00,0x01,0xff,0xc9,0x99,0x00,0xd3,0x32,
+-	0xd2,0x19,0xd1,0x0e,0x10,0x07,0x01,0xff,0xc9,0x9b,0x00,0x01,0xff,0xc6,0x92,0x00,
+-	0x10,0x04,0x01,0x00,0x01,0xff,0xc9,0xa0,0x00,0xd1,0x0b,0x10,0x07,0x01,0xff,0xc9,
+-	0xa3,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xc9,0xa9,0x00,0x01,0xff,0xc9,0xa8,0x00,
+-	0xd2,0x0f,0x91,0x0b,0x10,0x07,0x01,0xff,0xc6,0x99,0x00,0x01,0x00,0x01,0x00,0xd1,
+-	0x0e,0x10,0x07,0x01,0xff,0xc9,0xaf,0x00,0x01,0xff,0xc9,0xb2,0x00,0x10,0x04,0x01,
+-	0x00,0x01,0xff,0xc9,0xb5,0x00,0xd4,0x5d,0xd3,0x34,0xd2,0x1b,0xd1,0x10,0x10,0x08,
+-	0x01,0xff,0x6f,0xcc,0x9b,0x00,0x01,0xff,0x6f,0xcc,0x9b,0x00,0x10,0x07,0x01,0xff,
+-	0xc6,0xa3,0x00,0x01,0x00,0xd1,0x0b,0x10,0x07,0x01,0xff,0xc6,0xa5,0x00,0x01,0x00,
+-	0x10,0x07,0x01,0xff,0xca,0x80,0x00,0x01,0xff,0xc6,0xa8,0x00,0xd2,0x0f,0x91,0x0b,
+-	0x10,0x04,0x01,0x00,0x01,0xff,0xca,0x83,0x00,0x01,0x00,0xd1,0x0b,0x10,0x07,0x01,
+-	0xff,0xc6,0xad,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xca,0x88,0x00,0x01,0xff,0x75,
+-	0xcc,0x9b,0x00,0xd3,0x33,0xd2,0x1d,0xd1,0x0f,0x10,0x08,0x01,0xff,0x75,0xcc,0x9b,
+-	0x00,0x01,0xff,0xca,0x8a,0x00,0x10,0x07,0x01,0xff,0xca,0x8b,0x00,0x01,0xff,0xc6,
+-	0xb4,0x00,0xd1,0x0b,0x10,0x04,0x01,0x00,0x01,0xff,0xc6,0xb6,0x00,0x10,0x04,0x01,
+-	0x00,0x01,0xff,0xca,0x92,0x00,0xd2,0x0f,0x91,0x0b,0x10,0x07,0x01,0xff,0xc6,0xb9,
+-	0x00,0x01,0x00,0x01,0x00,0x91,0x0b,0x10,0x07,0x01,0xff,0xc6,0xbd,0x00,0x01,0x00,
+-	0x01,0x00,0xcf,0x86,0xd5,0xd4,0xd4,0x44,0xd3,0x16,0x52,0x04,0x01,0x00,0x51,0x07,
+-	0x01,0xff,0xc7,0x86,0x00,0x10,0x04,0x01,0x00,0x01,0xff,0xc7,0x89,0x00,0xd2,0x12,
+-	0x91,0x0b,0x10,0x07,0x01,0xff,0xc7,0x89,0x00,0x01,0x00,0x01,0xff,0xc7,0x8c,0x00,
+-	0xd1,0x0c,0x10,0x04,0x01,0x00,0x01,0xff,0x61,0xcc,0x8c,0x00,0x10,0x08,0x01,0xff,
+-	0x61,0xcc,0x8c,0x00,0x01,0xff,0x69,0xcc,0x8c,0x00,0xd3,0x46,0xd2,0x20,0xd1,0x10,
+-	0x10,0x08,0x01,0xff,0x69,0xcc,0x8c,0x00,0x01,0xff,0x6f,0xcc,0x8c,0x00,0x10,0x08,
+-	0x01,0xff,0x6f,0xcc,0x8c,0x00,0x01,0xff,0x75,0xcc,0x8c,0x00,0xd1,0x12,0x10,0x08,
+-	0x01,0xff,0x75,0xcc,0x8c,0x00,0x01,0xff,0x75,0xcc,0x88,0xcc,0x84,0x00,0x10,0x0a,
+-	0x01,0xff,0x75,0xcc,0x88,0xcc,0x84,0x00,0x01,0xff,0x75,0xcc,0x88,0xcc,0x81,0x00,
+-	0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x75,0xcc,0x88,0xcc,0x81,0x00,0x01,0xff,
+-	0x75,0xcc,0x88,0xcc,0x8c,0x00,0x10,0x0a,0x01,0xff,0x75,0xcc,0x88,0xcc,0x8c,0x00,
+-	0x01,0xff,0x75,0xcc,0x88,0xcc,0x80,0x00,0xd1,0x0e,0x10,0x0a,0x01,0xff,0x75,0xcc,
+-	0x88,0xcc,0x80,0x00,0x01,0x00,0x10,0x0a,0x01,0xff,0x61,0xcc,0x88,0xcc,0x84,0x00,
+-	0x01,0xff,0x61,0xcc,0x88,0xcc,0x84,0x00,0xd4,0x87,0xd3,0x41,0xd2,0x26,0xd1,0x14,
+-	0x10,0x0a,0x01,0xff,0x61,0xcc,0x87,0xcc,0x84,0x00,0x01,0xff,0x61,0xcc,0x87,0xcc,
+-	0x84,0x00,0x10,0x09,0x01,0xff,0xc3,0xa6,0xcc,0x84,0x00,0x01,0xff,0xc3,0xa6,0xcc,
+-	0x84,0x00,0xd1,0x0b,0x10,0x07,0x01,0xff,0xc7,0xa5,0x00,0x01,0x00,0x10,0x08,0x01,
+-	0xff,0x67,0xcc,0x8c,0x00,0x01,0xff,0x67,0xcc,0x8c,0x00,0xd2,0x20,0xd1,0x10,0x10,
+-	0x08,0x01,0xff,0x6b,0xcc,0x8c,0x00,0x01,0xff,0x6b,0xcc,0x8c,0x00,0x10,0x08,0x01,
+-	0xff,0x6f,0xcc,0xa8,0x00,0x01,0xff,0x6f,0xcc,0xa8,0x00,0xd1,0x14,0x10,0x0a,0x01,
+-	0xff,0x6f,0xcc,0xa8,0xcc,0x84,0x00,0x01,0xff,0x6f,0xcc,0xa8,0xcc,0x84,0x00,0x10,
+-	0x09,0x01,0xff,0xca,0x92,0xcc,0x8c,0x00,0x01,0xff,0xca,0x92,0xcc,0x8c,0x00,0xd3,
+-	0x38,0xd2,0x1a,0xd1,0x0f,0x10,0x08,0x01,0xff,0x6a,0xcc,0x8c,0x00,0x01,0xff,0xc7,
+-	0xb3,0x00,0x10,0x07,0x01,0xff,0xc7,0xb3,0x00,0x01,0x00,0xd1,0x10,0x10,0x08,0x01,
+-	0xff,0x67,0xcc,0x81,0x00,0x01,0xff,0x67,0xcc,0x81,0x00,0x10,0x07,0x04,0xff,0xc6,
+-	0x95,0x00,0x04,0xff,0xc6,0xbf,0x00,0xd2,0x24,0xd1,0x10,0x10,0x08,0x04,0xff,0x6e,
+-	0xcc,0x80,0x00,0x04,0xff,0x6e,0xcc,0x80,0x00,0x10,0x0a,0x01,0xff,0x61,0xcc,0x8a,
+-	0xcc,0x81,0x00,0x01,0xff,0x61,0xcc,0x8a,0xcc,0x81,0x00,0xd1,0x12,0x10,0x09,0x01,
+-	0xff,0xc3,0xa6,0xcc,0x81,0x00,0x01,0xff,0xc3,0xa6,0xcc,0x81,0x00,0x10,0x09,0x01,
+-	0xff,0xc3,0xb8,0xcc,0x81,0x00,0x01,0xff,0xc3,0xb8,0xcc,0x81,0x00,0xe2,0x31,0x02,
+-	0xe1,0xad,0x44,0xe0,0xc8,0x01,0xcf,0x86,0xd5,0xfb,0xd4,0x80,0xd3,0x40,0xd2,0x20,
+-	0xd1,0x10,0x10,0x08,0x01,0xff,0x61,0xcc,0x8f,0x00,0x01,0xff,0x61,0xcc,0x8f,0x00,
+-	0x10,0x08,0x01,0xff,0x61,0xcc,0x91,0x00,0x01,0xff,0x61,0xcc,0x91,0x00,0xd1,0x10,
+-	0x10,0x08,0x01,0xff,0x65,0xcc,0x8f,0x00,0x01,0xff,0x65,0xcc,0x8f,0x00,0x10,0x08,
+-	0x01,0xff,0x65,0xcc,0x91,0x00,0x01,0xff,0x65,0xcc,0x91,0x00,0xd2,0x20,0xd1,0x10,
+-	0x10,0x08,0x01,0xff,0x69,0xcc,0x8f,0x00,0x01,0xff,0x69,0xcc,0x8f,0x00,0x10,0x08,
+-	0x01,0xff,0x69,0xcc,0x91,0x00,0x01,0xff,0x69,0xcc,0x91,0x00,0xd1,0x10,0x10,0x08,
+-	0x01,0xff,0x6f,0xcc,0x8f,0x00,0x01,0xff,0x6f,0xcc,0x8f,0x00,0x10,0x08,0x01,0xff,
+-	0x6f,0xcc,0x91,0x00,0x01,0xff,0x6f,0xcc,0x91,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,
+-	0x10,0x08,0x01,0xff,0x72,0xcc,0x8f,0x00,0x01,0xff,0x72,0xcc,0x8f,0x00,0x10,0x08,
+-	0x01,0xff,0x72,0xcc,0x91,0x00,0x01,0xff,0x72,0xcc,0x91,0x00,0xd1,0x10,0x10,0x08,
+-	0x01,0xff,0x75,0xcc,0x8f,0x00,0x01,0xff,0x75,0xcc,0x8f,0x00,0x10,0x08,0x01,0xff,
+-	0x75,0xcc,0x91,0x00,0x01,0xff,0x75,0xcc,0x91,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,
+-	0x04,0xff,0x73,0xcc,0xa6,0x00,0x04,0xff,0x73,0xcc,0xa6,0x00,0x10,0x08,0x04,0xff,
+-	0x74,0xcc,0xa6,0x00,0x04,0xff,0x74,0xcc,0xa6,0x00,0xd1,0x0b,0x10,0x07,0x04,0xff,
+-	0xc8,0x9d,0x00,0x04,0x00,0x10,0x08,0x04,0xff,0x68,0xcc,0x8c,0x00,0x04,0xff,0x68,
+-	0xcc,0x8c,0x00,0xd4,0x79,0xd3,0x31,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x06,0xff,0xc6,
+-	0x9e,0x00,0x07,0x00,0x10,0x07,0x04,0xff,0xc8,0xa3,0x00,0x04,0x00,0xd1,0x0b,0x10,
+-	0x07,0x04,0xff,0xc8,0xa5,0x00,0x04,0x00,0x10,0x08,0x04,0xff,0x61,0xcc,0x87,0x00,
+-	0x04,0xff,0x61,0xcc,0x87,0x00,0xd2,0x24,0xd1,0x10,0x10,0x08,0x04,0xff,0x65,0xcc,
+-	0xa7,0x00,0x04,0xff,0x65,0xcc,0xa7,0x00,0x10,0x0a,0x04,0xff,0x6f,0xcc,0x88,0xcc,
+-	0x84,0x00,0x04,0xff,0x6f,0xcc,0x88,0xcc,0x84,0x00,0xd1,0x14,0x10,0x0a,0x04,0xff,
+-	0x6f,0xcc,0x83,0xcc,0x84,0x00,0x04,0xff,0x6f,0xcc,0x83,0xcc,0x84,0x00,0x10,0x08,
+-	0x04,0xff,0x6f,0xcc,0x87,0x00,0x04,0xff,0x6f,0xcc,0x87,0x00,0xd3,0x27,0xe2,0x0b,
+-	0x43,0xd1,0x14,0x10,0x0a,0x04,0xff,0x6f,0xcc,0x87,0xcc,0x84,0x00,0x04,0xff,0x6f,
+-	0xcc,0x87,0xcc,0x84,0x00,0x10,0x08,0x04,0xff,0x79,0xcc,0x84,0x00,0x04,0xff,0x79,
+-	0xcc,0x84,0x00,0xd2,0x13,0x51,0x04,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb1,0xa5,
+-	0x00,0x08,0xff,0xc8,0xbc,0x00,0xd1,0x0b,0x10,0x04,0x08,0x00,0x08,0xff,0xc6,0x9a,
+-	0x00,0x10,0x08,0x08,0xff,0xe2,0xb1,0xa6,0x00,0x08,0x00,0xcf,0x86,0x95,0x5f,0x94,
+-	0x5b,0xd3,0x2f,0xd2,0x16,0xd1,0x0b,0x10,0x04,0x08,0x00,0x08,0xff,0xc9,0x82,0x00,
+-	0x10,0x04,0x09,0x00,0x09,0xff,0xc6,0x80,0x00,0xd1,0x0e,0x10,0x07,0x09,0xff,0xca,
+-	0x89,0x00,0x09,0xff,0xca,0x8c,0x00,0x10,0x07,0x09,0xff,0xc9,0x87,0x00,0x09,0x00,
+-	0xd2,0x16,0xd1,0x0b,0x10,0x07,0x09,0xff,0xc9,0x89,0x00,0x09,0x00,0x10,0x07,0x09,
+-	0xff,0xc9,0x8b,0x00,0x09,0x00,0xd1,0x0b,0x10,0x07,0x09,0xff,0xc9,0x8d,0x00,0x09,
+-	0x00,0x10,0x07,0x09,0xff,0xc9,0x8f,0x00,0x09,0x00,0x01,0x00,0x01,0x00,0xd1,0x8b,
+-	0xd0,0x0c,0xcf,0x86,0xe5,0xfa,0x42,0x64,0xd9,0x42,0x01,0xe6,0xcf,0x86,0xd5,0x2a,
+-	0xe4,0x82,0x43,0xe3,0x69,0x43,0xd2,0x11,0xe1,0x48,0x43,0x10,0x07,0x01,0xff,0xcc,
+-	0x80,0x00,0x01,0xff,0xcc,0x81,0x00,0xe1,0x4f,0x43,0x10,0x09,0x01,0xff,0xcc,0x88,
+-	0xcc,0x81,0x00,0x01,0xff,0xce,0xb9,0x00,0xd4,0x0f,0x93,0x0b,0x92,0x07,0x61,0x94,
+-	0x43,0x01,0xea,0x06,0xe6,0x06,0xe6,0xd3,0x2c,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x0a,
+-	0xff,0xcd,0xb1,0x00,0x0a,0x00,0x10,0x07,0x0a,0xff,0xcd,0xb3,0x00,0x0a,0x00,0xd1,
+-	0x0b,0x10,0x07,0x01,0xff,0xca,0xb9,0x00,0x01,0x00,0x10,0x07,0x0a,0xff,0xcd,0xb7,
+-	0x00,0x0a,0x00,0xd2,0x07,0x61,0x80,0x43,0x00,0x00,0x51,0x04,0x09,0x00,0x10,0x06,
+-	0x01,0xff,0x3b,0x00,0x10,0xff,0xcf,0xb3,0x00,0xe0,0x31,0x01,0xcf,0x86,0xd5,0xd3,
+-	0xd4,0x5f,0xd3,0x21,0x52,0x04,0x00,0x00,0xd1,0x0d,0x10,0x04,0x01,0x00,0x01,0xff,
+-	0xc2,0xa8,0xcc,0x81,0x00,0x10,0x09,0x01,0xff,0xce,0xb1,0xcc,0x81,0x00,0x01,0xff,
+-	0xc2,0xb7,0x00,0xd2,0x1f,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xb5,0xcc,0x81,0x00,
+-	0x01,0xff,0xce,0xb7,0xcc,0x81,0x00,0x10,0x09,0x01,0xff,0xce,0xb9,0xcc,0x81,0x00,
+-	0x00,0x00,0xd1,0x0d,0x10,0x09,0x01,0xff,0xce,0xbf,0xcc,0x81,0x00,0x00,0x00,0x10,
+-	0x09,0x01,0xff,0xcf,0x85,0xcc,0x81,0x00,0x01,0xff,0xcf,0x89,0xcc,0x81,0x00,0xd3,
+-	0x3c,0xd2,0x20,0xd1,0x12,0x10,0x0b,0x01,0xff,0xce,0xb9,0xcc,0x88,0xcc,0x81,0x00,
+-	0x01,0xff,0xce,0xb1,0x00,0x10,0x07,0x01,0xff,0xce,0xb2,0x00,0x01,0xff,0xce,0xb3,
+-	0x00,0xd1,0x0e,0x10,0x07,0x01,0xff,0xce,0xb4,0x00,0x01,0xff,0xce,0xb5,0x00,0x10,
+-	0x07,0x01,0xff,0xce,0xb6,0x00,0x01,0xff,0xce,0xb7,0x00,0xd2,0x1c,0xd1,0x0e,0x10,
+-	0x07,0x01,0xff,0xce,0xb8,0x00,0x01,0xff,0xce,0xb9,0x00,0x10,0x07,0x01,0xff,0xce,
+-	0xba,0x00,0x01,0xff,0xce,0xbb,0x00,0xd1,0x0e,0x10,0x07,0x01,0xff,0xce,0xbc,0x00,
+-	0x01,0xff,0xce,0xbd,0x00,0x10,0x07,0x01,0xff,0xce,0xbe,0x00,0x01,0xff,0xce,0xbf,
+-	0x00,0xe4,0x6e,0x43,0xd3,0x35,0xd2,0x19,0xd1,0x0e,0x10,0x07,0x01,0xff,0xcf,0x80,
+-	0x00,0x01,0xff,0xcf,0x81,0x00,0x10,0x04,0x00,0x00,0x01,0xff,0xcf,0x83,0x00,0xd1,
+-	0x0e,0x10,0x07,0x01,0xff,0xcf,0x84,0x00,0x01,0xff,0xcf,0x85,0x00,0x10,0x07,0x01,
+-	0xff,0xcf,0x86,0x00,0x01,0xff,0xcf,0x87,0x00,0xe2,0x14,0x43,0xd1,0x0e,0x10,0x07,
+-	0x01,0xff,0xcf,0x88,0x00,0x01,0xff,0xcf,0x89,0x00,0x10,0x09,0x01,0xff,0xce,0xb9,
+-	0xcc,0x88,0x00,0x01,0xff,0xcf,0x85,0xcc,0x88,0x00,0xcf,0x86,0xd5,0x94,0xd4,0x3c,
+-	0xd3,0x13,0x92,0x0f,0x51,0x04,0x01,0x00,0x10,0x07,0x01,0xff,0xcf,0x83,0x00,0x01,
+-	0x00,0x01,0x00,0xd2,0x07,0x61,0x23,0x43,0x01,0x00,0xd1,0x12,0x10,0x09,0x01,0xff,
+-	0xce,0xbf,0xcc,0x81,0x00,0x01,0xff,0xcf,0x85,0xcc,0x81,0x00,0x10,0x09,0x01,0xff,
+-	0xcf,0x89,0xcc,0x81,0x00,0x0a,0xff,0xcf,0x97,0x00,0xd3,0x2c,0xd2,0x11,0xe1,0x2f,
+-	0x43,0x10,0x07,0x01,0xff,0xce,0xb2,0x00,0x01,0xff,0xce,0xb8,0x00,0xd1,0x10,0x10,
+-	0x09,0x01,0xff,0xcf,0x92,0xcc,0x88,0x00,0x01,0xff,0xcf,0x86,0x00,0x10,0x07,0x01,
+-	0xff,0xcf,0x80,0x00,0x04,0x00,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x06,0xff,0xcf,0x99,
+-	0x00,0x06,0x00,0x10,0x07,0x01,0xff,0xcf,0x9b,0x00,0x04,0x00,0xd1,0x0b,0x10,0x07,
+-	0x01,0xff,0xcf,0x9d,0x00,0x04,0x00,0x10,0x07,0x01,0xff,0xcf,0x9f,0x00,0x04,0x00,
+-	0xd4,0x58,0xd3,0x2c,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x01,0xff,0xcf,0xa1,0x00,0x04,
+-	0x00,0x10,0x07,0x01,0xff,0xcf,0xa3,0x00,0x01,0x00,0xd1,0x0b,0x10,0x07,0x01,0xff,
+-	0xcf,0xa5,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xcf,0xa7,0x00,0x01,0x00,0xd2,0x16,
+-	0xd1,0x0b,0x10,0x07,0x01,0xff,0xcf,0xa9,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xcf,
+-	0xab,0x00,0x01,0x00,0xd1,0x0b,0x10,0x07,0x01,0xff,0xcf,0xad,0x00,0x01,0x00,0x10,
+-	0x07,0x01,0xff,0xcf,0xaf,0x00,0x01,0x00,0xd3,0x2b,0xd2,0x12,0x91,0x0e,0x10,0x07,
+-	0x01,0xff,0xce,0xba,0x00,0x01,0xff,0xcf,0x81,0x00,0x01,0x00,0xd1,0x0e,0x10,0x07,
+-	0x05,0xff,0xce,0xb8,0x00,0x05,0xff,0xce,0xb5,0x00,0x10,0x04,0x06,0x00,0x07,0xff,
+-	0xcf,0xb8,0x00,0xd2,0x16,0xd1,0x0b,0x10,0x04,0x07,0x00,0x07,0xff,0xcf,0xb2,0x00,
+-	0x10,0x07,0x07,0xff,0xcf,0xbb,0x00,0x07,0x00,0xd1,0x0b,0x10,0x04,0x08,0x00,0x08,
+-	0xff,0xcd,0xbb,0x00,0x10,0x07,0x08,0xff,0xcd,0xbc,0x00,0x08,0xff,0xcd,0xbd,0x00,
+-	0xe3,0xd6,0x46,0xe2,0x3d,0x05,0xe1,0x27,0x02,0xe0,0x66,0x01,0xcf,0x86,0xd5,0xf0,
+-	0xd4,0x7e,0xd3,0x40,0xd2,0x22,0xd1,0x12,0x10,0x09,0x04,0xff,0xd0,0xb5,0xcc,0x80,
+-	0x00,0x01,0xff,0xd0,0xb5,0xcc,0x88,0x00,0x10,0x07,0x01,0xff,0xd1,0x92,0x00,0x01,
+-	0xff,0xd0,0xb3,0xcc,0x81,0x00,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd1,0x94,0x00,0x01,
+-	0xff,0xd1,0x95,0x00,0x10,0x07,0x01,0xff,0xd1,0x96,0x00,0x01,0xff,0xd1,0x96,0xcc,
+-	0x88,0x00,0xd2,0x1c,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd1,0x98,0x00,0x01,0xff,0xd1,
+-	0x99,0x00,0x10,0x07,0x01,0xff,0xd1,0x9a,0x00,0x01,0xff,0xd1,0x9b,0x00,0xd1,0x12,
+-	0x10,0x09,0x01,0xff,0xd0,0xba,0xcc,0x81,0x00,0x04,0xff,0xd0,0xb8,0xcc,0x80,0x00,
+-	0x10,0x09,0x01,0xff,0xd1,0x83,0xcc,0x86,0x00,0x01,0xff,0xd1,0x9f,0x00,0xd3,0x38,
+-	0xd2,0x1c,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd0,0xb0,0x00,0x01,0xff,0xd0,0xb1,0x00,
+-	0x10,0x07,0x01,0xff,0xd0,0xb2,0x00,0x01,0xff,0xd0,0xb3,0x00,0xd1,0x0e,0x10,0x07,
+-	0x01,0xff,0xd0,0xb4,0x00,0x01,0xff,0xd0,0xb5,0x00,0x10,0x07,0x01,0xff,0xd0,0xb6,
+-	0x00,0x01,0xff,0xd0,0xb7,0x00,0xd2,0x1e,0xd1,0x10,0x10,0x07,0x01,0xff,0xd0,0xb8,
+-	0x00,0x01,0xff,0xd0,0xb8,0xcc,0x86,0x00,0x10,0x07,0x01,0xff,0xd0,0xba,0x00,0x01,
+-	0xff,0xd0,0xbb,0x00,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd0,0xbc,0x00,0x01,0xff,0xd0,
+-	0xbd,0x00,0x10,0x07,0x01,0xff,0xd0,0xbe,0x00,0x01,0xff,0xd0,0xbf,0x00,0xe4,0x0e,
+-	0x42,0xd3,0x38,0xd2,0x1c,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd1,0x80,0x00,0x01,0xff,
+-	0xd1,0x81,0x00,0x10,0x07,0x01,0xff,0xd1,0x82,0x00,0x01,0xff,0xd1,0x83,0x00,0xd1,
+-	0x0e,0x10,0x07,0x01,0xff,0xd1,0x84,0x00,0x01,0xff,0xd1,0x85,0x00,0x10,0x07,0x01,
+-	0xff,0xd1,0x86,0x00,0x01,0xff,0xd1,0x87,0x00,0xd2,0x1c,0xd1,0x0e,0x10,0x07,0x01,
+-	0xff,0xd1,0x88,0x00,0x01,0xff,0xd1,0x89,0x00,0x10,0x07,0x01,0xff,0xd1,0x8a,0x00,
+-	0x01,0xff,0xd1,0x8b,0x00,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd1,0x8c,0x00,0x01,0xff,
+-	0xd1,0x8d,0x00,0x10,0x07,0x01,0xff,0xd1,0x8e,0x00,0x01,0xff,0xd1,0x8f,0x00,0xcf,
+-	0x86,0xd5,0x07,0x64,0xb8,0x41,0x01,0x00,0xd4,0x58,0xd3,0x2c,0xd2,0x16,0xd1,0x0b,
+-	0x10,0x07,0x01,0xff,0xd1,0xa1,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xd1,0xa3,0x00,
+-	0x01,0x00,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd1,0xa5,0x00,0x01,0x00,0x10,0x07,0x01,
+-	0xff,0xd1,0xa7,0x00,0x01,0x00,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd1,0xa9,
+-	0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xd1,0xab,0x00,0x01,0x00,0xd1,0x0b,0x10,0x07,
+-	0x01,0xff,0xd1,0xad,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xd1,0xaf,0x00,0x01,0x00,
+-	0xd3,0x33,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd1,0xb1,0x00,0x01,0x00,0x10,
+-	0x07,0x01,0xff,0xd1,0xb3,0x00,0x01,0x00,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd1,0xb5,
+-	0x00,0x01,0x00,0x10,0x09,0x01,0xff,0xd1,0xb5,0xcc,0x8f,0x00,0x01,0xff,0xd1,0xb5,
+-	0xcc,0x8f,0x00,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd1,0xb9,0x00,0x01,0x00,
+-	0x10,0x07,0x01,0xff,0xd1,0xbb,0x00,0x01,0x00,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd1,
+-	0xbd,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xd1,0xbf,0x00,0x01,0x00,0xe0,0x41,0x01,
+-	0xcf,0x86,0xd5,0x8e,0xd4,0x36,0xd3,0x11,0xe2,0x7a,0x41,0xe1,0x71,0x41,0x10,0x07,
+-	0x01,0xff,0xd2,0x81,0x00,0x01,0x00,0xd2,0x0f,0x51,0x04,0x04,0x00,0x10,0x07,0x06,
+-	0xff,0xd2,0x8b,0x00,0x06,0x00,0xd1,0x0b,0x10,0x07,0x04,0xff,0xd2,0x8d,0x00,0x04,
+-	0x00,0x10,0x07,0x04,0xff,0xd2,0x8f,0x00,0x04,0x00,0xd3,0x2c,0xd2,0x16,0xd1,0x0b,
+-	0x10,0x07,0x01,0xff,0xd2,0x91,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xd2,0x93,0x00,
+-	0x01,0x00,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd2,0x95,0x00,0x01,0x00,0x10,0x07,0x01,
+-	0xff,0xd2,0x97,0x00,0x01,0x00,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd2,0x99,
+-	0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xd2,0x9b,0x00,0x01,0x00,0xd1,0x0b,0x10,0x07,
+-	0x01,0xff,0xd2,0x9d,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xd2,0x9f,0x00,0x01,0x00,
+-	0xd4,0x58,0xd3,0x2c,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd2,0xa1,0x00,0x01,
+-	0x00,0x10,0x07,0x01,0xff,0xd2,0xa3,0x00,0x01,0x00,0xd1,0x0b,0x10,0x07,0x01,0xff,
+-	0xd2,0xa5,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xd2,0xa7,0x00,0x01,0x00,0xd2,0x16,
+-	0xd1,0x0b,0x10,0x07,0x01,0xff,0xd2,0xa9,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xd2,
+-	0xab,0x00,0x01,0x00,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd2,0xad,0x00,0x01,0x00,0x10,
+-	0x07,0x01,0xff,0xd2,0xaf,0x00,0x01,0x00,0xd3,0x2c,0xd2,0x16,0xd1,0x0b,0x10,0x07,
+-	0x01,0xff,0xd2,0xb1,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xd2,0xb3,0x00,0x01,0x00,
+-	0xd1,0x0b,0x10,0x07,0x01,0xff,0xd2,0xb5,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xd2,
+-	0xb7,0x00,0x01,0x00,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd2,0xb9,0x00,0x01,
+-	0x00,0x10,0x07,0x01,0xff,0xd2,0xbb,0x00,0x01,0x00,0xd1,0x0b,0x10,0x07,0x01,0xff,
+-	0xd2,0xbd,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xd2,0xbf,0x00,0x01,0x00,0xcf,0x86,
+-	0xd5,0xdc,0xd4,0x5a,0xd3,0x36,0xd2,0x20,0xd1,0x10,0x10,0x07,0x01,0xff,0xd3,0x8f,
+-	0x00,0x01,0xff,0xd0,0xb6,0xcc,0x86,0x00,0x10,0x09,0x01,0xff,0xd0,0xb6,0xcc,0x86,
+-	0x00,0x01,0xff,0xd3,0x84,0x00,0xd1,0x0b,0x10,0x04,0x01,0x00,0x06,0xff,0xd3,0x86,
+-	0x00,0x10,0x04,0x06,0x00,0x01,0xff,0xd3,0x88,0x00,0xd2,0x16,0xd1,0x0b,0x10,0x04,
+-	0x01,0x00,0x06,0xff,0xd3,0x8a,0x00,0x10,0x04,0x06,0x00,0x01,0xff,0xd3,0x8c,0x00,
+-	0xe1,0x52,0x40,0x10,0x04,0x01,0x00,0x06,0xff,0xd3,0x8e,0x00,0xd3,0x41,0xd2,0x24,
+-	0xd1,0x12,0x10,0x09,0x01,0xff,0xd0,0xb0,0xcc,0x86,0x00,0x01,0xff,0xd0,0xb0,0xcc,
+-	0x86,0x00,0x10,0x09,0x01,0xff,0xd0,0xb0,0xcc,0x88,0x00,0x01,0xff,0xd0,0xb0,0xcc,
+-	0x88,0x00,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd3,0x95,0x00,0x01,0x00,0x10,0x09,0x01,
+-	0xff,0xd0,0xb5,0xcc,0x86,0x00,0x01,0xff,0xd0,0xb5,0xcc,0x86,0x00,0xd2,0x1d,0xd1,
+-	0x0b,0x10,0x07,0x01,0xff,0xd3,0x99,0x00,0x01,0x00,0x10,0x09,0x01,0xff,0xd3,0x99,
+-	0xcc,0x88,0x00,0x01,0xff,0xd3,0x99,0xcc,0x88,0x00,0xd1,0x12,0x10,0x09,0x01,0xff,
+-	0xd0,0xb6,0xcc,0x88,0x00,0x01,0xff,0xd0,0xb6,0xcc,0x88,0x00,0x10,0x09,0x01,0xff,
+-	0xd0,0xb7,0xcc,0x88,0x00,0x01,0xff,0xd0,0xb7,0xcc,0x88,0x00,0xd4,0x82,0xd3,0x41,
+-	0xd2,0x1d,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd3,0xa1,0x00,0x01,0x00,0x10,0x09,0x01,
+-	0xff,0xd0,0xb8,0xcc,0x84,0x00,0x01,0xff,0xd0,0xb8,0xcc,0x84,0x00,0xd1,0x12,0x10,
+-	0x09,0x01,0xff,0xd0,0xb8,0xcc,0x88,0x00,0x01,0xff,0xd0,0xb8,0xcc,0x88,0x00,0x10,
+-	0x09,0x01,0xff,0xd0,0xbe,0xcc,0x88,0x00,0x01,0xff,0xd0,0xbe,0xcc,0x88,0x00,0xd2,
+-	0x1d,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd3,0xa9,0x00,0x01,0x00,0x10,0x09,0x01,0xff,
+-	0xd3,0xa9,0xcc,0x88,0x00,0x01,0xff,0xd3,0xa9,0xcc,0x88,0x00,0xd1,0x12,0x10,0x09,
+-	0x04,0xff,0xd1,0x8d,0xcc,0x88,0x00,0x04,0xff,0xd1,0x8d,0xcc,0x88,0x00,0x10,0x09,
+-	0x01,0xff,0xd1,0x83,0xcc,0x84,0x00,0x01,0xff,0xd1,0x83,0xcc,0x84,0x00,0xd3,0x41,
+-	0xd2,0x24,0xd1,0x12,0x10,0x09,0x01,0xff,0xd1,0x83,0xcc,0x88,0x00,0x01,0xff,0xd1,
+-	0x83,0xcc,0x88,0x00,0x10,0x09,0x01,0xff,0xd1,0x83,0xcc,0x8b,0x00,0x01,0xff,0xd1,
+-	0x83,0xcc,0x8b,0x00,0xd1,0x12,0x10,0x09,0x01,0xff,0xd1,0x87,0xcc,0x88,0x00,0x01,
+-	0xff,0xd1,0x87,0xcc,0x88,0x00,0x10,0x07,0x08,0xff,0xd3,0xb7,0x00,0x08,0x00,0xd2,
+-	0x1d,0xd1,0x12,0x10,0x09,0x01,0xff,0xd1,0x8b,0xcc,0x88,0x00,0x01,0xff,0xd1,0x8b,
+-	0xcc,0x88,0x00,0x10,0x07,0x09,0xff,0xd3,0xbb,0x00,0x09,0x00,0xd1,0x0b,0x10,0x07,
+-	0x09,0xff,0xd3,0xbd,0x00,0x09,0x00,0x10,0x07,0x09,0xff,0xd3,0xbf,0x00,0x09,0x00,
+-	0xe1,0x26,0x02,0xe0,0x78,0x01,0xcf,0x86,0xd5,0xb0,0xd4,0x58,0xd3,0x2c,0xd2,0x16,
+-	0xd1,0x0b,0x10,0x07,0x06,0xff,0xd4,0x81,0x00,0x06,0x00,0x10,0x07,0x06,0xff,0xd4,
+-	0x83,0x00,0x06,0x00,0xd1,0x0b,0x10,0x07,0x06,0xff,0xd4,0x85,0x00,0x06,0x00,0x10,
+-	0x07,0x06,0xff,0xd4,0x87,0x00,0x06,0x00,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x06,0xff,
+-	0xd4,0x89,0x00,0x06,0x00,0x10,0x07,0x06,0xff,0xd4,0x8b,0x00,0x06,0x00,0xd1,0x0b,
+-	0x10,0x07,0x06,0xff,0xd4,0x8d,0x00,0x06,0x00,0x10,0x07,0x06,0xff,0xd4,0x8f,0x00,
+-	0x06,0x00,0xd3,0x2c,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x09,0xff,0xd4,0x91,0x00,0x09,
+-	0x00,0x10,0x07,0x09,0xff,0xd4,0x93,0x00,0x09,0x00,0xd1,0x0b,0x10,0x07,0x0a,0xff,
+-	0xd4,0x95,0x00,0x0a,0x00,0x10,0x07,0x0a,0xff,0xd4,0x97,0x00,0x0a,0x00,0xd2,0x16,
+-	0xd1,0x0b,0x10,0x07,0x0a,0xff,0xd4,0x99,0x00,0x0a,0x00,0x10,0x07,0x0a,0xff,0xd4,
+-	0x9b,0x00,0x0a,0x00,0xd1,0x0b,0x10,0x07,0x0a,0xff,0xd4,0x9d,0x00,0x0a,0x00,0x10,
+-	0x07,0x0a,0xff,0xd4,0x9f,0x00,0x0a,0x00,0xd4,0x58,0xd3,0x2c,0xd2,0x16,0xd1,0x0b,
+-	0x10,0x07,0x0a,0xff,0xd4,0xa1,0x00,0x0a,0x00,0x10,0x07,0x0a,0xff,0xd4,0xa3,0x00,
+-	0x0a,0x00,0xd1,0x0b,0x10,0x07,0x0b,0xff,0xd4,0xa5,0x00,0x0b,0x00,0x10,0x07,0x0c,
+-	0xff,0xd4,0xa7,0x00,0x0c,0x00,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x10,0xff,0xd4,0xa9,
+-	0x00,0x10,0x00,0x10,0x07,0x10,0xff,0xd4,0xab,0x00,0x10,0x00,0xd1,0x0b,0x10,0x07,
+-	0x10,0xff,0xd4,0xad,0x00,0x10,0x00,0x10,0x07,0x10,0xff,0xd4,0xaf,0x00,0x10,0x00,
+-	0xd3,0x35,0xd2,0x19,0xd1,0x0b,0x10,0x04,0x00,0x00,0x01,0xff,0xd5,0xa1,0x00,0x10,
+-	0x07,0x01,0xff,0xd5,0xa2,0x00,0x01,0xff,0xd5,0xa3,0x00,0xd1,0x0e,0x10,0x07,0x01,
+-	0xff,0xd5,0xa4,0x00,0x01,0xff,0xd5,0xa5,0x00,0x10,0x07,0x01,0xff,0xd5,0xa6,0x00,
+-	0x01,0xff,0xd5,0xa7,0x00,0xd2,0x1c,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd5,0xa8,0x00,
+-	0x01,0xff,0xd5,0xa9,0x00,0x10,0x07,0x01,0xff,0xd5,0xaa,0x00,0x01,0xff,0xd5,0xab,
+-	0x00,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd5,0xac,0x00,0x01,0xff,0xd5,0xad,0x00,0x10,
+-	0x07,0x01,0xff,0xd5,0xae,0x00,0x01,0xff,0xd5,0xaf,0x00,0xcf,0x86,0xe5,0xf1,0x3e,
+-	0xd4,0x70,0xd3,0x38,0xd2,0x1c,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd5,0xb0,0x00,0x01,
+-	0xff,0xd5,0xb1,0x00,0x10,0x07,0x01,0xff,0xd5,0xb2,0x00,0x01,0xff,0xd5,0xb3,0x00,
+-	0xd1,0x0e,0x10,0x07,0x01,0xff,0xd5,0xb4,0x00,0x01,0xff,0xd5,0xb5,0x00,0x10,0x07,
+-	0x01,0xff,0xd5,0xb6,0x00,0x01,0xff,0xd5,0xb7,0x00,0xd2,0x1c,0xd1,0x0e,0x10,0x07,
+-	0x01,0xff,0xd5,0xb8,0x00,0x01,0xff,0xd5,0xb9,0x00,0x10,0x07,0x01,0xff,0xd5,0xba,
+-	0x00,0x01,0xff,0xd5,0xbb,0x00,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd5,0xbc,0x00,0x01,
+-	0xff,0xd5,0xbd,0x00,0x10,0x07,0x01,0xff,0xd5,0xbe,0x00,0x01,0xff,0xd5,0xbf,0x00,
+-	0xe3,0x70,0x3e,0xd2,0x1c,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd6,0x80,0x00,0x01,0xff,
+-	0xd6,0x81,0x00,0x10,0x07,0x01,0xff,0xd6,0x82,0x00,0x01,0xff,0xd6,0x83,0x00,0xd1,
+-	0x0e,0x10,0x07,0x01,0xff,0xd6,0x84,0x00,0x01,0xff,0xd6,0x85,0x00,0x10,0x07,0x01,
+-	0xff,0xd6,0x86,0x00,0x00,0x00,0xe0,0x18,0x3f,0xcf,0x86,0xe5,0xa9,0x3e,0xe4,0x80,
+-	0x3e,0xe3,0x5f,0x3e,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,
+-	0x01,0xff,0xd5,0xa5,0xd6,0x82,0x00,0xe4,0x3e,0x25,0xe3,0xc4,0x1a,0xe2,0xf8,0x80,
+-	0xe1,0xc0,0x13,0xd0,0x1e,0xcf,0x86,0xc5,0xe4,0xf0,0x4a,0xe3,0x3b,0x46,0xe2,0xd1,
+-	0x43,0xe1,0x04,0x43,0xe0,0xc9,0x42,0xcf,0x86,0xe5,0x8e,0x42,0x64,0x71,0x42,0x0b,
+-	0x00,0xcf,0x86,0xe5,0xfa,0x01,0xe4,0xd5,0x55,0xe3,0x76,0x01,0xe2,0x76,0x53,0xd1,
+-	0x0c,0xe0,0xd7,0x52,0xcf,0x86,0x65,0x75,0x52,0x04,0x00,0xe0,0x0d,0x01,0xcf,0x86,
+-	0xd5,0x0a,0xe4,0xf8,0x52,0x63,0xe7,0x52,0x0a,0x00,0xd4,0x80,0xd3,0x40,0xd2,0x20,
+-	0xd1,0x10,0x10,0x08,0x01,0xff,0xe2,0xb4,0x80,0x00,0x01,0xff,0xe2,0xb4,0x81,0x00,
+-	0x10,0x08,0x01,0xff,0xe2,0xb4,0x82,0x00,0x01,0xff,0xe2,0xb4,0x83,0x00,0xd1,0x10,
+-	0x10,0x08,0x01,0xff,0xe2,0xb4,0x84,0x00,0x01,0xff,0xe2,0xb4,0x85,0x00,0x10,0x08,
+-	0x01,0xff,0xe2,0xb4,0x86,0x00,0x01,0xff,0xe2,0xb4,0x87,0x00,0xd2,0x20,0xd1,0x10,
+-	0x10,0x08,0x01,0xff,0xe2,0xb4,0x88,0x00,0x01,0xff,0xe2,0xb4,0x89,0x00,0x10,0x08,
+-	0x01,0xff,0xe2,0xb4,0x8a,0x00,0x01,0xff,0xe2,0xb4,0x8b,0x00,0xd1,0x10,0x10,0x08,
+-	0x01,0xff,0xe2,0xb4,0x8c,0x00,0x01,0xff,0xe2,0xb4,0x8d,0x00,0x10,0x08,0x01,0xff,
+-	0xe2,0xb4,0x8e,0x00,0x01,0xff,0xe2,0xb4,0x8f,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,
+-	0x10,0x08,0x01,0xff,0xe2,0xb4,0x90,0x00,0x01,0xff,0xe2,0xb4,0x91,0x00,0x10,0x08,
+-	0x01,0xff,0xe2,0xb4,0x92,0x00,0x01,0xff,0xe2,0xb4,0x93,0x00,0xd1,0x10,0x10,0x08,
+-	0x01,0xff,0xe2,0xb4,0x94,0x00,0x01,0xff,0xe2,0xb4,0x95,0x00,0x10,0x08,0x01,0xff,
+-	0xe2,0xb4,0x96,0x00,0x01,0xff,0xe2,0xb4,0x97,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,
+-	0x01,0xff,0xe2,0xb4,0x98,0x00,0x01,0xff,0xe2,0xb4,0x99,0x00,0x10,0x08,0x01,0xff,
+-	0xe2,0xb4,0x9a,0x00,0x01,0xff,0xe2,0xb4,0x9b,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,
+-	0xe2,0xb4,0x9c,0x00,0x01,0xff,0xe2,0xb4,0x9d,0x00,0x10,0x08,0x01,0xff,0xe2,0xb4,
+-	0x9e,0x00,0x01,0xff,0xe2,0xb4,0x9f,0x00,0xcf,0x86,0xe5,0x2a,0x52,0x94,0x50,0xd3,
+-	0x3c,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe2,0xb4,0xa0,0x00,0x01,0xff,0xe2,
+-	0xb4,0xa1,0x00,0x10,0x08,0x01,0xff,0xe2,0xb4,0xa2,0x00,0x01,0xff,0xe2,0xb4,0xa3,
+-	0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe2,0xb4,0xa4,0x00,0x01,0xff,0xe2,0xb4,0xa5,
+-	0x00,0x10,0x04,0x00,0x00,0x0d,0xff,0xe2,0xb4,0xa7,0x00,0x52,0x04,0x00,0x00,0x91,
+-	0x0c,0x10,0x04,0x00,0x00,0x0d,0xff,0xe2,0xb4,0xad,0x00,0x00,0x00,0x01,0x00,0xd2,
+-	0x1b,0xe1,0xce,0x52,0xe0,0x7f,0x52,0xcf,0x86,0x95,0x0f,0x94,0x0b,0x93,0x07,0x62,
+-	0x64,0x52,0x04,0x00,0x04,0x00,0x04,0x00,0x04,0x00,0xd1,0x13,0xe0,0xa5,0x53,0xcf,
+-	0x86,0x95,0x0a,0xe4,0x7a,0x53,0x63,0x69,0x53,0x04,0x00,0x04,0x00,0xd0,0x0d,0xcf,
+-	0x86,0x95,0x07,0x64,0xf4,0x53,0x08,0x00,0x04,0x00,0xcf,0x86,0x55,0x04,0x04,0x00,
+-	0x54,0x04,0x04,0x00,0xd3,0x07,0x62,0x01,0x54,0x04,0x00,0xd2,0x20,0xd1,0x10,0x10,
+-	0x08,0x11,0xff,0xe1,0x8f,0xb0,0x00,0x11,0xff,0xe1,0x8f,0xb1,0x00,0x10,0x08,0x11,
+-	0xff,0xe1,0x8f,0xb2,0x00,0x11,0xff,0xe1,0x8f,0xb3,0x00,0x91,0x10,0x10,0x08,0x11,
+-	0xff,0xe1,0x8f,0xb4,0x00,0x11,0xff,0xe1,0x8f,0xb5,0x00,0x00,0x00,0xd4,0x1c,0xe3,
+-	0x92,0x56,0xe2,0xc9,0x55,0xe1,0x8c,0x55,0xe0,0x6d,0x55,0xcf,0x86,0x95,0x0a,0xe4,
+-	0x56,0x55,0x63,0x45,0x55,0x04,0x00,0x04,0x00,0xe3,0xd2,0x01,0xe2,0xdd,0x59,0xd1,
+-	0x0c,0xe0,0xfe,0x58,0xcf,0x86,0x65,0xd7,0x58,0x0a,0x00,0xe0,0x4e,0x59,0xcf,0x86,
+-	0xd5,0xc5,0xd4,0x45,0xd3,0x31,0xd2,0x1c,0xd1,0x0e,0x10,0x07,0x12,0xff,0xd0,0xb2,
+-	0x00,0x12,0xff,0xd0,0xb4,0x00,0x10,0x07,0x12,0xff,0xd0,0xbe,0x00,0x12,0xff,0xd1,
+-	0x81,0x00,0x51,0x07,0x12,0xff,0xd1,0x82,0x00,0x10,0x07,0x12,0xff,0xd1,0x8a,0x00,
+-	0x12,0xff,0xd1,0xa3,0x00,0x92,0x10,0x91,0x0c,0x10,0x08,0x12,0xff,0xea,0x99,0x8b,
+-	0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x14,
+-	0xff,0xe1,0x83,0x90,0x00,0x14,0xff,0xe1,0x83,0x91,0x00,0x10,0x08,0x14,0xff,0xe1,
+-	0x83,0x92,0x00,0x14,0xff,0xe1,0x83,0x93,0x00,0xd1,0x10,0x10,0x08,0x14,0xff,0xe1,
+-	0x83,0x94,0x00,0x14,0xff,0xe1,0x83,0x95,0x00,0x10,0x08,0x14,0xff,0xe1,0x83,0x96,
+-	0x00,0x14,0xff,0xe1,0x83,0x97,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x14,0xff,0xe1,
+-	0x83,0x98,0x00,0x14,0xff,0xe1,0x83,0x99,0x00,0x10,0x08,0x14,0xff,0xe1,0x83,0x9a,
+-	0x00,0x14,0xff,0xe1,0x83,0x9b,0x00,0xd1,0x10,0x10,0x08,0x14,0xff,0xe1,0x83,0x9c,
+-	0x00,0x14,0xff,0xe1,0x83,0x9d,0x00,0x10,0x08,0x14,0xff,0xe1,0x83,0x9e,0x00,0x14,
+-	0xff,0xe1,0x83,0x9f,0x00,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x14,
+-	0xff,0xe1,0x83,0xa0,0x00,0x14,0xff,0xe1,0x83,0xa1,0x00,0x10,0x08,0x14,0xff,0xe1,
+-	0x83,0xa2,0x00,0x14,0xff,0xe1,0x83,0xa3,0x00,0xd1,0x10,0x10,0x08,0x14,0xff,0xe1,
+-	0x83,0xa4,0x00,0x14,0xff,0xe1,0x83,0xa5,0x00,0x10,0x08,0x14,0xff,0xe1,0x83,0xa6,
+-	0x00,0x14,0xff,0xe1,0x83,0xa7,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x14,0xff,0xe1,
+-	0x83,0xa8,0x00,0x14,0xff,0xe1,0x83,0xa9,0x00,0x10,0x08,0x14,0xff,0xe1,0x83,0xaa,
+-	0x00,0x14,0xff,0xe1,0x83,0xab,0x00,0xd1,0x10,0x10,0x08,0x14,0xff,0xe1,0x83,0xac,
+-	0x00,0x14,0xff,0xe1,0x83,0xad,0x00,0x10,0x08,0x14,0xff,0xe1,0x83,0xae,0x00,0x14,
+-	0xff,0xe1,0x83,0xaf,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x14,0xff,0xe1,
+-	0x83,0xb0,0x00,0x14,0xff,0xe1,0x83,0xb1,0x00,0x10,0x08,0x14,0xff,0xe1,0x83,0xb2,
+-	0x00,0x14,0xff,0xe1,0x83,0xb3,0x00,0xd1,0x10,0x10,0x08,0x14,0xff,0xe1,0x83,0xb4,
+-	0x00,0x14,0xff,0xe1,0x83,0xb5,0x00,0x10,0x08,0x14,0xff,0xe1,0x83,0xb6,0x00,0x14,
+-	0xff,0xe1,0x83,0xb7,0x00,0xd2,0x1c,0xd1,0x10,0x10,0x08,0x14,0xff,0xe1,0x83,0xb8,
+-	0x00,0x14,0xff,0xe1,0x83,0xb9,0x00,0x10,0x08,0x14,0xff,0xe1,0x83,0xba,0x00,0x00,
+-	0x00,0xd1,0x0c,0x10,0x04,0x00,0x00,0x14,0xff,0xe1,0x83,0xbd,0x00,0x10,0x08,0x14,
+-	0xff,0xe1,0x83,0xbe,0x00,0x14,0xff,0xe1,0x83,0xbf,0x00,0xe2,0x9d,0x08,0xe1,0x48,
+-	0x04,0xe0,0x1c,0x02,0xcf,0x86,0xe5,0x11,0x01,0xd4,0x84,0xd3,0x40,0xd2,0x20,0xd1,
+-	0x10,0x10,0x08,0x01,0xff,0x61,0xcc,0xa5,0x00,0x01,0xff,0x61,0xcc,0xa5,0x00,0x10,
+-	0x08,0x01,0xff,0x62,0xcc,0x87,0x00,0x01,0xff,0x62,0xcc,0x87,0x00,0xd1,0x10,0x10,
+-	0x08,0x01,0xff,0x62,0xcc,0xa3,0x00,0x01,0xff,0x62,0xcc,0xa3,0x00,0x10,0x08,0x01,
+-	0xff,0x62,0xcc,0xb1,0x00,0x01,0xff,0x62,0xcc,0xb1,0x00,0xd2,0x24,0xd1,0x14,0x10,
+-	0x0a,0x01,0xff,0x63,0xcc,0xa7,0xcc,0x81,0x00,0x01,0xff,0x63,0xcc,0xa7,0xcc,0x81,
+-	0x00,0x10,0x08,0x01,0xff,0x64,0xcc,0x87,0x00,0x01,0xff,0x64,0xcc,0x87,0x00,0xd1,
+-	0x10,0x10,0x08,0x01,0xff,0x64,0xcc,0xa3,0x00,0x01,0xff,0x64,0xcc,0xa3,0x00,0x10,
+-	0x08,0x01,0xff,0x64,0xcc,0xb1,0x00,0x01,0xff,0x64,0xcc,0xb1,0x00,0xd3,0x48,0xd2,
+-	0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x64,0xcc,0xa7,0x00,0x01,0xff,0x64,0xcc,0xa7,
+-	0x00,0x10,0x08,0x01,0xff,0x64,0xcc,0xad,0x00,0x01,0xff,0x64,0xcc,0xad,0x00,0xd1,
+-	0x14,0x10,0x0a,0x01,0xff,0x65,0xcc,0x84,0xcc,0x80,0x00,0x01,0xff,0x65,0xcc,0x84,
+-	0xcc,0x80,0x00,0x10,0x0a,0x01,0xff,0x65,0xcc,0x84,0xcc,0x81,0x00,0x01,0xff,0x65,
+-	0xcc,0x84,0xcc,0x81,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x65,0xcc,0xad,
+-	0x00,0x01,0xff,0x65,0xcc,0xad,0x00,0x10,0x08,0x01,0xff,0x65,0xcc,0xb0,0x00,0x01,
+-	0xff,0x65,0xcc,0xb0,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x65,0xcc,0xa7,0xcc,0x86,
+-	0x00,0x01,0xff,0x65,0xcc,0xa7,0xcc,0x86,0x00,0x10,0x08,0x01,0xff,0x66,0xcc,0x87,
+-	0x00,0x01,0xff,0x66,0xcc,0x87,0x00,0xd4,0x84,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,
+-	0x08,0x01,0xff,0x67,0xcc,0x84,0x00,0x01,0xff,0x67,0xcc,0x84,0x00,0x10,0x08,0x01,
+-	0xff,0x68,0xcc,0x87,0x00,0x01,0xff,0x68,0xcc,0x87,0x00,0xd1,0x10,0x10,0x08,0x01,
+-	0xff,0x68,0xcc,0xa3,0x00,0x01,0xff,0x68,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x68,
+-	0xcc,0x88,0x00,0x01,0xff,0x68,0xcc,0x88,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,
+-	0xff,0x68,0xcc,0xa7,0x00,0x01,0xff,0x68,0xcc,0xa7,0x00,0x10,0x08,0x01,0xff,0x68,
+-	0xcc,0xae,0x00,0x01,0xff,0x68,0xcc,0xae,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x69,
+-	0xcc,0xb0,0x00,0x01,0xff,0x69,0xcc,0xb0,0x00,0x10,0x0a,0x01,0xff,0x69,0xcc,0x88,
+-	0xcc,0x81,0x00,0x01,0xff,0x69,0xcc,0x88,0xcc,0x81,0x00,0xd3,0x40,0xd2,0x20,0xd1,
+-	0x10,0x10,0x08,0x01,0xff,0x6b,0xcc,0x81,0x00,0x01,0xff,0x6b,0xcc,0x81,0x00,0x10,
+-	0x08,0x01,0xff,0x6b,0xcc,0xa3,0x00,0x01,0xff,0x6b,0xcc,0xa3,0x00,0xd1,0x10,0x10,
+-	0x08,0x01,0xff,0x6b,0xcc,0xb1,0x00,0x01,0xff,0x6b,0xcc,0xb1,0x00,0x10,0x08,0x01,
+-	0xff,0x6c,0xcc,0xa3,0x00,0x01,0xff,0x6c,0xcc,0xa3,0x00,0xd2,0x24,0xd1,0x14,0x10,
+-	0x0a,0x01,0xff,0x6c,0xcc,0xa3,0xcc,0x84,0x00,0x01,0xff,0x6c,0xcc,0xa3,0xcc,0x84,
+-	0x00,0x10,0x08,0x01,0xff,0x6c,0xcc,0xb1,0x00,0x01,0xff,0x6c,0xcc,0xb1,0x00,0xd1,
+-	0x10,0x10,0x08,0x01,0xff,0x6c,0xcc,0xad,0x00,0x01,0xff,0x6c,0xcc,0xad,0x00,0x10,
+-	0x08,0x01,0xff,0x6d,0xcc,0x81,0x00,0x01,0xff,0x6d,0xcc,0x81,0x00,0xcf,0x86,0xe5,
+-	0x15,0x01,0xd4,0x88,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x6d,0xcc,
+-	0x87,0x00,0x01,0xff,0x6d,0xcc,0x87,0x00,0x10,0x08,0x01,0xff,0x6d,0xcc,0xa3,0x00,
+-	0x01,0xff,0x6d,0xcc,0xa3,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x6e,0xcc,0x87,0x00,
+-	0x01,0xff,0x6e,0xcc,0x87,0x00,0x10,0x08,0x01,0xff,0x6e,0xcc,0xa3,0x00,0x01,0xff,
+-	0x6e,0xcc,0xa3,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x6e,0xcc,0xb1,0x00,
+-	0x01,0xff,0x6e,0xcc,0xb1,0x00,0x10,0x08,0x01,0xff,0x6e,0xcc,0xad,0x00,0x01,0xff,
+-	0x6e,0xcc,0xad,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x6f,0xcc,0x83,0xcc,0x81,0x00,
+-	0x01,0xff,0x6f,0xcc,0x83,0xcc,0x81,0x00,0x10,0x0a,0x01,0xff,0x6f,0xcc,0x83,0xcc,
+-	0x88,0x00,0x01,0xff,0x6f,0xcc,0x83,0xcc,0x88,0x00,0xd3,0x48,0xd2,0x28,0xd1,0x14,
+-	0x10,0x0a,0x01,0xff,0x6f,0xcc,0x84,0xcc,0x80,0x00,0x01,0xff,0x6f,0xcc,0x84,0xcc,
+-	0x80,0x00,0x10,0x0a,0x01,0xff,0x6f,0xcc,0x84,0xcc,0x81,0x00,0x01,0xff,0x6f,0xcc,
+-	0x84,0xcc,0x81,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x70,0xcc,0x81,0x00,0x01,0xff,
+-	0x70,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x70,0xcc,0x87,0x00,0x01,0xff,0x70,0xcc,
+-	0x87,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x72,0xcc,0x87,0x00,0x01,0xff,
+-	0x72,0xcc,0x87,0x00,0x10,0x08,0x01,0xff,0x72,0xcc,0xa3,0x00,0x01,0xff,0x72,0xcc,
+-	0xa3,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x72,0xcc,0xa3,0xcc,0x84,0x00,0x01,0xff,
+-	0x72,0xcc,0xa3,0xcc,0x84,0x00,0x10,0x08,0x01,0xff,0x72,0xcc,0xb1,0x00,0x01,0xff,
+-	0x72,0xcc,0xb1,0x00,0xd4,0x8c,0xd3,0x48,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,
+-	0x73,0xcc,0x87,0x00,0x01,0xff,0x73,0xcc,0x87,0x00,0x10,0x08,0x01,0xff,0x73,0xcc,
+-	0xa3,0x00,0x01,0xff,0x73,0xcc,0xa3,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x73,0xcc,
+-	0x81,0xcc,0x87,0x00,0x01,0xff,0x73,0xcc,0x81,0xcc,0x87,0x00,0x10,0x0a,0x01,0xff,
+-	0x73,0xcc,0x8c,0xcc,0x87,0x00,0x01,0xff,0x73,0xcc,0x8c,0xcc,0x87,0x00,0xd2,0x24,
+-	0xd1,0x14,0x10,0x0a,0x01,0xff,0x73,0xcc,0xa3,0xcc,0x87,0x00,0x01,0xff,0x73,0xcc,
+-	0xa3,0xcc,0x87,0x00,0x10,0x08,0x01,0xff,0x74,0xcc,0x87,0x00,0x01,0xff,0x74,0xcc,
+-	0x87,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x74,0xcc,0xa3,0x00,0x01,0xff,0x74,0xcc,
+-	0xa3,0x00,0x10,0x08,0x01,0xff,0x74,0xcc,0xb1,0x00,0x01,0xff,0x74,0xcc,0xb1,0x00,
+-	0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x74,0xcc,0xad,0x00,0x01,0xff,
+-	0x74,0xcc,0xad,0x00,0x10,0x08,0x01,0xff,0x75,0xcc,0xa4,0x00,0x01,0xff,0x75,0xcc,
+-	0xa4,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x75,0xcc,0xb0,0x00,0x01,0xff,0x75,0xcc,
+-	0xb0,0x00,0x10,0x08,0x01,0xff,0x75,0xcc,0xad,0x00,0x01,0xff,0x75,0xcc,0xad,0x00,
+-	0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x75,0xcc,0x83,0xcc,0x81,0x00,0x01,0xff,
+-	0x75,0xcc,0x83,0xcc,0x81,0x00,0x10,0x0a,0x01,0xff,0x75,0xcc,0x84,0xcc,0x88,0x00,
+-	0x01,0xff,0x75,0xcc,0x84,0xcc,0x88,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x76,0xcc,
+-	0x83,0x00,0x01,0xff,0x76,0xcc,0x83,0x00,0x10,0x08,0x01,0xff,0x76,0xcc,0xa3,0x00,
+-	0x01,0xff,0x76,0xcc,0xa3,0x00,0xe0,0x11,0x02,0xcf,0x86,0xd5,0xe2,0xd4,0x80,0xd3,
+-	0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x77,0xcc,0x80,0x00,0x01,0xff,0x77,
+-	0xcc,0x80,0x00,0x10,0x08,0x01,0xff,0x77,0xcc,0x81,0x00,0x01,0xff,0x77,0xcc,0x81,
+-	0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x77,0xcc,0x88,0x00,0x01,0xff,0x77,0xcc,0x88,
+-	0x00,0x10,0x08,0x01,0xff,0x77,0xcc,0x87,0x00,0x01,0xff,0x77,0xcc,0x87,0x00,0xd2,
+-	0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x77,0xcc,0xa3,0x00,0x01,0xff,0x77,0xcc,0xa3,
+-	0x00,0x10,0x08,0x01,0xff,0x78,0xcc,0x87,0x00,0x01,0xff,0x78,0xcc,0x87,0x00,0xd1,
+-	0x10,0x10,0x08,0x01,0xff,0x78,0xcc,0x88,0x00,0x01,0xff,0x78,0xcc,0x88,0x00,0x10,
+-	0x08,0x01,0xff,0x79,0xcc,0x87,0x00,0x01,0xff,0x79,0xcc,0x87,0x00,0xd3,0x33,0xd2,
+-	0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x7a,0xcc,0x82,0x00,0x01,0xff,0x7a,0xcc,0x82,
+-	0x00,0x10,0x08,0x01,0xff,0x7a,0xcc,0xa3,0x00,0x01,0xff,0x7a,0xcc,0xa3,0x00,0xe1,
+-	0xc4,0x58,0x10,0x08,0x01,0xff,0x7a,0xcc,0xb1,0x00,0x01,0xff,0x7a,0xcc,0xb1,0x00,
+-	0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x77,0xcc,0x8a,0x00,0x01,0xff,0x79,0xcc,
+-	0x8a,0x00,0x10,0x08,0x01,0xff,0x61,0xca,0xbe,0x00,0x02,0xff,0x73,0xcc,0x87,0x00,
+-	0x51,0x04,0x0a,0x00,0x10,0x07,0x0a,0xff,0x73,0x73,0x00,0x0a,0x00,0xd4,0x98,0xd3,
+-	0x48,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x61,0xcc,0xa3,0x00,0x01,0xff,0x61,
+-	0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x61,0xcc,0x89,0x00,0x01,0xff,0x61,0xcc,0x89,
+-	0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x61,0xcc,0x82,0xcc,0x81,0x00,0x01,0xff,0x61,
+-	0xcc,0x82,0xcc,0x81,0x00,0x10,0x0a,0x01,0xff,0x61,0xcc,0x82,0xcc,0x80,0x00,0x01,
+-	0xff,0x61,0xcc,0x82,0xcc,0x80,0x00,0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x61,
+-	0xcc,0x82,0xcc,0x89,0x00,0x01,0xff,0x61,0xcc,0x82,0xcc,0x89,0x00,0x10,0x0a,0x01,
+-	0xff,0x61,0xcc,0x82,0xcc,0x83,0x00,0x01,0xff,0x61,0xcc,0x82,0xcc,0x83,0x00,0xd1,
+-	0x14,0x10,0x0a,0x01,0xff,0x61,0xcc,0xa3,0xcc,0x82,0x00,0x01,0xff,0x61,0xcc,0xa3,
+-	0xcc,0x82,0x00,0x10,0x0a,0x01,0xff,0x61,0xcc,0x86,0xcc,0x81,0x00,0x01,0xff,0x61,
+-	0xcc,0x86,0xcc,0x81,0x00,0xd3,0x50,0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x61,
+-	0xcc,0x86,0xcc,0x80,0x00,0x01,0xff,0x61,0xcc,0x86,0xcc,0x80,0x00,0x10,0x0a,0x01,
+-	0xff,0x61,0xcc,0x86,0xcc,0x89,0x00,0x01,0xff,0x61,0xcc,0x86,0xcc,0x89,0x00,0xd1,
+-	0x14,0x10,0x0a,0x01,0xff,0x61,0xcc,0x86,0xcc,0x83,0x00,0x01,0xff,0x61,0xcc,0x86,
+-	0xcc,0x83,0x00,0x10,0x0a,0x01,0xff,0x61,0xcc,0xa3,0xcc,0x86,0x00,0x01,0xff,0x61,
+-	0xcc,0xa3,0xcc,0x86,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x65,0xcc,0xa3,
+-	0x00,0x01,0xff,0x65,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x65,0xcc,0x89,0x00,0x01,
+-	0xff,0x65,0xcc,0x89,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x65,0xcc,0x83,0x00,0x01,
+-	0xff,0x65,0xcc,0x83,0x00,0x10,0x0a,0x01,0xff,0x65,0xcc,0x82,0xcc,0x81,0x00,0x01,
+-	0xff,0x65,0xcc,0x82,0xcc,0x81,0x00,0xcf,0x86,0xe5,0x31,0x01,0xd4,0x90,0xd3,0x50,
+-	0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x65,0xcc,0x82,0xcc,0x80,0x00,0x01,0xff,
+-	0x65,0xcc,0x82,0xcc,0x80,0x00,0x10,0x0a,0x01,0xff,0x65,0xcc,0x82,0xcc,0x89,0x00,
+-	0x01,0xff,0x65,0xcc,0x82,0xcc,0x89,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x65,0xcc,
+-	0x82,0xcc,0x83,0x00,0x01,0xff,0x65,0xcc,0x82,0xcc,0x83,0x00,0x10,0x0a,0x01,0xff,
+-	0x65,0xcc,0xa3,0xcc,0x82,0x00,0x01,0xff,0x65,0xcc,0xa3,0xcc,0x82,0x00,0xd2,0x20,
+-	0xd1,0x10,0x10,0x08,0x01,0xff,0x69,0xcc,0x89,0x00,0x01,0xff,0x69,0xcc,0x89,0x00,
+-	0x10,0x08,0x01,0xff,0x69,0xcc,0xa3,0x00,0x01,0xff,0x69,0xcc,0xa3,0x00,0xd1,0x10,
+-	0x10,0x08,0x01,0xff,0x6f,0xcc,0xa3,0x00,0x01,0xff,0x6f,0xcc,0xa3,0x00,0x10,0x08,
+-	0x01,0xff,0x6f,0xcc,0x89,0x00,0x01,0xff,0x6f,0xcc,0x89,0x00,0xd3,0x50,0xd2,0x28,
+-	0xd1,0x14,0x10,0x0a,0x01,0xff,0x6f,0xcc,0x82,0xcc,0x81,0x00,0x01,0xff,0x6f,0xcc,
+-	0x82,0xcc,0x81,0x00,0x10,0x0a,0x01,0xff,0x6f,0xcc,0x82,0xcc,0x80,0x00,0x01,0xff,
+-	0x6f,0xcc,0x82,0xcc,0x80,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x6f,0xcc,0x82,0xcc,
+-	0x89,0x00,0x01,0xff,0x6f,0xcc,0x82,0xcc,0x89,0x00,0x10,0x0a,0x01,0xff,0x6f,0xcc,
+-	0x82,0xcc,0x83,0x00,0x01,0xff,0x6f,0xcc,0x82,0xcc,0x83,0x00,0xd2,0x28,0xd1,0x14,
+-	0x10,0x0a,0x01,0xff,0x6f,0xcc,0xa3,0xcc,0x82,0x00,0x01,0xff,0x6f,0xcc,0xa3,0xcc,
+-	0x82,0x00,0x10,0x0a,0x01,0xff,0x6f,0xcc,0x9b,0xcc,0x81,0x00,0x01,0xff,0x6f,0xcc,
+-	0x9b,0xcc,0x81,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x6f,0xcc,0x9b,0xcc,0x80,0x00,
+-	0x01,0xff,0x6f,0xcc,0x9b,0xcc,0x80,0x00,0x10,0x0a,0x01,0xff,0x6f,0xcc,0x9b,0xcc,
+-	0x89,0x00,0x01,0xff,0x6f,0xcc,0x9b,0xcc,0x89,0x00,0xd4,0x98,0xd3,0x48,0xd2,0x28,
+-	0xd1,0x14,0x10,0x0a,0x01,0xff,0x6f,0xcc,0x9b,0xcc,0x83,0x00,0x01,0xff,0x6f,0xcc,
+-	0x9b,0xcc,0x83,0x00,0x10,0x0a,0x01,0xff,0x6f,0xcc,0x9b,0xcc,0xa3,0x00,0x01,0xff,
+-	0x6f,0xcc,0x9b,0xcc,0xa3,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x75,0xcc,0xa3,0x00,
+-	0x01,0xff,0x75,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x75,0xcc,0x89,0x00,0x01,0xff,
+-	0x75,0xcc,0x89,0x00,0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x75,0xcc,0x9b,0xcc,
+-	0x81,0x00,0x01,0xff,0x75,0xcc,0x9b,0xcc,0x81,0x00,0x10,0x0a,0x01,0xff,0x75,0xcc,
+-	0x9b,0xcc,0x80,0x00,0x01,0xff,0x75,0xcc,0x9b,0xcc,0x80,0x00,0xd1,0x14,0x10,0x0a,
+-	0x01,0xff,0x75,0xcc,0x9b,0xcc,0x89,0x00,0x01,0xff,0x75,0xcc,0x9b,0xcc,0x89,0x00,
+-	0x10,0x0a,0x01,0xff,0x75,0xcc,0x9b,0xcc,0x83,0x00,0x01,0xff,0x75,0xcc,0x9b,0xcc,
+-	0x83,0x00,0xd3,0x44,0xd2,0x24,0xd1,0x14,0x10,0x0a,0x01,0xff,0x75,0xcc,0x9b,0xcc,
+-	0xa3,0x00,0x01,0xff,0x75,0xcc,0x9b,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x79,0xcc,
+-	0x80,0x00,0x01,0xff,0x79,0xcc,0x80,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x79,0xcc,
+-	0xa3,0x00,0x01,0xff,0x79,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x79,0xcc,0x89,0x00,
+-	0x01,0xff,0x79,0xcc,0x89,0x00,0xd2,0x1c,0xd1,0x10,0x10,0x08,0x01,0xff,0x79,0xcc,
+-	0x83,0x00,0x01,0xff,0x79,0xcc,0x83,0x00,0x10,0x08,0x0a,0xff,0xe1,0xbb,0xbb,0x00,
+-	0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xe1,0xbb,0xbd,0x00,0x0a,0x00,0x10,0x08,
+-	0x0a,0xff,0xe1,0xbb,0xbf,0x00,0x0a,0x00,0xe1,0xbf,0x02,0xe0,0xa1,0x01,0xcf,0x86,
+-	0xd5,0xc6,0xd4,0x6c,0xd3,0x18,0xe2,0xc0,0x58,0xe1,0xa9,0x58,0x10,0x09,0x01,0xff,
+-	0xce,0xb1,0xcc,0x93,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,0x00,0xd2,0x28,0xd1,0x12,
+-	0x10,0x09,0x01,0xff,0xce,0xb1,0xcc,0x93,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,0x00,
+-	0x10,0x0b,0x01,0xff,0xce,0xb1,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xce,0xb1,0xcc,
+-	0x94,0xcc,0x80,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0xb1,0xcc,0x93,0xcc,0x81,
+-	0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,0xcc,0x81,0x00,0x10,0x0b,0x01,0xff,0xce,0xb1,
+-	0xcc,0x93,0xcd,0x82,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,0xcd,0x82,0x00,0xd3,0x18,
+-	0xe2,0xfc,0x58,0xe1,0xe5,0x58,0x10,0x09,0x01,0xff,0xce,0xb5,0xcc,0x93,0x00,0x01,
+-	0xff,0xce,0xb5,0xcc,0x94,0x00,0xd2,0x28,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xb5,
+-	0xcc,0x93,0x00,0x01,0xff,0xce,0xb5,0xcc,0x94,0x00,0x10,0x0b,0x01,0xff,0xce,0xb5,
+-	0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xce,0xb5,0xcc,0x94,0xcc,0x80,0x00,0x91,0x16,
+-	0x10,0x0b,0x01,0xff,0xce,0xb5,0xcc,0x93,0xcc,0x81,0x00,0x01,0xff,0xce,0xb5,0xcc,
+-	0x94,0xcc,0x81,0x00,0x00,0x00,0xd4,0x6c,0xd3,0x18,0xe2,0x26,0x59,0xe1,0x0f,0x59,
+-	0x10,0x09,0x01,0xff,0xce,0xb7,0xcc,0x93,0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0x00,
+-	0xd2,0x28,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xb7,0xcc,0x93,0x00,0x01,0xff,0xce,
+-	0xb7,0xcc,0x94,0x00,0x10,0x0b,0x01,0xff,0xce,0xb7,0xcc,0x93,0xcc,0x80,0x00,0x01,
+-	0xff,0xce,0xb7,0xcc,0x94,0xcc,0x80,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0xb7,
+-	0xcc,0x93,0xcc,0x81,0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0xcc,0x81,0x00,0x10,0x0b,
+-	0x01,0xff,0xce,0xb7,0xcc,0x93,0xcd,0x82,0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0xcd,
+-	0x82,0x00,0xd3,0x18,0xe2,0x62,0x59,0xe1,0x4b,0x59,0x10,0x09,0x01,0xff,0xce,0xb9,
+-	0xcc,0x93,0x00,0x01,0xff,0xce,0xb9,0xcc,0x94,0x00,0xd2,0x28,0xd1,0x12,0x10,0x09,
+-	0x01,0xff,0xce,0xb9,0xcc,0x93,0x00,0x01,0xff,0xce,0xb9,0xcc,0x94,0x00,0x10,0x0b,
+-	0x01,0xff,0xce,0xb9,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xce,0xb9,0xcc,0x94,0xcc,
+-	0x80,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0xb9,0xcc,0x93,0xcc,0x81,0x00,0x01,
+-	0xff,0xce,0xb9,0xcc,0x94,0xcc,0x81,0x00,0x10,0x0b,0x01,0xff,0xce,0xb9,0xcc,0x93,
+-	0xcd,0x82,0x00,0x01,0xff,0xce,0xb9,0xcc,0x94,0xcd,0x82,0x00,0xcf,0x86,0xd5,0xac,
+-	0xd4,0x5a,0xd3,0x18,0xe2,0x9f,0x59,0xe1,0x88,0x59,0x10,0x09,0x01,0xff,0xce,0xbf,
+-	0xcc,0x93,0x00,0x01,0xff,0xce,0xbf,0xcc,0x94,0x00,0xd2,0x28,0xd1,0x12,0x10,0x09,
+-	0x01,0xff,0xce,0xbf,0xcc,0x93,0x00,0x01,0xff,0xce,0xbf,0xcc,0x94,0x00,0x10,0x0b,
+-	0x01,0xff,0xce,0xbf,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xce,0xbf,0xcc,0x94,0xcc,
+-	0x80,0x00,0x91,0x16,0x10,0x0b,0x01,0xff,0xce,0xbf,0xcc,0x93,0xcc,0x81,0x00,0x01,
+-	0xff,0xce,0xbf,0xcc,0x94,0xcc,0x81,0x00,0x00,0x00,0xd3,0x18,0xe2,0xc9,0x59,0xe1,
+-	0xb2,0x59,0x10,0x09,0x01,0xff,0xcf,0x85,0xcc,0x93,0x00,0x01,0xff,0xcf,0x85,0xcc,
+-	0x94,0x00,0xd2,0x1c,0xd1,0x0d,0x10,0x04,0x00,0x00,0x01,0xff,0xcf,0x85,0xcc,0x94,
+-	0x00,0x10,0x04,0x00,0x00,0x01,0xff,0xcf,0x85,0xcc,0x94,0xcc,0x80,0x00,0xd1,0x0f,
+-	0x10,0x04,0x00,0x00,0x01,0xff,0xcf,0x85,0xcc,0x94,0xcc,0x81,0x00,0x10,0x04,0x00,
+-	0x00,0x01,0xff,0xcf,0x85,0xcc,0x94,0xcd,0x82,0x00,0xe4,0x85,0x5a,0xd3,0x18,0xe2,
+-	0x04,0x5a,0xe1,0xed,0x59,0x10,0x09,0x01,0xff,0xcf,0x89,0xcc,0x93,0x00,0x01,0xff,
+-	0xcf,0x89,0xcc,0x94,0x00,0xd2,0x28,0xd1,0x12,0x10,0x09,0x01,0xff,0xcf,0x89,0xcc,
+-	0x93,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0x00,0x10,0x0b,0x01,0xff,0xcf,0x89,0xcc,
+-	0x93,0xcc,0x80,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0xcc,0x80,0x00,0xd1,0x16,0x10,
+-	0x0b,0x01,0xff,0xcf,0x89,0xcc,0x93,0xcc,0x81,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,
+-	0xcc,0x81,0x00,0x10,0x0b,0x01,0xff,0xcf,0x89,0xcc,0x93,0xcd,0x82,0x00,0x01,0xff,
+-	0xcf,0x89,0xcc,0x94,0xcd,0x82,0x00,0xe0,0xd9,0x02,0xcf,0x86,0xe5,0x91,0x01,0xd4,
+-	0xc8,0xd3,0x64,0xd2,0x30,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0xb1,0xcc,0x93,0xce,
+-	0xb9,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,0xce,0xb9,0x00,0x10,0x0d,0x01,0xff,0xce,
+-	0xb1,0xcc,0x93,0xcc,0x80,0xce,0xb9,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,0xcc,0x80,
+-	0xce,0xb9,0x00,0xd1,0x1a,0x10,0x0d,0x01,0xff,0xce,0xb1,0xcc,0x93,0xcc,0x81,0xce,
+-	0xb9,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,0xcc,0x81,0xce,0xb9,0x00,0x10,0x0d,0x01,
+-	0xff,0xce,0xb1,0xcc,0x93,0xcd,0x82,0xce,0xb9,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,
+-	0xcd,0x82,0xce,0xb9,0x00,0xd2,0x30,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0xb1,0xcc,
+-	0x93,0xce,0xb9,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,0xce,0xb9,0x00,0x10,0x0d,0x01,
+-	0xff,0xce,0xb1,0xcc,0x93,0xcc,0x80,0xce,0xb9,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,
+-	0xcc,0x80,0xce,0xb9,0x00,0xd1,0x1a,0x10,0x0d,0x01,0xff,0xce,0xb1,0xcc,0x93,0xcc,
+-	0x81,0xce,0xb9,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,0xcc,0x81,0xce,0xb9,0x00,0x10,
+-	0x0d,0x01,0xff,0xce,0xb1,0xcc,0x93,0xcd,0x82,0xce,0xb9,0x00,0x01,0xff,0xce,0xb1,
+-	0xcc,0x94,0xcd,0x82,0xce,0xb9,0x00,0xd3,0x64,0xd2,0x30,0xd1,0x16,0x10,0x0b,0x01,
+-	0xff,0xce,0xb7,0xcc,0x93,0xce,0xb9,0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0xce,0xb9,
+-	0x00,0x10,0x0d,0x01,0xff,0xce,0xb7,0xcc,0x93,0xcc,0x80,0xce,0xb9,0x00,0x01,0xff,
+-	0xce,0xb7,0xcc,0x94,0xcc,0x80,0xce,0xb9,0x00,0xd1,0x1a,0x10,0x0d,0x01,0xff,0xce,
+-	0xb7,0xcc,0x93,0xcc,0x81,0xce,0xb9,0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0xcc,0x81,
+-	0xce,0xb9,0x00,0x10,0x0d,0x01,0xff,0xce,0xb7,0xcc,0x93,0xcd,0x82,0xce,0xb9,0x00,
+-	0x01,0xff,0xce,0xb7,0xcc,0x94,0xcd,0x82,0xce,0xb9,0x00,0xd2,0x30,0xd1,0x16,0x10,
+-	0x0b,0x01,0xff,0xce,0xb7,0xcc,0x93,0xce,0xb9,0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,
+-	0xce,0xb9,0x00,0x10,0x0d,0x01,0xff,0xce,0xb7,0xcc,0x93,0xcc,0x80,0xce,0xb9,0x00,
+-	0x01,0xff,0xce,0xb7,0xcc,0x94,0xcc,0x80,0xce,0xb9,0x00,0xd1,0x1a,0x10,0x0d,0x01,
+-	0xff,0xce,0xb7,0xcc,0x93,0xcc,0x81,0xce,0xb9,0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,
+-	0xcc,0x81,0xce,0xb9,0x00,0x10,0x0d,0x01,0xff,0xce,0xb7,0xcc,0x93,0xcd,0x82,0xce,
+-	0xb9,0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0xcd,0x82,0xce,0xb9,0x00,0xd4,0xc8,0xd3,
+-	0x64,0xd2,0x30,0xd1,0x16,0x10,0x0b,0x01,0xff,0xcf,0x89,0xcc,0x93,0xce,0xb9,0x00,
+-	0x01,0xff,0xcf,0x89,0xcc,0x94,0xce,0xb9,0x00,0x10,0x0d,0x01,0xff,0xcf,0x89,0xcc,
+-	0x93,0xcc,0x80,0xce,0xb9,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0xcc,0x80,0xce,0xb9,
+-	0x00,0xd1,0x1a,0x10,0x0d,0x01,0xff,0xcf,0x89,0xcc,0x93,0xcc,0x81,0xce,0xb9,0x00,
+-	0x01,0xff,0xcf,0x89,0xcc,0x94,0xcc,0x81,0xce,0xb9,0x00,0x10,0x0d,0x01,0xff,0xcf,
+-	0x89,0xcc,0x93,0xcd,0x82,0xce,0xb9,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0xcd,0x82,
+-	0xce,0xb9,0x00,0xd2,0x30,0xd1,0x16,0x10,0x0b,0x01,0xff,0xcf,0x89,0xcc,0x93,0xce,
+-	0xb9,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0xce,0xb9,0x00,0x10,0x0d,0x01,0xff,0xcf,
+-	0x89,0xcc,0x93,0xcc,0x80,0xce,0xb9,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0xcc,0x80,
+-	0xce,0xb9,0x00,0xd1,0x1a,0x10,0x0d,0x01,0xff,0xcf,0x89,0xcc,0x93,0xcc,0x81,0xce,
+-	0xb9,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0xcc,0x81,0xce,0xb9,0x00,0x10,0x0d,0x01,
+-	0xff,0xcf,0x89,0xcc,0x93,0xcd,0x82,0xce,0xb9,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,
+-	0xcd,0x82,0xce,0xb9,0x00,0xd3,0x49,0xd2,0x26,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,
+-	0xb1,0xcc,0x86,0x00,0x01,0xff,0xce,0xb1,0xcc,0x84,0x00,0x10,0x0b,0x01,0xff,0xce,
+-	0xb1,0xcc,0x80,0xce,0xb9,0x00,0x01,0xff,0xce,0xb1,0xce,0xb9,0x00,0xd1,0x0f,0x10,
+-	0x0b,0x01,0xff,0xce,0xb1,0xcc,0x81,0xce,0xb9,0x00,0x00,0x00,0x10,0x09,0x01,0xff,
+-	0xce,0xb1,0xcd,0x82,0x00,0x01,0xff,0xce,0xb1,0xcd,0x82,0xce,0xb9,0x00,0xd2,0x24,
+-	0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xb1,0xcc,0x86,0x00,0x01,0xff,0xce,0xb1,0xcc,
+-	0x84,0x00,0x10,0x09,0x01,0xff,0xce,0xb1,0xcc,0x80,0x00,0x01,0xff,0xce,0xb1,0xcc,
+-	0x81,0x00,0xe1,0xa5,0x5a,0x10,0x09,0x01,0xff,0xce,0xb1,0xce,0xb9,0x00,0x01,0x00,
+-	0xcf,0x86,0xd5,0xbd,0xd4,0x7e,0xd3,0x44,0xd2,0x21,0xd1,0x0d,0x10,0x04,0x01,0x00,
+-	0x01,0xff,0xc2,0xa8,0xcd,0x82,0x00,0x10,0x0b,0x01,0xff,0xce,0xb7,0xcc,0x80,0xce,
+-	0xb9,0x00,0x01,0xff,0xce,0xb7,0xce,0xb9,0x00,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xce,
+-	0xb7,0xcc,0x81,0xce,0xb9,0x00,0x00,0x00,0x10,0x09,0x01,0xff,0xce,0xb7,0xcd,0x82,
+-	0x00,0x01,0xff,0xce,0xb7,0xcd,0x82,0xce,0xb9,0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,
+-	0x01,0xff,0xce,0xb5,0xcc,0x80,0x00,0x01,0xff,0xce,0xb5,0xcc,0x81,0x00,0x10,0x09,
+-	0x01,0xff,0xce,0xb7,0xcc,0x80,0x00,0x01,0xff,0xce,0xb7,0xcc,0x81,0x00,0xe1,0xb4,
+-	0x5a,0x10,0x09,0x01,0xff,0xce,0xb7,0xce,0xb9,0x00,0x01,0xff,0xe1,0xbe,0xbf,0xcc,
+-	0x80,0x00,0xd3,0x18,0xe2,0xda,0x5a,0xe1,0xc3,0x5a,0x10,0x09,0x01,0xff,0xce,0xb9,
+-	0xcc,0x86,0x00,0x01,0xff,0xce,0xb9,0xcc,0x84,0x00,0xe2,0xfe,0x5a,0xd1,0x12,0x10,
+-	0x09,0x01,0xff,0xce,0xb9,0xcc,0x86,0x00,0x01,0xff,0xce,0xb9,0xcc,0x84,0x00,0x10,
+-	0x09,0x01,0xff,0xce,0xb9,0xcc,0x80,0x00,0x01,0xff,0xce,0xb9,0xcc,0x81,0x00,0xd4,
+-	0x51,0xd3,0x18,0xe2,0x21,0x5b,0xe1,0x0a,0x5b,0x10,0x09,0x01,0xff,0xcf,0x85,0xcc,
+-	0x86,0x00,0x01,0xff,0xcf,0x85,0xcc,0x84,0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x01,
+-	0xff,0xcf,0x85,0xcc,0x86,0x00,0x01,0xff,0xcf,0x85,0xcc,0x84,0x00,0x10,0x09,0x01,
+-	0xff,0xcf,0x85,0xcc,0x80,0x00,0x01,0xff,0xcf,0x85,0xcc,0x81,0x00,0xe1,0x41,0x5b,
+-	0x10,0x09,0x01,0xff,0xcf,0x81,0xcc,0x94,0x00,0x01,0xff,0xc2,0xa8,0xcc,0x80,0x00,
+-	0xd3,0x3b,0xd2,0x18,0x51,0x04,0x00,0x00,0x10,0x0b,0x01,0xff,0xcf,0x89,0xcc,0x80,
+-	0xce,0xb9,0x00,0x01,0xff,0xcf,0x89,0xce,0xb9,0x00,0xd1,0x0f,0x10,0x0b,0x01,0xff,
+-	0xcf,0x89,0xcc,0x81,0xce,0xb9,0x00,0x00,0x00,0x10,0x09,0x01,0xff,0xcf,0x89,0xcd,
+-	0x82,0x00,0x01,0xff,0xcf,0x89,0xcd,0x82,0xce,0xb9,0x00,0xd2,0x24,0xd1,0x12,0x10,
+-	0x09,0x01,0xff,0xce,0xbf,0xcc,0x80,0x00,0x01,0xff,0xce,0xbf,0xcc,0x81,0x00,0x10,
+-	0x09,0x01,0xff,0xcf,0x89,0xcc,0x80,0x00,0x01,0xff,0xcf,0x89,0xcc,0x81,0x00,0xe1,
+-	0x4b,0x5b,0x10,0x09,0x01,0xff,0xcf,0x89,0xce,0xb9,0x00,0x01,0xff,0xc2,0xb4,0x00,
+-	0xe0,0xa2,0x67,0xcf,0x86,0xe5,0x24,0x02,0xe4,0x26,0x01,0xe3,0x1b,0x5e,0xd2,0x2b,
+-	0xe1,0xf5,0x5b,0xe0,0x7a,0x5b,0xcf,0x86,0xe5,0x5f,0x5b,0x94,0x1c,0x93,0x18,0x92,
+-	0x14,0x91,0x10,0x10,0x08,0x01,0xff,0xe2,0x80,0x82,0x00,0x01,0xff,0xe2,0x80,0x83,
+-	0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd1,0xd6,0xd0,0x46,0xcf,0x86,0x55,
+-	0x04,0x01,0x00,0xd4,0x29,0xd3,0x13,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,
+-	0x07,0x01,0xff,0xcf,0x89,0x00,0x01,0x00,0x92,0x12,0x51,0x04,0x01,0x00,0x10,0x06,
+-	0x01,0xff,0x6b,0x00,0x01,0xff,0x61,0xcc,0x8a,0x00,0x01,0x00,0xe3,0xba,0x5c,0x92,
+-	0x10,0x51,0x04,0x01,0x00,0x10,0x08,0x01,0xff,0xe2,0x85,0x8e,0x00,0x01,0x00,0x01,
+-	0x00,0xcf,0x86,0xd5,0x0a,0xe4,0xd7,0x5c,0x63,0xc2,0x5c,0x06,0x00,0x94,0x80,0xd3,
+-	0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe2,0x85,0xb0,0x00,0x01,0xff,0xe2,
+-	0x85,0xb1,0x00,0x10,0x08,0x01,0xff,0xe2,0x85,0xb2,0x00,0x01,0xff,0xe2,0x85,0xb3,
+-	0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe2,0x85,0xb4,0x00,0x01,0xff,0xe2,0x85,0xb5,
+-	0x00,0x10,0x08,0x01,0xff,0xe2,0x85,0xb6,0x00,0x01,0xff,0xe2,0x85,0xb7,0x00,0xd2,
+-	0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe2,0x85,0xb8,0x00,0x01,0xff,0xe2,0x85,0xb9,
+-	0x00,0x10,0x08,0x01,0xff,0xe2,0x85,0xba,0x00,0x01,0xff,0xe2,0x85,0xbb,0x00,0xd1,
+-	0x10,0x10,0x08,0x01,0xff,0xe2,0x85,0xbc,0x00,0x01,0xff,0xe2,0x85,0xbd,0x00,0x10,
+-	0x08,0x01,0xff,0xe2,0x85,0xbe,0x00,0x01,0xff,0xe2,0x85,0xbf,0x00,0x01,0x00,0xe0,
+-	0xc9,0x5c,0xcf,0x86,0xe5,0xa8,0x5c,0xe4,0x87,0x5c,0xe3,0x76,0x5c,0xe2,0x69,0x5c,
+-	0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x04,0xff,0xe2,0x86,0x84,0x00,0xe3,0xb8,
+-	0x60,0xe2,0x85,0x60,0xd1,0x0c,0xe0,0x32,0x60,0xcf,0x86,0x65,0x13,0x60,0x01,0x00,
+-	0xd0,0x62,0xcf,0x86,0x55,0x04,0x01,0x00,0x54,0x04,0x01,0x00,0xd3,0x18,0x52,0x04,
+-	0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x08,0x01,0xff,0xe2,0x93,0x90,0x00,0x01,0xff,
+-	0xe2,0x93,0x91,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe2,0x93,0x92,0x00,
+-	0x01,0xff,0xe2,0x93,0x93,0x00,0x10,0x08,0x01,0xff,0xe2,0x93,0x94,0x00,0x01,0xff,
+-	0xe2,0x93,0x95,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe2,0x93,0x96,0x00,0x01,0xff,
+-	0xe2,0x93,0x97,0x00,0x10,0x08,0x01,0xff,0xe2,0x93,0x98,0x00,0x01,0xff,0xe2,0x93,
+-	0x99,0x00,0xcf,0x86,0xe5,0xec,0x5f,0x94,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,
+-	0x08,0x01,0xff,0xe2,0x93,0x9a,0x00,0x01,0xff,0xe2,0x93,0x9b,0x00,0x10,0x08,0x01,
+-	0xff,0xe2,0x93,0x9c,0x00,0x01,0xff,0xe2,0x93,0x9d,0x00,0xd1,0x10,0x10,0x08,0x01,
+-	0xff,0xe2,0x93,0x9e,0x00,0x01,0xff,0xe2,0x93,0x9f,0x00,0x10,0x08,0x01,0xff,0xe2,
+-	0x93,0xa0,0x00,0x01,0xff,0xe2,0x93,0xa1,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,
+-	0xff,0xe2,0x93,0xa2,0x00,0x01,0xff,0xe2,0x93,0xa3,0x00,0x10,0x08,0x01,0xff,0xe2,
+-	0x93,0xa4,0x00,0x01,0xff,0xe2,0x93,0xa5,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe2,
+-	0x93,0xa6,0x00,0x01,0xff,0xe2,0x93,0xa7,0x00,0x10,0x08,0x01,0xff,0xe2,0x93,0xa8,
+-	0x00,0x01,0xff,0xe2,0x93,0xa9,0x00,0x01,0x00,0xd4,0x0c,0xe3,0xc8,0x61,0xe2,0xc1,
+-	0x61,0xcf,0x06,0x04,0x00,0xe3,0xa1,0x64,0xe2,0x94,0x63,0xe1,0x2e,0x02,0xe0,0x84,
+-	0x01,0xcf,0x86,0xe5,0x01,0x01,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,
+-	0x08,0xff,0xe2,0xb0,0xb0,0x00,0x08,0xff,0xe2,0xb0,0xb1,0x00,0x10,0x08,0x08,0xff,
+-	0xe2,0xb0,0xb2,0x00,0x08,0xff,0xe2,0xb0,0xb3,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,
+-	0xe2,0xb0,0xb4,0x00,0x08,0xff,0xe2,0xb0,0xb5,0x00,0x10,0x08,0x08,0xff,0xe2,0xb0,
+-	0xb6,0x00,0x08,0xff,0xe2,0xb0,0xb7,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x08,0xff,
+-	0xe2,0xb0,0xb8,0x00,0x08,0xff,0xe2,0xb0,0xb9,0x00,0x10,0x08,0x08,0xff,0xe2,0xb0,
+-	0xba,0x00,0x08,0xff,0xe2,0xb0,0xbb,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,0xe2,0xb0,
+-	0xbc,0x00,0x08,0xff,0xe2,0xb0,0xbd,0x00,0x10,0x08,0x08,0xff,0xe2,0xb0,0xbe,0x00,
+-	0x08,0xff,0xe2,0xb0,0xbf,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x08,0xff,
+-	0xe2,0xb1,0x80,0x00,0x08,0xff,0xe2,0xb1,0x81,0x00,0x10,0x08,0x08,0xff,0xe2,0xb1,
+-	0x82,0x00,0x08,0xff,0xe2,0xb1,0x83,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,0xe2,0xb1,
+-	0x84,0x00,0x08,0xff,0xe2,0xb1,0x85,0x00,0x10,0x08,0x08,0xff,0xe2,0xb1,0x86,0x00,
+-	0x08,0xff,0xe2,0xb1,0x87,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x08,0xff,0xe2,0xb1,
+-	0x88,0x00,0x08,0xff,0xe2,0xb1,0x89,0x00,0x10,0x08,0x08,0xff,0xe2,0xb1,0x8a,0x00,
+-	0x08,0xff,0xe2,0xb1,0x8b,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,0xe2,0xb1,0x8c,0x00,
+-	0x08,0xff,0xe2,0xb1,0x8d,0x00,0x10,0x08,0x08,0xff,0xe2,0xb1,0x8e,0x00,0x08,0xff,
+-	0xe2,0xb1,0x8f,0x00,0x94,0x7c,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x08,0xff,
+-	0xe2,0xb1,0x90,0x00,0x08,0xff,0xe2,0xb1,0x91,0x00,0x10,0x08,0x08,0xff,0xe2,0xb1,
+-	0x92,0x00,0x08,0xff,0xe2,0xb1,0x93,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,0xe2,0xb1,
+-	0x94,0x00,0x08,0xff,0xe2,0xb1,0x95,0x00,0x10,0x08,0x08,0xff,0xe2,0xb1,0x96,0x00,
+-	0x08,0xff,0xe2,0xb1,0x97,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x08,0xff,0xe2,0xb1,
+-	0x98,0x00,0x08,0xff,0xe2,0xb1,0x99,0x00,0x10,0x08,0x08,0xff,0xe2,0xb1,0x9a,0x00,
+-	0x08,0xff,0xe2,0xb1,0x9b,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,0xe2,0xb1,0x9c,0x00,
+-	0x08,0xff,0xe2,0xb1,0x9d,0x00,0x10,0x08,0x08,0xff,0xe2,0xb1,0x9e,0x00,0x00,0x00,
+-	0x08,0x00,0xcf,0x86,0xd5,0x07,0x64,0x84,0x61,0x08,0x00,0xd4,0x63,0xd3,0x32,0xd2,
+-	0x1b,0xd1,0x0c,0x10,0x08,0x09,0xff,0xe2,0xb1,0xa1,0x00,0x09,0x00,0x10,0x07,0x09,
+-	0xff,0xc9,0xab,0x00,0x09,0xff,0xe1,0xb5,0xbd,0x00,0xd1,0x0b,0x10,0x07,0x09,0xff,
+-	0xc9,0xbd,0x00,0x09,0x00,0x10,0x04,0x09,0x00,0x09,0xff,0xe2,0xb1,0xa8,0x00,0xd2,
+-	0x18,0xd1,0x0c,0x10,0x04,0x09,0x00,0x09,0xff,0xe2,0xb1,0xaa,0x00,0x10,0x04,0x09,
+-	0x00,0x09,0xff,0xe2,0xb1,0xac,0x00,0xd1,0x0b,0x10,0x04,0x09,0x00,0x0a,0xff,0xc9,
+-	0x91,0x00,0x10,0x07,0x0a,0xff,0xc9,0xb1,0x00,0x0a,0xff,0xc9,0x90,0x00,0xd3,0x27,
+-	0xd2,0x17,0xd1,0x0b,0x10,0x07,0x0b,0xff,0xc9,0x92,0x00,0x0a,0x00,0x10,0x08,0x0a,
+-	0xff,0xe2,0xb1,0xb3,0x00,0x0a,0x00,0x91,0x0c,0x10,0x04,0x09,0x00,0x09,0xff,0xe2,
+-	0xb1,0xb6,0x00,0x09,0x00,0x52,0x04,0x0a,0x00,0x51,0x04,0x0a,0x00,0x10,0x07,0x0b,
+-	0xff,0xc8,0xbf,0x00,0x0b,0xff,0xc9,0x80,0x00,0xe0,0x83,0x01,0xcf,0x86,0xd5,0xc0,
+-	0xd4,0x60,0xd3,0x30,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb2,0x81,0x00,
+-	0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb2,0x83,0x00,0x08,0x00,0xd1,0x0c,0x10,0x08,
+-	0x08,0xff,0xe2,0xb2,0x85,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb2,0x87,0x00,
+-	0x08,0x00,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb2,0x89,0x00,0x08,0x00,
+-	0x10,0x08,0x08,0xff,0xe2,0xb2,0x8b,0x00,0x08,0x00,0xd1,0x0c,0x10,0x08,0x08,0xff,
+-	0xe2,0xb2,0x8d,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb2,0x8f,0x00,0x08,0x00,
+-	0xd3,0x30,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb2,0x91,0x00,0x08,0x00,
+-	0x10,0x08,0x08,0xff,0xe2,0xb2,0x93,0x00,0x08,0x00,0xd1,0x0c,0x10,0x08,0x08,0xff,
+-	0xe2,0xb2,0x95,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb2,0x97,0x00,0x08,0x00,
+-	0xd2,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb2,0x99,0x00,0x08,0x00,0x10,0x08,
+-	0x08,0xff,0xe2,0xb2,0x9b,0x00,0x08,0x00,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb2,
+-	0x9d,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb2,0x9f,0x00,0x08,0x00,0xd4,0x60,
+-	0xd3,0x30,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb2,0xa1,0x00,0x08,0x00,
+-	0x10,0x08,0x08,0xff,0xe2,0xb2,0xa3,0x00,0x08,0x00,0xd1,0x0c,0x10,0x08,0x08,0xff,
+-	0xe2,0xb2,0xa5,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb2,0xa7,0x00,0x08,0x00,
+-	0xd2,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb2,0xa9,0x00,0x08,0x00,0x10,0x08,
+-	0x08,0xff,0xe2,0xb2,0xab,0x00,0x08,0x00,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb2,
+-	0xad,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb2,0xaf,0x00,0x08,0x00,0xd3,0x30,
+-	0xd2,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb2,0xb1,0x00,0x08,0x00,0x10,0x08,
+-	0x08,0xff,0xe2,0xb2,0xb3,0x00,0x08,0x00,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb2,
+-	0xb5,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb2,0xb7,0x00,0x08,0x00,0xd2,0x18,
+-	0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb2,0xb9,0x00,0x08,0x00,0x10,0x08,0x08,0xff,
+-	0xe2,0xb2,0xbb,0x00,0x08,0x00,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb2,0xbd,0x00,
+-	0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb2,0xbf,0x00,0x08,0x00,0xcf,0x86,0xd5,0xc0,
+-	0xd4,0x60,0xd3,0x30,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb3,0x81,0x00,
+-	0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb3,0x83,0x00,0x08,0x00,0xd1,0x0c,0x10,0x08,
+-	0x08,0xff,0xe2,0xb3,0x85,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb3,0x87,0x00,
+-	0x08,0x00,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb3,0x89,0x00,0x08,0x00,
+-	0x10,0x08,0x08,0xff,0xe2,0xb3,0x8b,0x00,0x08,0x00,0xd1,0x0c,0x10,0x08,0x08,0xff,
+-	0xe2,0xb3,0x8d,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb3,0x8f,0x00,0x08,0x00,
+-	0xd3,0x30,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb3,0x91,0x00,0x08,0x00,
+-	0x10,0x08,0x08,0xff,0xe2,0xb3,0x93,0x00,0x08,0x00,0xd1,0x0c,0x10,0x08,0x08,0xff,
+-	0xe2,0xb3,0x95,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb3,0x97,0x00,0x08,0x00,
+-	0xd2,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb3,0x99,0x00,0x08,0x00,0x10,0x08,
+-	0x08,0xff,0xe2,0xb3,0x9b,0x00,0x08,0x00,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb3,
+-	0x9d,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb3,0x9f,0x00,0x08,0x00,0xd4,0x3b,
+-	0xd3,0x1c,0x92,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb3,0xa1,0x00,0x08,0x00,
+-	0x10,0x08,0x08,0xff,0xe2,0xb3,0xa3,0x00,0x08,0x00,0x08,0x00,0xd2,0x10,0x51,0x04,
+-	0x08,0x00,0x10,0x04,0x08,0x00,0x0b,0xff,0xe2,0xb3,0xac,0x00,0xe1,0xd0,0x5e,0x10,
+-	0x04,0x0b,0x00,0x0b,0xff,0xe2,0xb3,0xae,0x00,0xe3,0xd5,0x5e,0x92,0x10,0x51,0x04,
+-	0x0b,0xe6,0x10,0x08,0x0d,0xff,0xe2,0xb3,0xb3,0x00,0x0d,0x00,0x00,0x00,0xe2,0x98,
+-	0x08,0xd1,0x0b,0xe0,0x8d,0x66,0xcf,0x86,0xcf,0x06,0x01,0x00,0xe0,0xe1,0x6b,0xcf,
+-	0x86,0xe5,0xa7,0x05,0xd4,0x06,0xcf,0x06,0x04,0x00,0xd3,0x0c,0xe2,0x74,0x67,0xe1,
+-	0x0b,0x67,0xcf,0x06,0x04,0x00,0xe2,0xdb,0x01,0xe1,0x26,0x01,0xd0,0x09,0xcf,0x86,
+-	0x65,0x70,0x67,0x0a,0x00,0xcf,0x86,0xd5,0xc0,0xd4,0x60,0xd3,0x30,0xd2,0x18,0xd1,
+-	0x0c,0x10,0x08,0x0a,0xff,0xea,0x99,0x81,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,
+-	0x99,0x83,0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x99,0x85,0x00,0x0a,
+-	0x00,0x10,0x08,0x0a,0xff,0xea,0x99,0x87,0x00,0x0a,0x00,0xd2,0x18,0xd1,0x0c,0x10,
+-	0x08,0x0a,0xff,0xea,0x99,0x89,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x99,0x8b,
+-	0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x99,0x8d,0x00,0x0a,0x00,0x10,
+-	0x08,0x0a,0xff,0xea,0x99,0x8f,0x00,0x0a,0x00,0xd3,0x30,0xd2,0x18,0xd1,0x0c,0x10,
+-	0x08,0x0a,0xff,0xea,0x99,0x91,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x99,0x93,
+-	0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x99,0x95,0x00,0x0a,0x00,0x10,
+-	0x08,0x0a,0xff,0xea,0x99,0x97,0x00,0x0a,0x00,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x0a,
+-	0xff,0xea,0x99,0x99,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x99,0x9b,0x00,0x0a,
+-	0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x99,0x9d,0x00,0x0a,0x00,0x10,0x08,0x0a,
+-	0xff,0xea,0x99,0x9f,0x00,0x0a,0x00,0xe4,0xd9,0x66,0xd3,0x30,0xd2,0x18,0xd1,0x0c,
+-	0x10,0x08,0x0c,0xff,0xea,0x99,0xa1,0x00,0x0c,0x00,0x10,0x08,0x0a,0xff,0xea,0x99,
+-	0xa3,0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x99,0xa5,0x00,0x0a,0x00,
+-	0x10,0x08,0x0a,0xff,0xea,0x99,0xa7,0x00,0x0a,0x00,0xd2,0x18,0xd1,0x0c,0x10,0x08,
+-	0x0a,0xff,0xea,0x99,0xa9,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x99,0xab,0x00,
+-	0x0a,0x00,0xe1,0x88,0x66,0x10,0x08,0x0a,0xff,0xea,0x99,0xad,0x00,0x0a,0x00,0xe0,
+-	0xb1,0x66,0xcf,0x86,0x95,0xab,0xd4,0x60,0xd3,0x30,0xd2,0x18,0xd1,0x0c,0x10,0x08,
+-	0x0a,0xff,0xea,0x9a,0x81,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9a,0x83,0x00,
+-	0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9a,0x85,0x00,0x0a,0x00,0x10,0x08,
+-	0x0a,0xff,0xea,0x9a,0x87,0x00,0x0a,0x00,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x0a,0xff,
+-	0xea,0x9a,0x89,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9a,0x8b,0x00,0x0a,0x00,
+-	0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9a,0x8d,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,
+-	0xea,0x9a,0x8f,0x00,0x0a,0x00,0xd3,0x30,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x0a,0xff,
+-	0xea,0x9a,0x91,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9a,0x93,0x00,0x0a,0x00,
+-	0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9a,0x95,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,
+-	0xea,0x9a,0x97,0x00,0x0a,0x00,0xe2,0x0e,0x66,0xd1,0x0c,0x10,0x08,0x10,0xff,0xea,
+-	0x9a,0x99,0x00,0x10,0x00,0x10,0x08,0x10,0xff,0xea,0x9a,0x9b,0x00,0x10,0x00,0x0b,
+-	0x00,0xe1,0x10,0x02,0xd0,0xb9,0xcf,0x86,0xd5,0x07,0x64,0x1a,0x66,0x08,0x00,0xd4,
+-	0x58,0xd3,0x28,0xd2,0x10,0x51,0x04,0x09,0x00,0x10,0x08,0x0a,0xff,0xea,0x9c,0xa3,
+-	0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9c,0xa5,0x00,0x0a,0x00,0x10,
+-	0x08,0x0a,0xff,0xea,0x9c,0xa7,0x00,0x0a,0x00,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x0a,
+-	0xff,0xea,0x9c,0xa9,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9c,0xab,0x00,0x0a,
+-	0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9c,0xad,0x00,0x0a,0x00,0x10,0x08,0x0a,
+-	0xff,0xea,0x9c,0xaf,0x00,0x0a,0x00,0xd3,0x28,0xd2,0x10,0x51,0x04,0x0a,0x00,0x10,
+-	0x08,0x0a,0xff,0xea,0x9c,0xb3,0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,
+-	0x9c,0xb5,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9c,0xb7,0x00,0x0a,0x00,0xd2,
+-	0x18,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9c,0xb9,0x00,0x0a,0x00,0x10,0x08,0x0a,
+-	0xff,0xea,0x9c,0xbb,0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9c,0xbd,
+-	0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9c,0xbf,0x00,0x0a,0x00,0xcf,0x86,0xd5,
+-	0xc0,0xd4,0x60,0xd3,0x30,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9d,0x81,
+-	0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9d,0x83,0x00,0x0a,0x00,0xd1,0x0c,0x10,
+-	0x08,0x0a,0xff,0xea,0x9d,0x85,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9d,0x87,
+-	0x00,0x0a,0x00,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9d,0x89,0x00,0x0a,
+-	0x00,0x10,0x08,0x0a,0xff,0xea,0x9d,0x8b,0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,
+-	0xff,0xea,0x9d,0x8d,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9d,0x8f,0x00,0x0a,
+-	0x00,0xd3,0x30,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9d,0x91,0x00,0x0a,
+-	0x00,0x10,0x08,0x0a,0xff,0xea,0x9d,0x93,0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,
+-	0xff,0xea,0x9d,0x95,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9d,0x97,0x00,0x0a,
+-	0x00,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9d,0x99,0x00,0x0a,0x00,0x10,
+-	0x08,0x0a,0xff,0xea,0x9d,0x9b,0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,
+-	0x9d,0x9d,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9d,0x9f,0x00,0x0a,0x00,0xd4,
+-	0x60,0xd3,0x30,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9d,0xa1,0x00,0x0a,
+-	0x00,0x10,0x08,0x0a,0xff,0xea,0x9d,0xa3,0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,
+-	0xff,0xea,0x9d,0xa5,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9d,0xa7,0x00,0x0a,
+-	0x00,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9d,0xa9,0x00,0x0a,0x00,0x10,
+-	0x08,0x0a,0xff,0xea,0x9d,0xab,0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,
+-	0x9d,0xad,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9d,0xaf,0x00,0x0a,0x00,0x53,
+-	0x04,0x0a,0x00,0xd2,0x18,0xd1,0x0c,0x10,0x04,0x0a,0x00,0x0a,0xff,0xea,0x9d,0xba,
+-	0x00,0x10,0x04,0x0a,0x00,0x0a,0xff,0xea,0x9d,0xbc,0x00,0xd1,0x0c,0x10,0x04,0x0a,
+-	0x00,0x0a,0xff,0xe1,0xb5,0xb9,0x00,0x10,0x08,0x0a,0xff,0xea,0x9d,0xbf,0x00,0x0a,
+-	0x00,0xe0,0x71,0x01,0xcf,0x86,0xd5,0xa6,0xd4,0x4e,0xd3,0x30,0xd2,0x18,0xd1,0x0c,
+-	0x10,0x08,0x0a,0xff,0xea,0x9e,0x81,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9e,
+-	0x83,0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9e,0x85,0x00,0x0a,0x00,
+-	0x10,0x08,0x0a,0xff,0xea,0x9e,0x87,0x00,0x0a,0x00,0xd2,0x10,0x51,0x04,0x0a,0x00,
+-	0x10,0x04,0x0a,0x00,0x0a,0xff,0xea,0x9e,0x8c,0x00,0xe1,0x16,0x64,0x10,0x04,0x0a,
+-	0x00,0x0c,0xff,0xc9,0xa5,0x00,0xd3,0x28,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x0c,0xff,
+-	0xea,0x9e,0x91,0x00,0x0c,0x00,0x10,0x08,0x0d,0xff,0xea,0x9e,0x93,0x00,0x0d,0x00,
+-	0x51,0x04,0x10,0x00,0x10,0x08,0x10,0xff,0xea,0x9e,0x97,0x00,0x10,0x00,0xd2,0x18,
+-	0xd1,0x0c,0x10,0x08,0x10,0xff,0xea,0x9e,0x99,0x00,0x10,0x00,0x10,0x08,0x10,0xff,
+-	0xea,0x9e,0x9b,0x00,0x10,0x00,0xd1,0x0c,0x10,0x08,0x10,0xff,0xea,0x9e,0x9d,0x00,
+-	0x10,0x00,0x10,0x08,0x10,0xff,0xea,0x9e,0x9f,0x00,0x10,0x00,0xd4,0x63,0xd3,0x30,
+-	0xd2,0x18,0xd1,0x0c,0x10,0x08,0x0c,0xff,0xea,0x9e,0xa1,0x00,0x0c,0x00,0x10,0x08,
+-	0x0c,0xff,0xea,0x9e,0xa3,0x00,0x0c,0x00,0xd1,0x0c,0x10,0x08,0x0c,0xff,0xea,0x9e,
+-	0xa5,0x00,0x0c,0x00,0x10,0x08,0x0c,0xff,0xea,0x9e,0xa7,0x00,0x0c,0x00,0xd2,0x1a,
+-	0xd1,0x0c,0x10,0x08,0x0c,0xff,0xea,0x9e,0xa9,0x00,0x0c,0x00,0x10,0x07,0x0d,0xff,
+-	0xc9,0xa6,0x00,0x10,0xff,0xc9,0x9c,0x00,0xd1,0x0e,0x10,0x07,0x10,0xff,0xc9,0xa1,
+-	0x00,0x10,0xff,0xc9,0xac,0x00,0x10,0x07,0x12,0xff,0xc9,0xaa,0x00,0x14,0x00,0xd3,
+-	0x35,0xd2,0x1d,0xd1,0x0e,0x10,0x07,0x10,0xff,0xca,0x9e,0x00,0x10,0xff,0xca,0x87,
+-	0x00,0x10,0x07,0x11,0xff,0xca,0x9d,0x00,0x11,0xff,0xea,0xad,0x93,0x00,0xd1,0x0c,
+-	0x10,0x08,0x11,0xff,0xea,0x9e,0xb5,0x00,0x11,0x00,0x10,0x08,0x11,0xff,0xea,0x9e,
+-	0xb7,0x00,0x11,0x00,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x14,0xff,0xea,0x9e,0xb9,0x00,
+-	0x14,0x00,0x10,0x08,0x15,0xff,0xea,0x9e,0xbb,0x00,0x15,0x00,0xd1,0x0c,0x10,0x08,
+-	0x15,0xff,0xea,0x9e,0xbd,0x00,0x15,0x00,0x10,0x08,0x15,0xff,0xea,0x9e,0xbf,0x00,
+-	0x15,0x00,0xcf,0x86,0xe5,0x50,0x63,0x94,0x2f,0x93,0x2b,0xd2,0x10,0x51,0x04,0x00,
+-	0x00,0x10,0x08,0x15,0xff,0xea,0x9f,0x83,0x00,0x15,0x00,0xd1,0x0f,0x10,0x08,0x15,
+-	0xff,0xea,0x9e,0x94,0x00,0x15,0xff,0xca,0x82,0x00,0x10,0x08,0x15,0xff,0xe1,0xb6,
+-	0x8e,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xe4,0x30,0x66,0xd3,0x1d,0xe2,0xd7,0x63,
+-	0xe1,0x86,0x63,0xe0,0x73,0x63,0xcf,0x86,0xe5,0x54,0x63,0x94,0x0b,0x93,0x07,0x62,
+-	0x3f,0x63,0x08,0x00,0x08,0x00,0x08,0x00,0xd2,0x0f,0xe1,0xd6,0x64,0xe0,0xa3,0x64,
+-	0xcf,0x86,0x65,0x88,0x64,0x0a,0x00,0xd1,0xab,0xd0,0x1a,0xcf,0x86,0xe5,0x93,0x65,
+-	0xe4,0x76,0x65,0xe3,0x5d,0x65,0xe2,0x50,0x65,0x91,0x08,0x10,0x04,0x00,0x00,0x0c,
+-	0x00,0x0c,0x00,0xcf,0x86,0x55,0x04,0x10,0x00,0xd4,0x0b,0x93,0x07,0x62,0xa3,0x65,
+-	0x11,0x00,0x00,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x11,0xff,0xe1,0x8e,
+-	0xa0,0x00,0x11,0xff,0xe1,0x8e,0xa1,0x00,0x10,0x08,0x11,0xff,0xe1,0x8e,0xa2,0x00,
+-	0x11,0xff,0xe1,0x8e,0xa3,0x00,0xd1,0x10,0x10,0x08,0x11,0xff,0xe1,0x8e,0xa4,0x00,
+-	0x11,0xff,0xe1,0x8e,0xa5,0x00,0x10,0x08,0x11,0xff,0xe1,0x8e,0xa6,0x00,0x11,0xff,
+-	0xe1,0x8e,0xa7,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x11,0xff,0xe1,0x8e,0xa8,0x00,
+-	0x11,0xff,0xe1,0x8e,0xa9,0x00,0x10,0x08,0x11,0xff,0xe1,0x8e,0xaa,0x00,0x11,0xff,
+-	0xe1,0x8e,0xab,0x00,0xd1,0x10,0x10,0x08,0x11,0xff,0xe1,0x8e,0xac,0x00,0x11,0xff,
+-	0xe1,0x8e,0xad,0x00,0x10,0x08,0x11,0xff,0xe1,0x8e,0xae,0x00,0x11,0xff,0xe1,0x8e,
+-	0xaf,0x00,0xe0,0x2e,0x65,0xcf,0x86,0xe5,0x01,0x01,0xd4,0x80,0xd3,0x40,0xd2,0x20,
+-	0xd1,0x10,0x10,0x08,0x11,0xff,0xe1,0x8e,0xb0,0x00,0x11,0xff,0xe1,0x8e,0xb1,0x00,
+-	0x10,0x08,0x11,0xff,0xe1,0x8e,0xb2,0x00,0x11,0xff,0xe1,0x8e,0xb3,0x00,0xd1,0x10,
+-	0x10,0x08,0x11,0xff,0xe1,0x8e,0xb4,0x00,0x11,0xff,0xe1,0x8e,0xb5,0x00,0x10,0x08,
+-	0x11,0xff,0xe1,0x8e,0xb6,0x00,0x11,0xff,0xe1,0x8e,0xb7,0x00,0xd2,0x20,0xd1,0x10,
+-	0x10,0x08,0x11,0xff,0xe1,0x8e,0xb8,0x00,0x11,0xff,0xe1,0x8e,0xb9,0x00,0x10,0x08,
+-	0x11,0xff,0xe1,0x8e,0xba,0x00,0x11,0xff,0xe1,0x8e,0xbb,0x00,0xd1,0x10,0x10,0x08,
+-	0x11,0xff,0xe1,0x8e,0xbc,0x00,0x11,0xff,0xe1,0x8e,0xbd,0x00,0x10,0x08,0x11,0xff,
+-	0xe1,0x8e,0xbe,0x00,0x11,0xff,0xe1,0x8e,0xbf,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,
+-	0x10,0x08,0x11,0xff,0xe1,0x8f,0x80,0x00,0x11,0xff,0xe1,0x8f,0x81,0x00,0x10,0x08,
+-	0x11,0xff,0xe1,0x8f,0x82,0x00,0x11,0xff,0xe1,0x8f,0x83,0x00,0xd1,0x10,0x10,0x08,
+-	0x11,0xff,0xe1,0x8f,0x84,0x00,0x11,0xff,0xe1,0x8f,0x85,0x00,0x10,0x08,0x11,0xff,
+-	0xe1,0x8f,0x86,0x00,0x11,0xff,0xe1,0x8f,0x87,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,
+-	0x11,0xff,0xe1,0x8f,0x88,0x00,0x11,0xff,0xe1,0x8f,0x89,0x00,0x10,0x08,0x11,0xff,
+-	0xe1,0x8f,0x8a,0x00,0x11,0xff,0xe1,0x8f,0x8b,0x00,0xd1,0x10,0x10,0x08,0x11,0xff,
+-	0xe1,0x8f,0x8c,0x00,0x11,0xff,0xe1,0x8f,0x8d,0x00,0x10,0x08,0x11,0xff,0xe1,0x8f,
+-	0x8e,0x00,0x11,0xff,0xe1,0x8f,0x8f,0x00,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,
+-	0x10,0x08,0x11,0xff,0xe1,0x8f,0x90,0x00,0x11,0xff,0xe1,0x8f,0x91,0x00,0x10,0x08,
+-	0x11,0xff,0xe1,0x8f,0x92,0x00,0x11,0xff,0xe1,0x8f,0x93,0x00,0xd1,0x10,0x10,0x08,
+-	0x11,0xff,0xe1,0x8f,0x94,0x00,0x11,0xff,0xe1,0x8f,0x95,0x00,0x10,0x08,0x11,0xff,
+-	0xe1,0x8f,0x96,0x00,0x11,0xff,0xe1,0x8f,0x97,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,
+-	0x11,0xff,0xe1,0x8f,0x98,0x00,0x11,0xff,0xe1,0x8f,0x99,0x00,0x10,0x08,0x11,0xff,
+-	0xe1,0x8f,0x9a,0x00,0x11,0xff,0xe1,0x8f,0x9b,0x00,0xd1,0x10,0x10,0x08,0x11,0xff,
+-	0xe1,0x8f,0x9c,0x00,0x11,0xff,0xe1,0x8f,0x9d,0x00,0x10,0x08,0x11,0xff,0xe1,0x8f,
+-	0x9e,0x00,0x11,0xff,0xe1,0x8f,0x9f,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,
+-	0x11,0xff,0xe1,0x8f,0xa0,0x00,0x11,0xff,0xe1,0x8f,0xa1,0x00,0x10,0x08,0x11,0xff,
+-	0xe1,0x8f,0xa2,0x00,0x11,0xff,0xe1,0x8f,0xa3,0x00,0xd1,0x10,0x10,0x08,0x11,0xff,
+-	0xe1,0x8f,0xa4,0x00,0x11,0xff,0xe1,0x8f,0xa5,0x00,0x10,0x08,0x11,0xff,0xe1,0x8f,
+-	0xa6,0x00,0x11,0xff,0xe1,0x8f,0xa7,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x11,0xff,
+-	0xe1,0x8f,0xa8,0x00,0x11,0xff,0xe1,0x8f,0xa9,0x00,0x10,0x08,0x11,0xff,0xe1,0x8f,
+-	0xaa,0x00,0x11,0xff,0xe1,0x8f,0xab,0x00,0xd1,0x10,0x10,0x08,0x11,0xff,0xe1,0x8f,
+-	0xac,0x00,0x11,0xff,0xe1,0x8f,0xad,0x00,0x10,0x08,0x11,0xff,0xe1,0x8f,0xae,0x00,
+-	0x11,0xff,0xe1,0x8f,0xaf,0x00,0xd1,0x0c,0xe0,0x67,0x63,0xcf,0x86,0xcf,0x06,0x02,
+-	0xff,0xff,0xd0,0x08,0xcf,0x86,0xcf,0x06,0x01,0x00,0xcf,0x86,0xd5,0x06,0xcf,0x06,
+-	0x01,0x00,0xd4,0xae,0xd3,0x09,0xe2,0xd0,0x63,0xcf,0x06,0x01,0x00,0xd2,0x27,0xe1,
+-	0x9b,0x6f,0xe0,0xa2,0x6d,0xcf,0x86,0xe5,0xbb,0x6c,0xe4,0x4a,0x6c,0xe3,0x15,0x6c,
+-	0xe2,0xf4,0x6b,0xe1,0xe3,0x6b,0x10,0x08,0x01,0xff,0xe5,0x88,0x87,0x00,0x01,0xff,
+-	0xe5,0xba,0xa6,0x00,0xe1,0xf0,0x73,0xe0,0x64,0x73,0xcf,0x86,0xe5,0x9e,0x72,0xd4,
+-	0x3b,0x93,0x37,0xd2,0x1d,0xd1,0x0e,0x10,0x07,0x01,0xff,0x66,0x66,0x00,0x01,0xff,
+-	0x66,0x69,0x00,0x10,0x07,0x01,0xff,0x66,0x6c,0x00,0x01,0xff,0x66,0x66,0x69,0x00,
+-	0xd1,0x0f,0x10,0x08,0x01,0xff,0x66,0x66,0x6c,0x00,0x01,0xff,0x73,0x74,0x00,0x10,
+-	0x07,0x01,0xff,0x73,0x74,0x00,0x00,0x00,0x00,0x00,0xe3,0x44,0x72,0xd2,0x11,0x51,
+-	0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x01,0xff,0xd5,0xb4,0xd5,0xb6,0x00,0xd1,0x12,
+-	0x10,0x09,0x01,0xff,0xd5,0xb4,0xd5,0xa5,0x00,0x01,0xff,0xd5,0xb4,0xd5,0xab,0x00,
+-	0x10,0x09,0x01,0xff,0xd5,0xbe,0xd5,0xb6,0x00,0x01,0xff,0xd5,0xb4,0xd5,0xad,0x00,
+-	0xd3,0x09,0xe2,0xbc,0x73,0xcf,0x06,0x01,0x00,0xd2,0x12,0xe1,0xab,0x74,0xe0,0x3c,
+-	0x74,0xcf,0x86,0xe5,0x19,0x74,0x64,0x08,0x74,0x06,0x00,0xe1,0x11,0x75,0xe0,0xde,
+-	0x74,0xcf,0x86,0xd5,0x18,0x94,0x14,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x00,
+-	0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd4,0x7c,0xd3,0x3c,0xd2,
+-	0x1c,0xd1,0x0c,0x10,0x04,0x01,0x00,0x01,0xff,0xef,0xbd,0x81,0x00,0x10,0x08,0x01,
+-	0xff,0xef,0xbd,0x82,0x00,0x01,0xff,0xef,0xbd,0x83,0x00,0xd1,0x10,0x10,0x08,0x01,
+-	0xff,0xef,0xbd,0x84,0x00,0x01,0xff,0xef,0xbd,0x85,0x00,0x10,0x08,0x01,0xff,0xef,
+-	0xbd,0x86,0x00,0x01,0xff,0xef,0xbd,0x87,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,
+-	0xff,0xef,0xbd,0x88,0x00,0x01,0xff,0xef,0xbd,0x89,0x00,0x10,0x08,0x01,0xff,0xef,
+-	0xbd,0x8a,0x00,0x01,0xff,0xef,0xbd,0x8b,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xef,
+-	0xbd,0x8c,0x00,0x01,0xff,0xef,0xbd,0x8d,0x00,0x10,0x08,0x01,0xff,0xef,0xbd,0x8e,
+-	0x00,0x01,0xff,0xef,0xbd,0x8f,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,
+-	0xff,0xef,0xbd,0x90,0x00,0x01,0xff,0xef,0xbd,0x91,0x00,0x10,0x08,0x01,0xff,0xef,
+-	0xbd,0x92,0x00,0x01,0xff,0xef,0xbd,0x93,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xef,
+-	0xbd,0x94,0x00,0x01,0xff,0xef,0xbd,0x95,0x00,0x10,0x08,0x01,0xff,0xef,0xbd,0x96,
+-	0x00,0x01,0xff,0xef,0xbd,0x97,0x00,0x92,0x1c,0xd1,0x10,0x10,0x08,0x01,0xff,0xef,
+-	0xbd,0x98,0x00,0x01,0xff,0xef,0xbd,0x99,0x00,0x10,0x08,0x01,0xff,0xef,0xbd,0x9a,
+-	0x00,0x01,0x00,0x01,0x00,0x83,0xe2,0xd9,0xb2,0xe1,0xc3,0xaf,0xe0,0x40,0xae,0xcf,
+-	0x86,0xe5,0xe4,0x9a,0xc4,0xe3,0xc1,0x07,0xe2,0x62,0x06,0xe1,0x79,0x85,0xe0,0x09,
+-	0x05,0xcf,0x86,0xe5,0xfb,0x02,0xd4,0x1c,0xe3,0xe7,0x75,0xe2,0x3e,0x75,0xe1,0x19,
+-	0x75,0xe0,0xf2,0x74,0xcf,0x86,0xe5,0xbf,0x74,0x94,0x07,0x63,0xaa,0x74,0x07,0x00,
+-	0x07,0x00,0xe3,0x93,0x77,0xe2,0x58,0x77,0xe1,0x77,0x01,0xe0,0xf0,0x76,0xcf,0x86,
+-	0xe5,0x21,0x01,0xd4,0x90,0xd3,0x48,0xd2,0x24,0xd1,0x12,0x10,0x09,0x05,0xff,0xf0,
+-	0x90,0x90,0xa8,0x00,0x05,0xff,0xf0,0x90,0x90,0xa9,0x00,0x10,0x09,0x05,0xff,0xf0,
+-	0x90,0x90,0xaa,0x00,0x05,0xff,0xf0,0x90,0x90,0xab,0x00,0xd1,0x12,0x10,0x09,0x05,
+-	0xff,0xf0,0x90,0x90,0xac,0x00,0x05,0xff,0xf0,0x90,0x90,0xad,0x00,0x10,0x09,0x05,
+-	0xff,0xf0,0x90,0x90,0xae,0x00,0x05,0xff,0xf0,0x90,0x90,0xaf,0x00,0xd2,0x24,0xd1,
+-	0x12,0x10,0x09,0x05,0xff,0xf0,0x90,0x90,0xb0,0x00,0x05,0xff,0xf0,0x90,0x90,0xb1,
+-	0x00,0x10,0x09,0x05,0xff,0xf0,0x90,0x90,0xb2,0x00,0x05,0xff,0xf0,0x90,0x90,0xb3,
+-	0x00,0xd1,0x12,0x10,0x09,0x05,0xff,0xf0,0x90,0x90,0xb4,0x00,0x05,0xff,0xf0,0x90,
+-	0x90,0xb5,0x00,0x10,0x09,0x05,0xff,0xf0,0x90,0x90,0xb6,0x00,0x05,0xff,0xf0,0x90,
+-	0x90,0xb7,0x00,0xd3,0x48,0xd2,0x24,0xd1,0x12,0x10,0x09,0x05,0xff,0xf0,0x90,0x90,
+-	0xb8,0x00,0x05,0xff,0xf0,0x90,0x90,0xb9,0x00,0x10,0x09,0x05,0xff,0xf0,0x90,0x90,
+-	0xba,0x00,0x05,0xff,0xf0,0x90,0x90,0xbb,0x00,0xd1,0x12,0x10,0x09,0x05,0xff,0xf0,
+-	0x90,0x90,0xbc,0x00,0x05,0xff,0xf0,0x90,0x90,0xbd,0x00,0x10,0x09,0x05,0xff,0xf0,
+-	0x90,0x90,0xbe,0x00,0x05,0xff,0xf0,0x90,0x90,0xbf,0x00,0xd2,0x24,0xd1,0x12,0x10,
+-	0x09,0x05,0xff,0xf0,0x90,0x91,0x80,0x00,0x05,0xff,0xf0,0x90,0x91,0x81,0x00,0x10,
+-	0x09,0x05,0xff,0xf0,0x90,0x91,0x82,0x00,0x05,0xff,0xf0,0x90,0x91,0x83,0x00,0xd1,
+-	0x12,0x10,0x09,0x05,0xff,0xf0,0x90,0x91,0x84,0x00,0x05,0xff,0xf0,0x90,0x91,0x85,
+-	0x00,0x10,0x09,0x05,0xff,0xf0,0x90,0x91,0x86,0x00,0x05,0xff,0xf0,0x90,0x91,0x87,
+-	0x00,0x94,0x4c,0x93,0x48,0xd2,0x24,0xd1,0x12,0x10,0x09,0x05,0xff,0xf0,0x90,0x91,
+-	0x88,0x00,0x05,0xff,0xf0,0x90,0x91,0x89,0x00,0x10,0x09,0x05,0xff,0xf0,0x90,0x91,
+-	0x8a,0x00,0x05,0xff,0xf0,0x90,0x91,0x8b,0x00,0xd1,0x12,0x10,0x09,0x05,0xff,0xf0,
+-	0x90,0x91,0x8c,0x00,0x05,0xff,0xf0,0x90,0x91,0x8d,0x00,0x10,0x09,0x07,0xff,0xf0,
+-	0x90,0x91,0x8e,0x00,0x07,0xff,0xf0,0x90,0x91,0x8f,0x00,0x05,0x00,0x05,0x00,0xd0,
+-	0xa0,0xcf,0x86,0xd5,0x07,0x64,0x98,0x75,0x07,0x00,0xd4,0x07,0x63,0xa5,0x75,0x07,
+-	0x00,0xd3,0x48,0xd2,0x24,0xd1,0x12,0x10,0x09,0x12,0xff,0xf0,0x90,0x93,0x98,0x00,
+-	0x12,0xff,0xf0,0x90,0x93,0x99,0x00,0x10,0x09,0x12,0xff,0xf0,0x90,0x93,0x9a,0x00,
+-	0x12,0xff,0xf0,0x90,0x93,0x9b,0x00,0xd1,0x12,0x10,0x09,0x12,0xff,0xf0,0x90,0x93,
+-	0x9c,0x00,0x12,0xff,0xf0,0x90,0x93,0x9d,0x00,0x10,0x09,0x12,0xff,0xf0,0x90,0x93,
+-	0x9e,0x00,0x12,0xff,0xf0,0x90,0x93,0x9f,0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x12,
+-	0xff,0xf0,0x90,0x93,0xa0,0x00,0x12,0xff,0xf0,0x90,0x93,0xa1,0x00,0x10,0x09,0x12,
+-	0xff,0xf0,0x90,0x93,0xa2,0x00,0x12,0xff,0xf0,0x90,0x93,0xa3,0x00,0xd1,0x12,0x10,
+-	0x09,0x12,0xff,0xf0,0x90,0x93,0xa4,0x00,0x12,0xff,0xf0,0x90,0x93,0xa5,0x00,0x10,
+-	0x09,0x12,0xff,0xf0,0x90,0x93,0xa6,0x00,0x12,0xff,0xf0,0x90,0x93,0xa7,0x00,0xcf,
+-	0x86,0xe5,0x2e,0x75,0xd4,0x90,0xd3,0x48,0xd2,0x24,0xd1,0x12,0x10,0x09,0x12,0xff,
+-	0xf0,0x90,0x93,0xa8,0x00,0x12,0xff,0xf0,0x90,0x93,0xa9,0x00,0x10,0x09,0x12,0xff,
+-	0xf0,0x90,0x93,0xaa,0x00,0x12,0xff,0xf0,0x90,0x93,0xab,0x00,0xd1,0x12,0x10,0x09,
+-	0x12,0xff,0xf0,0x90,0x93,0xac,0x00,0x12,0xff,0xf0,0x90,0x93,0xad,0x00,0x10,0x09,
+-	0x12,0xff,0xf0,0x90,0x93,0xae,0x00,0x12,0xff,0xf0,0x90,0x93,0xaf,0x00,0xd2,0x24,
+-	0xd1,0x12,0x10,0x09,0x12,0xff,0xf0,0x90,0x93,0xb0,0x00,0x12,0xff,0xf0,0x90,0x93,
+-	0xb1,0x00,0x10,0x09,0x12,0xff,0xf0,0x90,0x93,0xb2,0x00,0x12,0xff,0xf0,0x90,0x93,
+-	0xb3,0x00,0xd1,0x12,0x10,0x09,0x12,0xff,0xf0,0x90,0x93,0xb4,0x00,0x12,0xff,0xf0,
+-	0x90,0x93,0xb5,0x00,0x10,0x09,0x12,0xff,0xf0,0x90,0x93,0xb6,0x00,0x12,0xff,0xf0,
+-	0x90,0x93,0xb7,0x00,0x93,0x28,0x92,0x24,0xd1,0x12,0x10,0x09,0x12,0xff,0xf0,0x90,
+-	0x93,0xb8,0x00,0x12,0xff,0xf0,0x90,0x93,0xb9,0x00,0x10,0x09,0x12,0xff,0xf0,0x90,
+-	0x93,0xba,0x00,0x12,0xff,0xf0,0x90,0x93,0xbb,0x00,0x00,0x00,0x12,0x00,0xd4,0x1f,
+-	0xe3,0x47,0x76,0xe2,0xd2,0x75,0xe1,0x71,0x75,0xe0,0x52,0x75,0xcf,0x86,0xe5,0x1f,
+-	0x75,0x94,0x0a,0xe3,0x0a,0x75,0x62,0x01,0x75,0x07,0x00,0x07,0x00,0xe3,0x46,0x78,
+-	0xe2,0x17,0x78,0xd1,0x09,0xe0,0xb4,0x77,0xcf,0x06,0x0b,0x00,0xe0,0xe7,0x77,0xcf,
+-	0x86,0xe5,0x21,0x01,0xd4,0x90,0xd3,0x48,0xd2,0x24,0xd1,0x12,0x10,0x09,0x11,0xff,
+-	0xf0,0x90,0xb3,0x80,0x00,0x11,0xff,0xf0,0x90,0xb3,0x81,0x00,0x10,0x09,0x11,0xff,
+-	0xf0,0x90,0xb3,0x82,0x00,0x11,0xff,0xf0,0x90,0xb3,0x83,0x00,0xd1,0x12,0x10,0x09,
+-	0x11,0xff,0xf0,0x90,0xb3,0x84,0x00,0x11,0xff,0xf0,0x90,0xb3,0x85,0x00,0x10,0x09,
+-	0x11,0xff,0xf0,0x90,0xb3,0x86,0x00,0x11,0xff,0xf0,0x90,0xb3,0x87,0x00,0xd2,0x24,
+-	0xd1,0x12,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0x88,0x00,0x11,0xff,0xf0,0x90,0xb3,
+-	0x89,0x00,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0x8a,0x00,0x11,0xff,0xf0,0x90,0xb3,
+-	0x8b,0x00,0xd1,0x12,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0x8c,0x00,0x11,0xff,0xf0,
+-	0x90,0xb3,0x8d,0x00,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0x8e,0x00,0x11,0xff,0xf0,
+-	0x90,0xb3,0x8f,0x00,0xd3,0x48,0xd2,0x24,0xd1,0x12,0x10,0x09,0x11,0xff,0xf0,0x90,
+-	0xb3,0x90,0x00,0x11,0xff,0xf0,0x90,0xb3,0x91,0x00,0x10,0x09,0x11,0xff,0xf0,0x90,
+-	0xb3,0x92,0x00,0x11,0xff,0xf0,0x90,0xb3,0x93,0x00,0xd1,0x12,0x10,0x09,0x11,0xff,
+-	0xf0,0x90,0xb3,0x94,0x00,0x11,0xff,0xf0,0x90,0xb3,0x95,0x00,0x10,0x09,0x11,0xff,
+-	0xf0,0x90,0xb3,0x96,0x00,0x11,0xff,0xf0,0x90,0xb3,0x97,0x00,0xd2,0x24,0xd1,0x12,
+-	0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0x98,0x00,0x11,0xff,0xf0,0x90,0xb3,0x99,0x00,
+-	0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0x9a,0x00,0x11,0xff,0xf0,0x90,0xb3,0x9b,0x00,
+-	0xd1,0x12,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0x9c,0x00,0x11,0xff,0xf0,0x90,0xb3,
+-	0x9d,0x00,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0x9e,0x00,0x11,0xff,0xf0,0x90,0xb3,
+-	0x9f,0x00,0xd4,0x90,0xd3,0x48,0xd2,0x24,0xd1,0x12,0x10,0x09,0x11,0xff,0xf0,0x90,
+-	0xb3,0xa0,0x00,0x11,0xff,0xf0,0x90,0xb3,0xa1,0x00,0x10,0x09,0x11,0xff,0xf0,0x90,
+-	0xb3,0xa2,0x00,0x11,0xff,0xf0,0x90,0xb3,0xa3,0x00,0xd1,0x12,0x10,0x09,0x11,0xff,
+-	0xf0,0x90,0xb3,0xa4,0x00,0x11,0xff,0xf0,0x90,0xb3,0xa5,0x00,0x10,0x09,0x11,0xff,
+-	0xf0,0x90,0xb3,0xa6,0x00,0x11,0xff,0xf0,0x90,0xb3,0xa7,0x00,0xd2,0x24,0xd1,0x12,
+-	0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0xa8,0x00,0x11,0xff,0xf0,0x90,0xb3,0xa9,0x00,
+-	0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0xaa,0x00,0x11,0xff,0xf0,0x90,0xb3,0xab,0x00,
+-	0xd1,0x12,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0xac,0x00,0x11,0xff,0xf0,0x90,0xb3,
+-	0xad,0x00,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0xae,0x00,0x11,0xff,0xf0,0x90,0xb3,
+-	0xaf,0x00,0x93,0x23,0x92,0x1f,0xd1,0x12,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0xb0,
+-	0x00,0x11,0xff,0xf0,0x90,0xb3,0xb1,0x00,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0xb2,
+-	0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x86,0xd5,0x15,0xe4,0xf9,0x7a,0xe3,0x03,
+-	0x79,0xe2,0xfc,0x77,0xe1,0x4c,0x77,0xe0,0x05,0x77,0xcf,0x06,0x0c,0x00,0xe4,0x53,
+-	0x7e,0xe3,0xac,0x7d,0xe2,0x55,0x7d,0xd1,0x0c,0xe0,0x1a,0x7d,0xcf,0x86,0x65,0xfb,
+-	0x7c,0x14,0x00,0xe0,0x1e,0x7d,0xcf,0x86,0x55,0x04,0x00,0x00,0xd4,0x90,0xd3,0x48,
+-	0xd2,0x24,0xd1,0x12,0x10,0x09,0x10,0xff,0xf0,0x91,0xa3,0x80,0x00,0x10,0xff,0xf0,
+-	0x91,0xa3,0x81,0x00,0x10,0x09,0x10,0xff,0xf0,0x91,0xa3,0x82,0x00,0x10,0xff,0xf0,
+-	0x91,0xa3,0x83,0x00,0xd1,0x12,0x10,0x09,0x10,0xff,0xf0,0x91,0xa3,0x84,0x00,0x10,
+-	0xff,0xf0,0x91,0xa3,0x85,0x00,0x10,0x09,0x10,0xff,0xf0,0x91,0xa3,0x86,0x00,0x10,
+-	0xff,0xf0,0x91,0xa3,0x87,0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x10,0xff,0xf0,0x91,
+-	0xa3,0x88,0x00,0x10,0xff,0xf0,0x91,0xa3,0x89,0x00,0x10,0x09,0x10,0xff,0xf0,0x91,
+-	0xa3,0x8a,0x00,0x10,0xff,0xf0,0x91,0xa3,0x8b,0x00,0xd1,0x12,0x10,0x09,0x10,0xff,
+-	0xf0,0x91,0xa3,0x8c,0x00,0x10,0xff,0xf0,0x91,0xa3,0x8d,0x00,0x10,0x09,0x10,0xff,
+-	0xf0,0x91,0xa3,0x8e,0x00,0x10,0xff,0xf0,0x91,0xa3,0x8f,0x00,0xd3,0x48,0xd2,0x24,
+-	0xd1,0x12,0x10,0x09,0x10,0xff,0xf0,0x91,0xa3,0x90,0x00,0x10,0xff,0xf0,0x91,0xa3,
+-	0x91,0x00,0x10,0x09,0x10,0xff,0xf0,0x91,0xa3,0x92,0x00,0x10,0xff,0xf0,0x91,0xa3,
+-	0x93,0x00,0xd1,0x12,0x10,0x09,0x10,0xff,0xf0,0x91,0xa3,0x94,0x00,0x10,0xff,0xf0,
+-	0x91,0xa3,0x95,0x00,0x10,0x09,0x10,0xff,0xf0,0x91,0xa3,0x96,0x00,0x10,0xff,0xf0,
+-	0x91,0xa3,0x97,0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x10,0xff,0xf0,0x91,0xa3,0x98,
+-	0x00,0x10,0xff,0xf0,0x91,0xa3,0x99,0x00,0x10,0x09,0x10,0xff,0xf0,0x91,0xa3,0x9a,
+-	0x00,0x10,0xff,0xf0,0x91,0xa3,0x9b,0x00,0xd1,0x12,0x10,0x09,0x10,0xff,0xf0,0x91,
+-	0xa3,0x9c,0x00,0x10,0xff,0xf0,0x91,0xa3,0x9d,0x00,0x10,0x09,0x10,0xff,0xf0,0x91,
+-	0xa3,0x9e,0x00,0x10,0xff,0xf0,0x91,0xa3,0x9f,0x00,0xd1,0x11,0xe0,0x7a,0x80,0xcf,
+-	0x86,0xe5,0x71,0x80,0xe4,0x3a,0x80,0xcf,0x06,0x00,0x00,0xe0,0x43,0x82,0xcf,0x86,
+-	0xd5,0x06,0xcf,0x06,0x00,0x00,0xd4,0x09,0xe3,0x78,0x80,0xcf,0x06,0x0c,0x00,0xd3,
+-	0x06,0xcf,0x06,0x00,0x00,0xe2,0xa3,0x81,0xe1,0x7e,0x81,0xd0,0x06,0xcf,0x06,0x00,
+-	0x00,0xcf,0x86,0xa5,0x21,0x01,0xd4,0x90,0xd3,0x48,0xd2,0x24,0xd1,0x12,0x10,0x09,
+-	0x14,0xff,0xf0,0x96,0xb9,0xa0,0x00,0x14,0xff,0xf0,0x96,0xb9,0xa1,0x00,0x10,0x09,
+-	0x14,0xff,0xf0,0x96,0xb9,0xa2,0x00,0x14,0xff,0xf0,0x96,0xb9,0xa3,0x00,0xd1,0x12,
+-	0x10,0x09,0x14,0xff,0xf0,0x96,0xb9,0xa4,0x00,0x14,0xff,0xf0,0x96,0xb9,0xa5,0x00,
+-	0x10,0x09,0x14,0xff,0xf0,0x96,0xb9,0xa6,0x00,0x14,0xff,0xf0,0x96,0xb9,0xa7,0x00,
+-	0xd2,0x24,0xd1,0x12,0x10,0x09,0x14,0xff,0xf0,0x96,0xb9,0xa8,0x00,0x14,0xff,0xf0,
+-	0x96,0xb9,0xa9,0x00,0x10,0x09,0x14,0xff,0xf0,0x96,0xb9,0xaa,0x00,0x14,0xff,0xf0,
+-	0x96,0xb9,0xab,0x00,0xd1,0x12,0x10,0x09,0x14,0xff,0xf0,0x96,0xb9,0xac,0x00,0x14,
+-	0xff,0xf0,0x96,0xb9,0xad,0x00,0x10,0x09,0x14,0xff,0xf0,0x96,0xb9,0xae,0x00,0x14,
+-	0xff,0xf0,0x96,0xb9,0xaf,0x00,0xd3,0x48,0xd2,0x24,0xd1,0x12,0x10,0x09,0x14,0xff,
+-	0xf0,0x96,0xb9,0xb0,0x00,0x14,0xff,0xf0,0x96,0xb9,0xb1,0x00,0x10,0x09,0x14,0xff,
+-	0xf0,0x96,0xb9,0xb2,0x00,0x14,0xff,0xf0,0x96,0xb9,0xb3,0x00,0xd1,0x12,0x10,0x09,
+-	0x14,0xff,0xf0,0x96,0xb9,0xb4,0x00,0x14,0xff,0xf0,0x96,0xb9,0xb5,0x00,0x10,0x09,
+-	0x14,0xff,0xf0,0x96,0xb9,0xb6,0x00,0x14,0xff,0xf0,0x96,0xb9,0xb7,0x00,0xd2,0x24,
+-	0xd1,0x12,0x10,0x09,0x14,0xff,0xf0,0x96,0xb9,0xb8,0x00,0x14,0xff,0xf0,0x96,0xb9,
+-	0xb9,0x00,0x10,0x09,0x14,0xff,0xf0,0x96,0xb9,0xba,0x00,0x14,0xff,0xf0,0x96,0xb9,
+-	0xbb,0x00,0xd1,0x12,0x10,0x09,0x14,0xff,0xf0,0x96,0xb9,0xbc,0x00,0x14,0xff,0xf0,
+-	0x96,0xb9,0xbd,0x00,0x10,0x09,0x14,0xff,0xf0,0x96,0xb9,0xbe,0x00,0x14,0xff,0xf0,
+-	0x96,0xb9,0xbf,0x00,0x14,0x00,0xd2,0x14,0xe1,0x8d,0x81,0xe0,0x84,0x81,0xcf,0x86,
+-	0xe5,0x45,0x81,0xe4,0x02,0x81,0xcf,0x06,0x12,0x00,0xd1,0x0b,0xe0,0xb8,0x82,0xcf,
+-	0x86,0xcf,0x06,0x00,0x00,0xe0,0xf8,0x8a,0xcf,0x86,0xd5,0x22,0xe4,0x33,0x88,0xe3,
+-	0xf6,0x87,0xe2,0x9b,0x87,0xe1,0x94,0x87,0xe0,0x8d,0x87,0xcf,0x86,0xe5,0x5e,0x87,
+-	0xe4,0x45,0x87,0x93,0x07,0x62,0x34,0x87,0x12,0xe6,0x12,0xe6,0xe4,0x99,0x88,0xe3,
+-	0x92,0x88,0xd2,0x09,0xe1,0x1b,0x88,0xcf,0x06,0x10,0x00,0xe1,0x82,0x88,0xe0,0x4f,
+-	0x88,0xcf,0x86,0xe5,0x21,0x01,0xd4,0x90,0xd3,0x48,0xd2,0x24,0xd1,0x12,0x10,0x09,
+-	0x12,0xff,0xf0,0x9e,0xa4,0xa2,0x00,0x12,0xff,0xf0,0x9e,0xa4,0xa3,0x00,0x10,0x09,
+-	0x12,0xff,0xf0,0x9e,0xa4,0xa4,0x00,0x12,0xff,0xf0,0x9e,0xa4,0xa5,0x00,0xd1,0x12,
+-	0x10,0x09,0x12,0xff,0xf0,0x9e,0xa4,0xa6,0x00,0x12,0xff,0xf0,0x9e,0xa4,0xa7,0x00,
+-	0x10,0x09,0x12,0xff,0xf0,0x9e,0xa4,0xa8,0x00,0x12,0xff,0xf0,0x9e,0xa4,0xa9,0x00,
+-	0xd2,0x24,0xd1,0x12,0x10,0x09,0x12,0xff,0xf0,0x9e,0xa4,0xaa,0x00,0x12,0xff,0xf0,
+-	0x9e,0xa4,0xab,0x00,0x10,0x09,0x12,0xff,0xf0,0x9e,0xa4,0xac,0x00,0x12,0xff,0xf0,
+-	0x9e,0xa4,0xad,0x00,0xd1,0x12,0x10,0x09,0x12,0xff,0xf0,0x9e,0xa4,0xae,0x00,0x12,
+-	0xff,0xf0,0x9e,0xa4,0xaf,0x00,0x10,0x09,0x12,0xff,0xf0,0x9e,0xa4,0xb0,0x00,0x12,
+-	0xff,0xf0,0x9e,0xa4,0xb1,0x00,0xd3,0x48,0xd2,0x24,0xd1,0x12,0x10,0x09,0x12,0xff,
+-	0xf0,0x9e,0xa4,0xb2,0x00,0x12,0xff,0xf0,0x9e,0xa4,0xb3,0x00,0x10,0x09,0x12,0xff,
+-	0xf0,0x9e,0xa4,0xb4,0x00,0x12,0xff,0xf0,0x9e,0xa4,0xb5,0x00,0xd1,0x12,0x10,0x09,
+-	0x12,0xff,0xf0,0x9e,0xa4,0xb6,0x00,0x12,0xff,0xf0,0x9e,0xa4,0xb7,0x00,0x10,0x09,
+-	0x12,0xff,0xf0,0x9e,0xa4,0xb8,0x00,0x12,0xff,0xf0,0x9e,0xa4,0xb9,0x00,0xd2,0x24,
+-	0xd1,0x12,0x10,0x09,0x12,0xff,0xf0,0x9e,0xa4,0xba,0x00,0x12,0xff,0xf0,0x9e,0xa4,
+-	0xbb,0x00,0x10,0x09,0x12,0xff,0xf0,0x9e,0xa4,0xbc,0x00,0x12,0xff,0xf0,0x9e,0xa4,
+-	0xbd,0x00,0xd1,0x12,0x10,0x09,0x12,0xff,0xf0,0x9e,0xa4,0xbe,0x00,0x12,0xff,0xf0,
+-	0x9e,0xa4,0xbf,0x00,0x10,0x09,0x12,0xff,0xf0,0x9e,0xa5,0x80,0x00,0x12,0xff,0xf0,
+-	0x9e,0xa5,0x81,0x00,0x94,0x1e,0x93,0x1a,0x92,0x16,0x91,0x12,0x10,0x09,0x12,0xff,
+-	0xf0,0x9e,0xa5,0x82,0x00,0x12,0xff,0xf0,0x9e,0xa5,0x83,0x00,0x12,0x00,0x12,0x00,
+-	0x12,0x00,0x12,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+-	0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+-	0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+-	/* nfdi_c0100 */
+-	0x57,0x04,0x01,0x00,0xc6,0xe5,0x91,0x13,0xe4,0x27,0x0c,0xe3,0x61,0x07,0xe2,0xda,
+-	0x01,0xc1,0xd0,0x06,0xcf,0x06,0x01,0x00,0xcf,0x86,0xd5,0xe4,0xd4,0x7c,0xd3,0x3c,
+-	0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x41,0xcc,0x80,0x00,0x01,0xff,0x41,0xcc,
+-	0x81,0x00,0x10,0x08,0x01,0xff,0x41,0xcc,0x82,0x00,0x01,0xff,0x41,0xcc,0x83,0x00,
+-	0xd1,0x10,0x10,0x08,0x01,0xff,0x41,0xcc,0x88,0x00,0x01,0xff,0x41,0xcc,0x8a,0x00,
+-	0x10,0x04,0x01,0x00,0x01,0xff,0x43,0xcc,0xa7,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,
+-	0x01,0xff,0x45,0xcc,0x80,0x00,0x01,0xff,0x45,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,
+-	0x45,0xcc,0x82,0x00,0x01,0xff,0x45,0xcc,0x88,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,
+-	0x49,0xcc,0x80,0x00,0x01,0xff,0x49,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x49,0xcc,
+-	0x82,0x00,0x01,0xff,0x49,0xcc,0x88,0x00,0xd3,0x38,0xd2,0x1c,0xd1,0x0c,0x10,0x04,
+-	0x01,0x00,0x01,0xff,0x4e,0xcc,0x83,0x00,0x10,0x08,0x01,0xff,0x4f,0xcc,0x80,0x00,
+-	0x01,0xff,0x4f,0xcc,0x81,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x4f,0xcc,0x82,0x00,
+-	0x01,0xff,0x4f,0xcc,0x83,0x00,0x10,0x08,0x01,0xff,0x4f,0xcc,0x88,0x00,0x01,0x00,
+-	0xd2,0x1c,0xd1,0x0c,0x10,0x04,0x01,0x00,0x01,0xff,0x55,0xcc,0x80,0x00,0x10,0x08,
+-	0x01,0xff,0x55,0xcc,0x81,0x00,0x01,0xff,0x55,0xcc,0x82,0x00,0x91,0x10,0x10,0x08,
+-	0x01,0xff,0x55,0xcc,0x88,0x00,0x01,0xff,0x59,0xcc,0x81,0x00,0x01,0x00,0xd4,0x7c,
+-	0xd3,0x3c,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x61,0xcc,0x80,0x00,0x01,0xff,
+-	0x61,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x61,0xcc,0x82,0x00,0x01,0xff,0x61,0xcc,
+-	0x83,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x61,0xcc,0x88,0x00,0x01,0xff,0x61,0xcc,
+-	0x8a,0x00,0x10,0x04,0x01,0x00,0x01,0xff,0x63,0xcc,0xa7,0x00,0xd2,0x20,0xd1,0x10,
+-	0x10,0x08,0x01,0xff,0x65,0xcc,0x80,0x00,0x01,0xff,0x65,0xcc,0x81,0x00,0x10,0x08,
+-	0x01,0xff,0x65,0xcc,0x82,0x00,0x01,0xff,0x65,0xcc,0x88,0x00,0xd1,0x10,0x10,0x08,
+-	0x01,0xff,0x69,0xcc,0x80,0x00,0x01,0xff,0x69,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,
+-	0x69,0xcc,0x82,0x00,0x01,0xff,0x69,0xcc,0x88,0x00,0xd3,0x38,0xd2,0x1c,0xd1,0x0c,
+-	0x10,0x04,0x01,0x00,0x01,0xff,0x6e,0xcc,0x83,0x00,0x10,0x08,0x01,0xff,0x6f,0xcc,
+-	0x80,0x00,0x01,0xff,0x6f,0xcc,0x81,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x6f,0xcc,
+-	0x82,0x00,0x01,0xff,0x6f,0xcc,0x83,0x00,0x10,0x08,0x01,0xff,0x6f,0xcc,0x88,0x00,
+-	0x01,0x00,0xd2,0x1c,0xd1,0x0c,0x10,0x04,0x01,0x00,0x01,0xff,0x75,0xcc,0x80,0x00,
+-	0x10,0x08,0x01,0xff,0x75,0xcc,0x81,0x00,0x01,0xff,0x75,0xcc,0x82,0x00,0xd1,0x10,
+-	0x10,0x08,0x01,0xff,0x75,0xcc,0x88,0x00,0x01,0xff,0x79,0xcc,0x81,0x00,0x10,0x04,
+-	0x01,0x00,0x01,0xff,0x79,0xcc,0x88,0x00,0xe1,0x9a,0x03,0xe0,0xd3,0x01,0xcf,0x86,
+-	0xd5,0xf4,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x41,0xcc,
+-	0x84,0x00,0x01,0xff,0x61,0xcc,0x84,0x00,0x10,0x08,0x01,0xff,0x41,0xcc,0x86,0x00,
+-	0x01,0xff,0x61,0xcc,0x86,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x41,0xcc,0xa8,0x00,
+-	0x01,0xff,0x61,0xcc,0xa8,0x00,0x10,0x08,0x01,0xff,0x43,0xcc,0x81,0x00,0x01,0xff,
+-	0x63,0xcc,0x81,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x43,0xcc,0x82,0x00,
+-	0x01,0xff,0x63,0xcc,0x82,0x00,0x10,0x08,0x01,0xff,0x43,0xcc,0x87,0x00,0x01,0xff,
+-	0x63,0xcc,0x87,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x43,0xcc,0x8c,0x00,0x01,0xff,
+-	0x63,0xcc,0x8c,0x00,0x10,0x08,0x01,0xff,0x44,0xcc,0x8c,0x00,0x01,0xff,0x64,0xcc,
+-	0x8c,0x00,0xd3,0x34,0xd2,0x14,0x51,0x04,0x01,0x00,0x10,0x08,0x01,0xff,0x45,0xcc,
+-	0x84,0x00,0x01,0xff,0x65,0xcc,0x84,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x45,0xcc,
+-	0x86,0x00,0x01,0xff,0x65,0xcc,0x86,0x00,0x10,0x08,0x01,0xff,0x45,0xcc,0x87,0x00,
+-	0x01,0xff,0x65,0xcc,0x87,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x45,0xcc,
+-	0xa8,0x00,0x01,0xff,0x65,0xcc,0xa8,0x00,0x10,0x08,0x01,0xff,0x45,0xcc,0x8c,0x00,
+-	0x01,0xff,0x65,0xcc,0x8c,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x47,0xcc,0x82,0x00,
+-	0x01,0xff,0x67,0xcc,0x82,0x00,0x10,0x08,0x01,0xff,0x47,0xcc,0x86,0x00,0x01,0xff,
+-	0x67,0xcc,0x86,0x00,0xd4,0x74,0xd3,0x34,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,
+-	0x47,0xcc,0x87,0x00,0x01,0xff,0x67,0xcc,0x87,0x00,0x10,0x08,0x01,0xff,0x47,0xcc,
+-	0xa7,0x00,0x01,0xff,0x67,0xcc,0xa7,0x00,0x91,0x10,0x10,0x08,0x01,0xff,0x48,0xcc,
+-	0x82,0x00,0x01,0xff,0x68,0xcc,0x82,0x00,0x01,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,
+-	0x01,0xff,0x49,0xcc,0x83,0x00,0x01,0xff,0x69,0xcc,0x83,0x00,0x10,0x08,0x01,0xff,
+-	0x49,0xcc,0x84,0x00,0x01,0xff,0x69,0xcc,0x84,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,
+-	0x49,0xcc,0x86,0x00,0x01,0xff,0x69,0xcc,0x86,0x00,0x10,0x08,0x01,0xff,0x49,0xcc,
+-	0xa8,0x00,0x01,0xff,0x69,0xcc,0xa8,0x00,0xd3,0x30,0xd2,0x10,0x91,0x0c,0x10,0x08,
+-	0x01,0xff,0x49,0xcc,0x87,0x00,0x01,0x00,0x01,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,
+-	0x4a,0xcc,0x82,0x00,0x01,0xff,0x6a,0xcc,0x82,0x00,0x10,0x08,0x01,0xff,0x4b,0xcc,
+-	0xa7,0x00,0x01,0xff,0x6b,0xcc,0xa7,0x00,0xd2,0x1c,0xd1,0x0c,0x10,0x04,0x01,0x00,
+-	0x01,0xff,0x4c,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x6c,0xcc,0x81,0x00,0x01,0xff,
+-	0x4c,0xcc,0xa7,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x6c,0xcc,0xa7,0x00,0x01,0xff,
+-	0x4c,0xcc,0x8c,0x00,0x10,0x08,0x01,0xff,0x6c,0xcc,0x8c,0x00,0x01,0x00,0xcf,0x86,
+-	0xd5,0xd4,0xd4,0x60,0xd3,0x30,0xd2,0x10,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,
+-	0x01,0xff,0x4e,0xcc,0x81,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x6e,0xcc,0x81,0x00,
+-	0x01,0xff,0x4e,0xcc,0xa7,0x00,0x10,0x08,0x01,0xff,0x6e,0xcc,0xa7,0x00,0x01,0xff,
+-	0x4e,0xcc,0x8c,0x00,0xd2,0x10,0x91,0x0c,0x10,0x08,0x01,0xff,0x6e,0xcc,0x8c,0x00,
+-	0x01,0x00,0x01,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x4f,0xcc,0x84,0x00,0x01,0xff,
+-	0x6f,0xcc,0x84,0x00,0x10,0x08,0x01,0xff,0x4f,0xcc,0x86,0x00,0x01,0xff,0x6f,0xcc,
+-	0x86,0x00,0xd3,0x34,0xd2,0x14,0x91,0x10,0x10,0x08,0x01,0xff,0x4f,0xcc,0x8b,0x00,
+-	0x01,0xff,0x6f,0xcc,0x8b,0x00,0x01,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x52,0xcc,
+-	0x81,0x00,0x01,0xff,0x72,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x52,0xcc,0xa7,0x00,
+-	0x01,0xff,0x72,0xcc,0xa7,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x52,0xcc,
+-	0x8c,0x00,0x01,0xff,0x72,0xcc,0x8c,0x00,0x10,0x08,0x01,0xff,0x53,0xcc,0x81,0x00,
+-	0x01,0xff,0x73,0xcc,0x81,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x53,0xcc,0x82,0x00,
+-	0x01,0xff,0x73,0xcc,0x82,0x00,0x10,0x08,0x01,0xff,0x53,0xcc,0xa7,0x00,0x01,0xff,
+-	0x73,0xcc,0xa7,0x00,0xd4,0x74,0xd3,0x34,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,
+-	0x53,0xcc,0x8c,0x00,0x01,0xff,0x73,0xcc,0x8c,0x00,0x10,0x08,0x01,0xff,0x54,0xcc,
+-	0xa7,0x00,0x01,0xff,0x74,0xcc,0xa7,0x00,0x91,0x10,0x10,0x08,0x01,0xff,0x54,0xcc,
+-	0x8c,0x00,0x01,0xff,0x74,0xcc,0x8c,0x00,0x01,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,
+-	0x01,0xff,0x55,0xcc,0x83,0x00,0x01,0xff,0x75,0xcc,0x83,0x00,0x10,0x08,0x01,0xff,
+-	0x55,0xcc,0x84,0x00,0x01,0xff,0x75,0xcc,0x84,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,
+-	0x55,0xcc,0x86,0x00,0x01,0xff,0x75,0xcc,0x86,0x00,0x10,0x08,0x01,0xff,0x55,0xcc,
+-	0x8a,0x00,0x01,0xff,0x75,0xcc,0x8a,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,
+-	0x01,0xff,0x55,0xcc,0x8b,0x00,0x01,0xff,0x75,0xcc,0x8b,0x00,0x10,0x08,0x01,0xff,
+-	0x55,0xcc,0xa8,0x00,0x01,0xff,0x75,0xcc,0xa8,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,
+-	0x57,0xcc,0x82,0x00,0x01,0xff,0x77,0xcc,0x82,0x00,0x10,0x08,0x01,0xff,0x59,0xcc,
+-	0x82,0x00,0x01,0xff,0x79,0xcc,0x82,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,
+-	0x59,0xcc,0x88,0x00,0x01,0xff,0x5a,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x7a,0xcc,
+-	0x81,0x00,0x01,0xff,0x5a,0xcc,0x87,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x7a,0xcc,
+-	0x87,0x00,0x01,0xff,0x5a,0xcc,0x8c,0x00,0x10,0x08,0x01,0xff,0x7a,0xcc,0x8c,0x00,
+-	0x01,0x00,0xd0,0x4a,0xcf,0x86,0x55,0x04,0x01,0x00,0xd4,0x2c,0xd3,0x18,0x92,0x14,
+-	0x91,0x10,0x10,0x08,0x01,0xff,0x4f,0xcc,0x9b,0x00,0x01,0xff,0x6f,0xcc,0x9b,0x00,
+-	0x01,0x00,0x01,0x00,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,
+-	0x01,0xff,0x55,0xcc,0x9b,0x00,0x93,0x14,0x92,0x10,0x91,0x0c,0x10,0x08,0x01,0xff,
+-	0x75,0xcc,0x9b,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xcf,0x86,0xd5,0xb4,
+-	0xd4,0x24,0x53,0x04,0x01,0x00,0x52,0x04,0x01,0x00,0xd1,0x0c,0x10,0x04,0x01,0x00,
+-	0x01,0xff,0x41,0xcc,0x8c,0x00,0x10,0x08,0x01,0xff,0x61,0xcc,0x8c,0x00,0x01,0xff,
+-	0x49,0xcc,0x8c,0x00,0xd3,0x46,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x69,0xcc,
+-	0x8c,0x00,0x01,0xff,0x4f,0xcc,0x8c,0x00,0x10,0x08,0x01,0xff,0x6f,0xcc,0x8c,0x00,
+-	0x01,0xff,0x55,0xcc,0x8c,0x00,0xd1,0x12,0x10,0x08,0x01,0xff,0x75,0xcc,0x8c,0x00,
+-	0x01,0xff,0x55,0xcc,0x88,0xcc,0x84,0x00,0x10,0x0a,0x01,0xff,0x75,0xcc,0x88,0xcc,
+-	0x84,0x00,0x01,0xff,0x55,0xcc,0x88,0xcc,0x81,0x00,0xd2,0x28,0xd1,0x14,0x10,0x0a,
+-	0x01,0xff,0x75,0xcc,0x88,0xcc,0x81,0x00,0x01,0xff,0x55,0xcc,0x88,0xcc,0x8c,0x00,
+-	0x10,0x0a,0x01,0xff,0x75,0xcc,0x88,0xcc,0x8c,0x00,0x01,0xff,0x55,0xcc,0x88,0xcc,
+-	0x80,0x00,0xd1,0x0e,0x10,0x0a,0x01,0xff,0x75,0xcc,0x88,0xcc,0x80,0x00,0x01,0x00,
+-	0x10,0x0a,0x01,0xff,0x41,0xcc,0x88,0xcc,0x84,0x00,0x01,0xff,0x61,0xcc,0x88,0xcc,
+-	0x84,0x00,0xd4,0x80,0xd3,0x3a,0xd2,0x26,0xd1,0x14,0x10,0x0a,0x01,0xff,0x41,0xcc,
+-	0x87,0xcc,0x84,0x00,0x01,0xff,0x61,0xcc,0x87,0xcc,0x84,0x00,0x10,0x09,0x01,0xff,
+-	0xc3,0x86,0xcc,0x84,0x00,0x01,0xff,0xc3,0xa6,0xcc,0x84,0x00,0x51,0x04,0x01,0x00,
+-	0x10,0x08,0x01,0xff,0x47,0xcc,0x8c,0x00,0x01,0xff,0x67,0xcc,0x8c,0x00,0xd2,0x20,
+-	0xd1,0x10,0x10,0x08,0x01,0xff,0x4b,0xcc,0x8c,0x00,0x01,0xff,0x6b,0xcc,0x8c,0x00,
+-	0x10,0x08,0x01,0xff,0x4f,0xcc,0xa8,0x00,0x01,0xff,0x6f,0xcc,0xa8,0x00,0xd1,0x14,
+-	0x10,0x0a,0x01,0xff,0x4f,0xcc,0xa8,0xcc,0x84,0x00,0x01,0xff,0x6f,0xcc,0xa8,0xcc,
+-	0x84,0x00,0x10,0x09,0x01,0xff,0xc6,0xb7,0xcc,0x8c,0x00,0x01,0xff,0xca,0x92,0xcc,
+-	0x8c,0x00,0xd3,0x24,0xd2,0x10,0x91,0x0c,0x10,0x08,0x01,0xff,0x6a,0xcc,0x8c,0x00,
+-	0x01,0x00,0x01,0x00,0x91,0x10,0x10,0x08,0x01,0xff,0x47,0xcc,0x81,0x00,0x01,0xff,
+-	0x67,0xcc,0x81,0x00,0x04,0x00,0xd2,0x24,0xd1,0x10,0x10,0x08,0x04,0xff,0x4e,0xcc,
+-	0x80,0x00,0x04,0xff,0x6e,0xcc,0x80,0x00,0x10,0x0a,0x01,0xff,0x41,0xcc,0x8a,0xcc,
+-	0x81,0x00,0x01,0xff,0x61,0xcc,0x8a,0xcc,0x81,0x00,0xd1,0x12,0x10,0x09,0x01,0xff,
+-	0xc3,0x86,0xcc,0x81,0x00,0x01,0xff,0xc3,0xa6,0xcc,0x81,0x00,0x10,0x09,0x01,0xff,
+-	0xc3,0x98,0xcc,0x81,0x00,0x01,0xff,0xc3,0xb8,0xcc,0x81,0x00,0xe2,0x07,0x02,0xe1,
+-	0xae,0x01,0xe0,0x93,0x01,0xcf,0x86,0xd5,0xf4,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,
+-	0x10,0x10,0x08,0x01,0xff,0x41,0xcc,0x8f,0x00,0x01,0xff,0x61,0xcc,0x8f,0x00,0x10,
+-	0x08,0x01,0xff,0x41,0xcc,0x91,0x00,0x01,0xff,0x61,0xcc,0x91,0x00,0xd1,0x10,0x10,
+-	0x08,0x01,0xff,0x45,0xcc,0x8f,0x00,0x01,0xff,0x65,0xcc,0x8f,0x00,0x10,0x08,0x01,
+-	0xff,0x45,0xcc,0x91,0x00,0x01,0xff,0x65,0xcc,0x91,0x00,0xd2,0x20,0xd1,0x10,0x10,
+-	0x08,0x01,0xff,0x49,0xcc,0x8f,0x00,0x01,0xff,0x69,0xcc,0x8f,0x00,0x10,0x08,0x01,
+-	0xff,0x49,0xcc,0x91,0x00,0x01,0xff,0x69,0xcc,0x91,0x00,0xd1,0x10,0x10,0x08,0x01,
+-	0xff,0x4f,0xcc,0x8f,0x00,0x01,0xff,0x6f,0xcc,0x8f,0x00,0x10,0x08,0x01,0xff,0x4f,
+-	0xcc,0x91,0x00,0x01,0xff,0x6f,0xcc,0x91,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,
+-	0x08,0x01,0xff,0x52,0xcc,0x8f,0x00,0x01,0xff,0x72,0xcc,0x8f,0x00,0x10,0x08,0x01,
+-	0xff,0x52,0xcc,0x91,0x00,0x01,0xff,0x72,0xcc,0x91,0x00,0xd1,0x10,0x10,0x08,0x01,
+-	0xff,0x55,0xcc,0x8f,0x00,0x01,0xff,0x75,0xcc,0x8f,0x00,0x10,0x08,0x01,0xff,0x55,
+-	0xcc,0x91,0x00,0x01,0xff,0x75,0xcc,0x91,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x04,
+-	0xff,0x53,0xcc,0xa6,0x00,0x04,0xff,0x73,0xcc,0xa6,0x00,0x10,0x08,0x04,0xff,0x54,
+-	0xcc,0xa6,0x00,0x04,0xff,0x74,0xcc,0xa6,0x00,0x51,0x04,0x04,0x00,0x10,0x08,0x04,
+-	0xff,0x48,0xcc,0x8c,0x00,0x04,0xff,0x68,0xcc,0x8c,0x00,0xd4,0x68,0xd3,0x20,0xd2,
+-	0x0c,0x91,0x08,0x10,0x04,0x06,0x00,0x07,0x00,0x04,0x00,0x51,0x04,0x04,0x00,0x10,
+-	0x08,0x04,0xff,0x41,0xcc,0x87,0x00,0x04,0xff,0x61,0xcc,0x87,0x00,0xd2,0x24,0xd1,
+-	0x10,0x10,0x08,0x04,0xff,0x45,0xcc,0xa7,0x00,0x04,0xff,0x65,0xcc,0xa7,0x00,0x10,
+-	0x0a,0x04,0xff,0x4f,0xcc,0x88,0xcc,0x84,0x00,0x04,0xff,0x6f,0xcc,0x88,0xcc,0x84,
+-	0x00,0xd1,0x14,0x10,0x0a,0x04,0xff,0x4f,0xcc,0x83,0xcc,0x84,0x00,0x04,0xff,0x6f,
+-	0xcc,0x83,0xcc,0x84,0x00,0x10,0x08,0x04,0xff,0x4f,0xcc,0x87,0x00,0x04,0xff,0x6f,
+-	0xcc,0x87,0x00,0x93,0x30,0xd2,0x24,0xd1,0x14,0x10,0x0a,0x04,0xff,0x4f,0xcc,0x87,
+-	0xcc,0x84,0x00,0x04,0xff,0x6f,0xcc,0x87,0xcc,0x84,0x00,0x10,0x08,0x04,0xff,0x59,
+-	0xcc,0x84,0x00,0x04,0xff,0x79,0xcc,0x84,0x00,0x51,0x04,0x07,0x00,0x10,0x04,0x07,
+-	0x00,0x08,0x00,0x08,0x00,0xcf,0x86,0x95,0x14,0x94,0x10,0x93,0x0c,0x92,0x08,0x11,
+-	0x04,0x08,0x00,0x09,0x00,0x09,0x00,0x09,0x00,0x01,0x00,0x01,0x00,0xd0,0x22,0xcf,
+-	0x86,0x55,0x04,0x01,0x00,0x94,0x18,0x53,0x04,0x01,0x00,0xd2,0x0c,0x91,0x08,0x10,
+-	0x04,0x01,0x00,0x04,0x00,0x04,0x00,0x11,0x04,0x04,0x00,0x07,0x00,0x01,0x00,0xcf,
+-	0x86,0xd5,0x18,0x54,0x04,0x01,0x00,0x53,0x04,0x01,0x00,0x52,0x04,0x01,0x00,0x51,
+-	0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x04,0x00,0x94,0x18,0x53,0x04,0x01,0x00,0xd2,
+-	0x08,0x11,0x04,0x01,0x00,0x04,0x00,0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x07,
+-	0x00,0x07,0x00,0xe1,0x34,0x01,0xd0,0x72,0xcf,0x86,0xd5,0x24,0x54,0x04,0x01,0xe6,
+-	0xd3,0x10,0x52,0x04,0x01,0xe6,0x91,0x08,0x10,0x04,0x01,0xe6,0x01,0xe8,0x01,0xdc,
+-	0x92,0x0c,0x51,0x04,0x01,0xdc,0x10,0x04,0x01,0xe8,0x01,0xd8,0x01,0xdc,0xd4,0x2c,
+-	0xd3,0x1c,0xd2,0x10,0xd1,0x08,0x10,0x04,0x01,0xdc,0x01,0xca,0x10,0x04,0x01,0xca,
+-	0x01,0xdc,0x51,0x04,0x01,0xdc,0x10,0x04,0x01,0xdc,0x01,0xca,0x92,0x0c,0x91,0x08,
+-	0x10,0x04,0x01,0xca,0x01,0xdc,0x01,0xdc,0x01,0xdc,0xd3,0x08,0x12,0x04,0x01,0xdc,
+-	0x01,0x01,0xd2,0x0c,0x91,0x08,0x10,0x04,0x01,0x01,0x01,0xdc,0x01,0xdc,0x91,0x08,
+-	0x10,0x04,0x01,0xdc,0x01,0xe6,0x01,0xe6,0xcf,0x86,0xd5,0x7e,0xd4,0x46,0xd3,0x2e,
+-	0xd2,0x19,0xd1,0x0e,0x10,0x07,0x01,0xff,0xcc,0x80,0x00,0x01,0xff,0xcc,0x81,0x00,
+-	0x10,0x04,0x01,0xe6,0x01,0xff,0xcc,0x93,0x00,0xd1,0x0d,0x10,0x09,0x01,0xff,0xcc,
+-	0x88,0xcc,0x81,0x00,0x01,0xf0,0x10,0x04,0x04,0xe6,0x04,0xdc,0xd2,0x08,0x11,0x04,
+-	0x04,0xdc,0x04,0xe6,0xd1,0x08,0x10,0x04,0x04,0xe6,0x04,0xdc,0x10,0x04,0x04,0xdc,
+-	0x06,0x00,0xd3,0x18,0xd2,0x0c,0x51,0x04,0x07,0xe6,0x10,0x04,0x07,0xe6,0x07,0xdc,
+-	0x51,0x04,0x07,0xdc,0x10,0x04,0x07,0xdc,0x07,0xe6,0xd2,0x10,0xd1,0x08,0x10,0x04,
+-	0x08,0xe8,0x08,0xdc,0x10,0x04,0x08,0xdc,0x08,0xe6,0xd1,0x08,0x10,0x04,0x08,0xe9,
+-	0x07,0xea,0x10,0x04,0x07,0xea,0x07,0xe9,0xd4,0x14,0x93,0x10,0x92,0x0c,0x51,0x04,
+-	0x01,0xea,0x10,0x04,0x04,0xe9,0x06,0xe6,0x06,0xe6,0x06,0xe6,0xd3,0x13,0x52,0x04,
+-	0x0a,0x00,0x91,0x0b,0x10,0x07,0x01,0xff,0xca,0xb9,0x00,0x01,0x00,0x0a,0x00,0xd2,
+-	0x0c,0x51,0x04,0x00,0x00,0x10,0x04,0x01,0x00,0x09,0x00,0x51,0x04,0x09,0x00,0x10,
+-	0x06,0x01,0xff,0x3b,0x00,0x10,0x00,0xd0,0xe1,0xcf,0x86,0xd5,0x7a,0xd4,0x5f,0xd3,
+-	0x21,0x52,0x04,0x00,0x00,0xd1,0x0d,0x10,0x04,0x01,0x00,0x01,0xff,0xc2,0xa8,0xcc,
+-	0x81,0x00,0x10,0x09,0x01,0xff,0xce,0x91,0xcc,0x81,0x00,0x01,0xff,0xc2,0xb7,0x00,
+-	0xd2,0x1f,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0x95,0xcc,0x81,0x00,0x01,0xff,0xce,
+-	0x97,0xcc,0x81,0x00,0x10,0x09,0x01,0xff,0xce,0x99,0xcc,0x81,0x00,0x00,0x00,0xd1,
+-	0x0d,0x10,0x09,0x01,0xff,0xce,0x9f,0xcc,0x81,0x00,0x00,0x00,0x10,0x09,0x01,0xff,
+-	0xce,0xa5,0xcc,0x81,0x00,0x01,0xff,0xce,0xa9,0xcc,0x81,0x00,0x93,0x17,0x92,0x13,
+-	0x91,0x0f,0x10,0x0b,0x01,0xff,0xce,0xb9,0xcc,0x88,0xcc,0x81,0x00,0x01,0x00,0x01,
+-	0x00,0x01,0x00,0x01,0x00,0xd4,0x4a,0xd3,0x10,0x92,0x0c,0x51,0x04,0x01,0x00,0x10,
+-	0x04,0x00,0x00,0x01,0x00,0x01,0x00,0xd2,0x16,0x51,0x04,0x01,0x00,0x10,0x09,0x01,
+-	0xff,0xce,0x99,0xcc,0x88,0x00,0x01,0xff,0xce,0xa5,0xcc,0x88,0x00,0xd1,0x12,0x10,
+-	0x09,0x01,0xff,0xce,0xb1,0xcc,0x81,0x00,0x01,0xff,0xce,0xb5,0xcc,0x81,0x00,0x10,
+-	0x09,0x01,0xff,0xce,0xb7,0xcc,0x81,0x00,0x01,0xff,0xce,0xb9,0xcc,0x81,0x00,0x93,
+-	0x17,0x92,0x13,0x91,0x0f,0x10,0x0b,0x01,0xff,0xcf,0x85,0xcc,0x88,0xcc,0x81,0x00,
+-	0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xcf,0x86,0xd5,0x7b,0xd4,0x39,0x53,0x04,
+-	0x01,0x00,0xd2,0x16,0x51,0x04,0x01,0x00,0x10,0x09,0x01,0xff,0xce,0xb9,0xcc,0x88,
+-	0x00,0x01,0xff,0xcf,0x85,0xcc,0x88,0x00,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xbf,
+-	0xcc,0x81,0x00,0x01,0xff,0xcf,0x85,0xcc,0x81,0x00,0x10,0x09,0x01,0xff,0xcf,0x89,
+-	0xcc,0x81,0x00,0x0a,0x00,0xd3,0x26,0xd2,0x11,0x51,0x04,0x01,0x00,0x10,0x04,0x01,
+-	0x00,0x01,0xff,0xcf,0x92,0xcc,0x81,0x00,0xd1,0x0d,0x10,0x09,0x01,0xff,0xcf,0x92,
+-	0xcc,0x88,0x00,0x01,0x00,0x10,0x04,0x01,0x00,0x04,0x00,0xd2,0x0c,0x51,0x04,0x06,
+-	0x00,0x10,0x04,0x01,0x00,0x04,0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x04,0x00,0x10,
+-	0x04,0x01,0x00,0x04,0x00,0xd4,0x14,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x01,
+-	0x00,0x04,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd3,0x10,0x52,0x04,0x01,0x00,0x51,
+-	0x04,0x05,0x00,0x10,0x04,0x06,0x00,0x07,0x00,0x12,0x04,0x07,0x00,0x08,0x00,0xe3,
+-	0x47,0x04,0xe2,0xbe,0x02,0xe1,0x07,0x01,0xd0,0x8b,0xcf,0x86,0xd5,0x6c,0xd4,0x53,
+-	0xd3,0x30,0xd2,0x1f,0xd1,0x12,0x10,0x09,0x04,0xff,0xd0,0x95,0xcc,0x80,0x00,0x01,
+-	0xff,0xd0,0x95,0xcc,0x88,0x00,0x10,0x04,0x01,0x00,0x01,0xff,0xd0,0x93,0xcc,0x81,
+-	0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x01,0xff,0xd0,0x86,0xcc,0x88,0x00,
+-	0x52,0x04,0x01,0x00,0xd1,0x12,0x10,0x09,0x01,0xff,0xd0,0x9a,0xcc,0x81,0x00,0x04,
+-	0xff,0xd0,0x98,0xcc,0x80,0x00,0x10,0x09,0x01,0xff,0xd0,0xa3,0xcc,0x86,0x00,0x01,
+-	0x00,0x53,0x04,0x01,0x00,0x92,0x11,0x91,0x0d,0x10,0x04,0x01,0x00,0x01,0xff,0xd0,
+-	0x98,0xcc,0x86,0x00,0x01,0x00,0x01,0x00,0x54,0x04,0x01,0x00,0x53,0x04,0x01,0x00,
+-	0x92,0x11,0x91,0x0d,0x10,0x04,0x01,0x00,0x01,0xff,0xd0,0xb8,0xcc,0x86,0x00,0x01,
+-	0x00,0x01,0x00,0xcf,0x86,0xd5,0x57,0x54,0x04,0x01,0x00,0xd3,0x30,0xd2,0x1f,0xd1,
+-	0x12,0x10,0x09,0x04,0xff,0xd0,0xb5,0xcc,0x80,0x00,0x01,0xff,0xd0,0xb5,0xcc,0x88,
+-	0x00,0x10,0x04,0x01,0x00,0x01,0xff,0xd0,0xb3,0xcc,0x81,0x00,0x51,0x04,0x01,0x00,
+-	0x10,0x04,0x01,0x00,0x01,0xff,0xd1,0x96,0xcc,0x88,0x00,0x52,0x04,0x01,0x00,0xd1,
+-	0x12,0x10,0x09,0x01,0xff,0xd0,0xba,0xcc,0x81,0x00,0x04,0xff,0xd0,0xb8,0xcc,0x80,
+-	0x00,0x10,0x09,0x01,0xff,0xd1,0x83,0xcc,0x86,0x00,0x01,0x00,0x54,0x04,0x01,0x00,
+-	0x93,0x1a,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x09,0x01,0xff,0xd1,0xb4,
+-	0xcc,0x8f,0x00,0x01,0xff,0xd1,0xb5,0xcc,0x8f,0x00,0x01,0x00,0xd0,0x2e,0xcf,0x86,
+-	0x95,0x28,0x94,0x24,0xd3,0x18,0xd2,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,
+-	0x01,0xe6,0x51,0x04,0x01,0xe6,0x10,0x04,0x01,0xe6,0x0a,0xe6,0x92,0x08,0x11,0x04,
+-	0x04,0x00,0x06,0x00,0x04,0x00,0x01,0x00,0x01,0x00,0xcf,0x86,0xd5,0xbe,0xd4,0x4a,
+-	0xd3,0x2a,0xd2,0x1a,0xd1,0x0d,0x10,0x04,0x01,0x00,0x01,0xff,0xd0,0x96,0xcc,0x86,
+-	0x00,0x10,0x09,0x01,0xff,0xd0,0xb6,0xcc,0x86,0x00,0x01,0x00,0xd1,0x08,0x10,0x04,
+-	0x01,0x00,0x06,0x00,0x10,0x04,0x06,0x00,0x01,0x00,0xd2,0x10,0xd1,0x08,0x10,0x04,
+-	0x01,0x00,0x06,0x00,0x10,0x04,0x06,0x00,0x01,0x00,0xd1,0x08,0x10,0x04,0x01,0x00,
+-	0x06,0x00,0x10,0x04,0x06,0x00,0x09,0x00,0xd3,0x3a,0xd2,0x24,0xd1,0x12,0x10,0x09,
+-	0x01,0xff,0xd0,0x90,0xcc,0x86,0x00,0x01,0xff,0xd0,0xb0,0xcc,0x86,0x00,0x10,0x09,
+-	0x01,0xff,0xd0,0x90,0xcc,0x88,0x00,0x01,0xff,0xd0,0xb0,0xcc,0x88,0x00,0x51,0x04,
+-	0x01,0x00,0x10,0x09,0x01,0xff,0xd0,0x95,0xcc,0x86,0x00,0x01,0xff,0xd0,0xb5,0xcc,
+-	0x86,0x00,0xd2,0x16,0x51,0x04,0x01,0x00,0x10,0x09,0x01,0xff,0xd3,0x98,0xcc,0x88,
+-	0x00,0x01,0xff,0xd3,0x99,0xcc,0x88,0x00,0xd1,0x12,0x10,0x09,0x01,0xff,0xd0,0x96,
+-	0xcc,0x88,0x00,0x01,0xff,0xd0,0xb6,0xcc,0x88,0x00,0x10,0x09,0x01,0xff,0xd0,0x97,
+-	0xcc,0x88,0x00,0x01,0xff,0xd0,0xb7,0xcc,0x88,0x00,0xd4,0x74,0xd3,0x3a,0xd2,0x16,
+-	0x51,0x04,0x01,0x00,0x10,0x09,0x01,0xff,0xd0,0x98,0xcc,0x84,0x00,0x01,0xff,0xd0,
+-	0xb8,0xcc,0x84,0x00,0xd1,0x12,0x10,0x09,0x01,0xff,0xd0,0x98,0xcc,0x88,0x00,0x01,
+-	0xff,0xd0,0xb8,0xcc,0x88,0x00,0x10,0x09,0x01,0xff,0xd0,0x9e,0xcc,0x88,0x00,0x01,
+-	0xff,0xd0,0xbe,0xcc,0x88,0x00,0xd2,0x16,0x51,0x04,0x01,0x00,0x10,0x09,0x01,0xff,
+-	0xd3,0xa8,0xcc,0x88,0x00,0x01,0xff,0xd3,0xa9,0xcc,0x88,0x00,0xd1,0x12,0x10,0x09,
+-	0x04,0xff,0xd0,0xad,0xcc,0x88,0x00,0x04,0xff,0xd1,0x8d,0xcc,0x88,0x00,0x10,0x09,
+-	0x01,0xff,0xd0,0xa3,0xcc,0x84,0x00,0x01,0xff,0xd1,0x83,0xcc,0x84,0x00,0xd3,0x3a,
+-	0xd2,0x24,0xd1,0x12,0x10,0x09,0x01,0xff,0xd0,0xa3,0xcc,0x88,0x00,0x01,0xff,0xd1,
+-	0x83,0xcc,0x88,0x00,0x10,0x09,0x01,0xff,0xd0,0xa3,0xcc,0x8b,0x00,0x01,0xff,0xd1,
+-	0x83,0xcc,0x8b,0x00,0x91,0x12,0x10,0x09,0x01,0xff,0xd0,0xa7,0xcc,0x88,0x00,0x01,
+-	0xff,0xd1,0x87,0xcc,0x88,0x00,0x08,0x00,0x92,0x16,0x91,0x12,0x10,0x09,0x01,0xff,
+-	0xd0,0xab,0xcc,0x88,0x00,0x01,0xff,0xd1,0x8b,0xcc,0x88,0x00,0x09,0x00,0x09,0x00,
+-	0xd1,0x74,0xd0,0x36,0xcf,0x86,0xd5,0x10,0x54,0x04,0x06,0x00,0x93,0x08,0x12,0x04,
+-	0x09,0x00,0x0a,0x00,0x0a,0x00,0xd4,0x10,0x93,0x0c,0x52,0x04,0x0a,0x00,0x11,0x04,
+-	0x0b,0x00,0x0c,0x00,0x10,0x00,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,
+-	0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xcf,0x86,0xd5,0x24,0x54,0x04,0x01,0x00,
+-	0xd3,0x10,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00,
+-	0x92,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x94,0x14,
+-	0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x14,0x00,0x01,0x00,0x01,0x00,0x01,0x00,
+-	0x01,0x00,0x01,0x00,0xd0,0xba,0xcf,0x86,0xd5,0x4c,0xd4,0x24,0x53,0x04,0x01,0x00,
+-	0xd2,0x10,0xd1,0x08,0x10,0x04,0x14,0x00,0x01,0x00,0x10,0x04,0x04,0x00,0x00,0x00,
+-	0xd1,0x08,0x10,0x04,0x00,0x00,0x10,0x00,0x10,0x04,0x10,0x00,0x0d,0x00,0xd3,0x18,
+-	0xd2,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,0x02,0xdc,0x02,0xe6,0x51,0x04,0x02,0xe6,
+-	0x10,0x04,0x02,0xdc,0x02,0xe6,0x92,0x0c,0x51,0x04,0x02,0xe6,0x10,0x04,0x02,0xde,
+-	0x02,0xdc,0x02,0xe6,0xd4,0x2c,0xd3,0x10,0x92,0x0c,0x51,0x04,0x02,0xe6,0x10,0x04,
+-	0x08,0xdc,0x02,0xdc,0x02,0xdc,0xd2,0x0c,0x51,0x04,0x02,0xe6,0x10,0x04,0x02,0xdc,
+-	0x02,0xe6,0xd1,0x08,0x10,0x04,0x02,0xe6,0x02,0xde,0x10,0x04,0x02,0xe4,0x02,0xe6,
+-	0xd3,0x20,0xd2,0x10,0xd1,0x08,0x10,0x04,0x01,0x0a,0x01,0x0b,0x10,0x04,0x01,0x0c,
+-	0x01,0x0d,0xd1,0x08,0x10,0x04,0x01,0x0e,0x01,0x0f,0x10,0x04,0x01,0x10,0x01,0x11,
+-	0xd2,0x10,0xd1,0x08,0x10,0x04,0x01,0x12,0x01,0x13,0x10,0x04,0x09,0x13,0x01,0x14,
+-	0xd1,0x08,0x10,0x04,0x01,0x15,0x01,0x16,0x10,0x04,0x01,0x00,0x01,0x17,0xcf,0x86,
+-	0xd5,0x28,0x94,0x24,0x93,0x20,0xd2,0x10,0xd1,0x08,0x10,0x04,0x01,0x00,0x01,0x18,
+-	0x10,0x04,0x01,0x19,0x01,0x00,0xd1,0x08,0x10,0x04,0x02,0xe6,0x08,0xdc,0x10,0x04,
+-	0x08,0x00,0x08,0x12,0x00,0x00,0x01,0x00,0xd4,0x1c,0x53,0x04,0x01,0x00,0xd2,0x0c,
+-	0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,
+-	0x00,0x00,0x14,0x00,0x93,0x10,0x52,0x04,0x01,0x00,0x91,0x08,0x10,0x04,0x01,0x00,
+-	0x00,0x00,0x00,0x00,0x00,0x00,0xe2,0xfa,0x01,0xe1,0x2a,0x01,0xd0,0xa7,0xcf,0x86,
+-	0xd5,0x54,0xd4,0x28,0xd3,0x10,0x52,0x04,0x07,0x00,0x91,0x08,0x10,0x04,0x0d,0x00,
+-	0x10,0x00,0x0a,0x00,0xd2,0x0c,0x51,0x04,0x0a,0x00,0x10,0x04,0x0a,0x00,0x08,0x00,
+-	0x91,0x08,0x10,0x04,0x01,0x00,0x07,0x00,0x07,0x00,0xd3,0x0c,0x52,0x04,0x07,0xe6,
+-	0x11,0x04,0x07,0xe6,0x0a,0xe6,0xd2,0x10,0xd1,0x08,0x10,0x04,0x0a,0x1e,0x0a,0x1f,
+-	0x10,0x04,0x0a,0x20,0x01,0x00,0xd1,0x08,0x10,0x04,0x0f,0x00,0x00,0x00,0x10,0x04,
+-	0x08,0x00,0x01,0x00,0xd4,0x3d,0x93,0x39,0xd2,0x1a,0xd1,0x08,0x10,0x04,0x0c,0x00,
+-	0x01,0x00,0x10,0x09,0x01,0xff,0xd8,0xa7,0xd9,0x93,0x00,0x01,0xff,0xd8,0xa7,0xd9,
+-	0x94,0x00,0xd1,0x12,0x10,0x09,0x01,0xff,0xd9,0x88,0xd9,0x94,0x00,0x01,0xff,0xd8,
+-	0xa7,0xd9,0x95,0x00,0x10,0x09,0x01,0xff,0xd9,0x8a,0xd9,0x94,0x00,0x01,0x00,0x01,
+-	0x00,0x53,0x04,0x01,0x00,0x92,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x0a,
+-	0x00,0x0a,0x00,0xcf,0x86,0xd5,0x5c,0xd4,0x20,0x53,0x04,0x01,0x00,0xd2,0x0c,0x51,
+-	0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x01,0x1b,0xd1,0x08,0x10,0x04,0x01,0x1c,0x01,
+-	0x1d,0x10,0x04,0x01,0x1e,0x01,0x1f,0xd3,0x20,0xd2,0x10,0xd1,0x08,0x10,0x04,0x01,
+-	0x20,0x01,0x21,0x10,0x04,0x01,0x22,0x04,0xe6,0xd1,0x08,0x10,0x04,0x04,0xe6,0x04,
+-	0xdc,0x10,0x04,0x07,0xdc,0x07,0xe6,0xd2,0x0c,0x91,0x08,0x10,0x04,0x07,0xe6,0x08,
+-	0xe6,0x08,0xe6,0xd1,0x08,0x10,0x04,0x08,0xdc,0x08,0xe6,0x10,0x04,0x08,0xe6,0x0c,
+-	0xdc,0xd4,0x10,0x53,0x04,0x01,0x00,0x52,0x04,0x01,0x00,0x11,0x04,0x01,0x00,0x06,
+-	0x00,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x01,0x23,0x01,0x00,0x01,0x00,0x01,
+-	0x00,0x01,0x00,0xd0,0x22,0xcf,0x86,0x55,0x04,0x01,0x00,0x54,0x04,0x01,0x00,0x53,
+-	0x04,0x01,0x00,0xd2,0x08,0x11,0x04,0x04,0x00,0x01,0x00,0x51,0x04,0x01,0x00,0x10,
+-	0x04,0x01,0x00,0x04,0x00,0xcf,0x86,0xd5,0x5b,0xd4,0x2e,0xd3,0x1e,0x92,0x1a,0xd1,
+-	0x0d,0x10,0x09,0x01,0xff,0xdb,0x95,0xd9,0x94,0x00,0x01,0x00,0x10,0x09,0x01,0xff,
+-	0xdb,0x81,0xd9,0x94,0x00,0x01,0x00,0x01,0x00,0x52,0x04,0x01,0x00,0x51,0x04,0x01,
+-	0x00,0x10,0x04,0x01,0x00,0x04,0x00,0xd3,0x19,0xd2,0x11,0x51,0x04,0x01,0x00,0x10,
+-	0x04,0x01,0x00,0x01,0xff,0xdb,0x92,0xd9,0x94,0x00,0x11,0x04,0x01,0x00,0x01,0xe6,
+-	0x52,0x04,0x01,0xe6,0xd1,0x08,0x10,0x04,0x01,0xe6,0x01,0x00,0x10,0x04,0x01,0x00,
+-	0x01,0xe6,0xd4,0x38,0xd3,0x1c,0xd2,0x0c,0x51,0x04,0x01,0xe6,0x10,0x04,0x01,0xe6,
+-	0x01,0xdc,0xd1,0x08,0x10,0x04,0x01,0xe6,0x01,0x00,0x10,0x04,0x01,0x00,0x01,0xe6,
+-	0xd2,0x10,0xd1,0x08,0x10,0x04,0x01,0xe6,0x01,0x00,0x10,0x04,0x01,0xdc,0x01,0xe6,
+-	0x91,0x08,0x10,0x04,0x01,0xe6,0x01,0xdc,0x07,0x00,0x53,0x04,0x01,0x00,0xd2,0x08,
+-	0x11,0x04,0x01,0x00,0x04,0x00,0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x07,0x00,
+-	0xd1,0xc8,0xd0,0x76,0xcf,0x86,0xd5,0x28,0xd4,0x14,0x53,0x04,0x04,0x00,0x52,0x04,
+-	0x04,0x00,0x51,0x04,0x04,0x00,0x10,0x04,0x00,0x00,0x04,0x00,0x93,0x10,0x92,0x0c,
+-	0x91,0x08,0x10,0x04,0x04,0x00,0x04,0x24,0x04,0x00,0x04,0x00,0x04,0x00,0xd4,0x14,
+-	0x53,0x04,0x04,0x00,0x52,0x04,0x04,0x00,0x91,0x08,0x10,0x04,0x04,0x00,0x07,0x00,
+-	0x07,0x00,0xd3,0x1c,0xd2,0x0c,0x91,0x08,0x10,0x04,0x04,0xe6,0x04,0xdc,0x04,0xe6,
+-	0xd1,0x08,0x10,0x04,0x04,0xdc,0x04,0xe6,0x10,0x04,0x04,0xe6,0x04,0xdc,0xd2,0x0c,
+-	0x51,0x04,0x04,0xdc,0x10,0x04,0x04,0xe6,0x04,0xdc,0xd1,0x08,0x10,0x04,0x04,0xdc,
+-	0x04,0xe6,0x10,0x04,0x04,0xdc,0x04,0xe6,0xcf,0x86,0xd5,0x3c,0x94,0x38,0xd3,0x1c,
+-	0xd2,0x0c,0x51,0x04,0x04,0xe6,0x10,0x04,0x04,0xdc,0x04,0xe6,0xd1,0x08,0x10,0x04,
+-	0x04,0xdc,0x04,0xe6,0x10,0x04,0x04,0xdc,0x04,0xe6,0xd2,0x10,0xd1,0x08,0x10,0x04,
+-	0x04,0xdc,0x04,0xe6,0x10,0x04,0x04,0xe6,0x00,0x00,0x91,0x08,0x10,0x04,0x00,0x00,
+-	0x07,0x00,0x07,0x00,0x08,0x00,0x94,0x10,0x53,0x04,0x08,0x00,0x52,0x04,0x08,0x00,
+-	0x11,0x04,0x08,0x00,0x0a,0x00,0x0a,0x00,0xd0,0x1e,0xcf,0x86,0x55,0x04,0x04,0x00,
+-	0x54,0x04,0x04,0x00,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x04,0x00,0x06,0x00,
+-	0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x86,0x55,0x04,0x09,0x00,0xd4,0x14,0x53,0x04,
+-	0x09,0x00,0x92,0x0c,0x51,0x04,0x09,0x00,0x10,0x04,0x09,0x00,0x09,0xe6,0x09,0xe6,
+-	0xd3,0x10,0x92,0x0c,0x51,0x04,0x09,0xe6,0x10,0x04,0x09,0xdc,0x09,0xe6,0x09,0x00,
+-	0xd2,0x0c,0x51,0x04,0x09,0x00,0x10,0x04,0x09,0x00,0x00,0x00,0x91,0x08,0x10,0x04,
+-	0x00,0x00,0x14,0xdc,0x14,0x00,0xe4,0x78,0x57,0xe3,0xda,0x3e,0xe2,0x89,0x3e,0xe1,
+-	0x91,0x2c,0xe0,0x21,0x10,0xcf,0x86,0xc5,0xe4,0x80,0x08,0xe3,0xcb,0x03,0xe2,0x61,
+-	0x01,0xd1,0x94,0xd0,0x5a,0xcf,0x86,0xd5,0x20,0x54,0x04,0x0b,0x00,0xd3,0x0c,0x52,
+-	0x04,0x0b,0x00,0x11,0x04,0x0b,0x00,0x0b,0xe6,0x92,0x0c,0x51,0x04,0x0b,0xe6,0x10,
+-	0x04,0x0b,0x00,0x0b,0xe6,0x0b,0xe6,0xd4,0x24,0xd3,0x10,0x52,0x04,0x0b,0xe6,0x91,
+-	0x08,0x10,0x04,0x0b,0x00,0x0b,0xe6,0x0b,0xe6,0xd2,0x0c,0x91,0x08,0x10,0x04,0x0b,
+-	0x00,0x0b,0xe6,0x0b,0xe6,0x11,0x04,0x0b,0xe6,0x00,0x00,0x53,0x04,0x0b,0x00,0x52,
+-	0x04,0x0b,0x00,0x51,0x04,0x0b,0x00,0x10,0x04,0x0b,0x00,0x00,0x00,0xcf,0x86,0xd5,
+-	0x20,0x54,0x04,0x0c,0x00,0x53,0x04,0x0c,0x00,0xd2,0x0c,0x91,0x08,0x10,0x04,0x0c,
+-	0x00,0x0c,0xdc,0x0c,0xdc,0x51,0x04,0x00,0x00,0x10,0x04,0x0c,0x00,0x00,0x00,0x94,
+-	0x14,0x53,0x04,0x13,0x00,0x92,0x0c,0x51,0x04,0x13,0x00,0x10,0x04,0x13,0x00,0x00,
+-	0x00,0x00,0x00,0x00,0x00,0xd0,0x4a,0xcf,0x86,0x55,0x04,0x00,0x00,0xd4,0x20,0xd3,
+-	0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x0d,0x00,0x10,0x00,0x0d,0x00,0x0d,0x00,0x52,
+-	0x04,0x0d,0x00,0x91,0x08,0x10,0x04,0x0d,0x00,0x10,0x00,0x10,0x00,0xd3,0x18,0xd2,
+-	0x0c,0x51,0x04,0x10,0x00,0x10,0x04,0x10,0x00,0x11,0x00,0x91,0x08,0x10,0x04,0x11,
+-	0x00,0x00,0x00,0x12,0x00,0x52,0x04,0x12,0x00,0x11,0x04,0x12,0x00,0x00,0x00,0xcf,
+-	0x86,0xd5,0x18,0x54,0x04,0x00,0x00,0x93,0x10,0x92,0x0c,0x51,0x04,0x00,0x00,0x10,
+-	0x04,0x00,0x00,0x14,0xdc,0x12,0xe6,0x12,0xe6,0xd4,0x30,0xd3,0x18,0xd2,0x0c,0x51,
+-	0x04,0x12,0xe6,0x10,0x04,0x12,0x00,0x11,0xdc,0x51,0x04,0x0d,0xe6,0x10,0x04,0x0d,
+-	0xdc,0x0d,0xe6,0xd2,0x0c,0x91,0x08,0x10,0x04,0x0d,0xe6,0x0d,0xdc,0x0d,0xe6,0x91,
+-	0x08,0x10,0x04,0x0d,0xe6,0x0d,0xdc,0x0d,0xdc,0xd3,0x1c,0xd2,0x10,0xd1,0x08,0x10,
+-	0x04,0x0d,0x1b,0x0d,0x1c,0x10,0x04,0x0d,0x1d,0x0d,0xe6,0x51,0x04,0x0d,0xe6,0x10,
+-	0x04,0x0d,0xdc,0x0d,0xe6,0xd2,0x10,0xd1,0x08,0x10,0x04,0x0d,0xe6,0x0d,0xdc,0x10,
+-	0x04,0x0d,0xdc,0x0d,0xe6,0x51,0x04,0x0d,0xe6,0x10,0x04,0x0d,0xe6,0x10,0xe6,0xe1,
+-	0x3a,0x01,0xd0,0x77,0xcf,0x86,0xd5,0x20,0x94,0x1c,0x93,0x18,0xd2,0x0c,0x91,0x08,
+-	0x10,0x04,0x0b,0x00,0x01,0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x07,0x00,0x01,0x00,
+-	0x01,0x00,0x01,0x00,0x01,0x00,0xd4,0x1b,0x53,0x04,0x01,0x00,0x92,0x13,0x91,0x0f,
+-	0x10,0x04,0x01,0x00,0x01,0xff,0xe0,0xa4,0xa8,0xe0,0xa4,0xbc,0x00,0x01,0x00,0x01,
+-	0x00,0xd3,0x26,0xd2,0x13,0x91,0x0f,0x10,0x04,0x01,0x00,0x01,0xff,0xe0,0xa4,0xb0,
+-	0xe0,0xa4,0xbc,0x00,0x01,0x00,0x91,0x0f,0x10,0x0b,0x01,0xff,0xe0,0xa4,0xb3,0xe0,
+-	0xa4,0xbc,0x00,0x01,0x00,0x01,0x00,0xd2,0x08,0x11,0x04,0x01,0x00,0x0c,0x00,0x91,
+-	0x08,0x10,0x04,0x01,0x07,0x01,0x00,0x01,0x00,0xcf,0x86,0xd5,0x8c,0xd4,0x18,0x53,
+-	0x04,0x01,0x00,0x52,0x04,0x01,0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x01,0x09,0x10,
+-	0x04,0x0b,0x00,0x0c,0x00,0xd3,0x1c,0xd2,0x10,0xd1,0x08,0x10,0x04,0x01,0x00,0x01,
+-	0xe6,0x10,0x04,0x01,0xdc,0x01,0xe6,0x91,0x08,0x10,0x04,0x01,0xe6,0x0b,0x00,0x0c,
+-	0x00,0xd2,0x2c,0xd1,0x16,0x10,0x0b,0x01,0xff,0xe0,0xa4,0x95,0xe0,0xa4,0xbc,0x00,
+-	0x01,0xff,0xe0,0xa4,0x96,0xe0,0xa4,0xbc,0x00,0x10,0x0b,0x01,0xff,0xe0,0xa4,0x97,
+-	0xe0,0xa4,0xbc,0x00,0x01,0xff,0xe0,0xa4,0x9c,0xe0,0xa4,0xbc,0x00,0xd1,0x16,0x10,
+-	0x0b,0x01,0xff,0xe0,0xa4,0xa1,0xe0,0xa4,0xbc,0x00,0x01,0xff,0xe0,0xa4,0xa2,0xe0,
+-	0xa4,0xbc,0x00,0x10,0x0b,0x01,0xff,0xe0,0xa4,0xab,0xe0,0xa4,0xbc,0x00,0x01,0xff,
+-	0xe0,0xa4,0xaf,0xe0,0xa4,0xbc,0x00,0x54,0x04,0x01,0x00,0xd3,0x14,0x92,0x10,0xd1,
+-	0x08,0x10,0x04,0x01,0x00,0x0a,0x00,0x10,0x04,0x0a,0x00,0x0c,0x00,0x0c,0x00,0xd2,
+-	0x10,0xd1,0x08,0x10,0x04,0x10,0x00,0x0b,0x00,0x10,0x04,0x0b,0x00,0x09,0x00,0x91,
+-	0x08,0x10,0x04,0x09,0x00,0x08,0x00,0x09,0x00,0xd0,0x86,0xcf,0x86,0xd5,0x44,0xd4,
+-	0x2c,0xd3,0x18,0xd2,0x0c,0x91,0x08,0x10,0x04,0x10,0x00,0x01,0x00,0x01,0x00,0x91,
+-	0x08,0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x52,0x04,0x01,0x00,0xd1,0x08,0x10,
+-	0x04,0x01,0x00,0x00,0x00,0x10,0x04,0x00,0x00,0x01,0x00,0x93,0x14,0x92,0x10,0xd1,
+-	0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x01,
+-	0x00,0xd4,0x14,0x53,0x04,0x01,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x00,
+-	0x00,0x01,0x00,0x01,0x00,0xd3,0x18,0xd2,0x10,0xd1,0x08,0x10,0x04,0x01,0x00,0x00,
+-	0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x11,0x04,0x00,0x00,0x01,0x00,0xd2,0x08,0x11,
+-	0x04,0x01,0x00,0x00,0x00,0x91,0x08,0x10,0x04,0x01,0x07,0x07,0x00,0x01,0x00,0xcf,
+-	0x86,0xd5,0x7b,0xd4,0x42,0xd3,0x14,0x52,0x04,0x01,0x00,0xd1,0x08,0x10,0x04,0x01,
+-	0x00,0x00,0x00,0x10,0x04,0x00,0x00,0x01,0x00,0xd2,0x17,0xd1,0x08,0x10,0x04,0x01,
+-	0x00,0x00,0x00,0x10,0x04,0x00,0x00,0x01,0xff,0xe0,0xa7,0x87,0xe0,0xa6,0xbe,0x00,
+-	0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe0,0xa7,0x87,0xe0,0xa7,0x97,0x00,0x01,0x09,0x10,
+-	0x04,0x08,0x00,0x00,0x00,0xd3,0x10,0x52,0x04,0x00,0x00,0x51,0x04,0x00,0x00,0x10,
+-	0x04,0x00,0x00,0x01,0x00,0x52,0x04,0x00,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xe0,
+-	0xa6,0xa1,0xe0,0xa6,0xbc,0x00,0x01,0xff,0xe0,0xa6,0xa2,0xe0,0xa6,0xbc,0x00,0x10,
+-	0x04,0x00,0x00,0x01,0xff,0xe0,0xa6,0xaf,0xe0,0xa6,0xbc,0x00,0xd4,0x10,0x93,0x0c,
+-	0x52,0x04,0x01,0x00,0x11,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x53,0x04,0x01,0x00,
+-	0xd2,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x0b,0x00,0x51,0x04,0x13,0x00,
+-	0x10,0x04,0x14,0xe6,0x00,0x00,0xe2,0x48,0x02,0xe1,0x4f,0x01,0xd0,0xa4,0xcf,0x86,
+-	0xd5,0x4c,0xd4,0x34,0xd3,0x1c,0xd2,0x10,0xd1,0x08,0x10,0x04,0x00,0x00,0x07,0x00,
+-	0x10,0x04,0x01,0x00,0x07,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,
+-	0xd2,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x51,0x04,0x00,0x00,
+-	0x10,0x04,0x00,0x00,0x01,0x00,0x93,0x14,0x92,0x10,0xd1,0x08,0x10,0x04,0x01,0x00,
+-	0x00,0x00,0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd4,0x14,0x53,0x04,
+-	0x01,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x01,0x00,
+-	0xd3,0x2e,0xd2,0x17,0xd1,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x10,0x04,0x01,0x00,
+-	0x01,0xff,0xe0,0xa8,0xb2,0xe0,0xa8,0xbc,0x00,0xd1,0x08,0x10,0x04,0x00,0x00,0x01,
+-	0x00,0x10,0x0b,0x01,0xff,0xe0,0xa8,0xb8,0xe0,0xa8,0xbc,0x00,0x00,0x00,0xd2,0x08,
+-	0x11,0x04,0x01,0x00,0x00,0x00,0x91,0x08,0x10,0x04,0x01,0x07,0x00,0x00,0x01,0x00,
+-	0xcf,0x86,0xd5,0x80,0xd4,0x34,0xd3,0x18,0xd2,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,
+-	0x01,0x00,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x01,0x00,0xd2,0x10,
+-	0xd1,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x10,0x04,0x00,0x00,0x01,0x00,0x91,0x08,
+-	0x10,0x04,0x01,0x00,0x01,0x09,0x00,0x00,0xd3,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,
+-	0x00,0x00,0x0a,0x00,0x00,0x00,0x00,0x00,0xd2,0x25,0xd1,0x0f,0x10,0x04,0x00,0x00,
+-	0x01,0xff,0xe0,0xa8,0x96,0xe0,0xa8,0xbc,0x00,0x10,0x0b,0x01,0xff,0xe0,0xa8,0x97,
+-	0xe0,0xa8,0xbc,0x00,0x01,0xff,0xe0,0xa8,0x9c,0xe0,0xa8,0xbc,0x00,0xd1,0x08,0x10,
+-	0x04,0x01,0x00,0x00,0x00,0x10,0x0b,0x01,0xff,0xe0,0xa8,0xab,0xe0,0xa8,0xbc,0x00,
+-	0x00,0x00,0xd4,0x10,0x93,0x0c,0x52,0x04,0x00,0x00,0x11,0x04,0x00,0x00,0x01,0x00,
+-	0x01,0x00,0x93,0x14,0x52,0x04,0x01,0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x0a,0x00,
+-	0x10,0x04,0x14,0x00,0x00,0x00,0x00,0x00,0xd0,0x82,0xcf,0x86,0xd5,0x40,0xd4,0x2c,
+-	0xd3,0x18,0xd2,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x91,0x08,
+-	0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x52,0x04,0x01,0x00,0xd1,0x08,0x10,0x04,
+-	0x07,0x00,0x01,0x00,0x10,0x04,0x00,0x00,0x01,0x00,0x93,0x10,0x92,0x0c,0x51,0x04,
+-	0x01,0x00,0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd4,0x14,0x53,0x04,
+-	0x01,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x01,0x00,
+-	0xd3,0x18,0xd2,0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x91,0x08,
+-	0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0xd2,0x08,0x11,0x04,0x01,0x00,0x00,0x00,
+-	0x91,0x08,0x10,0x04,0x01,0x07,0x01,0x00,0x01,0x00,0xcf,0x86,0xd5,0x3c,0xd4,0x28,
+-	0xd3,0x10,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x00,0x00,0x01,0x00,
+-	0xd2,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x00,0x00,0x01,0x00,0x91,0x08,0x10,0x04,
+-	0x01,0x00,0x01,0x09,0x00,0x00,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x01,0x00,
+-	0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xd4,0x18,0x93,0x14,0xd2,0x0c,0x91,0x08,
+-	0x10,0x04,0x01,0x00,0x07,0x00,0x07,0x00,0x11,0x04,0x00,0x00,0x01,0x00,0x01,0x00,
+-	0xd3,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x0d,0x00,0x07,0x00,0x00,0x00,0x00,0x00,
+-	0x92,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,0x11,0x00,0x13,0x00,0x13,0x00,0xe1,0x24,
+-	0x01,0xd0,0x86,0xcf,0x86,0xd5,0x44,0xd4,0x2c,0xd3,0x18,0xd2,0x0c,0x91,0x08,0x10,
+-	0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x01,0x00,0x01,
+-	0x00,0x52,0x04,0x01,0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x10,0x04,0x00,
+-	0x00,0x01,0x00,0x93,0x14,0x92,0x10,0xd1,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x10,
+-	0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd4,0x14,0x53,0x04,0x01,0x00,0x92,
+-	0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x01,0x00,0xd3,0x18,0xd2,
+-	0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x00,
+-	0x00,0x07,0x00,0x01,0x00,0xd2,0x08,0x11,0x04,0x01,0x00,0x00,0x00,0x91,0x08,0x10,
+-	0x04,0x01,0x07,0x01,0x00,0x01,0x00,0xcf,0x86,0xd5,0x73,0xd4,0x45,0xd3,0x14,0x52,
+-	0x04,0x01,0x00,0xd1,0x08,0x10,0x04,0x0a,0x00,0x00,0x00,0x10,0x04,0x00,0x00,0x01,
+-	0x00,0xd2,0x1e,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe0,0xad,0x87,0xe0,0xad,0x96,0x00,
+-	0x00,0x00,0x10,0x04,0x00,0x00,0x01,0xff,0xe0,0xad,0x87,0xe0,0xac,0xbe,0x00,0x91,
+-	0x0f,0x10,0x0b,0x01,0xff,0xe0,0xad,0x87,0xe0,0xad,0x97,0x00,0x01,0x09,0x00,0x00,
+-	0xd3,0x0c,0x52,0x04,0x00,0x00,0x11,0x04,0x00,0x00,0x01,0x00,0x52,0x04,0x00,0x00,
+-	0xd1,0x16,0x10,0x0b,0x01,0xff,0xe0,0xac,0xa1,0xe0,0xac,0xbc,0x00,0x01,0xff,0xe0,
+-	0xac,0xa2,0xe0,0xac,0xbc,0x00,0x10,0x04,0x00,0x00,0x01,0x00,0xd4,0x14,0x93,0x10,
+-	0xd2,0x08,0x11,0x04,0x01,0x00,0x0a,0x00,0x11,0x04,0x00,0x00,0x01,0x00,0x01,0x00,
+-	0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x07,0x00,0x0c,0x00,0x0c,0x00,
+-	0x00,0x00,0xd0,0xb1,0xcf,0x86,0xd5,0x63,0xd4,0x28,0xd3,0x14,0xd2,0x08,0x11,0x04,
+-	0x00,0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0xd2,0x0c,
+-	0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x11,0x04,0x00,0x00,0x01,0x00,
+-	0xd3,0x1f,0xd2,0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x91,0x0f,
+-	0x10,0x0b,0x01,0xff,0xe0,0xae,0x92,0xe0,0xaf,0x97,0x00,0x01,0x00,0x00,0x00,0xd2,
+-	0x10,0xd1,0x08,0x10,0x04,0x00,0x00,0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x91,
+-	0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0xd4,0x2c,0xd3,0x18,0xd2,0x0c,0x51,
+-	0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x01,0x00,0x00,
+-	0x00,0x00,0x00,0xd2,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x11,
+-	0x04,0x00,0x00,0x01,0x00,0xd3,0x10,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,
+-	0x04,0x08,0x00,0x01,0x00,0xd2,0x08,0x11,0x04,0x01,0x00,0x00,0x00,0x11,0x04,0x00,
+-	0x00,0x01,0x00,0xcf,0x86,0xd5,0x61,0xd4,0x45,0xd3,0x14,0xd2,0x0c,0x51,0x04,0x01,
+-	0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x11,0x04,0x00,0x00,0x01,0x00,0xd2,0x1e,0xd1,
+-	0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x10,0x0b,0x01,0xff,0xe0,0xaf,0x86,0xe0,0xae,
+-	0xbe,0x00,0x01,0xff,0xe0,0xaf,0x87,0xe0,0xae,0xbe,0x00,0x91,0x0f,0x10,0x0b,0x01,
+-	0xff,0xe0,0xaf,0x86,0xe0,0xaf,0x97,0x00,0x01,0x09,0x00,0x00,0x93,0x18,0xd2,0x0c,
+-	0x91,0x08,0x10,0x04,0x0a,0x00,0x00,0x00,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,
+-	0x00,0x00,0x01,0x00,0x00,0x00,0xd4,0x14,0x93,0x10,0x52,0x04,0x00,0x00,0x51,0x04,
+-	0x00,0x00,0x10,0x04,0x08,0x00,0x01,0x00,0x01,0x00,0xd3,0x10,0x92,0x0c,0x51,0x04,
+-	0x01,0x00,0x10,0x04,0x01,0x00,0x07,0x00,0x07,0x00,0x92,0x0c,0x51,0x04,0x07,0x00,
+-	0x10,0x04,0x07,0x00,0x00,0x00,0x00,0x00,0xe3,0x1c,0x04,0xe2,0x1a,0x02,0xd1,0xf3,
+-	0xd0,0x76,0xcf,0x86,0xd5,0x3c,0xd4,0x28,0xd3,0x18,0xd2,0x0c,0x91,0x08,0x10,0x04,
+-	0x10,0x00,0x01,0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x14,0x00,0x01,0x00,0x01,0x00,
+-	0x52,0x04,0x01,0x00,0x91,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x93,0x10,
+-	0x92,0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x01,0x00,0x01,0x00,
+-	0xd4,0x14,0x53,0x04,0x01,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x00,0x00,
+-	0x01,0x00,0x01,0x00,0xd3,0x10,0x52,0x04,0x01,0x00,0x91,0x08,0x10,0x04,0x10,0x00,
+-	0x01,0x00,0x01,0x00,0xd2,0x08,0x11,0x04,0x01,0x00,0x00,0x00,0x91,0x08,0x10,0x04,
+-	0x00,0x00,0x0a,0x00,0x01,0x00,0xcf,0x86,0xd5,0x53,0xd4,0x2f,0xd3,0x10,0x52,0x04,
+-	0x01,0x00,0x91,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0xd2,0x13,0x91,0x0f,
+-	0x10,0x0b,0x01,0xff,0xe0,0xb1,0x86,0xe0,0xb1,0x96,0x00,0x00,0x00,0x01,0x00,0x91,
+-	0x08,0x10,0x04,0x01,0x00,0x01,0x09,0x00,0x00,0xd3,0x14,0x52,0x04,0x00,0x00,0xd1,
+-	0x08,0x10,0x04,0x00,0x00,0x01,0x54,0x10,0x04,0x01,0x5b,0x00,0x00,0x92,0x0c,0x51,
+-	0x04,0x0a,0x00,0x10,0x04,0x11,0x00,0x00,0x00,0x00,0x00,0xd4,0x14,0x93,0x10,0xd2,
+-	0x08,0x11,0x04,0x01,0x00,0x0a,0x00,0x11,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x93,
+-	0x10,0x52,0x04,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x15,0x00,0x0a,
+-	0x00,0xd0,0x76,0xcf,0x86,0xd5,0x3c,0xd4,0x28,0xd3,0x18,0xd2,0x0c,0x91,0x08,0x10,
+-	0x04,0x12,0x00,0x10,0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x14,0x00,0x01,0x00,0x01,
+-	0x00,0x52,0x04,0x01,0x00,0x91,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x93,
+-	0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x01,0x00,0x01,
+-	0x00,0xd4,0x14,0x53,0x04,0x01,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x00,
+-	0x00,0x01,0x00,0x01,0x00,0xd3,0x10,0x52,0x04,0x01,0x00,0x91,0x08,0x10,0x04,0x00,
+-	0x00,0x01,0x00,0x01,0x00,0xd2,0x08,0x11,0x04,0x01,0x00,0x00,0x00,0x91,0x08,0x10,
+-	0x04,0x07,0x07,0x07,0x00,0x01,0x00,0xcf,0x86,0xd5,0x82,0xd4,0x5e,0xd3,0x2a,0xd2,
+-	0x13,0x91,0x0f,0x10,0x0b,0x01,0xff,0xe0,0xb2,0xbf,0xe0,0xb3,0x95,0x00,0x01,0x00,
+-	0x01,0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x10,0x04,0x01,0x00,0x01,0xff,
+-	0xe0,0xb3,0x86,0xe0,0xb3,0x95,0x00,0xd2,0x28,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe0,
+-	0xb3,0x86,0xe0,0xb3,0x96,0x00,0x00,0x00,0x10,0x0b,0x01,0xff,0xe0,0xb3,0x86,0xe0,
+-	0xb3,0x82,0x00,0x01,0xff,0xe0,0xb3,0x86,0xe0,0xb3,0x82,0xe0,0xb3,0x95,0x00,0x91,
+-	0x08,0x10,0x04,0x01,0x00,0x01,0x09,0x00,0x00,0xd3,0x14,0x52,0x04,0x00,0x00,0xd1,
+-	0x08,0x10,0x04,0x00,0x00,0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x52,0x04,0x00,
+-	0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x01,0x00,0x00,0x00,0xd4,0x14,0x93,0x10,0xd2,
+-	0x08,0x11,0x04,0x01,0x00,0x09,0x00,0x11,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x93,
+-	0x14,0x92,0x10,0xd1,0x08,0x10,0x04,0x00,0x00,0x09,0x00,0x10,0x04,0x09,0x00,0x00,
+-	0x00,0x00,0x00,0x00,0x00,0xe1,0x06,0x01,0xd0,0x6e,0xcf,0x86,0xd5,0x3c,0xd4,0x28,
+-	0xd3,0x18,0xd2,0x0c,0x91,0x08,0x10,0x04,0x13,0x00,0x10,0x00,0x01,0x00,0x91,0x08,
+-	0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x52,0x04,0x01,0x00,0x91,0x08,0x10,0x04,
+-	0x01,0x00,0x00,0x00,0x01,0x00,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x01,0x00,
+-	0x00,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd4,0x14,0x53,0x04,0x01,0x00,0x92,0x0c,
+-	0x91,0x08,0x10,0x04,0x01,0x00,0x0c,0x00,0x01,0x00,0x01,0x00,0x53,0x04,0x01,0x00,
+-	0xd2,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x0c,0x00,0x13,0x09,0x91,0x08,0x10,0x04,
+-	0x13,0x09,0x0a,0x00,0x01,0x00,0xcf,0x86,0xd5,0x65,0xd4,0x45,0xd3,0x10,0x52,0x04,
+-	0x01,0x00,0x91,0x08,0x10,0x04,0x0a,0x00,0x00,0x00,0x01,0x00,0xd2,0x1e,0xd1,0x08,
+-	0x10,0x04,0x01,0x00,0x00,0x00,0x10,0x0b,0x01,0xff,0xe0,0xb5,0x86,0xe0,0xb4,0xbe,
+-	0x00,0x01,0xff,0xe0,0xb5,0x87,0xe0,0xb4,0xbe,0x00,0xd1,0x0f,0x10,0x0b,0x01,0xff,
+-	0xe0,0xb5,0x86,0xe0,0xb5,0x97,0x00,0x01,0x09,0x10,0x04,0x0c,0x00,0x12,0x00,0xd3,
+-	0x10,0x52,0x04,0x00,0x00,0x51,0x04,0x12,0x00,0x10,0x04,0x12,0x00,0x01,0x00,0x52,
+-	0x04,0x12,0x00,0x51,0x04,0x12,0x00,0x10,0x04,0x12,0x00,0x11,0x00,0xd4,0x14,0x93,
+-	0x10,0xd2,0x08,0x11,0x04,0x01,0x00,0x0a,0x00,0x11,0x04,0x00,0x00,0x01,0x00,0x01,
+-	0x00,0xd3,0x0c,0x52,0x04,0x0a,0x00,0x11,0x04,0x0a,0x00,0x12,0x00,0x92,0x0c,0x91,
+-	0x08,0x10,0x04,0x12,0x00,0x0a,0x00,0x0a,0x00,0x0a,0x00,0xd0,0x5a,0xcf,0x86,0xd5,
+-	0x34,0xd4,0x18,0x93,0x14,0xd2,0x08,0x11,0x04,0x00,0x00,0x04,0x00,0x91,0x08,0x10,
+-	0x04,0x00,0x00,0x04,0x00,0x04,0x00,0x04,0x00,0xd3,0x10,0x52,0x04,0x04,0x00,0x51,
+-	0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x00,0x00,0x92,0x08,0x11,0x04,0x00,0x00,0x04,
+-	0x00,0x04,0x00,0x54,0x04,0x04,0x00,0xd3,0x10,0x92,0x0c,0x51,0x04,0x04,0x00,0x10,
+-	0x04,0x00,0x00,0x04,0x00,0x04,0x00,0x52,0x04,0x04,0x00,0x91,0x08,0x10,0x04,0x00,
+-	0x00,0x04,0x00,0x00,0x00,0xcf,0x86,0xd5,0x77,0xd4,0x28,0xd3,0x10,0x52,0x04,0x04,
+-	0x00,0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x00,0x00,0xd2,0x0c,0x51,0x04,0x00,
+-	0x00,0x10,0x04,0x04,0x09,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x04,
+-	0x00,0xd3,0x14,0x52,0x04,0x04,0x00,0xd1,0x08,0x10,0x04,0x04,0x00,0x00,0x00,0x10,
+-	0x04,0x04,0x00,0x00,0x00,0xd2,0x13,0x51,0x04,0x04,0x00,0x10,0x0b,0x04,0xff,0xe0,
+-	0xb7,0x99,0xe0,0xb7,0x8a,0x00,0x04,0x00,0xd1,0x19,0x10,0x0b,0x04,0xff,0xe0,0xb7,
+-	0x99,0xe0,0xb7,0x8f,0x00,0x04,0xff,0xe0,0xb7,0x99,0xe0,0xb7,0x8f,0xe0,0xb7,0x8a,
+-	0x00,0x10,0x0b,0x04,0xff,0xe0,0xb7,0x99,0xe0,0xb7,0x9f,0x00,0x04,0x00,0xd4,0x10,
+-	0x93,0x0c,0x52,0x04,0x00,0x00,0x11,0x04,0x00,0x00,0x10,0x00,0x10,0x00,0x93,0x14,
+-	0xd2,0x08,0x11,0x04,0x00,0x00,0x04,0x00,0x91,0x08,0x10,0x04,0x04,0x00,0x00,0x00,
+-	0x00,0x00,0x00,0x00,0xe2,0x31,0x01,0xd1,0x58,0xd0,0x3a,0xcf,0x86,0xd5,0x18,0x94,
+-	0x14,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x01,
+-	0x00,0x01,0x00,0x01,0x00,0x54,0x04,0x01,0x00,0x53,0x04,0x01,0x00,0xd2,0x0c,0x51,
+-	0x04,0x01,0x67,0x10,0x04,0x01,0x09,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,
+-	0x00,0x01,0x00,0xcf,0x86,0x95,0x18,0xd4,0x0c,0x53,0x04,0x01,0x00,0x12,0x04,0x01,
+-	0x6b,0x01,0x00,0x53,0x04,0x01,0x00,0x12,0x04,0x01,0x00,0x00,0x00,0x00,0x00,0xd0,
+-	0x9e,0xcf,0x86,0xd5,0x54,0xd4,0x3c,0xd3,0x20,0xd2,0x10,0xd1,0x08,0x10,0x04,0x00,
+-	0x00,0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x00,
+-	0x00,0x10,0x04,0x15,0x00,0x01,0x00,0xd2,0x10,0xd1,0x08,0x10,0x04,0x01,0x00,0x15,
+-	0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x91,0x08,0x10,0x04,0x15,0x00,0x01,0x00,0x15,
+-	0x00,0xd3,0x08,0x12,0x04,0x15,0x00,0x01,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x15,
+-	0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd4,0x30,0xd3,0x1c,0xd2,0x0c,0x91,0x08,0x10,
+-	0x04,0x15,0x00,0x01,0x00,0x01,0x00,0xd1,0x08,0x10,0x04,0x00,0x00,0x01,0x00,0x10,
+-	0x04,0x00,0x00,0x01,0x00,0xd2,0x08,0x11,0x04,0x15,0x00,0x01,0x00,0x91,0x08,0x10,
+-	0x04,0x15,0x00,0x01,0x00,0x01,0x00,0x53,0x04,0x01,0x00,0xd2,0x0c,0x51,0x04,0x01,
+-	0x76,0x10,0x04,0x15,0x09,0x01,0x00,0x11,0x04,0x01,0x00,0x00,0x00,0xcf,0x86,0x95,
+-	0x34,0xd4,0x20,0xd3,0x14,0x52,0x04,0x01,0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x00,
+-	0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x52,0x04,0x01,0x7a,0x11,0x04,0x01,0x00,0x00,
+-	0x00,0x53,0x04,0x01,0x00,0xd2,0x08,0x11,0x04,0x01,0x00,0x00,0x00,0x11,0x04,0x01,
+-	0x00,0x0d,0x00,0x00,0x00,0xe1,0x2b,0x01,0xd0,0x3e,0xcf,0x86,0xd5,0x14,0x54,0x04,
+-	0x02,0x00,0x53,0x04,0x02,0x00,0x92,0x08,0x11,0x04,0x02,0xdc,0x02,0x00,0x02,0x00,
+-	0x54,0x04,0x02,0x00,0xd3,0x14,0x52,0x04,0x02,0x00,0xd1,0x08,0x10,0x04,0x02,0x00,
+-	0x02,0xdc,0x10,0x04,0x02,0x00,0x02,0xdc,0x92,0x0c,0x91,0x08,0x10,0x04,0x02,0x00,
+-	0x02,0xd8,0x02,0x00,0x02,0x00,0xcf,0x86,0xd5,0x73,0xd4,0x36,0xd3,0x17,0x92,0x13,
+-	0x51,0x04,0x02,0x00,0x10,0x04,0x02,0x00,0x02,0xff,0xe0,0xbd,0x82,0xe0,0xbe,0xb7,
+-	0x00,0x02,0x00,0xd2,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,0x02,0x00,0x02,0x00,0x91,
+-	0x0f,0x10,0x04,0x02,0x00,0x02,0xff,0xe0,0xbd,0x8c,0xe0,0xbe,0xb7,0x00,0x02,0x00,
+-	0xd3,0x26,0xd2,0x13,0x51,0x04,0x02,0x00,0x10,0x0b,0x02,0xff,0xe0,0xbd,0x91,0xe0,
+-	0xbe,0xb7,0x00,0x02,0x00,0x51,0x04,0x02,0x00,0x10,0x04,0x02,0x00,0x02,0xff,0xe0,
+-	0xbd,0x96,0xe0,0xbe,0xb7,0x00,0x52,0x04,0x02,0x00,0x91,0x0f,0x10,0x0b,0x02,0xff,
+-	0xe0,0xbd,0x9b,0xe0,0xbe,0xb7,0x00,0x02,0x00,0x02,0x00,0xd4,0x27,0x53,0x04,0x02,
+-	0x00,0xd2,0x17,0xd1,0x0f,0x10,0x04,0x02,0x00,0x02,0xff,0xe0,0xbd,0x80,0xe0,0xbe,
+-	0xb5,0x00,0x10,0x04,0x04,0x00,0x0a,0x00,0x91,0x08,0x10,0x04,0x0a,0x00,0x00,0x00,
+-	0x00,0x00,0xd3,0x35,0xd2,0x17,0xd1,0x08,0x10,0x04,0x00,0x00,0x02,0x81,0x10,0x04,
+-	0x02,0x82,0x02,0xff,0xe0,0xbd,0xb1,0xe0,0xbd,0xb2,0x00,0xd1,0x0f,0x10,0x04,0x02,
+-	0x84,0x02,0xff,0xe0,0xbd,0xb1,0xe0,0xbd,0xb4,0x00,0x10,0x0b,0x02,0xff,0xe0,0xbe,
+-	0xb2,0xe0,0xbe,0x80,0x00,0x02,0x00,0xd2,0x13,0x91,0x0f,0x10,0x0b,0x02,0xff,0xe0,
+-	0xbe,0xb3,0xe0,0xbe,0x80,0x00,0x02,0x00,0x02,0x82,0x11,0x04,0x02,0x82,0x02,0x00,
+-	0xd0,0xd3,0xcf,0x86,0xd5,0x65,0xd4,0x27,0xd3,0x1f,0xd2,0x13,0x91,0x0f,0x10,0x04,
+-	0x02,0x82,0x02,0xff,0xe0,0xbd,0xb1,0xe0,0xbe,0x80,0x00,0x02,0xe6,0x91,0x08,0x10,
+-	0x04,0x02,0x09,0x02,0x00,0x02,0xe6,0x12,0x04,0x02,0x00,0x0c,0x00,0xd3,0x1f,0xd2,
+-	0x13,0x51,0x04,0x02,0x00,0x10,0x04,0x02,0x00,0x02,0xff,0xe0,0xbe,0x92,0xe0,0xbe,
+-	0xb7,0x00,0x51,0x04,0x02,0x00,0x10,0x04,0x04,0x00,0x02,0x00,0xd2,0x0c,0x91,0x08,
+-	0x10,0x04,0x00,0x00,0x02,0x00,0x02,0x00,0x91,0x0f,0x10,0x04,0x02,0x00,0x02,0xff,
+-	0xe0,0xbe,0x9c,0xe0,0xbe,0xb7,0x00,0x02,0x00,0xd4,0x3d,0xd3,0x26,0xd2,0x13,0x51,
+-	0x04,0x02,0x00,0x10,0x0b,0x02,0xff,0xe0,0xbe,0xa1,0xe0,0xbe,0xb7,0x00,0x02,0x00,
+-	0x51,0x04,0x02,0x00,0x10,0x04,0x02,0x00,0x02,0xff,0xe0,0xbe,0xa6,0xe0,0xbe,0xb7,
+-	0x00,0x52,0x04,0x02,0x00,0x91,0x0f,0x10,0x0b,0x02,0xff,0xe0,0xbe,0xab,0xe0,0xbe,
+-	0xb7,0x00,0x02,0x00,0x04,0x00,0xd3,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x04,0x00,
+-	0x02,0x00,0x02,0x00,0x02,0x00,0xd2,0x13,0x91,0x0f,0x10,0x04,0x04,0x00,0x02,0xff,
+-	0xe0,0xbe,0x90,0xe0,0xbe,0xb5,0x00,0x04,0x00,0x91,0x08,0x10,0x04,0x04,0x00,0x00,
+-	0x00,0x04,0x00,0xcf,0x86,0x95,0x4c,0xd4,0x24,0xd3,0x10,0x52,0x04,0x04,0x00,0x51,
+-	0x04,0x04,0x00,0x10,0x04,0x04,0xdc,0x04,0x00,0x52,0x04,0x04,0x00,0xd1,0x08,0x10,
+-	0x04,0x04,0x00,0x00,0x00,0x10,0x04,0x0a,0x00,0x04,0x00,0xd3,0x14,0xd2,0x08,0x11,
+-	0x04,0x08,0x00,0x0a,0x00,0x91,0x08,0x10,0x04,0x0a,0x00,0x0b,0x00,0x0b,0x00,0x92,
+-	0x10,0xd1,0x08,0x10,0x04,0x0b,0x00,0x0c,0x00,0x10,0x04,0x0c,0x00,0x00,0x00,0x00,
+-	0x00,0x00,0x00,0xcf,0x86,0xe5,0xcc,0x04,0xe4,0x63,0x03,0xe3,0x65,0x01,0xe2,0x04,
+-	0x01,0xd1,0x7f,0xd0,0x65,0xcf,0x86,0x55,0x04,0x04,0x00,0xd4,0x33,0xd3,0x1f,0xd2,
+-	0x0c,0x51,0x04,0x04,0x00,0x10,0x04,0x0a,0x00,0x04,0x00,0x51,0x04,0x04,0x00,0x10,
+-	0x0b,0x04,0xff,0xe1,0x80,0xa5,0xe1,0x80,0xae,0x00,0x04,0x00,0x92,0x10,0xd1,0x08,
+-	0x10,0x04,0x0a,0x00,0x04,0x00,0x10,0x04,0x04,0x00,0x0a,0x00,0x04,0x00,0xd3,0x18,
+-	0xd2,0x0c,0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x0a,0x00,0x51,0x04,0x0a,0x00,
+-	0x10,0x04,0x04,0x00,0x04,0x07,0x92,0x10,0xd1,0x08,0x10,0x04,0x04,0x00,0x04,0x09,
+-	0x10,0x04,0x0a,0x09,0x0a,0x00,0x0a,0x00,0xcf,0x86,0x95,0x14,0x54,0x04,0x04,0x00,
+-	0x53,0x04,0x04,0x00,0x92,0x08,0x11,0x04,0x04,0x00,0x0a,0x00,0x0a,0x00,0x0a,0x00,
+-	0xd0,0x2e,0xcf,0x86,0x95,0x28,0xd4,0x14,0x53,0x04,0x0a,0x00,0x52,0x04,0x0a,0x00,
+-	0x91,0x08,0x10,0x04,0x0a,0x00,0x0a,0xdc,0x0a,0x00,0x53,0x04,0x0a,0x00,0xd2,0x08,
+-	0x11,0x04,0x0a,0x00,0x0b,0x00,0x11,0x04,0x0b,0x00,0x0a,0x00,0x01,0x00,0xcf,0x86,
+-	0xd5,0x24,0x94,0x20,0xd3,0x10,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,
+-	0x00,0x00,0x0d,0x00,0x52,0x04,0x00,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x0d,0x00,
+-	0x00,0x00,0x01,0x00,0x54,0x04,0x01,0x00,0xd3,0x10,0x52,0x04,0x01,0x00,0x51,0x04,
+-	0x01,0x00,0x10,0x04,0x01,0x00,0x06,0x00,0xd2,0x10,0xd1,0x08,0x10,0x04,0x06,0x00,
+-	0x08,0x00,0x10,0x04,0x08,0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x08,0x00,0x0d,0x00,
+-	0x0d,0x00,0xd1,0x28,0xd0,0x06,0xcf,0x06,0x01,0x00,0xcf,0x86,0x95,0x1c,0x54,0x04,
+-	0x01,0x00,0x53,0x04,0x01,0x00,0xd2,0x08,0x11,0x04,0x01,0x00,0x0b,0x00,0x51,0x04,
+-	0x0b,0x00,0x10,0x04,0x0b,0x00,0x01,0x00,0x01,0x00,0xd0,0x1e,0xcf,0x86,0x55,0x04,
+-	0x01,0x00,0x94,0x14,0x93,0x10,0x92,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,
+-	0x0b,0x00,0x0b,0x00,0x01,0x00,0x01,0x00,0xcf,0x86,0x55,0x04,0x01,0x00,0x54,0x04,
+-	0x01,0x00,0x53,0x04,0x01,0x00,0x92,0x08,0x11,0x04,0x01,0x00,0x0b,0x00,0x0b,0x00,
+-	0xe2,0x21,0x01,0xd1,0x6c,0xd0,0x1e,0xcf,0x86,0x95,0x18,0x94,0x14,0x93,0x10,0x52,
+-	0x04,0x04,0x00,0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x08,0x00,0x04,0x00,0x04,
+-	0x00,0x04,0x00,0xcf,0x86,0x95,0x48,0xd4,0x24,0xd3,0x10,0x52,0x04,0x04,0x00,0x51,
+-	0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x08,0x00,0xd2,0x0c,0x91,0x08,0x10,0x04,0x04,
+-	0x00,0x00,0x00,0x04,0x00,0x11,0x04,0x04,0x00,0x00,0x00,0xd3,0x10,0x52,0x04,0x04,
+-	0x00,0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x00,0x00,0xd2,0x0c,0x91,0x08,0x10,
+-	0x04,0x04,0x00,0x00,0x00,0x04,0x00,0x11,0x04,0x04,0x00,0x00,0x00,0x04,0x00,0xd0,
+-	0x62,0xcf,0x86,0xd5,0x28,0x94,0x24,0xd3,0x10,0x52,0x04,0x04,0x00,0x51,0x04,0x04,
+-	0x00,0x10,0x04,0x04,0x00,0x08,0x00,0xd2,0x0c,0x91,0x08,0x10,0x04,0x04,0x00,0x00,
+-	0x00,0x04,0x00,0x11,0x04,0x04,0x00,0x00,0x00,0x04,0x00,0xd4,0x14,0x53,0x04,0x04,
+-	0x00,0x52,0x04,0x04,0x00,0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x08,0x00,0xd3,
+-	0x14,0xd2,0x0c,0x91,0x08,0x10,0x04,0x04,0x00,0x00,0x00,0x04,0x00,0x11,0x04,0x04,
+-	0x00,0x00,0x00,0x52,0x04,0x04,0x00,0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x00,
+-	0x00,0xcf,0x86,0xd5,0x38,0xd4,0x24,0xd3,0x14,0xd2,0x0c,0x91,0x08,0x10,0x04,0x04,
+-	0x00,0x00,0x00,0x04,0x00,0x11,0x04,0x04,0x00,0x00,0x00,0x52,0x04,0x04,0x00,0x51,
+-	0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x08,0x00,0x93,0x10,0x52,0x04,0x04,0x00,0x51,
+-	0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x00,0x00,0x04,0x00,0x94,0x14,0x53,0x04,0x04,
+-	0x00,0x52,0x04,0x04,0x00,0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x08,0x00,0x04,
+-	0x00,0xd1,0x9c,0xd0,0x3e,0xcf,0x86,0x95,0x38,0xd4,0x14,0x53,0x04,0x04,0x00,0x52,
+-	0x04,0x04,0x00,0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x08,0x00,0xd3,0x14,0xd2,
+-	0x0c,0x91,0x08,0x10,0x04,0x04,0x00,0x00,0x00,0x04,0x00,0x11,0x04,0x04,0x00,0x00,
+-	0x00,0x52,0x04,0x04,0x00,0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x08,0x00,0x04,
+-	0x00,0xcf,0x86,0xd5,0x34,0xd4,0x14,0x93,0x10,0x52,0x04,0x04,0x00,0x51,0x04,0x04,
+-	0x00,0x10,0x04,0x04,0x00,0x08,0x00,0x04,0x00,0x53,0x04,0x04,0x00,0xd2,0x0c,0x51,
+-	0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x00,0x00,0xd1,0x08,0x10,0x04,0x00,0x00,0x0c,
+-	0xe6,0x10,0x04,0x0c,0xe6,0x08,0xe6,0xd4,0x14,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,
+-	0x04,0x08,0x00,0x04,0x00,0x04,0x00,0x04,0x00,0x04,0x00,0x53,0x04,0x04,0x00,0x52,
+-	0x04,0x04,0x00,0x91,0x08,0x10,0x04,0x04,0x00,0x00,0x00,0x00,0x00,0xd0,0x1a,0xcf,
+-	0x86,0x95,0x14,0x54,0x04,0x08,0x00,0x53,0x04,0x08,0x00,0x92,0x08,0x11,0x04,0x08,
+-	0x00,0x00,0x00,0x00,0x00,0x04,0x00,0xcf,0x86,0x55,0x04,0x04,0x00,0x54,0x04,0x04,
+-	0x00,0xd3,0x10,0x52,0x04,0x04,0x00,0x91,0x08,0x10,0x04,0x04,0x00,0x11,0x00,0x00,
+-	0x00,0x52,0x04,0x11,0x00,0x11,0x04,0x11,0x00,0x00,0x00,0xd3,0x30,0xd2,0x2a,0xd1,
+-	0x24,0xd0,0x1e,0xcf,0x86,0x95,0x18,0x94,0x14,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,
+-	0x04,0x0b,0x00,0x04,0x00,0x04,0x00,0x04,0x00,0x04,0x00,0x04,0x00,0x04,0x00,0xcf,
+-	0x06,0x04,0x00,0xcf,0x06,0x04,0x00,0xcf,0x06,0x04,0x00,0xd2,0x6c,0xd1,0x24,0xd0,
+-	0x06,0xcf,0x06,0x04,0x00,0xcf,0x86,0x55,0x04,0x04,0x00,0x54,0x04,0x04,0x00,0x93,
+-	0x10,0x52,0x04,0x04,0x00,0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x0b,0x00,0x0b,
+-	0x00,0xd0,0x1e,0xcf,0x86,0x95,0x18,0x54,0x04,0x04,0x00,0x53,0x04,0x04,0x00,0x52,
+-	0x04,0x04,0x00,0x91,0x08,0x10,0x04,0x04,0x00,0x00,0x00,0x00,0x00,0x04,0x00,0xcf,
+-	0x86,0x55,0x04,0x04,0x00,0x54,0x04,0x04,0x00,0xd3,0x10,0x92,0x0c,0x91,0x08,0x10,
+-	0x04,0x04,0x00,0x10,0x00,0x10,0x00,0x10,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x10,
+-	0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xd1,0x80,0xd0,0x46,0xcf,0x86,0xd5,0x28,0xd4,
+-	0x14,0x53,0x04,0x06,0x00,0x52,0x04,0x06,0x00,0x91,0x08,0x10,0x04,0x06,0x00,0x00,
+-	0x00,0x06,0x00,0x93,0x10,0x52,0x04,0x06,0x00,0x91,0x08,0x10,0x04,0x06,0x09,0x00,
+-	0x00,0x00,0x00,0x00,0x00,0x54,0x04,0x06,0x00,0x93,0x14,0x52,0x04,0x06,0x00,0xd1,
+-	0x08,0x10,0x04,0x06,0x09,0x06,0x00,0x10,0x04,0x06,0x00,0x00,0x00,0x00,0x00,0xcf,
+-	0x86,0xd5,0x10,0x54,0x04,0x06,0x00,0x93,0x08,0x12,0x04,0x06,0x00,0x00,0x00,0x00,
+-	0x00,0xd4,0x14,0x53,0x04,0x06,0x00,0x52,0x04,0x06,0x00,0x91,0x08,0x10,0x04,0x06,
+-	0x00,0x00,0x00,0x06,0x00,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x06,0x00,0x00,
+-	0x00,0x06,0x00,0x00,0x00,0x00,0x00,0xd0,0x06,0xcf,0x06,0x04,0x00,0xcf,0x86,0xd5,
+-	0x24,0x54,0x04,0x04,0x00,0xd3,0x10,0x92,0x0c,0x51,0x04,0x04,0x00,0x10,0x04,0x04,
+-	0x09,0x04,0x00,0x04,0x00,0x52,0x04,0x04,0x00,0x91,0x08,0x10,0x04,0x04,0x00,0x07,
+-	0xe6,0x00,0x00,0xd4,0x10,0x53,0x04,0x04,0x00,0x92,0x08,0x11,0x04,0x04,0x00,0x00,
+-	0x00,0x00,0x00,0x53,0x04,0x07,0x00,0x92,0x08,0x11,0x04,0x07,0x00,0x00,0x00,0x00,
+-	0x00,0xe4,0xac,0x03,0xe3,0x4d,0x01,0xd2,0x84,0xd1,0x48,0xd0,0x2a,0xcf,0x86,0x95,
+-	0x24,0xd4,0x14,0x53,0x04,0x04,0x00,0x52,0x04,0x04,0x00,0x51,0x04,0x04,0x00,0x10,
+-	0x04,0x04,0x00,0x00,0x00,0x53,0x04,0x04,0x00,0x92,0x08,0x11,0x04,0x04,0x00,0x00,
+-	0x00,0x00,0x00,0x04,0x00,0xcf,0x86,0x55,0x04,0x04,0x00,0x54,0x04,0x04,0x00,0x53,
+-	0x04,0x04,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x14,0x00,0x00,0x00,0x00,0x00,0x00,
+-	0x00,0xd0,0x22,0xcf,0x86,0x55,0x04,0x04,0x00,0x94,0x18,0x53,0x04,0x04,0x00,0x92,
+-	0x10,0xd1,0x08,0x10,0x04,0x04,0x00,0x04,0xe4,0x10,0x04,0x0a,0x00,0x00,0x00,0x00,
+-	0x00,0x0b,0x00,0xcf,0x86,0x55,0x04,0x0b,0x00,0x54,0x04,0x0b,0x00,0x93,0x0c,0x52,
+-	0x04,0x0b,0x00,0x11,0x04,0x0b,0x00,0x00,0x00,0x00,0x00,0xd1,0x80,0xd0,0x42,0xcf,
+-	0x86,0xd5,0x1c,0x54,0x04,0x07,0x00,0x53,0x04,0x07,0x00,0x52,0x04,0x07,0x00,0xd1,
+-	0x08,0x10,0x04,0x07,0x00,0x10,0x00,0x10,0x04,0x10,0x00,0x00,0x00,0xd4,0x0c,0x53,
+-	0x04,0x07,0x00,0x12,0x04,0x07,0x00,0x00,0x00,0x53,0x04,0x07,0x00,0x92,0x10,0xd1,
+-	0x08,0x10,0x04,0x07,0x00,0x07,0xde,0x10,0x04,0x07,0xe6,0x07,0xdc,0x00,0x00,0xcf,
+-	0x86,0xd5,0x18,0x94,0x14,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x07,0x00,0x00,
+-	0x00,0x00,0x00,0x07,0x00,0x07,0x00,0x07,0x00,0xd4,0x10,0x53,0x04,0x07,0x00,0x52,
+-	0x04,0x07,0x00,0x11,0x04,0x07,0x00,0x00,0x00,0x93,0x10,0x52,0x04,0x07,0x00,0x91,
+-	0x08,0x10,0x04,0x07,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xd0,0x1a,0xcf,0x86,0x55,
+-	0x04,0x08,0x00,0x94,0x10,0x53,0x04,0x08,0x00,0x92,0x08,0x11,0x04,0x08,0x00,0x0b,
+-	0x00,0x00,0x00,0x08,0x00,0xcf,0x86,0x95,0x28,0xd4,0x10,0x53,0x04,0x08,0x00,0x92,
+-	0x08,0x11,0x04,0x08,0x00,0x00,0x00,0x00,0x00,0x53,0x04,0x08,0x00,0xd2,0x0c,0x51,
+-	0x04,0x08,0x00,0x10,0x04,0x0b,0x00,0x00,0x00,0x11,0x04,0x00,0x00,0x08,0x00,0x07,
+-	0x00,0xd2,0xe4,0xd1,0x80,0xd0,0x2e,0xcf,0x86,0x95,0x28,0x54,0x04,0x08,0x00,0xd3,
+-	0x10,0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00,0x10,0x04,0x08,0x00,0x08,0xe6,0xd2,
+-	0x0c,0x91,0x08,0x10,0x04,0x08,0xdc,0x08,0x00,0x08,0x00,0x11,0x04,0x00,0x00,0x08,
+-	0x00,0x0b,0x00,0xcf,0x86,0xd5,0x18,0x54,0x04,0x0b,0x00,0x53,0x04,0x0b,0x00,0x52,
+-	0x04,0x0b,0x00,0x51,0x04,0x0b,0x00,0x10,0x04,0x0b,0x00,0x00,0x00,0xd4,0x14,0x93,
+-	0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x0b,0x09,0x0b,0x00,0x0b,0x00,0x0b,0x00,0x0b,
+-	0x00,0xd3,0x10,0x52,0x04,0x0b,0x00,0x91,0x08,0x10,0x04,0x0b,0x00,0x0b,0xe6,0x0b,
+-	0xe6,0x52,0x04,0x0b,0xe6,0xd1,0x08,0x10,0x04,0x0b,0xe6,0x00,0x00,0x10,0x04,0x00,
+-	0x00,0x0b,0xdc,0xd0,0x5e,0xcf,0x86,0xd5,0x20,0xd4,0x10,0x53,0x04,0x0b,0x00,0x92,
+-	0x08,0x11,0x04,0x0b,0x00,0x00,0x00,0x00,0x00,0x53,0x04,0x0b,0x00,0x92,0x08,0x11,
+-	0x04,0x0b,0x00,0x00,0x00,0x00,0x00,0xd4,0x10,0x53,0x04,0x0b,0x00,0x52,0x04,0x0b,
+-	0x00,0x11,0x04,0x0b,0x00,0x00,0x00,0xd3,0x10,0x52,0x04,0x10,0xe6,0x91,0x08,0x10,
+-	0x04,0x10,0xe6,0x10,0xdc,0x10,0xdc,0xd2,0x0c,0x51,0x04,0x10,0xdc,0x10,0x04,0x10,
+-	0xdc,0x10,0xe6,0xd1,0x08,0x10,0x04,0x10,0xe6,0x10,0xdc,0x10,0x04,0x10,0x00,0x00,
+-	0x00,0xcf,0x06,0x00,0x00,0xe1,0x1e,0x01,0xd0,0xaa,0xcf,0x86,0xd5,0x6e,0xd4,0x53,
+-	0xd3,0x17,0x52,0x04,0x09,0x00,0x51,0x04,0x09,0x00,0x10,0x0b,0x09,0xff,0xe1,0xac,
+-	0x85,0xe1,0xac,0xb5,0x00,0x09,0x00,0xd2,0x1e,0xd1,0x0f,0x10,0x0b,0x09,0xff,0xe1,
+-	0xac,0x87,0xe1,0xac,0xb5,0x00,0x09,0x00,0x10,0x0b,0x09,0xff,0xe1,0xac,0x89,0xe1,
+-	0xac,0xb5,0x00,0x09,0x00,0xd1,0x0f,0x10,0x0b,0x09,0xff,0xe1,0xac,0x8b,0xe1,0xac,
+-	0xb5,0x00,0x09,0x00,0x10,0x0b,0x09,0xff,0xe1,0xac,0x8d,0xe1,0xac,0xb5,0x00,0x09,
+-	0x00,0x93,0x17,0x92,0x13,0x51,0x04,0x09,0x00,0x10,0x0b,0x09,0xff,0xe1,0xac,0x91,
+-	0xe1,0xac,0xb5,0x00,0x09,0x00,0x09,0x00,0x09,0x00,0x54,0x04,0x09,0x00,0xd3,0x10,
+-	0x52,0x04,0x09,0x00,0x91,0x08,0x10,0x04,0x09,0x07,0x09,0x00,0x09,0x00,0xd2,0x13,
+-	0x51,0x04,0x09,0x00,0x10,0x04,0x09,0x00,0x09,0xff,0xe1,0xac,0xba,0xe1,0xac,0xb5,
+-	0x00,0x91,0x0f,0x10,0x04,0x09,0x00,0x09,0xff,0xe1,0xac,0xbc,0xe1,0xac,0xb5,0x00,
+-	0x09,0x00,0xcf,0x86,0xd5,0x3d,0x94,0x39,0xd3,0x31,0xd2,0x25,0xd1,0x16,0x10,0x0b,
+-	0x09,0xff,0xe1,0xac,0xbe,0xe1,0xac,0xb5,0x00,0x09,0xff,0xe1,0xac,0xbf,0xe1,0xac,
+-	0xb5,0x00,0x10,0x04,0x09,0x00,0x09,0xff,0xe1,0xad,0x82,0xe1,0xac,0xb5,0x00,0x91,
+-	0x08,0x10,0x04,0x09,0x09,0x09,0x00,0x09,0x00,0x12,0x04,0x09,0x00,0x00,0x00,0x09,
+-	0x00,0xd4,0x1c,0x53,0x04,0x09,0x00,0xd2,0x0c,0x51,0x04,0x09,0x00,0x10,0x04,0x09,
+-	0x00,0x09,0xe6,0x91,0x08,0x10,0x04,0x09,0xdc,0x09,0xe6,0x09,0xe6,0xd3,0x08,0x12,
+-	0x04,0x09,0xe6,0x09,0x00,0x52,0x04,0x09,0x00,0x91,0x08,0x10,0x04,0x09,0x00,0x00,
+-	0x00,0x00,0x00,0xd0,0x2e,0xcf,0x86,0x55,0x04,0x0a,0x00,0xd4,0x18,0x53,0x04,0x0a,
+-	0x00,0xd2,0x0c,0x51,0x04,0x0a,0x00,0x10,0x04,0x0a,0x09,0x0d,0x09,0x11,0x04,0x0d,
+-	0x00,0x0a,0x00,0x53,0x04,0x0a,0x00,0x92,0x08,0x11,0x04,0x0a,0x00,0x0d,0x00,0x0d,
+-	0x00,0xcf,0x86,0x55,0x04,0x0c,0x00,0xd4,0x14,0x93,0x10,0x52,0x04,0x0c,0x00,0x51,
+-	0x04,0x0c,0x00,0x10,0x04,0x0c,0x07,0x0c,0x00,0x0c,0x00,0xd3,0x0c,0x92,0x08,0x11,
+-	0x04,0x0c,0x00,0x0c,0x09,0x00,0x00,0x12,0x04,0x00,0x00,0x0c,0x00,0xe3,0xb2,0x01,
+-	0xe2,0x09,0x01,0xd1,0x4c,0xd0,0x2a,0xcf,0x86,0x55,0x04,0x0a,0x00,0x54,0x04,0x0a,
+-	0x00,0xd3,0x10,0x52,0x04,0x0a,0x00,0x51,0x04,0x0a,0x00,0x10,0x04,0x0a,0x00,0x0a,
+-	0x07,0x92,0x0c,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x0a,0x00,0x0a,0x00,0xcf,
+-	0x86,0x95,0x1c,0x94,0x18,0x53,0x04,0x0a,0x00,0xd2,0x08,0x11,0x04,0x0a,0x00,0x00,
+-	0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x0a,0x00,0x0a,0x00,0x0a,0x00,0x0a,0x00,0xd0,
+-	0x3a,0xcf,0x86,0xd5,0x18,0x94,0x14,0x53,0x04,0x12,0x00,0x92,0x0c,0x91,0x08,0x10,
+-	0x04,0x12,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x14,0x00,0x54,0x04,0x14,0x00,0x53,
+-	0x04,0x14,0x00,0xd2,0x0c,0x51,0x04,0x14,0x00,0x10,0x04,0x14,0x00,0x00,0x00,0x91,
+-	0x08,0x10,0x04,0x00,0x00,0x14,0x00,0x14,0x00,0xcf,0x86,0xd5,0x2c,0xd4,0x08,0x13,
+-	0x04,0x0d,0x00,0x00,0x00,0xd3,0x18,0xd2,0x0c,0x51,0x04,0x0b,0xe6,0x10,0x04,0x0b,
+-	0xe6,0x0b,0x00,0x91,0x08,0x10,0x04,0x0b,0x01,0x0b,0xdc,0x0b,0xdc,0x92,0x08,0x11,
+-	0x04,0x0b,0xdc,0x0b,0xe6,0x0b,0xdc,0xd4,0x28,0xd3,0x10,0x92,0x0c,0x91,0x08,0x10,
+-	0x04,0x0b,0xe6,0x0b,0x00,0x0b,0x01,0x0b,0x01,0xd2,0x0c,0x91,0x08,0x10,0x04,0x0b,
+-	0x01,0x0b,0x00,0x0b,0x00,0x91,0x08,0x10,0x04,0x0b,0x00,0x0b,0xdc,0x0b,0x00,0xd3,
+-	0x1c,0xd2,0x0c,0x51,0x04,0x0b,0x00,0x10,0x04,0x0b,0x00,0x0d,0x00,0xd1,0x08,0x10,
+-	0x04,0x0d,0xe6,0x0d,0x00,0x10,0x04,0x0d,0x00,0x13,0x00,0x92,0x0c,0x51,0x04,0x10,
+-	0xe6,0x10,0x04,0x15,0x00,0x00,0x00,0x00,0x00,0xd1,0x1c,0xd0,0x06,0xcf,0x06,0x07,
+-	0x00,0xcf,0x86,0x55,0x04,0x07,0x00,0x94,0x0c,0x53,0x04,0x07,0x00,0x12,0x04,0x07,
+-	0x00,0x08,0x00,0x08,0x00,0xd0,0x06,0xcf,0x06,0x08,0x00,0xcf,0x86,0xd5,0x40,0xd4,
+-	0x2c,0xd3,0x10,0x92,0x0c,0x51,0x04,0x08,0xe6,0x10,0x04,0x08,0xdc,0x08,0xe6,0x09,
+-	0xe6,0xd2,0x0c,0x51,0x04,0x09,0xe6,0x10,0x04,0x09,0xdc,0x0a,0xe6,0xd1,0x08,0x10,
+-	0x04,0x0a,0xe6,0x0a,0xea,0x10,0x04,0x0a,0xd6,0x0a,0xdc,0x93,0x10,0x92,0x0c,0x91,
+-	0x08,0x10,0x04,0x0a,0xca,0x0a,0xe6,0x0a,0xe6,0x0a,0xe6,0x0a,0xe6,0xd4,0x14,0x93,
+-	0x10,0x52,0x04,0x0a,0xe6,0x51,0x04,0x0a,0xe6,0x10,0x04,0x0a,0xe6,0x10,0xe6,0x10,
+-	0xe6,0xd3,0x10,0x52,0x04,0x10,0xe6,0x51,0x04,0x10,0xe6,0x10,0x04,0x13,0xe8,0x13,
+-	0xe4,0xd2,0x10,0xd1,0x08,0x10,0x04,0x13,0xe4,0x13,0xdc,0x10,0x04,0x00,0x00,0x12,
+-	0xe6,0xd1,0x08,0x10,0x04,0x0c,0xe9,0x0b,0xdc,0x10,0x04,0x09,0xe6,0x09,0xdc,0xe2,
+-	0x80,0x08,0xe1,0x48,0x04,0xe0,0x1c,0x02,0xcf,0x86,0xe5,0x11,0x01,0xd4,0x84,0xd3,
+-	0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x41,0xcc,0xa5,0x00,0x01,0xff,0x61,
+-	0xcc,0xa5,0x00,0x10,0x08,0x01,0xff,0x42,0xcc,0x87,0x00,0x01,0xff,0x62,0xcc,0x87,
+-	0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x42,0xcc,0xa3,0x00,0x01,0xff,0x62,0xcc,0xa3,
+-	0x00,0x10,0x08,0x01,0xff,0x42,0xcc,0xb1,0x00,0x01,0xff,0x62,0xcc,0xb1,0x00,0xd2,
+-	0x24,0xd1,0x14,0x10,0x0a,0x01,0xff,0x43,0xcc,0xa7,0xcc,0x81,0x00,0x01,0xff,0x63,
+-	0xcc,0xa7,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x44,0xcc,0x87,0x00,0x01,0xff,0x64,
+-	0xcc,0x87,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x44,0xcc,0xa3,0x00,0x01,0xff,0x64,
+-	0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x44,0xcc,0xb1,0x00,0x01,0xff,0x64,0xcc,0xb1,
+-	0x00,0xd3,0x48,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x44,0xcc,0xa7,0x00,0x01,
+-	0xff,0x64,0xcc,0xa7,0x00,0x10,0x08,0x01,0xff,0x44,0xcc,0xad,0x00,0x01,0xff,0x64,
+-	0xcc,0xad,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x45,0xcc,0x84,0xcc,0x80,0x00,0x01,
+-	0xff,0x65,0xcc,0x84,0xcc,0x80,0x00,0x10,0x0a,0x01,0xff,0x45,0xcc,0x84,0xcc,0x81,
+-	0x00,0x01,0xff,0x65,0xcc,0x84,0xcc,0x81,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,
+-	0xff,0x45,0xcc,0xad,0x00,0x01,0xff,0x65,0xcc,0xad,0x00,0x10,0x08,0x01,0xff,0x45,
+-	0xcc,0xb0,0x00,0x01,0xff,0x65,0xcc,0xb0,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x45,
+-	0xcc,0xa7,0xcc,0x86,0x00,0x01,0xff,0x65,0xcc,0xa7,0xcc,0x86,0x00,0x10,0x08,0x01,
+-	0xff,0x46,0xcc,0x87,0x00,0x01,0xff,0x66,0xcc,0x87,0x00,0xd4,0x84,0xd3,0x40,0xd2,
+-	0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x47,0xcc,0x84,0x00,0x01,0xff,0x67,0xcc,0x84,
+-	0x00,0x10,0x08,0x01,0xff,0x48,0xcc,0x87,0x00,0x01,0xff,0x68,0xcc,0x87,0x00,0xd1,
+-	0x10,0x10,0x08,0x01,0xff,0x48,0xcc,0xa3,0x00,0x01,0xff,0x68,0xcc,0xa3,0x00,0x10,
+-	0x08,0x01,0xff,0x48,0xcc,0x88,0x00,0x01,0xff,0x68,0xcc,0x88,0x00,0xd2,0x20,0xd1,
+-	0x10,0x10,0x08,0x01,0xff,0x48,0xcc,0xa7,0x00,0x01,0xff,0x68,0xcc,0xa7,0x00,0x10,
+-	0x08,0x01,0xff,0x48,0xcc,0xae,0x00,0x01,0xff,0x68,0xcc,0xae,0x00,0xd1,0x10,0x10,
+-	0x08,0x01,0xff,0x49,0xcc,0xb0,0x00,0x01,0xff,0x69,0xcc,0xb0,0x00,0x10,0x0a,0x01,
+-	0xff,0x49,0xcc,0x88,0xcc,0x81,0x00,0x01,0xff,0x69,0xcc,0x88,0xcc,0x81,0x00,0xd3,
+-	0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x4b,0xcc,0x81,0x00,0x01,0xff,0x6b,
+-	0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x4b,0xcc,0xa3,0x00,0x01,0xff,0x6b,0xcc,0xa3,
+-	0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x4b,0xcc,0xb1,0x00,0x01,0xff,0x6b,0xcc,0xb1,
+-	0x00,0x10,0x08,0x01,0xff,0x4c,0xcc,0xa3,0x00,0x01,0xff,0x6c,0xcc,0xa3,0x00,0xd2,
+-	0x24,0xd1,0x14,0x10,0x0a,0x01,0xff,0x4c,0xcc,0xa3,0xcc,0x84,0x00,0x01,0xff,0x6c,
+-	0xcc,0xa3,0xcc,0x84,0x00,0x10,0x08,0x01,0xff,0x4c,0xcc,0xb1,0x00,0x01,0xff,0x6c,
+-	0xcc,0xb1,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x4c,0xcc,0xad,0x00,0x01,0xff,0x6c,
+-	0xcc,0xad,0x00,0x10,0x08,0x01,0xff,0x4d,0xcc,0x81,0x00,0x01,0xff,0x6d,0xcc,0x81,
+-	0x00,0xcf,0x86,0xe5,0x15,0x01,0xd4,0x88,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,
+-	0x01,0xff,0x4d,0xcc,0x87,0x00,0x01,0xff,0x6d,0xcc,0x87,0x00,0x10,0x08,0x01,0xff,
+-	0x4d,0xcc,0xa3,0x00,0x01,0xff,0x6d,0xcc,0xa3,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,
+-	0x4e,0xcc,0x87,0x00,0x01,0xff,0x6e,0xcc,0x87,0x00,0x10,0x08,0x01,0xff,0x4e,0xcc,
+-	0xa3,0x00,0x01,0xff,0x6e,0xcc,0xa3,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,
+-	0x4e,0xcc,0xb1,0x00,0x01,0xff,0x6e,0xcc,0xb1,0x00,0x10,0x08,0x01,0xff,0x4e,0xcc,
+-	0xad,0x00,0x01,0xff,0x6e,0xcc,0xad,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x4f,0xcc,
+-	0x83,0xcc,0x81,0x00,0x01,0xff,0x6f,0xcc,0x83,0xcc,0x81,0x00,0x10,0x0a,0x01,0xff,
+-	0x4f,0xcc,0x83,0xcc,0x88,0x00,0x01,0xff,0x6f,0xcc,0x83,0xcc,0x88,0x00,0xd3,0x48,
+-	0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x4f,0xcc,0x84,0xcc,0x80,0x00,0x01,0xff,
+-	0x6f,0xcc,0x84,0xcc,0x80,0x00,0x10,0x0a,0x01,0xff,0x4f,0xcc,0x84,0xcc,0x81,0x00,
+-	0x01,0xff,0x6f,0xcc,0x84,0xcc,0x81,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x50,0xcc,
+-	0x81,0x00,0x01,0xff,0x70,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x50,0xcc,0x87,0x00,
+-	0x01,0xff,0x70,0xcc,0x87,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x52,0xcc,
+-	0x87,0x00,0x01,0xff,0x72,0xcc,0x87,0x00,0x10,0x08,0x01,0xff,0x52,0xcc,0xa3,0x00,
+-	0x01,0xff,0x72,0xcc,0xa3,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x52,0xcc,0xa3,0xcc,
+-	0x84,0x00,0x01,0xff,0x72,0xcc,0xa3,0xcc,0x84,0x00,0x10,0x08,0x01,0xff,0x52,0xcc,
+-	0xb1,0x00,0x01,0xff,0x72,0xcc,0xb1,0x00,0xd4,0x8c,0xd3,0x48,0xd2,0x20,0xd1,0x10,
+-	0x10,0x08,0x01,0xff,0x53,0xcc,0x87,0x00,0x01,0xff,0x73,0xcc,0x87,0x00,0x10,0x08,
+-	0x01,0xff,0x53,0xcc,0xa3,0x00,0x01,0xff,0x73,0xcc,0xa3,0x00,0xd1,0x14,0x10,0x0a,
+-	0x01,0xff,0x53,0xcc,0x81,0xcc,0x87,0x00,0x01,0xff,0x73,0xcc,0x81,0xcc,0x87,0x00,
+-	0x10,0x0a,0x01,0xff,0x53,0xcc,0x8c,0xcc,0x87,0x00,0x01,0xff,0x73,0xcc,0x8c,0xcc,
+-	0x87,0x00,0xd2,0x24,0xd1,0x14,0x10,0x0a,0x01,0xff,0x53,0xcc,0xa3,0xcc,0x87,0x00,
+-	0x01,0xff,0x73,0xcc,0xa3,0xcc,0x87,0x00,0x10,0x08,0x01,0xff,0x54,0xcc,0x87,0x00,
+-	0x01,0xff,0x74,0xcc,0x87,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x54,0xcc,0xa3,0x00,
+-	0x01,0xff,0x74,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x54,0xcc,0xb1,0x00,0x01,0xff,
+-	0x74,0xcc,0xb1,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x54,0xcc,
+-	0xad,0x00,0x01,0xff,0x74,0xcc,0xad,0x00,0x10,0x08,0x01,0xff,0x55,0xcc,0xa4,0x00,
+-	0x01,0xff,0x75,0xcc,0xa4,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x55,0xcc,0xb0,0x00,
+-	0x01,0xff,0x75,0xcc,0xb0,0x00,0x10,0x08,0x01,0xff,0x55,0xcc,0xad,0x00,0x01,0xff,
+-	0x75,0xcc,0xad,0x00,0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x55,0xcc,0x83,0xcc,
+-	0x81,0x00,0x01,0xff,0x75,0xcc,0x83,0xcc,0x81,0x00,0x10,0x0a,0x01,0xff,0x55,0xcc,
+-	0x84,0xcc,0x88,0x00,0x01,0xff,0x75,0xcc,0x84,0xcc,0x88,0x00,0xd1,0x10,0x10,0x08,
+-	0x01,0xff,0x56,0xcc,0x83,0x00,0x01,0xff,0x76,0xcc,0x83,0x00,0x10,0x08,0x01,0xff,
+-	0x56,0xcc,0xa3,0x00,0x01,0xff,0x76,0xcc,0xa3,0x00,0xe0,0x10,0x02,0xcf,0x86,0xd5,
+-	0xe1,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x57,0xcc,0x80,
+-	0x00,0x01,0xff,0x77,0xcc,0x80,0x00,0x10,0x08,0x01,0xff,0x57,0xcc,0x81,0x00,0x01,
+-	0xff,0x77,0xcc,0x81,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x57,0xcc,0x88,0x00,0x01,
+-	0xff,0x77,0xcc,0x88,0x00,0x10,0x08,0x01,0xff,0x57,0xcc,0x87,0x00,0x01,0xff,0x77,
+-	0xcc,0x87,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x57,0xcc,0xa3,0x00,0x01,
+-	0xff,0x77,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x58,0xcc,0x87,0x00,0x01,0xff,0x78,
+-	0xcc,0x87,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x58,0xcc,0x88,0x00,0x01,0xff,0x78,
+-	0xcc,0x88,0x00,0x10,0x08,0x01,0xff,0x59,0xcc,0x87,0x00,0x01,0xff,0x79,0xcc,0x87,
+-	0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x5a,0xcc,0x82,0x00,0x01,
+-	0xff,0x7a,0xcc,0x82,0x00,0x10,0x08,0x01,0xff,0x5a,0xcc,0xa3,0x00,0x01,0xff,0x7a,
+-	0xcc,0xa3,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x5a,0xcc,0xb1,0x00,0x01,0xff,0x7a,
+-	0xcc,0xb1,0x00,0x10,0x08,0x01,0xff,0x68,0xcc,0xb1,0x00,0x01,0xff,0x74,0xcc,0x88,
+-	0x00,0x92,0x1d,0xd1,0x10,0x10,0x08,0x01,0xff,0x77,0xcc,0x8a,0x00,0x01,0xff,0x79,
+-	0xcc,0x8a,0x00,0x10,0x04,0x01,0x00,0x02,0xff,0xc5,0xbf,0xcc,0x87,0x00,0x0a,0x00,
+-	0xd4,0x98,0xd3,0x48,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x41,0xcc,0xa3,0x00,
+-	0x01,0xff,0x61,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x41,0xcc,0x89,0x00,0x01,0xff,
+-	0x61,0xcc,0x89,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x41,0xcc,0x82,0xcc,0x81,0x00,
+-	0x01,0xff,0x61,0xcc,0x82,0xcc,0x81,0x00,0x10,0x0a,0x01,0xff,0x41,0xcc,0x82,0xcc,
++	0xc6,0xe5,0xf9,0x14,0xe4,0x6f,0x0d,0xe3,0x39,0x08,0xe2,0x22,0x01,0xc1,0xd0,0x24,
++	0xcf,0x86,0x55,0x04,0x01,0x00,0xd4,0x07,0x63,0xd8,0x43,0x01,0x00,0x93,0x13,0x52,
++	0x04,0x01,0x00,0x91,0x0b,0x10,0x04,0x01,0x00,0x01,0xff,0xce,0xbc,0x00,0x01,0x00,
++	0x01,0x00,0xcf,0x86,0xe5,0xb3,0x44,0xd4,0x7f,0xd3,0x3f,0xd2,0x20,0xd1,0x10,0x10,
++	0x08,0x01,0xff,0x61,0xcc,0x80,0x00,0x01,0xff,0x61,0xcc,0x81,0x00,0x10,0x08,0x01,
++	0xff,0x61,0xcc,0x82,0x00,0x01,0xff,0x61,0xcc,0x83,0x00,0xd1,0x10,0x10,0x08,0x01,
++	0xff,0x61,0xcc,0x88,0x00,0x01,0xff,0x61,0xcc,0x8a,0x00,0x10,0x07,0x01,0xff,0xc3,
++	0xa6,0x00,0x01,0xff,0x63,0xcc,0xa7,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,
++	0x65,0xcc,0x80,0x00,0x01,0xff,0x65,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x65,0xcc,
++	0x82,0x00,0x01,0xff,0x65,0xcc,0x88,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x69,0xcc,
++	0x80,0x00,0x01,0xff,0x69,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x69,0xcc,0x82,0x00,
++	0x01,0xff,0x69,0xcc,0x88,0x00,0xd3,0x3b,0xd2,0x1f,0xd1,0x0f,0x10,0x07,0x01,0xff,
++	0xc3,0xb0,0x00,0x01,0xff,0x6e,0xcc,0x83,0x00,0x10,0x08,0x01,0xff,0x6f,0xcc,0x80,
++	0x00,0x01,0xff,0x6f,0xcc,0x81,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x6f,0xcc,0x82,
++	0x00,0x01,0xff,0x6f,0xcc,0x83,0x00,0x10,0x08,0x01,0xff,0x6f,0xcc,0x88,0x00,0x01,
++	0x00,0xd2,0x1f,0xd1,0x0f,0x10,0x07,0x01,0xff,0xc3,0xb8,0x00,0x01,0xff,0x75,0xcc,
++	0x80,0x00,0x10,0x08,0x01,0xff,0x75,0xcc,0x81,0x00,0x01,0xff,0x75,0xcc,0x82,0x00,
++	0xd1,0x10,0x10,0x08,0x01,0xff,0x75,0xcc,0x88,0x00,0x01,0xff,0x79,0xcc,0x81,0x00,
++	0x10,0x07,0x01,0xff,0xc3,0xbe,0x00,0x01,0xff,0x73,0x73,0x00,0xe1,0xd4,0x03,0xe0,
++	0xeb,0x01,0xcf,0x86,0xd5,0xfb,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,
++	0x01,0xff,0x61,0xcc,0x84,0x00,0x01,0xff,0x61,0xcc,0x84,0x00,0x10,0x08,0x01,0xff,
++	0x61,0xcc,0x86,0x00,0x01,0xff,0x61,0xcc,0x86,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,
++	0x61,0xcc,0xa8,0x00,0x01,0xff,0x61,0xcc,0xa8,0x00,0x10,0x08,0x01,0xff,0x63,0xcc,
++	0x81,0x00,0x01,0xff,0x63,0xcc,0x81,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,
++	0x63,0xcc,0x82,0x00,0x01,0xff,0x63,0xcc,0x82,0x00,0x10,0x08,0x01,0xff,0x63,0xcc,
++	0x87,0x00,0x01,0xff,0x63,0xcc,0x87,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x63,0xcc,
++	0x8c,0x00,0x01,0xff,0x63,0xcc,0x8c,0x00,0x10,0x08,0x01,0xff,0x64,0xcc,0x8c,0x00,
++	0x01,0xff,0x64,0xcc,0x8c,0x00,0xd3,0x3b,0xd2,0x1b,0xd1,0x0b,0x10,0x07,0x01,0xff,
++	0xc4,0x91,0x00,0x01,0x00,0x10,0x08,0x01,0xff,0x65,0xcc,0x84,0x00,0x01,0xff,0x65,
++	0xcc,0x84,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x65,0xcc,0x86,0x00,0x01,0xff,0x65,
++	0xcc,0x86,0x00,0x10,0x08,0x01,0xff,0x65,0xcc,0x87,0x00,0x01,0xff,0x65,0xcc,0x87,
++	0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x65,0xcc,0xa8,0x00,0x01,0xff,0x65,
++	0xcc,0xa8,0x00,0x10,0x08,0x01,0xff,0x65,0xcc,0x8c,0x00,0x01,0xff,0x65,0xcc,0x8c,
++	0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x67,0xcc,0x82,0x00,0x01,0xff,0x67,0xcc,0x82,
++	0x00,0x10,0x08,0x01,0xff,0x67,0xcc,0x86,0x00,0x01,0xff,0x67,0xcc,0x86,0x00,0xd4,
++	0x7b,0xd3,0x3b,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x67,0xcc,0x87,0x00,0x01,
++	0xff,0x67,0xcc,0x87,0x00,0x10,0x08,0x01,0xff,0x67,0xcc,0xa7,0x00,0x01,0xff,0x67,
++	0xcc,0xa7,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x68,0xcc,0x82,0x00,0x01,0xff,0x68,
++	0xcc,0x82,0x00,0x10,0x07,0x01,0xff,0xc4,0xa7,0x00,0x01,0x00,0xd2,0x20,0xd1,0x10,
++	0x10,0x08,0x01,0xff,0x69,0xcc,0x83,0x00,0x01,0xff,0x69,0xcc,0x83,0x00,0x10,0x08,
++	0x01,0xff,0x69,0xcc,0x84,0x00,0x01,0xff,0x69,0xcc,0x84,0x00,0xd1,0x10,0x10,0x08,
++	0x01,0xff,0x69,0xcc,0x86,0x00,0x01,0xff,0x69,0xcc,0x86,0x00,0x10,0x08,0x01,0xff,
++	0x69,0xcc,0xa8,0x00,0x01,0xff,0x69,0xcc,0xa8,0x00,0xd3,0x37,0xd2,0x17,0xd1,0x0c,
++	0x10,0x08,0x01,0xff,0x69,0xcc,0x87,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xc4,0xb3,
++	0x00,0x01,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x6a,0xcc,0x82,0x00,0x01,0xff,0x6a,
++	0xcc,0x82,0x00,0x10,0x08,0x01,0xff,0x6b,0xcc,0xa7,0x00,0x01,0xff,0x6b,0xcc,0xa7,
++	0x00,0xd2,0x1c,0xd1,0x0c,0x10,0x04,0x01,0x00,0x01,0xff,0x6c,0xcc,0x81,0x00,0x10,
++	0x08,0x01,0xff,0x6c,0xcc,0x81,0x00,0x01,0xff,0x6c,0xcc,0xa7,0x00,0xd1,0x10,0x10,
++	0x08,0x01,0xff,0x6c,0xcc,0xa7,0x00,0x01,0xff,0x6c,0xcc,0x8c,0x00,0x10,0x08,0x01,
++	0xff,0x6c,0xcc,0x8c,0x00,0x01,0xff,0xc5,0x80,0x00,0xcf,0x86,0xd5,0xed,0xd4,0x72,
++	0xd3,0x37,0xd2,0x17,0xd1,0x0b,0x10,0x04,0x01,0x00,0x01,0xff,0xc5,0x82,0x00,0x10,
++	0x04,0x01,0x00,0x01,0xff,0x6e,0xcc,0x81,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x6e,
++	0xcc,0x81,0x00,0x01,0xff,0x6e,0xcc,0xa7,0x00,0x10,0x08,0x01,0xff,0x6e,0xcc,0xa7,
++	0x00,0x01,0xff,0x6e,0xcc,0x8c,0x00,0xd2,0x1b,0xd1,0x10,0x10,0x08,0x01,0xff,0x6e,
++	0xcc,0x8c,0x00,0x01,0xff,0xca,0xbc,0x6e,0x00,0x10,0x07,0x01,0xff,0xc5,0x8b,0x00,
++	0x01,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x6f,0xcc,0x84,0x00,0x01,0xff,0x6f,0xcc,
++	0x84,0x00,0x10,0x08,0x01,0xff,0x6f,0xcc,0x86,0x00,0x01,0xff,0x6f,0xcc,0x86,0x00,
++	0xd3,0x3b,0xd2,0x1b,0xd1,0x10,0x10,0x08,0x01,0xff,0x6f,0xcc,0x8b,0x00,0x01,0xff,
++	0x6f,0xcc,0x8b,0x00,0x10,0x07,0x01,0xff,0xc5,0x93,0x00,0x01,0x00,0xd1,0x10,0x10,
++	0x08,0x01,0xff,0x72,0xcc,0x81,0x00,0x01,0xff,0x72,0xcc,0x81,0x00,0x10,0x08,0x01,
++	0xff,0x72,0xcc,0xa7,0x00,0x01,0xff,0x72,0xcc,0xa7,0x00,0xd2,0x20,0xd1,0x10,0x10,
++	0x08,0x01,0xff,0x72,0xcc,0x8c,0x00,0x01,0xff,0x72,0xcc,0x8c,0x00,0x10,0x08,0x01,
++	0xff,0x73,0xcc,0x81,0x00,0x01,0xff,0x73,0xcc,0x81,0x00,0xd1,0x10,0x10,0x08,0x01,
++	0xff,0x73,0xcc,0x82,0x00,0x01,0xff,0x73,0xcc,0x82,0x00,0x10,0x08,0x01,0xff,0x73,
++	0xcc,0xa7,0x00,0x01,0xff,0x73,0xcc,0xa7,0x00,0xd4,0x7b,0xd3,0x3b,0xd2,0x20,0xd1,
++	0x10,0x10,0x08,0x01,0xff,0x73,0xcc,0x8c,0x00,0x01,0xff,0x73,0xcc,0x8c,0x00,0x10,
++	0x08,0x01,0xff,0x74,0xcc,0xa7,0x00,0x01,0xff,0x74,0xcc,0xa7,0x00,0xd1,0x10,0x10,
++	0x08,0x01,0xff,0x74,0xcc,0x8c,0x00,0x01,0xff,0x74,0xcc,0x8c,0x00,0x10,0x07,0x01,
++	0xff,0xc5,0xa7,0x00,0x01,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x75,0xcc,
++	0x83,0x00,0x01,0xff,0x75,0xcc,0x83,0x00,0x10,0x08,0x01,0xff,0x75,0xcc,0x84,0x00,
++	0x01,0xff,0x75,0xcc,0x84,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x75,0xcc,0x86,0x00,
++	0x01,0xff,0x75,0xcc,0x86,0x00,0x10,0x08,0x01,0xff,0x75,0xcc,0x8a,0x00,0x01,0xff,
++	0x75,0xcc,0x8a,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x75,0xcc,
++	0x8b,0x00,0x01,0xff,0x75,0xcc,0x8b,0x00,0x10,0x08,0x01,0xff,0x75,0xcc,0xa8,0x00,
++	0x01,0xff,0x75,0xcc,0xa8,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x77,0xcc,0x82,0x00,
++	0x01,0xff,0x77,0xcc,0x82,0x00,0x10,0x08,0x01,0xff,0x79,0xcc,0x82,0x00,0x01,0xff,
++	0x79,0xcc,0x82,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x79,0xcc,0x88,0x00,
++	0x01,0xff,0x7a,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x7a,0xcc,0x81,0x00,0x01,0xff,
++	0x7a,0xcc,0x87,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x7a,0xcc,0x87,0x00,0x01,0xff,
++	0x7a,0xcc,0x8c,0x00,0x10,0x08,0x01,0xff,0x7a,0xcc,0x8c,0x00,0x01,0xff,0x73,0x00,
++	0xe0,0x65,0x01,0xcf,0x86,0xd5,0xb4,0xd4,0x5a,0xd3,0x2f,0xd2,0x16,0xd1,0x0b,0x10,
++	0x04,0x01,0x00,0x01,0xff,0xc9,0x93,0x00,0x10,0x07,0x01,0xff,0xc6,0x83,0x00,0x01,
++	0x00,0xd1,0x0b,0x10,0x07,0x01,0xff,0xc6,0x85,0x00,0x01,0x00,0x10,0x07,0x01,0xff,
++	0xc9,0x94,0x00,0x01,0xff,0xc6,0x88,0x00,0xd2,0x19,0xd1,0x0b,0x10,0x04,0x01,0x00,
++	0x01,0xff,0xc9,0x96,0x00,0x10,0x07,0x01,0xff,0xc9,0x97,0x00,0x01,0xff,0xc6,0x8c,
++	0x00,0x51,0x04,0x01,0x00,0x10,0x07,0x01,0xff,0xc7,0x9d,0x00,0x01,0xff,0xc9,0x99,
++	0x00,0xd3,0x32,0xd2,0x19,0xd1,0x0e,0x10,0x07,0x01,0xff,0xc9,0x9b,0x00,0x01,0xff,
++	0xc6,0x92,0x00,0x10,0x04,0x01,0x00,0x01,0xff,0xc9,0xa0,0x00,0xd1,0x0b,0x10,0x07,
++	0x01,0xff,0xc9,0xa3,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xc9,0xa9,0x00,0x01,0xff,
++	0xc9,0xa8,0x00,0xd2,0x0f,0x91,0x0b,0x10,0x07,0x01,0xff,0xc6,0x99,0x00,0x01,0x00,
++	0x01,0x00,0xd1,0x0e,0x10,0x07,0x01,0xff,0xc9,0xaf,0x00,0x01,0xff,0xc9,0xb2,0x00,
++	0x10,0x04,0x01,0x00,0x01,0xff,0xc9,0xb5,0x00,0xd4,0x5d,0xd3,0x34,0xd2,0x1b,0xd1,
++	0x10,0x10,0x08,0x01,0xff,0x6f,0xcc,0x9b,0x00,0x01,0xff,0x6f,0xcc,0x9b,0x00,0x10,
++	0x07,0x01,0xff,0xc6,0xa3,0x00,0x01,0x00,0xd1,0x0b,0x10,0x07,0x01,0xff,0xc6,0xa5,
++	0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xca,0x80,0x00,0x01,0xff,0xc6,0xa8,0x00,0xd2,
++	0x0f,0x91,0x0b,0x10,0x04,0x01,0x00,0x01,0xff,0xca,0x83,0x00,0x01,0x00,0xd1,0x0b,
++	0x10,0x07,0x01,0xff,0xc6,0xad,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xca,0x88,0x00,
++	0x01,0xff,0x75,0xcc,0x9b,0x00,0xd3,0x33,0xd2,0x1d,0xd1,0x0f,0x10,0x08,0x01,0xff,
++	0x75,0xcc,0x9b,0x00,0x01,0xff,0xca,0x8a,0x00,0x10,0x07,0x01,0xff,0xca,0x8b,0x00,
++	0x01,0xff,0xc6,0xb4,0x00,0xd1,0x0b,0x10,0x04,0x01,0x00,0x01,0xff,0xc6,0xb6,0x00,
++	0x10,0x04,0x01,0x00,0x01,0xff,0xca,0x92,0x00,0xd2,0x0f,0x91,0x0b,0x10,0x07,0x01,
++	0xff,0xc6,0xb9,0x00,0x01,0x00,0x01,0x00,0x91,0x0b,0x10,0x07,0x01,0xff,0xc6,0xbd,
++	0x00,0x01,0x00,0x01,0x00,0xcf,0x86,0xd5,0xd4,0xd4,0x44,0xd3,0x16,0x52,0x04,0x01,
++	0x00,0x51,0x07,0x01,0xff,0xc7,0x86,0x00,0x10,0x04,0x01,0x00,0x01,0xff,0xc7,0x89,
++	0x00,0xd2,0x12,0x91,0x0b,0x10,0x07,0x01,0xff,0xc7,0x89,0x00,0x01,0x00,0x01,0xff,
++	0xc7,0x8c,0x00,0xd1,0x0c,0x10,0x04,0x01,0x00,0x01,0xff,0x61,0xcc,0x8c,0x00,0x10,
++	0x08,0x01,0xff,0x61,0xcc,0x8c,0x00,0x01,0xff,0x69,0xcc,0x8c,0x00,0xd3,0x46,0xd2,
++	0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x69,0xcc,0x8c,0x00,0x01,0xff,0x6f,0xcc,0x8c,
++	0x00,0x10,0x08,0x01,0xff,0x6f,0xcc,0x8c,0x00,0x01,0xff,0x75,0xcc,0x8c,0x00,0xd1,
++	0x12,0x10,0x08,0x01,0xff,0x75,0xcc,0x8c,0x00,0x01,0xff,0x75,0xcc,0x88,0xcc,0x84,
++	0x00,0x10,0x0a,0x01,0xff,0x75,0xcc,0x88,0xcc,0x84,0x00,0x01,0xff,0x75,0xcc,0x88,
++	0xcc,0x81,0x00,0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x75,0xcc,0x88,0xcc,0x81,
++	0x00,0x01,0xff,0x75,0xcc,0x88,0xcc,0x8c,0x00,0x10,0x0a,0x01,0xff,0x75,0xcc,0x88,
++	0xcc,0x8c,0x00,0x01,0xff,0x75,0xcc,0x88,0xcc,0x80,0x00,0xd1,0x0e,0x10,0x0a,0x01,
++	0xff,0x75,0xcc,0x88,0xcc,0x80,0x00,0x01,0x00,0x10,0x0a,0x01,0xff,0x61,0xcc,0x88,
++	0xcc,0x84,0x00,0x01,0xff,0x61,0xcc,0x88,0xcc,0x84,0x00,0xd4,0x87,0xd3,0x41,0xd2,
++	0x26,0xd1,0x14,0x10,0x0a,0x01,0xff,0x61,0xcc,0x87,0xcc,0x84,0x00,0x01,0xff,0x61,
++	0xcc,0x87,0xcc,0x84,0x00,0x10,0x09,0x01,0xff,0xc3,0xa6,0xcc,0x84,0x00,0x01,0xff,
++	0xc3,0xa6,0xcc,0x84,0x00,0xd1,0x0b,0x10,0x07,0x01,0xff,0xc7,0xa5,0x00,0x01,0x00,
++	0x10,0x08,0x01,0xff,0x67,0xcc,0x8c,0x00,0x01,0xff,0x67,0xcc,0x8c,0x00,0xd2,0x20,
++	0xd1,0x10,0x10,0x08,0x01,0xff,0x6b,0xcc,0x8c,0x00,0x01,0xff,0x6b,0xcc,0x8c,0x00,
++	0x10,0x08,0x01,0xff,0x6f,0xcc,0xa8,0x00,0x01,0xff,0x6f,0xcc,0xa8,0x00,0xd1,0x14,
++	0x10,0x0a,0x01,0xff,0x6f,0xcc,0xa8,0xcc,0x84,0x00,0x01,0xff,0x6f,0xcc,0xa8,0xcc,
++	0x84,0x00,0x10,0x09,0x01,0xff,0xca,0x92,0xcc,0x8c,0x00,0x01,0xff,0xca,0x92,0xcc,
++	0x8c,0x00,0xd3,0x38,0xd2,0x1a,0xd1,0x0f,0x10,0x08,0x01,0xff,0x6a,0xcc,0x8c,0x00,
++	0x01,0xff,0xc7,0xb3,0x00,0x10,0x07,0x01,0xff,0xc7,0xb3,0x00,0x01,0x00,0xd1,0x10,
++	0x10,0x08,0x01,0xff,0x67,0xcc,0x81,0x00,0x01,0xff,0x67,0xcc,0x81,0x00,0x10,0x07,
++	0x04,0xff,0xc6,0x95,0x00,0x04,0xff,0xc6,0xbf,0x00,0xd2,0x24,0xd1,0x10,0x10,0x08,
++	0x04,0xff,0x6e,0xcc,0x80,0x00,0x04,0xff,0x6e,0xcc,0x80,0x00,0x10,0x0a,0x01,0xff,
++	0x61,0xcc,0x8a,0xcc,0x81,0x00,0x01,0xff,0x61,0xcc,0x8a,0xcc,0x81,0x00,0xd1,0x12,
++	0x10,0x09,0x01,0xff,0xc3,0xa6,0xcc,0x81,0x00,0x01,0xff,0xc3,0xa6,0xcc,0x81,0x00,
++	0x10,0x09,0x01,0xff,0xc3,0xb8,0xcc,0x81,0x00,0x01,0xff,0xc3,0xb8,0xcc,0x81,0x00,
++	0xe2,0x31,0x02,0xe1,0xc3,0x44,0xe0,0xc8,0x01,0xcf,0x86,0xd5,0xfb,0xd4,0x80,0xd3,
++	0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x61,0xcc,0x8f,0x00,0x01,0xff,0x61,
++	0xcc,0x8f,0x00,0x10,0x08,0x01,0xff,0x61,0xcc,0x91,0x00,0x01,0xff,0x61,0xcc,0x91,
++	0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x65,0xcc,0x8f,0x00,0x01,0xff,0x65,0xcc,0x8f,
++	0x00,0x10,0x08,0x01,0xff,0x65,0xcc,0x91,0x00,0x01,0xff,0x65,0xcc,0x91,0x00,0xd2,
++	0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x69,0xcc,0x8f,0x00,0x01,0xff,0x69,0xcc,0x8f,
++	0x00,0x10,0x08,0x01,0xff,0x69,0xcc,0x91,0x00,0x01,0xff,0x69,0xcc,0x91,0x00,0xd1,
++	0x10,0x10,0x08,0x01,0xff,0x6f,0xcc,0x8f,0x00,0x01,0xff,0x6f,0xcc,0x8f,0x00,0x10,
++	0x08,0x01,0xff,0x6f,0xcc,0x91,0x00,0x01,0xff,0x6f,0xcc,0x91,0x00,0xd3,0x40,0xd2,
++	0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x72,0xcc,0x8f,0x00,0x01,0xff,0x72,0xcc,0x8f,
++	0x00,0x10,0x08,0x01,0xff,0x72,0xcc,0x91,0x00,0x01,0xff,0x72,0xcc,0x91,0x00,0xd1,
++	0x10,0x10,0x08,0x01,0xff,0x75,0xcc,0x8f,0x00,0x01,0xff,0x75,0xcc,0x8f,0x00,0x10,
++	0x08,0x01,0xff,0x75,0xcc,0x91,0x00,0x01,0xff,0x75,0xcc,0x91,0x00,0xd2,0x20,0xd1,
++	0x10,0x10,0x08,0x04,0xff,0x73,0xcc,0xa6,0x00,0x04,0xff,0x73,0xcc,0xa6,0x00,0x10,
++	0x08,0x04,0xff,0x74,0xcc,0xa6,0x00,0x04,0xff,0x74,0xcc,0xa6,0x00,0xd1,0x0b,0x10,
++	0x07,0x04,0xff,0xc8,0x9d,0x00,0x04,0x00,0x10,0x08,0x04,0xff,0x68,0xcc,0x8c,0x00,
++	0x04,0xff,0x68,0xcc,0x8c,0x00,0xd4,0x79,0xd3,0x31,0xd2,0x16,0xd1,0x0b,0x10,0x07,
++	0x06,0xff,0xc6,0x9e,0x00,0x07,0x00,0x10,0x07,0x04,0xff,0xc8,0xa3,0x00,0x04,0x00,
++	0xd1,0x0b,0x10,0x07,0x04,0xff,0xc8,0xa5,0x00,0x04,0x00,0x10,0x08,0x04,0xff,0x61,
++	0xcc,0x87,0x00,0x04,0xff,0x61,0xcc,0x87,0x00,0xd2,0x24,0xd1,0x10,0x10,0x08,0x04,
++	0xff,0x65,0xcc,0xa7,0x00,0x04,0xff,0x65,0xcc,0xa7,0x00,0x10,0x0a,0x04,0xff,0x6f,
++	0xcc,0x88,0xcc,0x84,0x00,0x04,0xff,0x6f,0xcc,0x88,0xcc,0x84,0x00,0xd1,0x14,0x10,
++	0x0a,0x04,0xff,0x6f,0xcc,0x83,0xcc,0x84,0x00,0x04,0xff,0x6f,0xcc,0x83,0xcc,0x84,
++	0x00,0x10,0x08,0x04,0xff,0x6f,0xcc,0x87,0x00,0x04,0xff,0x6f,0xcc,0x87,0x00,0xd3,
++	0x27,0xe2,0x21,0x43,0xd1,0x14,0x10,0x0a,0x04,0xff,0x6f,0xcc,0x87,0xcc,0x84,0x00,
++	0x04,0xff,0x6f,0xcc,0x87,0xcc,0x84,0x00,0x10,0x08,0x04,0xff,0x79,0xcc,0x84,0x00,
++	0x04,0xff,0x79,0xcc,0x84,0x00,0xd2,0x13,0x51,0x04,0x08,0x00,0x10,0x08,0x08,0xff,
++	0xe2,0xb1,0xa5,0x00,0x08,0xff,0xc8,0xbc,0x00,0xd1,0x0b,0x10,0x04,0x08,0x00,0x08,
++	0xff,0xc6,0x9a,0x00,0x10,0x08,0x08,0xff,0xe2,0xb1,0xa6,0x00,0x08,0x00,0xcf,0x86,
++	0x95,0x5f,0x94,0x5b,0xd3,0x2f,0xd2,0x16,0xd1,0x0b,0x10,0x04,0x08,0x00,0x08,0xff,
++	0xc9,0x82,0x00,0x10,0x04,0x09,0x00,0x09,0xff,0xc6,0x80,0x00,0xd1,0x0e,0x10,0x07,
++	0x09,0xff,0xca,0x89,0x00,0x09,0xff,0xca,0x8c,0x00,0x10,0x07,0x09,0xff,0xc9,0x87,
++	0x00,0x09,0x00,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x09,0xff,0xc9,0x89,0x00,0x09,0x00,
++	0x10,0x07,0x09,0xff,0xc9,0x8b,0x00,0x09,0x00,0xd1,0x0b,0x10,0x07,0x09,0xff,0xc9,
++	0x8d,0x00,0x09,0x00,0x10,0x07,0x09,0xff,0xc9,0x8f,0x00,0x09,0x00,0x01,0x00,0x01,
++	0x00,0xd1,0x8b,0xd0,0x0c,0xcf,0x86,0xe5,0x10,0x43,0x64,0xef,0x42,0x01,0xe6,0xcf,
++	0x86,0xd5,0x2a,0xe4,0x99,0x43,0xe3,0x7f,0x43,0xd2,0x11,0xe1,0x5e,0x43,0x10,0x07,
++	0x01,0xff,0xcc,0x80,0x00,0x01,0xff,0xcc,0x81,0x00,0xe1,0x65,0x43,0x10,0x09,0x01,
++	0xff,0xcc,0x88,0xcc,0x81,0x00,0x01,0xff,0xce,0xb9,0x00,0xd4,0x0f,0x93,0x0b,0x92,
++	0x07,0x61,0xab,0x43,0x01,0xea,0x06,0xe6,0x06,0xe6,0xd3,0x2c,0xd2,0x16,0xd1,0x0b,
++	0x10,0x07,0x0a,0xff,0xcd,0xb1,0x00,0x0a,0x00,0x10,0x07,0x0a,0xff,0xcd,0xb3,0x00,
++	0x0a,0x00,0xd1,0x0b,0x10,0x07,0x01,0xff,0xca,0xb9,0x00,0x01,0x00,0x10,0x07,0x0a,
++	0xff,0xcd,0xb7,0x00,0x0a,0x00,0xd2,0x07,0x61,0x97,0x43,0x00,0x00,0x51,0x04,0x09,
++	0x00,0x10,0x06,0x01,0xff,0x3b,0x00,0x10,0xff,0xcf,0xb3,0x00,0xe0,0x31,0x01,0xcf,
++	0x86,0xd5,0xd3,0xd4,0x5f,0xd3,0x21,0x52,0x04,0x00,0x00,0xd1,0x0d,0x10,0x04,0x01,
++	0x00,0x01,0xff,0xc2,0xa8,0xcc,0x81,0x00,0x10,0x09,0x01,0xff,0xce,0xb1,0xcc,0x81,
++	0x00,0x01,0xff,0xc2,0xb7,0x00,0xd2,0x1f,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xb5,
++	0xcc,0x81,0x00,0x01,0xff,0xce,0xb7,0xcc,0x81,0x00,0x10,0x09,0x01,0xff,0xce,0xb9,
++	0xcc,0x81,0x00,0x00,0x00,0xd1,0x0d,0x10,0x09,0x01,0xff,0xce,0xbf,0xcc,0x81,0x00,
++	0x00,0x00,0x10,0x09,0x01,0xff,0xcf,0x85,0xcc,0x81,0x00,0x01,0xff,0xcf,0x89,0xcc,
++	0x81,0x00,0xd3,0x3c,0xd2,0x20,0xd1,0x12,0x10,0x0b,0x01,0xff,0xce,0xb9,0xcc,0x88,
++	0xcc,0x81,0x00,0x01,0xff,0xce,0xb1,0x00,0x10,0x07,0x01,0xff,0xce,0xb2,0x00,0x01,
++	0xff,0xce,0xb3,0x00,0xd1,0x0e,0x10,0x07,0x01,0xff,0xce,0xb4,0x00,0x01,0xff,0xce,
++	0xb5,0x00,0x10,0x07,0x01,0xff,0xce,0xb6,0x00,0x01,0xff,0xce,0xb7,0x00,0xd2,0x1c,
++	0xd1,0x0e,0x10,0x07,0x01,0xff,0xce,0xb8,0x00,0x01,0xff,0xce,0xb9,0x00,0x10,0x07,
++	0x01,0xff,0xce,0xba,0x00,0x01,0xff,0xce,0xbb,0x00,0xd1,0x0e,0x10,0x07,0x01,0xff,
++	0xce,0xbc,0x00,0x01,0xff,0xce,0xbd,0x00,0x10,0x07,0x01,0xff,0xce,0xbe,0x00,0x01,
++	0xff,0xce,0xbf,0x00,0xe4,0x85,0x43,0xd3,0x35,0xd2,0x19,0xd1,0x0e,0x10,0x07,0x01,
++	0xff,0xcf,0x80,0x00,0x01,0xff,0xcf,0x81,0x00,0x10,0x04,0x00,0x00,0x01,0xff,0xcf,
++	0x83,0x00,0xd1,0x0e,0x10,0x07,0x01,0xff,0xcf,0x84,0x00,0x01,0xff,0xcf,0x85,0x00,
++	0x10,0x07,0x01,0xff,0xcf,0x86,0x00,0x01,0xff,0xcf,0x87,0x00,0xe2,0x2b,0x43,0xd1,
++	0x0e,0x10,0x07,0x01,0xff,0xcf,0x88,0x00,0x01,0xff,0xcf,0x89,0x00,0x10,0x09,0x01,
++	0xff,0xce,0xb9,0xcc,0x88,0x00,0x01,0xff,0xcf,0x85,0xcc,0x88,0x00,0xcf,0x86,0xd5,
++	0x94,0xd4,0x3c,0xd3,0x13,0x92,0x0f,0x51,0x04,0x01,0x00,0x10,0x07,0x01,0xff,0xcf,
++	0x83,0x00,0x01,0x00,0x01,0x00,0xd2,0x07,0x61,0x3a,0x43,0x01,0x00,0xd1,0x12,0x10,
++	0x09,0x01,0xff,0xce,0xbf,0xcc,0x81,0x00,0x01,0xff,0xcf,0x85,0xcc,0x81,0x00,0x10,
++	0x09,0x01,0xff,0xcf,0x89,0xcc,0x81,0x00,0x0a,0xff,0xcf,0x97,0x00,0xd3,0x2c,0xd2,
++	0x11,0xe1,0x46,0x43,0x10,0x07,0x01,0xff,0xce,0xb2,0x00,0x01,0xff,0xce,0xb8,0x00,
++	0xd1,0x10,0x10,0x09,0x01,0xff,0xcf,0x92,0xcc,0x88,0x00,0x01,0xff,0xcf,0x86,0x00,
++	0x10,0x07,0x01,0xff,0xcf,0x80,0x00,0x04,0x00,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x06,
++	0xff,0xcf,0x99,0x00,0x06,0x00,0x10,0x07,0x01,0xff,0xcf,0x9b,0x00,0x04,0x00,0xd1,
++	0x0b,0x10,0x07,0x01,0xff,0xcf,0x9d,0x00,0x04,0x00,0x10,0x07,0x01,0xff,0xcf,0x9f,
++	0x00,0x04,0x00,0xd4,0x58,0xd3,0x2c,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x01,0xff,0xcf,
++	0xa1,0x00,0x04,0x00,0x10,0x07,0x01,0xff,0xcf,0xa3,0x00,0x01,0x00,0xd1,0x0b,0x10,
++	0x07,0x01,0xff,0xcf,0xa5,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xcf,0xa7,0x00,0x01,
++	0x00,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x01,0xff,0xcf,0xa9,0x00,0x01,0x00,0x10,0x07,
++	0x01,0xff,0xcf,0xab,0x00,0x01,0x00,0xd1,0x0b,0x10,0x07,0x01,0xff,0xcf,0xad,0x00,
++	0x01,0x00,0x10,0x07,0x01,0xff,0xcf,0xaf,0x00,0x01,0x00,0xd3,0x2b,0xd2,0x12,0x91,
++	0x0e,0x10,0x07,0x01,0xff,0xce,0xba,0x00,0x01,0xff,0xcf,0x81,0x00,0x01,0x00,0xd1,
++	0x0e,0x10,0x07,0x05,0xff,0xce,0xb8,0x00,0x05,0xff,0xce,0xb5,0x00,0x10,0x04,0x06,
++	0x00,0x07,0xff,0xcf,0xb8,0x00,0xd2,0x16,0xd1,0x0b,0x10,0x04,0x07,0x00,0x07,0xff,
++	0xcf,0xb2,0x00,0x10,0x07,0x07,0xff,0xcf,0xbb,0x00,0x07,0x00,0xd1,0x0b,0x10,0x04,
++	0x08,0x00,0x08,0xff,0xcd,0xbb,0x00,0x10,0x07,0x08,0xff,0xcd,0xbc,0x00,0x08,0xff,
++	0xcd,0xbd,0x00,0xe3,0xed,0x46,0xe2,0x3d,0x05,0xe1,0x27,0x02,0xe0,0x66,0x01,0xcf,
++	0x86,0xd5,0xf0,0xd4,0x7e,0xd3,0x40,0xd2,0x22,0xd1,0x12,0x10,0x09,0x04,0xff,0xd0,
++	0xb5,0xcc,0x80,0x00,0x01,0xff,0xd0,0xb5,0xcc,0x88,0x00,0x10,0x07,0x01,0xff,0xd1,
++	0x92,0x00,0x01,0xff,0xd0,0xb3,0xcc,0x81,0x00,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd1,
++	0x94,0x00,0x01,0xff,0xd1,0x95,0x00,0x10,0x07,0x01,0xff,0xd1,0x96,0x00,0x01,0xff,
++	0xd1,0x96,0xcc,0x88,0x00,0xd2,0x1c,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd1,0x98,0x00,
++	0x01,0xff,0xd1,0x99,0x00,0x10,0x07,0x01,0xff,0xd1,0x9a,0x00,0x01,0xff,0xd1,0x9b,
++	0x00,0xd1,0x12,0x10,0x09,0x01,0xff,0xd0,0xba,0xcc,0x81,0x00,0x04,0xff,0xd0,0xb8,
++	0xcc,0x80,0x00,0x10,0x09,0x01,0xff,0xd1,0x83,0xcc,0x86,0x00,0x01,0xff,0xd1,0x9f,
++	0x00,0xd3,0x38,0xd2,0x1c,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd0,0xb0,0x00,0x01,0xff,
++	0xd0,0xb1,0x00,0x10,0x07,0x01,0xff,0xd0,0xb2,0x00,0x01,0xff,0xd0,0xb3,0x00,0xd1,
++	0x0e,0x10,0x07,0x01,0xff,0xd0,0xb4,0x00,0x01,0xff,0xd0,0xb5,0x00,0x10,0x07,0x01,
++	0xff,0xd0,0xb6,0x00,0x01,0xff,0xd0,0xb7,0x00,0xd2,0x1e,0xd1,0x10,0x10,0x07,0x01,
++	0xff,0xd0,0xb8,0x00,0x01,0xff,0xd0,0xb8,0xcc,0x86,0x00,0x10,0x07,0x01,0xff,0xd0,
++	0xba,0x00,0x01,0xff,0xd0,0xbb,0x00,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd0,0xbc,0x00,
++	0x01,0xff,0xd0,0xbd,0x00,0x10,0x07,0x01,0xff,0xd0,0xbe,0x00,0x01,0xff,0xd0,0xbf,
++	0x00,0xe4,0x25,0x42,0xd3,0x38,0xd2,0x1c,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd1,0x80,
++	0x00,0x01,0xff,0xd1,0x81,0x00,0x10,0x07,0x01,0xff,0xd1,0x82,0x00,0x01,0xff,0xd1,
++	0x83,0x00,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd1,0x84,0x00,0x01,0xff,0xd1,0x85,0x00,
++	0x10,0x07,0x01,0xff,0xd1,0x86,0x00,0x01,0xff,0xd1,0x87,0x00,0xd2,0x1c,0xd1,0x0e,
++	0x10,0x07,0x01,0xff,0xd1,0x88,0x00,0x01,0xff,0xd1,0x89,0x00,0x10,0x07,0x01,0xff,
++	0xd1,0x8a,0x00,0x01,0xff,0xd1,0x8b,0x00,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd1,0x8c,
++	0x00,0x01,0xff,0xd1,0x8d,0x00,0x10,0x07,0x01,0xff,0xd1,0x8e,0x00,0x01,0xff,0xd1,
++	0x8f,0x00,0xcf,0x86,0xd5,0x07,0x64,0xcf,0x41,0x01,0x00,0xd4,0x58,0xd3,0x2c,0xd2,
++	0x16,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd1,0xa1,0x00,0x01,0x00,0x10,0x07,0x01,0xff,
++	0xd1,0xa3,0x00,0x01,0x00,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd1,0xa5,0x00,0x01,0x00,
++	0x10,0x07,0x01,0xff,0xd1,0xa7,0x00,0x01,0x00,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x01,
++	0xff,0xd1,0xa9,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xd1,0xab,0x00,0x01,0x00,0xd1,
++	0x0b,0x10,0x07,0x01,0xff,0xd1,0xad,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xd1,0xaf,
++	0x00,0x01,0x00,0xd3,0x33,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd1,0xb1,0x00,
++	0x01,0x00,0x10,0x07,0x01,0xff,0xd1,0xb3,0x00,0x01,0x00,0xd1,0x0b,0x10,0x07,0x01,
++	0xff,0xd1,0xb5,0x00,0x01,0x00,0x10,0x09,0x01,0xff,0xd1,0xb5,0xcc,0x8f,0x00,0x01,
++	0xff,0xd1,0xb5,0xcc,0x8f,0x00,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd1,0xb9,
++	0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xd1,0xbb,0x00,0x01,0x00,0xd1,0x0b,0x10,0x07,
++	0x01,0xff,0xd1,0xbd,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xd1,0xbf,0x00,0x01,0x00,
++	0xe0,0x41,0x01,0xcf,0x86,0xd5,0x8e,0xd4,0x36,0xd3,0x11,0xe2,0x91,0x41,0xe1,0x88,
++	0x41,0x10,0x07,0x01,0xff,0xd2,0x81,0x00,0x01,0x00,0xd2,0x0f,0x51,0x04,0x04,0x00,
++	0x10,0x07,0x06,0xff,0xd2,0x8b,0x00,0x06,0x00,0xd1,0x0b,0x10,0x07,0x04,0xff,0xd2,
++	0x8d,0x00,0x04,0x00,0x10,0x07,0x04,0xff,0xd2,0x8f,0x00,0x04,0x00,0xd3,0x2c,0xd2,
++	0x16,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd2,0x91,0x00,0x01,0x00,0x10,0x07,0x01,0xff,
++	0xd2,0x93,0x00,0x01,0x00,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd2,0x95,0x00,0x01,0x00,
++	0x10,0x07,0x01,0xff,0xd2,0x97,0x00,0x01,0x00,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x01,
++	0xff,0xd2,0x99,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xd2,0x9b,0x00,0x01,0x00,0xd1,
++	0x0b,0x10,0x07,0x01,0xff,0xd2,0x9d,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xd2,0x9f,
++	0x00,0x01,0x00,0xd4,0x58,0xd3,0x2c,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd2,
++	0xa1,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xd2,0xa3,0x00,0x01,0x00,0xd1,0x0b,0x10,
++	0x07,0x01,0xff,0xd2,0xa5,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xd2,0xa7,0x00,0x01,
++	0x00,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd2,0xa9,0x00,0x01,0x00,0x10,0x07,
++	0x01,0xff,0xd2,0xab,0x00,0x01,0x00,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd2,0xad,0x00,
++	0x01,0x00,0x10,0x07,0x01,0xff,0xd2,0xaf,0x00,0x01,0x00,0xd3,0x2c,0xd2,0x16,0xd1,
++	0x0b,0x10,0x07,0x01,0xff,0xd2,0xb1,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xd2,0xb3,
++	0x00,0x01,0x00,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd2,0xb5,0x00,0x01,0x00,0x10,0x07,
++	0x01,0xff,0xd2,0xb7,0x00,0x01,0x00,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd2,
++	0xb9,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xd2,0xbb,0x00,0x01,0x00,0xd1,0x0b,0x10,
++	0x07,0x01,0xff,0xd2,0xbd,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xd2,0xbf,0x00,0x01,
++	0x00,0xcf,0x86,0xd5,0xdc,0xd4,0x5a,0xd3,0x36,0xd2,0x20,0xd1,0x10,0x10,0x07,0x01,
++	0xff,0xd3,0x8f,0x00,0x01,0xff,0xd0,0xb6,0xcc,0x86,0x00,0x10,0x09,0x01,0xff,0xd0,
++	0xb6,0xcc,0x86,0x00,0x01,0xff,0xd3,0x84,0x00,0xd1,0x0b,0x10,0x04,0x01,0x00,0x06,
++	0xff,0xd3,0x86,0x00,0x10,0x04,0x06,0x00,0x01,0xff,0xd3,0x88,0x00,0xd2,0x16,0xd1,
++	0x0b,0x10,0x04,0x01,0x00,0x06,0xff,0xd3,0x8a,0x00,0x10,0x04,0x06,0x00,0x01,0xff,
++	0xd3,0x8c,0x00,0xe1,0x69,0x40,0x10,0x04,0x01,0x00,0x06,0xff,0xd3,0x8e,0x00,0xd3,
++	0x41,0xd2,0x24,0xd1,0x12,0x10,0x09,0x01,0xff,0xd0,0xb0,0xcc,0x86,0x00,0x01,0xff,
++	0xd0,0xb0,0xcc,0x86,0x00,0x10,0x09,0x01,0xff,0xd0,0xb0,0xcc,0x88,0x00,0x01,0xff,
++	0xd0,0xb0,0xcc,0x88,0x00,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd3,0x95,0x00,0x01,0x00,
++	0x10,0x09,0x01,0xff,0xd0,0xb5,0xcc,0x86,0x00,0x01,0xff,0xd0,0xb5,0xcc,0x86,0x00,
++	0xd2,0x1d,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd3,0x99,0x00,0x01,0x00,0x10,0x09,0x01,
++	0xff,0xd3,0x99,0xcc,0x88,0x00,0x01,0xff,0xd3,0x99,0xcc,0x88,0x00,0xd1,0x12,0x10,
++	0x09,0x01,0xff,0xd0,0xb6,0xcc,0x88,0x00,0x01,0xff,0xd0,0xb6,0xcc,0x88,0x00,0x10,
++	0x09,0x01,0xff,0xd0,0xb7,0xcc,0x88,0x00,0x01,0xff,0xd0,0xb7,0xcc,0x88,0x00,0xd4,
++	0x82,0xd3,0x41,0xd2,0x1d,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd3,0xa1,0x00,0x01,0x00,
++	0x10,0x09,0x01,0xff,0xd0,0xb8,0xcc,0x84,0x00,0x01,0xff,0xd0,0xb8,0xcc,0x84,0x00,
++	0xd1,0x12,0x10,0x09,0x01,0xff,0xd0,0xb8,0xcc,0x88,0x00,0x01,0xff,0xd0,0xb8,0xcc,
++	0x88,0x00,0x10,0x09,0x01,0xff,0xd0,0xbe,0xcc,0x88,0x00,0x01,0xff,0xd0,0xbe,0xcc,
++	0x88,0x00,0xd2,0x1d,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd3,0xa9,0x00,0x01,0x00,0x10,
++	0x09,0x01,0xff,0xd3,0xa9,0xcc,0x88,0x00,0x01,0xff,0xd3,0xa9,0xcc,0x88,0x00,0xd1,
++	0x12,0x10,0x09,0x04,0xff,0xd1,0x8d,0xcc,0x88,0x00,0x04,0xff,0xd1,0x8d,0xcc,0x88,
++	0x00,0x10,0x09,0x01,0xff,0xd1,0x83,0xcc,0x84,0x00,0x01,0xff,0xd1,0x83,0xcc,0x84,
++	0x00,0xd3,0x41,0xd2,0x24,0xd1,0x12,0x10,0x09,0x01,0xff,0xd1,0x83,0xcc,0x88,0x00,
++	0x01,0xff,0xd1,0x83,0xcc,0x88,0x00,0x10,0x09,0x01,0xff,0xd1,0x83,0xcc,0x8b,0x00,
++	0x01,0xff,0xd1,0x83,0xcc,0x8b,0x00,0xd1,0x12,0x10,0x09,0x01,0xff,0xd1,0x87,0xcc,
++	0x88,0x00,0x01,0xff,0xd1,0x87,0xcc,0x88,0x00,0x10,0x07,0x08,0xff,0xd3,0xb7,0x00,
++	0x08,0x00,0xd2,0x1d,0xd1,0x12,0x10,0x09,0x01,0xff,0xd1,0x8b,0xcc,0x88,0x00,0x01,
++	0xff,0xd1,0x8b,0xcc,0x88,0x00,0x10,0x07,0x09,0xff,0xd3,0xbb,0x00,0x09,0x00,0xd1,
++	0x0b,0x10,0x07,0x09,0xff,0xd3,0xbd,0x00,0x09,0x00,0x10,0x07,0x09,0xff,0xd3,0xbf,
++	0x00,0x09,0x00,0xe1,0x26,0x02,0xe0,0x78,0x01,0xcf,0x86,0xd5,0xb0,0xd4,0x58,0xd3,
++	0x2c,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x06,0xff,0xd4,0x81,0x00,0x06,0x00,0x10,0x07,
++	0x06,0xff,0xd4,0x83,0x00,0x06,0x00,0xd1,0x0b,0x10,0x07,0x06,0xff,0xd4,0x85,0x00,
++	0x06,0x00,0x10,0x07,0x06,0xff,0xd4,0x87,0x00,0x06,0x00,0xd2,0x16,0xd1,0x0b,0x10,
++	0x07,0x06,0xff,0xd4,0x89,0x00,0x06,0x00,0x10,0x07,0x06,0xff,0xd4,0x8b,0x00,0x06,
++	0x00,0xd1,0x0b,0x10,0x07,0x06,0xff,0xd4,0x8d,0x00,0x06,0x00,0x10,0x07,0x06,0xff,
++	0xd4,0x8f,0x00,0x06,0x00,0xd3,0x2c,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x09,0xff,0xd4,
++	0x91,0x00,0x09,0x00,0x10,0x07,0x09,0xff,0xd4,0x93,0x00,0x09,0x00,0xd1,0x0b,0x10,
++	0x07,0x0a,0xff,0xd4,0x95,0x00,0x0a,0x00,0x10,0x07,0x0a,0xff,0xd4,0x97,0x00,0x0a,
++	0x00,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x0a,0xff,0xd4,0x99,0x00,0x0a,0x00,0x10,0x07,
++	0x0a,0xff,0xd4,0x9b,0x00,0x0a,0x00,0xd1,0x0b,0x10,0x07,0x0a,0xff,0xd4,0x9d,0x00,
++	0x0a,0x00,0x10,0x07,0x0a,0xff,0xd4,0x9f,0x00,0x0a,0x00,0xd4,0x58,0xd3,0x2c,0xd2,
++	0x16,0xd1,0x0b,0x10,0x07,0x0a,0xff,0xd4,0xa1,0x00,0x0a,0x00,0x10,0x07,0x0a,0xff,
++	0xd4,0xa3,0x00,0x0a,0x00,0xd1,0x0b,0x10,0x07,0x0b,0xff,0xd4,0xa5,0x00,0x0b,0x00,
++	0x10,0x07,0x0c,0xff,0xd4,0xa7,0x00,0x0c,0x00,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x10,
++	0xff,0xd4,0xa9,0x00,0x10,0x00,0x10,0x07,0x10,0xff,0xd4,0xab,0x00,0x10,0x00,0xd1,
++	0x0b,0x10,0x07,0x10,0xff,0xd4,0xad,0x00,0x10,0x00,0x10,0x07,0x10,0xff,0xd4,0xaf,
++	0x00,0x10,0x00,0xd3,0x35,0xd2,0x19,0xd1,0x0b,0x10,0x04,0x00,0x00,0x01,0xff,0xd5,
++	0xa1,0x00,0x10,0x07,0x01,0xff,0xd5,0xa2,0x00,0x01,0xff,0xd5,0xa3,0x00,0xd1,0x0e,
++	0x10,0x07,0x01,0xff,0xd5,0xa4,0x00,0x01,0xff,0xd5,0xa5,0x00,0x10,0x07,0x01,0xff,
++	0xd5,0xa6,0x00,0x01,0xff,0xd5,0xa7,0x00,0xd2,0x1c,0xd1,0x0e,0x10,0x07,0x01,0xff,
++	0xd5,0xa8,0x00,0x01,0xff,0xd5,0xa9,0x00,0x10,0x07,0x01,0xff,0xd5,0xaa,0x00,0x01,
++	0xff,0xd5,0xab,0x00,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd5,0xac,0x00,0x01,0xff,0xd5,
++	0xad,0x00,0x10,0x07,0x01,0xff,0xd5,0xae,0x00,0x01,0xff,0xd5,0xaf,0x00,0xcf,0x86,
++	0xe5,0x08,0x3f,0xd4,0x70,0xd3,0x38,0xd2,0x1c,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd5,
++	0xb0,0x00,0x01,0xff,0xd5,0xb1,0x00,0x10,0x07,0x01,0xff,0xd5,0xb2,0x00,0x01,0xff,
++	0xd5,0xb3,0x00,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd5,0xb4,0x00,0x01,0xff,0xd5,0xb5,
++	0x00,0x10,0x07,0x01,0xff,0xd5,0xb6,0x00,0x01,0xff,0xd5,0xb7,0x00,0xd2,0x1c,0xd1,
++	0x0e,0x10,0x07,0x01,0xff,0xd5,0xb8,0x00,0x01,0xff,0xd5,0xb9,0x00,0x10,0x07,0x01,
++	0xff,0xd5,0xba,0x00,0x01,0xff,0xd5,0xbb,0x00,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd5,
++	0xbc,0x00,0x01,0xff,0xd5,0xbd,0x00,0x10,0x07,0x01,0xff,0xd5,0xbe,0x00,0x01,0xff,
++	0xd5,0xbf,0x00,0xe3,0x87,0x3e,0xd2,0x1c,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd6,0x80,
++	0x00,0x01,0xff,0xd6,0x81,0x00,0x10,0x07,0x01,0xff,0xd6,0x82,0x00,0x01,0xff,0xd6,
++	0x83,0x00,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd6,0x84,0x00,0x01,0xff,0xd6,0x85,0x00,
++	0x10,0x07,0x01,0xff,0xd6,0x86,0x00,0x00,0x00,0xe0,0x2f,0x3f,0xcf,0x86,0xe5,0xc0,
++	0x3e,0xe4,0x97,0x3e,0xe3,0x76,0x3e,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,
++	0x04,0x01,0x00,0x01,0xff,0xd5,0xa5,0xd6,0x82,0x00,0xe4,0x3e,0x25,0xe3,0xc3,0x1a,
++	0xe2,0x7b,0x81,0xe1,0xc0,0x13,0xd0,0x1e,0xcf,0x86,0xc5,0xe4,0x08,0x4b,0xe3,0x53,
++	0x46,0xe2,0xe9,0x43,0xe1,0x1c,0x43,0xe0,0xe1,0x42,0xcf,0x86,0xe5,0xa6,0x42,0x64,
++	0x89,0x42,0x0b,0x00,0xcf,0x86,0xe5,0xfa,0x01,0xe4,0x03,0x56,0xe3,0x76,0x01,0xe2,
++	0x8e,0x53,0xd1,0x0c,0xe0,0xef,0x52,0xcf,0x86,0x65,0x8d,0x52,0x04,0x00,0xe0,0x0d,
++	0x01,0xcf,0x86,0xd5,0x0a,0xe4,0x10,0x53,0x63,0xff,0x52,0x0a,0x00,0xd4,0x80,0xd3,
++	0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe2,0xb4,0x80,0x00,0x01,0xff,0xe2,
++	0xb4,0x81,0x00,0x10,0x08,0x01,0xff,0xe2,0xb4,0x82,0x00,0x01,0xff,0xe2,0xb4,0x83,
++	0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe2,0xb4,0x84,0x00,0x01,0xff,0xe2,0xb4,0x85,
++	0x00,0x10,0x08,0x01,0xff,0xe2,0xb4,0x86,0x00,0x01,0xff,0xe2,0xb4,0x87,0x00,0xd2,
++	0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe2,0xb4,0x88,0x00,0x01,0xff,0xe2,0xb4,0x89,
++	0x00,0x10,0x08,0x01,0xff,0xe2,0xb4,0x8a,0x00,0x01,0xff,0xe2,0xb4,0x8b,0x00,0xd1,
++	0x10,0x10,0x08,0x01,0xff,0xe2,0xb4,0x8c,0x00,0x01,0xff,0xe2,0xb4,0x8d,0x00,0x10,
++	0x08,0x01,0xff,0xe2,0xb4,0x8e,0x00,0x01,0xff,0xe2,0xb4,0x8f,0x00,0xd3,0x40,0xd2,
++	0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe2,0xb4,0x90,0x00,0x01,0xff,0xe2,0xb4,0x91,
++	0x00,0x10,0x08,0x01,0xff,0xe2,0xb4,0x92,0x00,0x01,0xff,0xe2,0xb4,0x93,0x00,0xd1,
++	0x10,0x10,0x08,0x01,0xff,0xe2,0xb4,0x94,0x00,0x01,0xff,0xe2,0xb4,0x95,0x00,0x10,
++	0x08,0x01,0xff,0xe2,0xb4,0x96,0x00,0x01,0xff,0xe2,0xb4,0x97,0x00,0xd2,0x20,0xd1,
++	0x10,0x10,0x08,0x01,0xff,0xe2,0xb4,0x98,0x00,0x01,0xff,0xe2,0xb4,0x99,0x00,0x10,
++	0x08,0x01,0xff,0xe2,0xb4,0x9a,0x00,0x01,0xff,0xe2,0xb4,0x9b,0x00,0xd1,0x10,0x10,
++	0x08,0x01,0xff,0xe2,0xb4,0x9c,0x00,0x01,0xff,0xe2,0xb4,0x9d,0x00,0x10,0x08,0x01,
++	0xff,0xe2,0xb4,0x9e,0x00,0x01,0xff,0xe2,0xb4,0x9f,0x00,0xcf,0x86,0xe5,0x42,0x52,
++	0x94,0x50,0xd3,0x3c,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe2,0xb4,0xa0,0x00,
++	0x01,0xff,0xe2,0xb4,0xa1,0x00,0x10,0x08,0x01,0xff,0xe2,0xb4,0xa2,0x00,0x01,0xff,
++	0xe2,0xb4,0xa3,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe2,0xb4,0xa4,0x00,0x01,0xff,
++	0xe2,0xb4,0xa5,0x00,0x10,0x04,0x00,0x00,0x0d,0xff,0xe2,0xb4,0xa7,0x00,0x52,0x04,
++	0x00,0x00,0x91,0x0c,0x10,0x04,0x00,0x00,0x0d,0xff,0xe2,0xb4,0xad,0x00,0x00,0x00,
++	0x01,0x00,0xd2,0x1b,0xe1,0xfc,0x52,0xe0,0xad,0x52,0xcf,0x86,0x95,0x0f,0x94,0x0b,
++	0x93,0x07,0x62,0x92,0x52,0x04,0x00,0x04,0x00,0x04,0x00,0x04,0x00,0xd1,0x13,0xe0,
++	0xd3,0x53,0xcf,0x86,0x95,0x0a,0xe4,0xa8,0x53,0x63,0x97,0x53,0x04,0x00,0x04,0x00,
++	0xd0,0x0d,0xcf,0x86,0x95,0x07,0x64,0x22,0x54,0x08,0x00,0x04,0x00,0xcf,0x86,0x55,
++	0x04,0x04,0x00,0x54,0x04,0x04,0x00,0xd3,0x07,0x62,0x2f,0x54,0x04,0x00,0xd2,0x20,
++	0xd1,0x10,0x10,0x08,0x11,0xff,0xe1,0x8f,0xb0,0x00,0x11,0xff,0xe1,0x8f,0xb1,0x00,
++	0x10,0x08,0x11,0xff,0xe1,0x8f,0xb2,0x00,0x11,0xff,0xe1,0x8f,0xb3,0x00,0x91,0x10,
++	0x10,0x08,0x11,0xff,0xe1,0x8f,0xb4,0x00,0x11,0xff,0xe1,0x8f,0xb5,0x00,0x00,0x00,
++	0xd4,0x1c,0xe3,0xe0,0x56,0xe2,0x17,0x56,0xe1,0xda,0x55,0xe0,0xbb,0x55,0xcf,0x86,
++	0x95,0x0a,0xe4,0xa4,0x55,0x63,0x88,0x55,0x04,0x00,0x04,0x00,0xe3,0xd2,0x01,0xe2,
++	0x2b,0x5a,0xd1,0x0c,0xe0,0x4c,0x59,0xcf,0x86,0x65,0x25,0x59,0x0a,0x00,0xe0,0x9c,
++	0x59,0xcf,0x86,0xd5,0xc5,0xd4,0x45,0xd3,0x31,0xd2,0x1c,0xd1,0x0e,0x10,0x07,0x12,
++	0xff,0xd0,0xb2,0x00,0x12,0xff,0xd0,0xb4,0x00,0x10,0x07,0x12,0xff,0xd0,0xbe,0x00,
++	0x12,0xff,0xd1,0x81,0x00,0x51,0x07,0x12,0xff,0xd1,0x82,0x00,0x10,0x07,0x12,0xff,
++	0xd1,0x8a,0x00,0x12,0xff,0xd1,0xa3,0x00,0x92,0x10,0x91,0x0c,0x10,0x08,0x12,0xff,
++	0xea,0x99,0x8b,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,
++	0x10,0x08,0x14,0xff,0xe1,0x83,0x90,0x00,0x14,0xff,0xe1,0x83,0x91,0x00,0x10,0x08,
++	0x14,0xff,0xe1,0x83,0x92,0x00,0x14,0xff,0xe1,0x83,0x93,0x00,0xd1,0x10,0x10,0x08,
++	0x14,0xff,0xe1,0x83,0x94,0x00,0x14,0xff,0xe1,0x83,0x95,0x00,0x10,0x08,0x14,0xff,
++	0xe1,0x83,0x96,0x00,0x14,0xff,0xe1,0x83,0x97,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,
++	0x14,0xff,0xe1,0x83,0x98,0x00,0x14,0xff,0xe1,0x83,0x99,0x00,0x10,0x08,0x14,0xff,
++	0xe1,0x83,0x9a,0x00,0x14,0xff,0xe1,0x83,0x9b,0x00,0xd1,0x10,0x10,0x08,0x14,0xff,
++	0xe1,0x83,0x9c,0x00,0x14,0xff,0xe1,0x83,0x9d,0x00,0x10,0x08,0x14,0xff,0xe1,0x83,
++	0x9e,0x00,0x14,0xff,0xe1,0x83,0x9f,0x00,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,
++	0x10,0x08,0x14,0xff,0xe1,0x83,0xa0,0x00,0x14,0xff,0xe1,0x83,0xa1,0x00,0x10,0x08,
++	0x14,0xff,0xe1,0x83,0xa2,0x00,0x14,0xff,0xe1,0x83,0xa3,0x00,0xd1,0x10,0x10,0x08,
++	0x14,0xff,0xe1,0x83,0xa4,0x00,0x14,0xff,0xe1,0x83,0xa5,0x00,0x10,0x08,0x14,0xff,
++	0xe1,0x83,0xa6,0x00,0x14,0xff,0xe1,0x83,0xa7,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,
++	0x14,0xff,0xe1,0x83,0xa8,0x00,0x14,0xff,0xe1,0x83,0xa9,0x00,0x10,0x08,0x14,0xff,
++	0xe1,0x83,0xaa,0x00,0x14,0xff,0xe1,0x83,0xab,0x00,0xd1,0x10,0x10,0x08,0x14,0xff,
++	0xe1,0x83,0xac,0x00,0x14,0xff,0xe1,0x83,0xad,0x00,0x10,0x08,0x14,0xff,0xe1,0x83,
++	0xae,0x00,0x14,0xff,0xe1,0x83,0xaf,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,
++	0x14,0xff,0xe1,0x83,0xb0,0x00,0x14,0xff,0xe1,0x83,0xb1,0x00,0x10,0x08,0x14,0xff,
++	0xe1,0x83,0xb2,0x00,0x14,0xff,0xe1,0x83,0xb3,0x00,0xd1,0x10,0x10,0x08,0x14,0xff,
++	0xe1,0x83,0xb4,0x00,0x14,0xff,0xe1,0x83,0xb5,0x00,0x10,0x08,0x14,0xff,0xe1,0x83,
++	0xb6,0x00,0x14,0xff,0xe1,0x83,0xb7,0x00,0xd2,0x1c,0xd1,0x10,0x10,0x08,0x14,0xff,
++	0xe1,0x83,0xb8,0x00,0x14,0xff,0xe1,0x83,0xb9,0x00,0x10,0x08,0x14,0xff,0xe1,0x83,
++	0xba,0x00,0x00,0x00,0xd1,0x0c,0x10,0x04,0x00,0x00,0x14,0xff,0xe1,0x83,0xbd,0x00,
++	0x10,0x08,0x14,0xff,0xe1,0x83,0xbe,0x00,0x14,0xff,0xe1,0x83,0xbf,0x00,0xe2,0x9d,
++	0x08,0xe1,0x48,0x04,0xe0,0x1c,0x02,0xcf,0x86,0xe5,0x11,0x01,0xd4,0x84,0xd3,0x40,
++	0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x61,0xcc,0xa5,0x00,0x01,0xff,0x61,0xcc,
++	0xa5,0x00,0x10,0x08,0x01,0xff,0x62,0xcc,0x87,0x00,0x01,0xff,0x62,0xcc,0x87,0x00,
++	0xd1,0x10,0x10,0x08,0x01,0xff,0x62,0xcc,0xa3,0x00,0x01,0xff,0x62,0xcc,0xa3,0x00,
++	0x10,0x08,0x01,0xff,0x62,0xcc,0xb1,0x00,0x01,0xff,0x62,0xcc,0xb1,0x00,0xd2,0x24,
++	0xd1,0x14,0x10,0x0a,0x01,0xff,0x63,0xcc,0xa7,0xcc,0x81,0x00,0x01,0xff,0x63,0xcc,
++	0xa7,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x64,0xcc,0x87,0x00,0x01,0xff,0x64,0xcc,
++	0x87,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x64,0xcc,0xa3,0x00,0x01,0xff,0x64,0xcc,
++	0xa3,0x00,0x10,0x08,0x01,0xff,0x64,0xcc,0xb1,0x00,0x01,0xff,0x64,0xcc,0xb1,0x00,
++	0xd3,0x48,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x64,0xcc,0xa7,0x00,0x01,0xff,
++	0x64,0xcc,0xa7,0x00,0x10,0x08,0x01,0xff,0x64,0xcc,0xad,0x00,0x01,0xff,0x64,0xcc,
++	0xad,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x65,0xcc,0x84,0xcc,0x80,0x00,0x01,0xff,
++	0x65,0xcc,0x84,0xcc,0x80,0x00,0x10,0x0a,0x01,0xff,0x65,0xcc,0x84,0xcc,0x81,0x00,
++	0x01,0xff,0x65,0xcc,0x84,0xcc,0x81,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,
++	0x65,0xcc,0xad,0x00,0x01,0xff,0x65,0xcc,0xad,0x00,0x10,0x08,0x01,0xff,0x65,0xcc,
++	0xb0,0x00,0x01,0xff,0x65,0xcc,0xb0,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x65,0xcc,
++	0xa7,0xcc,0x86,0x00,0x01,0xff,0x65,0xcc,0xa7,0xcc,0x86,0x00,0x10,0x08,0x01,0xff,
++	0x66,0xcc,0x87,0x00,0x01,0xff,0x66,0xcc,0x87,0x00,0xd4,0x84,0xd3,0x40,0xd2,0x20,
++	0xd1,0x10,0x10,0x08,0x01,0xff,0x67,0xcc,0x84,0x00,0x01,0xff,0x67,0xcc,0x84,0x00,
++	0x10,0x08,0x01,0xff,0x68,0xcc,0x87,0x00,0x01,0xff,0x68,0xcc,0x87,0x00,0xd1,0x10,
++	0x10,0x08,0x01,0xff,0x68,0xcc,0xa3,0x00,0x01,0xff,0x68,0xcc,0xa3,0x00,0x10,0x08,
++	0x01,0xff,0x68,0xcc,0x88,0x00,0x01,0xff,0x68,0xcc,0x88,0x00,0xd2,0x20,0xd1,0x10,
++	0x10,0x08,0x01,0xff,0x68,0xcc,0xa7,0x00,0x01,0xff,0x68,0xcc,0xa7,0x00,0x10,0x08,
++	0x01,0xff,0x68,0xcc,0xae,0x00,0x01,0xff,0x68,0xcc,0xae,0x00,0xd1,0x10,0x10,0x08,
++	0x01,0xff,0x69,0xcc,0xb0,0x00,0x01,0xff,0x69,0xcc,0xb0,0x00,0x10,0x0a,0x01,0xff,
++	0x69,0xcc,0x88,0xcc,0x81,0x00,0x01,0xff,0x69,0xcc,0x88,0xcc,0x81,0x00,0xd3,0x40,
++	0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x6b,0xcc,0x81,0x00,0x01,0xff,0x6b,0xcc,
++	0x81,0x00,0x10,0x08,0x01,0xff,0x6b,0xcc,0xa3,0x00,0x01,0xff,0x6b,0xcc,0xa3,0x00,
++	0xd1,0x10,0x10,0x08,0x01,0xff,0x6b,0xcc,0xb1,0x00,0x01,0xff,0x6b,0xcc,0xb1,0x00,
++	0x10,0x08,0x01,0xff,0x6c,0xcc,0xa3,0x00,0x01,0xff,0x6c,0xcc,0xa3,0x00,0xd2,0x24,
++	0xd1,0x14,0x10,0x0a,0x01,0xff,0x6c,0xcc,0xa3,0xcc,0x84,0x00,0x01,0xff,0x6c,0xcc,
++	0xa3,0xcc,0x84,0x00,0x10,0x08,0x01,0xff,0x6c,0xcc,0xb1,0x00,0x01,0xff,0x6c,0xcc,
++	0xb1,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x6c,0xcc,0xad,0x00,0x01,0xff,0x6c,0xcc,
++	0xad,0x00,0x10,0x08,0x01,0xff,0x6d,0xcc,0x81,0x00,0x01,0xff,0x6d,0xcc,0x81,0x00,
++	0xcf,0x86,0xe5,0x15,0x01,0xd4,0x88,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,
++	0xff,0x6d,0xcc,0x87,0x00,0x01,0xff,0x6d,0xcc,0x87,0x00,0x10,0x08,0x01,0xff,0x6d,
++	0xcc,0xa3,0x00,0x01,0xff,0x6d,0xcc,0xa3,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x6e,
++	0xcc,0x87,0x00,0x01,0xff,0x6e,0xcc,0x87,0x00,0x10,0x08,0x01,0xff,0x6e,0xcc,0xa3,
++	0x00,0x01,0xff,0x6e,0xcc,0xa3,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x6e,
++	0xcc,0xb1,0x00,0x01,0xff,0x6e,0xcc,0xb1,0x00,0x10,0x08,0x01,0xff,0x6e,0xcc,0xad,
++	0x00,0x01,0xff,0x6e,0xcc,0xad,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x6f,0xcc,0x83,
++	0xcc,0x81,0x00,0x01,0xff,0x6f,0xcc,0x83,0xcc,0x81,0x00,0x10,0x0a,0x01,0xff,0x6f,
++	0xcc,0x83,0xcc,0x88,0x00,0x01,0xff,0x6f,0xcc,0x83,0xcc,0x88,0x00,0xd3,0x48,0xd2,
++	0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x6f,0xcc,0x84,0xcc,0x80,0x00,0x01,0xff,0x6f,
++	0xcc,0x84,0xcc,0x80,0x00,0x10,0x0a,0x01,0xff,0x6f,0xcc,0x84,0xcc,0x81,0x00,0x01,
++	0xff,0x6f,0xcc,0x84,0xcc,0x81,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x70,0xcc,0x81,
++	0x00,0x01,0xff,0x70,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x70,0xcc,0x87,0x00,0x01,
++	0xff,0x70,0xcc,0x87,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x72,0xcc,0x87,
++	0x00,0x01,0xff,0x72,0xcc,0x87,0x00,0x10,0x08,0x01,0xff,0x72,0xcc,0xa3,0x00,0x01,
++	0xff,0x72,0xcc,0xa3,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x72,0xcc,0xa3,0xcc,0x84,
++	0x00,0x01,0xff,0x72,0xcc,0xa3,0xcc,0x84,0x00,0x10,0x08,0x01,0xff,0x72,0xcc,0xb1,
++	0x00,0x01,0xff,0x72,0xcc,0xb1,0x00,0xd4,0x8c,0xd3,0x48,0xd2,0x20,0xd1,0x10,0x10,
++	0x08,0x01,0xff,0x73,0xcc,0x87,0x00,0x01,0xff,0x73,0xcc,0x87,0x00,0x10,0x08,0x01,
++	0xff,0x73,0xcc,0xa3,0x00,0x01,0xff,0x73,0xcc,0xa3,0x00,0xd1,0x14,0x10,0x0a,0x01,
++	0xff,0x73,0xcc,0x81,0xcc,0x87,0x00,0x01,0xff,0x73,0xcc,0x81,0xcc,0x87,0x00,0x10,
++	0x0a,0x01,0xff,0x73,0xcc,0x8c,0xcc,0x87,0x00,0x01,0xff,0x73,0xcc,0x8c,0xcc,0x87,
++	0x00,0xd2,0x24,0xd1,0x14,0x10,0x0a,0x01,0xff,0x73,0xcc,0xa3,0xcc,0x87,0x00,0x01,
++	0xff,0x73,0xcc,0xa3,0xcc,0x87,0x00,0x10,0x08,0x01,0xff,0x74,0xcc,0x87,0x00,0x01,
++	0xff,0x74,0xcc,0x87,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x74,0xcc,0xa3,0x00,0x01,
++	0xff,0x74,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x74,0xcc,0xb1,0x00,0x01,0xff,0x74,
++	0xcc,0xb1,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x74,0xcc,0xad,
++	0x00,0x01,0xff,0x74,0xcc,0xad,0x00,0x10,0x08,0x01,0xff,0x75,0xcc,0xa4,0x00,0x01,
++	0xff,0x75,0xcc,0xa4,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x75,0xcc,0xb0,0x00,0x01,
++	0xff,0x75,0xcc,0xb0,0x00,0x10,0x08,0x01,0xff,0x75,0xcc,0xad,0x00,0x01,0xff,0x75,
++	0xcc,0xad,0x00,0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x75,0xcc,0x83,0xcc,0x81,
++	0x00,0x01,0xff,0x75,0xcc,0x83,0xcc,0x81,0x00,0x10,0x0a,0x01,0xff,0x75,0xcc,0x84,
++	0xcc,0x88,0x00,0x01,0xff,0x75,0xcc,0x84,0xcc,0x88,0x00,0xd1,0x10,0x10,0x08,0x01,
++	0xff,0x76,0xcc,0x83,0x00,0x01,0xff,0x76,0xcc,0x83,0x00,0x10,0x08,0x01,0xff,0x76,
++	0xcc,0xa3,0x00,0x01,0xff,0x76,0xcc,0xa3,0x00,0xe0,0x11,0x02,0xcf,0x86,0xd5,0xe2,
++	0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x77,0xcc,0x80,0x00,
++	0x01,0xff,0x77,0xcc,0x80,0x00,0x10,0x08,0x01,0xff,0x77,0xcc,0x81,0x00,0x01,0xff,
++	0x77,0xcc,0x81,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x77,0xcc,0x88,0x00,0x01,0xff,
++	0x77,0xcc,0x88,0x00,0x10,0x08,0x01,0xff,0x77,0xcc,0x87,0x00,0x01,0xff,0x77,0xcc,
++	0x87,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x77,0xcc,0xa3,0x00,0x01,0xff,
++	0x77,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x78,0xcc,0x87,0x00,0x01,0xff,0x78,0xcc,
++	0x87,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x78,0xcc,0x88,0x00,0x01,0xff,0x78,0xcc,
++	0x88,0x00,0x10,0x08,0x01,0xff,0x79,0xcc,0x87,0x00,0x01,0xff,0x79,0xcc,0x87,0x00,
++	0xd3,0x33,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x7a,0xcc,0x82,0x00,0x01,0xff,
++	0x7a,0xcc,0x82,0x00,0x10,0x08,0x01,0xff,0x7a,0xcc,0xa3,0x00,0x01,0xff,0x7a,0xcc,
++	0xa3,0x00,0xe1,0x12,0x59,0x10,0x08,0x01,0xff,0x7a,0xcc,0xb1,0x00,0x01,0xff,0x7a,
++	0xcc,0xb1,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x77,0xcc,0x8a,0x00,0x01,
++	0xff,0x79,0xcc,0x8a,0x00,0x10,0x08,0x01,0xff,0x61,0xca,0xbe,0x00,0x02,0xff,0x73,
++	0xcc,0x87,0x00,0x51,0x04,0x0a,0x00,0x10,0x07,0x0a,0xff,0x73,0x73,0x00,0x0a,0x00,
++	0xd4,0x98,0xd3,0x48,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x61,0xcc,0xa3,0x00,
++	0x01,0xff,0x61,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x61,0xcc,0x89,0x00,0x01,0xff,
++	0x61,0xcc,0x89,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x61,0xcc,0x82,0xcc,0x81,0x00,
++	0x01,0xff,0x61,0xcc,0x82,0xcc,0x81,0x00,0x10,0x0a,0x01,0xff,0x61,0xcc,0x82,0xcc,
+ 	0x80,0x00,0x01,0xff,0x61,0xcc,0x82,0xcc,0x80,0x00,0xd2,0x28,0xd1,0x14,0x10,0x0a,
+-	0x01,0xff,0x41,0xcc,0x82,0xcc,0x89,0x00,0x01,0xff,0x61,0xcc,0x82,0xcc,0x89,0x00,
+-	0x10,0x0a,0x01,0xff,0x41,0xcc,0x82,0xcc,0x83,0x00,0x01,0xff,0x61,0xcc,0x82,0xcc,
+-	0x83,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x41,0xcc,0xa3,0xcc,0x82,0x00,0x01,0xff,
+-	0x61,0xcc,0xa3,0xcc,0x82,0x00,0x10,0x0a,0x01,0xff,0x41,0xcc,0x86,0xcc,0x81,0x00,
++	0x01,0xff,0x61,0xcc,0x82,0xcc,0x89,0x00,0x01,0xff,0x61,0xcc,0x82,0xcc,0x89,0x00,
++	0x10,0x0a,0x01,0xff,0x61,0xcc,0x82,0xcc,0x83,0x00,0x01,0xff,0x61,0xcc,0x82,0xcc,
++	0x83,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x61,0xcc,0xa3,0xcc,0x82,0x00,0x01,0xff,
++	0x61,0xcc,0xa3,0xcc,0x82,0x00,0x10,0x0a,0x01,0xff,0x61,0xcc,0x86,0xcc,0x81,0x00,
+ 	0x01,0xff,0x61,0xcc,0x86,0xcc,0x81,0x00,0xd3,0x50,0xd2,0x28,0xd1,0x14,0x10,0x0a,
+-	0x01,0xff,0x41,0xcc,0x86,0xcc,0x80,0x00,0x01,0xff,0x61,0xcc,0x86,0xcc,0x80,0x00,
+-	0x10,0x0a,0x01,0xff,0x41,0xcc,0x86,0xcc,0x89,0x00,0x01,0xff,0x61,0xcc,0x86,0xcc,
+-	0x89,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x41,0xcc,0x86,0xcc,0x83,0x00,0x01,0xff,
+-	0x61,0xcc,0x86,0xcc,0x83,0x00,0x10,0x0a,0x01,0xff,0x41,0xcc,0xa3,0xcc,0x86,0x00,
++	0x01,0xff,0x61,0xcc,0x86,0xcc,0x80,0x00,0x01,0xff,0x61,0xcc,0x86,0xcc,0x80,0x00,
++	0x10,0x0a,0x01,0xff,0x61,0xcc,0x86,0xcc,0x89,0x00,0x01,0xff,0x61,0xcc,0x86,0xcc,
++	0x89,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x61,0xcc,0x86,0xcc,0x83,0x00,0x01,0xff,
++	0x61,0xcc,0x86,0xcc,0x83,0x00,0x10,0x0a,0x01,0xff,0x61,0xcc,0xa3,0xcc,0x86,0x00,
+ 	0x01,0xff,0x61,0xcc,0xa3,0xcc,0x86,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,
+-	0x45,0xcc,0xa3,0x00,0x01,0xff,0x65,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x45,0xcc,
+-	0x89,0x00,0x01,0xff,0x65,0xcc,0x89,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x45,0xcc,
+-	0x83,0x00,0x01,0xff,0x65,0xcc,0x83,0x00,0x10,0x0a,0x01,0xff,0x45,0xcc,0x82,0xcc,
++	0x65,0xcc,0xa3,0x00,0x01,0xff,0x65,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x65,0xcc,
++	0x89,0x00,0x01,0xff,0x65,0xcc,0x89,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x65,0xcc,
++	0x83,0x00,0x01,0xff,0x65,0xcc,0x83,0x00,0x10,0x0a,0x01,0xff,0x65,0xcc,0x82,0xcc,
+ 	0x81,0x00,0x01,0xff,0x65,0xcc,0x82,0xcc,0x81,0x00,0xcf,0x86,0xe5,0x31,0x01,0xd4,
+-	0x90,0xd3,0x50,0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x45,0xcc,0x82,0xcc,0x80,
+-	0x00,0x01,0xff,0x65,0xcc,0x82,0xcc,0x80,0x00,0x10,0x0a,0x01,0xff,0x45,0xcc,0x82,
++	0x90,0xd3,0x50,0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x65,0xcc,0x82,0xcc,0x80,
++	0x00,0x01,0xff,0x65,0xcc,0x82,0xcc,0x80,0x00,0x10,0x0a,0x01,0xff,0x65,0xcc,0x82,
+ 	0xcc,0x89,0x00,0x01,0xff,0x65,0xcc,0x82,0xcc,0x89,0x00,0xd1,0x14,0x10,0x0a,0x01,
+-	0xff,0x45,0xcc,0x82,0xcc,0x83,0x00,0x01,0xff,0x65,0xcc,0x82,0xcc,0x83,0x00,0x10,
+-	0x0a,0x01,0xff,0x45,0xcc,0xa3,0xcc,0x82,0x00,0x01,0xff,0x65,0xcc,0xa3,0xcc,0x82,
+-	0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x49,0xcc,0x89,0x00,0x01,0xff,0x69,
+-	0xcc,0x89,0x00,0x10,0x08,0x01,0xff,0x49,0xcc,0xa3,0x00,0x01,0xff,0x69,0xcc,0xa3,
+-	0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x4f,0xcc,0xa3,0x00,0x01,0xff,0x6f,0xcc,0xa3,
+-	0x00,0x10,0x08,0x01,0xff,0x4f,0xcc,0x89,0x00,0x01,0xff,0x6f,0xcc,0x89,0x00,0xd3,
+-	0x50,0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x4f,0xcc,0x82,0xcc,0x81,0x00,0x01,
+-	0xff,0x6f,0xcc,0x82,0xcc,0x81,0x00,0x10,0x0a,0x01,0xff,0x4f,0xcc,0x82,0xcc,0x80,
+-	0x00,0x01,0xff,0x6f,0xcc,0x82,0xcc,0x80,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x4f,
++	0xff,0x65,0xcc,0x82,0xcc,0x83,0x00,0x01,0xff,0x65,0xcc,0x82,0xcc,0x83,0x00,0x10,
++	0x0a,0x01,0xff,0x65,0xcc,0xa3,0xcc,0x82,0x00,0x01,0xff,0x65,0xcc,0xa3,0xcc,0x82,
++	0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x69,0xcc,0x89,0x00,0x01,0xff,0x69,
++	0xcc,0x89,0x00,0x10,0x08,0x01,0xff,0x69,0xcc,0xa3,0x00,0x01,0xff,0x69,0xcc,0xa3,
++	0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x6f,0xcc,0xa3,0x00,0x01,0xff,0x6f,0xcc,0xa3,
++	0x00,0x10,0x08,0x01,0xff,0x6f,0xcc,0x89,0x00,0x01,0xff,0x6f,0xcc,0x89,0x00,0xd3,
++	0x50,0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x6f,0xcc,0x82,0xcc,0x81,0x00,0x01,
++	0xff,0x6f,0xcc,0x82,0xcc,0x81,0x00,0x10,0x0a,0x01,0xff,0x6f,0xcc,0x82,0xcc,0x80,
++	0x00,0x01,0xff,0x6f,0xcc,0x82,0xcc,0x80,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x6f,
+ 	0xcc,0x82,0xcc,0x89,0x00,0x01,0xff,0x6f,0xcc,0x82,0xcc,0x89,0x00,0x10,0x0a,0x01,
+-	0xff,0x4f,0xcc,0x82,0xcc,0x83,0x00,0x01,0xff,0x6f,0xcc,0x82,0xcc,0x83,0x00,0xd2,
+-	0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x4f,0xcc,0xa3,0xcc,0x82,0x00,0x01,0xff,0x6f,
+-	0xcc,0xa3,0xcc,0x82,0x00,0x10,0x0a,0x01,0xff,0x4f,0xcc,0x9b,0xcc,0x81,0x00,0x01,
+-	0xff,0x6f,0xcc,0x9b,0xcc,0x81,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x4f,0xcc,0x9b,
+-	0xcc,0x80,0x00,0x01,0xff,0x6f,0xcc,0x9b,0xcc,0x80,0x00,0x10,0x0a,0x01,0xff,0x4f,
++	0xff,0x6f,0xcc,0x82,0xcc,0x83,0x00,0x01,0xff,0x6f,0xcc,0x82,0xcc,0x83,0x00,0xd2,
++	0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x6f,0xcc,0xa3,0xcc,0x82,0x00,0x01,0xff,0x6f,
++	0xcc,0xa3,0xcc,0x82,0x00,0x10,0x0a,0x01,0xff,0x6f,0xcc,0x9b,0xcc,0x81,0x00,0x01,
++	0xff,0x6f,0xcc,0x9b,0xcc,0x81,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x6f,0xcc,0x9b,
++	0xcc,0x80,0x00,0x01,0xff,0x6f,0xcc,0x9b,0xcc,0x80,0x00,0x10,0x0a,0x01,0xff,0x6f,
+ 	0xcc,0x9b,0xcc,0x89,0x00,0x01,0xff,0x6f,0xcc,0x9b,0xcc,0x89,0x00,0xd4,0x98,0xd3,
+-	0x48,0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x4f,0xcc,0x9b,0xcc,0x83,0x00,0x01,
+-	0xff,0x6f,0xcc,0x9b,0xcc,0x83,0x00,0x10,0x0a,0x01,0xff,0x4f,0xcc,0x9b,0xcc,0xa3,
+-	0x00,0x01,0xff,0x6f,0xcc,0x9b,0xcc,0xa3,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x55,
+-	0xcc,0xa3,0x00,0x01,0xff,0x75,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x55,0xcc,0x89,
+-	0x00,0x01,0xff,0x75,0xcc,0x89,0x00,0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x55,
++	0x48,0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x6f,0xcc,0x9b,0xcc,0x83,0x00,0x01,
++	0xff,0x6f,0xcc,0x9b,0xcc,0x83,0x00,0x10,0x0a,0x01,0xff,0x6f,0xcc,0x9b,0xcc,0xa3,
++	0x00,0x01,0xff,0x6f,0xcc,0x9b,0xcc,0xa3,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x75,
++	0xcc,0xa3,0x00,0x01,0xff,0x75,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x75,0xcc,0x89,
++	0x00,0x01,0xff,0x75,0xcc,0x89,0x00,0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x75,
+ 	0xcc,0x9b,0xcc,0x81,0x00,0x01,0xff,0x75,0xcc,0x9b,0xcc,0x81,0x00,0x10,0x0a,0x01,
+-	0xff,0x55,0xcc,0x9b,0xcc,0x80,0x00,0x01,0xff,0x75,0xcc,0x9b,0xcc,0x80,0x00,0xd1,
+-	0x14,0x10,0x0a,0x01,0xff,0x55,0xcc,0x9b,0xcc,0x89,0x00,0x01,0xff,0x75,0xcc,0x9b,
+-	0xcc,0x89,0x00,0x10,0x0a,0x01,0xff,0x55,0xcc,0x9b,0xcc,0x83,0x00,0x01,0xff,0x75,
+-	0xcc,0x9b,0xcc,0x83,0x00,0xd3,0x44,0xd2,0x24,0xd1,0x14,0x10,0x0a,0x01,0xff,0x55,
++	0xff,0x75,0xcc,0x9b,0xcc,0x80,0x00,0x01,0xff,0x75,0xcc,0x9b,0xcc,0x80,0x00,0xd1,
++	0x14,0x10,0x0a,0x01,0xff,0x75,0xcc,0x9b,0xcc,0x89,0x00,0x01,0xff,0x75,0xcc,0x9b,
++	0xcc,0x89,0x00,0x10,0x0a,0x01,0xff,0x75,0xcc,0x9b,0xcc,0x83,0x00,0x01,0xff,0x75,
++	0xcc,0x9b,0xcc,0x83,0x00,0xd3,0x44,0xd2,0x24,0xd1,0x14,0x10,0x0a,0x01,0xff,0x75,
+ 	0xcc,0x9b,0xcc,0xa3,0x00,0x01,0xff,0x75,0xcc,0x9b,0xcc,0xa3,0x00,0x10,0x08,0x01,
+-	0xff,0x59,0xcc,0x80,0x00,0x01,0xff,0x79,0xcc,0x80,0x00,0xd1,0x10,0x10,0x08,0x01,
+-	0xff,0x59,0xcc,0xa3,0x00,0x01,0xff,0x79,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x59,
+-	0xcc,0x89,0x00,0x01,0xff,0x79,0xcc,0x89,0x00,0x92,0x14,0x91,0x10,0x10,0x08,0x01,
+-	0xff,0x59,0xcc,0x83,0x00,0x01,0xff,0x79,0xcc,0x83,0x00,0x0a,0x00,0x0a,0x00,0xe1,
+-	0xc0,0x04,0xe0,0x80,0x02,0xcf,0x86,0xe5,0x2d,0x01,0xd4,0xa8,0xd3,0x54,0xd2,0x28,
+-	0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xb1,0xcc,0x93,0x00,0x01,0xff,0xce,0xb1,0xcc,
+-	0x94,0x00,0x10,0x0b,0x01,0xff,0xce,0xb1,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xce,
+-	0xb1,0xcc,0x94,0xcc,0x80,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0xb1,0xcc,0x93,
+-	0xcc,0x81,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,0xcc,0x81,0x00,0x10,0x0b,0x01,0xff,
+-	0xce,0xb1,0xcc,0x93,0xcd,0x82,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,0xcd,0x82,0x00,
+-	0xd2,0x28,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0x91,0xcc,0x93,0x00,0x01,0xff,0xce,
+-	0x91,0xcc,0x94,0x00,0x10,0x0b,0x01,0xff,0xce,0x91,0xcc,0x93,0xcc,0x80,0x00,0x01,
+-	0xff,0xce,0x91,0xcc,0x94,0xcc,0x80,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0x91,
+-	0xcc,0x93,0xcc,0x81,0x00,0x01,0xff,0xce,0x91,0xcc,0x94,0xcc,0x81,0x00,0x10,0x0b,
+-	0x01,0xff,0xce,0x91,0xcc,0x93,0xcd,0x82,0x00,0x01,0xff,0xce,0x91,0xcc,0x94,0xcd,
+-	0x82,0x00,0xd3,0x42,0xd2,0x28,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xb5,0xcc,0x93,
+-	0x00,0x01,0xff,0xce,0xb5,0xcc,0x94,0x00,0x10,0x0b,0x01,0xff,0xce,0xb5,0xcc,0x93,
+-	0xcc,0x80,0x00,0x01,0xff,0xce,0xb5,0xcc,0x94,0xcc,0x80,0x00,0x91,0x16,0x10,0x0b,
+-	0x01,0xff,0xce,0xb5,0xcc,0x93,0xcc,0x81,0x00,0x01,0xff,0xce,0xb5,0xcc,0x94,0xcc,
+-	0x81,0x00,0x00,0x00,0xd2,0x28,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0x95,0xcc,0x93,
+-	0x00,0x01,0xff,0xce,0x95,0xcc,0x94,0x00,0x10,0x0b,0x01,0xff,0xce,0x95,0xcc,0x93,
+-	0xcc,0x80,0x00,0x01,0xff,0xce,0x95,0xcc,0x94,0xcc,0x80,0x00,0x91,0x16,0x10,0x0b,
+-	0x01,0xff,0xce,0x95,0xcc,0x93,0xcc,0x81,0x00,0x01,0xff,0xce,0x95,0xcc,0x94,0xcc,
+-	0x81,0x00,0x00,0x00,0xd4,0xa8,0xd3,0x54,0xd2,0x28,0xd1,0x12,0x10,0x09,0x01,0xff,
+-	0xce,0xb7,0xcc,0x93,0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0x00,0x10,0x0b,0x01,0xff,
+-	0xce,0xb7,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0xcc,0x80,0x00,
+-	0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0xb7,0xcc,0x93,0xcc,0x81,0x00,0x01,0xff,0xce,
+-	0xb7,0xcc,0x94,0xcc,0x81,0x00,0x10,0x0b,0x01,0xff,0xce,0xb7,0xcc,0x93,0xcd,0x82,
+-	0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0xcd,0x82,0x00,0xd2,0x28,0xd1,0x12,0x10,0x09,
+-	0x01,0xff,0xce,0x97,0xcc,0x93,0x00,0x01,0xff,0xce,0x97,0xcc,0x94,0x00,0x10,0x0b,
+-	0x01,0xff,0xce,0x97,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xce,0x97,0xcc,0x94,0xcc,
+-	0x80,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0x97,0xcc,0x93,0xcc,0x81,0x00,0x01,
+-	0xff,0xce,0x97,0xcc,0x94,0xcc,0x81,0x00,0x10,0x0b,0x01,0xff,0xce,0x97,0xcc,0x93,
+-	0xcd,0x82,0x00,0x01,0xff,0xce,0x97,0xcc,0x94,0xcd,0x82,0x00,0xd3,0x54,0xd2,0x28,
+-	0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xb9,0xcc,0x93,0x00,0x01,0xff,0xce,0xb9,0xcc,
+-	0x94,0x00,0x10,0x0b,0x01,0xff,0xce,0xb9,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xce,
+-	0xb9,0xcc,0x94,0xcc,0x80,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0xb9,0xcc,0x93,
+-	0xcc,0x81,0x00,0x01,0xff,0xce,0xb9,0xcc,0x94,0xcc,0x81,0x00,0x10,0x0b,0x01,0xff,
+-	0xce,0xb9,0xcc,0x93,0xcd,0x82,0x00,0x01,0xff,0xce,0xb9,0xcc,0x94,0xcd,0x82,0x00,
+-	0xd2,0x28,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0x99,0xcc,0x93,0x00,0x01,0xff,0xce,
+-	0x99,0xcc,0x94,0x00,0x10,0x0b,0x01,0xff,0xce,0x99,0xcc,0x93,0xcc,0x80,0x00,0x01,
+-	0xff,0xce,0x99,0xcc,0x94,0xcc,0x80,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0x99,
+-	0xcc,0x93,0xcc,0x81,0x00,0x01,0xff,0xce,0x99,0xcc,0x94,0xcc,0x81,0x00,0x10,0x0b,
+-	0x01,0xff,0xce,0x99,0xcc,0x93,0xcd,0x82,0x00,0x01,0xff,0xce,0x99,0xcc,0x94,0xcd,
+-	0x82,0x00,0xcf,0x86,0xe5,0x13,0x01,0xd4,0x84,0xd3,0x42,0xd2,0x28,0xd1,0x12,0x10,
+-	0x09,0x01,0xff,0xce,0xbf,0xcc,0x93,0x00,0x01,0xff,0xce,0xbf,0xcc,0x94,0x00,0x10,
+-	0x0b,0x01,0xff,0xce,0xbf,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xce,0xbf,0xcc,0x94,
+-	0xcc,0x80,0x00,0x91,0x16,0x10,0x0b,0x01,0xff,0xce,0xbf,0xcc,0x93,0xcc,0x81,0x00,
+-	0x01,0xff,0xce,0xbf,0xcc,0x94,0xcc,0x81,0x00,0x00,0x00,0xd2,0x28,0xd1,0x12,0x10,
+-	0x09,0x01,0xff,0xce,0x9f,0xcc,0x93,0x00,0x01,0xff,0xce,0x9f,0xcc,0x94,0x00,0x10,
+-	0x0b,0x01,0xff,0xce,0x9f,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xce,0x9f,0xcc,0x94,
+-	0xcc,0x80,0x00,0x91,0x16,0x10,0x0b,0x01,0xff,0xce,0x9f,0xcc,0x93,0xcc,0x81,0x00,
+-	0x01,0xff,0xce,0x9f,0xcc,0x94,0xcc,0x81,0x00,0x00,0x00,0xd3,0x54,0xd2,0x28,0xd1,
+-	0x12,0x10,0x09,0x01,0xff,0xcf,0x85,0xcc,0x93,0x00,0x01,0xff,0xcf,0x85,0xcc,0x94,
+-	0x00,0x10,0x0b,0x01,0xff,0xcf,0x85,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xcf,0x85,
+-	0xcc,0x94,0xcc,0x80,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xcf,0x85,0xcc,0x93,0xcc,
+-	0x81,0x00,0x01,0xff,0xcf,0x85,0xcc,0x94,0xcc,0x81,0x00,0x10,0x0b,0x01,0xff,0xcf,
+-	0x85,0xcc,0x93,0xcd,0x82,0x00,0x01,0xff,0xcf,0x85,0xcc,0x94,0xcd,0x82,0x00,0xd2,
+-	0x1c,0xd1,0x0d,0x10,0x04,0x00,0x00,0x01,0xff,0xce,0xa5,0xcc,0x94,0x00,0x10,0x04,
+-	0x00,0x00,0x01,0xff,0xce,0xa5,0xcc,0x94,0xcc,0x80,0x00,0xd1,0x0f,0x10,0x04,0x00,
+-	0x00,0x01,0xff,0xce,0xa5,0xcc,0x94,0xcc,0x81,0x00,0x10,0x04,0x00,0x00,0x01,0xff,
+-	0xce,0xa5,0xcc,0x94,0xcd,0x82,0x00,0xd4,0xa8,0xd3,0x54,0xd2,0x28,0xd1,0x12,0x10,
+-	0x09,0x01,0xff,0xcf,0x89,0xcc,0x93,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0x00,0x10,
+-	0x0b,0x01,0xff,0xcf,0x89,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,
+-	0xcc,0x80,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xcf,0x89,0xcc,0x93,0xcc,0x81,0x00,
+-	0x01,0xff,0xcf,0x89,0xcc,0x94,0xcc,0x81,0x00,0x10,0x0b,0x01,0xff,0xcf,0x89,0xcc,
+-	0x93,0xcd,0x82,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0xcd,0x82,0x00,0xd2,0x28,0xd1,
+-	0x12,0x10,0x09,0x01,0xff,0xce,0xa9,0xcc,0x93,0x00,0x01,0xff,0xce,0xa9,0xcc,0x94,
+-	0x00,0x10,0x0b,0x01,0xff,0xce,0xa9,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xce,0xa9,
+-	0xcc,0x94,0xcc,0x80,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0xa9,0xcc,0x93,0xcc,
+-	0x81,0x00,0x01,0xff,0xce,0xa9,0xcc,0x94,0xcc,0x81,0x00,0x10,0x0b,0x01,0xff,0xce,
+-	0xa9,0xcc,0x93,0xcd,0x82,0x00,0x01,0xff,0xce,0xa9,0xcc,0x94,0xcd,0x82,0x00,0xd3,
+-	0x48,0xd2,0x24,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xb1,0xcc,0x80,0x00,0x01,0xff,
+-	0xce,0xb1,0xcc,0x81,0x00,0x10,0x09,0x01,0xff,0xce,0xb5,0xcc,0x80,0x00,0x01,0xff,
+-	0xce,0xb5,0xcc,0x81,0x00,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xb7,0xcc,0x80,0x00,
+-	0x01,0xff,0xce,0xb7,0xcc,0x81,0x00,0x10,0x09,0x01,0xff,0xce,0xb9,0xcc,0x80,0x00,
+-	0x01,0xff,0xce,0xb9,0xcc,0x81,0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,
+-	0xbf,0xcc,0x80,0x00,0x01,0xff,0xce,0xbf,0xcc,0x81,0x00,0x10,0x09,0x01,0xff,0xcf,
+-	0x85,0xcc,0x80,0x00,0x01,0xff,0xcf,0x85,0xcc,0x81,0x00,0x91,0x12,0x10,0x09,0x01,
+-	0xff,0xcf,0x89,0xcc,0x80,0x00,0x01,0xff,0xcf,0x89,0xcc,0x81,0x00,0x00,0x00,0xe0,
+-	0xe1,0x02,0xcf,0x86,0xe5,0x91,0x01,0xd4,0xc8,0xd3,0x64,0xd2,0x30,0xd1,0x16,0x10,
+-	0x0b,0x01,0xff,0xce,0xb1,0xcc,0x93,0xcd,0x85,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,
+-	0xcd,0x85,0x00,0x10,0x0d,0x01,0xff,0xce,0xb1,0xcc,0x93,0xcc,0x80,0xcd,0x85,0x00,
+-	0x01,0xff,0xce,0xb1,0xcc,0x94,0xcc,0x80,0xcd,0x85,0x00,0xd1,0x1a,0x10,0x0d,0x01,
+-	0xff,0xce,0xb1,0xcc,0x93,0xcc,0x81,0xcd,0x85,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,
+-	0xcc,0x81,0xcd,0x85,0x00,0x10,0x0d,0x01,0xff,0xce,0xb1,0xcc,0x93,0xcd,0x82,0xcd,
+-	0x85,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,0xcd,0x82,0xcd,0x85,0x00,0xd2,0x30,0xd1,
+-	0x16,0x10,0x0b,0x01,0xff,0xce,0x91,0xcc,0x93,0xcd,0x85,0x00,0x01,0xff,0xce,0x91,
+-	0xcc,0x94,0xcd,0x85,0x00,0x10,0x0d,0x01,0xff,0xce,0x91,0xcc,0x93,0xcc,0x80,0xcd,
+-	0x85,0x00,0x01,0xff,0xce,0x91,0xcc,0x94,0xcc,0x80,0xcd,0x85,0x00,0xd1,0x1a,0x10,
+-	0x0d,0x01,0xff,0xce,0x91,0xcc,0x93,0xcc,0x81,0xcd,0x85,0x00,0x01,0xff,0xce,0x91,
+-	0xcc,0x94,0xcc,0x81,0xcd,0x85,0x00,0x10,0x0d,0x01,0xff,0xce,0x91,0xcc,0x93,0xcd,
+-	0x82,0xcd,0x85,0x00,0x01,0xff,0xce,0x91,0xcc,0x94,0xcd,0x82,0xcd,0x85,0x00,0xd3,
+-	0x64,0xd2,0x30,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0xb7,0xcc,0x93,0xcd,0x85,0x00,
+-	0x01,0xff,0xce,0xb7,0xcc,0x94,0xcd,0x85,0x00,0x10,0x0d,0x01,0xff,0xce,0xb7,0xcc,
+-	0x93,0xcc,0x80,0xcd,0x85,0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0xcc,0x80,0xcd,0x85,
+-	0x00,0xd1,0x1a,0x10,0x0d,0x01,0xff,0xce,0xb7,0xcc,0x93,0xcc,0x81,0xcd,0x85,0x00,
+-	0x01,0xff,0xce,0xb7,0xcc,0x94,0xcc,0x81,0xcd,0x85,0x00,0x10,0x0d,0x01,0xff,0xce,
+-	0xb7,0xcc,0x93,0xcd,0x82,0xcd,0x85,0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0xcd,0x82,
+-	0xcd,0x85,0x00,0xd2,0x30,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0x97,0xcc,0x93,0xcd,
+-	0x85,0x00,0x01,0xff,0xce,0x97,0xcc,0x94,0xcd,0x85,0x00,0x10,0x0d,0x01,0xff,0xce,
+-	0x97,0xcc,0x93,0xcc,0x80,0xcd,0x85,0x00,0x01,0xff,0xce,0x97,0xcc,0x94,0xcc,0x80,
+-	0xcd,0x85,0x00,0xd1,0x1a,0x10,0x0d,0x01,0xff,0xce,0x97,0xcc,0x93,0xcc,0x81,0xcd,
+-	0x85,0x00,0x01,0xff,0xce,0x97,0xcc,0x94,0xcc,0x81,0xcd,0x85,0x00,0x10,0x0d,0x01,
+-	0xff,0xce,0x97,0xcc,0x93,0xcd,0x82,0xcd,0x85,0x00,0x01,0xff,0xce,0x97,0xcc,0x94,
+-	0xcd,0x82,0xcd,0x85,0x00,0xd4,0xc8,0xd3,0x64,0xd2,0x30,0xd1,0x16,0x10,0x0b,0x01,
+-	0xff,0xcf,0x89,0xcc,0x93,0xcd,0x85,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0xcd,0x85,
+-	0x00,0x10,0x0d,0x01,0xff,0xcf,0x89,0xcc,0x93,0xcc,0x80,0xcd,0x85,0x00,0x01,0xff,
+-	0xcf,0x89,0xcc,0x94,0xcc,0x80,0xcd,0x85,0x00,0xd1,0x1a,0x10,0x0d,0x01,0xff,0xcf,
+-	0x89,0xcc,0x93,0xcc,0x81,0xcd,0x85,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0xcc,0x81,
+-	0xcd,0x85,0x00,0x10,0x0d,0x01,0xff,0xcf,0x89,0xcc,0x93,0xcd,0x82,0xcd,0x85,0x00,
+-	0x01,0xff,0xcf,0x89,0xcc,0x94,0xcd,0x82,0xcd,0x85,0x00,0xd2,0x30,0xd1,0x16,0x10,
+-	0x0b,0x01,0xff,0xce,0xa9,0xcc,0x93,0xcd,0x85,0x00,0x01,0xff,0xce,0xa9,0xcc,0x94,
+-	0xcd,0x85,0x00,0x10,0x0d,0x01,0xff,0xce,0xa9,0xcc,0x93,0xcc,0x80,0xcd,0x85,0x00,
+-	0x01,0xff,0xce,0xa9,0xcc,0x94,0xcc,0x80,0xcd,0x85,0x00,0xd1,0x1a,0x10,0x0d,0x01,
+-	0xff,0xce,0xa9,0xcc,0x93,0xcc,0x81,0xcd,0x85,0x00,0x01,0xff,0xce,0xa9,0xcc,0x94,
+-	0xcc,0x81,0xcd,0x85,0x00,0x10,0x0d,0x01,0xff,0xce,0xa9,0xcc,0x93,0xcd,0x82,0xcd,
+-	0x85,0x00,0x01,0xff,0xce,0xa9,0xcc,0x94,0xcd,0x82,0xcd,0x85,0x00,0xd3,0x49,0xd2,
+-	0x26,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xb1,0xcc,0x86,0x00,0x01,0xff,0xce,0xb1,
+-	0xcc,0x84,0x00,0x10,0x0b,0x01,0xff,0xce,0xb1,0xcc,0x80,0xcd,0x85,0x00,0x01,0xff,
+-	0xce,0xb1,0xcd,0x85,0x00,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xce,0xb1,0xcc,0x81,0xcd,
+-	0x85,0x00,0x00,0x00,0x10,0x09,0x01,0xff,0xce,0xb1,0xcd,0x82,0x00,0x01,0xff,0xce,
+-	0xb1,0xcd,0x82,0xcd,0x85,0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0x91,
+-	0xcc,0x86,0x00,0x01,0xff,0xce,0x91,0xcc,0x84,0x00,0x10,0x09,0x01,0xff,0xce,0x91,
+-	0xcc,0x80,0x00,0x01,0xff,0xce,0x91,0xcc,0x81,0x00,0xd1,0x0d,0x10,0x09,0x01,0xff,
+-	0xce,0x91,0xcd,0x85,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xce,0xb9,0x00,0x01,0x00,
+-	0xcf,0x86,0xe5,0x16,0x01,0xd4,0x8f,0xd3,0x44,0xd2,0x21,0xd1,0x0d,0x10,0x04,0x01,
+-	0x00,0x01,0xff,0xc2,0xa8,0xcd,0x82,0x00,0x10,0x0b,0x01,0xff,0xce,0xb7,0xcc,0x80,
+-	0xcd,0x85,0x00,0x01,0xff,0xce,0xb7,0xcd,0x85,0x00,0xd1,0x0f,0x10,0x0b,0x01,0xff,
+-	0xce,0xb7,0xcc,0x81,0xcd,0x85,0x00,0x00,0x00,0x10,0x09,0x01,0xff,0xce,0xb7,0xcd,
+-	0x82,0x00,0x01,0xff,0xce,0xb7,0xcd,0x82,0xcd,0x85,0x00,0xd2,0x24,0xd1,0x12,0x10,
+-	0x09,0x01,0xff,0xce,0x95,0xcc,0x80,0x00,0x01,0xff,0xce,0x95,0xcc,0x81,0x00,0x10,
+-	0x09,0x01,0xff,0xce,0x97,0xcc,0x80,0x00,0x01,0xff,0xce,0x97,0xcc,0x81,0x00,0xd1,
+-	0x13,0x10,0x09,0x01,0xff,0xce,0x97,0xcd,0x85,0x00,0x01,0xff,0xe1,0xbe,0xbf,0xcc,
+-	0x80,0x00,0x10,0x0a,0x01,0xff,0xe1,0xbe,0xbf,0xcc,0x81,0x00,0x01,0xff,0xe1,0xbe,
+-	0xbf,0xcd,0x82,0x00,0xd3,0x40,0xd2,0x28,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xb9,
+-	0xcc,0x86,0x00,0x01,0xff,0xce,0xb9,0xcc,0x84,0x00,0x10,0x0b,0x01,0xff,0xce,0xb9,
+-	0xcc,0x88,0xcc,0x80,0x00,0x01,0xff,0xce,0xb9,0xcc,0x88,0xcc,0x81,0x00,0x51,0x04,
+-	0x00,0x00,0x10,0x09,0x01,0xff,0xce,0xb9,0xcd,0x82,0x00,0x01,0xff,0xce,0xb9,0xcc,
+-	0x88,0xcd,0x82,0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0x99,0xcc,0x86,
+-	0x00,0x01,0xff,0xce,0x99,0xcc,0x84,0x00,0x10,0x09,0x01,0xff,0xce,0x99,0xcc,0x80,
+-	0x00,0x01,0xff,0xce,0x99,0xcc,0x81,0x00,0xd1,0x0e,0x10,0x04,0x00,0x00,0x01,0xff,
+-	0xe1,0xbf,0xbe,0xcc,0x80,0x00,0x10,0x0a,0x01,0xff,0xe1,0xbf,0xbe,0xcc,0x81,0x00,
+-	0x01,0xff,0xe1,0xbf,0xbe,0xcd,0x82,0x00,0xd4,0x93,0xd3,0x4e,0xd2,0x28,0xd1,0x12,
++	0xff,0x79,0xcc,0x80,0x00,0x01,0xff,0x79,0xcc,0x80,0x00,0xd1,0x10,0x10,0x08,0x01,
++	0xff,0x79,0xcc,0xa3,0x00,0x01,0xff,0x79,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x79,
++	0xcc,0x89,0x00,0x01,0xff,0x79,0xcc,0x89,0x00,0xd2,0x1c,0xd1,0x10,0x10,0x08,0x01,
++	0xff,0x79,0xcc,0x83,0x00,0x01,0xff,0x79,0xcc,0x83,0x00,0x10,0x08,0x0a,0xff,0xe1,
++	0xbb,0xbb,0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xe1,0xbb,0xbd,0x00,0x0a,
++	0x00,0x10,0x08,0x0a,0xff,0xe1,0xbb,0xbf,0x00,0x0a,0x00,0xe1,0xbf,0x02,0xe0,0xa1,
++	0x01,0xcf,0x86,0xd5,0xc6,0xd4,0x6c,0xd3,0x18,0xe2,0x0e,0x59,0xe1,0xf7,0x58,0x10,
++	0x09,0x01,0xff,0xce,0xb1,0xcc,0x93,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,0x00,0xd2,
++	0x28,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xb1,0xcc,0x93,0x00,0x01,0xff,0xce,0xb1,
++	0xcc,0x94,0x00,0x10,0x0b,0x01,0xff,0xce,0xb1,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,
++	0xce,0xb1,0xcc,0x94,0xcc,0x80,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0xb1,0xcc,
++	0x93,0xcc,0x81,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,0xcc,0x81,0x00,0x10,0x0b,0x01,
++	0xff,0xce,0xb1,0xcc,0x93,0xcd,0x82,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,0xcd,0x82,
++	0x00,0xd3,0x18,0xe2,0x4a,0x59,0xe1,0x33,0x59,0x10,0x09,0x01,0xff,0xce,0xb5,0xcc,
++	0x93,0x00,0x01,0xff,0xce,0xb5,0xcc,0x94,0x00,0xd2,0x28,0xd1,0x12,0x10,0x09,0x01,
++	0xff,0xce,0xb5,0xcc,0x93,0x00,0x01,0xff,0xce,0xb5,0xcc,0x94,0x00,0x10,0x0b,0x01,
++	0xff,0xce,0xb5,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xce,0xb5,0xcc,0x94,0xcc,0x80,
++	0x00,0x91,0x16,0x10,0x0b,0x01,0xff,0xce,0xb5,0xcc,0x93,0xcc,0x81,0x00,0x01,0xff,
++	0xce,0xb5,0xcc,0x94,0xcc,0x81,0x00,0x00,0x00,0xd4,0x6c,0xd3,0x18,0xe2,0x74,0x59,
++	0xe1,0x5d,0x59,0x10,0x09,0x01,0xff,0xce,0xb7,0xcc,0x93,0x00,0x01,0xff,0xce,0xb7,
++	0xcc,0x94,0x00,0xd2,0x28,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xb7,0xcc,0x93,0x00,
++	0x01,0xff,0xce,0xb7,0xcc,0x94,0x00,0x10,0x0b,0x01,0xff,0xce,0xb7,0xcc,0x93,0xcc,
++	0x80,0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0xcc,0x80,0x00,0xd1,0x16,0x10,0x0b,0x01,
++	0xff,0xce,0xb7,0xcc,0x93,0xcc,0x81,0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0xcc,0x81,
++	0x00,0x10,0x0b,0x01,0xff,0xce,0xb7,0xcc,0x93,0xcd,0x82,0x00,0x01,0xff,0xce,0xb7,
++	0xcc,0x94,0xcd,0x82,0x00,0xd3,0x18,0xe2,0xb0,0x59,0xe1,0x99,0x59,0x10,0x09,0x01,
++	0xff,0xce,0xb9,0xcc,0x93,0x00,0x01,0xff,0xce,0xb9,0xcc,0x94,0x00,0xd2,0x28,0xd1,
++	0x12,0x10,0x09,0x01,0xff,0xce,0xb9,0xcc,0x93,0x00,0x01,0xff,0xce,0xb9,0xcc,0x94,
++	0x00,0x10,0x0b,0x01,0xff,0xce,0xb9,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xce,0xb9,
++	0xcc,0x94,0xcc,0x80,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0xb9,0xcc,0x93,0xcc,
++	0x81,0x00,0x01,0xff,0xce,0xb9,0xcc,0x94,0xcc,0x81,0x00,0x10,0x0b,0x01,0xff,0xce,
++	0xb9,0xcc,0x93,0xcd,0x82,0x00,0x01,0xff,0xce,0xb9,0xcc,0x94,0xcd,0x82,0x00,0xcf,
++	0x86,0xd5,0xac,0xd4,0x5a,0xd3,0x18,0xe2,0xed,0x59,0xe1,0xd6,0x59,0x10,0x09,0x01,
++	0xff,0xce,0xbf,0xcc,0x93,0x00,0x01,0xff,0xce,0xbf,0xcc,0x94,0x00,0xd2,0x28,0xd1,
++	0x12,0x10,0x09,0x01,0xff,0xce,0xbf,0xcc,0x93,0x00,0x01,0xff,0xce,0xbf,0xcc,0x94,
++	0x00,0x10,0x0b,0x01,0xff,0xce,0xbf,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xce,0xbf,
++	0xcc,0x94,0xcc,0x80,0x00,0x91,0x16,0x10,0x0b,0x01,0xff,0xce,0xbf,0xcc,0x93,0xcc,
++	0x81,0x00,0x01,0xff,0xce,0xbf,0xcc,0x94,0xcc,0x81,0x00,0x00,0x00,0xd3,0x18,0xe2,
++	0x17,0x5a,0xe1,0x00,0x5a,0x10,0x09,0x01,0xff,0xcf,0x85,0xcc,0x93,0x00,0x01,0xff,
++	0xcf,0x85,0xcc,0x94,0x00,0xd2,0x1c,0xd1,0x0d,0x10,0x04,0x00,0x00,0x01,0xff,0xcf,
++	0x85,0xcc,0x94,0x00,0x10,0x04,0x00,0x00,0x01,0xff,0xcf,0x85,0xcc,0x94,0xcc,0x80,
++	0x00,0xd1,0x0f,0x10,0x04,0x00,0x00,0x01,0xff,0xcf,0x85,0xcc,0x94,0xcc,0x81,0x00,
++	0x10,0x04,0x00,0x00,0x01,0xff,0xcf,0x85,0xcc,0x94,0xcd,0x82,0x00,0xe4,0xd3,0x5a,
++	0xd3,0x18,0xe2,0x52,0x5a,0xe1,0x3b,0x5a,0x10,0x09,0x01,0xff,0xcf,0x89,0xcc,0x93,
++	0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0x00,0xd2,0x28,0xd1,0x12,0x10,0x09,0x01,0xff,
++	0xcf,0x89,0xcc,0x93,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0x00,0x10,0x0b,0x01,0xff,
++	0xcf,0x89,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0xcc,0x80,0x00,
++	0xd1,0x16,0x10,0x0b,0x01,0xff,0xcf,0x89,0xcc,0x93,0xcc,0x81,0x00,0x01,0xff,0xcf,
++	0x89,0xcc,0x94,0xcc,0x81,0x00,0x10,0x0b,0x01,0xff,0xcf,0x89,0xcc,0x93,0xcd,0x82,
++	0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0xcd,0x82,0x00,0xe0,0xd9,0x02,0xcf,0x86,0xe5,
++	0x91,0x01,0xd4,0xc8,0xd3,0x64,0xd2,0x30,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0xb1,
++	0xcc,0x93,0xce,0xb9,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,0xce,0xb9,0x00,0x10,0x0d,
++	0x01,0xff,0xce,0xb1,0xcc,0x93,0xcc,0x80,0xce,0xb9,0x00,0x01,0xff,0xce,0xb1,0xcc,
++	0x94,0xcc,0x80,0xce,0xb9,0x00,0xd1,0x1a,0x10,0x0d,0x01,0xff,0xce,0xb1,0xcc,0x93,
++	0xcc,0x81,0xce,0xb9,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,0xcc,0x81,0xce,0xb9,0x00,
++	0x10,0x0d,0x01,0xff,0xce,0xb1,0xcc,0x93,0xcd,0x82,0xce,0xb9,0x00,0x01,0xff,0xce,
++	0xb1,0xcc,0x94,0xcd,0x82,0xce,0xb9,0x00,0xd2,0x30,0xd1,0x16,0x10,0x0b,0x01,0xff,
++	0xce,0xb1,0xcc,0x93,0xce,0xb9,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,0xce,0xb9,0x00,
++	0x10,0x0d,0x01,0xff,0xce,0xb1,0xcc,0x93,0xcc,0x80,0xce,0xb9,0x00,0x01,0xff,0xce,
++	0xb1,0xcc,0x94,0xcc,0x80,0xce,0xb9,0x00,0xd1,0x1a,0x10,0x0d,0x01,0xff,0xce,0xb1,
++	0xcc,0x93,0xcc,0x81,0xce,0xb9,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,0xcc,0x81,0xce,
++	0xb9,0x00,0x10,0x0d,0x01,0xff,0xce,0xb1,0xcc,0x93,0xcd,0x82,0xce,0xb9,0x00,0x01,
++	0xff,0xce,0xb1,0xcc,0x94,0xcd,0x82,0xce,0xb9,0x00,0xd3,0x64,0xd2,0x30,0xd1,0x16,
++	0x10,0x0b,0x01,0xff,0xce,0xb7,0xcc,0x93,0xce,0xb9,0x00,0x01,0xff,0xce,0xb7,0xcc,
++	0x94,0xce,0xb9,0x00,0x10,0x0d,0x01,0xff,0xce,0xb7,0xcc,0x93,0xcc,0x80,0xce,0xb9,
++	0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0xcc,0x80,0xce,0xb9,0x00,0xd1,0x1a,0x10,0x0d,
++	0x01,0xff,0xce,0xb7,0xcc,0x93,0xcc,0x81,0xce,0xb9,0x00,0x01,0xff,0xce,0xb7,0xcc,
++	0x94,0xcc,0x81,0xce,0xb9,0x00,0x10,0x0d,0x01,0xff,0xce,0xb7,0xcc,0x93,0xcd,0x82,
++	0xce,0xb9,0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0xcd,0x82,0xce,0xb9,0x00,0xd2,0x30,
++	0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0xb7,0xcc,0x93,0xce,0xb9,0x00,0x01,0xff,0xce,
++	0xb7,0xcc,0x94,0xce,0xb9,0x00,0x10,0x0d,0x01,0xff,0xce,0xb7,0xcc,0x93,0xcc,0x80,
++	0xce,0xb9,0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0xcc,0x80,0xce,0xb9,0x00,0xd1,0x1a,
++	0x10,0x0d,0x01,0xff,0xce,0xb7,0xcc,0x93,0xcc,0x81,0xce,0xb9,0x00,0x01,0xff,0xce,
++	0xb7,0xcc,0x94,0xcc,0x81,0xce,0xb9,0x00,0x10,0x0d,0x01,0xff,0xce,0xb7,0xcc,0x93,
++	0xcd,0x82,0xce,0xb9,0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0xcd,0x82,0xce,0xb9,0x00,
++	0xd4,0xc8,0xd3,0x64,0xd2,0x30,0xd1,0x16,0x10,0x0b,0x01,0xff,0xcf,0x89,0xcc,0x93,
++	0xce,0xb9,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0xce,0xb9,0x00,0x10,0x0d,0x01,0xff,
++	0xcf,0x89,0xcc,0x93,0xcc,0x80,0xce,0xb9,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0xcc,
++	0x80,0xce,0xb9,0x00,0xd1,0x1a,0x10,0x0d,0x01,0xff,0xcf,0x89,0xcc,0x93,0xcc,0x81,
++	0xce,0xb9,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0xcc,0x81,0xce,0xb9,0x00,0x10,0x0d,
++	0x01,0xff,0xcf,0x89,0xcc,0x93,0xcd,0x82,0xce,0xb9,0x00,0x01,0xff,0xcf,0x89,0xcc,
++	0x94,0xcd,0x82,0xce,0xb9,0x00,0xd2,0x30,0xd1,0x16,0x10,0x0b,0x01,0xff,0xcf,0x89,
++	0xcc,0x93,0xce,0xb9,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0xce,0xb9,0x00,0x10,0x0d,
++	0x01,0xff,0xcf,0x89,0xcc,0x93,0xcc,0x80,0xce,0xb9,0x00,0x01,0xff,0xcf,0x89,0xcc,
++	0x94,0xcc,0x80,0xce,0xb9,0x00,0xd1,0x1a,0x10,0x0d,0x01,0xff,0xcf,0x89,0xcc,0x93,
++	0xcc,0x81,0xce,0xb9,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0xcc,0x81,0xce,0xb9,0x00,
++	0x10,0x0d,0x01,0xff,0xcf,0x89,0xcc,0x93,0xcd,0x82,0xce,0xb9,0x00,0x01,0xff,0xcf,
++	0x89,0xcc,0x94,0xcd,0x82,0xce,0xb9,0x00,0xd3,0x49,0xd2,0x26,0xd1,0x12,0x10,0x09,
++	0x01,0xff,0xce,0xb1,0xcc,0x86,0x00,0x01,0xff,0xce,0xb1,0xcc,0x84,0x00,0x10,0x0b,
++	0x01,0xff,0xce,0xb1,0xcc,0x80,0xce,0xb9,0x00,0x01,0xff,0xce,0xb1,0xce,0xb9,0x00,
++	0xd1,0x0f,0x10,0x0b,0x01,0xff,0xce,0xb1,0xcc,0x81,0xce,0xb9,0x00,0x00,0x00,0x10,
++	0x09,0x01,0xff,0xce,0xb1,0xcd,0x82,0x00,0x01,0xff,0xce,0xb1,0xcd,0x82,0xce,0xb9,
++	0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xb1,0xcc,0x86,0x00,0x01,0xff,
++	0xce,0xb1,0xcc,0x84,0x00,0x10,0x09,0x01,0xff,0xce,0xb1,0xcc,0x80,0x00,0x01,0xff,
++	0xce,0xb1,0xcc,0x81,0x00,0xe1,0xf3,0x5a,0x10,0x09,0x01,0xff,0xce,0xb1,0xce,0xb9,
++	0x00,0x01,0x00,0xcf,0x86,0xd5,0xbd,0xd4,0x7e,0xd3,0x44,0xd2,0x21,0xd1,0x0d,0x10,
++	0x04,0x01,0x00,0x01,0xff,0xc2,0xa8,0xcd,0x82,0x00,0x10,0x0b,0x01,0xff,0xce,0xb7,
++	0xcc,0x80,0xce,0xb9,0x00,0x01,0xff,0xce,0xb7,0xce,0xb9,0x00,0xd1,0x0f,0x10,0x0b,
++	0x01,0xff,0xce,0xb7,0xcc,0x81,0xce,0xb9,0x00,0x00,0x00,0x10,0x09,0x01,0xff,0xce,
++	0xb7,0xcd,0x82,0x00,0x01,0xff,0xce,0xb7,0xcd,0x82,0xce,0xb9,0x00,0xd2,0x24,0xd1,
++	0x12,0x10,0x09,0x01,0xff,0xce,0xb5,0xcc,0x80,0x00,0x01,0xff,0xce,0xb5,0xcc,0x81,
++	0x00,0x10,0x09,0x01,0xff,0xce,0xb7,0xcc,0x80,0x00,0x01,0xff,0xce,0xb7,0xcc,0x81,
++	0x00,0xe1,0x02,0x5b,0x10,0x09,0x01,0xff,0xce,0xb7,0xce,0xb9,0x00,0x01,0xff,0xe1,
++	0xbe,0xbf,0xcc,0x80,0x00,0xd3,0x18,0xe2,0x28,0x5b,0xe1,0x11,0x5b,0x10,0x09,0x01,
++	0xff,0xce,0xb9,0xcc,0x86,0x00,0x01,0xff,0xce,0xb9,0xcc,0x84,0x00,0xe2,0x4c,0x5b,
++	0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xb9,0xcc,0x86,0x00,0x01,0xff,0xce,0xb9,0xcc,
++	0x84,0x00,0x10,0x09,0x01,0xff,0xce,0xb9,0xcc,0x80,0x00,0x01,0xff,0xce,0xb9,0xcc,
++	0x81,0x00,0xd4,0x51,0xd3,0x18,0xe2,0x6f,0x5b,0xe1,0x58,0x5b,0x10,0x09,0x01,0xff,
++	0xcf,0x85,0xcc,0x86,0x00,0x01,0xff,0xcf,0x85,0xcc,0x84,0x00,0xd2,0x24,0xd1,0x12,
+ 	0x10,0x09,0x01,0xff,0xcf,0x85,0xcc,0x86,0x00,0x01,0xff,0xcf,0x85,0xcc,0x84,0x00,
+-	0x10,0x0b,0x01,0xff,0xcf,0x85,0xcc,0x88,0xcc,0x80,0x00,0x01,0xff,0xcf,0x85,0xcc,
+-	0x88,0xcc,0x81,0x00,0xd1,0x12,0x10,0x09,0x01,0xff,0xcf,0x81,0xcc,0x93,0x00,0x01,
+-	0xff,0xcf,0x81,0xcc,0x94,0x00,0x10,0x09,0x01,0xff,0xcf,0x85,0xcd,0x82,0x00,0x01,
+-	0xff,0xcf,0x85,0xcc,0x88,0xcd,0x82,0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x01,0xff,
+-	0xce,0xa5,0xcc,0x86,0x00,0x01,0xff,0xce,0xa5,0xcc,0x84,0x00,0x10,0x09,0x01,0xff,
+-	0xce,0xa5,0xcc,0x80,0x00,0x01,0xff,0xce,0xa5,0xcc,0x81,0x00,0xd1,0x12,0x10,0x09,
+-	0x01,0xff,0xce,0xa1,0xcc,0x94,0x00,0x01,0xff,0xc2,0xa8,0xcc,0x80,0x00,0x10,0x09,
+-	0x01,0xff,0xc2,0xa8,0xcc,0x81,0x00,0x01,0xff,0x60,0x00,0xd3,0x3b,0xd2,0x18,0x51,
+-	0x04,0x00,0x00,0x10,0x0b,0x01,0xff,0xcf,0x89,0xcc,0x80,0xcd,0x85,0x00,0x01,0xff,
+-	0xcf,0x89,0xcd,0x85,0x00,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xcf,0x89,0xcc,0x81,0xcd,
+-	0x85,0x00,0x00,0x00,0x10,0x09,0x01,0xff,0xcf,0x89,0xcd,0x82,0x00,0x01,0xff,0xcf,
+-	0x89,0xcd,0x82,0xcd,0x85,0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0x9f,
+-	0xcc,0x80,0x00,0x01,0xff,0xce,0x9f,0xcc,0x81,0x00,0x10,0x09,0x01,0xff,0xce,0xa9,
+-	0xcc,0x80,0x00,0x01,0xff,0xce,0xa9,0xcc,0x81,0x00,0xd1,0x10,0x10,0x09,0x01,0xff,
+-	0xce,0xa9,0xcd,0x85,0x00,0x01,0xff,0xc2,0xb4,0x00,0x10,0x04,0x01,0x00,0x00,0x00,
+-	0xe0,0x62,0x0c,0xcf,0x86,0xe5,0x9f,0x08,0xe4,0xf8,0x05,0xe3,0xdb,0x02,0xe2,0xa1,
+-	0x01,0xd1,0xb4,0xd0,0x3a,0xcf,0x86,0xd5,0x20,0x94,0x1c,0x93,0x18,0x92,0x14,0x91,
+-	0x10,0x10,0x08,0x01,0xff,0xe2,0x80,0x82,0x00,0x01,0xff,0xe2,0x80,0x83,0x00,0x01,
+-	0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x94,0x14,0x53,0x04,0x01,0x00,0x52,0x04,0x01,
+-	0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x04,0x00,0x01,0x00,0xcf,0x86,0xd5,
+-	0x48,0xd4,0x1c,0xd3,0x10,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,
+-	0x00,0x06,0x00,0x52,0x04,0x04,0x00,0x11,0x04,0x04,0x00,0x06,0x00,0xd3,0x1c,0xd2,
+-	0x0c,0x51,0x04,0x06,0x00,0x10,0x04,0x06,0x00,0x07,0x00,0xd1,0x08,0x10,0x04,0x07,
+-	0x00,0x08,0x00,0x10,0x04,0x08,0x00,0x06,0x00,0x52,0x04,0x08,0x00,0x51,0x04,0x08,
+-	0x00,0x10,0x04,0x08,0x00,0x06,0x00,0xd4,0x1c,0xd3,0x10,0x52,0x04,0x06,0x00,0x91,
+-	0x08,0x10,0x04,0x0a,0x00,0x00,0x00,0x0f,0x00,0x92,0x08,0x11,0x04,0x0f,0x00,0x01,
+-	0x00,0x01,0x00,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x06,0x00,0x00,
+-	0x00,0x01,0x00,0x01,0x00,0xd0,0x7e,0xcf,0x86,0xd5,0x34,0xd4,0x14,0x53,0x04,0x01,
+-	0x00,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00,0xd3,
+-	0x10,0x52,0x04,0x08,0x00,0x91,0x08,0x10,0x04,0x08,0x00,0x0c,0x00,0x0c,0x00,0x52,
+-	0x04,0x0c,0x00,0x91,0x08,0x10,0x04,0x0c,0x00,0x00,0x00,0x00,0x00,0xd4,0x1c,0x53,
+-	0x04,0x01,0x00,0xd2,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x02,0x00,0x91,
+-	0x08,0x10,0x04,0x03,0x00,0x04,0x00,0x04,0x00,0xd3,0x10,0xd2,0x08,0x11,0x04,0x06,
+-	0x00,0x08,0x00,0x11,0x04,0x08,0x00,0x0b,0x00,0xd2,0x10,0xd1,0x08,0x10,0x04,0x0b,
+-	0x00,0x0c,0x00,0x10,0x04,0x0e,0x00,0x10,0x00,0x51,0x04,0x10,0x00,0x10,0x04,0x11,
+-	0x00,0x13,0x00,0xcf,0x86,0xd5,0x28,0x54,0x04,0x00,0x00,0xd3,0x0c,0x92,0x08,0x11,
+-	0x04,0x01,0xe6,0x01,0x01,0x01,0xe6,0xd2,0x0c,0x51,0x04,0x01,0x01,0x10,0x04,0x01,
+-	0x01,0x01,0xe6,0x91,0x08,0x10,0x04,0x01,0xe6,0x01,0x00,0x01,0x00,0xd4,0x30,0xd3,
+-	0x1c,0xd2,0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x01,0xe6,0x04,0x00,0xd1,0x08,0x10,
+-	0x04,0x06,0x00,0x06,0x01,0x10,0x04,0x06,0x01,0x06,0xe6,0x92,0x10,0xd1,0x08,0x10,
+-	0x04,0x06,0xdc,0x06,0xe6,0x10,0x04,0x06,0x01,0x08,0x01,0x09,0xdc,0x93,0x10,0x92,
+-	0x0c,0x91,0x08,0x10,0x04,0x0a,0xe6,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xd1,
+-	0x81,0xd0,0x4f,0xcf,0x86,0x55,0x04,0x01,0x00,0xd4,0x29,0xd3,0x13,0x52,0x04,0x01,
+-	0x00,0x51,0x04,0x01,0x00,0x10,0x07,0x01,0xff,0xce,0xa9,0x00,0x01,0x00,0x92,0x12,
+-	0x51,0x04,0x01,0x00,0x10,0x06,0x01,0xff,0x4b,0x00,0x01,0xff,0x41,0xcc,0x8a,0x00,
+-	0x01,0x00,0x53,0x04,0x01,0x00,0xd2,0x10,0xd1,0x08,0x10,0x04,0x01,0x00,0x04,0x00,
+-	0x10,0x04,0x04,0x00,0x07,0x00,0x91,0x08,0x10,0x04,0x08,0x00,0x06,0x00,0x06,0x00,
+-	0xcf,0x86,0x95,0x2c,0xd4,0x18,0x53,0x04,0x06,0x00,0x52,0x04,0x06,0x00,0xd1,0x08,
+-	0x10,0x04,0x08,0x00,0x09,0x00,0x10,0x04,0x09,0x00,0x0a,0x00,0x93,0x10,0x92,0x0c,
+-	0x51,0x04,0x0b,0x00,0x10,0x04,0x0b,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,
+-	0xd0,0x68,0xcf,0x86,0xd5,0x48,0xd4,0x28,0xd3,0x18,0xd2,0x0c,0x51,0x04,0x01,0x00,
+-	0x10,0x04,0x01,0x00,0x04,0x00,0x91,0x08,0x10,0x04,0x09,0x00,0x0a,0x00,0x0a,0x00,
+-	0x92,0x0c,0x91,0x08,0x10,0x04,0x0a,0x00,0x0b,0x00,0x11,0x00,0x00,0x00,0x53,0x04,
+-	0x01,0x00,0x92,0x18,0x51,0x04,0x01,0x00,0x10,0x0a,0x01,0xff,0xe2,0x86,0x90,0xcc,
+-	0xb8,0x00,0x01,0xff,0xe2,0x86,0x92,0xcc,0xb8,0x00,0x01,0x00,0x94,0x1a,0x53,0x04,
+-	0x01,0x00,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x0a,0x01,0xff,0xe2,0x86,
+-	0x94,0xcc,0xb8,0x00,0x01,0x00,0x01,0x00,0xcf,0x86,0xd5,0x2e,0x94,0x2a,0x53,0x04,
+-	0x01,0x00,0x52,0x04,0x01,0x00,0xd1,0x0e,0x10,0x04,0x01,0x00,0x01,0xff,0xe2,0x87,
+-	0x90,0xcc,0xb8,0x00,0x10,0x0a,0x01,0xff,0xe2,0x87,0x94,0xcc,0xb8,0x00,0x01,0xff,
+-	0xe2,0x87,0x92,0xcc,0xb8,0x00,0x01,0x00,0xd4,0x14,0x53,0x04,0x01,0x00,0x92,0x0c,
+-	0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x04,0x00,0x04,0x00,0x93,0x08,0x12,0x04,
+-	0x04,0x00,0x06,0x00,0x06,0x00,0xe2,0x38,0x02,0xe1,0x3f,0x01,0xd0,0x68,0xcf,0x86,
+-	0xd5,0x3e,0x94,0x3a,0xd3,0x16,0x52,0x04,0x01,0x00,0x91,0x0e,0x10,0x0a,0x01,0xff,
+-	0xe2,0x88,0x83,0xcc,0xb8,0x00,0x01,0x00,0x01,0x00,0xd2,0x12,0x91,0x0e,0x10,0x04,
+-	0x01,0x00,0x01,0xff,0xe2,0x88,0x88,0xcc,0xb8,0x00,0x01,0x00,0x91,0x0e,0x10,0x0a,
+-	0x01,0xff,0xe2,0x88,0x8b,0xcc,0xb8,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x94,0x24,
+-	0x93,0x20,0x52,0x04,0x01,0x00,0xd1,0x0e,0x10,0x0a,0x01,0xff,0xe2,0x88,0xa3,0xcc,
+-	0xb8,0x00,0x01,0x00,0x10,0x0a,0x01,0xff,0xe2,0x88,0xa5,0xcc,0xb8,0x00,0x01,0x00,
+-	0x01,0x00,0x01,0x00,0xcf,0x86,0xd5,0x48,0x94,0x44,0xd3,0x2e,0xd2,0x12,0x91,0x0e,
+-	0x10,0x04,0x01,0x00,0x01,0xff,0xe2,0x88,0xbc,0xcc,0xb8,0x00,0x01,0x00,0xd1,0x0e,
+-	0x10,0x0a,0x01,0xff,0xe2,0x89,0x83,0xcc,0xb8,0x00,0x01,0x00,0x10,0x04,0x01,0x00,
+-	0x01,0xff,0xe2,0x89,0x85,0xcc,0xb8,0x00,0x92,0x12,0x91,0x0e,0x10,0x04,0x01,0x00,
+-	0x01,0xff,0xe2,0x89,0x88,0xcc,0xb8,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd4,0x40,
+-	0xd3,0x1e,0x92,0x1a,0xd1,0x0c,0x10,0x08,0x01,0xff,0x3d,0xcc,0xb8,0x00,0x01,0x00,
+-	0x10,0x0a,0x01,0xff,0xe2,0x89,0xa1,0xcc,0xb8,0x00,0x01,0x00,0x01,0x00,0x52,0x04,
+-	0x01,0x00,0xd1,0x0e,0x10,0x04,0x01,0x00,0x01,0xff,0xe2,0x89,0x8d,0xcc,0xb8,0x00,
+-	0x10,0x08,0x01,0xff,0x3c,0xcc,0xb8,0x00,0x01,0xff,0x3e,0xcc,0xb8,0x00,0xd3,0x30,
+-	0xd2,0x18,0x91,0x14,0x10,0x0a,0x01,0xff,0xe2,0x89,0xa4,0xcc,0xb8,0x00,0x01,0xff,
+-	0xe2,0x89,0xa5,0xcc,0xb8,0x00,0x01,0x00,0x91,0x14,0x10,0x0a,0x01,0xff,0xe2,0x89,
+-	0xb2,0xcc,0xb8,0x00,0x01,0xff,0xe2,0x89,0xb3,0xcc,0xb8,0x00,0x01,0x00,0x92,0x18,
+-	0x91,0x14,0x10,0x0a,0x01,0xff,0xe2,0x89,0xb6,0xcc,0xb8,0x00,0x01,0xff,0xe2,0x89,
+-	0xb7,0xcc,0xb8,0x00,0x01,0x00,0x01,0x00,0xd0,0x86,0xcf,0x86,0xd5,0x50,0x94,0x4c,
+-	0xd3,0x30,0xd2,0x18,0x91,0x14,0x10,0x0a,0x01,0xff,0xe2,0x89,0xba,0xcc,0xb8,0x00,
+-	0x01,0xff,0xe2,0x89,0xbb,0xcc,0xb8,0x00,0x01,0x00,0x91,0x14,0x10,0x0a,0x01,0xff,
+-	0xe2,0x8a,0x82,0xcc,0xb8,0x00,0x01,0xff,0xe2,0x8a,0x83,0xcc,0xb8,0x00,0x01,0x00,
+-	0x92,0x18,0x91,0x14,0x10,0x0a,0x01,0xff,0xe2,0x8a,0x86,0xcc,0xb8,0x00,0x01,0xff,
+-	0xe2,0x8a,0x87,0xcc,0xb8,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x94,0x30,0x53,0x04,
+-	0x01,0x00,0x52,0x04,0x01,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0xe2,0x8a,0xa2,0xcc,
+-	0xb8,0x00,0x01,0xff,0xe2,0x8a,0xa8,0xcc,0xb8,0x00,0x10,0x0a,0x01,0xff,0xe2,0x8a,
+-	0xa9,0xcc,0xb8,0x00,0x01,0xff,0xe2,0x8a,0xab,0xcc,0xb8,0x00,0x01,0x00,0xcf,0x86,
+-	0x55,0x04,0x01,0x00,0xd4,0x5c,0xd3,0x2c,0x92,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,
+-	0xe2,0x89,0xbc,0xcc,0xb8,0x00,0x01,0xff,0xe2,0x89,0xbd,0xcc,0xb8,0x00,0x10,0x0a,
+-	0x01,0xff,0xe2,0x8a,0x91,0xcc,0xb8,0x00,0x01,0xff,0xe2,0x8a,0x92,0xcc,0xb8,0x00,
+-	0x01,0x00,0xd2,0x18,0x51,0x04,0x01,0x00,0x10,0x0a,0x01,0xff,0xe2,0x8a,0xb2,0xcc,
+-	0xb8,0x00,0x01,0xff,0xe2,0x8a,0xb3,0xcc,0xb8,0x00,0x91,0x14,0x10,0x0a,0x01,0xff,
+-	0xe2,0x8a,0xb4,0xcc,0xb8,0x00,0x01,0xff,0xe2,0x8a,0xb5,0xcc,0xb8,0x00,0x01,0x00,
+-	0x93,0x0c,0x92,0x08,0x11,0x04,0x01,0x00,0x06,0x00,0x06,0x00,0x06,0x00,0xd1,0x64,
+-	0xd0,0x3e,0xcf,0x86,0xd5,0x18,0x94,0x14,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,
+-	0x01,0x00,0x04,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x94,0x20,0x53,0x04,
+-	0x01,0x00,0x92,0x18,0xd1,0x0c,0x10,0x04,0x01,0x00,0x01,0xff,0xe3,0x80,0x88,0x00,
+-	0x10,0x08,0x01,0xff,0xe3,0x80,0x89,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xcf,0x86,
+-	0x55,0x04,0x01,0x00,0x54,0x04,0x01,0x00,0x53,0x04,0x01,0x00,0xd2,0x0c,0x51,0x04,
+-	0x01,0x00,0x10,0x04,0x01,0x00,0x04,0x00,0x91,0x08,0x10,0x04,0x06,0x00,0x04,0x00,
+-	0x04,0x00,0xd0,0x1e,0xcf,0x86,0x95,0x18,0x54,0x04,0x04,0x00,0x53,0x04,0x04,0x00,
+-	0x92,0x0c,0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x06,0x00,0x06,0x00,0x06,0x00,
+-	0xcf,0x86,0xd5,0x2c,0xd4,0x14,0x53,0x04,0x06,0x00,0x52,0x04,0x06,0x00,0x51,0x04,
+-	0x06,0x00,0x10,0x04,0x06,0x00,0x07,0x00,0xd3,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,
+-	0x07,0x00,0x08,0x00,0x08,0x00,0x08,0x00,0x12,0x04,0x08,0x00,0x09,0x00,0xd4,0x14,
+-	0x53,0x04,0x09,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x0b,0x00,0x0c,0x00,0x0c,0x00,
+-	0x0c,0x00,0xd3,0x08,0x12,0x04,0x0c,0x00,0x10,0x00,0xd2,0x0c,0x51,0x04,0x10,0x00,
+-	0x10,0x04,0x10,0x00,0x12,0x00,0x51,0x04,0x12,0x00,0x10,0x04,0x12,0x00,0x13,0x00,
+-	0xd3,0xa6,0xd2,0x74,0xd1,0x40,0xd0,0x22,0xcf,0x86,0x55,0x04,0x01,0x00,0x94,0x18,
+-	0x93,0x14,0x52,0x04,0x01,0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x04,0x00,0x10,0x04,
+-	0x04,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x86,0x95,0x18,0x94,0x14,0x53,0x04,
+-	0x01,0x00,0x92,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x00,0x00,
+-	0x00,0x00,0x01,0x00,0xd0,0x06,0xcf,0x06,0x01,0x00,0xcf,0x86,0x55,0x04,0x01,0x00,
+-	0xd4,0x14,0x53,0x04,0x01,0x00,0x92,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,
+-	0x06,0x00,0x06,0x00,0x53,0x04,0x06,0x00,0x52,0x04,0x06,0x00,0x51,0x04,0x06,0x00,
+-	0x10,0x04,0x06,0x00,0x07,0x00,0xd1,0x06,0xcf,0x06,0x01,0x00,0xd0,0x1a,0xcf,0x86,
+-	0x95,0x14,0x54,0x04,0x01,0x00,0x93,0x0c,0x52,0x04,0x01,0x00,0x11,0x04,0x01,0x00,
+-	0x06,0x00,0x06,0x00,0x01,0x00,0xcf,0x86,0x55,0x04,0x01,0x00,0x54,0x04,0x01,0x00,
+-	0x13,0x04,0x04,0x00,0x06,0x00,0xd2,0xdc,0xd1,0x48,0xd0,0x26,0xcf,0x86,0x95,0x20,
+-	0x54,0x04,0x01,0x00,0xd3,0x0c,0x52,0x04,0x01,0x00,0x11,0x04,0x07,0x00,0x06,0x00,
+-	0x92,0x0c,0x91,0x08,0x10,0x04,0x08,0x00,0x04,0x00,0x01,0x00,0x01,0x00,0x01,0x00,
+-	0xcf,0x86,0x55,0x04,0x01,0x00,0x54,0x04,0x01,0x00,0xd3,0x0c,0x92,0x08,0x11,0x04,
+-	0x04,0x00,0x06,0x00,0x06,0x00,0x52,0x04,0x06,0x00,0x11,0x04,0x06,0x00,0x08,0x00,
+-	0xd0,0x5e,0xcf,0x86,0xd5,0x2c,0xd4,0x10,0x53,0x04,0x06,0x00,0x92,0x08,0x11,0x04,
+-	0x06,0x00,0x07,0x00,0x07,0x00,0xd3,0x0c,0x92,0x08,0x11,0x04,0x07,0x00,0x08,0x00,
+-	0x08,0x00,0x52,0x04,0x08,0x00,0x91,0x08,0x10,0x04,0x08,0x00,0x0a,0x00,0x0b,0x00,
+-	0xd4,0x10,0x93,0x0c,0x92,0x08,0x11,0x04,0x07,0x00,0x08,0x00,0x08,0x00,0x08,0x00,
+-	0xd3,0x10,0x92,0x0c,0x51,0x04,0x08,0x00,0x10,0x04,0x09,0x00,0x0a,0x00,0x0a,0x00,
+-	0x52,0x04,0x0a,0x00,0x91,0x08,0x10,0x04,0x0a,0x00,0x0b,0x00,0x0b,0x00,0xcf,0x86,
+-	0xd5,0x1c,0x94,0x18,0xd3,0x08,0x12,0x04,0x0a,0x00,0x0b,0x00,0x52,0x04,0x0b,0x00,
+-	0x51,0x04,0x0b,0x00,0x10,0x04,0x0c,0x00,0x0b,0x00,0x0b,0x00,0x94,0x14,0x93,0x10,
+-	0x92,0x0c,0x51,0x04,0x0b,0x00,0x10,0x04,0x0c,0x00,0x0b,0x00,0x0c,0x00,0x0b,0x00,
+-	0x0b,0x00,0xd1,0xa8,0xd0,0x42,0xcf,0x86,0xd5,0x28,0x94,0x24,0xd3,0x18,0xd2,0x0c,
+-	0x91,0x08,0x10,0x04,0x10,0x00,0x01,0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x01,0x00,
+-	0x0c,0x00,0x01,0x00,0x92,0x08,0x11,0x04,0x01,0x00,0x0c,0x00,0x01,0x00,0x01,0x00,
+-	0x94,0x14,0x53,0x04,0x01,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x0c,0x00,0x01,0x00,
+-	0x01,0x00,0x01,0x00,0x01,0x00,0xcf,0x86,0xd5,0x40,0xd4,0x18,0x53,0x04,0x01,0x00,
+-	0x52,0x04,0x01,0x00,0xd1,0x08,0x10,0x04,0x0c,0x00,0x01,0x00,0x10,0x04,0x0c,0x00,
+-	0x01,0x00,0xd3,0x18,0xd2,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x0c,0x00,
+-	0x51,0x04,0x0c,0x00,0x10,0x04,0x01,0x00,0x0b,0x00,0x52,0x04,0x01,0x00,0x51,0x04,
+-	0x01,0x00,0x10,0x04,0x01,0x00,0x0c,0x00,0xd4,0x14,0x93,0x10,0x92,0x0c,0x91,0x08,
+-	0x10,0x04,0x0c,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x06,0x00,0x93,0x0c,0x52,0x04,
+-	0x06,0x00,0x11,0x04,0x06,0x00,0x01,0x00,0x01,0x00,0xd0,0x3e,0xcf,0x86,0xd5,0x18,
+-	0x54,0x04,0x01,0x00,0x93,0x10,0x52,0x04,0x01,0x00,0x91,0x08,0x10,0x04,0x01,0x00,
+-	0x0c,0x00,0x0c,0x00,0x01,0x00,0x54,0x04,0x01,0x00,0xd3,0x10,0x92,0x0c,0x91,0x08,
+-	0x10,0x04,0x0c,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x52,0x04,0x01,0x00,0x51,0x04,
+-	0x01,0x00,0x10,0x04,0x01,0x00,0x0c,0x00,0xcf,0x86,0xd5,0x2c,0x94,0x28,0xd3,0x10,
+-	0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00,0x10,0x04,0x08,0x00,0x09,0x00,0xd2,0x0c,
+-	0x51,0x04,0x09,0x00,0x10,0x04,0x09,0x00,0x0d,0x00,0x91,0x08,0x10,0x04,0x0a,0x00,
+-	0x0d,0x00,0x0c,0x00,0x06,0x00,0x94,0x0c,0x53,0x04,0x06,0x00,0x12,0x04,0x06,0x00,
+-	0x0a,0x00,0x06,0x00,0xe4,0x39,0x01,0xd3,0x0c,0xd2,0x06,0xcf,0x06,0x04,0x00,0xcf,
+-	0x06,0x06,0x00,0xd2,0x30,0xd1,0x06,0xcf,0x06,0x06,0x00,0xd0,0x06,0xcf,0x06,0x06,
+-	0x00,0xcf,0x86,0x95,0x1e,0x54,0x04,0x06,0x00,0x53,0x04,0x06,0x00,0x52,0x04,0x06,
+-	0x00,0x91,0x0e,0x10,0x0a,0x06,0xff,0xe2,0xab,0x9d,0xcc,0xb8,0x00,0x06,0x00,0x06,
+-	0x00,0x06,0x00,0xd1,0x80,0xd0,0x3a,0xcf,0x86,0xd5,0x28,0xd4,0x10,0x53,0x04,0x07,
+-	0x00,0x52,0x04,0x07,0x00,0x11,0x04,0x07,0x00,0x08,0x00,0xd3,0x08,0x12,0x04,0x08,
+-	0x00,0x09,0x00,0x92,0x0c,0x51,0x04,0x09,0x00,0x10,0x04,0x09,0x00,0x0a,0x00,0x0a,
+-	0x00,0x94,0x0c,0x93,0x08,0x12,0x04,0x09,0x00,0x0a,0x00,0x0a,0x00,0x0a,0x00,0xcf,
+-	0x86,0xd5,0x30,0xd4,0x14,0x53,0x04,0x0a,0x00,0x52,0x04,0x0a,0x00,0x91,0x08,0x10,
+-	0x04,0x0a,0x00,0x10,0x00,0x10,0x00,0xd3,0x10,0x52,0x04,0x0a,0x00,0x91,0x08,0x10,
+-	0x04,0x0a,0x00,0x0b,0x00,0x0b,0x00,0x92,0x08,0x11,0x04,0x0b,0x00,0x10,0x00,0x10,
+-	0x00,0x54,0x04,0x10,0x00,0x93,0x0c,0x52,0x04,0x10,0x00,0x11,0x04,0x00,0x00,0x10,
+-	0x00,0x10,0x00,0xd0,0x32,0xcf,0x86,0xd5,0x14,0x54,0x04,0x10,0x00,0x93,0x0c,0x52,
+-	0x04,0x10,0x00,0x11,0x04,0x10,0x00,0x00,0x00,0x10,0x00,0x54,0x04,0x10,0x00,0x53,
+-	0x04,0x10,0x00,0xd2,0x08,0x11,0x04,0x10,0x00,0x14,0x00,0x91,0x08,0x10,0x04,0x14,
+-	0x00,0x10,0x00,0x10,0x00,0xcf,0x86,0xd5,0x28,0xd4,0x14,0x53,0x04,0x10,0x00,0x92,
+-	0x0c,0x91,0x08,0x10,0x04,0x10,0x00,0x15,0x00,0x10,0x00,0x10,0x00,0x93,0x10,0x92,
+-	0x0c,0x51,0x04,0x10,0x00,0x10,0x04,0x13,0x00,0x14,0x00,0x14,0x00,0x14,0x00,0xd4,
+-	0x0c,0x53,0x04,0x14,0x00,0x12,0x04,0x14,0x00,0x11,0x00,0x53,0x04,0x14,0x00,0x52,
+-	0x04,0x14,0x00,0x51,0x04,0x14,0x00,0x10,0x04,0x14,0x00,0x15,0x00,0xe3,0xb9,0x01,
+-	0xd2,0xac,0xd1,0x68,0xd0,0x1e,0xcf,0x86,0x55,0x04,0x08,0x00,0x94,0x14,0x53,0x04,
+-	0x08,0x00,0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00,0x10,0x04,0x08,0x00,0x00,0x00,
+-	0x08,0x00,0xcf,0x86,0xd5,0x18,0x54,0x04,0x08,0x00,0x53,0x04,0x08,0x00,0x52,0x04,
+-	0x08,0x00,0x51,0x04,0x08,0x00,0x10,0x04,0x08,0x00,0x00,0x00,0xd4,0x14,0x53,0x04,
+-	0x09,0x00,0x52,0x04,0x09,0x00,0x91,0x08,0x10,0x04,0x09,0x00,0x0a,0x00,0x0a,0x00,
+-	0xd3,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x0b,0x00,0x0a,0x00,0x0a,0x00,0x09,0x00,
+-	0x52,0x04,0x0a,0x00,0x11,0x04,0x0a,0x00,0x0b,0x00,0xd0,0x06,0xcf,0x06,0x08,0x00,
+-	0xcf,0x86,0x55,0x04,0x08,0x00,0xd4,0x1c,0x53,0x04,0x08,0x00,0xd2,0x0c,0x51,0x04,
+-	0x08,0x00,0x10,0x04,0x08,0x00,0x0b,0x00,0x51,0x04,0x0b,0x00,0x10,0x04,0x0b,0x00,
+-	0x0b,0xe6,0xd3,0x0c,0x92,0x08,0x11,0x04,0x0b,0xe6,0x0d,0x00,0x00,0x00,0x92,0x0c,
+-	0x91,0x08,0x10,0x04,0x00,0x00,0x08,0x00,0x08,0x00,0x08,0x00,0xd1,0x6c,0xd0,0x2a,
+-	0xcf,0x86,0x55,0x04,0x08,0x00,0x94,0x20,0xd3,0x10,0x52,0x04,0x08,0x00,0x51,0x04,
+-	0x08,0x00,0x10,0x04,0x00,0x00,0x0d,0x00,0x52,0x04,0x00,0x00,0x91,0x08,0x10,0x04,
+-	0x00,0x00,0x0d,0x00,0x00,0x00,0x08,0x00,0xcf,0x86,0x55,0x04,0x08,0x00,0xd4,0x1c,
+-	0xd3,0x0c,0x52,0x04,0x08,0x00,0x11,0x04,0x08,0x00,0x0d,0x00,0x52,0x04,0x00,0x00,
+-	0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x08,0x00,0xd3,0x10,0x92,0x0c,0x91,0x08,
+-	0x10,0x04,0x0c,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x52,0x04,0x00,0x00,0x51,0x04,
+-	0x00,0x00,0x10,0x04,0x00,0x00,0x0c,0x09,0xd0,0x5a,0xcf,0x86,0xd5,0x18,0x54,0x04,
+-	0x08,0x00,0x93,0x10,0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00,0x10,0x04,0x08,0x00,
+-	0x00,0x00,0x00,0x00,0xd4,0x20,0xd3,0x10,0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00,
+-	0x10,0x04,0x08,0x00,0x00,0x00,0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00,0x10,0x04,
+-	0x08,0x00,0x00,0x00,0xd3,0x10,0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00,0x10,0x04,
+-	0x08,0x00,0x00,0x00,0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00,0x10,0x04,0x08,0x00,
+-	0x00,0x00,0xcf,0x86,0x95,0x40,0xd4,0x20,0xd3,0x10,0x52,0x04,0x08,0x00,0x51,0x04,
+-	0x08,0x00,0x10,0x04,0x08,0x00,0x00,0x00,0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00,
+-	0x10,0x04,0x08,0x00,0x00,0x00,0xd3,0x10,0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00,
+-	0x10,0x04,0x08,0x00,0x00,0x00,0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00,0x10,0x04,
+-	0x08,0x00,0x00,0x00,0x0a,0xe6,0xd2,0x9c,0xd1,0x68,0xd0,0x32,0xcf,0x86,0xd5,0x14,
+-	0x54,0x04,0x08,0x00,0x53,0x04,0x08,0x00,0x52,0x04,0x0a,0x00,0x11,0x04,0x08,0x00,
+-	0x0a,0x00,0x54,0x04,0x0a,0x00,0xd3,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x0a,0x00,
+-	0x0b,0x00,0x0d,0x00,0x0d,0x00,0x12,0x04,0x0d,0x00,0x10,0x00,0xcf,0x86,0x95,0x30,
+-	0x94,0x2c,0xd3,0x18,0xd2,0x0c,0x51,0x04,0x10,0x00,0x10,0x04,0x10,0x00,0x12,0x00,
+-	0x91,0x08,0x10,0x04,0x12,0x00,0x13,0x00,0x13,0x00,0xd2,0x08,0x11,0x04,0x13,0x00,
+-	0x14,0x00,0x51,0x04,0x14,0x00,0x10,0x04,0x14,0x00,0x15,0x00,0x00,0x00,0x00,0x00,
+-	0xd0,0x1e,0xcf,0x86,0x95,0x18,0x54,0x04,0x04,0x00,0x53,0x04,0x04,0x00,0x92,0x0c,
+-	0x51,0x04,0x04,0x00,0x10,0x04,0x00,0x00,0x04,0x00,0x04,0x00,0x04,0x00,0xcf,0x86,
+-	0x55,0x04,0x04,0x00,0x54,0x04,0x04,0x00,0x93,0x08,0x12,0x04,0x04,0x00,0x00,0x00,
+-	0x00,0x00,0xd1,0x06,0xcf,0x06,0x04,0x00,0xd0,0x06,0xcf,0x06,0x04,0x00,0xcf,0x86,
+-	0xd5,0x14,0x54,0x04,0x04,0x00,0x93,0x0c,0x52,0x04,0x04,0x00,0x11,0x04,0x04,0x00,
+-	0x00,0x00,0x00,0x00,0x54,0x04,0x00,0x00,0x53,0x04,0x04,0x00,0x12,0x04,0x04,0x00,
+-	0x00,0x00,0xcf,0x86,0xe5,0x8d,0x05,0xe4,0x86,0x05,0xe3,0x7d,0x04,0xe2,0xe4,0x03,
+-	0xe1,0xc0,0x01,0xd0,0x3e,0xcf,0x86,0x55,0x04,0x01,0x00,0xd4,0x1c,0x53,0x04,0x01,
+-	0x00,0xd2,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0xda,0x01,0xe4,0x91,0x08,0x10,
+-	0x04,0x01,0xe8,0x01,0xde,0x01,0xe0,0x53,0x04,0x01,0x00,0xd2,0x0c,0x51,0x04,0x04,
+-	0x00,0x10,0x04,0x04,0x00,0x06,0x00,0x51,0x04,0x06,0x00,0x10,0x04,0x04,0x00,0x01,
+-	0x00,0xcf,0x86,0xd5,0xaa,0xd4,0x32,0xd3,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x00,
+-	0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x52,0x04,0x01,0x00,0xd1,0x0f,0x10,0x0b,0x01,
+-	0xff,0xe3,0x81,0x8b,0xe3,0x82,0x99,0x00,0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,0x81,
+-	0x8d,0xe3,0x82,0x99,0x00,0x01,0x00,0xd3,0x3c,0xd2,0x1e,0xd1,0x0f,0x10,0x0b,0x01,
+-	0xff,0xe3,0x81,0x8f,0xe3,0x82,0x99,0x00,0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,0x81,
+-	0x91,0xe3,0x82,0x99,0x00,0x01,0x00,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe3,0x81,0x93,
+-	0xe3,0x82,0x99,0x00,0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,0x81,0x95,0xe3,0x82,0x99,
+-	0x00,0x01,0x00,0xd2,0x1e,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe3,0x81,0x97,0xe3,0x82,
+-	0x99,0x00,0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,0x81,0x99,0xe3,0x82,0x99,0x00,0x01,
+-	0x00,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe3,0x81,0x9b,0xe3,0x82,0x99,0x00,0x01,0x00,
+-	0x10,0x0b,0x01,0xff,0xe3,0x81,0x9d,0xe3,0x82,0x99,0x00,0x01,0x00,0xd4,0x53,0xd3,
+-	0x3c,0xd2,0x1e,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe3,0x81,0x9f,0xe3,0x82,0x99,0x00,
+-	0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,0x81,0xa1,0xe3,0x82,0x99,0x00,0x01,0x00,0xd1,
+-	0x0f,0x10,0x04,0x01,0x00,0x01,0xff,0xe3,0x81,0xa4,0xe3,0x82,0x99,0x00,0x10,0x04,
+-	0x01,0x00,0x01,0xff,0xe3,0x81,0xa6,0xe3,0x82,0x99,0x00,0x92,0x13,0x91,0x0f,0x10,
+-	0x04,0x01,0x00,0x01,0xff,0xe3,0x81,0xa8,0xe3,0x82,0x99,0x00,0x01,0x00,0x01,0x00,
+-	0xd3,0x4a,0xd2,0x25,0xd1,0x16,0x10,0x0b,0x01,0xff,0xe3,0x81,0xaf,0xe3,0x82,0x99,
+-	0x00,0x01,0xff,0xe3,0x81,0xaf,0xe3,0x82,0x9a,0x00,0x10,0x04,0x01,0x00,0x01,0xff,
+-	0xe3,0x81,0xb2,0xe3,0x82,0x99,0x00,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe3,0x81,0xb2,
+-	0xe3,0x82,0x9a,0x00,0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,0x81,0xb5,0xe3,0x82,0x99,
+-	0x00,0x01,0xff,0xe3,0x81,0xb5,0xe3,0x82,0x9a,0x00,0xd2,0x1e,0xd1,0x0f,0x10,0x04,
+-	0x01,0x00,0x01,0xff,0xe3,0x81,0xb8,0xe3,0x82,0x99,0x00,0x10,0x0b,0x01,0xff,0xe3,
+-	0x81,0xb8,0xe3,0x82,0x9a,0x00,0x01,0x00,0x91,0x16,0x10,0x0b,0x01,0xff,0xe3,0x81,
+-	0xbb,0xe3,0x82,0x99,0x00,0x01,0xff,0xe3,0x81,0xbb,0xe3,0x82,0x9a,0x00,0x01,0x00,
+-	0xd0,0xee,0xcf,0x86,0xd5,0x42,0x54,0x04,0x01,0x00,0xd3,0x1b,0x52,0x04,0x01,0x00,
+-	0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe3,0x81,0x86,0xe3,0x82,0x99,0x00,0x06,0x00,0x10,
+-	0x04,0x06,0x00,0x00,0x00,0xd2,0x10,0xd1,0x08,0x10,0x04,0x00,0x00,0x01,0x08,0x10,
+-	0x04,0x01,0x08,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,0x82,0x9d,
+-	0xe3,0x82,0x99,0x00,0x06,0x00,0xd4,0x32,0xd3,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,
+-	0x06,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x52,0x04,0x01,0x00,0xd1,0x0f,0x10,0x0b,
+-	0x01,0xff,0xe3,0x82,0xab,0xe3,0x82,0x99,0x00,0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,
+-	0x82,0xad,0xe3,0x82,0x99,0x00,0x01,0x00,0xd3,0x3c,0xd2,0x1e,0xd1,0x0f,0x10,0x0b,
+-	0x01,0xff,0xe3,0x82,0xaf,0xe3,0x82,0x99,0x00,0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,
+-	0x82,0xb1,0xe3,0x82,0x99,0x00,0x01,0x00,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe3,0x82,
+-	0xb3,0xe3,0x82,0x99,0x00,0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,0x82,0xb5,0xe3,0x82,
+-	0x99,0x00,0x01,0x00,0xd2,0x1e,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe3,0x82,0xb7,0xe3,
+-	0x82,0x99,0x00,0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,0x82,0xb9,0xe3,0x82,0x99,0x00,
+-	0x01,0x00,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe3,0x82,0xbb,0xe3,0x82,0x99,0x00,0x01,
+-	0x00,0x10,0x0b,0x01,0xff,0xe3,0x82,0xbd,0xe3,0x82,0x99,0x00,0x01,0x00,0xcf,0x86,
+-	0xd5,0xd5,0xd4,0x53,0xd3,0x3c,0xd2,0x1e,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe3,0x82,
+-	0xbf,0xe3,0x82,0x99,0x00,0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,0x83,0x81,0xe3,0x82,
+-	0x99,0x00,0x01,0x00,0xd1,0x0f,0x10,0x04,0x01,0x00,0x01,0xff,0xe3,0x83,0x84,0xe3,
+-	0x82,0x99,0x00,0x10,0x04,0x01,0x00,0x01,0xff,0xe3,0x83,0x86,0xe3,0x82,0x99,0x00,
+-	0x92,0x13,0x91,0x0f,0x10,0x04,0x01,0x00,0x01,0xff,0xe3,0x83,0x88,0xe3,0x82,0x99,
+-	0x00,0x01,0x00,0x01,0x00,0xd3,0x4a,0xd2,0x25,0xd1,0x16,0x10,0x0b,0x01,0xff,0xe3,
+-	0x83,0x8f,0xe3,0x82,0x99,0x00,0x01,0xff,0xe3,0x83,0x8f,0xe3,0x82,0x9a,0x00,0x10,
+-	0x04,0x01,0x00,0x01,0xff,0xe3,0x83,0x92,0xe3,0x82,0x99,0x00,0xd1,0x0f,0x10,0x0b,
+-	0x01,0xff,0xe3,0x83,0x92,0xe3,0x82,0x9a,0x00,0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,
+-	0x83,0x95,0xe3,0x82,0x99,0x00,0x01,0xff,0xe3,0x83,0x95,0xe3,0x82,0x9a,0x00,0xd2,
+-	0x1e,0xd1,0x0f,0x10,0x04,0x01,0x00,0x01,0xff,0xe3,0x83,0x98,0xe3,0x82,0x99,0x00,
+-	0x10,0x0b,0x01,0xff,0xe3,0x83,0x98,0xe3,0x82,0x9a,0x00,0x01,0x00,0x91,0x16,0x10,
+-	0x0b,0x01,0xff,0xe3,0x83,0x9b,0xe3,0x82,0x99,0x00,0x01,0xff,0xe3,0x83,0x9b,0xe3,
+-	0x82,0x9a,0x00,0x01,0x00,0x54,0x04,0x01,0x00,0xd3,0x22,0x52,0x04,0x01,0x00,0xd1,
+-	0x0f,0x10,0x0b,0x01,0xff,0xe3,0x82,0xa6,0xe3,0x82,0x99,0x00,0x01,0x00,0x10,0x04,
+-	0x01,0x00,0x01,0xff,0xe3,0x83,0xaf,0xe3,0x82,0x99,0x00,0xd2,0x25,0xd1,0x16,0x10,
+-	0x0b,0x01,0xff,0xe3,0x83,0xb0,0xe3,0x82,0x99,0x00,0x01,0xff,0xe3,0x83,0xb1,0xe3,
+-	0x82,0x99,0x00,0x10,0x0b,0x01,0xff,0xe3,0x83,0xb2,0xe3,0x82,0x99,0x00,0x01,0x00,
+-	0x51,0x04,0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,0x83,0xbd,0xe3,0x82,0x99,0x00,0x06,
+-	0x00,0xd1,0x4c,0xd0,0x46,0xcf,0x86,0xd5,0x18,0x94,0x14,0x93,0x10,0x52,0x04,0x00,
+-	0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd4,
+-	0x18,0x53,0x04,0x01,0x00,0x52,0x04,0x01,0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x0a,
+-	0x00,0x10,0x04,0x13,0x00,0x14,0x00,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x00,
+-	0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xcf,0x06,0x01,0x00,0xd0,0x32,0xcf,
+-	0x86,0xd5,0x18,0x94,0x14,0x53,0x04,0x01,0x00,0x52,0x04,0x01,0x00,0x51,0x04,0x01,
+-	0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x54,0x04,0x04,0x00,0x53,0x04,0x04,
+-	0x00,0x92,0x0c,0x51,0x04,0x0c,0x00,0x10,0x04,0x0c,0x00,0x00,0x00,0x00,0x00,0xcf,
+-	0x86,0xd5,0x08,0x14,0x04,0x08,0x00,0x0a,0x00,0x94,0x0c,0x93,0x08,0x12,0x04,0x0a,
+-	0x00,0x00,0x00,0x00,0x00,0x06,0x00,0xd2,0xa4,0xd1,0x5c,0xd0,0x22,0xcf,0x86,0x95,
+-	0x1c,0x54,0x04,0x01,0x00,0x53,0x04,0x01,0x00,0x52,0x04,0x01,0x00,0xd1,0x08,0x10,
+-	0x04,0x01,0x00,0x07,0x00,0x10,0x04,0x07,0x00,0x00,0x00,0x01,0x00,0xcf,0x86,0xd5,
+-	0x20,0xd4,0x0c,0x93,0x08,0x12,0x04,0x01,0x00,0x0b,0x00,0x0b,0x00,0x93,0x10,0x92,
+-	0x0c,0x91,0x08,0x10,0x04,0x07,0x00,0x06,0x00,0x06,0x00,0x06,0x00,0x06,0x00,0x54,
+-	0x04,0x01,0x00,0x53,0x04,0x01,0x00,0x52,0x04,0x01,0x00,0x51,0x04,0x07,0x00,0x10,
+-	0x04,0x08,0x00,0x01,0x00,0xd0,0x1e,0xcf,0x86,0x55,0x04,0x01,0x00,0x54,0x04,0x01,
+-	0x00,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x06,0x00,0x06,0x00,0x06,
+-	0x00,0x06,0x00,0xcf,0x86,0xd5,0x10,0x94,0x0c,0x53,0x04,0x01,0x00,0x12,0x04,0x01,
+-	0x00,0x07,0x00,0x01,0x00,0x54,0x04,0x01,0x00,0x53,0x04,0x01,0x00,0x52,0x04,0x01,
+-	0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x16,0x00,0xd1,0x30,0xd0,0x06,0xcf,
+-	0x06,0x01,0x00,0xcf,0x86,0x55,0x04,0x01,0x00,0x54,0x04,0x01,0x00,0xd3,0x10,0x52,
+-	0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x07,0x00,0x92,0x0c,0x51,
+-	0x04,0x07,0x00,0x10,0x04,0x07,0x00,0x01,0x00,0x01,0x00,0xd0,0x06,0xcf,0x06,0x01,
+-	0x00,0xcf,0x86,0xd5,0x14,0x54,0x04,0x01,0x00,0x53,0x04,0x01,0x00,0x52,0x04,0x01,
+-	0x00,0x11,0x04,0x01,0x00,0x07,0x00,0x54,0x04,0x01,0x00,0x53,0x04,0x01,0x00,0x52,
+-	0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x07,0x00,0xcf,0x06,0x04,
+-	0x00,0xcf,0x06,0x04,0x00,0xd1,0x48,0xd0,0x40,0xcf,0x86,0xd5,0x06,0xcf,0x06,0x04,
+-	0x00,0xd4,0x06,0xcf,0x06,0x04,0x00,0xd3,0x2c,0xd2,0x06,0xcf,0x06,0x04,0x00,0xd1,
+-	0x06,0xcf,0x06,0x04,0x00,0xd0,0x1a,0xcf,0x86,0x55,0x04,0x04,0x00,0x54,0x04,0x04,
+-	0x00,0x93,0x0c,0x52,0x04,0x04,0x00,0x11,0x04,0x04,0x00,0x00,0x00,0x00,0x00,0xcf,
+-	0x06,0x07,0x00,0xcf,0x06,0x01,0x00,0xcf,0x86,0xcf,0x06,0x01,0x00,0xcf,0x86,0xcf,
+-	0x06,0x01,0x00,0xe2,0x71,0x05,0xd1,0x8c,0xd0,0x08,0xcf,0x86,0xcf,0x06,0x01,0x00,
+-	0xcf,0x86,0xd5,0x06,0xcf,0x06,0x01,0x00,0xd4,0x06,0xcf,0x06,0x01,0x00,0xd3,0x06,
+-	0xcf,0x06,0x01,0x00,0xd2,0x06,0xcf,0x06,0x01,0x00,0xd1,0x06,0xcf,0x06,0x01,0x00,
+-	0xd0,0x22,0xcf,0x86,0x55,0x04,0x01,0x00,0xd4,0x10,0x93,0x0c,0x52,0x04,0x01,0x00,
+-	0x11,0x04,0x01,0x00,0x08,0x00,0x08,0x00,0x53,0x04,0x08,0x00,0x12,0x04,0x08,0x00,
+-	0x0a,0x00,0xcf,0x86,0xd5,0x28,0xd4,0x18,0xd3,0x08,0x12,0x04,0x0a,0x00,0x0b,0x00,
+-	0x52,0x04,0x0b,0x00,0x91,0x08,0x10,0x04,0x0d,0x00,0x11,0x00,0x11,0x00,0x93,0x0c,
+-	0x52,0x04,0x11,0x00,0x11,0x04,0x11,0x00,0x13,0x00,0x13,0x00,0x94,0x14,0x53,0x04,
+-	0x13,0x00,0x92,0x0c,0x51,0x04,0x13,0x00,0x10,0x04,0x13,0x00,0x14,0x00,0x14,0x00,
+-	0x00,0x00,0xe0,0xdb,0x04,0xcf,0x86,0xe5,0xdf,0x01,0xd4,0x06,0xcf,0x06,0x04,0x00,
+-	0xd3,0x74,0xd2,0x6e,0xd1,0x06,0xcf,0x06,0x04,0x00,0xd0,0x3e,0xcf,0x86,0xd5,0x18,
+-	0x94,0x14,0x53,0x04,0x04,0x00,0x52,0x04,0x04,0x00,0x91,0x08,0x10,0x04,0x04,0x00,
+-	0x00,0x00,0x00,0x00,0x04,0x00,0xd4,0x10,0x93,0x0c,0x92,0x08,0x11,0x04,0x04,0x00,
+-	0x06,0x00,0x04,0x00,0x04,0x00,0x93,0x10,0x52,0x04,0x04,0x00,0x91,0x08,0x10,0x04,
+-	0x06,0x00,0x04,0x00,0x04,0x00,0x04,0x00,0xcf,0x86,0x95,0x24,0x94,0x20,0x93,0x1c,
+-	0xd2,0x0c,0x91,0x08,0x10,0x04,0x04,0x00,0x06,0x00,0x04,0x00,0xd1,0x08,0x10,0x04,
+-	0x04,0x00,0x06,0x00,0x10,0x04,0x04,0x00,0x00,0x00,0x00,0x00,0x0b,0x00,0x0b,0x00,
+-	0xcf,0x06,0x0a,0x00,0xd2,0x84,0xd1,0x4c,0xd0,0x16,0xcf,0x86,0x55,0x04,0x0a,0x00,
+-	0x94,0x0c,0x53,0x04,0x0a,0x00,0x12,0x04,0x0a,0x00,0x00,0x00,0x00,0x00,0xcf,0x86,
+-	0x55,0x04,0x0a,0x00,0xd4,0x1c,0xd3,0x0c,0x92,0x08,0x11,0x04,0x0c,0x00,0x0a,0x00,
+-	0x0a,0x00,0x52,0x04,0x0a,0x00,0x51,0x04,0x0a,0x00,0x10,0x04,0x0a,0x00,0x0a,0xe6,
+-	0xd3,0x08,0x12,0x04,0x0a,0x00,0x0d,0xe6,0x52,0x04,0x0d,0xe6,0x11,0x04,0x0a,0xe6,
+-	0x0a,0x00,0xd0,0x1e,0xcf,0x86,0x95,0x18,0x54,0x04,0x0a,0x00,0x53,0x04,0x0a,0x00,
+-	0x52,0x04,0x10,0x00,0x51,0x04,0x10,0x00,0x10,0x04,0x11,0xe6,0x0d,0xe6,0x0b,0x00,
+-	0xcf,0x86,0x55,0x04,0x0b,0x00,0x54,0x04,0x0b,0x00,0x93,0x0c,0x92,0x08,0x11,0x04,
+-	0x0b,0xe6,0x0b,0x00,0x0b,0x00,0x00,0x00,0xd1,0x40,0xd0,0x3a,0xcf,0x86,0xd5,0x24,
+-	0x54,0x04,0x08,0x00,0xd3,0x10,0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00,0x10,0x04,
+-	0x08,0x00,0x09,0x00,0x92,0x0c,0x51,0x04,0x09,0x00,0x10,0x04,0x09,0x00,0x0a,0x00,
+-	0x0a,0x00,0x94,0x10,0x93,0x0c,0x92,0x08,0x11,0x04,0x09,0x00,0x0a,0x00,0x0a,0x00,
+-	0x0a,0x00,0x0a,0x00,0xcf,0x06,0x0a,0x00,0xd0,0x5e,0xcf,0x86,0xd5,0x28,0xd4,0x18,
+-	0x53,0x04,0x0a,0x00,0x52,0x04,0x0a,0x00,0xd1,0x08,0x10,0x04,0x0a,0x00,0x0c,0x00,
+-	0x10,0x04,0x0c,0x00,0x11,0x00,0x93,0x0c,0x92,0x08,0x11,0x04,0x0c,0x00,0x0d,0x00,
+-	0x10,0x00,0x10,0x00,0xd4,0x1c,0x53,0x04,0x0c,0x00,0xd2,0x0c,0x51,0x04,0x0c,0x00,
+-	0x10,0x04,0x0d,0x00,0x10,0x00,0x51,0x04,0x10,0x00,0x10,0x04,0x12,0x00,0x14,0x00,
+-	0xd3,0x0c,0x92,0x08,0x11,0x04,0x10,0x00,0x11,0x00,0x11,0x00,0x92,0x08,0x11,0x04,
+-	0x14,0x00,0x15,0x00,0x15,0x00,0xcf,0x86,0xd5,0x1c,0x94,0x18,0x93,0x14,0xd2,0x08,
+-	0x11,0x04,0x00,0x00,0x15,0x00,0x51,0x04,0x15,0x00,0x10,0x04,0x15,0x00,0x00,0x00,
+-	0x00,0x00,0x00,0x00,0x54,0x04,0x00,0x00,0xd3,0x10,0x52,0x04,0x00,0x00,0x51,0x04,
+-	0x00,0x00,0x10,0x04,0x00,0x00,0x10,0x00,0x92,0x0c,0x51,0x04,0x0d,0x00,0x10,0x04,
+-	0x0c,0x00,0x0a,0x00,0x0a,0x00,0xe4,0xf2,0x02,0xe3,0x65,0x01,0xd2,0x98,0xd1,0x48,
+-	0xd0,0x36,0xcf,0x86,0xd5,0x18,0x94,0x14,0x93,0x10,0x52,0x04,0x08,0x00,0x51,0x04,
+-	0x08,0x00,0x10,0x04,0x08,0x09,0x08,0x00,0x08,0x00,0x08,0x00,0xd4,0x0c,0x53,0x04,
+-	0x08,0x00,0x12,0x04,0x08,0x00,0x00,0x00,0x53,0x04,0x0b,0x00,0x92,0x08,0x11,0x04,
+-	0x0b,0x00,0x00,0x00,0x00,0x00,0xcf,0x86,0x55,0x04,0x09,0x00,0x54,0x04,0x09,0x00,
+-	0x13,0x04,0x09,0x00,0x00,0x00,0xd0,0x06,0xcf,0x06,0x0a,0x00,0xcf,0x86,0xd5,0x2c,
+-	0xd4,0x1c,0xd3,0x10,0x52,0x04,0x0a,0x00,0x91,0x08,0x10,0x04,0x0a,0x09,0x12,0x00,
+-	0x00,0x00,0x52,0x04,0x00,0x00,0x11,0x04,0x00,0x00,0x0a,0x00,0x53,0x04,0x0a,0x00,
+-	0x92,0x08,0x11,0x04,0x0a,0x00,0x00,0x00,0x00,0x00,0x54,0x04,0x0b,0xe6,0xd3,0x0c,
+-	0x92,0x08,0x11,0x04,0x0b,0xe6,0x0b,0x00,0x0b,0x00,0x52,0x04,0x0b,0x00,0x11,0x04,
+-	0x11,0x00,0x14,0x00,0xd1,0x60,0xd0,0x22,0xcf,0x86,0x55,0x04,0x0a,0x00,0x94,0x18,
+-	0x53,0x04,0x0a,0x00,0xd2,0x0c,0x51,0x04,0x0a,0x00,0x10,0x04,0x0a,0x00,0x0a,0xdc,
+-	0x11,0x04,0x0a,0xdc,0x0a,0x00,0x0a,0x00,0xcf,0x86,0xd5,0x24,0x54,0x04,0x0a,0x00,
+-	0xd3,0x10,0x92,0x0c,0x51,0x04,0x0a,0x00,0x10,0x04,0x0a,0x00,0x0a,0x09,0x00,0x00,
+-	0x52,0x04,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x0a,0x00,0x54,0x04,
+-	0x0b,0x00,0x53,0x04,0x0b,0x00,0x52,0x04,0x0b,0x00,0x91,0x08,0x10,0x04,0x0b,0x00,
+-	0x00,0x00,0x00,0x00,0xd0,0x1e,0xcf,0x86,0x55,0x04,0x0b,0x00,0x54,0x04,0x0b,0x00,
+-	0x93,0x10,0x92,0x0c,0x51,0x04,0x0b,0x00,0x10,0x04,0x0b,0x00,0x0b,0x07,0x0b,0x00,
+-	0x0b,0x00,0xcf,0x86,0xd5,0x34,0xd4,0x20,0xd3,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,
+-	0x0b,0x09,0x0b,0x00,0x0b,0x00,0x0b,0x00,0x52,0x04,0x0b,0x00,0x51,0x04,0x0b,0x00,
+-	0x10,0x04,0x00,0x00,0x0b,0x00,0x53,0x04,0x0b,0x00,0xd2,0x08,0x11,0x04,0x0b,0x00,
+-	0x00,0x00,0x11,0x04,0x00,0x00,0x0b,0x00,0x54,0x04,0x10,0x00,0x53,0x04,0x10,0x00,
+-	0x52,0x04,0x10,0x00,0x51,0x04,0x10,0x00,0x10,0x04,0x10,0x00,0x00,0x00,0xd2,0xd0,
+-	0xd1,0x50,0xd0,0x1e,0xcf,0x86,0x55,0x04,0x0a,0x00,0x54,0x04,0x0a,0x00,0x93,0x10,
+-	0x52,0x04,0x0a,0x00,0x51,0x04,0x0a,0x00,0x10,0x04,0x0a,0x00,0x00,0x00,0x00,0x00,
+-	0xcf,0x86,0xd5,0x20,0xd4,0x10,0x53,0x04,0x0a,0x00,0x52,0x04,0x0a,0x00,0x11,0x04,
+-	0x0a,0x00,0x00,0x00,0x53,0x04,0x0a,0x00,0x92,0x08,0x11,0x04,0x0a,0x00,0x00,0x00,
+-	0x0a,0x00,0x54,0x04,0x0b,0x00,0x53,0x04,0x0b,0x00,0x12,0x04,0x0b,0x00,0x10,0x00,
+-	0xd0,0x3a,0xcf,0x86,0x55,0x04,0x0b,0x00,0x54,0x04,0x0b,0x00,0xd3,0x1c,0xd2,0x0c,
+-	0x91,0x08,0x10,0x04,0x0b,0xe6,0x0b,0x00,0x0b,0xe6,0xd1,0x08,0x10,0x04,0x0b,0xdc,
+-	0x0b,0x00,0x10,0x04,0x0b,0x00,0x0b,0xe6,0xd2,0x0c,0x91,0x08,0x10,0x04,0x0b,0xe6,
+-	0x0b,0x00,0x0b,0x00,0x11,0x04,0x0b,0x00,0x0b,0xe6,0xcf,0x86,0xd5,0x2c,0xd4,0x18,
+-	0x93,0x14,0x92,0x10,0xd1,0x08,0x10,0x04,0x0b,0x00,0x0b,0xe6,0x10,0x04,0x0b,0x00,
+-	0x00,0x00,0x00,0x00,0x00,0x00,0x53,0x04,0x00,0x00,0x92,0x0c,0x51,0x04,0x00,0x00,
+-	0x10,0x04,0x00,0x00,0x0b,0x00,0x0b,0x00,0x54,0x04,0x0d,0x00,0x93,0x10,0x52,0x04,
+-	0x0d,0x00,0x51,0x04,0x0d,0x00,0x10,0x04,0x0d,0x09,0x00,0x00,0x00,0x00,0xd1,0x8c,
+-	0xd0,0x72,0xcf,0x86,0xd5,0x4c,0xd4,0x30,0xd3,0x18,0xd2,0x0c,0x91,0x08,0x10,0x04,
+-	0x00,0x00,0x0c,0x00,0x0c,0x00,0x51,0x04,0x0c,0x00,0x10,0x04,0x0c,0x00,0x00,0x00,
++	0x10,0x09,0x01,0xff,0xcf,0x85,0xcc,0x80,0x00,0x01,0xff,0xcf,0x85,0xcc,0x81,0x00,
++	0xe1,0x8f,0x5b,0x10,0x09,0x01,0xff,0xcf,0x81,0xcc,0x94,0x00,0x01,0xff,0xc2,0xa8,
++	0xcc,0x80,0x00,0xd3,0x3b,0xd2,0x18,0x51,0x04,0x00,0x00,0x10,0x0b,0x01,0xff,0xcf,
++	0x89,0xcc,0x80,0xce,0xb9,0x00,0x01,0xff,0xcf,0x89,0xce,0xb9,0x00,0xd1,0x0f,0x10,
++	0x0b,0x01,0xff,0xcf,0x89,0xcc,0x81,0xce,0xb9,0x00,0x00,0x00,0x10,0x09,0x01,0xff,
++	0xcf,0x89,0xcd,0x82,0x00,0x01,0xff,0xcf,0x89,0xcd,0x82,0xce,0xb9,0x00,0xd2,0x24,
++	0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xbf,0xcc,0x80,0x00,0x01,0xff,0xce,0xbf,0xcc,
++	0x81,0x00,0x10,0x09,0x01,0xff,0xcf,0x89,0xcc,0x80,0x00,0x01,0xff,0xcf,0x89,0xcc,
++	0x81,0x00,0xe1,0x99,0x5b,0x10,0x09,0x01,0xff,0xcf,0x89,0xce,0xb9,0x00,0x01,0xff,
++	0xc2,0xb4,0x00,0xe0,0x0c,0x68,0xcf,0x86,0xe5,0x23,0x02,0xe4,0x25,0x01,0xe3,0x85,
++	0x5e,0xd2,0x2a,0xe1,0x5f,0x5c,0xe0,0xdd,0x5b,0xcf,0x86,0xe5,0xbb,0x5b,0x94,0x1b,
++	0xe3,0xa4,0x5b,0x92,0x14,0x91,0x10,0x10,0x08,0x01,0xff,0xe2,0x80,0x82,0x00,0x01,
++	0xff,0xe2,0x80,0x83,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd1,0xd6,0xd0,0x46,0xcf,
++	0x86,0x55,0x04,0x01,0x00,0xd4,0x29,0xd3,0x13,0x52,0x04,0x01,0x00,0x51,0x04,0x01,
++	0x00,0x10,0x07,0x01,0xff,0xcf,0x89,0x00,0x01,0x00,0x92,0x12,0x51,0x04,0x01,0x00,
++	0x10,0x06,0x01,0xff,0x6b,0x00,0x01,0xff,0x61,0xcc,0x8a,0x00,0x01,0x00,0xe3,0x25,
++	0x5d,0x92,0x10,0x51,0x04,0x01,0x00,0x10,0x08,0x01,0xff,0xe2,0x85,0x8e,0x00,0x01,
++	0x00,0x01,0x00,0xcf,0x86,0xd5,0x0a,0xe4,0x42,0x5d,0x63,0x2d,0x5d,0x06,0x00,0x94,
++	0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe2,0x85,0xb0,0x00,0x01,
++	0xff,0xe2,0x85,0xb1,0x00,0x10,0x08,0x01,0xff,0xe2,0x85,0xb2,0x00,0x01,0xff,0xe2,
++	0x85,0xb3,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe2,0x85,0xb4,0x00,0x01,0xff,0xe2,
++	0x85,0xb5,0x00,0x10,0x08,0x01,0xff,0xe2,0x85,0xb6,0x00,0x01,0xff,0xe2,0x85,0xb7,
++	0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe2,0x85,0xb8,0x00,0x01,0xff,0xe2,
++	0x85,0xb9,0x00,0x10,0x08,0x01,0xff,0xe2,0x85,0xba,0x00,0x01,0xff,0xe2,0x85,0xbb,
++	0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe2,0x85,0xbc,0x00,0x01,0xff,0xe2,0x85,0xbd,
++	0x00,0x10,0x08,0x01,0xff,0xe2,0x85,0xbe,0x00,0x01,0xff,0xe2,0x85,0xbf,0x00,0x01,
++	0x00,0xe0,0x34,0x5d,0xcf,0x86,0xe5,0x13,0x5d,0xe4,0xf2,0x5c,0xe3,0xe1,0x5c,0xe2,
++	0xd4,0x5c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x04,0xff,0xe2,0x86,0x84,0x00,
++	0xe3,0x23,0x61,0xe2,0xf0,0x60,0xd1,0x0c,0xe0,0x9d,0x60,0xcf,0x86,0x65,0x7e,0x60,
++	0x01,0x00,0xd0,0x62,0xcf,0x86,0x55,0x04,0x01,0x00,0x54,0x04,0x01,0x00,0xd3,0x18,
++	0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x08,0x01,0xff,0xe2,0x93,0x90,0x00,
++	0x01,0xff,0xe2,0x93,0x91,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe2,0x93,
++	0x92,0x00,0x01,0xff,0xe2,0x93,0x93,0x00,0x10,0x08,0x01,0xff,0xe2,0x93,0x94,0x00,
++	0x01,0xff,0xe2,0x93,0x95,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe2,0x93,0x96,0x00,
++	0x01,0xff,0xe2,0x93,0x97,0x00,0x10,0x08,0x01,0xff,0xe2,0x93,0x98,0x00,0x01,0xff,
++	0xe2,0x93,0x99,0x00,0xcf,0x86,0xe5,0x57,0x60,0x94,0x80,0xd3,0x40,0xd2,0x20,0xd1,
++	0x10,0x10,0x08,0x01,0xff,0xe2,0x93,0x9a,0x00,0x01,0xff,0xe2,0x93,0x9b,0x00,0x10,
++	0x08,0x01,0xff,0xe2,0x93,0x9c,0x00,0x01,0xff,0xe2,0x93,0x9d,0x00,0xd1,0x10,0x10,
++	0x08,0x01,0xff,0xe2,0x93,0x9e,0x00,0x01,0xff,0xe2,0x93,0x9f,0x00,0x10,0x08,0x01,
++	0xff,0xe2,0x93,0xa0,0x00,0x01,0xff,0xe2,0x93,0xa1,0x00,0xd2,0x20,0xd1,0x10,0x10,
++	0x08,0x01,0xff,0xe2,0x93,0xa2,0x00,0x01,0xff,0xe2,0x93,0xa3,0x00,0x10,0x08,0x01,
++	0xff,0xe2,0x93,0xa4,0x00,0x01,0xff,0xe2,0x93,0xa5,0x00,0xd1,0x10,0x10,0x08,0x01,
++	0xff,0xe2,0x93,0xa6,0x00,0x01,0xff,0xe2,0x93,0xa7,0x00,0x10,0x08,0x01,0xff,0xe2,
++	0x93,0xa8,0x00,0x01,0xff,0xe2,0x93,0xa9,0x00,0x01,0x00,0xd4,0x0c,0xe3,0x33,0x62,
++	0xe2,0x2c,0x62,0xcf,0x06,0x04,0x00,0xe3,0x0c,0x65,0xe2,0xff,0x63,0xe1,0x2e,0x02,
++	0xe0,0x84,0x01,0xcf,0x86,0xe5,0x01,0x01,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,
++	0x10,0x08,0x08,0xff,0xe2,0xb0,0xb0,0x00,0x08,0xff,0xe2,0xb0,0xb1,0x00,0x10,0x08,
++	0x08,0xff,0xe2,0xb0,0xb2,0x00,0x08,0xff,0xe2,0xb0,0xb3,0x00,0xd1,0x10,0x10,0x08,
++	0x08,0xff,0xe2,0xb0,0xb4,0x00,0x08,0xff,0xe2,0xb0,0xb5,0x00,0x10,0x08,0x08,0xff,
++	0xe2,0xb0,0xb6,0x00,0x08,0xff,0xe2,0xb0,0xb7,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,
++	0x08,0xff,0xe2,0xb0,0xb8,0x00,0x08,0xff,0xe2,0xb0,0xb9,0x00,0x10,0x08,0x08,0xff,
++	0xe2,0xb0,0xba,0x00,0x08,0xff,0xe2,0xb0,0xbb,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,
++	0xe2,0xb0,0xbc,0x00,0x08,0xff,0xe2,0xb0,0xbd,0x00,0x10,0x08,0x08,0xff,0xe2,0xb0,
++	0xbe,0x00,0x08,0xff,0xe2,0xb0,0xbf,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,
++	0x08,0xff,0xe2,0xb1,0x80,0x00,0x08,0xff,0xe2,0xb1,0x81,0x00,0x10,0x08,0x08,0xff,
++	0xe2,0xb1,0x82,0x00,0x08,0xff,0xe2,0xb1,0x83,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,
++	0xe2,0xb1,0x84,0x00,0x08,0xff,0xe2,0xb1,0x85,0x00,0x10,0x08,0x08,0xff,0xe2,0xb1,
++	0x86,0x00,0x08,0xff,0xe2,0xb1,0x87,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x08,0xff,
++	0xe2,0xb1,0x88,0x00,0x08,0xff,0xe2,0xb1,0x89,0x00,0x10,0x08,0x08,0xff,0xe2,0xb1,
++	0x8a,0x00,0x08,0xff,0xe2,0xb1,0x8b,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,0xe2,0xb1,
++	0x8c,0x00,0x08,0xff,0xe2,0xb1,0x8d,0x00,0x10,0x08,0x08,0xff,0xe2,0xb1,0x8e,0x00,
++	0x08,0xff,0xe2,0xb1,0x8f,0x00,0x94,0x7c,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,
++	0x08,0xff,0xe2,0xb1,0x90,0x00,0x08,0xff,0xe2,0xb1,0x91,0x00,0x10,0x08,0x08,0xff,
++	0xe2,0xb1,0x92,0x00,0x08,0xff,0xe2,0xb1,0x93,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,
++	0xe2,0xb1,0x94,0x00,0x08,0xff,0xe2,0xb1,0x95,0x00,0x10,0x08,0x08,0xff,0xe2,0xb1,
++	0x96,0x00,0x08,0xff,0xe2,0xb1,0x97,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x08,0xff,
++	0xe2,0xb1,0x98,0x00,0x08,0xff,0xe2,0xb1,0x99,0x00,0x10,0x08,0x08,0xff,0xe2,0xb1,
++	0x9a,0x00,0x08,0xff,0xe2,0xb1,0x9b,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,0xe2,0xb1,
++	0x9c,0x00,0x08,0xff,0xe2,0xb1,0x9d,0x00,0x10,0x08,0x08,0xff,0xe2,0xb1,0x9e,0x00,
++	0x00,0x00,0x08,0x00,0xcf,0x86,0xd5,0x07,0x64,0xef,0x61,0x08,0x00,0xd4,0x63,0xd3,
++	0x32,0xd2,0x1b,0xd1,0x0c,0x10,0x08,0x09,0xff,0xe2,0xb1,0xa1,0x00,0x09,0x00,0x10,
++	0x07,0x09,0xff,0xc9,0xab,0x00,0x09,0xff,0xe1,0xb5,0xbd,0x00,0xd1,0x0b,0x10,0x07,
++	0x09,0xff,0xc9,0xbd,0x00,0x09,0x00,0x10,0x04,0x09,0x00,0x09,0xff,0xe2,0xb1,0xa8,
++	0x00,0xd2,0x18,0xd1,0x0c,0x10,0x04,0x09,0x00,0x09,0xff,0xe2,0xb1,0xaa,0x00,0x10,
++	0x04,0x09,0x00,0x09,0xff,0xe2,0xb1,0xac,0x00,0xd1,0x0b,0x10,0x04,0x09,0x00,0x0a,
++	0xff,0xc9,0x91,0x00,0x10,0x07,0x0a,0xff,0xc9,0xb1,0x00,0x0a,0xff,0xc9,0x90,0x00,
++	0xd3,0x27,0xd2,0x17,0xd1,0x0b,0x10,0x07,0x0b,0xff,0xc9,0x92,0x00,0x0a,0x00,0x10,
++	0x08,0x0a,0xff,0xe2,0xb1,0xb3,0x00,0x0a,0x00,0x91,0x0c,0x10,0x04,0x09,0x00,0x09,
++	0xff,0xe2,0xb1,0xb6,0x00,0x09,0x00,0x52,0x04,0x0a,0x00,0x51,0x04,0x0a,0x00,0x10,
++	0x07,0x0b,0xff,0xc8,0xbf,0x00,0x0b,0xff,0xc9,0x80,0x00,0xe0,0x83,0x01,0xcf,0x86,
++	0xd5,0xc0,0xd4,0x60,0xd3,0x30,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb2,
++	0x81,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb2,0x83,0x00,0x08,0x00,0xd1,0x0c,
++	0x10,0x08,0x08,0xff,0xe2,0xb2,0x85,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb2,
++	0x87,0x00,0x08,0x00,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb2,0x89,0x00,
++	0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb2,0x8b,0x00,0x08,0x00,0xd1,0x0c,0x10,0x08,
++	0x08,0xff,0xe2,0xb2,0x8d,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb2,0x8f,0x00,
++	0x08,0x00,0xd3,0x30,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb2,0x91,0x00,
++	0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb2,0x93,0x00,0x08,0x00,0xd1,0x0c,0x10,0x08,
++	0x08,0xff,0xe2,0xb2,0x95,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb2,0x97,0x00,
++	0x08,0x00,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb2,0x99,0x00,0x08,0x00,
++	0x10,0x08,0x08,0xff,0xe2,0xb2,0x9b,0x00,0x08,0x00,0xd1,0x0c,0x10,0x08,0x08,0xff,
++	0xe2,0xb2,0x9d,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb2,0x9f,0x00,0x08,0x00,
++	0xd4,0x60,0xd3,0x30,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb2,0xa1,0x00,
++	0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb2,0xa3,0x00,0x08,0x00,0xd1,0x0c,0x10,0x08,
++	0x08,0xff,0xe2,0xb2,0xa5,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb2,0xa7,0x00,
++	0x08,0x00,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb2,0xa9,0x00,0x08,0x00,
++	0x10,0x08,0x08,0xff,0xe2,0xb2,0xab,0x00,0x08,0x00,0xd1,0x0c,0x10,0x08,0x08,0xff,
++	0xe2,0xb2,0xad,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb2,0xaf,0x00,0x08,0x00,
++	0xd3,0x30,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb2,0xb1,0x00,0x08,0x00,
++	0x10,0x08,0x08,0xff,0xe2,0xb2,0xb3,0x00,0x08,0x00,0xd1,0x0c,0x10,0x08,0x08,0xff,
++	0xe2,0xb2,0xb5,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb2,0xb7,0x00,0x08,0x00,
++	0xd2,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb2,0xb9,0x00,0x08,0x00,0x10,0x08,
++	0x08,0xff,0xe2,0xb2,0xbb,0x00,0x08,0x00,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb2,
++	0xbd,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb2,0xbf,0x00,0x08,0x00,0xcf,0x86,
++	0xd5,0xc0,0xd4,0x60,0xd3,0x30,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb3,
++	0x81,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb3,0x83,0x00,0x08,0x00,0xd1,0x0c,
++	0x10,0x08,0x08,0xff,0xe2,0xb3,0x85,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb3,
++	0x87,0x00,0x08,0x00,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb3,0x89,0x00,
++	0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb3,0x8b,0x00,0x08,0x00,0xd1,0x0c,0x10,0x08,
++	0x08,0xff,0xe2,0xb3,0x8d,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb3,0x8f,0x00,
++	0x08,0x00,0xd3,0x30,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb3,0x91,0x00,
++	0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb3,0x93,0x00,0x08,0x00,0xd1,0x0c,0x10,0x08,
++	0x08,0xff,0xe2,0xb3,0x95,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb3,0x97,0x00,
++	0x08,0x00,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb3,0x99,0x00,0x08,0x00,
++	0x10,0x08,0x08,0xff,0xe2,0xb3,0x9b,0x00,0x08,0x00,0xd1,0x0c,0x10,0x08,0x08,0xff,
++	0xe2,0xb3,0x9d,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb3,0x9f,0x00,0x08,0x00,
++	0xd4,0x3b,0xd3,0x1c,0x92,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb3,0xa1,0x00,
++	0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb3,0xa3,0x00,0x08,0x00,0x08,0x00,0xd2,0x10,
++	0x51,0x04,0x08,0x00,0x10,0x04,0x08,0x00,0x0b,0xff,0xe2,0xb3,0xac,0x00,0xe1,0x3b,
++	0x5f,0x10,0x04,0x0b,0x00,0x0b,0xff,0xe2,0xb3,0xae,0x00,0xe3,0x40,0x5f,0x92,0x10,
++	0x51,0x04,0x0b,0xe6,0x10,0x08,0x0d,0xff,0xe2,0xb3,0xb3,0x00,0x0d,0x00,0x00,0x00,
++	0xe2,0x98,0x08,0xd1,0x0b,0xe0,0x11,0x67,0xcf,0x86,0xcf,0x06,0x01,0x00,0xe0,0x65,
++	0x6c,0xcf,0x86,0xe5,0xa7,0x05,0xd4,0x06,0xcf,0x06,0x04,0x00,0xd3,0x0c,0xe2,0xf8,
++	0x67,0xe1,0x8f,0x67,0xcf,0x06,0x04,0x00,0xe2,0xdb,0x01,0xe1,0x26,0x01,0xd0,0x09,
++	0xcf,0x86,0x65,0xf4,0x67,0x0a,0x00,0xcf,0x86,0xd5,0xc0,0xd4,0x60,0xd3,0x30,0xd2,
++	0x18,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x99,0x81,0x00,0x0a,0x00,0x10,0x08,0x0a,
++	0xff,0xea,0x99,0x83,0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x99,0x85,
++	0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x99,0x87,0x00,0x0a,0x00,0xd2,0x18,0xd1,
++	0x0c,0x10,0x08,0x0a,0xff,0xea,0x99,0x89,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,
++	0x99,0x8b,0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x99,0x8d,0x00,0x0a,
++	0x00,0x10,0x08,0x0a,0xff,0xea,0x99,0x8f,0x00,0x0a,0x00,0xd3,0x30,0xd2,0x18,0xd1,
++	0x0c,0x10,0x08,0x0a,0xff,0xea,0x99,0x91,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,
++	0x99,0x93,0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x99,0x95,0x00,0x0a,
++	0x00,0x10,0x08,0x0a,0xff,0xea,0x99,0x97,0x00,0x0a,0x00,0xd2,0x18,0xd1,0x0c,0x10,
++	0x08,0x0a,0xff,0xea,0x99,0x99,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x99,0x9b,
++	0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x99,0x9d,0x00,0x0a,0x00,0x10,
++	0x08,0x0a,0xff,0xea,0x99,0x9f,0x00,0x0a,0x00,0xe4,0x5d,0x67,0xd3,0x30,0xd2,0x18,
++	0xd1,0x0c,0x10,0x08,0x0c,0xff,0xea,0x99,0xa1,0x00,0x0c,0x00,0x10,0x08,0x0a,0xff,
++	0xea,0x99,0xa3,0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x99,0xa5,0x00,
++	0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x99,0xa7,0x00,0x0a,0x00,0xd2,0x18,0xd1,0x0c,
++	0x10,0x08,0x0a,0xff,0xea,0x99,0xa9,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x99,
++	0xab,0x00,0x0a,0x00,0xe1,0x0c,0x67,0x10,0x08,0x0a,0xff,0xea,0x99,0xad,0x00,0x0a,
++	0x00,0xe0,0x35,0x67,0xcf,0x86,0x95,0xab,0xd4,0x60,0xd3,0x30,0xd2,0x18,0xd1,0x0c,
++	0x10,0x08,0x0a,0xff,0xea,0x9a,0x81,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9a,
++	0x83,0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9a,0x85,0x00,0x0a,0x00,
++	0x10,0x08,0x0a,0xff,0xea,0x9a,0x87,0x00,0x0a,0x00,0xd2,0x18,0xd1,0x0c,0x10,0x08,
++	0x0a,0xff,0xea,0x9a,0x89,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9a,0x8b,0x00,
++	0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9a,0x8d,0x00,0x0a,0x00,0x10,0x08,
++	0x0a,0xff,0xea,0x9a,0x8f,0x00,0x0a,0x00,0xd3,0x30,0xd2,0x18,0xd1,0x0c,0x10,0x08,
++	0x0a,0xff,0xea,0x9a,0x91,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9a,0x93,0x00,
++	0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9a,0x95,0x00,0x0a,0x00,0x10,0x08,
++	0x0a,0xff,0xea,0x9a,0x97,0x00,0x0a,0x00,0xe2,0x92,0x66,0xd1,0x0c,0x10,0x08,0x10,
++	0xff,0xea,0x9a,0x99,0x00,0x10,0x00,0x10,0x08,0x10,0xff,0xea,0x9a,0x9b,0x00,0x10,
++	0x00,0x0b,0x00,0xe1,0x10,0x02,0xd0,0xb9,0xcf,0x86,0xd5,0x07,0x64,0x9e,0x66,0x08,
++	0x00,0xd4,0x58,0xd3,0x28,0xd2,0x10,0x51,0x04,0x09,0x00,0x10,0x08,0x0a,0xff,0xea,
++	0x9c,0xa3,0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9c,0xa5,0x00,0x0a,
++	0x00,0x10,0x08,0x0a,0xff,0xea,0x9c,0xa7,0x00,0x0a,0x00,0xd2,0x18,0xd1,0x0c,0x10,
++	0x08,0x0a,0xff,0xea,0x9c,0xa9,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9c,0xab,
++	0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9c,0xad,0x00,0x0a,0x00,0x10,
++	0x08,0x0a,0xff,0xea,0x9c,0xaf,0x00,0x0a,0x00,0xd3,0x28,0xd2,0x10,0x51,0x04,0x0a,
++	0x00,0x10,0x08,0x0a,0xff,0xea,0x9c,0xb3,0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,
++	0xff,0xea,0x9c,0xb5,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9c,0xb7,0x00,0x0a,
++	0x00,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9c,0xb9,0x00,0x0a,0x00,0x10,
++	0x08,0x0a,0xff,0xea,0x9c,0xbb,0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,
++	0x9c,0xbd,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9c,0xbf,0x00,0x0a,0x00,0xcf,
++	0x86,0xd5,0xc0,0xd4,0x60,0xd3,0x30,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,
++	0x9d,0x81,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9d,0x83,0x00,0x0a,0x00,0xd1,
++	0x0c,0x10,0x08,0x0a,0xff,0xea,0x9d,0x85,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,
++	0x9d,0x87,0x00,0x0a,0x00,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9d,0x89,
++	0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9d,0x8b,0x00,0x0a,0x00,0xd1,0x0c,0x10,
++	0x08,0x0a,0xff,0xea,0x9d,0x8d,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9d,0x8f,
++	0x00,0x0a,0x00,0xd3,0x30,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9d,0x91,
++	0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9d,0x93,0x00,0x0a,0x00,0xd1,0x0c,0x10,
++	0x08,0x0a,0xff,0xea,0x9d,0x95,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9d,0x97,
++	0x00,0x0a,0x00,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9d,0x99,0x00,0x0a,
++	0x00,0x10,0x08,0x0a,0xff,0xea,0x9d,0x9b,0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,
++	0xff,0xea,0x9d,0x9d,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9d,0x9f,0x00,0x0a,
++	0x00,0xd4,0x60,0xd3,0x30,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9d,0xa1,
++	0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9d,0xa3,0x00,0x0a,0x00,0xd1,0x0c,0x10,
++	0x08,0x0a,0xff,0xea,0x9d,0xa5,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9d,0xa7,
++	0x00,0x0a,0x00,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9d,0xa9,0x00,0x0a,
++	0x00,0x10,0x08,0x0a,0xff,0xea,0x9d,0xab,0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,
++	0xff,0xea,0x9d,0xad,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9d,0xaf,0x00,0x0a,
++	0x00,0x53,0x04,0x0a,0x00,0xd2,0x18,0xd1,0x0c,0x10,0x04,0x0a,0x00,0x0a,0xff,0xea,
++	0x9d,0xba,0x00,0x10,0x04,0x0a,0x00,0x0a,0xff,0xea,0x9d,0xbc,0x00,0xd1,0x0c,0x10,
++	0x04,0x0a,0x00,0x0a,0xff,0xe1,0xb5,0xb9,0x00,0x10,0x08,0x0a,0xff,0xea,0x9d,0xbf,
++	0x00,0x0a,0x00,0xe0,0x71,0x01,0xcf,0x86,0xd5,0xa6,0xd4,0x4e,0xd3,0x30,0xd2,0x18,
++	0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9e,0x81,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,
++	0xea,0x9e,0x83,0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9e,0x85,0x00,
++	0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9e,0x87,0x00,0x0a,0x00,0xd2,0x10,0x51,0x04,
++	0x0a,0x00,0x10,0x04,0x0a,0x00,0x0a,0xff,0xea,0x9e,0x8c,0x00,0xe1,0x9a,0x64,0x10,
++	0x04,0x0a,0x00,0x0c,0xff,0xc9,0xa5,0x00,0xd3,0x28,0xd2,0x18,0xd1,0x0c,0x10,0x08,
++	0x0c,0xff,0xea,0x9e,0x91,0x00,0x0c,0x00,0x10,0x08,0x0d,0xff,0xea,0x9e,0x93,0x00,
++	0x0d,0x00,0x51,0x04,0x10,0x00,0x10,0x08,0x10,0xff,0xea,0x9e,0x97,0x00,0x10,0x00,
++	0xd2,0x18,0xd1,0x0c,0x10,0x08,0x10,0xff,0xea,0x9e,0x99,0x00,0x10,0x00,0x10,0x08,
++	0x10,0xff,0xea,0x9e,0x9b,0x00,0x10,0x00,0xd1,0x0c,0x10,0x08,0x10,0xff,0xea,0x9e,
++	0x9d,0x00,0x10,0x00,0x10,0x08,0x10,0xff,0xea,0x9e,0x9f,0x00,0x10,0x00,0xd4,0x63,
++	0xd3,0x30,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x0c,0xff,0xea,0x9e,0xa1,0x00,0x0c,0x00,
++	0x10,0x08,0x0c,0xff,0xea,0x9e,0xa3,0x00,0x0c,0x00,0xd1,0x0c,0x10,0x08,0x0c,0xff,
++	0xea,0x9e,0xa5,0x00,0x0c,0x00,0x10,0x08,0x0c,0xff,0xea,0x9e,0xa7,0x00,0x0c,0x00,
++	0xd2,0x1a,0xd1,0x0c,0x10,0x08,0x0c,0xff,0xea,0x9e,0xa9,0x00,0x0c,0x00,0x10,0x07,
++	0x0d,0xff,0xc9,0xa6,0x00,0x10,0xff,0xc9,0x9c,0x00,0xd1,0x0e,0x10,0x07,0x10,0xff,
++	0xc9,0xa1,0x00,0x10,0xff,0xc9,0xac,0x00,0x10,0x07,0x12,0xff,0xc9,0xaa,0x00,0x14,
++	0x00,0xd3,0x35,0xd2,0x1d,0xd1,0x0e,0x10,0x07,0x10,0xff,0xca,0x9e,0x00,0x10,0xff,
++	0xca,0x87,0x00,0x10,0x07,0x11,0xff,0xca,0x9d,0x00,0x11,0xff,0xea,0xad,0x93,0x00,
++	0xd1,0x0c,0x10,0x08,0x11,0xff,0xea,0x9e,0xb5,0x00,0x11,0x00,0x10,0x08,0x11,0xff,
++	0xea,0x9e,0xb7,0x00,0x11,0x00,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x14,0xff,0xea,0x9e,
++	0xb9,0x00,0x14,0x00,0x10,0x08,0x15,0xff,0xea,0x9e,0xbb,0x00,0x15,0x00,0xd1,0x0c,
++	0x10,0x08,0x15,0xff,0xea,0x9e,0xbd,0x00,0x15,0x00,0x10,0x08,0x15,0xff,0xea,0x9e,
++	0xbf,0x00,0x15,0x00,0xcf,0x86,0xe5,0xd4,0x63,0x94,0x2f,0x93,0x2b,0xd2,0x10,0x51,
++	0x04,0x00,0x00,0x10,0x08,0x15,0xff,0xea,0x9f,0x83,0x00,0x15,0x00,0xd1,0x0f,0x10,
++	0x08,0x15,0xff,0xea,0x9e,0x94,0x00,0x15,0xff,0xca,0x82,0x00,0x10,0x08,0x15,0xff,
++	0xe1,0xb6,0x8e,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xe4,0xb4,0x66,0xd3,0x1d,0xe2,
++	0x5b,0x64,0xe1,0x0a,0x64,0xe0,0xf7,0x63,0xcf,0x86,0xe5,0xd8,0x63,0x94,0x0b,0x93,
++	0x07,0x62,0xc3,0x63,0x08,0x00,0x08,0x00,0x08,0x00,0xd2,0x0f,0xe1,0x5a,0x65,0xe0,
++	0x27,0x65,0xcf,0x86,0x65,0x0c,0x65,0x0a,0x00,0xd1,0xab,0xd0,0x1a,0xcf,0x86,0xe5,
++	0x17,0x66,0xe4,0xfa,0x65,0xe3,0xe1,0x65,0xe2,0xd4,0x65,0x91,0x08,0x10,0x04,0x00,
++	0x00,0x0c,0x00,0x0c,0x00,0xcf,0x86,0x55,0x04,0x10,0x00,0xd4,0x0b,0x93,0x07,0x62,
++	0x27,0x66,0x11,0x00,0x00,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x11,0xff,
++	0xe1,0x8e,0xa0,0x00,0x11,0xff,0xe1,0x8e,0xa1,0x00,0x10,0x08,0x11,0xff,0xe1,0x8e,
++	0xa2,0x00,0x11,0xff,0xe1,0x8e,0xa3,0x00,0xd1,0x10,0x10,0x08,0x11,0xff,0xe1,0x8e,
++	0xa4,0x00,0x11,0xff,0xe1,0x8e,0xa5,0x00,0x10,0x08,0x11,0xff,0xe1,0x8e,0xa6,0x00,
++	0x11,0xff,0xe1,0x8e,0xa7,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x11,0xff,0xe1,0x8e,
++	0xa8,0x00,0x11,0xff,0xe1,0x8e,0xa9,0x00,0x10,0x08,0x11,0xff,0xe1,0x8e,0xaa,0x00,
++	0x11,0xff,0xe1,0x8e,0xab,0x00,0xd1,0x10,0x10,0x08,0x11,0xff,0xe1,0x8e,0xac,0x00,
++	0x11,0xff,0xe1,0x8e,0xad,0x00,0x10,0x08,0x11,0xff,0xe1,0x8e,0xae,0x00,0x11,0xff,
++	0xe1,0x8e,0xaf,0x00,0xe0,0xb2,0x65,0xcf,0x86,0xe5,0x01,0x01,0xd4,0x80,0xd3,0x40,
++	0xd2,0x20,0xd1,0x10,0x10,0x08,0x11,0xff,0xe1,0x8e,0xb0,0x00,0x11,0xff,0xe1,0x8e,
++	0xb1,0x00,0x10,0x08,0x11,0xff,0xe1,0x8e,0xb2,0x00,0x11,0xff,0xe1,0x8e,0xb3,0x00,
++	0xd1,0x10,0x10,0x08,0x11,0xff,0xe1,0x8e,0xb4,0x00,0x11,0xff,0xe1,0x8e,0xb5,0x00,
++	0x10,0x08,0x11,0xff,0xe1,0x8e,0xb6,0x00,0x11,0xff,0xe1,0x8e,0xb7,0x00,0xd2,0x20,
++	0xd1,0x10,0x10,0x08,0x11,0xff,0xe1,0x8e,0xb8,0x00,0x11,0xff,0xe1,0x8e,0xb9,0x00,
++	0x10,0x08,0x11,0xff,0xe1,0x8e,0xba,0x00,0x11,0xff,0xe1,0x8e,0xbb,0x00,0xd1,0x10,
++	0x10,0x08,0x11,0xff,0xe1,0x8e,0xbc,0x00,0x11,0xff,0xe1,0x8e,0xbd,0x00,0x10,0x08,
++	0x11,0xff,0xe1,0x8e,0xbe,0x00,0x11,0xff,0xe1,0x8e,0xbf,0x00,0xd3,0x40,0xd2,0x20,
++	0xd1,0x10,0x10,0x08,0x11,0xff,0xe1,0x8f,0x80,0x00,0x11,0xff,0xe1,0x8f,0x81,0x00,
++	0x10,0x08,0x11,0xff,0xe1,0x8f,0x82,0x00,0x11,0xff,0xe1,0x8f,0x83,0x00,0xd1,0x10,
++	0x10,0x08,0x11,0xff,0xe1,0x8f,0x84,0x00,0x11,0xff,0xe1,0x8f,0x85,0x00,0x10,0x08,
++	0x11,0xff,0xe1,0x8f,0x86,0x00,0x11,0xff,0xe1,0x8f,0x87,0x00,0xd2,0x20,0xd1,0x10,
++	0x10,0x08,0x11,0xff,0xe1,0x8f,0x88,0x00,0x11,0xff,0xe1,0x8f,0x89,0x00,0x10,0x08,
++	0x11,0xff,0xe1,0x8f,0x8a,0x00,0x11,0xff,0xe1,0x8f,0x8b,0x00,0xd1,0x10,0x10,0x08,
++	0x11,0xff,0xe1,0x8f,0x8c,0x00,0x11,0xff,0xe1,0x8f,0x8d,0x00,0x10,0x08,0x11,0xff,
++	0xe1,0x8f,0x8e,0x00,0x11,0xff,0xe1,0x8f,0x8f,0x00,0xd4,0x80,0xd3,0x40,0xd2,0x20,
++	0xd1,0x10,0x10,0x08,0x11,0xff,0xe1,0x8f,0x90,0x00,0x11,0xff,0xe1,0x8f,0x91,0x00,
++	0x10,0x08,0x11,0xff,0xe1,0x8f,0x92,0x00,0x11,0xff,0xe1,0x8f,0x93,0x00,0xd1,0x10,
++	0x10,0x08,0x11,0xff,0xe1,0x8f,0x94,0x00,0x11,0xff,0xe1,0x8f,0x95,0x00,0x10,0x08,
++	0x11,0xff,0xe1,0x8f,0x96,0x00,0x11,0xff,0xe1,0x8f,0x97,0x00,0xd2,0x20,0xd1,0x10,
++	0x10,0x08,0x11,0xff,0xe1,0x8f,0x98,0x00,0x11,0xff,0xe1,0x8f,0x99,0x00,0x10,0x08,
++	0x11,0xff,0xe1,0x8f,0x9a,0x00,0x11,0xff,0xe1,0x8f,0x9b,0x00,0xd1,0x10,0x10,0x08,
++	0x11,0xff,0xe1,0x8f,0x9c,0x00,0x11,0xff,0xe1,0x8f,0x9d,0x00,0x10,0x08,0x11,0xff,
++	0xe1,0x8f,0x9e,0x00,0x11,0xff,0xe1,0x8f,0x9f,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,
++	0x10,0x08,0x11,0xff,0xe1,0x8f,0xa0,0x00,0x11,0xff,0xe1,0x8f,0xa1,0x00,0x10,0x08,
++	0x11,0xff,0xe1,0x8f,0xa2,0x00,0x11,0xff,0xe1,0x8f,0xa3,0x00,0xd1,0x10,0x10,0x08,
++	0x11,0xff,0xe1,0x8f,0xa4,0x00,0x11,0xff,0xe1,0x8f,0xa5,0x00,0x10,0x08,0x11,0xff,
++	0xe1,0x8f,0xa6,0x00,0x11,0xff,0xe1,0x8f,0xa7,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,
++	0x11,0xff,0xe1,0x8f,0xa8,0x00,0x11,0xff,0xe1,0x8f,0xa9,0x00,0x10,0x08,0x11,0xff,
++	0xe1,0x8f,0xaa,0x00,0x11,0xff,0xe1,0x8f,0xab,0x00,0xd1,0x10,0x10,0x08,0x11,0xff,
++	0xe1,0x8f,0xac,0x00,0x11,0xff,0xe1,0x8f,0xad,0x00,0x10,0x08,0x11,0xff,0xe1,0x8f,
++	0xae,0x00,0x11,0xff,0xe1,0x8f,0xaf,0x00,0xd1,0x0c,0xe0,0xeb,0x63,0xcf,0x86,0xcf,
++	0x06,0x02,0xff,0xff,0xd0,0x08,0xcf,0x86,0xcf,0x06,0x01,0x00,0xcf,0x86,0xd5,0x06,
++	0xcf,0x06,0x01,0x00,0xd4,0xae,0xd3,0x09,0xe2,0x54,0x64,0xcf,0x06,0x01,0x00,0xd2,
++	0x27,0xe1,0x1f,0x70,0xe0,0x26,0x6e,0xcf,0x86,0xe5,0x3f,0x6d,0xe4,0xce,0x6c,0xe3,
++	0x99,0x6c,0xe2,0x78,0x6c,0xe1,0x67,0x6c,0x10,0x08,0x01,0xff,0xe5,0x88,0x87,0x00,
++	0x01,0xff,0xe5,0xba,0xa6,0x00,0xe1,0x74,0x74,0xe0,0xe8,0x73,0xcf,0x86,0xe5,0x22,
++	0x73,0xd4,0x3b,0x93,0x37,0xd2,0x1d,0xd1,0x0e,0x10,0x07,0x01,0xff,0x66,0x66,0x00,
++	0x01,0xff,0x66,0x69,0x00,0x10,0x07,0x01,0xff,0x66,0x6c,0x00,0x01,0xff,0x66,0x66,
++	0x69,0x00,0xd1,0x0f,0x10,0x08,0x01,0xff,0x66,0x66,0x6c,0x00,0x01,0xff,0x73,0x74,
++	0x00,0x10,0x07,0x01,0xff,0x73,0x74,0x00,0x00,0x00,0x00,0x00,0xe3,0xc8,0x72,0xd2,
++	0x11,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x01,0xff,0xd5,0xb4,0xd5,0xb6,0x00,
++	0xd1,0x12,0x10,0x09,0x01,0xff,0xd5,0xb4,0xd5,0xa5,0x00,0x01,0xff,0xd5,0xb4,0xd5,
++	0xab,0x00,0x10,0x09,0x01,0xff,0xd5,0xbe,0xd5,0xb6,0x00,0x01,0xff,0xd5,0xb4,0xd5,
++	0xad,0x00,0xd3,0x09,0xe2,0x40,0x74,0xcf,0x06,0x01,0x00,0xd2,0x13,0xe1,0x30,0x75,
++	0xe0,0xc1,0x74,0xcf,0x86,0xe5,0x9e,0x74,0x64,0x8d,0x74,0x06,0xff,0x00,0xe1,0x96,
++	0x75,0xe0,0x63,0x75,0xcf,0x86,0xd5,0x18,0x94,0x14,0x93,0x10,0x92,0x0c,0x91,0x08,
++	0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd4,0x7c,
++	0xd3,0x3c,0xd2,0x1c,0xd1,0x0c,0x10,0x04,0x01,0x00,0x01,0xff,0xef,0xbd,0x81,0x00,
++	0x10,0x08,0x01,0xff,0xef,0xbd,0x82,0x00,0x01,0xff,0xef,0xbd,0x83,0x00,0xd1,0x10,
++	0x10,0x08,0x01,0xff,0xef,0xbd,0x84,0x00,0x01,0xff,0xef,0xbd,0x85,0x00,0x10,0x08,
++	0x01,0xff,0xef,0xbd,0x86,0x00,0x01,0xff,0xef,0xbd,0x87,0x00,0xd2,0x20,0xd1,0x10,
++	0x10,0x08,0x01,0xff,0xef,0xbd,0x88,0x00,0x01,0xff,0xef,0xbd,0x89,0x00,0x10,0x08,
++	0x01,0xff,0xef,0xbd,0x8a,0x00,0x01,0xff,0xef,0xbd,0x8b,0x00,0xd1,0x10,0x10,0x08,
++	0x01,0xff,0xef,0xbd,0x8c,0x00,0x01,0xff,0xef,0xbd,0x8d,0x00,0x10,0x08,0x01,0xff,
++	0xef,0xbd,0x8e,0x00,0x01,0xff,0xef,0xbd,0x8f,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,
++	0x10,0x08,0x01,0xff,0xef,0xbd,0x90,0x00,0x01,0xff,0xef,0xbd,0x91,0x00,0x10,0x08,
++	0x01,0xff,0xef,0xbd,0x92,0x00,0x01,0xff,0xef,0xbd,0x93,0x00,0xd1,0x10,0x10,0x08,
++	0x01,0xff,0xef,0xbd,0x94,0x00,0x01,0xff,0xef,0xbd,0x95,0x00,0x10,0x08,0x01,0xff,
++	0xef,0xbd,0x96,0x00,0x01,0xff,0xef,0xbd,0x97,0x00,0x92,0x1c,0xd1,0x10,0x10,0x08,
++	0x01,0xff,0xef,0xbd,0x98,0x00,0x01,0xff,0xef,0xbd,0x99,0x00,0x10,0x08,0x01,0xff,
++	0xef,0xbd,0x9a,0x00,0x01,0x00,0x01,0x00,0x83,0xe2,0x87,0xb3,0xe1,0x60,0xb0,0xe0,
++	0xdd,0xae,0xcf,0x86,0xe5,0x81,0x9b,0xc4,0xe3,0xc1,0x07,0xe2,0x62,0x06,0xe1,0x11,
++	0x86,0xe0,0x09,0x05,0xcf,0x86,0xe5,0xfb,0x02,0xd4,0x1c,0xe3,0x7f,0x76,0xe2,0xd6,
++	0x75,0xe1,0xb1,0x75,0xe0,0x8a,0x75,0xcf,0x86,0xe5,0x57,0x75,0x94,0x07,0x63,0x42,
++	0x75,0x07,0x00,0x07,0x00,0xe3,0x2b,0x78,0xe2,0xf0,0x77,0xe1,0x77,0x01,0xe0,0x88,
++	0x77,0xcf,0x86,0xe5,0x21,0x01,0xd4,0x90,0xd3,0x48,0xd2,0x24,0xd1,0x12,0x10,0x09,
++	0x05,0xff,0xf0,0x90,0x90,0xa8,0x00,0x05,0xff,0xf0,0x90,0x90,0xa9,0x00,0x10,0x09,
++	0x05,0xff,0xf0,0x90,0x90,0xaa,0x00,0x05,0xff,0xf0,0x90,0x90,0xab,0x00,0xd1,0x12,
++	0x10,0x09,0x05,0xff,0xf0,0x90,0x90,0xac,0x00,0x05,0xff,0xf0,0x90,0x90,0xad,0x00,
++	0x10,0x09,0x05,0xff,0xf0,0x90,0x90,0xae,0x00,0x05,0xff,0xf0,0x90,0x90,0xaf,0x00,
++	0xd2,0x24,0xd1,0x12,0x10,0x09,0x05,0xff,0xf0,0x90,0x90,0xb0,0x00,0x05,0xff,0xf0,
++	0x90,0x90,0xb1,0x00,0x10,0x09,0x05,0xff,0xf0,0x90,0x90,0xb2,0x00,0x05,0xff,0xf0,
++	0x90,0x90,0xb3,0x00,0xd1,0x12,0x10,0x09,0x05,0xff,0xf0,0x90,0x90,0xb4,0x00,0x05,
++	0xff,0xf0,0x90,0x90,0xb5,0x00,0x10,0x09,0x05,0xff,0xf0,0x90,0x90,0xb6,0x00,0x05,
++	0xff,0xf0,0x90,0x90,0xb7,0x00,0xd3,0x48,0xd2,0x24,0xd1,0x12,0x10,0x09,0x05,0xff,
++	0xf0,0x90,0x90,0xb8,0x00,0x05,0xff,0xf0,0x90,0x90,0xb9,0x00,0x10,0x09,0x05,0xff,
++	0xf0,0x90,0x90,0xba,0x00,0x05,0xff,0xf0,0x90,0x90,0xbb,0x00,0xd1,0x12,0x10,0x09,
++	0x05,0xff,0xf0,0x90,0x90,0xbc,0x00,0x05,0xff,0xf0,0x90,0x90,0xbd,0x00,0x10,0x09,
++	0x05,0xff,0xf0,0x90,0x90,0xbe,0x00,0x05,0xff,0xf0,0x90,0x90,0xbf,0x00,0xd2,0x24,
++	0xd1,0x12,0x10,0x09,0x05,0xff,0xf0,0x90,0x91,0x80,0x00,0x05,0xff,0xf0,0x90,0x91,
++	0x81,0x00,0x10,0x09,0x05,0xff,0xf0,0x90,0x91,0x82,0x00,0x05,0xff,0xf0,0x90,0x91,
++	0x83,0x00,0xd1,0x12,0x10,0x09,0x05,0xff,0xf0,0x90,0x91,0x84,0x00,0x05,0xff,0xf0,
++	0x90,0x91,0x85,0x00,0x10,0x09,0x05,0xff,0xf0,0x90,0x91,0x86,0x00,0x05,0xff,0xf0,
++	0x90,0x91,0x87,0x00,0x94,0x4c,0x93,0x48,0xd2,0x24,0xd1,0x12,0x10,0x09,0x05,0xff,
++	0xf0,0x90,0x91,0x88,0x00,0x05,0xff,0xf0,0x90,0x91,0x89,0x00,0x10,0x09,0x05,0xff,
++	0xf0,0x90,0x91,0x8a,0x00,0x05,0xff,0xf0,0x90,0x91,0x8b,0x00,0xd1,0x12,0x10,0x09,
++	0x05,0xff,0xf0,0x90,0x91,0x8c,0x00,0x05,0xff,0xf0,0x90,0x91,0x8d,0x00,0x10,0x09,
++	0x07,0xff,0xf0,0x90,0x91,0x8e,0x00,0x07,0xff,0xf0,0x90,0x91,0x8f,0x00,0x05,0x00,
++	0x05,0x00,0xd0,0xa0,0xcf,0x86,0xd5,0x07,0x64,0x30,0x76,0x07,0x00,0xd4,0x07,0x63,
++	0x3d,0x76,0x07,0x00,0xd3,0x48,0xd2,0x24,0xd1,0x12,0x10,0x09,0x12,0xff,0xf0,0x90,
++	0x93,0x98,0x00,0x12,0xff,0xf0,0x90,0x93,0x99,0x00,0x10,0x09,0x12,0xff,0xf0,0x90,
++	0x93,0x9a,0x00,0x12,0xff,0xf0,0x90,0x93,0x9b,0x00,0xd1,0x12,0x10,0x09,0x12,0xff,
++	0xf0,0x90,0x93,0x9c,0x00,0x12,0xff,0xf0,0x90,0x93,0x9d,0x00,0x10,0x09,0x12,0xff,
++	0xf0,0x90,0x93,0x9e,0x00,0x12,0xff,0xf0,0x90,0x93,0x9f,0x00,0xd2,0x24,0xd1,0x12,
++	0x10,0x09,0x12,0xff,0xf0,0x90,0x93,0xa0,0x00,0x12,0xff,0xf0,0x90,0x93,0xa1,0x00,
++	0x10,0x09,0x12,0xff,0xf0,0x90,0x93,0xa2,0x00,0x12,0xff,0xf0,0x90,0x93,0xa3,0x00,
++	0xd1,0x12,0x10,0x09,0x12,0xff,0xf0,0x90,0x93,0xa4,0x00,0x12,0xff,0xf0,0x90,0x93,
++	0xa5,0x00,0x10,0x09,0x12,0xff,0xf0,0x90,0x93,0xa6,0x00,0x12,0xff,0xf0,0x90,0x93,
++	0xa7,0x00,0xcf,0x86,0xe5,0xc6,0x75,0xd4,0x90,0xd3,0x48,0xd2,0x24,0xd1,0x12,0x10,
++	0x09,0x12,0xff,0xf0,0x90,0x93,0xa8,0x00,0x12,0xff,0xf0,0x90,0x93,0xa9,0x00,0x10,
++	0x09,0x12,0xff,0xf0,0x90,0x93,0xaa,0x00,0x12,0xff,0xf0,0x90,0x93,0xab,0x00,0xd1,
++	0x12,0x10,0x09,0x12,0xff,0xf0,0x90,0x93,0xac,0x00,0x12,0xff,0xf0,0x90,0x93,0xad,
++	0x00,0x10,0x09,0x12,0xff,0xf0,0x90,0x93,0xae,0x00,0x12,0xff,0xf0,0x90,0x93,0xaf,
++	0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x12,0xff,0xf0,0x90,0x93,0xb0,0x00,0x12,0xff,
++	0xf0,0x90,0x93,0xb1,0x00,0x10,0x09,0x12,0xff,0xf0,0x90,0x93,0xb2,0x00,0x12,0xff,
++	0xf0,0x90,0x93,0xb3,0x00,0xd1,0x12,0x10,0x09,0x12,0xff,0xf0,0x90,0x93,0xb4,0x00,
++	0x12,0xff,0xf0,0x90,0x93,0xb5,0x00,0x10,0x09,0x12,0xff,0xf0,0x90,0x93,0xb6,0x00,
++	0x12,0xff,0xf0,0x90,0x93,0xb7,0x00,0x93,0x28,0x92,0x24,0xd1,0x12,0x10,0x09,0x12,
++	0xff,0xf0,0x90,0x93,0xb8,0x00,0x12,0xff,0xf0,0x90,0x93,0xb9,0x00,0x10,0x09,0x12,
++	0xff,0xf0,0x90,0x93,0xba,0x00,0x12,0xff,0xf0,0x90,0x93,0xbb,0x00,0x00,0x00,0x12,
++	0x00,0xd4,0x1f,0xe3,0xdf,0x76,0xe2,0x6a,0x76,0xe1,0x09,0x76,0xe0,0xea,0x75,0xcf,
++	0x86,0xe5,0xb7,0x75,0x94,0x0a,0xe3,0xa2,0x75,0x62,0x99,0x75,0x07,0x00,0x07,0x00,
++	0xe3,0xde,0x78,0xe2,0xaf,0x78,0xd1,0x09,0xe0,0x4c,0x78,0xcf,0x06,0x0b,0x00,0xe0,
++	0x7f,0x78,0xcf,0x86,0xe5,0x21,0x01,0xd4,0x90,0xd3,0x48,0xd2,0x24,0xd1,0x12,0x10,
++	0x09,0x11,0xff,0xf0,0x90,0xb3,0x80,0x00,0x11,0xff,0xf0,0x90,0xb3,0x81,0x00,0x10,
++	0x09,0x11,0xff,0xf0,0x90,0xb3,0x82,0x00,0x11,0xff,0xf0,0x90,0xb3,0x83,0x00,0xd1,
++	0x12,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0x84,0x00,0x11,0xff,0xf0,0x90,0xb3,0x85,
++	0x00,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0x86,0x00,0x11,0xff,0xf0,0x90,0xb3,0x87,
++	0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0x88,0x00,0x11,0xff,
++	0xf0,0x90,0xb3,0x89,0x00,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0x8a,0x00,0x11,0xff,
++	0xf0,0x90,0xb3,0x8b,0x00,0xd1,0x12,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0x8c,0x00,
++	0x11,0xff,0xf0,0x90,0xb3,0x8d,0x00,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0x8e,0x00,
++	0x11,0xff,0xf0,0x90,0xb3,0x8f,0x00,0xd3,0x48,0xd2,0x24,0xd1,0x12,0x10,0x09,0x11,
++	0xff,0xf0,0x90,0xb3,0x90,0x00,0x11,0xff,0xf0,0x90,0xb3,0x91,0x00,0x10,0x09,0x11,
++	0xff,0xf0,0x90,0xb3,0x92,0x00,0x11,0xff,0xf0,0x90,0xb3,0x93,0x00,0xd1,0x12,0x10,
++	0x09,0x11,0xff,0xf0,0x90,0xb3,0x94,0x00,0x11,0xff,0xf0,0x90,0xb3,0x95,0x00,0x10,
++	0x09,0x11,0xff,0xf0,0x90,0xb3,0x96,0x00,0x11,0xff,0xf0,0x90,0xb3,0x97,0x00,0xd2,
++	0x24,0xd1,0x12,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0x98,0x00,0x11,0xff,0xf0,0x90,
++	0xb3,0x99,0x00,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0x9a,0x00,0x11,0xff,0xf0,0x90,
++	0xb3,0x9b,0x00,0xd1,0x12,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0x9c,0x00,0x11,0xff,
++	0xf0,0x90,0xb3,0x9d,0x00,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0x9e,0x00,0x11,0xff,
++	0xf0,0x90,0xb3,0x9f,0x00,0xd4,0x90,0xd3,0x48,0xd2,0x24,0xd1,0x12,0x10,0x09,0x11,
++	0xff,0xf0,0x90,0xb3,0xa0,0x00,0x11,0xff,0xf0,0x90,0xb3,0xa1,0x00,0x10,0x09,0x11,
++	0xff,0xf0,0x90,0xb3,0xa2,0x00,0x11,0xff,0xf0,0x90,0xb3,0xa3,0x00,0xd1,0x12,0x10,
++	0x09,0x11,0xff,0xf0,0x90,0xb3,0xa4,0x00,0x11,0xff,0xf0,0x90,0xb3,0xa5,0x00,0x10,
++	0x09,0x11,0xff,0xf0,0x90,0xb3,0xa6,0x00,0x11,0xff,0xf0,0x90,0xb3,0xa7,0x00,0xd2,
++	0x24,0xd1,0x12,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0xa8,0x00,0x11,0xff,0xf0,0x90,
++	0xb3,0xa9,0x00,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0xaa,0x00,0x11,0xff,0xf0,0x90,
++	0xb3,0xab,0x00,0xd1,0x12,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0xac,0x00,0x11,0xff,
++	0xf0,0x90,0xb3,0xad,0x00,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0xae,0x00,0x11,0xff,
++	0xf0,0x90,0xb3,0xaf,0x00,0x93,0x23,0x92,0x1f,0xd1,0x12,0x10,0x09,0x11,0xff,0xf0,
++	0x90,0xb3,0xb0,0x00,0x11,0xff,0xf0,0x90,0xb3,0xb1,0x00,0x10,0x09,0x11,0xff,0xf0,
++	0x90,0xb3,0xb2,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x86,0xd5,0x15,0xe4,0x91,
++	0x7b,0xe3,0x9b,0x79,0xe2,0x94,0x78,0xe1,0xe4,0x77,0xe0,0x9d,0x77,0xcf,0x06,0x0c,
++	0x00,0xe4,0xeb,0x7e,0xe3,0x44,0x7e,0xe2,0xed,0x7d,0xd1,0x0c,0xe0,0xb2,0x7d,0xcf,
++	0x86,0x65,0x93,0x7d,0x14,0x00,0xe0,0xb6,0x7d,0xcf,0x86,0x55,0x04,0x00,0x00,0xd4,
++	0x90,0xd3,0x48,0xd2,0x24,0xd1,0x12,0x10,0x09,0x10,0xff,0xf0,0x91,0xa3,0x80,0x00,
++	0x10,0xff,0xf0,0x91,0xa3,0x81,0x00,0x10,0x09,0x10,0xff,0xf0,0x91,0xa3,0x82,0x00,
++	0x10,0xff,0xf0,0x91,0xa3,0x83,0x00,0xd1,0x12,0x10,0x09,0x10,0xff,0xf0,0x91,0xa3,
++	0x84,0x00,0x10,0xff,0xf0,0x91,0xa3,0x85,0x00,0x10,0x09,0x10,0xff,0xf0,0x91,0xa3,
++	0x86,0x00,0x10,0xff,0xf0,0x91,0xa3,0x87,0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x10,
++	0xff,0xf0,0x91,0xa3,0x88,0x00,0x10,0xff,0xf0,0x91,0xa3,0x89,0x00,0x10,0x09,0x10,
++	0xff,0xf0,0x91,0xa3,0x8a,0x00,0x10,0xff,0xf0,0x91,0xa3,0x8b,0x00,0xd1,0x12,0x10,
++	0x09,0x10,0xff,0xf0,0x91,0xa3,0x8c,0x00,0x10,0xff,0xf0,0x91,0xa3,0x8d,0x00,0x10,
++	0x09,0x10,0xff,0xf0,0x91,0xa3,0x8e,0x00,0x10,0xff,0xf0,0x91,0xa3,0x8f,0x00,0xd3,
++	0x48,0xd2,0x24,0xd1,0x12,0x10,0x09,0x10,0xff,0xf0,0x91,0xa3,0x90,0x00,0x10,0xff,
++	0xf0,0x91,0xa3,0x91,0x00,0x10,0x09,0x10,0xff,0xf0,0x91,0xa3,0x92,0x00,0x10,0xff,
++	0xf0,0x91,0xa3,0x93,0x00,0xd1,0x12,0x10,0x09,0x10,0xff,0xf0,0x91,0xa3,0x94,0x00,
++	0x10,0xff,0xf0,0x91,0xa3,0x95,0x00,0x10,0x09,0x10,0xff,0xf0,0x91,0xa3,0x96,0x00,
++	0x10,0xff,0xf0,0x91,0xa3,0x97,0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x10,0xff,0xf0,
++	0x91,0xa3,0x98,0x00,0x10,0xff,0xf0,0x91,0xa3,0x99,0x00,0x10,0x09,0x10,0xff,0xf0,
++	0x91,0xa3,0x9a,0x00,0x10,0xff,0xf0,0x91,0xa3,0x9b,0x00,0xd1,0x12,0x10,0x09,0x10,
++	0xff,0xf0,0x91,0xa3,0x9c,0x00,0x10,0xff,0xf0,0x91,0xa3,0x9d,0x00,0x10,0x09,0x10,
++	0xff,0xf0,0x91,0xa3,0x9e,0x00,0x10,0xff,0xf0,0x91,0xa3,0x9f,0x00,0xd1,0x11,0xe0,
++	0x12,0x81,0xcf,0x86,0xe5,0x09,0x81,0xe4,0xd2,0x80,0xcf,0x06,0x00,0x00,0xe0,0xdb,
++	0x82,0xcf,0x86,0xd5,0x06,0xcf,0x06,0x00,0x00,0xd4,0x09,0xe3,0x10,0x81,0xcf,0x06,
++	0x0c,0x00,0xd3,0x06,0xcf,0x06,0x00,0x00,0xe2,0x3b,0x82,0xe1,0x16,0x82,0xd0,0x06,
++	0xcf,0x06,0x00,0x00,0xcf,0x86,0xa5,0x21,0x01,0xd4,0x90,0xd3,0x48,0xd2,0x24,0xd1,
++	0x12,0x10,0x09,0x14,0xff,0xf0,0x96,0xb9,0xa0,0x00,0x14,0xff,0xf0,0x96,0xb9,0xa1,
++	0x00,0x10,0x09,0x14,0xff,0xf0,0x96,0xb9,0xa2,0x00,0x14,0xff,0xf0,0x96,0xb9,0xa3,
++	0x00,0xd1,0x12,0x10,0x09,0x14,0xff,0xf0,0x96,0xb9,0xa4,0x00,0x14,0xff,0xf0,0x96,
++	0xb9,0xa5,0x00,0x10,0x09,0x14,0xff,0xf0,0x96,0xb9,0xa6,0x00,0x14,0xff,0xf0,0x96,
++	0xb9,0xa7,0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x14,0xff,0xf0,0x96,0xb9,0xa8,0x00,
++	0x14,0xff,0xf0,0x96,0xb9,0xa9,0x00,0x10,0x09,0x14,0xff,0xf0,0x96,0xb9,0xaa,0x00,
++	0x14,0xff,0xf0,0x96,0xb9,0xab,0x00,0xd1,0x12,0x10,0x09,0x14,0xff,0xf0,0x96,0xb9,
++	0xac,0x00,0x14,0xff,0xf0,0x96,0xb9,0xad,0x00,0x10,0x09,0x14,0xff,0xf0,0x96,0xb9,
++	0xae,0x00,0x14,0xff,0xf0,0x96,0xb9,0xaf,0x00,0xd3,0x48,0xd2,0x24,0xd1,0x12,0x10,
++	0x09,0x14,0xff,0xf0,0x96,0xb9,0xb0,0x00,0x14,0xff,0xf0,0x96,0xb9,0xb1,0x00,0x10,
++	0x09,0x14,0xff,0xf0,0x96,0xb9,0xb2,0x00,0x14,0xff,0xf0,0x96,0xb9,0xb3,0x00,0xd1,
++	0x12,0x10,0x09,0x14,0xff,0xf0,0x96,0xb9,0xb4,0x00,0x14,0xff,0xf0,0x96,0xb9,0xb5,
++	0x00,0x10,0x09,0x14,0xff,0xf0,0x96,0xb9,0xb6,0x00,0x14,0xff,0xf0,0x96,0xb9,0xb7,
++	0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x14,0xff,0xf0,0x96,0xb9,0xb8,0x00,0x14,0xff,
++	0xf0,0x96,0xb9,0xb9,0x00,0x10,0x09,0x14,0xff,0xf0,0x96,0xb9,0xba,0x00,0x14,0xff,
++	0xf0,0x96,0xb9,0xbb,0x00,0xd1,0x12,0x10,0x09,0x14,0xff,0xf0,0x96,0xb9,0xbc,0x00,
++	0x14,0xff,0xf0,0x96,0xb9,0xbd,0x00,0x10,0x09,0x14,0xff,0xf0,0x96,0xb9,0xbe,0x00,
++	0x14,0xff,0xf0,0x96,0xb9,0xbf,0x00,0x14,0x00,0xd2,0x14,0xe1,0x25,0x82,0xe0,0x1c,
++	0x82,0xcf,0x86,0xe5,0xdd,0x81,0xe4,0x9a,0x81,0xcf,0x06,0x12,0x00,0xd1,0x0b,0xe0,
++	0x51,0x83,0xcf,0x86,0xcf,0x06,0x00,0x00,0xe0,0x95,0x8b,0xcf,0x86,0xd5,0x22,0xe4,
++	0xd0,0x88,0xe3,0x93,0x88,0xe2,0x38,0x88,0xe1,0x31,0x88,0xe0,0x2a,0x88,0xcf,0x86,
++	0xe5,0xfb,0x87,0xe4,0xe2,0x87,0x93,0x07,0x62,0xd1,0x87,0x12,0xe6,0x12,0xe6,0xe4,
++	0x36,0x89,0xe3,0x2f,0x89,0xd2,0x09,0xe1,0xb8,0x88,0xcf,0x06,0x10,0x00,0xe1,0x1f,
++	0x89,0xe0,0xec,0x88,0xcf,0x86,0xe5,0x21,0x01,0xd4,0x90,0xd3,0x48,0xd2,0x24,0xd1,
++	0x12,0x10,0x09,0x12,0xff,0xf0,0x9e,0xa4,0xa2,0x00,0x12,0xff,0xf0,0x9e,0xa4,0xa3,
++	0x00,0x10,0x09,0x12,0xff,0xf0,0x9e,0xa4,0xa4,0x00,0x12,0xff,0xf0,0x9e,0xa4,0xa5,
++	0x00,0xd1,0x12,0x10,0x09,0x12,0xff,0xf0,0x9e,0xa4,0xa6,0x00,0x12,0xff,0xf0,0x9e,
++	0xa4,0xa7,0x00,0x10,0x09,0x12,0xff,0xf0,0x9e,0xa4,0xa8,0x00,0x12,0xff,0xf0,0x9e,
++	0xa4,0xa9,0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x12,0xff,0xf0,0x9e,0xa4,0xaa,0x00,
++	0x12,0xff,0xf0,0x9e,0xa4,0xab,0x00,0x10,0x09,0x12,0xff,0xf0,0x9e,0xa4,0xac,0x00,
++	0x12,0xff,0xf0,0x9e,0xa4,0xad,0x00,0xd1,0x12,0x10,0x09,0x12,0xff,0xf0,0x9e,0xa4,
++	0xae,0x00,0x12,0xff,0xf0,0x9e,0xa4,0xaf,0x00,0x10,0x09,0x12,0xff,0xf0,0x9e,0xa4,
++	0xb0,0x00,0x12,0xff,0xf0,0x9e,0xa4,0xb1,0x00,0xd3,0x48,0xd2,0x24,0xd1,0x12,0x10,
++	0x09,0x12,0xff,0xf0,0x9e,0xa4,0xb2,0x00,0x12,0xff,0xf0,0x9e,0xa4,0xb3,0x00,0x10,
++	0x09,0x12,0xff,0xf0,0x9e,0xa4,0xb4,0x00,0x12,0xff,0xf0,0x9e,0xa4,0xb5,0x00,0xd1,
++	0x12,0x10,0x09,0x12,0xff,0xf0,0x9e,0xa4,0xb6,0x00,0x12,0xff,0xf0,0x9e,0xa4,0xb7,
++	0x00,0x10,0x09,0x12,0xff,0xf0,0x9e,0xa4,0xb8,0x00,0x12,0xff,0xf0,0x9e,0xa4,0xb9,
++	0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x12,0xff,0xf0,0x9e,0xa4,0xba,0x00,0x12,0xff,
++	0xf0,0x9e,0xa4,0xbb,0x00,0x10,0x09,0x12,0xff,0xf0,0x9e,0xa4,0xbc,0x00,0x12,0xff,
++	0xf0,0x9e,0xa4,0xbd,0x00,0xd1,0x12,0x10,0x09,0x12,0xff,0xf0,0x9e,0xa4,0xbe,0x00,
++	0x12,0xff,0xf0,0x9e,0xa4,0xbf,0x00,0x10,0x09,0x12,0xff,0xf0,0x9e,0xa5,0x80,0x00,
++	0x12,0xff,0xf0,0x9e,0xa5,0x81,0x00,0x94,0x1e,0x93,0x1a,0x92,0x16,0x91,0x12,0x10,
++	0x09,0x12,0xff,0xf0,0x9e,0xa5,0x82,0x00,0x12,0xff,0xf0,0x9e,0xa5,0x83,0x00,0x12,
++	0x00,0x12,0x00,0x12,0x00,0x12,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++	0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++	0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++	/* nfdi_c0100 */
++	0x57,0x04,0x01,0x00,0xc6,0xe5,0xac,0x13,0xe4,0x41,0x0c,0xe3,0x7a,0x07,0xe2,0xf3,
++	0x01,0xc1,0xd0,0x1f,0xcf,0x86,0x55,0x04,0x01,0x00,0x94,0x15,0x53,0x04,0x01,0x00,
++	0x52,0x04,0x01,0x00,0x91,0x09,0x10,0x04,0x01,0x00,0x01,0xff,0x00,0x01,0x00,0x01,
++	0x00,0xcf,0x86,0xd5,0xe4,0xd4,0x7c,0xd3,0x3c,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,
++	0xff,0x41,0xcc,0x80,0x00,0x01,0xff,0x41,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x41,
++	0xcc,0x82,0x00,0x01,0xff,0x41,0xcc,0x83,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x41,
++	0xcc,0x88,0x00,0x01,0xff,0x41,0xcc,0x8a,0x00,0x10,0x04,0x01,0x00,0x01,0xff,0x43,
++	0xcc,0xa7,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x45,0xcc,0x80,0x00,0x01,
++	0xff,0x45,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x45,0xcc,0x82,0x00,0x01,0xff,0x45,
++	0xcc,0x88,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x49,0xcc,0x80,0x00,0x01,0xff,0x49,
++	0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x49,0xcc,0x82,0x00,0x01,0xff,0x49,0xcc,0x88,
++	0x00,0xd3,0x38,0xd2,0x1c,0xd1,0x0c,0x10,0x04,0x01,0x00,0x01,0xff,0x4e,0xcc,0x83,
++	0x00,0x10,0x08,0x01,0xff,0x4f,0xcc,0x80,0x00,0x01,0xff,0x4f,0xcc,0x81,0x00,0xd1,
++	0x10,0x10,0x08,0x01,0xff,0x4f,0xcc,0x82,0x00,0x01,0xff,0x4f,0xcc,0x83,0x00,0x10,
++	0x08,0x01,0xff,0x4f,0xcc,0x88,0x00,0x01,0x00,0xd2,0x1c,0xd1,0x0c,0x10,0x04,0x01,
++	0x00,0x01,0xff,0x55,0xcc,0x80,0x00,0x10,0x08,0x01,0xff,0x55,0xcc,0x81,0x00,0x01,
++	0xff,0x55,0xcc,0x82,0x00,0x91,0x10,0x10,0x08,0x01,0xff,0x55,0xcc,0x88,0x00,0x01,
++	0xff,0x59,0xcc,0x81,0x00,0x01,0x00,0xd4,0x7c,0xd3,0x3c,0xd2,0x20,0xd1,0x10,0x10,
++	0x08,0x01,0xff,0x61,0xcc,0x80,0x00,0x01,0xff,0x61,0xcc,0x81,0x00,0x10,0x08,0x01,
++	0xff,0x61,0xcc,0x82,0x00,0x01,0xff,0x61,0xcc,0x83,0x00,0xd1,0x10,0x10,0x08,0x01,
++	0xff,0x61,0xcc,0x88,0x00,0x01,0xff,0x61,0xcc,0x8a,0x00,0x10,0x04,0x01,0x00,0x01,
++	0xff,0x63,0xcc,0xa7,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x65,0xcc,0x80,
++	0x00,0x01,0xff,0x65,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x65,0xcc,0x82,0x00,0x01,
++	0xff,0x65,0xcc,0x88,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x69,0xcc,0x80,0x00,0x01,
++	0xff,0x69,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x69,0xcc,0x82,0x00,0x01,0xff,0x69,
++	0xcc,0x88,0x00,0xd3,0x38,0xd2,0x1c,0xd1,0x0c,0x10,0x04,0x01,0x00,0x01,0xff,0x6e,
++	0xcc,0x83,0x00,0x10,0x08,0x01,0xff,0x6f,0xcc,0x80,0x00,0x01,0xff,0x6f,0xcc,0x81,
++	0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x6f,0xcc,0x82,0x00,0x01,0xff,0x6f,0xcc,0x83,
++	0x00,0x10,0x08,0x01,0xff,0x6f,0xcc,0x88,0x00,0x01,0x00,0xd2,0x1c,0xd1,0x0c,0x10,
++	0x04,0x01,0x00,0x01,0xff,0x75,0xcc,0x80,0x00,0x10,0x08,0x01,0xff,0x75,0xcc,0x81,
++	0x00,0x01,0xff,0x75,0xcc,0x82,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x75,0xcc,0x88,
++	0x00,0x01,0xff,0x79,0xcc,0x81,0x00,0x10,0x04,0x01,0x00,0x01,0xff,0x79,0xcc,0x88,
++	0x00,0xe1,0x9a,0x03,0xe0,0xd3,0x01,0xcf,0x86,0xd5,0xf4,0xd4,0x80,0xd3,0x40,0xd2,
++	0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x41,0xcc,0x84,0x00,0x01,0xff,0x61,0xcc,0x84,
++	0x00,0x10,0x08,0x01,0xff,0x41,0xcc,0x86,0x00,0x01,0xff,0x61,0xcc,0x86,0x00,0xd1,
++	0x10,0x10,0x08,0x01,0xff,0x41,0xcc,0xa8,0x00,0x01,0xff,0x61,0xcc,0xa8,0x00,0x10,
++	0x08,0x01,0xff,0x43,0xcc,0x81,0x00,0x01,0xff,0x63,0xcc,0x81,0x00,0xd2,0x20,0xd1,
++	0x10,0x10,0x08,0x01,0xff,0x43,0xcc,0x82,0x00,0x01,0xff,0x63,0xcc,0x82,0x00,0x10,
++	0x08,0x01,0xff,0x43,0xcc,0x87,0x00,0x01,0xff,0x63,0xcc,0x87,0x00,0xd1,0x10,0x10,
++	0x08,0x01,0xff,0x43,0xcc,0x8c,0x00,0x01,0xff,0x63,0xcc,0x8c,0x00,0x10,0x08,0x01,
++	0xff,0x44,0xcc,0x8c,0x00,0x01,0xff,0x64,0xcc,0x8c,0x00,0xd3,0x34,0xd2,0x14,0x51,
++	0x04,0x01,0x00,0x10,0x08,0x01,0xff,0x45,0xcc,0x84,0x00,0x01,0xff,0x65,0xcc,0x84,
++	0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x45,0xcc,0x86,0x00,0x01,0xff,0x65,0xcc,0x86,
++	0x00,0x10,0x08,0x01,0xff,0x45,0xcc,0x87,0x00,0x01,0xff,0x65,0xcc,0x87,0x00,0xd2,
++	0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x45,0xcc,0xa8,0x00,0x01,0xff,0x65,0xcc,0xa8,
++	0x00,0x10,0x08,0x01,0xff,0x45,0xcc,0x8c,0x00,0x01,0xff,0x65,0xcc,0x8c,0x00,0xd1,
++	0x10,0x10,0x08,0x01,0xff,0x47,0xcc,0x82,0x00,0x01,0xff,0x67,0xcc,0x82,0x00,0x10,
++	0x08,0x01,0xff,0x47,0xcc,0x86,0x00,0x01,0xff,0x67,0xcc,0x86,0x00,0xd4,0x74,0xd3,
++	0x34,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x47,0xcc,0x87,0x00,0x01,0xff,0x67,
++	0xcc,0x87,0x00,0x10,0x08,0x01,0xff,0x47,0xcc,0xa7,0x00,0x01,0xff,0x67,0xcc,0xa7,
++	0x00,0x91,0x10,0x10,0x08,0x01,0xff,0x48,0xcc,0x82,0x00,0x01,0xff,0x68,0xcc,0x82,
++	0x00,0x01,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x49,0xcc,0x83,0x00,0x01,
++	0xff,0x69,0xcc,0x83,0x00,0x10,0x08,0x01,0xff,0x49,0xcc,0x84,0x00,0x01,0xff,0x69,
++	0xcc,0x84,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x49,0xcc,0x86,0x00,0x01,0xff,0x69,
++	0xcc,0x86,0x00,0x10,0x08,0x01,0xff,0x49,0xcc,0xa8,0x00,0x01,0xff,0x69,0xcc,0xa8,
++	0x00,0xd3,0x30,0xd2,0x10,0x91,0x0c,0x10,0x08,0x01,0xff,0x49,0xcc,0x87,0x00,0x01,
++	0x00,0x01,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x4a,0xcc,0x82,0x00,0x01,0xff,0x6a,
++	0xcc,0x82,0x00,0x10,0x08,0x01,0xff,0x4b,0xcc,0xa7,0x00,0x01,0xff,0x6b,0xcc,0xa7,
++	0x00,0xd2,0x1c,0xd1,0x0c,0x10,0x04,0x01,0x00,0x01,0xff,0x4c,0xcc,0x81,0x00,0x10,
++	0x08,0x01,0xff,0x6c,0xcc,0x81,0x00,0x01,0xff,0x4c,0xcc,0xa7,0x00,0xd1,0x10,0x10,
++	0x08,0x01,0xff,0x6c,0xcc,0xa7,0x00,0x01,0xff,0x4c,0xcc,0x8c,0x00,0x10,0x08,0x01,
++	0xff,0x6c,0xcc,0x8c,0x00,0x01,0x00,0xcf,0x86,0xd5,0xd4,0xd4,0x60,0xd3,0x30,0xd2,
++	0x10,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x01,0xff,0x4e,0xcc,0x81,0x00,0xd1,
++	0x10,0x10,0x08,0x01,0xff,0x6e,0xcc,0x81,0x00,0x01,0xff,0x4e,0xcc,0xa7,0x00,0x10,
++	0x08,0x01,0xff,0x6e,0xcc,0xa7,0x00,0x01,0xff,0x4e,0xcc,0x8c,0x00,0xd2,0x10,0x91,
++	0x0c,0x10,0x08,0x01,0xff,0x6e,0xcc,0x8c,0x00,0x01,0x00,0x01,0x00,0xd1,0x10,0x10,
++	0x08,0x01,0xff,0x4f,0xcc,0x84,0x00,0x01,0xff,0x6f,0xcc,0x84,0x00,0x10,0x08,0x01,
++	0xff,0x4f,0xcc,0x86,0x00,0x01,0xff,0x6f,0xcc,0x86,0x00,0xd3,0x34,0xd2,0x14,0x91,
++	0x10,0x10,0x08,0x01,0xff,0x4f,0xcc,0x8b,0x00,0x01,0xff,0x6f,0xcc,0x8b,0x00,0x01,
++	0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x52,0xcc,0x81,0x00,0x01,0xff,0x72,0xcc,0x81,
++	0x00,0x10,0x08,0x01,0xff,0x52,0xcc,0xa7,0x00,0x01,0xff,0x72,0xcc,0xa7,0x00,0xd2,
++	0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x52,0xcc,0x8c,0x00,0x01,0xff,0x72,0xcc,0x8c,
++	0x00,0x10,0x08,0x01,0xff,0x53,0xcc,0x81,0x00,0x01,0xff,0x73,0xcc,0x81,0x00,0xd1,
++	0x10,0x10,0x08,0x01,0xff,0x53,0xcc,0x82,0x00,0x01,0xff,0x73,0xcc,0x82,0x00,0x10,
++	0x08,0x01,0xff,0x53,0xcc,0xa7,0x00,0x01,0xff,0x73,0xcc,0xa7,0x00,0xd4,0x74,0xd3,
++	0x34,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x53,0xcc,0x8c,0x00,0x01,0xff,0x73,
++	0xcc,0x8c,0x00,0x10,0x08,0x01,0xff,0x54,0xcc,0xa7,0x00,0x01,0xff,0x74,0xcc,0xa7,
++	0x00,0x91,0x10,0x10,0x08,0x01,0xff,0x54,0xcc,0x8c,0x00,0x01,0xff,0x74,0xcc,0x8c,
++	0x00,0x01,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x55,0xcc,0x83,0x00,0x01,
++	0xff,0x75,0xcc,0x83,0x00,0x10,0x08,0x01,0xff,0x55,0xcc,0x84,0x00,0x01,0xff,0x75,
++	0xcc,0x84,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x55,0xcc,0x86,0x00,0x01,0xff,0x75,
++	0xcc,0x86,0x00,0x10,0x08,0x01,0xff,0x55,0xcc,0x8a,0x00,0x01,0xff,0x75,0xcc,0x8a,
++	0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x55,0xcc,0x8b,0x00,0x01,
++	0xff,0x75,0xcc,0x8b,0x00,0x10,0x08,0x01,0xff,0x55,0xcc,0xa8,0x00,0x01,0xff,0x75,
++	0xcc,0xa8,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x57,0xcc,0x82,0x00,0x01,0xff,0x77,
++	0xcc,0x82,0x00,0x10,0x08,0x01,0xff,0x59,0xcc,0x82,0x00,0x01,0xff,0x79,0xcc,0x82,
++	0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x59,0xcc,0x88,0x00,0x01,0xff,0x5a,
++	0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x7a,0xcc,0x81,0x00,0x01,0xff,0x5a,0xcc,0x87,
++	0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x7a,0xcc,0x87,0x00,0x01,0xff,0x5a,0xcc,0x8c,
++	0x00,0x10,0x08,0x01,0xff,0x7a,0xcc,0x8c,0x00,0x01,0x00,0xd0,0x4a,0xcf,0x86,0x55,
++	0x04,0x01,0x00,0xd4,0x2c,0xd3,0x18,0x92,0x14,0x91,0x10,0x10,0x08,0x01,0xff,0x4f,
++	0xcc,0x9b,0x00,0x01,0xff,0x6f,0xcc,0x9b,0x00,0x01,0x00,0x01,0x00,0x52,0x04,0x01,
++	0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x01,0xff,0x55,0xcc,0x9b,0x00,0x93,
++	0x14,0x92,0x10,0x91,0x0c,0x10,0x08,0x01,0xff,0x75,0xcc,0x9b,0x00,0x01,0x00,0x01,
++	0x00,0x01,0x00,0x01,0x00,0xcf,0x86,0xd5,0xb4,0xd4,0x24,0x53,0x04,0x01,0x00,0x52,
++	0x04,0x01,0x00,0xd1,0x0c,0x10,0x04,0x01,0x00,0x01,0xff,0x41,0xcc,0x8c,0x00,0x10,
++	0x08,0x01,0xff,0x61,0xcc,0x8c,0x00,0x01,0xff,0x49,0xcc,0x8c,0x00,0xd3,0x46,0xd2,
++	0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x69,0xcc,0x8c,0x00,0x01,0xff,0x4f,0xcc,0x8c,
++	0x00,0x10,0x08,0x01,0xff,0x6f,0xcc,0x8c,0x00,0x01,0xff,0x55,0xcc,0x8c,0x00,0xd1,
++	0x12,0x10,0x08,0x01,0xff,0x75,0xcc,0x8c,0x00,0x01,0xff,0x55,0xcc,0x88,0xcc,0x84,
++	0x00,0x10,0x0a,0x01,0xff,0x75,0xcc,0x88,0xcc,0x84,0x00,0x01,0xff,0x55,0xcc,0x88,
++	0xcc,0x81,0x00,0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x75,0xcc,0x88,0xcc,0x81,
++	0x00,0x01,0xff,0x55,0xcc,0x88,0xcc,0x8c,0x00,0x10,0x0a,0x01,0xff,0x75,0xcc,0x88,
++	0xcc,0x8c,0x00,0x01,0xff,0x55,0xcc,0x88,0xcc,0x80,0x00,0xd1,0x0e,0x10,0x0a,0x01,
++	0xff,0x75,0xcc,0x88,0xcc,0x80,0x00,0x01,0x00,0x10,0x0a,0x01,0xff,0x41,0xcc,0x88,
++	0xcc,0x84,0x00,0x01,0xff,0x61,0xcc,0x88,0xcc,0x84,0x00,0xd4,0x80,0xd3,0x3a,0xd2,
++	0x26,0xd1,0x14,0x10,0x0a,0x01,0xff,0x41,0xcc,0x87,0xcc,0x84,0x00,0x01,0xff,0x61,
++	0xcc,0x87,0xcc,0x84,0x00,0x10,0x09,0x01,0xff,0xc3,0x86,0xcc,0x84,0x00,0x01,0xff,
++	0xc3,0xa6,0xcc,0x84,0x00,0x51,0x04,0x01,0x00,0x10,0x08,0x01,0xff,0x47,0xcc,0x8c,
++	0x00,0x01,0xff,0x67,0xcc,0x8c,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x4b,
++	0xcc,0x8c,0x00,0x01,0xff,0x6b,0xcc,0x8c,0x00,0x10,0x08,0x01,0xff,0x4f,0xcc,0xa8,
++	0x00,0x01,0xff,0x6f,0xcc,0xa8,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x4f,0xcc,0xa8,
++	0xcc,0x84,0x00,0x01,0xff,0x6f,0xcc,0xa8,0xcc,0x84,0x00,0x10,0x09,0x01,0xff,0xc6,
++	0xb7,0xcc,0x8c,0x00,0x01,0xff,0xca,0x92,0xcc,0x8c,0x00,0xd3,0x24,0xd2,0x10,0x91,
++	0x0c,0x10,0x08,0x01,0xff,0x6a,0xcc,0x8c,0x00,0x01,0x00,0x01,0x00,0x91,0x10,0x10,
++	0x08,0x01,0xff,0x47,0xcc,0x81,0x00,0x01,0xff,0x67,0xcc,0x81,0x00,0x04,0x00,0xd2,
++	0x24,0xd1,0x10,0x10,0x08,0x04,0xff,0x4e,0xcc,0x80,0x00,0x04,0xff,0x6e,0xcc,0x80,
++	0x00,0x10,0x0a,0x01,0xff,0x41,0xcc,0x8a,0xcc,0x81,0x00,0x01,0xff,0x61,0xcc,0x8a,
++	0xcc,0x81,0x00,0xd1,0x12,0x10,0x09,0x01,0xff,0xc3,0x86,0xcc,0x81,0x00,0x01,0xff,
++	0xc3,0xa6,0xcc,0x81,0x00,0x10,0x09,0x01,0xff,0xc3,0x98,0xcc,0x81,0x00,0x01,0xff,
++	0xc3,0xb8,0xcc,0x81,0x00,0xe2,0x07,0x02,0xe1,0xae,0x01,0xe0,0x93,0x01,0xcf,0x86,
++	0xd5,0xf4,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x41,0xcc,
++	0x8f,0x00,0x01,0xff,0x61,0xcc,0x8f,0x00,0x10,0x08,0x01,0xff,0x41,0xcc,0x91,0x00,
++	0x01,0xff,0x61,0xcc,0x91,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x45,0xcc,0x8f,0x00,
++	0x01,0xff,0x65,0xcc,0x8f,0x00,0x10,0x08,0x01,0xff,0x45,0xcc,0x91,0x00,0x01,0xff,
++	0x65,0xcc,0x91,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x49,0xcc,0x8f,0x00,
++	0x01,0xff,0x69,0xcc,0x8f,0x00,0x10,0x08,0x01,0xff,0x49,0xcc,0x91,0x00,0x01,0xff,
++	0x69,0xcc,0x91,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x4f,0xcc,0x8f,0x00,0x01,0xff,
++	0x6f,0xcc,0x8f,0x00,0x10,0x08,0x01,0xff,0x4f,0xcc,0x91,0x00,0x01,0xff,0x6f,0xcc,
++	0x91,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x52,0xcc,0x8f,0x00,
++	0x01,0xff,0x72,0xcc,0x8f,0x00,0x10,0x08,0x01,0xff,0x52,0xcc,0x91,0x00,0x01,0xff,
++	0x72,0xcc,0x91,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x55,0xcc,0x8f,0x00,0x01,0xff,
++	0x75,0xcc,0x8f,0x00,0x10,0x08,0x01,0xff,0x55,0xcc,0x91,0x00,0x01,0xff,0x75,0xcc,
++	0x91,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x04,0xff,0x53,0xcc,0xa6,0x00,0x04,0xff,
++	0x73,0xcc,0xa6,0x00,0x10,0x08,0x04,0xff,0x54,0xcc,0xa6,0x00,0x04,0xff,0x74,0xcc,
++	0xa6,0x00,0x51,0x04,0x04,0x00,0x10,0x08,0x04,0xff,0x48,0xcc,0x8c,0x00,0x04,0xff,
++	0x68,0xcc,0x8c,0x00,0xd4,0x68,0xd3,0x20,0xd2,0x0c,0x91,0x08,0x10,0x04,0x06,0x00,
++	0x07,0x00,0x04,0x00,0x51,0x04,0x04,0x00,0x10,0x08,0x04,0xff,0x41,0xcc,0x87,0x00,
++	0x04,0xff,0x61,0xcc,0x87,0x00,0xd2,0x24,0xd1,0x10,0x10,0x08,0x04,0xff,0x45,0xcc,
++	0xa7,0x00,0x04,0xff,0x65,0xcc,0xa7,0x00,0x10,0x0a,0x04,0xff,0x4f,0xcc,0x88,0xcc,
++	0x84,0x00,0x04,0xff,0x6f,0xcc,0x88,0xcc,0x84,0x00,0xd1,0x14,0x10,0x0a,0x04,0xff,
++	0x4f,0xcc,0x83,0xcc,0x84,0x00,0x04,0xff,0x6f,0xcc,0x83,0xcc,0x84,0x00,0x10,0x08,
++	0x04,0xff,0x4f,0xcc,0x87,0x00,0x04,0xff,0x6f,0xcc,0x87,0x00,0x93,0x30,0xd2,0x24,
++	0xd1,0x14,0x10,0x0a,0x04,0xff,0x4f,0xcc,0x87,0xcc,0x84,0x00,0x04,0xff,0x6f,0xcc,
++	0x87,0xcc,0x84,0x00,0x10,0x08,0x04,0xff,0x59,0xcc,0x84,0x00,0x04,0xff,0x79,0xcc,
++	0x84,0x00,0x51,0x04,0x07,0x00,0x10,0x04,0x07,0x00,0x08,0x00,0x08,0x00,0xcf,0x86,
++	0x95,0x14,0x94,0x10,0x93,0x0c,0x92,0x08,0x11,0x04,0x08,0x00,0x09,0x00,0x09,0x00,
++	0x09,0x00,0x01,0x00,0x01,0x00,0xd0,0x22,0xcf,0x86,0x55,0x04,0x01,0x00,0x94,0x18,
++	0x53,0x04,0x01,0x00,0xd2,0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x04,0x00,0x04,0x00,
++	0x11,0x04,0x04,0x00,0x07,0x00,0x01,0x00,0xcf,0x86,0xd5,0x18,0x54,0x04,0x01,0x00,
++	0x53,0x04,0x01,0x00,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,
++	0x04,0x00,0x94,0x18,0x53,0x04,0x01,0x00,0xd2,0x08,0x11,0x04,0x01,0x00,0x04,0x00,
++	0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x07,0x00,0x07,0x00,0xe1,0x35,0x01,0xd0,
++	0x72,0xcf,0x86,0xd5,0x24,0x54,0x04,0x01,0xe6,0xd3,0x10,0x52,0x04,0x01,0xe6,0x91,
++	0x08,0x10,0x04,0x01,0xe6,0x01,0xe8,0x01,0xdc,0x92,0x0c,0x51,0x04,0x01,0xdc,0x10,
++	0x04,0x01,0xe8,0x01,0xd8,0x01,0xdc,0xd4,0x2c,0xd3,0x1c,0xd2,0x10,0xd1,0x08,0x10,
++	0x04,0x01,0xdc,0x01,0xca,0x10,0x04,0x01,0xca,0x01,0xdc,0x51,0x04,0x01,0xdc,0x10,
++	0x04,0x01,0xdc,0x01,0xca,0x92,0x0c,0x91,0x08,0x10,0x04,0x01,0xca,0x01,0xdc,0x01,
++	0xdc,0x01,0xdc,0xd3,0x08,0x12,0x04,0x01,0xdc,0x01,0x01,0xd2,0x0c,0x91,0x08,0x10,
++	0x04,0x01,0x01,0x01,0xdc,0x01,0xdc,0x91,0x08,0x10,0x04,0x01,0xdc,0x01,0xe6,0x01,
++	0xe6,0xcf,0x86,0xd5,0x7f,0xd4,0x47,0xd3,0x2e,0xd2,0x19,0xd1,0x0e,0x10,0x07,0x01,
++	0xff,0xcc,0x80,0x00,0x01,0xff,0xcc,0x81,0x00,0x10,0x04,0x01,0xe6,0x01,0xff,0xcc,
++	0x93,0x00,0xd1,0x0d,0x10,0x09,0x01,0xff,0xcc,0x88,0xcc,0x81,0x00,0x01,0xf0,0x10,
++	0x04,0x04,0xe6,0x04,0xdc,0xd2,0x08,0x11,0x04,0x04,0xdc,0x04,0xe6,0xd1,0x08,0x10,
++	0x04,0x04,0xe6,0x04,0xdc,0x10,0x04,0x04,0xdc,0x06,0xff,0x00,0xd3,0x18,0xd2,0x0c,
++	0x51,0x04,0x07,0xe6,0x10,0x04,0x07,0xe6,0x07,0xdc,0x51,0x04,0x07,0xdc,0x10,0x04,
++	0x07,0xdc,0x07,0xe6,0xd2,0x10,0xd1,0x08,0x10,0x04,0x08,0xe8,0x08,0xdc,0x10,0x04,
++	0x08,0xdc,0x08,0xe6,0xd1,0x08,0x10,0x04,0x08,0xe9,0x07,0xea,0x10,0x04,0x07,0xea,
++	0x07,0xe9,0xd4,0x14,0x93,0x10,0x92,0x0c,0x51,0x04,0x01,0xea,0x10,0x04,0x04,0xe9,
++	0x06,0xe6,0x06,0xe6,0x06,0xe6,0xd3,0x13,0x52,0x04,0x0a,0x00,0x91,0x0b,0x10,0x07,
++	0x01,0xff,0xca,0xb9,0x00,0x01,0x00,0x0a,0x00,0xd2,0x0c,0x51,0x04,0x00,0x00,0x10,
++	0x04,0x01,0x00,0x09,0x00,0x51,0x04,0x09,0x00,0x10,0x06,0x01,0xff,0x3b,0x00,0x10,
++	0x00,0xd0,0xe1,0xcf,0x86,0xd5,0x7a,0xd4,0x5f,0xd3,0x21,0x52,0x04,0x00,0x00,0xd1,
++	0x0d,0x10,0x04,0x01,0x00,0x01,0xff,0xc2,0xa8,0xcc,0x81,0x00,0x10,0x09,0x01,0xff,
++	0xce,0x91,0xcc,0x81,0x00,0x01,0xff,0xc2,0xb7,0x00,0xd2,0x1f,0xd1,0x12,0x10,0x09,
++	0x01,0xff,0xce,0x95,0xcc,0x81,0x00,0x01,0xff,0xce,0x97,0xcc,0x81,0x00,0x10,0x09,
++	0x01,0xff,0xce,0x99,0xcc,0x81,0x00,0x00,0x00,0xd1,0x0d,0x10,0x09,0x01,0xff,0xce,
++	0x9f,0xcc,0x81,0x00,0x00,0x00,0x10,0x09,0x01,0xff,0xce,0xa5,0xcc,0x81,0x00,0x01,
++	0xff,0xce,0xa9,0xcc,0x81,0x00,0x93,0x17,0x92,0x13,0x91,0x0f,0x10,0x0b,0x01,0xff,
++	0xce,0xb9,0xcc,0x88,0xcc,0x81,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd4,
++	0x4a,0xd3,0x10,0x92,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x00,0x00,0x01,0x00,0x01,
++	0x00,0xd2,0x16,0x51,0x04,0x01,0x00,0x10,0x09,0x01,0xff,0xce,0x99,0xcc,0x88,0x00,
++	0x01,0xff,0xce,0xa5,0xcc,0x88,0x00,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xb1,0xcc,
++	0x81,0x00,0x01,0xff,0xce,0xb5,0xcc,0x81,0x00,0x10,0x09,0x01,0xff,0xce,0xb7,0xcc,
++	0x81,0x00,0x01,0xff,0xce,0xb9,0xcc,0x81,0x00,0x93,0x17,0x92,0x13,0x91,0x0f,0x10,
++	0x0b,0x01,0xff,0xcf,0x85,0xcc,0x88,0xcc,0x81,0x00,0x01,0x00,0x01,0x00,0x01,0x00,
++	0x01,0x00,0xcf,0x86,0xd5,0x7b,0xd4,0x39,0x53,0x04,0x01,0x00,0xd2,0x16,0x51,0x04,
++	0x01,0x00,0x10,0x09,0x01,0xff,0xce,0xb9,0xcc,0x88,0x00,0x01,0xff,0xcf,0x85,0xcc,
++	0x88,0x00,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xbf,0xcc,0x81,0x00,0x01,0xff,0xcf,
++	0x85,0xcc,0x81,0x00,0x10,0x09,0x01,0xff,0xcf,0x89,0xcc,0x81,0x00,0x0a,0x00,0xd3,
++	0x26,0xd2,0x11,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x01,0xff,0xcf,0x92,0xcc,
++	0x81,0x00,0xd1,0x0d,0x10,0x09,0x01,0xff,0xcf,0x92,0xcc,0x88,0x00,0x01,0x00,0x10,
++	0x04,0x01,0x00,0x04,0x00,0xd2,0x0c,0x51,0x04,0x06,0x00,0x10,0x04,0x01,0x00,0x04,
++	0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x04,0x00,0x10,0x04,0x01,0x00,0x04,0x00,0xd4,
++	0x14,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x04,0x00,0x01,0x00,0x01,
++	0x00,0x01,0x00,0xd3,0x10,0x52,0x04,0x01,0x00,0x51,0x04,0x05,0x00,0x10,0x04,0x06,
++	0x00,0x07,0x00,0x12,0x04,0x07,0x00,0x08,0x00,0xe3,0x47,0x04,0xe2,0xbe,0x02,0xe1,
++	0x07,0x01,0xd0,0x8b,0xcf,0x86,0xd5,0x6c,0xd4,0x53,0xd3,0x30,0xd2,0x1f,0xd1,0x12,
++	0x10,0x09,0x04,0xff,0xd0,0x95,0xcc,0x80,0x00,0x01,0xff,0xd0,0x95,0xcc,0x88,0x00,
++	0x10,0x04,0x01,0x00,0x01,0xff,0xd0,0x93,0xcc,0x81,0x00,0x51,0x04,0x01,0x00,0x10,
++	0x04,0x01,0x00,0x01,0xff,0xd0,0x86,0xcc,0x88,0x00,0x52,0x04,0x01,0x00,0xd1,0x12,
++	0x10,0x09,0x01,0xff,0xd0,0x9a,0xcc,0x81,0x00,0x04,0xff,0xd0,0x98,0xcc,0x80,0x00,
++	0x10,0x09,0x01,0xff,0xd0,0xa3,0xcc,0x86,0x00,0x01,0x00,0x53,0x04,0x01,0x00,0x92,
++	0x11,0x91,0x0d,0x10,0x04,0x01,0x00,0x01,0xff,0xd0,0x98,0xcc,0x86,0x00,0x01,0x00,
++	0x01,0x00,0x54,0x04,0x01,0x00,0x53,0x04,0x01,0x00,0x92,0x11,0x91,0x0d,0x10,0x04,
++	0x01,0x00,0x01,0xff,0xd0,0xb8,0xcc,0x86,0x00,0x01,0x00,0x01,0x00,0xcf,0x86,0xd5,
++	0x57,0x54,0x04,0x01,0x00,0xd3,0x30,0xd2,0x1f,0xd1,0x12,0x10,0x09,0x04,0xff,0xd0,
++	0xb5,0xcc,0x80,0x00,0x01,0xff,0xd0,0xb5,0xcc,0x88,0x00,0x10,0x04,0x01,0x00,0x01,
++	0xff,0xd0,0xb3,0xcc,0x81,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x01,0xff,
++	0xd1,0x96,0xcc,0x88,0x00,0x52,0x04,0x01,0x00,0xd1,0x12,0x10,0x09,0x01,0xff,0xd0,
++	0xba,0xcc,0x81,0x00,0x04,0xff,0xd0,0xb8,0xcc,0x80,0x00,0x10,0x09,0x01,0xff,0xd1,
++	0x83,0xcc,0x86,0x00,0x01,0x00,0x54,0x04,0x01,0x00,0x93,0x1a,0x52,0x04,0x01,0x00,
++	0x51,0x04,0x01,0x00,0x10,0x09,0x01,0xff,0xd1,0xb4,0xcc,0x8f,0x00,0x01,0xff,0xd1,
++	0xb5,0xcc,0x8f,0x00,0x01,0x00,0xd0,0x2e,0xcf,0x86,0x95,0x28,0x94,0x24,0xd3,0x18,
++	0xd2,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x01,0xe6,0x51,0x04,0x01,0xe6,
++	0x10,0x04,0x01,0xe6,0x0a,0xe6,0x92,0x08,0x11,0x04,0x04,0x00,0x06,0x00,0x04,0x00,
++	0x01,0x00,0x01,0x00,0xcf,0x86,0xd5,0xbe,0xd4,0x4a,0xd3,0x2a,0xd2,0x1a,0xd1,0x0d,
++	0x10,0x04,0x01,0x00,0x01,0xff,0xd0,0x96,0xcc,0x86,0x00,0x10,0x09,0x01,0xff,0xd0,
++	0xb6,0xcc,0x86,0x00,0x01,0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x06,0x00,0x10,0x04,
++	0x06,0x00,0x01,0x00,0xd2,0x10,0xd1,0x08,0x10,0x04,0x01,0x00,0x06,0x00,0x10,0x04,
++	0x06,0x00,0x01,0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x06,0x00,0x10,0x04,0x06,0x00,
++	0x09,0x00,0xd3,0x3a,0xd2,0x24,0xd1,0x12,0x10,0x09,0x01,0xff,0xd0,0x90,0xcc,0x86,
++	0x00,0x01,0xff,0xd0,0xb0,0xcc,0x86,0x00,0x10,0x09,0x01,0xff,0xd0,0x90,0xcc,0x88,
++	0x00,0x01,0xff,0xd0,0xb0,0xcc,0x88,0x00,0x51,0x04,0x01,0x00,0x10,0x09,0x01,0xff,
++	0xd0,0x95,0xcc,0x86,0x00,0x01,0xff,0xd0,0xb5,0xcc,0x86,0x00,0xd2,0x16,0x51,0x04,
++	0x01,0x00,0x10,0x09,0x01,0xff,0xd3,0x98,0xcc,0x88,0x00,0x01,0xff,0xd3,0x99,0xcc,
++	0x88,0x00,0xd1,0x12,0x10,0x09,0x01,0xff,0xd0,0x96,0xcc,0x88,0x00,0x01,0xff,0xd0,
++	0xb6,0xcc,0x88,0x00,0x10,0x09,0x01,0xff,0xd0,0x97,0xcc,0x88,0x00,0x01,0xff,0xd0,
++	0xb7,0xcc,0x88,0x00,0xd4,0x74,0xd3,0x3a,0xd2,0x16,0x51,0x04,0x01,0x00,0x10,0x09,
++	0x01,0xff,0xd0,0x98,0xcc,0x84,0x00,0x01,0xff,0xd0,0xb8,0xcc,0x84,0x00,0xd1,0x12,
++	0x10,0x09,0x01,0xff,0xd0,0x98,0xcc,0x88,0x00,0x01,0xff,0xd0,0xb8,0xcc,0x88,0x00,
++	0x10,0x09,0x01,0xff,0xd0,0x9e,0xcc,0x88,0x00,0x01,0xff,0xd0,0xbe,0xcc,0x88,0x00,
++	0xd2,0x16,0x51,0x04,0x01,0x00,0x10,0x09,0x01,0xff,0xd3,0xa8,0xcc,0x88,0x00,0x01,
++	0xff,0xd3,0xa9,0xcc,0x88,0x00,0xd1,0x12,0x10,0x09,0x04,0xff,0xd0,0xad,0xcc,0x88,
++	0x00,0x04,0xff,0xd1,0x8d,0xcc,0x88,0x00,0x10,0x09,0x01,0xff,0xd0,0xa3,0xcc,0x84,
++	0x00,0x01,0xff,0xd1,0x83,0xcc,0x84,0x00,0xd3,0x3a,0xd2,0x24,0xd1,0x12,0x10,0x09,
++	0x01,0xff,0xd0,0xa3,0xcc,0x88,0x00,0x01,0xff,0xd1,0x83,0xcc,0x88,0x00,0x10,0x09,
++	0x01,0xff,0xd0,0xa3,0xcc,0x8b,0x00,0x01,0xff,0xd1,0x83,0xcc,0x8b,0x00,0x91,0x12,
++	0x10,0x09,0x01,0xff,0xd0,0xa7,0xcc,0x88,0x00,0x01,0xff,0xd1,0x87,0xcc,0x88,0x00,
++	0x08,0x00,0x92,0x16,0x91,0x12,0x10,0x09,0x01,0xff,0xd0,0xab,0xcc,0x88,0x00,0x01,
++	0xff,0xd1,0x8b,0xcc,0x88,0x00,0x09,0x00,0x09,0x00,0xd1,0x74,0xd0,0x36,0xcf,0x86,
++	0xd5,0x10,0x54,0x04,0x06,0x00,0x93,0x08,0x12,0x04,0x09,0x00,0x0a,0x00,0x0a,0x00,
++	0xd4,0x10,0x93,0x0c,0x52,0x04,0x0a,0x00,0x11,0x04,0x0b,0x00,0x0c,0x00,0x10,0x00,
++	0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x01,0x00,
++	0x01,0x00,0xcf,0x86,0xd5,0x24,0x54,0x04,0x01,0x00,0xd3,0x10,0x52,0x04,0x01,0x00,
++	0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,
++	0x00,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x94,0x14,0x93,0x10,0x92,0x0c,0x91,0x08,
++	0x10,0x04,0x14,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd0,0xba,
++	0xcf,0x86,0xd5,0x4c,0xd4,0x24,0x53,0x04,0x01,0x00,0xd2,0x10,0xd1,0x08,0x10,0x04,
++	0x14,0x00,0x01,0x00,0x10,0x04,0x04,0x00,0x00,0x00,0xd1,0x08,0x10,0x04,0x00,0x00,
++	0x10,0x00,0x10,0x04,0x10,0x00,0x0d,0x00,0xd3,0x18,0xd2,0x0c,0x91,0x08,0x10,0x04,
++	0x00,0x00,0x02,0xdc,0x02,0xe6,0x51,0x04,0x02,0xe6,0x10,0x04,0x02,0xdc,0x02,0xe6,
++	0x92,0x0c,0x51,0x04,0x02,0xe6,0x10,0x04,0x02,0xde,0x02,0xdc,0x02,0xe6,0xd4,0x2c,
++	0xd3,0x10,0x92,0x0c,0x51,0x04,0x02,0xe6,0x10,0x04,0x08,0xdc,0x02,0xdc,0x02,0xdc,
++	0xd2,0x0c,0x51,0x04,0x02,0xe6,0x10,0x04,0x02,0xdc,0x02,0xe6,0xd1,0x08,0x10,0x04,
++	0x02,0xe6,0x02,0xde,0x10,0x04,0x02,0xe4,0x02,0xe6,0xd3,0x20,0xd2,0x10,0xd1,0x08,
++	0x10,0x04,0x01,0x0a,0x01,0x0b,0x10,0x04,0x01,0x0c,0x01,0x0d,0xd1,0x08,0x10,0x04,
++	0x01,0x0e,0x01,0x0f,0x10,0x04,0x01,0x10,0x01,0x11,0xd2,0x10,0xd1,0x08,0x10,0x04,
++	0x01,0x12,0x01,0x13,0x10,0x04,0x09,0x13,0x01,0x14,0xd1,0x08,0x10,0x04,0x01,0x15,
++	0x01,0x16,0x10,0x04,0x01,0x00,0x01,0x17,0xcf,0x86,0xd5,0x28,0x94,0x24,0x93,0x20,
++	0xd2,0x10,0xd1,0x08,0x10,0x04,0x01,0x00,0x01,0x18,0x10,0x04,0x01,0x19,0x01,0x00,
++	0xd1,0x08,0x10,0x04,0x02,0xe6,0x08,0xdc,0x10,0x04,0x08,0x00,0x08,0x12,0x00,0x00,
++	0x01,0x00,0xd4,0x1c,0x53,0x04,0x01,0x00,0xd2,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,
++	0x01,0x00,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x14,0x00,0x93,0x10,
++	0x52,0x04,0x01,0x00,0x91,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++	0xe2,0xfb,0x01,0xe1,0x2b,0x01,0xd0,0xa8,0xcf,0x86,0xd5,0x55,0xd4,0x28,0xd3,0x10,
++	0x52,0x04,0x07,0x00,0x91,0x08,0x10,0x04,0x0d,0x00,0x10,0x00,0x0a,0x00,0xd2,0x0c,
++	0x51,0x04,0x0a,0x00,0x10,0x04,0x0a,0x00,0x08,0x00,0x91,0x08,0x10,0x04,0x01,0x00,
++	0x07,0x00,0x07,0x00,0xd3,0x0c,0x52,0x04,0x07,0xe6,0x11,0x04,0x07,0xe6,0x0a,0xe6,
++	0xd2,0x10,0xd1,0x08,0x10,0x04,0x0a,0x1e,0x0a,0x1f,0x10,0x04,0x0a,0x20,0x01,0x00,
++	0xd1,0x09,0x10,0x05,0x0f,0xff,0x00,0x00,0x00,0x10,0x04,0x08,0x00,0x01,0x00,0xd4,
++	0x3d,0x93,0x39,0xd2,0x1a,0xd1,0x08,0x10,0x04,0x0c,0x00,0x01,0x00,0x10,0x09,0x01,
++	0xff,0xd8,0xa7,0xd9,0x93,0x00,0x01,0xff,0xd8,0xa7,0xd9,0x94,0x00,0xd1,0x12,0x10,
++	0x09,0x01,0xff,0xd9,0x88,0xd9,0x94,0x00,0x01,0xff,0xd8,0xa7,0xd9,0x95,0x00,0x10,
++	0x09,0x01,0xff,0xd9,0x8a,0xd9,0x94,0x00,0x01,0x00,0x01,0x00,0x53,0x04,0x01,0x00,
++	0x92,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x0a,0x00,0x0a,0x00,0xcf,0x86,
++	0xd5,0x5c,0xd4,0x20,0x53,0x04,0x01,0x00,0xd2,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,
++	0x01,0x00,0x01,0x1b,0xd1,0x08,0x10,0x04,0x01,0x1c,0x01,0x1d,0x10,0x04,0x01,0x1e,
++	0x01,0x1f,0xd3,0x20,0xd2,0x10,0xd1,0x08,0x10,0x04,0x01,0x20,0x01,0x21,0x10,0x04,
++	0x01,0x22,0x04,0xe6,0xd1,0x08,0x10,0x04,0x04,0xe6,0x04,0xdc,0x10,0x04,0x07,0xdc,
++	0x07,0xe6,0xd2,0x0c,0x91,0x08,0x10,0x04,0x07,0xe6,0x08,0xe6,0x08,0xe6,0xd1,0x08,
++	0x10,0x04,0x08,0xdc,0x08,0xe6,0x10,0x04,0x08,0xe6,0x0c,0xdc,0xd4,0x10,0x53,0x04,
++	0x01,0x00,0x52,0x04,0x01,0x00,0x11,0x04,0x01,0x00,0x06,0x00,0x93,0x10,0x92,0x0c,
++	0x91,0x08,0x10,0x04,0x01,0x23,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd0,0x22,
++	0xcf,0x86,0x55,0x04,0x01,0x00,0x54,0x04,0x01,0x00,0x53,0x04,0x01,0x00,0xd2,0x08,
++	0x11,0x04,0x04,0x00,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x04,0x00,
++	0xcf,0x86,0xd5,0x5b,0xd4,0x2e,0xd3,0x1e,0x92,0x1a,0xd1,0x0d,0x10,0x09,0x01,0xff,
++	0xdb,0x95,0xd9,0x94,0x00,0x01,0x00,0x10,0x09,0x01,0xff,0xdb,0x81,0xd9,0x94,0x00,
++	0x01,0x00,0x01,0x00,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,
++	0x04,0x00,0xd3,0x19,0xd2,0x11,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x01,0xff,
++	0xdb,0x92,0xd9,0x94,0x00,0x11,0x04,0x01,0x00,0x01,0xe6,0x52,0x04,0x01,0xe6,0xd1,
++	0x08,0x10,0x04,0x01,0xe6,0x01,0x00,0x10,0x04,0x01,0x00,0x01,0xe6,0xd4,0x38,0xd3,
++	0x1c,0xd2,0x0c,0x51,0x04,0x01,0xe6,0x10,0x04,0x01,0xe6,0x01,0xdc,0xd1,0x08,0x10,
++	0x04,0x01,0xe6,0x01,0x00,0x10,0x04,0x01,0x00,0x01,0xe6,0xd2,0x10,0xd1,0x08,0x10,
++	0x04,0x01,0xe6,0x01,0x00,0x10,0x04,0x01,0xdc,0x01,0xe6,0x91,0x08,0x10,0x04,0x01,
++	0xe6,0x01,0xdc,0x07,0x00,0x53,0x04,0x01,0x00,0xd2,0x08,0x11,0x04,0x01,0x00,0x04,
++	0x00,0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x07,0x00,0xd1,0xc8,0xd0,0x76,0xcf,
++	0x86,0xd5,0x28,0xd4,0x14,0x53,0x04,0x04,0x00,0x52,0x04,0x04,0x00,0x51,0x04,0x04,
++	0x00,0x10,0x04,0x00,0x00,0x04,0x00,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x04,
++	0x00,0x04,0x24,0x04,0x00,0x04,0x00,0x04,0x00,0xd4,0x14,0x53,0x04,0x04,0x00,0x52,
++	0x04,0x04,0x00,0x91,0x08,0x10,0x04,0x04,0x00,0x07,0x00,0x07,0x00,0xd3,0x1c,0xd2,
++	0x0c,0x91,0x08,0x10,0x04,0x04,0xe6,0x04,0xdc,0x04,0xe6,0xd1,0x08,0x10,0x04,0x04,
++	0xdc,0x04,0xe6,0x10,0x04,0x04,0xe6,0x04,0xdc,0xd2,0x0c,0x51,0x04,0x04,0xdc,0x10,
++	0x04,0x04,0xe6,0x04,0xdc,0xd1,0x08,0x10,0x04,0x04,0xdc,0x04,0xe6,0x10,0x04,0x04,
++	0xdc,0x04,0xe6,0xcf,0x86,0xd5,0x3c,0x94,0x38,0xd3,0x1c,0xd2,0x0c,0x51,0x04,0x04,
++	0xe6,0x10,0x04,0x04,0xdc,0x04,0xe6,0xd1,0x08,0x10,0x04,0x04,0xdc,0x04,0xe6,0x10,
++	0x04,0x04,0xdc,0x04,0xe6,0xd2,0x10,0xd1,0x08,0x10,0x04,0x04,0xdc,0x04,0xe6,0x10,
++	0x04,0x04,0xe6,0x00,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x07,0x00,0x07,0x00,0x08,
++	0x00,0x94,0x10,0x53,0x04,0x08,0x00,0x52,0x04,0x08,0x00,0x11,0x04,0x08,0x00,0x0a,
++	0x00,0x0a,0x00,0xd0,0x1e,0xcf,0x86,0x55,0x04,0x04,0x00,0x54,0x04,0x04,0x00,0x93,
++	0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x04,0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x00,
++	0x00,0xcf,0x86,0x55,0x04,0x09,0x00,0xd4,0x14,0x53,0x04,0x09,0x00,0x92,0x0c,0x51,
++	0x04,0x09,0x00,0x10,0x04,0x09,0x00,0x09,0xe6,0x09,0xe6,0xd3,0x10,0x92,0x0c,0x51,
++	0x04,0x09,0xe6,0x10,0x04,0x09,0xdc,0x09,0xe6,0x09,0x00,0xd2,0x0c,0x51,0x04,0x09,
++	0x00,0x10,0x04,0x09,0x00,0x00,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x14,0xdc,0x14,
++	0x00,0xe4,0xf8,0x57,0xe3,0x45,0x3f,0xe2,0xf4,0x3e,0xe1,0xc7,0x2c,0xe0,0x21,0x10,
++	0xcf,0x86,0xc5,0xe4,0x80,0x08,0xe3,0xcb,0x03,0xe2,0x61,0x01,0xd1,0x94,0xd0,0x5a,
++	0xcf,0x86,0xd5,0x20,0x54,0x04,0x0b,0x00,0xd3,0x0c,0x52,0x04,0x0b,0x00,0x11,0x04,
++	0x0b,0x00,0x0b,0xe6,0x92,0x0c,0x51,0x04,0x0b,0xe6,0x10,0x04,0x0b,0x00,0x0b,0xe6,
++	0x0b,0xe6,0xd4,0x24,0xd3,0x10,0x52,0x04,0x0b,0xe6,0x91,0x08,0x10,0x04,0x0b,0x00,
++	0x0b,0xe6,0x0b,0xe6,0xd2,0x0c,0x91,0x08,0x10,0x04,0x0b,0x00,0x0b,0xe6,0x0b,0xe6,
++	0x11,0x04,0x0b,0xe6,0x00,0x00,0x53,0x04,0x0b,0x00,0x52,0x04,0x0b,0x00,0x51,0x04,
++	0x0b,0x00,0x10,0x04,0x0b,0x00,0x00,0x00,0xcf,0x86,0xd5,0x20,0x54,0x04,0x0c,0x00,
++	0x53,0x04,0x0c,0x00,0xd2,0x0c,0x91,0x08,0x10,0x04,0x0c,0x00,0x0c,0xdc,0x0c,0xdc,
++	0x51,0x04,0x00,0x00,0x10,0x04,0x0c,0x00,0x00,0x00,0x94,0x14,0x53,0x04,0x13,0x00,
++	0x92,0x0c,0x51,0x04,0x13,0x00,0x10,0x04,0x13,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++	0xd0,0x4a,0xcf,0x86,0x55,0x04,0x00,0x00,0xd4,0x20,0xd3,0x10,0x92,0x0c,0x91,0x08,
++	0x10,0x04,0x0d,0x00,0x10,0x00,0x0d,0x00,0x0d,0x00,0x52,0x04,0x0d,0x00,0x91,0x08,
++	0x10,0x04,0x0d,0x00,0x10,0x00,0x10,0x00,0xd3,0x18,0xd2,0x0c,0x51,0x04,0x10,0x00,
++	0x10,0x04,0x10,0x00,0x11,0x00,0x91,0x08,0x10,0x04,0x11,0x00,0x00,0x00,0x12,0x00,
++	0x52,0x04,0x12,0x00,0x11,0x04,0x12,0x00,0x00,0x00,0xcf,0x86,0xd5,0x18,0x54,0x04,
++	0x00,0x00,0x93,0x10,0x92,0x0c,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x14,0xdc,
++	0x12,0xe6,0x12,0xe6,0xd4,0x30,0xd3,0x18,0xd2,0x0c,0x51,0x04,0x12,0xe6,0x10,0x04,
++	0x12,0x00,0x11,0xdc,0x51,0x04,0x0d,0xe6,0x10,0x04,0x0d,0xdc,0x0d,0xe6,0xd2,0x0c,
++	0x91,0x08,0x10,0x04,0x0d,0xe6,0x0d,0xdc,0x0d,0xe6,0x91,0x08,0x10,0x04,0x0d,0xe6,
++	0x0d,0xdc,0x0d,0xdc,0xd3,0x1c,0xd2,0x10,0xd1,0x08,0x10,0x04,0x0d,0x1b,0x0d,0x1c,
++	0x10,0x04,0x0d,0x1d,0x0d,0xe6,0x51,0x04,0x0d,0xe6,0x10,0x04,0x0d,0xdc,0x0d,0xe6,
++	0xd2,0x10,0xd1,0x08,0x10,0x04,0x0d,0xe6,0x0d,0xdc,0x10,0x04,0x0d,0xdc,0x0d,0xe6,
++	0x51,0x04,0x0d,0xe6,0x10,0x04,0x0d,0xe6,0x10,0xe6,0xe1,0x3a,0x01,0xd0,0x77,0xcf,
++	0x86,0xd5,0x20,0x94,0x1c,0x93,0x18,0xd2,0x0c,0x91,0x08,0x10,0x04,0x0b,0x00,0x01,
++	0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x07,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,
++	0x00,0xd4,0x1b,0x53,0x04,0x01,0x00,0x92,0x13,0x91,0x0f,0x10,0x04,0x01,0x00,0x01,
++	0xff,0xe0,0xa4,0xa8,0xe0,0xa4,0xbc,0x00,0x01,0x00,0x01,0x00,0xd3,0x26,0xd2,0x13,
++	0x91,0x0f,0x10,0x04,0x01,0x00,0x01,0xff,0xe0,0xa4,0xb0,0xe0,0xa4,0xbc,0x00,0x01,
++	0x00,0x91,0x0f,0x10,0x0b,0x01,0xff,0xe0,0xa4,0xb3,0xe0,0xa4,0xbc,0x00,0x01,0x00,
++	0x01,0x00,0xd2,0x08,0x11,0x04,0x01,0x00,0x0c,0x00,0x91,0x08,0x10,0x04,0x01,0x07,
++	0x01,0x00,0x01,0x00,0xcf,0x86,0xd5,0x8c,0xd4,0x18,0x53,0x04,0x01,0x00,0x52,0x04,
++	0x01,0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x01,0x09,0x10,0x04,0x0b,0x00,0x0c,0x00,
++	0xd3,0x1c,0xd2,0x10,0xd1,0x08,0x10,0x04,0x01,0x00,0x01,0xe6,0x10,0x04,0x01,0xdc,
++	0x01,0xe6,0x91,0x08,0x10,0x04,0x01,0xe6,0x0b,0x00,0x0c,0x00,0xd2,0x2c,0xd1,0x16,
++	0x10,0x0b,0x01,0xff,0xe0,0xa4,0x95,0xe0,0xa4,0xbc,0x00,0x01,0xff,0xe0,0xa4,0x96,
++	0xe0,0xa4,0xbc,0x00,0x10,0x0b,0x01,0xff,0xe0,0xa4,0x97,0xe0,0xa4,0xbc,0x00,0x01,
++	0xff,0xe0,0xa4,0x9c,0xe0,0xa4,0xbc,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xe0,0xa4,
++	0xa1,0xe0,0xa4,0xbc,0x00,0x01,0xff,0xe0,0xa4,0xa2,0xe0,0xa4,0xbc,0x00,0x10,0x0b,
++	0x01,0xff,0xe0,0xa4,0xab,0xe0,0xa4,0xbc,0x00,0x01,0xff,0xe0,0xa4,0xaf,0xe0,0xa4,
++	0xbc,0x00,0x54,0x04,0x01,0x00,0xd3,0x14,0x92,0x10,0xd1,0x08,0x10,0x04,0x01,0x00,
++	0x0a,0x00,0x10,0x04,0x0a,0x00,0x0c,0x00,0x0c,0x00,0xd2,0x10,0xd1,0x08,0x10,0x04,
++	0x10,0x00,0x0b,0x00,0x10,0x04,0x0b,0x00,0x09,0x00,0x91,0x08,0x10,0x04,0x09,0x00,
++	0x08,0x00,0x09,0x00,0xd0,0x86,0xcf,0x86,0xd5,0x44,0xd4,0x2c,0xd3,0x18,0xd2,0x0c,
++	0x91,0x08,0x10,0x04,0x10,0x00,0x01,0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x00,0x00,
++	0x01,0x00,0x01,0x00,0x52,0x04,0x01,0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x00,0x00,
++	0x10,0x04,0x00,0x00,0x01,0x00,0x93,0x14,0x92,0x10,0xd1,0x08,0x10,0x04,0x01,0x00,
++	0x00,0x00,0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd4,0x14,0x53,0x04,
++	0x01,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x01,0x00,
++	0xd3,0x18,0xd2,0x10,0xd1,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x10,0x04,0x01,0x00,
++	0x00,0x00,0x11,0x04,0x00,0x00,0x01,0x00,0xd2,0x08,0x11,0x04,0x01,0x00,0x00,0x00,
++	0x91,0x08,0x10,0x04,0x01,0x07,0x07,0x00,0x01,0x00,0xcf,0x86,0xd5,0x7b,0xd4,0x42,
++	0xd3,0x14,0x52,0x04,0x01,0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x10,0x04,
++	0x00,0x00,0x01,0x00,0xd2,0x17,0xd1,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x10,0x04,
++	0x00,0x00,0x01,0xff,0xe0,0xa7,0x87,0xe0,0xa6,0xbe,0x00,0xd1,0x0f,0x10,0x0b,0x01,
++	0xff,0xe0,0xa7,0x87,0xe0,0xa7,0x97,0x00,0x01,0x09,0x10,0x04,0x08,0x00,0x00,0x00,
++	0xd3,0x10,0x52,0x04,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x01,0x00,
++	0x52,0x04,0x00,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xe0,0xa6,0xa1,0xe0,0xa6,0xbc,
++	0x00,0x01,0xff,0xe0,0xa6,0xa2,0xe0,0xa6,0xbc,0x00,0x10,0x04,0x00,0x00,0x01,0xff,
++	0xe0,0xa6,0xaf,0xe0,0xa6,0xbc,0x00,0xd4,0x10,0x93,0x0c,0x52,0x04,0x01,0x00,0x11,
++	0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x53,0x04,0x01,0x00,0xd2,0x0c,0x51,0x04,0x01,
++	0x00,0x10,0x04,0x01,0x00,0x0b,0x00,0x51,0x04,0x13,0x00,0x10,0x04,0x14,0xe6,0x00,
++	0x00,0xe2,0x48,0x02,0xe1,0x4f,0x01,0xd0,0xa4,0xcf,0x86,0xd5,0x4c,0xd4,0x34,0xd3,
++	0x1c,0xd2,0x10,0xd1,0x08,0x10,0x04,0x00,0x00,0x07,0x00,0x10,0x04,0x01,0x00,0x07,
++	0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0xd2,0x0c,0x51,0x04,0x01,
++	0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x01,
++	0x00,0x93,0x14,0x92,0x10,0xd1,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x10,0x04,0x00,
++	0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd4,0x14,0x53,0x04,0x01,0x00,0x92,0x0c,0x91,
++	0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x01,0x00,0xd3,0x2e,0xd2,0x17,0xd1,
++	0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x10,0x04,0x01,0x00,0x01,0xff,0xe0,0xa8,0xb2,
++	0xe0,0xa8,0xbc,0x00,0xd1,0x08,0x10,0x04,0x00,0x00,0x01,0x00,0x10,0x0b,0x01,0xff,
++	0xe0,0xa8,0xb8,0xe0,0xa8,0xbc,0x00,0x00,0x00,0xd2,0x08,0x11,0x04,0x01,0x00,0x00,
++	0x00,0x91,0x08,0x10,0x04,0x01,0x07,0x00,0x00,0x01,0x00,0xcf,0x86,0xd5,0x80,0xd4,
++	0x34,0xd3,0x18,0xd2,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x51,
++	0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x01,0x00,0xd2,0x10,0xd1,0x08,0x10,0x04,0x01,
++	0x00,0x00,0x00,0x10,0x04,0x00,0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x01,0x00,0x01,
++	0x09,0x00,0x00,0xd3,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,0x0a,0x00,0x00,
++	0x00,0x00,0x00,0xd2,0x25,0xd1,0x0f,0x10,0x04,0x00,0x00,0x01,0xff,0xe0,0xa8,0x96,
++	0xe0,0xa8,0xbc,0x00,0x10,0x0b,0x01,0xff,0xe0,0xa8,0x97,0xe0,0xa8,0xbc,0x00,0x01,
++	0xff,0xe0,0xa8,0x9c,0xe0,0xa8,0xbc,0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x00,0x00,
++	0x10,0x0b,0x01,0xff,0xe0,0xa8,0xab,0xe0,0xa8,0xbc,0x00,0x00,0x00,0xd4,0x10,0x93,
++	0x0c,0x52,0x04,0x00,0x00,0x11,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x93,0x14,0x52,
++	0x04,0x01,0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x0a,0x00,0x10,0x04,0x14,0x00,0x00,
++	0x00,0x00,0x00,0xd0,0x82,0xcf,0x86,0xd5,0x40,0xd4,0x2c,0xd3,0x18,0xd2,0x0c,0x91,
++	0x08,0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x01,
++	0x00,0x01,0x00,0x52,0x04,0x01,0x00,0xd1,0x08,0x10,0x04,0x07,0x00,0x01,0x00,0x10,
++	0x04,0x00,0x00,0x01,0x00,0x93,0x10,0x92,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x00,
++	0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd4,0x14,0x53,0x04,0x01,0x00,0x92,0x0c,0x91,
++	0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x01,0x00,0xd3,0x18,0xd2,0x0c,0x91,
++	0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x01,
++	0x00,0x01,0x00,0xd2,0x08,0x11,0x04,0x01,0x00,0x00,0x00,0x91,0x08,0x10,0x04,0x01,
++	0x07,0x01,0x00,0x01,0x00,0xcf,0x86,0xd5,0x3c,0xd4,0x28,0xd3,0x10,0x52,0x04,0x01,
++	0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x00,0x00,0x01,0x00,0xd2,0x0c,0x51,0x04,0x01,
++	0x00,0x10,0x04,0x00,0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x01,0x00,0x01,0x09,0x00,
++	0x00,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x00,0x00,0x00,
++	0x00,0x00,0x00,0xd4,0x18,0x93,0x14,0xd2,0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x07,
++	0x00,0x07,0x00,0x11,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0xd3,0x10,0x92,0x0c,0x91,
++	0x08,0x10,0x04,0x0d,0x00,0x07,0x00,0x00,0x00,0x00,0x00,0x92,0x0c,0x91,0x08,0x10,
++	0x04,0x00,0x00,0x11,0x00,0x13,0x00,0x13,0x00,0xe1,0x24,0x01,0xd0,0x86,0xcf,0x86,
++	0xd5,0x44,0xd4,0x2c,0xd3,0x18,0xd2,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,0x01,0x00,
++	0x01,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x52,0x04,0x01,0x00,
++	0xd1,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x10,0x04,0x00,0x00,0x01,0x00,0x93,0x14,
++	0x92,0x10,0xd1,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x10,0x04,0x00,0x00,0x01,0x00,
++	0x01,0x00,0x01,0x00,0xd4,0x14,0x53,0x04,0x01,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,
++	0x01,0x00,0x00,0x00,0x01,0x00,0x01,0x00,0xd3,0x18,0xd2,0x0c,0x91,0x08,0x10,0x04,
++	0x01,0x00,0x00,0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x07,0x00,0x01,0x00,
++	0xd2,0x08,0x11,0x04,0x01,0x00,0x00,0x00,0x91,0x08,0x10,0x04,0x01,0x07,0x01,0x00,
++	0x01,0x00,0xcf,0x86,0xd5,0x73,0xd4,0x45,0xd3,0x14,0x52,0x04,0x01,0x00,0xd1,0x08,
++	0x10,0x04,0x0a,0x00,0x00,0x00,0x10,0x04,0x00,0x00,0x01,0x00,0xd2,0x1e,0xd1,0x0f,
++	0x10,0x0b,0x01,0xff,0xe0,0xad,0x87,0xe0,0xad,0x96,0x00,0x00,0x00,0x10,0x04,0x00,
++	0x00,0x01,0xff,0xe0,0xad,0x87,0xe0,0xac,0xbe,0x00,0x91,0x0f,0x10,0x0b,0x01,0xff,
++	0xe0,0xad,0x87,0xe0,0xad,0x97,0x00,0x01,0x09,0x00,0x00,0xd3,0x0c,0x52,0x04,0x00,
++	0x00,0x11,0x04,0x00,0x00,0x01,0x00,0x52,0x04,0x00,0x00,0xd1,0x16,0x10,0x0b,0x01,
++	0xff,0xe0,0xac,0xa1,0xe0,0xac,0xbc,0x00,0x01,0xff,0xe0,0xac,0xa2,0xe0,0xac,0xbc,
++	0x00,0x10,0x04,0x00,0x00,0x01,0x00,0xd4,0x14,0x93,0x10,0xd2,0x08,0x11,0x04,0x01,
++	0x00,0x0a,0x00,0x11,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x93,0x10,0x92,0x0c,0x91,
++	0x08,0x10,0x04,0x01,0x00,0x07,0x00,0x0c,0x00,0x0c,0x00,0x00,0x00,0xd0,0xb1,0xcf,
++	0x86,0xd5,0x63,0xd4,0x28,0xd3,0x14,0xd2,0x08,0x11,0x04,0x00,0x00,0x01,0x00,0x91,
++	0x08,0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0xd2,0x0c,0x51,0x04,0x01,0x00,0x10,
++	0x04,0x01,0x00,0x00,0x00,0x11,0x04,0x00,0x00,0x01,0x00,0xd3,0x1f,0xd2,0x0c,0x91,
++	0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x91,0x0f,0x10,0x0b,0x01,0xff,0xe0,
++	0xae,0x92,0xe0,0xaf,0x97,0x00,0x01,0x00,0x00,0x00,0xd2,0x10,0xd1,0x08,0x10,0x04,
++	0x00,0x00,0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x91,0x08,0x10,0x04,0x01,0x00,
++	0x00,0x00,0x01,0x00,0xd4,0x2c,0xd3,0x18,0xd2,0x0c,0x51,0x04,0x00,0x00,0x10,0x04,
++	0x00,0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x00,0x00,0xd2,0x0c,
++	0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x11,0x04,0x00,0x00,0x01,0x00,
++	0xd3,0x10,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x08,0x00,0x01,0x00,
++	0xd2,0x08,0x11,0x04,0x01,0x00,0x00,0x00,0x11,0x04,0x00,0x00,0x01,0x00,0xcf,0x86,
++	0xd5,0x61,0xd4,0x45,0xd3,0x14,0xd2,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,
++	0x00,0x00,0x11,0x04,0x00,0x00,0x01,0x00,0xd2,0x1e,0xd1,0x08,0x10,0x04,0x01,0x00,
++	0x00,0x00,0x10,0x0b,0x01,0xff,0xe0,0xaf,0x86,0xe0,0xae,0xbe,0x00,0x01,0xff,0xe0,
++	0xaf,0x87,0xe0,0xae,0xbe,0x00,0x91,0x0f,0x10,0x0b,0x01,0xff,0xe0,0xaf,0x86,0xe0,
++	0xaf,0x97,0x00,0x01,0x09,0x00,0x00,0x93,0x18,0xd2,0x0c,0x91,0x08,0x10,0x04,0x0a,
++	0x00,0x00,0x00,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x01,0x00,0x00,
++	0x00,0xd4,0x14,0x93,0x10,0x52,0x04,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x08,
++	0x00,0x01,0x00,0x01,0x00,0xd3,0x10,0x92,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,
++	0x00,0x07,0x00,0x07,0x00,0x92,0x0c,0x51,0x04,0x07,0x00,0x10,0x04,0x07,0x00,0x00,
++	0x00,0x00,0x00,0xe3,0x1c,0x04,0xe2,0x1a,0x02,0xd1,0xf3,0xd0,0x76,0xcf,0x86,0xd5,
++	0x3c,0xd4,0x28,0xd3,0x18,0xd2,0x0c,0x91,0x08,0x10,0x04,0x10,0x00,0x01,0x00,0x01,
++	0x00,0x91,0x08,0x10,0x04,0x14,0x00,0x01,0x00,0x01,0x00,0x52,0x04,0x01,0x00,0x91,
++	0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,
++	0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd4,0x14,0x53,0x04,0x01,
++	0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x01,0x00,0xd3,
++	0x10,0x52,0x04,0x01,0x00,0x91,0x08,0x10,0x04,0x10,0x00,0x01,0x00,0x01,0x00,0xd2,
++	0x08,0x11,0x04,0x01,0x00,0x00,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x0a,0x00,0x01,
++	0x00,0xcf,0x86,0xd5,0x53,0xd4,0x2f,0xd3,0x10,0x52,0x04,0x01,0x00,0x91,0x08,0x10,
++	0x04,0x01,0x00,0x00,0x00,0x01,0x00,0xd2,0x13,0x91,0x0f,0x10,0x0b,0x01,0xff,0xe0,
++	0xb1,0x86,0xe0,0xb1,0x96,0x00,0x00,0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x01,0x00,
++	0x01,0x09,0x00,0x00,0xd3,0x14,0x52,0x04,0x00,0x00,0xd1,0x08,0x10,0x04,0x00,0x00,
++	0x01,0x54,0x10,0x04,0x01,0x5b,0x00,0x00,0x92,0x0c,0x51,0x04,0x0a,0x00,0x10,0x04,
++	0x11,0x00,0x00,0x00,0x00,0x00,0xd4,0x14,0x93,0x10,0xd2,0x08,0x11,0x04,0x01,0x00,
++	0x0a,0x00,0x11,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x93,0x10,0x52,0x04,0x00,0x00,
++	0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x15,0x00,0x0a,0x00,0xd0,0x76,0xcf,0x86,
++	0xd5,0x3c,0xd4,0x28,0xd3,0x18,0xd2,0x0c,0x91,0x08,0x10,0x04,0x12,0x00,0x10,0x00,
++	0x01,0x00,0x91,0x08,0x10,0x04,0x14,0x00,0x01,0x00,0x01,0x00,0x52,0x04,0x01,0x00,
++	0x91,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x93,0x10,0x92,0x0c,0x91,0x08,
++	0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd4,0x14,0x53,0x04,
++	0x01,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x01,0x00,
++	0xd3,0x10,0x52,0x04,0x01,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,
++	0xd2,0x08,0x11,0x04,0x01,0x00,0x00,0x00,0x91,0x08,0x10,0x04,0x07,0x07,0x07,0x00,
++	0x01,0x00,0xcf,0x86,0xd5,0x82,0xd4,0x5e,0xd3,0x2a,0xd2,0x13,0x91,0x0f,0x10,0x0b,
++	0x01,0xff,0xe0,0xb2,0xbf,0xe0,0xb3,0x95,0x00,0x01,0x00,0x01,0x00,0xd1,0x08,0x10,
++	0x04,0x01,0x00,0x00,0x00,0x10,0x04,0x01,0x00,0x01,0xff,0xe0,0xb3,0x86,0xe0,0xb3,
++	0x95,0x00,0xd2,0x28,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe0,0xb3,0x86,0xe0,0xb3,0x96,
++	0x00,0x00,0x00,0x10,0x0b,0x01,0xff,0xe0,0xb3,0x86,0xe0,0xb3,0x82,0x00,0x01,0xff,
++	0xe0,0xb3,0x86,0xe0,0xb3,0x82,0xe0,0xb3,0x95,0x00,0x91,0x08,0x10,0x04,0x01,0x00,
++	0x01,0x09,0x00,0x00,0xd3,0x14,0x52,0x04,0x00,0x00,0xd1,0x08,0x10,0x04,0x00,0x00,
++	0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x52,0x04,0x00,0x00,0x51,0x04,0x00,0x00,
++	0x10,0x04,0x01,0x00,0x00,0x00,0xd4,0x14,0x93,0x10,0xd2,0x08,0x11,0x04,0x01,0x00,
++	0x09,0x00,0x11,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x93,0x14,0x92,0x10,0xd1,0x08,
++	0x10,0x04,0x00,0x00,0x09,0x00,0x10,0x04,0x09,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++	0xe1,0x06,0x01,0xd0,0x6e,0xcf,0x86,0xd5,0x3c,0xd4,0x28,0xd3,0x18,0xd2,0x0c,0x91,
++	0x08,0x10,0x04,0x13,0x00,0x10,0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x01,
++	0x00,0x01,0x00,0x52,0x04,0x01,0x00,0x91,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x01,
++	0x00,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x01,
++	0x00,0x01,0x00,0xd4,0x14,0x53,0x04,0x01,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x01,
++	0x00,0x0c,0x00,0x01,0x00,0x01,0x00,0x53,0x04,0x01,0x00,0xd2,0x0c,0x51,0x04,0x01,
++	0x00,0x10,0x04,0x0c,0x00,0x13,0x09,0x91,0x08,0x10,0x04,0x13,0x09,0x0a,0x00,0x01,
++	0x00,0xcf,0x86,0xd5,0x65,0xd4,0x45,0xd3,0x10,0x52,0x04,0x01,0x00,0x91,0x08,0x10,
++	0x04,0x0a,0x00,0x00,0x00,0x01,0x00,0xd2,0x1e,0xd1,0x08,0x10,0x04,0x01,0x00,0x00,
++	0x00,0x10,0x0b,0x01,0xff,0xe0,0xb5,0x86,0xe0,0xb4,0xbe,0x00,0x01,0xff,0xe0,0xb5,
++	0x87,0xe0,0xb4,0xbe,0x00,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe0,0xb5,0x86,0xe0,0xb5,
++	0x97,0x00,0x01,0x09,0x10,0x04,0x0c,0x00,0x12,0x00,0xd3,0x10,0x52,0x04,0x00,0x00,
++	0x51,0x04,0x12,0x00,0x10,0x04,0x12,0x00,0x01,0x00,0x52,0x04,0x12,0x00,0x51,0x04,
++	0x12,0x00,0x10,0x04,0x12,0x00,0x11,0x00,0xd4,0x14,0x93,0x10,0xd2,0x08,0x11,0x04,
++	0x01,0x00,0x0a,0x00,0x11,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0xd3,0x0c,0x52,0x04,
++	0x0a,0x00,0x11,0x04,0x0a,0x00,0x12,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x12,0x00,
++	0x0a,0x00,0x0a,0x00,0x0a,0x00,0xd0,0x5a,0xcf,0x86,0xd5,0x34,0xd4,0x18,0x93,0x14,
++	0xd2,0x08,0x11,0x04,0x00,0x00,0x04,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x04,0x00,
++	0x04,0x00,0x04,0x00,0xd3,0x10,0x52,0x04,0x04,0x00,0x51,0x04,0x04,0x00,0x10,0x04,
++	0x04,0x00,0x00,0x00,0x92,0x08,0x11,0x04,0x00,0x00,0x04,0x00,0x04,0x00,0x54,0x04,
++	0x04,0x00,0xd3,0x10,0x92,0x0c,0x51,0x04,0x04,0x00,0x10,0x04,0x00,0x00,0x04,0x00,
++	0x04,0x00,0x52,0x04,0x04,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x04,0x00,0x00,0x00,
++	0xcf,0x86,0xd5,0x77,0xd4,0x28,0xd3,0x10,0x52,0x04,0x04,0x00,0x51,0x04,0x04,0x00,
++	0x10,0x04,0x04,0x00,0x00,0x00,0xd2,0x0c,0x51,0x04,0x00,0x00,0x10,0x04,0x04,0x09,
++	0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x04,0x00,0xd3,0x14,0x52,0x04,
++	0x04,0x00,0xd1,0x08,0x10,0x04,0x04,0x00,0x00,0x00,0x10,0x04,0x04,0x00,0x00,0x00,
++	0xd2,0x13,0x51,0x04,0x04,0x00,0x10,0x0b,0x04,0xff,0xe0,0xb7,0x99,0xe0,0xb7,0x8a,
++	0x00,0x04,0x00,0xd1,0x19,0x10,0x0b,0x04,0xff,0xe0,0xb7,0x99,0xe0,0xb7,0x8f,0x00,
++	0x04,0xff,0xe0,0xb7,0x99,0xe0,0xb7,0x8f,0xe0,0xb7,0x8a,0x00,0x10,0x0b,0x04,0xff,
++	0xe0,0xb7,0x99,0xe0,0xb7,0x9f,0x00,0x04,0x00,0xd4,0x10,0x93,0x0c,0x52,0x04,0x00,
++	0x00,0x11,0x04,0x00,0x00,0x10,0x00,0x10,0x00,0x93,0x14,0xd2,0x08,0x11,0x04,0x00,
++	0x00,0x04,0x00,0x91,0x08,0x10,0x04,0x04,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xe2,
++	0x31,0x01,0xd1,0x58,0xd0,0x3a,0xcf,0x86,0xd5,0x18,0x94,0x14,0x93,0x10,0x92,0x0c,
++	0x91,0x08,0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,
++	0x54,0x04,0x01,0x00,0x53,0x04,0x01,0x00,0xd2,0x0c,0x51,0x04,0x01,0x67,0x10,0x04,
++	0x01,0x09,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x01,0x00,0xcf,0x86,
++	0x95,0x18,0xd4,0x0c,0x53,0x04,0x01,0x00,0x12,0x04,0x01,0x6b,0x01,0x00,0x53,0x04,
++	0x01,0x00,0x12,0x04,0x01,0x00,0x00,0x00,0x00,0x00,0xd0,0x9e,0xcf,0x86,0xd5,0x54,
++	0xd4,0x3c,0xd3,0x20,0xd2,0x10,0xd1,0x08,0x10,0x04,0x00,0x00,0x01,0x00,0x10,0x04,
++	0x01,0x00,0x00,0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x10,0x04,0x15,0x00,
++	0x01,0x00,0xd2,0x10,0xd1,0x08,0x10,0x04,0x01,0x00,0x15,0x00,0x10,0x04,0x01,0x00,
++	0x00,0x00,0x91,0x08,0x10,0x04,0x15,0x00,0x01,0x00,0x15,0x00,0xd3,0x08,0x12,0x04,
++	0x15,0x00,0x01,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x15,0x00,0x01,0x00,0x01,0x00,
++	0x01,0x00,0xd4,0x30,0xd3,0x1c,0xd2,0x0c,0x91,0x08,0x10,0x04,0x15,0x00,0x01,0x00,
++	0x01,0x00,0xd1,0x08,0x10,0x04,0x00,0x00,0x01,0x00,0x10,0x04,0x00,0x00,0x01,0x00,
++	0xd2,0x08,0x11,0x04,0x15,0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x15,0x00,0x01,0x00,
++	0x01,0x00,0x53,0x04,0x01,0x00,0xd2,0x0c,0x51,0x04,0x01,0x76,0x10,0x04,0x15,0x09,
++	0x01,0x00,0x11,0x04,0x01,0x00,0x00,0x00,0xcf,0x86,0x95,0x34,0xd4,0x20,0xd3,0x14,
++	0x52,0x04,0x01,0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x10,0x04,0x01,0x00,
++	0x00,0x00,0x52,0x04,0x01,0x7a,0x11,0x04,0x01,0x00,0x00,0x00,0x53,0x04,0x01,0x00,
++	0xd2,0x08,0x11,0x04,0x01,0x00,0x00,0x00,0x11,0x04,0x01,0x00,0x0d,0x00,0x00,0x00,
++	0xe1,0x2b,0x01,0xd0,0x3e,0xcf,0x86,0xd5,0x14,0x54,0x04,0x02,0x00,0x53,0x04,0x02,
++	0x00,0x92,0x08,0x11,0x04,0x02,0xdc,0x02,0x00,0x02,0x00,0x54,0x04,0x02,0x00,0xd3,
++	0x14,0x52,0x04,0x02,0x00,0xd1,0x08,0x10,0x04,0x02,0x00,0x02,0xdc,0x10,0x04,0x02,
++	0x00,0x02,0xdc,0x92,0x0c,0x91,0x08,0x10,0x04,0x02,0x00,0x02,0xd8,0x02,0x00,0x02,
++	0x00,0xcf,0x86,0xd5,0x73,0xd4,0x36,0xd3,0x17,0x92,0x13,0x51,0x04,0x02,0x00,0x10,
++	0x04,0x02,0x00,0x02,0xff,0xe0,0xbd,0x82,0xe0,0xbe,0xb7,0x00,0x02,0x00,0xd2,0x0c,
++	0x91,0x08,0x10,0x04,0x00,0x00,0x02,0x00,0x02,0x00,0x91,0x0f,0x10,0x04,0x02,0x00,
++	0x02,0xff,0xe0,0xbd,0x8c,0xe0,0xbe,0xb7,0x00,0x02,0x00,0xd3,0x26,0xd2,0x13,0x51,
++	0x04,0x02,0x00,0x10,0x0b,0x02,0xff,0xe0,0xbd,0x91,0xe0,0xbe,0xb7,0x00,0x02,0x00,
++	0x51,0x04,0x02,0x00,0x10,0x04,0x02,0x00,0x02,0xff,0xe0,0xbd,0x96,0xe0,0xbe,0xb7,
++	0x00,0x52,0x04,0x02,0x00,0x91,0x0f,0x10,0x0b,0x02,0xff,0xe0,0xbd,0x9b,0xe0,0xbe,
++	0xb7,0x00,0x02,0x00,0x02,0x00,0xd4,0x27,0x53,0x04,0x02,0x00,0xd2,0x17,0xd1,0x0f,
++	0x10,0x04,0x02,0x00,0x02,0xff,0xe0,0xbd,0x80,0xe0,0xbe,0xb5,0x00,0x10,0x04,0x04,
++	0x00,0x0a,0x00,0x91,0x08,0x10,0x04,0x0a,0x00,0x00,0x00,0x00,0x00,0xd3,0x35,0xd2,
++	0x17,0xd1,0x08,0x10,0x04,0x00,0x00,0x02,0x81,0x10,0x04,0x02,0x82,0x02,0xff,0xe0,
++	0xbd,0xb1,0xe0,0xbd,0xb2,0x00,0xd1,0x0f,0x10,0x04,0x02,0x84,0x02,0xff,0xe0,0xbd,
++	0xb1,0xe0,0xbd,0xb4,0x00,0x10,0x0b,0x02,0xff,0xe0,0xbe,0xb2,0xe0,0xbe,0x80,0x00,
++	0x02,0x00,0xd2,0x13,0x91,0x0f,0x10,0x0b,0x02,0xff,0xe0,0xbe,0xb3,0xe0,0xbe,0x80,
++	0x00,0x02,0x00,0x02,0x82,0x11,0x04,0x02,0x82,0x02,0x00,0xd0,0xd3,0xcf,0x86,0xd5,
++	0x65,0xd4,0x27,0xd3,0x1f,0xd2,0x13,0x91,0x0f,0x10,0x04,0x02,0x82,0x02,0xff,0xe0,
++	0xbd,0xb1,0xe0,0xbe,0x80,0x00,0x02,0xe6,0x91,0x08,0x10,0x04,0x02,0x09,0x02,0x00,
++	0x02,0xe6,0x12,0x04,0x02,0x00,0x0c,0x00,0xd3,0x1f,0xd2,0x13,0x51,0x04,0x02,0x00,
++	0x10,0x04,0x02,0x00,0x02,0xff,0xe0,0xbe,0x92,0xe0,0xbe,0xb7,0x00,0x51,0x04,0x02,
++	0x00,0x10,0x04,0x04,0x00,0x02,0x00,0xd2,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,0x02,
++	0x00,0x02,0x00,0x91,0x0f,0x10,0x04,0x02,0x00,0x02,0xff,0xe0,0xbe,0x9c,0xe0,0xbe,
++	0xb7,0x00,0x02,0x00,0xd4,0x3d,0xd3,0x26,0xd2,0x13,0x51,0x04,0x02,0x00,0x10,0x0b,
++	0x02,0xff,0xe0,0xbe,0xa1,0xe0,0xbe,0xb7,0x00,0x02,0x00,0x51,0x04,0x02,0x00,0x10,
++	0x04,0x02,0x00,0x02,0xff,0xe0,0xbe,0xa6,0xe0,0xbe,0xb7,0x00,0x52,0x04,0x02,0x00,
++	0x91,0x0f,0x10,0x0b,0x02,0xff,0xe0,0xbe,0xab,0xe0,0xbe,0xb7,0x00,0x02,0x00,0x04,
++	0x00,0xd3,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x04,0x00,0x02,0x00,0x02,0x00,0x02,
++	0x00,0xd2,0x13,0x91,0x0f,0x10,0x04,0x04,0x00,0x02,0xff,0xe0,0xbe,0x90,0xe0,0xbe,
++	0xb5,0x00,0x04,0x00,0x91,0x08,0x10,0x04,0x04,0x00,0x00,0x00,0x04,0x00,0xcf,0x86,
++	0x95,0x4c,0xd4,0x24,0xd3,0x10,0x52,0x04,0x04,0x00,0x51,0x04,0x04,0x00,0x10,0x04,
++	0x04,0xdc,0x04,0x00,0x52,0x04,0x04,0x00,0xd1,0x08,0x10,0x04,0x04,0x00,0x00,0x00,
++	0x10,0x04,0x0a,0x00,0x04,0x00,0xd3,0x14,0xd2,0x08,0x11,0x04,0x08,0x00,0x0a,0x00,
++	0x91,0x08,0x10,0x04,0x0a,0x00,0x0b,0x00,0x0b,0x00,0x92,0x10,0xd1,0x08,0x10,0x04,
++	0x0b,0x00,0x0c,0x00,0x10,0x04,0x0c,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x86,
++	0xe5,0xf7,0x04,0xe4,0x79,0x03,0xe3,0x7b,0x01,0xe2,0x04,0x01,0xd1,0x7f,0xd0,0x65,
++	0xcf,0x86,0x55,0x04,0x04,0x00,0xd4,0x33,0xd3,0x1f,0xd2,0x0c,0x51,0x04,0x04,0x00,
++	0x10,0x04,0x0a,0x00,0x04,0x00,0x51,0x04,0x04,0x00,0x10,0x0b,0x04,0xff,0xe1,0x80,
++	0xa5,0xe1,0x80,0xae,0x00,0x04,0x00,0x92,0x10,0xd1,0x08,0x10,0x04,0x0a,0x00,0x04,
++	0x00,0x10,0x04,0x04,0x00,0x0a,0x00,0x04,0x00,0xd3,0x18,0xd2,0x0c,0x51,0x04,0x04,
++	0x00,0x10,0x04,0x04,0x00,0x0a,0x00,0x51,0x04,0x0a,0x00,0x10,0x04,0x04,0x00,0x04,
++	0x07,0x92,0x10,0xd1,0x08,0x10,0x04,0x04,0x00,0x04,0x09,0x10,0x04,0x0a,0x09,0x0a,
++	0x00,0x0a,0x00,0xcf,0x86,0x95,0x14,0x54,0x04,0x04,0x00,0x53,0x04,0x04,0x00,0x92,
++	0x08,0x11,0x04,0x04,0x00,0x0a,0x00,0x0a,0x00,0x0a,0x00,0xd0,0x2e,0xcf,0x86,0x95,
++	0x28,0xd4,0x14,0x53,0x04,0x0a,0x00,0x52,0x04,0x0a,0x00,0x91,0x08,0x10,0x04,0x0a,
++	0x00,0x0a,0xdc,0x0a,0x00,0x53,0x04,0x0a,0x00,0xd2,0x08,0x11,0x04,0x0a,0x00,0x0b,
++	0x00,0x11,0x04,0x0b,0x00,0x0a,0x00,0x01,0x00,0xcf,0x86,0xd5,0x24,0x94,0x20,0xd3,
++	0x10,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x00,0x00,0x0d,0x00,0x52,
++	0x04,0x00,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x0d,0x00,0x00,0x00,0x01,0x00,0x54,
++	0x04,0x01,0x00,0xd3,0x10,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,
++	0x00,0x06,0x00,0xd2,0x10,0xd1,0x08,0x10,0x04,0x06,0x00,0x08,0x00,0x10,0x04,0x08,
++	0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x08,0x00,0x0d,0x00,0x0d,0x00,0xd1,0x3e,0xd0,
++	0x06,0xcf,0x06,0x01,0x00,0xcf,0x86,0xd5,0x1d,0x54,0x04,0x01,0x00,0x53,0x04,0x01,
++	0x00,0xd2,0x08,0x11,0x04,0x01,0x00,0x0b,0x00,0x51,0x04,0x0b,0x00,0x10,0x04,0x0b,
++	0x00,0x01,0xff,0x00,0x94,0x15,0x93,0x11,0x92,0x0d,0x91,0x09,0x10,0x05,0x01,0xff,
++	0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd0,0x1e,0xcf,0x86,0x55,
++	0x04,0x01,0x00,0x94,0x14,0x93,0x10,0x92,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,
++	0x00,0x0b,0x00,0x0b,0x00,0x01,0x00,0x01,0x00,0xcf,0x86,0x55,0x04,0x01,0x00,0x54,
++	0x04,0x01,0x00,0x53,0x04,0x01,0x00,0x92,0x08,0x11,0x04,0x01,0x00,0x0b,0x00,0x0b,
++	0x00,0xe2,0x21,0x01,0xd1,0x6c,0xd0,0x1e,0xcf,0x86,0x95,0x18,0x94,0x14,0x93,0x10,
++	0x52,0x04,0x04,0x00,0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x08,0x00,0x04,0x00,
++	0x04,0x00,0x04,0x00,0xcf,0x86,0x95,0x48,0xd4,0x24,0xd3,0x10,0x52,0x04,0x04,0x00,
++	0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x08,0x00,0xd2,0x0c,0x91,0x08,0x10,0x04,
++	0x04,0x00,0x00,0x00,0x04,0x00,0x11,0x04,0x04,0x00,0x00,0x00,0xd3,0x10,0x52,0x04,
++	0x04,0x00,0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x00,0x00,0xd2,0x0c,0x91,0x08,
++	0x10,0x04,0x04,0x00,0x00,0x00,0x04,0x00,0x11,0x04,0x04,0x00,0x00,0x00,0x04,0x00,
++	0xd0,0x62,0xcf,0x86,0xd5,0x28,0x94,0x24,0xd3,0x10,0x52,0x04,0x04,0x00,0x51,0x04,
++	0x04,0x00,0x10,0x04,0x04,0x00,0x08,0x00,0xd2,0x0c,0x91,0x08,0x10,0x04,0x04,0x00,
++	0x00,0x00,0x04,0x00,0x11,0x04,0x04,0x00,0x00,0x00,0x04,0x00,0xd4,0x14,0x53,0x04,
++	0x04,0x00,0x52,0x04,0x04,0x00,0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x08,0x00,
++	0xd3,0x14,0xd2,0x0c,0x91,0x08,0x10,0x04,0x04,0x00,0x00,0x00,0x04,0x00,0x11,0x04,
++	0x04,0x00,0x00,0x00,0x52,0x04,0x04,0x00,0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,
++	0x00,0x00,0xcf,0x86,0xd5,0x38,0xd4,0x24,0xd3,0x14,0xd2,0x0c,0x91,0x08,0x10,0x04,
++	0x04,0x00,0x00,0x00,0x04,0x00,0x11,0x04,0x04,0x00,0x00,0x00,0x52,0x04,0x04,0x00,
++	0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x08,0x00,0x93,0x10,0x52,0x04,0x04,0x00,
++	0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x00,0x00,0x04,0x00,0x94,0x14,0x53,0x04,
++	0x04,0x00,0x52,0x04,0x04,0x00,0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x08,0x00,
++	0x04,0x00,0xd1,0x9c,0xd0,0x3e,0xcf,0x86,0x95,0x38,0xd4,0x14,0x53,0x04,0x04,0x00,
++	0x52,0x04,0x04,0x00,0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x08,0x00,0xd3,0x14,
++	0xd2,0x0c,0x91,0x08,0x10,0x04,0x04,0x00,0x00,0x00,0x04,0x00,0x11,0x04,0x04,0x00,
++	0x00,0x00,0x52,0x04,0x04,0x00,0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x08,0x00,
++	0x04,0x00,0xcf,0x86,0xd5,0x34,0xd4,0x14,0x93,0x10,0x52,0x04,0x04,0x00,0x51,0x04,
++	0x04,0x00,0x10,0x04,0x04,0x00,0x08,0x00,0x04,0x00,0x53,0x04,0x04,0x00,0xd2,0x0c,
++	0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x00,0x00,0xd1,0x08,0x10,0x04,0x00,0x00,
++	0x0c,0xe6,0x10,0x04,0x0c,0xe6,0x08,0xe6,0xd4,0x14,0x93,0x10,0x92,0x0c,0x91,0x08,
++	0x10,0x04,0x08,0x00,0x04,0x00,0x04,0x00,0x04,0x00,0x04,0x00,0x53,0x04,0x04,0x00,
++	0x52,0x04,0x04,0x00,0x91,0x08,0x10,0x04,0x04,0x00,0x00,0x00,0x00,0x00,0xd0,0x1a,
++	0xcf,0x86,0x95,0x14,0x54,0x04,0x08,0x00,0x53,0x04,0x08,0x00,0x92,0x08,0x11,0x04,
++	0x08,0x00,0x00,0x00,0x00,0x00,0x04,0x00,0xcf,0x86,0x55,0x04,0x04,0x00,0x54,0x04,
++	0x04,0x00,0xd3,0x10,0x52,0x04,0x04,0x00,0x91,0x08,0x10,0x04,0x04,0x00,0x11,0x00,
++	0x00,0x00,0x52,0x04,0x11,0x00,0x11,0x04,0x11,0x00,0x00,0x00,0xd3,0x30,0xd2,0x2a,
++	0xd1,0x24,0xd0,0x1e,0xcf,0x86,0x95,0x18,0x94,0x14,0x93,0x10,0x92,0x0c,0x91,0x08,
++	0x10,0x04,0x0b,0x00,0x04,0x00,0x04,0x00,0x04,0x00,0x04,0x00,0x04,0x00,0x04,0x00,
++	0xcf,0x06,0x04,0x00,0xcf,0x06,0x04,0x00,0xcf,0x06,0x04,0x00,0xd2,0x6c,0xd1,0x24,
++	0xd0,0x06,0xcf,0x06,0x04,0x00,0xcf,0x86,0x55,0x04,0x04,0x00,0x54,0x04,0x04,0x00,
++	0x93,0x10,0x52,0x04,0x04,0x00,0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x0b,0x00,
++	0x0b,0x00,0xd0,0x1e,0xcf,0x86,0x95,0x18,0x54,0x04,0x04,0x00,0x53,0x04,0x04,0x00,
++	0x52,0x04,0x04,0x00,0x91,0x08,0x10,0x04,0x04,0x00,0x00,0x00,0x00,0x00,0x04,0x00,
++	0xcf,0x86,0x55,0x04,0x04,0x00,0x54,0x04,0x04,0x00,0xd3,0x10,0x92,0x0c,0x91,0x08,
++	0x10,0x04,0x04,0x00,0x10,0x00,0x10,0x00,0x10,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,
++	0x10,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xd1,0x80,0xd0,0x46,0xcf,0x86,0xd5,0x28,
++	0xd4,0x14,0x53,0x04,0x06,0x00,0x52,0x04,0x06,0x00,0x91,0x08,0x10,0x04,0x06,0x00,
++	0x00,0x00,0x06,0x00,0x93,0x10,0x52,0x04,0x06,0x00,0x91,0x08,0x10,0x04,0x06,0x09,
++	0x00,0x00,0x00,0x00,0x00,0x00,0x54,0x04,0x06,0x00,0x93,0x14,0x52,0x04,0x06,0x00,
++	0xd1,0x08,0x10,0x04,0x06,0x09,0x06,0x00,0x10,0x04,0x06,0x00,0x00,0x00,0x00,0x00,
++	0xcf,0x86,0xd5,0x10,0x54,0x04,0x06,0x00,0x93,0x08,0x12,0x04,0x06,0x00,0x00,0x00,
++	0x00,0x00,0xd4,0x14,0x53,0x04,0x06,0x00,0x52,0x04,0x06,0x00,0x91,0x08,0x10,0x04,
++	0x06,0x00,0x00,0x00,0x06,0x00,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x06,0x00,
++	0x00,0x00,0x06,0x00,0x00,0x00,0x00,0x00,0xd0,0x1b,0xcf,0x86,0x55,0x04,0x04,0x00,
++	0x54,0x04,0x04,0x00,0x93,0x0d,0x52,0x04,0x04,0x00,0x11,0x05,0x04,0xff,0x00,0x04,
++	0x00,0x04,0x00,0xcf,0x86,0xd5,0x24,0x54,0x04,0x04,0x00,0xd3,0x10,0x92,0x0c,0x51,
++	0x04,0x04,0x00,0x10,0x04,0x04,0x09,0x04,0x00,0x04,0x00,0x52,0x04,0x04,0x00,0x91,
++	0x08,0x10,0x04,0x04,0x00,0x07,0xe6,0x00,0x00,0xd4,0x10,0x53,0x04,0x04,0x00,0x92,
++	0x08,0x11,0x04,0x04,0x00,0x00,0x00,0x00,0x00,0x53,0x04,0x07,0x00,0x92,0x08,0x11,
++	0x04,0x07,0x00,0x00,0x00,0x00,0x00,0xe4,0xb7,0x03,0xe3,0x58,0x01,0xd2,0x8f,0xd1,
++	0x53,0xd0,0x35,0xcf,0x86,0x95,0x2f,0xd4,0x1f,0x53,0x04,0x04,0x00,0xd2,0x0d,0x51,
++	0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x04,0xff,0x00,0x51,0x05,0x04,0xff,0x00,0x10,
++	0x05,0x04,0xff,0x00,0x00,0x00,0x53,0x04,0x04,0x00,0x92,0x08,0x11,0x04,0x04,0x00,
++	0x00,0x00,0x00,0x00,0x04,0x00,0xcf,0x86,0x55,0x04,0x04,0x00,0x54,0x04,0x04,0x00,
++	0x53,0x04,0x04,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x14,0x00,0x00,0x00,0x00,0x00,
++	0x00,0x00,0xd0,0x22,0xcf,0x86,0x55,0x04,0x04,0x00,0x94,0x18,0x53,0x04,0x04,0x00,
++	0x92,0x10,0xd1,0x08,0x10,0x04,0x04,0x00,0x04,0xe4,0x10,0x04,0x0a,0x00,0x00,0x00,
++	0x00,0x00,0x0b,0x00,0xcf,0x86,0x55,0x04,0x0b,0x00,0x54,0x04,0x0b,0x00,0x93,0x0c,
++	0x52,0x04,0x0b,0x00,0x11,0x04,0x0b,0x00,0x00,0x00,0x00,0x00,0xd1,0x80,0xd0,0x42,
++	0xcf,0x86,0xd5,0x1c,0x54,0x04,0x07,0x00,0x53,0x04,0x07,0x00,0x52,0x04,0x07,0x00,
++	0xd1,0x08,0x10,0x04,0x07,0x00,0x10,0x00,0x10,0x04,0x10,0x00,0x00,0x00,0xd4,0x0c,
++	0x53,0x04,0x07,0x00,0x12,0x04,0x07,0x00,0x00,0x00,0x53,0x04,0x07,0x00,0x92,0x10,
++	0xd1,0x08,0x10,0x04,0x07,0x00,0x07,0xde,0x10,0x04,0x07,0xe6,0x07,0xdc,0x00,0x00,
++	0xcf,0x86,0xd5,0x18,0x94,0x14,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x07,0x00,
++	0x00,0x00,0x00,0x00,0x07,0x00,0x07,0x00,0x07,0x00,0xd4,0x10,0x53,0x04,0x07,0x00,
++	0x52,0x04,0x07,0x00,0x11,0x04,0x07,0x00,0x00,0x00,0x93,0x10,0x52,0x04,0x07,0x00,
++	0x91,0x08,0x10,0x04,0x07,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xd0,0x1a,0xcf,0x86,
++	0x55,0x04,0x08,0x00,0x94,0x10,0x53,0x04,0x08,0x00,0x92,0x08,0x11,0x04,0x08,0x00,
++	0x0b,0x00,0x00,0x00,0x08,0x00,0xcf,0x86,0x95,0x28,0xd4,0x10,0x53,0x04,0x08,0x00,
++	0x92,0x08,0x11,0x04,0x08,0x00,0x00,0x00,0x00,0x00,0x53,0x04,0x08,0x00,0xd2,0x0c,
++	0x51,0x04,0x08,0x00,0x10,0x04,0x0b,0x00,0x00,0x00,0x11,0x04,0x00,0x00,0x08,0x00,
++	0x07,0x00,0xd2,0xe4,0xd1,0x80,0xd0,0x2e,0xcf,0x86,0x95,0x28,0x54,0x04,0x08,0x00,
++	0xd3,0x10,0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00,0x10,0x04,0x08,0x00,0x08,0xe6,
++	0xd2,0x0c,0x91,0x08,0x10,0x04,0x08,0xdc,0x08,0x00,0x08,0x00,0x11,0x04,0x00,0x00,
++	0x08,0x00,0x0b,0x00,0xcf,0x86,0xd5,0x18,0x54,0x04,0x0b,0x00,0x53,0x04,0x0b,0x00,
++	0x52,0x04,0x0b,0x00,0x51,0x04,0x0b,0x00,0x10,0x04,0x0b,0x00,0x00,0x00,0xd4,0x14,
++	0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x0b,0x09,0x0b,0x00,0x0b,0x00,0x0b,0x00,
++	0x0b,0x00,0xd3,0x10,0x52,0x04,0x0b,0x00,0x91,0x08,0x10,0x04,0x0b,0x00,0x0b,0xe6,
++	0x0b,0xe6,0x52,0x04,0x0b,0xe6,0xd1,0x08,0x10,0x04,0x0b,0xe6,0x00,0x00,0x10,0x04,
++	0x00,0x00,0x0b,0xdc,0xd0,0x5e,0xcf,0x86,0xd5,0x20,0xd4,0x10,0x53,0x04,0x0b,0x00,
++	0x92,0x08,0x11,0x04,0x0b,0x00,0x00,0x00,0x00,0x00,0x53,0x04,0x0b,0x00,0x92,0x08,
++	0x11,0x04,0x0b,0x00,0x00,0x00,0x00,0x00,0xd4,0x10,0x53,0x04,0x0b,0x00,0x52,0x04,
++	0x0b,0x00,0x11,0x04,0x0b,0x00,0x00,0x00,0xd3,0x10,0x52,0x04,0x10,0xe6,0x91,0x08,
++	0x10,0x04,0x10,0xe6,0x10,0xdc,0x10,0xdc,0xd2,0x0c,0x51,0x04,0x10,0xdc,0x10,0x04,
++	0x10,0xdc,0x10,0xe6,0xd1,0x08,0x10,0x04,0x10,0xe6,0x10,0xdc,0x10,0x04,0x10,0x00,
++	0x00,0x00,0xcf,0x06,0x00,0x00,0xe1,0x1e,0x01,0xd0,0xaa,0xcf,0x86,0xd5,0x6e,0xd4,
++	0x53,0xd3,0x17,0x52,0x04,0x09,0x00,0x51,0x04,0x09,0x00,0x10,0x0b,0x09,0xff,0xe1,
++	0xac,0x85,0xe1,0xac,0xb5,0x00,0x09,0x00,0xd2,0x1e,0xd1,0x0f,0x10,0x0b,0x09,0xff,
++	0xe1,0xac,0x87,0xe1,0xac,0xb5,0x00,0x09,0x00,0x10,0x0b,0x09,0xff,0xe1,0xac,0x89,
++	0xe1,0xac,0xb5,0x00,0x09,0x00,0xd1,0x0f,0x10,0x0b,0x09,0xff,0xe1,0xac,0x8b,0xe1,
++	0xac,0xb5,0x00,0x09,0x00,0x10,0x0b,0x09,0xff,0xe1,0xac,0x8d,0xe1,0xac,0xb5,0x00,
++	0x09,0x00,0x93,0x17,0x92,0x13,0x51,0x04,0x09,0x00,0x10,0x0b,0x09,0xff,0xe1,0xac,
++	0x91,0xe1,0xac,0xb5,0x00,0x09,0x00,0x09,0x00,0x09,0x00,0x54,0x04,0x09,0x00,0xd3,
++	0x10,0x52,0x04,0x09,0x00,0x91,0x08,0x10,0x04,0x09,0x07,0x09,0x00,0x09,0x00,0xd2,
++	0x13,0x51,0x04,0x09,0x00,0x10,0x04,0x09,0x00,0x09,0xff,0xe1,0xac,0xba,0xe1,0xac,
++	0xb5,0x00,0x91,0x0f,0x10,0x04,0x09,0x00,0x09,0xff,0xe1,0xac,0xbc,0xe1,0xac,0xb5,
++	0x00,0x09,0x00,0xcf,0x86,0xd5,0x3d,0x94,0x39,0xd3,0x31,0xd2,0x25,0xd1,0x16,0x10,
++	0x0b,0x09,0xff,0xe1,0xac,0xbe,0xe1,0xac,0xb5,0x00,0x09,0xff,0xe1,0xac,0xbf,0xe1,
++	0xac,0xb5,0x00,0x10,0x04,0x09,0x00,0x09,0xff,0xe1,0xad,0x82,0xe1,0xac,0xb5,0x00,
++	0x91,0x08,0x10,0x04,0x09,0x09,0x09,0x00,0x09,0x00,0x12,0x04,0x09,0x00,0x00,0x00,
++	0x09,0x00,0xd4,0x1c,0x53,0x04,0x09,0x00,0xd2,0x0c,0x51,0x04,0x09,0x00,0x10,0x04,
++	0x09,0x00,0x09,0xe6,0x91,0x08,0x10,0x04,0x09,0xdc,0x09,0xe6,0x09,0xe6,0xd3,0x08,
++	0x12,0x04,0x09,0xe6,0x09,0x00,0x52,0x04,0x09,0x00,0x91,0x08,0x10,0x04,0x09,0x00,
++	0x00,0x00,0x00,0x00,0xd0,0x2e,0xcf,0x86,0x55,0x04,0x0a,0x00,0xd4,0x18,0x53,0x04,
++	0x0a,0x00,0xd2,0x0c,0x51,0x04,0x0a,0x00,0x10,0x04,0x0a,0x09,0x0d,0x09,0x11,0x04,
++	0x0d,0x00,0x0a,0x00,0x53,0x04,0x0a,0x00,0x92,0x08,0x11,0x04,0x0a,0x00,0x0d,0x00,
++	0x0d,0x00,0xcf,0x86,0x55,0x04,0x0c,0x00,0xd4,0x14,0x93,0x10,0x52,0x04,0x0c,0x00,
++	0x51,0x04,0x0c,0x00,0x10,0x04,0x0c,0x07,0x0c,0x00,0x0c,0x00,0xd3,0x0c,0x92,0x08,
++	0x11,0x04,0x0c,0x00,0x0c,0x09,0x00,0x00,0x12,0x04,0x00,0x00,0x0c,0x00,0xe3,0xb2,
++	0x01,0xe2,0x09,0x01,0xd1,0x4c,0xd0,0x2a,0xcf,0x86,0x55,0x04,0x0a,0x00,0x54,0x04,
++	0x0a,0x00,0xd3,0x10,0x52,0x04,0x0a,0x00,0x51,0x04,0x0a,0x00,0x10,0x04,0x0a,0x00,
++	0x0a,0x07,0x92,0x0c,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x0a,0x00,0x0a,0x00,
++	0xcf,0x86,0x95,0x1c,0x94,0x18,0x53,0x04,0x0a,0x00,0xd2,0x08,0x11,0x04,0x0a,0x00,
++	0x00,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x0a,0x00,0x0a,0x00,0x0a,0x00,0x0a,0x00,
++	0xd0,0x3a,0xcf,0x86,0xd5,0x18,0x94,0x14,0x53,0x04,0x12,0x00,0x92,0x0c,0x91,0x08,
++	0x10,0x04,0x12,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x14,0x00,0x54,0x04,0x14,0x00,
++	0x53,0x04,0x14,0x00,0xd2,0x0c,0x51,0x04,0x14,0x00,0x10,0x04,0x14,0x00,0x00,0x00,
++	0x91,0x08,0x10,0x04,0x00,0x00,0x14,0x00,0x14,0x00,0xcf,0x86,0xd5,0x2c,0xd4,0x08,
++	0x13,0x04,0x0d,0x00,0x00,0x00,0xd3,0x18,0xd2,0x0c,0x51,0x04,0x0b,0xe6,0x10,0x04,
++	0x0b,0xe6,0x0b,0x00,0x91,0x08,0x10,0x04,0x0b,0x01,0x0b,0xdc,0x0b,0xdc,0x92,0x08,
++	0x11,0x04,0x0b,0xdc,0x0b,0xe6,0x0b,0xdc,0xd4,0x28,0xd3,0x10,0x92,0x0c,0x91,0x08,
++	0x10,0x04,0x0b,0xe6,0x0b,0x00,0x0b,0x01,0x0b,0x01,0xd2,0x0c,0x91,0x08,0x10,0x04,
++	0x0b,0x01,0x0b,0x00,0x0b,0x00,0x91,0x08,0x10,0x04,0x0b,0x00,0x0b,0xdc,0x0b,0x00,
++	0xd3,0x1c,0xd2,0x0c,0x51,0x04,0x0b,0x00,0x10,0x04,0x0b,0x00,0x0d,0x00,0xd1,0x08,
++	0x10,0x04,0x0d,0xe6,0x0d,0x00,0x10,0x04,0x0d,0x00,0x13,0x00,0x92,0x0c,0x51,0x04,
++	0x10,0xe6,0x10,0x04,0x15,0x00,0x00,0x00,0x00,0x00,0xd1,0x1c,0xd0,0x06,0xcf,0x06,
++	0x07,0x00,0xcf,0x86,0x55,0x04,0x07,0x00,0x94,0x0c,0x53,0x04,0x07,0x00,0x12,0x04,
++	0x07,0x00,0x08,0x00,0x08,0x00,0xd0,0x06,0xcf,0x06,0x08,0x00,0xcf,0x86,0xd5,0x40,
++	0xd4,0x2c,0xd3,0x10,0x92,0x0c,0x51,0x04,0x08,0xe6,0x10,0x04,0x08,0xdc,0x08,0xe6,
++	0x09,0xe6,0xd2,0x0c,0x51,0x04,0x09,0xe6,0x10,0x04,0x09,0xdc,0x0a,0xe6,0xd1,0x08,
++	0x10,0x04,0x0a,0xe6,0x0a,0xea,0x10,0x04,0x0a,0xd6,0x0a,0xdc,0x93,0x10,0x92,0x0c,
++	0x91,0x08,0x10,0x04,0x0a,0xca,0x0a,0xe6,0x0a,0xe6,0x0a,0xe6,0x0a,0xe6,0xd4,0x14,
++	0x93,0x10,0x52,0x04,0x0a,0xe6,0x51,0x04,0x0a,0xe6,0x10,0x04,0x0a,0xe6,0x10,0xe6,
++	0x10,0xe6,0xd3,0x10,0x52,0x04,0x10,0xe6,0x51,0x04,0x10,0xe6,0x10,0x04,0x13,0xe8,
++	0x13,0xe4,0xd2,0x10,0xd1,0x08,0x10,0x04,0x13,0xe4,0x13,0xdc,0x10,0x04,0x00,0x00,
++	0x12,0xe6,0xd1,0x08,0x10,0x04,0x0c,0xe9,0x0b,0xdc,0x10,0x04,0x09,0xe6,0x09,0xdc,
++	0xe2,0x80,0x08,0xe1,0x48,0x04,0xe0,0x1c,0x02,0xcf,0x86,0xe5,0x11,0x01,0xd4,0x84,
++	0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x41,0xcc,0xa5,0x00,0x01,0xff,
++	0x61,0xcc,0xa5,0x00,0x10,0x08,0x01,0xff,0x42,0xcc,0x87,0x00,0x01,0xff,0x62,0xcc,
++	0x87,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x42,0xcc,0xa3,0x00,0x01,0xff,0x62,0xcc,
++	0xa3,0x00,0x10,0x08,0x01,0xff,0x42,0xcc,0xb1,0x00,0x01,0xff,0x62,0xcc,0xb1,0x00,
++	0xd2,0x24,0xd1,0x14,0x10,0x0a,0x01,0xff,0x43,0xcc,0xa7,0xcc,0x81,0x00,0x01,0xff,
++	0x63,0xcc,0xa7,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x44,0xcc,0x87,0x00,0x01,0xff,
++	0x64,0xcc,0x87,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x44,0xcc,0xa3,0x00,0x01,0xff,
++	0x64,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x44,0xcc,0xb1,0x00,0x01,0xff,0x64,0xcc,
++	0xb1,0x00,0xd3,0x48,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x44,0xcc,0xa7,0x00,
++	0x01,0xff,0x64,0xcc,0xa7,0x00,0x10,0x08,0x01,0xff,0x44,0xcc,0xad,0x00,0x01,0xff,
++	0x64,0xcc,0xad,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x45,0xcc,0x84,0xcc,0x80,0x00,
++	0x01,0xff,0x65,0xcc,0x84,0xcc,0x80,0x00,0x10,0x0a,0x01,0xff,0x45,0xcc,0x84,0xcc,
++	0x81,0x00,0x01,0xff,0x65,0xcc,0x84,0xcc,0x81,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,
++	0x01,0xff,0x45,0xcc,0xad,0x00,0x01,0xff,0x65,0xcc,0xad,0x00,0x10,0x08,0x01,0xff,
++	0x45,0xcc,0xb0,0x00,0x01,0xff,0x65,0xcc,0xb0,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,
++	0x45,0xcc,0xa7,0xcc,0x86,0x00,0x01,0xff,0x65,0xcc,0xa7,0xcc,0x86,0x00,0x10,0x08,
++	0x01,0xff,0x46,0xcc,0x87,0x00,0x01,0xff,0x66,0xcc,0x87,0x00,0xd4,0x84,0xd3,0x40,
++	0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x47,0xcc,0x84,0x00,0x01,0xff,0x67,0xcc,
++	0x84,0x00,0x10,0x08,0x01,0xff,0x48,0xcc,0x87,0x00,0x01,0xff,0x68,0xcc,0x87,0x00,
++	0xd1,0x10,0x10,0x08,0x01,0xff,0x48,0xcc,0xa3,0x00,0x01,0xff,0x68,0xcc,0xa3,0x00,
++	0x10,0x08,0x01,0xff,0x48,0xcc,0x88,0x00,0x01,0xff,0x68,0xcc,0x88,0x00,0xd2,0x20,
++	0xd1,0x10,0x10,0x08,0x01,0xff,0x48,0xcc,0xa7,0x00,0x01,0xff,0x68,0xcc,0xa7,0x00,
++	0x10,0x08,0x01,0xff,0x48,0xcc,0xae,0x00,0x01,0xff,0x68,0xcc,0xae,0x00,0xd1,0x10,
++	0x10,0x08,0x01,0xff,0x49,0xcc,0xb0,0x00,0x01,0xff,0x69,0xcc,0xb0,0x00,0x10,0x0a,
++	0x01,0xff,0x49,0xcc,0x88,0xcc,0x81,0x00,0x01,0xff,0x69,0xcc,0x88,0xcc,0x81,0x00,
++	0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x4b,0xcc,0x81,0x00,0x01,0xff,
++	0x6b,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x4b,0xcc,0xa3,0x00,0x01,0xff,0x6b,0xcc,
++	0xa3,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x4b,0xcc,0xb1,0x00,0x01,0xff,0x6b,0xcc,
++	0xb1,0x00,0x10,0x08,0x01,0xff,0x4c,0xcc,0xa3,0x00,0x01,0xff,0x6c,0xcc,0xa3,0x00,
++	0xd2,0x24,0xd1,0x14,0x10,0x0a,0x01,0xff,0x4c,0xcc,0xa3,0xcc,0x84,0x00,0x01,0xff,
++	0x6c,0xcc,0xa3,0xcc,0x84,0x00,0x10,0x08,0x01,0xff,0x4c,0xcc,0xb1,0x00,0x01,0xff,
++	0x6c,0xcc,0xb1,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x4c,0xcc,0xad,0x00,0x01,0xff,
++	0x6c,0xcc,0xad,0x00,0x10,0x08,0x01,0xff,0x4d,0xcc,0x81,0x00,0x01,0xff,0x6d,0xcc,
++	0x81,0x00,0xcf,0x86,0xe5,0x15,0x01,0xd4,0x88,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,
++	0x08,0x01,0xff,0x4d,0xcc,0x87,0x00,0x01,0xff,0x6d,0xcc,0x87,0x00,0x10,0x08,0x01,
++	0xff,0x4d,0xcc,0xa3,0x00,0x01,0xff,0x6d,0xcc,0xa3,0x00,0xd1,0x10,0x10,0x08,0x01,
++	0xff,0x4e,0xcc,0x87,0x00,0x01,0xff,0x6e,0xcc,0x87,0x00,0x10,0x08,0x01,0xff,0x4e,
++	0xcc,0xa3,0x00,0x01,0xff,0x6e,0xcc,0xa3,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,
++	0xff,0x4e,0xcc,0xb1,0x00,0x01,0xff,0x6e,0xcc,0xb1,0x00,0x10,0x08,0x01,0xff,0x4e,
++	0xcc,0xad,0x00,0x01,0xff,0x6e,0xcc,0xad,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x4f,
++	0xcc,0x83,0xcc,0x81,0x00,0x01,0xff,0x6f,0xcc,0x83,0xcc,0x81,0x00,0x10,0x0a,0x01,
++	0xff,0x4f,0xcc,0x83,0xcc,0x88,0x00,0x01,0xff,0x6f,0xcc,0x83,0xcc,0x88,0x00,0xd3,
++	0x48,0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x4f,0xcc,0x84,0xcc,0x80,0x00,0x01,
++	0xff,0x6f,0xcc,0x84,0xcc,0x80,0x00,0x10,0x0a,0x01,0xff,0x4f,0xcc,0x84,0xcc,0x81,
++	0x00,0x01,0xff,0x6f,0xcc,0x84,0xcc,0x81,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x50,
++	0xcc,0x81,0x00,0x01,0xff,0x70,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x50,0xcc,0x87,
++	0x00,0x01,0xff,0x70,0xcc,0x87,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x52,
++	0xcc,0x87,0x00,0x01,0xff,0x72,0xcc,0x87,0x00,0x10,0x08,0x01,0xff,0x52,0xcc,0xa3,
++	0x00,0x01,0xff,0x72,0xcc,0xa3,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x52,0xcc,0xa3,
++	0xcc,0x84,0x00,0x01,0xff,0x72,0xcc,0xa3,0xcc,0x84,0x00,0x10,0x08,0x01,0xff,0x52,
++	0xcc,0xb1,0x00,0x01,0xff,0x72,0xcc,0xb1,0x00,0xd4,0x8c,0xd3,0x48,0xd2,0x20,0xd1,
++	0x10,0x10,0x08,0x01,0xff,0x53,0xcc,0x87,0x00,0x01,0xff,0x73,0xcc,0x87,0x00,0x10,
++	0x08,0x01,0xff,0x53,0xcc,0xa3,0x00,0x01,0xff,0x73,0xcc,0xa3,0x00,0xd1,0x14,0x10,
++	0x0a,0x01,0xff,0x53,0xcc,0x81,0xcc,0x87,0x00,0x01,0xff,0x73,0xcc,0x81,0xcc,0x87,
++	0x00,0x10,0x0a,0x01,0xff,0x53,0xcc,0x8c,0xcc,0x87,0x00,0x01,0xff,0x73,0xcc,0x8c,
++	0xcc,0x87,0x00,0xd2,0x24,0xd1,0x14,0x10,0x0a,0x01,0xff,0x53,0xcc,0xa3,0xcc,0x87,
++	0x00,0x01,0xff,0x73,0xcc,0xa3,0xcc,0x87,0x00,0x10,0x08,0x01,0xff,0x54,0xcc,0x87,
++	0x00,0x01,0xff,0x74,0xcc,0x87,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x54,0xcc,0xa3,
++	0x00,0x01,0xff,0x74,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x54,0xcc,0xb1,0x00,0x01,
++	0xff,0x74,0xcc,0xb1,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x54,
++	0xcc,0xad,0x00,0x01,0xff,0x74,0xcc,0xad,0x00,0x10,0x08,0x01,0xff,0x55,0xcc,0xa4,
++	0x00,0x01,0xff,0x75,0xcc,0xa4,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x55,0xcc,0xb0,
++	0x00,0x01,0xff,0x75,0xcc,0xb0,0x00,0x10,0x08,0x01,0xff,0x55,0xcc,0xad,0x00,0x01,
++	0xff,0x75,0xcc,0xad,0x00,0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x55,0xcc,0x83,
++	0xcc,0x81,0x00,0x01,0xff,0x75,0xcc,0x83,0xcc,0x81,0x00,0x10,0x0a,0x01,0xff,0x55,
++	0xcc,0x84,0xcc,0x88,0x00,0x01,0xff,0x75,0xcc,0x84,0xcc,0x88,0x00,0xd1,0x10,0x10,
++	0x08,0x01,0xff,0x56,0xcc,0x83,0x00,0x01,0xff,0x76,0xcc,0x83,0x00,0x10,0x08,0x01,
++	0xff,0x56,0xcc,0xa3,0x00,0x01,0xff,0x76,0xcc,0xa3,0x00,0xe0,0x10,0x02,0xcf,0x86,
++	0xd5,0xe1,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x57,0xcc,
++	0x80,0x00,0x01,0xff,0x77,0xcc,0x80,0x00,0x10,0x08,0x01,0xff,0x57,0xcc,0x81,0x00,
++	0x01,0xff,0x77,0xcc,0x81,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x57,0xcc,0x88,0x00,
++	0x01,0xff,0x77,0xcc,0x88,0x00,0x10,0x08,0x01,0xff,0x57,0xcc,0x87,0x00,0x01,0xff,
++	0x77,0xcc,0x87,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x57,0xcc,0xa3,0x00,
++	0x01,0xff,0x77,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x58,0xcc,0x87,0x00,0x01,0xff,
++	0x78,0xcc,0x87,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x58,0xcc,0x88,0x00,0x01,0xff,
++	0x78,0xcc,0x88,0x00,0x10,0x08,0x01,0xff,0x59,0xcc,0x87,0x00,0x01,0xff,0x79,0xcc,
++	0x87,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x5a,0xcc,0x82,0x00,
++	0x01,0xff,0x7a,0xcc,0x82,0x00,0x10,0x08,0x01,0xff,0x5a,0xcc,0xa3,0x00,0x01,0xff,
++	0x7a,0xcc,0xa3,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x5a,0xcc,0xb1,0x00,0x01,0xff,
++	0x7a,0xcc,0xb1,0x00,0x10,0x08,0x01,0xff,0x68,0xcc,0xb1,0x00,0x01,0xff,0x74,0xcc,
++	0x88,0x00,0x92,0x1d,0xd1,0x10,0x10,0x08,0x01,0xff,0x77,0xcc,0x8a,0x00,0x01,0xff,
++	0x79,0xcc,0x8a,0x00,0x10,0x04,0x01,0x00,0x02,0xff,0xc5,0xbf,0xcc,0x87,0x00,0x0a,
++	0x00,0xd4,0x98,0xd3,0x48,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x41,0xcc,0xa3,
++	0x00,0x01,0xff,0x61,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x41,0xcc,0x89,0x00,0x01,
++	0xff,0x61,0xcc,0x89,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x41,0xcc,0x82,0xcc,0x81,
++	0x00,0x01,0xff,0x61,0xcc,0x82,0xcc,0x81,0x00,0x10,0x0a,0x01,0xff,0x41,0xcc,0x82,
++	0xcc,0x80,0x00,0x01,0xff,0x61,0xcc,0x82,0xcc,0x80,0x00,0xd2,0x28,0xd1,0x14,0x10,
++	0x0a,0x01,0xff,0x41,0xcc,0x82,0xcc,0x89,0x00,0x01,0xff,0x61,0xcc,0x82,0xcc,0x89,
++	0x00,0x10,0x0a,0x01,0xff,0x41,0xcc,0x82,0xcc,0x83,0x00,0x01,0xff,0x61,0xcc,0x82,
++	0xcc,0x83,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x41,0xcc,0xa3,0xcc,0x82,0x00,0x01,
++	0xff,0x61,0xcc,0xa3,0xcc,0x82,0x00,0x10,0x0a,0x01,0xff,0x41,0xcc,0x86,0xcc,0x81,
++	0x00,0x01,0xff,0x61,0xcc,0x86,0xcc,0x81,0x00,0xd3,0x50,0xd2,0x28,0xd1,0x14,0x10,
++	0x0a,0x01,0xff,0x41,0xcc,0x86,0xcc,0x80,0x00,0x01,0xff,0x61,0xcc,0x86,0xcc,0x80,
++	0x00,0x10,0x0a,0x01,0xff,0x41,0xcc,0x86,0xcc,0x89,0x00,0x01,0xff,0x61,0xcc,0x86,
++	0xcc,0x89,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x41,0xcc,0x86,0xcc,0x83,0x00,0x01,
++	0xff,0x61,0xcc,0x86,0xcc,0x83,0x00,0x10,0x0a,0x01,0xff,0x41,0xcc,0xa3,0xcc,0x86,
++	0x00,0x01,0xff,0x61,0xcc,0xa3,0xcc,0x86,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,
++	0xff,0x45,0xcc,0xa3,0x00,0x01,0xff,0x65,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x45,
++	0xcc,0x89,0x00,0x01,0xff,0x65,0xcc,0x89,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x45,
++	0xcc,0x83,0x00,0x01,0xff,0x65,0xcc,0x83,0x00,0x10,0x0a,0x01,0xff,0x45,0xcc,0x82,
++	0xcc,0x81,0x00,0x01,0xff,0x65,0xcc,0x82,0xcc,0x81,0x00,0xcf,0x86,0xe5,0x31,0x01,
++	0xd4,0x90,0xd3,0x50,0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x45,0xcc,0x82,0xcc,
++	0x80,0x00,0x01,0xff,0x65,0xcc,0x82,0xcc,0x80,0x00,0x10,0x0a,0x01,0xff,0x45,0xcc,
++	0x82,0xcc,0x89,0x00,0x01,0xff,0x65,0xcc,0x82,0xcc,0x89,0x00,0xd1,0x14,0x10,0x0a,
++	0x01,0xff,0x45,0xcc,0x82,0xcc,0x83,0x00,0x01,0xff,0x65,0xcc,0x82,0xcc,0x83,0x00,
++	0x10,0x0a,0x01,0xff,0x45,0xcc,0xa3,0xcc,0x82,0x00,0x01,0xff,0x65,0xcc,0xa3,0xcc,
++	0x82,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x49,0xcc,0x89,0x00,0x01,0xff,
++	0x69,0xcc,0x89,0x00,0x10,0x08,0x01,0xff,0x49,0xcc,0xa3,0x00,0x01,0xff,0x69,0xcc,
++	0xa3,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x4f,0xcc,0xa3,0x00,0x01,0xff,0x6f,0xcc,
++	0xa3,0x00,0x10,0x08,0x01,0xff,0x4f,0xcc,0x89,0x00,0x01,0xff,0x6f,0xcc,0x89,0x00,
++	0xd3,0x50,0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x4f,0xcc,0x82,0xcc,0x81,0x00,
++	0x01,0xff,0x6f,0xcc,0x82,0xcc,0x81,0x00,0x10,0x0a,0x01,0xff,0x4f,0xcc,0x82,0xcc,
++	0x80,0x00,0x01,0xff,0x6f,0xcc,0x82,0xcc,0x80,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,
++	0x4f,0xcc,0x82,0xcc,0x89,0x00,0x01,0xff,0x6f,0xcc,0x82,0xcc,0x89,0x00,0x10,0x0a,
++	0x01,0xff,0x4f,0xcc,0x82,0xcc,0x83,0x00,0x01,0xff,0x6f,0xcc,0x82,0xcc,0x83,0x00,
++	0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x4f,0xcc,0xa3,0xcc,0x82,0x00,0x01,0xff,
++	0x6f,0xcc,0xa3,0xcc,0x82,0x00,0x10,0x0a,0x01,0xff,0x4f,0xcc,0x9b,0xcc,0x81,0x00,
++	0x01,0xff,0x6f,0xcc,0x9b,0xcc,0x81,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x4f,0xcc,
++	0x9b,0xcc,0x80,0x00,0x01,0xff,0x6f,0xcc,0x9b,0xcc,0x80,0x00,0x10,0x0a,0x01,0xff,
++	0x4f,0xcc,0x9b,0xcc,0x89,0x00,0x01,0xff,0x6f,0xcc,0x9b,0xcc,0x89,0x00,0xd4,0x98,
++	0xd3,0x48,0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x4f,0xcc,0x9b,0xcc,0x83,0x00,
++	0x01,0xff,0x6f,0xcc,0x9b,0xcc,0x83,0x00,0x10,0x0a,0x01,0xff,0x4f,0xcc,0x9b,0xcc,
++	0xa3,0x00,0x01,0xff,0x6f,0xcc,0x9b,0xcc,0xa3,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,
++	0x55,0xcc,0xa3,0x00,0x01,0xff,0x75,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x55,0xcc,
++	0x89,0x00,0x01,0xff,0x75,0xcc,0x89,0x00,0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,
++	0x55,0xcc,0x9b,0xcc,0x81,0x00,0x01,0xff,0x75,0xcc,0x9b,0xcc,0x81,0x00,0x10,0x0a,
++	0x01,0xff,0x55,0xcc,0x9b,0xcc,0x80,0x00,0x01,0xff,0x75,0xcc,0x9b,0xcc,0x80,0x00,
++	0xd1,0x14,0x10,0x0a,0x01,0xff,0x55,0xcc,0x9b,0xcc,0x89,0x00,0x01,0xff,0x75,0xcc,
++	0x9b,0xcc,0x89,0x00,0x10,0x0a,0x01,0xff,0x55,0xcc,0x9b,0xcc,0x83,0x00,0x01,0xff,
++	0x75,0xcc,0x9b,0xcc,0x83,0x00,0xd3,0x44,0xd2,0x24,0xd1,0x14,0x10,0x0a,0x01,0xff,
++	0x55,0xcc,0x9b,0xcc,0xa3,0x00,0x01,0xff,0x75,0xcc,0x9b,0xcc,0xa3,0x00,0x10,0x08,
++	0x01,0xff,0x59,0xcc,0x80,0x00,0x01,0xff,0x79,0xcc,0x80,0x00,0xd1,0x10,0x10,0x08,
++	0x01,0xff,0x59,0xcc,0xa3,0x00,0x01,0xff,0x79,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,
++	0x59,0xcc,0x89,0x00,0x01,0xff,0x79,0xcc,0x89,0x00,0x92,0x14,0x91,0x10,0x10,0x08,
++	0x01,0xff,0x59,0xcc,0x83,0x00,0x01,0xff,0x79,0xcc,0x83,0x00,0x0a,0x00,0x0a,0x00,
++	0xe1,0xc0,0x04,0xe0,0x80,0x02,0xcf,0x86,0xe5,0x2d,0x01,0xd4,0xa8,0xd3,0x54,0xd2,
++	0x28,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xb1,0xcc,0x93,0x00,0x01,0xff,0xce,0xb1,
++	0xcc,0x94,0x00,0x10,0x0b,0x01,0xff,0xce,0xb1,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,
++	0xce,0xb1,0xcc,0x94,0xcc,0x80,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0xb1,0xcc,
++	0x93,0xcc,0x81,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,0xcc,0x81,0x00,0x10,0x0b,0x01,
++	0xff,0xce,0xb1,0xcc,0x93,0xcd,0x82,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,0xcd,0x82,
++	0x00,0xd2,0x28,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0x91,0xcc,0x93,0x00,0x01,0xff,
++	0xce,0x91,0xcc,0x94,0x00,0x10,0x0b,0x01,0xff,0xce,0x91,0xcc,0x93,0xcc,0x80,0x00,
++	0x01,0xff,0xce,0x91,0xcc,0x94,0xcc,0x80,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,
++	0x91,0xcc,0x93,0xcc,0x81,0x00,0x01,0xff,0xce,0x91,0xcc,0x94,0xcc,0x81,0x00,0x10,
++	0x0b,0x01,0xff,0xce,0x91,0xcc,0x93,0xcd,0x82,0x00,0x01,0xff,0xce,0x91,0xcc,0x94,
++	0xcd,0x82,0x00,0xd3,0x42,0xd2,0x28,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xb5,0xcc,
++	0x93,0x00,0x01,0xff,0xce,0xb5,0xcc,0x94,0x00,0x10,0x0b,0x01,0xff,0xce,0xb5,0xcc,
++	0x93,0xcc,0x80,0x00,0x01,0xff,0xce,0xb5,0xcc,0x94,0xcc,0x80,0x00,0x91,0x16,0x10,
++	0x0b,0x01,0xff,0xce,0xb5,0xcc,0x93,0xcc,0x81,0x00,0x01,0xff,0xce,0xb5,0xcc,0x94,
++	0xcc,0x81,0x00,0x00,0x00,0xd2,0x28,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0x95,0xcc,
++	0x93,0x00,0x01,0xff,0xce,0x95,0xcc,0x94,0x00,0x10,0x0b,0x01,0xff,0xce,0x95,0xcc,
++	0x93,0xcc,0x80,0x00,0x01,0xff,0xce,0x95,0xcc,0x94,0xcc,0x80,0x00,0x91,0x16,0x10,
++	0x0b,0x01,0xff,0xce,0x95,0xcc,0x93,0xcc,0x81,0x00,0x01,0xff,0xce,0x95,0xcc,0x94,
++	0xcc,0x81,0x00,0x00,0x00,0xd4,0xa8,0xd3,0x54,0xd2,0x28,0xd1,0x12,0x10,0x09,0x01,
++	0xff,0xce,0xb7,0xcc,0x93,0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0x00,0x10,0x0b,0x01,
++	0xff,0xce,0xb7,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0xcc,0x80,
++	0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0xb7,0xcc,0x93,0xcc,0x81,0x00,0x01,0xff,
++	0xce,0xb7,0xcc,0x94,0xcc,0x81,0x00,0x10,0x0b,0x01,0xff,0xce,0xb7,0xcc,0x93,0xcd,
++	0x82,0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0xcd,0x82,0x00,0xd2,0x28,0xd1,0x12,0x10,
++	0x09,0x01,0xff,0xce,0x97,0xcc,0x93,0x00,0x01,0xff,0xce,0x97,0xcc,0x94,0x00,0x10,
++	0x0b,0x01,0xff,0xce,0x97,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xce,0x97,0xcc,0x94,
++	0xcc,0x80,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0x97,0xcc,0x93,0xcc,0x81,0x00,
++	0x01,0xff,0xce,0x97,0xcc,0x94,0xcc,0x81,0x00,0x10,0x0b,0x01,0xff,0xce,0x97,0xcc,
++	0x93,0xcd,0x82,0x00,0x01,0xff,0xce,0x97,0xcc,0x94,0xcd,0x82,0x00,0xd3,0x54,0xd2,
++	0x28,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xb9,0xcc,0x93,0x00,0x01,0xff,0xce,0xb9,
++	0xcc,0x94,0x00,0x10,0x0b,0x01,0xff,0xce,0xb9,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,
++	0xce,0xb9,0xcc,0x94,0xcc,0x80,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0xb9,0xcc,
++	0x93,0xcc,0x81,0x00,0x01,0xff,0xce,0xb9,0xcc,0x94,0xcc,0x81,0x00,0x10,0x0b,0x01,
++	0xff,0xce,0xb9,0xcc,0x93,0xcd,0x82,0x00,0x01,0xff,0xce,0xb9,0xcc,0x94,0xcd,0x82,
++	0x00,0xd2,0x28,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0x99,0xcc,0x93,0x00,0x01,0xff,
++	0xce,0x99,0xcc,0x94,0x00,0x10,0x0b,0x01,0xff,0xce,0x99,0xcc,0x93,0xcc,0x80,0x00,
++	0x01,0xff,0xce,0x99,0xcc,0x94,0xcc,0x80,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,
++	0x99,0xcc,0x93,0xcc,0x81,0x00,0x01,0xff,0xce,0x99,0xcc,0x94,0xcc,0x81,0x00,0x10,
++	0x0b,0x01,0xff,0xce,0x99,0xcc,0x93,0xcd,0x82,0x00,0x01,0xff,0xce,0x99,0xcc,0x94,
++	0xcd,0x82,0x00,0xcf,0x86,0xe5,0x13,0x01,0xd4,0x84,0xd3,0x42,0xd2,0x28,0xd1,0x12,
++	0x10,0x09,0x01,0xff,0xce,0xbf,0xcc,0x93,0x00,0x01,0xff,0xce,0xbf,0xcc,0x94,0x00,
++	0x10,0x0b,0x01,0xff,0xce,0xbf,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xce,0xbf,0xcc,
++	0x94,0xcc,0x80,0x00,0x91,0x16,0x10,0x0b,0x01,0xff,0xce,0xbf,0xcc,0x93,0xcc,0x81,
++	0x00,0x01,0xff,0xce,0xbf,0xcc,0x94,0xcc,0x81,0x00,0x00,0x00,0xd2,0x28,0xd1,0x12,
++	0x10,0x09,0x01,0xff,0xce,0x9f,0xcc,0x93,0x00,0x01,0xff,0xce,0x9f,0xcc,0x94,0x00,
++	0x10,0x0b,0x01,0xff,0xce,0x9f,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xce,0x9f,0xcc,
++	0x94,0xcc,0x80,0x00,0x91,0x16,0x10,0x0b,0x01,0xff,0xce,0x9f,0xcc,0x93,0xcc,0x81,
++	0x00,0x01,0xff,0xce,0x9f,0xcc,0x94,0xcc,0x81,0x00,0x00,0x00,0xd3,0x54,0xd2,0x28,
++	0xd1,0x12,0x10,0x09,0x01,0xff,0xcf,0x85,0xcc,0x93,0x00,0x01,0xff,0xcf,0x85,0xcc,
++	0x94,0x00,0x10,0x0b,0x01,0xff,0xcf,0x85,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xcf,
++	0x85,0xcc,0x94,0xcc,0x80,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xcf,0x85,0xcc,0x93,
++	0xcc,0x81,0x00,0x01,0xff,0xcf,0x85,0xcc,0x94,0xcc,0x81,0x00,0x10,0x0b,0x01,0xff,
++	0xcf,0x85,0xcc,0x93,0xcd,0x82,0x00,0x01,0xff,0xcf,0x85,0xcc,0x94,0xcd,0x82,0x00,
++	0xd2,0x1c,0xd1,0x0d,0x10,0x04,0x00,0x00,0x01,0xff,0xce,0xa5,0xcc,0x94,0x00,0x10,
++	0x04,0x00,0x00,0x01,0xff,0xce,0xa5,0xcc,0x94,0xcc,0x80,0x00,0xd1,0x0f,0x10,0x04,
++	0x00,0x00,0x01,0xff,0xce,0xa5,0xcc,0x94,0xcc,0x81,0x00,0x10,0x04,0x00,0x00,0x01,
++	0xff,0xce,0xa5,0xcc,0x94,0xcd,0x82,0x00,0xd4,0xa8,0xd3,0x54,0xd2,0x28,0xd1,0x12,
++	0x10,0x09,0x01,0xff,0xcf,0x89,0xcc,0x93,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0x00,
++	0x10,0x0b,0x01,0xff,0xcf,0x89,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xcf,0x89,0xcc,
++	0x94,0xcc,0x80,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xcf,0x89,0xcc,0x93,0xcc,0x81,
++	0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0xcc,0x81,0x00,0x10,0x0b,0x01,0xff,0xcf,0x89,
++	0xcc,0x93,0xcd,0x82,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0xcd,0x82,0x00,0xd2,0x28,
++	0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xa9,0xcc,0x93,0x00,0x01,0xff,0xce,0xa9,0xcc,
++	0x94,0x00,0x10,0x0b,0x01,0xff,0xce,0xa9,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xce,
++	0xa9,0xcc,0x94,0xcc,0x80,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0xa9,0xcc,0x93,
++	0xcc,0x81,0x00,0x01,0xff,0xce,0xa9,0xcc,0x94,0xcc,0x81,0x00,0x10,0x0b,0x01,0xff,
++	0xce,0xa9,0xcc,0x93,0xcd,0x82,0x00,0x01,0xff,0xce,0xa9,0xcc,0x94,0xcd,0x82,0x00,
++	0xd3,0x48,0xd2,0x24,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xb1,0xcc,0x80,0x00,0x01,
++	0xff,0xce,0xb1,0xcc,0x81,0x00,0x10,0x09,0x01,0xff,0xce,0xb5,0xcc,0x80,0x00,0x01,
++	0xff,0xce,0xb5,0xcc,0x81,0x00,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xb7,0xcc,0x80,
++	0x00,0x01,0xff,0xce,0xb7,0xcc,0x81,0x00,0x10,0x09,0x01,0xff,0xce,0xb9,0xcc,0x80,
++	0x00,0x01,0xff,0xce,0xb9,0xcc,0x81,0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x01,0xff,
++	0xce,0xbf,0xcc,0x80,0x00,0x01,0xff,0xce,0xbf,0xcc,0x81,0x00,0x10,0x09,0x01,0xff,
++	0xcf,0x85,0xcc,0x80,0x00,0x01,0xff,0xcf,0x85,0xcc,0x81,0x00,0x91,0x12,0x10,0x09,
++	0x01,0xff,0xcf,0x89,0xcc,0x80,0x00,0x01,0xff,0xcf,0x89,0xcc,0x81,0x00,0x00,0x00,
++	0xe0,0xe1,0x02,0xcf,0x86,0xe5,0x91,0x01,0xd4,0xc8,0xd3,0x64,0xd2,0x30,0xd1,0x16,
++	0x10,0x0b,0x01,0xff,0xce,0xb1,0xcc,0x93,0xcd,0x85,0x00,0x01,0xff,0xce,0xb1,0xcc,
++	0x94,0xcd,0x85,0x00,0x10,0x0d,0x01,0xff,0xce,0xb1,0xcc,0x93,0xcc,0x80,0xcd,0x85,
++	0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,0xcc,0x80,0xcd,0x85,0x00,0xd1,0x1a,0x10,0x0d,
++	0x01,0xff,0xce,0xb1,0xcc,0x93,0xcc,0x81,0xcd,0x85,0x00,0x01,0xff,0xce,0xb1,0xcc,
++	0x94,0xcc,0x81,0xcd,0x85,0x00,0x10,0x0d,0x01,0xff,0xce,0xb1,0xcc,0x93,0xcd,0x82,
++	0xcd,0x85,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,0xcd,0x82,0xcd,0x85,0x00,0xd2,0x30,
++	0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0x91,0xcc,0x93,0xcd,0x85,0x00,0x01,0xff,0xce,
++	0x91,0xcc,0x94,0xcd,0x85,0x00,0x10,0x0d,0x01,0xff,0xce,0x91,0xcc,0x93,0xcc,0x80,
++	0xcd,0x85,0x00,0x01,0xff,0xce,0x91,0xcc,0x94,0xcc,0x80,0xcd,0x85,0x00,0xd1,0x1a,
++	0x10,0x0d,0x01,0xff,0xce,0x91,0xcc,0x93,0xcc,0x81,0xcd,0x85,0x00,0x01,0xff,0xce,
++	0x91,0xcc,0x94,0xcc,0x81,0xcd,0x85,0x00,0x10,0x0d,0x01,0xff,0xce,0x91,0xcc,0x93,
++	0xcd,0x82,0xcd,0x85,0x00,0x01,0xff,0xce,0x91,0xcc,0x94,0xcd,0x82,0xcd,0x85,0x00,
++	0xd3,0x64,0xd2,0x30,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0xb7,0xcc,0x93,0xcd,0x85,
++	0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0xcd,0x85,0x00,0x10,0x0d,0x01,0xff,0xce,0xb7,
++	0xcc,0x93,0xcc,0x80,0xcd,0x85,0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0xcc,0x80,0xcd,
++	0x85,0x00,0xd1,0x1a,0x10,0x0d,0x01,0xff,0xce,0xb7,0xcc,0x93,0xcc,0x81,0xcd,0x85,
++	0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0xcc,0x81,0xcd,0x85,0x00,0x10,0x0d,0x01,0xff,
++	0xce,0xb7,0xcc,0x93,0xcd,0x82,0xcd,0x85,0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0xcd,
++	0x82,0xcd,0x85,0x00,0xd2,0x30,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0x97,0xcc,0x93,
++	0xcd,0x85,0x00,0x01,0xff,0xce,0x97,0xcc,0x94,0xcd,0x85,0x00,0x10,0x0d,0x01,0xff,
++	0xce,0x97,0xcc,0x93,0xcc,0x80,0xcd,0x85,0x00,0x01,0xff,0xce,0x97,0xcc,0x94,0xcc,
++	0x80,0xcd,0x85,0x00,0xd1,0x1a,0x10,0x0d,0x01,0xff,0xce,0x97,0xcc,0x93,0xcc,0x81,
++	0xcd,0x85,0x00,0x01,0xff,0xce,0x97,0xcc,0x94,0xcc,0x81,0xcd,0x85,0x00,0x10,0x0d,
++	0x01,0xff,0xce,0x97,0xcc,0x93,0xcd,0x82,0xcd,0x85,0x00,0x01,0xff,0xce,0x97,0xcc,
++	0x94,0xcd,0x82,0xcd,0x85,0x00,0xd4,0xc8,0xd3,0x64,0xd2,0x30,0xd1,0x16,0x10,0x0b,
++	0x01,0xff,0xcf,0x89,0xcc,0x93,0xcd,0x85,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0xcd,
++	0x85,0x00,0x10,0x0d,0x01,0xff,0xcf,0x89,0xcc,0x93,0xcc,0x80,0xcd,0x85,0x00,0x01,
++	0xff,0xcf,0x89,0xcc,0x94,0xcc,0x80,0xcd,0x85,0x00,0xd1,0x1a,0x10,0x0d,0x01,0xff,
++	0xcf,0x89,0xcc,0x93,0xcc,0x81,0xcd,0x85,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0xcc,
++	0x81,0xcd,0x85,0x00,0x10,0x0d,0x01,0xff,0xcf,0x89,0xcc,0x93,0xcd,0x82,0xcd,0x85,
++	0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0xcd,0x82,0xcd,0x85,0x00,0xd2,0x30,0xd1,0x16,
++	0x10,0x0b,0x01,0xff,0xce,0xa9,0xcc,0x93,0xcd,0x85,0x00,0x01,0xff,0xce,0xa9,0xcc,
++	0x94,0xcd,0x85,0x00,0x10,0x0d,0x01,0xff,0xce,0xa9,0xcc,0x93,0xcc,0x80,0xcd,0x85,
++	0x00,0x01,0xff,0xce,0xa9,0xcc,0x94,0xcc,0x80,0xcd,0x85,0x00,0xd1,0x1a,0x10,0x0d,
++	0x01,0xff,0xce,0xa9,0xcc,0x93,0xcc,0x81,0xcd,0x85,0x00,0x01,0xff,0xce,0xa9,0xcc,
++	0x94,0xcc,0x81,0xcd,0x85,0x00,0x10,0x0d,0x01,0xff,0xce,0xa9,0xcc,0x93,0xcd,0x82,
++	0xcd,0x85,0x00,0x01,0xff,0xce,0xa9,0xcc,0x94,0xcd,0x82,0xcd,0x85,0x00,0xd3,0x49,
++	0xd2,0x26,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xb1,0xcc,0x86,0x00,0x01,0xff,0xce,
++	0xb1,0xcc,0x84,0x00,0x10,0x0b,0x01,0xff,0xce,0xb1,0xcc,0x80,0xcd,0x85,0x00,0x01,
++	0xff,0xce,0xb1,0xcd,0x85,0x00,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xce,0xb1,0xcc,0x81,
++	0xcd,0x85,0x00,0x00,0x00,0x10,0x09,0x01,0xff,0xce,0xb1,0xcd,0x82,0x00,0x01,0xff,
++	0xce,0xb1,0xcd,0x82,0xcd,0x85,0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,
++	0x91,0xcc,0x86,0x00,0x01,0xff,0xce,0x91,0xcc,0x84,0x00,0x10,0x09,0x01,0xff,0xce,
++	0x91,0xcc,0x80,0x00,0x01,0xff,0xce,0x91,0xcc,0x81,0x00,0xd1,0x0d,0x10,0x09,0x01,
++	0xff,0xce,0x91,0xcd,0x85,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xce,0xb9,0x00,0x01,
++	0x00,0xcf,0x86,0xe5,0x16,0x01,0xd4,0x8f,0xd3,0x44,0xd2,0x21,0xd1,0x0d,0x10,0x04,
++	0x01,0x00,0x01,0xff,0xc2,0xa8,0xcd,0x82,0x00,0x10,0x0b,0x01,0xff,0xce,0xb7,0xcc,
++	0x80,0xcd,0x85,0x00,0x01,0xff,0xce,0xb7,0xcd,0x85,0x00,0xd1,0x0f,0x10,0x0b,0x01,
++	0xff,0xce,0xb7,0xcc,0x81,0xcd,0x85,0x00,0x00,0x00,0x10,0x09,0x01,0xff,0xce,0xb7,
++	0xcd,0x82,0x00,0x01,0xff,0xce,0xb7,0xcd,0x82,0xcd,0x85,0x00,0xd2,0x24,0xd1,0x12,
++	0x10,0x09,0x01,0xff,0xce,0x95,0xcc,0x80,0x00,0x01,0xff,0xce,0x95,0xcc,0x81,0x00,
++	0x10,0x09,0x01,0xff,0xce,0x97,0xcc,0x80,0x00,0x01,0xff,0xce,0x97,0xcc,0x81,0x00,
++	0xd1,0x13,0x10,0x09,0x01,0xff,0xce,0x97,0xcd,0x85,0x00,0x01,0xff,0xe1,0xbe,0xbf,
++	0xcc,0x80,0x00,0x10,0x0a,0x01,0xff,0xe1,0xbe,0xbf,0xcc,0x81,0x00,0x01,0xff,0xe1,
++	0xbe,0xbf,0xcd,0x82,0x00,0xd3,0x40,0xd2,0x28,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,
++	0xb9,0xcc,0x86,0x00,0x01,0xff,0xce,0xb9,0xcc,0x84,0x00,0x10,0x0b,0x01,0xff,0xce,
++	0xb9,0xcc,0x88,0xcc,0x80,0x00,0x01,0xff,0xce,0xb9,0xcc,0x88,0xcc,0x81,0x00,0x51,
++	0x04,0x00,0x00,0x10,0x09,0x01,0xff,0xce,0xb9,0xcd,0x82,0x00,0x01,0xff,0xce,0xb9,
++	0xcc,0x88,0xcd,0x82,0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0x99,0xcc,
++	0x86,0x00,0x01,0xff,0xce,0x99,0xcc,0x84,0x00,0x10,0x09,0x01,0xff,0xce,0x99,0xcc,
++	0x80,0x00,0x01,0xff,0xce,0x99,0xcc,0x81,0x00,0xd1,0x0e,0x10,0x04,0x00,0x00,0x01,
++	0xff,0xe1,0xbf,0xbe,0xcc,0x80,0x00,0x10,0x0a,0x01,0xff,0xe1,0xbf,0xbe,0xcc,0x81,
++	0x00,0x01,0xff,0xe1,0xbf,0xbe,0xcd,0x82,0x00,0xd4,0x93,0xd3,0x4e,0xd2,0x28,0xd1,
++	0x12,0x10,0x09,0x01,0xff,0xcf,0x85,0xcc,0x86,0x00,0x01,0xff,0xcf,0x85,0xcc,0x84,
++	0x00,0x10,0x0b,0x01,0xff,0xcf,0x85,0xcc,0x88,0xcc,0x80,0x00,0x01,0xff,0xcf,0x85,
++	0xcc,0x88,0xcc,0x81,0x00,0xd1,0x12,0x10,0x09,0x01,0xff,0xcf,0x81,0xcc,0x93,0x00,
++	0x01,0xff,0xcf,0x81,0xcc,0x94,0x00,0x10,0x09,0x01,0xff,0xcf,0x85,0xcd,0x82,0x00,
++	0x01,0xff,0xcf,0x85,0xcc,0x88,0xcd,0x82,0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x01,
++	0xff,0xce,0xa5,0xcc,0x86,0x00,0x01,0xff,0xce,0xa5,0xcc,0x84,0x00,0x10,0x09,0x01,
++	0xff,0xce,0xa5,0xcc,0x80,0x00,0x01,0xff,0xce,0xa5,0xcc,0x81,0x00,0xd1,0x12,0x10,
++	0x09,0x01,0xff,0xce,0xa1,0xcc,0x94,0x00,0x01,0xff,0xc2,0xa8,0xcc,0x80,0x00,0x10,
++	0x09,0x01,0xff,0xc2,0xa8,0xcc,0x81,0x00,0x01,0xff,0x60,0x00,0xd3,0x3b,0xd2,0x18,
++	0x51,0x04,0x00,0x00,0x10,0x0b,0x01,0xff,0xcf,0x89,0xcc,0x80,0xcd,0x85,0x00,0x01,
++	0xff,0xcf,0x89,0xcd,0x85,0x00,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xcf,0x89,0xcc,0x81,
++	0xcd,0x85,0x00,0x00,0x00,0x10,0x09,0x01,0xff,0xcf,0x89,0xcd,0x82,0x00,0x01,0xff,
++	0xcf,0x89,0xcd,0x82,0xcd,0x85,0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,
++	0x9f,0xcc,0x80,0x00,0x01,0xff,0xce,0x9f,0xcc,0x81,0x00,0x10,0x09,0x01,0xff,0xce,
++	0xa9,0xcc,0x80,0x00,0x01,0xff,0xce,0xa9,0xcc,0x81,0x00,0xd1,0x10,0x10,0x09,0x01,
++	0xff,0xce,0xa9,0xcd,0x85,0x00,0x01,0xff,0xc2,0xb4,0x00,0x10,0x04,0x01,0x00,0x00,
++	0x00,0xe0,0x7e,0x0c,0xcf,0x86,0xe5,0xbb,0x08,0xe4,0x14,0x06,0xe3,0xf7,0x02,0xe2,
++	0xbd,0x01,0xd1,0xd0,0xd0,0x4f,0xcf,0x86,0xd5,0x2e,0x94,0x2a,0xd3,0x18,0x92,0x14,
++	0x91,0x10,0x10,0x08,0x01,0xff,0xe2,0x80,0x82,0x00,0x01,0xff,0xe2,0x80,0x83,0x00,
++	0x01,0x00,0x01,0x00,0x92,0x0d,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x01,0xff,
++	0x00,0x01,0xff,0x00,0x01,0x00,0x94,0x1b,0x53,0x04,0x01,0x00,0xd2,0x09,0x11,0x04,
++	0x01,0x00,0x01,0xff,0x00,0x51,0x05,0x01,0xff,0x00,0x10,0x05,0x01,0xff,0x00,0x04,
++	0x00,0x01,0x00,0xcf,0x86,0xd5,0x48,0xd4,0x1c,0xd3,0x10,0x52,0x04,0x01,0x00,0x51,
++	0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x06,0x00,0x52,0x04,0x04,0x00,0x11,0x04,0x04,
++	0x00,0x06,0x00,0xd3,0x1c,0xd2,0x0c,0x51,0x04,0x06,0x00,0x10,0x04,0x06,0x00,0x07,
++	0x00,0xd1,0x08,0x10,0x04,0x07,0x00,0x08,0x00,0x10,0x04,0x08,0x00,0x06,0x00,0x52,
++	0x04,0x08,0x00,0x51,0x04,0x08,0x00,0x10,0x04,0x08,0x00,0x06,0x00,0xd4,0x23,0xd3,
++	0x14,0x52,0x05,0x06,0xff,0x00,0x91,0x0a,0x10,0x05,0x0a,0xff,0x00,0x00,0xff,0x00,
++	0x0f,0xff,0x00,0x92,0x0a,0x11,0x05,0x0f,0xff,0x00,0x01,0xff,0x00,0x01,0xff,0x00,
++	0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x06,0x00,0x00,0x00,0x01,0x00,
++	0x01,0x00,0xd0,0x7e,0xcf,0x86,0xd5,0x34,0xd4,0x14,0x53,0x04,0x01,0x00,0x52,0x04,
++	0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00,0xd3,0x10,0x52,0x04,
++	0x08,0x00,0x91,0x08,0x10,0x04,0x08,0x00,0x0c,0x00,0x0c,0x00,0x52,0x04,0x0c,0x00,
++	0x91,0x08,0x10,0x04,0x0c,0x00,0x00,0x00,0x00,0x00,0xd4,0x1c,0x53,0x04,0x01,0x00,
++	0xd2,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x02,0x00,0x91,0x08,0x10,0x04,
++	0x03,0x00,0x04,0x00,0x04,0x00,0xd3,0x10,0xd2,0x08,0x11,0x04,0x06,0x00,0x08,0x00,
++	0x11,0x04,0x08,0x00,0x0b,0x00,0xd2,0x10,0xd1,0x08,0x10,0x04,0x0b,0x00,0x0c,0x00,
++	0x10,0x04,0x0e,0x00,0x10,0x00,0x51,0x04,0x10,0x00,0x10,0x04,0x11,0x00,0x13,0x00,
++	0xcf,0x86,0xd5,0x28,0x54,0x04,0x00,0x00,0xd3,0x0c,0x92,0x08,0x11,0x04,0x01,0xe6,
++	0x01,0x01,0x01,0xe6,0xd2,0x0c,0x51,0x04,0x01,0x01,0x10,0x04,0x01,0x01,0x01,0xe6,
++	0x91,0x08,0x10,0x04,0x01,0xe6,0x01,0x00,0x01,0x00,0xd4,0x30,0xd3,0x1c,0xd2,0x0c,
++	0x91,0x08,0x10,0x04,0x01,0x00,0x01,0xe6,0x04,0x00,0xd1,0x08,0x10,0x04,0x06,0x00,
++	0x06,0x01,0x10,0x04,0x06,0x01,0x06,0xe6,0x92,0x10,0xd1,0x08,0x10,0x04,0x06,0xdc,
++	0x06,0xe6,0x10,0x04,0x06,0x01,0x08,0x01,0x09,0xdc,0x93,0x10,0x92,0x0c,0x91,0x08,
++	0x10,0x04,0x0a,0xe6,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xd1,0x81,0xd0,0x4f,
++	0xcf,0x86,0x55,0x04,0x01,0x00,0xd4,0x29,0xd3,0x13,0x52,0x04,0x01,0x00,0x51,0x04,
++	0x01,0x00,0x10,0x07,0x01,0xff,0xce,0xa9,0x00,0x01,0x00,0x92,0x12,0x51,0x04,0x01,
++	0x00,0x10,0x06,0x01,0xff,0x4b,0x00,0x01,0xff,0x41,0xcc,0x8a,0x00,0x01,0x00,0x53,
++	0x04,0x01,0x00,0xd2,0x10,0xd1,0x08,0x10,0x04,0x01,0x00,0x04,0x00,0x10,0x04,0x04,
++	0x00,0x07,0x00,0x91,0x08,0x10,0x04,0x08,0x00,0x06,0x00,0x06,0x00,0xcf,0x86,0x95,
++	0x2c,0xd4,0x18,0x53,0x04,0x06,0x00,0x52,0x04,0x06,0x00,0xd1,0x08,0x10,0x04,0x08,
++	0x00,0x09,0x00,0x10,0x04,0x09,0x00,0x0a,0x00,0x93,0x10,0x92,0x0c,0x51,0x04,0x0b,
++	0x00,0x10,0x04,0x0b,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd0,0x68,0xcf,
++	0x86,0xd5,0x48,0xd4,0x28,0xd3,0x18,0xd2,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,
++	0x00,0x04,0x00,0x91,0x08,0x10,0x04,0x09,0x00,0x0a,0x00,0x0a,0x00,0x92,0x0c,0x91,
++	0x08,0x10,0x04,0x0a,0x00,0x0b,0x00,0x11,0x00,0x00,0x00,0x53,0x04,0x01,0x00,0x92,
++	0x18,0x51,0x04,0x01,0x00,0x10,0x0a,0x01,0xff,0xe2,0x86,0x90,0xcc,0xb8,0x00,0x01,
++	0xff,0xe2,0x86,0x92,0xcc,0xb8,0x00,0x01,0x00,0x94,0x1a,0x53,0x04,0x01,0x00,0x52,
++	0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x0a,0x01,0xff,0xe2,0x86,0x94,0xcc,0xb8,
++	0x00,0x01,0x00,0x01,0x00,0xcf,0x86,0xd5,0x2e,0x94,0x2a,0x53,0x04,0x01,0x00,0x52,
++	0x04,0x01,0x00,0xd1,0x0e,0x10,0x04,0x01,0x00,0x01,0xff,0xe2,0x87,0x90,0xcc,0xb8,
++	0x00,0x10,0x0a,0x01,0xff,0xe2,0x87,0x94,0xcc,0xb8,0x00,0x01,0xff,0xe2,0x87,0x92,
++	0xcc,0xb8,0x00,0x01,0x00,0xd4,0x14,0x53,0x04,0x01,0x00,0x92,0x0c,0x51,0x04,0x01,
++	0x00,0x10,0x04,0x01,0x00,0x04,0x00,0x04,0x00,0x93,0x08,0x12,0x04,0x04,0x00,0x06,
++	0x00,0x06,0x00,0xe2,0x38,0x02,0xe1,0x3f,0x01,0xd0,0x68,0xcf,0x86,0xd5,0x3e,0x94,
++	0x3a,0xd3,0x16,0x52,0x04,0x01,0x00,0x91,0x0e,0x10,0x0a,0x01,0xff,0xe2,0x88,0x83,
++	0xcc,0xb8,0x00,0x01,0x00,0x01,0x00,0xd2,0x12,0x91,0x0e,0x10,0x04,0x01,0x00,0x01,
++	0xff,0xe2,0x88,0x88,0xcc,0xb8,0x00,0x01,0x00,0x91,0x0e,0x10,0x0a,0x01,0xff,0xe2,
++	0x88,0x8b,0xcc,0xb8,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x94,0x24,0x93,0x20,0x52,
++	0x04,0x01,0x00,0xd1,0x0e,0x10,0x0a,0x01,0xff,0xe2,0x88,0xa3,0xcc,0xb8,0x00,0x01,
++	0x00,0x10,0x0a,0x01,0xff,0xe2,0x88,0xa5,0xcc,0xb8,0x00,0x01,0x00,0x01,0x00,0x01,
++	0x00,0xcf,0x86,0xd5,0x48,0x94,0x44,0xd3,0x2e,0xd2,0x12,0x91,0x0e,0x10,0x04,0x01,
++	0x00,0x01,0xff,0xe2,0x88,0xbc,0xcc,0xb8,0x00,0x01,0x00,0xd1,0x0e,0x10,0x0a,0x01,
++	0xff,0xe2,0x89,0x83,0xcc,0xb8,0x00,0x01,0x00,0x10,0x04,0x01,0x00,0x01,0xff,0xe2,
++	0x89,0x85,0xcc,0xb8,0x00,0x92,0x12,0x91,0x0e,0x10,0x04,0x01,0x00,0x01,0xff,0xe2,
++	0x89,0x88,0xcc,0xb8,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd4,0x40,0xd3,0x1e,0x92,
++	0x1a,0xd1,0x0c,0x10,0x08,0x01,0xff,0x3d,0xcc,0xb8,0x00,0x01,0x00,0x10,0x0a,0x01,
++	0xff,0xe2,0x89,0xa1,0xcc,0xb8,0x00,0x01,0x00,0x01,0x00,0x52,0x04,0x01,0x00,0xd1,
++	0x0e,0x10,0x04,0x01,0x00,0x01,0xff,0xe2,0x89,0x8d,0xcc,0xb8,0x00,0x10,0x08,0x01,
++	0xff,0x3c,0xcc,0xb8,0x00,0x01,0xff,0x3e,0xcc,0xb8,0x00,0xd3,0x30,0xd2,0x18,0x91,
++	0x14,0x10,0x0a,0x01,0xff,0xe2,0x89,0xa4,0xcc,0xb8,0x00,0x01,0xff,0xe2,0x89,0xa5,
++	0xcc,0xb8,0x00,0x01,0x00,0x91,0x14,0x10,0x0a,0x01,0xff,0xe2,0x89,0xb2,0xcc,0xb8,
++	0x00,0x01,0xff,0xe2,0x89,0xb3,0xcc,0xb8,0x00,0x01,0x00,0x92,0x18,0x91,0x14,0x10,
++	0x0a,0x01,0xff,0xe2,0x89,0xb6,0xcc,0xb8,0x00,0x01,0xff,0xe2,0x89,0xb7,0xcc,0xb8,
++	0x00,0x01,0x00,0x01,0x00,0xd0,0x86,0xcf,0x86,0xd5,0x50,0x94,0x4c,0xd3,0x30,0xd2,
++	0x18,0x91,0x14,0x10,0x0a,0x01,0xff,0xe2,0x89,0xba,0xcc,0xb8,0x00,0x01,0xff,0xe2,
++	0x89,0xbb,0xcc,0xb8,0x00,0x01,0x00,0x91,0x14,0x10,0x0a,0x01,0xff,0xe2,0x8a,0x82,
++	0xcc,0xb8,0x00,0x01,0xff,0xe2,0x8a,0x83,0xcc,0xb8,0x00,0x01,0x00,0x92,0x18,0x91,
++	0x14,0x10,0x0a,0x01,0xff,0xe2,0x8a,0x86,0xcc,0xb8,0x00,0x01,0xff,0xe2,0x8a,0x87,
++	0xcc,0xb8,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x94,0x30,0x53,0x04,0x01,0x00,0x52,
++	0x04,0x01,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0xe2,0x8a,0xa2,0xcc,0xb8,0x00,0x01,
++	0xff,0xe2,0x8a,0xa8,0xcc,0xb8,0x00,0x10,0x0a,0x01,0xff,0xe2,0x8a,0xa9,0xcc,0xb8,
++	0x00,0x01,0xff,0xe2,0x8a,0xab,0xcc,0xb8,0x00,0x01,0x00,0xcf,0x86,0x55,0x04,0x01,
++	0x00,0xd4,0x5c,0xd3,0x2c,0x92,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0xe2,0x89,0xbc,
++	0xcc,0xb8,0x00,0x01,0xff,0xe2,0x89,0xbd,0xcc,0xb8,0x00,0x10,0x0a,0x01,0xff,0xe2,
++	0x8a,0x91,0xcc,0xb8,0x00,0x01,0xff,0xe2,0x8a,0x92,0xcc,0xb8,0x00,0x01,0x00,0xd2,
++	0x18,0x51,0x04,0x01,0x00,0x10,0x0a,0x01,0xff,0xe2,0x8a,0xb2,0xcc,0xb8,0x00,0x01,
++	0xff,0xe2,0x8a,0xb3,0xcc,0xb8,0x00,0x91,0x14,0x10,0x0a,0x01,0xff,0xe2,0x8a,0xb4,
++	0xcc,0xb8,0x00,0x01,0xff,0xe2,0x8a,0xb5,0xcc,0xb8,0x00,0x01,0x00,0x93,0x0c,0x92,
++	0x08,0x11,0x04,0x01,0x00,0x06,0x00,0x06,0x00,0x06,0x00,0xd1,0x64,0xd0,0x3e,0xcf,
++	0x86,0xd5,0x18,0x94,0x14,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x04,
++	0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x94,0x20,0x53,0x04,0x01,0x00,0x92,
++	0x18,0xd1,0x0c,0x10,0x04,0x01,0x00,0x01,0xff,0xe3,0x80,0x88,0x00,0x10,0x08,0x01,
++	0xff,0xe3,0x80,0x89,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xcf,0x86,0x55,0x04,0x01,
++	0x00,0x54,0x04,0x01,0x00,0x53,0x04,0x01,0x00,0xd2,0x0c,0x51,0x04,0x01,0x00,0x10,
++	0x04,0x01,0x00,0x04,0x00,0x91,0x08,0x10,0x04,0x06,0x00,0x04,0x00,0x04,0x00,0xd0,
++	0x1e,0xcf,0x86,0x95,0x18,0x54,0x04,0x04,0x00,0x53,0x04,0x04,0x00,0x92,0x0c,0x51,
++	0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x06,0x00,0x06,0x00,0x06,0x00,0xcf,0x86,0xd5,
++	0x2c,0xd4,0x14,0x53,0x04,0x06,0x00,0x52,0x04,0x06,0x00,0x51,0x04,0x06,0x00,0x10,
++	0x04,0x06,0x00,0x07,0x00,0xd3,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x07,0x00,0x08,
++	0x00,0x08,0x00,0x08,0x00,0x12,0x04,0x08,0x00,0x09,0x00,0xd4,0x14,0x53,0x04,0x09,
++	0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x0b,0x00,0x0c,0x00,0x0c,0x00,0x0c,0x00,0xd3,
++	0x08,0x12,0x04,0x0c,0x00,0x10,0x00,0xd2,0x0c,0x51,0x04,0x10,0x00,0x10,0x04,0x10,
++	0x00,0x12,0x00,0x51,0x04,0x12,0x00,0x10,0x04,0x12,0x00,0x13,0x00,0xd3,0xa6,0xd2,
++	0x74,0xd1,0x40,0xd0,0x22,0xcf,0x86,0x55,0x04,0x01,0x00,0x94,0x18,0x93,0x14,0x52,
++	0x04,0x01,0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x04,0x00,0x10,0x04,0x04,0x00,0x00,
++	0x00,0x00,0x00,0x00,0x00,0xcf,0x86,0x95,0x18,0x94,0x14,0x53,0x04,0x01,0x00,0x92,
++	0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,
++	0x00,0xd0,0x06,0xcf,0x06,0x01,0x00,0xcf,0x86,0x55,0x04,0x01,0x00,0xd4,0x14,0x53,
++	0x04,0x01,0x00,0x92,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x06,0x00,0x06,
++	0x00,0x53,0x04,0x06,0x00,0x52,0x04,0x06,0x00,0x51,0x04,0x06,0x00,0x10,0x04,0x06,
++	0x00,0x07,0x00,0xd1,0x06,0xcf,0x06,0x01,0x00,0xd0,0x1a,0xcf,0x86,0x95,0x14,0x54,
++	0x04,0x01,0x00,0x93,0x0c,0x52,0x04,0x01,0x00,0x11,0x04,0x01,0x00,0x06,0x00,0x06,
++	0x00,0x01,0x00,0xcf,0x86,0x55,0x04,0x01,0x00,0x54,0x04,0x01,0x00,0x13,0x04,0x04,
++	0x00,0x06,0x00,0xd2,0xdc,0xd1,0x48,0xd0,0x26,0xcf,0x86,0x95,0x20,0x54,0x04,0x01,
++	0x00,0xd3,0x0c,0x52,0x04,0x01,0x00,0x11,0x04,0x07,0x00,0x06,0x00,0x92,0x0c,0x91,
++	0x08,0x10,0x04,0x08,0x00,0x04,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xcf,0x86,0x55,
++	0x04,0x01,0x00,0x54,0x04,0x01,0x00,0xd3,0x0c,0x92,0x08,0x11,0x04,0x04,0x00,0x06,
++	0x00,0x06,0x00,0x52,0x04,0x06,0x00,0x11,0x04,0x06,0x00,0x08,0x00,0xd0,0x5e,0xcf,
++	0x86,0xd5,0x2c,0xd4,0x10,0x53,0x04,0x06,0x00,0x92,0x08,0x11,0x04,0x06,0x00,0x07,
++	0x00,0x07,0x00,0xd3,0x0c,0x92,0x08,0x11,0x04,0x07,0x00,0x08,0x00,0x08,0x00,0x52,
++	0x04,0x08,0x00,0x91,0x08,0x10,0x04,0x08,0x00,0x0a,0x00,0x0b,0x00,0xd4,0x10,0x93,
++	0x0c,0x92,0x08,0x11,0x04,0x07,0x00,0x08,0x00,0x08,0x00,0x08,0x00,0xd3,0x10,0x92,
++	0x0c,0x51,0x04,0x08,0x00,0x10,0x04,0x09,0x00,0x0a,0x00,0x0a,0x00,0x52,0x04,0x0a,
++	0x00,0x91,0x08,0x10,0x04,0x0a,0x00,0x0b,0x00,0x0b,0x00,0xcf,0x86,0xd5,0x1c,0x94,
++	0x18,0xd3,0x08,0x12,0x04,0x0a,0x00,0x0b,0x00,0x52,0x04,0x0b,0x00,0x51,0x04,0x0b,
++	0x00,0x10,0x04,0x0c,0x00,0x0b,0x00,0x0b,0x00,0x94,0x14,0x93,0x10,0x92,0x0c,0x51,
++	0x04,0x0b,0x00,0x10,0x04,0x0c,0x00,0x0b,0x00,0x0c,0x00,0x0b,0x00,0x0b,0x00,0xd1,
++	0xa8,0xd0,0x42,0xcf,0x86,0xd5,0x28,0x94,0x24,0xd3,0x18,0xd2,0x0c,0x91,0x08,0x10,
++	0x04,0x10,0x00,0x01,0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x01,0x00,0x0c,0x00,0x01,
++	0x00,0x92,0x08,0x11,0x04,0x01,0x00,0x0c,0x00,0x01,0x00,0x01,0x00,0x94,0x14,0x53,
++	0x04,0x01,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x0c,0x00,0x01,0x00,0x01,0x00,0x01,
++	0x00,0x01,0x00,0xcf,0x86,0xd5,0x40,0xd4,0x18,0x53,0x04,0x01,0x00,0x52,0x04,0x01,
++	0x00,0xd1,0x08,0x10,0x04,0x0c,0x00,0x01,0x00,0x10,0x04,0x0c,0x00,0x01,0x00,0xd3,
++	0x18,0xd2,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x0c,0x00,0x51,0x04,0x0c,
++	0x00,0x10,0x04,0x01,0x00,0x0b,0x00,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,
++	0x04,0x01,0x00,0x0c,0x00,0xd4,0x14,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x0c,
++	0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x06,0x00,0x93,0x0c,0x52,0x04,0x06,0x00,0x11,
++	0x04,0x06,0x00,0x01,0x00,0x01,0x00,0xd0,0x3e,0xcf,0x86,0xd5,0x18,0x54,0x04,0x01,
++	0x00,0x93,0x10,0x52,0x04,0x01,0x00,0x91,0x08,0x10,0x04,0x01,0x00,0x0c,0x00,0x0c,
++	0x00,0x01,0x00,0x54,0x04,0x01,0x00,0xd3,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x0c,
++	0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,
++	0x04,0x01,0x00,0x0c,0x00,0xcf,0x86,0xd5,0x2c,0x94,0x28,0xd3,0x10,0x52,0x04,0x08,
++	0x00,0x51,0x04,0x08,0x00,0x10,0x04,0x08,0x00,0x09,0x00,0xd2,0x0c,0x51,0x04,0x09,
++	0x00,0x10,0x04,0x09,0x00,0x0d,0x00,0x91,0x08,0x10,0x04,0x0a,0x00,0x0d,0x00,0x0c,
++	0x00,0x06,0x00,0x94,0x0c,0x53,0x04,0x06,0x00,0x12,0x04,0x06,0x00,0x0a,0x00,0x06,
++	0x00,0xe4,0x39,0x01,0xd3,0x0c,0xd2,0x06,0xcf,0x06,0x04,0x00,0xcf,0x06,0x06,0x00,
++	0xd2,0x30,0xd1,0x06,0xcf,0x06,0x06,0x00,0xd0,0x06,0xcf,0x06,0x06,0x00,0xcf,0x86,
++	0x95,0x1e,0x54,0x04,0x06,0x00,0x53,0x04,0x06,0x00,0x52,0x04,0x06,0x00,0x91,0x0e,
++	0x10,0x0a,0x06,0xff,0xe2,0xab,0x9d,0xcc,0xb8,0x00,0x06,0x00,0x06,0x00,0x06,0x00,
++	0xd1,0x80,0xd0,0x3a,0xcf,0x86,0xd5,0x28,0xd4,0x10,0x53,0x04,0x07,0x00,0x52,0x04,
++	0x07,0x00,0x11,0x04,0x07,0x00,0x08,0x00,0xd3,0x08,0x12,0x04,0x08,0x00,0x09,0x00,
++	0x92,0x0c,0x51,0x04,0x09,0x00,0x10,0x04,0x09,0x00,0x0a,0x00,0x0a,0x00,0x94,0x0c,
++	0x93,0x08,0x12,0x04,0x09,0x00,0x0a,0x00,0x0a,0x00,0x0a,0x00,0xcf,0x86,0xd5,0x30,
++	0xd4,0x14,0x53,0x04,0x0a,0x00,0x52,0x04,0x0a,0x00,0x91,0x08,0x10,0x04,0x0a,0x00,
++	0x10,0x00,0x10,0x00,0xd3,0x10,0x52,0x04,0x0a,0x00,0x91,0x08,0x10,0x04,0x0a,0x00,
++	0x0b,0x00,0x0b,0x00,0x92,0x08,0x11,0x04,0x0b,0x00,0x10,0x00,0x10,0x00,0x54,0x04,
++	0x10,0x00,0x93,0x0c,0x52,0x04,0x10,0x00,0x11,0x04,0x00,0x00,0x10,0x00,0x10,0x00,
++	0xd0,0x32,0xcf,0x86,0xd5,0x14,0x54,0x04,0x10,0x00,0x93,0x0c,0x52,0x04,0x10,0x00,
++	0x11,0x04,0x10,0x00,0x00,0x00,0x10,0x00,0x54,0x04,0x10,0x00,0x53,0x04,0x10,0x00,
++	0xd2,0x08,0x11,0x04,0x10,0x00,0x14,0x00,0x91,0x08,0x10,0x04,0x14,0x00,0x10,0x00,
++	0x10,0x00,0xcf,0x86,0xd5,0x28,0xd4,0x14,0x53,0x04,0x10,0x00,0x92,0x0c,0x91,0x08,
++	0x10,0x04,0x10,0x00,0x15,0x00,0x10,0x00,0x10,0x00,0x93,0x10,0x92,0x0c,0x51,0x04,
++	0x10,0x00,0x10,0x04,0x13,0x00,0x14,0x00,0x14,0x00,0x14,0x00,0xd4,0x0c,0x53,0x04,
++	0x14,0x00,0x12,0x04,0x14,0x00,0x11,0x00,0x53,0x04,0x14,0x00,0x52,0x04,0x14,0x00,
++	0x51,0x04,0x14,0x00,0x10,0x04,0x14,0x00,0x15,0x00,0xe3,0xb9,0x01,0xd2,0xac,0xd1,
++	0x68,0xd0,0x1e,0xcf,0x86,0x55,0x04,0x08,0x00,0x94,0x14,0x53,0x04,0x08,0x00,0x52,
++	0x04,0x08,0x00,0x51,0x04,0x08,0x00,0x10,0x04,0x08,0x00,0x00,0x00,0x08,0x00,0xcf,
++	0x86,0xd5,0x18,0x54,0x04,0x08,0x00,0x53,0x04,0x08,0x00,0x52,0x04,0x08,0x00,0x51,
++	0x04,0x08,0x00,0x10,0x04,0x08,0x00,0x00,0x00,0xd4,0x14,0x53,0x04,0x09,0x00,0x52,
++	0x04,0x09,0x00,0x91,0x08,0x10,0x04,0x09,0x00,0x0a,0x00,0x0a,0x00,0xd3,0x10,0x92,
++	0x0c,0x91,0x08,0x10,0x04,0x0b,0x00,0x0a,0x00,0x0a,0x00,0x09,0x00,0x52,0x04,0x0a,
++	0x00,0x11,0x04,0x0a,0x00,0x0b,0x00,0xd0,0x06,0xcf,0x06,0x08,0x00,0xcf,0x86,0x55,
++	0x04,0x08,0x00,0xd4,0x1c,0x53,0x04,0x08,0x00,0xd2,0x0c,0x51,0x04,0x08,0x00,0x10,
++	0x04,0x08,0x00,0x0b,0x00,0x51,0x04,0x0b,0x00,0x10,0x04,0x0b,0x00,0x0b,0xe6,0xd3,
++	0x0c,0x92,0x08,0x11,0x04,0x0b,0xe6,0x0d,0x00,0x00,0x00,0x92,0x0c,0x91,0x08,0x10,
++	0x04,0x00,0x00,0x08,0x00,0x08,0x00,0x08,0x00,0xd1,0x6c,0xd0,0x2a,0xcf,0x86,0x55,
++	0x04,0x08,0x00,0x94,0x20,0xd3,0x10,0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00,0x10,
++	0x04,0x00,0x00,0x0d,0x00,0x52,0x04,0x00,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x0d,
++	0x00,0x00,0x00,0x08,0x00,0xcf,0x86,0x55,0x04,0x08,0x00,0xd4,0x1c,0xd3,0x0c,0x52,
++	0x04,0x08,0x00,0x11,0x04,0x08,0x00,0x0d,0x00,0x52,0x04,0x00,0x00,0x51,0x04,0x00,
++	0x00,0x10,0x04,0x00,0x00,0x08,0x00,0xd3,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x0c,
++	0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x52,0x04,0x00,0x00,0x51,0x04,0x00,0x00,0x10,
++	0x04,0x00,0x00,0x0c,0x09,0xd0,0x5a,0xcf,0x86,0xd5,0x18,0x54,0x04,0x08,0x00,0x93,
++	0x10,0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00,0x10,0x04,0x08,0x00,0x00,0x00,0x00,
++	0x00,0xd4,0x20,0xd3,0x10,0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00,0x10,0x04,0x08,
++	0x00,0x00,0x00,0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00,0x10,0x04,0x08,0x00,0x00,
++	0x00,0xd3,0x10,0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00,0x10,0x04,0x08,0x00,0x00,
++	0x00,0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00,0x10,0x04,0x08,0x00,0x00,0x00,0xcf,
++	0x86,0x95,0x40,0xd4,0x20,0xd3,0x10,0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00,0x10,
++	0x04,0x08,0x00,0x00,0x00,0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00,0x10,0x04,0x08,
++	0x00,0x00,0x00,0xd3,0x10,0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00,0x10,0x04,0x08,
++	0x00,0x00,0x00,0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00,0x10,0x04,0x08,0x00,0x00,
++	0x00,0x0a,0xe6,0xd2,0x9c,0xd1,0x68,0xd0,0x32,0xcf,0x86,0xd5,0x14,0x54,0x04,0x08,
++	0x00,0x53,0x04,0x08,0x00,0x52,0x04,0x0a,0x00,0x11,0x04,0x08,0x00,0x0a,0x00,0x54,
++	0x04,0x0a,0x00,0xd3,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x0a,0x00,0x0b,0x00,0x0d,
++	0x00,0x0d,0x00,0x12,0x04,0x0d,0x00,0x10,0x00,0xcf,0x86,0x95,0x30,0x94,0x2c,0xd3,
++	0x18,0xd2,0x0c,0x51,0x04,0x10,0x00,0x10,0x04,0x10,0x00,0x12,0x00,0x91,0x08,0x10,
++	0x04,0x12,0x00,0x13,0x00,0x13,0x00,0xd2,0x08,0x11,0x04,0x13,0x00,0x14,0x00,0x51,
++	0x04,0x14,0x00,0x10,0x04,0x14,0x00,0x15,0x00,0x00,0x00,0x00,0x00,0xd0,0x1e,0xcf,
++	0x86,0x95,0x18,0x54,0x04,0x04,0x00,0x53,0x04,0x04,0x00,0x92,0x0c,0x51,0x04,0x04,
++	0x00,0x10,0x04,0x00,0x00,0x04,0x00,0x04,0x00,0x04,0x00,0xcf,0x86,0x55,0x04,0x04,
++	0x00,0x54,0x04,0x04,0x00,0x93,0x08,0x12,0x04,0x04,0x00,0x00,0x00,0x00,0x00,0xd1,
++	0x06,0xcf,0x06,0x04,0x00,0xd0,0x06,0xcf,0x06,0x04,0x00,0xcf,0x86,0xd5,0x14,0x54,
++	0x04,0x04,0x00,0x93,0x0c,0x52,0x04,0x04,0x00,0x11,0x04,0x04,0x00,0x00,0x00,0x00,
++	0x00,0x54,0x04,0x00,0x00,0x53,0x04,0x04,0x00,0x12,0x04,0x04,0x00,0x00,0x00,0xcf,
++	0x86,0xe5,0xa6,0x05,0xe4,0x9f,0x05,0xe3,0x96,0x04,0xe2,0xe4,0x03,0xe1,0xc0,0x01,
++	0xd0,0x3e,0xcf,0x86,0x55,0x04,0x01,0x00,0xd4,0x1c,0x53,0x04,0x01,0x00,0xd2,0x0c,
++	0x51,0x04,0x01,0x00,0x10,0x04,0x01,0xda,0x01,0xe4,0x91,0x08,0x10,0x04,0x01,0xe8,
++	0x01,0xde,0x01,0xe0,0x53,0x04,0x01,0x00,0xd2,0x0c,0x51,0x04,0x04,0x00,0x10,0x04,
++	0x04,0x00,0x06,0x00,0x51,0x04,0x06,0x00,0x10,0x04,0x04,0x00,0x01,0x00,0xcf,0x86,
++	0xd5,0xaa,0xd4,0x32,0xd3,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,0x01,0x00,
++	0x01,0x00,0x01,0x00,0x52,0x04,0x01,0x00,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe3,0x81,
++	0x8b,0xe3,0x82,0x99,0x00,0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,0x81,0x8d,0xe3,0x82,
++	0x99,0x00,0x01,0x00,0xd3,0x3c,0xd2,0x1e,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe3,0x81,
++	0x8f,0xe3,0x82,0x99,0x00,0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,0x81,0x91,0xe3,0x82,
++	0x99,0x00,0x01,0x00,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe3,0x81,0x93,0xe3,0x82,0x99,
++	0x00,0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,0x81,0x95,0xe3,0x82,0x99,0x00,0x01,0x00,
++	0xd2,0x1e,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe3,0x81,0x97,0xe3,0x82,0x99,0x00,0x01,
++	0x00,0x10,0x0b,0x01,0xff,0xe3,0x81,0x99,0xe3,0x82,0x99,0x00,0x01,0x00,0xd1,0x0f,
++	0x10,0x0b,0x01,0xff,0xe3,0x81,0x9b,0xe3,0x82,0x99,0x00,0x01,0x00,0x10,0x0b,0x01,
++	0xff,0xe3,0x81,0x9d,0xe3,0x82,0x99,0x00,0x01,0x00,0xd4,0x53,0xd3,0x3c,0xd2,0x1e,
++	0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe3,0x81,0x9f,0xe3,0x82,0x99,0x00,0x01,0x00,0x10,
++	0x0b,0x01,0xff,0xe3,0x81,0xa1,0xe3,0x82,0x99,0x00,0x01,0x00,0xd1,0x0f,0x10,0x04,
++	0x01,0x00,0x01,0xff,0xe3,0x81,0xa4,0xe3,0x82,0x99,0x00,0x10,0x04,0x01,0x00,0x01,
++	0xff,0xe3,0x81,0xa6,0xe3,0x82,0x99,0x00,0x92,0x13,0x91,0x0f,0x10,0x04,0x01,0x00,
++	0x01,0xff,0xe3,0x81,0xa8,0xe3,0x82,0x99,0x00,0x01,0x00,0x01,0x00,0xd3,0x4a,0xd2,
++	0x25,0xd1,0x16,0x10,0x0b,0x01,0xff,0xe3,0x81,0xaf,0xe3,0x82,0x99,0x00,0x01,0xff,
++	0xe3,0x81,0xaf,0xe3,0x82,0x9a,0x00,0x10,0x04,0x01,0x00,0x01,0xff,0xe3,0x81,0xb2,
++	0xe3,0x82,0x99,0x00,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe3,0x81,0xb2,0xe3,0x82,0x9a,
++	0x00,0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,0x81,0xb5,0xe3,0x82,0x99,0x00,0x01,0xff,
++	0xe3,0x81,0xb5,0xe3,0x82,0x9a,0x00,0xd2,0x1e,0xd1,0x0f,0x10,0x04,0x01,0x00,0x01,
++	0xff,0xe3,0x81,0xb8,0xe3,0x82,0x99,0x00,0x10,0x0b,0x01,0xff,0xe3,0x81,0xb8,0xe3,
++	0x82,0x9a,0x00,0x01,0x00,0x91,0x16,0x10,0x0b,0x01,0xff,0xe3,0x81,0xbb,0xe3,0x82,
++	0x99,0x00,0x01,0xff,0xe3,0x81,0xbb,0xe3,0x82,0x9a,0x00,0x01,0x00,0xd0,0xee,0xcf,
++	0x86,0xd5,0x42,0x54,0x04,0x01,0x00,0xd3,0x1b,0x52,0x04,0x01,0x00,0xd1,0x0f,0x10,
++	0x0b,0x01,0xff,0xe3,0x81,0x86,0xe3,0x82,0x99,0x00,0x06,0x00,0x10,0x04,0x06,0x00,
++	0x00,0x00,0xd2,0x10,0xd1,0x08,0x10,0x04,0x00,0x00,0x01,0x08,0x10,0x04,0x01,0x08,
++	0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,0x82,0x9d,0xe3,0x82,0x99,
++	0x00,0x06,0x00,0xd4,0x32,0xd3,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x06,0x00,0x01,
++	0x00,0x01,0x00,0x01,0x00,0x52,0x04,0x01,0x00,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe3,
++	0x82,0xab,0xe3,0x82,0x99,0x00,0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,0x82,0xad,0xe3,
++	0x82,0x99,0x00,0x01,0x00,0xd3,0x3c,0xd2,0x1e,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe3,
++	0x82,0xaf,0xe3,0x82,0x99,0x00,0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,0x82,0xb1,0xe3,
++	0x82,0x99,0x00,0x01,0x00,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe3,0x82,0xb3,0xe3,0x82,
++	0x99,0x00,0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,0x82,0xb5,0xe3,0x82,0x99,0x00,0x01,
++	0x00,0xd2,0x1e,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe3,0x82,0xb7,0xe3,0x82,0x99,0x00,
++	0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,0x82,0xb9,0xe3,0x82,0x99,0x00,0x01,0x00,0xd1,
++	0x0f,0x10,0x0b,0x01,0xff,0xe3,0x82,0xbb,0xe3,0x82,0x99,0x00,0x01,0x00,0x10,0x0b,
++	0x01,0xff,0xe3,0x82,0xbd,0xe3,0x82,0x99,0x00,0x01,0x00,0xcf,0x86,0xd5,0xd5,0xd4,
++	0x53,0xd3,0x3c,0xd2,0x1e,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe3,0x82,0xbf,0xe3,0x82,
++	0x99,0x00,0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,0x83,0x81,0xe3,0x82,0x99,0x00,0x01,
++	0x00,0xd1,0x0f,0x10,0x04,0x01,0x00,0x01,0xff,0xe3,0x83,0x84,0xe3,0x82,0x99,0x00,
++	0x10,0x04,0x01,0x00,0x01,0xff,0xe3,0x83,0x86,0xe3,0x82,0x99,0x00,0x92,0x13,0x91,
++	0x0f,0x10,0x04,0x01,0x00,0x01,0xff,0xe3,0x83,0x88,0xe3,0x82,0x99,0x00,0x01,0x00,
++	0x01,0x00,0xd3,0x4a,0xd2,0x25,0xd1,0x16,0x10,0x0b,0x01,0xff,0xe3,0x83,0x8f,0xe3,
++	0x82,0x99,0x00,0x01,0xff,0xe3,0x83,0x8f,0xe3,0x82,0x9a,0x00,0x10,0x04,0x01,0x00,
++	0x01,0xff,0xe3,0x83,0x92,0xe3,0x82,0x99,0x00,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe3,
++	0x83,0x92,0xe3,0x82,0x9a,0x00,0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,0x83,0x95,0xe3,
++	0x82,0x99,0x00,0x01,0xff,0xe3,0x83,0x95,0xe3,0x82,0x9a,0x00,0xd2,0x1e,0xd1,0x0f,
++	0x10,0x04,0x01,0x00,0x01,0xff,0xe3,0x83,0x98,0xe3,0x82,0x99,0x00,0x10,0x0b,0x01,
++	0xff,0xe3,0x83,0x98,0xe3,0x82,0x9a,0x00,0x01,0x00,0x91,0x16,0x10,0x0b,0x01,0xff,
++	0xe3,0x83,0x9b,0xe3,0x82,0x99,0x00,0x01,0xff,0xe3,0x83,0x9b,0xe3,0x82,0x9a,0x00,
++	0x01,0x00,0x54,0x04,0x01,0x00,0xd3,0x22,0x52,0x04,0x01,0x00,0xd1,0x0f,0x10,0x0b,
++	0x01,0xff,0xe3,0x82,0xa6,0xe3,0x82,0x99,0x00,0x01,0x00,0x10,0x04,0x01,0x00,0x01,
++	0xff,0xe3,0x83,0xaf,0xe3,0x82,0x99,0x00,0xd2,0x25,0xd1,0x16,0x10,0x0b,0x01,0xff,
++	0xe3,0x83,0xb0,0xe3,0x82,0x99,0x00,0x01,0xff,0xe3,0x83,0xb1,0xe3,0x82,0x99,0x00,
++	0x10,0x0b,0x01,0xff,0xe3,0x83,0xb2,0xe3,0x82,0x99,0x00,0x01,0x00,0x51,0x04,0x01,
++	0x00,0x10,0x0b,0x01,0xff,0xe3,0x83,0xbd,0xe3,0x82,0x99,0x00,0x06,0x00,0xd1,0x65,
++	0xd0,0x46,0xcf,0x86,0xd5,0x18,0x94,0x14,0x93,0x10,0x52,0x04,0x00,0x00,0x91,0x08,
++	0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd4,0x18,0x53,0x04,
++	0x01,0x00,0x52,0x04,0x01,0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x0a,0x00,0x10,0x04,
++	0x13,0x00,0x14,0x00,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,0x01,0x00,
++	0x01,0x00,0x01,0x00,0x01,0x00,0xcf,0x86,0x55,0x04,0x01,0x00,0x94,0x15,0x93,0x11,
++	0x52,0x04,0x01,0x00,0x91,0x09,0x10,0x05,0x01,0xff,0x00,0x01,0x00,0x01,0x00,0x01,
++	0x00,0x01,0x00,0xd0,0x32,0xcf,0x86,0xd5,0x18,0x94,0x14,0x53,0x04,0x01,0x00,0x52,
++	0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x54,
++	0x04,0x04,0x00,0x53,0x04,0x04,0x00,0x92,0x0c,0x51,0x04,0x0c,0x00,0x10,0x04,0x0c,
++	0x00,0x00,0x00,0x00,0x00,0xcf,0x86,0xd5,0x08,0x14,0x04,0x08,0x00,0x0a,0x00,0x94,
++	0x0c,0x93,0x08,0x12,0x04,0x0a,0x00,0x00,0x00,0x00,0x00,0x06,0x00,0xd2,0xa4,0xd1,
++	0x5c,0xd0,0x22,0xcf,0x86,0x95,0x1c,0x54,0x04,0x01,0x00,0x53,0x04,0x01,0x00,0x52,
++	0x04,0x01,0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x07,0x00,0x10,0x04,0x07,0x00,0x00,
++	0x00,0x01,0x00,0xcf,0x86,0xd5,0x20,0xd4,0x0c,0x93,0x08,0x12,0x04,0x01,0x00,0x0b,
++	0x00,0x0b,0x00,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x07,0x00,0x06,0x00,0x06,
++	0x00,0x06,0x00,0x06,0x00,0x54,0x04,0x01,0x00,0x53,0x04,0x01,0x00,0x52,0x04,0x01,
++	0x00,0x51,0x04,0x07,0x00,0x10,0x04,0x08,0x00,0x01,0x00,0xd0,0x1e,0xcf,0x86,0x55,
++	0x04,0x01,0x00,0x54,0x04,0x01,0x00,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x01,
++	0x00,0x06,0x00,0x06,0x00,0x06,0x00,0x06,0x00,0xcf,0x86,0xd5,0x10,0x94,0x0c,0x53,
++	0x04,0x01,0x00,0x12,0x04,0x01,0x00,0x07,0x00,0x01,0x00,0x54,0x04,0x01,0x00,0x53,
++	0x04,0x01,0x00,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x16,
++	0x00,0xd1,0x30,0xd0,0x06,0xcf,0x06,0x01,0x00,0xcf,0x86,0x55,0x04,0x01,0x00,0x54,
++	0x04,0x01,0x00,0xd3,0x10,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,
++	0x00,0x07,0x00,0x92,0x0c,0x51,0x04,0x07,0x00,0x10,0x04,0x07,0x00,0x01,0x00,0x01,
++	0x00,0xd0,0x06,0xcf,0x06,0x01,0x00,0xcf,0x86,0xd5,0x14,0x54,0x04,0x01,0x00,0x53,
++	0x04,0x01,0x00,0x52,0x04,0x01,0x00,0x11,0x04,0x01,0x00,0x07,0x00,0x54,0x04,0x01,
++	0x00,0x53,0x04,0x01,0x00,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,
++	0x00,0x07,0x00,0xcf,0x06,0x04,0x00,0xcf,0x06,0x04,0x00,0xd1,0x48,0xd0,0x40,0xcf,
++	0x86,0xd5,0x06,0xcf,0x06,0x04,0x00,0xd4,0x06,0xcf,0x06,0x04,0x00,0xd3,0x2c,0xd2,
++	0x06,0xcf,0x06,0x04,0x00,0xd1,0x06,0xcf,0x06,0x04,0x00,0xd0,0x1a,0xcf,0x86,0x55,
++	0x04,0x04,0x00,0x54,0x04,0x04,0x00,0x93,0x0c,0x52,0x04,0x04,0x00,0x11,0x04,0x04,
++	0x00,0x00,0x00,0x00,0x00,0xcf,0x06,0x07,0x00,0xcf,0x06,0x01,0x00,0xcf,0x86,0xcf,
++	0x06,0x01,0x00,0xcf,0x86,0xcf,0x06,0x01,0x00,0xe2,0x71,0x05,0xd1,0x8c,0xd0,0x08,
++	0xcf,0x86,0xcf,0x06,0x01,0x00,0xcf,0x86,0xd5,0x06,0xcf,0x06,0x01,0x00,0xd4,0x06,
++	0xcf,0x06,0x01,0x00,0xd3,0x06,0xcf,0x06,0x01,0x00,0xd2,0x06,0xcf,0x06,0x01,0x00,
++	0xd1,0x06,0xcf,0x06,0x01,0x00,0xd0,0x22,0xcf,0x86,0x55,0x04,0x01,0x00,0xd4,0x10,
++	0x93,0x0c,0x52,0x04,0x01,0x00,0x11,0x04,0x01,0x00,0x08,0x00,0x08,0x00,0x53,0x04,
++	0x08,0x00,0x12,0x04,0x08,0x00,0x0a,0x00,0xcf,0x86,0xd5,0x28,0xd4,0x18,0xd3,0x08,
++	0x12,0x04,0x0a,0x00,0x0b,0x00,0x52,0x04,0x0b,0x00,0x91,0x08,0x10,0x04,0x0d,0x00,
++	0x11,0x00,0x11,0x00,0x93,0x0c,0x52,0x04,0x11,0x00,0x11,0x04,0x11,0x00,0x13,0x00,
++	0x13,0x00,0x94,0x14,0x53,0x04,0x13,0x00,0x92,0x0c,0x51,0x04,0x13,0x00,0x10,0x04,
++	0x13,0x00,0x14,0x00,0x14,0x00,0x00,0x00,0xe0,0xdb,0x04,0xcf,0x86,0xe5,0xdf,0x01,
++	0xd4,0x06,0xcf,0x06,0x04,0x00,0xd3,0x74,0xd2,0x6e,0xd1,0x06,0xcf,0x06,0x04,0x00,
++	0xd0,0x3e,0xcf,0x86,0xd5,0x18,0x94,0x14,0x53,0x04,0x04,0x00,0x52,0x04,0x04,0x00,
++	0x91,0x08,0x10,0x04,0x04,0x00,0x00,0x00,0x00,0x00,0x04,0x00,0xd4,0x10,0x93,0x0c,
++	0x92,0x08,0x11,0x04,0x04,0x00,0x06,0x00,0x04,0x00,0x04,0x00,0x93,0x10,0x52,0x04,
++	0x04,0x00,0x91,0x08,0x10,0x04,0x06,0x00,0x04,0x00,0x04,0x00,0x04,0x00,0xcf,0x86,
++	0x95,0x24,0x94,0x20,0x93,0x1c,0xd2,0x0c,0x91,0x08,0x10,0x04,0x04,0x00,0x06,0x00,
++	0x04,0x00,0xd1,0x08,0x10,0x04,0x04,0x00,0x06,0x00,0x10,0x04,0x04,0x00,0x00,0x00,
++	0x00,0x00,0x0b,0x00,0x0b,0x00,0xcf,0x06,0x0a,0x00,0xd2,0x84,0xd1,0x4c,0xd0,0x16,
++	0xcf,0x86,0x55,0x04,0x0a,0x00,0x94,0x0c,0x53,0x04,0x0a,0x00,0x12,0x04,0x0a,0x00,
++	0x00,0x00,0x00,0x00,0xcf,0x86,0x55,0x04,0x0a,0x00,0xd4,0x1c,0xd3,0x0c,0x92,0x08,
++	0x11,0x04,0x0c,0x00,0x0a,0x00,0x0a,0x00,0x52,0x04,0x0a,0x00,0x51,0x04,0x0a,0x00,
++	0x10,0x04,0x0a,0x00,0x0a,0xe6,0xd3,0x08,0x12,0x04,0x0a,0x00,0x0d,0xe6,0x52,0x04,
++	0x0d,0xe6,0x11,0x04,0x0a,0xe6,0x0a,0x00,0xd0,0x1e,0xcf,0x86,0x95,0x18,0x54,0x04,
++	0x0a,0x00,0x53,0x04,0x0a,0x00,0x52,0x04,0x10,0x00,0x51,0x04,0x10,0x00,0x10,0x04,
++	0x11,0xe6,0x0d,0xe6,0x0b,0x00,0xcf,0x86,0x55,0x04,0x0b,0x00,0x54,0x04,0x0b,0x00,
++	0x93,0x0c,0x92,0x08,0x11,0x04,0x0b,0xe6,0x0b,0x00,0x0b,0x00,0x00,0x00,0xd1,0x40,
++	0xd0,0x3a,0xcf,0x86,0xd5,0x24,0x54,0x04,0x08,0x00,0xd3,0x10,0x52,0x04,0x08,0x00,
++	0x51,0x04,0x08,0x00,0x10,0x04,0x08,0x00,0x09,0x00,0x92,0x0c,0x51,0x04,0x09,0x00,
++	0x10,0x04,0x09,0x00,0x0a,0x00,0x0a,0x00,0x94,0x10,0x93,0x0c,0x92,0x08,0x11,0x04,
++	0x09,0x00,0x0a,0x00,0x0a,0x00,0x0a,0x00,0x0a,0x00,0xcf,0x06,0x0a,0x00,0xd0,0x5e,
++	0xcf,0x86,0xd5,0x28,0xd4,0x18,0x53,0x04,0x0a,0x00,0x52,0x04,0x0a,0x00,0xd1,0x08,
++	0x10,0x04,0x0a,0x00,0x0c,0x00,0x10,0x04,0x0c,0x00,0x11,0x00,0x93,0x0c,0x92,0x08,
++	0x11,0x04,0x0c,0x00,0x0d,0x00,0x10,0x00,0x10,0x00,0xd4,0x1c,0x53,0x04,0x0c,0x00,
++	0xd2,0x0c,0x51,0x04,0x0c,0x00,0x10,0x04,0x0d,0x00,0x10,0x00,0x51,0x04,0x10,0x00,
++	0x10,0x04,0x12,0x00,0x14,0x00,0xd3,0x0c,0x92,0x08,0x11,0x04,0x10,0x00,0x11,0x00,
++	0x11,0x00,0x92,0x08,0x11,0x04,0x14,0x00,0x15,0x00,0x15,0x00,0xcf,0x86,0xd5,0x1c,
++	0x94,0x18,0x93,0x14,0xd2,0x08,0x11,0x04,0x00,0x00,0x15,0x00,0x51,0x04,0x15,0x00,
++	0x10,0x04,0x15,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x54,0x04,0x00,0x00,0xd3,0x10,
++	0x52,0x04,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x10,0x00,0x92,0x0c,
++	0x51,0x04,0x0d,0x00,0x10,0x04,0x0c,0x00,0x0a,0x00,0x0a,0x00,0xe4,0xf2,0x02,0xe3,
++	0x65,0x01,0xd2,0x98,0xd1,0x48,0xd0,0x36,0xcf,0x86,0xd5,0x18,0x94,0x14,0x93,0x10,
++	0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00,0x10,0x04,0x08,0x09,0x08,0x00,0x08,0x00,
++	0x08,0x00,0xd4,0x0c,0x53,0x04,0x08,0x00,0x12,0x04,0x08,0x00,0x00,0x00,0x53,0x04,
++	0x0b,0x00,0x92,0x08,0x11,0x04,0x0b,0x00,0x00,0x00,0x00,0x00,0xcf,0x86,0x55,0x04,
++	0x09,0x00,0x54,0x04,0x09,0x00,0x13,0x04,0x09,0x00,0x00,0x00,0xd0,0x06,0xcf,0x06,
++	0x0a,0x00,0xcf,0x86,0xd5,0x2c,0xd4,0x1c,0xd3,0x10,0x52,0x04,0x0a,0x00,0x91,0x08,
++	0x10,0x04,0x0a,0x09,0x12,0x00,0x00,0x00,0x52,0x04,0x00,0x00,0x11,0x04,0x00,0x00,
++	0x0a,0x00,0x53,0x04,0x0a,0x00,0x92,0x08,0x11,0x04,0x0a,0x00,0x00,0x00,0x00,0x00,
++	0x54,0x04,0x0b,0xe6,0xd3,0x0c,0x92,0x08,0x11,0x04,0x0b,0xe6,0x0b,0x00,0x0b,0x00,
++	0x52,0x04,0x0b,0x00,0x11,0x04,0x11,0x00,0x14,0x00,0xd1,0x60,0xd0,0x22,0xcf,0x86,
++	0x55,0x04,0x0a,0x00,0x94,0x18,0x53,0x04,0x0a,0x00,0xd2,0x0c,0x51,0x04,0x0a,0x00,
++	0x10,0x04,0x0a,0x00,0x0a,0xdc,0x11,0x04,0x0a,0xdc,0x0a,0x00,0x0a,0x00,0xcf,0x86,
++	0xd5,0x24,0x54,0x04,0x0a,0x00,0xd3,0x10,0x92,0x0c,0x51,0x04,0x0a,0x00,0x10,0x04,
++	0x0a,0x00,0x0a,0x09,0x00,0x00,0x52,0x04,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,
++	0x00,0x00,0x0a,0x00,0x54,0x04,0x0b,0x00,0x53,0x04,0x0b,0x00,0x52,0x04,0x0b,0x00,
++	0x91,0x08,0x10,0x04,0x0b,0x00,0x00,0x00,0x00,0x00,0xd0,0x1e,0xcf,0x86,0x55,0x04,
++	0x0b,0x00,0x54,0x04,0x0b,0x00,0x93,0x10,0x92,0x0c,0x51,0x04,0x0b,0x00,0x10,0x04,
++	0x0b,0x00,0x0b,0x07,0x0b,0x00,0x0b,0x00,0xcf,0x86,0xd5,0x34,0xd4,0x20,0xd3,0x10,
++	0x92,0x0c,0x91,0x08,0x10,0x04,0x0b,0x09,0x0b,0x00,0x0b,0x00,0x0b,0x00,0x52,0x04,
++	0x0b,0x00,0x51,0x04,0x0b,0x00,0x10,0x04,0x00,0x00,0x0b,0x00,0x53,0x04,0x0b,0x00,
++	0xd2,0x08,0x11,0x04,0x0b,0x00,0x00,0x00,0x11,0x04,0x00,0x00,0x0b,0x00,0x54,0x04,
++	0x10,0x00,0x53,0x04,0x10,0x00,0x52,0x04,0x10,0x00,0x51,0x04,0x10,0x00,0x10,0x04,
++	0x10,0x00,0x00,0x00,0xd2,0xd0,0xd1,0x50,0xd0,0x1e,0xcf,0x86,0x55,0x04,0x0a,0x00,
++	0x54,0x04,0x0a,0x00,0x93,0x10,0x52,0x04,0x0a,0x00,0x51,0x04,0x0a,0x00,0x10,0x04,
++	0x0a,0x00,0x00,0x00,0x00,0x00,0xcf,0x86,0xd5,0x20,0xd4,0x10,0x53,0x04,0x0a,0x00,
++	0x52,0x04,0x0a,0x00,0x11,0x04,0x0a,0x00,0x00,0x00,0x53,0x04,0x0a,0x00,0x92,0x08,
++	0x11,0x04,0x0a,0x00,0x00,0x00,0x0a,0x00,0x54,0x04,0x0b,0x00,0x53,0x04,0x0b,0x00,
++	0x12,0x04,0x0b,0x00,0x10,0x00,0xd0,0x3a,0xcf,0x86,0x55,0x04,0x0b,0x00,0x54,0x04,
++	0x0b,0x00,0xd3,0x1c,0xd2,0x0c,0x91,0x08,0x10,0x04,0x0b,0xe6,0x0b,0x00,0x0b,0xe6,
++	0xd1,0x08,0x10,0x04,0x0b,0xdc,0x0b,0x00,0x10,0x04,0x0b,0x00,0x0b,0xe6,0xd2,0x0c,
++	0x91,0x08,0x10,0x04,0x0b,0xe6,0x0b,0x00,0x0b,0x00,0x11,0x04,0x0b,0x00,0x0b,0xe6,
++	0xcf,0x86,0xd5,0x2c,0xd4,0x18,0x93,0x14,0x92,0x10,0xd1,0x08,0x10,0x04,0x0b,0x00,
++	0x0b,0xe6,0x10,0x04,0x0b,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x53,0x04,0x00,0x00,
++	0x92,0x0c,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x0b,0x00,0x0b,0x00,0x54,0x04,
++	0x0d,0x00,0x93,0x10,0x52,0x04,0x0d,0x00,0x51,0x04,0x0d,0x00,0x10,0x04,0x0d,0x09,
++	0x00,0x00,0x00,0x00,0xd1,0x8c,0xd0,0x72,0xcf,0x86,0xd5,0x4c,0xd4,0x30,0xd3,0x18,
+ 	0xd2,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,0x0c,0x00,0x0c,0x00,0x51,0x04,0x0c,0x00,
+-	0x10,0x04,0x0c,0x00,0x00,0x00,0x93,0x18,0xd2,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,
+-	0x0c,0x00,0x0c,0x00,0x51,0x04,0x0c,0x00,0x10,0x04,0x0c,0x00,0x00,0x00,0x00,0x00,
+-	0x94,0x20,0xd3,0x10,0x52,0x04,0x0c,0x00,0x51,0x04,0x0c,0x00,0x10,0x04,0x0c,0x00,
+-	0x00,0x00,0x52,0x04,0x0c,0x00,0x51,0x04,0x0c,0x00,0x10,0x04,0x0c,0x00,0x00,0x00,
+-	0x10,0x00,0xcf,0x86,0x55,0x04,0x10,0x00,0x94,0x10,0x93,0x0c,0x52,0x04,0x11,0x00,
+-	0x11,0x04,0x10,0x00,0x15,0x00,0x00,0x00,0x11,0x00,0xd0,0x06,0xcf,0x06,0x11,0x00,
+-	0xcf,0x86,0x55,0x04,0x0b,0x00,0xd4,0x14,0x53,0x04,0x0b,0x00,0x52,0x04,0x0b,0x00,
+-	0x91,0x08,0x10,0x04,0x0b,0x00,0x0b,0x09,0x00,0x00,0x53,0x04,0x0b,0x00,0x92,0x08,
+-	0x11,0x04,0x0b,0x00,0x00,0x00,0x00,0x00,0xcf,0x06,0x02,0xff,0xff,0xcf,0x86,0xcf,
+-	0x06,0x02,0xff,0xff,0xd1,0x76,0xd0,0x09,0xcf,0x86,0xcf,0x06,0x02,0xff,0xff,0xcf,
+-	0x86,0x85,0xd4,0x07,0xcf,0x06,0x02,0xff,0xff,0xd3,0x07,0xcf,0x06,0x02,0xff,0xff,
+-	0xd2,0x07,0xcf,0x06,0x02,0xff,0xff,0xd1,0x07,0xcf,0x06,0x02,0xff,0xff,0xd0,0x18,
+-	0xcf,0x86,0x55,0x05,0x02,0xff,0xff,0x94,0x0d,0x93,0x09,0x12,0x05,0x02,0xff,0xff,
+-	0x00,0x00,0x00,0x00,0x0b,0x00,0xcf,0x86,0xd5,0x24,0x94,0x20,0xd3,0x10,0x52,0x04,
+-	0x0b,0x00,0x51,0x04,0x0b,0x00,0x10,0x04,0x0b,0x00,0x00,0x00,0x92,0x0c,0x51,0x04,
+-	0x00,0x00,0x10,0x04,0x00,0x00,0x0b,0x00,0x0b,0x00,0x0b,0x00,0x54,0x04,0x0b,0x00,
+-	0x53,0x04,0x0b,0x00,0x12,0x04,0x0b,0x00,0x00,0x00,0xd0,0x08,0xcf,0x86,0xcf,0x06,
+-	0x01,0x00,0xcf,0x86,0xd5,0x06,0xcf,0x06,0x01,0x00,0xe4,0x9c,0x10,0xe3,0x16,0x08,
+-	0xd2,0x06,0xcf,0x06,0x01,0x00,0xe1,0x08,0x04,0xe0,0x04,0x02,0xcf,0x86,0xe5,0x01,
+-	0x01,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe8,0xb1,0x88,
+-	0x00,0x01,0xff,0xe6,0x9b,0xb4,0x00,0x10,0x08,0x01,0xff,0xe8,0xbb,0x8a,0x00,0x01,
+-	0xff,0xe8,0xb3,0x88,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe6,0xbb,0x91,0x00,0x01,
+-	0xff,0xe4,0xb8,0xb2,0x00,0x10,0x08,0x01,0xff,0xe5,0x8f,0xa5,0x00,0x01,0xff,0xe9,
+-	0xbe,0x9c,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe9,0xbe,0x9c,0x00,0x01,
+-	0xff,0xe5,0xa5,0x91,0x00,0x10,0x08,0x01,0xff,0xe9,0x87,0x91,0x00,0x01,0xff,0xe5,
+-	0x96,0x87,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe5,0xa5,0x88,0x00,0x01,0xff,0xe6,
+-	0x87,0xb6,0x00,0x10,0x08,0x01,0xff,0xe7,0x99,0xa9,0x00,0x01,0xff,0xe7,0xbe,0x85,
+-	0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe8,0x98,0xbf,0x00,0x01,
+-	0xff,0xe8,0x9e,0xba,0x00,0x10,0x08,0x01,0xff,0xe8,0xa3,0xb8,0x00,0x01,0xff,0xe9,
+-	0x82,0x8f,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe6,0xa8,0x82,0x00,0x01,0xff,0xe6,
+-	0xb4,0x9b,0x00,0x10,0x08,0x01,0xff,0xe7,0x83,0x99,0x00,0x01,0xff,0xe7,0x8f,0x9e,
+-	0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe8,0x90,0xbd,0x00,0x01,0xff,0xe9,
+-	0x85,0xaa,0x00,0x10,0x08,0x01,0xff,0xe9,0xa7,0xb1,0x00,0x01,0xff,0xe4,0xba,0x82,
+-	0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe5,0x8d,0xb5,0x00,0x01,0xff,0xe6,0xac,0x84,
+-	0x00,0x10,0x08,0x01,0xff,0xe7,0x88,0x9b,0x00,0x01,0xff,0xe8,0x98,0xad,0x00,0xd4,
+-	0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe9,0xb8,0x9e,0x00,0x01,
+-	0xff,0xe5,0xb5,0x90,0x00,0x10,0x08,0x01,0xff,0xe6,0xbf,0xab,0x00,0x01,0xff,0xe8,
+-	0x97,0x8d,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe8,0xa5,0xa4,0x00,0x01,0xff,0xe6,
+-	0x8b,0x89,0x00,0x10,0x08,0x01,0xff,0xe8,0x87,0x98,0x00,0x01,0xff,0xe8,0xa0,0x9f,
+-	0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe5,0xbb,0x8a,0x00,0x01,0xff,0xe6,
+-	0x9c,0x97,0x00,0x10,0x08,0x01,0xff,0xe6,0xb5,0xaa,0x00,0x01,0xff,0xe7,0x8b,0xbc,
+-	0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe9,0x83,0x8e,0x00,0x01,0xff,0xe4,0xbe,0x86,
+-	0x00,0x10,0x08,0x01,0xff,0xe5,0x86,0xb7,0x00,0x01,0xff,0xe5,0x8b,0x9e,0x00,0xd3,
+-	0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe6,0x93,0x84,0x00,0x01,0xff,0xe6,
+-	0xab,0x93,0x00,0x10,0x08,0x01,0xff,0xe7,0x88,0x90,0x00,0x01,0xff,0xe7,0x9b,0xa7,
+-	0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe8,0x80,0x81,0x00,0x01,0xff,0xe8,0x98,0x86,
+-	0x00,0x10,0x08,0x01,0xff,0xe8,0x99,0x9c,0x00,0x01,0xff,0xe8,0xb7,0xaf,0x00,0xd2,
+-	0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe9,0x9c,0xb2,0x00,0x01,0xff,0xe9,0xad,0xaf,
+-	0x00,0x10,0x08,0x01,0xff,0xe9,0xb7,0xba,0x00,0x01,0xff,0xe7,0xa2,0x8c,0x00,0xd1,
+-	0x10,0x10,0x08,0x01,0xff,0xe7,0xa5,0xbf,0x00,0x01,0xff,0xe7,0xb6,0xa0,0x00,0x10,
+-	0x08,0x01,0xff,0xe8,0x8f,0x89,0x00,0x01,0xff,0xe9,0x8c,0x84,0x00,0xcf,0x86,0xe5,
+-	0x01,0x01,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe9,0xb9,
+-	0xbf,0x00,0x01,0xff,0xe8,0xab,0x96,0x00,0x10,0x08,0x01,0xff,0xe5,0xa3,0x9f,0x00,
+-	0x01,0xff,0xe5,0xbc,0x84,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe7,0xb1,0xa0,0x00,
+-	0x01,0xff,0xe8,0x81,0xbe,0x00,0x10,0x08,0x01,0xff,0xe7,0x89,0xa2,0x00,0x01,0xff,
+-	0xe7,0xa3,0x8a,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe8,0xb3,0x82,0x00,
+-	0x01,0xff,0xe9,0x9b,0xb7,0x00,0x10,0x08,0x01,0xff,0xe5,0xa3,0x98,0x00,0x01,0xff,
+-	0xe5,0xb1,0xa2,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe6,0xa8,0x93,0x00,0x01,0xff,
+-	0xe6,0xb7,0x9a,0x00,0x10,0x08,0x01,0xff,0xe6,0xbc,0x8f,0x00,0x01,0xff,0xe7,0xb4,
+-	0xaf,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe7,0xb8,0xb7,0x00,
+-	0x01,0xff,0xe9,0x99,0x8b,0x00,0x10,0x08,0x01,0xff,0xe5,0x8b,0x92,0x00,0x01,0xff,
+-	0xe8,0x82,0x8b,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe5,0x87,0x9c,0x00,0x01,0xff,
+-	0xe5,0x87,0x8c,0x00,0x10,0x08,0x01,0xff,0xe7,0xa8,0x9c,0x00,0x01,0xff,0xe7,0xb6,
+-	0xbe,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe8,0x8f,0xb1,0x00,0x01,0xff,
+-	0xe9,0x99,0xb5,0x00,0x10,0x08,0x01,0xff,0xe8,0xae,0x80,0x00,0x01,0xff,0xe6,0x8b,
+-	0x8f,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe6,0xa8,0x82,0x00,0x01,0xff,0xe8,0xab,
+-	0xbe,0x00,0x10,0x08,0x01,0xff,0xe4,0xb8,0xb9,0x00,0x01,0xff,0xe5,0xaf,0xa7,0x00,
+-	0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe6,0x80,0x92,0x00,
+-	0x01,0xff,0xe7,0x8e,0x87,0x00,0x10,0x08,0x01,0xff,0xe7,0x95,0xb0,0x00,0x01,0xff,
+-	0xe5,0x8c,0x97,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe7,0xa3,0xbb,0x00,0x01,0xff,
+-	0xe4,0xbe,0xbf,0x00,0x10,0x08,0x01,0xff,0xe5,0xbe,0xa9,0x00,0x01,0xff,0xe4,0xb8,
+-	0x8d,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe6,0xb3,0x8c,0x00,0x01,0xff,
+-	0xe6,0x95,0xb8,0x00,0x10,0x08,0x01,0xff,0xe7,0xb4,0xa2,0x00,0x01,0xff,0xe5,0x8f,
+-	0x83,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe5,0xa1,0x9e,0x00,0x01,0xff,0xe7,0x9c,
+-	0x81,0x00,0x10,0x08,0x01,0xff,0xe8,0x91,0x89,0x00,0x01,0xff,0xe8,0xaa,0xaa,0x00,
+-	0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe6,0xae,0xba,0x00,0x01,0xff,
+-	0xe8,0xbe,0xb0,0x00,0x10,0x08,0x01,0xff,0xe6,0xb2,0x88,0x00,0x01,0xff,0xe6,0x8b,
+-	0xbe,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe8,0x8b,0xa5,0x00,0x01,0xff,0xe6,0x8e,
+-	0xa0,0x00,0x10,0x08,0x01,0xff,0xe7,0x95,0xa5,0x00,0x01,0xff,0xe4,0xba,0xae,0x00,
+-	0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe5,0x85,0xa9,0x00,0x01,0xff,0xe5,0x87,
+-	0x89,0x00,0x10,0x08,0x01,0xff,0xe6,0xa2,0x81,0x00,0x01,0xff,0xe7,0xb3,0xa7,0x00,
+-	0xd1,0x10,0x10,0x08,0x01,0xff,0xe8,0x89,0xaf,0x00,0x01,0xff,0xe8,0xab,0x92,0x00,
+-	0x10,0x08,0x01,0xff,0xe9,0x87,0x8f,0x00,0x01,0xff,0xe5,0x8b,0xb5,0x00,0xe0,0x04,
+-	0x02,0xcf,0x86,0xe5,0x01,0x01,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,
+-	0x01,0xff,0xe5,0x91,0x82,0x00,0x01,0xff,0xe5,0xa5,0xb3,0x00,0x10,0x08,0x01,0xff,
+-	0xe5,0xbb,0xac,0x00,0x01,0xff,0xe6,0x97,0x85,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,
+-	0xe6,0xbf,0xbe,0x00,0x01,0xff,0xe7,0xa4,0xaa,0x00,0x10,0x08,0x01,0xff,0xe9,0x96,
+-	0xad,0x00,0x01,0xff,0xe9,0xa9,0xaa,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,
+-	0xe9,0xba,0x97,0x00,0x01,0xff,0xe9,0xbb,0x8e,0x00,0x10,0x08,0x01,0xff,0xe5,0x8a,
+-	0x9b,0x00,0x01,0xff,0xe6,0x9b,0x86,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe6,0xad,
+-	0xb7,0x00,0x01,0xff,0xe8,0xbd,0xa2,0x00,0x10,0x08,0x01,0xff,0xe5,0xb9,0xb4,0x00,
+-	0x01,0xff,0xe6,0x86,0x90,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,
+-	0xe6,0x88,0x80,0x00,0x01,0xff,0xe6,0x92,0x9a,0x00,0x10,0x08,0x01,0xff,0xe6,0xbc,
+-	0xa3,0x00,0x01,0xff,0xe7,0x85,0x89,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe7,0x92,
+-	0x89,0x00,0x01,0xff,0xe7,0xa7,0x8a,0x00,0x10,0x08,0x01,0xff,0xe7,0xb7,0xb4,0x00,
+-	0x01,0xff,0xe8,0x81,0xaf,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe8,0xbc,
+-	0xa6,0x00,0x01,0xff,0xe8,0x93,0xae,0x00,0x10,0x08,0x01,0xff,0xe9,0x80,0xa3,0x00,
+-	0x01,0xff,0xe9,0x8d,0x8a,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe5,0x88,0x97,0x00,
+-	0x01,0xff,0xe5,0x8a,0xa3,0x00,0x10,0x08,0x01,0xff,0xe5,0x92,0xbd,0x00,0x01,0xff,
+-	0xe7,0x83,0x88,0x00,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,
+-	0xe8,0xa3,0x82,0x00,0x01,0xff,0xe8,0xaa,0xaa,0x00,0x10,0x08,0x01,0xff,0xe5,0xbb,
+-	0x89,0x00,0x01,0xff,0xe5,0xbf,0xb5,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe6,0x8d,
+-	0xbb,0x00,0x01,0xff,0xe6,0xae,0xae,0x00,0x10,0x08,0x01,0xff,0xe7,0xb0,0xbe,0x00,
+-	0x01,0xff,0xe7,0x8d,0xb5,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe4,0xbb,
+-	0xa4,0x00,0x01,0xff,0xe5,0x9b,0xb9,0x00,0x10,0x08,0x01,0xff,0xe5,0xaf,0xa7,0x00,
+-	0x01,0xff,0xe5,0xb6,0xba,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe6,0x80,0x9c,0x00,
+-	0x01,0xff,0xe7,0x8e,0xb2,0x00,0x10,0x08,0x01,0xff,0xe7,0x91,0xa9,0x00,0x01,0xff,
+-	0xe7,0xbe,0x9a,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe8,0x81,
+-	0x86,0x00,0x01,0xff,0xe9,0x88,0xb4,0x00,0x10,0x08,0x01,0xff,0xe9,0x9b,0xb6,0x00,
+-	0x01,0xff,0xe9,0x9d,0x88,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe9,0xa0,0x98,0x00,
+-	0x01,0xff,0xe4,0xbe,0x8b,0x00,0x10,0x08,0x01,0xff,0xe7,0xa6,0xae,0x00,0x01,0xff,
+-	0xe9,0x86,0xb4,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe9,0x9a,0xb8,0x00,
+-	0x01,0xff,0xe6,0x83,0xa1,0x00,0x10,0x08,0x01,0xff,0xe4,0xba,0x86,0x00,0x01,0xff,
+-	0xe5,0x83,0x9a,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe5,0xaf,0xae,0x00,0x01,0xff,
+-	0xe5,0xb0,0xbf,0x00,0x10,0x08,0x01,0xff,0xe6,0x96,0x99,0x00,0x01,0xff,0xe6,0xa8,
+-	0x82,0x00,0xcf,0x86,0xe5,0x01,0x01,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,
+-	0x08,0x01,0xff,0xe7,0x87,0x8e,0x00,0x01,0xff,0xe7,0x99,0x82,0x00,0x10,0x08,0x01,
+-	0xff,0xe8,0x93,0xbc,0x00,0x01,0xff,0xe9,0x81,0xbc,0x00,0xd1,0x10,0x10,0x08,0x01,
+-	0xff,0xe9,0xbe,0x8d,0x00,0x01,0xff,0xe6,0x9a,0x88,0x00,0x10,0x08,0x01,0xff,0xe9,
+-	0x98,0xae,0x00,0x01,0xff,0xe5,0x8a,0x89,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,
+-	0xff,0xe6,0x9d,0xbb,0x00,0x01,0xff,0xe6,0x9f,0xb3,0x00,0x10,0x08,0x01,0xff,0xe6,
+-	0xb5,0x81,0x00,0x01,0xff,0xe6,0xba,0x9c,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe7,
+-	0x90,0x89,0x00,0x01,0xff,0xe7,0x95,0x99,0x00,0x10,0x08,0x01,0xff,0xe7,0xa1,0xab,
+-	0x00,0x01,0xff,0xe7,0xb4,0x90,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,
+-	0xff,0xe9,0xa1,0x9e,0x00,0x01,0xff,0xe5,0x85,0xad,0x00,0x10,0x08,0x01,0xff,0xe6,
+-	0x88,0xae,0x00,0x01,0xff,0xe9,0x99,0xb8,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe5,
+-	0x80,0xab,0x00,0x01,0xff,0xe5,0xb4,0x99,0x00,0x10,0x08,0x01,0xff,0xe6,0xb7,0xaa,
+-	0x00,0x01,0xff,0xe8,0xbc,0xaa,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe5,
+-	0xbe,0x8b,0x00,0x01,0xff,0xe6,0x85,0x84,0x00,0x10,0x08,0x01,0xff,0xe6,0xa0,0x97,
+-	0x00,0x01,0xff,0xe7,0x8e,0x87,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe9,0x9a,0x86,
+-	0x00,0x01,0xff,0xe5,0x88,0xa9,0x00,0x10,0x08,0x01,0xff,0xe5,0x90,0x8f,0x00,0x01,
+-	0xff,0xe5,0xb1,0xa5,0x00,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,
+-	0xff,0xe6,0x98,0x93,0x00,0x01,0xff,0xe6,0x9d,0x8e,0x00,0x10,0x08,0x01,0xff,0xe6,
+-	0xa2,0xa8,0x00,0x01,0xff,0xe6,0xb3,0xa5,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe7,
+-	0x90,0x86,0x00,0x01,0xff,0xe7,0x97,0xa2,0x00,0x10,0x08,0x01,0xff,0xe7,0xbd,0xb9,
+-	0x00,0x01,0xff,0xe8,0xa3,0x8f,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe8,
+-	0xa3,0xa1,0x00,0x01,0xff,0xe9,0x87,0x8c,0x00,0x10,0x08,0x01,0xff,0xe9,0x9b,0xa2,
+-	0x00,0x01,0xff,0xe5,0x8c,0xbf,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe6,0xba,0xba,
+-	0x00,0x01,0xff,0xe5,0x90,0x9d,0x00,0x10,0x08,0x01,0xff,0xe7,0x87,0x90,0x00,0x01,
+-	0xff,0xe7,0x92,0x98,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe8,
+-	0x97,0xba,0x00,0x01,0xff,0xe9,0x9a,0xa3,0x00,0x10,0x08,0x01,0xff,0xe9,0xb1,0x97,
+-	0x00,0x01,0xff,0xe9,0xba,0x9f,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe6,0x9e,0x97,
+-	0x00,0x01,0xff,0xe6,0xb7,0x8b,0x00,0x10,0x08,0x01,0xff,0xe8,0x87,0xa8,0x00,0x01,
+-	0xff,0xe7,0xab,0x8b,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe7,0xac,0xa0,
+-	0x00,0x01,0xff,0xe7,0xb2,0x92,0x00,0x10,0x08,0x01,0xff,0xe7,0x8b,0x80,0x00,0x01,
+-	0xff,0xe7,0x82,0x99,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe8,0xad,0x98,0x00,0x01,
+-	0xff,0xe4,0xbb,0x80,0x00,0x10,0x08,0x01,0xff,0xe8,0x8c,0xb6,0x00,0x01,0xff,0xe5,
+-	0x88,0xba,0x00,0xe2,0xad,0x06,0xe1,0xc4,0x03,0xe0,0xcb,0x01,0xcf,0x86,0xd5,0xe4,
+-	0xd4,0x74,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe5,0x88,0x87,0x00,
+-	0x01,0xff,0xe5,0xba,0xa6,0x00,0x10,0x08,0x01,0xff,0xe6,0x8b,0x93,0x00,0x01,0xff,
+-	0xe7,0xb3,0x96,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe5,0xae,0x85,0x00,0x01,0xff,
+-	0xe6,0xb4,0x9e,0x00,0x10,0x08,0x01,0xff,0xe6,0x9a,0xb4,0x00,0x01,0xff,0xe8,0xbc,
+-	0xbb,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe8,0xa1,0x8c,0x00,0x01,0xff,
+-	0xe9,0x99,0x8d,0x00,0x10,0x08,0x01,0xff,0xe8,0xa6,0x8b,0x00,0x01,0xff,0xe5,0xbb,
+-	0x93,0x00,0x91,0x10,0x10,0x08,0x01,0xff,0xe5,0x85,0x80,0x00,0x01,0xff,0xe5,0x97,
+-	0x80,0x00,0x01,0x00,0xd3,0x34,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x01,0xff,0xe5,0xa1,
+-	0x9a,0x00,0x01,0x00,0x10,0x08,0x01,0xff,0xe6,0x99,0xb4,0x00,0x01,0x00,0xd1,0x0c,
+-	0x10,0x04,0x01,0x00,0x01,0xff,0xe5,0x87,0x9e,0x00,0x10,0x08,0x01,0xff,0xe7,0x8c,
+-	0xaa,0x00,0x01,0xff,0xe7,0x9b,0x8a,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,
+-	0xe7,0xa4,0xbc,0x00,0x01,0xff,0xe7,0xa5,0x9e,0x00,0x10,0x08,0x01,0xff,0xe7,0xa5,
+-	0xa5,0x00,0x01,0xff,0xe7,0xa6,0x8f,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe9,0x9d,
+-	0x96,0x00,0x01,0xff,0xe7,0xb2,0xbe,0x00,0x10,0x08,0x01,0xff,0xe7,0xbe,0xbd,0x00,
+-	0x01,0x00,0xd4,0x64,0xd3,0x30,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x01,0xff,0xe8,0x98,
+-	0x92,0x00,0x01,0x00,0x10,0x08,0x01,0xff,0xe8,0xab,0xb8,0x00,0x01,0x00,0xd1,0x0c,
+-	0x10,0x04,0x01,0x00,0x01,0xff,0xe9,0x80,0xb8,0x00,0x10,0x08,0x01,0xff,0xe9,0x83,
+-	0xbd,0x00,0x01,0x00,0xd2,0x14,0x51,0x04,0x01,0x00,0x10,0x08,0x01,0xff,0xe9,0xa3,
+-	0xaf,0x00,0x01,0xff,0xe9,0xa3,0xbc,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe9,0xa4,
+-	0xa8,0x00,0x01,0xff,0xe9,0xb6,0xb4,0x00,0x10,0x08,0x0d,0xff,0xe9,0x83,0x9e,0x00,
+-	0x0d,0xff,0xe9,0x9a,0xb7,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x06,0xff,
+-	0xe4,0xbe,0xae,0x00,0x06,0xff,0xe5,0x83,0xa7,0x00,0x10,0x08,0x06,0xff,0xe5,0x85,
+-	0x8d,0x00,0x06,0xff,0xe5,0x8b,0x89,0x00,0xd1,0x10,0x10,0x08,0x06,0xff,0xe5,0x8b,
+-	0xa4,0x00,0x06,0xff,0xe5,0x8d,0x91,0x00,0x10,0x08,0x06,0xff,0xe5,0x96,0x9d,0x00,
+-	0x06,0xff,0xe5,0x98,0x86,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x06,0xff,0xe5,0x99,
+-	0xa8,0x00,0x06,0xff,0xe5,0xa1,0x80,0x00,0x10,0x08,0x06,0xff,0xe5,0xa2,0xa8,0x00,
+-	0x06,0xff,0xe5,0xb1,0xa4,0x00,0xd1,0x10,0x10,0x08,0x06,0xff,0xe5,0xb1,0xae,0x00,
+-	0x06,0xff,0xe6,0x82,0x94,0x00,0x10,0x08,0x06,0xff,0xe6,0x85,0xa8,0x00,0x06,0xff,
+-	0xe6,0x86,0x8e,0x00,0xcf,0x86,0xe5,0x01,0x01,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,
+-	0x10,0x10,0x08,0x06,0xff,0xe6,0x87,0xb2,0x00,0x06,0xff,0xe6,0x95,0x8f,0x00,0x10,
+-	0x08,0x06,0xff,0xe6,0x97,0xa2,0x00,0x06,0xff,0xe6,0x9a,0x91,0x00,0xd1,0x10,0x10,
+-	0x08,0x06,0xff,0xe6,0xa2,0x85,0x00,0x06,0xff,0xe6,0xb5,0xb7,0x00,0x10,0x08,0x06,
+-	0xff,0xe6,0xb8,0x9a,0x00,0x06,0xff,0xe6,0xbc,0xa2,0x00,0xd2,0x20,0xd1,0x10,0x10,
+-	0x08,0x06,0xff,0xe7,0x85,0xae,0x00,0x06,0xff,0xe7,0x88,0xab,0x00,0x10,0x08,0x06,
+-	0xff,0xe7,0x90,0xa2,0x00,0x06,0xff,0xe7,0xa2,0x91,0x00,0xd1,0x10,0x10,0x08,0x06,
+-	0xff,0xe7,0xa4,0xbe,0x00,0x06,0xff,0xe7,0xa5,0x89,0x00,0x10,0x08,0x06,0xff,0xe7,
+-	0xa5,0x88,0x00,0x06,0xff,0xe7,0xa5,0x90,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,
+-	0x08,0x06,0xff,0xe7,0xa5,0x96,0x00,0x06,0xff,0xe7,0xa5,0x9d,0x00,0x10,0x08,0x06,
+-	0xff,0xe7,0xa6,0x8d,0x00,0x06,0xff,0xe7,0xa6,0x8e,0x00,0xd1,0x10,0x10,0x08,0x06,
+-	0xff,0xe7,0xa9,0x80,0x00,0x06,0xff,0xe7,0xaa,0x81,0x00,0x10,0x08,0x06,0xff,0xe7,
+-	0xaf,0x80,0x00,0x06,0xff,0xe7,0xb7,0xb4,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x06,
+-	0xff,0xe7,0xb8,0x89,0x00,0x06,0xff,0xe7,0xb9,0x81,0x00,0x10,0x08,0x06,0xff,0xe7,
+-	0xbd,0xb2,0x00,0x06,0xff,0xe8,0x80,0x85,0x00,0xd1,0x10,0x10,0x08,0x06,0xff,0xe8,
+-	0x87,0xad,0x00,0x06,0xff,0xe8,0x89,0xb9,0x00,0x10,0x08,0x06,0xff,0xe8,0x89,0xb9,
+-	0x00,0x06,0xff,0xe8,0x91,0x97,0x00,0xd4,0x75,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,
+-	0x08,0x06,0xff,0xe8,0xa4,0x90,0x00,0x06,0xff,0xe8,0xa6,0x96,0x00,0x10,0x08,0x06,
+-	0xff,0xe8,0xac,0x81,0x00,0x06,0xff,0xe8,0xac,0xb9,0x00,0xd1,0x10,0x10,0x08,0x06,
+-	0xff,0xe8,0xb3,0x93,0x00,0x06,0xff,0xe8,0xb4,0x88,0x00,0x10,0x08,0x06,0xff,0xe8,
+-	0xbe,0xb6,0x00,0x06,0xff,0xe9,0x80,0xb8,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x06,
+-	0xff,0xe9,0x9b,0xa3,0x00,0x06,0xff,0xe9,0x9f,0xbf,0x00,0x10,0x08,0x06,0xff,0xe9,
+-	0xa0,0xbb,0x00,0x0b,0xff,0xe6,0x81,0xb5,0x00,0x91,0x11,0x10,0x09,0x0b,0xff,0xf0,
+-	0xa4,0x8b,0xae,0x00,0x0b,0xff,0xe8,0x88,0x98,0x00,0x00,0x00,0xd3,0x40,0xd2,0x20,
+-	0xd1,0x10,0x10,0x08,0x08,0xff,0xe4,0xb8,0xa6,0x00,0x08,0xff,0xe5,0x86,0xb5,0x00,
+-	0x10,0x08,0x08,0xff,0xe5,0x85,0xa8,0x00,0x08,0xff,0xe4,0xbe,0x80,0x00,0xd1,0x10,
+-	0x10,0x08,0x08,0xff,0xe5,0x85,0x85,0x00,0x08,0xff,0xe5,0x86,0x80,0x00,0x10,0x08,
+-	0x08,0xff,0xe5,0x8b,0x87,0x00,0x08,0xff,0xe5,0x8b,0xba,0x00,0xd2,0x20,0xd1,0x10,
+-	0x10,0x08,0x08,0xff,0xe5,0x96,0x9d,0x00,0x08,0xff,0xe5,0x95,0x95,0x00,0x10,0x08,
+-	0x08,0xff,0xe5,0x96,0x99,0x00,0x08,0xff,0xe5,0x97,0xa2,0x00,0xd1,0x10,0x10,0x08,
+-	0x08,0xff,0xe5,0xa1,0x9a,0x00,0x08,0xff,0xe5,0xa2,0xb3,0x00,0x10,0x08,0x08,0xff,
+-	0xe5,0xa5,0x84,0x00,0x08,0xff,0xe5,0xa5,0x94,0x00,0xe0,0x04,0x02,0xcf,0x86,0xe5,
+-	0x01,0x01,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x08,0xff,0xe5,0xa9,
+-	0xa2,0x00,0x08,0xff,0xe5,0xac,0xa8,0x00,0x10,0x08,0x08,0xff,0xe5,0xbb,0x92,0x00,
+-	0x08,0xff,0xe5,0xbb,0x99,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,0xe5,0xbd,0xa9,0x00,
+-	0x08,0xff,0xe5,0xbe,0xad,0x00,0x10,0x08,0x08,0xff,0xe6,0x83,0x98,0x00,0x08,0xff,
+-	0xe6,0x85,0x8e,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x08,0xff,0xe6,0x84,0x88,0x00,
+-	0x08,0xff,0xe6,0x86,0x8e,0x00,0x10,0x08,0x08,0xff,0xe6,0x85,0xa0,0x00,0x08,0xff,
+-	0xe6,0x87,0xb2,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,0xe6,0x88,0xb4,0x00,0x08,0xff,
+-	0xe6,0x8f,0x84,0x00,0x10,0x08,0x08,0xff,0xe6,0x90,0x9c,0x00,0x08,0xff,0xe6,0x91,
+-	0x92,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x08,0xff,0xe6,0x95,0x96,0x00,
+-	0x08,0xff,0xe6,0x99,0xb4,0x00,0x10,0x08,0x08,0xff,0xe6,0x9c,0x97,0x00,0x08,0xff,
+-	0xe6,0x9c,0x9b,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,0xe6,0x9d,0x96,0x00,0x08,0xff,
+-	0xe6,0xad,0xb9,0x00,0x10,0x08,0x08,0xff,0xe6,0xae,0xba,0x00,0x08,0xff,0xe6,0xb5,
+-	0x81,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x08,0xff,0xe6,0xbb,0x9b,0x00,0x08,0xff,
+-	0xe6,0xbb,0x8b,0x00,0x10,0x08,0x08,0xff,0xe6,0xbc,0xa2,0x00,0x08,0xff,0xe7,0x80,
+-	0x9e,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,0xe7,0x85,0xae,0x00,0x08,0xff,0xe7,0x9e,
+-	0xa7,0x00,0x10,0x08,0x08,0xff,0xe7,0x88,0xb5,0x00,0x08,0xff,0xe7,0x8a,0xaf,0x00,
+-	0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x08,0xff,0xe7,0x8c,0xaa,0x00,
+-	0x08,0xff,0xe7,0x91,0xb1,0x00,0x10,0x08,0x08,0xff,0xe7,0x94,0x86,0x00,0x08,0xff,
+-	0xe7,0x94,0xbb,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,0xe7,0x98,0x9d,0x00,0x08,0xff,
+-	0xe7,0x98,0x9f,0x00,0x10,0x08,0x08,0xff,0xe7,0x9b,0x8a,0x00,0x08,0xff,0xe7,0x9b,
+-	0x9b,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x08,0xff,0xe7,0x9b,0xb4,0x00,0x08,0xff,
+-	0xe7,0x9d,0x8a,0x00,0x10,0x08,0x08,0xff,0xe7,0x9d,0x80,0x00,0x08,0xff,0xe7,0xa3,
+-	0x8c,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,0xe7,0xaa,0xb1,0x00,0x08,0xff,0xe7,0xaf,
+-	0x80,0x00,0x10,0x08,0x08,0xff,0xe7,0xb1,0xbb,0x00,0x08,0xff,0xe7,0xb5,0x9b,0x00,
+-	0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x08,0xff,0xe7,0xb7,0xb4,0x00,0x08,0xff,
+-	0xe7,0xbc,0xbe,0x00,0x10,0x08,0x08,0xff,0xe8,0x80,0x85,0x00,0x08,0xff,0xe8,0x8d,
+-	0x92,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,0xe8,0x8f,0xaf,0x00,0x08,0xff,0xe8,0x9d,
+-	0xb9,0x00,0x10,0x08,0x08,0xff,0xe8,0xa5,0x81,0x00,0x08,0xff,0xe8,0xa6,0x86,0x00,
+-	0xd2,0x20,0xd1,0x10,0x10,0x08,0x08,0xff,0xe8,0xa6,0x96,0x00,0x08,0xff,0xe8,0xaa,
+-	0xbf,0x00,0x10,0x08,0x08,0xff,0xe8,0xab,0xb8,0x00,0x08,0xff,0xe8,0xab,0x8b,0x00,
+-	0xd1,0x10,0x10,0x08,0x08,0xff,0xe8,0xac,0x81,0x00,0x08,0xff,0xe8,0xab,0xbe,0x00,
+-	0x10,0x08,0x08,0xff,0xe8,0xab,0xad,0x00,0x08,0xff,0xe8,0xac,0xb9,0x00,0xcf,0x86,
+-	0x95,0xde,0xd4,0x81,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x08,0xff,0xe8,0xae,
+-	0x8a,0x00,0x08,0xff,0xe8,0xb4,0x88,0x00,0x10,0x08,0x08,0xff,0xe8,0xbc,0xb8,0x00,
+-	0x08,0xff,0xe9,0x81,0xb2,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,0xe9,0x86,0x99,0x00,
+-	0x08,0xff,0xe9,0x89,0xb6,0x00,0x10,0x08,0x08,0xff,0xe9,0x99,0xbc,0x00,0x08,0xff,
+-	0xe9,0x9b,0xa3,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x08,0xff,0xe9,0x9d,0x96,0x00,
+-	0x08,0xff,0xe9,0x9f,0x9b,0x00,0x10,0x08,0x08,0xff,0xe9,0x9f,0xbf,0x00,0x08,0xff,
+-	0xe9,0xa0,0x8b,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,0xe9,0xa0,0xbb,0x00,0x08,0xff,
+-	0xe9,0xac,0x92,0x00,0x10,0x08,0x08,0xff,0xe9,0xbe,0x9c,0x00,0x08,0xff,0xf0,0xa2,
+-	0xa1,0x8a,0x00,0xd3,0x45,0xd2,0x22,0xd1,0x12,0x10,0x09,0x08,0xff,0xf0,0xa2,0xa1,
+-	0x84,0x00,0x08,0xff,0xf0,0xa3,0x8f,0x95,0x00,0x10,0x08,0x08,0xff,0xe3,0xae,0x9d,
+-	0x00,0x08,0xff,0xe4,0x80,0x98,0x00,0xd1,0x11,0x10,0x08,0x08,0xff,0xe4,0x80,0xb9,
+-	0x00,0x08,0xff,0xf0,0xa5,0x89,0x89,0x00,0x10,0x09,0x08,0xff,0xf0,0xa5,0xb3,0x90,
+-	0x00,0x08,0xff,0xf0,0xa7,0xbb,0x93,0x00,0x92,0x14,0x91,0x10,0x10,0x08,0x08,0xff,
+-	0xe9,0xbd,0x83,0x00,0x08,0xff,0xe9,0xbe,0x8e,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+-	0xe1,0x94,0x01,0xe0,0x08,0x01,0xcf,0x86,0xd5,0x42,0xd4,0x14,0x93,0x10,0x52,0x04,
+-	0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x00,0x00,0xd3,0x10,
+-	0x92,0x0c,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x52,0x04,
+-	0x00,0x00,0xd1,0x0d,0x10,0x04,0x00,0x00,0x04,0xff,0xd7,0x99,0xd6,0xb4,0x00,0x10,
+-	0x04,0x01,0x1a,0x01,0xff,0xd7,0xb2,0xd6,0xb7,0x00,0xd4,0x42,0x53,0x04,0x01,0x00,
+-	0xd2,0x16,0x51,0x04,0x01,0x00,0x10,0x09,0x01,0xff,0xd7,0xa9,0xd7,0x81,0x00,0x01,
+-	0xff,0xd7,0xa9,0xd7,0x82,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xd7,0xa9,0xd6,0xbc,
+-	0xd7,0x81,0x00,0x01,0xff,0xd7,0xa9,0xd6,0xbc,0xd7,0x82,0x00,0x10,0x09,0x01,0xff,
+-	0xd7,0x90,0xd6,0xb7,0x00,0x01,0xff,0xd7,0x90,0xd6,0xb8,0x00,0xd3,0x43,0xd2,0x24,
+-	0xd1,0x12,0x10,0x09,0x01,0xff,0xd7,0x90,0xd6,0xbc,0x00,0x01,0xff,0xd7,0x91,0xd6,
+-	0xbc,0x00,0x10,0x09,0x01,0xff,0xd7,0x92,0xd6,0xbc,0x00,0x01,0xff,0xd7,0x93,0xd6,
+-	0xbc,0x00,0xd1,0x12,0x10,0x09,0x01,0xff,0xd7,0x94,0xd6,0xbc,0x00,0x01,0xff,0xd7,
+-	0x95,0xd6,0xbc,0x00,0x10,0x09,0x01,0xff,0xd7,0x96,0xd6,0xbc,0x00,0x00,0x00,0xd2,
+-	0x24,0xd1,0x12,0x10,0x09,0x01,0xff,0xd7,0x98,0xd6,0xbc,0x00,0x01,0xff,0xd7,0x99,
+-	0xd6,0xbc,0x00,0x10,0x09,0x01,0xff,0xd7,0x9a,0xd6,0xbc,0x00,0x01,0xff,0xd7,0x9b,
+-	0xd6,0xbc,0x00,0xd1,0x0d,0x10,0x09,0x01,0xff,0xd7,0x9c,0xd6,0xbc,0x00,0x00,0x00,
+-	0x10,0x09,0x01,0xff,0xd7,0x9e,0xd6,0xbc,0x00,0x00,0x00,0xcf,0x86,0x95,0x85,0x94,
+-	0x81,0xd3,0x3e,0xd2,0x1f,0xd1,0x12,0x10,0x09,0x01,0xff,0xd7,0xa0,0xd6,0xbc,0x00,
+-	0x01,0xff,0xd7,0xa1,0xd6,0xbc,0x00,0x10,0x04,0x00,0x00,0x01,0xff,0xd7,0xa3,0xd6,
+-	0xbc,0x00,0xd1,0x0d,0x10,0x09,0x01,0xff,0xd7,0xa4,0xd6,0xbc,0x00,0x00,0x00,0x10,
+-	0x09,0x01,0xff,0xd7,0xa6,0xd6,0xbc,0x00,0x01,0xff,0xd7,0xa7,0xd6,0xbc,0x00,0xd2,
+-	0x24,0xd1,0x12,0x10,0x09,0x01,0xff,0xd7,0xa8,0xd6,0xbc,0x00,0x01,0xff,0xd7,0xa9,
+-	0xd6,0xbc,0x00,0x10,0x09,0x01,0xff,0xd7,0xaa,0xd6,0xbc,0x00,0x01,0xff,0xd7,0x95,
+-	0xd6,0xb9,0x00,0xd1,0x12,0x10,0x09,0x01,0xff,0xd7,0x91,0xd6,0xbf,0x00,0x01,0xff,
+-	0xd7,0x9b,0xd6,0xbf,0x00,0x10,0x09,0x01,0xff,0xd7,0xa4,0xd6,0xbf,0x00,0x01,0x00,
+-	0x01,0x00,0x01,0x00,0xd0,0x1a,0xcf,0x86,0x55,0x04,0x01,0x00,0x54,0x04,0x01,0x00,
+-	0x93,0x0c,0x92,0x08,0x11,0x04,0x01,0x00,0x0c,0x00,0x0c,0x00,0x0c,0x00,0xcf,0x86,
+-	0x95,0x24,0xd4,0x10,0x93,0x0c,0x92,0x08,0x11,0x04,0x0c,0x00,0x00,0x00,0x00,0x00,
+-	0x00,0x00,0x93,0x10,0x92,0x0c,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x01,0x00,
+-	0x01,0x00,0x01,0x00,0x01,0x00,0xd3,0x5a,0xd2,0x06,0xcf,0x06,0x01,0x00,0xd1,0x14,
+-	0xd0,0x06,0xcf,0x06,0x01,0x00,0xcf,0x86,0x95,0x08,0x14,0x04,0x00,0x00,0x01,0x00,
+-	0x01,0x00,0xd0,0x1a,0xcf,0x86,0x95,0x14,0x54,0x04,0x01,0x00,0x93,0x0c,0x92,0x08,
+-	0x11,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xcf,0x86,0xd5,0x0c,
+-	0x94,0x08,0x13,0x04,0x01,0x00,0x00,0x00,0x05,0x00,0x54,0x04,0x05,0x00,0x53,0x04,
+-	0x01,0x00,0x52,0x04,0x01,0x00,0x91,0x08,0x10,0x04,0x06,0x00,0x07,0x00,0x00,0x00,
+-	0xd2,0xcc,0xd1,0xa4,0xd0,0x36,0xcf,0x86,0xd5,0x14,0x54,0x04,0x06,0x00,0x53,0x04,
+-	0x08,0x00,0x92,0x08,0x11,0x04,0x08,0x00,0x00,0x00,0x00,0x00,0x94,0x1c,0xd3,0x10,
+-	0x52,0x04,0x01,0xe6,0x51,0x04,0x0a,0xe6,0x10,0x04,0x0a,0xe6,0x10,0xdc,0x52,0x04,
+-	0x10,0xdc,0x11,0x04,0x10,0xdc,0x11,0xe6,0x01,0x00,0xcf,0x86,0xd5,0x38,0xd4,0x24,
+-	0xd3,0x14,0x52,0x04,0x01,0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x06,0x00,0x10,0x04,
+-	0x06,0x00,0x07,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x07,0x00,0x01,0x00,0x01,0x00,
+-	0x01,0x00,0x93,0x10,0x92,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00,
+-	0x01,0x00,0x01,0x00,0xd4,0x18,0xd3,0x10,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,
+-	0x10,0x04,0x01,0x00,0x00,0x00,0x12,0x04,0x01,0x00,0x00,0x00,0x93,0x18,0xd2,0x0c,
+-	0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x06,0x00,0x91,0x08,0x10,0x04,0x01,0x00,
+-	0x00,0x00,0x01,0x00,0x01,0x00,0xd0,0x06,0xcf,0x06,0x01,0x00,0xcf,0x86,0x55,0x04,
+-	0x01,0x00,0x54,0x04,0x01,0x00,0x53,0x04,0x01,0x00,0x52,0x04,0x01,0x00,0xd1,0x08,
+-	0x10,0x04,0x01,0x00,0x00,0x00,0x10,0x04,0x00,0x00,0x01,0x00,0xd1,0x50,0xd0,0x1e,
+-	0xcf,0x86,0x95,0x18,0x94,0x14,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,
+-	0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xcf,0x86,0xd5,0x18,
+-	0x54,0x04,0x01,0x00,0x53,0x04,0x01,0x00,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,
+-	0x10,0x04,0x01,0x00,0x06,0x00,0x94,0x14,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,
+-	0x06,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd0,0x1e,0xcf,0x86,
+-	0x55,0x04,0x01,0x00,0x54,0x04,0x01,0x00,0x53,0x04,0x01,0x00,0x52,0x04,0x01,0x00,
+-	0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00,0xcf,0x86,0xd5,0x38,0xd4,0x18,
+-	0xd3,0x0c,0x92,0x08,0x11,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x92,0x08,0x11,0x04,
+-	0x00,0x00,0x01,0x00,0x01,0x00,0xd3,0x0c,0x92,0x08,0x11,0x04,0x00,0x00,0x01,0x00,
+-	0x01,0x00,0xd2,0x08,0x11,0x04,0x00,0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x01,0x00,
+-	0x00,0x00,0x00,0x00,0xd4,0x20,0xd3,0x10,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,
+-	0x10,0x04,0x01,0x00,0x00,0x00,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,
+-	0x01,0x00,0x00,0x00,0x53,0x04,0x00,0x00,0xd2,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,
+-	0x04,0x00,0x04,0x00,0x91,0x08,0x10,0x04,0x03,0x00,0x01,0x00,0x01,0x00,0x83,0xe2,
+-	0x30,0x3e,0xe1,0x1a,0x3b,0xe0,0x97,0x39,0xcf,0x86,0xe5,0x3b,0x26,0xc4,0xe3,0x16,
+-	0x14,0xe2,0xef,0x11,0xe1,0xd0,0x10,0xe0,0x60,0x07,0xcf,0x86,0xe5,0x53,0x03,0xe4,
+-	0x4c,0x02,0xe3,0x3d,0x01,0xd2,0x94,0xd1,0x70,0xd0,0x4a,0xcf,0x86,0xd5,0x18,0x94,
+-	0x14,0x53,0x04,0x07,0x00,0x52,0x04,0x07,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x07,
+-	0x00,0x07,0x00,0x07,0x00,0xd4,0x14,0x93,0x10,0x52,0x04,0x07,0x00,0x51,0x04,0x07,
+-	0x00,0x10,0x04,0x07,0x00,0x00,0x00,0x07,0x00,0x53,0x04,0x07,0x00,0xd2,0x0c,0x51,
+-	0x04,0x07,0x00,0x10,0x04,0x07,0x00,0x00,0x00,0x51,0x04,0x07,0x00,0x10,0x04,0x00,
+-	0x00,0x07,0x00,0xcf,0x86,0x95,0x20,0xd4,0x10,0x53,0x04,0x07,0x00,0x52,0x04,0x07,
+-	0x00,0x11,0x04,0x07,0x00,0x00,0x00,0x53,0x04,0x07,0x00,0x52,0x04,0x07,0x00,0x11,
+-	0x04,0x07,0x00,0x00,0x00,0x00,0x00,0xd0,0x06,0xcf,0x06,0x07,0x00,0xcf,0x86,0x55,
+-	0x04,0x07,0x00,0x54,0x04,0x07,0x00,0x53,0x04,0x07,0x00,0x92,0x0c,0x51,0x04,0x07,
+-	0x00,0x10,0x04,0x07,0x00,0x00,0x00,0x00,0x00,0xd1,0x40,0xd0,0x3a,0xcf,0x86,0xd5,
+-	0x20,0x94,0x1c,0x93,0x18,0xd2,0x0c,0x51,0x04,0x07,0x00,0x10,0x04,0x07,0x00,0x00,
+-	0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x07,0x00,0x07,0x00,0x07,0x00,0x54,
+-	0x04,0x07,0x00,0x93,0x10,0x52,0x04,0x07,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,
+-	0x00,0x07,0x00,0x07,0x00,0xcf,0x06,0x08,0x00,0xd0,0x46,0xcf,0x86,0xd5,0x2c,0xd4,
+-	0x20,0x53,0x04,0x08,0x00,0xd2,0x0c,0x51,0x04,0x08,0x00,0x10,0x04,0x08,0x00,0x10,
+-	0x00,0xd1,0x08,0x10,0x04,0x10,0x00,0x12,0x00,0x10,0x04,0x12,0x00,0x00,0x00,0x53,
+-	0x04,0x0a,0x00,0x12,0x04,0x0a,0x00,0x00,0x00,0x94,0x14,0x93,0x10,0x92,0x0c,0x91,
+-	0x08,0x10,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,
+-	0x86,0xd5,0x08,0x14,0x04,0x00,0x00,0x0a,0x00,0x54,0x04,0x0a,0x00,0x53,0x04,0x0a,
+-	0x00,0x52,0x04,0x0a,0x00,0x91,0x08,0x10,0x04,0x0a,0x00,0x0a,0xdc,0x00,0x00,0xd2,
+-	0x5e,0xd1,0x06,0xcf,0x06,0x00,0x00,0xd0,0x1e,0xcf,0x86,0x95,0x18,0x54,0x04,0x0a,
+-	0x00,0x53,0x04,0x0a,0x00,0x52,0x04,0x0a,0x00,0x91,0x08,0x10,0x04,0x0a,0x00,0x00,
+-	0x00,0x00,0x00,0x0a,0x00,0xcf,0x86,0xd5,0x18,0x54,0x04,0x0a,0x00,0x93,0x10,0x92,
+-	0x0c,0x91,0x08,0x10,0x04,0x0a,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xd4,
+-	0x14,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x10,0xdc,0x10,0x00,0x10,0x00,0x10,
+-	0x00,0x10,0x00,0x53,0x04,0x10,0x00,0x12,0x04,0x10,0x00,0x00,0x00,0xd1,0x70,0xd0,
+-	0x36,0xcf,0x86,0xd5,0x18,0x54,0x04,0x05,0x00,0x53,0x04,0x05,0x00,0x52,0x04,0x05,
+-	0x00,0x51,0x04,0x05,0x00,0x10,0x04,0x05,0x00,0x10,0x00,0x94,0x18,0xd3,0x08,0x12,
+-	0x04,0x05,0x00,0x00,0x00,0x52,0x04,0x00,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x13,
+-	0x00,0x13,0x00,0x05,0x00,0xcf,0x86,0xd5,0x18,0x94,0x14,0x53,0x04,0x05,0x00,0x92,
+-	0x0c,0x51,0x04,0x05,0x00,0x10,0x04,0x05,0x00,0x00,0x00,0x00,0x00,0x10,0x00,0x54,
+-	0x04,0x10,0x00,0xd3,0x0c,0x52,0x04,0x10,0x00,0x11,0x04,0x10,0x00,0x10,0xe6,0x92,
+-	0x0c,0x51,0x04,0x10,0xe6,0x10,0x04,0x10,0xe6,0x00,0x00,0x00,0x00,0xd0,0x1e,0xcf,
+-	0x86,0x95,0x18,0x54,0x04,0x07,0x00,0x53,0x04,0x07,0x00,0x52,0x04,0x07,0x00,0x51,
+-	0x04,0x07,0x00,0x10,0x04,0x00,0x00,0x07,0x00,0x08,0x00,0xcf,0x86,0x95,0x1c,0xd4,
+-	0x0c,0x93,0x08,0x12,0x04,0x08,0x00,0x00,0x00,0x08,0x00,0x93,0x0c,0x52,0x04,0x08,
+-	0x00,0x11,0x04,0x08,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xd3,0xba,0xd2,0x80,0xd1,
+-	0x34,0xd0,0x1a,0xcf,0x86,0x55,0x04,0x05,0x00,0x94,0x10,0x93,0x0c,0x52,0x04,0x05,
+-	0x00,0x11,0x04,0x05,0x00,0x07,0x00,0x05,0x00,0x05,0x00,0xcf,0x86,0x95,0x14,0x94,
+-	0x10,0x53,0x04,0x05,0x00,0x52,0x04,0x05,0x00,0x11,0x04,0x05,0x00,0x07,0x00,0x07,
+-	0x00,0x07,0x00,0xd0,0x2a,0xcf,0x86,0xd5,0x14,0x54,0x04,0x07,0x00,0x53,0x04,0x07,
+-	0x00,0x52,0x04,0x07,0x00,0x11,0x04,0x07,0x00,0x00,0x00,0x94,0x10,0x53,0x04,0x07,
+-	0x00,0x92,0x08,0x11,0x04,0x07,0x00,0x00,0x00,0x00,0x00,0x12,0x00,0xcf,0x86,0xd5,
+-	0x10,0x54,0x04,0x12,0x00,0x93,0x08,0x12,0x04,0x12,0x00,0x00,0x00,0x12,0x00,0x54,
+-	0x04,0x12,0x00,0x53,0x04,0x12,0x00,0x12,0x04,0x12,0x00,0x00,0x00,0xd1,0x34,0xd0,
+-	0x12,0xcf,0x86,0x55,0x04,0x10,0x00,0x94,0x08,0x13,0x04,0x10,0x00,0x00,0x00,0x10,
+-	0x00,0xcf,0x86,0x55,0x04,0x10,0x00,0x94,0x18,0xd3,0x08,0x12,0x04,0x10,0x00,0x00,
+-	0x00,0x52,0x04,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x10,0x00,0x00,
+-	0x00,0xcf,0x06,0x00,0x00,0xd2,0x06,0xcf,0x06,0x10,0x00,0xd1,0x40,0xd0,0x1e,0xcf,
+-	0x86,0x55,0x04,0x10,0x00,0x54,0x04,0x10,0x00,0x93,0x10,0x52,0x04,0x10,0x00,0x51,
+-	0x04,0x10,0x00,0x10,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0xcf,0x86,0xd5,0x14,0x54,
+-	0x04,0x10,0x00,0x93,0x0c,0x52,0x04,0x10,0x00,0x11,0x04,0x10,0x00,0x00,0x00,0x00,
+-	0x00,0x94,0x08,0x13,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xe4,
+-	0xce,0x02,0xe3,0x45,0x01,0xd2,0xd0,0xd1,0x70,0xd0,0x52,0xcf,0x86,0xd5,0x20,0x94,
+-	0x1c,0xd3,0x0c,0x52,0x04,0x07,0x00,0x11,0x04,0x07,0x00,0x00,0x00,0x92,0x0c,0x91,
+-	0x08,0x10,0x04,0x07,0x00,0x00,0x00,0x07,0x00,0x07,0x00,0x07,0x00,0x54,0x04,0x07,
+-	0x00,0xd3,0x10,0x52,0x04,0x07,0x00,0x51,0x04,0x07,0x00,0x10,0x04,0x00,0x00,0x07,
+-	0x00,0xd2,0x0c,0x91,0x08,0x10,0x04,0x07,0x00,0x00,0x00,0x00,0x00,0xd1,0x08,0x10,
+-	0x04,0x07,0x00,0x00,0x00,0x10,0x04,0x00,0x00,0x07,0x00,0xcf,0x86,0x95,0x18,0x54,
+-	0x04,0x0b,0x00,0x93,0x10,0x52,0x04,0x0b,0x00,0x51,0x04,0x0b,0x00,0x10,0x04,0x00,
+-	0x00,0x0b,0x00,0x0b,0x00,0x10,0x00,0xd0,0x32,0xcf,0x86,0xd5,0x18,0x54,0x04,0x10,
+-	0x00,0x53,0x04,0x10,0x00,0x52,0x04,0x10,0x00,0x51,0x04,0x10,0x00,0x10,0x04,0x10,
+-	0x00,0x00,0x00,0x94,0x14,0x93,0x10,0x52,0x04,0x00,0x00,0x51,0x04,0x00,0x00,0x10,
+-	0x04,0x00,0x00,0x10,0x00,0x10,0x00,0x00,0x00,0xcf,0x86,0x55,0x04,0x00,0x00,0x54,
+-	0x04,0x11,0x00,0xd3,0x14,0xd2,0x0c,0x51,0x04,0x11,0x00,0x10,0x04,0x11,0x00,0x00,
+-	0x00,0x11,0x04,0x11,0x00,0x00,0x00,0x92,0x0c,0x51,0x04,0x00,0x00,0x10,0x04,0x00,
+-	0x00,0x11,0x00,0x11,0x00,0xd1,0x40,0xd0,0x3a,0xcf,0x86,0xd5,0x1c,0x54,0x04,0x09,
+-	0x00,0x53,0x04,0x09,0x00,0xd2,0x08,0x11,0x04,0x09,0x00,0x0b,0x00,0x51,0x04,0x00,
+-	0x00,0x10,0x04,0x00,0x00,0x09,0x00,0x54,0x04,0x0a,0x00,0x53,0x04,0x0a,0x00,0xd2,
+-	0x08,0x11,0x04,0x0a,0x00,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x0a,
+-	0x00,0xcf,0x06,0x00,0x00,0xd0,0x1a,0xcf,0x86,0x55,0x04,0x0d,0x00,0x54,0x04,0x0d,
+-	0x00,0x53,0x04,0x0d,0x00,0x52,0x04,0x00,0x00,0x11,0x04,0x11,0x00,0x0d,0x00,0xcf,
+-	0x86,0x95,0x14,0x54,0x04,0x11,0x00,0x93,0x0c,0x92,0x08,0x11,0x04,0x00,0x00,0x11,
+-	0x00,0x11,0x00,0x11,0x00,0x11,0x00,0xd2,0xec,0xd1,0xa4,0xd0,0x76,0xcf,0x86,0xd5,
+-	0x48,0xd4,0x28,0xd3,0x14,0x52,0x04,0x08,0x00,0xd1,0x08,0x10,0x04,0x00,0x00,0x08,
+-	0x00,0x10,0x04,0x08,0x00,0x00,0x00,0x52,0x04,0x00,0x00,0xd1,0x08,0x10,0x04,0x08,
+-	0x00,0x08,0xdc,0x10,0x04,0x08,0x00,0x08,0xe6,0xd3,0x10,0x52,0x04,0x08,0x00,0x91,
+-	0x08,0x10,0x04,0x00,0x00,0x08,0x00,0x08,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x00,
+-	0x00,0x08,0x00,0x08,0x00,0x08,0x00,0x54,0x04,0x08,0x00,0xd3,0x0c,0x52,0x04,0x08,
+-	0x00,0x11,0x04,0x14,0x00,0x00,0x00,0xd2,0x10,0xd1,0x08,0x10,0x04,0x08,0xe6,0x08,
+-	0x01,0x10,0x04,0x08,0xdc,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x08,
+-	0x09,0xcf,0x86,0x95,0x28,0xd4,0x14,0x53,0x04,0x08,0x00,0x92,0x0c,0x91,0x08,0x10,
+-	0x04,0x14,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x53,0x04,0x08,0x00,0x92,0x0c,0x91,
+-	0x08,0x10,0x04,0x08,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0b,0x00,0xd0,0x0a,0xcf,
+-	0x86,0x15,0x04,0x10,0x00,0x00,0x00,0xcf,0x86,0x55,0x04,0x10,0x00,0xd4,0x24,0xd3,
+-	0x14,0x52,0x04,0x10,0x00,0xd1,0x08,0x10,0x04,0x10,0x00,0x10,0xe6,0x10,0x04,0x10,
+-	0xdc,0x00,0x00,0x92,0x0c,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x10,0x00,0x10,
+-	0x00,0x93,0x10,0x52,0x04,0x10,0x00,0x51,0x04,0x10,0x00,0x10,0x04,0x10,0x00,0x00,
+-	0x00,0x00,0x00,0xd1,0x54,0xd0,0x26,0xcf,0x86,0x55,0x04,0x0b,0x00,0x54,0x04,0x0b,
+-	0x00,0xd3,0x0c,0x52,0x04,0x0b,0x00,0x11,0x04,0x0b,0x00,0x00,0x00,0x92,0x0c,0x91,
+-	0x08,0x10,0x04,0x00,0x00,0x0b,0x00,0x0b,0x00,0x0b,0x00,0xcf,0x86,0xd5,0x14,0x54,
+-	0x04,0x0b,0x00,0x93,0x0c,0x52,0x04,0x0b,0x00,0x11,0x04,0x0b,0x00,0x00,0x00,0x0b,
+-	0x00,0x54,0x04,0x0b,0x00,0x93,0x10,0x92,0x0c,0x51,0x04,0x0b,0x00,0x10,0x04,0x0b,
+-	0x00,0x00,0x00,0x00,0x00,0x0b,0x00,0xd0,0x42,0xcf,0x86,0xd5,0x28,0x54,0x04,0x10,
+-	0x00,0xd3,0x0c,0x92,0x08,0x11,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0xd2,0x0c,0x91,
+-	0x08,0x10,0x04,0x00,0x00,0x10,0x00,0x10,0x00,0x91,0x08,0x10,0x04,0x10,0x00,0x00,
+-	0x00,0x00,0x00,0x94,0x14,0x53,0x04,0x00,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x00,
+-	0x00,0x10,0x00,0x10,0x00,0x10,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xd3,0x96,0xd2,
+-	0x68,0xd1,0x24,0xd0,0x06,0xcf,0x06,0x0b,0x00,0xcf,0x86,0x95,0x18,0x94,0x14,0x53,
+-	0x04,0x0b,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x0b,0x00,0x00,0x00,0x00,0x00,0x00,
+-	0x00,0x00,0x00,0x00,0x00,0xd0,0x1e,0xcf,0x86,0x55,0x04,0x11,0x00,0x54,0x04,0x11,
+-	0x00,0x93,0x10,0x92,0x0c,0x51,0x04,0x11,0x00,0x10,0x04,0x11,0x00,0x00,0x00,0x00,
+-	0x00,0x00,0x00,0xcf,0x86,0x55,0x04,0x11,0x00,0x54,0x04,0x11,0x00,0xd3,0x10,0x92,
+-	0x0c,0x51,0x04,0x11,0x00,0x10,0x04,0x11,0x00,0x00,0x00,0x00,0x00,0x92,0x08,0x11,
+-	0x04,0x00,0x00,0x11,0x00,0x11,0x00,0xd1,0x28,0xd0,0x22,0xcf,0x86,0x55,0x04,0x14,
+-	0x00,0xd4,0x0c,0x93,0x08,0x12,0x04,0x14,0x00,0x14,0xe6,0x00,0x00,0x53,0x04,0x14,
+-	0x00,0x92,0x08,0x11,0x04,0x14,0x00,0x00,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xcf,
+-	0x06,0x00,0x00,0xd2,0x2a,0xd1,0x24,0xd0,0x06,0xcf,0x06,0x00,0x00,0xcf,0x86,0x55,
+-	0x04,0x00,0x00,0x54,0x04,0x0b,0x00,0x53,0x04,0x0b,0x00,0x52,0x04,0x0b,0x00,0x51,
+-	0x04,0x0b,0x00,0x10,0x04,0x0b,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xd1,0x58,0xd0,
+-	0x12,0xcf,0x86,0x55,0x04,0x14,0x00,0x94,0x08,0x13,0x04,0x14,0x00,0x00,0x00,0x14,
+-	0x00,0xcf,0x86,0x95,0x40,0xd4,0x24,0xd3,0x0c,0x52,0x04,0x14,0x00,0x11,0x04,0x14,
+-	0x00,0x14,0xdc,0xd2,0x0c,0x51,0x04,0x14,0xe6,0x10,0x04,0x14,0xe6,0x14,0xdc,0x91,
+-	0x08,0x10,0x04,0x14,0xe6,0x14,0xdc,0x14,0xdc,0xd3,0x10,0x92,0x0c,0x91,0x08,0x10,
+-	0x04,0x14,0xdc,0x14,0x00,0x14,0x00,0x14,0x00,0x92,0x08,0x11,0x04,0x14,0x00,0x00,
+-	0x00,0x00,0x00,0x00,0x00,0xd0,0x06,0xcf,0x06,0x00,0x00,0xcf,0x86,0x55,0x04,0x00,
+-	0x00,0x54,0x04,0x15,0x00,0x93,0x10,0x52,0x04,0x15,0x00,0x51,0x04,0x15,0x00,0x10,
+-	0x04,0x15,0x00,0x00,0x00,0x00,0x00,0xcf,0x86,0xe5,0x0f,0x06,0xe4,0xf8,0x03,0xe3,
+-	0x02,0x02,0xd2,0xfb,0xd1,0x4c,0xd0,0x06,0xcf,0x06,0x0c,0x00,0xcf,0x86,0xd5,0x2c,
+-	0xd4,0x1c,0xd3,0x10,0x52,0x04,0x0c,0x00,0x51,0x04,0x0c,0x00,0x10,0x04,0x0c,0x09,
+-	0x0c,0x00,0x52,0x04,0x0c,0x00,0x11,0x04,0x0c,0x00,0x00,0x00,0x93,0x0c,0x92,0x08,
+-	0x11,0x04,0x00,0x00,0x0c,0x00,0x0c,0x00,0x0c,0x00,0x54,0x04,0x0c,0x00,0x53,0x04,
+-	0x00,0x00,0x52,0x04,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x10,0x09,
+-	0xd0,0x69,0xcf,0x86,0xd5,0x32,0x54,0x04,0x0b,0x00,0x53,0x04,0x0b,0x00,0xd2,0x15,
+-	0x51,0x04,0x0b,0x00,0x10,0x0d,0x0b,0xff,0xf0,0x91,0x82,0x99,0xf0,0x91,0x82,0xba,
+-	0x00,0x0b,0x00,0x91,0x11,0x10,0x0d,0x0b,0xff,0xf0,0x91,0x82,0x9b,0xf0,0x91,0x82,
+-	0xba,0x00,0x0b,0x00,0x0b,0x00,0xd4,0x1d,0x53,0x04,0x0b,0x00,0x92,0x15,0x51,0x04,
+-	0x0b,0x00,0x10,0x04,0x0b,0x00,0x0b,0xff,0xf0,0x91,0x82,0xa5,0xf0,0x91,0x82,0xba,
+-	0x00,0x0b,0x00,0x53,0x04,0x0b,0x00,0x92,0x10,0xd1,0x08,0x10,0x04,0x0b,0x00,0x0b,
+-	0x09,0x10,0x04,0x0b,0x07,0x0b,0x00,0x0b,0x00,0xcf,0x86,0xd5,0x20,0x94,0x1c,0xd3,
+-	0x0c,0x92,0x08,0x11,0x04,0x0b,0x00,0x00,0x00,0x00,0x00,0x52,0x04,0x00,0x00,0x91,
+-	0x08,0x10,0x04,0x00,0x00,0x14,0x00,0x00,0x00,0x0d,0x00,0xd4,0x14,0x53,0x04,0x0d,
+-	0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x0d,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x53,
+-	0x04,0x0d,0x00,0x92,0x08,0x11,0x04,0x0d,0x00,0x00,0x00,0x00,0x00,0xd1,0x96,0xd0,
+-	0x5c,0xcf,0x86,0xd5,0x18,0x94,0x14,0x93,0x10,0x92,0x0c,0x51,0x04,0x0d,0xe6,0x10,
+-	0x04,0x0d,0xe6,0x0d,0x00,0x0d,0x00,0x0d,0x00,0x0d,0x00,0xd4,0x26,0x53,0x04,0x0d,
+-	0x00,0x52,0x04,0x0d,0x00,0x51,0x04,0x0d,0x00,0x10,0x0d,0x0d,0xff,0xf0,0x91,0x84,
+-	0xb1,0xf0,0x91,0x84,0xa7,0x00,0x0d,0xff,0xf0,0x91,0x84,0xb2,0xf0,0x91,0x84,0xa7,
+-	0x00,0x93,0x18,0xd2,0x0c,0x51,0x04,0x0d,0x00,0x10,0x04,0x0d,0x00,0x0d,0x09,0x91,
+-	0x08,0x10,0x04,0x0d,0x09,0x00,0x00,0x0d,0x00,0x0d,0x00,0xcf,0x86,0xd5,0x18,0x94,
+-	0x14,0x93,0x10,0x52,0x04,0x0d,0x00,0x51,0x04,0x14,0x00,0x10,0x04,0x14,0x00,0x00,
+-	0x00,0x00,0x00,0x10,0x00,0x54,0x04,0x10,0x00,0x93,0x18,0xd2,0x0c,0x51,0x04,0x10,
+-	0x00,0x10,0x04,0x10,0x00,0x10,0x07,0x51,0x04,0x10,0x00,0x10,0x04,0x10,0x00,0x00,
+-	0x00,0x00,0x00,0xd0,0x06,0xcf,0x06,0x0d,0x00,0xcf,0x86,0xd5,0x40,0xd4,0x2c,0xd3,
+-	0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x0d,0x09,0x0d,0x00,0x0d,0x00,0x0d,0x00,0xd2,
+-	0x10,0xd1,0x08,0x10,0x04,0x0d,0x00,0x11,0x00,0x10,0x04,0x11,0x07,0x11,0x00,0x91,
+-	0x08,0x10,0x04,0x11,0x00,0x10,0x00,0x00,0x00,0x53,0x04,0x0d,0x00,0x92,0x0c,0x51,
+-	0x04,0x0d,0x00,0x10,0x04,0x10,0x00,0x11,0x00,0x11,0x00,0xd4,0x14,0x93,0x10,0x92,
+-	0x0c,0x91,0x08,0x10,0x04,0x00,0x00,0x10,0x00,0x10,0x00,0x10,0x00,0x10,0x00,0x93,
+-	0x10,0x52,0x04,0x10,0x00,0x91,0x08,0x10,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0x00,
+-	0x00,0xd2,0xc8,0xd1,0x48,0xd0,0x42,0xcf,0x86,0xd5,0x18,0x54,0x04,0x10,0x00,0x93,
+-	0x10,0x92,0x0c,0x51,0x04,0x10,0x00,0x10,0x04,0x00,0x00,0x10,0x00,0x10,0x00,0x10,
+-	0x00,0x54,0x04,0x10,0x00,0xd3,0x14,0x52,0x04,0x10,0x00,0xd1,0x08,0x10,0x04,0x10,
+-	0x00,0x10,0x09,0x10,0x04,0x10,0x07,0x10,0x00,0x52,0x04,0x10,0x00,0x51,0x04,0x10,
+-	0x00,0x10,0x04,0x12,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xd0,0x52,0xcf,0x86,0xd5,
+-	0x3c,0xd4,0x28,0xd3,0x10,0x52,0x04,0x11,0x00,0x51,0x04,0x11,0x00,0x10,0x04,0x11,
+-	0x00,0x00,0x00,0xd2,0x0c,0x91,0x08,0x10,0x04,0x11,0x00,0x00,0x00,0x11,0x00,0x51,
+-	0x04,0x11,0x00,0x10,0x04,0x00,0x00,0x11,0x00,0x53,0x04,0x11,0x00,0x52,0x04,0x11,
+-	0x00,0x51,0x04,0x11,0x00,0x10,0x04,0x00,0x00,0x11,0x00,0x94,0x10,0x53,0x04,0x11,
+-	0x00,0x92,0x08,0x11,0x04,0x11,0x00,0x00,0x00,0x00,0x00,0x10,0x00,0xcf,0x86,0x55,
+-	0x04,0x10,0x00,0xd4,0x18,0x53,0x04,0x10,0x00,0x92,0x10,0xd1,0x08,0x10,0x04,0x10,
+-	0x00,0x10,0x07,0x10,0x04,0x10,0x09,0x00,0x00,0x00,0x00,0x53,0x04,0x10,0x00,0x92,
+-	0x08,0x11,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0xe1,0x27,0x01,0xd0,0x8a,0xcf,0x86,
+-	0xd5,0x44,0xd4,0x2c,0xd3,0x18,0xd2,0x0c,0x91,0x08,0x10,0x04,0x11,0x00,0x10,0x00,
+-	0x10,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x10,0x00,0x10,0x00,0x52,0x04,0x10,0x00,
+-	0xd1,0x08,0x10,0x04,0x10,0x00,0x00,0x00,0x10,0x04,0x00,0x00,0x10,0x00,0x93,0x14,
+-	0x92,0x10,0xd1,0x08,0x10,0x04,0x10,0x00,0x00,0x00,0x10,0x04,0x00,0x00,0x10,0x00,
+-	0x10,0x00,0x10,0x00,0xd4,0x14,0x53,0x04,0x10,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,
+-	0x10,0x00,0x00,0x00,0x10,0x00,0x10,0x00,0xd3,0x18,0xd2,0x0c,0x91,0x08,0x10,0x04,
+-	0x10,0x00,0x00,0x00,0x10,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x10,0x00,0x10,0x00,
+-	0xd2,0x0c,0x51,0x04,0x10,0x00,0x10,0x04,0x00,0x00,0x14,0x07,0x91,0x08,0x10,0x04,
+-	0x10,0x07,0x10,0x00,0x10,0x00,0xcf,0x86,0xd5,0x6a,0xd4,0x42,0xd3,0x14,0x52,0x04,
+-	0x10,0x00,0xd1,0x08,0x10,0x04,0x10,0x00,0x00,0x00,0x10,0x04,0x00,0x00,0x10,0x00,
+-	0xd2,0x19,0xd1,0x08,0x10,0x04,0x10,0x00,0x00,0x00,0x10,0x04,0x00,0x00,0x10,0xff,
+-	0xf0,0x91,0x8d,0x87,0xf0,0x91,0x8c,0xbe,0x00,0x91,0x11,0x10,0x0d,0x10,0xff,0xf0,
+-	0x91,0x8d,0x87,0xf0,0x91,0x8d,0x97,0x00,0x10,0x09,0x00,0x00,0xd3,0x18,0xd2,0x0c,
+-	0x91,0x08,0x10,0x04,0x11,0x00,0x00,0x00,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,
+-	0x00,0x00,0x10,0x00,0x52,0x04,0x00,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x10,0x00,
+-	0x10,0x00,0xd4,0x1c,0xd3,0x0c,0x52,0x04,0x10,0x00,0x11,0x04,0x00,0x00,0x10,0xe6,
+-	0x52,0x04,0x10,0xe6,0x91,0x08,0x10,0x04,0x10,0xe6,0x00,0x00,0x00,0x00,0x93,0x10,
+-	0x52,0x04,0x10,0xe6,0x91,0x08,0x10,0x04,0x10,0xe6,0x00,0x00,0x00,0x00,0x00,0x00,
+-	0xcf,0x06,0x00,0x00,0xe3,0x30,0x01,0xd2,0xb7,0xd1,0x48,0xd0,0x06,0xcf,0x06,0x12,
+-	0x00,0xcf,0x86,0x95,0x3c,0xd4,0x1c,0x93,0x18,0xd2,0x0c,0x51,0x04,0x12,0x00,0x10,
+-	0x04,0x12,0x09,0x12,0x00,0x51,0x04,0x12,0x00,0x10,0x04,0x12,0x07,0x12,0x00,0x12,
+-	0x00,0x53,0x04,0x12,0x00,0xd2,0x0c,0x51,0x04,0x12,0x00,0x10,0x04,0x00,0x00,0x12,
+-	0x00,0xd1,0x08,0x10,0x04,0x00,0x00,0x12,0x00,0x10,0x04,0x14,0xe6,0x15,0x00,0x00,
+-	0x00,0xd0,0x45,0xcf,0x86,0x55,0x04,0x10,0x00,0x54,0x04,0x10,0x00,0x53,0x04,0x10,
+-	0x00,0xd2,0x15,0x51,0x04,0x10,0x00,0x10,0x04,0x10,0x00,0x10,0xff,0xf0,0x91,0x92,
+-	0xb9,0xf0,0x91,0x92,0xba,0x00,0xd1,0x11,0x10,0x0d,0x10,0xff,0xf0,0x91,0x92,0xb9,
+-	0xf0,0x91,0x92,0xb0,0x00,0x10,0x00,0x10,0x0d,0x10,0xff,0xf0,0x91,0x92,0xb9,0xf0,
+-	0x91,0x92,0xbd,0x00,0x10,0x00,0xcf,0x86,0x95,0x24,0xd4,0x14,0x93,0x10,0x92,0x0c,
+-	0x51,0x04,0x10,0x00,0x10,0x04,0x10,0x09,0x10,0x07,0x10,0x00,0x00,0x00,0x53,0x04,
+-	0x10,0x00,0x92,0x08,0x11,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xd1,0x06,
+-	0xcf,0x06,0x00,0x00,0xd0,0x40,0xcf,0x86,0x55,0x04,0x10,0x00,0x54,0x04,0x10,0x00,
+-	0xd3,0x0c,0x52,0x04,0x10,0x00,0x11,0x04,0x10,0x00,0x00,0x00,0xd2,0x1e,0x51,0x04,
+-	0x10,0x00,0x10,0x0d,0x10,0xff,0xf0,0x91,0x96,0xb8,0xf0,0x91,0x96,0xaf,0x00,0x10,
+-	0xff,0xf0,0x91,0x96,0xb9,0xf0,0x91,0x96,0xaf,0x00,0x51,0x04,0x10,0x00,0x10,0x04,
+-	0x10,0x00,0x10,0x09,0xcf,0x86,0x95,0x2c,0xd4,0x1c,0xd3,0x10,0x92,0x0c,0x91,0x08,
+-	0x10,0x04,0x10,0x07,0x10,0x00,0x10,0x00,0x10,0x00,0x92,0x08,0x11,0x04,0x10,0x00,
+-	0x11,0x00,0x11,0x00,0x53,0x04,0x11,0x00,0x52,0x04,0x11,0x00,0x11,0x04,0x11,0x00,
+-	0x00,0x00,0x00,0x00,0xd2,0xa0,0xd1,0x5c,0xd0,0x1e,0xcf,0x86,0x55,0x04,0x10,0x00,
+-	0x54,0x04,0x10,0x00,0x53,0x04,0x10,0x00,0x52,0x04,0x10,0x00,0x51,0x04,0x10,0x00,
+-	0x10,0x04,0x10,0x00,0x10,0x09,0xcf,0x86,0xd5,0x24,0xd4,0x14,0x93,0x10,0x52,0x04,
+-	0x10,0x00,0x91,0x08,0x10,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x53,0x04,
+-	0x10,0x00,0x92,0x08,0x11,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0x94,0x14,0x53,0x04,
+-	0x12,0x00,0x52,0x04,0x12,0x00,0x91,0x08,0x10,0x04,0x12,0x00,0x00,0x00,0x00,0x00,
+-	0x00,0x00,0xd0,0x2a,0xcf,0x86,0x55,0x04,0x0d,0x00,0x54,0x04,0x0d,0x00,0xd3,0x10,
+-	0x52,0x04,0x0d,0x00,0x51,0x04,0x0d,0x00,0x10,0x04,0x0d,0x09,0x0d,0x07,0x92,0x0c,
+-	0x91,0x08,0x10,0x04,0x15,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x86,0x95,0x14,
+-	0x94,0x10,0x53,0x04,0x0d,0x00,0x92,0x08,0x11,0x04,0x0d,0x00,0x00,0x00,0x00,0x00,
+-	0x00,0x00,0x00,0x00,0xd1,0x40,0xd0,0x3a,0xcf,0x86,0xd5,0x20,0x54,0x04,0x11,0x00,
+-	0x53,0x04,0x11,0x00,0xd2,0x0c,0x51,0x04,0x11,0x00,0x10,0x04,0x14,0x00,0x00,0x00,
+-	0x91,0x08,0x10,0x04,0x00,0x00,0x11,0x00,0x11,0x00,0x94,0x14,0x53,0x04,0x11,0x00,
+-	0x92,0x0c,0x51,0x04,0x11,0x00,0x10,0x04,0x11,0x00,0x11,0x09,0x00,0x00,0x11,0x00,
+-	0xcf,0x06,0x00,0x00,0xcf,0x06,0x00,0x00,0xe4,0x59,0x01,0xd3,0xb2,0xd2,0x5c,0xd1,
+-	0x28,0xd0,0x22,0xcf,0x86,0x55,0x04,0x14,0x00,0x54,0x04,0x14,0x00,0x53,0x04,0x14,
+-	0x00,0x92,0x10,0xd1,0x08,0x10,0x04,0x14,0x00,0x14,0x09,0x10,0x04,0x14,0x07,0x14,
+-	0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xd0,0x0a,0xcf,0x86,0x15,0x04,0x00,0x00,0x10,
+-	0x00,0xcf,0x86,0x55,0x04,0x10,0x00,0x54,0x04,0x10,0x00,0xd3,0x10,0x92,0x0c,0x51,
+-	0x04,0x10,0x00,0x10,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0x52,0x04,0x00,0x00,0x51,
+-	0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x10,0x00,0xd1,0x06,0xcf,0x06,0x00,0x00,0xd0,
+-	0x1a,0xcf,0x86,0x55,0x04,0x00,0x00,0x94,0x10,0x53,0x04,0x15,0x00,0x92,0x08,0x11,
+-	0x04,0x00,0x00,0x15,0x00,0x15,0x00,0x15,0x00,0xcf,0x86,0xd5,0x14,0x54,0x04,0x15,
+-	0x00,0x53,0x04,0x15,0x00,0x92,0x08,0x11,0x04,0x00,0x00,0x15,0x00,0x15,0x00,0x94,
+-	0x1c,0x93,0x18,0xd2,0x0c,0x91,0x08,0x10,0x04,0x15,0x09,0x15,0x00,0x15,0x00,0x91,
+-	0x08,0x10,0x04,0x15,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xd2,0xa0,0xd1,
+-	0x3c,0xd0,0x1e,0xcf,0x86,0x55,0x04,0x13,0x00,0x54,0x04,0x13,0x00,0x93,0x10,0x52,
+-	0x04,0x13,0x00,0x91,0x08,0x10,0x04,0x13,0x09,0x13,0x00,0x13,0x00,0x13,0x00,0xcf,
+-	0x86,0x95,0x18,0x94,0x14,0x93,0x10,0x52,0x04,0x13,0x00,0x51,0x04,0x13,0x00,0x10,
+-	0x04,0x13,0x00,0x13,0x09,0x00,0x00,0x13,0x00,0x13,0x00,0xd0,0x46,0xcf,0x86,0xd5,
+-	0x2c,0xd4,0x10,0x93,0x0c,0x52,0x04,0x13,0x00,0x11,0x04,0x15,0x00,0x13,0x00,0x13,
+-	0x00,0x53,0x04,0x13,0x00,0xd2,0x0c,0x91,0x08,0x10,0x04,0x13,0x00,0x13,0x09,0x13,
+-	0x00,0x91,0x08,0x10,0x04,0x13,0x00,0x14,0x00,0x13,0x00,0x94,0x14,0x93,0x10,0x92,
+-	0x0c,0x51,0x04,0x13,0x00,0x10,0x04,0x13,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+-	0x00,0xcf,0x86,0x55,0x04,0x10,0x00,0x54,0x04,0x10,0x00,0x53,0x04,0x10,0x00,0x92,
+-	0x0c,0x91,0x08,0x10,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x06,0x00,
+-	0x00,0xe3,0xa9,0x01,0xd2,0xb0,0xd1,0x6c,0xd0,0x3e,0xcf,0x86,0xd5,0x18,0x94,0x14,
+-	0x53,0x04,0x12,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x12,0x00,0x00,0x00,0x12,0x00,
+-	0x12,0x00,0x12,0x00,0x54,0x04,0x12,0x00,0xd3,0x10,0x52,0x04,0x12,0x00,0x51,0x04,
+-	0x12,0x00,0x10,0x04,0x12,0x00,0x00,0x00,0x52,0x04,0x12,0x00,0x51,0x04,0x12,0x00,
+-	0x10,0x04,0x12,0x00,0x12,0x09,0xcf,0x86,0xd5,0x14,0x94,0x10,0x93,0x0c,0x52,0x04,
+-	0x12,0x00,0x11,0x04,0x12,0x00,0x00,0x00,0x00,0x00,0x12,0x00,0x94,0x14,0x53,0x04,
+-	0x12,0x00,0x52,0x04,0x12,0x00,0x91,0x08,0x10,0x04,0x12,0x00,0x00,0x00,0x00,0x00,
+-	0x12,0x00,0xd0,0x3e,0xcf,0x86,0xd5,0x14,0x54,0x04,0x12,0x00,0x93,0x0c,0x92,0x08,
+-	0x11,0x04,0x00,0x00,0x12,0x00,0x12,0x00,0x12,0x00,0xd4,0x14,0x53,0x04,0x12,0x00,
+-	0x92,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,0x12,0x00,0x12,0x00,0x12,0x00,0x93,0x10,
+-	0x52,0x04,0x12,0x00,0x51,0x04,0x12,0x00,0x10,0x04,0x12,0x00,0x00,0x00,0x00,0x00,
+-	0xcf,0x06,0x00,0x00,0xd1,0xa0,0xd0,0x52,0xcf,0x86,0xd5,0x24,0x94,0x20,0xd3,0x10,
+-	0x52,0x04,0x13,0x00,0x51,0x04,0x13,0x00,0x10,0x04,0x13,0x00,0x00,0x00,0x92,0x0c,
+-	0x51,0x04,0x13,0x00,0x10,0x04,0x00,0x00,0x13,0x00,0x13,0x00,0x13,0x00,0x54,0x04,
+-	0x13,0x00,0xd3,0x10,0x52,0x04,0x13,0x00,0x51,0x04,0x13,0x00,0x10,0x04,0x13,0x00,
+-	0x00,0x00,0xd2,0x0c,0x51,0x04,0x00,0x00,0x10,0x04,0x13,0x00,0x00,0x00,0x51,0x04,
+-	0x13,0x00,0x10,0x04,0x00,0x00,0x13,0x00,0xcf,0x86,0xd5,0x28,0xd4,0x18,0x93,0x14,
+-	0xd2,0x0c,0x51,0x04,0x13,0x00,0x10,0x04,0x13,0x07,0x13,0x00,0x11,0x04,0x13,0x09,
+-	0x13,0x00,0x00,0x00,0x53,0x04,0x13,0x00,0x92,0x08,0x11,0x04,0x13,0x00,0x00,0x00,
+-	0x00,0x00,0x94,0x20,0xd3,0x10,0x52,0x04,0x14,0x00,0x51,0x04,0x14,0x00,0x10,0x04,
+-	0x00,0x00,0x14,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x14,0x00,0x00,0x00,0x14,0x00,
+-	0x14,0x00,0x14,0x00,0xd0,0x52,0xcf,0x86,0xd5,0x3c,0xd4,0x14,0x53,0x04,0x14,0x00,
+-	0x52,0x04,0x14,0x00,0x51,0x04,0x14,0x00,0x10,0x04,0x14,0x00,0x00,0x00,0xd3,0x18,
+-	0xd2,0x0c,0x51,0x04,0x14,0x00,0x10,0x04,0x00,0x00,0x14,0x00,0x51,0x04,0x14,0x00,
+-	0x10,0x04,0x14,0x00,0x14,0x09,0x92,0x0c,0x91,0x08,0x10,0x04,0x14,0x00,0x00,0x00,
+-	0x00,0x00,0x00,0x00,0x94,0x10,0x53,0x04,0x14,0x00,0x92,0x08,0x11,0x04,0x14,0x00,
+-	0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xd2,0x2a,0xd1,0x06,0xcf,0x06,
+-	0x00,0x00,0xd0,0x06,0xcf,0x06,0x00,0x00,0xcf,0x86,0x55,0x04,0x00,0x00,0x54,0x04,
+-	0x14,0x00,0x53,0x04,0x14,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x14,0x00,0x00,0x00,
+-	0x00,0x00,0x00,0x00,0xd1,0x06,0xcf,0x06,0x00,0x00,0xd0,0x06,0xcf,0x06,0x00,0x00,
+-	0xcf,0x86,0x55,0x04,0x15,0x00,0x54,0x04,0x15,0x00,0xd3,0x0c,0x92,0x08,0x11,0x04,
+-	0x15,0x00,0x00,0x00,0x00,0x00,0x52,0x04,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,
+-	0x00,0x00,0x15,0x00,0xd0,0xca,0xcf,0x86,0xd5,0xc2,0xd4,0x54,0xd3,0x06,0xcf,0x06,
+-	0x09,0x00,0xd2,0x06,0xcf,0x06,0x09,0x00,0xd1,0x24,0xd0,0x06,0xcf,0x06,0x09,0x00,
+-	0xcf,0x86,0x55,0x04,0x09,0x00,0x94,0x14,0x53,0x04,0x09,0x00,0x52,0x04,0x09,0x00,
+-	0x51,0x04,0x09,0x00,0x10,0x04,0x09,0x00,0x10,0x00,0x10,0x00,0xd0,0x1e,0xcf,0x86,
+-	0x95,0x18,0x54,0x04,0x10,0x00,0x53,0x04,0x10,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,
+-	0x10,0x00,0x11,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xd3,0x68,
+-	0xd2,0x46,0xd1,0x40,0xd0,0x06,0xcf,0x06,0x09,0x00,0xcf,0x86,0x55,0x04,0x09,0x00,
+-	0xd4,0x20,0xd3,0x10,0x92,0x0c,0x51,0x04,0x09,0x00,0x10,0x04,0x09,0x00,0x10,0x00,
+-	0x10,0x00,0x52,0x04,0x10,0x00,0x51,0x04,0x10,0x00,0x10,0x04,0x10,0x00,0x00,0x00,
+-	0x93,0x10,0x52,0x04,0x09,0x00,0x91,0x08,0x10,0x04,0x10,0x00,0x00,0x00,0x00,0x00,
+-	0x00,0x00,0xcf,0x06,0x11,0x00,0xd1,0x1c,0xd0,0x06,0xcf,0x06,0x11,0x00,0xcf,0x86,
+-	0x95,0x10,0x94,0x0c,0x93,0x08,0x12,0x04,0x11,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+-	0x00,0x00,0xcf,0x06,0x00,0x00,0xcf,0x06,0x00,0x00,0xcf,0x06,0x00,0x00,0xcf,0x86,
+-	0xd5,0x4c,0xd4,0x06,0xcf,0x06,0x0b,0x00,0xd3,0x40,0xd2,0x3a,0xd1,0x34,0xd0,0x2e,
+-	0xcf,0x86,0x55,0x04,0x0b,0x00,0xd4,0x14,0x53,0x04,0x0b,0x00,0x52,0x04,0x0b,0x00,
+-	0x51,0x04,0x0b,0x00,0x10,0x04,0x0b,0x00,0x00,0x00,0x53,0x04,0x15,0x00,0x92,0x0c,
+-	0x91,0x08,0x10,0x04,0x15,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,
+-	0xcf,0x06,0x00,0x00,0xcf,0x06,0x00,0x00,0xcf,0x06,0x00,0x00,0xcf,0x06,0x00,0x00,
+-	0xd1,0x4c,0xd0,0x44,0xcf,0x86,0xd5,0x3c,0xd4,0x06,0xcf,0x06,0x00,0x00,0xd3,0x06,
+-	0xcf,0x06,0x11,0x00,0xd2,0x2a,0xd1,0x24,0xd0,0x06,0xcf,0x06,0x11,0x00,0xcf,0x86,
+-	0x95,0x18,0x94,0x14,0x93,0x10,0x52,0x04,0x11,0x00,0x51,0x04,0x11,0x00,0x10,0x04,
+-	0x11,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xcf,0x06,
+-	0x00,0x00,0xcf,0x06,0x00,0x00,0xcf,0x86,0xcf,0x06,0x00,0x00,0xe0,0xd2,0x01,0xcf,
+-	0x86,0xd5,0x06,0xcf,0x06,0x00,0x00,0xe4,0x0b,0x01,0xd3,0x06,0xcf,0x06,0x0c,0x00,
+-	0xd2,0x84,0xd1,0x50,0xd0,0x1e,0xcf,0x86,0x55,0x04,0x0c,0x00,0x54,0x04,0x0c,0x00,
+-	0x53,0x04,0x0c,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x0c,0x00,0x00,0x00,0x00,0x00,
+-	0x00,0x00,0xcf,0x86,0xd5,0x18,0x54,0x04,0x10,0x00,0x53,0x04,0x10,0x00,0x52,0x04,
+-	0x10,0x00,0x51,0x04,0x10,0x00,0x10,0x04,0x10,0x00,0x00,0x00,0x94,0x14,0x53,0x04,
+-	0x10,0x00,0xd2,0x08,0x11,0x04,0x10,0x00,0x00,0x00,0x11,0x04,0x00,0x00,0x10,0x00,
+-	0x00,0x00,0xd0,0x06,0xcf,0x06,0x00,0x00,0xcf,0x86,0xd5,0x08,0x14,0x04,0x00,0x00,
+-	0x10,0x00,0xd4,0x10,0x53,0x04,0x10,0x00,0x52,0x04,0x10,0x00,0x11,0x04,0x10,0x00,
+-	0x00,0x00,0x93,0x10,0x52,0x04,0x10,0x01,0x91,0x08,0x10,0x04,0x10,0x01,0x10,0x00,
+-	0x00,0x00,0x00,0x00,0xd1,0x6c,0xd0,0x1e,0xcf,0x86,0x55,0x04,0x10,0x00,0x54,0x04,
+-	0x10,0x00,0x93,0x10,0x52,0x04,0x10,0xe6,0x51,0x04,0x10,0xe6,0x10,0x04,0x10,0xe6,
+-	0x10,0x00,0x10,0x00,0xcf,0x86,0xd5,0x24,0xd4,0x10,0x93,0x0c,0x52,0x04,0x10,0x00,
+-	0x11,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0x53,0x04,0x10,0x00,0x92,0x0c,0x51,0x04,
+-	0x10,0x00,0x10,0x04,0x00,0x00,0x10,0x00,0x10,0x00,0xd4,0x14,0x93,0x10,0x92,0x0c,
+-	0x51,0x04,0x10,0x00,0x10,0x04,0x00,0x00,0x10,0x00,0x10,0x00,0x10,0x00,0x53,0x04,
+-	0x10,0x00,0x52,0x04,0x00,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x10,0x00,0x10,0x00,
+-	0xd0,0x0e,0xcf,0x86,0x95,0x08,0x14,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0xcf,0x06,
+-	0x00,0x00,0xd3,0x06,0xcf,0x06,0x00,0x00,0xd2,0x30,0xd1,0x0c,0xd0,0x06,0xcf,0x06,
+-	0x00,0x00,0xcf,0x06,0x14,0x00,0xd0,0x1e,0xcf,0x86,0x95,0x18,0x54,0x04,0x14,0x00,
+-	0x53,0x04,0x14,0x00,0x92,0x0c,0x51,0x04,0x14,0x00,0x10,0x04,0x14,0x00,0x00,0x00,
+-	0x00,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xd1,0x4c,0xd0,0x06,0xcf,0x06,0x0d,0x00,
+-	0xcf,0x86,0xd5,0x2c,0x94,0x28,0xd3,0x10,0x52,0x04,0x0d,0x00,0x91,0x08,0x10,0x04,
+-	0x0d,0x00,0x15,0x00,0x15,0x00,0xd2,0x0c,0x51,0x04,0x15,0x00,0x10,0x04,0x15,0x00,
+-	0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x15,0x00,0x0d,0x00,0x54,0x04,
+-	0x0d,0x00,0x53,0x04,0x0d,0x00,0x52,0x04,0x0d,0x00,0x51,0x04,0x0d,0x00,0x10,0x04,
+-	0x0d,0x00,0x15,0x00,0xd0,0x1e,0xcf,0x86,0x95,0x18,0x94,0x14,0x53,0x04,0x15,0x00,
+-	0x52,0x04,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x0d,0x00,0x0d,0x00,
+-	0x00,0x00,0xcf,0x86,0x55,0x04,0x00,0x00,0x94,0x14,0x93,0x10,0x92,0x0c,0x91,0x08,
+-	0x10,0x04,0x12,0x00,0x13,0x00,0x15,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x86,
+-	0xcf,0x06,0x12,0x00,0xe2,0xc5,0x01,0xd1,0x8e,0xd0,0x86,0xcf,0x86,0xd5,0x48,0xd4,
+-	0x06,0xcf,0x06,0x12,0x00,0xd3,0x06,0xcf,0x06,0x12,0x00,0xd2,0x06,0xcf,0x06,0x12,
+-	0x00,0xd1,0x06,0xcf,0x06,0x12,0x00,0xd0,0x06,0xcf,0x06,0x12,0x00,0xcf,0x86,0x55,
+-	0x04,0x12,0x00,0xd4,0x14,0x53,0x04,0x12,0x00,0x52,0x04,0x12,0x00,0x91,0x08,0x10,
+-	0x04,0x12,0x00,0x14,0x00,0x14,0x00,0x93,0x0c,0x92,0x08,0x11,0x04,0x14,0x00,0x15,
+-	0x00,0x15,0x00,0x00,0x00,0xd4,0x36,0xd3,0x06,0xcf,0x06,0x12,0x00,0xd2,0x2a,0xd1,
+-	0x06,0xcf,0x06,0x12,0x00,0xd0,0x06,0xcf,0x06,0x12,0x00,0xcf,0x86,0x55,0x04,0x12,
+-	0x00,0x54,0x04,0x12,0x00,0x93,0x10,0x92,0x0c,0x51,0x04,0x12,0x00,0x10,0x04,0x12,
++	0x10,0x04,0x0c,0x00,0x00,0x00,0xd2,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,0x0c,0x00,
++	0x0c,0x00,0x51,0x04,0x0c,0x00,0x10,0x04,0x0c,0x00,0x00,0x00,0x93,0x18,0xd2,0x0c,
++	0x91,0x08,0x10,0x04,0x00,0x00,0x0c,0x00,0x0c,0x00,0x51,0x04,0x0c,0x00,0x10,0x04,
++	0x0c,0x00,0x00,0x00,0x00,0x00,0x94,0x20,0xd3,0x10,0x52,0x04,0x0c,0x00,0x51,0x04,
++	0x0c,0x00,0x10,0x04,0x0c,0x00,0x00,0x00,0x52,0x04,0x0c,0x00,0x51,0x04,0x0c,0x00,
++	0x10,0x04,0x0c,0x00,0x00,0x00,0x10,0x00,0xcf,0x86,0x55,0x04,0x10,0x00,0x94,0x10,
++	0x93,0x0c,0x52,0x04,0x11,0x00,0x11,0x04,0x10,0x00,0x15,0x00,0x00,0x00,0x11,0x00,
++	0xd0,0x06,0xcf,0x06,0x11,0x00,0xcf,0x86,0x55,0x04,0x0b,0x00,0xd4,0x14,0x53,0x04,
++	0x0b,0x00,0x52,0x04,0x0b,0x00,0x91,0x08,0x10,0x04,0x0b,0x00,0x0b,0x09,0x00,0x00,
++	0x53,0x04,0x0b,0x00,0x92,0x08,0x11,0x04,0x0b,0x00,0x00,0x00,0x00,0x00,0xcf,0x06,
++	0x02,0xff,0xff,0xcf,0x86,0xcf,0x06,0x02,0xff,0xff,0xd1,0x76,0xd0,0x09,0xcf,0x86,
++	0xcf,0x06,0x02,0xff,0xff,0xcf,0x86,0x85,0xd4,0x07,0xcf,0x06,0x02,0xff,0xff,0xd3,
++	0x07,0xcf,0x06,0x02,0xff,0xff,0xd2,0x07,0xcf,0x06,0x02,0xff,0xff,0xd1,0x07,0xcf,
++	0x06,0x02,0xff,0xff,0xd0,0x18,0xcf,0x86,0x55,0x05,0x02,0xff,0xff,0x94,0x0d,0x93,
++	0x09,0x12,0x05,0x02,0xff,0xff,0x00,0x00,0x00,0x00,0x0b,0x00,0xcf,0x86,0xd5,0x24,
++	0x94,0x20,0xd3,0x10,0x52,0x04,0x0b,0x00,0x51,0x04,0x0b,0x00,0x10,0x04,0x0b,0x00,
++	0x00,0x00,0x92,0x0c,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x0b,0x00,0x0b,0x00,
++	0x0b,0x00,0x54,0x04,0x0b,0x00,0x53,0x04,0x0b,0x00,0x12,0x04,0x0b,0x00,0x00,0x00,
++	0xd0,0x08,0xcf,0x86,0xcf,0x06,0x01,0x00,0xcf,0x86,0xd5,0x06,0xcf,0x06,0x01,0x00,
++	0xe4,0x9c,0x10,0xe3,0x16,0x08,0xd2,0x06,0xcf,0x06,0x01,0x00,0xe1,0x08,0x04,0xe0,
++	0x04,0x02,0xcf,0x86,0xe5,0x01,0x01,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,
++	0x08,0x01,0xff,0xe8,0xb1,0x88,0x00,0x01,0xff,0xe6,0x9b,0xb4,0x00,0x10,0x08,0x01,
++	0xff,0xe8,0xbb,0x8a,0x00,0x01,0xff,0xe8,0xb3,0x88,0x00,0xd1,0x10,0x10,0x08,0x01,
++	0xff,0xe6,0xbb,0x91,0x00,0x01,0xff,0xe4,0xb8,0xb2,0x00,0x10,0x08,0x01,0xff,0xe5,
++	0x8f,0xa5,0x00,0x01,0xff,0xe9,0xbe,0x9c,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,
++	0xff,0xe9,0xbe,0x9c,0x00,0x01,0xff,0xe5,0xa5,0x91,0x00,0x10,0x08,0x01,0xff,0xe9,
++	0x87,0x91,0x00,0x01,0xff,0xe5,0x96,0x87,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe5,
++	0xa5,0x88,0x00,0x01,0xff,0xe6,0x87,0xb6,0x00,0x10,0x08,0x01,0xff,0xe7,0x99,0xa9,
++	0x00,0x01,0xff,0xe7,0xbe,0x85,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,
++	0xff,0xe8,0x98,0xbf,0x00,0x01,0xff,0xe8,0x9e,0xba,0x00,0x10,0x08,0x01,0xff,0xe8,
++	0xa3,0xb8,0x00,0x01,0xff,0xe9,0x82,0x8f,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe6,
++	0xa8,0x82,0x00,0x01,0xff,0xe6,0xb4,0x9b,0x00,0x10,0x08,0x01,0xff,0xe7,0x83,0x99,
++	0x00,0x01,0xff,0xe7,0x8f,0x9e,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe8,
++	0x90,0xbd,0x00,0x01,0xff,0xe9,0x85,0xaa,0x00,0x10,0x08,0x01,0xff,0xe9,0xa7,0xb1,
++	0x00,0x01,0xff,0xe4,0xba,0x82,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe5,0x8d,0xb5,
++	0x00,0x01,0xff,0xe6,0xac,0x84,0x00,0x10,0x08,0x01,0xff,0xe7,0x88,0x9b,0x00,0x01,
++	0xff,0xe8,0x98,0xad,0x00,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,
++	0xff,0xe9,0xb8,0x9e,0x00,0x01,0xff,0xe5,0xb5,0x90,0x00,0x10,0x08,0x01,0xff,0xe6,
++	0xbf,0xab,0x00,0x01,0xff,0xe8,0x97,0x8d,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe8,
++	0xa5,0xa4,0x00,0x01,0xff,0xe6,0x8b,0x89,0x00,0x10,0x08,0x01,0xff,0xe8,0x87,0x98,
++	0x00,0x01,0xff,0xe8,0xa0,0x9f,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe5,
++	0xbb,0x8a,0x00,0x01,0xff,0xe6,0x9c,0x97,0x00,0x10,0x08,0x01,0xff,0xe6,0xb5,0xaa,
++	0x00,0x01,0xff,0xe7,0x8b,0xbc,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe9,0x83,0x8e,
++	0x00,0x01,0xff,0xe4,0xbe,0x86,0x00,0x10,0x08,0x01,0xff,0xe5,0x86,0xb7,0x00,0x01,
++	0xff,0xe5,0x8b,0x9e,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe6,
++	0x93,0x84,0x00,0x01,0xff,0xe6,0xab,0x93,0x00,0x10,0x08,0x01,0xff,0xe7,0x88,0x90,
++	0x00,0x01,0xff,0xe7,0x9b,0xa7,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe8,0x80,0x81,
++	0x00,0x01,0xff,0xe8,0x98,0x86,0x00,0x10,0x08,0x01,0xff,0xe8,0x99,0x9c,0x00,0x01,
++	0xff,0xe8,0xb7,0xaf,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe9,0x9c,0xb2,
++	0x00,0x01,0xff,0xe9,0xad,0xaf,0x00,0x10,0x08,0x01,0xff,0xe9,0xb7,0xba,0x00,0x01,
++	0xff,0xe7,0xa2,0x8c,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe7,0xa5,0xbf,0x00,0x01,
++	0xff,0xe7,0xb6,0xa0,0x00,0x10,0x08,0x01,0xff,0xe8,0x8f,0x89,0x00,0x01,0xff,0xe9,
++	0x8c,0x84,0x00,0xcf,0x86,0xe5,0x01,0x01,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,
++	0x10,0x08,0x01,0xff,0xe9,0xb9,0xbf,0x00,0x01,0xff,0xe8,0xab,0x96,0x00,0x10,0x08,
++	0x01,0xff,0xe5,0xa3,0x9f,0x00,0x01,0xff,0xe5,0xbc,0x84,0x00,0xd1,0x10,0x10,0x08,
++	0x01,0xff,0xe7,0xb1,0xa0,0x00,0x01,0xff,0xe8,0x81,0xbe,0x00,0x10,0x08,0x01,0xff,
++	0xe7,0x89,0xa2,0x00,0x01,0xff,0xe7,0xa3,0x8a,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,
++	0x01,0xff,0xe8,0xb3,0x82,0x00,0x01,0xff,0xe9,0x9b,0xb7,0x00,0x10,0x08,0x01,0xff,
++	0xe5,0xa3,0x98,0x00,0x01,0xff,0xe5,0xb1,0xa2,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,
++	0xe6,0xa8,0x93,0x00,0x01,0xff,0xe6,0xb7,0x9a,0x00,0x10,0x08,0x01,0xff,0xe6,0xbc,
++	0x8f,0x00,0x01,0xff,0xe7,0xb4,0xaf,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,
++	0x01,0xff,0xe7,0xb8,0xb7,0x00,0x01,0xff,0xe9,0x99,0x8b,0x00,0x10,0x08,0x01,0xff,
++	0xe5,0x8b,0x92,0x00,0x01,0xff,0xe8,0x82,0x8b,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,
++	0xe5,0x87,0x9c,0x00,0x01,0xff,0xe5,0x87,0x8c,0x00,0x10,0x08,0x01,0xff,0xe7,0xa8,
++	0x9c,0x00,0x01,0xff,0xe7,0xb6,0xbe,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,
++	0xe8,0x8f,0xb1,0x00,0x01,0xff,0xe9,0x99,0xb5,0x00,0x10,0x08,0x01,0xff,0xe8,0xae,
++	0x80,0x00,0x01,0xff,0xe6,0x8b,0x8f,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe6,0xa8,
++	0x82,0x00,0x01,0xff,0xe8,0xab,0xbe,0x00,0x10,0x08,0x01,0xff,0xe4,0xb8,0xb9,0x00,
++	0x01,0xff,0xe5,0xaf,0xa7,0x00,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,
++	0x01,0xff,0xe6,0x80,0x92,0x00,0x01,0xff,0xe7,0x8e,0x87,0x00,0x10,0x08,0x01,0xff,
++	0xe7,0x95,0xb0,0x00,0x01,0xff,0xe5,0x8c,0x97,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,
++	0xe7,0xa3,0xbb,0x00,0x01,0xff,0xe4,0xbe,0xbf,0x00,0x10,0x08,0x01,0xff,0xe5,0xbe,
++	0xa9,0x00,0x01,0xff,0xe4,0xb8,0x8d,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,
++	0xe6,0xb3,0x8c,0x00,0x01,0xff,0xe6,0x95,0xb8,0x00,0x10,0x08,0x01,0xff,0xe7,0xb4,
++	0xa2,0x00,0x01,0xff,0xe5,0x8f,0x83,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe5,0xa1,
++	0x9e,0x00,0x01,0xff,0xe7,0x9c,0x81,0x00,0x10,0x08,0x01,0xff,0xe8,0x91,0x89,0x00,
++	0x01,0xff,0xe8,0xaa,0xaa,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,
++	0xe6,0xae,0xba,0x00,0x01,0xff,0xe8,0xbe,0xb0,0x00,0x10,0x08,0x01,0xff,0xe6,0xb2,
++	0x88,0x00,0x01,0xff,0xe6,0x8b,0xbe,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe8,0x8b,
++	0xa5,0x00,0x01,0xff,0xe6,0x8e,0xa0,0x00,0x10,0x08,0x01,0xff,0xe7,0x95,0xa5,0x00,
++	0x01,0xff,0xe4,0xba,0xae,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe5,0x85,
++	0xa9,0x00,0x01,0xff,0xe5,0x87,0x89,0x00,0x10,0x08,0x01,0xff,0xe6,0xa2,0x81,0x00,
++	0x01,0xff,0xe7,0xb3,0xa7,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe8,0x89,0xaf,0x00,
++	0x01,0xff,0xe8,0xab,0x92,0x00,0x10,0x08,0x01,0xff,0xe9,0x87,0x8f,0x00,0x01,0xff,
++	0xe5,0x8b,0xb5,0x00,0xe0,0x04,0x02,0xcf,0x86,0xe5,0x01,0x01,0xd4,0x80,0xd3,0x40,
++	0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe5,0x91,0x82,0x00,0x01,0xff,0xe5,0xa5,
++	0xb3,0x00,0x10,0x08,0x01,0xff,0xe5,0xbb,0xac,0x00,0x01,0xff,0xe6,0x97,0x85,0x00,
++	0xd1,0x10,0x10,0x08,0x01,0xff,0xe6,0xbf,0xbe,0x00,0x01,0xff,0xe7,0xa4,0xaa,0x00,
++	0x10,0x08,0x01,0xff,0xe9,0x96,0xad,0x00,0x01,0xff,0xe9,0xa9,0xaa,0x00,0xd2,0x20,
++	0xd1,0x10,0x10,0x08,0x01,0xff,0xe9,0xba,0x97,0x00,0x01,0xff,0xe9,0xbb,0x8e,0x00,
++	0x10,0x08,0x01,0xff,0xe5,0x8a,0x9b,0x00,0x01,0xff,0xe6,0x9b,0x86,0x00,0xd1,0x10,
++	0x10,0x08,0x01,0xff,0xe6,0xad,0xb7,0x00,0x01,0xff,0xe8,0xbd,0xa2,0x00,0x10,0x08,
++	0x01,0xff,0xe5,0xb9,0xb4,0x00,0x01,0xff,0xe6,0x86,0x90,0x00,0xd3,0x40,0xd2,0x20,
++	0xd1,0x10,0x10,0x08,0x01,0xff,0xe6,0x88,0x80,0x00,0x01,0xff,0xe6,0x92,0x9a,0x00,
++	0x10,0x08,0x01,0xff,0xe6,0xbc,0xa3,0x00,0x01,0xff,0xe7,0x85,0x89,0x00,0xd1,0x10,
++	0x10,0x08,0x01,0xff,0xe7,0x92,0x89,0x00,0x01,0xff,0xe7,0xa7,0x8a,0x00,0x10,0x08,
++	0x01,0xff,0xe7,0xb7,0xb4,0x00,0x01,0xff,0xe8,0x81,0xaf,0x00,0xd2,0x20,0xd1,0x10,
++	0x10,0x08,0x01,0xff,0xe8,0xbc,0xa6,0x00,0x01,0xff,0xe8,0x93,0xae,0x00,0x10,0x08,
++	0x01,0xff,0xe9,0x80,0xa3,0x00,0x01,0xff,0xe9,0x8d,0x8a,0x00,0xd1,0x10,0x10,0x08,
++	0x01,0xff,0xe5,0x88,0x97,0x00,0x01,0xff,0xe5,0x8a,0xa3,0x00,0x10,0x08,0x01,0xff,
++	0xe5,0x92,0xbd,0x00,0x01,0xff,0xe7,0x83,0x88,0x00,0xd4,0x80,0xd3,0x40,0xd2,0x20,
++	0xd1,0x10,0x10,0x08,0x01,0xff,0xe8,0xa3,0x82,0x00,0x01,0xff,0xe8,0xaa,0xaa,0x00,
++	0x10,0x08,0x01,0xff,0xe5,0xbb,0x89,0x00,0x01,0xff,0xe5,0xbf,0xb5,0x00,0xd1,0x10,
++	0x10,0x08,0x01,0xff,0xe6,0x8d,0xbb,0x00,0x01,0xff,0xe6,0xae,0xae,0x00,0x10,0x08,
++	0x01,0xff,0xe7,0xb0,0xbe,0x00,0x01,0xff,0xe7,0x8d,0xb5,0x00,0xd2,0x20,0xd1,0x10,
++	0x10,0x08,0x01,0xff,0xe4,0xbb,0xa4,0x00,0x01,0xff,0xe5,0x9b,0xb9,0x00,0x10,0x08,
++	0x01,0xff,0xe5,0xaf,0xa7,0x00,0x01,0xff,0xe5,0xb6,0xba,0x00,0xd1,0x10,0x10,0x08,
++	0x01,0xff,0xe6,0x80,0x9c,0x00,0x01,0xff,0xe7,0x8e,0xb2,0x00,0x10,0x08,0x01,0xff,
++	0xe7,0x91,0xa9,0x00,0x01,0xff,0xe7,0xbe,0x9a,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,
++	0x10,0x08,0x01,0xff,0xe8,0x81,0x86,0x00,0x01,0xff,0xe9,0x88,0xb4,0x00,0x10,0x08,
++	0x01,0xff,0xe9,0x9b,0xb6,0x00,0x01,0xff,0xe9,0x9d,0x88,0x00,0xd1,0x10,0x10,0x08,
++	0x01,0xff,0xe9,0xa0,0x98,0x00,0x01,0xff,0xe4,0xbe,0x8b,0x00,0x10,0x08,0x01,0xff,
++	0xe7,0xa6,0xae,0x00,0x01,0xff,0xe9,0x86,0xb4,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,
++	0x01,0xff,0xe9,0x9a,0xb8,0x00,0x01,0xff,0xe6,0x83,0xa1,0x00,0x10,0x08,0x01,0xff,
++	0xe4,0xba,0x86,0x00,0x01,0xff,0xe5,0x83,0x9a,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,
++	0xe5,0xaf,0xae,0x00,0x01,0xff,0xe5,0xb0,0xbf,0x00,0x10,0x08,0x01,0xff,0xe6,0x96,
++	0x99,0x00,0x01,0xff,0xe6,0xa8,0x82,0x00,0xcf,0x86,0xe5,0x01,0x01,0xd4,0x80,0xd3,
++	0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe7,0x87,0x8e,0x00,0x01,0xff,0xe7,
++	0x99,0x82,0x00,0x10,0x08,0x01,0xff,0xe8,0x93,0xbc,0x00,0x01,0xff,0xe9,0x81,0xbc,
++	0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe9,0xbe,0x8d,0x00,0x01,0xff,0xe6,0x9a,0x88,
++	0x00,0x10,0x08,0x01,0xff,0xe9,0x98,0xae,0x00,0x01,0xff,0xe5,0x8a,0x89,0x00,0xd2,
++	0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe6,0x9d,0xbb,0x00,0x01,0xff,0xe6,0x9f,0xb3,
++	0x00,0x10,0x08,0x01,0xff,0xe6,0xb5,0x81,0x00,0x01,0xff,0xe6,0xba,0x9c,0x00,0xd1,
++	0x10,0x10,0x08,0x01,0xff,0xe7,0x90,0x89,0x00,0x01,0xff,0xe7,0x95,0x99,0x00,0x10,
++	0x08,0x01,0xff,0xe7,0xa1,0xab,0x00,0x01,0xff,0xe7,0xb4,0x90,0x00,0xd3,0x40,0xd2,
++	0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe9,0xa1,0x9e,0x00,0x01,0xff,0xe5,0x85,0xad,
++	0x00,0x10,0x08,0x01,0xff,0xe6,0x88,0xae,0x00,0x01,0xff,0xe9,0x99,0xb8,0x00,0xd1,
++	0x10,0x10,0x08,0x01,0xff,0xe5,0x80,0xab,0x00,0x01,0xff,0xe5,0xb4,0x99,0x00,0x10,
++	0x08,0x01,0xff,0xe6,0xb7,0xaa,0x00,0x01,0xff,0xe8,0xbc,0xaa,0x00,0xd2,0x20,0xd1,
++	0x10,0x10,0x08,0x01,0xff,0xe5,0xbe,0x8b,0x00,0x01,0xff,0xe6,0x85,0x84,0x00,0x10,
++	0x08,0x01,0xff,0xe6,0xa0,0x97,0x00,0x01,0xff,0xe7,0x8e,0x87,0x00,0xd1,0x10,0x10,
++	0x08,0x01,0xff,0xe9,0x9a,0x86,0x00,0x01,0xff,0xe5,0x88,0xa9,0x00,0x10,0x08,0x01,
++	0xff,0xe5,0x90,0x8f,0x00,0x01,0xff,0xe5,0xb1,0xa5,0x00,0xd4,0x80,0xd3,0x40,0xd2,
++	0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe6,0x98,0x93,0x00,0x01,0xff,0xe6,0x9d,0x8e,
++	0x00,0x10,0x08,0x01,0xff,0xe6,0xa2,0xa8,0x00,0x01,0xff,0xe6,0xb3,0xa5,0x00,0xd1,
++	0x10,0x10,0x08,0x01,0xff,0xe7,0x90,0x86,0x00,0x01,0xff,0xe7,0x97,0xa2,0x00,0x10,
++	0x08,0x01,0xff,0xe7,0xbd,0xb9,0x00,0x01,0xff,0xe8,0xa3,0x8f,0x00,0xd2,0x20,0xd1,
++	0x10,0x10,0x08,0x01,0xff,0xe8,0xa3,0xa1,0x00,0x01,0xff,0xe9,0x87,0x8c,0x00,0x10,
++	0x08,0x01,0xff,0xe9,0x9b,0xa2,0x00,0x01,0xff,0xe5,0x8c,0xbf,0x00,0xd1,0x10,0x10,
++	0x08,0x01,0xff,0xe6,0xba,0xba,0x00,0x01,0xff,0xe5,0x90,0x9d,0x00,0x10,0x08,0x01,
++	0xff,0xe7,0x87,0x90,0x00,0x01,0xff,0xe7,0x92,0x98,0x00,0xd3,0x40,0xd2,0x20,0xd1,
++	0x10,0x10,0x08,0x01,0xff,0xe8,0x97,0xba,0x00,0x01,0xff,0xe9,0x9a,0xa3,0x00,0x10,
++	0x08,0x01,0xff,0xe9,0xb1,0x97,0x00,0x01,0xff,0xe9,0xba,0x9f,0x00,0xd1,0x10,0x10,
++	0x08,0x01,0xff,0xe6,0x9e,0x97,0x00,0x01,0xff,0xe6,0xb7,0x8b,0x00,0x10,0x08,0x01,
++	0xff,0xe8,0x87,0xa8,0x00,0x01,0xff,0xe7,0xab,0x8b,0x00,0xd2,0x20,0xd1,0x10,0x10,
++	0x08,0x01,0xff,0xe7,0xac,0xa0,0x00,0x01,0xff,0xe7,0xb2,0x92,0x00,0x10,0x08,0x01,
++	0xff,0xe7,0x8b,0x80,0x00,0x01,0xff,0xe7,0x82,0x99,0x00,0xd1,0x10,0x10,0x08,0x01,
++	0xff,0xe8,0xad,0x98,0x00,0x01,0xff,0xe4,0xbb,0x80,0x00,0x10,0x08,0x01,0xff,0xe8,
++	0x8c,0xb6,0x00,0x01,0xff,0xe5,0x88,0xba,0x00,0xe2,0xad,0x06,0xe1,0xc4,0x03,0xe0,
++	0xcb,0x01,0xcf,0x86,0xd5,0xe4,0xd4,0x74,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,
++	0x01,0xff,0xe5,0x88,0x87,0x00,0x01,0xff,0xe5,0xba,0xa6,0x00,0x10,0x08,0x01,0xff,
++	0xe6,0x8b,0x93,0x00,0x01,0xff,0xe7,0xb3,0x96,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,
++	0xe5,0xae,0x85,0x00,0x01,0xff,0xe6,0xb4,0x9e,0x00,0x10,0x08,0x01,0xff,0xe6,0x9a,
++	0xb4,0x00,0x01,0xff,0xe8,0xbc,0xbb,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,
++	0xe8,0xa1,0x8c,0x00,0x01,0xff,0xe9,0x99,0x8d,0x00,0x10,0x08,0x01,0xff,0xe8,0xa6,
++	0x8b,0x00,0x01,0xff,0xe5,0xbb,0x93,0x00,0x91,0x10,0x10,0x08,0x01,0xff,0xe5,0x85,
++	0x80,0x00,0x01,0xff,0xe5,0x97,0x80,0x00,0x01,0x00,0xd3,0x34,0xd2,0x18,0xd1,0x0c,
++	0x10,0x08,0x01,0xff,0xe5,0xa1,0x9a,0x00,0x01,0x00,0x10,0x08,0x01,0xff,0xe6,0x99,
++	0xb4,0x00,0x01,0x00,0xd1,0x0c,0x10,0x04,0x01,0x00,0x01,0xff,0xe5,0x87,0x9e,0x00,
++	0x10,0x08,0x01,0xff,0xe7,0x8c,0xaa,0x00,0x01,0xff,0xe7,0x9b,0x8a,0x00,0xd2,0x20,
++	0xd1,0x10,0x10,0x08,0x01,0xff,0xe7,0xa4,0xbc,0x00,0x01,0xff,0xe7,0xa5,0x9e,0x00,
++	0x10,0x08,0x01,0xff,0xe7,0xa5,0xa5,0x00,0x01,0xff,0xe7,0xa6,0x8f,0x00,0xd1,0x10,
++	0x10,0x08,0x01,0xff,0xe9,0x9d,0x96,0x00,0x01,0xff,0xe7,0xb2,0xbe,0x00,0x10,0x08,
++	0x01,0xff,0xe7,0xbe,0xbd,0x00,0x01,0x00,0xd4,0x64,0xd3,0x30,0xd2,0x18,0xd1,0x0c,
++	0x10,0x08,0x01,0xff,0xe8,0x98,0x92,0x00,0x01,0x00,0x10,0x08,0x01,0xff,0xe8,0xab,
++	0xb8,0x00,0x01,0x00,0xd1,0x0c,0x10,0x04,0x01,0x00,0x01,0xff,0xe9,0x80,0xb8,0x00,
++	0x10,0x08,0x01,0xff,0xe9,0x83,0xbd,0x00,0x01,0x00,0xd2,0x14,0x51,0x04,0x01,0x00,
++	0x10,0x08,0x01,0xff,0xe9,0xa3,0xaf,0x00,0x01,0xff,0xe9,0xa3,0xbc,0x00,0xd1,0x10,
++	0x10,0x08,0x01,0xff,0xe9,0xa4,0xa8,0x00,0x01,0xff,0xe9,0xb6,0xb4,0x00,0x10,0x08,
++	0x0d,0xff,0xe9,0x83,0x9e,0x00,0x0d,0xff,0xe9,0x9a,0xb7,0x00,0xd3,0x40,0xd2,0x20,
++	0xd1,0x10,0x10,0x08,0x06,0xff,0xe4,0xbe,0xae,0x00,0x06,0xff,0xe5,0x83,0xa7,0x00,
++	0x10,0x08,0x06,0xff,0xe5,0x85,0x8d,0x00,0x06,0xff,0xe5,0x8b,0x89,0x00,0xd1,0x10,
++	0x10,0x08,0x06,0xff,0xe5,0x8b,0xa4,0x00,0x06,0xff,0xe5,0x8d,0x91,0x00,0x10,0x08,
++	0x06,0xff,0xe5,0x96,0x9d,0x00,0x06,0xff,0xe5,0x98,0x86,0x00,0xd2,0x20,0xd1,0x10,
++	0x10,0x08,0x06,0xff,0xe5,0x99,0xa8,0x00,0x06,0xff,0xe5,0xa1,0x80,0x00,0x10,0x08,
++	0x06,0xff,0xe5,0xa2,0xa8,0x00,0x06,0xff,0xe5,0xb1,0xa4,0x00,0xd1,0x10,0x10,0x08,
++	0x06,0xff,0xe5,0xb1,0xae,0x00,0x06,0xff,0xe6,0x82,0x94,0x00,0x10,0x08,0x06,0xff,
++	0xe6,0x85,0xa8,0x00,0x06,0xff,0xe6,0x86,0x8e,0x00,0xcf,0x86,0xe5,0x01,0x01,0xd4,
++	0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x06,0xff,0xe6,0x87,0xb2,0x00,0x06,
++	0xff,0xe6,0x95,0x8f,0x00,0x10,0x08,0x06,0xff,0xe6,0x97,0xa2,0x00,0x06,0xff,0xe6,
++	0x9a,0x91,0x00,0xd1,0x10,0x10,0x08,0x06,0xff,0xe6,0xa2,0x85,0x00,0x06,0xff,0xe6,
++	0xb5,0xb7,0x00,0x10,0x08,0x06,0xff,0xe6,0xb8,0x9a,0x00,0x06,0xff,0xe6,0xbc,0xa2,
++	0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x06,0xff,0xe7,0x85,0xae,0x00,0x06,0xff,0xe7,
++	0x88,0xab,0x00,0x10,0x08,0x06,0xff,0xe7,0x90,0xa2,0x00,0x06,0xff,0xe7,0xa2,0x91,
++	0x00,0xd1,0x10,0x10,0x08,0x06,0xff,0xe7,0xa4,0xbe,0x00,0x06,0xff,0xe7,0xa5,0x89,
++	0x00,0x10,0x08,0x06,0xff,0xe7,0xa5,0x88,0x00,0x06,0xff,0xe7,0xa5,0x90,0x00,0xd3,
++	0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x06,0xff,0xe7,0xa5,0x96,0x00,0x06,0xff,0xe7,
++	0xa5,0x9d,0x00,0x10,0x08,0x06,0xff,0xe7,0xa6,0x8d,0x00,0x06,0xff,0xe7,0xa6,0x8e,
++	0x00,0xd1,0x10,0x10,0x08,0x06,0xff,0xe7,0xa9,0x80,0x00,0x06,0xff,0xe7,0xaa,0x81,
++	0x00,0x10,0x08,0x06,0xff,0xe7,0xaf,0x80,0x00,0x06,0xff,0xe7,0xb7,0xb4,0x00,0xd2,
++	0x20,0xd1,0x10,0x10,0x08,0x06,0xff,0xe7,0xb8,0x89,0x00,0x06,0xff,0xe7,0xb9,0x81,
++	0x00,0x10,0x08,0x06,0xff,0xe7,0xbd,0xb2,0x00,0x06,0xff,0xe8,0x80,0x85,0x00,0xd1,
++	0x10,0x10,0x08,0x06,0xff,0xe8,0x87,0xad,0x00,0x06,0xff,0xe8,0x89,0xb9,0x00,0x10,
++	0x08,0x06,0xff,0xe8,0x89,0xb9,0x00,0x06,0xff,0xe8,0x91,0x97,0x00,0xd4,0x75,0xd3,
++	0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x06,0xff,0xe8,0xa4,0x90,0x00,0x06,0xff,0xe8,
++	0xa6,0x96,0x00,0x10,0x08,0x06,0xff,0xe8,0xac,0x81,0x00,0x06,0xff,0xe8,0xac,0xb9,
++	0x00,0xd1,0x10,0x10,0x08,0x06,0xff,0xe8,0xb3,0x93,0x00,0x06,0xff,0xe8,0xb4,0x88,
++	0x00,0x10,0x08,0x06,0xff,0xe8,0xbe,0xb6,0x00,0x06,0xff,0xe9,0x80,0xb8,0x00,0xd2,
++	0x20,0xd1,0x10,0x10,0x08,0x06,0xff,0xe9,0x9b,0xa3,0x00,0x06,0xff,0xe9,0x9f,0xbf,
++	0x00,0x10,0x08,0x06,0xff,0xe9,0xa0,0xbb,0x00,0x0b,0xff,0xe6,0x81,0xb5,0x00,0x91,
++	0x11,0x10,0x09,0x0b,0xff,0xf0,0xa4,0x8b,0xae,0x00,0x0b,0xff,0xe8,0x88,0x98,0x00,
++	0x00,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x08,0xff,0xe4,0xb8,0xa6,0x00,
++	0x08,0xff,0xe5,0x86,0xb5,0x00,0x10,0x08,0x08,0xff,0xe5,0x85,0xa8,0x00,0x08,0xff,
++	0xe4,0xbe,0x80,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,0xe5,0x85,0x85,0x00,0x08,0xff,
++	0xe5,0x86,0x80,0x00,0x10,0x08,0x08,0xff,0xe5,0x8b,0x87,0x00,0x08,0xff,0xe5,0x8b,
++	0xba,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x08,0xff,0xe5,0x96,0x9d,0x00,0x08,0xff,
++	0xe5,0x95,0x95,0x00,0x10,0x08,0x08,0xff,0xe5,0x96,0x99,0x00,0x08,0xff,0xe5,0x97,
++	0xa2,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,0xe5,0xa1,0x9a,0x00,0x08,0xff,0xe5,0xa2,
++	0xb3,0x00,0x10,0x08,0x08,0xff,0xe5,0xa5,0x84,0x00,0x08,0xff,0xe5,0xa5,0x94,0x00,
++	0xe0,0x04,0x02,0xcf,0x86,0xe5,0x01,0x01,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,
++	0x10,0x08,0x08,0xff,0xe5,0xa9,0xa2,0x00,0x08,0xff,0xe5,0xac,0xa8,0x00,0x10,0x08,
++	0x08,0xff,0xe5,0xbb,0x92,0x00,0x08,0xff,0xe5,0xbb,0x99,0x00,0xd1,0x10,0x10,0x08,
++	0x08,0xff,0xe5,0xbd,0xa9,0x00,0x08,0xff,0xe5,0xbe,0xad,0x00,0x10,0x08,0x08,0xff,
++	0xe6,0x83,0x98,0x00,0x08,0xff,0xe6,0x85,0x8e,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,
++	0x08,0xff,0xe6,0x84,0x88,0x00,0x08,0xff,0xe6,0x86,0x8e,0x00,0x10,0x08,0x08,0xff,
++	0xe6,0x85,0xa0,0x00,0x08,0xff,0xe6,0x87,0xb2,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,
++	0xe6,0x88,0xb4,0x00,0x08,0xff,0xe6,0x8f,0x84,0x00,0x10,0x08,0x08,0xff,0xe6,0x90,
++	0x9c,0x00,0x08,0xff,0xe6,0x91,0x92,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,
++	0x08,0xff,0xe6,0x95,0x96,0x00,0x08,0xff,0xe6,0x99,0xb4,0x00,0x10,0x08,0x08,0xff,
++	0xe6,0x9c,0x97,0x00,0x08,0xff,0xe6,0x9c,0x9b,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,
++	0xe6,0x9d,0x96,0x00,0x08,0xff,0xe6,0xad,0xb9,0x00,0x10,0x08,0x08,0xff,0xe6,0xae,
++	0xba,0x00,0x08,0xff,0xe6,0xb5,0x81,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x08,0xff,
++	0xe6,0xbb,0x9b,0x00,0x08,0xff,0xe6,0xbb,0x8b,0x00,0x10,0x08,0x08,0xff,0xe6,0xbc,
++	0xa2,0x00,0x08,0xff,0xe7,0x80,0x9e,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,0xe7,0x85,
++	0xae,0x00,0x08,0xff,0xe7,0x9e,0xa7,0x00,0x10,0x08,0x08,0xff,0xe7,0x88,0xb5,0x00,
++	0x08,0xff,0xe7,0x8a,0xaf,0x00,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,
++	0x08,0xff,0xe7,0x8c,0xaa,0x00,0x08,0xff,0xe7,0x91,0xb1,0x00,0x10,0x08,0x08,0xff,
++	0xe7,0x94,0x86,0x00,0x08,0xff,0xe7,0x94,0xbb,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,
++	0xe7,0x98,0x9d,0x00,0x08,0xff,0xe7,0x98,0x9f,0x00,0x10,0x08,0x08,0xff,0xe7,0x9b,
++	0x8a,0x00,0x08,0xff,0xe7,0x9b,0x9b,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x08,0xff,
++	0xe7,0x9b,0xb4,0x00,0x08,0xff,0xe7,0x9d,0x8a,0x00,0x10,0x08,0x08,0xff,0xe7,0x9d,
++	0x80,0x00,0x08,0xff,0xe7,0xa3,0x8c,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,0xe7,0xaa,
++	0xb1,0x00,0x08,0xff,0xe7,0xaf,0x80,0x00,0x10,0x08,0x08,0xff,0xe7,0xb1,0xbb,0x00,
++	0x08,0xff,0xe7,0xb5,0x9b,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x08,0xff,
++	0xe7,0xb7,0xb4,0x00,0x08,0xff,0xe7,0xbc,0xbe,0x00,0x10,0x08,0x08,0xff,0xe8,0x80,
++	0x85,0x00,0x08,0xff,0xe8,0x8d,0x92,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,0xe8,0x8f,
++	0xaf,0x00,0x08,0xff,0xe8,0x9d,0xb9,0x00,0x10,0x08,0x08,0xff,0xe8,0xa5,0x81,0x00,
++	0x08,0xff,0xe8,0xa6,0x86,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x08,0xff,0xe8,0xa6,
++	0x96,0x00,0x08,0xff,0xe8,0xaa,0xbf,0x00,0x10,0x08,0x08,0xff,0xe8,0xab,0xb8,0x00,
++	0x08,0xff,0xe8,0xab,0x8b,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,0xe8,0xac,0x81,0x00,
++	0x08,0xff,0xe8,0xab,0xbe,0x00,0x10,0x08,0x08,0xff,0xe8,0xab,0xad,0x00,0x08,0xff,
++	0xe8,0xac,0xb9,0x00,0xcf,0x86,0x95,0xde,0xd4,0x81,0xd3,0x40,0xd2,0x20,0xd1,0x10,
++	0x10,0x08,0x08,0xff,0xe8,0xae,0x8a,0x00,0x08,0xff,0xe8,0xb4,0x88,0x00,0x10,0x08,
++	0x08,0xff,0xe8,0xbc,0xb8,0x00,0x08,0xff,0xe9,0x81,0xb2,0x00,0xd1,0x10,0x10,0x08,
++	0x08,0xff,0xe9,0x86,0x99,0x00,0x08,0xff,0xe9,0x89,0xb6,0x00,0x10,0x08,0x08,0xff,
++	0xe9,0x99,0xbc,0x00,0x08,0xff,0xe9,0x9b,0xa3,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,
++	0x08,0xff,0xe9,0x9d,0x96,0x00,0x08,0xff,0xe9,0x9f,0x9b,0x00,0x10,0x08,0x08,0xff,
++	0xe9,0x9f,0xbf,0x00,0x08,0xff,0xe9,0xa0,0x8b,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,
++	0xe9,0xa0,0xbb,0x00,0x08,0xff,0xe9,0xac,0x92,0x00,0x10,0x08,0x08,0xff,0xe9,0xbe,
++	0x9c,0x00,0x08,0xff,0xf0,0xa2,0xa1,0x8a,0x00,0xd3,0x45,0xd2,0x22,0xd1,0x12,0x10,
++	0x09,0x08,0xff,0xf0,0xa2,0xa1,0x84,0x00,0x08,0xff,0xf0,0xa3,0x8f,0x95,0x00,0x10,
++	0x08,0x08,0xff,0xe3,0xae,0x9d,0x00,0x08,0xff,0xe4,0x80,0x98,0x00,0xd1,0x11,0x10,
++	0x08,0x08,0xff,0xe4,0x80,0xb9,0x00,0x08,0xff,0xf0,0xa5,0x89,0x89,0x00,0x10,0x09,
++	0x08,0xff,0xf0,0xa5,0xb3,0x90,0x00,0x08,0xff,0xf0,0xa7,0xbb,0x93,0x00,0x92,0x14,
++	0x91,0x10,0x10,0x08,0x08,0xff,0xe9,0xbd,0x83,0x00,0x08,0xff,0xe9,0xbe,0x8e,0x00,
++	0x00,0x00,0x00,0x00,0x00,0x00,0xe1,0x94,0x01,0xe0,0x08,0x01,0xcf,0x86,0xd5,0x42,
++	0xd4,0x14,0x93,0x10,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,
++	0x00,0x00,0x00,0x00,0xd3,0x10,0x92,0x0c,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,
++	0x01,0x00,0x01,0x00,0x52,0x04,0x00,0x00,0xd1,0x0d,0x10,0x04,0x00,0x00,0x04,0xff,
++	0xd7,0x99,0xd6,0xb4,0x00,0x10,0x04,0x01,0x1a,0x01,0xff,0xd7,0xb2,0xd6,0xb7,0x00,
++	0xd4,0x42,0x53,0x04,0x01,0x00,0xd2,0x16,0x51,0x04,0x01,0x00,0x10,0x09,0x01,0xff,
++	0xd7,0xa9,0xd7,0x81,0x00,0x01,0xff,0xd7,0xa9,0xd7,0x82,0x00,0xd1,0x16,0x10,0x0b,
++	0x01,0xff,0xd7,0xa9,0xd6,0xbc,0xd7,0x81,0x00,0x01,0xff,0xd7,0xa9,0xd6,0xbc,0xd7,
++	0x82,0x00,0x10,0x09,0x01,0xff,0xd7,0x90,0xd6,0xb7,0x00,0x01,0xff,0xd7,0x90,0xd6,
++	0xb8,0x00,0xd3,0x43,0xd2,0x24,0xd1,0x12,0x10,0x09,0x01,0xff,0xd7,0x90,0xd6,0xbc,
++	0x00,0x01,0xff,0xd7,0x91,0xd6,0xbc,0x00,0x10,0x09,0x01,0xff,0xd7,0x92,0xd6,0xbc,
++	0x00,0x01,0xff,0xd7,0x93,0xd6,0xbc,0x00,0xd1,0x12,0x10,0x09,0x01,0xff,0xd7,0x94,
++	0xd6,0xbc,0x00,0x01,0xff,0xd7,0x95,0xd6,0xbc,0x00,0x10,0x09,0x01,0xff,0xd7,0x96,
++	0xd6,0xbc,0x00,0x00,0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x01,0xff,0xd7,0x98,0xd6,
++	0xbc,0x00,0x01,0xff,0xd7,0x99,0xd6,0xbc,0x00,0x10,0x09,0x01,0xff,0xd7,0x9a,0xd6,
++	0xbc,0x00,0x01,0xff,0xd7,0x9b,0xd6,0xbc,0x00,0xd1,0x0d,0x10,0x09,0x01,0xff,0xd7,
++	0x9c,0xd6,0xbc,0x00,0x00,0x00,0x10,0x09,0x01,0xff,0xd7,0x9e,0xd6,0xbc,0x00,0x00,
++	0x00,0xcf,0x86,0x95,0x85,0x94,0x81,0xd3,0x3e,0xd2,0x1f,0xd1,0x12,0x10,0x09,0x01,
++	0xff,0xd7,0xa0,0xd6,0xbc,0x00,0x01,0xff,0xd7,0xa1,0xd6,0xbc,0x00,0x10,0x04,0x00,
++	0x00,0x01,0xff,0xd7,0xa3,0xd6,0xbc,0x00,0xd1,0x0d,0x10,0x09,0x01,0xff,0xd7,0xa4,
++	0xd6,0xbc,0x00,0x00,0x00,0x10,0x09,0x01,0xff,0xd7,0xa6,0xd6,0xbc,0x00,0x01,0xff,
++	0xd7,0xa7,0xd6,0xbc,0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x01,0xff,0xd7,0xa8,0xd6,
++	0xbc,0x00,0x01,0xff,0xd7,0xa9,0xd6,0xbc,0x00,0x10,0x09,0x01,0xff,0xd7,0xaa,0xd6,
++	0xbc,0x00,0x01,0xff,0xd7,0x95,0xd6,0xb9,0x00,0xd1,0x12,0x10,0x09,0x01,0xff,0xd7,
++	0x91,0xd6,0xbf,0x00,0x01,0xff,0xd7,0x9b,0xd6,0xbf,0x00,0x10,0x09,0x01,0xff,0xd7,
++	0xa4,0xd6,0xbf,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd0,0x1a,0xcf,0x86,0x55,0x04,
++	0x01,0x00,0x54,0x04,0x01,0x00,0x93,0x0c,0x92,0x08,0x11,0x04,0x01,0x00,0x0c,0x00,
++	0x0c,0x00,0x0c,0x00,0xcf,0x86,0x95,0x24,0xd4,0x10,0x93,0x0c,0x92,0x08,0x11,0x04,
++	0x0c,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x93,0x10,0x92,0x0c,0x51,0x04,0x00,0x00,
++	0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd3,0x5a,0xd2,0x06,
++	0xcf,0x06,0x01,0x00,0xd1,0x14,0xd0,0x06,0xcf,0x06,0x01,0x00,0xcf,0x86,0x95,0x08,
++	0x14,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0xd0,0x1a,0xcf,0x86,0x95,0x14,0x54,0x04,
++	0x01,0x00,0x93,0x0c,0x92,0x08,0x11,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x01,0x00,
++	0x01,0x00,0xcf,0x86,0xd5,0x0c,0x94,0x08,0x13,0x04,0x01,0x00,0x00,0x00,0x05,0x00,
++	0x54,0x04,0x05,0x00,0x53,0x04,0x01,0x00,0x52,0x04,0x01,0x00,0x91,0x08,0x10,0x04,
++	0x06,0x00,0x07,0x00,0x00,0x00,0xd2,0xce,0xd1,0xa5,0xd0,0x37,0xcf,0x86,0xd5,0x15,
++	0x54,0x05,0x06,0xff,0x00,0x53,0x04,0x08,0x00,0x92,0x08,0x11,0x04,0x08,0x00,0x00,
++	0x00,0x00,0x00,0x94,0x1c,0xd3,0x10,0x52,0x04,0x01,0xe6,0x51,0x04,0x0a,0xe6,0x10,
++	0x04,0x0a,0xe6,0x10,0xdc,0x52,0x04,0x10,0xdc,0x11,0x04,0x10,0xdc,0x11,0xe6,0x01,
++	0x00,0xcf,0x86,0xd5,0x38,0xd4,0x24,0xd3,0x14,0x52,0x04,0x01,0x00,0xd1,0x08,0x10,
++	0x04,0x01,0x00,0x06,0x00,0x10,0x04,0x06,0x00,0x07,0x00,0x92,0x0c,0x91,0x08,0x10,
++	0x04,0x07,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x93,0x10,0x92,0x0c,0x51,0x04,0x01,
++	0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x01,0x00,0xd4,0x18,0xd3,0x10,0x52,
++	0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x12,0x04,0x01,
++	0x00,0x00,0x00,0x93,0x18,0xd2,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x06,
++	0x00,0x91,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x01,0x00,0xd0,0x06,0xcf,
++	0x06,0x01,0x00,0xcf,0x86,0x55,0x04,0x01,0x00,0x54,0x04,0x01,0x00,0x53,0x04,0x01,
++	0x00,0x52,0x04,0x01,0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x10,0x04,0x00,
++	0x00,0x01,0xff,0x00,0xd1,0x50,0xd0,0x1e,0xcf,0x86,0x95,0x18,0x94,0x14,0x93,0x10,
++	0x92,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,
++	0x01,0x00,0x01,0x00,0xcf,0x86,0xd5,0x18,0x54,0x04,0x01,0x00,0x53,0x04,0x01,0x00,
++	0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x06,0x00,0x94,0x14,
++	0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x06,0x00,0x01,0x00,0x01,0x00,0x01,0x00,
++	0x01,0x00,0x01,0x00,0xd0,0x2f,0xcf,0x86,0x55,0x04,0x01,0x00,0xd4,0x15,0x93,0x11,
++	0x92,0x0d,0x91,0x09,0x10,0x05,0x01,0xff,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,
++	0x00,0x53,0x04,0x01,0x00,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,
++	0x00,0x00,0x00,0xcf,0x86,0xd5,0x38,0xd4,0x18,0xd3,0x0c,0x92,0x08,0x11,0x04,0x00,
++	0x00,0x01,0x00,0x01,0x00,0x92,0x08,0x11,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0xd3,
++	0x0c,0x92,0x08,0x11,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0xd2,0x08,0x11,0x04,0x00,
++	0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x00,0x00,0xd4,0x20,0xd3,
++	0x10,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x52,
++	0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x53,0x05,0x00,
++	0xff,0x00,0xd2,0x0d,0x91,0x09,0x10,0x05,0x00,0xff,0x00,0x04,0x00,0x04,0x00,0x91,
++	0x08,0x10,0x04,0x03,0x00,0x01,0x00,0x01,0x00,0x83,0xe2,0x46,0x3e,0xe1,0x1f,0x3b,
++	0xe0,0x9c,0x39,0xcf,0x86,0xe5,0x40,0x26,0xc4,0xe3,0x16,0x14,0xe2,0xef,0x11,0xe1,
++	0xd0,0x10,0xe0,0x60,0x07,0xcf,0x86,0xe5,0x53,0x03,0xe4,0x4c,0x02,0xe3,0x3d,0x01,
++	0xd2,0x94,0xd1,0x70,0xd0,0x4a,0xcf,0x86,0xd5,0x18,0x94,0x14,0x53,0x04,0x07,0x00,
++	0x52,0x04,0x07,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x07,0x00,0x07,0x00,0x07,0x00,
++	0xd4,0x14,0x93,0x10,0x52,0x04,0x07,0x00,0x51,0x04,0x07,0x00,0x10,0x04,0x07,0x00,
++	0x00,0x00,0x07,0x00,0x53,0x04,0x07,0x00,0xd2,0x0c,0x51,0x04,0x07,0x00,0x10,0x04,
++	0x07,0x00,0x00,0x00,0x51,0x04,0x07,0x00,0x10,0x04,0x00,0x00,0x07,0x00,0xcf,0x86,
++	0x95,0x20,0xd4,0x10,0x53,0x04,0x07,0x00,0x52,0x04,0x07,0x00,0x11,0x04,0x07,0x00,
++	0x00,0x00,0x53,0x04,0x07,0x00,0x52,0x04,0x07,0x00,0x11,0x04,0x07,0x00,0x00,0x00,
++	0x00,0x00,0xd0,0x06,0xcf,0x06,0x07,0x00,0xcf,0x86,0x55,0x04,0x07,0x00,0x54,0x04,
++	0x07,0x00,0x53,0x04,0x07,0x00,0x92,0x0c,0x51,0x04,0x07,0x00,0x10,0x04,0x07,0x00,
++	0x00,0x00,0x00,0x00,0xd1,0x40,0xd0,0x3a,0xcf,0x86,0xd5,0x20,0x94,0x1c,0x93,0x18,
++	0xd2,0x0c,0x51,0x04,0x07,0x00,0x10,0x04,0x07,0x00,0x00,0x00,0x51,0x04,0x00,0x00,
++	0x10,0x04,0x00,0x00,0x07,0x00,0x07,0x00,0x07,0x00,0x54,0x04,0x07,0x00,0x93,0x10,
++	0x52,0x04,0x07,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x07,0x00,0x07,0x00,
++	0xcf,0x06,0x08,0x00,0xd0,0x46,0xcf,0x86,0xd5,0x2c,0xd4,0x20,0x53,0x04,0x08,0x00,
++	0xd2,0x0c,0x51,0x04,0x08,0x00,0x10,0x04,0x08,0x00,0x10,0x00,0xd1,0x08,0x10,0x04,
++	0x10,0x00,0x12,0x00,0x10,0x04,0x12,0x00,0x00,0x00,0x53,0x04,0x0a,0x00,0x12,0x04,
++	0x0a,0x00,0x00,0x00,0x94,0x14,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x10,0x00,
++	0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x86,0xd5,0x08,0x14,0x04,
++	0x00,0x00,0x0a,0x00,0x54,0x04,0x0a,0x00,0x53,0x04,0x0a,0x00,0x52,0x04,0x0a,0x00,
++	0x91,0x08,0x10,0x04,0x0a,0x00,0x0a,0xdc,0x00,0x00,0xd2,0x5e,0xd1,0x06,0xcf,0x06,
++	0x00,0x00,0xd0,0x1e,0xcf,0x86,0x95,0x18,0x54,0x04,0x0a,0x00,0x53,0x04,0x0a,0x00,
++	0x52,0x04,0x0a,0x00,0x91,0x08,0x10,0x04,0x0a,0x00,0x00,0x00,0x00,0x00,0x0a,0x00,
++	0xcf,0x86,0xd5,0x18,0x54,0x04,0x0a,0x00,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,
++	0x0a,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xd4,0x14,0x93,0x10,0x92,0x0c,
++	0x91,0x08,0x10,0x04,0x10,0xdc,0x10,0x00,0x10,0x00,0x10,0x00,0x10,0x00,0x53,0x04,
++	0x10,0x00,0x12,0x04,0x10,0x00,0x00,0x00,0xd1,0x70,0xd0,0x36,0xcf,0x86,0xd5,0x18,
++	0x54,0x04,0x05,0x00,0x53,0x04,0x05,0x00,0x52,0x04,0x05,0x00,0x51,0x04,0x05,0x00,
++	0x10,0x04,0x05,0x00,0x10,0x00,0x94,0x18,0xd3,0x08,0x12,0x04,0x05,0x00,0x00,0x00,
++	0x52,0x04,0x00,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x13,0x00,0x13,0x00,0x05,0x00,
++	0xcf,0x86,0xd5,0x18,0x94,0x14,0x53,0x04,0x05,0x00,0x92,0x0c,0x51,0x04,0x05,0x00,
++	0x10,0x04,0x05,0x00,0x00,0x00,0x00,0x00,0x10,0x00,0x54,0x04,0x10,0x00,0xd3,0x0c,
++	0x52,0x04,0x10,0x00,0x11,0x04,0x10,0x00,0x10,0xe6,0x92,0x0c,0x51,0x04,0x10,0xe6,
++	0x10,0x04,0x10,0xe6,0x00,0x00,0x00,0x00,0xd0,0x1e,0xcf,0x86,0x95,0x18,0x54,0x04,
++	0x07,0x00,0x53,0x04,0x07,0x00,0x52,0x04,0x07,0x00,0x51,0x04,0x07,0x00,0x10,0x04,
++	0x00,0x00,0x07,0x00,0x08,0x00,0xcf,0x86,0x95,0x1c,0xd4,0x0c,0x93,0x08,0x12,0x04,
++	0x08,0x00,0x00,0x00,0x08,0x00,0x93,0x0c,0x52,0x04,0x08,0x00,0x11,0x04,0x08,0x00,
++	0x00,0x00,0x00,0x00,0x00,0x00,0xd3,0xba,0xd2,0x80,0xd1,0x34,0xd0,0x1a,0xcf,0x86,
++	0x55,0x04,0x05,0x00,0x94,0x10,0x93,0x0c,0x52,0x04,0x05,0x00,0x11,0x04,0x05,0x00,
++	0x07,0x00,0x05,0x00,0x05,0x00,0xcf,0x86,0x95,0x14,0x94,0x10,0x53,0x04,0x05,0x00,
++	0x52,0x04,0x05,0x00,0x11,0x04,0x05,0x00,0x07,0x00,0x07,0x00,0x07,0x00,0xd0,0x2a,
++	0xcf,0x86,0xd5,0x14,0x54,0x04,0x07,0x00,0x53,0x04,0x07,0x00,0x52,0x04,0x07,0x00,
++	0x11,0x04,0x07,0x00,0x00,0x00,0x94,0x10,0x53,0x04,0x07,0x00,0x92,0x08,0x11,0x04,
++	0x07,0x00,0x00,0x00,0x00,0x00,0x12,0x00,0xcf,0x86,0xd5,0x10,0x54,0x04,0x12,0x00,
++	0x93,0x08,0x12,0x04,0x12,0x00,0x00,0x00,0x12,0x00,0x54,0x04,0x12,0x00,0x53,0x04,
++	0x12,0x00,0x12,0x04,0x12,0x00,0x00,0x00,0xd1,0x34,0xd0,0x12,0xcf,0x86,0x55,0x04,
++	0x10,0x00,0x94,0x08,0x13,0x04,0x10,0x00,0x00,0x00,0x10,0x00,0xcf,0x86,0x55,0x04,
++	0x10,0x00,0x94,0x18,0xd3,0x08,0x12,0x04,0x10,0x00,0x00,0x00,0x52,0x04,0x00,0x00,
++	0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x10,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,
++	0xd2,0x06,0xcf,0x06,0x10,0x00,0xd1,0x40,0xd0,0x1e,0xcf,0x86,0x55,0x04,0x10,0x00,
++	0x54,0x04,0x10,0x00,0x93,0x10,0x52,0x04,0x10,0x00,0x51,0x04,0x10,0x00,0x10,0x04,
++	0x10,0x00,0x00,0x00,0x00,0x00,0xcf,0x86,0xd5,0x14,0x54,0x04,0x10,0x00,0x93,0x0c,
++	0x52,0x04,0x10,0x00,0x11,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0x94,0x08,0x13,0x04,
++	0x10,0x00,0x00,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xe4,0xce,0x02,0xe3,0x45,0x01,
++	0xd2,0xd0,0xd1,0x70,0xd0,0x52,0xcf,0x86,0xd5,0x20,0x94,0x1c,0xd3,0x0c,0x52,0x04,
++	0x07,0x00,0x11,0x04,0x07,0x00,0x00,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x07,0x00,
++	0x00,0x00,0x07,0x00,0x07,0x00,0x07,0x00,0x54,0x04,0x07,0x00,0xd3,0x10,0x52,0x04,
++	0x07,0x00,0x51,0x04,0x07,0x00,0x10,0x04,0x00,0x00,0x07,0x00,0xd2,0x0c,0x91,0x08,
++	0x10,0x04,0x07,0x00,0x00,0x00,0x00,0x00,0xd1,0x08,0x10,0x04,0x07,0x00,0x00,0x00,
++	0x10,0x04,0x00,0x00,0x07,0x00,0xcf,0x86,0x95,0x18,0x54,0x04,0x0b,0x00,0x93,0x10,
++	0x52,0x04,0x0b,0x00,0x51,0x04,0x0b,0x00,0x10,0x04,0x00,0x00,0x0b,0x00,0x0b,0x00,
++	0x10,0x00,0xd0,0x32,0xcf,0x86,0xd5,0x18,0x54,0x04,0x10,0x00,0x53,0x04,0x10,0x00,
++	0x52,0x04,0x10,0x00,0x51,0x04,0x10,0x00,0x10,0x04,0x10,0x00,0x00,0x00,0x94,0x14,
++	0x93,0x10,0x52,0x04,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x10,0x00,
++	0x10,0x00,0x00,0x00,0xcf,0x86,0x55,0x04,0x00,0x00,0x54,0x04,0x11,0x00,0xd3,0x14,
++	0xd2,0x0c,0x51,0x04,0x11,0x00,0x10,0x04,0x11,0x00,0x00,0x00,0x11,0x04,0x11,0x00,
++	0x00,0x00,0x92,0x0c,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x11,0x00,0x11,0x00,
++	0xd1,0x40,0xd0,0x3a,0xcf,0x86,0xd5,0x1c,0x54,0x04,0x09,0x00,0x53,0x04,0x09,0x00,
++	0xd2,0x08,0x11,0x04,0x09,0x00,0x0b,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,
++	0x09,0x00,0x54,0x04,0x0a,0x00,0x53,0x04,0x0a,0x00,0xd2,0x08,0x11,0x04,0x0a,0x00,
++	0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x0a,0x00,0xcf,0x06,0x00,0x00,
++	0xd0,0x1a,0xcf,0x86,0x55,0x04,0x0d,0x00,0x54,0x04,0x0d,0x00,0x53,0x04,0x0d,0x00,
++	0x52,0x04,0x00,0x00,0x11,0x04,0x11,0x00,0x0d,0x00,0xcf,0x86,0x95,0x14,0x54,0x04,
++	0x11,0x00,0x93,0x0c,0x92,0x08,0x11,0x04,0x00,0x00,0x11,0x00,0x11,0x00,0x11,0x00,
++	0x11,0x00,0xd2,0xec,0xd1,0xa4,0xd0,0x76,0xcf,0x86,0xd5,0x48,0xd4,0x28,0xd3,0x14,
++	0x52,0x04,0x08,0x00,0xd1,0x08,0x10,0x04,0x00,0x00,0x08,0x00,0x10,0x04,0x08,0x00,
++	0x00,0x00,0x52,0x04,0x00,0x00,0xd1,0x08,0x10,0x04,0x08,0x00,0x08,0xdc,0x10,0x04,
++	0x08,0x00,0x08,0xe6,0xd3,0x10,0x52,0x04,0x08,0x00,0x91,0x08,0x10,0x04,0x00,0x00,
++	0x08,0x00,0x08,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,0x08,0x00,0x08,0x00,
++	0x08,0x00,0x54,0x04,0x08,0x00,0xd3,0x0c,0x52,0x04,0x08,0x00,0x11,0x04,0x14,0x00,
++	0x00,0x00,0xd2,0x10,0xd1,0x08,0x10,0x04,0x08,0xe6,0x08,0x01,0x10,0x04,0x08,0xdc,
++	0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x08,0x09,0xcf,0x86,0x95,0x28,
++	0xd4,0x14,0x53,0x04,0x08,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x14,0x00,0x00,0x00,
++	0x00,0x00,0x00,0x00,0x53,0x04,0x08,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x08,0x00,
++	0x00,0x00,0x00,0x00,0x00,0x00,0x0b,0x00,0xd0,0x0a,0xcf,0x86,0x15,0x04,0x10,0x00,
++	0x00,0x00,0xcf,0x86,0x55,0x04,0x10,0x00,0xd4,0x24,0xd3,0x14,0x52,0x04,0x10,0x00,
++	0xd1,0x08,0x10,0x04,0x10,0x00,0x10,0xe6,0x10,0x04,0x10,0xdc,0x00,0x00,0x92,0x0c,
++	0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x10,0x00,0x10,0x00,0x93,0x10,0x52,0x04,
++	0x10,0x00,0x51,0x04,0x10,0x00,0x10,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0xd1,0x54,
++	0xd0,0x26,0xcf,0x86,0x55,0x04,0x0b,0x00,0x54,0x04,0x0b,0x00,0xd3,0x0c,0x52,0x04,
++	0x0b,0x00,0x11,0x04,0x0b,0x00,0x00,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,
++	0x0b,0x00,0x0b,0x00,0x0b,0x00,0xcf,0x86,0xd5,0x14,0x54,0x04,0x0b,0x00,0x93,0x0c,
++	0x52,0x04,0x0b,0x00,0x11,0x04,0x0b,0x00,0x00,0x00,0x0b,0x00,0x54,0x04,0x0b,0x00,
++	0x93,0x10,0x92,0x0c,0x51,0x04,0x0b,0x00,0x10,0x04,0x0b,0x00,0x00,0x00,0x00,0x00,
++	0x0b,0x00,0xd0,0x42,0xcf,0x86,0xd5,0x28,0x54,0x04,0x10,0x00,0xd3,0x0c,0x92,0x08,
++	0x11,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0xd2,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,
++	0x10,0x00,0x10,0x00,0x91,0x08,0x10,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0x94,0x14,
++	0x53,0x04,0x00,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,0x10,0x00,0x10,0x00,
++	0x10,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xd3,0x96,0xd2,0x68,0xd1,0x24,0xd0,0x06,
++	0xcf,0x06,0x0b,0x00,0xcf,0x86,0x95,0x18,0x94,0x14,0x53,0x04,0x0b,0x00,0x92,0x0c,
++	0x91,0x08,0x10,0x04,0x0b,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++	0xd0,0x1e,0xcf,0x86,0x55,0x04,0x11,0x00,0x54,0x04,0x11,0x00,0x93,0x10,0x92,0x0c,
++	0x51,0x04,0x11,0x00,0x10,0x04,0x11,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x86,
++	0x55,0x04,0x11,0x00,0x54,0x04,0x11,0x00,0xd3,0x10,0x92,0x0c,0x51,0x04,0x11,0x00,
++	0x10,0x04,0x11,0x00,0x00,0x00,0x00,0x00,0x92,0x08,0x11,0x04,0x00,0x00,0x11,0x00,
++	0x11,0x00,0xd1,0x28,0xd0,0x22,0xcf,0x86,0x55,0x04,0x14,0x00,0xd4,0x0c,0x93,0x08,
++	0x12,0x04,0x14,0x00,0x14,0xe6,0x00,0x00,0x53,0x04,0x14,0x00,0x92,0x08,0x11,0x04,
++	0x14,0x00,0x00,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xcf,0x06,0x00,0x00,0xd2,0x2a,
++	0xd1,0x24,0xd0,0x06,0xcf,0x06,0x00,0x00,0xcf,0x86,0x55,0x04,0x00,0x00,0x54,0x04,
++	0x0b,0x00,0x53,0x04,0x0b,0x00,0x52,0x04,0x0b,0x00,0x51,0x04,0x0b,0x00,0x10,0x04,
++	0x0b,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xd1,0x58,0xd0,0x12,0xcf,0x86,0x55,0x04,
++	0x14,0x00,0x94,0x08,0x13,0x04,0x14,0x00,0x00,0x00,0x14,0x00,0xcf,0x86,0x95,0x40,
++	0xd4,0x24,0xd3,0x0c,0x52,0x04,0x14,0x00,0x11,0x04,0x14,0x00,0x14,0xdc,0xd2,0x0c,
++	0x51,0x04,0x14,0xe6,0x10,0x04,0x14,0xe6,0x14,0xdc,0x91,0x08,0x10,0x04,0x14,0xe6,
++	0x14,0xdc,0x14,0xdc,0xd3,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x14,0xdc,0x14,0x00,
++	0x14,0x00,0x14,0x00,0x92,0x08,0x11,0x04,0x14,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++	0xd0,0x06,0xcf,0x06,0x00,0x00,0xcf,0x86,0x55,0x04,0x00,0x00,0x54,0x04,0x15,0x00,
++	0x93,0x10,0x52,0x04,0x15,0x00,0x51,0x04,0x15,0x00,0x10,0x04,0x15,0x00,0x00,0x00,
++	0x00,0x00,0xcf,0x86,0xe5,0x0f,0x06,0xe4,0xf8,0x03,0xe3,0x02,0x02,0xd2,0xfb,0xd1,
++	0x4c,0xd0,0x06,0xcf,0x06,0x0c,0x00,0xcf,0x86,0xd5,0x2c,0xd4,0x1c,0xd3,0x10,0x52,
++	0x04,0x0c,0x00,0x51,0x04,0x0c,0x00,0x10,0x04,0x0c,0x09,0x0c,0x00,0x52,0x04,0x0c,
++	0x00,0x11,0x04,0x0c,0x00,0x00,0x00,0x93,0x0c,0x92,0x08,0x11,0x04,0x00,0x00,0x0c,
++	0x00,0x0c,0x00,0x0c,0x00,0x54,0x04,0x0c,0x00,0x53,0x04,0x00,0x00,0x52,0x04,0x00,
++	0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x10,0x09,0xd0,0x69,0xcf,0x86,0xd5,
++	0x32,0x54,0x04,0x0b,0x00,0x53,0x04,0x0b,0x00,0xd2,0x15,0x51,0x04,0x0b,0x00,0x10,
++	0x0d,0x0b,0xff,0xf0,0x91,0x82,0x99,0xf0,0x91,0x82,0xba,0x00,0x0b,0x00,0x91,0x11,
++	0x10,0x0d,0x0b,0xff,0xf0,0x91,0x82,0x9b,0xf0,0x91,0x82,0xba,0x00,0x0b,0x00,0x0b,
++	0x00,0xd4,0x1d,0x53,0x04,0x0b,0x00,0x92,0x15,0x51,0x04,0x0b,0x00,0x10,0x04,0x0b,
++	0x00,0x0b,0xff,0xf0,0x91,0x82,0xa5,0xf0,0x91,0x82,0xba,0x00,0x0b,0x00,0x53,0x04,
++	0x0b,0x00,0x92,0x10,0xd1,0x08,0x10,0x04,0x0b,0x00,0x0b,0x09,0x10,0x04,0x0b,0x07,
++	0x0b,0x00,0x0b,0x00,0xcf,0x86,0xd5,0x20,0x94,0x1c,0xd3,0x0c,0x92,0x08,0x11,0x04,
++	0x0b,0x00,0x00,0x00,0x00,0x00,0x52,0x04,0x00,0x00,0x91,0x08,0x10,0x04,0x00,0x00,
++	0x14,0x00,0x00,0x00,0x0d,0x00,0xd4,0x14,0x53,0x04,0x0d,0x00,0x92,0x0c,0x91,0x08,
++	0x10,0x04,0x0d,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x53,0x04,0x0d,0x00,0x92,0x08,
++	0x11,0x04,0x0d,0x00,0x00,0x00,0x00,0x00,0xd1,0x96,0xd0,0x5c,0xcf,0x86,0xd5,0x18,
++	0x94,0x14,0x93,0x10,0x92,0x0c,0x51,0x04,0x0d,0xe6,0x10,0x04,0x0d,0xe6,0x0d,0x00,
++	0x0d,0x00,0x0d,0x00,0x0d,0x00,0xd4,0x26,0x53,0x04,0x0d,0x00,0x52,0x04,0x0d,0x00,
++	0x51,0x04,0x0d,0x00,0x10,0x0d,0x0d,0xff,0xf0,0x91,0x84,0xb1,0xf0,0x91,0x84,0xa7,
++	0x00,0x0d,0xff,0xf0,0x91,0x84,0xb2,0xf0,0x91,0x84,0xa7,0x00,0x93,0x18,0xd2,0x0c,
++	0x51,0x04,0x0d,0x00,0x10,0x04,0x0d,0x00,0x0d,0x09,0x91,0x08,0x10,0x04,0x0d,0x09,
++	0x00,0x00,0x0d,0x00,0x0d,0x00,0xcf,0x86,0xd5,0x18,0x94,0x14,0x93,0x10,0x52,0x04,
++	0x0d,0x00,0x51,0x04,0x14,0x00,0x10,0x04,0x14,0x00,0x00,0x00,0x00,0x00,0x10,0x00,
++	0x54,0x04,0x10,0x00,0x93,0x18,0xd2,0x0c,0x51,0x04,0x10,0x00,0x10,0x04,0x10,0x00,
++	0x10,0x07,0x51,0x04,0x10,0x00,0x10,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0xd0,0x06,
++	0xcf,0x06,0x0d,0x00,0xcf,0x86,0xd5,0x40,0xd4,0x2c,0xd3,0x10,0x92,0x0c,0x91,0x08,
++	0x10,0x04,0x0d,0x09,0x0d,0x00,0x0d,0x00,0x0d,0x00,0xd2,0x10,0xd1,0x08,0x10,0x04,
++	0x0d,0x00,0x11,0x00,0x10,0x04,0x11,0x07,0x11,0x00,0x91,0x08,0x10,0x04,0x11,0x00,
++	0x10,0x00,0x00,0x00,0x53,0x04,0x0d,0x00,0x92,0x0c,0x51,0x04,0x0d,0x00,0x10,0x04,
++	0x10,0x00,0x11,0x00,0x11,0x00,0xd4,0x14,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,
++	0x00,0x00,0x10,0x00,0x10,0x00,0x10,0x00,0x10,0x00,0x93,0x10,0x52,0x04,0x10,0x00,
++	0x91,0x08,0x10,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xd2,0xc8,0xd1,0x48,
++	0xd0,0x42,0xcf,0x86,0xd5,0x18,0x54,0x04,0x10,0x00,0x93,0x10,0x92,0x0c,0x51,0x04,
++	0x10,0x00,0x10,0x04,0x00,0x00,0x10,0x00,0x10,0x00,0x10,0x00,0x54,0x04,0x10,0x00,
++	0xd3,0x14,0x52,0x04,0x10,0x00,0xd1,0x08,0x10,0x04,0x10,0x00,0x10,0x09,0x10,0x04,
++	0x10,0x07,0x10,0x00,0x52,0x04,0x10,0x00,0x51,0x04,0x10,0x00,0x10,0x04,0x12,0x00,
++	0x00,0x00,0xcf,0x06,0x00,0x00,0xd0,0x52,0xcf,0x86,0xd5,0x3c,0xd4,0x28,0xd3,0x10,
++	0x52,0x04,0x11,0x00,0x51,0x04,0x11,0x00,0x10,0x04,0x11,0x00,0x00,0x00,0xd2,0x0c,
++	0x91,0x08,0x10,0x04,0x11,0x00,0x00,0x00,0x11,0x00,0x51,0x04,0x11,0x00,0x10,0x04,
++	0x00,0x00,0x11,0x00,0x53,0x04,0x11,0x00,0x52,0x04,0x11,0x00,0x51,0x04,0x11,0x00,
++	0x10,0x04,0x00,0x00,0x11,0x00,0x94,0x10,0x53,0x04,0x11,0x00,0x92,0x08,0x11,0x04,
++	0x11,0x00,0x00,0x00,0x00,0x00,0x10,0x00,0xcf,0x86,0x55,0x04,0x10,0x00,0xd4,0x18,
++	0x53,0x04,0x10,0x00,0x92,0x10,0xd1,0x08,0x10,0x04,0x10,0x00,0x10,0x07,0x10,0x04,
++	0x10,0x09,0x00,0x00,0x00,0x00,0x53,0x04,0x10,0x00,0x92,0x08,0x11,0x04,0x10,0x00,
++	0x00,0x00,0x00,0x00,0xe1,0x27,0x01,0xd0,0x8a,0xcf,0x86,0xd5,0x44,0xd4,0x2c,0xd3,
++	0x18,0xd2,0x0c,0x91,0x08,0x10,0x04,0x11,0x00,0x10,0x00,0x10,0x00,0x91,0x08,0x10,
++	0x04,0x00,0x00,0x10,0x00,0x10,0x00,0x52,0x04,0x10,0x00,0xd1,0x08,0x10,0x04,0x10,
++	0x00,0x00,0x00,0x10,0x04,0x00,0x00,0x10,0x00,0x93,0x14,0x92,0x10,0xd1,0x08,0x10,
++	0x04,0x10,0x00,0x00,0x00,0x10,0x04,0x00,0x00,0x10,0x00,0x10,0x00,0x10,0x00,0xd4,
++	0x14,0x53,0x04,0x10,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x10,0x00,0x00,0x00,0x10,
++	0x00,0x10,0x00,0xd3,0x18,0xd2,0x0c,0x91,0x08,0x10,0x04,0x10,0x00,0x00,0x00,0x10,
++	0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x10,0x00,0x10,0x00,0xd2,0x0c,0x51,0x04,0x10,
++	0x00,0x10,0x04,0x00,0x00,0x14,0x07,0x91,0x08,0x10,0x04,0x10,0x07,0x10,0x00,0x10,
++	0x00,0xcf,0x86,0xd5,0x6a,0xd4,0x42,0xd3,0x14,0x52,0x04,0x10,0x00,0xd1,0x08,0x10,
++	0x04,0x10,0x00,0x00,0x00,0x10,0x04,0x00,0x00,0x10,0x00,0xd2,0x19,0xd1,0x08,0x10,
++	0x04,0x10,0x00,0x00,0x00,0x10,0x04,0x00,0x00,0x10,0xff,0xf0,0x91,0x8d,0x87,0xf0,
++	0x91,0x8c,0xbe,0x00,0x91,0x11,0x10,0x0d,0x10,0xff,0xf0,0x91,0x8d,0x87,0xf0,0x91,
++	0x8d,0x97,0x00,0x10,0x09,0x00,0x00,0xd3,0x18,0xd2,0x0c,0x91,0x08,0x10,0x04,0x11,
++	0x00,0x00,0x00,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x10,0x00,0x52,
++	0x04,0x00,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x10,0x00,0x10,0x00,0xd4,0x1c,0xd3,
++	0x0c,0x52,0x04,0x10,0x00,0x11,0x04,0x00,0x00,0x10,0xe6,0x52,0x04,0x10,0xe6,0x91,
++	0x08,0x10,0x04,0x10,0xe6,0x00,0x00,0x00,0x00,0x93,0x10,0x52,0x04,0x10,0xe6,0x91,
++	0x08,0x10,0x04,0x10,0xe6,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xe3,
++	0x30,0x01,0xd2,0xb7,0xd1,0x48,0xd0,0x06,0xcf,0x06,0x12,0x00,0xcf,0x86,0x95,0x3c,
++	0xd4,0x1c,0x93,0x18,0xd2,0x0c,0x51,0x04,0x12,0x00,0x10,0x04,0x12,0x09,0x12,0x00,
++	0x51,0x04,0x12,0x00,0x10,0x04,0x12,0x07,0x12,0x00,0x12,0x00,0x53,0x04,0x12,0x00,
++	0xd2,0x0c,0x51,0x04,0x12,0x00,0x10,0x04,0x00,0x00,0x12,0x00,0xd1,0x08,0x10,0x04,
++	0x00,0x00,0x12,0x00,0x10,0x04,0x14,0xe6,0x15,0x00,0x00,0x00,0xd0,0x45,0xcf,0x86,
++	0x55,0x04,0x10,0x00,0x54,0x04,0x10,0x00,0x53,0x04,0x10,0x00,0xd2,0x15,0x51,0x04,
++	0x10,0x00,0x10,0x04,0x10,0x00,0x10,0xff,0xf0,0x91,0x92,0xb9,0xf0,0x91,0x92,0xba,
++	0x00,0xd1,0x11,0x10,0x0d,0x10,0xff,0xf0,0x91,0x92,0xb9,0xf0,0x91,0x92,0xb0,0x00,
++	0x10,0x00,0x10,0x0d,0x10,0xff,0xf0,0x91,0x92,0xb9,0xf0,0x91,0x92,0xbd,0x00,0x10,
++	0x00,0xcf,0x86,0x95,0x24,0xd4,0x14,0x93,0x10,0x92,0x0c,0x51,0x04,0x10,0x00,0x10,
++	0x04,0x10,0x09,0x10,0x07,0x10,0x00,0x00,0x00,0x53,0x04,0x10,0x00,0x92,0x08,0x11,
++	0x04,0x10,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xd1,0x06,0xcf,0x06,0x00,0x00,0xd0,
++	0x40,0xcf,0x86,0x55,0x04,0x10,0x00,0x54,0x04,0x10,0x00,0xd3,0x0c,0x52,0x04,0x10,
++	0x00,0x11,0x04,0x10,0x00,0x00,0x00,0xd2,0x1e,0x51,0x04,0x10,0x00,0x10,0x0d,0x10,
++	0xff,0xf0,0x91,0x96,0xb8,0xf0,0x91,0x96,0xaf,0x00,0x10,0xff,0xf0,0x91,0x96,0xb9,
++	0xf0,0x91,0x96,0xaf,0x00,0x51,0x04,0x10,0x00,0x10,0x04,0x10,0x00,0x10,0x09,0xcf,
++	0x86,0x95,0x2c,0xd4,0x1c,0xd3,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x10,0x07,0x10,
++	0x00,0x10,0x00,0x10,0x00,0x92,0x08,0x11,0x04,0x10,0x00,0x11,0x00,0x11,0x00,0x53,
++	0x04,0x11,0x00,0x52,0x04,0x11,0x00,0x11,0x04,0x11,0x00,0x00,0x00,0x00,0x00,0xd2,
++	0xa0,0xd1,0x5c,0xd0,0x1e,0xcf,0x86,0x55,0x04,0x10,0x00,0x54,0x04,0x10,0x00,0x53,
++	0x04,0x10,0x00,0x52,0x04,0x10,0x00,0x51,0x04,0x10,0x00,0x10,0x04,0x10,0x00,0x10,
++	0x09,0xcf,0x86,0xd5,0x24,0xd4,0x14,0x93,0x10,0x52,0x04,0x10,0x00,0x91,0x08,0x10,
++	0x04,0x10,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x53,0x04,0x10,0x00,0x92,0x08,0x11,
++	0x04,0x10,0x00,0x00,0x00,0x00,0x00,0x94,0x14,0x53,0x04,0x12,0x00,0x52,0x04,0x12,
++	0x00,0x91,0x08,0x10,0x04,0x12,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xd0,0x2a,0xcf,
++	0x86,0x55,0x04,0x0d,0x00,0x54,0x04,0x0d,0x00,0xd3,0x10,0x52,0x04,0x0d,0x00,0x51,
++	0x04,0x0d,0x00,0x10,0x04,0x0d,0x09,0x0d,0x07,0x92,0x0c,0x91,0x08,0x10,0x04,0x15,
++	0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x86,0x95,0x14,0x94,0x10,0x53,0x04,0x0d,
++	0x00,0x92,0x08,0x11,0x04,0x0d,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xd1,
++	0x40,0xd0,0x3a,0xcf,0x86,0xd5,0x20,0x54,0x04,0x11,0x00,0x53,0x04,0x11,0x00,0xd2,
++	0x0c,0x51,0x04,0x11,0x00,0x10,0x04,0x14,0x00,0x00,0x00,0x91,0x08,0x10,0x04,0x00,
++	0x00,0x11,0x00,0x11,0x00,0x94,0x14,0x53,0x04,0x11,0x00,0x92,0x0c,0x51,0x04,0x11,
++	0x00,0x10,0x04,0x11,0x00,0x11,0x09,0x00,0x00,0x11,0x00,0xcf,0x06,0x00,0x00,0xcf,
++	0x06,0x00,0x00,0xe4,0x59,0x01,0xd3,0xb2,0xd2,0x5c,0xd1,0x28,0xd0,0x22,0xcf,0x86,
++	0x55,0x04,0x14,0x00,0x54,0x04,0x14,0x00,0x53,0x04,0x14,0x00,0x92,0x10,0xd1,0x08,
++	0x10,0x04,0x14,0x00,0x14,0x09,0x10,0x04,0x14,0x07,0x14,0x00,0x00,0x00,0xcf,0x06,
++	0x00,0x00,0xd0,0x0a,0xcf,0x86,0x15,0x04,0x00,0x00,0x10,0x00,0xcf,0x86,0x55,0x04,
++	0x10,0x00,0x54,0x04,0x10,0x00,0xd3,0x10,0x92,0x0c,0x51,0x04,0x10,0x00,0x10,0x04,
++	0x10,0x00,0x00,0x00,0x00,0x00,0x52,0x04,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,
++	0x00,0x00,0x10,0x00,0xd1,0x06,0xcf,0x06,0x00,0x00,0xd0,0x1a,0xcf,0x86,0x55,0x04,
++	0x00,0x00,0x94,0x10,0x53,0x04,0x15,0x00,0x92,0x08,0x11,0x04,0x00,0x00,0x15,0x00,
++	0x15,0x00,0x15,0x00,0xcf,0x86,0xd5,0x14,0x54,0x04,0x15,0x00,0x53,0x04,0x15,0x00,
++	0x92,0x08,0x11,0x04,0x00,0x00,0x15,0x00,0x15,0x00,0x94,0x1c,0x93,0x18,0xd2,0x0c,
++	0x91,0x08,0x10,0x04,0x15,0x09,0x15,0x00,0x15,0x00,0x91,0x08,0x10,0x04,0x15,0x00,
++	0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xd2,0xa0,0xd1,0x3c,0xd0,0x1e,0xcf,0x86,
++	0x55,0x04,0x13,0x00,0x54,0x04,0x13,0x00,0x93,0x10,0x52,0x04,0x13,0x00,0x91,0x08,
++	0x10,0x04,0x13,0x09,0x13,0x00,0x13,0x00,0x13,0x00,0xcf,0x86,0x95,0x18,0x94,0x14,
++	0x93,0x10,0x52,0x04,0x13,0x00,0x51,0x04,0x13,0x00,0x10,0x04,0x13,0x00,0x13,0x09,
++	0x00,0x00,0x13,0x00,0x13,0x00,0xd0,0x46,0xcf,0x86,0xd5,0x2c,0xd4,0x10,0x93,0x0c,
++	0x52,0x04,0x13,0x00,0x11,0x04,0x15,0x00,0x13,0x00,0x13,0x00,0x53,0x04,0x13,0x00,
++	0xd2,0x0c,0x91,0x08,0x10,0x04,0x13,0x00,0x13,0x09,0x13,0x00,0x91,0x08,0x10,0x04,
++	0x13,0x00,0x14,0x00,0x13,0x00,0x94,0x14,0x93,0x10,0x92,0x0c,0x51,0x04,0x13,0x00,
++	0x10,0x04,0x13,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x86,0x55,0x04,
++	0x10,0x00,0x54,0x04,0x10,0x00,0x53,0x04,0x10,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,
++	0x10,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xe3,0xa9,0x01,0xd2,
++	0xb0,0xd1,0x6c,0xd0,0x3e,0xcf,0x86,0xd5,0x18,0x94,0x14,0x53,0x04,0x12,0x00,0x92,
++	0x0c,0x91,0x08,0x10,0x04,0x12,0x00,0x00,0x00,0x12,0x00,0x12,0x00,0x12,0x00,0x54,
++	0x04,0x12,0x00,0xd3,0x10,0x52,0x04,0x12,0x00,0x51,0x04,0x12,0x00,0x10,0x04,0x12,
++	0x00,0x00,0x00,0x52,0x04,0x12,0x00,0x51,0x04,0x12,0x00,0x10,0x04,0x12,0x00,0x12,
++	0x09,0xcf,0x86,0xd5,0x14,0x94,0x10,0x93,0x0c,0x52,0x04,0x12,0x00,0x11,0x04,0x12,
++	0x00,0x00,0x00,0x00,0x00,0x12,0x00,0x94,0x14,0x53,0x04,0x12,0x00,0x52,0x04,0x12,
++	0x00,0x91,0x08,0x10,0x04,0x12,0x00,0x00,0x00,0x00,0x00,0x12,0x00,0xd0,0x3e,0xcf,
++	0x86,0xd5,0x14,0x54,0x04,0x12,0x00,0x93,0x0c,0x92,0x08,0x11,0x04,0x00,0x00,0x12,
++	0x00,0x12,0x00,0x12,0x00,0xd4,0x14,0x53,0x04,0x12,0x00,0x92,0x0c,0x91,0x08,0x10,
++	0x04,0x00,0x00,0x12,0x00,0x12,0x00,0x12,0x00,0x93,0x10,0x52,0x04,0x12,0x00,0x51,
++	0x04,0x12,0x00,0x10,0x04,0x12,0x00,0x00,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xd1,
++	0xa0,0xd0,0x52,0xcf,0x86,0xd5,0x24,0x94,0x20,0xd3,0x10,0x52,0x04,0x13,0x00,0x51,
++	0x04,0x13,0x00,0x10,0x04,0x13,0x00,0x00,0x00,0x92,0x0c,0x51,0x04,0x13,0x00,0x10,
++	0x04,0x00,0x00,0x13,0x00,0x13,0x00,0x13,0x00,0x54,0x04,0x13,0x00,0xd3,0x10,0x52,
++	0x04,0x13,0x00,0x51,0x04,0x13,0x00,0x10,0x04,0x13,0x00,0x00,0x00,0xd2,0x0c,0x51,
++	0x04,0x00,0x00,0x10,0x04,0x13,0x00,0x00,0x00,0x51,0x04,0x13,0x00,0x10,0x04,0x00,
++	0x00,0x13,0x00,0xcf,0x86,0xd5,0x28,0xd4,0x18,0x93,0x14,0xd2,0x0c,0x51,0x04,0x13,
++	0x00,0x10,0x04,0x13,0x07,0x13,0x00,0x11,0x04,0x13,0x09,0x13,0x00,0x00,0x00,0x53,
++	0x04,0x13,0x00,0x92,0x08,0x11,0x04,0x13,0x00,0x00,0x00,0x00,0x00,0x94,0x20,0xd3,
++	0x10,0x52,0x04,0x14,0x00,0x51,0x04,0x14,0x00,0x10,0x04,0x00,0x00,0x14,0x00,0x92,
++	0x0c,0x91,0x08,0x10,0x04,0x14,0x00,0x00,0x00,0x14,0x00,0x14,0x00,0x14,0x00,0xd0,
++	0x52,0xcf,0x86,0xd5,0x3c,0xd4,0x14,0x53,0x04,0x14,0x00,0x52,0x04,0x14,0x00,0x51,
++	0x04,0x14,0x00,0x10,0x04,0x14,0x00,0x00,0x00,0xd3,0x18,0xd2,0x0c,0x51,0x04,0x14,
++	0x00,0x10,0x04,0x00,0x00,0x14,0x00,0x51,0x04,0x14,0x00,0x10,0x04,0x14,0x00,0x14,
++	0x09,0x92,0x0c,0x91,0x08,0x10,0x04,0x14,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x94,
++	0x10,0x53,0x04,0x14,0x00,0x92,0x08,0x11,0x04,0x14,0x00,0x00,0x00,0x00,0x00,0x00,
++	0x00,0xcf,0x06,0x00,0x00,0xd2,0x2a,0xd1,0x06,0xcf,0x06,0x00,0x00,0xd0,0x06,0xcf,
++	0x06,0x00,0x00,0xcf,0x86,0x55,0x04,0x00,0x00,0x54,0x04,0x14,0x00,0x53,0x04,0x14,
++	0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x14,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xd1,
++	0x06,0xcf,0x06,0x00,0x00,0xd0,0x06,0xcf,0x06,0x00,0x00,0xcf,0x86,0x55,0x04,0x15,
++	0x00,0x54,0x04,0x15,0x00,0xd3,0x0c,0x92,0x08,0x11,0x04,0x15,0x00,0x00,0x00,0x00,
++	0x00,0x52,0x04,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x15,0x00,0xd0,
++	0xca,0xcf,0x86,0xd5,0xc2,0xd4,0x54,0xd3,0x06,0xcf,0x06,0x09,0x00,0xd2,0x06,0xcf,
++	0x06,0x09,0x00,0xd1,0x24,0xd0,0x06,0xcf,0x06,0x09,0x00,0xcf,0x86,0x55,0x04,0x09,
++	0x00,0x94,0x14,0x53,0x04,0x09,0x00,0x52,0x04,0x09,0x00,0x51,0x04,0x09,0x00,0x10,
++	0x04,0x09,0x00,0x10,0x00,0x10,0x00,0xd0,0x1e,0xcf,0x86,0x95,0x18,0x54,0x04,0x10,
++	0x00,0x53,0x04,0x10,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x10,0x00,0x11,0x00,0x00,
++	0x00,0x00,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xd3,0x68,0xd2,0x46,0xd1,0x40,0xd0,
++	0x06,0xcf,0x06,0x09,0x00,0xcf,0x86,0x55,0x04,0x09,0x00,0xd4,0x20,0xd3,0x10,0x92,
++	0x0c,0x51,0x04,0x09,0x00,0x10,0x04,0x09,0x00,0x10,0x00,0x10,0x00,0x52,0x04,0x10,
++	0x00,0x51,0x04,0x10,0x00,0x10,0x04,0x10,0x00,0x00,0x00,0x93,0x10,0x52,0x04,0x09,
++	0x00,0x91,0x08,0x10,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x06,0x11,
++	0x00,0xd1,0x1c,0xd0,0x06,0xcf,0x06,0x11,0x00,0xcf,0x86,0x95,0x10,0x94,0x0c,0x93,
++	0x08,0x12,0x04,0x11,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x06,0x00,
++	0x00,0xcf,0x06,0x00,0x00,0xcf,0x06,0x00,0x00,0xcf,0x86,0xd5,0x4c,0xd4,0x06,0xcf,
++	0x06,0x0b,0x00,0xd3,0x40,0xd2,0x3a,0xd1,0x34,0xd0,0x2e,0xcf,0x86,0x55,0x04,0x0b,
++	0x00,0xd4,0x14,0x53,0x04,0x0b,0x00,0x52,0x04,0x0b,0x00,0x51,0x04,0x0b,0x00,0x10,
++	0x04,0x0b,0x00,0x00,0x00,0x53,0x04,0x15,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x15,
+ 	0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xcf,0x06,0x00,0x00,0xcf,
+-	0x86,0xcf,0x06,0x00,0x00,0xd0,0x08,0xcf,0x86,0xcf,0x06,0x00,0x00,0xcf,0x86,0xd5,
+-	0xa2,0xd4,0x9c,0xd3,0x74,0xd2,0x26,0xd1,0x20,0xd0,0x1a,0xcf,0x86,0x95,0x14,0x94,
+-	0x10,0x93,0x0c,0x92,0x08,0x11,0x04,0x0c,0x00,0x13,0x00,0x13,0x00,0x13,0x00,0x13,
+-	0x00,0x13,0x00,0xcf,0x06,0x13,0x00,0xcf,0x06,0x13,0x00,0xd1,0x48,0xd0,0x1e,0xcf,
+-	0x86,0x95,0x18,0x54,0x04,0x13,0x00,0x53,0x04,0x13,0x00,0x52,0x04,0x13,0x00,0x51,
+-	0x04,0x13,0x00,0x10,0x04,0x13,0x00,0x00,0x00,0x00,0x00,0xcf,0x86,0xd5,0x18,0x54,
+-	0x04,0x00,0x00,0x93,0x10,0x92,0x0c,0x51,0x04,0x15,0x00,0x10,0x04,0x15,0x00,0x00,
+-	0x00,0x00,0x00,0x00,0x00,0x94,0x0c,0x93,0x08,0x12,0x04,0x00,0x00,0x15,0x00,0x00,
+-	0x00,0x13,0x00,0xcf,0x06,0x13,0x00,0xd2,0x22,0xd1,0x06,0xcf,0x06,0x13,0x00,0xd0,
+-	0x06,0xcf,0x06,0x13,0x00,0xcf,0x86,0x55,0x04,0x13,0x00,0x54,0x04,0x13,0x00,0x53,
+-	0x04,0x13,0x00,0x12,0x04,0x13,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xcf,0x06,0x00,
+-	0x00,0xd4,0x06,0xcf,0x06,0x00,0x00,0xd3,0x7e,0xd2,0x78,0xd1,0x34,0xd0,0x06,0xcf,
+-	0x06,0x10,0x00,0xcf,0x86,0x55,0x04,0x10,0x00,0xd4,0x14,0x53,0x04,0x10,0x00,0x92,
+-	0x0c,0x51,0x04,0x10,0x00,0x10,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0x53,0x04,0x10,
+-	0x00,0x52,0x04,0x10,0x00,0x91,0x08,0x10,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0xd0,
+-	0x3e,0xcf,0x86,0xd5,0x2c,0xd4,0x14,0x53,0x04,0x10,0x00,0x92,0x0c,0x91,0x08,0x10,
+-	0x04,0x10,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x53,0x04,0x10,0x00,0xd2,0x08,0x11,
+-	0x04,0x10,0x00,0x00,0x00,0x51,0x04,0x10,0x00,0x10,0x04,0x10,0x01,0x10,0x00,0x94,
+-	0x0c,0x93,0x08,0x12,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x06,0x00,
+-	0x00,0xcf,0x06,0x00,0x00,0xcf,0x06,0x00,0x00,0xe1,0x92,0x04,0xd0,0x08,0xcf,0x86,
+-	0xcf,0x06,0x00,0x00,0xcf,0x86,0xe5,0x2f,0x04,0xe4,0x7f,0x02,0xe3,0xf4,0x01,0xd2,
+-	0x26,0xd1,0x06,0xcf,0x06,0x05,0x00,0xd0,0x06,0xcf,0x06,0x05,0x00,0xcf,0x86,0x55,
+-	0x04,0x05,0x00,0x54,0x04,0x05,0x00,0x93,0x0c,0x52,0x04,0x05,0x00,0x11,0x04,0x05,
+-	0x00,0x00,0x00,0x00,0x00,0xd1,0xeb,0xd0,0x2a,0xcf,0x86,0x55,0x04,0x05,0x00,0x94,
+-	0x20,0xd3,0x10,0x52,0x04,0x05,0x00,0x51,0x04,0x05,0x00,0x10,0x04,0x05,0x00,0x00,
+-	0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,0x0a,0x00,0x05,0x00,0x05,0x00,0x05,
+-	0x00,0xcf,0x86,0xd5,0x2a,0x54,0x04,0x05,0x00,0x53,0x04,0x05,0x00,0x52,0x04,0x05,
+-	0x00,0x51,0x04,0x05,0x00,0x10,0x0d,0x05,0xff,0xf0,0x9d,0x85,0x97,0xf0,0x9d,0x85,
+-	0xa5,0x00,0x05,0xff,0xf0,0x9d,0x85,0x98,0xf0,0x9d,0x85,0xa5,0x00,0xd4,0x75,0xd3,
+-	0x61,0xd2,0x44,0xd1,0x22,0x10,0x11,0x05,0xff,0xf0,0x9d,0x85,0x98,0xf0,0x9d,0x85,
+-	0xa5,0xf0,0x9d,0x85,0xae,0x00,0x05,0xff,0xf0,0x9d,0x85,0x98,0xf0,0x9d,0x85,0xa5,
+-	0xf0,0x9d,0x85,0xaf,0x00,0x10,0x11,0x05,0xff,0xf0,0x9d,0x85,0x98,0xf0,0x9d,0x85,
+-	0xa5,0xf0,0x9d,0x85,0xb0,0x00,0x05,0xff,0xf0,0x9d,0x85,0x98,0xf0,0x9d,0x85,0xa5,
+-	0xf0,0x9d,0x85,0xb1,0x00,0xd1,0x15,0x10,0x11,0x05,0xff,0xf0,0x9d,0x85,0x98,0xf0,
+-	0x9d,0x85,0xa5,0xf0,0x9d,0x85,0xb2,0x00,0x05,0xd8,0x10,0x04,0x05,0xd8,0x05,0x01,
+-	0xd2,0x08,0x11,0x04,0x05,0x01,0x05,0x00,0x91,0x08,0x10,0x04,0x05,0x00,0x05,0xe2,
+-	0x05,0xd8,0xd3,0x10,0x92,0x0c,0x51,0x04,0x05,0xd8,0x10,0x04,0x05,0xd8,0x05,0x00,
+-	0x05,0x00,0x92,0x0c,0x51,0x04,0x05,0x00,0x10,0x04,0x05,0x00,0x05,0xdc,0x05,0xdc,
++	0x06,0x00,0x00,0xcf,0x06,0x00,0x00,0xcf,0x06,0x00,0x00,0xd1,0x4c,0xd0,0x44,0xcf,
++	0x86,0xd5,0x3c,0xd4,0x06,0xcf,0x06,0x00,0x00,0xd3,0x06,0xcf,0x06,0x11,0x00,0xd2,
++	0x2a,0xd1,0x24,0xd0,0x06,0xcf,0x06,0x11,0x00,0xcf,0x86,0x95,0x18,0x94,0x14,0x93,
++	0x10,0x52,0x04,0x11,0x00,0x51,0x04,0x11,0x00,0x10,0x04,0x11,0x00,0x00,0x00,0x00,
++	0x00,0x00,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xcf,0x06,0x00,0x00,0xcf,0x06,0x00,
++	0x00,0xcf,0x86,0xcf,0x06,0x00,0x00,0xe0,0xd2,0x01,0xcf,0x86,0xd5,0x06,0xcf,0x06,
++	0x00,0x00,0xe4,0x0b,0x01,0xd3,0x06,0xcf,0x06,0x0c,0x00,0xd2,0x84,0xd1,0x50,0xd0,
++	0x1e,0xcf,0x86,0x55,0x04,0x0c,0x00,0x54,0x04,0x0c,0x00,0x53,0x04,0x0c,0x00,0x92,
++	0x0c,0x91,0x08,0x10,0x04,0x0c,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x86,0xd5,
++	0x18,0x54,0x04,0x10,0x00,0x53,0x04,0x10,0x00,0x52,0x04,0x10,0x00,0x51,0x04,0x10,
++	0x00,0x10,0x04,0x10,0x00,0x00,0x00,0x94,0x14,0x53,0x04,0x10,0x00,0xd2,0x08,0x11,
++	0x04,0x10,0x00,0x00,0x00,0x11,0x04,0x00,0x00,0x10,0x00,0x00,0x00,0xd0,0x06,0xcf,
++	0x06,0x00,0x00,0xcf,0x86,0xd5,0x08,0x14,0x04,0x00,0x00,0x10,0x00,0xd4,0x10,0x53,
++	0x04,0x10,0x00,0x52,0x04,0x10,0x00,0x11,0x04,0x10,0x00,0x00,0x00,0x93,0x10,0x52,
++	0x04,0x10,0x01,0x91,0x08,0x10,0x04,0x10,0x01,0x10,0x00,0x00,0x00,0x00,0x00,0xd1,
++	0x6c,0xd0,0x1e,0xcf,0x86,0x55,0x04,0x10,0x00,0x54,0x04,0x10,0x00,0x93,0x10,0x52,
++	0x04,0x10,0xe6,0x51,0x04,0x10,0xe6,0x10,0x04,0x10,0xe6,0x10,0x00,0x10,0x00,0xcf,
++	0x86,0xd5,0x24,0xd4,0x10,0x93,0x0c,0x52,0x04,0x10,0x00,0x11,0x04,0x10,0x00,0x00,
++	0x00,0x00,0x00,0x53,0x04,0x10,0x00,0x92,0x0c,0x51,0x04,0x10,0x00,0x10,0x04,0x00,
++	0x00,0x10,0x00,0x10,0x00,0xd4,0x14,0x93,0x10,0x92,0x0c,0x51,0x04,0x10,0x00,0x10,
++	0x04,0x00,0x00,0x10,0x00,0x10,0x00,0x10,0x00,0x53,0x04,0x10,0x00,0x52,0x04,0x00,
++	0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x10,0x00,0x10,0x00,0xd0,0x0e,0xcf,0x86,0x95,
++	0x08,0x14,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xd3,0x06,0xcf,
++	0x06,0x00,0x00,0xd2,0x30,0xd1,0x0c,0xd0,0x06,0xcf,0x06,0x00,0x00,0xcf,0x06,0x14,
++	0x00,0xd0,0x1e,0xcf,0x86,0x95,0x18,0x54,0x04,0x14,0x00,0x53,0x04,0x14,0x00,0x92,
++	0x0c,0x51,0x04,0x14,0x00,0x10,0x04,0x14,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,
++	0x06,0x00,0x00,0xd1,0x4c,0xd0,0x06,0xcf,0x06,0x0d,0x00,0xcf,0x86,0xd5,0x2c,0x94,
++	0x28,0xd3,0x10,0x52,0x04,0x0d,0x00,0x91,0x08,0x10,0x04,0x0d,0x00,0x15,0x00,0x15,
++	0x00,0xd2,0x0c,0x51,0x04,0x15,0x00,0x10,0x04,0x15,0x00,0x00,0x00,0x51,0x04,0x00,
++	0x00,0x10,0x04,0x00,0x00,0x15,0x00,0x0d,0x00,0x54,0x04,0x0d,0x00,0x53,0x04,0x0d,
++	0x00,0x52,0x04,0x0d,0x00,0x51,0x04,0x0d,0x00,0x10,0x04,0x0d,0x00,0x15,0x00,0xd0,
++	0x1e,0xcf,0x86,0x95,0x18,0x94,0x14,0x53,0x04,0x15,0x00,0x52,0x04,0x00,0x00,0x51,
++	0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x0d,0x00,0x0d,0x00,0x00,0x00,0xcf,0x86,0x55,
++	0x04,0x00,0x00,0x94,0x14,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x12,0x00,0x13,
++	0x00,0x15,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x86,0xcf,0x06,0x12,0x00,0xe2,
++	0xc6,0x01,0xd1,0x8e,0xd0,0x86,0xcf,0x86,0xd5,0x48,0xd4,0x06,0xcf,0x06,0x12,0x00,
++	0xd3,0x06,0xcf,0x06,0x12,0x00,0xd2,0x06,0xcf,0x06,0x12,0x00,0xd1,0x06,0xcf,0x06,
++	0x12,0x00,0xd0,0x06,0xcf,0x06,0x12,0x00,0xcf,0x86,0x55,0x04,0x12,0x00,0xd4,0x14,
++	0x53,0x04,0x12,0x00,0x52,0x04,0x12,0x00,0x91,0x08,0x10,0x04,0x12,0x00,0x14,0x00,
++	0x14,0x00,0x93,0x0c,0x92,0x08,0x11,0x04,0x14,0x00,0x15,0x00,0x15,0x00,0x00,0x00,
++	0xd4,0x36,0xd3,0x06,0xcf,0x06,0x12,0x00,0xd2,0x2a,0xd1,0x06,0xcf,0x06,0x12,0x00,
++	0xd0,0x06,0xcf,0x06,0x12,0x00,0xcf,0x86,0x55,0x04,0x12,0x00,0x54,0x04,0x12,0x00,
++	0x93,0x10,0x92,0x0c,0x51,0x04,0x12,0x00,0x10,0x04,0x12,0x00,0x00,0x00,0x00,0x00,
++	0x00,0x00,0xcf,0x06,0x00,0x00,0xcf,0x06,0x00,0x00,0xcf,0x86,0xcf,0x06,0x00,0x00,
++	0xd0,0x08,0xcf,0x86,0xcf,0x06,0x00,0x00,0xcf,0x86,0xd5,0xa2,0xd4,0x9c,0xd3,0x74,
++	0xd2,0x26,0xd1,0x20,0xd0,0x1a,0xcf,0x86,0x95,0x14,0x94,0x10,0x93,0x0c,0x92,0x08,
++	0x11,0x04,0x0c,0x00,0x13,0x00,0x13,0x00,0x13,0x00,0x13,0x00,0x13,0x00,0xcf,0x06,
++	0x13,0x00,0xcf,0x06,0x13,0x00,0xd1,0x48,0xd0,0x1e,0xcf,0x86,0x95,0x18,0x54,0x04,
++	0x13,0x00,0x53,0x04,0x13,0x00,0x52,0x04,0x13,0x00,0x51,0x04,0x13,0x00,0x10,0x04,
++	0x13,0x00,0x00,0x00,0x00,0x00,0xcf,0x86,0xd5,0x18,0x54,0x04,0x00,0x00,0x93,0x10,
++	0x92,0x0c,0x51,0x04,0x15,0x00,0x10,0x04,0x15,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++	0x94,0x0c,0x93,0x08,0x12,0x04,0x00,0x00,0x15,0x00,0x00,0x00,0x13,0x00,0xcf,0x06,
++	0x13,0x00,0xd2,0x22,0xd1,0x06,0xcf,0x06,0x13,0x00,0xd0,0x06,0xcf,0x06,0x13,0x00,
++	0xcf,0x86,0x55,0x04,0x13,0x00,0x54,0x04,0x13,0x00,0x53,0x04,0x13,0x00,0x12,0x04,
++	0x13,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xcf,0x06,0x00,0x00,0xd4,0x06,0xcf,0x06,
++	0x00,0x00,0xd3,0x7f,0xd2,0x79,0xd1,0x34,0xd0,0x06,0xcf,0x06,0x10,0x00,0xcf,0x86,
++	0x55,0x04,0x10,0x00,0xd4,0x14,0x53,0x04,0x10,0x00,0x92,0x0c,0x51,0x04,0x10,0x00,
++	0x10,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0x53,0x04,0x10,0x00,0x52,0x04,0x10,0x00,
++	0x91,0x08,0x10,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0xd0,0x3f,0xcf,0x86,0xd5,0x2c,
++	0xd4,0x14,0x53,0x04,0x10,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x10,0x00,0x00,0x00,
++	0x00,0x00,0x00,0x00,0x53,0x04,0x10,0x00,0xd2,0x08,0x11,0x04,0x10,0x00,0x00,0x00,
++	0x51,0x04,0x10,0x00,0x10,0x04,0x10,0x01,0x10,0x00,0x94,0x0d,0x93,0x09,0x12,0x05,
++	0x10,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xcf,0x06,0x00,
++	0x00,0xcf,0x06,0x00,0x00,0xe1,0x96,0x04,0xd0,0x08,0xcf,0x86,0xcf,0x06,0x00,0x00,
++	0xcf,0x86,0xe5,0x33,0x04,0xe4,0x83,0x02,0xe3,0xf8,0x01,0xd2,0x26,0xd1,0x06,0xcf,
++	0x06,0x05,0x00,0xd0,0x06,0xcf,0x06,0x05,0x00,0xcf,0x86,0x55,0x04,0x05,0x00,0x54,
++	0x04,0x05,0x00,0x93,0x0c,0x52,0x04,0x05,0x00,0x11,0x04,0x05,0x00,0x00,0x00,0x00,
++	0x00,0xd1,0xef,0xd0,0x2a,0xcf,0x86,0x55,0x04,0x05,0x00,0x94,0x20,0xd3,0x10,0x52,
++	0x04,0x05,0x00,0x51,0x04,0x05,0x00,0x10,0x04,0x05,0x00,0x00,0x00,0x92,0x0c,0x91,
++	0x08,0x10,0x04,0x00,0x00,0x0a,0x00,0x05,0x00,0x05,0x00,0x05,0x00,0xcf,0x86,0xd5,
++	0x2a,0x54,0x04,0x05,0x00,0x53,0x04,0x05,0x00,0x52,0x04,0x05,0x00,0x51,0x04,0x05,
++	0x00,0x10,0x0d,0x05,0xff,0xf0,0x9d,0x85,0x97,0xf0,0x9d,0x85,0xa5,0x00,0x05,0xff,
++	0xf0,0x9d,0x85,0x98,0xf0,0x9d,0x85,0xa5,0x00,0xd4,0x75,0xd3,0x61,0xd2,0x44,0xd1,
++	0x22,0x10,0x11,0x05,0xff,0xf0,0x9d,0x85,0x98,0xf0,0x9d,0x85,0xa5,0xf0,0x9d,0x85,
++	0xae,0x00,0x05,0xff,0xf0,0x9d,0x85,0x98,0xf0,0x9d,0x85,0xa5,0xf0,0x9d,0x85,0xaf,
++	0x00,0x10,0x11,0x05,0xff,0xf0,0x9d,0x85,0x98,0xf0,0x9d,0x85,0xa5,0xf0,0x9d,0x85,
++	0xb0,0x00,0x05,0xff,0xf0,0x9d,0x85,0x98,0xf0,0x9d,0x85,0xa5,0xf0,0x9d,0x85,0xb1,
++	0x00,0xd1,0x15,0x10,0x11,0x05,0xff,0xf0,0x9d,0x85,0x98,0xf0,0x9d,0x85,0xa5,0xf0,
++	0x9d,0x85,0xb2,0x00,0x05,0xd8,0x10,0x04,0x05,0xd8,0x05,0x01,0xd2,0x08,0x11,0x04,
++	0x05,0x01,0x05,0x00,0x91,0x08,0x10,0x04,0x05,0x00,0x05,0xe2,0x05,0xd8,0xd3,0x12,
++	0x92,0x0d,0x51,0x04,0x05,0xd8,0x10,0x04,0x05,0xd8,0x05,0xff,0x00,0x05,0xff,0x00,
++	0x92,0x0e,0x51,0x05,0x05,0xff,0x00,0x10,0x05,0x05,0xff,0x00,0x05,0xdc,0x05,0xdc,
+ 	0xd0,0x97,0xcf,0x86,0xd5,0x28,0x94,0x24,0xd3,0x18,0xd2,0x0c,0x51,0x04,0x05,0xdc,
+ 	0x10,0x04,0x05,0xdc,0x05,0x00,0x91,0x08,0x10,0x04,0x05,0x00,0x05,0xe6,0x05,0xe6,
+ 	0x92,0x08,0x11,0x04,0x05,0xe6,0x05,0xdc,0x05,0x00,0x05,0x00,0xd4,0x14,0x53,0x04,
+@@ -4080,20 +4090,21 @@ static const unsigned char utf8data[64080] = {
+ 	0x00,0xd3,0x06,0xcf,0x06,0x00,0x00,0xd2,0x06,0xcf,0x06,0x00,0x00,0xd1,0x06,0xcf,
+ 	0x06,0x00,0x00,0xd0,0x06,0xcf,0x06,0x00,0x00,0xcf,0x86,0x55,0x04,0x00,0x00,0x54,
+ 	0x04,0x00,0x00,0x53,0x04,0x00,0x00,0x52,0x04,0x00,0x00,0x11,0x04,0x00,0x00,0x02,
+-	0x00,0xd4,0xc8,0xd3,0x70,0xd2,0x68,0xd1,0x60,0xd0,0x58,0xcf,0x86,0xd5,0x50,0xd4,
+-	0x4a,0xd3,0x44,0xd2,0x2a,0xd1,0x24,0xd0,0x1e,0xcf,0x86,0x95,0x18,0x94,0x14,0x93,
+-	0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,0x05,0x00,0x00,0x00,0x00,0x00,0x00,
+-	0x00,0x00,0x00,0x05,0x00,0xcf,0x06,0x05,0x00,0xcf,0x06,0x00,0x00,0xd1,0x06,0xcf,
+-	0x06,0x07,0x00,0xd0,0x06,0xcf,0x06,0x07,0x00,0xcf,0x86,0x55,0x04,0x07,0x00,0x14,
+-	0x04,0x07,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xcf,0x06,0x00,0x00,0xcf,0x06,0x00,
+-	0x00,0xcf,0x86,0xcf,0x06,0x00,0x00,0xcf,0x86,0xcf,0x06,0x00,0x00,0xcf,0x86,0xcf,
+-	0x06,0x00,0x00,0xd2,0x08,0xcf,0x86,0xcf,0x06,0x00,0x00,0xd1,0x08,0xcf,0x86,0xcf,
+-	0x06,0x00,0x00,0xd0,0x08,0xcf,0x86,0xcf,0x06,0x00,0x00,0xcf,0x86,0xd5,0x06,0xcf,
+-	0x06,0x00,0x00,0xd4,0x06,0xcf,0x06,0x00,0x00,0xd3,0x06,0xcf,0x06,0x00,0x00,0xd2,
+-	0x06,0xcf,0x06,0x00,0x00,0xd1,0x06,0xcf,0x06,0x00,0x00,0xd0,0x06,0xcf,0x06,0x00,
+-	0x00,0xcf,0x86,0x55,0x04,0x00,0x00,0x54,0x04,0x00,0x00,0x53,0x04,0x00,0x00,0x52,
+-	0x04,0x00,0x00,0x11,0x04,0x00,0x00,0x02,0x00,0xcf,0x86,0xcf,0x06,0x02,0x00,0x81,
+-	0x80,0xcf,0x86,0x85,0x84,0xcf,0x86,0xcf,0x06,0x02,0x00,0x00,0x00,0x00,0x00,0x00
++	0x00,0xd4,0xd9,0xd3,0x81,0xd2,0x79,0xd1,0x71,0xd0,0x69,0xcf,0x86,0xd5,0x60,0xd4,
++	0x59,0xd3,0x52,0xd2,0x33,0xd1,0x2c,0xd0,0x25,0xcf,0x86,0x95,0x1e,0x94,0x19,0x93,
++	0x14,0x92,0x0f,0x91,0x0a,0x10,0x05,0x00,0xff,0x00,0x05,0xff,0x00,0x00,0xff,0x00,
++	0x00,0xff,0x00,0x00,0xff,0x00,0x00,0xff,0x00,0x05,0xff,0x00,0xcf,0x06,0x05,0xff,
++	0x00,0xcf,0x06,0x00,0xff,0x00,0xd1,0x07,0xcf,0x06,0x07,0xff,0x00,0xd0,0x07,0xcf,
++	0x06,0x07,0xff,0x00,0xcf,0x86,0x55,0x05,0x07,0xff,0x00,0x14,0x05,0x07,0xff,0x00,
++	0x00,0xff,0x00,0xcf,0x06,0x00,0xff,0x00,0xcf,0x06,0x00,0xff,0x00,0xcf,0x06,0x00,
++	0xff,0x00,0xcf,0x86,0xcf,0x06,0x00,0x00,0xcf,0x86,0xcf,0x06,0x00,0x00,0xcf,0x86,
++	0xcf,0x06,0x00,0x00,0xd2,0x08,0xcf,0x86,0xcf,0x06,0x00,0x00,0xd1,0x08,0xcf,0x86,
++	0xcf,0x06,0x00,0x00,0xd0,0x08,0xcf,0x86,0xcf,0x06,0x00,0x00,0xcf,0x86,0xd5,0x06,
++	0xcf,0x06,0x00,0x00,0xd4,0x06,0xcf,0x06,0x00,0x00,0xd3,0x06,0xcf,0x06,0x00,0x00,
++	0xd2,0x06,0xcf,0x06,0x00,0x00,0xd1,0x06,0xcf,0x06,0x00,0x00,0xd0,0x06,0xcf,0x06,
++	0x00,0x00,0xcf,0x86,0x55,0x04,0x00,0x00,0x54,0x04,0x00,0x00,0x53,0x04,0x00,0x00,
++	0x52,0x04,0x00,0x00,0x11,0x04,0x00,0x00,0x02,0x00,0xcf,0x86,0xcf,0x06,0x02,0x00,
++	0x81,0x80,0xcf,0x86,0x85,0x84,0xcf,0x86,0xcf,0x06,0x02,0x00,0x00,0x00,0x00,0x00
+ };
+ 
+ struct utf8data_table utf8_data_table = {
+diff --git a/include/acpi/pcc.h b/include/acpi/pcc.h
+index 9b373d172a7760..699c1a37b8e784 100644
+--- a/include/acpi/pcc.h
++++ b/include/acpi/pcc.h
+@@ -12,6 +12,7 @@
+ struct pcc_mbox_chan {
+ 	struct mbox_chan *mchan;
+ 	u64 shmem_base_addr;
++	void __iomem *shmem;
+ 	u64 shmem_size;
+ 	u32 latency;
+ 	u32 max_access_rate;
+@@ -31,11 +32,13 @@ struct pcc_mbox_chan {
+ #define PCC_CMD_COMPLETION_NOTIFY	BIT(0)
+ 
+ #define MAX_PCC_SUBSPACES	256
++#define PCC_ACK_FLAG_MASK	0x1
+ 
+ #ifdef CONFIG_PCC
+ extern struct pcc_mbox_chan *
+ pcc_mbox_request_channel(struct mbox_client *cl, int subspace_id);
+ extern void pcc_mbox_free_channel(struct pcc_mbox_chan *chan);
++extern int pcc_mbox_ioremap(struct mbox_chan *chan);
+ #else
+ static inline struct pcc_mbox_chan *
+ pcc_mbox_request_channel(struct mbox_client *cl, int subspace_id)
+@@ -43,6 +46,10 @@ pcc_mbox_request_channel(struct mbox_client *cl, int subspace_id)
+ 	return ERR_PTR(-ENODEV);
+ }
+ static inline void pcc_mbox_free_channel(struct pcc_mbox_chan *chan) { }
++static inline int pcc_mbox_ioremap(struct mbox_chan *chan)
++{
++	return 0;
++}
+ #endif
+ 
+ #endif /* _PCC_H */
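/*
 * Illustrative sketch, not part of the patch: one way a PCC mailbox
 * client could pair the existing channel request with the new
 * pcc_mbox_ioremap() helper so the new shmem field is mapped before
 * the shared memory region is touched. The subspace id and function
 * name are made up for the example.
 */
static int example_pcc_client_init(struct mbox_client *cl)
{
	struct pcc_mbox_chan *pchan;
	int ret;

	pchan = pcc_mbox_request_channel(cl, 3 /* hypothetical subspace */);
	if (IS_ERR(pchan))
		return PTR_ERR(pchan);

	ret = pcc_mbox_ioremap(pchan->mchan);
	if (ret) {
		pcc_mbox_free_channel(pchan);
		return ret;
	}

	/* pchan->shmem now maps shmem_base_addr .. +shmem_size */
	return 0;
}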
+diff --git a/include/drm/display/drm_dp_mst_helper.h b/include/drm/display/drm_dp_mst_helper.h
+index f6a1cbb0f600fa..a80ba457a858f3 100644
+--- a/include/drm/display/drm_dp_mst_helper.h
++++ b/include/drm/display/drm_dp_mst_helper.h
+@@ -699,6 +699,13 @@ struct drm_dp_mst_topology_mgr {
+ 	 */
+ 	bool payload_id_table_cleared : 1;
+ 
++	/**
++	 * @reset_rx_state: The receiver state for down request replies and
++	 * up request messages must be reset after the topology manager has
++	 * been removed. Protected by @lock.
++	 */
++	bool reset_rx_state : 1;
++
+ 	/**
+ 	 * @payload_count: The number of currently active payloads in hardware. This value is only
+ 	 * intended to be used internally by MST helpers for payload tracking, and is only safe to
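/*
 * Illustrative sketch, not part of the patch: @reset_rx_state is
 * documented as protected by @lock, so a hypothetical helper that
 * requests the receiver-state reset would take the manager mutex:
 */
static void example_request_rx_reset(struct drm_dp_mst_topology_mgr *mgr)
{
	mutex_lock(&mgr->lock);
	mgr->reset_rx_state = true;
	mutex_unlock(&mgr->lock);
}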
+diff --git a/include/drm/intel/xe_pciids.h b/include/drm/intel/xe_pciids.h
+index 644872a35c3526..4ba88d2dccd4b3 100644
+--- a/include/drm/intel/xe_pciids.h
++++ b/include/drm/intel/xe_pciids.h
+@@ -120,7 +120,6 @@
+ 
+ /* RPL-P */
+ #define XE_RPLP_IDS(MACRO__, ...)		\
+-	XE_RPLU_IDS(MACRO__, ## __VA_ARGS__),	\
+ 	MACRO__(0xA720, ## __VA_ARGS__),	\
+ 	MACRO__(0xA7A0, ## __VA_ARGS__),	\
+ 	MACRO__(0xA7A8, ## __VA_ARGS__),	\
+@@ -175,18 +174,38 @@
+ 	XE_ATS_M150_IDS(MACRO__, ## __VA_ARGS__),\
+ 	XE_ATS_M75_IDS(MACRO__, ## __VA_ARGS__)
+ 
+-/* MTL / ARL */
++/* ARL */
++#define XE_ARL_IDS(MACRO__, ...)		\
++	MACRO__(0x7D41, ## __VA_ARGS__),	\
++	MACRO__(0x7D51, ## __VA_ARGS__),        \
++	MACRO__(0x7D67, ## __VA_ARGS__),	\
++	MACRO__(0x7DD1, ## __VA_ARGS__),	\
++	MACRO__(0xB640, ## __VA_ARGS__)
++
++/* MTL */
+ #define XE_MTL_IDS(MACRO__, ...)		\
+ 	MACRO__(0x7D40, ## __VA_ARGS__),	\
+-	MACRO__(0x7D41, ## __VA_ARGS__),	\
+ 	MACRO__(0x7D45, ## __VA_ARGS__),	\
+-	MACRO__(0x7D51, ## __VA_ARGS__),        \
+ 	MACRO__(0x7D55, ## __VA_ARGS__),	\
+ 	MACRO__(0x7D60, ## __VA_ARGS__),	\
+-	MACRO__(0x7D67, ## __VA_ARGS__),	\
+-	MACRO__(0x7DD1, ## __VA_ARGS__),        \
+ 	MACRO__(0x7DD5, ## __VA_ARGS__)
+ 
++/* PVC */
++#define XE_PVC_IDS(MACRO__, ...)		\
++	MACRO__(0x0B69, ## __VA_ARGS__),	\
++	MACRO__(0x0B6E, ## __VA_ARGS__),	\
++	MACRO__(0x0BD4, ## __VA_ARGS__),	\
++	MACRO__(0x0BD5, ## __VA_ARGS__),	\
++	MACRO__(0x0BD6, ## __VA_ARGS__),	\
++	MACRO__(0x0BD7, ## __VA_ARGS__),	\
++	MACRO__(0x0BD8, ## __VA_ARGS__),	\
++	MACRO__(0x0BD9, ## __VA_ARGS__),	\
++	MACRO__(0x0BDA, ## __VA_ARGS__),	\
++	MACRO__(0x0BDB, ## __VA_ARGS__),	\
++	MACRO__(0x0BE0, ## __VA_ARGS__),	\
++	MACRO__(0x0BE1, ## __VA_ARGS__),	\
++	MACRO__(0x0BE5, ## __VA_ARGS__)
++
+ #define XE_LNL_IDS(MACRO__, ...) \
+ 	MACRO__(0x6420, ## __VA_ARGS__), \
+ 	MACRO__(0x64A0, ## __VA_ARGS__), \
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index e84a93c4013207..6b4bc85f4999ba 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -195,7 +195,7 @@ struct gendisk {
+ 	unsigned int		nr_zones;
+ 	unsigned int		zone_capacity;
+ 	unsigned int		last_zone_capacity;
+-	unsigned long		*conv_zones_bitmap;
++	unsigned long __rcu	*conv_zones_bitmap;
+ 	unsigned int            zone_wplugs_hash_bits;
+ 	spinlock_t              zone_wplugs_lock;
+ 	struct mempool_s	*zone_wplugs_pool;
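/*
 * Illustrative sketch, not part of the patch: with conv_zones_bitmap
 * now __rcu-annotated, lockless readers are expected to go through
 * rcu_dereference() inside an RCU read-side section instead of loading
 * the pointer directly. The helper name is hypothetical.
 */
static bool example_zone_is_conventional(struct gendisk *disk,
					 unsigned int zno)
{
	unsigned long *bitmap;
	bool conv = false;

	rcu_read_lock();
	bitmap = rcu_dereference(disk->conv_zones_bitmap);
	if (bitmap)
		conv = test_bit(zno, bitmap);
	rcu_read_unlock();

	return conv;
}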
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index bc2e3dab0487ea..cbe2350912460b 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -1300,8 +1300,12 @@ void *__bpf_dynptr_data_rw(const struct bpf_dynptr_kern *ptr, u32 len);
+ bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr);
+ 
+ #ifdef CONFIG_BPF_JIT
+-int bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr);
+-int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr);
++int bpf_trampoline_link_prog(struct bpf_tramp_link *link,
++			     struct bpf_trampoline *tr,
++			     struct bpf_prog *tgt_prog);
++int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link,
++			       struct bpf_trampoline *tr,
++			       struct bpf_prog *tgt_prog);
+ struct bpf_trampoline *bpf_trampoline_get(u64 key,
+ 					  struct bpf_attach_target_info *tgt_info);
+ void bpf_trampoline_put(struct bpf_trampoline *tr);
+@@ -1383,12 +1387,14 @@ void bpf_jit_uncharge_modmem(u32 size);
+ bool bpf_prog_has_trampoline(const struct bpf_prog *prog);
+ #else
+ static inline int bpf_trampoline_link_prog(struct bpf_tramp_link *link,
+-					   struct bpf_trampoline *tr)
++					   struct bpf_trampoline *tr,
++					   struct bpf_prog *tgt_prog)
+ {
+ 	return -ENOTSUPP;
+ }
+ static inline int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link,
+-					     struct bpf_trampoline *tr)
++					     struct bpf_trampoline *tr,
++					     struct bpf_prog *tgt_prog)
+ {
+ 	return -ENOTSUPP;
+ }
+@@ -1492,6 +1498,9 @@ struct bpf_prog_aux {
+ 	bool xdp_has_frags;
+ 	bool exception_cb;
+ 	bool exception_boundary;
++	bool is_extended; /* true if extended by freplace program */
++	u64 prog_array_member_cnt; /* number of times this prog appears in a prog_array */
++	struct mutex ext_mutex; /* mutex for is_extended and prog_array_member_cnt */
+ 	struct bpf_arena *arena;
+ 	/* BTF_KIND_FUNC_PROTO for valid attach_btf_id */
+ 	const struct btf_type *attach_func_proto;
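/*
 * Illustrative sketch, not part of the patch: the new ext_mutex exists
 * so that "extended by freplace" and "member of a prog_array" can be
 * made mutually exclusive. A prog_array update could take the mutex
 * and refuse already-extended programs roughly like this (hypothetical
 * helper, simplified):
 */
static int example_prog_array_track(struct bpf_prog *prog)
{
	int ret = 0;

	mutex_lock(&prog->aux->ext_mutex);
	if (prog->aux->is_extended)
		ret = -EBUSY;	/* already extended by an freplace program */
	else
		prog->aux->prog_array_member_cnt++;
	mutex_unlock(&prog->aux->ext_mutex);

	return ret;
}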
+diff --git a/include/linux/cleanup.h b/include/linux/cleanup.h
+index 518bd1fd86fbe0..0cc66f8d28e7b6 100644
+--- a/include/linux/cleanup.h
++++ b/include/linux/cleanup.h
+@@ -285,14 +285,20 @@ static inline class_##_name##_t class_##_name##ext##_constructor(_init_args) \
+  *      similar to scoped_guard(), except it does fail when the lock
+  *      acquire fails.
+  *
++ *      Only for conditional locks.
+  */
+ 
++#define __DEFINE_CLASS_IS_CONDITIONAL(_name, _is_cond)	\
++static __maybe_unused const bool class_##_name##_is_conditional = _is_cond
++
+ #define DEFINE_GUARD(_name, _type, _lock, _unlock) \
++	__DEFINE_CLASS_IS_CONDITIONAL(_name, false); \
+ 	DEFINE_CLASS(_name, _type, if (_T) { _unlock; }, ({ _lock; _T; }), _type _T); \
+ 	static inline void * class_##_name##_lock_ptr(class_##_name##_t *_T) \
+ 	{ return (void *)(__force unsigned long)*_T; }
+ 
+ #define DEFINE_GUARD_COND(_name, _ext, _condlock) \
++	__DEFINE_CLASS_IS_CONDITIONAL(_name##_ext, true); \
+ 	EXTEND_CLASS(_name, _ext, \
+ 		     ({ void *_t = _T; if (_T && !(_condlock)) _t = NULL; _t; }), \
+ 		     class_##_name##_t _T) \
+@@ -303,17 +309,40 @@ static inline class_##_name##_t class_##_name##ext##_constructor(_init_args) \
+ 	CLASS(_name, __UNIQUE_ID(guard))
+ 
+ #define __guard_ptr(_name) class_##_name##_lock_ptr
++#define __is_cond_ptr(_name) class_##_name##_is_conditional
+ 
+-#define scoped_guard(_name, args...)					\
+-	for (CLASS(_name, scope)(args),					\
+-	     *done = NULL; __guard_ptr(_name)(&scope) && !done; done = (void *)1)
+-
+-#define scoped_cond_guard(_name, _fail, args...) \
+-	for (CLASS(_name, scope)(args), \
+-	     *done = NULL; !done; done = (void *)1) \
+-		if (!__guard_ptr(_name)(&scope)) _fail; \
+-		else
+-
++/*
++ * Helper macro for scoped_guard().
++ *
++ * Note that the "!__is_cond_ptr(_name)" part of the condition ensures that
++ * the compiler can be sure that, for unconditional locks, the body of the
++ * loop (caller-provided code glued to the else clause) cannot be skipped.
++ * This is needed because the other part - "__guard_ptr(_name)(&scope)" - is
++ * too hard to deduce (even though it could be proven true for unconditional
++ * locks).
++ */
++#define __scoped_guard(_name, _label, args...)				\
++	for (CLASS(_name, scope)(args);					\
++	     __guard_ptr(_name)(&scope) || !__is_cond_ptr(_name);	\
++	     ({ goto _label; }))					\
++		if (0) {						\
++_label:									\
++			break;						\
++		} else
++
++#define scoped_guard(_name, args...)	\
++	__scoped_guard(_name, __UNIQUE_ID(label), args)
++
++#define __scoped_cond_guard(_name, _fail, _label, args...)		\
++	for (CLASS(_name, scope)(args); true; ({ goto _label; }))	\
++		if (!__guard_ptr(_name)(&scope)) {			\
++			BUILD_BUG_ON(!__is_cond_ptr(_name));		\
++			_fail;						\
++_label:									\
++			break;						\
++		} else
++
++#define scoped_cond_guard(_name, _fail, args...)	\
++	__scoped_cond_guard(_name, _fail, __UNIQUE_ID(label), args)
+ /*
+  * Additional helper macros for generating lock guards with types, either for
+  * locks that don't have a native type (eg. RCU, preempt) or those that need a
+@@ -369,14 +398,17 @@ static inline class_##_name##_t class_##_name##_constructor(void)	\
+ }
+ 
+ #define DEFINE_LOCK_GUARD_1(_name, _type, _lock, _unlock, ...)		\
++__DEFINE_CLASS_IS_CONDITIONAL(_name, false);				\
+ __DEFINE_UNLOCK_GUARD(_name, _type, _unlock, __VA_ARGS__)		\
+ __DEFINE_LOCK_GUARD_1(_name, _type, _lock)
+ 
+ #define DEFINE_LOCK_GUARD_0(_name, _lock, _unlock, ...)			\
++__DEFINE_CLASS_IS_CONDITIONAL(_name, false);				\
+ __DEFINE_UNLOCK_GUARD(_name, void, _unlock, __VA_ARGS__)		\
+ __DEFINE_LOCK_GUARD_0(_name, _lock)
+ 
+ #define DEFINE_LOCK_GUARD_1_COND(_name, _ext, _condlock)		\
++	__DEFINE_CLASS_IS_CONDITIONAL(_name##_ext, true);		\
+ 	EXTEND_CLASS(_name, _ext,					\
+ 		     ({ class_##_name##_t _t = { .lock = l }, *_T = &_t;\
+ 		        if (_T->lock && !(_condlock)) _T->lock = NULL;	\
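/*
 * Illustrative sketch, not part of the patch: how the reworked macros
 * are used. An unconditional guard's body is now provably entered by
 * the compiler, while a conditional guard (a _try or _intr variant)
 * can still skip it or run the failure action. Lock names are examples.
 */
static DEFINE_MUTEX(example_lock);

static int example_user(void)
{
	scoped_guard(mutex, &example_lock) {
		/* runs with example_lock held; released on scope exit */
	}

	scoped_cond_guard(mutex_intr, return -EINTR, &example_lock) {
		/* runs only if mutex_lock_interruptible() succeeded */
	}
	return 0;
}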
+diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
+index d35b677b08fe13..c846436b64593e 100644
+--- a/include/linux/clocksource.h
++++ b/include/linux/clocksource.h
+@@ -49,6 +49,7 @@ struct module;
+  * @archdata:		Optional arch-specific data
+  * @max_cycles:		Maximum safe cycle value which won't overflow on
+  *			multiplication
++ * @max_raw_delta:	Maximum safe delta value for negative motion detection
+  * @name:		Pointer to clocksource name
+  * @list:		List head for registration (internal)
+  * @freq_khz:		Clocksource frequency in khz.
+@@ -109,6 +110,7 @@ struct clocksource {
+ 	struct arch_clocksource_data archdata;
+ #endif
+ 	u64			max_cycles;
++	u64			max_raw_delta;
+ 	const char		*name;
+ 	struct list_head	list;
+ 	u32			freq_khz;
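/*
 * Illustrative sketch, not part of the patch: the idea behind
 * max_raw_delta is that a masked delta larger than this value is
 * treated as the counter having moved slightly backwards, not as a
 * huge forward jump. Roughly (hypothetical helper):
 */
static inline u64 example_safe_delta(u64 now, u64 last,
				     const struct clocksource *cs)
{
	u64 delta = (now - last) & cs->mask;

	return delta > cs->max_raw_delta ? 0 : delta;
}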
+diff --git a/include/linux/eeprom_93cx6.h b/include/linux/eeprom_93cx6.h
+index c860c72a921d03..3a485cc0e0fa0b 100644
+--- a/include/linux/eeprom_93cx6.h
++++ b/include/linux/eeprom_93cx6.h
+@@ -11,6 +11,8 @@
+ 	Supported chipsets: 93c46, 93c56 and 93c66.
+  */
+ 
++#include <linux/bits.h>
++
+ /*
+  * EEPROM operation defines.
+  */
+@@ -34,6 +36,7 @@
+  * @register_write(struct eeprom_93cx6 *eeprom): handler to
+  * write to the eeprom register by using all reg_* fields.
+  * @width: eeprom width, should be one of the PCI_EEPROM_WIDTH_* defines
++ * @quirks: eeprom or controller quirks
+  * @drive_data: Set if we're driving the data line.
+  * @reg_data_in: register field to indicate data input
+  * @reg_data_out: register field to indicate data output
+@@ -50,6 +53,9 @@ struct eeprom_93cx6 {
+ 	void (*register_write)(struct eeprom_93cx6 *eeprom);
+ 
+ 	int width;
++	unsigned int quirks;
++/* Some EEPROMs require an extra clock cycle before reading */
++#define PCI_EEPROM_QUIRK_EXTRA_READ_CYCLE	BIT(0)
+ 
+ 	char drive_data;
+ 	char reg_data_in;
+@@ -71,3 +77,8 @@ extern void eeprom_93cx6_wren(struct eeprom_93cx6 *eeprom, bool enable);
+ 
+ extern void eeprom_93cx6_write(struct eeprom_93cx6 *eeprom,
+ 			       u8 addr, u16 data);
++
++static inline bool has_quirk_extra_read_cycle(struct eeprom_93cx6 *eeprom)
++{
++	return eeprom->quirks & PCI_EEPROM_QUIRK_EXTRA_READ_CYCLE;
++}
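/*
 * Illustrative sketch, not part of the patch: a driver that knows its
 * EEPROM wants the extra clock cycle sets the quirk at init time, and
 * the bit-banging read path can branch on the new helper. Field values
 * below are examples only.
 */
static void example_eeprom_setup(struct eeprom_93cx6 *eeprom)
{
	eeprom->width = PCI_EEPROM_WIDTH_93C66;
	eeprom->quirks |= PCI_EEPROM_QUIRK_EXTRA_READ_CYCLE;

	if (has_quirk_extra_read_cycle(eeprom)) {
		/* e.g. clock in one extra dummy bit before the data bits */
	}
}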
+diff --git a/include/linux/eventpoll.h b/include/linux/eventpoll.h
+index 3337745d81bd69..0c0d00fcd131f9 100644
+--- a/include/linux/eventpoll.h
++++ b/include/linux/eventpoll.h
+@@ -42,7 +42,7 @@ static inline void eventpoll_release(struct file *file)
+ 	 * because the file is on the way to be removed and nobody (but
+ 	 * eventpoll) still has a reference to this file.
+ 	 */
+-	if (likely(!file->f_ep))
++	if (likely(!READ_ONCE(file->f_ep)))
+ 		return;
+ 
+ 	/*
+diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
+index 3b2ad444c002ee..c24f8bc01045df 100644
+--- a/include/linux/f2fs_fs.h
++++ b/include/linux/f2fs_fs.h
+@@ -24,6 +24,7 @@
+ #define NEW_ADDR		((block_t)-1)	/* used as block_t addresses */
+ #define COMPRESS_ADDR		((block_t)-2)	/* used as compressed data flag */
+ 
++#define F2FS_BLKSIZE_MASK		(F2FS_BLKSIZE - 1)
+ #define F2FS_BYTES_TO_BLK(bytes)	((unsigned long long)(bytes) >> F2FS_BLKSIZE_BITS)
+ #define F2FS_BLK_TO_BYTES(blk)		((unsigned long long)(blk) << F2FS_BLKSIZE_BITS)
+ #define F2FS_BLK_END_BYTES(blk)		(F2FS_BLK_TO_BYTES(blk + 1) - 1)
+diff --git a/include/linux/fanotify.h b/include/linux/fanotify.h
+index 4f1c4f60311808..89ff45bd6f01ba 100644
+--- a/include/linux/fanotify.h
++++ b/include/linux/fanotify.h
+@@ -36,6 +36,7 @@
+ #define FANOTIFY_ADMIN_INIT_FLAGS	(FANOTIFY_PERM_CLASSES | \
+ 					 FAN_REPORT_TID | \
+ 					 FAN_REPORT_PIDFD | \
++					 FAN_REPORT_FD_ERROR | \
+ 					 FAN_UNLIMITED_QUEUE | \
+ 					 FAN_UNLIMITED_MARKS)
+ 
+diff --git a/include/linux/hid.h b/include/linux/hid.h
+index 121d5b8bc86753..a7d60a1c72a09a 100644
+--- a/include/linux/hid.h
++++ b/include/linux/hid.h
+@@ -359,6 +359,7 @@ struct hid_item {
+  * | @HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP:
+  * | @HID_QUIRK_HAVE_SPECIAL_DRIVER:
+  * | @HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE:
++ * | @HID_QUIRK_IGNORE_SPECIAL_DRIVER:
+  * | @HID_QUIRK_FULLSPEED_INTERVAL:
+  * | @HID_QUIRK_NO_INIT_REPORTS:
+  * | @HID_QUIRK_NO_IGNORE:
+@@ -384,6 +385,7 @@ struct hid_item {
+ #define HID_QUIRK_HAVE_SPECIAL_DRIVER		BIT(19)
+ #define HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE	BIT(20)
+ #define HID_QUIRK_NOINVERT			BIT(21)
++#define HID_QUIRK_IGNORE_SPECIAL_DRIVER		BIT(22)
+ #define HID_QUIRK_FULLSPEED_INTERVAL		BIT(28)
+ #define HID_QUIRK_NO_INIT_REPORTS		BIT(29)
+ #define HID_QUIRK_NO_IGNORE			BIT(30)
+diff --git a/include/linux/i3c/master.h b/include/linux/i3c/master.h
+index 2a1ed05d5782a8..6e5328c6c6afd2 100644
+--- a/include/linux/i3c/master.h
++++ b/include/linux/i3c/master.h
+@@ -298,7 +298,8 @@ enum i3c_open_drain_speed {
+  * @I3C_ADDR_SLOT_I2C_DEV: address is assigned to an I2C device
+  * @I3C_ADDR_SLOT_I3C_DEV: address is assigned to an I3C device
+  * @I3C_ADDR_SLOT_STATUS_MASK: address slot mask
+- *
++ * @I3C_ADDR_SLOT_EXT_DESIRED: the bitmask represents addresses that are preferred by some devices,
++ *			       such as the "assigned-address" property in a device tree source.
+  * On an I3C bus, addresses are assigned dynamically, and we need to know which
+  * addresses are free to use and which ones are already assigned.
+  *
+@@ -311,8 +312,12 @@ enum i3c_addr_slot_status {
+ 	I3C_ADDR_SLOT_I2C_DEV,
+ 	I3C_ADDR_SLOT_I3C_DEV,
+ 	I3C_ADDR_SLOT_STATUS_MASK = 3,
++	I3C_ADDR_SLOT_EXT_STATUS_MASK = 7,
++	I3C_ADDR_SLOT_EXT_DESIRED = BIT(2),
+ };
+ 
++#define I3C_ADDR_SLOT_STATUS_BITS 4
++
+ /**
+  * struct i3c_bus - I3C bus object
+  * @cur_master: I3C master currently driving the bus. Since I3C is multi-master
+@@ -354,7 +359,7 @@ enum i3c_addr_slot_status {
+ struct i3c_bus {
+ 	struct i3c_dev_desc *cur_master;
+ 	int id;
+-	unsigned long addrslots[((I2C_MAX_ADDR + 1) * 2) / BITS_PER_LONG];
++	unsigned long addrslots[((I2C_MAX_ADDR + 1) * I3C_ADDR_SLOT_STATUS_BITS) / BITS_PER_LONG];
+ 	enum i3c_bus_mode mode;
+ 	struct {
+ 		unsigned long i3c;
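With four status bits per address instead of two, the slot lookup arithmetic changes accordingly; roughly what the helpers in drivers/i3c/master.c now compute (illustrative sketch, not the literal code):

static u8 addr_slot_bits(const struct i3c_bus *bus, u16 addr)	/* hypothetical */
{
	unsigned int bitpos = addr * I3C_ADDR_SLOT_STATUS_BITS;
	const unsigned long *ptr = bus->addrslots + bitpos / BITS_PER_LONG;

	/* Low 2 bits: legacy free/rsvd/i2c/i3c status.  Bit 2: "desired",
	 * i.e. an address requested via e.g. "assigned-address" in DT. */
	return (*ptr >> (bitpos % BITS_PER_LONG)) & I3C_ADDR_SLOT_EXT_STATUS_MASK;
}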
+diff --git a/include/linux/io_uring/cmd.h b/include/linux/io_uring/cmd.h
+index c189d36ad55ea6..968de0cde25d58 100644
+--- a/include/linux/io_uring/cmd.h
++++ b/include/linux/io_uring/cmd.h
+@@ -43,7 +43,7 @@ int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
+  * Note: the caller should never hard code @issue_flags and is only allowed
+  * to pass the mask provided by the core io_uring code.
+  */
+-void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret, ssize_t res2,
++void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret, u64 res2,
+ 			unsigned issue_flags);
+ 
+ void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
+@@ -67,7 +67,7 @@ static inline int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
+ 	return -EOPNOTSUPP;
+ }
+ static inline void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret,
+-		ssize_t ret2, unsigned issue_flags)
++		u64 ret2, unsigned issue_flags)
+ {
+ }
+ static inline void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
+diff --git a/include/linux/leds.h b/include/linux/leds.h
+index e5968c3ed4ae08..2337f516fa7c2c 100644
+--- a/include/linux/leds.h
++++ b/include/linux/leds.h
+@@ -238,7 +238,7 @@ struct led_classdev {
+ 	struct kernfs_node	*brightness_hw_changed_kn;
+ #endif
+ 
+-	/* Ensures consistent access to the LED Flash Class device */
++	/* Ensures consistent access to the LED class device */
+ 	struct mutex		led_access;
+ };
+ 
+diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
+index f34407cc27888d..eb67d3d5ff5b22 100644
+--- a/include/linux/mmc/card.h
++++ b/include/linux/mmc/card.h
+@@ -35,7 +35,7 @@ struct mmc_csd {
+ 	unsigned int		wp_grp_size;
+ 	unsigned int		read_blkbits;
+ 	unsigned int		write_blkbits;
+-	unsigned int		capacity;
++	sector_t		capacity;
+ 	unsigned int		read_partial:1,
+ 				read_misalign:1,
+ 				write_partial:1,
+@@ -294,6 +294,7 @@ struct mmc_card {
+ #define MMC_QUIRK_BROKEN_SD_DISCARD	(1<<14)	/* Disable broken SD discard support */
+ #define MMC_QUIRK_BROKEN_SD_CACHE	(1<<15)	/* Disable broken SD cache support */
+ #define MMC_QUIRK_BROKEN_CACHE_FLUSH	(1<<16)	/* Don't flush cache until the write has occurred */
++#define MMC_QUIRK_BROKEN_SD_POWEROFF_NOTIFY	(1<<17) /* Disable broken SD poweroff notify support */
+ 
+ 	bool			written_flag;	/* Indicates eMMC has been written since power on */
+ 	bool			reenable_cmdq;	/* Re-enable Command Queue */
+diff --git a/include/linux/mmc/sd.h b/include/linux/mmc/sd.h
+index 6727576a875559..865cc0ca8543d1 100644
+--- a/include/linux/mmc/sd.h
++++ b/include/linux/mmc/sd.h
+@@ -36,6 +36,7 @@
+ /* OCR bit definitions */
+ #define SD_OCR_S18R		(1 << 24)    /* 1.8V switching request */
+ #define SD_ROCR_S18A		SD_OCR_S18R  /* 1.8V switching accepted by card */
++#define SD_OCR_2T		(1 << 27)    /* HO2T/CO2T - SDUC support */
+ #define SD_OCR_XPC		(1 << 28)    /* SDXC power control */
+ #define SD_OCR_CCS		(1 << 30)    /* Card Capacity Status */
+ 
+diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
+index cc839e4365c182..74aa9fbbdae70b 100644
+--- a/include/linux/page-flags.h
++++ b/include/linux/page-flags.h
+@@ -306,7 +306,7 @@ static const unsigned long *const_folio_flags(const struct folio *folio,
+ {
+ 	const struct page *page = &folio->page;
+ 
+-	VM_BUG_ON_PGFLAGS(PageTail(page), page);
++	VM_BUG_ON_PGFLAGS(page->compound_head & 1, page);
+ 	VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page);
+ 	return &page[n].flags;
+ }
+@@ -315,7 +315,7 @@ static unsigned long *folio_flags(struct folio *folio, unsigned n)
+ {
+ 	struct page *page = &folio->page;
+ 
+-	VM_BUG_ON_PGFLAGS(PageTail(page), page);
++	VM_BUG_ON_PGFLAGS(page->compound_head & 1, page);
+ 	VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page);
+ 	return &page[n].flags;
+ }
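Background for the open-coded test: bit 0 of page->compound_head is the tail-page marker, so the new expression is PageTail() minus the helper, presumably to keep these low-level flag accessors from depending on the very page-flag machinery they implement. The encoding, as a sketch (hypothetical helper name):

/*
 * page->compound_head encoding:
 *   head or order-0 page: bit 0 clear
 *   tail page:            (unsigned long)head_page | 1
 */
static inline bool page_is_tail_raw(const struct page *page)	/* hypothetical */
{
	return page->compound_head & 1;
}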
+diff --git a/include/linux/pci.h b/include/linux/pci.h
+index 573b4c4c2be61f..4e77c4230c0a19 100644
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -2609,6 +2609,12 @@ pci_host_bridge_acpi_msi_domain(struct pci_bus *bus) { return NULL; }
+ static inline bool pci_pr3_present(struct pci_dev *pdev) { return false; }
+ #endif
+ 
++#if defined(CONFIG_X86) && defined(CONFIG_ACPI)
++bool arch_pci_dev_is_removable(struct pci_dev *pdev);
++#else
++static inline bool arch_pci_dev_is_removable(struct pci_dev *pdev) { return false; }
++#endif
++
+ #ifdef CONFIG_EEH
+ static inline struct eeh_dev *pci_dev_to_eeh_dev(struct pci_dev *pdev)
+ {
+diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
+index e61d164622db47..1bad36e3e4ef1f 100644
+--- a/include/linux/scatterlist.h
++++ b/include/linux/scatterlist.h
+@@ -313,7 +313,7 @@ static inline void sg_dma_mark_bus_address(struct scatterlist *sg)
+ }
+ 
+ /**
+- * sg_unmark_bus_address - Unmark the scatterlist entry as a bus address
++ * sg_dma_unmark_bus_address - Unmark the scatterlist entry as a bus address
+  * @sg:		 SG entry
+  *
+  * Description:
+diff --git a/include/linux/stackdepot.h b/include/linux/stackdepot.h
+index e9ec32fb97d4a7..2cc21ffcdaf9e4 100644
+--- a/include/linux/stackdepot.h
++++ b/include/linux/stackdepot.h
+@@ -147,7 +147,7 @@ static inline int stack_depot_early_init(void)	{ return 0; }
+  * If the provided stack trace comes from the interrupt context, only the part
+  * up to the interrupt entry is saved.
+  *
+- * Context: Any context, but setting STACK_DEPOT_FLAG_CAN_ALLOC is required if
++ * Context: Any context, but unsetting STACK_DEPOT_FLAG_CAN_ALLOC is required if
+  *          alloc_pages() cannot be used from the current context. Currently
+  *          this is the case for contexts where neither %GFP_ATOMIC nor
+  *          %GFP_NOWAIT can be used (NMI, raw_spin_lock).
+@@ -156,7 +156,7 @@ static inline int stack_depot_early_init(void)	{ return 0; }
+  */
+ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
+ 					    unsigned int nr_entries,
+-					    gfp_t gfp_flags,
++					    gfp_t alloc_flags,
+ 					    depot_flags_t depot_flags);
+ 
+ /**
+@@ -175,7 +175,7 @@ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
+  * Return: Handle of the stack trace stored in depot, 0 on failure
+  */
+ depot_stack_handle_t stack_depot_save(unsigned long *entries,
+-				      unsigned int nr_entries, gfp_t gfp_flags);
++				      unsigned int nr_entries, gfp_t alloc_flags);
+ 
+ /**
+  * __stack_depot_get_stack_record - Get a pointer to a stack_record struct
+diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h
+index 902c20ef495acb..715e0919972e4c 100644
+--- a/include/linux/timekeeper_internal.h
++++ b/include/linux/timekeeper_internal.h
+@@ -68,9 +68,6 @@ struct tk_read_base {
+  *			shifted nano seconds.
+  * @ntp_error_shift:	Shift conversion between clock shifted nano seconds and
+  *			ntp shifted nano seconds.
+- * @last_warning:	Warning ratelimiter (DEBUG_TIMEKEEPING)
+- * @underflow_seen:	Underflow warning flag (DEBUG_TIMEKEEPING)
+- * @overflow_seen:	Overflow warning flag (DEBUG_TIMEKEEPING)
+  *
+  * Note: For timespec(64) based interfaces wall_to_monotonic is what
+  * we need to add to xtime (or xtime corrected for sub jiffy times)
+@@ -124,18 +121,6 @@ struct timekeeper {
+ 	u32			ntp_err_mult;
+ 	/* Flag used to avoid updating NTP twice with same second */
+ 	u32			skip_second_overflow;
+-#ifdef CONFIG_DEBUG_TIMEKEEPING
+-	long			last_warning;
+-	/*
+-	 * These simple flag variables are managed
+-	 * without locks, which is racy, but they are
+-	 * ok since we don't really care about being
+-	 * super precise about how many events were
+-	 * seen, just that a problem was observed.
+-	 */
+-	int			underflow_seen;
+-	int			overflow_seen;
+-#endif
+ };
+ 
+ #ifdef CONFIG_GENERIC_TIME_VSYSCALL
+diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h
+index 5a7f96684ea226..ebdfef124b2bc0 100644
+--- a/include/linux/usb/chipidea.h
++++ b/include/linux/usb/chipidea.h
+@@ -65,6 +65,7 @@ struct ci_hdrc_platform_data {
+ #define CI_HDRC_PHY_VBUS_CONTROL	BIT(16)
+ #define CI_HDRC_HAS_PORTSC_PEC_MISSED	BIT(17)
+ #define CI_HDRC_FORCE_VBUS_ACTIVE_ALWAYS	BIT(18)
++#define	CI_HDRC_HAS_SHORT_PKT_LIMIT	BIT(19)
+ 	enum usb_dr_mode	dr_mode;
+ #define CI_HDRC_CONTROLLER_RESET_EVENT		0
+ #define CI_HDRC_CONTROLLER_STOPPED_EVENT	1
+diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
+index a1864cff616aee..5bb4eaa52e14cf 100644
+--- a/include/net/bluetooth/hci.h
++++ b/include/net/bluetooth/hci.h
+@@ -301,6 +301,20 @@ enum {
+ 	 */
+ 	HCI_QUIRK_BROKEN_SET_RPA_TIMEOUT,
+ 
++	/*
++	 * When this quirk is set, the HCI_OP_LE_EXT_CREATE_CONN command is
++	 * disabled. This is required for the Actions Semiconductor ATS2851
++	 * based controllers, which erroneously claim to support it.
++	 */
++	HCI_QUIRK_BROKEN_EXT_CREATE_CONN,
++
++	/*
++	 * When this quirk is set, the command WRITE_AUTH_PAYLOAD_TIMEOUT is
++	 * skipped. This is required for the Actions Semiconductor ATS2851
++	 * based controllers, due to a race condition in pairing process.
++	 * based controllers, due to a race condition in the pairing process.
++	HCI_QUIRK_BROKEN_WRITE_AUTH_PAYLOAD_TIMEOUT,
++
+ 	/* When this quirk is set, MSFT extension monitor tracking by
+ 	 * address filter is supported. Since tracking quantity of each
+ 	 * pattern is limited, this feature supports tracking multiple
+diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
+index 4c185a08c3a3af..c95f7e6ba25514 100644
+--- a/include/net/bluetooth/hci_core.h
++++ b/include/net/bluetooth/hci_core.h
+@@ -1934,8 +1934,8 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
+ 			   !test_bit(HCI_QUIRK_BROKEN_EXT_SCAN, &(dev)->quirks))
+ 
+ /* Use ext create connection if command is supported */
+-#define use_ext_conn(dev) ((dev)->commands[37] & 0x80)
+-
++#define use_ext_conn(dev) (((dev)->commands[37] & 0x80) && \
++	!test_bit(HCI_QUIRK_BROKEN_EXT_CREATE_CONN, &(dev)->quirks))
+ /* Extended advertising support */
+ #define ext_adv_capable(dev) (((dev)->le_features[1] & HCI_LE_EXT_ADV))
+ 
+@@ -1948,8 +1948,10 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
+  * C24: Mandatory if the LE Controller supports Connection State and either
+  * LE Feature (LL Privacy) or LE Feature (Extended Advertising) is supported
+  */
+-#define use_enhanced_conn_complete(dev) (ll_privacy_capable(dev) || \
+-					 ext_adv_capable(dev))
++#define use_enhanced_conn_complete(dev) ((ll_privacy_capable(dev) || \
++					 ext_adv_capable(dev)) && \
++					 !test_bit(HCI_QUIRK_BROKEN_EXT_CREATE_CONN, \
++						 &(dev)->quirks))
+ 
+ /* Periodic advertising support */
+ #define per_adv_capable(dev) (((dev)->le_features[1] & HCI_LE_PERIODIC_ADV))
+diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h
+index ff27cb2e166207..03b6165756fc5d 100644
+--- a/include/net/netfilter/nf_tables_core.h
++++ b/include/net/netfilter/nf_tables_core.h
+@@ -161,6 +161,7 @@ enum {
+ };
+ 
+ struct nft_inner_tun_ctx {
++	unsigned long cookie;
+ 	u16	type;
+ 	u16	inner_tunoff;
+ 	u16	inner_lloff;
+diff --git a/include/net/tcp_ao.h b/include/net/tcp_ao.h
+index 1d46460d0fefab..df655ce6987d37 100644
+--- a/include/net/tcp_ao.h
++++ b/include/net/tcp_ao.h
+@@ -183,7 +183,8 @@ int tcp_ao_hash_skb(unsigned short int family,
+ 		    const u8 *tkey, int hash_offset, u32 sne);
+ int tcp_parse_ao(struct sock *sk, int cmd, unsigned short int family,
+ 		 sockptr_t optval, int optlen);
+-struct tcp_ao_key *tcp_ao_established_key(struct tcp_ao_info *ao,
++struct tcp_ao_key *tcp_ao_established_key(const struct sock *sk,
++					  struct tcp_ao_info *ao,
+ 					  int sndid, int rcvid);
+ int tcp_ao_copy_all_matching(const struct sock *sk, struct sock *newsk,
+ 			     struct request_sock *req, struct sk_buff *skb,
+diff --git a/include/sound/soc_sdw_utils.h b/include/sound/soc_sdw_utils.h
+index f68c1f193b3b46..0150b3735b4bd5 100644
+--- a/include/sound/soc_sdw_utils.h
++++ b/include/sound/soc_sdw_utils.h
+@@ -28,6 +28,7 @@
+  *   - SOC_SDW_CODEC_SPKR | SOF_SIDECAR_AMPS - Not currently supported
+  */
+ #define SOC_SDW_SIDECAR_AMPS		BIT(16)
++#define SOC_SDW_CODEC_MIC		BIT(17)
+ 
+ #define SOC_SDW_UNUSED_DAI_ID		-1
+ #define SOC_SDW_JACK_OUT_DAI_ID		0
+@@ -59,6 +60,7 @@ struct asoc_sdw_dai_info {
+ 	int (*rtd_init)(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai);
+ 	bool rtd_init_done; /* Indicate that the rtd_init callback is done */
+ 	unsigned long quirk;
++	bool quirk_exclude;
+ };
+ 
+ struct asoc_sdw_codec_info {
+diff --git a/include/trace/events/damon.h b/include/trace/events/damon.h
+index 23200aabccacb1..da4bd9fd11625e 100644
+--- a/include/trace/events/damon.h
++++ b/include/trace/events/damon.h
+@@ -15,7 +15,7 @@ TRACE_EVENT_CONDITION(damos_before_apply,
+ 		unsigned int target_idx, struct damon_region *r,
+ 		unsigned int nr_regions, bool do_trace),
+ 
+-	TP_ARGS(context_idx, target_idx, scheme_idx, r, nr_regions, do_trace),
++	TP_ARGS(context_idx, scheme_idx, target_idx, r, nr_regions, do_trace),
+ 
+ 	TP_CONDITION(do_trace),
+ 
+diff --git a/include/trace/trace_events.h b/include/trace/trace_events.h
+index c2f9cabf154d11..fa0d51cad57a80 100644
+--- a/include/trace/trace_events.h
++++ b/include/trace/trace_events.h
+@@ -244,6 +244,9 @@ static struct trace_event_fields trace_event_fields_##call[] = {	\
+ 	tstruct								\
+ 	{} };
+ 
++#undef DECLARE_EVENT_SYSCALL_CLASS
++#define DECLARE_EVENT_SYSCALL_CLASS DECLARE_EVENT_CLASS
++
+ #undef DEFINE_EVENT_PRINT
+ #define DEFINE_EVENT_PRINT(template, name, proto, args, print)
+ 
+@@ -374,11 +377,11 @@ static inline notrace int trace_event_get_offsets_##call(		\
+ 
+ #include "stages/stage6_event_callback.h"
+ 
+-#undef DECLARE_EVENT_CLASS
+-#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
+-									\
++
++#undef __DECLARE_EVENT_CLASS
++#define __DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
+ static notrace void							\
+-trace_event_raw_event_##call(void *__data, proto)			\
++do_trace_event_raw_event_##call(void *__data, proto)			\
+ {									\
+ 	struct trace_event_file *trace_file = __data;			\
+ 	struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\
+@@ -403,6 +406,29 @@ trace_event_raw_event_##call(void *__data, proto)			\
+ 									\
+ 	trace_event_buffer_commit(&fbuffer);				\
+ }
++
++#undef DECLARE_EVENT_CLASS
++#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
++__DECLARE_EVENT_CLASS(call, PARAMS(proto), PARAMS(args), PARAMS(tstruct), \
++		      PARAMS(assign), PARAMS(print))			\
++static notrace void							\
++trace_event_raw_event_##call(void *__data, proto)			\
++{									\
++	do_trace_event_raw_event_##call(__data, args);			\
++}
++
++#undef DECLARE_EVENT_SYSCALL_CLASS
++#define DECLARE_EVENT_SYSCALL_CLASS(call, proto, args, tstruct, assign, print) \
++__DECLARE_EVENT_CLASS(call, PARAMS(proto), PARAMS(args), PARAMS(tstruct), \
++		      PARAMS(assign), PARAMS(print))			\
++static notrace void							\
++trace_event_raw_event_##call(void *__data, proto)			\
++{									\
++	preempt_disable_notrace();					\
++	do_trace_event_raw_event_##call(__data, args);			\
++	preempt_enable_notrace();					\
++}
++
+ /*
+  * The ftrace_test_probe is compiled out, it is only here as a build time check
+  * to make sure that if the tracepoint handling changes, the ftrace probe will
+@@ -418,6 +444,8 @@ static inline void ftrace_test_probe_##call(void)			\
+ 
+ #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+ 
++#undef __DECLARE_EVENT_CLASS
++
+ #include "stages/stage7_class_define.h"
+ 
+ #undef DECLARE_EVENT_CLASS
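The net effect of DECLARE_EVENT_SYSCALL_CLASS is a thin wrapper that pins preemption around the shared event body; roughly what gets generated for a syscall-entry style class (illustrative expansion, not literal preprocessor output):

static notrace void
trace_event_raw_event_sys_enter(void *__data, struct pt_regs *regs, long id)
{
	preempt_disable_notrace();
	do_trace_event_raw_event_sys_enter(__data, regs, id);
	preempt_enable_notrace();
}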
+diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h
+index b6fbe4988f2e9e..c4182e95a61955 100644
+--- a/include/uapi/drm/xe_drm.h
++++ b/include/uapi/drm/xe_drm.h
+@@ -512,7 +512,9 @@ struct drm_xe_query_gt_list {
+  *    containing the following in mask:
+  *    ``DSS_COMPUTE    ff ff ff ff 00 00 00 00``
+  *    means 32 DSS are available for compute.
+- *  - %DRM_XE_TOPO_L3_BANK - To query the mask of enabled L3 banks
++ *  - %DRM_XE_TOPO_L3_BANK - To query the mask of enabled L3 banks.  This type
++ *    may be omitted if the driver is unable to query the mask from the
++ *    hardware.
+  *  - %DRM_XE_TOPO_EU_PER_DSS - To query the mask of Execution Units (EU)
+  *    available per Dual Sub Slices (DSS). For example a query response
+  *    containing the following in mask:
+diff --git a/include/uapi/linux/fanotify.h b/include/uapi/linux/fanotify.h
+index a37de58ca571ae..34f221d3a1b957 100644
+--- a/include/uapi/linux/fanotify.h
++++ b/include/uapi/linux/fanotify.h
+@@ -60,6 +60,7 @@
+ #define FAN_REPORT_DIR_FID	0x00000400	/* Report unique directory id */
+ #define FAN_REPORT_NAME		0x00000800	/* Report events with name */
+ #define FAN_REPORT_TARGET_FID	0x00001000	/* Report dirent target id  */
++#define FAN_REPORT_FD_ERROR	0x00002000	/* event->fd can report error */
+ 
+ /* Convenience macro - FAN_REPORT_NAME requires FAN_REPORT_DIR_FID */
+ #define FAN_REPORT_DFID_NAME	(FAN_REPORT_DIR_FID | FAN_REPORT_NAME)
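From userspace, FAN_REPORT_FD_ERROR changes the failure convention for event->fd: instead of FAN_NOFD, a failed fd creation is reported as a negative errno. A sketch using the standard fanotify API (error handling elided):

int ffd = fanotify_init(FAN_CLASS_NOTIF | FAN_REPORT_FD_ERROR, O_RDONLY);

/* ... after read(ffd, buf, sizeof(buf)) ... */
struct fanotify_event_metadata *ev = (struct fanotify_event_metadata *)buf;
if (ev->fd < 0)
	fprintf(stderr, "event fd error: %d\n", ev->fd);	/* e.g. -ESTALE */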
+diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
+index 3f68ae3e4330dc..8932ec5bd7c029 100644
+--- a/include/ufs/ufshcd.h
++++ b/include/ufs/ufshcd.h
+@@ -299,6 +299,8 @@ struct ufs_pwr_mode_info {
+  * @max_num_rtt: maximum RTT supported by the host
+  * @init: called when the driver is initialized
+  * @exit: called to cleanup everything done in init
++ * @set_dma_mask: For setting another DMA mask than indicated by the 64AS
++ *	capability bit.
+  * @get_ufs_hci_version: called to get UFS HCI version
+  * @clk_scale_notify: notifies that clks are scaled up/down
+  * @setup_clocks: called before touching any of the controller registers
+@@ -308,7 +310,9 @@ struct ufs_pwr_mode_info {
+  *                       to allow variant specific Uni-Pro initialization.
+  * @pwr_change_notify: called before and after a power mode change
+  *			is carried out to allow vendor specific capabilities
+- *			to be set.
++ *			to be set. PRE_CHANGE can modify final_params based
++ *			on desired_pwr_mode, but POST_CHANGE must not alter
++ *			the final_params parameter.
+  * @setup_xfer_req: called before any transfer request is issued
+  *                  to set some things
+  * @setup_task_mgmt: called before any task management request is issued
+@@ -341,6 +345,7 @@ struct ufs_hba_variant_ops {
+ 	int	(*init)(struct ufs_hba *);
+ 	void    (*exit)(struct ufs_hba *);
+ 	u32	(*get_ufs_hci_version)(struct ufs_hba *);
++	int	(*set_dma_mask)(struct ufs_hba *);
+ 	int	(*clk_scale_notify)(struct ufs_hba *, bool,
+ 				    enum ufs_notify_change_status);
+ 	int	(*setup_clocks)(struct ufs_hba *, bool,
+@@ -350,9 +355,9 @@ struct ufs_hba_variant_ops {
+ 	int	(*link_startup_notify)(struct ufs_hba *,
+ 				       enum ufs_notify_change_status);
+ 	int	(*pwr_change_notify)(struct ufs_hba *,
+-					enum ufs_notify_change_status status,
+-					struct ufs_pa_layer_attr *,
+-					struct ufs_pa_layer_attr *);
++				enum ufs_notify_change_status status,
++				struct ufs_pa_layer_attr *desired_pwr_mode,
++				struct ufs_pa_layer_attr *final_params);
+ 	void	(*setup_xfer_req)(struct ufs_hba *hba, int tag,
+ 				  bool is_scsi_cmd);
+ 	void	(*setup_task_mgmt)(struct ufs_hba *, int, u8);
+@@ -623,12 +628,6 @@ enum ufshcd_quirks {
+ 	 */
+ 	UFSHCD_QUIRK_SKIP_PH_CONFIGURATION		= 1 << 16,
+ 
+-	/*
+-	 * This quirk needs to be enabled if the host controller has
+-	 * 64-bit addressing supported capability but it doesn't work.
+-	 */
+-	UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS		= 1 << 17,
+-
+ 	/*
+ 	 * This quirk needs to be enabled if the host controller has
+ 	 * auto-hibernate capability but it's FASTAUTO only.
+diff --git a/io_uring/tctx.c b/io_uring/tctx.c
+index c043fe93a3f232..84f6a838572040 100644
+--- a/io_uring/tctx.c
++++ b/io_uring/tctx.c
+@@ -47,8 +47,19 @@ static struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx,
+ void __io_uring_free(struct task_struct *tsk)
+ {
+ 	struct io_uring_task *tctx = tsk->io_uring;
++	struct io_tctx_node *node;
++	unsigned long index;
+ 
+-	WARN_ON_ONCE(!xa_empty(&tctx->xa));
++	/*
++	 * Fault injection forcing allocation errors in the xa_store() path
++	 * can lead to xa_empty() returning false, even though no actual
++	 * node is stored in the xarray. Until that gets sorted out, attempt
++	 * an iteration here and warn if any entries are found.
++	 */
++	xa_for_each(&tctx->xa, index, node) {
++		WARN_ON_ONCE(1);
++		break;
++	}
+ 	WARN_ON_ONCE(tctx->io_wq);
+ 	WARN_ON_ONCE(tctx->cached_refs);
+ 
+diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c
+index 39c3c816ec7882..883510a3e8d075 100644
+--- a/io_uring/uring_cmd.c
++++ b/io_uring/uring_cmd.c
+@@ -147,7 +147,7 @@ static inline void io_req_set_cqe32_extra(struct io_kiocb *req,
+  * Called by consumers of io_uring_cmd, if they originally returned
+  * -EIOCBQUEUED upon receiving the command.
+  */
+-void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2,
++void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, u64 res2,
+ 		       unsigned issue_flags)
+ {
+ 	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);
+diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
+index 79660e3fca4c1b..6cdbb4c33d31d5 100644
+--- a/kernel/bpf/arraymap.c
++++ b/kernel/bpf/arraymap.c
+@@ -947,22 +947,44 @@ static void *prog_fd_array_get_ptr(struct bpf_map *map,
+ 				   struct file *map_file, int fd)
+ {
+ 	struct bpf_prog *prog = bpf_prog_get(fd);
++	bool is_extended;
+ 
+ 	if (IS_ERR(prog))
+ 		return prog;
+ 
+-	if (!bpf_prog_map_compatible(map, prog)) {
++	if (prog->type == BPF_PROG_TYPE_EXT ||
++	    !bpf_prog_map_compatible(map, prog)) {
+ 		bpf_prog_put(prog);
+ 		return ERR_PTR(-EINVAL);
+ 	}
+ 
++	mutex_lock(&prog->aux->ext_mutex);
++	is_extended = prog->aux->is_extended;
++	if (!is_extended)
++		prog->aux->prog_array_member_cnt++;
++	mutex_unlock(&prog->aux->ext_mutex);
++	if (is_extended) {
++		/* Extended prog can not be tail callee. It's to prevent a
++		 * potential infinite loop like:
++		 * tail callee prog entry -> tail callee prog subprog ->
++		 * freplace prog entry --tailcall-> tail callee prog entry.
++		 */
++		bpf_prog_put(prog);
++		return ERR_PTR(-EBUSY);
++	}
++
+ 	return prog;
+ }
+ 
+ static void prog_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
+ {
++	struct bpf_prog *prog = ptr;
++
++	mutex_lock(&prog->aux->ext_mutex);
++	prog->aux->prog_array_member_cnt--;
++	mutex_unlock(&prog->aux->ext_mutex);
+ 	/* bpf_prog is freed after one RCU or tasks trace grace period */
+-	bpf_prog_put(ptr);
++	bpf_prog_put(prog);
+ }
+ 
+ static u32 prog_fd_array_sys_lookup_elem(void *ptr)
+diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
+index 5e77c58e06010e..233ea78f8f1bd9 100644
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -131,6 +131,7 @@ struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flag
+ 	INIT_LIST_HEAD_RCU(&fp->aux->ksym_prefix.lnode);
+ #endif
+ 	mutex_init(&fp->aux->used_maps_mutex);
++	mutex_init(&fp->aux->ext_mutex);
+ 	mutex_init(&fp->aux->dst_mutex);
+ 
+ 	return fp;
+diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
+index 7878be18e9d264..3aa002a47a9666 100644
+--- a/kernel/bpf/devmap.c
++++ b/kernel/bpf/devmap.c
+@@ -184,7 +184,7 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
+ static void dev_map_free(struct bpf_map *map)
+ {
+ 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
+-	int i;
++	u32 i;
+ 
+ 	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
+ 	 * so the programs (can be more than one that used this map) were
+@@ -821,7 +821,7 @@ static long dev_map_delete_elem(struct bpf_map *map, void *key)
+ {
+ 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
+ 	struct bpf_dtab_netdev *old_dev;
+-	int k = *(u32 *)key;
++	u32 k = *(u32 *)key;
+ 
+ 	if (k >= map->max_entries)
+ 		return -EINVAL;
+@@ -838,7 +838,7 @@ static long dev_map_hash_delete_elem(struct bpf_map *map, void *key)
+ {
+ 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
+ 	struct bpf_dtab_netdev *old_dev;
+-	int k = *(u32 *)key;
++	u32 k = *(u32 *)key;
+ 	unsigned long flags;
+ 	int ret = -ENOENT;
+ 
+diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
+index b14b87463ee04e..3ec941a0ea41c5 100644
+--- a/kernel/bpf/hashtab.c
++++ b/kernel/bpf/hashtab.c
+@@ -896,9 +896,12 @@ static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
+ static void htab_elem_free(struct bpf_htab *htab, struct htab_elem *l)
+ {
+ 	check_and_free_fields(htab, l);
++
++	migrate_disable();
+ 	if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH)
+ 		bpf_mem_cache_free(&htab->pcpu_ma, l->ptr_to_pptr);
+ 	bpf_mem_cache_free(&htab->ma, l);
++	migrate_enable();
+ }
+ 
+ static void htab_put_fd_value(struct bpf_htab *htab, struct htab_elem *l)
+@@ -948,7 +951,7 @@ static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
+ 	if (htab_is_prealloc(htab)) {
+ 		bpf_map_dec_elem_count(&htab->map);
+ 		check_and_free_fields(htab, l);
+-		__pcpu_freelist_push(&htab->freelist, &l->fnode);
++		pcpu_freelist_push(&htab->freelist, &l->fnode);
+ 	} else {
+ 		dec_elem_count(htab);
+ 		htab_elem_free(htab, l);
+@@ -1018,7 +1021,6 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
+ 			 */
+ 			pl_new = this_cpu_ptr(htab->extra_elems);
+ 			l_new = *pl_new;
+-			htab_put_fd_value(htab, old_elem);
+ 			*pl_new = old_elem;
+ 		} else {
+ 			struct pcpu_freelist_node *l;
+@@ -1105,6 +1107,7 @@ static long htab_map_update_elem(struct bpf_map *map, void *key, void *value,
+ 	struct htab_elem *l_new = NULL, *l_old;
+ 	struct hlist_nulls_head *head;
+ 	unsigned long flags;
++	void *old_map_ptr;
+ 	struct bucket *b;
+ 	u32 key_size, hash;
+ 	int ret;
+@@ -1183,12 +1186,27 @@ static long htab_map_update_elem(struct bpf_map *map, void *key, void *value,
+ 	hlist_nulls_add_head_rcu(&l_new->hash_node, head);
+ 	if (l_old) {
+ 		hlist_nulls_del_rcu(&l_old->hash_node);
++
++		/* l_old has already been stashed in htab->extra_elems, free
++		 * its special fields before it is available for reuse. Also
++		 * save the old map pointer in htab of maps before unlock
++		 * and release it after unlock.
++		 */
++		old_map_ptr = NULL;
++		if (htab_is_prealloc(htab)) {
++			if (map->ops->map_fd_put_ptr)
++				old_map_ptr = fd_htab_map_get_ptr(map, l_old);
++			check_and_free_fields(htab, l_old);
++		}
++	}
++	htab_unlock_bucket(htab, b, hash, flags);
++	if (l_old) {
++		if (old_map_ptr)
++			map->ops->map_fd_put_ptr(map, old_map_ptr, true);
+ 		if (!htab_is_prealloc(htab))
+ 			free_htab_elem(htab, l_old);
+-		else
+-			check_and_free_fields(htab, l_old);
+ 	}
+-	ret = 0;
++	return 0;
+ err:
+ 	htab_unlock_bucket(htab, b, hash, flags);
+ 	return ret;
+@@ -1432,15 +1450,15 @@ static long htab_map_delete_elem(struct bpf_map *map, void *key)
+ 		return ret;
+ 
+ 	l = lookup_elem_raw(head, hash, key, key_size);
+-
+-	if (l) {
++	if (l)
+ 		hlist_nulls_del_rcu(&l->hash_node);
+-		free_htab_elem(htab, l);
+-	} else {
++	else
+ 		ret = -ENOENT;
+-	}
+ 
+ 	htab_unlock_bucket(htab, b, hash, flags);
++
++	if (l)
++		free_htab_elem(htab, l);
+ 	return ret;
+ }
+ 
+@@ -1853,13 +1871,14 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map,
+ 			 * may cause deadlock. See comments in function
+ 			 * prealloc_lru_pop(). Let us do bpf_lru_push_free()
+ 			 * after releasing the bucket lock.
++			 *
++			 * For htab of maps, htab_put_fd_value() in
++			 * free_htab_elem() may acquire a spinlock with bucket
++			 * lock being held and it violates the lock rule, so
++			 * invoke free_htab_elem() after unlock as well.
+ 			 */
+-			if (is_lru_map) {
+-				l->batch_flink = node_to_free;
+-				node_to_free = l;
+-			} else {
+-				free_htab_elem(htab, l);
+-			}
++			l->batch_flink = node_to_free;
++			node_to_free = l;
+ 		}
+ 		dst_key += key_size;
+ 		dst_val += value_size;
+@@ -1871,7 +1890,10 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map,
+ 	while (node_to_free) {
+ 		l = node_to_free;
+ 		node_to_free = node_to_free->batch_flink;
+-		htab_lru_push_free(htab, l);
++		if (is_lru_map)
++			htab_lru_push_free(htab, l);
++		else
++			free_htab_elem(htab, l);
+ 	}
+ 
+ next_batch:
+diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c
+index 9b60eda0f727b3..010e91ac978e62 100644
+--- a/kernel/bpf/lpm_trie.c
++++ b/kernel/bpf/lpm_trie.c
+@@ -310,12 +310,22 @@ static struct lpm_trie_node *lpm_trie_node_alloc(const struct lpm_trie *trie,
+ 	return node;
+ }
+ 
++static int trie_check_add_elem(struct lpm_trie *trie, u64 flags)
++{
++	if (flags == BPF_EXIST)
++		return -ENOENT;
++	if (trie->n_entries == trie->map.max_entries)
++		return -ENOSPC;
++	trie->n_entries++;
++	return 0;
++}
++
+ /* Called from syscall or from eBPF program */
+ static long trie_update_elem(struct bpf_map *map,
+ 			     void *_key, void *value, u64 flags)
+ {
+ 	struct lpm_trie *trie = container_of(map, struct lpm_trie, map);
+-	struct lpm_trie_node *node, *im_node = NULL, *new_node = NULL;
++	struct lpm_trie_node *node, *im_node, *new_node = NULL;
+ 	struct lpm_trie_node *free_node = NULL;
+ 	struct lpm_trie_node __rcu **slot;
+ 	struct bpf_lpm_trie_key_u8 *key = _key;
+@@ -333,20 +343,12 @@ static long trie_update_elem(struct bpf_map *map,
+ 	spin_lock_irqsave(&trie->lock, irq_flags);
+ 
+ 	/* Allocate and fill a new node */
+-
+-	if (trie->n_entries == trie->map.max_entries) {
+-		ret = -ENOSPC;
+-		goto out;
+-	}
+-
+ 	new_node = lpm_trie_node_alloc(trie, value);
+ 	if (!new_node) {
+ 		ret = -ENOMEM;
+ 		goto out;
+ 	}
+ 
+-	trie->n_entries++;
+-
+ 	new_node->prefixlen = key->prefixlen;
+ 	RCU_INIT_POINTER(new_node->child[0], NULL);
+ 	RCU_INIT_POINTER(new_node->child[1], NULL);
+@@ -376,6 +378,10 @@ static long trie_update_elem(struct bpf_map *map,
+ 	 * simply assign the @new_node to that slot and be done.
+ 	 */
+ 	if (!node) {
++		ret = trie_check_add_elem(trie, flags);
++		if (ret)
++			goto out;
++
+ 		rcu_assign_pointer(*slot, new_node);
+ 		goto out;
+ 	}
+@@ -384,18 +390,30 @@ static long trie_update_elem(struct bpf_map *map,
+ 	 * which already has the correct data array set.
+ 	 */
+ 	if (node->prefixlen == matchlen) {
++		if (!(node->flags & LPM_TREE_NODE_FLAG_IM)) {
++			if (flags == BPF_NOEXIST) {
++				ret = -EEXIST;
++				goto out;
++			}
++		} else {
++			ret = trie_check_add_elem(trie, flags);
++			if (ret)
++				goto out;
++		}
++
+ 		new_node->child[0] = node->child[0];
+ 		new_node->child[1] = node->child[1];
+ 
+-		if (!(node->flags & LPM_TREE_NODE_FLAG_IM))
+-			trie->n_entries--;
+-
+ 		rcu_assign_pointer(*slot, new_node);
+ 		free_node = node;
+ 
+ 		goto out;
+ 	}
+ 
++	ret = trie_check_add_elem(trie, flags);
++	if (ret)
++		goto out;
++
+ 	/* If the new node matches the prefix completely, it must be inserted
+ 	 * as an ancestor. Simply insert it between @node and *@slot.
+ 	 */
+@@ -408,6 +426,7 @@ static long trie_update_elem(struct bpf_map *map,
+ 
+ 	im_node = lpm_trie_node_alloc(trie, NULL);
+ 	if (!im_node) {
++		trie->n_entries--;
+ 		ret = -ENOMEM;
+ 		goto out;
+ 	}
+@@ -429,14 +448,8 @@ static long trie_update_elem(struct bpf_map *map,
+ 	rcu_assign_pointer(*slot, im_node);
+ 
+ out:
+-	if (ret) {
+-		if (new_node)
+-			trie->n_entries--;
+-
++	if (ret)
+ 		kfree(new_node);
+-		kfree(im_node);
+-	}
+-
+ 	spin_unlock_irqrestore(&trie->lock, irq_flags);
+ 	kfree_rcu(free_node, rcu);
+ 
+@@ -633,7 +646,7 @@ static int trie_get_next_key(struct bpf_map *map, void *_key, void *_next_key)
+ 	struct lpm_trie_node **node_stack = NULL;
+ 	int err = 0, stack_ptr = -1;
+ 	unsigned int next_bit;
+-	size_t matchlen;
++	size_t matchlen = 0;
+ 
+ 	/* The get_next_key follows postorder. For the 4 node example in
+ 	 * the top of this file, the trie_get_next_key() returns the following
+@@ -672,7 +685,7 @@ static int trie_get_next_key(struct bpf_map *map, void *_key, void *_next_key)
+ 		next_bit = extract_bit(key->data, node->prefixlen);
+ 		node = rcu_dereference(node->child[next_bit]);
+ 	}
+-	if (!node || node->prefixlen != key->prefixlen ||
++	if (!node || node->prefixlen != matchlen ||
+ 	    (node->flags & LPM_TREE_NODE_FLAG_IM))
+ 		goto find_leftmost;
+ 
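With trie_check_add_elem() in place, LPM tries honour the standard map-update flag semantics; from userspace via libbpf (sketch):

/* key already present: */
bpf_map_update_elem(map_fd, &key, &val, BPF_NOEXIST);	/* -EEXIST */
bpf_map_update_elem(map_fd, &key, &val, BPF_EXIST);	/* ok, replaces */
/* key absent: */
bpf_map_update_elem(map_fd, &key, &val, BPF_EXIST);	/* -ENOENT */
bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);	/* ok, inserts */

Note that replacing an existing leaf no longer decrements and re-increments n_entries, so the entry count only moves on real insertions.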
+diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
+index c5aa127ed4cc01..368ae8d231d417 100644
+--- a/kernel/bpf/syscall.c
++++ b/kernel/bpf/syscall.c
+@@ -2976,12 +2976,24 @@ void bpf_link_inc(struct bpf_link *link)
+ 	atomic64_inc(&link->refcnt);
+ }
+ 
++static void bpf_link_dealloc(struct bpf_link *link)
++{
++	/* now that we know that bpf_link itself can't be reached, put underlying BPF program */
++	if (link->prog)
++		bpf_prog_put(link->prog);
++
++	/* free bpf_link and its containing memory */
++	if (link->ops->dealloc_deferred)
++		link->ops->dealloc_deferred(link);
++	else
++		link->ops->dealloc(link);
++}
++
+ static void bpf_link_defer_dealloc_rcu_gp(struct rcu_head *rcu)
+ {
+ 	struct bpf_link *link = container_of(rcu, struct bpf_link, rcu);
+ 
+-	/* free bpf_link and its containing memory */
+-	link->ops->dealloc_deferred(link);
++	bpf_link_dealloc(link);
+ }
+ 
+ static void bpf_link_defer_dealloc_mult_rcu_gp(struct rcu_head *rcu)
+@@ -3003,7 +3015,6 @@ static void bpf_link_free(struct bpf_link *link)
+ 		sleepable = link->prog->sleepable;
+ 		/* detach BPF program, clean up used resources */
+ 		ops->release(link);
+-		bpf_prog_put(link->prog);
+ 	}
+ 	if (ops->dealloc_deferred) {
+ 		/* schedule BPF link deallocation; if underlying BPF program
+@@ -3014,8 +3025,9 @@ static void bpf_link_free(struct bpf_link *link)
+ 			call_rcu_tasks_trace(&link->rcu, bpf_link_defer_dealloc_mult_rcu_gp);
+ 		else
+ 			call_rcu(&link->rcu, bpf_link_defer_dealloc_rcu_gp);
+-	} else if (ops->dealloc)
+-		ops->dealloc(link);
++	} else if (ops->dealloc) {
++		bpf_link_dealloc(link);
++	}
+ }
+ 
+ static void bpf_link_put_deferred(struct work_struct *work)
+@@ -3218,7 +3230,8 @@ static void bpf_tracing_link_release(struct bpf_link *link)
+ 		container_of(link, struct bpf_tracing_link, link.link);
+ 
+ 	WARN_ON_ONCE(bpf_trampoline_unlink_prog(&tr_link->link,
+-						tr_link->trampoline));
++						tr_link->trampoline,
++						tr_link->tgt_prog));
+ 
+ 	bpf_trampoline_put(tr_link->trampoline);
+ 
+@@ -3358,7 +3371,7 @@ static int bpf_tracing_prog_attach(struct bpf_prog *prog,
+ 	 *   in prog->aux
+ 	 *
+ 	 * - if prog->aux->dst_trampoline is NULL, the program has already been
+-         *   attached to a target and its initial target was cleared (below)
++	 *   attached to a target and its initial target was cleared (below)
+ 	 *
+ 	 * - if tgt_prog != NULL, the caller specified tgt_prog_fd +
+ 	 *   target_btf_id using the link_create API.
+@@ -3433,7 +3446,7 @@ static int bpf_tracing_prog_attach(struct bpf_prog *prog,
+ 	if (err)
+ 		goto out_unlock;
+ 
+-	err = bpf_trampoline_link_prog(&link->link, tr);
++	err = bpf_trampoline_link_prog(&link->link, tr, tgt_prog);
+ 	if (err) {
+ 		bpf_link_cleanup(&link_primer);
+ 		link = NULL;
+diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
+index 1166d9dd3e8b5d..ecdd2660561f5b 100644
+--- a/kernel/bpf/trampoline.c
++++ b/kernel/bpf/trampoline.c
+@@ -528,7 +528,27 @@ static enum bpf_tramp_prog_type bpf_attach_type_to_tramp(struct bpf_prog *prog)
+ 	}
+ }
+ 
+-static int __bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr)
++static int bpf_freplace_check_tgt_prog(struct bpf_prog *tgt_prog)
++{
++	struct bpf_prog_aux *aux = tgt_prog->aux;
++
++	guard(mutex)(&aux->ext_mutex);
++	if (aux->prog_array_member_cnt)
++		/* Program extensions can not extend target prog when the target
++		 * prog has been updated to any prog_array map as tail callee.
++		 * It's to prevent a potential infinite loop like:
++		 * tgt prog entry -> tgt prog subprog -> freplace prog entry
++		 * --tailcall-> tgt prog entry.
++		 */
++		return -EBUSY;
++
++	aux->is_extended = true;
++	return 0;
++}
++
++static int __bpf_trampoline_link_prog(struct bpf_tramp_link *link,
++				      struct bpf_trampoline *tr,
++				      struct bpf_prog *tgt_prog)
+ {
+ 	enum bpf_tramp_prog_type kind;
+ 	struct bpf_tramp_link *link_exiting;
+@@ -549,6 +569,9 @@ static int __bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_tr
+ 		/* Cannot attach extension if fentry/fexit are in use. */
+ 		if (cnt)
+ 			return -EBUSY;
++		err = bpf_freplace_check_tgt_prog(tgt_prog);
++		if (err)
++			return err;
+ 		tr->extension_prog = link->link.prog;
+ 		return bpf_arch_text_poke(tr->func.addr, BPF_MOD_JUMP, NULL,
+ 					  link->link.prog->bpf_func);
+@@ -575,17 +598,21 @@ static int __bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_tr
+ 	return err;
+ }
+ 
+-int bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr)
++int bpf_trampoline_link_prog(struct bpf_tramp_link *link,
++			     struct bpf_trampoline *tr,
++			     struct bpf_prog *tgt_prog)
+ {
+ 	int err;
+ 
+ 	mutex_lock(&tr->mutex);
+-	err = __bpf_trampoline_link_prog(link, tr);
++	err = __bpf_trampoline_link_prog(link, tr, tgt_prog);
+ 	mutex_unlock(&tr->mutex);
+ 	return err;
+ }
+ 
+-static int __bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr)
++static int __bpf_trampoline_unlink_prog(struct bpf_tramp_link *link,
++					struct bpf_trampoline *tr,
++					struct bpf_prog *tgt_prog)
+ {
+ 	enum bpf_tramp_prog_type kind;
+ 	int err;
+@@ -596,6 +623,8 @@ static int __bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, struct bpf_
+ 		err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_JUMP,
+ 					 tr->extension_prog->bpf_func, NULL);
+ 		tr->extension_prog = NULL;
++		guard(mutex)(&tgt_prog->aux->ext_mutex);
++		tgt_prog->aux->is_extended = false;
+ 		return err;
+ 	}
+ 	hlist_del_init(&link->tramp_hlist);
+@@ -604,12 +633,14 @@ static int __bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, struct bpf_
+ }
+ 
+ /* bpf_trampoline_unlink_prog() should never fail. */
+-int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr)
++int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link,
++			       struct bpf_trampoline *tr,
++			       struct bpf_prog *tgt_prog)
+ {
+ 	int err;
+ 
+ 	mutex_lock(&tr->mutex);
+-	err = __bpf_trampoline_unlink_prog(link, tr);
++	err = __bpf_trampoline_unlink_prog(link, tr, tgt_prog);
+ 	mutex_unlock(&tr->mutex);
+ 	return err;
+ }
+@@ -624,7 +655,7 @@ static void bpf_shim_tramp_link_release(struct bpf_link *link)
+ 	if (!shim_link->trampoline)
+ 		return;
+ 
+-	WARN_ON_ONCE(bpf_trampoline_unlink_prog(&shim_link->link, shim_link->trampoline));
++	WARN_ON_ONCE(bpf_trampoline_unlink_prog(&shim_link->link, shim_link->trampoline, NULL));
+ 	bpf_trampoline_put(shim_link->trampoline);
+ }
+ 
+@@ -738,7 +769,7 @@ int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog,
+ 		goto err;
+ 	}
+ 
+-	err = __bpf_trampoline_link_prog(&shim_link->link, tr);
++	err = __bpf_trampoline_link_prog(&shim_link->link, tr, NULL);
+ 	if (err)
+ 		goto err;
+ 
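Read together with the arraymap hunk earlier in this patch, the exclusion taken under prog->aux->ext_mutex is symmetric (summary sketch, not literal code):

/*
 *  prog_fd_array_get_ptr()             bpf_freplace_check_tgt_prog()
 *  -----------------------             -----------------------------
 *  if (aux->is_extended)               if (aux->prog_array_member_cnt)
 *          return -EBUSY;                      return -EBUSY;
 *  aux->prog_array_member_cnt++;       aux->is_extended = true;
 *
 * A program can be a tail-call target or an freplace target, never
 * both, which closes the loop:
 *   tgt entry -> tgt subprog -> freplace entry --tailcall--> tgt entry
 */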
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 91317857ea3ee5..b2008076df9c26 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -1200,14 +1200,17 @@ static bool is_spilled_scalar_reg64(const struct bpf_stack_state *stack)
+ /* Mark stack slot as STACK_MISC, unless it is already STACK_INVALID, in which
+  * case they are equivalent, or it's STACK_ZERO, in which case we preserve
+  * more precise STACK_ZERO.
+- * Note, in uprivileged mode leaving STACK_INVALID is wrong, so we take
+- * env->allow_ptr_leaks into account and force STACK_MISC, if necessary.
++ * Regardless of allow_ptr_leaks setting (i.e., privileged or unprivileged
++ * mode), we won't promote STACK_INVALID to STACK_MISC. In privileged case it is
++ * unnecessary as both are considered equivalent when loading data and pruning,
++ * in case of unprivileged mode it will be incorrect to allow reads of invalid
++ * slots.
+  */
+ static void mark_stack_slot_misc(struct bpf_verifier_env *env, u8 *stype)
+ {
+ 	if (*stype == STACK_ZERO)
+ 		return;
+-	if (env->allow_ptr_leaks && *stype == STACK_INVALID)
++	if (*stype == STACK_INVALID)
+ 		return;
+ 	*stype = STACK_MISC;
+ }
+@@ -4646,6 +4649,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
+ 	 */
+ 	if (!env->allow_ptr_leaks &&
+ 	    is_spilled_reg(&state->stack[spi]) &&
++	    !is_spilled_scalar_reg(&state->stack[spi]) &&
+ 	    size != BPF_REG_SIZE) {
+ 		verbose(env, "attempt to corrupt spilled pointer on stack\n");
+ 		return -EACCES;
+@@ -8021,6 +8025,11 @@ static int process_iter_arg(struct bpf_verifier_env *env, int regno, int insn_id
+ 	const struct btf_type *t;
+ 	int spi, err, i, nr_slots, btf_id;
+ 
++	if (reg->type != PTR_TO_STACK) {
++		verbose(env, "arg#%d expected pointer to an iterator on stack\n", regno - 1);
++		return -EINVAL;
++	}
++
+ 	/* For iter_{new,next,destroy} functions, btf_check_iter_kfuncs()
+ 	 * ensures struct convention, so we wouldn't need to do any BTF
+ 	 * validation here. But given iter state can be passed as a parameter
+diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
+index d570535342cb78..f6f0387761d05a 100644
+--- a/kernel/dma/debug.c
++++ b/kernel/dma/debug.c
+@@ -1052,9 +1052,13 @@ static void check_unmap(struct dma_debug_entry *ref)
+ 	}
+ 
+ 	hash_bucket_del(entry);
+-	dma_entry_free(entry);
+-
+ 	put_hash_bucket(bucket, flags);
++
++	/*
++	 * Free the entry outside of bucket_lock to avoid ABBA deadlocks
++	 * between that and radix_lock.
++	 */
++	dma_entry_free(entry);
+ }
+ 
+ static void check_for_stack(struct device *dev,
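One interleaving of the ABBA shape the moved dma_entry_free() avoids (sketch of the lock order only):

/*
 *   CPU0: check_unmap()                CPU1
 *     lock(bucket->lock)                 lock(radix_lock)
 *     dma_entry_free()
 *       lock(radix_lock)   [A -> B]      lock(bucket->lock)  [B -> A]
 *
 * Freeing after put_hash_bucket() means radix_lock is never requested
 * while a bucket lock is held, so the cycle cannot form.
 */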
+diff --git a/kernel/kcsan/debugfs.c b/kernel/kcsan/debugfs.c
+index 53b21ae30e00ee..b14072071889fb 100644
+--- a/kernel/kcsan/debugfs.c
++++ b/kernel/kcsan/debugfs.c
+@@ -46,14 +46,8 @@ static struct {
+ 	int		used;		/* number of elements used */
+ 	bool		sorted;		/* if elements are sorted */
+ 	bool		whitelist;	/* if list is a blacklist or whitelist */
+-} report_filterlist = {
+-	.addrs		= NULL,
+-	.size		= 8,		/* small initial size */
+-	.used		= 0,
+-	.sorted		= false,
+-	.whitelist	= false,	/* default is blacklist */
+-};
+-static DEFINE_SPINLOCK(report_filterlist_lock);
++} report_filterlist;
++static DEFINE_RAW_SPINLOCK(report_filterlist_lock);
+ 
+ /*
+  * The microbenchmark allows benchmarking KCSAN core runtime only. To run
+@@ -110,7 +104,7 @@ bool kcsan_skip_report_debugfs(unsigned long func_addr)
+ 		return false;
+ 	func_addr -= offset; /* Get function start */
+ 
+-	spin_lock_irqsave(&report_filterlist_lock, flags);
++	raw_spin_lock_irqsave(&report_filterlist_lock, flags);
+ 	if (report_filterlist.used == 0)
+ 		goto out;
+ 
+@@ -127,7 +121,7 @@ bool kcsan_skip_report_debugfs(unsigned long func_addr)
+ 		ret = !ret;
+ 
+ out:
+-	spin_unlock_irqrestore(&report_filterlist_lock, flags);
++	raw_spin_unlock_irqrestore(&report_filterlist_lock, flags);
+ 	return ret;
+ }
+ 
+@@ -135,9 +129,9 @@ static void set_report_filterlist_whitelist(bool whitelist)
+ {
+ 	unsigned long flags;
+ 
+-	spin_lock_irqsave(&report_filterlist_lock, flags);
++	raw_spin_lock_irqsave(&report_filterlist_lock, flags);
+ 	report_filterlist.whitelist = whitelist;
+-	spin_unlock_irqrestore(&report_filterlist_lock, flags);
++	raw_spin_unlock_irqrestore(&report_filterlist_lock, flags);
+ }
+ 
+ /* Returns 0 on success, error-code otherwise. */
+@@ -145,6 +139,9 @@ static ssize_t insert_report_filterlist(const char *func)
+ {
+ 	unsigned long flags;
+ 	unsigned long addr = kallsyms_lookup_name(func);
++	unsigned long *delay_free = NULL;
++	unsigned long *new_addrs = NULL;
++	size_t new_size = 0;
+ 	ssize_t ret = 0;
+ 
+ 	if (!addr) {
+@@ -152,32 +149,33 @@ static ssize_t insert_report_filterlist(const char *func)
+ 		return -ENOENT;
+ 	}
+ 
+-	spin_lock_irqsave(&report_filterlist_lock, flags);
++retry_alloc:
++	/*
++	 * Check if we need an allocation, and re-validate under the lock. Since
++	 * report_filterlist_lock is a raw spinlock, we cannot allocate under it.
++	 */
++	if (data_race(report_filterlist.used == report_filterlist.size)) {
++		new_size = (report_filterlist.size ?: 4) * 2;
++		delay_free = new_addrs = kmalloc_array(new_size, sizeof(unsigned long), GFP_KERNEL);
++		if (!new_addrs)
++			return -ENOMEM;
++	}
+ 
+-	if (report_filterlist.addrs == NULL) {
+-		/* initial allocation */
+-		report_filterlist.addrs =
+-			kmalloc_array(report_filterlist.size,
+-				      sizeof(unsigned long), GFP_ATOMIC);
+-		if (report_filterlist.addrs == NULL) {
+-			ret = -ENOMEM;
+-			goto out;
+-		}
+-	} else if (report_filterlist.used == report_filterlist.size) {
+-		/* resize filterlist */
+-		size_t new_size = report_filterlist.size * 2;
+-		unsigned long *new_addrs =
+-			krealloc(report_filterlist.addrs,
+-				 new_size * sizeof(unsigned long), GFP_ATOMIC);
+-
+-		if (new_addrs == NULL) {
+-			/* leave filterlist itself untouched */
+-			ret = -ENOMEM;
+-			goto out;
++	raw_spin_lock_irqsave(&report_filterlist_lock, flags);
++	if (report_filterlist.used == report_filterlist.size) {
++		/* Check we pre-allocated enough, and retry if not. */
++		if (report_filterlist.used >= new_size) {
++			raw_spin_unlock_irqrestore(&report_filterlist_lock, flags);
++			kfree(new_addrs); /* kfree(NULL) is safe */
++			delay_free = new_addrs = NULL;
++			goto retry_alloc;
+ 		}
+ 
++		if (report_filterlist.used)
++			memcpy(new_addrs, report_filterlist.addrs, report_filterlist.used * sizeof(unsigned long));
++		delay_free = report_filterlist.addrs; /* free the old list */
++		report_filterlist.addrs = new_addrs;  /* switch to the new list */
+ 		report_filterlist.size = new_size;
+-		report_filterlist.addrs = new_addrs;
+ 	}
+ 
+ 	/* Note: deduplicating should be done in userspace. */
+@@ -185,9 +183,9 @@ static ssize_t insert_report_filterlist(const char *func)
+ 		kallsyms_lookup_name(func);
+ 	report_filterlist.sorted = false;
+ 
+-out:
+-	spin_unlock_irqrestore(&report_filterlist_lock, flags);
++	raw_spin_unlock_irqrestore(&report_filterlist_lock, flags);
+ 
++	kfree(delay_free);
+ 	return ret;
+ }
+ 
+@@ -204,13 +202,13 @@ static int show_info(struct seq_file *file, void *v)
+ 	}
+ 
+ 	/* show filter functions, and filter type */
+-	spin_lock_irqsave(&report_filterlist_lock, flags);
++	raw_spin_lock_irqsave(&report_filterlist_lock, flags);
+ 	seq_printf(file, "\n%s functions: %s\n",
+ 		   report_filterlist.whitelist ? "whitelisted" : "blacklisted",
+ 		   report_filterlist.used == 0 ? "none" : "");
+ 	for (i = 0; i < report_filterlist.used; ++i)
+ 		seq_printf(file, " %ps\n", (void *)report_filterlist.addrs[i]);
+-	spin_unlock_irqrestore(&report_filterlist_lock, flags);
++	raw_spin_unlock_irqrestore(&report_filterlist_lock, flags);
+ 
+ 	return 0;
+ }
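The raw-spinlock conversion is what forces the retry_alloc shape above: raw spinlocks may be held in contexts where the allocator cannot run, so memory is grabbed first and the decision re-validated under the lock. The same pattern in general form (hypothetical names, mirroring insert_report_filterlist()):

	new = kmalloc_array(new_size, sizeof(*new), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	raw_spin_lock_irqsave(&lock, flags);
	if (used >= new_size) {		/* grew meanwhile: start over */
		raw_spin_unlock_irqrestore(&lock, flags);
		kfree(new);
		goto retry;
	}
	memcpy(new, addrs, used * sizeof(*new));
	old = addrs;
	addrs = new;
	raw_spin_unlock_irqrestore(&lock, flags);

	kfree(old);	/* never kfree() while holding a raw spinlock */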
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 76b27b2a9c56ad..6cc12777bb11ab 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -1242,9 +1242,9 @@ static void nohz_csd_func(void *info)
+ 	WARN_ON(!(flags & NOHZ_KICK_MASK));
+ 
+ 	rq->idle_balance = idle_cpu(cpu);
+-	if (rq->idle_balance && !need_resched()) {
++	if (rq->idle_balance) {
+ 		rq->nohz_idle_balance = flags;
+-		raise_softirq_irqoff(SCHED_SOFTIRQ);
++		__raise_softirq_irqoff(SCHED_SOFTIRQ);
+ 	}
+ }
+ 
+diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
+index be1b917dc8ce4c..40a1ad4493b4d9 100644
+--- a/kernel/sched/deadline.c
++++ b/kernel/sched/deadline.c
+@@ -2042,6 +2042,7 @@ enqueue_dl_entity(struct sched_dl_entity *dl_se, int flags)
+ 	} else if (flags & ENQUEUE_REPLENISH) {
+ 		replenish_dl_entity(dl_se);
+ 	} else if ((flags & ENQUEUE_RESTORE) &&
++		   !is_dl_boosted(dl_se) &&
+ 		   dl_time_before(dl_se->deadline, rq_clock(rq_of_dl_se(dl_se)))) {
+ 		setup_new_dl_entity(dl_se);
+ 	}
+diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
+index 16613631543f18..79bb18651cdb8b 100644
+--- a/kernel/sched/ext.c
++++ b/kernel/sched/ext.c
+@@ -3105,6 +3105,12 @@ static s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu,
+ 
+ 	*found = false;
+ 
++
++	/*
++	 * The RCU read-side critical section protects llc_cpus.
++	 */
++	rcu_read_lock();
++
+ 	/*
+ 	 * If WAKE_SYNC, the waker's local DSQ is empty, and the system is
+ 	 * under utilized, wake up @p to the local DSQ of the waker. Checking
+@@ -3147,9 +3153,12 @@ static s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu,
+ 	if (cpu >= 0)
+ 		goto cpu_found;
+ 
++	rcu_read_unlock();
+ 	return prev_cpu;
+ 
+ cpu_found:
++	rcu_read_unlock();
++
+ 	*found = true;
+ 	return cpu;
+ }
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 2d16c8545c71ed..782ce70ebd1b08 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -3399,10 +3399,16 @@ static void task_numa_work(struct callback_head *work)
+ 
+ 		/* Initialise new per-VMA NUMAB state. */
+ 		if (!vma->numab_state) {
+-			vma->numab_state = kzalloc(sizeof(struct vma_numab_state),
+-				GFP_KERNEL);
+-			if (!vma->numab_state)
++			struct vma_numab_state *ptr;
++
++			ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);
++			if (!ptr)
++				continue;
++
++			if (cmpxchg(&vma->numab_state, NULL, ptr)) {
++				kfree(ptr);
+ 				continue;
++			}
+ 
+ 			vma->numab_state->start_scan_seq = mm->numa_scan_seq;
+ 
+@@ -12574,7 +12580,7 @@ static void _nohz_idle_balance(struct rq *this_rq, unsigned int flags)
+ 		 * work being done for other CPUs. Next load
+ 		 * balancing owner will pick it up.
+ 		 */
+-		if (need_resched()) {
++		if (!idle_cpu(this_cpu) && need_resched()) {
+ 			if (flags & NOHZ_STATS_KICK)
+ 				has_blocked_load = true;
+ 			if (flags & NOHZ_NEXT_KICK)
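The kzalloc()+cmpxchg() dance in the first fair.c hunk is the usual lockless first-writer-wins initialization: task_numa_work() can run on several threads sharing the mm, and without the cmpxchg() a racing thread's allocation would silently overwrite (and leak) the other's. The same shape in generic form (hypothetical types):

static struct foo *get_or_alloc_foo(struct bar *b)	/* hypothetical */
{
	struct foo *p = READ_ONCE(b->foo);

	if (p)
		return p;
	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return NULL;
	if (cmpxchg(&b->foo, NULL, p)) {	/* lost the race */
		kfree(p);
		p = READ_ONCE(b->foo);
	}
	return p;
}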
+diff --git a/kernel/sched/syscalls.c b/kernel/sched/syscalls.c
+index 24f9f90b6574e5..1784ed1fb3fe5d 100644
+--- a/kernel/sched/syscalls.c
++++ b/kernel/sched/syscalls.c
+@@ -1238,7 +1238,7 @@ int __sched_setaffinity(struct task_struct *p, struct affinity_context *ctx)
+ 			bool empty = !cpumask_and(new_mask, new_mask,
+ 						  ctx->user_mask);
+ 
+-			if (WARN_ON_ONCE(empty))
++			if (empty)
+ 				cpumask_copy(new_mask, cpus_allowed);
+ 		}
+ 		__set_cpus_allowed_ptr(p, ctx);
+diff --git a/kernel/softirq.c b/kernel/softirq.c
+index d082e7840f8802..8c4524ce65fafe 100644
+--- a/kernel/softirq.c
++++ b/kernel/softirq.c
+@@ -280,17 +280,24 @@ static inline void invoke_softirq(void)
+ 		wakeup_softirqd();
+ }
+ 
++#define SCHED_SOFTIRQ_MASK	BIT(SCHED_SOFTIRQ)
++
+ /*
+  * flush_smp_call_function_queue() can raise a soft interrupt in a function
+- * call. On RT kernels this is undesired and the only known functionality
+- * in the block layer which does this is disabled on RT. If soft interrupts
+- * get raised which haven't been raised before the flush, warn so it can be
++ * call. On RT kernels this is undesired and the only known functionalities
++ * are in the block layer which is disabled on RT, and in the scheduler for
++ * idle load balancing. If soft interrupts get raised which haven't been
++ * raised before the flush, warn if it is not a SCHED_SOFTIRQ so it can be
+  * investigated.
+  */
+ void do_softirq_post_smp_call_flush(unsigned int was_pending)
+ {
+-	if (WARN_ON_ONCE(was_pending != local_softirq_pending()))
++	unsigned int is_pending = local_softirq_pending();
++
++	if (unlikely(was_pending != is_pending)) {
++		WARN_ON_ONCE(was_pending != (is_pending & ~SCHED_SOFTIRQ_MASK));
+ 		invoke_softirq();
++	}
+ }
+ 
+ #else /* CONFIG_PREEMPT_RT */
+diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig
+index 8ebb6d5a106bea..b0b97a60aaa6fc 100644
+--- a/kernel/time/Kconfig
++++ b/kernel/time/Kconfig
+@@ -17,11 +17,6 @@ config ARCH_CLOCKSOURCE_DATA
+ config ARCH_CLOCKSOURCE_INIT
+ 	bool
+ 
+-# Clocksources require validation of the clocksource against the last
+-# cycle update - x86/TSC misfeature
+-config CLOCKSOURCE_VALIDATE_LAST_CYCLE
+-	bool
+-
+ # Timekeeping vsyscall support
+ config GENERIC_TIME_VSYSCALL
+ 	bool
+diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
+index 23336eecb4f43b..8a40a616288b81 100644
+--- a/kernel/time/clocksource.c
++++ b/kernel/time/clocksource.c
+@@ -22,7 +22,7 @@
+ 
+ static noinline u64 cycles_to_nsec_safe(struct clocksource *cs, u64 start, u64 end)
+ {
+-	u64 delta = clocksource_delta(end, start, cs->mask);
++	u64 delta = clocksource_delta(end, start, cs->mask, cs->max_raw_delta);
+ 
+ 	if (likely(delta < cs->max_cycles))
+ 		return clocksource_cyc2ns(delta, cs->mult, cs->shift);
+@@ -985,6 +985,15 @@ static inline void clocksource_update_max_deferment(struct clocksource *cs)
+ 	cs->max_idle_ns = clocks_calc_max_nsecs(cs->mult, cs->shift,
+ 						cs->maxadj, cs->mask,
+ 						&cs->max_cycles);
++
++	/*
++	 * Threshold for detecting negative motion in clocksource_delta().
++	 *
++	 * Allow for 0.875 of the counter width so that overly long idle
++	 * sleeps, which go slightly over mask/2, do not trigger the
++	 * negative motion detection.
++	 */
++	cs->max_raw_delta = (cs->mask >> 1) + (cs->mask >> 2) + (cs->mask >> 3);
+ }
+ 
+ static struct clocksource *clocksource_find_best(bool oneshot, bool skipcur)
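The shift expression implements the comment's 0.875 exactly: (mask >> 1) + (mask >> 2) + (mask >> 3) = (1/2 + 1/4 + 1/8) * mask = 7/8 * mask. Any delta above 87.5% of the counter width is then treated as negative motion rather than as an implausibly long forward step.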
+diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
+index 802b336f4b8c2f..de3547d63aa975 100644
+--- a/kernel/time/ntp.c
++++ b/kernel/time/ntp.c
+@@ -804,7 +804,7 @@ int __do_adjtimex(struct __kernel_timex *txc, const struct timespec64 *ts,
+ 		txc->offset = shift_right(time_offset * NTP_INTERVAL_FREQ,
+ 				  NTP_SCALE_SHIFT);
+ 		if (!(time_status & STA_NANO))
+-			txc->offset = (u32)txc->offset / NSEC_PER_USEC;
++			txc->offset = div_s64(txc->offset, NSEC_PER_USEC);
+ 	}
+ 
+ 	result = time_state;	/* mostly `TIME_OK' */
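
The one-liner above is a sign-preservation fix: for a negative offset (clock behind NTP time), the old (u32) cast produced a huge positive value before the divide, while div_s64() keeps the sign. A userspace illustration, with div_s64() approximated by plain signed 64-bit division, which is what it reduces to on 64-bit builds:

#include <assert.h>
#include <stdint.h>

#define NSEC_PER_USEC 1000

int main(void)
{
	int64_t offset = -5000;		/* -5 us expressed in ns */

	/* Old behaviour: the u32 cast turns -5000 into a huge
	 * positive value before the divide. */
	int64_t broken = (uint32_t)offset / NSEC_PER_USEC;
	/* New behaviour: signed division preserves the sign. */
	int64_t fixed = offset / NSEC_PER_USEC;

	assert(fixed == -5);
	assert(broken != -5);	/* 4294962296 / 1000 == 4294962 */
	return 0;
}
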
+diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
+index 7e6f409bf3114a..96933082431fe0 100644
+--- a/kernel/time/timekeeping.c
++++ b/kernel/time/timekeeping.c
+@@ -195,97 +195,6 @@ static inline u64 tk_clock_read(const struct tk_read_base *tkr)
+ 	return clock->read(clock);
+ }
+ 
+-#ifdef CONFIG_DEBUG_TIMEKEEPING
+-#define WARNING_FREQ (HZ*300) /* 5 minute rate-limiting */
+-
+-static void timekeeping_check_update(struct timekeeper *tk, u64 offset)
+-{
+-
+-	u64 max_cycles = tk->tkr_mono.clock->max_cycles;
+-	const char *name = tk->tkr_mono.clock->name;
+-
+-	if (offset > max_cycles) {
+-		printk_deferred("WARNING: timekeeping: Cycle offset (%lld) is larger than allowed by the '%s' clock's max_cycles value (%lld): time overflow danger\n",
+-				offset, name, max_cycles);
+-		printk_deferred("         timekeeping: Your kernel is sick, but tries to cope by capping time updates\n");
+-	} else {
+-		if (offset > (max_cycles >> 1)) {
+-			printk_deferred("INFO: timekeeping: Cycle offset (%lld) is larger than the '%s' clock's 50%% safety margin (%lld)\n",
+-					offset, name, max_cycles >> 1);
+-			printk_deferred("      timekeeping: Your kernel is still fine, but is feeling a bit nervous\n");
+-		}
+-	}
+-
+-	if (tk->underflow_seen) {
+-		if (jiffies - tk->last_warning > WARNING_FREQ) {
+-			printk_deferred("WARNING: Underflow in clocksource '%s' observed, time update ignored.\n", name);
+-			printk_deferred("         Please report this, consider using a different clocksource, if possible.\n");
+-			printk_deferred("         Your kernel is probably still fine.\n");
+-			tk->last_warning = jiffies;
+-		}
+-		tk->underflow_seen = 0;
+-	}
+-
+-	if (tk->overflow_seen) {
+-		if (jiffies - tk->last_warning > WARNING_FREQ) {
+-			printk_deferred("WARNING: Overflow in clocksource '%s' observed, time update capped.\n", name);
+-			printk_deferred("         Please report this, consider using a different clocksource, if possible.\n");
+-			printk_deferred("         Your kernel is probably still fine.\n");
+-			tk->last_warning = jiffies;
+-		}
+-		tk->overflow_seen = 0;
+-	}
+-}
+-
+-static inline u64 timekeeping_cycles_to_ns(const struct tk_read_base *tkr, u64 cycles);
+-
+-static inline u64 timekeeping_debug_get_ns(const struct tk_read_base *tkr)
+-{
+-	struct timekeeper *tk = &tk_core.timekeeper;
+-	u64 now, last, mask, max, delta;
+-	unsigned int seq;
+-
+-	/*
+-	 * Since we're called holding a seqcount, the data may shift
+-	 * under us while we're doing the calculation. This can cause
+-	 * false positives, since we'd note a problem but throw the
+-	 * results away. So nest another seqcount here to atomically
+-	 * grab the points we are checking with.
+-	 */
+-	do {
+-		seq = read_seqcount_begin(&tk_core.seq);
+-		now = tk_clock_read(tkr);
+-		last = tkr->cycle_last;
+-		mask = tkr->mask;
+-		max = tkr->clock->max_cycles;
+-	} while (read_seqcount_retry(&tk_core.seq, seq));
+-
+-	delta = clocksource_delta(now, last, mask);
+-
+-	/*
+-	 * Try to catch underflows by checking if we are seeing small
+-	 * mask-relative negative values.
+-	 */
+-	if (unlikely((~delta & mask) < (mask >> 3)))
+-		tk->underflow_seen = 1;
+-
+-	/* Check for multiplication overflows */
+-	if (unlikely(delta > max))
+-		tk->overflow_seen = 1;
+-
+-	/* timekeeping_cycles_to_ns() handles both under and overflow */
+-	return timekeeping_cycles_to_ns(tkr, now);
+-}
+-#else
+-static inline void timekeeping_check_update(struct timekeeper *tk, u64 offset)
+-{
+-}
+-static inline u64 timekeeping_debug_get_ns(const struct tk_read_base *tkr)
+-{
+-	BUG();
+-}
+-#endif
+-
+ /**
+  * tk_setup_internals - Set up internals to use clocksource clock.
+  *
+@@ -390,19 +299,11 @@ static inline u64 timekeeping_cycles_to_ns(const struct tk_read_base *tkr, u64 c
+ 	return ((delta * tkr->mult) + tkr->xtime_nsec) >> tkr->shift;
+ }
+ 
+-static __always_inline u64 __timekeeping_get_ns(const struct tk_read_base *tkr)
++static __always_inline u64 timekeeping_get_ns(const struct tk_read_base *tkr)
+ {
+ 	return timekeeping_cycles_to_ns(tkr, tk_clock_read(tkr));
+ }
+ 
+-static inline u64 timekeeping_get_ns(const struct tk_read_base *tkr)
+-{
+-	if (IS_ENABLED(CONFIG_DEBUG_TIMEKEEPING))
+-		return timekeeping_debug_get_ns(tkr);
+-
+-	return __timekeeping_get_ns(tkr);
+-}
+-
+ /**
+  * update_fast_timekeeper - Update the fast and NMI safe monotonic timekeeper.
+  * @tkr: Timekeeping readout base from which we take the update
+@@ -446,7 +347,7 @@ static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
+ 		seq = raw_read_seqcount_latch(&tkf->seq);
+ 		tkr = tkf->base + (seq & 0x01);
+ 		now = ktime_to_ns(tkr->base);
+-		now += __timekeeping_get_ns(tkr);
++		now += timekeeping_get_ns(tkr);
+ 	} while (raw_read_seqcount_latch_retry(&tkf->seq, seq));
+ 
+ 	return now;
+@@ -562,7 +463,7 @@ static __always_inline u64 __ktime_get_real_fast(struct tk_fast *tkf, u64 *mono)
+ 		tkr = tkf->base + (seq & 0x01);
+ 		basem = ktime_to_ns(tkr->base);
+ 		baser = ktime_to_ns(tkr->base_real);
+-		delta = __timekeeping_get_ns(tkr);
++		delta = timekeeping_get_ns(tkr);
+ 	} while (raw_read_seqcount_latch_retry(&tkf->seq, seq));
+ 
+ 	if (mono)
+@@ -793,7 +694,8 @@ static void timekeeping_forward_now(struct timekeeper *tk)
+ 	u64 cycle_now, delta;
+ 
+ 	cycle_now = tk_clock_read(&tk->tkr_mono);
+-	delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
++	delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask,
++				  tk->tkr_mono.clock->max_raw_delta);
+ 	tk->tkr_mono.cycle_last = cycle_now;
+ 	tk->tkr_raw.cycle_last  = cycle_now;
+ 
+@@ -2292,15 +2194,13 @@ static bool timekeeping_advance(enum timekeeping_adv_mode mode)
+ 		goto out;
+ 
+ 	offset = clocksource_delta(tk_clock_read(&tk->tkr_mono),
+-				   tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
++				   tk->tkr_mono.cycle_last, tk->tkr_mono.mask,
++				   tk->tkr_mono.clock->max_raw_delta);
+ 
+ 	/* Check if there's really nothing to do */
+ 	if (offset < real_tk->cycle_interval && mode == TK_ADV_TICK)
+ 		goto out;
+ 
+-	/* Do some additional sanity checking */
+-	timekeeping_check_update(tk, offset);
+-
+ 	/*
+ 	 * With NO_HZ we may have to accumulate many cycle_intervals
+ 	 * (think "ticks") worth of time at once. To do this efficiently,
+diff --git a/kernel/time/timekeeping_internal.h b/kernel/time/timekeeping_internal.h
+index 4ca2787d1642e2..feb366b0142887 100644
+--- a/kernel/time/timekeeping_internal.h
++++ b/kernel/time/timekeeping_internal.h
+@@ -15,23 +15,16 @@ extern void tk_debug_account_sleep_time(const struct timespec64 *t);
+ #define tk_debug_account_sleep_time(x)
+ #endif
+ 
+-#ifdef CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE
+-static inline u64 clocksource_delta(u64 now, u64 last, u64 mask)
++static inline u64 clocksource_delta(u64 now, u64 last, u64 mask, u64 max_delta)
+ {
+ 	u64 ret = (now - last) & mask;
+ 
+ 	/*
+-	 * Prevent time going backwards by checking the MSB of mask in
+-	 * the result. If set, return 0.
++	 * Prevent time going backwards by checking the result against
++	 * @max_delta. If greater, return 0.
+ 	 */
+-	return ret & ~(mask >> 1) ? 0 : ret;
++	return ret > max_delta ? 0 : ret;
+ }
+-#else
+-static inline u64 clocksource_delta(u64 now, u64 last, u64 mask)
+-{
+-	return (now - last) & mask;
+-}
+-#endif
+ 
+ /* Semi public for serialization of non timekeeper VDSO updates. */
+ extern raw_spinlock_t timekeeper_lock;
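
Taken together with the clocksource.c hunk earlier, the check above replaces the old MSB-of-mask test with a comparison against max_raw_delta. A standalone sketch showing both the clamp on backwards motion and the case the old test got wrong:

#include <assert.h>
#include <stdint.h>

static uint64_t clocksource_delta(uint64_t now, uint64_t last,
				  uint64_t mask, uint64_t max_delta)
{
	uint64_t ret = (now - last) & mask;

	/* Deltas above max_delta are treated as time going backwards. */
	return ret > max_delta ? 0 : ret;
}

int main(void)
{
	uint64_t mask = 0xffffffffu;
	uint64_t max_delta = (mask >> 1) + (mask >> 2) + (mask >> 3);

	/* Counter stepped backwards by 10: masked delta is huge, clamp. */
	assert(clocksource_delta(100, 110, mask, max_delta) == 0);
	/* Long idle sleep slightly past mask/2 is still accepted. */
	assert(clocksource_delta(0x90000000u, 0, mask, max_delta)
	       == 0x90000000u);
	return 0;
}

The second assertion is the behavioural change: a delta just above mask/2 from an overly long idle sleep now passes, where the old "ret & ~(mask >> 1)" test would have clamped it to 0.
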
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 5807116bcd0bf7..366eb4c4f28e57 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -482,6 +482,8 @@ struct ring_buffer_per_cpu {
+ 	unsigned long			nr_pages;
+ 	unsigned int			current_context;
+ 	struct list_head		*pages;
++	/* pages generation counter, incremented when the list changes */
++	unsigned long			cnt;
+ 	struct buffer_page		*head_page;	/* read from head */
+ 	struct buffer_page		*tail_page;	/* write to tail */
+ 	struct buffer_page		*commit_page;	/* committed pages */
+@@ -1475,40 +1477,87 @@ static void rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
+ 	RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK);
+ }
+ 
++static bool rb_check_links(struct ring_buffer_per_cpu *cpu_buffer,
++			   struct list_head *list)
++{
++	if (RB_WARN_ON(cpu_buffer,
++		       rb_list_head(rb_list_head(list->next)->prev) != list))
++		return false;
++
++	if (RB_WARN_ON(cpu_buffer,
++		       rb_list_head(rb_list_head(list->prev)->next) != list))
++		return false;
++
++	return true;
++}
++
+ /**
+  * rb_check_pages - integrity check of buffer pages
+  * @cpu_buffer: CPU buffer with pages to test
+  *
+  * As a safety measure we check to make sure the data pages have not
+  * been corrupted.
+- *
+- * Callers of this function need to guarantee that the list of pages doesn't get
+- * modified during the check. In particular, if it's possible that the function
+- * is invoked with concurrent readers which can swap in a new reader page then
+- * the caller should take cpu_buffer->reader_lock.
+  */
+ static void rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
+ {
+-	struct list_head *head = rb_list_head(cpu_buffer->pages);
+-	struct list_head *tmp;
++	struct list_head *head, *tmp;
++	unsigned long buffer_cnt;
++	unsigned long flags;
++	int nr_loops = 0;
+ 
+-	if (RB_WARN_ON(cpu_buffer,
+-			rb_list_head(rb_list_head(head->next)->prev) != head))
++	/*
++	 * Walk the linked list underpinning the ring buffer and validate all
++	 * its next and prev links.
++	 *
++	 * The check acquires the reader_lock to avoid concurrent processing
++	 * with code that could be modifying the list. However, the lock cannot
++	 * be held for the entire duration of the walk, as this would make the
++	 * time when interrupts are disabled non-deterministic, dependent on the
++	 * ring buffer size. Therefore, the code releases and re-acquires the
++	 * lock after checking each page. The ring_buffer_per_cpu.cnt variable
++	 * is then used to detect if the list was modified while the lock was
++	 * not held, in which case the check needs to be restarted.
++	 *
++	 * The code attempts to perform the check at most three times before
++	 * giving up. This is acceptable because this is only a self-validation
++	 * to detect problems early on. In practice, the list modification
++	 * operations are fairly spaced, and so this check typically succeeds at
++	 * most on the second try.
++	 */
++again:
++	if (++nr_loops > 3)
+ 		return;
+ 
+-	if (RB_WARN_ON(cpu_buffer,
+-			rb_list_head(rb_list_head(head->prev)->next) != head))
+-		return;
++	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
++	head = rb_list_head(cpu_buffer->pages);
++	if (!rb_check_links(cpu_buffer, head))
++		goto out_locked;
++	buffer_cnt = cpu_buffer->cnt;
++	tmp = head;
++	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ 
+-	for (tmp = rb_list_head(head->next); tmp != head; tmp = rb_list_head(tmp->next)) {
+-		if (RB_WARN_ON(cpu_buffer,
+-				rb_list_head(rb_list_head(tmp->next)->prev) != tmp))
+-			return;
++	while (true) {
++		raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+ 
+-		if (RB_WARN_ON(cpu_buffer,
+-				rb_list_head(rb_list_head(tmp->prev)->next) != tmp))
+-			return;
++		if (buffer_cnt != cpu_buffer->cnt) {
++			/* The list was updated, try again. */
++			raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
++			goto again;
++		}
++
++		tmp = rb_list_head(tmp->next);
++		if (tmp == head)
++			/* The iteration circled back, all is done. */
++			goto out_locked;
++
++		if (!rb_check_links(cpu_buffer, tmp))
++			goto out_locked;
++
++		raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ 	}
++
++out_locked:
++	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ }
+ 
+ /*
+@@ -2532,6 +2581,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
+ 
+ 	/* make sure pages points to a valid page in the ring buffer */
+ 	cpu_buffer->pages = next_page;
++	cpu_buffer->cnt++;
+ 
+ 	/* update head page */
+ 	if (head_bit)
+@@ -2638,6 +2688,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
+ 			 * pointer to point to end of list
+ 			 */
+ 			head_page->prev = last_page;
++			cpu_buffer->cnt++;
+ 			success = true;
+ 			break;
+ 		}
+@@ -2873,12 +2924,8 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
+ 		 */
+ 		synchronize_rcu();
+ 		for_each_buffer_cpu(buffer, cpu) {
+-			unsigned long flags;
+-
+ 			cpu_buffer = buffer->buffers[cpu];
+-			raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+ 			rb_check_pages(cpu_buffer);
+-			raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ 		}
+ 		atomic_dec(&buffer->record_disabled);
+ 	}
+@@ -5296,6 +5343,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
+ 	rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
+ 	rb_inc_page(&cpu_buffer->head_page);
+ 
++	cpu_buffer->cnt++;
+ 	local_inc(&cpu_buffer->pages_read);
+ 
+ 	/* Finally update the reader page to the new head */
+@@ -5835,12 +5883,9 @@ void
+ ring_buffer_read_finish(struct ring_buffer_iter *iter)
+ {
+ 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
+-	unsigned long flags;
+ 
+ 	/* Use this opportunity to check the integrity of the ring buffer. */
+-	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+ 	rb_check_pages(cpu_buffer);
+-	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ 
+ 	atomic_dec(&cpu_buffer->resize_disabled);
+ 	kfree(iter->event);
+@@ -6757,6 +6802,7 @@ int ring_buffer_subbuf_order_set(struct trace_buffer *buffer, int order)
+ 		/* Install the new pages, remove the head from the list */
+ 		cpu_buffer->pages = cpu_buffer->new_pages.next;
+ 		list_del_init(&cpu_buffer->new_pages);
++		cpu_buffer->cnt++;
+ 
+ 		cpu_buffer->head_page
+ 			= list_entry(cpu_buffer->pages, struct buffer_page, list);
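
The cnt field added above drives a restartable walk: rb_check_pages() now holds reader_lock for only one page at a time and restarts if the generation counter shows the list changed in between. A simplified pthread sketch of the same pattern (hypothetical node and list types, not the ring-buffer code itself):

#include <pthread.h>
#include <stdbool.h>

struct node {
	struct node *next;		/* circular, singly linked */
};

struct checked_list {
	pthread_mutex_t lock;
	struct node *head;
	unsigned long gen;		/* bumped on every modification */
};

/* Validate one node per lock hold; restart (at most three tries) if
 * the generation counter moved while the lock was dropped. */
static bool list_is_consistent(struct checked_list *l,
			       bool (*check)(struct node *))
{
	struct node *pos, *head;
	unsigned long gen;
	int tries = 0;

again:
	if (++tries > 3)
		return true;	/* give up quietly, as rb_check_pages() does */

	pthread_mutex_lock(&l->lock);
	head = l->head;
	if (!check(head)) {
		pthread_mutex_unlock(&l->lock);
		return false;
	}
	gen = l->gen;
	pos = head;
	pthread_mutex_unlock(&l->lock);

	for (;;) {
		pthread_mutex_lock(&l->lock);
		if (gen != l->gen) {
			pthread_mutex_unlock(&l->lock);
			goto again;	/* list changed under us */
		}
		pos = pos->next;
		if (pos == head || !check(pos)) {
			bool ok = (pos == head);

			pthread_mutex_unlock(&l->lock);
			return ok;
		}
		pthread_mutex_unlock(&l->lock);
	}
}

The key property is that the lock is never held for longer than one check step, so the validation no longer makes the interrupts-off window scale with the buffer size; that is also why the callers above could drop their own reader_lock acquisitions.
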
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 6a891e00aa7f46..17d2ffde0bb604 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -988,7 +988,8 @@ static inline void trace_access_lock_init(void)
+ #endif
+ 
+ #ifdef CONFIG_STACKTRACE
+-static void __ftrace_trace_stack(struct trace_buffer *buffer,
++static void __ftrace_trace_stack(struct trace_array *tr,
++				 struct trace_buffer *buffer,
+ 				 unsigned int trace_ctx,
+ 				 int skip, struct pt_regs *regs);
+ static inline void ftrace_trace_stack(struct trace_array *tr,
+@@ -997,7 +998,8 @@ static inline void ftrace_trace_stack(struct trace_array *tr,
+ 				      int skip, struct pt_regs *regs);
+ 
+ #else
+-static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
++static inline void __ftrace_trace_stack(struct trace_array *tr,
++					struct trace_buffer *buffer,
+ 					unsigned int trace_ctx,
+ 					int skip, struct pt_regs *regs)
+ {
+@@ -2947,7 +2949,8 @@ struct ftrace_stacks {
+ static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
+ static DEFINE_PER_CPU(int, ftrace_stack_reserve);
+ 
+-static void __ftrace_trace_stack(struct trace_buffer *buffer,
++static void __ftrace_trace_stack(struct trace_array *tr,
++				 struct trace_buffer *buffer,
+ 				 unsigned int trace_ctx,
+ 				 int skip, struct pt_regs *regs)
+ {
+@@ -2994,6 +2997,20 @@ static void __ftrace_trace_stack(struct trace_buffer *buffer,
+ 		nr_entries = stack_trace_save(fstack->calls, size, skip);
+ 	}
+ 
++#ifdef CONFIG_DYNAMIC_FTRACE
++	/* Mark entry of stack trace as trampoline code */
++	if (tr->ops && tr->ops->trampoline) {
++		unsigned long tramp_start = tr->ops->trampoline;
++		unsigned long tramp_end = tramp_start + tr->ops->trampoline_size;
++		unsigned long *calls = fstack->calls;
++
++		for (int i = 0; i < nr_entries; i++) {
++			if (calls[i] >= tramp_start && calls[i] < tramp_end)
++				calls[i] = FTRACE_TRAMPOLINE_MARKER;
++		}
++	}
++#endif
++
+ 	event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
+ 				    struct_size(entry, caller, nr_entries),
+ 				    trace_ctx);
+@@ -3024,7 +3041,7 @@ static inline void ftrace_trace_stack(struct trace_array *tr,
+ 	if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
+ 		return;
+ 
+-	__ftrace_trace_stack(buffer, trace_ctx, skip, regs);
++	__ftrace_trace_stack(tr, buffer, trace_ctx, skip, regs);
+ }
+ 
+ void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
+@@ -3033,7 +3050,7 @@ void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
+ 	struct trace_buffer *buffer = tr->array_buffer.buffer;
+ 
+ 	if (rcu_is_watching()) {
+-		__ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
++		__ftrace_trace_stack(tr, buffer, trace_ctx, skip, NULL);
+ 		return;
+ 	}
+ 
+@@ -3050,7 +3067,7 @@ void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
+ 		return;
+ 
+ 	ct_irq_enter_irqson();
+-	__ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
++	__ftrace_trace_stack(tr, buffer, trace_ctx, skip, NULL);
+ 	ct_irq_exit_irqson();
+ }
+ 
+@@ -3067,8 +3084,8 @@ void trace_dump_stack(int skip)
+ 	/* Skip 1 to skip this function. */
+ 	skip++;
+ #endif
+-	__ftrace_trace_stack(printk_trace->array_buffer.buffer,
+-			     tracing_gen_ctx(), skip, NULL);
++	__ftrace_trace_stack(printk_trace, printk_trace->array_buffer.buffer,
++				tracing_gen_ctx(), skip, NULL);
+ }
+ EXPORT_SYMBOL_GPL(trace_dump_stack);
+ 
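
The trampoline filter above is a plain range check over the saved stack entries, substituting the sentinel defined in the trace.h hunk that follows. A standalone sketch:

#include <assert.h>
#include <limits.h>
#include <stddef.h>

#define FTRACE_TRAMPOLINE_MARKER  ((unsigned long)INT_MAX)

/* Replace any saved return address that falls inside the trampoline
 * range [start, start + size) with the sentinel marker. */
static void mark_trampoline_entries(unsigned long *calls, size_t n,
				    unsigned long start, unsigned long size)
{
	for (size_t i = 0; i < n; i++) {
		if (calls[i] >= start && calls[i] < start + size)
			calls[i] = FTRACE_TRAMPOLINE_MARKER;
	}
}

int main(void)
{
	unsigned long stack[] = { 0x1000, 0x2010, 0x3000 };

	mark_trampoline_entries(stack, 3, 0x2000, 0x100);
	assert(stack[1] == FTRACE_TRAMPOLINE_MARKER);
	assert(stack[0] == 0x1000 && stack[2] == 0x3000);
	return 0;
}

The trace_output.c hunk further down then prints the sentinel as "[FTRACE TRAMPOLINE]" instead of attempting symbol resolution on a trampoline address that may already have been freed.
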
+diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
+index c866991b9c78bf..30d6675c78cfe1 100644
+--- a/kernel/trace/trace.h
++++ b/kernel/trace/trace.h
+@@ -2176,4 +2176,11 @@ static inline int rv_init_interface(void)
+ }
+ #endif
+ 
++/*
++ * This is used only to distinguish
++ * function address from trampoline code.
++ * So this value has no meaning.
++ */
++#define FTRACE_TRAMPOLINE_MARKER  ((unsigned long) INT_MAX)
++
+ #endif /* _LINUX_KERNEL_TRACE_H */
+diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
+index 4702efb00ff21e..4cb2ebc439be68 100644
+--- a/kernel/trace/trace_clock.c
++++ b/kernel/trace/trace_clock.c
+@@ -154,5 +154,5 @@ static atomic64_t trace_counter;
+  */
+ u64 notrace trace_clock_counter(void)
+ {
+-	return atomic64_add_return(1, &trace_counter);
++	return atomic64_inc_return(&trace_counter);
+ }
+diff --git a/kernel/trace/trace_eprobe.c b/kernel/trace/trace_eprobe.c
+index ebda68ee9abff9..be8be0c1aaf0f1 100644
+--- a/kernel/trace/trace_eprobe.c
++++ b/kernel/trace/trace_eprobe.c
+@@ -963,6 +963,11 @@ static int __trace_eprobe_create(int argc, const char *argv[])
+ 		goto error;
+ 	}
+ 	ret = dyn_event_add(&ep->devent, &ep->tp.event->call);
++	if (ret < 0) {
++		trace_probe_unregister_event_call(&ep->tp);
++		mutex_unlock(&event_mutex);
++		goto error;
++	}
+ 	mutex_unlock(&event_mutex);
+ 	return ret;
+ parse_error:
+diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
+index 868f2f912f2809..c14573e5a90337 100644
+--- a/kernel/trace/trace_output.c
++++ b/kernel/trace/trace_output.c
+@@ -1246,6 +1246,10 @@ static enum print_line_t trace_stack_print(struct trace_iterator *iter,
+ 			break;
+ 
+ 		trace_seq_puts(s, " => ");
++		if ((*p) == FTRACE_TRAMPOLINE_MARKER) {
++			trace_seq_puts(s, "[FTRACE TRAMPOLINE]\n");
++			continue;
++		}
+ 		seq_print_ip_sym(s, (*p) + delta, flags);
+ 		trace_seq_putc(s, '\n');
+ 	}
+diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
+index 785733245eadf5..f9b21bac9d45e6 100644
+--- a/kernel/trace/trace_syscalls.c
++++ b/kernel/trace/trace_syscalls.c
+@@ -299,6 +299,12 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
+ 	int syscall_nr;
+ 	int size;
+ 
++	/*
++	 * Syscall probe called with preemption enabled, but the ring
++	 * buffer and per-cpu data require preemption to be disabled.
++	 */
++	guard(preempt_notrace)();
++
+ 	syscall_nr = trace_get_syscall_nr(current, regs);
+ 	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
+ 		return;
+@@ -338,6 +344,12 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
+ 	struct trace_event_buffer fbuffer;
+ 	int syscall_nr;
+ 
++	/*
++	 * Syscall probe called with preemption enabled, but the ring
++	 * buffer and per-cpu data require preemption to be disabled.
++	 */
++	guard(preempt_notrace)();
++
+ 	syscall_nr = trace_get_syscall_nr(current, regs);
+ 	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
+ 		return;
+diff --git a/kernel/trace/tracing_map.c b/kernel/trace/tracing_map.c
+index 3a56e7c8aa4f67..1921ade45be38b 100644
+--- a/kernel/trace/tracing_map.c
++++ b/kernel/trace/tracing_map.c
+@@ -845,15 +845,11 @@ int tracing_map_init(struct tracing_map *map)
+ static int cmp_entries_dup(const void *A, const void *B)
+ {
+ 	const struct tracing_map_sort_entry *a, *b;
+-	int ret = 0;
+ 
+ 	a = *(const struct tracing_map_sort_entry **)A;
+ 	b = *(const struct tracing_map_sort_entry **)B;
+ 
+-	if (memcmp(a->key, b->key, a->elt->map->key_size))
+-		ret = 1;
+-
+-	return ret;
++	return memcmp(a->key, b->key, a->elt->map->key_size);
+ }
+ 
+ static int cmp_entries_sum(const void *A, const void *B)
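
Returning the raw memcmp() result above is not just a simplification: sort() needs a comparator that imposes a total order, and the old code returned 1 for any mismatch, so cmp(a, b) and cmp(b, a) could both claim "greater". A minimal demonstration of the fixed shape with qsort():

#include <assert.h>
#include <stdlib.h>
#include <string.h>

#define KEY_SIZE 8

/* Comparator with a proper total order, as in the fixed
 * cmp_entries_dup(): memcmp() gives <0, 0 or >0. */
static int cmp_keys(const void *a, const void *b)
{
	return memcmp(a, b, KEY_SIZE);
}

int main(void)
{
	unsigned char keys[3][KEY_SIZE] = {
		{ 2 }, { 1 }, { 3 },
	};

	qsort(keys, 3, KEY_SIZE, cmp_keys);
	assert(keys[0][0] == 1 && keys[1][0] == 2 && keys[2][0] == 3);

	/* The old 0/1 comparator broke antisymmetry:
	 * cmp(a, b) == 1 and cmp(b, a) == 1 for any differing keys. */
	return 0;
}
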
+diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
+index 7312ae7c3cc57b..3f9c238bb58ea3 100644
+--- a/lib/Kconfig.debug
++++ b/lib/Kconfig.debug
+@@ -1328,19 +1328,6 @@ config SCHEDSTATS
+ 
+ endmenu
+ 
+-config DEBUG_TIMEKEEPING
+-	bool "Enable extra timekeeping sanity checking"
+-	help
+-	  This option will enable additional timekeeping sanity checks
+-	  which may be helpful when diagnosing issues where timekeeping
+-	  problems are suspected.
+-
+-	  This may include checks in the timekeeping hotpaths, so this
+-	  option may have a (very small) performance impact to some
+-	  workloads.
+-
+-	  If unsure, say N.
+-
+ config DEBUG_PREEMPT
+ 	bool "Debug preemptible kernel"
+ 	depends on DEBUG_KERNEL && PREEMPTION && TRACE_IRQFLAGS_SUPPORT
+diff --git a/lib/stackdepot.c b/lib/stackdepot.c
+index 5ed34cc963fc38..245d5b41669995 100644
+--- a/lib/stackdepot.c
++++ b/lib/stackdepot.c
+@@ -630,7 +630,15 @@ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
+ 			prealloc = page_address(page);
+ 	}
+ 
+-	raw_spin_lock_irqsave(&pool_lock, flags);
++	if (in_nmi()) {
++		/* We can never allocate in NMI context. */
++		WARN_ON_ONCE(can_alloc);
++		/* Best effort; bail if we fail to take the lock. */
++		if (!raw_spin_trylock_irqsave(&pool_lock, flags))
++			goto exit;
++	} else {
++		raw_spin_lock_irqsave(&pool_lock, flags);
++	}
+ 	printk_deferred_enter();
+ 
+ 	/* Try to find again, to avoid concurrently inserting duplicates. */
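
The stackdepot change above is the usual never-spin-in-NMI pattern: an NMI can fire while the interrupted CPU already holds pool_lock, so only a trylock is safe there. A hedged userspace sketch, with a mutex standing in for the raw spinlock and a plain flag for in_nmi():

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

/* Best-effort insert: in "NMI" context we may only trylock, since the
 * interrupted context might already hold pool_lock and spinning on it
 * would deadlock. */
static bool pool_insert(bool in_nmi_ctx, void (*do_insert)(void))
{
	if (in_nmi_ctx) {
		if (pthread_mutex_trylock(&pool_lock) != 0)
			return false;	/* bail; caller copes without */
	} else {
		pthread_mutex_lock(&pool_lock);
	}

	do_insert();
	pthread_mutex_unlock(&pool_lock);
	return true;
}
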
+diff --git a/lib/stackinit_kunit.c b/lib/stackinit_kunit.c
+index c14c6f8e6308df..c40818ec9c1801 100644
+--- a/lib/stackinit_kunit.c
++++ b/lib/stackinit_kunit.c
+@@ -212,6 +212,7 @@ static noinline void test_ ## name (struct kunit *test)		\
+ static noinline DO_NOTHING_TYPE_ ## which(var_type)		\
+ do_nothing_ ## name(var_type *ptr)				\
+ {								\
++	OPTIMIZER_HIDE_VAR(ptr);				\
+ 	/* Will always be true, but compiler doesn't know. */	\
+ 	if ((unsigned long)ptr > 0x2)				\
+ 		return DO_NOTHING_RETURN_ ## which(ptr);	\
+diff --git a/mm/debug.c b/mm/debug.c
+index aa57d3ffd4edf6..95b6ab809c0ee6 100644
+--- a/mm/debug.c
++++ b/mm/debug.c
+@@ -124,19 +124,22 @@ static void __dump_page(const struct page *page)
+ {
+ 	struct folio *foliop, folio;
+ 	struct page precise;
++	unsigned long head;
+ 	unsigned long pfn = page_to_pfn(page);
+ 	unsigned long idx, nr_pages = 1;
+ 	int loops = 5;
+ 
+ again:
+ 	memcpy(&precise, page, sizeof(*page));
+-	foliop = page_folio(&precise);
+-	if (foliop == (struct folio *)&precise) {
++	head = precise.compound_head;
++	if ((head & 1) == 0) {
++		foliop = (struct folio *)&precise;
+ 		idx = 0;
+ 		if (!folio_test_large(foliop))
+ 			goto dump;
+ 		foliop = (struct folio *)page;
+ 	} else {
++		foliop = (struct folio *)(head - 1);
+ 		idx = folio_page_idx(foliop, page);
+ 	}
+ 
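
The __dump_page() hunk above open-codes the compound_head encoding against the on-stack snapshot: bit 0 set marks a tail page, and the remaining bits are the head page's address, so decoding the snapshot resolves to the real folio rather than a pointer into the local copy. A minimal decode sketch with a simplified page type (the real struct page is far more involved):

#include <assert.h>
#include <stdint.h>

struct fake_page {
	unsigned long compound_head;	/* bit 0: tail-page flag */
};

/* Decode the compound_head encoding: tail pages store the head
 * page's address with bit 0 set; head and order-0 pages store
 * flags with bit 0 clear. */
static struct fake_page *page_head(struct fake_page *page)
{
	unsigned long head = page->compound_head;

	if (head & 1)
		return (struct fake_page *)(head - 1);
	return page;
}

int main(void)
{
	struct fake_page head_pg = { .compound_head = 0 };
	struct fake_page tail_pg = {
		.compound_head = (unsigned long)&head_pg | 1,
	};

	assert(page_head(&tail_pg) == &head_pg);
	assert(page_head(&head_pg) == &head_pg);
	return 0;
}
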
+diff --git a/mm/gup.c b/mm/gup.c
+index ad0c8922dac3cb..7053f8114e0127 100644
+--- a/mm/gup.c
++++ b/mm/gup.c
+@@ -52,7 +52,12 @@ static inline void sanity_check_pinned_pages(struct page **pages,
+ 	 */
+ 	for (; npages; npages--, pages++) {
+ 		struct page *page = *pages;
+-		struct folio *folio = page_folio(page);
++		struct folio *folio;
++
++		if (!page)
++			continue;
++
++		folio = page_folio(page);
+ 
+ 		if (is_zero_page(page) ||
+ 		    !folio_test_anon(folio))
+@@ -409,6 +414,10 @@ void unpin_user_pages(struct page **pages, unsigned long npages)
+ 
+ 	sanity_check_pinned_pages(pages, npages);
+ 	for (i = 0; i < npages; i += nr) {
++		if (!pages[i]) {
++			nr = 1;
++			continue;
++		}
+ 		folio = gup_folio_next(pages, npages, i, &nr);
+ 		gup_put_folio(folio, nr, FOLL_PIN);
+ 	}
+diff --git a/mm/kasan/report.c b/mm/kasan/report.c
+index b48c768acc84d2..c7c0083203cb73 100644
+--- a/mm/kasan/report.c
++++ b/mm/kasan/report.c
+@@ -200,7 +200,7 @@ static inline void fail_non_kasan_kunit_test(void) { }
+ 
+ #endif /* CONFIG_KUNIT */
+ 
+-static DEFINE_SPINLOCK(report_lock);
++static DEFINE_RAW_SPINLOCK(report_lock);
+ 
+ static void start_report(unsigned long *flags, bool sync)
+ {
+@@ -211,7 +211,7 @@ static void start_report(unsigned long *flags, bool sync)
+ 	lockdep_off();
+ 	/* Make sure we don't end up in loop. */
+ 	report_suppress_start();
+-	spin_lock_irqsave(&report_lock, *flags);
++	raw_spin_lock_irqsave(&report_lock, *flags);
+ 	pr_err("==================================================================\n");
+ }
+ 
+@@ -221,7 +221,7 @@ static void end_report(unsigned long *flags, const void *addr, bool is_write)
+ 		trace_error_report_end(ERROR_DETECTOR_KASAN,
+ 				       (unsigned long)addr);
+ 	pr_err("==================================================================\n");
+-	spin_unlock_irqrestore(&report_lock, *flags);
++	raw_spin_unlock_irqrestore(&report_lock, *flags);
+ 	if (!test_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags))
+ 		check_panic_on_warn("KASAN");
+ 	switch (kasan_arg_fault) {
+diff --git a/mm/memblock.c b/mm/memblock.c
+index 0389ce5cd281e1..095c18b5c430da 100644
+--- a/mm/memblock.c
++++ b/mm/memblock.c
+@@ -735,7 +735,7 @@ int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
+ /**
+  * memblock_validate_numa_coverage - check if amount of memory with
+  * no node ID assigned is less than a threshold
+- * @threshold_bytes: maximal number of pages that can have unassigned node
++ * @threshold_bytes: maximal memory size that can have unassigned node
+  * ID (in bytes).
+  *
+  * A buggy firmware may report memory that does not belong to any node.
+@@ -755,7 +755,7 @@ bool __init_memblock memblock_validate_numa_coverage(unsigned long threshold_byt
+ 			nr_pages += end_pfn - start_pfn;
+ 	}
+ 
+-	if ((nr_pages << PAGE_SHIFT) >= threshold_bytes) {
++	if ((nr_pages << PAGE_SHIFT) > threshold_bytes) {
+ 		mem_size_mb = memblock_phys_mem_size() >> 20;
+ 		pr_err("NUMA: no nodes coverage for %luMB of %luMB RAM\n",
+ 		       (nr_pages << PAGE_SHIFT) >> 20, mem_size_mb);
+diff --git a/mm/memcontrol-v1.h b/mm/memcontrol-v1.h
+index c0672e25bcdb20..6fbc78e0e440ce 100644
+--- a/mm/memcontrol-v1.h
++++ b/mm/memcontrol-v1.h
+@@ -38,7 +38,7 @@ void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n);
+ 	     iter = mem_cgroup_iter(NULL, iter, NULL))
+ 
+ /* Whether legacy memory+swap accounting is active */
+-static bool do_memsw_account(void)
++static inline bool do_memsw_account(void)
+ {
+ 	return !cgroup_subsys_on_dfl(memory_cgrp_subsys);
+ }
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+index b646fab3e45e10..7b908c4cc7eecb 100644
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -1080,6 +1080,10 @@ static long migrate_to_node(struct mm_struct *mm, int source, int dest,
+ 
+ 	mmap_read_lock(mm);
+ 	vma = find_vma(mm, 0);
++	if (unlikely(!vma)) {
++		mmap_read_unlock(mm);
++		return 0;
++	}
+ 
+ 	/*
+ 	 * This does not migrate the range, but isolates all pages that
+diff --git a/mm/mmap.c b/mm/mmap.c
+index 4f6e566d52faa6..7fb4c1e97175f9 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -901,6 +901,7 @@ __get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
+ 	if (get_area) {
+ 		addr = get_area(file, addr, len, pgoff, flags);
+ 	} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)
++		   && !addr /* no hint */
+ 		   && IS_ALIGNED(len, PMD_SIZE)) {
+ 		/* Ensures that larger anonymous mappings are THP aligned. */
+ 		addr = thp_get_unmapped_area_vmflags(file, addr, len,
+diff --git a/mm/readahead.c b/mm/readahead.c
+index 3dc6c7a128dd35..99fdb2b5b56862 100644
+--- a/mm/readahead.c
++++ b/mm/readahead.c
+@@ -453,8 +453,7 @@ void page_cache_ra_order(struct readahead_control *ractl,
+ 		struct file_ra_state *ra, unsigned int new_order)
+ {
+ 	struct address_space *mapping = ractl->mapping;
+-	pgoff_t start = readahead_index(ractl);
+-	pgoff_t index = start;
++	pgoff_t index = readahead_index(ractl);
+ 	unsigned int min_order = mapping_min_folio_order(mapping);
+ 	pgoff_t limit = (i_size_read(mapping->host) - 1) >> PAGE_SHIFT;
+ 	pgoff_t mark = index + ra->size - ra->async_size;
+@@ -517,7 +516,7 @@ void page_cache_ra_order(struct readahead_control *ractl,
+ 	if (!err)
+ 		return;
+ fallback:
+-	do_page_cache_ra(ractl, ra->size - (index - start), ra->async_size);
++	do_page_cache_ra(ractl, ra->size, ra->async_size);
+ }
+ 
+ static unsigned long ractl_max_pages(struct readahead_control *ractl,
+diff --git a/mm/vmalloc.c b/mm/vmalloc.c
+index 5480b77f4167d7..0161cb4391e1d1 100644
+--- a/mm/vmalloc.c
++++ b/mm/vmalloc.c
+@@ -4093,7 +4093,8 @@ void *vrealloc_noprof(const void *p, size_t size, gfp_t flags)
+ 		/* Zero out spare memory. */
+ 		if (want_init_on_alloc(flags))
+ 			memset((void *)p + size, 0, old_size - size);
+-
++		kasan_poison_vmalloc(p + size, old_size - size);
++		kasan_unpoison_vmalloc(p, size, KASAN_VMALLOC_PROT_NORMAL);
+ 		return (void *)p;
+ 	}
+ 
+diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
+index 6354cdf9c2b372..e6591f487a5119 100644
+--- a/net/bluetooth/hci_conn.c
++++ b/net/bluetooth/hci_conn.c
+@@ -1128,9 +1128,9 @@ void hci_conn_del(struct hci_conn *conn)
+ 
+ 	hci_conn_unlink(conn);
+ 
+-	cancel_delayed_work_sync(&conn->disc_work);
+-	cancel_delayed_work_sync(&conn->auto_accept_work);
+-	cancel_delayed_work_sync(&conn->idle_work);
++	disable_delayed_work_sync(&conn->disc_work);
++	disable_delayed_work_sync(&conn->auto_accept_work);
++	disable_delayed_work_sync(&conn->idle_work);
+ 
+ 	if (conn->type == ACL_LINK) {
+ 		/* Unacked frames */
+@@ -2345,13 +2345,9 @@ struct hci_conn *hci_bind_bis(struct hci_dev *hdev, bdaddr_t *dst,
+ 					  conn->iso_qos.bcast.big);
+ 	if (parent && parent != conn) {
+ 		link = hci_conn_link(parent, conn);
+-		if (!link) {
+-			hci_conn_drop(conn);
+-			return ERR_PTR(-ENOLINK);
+-		}
+-
+-		/* Link takes the refcount */
+ 		hci_conn_drop(conn);
++		if (!link)
++			return ERR_PTR(-ENOLINK);
+ 	}
+ 
+ 	return conn;
+@@ -2441,15 +2437,12 @@ struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst,
+ 	}
+ 
+ 	link = hci_conn_link(le, cis);
++	hci_conn_drop(cis);
+ 	if (!link) {
+ 		hci_conn_drop(le);
+-		hci_conn_drop(cis);
+ 		return ERR_PTR(-ENOLINK);
+ 	}
+ 
+-	/* Link takes the refcount */
+-	hci_conn_drop(cis);
+-
+ 	cis->state = BT_CONNECT;
+ 
+ 	hci_le_create_cis_pending(hdev);
+diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
+index 0ac354db817794..72439764186ed2 100644
+--- a/net/bluetooth/hci_core.c
++++ b/net/bluetooth/hci_core.c
+@@ -3771,18 +3771,22 @@ static void hci_tx_work(struct work_struct *work)
+ /* ACL data packet */
+ static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
+ {
+-	struct hci_acl_hdr *hdr = (void *) skb->data;
++	struct hci_acl_hdr *hdr;
+ 	struct hci_conn *conn;
+ 	__u16 handle, flags;
+ 
+-	skb_pull(skb, HCI_ACL_HDR_SIZE);
++	hdr = skb_pull_data(skb, sizeof(*hdr));
++	if (!hdr) {
++		bt_dev_err(hdev, "ACL packet too small");
++		goto drop;
++	}
+ 
+ 	handle = __le16_to_cpu(hdr->handle);
+ 	flags  = hci_flags(handle);
+ 	handle = hci_handle(handle);
+ 
+-	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
+-	       handle, flags);
++	bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
++		   handle, flags);
+ 
+ 	hdev->stat.acl_rx++;
+ 
+@@ -3801,6 +3805,7 @@ static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
+ 			   handle);
+ 	}
+ 
++drop:
+ 	kfree_skb(skb);
+ }
+ 
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index 2e4bd3e961ce09..2b5ba8acd1d84a 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -3626,6 +3626,13 @@ static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data,
+ 		goto unlock;
+ 	}
+ 
++	/* We skip the WRITE_AUTH_PAYLOAD_TIMEOUT for ATS2851 based controllers
++	 * to avoid unexpected SMP command errors when pairing.
++	 */
++	if (test_bit(HCI_QUIRK_BROKEN_WRITE_AUTH_PAYLOAD_TIMEOUT,
++		     &hdev->quirks))
++		goto notify;
++
+ 	/* Set the default Authenticated Payload Timeout after
+ 	 * an LE Link is established. As per Core Spec v5.0, Vol 2, Part B
+ 	 * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be
+diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
+index c0203a2b510756..c86f4e42e69cab 100644
+--- a/net/bluetooth/hci_sync.c
++++ b/net/bluetooth/hci_sync.c
+@@ -4842,6 +4842,13 @@ static const struct {
+ 	HCI_QUIRK_BROKEN(SET_RPA_TIMEOUT,
+ 			 "HCI LE Set Random Private Address Timeout command is "
+ 			 "advertised, but not supported."),
++	HCI_QUIRK_BROKEN(EXT_CREATE_CONN,
++			 "HCI LE Extended Create Connection command is "
++			 "advertised, but not supported."),
++	HCI_QUIRK_BROKEN(WRITE_AUTH_PAYLOAD_TIMEOUT,
++			 "HCI WRITE AUTH PAYLOAD TIMEOUT command leads "
++			 "to unexpected SMP errors when pairing "
++			 "and will not be used."),
+ 	HCI_QUIRK_BROKEN(LE_CODED,
+ 			 "HCI LE Coded PHY feature bit is set, "
+ 			 "but its usage is not supported.")
+@@ -6477,7 +6484,7 @@ static int hci_le_create_conn_sync(struct hci_dev *hdev, void *data)
+ 					     &own_addr_type);
+ 	if (err)
+ 		goto done;
+-
++	/* Send command LE Extended Create Connection if supported */
+ 	if (use_ext_conn(hdev)) {
+ 		err = hci_le_ext_create_conn_sync(hdev, conn, own_addr_type);
+ 		goto done;
+diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
+index ba437c6f6ee591..18e89e764f3b42 100644
+--- a/net/bluetooth/l2cap_sock.c
++++ b/net/bluetooth/l2cap_sock.c
+@@ -1886,6 +1886,7 @@ static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock,
+ 	chan = l2cap_chan_create();
+ 	if (!chan) {
+ 		sk_free(sk);
++		sock->sk = NULL;
+ 		return NULL;
+ 	}
+ 
+diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
+index 8af1bf518321fd..40766f8119ed9c 100644
+--- a/net/bluetooth/rfcomm/sock.c
++++ b/net/bluetooth/rfcomm/sock.c
+@@ -274,13 +274,13 @@ static struct sock *rfcomm_sock_alloc(struct net *net, struct socket *sock,
+ 	struct rfcomm_dlc *d;
+ 	struct sock *sk;
+ 
+-	sk = bt_sock_alloc(net, sock, &rfcomm_proto, proto, prio, kern);
+-	if (!sk)
++	d = rfcomm_dlc_alloc(prio);
++	if (!d)
+ 		return NULL;
+ 
+-	d = rfcomm_dlc_alloc(prio);
+-	if (!d) {
+-		sk_free(sk);
++	sk = bt_sock_alloc(net, sock, &rfcomm_proto, proto, prio, kern);
++	if (!sk) {
++		rfcomm_dlc_free(d);
+ 		return NULL;
+ 	}
+ 
+diff --git a/net/can/af_can.c b/net/can/af_can.c
+index 707576eeeb5823..01f3fbb3b67dc6 100644
+--- a/net/can/af_can.c
++++ b/net/can/af_can.c
+@@ -171,6 +171,7 @@ static int can_create(struct net *net, struct socket *sock, int protocol,
+ 		/* release sk on errors */
+ 		sock_orphan(sk);
+ 		sock_put(sk);
++		sock->sk = NULL;
+ 	}
+ 
+  errout:
+diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c
+index 319f47df33300c..95f7a7e65a73fa 100644
+--- a/net/can/j1939/transport.c
++++ b/net/can/j1939/transport.c
+@@ -1505,7 +1505,7 @@ static struct j1939_session *j1939_session_new(struct j1939_priv *priv,
+ 	session->state = J1939_SESSION_NEW;
+ 
+ 	skb_queue_head_init(&session->skb_queue);
+-	skb_queue_tail(&session->skb_queue, skb);
++	skb_queue_tail(&session->skb_queue, skb_get(skb));
+ 
+ 	skcb = j1939_skb_to_cb(skb);
+ 	memcpy(&session->skcb, skcb, sizeof(session->skcb));
+diff --git a/net/core/link_watch.c b/net/core/link_watch.c
+index ab150641142aa1..1b4d39e3808427 100644
+--- a/net/core/link_watch.c
++++ b/net/core/link_watch.c
+@@ -45,9 +45,14 @@ static unsigned int default_operstate(const struct net_device *dev)
+ 		int iflink = dev_get_iflink(dev);
+ 		struct net_device *peer;
+ 
+-		if (iflink == dev->ifindex)
++		/* If called from netdev_run_todo()/linkwatch_sync_dev(),
++		 * dev_net(dev) can be already freed, and RTNL is not held.
++		 */
++		if (dev->reg_state == NETREG_UNREGISTERED ||
++		    iflink == dev->ifindex)
+ 			return IF_OPER_DOWN;
+ 
++		ASSERT_RTNL();
+ 		peer = __dev_get_by_index(dev_net(dev), iflink);
+ 		if (!peer)
+ 			return IF_OPER_DOWN;
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c
+index 77b819cd995b25..cc58315a40a79c 100644
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -2876,6 +2876,7 @@ static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
+ 	err = neigh_valid_dump_req(nlh, cb->strict_check, &filter, cb->extack);
+ 	if (err < 0 && cb->strict_check)
+ 		return err;
++	err = 0;
+ 
+ 	s_t = cb->args[0];
+ 
+diff --git a/net/core/netpoll.c b/net/core/netpoll.c
+index aa49b92e9194ba..45fb60bc480395 100644
+--- a/net/core/netpoll.c
++++ b/net/core/netpoll.c
+@@ -626,7 +626,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
+ 		goto out;
+ 	}
+ 
+-	if (!ndev->npinfo) {
++	if (!rcu_access_pointer(ndev->npinfo)) {
+ 		npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
+ 		if (!npinfo) {
+ 			err = -ENOMEM;
+diff --git a/net/dccp/feat.c b/net/dccp/feat.c
+index 54086bb05c42cd..f7554dcdaaba93 100644
+--- a/net/dccp/feat.c
++++ b/net/dccp/feat.c
+@@ -1166,8 +1166,12 @@ static u8 dccp_feat_change_recv(struct list_head *fn, u8 is_mandatory, u8 opt,
+ 			goto not_valid_or_not_known;
+ 		}
+ 
+-		return dccp_feat_push_confirm(fn, feat, local, &fval);
++		if (dccp_feat_push_confirm(fn, feat, local, &fval)) {
++			kfree(fval.sp.vec);
++			return DCCP_RESET_CODE_TOO_BUSY;
++		}
+ 
++		return 0;
+ 	} else if (entry->state == FEAT_UNSTABLE) {	/* 6.6.2 */
+ 		return 0;
+ 	}
+diff --git a/net/ethtool/bitset.c b/net/ethtool/bitset.c
+index 0515d6604b3b9d..f0883357d12e52 100644
+--- a/net/ethtool/bitset.c
++++ b/net/ethtool/bitset.c
+@@ -425,12 +425,32 @@ static int ethnl_parse_bit(unsigned int *index, bool *val, unsigned int nbits,
+ 	return 0;
+ }
+ 
++/**
++ * ethnl_bitmap32_equal() - Compare two bitmaps
++ * @map1:  first bitmap
++ * @map2:  second bitmap
++ * @nbits: bit size to compare
++ *
++ * Return: true if first @nbits are equal, false if not
++ */
++static bool ethnl_bitmap32_equal(const u32 *map1, const u32 *map2,
++				 unsigned int nbits)
++{
++	if (memcmp(map1, map2, nbits / 32 * sizeof(u32)))
++		return false;
++	if (nbits % 32 == 0)
++		return true;
++	return !((map1[nbits / 32] ^ map2[nbits / 32]) &
++		 ethnl_lower_bits(nbits % 32));
++}
++
+ static int
+ ethnl_update_bitset32_verbose(u32 *bitmap, unsigned int nbits,
+ 			      const struct nlattr *attr, struct nlattr **tb,
+ 			      ethnl_string_array_t names,
+ 			      struct netlink_ext_ack *extack, bool *mod)
+ {
++	u32 *saved_bitmap = NULL;
+ 	struct nlattr *bit_attr;
+ 	bool no_mask;
+ 	int rem;
+@@ -448,8 +468,20 @@ ethnl_update_bitset32_verbose(u32 *bitmap, unsigned int nbits,
+ 	}
+ 
+ 	no_mask = tb[ETHTOOL_A_BITSET_NOMASK];
+-	if (no_mask)
+-		ethnl_bitmap32_clear(bitmap, 0, nbits, mod);
++	if (no_mask) {
++		unsigned int nwords = DIV_ROUND_UP(nbits, 32);
++		unsigned int nbytes = nwords * sizeof(u32);
++		bool dummy;
++
++		/* The bitmap size is only the size of the map part without
++		 * its mask part.
++		 */
++		saved_bitmap = kcalloc(nwords, sizeof(u32), GFP_KERNEL);
++		if (!saved_bitmap)
++			return -ENOMEM;
++		memcpy(saved_bitmap, bitmap, nbytes);
++		ethnl_bitmap32_clear(bitmap, 0, nbits, &dummy);
++	}
+ 
+ 	nla_for_each_nested(bit_attr, tb[ETHTOOL_A_BITSET_BITS], rem) {
+ 		bool old_val, new_val;
+@@ -458,22 +490,30 @@ ethnl_update_bitset32_verbose(u32 *bitmap, unsigned int nbits,
+ 		if (nla_type(bit_attr) != ETHTOOL_A_BITSET_BITS_BIT) {
+ 			NL_SET_ERR_MSG_ATTR(extack, bit_attr,
+ 					    "only ETHTOOL_A_BITSET_BITS_BIT allowed in ETHTOOL_A_BITSET_BITS");
++			kfree(saved_bitmap);
+ 			return -EINVAL;
+ 		}
+ 		ret = ethnl_parse_bit(&idx, &new_val, nbits, bit_attr, no_mask,
+ 				      names, extack);
+-		if (ret < 0)
++		if (ret < 0) {
++			kfree(saved_bitmap);
+ 			return ret;
++		}
+ 		old_val = bitmap[idx / 32] & ((u32)1 << (idx % 32));
+ 		if (new_val != old_val) {
+ 			if (new_val)
+ 				bitmap[idx / 32] |= ((u32)1 << (idx % 32));
+ 			else
+ 				bitmap[idx / 32] &= ~((u32)1 << (idx % 32));
+-			*mod = true;
++			if (!no_mask)
++				*mod = true;
+ 		}
+ 	}
+ 
++	if (no_mask && !ethnl_bitmap32_equal(saved_bitmap, bitmap, nbits))
++		*mod = true;
++
++	kfree(saved_bitmap);
+ 	return 0;
+ }
+ 
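
The ethnl_bitmap32_equal() helper above compares whole 32-bit words with memcmp() and then masks the partial last word, so stale bits beyond nbits cannot cause a false "modified" report. A standalone version, assuming ethnl_lower_bits(n) yields a mask of the low n bits:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* Mask covering the low n bits of a 32-bit word, 1 <= n <= 31. */
static uint32_t lower_bits(unsigned int n)
{
	return (1u << n) - 1;
}

static bool bitmap32_equal(const uint32_t *a, const uint32_t *b,
			   unsigned int nbits)
{
	if (memcmp(a, b, nbits / 32 * sizeof(uint32_t)))
		return false;
	if (nbits % 32 == 0)
		return true;
	/* Only the low nbits%32 bits of the last word are significant. */
	return !((a[nbits / 32] ^ b[nbits / 32]) & lower_bits(nbits % 32));
}

int main(void)
{
	uint32_t a[2] = { 0xdeadbeef, 0x0000000f };
	uint32_t b[2] = { 0xdeadbeef, 0xffff000f };	/* differs above bit 4 */

	assert(bitmap32_equal(a, b, 36));	/* high junk ignored */
	assert(!bitmap32_equal(a, b, 64));
	return 0;
}

This is what lets the no-mask path compare the saved bitmap against the rewritten one and set *mod only on a real change.
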
+diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
+index f630d6645636dd..44048d7538ddc3 100644
+--- a/net/hsr/hsr_device.c
++++ b/net/hsr/hsr_device.c
+@@ -246,20 +246,22 @@ static const struct header_ops hsr_header_ops = {
+ 	.parse	 = eth_header_parse,
+ };
+ 
+-static struct sk_buff *hsr_init_skb(struct hsr_port *master)
++static struct sk_buff *hsr_init_skb(struct hsr_port *master, int extra)
+ {
+ 	struct hsr_priv *hsr = master->hsr;
+ 	struct sk_buff *skb;
+ 	int hlen, tlen;
++	int len;
+ 
+ 	hlen = LL_RESERVED_SPACE(master->dev);
+ 	tlen = master->dev->needed_tailroom;
++	len = sizeof(struct hsr_sup_tag) + sizeof(struct hsr_sup_payload);
+ 	/* skb size is same for PRP/HSR frames, only difference
+ 	 * being, for PRP it is a trailer and for HSR it is a
+-	 * header
++	 * header.
++	 * RedBox might use @extra more bytes.
+ 	 */
+-	skb = dev_alloc_skb(sizeof(struct hsr_sup_tag) +
+-			    sizeof(struct hsr_sup_payload) + hlen + tlen);
++	skb = dev_alloc_skb(len + extra + hlen + tlen);
+ 
+ 	if (!skb)
+ 		return skb;
+@@ -295,6 +297,7 @@ static void send_hsr_supervision_frame(struct hsr_port *port,
+ 	struct hsr_sup_tlv *hsr_stlv;
+ 	struct hsr_sup_tag *hsr_stag;
+ 	struct sk_buff *skb;
++	int extra = 0;
+ 
+ 	*interval = msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL);
+ 	if (hsr->announce_count < 3 && hsr->prot_version == 0) {
+@@ -303,7 +306,11 @@ static void send_hsr_supervision_frame(struct hsr_port *port,
+ 		hsr->announce_count++;
+ 	}
+ 
+-	skb = hsr_init_skb(port);
++	if (hsr->redbox)
++		extra = sizeof(struct hsr_sup_tlv) +
++			sizeof(struct hsr_sup_payload);
++
++	skb = hsr_init_skb(port, extra);
+ 	if (!skb) {
+ 		netdev_warn_once(port->dev, "HSR: Could not send supervision frame\n");
+ 		return;
+@@ -362,7 +369,7 @@ static void send_prp_supervision_frame(struct hsr_port *master,
+ 	struct hsr_sup_tag *hsr_stag;
+ 	struct sk_buff *skb;
+ 
+-	skb = hsr_init_skb(master);
++	skb = hsr_init_skb(master, 0);
+ 	if (!skb) {
+ 		netdev_warn_once(master->dev, "PRP: Could not send supervision frame\n");
+ 		return;
+diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c
+index b38060246e62e8..40c5fbbd155d66 100644
+--- a/net/hsr/hsr_forward.c
++++ b/net/hsr/hsr_forward.c
+@@ -688,6 +688,8 @@ static int fill_frame_info(struct hsr_frame_info *frame,
+ 		frame->is_vlan = true;
+ 
+ 	if (frame->is_vlan) {
++		if (skb->mac_len < offsetofend(struct hsr_vlan_ethhdr, vlanhdr))
++			return -EINVAL;
+ 		vlan_hdr = (struct hsr_vlan_ethhdr *)ethhdr;
+ 		proto = vlan_hdr->vlanhdr.h_vlan_encapsulated_proto;
+ 		/* FIXME: */
+diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c
+index 990a83455dcfb5..18d267921bb531 100644
+--- a/net/ieee802154/socket.c
++++ b/net/ieee802154/socket.c
+@@ -1043,19 +1043,21 @@ static int ieee802154_create(struct net *net, struct socket *sock,
+ 
+ 	if (sk->sk_prot->hash) {
+ 		rc = sk->sk_prot->hash(sk);
+-		if (rc) {
+-			sk_common_release(sk);
+-			goto out;
+-		}
++		if (rc)
++			goto out_sk_release;
+ 	}
+ 
+ 	if (sk->sk_prot->init) {
+ 		rc = sk->sk_prot->init(sk);
+ 		if (rc)
+-			sk_common_release(sk);
++			goto out_sk_release;
+ 	}
+ out:
+ 	return rc;
++out_sk_release:
++	sk_common_release(sk);
++	sock->sk = NULL;
++	goto out;
+ }
+ 
+ static const struct net_proto_family ieee802154_family_ops = {
+diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
+index b24d74616637a0..8095e82de8083d 100644
+--- a/net/ipv4/af_inet.c
++++ b/net/ipv4/af_inet.c
+@@ -376,32 +376,30 @@ static int inet_create(struct net *net, struct socket *sock, int protocol,
+ 		inet->inet_sport = htons(inet->inet_num);
+ 		/* Add to protocol hash chains. */
+ 		err = sk->sk_prot->hash(sk);
+-		if (err) {
+-			sk_common_release(sk);
+-			goto out;
+-		}
++		if (err)
++			goto out_sk_release;
+ 	}
+ 
+ 	if (sk->sk_prot->init) {
+ 		err = sk->sk_prot->init(sk);
+-		if (err) {
+-			sk_common_release(sk);
+-			goto out;
+-		}
++		if (err)
++			goto out_sk_release;
+ 	}
+ 
+ 	if (!kern) {
+ 		err = BPF_CGROUP_RUN_PROG_INET_SOCK(sk);
+-		if (err) {
+-			sk_common_release(sk);
+-			goto out;
+-		}
++		if (err)
++			goto out_sk_release;
+ 	}
+ out:
+ 	return err;
+ out_rcu_unlock:
+ 	rcu_read_unlock();
+ 	goto out;
++out_sk_release:
++	sk_common_release(sk);
++	sock->sk = NULL;
++	goto out;
+ }
+ 
+ 
+diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
+index e1384e7331d82f..c3ad41573b33ea 100644
+--- a/net/ipv4/icmp.c
++++ b/net/ipv4/icmp.c
+@@ -519,6 +519,9 @@ static struct rtable *icmp_route_lookup(struct net *net,
+ 	if (!IS_ERR(dst)) {
+ 		if (rt != rt2)
+ 			return rt;
++		if (inet_addr_type_dev_table(net, route_lookup_dev,
++					     fl4->daddr) == RTN_LOCAL)
++			return rt;
+ 	} else if (PTR_ERR(dst) == -EPERM) {
+ 		rt = NULL;
+ 	} else {
+diff --git a/net/ipv4/tcp_ao.c b/net/ipv4/tcp_ao.c
+index db6516092daf5b..bbb8d5f0eae7d3 100644
+--- a/net/ipv4/tcp_ao.c
++++ b/net/ipv4/tcp_ao.c
+@@ -109,12 +109,13 @@ bool tcp_ao_ignore_icmp(const struct sock *sk, int family, int type, int code)
+  * it's known that the keys in ao_info are matching peer's
+  * family/address/VRF/etc.
+  */
+-struct tcp_ao_key *tcp_ao_established_key(struct tcp_ao_info *ao,
++struct tcp_ao_key *tcp_ao_established_key(const struct sock *sk,
++					  struct tcp_ao_info *ao,
+ 					  int sndid, int rcvid)
+ {
+ 	struct tcp_ao_key *key;
+ 
+-	hlist_for_each_entry_rcu(key, &ao->head, node) {
++	hlist_for_each_entry_rcu(key, &ao->head, node, lockdep_sock_is_held(sk)) {
+ 		if ((sndid >= 0 && key->sndid != sndid) ||
+ 		    (rcvid >= 0 && key->rcvid != rcvid))
+ 			continue;
+@@ -205,7 +206,7 @@ static struct tcp_ao_key *__tcp_ao_do_lookup(const struct sock *sk, int l3index,
+ 	if (!ao)
+ 		return NULL;
+ 
+-	hlist_for_each_entry_rcu(key, &ao->head, node) {
++	hlist_for_each_entry_rcu(key, &ao->head, node, lockdep_sock_is_held(sk)) {
+ 		u8 prefixlen = min(prefix, key->prefixlen);
+ 
+ 		if (!tcp_ao_key_cmp(key, l3index, addr, prefixlen,
+@@ -793,7 +794,7 @@ int tcp_ao_prepare_reset(const struct sock *sk, struct sk_buff *skb,
+ 		if (!ao_info)
+ 			return -ENOENT;
+ 
+-		*key = tcp_ao_established_key(ao_info, aoh->rnext_keyid, -1);
++		*key = tcp_ao_established_key(sk, ao_info, aoh->rnext_keyid, -1);
+ 		if (!*key)
+ 			return -ENOENT;
+ 		*traffic_key = snd_other_key(*key);
+@@ -979,7 +980,7 @@ tcp_inbound_ao_hash(struct sock *sk, const struct sk_buff *skb,
+ 		 */
+ 		key = READ_ONCE(info->rnext_key);
+ 		if (key->rcvid != aoh->keyid) {
+-			key = tcp_ao_established_key(info, -1, aoh->keyid);
++			key = tcp_ao_established_key(sk, info, -1, aoh->keyid);
+ 			if (!key)
+ 				goto key_not_found;
+ 		}
+@@ -1003,7 +1004,7 @@ tcp_inbound_ao_hash(struct sock *sk, const struct sk_buff *skb,
+ 						   aoh->rnext_keyid,
+ 						   tcp_ao_hdr_maclen(aoh));
+ 			/* If the key is not found we do nothing. */
+-			key = tcp_ao_established_key(info, aoh->rnext_keyid, -1);
++			key = tcp_ao_established_key(sk, info, aoh->rnext_keyid, -1);
+ 			if (key)
+ 				/* pairs with tcp_ao_del_cmd */
+ 				WRITE_ONCE(info->current_key, key);
+@@ -1163,7 +1164,7 @@ void tcp_ao_established(struct sock *sk)
+ 	if (!ao)
+ 		return;
+ 
+-	hlist_for_each_entry_rcu(key, &ao->head, node)
++	hlist_for_each_entry_rcu(key, &ao->head, node, lockdep_sock_is_held(sk))
+ 		tcp_ao_cache_traffic_keys(sk, ao, key);
+ }
+ 
+@@ -1180,7 +1181,7 @@ void tcp_ao_finish_connect(struct sock *sk, struct sk_buff *skb)
+ 	WRITE_ONCE(ao->risn, tcp_hdr(skb)->seq);
+ 	ao->rcv_sne = 0;
+ 
+-	hlist_for_each_entry_rcu(key, &ao->head, node)
++	hlist_for_each_entry_rcu(key, &ao->head, node, lockdep_sock_is_held(sk))
+ 		tcp_ao_cache_traffic_keys(sk, ao, key);
+ }
+ 
+@@ -1256,14 +1257,14 @@ int tcp_ao_copy_all_matching(const struct sock *sk, struct sock *newsk,
+ 	key_head = rcu_dereference(hlist_first_rcu(&new_ao->head));
+ 	first_key = hlist_entry_safe(key_head, struct tcp_ao_key, node);
+ 
+-	key = tcp_ao_established_key(new_ao, tcp_rsk(req)->ao_keyid, -1);
++	key = tcp_ao_established_key(req_to_sk(req), new_ao, tcp_rsk(req)->ao_keyid, -1);
+ 	if (key)
+ 		new_ao->current_key = key;
+ 	else
+ 		new_ao->current_key = first_key;
+ 
+ 	/* set rnext_key */
+-	key = tcp_ao_established_key(new_ao, -1, tcp_rsk(req)->ao_rcv_next);
++	key = tcp_ao_established_key(req_to_sk(req), new_ao, -1, tcp_rsk(req)->ao_rcv_next);
+ 	if (key)
+ 		new_ao->rnext_key = key;
+ 	else
+@@ -1857,12 +1858,12 @@ static int tcp_ao_del_cmd(struct sock *sk, unsigned short int family,
+ 	 * if there's any.
+ 	 */
+ 	if (cmd.set_current) {
+-		new_current = tcp_ao_established_key(ao_info, cmd.current_key, -1);
++		new_current = tcp_ao_established_key(sk, ao_info, cmd.current_key, -1);
+ 		if (!new_current)
+ 			return -ENOENT;
+ 	}
+ 	if (cmd.set_rnext) {
+-		new_rnext = tcp_ao_established_key(ao_info, -1, cmd.rnext);
++		new_rnext = tcp_ao_established_key(sk, ao_info, -1, cmd.rnext);
+ 		if (!new_rnext)
+ 			return -ENOENT;
+ 	}
+@@ -1902,7 +1903,8 @@ static int tcp_ao_del_cmd(struct sock *sk, unsigned short int family,
+ 	 * "It is presumed that an MKT affecting a particular
+ 	 * connection cannot be destroyed during an active connection"
+ 	 */
+-	hlist_for_each_entry_rcu(key, &ao_info->head, node) {
++	hlist_for_each_entry_rcu(key, &ao_info->head, node,
++				 lockdep_sock_is_held(sk)) {
+ 		if (cmd.sndid != key->sndid ||
+ 		    cmd.rcvid != key->rcvid)
+ 			continue;
+@@ -2000,14 +2002,14 @@ static int tcp_ao_info_cmd(struct sock *sk, unsigned short int family,
+ 	 * if there's any.
+ 	 */
+ 	if (cmd.set_current) {
+-		new_current = tcp_ao_established_key(ao_info, cmd.current_key, -1);
++		new_current = tcp_ao_established_key(sk, ao_info, cmd.current_key, -1);
+ 		if (!new_current) {
+ 			err = -ENOENT;
+ 			goto out;
+ 		}
+ 	}
+ 	if (cmd.set_rnext) {
+-		new_rnext = tcp_ao_established_key(ao_info, -1, cmd.rnext);
++		new_rnext = tcp_ao_established_key(sk, ao_info, -1, cmd.rnext);
+ 		if (!new_rnext) {
+ 			err = -ENOENT;
+ 			goto out;
+@@ -2101,7 +2103,8 @@ int tcp_v4_parse_ao(struct sock *sk, int cmd, sockptr_t optval, int optlen)
+  * The layout of the fields in the user and kernel structures is expected to
+  * be the same (including in the 32bit vs 64bit case).
+  */
+-static int tcp_ao_copy_mkts_to_user(struct tcp_ao_info *ao_info,
++static int tcp_ao_copy_mkts_to_user(const struct sock *sk,
++				    struct tcp_ao_info *ao_info,
+ 				    sockptr_t optval, sockptr_t optlen)
+ {
+ 	struct tcp_ao_getsockopt opt_in, opt_out;
+@@ -2229,7 +2232,8 @@ static int tcp_ao_copy_mkts_to_user(struct tcp_ao_info *ao_info,
+ 	/* May change in RX, while we're dumping, pre-fetch it */
+ 	current_key = READ_ONCE(ao_info->current_key);
+ 
+-	hlist_for_each_entry_rcu(key, &ao_info->head, node) {
++	hlist_for_each_entry_rcu(key, &ao_info->head, node,
++				 lockdep_sock_is_held(sk)) {
+ 		if (opt_in.get_all)
+ 			goto match;
+ 
+@@ -2309,7 +2313,7 @@ int tcp_ao_get_mkts(struct sock *sk, sockptr_t optval, sockptr_t optlen)
+ 	if (!ao_info)
+ 		return -ENOENT;
+ 
+-	return tcp_ao_copy_mkts_to_user(ao_info, optval, optlen);
++	return tcp_ao_copy_mkts_to_user(sk, ao_info, optval, optlen);
+ }
+ 
+ int tcp_ao_get_sock_info(struct sock *sk, sockptr_t optval, sockptr_t optlen)
+@@ -2396,7 +2400,7 @@ int tcp_ao_set_repair(struct sock *sk, sockptr_t optval, unsigned int optlen)
+ 	WRITE_ONCE(ao->snd_sne, cmd.snd_sne);
+ 	WRITE_ONCE(ao->rcv_sne, cmd.rcv_sne);
+ 
+-	hlist_for_each_entry_rcu(key, &ao->head, node)
++	hlist_for_each_entry_rcu(key, &ao->head, node, lockdep_sock_is_held(sk))
+ 		tcp_ao_cache_traffic_keys(sk, ao, key);
+ 
+ 	return 0;
+diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
+index 370993c03d3136..99cef92e6290cf 100644
+--- a/net/ipv4/tcp_bpf.c
++++ b/net/ipv4/tcp_bpf.c
+@@ -441,7 +441,6 @@ static int tcp_bpf_send_verdict(struct sock *sk, struct sk_psock *psock,
+ 			cork = true;
+ 			psock->cork = NULL;
+ 		}
+-		sk_msg_return(sk, msg, tosend);
+ 		release_sock(sk);
+ 
+ 		origsize = msg->sg.size;
+@@ -453,8 +452,9 @@ static int tcp_bpf_send_verdict(struct sock *sk, struct sk_psock *psock,
+ 			sock_put(sk_redir);
+ 
+ 		lock_sock(sk);
++		sk_mem_uncharge(sk, sent);
+ 		if (unlikely(ret < 0)) {
+-			int free = sk_msg_free_nocharge(sk, msg);
++			int free = sk_msg_free(sk, msg);
+ 
+ 			if (!cork)
+ 				*copied -= free;
+@@ -468,7 +468,7 @@ static int tcp_bpf_send_verdict(struct sock *sk, struct sk_psock *psock,
+ 		break;
+ 	case __SK_DROP:
+ 	default:
+-		sk_msg_free_partial(sk, msg, tosend);
++		sk_msg_free(sk, msg);
+ 		sk_msg_apply_bytes(psock, tosend);
+ 		*copied -= (tosend + delta);
+ 		return -EACCES;
+@@ -484,11 +484,8 @@ static int tcp_bpf_send_verdict(struct sock *sk, struct sk_psock *psock,
+ 		}
+ 		if (msg &&
+ 		    msg->sg.data[msg->sg.start].page_link &&
+-		    msg->sg.data[msg->sg.start].length) {
+-			if (eval == __SK_REDIRECT)
+-				sk_mem_charge(sk, tosend - sent);
++		    msg->sg.data[msg->sg.start].length)
+ 			goto more_data;
+-		}
+ 	}
+ 	return ret;
+ }
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index 5afe5e57c89b5c..a7cd433a54c9ae 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -1053,7 +1053,8 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
+ 			}
+ 
+ 			if (aoh)
+-				key.ao_key = tcp_ao_established_key(ao_info, aoh->rnext_keyid, -1);
++				key.ao_key = tcp_ao_established_key(sk, ao_info,
++								    aoh->rnext_keyid, -1);
+ 		}
+ 	}
+ 	if (key.ao_key) {
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index 2849b273b13107..ff85242720a0a9 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -1516,7 +1516,6 @@ int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
+ 	struct sk_buff_head *list = &sk->sk_receive_queue;
+ 	int rmem, err = -ENOMEM;
+ 	spinlock_t *busy = NULL;
+-	bool becomes_readable;
+ 	int size, rcvbuf;
+ 
+ 	/* Immediately drop when the receive queue is full.
+@@ -1557,19 +1556,12 @@ int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
+ 	 */
+ 	sock_skb_set_dropcount(sk, skb);
+ 
+-	becomes_readable = skb_queue_empty(list);
+ 	__skb_queue_tail(list, skb);
+ 	spin_unlock(&list->lock);
+ 
+-	if (!sock_flag(sk, SOCK_DEAD)) {
+-		if (becomes_readable ||
+-		    sk->sk_data_ready != sock_def_readable ||
+-		    READ_ONCE(sk->sk_peek_off) >= 0)
+-			INDIRECT_CALL_1(sk->sk_data_ready,
+-					sock_def_readable, sk);
+-		else
+-			sk_wake_async_rcu(sk, SOCK_WAKE_WAITD, POLL_IN);
+-	}
++	if (!sock_flag(sk, SOCK_DEAD))
++		INDIRECT_CALL_1(sk->sk_data_ready, sock_def_readable, sk);
++
+ 	busylock_release(busy);
+ 	return 0;
+ 
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index 01115e1a34cb66..f7c17388ff6aaf 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -4821,7 +4821,7 @@ inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh,
+ 			      ifm->ifa_prefixlen, extack);
+ }
+ 
+-static int modify_prefix_route(struct inet6_ifaddr *ifp,
++static int modify_prefix_route(struct net *net, struct inet6_ifaddr *ifp,
+ 			       unsigned long expires, u32 flags,
+ 			       bool modify_peer)
+ {
+@@ -4845,7 +4845,9 @@ static int modify_prefix_route(struct inet6_ifaddr *ifp,
+ 				      ifp->prefix_len,
+ 				      ifp->rt_priority, ifp->idev->dev,
+ 				      expires, flags, GFP_KERNEL);
+-	} else {
++		return 0;
++	}
++	if (f6i != net->ipv6.fib6_null_entry) {
+ 		table = f6i->fib6_table;
+ 		spin_lock_bh(&table->tb6_lock);
+ 
+@@ -4858,9 +4860,8 @@ static int modify_prefix_route(struct inet6_ifaddr *ifp,
+ 		}
+ 
+ 		spin_unlock_bh(&table->tb6_lock);
+-
+-		fib6_info_release(f6i);
+ 	}
++	fib6_info_release(f6i);
+ 
+ 	return 0;
+ }
+@@ -4939,7 +4940,7 @@ static int inet6_addr_modify(struct net *net, struct inet6_ifaddr *ifp,
+ 		int rc = -ENOENT;
+ 
+ 		if (had_prefixroute)
+-			rc = modify_prefix_route(ifp, expires, flags, false);
++			rc = modify_prefix_route(net, ifp, expires, flags, false);
+ 
+ 		/* prefix route could have been deleted; if so restore it */
+ 		if (rc == -ENOENT) {
+@@ -4949,7 +4950,7 @@ static int inet6_addr_modify(struct net *net, struct inet6_ifaddr *ifp,
+ 		}
+ 
+ 		if (had_prefixroute && !ipv6_addr_any(&ifp->peer_addr))
+-			rc = modify_prefix_route(ifp, expires, flags, true);
++			rc = modify_prefix_route(net, ifp, expires, flags, true);
+ 
+ 		if (rc == -ENOENT && !ipv6_addr_any(&ifp->peer_addr)) {
+ 			addrconf_prefix_route(&ifp->peer_addr, ifp->prefix_len,
+diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
+index ba69b86f1c7d5e..f60ec8b0f8ea40 100644
+--- a/net/ipv6/af_inet6.c
++++ b/net/ipv6/af_inet6.c
+@@ -252,31 +252,29 @@ static int inet6_create(struct net *net, struct socket *sock, int protocol,
+ 		 */
+ 		inet->inet_sport = htons(inet->inet_num);
+ 		err = sk->sk_prot->hash(sk);
+-		if (err) {
+-			sk_common_release(sk);
+-			goto out;
+-		}
++		if (err)
++			goto out_sk_release;
+ 	}
+ 	if (sk->sk_prot->init) {
+ 		err = sk->sk_prot->init(sk);
+-		if (err) {
+-			sk_common_release(sk);
+-			goto out;
+-		}
++		if (err)
++			goto out_sk_release;
+ 	}
+ 
+ 	if (!kern) {
+ 		err = BPF_CGROUP_RUN_PROG_INET_SOCK(sk);
+-		if (err) {
+-			sk_common_release(sk);
+-			goto out;
+-		}
++		if (err)
++			goto out_sk_release;
+ 	}
+ out:
+ 	return err;
+ out_rcu_unlock:
+ 	rcu_read_unlock();
+ 	goto out;
++out_sk_release:
++	sk_common_release(sk);
++	sock->sk = NULL;
++	goto out;
+ }
+ 
+ static int __inet6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len,
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index cff4fbbc66efb2..8ebfed5d63232e 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -2780,10 +2780,10 @@ static void ip6_negative_advice(struct sock *sk,
+ 	if (rt->rt6i_flags & RTF_CACHE) {
+ 		rcu_read_lock();
+ 		if (rt6_check_expired(rt)) {
+-			/* counteract the dst_release() in sk_dst_reset() */
+-			dst_hold(dst);
++			/* rt/dst can not be destroyed yet,
++			 * because of rcu_read_lock()
++			 */
+ 			sk_dst_reset(sk);
+-
+ 			rt6_remove_exception_rt(rt);
+ 		}
+ 		rcu_read_unlock();
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index c9de5ef8f26750..59173f58ce9923 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -1169,8 +1169,8 @@ static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
+ 			if (tcp_parse_auth_options(tcp_hdr(skb), NULL, &aoh))
+ 				goto out;
+ 			if (aoh)
+-				key.ao_key = tcp_ao_established_key(ao_info,
+-						aoh->rnext_keyid, -1);
++				key.ao_key = tcp_ao_established_key(sk, ao_info,
++								    aoh->rnext_keyid, -1);
+ 		}
+ 	}
+ 	if (key.ao_key) {
+diff --git a/net/mptcp/diag.c b/net/mptcp/diag.c
+index 2d3efb405437d8..02205f7994d752 100644
+--- a/net/mptcp/diag.c
++++ b/net/mptcp/diag.c
+@@ -47,7 +47,7 @@ static int subflow_get_info(struct sock *sk, struct sk_buff *skb)
+ 		flags |= MPTCP_SUBFLOW_FLAG_BKUP_REM;
+ 	if (sf->request_bkup)
+ 		flags |= MPTCP_SUBFLOW_FLAG_BKUP_LOC;
+-	if (sf->fully_established)
++	if (READ_ONCE(sf->fully_established))
+ 		flags |= MPTCP_SUBFLOW_FLAG_FULLY_ESTABLISHED;
+ 	if (sf->conn_finished)
+ 		flags |= MPTCP_SUBFLOW_FLAG_CONNECTED;
+diff --git a/net/mptcp/options.c b/net/mptcp/options.c
+index 370c3836b7712f..1603b3702e2207 100644
+--- a/net/mptcp/options.c
++++ b/net/mptcp/options.c
+@@ -461,7 +461,7 @@ static bool mptcp_established_options_mp(struct sock *sk, struct sk_buff *skb,
+ 		return false;
+ 
+ 	/* MPC/MPJ needed only on 3rd ack packet, DATA_FIN and TCP shutdown take precedence */
+-	if (subflow->fully_established || snd_data_fin_enable ||
++	if (READ_ONCE(subflow->fully_established) || snd_data_fin_enable ||
+ 	    subflow->snd_isn != TCP_SKB_CB(skb)->seq ||
+ 	    sk->sk_state != TCP_ESTABLISHED)
+ 		return false;
+@@ -930,7 +930,7 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *ssk,
+ 	/* here we can process OoO, in-window pkts, only in-sequence 4th ack
+ 	 * will make the subflow fully established
+ 	 */
+-	if (likely(subflow->fully_established)) {
++	if (likely(READ_ONCE(subflow->fully_established))) {
+ 		/* on passive sockets, check for 3rd ack retransmission
+ 		 * note that msk is always set by subflow_syn_recv_sock()
+ 		 * for mp_join subflows
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index 48d480982b7870..8a8e8fee337f5e 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -2728,8 +2728,8 @@ void mptcp_reset_tout_timer(struct mptcp_sock *msk, unsigned long fail_tout)
+ 	if (!fail_tout && !inet_csk(sk)->icsk_mtup.probe_timestamp)
+ 		return;
+ 
+-	close_timeout = inet_csk(sk)->icsk_mtup.probe_timestamp - tcp_jiffies32 + jiffies +
+-			mptcp_close_timeout(sk);
++	close_timeout = (unsigned long)inet_csk(sk)->icsk_mtup.probe_timestamp -
++			tcp_jiffies32 + jiffies + mptcp_close_timeout(sk);
+ 
+ 	/* the close timeout takes precedence on the fail one, and here at least one of
+ 	 * them is active
+@@ -3519,7 +3519,7 @@ static void schedule_3rdack_retransmission(struct sock *ssk)
+ 	struct tcp_sock *tp = tcp_sk(ssk);
+ 	unsigned long timeout;
+ 
+-	if (mptcp_subflow_ctx(ssk)->fully_established)
++	if (READ_ONCE(mptcp_subflow_ctx(ssk)->fully_established))
+ 		return;
+ 
+ 	/* reschedule with a timeout above RTT, as we must look only for drop */
+diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
+index 568a72702b080d..a93e661ef5c435 100644
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -513,7 +513,6 @@ struct mptcp_subflow_context {
+ 		request_bkup : 1,
+ 		mp_capable : 1,	    /* remote is MPTCP capable */
+ 		mp_join : 1,	    /* remote is JOINing */
+-		fully_established : 1,	    /* path validated */
+ 		pm_notified : 1,    /* PM hook called for established status */
+ 		conn_finished : 1,
+ 		map_valid : 1,
+@@ -532,10 +531,11 @@ struct mptcp_subflow_context {
+ 		is_mptfo : 1,	    /* subflow is doing TFO */
+ 		close_event_done : 1,       /* has done the post-closed part */
+ 		mpc_drop : 1,	    /* the MPC option has been dropped in a rtx */
+-		__unused : 8;
++		__unused : 9;
+ 	bool	data_avail;
+ 	bool	scheduled;
+ 	bool	pm_listener;	    /* a listener managed by the kernel PM? */
++	bool	fully_established;  /* path validated */
+ 	u32	remote_nonce;
+ 	u64	thmac;
+ 	u32	local_nonce;
+@@ -780,7 +780,7 @@ static inline bool __tcp_can_send(const struct sock *ssk)
+ static inline bool __mptcp_subflow_active(struct mptcp_subflow_context *subflow)
+ {
+ 	/* can't send if JOIN hasn't completed yet (i.e. is usable for mptcp) */
+-	if (subflow->request_join && !subflow->fully_established)
++	if (subflow->request_join && !READ_ONCE(subflow->fully_established))
+ 		return false;
+ 
+ 	return __tcp_can_send(mptcp_subflow_tcp_sock(subflow));
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index 6170f2fff71e4f..860903e0642255 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -800,7 +800,7 @@ void __mptcp_subflow_fully_established(struct mptcp_sock *msk,
+ 				       const struct mptcp_options_received *mp_opt)
+ {
+ 	subflow_set_remote_key(msk, subflow, mp_opt);
+-	subflow->fully_established = 1;
++	WRITE_ONCE(subflow->fully_established, true);
+ 	WRITE_ONCE(msk->fully_established, true);
+ 
+ 	if (subflow->is_mptfo)
+@@ -2062,7 +2062,7 @@ static void subflow_ulp_clone(const struct request_sock *req,
+ 	} else if (subflow_req->mp_join) {
+ 		new_ctx->ssn_offset = subflow_req->ssn_offset;
+ 		new_ctx->mp_join = 1;
+-		new_ctx->fully_established = 1;
++		WRITE_ONCE(new_ctx->fully_established, true);
+ 		new_ctx->remote_key_valid = 1;
+ 		new_ctx->backup = subflow_req->backup;
+ 		new_ctx->request_bkup = subflow_req->request_bkup;
+diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
+index 61431690cbd5f1..cc20e6d56807c6 100644
+--- a/net/netfilter/ipset/ip_set_core.c
++++ b/net/netfilter/ipset/ip_set_core.c
+@@ -104,14 +104,19 @@ find_set_type(const char *name, u8 family, u8 revision)
+ static bool
+ load_settype(const char *name)
+ {
++	if (!try_module_get(THIS_MODULE))
++		return false;
++
+ 	nfnl_unlock(NFNL_SUBSYS_IPSET);
+ 	pr_debug("try to load ip_set_%s\n", name);
+ 	if (request_module("ip_set_%s", name) < 0) {
+ 		pr_warn("Can't find ip_set type %s\n", name);
+ 		nfnl_lock(NFNL_SUBSYS_IPSET);
++		module_put(THIS_MODULE);
+ 		return false;
+ 	}
+ 	nfnl_lock(NFNL_SUBSYS_IPSET);
++	module_put(THIS_MODULE);
+ 	return true;
+ }
+ 
+diff --git a/net/netfilter/ipvs/ip_vs_proto.c b/net/netfilter/ipvs/ip_vs_proto.c
+index f100da4ba3bc3c..a9fd1d3fc2cbfe 100644
+--- a/net/netfilter/ipvs/ip_vs_proto.c
++++ b/net/netfilter/ipvs/ip_vs_proto.c
+@@ -340,7 +340,7 @@ void __net_exit ip_vs_protocol_net_cleanup(struct netns_ipvs *ipvs)
+ 
+ int __init ip_vs_protocol_init(void)
+ {
+-	char protocols[64];
++	char protocols[64] = { 0 };
+ #define REGISTER_PROTOCOL(p)			\
+ 	do {					\
+ 		register_ip_vs_protocol(p);	\
+@@ -348,8 +348,6 @@ int __init ip_vs_protocol_init(void)
+ 		strcat(protocols, (p)->name);	\
+ 	} while (0)
+ 
+-	protocols[0] = '\0';
+-	protocols[2] = '\0';
+ #ifdef CONFIG_IP_VS_PROTO_TCP
+ 	REGISTER_PROTOCOL(&ip_vs_protocol_tcp);
+ #endif
+diff --git a/net/netfilter/nft_inner.c b/net/netfilter/nft_inner.c
+index 928312d01eb1d6..817ab978d24a19 100644
+--- a/net/netfilter/nft_inner.c
++++ b/net/netfilter/nft_inner.c
+@@ -210,35 +210,66 @@ static int nft_inner_parse(const struct nft_inner *priv,
+ 			   struct nft_pktinfo *pkt,
+ 			   struct nft_inner_tun_ctx *tun_ctx)
+ {
+-	struct nft_inner_tun_ctx ctx = {};
+ 	u32 off = pkt->inneroff;
+ 
+ 	if (priv->flags & NFT_INNER_HDRSIZE &&
+-	    nft_inner_parse_tunhdr(priv, pkt, &ctx, &off) < 0)
++	    nft_inner_parse_tunhdr(priv, pkt, tun_ctx, &off) < 0)
+ 		return -1;
+ 
+ 	if (priv->flags & (NFT_INNER_LL | NFT_INNER_NH)) {
+-		if (nft_inner_parse_l2l3(priv, pkt, &ctx, off) < 0)
++		if (nft_inner_parse_l2l3(priv, pkt, tun_ctx, off) < 0)
+ 			return -1;
+ 	} else if (priv->flags & NFT_INNER_TH) {
+-		ctx.inner_thoff = off;
+-		ctx.flags |= NFT_PAYLOAD_CTX_INNER_TH;
++		tun_ctx->inner_thoff = off;
++		tun_ctx->flags |= NFT_PAYLOAD_CTX_INNER_TH;
+ 	}
+ 
+-	*tun_ctx = ctx;
+ 	tun_ctx->type = priv->type;
++	tun_ctx->cookie = (unsigned long)pkt->skb;
+ 	pkt->flags |= NFT_PKTINFO_INNER_FULL;
+ 
+ 	return 0;
+ }
+ 
++static bool nft_inner_restore_tun_ctx(const struct nft_pktinfo *pkt,
++				      struct nft_inner_tun_ctx *tun_ctx)
++{
++	struct nft_inner_tun_ctx *this_cpu_tun_ctx;
++
++	local_bh_disable();
++	this_cpu_tun_ctx = this_cpu_ptr(&nft_pcpu_tun_ctx);
++	if (this_cpu_tun_ctx->cookie != (unsigned long)pkt->skb) {
++		local_bh_enable();
++		return false;
++	}
++	*tun_ctx = *this_cpu_tun_ctx;
++	local_bh_enable();
++
++	return true;
++}
++
++static void nft_inner_save_tun_ctx(const struct nft_pktinfo *pkt,
++				   const struct nft_inner_tun_ctx *tun_ctx)
++{
++	struct nft_inner_tun_ctx *this_cpu_tun_ctx;
++
++	local_bh_disable();
++	this_cpu_tun_ctx = this_cpu_ptr(&nft_pcpu_tun_ctx);
++	if (this_cpu_tun_ctx->cookie != tun_ctx->cookie)
++		*this_cpu_tun_ctx = *tun_ctx;
++	local_bh_enable();
++}
++
+ static bool nft_inner_parse_needed(const struct nft_inner *priv,
+ 				   const struct nft_pktinfo *pkt,
+-				   const struct nft_inner_tun_ctx *tun_ctx)
++				   struct nft_inner_tun_ctx *tun_ctx)
+ {
+ 	if (!(pkt->flags & NFT_PKTINFO_INNER_FULL))
+ 		return true;
+ 
++	if (!nft_inner_restore_tun_ctx(pkt, tun_ctx))
++		return true;
++
+ 	if (priv->type != tun_ctx->type)
+ 		return true;
+ 
+@@ -248,27 +279,29 @@ static bool nft_inner_parse_needed(const struct nft_inner *priv,
+ static void nft_inner_eval(const struct nft_expr *expr, struct nft_regs *regs,
+ 			   const struct nft_pktinfo *pkt)
+ {
+-	struct nft_inner_tun_ctx *tun_ctx = this_cpu_ptr(&nft_pcpu_tun_ctx);
+ 	const struct nft_inner *priv = nft_expr_priv(expr);
++	struct nft_inner_tun_ctx tun_ctx = {};
+ 
+ 	if (nft_payload_inner_offset(pkt) < 0)
+ 		goto err;
+ 
+-	if (nft_inner_parse_needed(priv, pkt, tun_ctx) &&
+-	    nft_inner_parse(priv, (struct nft_pktinfo *)pkt, tun_ctx) < 0)
++	if (nft_inner_parse_needed(priv, pkt, &tun_ctx) &&
++	    nft_inner_parse(priv, (struct nft_pktinfo *)pkt, &tun_ctx) < 0)
+ 		goto err;
+ 
+ 	switch (priv->expr_type) {
+ 	case NFT_INNER_EXPR_PAYLOAD:
+-		nft_payload_inner_eval((struct nft_expr *)&priv->expr, regs, pkt, tun_ctx);
++		nft_payload_inner_eval((struct nft_expr *)&priv->expr, regs, pkt, &tun_ctx);
+ 		break;
+ 	case NFT_INNER_EXPR_META:
+-		nft_meta_inner_eval((struct nft_expr *)&priv->expr, regs, pkt, tun_ctx);
++		nft_meta_inner_eval((struct nft_expr *)&priv->expr, regs, pkt, &tun_ctx);
+ 		break;
+ 	default:
+ 		WARN_ON_ONCE(1);
+ 		goto err;
+ 	}
++	nft_inner_save_tun_ctx(pkt, &tun_ctx);
++
+ 	return;
+ err:
+ 	regs->verdict.code = NFT_BREAK;
+diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c
+index daa56dda737ae2..b93f046ac7d1e1 100644
+--- a/net/netfilter/nft_set_hash.c
++++ b/net/netfilter/nft_set_hash.c
+@@ -24,11 +24,13 @@
+ struct nft_rhash {
+ 	struct rhashtable		ht;
+ 	struct delayed_work		gc_work;
++	u32				wq_gc_seq;
+ };
+ 
+ struct nft_rhash_elem {
+ 	struct nft_elem_priv		priv;
+ 	struct rhash_head		node;
++	u32				wq_gc_seq;
+ 	struct nft_set_ext		ext;
+ };
+ 
+@@ -338,6 +340,10 @@ static void nft_rhash_gc(struct work_struct *work)
+ 	if (!gc)
+ 		goto done;
+ 
++	/* Elements never collected use a zero gc worker sequence number. */
++	if (unlikely(++priv->wq_gc_seq == 0))
++		priv->wq_gc_seq++;
++
+ 	rhashtable_walk_enter(&priv->ht, &hti);
+ 	rhashtable_walk_start(&hti);
+ 
+@@ -355,6 +361,14 @@ static void nft_rhash_gc(struct work_struct *work)
+ 			goto try_later;
+ 		}
+ 
++		/* rhashtable walk is unstable, already seen in this gc run?
++		 * Then, skip this element. In case of (unlikely) sequence
++		 * wraparound and stale element wq_gc_seq, next gc run will
++		 * just find this expired element.
++		 */
++		if (he->wq_gc_seq == priv->wq_gc_seq)
++			continue;
++
+ 		if (nft_set_elem_is_dead(&he->ext))
+ 			goto dead_elem;
+ 
+@@ -371,6 +385,8 @@ static void nft_rhash_gc(struct work_struct *work)
+ 		if (!gc)
+ 			goto try_later;
+ 
++		/* annotate gc sequence for this attempt. */
++		he->wq_gc_seq = priv->wq_gc_seq;
+ 		nft_trans_gc_elem_add(gc, he);
+ 	}
+ 
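Note on the nft_set_hash.c hunk above: because an rhashtable walk can visit the same element more than once, the GC stamps each element with the sequence number of the current run and skips anything already stamped, reserving zero for never-collected elements. A standalone sketch of that skip-once-per-run pattern (struct and function names here are illustrative, not the kernel's):

    struct walker { unsigned int seq; };
    struct elem   { unsigned int seen_seq; };	/* 0 == never visited */

    static void gc_run_begin(struct walker *w)
    {
    	if (++w->seq == 0)	/* skip 0 on wraparound: it is reserved */
    		w->seq++;
    }

    static int gc_already_seen(const struct walker *w, const struct elem *e)
    {
    	return e->seen_seq == w->seq;	/* visited earlier in this run */
    }

    static void gc_mark_seen(const struct walker *w, struct elem *e)
    {
    	e->seen_seq = w->seq;
    }

As the in-tree comment notes, a stale seen_seq after a (rare) wraparound is harmless: the expired element is simply picked up by a later run.
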
+diff --git a/net/netfilter/nft_socket.c b/net/netfilter/nft_socket.c
+index f5da0c1775f2e7..35d0409b009501 100644
+--- a/net/netfilter/nft_socket.c
++++ b/net/netfilter/nft_socket.c
+@@ -68,7 +68,7 @@ static noinline int nft_socket_cgroup_subtree_level(void)
+ 
+ 	cgroup_put(cgrp);
+ 
+-	if (WARN_ON_ONCE(level > 255))
++	if (level > 255)
+ 		return -ERANGE;
+ 
+ 	if (WARN_ON_ONCE(level < 0))
+diff --git a/net/netfilter/xt_LED.c b/net/netfilter/xt_LED.c
+index f7b0286d106ac1..8a80fd76fe45b2 100644
+--- a/net/netfilter/xt_LED.c
++++ b/net/netfilter/xt_LED.c
+@@ -96,7 +96,9 @@ static int led_tg_check(const struct xt_tgchk_param *par)
+ 	struct xt_led_info_internal *ledinternal;
+ 	int err;
+ 
+-	if (ledinfo->id[0] == '\0')
++	/* Bail out if empty string or not a string at all. */
++	if (ledinfo->id[0] == '\0' ||
++	    !memchr(ledinfo->id, '\0', sizeof(ledinfo->id)))
+ 		return -EINVAL;
+ 
+ 	mutex_lock(&xt_led_mutex);
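Note on the xt_LED.c hunk above: ledinfo->id is a fixed-size array filled in from userspace, so it may only be treated as a C string if a terminating '\0' exists somewhere inside the buffer. A self-contained sketch of the added validation (the helper name is illustrative):

    #include <string.h>

    /* Reject both the empty string and a buffer with no terminator at
     * all; memchr() scans the full fixed size rather than trusting the
     * caller to have NUL-terminated the array.
     */
    static int id_is_valid(const char *id, size_t size)
    {
    	return id[0] != '\0' && memchr(id, '\0', size) != NULL;
    }
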
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index a705ec21425409..97774bd4b6cb11 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -3421,17 +3421,17 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
+ 	if (sock->type == SOCK_PACKET)
+ 		sock->ops = &packet_ops_spkt;
+ 
++	po = pkt_sk(sk);
++	err = packet_alloc_pending(po);
++	if (err)
++		goto out_sk_free;
++
+ 	sock_init_data(sock, sk);
+ 
+-	po = pkt_sk(sk);
+ 	init_completion(&po->skb_completion);
+ 	sk->sk_family = PF_PACKET;
+ 	po->num = proto;
+ 
+-	err = packet_alloc_pending(po);
+-	if (err)
+-		goto out2;
+-
+ 	packet_cached_dev_reset(po);
+ 
+ 	sk->sk_destruct = packet_sock_destruct;
+@@ -3463,7 +3463,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
+ 	sock_prot_inuse_add(net, &packet_proto, 1);
+ 
+ 	return 0;
+-out2:
++out_sk_free:
+ 	sk_free(sk);
+ out:
+ 	return err;
+diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
+index e280c27cb9f9af..1008ec8a464c93 100644
+--- a/net/sched/cls_flower.c
++++ b/net/sched/cls_flower.c
+@@ -1369,7 +1369,6 @@ static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key,
+ 	int err;
+ 
+ 	md = (struct erspan_metadata *)&key->enc_opts.data[key->enc_opts.len];
+-	memset(md, 0xff, sizeof(*md));
+ 	md->version = 1;
+ 
+ 	if (!depth)
+@@ -1398,9 +1397,9 @@ static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key,
+ 			NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option index");
+ 			return -EINVAL;
+ 		}
++		memset(&md->u.index, 0xff, sizeof(md->u.index));
+ 		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
+ 			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX];
+-			memset(&md->u, 0x00, sizeof(md->u));
+ 			md->u.index = nla_get_be32(nla);
+ 		}
+ 	} else if (md->version == 2) {
+@@ -1409,10 +1408,12 @@ static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key,
+ 			NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option dir or hwid");
+ 			return -EINVAL;
+ 		}
++		md->u.md2.dir = 1;
+ 		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]) {
+ 			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR];
+ 			md->u.md2.dir = nla_get_u8(nla);
+ 		}
++		set_hwid(&md->u.md2, 0xff);
+ 		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]) {
+ 			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID];
+ 			set_hwid(&md->u.md2, nla_get_u8(nla));
+diff --git a/net/sched/sch_cbs.c b/net/sched/sch_cbs.c
+index 939425da18955b..8c9a0400c8622c 100644
+--- a/net/sched/sch_cbs.c
++++ b/net/sched/sch_cbs.c
+@@ -310,7 +310,7 @@ static void cbs_set_port_rate(struct net_device *dev, struct cbs_sched_data *q)
+ {
+ 	struct ethtool_link_ksettings ecmd;
+ 	int speed = SPEED_10;
+-	int port_rate;
++	s64 port_rate;
+ 	int err;
+ 
+ 	err = __ethtool_get_link_ksettings(dev, &ecmd);
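Note on the sch_cbs.c hunk above: link speed from ethtool is reported in Mbit/s, and converting a multi-gigabit speed to a byte rate no longer fits in 32 bits (25000 Mbit/s at 125000 bytes/s per Mbit/s already exceeds INT_MAX), which is presumably why port_rate is widened to s64 here. A sketch of the conversion with the overflow made explicit (helper name is illustrative):

    #include <stdint.h>

    static int64_t mbit_to_bytes_per_sec(int speed_mbit)
    {
    	/* 1 Mbit/s = 1000000/8 = 125000 bytes/s; the cast forces the
    	 * multiplication into 64 bits instead of int.
    	 */
    	return (int64_t)speed_mbit * 125000;
    }
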
+diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
+index f1d09183ae632d..dc26b22d53c734 100644
+--- a/net/sched/sch_tbf.c
++++ b/net/sched/sch_tbf.c
+@@ -208,7 +208,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch,
+ 	struct tbf_sched_data *q = qdisc_priv(sch);
+ 	struct sk_buff *segs, *nskb;
+ 	netdev_features_t features = netif_skb_features(skb);
+-	unsigned int len = 0, prev_len = qdisc_pkt_len(skb);
++	unsigned int len = 0, prev_len = qdisc_pkt_len(skb), seg_len;
+ 	int ret, nb;
+ 
+ 	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
+@@ -219,21 +219,27 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch,
+ 	nb = 0;
+ 	skb_list_walk_safe(segs, segs, nskb) {
+ 		skb_mark_not_on_list(segs);
+-		qdisc_skb_cb(segs)->pkt_len = segs->len;
+-		len += segs->len;
++		seg_len = segs->len;
++		qdisc_skb_cb(segs)->pkt_len = seg_len;
+ 		ret = qdisc_enqueue(segs, q->qdisc, to_free);
+ 		if (ret != NET_XMIT_SUCCESS) {
+ 			if (net_xmit_drop_count(ret))
+ 				qdisc_qstats_drop(sch);
+ 		} else {
+ 			nb++;
++			len += seg_len;
+ 		}
+ 	}
+ 	sch->q.qlen += nb;
+-	if (nb > 1)
++	sch->qstats.backlog += len;
++	if (nb > 0) {
+ 		qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
+-	consume_skb(skb);
+-	return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
++		consume_skb(skb);
++		return NET_XMIT_SUCCESS;
++	}
++
++	kfree_skb(skb);
++	return NET_XMIT_DROP;
+ }
+ 
+ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
+index 9d76e902fd770f..9e6c69d18581ce 100644
+--- a/net/smc/af_smc.c
++++ b/net/smc/af_smc.c
+@@ -383,6 +383,7 @@ void smc_sk_init(struct net *net, struct sock *sk, int protocol)
+ 	smc->limit_smc_hs = net->smc.limit_smc_hs;
+ 	smc->use_fallback = false; /* assume rdma capability first */
+ 	smc->fallback_rsn = 0;
++	smc_close_init(smc);
+ }
+ 
+ static struct sock *smc_sock_alloc(struct net *net, struct socket *sock,
+@@ -1299,7 +1300,6 @@ static int smc_connect_rdma(struct smc_sock *smc,
+ 		goto connect_abort;
+ 	}
+ 
+-	smc_close_init(smc);
+ 	smc_rx_init(smc);
+ 
+ 	if (ini->first_contact_local) {
+@@ -1435,7 +1435,6 @@ static int smc_connect_ism(struct smc_sock *smc,
+ 			goto connect_abort;
+ 		}
+ 	}
+-	smc_close_init(smc);
+ 	smc_rx_init(smc);
+ 	smc_tx_init(smc);
+ 
+@@ -1901,6 +1900,7 @@ static void smc_listen_out(struct smc_sock *new_smc)
+ 	if (tcp_sk(new_smc->clcsock->sk)->syn_smc)
+ 		atomic_dec(&lsmc->queued_smc_hs);
+ 
++	release_sock(newsmcsk); /* lock in smc_listen_work() */
+ 	if (lsmc->sk.sk_state == SMC_LISTEN) {
+ 		lock_sock_nested(&lsmc->sk, SINGLE_DEPTH_NESTING);
+ 		smc_accept_enqueue(&lsmc->sk, newsmcsk);
+@@ -2422,6 +2422,7 @@ static void smc_listen_work(struct work_struct *work)
+ 	u8 accept_version;
+ 	int rc = 0;
+ 
++	lock_sock(&new_smc->sk); /* release in smc_listen_out() */
+ 	if (new_smc->listen_smc->sk.sk_state != SMC_LISTEN)
+ 		return smc_listen_out_err(new_smc);
+ 
+@@ -2479,7 +2480,6 @@ static void smc_listen_work(struct work_struct *work)
+ 		goto out_decl;
+ 
+ 	mutex_lock(&smc_server_lgr_pending);
+-	smc_close_init(new_smc);
+ 	smc_rx_init(new_smc);
+ 	smc_tx_init(new_smc);
+ 
+diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
+index 439f7553997728..b7e25e7e9933b6 100644
+--- a/net/tipc/udp_media.c
++++ b/net/tipc/udp_media.c
+@@ -814,10 +814,10 @@ static void cleanup_bearer(struct work_struct *work)
+ 		kfree_rcu(rcast, rcu);
+ 	}
+ 
+-	atomic_dec(&tipc_net(sock_net(ub->ubsock->sk))->wq_count);
+ 	dst_cache_destroy(&ub->rcast.dst_cache);
+ 	udp_tunnel_sock_release(ub->ubsock);
+ 	synchronize_net();
++	atomic_dec(&tipc_net(sock_net(ub->ubsock->sk))->wq_count);
+ 	kfree(ub);
+ }
+ 
+diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
+index dfd29160fe11c4..b52b798aa4c292 100644
+--- a/net/vmw_vsock/af_vsock.c
++++ b/net/vmw_vsock/af_vsock.c
+@@ -117,12 +117,14 @@
+ static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr);
+ static void vsock_sk_destruct(struct sock *sk);
+ static int vsock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
++static void vsock_close(struct sock *sk, long timeout);
+ 
+ /* Protocol family. */
+ struct proto vsock_proto = {
+ 	.name = "AF_VSOCK",
+ 	.owner = THIS_MODULE,
+ 	.obj_size = sizeof(struct vsock_sock),
++	.close = vsock_close,
+ #ifdef CONFIG_BPF_SYSCALL
+ 	.psock_update_sk_prot = vsock_bpf_update_proto,
+ #endif
+@@ -797,39 +799,37 @@ static bool sock_type_connectible(u16 type)
+ 
+ static void __vsock_release(struct sock *sk, int level)
+ {
+-	if (sk) {
+-		struct sock *pending;
+-		struct vsock_sock *vsk;
+-
+-		vsk = vsock_sk(sk);
+-		pending = NULL;	/* Compiler warning. */
++	struct vsock_sock *vsk;
++	struct sock *pending;
+ 
+-		/* When "level" is SINGLE_DEPTH_NESTING, use the nested
+-		 * version to avoid the warning "possible recursive locking
+-		 * detected". When "level" is 0, lock_sock_nested(sk, level)
+-		 * is the same as lock_sock(sk).
+-		 */
+-		lock_sock_nested(sk, level);
++	vsk = vsock_sk(sk);
++	pending = NULL;	/* Compiler warning. */
+ 
+-		if (vsk->transport)
+-			vsk->transport->release(vsk);
+-		else if (sock_type_connectible(sk->sk_type))
+-			vsock_remove_sock(vsk);
++	/* When "level" is SINGLE_DEPTH_NESTING, use the nested
++	 * version to avoid the warning "possible recursive locking
++	 * detected". When "level" is 0, lock_sock_nested(sk, level)
++	 * is the same as lock_sock(sk).
++	 */
++	lock_sock_nested(sk, level);
+ 
+-		sock_orphan(sk);
+-		sk->sk_shutdown = SHUTDOWN_MASK;
++	if (vsk->transport)
++		vsk->transport->release(vsk);
++	else if (sock_type_connectible(sk->sk_type))
++		vsock_remove_sock(vsk);
+ 
+-		skb_queue_purge(&sk->sk_receive_queue);
++	sock_orphan(sk);
++	sk->sk_shutdown = SHUTDOWN_MASK;
+ 
+-		/* Clean up any sockets that never were accepted. */
+-		while ((pending = vsock_dequeue_accept(sk)) != NULL) {
+-			__vsock_release(pending, SINGLE_DEPTH_NESTING);
+-			sock_put(pending);
+-		}
++	skb_queue_purge(&sk->sk_receive_queue);
+ 
+-		release_sock(sk);
+-		sock_put(sk);
++	/* Clean up any sockets that never were accepted. */
++	while ((pending = vsock_dequeue_accept(sk)) != NULL) {
++		__vsock_release(pending, SINGLE_DEPTH_NESTING);
++		sock_put(pending);
+ 	}
++
++	release_sock(sk);
++	sock_put(sk);
+ }
+ 
+ static void vsock_sk_destruct(struct sock *sk)
+@@ -901,9 +901,22 @@ void vsock_data_ready(struct sock *sk)
+ }
+ EXPORT_SYMBOL_GPL(vsock_data_ready);
+ 
++/* Dummy callback required by sockmap.
++ * See unconditional call of saved_close() in sock_map_close().
++ */
++static void vsock_close(struct sock *sk, long timeout)
++{
++}
++
+ static int vsock_release(struct socket *sock)
+ {
+-	__vsock_release(sock->sk, 0);
++	struct sock *sk = sock->sk;
++
++	if (!sk)
++		return 0;
++
++	sk->sk_prot->close(sk, 0);
++	__vsock_release(sk, 0);
+ 	sock->sk = NULL;
+ 	sock->state = SS_FREE;
+ 
+@@ -1054,6 +1067,9 @@ static __poll_t vsock_poll(struct file *file, struct socket *sock,
+ 		mask |= EPOLLRDHUP;
+ 	}
+ 
++	if (sk_is_readable(sk))
++		mask |= EPOLLIN | EPOLLRDNORM;
++
+ 	if (sock->type == SOCK_DGRAM) {
+ 		/* For datagram sockets we can read if there is something in
+ 		 * the queue and write as long as the socket isn't shutdown for
+diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
+index 521a2938e50a12..0662d34b09ee78 100644
+--- a/net/xdp/xsk_buff_pool.c
++++ b/net/xdp/xsk_buff_pool.c
+@@ -387,10 +387,9 @@ void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs)
+ 		return;
+ 	}
+ 
+-	if (!refcount_dec_and_test(&dma_map->users))
+-		return;
++	if (refcount_dec_and_test(&dma_map->users))
++		__xp_dma_unmap(dma_map, attrs);
+ 
+-	__xp_dma_unmap(dma_map, attrs);
+ 	kvfree(pool->dma_pages);
+ 	pool->dma_pages = NULL;
+ 	pool->dma_pages_cnt = 0;
+diff --git a/net/xdp/xskmap.c b/net/xdp/xskmap.c
+index e1c526f97ce31f..afa457506274c1 100644
+--- a/net/xdp/xskmap.c
++++ b/net/xdp/xskmap.c
+@@ -224,7 +224,7 @@ static long xsk_map_delete_elem(struct bpf_map *map, void *key)
+ 	struct xsk_map *m = container_of(map, struct xsk_map, map);
+ 	struct xdp_sock __rcu **map_entry;
+ 	struct xdp_sock *old_xs;
+-	int k = *(u32 *)key;
++	u32 k = *(u32 *)key;
+ 
+ 	if (k >= map->max_entries)
+ 		return -EINVAL;
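Note on the xskmap.c hunk above: with a signed index, the bounds check and the array access disagree. In "k >= map->max_entries" the int is promoted to unsigned, but the later indexing uses the signed value, so when max_entries can exceed INT_MAX a key at or above 2^31 may pass the check and then index with a negative offset. A self-contained sketch of the pitfall and the fix (function names and the slots array are illustrative):

    #include <stdint.h>

    static int delete_bad(void **slots, uint32_t max_entries, uint32_t key)
    {
    	int k = key;			/* key >= 2^31 wraps negative */

    	if (k >= max_entries)		/* k promoted to unsigned: may pass
    					 * when max_entries > INT_MAX */
    		return -1;
    	slots[k] = 0;			/* indexed as signed int: OOB */
    	return 0;
    }

    static int delete_good(void **slots, uint32_t max_entries, uint32_t key)
    {
    	uint32_t k = key;		/* check and indexing now agree */

    	if (k >= max_entries)
    		return -1;
    	slots[k] = 0;
    	return 0;
    }
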
+diff --git a/rust/kernel/lib.rs b/rust/kernel/lib.rs
+index 032c9089e6862d..e936254531fd0a 100644
+--- a/rust/kernel/lib.rs
++++ b/rust/kernel/lib.rs
+@@ -12,10 +12,10 @@
+ //! do so first instead of bypassing this crate.
+ 
+ #![no_std]
++#![feature(arbitrary_self_types)]
+ #![feature(coerce_unsized)]
+ #![feature(dispatch_from_dyn)]
+ #![feature(new_uninit)]
+-#![feature(receiver_trait)]
+ #![feature(unsize)]
+ 
+ // Ensure conditional compilation based on the kernel configuration works;
+diff --git a/rust/kernel/list/arc.rs b/rust/kernel/list/arc.rs
+index d801b9dc6291db..3483d8c232c4f1 100644
+--- a/rust/kernel/list/arc.rs
++++ b/rust/kernel/list/arc.rs
+@@ -441,9 +441,6 @@ fn as_ref(&self) -> &Arc<T> {
+     }
+ }
+ 
+-// This is to allow [`ListArc`] (and variants) to be used as the type of `self`.
+-impl<T, const ID: u64> core::ops::Receiver for ListArc<T, ID> where T: ListArcSafe<ID> + ?Sized {}
+-
+ // This is to allow coercion from `ListArc<T>` to `ListArc<U>` if `T` can be converted to the
+ // dynamically-sized type (DST) `U`.
+ impl<T, U, const ID: u64> core::ops::CoerceUnsized<ListArc<U, ID>> for ListArc<T, ID>
+diff --git a/rust/kernel/sync/arc.rs b/rust/kernel/sync/arc.rs
+index 3021f30fd822f6..28743a7c74a847 100644
+--- a/rust/kernel/sync/arc.rs
++++ b/rust/kernel/sync/arc.rs
+@@ -171,9 +171,6 @@ unsafe fn container_of(ptr: *const T) -> NonNull<ArcInner<T>> {
+     }
+ }
+ 
+-// This is to allow [`Arc`] (and variants) to be used as the type of `self`.
+-impl<T: ?Sized> core::ops::Receiver for Arc<T> {}
+-
+ // This is to allow coercion from `Arc<T>` to `Arc<U>` if `T` can be converted to the
+ // dynamically-sized type (DST) `U`.
+ impl<T: ?Sized + Unsize<U>, U: ?Sized> core::ops::CoerceUnsized<Arc<U>> for Arc<T> {}
+@@ -480,9 +477,6 @@ pub struct ArcBorrow<'a, T: ?Sized + 'a> {
+     _p: PhantomData<&'a ()>,
+ }
+ 
+-// This is to allow [`ArcBorrow`] (and variants) to be used as the type of `self`.
+-impl<T: ?Sized> core::ops::Receiver for ArcBorrow<'_, T> {}
+-
+ // This is to allow `ArcBorrow<U>` to be dispatched on when `ArcBorrow<T>` can be coerced into
+ // `ArcBorrow<U>`.
+ impl<T: ?Sized + Unsize<U>, U: ?Sized> core::ops::DispatchFromDyn<ArcBorrow<'_, U>>
+diff --git a/samples/bpf/test_cgrp2_sock.c b/samples/bpf/test_cgrp2_sock.c
+index a0811df888f453..8ca2a445ffa155 100644
+--- a/samples/bpf/test_cgrp2_sock.c
++++ b/samples/bpf/test_cgrp2_sock.c
+@@ -178,8 +178,10 @@ static int show_sockopts(int family)
+ 		return 1;
+ 	}
+ 
+-	if (get_bind_to_device(sd, name, sizeof(name)) < 0)
++	if (get_bind_to_device(sd, name, sizeof(name)) < 0) {
++		close(sd);
+ 		return 1;
++	}
+ 
+ 	mark = get_somark(sd);
+ 	prio = get_priority(sd);
+diff --git a/scripts/Makefile.build b/scripts/Makefile.build
+index 8f423a1faf5077..880785b52c04ad 100644
+--- a/scripts/Makefile.build
++++ b/scripts/Makefile.build
+@@ -248,7 +248,7 @@ $(obj)/%.lst: $(obj)/%.c FORCE
+ # Compile Rust sources (.rs)
+ # ---------------------------------------------------------------------------
+ 
+-rust_allowed_features := new_uninit
++rust_allowed_features := arbitrary_self_types,new_uninit
+ 
+ # `--out-dir` is required to avoid temporaries being created by `rustc` in the
+ # current working directory, which may be not accessible in the out-of-tree
+diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
+index 107393a8c48a59..971eda0c6ba737 100644
+--- a/scripts/mod/modpost.c
++++ b/scripts/mod/modpost.c
+@@ -785,7 +785,7 @@ static void check_section(const char *modname, struct elf_info *elf,
+ 		".ltext", ".ltext.*"
+ #define OTHER_TEXT_SECTIONS ".ref.text", ".head.text", ".spinlock.text", \
+ 		".fixup", ".entry.text", ".exception.text", \
+-		".coldtext", ".softirqentry.text"
++		".coldtext", ".softirqentry.text", ".irqentry.text"
+ 
+ #define ALL_TEXT_SECTIONS  ".init.text", ".exit.text", \
+ 		TEXT_SECTIONS, OTHER_TEXT_SECTIONS
+diff --git a/scripts/setlocalversion b/scripts/setlocalversion
+index 38b96c6797f408..5818465abba984 100755
+--- a/scripts/setlocalversion
++++ b/scripts/setlocalversion
+@@ -30,6 +30,27 @@ if test $# -gt 0 -o ! -d "$srctree"; then
+ 	usage
+ fi
+ 
++try_tag() {
++	tag="$1"
++
++	# Is $tag an annotated tag?
++	[ "$(git cat-file -t "$tag" 2> /dev/null)" = tag ] || return 1
++
++	# Is it an ancestor of HEAD, and if so, how many commits are in $tag..HEAD?
++	# shellcheck disable=SC2046 # word splitting is the point here
++	set -- $(git rev-list --count --left-right "$tag"...HEAD 2> /dev/null)
++
++	# $1 is 0 if and only if $tag is an ancestor of HEAD. Use
++	# string comparison, because $1 is empty if the 'git rev-list'
++	# command somehow failed.
++	[ "$1" = 0 ] || return 1
++
++	# $2 is the number of commits in the range $tag..HEAD, possibly 0.
++	count="$2"
++
++	return 0
++}
++
+ scm_version()
+ {
+ 	local short=false
+@@ -61,33 +82,33 @@ scm_version()
+ 	# stable kernel:    6.1.7      ->  v6.1.7
+ 	version_tag=v$(echo "${KERNELVERSION}" | sed -E 's/^([0-9]+\.[0-9]+)\.0(.*)$/\1\2/')
+ 
++	# try_tag initializes count if the tag is usable.
++	count=
++
+ 	# If a localversion* file exists, and the corresponding
+ 	# annotated tag exists and is an ancestor of HEAD, use
+ 	# it. This is the case in linux-next.
+-	tag=${file_localversion#-}
+-	desc=
+-	if [ -n "${tag}" ]; then
+-		desc=$(git describe --match=$tag 2>/dev/null)
++	if [ -n "${file_localversion#-}" ] ; then
++		try_tag "${file_localversion#-}"
+ 	fi
+ 
+ 	# Otherwise, if a localversion* file exists, and the tag
+ 	# obtained by appending it to the tag derived from
+ 	# KERNELVERSION exists and is an ancestor of HEAD, use
+ 	# it. This is e.g. the case in linux-rt.
+-	if [ -z "${desc}" ] && [ -n "${file_localversion}" ]; then
+-		tag="${version_tag}${file_localversion}"
+-		desc=$(git describe --match=$tag 2>/dev/null)
++	if [ -z "${count}" ] && [ -n "${file_localversion}" ]; then
++		try_tag "${version_tag}${file_localversion}"
+ 	fi
+ 
+ 	# Otherwise, default to the annotated tag derived from KERNELVERSION.
+-	if [ -z "${desc}" ]; then
+-		tag="${version_tag}"
+-		desc=$(git describe --match=$tag 2>/dev/null)
++	if [ -z "${count}" ]; then
++		try_tag "${version_tag}"
+ 	fi
+ 
+-	# If we are at the tagged commit, we ignore it because the version is
+-	# well-defined.
+-	if [ "${tag}" != "${desc}" ]; then
++	# If we are at the tagged commit, we ignore it because the
++	# version is well-defined. If none of the attempted tags exist
++	# or were usable, $count is still empty.
++	if [ -z "${count}" ] || [ "${count}" -gt 0 ]; then
+ 
+ 		# If only the short version is requested, don't bother
+ 		# running further git commands
+@@ -95,14 +116,15 @@ scm_version()
+ 			echo "+"
+ 			return
+ 		fi
++
+ 		# If we are past the tagged commit, we pretty print it.
+ 		# (like 6.1.0-14595-g292a089d78d3)
+-		if [ -n "${desc}" ]; then
+-			echo "${desc}" | awk -F- '{printf("-%05d", $(NF-1))}'
++		if [ -n "${count}" ]; then
++			printf "%s%05d" "-" "${count}"
+ 		fi
+ 
+ 		# Add -g and exactly 12 hex chars.
+-		printf '%s%s' -g "$(echo $head | cut -c1-12)"
++		printf '%s%.12s' -g "$head"
+ 	fi
+ 
+ 	if ${no_dirty}; then
+diff --git a/sound/core/seq/seq_ump_client.c b/sound/core/seq/seq_ump_client.c
+index e5d3f4d206bf6a..e956f17f379282 100644
+--- a/sound/core/seq/seq_ump_client.c
++++ b/sound/core/seq/seq_ump_client.c
+@@ -257,12 +257,12 @@ static void update_port_infos(struct seq_ump_client *client)
+ 			continue;
+ 
+ 		old->addr.client = client->seq_client;
+-		old->addr.port = i;
++		old->addr.port = ump_group_to_seq_port(i);
+ 		err = snd_seq_kernel_client_ctl(client->seq_client,
+ 						SNDRV_SEQ_IOCTL_GET_PORT_INFO,
+ 						old);
+ 		if (err < 0)
+-			return;
++			continue;
+ 		fill_port_info(new, client, &client->ump->groups[i]);
+ 		if (old->capability == new->capability &&
+ 		    !strcmp(old->name, new->name))
+@@ -271,7 +271,7 @@ static void update_port_infos(struct seq_ump_client *client)
+ 						SNDRV_SEQ_IOCTL_SET_PORT_INFO,
+ 						new);
+ 		if (err < 0)
+-			return;
++			continue;
+ 		/* notify to system port */
+ 		snd_seq_system_client_ev_port_change(client->seq_client, i);
+ 	}
+diff --git a/sound/pci/hda/hda_auto_parser.c b/sound/pci/hda/hda_auto_parser.c
+index 7c6b1fe8dfcce3..8e74be038b0fad 100644
+--- a/sound/pci/hda/hda_auto_parser.c
++++ b/sound/pci/hda/hda_auto_parser.c
+@@ -956,6 +956,28 @@ void snd_hda_pick_pin_fixup(struct hda_codec *codec,
+ }
+ EXPORT_SYMBOL_GPL(snd_hda_pick_pin_fixup);
+ 
++/* check whether the given quirk entry matches with vendor/device pair */
++static bool hda_quirk_match(u16 vendor, u16 device, const struct hda_quirk *q)
++{
++	if (q->subvendor != vendor)
++		return false;
++	return !q->subdevice ||
++		(device & q->subdevice_mask) == q->subdevice;
++}
++
++/* look through the quirk list and return the matching entry */
++static const struct hda_quirk *
++hda_quirk_lookup_id(u16 vendor, u16 device, const struct hda_quirk *list)
++{
++	const struct hda_quirk *q;
++
++	for (q = list; q->subvendor || q->subdevice; q++) {
++		if (hda_quirk_match(vendor, device, q))
++			return q;
++	}
++	return NULL;
++}
++
+ /**
+  * snd_hda_pick_fixup - Pick up a fixup matching with PCI/codec SSID or model string
+  * @codec: the HDA codec
+@@ -975,14 +997,16 @@ EXPORT_SYMBOL_GPL(snd_hda_pick_pin_fixup);
+  */
+ void snd_hda_pick_fixup(struct hda_codec *codec,
+ 			const struct hda_model_fixup *models,
+-			const struct snd_pci_quirk *quirk,
++			const struct hda_quirk *quirk,
+ 			const struct hda_fixup *fixlist)
+ {
+-	const struct snd_pci_quirk *q;
++	const struct hda_quirk *q;
+ 	int id = HDA_FIXUP_ID_NOT_SET;
+ 	const char *name = NULL;
+ 	const char *type = NULL;
+ 	unsigned int vendor, device;
++	u16 pci_vendor, pci_device;
++	u16 codec_vendor, codec_device;
+ 
+ 	if (codec->fixup_id != HDA_FIXUP_ID_NOT_SET)
+ 		return;
+@@ -1013,27 +1037,42 @@ void snd_hda_pick_fixup(struct hda_codec *codec,
+ 	if (!quirk)
+ 		return;
+ 
++	if (codec->bus->pci) {
++		pci_vendor = codec->bus->pci->subsystem_vendor;
++		pci_device = codec->bus->pci->subsystem_device;
++	}
++
++	codec_vendor = codec->core.subsystem_id >> 16;
++	codec_device = codec->core.subsystem_id & 0xffff;
++
+ 	/* match with the SSID alias given by the model string "XXXX:YYYY" */
+ 	if (codec->modelname &&
+ 	    sscanf(codec->modelname, "%04x:%04x", &vendor, &device) == 2) {
+-		q = snd_pci_quirk_lookup_id(vendor, device, quirk);
++		q = hda_quirk_lookup_id(vendor, device, quirk);
+ 		if (q) {
+ 			type = "alias SSID";
+ 			goto found_device;
+ 		}
+ 	}
+ 
+-	/* match with the PCI SSID */
+-	q = snd_pci_quirk_lookup(codec->bus->pci, quirk);
+-	if (q) {
+-		type = "PCI SSID";
+-		goto found_device;
++	/* match primarily with the PCI SSID */
++	for (q = quirk; q->subvendor || q->subdevice; q++) {
++		/* if the entry is specific to codec SSID, check with it */
++		if (!codec->bus->pci || q->match_codec_ssid) {
++			if (hda_quirk_match(codec_vendor, codec_device, q)) {
++				type = "codec SSID";
++				goto found_device;
++			}
++		} else {
++			if (hda_quirk_match(pci_vendor, pci_device, q)) {
++				type = "PCI SSID";
++				goto found_device;
++			}
++		}
+ 	}
+ 
+ 	/* match with the codec SSID */
+-	q = snd_pci_quirk_lookup_id(codec->core.subsystem_id >> 16,
+-				    codec->core.subsystem_id & 0xffff,
+-				    quirk);
++	q = hda_quirk_lookup_id(codec_vendor, codec_device, quirk);
+ 	if (q) {
+ 		type = "codec SSID";
+ 		goto found_device;
+diff --git a/sound/pci/hda/hda_local.h b/sound/pci/hda/hda_local.h
+index 53a5a62b78fa98..763f79f6f32e70 100644
+--- a/sound/pci/hda/hda_local.h
++++ b/sound/pci/hda/hda_local.h
+@@ -292,6 +292,32 @@ struct hda_fixup {
+ 	} v;
+ };
+ 
++/*
++ * extended form of snd_pci_quirk:
++ * for PCI SSID matching, use SND_PCI_QUIRK() like before;
++ * for codec SSID matching, use the new HDA_CODEC_QUIRK() instead
++ */
++struct hda_quirk {
++	unsigned short subvendor;	/* PCI subvendor ID */
++	unsigned short subdevice;	/* PCI subdevice ID */
++	unsigned short subdevice_mask;	/* bitmask to match */
++	bool match_codec_ssid;		/* match only with codec SSID */
++	int value;			/* value */
++#ifdef CONFIG_SND_DEBUG_VERBOSE
++	const char *name;		/* name of the device (optional) */
++#endif
++};
++
++#ifdef CONFIG_SND_DEBUG_VERBOSE
++#define HDA_CODEC_QUIRK(vend, dev, xname, val) \
++	{ _SND_PCI_QUIRK_ID(vend, dev), .value = (val), .name = (xname),\
++			.match_codec_ssid = true }
++#else
++#define HDA_CODEC_QUIRK(vend, dev, xname, val) \
++	{ _SND_PCI_QUIRK_ID(vend, dev), .value = (val), \
++			.match_codec_ssid = true }
++#endif
++
+ struct snd_hda_pin_quirk {
+ 	unsigned int codec;             /* Codec vendor/device ID */
+ 	unsigned short subvendor;	/* PCI subvendor ID */
+@@ -351,7 +377,7 @@ void snd_hda_apply_fixup(struct hda_codec *codec, int action);
+ void __snd_hda_apply_fixup(struct hda_codec *codec, int id, int action, int depth);
+ void snd_hda_pick_fixup(struct hda_codec *codec,
+ 			const struct hda_model_fixup *models,
+-			const struct snd_pci_quirk *quirk,
++			const struct hda_quirk *quirk,
+ 			const struct hda_fixup *fixlist);
+ void snd_hda_pick_pin_fixup(struct hda_codec *codec,
+ 			    const struct snd_hda_pin_quirk *pin_quirk,
+diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
+index 1e9dadcdc51be2..56354fe060a1aa 100644
+--- a/sound/pci/hda/patch_analog.c
++++ b/sound/pci/hda/patch_analog.c
+@@ -345,7 +345,7 @@ static const struct hda_fixup ad1986a_fixups[] = {
+ 	},
+ };
+ 
+-static const struct snd_pci_quirk ad1986a_fixup_tbl[] = {
++static const struct hda_quirk ad1986a_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x30af, "HP B2800", AD1986A_FIXUP_LAPTOP_IMIC),
+ 	SND_PCI_QUIRK(0x1043, 0x1153, "ASUS M9V", AD1986A_FIXUP_LAPTOP_IMIC),
+ 	SND_PCI_QUIRK(0x1043, 0x1443, "ASUS Z99He", AD1986A_FIXUP_EAPD),
+@@ -588,7 +588,7 @@ static const struct hda_fixup ad1981_fixups[] = {
+ 	},
+ };
+ 
+-static const struct snd_pci_quirk ad1981_fixup_tbl[] = {
++static const struct hda_quirk ad1981_fixup_tbl[] = {
+ 	SND_PCI_QUIRK_VENDOR(0x1014, "Lenovo", AD1981_FIXUP_AMP_OVERRIDE),
+ 	SND_PCI_QUIRK_VENDOR(0x103c, "HP", AD1981_FIXUP_HP_EAPD),
+ 	SND_PCI_QUIRK_VENDOR(0x17aa, "Lenovo", AD1981_FIXUP_AMP_OVERRIDE),
+@@ -1061,7 +1061,7 @@ static const struct hda_fixup ad1884_fixups[] = {
+ 	},
+ };
+ 
+-static const struct snd_pci_quirk ad1884_fixup_tbl[] = {
++static const struct hda_quirk ad1884_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x2a82, "HP Touchsmart", AD1884_FIXUP_HP_TOUCHSMART),
+ 	SND_PCI_QUIRK_VENDOR(0x103c, "HP", AD1884_FIXUP_HP_EAPD),
+ 	SND_PCI_QUIRK_VENDOR(0x17aa, "Lenovo Thinkpad", AD1884_FIXUP_THINKPAD),
+diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
+index 654724559355ef..06e046214a4134 100644
+--- a/sound/pci/hda/patch_cirrus.c
++++ b/sound/pci/hda/patch_cirrus.c
+@@ -385,7 +385,7 @@ static const struct hda_model_fixup cs420x_models[] = {
+ 	{}
+ };
+ 
+-static const struct snd_pci_quirk cs420x_fixup_tbl[] = {
++static const struct hda_quirk cs420x_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x10de, 0x0ac0, "MacBookPro 5,3", CS420X_MBP53),
+ 	SND_PCI_QUIRK(0x10de, 0x0d94, "MacBookAir 3,1(2)", CS420X_MBP55),
+ 	SND_PCI_QUIRK(0x10de, 0xcb79, "MacBookPro 5,5", CS420X_MBP55),
+@@ -634,13 +634,13 @@ static const struct hda_model_fixup cs4208_models[] = {
+ 	{}
+ };
+ 
+-static const struct snd_pci_quirk cs4208_fixup_tbl[] = {
++static const struct hda_quirk cs4208_fixup_tbl[] = {
+ 	SND_PCI_QUIRK_VENDOR(0x106b, "Apple", CS4208_MAC_AUTO),
+ 	{} /* terminator */
+ };
+ 
+ /* codec SSID matching */
+-static const struct snd_pci_quirk cs4208_mac_fixup_tbl[] = {
++static const struct hda_quirk cs4208_mac_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x106b, 0x5e00, "MacBookPro 11,2", CS4208_MBP11),
+ 	SND_PCI_QUIRK(0x106b, 0x6c00, "MacMini 7,1", CS4208_MACMINI),
+ 	SND_PCI_QUIRK(0x106b, 0x7100, "MacBookAir 6,1", CS4208_MBA6),
+@@ -818,7 +818,7 @@ static const struct hda_model_fixup cs421x_models[] = {
+ 	{}
+ };
+ 
+-static const struct snd_pci_quirk cs421x_fixup_tbl[] = {
++static const struct hda_quirk cs421x_fixup_tbl[] = {
+ 	/* Test Intel board + CDB2410  */
+ 	SND_PCI_QUIRK(0x8086, 0x5001, "DP45SG/CDB4210", CS421X_CDB4210),
+ 	{} /* terminator */
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index b2bcdf76da3058..2e9f817b948eb3 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -828,23 +828,6 @@ static const struct hda_pintbl cxt_pincfg_sws_js201d[] = {
+ 	{}
+ };
+ 
+-/* pincfg quirk for Tuxedo Sirius;
+- * unfortunately the (PCI) SSID conflicts with System76 Pangolin pang14,
+- * which has incompatible pin setup, so we check the codec SSID (luckily
+- * different one!) and conditionally apply the quirk here
+- */
+-static void cxt_fixup_sirius_top_speaker(struct hda_codec *codec,
+-					 const struct hda_fixup *fix,
+-					 int action)
+-{
+-	/* ignore for incorrectly picked-up pang14 */
+-	if (codec->core.subsystem_id == 0x278212b3)
+-		return;
+-	/* set up the top speaker pin */
+-	if (action == HDA_FIXUP_ACT_PRE_PROBE)
+-		snd_hda_codec_set_pincfg(codec, 0x1d, 0x82170111);
+-}
+-
+ static const struct hda_fixup cxt_fixups[] = {
+ 	[CXT_PINCFG_LENOVO_X200] = {
+ 		.type = HDA_FIXUP_PINS,
+@@ -1009,12 +992,15 @@ static const struct hda_fixup cxt_fixups[] = {
+ 		.v.pins = cxt_pincfg_sws_js201d,
+ 	},
+ 	[CXT_PINCFG_TOP_SPEAKER] = {
+-		.type = HDA_FIXUP_FUNC,
+-		.v.func = cxt_fixup_sirius_top_speaker,
++		.type = HDA_FIXUP_PINS,
++		.v.pins = (const struct hda_pintbl[]) {
++			{ 0x1d, 0x82170111 },
++			{ }
++		},
+ 	},
+ };
+ 
+-static const struct snd_pci_quirk cxt5045_fixups[] = {
++static const struct hda_quirk cxt5045_fixups[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x30d5, "HP 530", CXT_FIXUP_HP_530),
+ 	SND_PCI_QUIRK(0x1179, 0xff31, "Toshiba P105", CXT_FIXUP_TOSHIBA_P105),
+ 	/* HP, Packard Bell, Fujitsu-Siemens & Lenovo laptops have
+@@ -1034,7 +1020,7 @@ static const struct hda_model_fixup cxt5045_fixup_models[] = {
+ 	{}
+ };
+ 
+-static const struct snd_pci_quirk cxt5047_fixups[] = {
++static const struct hda_quirk cxt5047_fixups[] = {
+ 	/* HP laptops have really bad sound over 0 dB on NID 0x10.
+ 	 */
+ 	SND_PCI_QUIRK_VENDOR(0x103c, "HP", CXT_FIXUP_CAP_MIX_AMP_5047),
+@@ -1046,7 +1032,7 @@ static const struct hda_model_fixup cxt5047_fixup_models[] = {
+ 	{}
+ };
+ 
+-static const struct snd_pci_quirk cxt5051_fixups[] = {
++static const struct hda_quirk cxt5051_fixups[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x360b, "Compaq CQ60", CXT_PINCFG_COMPAQ_CQ60),
+ 	SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo X200", CXT_PINCFG_LENOVO_X200),
+ 	{}
+@@ -1057,7 +1043,7 @@ static const struct hda_model_fixup cxt5051_fixup_models[] = {
+ 	{}
+ };
+ 
+-static const struct snd_pci_quirk cxt5066_fixups[] = {
++static const struct hda_quirk cxt5066_fixups[] = {
+ 	SND_PCI_QUIRK(0x1025, 0x0543, "Acer Aspire One 522", CXT_FIXUP_STEREO_DMIC),
+ 	SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT_FIXUP_ASPIRE_DMIC),
+ 	SND_PCI_QUIRK(0x1025, 0x054f, "Acer Aspire 4830T", CXT_FIXUP_ASPIRE_DMIC),
+@@ -1109,8 +1095,8 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
+ 	SND_PCI_QUIRK_VENDOR(0x17aa, "Thinkpad", CXT_FIXUP_THINKPAD_ACPI),
+ 	SND_PCI_QUIRK(0x1c06, 0x2011, "Lemote A1004", CXT_PINCFG_LEMOTE_A1004),
+ 	SND_PCI_QUIRK(0x1c06, 0x2012, "Lemote A1205", CXT_PINCFG_LEMOTE_A1205),
+-	SND_PCI_QUIRK(0x2782, 0x12c3, "Sirius Gen1", CXT_PINCFG_TOP_SPEAKER),
+-	SND_PCI_QUIRK(0x2782, 0x12c5, "Sirius Gen2", CXT_PINCFG_TOP_SPEAKER),
++	HDA_CODEC_QUIRK(0x2782, 0x12c3, "Sirius Gen1", CXT_PINCFG_TOP_SPEAKER),
++	HDA_CODEC_QUIRK(0x2782, 0x12c5, "Sirius Gen2", CXT_PINCFG_TOP_SPEAKER),
+ 	{}
+ };
+ 
+diff --git a/sound/pci/hda/patch_cs8409-tables.c b/sound/pci/hda/patch_cs8409-tables.c
+index 36b411d1a9609a..759f48038273df 100644
+--- a/sound/pci/hda/patch_cs8409-tables.c
++++ b/sound/pci/hda/patch_cs8409-tables.c
+@@ -473,7 +473,7 @@ struct sub_codec dolphin_cs42l42_1 = {
+  *                    Arrays Used for all projects using CS8409
+  ******************************************************************************/
+ 
+-const struct snd_pci_quirk cs8409_fixup_tbl[] = {
++const struct hda_quirk cs8409_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1028, 0x0A11, "Bullseye", CS8409_BULLSEYE),
+ 	SND_PCI_QUIRK(0x1028, 0x0A12, "Bullseye", CS8409_BULLSEYE),
+ 	SND_PCI_QUIRK(0x1028, 0x0A23, "Bullseye", CS8409_BULLSEYE),
+diff --git a/sound/pci/hda/patch_cs8409.h b/sound/pci/hda/patch_cs8409.h
+index 937e9387abdc7a..5e48115caf096b 100644
+--- a/sound/pci/hda/patch_cs8409.h
++++ b/sound/pci/hda/patch_cs8409.h
+@@ -355,7 +355,7 @@ int cs42l42_volume_put(struct snd_kcontrol *kctrl, struct snd_ctl_elem_value *uc
+ 
+ extern const struct hda_pcm_stream cs42l42_48k_pcm_analog_playback;
+ extern const struct hda_pcm_stream cs42l42_48k_pcm_analog_capture;
+-extern const struct snd_pci_quirk cs8409_fixup_tbl[];
++extern const struct hda_quirk cs8409_fixup_tbl[];
+ extern const struct hda_model_fixup cs8409_models[];
+ extern const struct hda_fixup cs8409_fixups[];
+ extern const struct hda_verb cs8409_cs42l42_init_verbs[];
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 18e6779a83be2f..973671e0cdb09d 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -1567,7 +1567,7 @@ static const struct hda_fixup alc880_fixups[] = {
+ 	},
+ };
+ 
+-static const struct snd_pci_quirk alc880_fixup_tbl[] = {
++static const struct hda_quirk alc880_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1019, 0x0f69, "Coeus G610P", ALC880_FIXUP_W810),
+ 	SND_PCI_QUIRK(0x1043, 0x10c3, "ASUS W5A", ALC880_FIXUP_ASUS_W5A),
+ 	SND_PCI_QUIRK(0x1043, 0x1964, "ASUS Z71V", ALC880_FIXUP_Z71V),
+@@ -1876,7 +1876,7 @@ static const struct hda_fixup alc260_fixups[] = {
+ 	},
+ };
+ 
+-static const struct snd_pci_quirk alc260_fixup_tbl[] = {
++static const struct hda_quirk alc260_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1025, 0x007b, "Acer C20x", ALC260_FIXUP_GPIO1),
+ 	SND_PCI_QUIRK(0x1025, 0x007f, "Acer Aspire 9500", ALC260_FIXUP_COEF),
+ 	SND_PCI_QUIRK(0x1025, 0x008f, "Acer", ALC260_FIXUP_GPIO1),
+@@ -2568,7 +2568,7 @@ static const struct hda_fixup alc882_fixups[] = {
+ 	},
+ };
+ 
+-static const struct snd_pci_quirk alc882_fixup_tbl[] = {
++static const struct hda_quirk alc882_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1025, 0x006c, "Acer Aspire 9810", ALC883_FIXUP_ACER_EAPD),
+ 	SND_PCI_QUIRK(0x1025, 0x0090, "Acer Aspire", ALC883_FIXUP_ACER_EAPD),
+ 	SND_PCI_QUIRK(0x1025, 0x0107, "Acer Aspire", ALC883_FIXUP_ACER_EAPD),
+@@ -2912,7 +2912,7 @@ static const struct hda_fixup alc262_fixups[] = {
+ 	},
+ };
+ 
+-static const struct snd_pci_quirk alc262_fixup_tbl[] = {
++static const struct hda_quirk alc262_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x170b, "HP Z200", ALC262_FIXUP_HP_Z200),
+ 	SND_PCI_QUIRK(0x10cf, 0x1397, "Fujitsu Lifebook S7110", ALC262_FIXUP_FSC_S7110),
+ 	SND_PCI_QUIRK(0x10cf, 0x142d, "Fujitsu Lifebook E8410", ALC262_FIXUP_BENQ),
+@@ -3073,7 +3073,7 @@ static const struct hda_model_fixup alc268_fixup_models[] = {
+ 	{}
+ };
+ 
+-static const struct snd_pci_quirk alc268_fixup_tbl[] = {
++static const struct hda_quirk alc268_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1025, 0x0139, "Acer TravelMate 6293", ALC268_FIXUP_SPDIF),
+ 	SND_PCI_QUIRK(0x1025, 0x015b, "Acer AOA 150 (ZG5)", ALC268_FIXUP_INV_DMIC),
+ 	/* below is codec SSID since multiple Toshiba laptops have the
+@@ -7726,8 +7726,6 @@ enum {
+ 	ALC287_FIXUP_LEGION_15IMHG05_AUTOMUTE,
+ 	ALC287_FIXUP_YOGA7_14ITL_SPEAKERS,
+ 	ALC298_FIXUP_LENOVO_C940_DUET7,
+-	ALC287_FIXUP_LENOVO_14IRP8_DUETITL,
+-	ALC287_FIXUP_LENOVO_LEGION_7,
+ 	ALC287_FIXUP_13S_GEN2_SPEAKERS,
+ 	ALC256_FIXUP_SET_COEF_DEFAULTS,
+ 	ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE,
+@@ -7772,8 +7770,6 @@ enum {
+ 	ALC285_FIXUP_ASUS_GU605_SPI_SPEAKER2_TO_DAC1,
+ 	ALC287_FIXUP_LENOVO_THKPAD_WH_ALC1318,
+ 	ALC256_FIXUP_CHROME_BOOK,
+-	ALC287_FIXUP_LENOVO_14ARP8_LEGION_IAH7,
+-	ALC287_FIXUP_LENOVO_SSID_17AA3820,
+ 	ALC245_FIXUP_CLEVO_NOISY_MIC,
+ 	ALC269_FIXUP_VAIO_VJFH52_MIC_NO_PRESENCE,
+ 	ALC233_FIXUP_MEDION_MTL_SPK,
+@@ -7796,72 +7792,6 @@ static void alc298_fixup_lenovo_c940_duet7(struct hda_codec *codec,
+ 	__snd_hda_apply_fixup(codec, id, action, 0);
+ }
+ 
+-/* A special fixup for Lenovo Slim/Yoga Pro 9 14IRP8 and Yoga DuetITL 2021;
+- * 14IRP8 PCI SSID will mistakenly be matched with the DuetITL codec SSID,
+- * so we need to apply a different fixup in this case. The only DuetITL codec
+- * SSID reported so far is the 17aa:3802 while the 14IRP8 has the 17aa:38be
+- * and 17aa:38bf. If it weren't for the PCI SSID, the 14IRP8 models would
+- * have matched correctly by their codecs.
+- */
+-static void alc287_fixup_lenovo_14irp8_duetitl(struct hda_codec *codec,
+-					      const struct hda_fixup *fix,
+-					      int action)
+-{
+-	int id;
+-
+-	if (codec->core.subsystem_id == 0x17aa3802)
+-		id = ALC287_FIXUP_YOGA7_14ITL_SPEAKERS; /* DuetITL */
+-	else
+-		id = ALC287_FIXUP_TAS2781_I2C; /* 14IRP8 */
+-	__snd_hda_apply_fixup(codec, id, action, 0);
+-}
+-
+-/* Similar to above the Lenovo Yoga Pro 7 14ARP8 PCI SSID matches the codec SSID of the
+-   Legion Y9000X 2022 IAH7.*/
+-static void alc287_fixup_lenovo_14arp8_legion_iah7(struct hda_codec *codec,
+-					   const struct hda_fixup *fix,
+-					   int action)
+-{
+-	int id;
+-
+-	if (codec->core.subsystem_id == 0x17aa386e)
+-		id = ALC287_FIXUP_CS35L41_I2C_2; /* Legion Y9000X 2022 IAH7 */
+-	else
+-		id = ALC285_FIXUP_SPEAKER2_TO_DAC1; /* Yoga Pro 7 14ARP8 */
+-	__snd_hda_apply_fixup(codec, id, action, 0);
+-}
+-
+-/* Another hilarious PCI SSID conflict with Lenovo Legion Pro 7 16ARX8H (with
+- * TAS2781 codec) and Legion 7i 16IAX7 (with CS35L41 codec);
+- * we apply a corresponding fixup depending on the codec SSID instead
+- */
+-static void alc287_fixup_lenovo_legion_7(struct hda_codec *codec,
+-					 const struct hda_fixup *fix,
+-					 int action)
+-{
+-	int id;
+-
+-	if (codec->core.subsystem_id == 0x17aa38a8)
+-		id = ALC287_FIXUP_TAS2781_I2C; /* Legion Pro 7 16ARX8H */
+-	else
+-		id = ALC287_FIXUP_CS35L41_I2C_2; /* Legion 7i 16IAX7 */
+-	__snd_hda_apply_fixup(codec, id, action, 0);
+-}
+-
+-/* Yet more conflicting PCI SSID (17aa:3820) on two Lenovo models */
+-static void alc287_fixup_lenovo_ssid_17aa3820(struct hda_codec *codec,
+-					      const struct hda_fixup *fix,
+-					      int action)
+-{
+-	int id;
+-
+-	if (codec->core.subsystem_id == 0x17aa3820)
+-		id = ALC269_FIXUP_ASPIRE_HEADSET_MIC; /* IdeaPad 330-17IKB 81DM */
+-	else /* 0x17aa3802 */
+-		id =  ALC287_FIXUP_YOGA7_14ITL_SPEAKERS; /* "Yoga Duet 7 13ITL6 */
+-	__snd_hda_apply_fixup(codec, id, action, 0);
+-}
+-
+ static const struct hda_fixup alc269_fixups[] = {
+ 	[ALC269_FIXUP_GPIO2] = {
+ 		.type = HDA_FIXUP_FUNC,
+@@ -9810,14 +9740,6 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = alc298_fixup_lenovo_c940_duet7,
+ 	},
+-	[ALC287_FIXUP_LENOVO_14IRP8_DUETITL] = {
+-		.type = HDA_FIXUP_FUNC,
+-		.v.func = alc287_fixup_lenovo_14irp8_duetitl,
+-	},
+-	[ALC287_FIXUP_LENOVO_LEGION_7] = {
+-		.type = HDA_FIXUP_FUNC,
+-		.v.func = alc287_fixup_lenovo_legion_7,
+-	},
+ 	[ALC287_FIXUP_13S_GEN2_SPEAKERS] = {
+ 		.type = HDA_FIXUP_VERBS,
+ 		.v.verbs = (const struct hda_verb[]) {
+@@ -10002,10 +9924,6 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.chained = true,
+ 		.chain_id = ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK,
+ 	},
+-	[ALC287_FIXUP_LENOVO_14ARP8_LEGION_IAH7] = {
+-		.type = HDA_FIXUP_FUNC,
+-		.v.func = alc287_fixup_lenovo_14arp8_legion_iah7,
+-	},
+ 	[ALC287_FIXUP_YOGA9_14IMH9_BASS_SPK_PIN] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = alc287_fixup_yoga9_14iap7_bass_spk_pin,
+@@ -10140,10 +10058,6 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.chained = true,
+ 		.chain_id = ALC225_FIXUP_HEADSET_JACK
+ 	},
+-	[ALC287_FIXUP_LENOVO_SSID_17AA3820] = {
+-		.type = HDA_FIXUP_FUNC,
+-		.v.func = alc287_fixup_lenovo_ssid_17aa3820,
+-	},
+ 	[ALC245_FIXUP_CLEVO_NOISY_MIC] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = alc269_fixup_limit_int_mic_boost,
+@@ -10169,7 +10083,7 @@ static const struct hda_fixup alc269_fixups[] = {
+ 	},
+ };
+ 
+-static const struct snd_pci_quirk alc269_fixup_tbl[] = {
++static const struct hda_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1025, 0x0283, "Acer TravelMate 8371", ALC269_FIXUP_INV_DMIC),
+ 	SND_PCI_QUIRK(0x1025, 0x029b, "Acer 1810TZ", ALC269_FIXUP_INV_DMIC),
+ 	SND_PCI_QUIRK(0x1025, 0x0349, "Acer AOD260", ALC269_FIXUP_INV_DMIC),
+@@ -10411,6 +10325,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x87b7, "HP Laptop 14-fq0xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
+ 	SND_PCI_QUIRK(0x103c, 0x87c8, "HP", ALC287_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x87d3, "HP Laptop 15-gw0xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
++	SND_PCI_QUIRK(0x103c, 0x87df, "HP ProBook 430 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x87e5, "HP ProBook 440 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x87e7, "HP ProBook 450 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x87f1, "HP ProBook 630 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
+@@ -10592,7 +10507,13 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8cdf, "HP SnowWhite", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8ce0, "HP SnowWhite", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8cf5, "HP ZBook Studio 16", ALC245_FIXUP_CS35L41_SPI_4_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8d01, "HP ZBook Power 14 G12", ALC285_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8d84, "HP EliteBook X G1i", ALC285_FIXUP_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8d91, "HP ZBook Firefly 14 G12", ALC285_FIXUP_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8d92, "HP ZBook Firefly 16 G12", ALC285_FIXUP_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8e18, "HP ZBook Firefly 14 G12A", ALC285_FIXUP_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8e19, "HP ZBook Firefly 14 G12A", ALC285_FIXUP_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8e1a, "HP ZBook Firefly 14 G12A", ALC285_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
+ 	SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+@@ -10746,6 +10667,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x144d, 0xc830, "Samsung Galaxy Book Ion (NT950XCJ-X716A)", ALC298_FIXUP_SAMSUNG_AMP),
+ 	SND_PCI_QUIRK(0x144d, 0xc832, "Samsung Galaxy Book Flex Alpha (NP730QCJ)", ALC256_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
+ 	SND_PCI_QUIRK(0x144d, 0xca03, "Samsung Galaxy Book2 Pro 360 (NP930QED)", ALC298_FIXUP_SAMSUNG_AMP),
++	SND_PCI_QUIRK(0x144d, 0xca06, "Samsung Galaxy Book3 360 (NP730QFG)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
+ 	SND_PCI_QUIRK(0x144d, 0xc868, "Samsung Galaxy Book2 Pro (NP930XED)", ALC298_FIXUP_SAMSUNG_AMP),
+ 	SND_PCI_QUIRK(0x144d, 0xc870, "Samsung Galaxy Book2 Pro (NP950XED)", ALC298_FIXUP_SAMSUNG_AMP_V2_2_AMPS),
+ 	SND_PCI_QUIRK(0x144d, 0xc872, "Samsung Galaxy Book2 Pro (NP950XEE)", ALC298_FIXUP_SAMSUNG_AMP_V2_2_AMPS),
+@@ -10903,11 +10825,13 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x31af, "ThinkCentre Station", ALC623_FIXUP_LENOVO_THINKSTATION_P340),
+ 	SND_PCI_QUIRK(0x17aa, 0x334b, "Lenovo ThinkCentre M70 Gen5", ALC283_FIXUP_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x17aa, 0x3801, "Lenovo Yoga9 14IAP7", ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN),
+-	SND_PCI_QUIRK(0x17aa, 0x3802, "Lenovo Yoga Pro 9 14IRP8 / DuetITL 2021", ALC287_FIXUP_LENOVO_14IRP8_DUETITL),
++	HDA_CODEC_QUIRK(0x17aa, 0x3802, "DuetITL 2021", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
++	SND_PCI_QUIRK(0x17aa, 0x3802, "Lenovo Yoga Pro 9 14IRP8", ALC287_FIXUP_TAS2781_I2C),
+ 	SND_PCI_QUIRK(0x17aa, 0x3813, "Legion 7i 15IMHG05", ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS),
+ 	SND_PCI_QUIRK(0x17aa, 0x3818, "Lenovo C940 / Yoga Duet 7", ALC298_FIXUP_LENOVO_C940_DUET7),
+ 	SND_PCI_QUIRK(0x17aa, 0x3819, "Lenovo 13s Gen2 ITL", ALC287_FIXUP_13S_GEN2_SPEAKERS),
+-	SND_PCI_QUIRK(0x17aa, 0x3820, "IdeaPad 330 / Yoga Duet 7", ALC287_FIXUP_LENOVO_SSID_17AA3820),
++	HDA_CODEC_QUIRK(0x17aa, 0x3820, "IdeaPad 330-17IKB 81DM", ALC269_FIXUP_ASPIRE_HEADSET_MIC),
++	SND_PCI_QUIRK(0x17aa, 0x3820, "Yoga Duet 7 13ITL6", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
+ 	SND_PCI_QUIRK(0x17aa, 0x3824, "Legion Y9000X 2020", ALC285_FIXUP_LEGION_Y9000X_SPEAKERS),
+ 	SND_PCI_QUIRK(0x17aa, 0x3827, "Ideapad S740", ALC285_FIXUP_IDEAPAD_S740_COEF),
+ 	SND_PCI_QUIRK(0x17aa, 0x3834, "Lenovo IdeaPad Slim 9i 14ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
+@@ -10921,8 +10845,10 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x3865, "Lenovo 13X", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x17aa, 0x3866, "Lenovo 13X", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x17aa, 0x3869, "Lenovo Yoga7 14IAL7", ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN),
+-	SND_PCI_QUIRK(0x17aa, 0x386e, "Legion Y9000X 2022 IAH7 / Yoga Pro 7 14ARP8",  ALC287_FIXUP_LENOVO_14ARP8_LEGION_IAH7),
+-	SND_PCI_QUIRK(0x17aa, 0x386f, "Legion Pro 7/7i", ALC287_FIXUP_LENOVO_LEGION_7),
++	HDA_CODEC_QUIRK(0x17aa, 0x386e, "Legion Y9000X 2022 IAH7", ALC287_FIXUP_CS35L41_I2C_2),
++	SND_PCI_QUIRK(0x17aa, 0x386e, "Yoga Pro 7 14ARP8", ALC285_FIXUP_SPEAKER2_TO_DAC1),
++	HDA_CODEC_QUIRK(0x17aa, 0x386f, "Legion Pro 7 16ARX8H", ALC287_FIXUP_TAS2781_I2C),
++	SND_PCI_QUIRK(0x17aa, 0x386f, "Legion Pro 7i 16IAX7", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x17aa, 0x3870, "Lenovo Yoga 7 14ARB7", ALC287_FIXUP_YOGA7_14ARB7_I2C),
+ 	SND_PCI_QUIRK(0x17aa, 0x3877, "Lenovo Legion 7 Slim 16ARHA7", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x17aa, 0x3878, "Lenovo Legion 7 Slim 16ARHA7", ALC287_FIXUP_CS35L41_I2C_2),
+@@ -11096,7 +11022,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	{}
+ };
+ 
+-static const struct snd_pci_quirk alc269_fixup_vendor_tbl[] = {
++static const struct hda_quirk alc269_fixup_vendor_tbl[] = {
+ 	SND_PCI_QUIRK_VENDOR(0x1025, "Acer Aspire", ALC271_FIXUP_DMIC),
+ 	SND_PCI_QUIRK_VENDOR(0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED),
+ 	SND_PCI_QUIRK_VENDOR(0x104d, "Sony VAIO", ALC269_FIXUP_SONY_VAIO),
+@@ -12032,7 +11958,7 @@ static const struct hda_fixup alc861_fixups[] = {
+ 	}
+ };
+ 
+-static const struct snd_pci_quirk alc861_fixup_tbl[] = {
++static const struct hda_quirk alc861_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1043, 0x1253, "ASUS W7J", ALC660_FIXUP_ASUS_W7J),
+ 	SND_PCI_QUIRK(0x1043, 0x1263, "ASUS Z35HL", ALC660_FIXUP_ASUS_W7J),
+ 	SND_PCI_QUIRK(0x1043, 0x1393, "ASUS A6Rp", ALC861_FIXUP_ASUS_A6RP),
+@@ -12136,7 +12062,7 @@ static const struct hda_fixup alc861vd_fixups[] = {
+ 	},
+ };
+ 
+-static const struct snd_pci_quirk alc861vd_fixup_tbl[] = {
++static const struct hda_quirk alc861vd_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x30bf, "HP TX1000", ALC861VD_FIX_DALLAS),
+ 	SND_PCI_QUIRK(0x1043, 0x1339, "ASUS A7-K", ALC660VD_FIX_ASUS_GPIO1),
+ 	SND_PCI_QUIRK(0x1179, 0xff31, "Toshiba L30-149", ALC861VD_FIX_DALLAS),
+@@ -12937,7 +12863,7 @@ static const struct hda_fixup alc662_fixups[] = {
+ 	},
+ };
+ 
+-static const struct snd_pci_quirk alc662_fixup_tbl[] = {
++static const struct hda_quirk alc662_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1019, 0x9087, "ECS", ALC662_FIXUP_ASUS_MODE2),
+ 	SND_PCI_QUIRK(0x1019, 0x9859, "JP-IK LEAP W502", ALC897_FIXUP_HEADSET_MIC_PIN3),
+ 	SND_PCI_QUIRK(0x1025, 0x022f, "Acer Aspire One", ALC662_FIXUP_INV_DMIC),
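
The recurring snd_pci_quirk -> hda_quirk conversion in the hunks above is what lets PCI-SSID entries (SND_PCI_QUIRK) and codec-SSID entries (HDA_CODEC_QUIRK) coexist in one table, so two machines sharing PCI SSID 17aa:3802 or 17aa:386e can still be told apart by the codec's own subsystem ID. A minimal standalone sketch of such a first-match scan follows; the struct layout and field names are illustrative assumptions, not the kernel's:

/* Standalone model of a first-match quirk scan: entries flagged as
 * codec-SSID quirks are compared against the codec's subsystem ID,
 * the rest against the PCI subsystem ID.  Names are illustrative. */
#include <stdio.h>
#include <stdint.h>

struct quirk {
	uint16_t vendor, device;
	int match_codec_ssid;	/* 1: HDA_CODEC_QUIRK-style entry */
	const char *name;
	int fixup_id;
};

static const struct quirk table[] = {
	{ 0x17aa, 0x3802, 1, "DuetITL 2021",      1 },
	{ 0x17aa, 0x3802, 0, "Lenovo Yoga Pro 9", 2 },
	{ 0, 0, 0, NULL, 0 }
};

static const struct quirk *lookup(uint32_t pci_ssid, uint32_t codec_ssid)
{
	const struct quirk *q;

	for (q = table; q->name; q++) {
		uint32_t ssid = q->match_codec_ssid ? codec_ssid : pci_ssid;

		if (ssid == ((uint32_t)q->vendor << 16 | q->device))
			return q;	/* first match wins, so order matters */
	}
	return NULL;
}

int main(void)
{
	const struct quirk *q = lookup(0x17aa3802, 0x17aa3802);

	printf("fixup %d (%s)\n", q ? q->fixup_id : -1, q ? q->name : "none");
	return 0;
}

Because the scan stops at the first hit, the codec-SSID entry is listed before the PCI-SSID fallback for the same ID, matching the ordering used in the table above.
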
+diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
+index ae1a34c68c6161..bde6b737385831 100644
+--- a/sound/pci/hda/patch_sigmatel.c
++++ b/sound/pci/hda/patch_sigmatel.c
+@@ -1462,7 +1462,7 @@ static const struct hda_model_fixup stac9200_models[] = {
+ 	{}
+ };
+ 
+-static const struct snd_pci_quirk stac9200_fixup_tbl[] = {
++static const struct hda_quirk stac9200_fixup_tbl[] = {
+ 	/* SigmaTel reference board */
+ 	SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x2668,
+ 		      "DFI LanParty", STAC_REF),
+@@ -1683,7 +1683,7 @@ static const struct hda_model_fixup stac925x_models[] = {
+ 	{}
+ };
+ 
+-static const struct snd_pci_quirk stac925x_fixup_tbl[] = {
++static const struct hda_quirk stac925x_fixup_tbl[] = {
+ 	/* SigmaTel reference board */
+ 	SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x2668, "DFI LanParty", STAC_REF),
+ 	SND_PCI_QUIRK(PCI_VENDOR_ID_DFI, 0x3101, "DFI LanParty", STAC_REF),
+@@ -1957,7 +1957,7 @@ static const struct hda_model_fixup stac92hd73xx_models[] = {
+ 	{}
+ };
+ 
+-static const struct snd_pci_quirk stac92hd73xx_fixup_tbl[] = {
++static const struct hda_quirk stac92hd73xx_fixup_tbl[] = {
+ 	/* SigmaTel reference board */
+ 	SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x2668,
+ 				"DFI LanParty", STAC_92HD73XX_REF),
+@@ -2753,7 +2753,7 @@ static const struct hda_model_fixup stac92hd83xxx_models[] = {
+ 	{}
+ };
+ 
+-static const struct snd_pci_quirk stac92hd83xxx_fixup_tbl[] = {
++static const struct hda_quirk stac92hd83xxx_fixup_tbl[] = {
+ 	/* SigmaTel reference board */
+ 	SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x2668,
+ 		      "DFI LanParty", STAC_92HD83XXX_REF),
+@@ -3236,7 +3236,7 @@ static const struct hda_model_fixup stac92hd71bxx_models[] = {
+ 	{}
+ };
+ 
+-static const struct snd_pci_quirk stac92hd71bxx_fixup_tbl[] = {
++static const struct hda_quirk stac92hd71bxx_fixup_tbl[] = {
+ 	/* SigmaTel reference board */
+ 	SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x2668,
+ 		      "DFI LanParty", STAC_92HD71BXX_REF),
+@@ -3496,7 +3496,7 @@ static const struct hda_pintbl ecs202_pin_configs[] = {
+ };
+ 
+ /* codec SSIDs for Intel Mac sharing the same PCI SSID 8384:7680 */
+-static const struct snd_pci_quirk stac922x_intel_mac_fixup_tbl[] = {
++static const struct hda_quirk stac922x_intel_mac_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x0000, 0x0100, "Mac Mini", STAC_INTEL_MAC_V3),
+ 	SND_PCI_QUIRK(0x106b, 0x0800, "Mac", STAC_INTEL_MAC_V1),
+ 	SND_PCI_QUIRK(0x106b, 0x0600, "Mac", STAC_INTEL_MAC_V2),
+@@ -3640,7 +3640,7 @@ static const struct hda_model_fixup stac922x_models[] = {
+ 	{}
+ };
+ 
+-static const struct snd_pci_quirk stac922x_fixup_tbl[] = {
++static const struct hda_quirk stac922x_fixup_tbl[] = {
+ 	/* SigmaTel reference board */
+ 	SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x2668,
+ 		      "DFI LanParty", STAC_D945_REF),
+@@ -3968,7 +3968,7 @@ static const struct hda_model_fixup stac927x_models[] = {
+ 	{}
+ };
+ 
+-static const struct snd_pci_quirk stac927x_fixup_tbl[] = {
++static const struct hda_quirk stac927x_fixup_tbl[] = {
+ 	/* SigmaTel reference board */
+ 	SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x2668,
+ 		      "DFI LanParty", STAC_D965_REF),
+@@ -4178,7 +4178,7 @@ static const struct hda_model_fixup stac9205_models[] = {
+ 	{}
+ };
+ 
+-static const struct snd_pci_quirk stac9205_fixup_tbl[] = {
++static const struct hda_quirk stac9205_fixup_tbl[] = {
+ 	/* SigmaTel reference board */
+ 	SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x2668,
+ 		      "DFI LanParty", STAC_9205_REF),
+@@ -4255,7 +4255,7 @@ static const struct hda_fixup stac92hd95_fixups[] = {
+ 	},
+ };
+ 
+-static const struct snd_pci_quirk stac92hd95_fixup_tbl[] = {
++static const struct hda_quirk stac92hd95_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1911, "HP Spectre 13", STAC_92HD95_HP_BASS),
+ 	{} /* terminator */
+ };
+@@ -5002,7 +5002,7 @@ static const struct hda_fixup stac9872_fixups[] = {
+ 	},
+ };
+ 
+-static const struct snd_pci_quirk stac9872_fixup_tbl[] = {
++static const struct hda_quirk stac9872_fixup_tbl[] = {
+ 	SND_PCI_QUIRK_MASK(0x104d, 0xfff0, 0x81e0,
+ 			   "Sony VAIO F/S", STAC_9872_VAIO),
+ 	{} /* terminator */
+diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
+index a8ef4bb70dd057..d0893059b1b9b7 100644
+--- a/sound/pci/hda/patch_via.c
++++ b/sound/pci/hda/patch_via.c
+@@ -1035,7 +1035,7 @@ static const struct hda_fixup via_fixups[] = {
+ 	},
+ };
+ 
+-static const struct snd_pci_quirk vt2002p_fixups[] = {
++static const struct hda_quirk vt2002p_fixups[] = {
+ 	SND_PCI_QUIRK(0x1043, 0x13f7, "Asus B23E", VIA_FIXUP_POWER_SAVE),
+ 	SND_PCI_QUIRK(0x1043, 0x1487, "Asus G75", VIA_FIXUP_ASUS_G75),
+ 	SND_PCI_QUIRK(0x1043, 0x8532, "Asus X202E", VIA_FIXUP_INTMIC_BOOST),
+diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
+index 5153a68d8c0795..e38c5885dadfbc 100644
+--- a/sound/soc/amd/yc/acp6x-mach.c
++++ b/sound/soc/amd/yc/acp6x-mach.c
+@@ -220,6 +220,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "21J6"),
+ 		}
+ 	},
++	{
++		.driver_data = &acp6x_card,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "21M1"),
++		}
++	},
+ 	{
+ 		.driver_data = &acp6x_card,
+ 		.matches = {
+@@ -416,6 +423,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "Xiaomi Book Pro 14 2022"),
+ 		}
+ 	},
++	{
++		.driver_data = &acp6x_card,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "TIMI"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Redmi G 2022"),
++		}
++	},
+ 	{
+ 		.driver_data = &acp6x_card,
+ 		.matches = {
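
The yc_acp_quirk_table additions above follow the usual DMI quirk pattern: an entry fires only when every DMI_MATCH (substring) condition is satisfied by the firmware-provided identification strings. A standalone sketch of that all-conditions-must-hold scan, with illustrative struct names and substring matching only:

/* Standalone model of DMI quirk matching: every condition in an entry
 * must be found in the corresponding firmware string for the entry to
 * fire.  dmi_get() stands in for the firmware-provided DMI data. */
#include <stdio.h>
#include <string.h>

struct dmi_match { const char *field; const char *value; };
struct dmi_quirk { struct dmi_match matches[2]; const char *card; };

static const char *dmi_get(const char *field)
{
	if (!strcmp(field, "board_vendor"))
		return "LENOVO";
	if (!strcmp(field, "product_name"))
		return "21M1";
	return "";
}

static const struct dmi_quirk quirks[] = {
	{ { { "board_vendor", "LENOVO" }, { "product_name", "21M1" } }, "acp6x" },
	{ { { NULL, NULL }, { NULL, NULL } }, NULL }
};

static const char *first_match(void)
{
	const struct dmi_quirk *q;
	int i;

	for (q = quirks; q->card; q++) {
		for (i = 0; i < 2; i++)
			if (q->matches[i].field &&
			    !strstr(dmi_get(q->matches[i].field),
				    q->matches[i].value))
				goto next;
		return q->card;	/* every condition held */
next:		;
	}
	return NULL;
}

int main(void)
{
	const char *card = first_match();

	printf("card: %s\n", card ? card : "none");
	return 0;
}
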
+diff --git a/sound/soc/codecs/hdmi-codec.c b/sound/soc/codecs/hdmi-codec.c
+index 74caae52e1273f..d9df29a26f4f21 100644
+--- a/sound/soc/codecs/hdmi-codec.c
++++ b/sound/soc/codecs/hdmi-codec.c
+@@ -185,84 +185,97 @@ static const struct snd_pcm_chmap_elem hdmi_codec_8ch_chmaps[] = {
+ /*
+  * hdmi_codec_channel_alloc: speaker configuration available for CEA
+  *
+- * This is an ordered list that must match with hdmi_codec_8ch_chmaps struct
++ * This is an ordered list where ca_id must exist in hdmi_codec_8ch_chmaps
+  * The preceding ones have better chances to be selected by
+  * hdmi_codec_get_ch_alloc_table_idx().
+  */
+ static const struct hdmi_codec_cea_spk_alloc hdmi_codec_channel_alloc[] = {
+ 	{ .ca_id = 0x00, .n_ch = 2,
+-	  .mask = FL | FR},
+-	/* 2.1 */
+-	{ .ca_id = 0x01, .n_ch = 4,
+-	  .mask = FL | FR | LFE},
+-	/* Dolby Surround */
++	  .mask = FL | FR },
++	{ .ca_id = 0x03, .n_ch = 4,
++	  .mask = FL | FR | LFE | FC },
+ 	{ .ca_id = 0x02, .n_ch = 4,
+ 	  .mask = FL | FR | FC },
+-	/* surround51 */
++	{ .ca_id = 0x01, .n_ch = 4,
++	  .mask = FL | FR | LFE },
+ 	{ .ca_id = 0x0b, .n_ch = 6,
+-	  .mask = FL | FR | LFE | FC | RL | RR},
+-	/* surround40 */
+-	{ .ca_id = 0x08, .n_ch = 6,
+-	  .mask = FL | FR | RL | RR },
+-	/* surround41 */
+-	{ .ca_id = 0x09, .n_ch = 6,
+-	  .mask = FL | FR | LFE | RL | RR },
+-	/* surround50 */
++	  .mask = FL | FR | LFE | FC | RL | RR },
+ 	{ .ca_id = 0x0a, .n_ch = 6,
+ 	  .mask = FL | FR | FC | RL | RR },
+-	/* 6.1 */
+-	{ .ca_id = 0x0f, .n_ch = 8,
+-	  .mask = FL | FR | LFE | FC | RL | RR | RC },
+-	/* surround71 */
++	{ .ca_id = 0x09, .n_ch = 6,
++	  .mask = FL | FR | LFE | RL | RR },
++	{ .ca_id = 0x08, .n_ch = 6,
++	  .mask = FL | FR | RL | RR },
++	{ .ca_id = 0x07, .n_ch = 6,
++	  .mask = FL | FR | LFE | FC | RC },
++	{ .ca_id = 0x06, .n_ch = 6,
++	  .mask = FL | FR | FC | RC },
++	{ .ca_id = 0x05, .n_ch = 6,
++	  .mask = FL | FR | LFE | RC },
++	{ .ca_id = 0x04, .n_ch = 6,
++	  .mask = FL | FR | RC },
+ 	{ .ca_id = 0x13, .n_ch = 8,
+ 	  .mask = FL | FR | LFE | FC | RL | RR | RLC | RRC },
+-	/* others */
+-	{ .ca_id = 0x03, .n_ch = 8,
+-	  .mask = FL | FR | LFE | FC },
+-	{ .ca_id = 0x04, .n_ch = 8,
+-	  .mask = FL | FR | RC},
+-	{ .ca_id = 0x05, .n_ch = 8,
+-	  .mask = FL | FR | LFE | RC },
+-	{ .ca_id = 0x06, .n_ch = 8,
+-	  .mask = FL | FR | FC | RC },
+-	{ .ca_id = 0x07, .n_ch = 8,
+-	  .mask = FL | FR | LFE | FC | RC },
+-	{ .ca_id = 0x0c, .n_ch = 8,
+-	  .mask = FL | FR | RC | RL | RR },
+-	{ .ca_id = 0x0d, .n_ch = 8,
+-	  .mask = FL | FR | LFE | RL | RR | RC },
+-	{ .ca_id = 0x0e, .n_ch = 8,
+-	  .mask = FL | FR | FC | RL | RR | RC },
+-	{ .ca_id = 0x10, .n_ch = 8,
+-	  .mask = FL | FR | RL | RR | RLC | RRC },
+-	{ .ca_id = 0x11, .n_ch = 8,
+-	  .mask = FL | FR | LFE | RL | RR | RLC | RRC },
++	{ .ca_id = 0x1f, .n_ch = 8,
++	  .mask = FL | FR | LFE | FC | RL | RR | FLC | FRC },
+ 	{ .ca_id = 0x12, .n_ch = 8,
+ 	  .mask = FL | FR | FC | RL | RR | RLC | RRC },
+-	{ .ca_id = 0x14, .n_ch = 8,
+-	  .mask = FL | FR | FLC | FRC },
+-	{ .ca_id = 0x15, .n_ch = 8,
+-	  .mask = FL | FR | LFE | FLC | FRC },
+-	{ .ca_id = 0x16, .n_ch = 8,
+-	  .mask = FL | FR | FC | FLC | FRC },
+-	{ .ca_id = 0x17, .n_ch = 8,
+-	  .mask = FL | FR | LFE | FC | FLC | FRC },
+-	{ .ca_id = 0x18, .n_ch = 8,
+-	  .mask = FL | FR | RC | FLC | FRC },
+-	{ .ca_id = 0x19, .n_ch = 8,
+-	  .mask = FL | FR | LFE | RC | FLC | FRC },
+-	{ .ca_id = 0x1a, .n_ch = 8,
+-	  .mask = FL | FR | RC | FC | FLC | FRC },
+-	{ .ca_id = 0x1b, .n_ch = 8,
+-	  .mask = FL | FR | LFE | RC | FC | FLC | FRC },
+-	{ .ca_id = 0x1c, .n_ch = 8,
+-	  .mask = FL | FR | RL | RR | FLC | FRC },
+-	{ .ca_id = 0x1d, .n_ch = 8,
+-	  .mask = FL | FR | LFE | RL | RR | FLC | FRC },
+ 	{ .ca_id = 0x1e, .n_ch = 8,
+ 	  .mask = FL | FR | FC | RL | RR | FLC | FRC },
+-	{ .ca_id = 0x1f, .n_ch = 8,
+-	  .mask = FL | FR | LFE | FC | RL | RR | FLC | FRC },
++	{ .ca_id = 0x11, .n_ch = 8,
++	  .mask = FL | FR | LFE | RL | RR | RLC | RRC },
++	{ .ca_id = 0x1d, .n_ch = 8,
++	  .mask = FL | FR | LFE | RL | RR | FLC | FRC },
++	{ .ca_id = 0x10, .n_ch = 8,
++	  .mask = FL | FR | RL | RR | RLC | RRC },
++	{ .ca_id = 0x1c, .n_ch = 8,
++	  .mask = FL | FR | RL | RR | FLC | FRC },
++	{ .ca_id = 0x0f, .n_ch = 8,
++	  .mask = FL | FR | LFE | FC | RL | RR | RC },
++	{ .ca_id = 0x1b, .n_ch = 8,
++	  .mask = FL | FR | LFE | RC | FC | FLC | FRC },
++	{ .ca_id = 0x0e, .n_ch = 8,
++	  .mask = FL | FR | FC | RL | RR | RC },
++	{ .ca_id = 0x1a, .n_ch = 8,
++	  .mask = FL | FR | RC | FC | FLC | FRC },
++	{ .ca_id = 0x0d, .n_ch = 8,
++	  .mask = FL | FR | LFE | RL | RR | RC },
++	{ .ca_id = 0x19, .n_ch = 8,
++	  .mask = FL | FR | LFE | RC | FLC | FRC },
++	{ .ca_id = 0x0c, .n_ch = 8,
++	  .mask = FL | FR | RC | RL | RR },
++	{ .ca_id = 0x18, .n_ch = 8,
++	  .mask = FL | FR | RC | FLC | FRC },
++	{ .ca_id = 0x17, .n_ch = 8,
++	  .mask = FL | FR | LFE | FC | FLC | FRC },
++	{ .ca_id = 0x16, .n_ch = 8,
++	  .mask = FL | FR | FC | FLC | FRC },
++	{ .ca_id = 0x15, .n_ch = 8,
++	  .mask = FL | FR | LFE | FLC | FRC },
++	{ .ca_id = 0x14, .n_ch = 8,
++	  .mask = FL | FR | FLC | FRC },
++	{ .ca_id = 0x0b, .n_ch = 8,
++	  .mask = FL | FR | LFE | FC | RL | RR },
++	{ .ca_id = 0x0a, .n_ch = 8,
++	  .mask = FL | FR | FC | RL | RR },
++	{ .ca_id = 0x09, .n_ch = 8,
++	  .mask = FL | FR | LFE | RL | RR },
++	{ .ca_id = 0x08, .n_ch = 8,
++	  .mask = FL | FR | RL | RR },
++	{ .ca_id = 0x07, .n_ch = 8,
++	  .mask = FL | FR | LFE | FC | RC },
++	{ .ca_id = 0x06, .n_ch = 8,
++	  .mask = FL | FR | FC | RC },
++	{ .ca_id = 0x05, .n_ch = 8,
++	  .mask = FL | FR | LFE | RC },
++	{ .ca_id = 0x04, .n_ch = 8,
++	  .mask = FL | FR | RC },
++	{ .ca_id = 0x03, .n_ch = 8,
++	  .mask = FL | FR | LFE | FC },
++	{ .ca_id = 0x02, .n_ch = 8,
++	  .mask = FL | FR | FC },
++	{ .ca_id = 0x01, .n_ch = 8,
++	  .mask = FL | FR | LFE },
+ };
+ 
+ struct hdmi_codec_priv {
+@@ -371,7 +384,8 @@ static int hdmi_codec_chmap_ctl_get(struct snd_kcontrol *kcontrol,
+ 	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
+ 	struct hdmi_codec_priv *hcp = info->private_data;
+ 
+-	map = info->chmap[hcp->chmap_idx].map;
++	if (hcp->chmap_idx != HDMI_CODEC_CHMAP_IDX_UNKNOWN)
++		map = info->chmap[hcp->chmap_idx].map;
+ 
+ 	for (i = 0; i < info->max_channels; i++) {
+ 		if (hcp->chmap_idx == HDMI_CODEC_CHMAP_IDX_UNKNOWN)
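
The reordering of hdmi_codec_channel_alloc above matters because, per the updated comment, earlier entries win: selection scans the list in order and takes the first allocation whose channel count matches and whose speaker mask the sink can satisfy. A standalone sketch of that scan, using an abbreviated table and illustrative names:

/* Standalone model of the ordered channel-allocation lookup: take the
 * first entry whose channel count matches and whose speaker mask is a
 * subset of what the sink reports. */
#include <stdio.h>

#define FL  0x01
#define FR  0x02
#define LFE 0x04
#define FC  0x08

struct spk_alloc { int ca_id; int n_ch; unsigned int mask; };

static const struct spk_alloc list[] = {
	{ 0x00, 2, FL | FR },
	{ 0x03, 4, FL | FR | LFE | FC },	/* preferred 4ch entry first */
	{ 0x02, 4, FL | FR | FC },
	{ 0x01, 4, FL | FR | LFE },
};

static int pick_ca(int channels, unsigned int sink_mask)
{
	size_t i;

	for (i = 0; i < sizeof(list) / sizeof(list[0]); i++)
		if (list[i].n_ch == channels &&
		    (list[i].mask & sink_mask) == list[i].mask)
			return list[i].ca_id;
	return -1;
}

int main(void)
{
	/* sink without LFE: falls through 0x03 to 0x02 */
	printf("CA = 0x%02x\n", pick_ca(4, FL | FR | FC));
	return 0;
}

With a 4-channel stream and a sink lacking LFE, the preferred 0x03 entry is skipped and 0x02 is chosen, which is exactly the fallback ordering the rewritten table encodes.
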
+diff --git a/sound/soc/intel/avs/pcm.c b/sound/soc/intel/avs/pcm.c
+index 4af81158035681..945f9c0a6a5455 100644
+--- a/sound/soc/intel/avs/pcm.c
++++ b/sound/soc/intel/avs/pcm.c
+@@ -509,7 +509,7 @@ static int avs_pcm_hw_constraints_init(struct snd_pcm_substream *substream)
+ 			    SNDRV_PCM_HW_PARAM_FORMAT, SNDRV_PCM_HW_PARAM_CHANNELS,
+ 			    SNDRV_PCM_HW_PARAM_RATE, -1);
+ 
+-	return ret;
++	return 0;
+ }
+ 
+ static int avs_dai_fe_startup(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
+diff --git a/sound/soc/intel/boards/sof_rt5682.c b/sound/soc/intel/boards/sof_rt5682.c
+index bc581fea0e3a16..866589fece7a3d 100644
+--- a/sound/soc/intel/boards/sof_rt5682.c
++++ b/sound/soc/intel/boards/sof_rt5682.c
+@@ -870,6 +870,13 @@ static const struct platform_device_id board_ids[] = {
+ 					SOF_SSP_PORT_BT_OFFLOAD(2) |
+ 					SOF_BT_OFFLOAD_PRESENT),
+ 	},
++	{
++		.name = "mtl_rt5682_c1_h02",
++		.driver_data = (kernel_ulong_t)(SOF_RT5682_MCLK_EN |
++					SOF_SSP_PORT_CODEC(1) |
++					/* SSP 0 and SSP 2 are used for HDMI IN */
++					SOF_SSP_MASK_HDMI_CAPTURE(0x5)),
++	},
+ 	{
+ 		.name = "arl_rt5682_c1_h02",
+ 		.driver_data = (kernel_ulong_t)(SOF_RT5682_MCLK_EN |
+diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
+index 4a0ab50d1e50dc..a58842a8c8a641 100644
+--- a/sound/soc/intel/boards/sof_sdw.c
++++ b/sound/soc/intel/boards/sof_sdw.c
+@@ -580,6 +580,47 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
+ 		},
+ 		.driver_data = (void *)(SOC_SDW_CODEC_SPKR),
+ 	},
++	{
++		.callback = sof_sdw_quirk_cb,
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "3838")
++		},
++		.driver_data = (void *)(SOC_SDW_SIDECAR_AMPS),
++	},
++	{
++		.callback = sof_sdw_quirk_cb,
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "3832")
++		},
++		.driver_data = (void *)(SOC_SDW_SIDECAR_AMPS),
++	},
++	{
++		.callback = sof_sdw_quirk_cb,
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "380E")
++		},
++		.driver_data = (void *)(SOC_SDW_SIDECAR_AMPS),
++	},
++	{
++		.callback = sof_sdw_quirk_cb,
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "233C")
++		},
++		/* Note this quirk excludes the CODEC mic */
++		.driver_data = (void *)(SOC_SDW_CODEC_MIC),
++	},
++	{
++		.callback = sof_sdw_quirk_cb,
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "233B")
++		},
++		.driver_data = (void *)(SOC_SDW_SIDECAR_AMPS),
++	},
+ 
+ 	/* ArrowLake devices */
+ 	{
+diff --git a/sound/soc/intel/common/soc-acpi-intel-arl-match.c b/sound/soc/intel/common/soc-acpi-intel-arl-match.c
+index 072b8486d0727c..24d850df77ca8e 100644
+--- a/sound/soc/intel/common/soc-acpi-intel-arl-match.c
++++ b/sound/soc/intel/common/soc-acpi-intel-arl-match.c
+@@ -44,6 +44,31 @@ static const struct snd_soc_acpi_endpoint spk_3_endpoint = {
+ 	.group_id = 1,
+ };
+ 
++/*
++ * RT722 is a multi-function codec, three endpoints are created for
++ * its headset, amp and dmic functions.
++ */
++static const struct snd_soc_acpi_endpoint rt722_endpoints[] = {
++	{
++		.num = 0,
++		.aggregated = 0,
++		.group_position = 0,
++		.group_id = 0,
++	},
++	{
++		.num = 1,
++		.aggregated = 0,
++		.group_position = 0,
++		.group_id = 0,
++	},
++	{
++		.num = 2,
++		.aggregated = 0,
++		.group_position = 0,
++		.group_id = 0,
++	},
++};
++
+ static const struct snd_soc_acpi_adr_device cs35l56_2_lr_adr[] = {
+ 	{
+ 		.adr = 0x00023001FA355601ull,
+@@ -185,6 +210,24 @@ static const struct snd_soc_acpi_adr_device rt711_sdca_0_adr[] = {
+ 	}
+ };
+ 
++static const struct snd_soc_acpi_adr_device rt722_0_single_adr[] = {
++	{
++		.adr = 0x000030025D072201ull,
++		.num_endpoints = ARRAY_SIZE(rt722_endpoints),
++		.endpoints = rt722_endpoints,
++		.name_prefix = "rt722"
++	}
++};
++
++static const struct snd_soc_acpi_adr_device rt1320_2_single_adr[] = {
++	{
++		.adr = 0x000230025D132001ull,
++		.num_endpoints = 1,
++		.endpoints = &single_endpoint,
++		.name_prefix = "rt1320-1"
++	}
++};
++
+ static const struct snd_soc_acpi_link_adr arl_cs42l43_l0[] = {
+ 	{
+ 		.mask = BIT(0),
+@@ -287,6 +330,20 @@ static const struct snd_soc_acpi_link_adr arl_sdca_rvp[] = {
+ 	{}
+ };
+ 
++static const struct snd_soc_acpi_link_adr arl_rt722_l0_rt1320_l2[] = {
++	{
++		.mask = BIT(0),
++		.num_adr = ARRAY_SIZE(rt722_0_single_adr),
++		.adr_d = rt722_0_single_adr,
++	},
++	{
++		.mask = BIT(2),
++		.num_adr = ARRAY_SIZE(rt1320_2_single_adr),
++		.adr_d = rt1320_2_single_adr,
++	},
++	{}
++};
++
+ static const struct snd_soc_acpi_codecs arl_essx_83x6 = {
+ 	.num_codecs = 3,
+ 	.codecs = { "ESSX8316", "ESSX8326", "ESSX8336"},
+@@ -385,6 +442,12 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_arl_sdw_machines[] = {
+ 		.drv_name = "sof_sdw",
+ 		.sof_tplg_filename = "sof-arl-rt711-l0.tplg",
+ 	},
++	{
++		.link_mask = BIT(0) | BIT(2),
++		.links = arl_rt722_l0_rt1320_l2,
++		.drv_name = "sof_sdw",
++		.sof_tplg_filename = "sof-arl-rt722-l0_rt1320-l2.tplg",
++	},
+ 	{},
+ };
+ EXPORT_SYMBOL_GPL(snd_soc_acpi_intel_arl_sdw_machines);
+diff --git a/sound/soc/intel/common/soc-acpi-intel-mtl-match.c b/sound/soc/intel/common/soc-acpi-intel-mtl-match.c
+index d4435a34a3a3f4..fd02c864e25ef9 100644
+--- a/sound/soc/intel/common/soc-acpi-intel-mtl-match.c
++++ b/sound/soc/intel/common/soc-acpi-intel-mtl-match.c
+@@ -42,6 +42,13 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_mtl_machines[] = {
+ 					SND_SOC_ACPI_TPLG_INTEL_SSP_MSB |
+ 					SND_SOC_ACPI_TPLG_INTEL_DMIC_NUMBER,
+ 	},
++	{
++		.comp_ids = &mtl_rt5682_rt5682s_hp,
++		.drv_name = "mtl_rt5682_c1_h02",
++		.machine_quirk = snd_soc_acpi_codec_list,
++		.quirk_data = &mtl_lt6911_hdmi,
++		.sof_tplg_filename = "sof-mtl-rt5682-ssp1-hdmi-ssp02.tplg",
++	},
+ 	/* place boards for each headphone codec: sof driver will complete the
+ 	 * tplg name and machine driver will detect the amp type
+ 	 */
+diff --git a/sound/soc/mediatek/mt8188/mt8188-mt6359.c b/sound/soc/mediatek/mt8188/mt8188-mt6359.c
+index 4eed90d13a5326..62429e8e57b559 100644
+--- a/sound/soc/mediatek/mt8188/mt8188-mt6359.c
++++ b/sound/soc/mediatek/mt8188/mt8188-mt6359.c
+@@ -188,9 +188,7 @@ SND_SOC_DAILINK_DEFS(pcm1,
+ SND_SOC_DAILINK_DEFS(ul_src,
+ 		     DAILINK_COMP_ARRAY(COMP_CPU("UL_SRC")),
+ 		     DAILINK_COMP_ARRAY(COMP_CODEC("mt6359-sound",
+-						   "mt6359-snd-codec-aif1"),
+-					COMP_CODEC("dmic-codec",
+-						   "dmic-hifi")),
++						   "mt6359-snd-codec-aif1")),
+ 		     DAILINK_COMP_ARRAY(COMP_EMPTY()));
+ 
+ SND_SOC_DAILINK_DEFS(AFE_SOF_DL2,
+diff --git a/sound/soc/sdw_utils/soc_sdw_utils.c b/sound/soc/sdw_utils/soc_sdw_utils.c
+index a6070f822eb9e4..e6ac5c0fd3bec8 100644
+--- a/sound/soc/sdw_utils/soc_sdw_utils.c
++++ b/sound/soc/sdw_utils/soc_sdw_utils.c
+@@ -363,6 +363,8 @@ struct asoc_sdw_codec_info codec_info_list[] = {
+ 				.num_controls = ARRAY_SIZE(generic_spk_controls),
+ 				.widgets = generic_spk_widgets,
+ 				.num_widgets = ARRAY_SIZE(generic_spk_widgets),
++				.quirk = SOC_SDW_CODEC_SPKR,
++				.quirk_exclude = true,
+ 			},
+ 			{
+ 				.direction = {false, true},
+@@ -487,6 +489,8 @@ struct asoc_sdw_codec_info codec_info_list[] = {
+ 				.rtd_init = asoc_sdw_cs42l43_dmic_rtd_init,
+ 				.widgets = generic_dmic_widgets,
+ 				.num_widgets = ARRAY_SIZE(generic_dmic_widgets),
++				.quirk = SOC_SDW_CODEC_MIC,
++				.quirk_exclude = true,
+ 			},
+ 			{
+ 				.direction = {false, true},
+@@ -1112,7 +1116,8 @@ int asoc_sdw_parse_sdw_endpoints(struct snd_soc_card *card,
+ 				dai_info = &codec_info->dais[adr_end->num];
+ 				soc_dai = asoc_sdw_find_dailink(soc_dais, adr_end);
+ 
+-				if (dai_info->quirk && !(dai_info->quirk & ctx->mc_quirk))
++				if (dai_info->quirk &&
++				    !(dai_info->quirk_exclude ^ !!(dai_info->quirk & ctx->mc_quirk)))
+ 					continue;
+ 
+ 				dev_dbg(dev,
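
The quirk_exclude test added above folds two behaviours into one expression: with quirk_exclude clear the quirk bit opts a DAI in, and with it set the same bit opts the DAI out. A standalone truth-table sketch of that condition:

/* Standalone model of the quirk/quirk_exclude test: with quirk_exclude
 * clear a DAI is kept only when its quirk bit is set in mc_quirk; with
 * quirk_exclude set the same bit disables the DAI instead. */
#include <stdio.h>
#include <stdbool.h>

static bool keep_dai(unsigned int quirk, bool quirk_exclude,
		     unsigned int mc_quirk)
{
	bool set = !!(quirk & mc_quirk);

	if (quirk && !(quirk_exclude ^ set))
		return false;	/* mirrors the 'continue' in the loop */
	return true;
}

int main(void)
{
	printf("opt-in,  bit set:   %d\n", keep_dai(0x4, false, 0x4));
	printf("opt-in,  bit clear: %d\n", keep_dai(0x4, false, 0x0));
	printf("opt-out, bit set:   %d\n", keep_dai(0x4, true,  0x4));
	printf("opt-out, bit clear: %d\n", keep_dai(0x4, true,  0x0));
	return 0;
}
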
+diff --git a/sound/soc/sof/ipc3-topology.c b/sound/soc/sof/ipc3-topology.c
+index be61e377e59e03..e98b53b67d12b9 100644
+--- a/sound/soc/sof/ipc3-topology.c
++++ b/sound/soc/sof/ipc3-topology.c
+@@ -20,6 +20,9 @@
+ /* size of tplg ABI in bytes */
+ #define SOF_IPC3_TPLG_ABI_SIZE 3
+ 
++/* Base of SOF_DAI_INTEL_ALH, this should be aligned with SOC_SDW_INTEL_BIDIR_PDI_BASE */
++#define INTEL_ALH_DAI_INDEX_BASE 2
++
+ struct sof_widget_data {
+ 	int ctrl_type;
+ 	int ipc_cmd;
+@@ -1585,14 +1588,26 @@ static int sof_ipc3_widget_setup_comp_dai(struct snd_sof_widget *swidget)
+ 	ret = sof_update_ipc_object(scomp, comp_dai, SOF_DAI_TOKENS, swidget->tuples,
+ 				    swidget->num_tuples, sizeof(*comp_dai), 1);
+ 	if (ret < 0)
+-		goto free;
++		goto free_comp;
+ 
+ 	/* update comp_tokens */
+ 	ret = sof_update_ipc_object(scomp, &comp_dai->config, SOF_COMP_TOKENS,
+ 				    swidget->tuples, swidget->num_tuples,
+ 				    sizeof(comp_dai->config), 1);
+ 	if (ret < 0)
+-		goto free;
++		goto free_comp;
++
++	/* Subtract the base to match the FW dai index. */
++	if (comp_dai->type == SOF_DAI_INTEL_ALH) {
++		if (comp_dai->dai_index < INTEL_ALH_DAI_INDEX_BASE) {
++			dev_err(sdev->dev,
++				"Invalid ALH dai index %d, only Pin numbers >= %d can be used\n",
++				comp_dai->dai_index, INTEL_ALH_DAI_INDEX_BASE);
++			ret = -EINVAL;
++			goto free_comp;
++		}
++		comp_dai->dai_index -= INTEL_ALH_DAI_INDEX_BASE;
++	}
+ 
+ 	dev_dbg(scomp->dev, "dai %s: type %d index %d\n",
+ 		swidget->widget->name, comp_dai->type, comp_dai->dai_index);
+@@ -2167,8 +2182,16 @@ static int sof_ipc3_dai_config(struct snd_sof_dev *sdev, struct snd_sof_widget *
+ 	case SOF_DAI_INTEL_ALH:
+ 		if (data) {
+ 			/* save the dai_index during hw_params and reuse it for hw_free */
+-			if (flags & SOF_DAI_CONFIG_FLAGS_HW_PARAMS)
+-				config->dai_index = data->dai_index;
++			if (flags & SOF_DAI_CONFIG_FLAGS_HW_PARAMS) {
++				/* Subtract the base to match the FW dai index. */
++				if (data->dai_index < INTEL_ALH_DAI_INDEX_BASE) {
++					dev_err(sdev->dev,
++						"Invalid ALH dai index %d, only Pin numbers >= %d can be used\n",
++						config->dai_index, INTEL_ALH_DAI_INDEX_BASE);
++					return -EINVAL;
++				}
++				config->dai_index = data->dai_index - INTEL_ALH_DAI_INDEX_BASE;
++			}
+ 			config->alh.stream_id = data->dai_data;
+ 		}
+ 		break;
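
The INTEL_ALH_DAI_INDEX_BASE hunks above apply the same translation in two places: topology pin numbers below the base are rejected, and valid ones are rebased into the firmware's zero-based index space. A standalone sketch of that logic (the -1 here stands in for the driver's -EINVAL):

/* Standalone model of the ALH index translation: topology pin numbers
 * start at INTEL_ALH_DAI_INDEX_BASE, firmware indices at 0. */
#include <stdio.h>

#define INTEL_ALH_DAI_INDEX_BASE 2

static int alh_fw_index(int dai_index)
{
	if (dai_index < INTEL_ALH_DAI_INDEX_BASE)
		return -1;	/* out-of-range pin, reject */
	return dai_index - INTEL_ALH_DAI_INDEX_BASE;
}

int main(void)
{
	printf("pin 2 -> fw %d, pin 1 -> fw %d\n",
	       alh_fw_index(2), alh_fw_index(1));
	return 0;
}
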
+diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
+index 568099467dbbcc..a29f28eb7d0c64 100644
+--- a/sound/usb/endpoint.c
++++ b/sound/usb/endpoint.c
+@@ -403,10 +403,15 @@ static int prepare_inbound_urb(struct snd_usb_endpoint *ep,
+ static void notify_xrun(struct snd_usb_endpoint *ep)
+ {
+ 	struct snd_usb_substream *data_subs;
++	struct snd_pcm_substream *psubs;
+ 
+ 	data_subs = READ_ONCE(ep->data_subs);
+-	if (data_subs && data_subs->pcm_substream)
+-		snd_pcm_stop_xrun(data_subs->pcm_substream);
++	if (!data_subs)
++		return;
++	psubs = data_subs->pcm_substream;
++	if (psubs && psubs->runtime &&
++	    psubs->runtime->state == SNDRV_PCM_STATE_RUNNING)
++		snd_pcm_stop_xrun(psubs);
+ }
+ 
+ static struct snd_usb_packet_info *
+@@ -562,7 +567,10 @@ static void snd_complete_urb(struct urb *urb)
+ 			push_back_to_ready_list(ep, ctx);
+ 			clear_bit(ctx->index, &ep->active_mask);
+ 			snd_usb_queue_pending_output_urbs(ep, false);
+-			atomic_dec(&ep->submitted_urbs); /* decrement at last */
++			/* decrement at last, and check xrun */
++			if (atomic_dec_and_test(&ep->submitted_urbs) &&
++			    !snd_usb_endpoint_implicit_feedback_sink(ep))
++				notify_xrun(ep);
+ 			return;
+ 		}
+ 
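
The atomic_dec_and_test() change above moves the xrun check to the completion that drops the in-flight URB count to zero, so only the last outstanding completion can report an empty stream. A standalone sketch of that gate using C11 atomics:

/* Standalone model of the atomic_dec_and_test() gate: only the
 * completion that brings the in-flight count to zero reports XRUN. */
#include <stdio.h>
#include <stdatomic.h>

static atomic_int submitted_urbs;

static void complete_urb(void)
{
	/* fetch_sub returns the old value; old == 1 means we hit zero */
	if (atomic_fetch_sub(&submitted_urbs, 1) == 1)
		printf("last URB done: check for xrun\n");
	else
		printf("URBs still in flight\n");
}

int main(void)
{
	atomic_store(&submitted_urbs, 2);
	complete_urb();
	complete_urb();
	return 0;
}
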
+diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
+index bd67027c767751..0591da2839269b 100644
+--- a/sound/usb/mixer.c
++++ b/sound/usb/mixer.c
+@@ -1084,6 +1084,21 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
+ 				  struct snd_kcontrol *kctl)
+ {
+ 	struct snd_usb_audio *chip = cval->head.mixer->chip;
++
++	if (chip->quirk_flags & QUIRK_FLAG_MIC_RES_384) {
++		if (!strcmp(kctl->id.name, "Mic Capture Volume")) {
++			usb_audio_info(chip,
++				"set resolution quirk: cval->res = 384\n");
++			cval->res = 384;
++		}
++	} else if (chip->quirk_flags & QUIRK_FLAG_MIC_RES_16) {
++		if (!strcmp(kctl->id.name, "Mic Capture Volume")) {
++			usb_audio_info(chip,
++				"set resolution quirk: cval->res = 16\n");
++			cval->res = 16;
++		}
++	}
++
+ 	switch (chip->usb_id) {
+ 	case USB_ID(0x0763, 0x2030): /* M-Audio Fast Track C400 */
+ 	case USB_ID(0x0763, 0x2031): /* M-Audio Fast Track C600 */
+@@ -1168,27 +1183,6 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
+ 		}
+ 		break;
+ 
+-	case USB_ID(0x046d, 0x0807): /* Logitech Webcam C500 */
+-	case USB_ID(0x046d, 0x0808):
+-	case USB_ID(0x046d, 0x0809):
+-	case USB_ID(0x046d, 0x0819): /* Logitech Webcam C210 */
+-	case USB_ID(0x046d, 0x081b): /* HD Webcam c310 */
+-	case USB_ID(0x046d, 0x081d): /* HD Webcam c510 */
+-	case USB_ID(0x046d, 0x0825): /* HD Webcam c270 */
+-	case USB_ID(0x046d, 0x0826): /* HD Webcam c525 */
+-	case USB_ID(0x046d, 0x08ca): /* Logitech Quickcam Fusion */
+-	case USB_ID(0x046d, 0x0991):
+-	case USB_ID(0x046d, 0x09a2): /* QuickCam Communicate Deluxe/S7500 */
+-	/* Most audio usb devices lie about volume resolution.
+-	 * Most Logitech webcams have res = 384.
+-	 * Probably there is some logitech magic behind this number --fishor
+-	 */
+-		if (!strcmp(kctl->id.name, "Mic Capture Volume")) {
+-			usb_audio_info(chip,
+-				"set resolution quirk: cval->res = 384\n");
+-			cval->res = 384;
+-		}
+-		break;
+ 	case USB_ID(0x0495, 0x3042): /* ESS Technology Asus USB DAC */
+ 		if ((strstr(kctl->id.name, "Playback Volume") != NULL) ||
+ 			strstr(kctl->id.name, "Capture Volume") != NULL) {
+@@ -1197,28 +1191,6 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
+ 			cval->res = 1;
+ 		}
+ 		break;
+-	case USB_ID(0x1224, 0x2a25): /* Jieli Technology USB PHY 2.0 */
+-		if (!strcmp(kctl->id.name, "Mic Capture Volume")) {
+-			usb_audio_info(chip,
+-				"set resolution quirk: cval->res = 16\n");
+-			cval->res = 16;
+-		}
+-		break;
+-	case USB_ID(0x1bcf, 0x2283): /* NexiGo N930AF FHD Webcam */
+-	case USB_ID(0x03f0, 0x654a): /* HP 320 FHD Webcam */
+-		if (!strcmp(kctl->id.name, "Mic Capture Volume")) {
+-			usb_audio_info(chip,
+-				"set resolution quirk: cval->res = 16\n");
+-			cval->res = 16;
+-		}
+-		break;
+-	case USB_ID(0x1bcf, 0x2281): /* HD Webcam */
+-		if (!strcmp(kctl->id.name, "Mic Capture Volume")) {
+-			usb_audio_info(chip,
+-				"set resolution quirk: cval->res = 16\n");
+-			cval->res = 16;
+-		}
+-		break;
+ 	}
+ }
+ 
+diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
+index 23260aa1919d32..0e9b5431a47f20 100644
+--- a/sound/usb/mixer_maps.c
++++ b/sound/usb/mixer_maps.c
+@@ -621,6 +621,16 @@ static const struct usbmix_ctl_map usbmix_ctl_maps[] = {
+ 		.id = USB_ID(0x1b1c, 0x0a42),
+ 		.map = corsair_virtuoso_map,
+ 	},
++	{
++		/* Corsair HS80 RGB Wireless (wired mode) */
++		.id = USB_ID(0x1b1c, 0x0a6a),
++		.map = corsair_virtuoso_map,
++	},
++	{
++		/* Corsair HS80 RGB Wireless (wireless mode) */
++		.id = USB_ID(0x1b1c, 0x0a6b),
++		.map = corsair_virtuoso_map,
++	},
+ 	{	/* Gigabyte TRX40 Aorus Master (rear panel + front mic) */
+ 		.id = USB_ID(0x0414, 0xa001),
+ 		.map = aorus_master_alc1220vb_map,
+diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
+index 6456e87e2f3974..a95ebcf4e46e76 100644
+--- a/sound/usb/mixer_quirks.c
++++ b/sound/usb/mixer_quirks.c
+@@ -4059,6 +4059,7 @@ int snd_usb_mixer_apply_create_quirk(struct usb_mixer_interface *mixer)
+ 		err = snd_bbfpro_controls_create(mixer);
+ 		break;
+ 	case USB_ID(0x2a39, 0x3f8c): /* RME Digiface USB */
++	case USB_ID(0x2a39, 0x3fa0): /* RME Digiface USB (alternate) */
+ 		err = snd_rme_digiface_controls_create(mixer);
+ 		break;
+ 	case USB_ID(0x2b73, 0x0017): /* Pioneer DJ DJM-250MK2 */
+diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
+index 199d0603cf8e59..3f8beacca27a17 100644
+--- a/sound/usb/quirks-table.h
++++ b/sound/usb/quirks-table.h
+@@ -3616,176 +3616,181 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ 		}
+ 	}
+ },
+-{
+-	/* Only claim interface 0 */
+-	.match_flags = USB_DEVICE_ID_MATCH_VENDOR |
+-		       USB_DEVICE_ID_MATCH_PRODUCT |
+-		       USB_DEVICE_ID_MATCH_INT_CLASS |
+-		       USB_DEVICE_ID_MATCH_INT_NUMBER,
+-	.idVendor = 0x2a39,
+-	.idProduct = 0x3f8c,
+-	.bInterfaceClass = USB_CLASS_VENDOR_SPEC,
+-	.bInterfaceNumber = 0,
+-	QUIRK_DRIVER_INFO {
+-		QUIRK_DATA_COMPOSITE {
++#define QUIRK_RME_DIGIFACE(pid) \
++{ \
++	/* Only claim interface 0 */ \
++	.match_flags = USB_DEVICE_ID_MATCH_VENDOR | \
++		       USB_DEVICE_ID_MATCH_PRODUCT | \
++		       USB_DEVICE_ID_MATCH_INT_CLASS | \
++		       USB_DEVICE_ID_MATCH_INT_NUMBER, \
++	.idVendor = 0x2a39, \
++	.idProduct = pid, \
++	.bInterfaceClass = USB_CLASS_VENDOR_SPEC, \
++	.bInterfaceNumber = 0, \
++	QUIRK_DRIVER_INFO { \
++		QUIRK_DATA_COMPOSITE { \
+ 			/*
+ 			 * Three modes depending on sample rate band,
+ 			 * with different channel counts for in/out
+-			 */
+-			{ QUIRK_DATA_STANDARD_MIXER(0) },
+-			{
+-				QUIRK_DATA_AUDIOFORMAT(0) {
+-					.formats = SNDRV_PCM_FMTBIT_S32_LE,
+-					.channels = 34, // outputs
+-					.fmt_bits = 24,
+-					.iface = 0,
+-					.altsetting = 1,
+-					.altset_idx = 1,
+-					.endpoint = 0x02,
+-					.ep_idx = 1,
+-					.ep_attr = USB_ENDPOINT_XFER_ISOC |
+-						USB_ENDPOINT_SYNC_ASYNC,
+-					.rates = SNDRV_PCM_RATE_32000 |
+-						SNDRV_PCM_RATE_44100 |
+-						SNDRV_PCM_RATE_48000,
+-					.rate_min = 32000,
+-					.rate_max = 48000,
+-					.nr_rates = 3,
+-					.rate_table = (unsigned int[]) {
+-						32000, 44100, 48000,
+-					},
+-					.sync_ep = 0x81,
+-					.sync_iface = 0,
+-					.sync_altsetting = 1,
+-					.sync_ep_idx = 0,
+-					.implicit_fb = 1,
+-				},
+-			},
+-			{
+-				QUIRK_DATA_AUDIOFORMAT(0) {
+-					.formats = SNDRV_PCM_FMTBIT_S32_LE,
+-					.channels = 18, // outputs
+-					.fmt_bits = 24,
+-					.iface = 0,
+-					.altsetting = 1,
+-					.altset_idx = 1,
+-					.endpoint = 0x02,
+-					.ep_idx = 1,
+-					.ep_attr = USB_ENDPOINT_XFER_ISOC |
+-						USB_ENDPOINT_SYNC_ASYNC,
+-					.rates = SNDRV_PCM_RATE_64000 |
+-						SNDRV_PCM_RATE_88200 |
+-						SNDRV_PCM_RATE_96000,
+-					.rate_min = 64000,
+-					.rate_max = 96000,
+-					.nr_rates = 3,
+-					.rate_table = (unsigned int[]) {
+-						64000, 88200, 96000,
+-					},
+-					.sync_ep = 0x81,
+-					.sync_iface = 0,
+-					.sync_altsetting = 1,
+-					.sync_ep_idx = 0,
+-					.implicit_fb = 1,
+-				},
+-			},
+-			{
+-				QUIRK_DATA_AUDIOFORMAT(0) {
+-					.formats = SNDRV_PCM_FMTBIT_S32_LE,
+-					.channels = 10, // outputs
+-					.fmt_bits = 24,
+-					.iface = 0,
+-					.altsetting = 1,
+-					.altset_idx = 1,
+-					.endpoint = 0x02,
+-					.ep_idx = 1,
+-					.ep_attr = USB_ENDPOINT_XFER_ISOC |
+-						USB_ENDPOINT_SYNC_ASYNC,
+-					.rates = SNDRV_PCM_RATE_KNOT |
+-						SNDRV_PCM_RATE_176400 |
+-						SNDRV_PCM_RATE_192000,
+-					.rate_min = 128000,
+-					.rate_max = 192000,
+-					.nr_rates = 3,
+-					.rate_table = (unsigned int[]) {
+-						128000, 176400, 192000,
+-					},
+-					.sync_ep = 0x81,
+-					.sync_iface = 0,
+-					.sync_altsetting = 1,
+-					.sync_ep_idx = 0,
+-					.implicit_fb = 1,
+-				},
+-			},
+-			{
+-				QUIRK_DATA_AUDIOFORMAT(0) {
+-					.formats = SNDRV_PCM_FMTBIT_S32_LE,
+-					.channels = 32, // inputs
+-					.fmt_bits = 24,
+-					.iface = 0,
+-					.altsetting = 1,
+-					.altset_idx = 1,
+-					.endpoint = 0x81,
+-					.ep_attr = USB_ENDPOINT_XFER_ISOC |
+-						USB_ENDPOINT_SYNC_ASYNC,
+-					.rates = SNDRV_PCM_RATE_32000 |
+-						SNDRV_PCM_RATE_44100 |
+-						SNDRV_PCM_RATE_48000,
+-					.rate_min = 32000,
+-					.rate_max = 48000,
+-					.nr_rates = 3,
+-					.rate_table = (unsigned int[]) {
+-						32000, 44100, 48000,
+-					}
+-				}
+-			},
+-			{
+-				QUIRK_DATA_AUDIOFORMAT(0) {
+-					.formats = SNDRV_PCM_FMTBIT_S32_LE,
+-					.channels = 16, // inputs
+-					.fmt_bits = 24,
+-					.iface = 0,
+-					.altsetting = 1,
+-					.altset_idx = 1,
+-					.endpoint = 0x81,
+-					.ep_attr = USB_ENDPOINT_XFER_ISOC |
+-						USB_ENDPOINT_SYNC_ASYNC,
+-					.rates = SNDRV_PCM_RATE_64000 |
+-						SNDRV_PCM_RATE_88200 |
+-						SNDRV_PCM_RATE_96000,
+-					.rate_min = 64000,
+-					.rate_max = 96000,
+-					.nr_rates = 3,
+-					.rate_table = (unsigned int[]) {
+-						64000, 88200, 96000,
+-					}
+-				}
+-			},
+-			{
+-				QUIRK_DATA_AUDIOFORMAT(0) {
+-					.formats = SNDRV_PCM_FMTBIT_S32_LE,
+-					.channels = 8, // inputs
+-					.fmt_bits = 24,
+-					.iface = 0,
+-					.altsetting = 1,
+-					.altset_idx = 1,
+-					.endpoint = 0x81,
+-					.ep_attr = USB_ENDPOINT_XFER_ISOC |
+-						USB_ENDPOINT_SYNC_ASYNC,
+-					.rates = SNDRV_PCM_RATE_KNOT |
+-						SNDRV_PCM_RATE_176400 |
+-						SNDRV_PCM_RATE_192000,
+-					.rate_min = 128000,
+-					.rate_max = 192000,
+-					.nr_rates = 3,
+-					.rate_table = (unsigned int[]) {
+-						128000, 176400, 192000,
+-					}
+-				}
+-			},
+-			QUIRK_COMPOSITE_END
+-		}
+-	}
+-},
++			 */ \
++			{ QUIRK_DATA_STANDARD_MIXER(0) }, \
++			{ \
++				QUIRK_DATA_AUDIOFORMAT(0) { \
++					.formats = SNDRV_PCM_FMTBIT_S32_LE, \
++					.channels = 34, /* outputs */ \
++					.fmt_bits = 24, \
++					.iface = 0, \
++					.altsetting = 1, \
++					.altset_idx = 1, \
++					.endpoint = 0x02, \
++					.ep_idx = 1, \
++					.ep_attr = USB_ENDPOINT_XFER_ISOC | \
++						USB_ENDPOINT_SYNC_ASYNC, \
++					.rates = SNDRV_PCM_RATE_32000 | \
++						SNDRV_PCM_RATE_44100 | \
++						SNDRV_PCM_RATE_48000, \
++					.rate_min = 32000, \
++					.rate_max = 48000, \
++					.nr_rates = 3, \
++					.rate_table = (unsigned int[]) { \
++						32000, 44100, 48000, \
++					}, \
++					.sync_ep = 0x81, \
++					.sync_iface = 0, \
++					.sync_altsetting = 1, \
++					.sync_ep_idx = 0, \
++					.implicit_fb = 1, \
++				}, \
++			}, \
++			{ \
++				QUIRK_DATA_AUDIOFORMAT(0) { \
++					.formats = SNDRV_PCM_FMTBIT_S32_LE, \
++					.channels = 18, /* outputs */ \
++					.fmt_bits = 24, \
++					.iface = 0, \
++					.altsetting = 1, \
++					.altset_idx = 1, \
++					.endpoint = 0x02, \
++					.ep_idx = 1, \
++					.ep_attr = USB_ENDPOINT_XFER_ISOC | \
++						USB_ENDPOINT_SYNC_ASYNC, \
++					.rates = SNDRV_PCM_RATE_64000 | \
++						SNDRV_PCM_RATE_88200 | \
++						SNDRV_PCM_RATE_96000, \
++					.rate_min = 64000, \
++					.rate_max = 96000, \
++					.nr_rates = 3, \
++					.rate_table = (unsigned int[]) { \
++						64000, 88200, 96000, \
++					}, \
++					.sync_ep = 0x81, \
++					.sync_iface = 0, \
++					.sync_altsetting = 1, \
++					.sync_ep_idx = 0, \
++					.implicit_fb = 1, \
++				}, \
++			}, \
++			{ \
++				QUIRK_DATA_AUDIOFORMAT(0) { \
++					.formats = SNDRV_PCM_FMTBIT_S32_LE, \
++					.channels = 10, /* outputs */ \
++					.fmt_bits = 24, \
++					.iface = 0, \
++					.altsetting = 1, \
++					.altset_idx = 1, \
++					.endpoint = 0x02, \
++					.ep_idx = 1, \
++					.ep_attr = USB_ENDPOINT_XFER_ISOC | \
++						USB_ENDPOINT_SYNC_ASYNC, \
++					.rates = SNDRV_PCM_RATE_KNOT | \
++						SNDRV_PCM_RATE_176400 | \
++						SNDRV_PCM_RATE_192000, \
++					.rate_min = 128000, \
++					.rate_max = 192000, \
++					.nr_rates = 3, \
++					.rate_table = (unsigned int[]) { \
++						128000, 176400, 192000, \
++					}, \
++					.sync_ep = 0x81, \
++					.sync_iface = 0, \
++					.sync_altsetting = 1, \
++					.sync_ep_idx = 0, \
++					.implicit_fb = 1, \
++				}, \
++			}, \
++			{ \
++				QUIRK_DATA_AUDIOFORMAT(0) { \
++					.formats = SNDRV_PCM_FMTBIT_S32_LE, \
++					.channels = 32, /* inputs */ \
++					.fmt_bits = 24, \
++					.iface = 0, \
++					.altsetting = 1, \
++					.altset_idx = 1, \
++					.endpoint = 0x81, \
++					.ep_attr = USB_ENDPOINT_XFER_ISOC | \
++						USB_ENDPOINT_SYNC_ASYNC, \
++					.rates = SNDRV_PCM_RATE_32000 | \
++						SNDRV_PCM_RATE_44100 | \
++						SNDRV_PCM_RATE_48000, \
++					.rate_min = 32000, \
++					.rate_max = 48000, \
++					.nr_rates = 3, \
++					.rate_table = (unsigned int[]) { \
++						32000, 44100, 48000, \
++					} \
++				} \
++			}, \
++			{ \
++				QUIRK_DATA_AUDIOFORMAT(0) { \
++					.formats = SNDRV_PCM_FMTBIT_S32_LE, \
++					.channels = 16, /* inputs */ \
++					.fmt_bits = 24, \
++					.iface = 0, \
++					.altsetting = 1, \
++					.altset_idx = 1, \
++					.endpoint = 0x81, \
++					.ep_attr = USB_ENDPOINT_XFER_ISOC | \
++						USB_ENDPOINT_SYNC_ASYNC, \
++					.rates = SNDRV_PCM_RATE_64000 | \
++						SNDRV_PCM_RATE_88200 | \
++						SNDRV_PCM_RATE_96000, \
++					.rate_min = 64000, \
++					.rate_max = 96000, \
++					.nr_rates = 3, \
++					.rate_table = (unsigned int[]) { \
++						64000, 88200, 96000, \
++					} \
++				} \
++			}, \
++			{ \
++				QUIRK_DATA_AUDIOFORMAT(0) { \
++					.formats = SNDRV_PCM_FMTBIT_S32_LE, \
++					.channels = 8, /* inputs */ \
++					.fmt_bits = 24, \
++					.iface = 0, \
++					.altsetting = 1, \
++					.altset_idx = 1, \
++					.endpoint = 0x81, \
++					.ep_attr = USB_ENDPOINT_XFER_ISOC | \
++						USB_ENDPOINT_SYNC_ASYNC, \
++					.rates = SNDRV_PCM_RATE_KNOT | \
++						SNDRV_PCM_RATE_176400 | \
++						SNDRV_PCM_RATE_192000, \
++					.rate_min = 128000, \
++					.rate_max = 192000, \
++					.nr_rates = 3, \
++					.rate_table = (unsigned int[]) { \
++						128000, 176400, 192000, \
++					} \
++				} \
++			}, \
++			QUIRK_COMPOSITE_END \
++		} \
++	} \
++}
++
++QUIRK_RME_DIGIFACE(0x3f8c),
++QUIRK_RME_DIGIFACE(0x3fa0),
++
+ #undef USB_DEVICE_VENDOR_SPEC
+ #undef USB_AUDIO_DEVICE
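
The QUIRK_RME_DIGIFACE(pid) conversion above is the standard trick for deduplicating near-identical table entries: the whole initializer becomes a macro parameterized on the product ID, so the alternate 0x3fa0 device costs one line instead of a second ~180-line block. A standalone sketch of the pattern, with an abbreviated struct and illustrative names:

/* Standalone model of the table-entry macro pattern: one macro expands
 * to a full initializer, so each product ID adds a single line. */
#include <stdio.h>

struct usb_quirk { unsigned short vid, pid; const char *name; };

#define QUIRK_RME(pid) { 0x2a39, (pid), "RME Digiface USB" }

static const struct usb_quirk quirks[] = {
	QUIRK_RME(0x3f8c),
	QUIRK_RME(0x3fa0),	/* alternate product ID, same layout */
};

int main(void)
{
	size_t i;

	for (i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++)
		printf("%04x:%04x %s\n", quirks[i].vid, quirks[i].pid,
		       quirks[i].name);
	return 0;
}
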
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 8538fdfce3535b..00101875d9a8d5 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -555,7 +555,7 @@ int snd_usb_create_quirk(struct snd_usb_audio *chip,
+ static int snd_usb_extigy_boot_quirk(struct usb_device *dev, struct usb_interface *intf)
+ {
+ 	struct usb_host_config *config = dev->actconfig;
+-	struct usb_device_descriptor new_device_descriptor;
++	struct usb_device_descriptor *new_device_descriptor __free(kfree) = NULL;
+ 	int err;
+ 
+ 	if (le16_to_cpu(get_cfg_desc(config)->wTotalLength) == EXTIGY_FIRMWARE_SIZE_OLD ||
+@@ -566,15 +566,19 @@ static int snd_usb_extigy_boot_quirk(struct usb_device *dev, struct usb_interfac
+ 				      0x10, 0x43, 0x0001, 0x000a, NULL, 0);
+ 		if (err < 0)
+ 			dev_dbg(&dev->dev, "error sending boot message: %d\n", err);
++
++		new_device_descriptor = kmalloc(sizeof(*new_device_descriptor), GFP_KERNEL);
++		if (!new_device_descriptor)
++			return -ENOMEM;
+ 		err = usb_get_descriptor(dev, USB_DT_DEVICE, 0,
+-				&new_device_descriptor, sizeof(new_device_descriptor));
++				new_device_descriptor, sizeof(*new_device_descriptor));
+ 		if (err < 0)
+ 			dev_dbg(&dev->dev, "error usb_get_descriptor: %d\n", err);
+-		if (new_device_descriptor.bNumConfigurations > dev->descriptor.bNumConfigurations)
++		if (new_device_descriptor->bNumConfigurations > dev->descriptor.bNumConfigurations)
+ 			dev_dbg(&dev->dev, "error too large bNumConfigurations: %d\n",
+-				new_device_descriptor.bNumConfigurations);
++				new_device_descriptor->bNumConfigurations);
+ 		else
+-			memcpy(&dev->descriptor, &new_device_descriptor, sizeof(dev->descriptor));
++			memcpy(&dev->descriptor, new_device_descriptor, sizeof(dev->descriptor));
+ 		err = usb_reset_configuration(dev);
+ 		if (err < 0)
+ 			dev_dbg(&dev->dev, "error usb_reset_configuration: %d\n", err);
+@@ -906,7 +910,7 @@ static void mbox2_setup_48_24_magic(struct usb_device *dev)
+ static int snd_usb_mbox2_boot_quirk(struct usb_device *dev)
+ {
+ 	struct usb_host_config *config = dev->actconfig;
+-	struct usb_device_descriptor new_device_descriptor;
++	struct usb_device_descriptor *new_device_descriptor __free(kfree) = NULL;
+ 	int err;
+ 	u8 bootresponse[0x12];
+ 	int fwsize;
+@@ -941,15 +945,19 @@ static int snd_usb_mbox2_boot_quirk(struct usb_device *dev)
+ 
+ 	dev_dbg(&dev->dev, "device initialised!\n");
+ 
++	new_device_descriptor = kmalloc(sizeof(*new_device_descriptor), GFP_KERNEL);
++	if (!new_device_descriptor)
++		return -ENOMEM;
++
+ 	err = usb_get_descriptor(dev, USB_DT_DEVICE, 0,
+-		&new_device_descriptor, sizeof(new_device_descriptor));
++		new_device_descriptor, sizeof(*new_device_descriptor));
+ 	if (err < 0)
+ 		dev_dbg(&dev->dev, "error usb_get_descriptor: %d\n", err);
+-	if (new_device_descriptor.bNumConfigurations > dev->descriptor.bNumConfigurations)
++	if (new_device_descriptor->bNumConfigurations > dev->descriptor.bNumConfigurations)
+ 		dev_dbg(&dev->dev, "error too large bNumConfigurations: %d\n",
+-			new_device_descriptor.bNumConfigurations);
++			new_device_descriptor->bNumConfigurations);
+ 	else
+-		memcpy(&dev->descriptor, &new_device_descriptor, sizeof(dev->descriptor));
++		memcpy(&dev->descriptor, new_device_descriptor, sizeof(dev->descriptor));
+ 
+ 	err = usb_reset_configuration(dev);
+ 	if (err < 0)
+@@ -1259,7 +1267,7 @@ static void mbox3_setup_defaults(struct usb_device *dev)
+ static int snd_usb_mbox3_boot_quirk(struct usb_device *dev)
+ {
+ 	struct usb_host_config *config = dev->actconfig;
+-	struct usb_device_descriptor new_device_descriptor;
++	struct usb_device_descriptor *new_device_descriptor __free(kfree) = NULL;
+ 	int err;
+ 	int descriptor_size;
+ 
+@@ -1272,15 +1280,19 @@ static int snd_usb_mbox3_boot_quirk(struct usb_device *dev)
+ 
+ 	dev_dbg(&dev->dev, "MBOX3: device initialised!\n");
+ 
++	new_device_descriptor = kmalloc(sizeof(*new_device_descriptor), GFP_KERNEL);
++	if (!new_device_descriptor)
++		return -ENOMEM;
++
+ 	err = usb_get_descriptor(dev, USB_DT_DEVICE, 0,
+-		&new_device_descriptor, sizeof(new_device_descriptor));
++		new_device_descriptor, sizeof(*new_device_descriptor));
+ 	if (err < 0)
+ 		dev_dbg(&dev->dev, "MBOX3: error usb_get_descriptor: %d\n", err);
+-	if (new_device_descriptor.bNumConfigurations > dev->descriptor.bNumConfigurations)
++	if (new_device_descriptor->bNumConfigurations > dev->descriptor.bNumConfigurations)
+ 		dev_dbg(&dev->dev, "MBOX3: error too large bNumConfigurations: %d\n",
+-			new_device_descriptor.bNumConfigurations);
++			new_device_descriptor->bNumConfigurations);
+ 	else
+-		memcpy(&dev->descriptor, &new_device_descriptor, sizeof(dev->descriptor));
++		memcpy(&dev->descriptor, new_device_descriptor, sizeof(dev->descriptor));
+ 
+ 	err = usb_reset_configuration(dev);
+ 	if (err < 0)
+@@ -1653,6 +1665,7 @@ int snd_usb_apply_boot_quirk(struct usb_device *dev,
+ 			return snd_usb_motu_microbookii_boot_quirk(dev);
+ 		break;
+ 	case USB_ID(0x2a39, 0x3f8c): /* RME Digiface USB */
++	case USB_ID(0x2a39, 0x3fa0): /* RME Digiface USB (alternate) */
+ 		return snd_usb_rme_digiface_boot_quirk(dev);
+ 	}
+ 
+@@ -1866,6 +1879,7 @@ void snd_usb_set_format_quirk(struct snd_usb_substream *subs,
+ 		mbox3_set_format_quirk(subs, fmt); /* Digidesign Mbox 3 */
+ 		break;
+ 	case USB_ID(0x2a39, 0x3f8c): /* RME Digiface USB */
++	case USB_ID(0x2a39, 0x3fa0): /* RME Digiface USB (alternate) */
+ 		rme_digiface_set_format_quirk(subs);
+ 		break;
+ 	}
+@@ -2130,7 +2144,7 @@ struct usb_audio_quirk_flags_table {
+ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ 	/* Device matches */
+ 	DEVICE_FLG(0x03f0, 0x654a, /* HP 320 FHD Webcam */
+-		   QUIRK_FLAG_GET_SAMPLE_RATE),
++		   QUIRK_FLAG_GET_SAMPLE_RATE | QUIRK_FLAG_MIC_RES_16),
+ 	DEVICE_FLG(0x041e, 0x3000, /* Creative SB Extigy */
+ 		   QUIRK_FLAG_IGNORE_CTL_ERROR),
+ 	DEVICE_FLG(0x041e, 0x4080, /* Creative Live Cam VF0610 */
+@@ -2138,10 +2152,31 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ 	DEVICE_FLG(0x045e, 0x083c, /* MS USB Link headset */
+ 		   QUIRK_FLAG_GET_SAMPLE_RATE | QUIRK_FLAG_CTL_MSG_DELAY |
+ 		   QUIRK_FLAG_DISABLE_AUTOSUSPEND),
++	DEVICE_FLG(0x046d, 0x0807, /* Logitech Webcam C500 */
++		   QUIRK_FLAG_CTL_MSG_DELAY_1M | QUIRK_FLAG_MIC_RES_384),
++	DEVICE_FLG(0x046d, 0x0808, /* Logitech Webcam C600 */
++		   QUIRK_FLAG_CTL_MSG_DELAY_1M | QUIRK_FLAG_MIC_RES_384),
++	DEVICE_FLG(0x046d, 0x0809,
++		   QUIRK_FLAG_CTL_MSG_DELAY_1M | QUIRK_FLAG_MIC_RES_384),
++	DEVICE_FLG(0x046d, 0x0819, /* Logitech Webcam C210 */
++		   QUIRK_FLAG_CTL_MSG_DELAY_1M | QUIRK_FLAG_MIC_RES_384),
++	DEVICE_FLG(0x046d, 0x081b, /* HD Webcam c310 */
++		   QUIRK_FLAG_CTL_MSG_DELAY_1M | QUIRK_FLAG_MIC_RES_384),
++	DEVICE_FLG(0x046d, 0x081d, /* HD Webcam c510 */
++		   QUIRK_FLAG_CTL_MSG_DELAY_1M | QUIRK_FLAG_MIC_RES_384),
++	DEVICE_FLG(0x046d, 0x0825, /* HD Webcam c270 */
++		   QUIRK_FLAG_CTL_MSG_DELAY_1M | QUIRK_FLAG_MIC_RES_384),
++	DEVICE_FLG(0x046d, 0x0826, /* HD Webcam c525 */
++		   QUIRK_FLAG_CTL_MSG_DELAY_1M | QUIRK_FLAG_MIC_RES_384),
+ 	DEVICE_FLG(0x046d, 0x084c, /* Logitech ConferenceCam Connect */
+ 		   QUIRK_FLAG_GET_SAMPLE_RATE | QUIRK_FLAG_CTL_MSG_DELAY_1M),
++	DEVICE_FLG(0x046d, 0x08ca, /* Logitech Quickcam Fusion */
++		   QUIRK_FLAG_CTL_MSG_DELAY_1M | QUIRK_FLAG_MIC_RES_384),
+ 	DEVICE_FLG(0x046d, 0x0991, /* Logitech QuickCam Pro */
+-		   QUIRK_FLAG_CTL_MSG_DELAY_1M | QUIRK_FLAG_IGNORE_CTL_ERROR),
++		   QUIRK_FLAG_CTL_MSG_DELAY_1M | QUIRK_FLAG_IGNORE_CTL_ERROR |
++		   QUIRK_FLAG_MIC_RES_384),
++	DEVICE_FLG(0x046d, 0x09a2, /* QuickCam Communicate Deluxe/S7500 */
++		   QUIRK_FLAG_CTL_MSG_DELAY_1M | QUIRK_FLAG_MIC_RES_384),
+ 	DEVICE_FLG(0x046d, 0x09a4, /* Logitech QuickCam E 3500 */
+ 		   QUIRK_FLAG_CTL_MSG_DELAY_1M | QUIRK_FLAG_IGNORE_CTL_ERROR),
+ 	DEVICE_FLG(0x0499, 0x1509, /* Steinberg UR22 */
+@@ -2209,7 +2244,7 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ 	DEVICE_FLG(0x0fd9, 0x0008, /* Hauppauge HVR-950Q */
+ 		   QUIRK_FLAG_SHARE_MEDIA_DEVICE | QUIRK_FLAG_ALIGN_TRANSFER),
+ 	DEVICE_FLG(0x1224, 0x2a25, /* Jieli Technology USB PHY 2.0 */
+-		   QUIRK_FLAG_GET_SAMPLE_RATE),
++		   QUIRK_FLAG_GET_SAMPLE_RATE | QUIRK_FLAG_MIC_RES_16),
+ 	DEVICE_FLG(0x1395, 0x740a, /* Sennheiser DECT */
+ 		   QUIRK_FLAG_GET_SAMPLE_RATE),
+ 	DEVICE_FLG(0x1397, 0x0507, /* Behringer UMC202HD */
+@@ -2247,9 +2282,9 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ 	DEVICE_FLG(0x19f7, 0x0035, /* RODE NT-USB+ */
+ 		   QUIRK_FLAG_GET_SAMPLE_RATE),
+ 	DEVICE_FLG(0x1bcf, 0x2281, /* HD Webcam */
+-		   QUIRK_FLAG_GET_SAMPLE_RATE),
++		   QUIRK_FLAG_GET_SAMPLE_RATE | QUIRK_FLAG_MIC_RES_16),
+ 	DEVICE_FLG(0x1bcf, 0x2283, /* NexiGo N930AF FHD Webcam */
+-		   QUIRK_FLAG_GET_SAMPLE_RATE),
++		   QUIRK_FLAG_GET_SAMPLE_RATE | QUIRK_FLAG_MIC_RES_16),
+ 	DEVICE_FLG(0x2040, 0x7200, /* Hauppauge HVR-950Q */
+ 		   QUIRK_FLAG_SHARE_MEDIA_DEVICE | QUIRK_FLAG_ALIGN_TRANSFER),
+ 	DEVICE_FLG(0x2040, 0x7201, /* Hauppauge HVR-950Q-MXL */
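
The __free(kfree) conversions in the three boot quirks above move the descriptor copy to the heap and let scope-based cleanup free it on every return path, including the new -ENOMEM exits. A standalone sketch built on the same compiler feature (attribute cleanup, which the kernel's helper wraps); the __free_heap macro here is a stand-in, not the kernel's:

/* Standalone model of scope-based cleanup: the buffer is freed
 * automatically on every return path, with no explicit free(). */
#include <stdio.h>
#include <stdlib.h>

static void free_p(void *p) { free(*(void **)p); }
#define __free_heap __attribute__((cleanup(free_p)))

static int get_descriptor_copy(void)
{
	unsigned char *desc __free_heap = malloc(18);

	if (!desc)
		return -1;	/* cleanup runs; free(NULL) is a no-op */
	desc[0] = 18;		/* pretend the control transfer filled it */
	printf("bLength = %u\n", desc[0]);
	return 0;		/* desc freed here automatically */
}

int main(void)
{
	return get_descriptor_copy() ? 1 : 0;
}
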
+diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
+index b0f042c996087e..158ec053dc44dd 100644
+--- a/sound/usb/usbaudio.h
++++ b/sound/usb/usbaudio.h
+@@ -194,6 +194,8 @@ extern bool snd_usb_skip_validation;
+  * QUIRK_FLAG_FIXED_RATE
+  *  Do not set PCM rate (frequency) when only one rate is available
+  *  for the given endpoint.
++ * QUIRK_FLAG_MIC_RES_16 and QUIRK_FLAG_MIC_RES_384
++ *  Set the fixed resolution for Mic Capture Volume (mostly for webcams)
+  */
+ 
+ #define QUIRK_FLAG_GET_SAMPLE_RATE	(1U << 0)
+@@ -218,5 +220,7 @@ extern bool snd_usb_skip_validation;
+ #define QUIRK_FLAG_IFACE_SKIP_CLOSE	(1U << 19)
+ #define QUIRK_FLAG_FORCE_IFACE_RESET	(1U << 20)
+ #define QUIRK_FLAG_FIXED_RATE		(1U << 21)
++#define QUIRK_FLAG_MIC_RES_16		(1U << 22)
++#define QUIRK_FLAG_MIC_RES_384		(1U << 23)
+ 
+ #endif /* __USBAUDIO_H */
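
The two new flags above replace the per-device switch cases deleted from volume_control_quirks(): the resolution override now lives in the device table and is tested once. A standalone sketch of that flag test, with the flag values copied from the hunk and an illustrative function name:

/* Standalone model of the flag-based resolution override: the device
 * table sets a bit, the control code tests it in one place. */
#include <stdio.h>

#define QUIRK_FLAG_MIC_RES_16	(1U << 22)
#define QUIRK_FLAG_MIC_RES_384	(1U << 23)

static int mic_res(unsigned int quirk_flags, int probed_res)
{
	if (quirk_flags & QUIRK_FLAG_MIC_RES_384)
		return 384;
	if (quirk_flags & QUIRK_FLAG_MIC_RES_16)
		return 16;
	return probed_res;	/* no override: trust the descriptor */
}

int main(void)
{
	printf("Logitech webcam res: %d\n",
	       mic_res(QUIRK_FLAG_MIC_RES_384, 1));
	return 0;
}
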
+diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
+index 2ff949ea82fa66..e71be67f1d8658 100644
+--- a/tools/bpf/bpftool/prog.c
++++ b/tools/bpf/bpftool/prog.c
+@@ -822,11 +822,18 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
+ 					printf("%s:\n", sym_name);
+ 				}
+ 
+-				if (disasm_print_insn(img, lens[i], opcodes,
+-						      name, disasm_opt, btf,
+-						      prog_linfo, ksyms[i], i,
+-						      linum))
+-					goto exit_free;
++				if (ksyms) {
++					if (disasm_print_insn(img, lens[i], opcodes,
++							      name, disasm_opt, btf,
++							      prog_linfo, ksyms[i], i,
++							      linum))
++						goto exit_free;
++				} else {
++					if (disasm_print_insn(img, lens[i], opcodes,
++							      name, disasm_opt, btf,
++							      NULL, 0, 0, false))
++						goto exit_free;
++				}
+ 
+ 				img += lens[i];
+ 
+diff --git a/tools/scripts/Makefile.arch b/tools/scripts/Makefile.arch
+index f6a50f06dfc453..eabfe9f411d914 100644
+--- a/tools/scripts/Makefile.arch
++++ b/tools/scripts/Makefile.arch
+@@ -7,8 +7,8 @@ HOSTARCH := $(shell uname -m | sed -e s/i.86/x86/ -e s/x86_64/x86/ \
+                                   -e s/sh[234].*/sh/ -e s/aarch64.*/arm64/ \
+                                   -e s/riscv.*/riscv/ -e s/loongarch.*/loongarch/)
+ 
+-ifndef ARCH
+-ARCH := $(HOSTARCH)
++ifeq ($(strip $(ARCH)),)
++override ARCH := $(HOSTARCH)
+ endif
+ 
+ SRCARCH := $(ARCH)
+diff --git a/tools/testing/selftests/arm64/fp/fp-stress.c b/tools/testing/selftests/arm64/fp/fp-stress.c
+index faac24bdefeb94..80f22789504d66 100644
+--- a/tools/testing/selftests/arm64/fp/fp-stress.c
++++ b/tools/testing/selftests/arm64/fp/fp-stress.c
+@@ -79,7 +79,7 @@ static void child_start(struct child_data *child, const char *program)
+ 		 */
+ 		ret = dup2(pipefd[1], 1);
+ 		if (ret == -1) {
+-			fprintf(stderr, "dup2() %d\n", errno);
++			printf("dup2() %d\n", errno);
+ 			exit(EXIT_FAILURE);
+ 		}
+ 
+@@ -89,7 +89,7 @@ static void child_start(struct child_data *child, const char *program)
+ 		 */
+ 		ret = dup2(startup_pipe[0], 3);
+ 		if (ret == -1) {
+-			fprintf(stderr, "dup2() %d\n", errno);
++			printf("dup2() %d\n", errno);
+ 			exit(EXIT_FAILURE);
+ 		}
+ 
+@@ -107,16 +107,15 @@ static void child_start(struct child_data *child, const char *program)
+ 		 */
+ 		ret = read(3, &i, sizeof(i));
+ 		if (ret < 0)
+-			fprintf(stderr, "read(startp pipe) failed: %s (%d)\n",
+-				strerror(errno), errno);
++			printf("read(startp pipe) failed: %s (%d)\n",
++			       strerror(errno), errno);
+ 		if (ret > 0)
+-			fprintf(stderr, "%d bytes of data on startup pipe\n",
+-				ret);
++			printf("%d bytes of data on startup pipe\n", ret);
+ 		close(3);
+ 
+ 		ret = execl(program, program, NULL);
+-		fprintf(stderr, "execl(%s) failed: %d (%s)\n",
+-			program, errno, strerror(errno));
++		printf("execl(%s) failed: %d (%s)\n",
++		       program, errno, strerror(errno));
+ 
+ 		exit(EXIT_FAILURE);
+ 	} else {
+diff --git a/tools/testing/selftests/arm64/pauth/pac.c b/tools/testing/selftests/arm64/pauth/pac.c
+index b743daa772f55f..5a07b3958fbf29 100644
+--- a/tools/testing/selftests/arm64/pauth/pac.c
++++ b/tools/testing/selftests/arm64/pauth/pac.c
+@@ -182,6 +182,9 @@ int exec_sign_all(struct signatures *signed_vals, size_t val)
+ 		return -1;
+ 	}
+ 
++	close(new_stdin[1]);
++	close(new_stdout[0]);
++
+ 	return 0;
+ }
+ 
+diff --git a/tools/testing/selftests/bpf/progs/verifier_bits_iter.c b/tools/testing/selftests/bpf/progs/verifier_bits_iter.c
+index 7c881bca9af5c7..a7a6ae6c162fe0 100644
+--- a/tools/testing/selftests/bpf/progs/verifier_bits_iter.c
++++ b/tools/testing/selftests/bpf/progs/verifier_bits_iter.c
+@@ -35,9 +35,9 @@ __description("uninitialized iter in ->next()")
+ __failure __msg("expected an initialized iter_bits as arg #1")
+ int BPF_PROG(next_uninit, struct bpf_iter_meta *meta, struct cgroup *cgrp)
+ {
+-	struct bpf_iter_bits *it = NULL;
++	struct bpf_iter_bits it = {};
+ 
+-	bpf_iter_bits_next(it);
++	bpf_iter_bits_next(&it);
+ 	return 0;
+ }
+ 
+diff --git a/tools/testing/selftests/damon/Makefile b/tools/testing/selftests/damon/Makefile
+index 5b2a6a5dd1af7f..812f656260fba9 100644
+--- a/tools/testing/selftests/damon/Makefile
++++ b/tools/testing/selftests/damon/Makefile
+@@ -6,7 +6,7 @@ TEST_GEN_FILES += debugfs_target_ids_read_before_terminate_race
+ TEST_GEN_FILES += debugfs_target_ids_pid_leak
+ TEST_GEN_FILES += access_memory access_memory_even
+ 
+-TEST_FILES = _chk_dependency.sh _debugfs_common.sh
++TEST_FILES = _chk_dependency.sh _debugfs_common.sh _damon_sysfs.py
+ 
+ # functionality tests
+ TEST_PROGS = debugfs_attrs.sh debugfs_schemes.sh debugfs_target_ids.sh
+diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc
+index a16c6a6f6055cf..8f1c58f0c2397f 100644
+--- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc
++++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc
+@@ -111,7 +111,7 @@ check_error 'p vfs_read $arg* ^$arg*'		# DOUBLE_ARGS
+ if !grep -q 'kernel return probes support:' README; then
+ check_error 'r vfs_read ^$arg*'			# NOFENTRY_ARGS
+ fi
+-check_error 'p vfs_read+8 ^$arg*'		# NOFENTRY_ARGS
++check_error 'p vfs_read+20 ^$arg*'		# NOFENTRY_ARGS
+ check_error 'p vfs_read ^hoge'			# NO_BTFARG
+ check_error 'p kfree ^$arg10'			# NO_BTFARG (exceed the number of parameters)
+ check_error 'r kfree ^$retval'			# NO_RETVAL
+diff --git a/tools/testing/selftests/hid/run-hid-tools-tests.sh b/tools/testing/selftests/hid/run-hid-tools-tests.sh
+index bdae8464da8656..af1682a53c27e1 100755
+--- a/tools/testing/selftests/hid/run-hid-tools-tests.sh
++++ b/tools/testing/selftests/hid/run-hid-tools-tests.sh
+@@ -2,24 +2,26 @@
+ # SPDX-License-Identifier: GPL-2.0
+ # Runs tests for the HID subsystem
+ 
++KSELFTEST_SKIP_TEST=4
++
+ if ! command -v python3 > /dev/null 2>&1; then
+ 	echo "hid-tools: [SKIP] python3 not installed"
+-	exit 77
++	exit $KSELFTEST_SKIP_TEST
+ fi
+ 
+ if ! python3 -c "import pytest" > /dev/null 2>&1; then
+-	echo "hid: [SKIP/ pytest module not installed"
+-	exit 77
++	echo "hid: [SKIP] pytest module not installed"
++	exit $KSELFTEST_SKIP_TEST
+ fi
+ 
+ if ! python3 -c "import pytest_tap" > /dev/null 2>&1; then
+-	echo "hid: [SKIP/ pytest_tap module not installed"
+-	exit 77
++	echo "hid: [SKIP] pytest_tap module not installed"
++	exit $KSELFTEST_SKIP_TEST
+ fi
+ 
+ if ! python3 -c "import hidtools" > /dev/null 2>&1; then
+-	echo "hid: [SKIP/ hid-tools module not installed"
+-	exit 77
++	echo "hid: [SKIP] hid-tools module not installed"
++	exit $KSELFTEST_SKIP_TEST
+ fi
+ 
+ TARGET=${TARGET:=.}
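
The exit-code change above matters because the kselftest runner reserves 4 for "skipped", while 77 is automake's convention and is not recognized. The same rule holds for C selftests; a minimal sketch (the dependency probe is hypothetical):

#include <stdio.h>
#include <stdlib.h>

#define KSFT_SKIP 4     /* kselftest's exit code for a skipped test */

int main(void)
{
    /* Stand-in for the python3/pytest checks in the script above. */
    if (system("command -v python3 > /dev/null 2>&1") != 0) {
        printf("example: [SKIP] python3 not installed\n");
        return KSFT_SKIP;
    }
    printf("example: dependencies present\n");
    return EXIT_SUCCESS;
}
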
+diff --git a/tools/testing/selftests/mm/hugetlb_dio.c b/tools/testing/selftests/mm/hugetlb_dio.c
+index 432d5af15e66b7..db63abe5ee5e85 100644
+--- a/tools/testing/selftests/mm/hugetlb_dio.c
++++ b/tools/testing/selftests/mm/hugetlb_dio.c
+@@ -76,19 +76,15 @@ void run_dio_using_hugetlb(unsigned int start_off, unsigned int end_off)
+ 	/* Get the free huge pages after unmap*/
+ 	free_hpage_a = get_free_hugepages();
+ 
++	ksft_print_msg("No. Free pages before allocation : %d\n", free_hpage_b);
++	ksft_print_msg("No. Free pages after munmap : %d\n", free_hpage_a);
++
+ 	/*
+ 	 * If the no. of free hugepages before allocation and after unmap does
+ 	 * not match - that means there could still be a page which is pinned.
+ 	 */
+-	if (free_hpage_a != free_hpage_b) {
+-		ksft_print_msg("No. Free pages before allocation : %d\n", free_hpage_b);
+-		ksft_print_msg("No. Free pages after munmap : %d\n", free_hpage_a);
+-		ksft_test_result_fail(": Huge pages not freed!\n");
+-	} else {
+-		ksft_print_msg("No. Free pages before allocation : %d\n", free_hpage_b);
+-		ksft_print_msg("No. Free pages after munmap : %d\n", free_hpage_a);
+-		ksft_test_result_pass(": Huge pages freed successfully !\n");
+-	}
++	ksft_test_result(free_hpage_a == free_hpage_b,
++			 "free huge pages from %u-%u\n", start_off, end_off);
+ }
+ 
+ int main(void)
+diff --git a/tools/testing/selftests/resctrl/resctrl_val.c b/tools/testing/selftests/resctrl/resctrl_val.c
+index f118f659e89600..e92e4f463f37bb 100644
+--- a/tools/testing/selftests/resctrl/resctrl_val.c
++++ b/tools/testing/selftests/resctrl/resctrl_val.c
+@@ -159,7 +159,7 @@ static int read_from_imc_dir(char *imc_dir, int count)
+ 
+ 		return -1;
+ 	}
+-	if (fscanf(fp, "%s", cas_count_cfg) <= 0) {
++	if (fscanf(fp, "%1023s", cas_count_cfg) <= 0) {
+ 		ksft_perror("Could not get iMC cas count read");
+ 		fclose(fp);
+ 
+@@ -177,7 +177,7 @@ static int read_from_imc_dir(char *imc_dir, int count)
+ 
+ 		return -1;
+ 	}
+-	if  (fscanf(fp, "%s", cas_count_cfg) <= 0) {
++	if  (fscanf(fp, "%1023s", cas_count_cfg) <= 0) {
+ 		ksft_perror("Could not get iMC cas count write");
+ 		fclose(fp);
+ 
+diff --git a/tools/testing/selftests/resctrl/resctrlfs.c b/tools/testing/selftests/resctrl/resctrlfs.c
+index 250c320349a785..a53cd1cb6e0c64 100644
+--- a/tools/testing/selftests/resctrl/resctrlfs.c
++++ b/tools/testing/selftests/resctrl/resctrlfs.c
+@@ -182,7 +182,7 @@ int get_cache_size(int cpu_no, const char *cache_type, unsigned long *cache_size
+ 
+ 		return -1;
+ 	}
+-	if (fscanf(fp, "%s", cache_str) <= 0) {
++	if (fscanf(fp, "%63s", cache_str) <= 0) {
+ 		ksft_perror("Could not get cache_size");
+ 		fclose(fp);
+ 
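
Both resctrl hunks bound %s with an explicit field width one byte smaller than the destination array, the standard guard against fscanf() writing past a fixed-size buffer. A minimal sketch of the pattern (path and buffer size are illustrative):

#include <stdio.h>

int main(void)
{
    char cache_str[64];
    FILE *fp = fopen("/sys/devices/system/cpu/cpu0/cache/index0/size", "r");

    if (!fp) {
        perror("fopen");
        return 1;
    }
    /* "%63s" stops after 63 characters, leaving room for the
     * terminating NUL; a bare "%s" has no such limit. */
    if (fscanf(fp, "%63s", cache_str) == 1)
        printf("cache size: %s\n", cache_str);
    fclose(fp);
    return 0;
}
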
+diff --git a/tools/testing/selftests/wireguard/qemu/debug.config b/tools/testing/selftests/wireguard/qemu/debug.config
+index 9d172210e2c63f..139fd9aa8b1218 100644
+--- a/tools/testing/selftests/wireguard/qemu/debug.config
++++ b/tools/testing/selftests/wireguard/qemu/debug.config
+@@ -31,7 +31,6 @@ CONFIG_SCHED_DEBUG=y
+ CONFIG_SCHED_INFO=y
+ CONFIG_SCHEDSTATS=y
+ CONFIG_SCHED_STACK_END_CHECK=y
+-CONFIG_DEBUG_TIMEKEEPING=y
+ CONFIG_DEBUG_PREEMPT=y
+ CONFIG_DEBUG_RT_MUTEXES=y
+ CONFIG_DEBUG_SPINLOCK=y
+diff --git a/tools/testing/vsock/vsock_perf.c b/tools/testing/vsock/vsock_perf.c
+index 4e8578f815e08a..8e0a6c0770d372 100644
+--- a/tools/testing/vsock/vsock_perf.c
++++ b/tools/testing/vsock/vsock_perf.c
+@@ -33,7 +33,7 @@
+ 
+ static unsigned int port = DEFAULT_PORT;
+ static unsigned long buf_size_bytes = DEFAULT_BUF_SIZE_BYTES;
+-static unsigned long vsock_buf_bytes = DEFAULT_VSOCK_BUF_BYTES;
++static unsigned long long vsock_buf_bytes = DEFAULT_VSOCK_BUF_BYTES;
+ static bool zerocopy;
+ 
+ static void error(const char *s)
+@@ -133,7 +133,7 @@ static float get_gbps(unsigned long bits, time_t ns_delta)
+ 	       ((float)ns_delta / NSEC_PER_SEC);
+ }
+ 
+-static void run_receiver(unsigned long rcvlowat_bytes)
++static void run_receiver(int rcvlowat_bytes)
+ {
+ 	unsigned int read_cnt;
+ 	time_t rx_begin_ns;
+@@ -162,8 +162,8 @@ static void run_receiver(unsigned long rcvlowat_bytes)
+ 	printf("Run as receiver\n");
+ 	printf("Listen port %u\n", port);
+ 	printf("RX buffer %lu bytes\n", buf_size_bytes);
+-	printf("vsock buffer %lu bytes\n", vsock_buf_bytes);
+-	printf("SO_RCVLOWAT %lu bytes\n", rcvlowat_bytes);
++	printf("vsock buffer %llu bytes\n", vsock_buf_bytes);
++	printf("SO_RCVLOWAT %d bytes\n", rcvlowat_bytes);
+ 
+ 	fd = socket(AF_VSOCK, SOCK_STREAM, 0);
+ 
+@@ -439,7 +439,7 @@ static long strtolx(const char *arg)
+ int main(int argc, char **argv)
+ {
+ 	unsigned long to_send_bytes = DEFAULT_TO_SEND_BYTES;
+-	unsigned long rcvlowat_bytes = DEFAULT_RCVLOWAT_BYTES;
++	int rcvlowat_bytes = DEFAULT_RCVLOWAT_BYTES;
+ 	int peer_cid = -1;
+ 	bool sender = false;
+ 
+diff --git a/tools/testing/vsock/vsock_test.c b/tools/testing/vsock/vsock_test.c
+index 8d38dbf8f41f04..0b7f5bf546da56 100644
+--- a/tools/testing/vsock/vsock_test.c
++++ b/tools/testing/vsock/vsock_test.c
+@@ -429,7 +429,7 @@ static void test_seqpacket_msg_bounds_client(const struct test_opts *opts)
+ 
+ static void test_seqpacket_msg_bounds_server(const struct test_opts *opts)
+ {
+-	unsigned long sock_buf_size;
++	unsigned long long sock_buf_size;
+ 	unsigned long remote_hash;
+ 	unsigned long curr_hash;
+ 	int fd;
+@@ -634,7 +634,8 @@ static void test_seqpacket_timeout_server(const struct test_opts *opts)
+ 
+ static void test_seqpacket_bigmsg_client(const struct test_opts *opts)
+ {
+-	unsigned long sock_buf_size;
++	unsigned long long sock_buf_size;
++	size_t buf_size;
+ 	socklen_t len;
+ 	void *data;
+ 	int fd;
+@@ -655,13 +656,20 @@ static void test_seqpacket_bigmsg_client(const struct test_opts *opts)
+ 
+ 	sock_buf_size++;
+ 
+-	data = malloc(sock_buf_size);
++	/* size_t can be < unsigned long long */
++	buf_size = (size_t)sock_buf_size;
++	if (buf_size != sock_buf_size) {
++		fprintf(stderr, "Returned BUFFER_SIZE too large\n");
++		exit(EXIT_FAILURE);
++	}
++
++	data = malloc(buf_size);
+ 	if (!data) {
+ 		perror("malloc");
+ 		exit(EXIT_FAILURE);
+ 	}
+ 
+-	send_buf(fd, data, sock_buf_size, 0, -EMSGSIZE);
++	send_buf(fd, data, buf_size, 0, -EMSGSIZE);
+ 
+ 	control_writeln("CLISENT");
+ 
+@@ -835,7 +843,7 @@ static void test_stream_poll_rcvlowat_server(const struct test_opts *opts)
+ 
+ static void test_stream_poll_rcvlowat_client(const struct test_opts *opts)
+ {
+-	unsigned long lowat_val = RCVLOWAT_BUF_SIZE;
++	int lowat_val = RCVLOWAT_BUF_SIZE;
+ 	char buf[RCVLOWAT_BUF_SIZE];
+ 	struct pollfd fds;
+ 	short poll_flags;
+@@ -1357,9 +1365,10 @@ static void test_stream_rcvlowat_def_cred_upd_client(const struct test_opts *opt
+ static void test_stream_credit_update_test(const struct test_opts *opts,
+ 					   bool low_rx_bytes_test)
+ {
+-	size_t recv_buf_size;
++	int recv_buf_size;
+ 	struct pollfd fds;
+ 	size_t buf_size;
++	unsigned long long sock_buf_size;
+ 	void *buf;
+ 	int fd;
+ 
+@@ -1371,8 +1380,11 @@ static void test_stream_credit_update_test(const struct test_opts *opts,
+ 
+ 	buf_size = RCVLOWAT_CREDIT_UPD_BUF_SIZE;
+ 
++	/* size_t can be < unsigned long long */
++	sock_buf_size = buf_size;
++
+ 	if (setsockopt(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_SIZE,
+-		       &buf_size, sizeof(buf_size))) {
++		       &sock_buf_size, sizeof(sock_buf_size))) {
+ 		perror("setsockopt(SO_VM_SOCKETS_BUFFER_SIZE)");
+ 		exit(EXIT_FAILURE);
+ 	}
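
The type changes in the vsock hunks all follow from one fact visible in the code: SO_VM_SOCKETS_BUFFER_SIZE carries a 64-bit value, so both the variable and the optlen handed to setsockopt() must be unsigned long long, while SO_RCVLOWAT takes a plain int. A minimal sketch of the corrected calls (error paths trimmed):

#include <stdio.h>
#include <sys/socket.h>
#include <linux/vm_sockets.h>

int main(void)
{
    unsigned long long buf_size = 2 * 1024 * 1024;  /* 64-bit option */
    int lowat = 512;                                /* int option */
    int fd = socket(AF_VSOCK, SOCK_STREAM, 0);

    if (fd < 0) {
        perror("socket");
        return 1;
    }
    /* optlen must match the width the kernel expects per option. */
    if (setsockopt(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_SIZE,
                   &buf_size, sizeof(buf_size)))
        perror("setsockopt(SO_VM_SOCKETS_BUFFER_SIZE)");
    if (setsockopt(fd, SOL_SOCKET, SO_RCVLOWAT, &lowat, sizeof(lowat)))
        perror("setsockopt(SO_RCVLOWAT)");
    return 0;
}
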
+diff --git a/tools/tracing/rtla/sample/timerlat_load.py b/tools/tracing/rtla/sample/timerlat_load.py
+index 8cc5eb2d2e69e5..52eccb6225f92d 100644
+--- a/tools/tracing/rtla/sample/timerlat_load.py
++++ b/tools/tracing/rtla/sample/timerlat_load.py
+@@ -25,13 +25,12 @@ import sys
+ import os
+ 
+ parser = argparse.ArgumentParser(description='user-space timerlat thread in Python')
+-parser.add_argument("cpu", help='CPU to run timerlat thread')
+-parser.add_argument("-p", "--prio", help='FIFO priority')
+-
++parser.add_argument("cpu", type=int, help='CPU to run timerlat thread')
++parser.add_argument("-p", "--prio", type=int, help='FIFO priority')
+ args = parser.parse_args()
+ 
+ try:
+-    affinity_mask = { int(args.cpu) }
++    affinity_mask = {args.cpu}
+ except:
+     print("Invalid cpu: " + args.cpu)
+     exit(1)
+@@ -44,7 +43,7 @@ except:
+ 
+ if (args.prio):
+     try:
+-        param = os.sched_param(int(args.prio))
++        param = os.sched_param(args.prio)
+         os.sched_setscheduler(0, os.SCHED_FIFO, param)
+     except:
+         print("Error setting priority")
+diff --git a/tools/tracing/rtla/src/timerlat_hist.c b/tools/tracing/rtla/src/timerlat_hist.c
+index 829511a712224f..ae55cd79128336 100644
+--- a/tools/tracing/rtla/src/timerlat_hist.c
++++ b/tools/tracing/rtla/src/timerlat_hist.c
+@@ -62,9 +62,9 @@ struct timerlat_hist_cpu {
+ 	int			*thread;
+ 	int			*user;
+ 
+-	int			irq_count;
+-	int			thread_count;
+-	int			user_count;
++	unsigned long long	irq_count;
++	unsigned long long	thread_count;
++	unsigned long long	user_count;
+ 
+ 	unsigned long long	min_irq;
+ 	unsigned long long	sum_irq;
+@@ -304,15 +304,15 @@ timerlat_print_summary(struct timerlat_hist_params *params,
+ 			continue;
+ 
+ 		if (!params->no_irq)
+-			trace_seq_printf(trace->seq, "%9d ",
++			trace_seq_printf(trace->seq, "%9llu ",
+ 					data->hist[cpu].irq_count);
+ 
+ 		if (!params->no_thread)
+-			trace_seq_printf(trace->seq, "%9d ",
++			trace_seq_printf(trace->seq, "%9llu ",
+ 					data->hist[cpu].thread_count);
+ 
+ 		if (params->user_hist)
+-			trace_seq_printf(trace->seq, "%9d ",
++			trace_seq_printf(trace->seq, "%9llu ",
+ 					 data->hist[cpu].user_count);
+ 	}
+ 	trace_seq_printf(trace->seq, "\n");
+@@ -488,15 +488,15 @@ timerlat_print_stats_all(struct timerlat_hist_params *params,
+ 		trace_seq_printf(trace->seq, "count:");
+ 
+ 	if (!params->no_irq)
+-		trace_seq_printf(trace->seq, "%9d ",
++		trace_seq_printf(trace->seq, "%9llu ",
+ 				 sum.irq_count);
+ 
+ 	if (!params->no_thread)
+-		trace_seq_printf(trace->seq, "%9d ",
++		trace_seq_printf(trace->seq, "%9llu ",
+ 				 sum.thread_count);
+ 
+ 	if (params->user_hist)
+-		trace_seq_printf(trace->seq, "%9d ",
++		trace_seq_printf(trace->seq, "%9llu ",
+ 				 sum.user_count);
+ 
+ 	trace_seq_printf(trace->seq, "\n");
+@@ -778,7 +778,7 @@ static struct timerlat_hist_params
+ 		/* getopt_long stores the option index here. */
+ 		int option_index = 0;
+ 
+-		c = getopt_long(argc, argv, "a:c:C::b:d:e:E:DhH:i:knp:P:s:t::T:uU0123456:7:8:9\1\2:\3",
++		c = getopt_long(argc, argv, "a:c:C::b:d:e:E:DhH:i:knp:P:s:t::T:uU0123456:7:8:9\1\2:\3:",
+ 				 long_options, &option_index);
+ 
+ 		/* detect the end of the options. */
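
The one-character optstring fix above (appending ':' after '\3') works because, in a getopt() optstring (getopt_long() uses the same syntax), a ':' after an option character declares a required argument; without it, optarg stays NULL for that option. A minimal sketch of the convention (option letters are illustrative):

#include <stdio.h>
#include <getopt.h>

int main(int argc, char **argv)
{
    int c;

    /* "d:" requires an argument, "v" takes none. */
    while ((c = getopt(argc, argv, "d:v")) != -1) {
        switch (c) {
        case 'd':
            printf("duration: %s\n", optarg);
            break;
        case 'v':
            printf("verbose\n");
            break;
        default:
            return 1;
        }
    }
    return 0;
}
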
+diff --git a/tools/tracing/rtla/src/timerlat_top.c b/tools/tracing/rtla/src/timerlat_top.c
+index 3b62519a412fc9..ac2ff38a57ee55 100644
+--- a/tools/tracing/rtla/src/timerlat_top.c
++++ b/tools/tracing/rtla/src/timerlat_top.c
+@@ -54,9 +54,9 @@ struct timerlat_top_params {
+ };
+ 
+ struct timerlat_top_cpu {
+-	int			irq_count;
+-	int			thread_count;
+-	int			user_count;
++	unsigned long long	irq_count;
++	unsigned long long	thread_count;
++	unsigned long long	user_count;
+ 
+ 	unsigned long long	cur_irq;
+ 	unsigned long long	min_irq;
+@@ -280,7 +280,7 @@ static void timerlat_top_print(struct osnoise_tool *top, int cpu)
+ 	/*
+ 	 * Unless trace is being lost, IRQ counter is always the max.
+ 	 */
+-	trace_seq_printf(s, "%3d #%-9d |", cpu, cpu_data->irq_count);
++	trace_seq_printf(s, "%3d #%-9llu |", cpu, cpu_data->irq_count);
+ 
+ 	if (!cpu_data->irq_count) {
+ 		trace_seq_printf(s, "%s %s %s %s |", no_value, no_value, no_value, no_value);
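
Alongside widening the counters to unsigned long long, every format string is updated from %9d to %9llu, since printf-family formats must match argument types exactly. A minimal illustration:

#include <stdio.h>
#include <limits.h>

int main(void)
{
    unsigned long long count = (unsigned long long)INT_MAX + 1;

    /* An int counter would already have overflowed at this value;
     * the wider type needs the matching %llu specifier. */
    printf("irq count: %9llu\n", count);
    return 0;
}
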
+diff --git a/tools/tracing/rtla/src/utils.c b/tools/tracing/rtla/src/utils.c
+index 9ac71a66840c1b..0735fcb827ed76 100644
+--- a/tools/tracing/rtla/src/utils.c
++++ b/tools/tracing/rtla/src/utils.c
+@@ -233,7 +233,7 @@ long parse_ns_duration(char *val)
+ 
+ #define SCHED_DEADLINE		6
+ 
+-static inline int sched_setattr(pid_t pid, const struct sched_attr *attr,
++static inline int syscall_sched_setattr(pid_t pid, const struct sched_attr *attr,
+ 				unsigned int flags) {
+ 	return syscall(__NR_sched_setattr, pid, attr, flags);
+ }
+@@ -243,7 +243,7 @@ int __set_sched_attr(int pid, struct sched_attr *attr)
+ 	int flags = 0;
+ 	int retval;
+ 
+-	retval = sched_setattr(pid, attr, flags);
++	retval = syscall_sched_setattr(pid, attr, flags);
+ 	if (retval < 0) {
+ 		err_msg("Failed to set sched attributes to the pid %d: %s\n",
+ 			pid, strerror(errno));
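
The rename to syscall_sched_setattr() sidesteps a symbol clash: newer glibc releases ship their own sched_setattr() wrapper, so a file-local function with the kernel name can collide at build time. Giving raw-syscall wrappers a distinct prefix avoids that entirely; a minimal sketch of the pattern, using gettid as a stand-in:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

/* A distinctly named wrapper cannot collide with a prototype that
 * libc later starts exporting under the kernel's name. */
static long syscall_gettid(void)
{
    return syscall(SYS_gettid);
}

int main(void)
{
    printf("tid: %ld\n", syscall_gettid());
    return 0;
}
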
+diff --git a/tools/tracing/rtla/src/utils.h b/tools/tracing/rtla/src/utils.h
+index d44513e6c66a01..99c9cf81bcd02c 100644
+--- a/tools/tracing/rtla/src/utils.h
++++ b/tools/tracing/rtla/src/utils.h
+@@ -46,6 +46,7 @@ update_sum(unsigned long long *a, unsigned long long *b)
+ 	*a += *b;
+ }
+ 
++#ifndef SCHED_ATTR_SIZE_VER0
+ struct sched_attr {
+ 	uint32_t size;
+ 	uint32_t sched_policy;
+@@ -56,6 +57,7 @@ struct sched_attr {
+ 	uint64_t sched_deadline;
+ 	uint64_t sched_period;
+ };
++#endif /* SCHED_ATTR_SIZE_VER0 */
+ 
+ int parse_prio(char *arg, struct sched_attr *sched_param);
+ int parse_cpu_set(char *cpu_list, cpu_set_t *set);
+diff --git a/tools/verification/dot2/automata.py b/tools/verification/dot2/automata.py
+index baffeb960ff0b3..bdeb98baa8b065 100644
+--- a/tools/verification/dot2/automata.py
++++ b/tools/verification/dot2/automata.py
+@@ -29,11 +29,11 @@ class Automata:
+ 
+     def __get_model_name(self):
+         basename = ntpath.basename(self.__dot_path)
+-        if basename.endswith(".dot") == False:
++        if not basename.endswith(".dot") and not basename.endswith(".gv"):
+             print("not a dot file")
+             raise Exception("not a dot file: %s" % self.__dot_path)
+ 
+-        model_name = basename[0:-4]
++        model_name = ntpath.splitext(basename)[0]
+         if model_name.__len__() == 0:
+             raise Exception("not a dot file: %s" % self.__dot_path)
+ 
+@@ -68,9 +68,9 @@ class Automata:
+     def __get_cursor_begin_events(self):
+         cursor = 0
+         while self.__dot_lines[cursor].split()[0] != "{node":
+-           cursor += 1
++            cursor += 1
+         while self.__dot_lines[cursor].split()[0] == "{node":
+-           cursor += 1
++            cursor += 1
+         # skip initial state transition
+         cursor += 1
+         return cursor
+@@ -94,11 +94,11 @@ class Automata:
+                 initial_state = state[7:]
+             else:
+                 states.append(state)
+-                if self.__dot_lines[cursor].__contains__("doublecircle") == True:
++                if "doublecircle" in self.__dot_lines[cursor]:
+                     final_states.append(state)
+                     has_final_states = True
+ 
+-                if self.__dot_lines[cursor].__contains__("ellipse") == True:
++                if "ellipse" in self.__dot_lines[cursor]:
+                     final_states.append(state)
+                     has_final_states = True
+ 
+@@ -110,7 +110,7 @@ class Automata:
+         # Insert the initial state at the beginning of the states
+         states.insert(0, initial_state)
+ 
+-        if has_final_states == False:
++        if not has_final_states:
+             final_states.append(initial_state)
+ 
+         return states, initial_state, final_states
+@@ -120,7 +120,7 @@ class Automata:
+         cursor = self.__get_cursor_begin_events()
+ 
+         events = []
+-        while self.__dot_lines[cursor][1] == '"':
++        while self.__dot_lines[cursor].lstrip()[0] == '"':
+             # transitions have the format:
+             # "all_fired" -> "both_fired" [ label = "disable_irq" ];
+             #  ------------ event is here ------------^^^^^
+@@ -161,7 +161,7 @@ class Automata:
+         # and we are back! Let's fill the matrix
+         cursor = self.__get_cursor_begin_events()
+ 
+-        while self.__dot_lines[cursor][1] == '"':
++        while self.__dot_lines[cursor].lstrip()[0] == '"':
+             if self.__dot_lines[cursor].split()[1] == "->":
+                 line = self.__dot_lines[cursor].split()
+                 origin_state = line[0].replace('"','').replace(',','_')


^ permalink raw reply related	[flat|nested] 82+ messages in thread
* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2024-12-11 21:01 Mike Pagano
  0 siblings, 0 replies; 82+ messages in thread
From: Mike Pagano @ 2024-12-11 21:01 UTC (permalink / raw
  To: gentoo-commits

commit:     3cf228ef3b389e949f1242512c85121af823b397
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Dec 11 21:01:01 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Dec 11 21:01:01 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=3cf228ef

Add x86/pkeys fixes

Bug: https://bugs.gentoo.org/946182

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                        |   8 ++
 ...-change-caller-of-update_pkru_in_sigframe.patch | 107 +++++++++++++++++++++
 ...eys-ensure-updated-pkru-value-is-xrstor-d.patch |  96 ++++++++++++++++++
 3 files changed, 211 insertions(+)

diff --git a/0000_README b/0000_README
index b2e6beb3..81375872 100644
--- a/0000_README
+++ b/0000_README
@@ -71,6 +71,14 @@ Patch:  1730_parisc-Disable-prctl.patch
 From:   https://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux.git
 Desc:   prctl: Temporarily disable prctl(PR_SET_MDWE) on parisc
 
+Patch:  1740_x86-pkeys-change-caller-of-update_pkru_in_sigframe.patch
+From:   https://git.kernel.org/
+Desc:   x86/pkeys: Change caller of update_pkru_in_sigframe()
+
+Patch:  1741_x86-pkeys-ensure-updated-pkru-value-is-xrstor-d.patch
+From:   https://git.kernel.org/
+Desc:   x86/pkeys: Ensure updated PKRU value is XRSTOR'd
+
 Patch:  2000_BT-Check-key-sizes-only-if-Secure-Simple-Pairing-enabled.patch
 From:   https://lore.kernel.org/linux-bluetooth/20190522070540.48895-1-marcel@holtmann.org/raw
 Desc:   Bluetooth: Check key sizes only when Secure Simple Pairing is enabled. See bug #686758

diff --git a/1740_x86-pkeys-change-caller-of-update_pkru_in_sigframe.patch b/1740_x86-pkeys-change-caller-of-update_pkru_in_sigframe.patch
new file mode 100644
index 00000000..3a1fbd82
--- /dev/null
+++ b/1740_x86-pkeys-change-caller-of-update_pkru_in_sigframe.patch
@@ -0,0 +1,107 @@
+From 5683d0ce8fb46f36315a2b508f90ec6221cda018 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 19 Nov 2024 17:45:19 +0000
+Subject: x86/pkeys: Change caller of update_pkru_in_sigframe()
+
+From: Aruna Ramakrishna <aruna.ramakrishna@oracle.com>
+
+[ Upstream commit 6a1853bdf17874392476b552398df261f75503e0 ]
+
+update_pkru_in_sigframe() will shortly need some information which
+is only available inside xsave_to_user_sigframe(). Move
+update_pkru_in_sigframe() inside the other function to make it
+easier to provide it that information.
+
+No functional changes.
+
+Signed-off-by: Aruna Ramakrishna <aruna.ramakrishna@oracle.com>
+Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
+Link: https://lore.kernel.org/all/20241119174520.3987538-2-aruna.ramakrishna%40oracle.com
+Stable-dep-of: ae6012d72fa6 ("x86/pkeys: Ensure updated PKRU value is XRSTOR'd")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kernel/fpu/signal.c | 20 ++------------------
+ arch/x86/kernel/fpu/xstate.h | 15 ++++++++++++++-
+ 2 files changed, 16 insertions(+), 19 deletions(-)
+
+diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
+index 1065ab995305c..8f62e0666dea5 100644
+--- a/arch/x86/kernel/fpu/signal.c
++++ b/arch/x86/kernel/fpu/signal.c
+@@ -63,16 +63,6 @@ static inline bool check_xstate_in_sigframe(struct fxregs_state __user *fxbuf,
+ 	return true;
+ }
+ 
+-/*
+- * Update the value of PKRU register that was already pushed onto the signal frame.
+- */
+-static inline int update_pkru_in_sigframe(struct xregs_state __user *buf, u32 pkru)
+-{
+-	if (unlikely(!cpu_feature_enabled(X86_FEATURE_OSPKE)))
+-		return 0;
+-	return __put_user(pkru, (unsigned int __user *)get_xsave_addr_user(buf, XFEATURE_PKRU));
+-}
+-
+ /*
+  * Signal frame handlers.
+  */
+@@ -168,14 +158,8 @@ static inline bool save_xstate_epilog(void __user *buf, int ia32_frame,
+ 
+ static inline int copy_fpregs_to_sigframe(struct xregs_state __user *buf, u32 pkru)
+ {
+-	int err = 0;
+-
+-	if (use_xsave()) {
+-		err = xsave_to_user_sigframe(buf);
+-		if (!err)
+-			err = update_pkru_in_sigframe(buf, pkru);
+-		return err;
+-	}
++	if (use_xsave())
++		return xsave_to_user_sigframe(buf, pkru);
+ 
+ 	if (use_fxsr())
+ 		return fxsave_to_user_sigframe((struct fxregs_state __user *) buf);
+diff --git a/arch/x86/kernel/fpu/xstate.h b/arch/x86/kernel/fpu/xstate.h
+index 0b86a5002c846..6b2924fbe5b8d 100644
+--- a/arch/x86/kernel/fpu/xstate.h
++++ b/arch/x86/kernel/fpu/xstate.h
+@@ -69,6 +69,16 @@ static inline u64 xfeatures_mask_independent(void)
+ 	return fpu_kernel_cfg.independent_features;
+ }
+ 
++/*
++ * Update the value of PKRU register that was already pushed onto the signal frame.
++ */
++static inline int update_pkru_in_sigframe(struct xregs_state __user *buf, u32 pkru)
++{
++	if (unlikely(!cpu_feature_enabled(X86_FEATURE_OSPKE)))
++		return 0;
++	return __put_user(pkru, (unsigned int __user *)get_xsave_addr_user(buf, XFEATURE_PKRU));
++}
++
+ /* XSAVE/XRSTOR wrapper functions */
+ 
+ #ifdef CONFIG_X86_64
+@@ -256,7 +266,7 @@ static inline u64 xfeatures_need_sigframe_write(void)
+  * The caller has to zero buf::header before calling this because XSAVE*
+  * does not touch the reserved fields in the header.
+  */
+-static inline int xsave_to_user_sigframe(struct xregs_state __user *buf)
++static inline int xsave_to_user_sigframe(struct xregs_state __user *buf, u32 pkru)
+ {
+ 	/*
+ 	 * Include the features which are not xsaved/rstored by the kernel
+@@ -281,6 +291,9 @@ static inline int xsave_to_user_sigframe(struct xregs_state __user *buf)
+ 	XSTATE_OP(XSAVE, buf, lmask, hmask, err);
+ 	clac();
+ 
++	if (!err)
++		err = update_pkru_in_sigframe(buf, pkru);
++
+ 	return err;
+ }
+ 
+-- 
+2.43.0
+

diff --git a/1741_x86-pkeys-ensure-updated-pkru-value-is-xrstor-d.patch b/1741_x86-pkeys-ensure-updated-pkru-value-is-xrstor-d.patch
new file mode 100644
index 00000000..11b1f768
--- /dev/null
+++ b/1741_x86-pkeys-ensure-updated-pkru-value-is-xrstor-d.patch
@@ -0,0 +1,96 @@
+From 24fedf2768fd57e0d767137044c4f7493357b325 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 19 Nov 2024 17:45:20 +0000
+Subject: x86/pkeys: Ensure updated PKRU value is XRSTOR'd
+
+From: Aruna Ramakrishna <aruna.ramakrishna@oracle.com>
+
+[ Upstream commit ae6012d72fa60c9ff92de5bac7a8021a47458e5b ]
+
+When XSTATE_BV[i] is 0, and XRSTOR attempts to restore state component
+'i' it ignores any value in the XSAVE buffer and instead restores the
+state component's init value.
+
+This means that if XSAVE writes XSTATE_BV[PKRU]=0 then XRSTOR will
+ignore the value that update_pkru_in_sigframe() writes to the XSAVE buffer.
+
+XSTATE_BV[PKRU] only gets written as 0 if PKRU is in its init state. On
+Intel CPUs, this basically never happens because the kernel usually
+overwrites the init value (aside: this is why we didn't notice this bug
+until now). But on AMD, the init tracker is more aggressive and will
+track PKRU as being in its init state upon any wrpkru(0x0).
+Unfortunately, sig_prepare_pkru() does just that: wrpkru(0x0).
+
+This writes XSTATE_BV[PKRU]=0 which makes XRSTOR ignore the PKRU value
+in the sigframe.
+
+To fix this, always overwrite the sigframe XSTATE_BV with a value that
+has XSTATE_BV[PKRU]==1.  This ensures that XRSTOR will not ignore what
+update_pkru_in_sigframe() wrote.
+
+The problematic sequence of events is something like this:
+
+Userspace does:
+	* wrpkru(0xffff0000) (or whatever)
+	* Hardware sets: XINUSE[PKRU]=1
+Signal happens, kernel is entered:
+	* sig_prepare_pkru() => wrpkru(0x00000000)
+	* Hardware sets: XINUSE[PKRU]=0 (aggressive AMD init tracker)
+	* XSAVE writes most of XSAVE buffer, including
+	  XSTATE_BV[PKRU]=XINUSE[PKRU]=0
+	* update_pkru_in_sigframe() overwrites PKRU in XSAVE buffer
+... signal handling
+	* XRSTOR sees XSTATE_BV[PKRU]==0, ignores just-written value
+	  from update_pkru_in_sigframe()
+
+Fixes: 70044df250d0 ("x86/pkeys: Update PKRU to enable all pkeys before XSAVE")
+Suggested-by: Rudi Horn <rudi.horn@oracle.com>
+Signed-off-by: Aruna Ramakrishna <aruna.ramakrishna@oracle.com>
+Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
+Acked-by: Dave Hansen <dave.hansen@linux.intel.com>
+Link: https://lore.kernel.org/all/20241119174520.3987538-3-aruna.ramakrishna%40oracle.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kernel/fpu/xstate.h | 16 ++++++++++++++--
+ 1 file changed, 14 insertions(+), 2 deletions(-)
+
+diff --git a/arch/x86/kernel/fpu/xstate.h b/arch/x86/kernel/fpu/xstate.h
+index 6b2924fbe5b8d..aa16f1a1bbcf1 100644
+--- a/arch/x86/kernel/fpu/xstate.h
++++ b/arch/x86/kernel/fpu/xstate.h
+@@ -72,10 +72,22 @@ static inline u64 xfeatures_mask_independent(void)
+ /*
+  * Update the value of PKRU register that was already pushed onto the signal frame.
+  */
+-static inline int update_pkru_in_sigframe(struct xregs_state __user *buf, u32 pkru)
++static inline int update_pkru_in_sigframe(struct xregs_state __user *buf, u64 mask, u32 pkru)
+ {
++	u64 xstate_bv;
++	int err;
++
+ 	if (unlikely(!cpu_feature_enabled(X86_FEATURE_OSPKE)))
+ 		return 0;
++
++	/* Mark PKRU as in-use so that it is restored correctly. */
++	xstate_bv = (mask & xfeatures_in_use()) | XFEATURE_MASK_PKRU;
++
++	err =  __put_user(xstate_bv, &buf->header.xfeatures);
++	if (err)
++		return err;
++
++	/* Update PKRU value in the userspace xsave buffer. */
+ 	return __put_user(pkru, (unsigned int __user *)get_xsave_addr_user(buf, XFEATURE_PKRU));
+ }
+ 
+@@ -292,7 +304,7 @@ static inline int xsave_to_user_sigframe(struct xregs_state __user *buf, u32 pkr
+ 	clac();
+ 
+ 	if (!err)
+-		err = update_pkru_in_sigframe(buf, pkru);
++		err = update_pkru_in_sigframe(buf, mask, pkru);
+ 
+ 	return err;
+ }
+-- 
+2.43.0
+
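
For readers unfamiliar with PKRU: on x86 it is the user-visible register behind the memory protection keys API, which userspace can exercise directly with the pkey_* calls. A minimal sketch (needs a pkeys-capable x86 CPU and glibc 2.27+; it only demonstrates the feature, the fix above is kernel-internal):

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
    int pkey = pkey_alloc(0, 0);
    char *page;

    if (pkey < 0) {
        perror("pkey_alloc");   /* no hardware or kernel support */
        return 1;
    }
    page = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (page == MAP_FAILED) {
        perror("mmap");
        return 1;
    }
    /* Tag the page with the key, then revoke access via PKRU. */
    pkey_mprotect(page, 4096, PROT_READ | PROT_WRITE, pkey);
    pkey_set(pkey, PKEY_DISABLE_ACCESS);
    /* Touching *page here would fault with SEGV_PKUERR. */
    pkey_set(pkey, 0);  /* restore access */
    page[0] = 'x';
    pkey_free(pkey);
    return 0;
}
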


^ permalink raw reply related	[flat|nested] 82+ messages in thread
* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2024-12-09 23:13 Mike Pagano
  0 siblings, 0 replies; 82+ messages in thread
From: Mike Pagano @ 2024-12-09 23:13 UTC (permalink / raw
  To: gentoo-commits

commit:     42337dcbb74c47c507f2628074a83f937cd1cf1a
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Dec  9 23:12:52 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Dec  9 23:12:52 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=42337dcb

drm/display: Fix building with GCC 15

Bug: https://bugs.gentoo.org/946130

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                  |  4 ++++
 2700_drm-display-GCC15.patch | 52 ++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 56 insertions(+)

diff --git a/0000_README b/0000_README
index 87f43cf7..b2e6beb3 100644
--- a/0000_README
+++ b/0000_README
@@ -75,6 +75,10 @@ Patch:  2000_BT-Check-key-sizes-only-if-Secure-Simple-Pairing-enabled.patch
 From:   https://lore.kernel.org/linux-bluetooth/20190522070540.48895-1-marcel@holtmann.org/raw
 Desc:   Bluetooth: Check key sizes only when Secure Simple Pairing is enabled. See bug #686758
 
+Patch:  2700_drm-display-GCC15.patch
+From:   https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
+Desc:   drm/display: Fix building with GCC 15
+
 Patch:  2901_tools-lib-subcmd-compile-fix.patch
 From:   https://lore.kernel.org/all/20240731085217.94928-1-michael.weiss@aisec.fraunhofer.de/
 Desc:   tools lib subcmd: Fixed uninitialized use of variable in parse-options

diff --git a/2700_drm-display-GCC15.patch b/2700_drm-display-GCC15.patch
new file mode 100644
index 00000000..0be775ea
--- /dev/null
+++ b/2700_drm-display-GCC15.patch
@@ -0,0 +1,52 @@
+From a500f3751d3c861be7e4463c933cf467240cca5d Mon Sep 17 00:00:00 2001
+From: Brahmajit Das <brahmajit.xyz@gmail.com>
+Date: Wed, 2 Oct 2024 14:53:11 +0530
+Subject: drm/display: Fix building with GCC 15
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+GCC 15 enables -Werror=unterminated-string-initialization by default.
+This results in the following build error
+
+drivers/gpu/drm/display/drm_dp_dual_mode_helper.c: In function ‘is_hdmi_adaptor’:
+drivers/gpu/drm/display/drm_dp_dual_mode_helper.c:164:17: error: initializer-string for array of
+ ‘char’ is too long [-Werror=unterminated-string-initialization]
+  164 |                 "DP-HDMI ADAPTOR\x04";
+      |                 ^~~~~~~~~~~~~~~~~~~~~
+
+After discussion with Ville, the fix was to increase the size of
+dp_dual_mode_hdmi_id array by one, so that it can accommodate the
+terminating NUL character. This should let us build the kernel with GCC 15.
+
+Signed-off-by: Brahmajit Das <brahmajit.xyz@gmail.com>
+Reviewed-by: Jani Nikula <jani.nikula@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20241002092311.942822-1-brahmajit.xyz@gmail.com
+Signed-off-by: Jani Nikula <jani.nikula@intel.com>
+---
+ drivers/gpu/drm/display/drm_dp_dual_mode_helper.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+(limited to 'drivers/gpu/drm/display/drm_dp_dual_mode_helper.c')
+
+diff --git a/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c
+index 14a2a8473682b0..c491e3203bf11c 100644
+--- a/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c
++++ b/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c
+@@ -160,11 +160,11 @@ EXPORT_SYMBOL(drm_dp_dual_mode_write);
+ 
+ static bool is_hdmi_adaptor(const char hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN])
+ {
+-	static const char dp_dual_mode_hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN] =
++	static const char dp_dual_mode_hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN + 1] =
+ 		"DP-HDMI ADAPTOR\x04";
+ 
+ 	return memcmp(hdmi_id, dp_dual_mode_hdmi_id,
+-		      sizeof(dp_dual_mode_hdmi_id)) == 0;
++		      DP_DUAL_MODE_HDMI_ID_LEN) == 0;
+ }
+ 
+ static bool is_type1_adaptor(uint8_t adaptor_id)
+-- 
+cgit 1.2.3-korg
+
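
The warning GCC 15 turns on fires whenever a string literal exactly fills a char array, silently dropping the terminating NUL; the fix keeps the memcmp() length unchanged and just sizes the array one byte larger. A minimal illustration (identifiers are made up):

#include <stdio.h>
#include <string.h>

int main(void)
{
    /* "HDMI" is 4 characters plus a NUL: with [4] the NUL is dropped
     * and GCC 15 warns; [4 + 1] holds the literal intact while
     * fixed-length comparisons still use 4. */
    static const char id[4 + 1] = "HDMI";

    printf("match: %d\n", memcmp(id, "HDMI", 4) == 0);
    return 0;
}
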


^ permalink raw reply related	[flat|nested] 82+ messages in thread
* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2024-12-09 11:35 Mike Pagano
  0 siblings, 0 replies; 82+ messages in thread
From: Mike Pagano @ 2024-12-09 11:35 UTC (permalink / raw
  To: gentoo-commits

commit:     a86bef4a2fd2b250f44dcf0c300bf7d7b26f05e5
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Dec  9 11:34:48 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Dec  9 11:34:48 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=a86bef4a

Linux patch 6.12.4

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1003_linux-6.12.4.patch | 4189 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 4193 insertions(+)

diff --git a/0000_README b/0000_README
index c7f77bd5..87f43cf7 100644
--- a/0000_README
+++ b/0000_README
@@ -55,6 +55,10 @@ Patch:  1002_linux-6.12.3.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.12.3
 
+Patch:  1003_linux-6.12.4.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.12.4
+
 Patch:  1510_fs-enable-link-security-restrictions-by-default.patch
 From:   http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch/
 Desc:   Enable link security restrictions by default.

diff --git a/1003_linux-6.12.4.patch b/1003_linux-6.12.4.patch
new file mode 100644
index 00000000..42f90cf9
--- /dev/null
+++ b/1003_linux-6.12.4.patch
@@ -0,0 +1,4189 @@
+diff --git a/Documentation/devicetree/bindings/net/fsl,fec.yaml b/Documentation/devicetree/bindings/net/fsl,fec.yaml
+index 5536c06139cae5..24e863fdbdab08 100644
+--- a/Documentation/devicetree/bindings/net/fsl,fec.yaml
++++ b/Documentation/devicetree/bindings/net/fsl,fec.yaml
+@@ -183,6 +183,13 @@ properties:
+     description:
+       Register bits of stop mode control, the format is <&gpr req_gpr req_bit>.
+ 
++  fsl,pps-channel:
++    $ref: /schemas/types.yaml#/definitions/uint32
++    default: 0
++    description:
++      Specifies to which timer instance the PPS signal is routed.
++    enum: [0, 1, 2, 3]
++
+   mdio:
+     $ref: mdio.yaml#
+     unevaluatedProperties: false
+diff --git a/Makefile b/Makefile
+index e81030ec683143..87dc2f81086021 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 12
+-SUBLEVEL = 3
++SUBLEVEL = 4
+ EXTRAVERSION =
+ NAME = Baby Opossum Posse
+ 
+diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
+index 1dfae1af8e31b0..ef6a657c8d1306 100644
+--- a/arch/arm/kernel/entry-armv.S
++++ b/arch/arm/kernel/entry-armv.S
+@@ -25,6 +25,7 @@
+ #include <asm/tls.h>
+ #include <asm/system_info.h>
+ #include <asm/uaccess-asm.h>
++#include <asm/kasan_def.h>
+ 
+ #include "entry-header.S"
+ #include <asm/probes.h>
+@@ -561,6 +562,13 @@ ENTRY(__switch_to)
+ 	@ entries covering the vmalloc region.
+ 	@
+ 	ldr	r2, [ip]
++#ifdef CONFIG_KASAN_VMALLOC
++	@ Also dummy read from the KASAN shadow memory for the new stack if we
++	@ are using KASAN
++	mov_l	r2, KASAN_SHADOW_OFFSET
++	add	r2, r2, ip, lsr #KASAN_SHADOW_SCALE_SHIFT
++	ldr	r2, [r2]
++#endif
+ #endif
+ 
+ 	@ When CONFIG_THREAD_INFO_IN_TASK=n, the update of SP itself is what
+diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
+index 794cfea9f9d4c8..89f1c97f3079c1 100644
+--- a/arch/arm/mm/ioremap.c
++++ b/arch/arm/mm/ioremap.c
+@@ -23,6 +23,7 @@
+  */
+ #include <linux/module.h>
+ #include <linux/errno.h>
++#include <linux/kasan.h>
+ #include <linux/mm.h>
+ #include <linux/vmalloc.h>
+ #include <linux/io.h>
+@@ -115,16 +116,40 @@ int ioremap_page(unsigned long virt, unsigned long phys,
+ }
+ EXPORT_SYMBOL(ioremap_page);
+ 
++#ifdef CONFIG_KASAN
++static unsigned long arm_kasan_mem_to_shadow(unsigned long addr)
++{
++	return (unsigned long)kasan_mem_to_shadow((void *)addr);
++}
++#else
++static unsigned long arm_kasan_mem_to_shadow(unsigned long addr)
++{
++	return 0;
++}
++#endif
++
++static void memcpy_pgd(struct mm_struct *mm, unsigned long start,
++		       unsigned long end)
++{
++	end = ALIGN(end, PGDIR_SIZE);
++	memcpy(pgd_offset(mm, start), pgd_offset_k(start),
++	       sizeof(pgd_t) * (pgd_index(end) - pgd_index(start)));
++}
++
+ void __check_vmalloc_seq(struct mm_struct *mm)
+ {
+ 	int seq;
+ 
+ 	do {
+-		seq = atomic_read(&init_mm.context.vmalloc_seq);
+-		memcpy(pgd_offset(mm, VMALLOC_START),
+-		       pgd_offset_k(VMALLOC_START),
+-		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
+-					pgd_index(VMALLOC_START)));
++		seq = atomic_read_acquire(&init_mm.context.vmalloc_seq);
++		memcpy_pgd(mm, VMALLOC_START, VMALLOC_END);
++		if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
++			unsigned long start =
++				arm_kasan_mem_to_shadow(VMALLOC_START);
++			unsigned long end =
++				arm_kasan_mem_to_shadow(VMALLOC_END);
++			memcpy_pgd(mm, start, end);
++		}
+ 		/*
+ 		 * Use a store-release so that other CPUs that observe the
+ 		 * counter's new value are guaranteed to see the results of the
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone.dtsi
+index 6eab61a12cd8f8..b844759f52c0d8 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone.dtsi
++++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone.dtsi
+@@ -212,6 +212,9 @@ accelerometer@68 {
+ 		interrupts = <7 5 IRQ_TYPE_EDGE_RISING>; /* PH5 */
+ 		vdd-supply = <&reg_dldo1>;
+ 		vddio-supply = <&reg_dldo1>;
++		mount-matrix = "0", "1", "0",
++			       "-1", "0", "0",
++			       "0", "0", "1";
+ 	};
+ };
+ 
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
+index 5fa39591419115..aee79a50d0e26a 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
+@@ -162,7 +162,7 @@ reg_usdhc2_vmmc: regulator-usdhc2 {
+ 		regulator-max-microvolt = <3300000>;
+ 		regulator-min-microvolt = <3300000>;
+ 		regulator-name = "+V3.3_SD";
+-		startup-delay-us = <2000>;
++		startup-delay-us = <20000>;
+ 	};
+ 
+ 	reserved-memory {
+diff --git a/arch/arm64/boot/dts/freescale/imx8mp-verdin.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-verdin.dtsi
+index a19ad5ee7f792b..1689fe44099396 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mp-verdin.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mp-verdin.dtsi
+@@ -175,7 +175,7 @@ reg_usdhc2_vmmc: regulator-usdhc2 {
+ 		regulator-max-microvolt = <3300000>;
+ 		regulator-min-microvolt = <3300000>;
+ 		regulator-name = "+V3.3_SD";
+-		startup-delay-us = <2000>;
++		startup-delay-us = <20000>;
+ 	};
+ 
+ 	reserved-memory {
+diff --git a/arch/arm64/boot/dts/mediatek/mt8186-corsola.dtsi b/arch/arm64/boot/dts/mediatek/mt8186-corsola.dtsi
+index 0c0b3ac5974525..cfcc7909dfe68d 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8186-corsola.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8186-corsola.dtsi
+@@ -423,7 +423,7 @@ it6505dptx: dp-bridge@5c {
+ 		#sound-dai-cells = <0>;
+ 		ovdd-supply = <&mt6366_vsim2_reg>;
+ 		pwr18-supply = <&pp1800_dpbrdg_dx>;
+-		reset-gpios = <&pio 177 GPIO_ACTIVE_HIGH>;
++		reset-gpios = <&pio 177 GPIO_ACTIVE_LOW>;
+ 
+ 		ports {
+ 			#address-cells = <1>;
+@@ -1336,7 +1336,7 @@ mt6366_vgpu_reg: vgpu {
+ 				regulator-allowed-modes = <MT6397_BUCK_MODE_AUTO
+ 							   MT6397_BUCK_MODE_FORCE_PWM>;
+ 				regulator-coupled-with = <&mt6366_vsram_gpu_reg>;
+-				regulator-coupled-max-spread = <10000>;
++				regulator-coupled-max-spread = <100000>;
+ 			};
+ 
+ 			mt6366_vproc11_reg: vproc11 {
+@@ -1545,7 +1545,7 @@ mt6366_vsram_gpu_reg: vsram-gpu {
+ 				regulator-ramp-delay = <6250>;
+ 				regulator-enable-ramp-delay = <240>;
+ 				regulator-coupled-with = <&mt6366_vgpu_reg>;
+-				regulator-coupled-max-spread = <10000>;
++				regulator-coupled-max-spread = <100000>;
+ 			};
+ 
+ 			mt6366_vsram_others_reg: vsram-others {
+diff --git a/arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi b/arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi
+index 5bef31b8577be5..f0eac05f7483ea 100644
+--- a/arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi
+@@ -160,7 +160,7 @@ reg_sdhc1_vmmc: regulator-sdhci1 {
+ 		regulator-max-microvolt = <3300000>;
+ 		regulator-min-microvolt = <3300000>;
+ 		regulator-name = "+V3.3_SD";
+-		startup-delay-us = <2000>;
++		startup-delay-us = <20000>;
+ 	};
+ 
+ 	reg_sdhc1_vqmmc: regulator-sdhci1-vqmmc {
+diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
+index 1a2ff0276365b4..c7b420d6787ca1 100644
+--- a/arch/powerpc/Kconfig
++++ b/arch/powerpc/Kconfig
+@@ -275,8 +275,8 @@ config PPC
+ 	select HAVE_RSEQ
+ 	select HAVE_SETUP_PER_CPU_AREA		if PPC64
+ 	select HAVE_SOFTIRQ_ON_OWN_STACK
+-	select HAVE_STACKPROTECTOR		if PPC32 && $(cc-option,-mstack-protector-guard=tls -mstack-protector-guard-reg=r2)
+-	select HAVE_STACKPROTECTOR		if PPC64 && $(cc-option,-mstack-protector-guard=tls -mstack-protector-guard-reg=r13)
++	select HAVE_STACKPROTECTOR		if PPC32 && $(cc-option,$(m32-flag) -mstack-protector-guard=tls -mstack-protector-guard-reg=r2 -mstack-protector-guard-offset=0)
++	select HAVE_STACKPROTECTOR		if PPC64 && $(cc-option,$(m64-flag) -mstack-protector-guard=tls -mstack-protector-guard-reg=r13 -mstack-protector-guard-offset=0)
+ 	select HAVE_STATIC_CALL			if PPC32
+ 	select HAVE_SYSCALL_TRACEPOINTS
+ 	select HAVE_VIRT_CPU_ACCOUNTING
+diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
+index bbfe4a1f06ef9d..cbb353ddacb7ad 100644
+--- a/arch/powerpc/Makefile
++++ b/arch/powerpc/Makefile
+@@ -100,13 +100,6 @@ KBUILD_AFLAGS	+= -m$(BITS)
+ KBUILD_LDFLAGS	+= -m elf$(BITS)$(LDEMULATION)
+ endif
+ 
+-cflags-$(CONFIG_STACKPROTECTOR)	+= -mstack-protector-guard=tls
+-ifdef CONFIG_PPC64
+-cflags-$(CONFIG_STACKPROTECTOR)	+= -mstack-protector-guard-reg=r13
+-else
+-cflags-$(CONFIG_STACKPROTECTOR)	+= -mstack-protector-guard-reg=r2
+-endif
+-
+ LDFLAGS_vmlinux-y := -Bstatic
+ LDFLAGS_vmlinux-$(CONFIG_RELOCATABLE) := -pie
+ LDFLAGS_vmlinux-$(CONFIG_RELOCATABLE) += -z notext
+@@ -402,9 +395,11 @@ prepare: stack_protector_prepare
+ PHONY += stack_protector_prepare
+ stack_protector_prepare: prepare0
+ ifdef CONFIG_PPC64
+-	$(eval KBUILD_CFLAGS += -mstack-protector-guard-offset=$(shell awk '{if ($$2 == "PACA_CANARY") print $$3;}' include/generated/asm-offsets.h))
++	$(eval KBUILD_CFLAGS += -mstack-protector-guard=tls -mstack-protector-guard-reg=r13 \
++				-mstack-protector-guard-offset=$(shell awk '{if ($$2 == "PACA_CANARY") print $$3;}' include/generated/asm-offsets.h))
+ else
+-	$(eval KBUILD_CFLAGS += -mstack-protector-guard-offset=$(shell awk '{if ($$2 == "TASK_CANARY") print $$3;}' include/generated/asm-offsets.h))
++	$(eval KBUILD_CFLAGS += -mstack-protector-guard=tls -mstack-protector-guard-reg=r2 \
++				-mstack-protector-guard-offset=$(shell awk '{if ($$2 == "TASK_CANARY") print $$3;}' include/generated/asm-offsets.h))
+ endif
+ endif
+ 
+diff --git a/arch/powerpc/kernel/vdso/Makefile b/arch/powerpc/kernel/vdso/Makefile
+index 31ca5a5470047e..c568cad6a22e6b 100644
+--- a/arch/powerpc/kernel/vdso/Makefile
++++ b/arch/powerpc/kernel/vdso/Makefile
+@@ -54,10 +54,14 @@ ldflags-y += $(filter-out $(CC_AUTO_VAR_INIT_ZERO_ENABLER) $(CC_FLAGS_FTRACE) -W
+ 
+ CC32FLAGS := -m32
+ CC32FLAGSREMOVE := -mcmodel=medium -mabi=elfv1 -mabi=elfv2 -mcall-aixdesc
+-  # This flag is supported by clang for 64-bit but not 32-bit so it will cause
+-  # an unused command line flag warning for this file.
+ ifdef CONFIG_CC_IS_CLANG
++# This flag is supported by clang for 64-bit but not 32-bit so it will cause
++# an unused command line flag warning for this file.
+ CC32FLAGSREMOVE += -fno-stack-clash-protection
++# -mstack-protector-guard values from the 64-bit build are not valid for the
++# 32-bit one. clang validates the values passed to these arguments during
++# parsing, even when -fno-stack-protector is passed afterwards.
++CC32FLAGSREMOVE += -mstack-protector-guard%
+ endif
+ LD32FLAGS := -Wl,-soname=linux-vdso32.so.1
+ AS32FLAGS := -D__VDSO32__
+diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
+index d6d5317f768e82..594da4cba707a6 100644
+--- a/arch/s390/kernel/entry.S
++++ b/arch/s390/kernel/entry.S
+@@ -450,9 +450,13 @@ SYM_CODE_START(\name)
+ SYM_CODE_END(\name)
+ .endm
+ 
++	.section .irqentry.text, "ax"
++
+ INT_HANDLER ext_int_handler,__LC_EXT_OLD_PSW,do_ext_irq
+ INT_HANDLER io_int_handler,__LC_IO_OLD_PSW,do_io_irq
+ 
++	.section .kprobes.text, "ax"
++
+ /*
+  * Machine check handler routines
+  */
+diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
+index 6295faf0987d86..8b80ea57125f3c 100644
+--- a/arch/s390/kernel/kprobes.c
++++ b/arch/s390/kernel/kprobes.c
+@@ -489,6 +489,12 @@ int __init arch_init_kprobes(void)
+ 	return 0;
+ }
+ 
++int __init arch_populate_kprobe_blacklist(void)
++{
++	return kprobe_add_area_blacklist((unsigned long)__irqentry_text_start,
++					 (unsigned long)__irqentry_text_end);
++}
++
+ int arch_trampoline_kprobe(struct kprobe *p)
+ {
+ 	return 0;
+diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c
+index 9f59837d159e0c..40edfde25f5b97 100644
+--- a/arch/s390/kernel/stacktrace.c
++++ b/arch/s390/kernel/stacktrace.c
+@@ -151,7 +151,7 @@ void arch_stack_walk_user_common(stack_trace_consume_fn consume_entry, void *coo
+ 				break;
+ 		}
+ 		if (!store_ip(consume_entry, cookie, entry, perf, ip))
+-			return;
++			break;
+ 		first = false;
+ 	}
+ 	pagefault_enable();
+diff --git a/drivers/android/binder.c b/drivers/android/binder.c
+index 978740537a1aac..ef353ca13c356a 100644
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -1225,6 +1225,12 @@ static void binder_cleanup_ref_olocked(struct binder_ref *ref)
+ 		binder_dequeue_work(ref->proc, &ref->death->work);
+ 		binder_stats_deleted(BINDER_STAT_DEATH);
+ 	}
++
++	if (ref->freeze) {
++		binder_dequeue_work(ref->proc, &ref->freeze->work);
++		binder_stats_deleted(BINDER_STAT_FREEZE);
++	}
++
+ 	binder_stats_deleted(BINDER_STAT_REF);
+ }
+ 
+@@ -3850,7 +3856,6 @@ binder_request_freeze_notification(struct binder_proc *proc,
+ {
+ 	struct binder_ref_freeze *freeze;
+ 	struct binder_ref *ref;
+-	bool is_frozen;
+ 
+ 	freeze = kzalloc(sizeof(*freeze), GFP_KERNEL);
+ 	if (!freeze)
+@@ -3866,32 +3871,31 @@ binder_request_freeze_notification(struct binder_proc *proc,
+ 	}
+ 
+ 	binder_node_lock(ref->node);
+-
+-	if (ref->freeze || !ref->node->proc) {
+-		binder_user_error("%d:%d invalid BC_REQUEST_FREEZE_NOTIFICATION %s\n",
+-				  proc->pid, thread->pid,
+-				  ref->freeze ? "already set" : "dead node");
++	if (ref->freeze) {
++		binder_user_error("%d:%d BC_REQUEST_FREEZE_NOTIFICATION already set\n",
++				  proc->pid, thread->pid);
+ 		binder_node_unlock(ref->node);
+ 		binder_proc_unlock(proc);
+ 		kfree(freeze);
+ 		return -EINVAL;
+ 	}
+-	binder_inner_proc_lock(ref->node->proc);
+-	is_frozen = ref->node->proc->is_frozen;
+-	binder_inner_proc_unlock(ref->node->proc);
+ 
+ 	binder_stats_created(BINDER_STAT_FREEZE);
+ 	INIT_LIST_HEAD(&freeze->work.entry);
+ 	freeze->cookie = handle_cookie->cookie;
+ 	freeze->work.type = BINDER_WORK_FROZEN_BINDER;
+-	freeze->is_frozen = is_frozen;
+-
+ 	ref->freeze = freeze;
+ 
+-	binder_inner_proc_lock(proc);
+-	binder_enqueue_work_ilocked(&ref->freeze->work, &proc->todo);
+-	binder_wakeup_proc_ilocked(proc);
+-	binder_inner_proc_unlock(proc);
++	if (ref->node->proc) {
++		binder_inner_proc_lock(ref->node->proc);
++		freeze->is_frozen = ref->node->proc->is_frozen;
++		binder_inner_proc_unlock(ref->node->proc);
++
++		binder_inner_proc_lock(proc);
++		binder_enqueue_work_ilocked(&freeze->work, &proc->todo);
++		binder_wakeup_proc_ilocked(proc);
++		binder_inner_proc_unlock(proc);
++	}
+ 
+ 	binder_node_unlock(ref->node);
+ 	binder_proc_unlock(proc);
+@@ -5151,6 +5155,16 @@ static void binder_release_work(struct binder_proc *proc,
+ 		} break;
+ 		case BINDER_WORK_NODE:
+ 			break;
++		case BINDER_WORK_CLEAR_FREEZE_NOTIFICATION: {
++			struct binder_ref_freeze *freeze;
++
++			freeze = container_of(w, struct binder_ref_freeze, work);
++			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
++				     "undelivered freeze notification, %016llx\n",
++				     (u64)freeze->cookie);
++			kfree(freeze);
++			binder_stats_deleted(BINDER_STAT_FREEZE);
++		} break;
+ 		default:
+ 			pr_err("unexpected work type, %d, not freed\n",
+ 			       wtype);
+@@ -5552,6 +5566,7 @@ static bool binder_txns_pending_ilocked(struct binder_proc *proc)
+ 
+ static void binder_add_freeze_work(struct binder_proc *proc, bool is_frozen)
+ {
++	struct binder_node *prev = NULL;
+ 	struct rb_node *n;
+ 	struct binder_ref *ref;
+ 
+@@ -5560,7 +5575,10 @@ static void binder_add_freeze_work(struct binder_proc *proc, bool is_frozen)
+ 		struct binder_node *node;
+ 
+ 		node = rb_entry(n, struct binder_node, rb_node);
++		binder_inc_node_tmpref_ilocked(node);
+ 		binder_inner_proc_unlock(proc);
++		if (prev)
++			binder_put_node(prev);
+ 		binder_node_lock(node);
+ 		hlist_for_each_entry(ref, &node->refs, node_entry) {
+ 			/*
+@@ -5586,10 +5604,15 @@ static void binder_add_freeze_work(struct binder_proc *proc, bool is_frozen)
+ 			}
+ 			binder_inner_proc_unlock(ref->proc);
+ 		}
++		prev = node;
+ 		binder_node_unlock(node);
+ 		binder_inner_proc_lock(proc);
++		if (proc->is_dead)
++			break;
+ 	}
+ 	binder_inner_proc_unlock(proc);
++	if (prev)
++		binder_put_node(prev);
+ }
+ 
+ static int binder_ioctl_freeze(struct binder_freeze_info *info,
+@@ -6260,6 +6283,7 @@ static void binder_deferred_release(struct binder_proc *proc)
+ 
+ 	binder_release_work(proc, &proc->todo);
+ 	binder_release_work(proc, &proc->delivered_death);
++	binder_release_work(proc, &proc->delivered_freeze);
+ 
+ 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
+ 		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
+@@ -6393,6 +6417,12 @@ static void print_binder_work_ilocked(struct seq_file *m,
+ 	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
+ 		seq_printf(m, "%shas cleared death notification\n", prefix);
+ 		break;
++	case BINDER_WORK_FROZEN_BINDER:
++		seq_printf(m, "%shas frozen binder\n", prefix);
++		break;
++	case BINDER_WORK_CLEAR_FREEZE_NOTIFICATION:
++		seq_printf(m, "%shas cleared freeze notification\n", prefix);
++		break;
+ 	default:
+ 		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
+ 		break;
+@@ -6539,6 +6569,10 @@ static void print_binder_proc(struct seq_file *m,
+ 		seq_puts(m, "  has delivered dead binder\n");
+ 		break;
+ 	}
++	list_for_each_entry(w, &proc->delivered_freeze, entry) {
++		seq_puts(m, "  has delivered freeze binder\n");
++		break;
++	}
+ 	binder_inner_proc_unlock(proc);
+ 	if (!print_all && m->count == header_pos)
+ 		m->count = start_pos;
+diff --git a/drivers/base/core.c b/drivers/base/core.c
+index 048ff98dbdfd84..d922cefc1e6625 100644
+--- a/drivers/base/core.c
++++ b/drivers/base/core.c
+@@ -1989,10 +1989,10 @@ static struct device *fwnode_get_next_parent_dev(const struct fwnode_handle *fwn
+  *
+  * Return true if one or more cycles were found. Otherwise, return false.
+  */
+-static bool __fw_devlink_relax_cycles(struct device *con,
++static bool __fw_devlink_relax_cycles(struct fwnode_handle *con_handle,
+ 				 struct fwnode_handle *sup_handle)
+ {
+-	struct device *sup_dev = NULL, *par_dev = NULL;
++	struct device *sup_dev = NULL, *par_dev = NULL, *con_dev = NULL;
+ 	struct fwnode_link *link;
+ 	struct device_link *dev_link;
+ 	bool ret = false;
+@@ -2009,22 +2009,22 @@ static bool __fw_devlink_relax_cycles(struct device *con,
+ 
+ 	sup_handle->flags |= FWNODE_FLAG_VISITED;
+ 
+-	sup_dev = get_dev_from_fwnode(sup_handle);
+-
+ 	/* Termination condition. */
+-	if (sup_dev == con) {
++	if (sup_handle == con_handle) {
+ 		pr_debug("----- cycle: start -----\n");
+ 		ret = true;
+ 		goto out;
+ 	}
+ 
++	sup_dev = get_dev_from_fwnode(sup_handle);
++	con_dev = get_dev_from_fwnode(con_handle);
+ 	/*
+ 	 * If sup_dev is bound to a driver and @con hasn't started binding to a
+ 	 * driver, sup_dev can't be a consumer of @con. So, no need to check
+ 	 * further.
+ 	 */
+ 	if (sup_dev && sup_dev->links.status ==  DL_DEV_DRIVER_BOUND &&
+-	    con->links.status == DL_DEV_NO_DRIVER) {
++	    con_dev && con_dev->links.status == DL_DEV_NO_DRIVER) {
+ 		ret = false;
+ 		goto out;
+ 	}
+@@ -2033,7 +2033,7 @@ static bool __fw_devlink_relax_cycles(struct device *con,
+ 		if (link->flags & FWLINK_FLAG_IGNORE)
+ 			continue;
+ 
+-		if (__fw_devlink_relax_cycles(con, link->supplier)) {
++		if (__fw_devlink_relax_cycles(con_handle, link->supplier)) {
+ 			__fwnode_link_cycle(link);
+ 			ret = true;
+ 		}
+@@ -2048,7 +2048,7 @@ static bool __fw_devlink_relax_cycles(struct device *con,
+ 	else
+ 		par_dev = fwnode_get_next_parent_dev(sup_handle);
+ 
+-	if (par_dev && __fw_devlink_relax_cycles(con, par_dev->fwnode)) {
++	if (par_dev && __fw_devlink_relax_cycles(con_handle, par_dev->fwnode)) {
+ 		pr_debug("%pfwf: cycle: child of %pfwf\n", sup_handle,
+ 			 par_dev->fwnode);
+ 		ret = true;
+@@ -2066,7 +2066,7 @@ static bool __fw_devlink_relax_cycles(struct device *con,
+ 		    !(dev_link->flags & DL_FLAG_CYCLE))
+ 			continue;
+ 
+-		if (__fw_devlink_relax_cycles(con,
++		if (__fw_devlink_relax_cycles(con_handle,
+ 					      dev_link->supplier->fwnode)) {
+ 			pr_debug("%pfwf: cycle: depends on %pfwf\n", sup_handle,
+ 				 dev_link->supplier->fwnode);
+@@ -2114,11 +2114,6 @@ static int fw_devlink_create_devlink(struct device *con,
+ 	if (link->flags & FWLINK_FLAG_IGNORE)
+ 		return 0;
+ 
+-	if (con->fwnode == link->consumer)
+-		flags = fw_devlink_get_flags(link->flags);
+-	else
+-		flags = FW_DEVLINK_FLAGS_PERMISSIVE;
+-
+ 	/*
+ 	 * In some cases, a device P might also be a supplier to its child node
+ 	 * C. However, this would defer the probe of C until the probe of P
+@@ -2139,25 +2134,23 @@ static int fw_devlink_create_devlink(struct device *con,
+ 		return -EINVAL;
+ 
+ 	/*
+-	 * SYNC_STATE_ONLY device links don't block probing and supports cycles.
+-	 * So, one might expect that cycle detection isn't necessary for them.
+-	 * However, if the device link was marked as SYNC_STATE_ONLY because
+-	 * it's part of a cycle, then we still need to do cycle detection. This
+-	 * is because the consumer and supplier might be part of multiple cycles
+-	 * and we need to detect all those cycles.
++	 * Don't try to optimize by not calling the cycle detection logic under
++	 * certain conditions. There's always some corner case that won't get
++	 * detected.
+ 	 */
+-	if (!device_link_flag_is_sync_state_only(flags) ||
+-	    flags & DL_FLAG_CYCLE) {
+-		device_links_write_lock();
+-		if (__fw_devlink_relax_cycles(con, sup_handle)) {
+-			__fwnode_link_cycle(link);
+-			flags = fw_devlink_get_flags(link->flags);
+-			pr_debug("----- cycle: end -----\n");
+-			dev_info(con, "Fixed dependency cycle(s) with %pfwf\n",
+-				 sup_handle);
+-		}
+-		device_links_write_unlock();
++	device_links_write_lock();
++	if (__fw_devlink_relax_cycles(link->consumer, sup_handle)) {
++		__fwnode_link_cycle(link);
++		pr_debug("----- cycle: end -----\n");
++		pr_info("%pfwf: Fixed dependency cycle(s) with %pfwf\n",
++			link->consumer, sup_handle);
+ 	}
++	device_links_write_unlock();
++
++	if (con->fwnode == link->consumer)
++		flags = fw_devlink_get_flags(link->flags);
++	else
++		flags = FW_DEVLINK_FLAGS_PERMISSIVE;
+ 
+ 	if (sup_handle->flags & FWNODE_FLAG_NOT_DEVICE)
+ 		sup_dev = fwnode_get_next_parent_dev(sup_handle);
+diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
+index e682797cdee783..d6a1ba969266a4 100644
+--- a/drivers/block/zram/zram_drv.c
++++ b/drivers/block/zram/zram_drv.c
+@@ -1692,6 +1692,13 @@ static int zram_recompress(struct zram *zram, u32 index, struct page *page,
+ 	if (ret)
+ 		return ret;
+ 
++	/*
++	 * We touched this entry so mark it as non-IDLE. This makes sure that
++	 * we don't preserve IDLE flag and don't incorrectly pick this entry
++	 * for different post-processing type (e.g. writeback).
++	 */
++	zram_clear_flag(zram, index, ZRAM_IDLE);
++
+ 	class_index_old = zs_lookup_class_index(zram->mem_pool, comp_len_old);
+ 	/*
+ 	 * Iterate the secondary comp algorithms list (in order of priority)
+diff --git a/drivers/clk/qcom/gcc-qcs404.c b/drivers/clk/qcom/gcc-qcs404.c
+index c3cfd572e7c1e0..5ca003c9bfba89 100644
+--- a/drivers/clk/qcom/gcc-qcs404.c
++++ b/drivers/clk/qcom/gcc-qcs404.c
+@@ -131,6 +131,7 @@ static struct clk_alpha_pll gpll1_out_main = {
+ /* 930MHz configuration */
+ static const struct alpha_pll_config gpll3_config = {
+ 	.l = 48,
++	.alpha_hi = 0x70,
+ 	.alpha = 0x0,
+ 	.alpha_en_mask = BIT(24),
+ 	.post_div_mask = 0xf << 8,
+diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
+index 5892c73e129d2b..07d6f9a9b7c820 100644
+--- a/drivers/cpufreq/scmi-cpufreq.c
++++ b/drivers/cpufreq/scmi-cpufreq.c
+@@ -287,7 +287,7 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
+ 		ret = cpufreq_enable_boost_support();
+ 		if (ret) {
+ 			dev_warn(cpu_dev, "failed to enable boost: %d\n", ret);
+-			goto out_free_opp;
++			goto out_free_table;
+ 		} else {
+ 			scmi_cpufreq_hw_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs;
+ 			scmi_cpufreq_driver.boost_enabled = true;
+@@ -296,6 +296,8 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
+ 
+ 	return 0;
+ 
++out_free_table:
++	dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
+ out_free_opp:
+ 	dev_pm_opp_remove_all_dynamic(cpu_dev);
+ 
+diff --git a/drivers/firmware/efi/libstub/efi-stub.c b/drivers/firmware/efi/libstub/efi-stub.c
+index 2a1b43f9e0fa2b..df5ffe23644298 100644
+--- a/drivers/firmware/efi/libstub/efi-stub.c
++++ b/drivers/firmware/efi/libstub/efi-stub.c
+@@ -149,7 +149,7 @@ efi_status_t efi_handle_cmdline(efi_loaded_image_t *image, char **cmdline_ptr)
+ 	return EFI_SUCCESS;
+ 
+ fail_free_cmdline:
+-	efi_bs_call(free_pool, cmdline_ptr);
++	efi_bs_call(free_pool, cmdline);
+ 	return status;
+ }
+ 
+diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
+index cf5bc77e2362c4..610e159d362ad6 100644
+--- a/drivers/gpu/drm/Kconfig
++++ b/drivers/gpu/drm/Kconfig
+@@ -327,7 +327,7 @@ config DRM_TTM_HELPER
+ config DRM_GEM_DMA_HELPER
+ 	tristate
+ 	depends on DRM
+-	select FB_DMAMEM_HELPERS if DRM_FBDEV_EMULATION
++	select FB_DMAMEM_HELPERS_DEFERRED if DRM_FBDEV_EMULATION
+ 	help
+ 	  Choose this if you need the GEM DMA helper functions
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index c2394c8b4d6b21..1f08cb88d51be5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -4584,8 +4584,8 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
+ 	int idx;
+ 	bool px;
+ 
+-	amdgpu_fence_driver_sw_fini(adev);
+ 	amdgpu_device_ip_fini(adev);
++	amdgpu_fence_driver_sw_fini(adev);
+ 	amdgpu_ucode_release(&adev->firmware.gpu_info_fw);
+ 	adev->accel_working = false;
+ 	dma_fence_put(rcu_dereference_protected(adev->gang_submit, true));
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+index 74fdbf71d95b74..599d3ca4e0ef9e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+@@ -214,15 +214,15 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
+ 
+ 	drm_sched_entity_destroy(&adev->vce.entity);
+ 
+-	amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr,
+-		(void **)&adev->vce.cpu_addr);
+-
+ 	for (i = 0; i < adev->vce.num_rings; i++)
+ 		amdgpu_ring_fini(&adev->vce.ring[i]);
+ 
+ 	amdgpu_ucode_release(&adev->vce.fw);
+ 	mutex_destroy(&adev->vce.idle_mutex);
+ 
++	amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr,
++		(void **)&adev->vce.cpu_addr);
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c
+index 7a9adfda5814a6..814ab59fdd4a3a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c
++++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c
+@@ -275,6 +275,15 @@ static void nbio_v7_11_init_registers(struct amdgpu_device *adev)
+ 	if (def != data)
+ 		WREG32_SOC15(NBIO, 0, regBIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3, data);
+ 
++	switch (adev->ip_versions[NBIO_HWIP][0]) {
++	case IP_VERSION(7, 11, 0):
++	case IP_VERSION(7, 11, 1):
++	case IP_VERSION(7, 11, 2):
++	case IP_VERSION(7, 11, 3):
++		data = RREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF5_STRAP4) & ~BIT(23);
++		WREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF5_STRAP4, data);
++		break;
++	}
+ }
+ 
+ static void nbio_v7_11_update_medium_grain_clock_gating(struct amdgpu_device *adev,
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+index 4843dcb9a5f796..d6037577c53278 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+@@ -125,7 +125,7 @@ static bool kq_initialize(struct kernel_queue *kq, struct kfd_node *dev,
+ 
+ 	memset(kq->pq_kernel_addr, 0, queue_size);
+ 	memset(kq->rptr_kernel, 0, sizeof(*kq->rptr_kernel));
+-	memset(kq->wptr_kernel, 0, sizeof(*kq->wptr_kernel));
++	memset(kq->wptr_kernel, 0, dev->kfd->device_info.doorbell_size);
+ 
+ 	prop.queue_size = queue_size;
+ 	prop.is_interop = false;
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index a88f1b6ea64cfa..a6911bb2cf0c6c 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -3066,7 +3066,10 @@ static void restore_planes_and_stream_state(
+ 		return;
+ 
+ 	for (i = 0; i < status->plane_count; i++) {
++		/* refcount will always be valid, restore everything else */
++		struct kref refcount = status->plane_states[i]->refcount;
+ 		*status->plane_states[i] = scratch->plane_states[i];
++		status->plane_states[i]->refcount = refcount;
+ 	}
+ 	*stream = scratch->stream_state;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c
+index 838d72eaa87fbd..b363f5360818d8 100644
+--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c
++++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c
+@@ -1392,10 +1392,10 @@ static void dccg35_set_dtbclk_dto(
+ 
+ 		/* The recommended programming sequence to enable DTBCLK DTO to generate
+ 		 * valid pixel HPO DPSTREAM ENCODER, specifies that DTO source select should
+-		 * be set only after DTO is enabled
++		 * be set only after DTO is enabled.
++		 * PIPEx_DTO_SRC_SEL should not be programmed during DTBCLK update since OTG may still be on, and the
++		 * programming is handled in program_pix_clk() regardless, so it can be removed from here.
+ 		 */
+-		REG_UPDATE(OTG_PIXEL_RATE_CNTL[params->otg_inst],
+-				PIPE_DTO_SRC_SEL[params->otg_inst], 2);
+ 	} else {
+ 		switch (params->otg_inst) {
+ 		case 0:
+@@ -1412,9 +1412,12 @@ static void dccg35_set_dtbclk_dto(
+ 			break;
+ 		}
+ 
+-		REG_UPDATE_2(OTG_PIXEL_RATE_CNTL[params->otg_inst],
+-				DTBCLK_DTO_ENABLE[params->otg_inst], 0,
+-				PIPE_DTO_SRC_SEL[params->otg_inst], params->is_hdmi ? 0 : 1);
++		/**
++		 * PIPEx_DTO_SRC_SEL should not be programmed during DTBCLK update since OTG may still be on, and the
++		 * programming is handled in program_pix_clk() regardless, so it can be removed from here.
++		 */
++		REG_UPDATE(OTG_PIXEL_RATE_CNTL[params->otg_inst],
++				DTBCLK_DTO_ENABLE[params->otg_inst], 0);
+ 
+ 		REG_WRITE(DTBCLK_DTO_MODULO[params->otg_inst], 0);
+ 		REG_WRITE(DTBCLK_DTO_PHASE[params->otg_inst], 0);
+diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
+index 6eccf0241d857d..1ed21c1b86a5bb 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
++++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
+@@ -258,12 +258,25 @@ static unsigned int find_preferred_pipe_candidates(const struct dc_state *existi
+ 	 * However this condition comes with a caveat. We need to ignore pipes that will
+ 	 * require a change in OPP but still have the same stream id. For example during
+ 	 * an MPC to ODM transition.
++	 *
++	 * Add a check that skips the head pipe, using the dc resource helper
++	 * resource_get_primary_dpp_pipe() and comparing the pipe index.
+ 	 */
+ 	if (existing_state) {
+ 		for (i = 0; i < pipe_count; i++) {
+ 			if (existing_state->res_ctx.pipe_ctx[i].stream && existing_state->res_ctx.pipe_ctx[i].stream->stream_id == stream_id) {
++				struct pipe_ctx *head_pipe =
++					resource_is_pipe_type(&existing_state->res_ctx.pipe_ctx[i], DPP_PIPE) ?
++						resource_get_primary_dpp_pipe(&existing_state->res_ctx.pipe_ctx[i]) :
++							NULL;
++
++				// always skip the head pipe when selecting candidates
++				if (head_pipe && head_pipe->pipe_idx == i)
++					continue;
+ 				if (existing_state->res_ctx.pipe_ctx[i].plane_res.hubp &&
+-					existing_state->res_ctx.pipe_ctx[i].plane_res.hubp->opp_id != i)
++					existing_state->res_ctx.pipe_ctx[i].plane_res.hubp->opp_id != i &&
++						(existing_state->res_ctx.pipe_ctx[i].prev_odm_pipe ||
++						existing_state->res_ctx.pipe_ctx[i].next_odm_pipe))
+ 					continue;
+ 
+ 				preferred_pipe_candidates[num_preferred_candidates++] = i;
+@@ -292,6 +305,14 @@ static unsigned int find_last_resort_pipe_candidates(const struct dc_state *exis
+ 	 */
+ 	if (existing_state) {
+ 		for (i  = 0; i < pipe_count; i++) {
++			struct pipe_ctx *head_pipe =
++				resource_is_pipe_type(&existing_state->res_ctx.pipe_ctx[i], DPP_PIPE) ?
++					resource_get_primary_dpp_pipe(&existing_state->res_ctx.pipe_ctx[i]) :
++						NULL;
++
++			// always skip the head pipe when selecting candidates
++			if (head_pipe && head_pipe->pipe_idx == i)
++				continue;
+ 			if ((existing_state->res_ctx.pipe_ctx[i].plane_res.hubp &&
+ 				existing_state->res_ctx.pipe_ctx[i].plane_res.hubp->opp_id != i) ||
+ 				existing_state->res_ctx.pipe_ctx[i].stream_res.tg)
+diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_offset.h
+index 5ebe4cb40f9db6..c38a01742d6f0e 100644
+--- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_offset.h
++++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_offset.h
+@@ -7571,6 +7571,8 @@
+ // base address: 0x10100000
+ #define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0                                                              0xd000
+ #define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0_BASE_IDX                                                     5
++#define regRCC_DEV0_EPF5_STRAP4                                                                         0xd284
++#define regRCC_DEV0_EPF5_STRAP4_BASE_IDX                                                                5
+ 
+ 
+ // addressBlock: nbio_nbif0_bif_rst_bif_rst_regblk
+diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_sh_mask.h
+index eb8c556d9c9300..3b96f1e5a1802c 100644
+--- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_sh_mask.h
++++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_sh_mask.h
+@@ -50665,6 +50665,19 @@
+ #define RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_D1_SUPPORT_DEV0_F0_MASK                                        0x40000000L
+ #define RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_D2_SUPPORT_DEV0_F0_MASK                                        0x80000000L
+ 
++//RCC_DEV0_EPF5_STRAP4
++#define RCC_DEV0_EPF5_STRAP4__STRAP_ATOMIC_64BIT_EN_DEV0_F5__SHIFT                                            0x14
++#define RCC_DEV0_EPF5_STRAP4__STRAP_ATOMIC_EN_DEV0_F5__SHIFT                                                  0x15
++#define RCC_DEV0_EPF5_STRAP4__STRAP_FLR_EN_DEV0_F5__SHIFT                                                     0x16
++#define RCC_DEV0_EPF5_STRAP4__STRAP_PME_SUPPORT_DEV0_F5__SHIFT                                                0x17
++#define RCC_DEV0_EPF5_STRAP4__STRAP_INTERRUPT_PIN_DEV0_F5__SHIFT                                              0x1c
++#define RCC_DEV0_EPF5_STRAP4__STRAP_AUXPWR_SUPPORT_DEV0_F5__SHIFT                                             0x1f
++#define RCC_DEV0_EPF5_STRAP4__STRAP_ATOMIC_64BIT_EN_DEV0_F5_MASK                                              0x00100000L
++#define RCC_DEV0_EPF5_STRAP4__STRAP_ATOMIC_EN_DEV0_F5_MASK                                                    0x00200000L
++#define RCC_DEV0_EPF5_STRAP4__STRAP_FLR_EN_DEV0_F5_MASK                                                       0x00400000L
++#define RCC_DEV0_EPF5_STRAP4__STRAP_PME_SUPPORT_DEV0_F5_MASK                                                  0x0F800000L
++#define RCC_DEV0_EPF5_STRAP4__STRAP_INTERRUPT_PIN_DEV0_F5_MASK                                                0x70000000L
++#define RCC_DEV0_EPF5_STRAP4__STRAP_AUXPWR_SUPPORT_DEV0_F5_MASK                                               0x80000000L
+ 
+ // addressBlock: nbio_nbif0_bif_rst_bif_rst_regblk
+ //HARD_RST_CTRL
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+index 80e60ea2d11e3c..32bdeac2676b5c 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+@@ -1695,7 +1695,9 @@ static int smu_smc_hw_setup(struct smu_context *smu)
+ 		return ret;
+ 	}
+ 
+-	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
++	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5)
++		pcie_gen = 4;
++	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
+ 		pcie_gen = 3;
+ 	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
+ 		pcie_gen = 2;
+@@ -1708,7 +1710,9 @@ static int smu_smc_hw_setup(struct smu_context *smu)
+ 	 * Bit 15:8:  PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
+ 	 * Bit 7:0:   PCIE lane width, 1 to 7 corresponds to x1 to x32
+ 	 */
+-	if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
++	if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X32)
++		pcie_width = 7;
++	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
+ 		pcie_width = 6;
+ 	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
+ 		pcie_width = 5;
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h
+index 727d5b405435d0..3c1b4aa4a68d7e 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h
++++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h
+@@ -53,7 +53,7 @@
+ #define CTF_OFFSET_MEM			5
+ 
+ extern const int decoded_link_speed[5];
+-extern const int decoded_link_width[7];
++extern const int decoded_link_width[8];
+ 
+ #define DECODE_GEN_SPEED(gen_speed_idx)		(decoded_link_speed[gen_speed_idx])
+ #define DECODE_LANE_WIDTH(lane_width_idx)	(decoded_link_width[lane_width_idx])
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+index c0f6b59369b7c4..d52512f5f1bd9d 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+@@ -1344,8 +1344,12 @@ static int arcturus_get_power_limit(struct smu_context *smu,
+ 		*default_power_limit = power_limit;
+ 	if (max_power_limit)
+ 		*max_power_limit = power_limit;
++	/**
++	 * No lower bound is imposed on the limit. Any unreasonable limit set
++	 * will result in frequent throttling.
++	 */
+ 	if (min_power_limit)
+-		*min_power_limit = power_limit;
++		*min_power_limit = 0;
+ 
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+index b891a5e0a3969a..ceaf4572db2527 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+@@ -2061,6 +2061,8 @@ static ssize_t smu_v13_0_7_get_gpu_metrics(struct smu_context *smu,
+ 	gpu_metrics->average_dclk1_frequency = metrics->AverageDclk1Frequency;
+ 
+ 	gpu_metrics->current_gfxclk = metrics->CurrClock[PPCLK_GFXCLK];
++	gpu_metrics->current_socclk = metrics->CurrClock[PPCLK_SOCCLK];
++	gpu_metrics->current_uclk = metrics->CurrClock[PPCLK_UCLK];
+ 	gpu_metrics->current_vclk0 = metrics->CurrClock[PPCLK_VCLK_0];
+ 	gpu_metrics->current_dclk0 = metrics->CurrClock[PPCLK_DCLK_0];
+ 	gpu_metrics->current_vclk1 = metrics->CurrClock[PPCLK_VCLK_1];
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
+index 865e916fc42544..452589adaf0468 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
+@@ -49,7 +49,7 @@
+ #define regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0_BASE_IDX   0
+ 
+ const int decoded_link_speed[5] = {1, 2, 3, 4, 5};
+-const int decoded_link_width[7] = {0, 1, 2, 4, 8, 12, 16};
++const int decoded_link_width[8] = {0, 1, 2, 4, 8, 12, 16, 32};
+ /*
+  * DO NOT use these for err/warn/info/debug messages.
+  * Use dev_err, dev_warn, dev_info and dev_dbg instead.
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
+index 1e16a281f2dcde..82aef8626afa97 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
+@@ -1186,13 +1186,15 @@ static int smu_v14_0_2_print_clk_levels(struct smu_context *smu,
+ 					(pcie_table->pcie_gen[i] == 0) ? "2.5GT/s," :
+ 					(pcie_table->pcie_gen[i] == 1) ? "5.0GT/s," :
+ 					(pcie_table->pcie_gen[i] == 2) ? "8.0GT/s," :
+-					(pcie_table->pcie_gen[i] == 3) ? "16.0GT/s," : "",
++					(pcie_table->pcie_gen[i] == 3) ? "16.0GT/s," :
++					(pcie_table->pcie_gen[i] == 4) ? "32.0GT/s," : "",
+ 					(pcie_table->pcie_lane[i] == 1) ? "x1" :
+ 					(pcie_table->pcie_lane[i] == 2) ? "x2" :
+ 					(pcie_table->pcie_lane[i] == 3) ? "x4" :
+ 					(pcie_table->pcie_lane[i] == 4) ? "x8" :
+ 					(pcie_table->pcie_lane[i] == 5) ? "x12" :
+-					(pcie_table->pcie_lane[i] == 6) ? "x16" : "",
++					(pcie_table->pcie_lane[i] == 6) ? "x16" :
++					(pcie_table->pcie_lane[i] == 7) ? "x32" : "",
+ 					pcie_table->clk_freq[i],
+ 					(gen_speed == DECODE_GEN_SPEED(pcie_table->pcie_gen[i])) &&
+ 					(lane_width == DECODE_LANE_WIDTH(pcie_table->pcie_lane[i])) ?
+@@ -1475,15 +1477,35 @@ static int smu_v14_0_2_update_pcie_parameters(struct smu_context *smu,
+ 	struct smu_14_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+ 	struct smu_14_0_pcie_table *pcie_table =
+ 				&dpm_context->dpm_tables.pcie_table;
++	int num_of_levels = pcie_table->num_of_link_levels;
+ 	uint32_t smu_pcie_arg;
+ 	int ret, i;
+ 
+-	for (i = 0; i < pcie_table->num_of_link_levels; i++) {
+-		if (pcie_table->pcie_gen[i] > pcie_gen_cap)
++	if (!num_of_levels)
++		return 0;
++
++	if (!(smu->adev->pm.pp_feature & PP_PCIE_DPM_MASK)) {
++		if (pcie_table->pcie_gen[num_of_levels - 1] < pcie_gen_cap)
++			pcie_gen_cap = pcie_table->pcie_gen[num_of_levels - 1];
++
++		if (pcie_table->pcie_lane[num_of_levels - 1] < pcie_width_cap)
++			pcie_width_cap = pcie_table->pcie_lane[num_of_levels - 1];
++
++		/* Force all levels to use the same settings */
++		for (i = 0; i < num_of_levels; i++) {
+ 			pcie_table->pcie_gen[i] = pcie_gen_cap;
+-		if (pcie_table->pcie_lane[i] > pcie_width_cap)
+ 			pcie_table->pcie_lane[i] = pcie_width_cap;
++		}
++	} else {
++		for (i = 0; i < num_of_levels; i++) {
++			if (pcie_table->pcie_gen[i] > pcie_gen_cap)
++				pcie_table->pcie_gen[i] = pcie_gen_cap;
++			if (pcie_table->pcie_lane[i] > pcie_width_cap)
++				pcie_table->pcie_lane[i] = pcie_width_cap;
++		}
++	}
+ 
++	for (i = 0; i < num_of_levels; i++) {
+ 		smu_pcie_arg = i << 16;
+ 		smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
+ 		smu_pcie_arg |= pcie_table->pcie_lane[i];
+@@ -2767,7 +2789,6 @@ static const struct pptable_funcs smu_v14_0_2_ppt_funcs = {
+ 	.get_unique_id = smu_v14_0_2_get_unique_id,
+ 	.get_power_limit = smu_v14_0_2_get_power_limit,
+ 	.set_power_limit = smu_v14_0_2_set_power_limit,
+-	.set_power_source = smu_v14_0_set_power_source,
+ 	.get_power_profile_mode = smu_v14_0_2_get_power_profile_mode,
+ 	.set_power_profile_mode = smu_v14_0_2_set_power_profile_mode,
+ 	.run_btc = smu_v14_0_run_btc,
+diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c
+index e3a9832c742cb1..65b57de20203f5 100644
+--- a/drivers/gpu/drm/bridge/ite-it6505.c
++++ b/drivers/gpu/drm/bridge/ite-it6505.c
+@@ -2614,9 +2614,9 @@ static int it6505_poweron(struct it6505 *it6505)
+ 	/* time interval between OVDD and SYSRSTN at least be 10ms */
+ 	if (pdata->gpiod_reset) {
+ 		usleep_range(10000, 20000);
+-		gpiod_set_value_cansleep(pdata->gpiod_reset, 0);
+-		usleep_range(1000, 2000);
+ 		gpiod_set_value_cansleep(pdata->gpiod_reset, 1);
++		usleep_range(1000, 2000);
++		gpiod_set_value_cansleep(pdata->gpiod_reset, 0);
+ 		usleep_range(25000, 35000);
+ 	}
+ 
+@@ -2647,7 +2647,7 @@ static int it6505_poweroff(struct it6505 *it6505)
+ 	disable_irq_nosync(it6505->irq);
+ 
+ 	if (pdata->gpiod_reset)
+-		gpiod_set_value_cansleep(pdata->gpiod_reset, 0);
++		gpiod_set_value_cansleep(pdata->gpiod_reset, 1);
+ 
+ 	if (pdata->pwr18) {
+ 		err = regulator_disable(pdata->pwr18);
+@@ -3135,7 +3135,7 @@ static int it6505_init_pdata(struct it6505 *it6505)
+ 		return PTR_ERR(pdata->ovdd);
+ 	}
+ 
+-	pdata->gpiod_reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
++	pdata->gpiod_reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ 	if (IS_ERR(pdata->gpiod_reset)) {
+ 		dev_err(dev, "gpiod_reset gpio not found");
+ 		return PTR_ERR(pdata->gpiod_reset);
+diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
+index 43cdf39019a445..5186d2114a5037 100644
+--- a/drivers/gpu/drm/drm_atomic_helper.c
++++ b/drivers/gpu/drm/drm_atomic_helper.c
+@@ -3015,7 +3015,7 @@ int drm_atomic_helper_swap_state(struct drm_atomic_state *state,
+ 				  bool stall)
+ {
+ 	int i, ret;
+-	unsigned long flags;
++	unsigned long flags = 0;
+ 	struct drm_connector *connector;
+ 	struct drm_connector_state *old_conn_state, *new_conn_state;
+ 	struct drm_crtc *crtc;
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
+index 384df1659be60d..b13a17276d07cd 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
+@@ -482,7 +482,8 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
+ 	} else {
+ 		CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE,
+ 				       VIVS_GL_FLUSH_CACHE_DEPTH |
+-				       VIVS_GL_FLUSH_CACHE_COLOR);
++				       VIVS_GL_FLUSH_CACHE_COLOR |
++				       VIVS_GL_FLUSH_CACHE_SHADER_L1);
+ 		if (has_blt) {
+ 			CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1);
+ 			CMD_LOAD_STATE(buffer, VIVS_BLT_SET_COMMAND, 0x1);
+diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+index 3e807195a0d03a..2c1cb335d8623f 100644
+--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
++++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+@@ -405,8 +405,10 @@ static bool mtk_drm_get_all_drm_priv(struct device *dev)
+ 		if (temp_drm_priv->mtk_drm_bound)
+ 			cnt++;
+ 
+-		if (cnt == MAX_CRTC)
++		if (cnt == MAX_CRTC) {
++			of_node_put(node);
+ 			break;
++		}
+ 	}
+ 
+ 	if (drm_priv->data->mmsys_dev_num == cnt) {
+diff --git a/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c b/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
+index 44897e5218a69f..45d09e6fa667fd 100644
+--- a/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
++++ b/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
+@@ -26,7 +26,6 @@ struct jadard_panel_desc {
+ 	unsigned int lanes;
+ 	enum mipi_dsi_pixel_format format;
+ 	int (*init)(struct jadard *jadard);
+-	u32 num_init_cmds;
+ 	bool lp11_before_reset;
+ 	bool reset_before_power_off_vcioo;
+ 	unsigned int vcioo_to_lp11_delay_ms;
+diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
+index f9c73c55f04f76..f9996304d94313 100644
+--- a/drivers/gpu/drm/radeon/radeon_connectors.c
++++ b/drivers/gpu/drm/radeon/radeon_connectors.c
+@@ -1255,16 +1255,6 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
+ 			goto exit;
+ 		}
+ 	}
+-
+-	if (dret && radeon_connector->hpd.hpd != RADEON_HPD_NONE &&
+-	    !radeon_hpd_sense(rdev, radeon_connector->hpd.hpd) &&
+-	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA) {
+-		DRM_DEBUG_KMS("EDID is readable when HPD disconnected\n");
+-		schedule_delayed_work(&rdev->hotplug_work, msecs_to_jiffies(1000));
+-		ret = connector_status_disconnected;
+-		goto exit;
+-	}
+-
+ 	if (dret) {
+ 		radeon_connector->detected_by_load = false;
+ 		radeon_connector_free_edid(connector);
+diff --git a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c
+index db0a1eb535328f..c59fcb4dca3249 100644
+--- a/drivers/gpu/drm/sti/sti_cursor.c
++++ b/drivers/gpu/drm/sti/sti_cursor.c
+@@ -200,6 +200,9 @@ static int sti_cursor_atomic_check(struct drm_plane *drm_plane,
+ 		return 0;
+ 
+ 	crtc_state = drm_atomic_get_crtc_state(state, crtc);
++	if (IS_ERR(crtc_state))
++		return PTR_ERR(crtc_state);
++
+ 	mode = &crtc_state->mode;
+ 	dst_x = new_plane_state->crtc_x;
+ 	dst_y = new_plane_state->crtc_y;
+diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
+index 43c72c2604a0cd..f046f5f7ad259d 100644
+--- a/drivers/gpu/drm/sti/sti_gdp.c
++++ b/drivers/gpu/drm/sti/sti_gdp.c
+@@ -638,6 +638,9 @@ static int sti_gdp_atomic_check(struct drm_plane *drm_plane,
+ 
+ 	mixer = to_sti_mixer(crtc);
+ 	crtc_state = drm_atomic_get_crtc_state(state, crtc);
++	if (IS_ERR(crtc_state))
++		return PTR_ERR(crtc_state);
++
+ 	mode = &crtc_state->mode;
+ 	dst_x = new_plane_state->crtc_x;
+ 	dst_y = new_plane_state->crtc_y;
+diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
+index acbf70b95aeb97..5793cf2cb8972c 100644
+--- a/drivers/gpu/drm/sti/sti_hqvdp.c
++++ b/drivers/gpu/drm/sti/sti_hqvdp.c
+@@ -1037,6 +1037,9 @@ static int sti_hqvdp_atomic_check(struct drm_plane *drm_plane,
+ 		return 0;
+ 
+ 	crtc_state = drm_atomic_get_crtc_state(state, crtc);
++	if (IS_ERR(crtc_state))
++		return PTR_ERR(crtc_state);
++
+ 	mode = &crtc_state->mode;
+ 	dst_x = new_plane_state->crtc_x;
+ 	dst_y = new_plane_state->crtc_y;
+diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
+index 4f5d00aea7168b..2927745d689549 100644
+--- a/drivers/gpu/drm/xe/xe_guc_submit.c
++++ b/drivers/gpu/drm/xe/xe_guc_submit.c
+@@ -1846,16 +1846,29 @@ static void handle_sched_done(struct xe_guc *guc, struct xe_exec_queue *q,
+ 		xe_gt_assert(guc_to_gt(guc), runnable_state == 0);
+ 		xe_gt_assert(guc_to_gt(guc), exec_queue_pending_disable(q));
+ 
+-		clear_exec_queue_pending_disable(q);
+ 		if (q->guc->suspend_pending) {
+ 			suspend_fence_signal(q);
++			clear_exec_queue_pending_disable(q);
+ 		} else {
+ 			if (exec_queue_banned(q) || check_timeout) {
+ 				smp_wmb();
+ 				wake_up_all(&guc->ct.wq);
+ 			}
+-			if (!check_timeout)
++			if (!check_timeout && exec_queue_destroyed(q)) {
++				/*
++				 * Make sure to clear the pending_disable only
++				 * after sampling the destroyed state. We want
++				 * to ensure we don't trigger the unregister too
++				 * early with something intending to only
++				 * disable scheduling. The caller doing the
++				 * destroy must wait for an ongoing
++				 * pending_disable before marking as destroyed.
++				 */
++				clear_exec_queue_pending_disable(q);
+ 				deregister_exec_queue(guc, q);
++			} else {
++				clear_exec_queue_pending_disable(q);
++			}
+ 		}
+ 	}
+ }
+diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
+index cfd31ae49cc1f7..1b97d90aaddaf4 100644
+--- a/drivers/gpu/drm/xe/xe_migrate.c
++++ b/drivers/gpu/drm/xe/xe_migrate.c
+@@ -209,7 +209,8 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
+ 				  num_entries * XE_PAGE_SIZE,
+ 				  ttm_bo_type_kernel,
+ 				  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+-				  XE_BO_FLAG_PINNED);
++				  XE_BO_FLAG_PINNED |
++				  XE_BO_FLAG_PAGETABLE);
+ 	if (IS_ERR(bo))
+ 		return PTR_ERR(bo);
+ 
+@@ -1350,6 +1351,7 @@ __xe_migrate_update_pgtables(struct xe_migrate *m,
+ 
+ 	/* For sysmem PTE's, need to map them in our hole.. */
+ 	if (!IS_DGFX(xe)) {
++		u16 pat_index = xe->pat.idx[XE_CACHE_WB];
+ 		u32 ptes, ofs;
+ 
+ 		ppgtt_ofs = NUM_KERNEL_PDE - 1;
+@@ -1409,7 +1411,7 @@ __xe_migrate_update_pgtables(struct xe_migrate *m,
+ 						pt_bo->update_index = current_update;
+ 
+ 					addr = vm->pt_ops->pte_encode_bo(pt_bo, 0,
+-									 XE_CACHE_WB, 0);
++									 pat_index, 0);
+ 					bb->cs[bb->len++] = lower_32_bits(addr);
+ 					bb->cs[bb->len++] = upper_32_bits(addr);
+ 				}
+diff --git a/drivers/gpu/drm/xlnx/zynqmp_kms.c b/drivers/gpu/drm/xlnx/zynqmp_kms.c
+index 4556af2faa0f19..1565a7dd4f04d0 100644
+--- a/drivers/gpu/drm/xlnx/zynqmp_kms.c
++++ b/drivers/gpu/drm/xlnx/zynqmp_kms.c
+@@ -509,12 +509,12 @@ int zynqmp_dpsub_drm_init(struct zynqmp_dpsub *dpsub)
+ 	if (ret)
+ 		return ret;
+ 
+-	drm_kms_helper_poll_init(drm);
+-
+ 	ret = zynqmp_dpsub_kms_init(dpsub);
+ 	if (ret < 0)
+ 		goto err_poll_fini;
+ 
++	drm_kms_helper_poll_init(drm);
++
+ 	/* Reset all components and register the DRM device. */
+ 	drm_mode_config_reset(drm);
+ 
+diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
+index ffe99f0c6acef5..da83c49223b33e 100644
+--- a/drivers/i3c/master.c
++++ b/drivers/i3c/master.c
+@@ -1417,7 +1417,7 @@ static void i3c_master_put_i3c_addrs(struct i3c_dev_desc *dev)
+ 					     I3C_ADDR_SLOT_FREE);
+ 
+ 	if (dev->boardinfo && dev->boardinfo->init_dyn_addr)
+-		i3c_bus_set_addr_slot_status(&master->bus, dev->info.dyn_addr,
++		i3c_bus_set_addr_slot_status(&master->bus, dev->boardinfo->init_dyn_addr,
+ 					     I3C_ADDR_SLOT_FREE);
+ }
+ 
+diff --git a/drivers/i3c/master/svc-i3c-master.c b/drivers/i3c/master/svc-i3c-master.c
+index a7bfc678153e6c..565af3759813bd 100644
+--- a/drivers/i3c/master/svc-i3c-master.c
++++ b/drivers/i3c/master/svc-i3c-master.c
+@@ -130,8 +130,8 @@
+ #define SVC_I3C_PPBAUD_MAX 15
+ #define SVC_I3C_QUICK_I2C_CLK 4170000
+ 
+-#define SVC_I3C_EVENT_IBI	BIT(0)
+-#define SVC_I3C_EVENT_HOTJOIN	BIT(1)
++#define SVC_I3C_EVENT_IBI	GENMASK(7, 0)
++#define SVC_I3C_EVENT_HOTJOIN	BIT(31)
+ 
+ struct svc_i3c_cmd {
+ 	u8 addr;
+@@ -214,7 +214,7 @@ struct svc_i3c_master {
+ 		spinlock_t lock;
+ 	} ibi;
+ 	struct mutex lock;
+-	int enabled_events;
++	u32 enabled_events;
+ 	u32 mctrl_config;
+ };
+ 
+@@ -1056,12 +1056,27 @@ static int svc_i3c_master_do_daa(struct i3c_master_controller *m)
+ 	if (ret)
+ 		goto rpm_out;
+ 
+-	/* Register all devices who participated to the core */
+-	for (i = 0; i < dev_nb; i++) {
+-		ret = i3c_master_add_i3c_dev_locked(m, addrs[i]);
+-		if (ret)
+-			goto rpm_out;
+-	}
++	/*
++	 * Register all devices that participated in DAA
++	 *
++	 * If two devices (A and B) are detected in DAA and address 0xa is assigned to
++	 * device A and 0xb to device B, a failure in i3c_master_add_i3c_dev_locked()
++	 * for device A (addr: 0xa) could prevent device B (addr: 0xb) from being
++	 * registered on the bus. The I3C stack might still consider 0xb a free
++	 * address. If a subsequent Hotjoin occurs, 0xb might be assigned to Device A,
++	 * causing both devices A and B to use the same address 0xb, violating the I3C
++	 * specification.
++	 *
++	 * The return value for i3c_master_add_i3c_dev_locked() should not be checked
++	 * because subsequent steps will scan the entire I3C bus, independent of
++	 * whether i3c_master_add_i3c_dev_locked() returns success.
++	 *
++	 * If device A registration fails, there is still a chance to register device
++	 * B. i3c_master_add_i3c_dev_locked() can reset DAA if a failure occurs while
++	 * retrieving device information.
++	 */
++	for (i = 0; i < dev_nb; i++)
++		i3c_master_add_i3c_dev_locked(m, addrs[i]);
+ 
+ 	/* Configure IBI auto-rules */
+ 	ret = svc_i3c_update_ibirules(master);
+@@ -1624,7 +1639,7 @@ static int svc_i3c_master_enable_ibi(struct i3c_dev_desc *dev)
+ 		return ret;
+ 	}
+ 
+-	master->enabled_events |= SVC_I3C_EVENT_IBI;
++	master->enabled_events++;
+ 	svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);
+ 
+ 	return i3c_master_enec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
+@@ -1636,7 +1651,7 @@ static int svc_i3c_master_disable_ibi(struct i3c_dev_desc *dev)
+ 	struct svc_i3c_master *master = to_svc_i3c_master(m);
+ 	int ret;
+ 
+-	master->enabled_events &= ~SVC_I3C_EVENT_IBI;
++	master->enabled_events--;
+ 	if (!master->enabled_events)
+ 		svc_i3c_master_disable_interrupts(master);
+ 
+@@ -1827,8 +1842,8 @@ static int svc_i3c_master_probe(struct platform_device *pdev)
+ rpm_disable:
+ 	pm_runtime_dont_use_autosuspend(&pdev->dev);
+ 	pm_runtime_put_noidle(&pdev->dev);
+-	pm_runtime_set_suspended(&pdev->dev);
+ 	pm_runtime_disable(&pdev->dev);
++	pm_runtime_set_suspended(&pdev->dev);
+ 
+ err_disable_clks:
+ 	svc_i3c_master_unprepare_clks(master);
+diff --git a/drivers/iio/accel/kionix-kx022a.c b/drivers/iio/accel/kionix-kx022a.c
+index 53d59a04ae15e9..b6a828a6df934f 100644
+--- a/drivers/iio/accel/kionix-kx022a.c
++++ b/drivers/iio/accel/kionix-kx022a.c
+@@ -594,7 +594,7 @@ static int kx022a_get_axis(struct kx022a_data *data,
+ 	if (ret)
+ 		return ret;
+ 
+-	*val = le16_to_cpu(data->buffer[0]);
++	*val = (s16)le16_to_cpu(data->buffer[0]);
+ 
+ 	return IIO_VAL_INT;
+ }
+diff --git a/drivers/iio/adc/ad7780.c b/drivers/iio/adc/ad7780.c
+index e9b0c577c9cca4..8ccb74f470309f 100644
+--- a/drivers/iio/adc/ad7780.c
++++ b/drivers/iio/adc/ad7780.c
+@@ -152,7 +152,7 @@ static int ad7780_write_raw(struct iio_dev *indio_dev,
+ 
+ 	switch (m) {
+ 	case IIO_CHAN_INFO_SCALE:
+-		if (val != 0)
++		if (val != 0 || val2 == 0)
+ 			return -EINVAL;
+ 
+ 		vref = st->int_vref_mv * 1000000LL;
+diff --git a/drivers/iio/adc/ad7923.c b/drivers/iio/adc/ad7923.c
+index 09680015a7ab54..acc44cb34f8245 100644
+--- a/drivers/iio/adc/ad7923.c
++++ b/drivers/iio/adc/ad7923.c
+@@ -48,7 +48,7 @@
+ 
+ struct ad7923_state {
+ 	struct spi_device		*spi;
+-	struct spi_transfer		ring_xfer[5];
++	struct spi_transfer		ring_xfer[9];
+ 	struct spi_transfer		scan_single_xfer[2];
+ 	struct spi_message		ring_msg;
+ 	struct spi_message		scan_single_msg;
+@@ -64,7 +64,7 @@ struct ad7923_state {
+ 	 * Length = 8 channels + 4 extra for 8 byte timestamp
+ 	 */
+ 	__be16				rx_buf[12] __aligned(IIO_DMA_MINALIGN);
+-	__be16				tx_buf[4];
++	__be16				tx_buf[8];
+ };
+ 
+ struct ad7923_chip_info {
+diff --git a/drivers/iio/common/inv_sensors/inv_sensors_timestamp.c b/drivers/iio/common/inv_sensors/inv_sensors_timestamp.c
+index f44458c380d928..37d0bdaa8d824f 100644
+--- a/drivers/iio/common/inv_sensors/inv_sensors_timestamp.c
++++ b/drivers/iio/common/inv_sensors/inv_sensors_timestamp.c
+@@ -70,6 +70,10 @@ int inv_sensors_timestamp_update_odr(struct inv_sensors_timestamp *ts,
+ 	if (mult != ts->mult)
+ 		ts->new_mult = mult;
+ 
++	/* When FIFO is off, directly apply the new ODR */
++	if (!fifo)
++		inv_sensors_timestamp_apply_odr(ts, 0, 0, 0);
++
+ 	return 0;
+ }
+ EXPORT_SYMBOL_NS_GPL(inv_sensors_timestamp_update_odr, IIO_INV_SENSORS_TIMESTAMP);
+diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
+index 56ac198142500a..7968aa27f9fd79 100644
+--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
++++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
+@@ -200,7 +200,6 @@ static int inv_icm42600_accel_update_scan_mode(struct iio_dev *indio_dev,
+ {
+ 	struct inv_icm42600_state *st = iio_device_get_drvdata(indio_dev);
+ 	struct inv_icm42600_sensor_state *accel_st = iio_priv(indio_dev);
+-	struct inv_sensors_timestamp *ts = &accel_st->ts;
+ 	struct inv_icm42600_sensor_conf conf = INV_ICM42600_SENSOR_CONF_INIT;
+ 	unsigned int fifo_en = 0;
+ 	unsigned int sleep_temp = 0;
+@@ -229,7 +228,6 @@ static int inv_icm42600_accel_update_scan_mode(struct iio_dev *indio_dev,
+ 	}
+ 
+ 	/* update data FIFO write */
+-	inv_sensors_timestamp_apply_odr(ts, 0, 0, 0);
+ 	ret = inv_icm42600_buffer_set_fifo_en(st, fifo_en | st->fifo.en);
+ 
+ out_unlock:
+diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
+index 938af5b640b00f..c6bb68bf5e1449 100644
+--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
++++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
+@@ -99,8 +99,6 @@ static int inv_icm42600_gyro_update_scan_mode(struct iio_dev *indio_dev,
+ 					      const unsigned long *scan_mask)
+ {
+ 	struct inv_icm42600_state *st = iio_device_get_drvdata(indio_dev);
+-	struct inv_icm42600_sensor_state *gyro_st = iio_priv(indio_dev);
+-	struct inv_sensors_timestamp *ts = &gyro_st->ts;
+ 	struct inv_icm42600_sensor_conf conf = INV_ICM42600_SENSOR_CONF_INIT;
+ 	unsigned int fifo_en = 0;
+ 	unsigned int sleep_gyro = 0;
+@@ -128,7 +126,6 @@ static int inv_icm42600_gyro_update_scan_mode(struct iio_dev *indio_dev,
+ 	}
+ 
+ 	/* update data FIFO write */
+-	inv_sensors_timestamp_apply_odr(ts, 0, 0, 0);
+ 	ret = inv_icm42600_buffer_set_fifo_en(st, fifo_en | st->fifo.en);
+ 
+ out_unlock:
+diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
+index 3bfeabab0ec4f6..5b1088cc3704f1 100644
+--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
++++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
+@@ -112,7 +112,6 @@ int inv_mpu6050_prepare_fifo(struct inv_mpu6050_state *st, bool enable)
+ 	if (enable) {
+ 		/* reset timestamping */
+ 		inv_sensors_timestamp_reset(&st->timestamp);
+-		inv_sensors_timestamp_apply_odr(&st->timestamp, 0, 0, 0);
+ 		/* reset FIFO */
+ 		d = st->chip_config.user_ctrl | INV_MPU6050_BIT_FIFO_RST;
+ 		ret = regmap_write(st->map, st->reg->user_ctrl, d);
+diff --git a/drivers/iio/industrialio-gts-helper.c b/drivers/iio/industrialio-gts-helper.c
+index 4ad949672210ba..291c0fc332c978 100644
+--- a/drivers/iio/industrialio-gts-helper.c
++++ b/drivers/iio/industrialio-gts-helper.c
+@@ -205,7 +205,7 @@ static int gain_to_scaletables(struct iio_gts *gts, int **gains, int **scales)
+ 	memcpy(all_gains, gains[time_idx], gain_bytes);
+ 	new_idx = gts->num_hwgain;
+ 
+-	while (time_idx--) {
++	while (time_idx-- > 0) {
+ 		for (j = 0; j < gts->num_hwgain; j++) {
+ 			int candidate = gains[time_idx][j];
+ 			int chk;
+diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
+index 151099be2863c6..3305ebbdbc0787 100644
+--- a/drivers/iio/inkern.c
++++ b/drivers/iio/inkern.c
+@@ -269,7 +269,7 @@ struct iio_channel *fwnode_iio_channel_get_by_name(struct fwnode_handle *fwnode,
+ 			return ERR_PTR(-ENODEV);
+ 		}
+ 
+-		chan = __fwnode_iio_channel_get_by_name(fwnode, name);
++		chan = __fwnode_iio_channel_get_by_name(parent, name);
+ 		if (!IS_ERR(chan) || PTR_ERR(chan) != -ENODEV) {
+ 			fwnode_handle_put(parent);
+ 			return chan;
+diff --git a/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c b/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
+index 6b479592140c47..c8ec74f089f3d6 100644
+--- a/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
++++ b/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
+@@ -801,7 +801,9 @@ static int tegra241_cmdqv_init_structures(struct arm_smmu_device *smmu)
+ 	return 0;
+ }
+ 
++#ifdef CONFIG_IOMMU_DEBUGFS
+ static struct dentry *cmdqv_debugfs_dir;
++#endif
+ 
+ static struct arm_smmu_device *
+ __tegra241_cmdqv_probe(struct arm_smmu_device *smmu, struct resource *res,
+diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c
+index 8321962b37148b..14618772a3d6e4 100644
+--- a/drivers/iommu/arm/arm-smmu/arm-smmu.c
++++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c
+@@ -1437,6 +1437,17 @@ static struct iommu_device *arm_smmu_probe_device(struct device *dev)
+ 			goto out_free;
+ 	} else {
+ 		smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
++
++		/*
++		 * Defer probe if the relevant SMMU instance hasn't finished
++		 * probing yet. This is a fragile hack and we'd ideally
++		 * avoid this race in the core code. Until that's ironed
++		 * out, however, this is the most pragmatic option on the
++		 * table.
++		 */
++		if (!smmu)
++			return ERR_PTR(dev_err_probe(dev, -EPROBE_DEFER,
++						"smmu dev has not bound yet\n"));
+ 	}
+ 
+ 	ret = -EINVAL;
+diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
+index 0e67f1721a3d98..a286c5404ea701 100644
+--- a/drivers/iommu/io-pgtable-arm.c
++++ b/drivers/iommu/io-pgtable-arm.c
+@@ -199,6 +199,18 @@ static phys_addr_t iopte_to_paddr(arm_lpae_iopte pte,
+ 	return (paddr | (paddr << (48 - 12))) & (ARM_LPAE_PTE_ADDR_MASK << 4);
+ }
+ 
++/*
++ * Convert an index returned by ARM_LPAE_PGD_IDX(), which can point into
++ * a concatenated PGD, into the maximum number of entries that can be
++ * mapped in the same table page.
++ */
++static inline int arm_lpae_max_entries(int i, struct arm_lpae_io_pgtable *data)
++{
++	int ptes_per_table = ARM_LPAE_PTES_PER_TABLE(data);
++
++	return ptes_per_table - (i & (ptes_per_table - 1));
++}
++
+ static bool selftest_running = false;
+ 
+ static dma_addr_t __arm_lpae_dma_addr(void *pages)
+@@ -390,7 +402,7 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
+ 
+ 	/* If we can install a leaf entry at this level, then do so */
+ 	if (size == block_size) {
+-		max_entries = ARM_LPAE_PTES_PER_TABLE(data) - map_idx_start;
++		max_entries = arm_lpae_max_entries(map_idx_start, data);
+ 		num_entries = min_t(int, pgcount, max_entries);
+ 		ret = arm_lpae_init_pte(data, iova, paddr, prot, lvl, num_entries, ptep);
+ 		if (!ret)
+@@ -592,7 +604,7 @@ static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
+ 
+ 	if (size == split_sz) {
+ 		unmap_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
+-		max_entries = ptes_per_table - unmap_idx_start;
++		max_entries = arm_lpae_max_entries(unmap_idx_start, data);
+ 		num_entries = min_t(int, pgcount, max_entries);
+ 	}
+ 
+@@ -650,7 +662,7 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
+ 
+ 	/* If the size matches this level, we're in the right place */
+ 	if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) {
+-		max_entries = ARM_LPAE_PTES_PER_TABLE(data) - unmap_idx_start;
++		max_entries = arm_lpae_max_entries(unmap_idx_start, data);
+ 		num_entries = min_t(int, pgcount, max_entries);
+ 
+ 		/* Find and handle non-leaf entries */
+diff --git a/drivers/leds/flash/leds-mt6360.c b/drivers/leds/flash/leds-mt6360.c
+index 4c74f1cf01f00d..676236c19ec415 100644
+--- a/drivers/leds/flash/leds-mt6360.c
++++ b/drivers/leds/flash/leds-mt6360.c
+@@ -784,7 +784,6 @@ static void mt6360_v4l2_flash_release(struct mt6360_priv *priv)
+ static int mt6360_led_probe(struct platform_device *pdev)
+ {
+ 	struct mt6360_priv *priv;
+-	struct fwnode_handle *child;
+ 	size_t count;
+ 	int i = 0, ret;
+ 
+@@ -811,7 +810,7 @@ static int mt6360_led_probe(struct platform_device *pdev)
+ 		return -ENODEV;
+ 	}
+ 
+-	device_for_each_child_node(&pdev->dev, child) {
++	device_for_each_child_node_scoped(&pdev->dev, child) {
+ 		struct mt6360_led *led = priv->leds + i;
+ 		struct led_init_data init_data = { .fwnode = child, };
+ 		u32 reg, led_color;
+diff --git a/drivers/leds/leds-lp55xx-common.c b/drivers/leds/leds-lp55xx-common.c
+index 5a2e259679cfdf..e71456a56ab8da 100644
+--- a/drivers/leds/leds-lp55xx-common.c
++++ b/drivers/leds/leds-lp55xx-common.c
+@@ -1132,9 +1132,6 @@ static int lp55xx_parse_common_child(struct device_node *np,
+ 	if (ret)
+ 		return ret;
+ 
+-	if (*chan_nr < 0 || *chan_nr > cfg->max_channel)
+-		return -EINVAL;
+-
+ 	return 0;
+ }
+ 
+diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
+index 89632ce9776056..c9f47d0cccf9bb 100644
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -2484,6 +2484,7 @@ static void pool_work_wait(struct pool_work *pw, struct pool *pool,
+ 	init_completion(&pw->complete);
+ 	queue_work(pool->wq, &pw->worker);
+ 	wait_for_completion(&pw->complete);
++	destroy_work_on_stack(&pw->worker);
+ }
+ 
+ /*----------------------------------------------------------------*/
+diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
+index 29da10e6f703e2..c3a42dd66ce551 100644
+--- a/drivers/md/md-bitmap.c
++++ b/drivers/md/md-bitmap.c
+@@ -1285,6 +1285,7 @@ static void bitmap_unplug_async(struct bitmap *bitmap)
+ 
+ 	queue_work(md_bitmap_wq, &unplug_work.work);
+ 	wait_for_completion(&done);
++	destroy_work_on_stack(&unplug_work.work);
+ }
+ 
+ static void bitmap_unplug(struct mddev *mddev, bool sync)
+diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c
+index 3a19124ee27932..22a551c407da49 100644
+--- a/drivers/md/persistent-data/dm-space-map-common.c
++++ b/drivers/md/persistent-data/dm-space-map-common.c
+@@ -51,7 +51,7 @@ static int index_check(const struct dm_block_validator *v,
+ 					       block_size - sizeof(__le32),
+ 					       INDEX_CSUM_XOR));
+ 	if (csum_disk != mi_le->csum) {
+-		DMERR_LIMIT("i%s failed: csum %u != wanted %u", __func__,
++		DMERR_LIMIT("%s failed: csum %u != wanted %u", __func__,
+ 			    le32_to_cpu(csum_disk), le32_to_cpu(mi_le->csum));
+ 		return -EILSEQ;
+ 	}
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index dc2ea636d17342..2fa1f270fb1d3c 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -7177,6 +7177,8 @@ raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len)
+ 	err = mddev_suspend_and_lock(mddev);
+ 	if (err)
+ 		return err;
++	raid5_quiesce(mddev, true);
++
+ 	conf = mddev->private;
+ 	if (!conf)
+ 		err = -ENODEV;
+@@ -7198,6 +7200,8 @@ raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len)
+ 			kfree(old_groups);
+ 		}
+ 	}
++
++	raid5_quiesce(mddev, false);
+ 	mddev_unlock_and_resume(mddev);
+ 
+ 	return err ?: len;
+diff --git a/drivers/media/dvb-frontends/ts2020.c b/drivers/media/dvb-frontends/ts2020.c
+index a5baca2449c76d..e25add6cc38e94 100644
+--- a/drivers/media/dvb-frontends/ts2020.c
++++ b/drivers/media/dvb-frontends/ts2020.c
+@@ -553,13 +553,19 @@ static void ts2020_regmap_unlock(void *__dev)
+ static int ts2020_probe(struct i2c_client *client)
+ {
+ 	struct ts2020_config *pdata = client->dev.platform_data;
+-	struct dvb_frontend *fe = pdata->fe;
++	struct dvb_frontend *fe;
+ 	struct ts2020_priv *dev;
+ 	int ret;
+ 	u8 u8tmp;
+ 	unsigned int utmp;
+ 	char *chip_str;
+ 
++	if (!pdata) {
++		dev_err(&client->dev, "platform data is mandatory\n");
++		return -EINVAL;
++	}
++
++	fe = pdata->fe;
+ 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ 	if (!dev) {
+ 		ret = -ENOMEM;
+diff --git a/drivers/media/i2c/dw9768.c b/drivers/media/i2c/dw9768.c
+index 18ef2b35c9aa3d..87a7c3ceeb119e 100644
+--- a/drivers/media/i2c/dw9768.c
++++ b/drivers/media/i2c/dw9768.c
+@@ -471,10 +471,9 @@ static int dw9768_probe(struct i2c_client *client)
+ 	 * to be powered on in an ACPI system. Similarly for power off in
+ 	 * remove.
+ 	 */
+-	pm_runtime_enable(dev);
+ 	full_power = (is_acpi_node(dev_fwnode(dev)) &&
+ 		      acpi_dev_state_d0(dev)) ||
+-		     (is_of_node(dev_fwnode(dev)) && !pm_runtime_enabled(dev));
++		     (is_of_node(dev_fwnode(dev)) && !IS_ENABLED(CONFIG_PM));
+ 	if (full_power) {
+ 		ret = dw9768_runtime_resume(dev);
+ 		if (ret < 0) {
+@@ -484,6 +483,7 @@ static int dw9768_probe(struct i2c_client *client)
+ 		pm_runtime_set_active(dev);
+ 	}
+ 
++	pm_runtime_enable(dev);
+ 	ret = v4l2_async_register_subdev(&dw9768->sd);
+ 	if (ret < 0) {
+ 		dev_err(dev, "failed to register V4L2 subdev: %d", ret);
+@@ -495,12 +495,12 @@ static int dw9768_probe(struct i2c_client *client)
+ 	return 0;
+ 
+ err_power_off:
++	pm_runtime_disable(dev);
+ 	if (full_power) {
+ 		dw9768_runtime_suspend(dev);
+ 		pm_runtime_set_suspended(dev);
+ 	}
+ err_clean_entity:
+-	pm_runtime_disable(dev);
+ 	media_entity_cleanup(&dw9768->sd.entity);
+ err_free_handler:
+ 	v4l2_ctrl_handler_free(&dw9768->ctrls);
+@@ -517,12 +517,12 @@ static void dw9768_remove(struct i2c_client *client)
+ 	v4l2_async_unregister_subdev(&dw9768->sd);
+ 	v4l2_ctrl_handler_free(&dw9768->ctrls);
+ 	media_entity_cleanup(&dw9768->sd.entity);
++	pm_runtime_disable(dev);
+ 	if ((is_acpi_node(dev_fwnode(dev)) && acpi_dev_state_d0(dev)) ||
+-	    (is_of_node(dev_fwnode(dev)) && !pm_runtime_enabled(dev))) {
++	    (is_of_node(dev_fwnode(dev)) && !IS_ENABLED(CONFIG_PM))) {
+ 		dw9768_runtime_suspend(dev);
+ 		pm_runtime_set_suspended(dev);
+ 	}
+-	pm_runtime_disable(dev);
+ }
+ 
+ static const struct of_device_id dw9768_of_table[] = {
+diff --git a/drivers/media/i2c/ov08x40.c b/drivers/media/i2c/ov08x40.c
+index 7ead3c720e0e11..67b86dabc67eb1 100644
+--- a/drivers/media/i2c/ov08x40.c
++++ b/drivers/media/i2c/ov08x40.c
+@@ -1339,15 +1339,13 @@ static int ov08x40_read_reg(struct ov08x40 *ov08x,
+ 	return 0;
+ }
+ 
+-static int ov08x40_burst_fill_regs(struct ov08x40 *ov08x, u16 first_reg,
+-				   u16 last_reg,  u8 val)
++static int __ov08x40_burst_fill_regs(struct i2c_client *client, u16 first_reg,
++				     u16 last_reg, size_t num_regs, u8 val)
+ {
+-	struct i2c_client *client = v4l2_get_subdevdata(&ov08x->sd);
+ 	struct i2c_msg msgs;
+-	size_t i, num_regs;
++	size_t i;
+ 	int ret;
+ 
+-	num_regs = last_reg - first_reg + 1;
+ 	msgs.addr = client->addr;
+ 	msgs.flags = 0;
+ 	msgs.len = 2 + num_regs;
+@@ -1373,6 +1371,31 @@ static int ov08x40_burst_fill_regs(struct ov08x40 *ov08x, u16 first_reg,
+ 	return 0;
+ }
+ 
++static int ov08x40_burst_fill_regs(struct ov08x40 *ov08x, u16 first_reg,
++				   u16 last_reg,  u8 val)
++{
++	struct i2c_client *client = v4l2_get_subdevdata(&ov08x->sd);
++	size_t num_regs, num_write_regs;
++	int ret;
++
++	num_regs = last_reg - first_reg + 1;
++	num_write_regs = num_regs;
++
++	if (client->adapter->quirks && client->adapter->quirks->max_write_len)
++		num_write_regs = client->adapter->quirks->max_write_len - 2;
++
++	while (first_reg < last_reg) {
++		ret = __ov08x40_burst_fill_regs(client, first_reg, last_reg,
++						num_write_regs, val);
++		if (ret)
++			return ret;
++
++		first_reg += num_write_regs;
++	}
++
++	return 0;
++}
++
+ /* Write registers up to 4 at a time */
+ static int ov08x40_write_reg(struct ov08x40 *ov08x,
+ 			     u16 reg, u32 len, u32 __val)
+diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
+index 65d58ddf02870d..344a670e732fa5 100644
+--- a/drivers/media/i2c/tc358743.c
++++ b/drivers/media/i2c/tc358743.c
+@@ -2168,8 +2168,10 @@ static int tc358743_probe(struct i2c_client *client)
+ 
+ err_work_queues:
+ 	cec_unregister_adapter(state->cec_adap);
+-	if (!state->i2c_client->irq)
++	if (!state->i2c_client->irq) {
++		del_timer(&state->timer);
+ 		flush_work(&state->work_i2c_poll);
++	}
+ 	cancel_delayed_work(&state->delayed_work_enable_hotplug);
+ 	mutex_destroy(&state->confctl_mutex);
+ err_hdl:
+diff --git a/drivers/media/platform/allegro-dvt/allegro-core.c b/drivers/media/platform/allegro-dvt/allegro-core.c
+index 73606cee586ede..88c36eb6174ad6 100644
+--- a/drivers/media/platform/allegro-dvt/allegro-core.c
++++ b/drivers/media/platform/allegro-dvt/allegro-core.c
+@@ -1509,8 +1509,10 @@ static int allocate_buffers_internal(struct allegro_channel *channel,
+ 		INIT_LIST_HEAD(&buffer->head);
+ 
+ 		err = allegro_alloc_buffer(dev, buffer, size);
+-		if (err)
++		if (err) {
++			kfree(buffer);
+ 			goto err;
++		}
+ 		list_add(&buffer->head, list);
+ 	}
+ 
+diff --git a/drivers/media/platform/amphion/vpu_drv.c b/drivers/media/platform/amphion/vpu_drv.c
+index 2bf70aafd2baab..51d5234869f57d 100644
+--- a/drivers/media/platform/amphion/vpu_drv.c
++++ b/drivers/media/platform/amphion/vpu_drv.c
+@@ -151,8 +151,8 @@ static int vpu_probe(struct platform_device *pdev)
+ 	media_device_cleanup(&vpu->mdev);
+ 	v4l2_device_unregister(&vpu->v4l2_dev);
+ err_vpu_deinit:
+-	pm_runtime_set_suspended(dev);
+ 	pm_runtime_disable(dev);
++	pm_runtime_set_suspended(dev);
+ 
+ 	return ret;
+ }
+diff --git a/drivers/media/platform/amphion/vpu_v4l2.c b/drivers/media/platform/amphion/vpu_v4l2.c
+index 83db57bc80b70f..f0b1ec79d2961c 100644
+--- a/drivers/media/platform/amphion/vpu_v4l2.c
++++ b/drivers/media/platform/amphion/vpu_v4l2.c
+@@ -841,6 +841,7 @@ int vpu_add_func(struct vpu_dev *vpu, struct vpu_func *func)
+ 		vfd->fops = vdec_get_fops();
+ 		vfd->ioctl_ops = vdec_get_ioctl_ops();
+ 	}
++	video_set_drvdata(vfd, vpu);
+ 
+ 	ret = video_register_device(vfd, VFL_TYPE_VIDEO, -1);
+ 	if (ret) {
+@@ -848,7 +849,6 @@ int vpu_add_func(struct vpu_dev *vpu, struct vpu_func *func)
+ 		v4l2_m2m_release(func->m2m_dev);
+ 		return ret;
+ 	}
+-	video_set_drvdata(vfd, vpu);
+ 	func->vfd = vfd;
+ 
+ 	ret = v4l2_m2m_register_media_controller(func->m2m_dev, func->vfd, func->function);
+diff --git a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
+index ac48658e2de403..ff269467635561 100644
+--- a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
++++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
+@@ -1293,6 +1293,11 @@ static int mtk_jpeg_single_core_init(struct platform_device *pdev,
+ 	return 0;
+ }
+ 
++static void mtk_jpeg_destroy_workqueue(void *data)
++{
++	destroy_workqueue(data);
++}
++
+ static int mtk_jpeg_probe(struct platform_device *pdev)
+ {
+ 	struct mtk_jpeg_dev *jpeg;
+@@ -1337,6 +1342,11 @@ static int mtk_jpeg_probe(struct platform_device *pdev)
+ 							  | WQ_FREEZABLE);
+ 		if (!jpeg->workqueue)
+ 			return -EINVAL;
++		ret = devm_add_action_or_reset(&pdev->dev,
++					       mtk_jpeg_destroy_workqueue,
++					       jpeg->workqueue);
++		if (ret)
++			return ret;
+ 	}
+ 
+ 	ret = v4l2_device_register(&pdev->dev, &jpeg->v4l2_dev);
+diff --git a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_hw.c b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_hw.c
+index 4a6ee211e18f97..2c5d74939d0a92 100644
+--- a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_hw.c
++++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_hw.c
+@@ -578,11 +578,6 @@ static int mtk_jpegdec_hw_init_irq(struct mtk_jpegdec_comp_dev *dev)
+ 	return 0;
+ }
+ 
+-static void mtk_jpegdec_destroy_workqueue(void *data)
+-{
+-	destroy_workqueue(data);
+-}
+-
+ static int mtk_jpegdec_hw_probe(struct platform_device *pdev)
+ {
+ 	struct mtk_jpegdec_clk *jpegdec_clk;
+@@ -606,12 +601,6 @@ static int mtk_jpegdec_hw_probe(struct platform_device *pdev)
+ 	dev->plat_dev = pdev;
+ 	dev->dev = &pdev->dev;
+ 
+-	ret = devm_add_action_or_reset(&pdev->dev,
+-				       mtk_jpegdec_destroy_workqueue,
+-				       master_dev->workqueue);
+-	if (ret)
+-		return ret;
+-
+ 	spin_lock_init(&dev->hw_lock);
+ 	dev->hw_state = MTK_JPEG_HW_IDLE;
+ 
+diff --git a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
+index 1d891381303722..1bf85c1cf96435 100644
+--- a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
++++ b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
+@@ -2679,6 +2679,8 @@ static void mxc_jpeg_detach_pm_domains(struct mxc_jpeg_dev *jpeg)
+ 	int i;
+ 
+ 	for (i = 0; i < jpeg->num_domains; i++) {
++		if (jpeg->pd_dev[i] && !pm_runtime_suspended(jpeg->pd_dev[i]))
++			pm_runtime_force_suspend(jpeg->pd_dev[i]);
+ 		if (jpeg->pd_link[i] && !IS_ERR(jpeg->pd_link[i]))
+ 			device_link_del(jpeg->pd_link[i]);
+ 		if (jpeg->pd_dev[i] && !IS_ERR(jpeg->pd_dev[i]))
+@@ -2842,6 +2844,7 @@ static int mxc_jpeg_probe(struct platform_device *pdev)
+ 	jpeg->dec_vdev->vfl_dir = VFL_DIR_M2M;
+ 	jpeg->dec_vdev->device_caps = V4L2_CAP_STREAMING |
+ 					V4L2_CAP_VIDEO_M2M_MPLANE;
++	video_set_drvdata(jpeg->dec_vdev, jpeg);
+ 	if (mode == MXC_JPEG_ENCODE) {
+ 		v4l2_disable_ioctl(jpeg->dec_vdev, VIDIOC_DECODER_CMD);
+ 		v4l2_disable_ioctl(jpeg->dec_vdev, VIDIOC_TRY_DECODER_CMD);
+@@ -2854,7 +2857,6 @@ static int mxc_jpeg_probe(struct platform_device *pdev)
+ 		dev_err(dev, "failed to register video device\n");
+ 		goto err_vdev_register;
+ 	}
+-	video_set_drvdata(jpeg->dec_vdev, jpeg);
+ 	if (mode == MXC_JPEG_ENCODE)
+ 		v4l2_info(&jpeg->v4l2_dev,
+ 			  "encoder device registered as /dev/video%d (%d,%d)\n",
+diff --git a/drivers/media/platform/qcom/camss/camss.c b/drivers/media/platform/qcom/camss/camss.c
+index d64985ca6e884f..8c3bce738f2a8f 100644
+--- a/drivers/media/platform/qcom/camss/camss.c
++++ b/drivers/media/platform/qcom/camss/camss.c
+@@ -2130,10 +2130,8 @@ static int camss_configure_pd(struct camss *camss)
+ 	if (camss->res->pd_name) {
+ 		camss->genpd = dev_pm_domain_attach_by_name(camss->dev,
+ 							    camss->res->pd_name);
+-		if (IS_ERR(camss->genpd)) {
+-			ret = PTR_ERR(camss->genpd);
+-			goto fail_pm;
+-		}
++		if (IS_ERR(camss->genpd))
++			return PTR_ERR(camss->genpd);
+ 	}
+ 
+ 	if (!camss->genpd) {
+@@ -2143,14 +2141,13 @@ static int camss_configure_pd(struct camss *camss)
+ 		 */
+ 		camss->genpd = dev_pm_domain_attach_by_id(camss->dev,
+ 							  camss->genpd_num - 1);
++		if (IS_ERR(camss->genpd))
++			return PTR_ERR(camss->genpd);
+ 	}
+-	if (IS_ERR_OR_NULL(camss->genpd)) {
+-		if (!camss->genpd)
+-			ret = -ENODEV;
+-		else
+-			ret = PTR_ERR(camss->genpd);
+-		goto fail_pm;
+-	}
++
++	if (!camss->genpd)
++		return -ENODEV;
++
+ 	camss->genpd_link = device_link_add(camss->dev, camss->genpd,
+ 					    DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME |
+ 					    DL_FLAG_RPM_ACTIVE);
+diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
+index 84e95a46dfc983..cabcf710c0462a 100644
+--- a/drivers/media/platform/qcom/venus/core.c
++++ b/drivers/media/platform/qcom/venus/core.c
+@@ -412,8 +412,8 @@ static int venus_probe(struct platform_device *pdev)
+ 	of_platform_depopulate(dev);
+ err_runtime_disable:
+ 	pm_runtime_put_noidle(dev);
+-	pm_runtime_set_suspended(dev);
+ 	pm_runtime_disable(dev);
++	pm_runtime_set_suspended(dev);
+ 	hfi_destroy(core);
+ err_core_deinit:
+ 	hfi_core_deinit(core, false);
+diff --git a/drivers/media/platform/rockchip/rga/rga.c b/drivers/media/platform/rockchip/rga/rga.c
+index 0e768f3e9edab4..de532b7ecd74c1 100644
+--- a/drivers/media/platform/rockchip/rga/rga.c
++++ b/drivers/media/platform/rockchip/rga/rga.c
+@@ -102,7 +102,7 @@ queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)
+ 	src_vq->drv_priv = ctx;
+ 	src_vq->ops = &rga_qops;
+ 	src_vq->mem_ops = &vb2_dma_sg_memops;
+-	dst_vq->gfp_flags = __GFP_DMA32;
++	src_vq->gfp_flags = __GFP_DMA32;
+ 	src_vq->buf_struct_size = sizeof(struct rga_vb_buffer);
+ 	src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ 	src_vq->lock = &ctx->rga->mutex;
+diff --git a/drivers/media/platform/samsung/exynos4-is/media-dev.h b/drivers/media/platform/samsung/exynos4-is/media-dev.h
+index 786264cf79dc14..a50e58ab7ef773 100644
+--- a/drivers/media/platform/samsung/exynos4-is/media-dev.h
++++ b/drivers/media/platform/samsung/exynos4-is/media-dev.h
+@@ -178,8 +178,9 @@ int fimc_md_set_camclk(struct v4l2_subdev *sd, bool on);
+ #ifdef CONFIG_OF
+ static inline bool fimc_md_is_isp_available(struct device_node *node)
+ {
+-	node = of_get_child_by_name(node, FIMC_IS_OF_NODE_NAME);
+-	return node ? of_device_is_available(node) : false;
++	struct device_node *child __free(device_node) =
++		of_get_child_by_name(node, FIMC_IS_OF_NODE_NAME);
++	return child ? of_device_is_available(child) : false;
+ }
+ #else
+ #define fimc_md_is_isp_available(node) (false)
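The fimc_md_is_isp_available() rewrite above uses the kernel's scoped-cleanup helper: __free(device_node) hangs of_node_put() off the local variable, so the child reference is dropped on every return path automatically. The mechanism underneath is the compiler's cleanup attribute, which <linux/cleanup.h> wraps; a small userspace illustration, where node_get()/node_put() are invented stand-ins for of_get_child_by_name()/of_node_put():

#include <stdio.h>
#include <stdlib.h>

struct node { int available; };

static struct node *node_get(int present)
{
	if (!present)
		return NULL;
	struct node *n = calloc(1, sizeof(*n));
	if (n)
		n->available = 1;
	return n;
}

/* Called automatically when the annotated variable leaves scope. */
static void node_put(struct node **np)
{
	if (*np) {
		printf("releasing node %p\n", (void *)*np);
		free(*np);
	}
}

static int is_available(int present)
{
	/* Analogue of: struct device_node *child __free(device_node) = ...; */
	struct node *child __attribute__((cleanup(node_put))) = node_get(present);

	return child ? child->available : 0;	/* no explicit put on either path */
}

int main(void)
{
	printf("present: %d\n", is_available(1));
	printf("missing: %d\n", is_available(0));
	return 0;
}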
+diff --git a/drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c b/drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
+index 65e8f2d074005c..e54f5fac325bd6 100644
+--- a/drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
++++ b/drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
+@@ -161,8 +161,7 @@ static int rockchip_vpu981_av1_dec_frame_ref(struct hantro_ctx *ctx,
+ 		av1_dec->frame_refs[i].timestamp = timestamp;
+ 		av1_dec->frame_refs[i].frame_type = frame->frame_type;
+ 		av1_dec->frame_refs[i].order_hint = frame->order_hint;
+-		if (!av1_dec->frame_refs[i].vb2_ref)
+-			av1_dec->frame_refs[i].vb2_ref = hantro_get_dst_buf(ctx);
++		av1_dec->frame_refs[i].vb2_ref = hantro_get_dst_buf(ctx);
+ 
+ 		for (j = 0; j < V4L2_AV1_TOTAL_REFS_PER_FRAME; j++)
+ 			av1_dec->frame_refs[i].order_hints[j] = frame->order_hints[j];
+diff --git a/drivers/media/usb/gspca/ov534.c b/drivers/media/usb/gspca/ov534.c
+index 8b6a57f170d0dd..bdff64a29a33a2 100644
+--- a/drivers/media/usb/gspca/ov534.c
++++ b/drivers/media/usb/gspca/ov534.c
+@@ -847,7 +847,7 @@ static void set_frame_rate(struct gspca_dev *gspca_dev)
+ 		r = rate_1;
+ 		i = ARRAY_SIZE(rate_1);
+ 	}
+-	while (--i > 0) {
++	while (--i >= 0) {
+ 		if (sd->frame_rate >= r->fps)
+ 			break;
+ 		r++;
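The ov534 fix above is a descending-scan off-by-one: with while (--i > 0) the loop body runs at most ARRAY_SIZE - 1 times, so the last (slowest) entry of the rate table can be reached but never tested against the requested frame rate. A tiny standalone trace of which indices each condition actually examines:

#include <stdio.h>

static void trace(const char *label, int n, int keep_last)
{
	int i = n, idx = 0;

	printf("%s examines indices:", label);
	while (keep_last ? --i >= 0 : --i > 0)
		printf(" %d", idx++);
	printf("\n");
}

int main(void)
{
	trace("--i > 0 ", 4, 0);	/* 0 1 2   : index 3 never examined */
	trace("--i >= 0", 4, 1);	/* 0 1 2 3 : full table scanned */
	return 0;
}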
+diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
+index 13db0026dc1aad..675be4858366f0 100644
+--- a/drivers/media/usb/uvc/uvc_driver.c
++++ b/drivers/media/usb/uvc/uvc_driver.c
+@@ -775,14 +775,27 @@ static const u8 uvc_media_transport_input_guid[16] =
+ 	UVC_GUID_UVC_MEDIA_TRANSPORT_INPUT;
+ static const u8 uvc_processing_guid[16] = UVC_GUID_UVC_PROCESSING;
+ 
+-static struct uvc_entity *uvc_alloc_entity(u16 type, u16 id,
+-		unsigned int num_pads, unsigned int extra_size)
++static struct uvc_entity *uvc_alloc_new_entity(struct uvc_device *dev, u16 type,
++					       u16 id, unsigned int num_pads,
++					       unsigned int extra_size)
+ {
+ 	struct uvc_entity *entity;
+ 	unsigned int num_inputs;
+ 	unsigned int size;
+ 	unsigned int i;
+ 
++	/* Per UVC 1.1+ spec 3.7.2, the ID should be non-zero. */
++	if (id == 0) {
++		dev_err(&dev->udev->dev, "Found Unit with invalid ID 0.\n");
++		return ERR_PTR(-EINVAL);
++	}
++
++	/* Per UVC 1.1+ spec 3.7.2, the ID is unique. */
++	if (uvc_entity_by_id(dev, id)) {
++		dev_err(&dev->udev->dev, "Found multiple Units with ID %u\n", id);
++		return ERR_PTR(-EINVAL);
++	}
++
+ 	extra_size = roundup(extra_size, sizeof(*entity->pads));
+ 	if (num_pads)
+ 		num_inputs = type & UVC_TERM_OUTPUT ? num_pads : num_pads - 1;
+@@ -792,7 +805,7 @@ static struct uvc_entity *uvc_alloc_entity(u16 type, u16 id,
+ 	     + num_inputs;
+ 	entity = kzalloc(size, GFP_KERNEL);
+ 	if (entity == NULL)
+-		return NULL;
++		return ERR_PTR(-ENOMEM);
+ 
+ 	entity->id = id;
+ 	entity->type = type;
+@@ -904,10 +917,10 @@ static int uvc_parse_vendor_control(struct uvc_device *dev,
+ 			break;
+ 		}
+ 
+-		unit = uvc_alloc_entity(UVC_VC_EXTENSION_UNIT, buffer[3],
+-					p + 1, 2*n);
+-		if (unit == NULL)
+-			return -ENOMEM;
++		unit = uvc_alloc_new_entity(dev, UVC_VC_EXTENSION_UNIT,
++					    buffer[3], p + 1, 2 * n);
++		if (IS_ERR(unit))
++			return PTR_ERR(unit);
+ 
+ 		memcpy(unit->guid, &buffer[4], 16);
+ 		unit->extension.bNumControls = buffer[20];
+@@ -1016,10 +1029,10 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
+ 			return -EINVAL;
+ 		}
+ 
+-		term = uvc_alloc_entity(type | UVC_TERM_INPUT, buffer[3],
+-					1, n + p);
+-		if (term == NULL)
+-			return -ENOMEM;
++		term = uvc_alloc_new_entity(dev, type | UVC_TERM_INPUT,
++					    buffer[3], 1, n + p);
++		if (IS_ERR(term))
++			return PTR_ERR(term);
+ 
+ 		if (UVC_ENTITY_TYPE(term) == UVC_ITT_CAMERA) {
+ 			term->camera.bControlSize = n;
+@@ -1075,10 +1088,10 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
+ 			return 0;
+ 		}
+ 
+-		term = uvc_alloc_entity(type | UVC_TERM_OUTPUT, buffer[3],
+-					1, 0);
+-		if (term == NULL)
+-			return -ENOMEM;
++		term = uvc_alloc_new_entity(dev, type | UVC_TERM_OUTPUT,
++					    buffer[3], 1, 0);
++		if (IS_ERR(term))
++			return PTR_ERR(term);
+ 
+ 		memcpy(term->baSourceID, &buffer[7], 1);
+ 
+@@ -1097,9 +1110,10 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
+ 			return -EINVAL;
+ 		}
+ 
+-		unit = uvc_alloc_entity(buffer[2], buffer[3], p + 1, 0);
+-		if (unit == NULL)
+-			return -ENOMEM;
++		unit = uvc_alloc_new_entity(dev, buffer[2], buffer[3],
++					    p + 1, 0);
++		if (IS_ERR(unit))
++			return PTR_ERR(unit);
+ 
+ 		memcpy(unit->baSourceID, &buffer[5], p);
+ 
+@@ -1119,9 +1133,9 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
+ 			return -EINVAL;
+ 		}
+ 
+-		unit = uvc_alloc_entity(buffer[2], buffer[3], 2, n);
+-		if (unit == NULL)
+-			return -ENOMEM;
++		unit = uvc_alloc_new_entity(dev, buffer[2], buffer[3], 2, n);
++		if (IS_ERR(unit))
++			return PTR_ERR(unit);
+ 
+ 		memcpy(unit->baSourceID, &buffer[4], 1);
+ 		unit->processing.wMaxMultiplier =
+@@ -1148,9 +1162,10 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
+ 			return -EINVAL;
+ 		}
+ 
+-		unit = uvc_alloc_entity(buffer[2], buffer[3], p + 1, n);
+-		if (unit == NULL)
+-			return -ENOMEM;
++		unit = uvc_alloc_new_entity(dev, buffer[2], buffer[3],
++					    p + 1, n);
++		if (IS_ERR(unit))
++			return PTR_ERR(unit);
+ 
+ 		memcpy(unit->guid, &buffer[4], 16);
+ 		unit->extension.bNumControls = buffer[20];
+@@ -1290,9 +1305,10 @@ static int uvc_gpio_parse(struct uvc_device *dev)
+ 		return dev_err_probe(&dev->udev->dev, irq,
+ 				     "No IRQ for privacy GPIO\n");
+ 
+-	unit = uvc_alloc_entity(UVC_EXT_GPIO_UNIT, UVC_EXT_GPIO_UNIT_ID, 0, 1);
+-	if (!unit)
+-		return -ENOMEM;
++	unit = uvc_alloc_new_entity(dev, UVC_EXT_GPIO_UNIT,
++				    UVC_EXT_GPIO_UNIT_ID, 0, 1);
++	if (IS_ERR(unit))
++		return PTR_ERR(unit);
+ 
+ 	unit->gpio.gpio_privacy = gpio_privacy;
+ 	unit->gpio.irq = irq;
+@@ -1919,11 +1935,41 @@ static void uvc_unregister_video(struct uvc_device *dev)
+ 	struct uvc_streaming *stream;
+ 
+ 	list_for_each_entry(stream, &dev->streams, list) {
++		/* Nothing to do here, continue. */
+ 		if (!video_is_registered(&stream->vdev))
+ 			continue;
+ 
++		/*
++		 * For stream->vdev we follow the same logic as:
++		 * vb2_video_unregister_device().
++		 */
++
++		/* 1. Take a reference to vdev */
++		get_device(&stream->vdev.dev);
++
++		/* 2. Ensure that no new ioctls can be called. */
+ 		video_unregister_device(&stream->vdev);
+-		video_unregister_device(&stream->meta.vdev);
++
++		/* 3. Wait for old ioctls to finish. */
++		mutex_lock(&stream->mutex);
++
++		/* 4. Stop streaming. */
++		uvc_queue_release(&stream->queue);
++
++		mutex_unlock(&stream->mutex);
++
++		put_device(&stream->vdev.dev);
++
++		/*
++		 * For stream->meta.vdev we can directly call:
++		 * vb2_video_unregister_device().
++		 */
++		vb2_video_unregister_device(&stream->meta.vdev);
++
++		/*
++		 * Now both vdevs are not streaming and all the ioctls will
++		 * return -ENODEV.
++		 */
+ 
+ 		uvc_debugfs_cleanup_stream(stream);
+ 	}
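The rewritten uvc_unregister_video() above spells out the vb2_video_unregister_device() protocol in its numbered comments: pin the device, unpublish it so no new ioctls can start, then take the stream mutex to wait out ioctls already in flight before releasing the queue. A compact pthread sketch of that "unpublish, then drain under the lock" ordering; registered, queue and ioctl_thread are illustrative names, not the driver's:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_bool registered = true;
static int *queue;			/* resource owned by the "device" */

static void *ioctl_thread(void *arg)
{
	pthread_mutex_lock(&lock);
	if (atomic_load(&registered) && queue)
		printf("ioctl ran against a live queue\n");
	else
		printf("ioctl rejected, device gone\n");	/* -ENODEV case */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	int q = 42;
	pthread_t t;

	queue = &q;
	pthread_create(&t, NULL, ioctl_thread, NULL);

	/* steps 1+2: stop new ioctls from starting (unregister) */
	atomic_store(&registered, false);

	/* steps 3+4: wait for in-flight ioctls, tear down under the lock */
	pthread_mutex_lock(&lock);
	queue = NULL;			/* uvc_queue_release() analogue */
	pthread_mutex_unlock(&lock);

	pthread_join(t, NULL);
	return 0;
}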
+diff --git a/drivers/mtd/nand/spi/winbond.c b/drivers/mtd/nand/spi/winbond.c
+index f3bb81d7e46045..a33ad04e99cc8e 100644
+--- a/drivers/mtd/nand/spi/winbond.c
++++ b/drivers/mtd/nand/spi/winbond.c
+@@ -201,30 +201,30 @@ static const struct spinand_info winbond_spinand_table[] = {
+ 	SPINAND_INFO("W25N01JW",
+ 		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xbc, 0x21),
+ 		     NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
+-		     NAND_ECCREQ(4, 512),
++		     NAND_ECCREQ(1, 512),
+ 		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ 					      &write_cache_variants,
+ 					      &update_cache_variants),
+ 		     0,
+-		     SPINAND_ECCINFO(&w25m02gv_ooblayout, w25n02kv_ecc_get_status)),
++		     SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL)),
+ 	SPINAND_INFO("W25N02JWZEIF",
+ 		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xbf, 0x22),
+ 		     NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 2, 1),
+-		     NAND_ECCREQ(4, 512),
++		     NAND_ECCREQ(1, 512),
+ 		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ 					      &write_cache_variants,
+ 					      &update_cache_variants),
+ 		     0,
+-		     SPINAND_ECCINFO(&w25n02kv_ooblayout, w25n02kv_ecc_get_status)),
++		     SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL)),
+ 	SPINAND_INFO("W25N512GW",
+ 		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xba, 0x20),
+ 		     NAND_MEMORG(1, 2048, 64, 64, 512, 10, 1, 1, 1),
+-		     NAND_ECCREQ(4, 512),
++		     NAND_ECCREQ(1, 512),
+ 		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ 					      &write_cache_variants,
+ 					      &update_cache_variants),
+ 		     0,
+-		     SPINAND_ECCINFO(&w25n02kv_ooblayout, w25n02kv_ecc_get_status)),
++		     SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL)),
+ 	SPINAND_INFO("W25N02KWZEIR",
+ 		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xba, 0x22),
+ 		     NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
+@@ -237,12 +237,12 @@ static const struct spinand_info winbond_spinand_table[] = {
+ 	SPINAND_INFO("W25N01GWZEIG",
+ 		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xba, 0x21),
+ 		     NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
+-		     NAND_ECCREQ(4, 512),
++		     NAND_ECCREQ(1, 512),
+ 		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ 					      &write_cache_variants,
+ 					      &update_cache_variants),
+ 		     0,
+-		     SPINAND_ECCINFO(&w25m02gv_ooblayout, w25n02kv_ecc_get_status)),
++		     SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL)),
+ 	SPINAND_INFO("W25N04KV",
+ 		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xaa, 0x23),
+ 		     NAND_MEMORG(1, 2048, 128, 64, 4096, 40, 2, 1, 1),
+diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
+index a4eb6edb850add..7f6b5743207166 100644
+--- a/drivers/net/ethernet/freescale/fec_ptp.c
++++ b/drivers/net/ethernet/freescale/fec_ptp.c
+@@ -84,8 +84,7 @@
+ #define FEC_CC_MULT	(1 << 31)
+ #define FEC_COUNTER_PERIOD	(1 << 31)
+ #define PPS_OUPUT_RELOAD_PERIOD	NSEC_PER_SEC
+-#define FEC_CHANNLE_0		0
+-#define DEFAULT_PPS_CHANNEL	FEC_CHANNLE_0
++#define DEFAULT_PPS_CHANNEL	0
+ 
+ #define FEC_PTP_MAX_NSEC_PERIOD		4000000000ULL
+ #define FEC_PTP_MAX_NSEC_COUNTER	0x80000000ULL
+@@ -525,7 +524,6 @@ static int fec_ptp_enable(struct ptp_clock_info *ptp,
+ 	int ret = 0;
+ 
+ 	if (rq->type == PTP_CLK_REQ_PPS) {
+-		fep->pps_channel = DEFAULT_PPS_CHANNEL;
+ 		fep->reload_period = PPS_OUPUT_RELOAD_PERIOD;
+ 
+ 		ret = fec_ptp_enable_pps(fep, on);
+@@ -536,10 +534,9 @@ static int fec_ptp_enable(struct ptp_clock_info *ptp,
+ 		if (rq->perout.flags)
+ 			return -EOPNOTSUPP;
+ 
+-		if (rq->perout.index != DEFAULT_PPS_CHANNEL)
++		if (rq->perout.index != fep->pps_channel)
+ 			return -EOPNOTSUPP;
+ 
+-		fep->pps_channel = DEFAULT_PPS_CHANNEL;
+ 		period.tv_sec = rq->perout.period.sec;
+ 		period.tv_nsec = rq->perout.period.nsec;
+ 		period_ns = timespec64_to_ns(&period);
+@@ -707,12 +704,16 @@ void fec_ptp_init(struct platform_device *pdev, int irq_idx)
+ {
+ 	struct net_device *ndev = platform_get_drvdata(pdev);
+ 	struct fec_enet_private *fep = netdev_priv(ndev);
++	struct device_node *np = fep->pdev->dev.of_node;
+ 	int irq;
+ 	int ret;
+ 
+ 	fep->ptp_caps.owner = THIS_MODULE;
+ 	strscpy(fep->ptp_caps.name, "fec ptp", sizeof(fep->ptp_caps.name));
+ 
++	fep->pps_channel = DEFAULT_PPS_CHANNEL;
++	of_property_read_u32(np, "fsl,pps-channel", &fep->pps_channel);
++
+ 	fep->ptp_caps.max_adj = 250000000;
+ 	fep->ptp_caps.n_alarm = 0;
+ 	fep->ptp_caps.n_ext_ts = 0;
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 7bf275f127c9d7..766213ee82c16e 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -1205,6 +1205,9 @@ static int stmmac_init_phy(struct net_device *dev)
+ 			return -ENODEV;
+ 		}
+ 
++		if (priv->dma_cap.eee)
++			phy_support_eee(phydev);
++
+ 		ret = phylink_connect_phy(priv->phylink, phydev);
+ 	} else {
+ 		fwnode_handle_put(phy_fwnode);
+diff --git a/drivers/net/netkit.c b/drivers/net/netkit.c
+index 059269557d9264..fba2c734f0ec7f 100644
+--- a/drivers/net/netkit.c
++++ b/drivers/net/netkit.c
+@@ -20,6 +20,7 @@ struct netkit {
+ 	struct net_device __rcu *peer;
+ 	struct bpf_mprog_entry __rcu *active;
+ 	enum netkit_action policy;
++	enum netkit_scrub scrub;
+ 	struct bpf_mprog_bundle	bundle;
+ 
+ 	/* Needed in slow-path */
+@@ -50,12 +51,24 @@ netkit_run(const struct bpf_mprog_entry *entry, struct sk_buff *skb,
+ 	return ret;
+ }
+ 
+-static void netkit_prep_forward(struct sk_buff *skb, bool xnet)
++static void netkit_xnet(struct sk_buff *skb)
+ {
+-	skb_scrub_packet(skb, xnet);
+ 	skb->priority = 0;
++	skb->mark = 0;
++}
++
++static void netkit_prep_forward(struct sk_buff *skb,
++				bool xnet, bool xnet_scrub)
++{
++	skb_scrub_packet(skb, false);
+ 	nf_skip_egress(skb, true);
+ 	skb_reset_mac_header(skb);
++	if (!xnet)
++		return;
++	ipvs_reset(skb);
++	skb_clear_tstamp(skb);
++	if (xnet_scrub)
++		netkit_xnet(skb);
+ }
+ 
+ static struct netkit *netkit_priv(const struct net_device *dev)
+@@ -80,7 +93,8 @@ static netdev_tx_t netkit_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		     !pskb_may_pull(skb, ETH_HLEN) ||
+ 		     skb_orphan_frags(skb, GFP_ATOMIC)))
+ 		goto drop;
+-	netkit_prep_forward(skb, !net_eq(dev_net(dev), dev_net(peer)));
++	netkit_prep_forward(skb, !net_eq(dev_net(dev), dev_net(peer)),
++			    nk->scrub);
+ 	eth_skb_pkt_type(skb, peer);
+ 	skb->dev = peer;
+ 	entry = rcu_dereference(nk->active);
+@@ -332,8 +346,10 @@ static int netkit_new_link(struct net *src_net, struct net_device *dev,
+ 			   struct netlink_ext_ack *extack)
+ {
+ 	struct nlattr *peer_tb[IFLA_MAX + 1], **tbp = tb, *attr;
+-	enum netkit_action default_prim = NETKIT_PASS;
+-	enum netkit_action default_peer = NETKIT_PASS;
++	enum netkit_action policy_prim = NETKIT_PASS;
++	enum netkit_action policy_peer = NETKIT_PASS;
++	enum netkit_scrub scrub_prim = NETKIT_SCRUB_DEFAULT;
++	enum netkit_scrub scrub_peer = NETKIT_SCRUB_DEFAULT;
+ 	enum netkit_mode mode = NETKIT_L3;
+ 	unsigned char ifname_assign_type;
+ 	struct ifinfomsg *ifmp = NULL;
+@@ -362,17 +378,21 @@ static int netkit_new_link(struct net *src_net, struct net_device *dev,
+ 				return err;
+ 			tbp = peer_tb;
+ 		}
++		if (data[IFLA_NETKIT_SCRUB])
++			scrub_prim = nla_get_u32(data[IFLA_NETKIT_SCRUB]);
++		if (data[IFLA_NETKIT_PEER_SCRUB])
++			scrub_peer = nla_get_u32(data[IFLA_NETKIT_PEER_SCRUB]);
+ 		if (data[IFLA_NETKIT_POLICY]) {
+ 			attr = data[IFLA_NETKIT_POLICY];
+-			default_prim = nla_get_u32(attr);
+-			err = netkit_check_policy(default_prim, attr, extack);
++			policy_prim = nla_get_u32(attr);
++			err = netkit_check_policy(policy_prim, attr, extack);
+ 			if (err < 0)
+ 				return err;
+ 		}
+ 		if (data[IFLA_NETKIT_PEER_POLICY]) {
+ 			attr = data[IFLA_NETKIT_PEER_POLICY];
+-			default_peer = nla_get_u32(attr);
+-			err = netkit_check_policy(default_peer, attr, extack);
++			policy_peer = nla_get_u32(attr);
++			err = netkit_check_policy(policy_peer, attr, extack);
+ 			if (err < 0)
+ 				return err;
+ 		}
+@@ -409,7 +429,8 @@ static int netkit_new_link(struct net *src_net, struct net_device *dev,
+ 
+ 	nk = netkit_priv(peer);
+ 	nk->primary = false;
+-	nk->policy = default_peer;
++	nk->policy = policy_peer;
++	nk->scrub = scrub_peer;
+ 	nk->mode = mode;
+ 	bpf_mprog_bundle_init(&nk->bundle);
+ 
+@@ -434,7 +455,8 @@ static int netkit_new_link(struct net *src_net, struct net_device *dev,
+ 
+ 	nk = netkit_priv(dev);
+ 	nk->primary = true;
+-	nk->policy = default_prim;
++	nk->policy = policy_prim;
++	nk->scrub = scrub_prim;
+ 	nk->mode = mode;
+ 	bpf_mprog_bundle_init(&nk->bundle);
+ 
+@@ -874,6 +896,18 @@ static int netkit_change_link(struct net_device *dev, struct nlattr *tb[],
+ 		return -EACCES;
+ 	}
+ 
++	if (data[IFLA_NETKIT_SCRUB]) {
++		NL_SET_ERR_MSG_ATTR(extack, data[IFLA_NETKIT_SCRUB],
++				    "netkit scrubbing cannot be changed after device creation");
++		return -EACCES;
++	}
++
++	if (data[IFLA_NETKIT_PEER_SCRUB]) {
++		NL_SET_ERR_MSG_ATTR(extack, data[IFLA_NETKIT_PEER_SCRUB],
++				    "netkit scrubbing cannot be changed after device creation");
++		return -EACCES;
++	}
++
+ 	if (data[IFLA_NETKIT_PEER_INFO]) {
+ 		NL_SET_ERR_MSG_ATTR(extack, data[IFLA_NETKIT_PEER_INFO],
+ 				    "netkit peer info cannot be changed after device creation");
+@@ -908,8 +942,10 @@ static size_t netkit_get_size(const struct net_device *dev)
+ {
+ 	return nla_total_size(sizeof(u32)) + /* IFLA_NETKIT_POLICY */
+ 	       nla_total_size(sizeof(u32)) + /* IFLA_NETKIT_PEER_POLICY */
+-	       nla_total_size(sizeof(u8))  + /* IFLA_NETKIT_PRIMARY */
++	       nla_total_size(sizeof(u32)) + /* IFLA_NETKIT_SCRUB */
++	       nla_total_size(sizeof(u32)) + /* IFLA_NETKIT_PEER_SCRUB */
+ 	       nla_total_size(sizeof(u32)) + /* IFLA_NETKIT_MODE */
++	       nla_total_size(sizeof(u8))  + /* IFLA_NETKIT_PRIMARY */
+ 	       0;
+ }
+ 
+@@ -924,11 +960,15 @@ static int netkit_fill_info(struct sk_buff *skb, const struct net_device *dev)
+ 		return -EMSGSIZE;
+ 	if (nla_put_u32(skb, IFLA_NETKIT_MODE, nk->mode))
+ 		return -EMSGSIZE;
++	if (nla_put_u32(skb, IFLA_NETKIT_SCRUB, nk->scrub))
++		return -EMSGSIZE;
+ 
+ 	if (peer) {
+ 		nk = netkit_priv(peer);
+ 		if (nla_put_u32(skb, IFLA_NETKIT_PEER_POLICY, nk->policy))
+ 			return -EMSGSIZE;
++		if (nla_put_u32(skb, IFLA_NETKIT_PEER_SCRUB, nk->scrub))
++			return -EMSGSIZE;
+ 	}
+ 
+ 	return 0;
+@@ -936,9 +976,11 @@ static int netkit_fill_info(struct sk_buff *skb, const struct net_device *dev)
+ 
+ static const struct nla_policy netkit_policy[IFLA_NETKIT_MAX + 1] = {
+ 	[IFLA_NETKIT_PEER_INFO]		= { .len = sizeof(struct ifinfomsg) },
+-	[IFLA_NETKIT_POLICY]		= { .type = NLA_U32 },
+ 	[IFLA_NETKIT_MODE]		= { .type = NLA_U32 },
++	[IFLA_NETKIT_POLICY]		= { .type = NLA_U32 },
+ 	[IFLA_NETKIT_PEER_POLICY]	= { .type = NLA_U32 },
++	[IFLA_NETKIT_SCRUB]		= NLA_POLICY_MAX(NLA_U32, NETKIT_SCRUB_DEFAULT),
++	[IFLA_NETKIT_PEER_SCRUB]	= NLA_POLICY_MAX(NLA_U32, NETKIT_SCRUB_DEFAULT),
+ 	[IFLA_NETKIT_PRIMARY]		= { .type = NLA_REJECT,
+ 					    .reject_message = "Primary attribute is read-only" },
+ };
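The netkit changes above turn the unconditional scrub into policy: skb_scrub_packet(skb, false) still clears per-hop state on every transmit, the timestamp and ipvs state go whenever the packet crosses network namespaces, but skb->mark and skb->priority survive unless the device was created with NETKIT_SCRUB_DEFAULT. A small standalone model of that decision, with struct pkt standing in for the relevant sk_buff fields:

#include <stdbool.h>
#include <stdio.h>

struct pkt { unsigned int mark, priority; long tstamp; };

enum scrub { SCRUB_NONE, SCRUB_DEFAULT };

static void prep_forward(struct pkt *p, bool xnet, enum scrub mode)
{
	if (!xnet)
		return;			/* same netns: keep everything */
	p->tstamp = 0;			/* always dropped across netns */
	if (mode == SCRUB_DEFAULT) {	/* mark/priority only under scrubbing */
		p->mark = 0;
		p->priority = 0;
	}
}

int main(void)
{
	struct pkt p = { .mark = 7, .priority = 3, .tstamp = 99 };

	prep_forward(&p, true, SCRUB_NONE);
	printf("xnet, scrub none:    mark=%u prio=%u tstamp=%ld\n",
	       p.mark, p.priority, p.tstamp);

	prep_forward(&p, true, SCRUB_DEFAULT);
	printf("xnet, scrub default: mark=%u prio=%u tstamp=%ld\n",
	       p.mark, p.priority, p.tstamp);
	return 0;
}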
+diff --git a/drivers/net/phy/dp83869.c b/drivers/net/phy/dp83869.c
+index 5f056d7db83eed..b6b38caf9c0ed0 100644
+--- a/drivers/net/phy/dp83869.c
++++ b/drivers/net/phy/dp83869.c
+@@ -153,19 +153,32 @@ struct dp83869_private {
+ 	int mode;
+ };
+ 
++static int dp83869_config_aneg(struct phy_device *phydev)
++{
++	struct dp83869_private *dp83869 = phydev->priv;
++
++	if (dp83869->mode != DP83869_RGMII_1000_BASE)
++		return genphy_config_aneg(phydev);
++
++	return genphy_c37_config_aneg(phydev);
++}
++
+ static int dp83869_read_status(struct phy_device *phydev)
+ {
+ 	struct dp83869_private *dp83869 = phydev->priv;
++	bool changed;
+ 	int ret;
+ 
++	if (dp83869->mode == DP83869_RGMII_1000_BASE)
++		return genphy_c37_read_status(phydev, &changed);
++
+ 	ret = genphy_read_status(phydev);
+ 	if (ret)
+ 		return ret;
+ 
+-	if (linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, phydev->supported)) {
++	if (dp83869->mode == DP83869_RGMII_100_BASE) {
+ 		if (phydev->link) {
+-			if (dp83869->mode == DP83869_RGMII_100_BASE)
+-				phydev->speed = SPEED_100;
++			phydev->speed = SPEED_100;
+ 		} else {
+ 			phydev->speed = SPEED_UNKNOWN;
+ 			phydev->duplex = DUPLEX_UNKNOWN;
+@@ -898,6 +911,7 @@ static int dp83869_phy_reset(struct phy_device *phydev)
+ 	.soft_reset	= dp83869_phy_reset,			\
+ 	.config_intr	= dp83869_config_intr,			\
+ 	.handle_interrupt = dp83869_handle_interrupt,		\
++	.config_aneg    = dp83869_config_aneg,                  \
+ 	.read_status	= dp83869_read_status,			\
+ 	.get_tunable	= dp83869_get_tunable,			\
+ 	.set_tunable	= dp83869_set_tunable,			\
+diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
+index 33ffa2aa4c1152..e1a15fbc6ad025 100644
+--- a/drivers/nvmem/core.c
++++ b/drivers/nvmem/core.c
+@@ -267,7 +267,7 @@ static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
+ 
+ 	count = round_down(count, nvmem->word_size);
+ 
+-	if (!nvmem->reg_write)
++	if (!nvmem->reg_write || nvmem->read_only)
+ 		return -EPERM;
+ 
+ 	rc = nvmem_reg_write(nvmem, pos, buf, count);
+diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
+index 808d1f10541733..c8d5c90aa4d45b 100644
+--- a/drivers/pci/controller/dwc/pci-imx6.c
++++ b/drivers/pci/controller/dwc/pci-imx6.c
+@@ -82,6 +82,11 @@ enum imx_pcie_variants {
+ #define IMX_PCIE_FLAG_HAS_SERDES		BIT(6)
+ #define IMX_PCIE_FLAG_SUPPORT_64BIT		BIT(7)
+ #define IMX_PCIE_FLAG_CPU_ADDR_FIXUP		BIT(8)
++/*
++ * Because of ERR005723 (PCIe does not support L2 power down) we need to
++ * work around suspend/resume on the devices affected by this erratum.
++ */
++#define IMX_PCIE_FLAG_BROKEN_SUSPEND		BIT(9)
+ 
+ #define imx_check_flag(pci, val)	(pci->drvdata->flags & val)
+ 
+@@ -1237,9 +1242,19 @@ static int imx_pcie_suspend_noirq(struct device *dev)
+ 		return 0;
+ 
+ 	imx_pcie_msi_save_restore(imx_pcie, true);
+-	imx_pcie_pm_turnoff(imx_pcie);
+-	imx_pcie_stop_link(imx_pcie->pci);
+-	imx_pcie_host_exit(pp);
++	if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_BROKEN_SUSPEND)) {
++		/*
++		 * The minimum for a workaround would be to set PERST# and to
++		 * set the PCIE_TEST_PD flag. However, we can also disable the
++		 * clock which saves some power.
++		 */
++		imx_pcie_assert_core_reset(imx_pcie);
++		imx_pcie->drvdata->enable_ref_clk(imx_pcie, false);
++	} else {
++		imx_pcie_pm_turnoff(imx_pcie);
++		imx_pcie_stop_link(imx_pcie->pci);
++		imx_pcie_host_exit(pp);
++	}
+ 
+ 	return 0;
+ }
+@@ -1253,14 +1268,32 @@ static int imx_pcie_resume_noirq(struct device *dev)
+ 	if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_SUPPORTS_SUSPEND))
+ 		return 0;
+ 
+-	ret = imx_pcie_host_init(pp);
+-	if (ret)
+-		return ret;
+-	imx_pcie_msi_save_restore(imx_pcie, false);
+-	dw_pcie_setup_rc(pp);
++	if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_BROKEN_SUSPEND)) {
++		ret = imx_pcie->drvdata->enable_ref_clk(imx_pcie, true);
++		if (ret)
++			return ret;
++		ret = imx_pcie_deassert_core_reset(imx_pcie);
++		if (ret)
++			return ret;
++		/*
++		 * Using PCIE_TEST_PD seems to disable MSI and power down the
++		 * root complex. This is why we have to set up the rc again and
++		 * why we have to restore the MSI register.
++		 */
++		ret = dw_pcie_setup_rc(&imx_pcie->pci->pp);
++		if (ret)
++			return ret;
++		imx_pcie_msi_save_restore(imx_pcie, false);
++	} else {
++		ret = imx_pcie_host_init(pp);
++		if (ret)
++			return ret;
++		imx_pcie_msi_save_restore(imx_pcie, false);
++		dw_pcie_setup_rc(pp);
+ 
+-	if (imx_pcie->link_is_up)
+-		imx_pcie_start_link(imx_pcie->pci);
++		if (imx_pcie->link_is_up)
++			imx_pcie_start_link(imx_pcie->pci);
++	}
+ 
+ 	return 0;
+ }
+@@ -1485,7 +1518,9 @@ static const struct imx_pcie_drvdata drvdata[] = {
+ 	[IMX6Q] = {
+ 		.variant = IMX6Q,
+ 		.flags = IMX_PCIE_FLAG_IMX_PHY |
+-			 IMX_PCIE_FLAG_IMX_SPEED_CHANGE,
++			 IMX_PCIE_FLAG_IMX_SPEED_CHANGE |
++			 IMX_PCIE_FLAG_BROKEN_SUSPEND |
++			 IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
+ 		.dbi_length = 0x200,
+ 		.gpr = "fsl,imx6q-iomuxc-gpr",
+ 		.clk_names = imx6q_clks,
+diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
+index 2219b1a866faf2..44b34559de1ac5 100644
+--- a/drivers/pci/controller/dwc/pci-keystone.c
++++ b/drivers/pci/controller/dwc/pci-keystone.c
+@@ -455,6 +455,17 @@ static void __iomem *ks_pcie_other_map_bus(struct pci_bus *bus,
+ 	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+ 	u32 reg;
+ 
++	/*
++	 * Checking whether the link is up here is a last line of defense
++	 * against platforms that forward errors on the system bus as
++	 * SError upon PCI configuration transactions issued when the link
++	 * is down. This check is racy by definition and does not stop
++	 * the system from triggering an SError if the link goes down
++	 * after this check is performed.
++	 */
++	if (!dw_pcie_link_up(pci))
++		return NULL;
++
+ 	reg = CFG_BUS(bus->number) | CFG_DEVICE(PCI_SLOT(devfn)) |
+ 		CFG_FUNC(PCI_FUNC(devfn));
+ 	if (!pci_is_root_bus(bus->parent))
+@@ -1093,6 +1104,7 @@ static int ks_pcie_am654_set_mode(struct device *dev,
+ 
+ static const struct ks_pcie_of_data ks_pcie_rc_of_data = {
+ 	.host_ops = &ks_pcie_host_ops,
++	.mode = DW_PCIE_RC_TYPE,
+ 	.version = DW_PCIE_VER_365A,
+ };
+ 
+diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
+index 43ba5c6738df1a..cc8ff4a014368c 100644
+--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
++++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
+@@ -689,7 +689,7 @@ static void dw_pcie_ep_init_non_sticky_registers(struct dw_pcie *pci)
+ 		 * for 1 MB BAR size only.
+ 		 */
+ 		for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL)
+-			dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, 0x0);
++			dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, BIT(4));
+ 	}
+ 
+ 	dw_pcie_setup(pci);
+diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
+index 2b33d03ed05416..b5447228696dc4 100644
+--- a/drivers/pci/controller/dwc/pcie-qcom.c
++++ b/drivers/pci/controller/dwc/pcie-qcom.c
+@@ -1845,7 +1845,7 @@ static const struct of_device_id qcom_pcie_match[] = {
+ 	{ .compatible = "qcom,pcie-sm8450-pcie0", .data = &cfg_1_9_0 },
+ 	{ .compatible = "qcom,pcie-sm8450-pcie1", .data = &cfg_1_9_0 },
+ 	{ .compatible = "qcom,pcie-sm8550", .data = &cfg_1_9_0 },
+-	{ .compatible = "qcom,pcie-x1e80100", .data = &cfg_1_9_0 },
++	{ .compatible = "qcom,pcie-x1e80100", .data = &cfg_sc8280xp },
+ 	{ }
+ };
+ 
+diff --git a/drivers/pci/controller/pcie-rockchip-ep.c b/drivers/pci/controller/pcie-rockchip-ep.c
+index 1362745336568e..a6805b005798c3 100644
+--- a/drivers/pci/controller/pcie-rockchip-ep.c
++++ b/drivers/pci/controller/pcie-rockchip-ep.c
+@@ -63,15 +63,25 @@ static void rockchip_pcie_clear_ep_ob_atu(struct rockchip_pcie *rockchip,
+ 			    ROCKCHIP_PCIE_AT_OB_REGION_DESC1(region));
+ }
+ 
++static int rockchip_pcie_ep_ob_atu_num_bits(struct rockchip_pcie *rockchip,
++					    u64 pci_addr, size_t size)
++{
++	int num_pass_bits = fls64(pci_addr ^ (pci_addr + size - 1));
++
++	return clamp(num_pass_bits,
++		     ROCKCHIP_PCIE_AT_MIN_NUM_BITS,
++		     ROCKCHIP_PCIE_AT_MAX_NUM_BITS);
++}
++
+ static void rockchip_pcie_prog_ep_ob_atu(struct rockchip_pcie *rockchip, u8 fn,
+ 					 u32 r, u64 cpu_addr, u64 pci_addr,
+ 					 size_t size)
+ {
+-	int num_pass_bits = fls64(size - 1);
++	int num_pass_bits;
+ 	u32 addr0, addr1, desc0;
+ 
+-	if (num_pass_bits < 8)
+-		num_pass_bits = 8;
++	num_pass_bits = rockchip_pcie_ep_ob_atu_num_bits(rockchip,
++							 pci_addr, size);
+ 
+ 	addr0 = ((num_pass_bits - 1) & PCIE_CORE_OB_REGION_ADDR0_NUM_BITS) |
+ 		(lower_32_bits(pci_addr) & PCIE_CORE_OB_REGION_ADDR0_LO_ADDR);
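The Rockchip EP change above sizes the outbound ATU window from the span the mapping must cover, not from the size alone: fls64(pci_addr ^ (pci_addr + size - 1)) is the position of the highest bit in which the first and last byte addresses differ, i.e. the smallest naturally aligned window containing both, clamped to the controller's 8..20 passthrough-bit range. A standalone check of the arithmetic, with fls64 built from __builtin_clzll:

#include <stdint.h>
#include <stdio.h>

static int fls64(uint64_t x)
{
	return x ? 64 - __builtin_clzll(x) : 0;
}

static int clampi(int v, int lo, int hi)
{
	return v < lo ? lo : v > hi ? hi : v;
}

static int atu_num_bits(uint64_t pci_addr, uint64_t size)
{
	return clampi(fls64(pci_addr ^ (pci_addr + size - 1)), 8, 20);
}

int main(void)
{
	/* naturally aligned 4 KiB mapping: 12 passthrough bits suffice */
	printf("aligned:   %d bits\n", atu_num_bits(0x10000, 0x1000));
	/* same size crossing a 4 KiB boundary: 13 bits are needed, while
	 * the old fls64(size - 1) computation would still have said 12 */
	printf("unaligned: %d bits\n", atu_num_bits(0x10800, 0x1000));
	return 0;
}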
+diff --git a/drivers/pci/controller/pcie-rockchip.h b/drivers/pci/controller/pcie-rockchip.h
+index 6111de35f84ca2..15ee949f2485e3 100644
+--- a/drivers/pci/controller/pcie-rockchip.h
++++ b/drivers/pci/controller/pcie-rockchip.h
+@@ -245,6 +245,10 @@
+ 	(PCIE_EP_PF_CONFIG_REGS_BASE + (((fn) << 12) & GENMASK(19, 12)))
+ #define ROCKCHIP_PCIE_EP_VIRT_FUNC_BASE(fn) \
+ 	(PCIE_EP_PF_CONFIG_REGS_BASE + 0x10000 + (((fn) << 12) & GENMASK(19, 12)))
++
++#define ROCKCHIP_PCIE_AT_MIN_NUM_BITS  8
++#define ROCKCHIP_PCIE_AT_MAX_NUM_BITS  20
++
+ #define ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar) \
+ 	(PCIE_CORE_AXI_CONF_BASE + 0x0828 + (fn) * 0x0040 + (bar) * 0x0008)
+ #define ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar) \
+diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c
+index 17f00710925508..62f7dff437309f 100644
+--- a/drivers/pci/endpoint/pci-epc-core.c
++++ b/drivers/pci/endpoint/pci-epc-core.c
+@@ -660,18 +660,18 @@ void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf,
+ 	if (IS_ERR_OR_NULL(epc) || !epf)
+ 		return;
+ 
++	mutex_lock(&epc->list_lock);
+ 	if (type == PRIMARY_INTERFACE) {
+ 		func_no = epf->func_no;
+ 		list = &epf->list;
++		epf->epc = NULL;
+ 	} else {
+ 		func_no = epf->sec_epc_func_no;
+ 		list = &epf->sec_epc_list;
++		epf->sec_epc = NULL;
+ 	}
+-
+-	mutex_lock(&epc->list_lock);
+ 	clear_bit(func_no, &epc->function_num_map);
+ 	list_del(list);
+-	epf->epc = NULL;
+ 	mutex_unlock(&epc->list_lock);
+ }
+ EXPORT_SYMBOL_GPL(pci_epc_remove_epf);
+@@ -837,11 +837,10 @@ EXPORT_SYMBOL_GPL(pci_epc_bus_master_enable_notify);
+ void pci_epc_destroy(struct pci_epc *epc)
+ {
+ 	pci_ep_cfs_remove_epc_group(epc->group);
+-	device_unregister(&epc->dev);
+-
+ #ifdef CONFIG_PCI_DOMAINS_GENERIC
+-	pci_bus_release_domain_nr(&epc->dev, epc->domain_nr);
++	pci_bus_release_domain_nr(epc->dev.parent, epc->domain_nr);
+ #endif
++	device_unregister(&epc->dev);
+ }
+ EXPORT_SYMBOL_GPL(pci_epc_destroy);
+ 
+diff --git a/drivers/pci/of_property.c b/drivers/pci/of_property.c
+index 5a0b98e697954a..886c236e5de6e6 100644
+--- a/drivers/pci/of_property.c
++++ b/drivers/pci/of_property.c
+@@ -126,7 +126,7 @@ static int of_pci_prop_ranges(struct pci_dev *pdev, struct of_changeset *ocs,
+ 		if (of_pci_get_addr_flags(&res[j], &flags))
+ 			continue;
+ 
+-		val64 = res[j].start;
++		val64 = pci_bus_address(pdev, &res[j] - pdev->resource);
+ 		of_pci_set_address(pdev, rp[i].parent_addr, val64, 0, flags,
+ 				   false);
+ 		if (pci_is_bridge(pdev)) {
+diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c
+index f4f4b3df3884ef..793b1d274be33a 100644
+--- a/drivers/remoteproc/qcom_q6v5_pas.c
++++ b/drivers/remoteproc/qcom_q6v5_pas.c
+@@ -1356,7 +1356,7 @@ static const struct adsp_data sc7280_wpss_resource = {
+ 	.crash_reason_smem = 626,
+ 	.firmware_name = "wpss.mdt",
+ 	.pas_id = 6,
+-	.auto_boot = true,
++	.auto_boot = false,
+ 	.proxy_pd_names = (char*[]){
+ 		"cx",
+ 		"mx",
+diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
+index 9ba9495fcc4bae..ea843159b745d5 100644
+--- a/drivers/spmi/spmi-pmic-arb.c
++++ b/drivers/spmi/spmi-pmic-arb.c
+@@ -1763,14 +1763,13 @@ static int spmi_pmic_arb_register_buses(struct spmi_pmic_arb *pmic_arb,
+ {
+ 	struct device *dev = &pdev->dev;
+ 	struct device_node *node = dev->of_node;
+-	struct device_node *child;
+ 	int ret;
+ 
+ 	/* legacy mode doesn't provide child node for the bus */
+ 	if (of_device_is_compatible(node, "qcom,spmi-pmic-arb"))
+ 		return spmi_pmic_arb_bus_init(pdev, node, pmic_arb);
+ 
+-	for_each_available_child_of_node(node, child) {
++	for_each_available_child_of_node_scoped(node, child) {
+ 		if (of_node_name_eq(child, "spmi")) {
+ 			ret = spmi_pmic_arb_bus_init(pdev, child, pmic_arb);
+ 			if (ret)
+diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
+index b0c0f0ffdcb046..f547d386ae805b 100644
+--- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
++++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
+@@ -137,7 +137,7 @@ static ssize_t current_uuid_show(struct device *dev,
+ 	struct int3400_thermal_priv *priv = dev_get_drvdata(dev);
+ 	int i, length = 0;
+ 
+-	if (priv->current_uuid_index > 0)
++	if (priv->current_uuid_index >= 0)
+ 		return sprintf(buf, "%s\n",
+ 			       int3400_thermal_uuids[priv->current_uuid_index]);
+ 
+diff --git a/drivers/ufs/host/ufs-exynos.c b/drivers/ufs/host/ufs-exynos.c
+index 5867e633856233..fb550a7c16b34b 100644
+--- a/drivers/ufs/host/ufs-exynos.c
++++ b/drivers/ufs/host/ufs-exynos.c
+@@ -724,6 +724,9 @@ static void exynos_ufs_config_smu(struct exynos_ufs *ufs)
+ {
+ 	u32 reg, val;
+ 
++	if (ufs->opts & EXYNOS_UFS_OPT_UFSPR_SECURE)
++		return;
++
+ 	exynos_ufs_disable_auto_ctrl_hcc_save(ufs, &val);
+ 
+ 	/* make encryption disabled by default */
+@@ -1440,8 +1443,8 @@ static int exynos_ufs_init(struct ufs_hba *hba)
+ 	if (ret)
+ 		goto out;
+ 	exynos_ufs_specify_phy_time_attr(ufs);
+-	if (!(ufs->opts & EXYNOS_UFS_OPT_UFSPR_SECURE))
+-		exynos_ufs_config_smu(ufs);
++
++	exynos_ufs_config_smu(ufs);
+ 
+ 	hba->host->dma_alignment = DATA_UNIT_SIZE - 1;
+ 	return 0;
+@@ -1484,12 +1487,12 @@ static void exynos_ufs_dev_hw_reset(struct ufs_hba *hba)
+ 	hci_writel(ufs, 1 << 0, HCI_GPIO_OUT);
+ }
+ 
+-static void exynos_ufs_pre_hibern8(struct ufs_hba *hba, u8 enter)
++static void exynos_ufs_pre_hibern8(struct ufs_hba *hba, enum uic_cmd_dme cmd)
+ {
+ 	struct exynos_ufs *ufs = ufshcd_get_variant(hba);
+ 	struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;
+ 
+-	if (!enter) {
++	if (cmd == UIC_CMD_DME_HIBER_EXIT) {
+ 		if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL)
+ 			exynos_ufs_disable_auto_ctrl_hcc(ufs);
+ 		exynos_ufs_ungate_clks(ufs);
+@@ -1517,11 +1520,11 @@ static void exynos_ufs_pre_hibern8(struct ufs_hba *hba, u8 enter)
+ 	}
+ }
+ 
+-static void exynos_ufs_post_hibern8(struct ufs_hba *hba, u8 enter)
++static void exynos_ufs_post_hibern8(struct ufs_hba *hba, enum uic_cmd_dme cmd)
+ {
+ 	struct exynos_ufs *ufs = ufshcd_get_variant(hba);
+ 
+-	if (!enter) {
++	if (cmd == UIC_CMD_DME_HIBER_EXIT) {
+ 		u32 cur_mode = 0;
+ 		u32 pwrmode;
+ 
+@@ -1540,7 +1543,7 @@ static void exynos_ufs_post_hibern8(struct ufs_hba *hba, u8 enter)
+ 
+ 		if (!(ufs->opts & EXYNOS_UFS_OPT_SKIP_CONNECTION_ESTAB))
+ 			exynos_ufs_establish_connt(ufs);
+-	} else {
++	} else if (cmd == UIC_CMD_DME_HIBER_ENTER) {
+ 		ufs->entry_hibern8_t = ktime_get();
+ 		exynos_ufs_gate_clks(ufs);
+ 		if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL)
+@@ -1627,15 +1630,15 @@ static int exynos_ufs_pwr_change_notify(struct ufs_hba *hba,
+ }
+ 
+ static void exynos_ufs_hibern8_notify(struct ufs_hba *hba,
+-				     enum uic_cmd_dme enter,
++				     enum uic_cmd_dme cmd,
+ 				     enum ufs_notify_change_status notify)
+ {
+ 	switch ((u8)notify) {
+ 	case PRE_CHANGE:
+-		exynos_ufs_pre_hibern8(hba, enter);
++		exynos_ufs_pre_hibern8(hba, cmd);
+ 		break;
+ 	case POST_CHANGE:
+-		exynos_ufs_post_hibern8(hba, enter);
++		exynos_ufs_post_hibern8(hba, cmd);
+ 		break;
+ 	}
+ }
+diff --git a/drivers/vfio/pci/qat/main.c b/drivers/vfio/pci/qat/main.c
+index be3644ced17be4..c78cb6de93906c 100644
+--- a/drivers/vfio/pci/qat/main.c
++++ b/drivers/vfio/pci/qat/main.c
+@@ -304,7 +304,7 @@ static ssize_t qat_vf_resume_write(struct file *filp, const char __user *buf,
+ 	offs = &filp->f_pos;
+ 
+ 	if (*offs < 0 ||
+-	    check_add_overflow((loff_t)len, *offs, &end))
++	    check_add_overflow(len, *offs, &end))
+ 		return -EOVERFLOW;
+ 
+ 	if (end > mig_dev->state_size)
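The qat change above drops the (loff_t) cast so check_add_overflow() performs the addition in the natural types of its arguments and result; the primitive underneath is the compiler's __builtin_add_overflow. A minimal standalone version of the same "offset + length must fit and stay within the saved state" guard; the function and its bound are invented for illustration:

#include <stdint.h>
#include <stdio.h>

static int resume_write_ok(int64_t pos, uint64_t len, int64_t state_size)
{
	int64_t end;

	/* true if pos + len does not fit in an int64_t */
	if (pos < 0 || __builtin_add_overflow(len, pos, &end))
		return -1;		/* -EOVERFLOW analogue */
	if (end > state_size)
		return -1;		/* write past the saved state */
	return 0;
}

int main(void)
{
	printf("in range: %d\n", resume_write_ok(100, 50, 200));
	printf("overflow: %d\n", resume_write_ok(INT64_MAX, 1, 200));
	return 0;
}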
+diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
+index e152fde888fc9a..db53a3263fbd05 100644
+--- a/fs/btrfs/btrfs_inode.h
++++ b/fs/btrfs/btrfs_inode.h
+@@ -613,11 +613,17 @@ int btrfs_writepage_cow_fixup(struct folio *folio);
+ int btrfs_encoded_io_compression_from_extent(struct btrfs_fs_info *fs_info,
+ 					     int compress_type);
+ int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
+-					  u64 file_offset, u64 disk_bytenr,
+-					  u64 disk_io_size,
++					  u64 disk_bytenr, u64 disk_io_size,
+ 					  struct page **pages);
+ ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
+-			   struct btrfs_ioctl_encoded_io_args *encoded);
++			   struct btrfs_ioctl_encoded_io_args *encoded,
++			   struct extent_state **cached_state,
++			   u64 *disk_bytenr, u64 *disk_io_size);
++ssize_t btrfs_encoded_read_regular(struct kiocb *iocb, struct iov_iter *iter,
++				   u64 start, u64 lockend,
++				   struct extent_state **cached_state,
++				   u64 disk_bytenr, u64 disk_io_size,
++				   size_t count, bool compressed, bool *unlocked);
+ ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
+ 			       const struct btrfs_ioctl_encoded_io_args *encoded);
+ 
+diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
+index 0cc919d15b1441..9c05cab473f577 100644
+--- a/fs/btrfs/ctree.c
++++ b/fs/btrfs/ctree.c
+@@ -2010,7 +2010,7 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+ 		      const struct btrfs_key *key, struct btrfs_path *p,
+ 		      int ins_len, int cow)
+ {
+-	struct btrfs_fs_info *fs_info = root->fs_info;
++	struct btrfs_fs_info *fs_info;
+ 	struct extent_buffer *b;
+ 	int slot;
+ 	int ret;
+@@ -2023,6 +2023,10 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+ 	int min_write_lock_level;
+ 	int prev_cmp;
+ 
++	if (!root)
++		return -EINVAL;
++
++	fs_info = root->fs_info;
+ 	might_sleep();
+ 
+ 	lowest_level = p->lowest_level;
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index d9f511babd89ab..b43a8611aca5c6 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -2446,7 +2446,7 @@ int btrfs_cross_ref_exist(struct btrfs_root *root, u64 objectid, u64 offset,
+ 			goto out;
+ 
+ 		ret = check_delayed_ref(root, path, objectid, offset, bytenr);
+-	} while (ret == -EAGAIN);
++	} while (ret == -EAGAIN && !path->nowait);
+ 
+ out:
+ 	btrfs_release_path(path);
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 1e4ca1e7d2e58d..d067db2619713f 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -9126,26 +9126,31 @@ static void btrfs_encoded_read_endio(struct btrfs_bio *bbio)
+ 		 */
+ 		WRITE_ONCE(priv->status, bbio->bio.bi_status);
+ 	}
+-	if (!atomic_dec_return(&priv->pending))
++	if (atomic_dec_and_test(&priv->pending))
+ 		wake_up(&priv->wait);
+ 	bio_put(&bbio->bio);
+ }
+ 
+ int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
+-					  u64 file_offset, u64 disk_bytenr,
+-					  u64 disk_io_size, struct page **pages)
++					  u64 disk_bytenr, u64 disk_io_size,
++					  struct page **pages)
+ {
+ 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
+-	struct btrfs_encoded_read_private priv = {
+-		.pending = ATOMIC_INIT(1),
+-	};
++	struct btrfs_encoded_read_private *priv;
+ 	unsigned long i = 0;
+ 	struct btrfs_bio *bbio;
++	int ret;
++
++	priv = kmalloc(sizeof(struct btrfs_encoded_read_private), GFP_NOFS);
++	if (!priv)
++		return -ENOMEM;
+ 
+-	init_waitqueue_head(&priv.wait);
++	init_waitqueue_head(&priv->wait);
++	atomic_set(&priv->pending, 1);
++	priv->status = 0;
+ 
+ 	bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, fs_info,
+-			       btrfs_encoded_read_endio, &priv);
++			       btrfs_encoded_read_endio, priv);
+ 	bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
+ 	bbio->inode = inode;
+ 
+@@ -9153,11 +9158,11 @@ int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
+ 		size_t bytes = min_t(u64, disk_io_size, PAGE_SIZE);
+ 
+ 		if (bio_add_page(&bbio->bio, pages[i], bytes, 0) < bytes) {
+-			atomic_inc(&priv.pending);
++			atomic_inc(&priv->pending);
+ 			btrfs_submit_bbio(bbio, 0);
+ 
+ 			bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, fs_info,
+-					       btrfs_encoded_read_endio, &priv);
++					       btrfs_encoded_read_endio, priv);
+ 			bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
+ 			bbio->inode = inode;
+ 			continue;
+@@ -9168,22 +9173,22 @@ int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
+ 		disk_io_size -= bytes;
+ 	} while (disk_io_size);
+ 
+-	atomic_inc(&priv.pending);
++	atomic_inc(&priv->pending);
+ 	btrfs_submit_bbio(bbio, 0);
+ 
+-	if (atomic_dec_return(&priv.pending))
+-		io_wait_event(priv.wait, !atomic_read(&priv.pending));
++	if (atomic_dec_return(&priv->pending))
++		io_wait_event(priv->wait, !atomic_read(&priv->pending));
+ 	/* See btrfs_encoded_read_endio() for ordering. */
+-	return blk_status_to_errno(READ_ONCE(priv.status));
++	ret = blk_status_to_errno(READ_ONCE(priv->status));
++	kfree(priv);
++	return ret;
+ }
+ 
+-static ssize_t btrfs_encoded_read_regular(struct kiocb *iocb,
+-					  struct iov_iter *iter,
+-					  u64 start, u64 lockend,
+-					  struct extent_state **cached_state,
+-					  u64 disk_bytenr, u64 disk_io_size,
+-					  size_t count, bool compressed,
+-					  bool *unlocked)
++ssize_t btrfs_encoded_read_regular(struct kiocb *iocb, struct iov_iter *iter,
++				   u64 start, u64 lockend,
++				   struct extent_state **cached_state,
++				   u64 disk_bytenr, u64 disk_io_size,
++				   size_t count, bool compressed, bool *unlocked)
+ {
+ 	struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
+ 	struct extent_io_tree *io_tree = &inode->io_tree;
+@@ -9203,7 +9208,7 @@ static ssize_t btrfs_encoded_read_regular(struct kiocb *iocb,
+ 		goto out;
+ 		}
+ 
+-	ret = btrfs_encoded_read_regular_fill_pages(inode, start, disk_bytenr,
++	ret = btrfs_encoded_read_regular_fill_pages(inode, disk_bytenr,
+ 						    disk_io_size, pages);
+ 	if (ret)
+ 		goto out;
+@@ -9244,15 +9249,16 @@ static ssize_t btrfs_encoded_read_regular(struct kiocb *iocb,
+ }
+ 
+ ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
+-			   struct btrfs_ioctl_encoded_io_args *encoded)
++			   struct btrfs_ioctl_encoded_io_args *encoded,
++			   struct extent_state **cached_state,
++			   u64 *disk_bytenr, u64 *disk_io_size)
+ {
+ 	struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
+ 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ 	struct extent_io_tree *io_tree = &inode->io_tree;
+ 	ssize_t ret;
+ 	size_t count = iov_iter_count(iter);
+-	u64 start, lockend, disk_bytenr, disk_io_size;
+-	struct extent_state *cached_state = NULL;
++	u64 start, lockend;
+ 	struct extent_map *em;
+ 	bool unlocked = false;
+ 
+@@ -9278,13 +9284,13 @@ ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
+ 					       lockend - start + 1);
+ 		if (ret)
+ 			goto out_unlock_inode;
+-		lock_extent(io_tree, start, lockend, &cached_state);
++		lock_extent(io_tree, start, lockend, cached_state);
+ 		ordered = btrfs_lookup_ordered_range(inode, start,
+ 						     lockend - start + 1);
+ 		if (!ordered)
+ 			break;
+ 		btrfs_put_ordered_extent(ordered);
+-		unlock_extent(io_tree, start, lockend, &cached_state);
++		unlock_extent(io_tree, start, lockend, cached_state);
+ 		cond_resched();
+ 	}
+ 
+@@ -9304,7 +9310,7 @@ ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
+ 		free_extent_map(em);
+ 		em = NULL;
+ 		ret = btrfs_encoded_read_inline(iocb, iter, start, lockend,
+-						&cached_state, extent_start,
++						cached_state, extent_start,
+ 						count, encoded, &unlocked);
+ 		goto out;
+ 	}
+@@ -9317,12 +9323,12 @@ ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
+ 			     inode->vfs_inode.i_size) - iocb->ki_pos;
+ 	if (em->disk_bytenr == EXTENT_MAP_HOLE ||
+ 	    (em->flags & EXTENT_FLAG_PREALLOC)) {
+-		disk_bytenr = EXTENT_MAP_HOLE;
++		*disk_bytenr = EXTENT_MAP_HOLE;
+ 		count = min_t(u64, count, encoded->len);
+ 		encoded->len = count;
+ 		encoded->unencoded_len = count;
+ 	} else if (extent_map_is_compressed(em)) {
+-		disk_bytenr = em->disk_bytenr;
++		*disk_bytenr = em->disk_bytenr;
+ 		/*
+ 		 * Bail if the buffer isn't large enough to return the whole
+ 		 * compressed extent.
+@@ -9331,7 +9337,7 @@ ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
+ 			ret = -ENOBUFS;
+ 			goto out_em;
+ 		}
+-		disk_io_size = em->disk_num_bytes;
++		*disk_io_size = em->disk_num_bytes;
+ 		count = em->disk_num_bytes;
+ 		encoded->unencoded_len = em->ram_bytes;
+ 		encoded->unencoded_offset = iocb->ki_pos - (em->start - em->offset);
+@@ -9341,35 +9347,32 @@ ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
+ 			goto out_em;
+ 		encoded->compression = ret;
+ 	} else {
+-		disk_bytenr = extent_map_block_start(em) + (start - em->start);
++		*disk_bytenr = extent_map_block_start(em) + (start - em->start);
+ 		if (encoded->len > count)
+ 			encoded->len = count;
+ 		/*
+ 		 * Don't read beyond what we locked. This also limits the page
+ 		 * allocations that we'll do.
+ 		 */
+-		disk_io_size = min(lockend + 1, iocb->ki_pos + encoded->len) - start;
+-		count = start + disk_io_size - iocb->ki_pos;
++		*disk_io_size = min(lockend + 1, iocb->ki_pos + encoded->len) - start;
++		count = start + *disk_io_size - iocb->ki_pos;
+ 		encoded->len = count;
+ 		encoded->unencoded_len = count;
+-		disk_io_size = ALIGN(disk_io_size, fs_info->sectorsize);
++		*disk_io_size = ALIGN(*disk_io_size, fs_info->sectorsize);
+ 	}
+ 	free_extent_map(em);
+ 	em = NULL;
+ 
+-	if (disk_bytenr == EXTENT_MAP_HOLE) {
+-		unlock_extent(io_tree, start, lockend, &cached_state);
++	if (*disk_bytenr == EXTENT_MAP_HOLE) {
++		unlock_extent(io_tree, start, lockend, cached_state);
+ 		btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
+ 		unlocked = true;
+ 		ret = iov_iter_zero(count, iter);
+ 		if (ret != count)
+ 			ret = -EFAULT;
+ 	} else {
+-		ret = btrfs_encoded_read_regular(iocb, iter, start, lockend,
+-						 &cached_state, disk_bytenr,
+-						 disk_io_size, count,
+-						 encoded->compression,
+-						 &unlocked);
++		ret = -EIOCBQUEUED;
++		goto out_em;
+ 	}
+ 
+ out:
+@@ -9378,10 +9381,11 @@ ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
+ out_em:
+ 	free_extent_map(em);
+ out_unlock_extent:
+-	if (!unlocked)
+-		unlock_extent(io_tree, start, lockend, &cached_state);
++	/* Leave inode and extent locked if we need to do a read. */
++	if (!unlocked && ret != -EIOCBQUEUED)
++		unlock_extent(io_tree, start, lockend, cached_state);
+ out_unlock_inode:
+-	if (!unlocked)
++	if (!unlocked && ret != -EIOCBQUEUED)
+ 		btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
+ 	return ret;
+ }
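The encoded-read rework above keeps the usual bias of one on priv->pending so the completion handler cannot observe zero while bios are still being submitted, and the endio path's switch to atomic_dec_and_test() wakes the waiter exactly when the final reference drops. A single-threaded sketch of that bias-counting pattern in C11 atomics; the "bio" loop is schematic:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int pending;

static void complete_one(void)
{
	/* atomic_dec_and_test() analogue: true only for the final dropper */
	if (atomic_fetch_sub(&pending, 1) == 1)
		printf("last completion: wake the waiter\n");
}

int main(void)
{
	atomic_init(&pending, 1);	/* bias held by the submitter */

	for (int i = 0; i < 3; i++) {	/* submit three "bios" */
		atomic_fetch_add(&pending, 1);
		complete_one();		/* each one completes eventually */
	}

	complete_one();			/* submitter drops its bias last */
	return 0;
}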
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 226c91fe31a707..3e3722a7323936 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -4514,12 +4514,17 @@ static int btrfs_ioctl_encoded_read(struct file *file, void __user *argp,
+ 	size_t copy_end_kernel = offsetofend(struct btrfs_ioctl_encoded_io_args,
+ 					     flags);
+ 	size_t copy_end;
++	struct btrfs_inode *inode = BTRFS_I(file_inode(file));
++	struct btrfs_fs_info *fs_info = inode->root->fs_info;
++	struct extent_io_tree *io_tree = &inode->io_tree;
+ 	struct iovec iovstack[UIO_FASTIOV];
+ 	struct iovec *iov = iovstack;
+ 	struct iov_iter iter;
+ 	loff_t pos;
+ 	struct kiocb kiocb;
+ 	ssize_t ret;
++	u64 disk_bytenr, disk_io_size;
++	struct extent_state *cached_state = NULL;
+ 
+ 	if (!capable(CAP_SYS_ADMIN)) {
+ 		ret = -EPERM;
+@@ -4572,7 +4577,32 @@ static int btrfs_ioctl_encoded_read(struct file *file, void __user *argp,
+ 	init_sync_kiocb(&kiocb, file);
+ 	kiocb.ki_pos = pos;
+ 
+-	ret = btrfs_encoded_read(&kiocb, &iter, &args);
++	ret = btrfs_encoded_read(&kiocb, &iter, &args, &cached_state,
++				 &disk_bytenr, &disk_io_size);
++
++	if (ret == -EIOCBQUEUED) {
++		bool unlocked = false;
++		u64 start, lockend, count;
++
++		start = ALIGN_DOWN(kiocb.ki_pos, fs_info->sectorsize);
++		lockend = start + BTRFS_MAX_UNCOMPRESSED - 1;
++
++		if (args.compression)
++			count = disk_io_size;
++		else
++			count = args.len;
++
++		ret = btrfs_encoded_read_regular(&kiocb, &iter, start, lockend,
++						 &cached_state, disk_bytenr,
++						 disk_io_size, count,
++						 args.compression, &unlocked);
++
++		if (!unlocked) {
++			unlock_extent(io_tree, start, lockend, &cached_state);
++			btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
++		}
++	}
++
+ 	if (ret >= 0) {
+ 		fsnotify_access(file);
+ 		if (copy_to_user(argp + copy_end,
+diff --git a/fs/btrfs/ref-verify.c b/fs/btrfs/ref-verify.c
+index 9522a8b79d22b5..2928abf7eb8271 100644
+--- a/fs/btrfs/ref-verify.c
++++ b/fs/btrfs/ref-verify.c
+@@ -857,6 +857,7 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
+ "dropping a ref for a root that doesn't have a ref on the block");
+ 			dump_block_entry(fs_info, be);
+ 			dump_ref_action(fs_info, ra);
++			rb_erase(&ref->node, &be->refs);
+ 			kfree(ref);
+ 			kfree(ra);
+ 			goto out_unlock;
+diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
+index b068469871f8e5..0cb11dcd10cd4b 100644
+--- a/fs/btrfs/send.c
++++ b/fs/btrfs/send.c
+@@ -5677,7 +5677,7 @@ static int send_encoded_extent(struct send_ctx *sctx, struct btrfs_path *path,
+ 	 * Note that send_buf is a mapping of send_buf_pages, so this is really
+ 	 * reading into send_buf.
+ 	 */
+-	ret = btrfs_encoded_read_regular_fill_pages(BTRFS_I(inode), offset,
++	ret = btrfs_encoded_read_regular_fill_pages(BTRFS_I(inode),
+ 						    disk_bytenr, disk_num_bytes,
+ 						    sctx->send_buf_pages +
+ 						    (data_offset >> PAGE_SHIFT));
+diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
+index c4a5fd94bbbb3b..cf92b75745e2a5 100644
+--- a/fs/ceph/mds_client.c
++++ b/fs/ceph/mds_client.c
+@@ -5609,9 +5609,9 @@ void send_flush_mdlog(struct ceph_mds_session *s)
+ 
+ static int ceph_mds_auth_match(struct ceph_mds_client *mdsc,
+ 			       struct ceph_mds_cap_auth *auth,
++			       const struct cred *cred,
+ 			       char *tpath)
+ {
+-	const struct cred *cred = get_current_cred();
+ 	u32 caller_uid = from_kuid(&init_user_ns, cred->fsuid);
+ 	u32 caller_gid = from_kgid(&init_user_ns, cred->fsgid);
+ 	struct ceph_client *cl = mdsc->fsc->client;
+@@ -5734,8 +5734,9 @@ int ceph_mds_check_access(struct ceph_mds_client *mdsc, char *tpath, int mask)
+ 	for (i = 0; i < mdsc->s_cap_auths_num; i++) {
+ 		struct ceph_mds_cap_auth *s = &mdsc->s_cap_auths[i];
+ 
+-		err = ceph_mds_auth_match(mdsc, s, tpath);
++		err = ceph_mds_auth_match(mdsc, s, cred, tpath);
+ 		if (err < 0) {
++			put_cred(cred);
+ 			return err;
+ 		} else if (err > 0) {
+ 			/* always follow the last auth caps' permission */
+@@ -5751,6 +5752,8 @@ int ceph_mds_check_access(struct ceph_mds_client *mdsc, char *tpath, int mask)
+ 		}
+ 	}
+ 
++	put_cred(cred);
++
+ 	doutc(cl, "root_squash_perms %d, rw_perms_s %p\n", root_squash_perms,
+ 	      rw_perms_s);
+ 	if (root_squash_perms && rw_perms_s == NULL) {
+diff --git a/fs/ceph/super.c b/fs/ceph/super.c
+index 73f321b52895ea..86480e5a215e51 100644
+--- a/fs/ceph/super.c
++++ b/fs/ceph/super.c
+@@ -285,7 +285,9 @@ static int ceph_parse_new_source(const char *dev_name, const char *dev_name_end,
+ 	size_t len;
+ 	struct ceph_fsid fsid;
+ 	struct ceph_parse_opts_ctx *pctx = fc->fs_private;
++	struct ceph_options *opts = pctx->copts;
+ 	struct ceph_mount_options *fsopt = pctx->opts;
++	const char *name_start = dev_name;
+ 	char *fsid_start, *fs_name_start;
+ 
+ 	if (*dev_name_end != '=') {
+@@ -296,8 +298,14 @@ static int ceph_parse_new_source(const char *dev_name, const char *dev_name_end,
+ 	fsid_start = strchr(dev_name, '@');
+ 	if (!fsid_start)
+ 		return invalfc(fc, "missing cluster fsid");
+-	++fsid_start; /* start of cluster fsid */
++	len = fsid_start - name_start;
++	kfree(opts->name);
++	opts->name = kstrndup(name_start, len, GFP_KERNEL);
++	if (!opts->name)
++		return -ENOMEM;
++	dout("using %s entity name", opts->name);
+ 
++	++fsid_start; /* start of cluster fsid */
+ 	fs_name_start = strchr(fsid_start, '.');
+ 	if (!fs_name_start)
+ 		return invalfc(fc, "missing file system name");
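The ceph mount-source change above slices the entity name out of the new-style device string, i.e. everything before the '@' in "name@fsid.fsname", with kstrndup(). The same step in standalone form with strchr()/strndup(); entity_name() is an invented wrapper:

#define _POSIX_C_SOURCE 200809L
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Return the "name" part of "name@fsid.fsname", or NULL on bad input. */
static char *entity_name(const char *dev)
{
	const char *at = strchr(dev, '@');

	if (!at)
		return NULL;	/* missing cluster fsid */
	return strndup(dev, (size_t)(at - dev));
}

int main(void)
{
	char *name = entity_name("admin@01234567-89ab-cdef-0123-456789abcdef.cephfs");

	printf("entity name: %s\n", name ? name : "(invalid)");
	free(name);
	return 0;
}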
+diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
+index edf205093f4358..b9ffb2ee9548ae 100644
+--- a/fs/f2fs/segment.c
++++ b/fs/f2fs/segment.c
+@@ -1290,16 +1290,18 @@ static int __submit_discard_cmd(struct f2fs_sb_info *sbi,
+ 						wait_list, issued);
+ 			return 0;
+ 		}
+-
+-		/*
+-		 * Issue discard for conventional zones only if the device
+-		 * supports discard.
+-		 */
+-		if (!bdev_max_discard_sectors(bdev))
+-			return -EOPNOTSUPP;
+ 	}
+ #endif
+ 
++	/*
++	 * stop issuing discard for any of the below cases:
++	 * 1. the device is a conventional zone, but it doesn't support discard.
++	 * 2. the device is a regular device, and after a snapshot it doesn't
++	 * support discard.
++	 */
++	if (!bdev_max_discard_sectors(bdev))
++		return -EOPNOTSUPP;
++
+ 	trace_f2fs_issue_discard(bdev, dc->di.start, dc->di.len);
+ 
+ 	lstart = dc->di.lstart;
+diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
+index 983fdd98fc3755..a622056f27f3a2 100644
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -1748,6 +1748,18 @@ static int f2fs_freeze(struct super_block *sb)
+ 
+ static int f2fs_unfreeze(struct super_block *sb)
+ {
++	struct f2fs_sb_info *sbi = F2FS_SB(sb);
++
++	/*
++	 * Creating a snapshot on a mounted lvm device updates its
++	 * discard_max_bytes to zero, so drop all remaining discards here.
++	 * We don't need to disable real-time discard because discard_max_bytes
++	 * will recover after the snapshot is removed.
++	 */
++	if (test_opt(sbi, DISCARD) && !f2fs_hw_support_discard(sbi))
++		f2fs_issue_discard_timeout(sbi);
++
+ 	clear_sbi_flag(F2FS_SB(sb), SBI_IS_FREEZING);
+ 	return 0;
+ }
+diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
+index 984f8e6379dd47..6d0455973d641e 100644
+--- a/fs/nfsd/export.c
++++ b/fs/nfsd/export.c
+@@ -1425,9 +1425,12 @@ static int e_show(struct seq_file *m, void *p)
+ 		return 0;
+ 	}
+ 
+-	exp_get(exp);
++	if (!cache_get_rcu(&exp->h))
++		return 0;
++
+ 	if (cache_check(cd, &exp->h, NULL))
+ 		return 0;
++
+ 	exp_put(exp);
+ 	return svc_export_show(m, cd, cp);
+ }
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index d3cfc647153993..57f8818aa47c5f 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -1660,6 +1660,14 @@ static void release_open_stateid(struct nfs4_ol_stateid *stp)
+ 	free_ol_stateid_reaplist(&reaplist);
+ }
+ 
++static bool nfs4_openowner_unhashed(struct nfs4_openowner *oo)
++{
++	lockdep_assert_held(&oo->oo_owner.so_client->cl_lock);
++
++	return list_empty(&oo->oo_owner.so_strhash) &&
++		list_empty(&oo->oo_perclient);
++}
++
+ static void unhash_openowner_locked(struct nfs4_openowner *oo)
+ {
+ 	struct nfs4_client *clp = oo->oo_owner.so_client;
+@@ -4975,6 +4983,12 @@ init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open)
+ 	spin_lock(&oo->oo_owner.so_client->cl_lock);
+ 	spin_lock(&fp->fi_lock);
+ 
++	if (nfs4_openowner_unhashed(oo)) {
++		mutex_unlock(&stp->st_mutex);
++		stp = NULL;
++		goto out_unlock;
++	}
++
+ 	retstp = nfsd4_find_existing_open(fp, open);
+ 	if (retstp)
+ 		goto out_unlock;
+@@ -6126,6 +6140,11 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
+ 
+ 	if (!stp) {
+ 		stp = init_open_stateid(fp, open);
++		if (!stp) {
++			status = nfserr_jukebox;
++			goto out;
++		}
++
+ 		if (!open->op_stp)
+ 			new_stp = true;
+ 	}
+diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
+index 35fd3e3e177807..baa54c718bd722 100644
+--- a/fs/overlayfs/inode.c
++++ b/fs/overlayfs/inode.c
+@@ -616,8 +616,13 @@ static int ovl_security_fileattr(const struct path *realpath, struct fileattr *f
+ 	struct file *file;
+ 	unsigned int cmd;
+ 	int err;
++	unsigned int flags;
++
++	flags = O_RDONLY;
++	if (force_o_largefile())
++		flags |= O_LARGEFILE;
+ 
+-	file = dentry_open(realpath, O_RDONLY, current_cred());
++	file = dentry_open(realpath, flags, current_cred());
+ 	if (IS_ERR(file))
+ 		return PTR_ERR(file);
+ 
+diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c
+index edc9216f6e27ad..8f080046c59d9a 100644
+--- a/fs/overlayfs/util.c
++++ b/fs/overlayfs/util.c
+@@ -197,6 +197,9 @@ void ovl_dentry_init_flags(struct dentry *dentry, struct dentry *upperdentry,
+ 
+ bool ovl_dentry_weird(struct dentry *dentry)
+ {
++	if (!d_can_lookup(dentry) && !d_is_file(dentry) && !d_is_symlink(dentry))
++		return true;
++
+ 	return dentry->d_flags & (DCACHE_NEED_AUTOMOUNT |
+ 				  DCACHE_MANAGE_TRANSIT |
+ 				  DCACHE_OP_HASH |
+diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
+index 7a85735d584f35..e376f48c4b8bf4 100644
+--- a/fs/proc/kcore.c
++++ b/fs/proc/kcore.c
+@@ -600,6 +600,7 @@ static ssize_t read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter)
+ 					ret = -EFAULT;
+ 					goto out;
+ 				}
++				ret = 0;
+ 			/*
+ 			 * We know the bounce buffer is safe to copy from, so
+ 			 * use _copy_to_iter() directly.
+diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
+index b40410cd39af42..71c0ce31a4c4db 100644
+--- a/fs/quota/dquot.c
++++ b/fs/quota/dquot.c
+@@ -689,6 +689,8 @@ int dquot_writeback_dquots(struct super_block *sb, int type)
+ 
+ 	WARN_ON_ONCE(!rwsem_is_locked(&sb->s_umount));
+ 
++	flush_delayed_work(&quota_release_work);
++
+ 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+ 		if (type != -1 && cnt != type)
+ 			continue;
+diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
+index d95409f3cba667..02ebcbc4882f5b 100644
+--- a/fs/xfs/libxfs/xfs_sb.c
++++ b/fs/xfs/libxfs/xfs_sb.c
+@@ -297,13 +297,6 @@ xfs_validate_sb_write(
+ 	 * the kernel cannot support since we checked for unsupported bits in
+ 	 * the read verifier, which means that memory is corrupt.
+ 	 */
+-	if (xfs_sb_has_compat_feature(sbp, XFS_SB_FEAT_COMPAT_UNKNOWN)) {
+-		xfs_warn(mp,
+-"Corruption detected in superblock compatible features (0x%x)!",
+-			(sbp->sb_features_compat & XFS_SB_FEAT_COMPAT_UNKNOWN));
+-		return -EFSCORRUPTED;
+-	}
+-
+ 	if (!xfs_is_readonly(mp) &&
+ 	    xfs_sb_has_ro_compat_feature(sbp, XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
+ 		xfs_alert(mp,
+diff --git a/include/drm/drm_panic.h b/include/drm/drm_panic.h
+index 54085d5d05c345..f4e1fa9ae607a8 100644
+--- a/include/drm/drm_panic.h
++++ b/include/drm/drm_panic.h
+@@ -64,6 +64,8 @@ struct drm_scanout_buffer {
+ 
+ };
+ 
++#ifdef CONFIG_DRM_PANIC
++
+ /**
+  * drm_panic_trylock - try to enter the panic printing critical section
+  * @dev: struct drm_device
+@@ -149,4 +151,16 @@ struct drm_scanout_buffer {
+ #define drm_panic_unlock(dev, flags) \
+ 	raw_spin_unlock_irqrestore(&(dev)->mode_config.panic_lock, flags)
+ 
++#else
++
++static inline bool drm_panic_trylock(struct drm_device *dev, unsigned long flags)
++{
++	return true;
++}
++
++static inline void drm_panic_lock(struct drm_device *dev, unsigned long flags) {}
++static inline void drm_panic_unlock(struct drm_device *dev, unsigned long flags) {}
++
++#endif
++
+ #endif /* __DRM_PANIC_H__ */
+diff --git a/include/linux/kasan.h b/include/linux/kasan.h
+index 00a3bf7c0d8f0e..6bbfc8aa42e8f4 100644
+--- a/include/linux/kasan.h
++++ b/include/linux/kasan.h
+@@ -29,6 +29,9 @@ typedef unsigned int __bitwise kasan_vmalloc_flags_t;
+ #define KASAN_VMALLOC_VM_ALLOC		((__force kasan_vmalloc_flags_t)0x02u)
+ #define KASAN_VMALLOC_PROT_NORMAL	((__force kasan_vmalloc_flags_t)0x04u)
+ 
++#define KASAN_VMALLOC_PAGE_RANGE 0x1 /* Apply existing page range */
++#define KASAN_VMALLOC_TLB_FLUSH  0x2 /* TLB flush */
++
+ #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
+ 
+ #include <linux/pgtable.h>
+@@ -564,7 +567,8 @@ void kasan_populate_early_vm_area_shadow(void *start, unsigned long size);
+ int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
+ void kasan_release_vmalloc(unsigned long start, unsigned long end,
+ 			   unsigned long free_region_start,
+-			   unsigned long free_region_end);
++			   unsigned long free_region_end,
++			   unsigned long flags);
+ 
+ #else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
+ 
+@@ -579,7 +583,8 @@ static inline int kasan_populate_vmalloc(unsigned long start,
+ static inline void kasan_release_vmalloc(unsigned long start,
+ 					 unsigned long end,
+ 					 unsigned long free_region_start,
+-					 unsigned long free_region_end) { }
++					 unsigned long free_region_end,
++					 unsigned long flags) { }
+ 
+ #endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
+ 
+@@ -614,7 +619,8 @@ static inline int kasan_populate_vmalloc(unsigned long start,
+ static inline void kasan_release_vmalloc(unsigned long start,
+ 					 unsigned long end,
+ 					 unsigned long free_region_start,
+-					 unsigned long free_region_end) { }
++					 unsigned long free_region_end,
++					 unsigned long flags) { }
+ 
+ static inline void *kasan_unpoison_vmalloc(const void *start,
+ 					   unsigned long size,
+diff --git a/include/linux/util_macros.h b/include/linux/util_macros.h
+index 6bb460c3e818b3..825487fb66faf9 100644
+--- a/include/linux/util_macros.h
++++ b/include/linux/util_macros.h
+@@ -4,19 +4,6 @@
+ 
+ #include <linux/math.h>
+ 
+-#define __find_closest(x, a, as, op)					\
+-({									\
+-	typeof(as) __fc_i, __fc_as = (as) - 1;				\
+-	typeof(x) __fc_x = (x);						\
+-	typeof(*a) const *__fc_a = (a);					\
+-	for (__fc_i = 0; __fc_i < __fc_as; __fc_i++) {			\
+-		if (__fc_x op DIV_ROUND_CLOSEST(__fc_a[__fc_i] +	\
+-						__fc_a[__fc_i + 1], 2))	\
+-			break;						\
+-	}								\
+-	(__fc_i);							\
+-})
+-
+ /**
+  * find_closest - locate the closest element in a sorted array
+  * @x: The reference value.
+@@ -25,8 +12,27 @@
+  * @as: Size of 'a'.
+  *
+  * Returns the index of the element closest to 'x'.
++ * Note: If the array contains negative (or a mix of negative and positive)
++ *       numbers, be sure that 'x' is of a signed type to get correct results.
+  */
+-#define find_closest(x, a, as) __find_closest(x, a, as, <=)
++#define find_closest(x, a, as)						\
++({									\
++	typeof(as) __fc_i, __fc_as = (as) - 1;				\
++	long __fc_mid_x, __fc_x = (x);					\
++	long __fc_left, __fc_right;					\
++	typeof(*a) const *__fc_a = (a);					\
++	for (__fc_i = 0; __fc_i < __fc_as; __fc_i++) {			\
++		__fc_mid_x = (__fc_a[__fc_i] + __fc_a[__fc_i + 1]) / 2;	\
++		if (__fc_x <= __fc_mid_x) {				\
++			__fc_left = __fc_x - __fc_a[__fc_i];		\
++			__fc_right = __fc_a[__fc_i + 1] - __fc_x;	\
++			if (__fc_right < __fc_left)			\
++				__fc_i++;				\
++			break;						\
++		}							\
++	}								\
++	(__fc_i);							\
++})
+ 
+ /**
+  * find_closest_descending - locate the closest element in a sorted array
+@@ -36,9 +42,27 @@
+  * @as: Size of 'a'.
+  *
+  * Similar to find_closest() but 'a' is expected to be sorted in descending
+- * order.
++ * order. The iteration is done in reverse order, so that the comparison
++ * of '__fc_right' and '__fc_left' also works for unsigned numbers.
+  */
+-#define find_closest_descending(x, a, as) __find_closest(x, a, as, >=)
++#define find_closest_descending(x, a, as)				\
++({									\
++	typeof(as) __fc_i, __fc_as = (as) - 1;				\
++	long __fc_mid_x, __fc_x = (x);					\
++	long __fc_left, __fc_right;					\
++	typeof(*a) const *__fc_a = (a);					\
++	for (__fc_i = __fc_as; __fc_i >= 1; __fc_i--) {			\
++		__fc_mid_x = (__fc_a[__fc_i] + __fc_a[__fc_i - 1]) / 2;	\
++		if (__fc_x <= __fc_mid_x) {				\
++			__fc_left = __fc_x - __fc_a[__fc_i];		\
++			__fc_right = __fc_a[__fc_i - 1] - __fc_x;	\
++			if (__fc_right < __fc_left)			\
++				__fc_i--;				\
++			break;						\
++		}							\
++	}								\
++	(__fc_i);							\
++})
+ 
+ /**
+  * is_insidevar - check if the @ptr points inside the @var memory range.
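
To make the selection rule of the rewritten find_closest() concrete, here is
a stand-alone restatement in plain C (an illustrative sketch: closest_index()
is a hypothetical helper mirroring the macro body, not a kernel API):

	#include <stdio.h>

	/* Walk the ascending array, stop at the first interval whose midpoint
	 * is >= x, then nudge the index up when the right neighbour is
	 * strictly closer, matching the macro's tie-breaking. */
	static int closest_index(long x, const long *a, int as)
	{
		int i;

		for (i = 0; i < as - 1; i++) {
			long mid = (a[i] + a[i + 1]) / 2;

			if (x <= mid) {
				if (a[i + 1] - x < x - a[i])
					i++;
				break;
			}
		}
		return i;
	}

	int main(void)
	{
		const long vals[] = { -100, -50, 0, 50, 100 };

		/* -60 is nearer to -50 than to -100, so this prints 1 */
		printf("%d\n", closest_index(-60, vals, 5));
		return 0;
	}
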
+diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
+index 6dc258993b1770..2acc7687e017a9 100644
+--- a/include/uapi/linux/if_link.h
++++ b/include/uapi/linux/if_link.h
+@@ -1292,6 +1292,19 @@ enum netkit_mode {
+ 	NETKIT_L3,
+ };
+ 
++/* NETKIT_SCRUB_NONE leaves clearing skb->{mark,priority} up to
++ * the BPF program if attached. This also means the latter can
++ * consume the two fields if they were populated earlier.
++ *
++ * NETKIT_SCRUB_DEFAULT zeroes skb->{mark,priority} fields before
++ * invoking the attached BPF program when the peer device resides
++ * in a different network namespace. This is the default behavior.
++ */
++enum netkit_scrub {
++	NETKIT_SCRUB_NONE,
++	NETKIT_SCRUB_DEFAULT,
++};
++
+ enum {
+ 	IFLA_NETKIT_UNSPEC,
+ 	IFLA_NETKIT_PEER_INFO,
+@@ -1299,6 +1312,8 @@ enum {
+ 	IFLA_NETKIT_POLICY,
+ 	IFLA_NETKIT_PEER_POLICY,
+ 	IFLA_NETKIT_MODE,
++	IFLA_NETKIT_SCRUB,
++	IFLA_NETKIT_PEER_SCRUB,
+ 	__IFLA_NETKIT_MAX,
+ };
+ #define IFLA_NETKIT_MAX	(__IFLA_NETKIT_MAX - 1)
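
As a rough illustration of the two scrub modes documented above, a minimal
sketch (the helper name and shape are invented for illustration and assume
the enum from this header; this is not code from the netkit driver):

	#include <linux/skbuff.h>

	/* With NETKIT_SCRUB_DEFAULT and a peer in another network namespace,
	 * skb->mark and skb->priority are zeroed before any attached BPF
	 * program runs; NETKIT_SCRUB_NONE leaves both fields for the program
	 * to consume or clear itself. */
	static void netkit_maybe_scrub(struct sk_buff *skb,
				       enum netkit_scrub scrub,
				       bool crosses_netns)
	{
		if (scrub == NETKIT_SCRUB_DEFAULT && crosses_netns) {
			skb->mark = 0;
			skb->priority = 0;
		}
	}
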
+diff --git a/kernel/signal.c b/kernel/signal.c
+index cbabb2d05e0ac8..2ae45e6eb6bb8e 100644
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -1986,14 +1986,15 @@ int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
+ 	 * into t->pending).
+ 	 *
+ 	 * Where type is not PIDTYPE_PID, signals must be delivered to the
+-	 * process. In this case, prefer to deliver to current if it is in
+-	 * the same thread group as the target process, which avoids
+-	 * unnecessarily waking up a potentially idle task.
++	 * process. In this case, prefer to deliver to current if it is in the
++	 * same thread group as the target process and its sighand is stable,
++	 * which avoids unnecessarily waking up a potentially idle task.
+ 	 */
+ 	t = pid_task(pid, type);
+ 	if (!t)
+ 		goto ret;
+-	if (type != PIDTYPE_PID && same_thread_group(t, current))
++	if (type != PIDTYPE_PID &&
++	    same_thread_group(t, current) && !current->exit_state)
+ 		t = current;
+ 	if (!likely(lock_task_sighand(t, &flags)))
+ 		goto ret;
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 4c28dd177ca650..3dd3b97d8049ae 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -883,6 +883,10 @@ static void profile_graph_return(struct ftrace_graph_ret *trace,
+ }
+ 
+ static struct fgraph_ops fprofiler_ops = {
++	.ops = {
++		.flags = FTRACE_OPS_FL_INITIALIZED,
++		INIT_OPS_HASH(fprofiler_ops.ops)
++	},
+ 	.entryfunc = &profile_graph_entry,
+ 	.retfunc = &profile_graph_return,
+ };
+@@ -5076,6 +5080,9 @@ ftrace_mod_callback(struct trace_array *tr, struct ftrace_hash *hash,
+ 	char *func;
+ 	int ret;
+ 
++	if (!tr)
++		return -ENODEV;
++
+ 	/* match_records() modifies func, and we need the original */
+ 	func = kstrdup(func_orig, GFP_KERNEL);
+ 	if (!func)
+diff --git a/lib/kunit/debugfs.c b/lib/kunit/debugfs.c
+index d548750a325ace..b25d214b93e161 100644
+--- a/lib/kunit/debugfs.c
++++ b/lib/kunit/debugfs.c
+@@ -212,8 +212,11 @@ void kunit_debugfs_create_suite(struct kunit_suite *suite)
+ 
+ err:
+ 	string_stream_destroy(suite->log);
+-	kunit_suite_for_each_test_case(suite, test_case)
++	suite->log = NULL;
++	kunit_suite_for_each_test_case(suite, test_case) {
+ 		string_stream_destroy(test_case->log);
++		test_case->log = NULL;
++	}
+ }
+ 
+ void kunit_debugfs_destroy_suite(struct kunit_suite *suite)
+diff --git a/lib/kunit/kunit-test.c b/lib/kunit/kunit-test.c
+index 37e02be1e71015..d9c781c859fde1 100644
+--- a/lib/kunit/kunit-test.c
++++ b/lib/kunit/kunit-test.c
+@@ -805,6 +805,8 @@ static void kunit_device_driver_test(struct kunit *test)
+ 	struct device *test_device;
+ 	struct driver_test_state *test_state = kunit_kzalloc(test, sizeof(*test_state), GFP_KERNEL);
+ 
++	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, test_state);
++
+ 	test->priv = test_state;
+ 	test_driver = kunit_driver_create(test, "my_driver");
+ 
+diff --git a/lib/maple_tree.c b/lib/maple_tree.c
+index 3619301dda2ebe..8d83e217271967 100644
+--- a/lib/maple_tree.c
++++ b/lib/maple_tree.c
+@@ -3439,9 +3439,20 @@ static inline int mas_root_expand(struct ma_state *mas, void *entry)
+ 	return slot;
+ }
+ 
++/*
++ * mas_store_root() - Store a value into the root.
++ * @mas: The maple state
++ * @entry: The entry to store.
++ *
++ * There is no root node now and we are storing a value into the root - this
++ * function either assigns the pointer or expands into a node.
++ */
+ static inline void mas_store_root(struct ma_state *mas, void *entry)
+ {
+-	if (likely((mas->last != 0) || (mas->index != 0)))
++	if (!entry) {
++		if (!mas->index)
++			rcu_assign_pointer(mas->tree->ma_root, NULL);
++	} else if (likely((mas->last != 0) || (mas->index != 0)))
+ 		mas_root_expand(mas, entry);
+ 	else if (((unsigned long) (entry) & 3) == 2)
+ 		mas_root_expand(mas, entry);
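
A usage sketch of the new NULL branch (illustrative only; the tree and range
below are invented for the example):

	#include <linux/maple_tree.h>

	struct maple_tree tree = MTREE_INIT(tree, 0);

	/* Storing NULL over a range that starts at index 0 now assigns the
	 * root pointer directly; previously mas->last != 0 sent the store
	 * through mas_root_expand(), allocating a node just to record NULLs
	 * in an otherwise empty tree. */
	mtree_store_range(&tree, 0, 5, NULL, GFP_KERNEL);
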
+diff --git a/mm/damon/tests/vaddr-kunit.h b/mm/damon/tests/vaddr-kunit.h
+index a339d117150fba..a149e354bb2689 100644
+--- a/mm/damon/tests/vaddr-kunit.h
++++ b/mm/damon/tests/vaddr-kunit.h
+@@ -300,6 +300,7 @@ static void damon_test_split_evenly(struct kunit *test)
+ 	damon_test_split_evenly_fail(test, 0, 100, 0);
+ 	damon_test_split_evenly_succ(test, 0, 100, 10);
+ 	damon_test_split_evenly_succ(test, 5, 59, 5);
++	damon_test_split_evenly_succ(test, 0, 3, 2);
+ 	damon_test_split_evenly_fail(test, 5, 6, 2);
+ }
+ 
+diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c
+index 08cfd22b524925..dba3b2f4d75813 100644
+--- a/mm/damon/vaddr.c
++++ b/mm/damon/vaddr.c
+@@ -67,6 +67,7 @@ static int damon_va_evenly_split_region(struct damon_target *t,
+ 	unsigned long sz_orig, sz_piece, orig_end;
+ 	struct damon_region *n = NULL, *next;
+ 	unsigned long start;
++	unsigned int i;
+ 
+ 	if (!r || !nr_pieces)
+ 		return -EINVAL;
+@@ -80,8 +81,7 @@ static int damon_va_evenly_split_region(struct damon_target *t,
+ 
+ 	r->ar.end = r->ar.start + sz_piece;
+ 	next = damon_next_region(r);
+-	for (start = r->ar.end; start + sz_piece <= orig_end;
+-			start += sz_piece) {
++	for (start = r->ar.end, i = 1; i < nr_pieces; start += sz_piece, i++) {
+ 		n = damon_new_region(start, start + sz_piece);
+ 		if (!n)
+ 			return -ENOMEM;
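
A worked example of why the loop bound changed, using the numbers from the
new kunit case above: splitting [0, 3) into nr_pieces = 2 yields sz_piece = 1,
and the old condition start + sz_piece <= orig_end kept creating regions until
the space ran out, producing three regions [0, 1), [1, 2), [2, 3) where two
were requested. The counter-based loop stops after piece i = 1 and the
function's existing tail extends the last region to orig_end, giving exactly
[0, 1) and [1, 3).
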
+diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c
+index d6210ca48ddab9..88d1c9dcb50721 100644
+--- a/mm/kasan/shadow.c
++++ b/mm/kasan/shadow.c
+@@ -489,7 +489,8 @@ static int kasan_depopulate_vmalloc_pte(pte_t *ptep, unsigned long addr,
+  */
+ void kasan_release_vmalloc(unsigned long start, unsigned long end,
+ 			   unsigned long free_region_start,
+-			   unsigned long free_region_end)
++			   unsigned long free_region_end,
++			   unsigned long flags)
+ {
+ 	void *shadow_start, *shadow_end;
+ 	unsigned long region_start, region_end;
+@@ -522,12 +523,17 @@ void kasan_release_vmalloc(unsigned long start, unsigned long end,
+ 			__memset(shadow_start, KASAN_SHADOW_INIT, shadow_end - shadow_start);
+ 			return;
+ 		}
+-		apply_to_existing_page_range(&init_mm,
++
++
++		if (flags & KASAN_VMALLOC_PAGE_RANGE)
++			apply_to_existing_page_range(&init_mm,
+ 					     (unsigned long)shadow_start,
+ 					     size, kasan_depopulate_vmalloc_pte,
+ 					     NULL);
+-		flush_tlb_kernel_range((unsigned long)shadow_start,
+-				       (unsigned long)shadow_end);
++
++		if (flags & KASAN_VMALLOC_TLB_FLUSH)
++			flush_tlb_kernel_range((unsigned long)shadow_start,
++					       (unsigned long)shadow_end);
+ 	}
+ }
+ 
+diff --git a/mm/slab.h b/mm/slab.h
+index 6c6fe6d630ce3d..92ca5ff2037534 100644
+--- a/mm/slab.h
++++ b/mm/slab.h
+@@ -73,6 +73,11 @@ struct slab {
+ 						struct {
+ 							unsigned inuse:16;
+ 							unsigned objects:15;
++							/*
++							 * If slab debugging is enabled then the
++							 * frozen bit can be reused to indicate
++							 * that the slab was corrupted
++							 */
+ 							unsigned frozen:1;
+ 						};
+ 					};
+diff --git a/mm/slab_common.c b/mm/slab_common.c
+index 893d3205991518..477fa471da1859 100644
+--- a/mm/slab_common.c
++++ b/mm/slab_common.c
+@@ -230,7 +230,7 @@ static struct kmem_cache *create_cache(const char *name,
+ 	if (args->use_freeptr_offset &&
+ 	    (args->freeptr_offset >= object_size ||
+ 	     !(flags & SLAB_TYPESAFE_BY_RCU) ||
+-	     !IS_ALIGNED(args->freeptr_offset, sizeof(freeptr_t))))
++	     !IS_ALIGNED(args->freeptr_offset, __alignof__(freeptr_t))))
+ 		goto out;
+ 
+ 	err = -ENOMEM;
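
For intuition on the sizeof versus __alignof__ change (an example, not taken
from the commit): the two differ for freeptr_t on an architecture such as
m68k, where an unsigned long occupies 4 bytes but only requires 2-byte
alignment. There a freeptr_offset of 2 is perfectly serviceable, yet the old
IS_ALIGNED(offset, sizeof(freeptr_t)) check rejected it; aligning to
__alignof__(freeptr_t) accepts exactly the offsets the CPU can load and store.
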
+diff --git a/mm/slub.c b/mm/slub.c
+index 5b832512044e3e..15ba89fef89a1f 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -1423,6 +1423,11 @@ static int check_slab(struct kmem_cache *s, struct slab *slab)
+ 			slab->inuse, slab->objects);
+ 		return 0;
+ 	}
++	if (slab->frozen) {
++		slab_err(s, slab, "Slab disabled since SLUB metadata consistency check failed");
++		return 0;
++	}
++
+ 	/* Slab_pad_check fixes things up after itself */
+ 	slab_pad_check(s, slab);
+ 	return 1;
+@@ -1603,6 +1608,7 @@ static noinline bool alloc_debug_processing(struct kmem_cache *s,
+ 		slab_fix(s, "Marking all objects used");
+ 		slab->inuse = slab->objects;
+ 		slab->freelist = NULL;
++		slab->frozen = 1; /* mark consistency-failed slab as frozen */
+ 	}
+ 	return false;
+ }
+@@ -2744,7 +2750,8 @@ static void *alloc_single_from_partial(struct kmem_cache *s,
+ 	slab->inuse++;
+ 
+ 	if (!alloc_debug_processing(s, slab, object, orig_size)) {
+-		remove_partial(n, slab);
++		if (folio_test_slab(slab_folio(slab)))
++			remove_partial(n, slab);
+ 		return NULL;
+ 	}
+ 
+diff --git a/mm/vmalloc.c b/mm/vmalloc.c
+index 634162271c0045..5480b77f4167d7 100644
+--- a/mm/vmalloc.c
++++ b/mm/vmalloc.c
+@@ -2182,6 +2182,25 @@ decay_va_pool_node(struct vmap_node *vn, bool full_decay)
+ 	reclaim_list_global(&decay_list);
+ }
+ 
++static void
++kasan_release_vmalloc_node(struct vmap_node *vn)
++{
++	struct vmap_area *va;
++	unsigned long start, end;
++
++	start = list_first_entry(&vn->purge_list, struct vmap_area, list)->va_start;
++	end = list_last_entry(&vn->purge_list, struct vmap_area, list)->va_end;
++
++	list_for_each_entry(va, &vn->purge_list, list) {
++		if (is_vmalloc_or_module_addr((void *) va->va_start))
++			kasan_release_vmalloc(va->va_start, va->va_end,
++				va->va_start, va->va_end,
++				KASAN_VMALLOC_PAGE_RANGE);
++	}
++
++	kasan_release_vmalloc(start, end, start, end, KASAN_VMALLOC_TLB_FLUSH);
++}
++
+ static void purge_vmap_node(struct work_struct *work)
+ {
+ 	struct vmap_node *vn = container_of(work,
+@@ -2190,20 +2209,17 @@ static void purge_vmap_node(struct work_struct *work)
+ 	struct vmap_area *va, *n_va;
+ 	LIST_HEAD(local_list);
+ 
++	if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
++		kasan_release_vmalloc_node(vn);
++
+ 	vn->nr_purged = 0;
+ 
+ 	list_for_each_entry_safe(va, n_va, &vn->purge_list, list) {
+ 		unsigned long nr = va_size(va) >> PAGE_SHIFT;
+-		unsigned long orig_start = va->va_start;
+-		unsigned long orig_end = va->va_end;
+ 		unsigned int vn_id = decode_vn_id(va->flags);
+ 
+ 		list_del_init(&va->list);
+ 
+-		if (is_vmalloc_or_module_addr((void *)orig_start))
+-			kasan_release_vmalloc(orig_start, orig_end,
+-					      va->va_start, va->va_end);
+-
+ 		nr_purged_pages += nr;
+ 		vn->nr_purged++;
+ 
+@@ -4784,7 +4800,8 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
+ 				&free_vmap_area_list);
+ 		if (va)
+ 			kasan_release_vmalloc(orig_start, orig_end,
+-				va->va_start, va->va_end);
++				va->va_start, va->va_end,
++				KASAN_VMALLOC_PAGE_RANGE | KASAN_VMALLOC_TLB_FLUSH);
+ 		vas[area] = NULL;
+ 	}
+ 
+@@ -4834,7 +4851,8 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
+ 				&free_vmap_area_list);
+ 		if (va)
+ 			kasan_release_vmalloc(orig_start, orig_end,
+-				va->va_start, va->va_end);
++				va->va_start, va->va_end,
++				KASAN_VMALLOC_PAGE_RANGE | KASAN_VMALLOC_TLB_FLUSH);
+ 		vas[area] = NULL;
+ 		kfree(vms[area]);
+ 	}
+diff --git a/mm/vmstat.c b/mm/vmstat.c
+index ac6a5aa34eabba..3f41344239126b 100644
+--- a/mm/vmstat.c
++++ b/mm/vmstat.c
+@@ -1780,6 +1780,7 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
+ 			   zone_page_state(zone, i));
+ 
+ #ifdef CONFIG_NUMA
++	fold_vm_zone_numa_events(zone);
+ 	for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++)
+ 		seq_printf(m, "\n      %-12s %lu", numa_stat_name(i),
+ 			   zone_numa_event_state(zone, i));
+diff --git a/tools/perf/pmu-events/empty-pmu-events.c b/tools/perf/pmu-events/empty-pmu-events.c
+index 873e9fb2041f02..a9263bd948c41d 100644
+--- a/tools/perf/pmu-events/empty-pmu-events.c
++++ b/tools/perf/pmu-events/empty-pmu-events.c
+@@ -539,17 +539,7 @@ const struct pmu_metrics_table *perf_pmu__find_metrics_table(struct perf_pmu *pm
+         if (!map)
+                 return NULL;
+ 
+-        if (!pmu)
+-                return &map->metric_table;
+-
+-        for (size_t i = 0; i < map->metric_table.num_pmus; i++) {
+-                const struct pmu_table_entry *table_pmu = &map->metric_table.pmus[i];
+-                const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];
+-
+-                if (pmu__name_match(pmu, pmu_name))
+-                           return &map->metric_table;
+-        }
+-        return NULL;
++	return &map->metric_table;
+ }
+ 
+ const struct pmu_events_table *find_core_events_table(const char *arch, const char *cpuid)
+diff --git a/tools/perf/pmu-events/jevents.py b/tools/perf/pmu-events/jevents.py
+index d46a22fb5573de..4145e027775316 100755
+--- a/tools/perf/pmu-events/jevents.py
++++ b/tools/perf/pmu-events/jevents.py
+@@ -1089,17 +1089,7 @@ const struct pmu_metrics_table *perf_pmu__find_metrics_table(struct perf_pmu *pm
+         if (!map)
+                 return NULL;
+ 
+-        if (!pmu)
+-                return &map->metric_table;
+-
+-        for (size_t i = 0; i < map->metric_table.num_pmus; i++) {
+-                const struct pmu_table_entry *table_pmu = &map->metric_table.pmus[i];
+-                const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];
+-
+-                if (pmu__name_match(pmu, pmu_name))
+-                           return &map->metric_table;
+-        }
+-        return NULL;
++	return &map->metric_table;
+ }
+ 
+ const struct pmu_events_table *find_core_events_table(const char *arch, const char *cpuid)


^ permalink raw reply related	[flat|nested] 82+ messages in thread
* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2024-12-06 12:44 Mike Pagano
  0 siblings, 0 replies; 82+ messages in thread
From: Mike Pagano @ 2024-12-06 12:44 UTC (permalink / raw
  To: gentoo-commits

commit:     7ff281e950aa65bc7416b43f48eeb7cabcbb7195
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Dec  6 12:43:43 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Dec  6 12:43:43 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=7ff281e9

Linux patch 6.12.3, remove redundant patch

Removed:
1800_sched-init-idle-tasks-only-once.patch

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                |  8 +--
 1002_linux-6.12.3.patch                    | 57 +++++++++++++++++++++
 1800_sched-init-idle-tasks-only-once.patch | 80 ------------------------------
 3 files changed, 61 insertions(+), 84 deletions(-)

diff --git a/0000_README b/0000_README
index f7334645..c7f77bd5 100644
--- a/0000_README
+++ b/0000_README
@@ -51,6 +51,10 @@ Patch:  1001_linux-6.12.2.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.12.2
 
+Patch:  1002_linux-6.12.3.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.12.3
+
 Patch:  1510_fs-enable-link-security-restrictions-by-default.patch
 From:   http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch/
 Desc:   Enable link security restrictions by default.
@@ -63,10 +67,6 @@ Patch:  1730_parisc-Disable-prctl.patch
 From:   https://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux.git
 Desc:   prctl: Temporarily disable prctl(PR_SET_MDWE) on parisc
 
-Patch:  1800_sched-init-idle-tasks-only-once.patch
-From:   https://git.kernel.org/
-Desc:   sched: Initialize idle tasks only once
-
 Patch:  2000_BT-Check-key-sizes-only-if-Secure-Simple-Pairing-enabled.patch
 From:   https://lore.kernel.org/linux-bluetooth/20190522070540.48895-1-marcel@holtmann.org/raw
 Desc:   Bluetooth: Check key sizes only when Secure Simple Pairing is enabled. See bug #686758

diff --git a/1002_linux-6.12.3.patch b/1002_linux-6.12.3.patch
new file mode 100644
index 00000000..2e07970b
--- /dev/null
+++ b/1002_linux-6.12.3.patch
@@ -0,0 +1,57 @@
+diff --git a/Makefile b/Makefile
+index da6e99309a4da4..e81030ec683143 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 12
+-SUBLEVEL = 2
++SUBLEVEL = 3
+ EXTRAVERSION =
+ NAME = Baby Opossum Posse
+ 
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index a1c353a62c5684..76b27b2a9c56ad 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -4424,7 +4424,8 @@ int wake_up_state(struct task_struct *p, unsigned int state)
+  * Perform scheduler related setup for a newly forked process p.
+  * p is forked by current.
+  *
+- * __sched_fork() is basic setup used by init_idle() too:
++ * __sched_fork() is basic setup which is also used by sched_init() to
++ * initialize the boot CPU's idle task.
+  */
+ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
+ {
+@@ -7683,8 +7684,6 @@ void __init init_idle(struct task_struct *idle, int cpu)
+ 	struct rq *rq = cpu_rq(cpu);
+ 	unsigned long flags;
+ 
+-	__sched_fork(0, idle);
+-
+ 	raw_spin_lock_irqsave(&idle->pi_lock, flags);
+ 	raw_spin_rq_lock(rq);
+ 
+@@ -7699,10 +7698,8 @@ void __init init_idle(struct task_struct *idle, int cpu)
+ 
+ #ifdef CONFIG_SMP
+ 	/*
+-	 * It's possible that init_idle() gets called multiple times on a task,
+-	 * in that case do_set_cpus_allowed() will not do the right thing.
+-	 *
+-	 * And since this is boot we can forgo the serialization.
++	 * No validation and serialization required at boot time and for
++	 * setting up the idle tasks of not yet online CPUs.
+ 	 */
+ 	set_cpus_allowed_common(idle, &ac);
+ #endif
+@@ -8546,6 +8543,7 @@ void __init sched_init(void)
+ 	 * but because we are the idle thread, we just pick up running again
+ 	 * when this runqueue becomes "idle".
+ 	 */
++	__sched_fork(0, current);
+ 	init_idle(current, smp_processor_id());
+ 
+ 	calc_load_update = jiffies + LOAD_FREQ;

diff --git a/1800_sched-init-idle-tasks-only-once.patch b/1800_sched-init-idle-tasks-only-once.patch
deleted file mode 100644
index 013a45fc..00000000
--- a/1800_sched-init-idle-tasks-only-once.patch
+++ /dev/null
@@ -1,80 +0,0 @@
-From b23decf8ac9102fc52c4de5196f4dc0a5f3eb80b Mon Sep 17 00:00:00 2001
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Mon, 28 Oct 2024 11:43:42 +0100
-Subject: sched: Initialize idle tasks only once
-
-Idle tasks are initialized via __sched_fork() twice:
-
-     fork_idle()
-        copy_process()
-	  sched_fork()
-             __sched_fork()
-	init_idle()
-          __sched_fork()
-
-Instead of cleaning this up, sched_ext hacked around it. Even when an analysis
-and solution were provided in a discussion, nobody cared to clean this up.
-
-init_idle() is also invoked from sched_init() to initialize the boot CPU's
-idle task, which requires the __sched_fork() invocation. But this can be
-trivially solved by invoking __sched_fork() before init_idle() in
-sched_init() and removing the __sched_fork() invocation from init_idle().
-
-Do so and clean up the comments explaining this historical leftover.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
-Link: https://lore.kernel.org/r/20241028103142.359584747@linutronix.de
----
- kernel/sched/core.c | 12 +++++-------
- 1 file changed, 5 insertions(+), 7 deletions(-)
-
-(limited to 'kernel/sched/core.c')
-
-diff --git a/kernel/sched/core.c b/kernel/sched/core.c
-index c57a79e3491103..aad48850c1ef0d 100644
---- a/kernel/sched/core.c
-+++ b/kernel/sched/core.c
-@@ -4423,7 +4423,8 @@ int wake_up_state(struct task_struct *p, unsigned int state)
-  * Perform scheduler related setup for a newly forked process p.
-  * p is forked by current.
-  *
-- * __sched_fork() is basic setup used by init_idle() too:
-+ * __sched_fork() is basic setup which is also used by sched_init() to
-+ * initialize the boot CPU's idle task.
-  */
- static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
- {
-@@ -7697,8 +7698,6 @@ void __init init_idle(struct task_struct *idle, int cpu)
- 	struct rq *rq = cpu_rq(cpu);
- 	unsigned long flags;
- 
--	__sched_fork(0, idle);
--
- 	raw_spin_lock_irqsave(&idle->pi_lock, flags);
- 	raw_spin_rq_lock(rq);
- 
-@@ -7713,10 +7712,8 @@ void __init init_idle(struct task_struct *idle, int cpu)
- 
- #ifdef CONFIG_SMP
- 	/*
--	 * It's possible that init_idle() gets called multiple times on a task,
--	 * in that case do_set_cpus_allowed() will not do the right thing.
--	 *
--	 * And since this is boot we can forgo the serialization.
-+	 * No validation and serialization required at boot time and for
-+	 * setting up the idle tasks of not yet online CPUs.
- 	 */
- 	set_cpus_allowed_common(idle, &ac);
- #endif
-@@ -8561,6 +8558,7 @@ void __init sched_init(void)
- 	 * but because we are the idle thread, we just pick up running again
- 	 * when this runqueue becomes "idle".
- 	 */
-+	__sched_fork(0, current);
- 	init_idle(current, smp_processor_id());
- 
- 	calc_load_update = jiffies + LOAD_FREQ;
--- 
-cgit 1.2.3-korg
-


^ permalink raw reply related	[flat|nested] 82+ messages in thread
* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2024-12-05 20:05 Mike Pagano
  0 siblings, 0 replies; 82+ messages in thread
From: Mike Pagano @ 2024-12-05 20:05 UTC (permalink / raw
  To: gentoo-commits

commit:     2fcc7a615b8b2de79d0b1b3ce13cb5430b8c80d4
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Dec  5 20:05:18 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Dec  5 20:05:18 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=2fcc7a61

sched: Initialize idle tasks only once

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                |  4 ++
 1800_sched-init-idle-tasks-only-once.patch | 80 ++++++++++++++++++++++++++++++
 2 files changed, 84 insertions(+)

diff --git a/0000_README b/0000_README
index ac1104a1..f7334645 100644
--- a/0000_README
+++ b/0000_README
@@ -63,6 +63,10 @@ Patch:  1730_parisc-Disable-prctl.patch
 From:   https://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux.git
 Desc:   prctl: Temporarily disable prctl(PR_SET_MDWE) on parisc
 
+Patch:  1800_sched-init-idle-tasks-only-once.patch
+From:   https://git.kernel.org/
+Desc:   sched: Initialize idle tasks only once
+
 Patch:  2000_BT-Check-key-sizes-only-if-Secure-Simple-Pairing-enabled.patch
 From:   https://lore.kernel.org/linux-bluetooth/20190522070540.48895-1-marcel@holtmann.org/raw
 Desc:   Bluetooth: Check key sizes only when Secure Simple Pairing is enabled. See bug #686758

diff --git a/1800_sched-init-idle-tasks-only-once.patch b/1800_sched-init-idle-tasks-only-once.patch
new file mode 100644
index 00000000..013a45fc
--- /dev/null
+++ b/1800_sched-init-idle-tasks-only-once.patch
@@ -0,0 +1,80 @@
+From b23decf8ac9102fc52c4de5196f4dc0a5f3eb80b Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Mon, 28 Oct 2024 11:43:42 +0100
+Subject: sched: Initialize idle tasks only once
+
+Idle tasks are initialized via __sched_fork() twice:
+
+     fork_idle()
+        copy_process()
+	  sched_fork()
+             __sched_fork()
+	init_idle()
+          __sched_fork()
+
+Instead of cleaning this up, sched_ext hacked around it. Even when an analysis
+and solution were provided in a discussion, nobody cared to clean this up.
+
+init_idle() is also invoked from sched_init() to initialize the boot CPU's
+idle task, which requires the __sched_fork() invocation. But this can be
+trivially solved by invoking __sched_fork() before init_idle() in
+sched_init() and removing the __sched_fork() invocation from init_idle().
+
+Do so and clean up the comments explaining this historical leftover.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lore.kernel.org/r/20241028103142.359584747@linutronix.de
+---
+ kernel/sched/core.c | 12 +++++-------
+ 1 file changed, 5 insertions(+), 7 deletions(-)
+
+(limited to 'kernel/sched/core.c')
+
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index c57a79e3491103..aad48850c1ef0d 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -4423,7 +4423,8 @@ int wake_up_state(struct task_struct *p, unsigned int state)
+  * Perform scheduler related setup for a newly forked process p.
+  * p is forked by current.
+  *
+- * __sched_fork() is basic setup used by init_idle() too:
++ * __sched_fork() is basic setup which is also used by sched_init() to
++ * initialize the boot CPU's idle task.
+  */
+ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
+ {
+@@ -7697,8 +7698,6 @@ void __init init_idle(struct task_struct *idle, int cpu)
+ 	struct rq *rq = cpu_rq(cpu);
+ 	unsigned long flags;
+ 
+-	__sched_fork(0, idle);
+-
+ 	raw_spin_lock_irqsave(&idle->pi_lock, flags);
+ 	raw_spin_rq_lock(rq);
+ 
+@@ -7713,10 +7712,8 @@ void __init init_idle(struct task_struct *idle, int cpu)
+ 
+ #ifdef CONFIG_SMP
+ 	/*
+-	 * It's possible that init_idle() gets called multiple times on a task,
+-	 * in that case do_set_cpus_allowed() will not do the right thing.
+-	 *
+-	 * And since this is boot we can forgo the serialization.
++	 * No validation and serialization required at boot time and for
++	 * setting up the idle tasks of not yet online CPUs.
+ 	 */
+ 	set_cpus_allowed_common(idle, &ac);
+ #endif
+@@ -8561,6 +8558,7 @@ void __init sched_init(void)
+ 	 * but because we are the idle thread, we just pick up running again
+ 	 * when this runqueue becomes "idle".
+ 	 */
++	__sched_fork(0, current);
+ 	init_idle(current, smp_processor_id());
+ 
+ 	calc_load_update = jiffies + LOAD_FREQ;
+-- 
+cgit 1.2.3-korg
+


^ permalink raw reply related	[flat|nested] 82+ messages in thread
* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2024-12-05 14:06 Mike Pagano
  0 siblings, 0 replies; 82+ messages in thread
From: Mike Pagano @ 2024-12-05 14:06 UTC (permalink / raw
  To: gentoo-commits

commit:     667267c9cd00cf85da39630df8c81d77fda4ec4d
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Dec  5 14:06:06 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Dec  5 14:06:06 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=667267c9

Linux patch 6.12.2

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |     4 +
 1001_linux-6.12.2.patch | 47740 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 47744 insertions(+)

diff --git a/0000_README b/0000_README
index bc514d88..ac1104a1 100644
--- a/0000_README
+++ b/0000_README
@@ -47,6 +47,10 @@ Patch:  1000_linux-6.12.1.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.12.1
 
+Patch:  1001_linux-6.12.2.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.12.2
+
 Patch:  1510_fs-enable-link-security-restrictions-by-default.patch
 From:   http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch/
 Desc:   Enable link security restrictions by default.

diff --git a/1001_linux-6.12.2.patch b/1001_linux-6.12.2.patch
new file mode 100644
index 00000000..f10548d7
--- /dev/null
+++ b/1001_linux-6.12.2.patch
@@ -0,0 +1,47740 @@
+diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs
+index fdedf1ea944ba8..513296bb6f297f 100644
+--- a/Documentation/ABI/testing/sysfs-fs-f2fs
++++ b/Documentation/ABI/testing/sysfs-fs-f2fs
+@@ -311,10 +311,13 @@ Description:	Do background GC aggressively when set. Set to 0 by default.
+ 		GC approach and turns SSR mode on.
+ 		gc urgent low(2): lowers the bar of checking I/O idling in
+ 		order to process outstanding discard commands and GC a
+-		little bit aggressively. uses cost benefit GC approach.
++		little bit aggressively. always uses cost benefit GC approach,
++		and will override age-threshold GC approach if ATGC is enabled
++		at the same time.
+ 		gc urgent mid(3): does GC forcibly in a period of given
+ 		gc_urgent_sleep_time and executes a mid level of I/O idling check.
+-		uses cost benefit GC approach.
++		always uses cost benefit GC approach, and will override
++		age-threshold GC approach if ATGC is enabled at the same time.
+ 
+ What:		/sys/fs/f2fs/<disk>/gc_urgent_sleep_time
+ Date:		August 2017
+diff --git a/Documentation/RCU/stallwarn.rst b/Documentation/RCU/stallwarn.rst
+index ca7b7cd806a16c..30080ff6f4062d 100644
+--- a/Documentation/RCU/stallwarn.rst
++++ b/Documentation/RCU/stallwarn.rst
+@@ -249,7 +249,7 @@ ticks this GP)" indicates that this CPU has not taken any scheduling-clock
+ interrupts during the current stalled grace period.
+ 
+ The "idle=" portion of the message prints the dyntick-idle state.
+-The hex number before the first "/" is the low-order 12 bits of the
++The hex number before the first "/" is the low-order 16 bits of the
+ dynticks counter, which will have an even-numbered value if the CPU
+ is in dyntick-idle mode and an odd-numbered value otherwise.  The hex
+ number between the two "/"s is the value of the nesting, which will be
+diff --git a/Documentation/admin-guide/blockdev/zram.rst b/Documentation/admin-guide/blockdev/zram.rst
+index 678d70d6e1c3ac..714a5171bfc0b8 100644
+--- a/Documentation/admin-guide/blockdev/zram.rst
++++ b/Documentation/admin-guide/blockdev/zram.rst
+@@ -47,6 +47,8 @@ The list of possible return codes:
+ -ENOMEM	  zram was not able to allocate enough memory to fulfil your
+ 	  needs.
+ -EINVAL	  invalid input has been provided.
++-EAGAIN	  re-try operation later (e.g. when attempting to run recompress
++	  and writeback simultaneously).
+ ========  =============================================================
+ 
+ If you use 'echo', the returned value is set by the 'echo' utility,
+diff --git a/Documentation/admin-guide/media/building.rst b/Documentation/admin-guide/media/building.rst
+index a0647342991637..7a413ba07f93bb 100644
+--- a/Documentation/admin-guide/media/building.rst
++++ b/Documentation/admin-guide/media/building.rst
+@@ -15,7 +15,7 @@ Please notice, however, that, if:
+ 
+ you should use the main media development tree ``master`` branch:
+ 
+-    https://git.linuxtv.org/media_tree.git/
++    https://git.linuxtv.org/media.git/
+ 
+ In this case, you may find some useful information at the
+ `LinuxTv wiki pages <https://linuxtv.org/wiki>`_:
+diff --git a/Documentation/admin-guide/media/saa7134.rst b/Documentation/admin-guide/media/saa7134.rst
+index 51eae7eb5ab7f4..18d7cbc897db4b 100644
+--- a/Documentation/admin-guide/media/saa7134.rst
++++ b/Documentation/admin-guide/media/saa7134.rst
+@@ -67,7 +67,7 @@ Changes / Fixes
+ Please mail to linux-media AT vger.kernel.org unified diffs against
+ the linux media git tree:
+ 
+-    https://git.linuxtv.org/media_tree.git/
++    https://git.linuxtv.org/media.git/
+ 
+ This is done by committing a patch at a clone of the git tree and
+ submitting the patch using ``git send-email``. Don't forget to
+diff --git a/Documentation/arch/x86/boot.rst b/Documentation/arch/x86/boot.rst
+index 4fd492cb49704f..ad2d8ddad27fe4 100644
+--- a/Documentation/arch/x86/boot.rst
++++ b/Documentation/arch/x86/boot.rst
+@@ -896,10 +896,19 @@ Offset/size:	0x260/4
+ 
+   The kernel runtime start address is determined by the following algorithm::
+ 
+-	if (relocatable_kernel)
+-	runtime_start = align_up(load_address, kernel_alignment)
+-	else
+-	runtime_start = pref_address
++   	if (relocatable_kernel) {
++   		if (load_address < pref_address)
++   			load_address = pref_address;
++   		runtime_start = align_up(load_address, kernel_alignment);
++   	} else {
++   		runtime_start = pref_address;
++   	}
++
++Hence the necessary memory window location and size can be estimated by
++a boot loader as::
++
++   	memory_window_start = runtime_start;
++   	memory_window_size = init_size;
+ 
+ ============	===============
+ Field name:	handover_offset
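
A quick worked example of the algorithm above, with invented addresses: given
pref_address = 0x1000000, kernel_alignment = 0x200000 and a relocatable kernel
loaded at 0x1234567, the load address already exceeds pref_address, so
runtime_start = align_up(0x1234567, 0x200000) = 0x1400000, and the boot loader
must keep the window [0x1400000, 0x1400000 + init_size) free.
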
+diff --git a/Documentation/devicetree/bindings/cache/qcom,llcc.yaml b/Documentation/devicetree/bindings/cache/qcom,llcc.yaml
+index 68ea5f70b75f03..ee7edc6f60e2b4 100644
+--- a/Documentation/devicetree/bindings/cache/qcom,llcc.yaml
++++ b/Documentation/devicetree/bindings/cache/qcom,llcc.yaml
+@@ -39,11 +39,11 @@ properties:
+ 
+   reg:
+     minItems: 2
+-    maxItems: 9
++    maxItems: 10
+ 
+   reg-names:
+     minItems: 2
+-    maxItems: 9
++    maxItems: 10
+ 
+   interrupts:
+     maxItems: 1
+@@ -134,6 +134,36 @@ allOf:
+               - qcom,qdu1000-llcc
+               - qcom,sc8180x-llcc
+               - qcom,sc8280xp-llcc
++    then:
++      properties:
++        reg:
++          items:
++            - description: LLCC0 base register region
++            - description: LLCC1 base register region
++            - description: LLCC2 base register region
++            - description: LLCC3 base register region
++            - description: LLCC4 base register region
++            - description: LLCC5 base register region
++            - description: LLCC6 base register region
++            - description: LLCC7 base register region
++            - description: LLCC broadcast base register region
++        reg-names:
++          items:
++            - const: llcc0_base
++            - const: llcc1_base
++            - const: llcc2_base
++            - const: llcc3_base
++            - const: llcc4_base
++            - const: llcc5_base
++            - const: llcc6_base
++            - const: llcc7_base
++            - const: llcc_broadcast_base
++
++  - if:
++      properties:
++        compatible:
++          contains:
++            enum:
+               - qcom,x1e80100-llcc
+     then:
+       properties:
+@@ -148,6 +178,7 @@ allOf:
+             - description: LLCC6 base register region
+             - description: LLCC7 base register region
+             - description: LLCC broadcast base register region
++            - description: LLCC broadcast AND register region
+         reg-names:
+           items:
+             - const: llcc0_base
+@@ -159,6 +190,7 @@ allOf:
+             - const: llcc6_base
+             - const: llcc7_base
+             - const: llcc_broadcast_base
++            - const: llcc_broadcast_and_base
+ 
+   - if:
+       properties:
+diff --git a/Documentation/devicetree/bindings/clock/adi,axi-clkgen.yaml b/Documentation/devicetree/bindings/clock/adi,axi-clkgen.yaml
+index 5e942bccf27787..2b2041818a0a44 100644
+--- a/Documentation/devicetree/bindings/clock/adi,axi-clkgen.yaml
++++ b/Documentation/devicetree/bindings/clock/adi,axi-clkgen.yaml
+@@ -26,9 +26,21 @@ properties:
+     description:
+       Specifies the reference clock(s) from which the output frequency is
+       derived. This must either reference one clock if only the first clock
+-      input is connected or two if both clock inputs are connected.
+-    minItems: 1
+-    maxItems: 2
++      input is connected or two if both clock inputs are connected. The last
++      clock is the AXI bus clock that needs to be enabled so we can access the
++      core registers.
++    minItems: 2
++    maxItems: 3
++
++  clock-names:
++    oneOf:
++      - items:
++          - const: clkin1
++          - const: s_axi_aclk
++      - items:
++          - const: clkin1
++          - const: clkin2
++          - const: s_axi_aclk
+ 
+   '#clock-cells':
+     const: 0
+@@ -40,6 +52,7 @@ required:
+   - compatible
+   - reg
+   - clocks
++  - clock-names
+   - '#clock-cells'
+ 
+ additionalProperties: false
+@@ -50,5 +63,6 @@ examples:
+       compatible = "adi,axi-clkgen-2.00.a";
+       #clock-cells = <0>;
+       reg = <0xff000000 0x1000>;
+-      clocks = <&osc 1>;
++      clocks = <&osc 1>, <&clkc 15>;
++      clock-names = "clkin1", "s_axi_aclk";
+     };
+diff --git a/Documentation/devicetree/bindings/iio/dac/adi,ad3552r.yaml b/Documentation/devicetree/bindings/iio/dac/adi,ad3552r.yaml
+index fc8b97f820775b..41fe0003474285 100644
+--- a/Documentation/devicetree/bindings/iio/dac/adi,ad3552r.yaml
++++ b/Documentation/devicetree/bindings/iio/dac/adi,ad3552r.yaml
+@@ -30,7 +30,7 @@ properties:
+     maxItems: 1
+ 
+   spi-max-frequency:
+-    maximum: 30000000
++    maximum: 66000000
+ 
+   reset-gpios:
+     maxItems: 1
+diff --git a/Documentation/devicetree/bindings/pci/mediatek-pcie-gen3.yaml b/Documentation/devicetree/bindings/pci/mediatek-pcie-gen3.yaml
+index 898c1be2d6a435..f05aab2b1addca 100644
+--- a/Documentation/devicetree/bindings/pci/mediatek-pcie-gen3.yaml
++++ b/Documentation/devicetree/bindings/pci/mediatek-pcie-gen3.yaml
+@@ -149,7 +149,7 @@ allOf:
+     then:
+       properties:
+         clocks:
+-          minItems: 4
++          minItems: 6
+ 
+         clock-names:
+           items:
+@@ -178,7 +178,7 @@ allOf:
+     then:
+       properties:
+         clocks:
+-          minItems: 4
++          minItems: 6
+ 
+         clock-names:
+           items:
+@@ -207,6 +207,7 @@ allOf:
+       properties:
+         clocks:
+           minItems: 4
++          maxItems: 4
+ 
+         clock-names:
+           items:
+diff --git a/Documentation/devicetree/bindings/pinctrl/samsung,pinctrl-wakeup-interrupt.yaml b/Documentation/devicetree/bindings/pinctrl/samsung,pinctrl-wakeup-interrupt.yaml
+index 4dfb49b0e07f73..f82a3c7e6c29e4 100644
+--- a/Documentation/devicetree/bindings/pinctrl/samsung,pinctrl-wakeup-interrupt.yaml
++++ b/Documentation/devicetree/bindings/pinctrl/samsung,pinctrl-wakeup-interrupt.yaml
+@@ -91,14 +91,17 @@ allOf:
+   - if:
+       properties:
+         compatible:
+-          # Match without "contains", to skip newer variants which are still
+-          # compatible with samsung,exynos7-wakeup-eint
+-          enum:
+-            - samsung,s5pv210-wakeup-eint
+-            - samsung,exynos4210-wakeup-eint
+-            - samsung,exynos5433-wakeup-eint
+-            - samsung,exynos7-wakeup-eint
+-            - samsung,exynos7885-wakeup-eint
++          oneOf:
++            # Match without "contains", to skip newer variants which are still
++            # compatible with samsung,exynos7-wakeup-eint
++            - enum:
++                - samsung,exynos4210-wakeup-eint
++                - samsung,exynos7-wakeup-eint
++                - samsung,s5pv210-wakeup-eint
++            - contains:
++                enum:
++                  - samsung,exynos5433-wakeup-eint
++                  - samsung,exynos7885-wakeup-eint
+     then:
+       properties:
+         interrupts:
+diff --git a/Documentation/devicetree/bindings/serial/rs485.yaml b/Documentation/devicetree/bindings/serial/rs485.yaml
+index 9418fd66a8e95a..b93254ad2a287a 100644
+--- a/Documentation/devicetree/bindings/serial/rs485.yaml
++++ b/Documentation/devicetree/bindings/serial/rs485.yaml
+@@ -18,16 +18,15 @@ properties:
+     description: prop-encoded-array <a b>
+     $ref: /schemas/types.yaml#/definitions/uint32-array
+     items:
+-      items:
+-        - description: Delay between rts signal and beginning of data sent in
+-            milliseconds. It corresponds to the delay before sending data.
+-          default: 0
+-          maximum: 100
+-        - description: Delay between end of data sent and rts signal in milliseconds.
+-            It corresponds to the delay after sending data and actual release
+-            of the line.
+-          default: 0
+-          maximum: 100
++      - description: Delay between rts signal and beginning of data sent in
++          milliseconds. It corresponds to the delay before sending data.
++        default: 0
++        maximum: 100
++      - description: Delay between end of data sent and rts signal in milliseconds.
++          It corresponds to the delay after sending data and actual release
++          of the line.
++        default: 0
++        maximum: 100
+ 
+   rs485-rts-active-high:
+     description: drive RTS high when sending (this is the default).
+diff --git a/Documentation/devicetree/bindings/sound/mt6359.yaml b/Documentation/devicetree/bindings/sound/mt6359.yaml
+index 23d411fc4200e6..128698630c865f 100644
+--- a/Documentation/devicetree/bindings/sound/mt6359.yaml
++++ b/Documentation/devicetree/bindings/sound/mt6359.yaml
+@@ -23,8 +23,8 @@ properties:
+       Indicates how many data pins are used to transmit two channels of PDM
+       signal. 0 means two wires, 1 means one wire. Default value is 0.
+     enum:
+-      - 0 # one wire
+-      - 1 # two wires
++      - 0 # two wires
++      - 1 # one wire
+ 
+   mediatek,mic-type-0:
+     $ref: /schemas/types.yaml#/definitions/uint32
+@@ -53,9 +53,9 @@ additionalProperties: false
+ 
+ examples:
+   - |
+-    mt6359codec: mt6359codec {
+-      mediatek,dmic-mode = <0>;
+-      mediatek,mic-type-0 = <2>;
++    mt6359codec: audio-codec {
++        mediatek,dmic-mode = <0>;
++        mediatek,mic-type-0 = <2>;
+     };
+ 
+ ...
+diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml
+index b320a39de7fe40..fbfce9b4ae6b8e 100644
+--- a/Documentation/devicetree/bindings/vendor-prefixes.yaml
++++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml
+@@ -1013,6 +1013,8 @@ patternProperties:
+     description: Shanghai Neardi Technology Co., Ltd.
+   "^nec,.*":
+     description: NEC LCD Technologies, Ltd.
++  "^neofidelity,.*":
++    description: Neofidelity Inc.
+   "^neonode,.*":
+     description: Neonode Inc.
+   "^netgear,.*":
+diff --git a/Documentation/filesystems/mount_api.rst b/Documentation/filesystems/mount_api.rst
+index 317934c9e8fcac..d92c276f1575af 100644
+--- a/Documentation/filesystems/mount_api.rst
++++ b/Documentation/filesystems/mount_api.rst
+@@ -770,7 +770,8 @@ process the parameters it is given.
+ 
+    * ::
+ 
+-       bool fs_validate_description(const struct fs_parameter_description *desc);
++       bool fs_validate_description(const char *name,
++                                    const struct fs_parameter_description *desc);
+ 
+      This performs some validation checks on a parameter description.  It
+      returns true if the description is good and false if it is not.  It will
+diff --git a/Documentation/locking/seqlock.rst b/Documentation/locking/seqlock.rst
+index bfda1a5fecadc6..ec6411d02ac8f5 100644
+--- a/Documentation/locking/seqlock.rst
++++ b/Documentation/locking/seqlock.rst
+@@ -153,7 +153,7 @@ Use seqcount_latch_t when the write side sections cannot be protected
+ from interruption by readers. This is typically the case when the read
+ side can be invoked from NMI handlers.
+ 
+-Check `raw_write_seqcount_latch()` for more information.
++Check `write_seqcount_latch()` for more information.
+ 
+ 
+ .. _seqlock_t:
+diff --git a/MAINTAINERS b/MAINTAINERS
+index b878ddc99f94e7..6bb4ec0c162a53 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -701,7 +701,7 @@ M:	Hans Verkuil <hverkuil@xs4all.nl>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+ W:	https://linuxtv.org
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/radio/radio-aimslab*
+ 
+ AIO
+@@ -809,7 +809,7 @@ ALLWINNER A10 CSI DRIVER
+ M:	Maxime Ripard <mripard@kernel.org>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/devicetree/bindings/media/allwinner,sun4i-a10-csi.yaml
+ F:	drivers/media/platform/sunxi/sun4i-csi/
+ 
+@@ -818,7 +818,7 @@ M:	Yong Deng <yong.deng@magewell.com>
+ M:	Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/devicetree/bindings/media/allwinner,sun6i-a31-csi.yaml
+ F:	drivers/media/platform/sunxi/sun6i-csi/
+ 
+@@ -826,7 +826,7 @@ ALLWINNER A31 ISP DRIVER
+ M:	Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/devicetree/bindings/media/allwinner,sun6i-a31-isp.yaml
+ F:	drivers/staging/media/sunxi/sun6i-isp/
+ F:	drivers/staging/media/sunxi/sun6i-isp/uapi/sun6i-isp-config.h
+@@ -835,7 +835,7 @@ ALLWINNER A31 MIPI CSI-2 BRIDGE DRIVER
+ M:	Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/devicetree/bindings/media/allwinner,sun6i-a31-mipi-csi2.yaml
+ F:	drivers/media/platform/sunxi/sun6i-mipi-csi2/
+ 
+@@ -3348,7 +3348,7 @@ ASAHI KASEI AK7375 LENS VOICE COIL DRIVER
+ M:	Tianshu Qiu <tian.shu.qiu@intel.com>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/devicetree/bindings/media/i2c/asahi-kasei,ak7375.yaml
+ F:	drivers/media/i2c/ak7375.c
+ 
+@@ -3765,7 +3765,7 @@ M:	Mauro Carvalho Chehab <mchehab@kernel.org>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+ W:	https://linuxtv.org
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/usb/dvb-usb-v2/az6007.c
+ 
+ AZTECH FM RADIO RECEIVER DRIVER
+@@ -3773,7 +3773,7 @@ M:	Hans Verkuil <hverkuil@xs4all.nl>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+ W:	https://linuxtv.org
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/radio/radio-aztech*
+ 
+ B43 WIRELESS DRIVER
+@@ -3857,7 +3857,7 @@ M:	Fabien Dessenne <fabien.dessenne@foss.st.com>
+ L:	linux-media@vger.kernel.org
+ S:	Supported
+ W:	https://linuxtv.org
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/platform/st/sti/bdisp
+ 
+ BECKHOFF CX5020 ETHERCAT MASTER DRIVER
+@@ -4865,7 +4865,7 @@ M:	Mauro Carvalho Chehab <mchehab@kernel.org>
+ L:	linux-media@vger.kernel.org
+ S:	Odd fixes
+ W:	https://linuxtv.org
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/driver-api/media/drivers/bttv*
+ F:	drivers/media/pci/bt8xx/bttv*
+ 
+@@ -4979,13 +4979,13 @@ M:	Hans Verkuil <hverkuil@xs4all.nl>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+ W:	https://linuxtv.org
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/radio/radio-cadet*
+ 
+ CAFE CMOS INTEGRATED CAMERA CONTROLLER DRIVER
+ L:	linux-media@vger.kernel.org
+ S:	Orphan
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/admin-guide/media/cafe_ccic*
+ F:	drivers/media/platform/marvell/
+ 
+@@ -5169,7 +5169,7 @@ M:	Hans Verkuil <hverkuil-cisco@xs4all.nl>
+ L:	linux-media@vger.kernel.org
+ S:	Supported
+ W:	http://linuxtv.org
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/ABI/testing/debugfs-cec-error-inj
+ F:	Documentation/devicetree/bindings/media/cec/cec-common.yaml
+ F:	Documentation/driver-api/media/cec-core.rst
+@@ -5186,7 +5186,7 @@ M:	Hans Verkuil <hverkuil-cisco@xs4all.nl>
+ L:	linux-media@vger.kernel.org
+ S:	Supported
+ W:	http://linuxtv.org
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/devicetree/bindings/media/cec/cec-gpio.yaml
+ F:	drivers/media/cec/platform/cec-gpio/
+ 
+@@ -5393,7 +5393,7 @@ CHRONTEL CH7322 CEC DRIVER
+ M:	Joe Tessler <jrt@google.com>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/devicetree/bindings/media/i2c/chrontel,ch7322.yaml
+ F:	drivers/media/cec/i2c/ch7322.c
+ 
+@@ -5582,7 +5582,7 @@ M:	Hans Verkuil <hverkuil-cisco@xs4all.nl>
+ L:	linux-media@vger.kernel.org
+ S:	Supported
+ W:	https://linuxtv.org
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/pci/cobalt/
+ 
+ COCCINELLE/Semantic Patches (SmPL)
+@@ -6026,7 +6026,7 @@ M:	Hans Verkuil <hverkuil@xs4all.nl>
+ L:	linux-media@vger.kernel.org
+ S:	Odd Fixes
+ W:	http://linuxtv.org
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/i2c/cs3308.c
+ 
+ CS5535 Audio ALSA driver
+@@ -6057,7 +6057,7 @@ M:	Andy Walls <awalls@md.metrocast.net>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+ W:	https://linuxtv.org
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/pci/cx18/
+ F:	include/uapi/linux/ivtv*
+ 
+@@ -6066,7 +6066,7 @@ M:	Hans Verkuil <hverkuil@xs4all.nl>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+ W:	https://linuxtv.org
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/common/cx2341x*
+ F:	include/media/drv-intf/cx2341x.h
+ 
+@@ -6084,7 +6084,7 @@ M:	Mauro Carvalho Chehab <mchehab@kernel.org>
+ L:	linux-media@vger.kernel.org
+ S:	Odd fixes
+ W:	https://linuxtv.org
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/driver-api/media/drivers/cx88*
+ F:	drivers/media/pci/cx88/
+ 
+@@ -6320,7 +6320,7 @@ DEINTERLACE DRIVERS FOR ALLWINNER H3
+ M:	Jernej Skrabec <jernej.skrabec@gmail.com>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/devicetree/bindings/media/allwinner,sun8i-h3-deinterlace.yaml
+ F:	drivers/media/platform/sunxi/sun8i-di/
+ 
+@@ -6447,7 +6447,7 @@ M:	Hugues Fruchet <hugues.fruchet@foss.st.com>
+ L:	linux-media@vger.kernel.org
+ S:	Supported
+ W:	https://linuxtv.org
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/platform/st/sti/delta
+ 
+ DENALI NAND DRIVER
+@@ -6855,7 +6855,7 @@ DONGWOON DW9714 LENS VOICE COIL DRIVER
+ M:	Sakari Ailus <sakari.ailus@linux.intel.com>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/devicetree/bindings/media/i2c/dongwoon,dw9714.yaml
+ F:	drivers/media/i2c/dw9714.c
+ 
+@@ -6863,13 +6863,13 @@ DONGWOON DW9719 LENS VOICE COIL DRIVER
+ M:	Daniel Scally <djrscally@gmail.com>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/i2c/dw9719.c
+ 
+ DONGWOON DW9768 LENS VOICE COIL DRIVER
+ L:	linux-media@vger.kernel.org
+ S:	Orphan
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/devicetree/bindings/media/i2c/dongwoon,dw9768.yaml
+ F:	drivers/media/i2c/dw9768.c
+ 
+@@ -6877,7 +6877,7 @@ DONGWOON DW9807 LENS VOICE COIL DRIVER
+ M:	Sakari Ailus <sakari.ailus@linux.intel.com>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/devicetree/bindings/media/i2c/dongwoon,dw9807-vcm.yaml
+ F:	drivers/media/i2c/dw9807-vcm.c
+ 
+@@ -7860,7 +7860,7 @@ DSBR100 USB FM RADIO DRIVER
+ M:	Alexey Klimov <klimov.linux@gmail.com>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/radio/dsbr100.c
+ 
+ DT3155 MEDIA DRIVER
+@@ -7868,7 +7868,7 @@ M:	Hans Verkuil <hverkuil@xs4all.nl>
+ L:	linux-media@vger.kernel.org
+ S:	Odd Fixes
+ W:	https://linuxtv.org
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/pci/dt3155/
+ 
+ DVB_USB_AF9015 MEDIA DRIVER
+@@ -7913,7 +7913,7 @@ S:	Maintained
+ W:	https://linuxtv.org
+ W:	http://github.com/mkrufky
+ Q:	http://patchwork.linuxtv.org/project/linux-media/list/
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/usb/dvb-usb/cxusb*
+ 
+ DVB_USB_EC168 MEDIA DRIVER
+@@ -8282,7 +8282,7 @@ M:	Mauro Carvalho Chehab <mchehab@kernel.org>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+ W:	https://linuxtv.org
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/admin-guide/media/em28xx*
+ F:	drivers/media/usb/em28xx/
+ 
+@@ -8578,7 +8578,7 @@ EXTRON DA HD 4K PLUS CEC DRIVER
+ M:	Hans Verkuil <hverkuil@xs4all.nl>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/cec/usb/extron-da-hd-4k-plus/
+ 
+ EXYNOS DP DRIVER
+@@ -9400,7 +9400,7 @@ GALAXYCORE GC2145 SENSOR DRIVER
+ M:	Alain Volmat <alain.volmat@foss.st.com>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/devicetree/bindings/media/i2c/galaxycore,gc2145.yaml
+ F:	drivers/media/i2c/gc2145.c
+ 
+@@ -9448,7 +9448,7 @@ M:	Hans Verkuil <hverkuil@xs4all.nl>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+ W:	https://linuxtv.org
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/radio/radio-gemtek*
+ 
+ GENERIC ARCHITECTURE TOPOLOGY
+@@ -9830,56 +9830,56 @@ GS1662 VIDEO SERIALIZER
+ M:	Charles-Antoine Couret <charles-antoine.couret@nexvision.fr>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/spi/gs1662.c
+ 
+ GSPCA FINEPIX SUBDRIVER
+ M:	Frank Zago <frank@zago.net>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/usb/gspca/finepix.c
+ 
+ GSPCA GL860 SUBDRIVER
+ M:	Olivier Lorin <o.lorin@laposte.net>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/usb/gspca/gl860/
+ 
+ GSPCA M5602 SUBDRIVER
+ M:	Erik Andren <erik.andren@gmail.com>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/usb/gspca/m5602/
+ 
+ GSPCA PAC207 SONIXB SUBDRIVER
+ M:	Hans Verkuil <hverkuil@xs4all.nl>
+ L:	linux-media@vger.kernel.org
+ S:	Odd Fixes
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/usb/gspca/pac207.c
+ 
+ GSPCA SN9C20X SUBDRIVER
+ M:	Brian Johnson <brijohn@gmail.com>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/usb/gspca/sn9c20x.c
+ 
+ GSPCA T613 SUBDRIVER
+ M:	Leandro Costantino <lcostantino@gmail.com>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/usb/gspca/t613.c
+ 
+ GSPCA USB WEBCAM DRIVER
+ M:	Hans Verkuil <hverkuil@xs4all.nl>
+ L:	linux-media@vger.kernel.org
+ S:	Odd Fixes
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/usb/gspca/
+ 
+ GTP (GPRS Tunneling Protocol)
+@@ -9996,7 +9996,7 @@ M:	Hans Verkuil <hverkuil@xs4all.nl>
+ L:	linux-media@vger.kernel.org
+ S:	Odd Fixes
+ W:	https://linuxtv.org
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/usb/hdpvr/
+ 
+ HEWLETT PACKARD ENTERPRISE ILO CHIF DRIVER
+@@ -10503,7 +10503,7 @@ M:	Jean-Christophe Trotin <jean-christophe.trotin@foss.st.com>
+ L:	linux-media@vger.kernel.org
+ S:	Supported
+ W:	https://linuxtv.org
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/platform/st/sti/hva
+ 
+ HWPOISON MEMORY FAILURE HANDLING
+@@ -10531,7 +10531,7 @@ HYNIX HI556 SENSOR DRIVER
+ M:	Sakari Ailus <sakari.ailus@linux.intel.com>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/i2c/hi556.c
+ 
+ HYNIX HI846 SENSOR DRIVER
+@@ -11502,7 +11502,7 @@ M:	Dan Scally <djrscally@gmail.com>
+ R:	Tianshu Qiu <tian.shu.qiu@intel.com>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/userspace-api/media/v4l/pixfmt-srggb10-ipu3.rst
+ F:	drivers/media/pci/intel/ipu3/
+ 
+@@ -11523,7 +11523,7 @@ M:	Bingbu Cao <bingbu.cao@intel.com>
+ R:	Tianshu Qiu <tian.shu.qiu@intel.com>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/admin-guide/media/ipu6-isys.rst
+ F:	drivers/media/pci/intel/ipu6/
+ 
+@@ -12036,7 +12036,7 @@ M:	Hans Verkuil <hverkuil@xs4all.nl>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+ W:	https://linuxtv.org
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/radio/radio-isa*
+ 
+ ISAPNP
+@@ -12138,7 +12138,7 @@ M:	Andy Walls <awalls@md.metrocast.net>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+ W:	https://linuxtv.org
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/admin-guide/media/ivtv*
+ F:	drivers/media/pci/ivtv/
+ F:	include/uapi/linux/ivtv*
+@@ -12286,7 +12286,7 @@ M:	Hans Verkuil <hverkuil@xs4all.nl>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+ W:	https://linuxtv.org
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/radio/radio-keene*
+ 
+ KERNEL AUTOMOUNTER
+@@ -13573,7 +13573,7 @@ MA901 MASTERKIT USB FM RADIO DRIVER
+ M:	Alexey Klimov <klimov.linux@gmail.com>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/radio/radio-ma901.c
+ 
+ MAC80211
+@@ -13868,7 +13868,7 @@ MAX2175 SDR TUNER DRIVER
+ M:	Ramesh Shanmugasundaram <rashanmu@gmail.com>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/devicetree/bindings/media/i2c/max2175.txt
+ F:	Documentation/userspace-api/media/drivers/max2175.rst
+ F:	drivers/media/i2c/max2175*
+@@ -14048,7 +14048,7 @@ M:	Hans Verkuil <hverkuil@xs4all.nl>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+ W:	https://linuxtv.org
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/radio/radio-maxiradio*
+ 
+ MAXLINEAR ETHERNET PHY DRIVER
+@@ -14131,7 +14131,7 @@ M:	Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ L:	linux-media@vger.kernel.org
+ S:	Supported
+ W:	https://www.linuxtv.org
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/mc/
+ F:	include/media/media-*.h
+ F:	include/uapi/linux/media.h
+@@ -14140,7 +14140,7 @@ MEDIA DRIVER FOR FREESCALE IMX PXP
+ M:	Philipp Zabel <p.zabel@pengutronix.de>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/platform/nxp/imx-pxp.[ch]
+ 
+ MEDIA DRIVERS FOR ASCOT2E
+@@ -14149,7 +14149,7 @@ L:	linux-media@vger.kernel.org
+ S:	Supported
+ W:	https://linuxtv.org
+ W:	http://netup.tv/
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/dvb-frontends/ascot2e*
+ 
+ MEDIA DRIVERS FOR CXD2099AR CI CONTROLLERS
+@@ -14157,7 +14157,7 @@ M:	Jasmin Jessich <jasmin@anw.at>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+ W:	https://linuxtv.org
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/dvb-frontends/cxd2099*
+ 
+ MEDIA DRIVERS FOR CXD2841ER
+@@ -14166,7 +14166,7 @@ L:	linux-media@vger.kernel.org
+ S:	Supported
+ W:	https://linuxtv.org
+ W:	http://netup.tv/
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/dvb-frontends/cxd2841er*
+ 
+ MEDIA DRIVERS FOR CXD2880
+@@ -14174,7 +14174,7 @@ M:	Yasunari Takiguchi <Yasunari.Takiguchi@sony.com>
+ L:	linux-media@vger.kernel.org
+ S:	Supported
+ W:	http://linuxtv.org/
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/dvb-frontends/cxd2880/*
+ F:	drivers/media/spi/cxd2880*
+ 
+@@ -14182,7 +14182,7 @@ MEDIA DRIVERS FOR DIGITAL DEVICES PCIE DEVICES
+ L:	linux-media@vger.kernel.org
+ S:	Orphan
+ W:	https://linuxtv.org
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/pci/ddbridge/*
+ 
+ MEDIA DRIVERS FOR FREESCALE IMX
+@@ -14190,7 +14190,7 @@ M:	Steve Longerbeam <slongerbeam@gmail.com>
+ M:	Philipp Zabel <p.zabel@pengutronix.de>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/admin-guide/media/imx.rst
+ F:	Documentation/devicetree/bindings/media/imx.txt
+ F:	drivers/staging/media/imx/
+@@ -14204,7 +14204,7 @@ M:	Martin Kepplinger <martin.kepplinger@puri.sm>
+ R:	Purism Kernel Team <kernel@puri.sm>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/admin-guide/media/imx7.rst
+ F:	Documentation/devicetree/bindings/media/nxp,imx-mipi-csi2.yaml
+ F:	Documentation/devicetree/bindings/media/nxp,imx7-csi.yaml
+@@ -14219,7 +14219,7 @@ L:	linux-media@vger.kernel.org
+ S:	Supported
+ W:	https://linuxtv.org
+ W:	http://netup.tv/
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/dvb-frontends/helene*
+ 
+ MEDIA DRIVERS FOR HORUS3A
+@@ -14228,7 +14228,7 @@ L:	linux-media@vger.kernel.org
+ S:	Supported
+ W:	https://linuxtv.org
+ W:	http://netup.tv/
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/dvb-frontends/horus3a*
+ 
+ MEDIA DRIVERS FOR LNBH25
+@@ -14237,14 +14237,14 @@ L:	linux-media@vger.kernel.org
+ S:	Supported
+ W:	https://linuxtv.org
+ W:	http://netup.tv/
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/dvb-frontends/lnbh25*
+ 
+ MEDIA DRIVERS FOR MXL5XX TUNER DEMODULATORS
+ L:	linux-media@vger.kernel.org
+ S:	Orphan
+ W:	https://linuxtv.org
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/dvb-frontends/mxl5xx*
+ 
+ MEDIA DRIVERS FOR NETUP PCI UNIVERSAL DVB devices
+@@ -14253,7 +14253,7 @@ L:	linux-media@vger.kernel.org
+ S:	Supported
+ W:	https://linuxtv.org
+ W:	http://netup.tv/
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/pci/netup_unidvb/*
+ 
+ MEDIA DRIVERS FOR NVIDIA TEGRA - VDE
+@@ -14261,7 +14261,7 @@ M:	Dmitry Osipenko <digetx@gmail.com>
+ L:	linux-media@vger.kernel.org
+ L:	linux-tegra@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/devicetree/bindings/media/nvidia,tegra-vde.yaml
+ F:	drivers/media/platform/nvidia/tegra-vde/
+ 
+@@ -14270,7 +14270,7 @@ M:	Jacopo Mondi <jacopo@jmondi.org>
+ L:	linux-media@vger.kernel.org
+ L:	linux-renesas-soc@vger.kernel.org
+ S:	Supported
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/devicetree/bindings/media/renesas,ceu.yaml
+ F:	drivers/media/platform/renesas/renesas-ceu.c
+ F:	include/media/drv-intf/renesas-ceu.h
+@@ -14280,7 +14280,7 @@ M:	Fabrizio Castro <fabrizio.castro.jz@renesas.com>
+ L:	linux-media@vger.kernel.org
+ L:	linux-renesas-soc@vger.kernel.org
+ S:	Supported
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/devicetree/bindings/media/renesas,drif.yaml
+ F:	drivers/media/platform/renesas/rcar_drif.c
+ 
+@@ -14289,7 +14289,7 @@ M:	Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ L:	linux-media@vger.kernel.org
+ L:	linux-renesas-soc@vger.kernel.org
+ S:	Supported
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/devicetree/bindings/media/renesas,fcp.yaml
+ F:	drivers/media/platform/renesas/rcar-fcp.c
+ F:	include/media/rcar-fcp.h
+@@ -14299,7 +14299,7 @@ M:	Kieran Bingham <kieran.bingham+renesas@ideasonboard.com>
+ L:	linux-media@vger.kernel.org
+ L:	linux-renesas-soc@vger.kernel.org
+ S:	Supported
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/devicetree/bindings/media/renesas,fdp1.yaml
+ F:	drivers/media/platform/renesas/rcar_fdp1.c
+ 
+@@ -14308,7 +14308,7 @@ M:	Niklas Söderlund <niklas.soderlund@ragnatech.se>
+ L:	linux-media@vger.kernel.org
+ L:	linux-renesas-soc@vger.kernel.org
+ S:	Supported
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/devicetree/bindings/media/renesas,csi2.yaml
+ F:	Documentation/devicetree/bindings/media/renesas,isp.yaml
+ F:	Documentation/devicetree/bindings/media/renesas,vin.yaml
+@@ -14322,7 +14322,7 @@ M:	Kieran Bingham <kieran.bingham+renesas@ideasonboard.com>
+ L:	linux-media@vger.kernel.org
+ L:	linux-renesas-soc@vger.kernel.org
+ S:	Supported
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/devicetree/bindings/media/renesas,vsp1.yaml
+ F:	drivers/media/platform/renesas/vsp1/
+ 
+@@ -14330,14 +14330,14 @@ MEDIA DRIVERS FOR ST STV0910 DEMODULATOR ICs
+ L:	linux-media@vger.kernel.org
+ S:	Orphan
+ W:	https://linuxtv.org
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/dvb-frontends/stv0910*
+ 
+ MEDIA DRIVERS FOR ST STV6111 TUNER ICs
+ L:	linux-media@vger.kernel.org
+ S:	Orphan
+ W:	https://linuxtv.org
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/dvb-frontends/stv6111*
+ 
+ MEDIA DRIVERS FOR STM32 - DCMI / DCMIPP
+@@ -14345,7 +14345,7 @@ M:	Hugues Fruchet <hugues.fruchet@foss.st.com>
+ M:	Alain Volmat <alain.volmat@foss.st.com>
+ L:	linux-media@vger.kernel.org
+ S:	Supported
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/devicetree/bindings/media/st,stm32-dcmi.yaml
+ F:	Documentation/devicetree/bindings/media/st,stm32-dcmipp.yaml
+ F:	drivers/media/platform/st/stm32/stm32-dcmi.c
+@@ -14357,7 +14357,7 @@ L:	linux-media@vger.kernel.org
+ S:	Maintained
+ W:	https://linuxtv.org
+ Q:	http://patchwork.kernel.org/project/linux-media/list/
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/admin-guide/media/
+ F:	Documentation/devicetree/bindings/media/
+ F:	Documentation/driver-api/media/
+@@ -14933,7 +14933,7 @@ L:	linux-media@vger.kernel.org
+ L:	linux-amlogic@lists.infradead.org
+ S:	Supported
+ W:	http://linux-meson.com/
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/devicetree/bindings/media/cec/amlogic,meson-gx-ao-cec.yaml
+ F:	drivers/media/cec/platform/meson/ao-cec-g12a.c
+ F:	drivers/media/cec/platform/meson/ao-cec.c
+@@ -14943,7 +14943,7 @@ M:	Neil Armstrong <neil.armstrong@linaro.org>
+ L:	linux-media@vger.kernel.org
+ L:	linux-amlogic@lists.infradead.org
+ S:	Supported
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/devicetree/bindings/media/amlogic,axg-ge2d.yaml
+ F:	drivers/media/platform/amlogic/meson-ge2d/
+ 
+@@ -14959,7 +14959,7 @@ M:	Neil Armstrong <neil.armstrong@linaro.org>
+ L:	linux-media@vger.kernel.org
+ L:	linux-amlogic@lists.infradead.org
+ S:	Supported
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/devicetree/bindings/media/amlogic,gx-vdec.yaml
+ F:	drivers/staging/media/meson/vdec/
+ 
+@@ -15557,7 +15557,7 @@ M:	Hans Verkuil <hverkuil@xs4all.nl>
+ L:	linux-media@vger.kernel.org
+ S:	Odd Fixes
+ W:	https://linuxtv.org
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/radio/radio-miropcm20*
+ 
+ MITSUMI MM8013 FG DRIVER
+@@ -15709,7 +15709,7 @@ MR800 AVERMEDIA USB FM RADIO DRIVER
+ M:	Alexey Klimov <klimov.linux@gmail.com>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/radio/radio-mr800.c
+ 
+ MRF24J40 IEEE 802.15.4 RADIO DRIVER
+@@ -15776,7 +15776,7 @@ MT9M114 ONSEMI SENSOR DRIVER
+ M:	Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/devicetree/bindings/media/i2c/onnn,mt9m114.yaml
+ F:	drivers/media/i2c/mt9m114.c
+ 
+@@ -15784,7 +15784,7 @@ MT9P031 APTINA CAMERA SENSOR
+ M:	Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/devicetree/bindings/media/i2c/aptina,mt9p031.yaml
+ F:	drivers/media/i2c/mt9p031.c
+ F:	include/media/i2c/mt9p031.h
+@@ -15793,7 +15793,7 @@ MT9T112 APTINA CAMERA SENSOR
+ M:	Jacopo Mondi <jacopo@jmondi.org>
+ L:	linux-media@vger.kernel.org
+ S:	Odd Fixes
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/i2c/mt9t112.c
+ F:	include/media/i2c/mt9t112.h
+ 
+@@ -15801,7 +15801,7 @@ MT9V032 APTINA CAMERA SENSOR
+ M:	Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/devicetree/bindings/media/i2c/mt9v032.txt
+ F:	drivers/media/i2c/mt9v032.c
+ F:	include/media/i2c/mt9v032.h
+@@ -15810,7 +15810,7 @@ MT9V111 APTINA CAMERA SENSOR
+ M:	Jacopo Mondi <jacopo@jmondi.org>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/devicetree/bindings/media/i2c/aptina,mt9v111.yaml
+ F:	drivers/media/i2c/mt9v111.c
+ 
+@@ -17005,13 +17005,13 @@ OMNIVISION OV01A10 SENSOR DRIVER
+ M:	Bingbu Cao <bingbu.cao@intel.com>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/i2c/ov01a10.c
+ 
+ OMNIVISION OV02A10 SENSOR DRIVER
+ L:	linux-media@vger.kernel.org
+ S:	Orphan
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/devicetree/bindings/media/i2c/ovti,ov02a10.yaml
+ F:	drivers/media/i2c/ov02a10.c
+ 
+@@ -17019,28 +17019,28 @@ OMNIVISION OV08D10 SENSOR DRIVER
+ M:	Jimmy Su <jimmy.su@intel.com>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/i2c/ov08d10.c
+ 
+ OMNIVISION OV08X40 SENSOR DRIVER
+ M:	Jason Chen <jason.z.chen@intel.com>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/i2c/ov08x40.c
+ 
+ OMNIVISION OV13858 SENSOR DRIVER
+ M:	Sakari Ailus <sakari.ailus@linux.intel.com>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/i2c/ov13858.c
+ 
+ OMNIVISION OV13B10 SENSOR DRIVER
+ M:	Arec Kao <arec.kao@intel.com>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/i2c/ov13b10.c
+ 
+ OMNIVISION OV2680 SENSOR DRIVER
+@@ -17048,7 +17048,7 @@ M:	Rui Miguel Silva <rmfrfs@gmail.com>
+ M:	Hans de Goede <hansg@kernel.org>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/devicetree/bindings/media/i2c/ovti,ov2680.yaml
+ F:	drivers/media/i2c/ov2680.c
+ 
+@@ -17056,7 +17056,7 @@ OMNIVISION OV2685 SENSOR DRIVER
+ M:	Shunqian Zheng <zhengsq@rock-chips.com>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/devicetree/bindings/media/i2c/ovti,ov2685.yaml
+ F:	drivers/media/i2c/ov2685.c
+ 
+@@ -17066,14 +17066,14 @@ R:	Sakari Ailus <sakari.ailus@linux.intel.com>
+ R:	Bingbu Cao <bingbu.cao@intel.com>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/i2c/ov2740.c
+ 
+ OMNIVISION OV4689 SENSOR DRIVER
+ M:	Mikhail Rudenko <mike.rudenko@gmail.com>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/devicetree/bindings/media/i2c/ovti,ov4689.yaml
+ F:	drivers/media/i2c/ov4689.c
+ 
+@@ -17081,7 +17081,7 @@ OMNIVISION OV5640 SENSOR DRIVER
+ M:	Steve Longerbeam <slongerbeam@gmail.com>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/i2c/ov5640.c
+ 
+ OMNIVISION OV5647 SENSOR DRIVER
+@@ -17089,7 +17089,7 @@ M:	Dave Stevenson <dave.stevenson@raspberrypi.com>
+ M:	Jacopo Mondi <jacopo@jmondi.org>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/devicetree/bindings/media/i2c/ovti,ov5647.yaml
+ F:	drivers/media/i2c/ov5647.c
+ 
+@@ -17097,7 +17097,7 @@ OMNIVISION OV5670 SENSOR DRIVER
+ M:	Sakari Ailus <sakari.ailus@linux.intel.com>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/devicetree/bindings/media/i2c/ovti,ov5670.yaml
+ F:	drivers/media/i2c/ov5670.c
+ 
+@@ -17105,7 +17105,7 @@ OMNIVISION OV5675 SENSOR DRIVER
+ M:	Sakari Ailus <sakari.ailus@linux.intel.com>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/devicetree/bindings/media/i2c/ovti,ov5675.yaml
+ F:	drivers/media/i2c/ov5675.c
+ 
+@@ -17113,7 +17113,7 @@ OMNIVISION OV5693 SENSOR DRIVER
+ M:	Daniel Scally <djrscally@gmail.com>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/devicetree/bindings/media/i2c/ovti,ov5693.yaml
+ F:	drivers/media/i2c/ov5693.c
+ 
+@@ -17121,21 +17121,21 @@ OMNIVISION OV5695 SENSOR DRIVER
+ M:	Shunqian Zheng <zhengsq@rock-chips.com>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/i2c/ov5695.c
+ 
+ OMNIVISION OV64A40 SENSOR DRIVER
+ M:	Jacopo Mondi <jacopo.mondi@ideasonboard.com>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/devicetree/bindings/media/i2c/ovti,ov64a40.yaml
+ F:	drivers/media/i2c/ov64a40.c
+ 
+ OMNIVISION OV7670 SENSOR DRIVER
+ L:	linux-media@vger.kernel.org
+ S:	Orphan
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/devicetree/bindings/media/i2c/ov7670.txt
+ F:	drivers/media/i2c/ov7670.c
+ 
+@@ -17143,7 +17143,7 @@ OMNIVISION OV772x SENSOR DRIVER
+ M:	Jacopo Mondi <jacopo@jmondi.org>
+ L:	linux-media@vger.kernel.org
+ S:	Odd fixes
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/devicetree/bindings/media/i2c/ovti,ov772x.yaml
+ F:	drivers/media/i2c/ov772x.c
+ F:	include/media/i2c/ov772x.h
+@@ -17151,7 +17151,7 @@ F:	include/media/i2c/ov772x.h
+ OMNIVISION OV7740 SENSOR DRIVER
+ L:	linux-media@vger.kernel.org
+ S:	Orphan
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/devicetree/bindings/media/i2c/ov7740.txt
+ F:	drivers/media/i2c/ov7740.c
+ 
+@@ -17159,7 +17159,7 @@ OMNIVISION OV8856 SENSOR DRIVER
+ M:	Sakari Ailus <sakari.ailus@linux.intel.com>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/devicetree/bindings/media/i2c/ovti,ov8856.yaml
+ F:	drivers/media/i2c/ov8856.c
+ 
+@@ -17168,7 +17168,7 @@ M:	Jacopo Mondi <jacopo.mondi@ideasonboard.com>
+ M:	Nicholas Roth <nicholas@rothemail.net>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/devicetree/bindings/media/i2c/ovti,ov8858.yaml
+ F:	drivers/media/i2c/ov8858.c
+ 
+@@ -17176,7 +17176,7 @@ OMNIVISION OV9282 SENSOR DRIVER
+ M:	Dave Stevenson <dave.stevenson@raspberrypi.com>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/devicetree/bindings/media/i2c/ovti,ov9282.yaml
+ F:	drivers/media/i2c/ov9282.c
+ 
+@@ -17192,7 +17192,7 @@ R:	Akinobu Mita <akinobu.mita@gmail.com>
+ R:	Sylwester Nawrocki <s.nawrocki@samsung.com>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/devicetree/bindings/media/i2c/ov9650.txt
+ F:	drivers/media/i2c/ov9650.c
+ 
+@@ -17201,7 +17201,7 @@ M:	Tianshu Qiu <tian.shu.qiu@intel.com>
+ R:	Bingbu Cao <bingbu.cao@intel.com>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/i2c/ov9734.c
+ 
+ ONBOARD USB HUB DRIVER
+@@ -18646,7 +18646,7 @@ PULSE8-CEC DRIVER
+ M:	Hans Verkuil <hverkuil@xs4all.nl>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/cec/usb/pulse8/
+ 
+ PURELIFI PLFXLC DRIVER
+@@ -18661,7 +18661,7 @@ L:	pvrusb2@isely.net	(subscribers-only)
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+ W:	http://www.isely.net/pvrusb2/
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/driver-api/media/drivers/pvrusb2*
+ F:	drivers/media/usb/pvrusb2/
+ 
+@@ -18669,7 +18669,7 @@ PWC WEBCAM DRIVER
+ M:	Hans Verkuil <hverkuil@xs4all.nl>
+ L:	linux-media@vger.kernel.org
+ S:	Odd Fixes
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/usb/pwc/*
+ F:	include/trace/events/pwc.h
+ 
+@@ -19173,7 +19173,7 @@ R:	Bryan O'Donoghue <bryan.odonoghue@linaro.org>
+ L:	linux-media@vger.kernel.org
+ L:	linux-arm-msm@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/devicetree/bindings/media/*venus*
+ F:	drivers/media/platform/qcom/venus/
+ 
+@@ -19218,14 +19218,14 @@ RADIOSHARK RADIO DRIVER
+ M:	Hans Verkuil <hverkuil@xs4all.nl>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/radio/radio-shark.c
+ 
+ RADIOSHARK2 RADIO DRIVER
+ M:	Hans Verkuil <hverkuil@xs4all.nl>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/radio/radio-shark2.c
+ F:	drivers/media/radio/radio-tea5777.c
+ 
+@@ -19249,7 +19249,7 @@ RAINSHADOW-CEC DRIVER
+ M:	Hans Verkuil <hverkuil@xs4all.nl>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/cec/usb/rainshadow/
+ 
+ RALINK MIPS ARCHITECTURE
+@@ -19333,7 +19333,7 @@ M:	Sean Young <sean@mess.org>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+ W:	http://linuxtv.org
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/driver-api/media/rc-core.rst
+ F:	Documentation/userspace-api/media/rc/
+ F:	drivers/media/rc/
+@@ -20077,7 +20077,7 @@ ROTATION DRIVER FOR ALLWINNER A83T
+ M:	Jernej Skrabec <jernej.skrabec@gmail.com>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/devicetree/bindings/media/allwinner,sun8i-a83t-de2-rotate.yaml
+ F:	drivers/media/platform/sunxi/sun8i-rotate/
+ 
+@@ -20331,7 +20331,7 @@ M:	Hans Verkuil <hverkuil@xs4all.nl>
+ L:	linux-media@vger.kernel.org
+ S:	Odd Fixes
+ W:	https://linuxtv.org
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/i2c/saa6588*
+ 
+ SAA7134 VIDEO4LINUX DRIVER
+@@ -20339,7 +20339,7 @@ M:	Mauro Carvalho Chehab <mchehab@kernel.org>
+ L:	linux-media@vger.kernel.org
+ S:	Odd fixes
+ W:	https://linuxtv.org
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/driver-api/media/drivers/saa7134*
+ F:	drivers/media/pci/saa7134/
+ 
+@@ -20347,7 +20347,7 @@ SAA7146 VIDEO4LINUX-2 DRIVER
+ M:	Hans Verkuil <hverkuil@xs4all.nl>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/common/saa7146/
+ F:	drivers/media/pci/saa7146/
+ F:	include/media/drv-intf/saa7146*
+@@ -20965,7 +20965,7 @@ SHARP RJ54N1CB0C SENSOR DRIVER
+ M:	Jacopo Mondi <jacopo@jmondi.org>
+ L:	linux-media@vger.kernel.org
+ S:	Odd fixes
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/i2c/rj54n1cb0c.c
+ F:	include/media/i2c/rj54n1cb0c.h
+ 
+@@ -21015,7 +21015,7 @@ M:	Hans Verkuil <hverkuil@xs4all.nl>
+ L:	linux-media@vger.kernel.org
+ S:	Odd Fixes
+ W:	https://linuxtv.org
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/devicetree/bindings/media/silabs,si470x.yaml
+ F:	drivers/media/radio/si470x/radio-si470x-i2c.c
+ 
+@@ -21024,7 +21024,7 @@ M:	Hans Verkuil <hverkuil@xs4all.nl>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+ W:	https://linuxtv.org
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/radio/si470x/radio-si470x-common.c
+ F:	drivers/media/radio/si470x/radio-si470x-usb.c
+ F:	drivers/media/radio/si470x/radio-si470x.h
+@@ -21034,7 +21034,7 @@ M:	Eduardo Valentin <edubezval@gmail.com>
+ L:	linux-media@vger.kernel.org
+ S:	Odd Fixes
+ W:	https://linuxtv.org
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/radio/si4713/si4713.?
+ 
+ SI4713 FM RADIO TRANSMITTER PLATFORM DRIVER
+@@ -21042,7 +21042,7 @@ M:	Eduardo Valentin <edubezval@gmail.com>
+ L:	linux-media@vger.kernel.org
+ S:	Odd Fixes
+ W:	https://linuxtv.org
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/radio/si4713/radio-platform-si4713.c
+ 
+ SI4713 FM RADIO TRANSMITTER USB DRIVER
+@@ -21050,7 +21050,7 @@ M:	Hans Verkuil <hverkuil@xs4all.nl>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+ W:	https://linuxtv.org
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/radio/si4713/radio-usb-si4713.c
+ 
+ SIANO DVB DRIVER
+@@ -21058,7 +21058,7 @@ M:	Mauro Carvalho Chehab <mchehab@kernel.org>
+ L:	linux-media@vger.kernel.org
+ S:	Odd fixes
+ W:	https://linuxtv.org
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/common/siano/
+ F:	drivers/media/mmc/siano/
+ F:	drivers/media/usb/siano/
+@@ -21434,14 +21434,14 @@ SONY IMX208 SENSOR DRIVER
+ M:	Sakari Ailus <sakari.ailus@linux.intel.com>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/i2c/imx208.c
+ 
+ SONY IMX214 SENSOR DRIVER
+ M:	Ricardo Ribalda <ribalda@kernel.org>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/devicetree/bindings/media/i2c/sony,imx214.yaml
+ F:	drivers/media/i2c/imx214.c
+ 
+@@ -21449,7 +21449,7 @@ SONY IMX219 SENSOR DRIVER
+ M:	Dave Stevenson <dave.stevenson@raspberrypi.com>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/devicetree/bindings/media/i2c/imx219.yaml
+ F:	drivers/media/i2c/imx219.c
+ 
+@@ -21457,7 +21457,7 @@ SONY IMX258 SENSOR DRIVER
+ M:	Sakari Ailus <sakari.ailus@linux.intel.com>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/devicetree/bindings/media/i2c/sony,imx258.yaml
+ F:	drivers/media/i2c/imx258.c
+ 
+@@ -21465,7 +21465,7 @@ SONY IMX274 SENSOR DRIVER
+ M:	Leon Luo <leonl@leopardimaging.com>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/devicetree/bindings/media/i2c/sony,imx274.yaml
+ F:	drivers/media/i2c/imx274.c
+ 
+@@ -21474,7 +21474,7 @@ M:	Kieran Bingham <kieran.bingham@ideasonboard.com>
+ M:	Umang Jain <umang.jain@ideasonboard.com>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/devicetree/bindings/media/i2c/sony,imx283.yaml
+ F:	drivers/media/i2c/imx283.c
+ 
+@@ -21482,7 +21482,7 @@ SONY IMX290 SENSOR DRIVER
+ M:	Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/devicetree/bindings/media/i2c/sony,imx290.yaml
+ F:	drivers/media/i2c/imx290.c
+ 
+@@ -21491,7 +21491,7 @@ M:	Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ M:	Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/devicetree/bindings/media/i2c/sony,imx296.yaml
+ F:	drivers/media/i2c/imx296.c
+ 
+@@ -21499,20 +21499,20 @@ SONY IMX319 SENSOR DRIVER
+ M:	Bingbu Cao <bingbu.cao@intel.com>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/i2c/imx319.c
+ 
+ SONY IMX334 SENSOR DRIVER
+ L:	linux-media@vger.kernel.org
+ S:	Orphan
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/devicetree/bindings/media/i2c/sony,imx334.yaml
+ F:	drivers/media/i2c/imx334.c
+ 
+ SONY IMX335 SENSOR DRIVER
+ L:	linux-media@vger.kernel.org
+ S:	Orphan
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/devicetree/bindings/media/i2c/sony,imx335.yaml
+ F:	drivers/media/i2c/imx335.c
+ 
+@@ -21520,13 +21520,13 @@ SONY IMX355 SENSOR DRIVER
+ M:	Tianshu Qiu <tian.shu.qiu@intel.com>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/i2c/imx355.c
+ 
+ SONY IMX412 SENSOR DRIVER
+ L:	linux-media@vger.kernel.org
+ S:	Orphan
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/devicetree/bindings/media/i2c/sony,imx412.yaml
+ F:	drivers/media/i2c/imx412.c
+ 
+@@ -21534,7 +21534,7 @@ SONY IMX415 SENSOR DRIVER
+ M:	Michael Riesch <michael.riesch@wolfvision.net>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/devicetree/bindings/media/i2c/sony,imx415.yaml
+ F:	drivers/media/i2c/imx415.c
+ 
+@@ -21823,7 +21823,7 @@ M:	Benjamin Mugnier <benjamin.mugnier@foss.st.com>
+ M:	Sylvain Petinot <sylvain.petinot@foss.st.com>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/devicetree/bindings/media/i2c/st,st-mipid02.yaml
+ F:	drivers/media/i2c/st-mipid02.c
+ 
+@@ -21859,7 +21859,7 @@ M:	Benjamin Mugnier <benjamin.mugnier@foss.st.com>
+ M:	Sylvain Petinot <sylvain.petinot@foss.st.com>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/devicetree/bindings/media/i2c/st,st-vgxy61.yaml
+ F:	Documentation/userspace-api/media/drivers/vgxy61.rst
+ F:	drivers/media/i2c/vgxy61.c
+@@ -22149,7 +22149,7 @@ STK1160 USB VIDEO CAPTURE DRIVER
+ M:	Ezequiel Garcia <ezequiel@vanguardiasur.com.ar>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/usb/stk1160/
+ 
+ STM32 AUDIO (ASoC) DRIVERS
+@@ -22586,7 +22586,7 @@ L:	linux-media@vger.kernel.org
+ S:	Maintained
+ W:	https://linuxtv.org
+ Q:	http://patchwork.linuxtv.org/project/linux-media/list/
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/tuners/tda18250*
+ 
+ TDA18271 MEDIA DRIVER
+@@ -22632,7 +22632,7 @@ M:	Hans Verkuil <hverkuil@xs4all.nl>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+ W:	https://linuxtv.org
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/i2c/tda9840*
+ 
+ TEA5761 TUNER DRIVER
+@@ -22640,7 +22640,7 @@ M:	Mauro Carvalho Chehab <mchehab@kernel.org>
+ L:	linux-media@vger.kernel.org
+ S:	Odd fixes
+ W:	https://linuxtv.org
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/tuners/tea5761.*
+ 
+ TEA5767 TUNER DRIVER
+@@ -22648,7 +22648,7 @@ M:	Mauro Carvalho Chehab <mchehab@kernel.org>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+ W:	https://linuxtv.org
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/tuners/tea5767.*
+ 
+ TEA6415C MEDIA DRIVER
+@@ -22656,7 +22656,7 @@ M:	Hans Verkuil <hverkuil@xs4all.nl>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+ W:	https://linuxtv.org
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/i2c/tea6415c*
+ 
+ TEA6420 MEDIA DRIVER
+@@ -22664,7 +22664,7 @@ M:	Hans Verkuil <hverkuil@xs4all.nl>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+ W:	https://linuxtv.org
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/i2c/tea6420*
+ 
+ TEAM DRIVER
+@@ -22952,7 +22952,7 @@ M:	Hans Verkuil <hverkuil@xs4all.nl>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+ W:	https://linuxtv.org
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/radio/radio-raremono.c
+ 
+ THERMAL
+@@ -23028,7 +23028,7 @@ M:	Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ M:	Paul Elder <paul.elder@ideasonboard.com>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/devicetree/bindings/media/i2c/thine,thp7312.yaml
+ F:	Documentation/userspace-api/media/drivers/thp7312.rst
+ F:	drivers/media/i2c/thp7312.c
+@@ -23615,7 +23615,7 @@ M:	Hans Verkuil <hverkuil@xs4all.nl>
+ L:	linux-media@vger.kernel.org
+ S:	Odd Fixes
+ W:	https://linuxtv.org
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/pci/tw68/
+ 
+ TW686X VIDEO4LINUX DRIVER
+@@ -23623,7 +23623,7 @@ M:	Ezequiel Garcia <ezequiel@vanguardiasur.com.ar>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+ W:	http://linuxtv.org
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/pci/tw686x/
+ 
+ U-BOOT ENVIRONMENT VARIABLES
+@@ -24106,7 +24106,7 @@ M:	Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+ W:	http://www.ideasonboard.org/uvc/
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/usb/uvc/
+ F:	include/uapi/linux/uvcvideo.h
+ 
+@@ -24212,7 +24212,7 @@ V4L2 ASYNC AND FWNODE FRAMEWORKS
+ M:	Sakari Ailus <sakari.ailus@linux.intel.com>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/v4l2-core/v4l2-async.c
+ F:	drivers/media/v4l2-core/v4l2-fwnode.c
+ F:	include/media/v4l2-async.h
+@@ -24378,7 +24378,7 @@ M:	Hans Verkuil <hverkuil-cisco@xs4all.nl>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+ W:	https://linuxtv.org
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/test-drivers/vicodec/*
+ 
+ VIDEO I2C POLLING DRIVER
+@@ -24406,7 +24406,7 @@ M:	Daniel W. S. Almeida <dwlsalmeida@gmail.com>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+ W:	https://linuxtv.org
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/test-drivers/vidtv/*
+ 
+ VIMC VIRTUAL MEDIA CONTROLLER DRIVER
+@@ -24415,7 +24415,7 @@ R:	Kieran Bingham <kieran.bingham@ideasonboard.com>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+ W:	https://linuxtv.org
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/test-drivers/vimc/*
+ 
+ VIRT LIB
+@@ -24663,7 +24663,7 @@ M:	Hans Verkuil <hverkuil@xs4all.nl>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+ W:	https://linuxtv.org
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/test-drivers/vivid/*
+ 
+ VM SOCKETS (AF_VSOCK)
+@@ -25217,7 +25217,7 @@ M:	Mauro Carvalho Chehab <mchehab@kernel.org>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+ W:	https://linuxtv.org
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	drivers/media/tuners/xc2028.*
+ 
+ XDP (eXpress Data Path)
+@@ -25358,8 +25358,7 @@ F:	include/xen/arm/swiotlb-xen.h
+ F:	include/xen/swiotlb-xen.h
+ 
+ XFS FILESYSTEM
+-M:	Carlos Maiolino <cem@kernel.org>
+-R:	Darrick J. Wong <djwong@kernel.org>
++M:	Darrick J. Wong <djwong@kernel.org>
+ L:	linux-xfs@vger.kernel.org
+ S:	Supported
+ W:	http://xfs.org/
+@@ -25441,7 +25440,7 @@ XILINX VIDEO IP CORES
+ M:	Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ L:	linux-media@vger.kernel.org
+ S:	Supported
+-T:	git git://linuxtv.org/media_tree.git
++T:	git git://linuxtv.org/media.git
+ F:	Documentation/devicetree/bindings/media/xilinx/
+ F:	drivers/media/platform/xilinx/
+ F:	include/uapi/linux/xilinx-v4l2-controls.h
+diff --git a/Makefile b/Makefile
+index 70070e64d267c1..da6e99309a4da4 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 12
+-SUBLEVEL = 1
++SUBLEVEL = 2
+ EXTRAVERSION =
+ NAME = Baby Opossum Posse
+ 
+diff --git a/arch/arc/kernel/devtree.c b/arch/arc/kernel/devtree.c
+index 4c9e61457b2f69..cc6ac7d128aa1a 100644
+--- a/arch/arc/kernel/devtree.c
++++ b/arch/arc/kernel/devtree.c
+@@ -62,7 +62,7 @@ const struct machine_desc * __init setup_machine_fdt(void *dt)
+ 	const struct machine_desc *mdesc;
+ 	unsigned long dt_root;
+ 
+-	if (!early_init_dt_scan(dt))
++	if (!early_init_dt_scan(dt, __pa(dt)))
+ 		return NULL;
+ 
+ 	mdesc = of_flat_dt_match_machine(NULL, arch_get_next_mach);
+diff --git a/arch/arm/boot/dts/allwinner/sun9i-a80-cubieboard4.dts b/arch/arm/boot/dts/allwinner/sun9i-a80-cubieboard4.dts
+index c8ca8cb7f5c94e..52ad95a2063aaf 100644
+--- a/arch/arm/boot/dts/allwinner/sun9i-a80-cubieboard4.dts
++++ b/arch/arm/boot/dts/allwinner/sun9i-a80-cubieboard4.dts
+@@ -280,8 +280,8 @@ reg_dcdc4: dcdc4 {
+ 
+ 			reg_dcdc5: dcdc5 {
+ 				regulator-always-on;
+-				regulator-min-microvolt = <1425000>;
+-				regulator-max-microvolt = <1575000>;
++				regulator-min-microvolt = <1450000>;
++				regulator-max-microvolt = <1550000>;
+ 				regulator-name = "vcc-dram";
+ 			};
+ 
+diff --git a/arch/arm/boot/dts/microchip/sam9x60.dtsi b/arch/arm/boot/dts/microchip/sam9x60.dtsi
+index 04a6d716ecaf8a..1e8fcb5d4700d8 100644
+--- a/arch/arm/boot/dts/microchip/sam9x60.dtsi
++++ b/arch/arm/boot/dts/microchip/sam9x60.dtsi
+@@ -186,6 +186,7 @@ AT91_XDMAC_DT_PER_IF(1) |
+ 					dma-names = "tx", "rx";
+ 					clocks = <&pmc PMC_TYPE_PERIPHERAL 13>;
+ 					clock-names = "usart";
++					atmel,usart-mode = <AT91_USART_MODE_SERIAL>;
+ 					atmel,use-dma-rx;
+ 					atmel,use-dma-tx;
+ 					atmel,fifo-size = <16>;
+@@ -388,6 +389,7 @@ AT91_XDMAC_DT_PER_IF(1) |
+ 					dma-names = "tx", "rx";
+ 					clocks = <&pmc PMC_TYPE_PERIPHERAL 32>;
+ 					clock-names = "usart";
++					atmel,usart-mode = <AT91_USART_MODE_SERIAL>;
+ 					atmel,use-dma-rx;
+ 					atmel,use-dma-tx;
+ 					atmel,fifo-size = <16>;
+@@ -439,6 +441,7 @@ AT91_XDMAC_DT_PER_IF(1) |
+ 					dma-names = "tx", "rx";
+ 					clocks = <&pmc PMC_TYPE_PERIPHERAL 33>;
+ 					clock-names = "usart";
++					atmel,usart-mode = <AT91_USART_MODE_SERIAL>;
+ 					atmel,use-dma-rx;
+ 					atmel,use-dma-tx;
+ 					atmel,fifo-size = <16>;
+@@ -598,6 +601,7 @@ AT91_XDMAC_DT_PER_IF(1) |
+ 					dma-names = "tx", "rx";
+ 					clocks = <&pmc PMC_TYPE_PERIPHERAL 9>;
+ 					clock-names = "usart";
++					atmel,usart-mode = <AT91_USART_MODE_SERIAL>;
+ 					atmel,use-dma-rx;
+ 					atmel,use-dma-tx;
+ 					atmel,fifo-size = <16>;
+@@ -649,6 +653,7 @@ AT91_XDMAC_DT_PER_IF(1) |
+ 					dma-names = "tx", "rx";
+ 					clocks = <&pmc PMC_TYPE_PERIPHERAL 10>;
+ 					clock-names = "usart";
++					atmel,usart-mode = <AT91_USART_MODE_SERIAL>;
+ 					atmel,use-dma-rx;
+ 					atmel,use-dma-tx;
+ 					atmel,fifo-size = <16>;
+@@ -700,6 +705,7 @@ AT91_XDMAC_DT_PER_IF(1) |
+ 					dma-names = "tx", "rx";
+ 					clocks = <&pmc PMC_TYPE_PERIPHERAL 11>;
+ 					clock-names = "usart";
++					atmel,usart-mode = <AT91_USART_MODE_SERIAL>;
+ 					atmel,use-dma-rx;
+ 					atmel,use-dma-tx;
+ 					atmel,fifo-size = <16>;
+@@ -751,6 +757,7 @@ AT91_XDMAC_DT_PER_IF(1) |
+ 					dma-names = "tx", "rx";
+ 					clocks = <&pmc PMC_TYPE_PERIPHERAL 5>;
+ 					clock-names = "usart";
++					atmel,usart-mode = <AT91_USART_MODE_SERIAL>;
+ 					atmel,use-dma-rx;
+ 					atmel,use-dma-tx;
+ 					atmel,fifo-size = <16>;
+@@ -821,6 +828,7 @@ AT91_XDMAC_DT_PER_IF(1) |
+ 					dma-names = "tx", "rx";
+ 					clocks = <&pmc PMC_TYPE_PERIPHERAL 6>;
+ 					clock-names = "usart";
++					atmel,usart-mode = <AT91_USART_MODE_SERIAL>;
+ 					atmel,use-dma-rx;
+ 					atmel,use-dma-tx;
+ 					atmel,fifo-size = <16>;
+@@ -891,6 +899,7 @@ AT91_XDMAC_DT_PER_IF(1) |
+ 					dma-names = "tx", "rx";
+ 					clocks = <&pmc PMC_TYPE_PERIPHERAL 7>;
+ 					clock-names = "usart";
++					atmel,usart-mode = <AT91_USART_MODE_SERIAL>;
+ 					atmel,use-dma-rx;
+ 					atmel,use-dma-tx;
+ 					atmel,fifo-size = <16>;
+@@ -961,6 +970,7 @@ AT91_XDMAC_DT_PER_IF(1) |
+ 					dma-names = "tx", "rx";
+ 					clocks = <&pmc PMC_TYPE_PERIPHERAL 8>;
+ 					clock-names = "usart";
++					atmel,usart-mode = <AT91_USART_MODE_SERIAL>;
+ 					atmel,use-dma-rx;
+ 					atmel,use-dma-tx;
+ 					atmel,fifo-size = <16>;
+@@ -1086,6 +1096,7 @@ AT91_XDMAC_DT_PER_IF(1) |
+ 					dma-names = "tx", "rx";
+ 					clocks = <&pmc PMC_TYPE_PERIPHERAL 15>;
+ 					clock-names = "usart";
++					atmel,usart-mode = <AT91_USART_MODE_SERIAL>;
+ 					atmel,use-dma-rx;
+ 					atmel,use-dma-tx;
+ 					atmel,fifo-size = <16>;
+@@ -1137,6 +1148,7 @@ AT91_XDMAC_DT_PER_IF(1) |
+ 					dma-names = "tx", "rx";
+ 					clocks = <&pmc PMC_TYPE_PERIPHERAL 16>;
+ 					clock-names = "usart";
++					atmel,usart-mode = <AT91_USART_MODE_SERIAL>;
+ 					atmel,use-dma-rx;
+ 					atmel,use-dma-tx;
+ 					atmel,fifo-size = <16>;
+diff --git a/arch/arm/boot/dts/renesas/r7s72100-genmai.dts b/arch/arm/boot/dts/renesas/r7s72100-genmai.dts
+index 29ba098f5dd5e8..28e703e0f152b2 100644
+--- a/arch/arm/boot/dts/renesas/r7s72100-genmai.dts
++++ b/arch/arm/boot/dts/renesas/r7s72100-genmai.dts
+@@ -53,7 +53,7 @@ partition@0 {
+ 
+ 			partition@4000000 {
+ 				label = "user1";
+-				reg = <0x04000000 0x40000000>;
++				reg = <0x04000000 0x04000000>;
+ 			};
+ 		};
+ 	};
+diff --git a/arch/arm/boot/dts/ti/omap/omap36xx.dtsi b/arch/arm/boot/dts/ti/omap/omap36xx.dtsi
+index c3d79ecd56e398..c217094b50abc9 100644
+--- a/arch/arm/boot/dts/ti/omap/omap36xx.dtsi
++++ b/arch/arm/boot/dts/ti/omap/omap36xx.dtsi
+@@ -72,6 +72,7 @@ opp-1000000000 {
+ 					 <1375000 1375000 1375000>;
+ 			/* only on am/dm37x with speed-binned bit set */
+ 			opp-supported-hw = <0xffffffff 2>;
++			turbo-mode;
+ 		};
+ 	};
+ 
+diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c
+index fdb74e64206a8a..3b78966e750a2d 100644
+--- a/arch/arm/kernel/devtree.c
++++ b/arch/arm/kernel/devtree.c
+@@ -200,7 +200,7 @@ const struct machine_desc * __init setup_machine_fdt(void *dt_virt)
+ 
+ 	mdesc_best = &__mach_desc_GENERIC_DT;
+ 
+-	if (!dt_virt || !early_init_dt_verify(dt_virt))
++	if (!dt_virt || !early_init_dt_verify(dt_virt, __pa(dt_virt)))
+ 		return NULL;
+ 
+ 	mdesc = of_flat_dt_match_machine(mdesc_best, arch_get_next_mach);
+diff --git a/arch/arm64/boot/dts/freescale/imx8mn-tqma8mqnl-mba8mx-usbotg.dtso b/arch/arm64/boot/dts/freescale/imx8mn-tqma8mqnl-mba8mx-usbotg.dtso
+index 96db07fc9becea..1f2a0fe70a0a26 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mn-tqma8mqnl-mba8mx-usbotg.dtso
++++ b/arch/arm64/boot/dts/freescale/imx8mn-tqma8mqnl-mba8mx-usbotg.dtso
+@@ -29,12 +29,37 @@ usb_dr_connector: endpoint {
+ 	};
+ };
+ 
++/*
++ * rst_usb_hub_hog and sel_usb_hub_hog have property 'output-high',
++ * dt overlay don't support /delete-property/. Both 'output-low' and
++ * 'output-high' will be exist under hog nodes if overlay file set
++ * 'output-low'. Workaround is disable these hog and create new hog with
++ * 'output-low'.
++ */
++
+ &rst_usb_hub_hog {
+-	output-low;
++	status = "disabled";
++};
++
++&expander0 {
++	rst-usb-low-hub-hog {
++		gpio-hog;
++		gpios = <13 0>;
++		output-low;
++		line-name = "RST_USB_HUB#";
++	};
+ };
+ 
+ &sel_usb_hub_hog {
+-	output-low;
++	status = "disabled";
++};
++
++&gpio2 {
++	sel-usb-low-hub-hog {
++		gpio-hog;
++		gpios = <1 GPIO_ACTIVE_HIGH>;
++		output-low;
++	};
+ };
+ 
+ &usbotg1 {
+diff --git a/arch/arm64/boot/dts/mediatek/mt6358.dtsi b/arch/arm64/boot/dts/mediatek/mt6358.dtsi
+index 641d452fbc0830..e23672a2eea4af 100644
+--- a/arch/arm64/boot/dts/mediatek/mt6358.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt6358.dtsi
+@@ -15,12 +15,12 @@ pmic_adc: adc {
+ 			#io-channel-cells = <1>;
+ 		};
+ 
+-		mt6358codec: mt6358codec {
++		mt6358codec: audio-codec {
+ 			compatible = "mediatek,mt6358-sound";
+ 			mediatek,dmic-mode = <0>; /* two-wires */
+ 		};
+ 
+-		mt6358regulator: mt6358regulator {
++		mt6358regulator: regulators {
+ 			compatible = "mediatek,mt6358-regulator";
+ 
+ 			mt6358_vdram1_reg: buck_vdram1 {
+diff --git a/arch/arm64/boot/dts/mediatek/mt8173-elm-hana.dtsi b/arch/arm64/boot/dts/mediatek/mt8173-elm-hana.dtsi
+index 8d1cbc92bce320..ae0379fd42a91c 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8173-elm-hana.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8173-elm-hana.dtsi
+@@ -49,6 +49,14 @@ trackpad2: trackpad@2c {
+ 		interrupts-extended = <&pio 117 IRQ_TYPE_LEVEL_LOW>;
+ 		reg = <0x2c>;
+ 		hid-descr-addr = <0x0020>;
++		/*
++		 * The trackpad needs a post-power-on delay of 100ms,
++		 * but at time of writing, the power supply for it on
++		 * this board is always on. The delay is therefore not
++		 * added to avoid impacting the readiness of the
++		 * trackpad.
++		 */
++		vdd-supply = <&mt6397_vgp6_reg>;
+ 		wakeup-source;
+ 	};
+ };
+diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-burnet.dts b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-burnet.dts
+index 19c1e2bee494c9..20b71f2e7159ad 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-burnet.dts
++++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-burnet.dts
+@@ -30,3 +30,6 @@ touchscreen@2c {
+ 	};
+ };
+ 
++&i2c2 {
++	i2c-scl-internal-delay-ns = <4100>;
++};
+diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-cozmo.dts b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-cozmo.dts
+index f34964afe39b53..83bbcfe620835a 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-cozmo.dts
++++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-cozmo.dts
+@@ -18,6 +18,8 @@ &i2c_tunnel {
+ };
+ 
+ &i2c2 {
++	i2c-scl-internal-delay-ns = <25000>;
++
+ 	trackpad@2c {
+ 		compatible = "hid-over-i2c";
+ 		reg = <0x2c>;
+diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-damu.dts b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-damu.dts
+index 0b45aee2e29953..65860b33c01fe8 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-damu.dts
++++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-damu.dts
+@@ -30,3 +30,6 @@ &qca_wifi {
+ 	qcom,ath10k-calibration-variant = "GO_DAMU";
+ };
+ 
++&i2c2 {
++	i2c-scl-internal-delay-ns = <20000>;
++};
+diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-fennel.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-fennel.dtsi
+index bbe6c338f465ee..f9c1ec366b2660 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-fennel.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-fennel.dtsi
+@@ -25,3 +25,6 @@ trackpad@2c {
+ 	};
+ };
+ 
++&i2c2 {
++	i2c-scl-internal-delay-ns = <21500>;
++};
+diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi
+index 783c333107bcbf..49e053b932e76c 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi
+@@ -8,28 +8,32 @@
+ #include <arm/cros-ec-keyboard.dtsi>
+ 
+ / {
+-	pp1200_mipibrdg: pp1200-mipibrdg {
++	pp1000_mipibrdg: pp1000-mipibrdg {
+ 		compatible = "regulator-fixed";
+-		regulator-name = "pp1200_mipibrdg";
++		regulator-name = "pp1000_mipibrdg";
++		regulator-min-microvolt = <1000000>;
++		regulator-max-microvolt = <1000000>;
+ 		pinctrl-names = "default";
+-		pinctrl-0 = <&pp1200_mipibrdg_en>;
++		pinctrl-0 = <&pp1000_mipibrdg_en>;
+ 
+ 		enable-active-high;
+ 		regulator-boot-on;
+ 
+ 		gpio = <&pio 54 GPIO_ACTIVE_HIGH>;
++		vin-supply = <&pp1800_alw>;
+ 	};
+ 
+ 	pp1800_mipibrdg: pp1800-mipibrdg {
+ 		compatible = "regulator-fixed";
+ 		regulator-name = "pp1800_mipibrdg";
+ 		pinctrl-names = "default";
+-		pinctrl-0 = <&pp1800_lcd_en>;
++		pinctrl-0 = <&pp1800_mipibrdg_en>;
+ 
+ 		enable-active-high;
+ 		regulator-boot-on;
+ 
+ 		gpio = <&pio 36 GPIO_ACTIVE_HIGH>;
++		vin-supply = <&pp1800_alw>;
+ 	};
+ 
+ 	pp3300_panel: pp3300-panel {
+@@ -44,18 +48,20 @@ pp3300_panel: pp3300-panel {
+ 		regulator-boot-on;
+ 
+ 		gpio = <&pio 35 GPIO_ACTIVE_HIGH>;
++		vin-supply = <&pp3300_alw>;
+ 	};
+ 
+-	vddio_mipibrdg: vddio-mipibrdg {
++	pp3300_mipibrdg: pp3300-mipibrdg {
+ 		compatible = "regulator-fixed";
+-		regulator-name = "vddio_mipibrdg";
++		regulator-name = "pp3300_mipibrdg";
+ 		pinctrl-names = "default";
+-		pinctrl-0 = <&vddio_mipibrdg_en>;
++		pinctrl-0 = <&pp3300_mipibrdg_en>;
+ 
+ 		enable-active-high;
+ 		regulator-boot-on;
+ 
+ 		gpio = <&pio 37 GPIO_ACTIVE_HIGH>;
++		vin-supply = <&pp3300_alw>;
+ 	};
+ 
+ 	volume_buttons: volume-buttons {
+@@ -146,9 +152,9 @@ anx_bridge: anx7625@58 {
+ 		pinctrl-0 = <&anx7625_pins>;
+ 		enable-gpios = <&pio 45 GPIO_ACTIVE_HIGH>;
+ 		reset-gpios = <&pio 73 GPIO_ACTIVE_HIGH>;
+-		vdd10-supply = <&pp1200_mipibrdg>;
++		vdd10-supply = <&pp1000_mipibrdg>;
+ 		vdd18-supply = <&pp1800_mipibrdg>;
+-		vdd33-supply = <&vddio_mipibrdg>;
++		vdd33-supply = <&pp3300_mipibrdg>;
+ 
+ 		ports {
+ 			#address-cells = <1>;
+@@ -391,14 +397,14 @@ &pio {
+ 		"",
+ 		"";
+ 
+-	pp1200_mipibrdg_en: pp1200-mipibrdg-en {
++	pp1000_mipibrdg_en: pp1000-mipibrdg-en {
+ 		pins1 {
+ 			pinmux = <PINMUX_GPIO54__FUNC_GPIO54>;
+ 			output-low;
+ 		};
+ 	};
+ 
+-	pp1800_lcd_en: pp1800-lcd-en {
++	pp1800_mipibrdg_en: pp1800-mipibrdg-en {
+ 		pins1 {
+ 			pinmux = <PINMUX_GPIO36__FUNC_GPIO36>;
+ 			output-low;
+@@ -460,7 +466,7 @@ trackpad-int {
+ 		};
+ 	};
+ 
+-	vddio_mipibrdg_en: vddio-mipibrdg-en {
++	pp3300_mipibrdg_en: pp3300-mipibrdg-en {
+ 		pins1 {
+ 			pinmux = <PINMUX_GPIO37__FUNC_GPIO37>;
+ 			output-low;
+diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-kakadu.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui-kakadu.dtsi
+index bfb9e42c8acaa7..ff02f63bac29b2 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-kakadu.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-kakadu.dtsi
+@@ -92,9 +92,9 @@ &i2c4 {
+ 	clock-frequency = <400000>;
+ 	vbus-supply = <&mt6358_vcn18_reg>;
+ 
+-	eeprom@54 {
++	eeprom@50 {
+ 		compatible = "atmel,24c32";
+-		reg = <0x54>;
++		reg = <0x50>;
+ 		pagesize = <32>;
+ 		vcc-supply = <&mt6358_vcn18_reg>;
+ 	};
+diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama.dtsi
+index 5c1bf6a1e47586..da6e767b4ceede 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama.dtsi
+@@ -79,9 +79,9 @@ &i2c4 {
+ 	clock-frequency = <400000>;
+ 	vbus-supply = <&mt6358_vcn18_reg>;
+ 
+-	eeprom@54 {
++	eeprom@50 {
+ 		compatible = "atmel,24c64";
+-		reg = <0x54>;
++		reg = <0x50>;
+ 		pagesize = <32>;
+ 		vcc-supply = <&mt6358_vcn18_reg>;
+ 	};
+diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-krane.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui-krane.dtsi
+index 0f5fa893a77426..8b56b8564ed7a2 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-krane.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-krane.dtsi
+@@ -88,9 +88,9 @@ &i2c4 {
+ 	clock-frequency = <400000>;
+ 	vbus-supply = <&mt6358_vcn18_reg>;
+ 
+-	eeprom@54 {
++	eeprom@50 {
+ 		compatible = "atmel,24c32";
+-		reg = <0x54>;
++		reg = <0x50>;
+ 		pagesize = <32>;
+ 		vcc-supply = <&mt6358_vcn18_reg>;
+ 	};
+diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
+index 22924f61ec9ed2..07ae3c8e897b7d 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
+@@ -290,6 +290,11 @@ dsi_out: endpoint {
+ 	};
+ };
+ 
++&dpi0 {
++	/* TODO Re-enable after DP to Type-C port muxing can be described */
++	status = "disabled";
++};
++
+ &gic {
+ 	mediatek,broken-save-restore-fw;
+ };
+diff --git a/arch/arm64/boot/dts/mediatek/mt8183.dtsi b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
+index 266441e999f211..0a6578aacf8280 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8183.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
+@@ -1845,6 +1845,10 @@ dpi0: dpi@14015000 {
+ 				 <&mmsys CLK_MM_DPI_MM>,
+ 				 <&apmixedsys CLK_APMIXED_TVDPLL>;
+ 			clock-names = "pixel", "engine", "pll";
++
++			port {
++				dpi_out: endpoint { };
++			};
+ 		};
+ 
+ 		mutex: mutex@14016000 {
+diff --git a/arch/arm64/boot/dts/mediatek/mt8186-corsola-voltorb.dtsi b/arch/arm64/boot/dts/mediatek/mt8186-corsola-voltorb.dtsi
+index 52ec58128d5615..b495a241b4432b 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8186-corsola-voltorb.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8186-corsola-voltorb.dtsi
+@@ -10,12 +10,6 @@
+ 
+ / {
+ 	chassis-type = "laptop";
+-
+-	max98360a: max98360a {
+-		compatible = "maxim,max98360a";
+-		sdmode-gpios = <&pio 150 GPIO_ACTIVE_HIGH>;
+-		#sound-dai-cells = <0>;
+-	};
+ };
+ 
+ &cpu6 {
+@@ -59,19 +53,14 @@ &cluster1_opp_15 {
+ 	opp-hz = /bits/ 64 <2200000000>;
+ };
+ 
+-&rt1019p{
+-	status = "disabled";
+-};
+-
+ &sound {
+ 	compatible = "mediatek,mt8186-mt6366-rt5682s-max98360-sound";
+-	status = "okay";
++};
+ 
+-	spk-hdmi-playback-dai-link {
+-		codec {
+-			sound-dai = <&it6505dptx>, <&max98360a>;
+-		};
+-	};
++&speaker_codec {
++	compatible = "maxim,max98360a";
++	sdmode-gpios = <&pio 150 GPIO_ACTIVE_HIGH>;
++	/delete-property/ sdb-gpios;
+ };
+ 
+ &spmi {
+diff --git a/arch/arm64/boot/dts/mediatek/mt8186-corsola.dtsi b/arch/arm64/boot/dts/mediatek/mt8186-corsola.dtsi
+index 682c6ad2574d00..0c0b3ac5974525 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8186-corsola.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8186-corsola.dtsi
+@@ -259,15 +259,15 @@ spk-hdmi-playback-dai-link {
+ 			mediatek,clk-provider = "cpu";
+ 			/* RT1019P and IT6505 connected to the same I2S line */
+ 			codec {
+-				sound-dai = <&it6505dptx>, <&rt1019p>;
++				sound-dai = <&it6505dptx>, <&speaker_codec>;
+ 			};
+ 		};
+ 	};
+ 
+-	rt1019p: speaker-codec {
++	speaker_codec: speaker-codec {
+ 		compatible = "realtek,rt1019p";
+ 		pinctrl-names = "default";
+-		pinctrl-0 = <&rt1019p_pins_default>;
++		pinctrl-0 = <&speaker_codec_pins_default>;
+ 		#sound-dai-cells = <0>;
+ 		sdb-gpios = <&pio 150 GPIO_ACTIVE_HIGH>;
+ 	};
+@@ -1179,7 +1179,7 @@ pins {
+ 		};
+ 	};
+ 
+-	rt1019p_pins_default: rt1019p-default-pins {
++	speaker_codec_pins_default: speaker-codec-default-pins {
+ 		pins-sdb {
+ 			pinmux = <PINMUX_GPIO150__FUNC_GPIO150>;
+ 			output-low;
+diff --git a/arch/arm64/boot/dts/mediatek/mt8188.dtsi b/arch/arm64/boot/dts/mediatek/mt8188.dtsi
+index cd27966d2e3c05..91beef22e0a9c6 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8188.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8188.dtsi
+@@ -956,9 +956,9 @@ mfg0: power-domain@MT8188_POWER_DOMAIN_MFG0 {
+ 					#size-cells = <0>;
+ 					#power-domain-cells = <1>;
+ 
+-					power-domain@MT8188_POWER_DOMAIN_MFG1 {
++					mfg1: power-domain@MT8188_POWER_DOMAIN_MFG1 {
+ 						reg = <MT8188_POWER_DOMAIN_MFG1>;
+-						clocks = <&topckgen CLK_APMIXED_MFGPLL>,
++						clocks = <&apmixedsys CLK_APMIXED_MFGPLL>,
+ 							 <&topckgen CLK_TOP_MFG_CORE_TMP>;
+ 						clock-names = "mfg", "alt";
+ 						mediatek,infracfg = <&infracfg_ao>;
+@@ -1689,7 +1689,6 @@ u3port1: usb-phy@700 {
+ 					 <&clk26m>;
+ 				clock-names = "ref", "da_ref";
+ 				#phy-cells = <1>;
+-				status = "disabled";
+ 			};
+ 		};
+ 
+diff --git a/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi b/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi
+index 75d56b2d5a3d34..2c7b2223ee76b1 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi
+@@ -438,7 +438,7 @@ audio_codec: codec@1a {
+ 		/* Realtek RT5682i or RT5682s, sharing the same configuration */
+ 		reg = <0x1a>;
+ 		interrupts-extended = <&pio 89 IRQ_TYPE_EDGE_BOTH>;
+-		#sound-dai-cells = <0>;
++		#sound-dai-cells = <1>;
+ 		realtek,jd-src = <1>;
+ 
+ 		AVDD-supply = <&mt6359_vio18_ldo_reg>;
+@@ -1181,7 +1181,7 @@ hs-playback-dai-link {
+ 		link-name = "ETDM1_OUT_BE";
+ 		mediatek,clk-provider = "cpu";
+ 		codec {
+-			sound-dai = <&audio_codec>;
++			sound-dai = <&audio_codec 0>;
+ 		};
+ 	};
+ 
+@@ -1189,7 +1189,7 @@ hs-capture-dai-link {
+ 		link-name = "ETDM2_IN_BE";
+ 		mediatek,clk-provider = "cpu";
+ 		codec {
+-			sound-dai = <&audio_codec>;
++			sound-dai = <&audio_codec 0>;
+ 		};
+ 	};
+ 
+diff --git a/arch/arm64/boot/dts/mediatek/mt8195.dtsi b/arch/arm64/boot/dts/mediatek/mt8195.dtsi
+index e89ba384c4aafc..ade685ed2190b7 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8195.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8195.dtsi
+@@ -487,7 +487,7 @@ topckgen: syscon@10000000 {
+ 		};
+ 
+ 		infracfg_ao: syscon@10001000 {
+-			compatible = "mediatek,mt8195-infracfg_ao", "syscon", "simple-mfd";
++			compatible = "mediatek,mt8195-infracfg_ao", "syscon";
+ 			reg = <0 0x10001000 0 0x1000>;
+ 			#clock-cells = <1>;
+ 			#reset-cells = <1>;
+@@ -3331,11 +3331,9 @@ &larb19 &larb21 &larb24 &larb25
+ 		mutex1: mutex@1c101000 {
+ 			compatible = "mediatek,mt8195-disp-mutex";
+ 			reg = <0 0x1c101000 0 0x1000>;
+-			reg-names = "vdo1_mutex";
+ 			interrupts = <GIC_SPI 494 IRQ_TYPE_LEVEL_HIGH 0>;
+ 			power-domains = <&spm MT8195_POWER_DOMAIN_VDOSYS1>;
+ 			clocks = <&vdosys1 CLK_VDO1_DISP_MUTEX>;
+-			clock-names = "vdo1_mutex";
+ 			mediatek,gce-client-reg = <&gce0 SUBSYS_1c10XXXX 0x1000 0x1000>;
+ 			mediatek,gce-events = <CMDQ_EVENT_VDO1_STREAM_DONE_ENG_0>;
+ 		};
+diff --git a/arch/arm64/boot/dts/mediatek/mt8395-genio-1200-evk.dts b/arch/arm64/boot/dts/mediatek/mt8395-genio-1200-evk.dts
+index 1ef6262b65c9ac..b4b48eb93f3c54 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8395-genio-1200-evk.dts
++++ b/arch/arm64/boot/dts/mediatek/mt8395-genio-1200-evk.dts
+@@ -187,7 +187,7 @@ mdio {
+ 		compatible = "snps,dwmac-mdio";
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+-		eth_phy0: eth-phy0@1 {
++		eth_phy0: ethernet-phy@1 {
+ 			compatible = "ethernet-phy-id001c.c916";
+ 			reg = <0x1>;
+ 		};
+diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi b/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi
+index c00db75e391057..1c53ccc5e3cbf3 100644
+--- a/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi
++++ b/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi
+@@ -351,7 +351,7 @@ mmc@700b0200 {
+ 		#size-cells = <0>;
+ 
+ 		wifi@1 {
+-			compatible = "brcm,bcm4354-fmac";
++			compatible = "brcm,bcm4354-fmac", "brcm,bcm4329-fmac";
+ 			reg = <1>;
+ 			interrupt-parent = <&gpio>;
+ 			interrupts = <TEGRA_GPIO(H, 2) IRQ_TYPE_LEVEL_HIGH>;
+diff --git a/arch/arm64/boot/dts/qcom/qcs6490-rb3gen2.dts b/arch/arm64/boot/dts/qcom/qcs6490-rb3gen2.dts
+index 0d45662b8028bf..5d0167fbc70982 100644
+--- a/arch/arm64/boot/dts/qcom/qcs6490-rb3gen2.dts
++++ b/arch/arm64/boot/dts/qcom/qcs6490-rb3gen2.dts
+@@ -707,7 +707,7 @@ &remoteproc_cdsp {
+ };
+ 
+ &remoteproc_mpss {
+-	firmware-name = "qcom/qcs6490/modem.mdt";
++	firmware-name = "qcom/qcs6490/modem.mbn";
+ 	status = "okay";
+ };
+ 
+diff --git a/arch/arm64/boot/dts/qcom/sc8180x.dtsi b/arch/arm64/boot/dts/qcom/sc8180x.dtsi
+index 0e9429684dd97b..60f71b49026153 100644
+--- a/arch/arm64/boot/dts/qcom/sc8180x.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc8180x.dtsi
+@@ -3889,7 +3889,7 @@ lmh@18358800 {
+ 		};
+ 
+ 		cpufreq_hw: cpufreq@18323000 {
+-			compatible = "qcom,cpufreq-hw";
++			compatible = "qcom,sc8180x-cpufreq-hw", "qcom,cpufreq-hw";
+ 			reg = <0 0x18323000 0 0x1400>, <0 0x18325800 0 0x1400>;
+ 			reg-names = "freq-domain0", "freq-domain1";
+ 
+diff --git a/arch/arm64/boot/dts/qcom/sda660-inforce-ifc6560.dts b/arch/arm64/boot/dts/qcom/sda660-inforce-ifc6560.dts
+index 60412281ab27de..962c8aa4004401 100644
+--- a/arch/arm64/boot/dts/qcom/sda660-inforce-ifc6560.dts
++++ b/arch/arm64/boot/dts/qcom/sda660-inforce-ifc6560.dts
+@@ -104,7 +104,7 @@ vreg_l10a_1p8: vreg-l10a-regulator {
+ 		compatible = "regulator-fixed";
+ 		regulator-name = "vreg_l10a_1p8";
+ 		regulator-min-microvolt = <1804000>;
+-		regulator-max-microvolt = <1896000>;
++		regulator-max-microvolt = <1804000>;
+ 		regulator-always-on;
+ 		regulator-boot-on;
+ 	};
+diff --git a/arch/arm64/boot/dts/qcom/sm6350.dtsi b/arch/arm64/boot/dts/qcom/sm6350.dtsi
+index 7986ddb30f6e8c..4f8477de7e1b1e 100644
+--- a/arch/arm64/boot/dts/qcom/sm6350.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm6350.dtsi
+@@ -1376,43 +1376,43 @@ gpu_opp_table: opp-table {
+ 				opp-850000000 {
+ 					opp-hz = /bits/ 64 <850000000>;
+ 					opp-level = <RPMH_REGULATOR_LEVEL_TURBO_L1>;
+-					opp-supported-hw = <0x02>;
++					opp-supported-hw = <0x03>;
+ 				};
+ 
+ 				opp-800000000 {
+ 					opp-hz = /bits/ 64 <800000000>;
+ 					opp-level = <RPMH_REGULATOR_LEVEL_TURBO>;
+-					opp-supported-hw = <0x04>;
++					opp-supported-hw = <0x07>;
+ 				};
+ 
+ 				opp-650000000 {
+ 					opp-hz = /bits/ 64 <650000000>;
+ 					opp-level = <RPMH_REGULATOR_LEVEL_NOM_L1>;
+-					opp-supported-hw = <0x08>;
++					opp-supported-hw = <0x0f>;
+ 				};
+ 
+ 				opp-565000000 {
+ 					opp-hz = /bits/ 64 <565000000>;
+ 					opp-level = <RPMH_REGULATOR_LEVEL_NOM>;
+-					opp-supported-hw = <0x10>;
++					opp-supported-hw = <0x1f>;
+ 				};
+ 
+ 				opp-430000000 {
+ 					opp-hz = /bits/ 64 <430000000>;
+ 					opp-level = <RPMH_REGULATOR_LEVEL_SVS_L1>;
+-					opp-supported-hw = <0xff>;
++					opp-supported-hw = <0x1f>;
+ 				};
+ 
+ 				opp-355000000 {
+ 					opp-hz = /bits/ 64 <355000000>;
+ 					opp-level = <RPMH_REGULATOR_LEVEL_SVS>;
+-					opp-supported-hw = <0xff>;
++					opp-supported-hw = <0x1f>;
+ 				};
+ 
+ 				opp-253000000 {
+ 					opp-hz = /bits/ 64 <253000000>;
+ 					opp-level = <RPMH_REGULATOR_LEVEL_LOW_SVS>;
+-					opp-supported-hw = <0xff>;
++					opp-supported-hw = <0x1f>;
+ 				};
+ 			};
+ 		};
+diff --git a/arch/arm64/boot/dts/qcom/x1e80100-asus-vivobook-s15.dts b/arch/arm64/boot/dts/qcom/x1e80100-asus-vivobook-s15.dts
+index fb4a48a1e2a8a5..2926a1aba76873 100644
+--- a/arch/arm64/boot/dts/qcom/x1e80100-asus-vivobook-s15.dts
++++ b/arch/arm64/boot/dts/qcom/x1e80100-asus-vivobook-s15.dts
+@@ -594,8 +594,6 @@ &usb_1_ss0_qmpphy {
+ 	vdda-phy-supply = <&vreg_l3e_1p2>;
+ 	vdda-pll-supply = <&vreg_l1j_0p8>;
+ 
+-	orientation-switch;
+-
+ 	status = "okay";
+ };
+ 
+@@ -628,8 +626,6 @@ &usb_1_ss1_qmpphy {
+ 	vdda-phy-supply = <&vreg_l3e_1p2>;
+ 	vdda-pll-supply = <&vreg_l2d_0p9>;
+ 
+-	orientation-switch;
+-
+ 	status = "okay";
+ };
+ 
+diff --git a/arch/arm64/boot/dts/qcom/x1e80100-lenovo-yoga-slim7x.dts b/arch/arm64/boot/dts/qcom/x1e80100-lenovo-yoga-slim7x.dts
+index 0cdaff9c8cf0fc..f22e5c840a2e55 100644
+--- a/arch/arm64/boot/dts/qcom/x1e80100-lenovo-yoga-slim7x.dts
++++ b/arch/arm64/boot/dts/qcom/x1e80100-lenovo-yoga-slim7x.dts
+@@ -898,8 +898,6 @@ &usb_1_ss0_qmpphy {
+ 	vdda-phy-supply = <&vreg_l3e_1p2>;
+ 	vdda-pll-supply = <&vreg_l1j_0p8>;
+ 
+-	orientation-switch;
+-
+ 	status = "okay";
+ };
+ 
+@@ -932,8 +930,6 @@ &usb_1_ss1_qmpphy {
+ 	vdda-phy-supply = <&vreg_l3e_1p2>;
+ 	vdda-pll-supply = <&vreg_l2d_0p9>;
+ 
+-	orientation-switch;
+-
+ 	status = "okay";
+ };
+ 
+diff --git a/arch/arm64/boot/dts/qcom/x1e80100.dtsi b/arch/arm64/boot/dts/qcom/x1e80100.dtsi
+index 0510abc0edf0ff..914f9cb3aca215 100644
+--- a/arch/arm64/boot/dts/qcom/x1e80100.dtsi
++++ b/arch/arm64/boot/dts/qcom/x1e80100.dtsi
+@@ -279,8 +279,8 @@ CLUSTER_C4: cpu-sleep-0 {
+ 				idle-state-name = "ret";
+ 				arm,psci-suspend-param = <0x00000004>;
+ 				entry-latency-us = <180>;
+-				exit-latency-us = <320>;
+-				min-residency-us = <1000>;
++				exit-latency-us = <500>;
++				min-residency-us = <600>;
+ 			};
+ 		};
+ 
+@@ -299,7 +299,7 @@ CLUSTER_CL5: cluster-sleep-1 {
+ 				idle-state-name = "ret-pll-off";
+ 				arm,psci-suspend-param = <0x01000054>;
+ 				entry-latency-us = <2200>;
+-				exit-latency-us = <2500>;
++				exit-latency-us = <4000>;
+ 				min-residency-us = <7000>;
+ 			};
+ 		};
+@@ -5752,7 +5752,7 @@ apps_smmu: iommu@15000000 {
+ 		intc: interrupt-controller@17000000 {
+ 			compatible = "arm,gic-v3";
+ 			reg = <0 0x17000000 0 0x10000>,     /* GICD */
+-			      <0 0x17080000 0 0x480000>;    /* GICR * 12 */
++			      <0 0x17080000 0 0x300000>;    /* GICR * 12 */
+ 
+ 			interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
+ 
+diff --git a/arch/arm64/boot/dts/renesas/hihope-rev2.dtsi b/arch/arm64/boot/dts/renesas/hihope-rev2.dtsi
+index 8e2db1d6ca81e2..25c55b32aafe5a 100644
+--- a/arch/arm64/boot/dts/renesas/hihope-rev2.dtsi
++++ b/arch/arm64/boot/dts/renesas/hihope-rev2.dtsi
+@@ -69,9 +69,6 @@ &rcar_sound {
+ 
+ 	status = "okay";
+ 
+-	/* Single DAI */
+-	#sound-dai-cells = <0>;
+-
+ 	rsnd_port: port {
+ 		rsnd_endpoint: endpoint {
+ 			remote-endpoint = <&dw_hdmi0_snd_in>;
+diff --git a/arch/arm64/boot/dts/renesas/hihope-rev4.dtsi b/arch/arm64/boot/dts/renesas/hihope-rev4.dtsi
+index 66f3affe046973..deb69c27277566 100644
+--- a/arch/arm64/boot/dts/renesas/hihope-rev4.dtsi
++++ b/arch/arm64/boot/dts/renesas/hihope-rev4.dtsi
+@@ -84,9 +84,6 @@ &rcar_sound {
+ 	pinctrl-names = "default";
+ 	status = "okay";
+ 
+-	/* Single DAI */
+-	#sound-dai-cells = <0>;
+-
+ 	/* audio_clkout0/1/2/3 */
+ 	#clock-cells = <1>;
+ 	clock-frequency = <12288000 11289600>;
+diff --git a/arch/arm64/boot/dts/rockchip/rk3568-wolfvision-pf5-io-expander.dtso b/arch/arm64/boot/dts/rockchip/rk3568-wolfvision-pf5-io-expander.dtso
+index ebcaeafc3800d0..fa61633aea1526 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3568-wolfvision-pf5-io-expander.dtso
++++ b/arch/arm64/boot/dts/rockchip/rk3568-wolfvision-pf5-io-expander.dtso
+@@ -49,7 +49,6 @@ vcc1v8_eth: vcc1v8-eth-regulator {
+ 
+ 	vcc3v3_eth: vcc3v3-eth-regulator {
+ 		compatible = "regulator-fixed";
+-		enable-active-low;
+ 		gpio = <&gpio0 RK_PC0 GPIO_ACTIVE_LOW>;
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&vcc3v3_eth_enn>;
+diff --git a/arch/arm64/boot/dts/rockchip/rk3588s-indiedroid-nova.dts b/arch/arm64/boot/dts/rockchip/rk3588s-indiedroid-nova.dts
+index 8ba111d9283fef..d9d2bf822443bc 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3588s-indiedroid-nova.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3588s-indiedroid-nova.dts
+@@ -62,7 +62,7 @@ sdio_pwrseq: sdio-pwrseq {
+ 
+ 	sound {
+ 		compatible = "audio-graph-card";
+-		label = "rockchip,es8388-codec";
++		label = "rockchip,es8388";
+ 		widgets = "Microphone", "Mic Jack",
+ 			  "Headphone", "Headphones";
+ 		routing = "LINPUT2", "Mic Jack",
+diff --git a/arch/arm64/boot/dts/rockchip/rk3588s-orangepi-5.dts b/arch/arm64/boot/dts/rockchip/rk3588s-orangepi-5.dts
+index feea6b20a6bf54..6b77be64324950 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3588s-orangepi-5.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3588s-orangepi-5.dts
+@@ -71,7 +71,6 @@ vcc5v0_sys: vcc5v0-sys-regulator {
+ 
+ 	vcc_3v3_sd_s0: vcc-3v3-sd-s0-regulator {
+ 		compatible = "regulator-fixed";
+-		enable-active-low;
+ 		gpios = <&gpio4 RK_PB5 GPIO_ACTIVE_LOW>;
+ 		regulator-name = "vcc_3v3_sd_s0";
+ 		regulator-boot-on;
+diff --git a/arch/arm64/boot/dts/ti/k3-am62x-phyboard-lyra.dtsi b/arch/arm64/boot/dts/ti/k3-am62x-phyboard-lyra.dtsi
+index e4633af87eb9c5..d6ce53c6d74814 100644
+--- a/arch/arm64/boot/dts/ti/k3-am62x-phyboard-lyra.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-am62x-phyboard-lyra.dtsi
+@@ -433,8 +433,6 @@ &mcasp2 {
+ 			0 0 0 0
+ 			0 0 0 0
+ 	>;
+-	tx-num-evt = <32>;
+-	rx-num-evt = <32>;
+ 	status = "okay";
+ };
+ 
+diff --git a/arch/arm64/boot/dts/ti/k3-j7200-common-proc-board.dts b/arch/arm64/boot/dts/ti/k3-j7200-common-proc-board.dts
+index 6593c5da82c064..df39f2b1ff6ba6 100644
+--- a/arch/arm64/boot/dts/ti/k3-j7200-common-proc-board.dts
++++ b/arch/arm64/boot/dts/ti/k3-j7200-common-proc-board.dts
+@@ -254,7 +254,7 @@ J721E_IOPAD(0x38, PIN_OUTPUT, 0) /* (Y21) MCAN3_TX */
+ 	};
+ };
+ 
+-&main_pmx1 {
++&main_pmx2 {
+ 	main_usbss0_pins_default: main-usbss0-default-pins {
+ 		pinctrl-single,pins = <
+ 			J721E_IOPAD(0x04, PIN_OUTPUT, 0) /* (T4) USB0_DRVVBUS */
+diff --git a/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi b/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi
+index 9386bf3ef9f684..1d11da926a8714 100644
+--- a/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi
+@@ -426,10 +426,28 @@ main_pmx0: pinctrl@11c000 {
+ 		pinctrl-single,function-mask = <0xffffffff>;
+ 	};
+ 
+-	main_pmx1: pinctrl@11c11c {
++	main_pmx1: pinctrl@11c110 {
+ 		compatible = "ti,j7200-padconf", "pinctrl-single";
+ 		/* Proxy 0 addressing */
+-		reg = <0x00 0x11c11c 0x00 0xc>;
++		reg = <0x00 0x11c110 0x00 0x004>;
++		#pinctrl-cells = <1>;
++		pinctrl-single,register-width = <32>;
++		pinctrl-single,function-mask = <0xffffffff>;
++	};
++
++	main_pmx2: pinctrl@11c11c {
++		compatible = "ti,j7200-padconf", "pinctrl-single";
++		/* Proxy 0 addressing */
++		reg = <0x00 0x11c11c 0x00 0x00c>;
++		#pinctrl-cells = <1>;
++		pinctrl-single,register-width = <32>;
++		pinctrl-single,function-mask = <0xffffffff>;
++	};
++
++	main_pmx3: pinctrl@11c164 {
++		compatible = "ti,j7200-padconf", "pinctrl-single";
++		/* Proxy 0 addressing */
++		reg = <0x00 0x11c164 0x00 0x008>;
+ 		#pinctrl-cells = <1>;
+ 		pinctrl-single,register-width = <32>;
+ 		pinctrl-single,function-mask = <0xffffffff>;
+@@ -1145,7 +1163,7 @@ main_spi0: spi@2100000 {
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+ 		power-domains = <&k3_pds 266 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 266 1>;
++		clocks = <&k3_clks 266 4>;
+ 		status = "disabled";
+ 	};
+ 
+@@ -1156,7 +1174,7 @@ main_spi1: spi@2110000 {
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+ 		power-domains = <&k3_pds 267 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 267 1>;
++		clocks = <&k3_clks 267 4>;
+ 		status = "disabled";
+ 	};
+ 
+@@ -1167,7 +1185,7 @@ main_spi2: spi@2120000 {
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+ 		power-domains = <&k3_pds 268 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 268 1>;
++		clocks = <&k3_clks 268 4>;
+ 		status = "disabled";
+ 	};
+ 
+@@ -1178,7 +1196,7 @@ main_spi3: spi@2130000 {
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+ 		power-domains = <&k3_pds 269 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 269 1>;
++		clocks = <&k3_clks 269 4>;
+ 		status = "disabled";
+ 	};
+ 
+@@ -1189,7 +1207,7 @@ main_spi4: spi@2140000 {
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+ 		power-domains = <&k3_pds 270 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 270 1>;
++		clocks = <&k3_clks 270 2>;
+ 		status = "disabled";
+ 	};
+ 
+@@ -1200,7 +1218,7 @@ main_spi5: spi@2150000 {
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+ 		power-domains = <&k3_pds 271 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 271 1>;
++		clocks = <&k3_clks 271 4>;
+ 		status = "disabled";
+ 	};
+ 
+@@ -1211,7 +1229,7 @@ main_spi6: spi@2160000 {
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+ 		power-domains = <&k3_pds 272 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 272 1>;
++		clocks = <&k3_clks 272 4>;
+ 		status = "disabled";
+ 	};
+ 
+@@ -1222,7 +1240,7 @@ main_spi7: spi@2170000 {
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+ 		power-domains = <&k3_pds 273 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 273 1>;
++		clocks = <&k3_clks 273 4>;
+ 		status = "disabled";
+ 	};
+ 
+diff --git a/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi
+index 5097d192c2b208..b18b2f2deb969f 100644
+--- a/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi
+@@ -494,7 +494,7 @@ mcu_spi0: spi@40300000 {
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+ 		power-domains = <&k3_pds 274 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 274 0>;
++		clocks = <&k3_clks 274 4>;
+ 		status = "disabled";
+ 	};
+ 
+@@ -505,7 +505,7 @@ mcu_spi1: spi@40310000 {
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+ 		power-domains = <&k3_pds 275 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 275 0>;
++		clocks = <&k3_clks 275 4>;
+ 		status = "disabled";
+ 	};
+ 
+@@ -516,7 +516,7 @@ mcu_spi2: spi@40320000 {
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+ 		power-domains = <&k3_pds 276 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 276 0>;
++		clocks = <&k3_clks 276 2>;
+ 		status = "disabled";
+ 	};
+ 
+diff --git a/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi
+index 3731ffb4a5c963..6f5c1401ebd6a0 100644
+--- a/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi
+@@ -654,7 +654,7 @@ mcu_spi0: spi@40300000 {
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+ 		power-domains = <&k3_pds 274 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 274 0>;
++		clocks = <&k3_clks 274 1>;
+ 		status = "disabled";
+ 	};
+ 
+@@ -665,7 +665,7 @@ mcu_spi1: spi@40310000 {
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+ 		power-domains = <&k3_pds 275 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 275 0>;
++		clocks = <&k3_clks 275 1>;
+ 		status = "disabled";
+ 	};
+ 
+@@ -676,7 +676,7 @@ mcu_spi2: spi@40320000 {
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+ 		power-domains = <&k3_pds 276 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 276 0>;
++		clocks = <&k3_clks 276 1>;
+ 		status = "disabled";
+ 	};
+ 
+diff --git a/arch/arm64/boot/dts/ti/k3-j721s2-main.dtsi b/arch/arm64/boot/dts/ti/k3-j721s2-main.dtsi
+index 9ed6949b40e9df..fae534b5c8a43f 100644
+--- a/arch/arm64/boot/dts/ti/k3-j721s2-main.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-j721s2-main.dtsi
+@@ -1708,7 +1708,7 @@ main_spi0: spi@2100000 {
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+ 		power-domains = <&k3_pds 339 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 339 1>;
++		clocks = <&k3_clks 339 2>;
+ 		status = "disabled";
+ 	};
+ 
+@@ -1719,7 +1719,7 @@ main_spi1: spi@2110000 {
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+ 		power-domains = <&k3_pds 340 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 340 1>;
++		clocks = <&k3_clks 340 2>;
+ 		status = "disabled";
+ 	};
+ 
+@@ -1730,7 +1730,7 @@ main_spi2: spi@2120000 {
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+ 		power-domains = <&k3_pds 341 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 341 1>;
++		clocks = <&k3_clks 341 2>;
+ 		status = "disabled";
+ 	};
+ 
+@@ -1741,7 +1741,7 @@ main_spi3: spi@2130000 {
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+ 		power-domains = <&k3_pds 342 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 342 1>;
++		clocks = <&k3_clks 342 2>;
+ 		status = "disabled";
+ 	};
+ 
+@@ -1752,7 +1752,7 @@ main_spi4: spi@2140000 {
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+ 		power-domains = <&k3_pds 343 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 343 1>;
++		clocks = <&k3_clks 343 2>;
+ 		status = "disabled";
+ 	};
+ 
+@@ -1763,7 +1763,7 @@ main_spi5: spi@2150000 {
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+ 		power-domains = <&k3_pds 344 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 344 1>;
++		clocks = <&k3_clks 344 2>;
+ 		status = "disabled";
+ 	};
+ 
+@@ -1774,7 +1774,7 @@ main_spi6: spi@2160000 {
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+ 		power-domains = <&k3_pds 345 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 345 1>;
++		clocks = <&k3_clks 345 2>;
+ 		status = "disabled";
+ 	};
+ 
+@@ -1785,7 +1785,7 @@ main_spi7: spi@2170000 {
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+ 		power-domains = <&k3_pds 346 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 346 1>;
++		clocks = <&k3_clks 346 2>;
+ 		status = "disabled";
+ 	};
+ 
+diff --git a/arch/arm64/boot/dts/ti/k3-j721s2-mcu-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-j721s2-mcu-wakeup.dtsi
+index 9d96b19d0e7cf5..8232d308c23cc6 100644
+--- a/arch/arm64/boot/dts/ti/k3-j721s2-mcu-wakeup.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-j721s2-mcu-wakeup.dtsi
+@@ -425,7 +425,7 @@ mcu_spi0: spi@40300000 {
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+ 		power-domains = <&k3_pds 347 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 347 0>;
++		clocks = <&k3_clks 347 2>;
+ 		status = "disabled";
+ 	};
+ 
+@@ -436,7 +436,7 @@ mcu_spi1: spi@40310000 {
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+ 		power-domains = <&k3_pds 348 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 348 0>;
++		clocks = <&k3_clks 348 2>;
+ 		status = "disabled";
+ 	};
+ 
+@@ -447,7 +447,7 @@ mcu_spi2: spi@40320000 {
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+ 		power-domains = <&k3_pds 349 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 349 0>;
++		clocks = <&k3_clks 349 2>;
+ 		status = "disabled";
+ 	};
+ 
+diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
+index 8c0a36f72d6fcd..bc77869dbd43b2 100644
+--- a/arch/arm64/include/asm/insn.h
++++ b/arch/arm64/include/asm/insn.h
+@@ -353,6 +353,7 @@ __AARCH64_INSN_FUNCS(ldrsw_lit,	0xFF000000, 0x98000000)
+ __AARCH64_INSN_FUNCS(exclusive,	0x3F800000, 0x08000000)
+ __AARCH64_INSN_FUNCS(load_ex,	0x3F400000, 0x08400000)
+ __AARCH64_INSN_FUNCS(store_ex,	0x3F400000, 0x08000000)
++__AARCH64_INSN_FUNCS(mops,	0x3B200C00, 0x19000400)
+ __AARCH64_INSN_FUNCS(stp,	0x7FC00000, 0x29000000)
+ __AARCH64_INSN_FUNCS(ldp,	0x7FC00000, 0x29400000)
+ __AARCH64_INSN_FUNCS(stp_post,	0x7FC00000, 0x28800000)
+diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
+index bf64fed9820ea0..c315bc1a4e9adf 100644
+--- a/arch/arm64/include/asm/kvm_host.h
++++ b/arch/arm64/include/asm/kvm_host.h
+@@ -74,8 +74,6 @@ enum kvm_mode kvm_get_mode(void);
+ static inline enum kvm_mode kvm_get_mode(void) { return KVM_MODE_NONE; };
+ #endif
+ 
+-DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
+-
+ extern unsigned int __ro_after_init kvm_sve_max_vl;
+ extern unsigned int __ro_after_init kvm_host_sve_max_vl;
+ int __init kvm_arm_init_sve(void);
+diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
+index 718728a85430fa..db994d1fd97e70 100644
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -228,6 +228,7 @@ static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
+ };
+ 
+ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
++	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_XS_SHIFT, 4, 0),
+ 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_I8MM_SHIFT, 4, 0),
+ 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_DGH_SHIFT, 4, 0),
+ 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_BF16_SHIFT, 4, 0),
+diff --git a/arch/arm64/kernel/probes/decode-insn.c b/arch/arm64/kernel/probes/decode-insn.c
+index 3496d6169e59b2..42b69936cee34b 100644
+--- a/arch/arm64/kernel/probes/decode-insn.c
++++ b/arch/arm64/kernel/probes/decode-insn.c
+@@ -58,10 +58,13 @@ static bool __kprobes aarch64_insn_is_steppable(u32 insn)
+ 	 * Instructions which load PC relative literals are not going to work
+ 	 * when executed from an XOL slot. Instructions doing an exclusive
+ 	 * load/store are not going to complete successfully when single-step
+-	 * exception handling happens in the middle of the sequence.
++	 * exception handling happens in the middle of the sequence. Memory
++	 * copy/set instructions require that all three instructions be placed
++	 * consecutively in memory.
+ 	 */
+ 	if (aarch64_insn_uses_literal(insn) ||
+-	    aarch64_insn_is_exclusive(insn))
++	    aarch64_insn_is_exclusive(insn) ||
++	    aarch64_insn_is_mops(insn))
+ 		return false;
+ 
+ 	return true;
+diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
+index 3e7c8c8195c3c9..2bbcbb11d844c9 100644
+--- a/arch/arm64/kernel/process.c
++++ b/arch/arm64/kernel/process.c
+@@ -442,7 +442,7 @@ static void tls_thread_switch(struct task_struct *next)
+ 
+ 	if (is_compat_thread(task_thread_info(next)))
+ 		write_sysreg(next->thread.uw.tp_value, tpidrro_el0);
+-	else if (!arm64_kernel_unmapped_at_el0())
++	else
+ 		write_sysreg(0, tpidrro_el0);
+ 
+ 	write_sysreg(*task_user_tls(next), tpidr_el0);
+diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
+index b22d28ec80284b..87f61fd6783c20 100644
+--- a/arch/arm64/kernel/setup.c
++++ b/arch/arm64/kernel/setup.c
+@@ -175,7 +175,11 @@ static void __init setup_machine_fdt(phys_addr_t dt_phys)
+ 	if (dt_virt)
+ 		memblock_reserve(dt_phys, size);
+ 
+-	if (!dt_virt || !early_init_dt_scan(dt_virt)) {
++	/*
++	 * dt_virt is a fixmap address, hence __pa(dt_virt) can't be used.
++	 * Pass dt_phys directly.
++	 */
++	if (!early_init_dt_scan(dt_virt, dt_phys)) {
+ 		pr_crit("\n"
+ 			"Error: invalid device tree blob at physical address %pa (virtual address 0x%px)\n"
+ 			"The dtb must be 8-byte aligned and must not exceed 2 MB in size\n"
+diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
+index 58d89d997d050f..f84c71f04d9ea9 100644
+--- a/arch/arm64/kernel/vmlinux.lds.S
++++ b/arch/arm64/kernel/vmlinux.lds.S
+@@ -287,6 +287,9 @@ SECTIONS
+ 	__initdata_end = .;
+ 	__init_end = .;
+ 
++	.data.rel.ro : { *(.data.rel.ro) }
++	ASSERT(SIZEOF(.data.rel.ro) == 0, "Unexpected RELRO detected!")
++
+ 	_data = .;
+ 	_sdata = .;
+ 	RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_ALIGN)
+@@ -343,9 +346,6 @@ SECTIONS
+ 		*(.plt) *(.plt.*) *(.iplt) *(.igot .igot.plt)
+ 	}
+ 	ASSERT(SIZEOF(.plt) == 0, "Unexpected run-time procedure linkages detected!")
+-
+-	.data.rel.ro : { *(.data.rel.ro) }
+-	ASSERT(SIZEOF(.data.rel.ro) == 0, "Unexpected RELRO detected!")
+ }
+ 
+ #include "image-vars.h"
+diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c
+index 879982b1cc739e..1215df59041856 100644
+--- a/arch/arm64/kvm/arch_timer.c
++++ b/arch/arm64/kvm/arch_timer.c
+@@ -206,8 +206,7 @@ void get_timer_map(struct kvm_vcpu *vcpu, struct timer_map *map)
+ 
+ static inline bool userspace_irqchip(struct kvm *kvm)
+ {
+-	return static_branch_unlikely(&userspace_irqchip_in_use) &&
+-		unlikely(!irqchip_in_kernel(kvm));
++	return unlikely(!irqchip_in_kernel(kvm));
+ }
+ 
+ static void soft_timer_start(struct hrtimer *hrt, u64 ns)
+diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
+index 48cafb65d6acff..70ff9a20ef3af3 100644
+--- a/arch/arm64/kvm/arm.c
++++ b/arch/arm64/kvm/arm.c
+@@ -69,7 +69,6 @@ DECLARE_KVM_NVHE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
+ static bool vgic_present, kvm_arm_initialised;
+ 
+ static DEFINE_PER_CPU(unsigned char, kvm_hyp_initialized);
+-DEFINE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
+ 
+ bool is_kvm_arm_initialised(void)
+ {
+@@ -503,9 +502,6 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
+ 
+ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
+ {
+-	if (vcpu_has_run_once(vcpu) && unlikely(!irqchip_in_kernel(vcpu->kvm)))
+-		static_branch_dec(&userspace_irqchip_in_use);
+-
+ 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
+ 	kvm_timer_vcpu_terminate(vcpu);
+ 	kvm_pmu_vcpu_destroy(vcpu);
+@@ -848,14 +844,6 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
+ 			return ret;
+ 	}
+ 
+-	if (!irqchip_in_kernel(kvm)) {
+-		/*
+-		 * Tell the rest of the code that there are userspace irqchip
+-		 * VMs in the wild.
+-		 */
+-		static_branch_inc(&userspace_irqchip_in_use);
+-	}
+-
+ 	/*
+ 	 * Initialize traps for protected VMs.
+ 	 * NOTE: Move to run in EL2 directly, rather than via a hypercall, once
+@@ -1077,7 +1065,7 @@ static bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu, int *ret)
+ 	 * state gets updated in kvm_timer_update_run and
+ 	 * kvm_pmu_update_run below).
+ 	 */
+-	if (static_branch_unlikely(&userspace_irqchip_in_use)) {
++	if (unlikely(!irqchip_in_kernel(vcpu->kvm))) {
+ 		if (kvm_timer_should_notify_user(vcpu) ||
+ 		    kvm_pmu_should_notify_user(vcpu)) {
+ 			*ret = -EINTR;
+@@ -1199,7 +1187,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
+ 			vcpu->mode = OUTSIDE_GUEST_MODE;
+ 			isb(); /* Ensure work in x_flush_hwstate is committed */
+ 			kvm_pmu_sync_hwstate(vcpu);
+-			if (static_branch_unlikely(&userspace_irqchip_in_use))
++			if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
+ 				kvm_timer_sync_user(vcpu);
+ 			kvm_vgic_sync_hwstate(vcpu);
+ 			local_irq_enable();
+@@ -1245,7 +1233,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
+ 		 * we don't want vtimer interrupts to race with syncing the
+ 		 * timer virtual interrupt state.
+ 		 */
+-		if (static_branch_unlikely(&userspace_irqchip_in_use))
++		if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
+ 			kvm_timer_sync_user(vcpu);
+ 
+ 		kvm_arch_vcpu_ctxsync_fp(vcpu);
+diff --git a/arch/arm64/kvm/mmio.c b/arch/arm64/kvm/mmio.c
+index cd6b7b83e2c370..ab365e839874e5 100644
+--- a/arch/arm64/kvm/mmio.c
++++ b/arch/arm64/kvm/mmio.c
+@@ -72,6 +72,31 @@ unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len)
+ 	return data;
+ }
+ 
++static bool kvm_pending_sync_exception(struct kvm_vcpu *vcpu)
++{
++	if (!vcpu_get_flag(vcpu, PENDING_EXCEPTION))
++		return false;
++
++	if (vcpu_el1_is_32bit(vcpu)) {
++		switch (vcpu_get_flag(vcpu, EXCEPT_MASK)) {
++		case unpack_vcpu_flag(EXCEPT_AA32_UND):
++		case unpack_vcpu_flag(EXCEPT_AA32_IABT):
++		case unpack_vcpu_flag(EXCEPT_AA32_DABT):
++			return true;
++		default:
++			return false;
++		}
++	} else {
++		switch (vcpu_get_flag(vcpu, EXCEPT_MASK)) {
++		case unpack_vcpu_flag(EXCEPT_AA64_EL1_SYNC):
++		case unpack_vcpu_flag(EXCEPT_AA64_EL2_SYNC):
++			return true;
++		default:
++			return false;
++		}
++	}
++}
++
+ /**
+  * kvm_handle_mmio_return -- Handle MMIO loads after user space emulation
+  *			     or in-kernel IO emulation
+@@ -84,8 +109,11 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu)
+ 	unsigned int len;
+ 	int mask;
+ 
+-	/* Detect an already handled MMIO return */
+-	if (unlikely(!vcpu->mmio_needed))
++	/*
++	 * Detect if the MMIO return was already handled or if userspace aborted
++	 * the MMIO access.
++	 */
++	if (unlikely(!vcpu->mmio_needed || kvm_pending_sync_exception(vcpu)))
+ 		return 1;
+ 
+ 	vcpu->mmio_needed = 0;
+diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
+index ac36c438b8c18c..3940fe893783c8 100644
+--- a/arch/arm64/kvm/pmu-emul.c
++++ b/arch/arm64/kvm/pmu-emul.c
+@@ -342,7 +342,6 @@ static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
+ 
+ 	if ((kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E)) {
+ 		reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
+-		reg &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
+ 		reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
+ 	}
+ 
+diff --git a/arch/arm64/kvm/vgic/vgic-its.c b/arch/arm64/kvm/vgic/vgic-its.c
+index ba945ba78cc7d7..198296933e7ebf 100644
+--- a/arch/arm64/kvm/vgic/vgic-its.c
++++ b/arch/arm64/kvm/vgic/vgic-its.c
+@@ -782,6 +782,9 @@ static int vgic_its_cmd_handle_discard(struct kvm *kvm, struct vgic_its *its,
+ 
+ 	ite = find_ite(its, device_id, event_id);
+ 	if (ite && its_is_collection_mapped(ite->collection)) {
++		struct its_device *device = find_its_device(its, device_id);
++		int ite_esz = vgic_its_get_abi(its)->ite_esz;
++		gpa_t gpa = device->itt_addr + ite->event_id * ite_esz;
+ 		/*
+ 		 * Though the spec talks about removing the pending state, we
+ 		 * don't bother here since we clear the ITTE anyway and the
+@@ -790,7 +793,8 @@ static int vgic_its_cmd_handle_discard(struct kvm *kvm, struct vgic_its *its,
+ 		vgic_its_invalidate_cache(its);
+ 
+ 		its_free_ite(kvm, ite);
+-		return 0;
++
++		return vgic_its_write_entry_lock(its, gpa, 0, ite_esz);
+ 	}
+ 
+ 	return E_ITS_DISCARD_UNMAPPED_INTERRUPT;
+@@ -1139,9 +1143,11 @@ static int vgic_its_cmd_handle_mapd(struct kvm *kvm, struct vgic_its *its,
+ 	bool valid = its_cmd_get_validbit(its_cmd);
+ 	u8 num_eventid_bits = its_cmd_get_size(its_cmd);
+ 	gpa_t itt_addr = its_cmd_get_ittaddr(its_cmd);
++	int dte_esz = vgic_its_get_abi(its)->dte_esz;
+ 	struct its_device *device;
++	gpa_t gpa;
+ 
+-	if (!vgic_its_check_id(its, its->baser_device_table, device_id, NULL))
++	if (!vgic_its_check_id(its, its->baser_device_table, device_id, &gpa))
+ 		return E_ITS_MAPD_DEVICE_OOR;
+ 
+ 	if (valid && num_eventid_bits > VITS_TYPER_IDBITS)
+@@ -1162,7 +1168,7 @@ static int vgic_its_cmd_handle_mapd(struct kvm *kvm, struct vgic_its *its,
+ 	 * is an error, so we are done in any case.
+ 	 */
+ 	if (!valid)
+-		return 0;
++		return vgic_its_write_entry_lock(its, gpa, 0, dte_esz);
+ 
+ 	device = vgic_its_alloc_device(its, device_id, itt_addr,
+ 				       num_eventid_bits);
+@@ -2086,7 +2092,6 @@ static int scan_its_table(struct vgic_its *its, gpa_t base, int size, u32 esz,
+ static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev,
+ 			      struct its_ite *ite, gpa_t gpa, int ite_esz)
+ {
+-	struct kvm *kvm = its->dev->kvm;
+ 	u32 next_offset;
+ 	u64 val;
+ 
+@@ -2095,7 +2100,8 @@ static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev,
+ 	       ((u64)ite->irq->intid << KVM_ITS_ITE_PINTID_SHIFT) |
+ 		ite->collection->collection_id;
+ 	val = cpu_to_le64(val);
+-	return vgic_write_guest_lock(kvm, gpa, &val, ite_esz);
++
++	return vgic_its_write_entry_lock(its, gpa, val, ite_esz);
+ }
+ 
+ /**
+@@ -2239,7 +2245,6 @@ static int vgic_its_restore_itt(struct vgic_its *its, struct its_device *dev)
+ static int vgic_its_save_dte(struct vgic_its *its, struct its_device *dev,
+ 			     gpa_t ptr, int dte_esz)
+ {
+-	struct kvm *kvm = its->dev->kvm;
+ 	u64 val, itt_addr_field;
+ 	u32 next_offset;
+ 
+@@ -2250,7 +2255,8 @@ static int vgic_its_save_dte(struct vgic_its *its, struct its_device *dev,
+ 	       (itt_addr_field << KVM_ITS_DTE_ITTADDR_SHIFT) |
+ 		(dev->num_eventid_bits - 1));
+ 	val = cpu_to_le64(val);
+-	return vgic_write_guest_lock(kvm, ptr, &val, dte_esz);
++
++	return vgic_its_write_entry_lock(its, ptr, val, dte_esz);
+ }
+ 
+ /**
+@@ -2437,7 +2443,8 @@ static int vgic_its_save_cte(struct vgic_its *its,
+ 	       ((u64)collection->target_addr << KVM_ITS_CTE_RDBASE_SHIFT) |
+ 	       collection->collection_id);
+ 	val = cpu_to_le64(val);
+-	return vgic_write_guest_lock(its->dev->kvm, gpa, &val, esz);
++
++	return vgic_its_write_entry_lock(its, gpa, val, esz);
+ }
+ 
+ /*
+@@ -2453,8 +2460,7 @@ static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz)
+ 	u64 val;
+ 	int ret;
+ 
+-	BUG_ON(esz > sizeof(val));
+-	ret = kvm_read_guest_lock(kvm, gpa, &val, esz);
++	ret = vgic_its_read_entry_lock(its, gpa, &val, esz);
+ 	if (ret)
+ 		return ret;
+ 	val = le64_to_cpu(val);
+@@ -2492,7 +2498,6 @@ static int vgic_its_save_collection_table(struct vgic_its *its)
+ 	u64 baser = its->baser_coll_table;
+ 	gpa_t gpa = GITS_BASER_ADDR_48_to_52(baser);
+ 	struct its_collection *collection;
+-	u64 val;
+ 	size_t max_size, filled = 0;
+ 	int ret, cte_esz = abi->cte_esz;
+ 
+@@ -2516,10 +2521,7 @@ static int vgic_its_save_collection_table(struct vgic_its *its)
+ 	 * table is not fully filled, add a last dummy element
+ 	 * with valid bit unset
+ 	 */
+-	val = 0;
+-	BUG_ON(cte_esz > sizeof(val));
+-	ret = vgic_write_guest_lock(its->dev->kvm, gpa, &val, cte_esz);
+-	return ret;
++	return vgic_its_write_entry_lock(its, gpa, 0, cte_esz);
+ }
+ 
+ /*
+diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v3.c b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
+index 9e50928f5d7dfd..70a44852cbafe3 100644
+--- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c
++++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
+@@ -530,6 +530,7 @@ static void vgic_mmio_write_invlpi(struct kvm_vcpu *vcpu,
+ 				   unsigned long val)
+ {
+ 	struct vgic_irq *irq;
++	u32 intid;
+ 
+ 	/*
+ 	 * If the guest wrote only to the upper 32bit part of the
+@@ -541,9 +542,13 @@ static void vgic_mmio_write_invlpi(struct kvm_vcpu *vcpu,
+ 	if ((addr & 4) || !vgic_lpis_enabled(vcpu))
+ 		return;
+ 
++	intid = lower_32_bits(val);
++	if (intid < VGIC_MIN_LPI)
++		return;
++
+ 	vgic_set_rdist_busy(vcpu, true);
+ 
+-	irq = vgic_get_irq(vcpu->kvm, NULL, lower_32_bits(val));
++	irq = vgic_get_irq(vcpu->kvm, NULL, intid);
+ 	if (irq) {
+ 		vgic_its_inv_lpi(vcpu->kvm, irq);
+ 		vgic_put_irq(vcpu->kvm, irq);
+diff --git a/arch/arm64/kvm/vgic/vgic.h b/arch/arm64/kvm/vgic/vgic.h
+index f2486b4d9f9566..309295f5e1b074 100644
+--- a/arch/arm64/kvm/vgic/vgic.h
++++ b/arch/arm64/kvm/vgic/vgic.h
+@@ -146,6 +146,29 @@ static inline int vgic_write_guest_lock(struct kvm *kvm, gpa_t gpa,
+ 	return ret;
+ }
+ 
++static inline int vgic_its_read_entry_lock(struct vgic_its *its, gpa_t eaddr,
++					   u64 *eval, unsigned long esize)
++{
++	struct kvm *kvm = its->dev->kvm;
++
++	if (KVM_BUG_ON(esize != sizeof(*eval), kvm))
++		return -EINVAL;
++
++	return kvm_read_guest_lock(kvm, eaddr, eval, esize);
++
++}
++
++static inline int vgic_its_write_entry_lock(struct vgic_its *its, gpa_t eaddr,
++					    u64 eval, unsigned long esize)
++{
++	struct kvm *kvm = its->dev->kvm;
++
++	if (KVM_BUG_ON(esize != sizeof(eval), kvm))
++		return -EINVAL;
++
++	return vgic_write_guest_lock(kvm, eaddr, &eval, esize);
++}
++
+ /*
+  * This struct provides an intermediate representation of the fields contained
+  * in the GICH_VMCR and ICH_VMCR registers, such that code exporting the GIC
+diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
+index 5db82bfc9dc115..27ef366363e4e2 100644
+--- a/arch/arm64/net/bpf_jit_comp.c
++++ b/arch/arm64/net/bpf_jit_comp.c
+@@ -2094,6 +2094,12 @@ static void restore_args(struct jit_ctx *ctx, int args_off, int nregs)
+ 	}
+ }
+ 
++static bool is_struct_ops_tramp(const struct bpf_tramp_links *fentry_links)
++{
++	return fentry_links->nr_links == 1 &&
++		fentry_links->links[0]->link.type == BPF_LINK_TYPE_STRUCT_OPS;
++}
++
+ /* Based on the x86's implementation of arch_prepare_bpf_trampoline().
+  *
+  * bpf prog and function entry before bpf trampoline hooked:
+@@ -2123,6 +2129,7 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
+ 	struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
+ 	bool save_ret;
+ 	__le32 **branches = NULL;
++	bool is_struct_ops = is_struct_ops_tramp(fentry);
+ 
+ 	/* trampoline stack layout:
+ 	 *                  [ parent ip         ]
+@@ -2191,11 +2198,14 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
+ 	 */
+ 	emit_bti(A64_BTI_JC, ctx);
+ 
+-	/* frame for parent function */
+-	emit(A64_PUSH(A64_FP, A64_R(9), A64_SP), ctx);
+-	emit(A64_MOV(1, A64_FP, A64_SP), ctx);
++	/* x9 is not set for struct_ops */
++	if (!is_struct_ops) {
++		/* frame for parent function */
++		emit(A64_PUSH(A64_FP, A64_R(9), A64_SP), ctx);
++		emit(A64_MOV(1, A64_FP, A64_SP), ctx);
++	}
+ 
+-	/* frame for patched function */
++	/* frame for patched function for tracing, or caller for struct_ops */
+ 	emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
+ 	emit(A64_MOV(1, A64_FP, A64_SP), ctx);
+ 
+@@ -2289,19 +2299,24 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
+ 	/* reset SP  */
+ 	emit(A64_MOV(1, A64_SP, A64_FP), ctx);
+ 
+-	/* pop frames  */
+-	emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);
+-	emit(A64_POP(A64_FP, A64_R(9), A64_SP), ctx);
+-
+-	if (flags & BPF_TRAMP_F_SKIP_FRAME) {
+-		/* skip patched function, return to parent */
+-		emit(A64_MOV(1, A64_LR, A64_R(9)), ctx);
+-		emit(A64_RET(A64_R(9)), ctx);
++	if (is_struct_ops) {
++		emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);
++		emit(A64_RET(A64_LR), ctx);
+ 	} else {
+-		/* return to patched function */
+-		emit(A64_MOV(1, A64_R(10), A64_LR), ctx);
+-		emit(A64_MOV(1, A64_LR, A64_R(9)), ctx);
+-		emit(A64_RET(A64_R(10)), ctx);
++		/* pop frames */
++		emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);
++		emit(A64_POP(A64_FP, A64_R(9), A64_SP), ctx);
++
++		if (flags & BPF_TRAMP_F_SKIP_FRAME) {
++			/* skip patched function, return to parent */
++			emit(A64_MOV(1, A64_LR, A64_R(9)), ctx);
++			emit(A64_RET(A64_R(9)), ctx);
++		} else {
++			/* return to patched function */
++			emit(A64_MOV(1, A64_R(10), A64_LR), ctx);
++			emit(A64_MOV(1, A64_LR, A64_R(9)), ctx);
++			emit(A64_RET(A64_R(10)), ctx);
++		}
+ 	}
+ 
+ 	kfree(branches);
+diff --git a/arch/csky/kernel/setup.c b/arch/csky/kernel/setup.c
+index 51012e90780d6b..fe715b707fd0a4 100644
+--- a/arch/csky/kernel/setup.c
++++ b/arch/csky/kernel/setup.c
+@@ -112,9 +112,9 @@ asmlinkage __visible void __init csky_start(unsigned int unused,
+ 	pre_trap_init();
+ 
+ 	if (dtb_start == NULL)
+-		early_init_dt_scan(__dtb_start);
++		early_init_dt_scan(__dtb_start, __pa(dtb_start));
+ 	else
+-		early_init_dt_scan(dtb_start);
++		early_init_dt_scan(dtb_start, __pa(dtb_start));
+ 
+ 	start_kernel();
+ 
+diff --git a/arch/loongarch/Makefile b/arch/loongarch/Makefile
+index ae3f80622f4c60..567bd122a9ee47 100644
+--- a/arch/loongarch/Makefile
++++ b/arch/loongarch/Makefile
+@@ -59,7 +59,7 @@ endif
+ 
+ ifdef CONFIG_64BIT
+ ld-emul			= $(64bit-emul)
+-cflags-y		+= -mabi=lp64s
++cflags-y		+= -mabi=lp64s -mcmodel=normal
+ endif
+ 
+ cflags-y			+= -pipe $(CC_FLAGS_NO_FPU)
+@@ -104,7 +104,7 @@ ifdef CONFIG_OBJTOOL
+ KBUILD_CFLAGS			+= -fno-jump-tables
+ endif
+ 
+-KBUILD_RUSTFLAGS		+= --target=loongarch64-unknown-none-softfloat
++KBUILD_RUSTFLAGS		+= --target=loongarch64-unknown-none-softfloat -Ccode-model=small
+ KBUILD_RUSTFLAGS_KERNEL		+= -Zdirect-access-external-data=yes
+ KBUILD_RUSTFLAGS_MODULE		+= -Zdirect-access-external-data=no
+ 
+diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c
+index cbd3c09a93c14c..56934fe58170e0 100644
+--- a/arch/loongarch/kernel/setup.c
++++ b/arch/loongarch/kernel/setup.c
+@@ -291,7 +291,7 @@ static void __init fdt_setup(void)
+ 	if (!fdt_pointer || fdt_check_header(fdt_pointer))
+ 		return;
+ 
+-	early_init_dt_scan(fdt_pointer);
++	early_init_dt_scan(fdt_pointer, __pa(fdt_pointer));
+ 	early_init_fdt_reserve_self();
+ 
+ 	max_low_pfn = PFN_PHYS(memblock_end_of_DRAM());
+diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c
+index 7dbefd4ba21071..dd350cba1252f9 100644
+--- a/arch/loongarch/net/bpf_jit.c
++++ b/arch/loongarch/net/bpf_jit.c
+@@ -179,7 +179,7 @@ static void __build_epilogue(struct jit_ctx *ctx, bool is_tail_call)
+ 
+ 	if (!is_tail_call) {
+ 		/* Set return value */
+-		move_reg(ctx, LOONGARCH_GPR_A0, regmap[BPF_REG_0]);
++		emit_insn(ctx, addiw, LOONGARCH_GPR_A0, regmap[BPF_REG_0], 0);
+ 		/* Return to the caller */
+ 		emit_insn(ctx, jirl, LOONGARCH_GPR_RA, LOONGARCH_GPR_ZERO, 0);
+ 	} else {
+diff --git a/arch/loongarch/vdso/Makefile b/arch/loongarch/vdso/Makefile
+index 40c1175823d61d..fdde1bcd4e2663 100644
+--- a/arch/loongarch/vdso/Makefile
++++ b/arch/loongarch/vdso/Makefile
+@@ -19,7 +19,7 @@ ccflags-vdso := \
+ cflags-vdso := $(ccflags-vdso) \
+ 	-isystem $(shell $(CC) -print-file-name=include) \
+ 	$(filter -W%,$(filter-out -Wa$(comma)%,$(KBUILD_CFLAGS))) \
+-	-O2 -g -fno-strict-aliasing -fno-common -fno-builtin \
++	-std=gnu11 -O2 -g -fno-strict-aliasing -fno-common -fno-builtin \
+ 	-fno-stack-protector -fno-jump-tables -DDISABLE_BRANCH_PROFILING \
+ 	$(call cc-option, -fno-asynchronous-unwind-tables) \
+ 	$(call cc-option, -fno-stack-protector)
+diff --git a/arch/m68k/coldfire/device.c b/arch/m68k/coldfire/device.c
+index 7dab46728aedaf..b6958ec2a220cf 100644
+--- a/arch/m68k/coldfire/device.c
++++ b/arch/m68k/coldfire/device.c
+@@ -93,7 +93,7 @@ static struct platform_device mcf_uart = {
+ 	.dev.platform_data	= mcf_uart_platform_data,
+ };
+ 
+-#if IS_ENABLED(CONFIG_FEC)
++#ifdef MCFFEC_BASE0
+ 
+ #ifdef CONFIG_M5441x
+ #define FEC_NAME	"enet-fec"
+@@ -145,6 +145,7 @@ static struct platform_device mcf_fec0 = {
+ 		.platform_data		= FEC_PDATA,
+ 	}
+ };
++#endif /* MCFFEC_BASE0 */
+ 
+ #ifdef MCFFEC_BASE1
+ static struct resource mcf_fec1_resources[] = {
+@@ -182,7 +183,6 @@ static struct platform_device mcf_fec1 = {
+ 	}
+ };
+ #endif /* MCFFEC_BASE1 */
+-#endif /* CONFIG_FEC */
+ 
+ #if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
+ /*
+@@ -624,12 +624,12 @@ static struct platform_device mcf_flexcan0 = {
+ 
+ static struct platform_device *mcf_devices[] __initdata = {
+ 	&mcf_uart,
+-#if IS_ENABLED(CONFIG_FEC)
++#ifdef MCFFEC_BASE0
+ 	&mcf_fec0,
++#endif
+ #ifdef MCFFEC_BASE1
+ 	&mcf_fec1,
+ #endif
+-#endif
+ #if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
+ 	&mcf_qspi,
+ #endif
+diff --git a/arch/m68k/include/asm/mcfgpio.h b/arch/m68k/include/asm/mcfgpio.h
+index 019f244395464d..9c91ecdafc4539 100644
+--- a/arch/m68k/include/asm/mcfgpio.h
++++ b/arch/m68k/include/asm/mcfgpio.h
+@@ -136,7 +136,7 @@ static inline void gpio_free(unsigned gpio)
+  * read-modify-write as well as those controlled by the EPORT and GPIO modules.
+  */
+ #define MCFGPIO_SCR_START		40
+-#elif defined(CONFIGM5441x)
++#elif defined(CONFIG_M5441x)
+ /* The m5441x EPORT doesn't have its own GPIO port, uses PORT C */
+ #define MCFGPIO_SCR_START		0
+ #else
+diff --git a/arch/m68k/include/asm/mvme147hw.h b/arch/m68k/include/asm/mvme147hw.h
+index e28eb1c0e0bfb3..dbf88059e47a4d 100644
+--- a/arch/m68k/include/asm/mvme147hw.h
++++ b/arch/m68k/include/asm/mvme147hw.h
+@@ -93,8 +93,8 @@ struct pcc_regs {
+ #define M147_SCC_B_ADDR		0xfffe3000
+ #define M147_SCC_PCLK		5000000
+ 
+-#define MVME147_IRQ_SCSI_PORT	(IRQ_USER+0x45)
+-#define MVME147_IRQ_SCSI_DMA	(IRQ_USER+0x46)
++#define MVME147_IRQ_SCSI_PORT	(IRQ_USER + 5)
++#define MVME147_IRQ_SCSI_DMA	(IRQ_USER + 6)
+ 
+ /* SCC interrupts, for MVME147 */
+ 
+diff --git a/arch/m68k/kernel/early_printk.c b/arch/m68k/kernel/early_printk.c
+index 3cc944df04f65e..f11ef9f1f56fcf 100644
+--- a/arch/m68k/kernel/early_printk.c
++++ b/arch/m68k/kernel/early_printk.c
+@@ -13,6 +13,7 @@
+ #include <asm/setup.h>
+ 
+ 
++#include "../mvme147/mvme147.h"
+ #include "../mvme16x/mvme16x.h"
+ 
+ asmlinkage void __init debug_cons_nputs(const char *s, unsigned n);
+@@ -22,7 +23,9 @@ static void __ref debug_cons_write(struct console *c,
+ {
+ #if !(defined(CONFIG_SUN3) || defined(CONFIG_M68000) || \
+       defined(CONFIG_COLDFIRE))
+-	if (MACH_IS_MVME16x)
++	if (MACH_IS_MVME147)
++		mvme147_scc_write(c, s, n);
++	else if (MACH_IS_MVME16x)
+ 		mvme16x_cons_write(c, s, n);
+ 	else
+ 		debug_cons_nputs(s, n);
+diff --git a/arch/m68k/mvme147/config.c b/arch/m68k/mvme147/config.c
+index 8b5dc07f0811f2..cc2fb0a83cf0b4 100644
+--- a/arch/m68k/mvme147/config.c
++++ b/arch/m68k/mvme147/config.c
+@@ -32,6 +32,7 @@
+ #include <asm/mvme147hw.h>
+ #include <asm/config.h>
+ 
++#include "mvme147.h"
+ 
+ static void mvme147_get_model(char *model);
+ extern void mvme147_sched_init(void);
+@@ -185,3 +186,32 @@ int mvme147_hwclk(int op, struct rtc_time *t)
+ 	}
+ 	return 0;
+ }
++
++static void scc_delay(void)
++{
++	__asm__ __volatile__ ("nop; nop;");
++}
++
++static void scc_write(char ch)
++{
++	do {
++		scc_delay();
++	} while (!(in_8(M147_SCC_A_ADDR) & BIT(2)));
++	scc_delay();
++	out_8(M147_SCC_A_ADDR, 8);
++	scc_delay();
++	out_8(M147_SCC_A_ADDR, ch);
++}
++
++void mvme147_scc_write(struct console *co, const char *str, unsigned int count)
++{
++	unsigned long flags;
++
++	local_irq_save(flags);
++	while (count--)	{
++		if (*str == '\n')
++			scc_write('\r');
++		scc_write(*str++);
++	}
++	local_irq_restore(flags);
++}
+diff --git a/arch/m68k/mvme147/mvme147.h b/arch/m68k/mvme147/mvme147.h
+new file mode 100644
+index 00000000000000..140bc98b0102aa
+--- /dev/null
++++ b/arch/m68k/mvme147/mvme147.h
+@@ -0,0 +1,6 @@
++/* SPDX-License-Identifier: GPL-2.0-only */
++
++struct console;
++
++/* config.c */
++void mvme147_scc_write(struct console *co, const char *str, unsigned int count);
+diff --git a/arch/microblaze/kernel/microblaze_ksyms.c b/arch/microblaze/kernel/microblaze_ksyms.c
+index c892e173ec990b..a8553f54152b76 100644
+--- a/arch/microblaze/kernel/microblaze_ksyms.c
++++ b/arch/microblaze/kernel/microblaze_ksyms.c
+@@ -16,6 +16,7 @@
+ #include <asm/page.h>
+ #include <linux/ftrace.h>
+ #include <linux/uaccess.h>
++#include <asm/xilinx_mb_manager.h>
+ 
+ #ifdef CONFIG_FUNCTION_TRACER
+ extern void _mcount(void);
+@@ -46,3 +47,12 @@ extern void __udivsi3(void);
+ EXPORT_SYMBOL(__udivsi3);
+ extern void __umodsi3(void);
+ EXPORT_SYMBOL(__umodsi3);
++
++#ifdef CONFIG_MB_MANAGER
++extern void xmb_manager_register(uintptr_t phys_baseaddr, u32 cr_val,
++				 void (*callback)(void *data),
++				 void *priv, void (*reset_callback)(void *data));
++EXPORT_SYMBOL(xmb_manager_register);
++extern asmlinkage void xmb_inject_err(void);
++EXPORT_SYMBOL(xmb_inject_err);
++#endif
+diff --git a/arch/microblaze/kernel/prom.c b/arch/microblaze/kernel/prom.c
+index e424c796e297c5..76ac4cfdfb42ce 100644
+--- a/arch/microblaze/kernel/prom.c
++++ b/arch/microblaze/kernel/prom.c
+@@ -18,7 +18,7 @@ void __init early_init_devtree(void *params)
+ {
+ 	pr_debug(" -> early_init_devtree(%p)\n", params);
+ 
+-	early_init_dt_scan(params);
++	early_init_dt_scan(params, __pa(params));
+ 	if (!strlen(boot_command_line))
+ 		strscpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE);
+ 
+diff --git a/arch/mips/include/asm/switch_to.h b/arch/mips/include/asm/switch_to.h
+index a4374b4cb88fd8..d6ccd534402133 100644
+--- a/arch/mips/include/asm/switch_to.h
++++ b/arch/mips/include/asm/switch_to.h
+@@ -97,7 +97,7 @@ do {									\
+ 	}								\
+ } while (0)
+ #else
+-# define __sanitize_fcr31(next)
++# define __sanitize_fcr31(next) do { (void) (next); } while (0)
+ #endif
+ 
+ /*
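The empty fallback definition left `next` set-but-unused when the FPU path is compiled out. The do/while form evaluates and discards its argument, silencing -Wunused-but-set-variable while remaining a single statement that stays safe in an unbraced if/else. A self-contained illustration:

    #include <stdio.h>

    /* Empty variant: the argument goes unused when the real path is
     * compiled out, tripping -Wunused-but-set-variable. */
    #define sanitize_empty(next)

    /* Patch's variant: evaluates and discards the argument, and remains
     * a single statement. */
    #define sanitize_void(next) do { (void)(next); } while (0)

    int main(void)
    {
            int next = 42;

            sanitize_void(next);    /* 'next' counts as used; no warning */
            printf("%d\n", next);
            return 0;
    }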
+diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c
+index 6062e6fa589a87..4fd6da0a06c372 100644
+--- a/arch/mips/kernel/prom.c
++++ b/arch/mips/kernel/prom.c
+@@ -41,7 +41,7 @@ char *mips_get_machine_name(void)
+ 
+ void __init __dt_setup_arch(void *bph)
+ {
+-	if (!early_init_dt_scan(bph))
++	if (!early_init_dt_scan(bph, __pa(bph)))
+ 		return;
+ 
+ 	mips_set_machine_name(of_flat_dt_get_machine_name());
+diff --git a/arch/mips/kernel/relocate.c b/arch/mips/kernel/relocate.c
+index 7eeeaf1ff95d26..cda7983e7c18d4 100644
+--- a/arch/mips/kernel/relocate.c
++++ b/arch/mips/kernel/relocate.c
+@@ -337,7 +337,7 @@ void *__init relocate_kernel(void)
+ #if defined(CONFIG_USE_OF)
+ 	/* Deal with the device tree */
+ 	fdt = plat_get_fdt();
+-	early_init_dt_scan(fdt);
++	early_init_dt_scan(fdt, __pa(fdt));
+ 	if (boot_command_line[0]) {
+ 		/* Boot command line was passed in device tree */
+ 		strscpy(arcs_cmdline, boot_command_line, COMMAND_LINE_SIZE);
+diff --git a/arch/nios2/kernel/prom.c b/arch/nios2/kernel/prom.c
+index 9a8393e6b4a85e..db049249766fc2 100644
+--- a/arch/nios2/kernel/prom.c
++++ b/arch/nios2/kernel/prom.c
+@@ -27,7 +27,7 @@ void __init early_init_devtree(void *params)
+ 	if (be32_to_cpup((__be32 *)CONFIG_NIOS2_DTB_PHYS_ADDR) ==
+ 		 OF_DT_HEADER) {
+ 		params = (void *)CONFIG_NIOS2_DTB_PHYS_ADDR;
+-		early_init_dt_scan(params);
++		early_init_dt_scan(params, __pa(params));
+ 		return;
+ 	}
+ #endif
+@@ -37,5 +37,5 @@ void __init early_init_devtree(void *params)
+ 		params = (void *)__dtb_start;
+ #endif
+ 
+-	early_init_dt_scan(params);
++	early_init_dt_scan(params, __pa(params));
+ }
+diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig
+index 69c0258700b28a..3279ef457c573a 100644
+--- a/arch/openrisc/Kconfig
++++ b/arch/openrisc/Kconfig
+@@ -65,6 +65,9 @@ config STACKTRACE_SUPPORT
+ config LOCKDEP_SUPPORT
+ 	def_bool  y
+ 
++config FIX_EARLYCON_MEM
++	def_bool y
++
+ menu "Processor type and features"
+ 
+ choice
+diff --git a/arch/openrisc/include/asm/fixmap.h b/arch/openrisc/include/asm/fixmap.h
+index ecdb98a5839f7c..aaa6a26a3e9215 100644
+--- a/arch/openrisc/include/asm/fixmap.h
++++ b/arch/openrisc/include/asm/fixmap.h
+@@ -26,29 +26,18 @@
+ #include <linux/bug.h>
+ #include <asm/page.h>
+ 
+-/*
+- * On OpenRISC we use these special fixed_addresses for doing ioremap
+- * early in the boot process before memory initialization is complete.
+- * This is used, in particular, by the early serial console code.
+- *
+- * It's not really 'fixmap', per se, but fits loosely into the same
+- * paradigm.
+- */
+ enum fixed_addresses {
+-	/*
+-	 * FIX_IOREMAP entries are useful for mapping physical address
+-	 * space before ioremap() is useable, e.g. really early in boot
+-	 * before kmalloc() is working.
+-	 */
+-#define FIX_N_IOREMAPS  32
+-	FIX_IOREMAP_BEGIN,
+-	FIX_IOREMAP_END = FIX_IOREMAP_BEGIN + FIX_N_IOREMAPS - 1,
++	FIX_EARLYCON_MEM_BASE,
+ 	__end_of_fixed_addresses
+ };
+ 
+ #define FIXADDR_SIZE		(__end_of_fixed_addresses << PAGE_SHIFT)
+ /* FIXADDR_BOTTOM might be a better name here... */
+ #define FIXADDR_START		(FIXADDR_TOP - FIXADDR_SIZE)
++#define FIXMAP_PAGE_IO		PAGE_KERNEL_NOCACHE
++
++extern void __set_fixmap(enum fixed_addresses idx,
++			 phys_addr_t phys, pgprot_t flags);
+ 
+ #include <asm-generic/fixmap.h>
+ 
+diff --git a/arch/openrisc/kernel/prom.c b/arch/openrisc/kernel/prom.c
+index 19e6008bf114c6..e424e9bd12a793 100644
+--- a/arch/openrisc/kernel/prom.c
++++ b/arch/openrisc/kernel/prom.c
+@@ -22,6 +22,6 @@
+ 
+ void __init early_init_devtree(void *params)
+ {
+-	early_init_dt_scan(params);
++	early_init_dt_scan(params, __pa(params));
+ 	memblock_allow_resize();
+ }
+diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c
+index 1dcd78c8f0e99b..d0cb1a0126f95d 100644
+--- a/arch/openrisc/mm/init.c
++++ b/arch/openrisc/mm/init.c
+@@ -207,6 +207,43 @@ void __init mem_init(void)
+ 	return;
+ }
+ 
++static int __init map_page(unsigned long va, phys_addr_t pa, pgprot_t prot)
++{
++	p4d_t *p4d;
++	pud_t *pud;
++	pmd_t *pmd;
++	pte_t *pte;
++
++	p4d = p4d_offset(pgd_offset_k(va), va);
++	pud = pud_offset(p4d, va);
++	pmd = pmd_offset(pud, va);
++	pte = pte_alloc_kernel(pmd, va);
++
++	if (pte == NULL)
++		return -ENOMEM;
++
++	if (pgprot_val(prot))
++		set_pte_at(&init_mm, va, pte, pfn_pte(pa >> PAGE_SHIFT, prot));
++	else
++		pte_clear(&init_mm, va, pte);
++
++	local_flush_tlb_page(NULL, va);
++	return 0;
++}
++
++void __init __set_fixmap(enum fixed_addresses idx,
++			 phys_addr_t phys, pgprot_t prot)
++{
++	unsigned long address = __fix_to_virt(idx);
++
++	if (idx >= __end_of_fixed_addresses) {
++		BUG();
++		return;
++	}
++
++	map_page(address, phys, prot);
++}
++
+ static const pgprot_t protection_map[16] = {
+ 	[VM_NONE]					= PAGE_NONE,
+ 	[VM_READ]					= PAGE_READONLY_X,
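Earlycon needs an I/O mapping long before ioremap() works; FIX_EARLYCON_MEM tells the generic code that this architecture provides a fixmap slot for it, and the new __set_fixmap() installs the PTE at a compile-time virtual address. Roughly how a consumer uses the slot (a hedged sketch of the idea only, not the exact serial-core code path):

    #include <asm/fixmap.h>

    static void __iomem *early_map_console(phys_addr_t paddr)
    {
            unsigned long offset = paddr & ~PAGE_MASK;

            __set_fixmap(FIX_EARLYCON_MEM_BASE, paddr & PAGE_MASK,
                         FIXMAP_PAGE_IO);
            return (void __iomem *)
                    (__fix_to_virt(FIX_EARLYCON_MEM_BASE) + offset);
    }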
+diff --git a/arch/parisc/kernel/ftrace.c b/arch/parisc/kernel/ftrace.c
+index c91f9c2e61ed25..f8d08eab7db8b0 100644
+--- a/arch/parisc/kernel/ftrace.c
++++ b/arch/parisc/kernel/ftrace.c
+@@ -87,7 +87,7 @@ int ftrace_enable_ftrace_graph_caller(void)
+ 
+ int ftrace_disable_ftrace_graph_caller(void)
+ {
+-	static_key_enable(&ftrace_graph_enable.key);
++	static_key_disable(&ftrace_graph_enable.key);
+ 	return 0;
+ }
+ #endif
+diff --git a/arch/powerpc/include/asm/dtl.h b/arch/powerpc/include/asm/dtl.h
+index d6f43d149f8dcb..a5c21bc623cb00 100644
+--- a/arch/powerpc/include/asm/dtl.h
++++ b/arch/powerpc/include/asm/dtl.h
+@@ -1,8 +1,8 @@
+ #ifndef _ASM_POWERPC_DTL_H
+ #define _ASM_POWERPC_DTL_H
+ 
++#include <linux/rwsem.h>
+ #include <asm/lppaca.h>
+-#include <linux/spinlock_types.h>
+ 
+ /*
+  * Layout of entries in the hypervisor's dispatch trace log buffer.
+@@ -35,7 +35,7 @@ struct dtl_entry {
+ #define DTL_LOG_ALL		(DTL_LOG_CEDE | DTL_LOG_PREEMPT | DTL_LOG_FAULT)
+ 
+ extern struct kmem_cache *dtl_cache;
+-extern rwlock_t dtl_access_lock;
++extern struct rw_semaphore dtl_access_lock;
+ 
+ extern void register_dtl_buffer(int cpu);
+ extern void alloc_dtl_buffers(unsigned long *time_limit);
+diff --git a/arch/powerpc/include/asm/fadump.h b/arch/powerpc/include/asm/fadump.h
+index ef40c9b6972a6e..a48f54dde4f656 100644
+--- a/arch/powerpc/include/asm/fadump.h
++++ b/arch/powerpc/include/asm/fadump.h
+@@ -19,6 +19,7 @@ extern int is_fadump_active(void);
+ extern int should_fadump_crash(void);
+ extern void crash_fadump(struct pt_regs *, const char *);
+ extern void fadump_cleanup(void);
++void fadump_setup_param_area(void);
+ extern void fadump_append_bootargs(void);
+ 
+ #else	/* CONFIG_FA_DUMP */
+@@ -26,6 +27,7 @@ static inline int is_fadump_active(void) { return 0; }
+ static inline int should_fadump_crash(void) { return 0; }
+ static inline void crash_fadump(struct pt_regs *regs, const char *str) { }
+ static inline void fadump_cleanup(void) { }
++static inline void fadump_setup_param_area(void) { }
+ static inline void fadump_append_bootargs(void) { }
+ #endif /* !CONFIG_FA_DUMP */
+ 
+@@ -34,4 +36,11 @@ extern int early_init_dt_scan_fw_dump(unsigned long node, const char *uname,
+ 				      int depth, void *data);
+ extern int fadump_reserve_mem(void);
+ #endif
++
++#if defined(CONFIG_FA_DUMP) && defined(CONFIG_CMA)
++void fadump_cma_init(void);
++#else
++static inline void fadump_cma_init(void) { }
++#endif
++
+ #endif /* _ASM_POWERPC_FADUMP_H */
+diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
+index 2ef9a5f4e5d14c..11065313d4c123 100644
+--- a/arch/powerpc/include/asm/kvm_book3s_64.h
++++ b/arch/powerpc/include/asm/kvm_book3s_64.h
+@@ -684,8 +684,8 @@ int kvmhv_nestedv2_set_ptbl_entry(unsigned long lpid, u64 dw0, u64 dw1);
+ int kvmhv_nestedv2_parse_output(struct kvm_vcpu *vcpu);
+ int kvmhv_nestedv2_set_vpa(struct kvm_vcpu *vcpu, unsigned long vpa);
+ 
+-int kmvhv_counters_tracepoint_regfunc(void);
+-void kmvhv_counters_tracepoint_unregfunc(void);
++int kvmhv_counters_tracepoint_regfunc(void);
++void kvmhv_counters_tracepoint_unregfunc(void);
+ int kvmhv_get_l2_counters_status(void);
+ void kvmhv_set_l2_counters_status(int cpu, bool status);
+ 
+diff --git a/arch/powerpc/include/asm/sstep.h b/arch/powerpc/include/asm/sstep.h
+index 50950deedb8734..e3d0e714ff280e 100644
+--- a/arch/powerpc/include/asm/sstep.h
++++ b/arch/powerpc/include/asm/sstep.h
+@@ -173,9 +173,4 @@ int emulate_step(struct pt_regs *regs, ppc_inst_t instr);
+  */
+ extern int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op);
+ 
+-extern void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg,
+-			     const void *mem, bool cross_endian);
+-extern void emulate_vsx_store(struct instruction_op *op,
+-			      const union vsx_reg *reg, void *mem,
+-			      bool cross_endian);
+ extern int emulate_dcbz(unsigned long ea, struct pt_regs *regs);
+diff --git a/arch/powerpc/include/asm/vdso.h b/arch/powerpc/include/asm/vdso.h
+index 7650b6ce14c85a..8d972bc98b55fe 100644
+--- a/arch/powerpc/include/asm/vdso.h
++++ b/arch/powerpc/include/asm/vdso.h
+@@ -25,6 +25,7 @@ int vdso_getcpu_init(void);
+ #ifdef __VDSO64__
+ #define V_FUNCTION_BEGIN(name)		\
+ 	.globl name;			\
++	.type name,@function; 		\
+ 	name:				\
+ 
+ #define V_FUNCTION_END(name)		\
+diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
+index af4263594eb2c9..1bee15c013e75f 100644
+--- a/arch/powerpc/kernel/dt_cpu_ftrs.c
++++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
+@@ -867,7 +867,7 @@ bool __init dt_cpu_ftrs_init(void *fdt)
+ 	using_dt_cpu_ftrs = false;
+ 
+ 	/* Setup and verify the FDT, if it fails we just bail */
+-	if (!early_init_dt_verify(fdt))
++	if (!early_init_dt_verify(fdt, __pa(fdt)))
+ 		return false;
+ 
+ 	if (!of_scan_flat_dt(fdt_find_cpu_features, NULL))
+diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
+index a612e7513a4f8a..4641de75f7fc1e 100644
+--- a/arch/powerpc/kernel/fadump.c
++++ b/arch/powerpc/kernel/fadump.c
+@@ -78,27 +78,23 @@ static struct cma *fadump_cma;
+  * But for some reason even if it fails we still have the memory reservation
+  * with us and we can still continue doing fadump.
+  */
+-static int __init fadump_cma_init(void)
++void __init fadump_cma_init(void)
+ {
+ 	unsigned long long base, size;
+ 	int rc;
+ 
+-	if (!fw_dump.fadump_enabled)
+-		return 0;
+-
++	if (!fw_dump.fadump_supported || !fw_dump.fadump_enabled ||
++			fw_dump.dump_active)
++		return;
+ 	/*
+ 	 * Do not use CMA if user has provided fadump=nocma kernel parameter.
+-	 * Return 1 to continue with fadump old behaviour.
+ 	 */
+-	if (fw_dump.nocma)
+-		return 1;
++	if (fw_dump.nocma || !fw_dump.boot_memory_size)
++		return;
+ 
+ 	base = fw_dump.reserve_dump_area_start;
+ 	size = fw_dump.boot_memory_size;
+ 
+-	if (!size)
+-		return 0;
+-
+ 	rc = cma_init_reserved_mem(base, size, 0, "fadump_cma", &fadump_cma);
+ 	if (rc) {
+ 		pr_err("Failed to init cma area for firmware-assisted dump,%d\n", rc);
+@@ -108,7 +104,7 @@ static int __init fadump_cma_init(void)
+ 		 * blocked from production system usage.  Hence return 1,
+ 		 * so that we can continue with fadump.
+ 		 */
+-		return 1;
++		return;
+ 	}
+ 
+ 	/*
+@@ -125,10 +121,7 @@ static int __init fadump_cma_init(void)
+ 		cma_get_size(fadump_cma),
+ 		(unsigned long)cma_get_base(fadump_cma) >> 20,
+ 		fw_dump.reserve_dump_area_size);
+-	return 1;
+ }
+-#else
+-static int __init fadump_cma_init(void) { return 1; }
+ #endif /* CONFIG_CMA */
+ 
+ /*
+@@ -143,7 +136,7 @@ void __init fadump_append_bootargs(void)
+ 	if (!fw_dump.dump_active || !fw_dump.param_area_supported || !fw_dump.param_area)
+ 		return;
+ 
+-	if (fw_dump.param_area >= fw_dump.boot_mem_top) {
++	if (fw_dump.param_area < fw_dump.boot_mem_top) {
+ 		if (memblock_reserve(fw_dump.param_area, COMMAND_LINE_SIZE)) {
+ 			pr_warn("WARNING: Can't use additional parameters area!\n");
+ 			fw_dump.param_area = 0;
+@@ -637,8 +630,6 @@ int __init fadump_reserve_mem(void)
+ 
+ 		pr_info("Reserved %lldMB of memory at %#016llx (System RAM: %lldMB)\n",
+ 			(size >> 20), base, (memblock_phys_mem_size() >> 20));
+-
+-		ret = fadump_cma_init();
+ 	}
+ 
+ 	return ret;
+@@ -1586,6 +1577,12 @@ static void __init fadump_init_files(void)
+ 		return;
+ 	}
+ 
++	if (fw_dump.param_area) {
++		rc = sysfs_create_file(fadump_kobj, &bootargs_append_attr.attr);
++		if (rc)
++			pr_err("unable to create bootargs_append sysfs file (%d)\n", rc);
++	}
++
+ 	debugfs_create_file("fadump_region", 0444, arch_debugfs_dir, NULL,
+ 			    &fadump_region_fops);
+ 
+@@ -1740,7 +1737,7 @@ static void __init fadump_process(void)
+  * Reserve memory to store additional parameters to be passed
+  * for fadump/capture kernel.
+  */
+-static void __init fadump_setup_param_area(void)
++void __init fadump_setup_param_area(void)
+ {
+ 	phys_addr_t range_start, range_end;
+ 
+@@ -1748,7 +1745,7 @@ static void __init fadump_setup_param_area(void)
+ 		return;
+ 
+ 	/* This memory can't be used by PFW or bootloader as it is shared across kernels */
+-	if (radix_enabled()) {
++	if (early_radix_enabled()) {
+ 		/*
+ 		 * Anywhere in the upper half should be good enough as all memory
+ 		 * is accessible in real mode.
+@@ -1776,12 +1773,12 @@ static void __init fadump_setup_param_area(void)
+ 						       COMMAND_LINE_SIZE,
+ 						       range_start,
+ 						       range_end);
+-	if (!fw_dump.param_area || sysfs_create_file(fadump_kobj, &bootargs_append_attr.attr)) {
++	if (!fw_dump.param_area) {
+ 		pr_warn("WARNING: Could not setup area to pass additional parameters!\n");
+ 		return;
+ 	}
+ 
+-	memset(phys_to_virt(fw_dump.param_area), 0, COMMAND_LINE_SIZE);
++	memset((void *)fw_dump.param_area, 0, COMMAND_LINE_SIZE);
+ }
+ 
+ /*
+@@ -1807,7 +1804,6 @@ int __init setup_fadump(void)
+ 	}
+ 	/* Initialize the kernel dump memory structure and register with f/w */
+ 	else if (fw_dump.reserve_dump_area_size) {
+-		fadump_setup_param_area();
+ 		fw_dump.ops->fadump_init_mem_struct(&fw_dump);
+ 		register_fadump();
+ 	}
+diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
+index 0be07ed407c703..e0059842a1c64b 100644
+--- a/arch/powerpc/kernel/prom.c
++++ b/arch/powerpc/kernel/prom.c
+@@ -791,7 +791,7 @@ void __init early_init_devtree(void *params)
+ 	DBG(" -> early_init_devtree(%px)\n", params);
+ 
+ 	/* Too early to BUG_ON(), do it by hand */
+-	if (!early_init_dt_verify(params))
++	if (!early_init_dt_verify(params, __pa(params)))
+ 		panic("BUG: Failed verifying flat device tree, bad version?");
+ 
+ 	of_scan_flat_dt(early_init_dt_scan_model, NULL);
+@@ -908,6 +908,9 @@ void __init early_init_devtree(void *params)
+ 
+ 	mmu_early_init_devtree();
+ 
++	/* Setup param area for passing additional parameters to fadump capture kernel. */
++	fadump_setup_param_area();
++
+ #ifdef CONFIG_PPC_POWERNV
+ 	/* Scan and build the list of machine check recoverable ranges */
+ 	of_scan_flat_dt(early_init_dt_scan_recoverable_ranges, NULL);
+diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
+index 943430077375a4..b6b01502e50472 100644
+--- a/arch/powerpc/kernel/setup-common.c
++++ b/arch/powerpc/kernel/setup-common.c
+@@ -997,9 +997,11 @@ void __init setup_arch(char **cmdline_p)
+ 	initmem_init();
+ 
+ 	/*
+-	 * Reserve large chunks of memory for use by CMA for KVM and hugetlb. These must
+-	 * be called after initmem_init(), so that pageblock_order is initialised.
++	 * Reserve large chunks of memory for use by CMA for fadump, KVM and
++	 * hugetlb. These must be called after initmem_init(), so that
++	 * pageblock_order is initialised.
+ 	 */
++	fadump_cma_init();
+ 	kvm_cma_reserve();
+ 	gigantic_hugetlb_cma_reserve();
+ 
+diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
+index 22f83fbbc762ac..1edc7cd68c10d0 100644
+--- a/arch/powerpc/kernel/setup_64.c
++++ b/arch/powerpc/kernel/setup_64.c
+@@ -920,6 +920,7 @@ static int __init disable_hardlockup_detector(void)
+ 	hardlockup_detector_disable();
+ #else
+ 	if (firmware_has_feature(FW_FEATURE_LPAR)) {
++		check_kvm_guest();
+ 		if (is_kvm_guest())
+ 			hardlockup_detector_disable();
+ 	}
+diff --git a/arch/powerpc/kexec/file_load_64.c b/arch/powerpc/kexec/file_load_64.c
+index 9738adabeb1fee..dc65c139115772 100644
+--- a/arch/powerpc/kexec/file_load_64.c
++++ b/arch/powerpc/kexec/file_load_64.c
+@@ -736,13 +736,18 @@ int setup_purgatory_ppc64(struct kimage *image, const void *slave_code,
+ 	if (dn) {
+ 		u64 val;
+ 
+-		of_property_read_u64(dn, "opal-base-address", &val);
++		ret = of_property_read_u64(dn, "opal-base-address", &val);
++		if (ret)
++			goto out;
++
+ 		ret = kexec_purgatory_get_set_symbol(image, "opal_base", &val,
+ 						     sizeof(val), false);
+ 		if (ret)
+ 			goto out;
+ 
+-		of_property_read_u64(dn, "opal-entry-address", &val);
++		ret = of_property_read_u64(dn, "opal-entry-address", &val);
++		if (ret)
++			goto out;
+ 		ret = kexec_purgatory_get_set_symbol(image, "opal_entry", &val,
+ 						     sizeof(val), false);
+ 	}
+diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
+index ad8dc4ccdaab9e..57b6c1ba84d47e 100644
+--- a/arch/powerpc/kvm/book3s_hv.c
++++ b/arch/powerpc/kvm/book3s_hv.c
+@@ -4154,7 +4154,7 @@ void kvmhv_set_l2_counters_status(int cpu, bool status)
+ 		lppaca_of(cpu).l2_counters_enable = 0;
+ }
+ 
+-int kmvhv_counters_tracepoint_regfunc(void)
++int kvmhv_counters_tracepoint_regfunc(void)
+ {
+ 	int cpu;
+ 
+@@ -4164,7 +4164,7 @@ int kmvhv_counters_tracepoint_regfunc(void)
+ 	return 0;
+ }
+ 
+-void kmvhv_counters_tracepoint_unregfunc(void)
++void kvmhv_counters_tracepoint_unregfunc(void)
+ {
+ 	int cpu;
+ 
+@@ -4309,6 +4309,15 @@ static int kvmhv_vcpu_entry_p9_nested(struct kvm_vcpu *vcpu, u64 time_limit, uns
+ 	}
+ 	hvregs.hdec_expiry = time_limit;
+ 
++	/*
++	 * hvregs has the doorbell status, so zero it here which
++	 * enables us to receive doorbells when H_ENTER_NESTED is
++	 * in progress for this vCPU
++	 */
++
++	if (vcpu->arch.doorbell_request)
++		vcpu->arch.doorbell_request = 0;
++
+ 	/*
+ 	 * When setting DEC, we must always deal with irq_work_raise
+ 	 * via NMI vs setting DEC. The problem occurs right as we
+@@ -4912,7 +4921,6 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
+ 			lpcr &= ~LPCR_MER;
+ 		}
+ 	} else if (vcpu->arch.pending_exceptions ||
+-		   vcpu->arch.doorbell_request ||
+ 		   xive_interrupt_pending(vcpu)) {
+ 		vcpu->arch.ret = RESUME_HOST;
+ 		goto out;
+diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c
+index 05f5220960c63b..125440a606ee3b 100644
+--- a/arch/powerpc/kvm/book3s_hv_nested.c
++++ b/arch/powerpc/kvm/book3s_hv_nested.c
+@@ -32,7 +32,7 @@ void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
+ 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
+ 
+ 	hr->pcr = vc->pcr | PCR_MASK;
+-	hr->dpdes = vc->dpdes;
++	hr->dpdes = vcpu->arch.doorbell_request;
+ 	hr->hfscr = vcpu->arch.hfscr;
+ 	hr->tb_offset = vc->tb_offset;
+ 	hr->dawr0 = vcpu->arch.dawr0;
+@@ -105,7 +105,7 @@ static void save_hv_return_state(struct kvm_vcpu *vcpu,
+ {
+ 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
+ 
+-	hr->dpdes = vc->dpdes;
++	hr->dpdes = vcpu->arch.doorbell_request;
+ 	hr->purr = vcpu->arch.purr;
+ 	hr->spurr = vcpu->arch.spurr;
+ 	hr->ic = vcpu->arch.ic;
+@@ -143,7 +143,7 @@ static void restore_hv_regs(struct kvm_vcpu *vcpu, const struct hv_guest_state *
+ 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
+ 
+ 	vc->pcr = hr->pcr | PCR_MASK;
+-	vc->dpdes = hr->dpdes;
++	vcpu->arch.doorbell_request = hr->dpdes;
+ 	vcpu->arch.hfscr = hr->hfscr;
+ 	vcpu->arch.dawr0 = hr->dawr0;
+ 	vcpu->arch.dawrx0 = hr->dawrx0;
+@@ -170,7 +170,13 @@ void kvmhv_restore_hv_return_state(struct kvm_vcpu *vcpu,
+ {
+ 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
+ 
+-	vc->dpdes = hr->dpdes;
++	/*
++	 * This L2 vCPU might have received a doorbell while H_ENTER_NESTED was being handled.
++	 * Make sure we preserve the doorbell if it was either:
++	 *   a) Sent after H_ENTER_NESTED was called on this vCPU (arch.doorbell_request would be 1)
++	 *   b) Doorbell was not handled and L2 exited for some other reason (hr->dpdes would be 1)
++	 */
++	vcpu->arch.doorbell_request = vcpu->arch.doorbell_request | hr->dpdes;
+ 	vcpu->arch.hfscr = hr->hfscr;
+ 	vcpu->arch.purr = hr->purr;
+ 	vcpu->arch.spurr = hr->spurr;
+diff --git a/arch/powerpc/kvm/trace_hv.h b/arch/powerpc/kvm/trace_hv.h
+index 77ebc724e6cdf4..35fccaa575cc15 100644
+--- a/arch/powerpc/kvm/trace_hv.h
++++ b/arch/powerpc/kvm/trace_hv.h
+@@ -538,7 +538,7 @@ TRACE_EVENT_FN_COND(kvmppc_vcpu_stats,
+ 	TP_printk("VCPU %d: l1_to_l2_cs_time=%llu ns l2_to_l1_cs_time=%llu ns l2_runtime=%llu ns",
+ 		__entry->vcpu_id,  __entry->l1_to_l2_cs,
+ 		__entry->l2_to_l1_cs, __entry->l2_runtime),
+-	kmvhv_counters_tracepoint_regfunc, kmvhv_counters_tracepoint_unregfunc
++	kvmhv_counters_tracepoint_regfunc, kvmhv_counters_tracepoint_unregfunc
+ );
+ #endif
+ #endif /* _TRACE_KVM_HV_H */
+diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
+index e65f3fb68d06ba..ac3ee19531d8ac 100644
+--- a/arch/powerpc/lib/sstep.c
++++ b/arch/powerpc/lib/sstep.c
+@@ -780,8 +780,8 @@ static nokprobe_inline int emulate_stq(struct pt_regs *regs, unsigned long ea,
+ #endif /* __powerpc64 */
+ 
+ #ifdef CONFIG_VSX
+-void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg,
+-		      const void *mem, bool rev)
++static nokprobe_inline void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg,
++					     const void *mem, bool rev)
+ {
+ 	int size, read_size;
+ 	int i, j;
+@@ -863,11 +863,9 @@ void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg,
+ 		break;
+ 	}
+ }
+-EXPORT_SYMBOL_GPL(emulate_vsx_load);
+-NOKPROBE_SYMBOL(emulate_vsx_load);
+ 
+-void emulate_vsx_store(struct instruction_op *op, const union vsx_reg *reg,
+-		       void *mem, bool rev)
++static nokprobe_inline void emulate_vsx_store(struct instruction_op *op, const union vsx_reg *reg,
++					      void *mem, bool rev)
+ {
+ 	int size, write_size;
+ 	int i, j;
+@@ -955,8 +953,6 @@ void emulate_vsx_store(struct instruction_op *op, const union vsx_reg *reg,
+ 		break;
+ 	}
+ }
+-EXPORT_SYMBOL_GPL(emulate_vsx_store);
+-NOKPROBE_SYMBOL(emulate_vsx_store);
+ 
+ static nokprobe_inline int do_vsx_load(struct instruction_op *op,
+ 				       unsigned long ea, struct pt_regs *regs,
+diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
+index 81c77ddce2e30a..c156fe0d53c378 100644
+--- a/arch/powerpc/mm/fault.c
++++ b/arch/powerpc/mm/fault.c
+@@ -439,10 +439,16 @@ static int ___do_page_fault(struct pt_regs *regs, unsigned long address,
+ 	/*
+ 	 * The kernel should never take an execute fault nor should it
+ 	 * take a page fault to a kernel address or a page fault to a user
+-	 * address outside of dedicated places
++	 * address outside of dedicated places.
++	 *
++	 * Rather than kfence directly reporting false negatives, search whether
++	 * the NIP belongs to the fixup table for cases where fault could come
++	 * from functions like copy_from_kernel_nofault().
+ 	 */
+ 	if (unlikely(!is_user && bad_kernel_fault(regs, error_code, address, is_write))) {
+-		if (kfence_handle_page_fault(address, is_write, regs))
++		if (is_kfence_address((void *)address) &&
++		    !search_exception_tables(instruction_pointer(regs)) &&
++		    kfence_handle_page_fault(address, is_write, regs))
+ 			return 0;
+ 
+ 		return SIGSEGV;
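The new guard matters because helpers such as copy_from_kernel_nofault() fault on purpose and recover through the exception table; if the faulting NIP has a fixup entry, the fault must reach that fixup rather than be reported by KFENCE. A hedged sketch of the deliberately-faulting pattern (probe_kernel_byte is an invented name, in the style of the real nofault helpers):

    static long probe_kernel_byte(const u8 *addr, u8 *val)
    {
            __get_kernel_nofault(val, addr, u8, Efault);
            return 0;               /* load succeeded, no fault */
    Efault:
            return -EFAULT;         /* fixup ran; KFENCE never sees it */
    }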
+diff --git a/arch/powerpc/platforms/pseries/dtl.c b/arch/powerpc/platforms/pseries/dtl.c
+index 8cb9d36ea49159..f293588b8c7b51 100644
+--- a/arch/powerpc/platforms/pseries/dtl.c
++++ b/arch/powerpc/platforms/pseries/dtl.c
+@@ -191,7 +191,7 @@ static int dtl_enable(struct dtl *dtl)
+ 		return -EBUSY;
+ 
+ 	/* ensure there are no other conflicting dtl users */
+-	if (!read_trylock(&dtl_access_lock))
++	if (!down_read_trylock(&dtl_access_lock))
+ 		return -EBUSY;
+ 
+ 	n_entries = dtl_buf_entries;
+@@ -199,7 +199,7 @@ static int dtl_enable(struct dtl *dtl)
+ 	if (!buf) {
+ 		printk(KERN_WARNING "%s: buffer alloc failed for cpu %d\n",
+ 				__func__, dtl->cpu);
+-		read_unlock(&dtl_access_lock);
++		up_read(&dtl_access_lock);
+ 		return -ENOMEM;
+ 	}
+ 
+@@ -217,7 +217,7 @@ static int dtl_enable(struct dtl *dtl)
+ 	spin_unlock(&dtl->lock);
+ 
+ 	if (rc) {
+-		read_unlock(&dtl_access_lock);
++		up_read(&dtl_access_lock);
+ 		kmem_cache_free(dtl_cache, buf);
+ 	}
+ 
+@@ -232,7 +232,7 @@ static void dtl_disable(struct dtl *dtl)
+ 	dtl->buf = NULL;
+ 	dtl->buf_entries = 0;
+ 	spin_unlock(&dtl->lock);
+-	read_unlock(&dtl_access_lock);
++	up_read(&dtl_access_lock);
+ }
+ 
+ /* file interface */
+diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
+index c1d8bee8f7018c..bb09990eec309a 100644
+--- a/arch/powerpc/platforms/pseries/lpar.c
++++ b/arch/powerpc/platforms/pseries/lpar.c
+@@ -169,7 +169,7 @@ struct vcpu_dispatch_data {
+  */
+ #define NR_CPUS_H	NR_CPUS
+ 
+-DEFINE_RWLOCK(dtl_access_lock);
++DECLARE_RWSEM(dtl_access_lock);
+ static DEFINE_PER_CPU(struct vcpu_dispatch_data, vcpu_disp_data);
+ static DEFINE_PER_CPU(u64, dtl_entry_ridx);
+ static DEFINE_PER_CPU(struct dtl_worker, dtl_workers);
+@@ -463,7 +463,7 @@ static int dtl_worker_enable(unsigned long *time_limit)
+ {
+ 	int rc = 0, state;
+ 
+-	if (!write_trylock(&dtl_access_lock)) {
++	if (!down_write_trylock(&dtl_access_lock)) {
+ 		rc = -EBUSY;
+ 		goto out;
+ 	}
+@@ -479,7 +479,7 @@ static int dtl_worker_enable(unsigned long *time_limit)
+ 		pr_err("vcpudispatch_stats: unable to setup workqueue for DTL processing\n");
+ 		free_dtl_buffers(time_limit);
+ 		reset_global_dtl_mask();
+-		write_unlock(&dtl_access_lock);
++		up_write(&dtl_access_lock);
+ 		rc = -EINVAL;
+ 		goto out;
+ 	}
+@@ -494,7 +494,7 @@ static void dtl_worker_disable(unsigned long *time_limit)
+ 	cpuhp_remove_state(dtl_worker_state);
+ 	free_dtl_buffers(time_limit);
+ 	reset_global_dtl_mask();
+-	write_unlock(&dtl_access_lock);
++	up_write(&dtl_access_lock);
+ }
+ 
+ static ssize_t vcpudispatch_stats_write(struct file *file, const char __user *p,
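Switching dtl_access_lock from an rwlock to an rw_semaphore keeps the same sharing model -- many concurrent per-CPU dtl users versus one exclusive vcpudispatch_stats owner -- but makes it legal to sleep (e.g. in GFP_KERNEL allocations) while the lock is held, which a spinning rwlock forbids. A minimal sketch of the pattern, with demo_* names invented for illustration:

    #include <linux/rwsem.h>

    static DECLARE_RWSEM(demo_lock);

    /* Reader side: many concurrent users, and sleeping is now legal. */
    static int demo_enable(void)
    {
            if (!down_read_trylock(&demo_lock))
                    return -EBUSY;  /* exclusive writer is active */
            /* GFP_KERNEL allocations are safe under an rwsem */
            return 0;
    }

    static void demo_disable(void)
    {
            up_read(&demo_lock);
    }

    /* Writer side: the global stats mode excludes all per-CPU users. */
    static int demo_global_enable(void)
    {
            if (!down_write_trylock(&demo_lock))
                    return -EBUSY;
            return 0;
    }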
+diff --git a/arch/powerpc/platforms/pseries/plpks.c b/arch/powerpc/platforms/pseries/plpks.c
+index 4a595493d28ae3..b1667ed05f9882 100644
+--- a/arch/powerpc/platforms/pseries/plpks.c
++++ b/arch/powerpc/platforms/pseries/plpks.c
+@@ -683,7 +683,7 @@ void __init plpks_early_init_devtree(void)
+ out:
+ 	fdt_nop_property(fdt, chosen_node, "ibm,plpks-pw");
+ 	// Since we've cleared the password, we must update the FDT checksum
+-	early_init_dt_verify(fdt);
++	early_init_dt_verify(fdt, __pa(fdt));
+ }
+ 
+ static __init int pseries_plpks_init(void)
+diff --git a/arch/riscv/include/asm/cpufeature.h b/arch/riscv/include/asm/cpufeature.h
+index 45f9c1171a486a..dfa5cdddd3671b 100644
+--- a/arch/riscv/include/asm/cpufeature.h
++++ b/arch/riscv/include/asm/cpufeature.h
+@@ -8,6 +8,7 @@
+ 
+ #include <linux/bitmap.h>
+ #include <linux/jump_label.h>
++#include <linux/workqueue.h>
+ #include <asm/hwcap.h>
+ #include <asm/alternative-macros.h>
+ #include <asm/errno.h>
+@@ -60,6 +61,7 @@ void riscv_user_isa_enable(void);
+ 
+ #if defined(CONFIG_RISCV_MISALIGNED)
+ bool check_unaligned_access_emulated_all_cpus(void);
++void check_unaligned_access_emulated(struct work_struct *work __always_unused);
+ void unaligned_emulation_finish(void);
+ bool unaligned_ctl_available(void);
+ DECLARE_PER_CPU(long, misaligned_access_speed);
+diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
+index a2cde65b69e950..26c886db4fb3d1 100644
+--- a/arch/riscv/kernel/setup.c
++++ b/arch/riscv/kernel/setup.c
+@@ -227,7 +227,7 @@ static void __init init_resources(void)
+ static void __init parse_dtb(void)
+ {
+ 	/* Early scan of device tree from init memory */
+-	if (early_init_dt_scan(dtb_early_va)) {
++	if (early_init_dt_scan(dtb_early_va, __pa(dtb_early_va))) {
+ 		const char *name = of_flat_dt_get_machine_name();
+ 
+ 		if (name) {
+diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c
+index 1b9867136b6100..9a80a12f6b48f2 100644
+--- a/arch/riscv/kernel/traps_misaligned.c
++++ b/arch/riscv/kernel/traps_misaligned.c
+@@ -524,11 +524,11 @@ int handle_misaligned_store(struct pt_regs *regs)
+ 	return 0;
+ }
+ 
+-static bool check_unaligned_access_emulated(int cpu)
++void check_unaligned_access_emulated(struct work_struct *work __always_unused)
+ {
++	int cpu = smp_processor_id();
+ 	long *mas_ptr = per_cpu_ptr(&misaligned_access_speed, cpu);
+ 	unsigned long tmp_var, tmp_val;
+-	bool misaligned_emu_detected;
+ 
+ 	*mas_ptr = RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN;
+ 
+@@ -536,19 +536,16 @@ static bool check_unaligned_access_emulated(int cpu)
+ 		"       "REG_L" %[tmp], 1(%[ptr])\n"
+ 		: [tmp] "=r" (tmp_val) : [ptr] "r" (&tmp_var) : "memory");
+ 
+-	misaligned_emu_detected = (*mas_ptr == RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED);
+ 	/*
+ 	 * If unaligned_ctl is already set, this means that we detected that all
+ 	 * CPUS uses emulated misaligned access at boot time. If that changed
+ 	 * when hotplugging the new cpu, this is something we don't handle.
+ 	 */
+-	if (unlikely(unaligned_ctl && !misaligned_emu_detected)) {
++	if (unlikely(unaligned_ctl && (*mas_ptr != RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED))) {
+ 		pr_crit("CPU misaligned accesses non homogeneous (expected all emulated)\n");
+ 		while (true)
+ 			cpu_relax();
+ 	}
+-
+-	return misaligned_emu_detected;
+ }
+ 
+ bool check_unaligned_access_emulated_all_cpus(void)
+@@ -560,8 +557,11 @@ bool check_unaligned_access_emulated_all_cpus(void)
+ 	 * accesses emulated since tasks requesting such control can run on any
+ 	 * CPU.
+ 	 */
++	schedule_on_each_cpu(check_unaligned_access_emulated);
++
+ 	for_each_online_cpu(cpu)
+-		if (!check_unaligned_access_emulated(cpu))
++		if (per_cpu(misaligned_access_speed, cpu)
++		    != RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED)
+ 			return false;
+ 
+ 	unaligned_ctl = true;
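The probe has to execute on the CPU it is judging, which is why check_unaligned_access_emulated() now takes the work_struct signature: schedule_on_each_cpu() runs a work item synchronously on every online CPU at boot, and the hotplug path calls the same function directly from the newly onlined CPU. A small sketch of that primitive (demo names only):

    #include <linux/workqueue.h>
    #include <linux/smp.h>
    #include <linux/printk.h>

    /* Runs in process context once on each online CPU before returning. */
    static void per_cpu_probe(struct work_struct *work)
    {
            pr_info("probing on CPU %d\n", smp_processor_id());
    }

    static int __init run_probe_everywhere(void)
    {
            return schedule_on_each_cpu(per_cpu_probe);
    }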
+diff --git a/arch/riscv/kernel/unaligned_access_speed.c b/arch/riscv/kernel/unaligned_access_speed.c
+index 160628a2116de4..f3508cc54f91ae 100644
+--- a/arch/riscv/kernel/unaligned_access_speed.c
++++ b/arch/riscv/kernel/unaligned_access_speed.c
+@@ -191,6 +191,7 @@ static int riscv_online_cpu(unsigned int cpu)
+ 	if (per_cpu(misaligned_access_speed, cpu) != RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN)
+ 		goto exit;
+ 
++	check_unaligned_access_emulated(NULL);
+ 	buf = alloc_pages(GFP_KERNEL, MISALIGNED_BUFFER_ORDER);
+ 	if (!buf) {
+ 		pr_warn("Allocation failure, not measuring misaligned performance\n");
+diff --git a/arch/riscv/kvm/aia_aplic.c b/arch/riscv/kvm/aia_aplic.c
+index da6ff1bade0df5..f59d1c0c8c43a7 100644
+--- a/arch/riscv/kvm/aia_aplic.c
++++ b/arch/riscv/kvm/aia_aplic.c
+@@ -143,7 +143,7 @@ static void aplic_write_pending(struct aplic *aplic, u32 irq, bool pending)
+ 	if (sm == APLIC_SOURCECFG_SM_LEVEL_HIGH ||
+ 	    sm == APLIC_SOURCECFG_SM_LEVEL_LOW) {
+ 		if (!pending)
+-			goto skip_write_pending;
++			goto noskip_write_pending;
+ 		if ((irqd->state & APLIC_IRQ_STATE_INPUT) &&
+ 		    sm == APLIC_SOURCECFG_SM_LEVEL_LOW)
+ 			goto skip_write_pending;
+@@ -152,6 +152,7 @@ static void aplic_write_pending(struct aplic *aplic, u32 irq, bool pending)
+ 			goto skip_write_pending;
+ 	}
+ 
++noskip_write_pending:
+ 	if (pending)
+ 		irqd->state |= APLIC_IRQ_STATE_PENDING;
+ 	else
+diff --git a/arch/riscv/kvm/vcpu_sbi.c b/arch/riscv/kvm/vcpu_sbi.c
+index 7de128be8db9bc..6e704ed86a83a9 100644
+--- a/arch/riscv/kvm/vcpu_sbi.c
++++ b/arch/riscv/kvm/vcpu_sbi.c
+@@ -486,19 +486,22 @@ void kvm_riscv_vcpu_sbi_init(struct kvm_vcpu *vcpu)
+ 	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
+ 	const struct kvm_riscv_sbi_extension_entry *entry;
+ 	const struct kvm_vcpu_sbi_extension *ext;
+-	int i;
++	int idx, i;
+ 
+ 	for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
+ 		entry = &sbi_ext[i];
+ 		ext = entry->ext_ptr;
++		idx = entry->ext_idx;
++
++		if (idx < 0 || idx >= ARRAY_SIZE(scontext->ext_status))
++			continue;
+ 
+ 		if (ext->probe && !ext->probe(vcpu)) {
+-			scontext->ext_status[entry->ext_idx] =
+-				KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE;
++			scontext->ext_status[idx] = KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE;
+ 			continue;
+ 		}
+ 
+-		scontext->ext_status[entry->ext_idx] = ext->default_disabled ?
++		scontext->ext_status[idx] = ext->default_disabled ?
+ 					KVM_RISCV_SBI_EXT_STATUS_DISABLED :
+ 					KVM_RISCV_SBI_EXT_STATUS_ENABLED;
+ 	}
+diff --git a/arch/s390/include/asm/facility.h b/arch/s390/include/asm/facility.h
+index 715bcf8fb69a51..5f5b1aa6c23312 100644
+--- a/arch/s390/include/asm/facility.h
++++ b/arch/s390/include/asm/facility.h
+@@ -88,7 +88,7 @@ static __always_inline bool test_facility(unsigned long nr)
+ 	return __test_facility(nr, &stfle_fac_list);
+ }
+ 
+-static inline unsigned long __stfle_asm(u64 *stfle_fac_list, int size)
++static inline unsigned long __stfle_asm(u64 *fac_list, int size)
+ {
+ 	unsigned long reg0 = size - 1;
+ 
+@@ -96,7 +96,7 @@ static inline unsigned long __stfle_asm(u64 *stfle_fac_list, int size)
+ 		"	lgr	0,%[reg0]\n"
+ 		"	.insn	s,0xb2b00000,%[list]\n" /* stfle */
+ 		"	lgr	%[reg0],0\n"
+-		: [reg0] "+&d" (reg0), [list] "+Q" (*stfle_fac_list)
++		: [reg0] "+&d" (reg0), [list] "+Q" (*fac_list)
+ 		:
+ 		: "memory", "cc", "0");
+ 	return reg0;
+@@ -104,10 +104,10 @@ static inline unsigned long __stfle_asm(u64 *stfle_fac_list, int size)
+ 
+ /**
+  * stfle - Store facility list extended
+- * @stfle_fac_list: array where facility list can be stored
++ * @fac_list: array where facility list can be stored
+  * @size: size of passed in array in double words
+  */
+-static inline void __stfle(u64 *stfle_fac_list, int size)
++static inline void __stfle(u64 *fac_list, int size)
+ {
+ 	unsigned long nr;
+ 	u32 stfl_fac_list;
+@@ -116,20 +116,20 @@ static inline void __stfle(u64 *stfle_fac_list, int size)
+ 		"	stfl	0(0)\n"
+ 		: "=m" (get_lowcore()->stfl_fac_list));
+ 	stfl_fac_list = get_lowcore()->stfl_fac_list;
+-	memcpy(stfle_fac_list, &stfl_fac_list, 4);
++	memcpy(fac_list, &stfl_fac_list, 4);
+ 	nr = 4; /* bytes stored by stfl */
+ 	if (stfl_fac_list & 0x01000000) {
+ 		/* More facility bits available with stfle */
+-		nr = __stfle_asm(stfle_fac_list, size);
++		nr = __stfle_asm(fac_list, size);
+ 		nr = min_t(unsigned long, (nr + 1) * 8, size * 8);
+ 	}
+-	memset((char *) stfle_fac_list + nr, 0, size * 8 - nr);
++	memset((char *)fac_list + nr, 0, size * 8 - nr);
+ }
+ 
+-static inline void stfle(u64 *stfle_fac_list, int size)
++static inline void stfle(u64 *fac_list, int size)
+ {
+ 	preempt_disable();
+-	__stfle(stfle_fac_list, size);
++	__stfle(fac_list, size);
+ 	preempt_enable();
+ }
+ 
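The rename is purely cosmetic but worth having: the parameter `stfle_fac_list` shadowed the global facility list of the same name used by test_facility(), which is exactly the class of confusion -Wshadow exists to catch. A tiny self-contained demonstration of shadowing:

    #include <stdio.h>

    int counter = 100;              /* file-scope object */

    /* The parameter shadows the global: inside the function, 'counter'
     * silently refers to the argument. gcc -Wshadow warns about this. */
    static int bump(int counter)
    {
            return counter + 1;     /* the parameter, not the global */
    }

    int main(void)
    {
            printf("%d %d\n", bump(1), counter);    /* prints "2 100" */
            return 0;
    }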
+diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
+index 9d920ced604754..30b20ce9a70033 100644
+--- a/arch/s390/include/asm/pci.h
++++ b/arch/s390/include/asm/pci.h
+@@ -96,7 +96,6 @@ struct zpci_bar_struct {
+ 	u8		size;		/* order 2 exponent */
+ };
+ 
+-struct s390_domain;
+ struct kvm_zdev;
+ 
+ #define ZPCI_FUNCTIONS_PER_BUS 256
+@@ -181,9 +180,10 @@ struct zpci_dev {
+ 	struct dentry	*debugfs_dev;
+ 
+ 	/* IOMMU and passthrough */
+-	struct s390_domain *s390_domain; /* s390 IOMMU domain data */
++	struct iommu_domain *s390_domain; /* attached IOMMU domain */
+ 	struct kvm_zdev *kzdev;
+ 	struct mutex kzdev_lock;
++	spinlock_t dom_lock;		/* protect s390_domain change */
+ };
+ 
+ static inline bool zdev_enabled(struct zpci_dev *zdev)
+diff --git a/arch/s390/include/asm/set_memory.h b/arch/s390/include/asm/set_memory.h
+index 06fbabe2f66c98..cb4cc0f59012f7 100644
+--- a/arch/s390/include/asm/set_memory.h
++++ b/arch/s390/include/asm/set_memory.h
+@@ -62,5 +62,6 @@ __SET_MEMORY_FUNC(set_memory_4k, SET_MEMORY_4K)
+ 
+ int set_direct_map_invalid_noflush(struct page *page);
+ int set_direct_map_default_noflush(struct page *page);
++bool kernel_page_present(struct page *page);
+ 
+ #endif
+diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
+index 5b765e3ccf0cad..3317f4878eaa70 100644
+--- a/arch/s390/kernel/perf_cpum_sf.c
++++ b/arch/s390/kernel/perf_cpum_sf.c
+@@ -759,7 +759,6 @@ static int __hw_perf_event_init(struct perf_event *event)
+ 		reserve_pmc_hardware();
+ 		refcount_set(&num_events, 1);
+ 	}
+-	mutex_unlock(&pmc_reserve_mutex);
+ 	event->destroy = hw_perf_event_destroy;
+ 
+ 	/* Access per-CPU sampling information (query sampling info) */
+@@ -848,6 +847,7 @@ static int __hw_perf_event_init(struct perf_event *event)
+ 		if (is_default_overflow_handler(event))
+ 			event->overflow_handler = cpumsf_output_event_pid;
+ out:
++	mutex_unlock(&pmc_reserve_mutex);
+ 	return err;
+ }
+ 
+diff --git a/arch/s390/kernel/syscalls/Makefile b/arch/s390/kernel/syscalls/Makefile
+index 1bb78b9468e8a9..e85c14f9058b92 100644
+--- a/arch/s390/kernel/syscalls/Makefile
++++ b/arch/s390/kernel/syscalls/Makefile
+@@ -12,7 +12,7 @@ kapi-hdrs-y := $(kapi)/unistd_nr.h
+ uapi-hdrs-y := $(uapi)/unistd_32.h
+ uapi-hdrs-y += $(uapi)/unistd_64.h
+ 
+-targets += $(addprefix ../../../,$(gen-y) $(kapi-hdrs-y) $(uapi-hdrs-y))
++targets += $(addprefix ../../../../,$(gen-y) $(kapi-hdrs-y) $(uapi-hdrs-y))
+ 
+ PHONY += kapi uapi
+ 
+diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
+index 5f805ad42d4c3f..aec9eb16b6f7be 100644
+--- a/arch/s390/mm/pageattr.c
++++ b/arch/s390/mm/pageattr.c
+@@ -406,6 +406,21 @@ int set_direct_map_default_noflush(struct page *page)
+ 	return __set_memory((unsigned long)page_to_virt(page), 1, SET_MEMORY_DEF);
+ }
+ 
++bool kernel_page_present(struct page *page)
++{
++	unsigned long addr;
++	unsigned int cc;
++
++	addr = (unsigned long)page_address(page);
++	asm volatile(
++		"	lra	%[addr],0(%[addr])\n"
++		"	ipm	%[cc]\n"
++		: [cc] "=d" (cc), [addr] "+a" (addr)
++		:
++		: "cc");
++	return (cc >> 28) == 0;
++}
++
+ #if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
+ 
+ static void ipte_range(pte_t *pte, unsigned long address, int nr)
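The new kernel_page_present() uses LRA (load real address), which sets a non-zero condition code when translation fails, so cc == 0 means the page is currently mapped. One hedged sketch of the kind of consumer that needs this -- hibernation must not read pages that DEBUG_PAGEALLOC or KFENCE removed from the direct map (names here are illustrative, not the exact snapshot.c code):

    static void copy_one_page(void *dst, struct page *page)
    {
            if (!kernel_page_present(page)) {
                    set_direct_map_default_noflush(page);   /* map it back */
                    memcpy(dst, page_address(page), PAGE_SIZE);
                    set_direct_map_invalid_noflush(page);   /* restore */
            } else {
                    memcpy(dst, page_address(page), PAGE_SIZE);
            }
    }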
+diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
+index bd9624c20b8020..635fd8f2acbaa2 100644
+--- a/arch/s390/pci/pci.c
++++ b/arch/s390/pci/pci.c
+@@ -160,6 +160,7 @@ int zpci_fmb_enable_device(struct zpci_dev *zdev)
+ 	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE);
+ 	struct zpci_iommu_ctrs *ctrs;
+ 	struct zpci_fib fib = {0};
++	unsigned long flags;
+ 	u8 cc, status;
+ 
+ 	if (zdev->fmb || sizeof(*zdev->fmb) < zdev->fmb_length)
+@@ -171,6 +172,7 @@ int zpci_fmb_enable_device(struct zpci_dev *zdev)
+ 	WARN_ON((u64) zdev->fmb & 0xf);
+ 
+ 	/* reset software counters */
++	spin_lock_irqsave(&zdev->dom_lock, flags);
+ 	ctrs = zpci_get_iommu_ctrs(zdev);
+ 	if (ctrs) {
+ 		atomic64_set(&ctrs->mapped_pages, 0);
+@@ -179,6 +181,7 @@ int zpci_fmb_enable_device(struct zpci_dev *zdev)
+ 		atomic64_set(&ctrs->sync_map_rpcits, 0);
+ 		atomic64_set(&ctrs->sync_rpcits, 0);
+ 	}
++	spin_unlock_irqrestore(&zdev->dom_lock, flags);
+ 
+ 
+ 	fib.fmb_addr = virt_to_phys(zdev->fmb);
+@@ -914,10 +917,8 @@ void zpci_device_reserved(struct zpci_dev *zdev)
+ void zpci_release_device(struct kref *kref)
+ {
+ 	struct zpci_dev *zdev = container_of(kref, struct zpci_dev, kref);
+-	int ret;
+ 
+-	if (zdev->has_hp_slot)
+-		zpci_exit_slot(zdev);
++	WARN_ON(zdev->state != ZPCI_FN_STATE_RESERVED);
+ 
+ 	if (zdev->zbus->bus)
+ 		zpci_bus_remove_device(zdev, false);
+@@ -925,28 +926,14 @@ void zpci_release_device(struct kref *kref)
+ 	if (zdev_enabled(zdev))
+ 		zpci_disable_device(zdev);
+ 
+-	switch (zdev->state) {
+-	case ZPCI_FN_STATE_CONFIGURED:
+-		ret = sclp_pci_deconfigure(zdev->fid);
+-		zpci_dbg(3, "deconf fid:%x, rc:%d\n", zdev->fid, ret);
+-		fallthrough;
+-	case ZPCI_FN_STATE_STANDBY:
+-		if (zdev->has_hp_slot)
+-			zpci_exit_slot(zdev);
+-		spin_lock(&zpci_list_lock);
+-		list_del(&zdev->entry);
+-		spin_unlock(&zpci_list_lock);
+-		zpci_dbg(3, "rsv fid:%x\n", zdev->fid);
+-		fallthrough;
+-	case ZPCI_FN_STATE_RESERVED:
+-		if (zdev->has_resources)
+-			zpci_cleanup_bus_resources(zdev);
+-		zpci_bus_device_unregister(zdev);
+-		zpci_destroy_iommu(zdev);
+-		fallthrough;
+-	default:
+-		break;
+-	}
++	if (zdev->has_hp_slot)
++		zpci_exit_slot(zdev);
++
++	if (zdev->has_resources)
++		zpci_cleanup_bus_resources(zdev);
++
++	zpci_bus_device_unregister(zdev);
++	zpci_destroy_iommu(zdev);
+ 	zpci_dbg(3, "rem fid:%x\n", zdev->fid);
+ 	kfree_rcu(zdev, rcu);
+ }
+diff --git a/arch/s390/pci/pci_debug.c b/arch/s390/pci/pci_debug.c
+index 2cb5043a997d53..38014206c16b96 100644
+--- a/arch/s390/pci/pci_debug.c
++++ b/arch/s390/pci/pci_debug.c
+@@ -71,17 +71,23 @@ static void pci_fmb_show(struct seq_file *m, char *name[], int length,
+ 
+ static void pci_sw_counter_show(struct seq_file *m)
+ {
+-	struct zpci_iommu_ctrs  *ctrs = zpci_get_iommu_ctrs(m->private);
++	struct zpci_dev *zdev = m->private;
++	struct zpci_iommu_ctrs *ctrs;
+ 	atomic64_t *counter;
++	unsigned long flags;
+ 	int i;
+ 
++	spin_lock_irqsave(&zdev->dom_lock, flags);
++	ctrs = zpci_get_iommu_ctrs(m->private);
+ 	if (!ctrs)
+-		return;
++		goto unlock;
+ 
+ 	counter = &ctrs->mapped_pages;
+ 	for (i = 0; i < ARRAY_SIZE(pci_sw_names); i++, counter++)
+ 		seq_printf(m, "%26s:\t%llu\n", pci_sw_names[i],
+ 			   atomic64_read(counter));
++unlock:
++	spin_unlock_irqrestore(&zdev->dom_lock, flags);
+ }
+ 
+ static int pci_perf_show(struct seq_file *m, void *v)
+diff --git a/arch/sh/kernel/cpu/proc.c b/arch/sh/kernel/cpu/proc.c
+index a306bcd6b34130..5f6d0e827baeb0 100644
+--- a/arch/sh/kernel/cpu/proc.c
++++ b/arch/sh/kernel/cpu/proc.c
+@@ -132,7 +132,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
+ 
+ static void *c_start(struct seq_file *m, loff_t *pos)
+ {
+-	return *pos < NR_CPUS ? cpu_data + *pos : NULL;
++	return *pos < nr_cpu_ids ? cpu_data + *pos : NULL;
+ }
+ static void *c_next(struct seq_file *m, void *v, loff_t *pos)
+ {
+diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
+index 620e5cf8ae1e74..f2b6f16a46b85d 100644
+--- a/arch/sh/kernel/setup.c
++++ b/arch/sh/kernel/setup.c
+@@ -255,7 +255,7 @@ void __ref sh_fdt_init(phys_addr_t dt_phys)
+ 	dt_virt = phys_to_virt(dt_phys);
+ #endif
+ 
+-	if (!dt_virt || !early_init_dt_scan(dt_virt)) {
++	if (!dt_virt || !early_init_dt_scan(dt_virt, __pa(dt_virt))) {
+ 		pr_crit("Error: invalid device tree blob"
+ 			" at physical address %p\n", (void *)dt_phys);
+ 
+diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c
+index 77c4afb8ab9071..75d04fb4994a06 100644
+--- a/arch/um/drivers/net_kern.c
++++ b/arch/um/drivers/net_kern.c
+@@ -336,7 +336,7 @@ static struct platform_driver uml_net_driver = {
+ 
+ static void net_device_release(struct device *dev)
+ {
+-	struct uml_net *device = dev_get_drvdata(dev);
++	struct uml_net *device = container_of(dev, struct uml_net, pdev.dev);
+ 	struct net_device *netdev = device->dev;
+ 	struct uml_net_private *lp = netdev_priv(netdev);
+ 
+diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
+index 7f28ec1929dc0b..2bfb17373244bb 100644
+--- a/arch/um/drivers/ubd_kern.c
++++ b/arch/um/drivers/ubd_kern.c
+@@ -779,7 +779,7 @@ static int ubd_open_dev(struct ubd *ubd_dev)
+ 
+ static void ubd_device_release(struct device *dev)
+ {
+-	struct ubd *ubd_dev = dev_get_drvdata(dev);
++	struct ubd *ubd_dev = container_of(dev, struct ubd, pdev.dev);
+ 
+ 	blk_mq_free_tag_set(&ubd_dev->tag_set);
+ 	*ubd_dev = ((struct ubd) DEFAULT_UBD);
+@@ -898,6 +898,8 @@ static int ubd_add(int n, char **error_out)
+ 	if (err)
+ 		goto out_cleanup_disk;
+ 
++	ubd_dev->disk = disk;
++
+ 	return 0;
+ 
+ out_cleanup_disk:
+diff --git a/arch/um/drivers/vector_kern.c b/arch/um/drivers/vector_kern.c
+index c992da83268dd8..64c09db392c16a 100644
+--- a/arch/um/drivers/vector_kern.c
++++ b/arch/um/drivers/vector_kern.c
+@@ -815,7 +815,8 @@ static struct platform_driver uml_net_driver = {
+ 
+ static void vector_device_release(struct device *dev)
+ {
+-	struct vector_device *device = dev_get_drvdata(dev);
++	struct vector_device *device =
++		container_of(dev, struct vector_device, pdev.dev);
+ 	struct net_device *netdev = device->dev;
+ 
+ 	list_del(&device->list);
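Using dev_get_drvdata() in a ->release() callback is fragile: nothing guarantees drvdata was ever set for these UML platform devices, while the device structure is embedded in the driver's own object, so container_of() always recovers it. The same pattern, reduced to a hypothetical demo_device:

    #include <linux/platform_device.h>
    #include <linux/container_of.h>
    #include <linux/slab.h>

    struct demo_device {
            struct platform_device pdev;    /* embedded, not a pointer */
            int payload;
    };

    /* In ->release() only the embedded structure is guaranteed valid;
     * recover the container geometrically instead of via drvdata. */
    static void demo_release(struct device *dev)
    {
            struct demo_device *demo =
                    container_of(dev, struct demo_device, pdev.dev);

            kfree(demo);
    }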
+diff --git a/arch/um/kernel/dtb.c b/arch/um/kernel/dtb.c
+index 4954188a6a0908..8d78ced9e08f6d 100644
+--- a/arch/um/kernel/dtb.c
++++ b/arch/um/kernel/dtb.c
+@@ -17,7 +17,7 @@ void uml_dtb_init(void)
+ 
+ 	area = uml_load_file(dtb, &size);
+ 	if (area) {
+-		if (!early_init_dt_scan(area)) {
++		if (!early_init_dt_scan(area, __pa(area))) {
+ 			pr_err("invalid DTB %s\n", dtb);
+ 			memblock_free(area, size);
+ 			return;
+diff --git a/arch/um/kernel/physmem.c b/arch/um/kernel/physmem.c
+index fb2adfb499452b..ee693e0b2b58bf 100644
+--- a/arch/um/kernel/physmem.c
++++ b/arch/um/kernel/physmem.c
+@@ -81,10 +81,10 @@ void __init setup_physmem(unsigned long start, unsigned long reserve_end,
+ 			  unsigned long len, unsigned long long highmem)
+ {
+ 	unsigned long reserve = reserve_end - start;
+-	long map_size = len - reserve;
++	unsigned long map_size = len - reserve;
+ 	int err;
+ 
+-	if(map_size <= 0) {
++	if (len <= reserve) {
+ 		os_warn("Too few physical memory! Needed=%lu, given=%lu\n",
+ 			reserve, len);
+ 		exit(1);
+@@ -95,7 +95,7 @@ void __init setup_physmem(unsigned long start, unsigned long reserve_end,
+ 	err = os_map_memory((void *) reserve_end, physmem_fd, reserve,
+ 			    map_size, 1, 1, 1);
+ 	if (err < 0) {
+-		os_warn("setup_physmem - mapping %ld bytes of memory at 0x%p "
++		os_warn("setup_physmem - mapping %lu bytes of memory at 0x%p "
+ 			"failed - errno = %d\n", map_size,
+ 			(void *) reserve_end, err);
+ 		exit(1);
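With map_size now unsigned long, a `map_size <= 0` guard degenerates to `== 0` and can never catch len < reserve (the subtraction simply wraps to a huge value), so the check is rewritten to compare the operands directly. A runnable illustration:

    #include <stdio.h>

    int main(void)
    {
            unsigned long len = 100, reserve = 200;
            unsigned long map_size = len - reserve; /* wraps, never negative */

            if (map_size <= 0)      /* for unsigned this is just == 0 */
                    printf("not reached on underflow\n");

            if (len <= reserve)     /* the correct guard */
                    printf("too little memory: map_size wrapped to %lu\n",
                           map_size);
            return 0;
    }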
+diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
+index be2856af6d4c31..9c6cf03ed02b03 100644
+--- a/arch/um/kernel/process.c
++++ b/arch/um/kernel/process.c
+@@ -292,6 +292,6 @@ int elf_core_copy_task_fpregs(struct task_struct *t, elf_fpregset_t *fpu)
+ {
+ 	int cpu = current_thread_info()->cpu;
+ 
+-	return save_i387_registers(userspace_pid[cpu], (unsigned long *) fpu);
++	return save_i387_registers(userspace_pid[cpu], (unsigned long *) fpu) == 0;
+ }
+ 
+diff --git a/arch/um/kernel/sysrq.c b/arch/um/kernel/sysrq.c
+index 4bb8622dc51226..e3b6a2fd75d996 100644
+--- a/arch/um/kernel/sysrq.c
++++ b/arch/um/kernel/sysrq.c
+@@ -52,5 +52,5 @@ void show_stack(struct task_struct *task, unsigned long *stack,
+ 	}
+ 
+ 	printk("%sCall Trace:\n", loglvl);
+-	dump_trace(current, &stackops, (void *)loglvl);
++	dump_trace(task ?: current, &stackops, (void *)loglvl);
+ }
+diff --git a/arch/x86/coco/tdx/tdx.c b/arch/x86/coco/tdx/tdx.c
+index 327c45c5013fea..2f85ed005c42f1 100644
+--- a/arch/x86/coco/tdx/tdx.c
++++ b/arch/x86/coco/tdx/tdx.c
+@@ -78,6 +78,32 @@ static inline void tdcall(u64 fn, struct tdx_module_args *args)
+ 		panic("TDCALL %lld failed (Buggy TDX module!)\n", fn);
+ }
+ 
++/* Read TD-scoped metadata */
++static inline u64 tdg_vm_rd(u64 field, u64 *value)
++{
++	struct tdx_module_args args = {
++		.rdx = field,
++	};
++	u64 ret;
++
++	ret = __tdcall_ret(TDG_VM_RD, &args);
++	*value = args.r8;
++
++	return ret;
++}
++
++/* Write TD-scoped metadata */
++static inline u64 tdg_vm_wr(u64 field, u64 value, u64 mask)
++{
++	struct tdx_module_args args = {
++		.rdx = field,
++		.r8 = value,
++		.r9 = mask,
++	};
++
++	return __tdcall(TDG_VM_WR, &args);
++}
++
+ /**
+  * tdx_mcall_get_report0() - Wrapper to get TDREPORT0 (a.k.a. TDREPORT
+  *                           subtype 0) using TDG.MR.REPORT TDCALL.
+@@ -168,7 +194,61 @@ static void __noreturn tdx_panic(const char *msg)
+ 		__tdx_hypercall(&args);
+ }
+ 
+-static void tdx_parse_tdinfo(u64 *cc_mask)
++/*
++ * The kernel cannot handle #VEs when accessing normal kernel memory. Ensure
++ * that no #VE will be delivered for accesses to TD-private memory.
++ *
++ * TDX 1.0 does not allow the guest to disable SEPT #VE on its own. The VMM
++ * controls if the guest will receive such #VE with TD attribute
++ * ATTR_SEPT_VE_DISABLE.
++ *
++ * Newer TDX modules allow the guest to control if it wants to receive SEPT
++ * violation #VEs.
++ *
++ * Check if the feature is available and disable SEPT #VE if possible.
++ *
++ * If the TD is allowed to disable/enable SEPT #VEs, the ATTR_SEPT_VE_DISABLE
++ * attribute is no longer reliable. It reflects the initial state of the
++ * control for the TD, but it will not be updated if someone (e.g. bootloader)
++ * changes it before the kernel starts. Kernel must check TDCS_TD_CTLS bit to
++ * determine if SEPT #VEs are enabled or disabled.
++ */
++static void disable_sept_ve(u64 td_attr)
++{
++	const char *msg = "TD misconfiguration: SEPT #VE has to be disabled";
++	bool debug = td_attr & ATTR_DEBUG;
++	u64 config, controls;
++
++	/* Is this TD allowed to disable SEPT #VE */
++	tdg_vm_rd(TDCS_CONFIG_FLAGS, &config);
++	if (!(config & TDCS_CONFIG_FLEXIBLE_PENDING_VE)) {
++		/* No SEPT #VE controls for the guest: check the attribute */
++		if (td_attr & ATTR_SEPT_VE_DISABLE)
++			return;
++
++		/* Relax SEPT_VE_DISABLE check for debug TD for backtraces */
++		if (debug)
++			pr_warn("%s\n", msg);
++		else
++			tdx_panic(msg);
++		return;
++	}
++
++	/* Check if SEPT #VE has been disabled before us */
++	tdg_vm_rd(TDCS_TD_CTLS, &controls);
++	if (controls & TD_CTLS_PENDING_VE_DISABLE)
++		return;
++
++	/* Keep #VEs enabled for splats in debugging environments */
++	if (debug)
++		return;
++
++	/* Disable SEPT #VEs */
++	tdg_vm_wr(TDCS_TD_CTLS, TD_CTLS_PENDING_VE_DISABLE,
++		  TD_CTLS_PENDING_VE_DISABLE);
++}
++
++static void tdx_setup(u64 *cc_mask)
+ {
+ 	struct tdx_module_args args = {};
+ 	unsigned int gpa_width;
+@@ -193,21 +273,12 @@ static void tdx_parse_tdinfo(u64 *cc_mask)
+ 	gpa_width = args.rcx & GENMASK(5, 0);
+ 	*cc_mask = BIT_ULL(gpa_width - 1);
+ 
+-	/*
+-	 * The kernel can not handle #VE's when accessing normal kernel
+-	 * memory.  Ensure that no #VE will be delivered for accesses to
+-	 * TD-private memory.  Only VMM-shared memory (MMIO) will #VE.
+-	 */
+ 	td_attr = args.rdx;
+-	if (!(td_attr & ATTR_SEPT_VE_DISABLE)) {
+-		const char *msg = "TD misconfiguration: SEPT_VE_DISABLE attribute must be set.";
+ 
+-		/* Relax SEPT_VE_DISABLE check for debug TD. */
+-		if (td_attr & ATTR_DEBUG)
+-			pr_warn("%s\n", msg);
+-		else
+-			tdx_panic(msg);
+-	}
++	/* Kernel does not use NOTIFY_ENABLES and does not need random #VEs */
++	tdg_vm_wr(TDCS_NOTIFY_ENABLES, 0, -1ULL);
++
++	disable_sept_ve(td_attr);
+ }
+ 
+ /*
+@@ -929,10 +1000,6 @@ static void tdx_kexec_finish(void)
+ 
+ void __init tdx_early_init(void)
+ {
+-	struct tdx_module_args args = {
+-		.rdx = TDCS_NOTIFY_ENABLES,
+-		.r9 = -1ULL,
+-	};
+ 	u64 cc_mask;
+ 	u32 eax, sig[3];
+ 
+@@ -947,11 +1014,11 @@ void __init tdx_early_init(void)
+ 	setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE);
+ 
+ 	cc_vendor = CC_VENDOR_INTEL;
+-	tdx_parse_tdinfo(&cc_mask);
+-	cc_set_mask(cc_mask);
+ 
+-	/* Kernel does not use NOTIFY_ENABLES and does not need random #VEs */
+-	tdcall(TDG_VM_WR, &args);
++	/* Configure the TD */
++	tdx_setup(&cc_mask);
++
++	cc_set_mask(cc_mask);
+ 
+ 	/*
+ 	 * All bits above GPA width are reserved and kernel treats shared bit
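tdg_vm_rd()/tdg_vm_wr() carry the metadata field ID in RDX, the value in R8 and, for writes, a bit-mask in R9, so a caller can flip individual control bits without touching the rest of the field. The probe-then-set flow of disable_sept_ve() generalizes to something like the following sketch (hedged; td_set_ctl_bit is an invented helper reusing the definitions added above):

    static bool td_set_ctl_bit(u64 bit)
    {
            u64 config, controls;

            /* May this TD flip the control at runtime at all? */
            tdg_vm_rd(TDCS_CONFIG_FLAGS, &config);
            if (!(config & TDCS_CONFIG_FLEXIBLE_PENDING_VE))
                    return false;           /* fall back to TD attributes */

            /* Already set, e.g. by the bootloader? */
            tdg_vm_rd(TDCS_TD_CTLS, &controls);
            if (controls & bit)
                    return true;

            /* Write only the bit we own: value == mask == bit. */
            return tdg_vm_wr(TDCS_TD_CTLS, bit, bit) == 0;
    }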
+diff --git a/arch/x86/crypto/aegis128-aesni-asm.S b/arch/x86/crypto/aegis128-aesni-asm.S
+index ad7f4c89162568..2de859173940eb 100644
+--- a/arch/x86/crypto/aegis128-aesni-asm.S
++++ b/arch/x86/crypto/aegis128-aesni-asm.S
+@@ -21,7 +21,7 @@
+ #define T1	%xmm7
+ 
+ #define STATEP	%rdi
+-#define LEN	%rsi
++#define LEN	%esi
+ #define SRC	%rdx
+ #define DST	%rcx
+ 
+@@ -76,32 +76,32 @@ SYM_FUNC_START_LOCAL(__load_partial)
+ 	xor %r9d, %r9d
+ 	pxor MSG, MSG
+ 
+-	mov LEN, %r8
++	mov LEN, %r8d
+ 	and $0x1, %r8
+ 	jz .Lld_partial_1
+ 
+-	mov LEN, %r8
++	mov LEN, %r8d
+ 	and $0x1E, %r8
+ 	add SRC, %r8
+ 	mov (%r8), %r9b
+ 
+ .Lld_partial_1:
+-	mov LEN, %r8
++	mov LEN, %r8d
+ 	and $0x2, %r8
+ 	jz .Lld_partial_2
+ 
+-	mov LEN, %r8
++	mov LEN, %r8d
+ 	and $0x1C, %r8
+ 	add SRC, %r8
+ 	shl $0x10, %r9
+ 	mov (%r8), %r9w
+ 
+ .Lld_partial_2:
+-	mov LEN, %r8
++	mov LEN, %r8d
+ 	and $0x4, %r8
+ 	jz .Lld_partial_4
+ 
+-	mov LEN, %r8
++	mov LEN, %r8d
+ 	and $0x18, %r8
+ 	add SRC, %r8
+ 	shl $32, %r9
+@@ -111,11 +111,11 @@ SYM_FUNC_START_LOCAL(__load_partial)
+ .Lld_partial_4:
+ 	movq %r9, MSG
+ 
+-	mov LEN, %r8
++	mov LEN, %r8d
+ 	and $0x8, %r8
+ 	jz .Lld_partial_8
+ 
+-	mov LEN, %r8
++	mov LEN, %r8d
+ 	and $0x10, %r8
+ 	add SRC, %r8
+ 	pslldq $8, MSG
+@@ -139,7 +139,7 @@ SYM_FUNC_END(__load_partial)
+  *   %r10
+  */
+ SYM_FUNC_START_LOCAL(__store_partial)
+-	mov LEN, %r8
++	mov LEN, %r8d
+ 	mov DST, %r9
+ 
+ 	movq T0, %r10
+@@ -677,7 +677,7 @@ SYM_TYPED_FUNC_START(crypto_aegis128_aesni_dec_tail)
+ 	call __store_partial
+ 
+ 	/* mask with byte count: */
+-	movq LEN, T0
++	movd LEN, T0
+ 	punpcklbw T0, T0
+ 	punpcklbw T0, T0
+ 	punpcklbw T0, T0
+@@ -702,7 +702,8 @@ SYM_FUNC_END(crypto_aegis128_aesni_dec_tail)
+ 
+ /*
+  * void crypto_aegis128_aesni_final(void *state, void *tag_xor,
+- *                                  u64 assoclen, u64 cryptlen);
++ *                                  unsigned int assoclen,
++ *                                  unsigned int cryptlen);
+  */
+ SYM_FUNC_START(crypto_aegis128_aesni_final)
+ 	FRAME_BEGIN
+@@ -715,8 +716,8 @@ SYM_FUNC_START(crypto_aegis128_aesni_final)
+ 	movdqu 0x40(STATEP), STATE4
+ 
+ 	/* prepare length block: */
+-	movq %rdx, MSG
+-	movq %rcx, T0
++	movd %edx, MSG
++	movd %ecx, T0
+ 	pslldq $8, T0
+ 	pxor T0, MSG
+ 	psllq $3, MSG /* multiply by 8 (to get bit count) */
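The register renames here (LEN as %esi, %r8d, movd instead of movq) follow from the prototype change: with the lengths now unsigned int, the x86-64 ABI does not guarantee that the upper halves of the argument registers are zero, so the code must operate on the 32-bit sub-registers -- and writes to 32-bit registers conveniently zero-extend into the full 64-bit register. A runnable demonstration of that zero-extension property (x86-64, GCC-style inline asm):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t x = 0xdeadbeefcafebabeull;

            /* Writing the 32-bit alias clears bits 63:32 of the
             * underlying 64-bit register. */
            asm("movl %k0, %k0" : "+r" (x));
            printf("%#llx\n", (unsigned long long)x);   /* 0xcafebabe */
            return 0;
    }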
+diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c
+index fd4670a6694e77..a087bc0c549875 100644
+--- a/arch/x86/events/intel/pt.c
++++ b/arch/x86/events/intel/pt.c
+@@ -828,11 +828,13 @@ static void pt_buffer_advance(struct pt_buffer *buf)
+ 	buf->cur_idx++;
+ 
+ 	if (buf->cur_idx == buf->cur->last) {
+-		if (buf->cur == buf->last)
++		if (buf->cur == buf->last) {
+ 			buf->cur = buf->first;
+-		else
++			buf->wrapped = true;
++		} else {
+ 			buf->cur = list_entry(buf->cur->list.next, struct topa,
+ 					      list);
++		}
+ 		buf->cur_idx = 0;
+ 	}
+ }
+@@ -846,8 +848,11 @@ static void pt_buffer_advance(struct pt_buffer *buf)
+ static void pt_update_head(struct pt *pt)
+ {
+ 	struct pt_buffer *buf = perf_get_aux(&pt->handle);
++	bool wrapped = buf->wrapped;
+ 	u64 topa_idx, base, old;
+ 
++	buf->wrapped = false;
++
+ 	if (buf->single) {
+ 		local_set(&buf->data_size, buf->output_off);
+ 		return;
+@@ -865,7 +870,7 @@ static void pt_update_head(struct pt *pt)
+ 	} else {
+ 		old = (local64_xchg(&buf->head, base) &
+ 		       ((buf->nr_pages << PAGE_SHIFT) - 1));
+-		if (base < old)
++		if (base < old || (base == old && wrapped))
+ 			base += buf->nr_pages << PAGE_SHIFT;
+ 
+ 		local_add(base - old, &buf->data_size);
+diff --git a/arch/x86/events/intel/pt.h b/arch/x86/events/intel/pt.h
+index f5e46c04c145d0..a1b6c04b7f6848 100644
+--- a/arch/x86/events/intel/pt.h
++++ b/arch/x86/events/intel/pt.h
+@@ -65,6 +65,7 @@ struct pt_pmu {
+  * @head:	logical write offset inside the buffer
+  * @snapshot:	if this is for a snapshot/overwrite counter
+  * @single:	use Single Range Output instead of ToPA
++ * @wrapped:	buffer advance wrapped back to the first topa table
+  * @stop_pos:	STOP topa entry index
+  * @intr_pos:	INT topa entry index
+  * @stop_te:	STOP topa entry pointer
+@@ -82,6 +83,7 @@ struct pt_buffer {
+ 	local64_t		head;
+ 	bool			snapshot;
+ 	bool			single;
++	bool			wrapped;
+ 	long			stop_pos, intr_pos;
+ 	struct topa_entry	*stop_te, *intr_te;
+ 	void			**data_pages;
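The ambiguity the new wrapped flag resolves is pure modular arithmetic: the write offset is tracked modulo the buffer size, so base == old could mean either "no new data" or "advanced exactly one full buffer". Only remembering that the ToPA walk wrapped back to the first table distinguishes the two. A toy model:

    #include <stdio.h>
    #include <stdbool.h>

    int main(void)
    {
            unsigned long size = 1ul << 12;          /* one-page buffer */
            unsigned long old = 0x800, base = 0x800; /* equal offsets */
            bool wrapped = true;          /* buf->cur wrapped to first */

            if (base < old || (base == old && wrapped))
                    base += size;                    /* unwrap */

            printf("bytes produced: %lu\n", base - old); /* 4096, not 0 */
            return 0;
    }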
+diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
+index 1f650b4dde509b..6c6e9b9f98a456 100644
+--- a/arch/x86/include/asm/atomic64_32.h
++++ b/arch/x86/include/asm/atomic64_32.h
+@@ -51,7 +51,8 @@ static __always_inline s64 arch_atomic64_read_nonatomic(const atomic64_t *v)
+ #ifdef CONFIG_X86_CMPXCHG64
+ #define __alternative_atomic64(f, g, out, in...) \
+ 	asm volatile("call %c[func]" \
+-		     : out : [func] "i" (atomic64_##g##_cx8), ## in)
++		     : ALT_OUTPUT_SP(out) \
++		     : [func] "i" (atomic64_##g##_cx8), ## in)
+ 
+ #define ATOMIC64_DECL(sym) ATOMIC64_DECL_ONE(sym##_cx8)
+ #else
+diff --git a/arch/x86/include/asm/cmpxchg_32.h b/arch/x86/include/asm/cmpxchg_32.h
+index 62cef2113ca749..fd1282a783ddbf 100644
+--- a/arch/x86/include/asm/cmpxchg_32.h
++++ b/arch/x86/include/asm/cmpxchg_32.h
+@@ -94,7 +94,7 @@ static __always_inline bool __try_cmpxchg64_local(volatile u64 *ptr, u64 *oldp,
+ 	asm volatile(ALTERNATIVE(_lock_loc				\
+ 				 "call cmpxchg8b_emu",			\
+ 				 _lock "cmpxchg8b %a[ptr]", X86_FEATURE_CX8) \
+-		     : "+a" (o.low), "+d" (o.high)			\
++		     : ALT_OUTPUT_SP("+a" (o.low), "+d" (o.high))	\
+ 		     : "b" (n.low), "c" (n.high), [ptr] "S" (_ptr)	\
+ 		     : "memory");					\
+ 									\
+@@ -123,8 +123,8 @@ static __always_inline u64 arch_cmpxchg64_local(volatile u64 *ptr, u64 old, u64
+ 				 "call cmpxchg8b_emu",			\
+ 				 _lock "cmpxchg8b %a[ptr]", X86_FEATURE_CX8) \
+ 		     CC_SET(e)						\
+-		     : CC_OUT(e) (ret),					\
+-		       "+a" (o.low), "+d" (o.high)			\
++		     : ALT_OUTPUT_SP(CC_OUT(e) (ret),			\
++				     "+a" (o.low), "+d" (o.high))	\
+ 		     : "b" (n.low), "c" (n.high), [ptr] "S" (_ptr)	\
+ 		     : "memory");					\
+ 									\
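
Both ALT_OUTPUT_SP() conversions above address the same problem: these asm statements perform a CALL, and the compiler has to be told so through the output list or it may address locals below the stack pointer across the call. As far as I recall (the macro bodies below are reproduced from memory, not from this patch), ALT_OUTPUT_SP() simply appends ASM_CALL_CONSTRAINT, a fake "+r" output on the stack pointer:

/* Sketch, not verbatim kernel source. */
register unsigned long current_stack_pointer asm("rsp");
#define ASM_CALL_CONSTRAINT	"+r" (current_stack_pointer)
#define ALT_OUTPUT_SP(x...)	x, ASM_CALL_CONSTRAINT

static inline unsigned long emu_call(unsigned long val)
{
	asm volatile("call some_emu_fn"		/* out-of-line helper */
		     : ALT_OUTPUT_SP("+a" (val))
		     : : "memory");
	return val;
}
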
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 6d9f763a7bb9d5..427d1daf06d06a 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -26,6 +26,7 @@
+ #include <linux/irqbypass.h>
+ #include <linux/hyperv.h>
+ #include <linux/kfifo.h>
++#include <linux/sched/vhost_task.h>
+ 
+ #include <asm/apic.h>
+ #include <asm/pvclock-abi.h>
+@@ -1443,7 +1444,8 @@ struct kvm_arch {
+ 	bool sgx_provisioning_allowed;
+ 
+ 	struct kvm_x86_pmu_event_filter __rcu *pmu_event_filter;
+-	struct task_struct *nx_huge_page_recovery_thread;
++	struct vhost_task *nx_huge_page_recovery_thread;
++	u64 nx_huge_page_last;
+ 
+ #ifdef CONFIG_X86_64
+ 	/* The number of TDP MMU pages across all roots. */
+diff --git a/arch/x86/include/asm/shared/tdx.h b/arch/x86/include/asm/shared/tdx.h
+index fdfd41511b0211..fecb2a6e864be1 100644
+--- a/arch/x86/include/asm/shared/tdx.h
++++ b/arch/x86/include/asm/shared/tdx.h
+@@ -16,11 +16,20 @@
+ #define TDG_VP_VEINFO_GET		3
+ #define TDG_MR_REPORT			4
+ #define TDG_MEM_PAGE_ACCEPT		6
++#define TDG_VM_RD			7
+ #define TDG_VM_WR			8
+ 
+-/* TDCS fields. To be used by TDG.VM.WR and TDG.VM.RD module calls */
++/* TDX TD-Scope Metadata. To be used by TDG.VM.WR and TDG.VM.RD */
++#define TDCS_CONFIG_FLAGS		0x1110000300000016
++#define TDCS_TD_CTLS			0x1110000300000017
+ #define TDCS_NOTIFY_ENABLES		0x9100000000000010
+ 
++/* TDCS_CONFIG_FLAGS bits */
++#define TDCS_CONFIG_FLEXIBLE_PENDING_VE	BIT_ULL(1)
++
++/* TDCS_TD_CTLS bits */
++#define TD_CTLS_PENDING_VE_DISABLE	BIT_ULL(0)
++
+ /* TDX hypercall Leaf IDs */
+ #define TDVMCALL_MAP_GPA		0x10001
+ #define TDVMCALL_GET_QUOTE		0x10002
+diff --git a/arch/x86/include/asm/tlb.h b/arch/x86/include/asm/tlb.h
+index 580636cdc257b7..4d3c9d00d6b6b2 100644
+--- a/arch/x86/include/asm/tlb.h
++++ b/arch/x86/include/asm/tlb.h
+@@ -34,4 +34,8 @@ static inline void __tlb_remove_table(void *table)
+ 	free_page_and_swap_cache(table);
+ }
+ 
++static inline void invlpg(unsigned long addr)
++{
++	asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
++}
+ #endif /* _ASM_X86_TLB_H */
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index 823f44f7bc9465..d8408aafeed988 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -798,6 +798,7 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
+ static const struct x86_cpu_desc erratum_1386_microcode[] = {
+ 	AMD_CPU_DESC(0x17,  0x1, 0x2, 0x0800126e),
+ 	AMD_CPU_DESC(0x17, 0x31, 0x0, 0x08301052),
++	{},
+ };
+ 
+ static void fix_erratum_1386(struct cpuinfo_x86 *c)
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index f43bb974fc66d7..b17bcf9b67eed4 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -2392,12 +2392,12 @@ void __init arch_cpu_finalize_init(void)
+ 	alternative_instructions();
+ 
+ 	if (IS_ENABLED(CONFIG_X86_64)) {
+-		unsigned long USER_PTR_MAX = TASK_SIZE_MAX-1;
++		unsigned long USER_PTR_MAX = TASK_SIZE_MAX;
+ 
+ 		/*
+ 		 * Enable this when LAM is gated on LASS support
+ 		if (cpu_feature_enabled(X86_FEATURE_LAM))
+-			USER_PTR_MAX = (1ul << 63) - PAGE_SIZE - 1;
++			USER_PTR_MAX = (1ul << 63) - PAGE_SIZE;
+ 		 */
+ 		runtime_const_init(ptr, USER_PTR_MAX);
+ 
+diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
+index 31a73715d75531..fb5d0c67fbab17 100644
+--- a/arch/x86/kernel/cpu/microcode/amd.c
++++ b/arch/x86/kernel/cpu/microcode/amd.c
+@@ -34,6 +34,7 @@
+ #include <asm/setup.h>
+ #include <asm/cpu.h>
+ #include <asm/msr.h>
++#include <asm/tlb.h>
+ 
+ #include "internal.h"
+ 
+@@ -483,11 +484,25 @@ static void scan_containers(u8 *ucode, size_t size, struct cont_desc *desc)
+ 	}
+ }
+ 
+-static int __apply_microcode_amd(struct microcode_amd *mc)
++static int __apply_microcode_amd(struct microcode_amd *mc, unsigned int psize)
+ {
++	unsigned long p_addr = (unsigned long)&mc->hdr.data_code;
+ 	u32 rev, dummy;
+ 
+-	native_wrmsrl(MSR_AMD64_PATCH_LOADER, (u64)(long)&mc->hdr.data_code);
++	native_wrmsrl(MSR_AMD64_PATCH_LOADER, p_addr);
++
++	if (x86_family(bsp_cpuid_1_eax) == 0x17) {
++		unsigned long p_addr_end = p_addr + psize - 1;
++
++		invlpg(p_addr);
++
++		/*
++		 * Flush next page too if patch image is crossing a page
++		 * boundary.
++		 */
++		if (p_addr >> PAGE_SHIFT != p_addr_end >> PAGE_SHIFT)
++			invlpg(p_addr_end);
++	}
+ 
+ 	/* verify patch application was successful */
+ 	native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
+@@ -529,7 +544,7 @@ static bool early_apply_microcode(u32 old_rev, void *ucode, size_t size)
+ 	if (old_rev > mc->hdr.patch_id)
+ 		return ret;
+ 
+-	return !__apply_microcode_amd(mc);
++	return !__apply_microcode_amd(mc, desc.psize);
+ }
+ 
+ static bool get_builtin_microcode(struct cpio_data *cp)
+@@ -745,7 +760,7 @@ void reload_ucode_amd(unsigned int cpu)
+ 	rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
+ 
+ 	if (rev < mc->hdr.patch_id) {
+-		if (!__apply_microcode_amd(mc))
++		if (!__apply_microcode_amd(mc, p->size))
+ 			pr_info_once("reload revision: 0x%08x\n", mc->hdr.patch_id);
+ 	}
+ }
+@@ -798,7 +813,7 @@ static enum ucode_state apply_microcode_amd(int cpu)
+ 		goto out;
+ 	}
+ 
+-	if (__apply_microcode_amd(mc_amd)) {
++	if (__apply_microcode_amd(mc_amd, p->size)) {
+ 		pr_err("CPU%d: update failed for patch_level=0x%08x\n",
+ 			cpu, mc_amd->hdr.patch_id);
+ 		return UCODE_ERROR;
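
The family-0x17 branch added to __apply_microcode_amd() above flushes the TLB entries covering the patch image after the wrmsr, including the next page when the image straddles a page boundary. The boundary test is the usual compare-page-numbers idiom; a self-contained check of its edge cases (PAGE_SHIFT of 12 assumed):

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/* Same comparison as the patch: first and last byte on different pages? */
static bool crosses_page(uintptr_t addr, size_t len)
{
	uintptr_t end = addr + len - 1;

	return (addr >> PAGE_SHIFT) != (end >> PAGE_SHIFT);
}

int main(void)
{
	assert(!crosses_page(0x1000, PAGE_SIZE));	/* exactly one page  */
	assert(crosses_page(0x1800, PAGE_SIZE));	/* straddles two     */
	assert(!crosses_page(0x1fff, 1));		/* single last byte  */
	return 0;
}
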
+diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c
+index 64280879c68c02..59d23cdf4ed0fa 100644
+--- a/arch/x86/kernel/devicetree.c
++++ b/arch/x86/kernel/devicetree.c
+@@ -305,7 +305,7 @@ void __init x86_flattree_get_config(void)
+ 			map_len = size;
+ 		}
+ 
+-		early_init_dt_verify(dt);
++		early_init_dt_verify(dt, __pa(dt));
+ 	}
+ 
+ 	unflatten_and_copy_device_tree();
+diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c
+index d00c28aaa5be45..d4705a348a8045 100644
+--- a/arch/x86/kernel/unwind_orc.c
++++ b/arch/x86/kernel/unwind_orc.c
+@@ -723,7 +723,7 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
+ 		state->sp = task->thread.sp + sizeof(*frame);
+ 		state->bp = READ_ONCE_NOCHECK(frame->bp);
+ 		state->ip = READ_ONCE_NOCHECK(frame->ret_addr);
+-		state->signal = (void *)state->ip == ret_from_fork;
++		state->signal = (void *)state->ip == ret_from_fork_asm;
+ 	}
+ 
+ 	if (get_stack_info((unsigned long *)state->sp, state->task,
+diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
+index f09f13c01c6bbd..d7f27a3276549b 100644
+--- a/arch/x86/kvm/Kconfig
++++ b/arch/x86/kvm/Kconfig
+@@ -18,8 +18,7 @@ menuconfig VIRTUALIZATION
+ if VIRTUALIZATION
+ 
+ config KVM_X86
+-	def_tristate KVM if KVM_INTEL || KVM_AMD
+-	depends on X86_LOCAL_APIC
++	def_tristate KVM if (KVM_INTEL != n || KVM_AMD != n)
+ 	select KVM_COMMON
+ 	select KVM_GENERIC_MMU_NOTIFIER
+ 	select HAVE_KVM_IRQCHIP
+@@ -29,6 +28,7 @@ config KVM_X86
+ 	select HAVE_KVM_IRQ_BYPASS
+ 	select HAVE_KVM_IRQ_ROUTING
+ 	select HAVE_KVM_READONLY_MEM
++	select VHOST_TASK
+ 	select KVM_ASYNC_PF
+ 	select USER_RETURN_NOTIFIER
+ 	select KVM_MMIO
+@@ -49,6 +49,7 @@ config KVM_X86
+ 
+ config KVM
+ 	tristate "Kernel-based Virtual Machine (KVM) support"
++	depends on X86_LOCAL_APIC
+ 	help
+ 	  Support hosting fully virtualized guest machines using hardware
+ 	  virtualization extensions.  You will need a fairly recent
+diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
+index 8e853a5fc867b7..3e353ed1f76736 100644
+--- a/arch/x86/kvm/mmu/mmu.c
++++ b/arch/x86/kvm/mmu/mmu.c
+@@ -7281,7 +7281,7 @@ static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
+ 			kvm_mmu_zap_all_fast(kvm);
+ 			mutex_unlock(&kvm->slots_lock);
+ 
+-			wake_up_process(kvm->arch.nx_huge_page_recovery_thread);
++			vhost_task_wake(kvm->arch.nx_huge_page_recovery_thread);
+ 		}
+ 		mutex_unlock(&kvm_lock);
+ 	}
+@@ -7427,7 +7427,7 @@ static int set_nx_huge_pages_recovery_param(const char *val, const struct kernel
+ 		mutex_lock(&kvm_lock);
+ 
+ 		list_for_each_entry(kvm, &vm_list, vm_list)
+-			wake_up_process(kvm->arch.nx_huge_page_recovery_thread);
++			vhost_task_wake(kvm->arch.nx_huge_page_recovery_thread);
+ 
+ 		mutex_unlock(&kvm_lock);
+ 	}
+@@ -7530,62 +7530,56 @@ static void kvm_recover_nx_huge_pages(struct kvm *kvm)
+ 	srcu_read_unlock(&kvm->srcu, rcu_idx);
+ }
+ 
+-static long get_nx_huge_page_recovery_timeout(u64 start_time)
++static void kvm_nx_huge_page_recovery_worker_kill(void *data)
+ {
+-	bool enabled;
+-	uint period;
+-
+-	enabled = calc_nx_huge_pages_recovery_period(&period);
+-
+-	return enabled ? start_time + msecs_to_jiffies(period) - get_jiffies_64()
+-		       : MAX_SCHEDULE_TIMEOUT;
+ }
+ 
+-static int kvm_nx_huge_page_recovery_worker(struct kvm *kvm, uintptr_t data)
++static bool kvm_nx_huge_page_recovery_worker(void *data)
+ {
+-	u64 start_time;
++	struct kvm *kvm = data;
++	bool enabled;
++	uint period;
+ 	long remaining_time;
+ 
+-	while (true) {
+-		start_time = get_jiffies_64();
+-		remaining_time = get_nx_huge_page_recovery_timeout(start_time);
+-
+-		set_current_state(TASK_INTERRUPTIBLE);
+-		while (!kthread_should_stop() && remaining_time > 0) {
+-			schedule_timeout(remaining_time);
+-			remaining_time = get_nx_huge_page_recovery_timeout(start_time);
+-			set_current_state(TASK_INTERRUPTIBLE);
+-		}
+-
+-		set_current_state(TASK_RUNNING);
+-
+-		if (kthread_should_stop())
+-			return 0;
++	enabled = calc_nx_huge_pages_recovery_period(&period);
++	if (!enabled)
++		return false;
+ 
+-		kvm_recover_nx_huge_pages(kvm);
++	remaining_time = kvm->arch.nx_huge_page_last + msecs_to_jiffies(period)
++		- get_jiffies_64();
++	if (remaining_time > 0) {
++		schedule_timeout(remaining_time);
++		/* check for signals and come back */
++		return true;
+ 	}
++
++	__set_current_state(TASK_RUNNING);
++	kvm_recover_nx_huge_pages(kvm);
++	kvm->arch.nx_huge_page_last = get_jiffies_64();
++	return true;
+ }
+ 
+ int kvm_mmu_post_init_vm(struct kvm *kvm)
+ {
+-	int err;
+-
+ 	if (nx_hugepage_mitigation_hard_disabled)
+ 		return 0;
+ 
+-	err = kvm_vm_create_worker_thread(kvm, kvm_nx_huge_page_recovery_worker, 0,
+-					  "kvm-nx-lpage-recovery",
+-					  &kvm->arch.nx_huge_page_recovery_thread);
+-	if (!err)
+-		kthread_unpark(kvm->arch.nx_huge_page_recovery_thread);
++	kvm->arch.nx_huge_page_last = get_jiffies_64();
++	kvm->arch.nx_huge_page_recovery_thread = vhost_task_create(
++		kvm_nx_huge_page_recovery_worker, kvm_nx_huge_page_recovery_worker_kill,
++		kvm, "kvm-nx-lpage-recovery");
+ 
+-	return err;
++	if (!kvm->arch.nx_huge_page_recovery_thread)
++		return -ENOMEM;
++
++	vhost_task_start(kvm->arch.nx_huge_page_recovery_thread);
++	return 0;
+ }
+ 
+ void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
+ {
+ 	if (kvm->arch.nx_huge_page_recovery_thread)
+-		kthread_stop(kvm->arch.nx_huge_page_recovery_thread);
++		vhost_task_stop(kvm->arch.nx_huge_page_recovery_thread);
+ }
+ 
+ #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
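
The conversion above replaces a hand-rolled kthread loop with a vhost_task. Judging from the usage here (and hedging on the exact contract), the worker callback is invoked in a loop: returning true asks to be invoked again with a signal check in between, while returning false parks the task until vhost_task_wake(). A stripped-down periodic worker in that style; struct my_ctx, its fields and do_one_pass() are hypothetical:

static bool periodic_worker(void *data)
{
	struct my_ctx *ctx = data;	/* hypothetical per-VM context */
	long remaining;

	if (!ctx->enabled)
		return false;		/* park until vhost_task_wake() */

	remaining = ctx->last + msecs_to_jiffies(ctx->period_ms)
			- get_jiffies_64();
	if (remaining > 0) {
		schedule_timeout(remaining);
		return true;		/* loop: re-check signals/wakeups */
	}

	__set_current_state(TASK_RUNNING);
	do_one_pass(ctx);		/* hypothetical unit of work */
	ctx->last = get_jiffies_64();
	return true;
}
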
+diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c
+index 8f7eb3ad88fcb9..5521608077ec09 100644
+--- a/arch/x86/kvm/mmu/spte.c
++++ b/arch/x86/kvm/mmu/spte.c
+@@ -226,12 +226,20 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
+ 		spte |= PT_WRITABLE_MASK | shadow_mmu_writable_mask;
+ 
+ 		/*
+-		 * Optimization: for pte sync, if spte was writable the hash
+-		 * lookup is unnecessary (and expensive). Write protection
+-		 * is responsibility of kvm_mmu_get_page / kvm_mmu_sync_roots.
+-		 * Same reasoning can be applied to dirty page accounting.
++		 * When overwriting an existing leaf SPTE, and the old SPTE was
++		 * writable, skip trying to unsync shadow pages as any relevant
++		 * shadow pages must already be unsync, i.e. the hash lookup is
++		 * unnecessary (and expensive).
++		 *
++		 * The same reasoning applies to dirty page/folio accounting;
++		 * KVM will mark the folio dirty using the old SPTE, thus
++		 * there's no need to immediately mark the new SPTE as dirty.
++		 *
++		 * Note, both cases rely on KVM not changing PFNs without first
++		 * zapping the old SPTE, which is guaranteed by both the shadow
++		 * MMU and the TDP MMU.
+ 		 */
+-		if (is_writable_pte(old_spte))
++		if (is_last_spte(old_spte, level) && is_writable_pte(old_spte))
+ 			goto out;
+ 
+ 		/*
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index d28618e9277ede..92fee5e8a3c741 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -2551,28 +2551,6 @@ static bool cpu_has_sgx(void)
+ 	return cpuid_eax(0) >= 0x12 && (cpuid_eax(0x12) & BIT(0));
+ }
+ 
+-/*
+- * Some cpus support VM_{ENTRY,EXIT}_IA32_PERF_GLOBAL_CTRL but they
+- * can't be used due to errata where VM Exit may incorrectly clear
+- * IA32_PERF_GLOBAL_CTRL[34:32]. Work around the errata by using the
+- * MSR load mechanism to switch IA32_PERF_GLOBAL_CTRL.
+- */
+-static bool cpu_has_perf_global_ctrl_bug(void)
+-{
+-	switch (boot_cpu_data.x86_vfm) {
+-	case INTEL_NEHALEM_EP:	/* AAK155 */
+-	case INTEL_NEHALEM:	/* AAP115 */
+-	case INTEL_WESTMERE:	/* AAT100 */
+-	case INTEL_WESTMERE_EP:	/* BC86,AAY89,BD102 */
+-	case INTEL_NEHALEM_EX:	/* BA97 */
+-		return true;
+-	default:
+-		break;
+-	}
+-
+-	return false;
+-}
+-
+ static int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt, u32 msr, u32 *result)
+ {
+ 	u32 vmx_msr_low, vmx_msr_high;
+@@ -2732,6 +2710,27 @@ static int setup_vmcs_config(struct vmcs_config *vmcs_conf,
+ 		_vmexit_control &= ~x_ctrl;
+ 	}
+ 
++	/*
++	 * Some cpus support VM_{ENTRY,EXIT}_IA32_PERF_GLOBAL_CTRL but they
++	 * can't be used due to an errata where VM Exit may incorrectly clear
++	 * IA32_PERF_GLOBAL_CTRL[34:32].  Workaround the errata by using the
++	 * MSR load mechanism to switch IA32_PERF_GLOBAL_CTRL.
++	 */
++	switch (boot_cpu_data.x86_vfm) {
++	case INTEL_NEHALEM_EP:	/* AAK155 */
++	case INTEL_NEHALEM:	/* AAP115 */
++	case INTEL_WESTMERE:	/* AAT100 */
++	case INTEL_WESTMERE_EP:	/* BC86,AAY89,BD102 */
++	case INTEL_NEHALEM_EX:	/* BA97 */
++		_vmentry_control &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
++		_vmexit_control &= ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
++		pr_warn_once("VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL "
++			     "does not work properly. Using workaround\n");
++		break;
++	default:
++		break;
++	}
++
+ 	rdmsrl(MSR_IA32_VMX_BASIC, basic_msr);
+ 
+ 	/* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */
+@@ -4422,9 +4421,6 @@ static u32 vmx_vmentry_ctrl(void)
+ 			  VM_ENTRY_LOAD_IA32_EFER |
+ 			  VM_ENTRY_IA32E_MODE);
+ 
+-	if (cpu_has_perf_global_ctrl_bug())
+-		vmentry_ctrl &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
+-
+ 	return vmentry_ctrl;
+ }
+ 
+@@ -4442,10 +4438,6 @@ static u32 vmx_vmexit_ctrl(void)
+ 	if (vmx_pt_mode_is_system())
+ 		vmexit_ctrl &= ~(VM_EXIT_PT_CONCEAL_PIP |
+ 				 VM_EXIT_CLEAR_IA32_RTIT_CTL);
+-
+-	if (cpu_has_perf_global_ctrl_bug())
+-		vmexit_ctrl &= ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
+-
+ 	/* Loading of EFER and PERF_GLOBAL_CTRL are toggled dynamically */
+ 	return vmexit_ctrl &
+ 		~(VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | VM_EXIT_LOAD_IA32_EFER);
+@@ -8400,10 +8392,6 @@ __init int vmx_hardware_setup(void)
+ 	if (setup_vmcs_config(&vmcs_config, &vmx_capability) < 0)
+ 		return -EIO;
+ 
+-	if (cpu_has_perf_global_ctrl_bug())
+-		pr_warn_once("VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL "
+-			     "does not work properly. Using workaround\n");
+-
+ 	if (boot_cpu_has(X86_FEATURE_NX))
+ 		kvm_enable_efer_bits(EFER_NX);
+ 
+diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
+index 86593d1b787d8a..b0678d59ebdb4a 100644
+--- a/arch/x86/mm/tlb.c
++++ b/arch/x86/mm/tlb.c
+@@ -20,6 +20,7 @@
+ #include <asm/cacheflush.h>
+ #include <asm/apic.h>
+ #include <asm/perf_event.h>
++#include <asm/tlb.h>
+ 
+ #include "mm_internal.h"
+ 
+@@ -1140,7 +1141,7 @@ STATIC_NOPV void native_flush_tlb_one_user(unsigned long addr)
+ 	bool cpu_pcide;
+ 
+ 	/* Flush 'addr' from the kernel PCID: */
+-	asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
++	invlpg(addr);
+ 
+ 	/* If PTI is off there is no user PCID and nothing to flush. */
+ 	if (!static_cpu_has(X86_FEATURE_PTI))
+diff --git a/arch/x86/platform/pvh/head.S b/arch/x86/platform/pvh/head.S
+index 64fca49cd88ff9..ce4fd8d33da467 100644
+--- a/arch/x86/platform/pvh/head.S
++++ b/arch/x86/platform/pvh/head.S
+@@ -172,7 +172,14 @@ SYM_CODE_START_LOCAL(pvh_start_xen)
+ 	movq %rbp, %rbx
+ 	subq $_pa(pvh_start_xen), %rbx
+ 	movq %rbx, phys_base(%rip)
+-	call xen_prepare_pvh
++
++	/* Call xen_prepare_pvh() via the kernel virtual mapping */
++	leaq xen_prepare_pvh(%rip), %rax
++	subq phys_base(%rip), %rax
++	addq $__START_KERNEL_map, %rax
++	ANNOTATE_RETPOLINE_SAFE
++	call *%rax
++
+ 	/*
+ 	 * Clear phys_base.  __startup_64 will *add* to its value,
+ 	 * so reset to 0.
+diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
+index bdec4a773af098..e51f2060e83089 100644
+--- a/arch/xtensa/kernel/setup.c
++++ b/arch/xtensa/kernel/setup.c
+@@ -216,7 +216,7 @@ static int __init xtensa_dt_io_area(unsigned long node, const char *uname,
+ 
+ void __init early_init_devtree(void *params)
+ {
+-	early_init_dt_scan(params);
++	early_init_dt_scan(params, __pa(params));
+ 	of_scan_flat_dt(xtensa_dt_io_area, NULL);
+ 
+ 	if (!command_line[0])
+diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
+index e831aedb464329..9fb9f353315025 100644
+--- a/block/bfq-cgroup.c
++++ b/block/bfq-cgroup.c
+@@ -736,6 +736,7 @@ static void bfq_sync_bfqq_move(struct bfq_data *bfqd,
+ 		 */
+ 		bfq_put_cooperator(sync_bfqq);
+ 		bic_set_bfqq(bic, NULL, true, act_idx);
++		bfq_release_process_ref(bfqd, sync_bfqq);
+ 	}
+ }
+ 
+diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
+index 0747d9d0e48c8a..95dd7b79593565 100644
+--- a/block/bfq-iosched.c
++++ b/block/bfq-iosched.c
+@@ -582,23 +582,31 @@ static struct request *bfq_choose_req(struct bfq_data *bfqd,
+ #define BFQ_LIMIT_INLINE_DEPTH 16
+ 
+ #ifdef CONFIG_BFQ_GROUP_IOSCHED
+-static bool bfqq_request_over_limit(struct bfq_queue *bfqq, int limit)
++static bool bfqq_request_over_limit(struct bfq_data *bfqd,
++				    struct bfq_io_cq *bic, blk_opf_t opf,
++				    unsigned int act_idx, int limit)
+ {
+-	struct bfq_data *bfqd = bfqq->bfqd;
+-	struct bfq_entity *entity = &bfqq->entity;
+ 	struct bfq_entity *inline_entities[BFQ_LIMIT_INLINE_DEPTH];
+ 	struct bfq_entity **entities = inline_entities;
+-	int depth, level, alloc_depth = BFQ_LIMIT_INLINE_DEPTH;
+-	int class_idx = bfqq->ioprio_class - 1;
++	int alloc_depth = BFQ_LIMIT_INLINE_DEPTH;
+ 	struct bfq_sched_data *sched_data;
++	struct bfq_entity *entity;
++	struct bfq_queue *bfqq;
+ 	unsigned long wsum;
+ 	bool ret = false;
+-
+-	if (!entity->on_st_or_in_serv)
+-		return false;
++	int depth;
++	int level;
+ 
+ retry:
+ 	spin_lock_irq(&bfqd->lock);
++	bfqq = bic_to_bfqq(bic, op_is_sync(opf), act_idx);
++	if (!bfqq)
++		goto out;
++
++	entity = &bfqq->entity;
++	if (!entity->on_st_or_in_serv)
++		goto out;
++
+ 	/* +1 for bfqq entity, root cgroup not included */
+ 	depth = bfqg_to_blkg(bfqq_group(bfqq))->blkcg->css.cgroup->level + 1;
+ 	if (depth > alloc_depth) {
+@@ -643,7 +651,7 @@ static bool bfqq_request_over_limit(struct bfq_queue *bfqq, int limit)
+ 			 * class.
+ 			 */
+ 			wsum = 0;
+-			for (i = 0; i <= class_idx; i++) {
++			for (i = 0; i <= bfqq->ioprio_class - 1; i++) {
+ 				wsum = wsum * IOPRIO_BE_NR +
+ 					sched_data->service_tree[i].wsum;
+ 			}
+@@ -666,7 +674,9 @@ static bool bfqq_request_over_limit(struct bfq_queue *bfqq, int limit)
+ 	return ret;
+ }
+ #else
+-static bool bfqq_request_over_limit(struct bfq_queue *bfqq, int limit)
++static bool bfqq_request_over_limit(struct bfq_data *bfqd,
++				    struct bfq_io_cq *bic, blk_opf_t opf,
++				    unsigned int act_idx, int limit)
+ {
+ 	return false;
+ }
+@@ -704,8 +714,9 @@ static void bfq_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
+ 	}
+ 
+ 	for (act_idx = 0; bic && act_idx < bfqd->num_actuators; act_idx++) {
+-		struct bfq_queue *bfqq =
+-			bic_to_bfqq(bic, op_is_sync(opf), act_idx);
++		/* Fast path to check if bfqq is already allocated. */
++		if (!bic_to_bfqq(bic, op_is_sync(opf), act_idx))
++			continue;
+ 
+ 		/*
+ 		 * Does queue (or any parent entity) exceed number of
+@@ -713,7 +724,7 @@ static void bfq_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
+ 		 * limit depth so that it cannot consume more
+ 		 * available requests and thus starve other entities.
+ 		 */
+-		if (bfqq && bfqq_request_over_limit(bfqq, limit)) {
++		if (bfqq_request_over_limit(bfqd, bic, opf, act_idx, limit)) {
+ 			depth = 1;
+ 			break;
+ 		}
+@@ -5434,8 +5445,6 @@ void bfq_put_cooperator(struct bfq_queue *bfqq)
+ 		bfq_put_queue(__bfqq);
+ 		__bfqq = next;
+ 	}
+-
+-	bfq_release_process_ref(bfqq->bfqd, bfqq);
+ }
+ 
+ static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+@@ -5448,6 +5457,8 @@ static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+ 	bfq_log_bfqq(bfqd, bfqq, "exit_bfqq: %p, %d", bfqq, bfqq->ref);
+ 
+ 	bfq_put_cooperator(bfqq);
++
++	bfq_release_process_ref(bfqd, bfqq);
+ }
+ 
+ static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync,
+@@ -6734,6 +6745,8 @@ bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
+ 	bic_set_bfqq(bic, NULL, true, bfqq->actuator_idx);
+ 
+ 	bfq_put_cooperator(bfqq);
++
++	bfq_release_process_ref(bfqq->bfqd, bfqq);
+ 	return NULL;
+ }
+ 
+diff --git a/block/blk-core.c b/block/blk-core.c
+index bc5e8c5eaac9ff..4f791a3114a12c 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -261,6 +261,8 @@ static void blk_free_queue(struct request_queue *q)
+ 		blk_mq_release(q);
+ 
+ 	ida_free(&blk_queue_ida, q->id);
++	lockdep_unregister_key(&q->io_lock_cls_key);
++	lockdep_unregister_key(&q->q_lock_cls_key);
+ 	call_rcu(&q->rcu_head, blk_free_queue_rcu);
+ }
+ 
+@@ -278,18 +280,20 @@ void blk_put_queue(struct request_queue *q)
+ }
+ EXPORT_SYMBOL(blk_put_queue);
+ 
+-void blk_queue_start_drain(struct request_queue *q)
++bool blk_queue_start_drain(struct request_queue *q)
+ {
+ 	/*
+ 	 * When queue DYING flag is set, we need to block new req
+ 	 * entering queue, so we call blk_freeze_queue_start() to
+ 	 * prevent I/O from crossing blk_queue_enter().
+ 	 */
+-	blk_freeze_queue_start(q);
++	bool freeze = __blk_freeze_queue_start(q, current);
+ 	if (queue_is_mq(q))
+ 		blk_mq_wake_waiters(q);
+ 	/* Make blk_queue_enter() reexamine the DYING flag. */
+ 	wake_up_all(&q->mq_freeze_wq);
++
++	return freeze;
+ }
+ 
+ /**
+@@ -321,6 +325,8 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
+ 			return -ENODEV;
+ 	}
+ 
++	rwsem_acquire_read(&q->q_lockdep_map, 0, 0, _RET_IP_);
++	rwsem_release(&q->q_lockdep_map, _RET_IP_);
+ 	return 0;
+ }
+ 
+@@ -352,6 +358,8 @@ int __bio_queue_enter(struct request_queue *q, struct bio *bio)
+ 			goto dead;
+ 	}
+ 
++	rwsem_acquire_read(&q->io_lockdep_map, 0, 0, _RET_IP_);
++	rwsem_release(&q->io_lockdep_map, _RET_IP_);
+ 	return 0;
+ dead:
+ 	bio_io_error(bio);
+@@ -441,6 +449,12 @@ struct request_queue *blk_alloc_queue(struct queue_limits *lim, int node_id)
+ 				PERCPU_REF_INIT_ATOMIC, GFP_KERNEL);
+ 	if (error)
+ 		goto fail_stats;
++	lockdep_register_key(&q->io_lock_cls_key);
++	lockdep_register_key(&q->q_lock_cls_key);
++	lockdep_init_map(&q->io_lockdep_map, "&q->q_usage_counter(io)",
++			 &q->io_lock_cls_key, 0);
++	lockdep_init_map(&q->q_lockdep_map, "&q->q_usage_counter(queue)",
++			 &q->q_lock_cls_key, 0);
+ 
+ 	q->nr_requests = BLKDEV_DEFAULT_RQ;
+ 
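
blk_alloc_queue() now gives each queue its own dynamic lockdep keys and models q_usage_counter as two pseudo-rwsems (one for I/O, one for queue-level freezing), so lockdep can flag freeze/enter inversions. The per-object key is the standard pattern when otherwise-identical structures must not share a lock class; a generic kernel-style sketch with illustrative names:

#include <linux/lockdep.h>
#include <linux/slab.h>

struct tracked {
	struct lock_class_key	key;
	struct lockdep_map	map;
};

static struct tracked *tracked_alloc(void)
{
	struct tracked *t = kzalloc(sizeof(*t), GFP_KERNEL);

	if (!t)
		return NULL;
	lockdep_register_key(&t->key);		/* one class per object */
	lockdep_init_map(&t->map, "tracked->usage", &t->key, 0);
	return t;
}

static void tracked_enter(struct tracked *t)
{
	/* model "entering" as a momentary read lock, as the patch does */
	rwsem_acquire_read(&t->map, 0, 0, _RET_IP_);
	rwsem_release(&t->map, _RET_IP_);
}

static void tracked_free(struct tracked *t)
{
	lockdep_unregister_key(&t->key);	/* must precede the kfree() */
	kfree(t);
}
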
+diff --git a/block/blk-merge.c b/block/blk-merge.c
+index ad763ec313b6ad..5baa950f34fe21 100644
+--- a/block/blk-merge.c
++++ b/block/blk-merge.c
+@@ -166,17 +166,6 @@ struct bio *bio_split_discard(struct bio *bio, const struct queue_limits *lim,
+ 	return bio_submit_split(bio, split_sectors);
+ }
+ 
+-struct bio *bio_split_write_zeroes(struct bio *bio,
+-		const struct queue_limits *lim, unsigned *nsegs)
+-{
+-	*nsegs = 0;
+-	if (!lim->max_write_zeroes_sectors)
+-		return bio;
+-	if (bio_sectors(bio) <= lim->max_write_zeroes_sectors)
+-		return bio;
+-	return bio_submit_split(bio, lim->max_write_zeroes_sectors);
+-}
+-
+ static inline unsigned int blk_boundary_sectors(const struct queue_limits *lim,
+ 						bool is_atomic)
+ {
+@@ -211,7 +200,9 @@ static inline unsigned get_max_io_size(struct bio *bio,
+ 	 * We ignore lim->max_sectors for atomic writes because it may less
+ 	 * than the actual bio size, which we cannot tolerate.
+ 	 */
+-	if (is_atomic)
++	if (bio_op(bio) == REQ_OP_WRITE_ZEROES)
++		max_sectors = lim->max_write_zeroes_sectors;
++	else if (is_atomic)
+ 		max_sectors = lim->atomic_write_max_sectors;
+ 	else
+ 		max_sectors = lim->max_sectors;
+@@ -296,6 +287,14 @@ static bool bvec_split_segs(const struct queue_limits *lim,
+ 	return len > 0 || bv->bv_len > max_len;
+ }
+ 
++static unsigned int bio_split_alignment(struct bio *bio,
++		const struct queue_limits *lim)
++{
++	if (op_is_write(bio_op(bio)) && lim->zone_write_granularity)
++		return lim->zone_write_granularity;
++	return lim->logical_block_size;
++}
++
+ /**
+  * bio_split_rw_at - check if and where to split a read/write bio
+  * @bio:  [in] bio to be split
+@@ -358,7 +357,7 @@ int bio_split_rw_at(struct bio *bio, const struct queue_limits *lim,
+ 	 * split size so that each bio is properly block size aligned, even if
+ 	 * we do not use the full hardware limits.
+ 	 */
+-	bytes = ALIGN_DOWN(bytes, lim->logical_block_size);
++	bytes = ALIGN_DOWN(bytes, bio_split_alignment(bio, lim));
+ 
+ 	/*
+ 	 * Bio splitting may cause subtle trouble such as hang when doing sync
+@@ -398,6 +397,26 @@ struct bio *bio_split_zone_append(struct bio *bio,
+ 	return bio_submit_split(bio, split_sectors);
+ }
+ 
++struct bio *bio_split_write_zeroes(struct bio *bio,
++		const struct queue_limits *lim, unsigned *nsegs)
++{
++	unsigned int max_sectors = get_max_io_size(bio, lim);
++
++	*nsegs = 0;
++
++	/*
++	 * An unset limit should normally not happen, as bio submission is keyed
++	 * off having a non-zero limit.  But SCSI can clear the limit in the
++	 * I/O completion handler, and we can race and see this.  Splitting to a
++	 * zero limit obviously doesn't make sense, so band-aid it here.
++	 */
++	if (!max_sectors)
++		return bio;
++	if (bio_sectors(bio) <= max_sectors)
++		return bio;
++	return bio_submit_split(bio, max_sectors);
++}
++
+ /**
+  * bio_split_to_limits - split a bio to fit the queue limits
+  * @bio:     bio to be split
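
bio_split_rw_at() now rounds the split point down to bio_split_alignment(), which for writes to zoned devices is the zone write granularity rather than the logical block size. The distinction matters because a split that is logical-block aligned can still land mid-granule; a quick numeric illustration (all values made up):

#include <stdio.h>

#define ALIGN_DOWN(x, a)	((x) / (a) * (a))	/* simplified, a > 0 */

int main(void)
{
	unsigned int bytes = 70 * 1024;	/* candidate split size: 71680 */
	unsigned int lbs = 512;		/* logical block size          */
	unsigned int zwg = 16 * 1024;	/* zone write granularity      */

	printf("lbs-aligned: %u\n", ALIGN_DOWN(bytes, lbs));	/* 71680, mid-granule */
	printf("zwg-aligned: %u\n", ALIGN_DOWN(bytes, zwg));	/* 65536, valid split */
	return 0;
}
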
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index cf626e061dd774..b4fba7b398e5bc 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -120,9 +120,59 @@ void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
+ 	inflight[1] = mi.inflight[1];
+ }
+ 
+-void blk_freeze_queue_start(struct request_queue *q)
++#ifdef CONFIG_LOCKDEP
++static bool blk_freeze_set_owner(struct request_queue *q,
++				 struct task_struct *owner)
++{
++	if (!owner)
++		return false;
++
++	if (!q->mq_freeze_depth) {
++		q->mq_freeze_owner = owner;
++		q->mq_freeze_owner_depth = 1;
++		return true;
++	}
++
++	if (owner == q->mq_freeze_owner)
++		q->mq_freeze_owner_depth += 1;
++	return false;
++}
++
++/* verify the last unfreeze in owner context */
++static bool blk_unfreeze_check_owner(struct request_queue *q)
++{
++	if (!q->mq_freeze_owner)
++		return false;
++	if (q->mq_freeze_owner != current)
++		return false;
++	if (--q->mq_freeze_owner_depth == 0) {
++		q->mq_freeze_owner = NULL;
++		return true;
++	}
++	return false;
++}
++
++#else
++
++static bool blk_freeze_set_owner(struct request_queue *q,
++				 struct task_struct *owner)
++{
++	return false;
++}
++
++static bool blk_unfreeze_check_owner(struct request_queue *q)
+ {
++	return false;
++}
++#endif
++
++bool __blk_freeze_queue_start(struct request_queue *q,
++			      struct task_struct *owner)
++{
++	bool freeze;
++
+ 	mutex_lock(&q->mq_freeze_lock);
++	freeze = blk_freeze_set_owner(q, owner);
+ 	if (++q->mq_freeze_depth == 1) {
+ 		percpu_ref_kill(&q->q_usage_counter);
+ 		mutex_unlock(&q->mq_freeze_lock);
+@@ -131,6 +181,14 @@ void blk_freeze_queue_start(struct request_queue *q)
+ 	} else {
+ 		mutex_unlock(&q->mq_freeze_lock);
+ 	}
++
++	return freeze;
++}
++
++void blk_freeze_queue_start(struct request_queue *q)
++{
++	if (__blk_freeze_queue_start(q, current))
++		blk_freeze_acquire_lock(q, false, false);
+ }
+ EXPORT_SYMBOL_GPL(blk_freeze_queue_start);
+ 
+@@ -176,8 +234,10 @@ void blk_mq_freeze_queue(struct request_queue *q)
+ }
+ EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
+ 
+-void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic)
++bool __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic)
+ {
++	bool unfreeze;
++
+ 	mutex_lock(&q->mq_freeze_lock);
+ 	if (force_atomic)
+ 		q->q_usage_counter.data->force_atomic = true;
+@@ -187,15 +247,39 @@ void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic)
+ 		percpu_ref_resurrect(&q->q_usage_counter);
+ 		wake_up_all(&q->mq_freeze_wq);
+ 	}
++	unfreeze = blk_unfreeze_check_owner(q);
+ 	mutex_unlock(&q->mq_freeze_lock);
++
++	return unfreeze;
+ }
+ 
+ void blk_mq_unfreeze_queue(struct request_queue *q)
+ {
+-	__blk_mq_unfreeze_queue(q, false);
++	if (__blk_mq_unfreeze_queue(q, false))
++		blk_unfreeze_release_lock(q, false, false);
+ }
+ EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
+ 
++/*
++ * non_owner variant of blk_freeze_queue_start
++ *
++ * Unlike blk_freeze_queue_start, the queue doesn't need to be unfrozen
++ * by the same task.  This is fragile and should not be used if at all
++ * possible.
++ */
++void blk_freeze_queue_start_non_owner(struct request_queue *q)
++{
++	__blk_freeze_queue_start(q, NULL);
++}
++EXPORT_SYMBOL_GPL(blk_freeze_queue_start_non_owner);
++
++/* non_owner variant of blk_mq_unfreeze_queue */
++void blk_mq_unfreeze_queue_non_owner(struct request_queue *q)
++{
++	__blk_mq_unfreeze_queue(q, false);
++}
++EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue_non_owner);
++
+ /*
+  * FIXME: replace the scsi_internal_device_*block_nowait() calls in the
+  * mpt3sas driver such that this function can be removed.
+@@ -283,8 +367,9 @@ void blk_mq_quiesce_tagset(struct blk_mq_tag_set *set)
+ 		if (!blk_queue_skip_tagset_quiesce(q))
+ 			blk_mq_quiesce_queue_nowait(q);
+ 	}
+-	blk_mq_wait_quiesce_done(set);
+ 	mutex_unlock(&set->tag_list_lock);
++
++	blk_mq_wait_quiesce_done(set);
+ }
+ EXPORT_SYMBOL_GPL(blk_mq_quiesce_tagset);
+ 
+@@ -2200,6 +2285,24 @@ void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
+ }
+ EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);
+ 
++static inline bool blk_mq_hw_queue_need_run(struct blk_mq_hw_ctx *hctx)
++{
++	bool need_run;
++
++	/*
++	 * When queue is quiesced, we may be switching io scheduler, or
++	 * updating nr_hw_queues, or other things, and we can't run queue
++	 * any more, even blk_mq_hctx_has_pending() can't be called safely.
++	 *
++	 * And queue will be rerun in blk_mq_unquiesce_queue() if it is
++	 * quiesced.
++	 */
++	__blk_mq_run_dispatch_ops(hctx->queue, false,
++		need_run = !blk_queue_quiesced(hctx->queue) &&
++		blk_mq_hctx_has_pending(hctx));
++	return need_run;
++}
++
+ /**
+  * blk_mq_run_hw_queue - Start to run a hardware queue.
+  * @hctx: Pointer to the hardware queue to run.
+@@ -2220,20 +2323,23 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
+ 
+ 	might_sleep_if(!async && hctx->flags & BLK_MQ_F_BLOCKING);
+ 
+-	/*
+-	 * When queue is quiesced, we may be switching io scheduler, or
+-	 * updating nr_hw_queues, or other things, and we can't run queue
+-	 * any more, even __blk_mq_hctx_has_pending() can't be called safely.
+-	 *
+-	 * And queue will be rerun in blk_mq_unquiesce_queue() if it is
+-	 * quiesced.
+-	 */
+-	__blk_mq_run_dispatch_ops(hctx->queue, false,
+-		need_run = !blk_queue_quiesced(hctx->queue) &&
+-		blk_mq_hctx_has_pending(hctx));
++	need_run = blk_mq_hw_queue_need_run(hctx);
++	if (!need_run) {
++		unsigned long flags;
+ 
+-	if (!need_run)
+-		return;
++		/*
++		 * Synchronize with blk_mq_unquiesce_queue(), because we check
++		 * if hw queue is quiesced locklessly above, we need the use
++		 * ->queue_lock to make sure we see the up-to-date status to
++		 * not miss rerunning the hw queue.
++		 */
++		spin_lock_irqsave(&hctx->queue->queue_lock, flags);
++		need_run = blk_mq_hw_queue_need_run(hctx);
++		spin_unlock_irqrestore(&hctx->queue->queue_lock, flags);
++
++		if (!need_run)
++			return;
++	}
+ 
+ 	if (async || !cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask)) {
+ 		blk_mq_delay_run_hw_queue(hctx, 0);
+@@ -2390,6 +2496,12 @@ void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
+ 		return;
+ 
+ 	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
++	/*
++	 * Pairs with the smp_mb() in blk_mq_hctx_stopped() to order the
++	 * clearing of BLK_MQ_S_STOPPED above and the checking of dispatch
++	 * list in the subsequent routine.
++	 */
++	smp_mb__after_atomic();
+ 	blk_mq_run_hw_queue(hctx, async);
+ }
+ EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue);
+@@ -2620,6 +2732,7 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
+ 
+ 	if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(rq->q)) {
+ 		blk_mq_insert_request(rq, 0);
++		blk_mq_run_hw_queue(hctx, false);
+ 		return;
+ 	}
+ 
+@@ -2650,6 +2763,7 @@ static blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
+ 
+ 	if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(rq->q)) {
+ 		blk_mq_insert_request(rq, 0);
++		blk_mq_run_hw_queue(hctx, false);
+ 		return BLK_STS_OK;
+ 	}
+ 
+diff --git a/block/blk-mq.h b/block/blk-mq.h
+index 3bd43b10032f83..f4ac1af77a267e 100644
+--- a/block/blk-mq.h
++++ b/block/blk-mq.h
+@@ -230,6 +230,19 @@ static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data
+ 
+ static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
+ {
++	/* Fast path: hardware queue is not stopped most of the time. */
++	if (likely(!test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
++		return false;
++
++	/*
++	 * This barrier is used to order adding of dispatch list before and
++	 * the test of BLK_MQ_S_STOPPED below. Pairs with the memory barrier
++	 * in blk_mq_start_stopped_hw_queue() so that dispatch code could
++	 * either see BLK_MQ_S_STOPPED is cleared or dispatch list is not
++	 * empty to avoid missing dispatching requests.
++	 */
++	smp_mb();
++
+ 	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
+ }
+ 
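
The stopped-queue fix above is the classic two-flag race: one side publishes work and then checks a flag, the other clears the flag and then checks for work, and each needs a full barrier between its store and its load or both can take the early exit and strand a request. Schematic only, condensed from the two sites in this hunk:

/* Dispatch side, conceptually: */
list_add_tail(&rq->queuelist, &hctx->dispatch);	/* publish work */
if (!blk_mq_hctx_stopped(hctx))			/* smp_mb() inside */
	blk_mq_run_hw_queue(hctx, false);
/* else: whoever clears BLK_MQ_S_STOPPED must observe the new work */

/* Restart side (blk_mq_start_stopped_hw_queue()), conceptually: */
clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
smp_mb__after_atomic();			/* order the clear vs. the list read */
blk_mq_run_hw_queue(hctx, false);	/* sees the non-empty dispatch list */
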
+diff --git a/block/blk-settings.c b/block/blk-settings.c
+index a446654ddee5ef..7abf034089cd96 100644
+--- a/block/blk-settings.c
++++ b/block/blk-settings.c
+@@ -249,6 +249,13 @@ static int blk_validate_limits(struct queue_limits *lim)
+ 	if (lim->io_min < lim->physical_block_size)
+ 		lim->io_min = lim->physical_block_size;
+ 
++	/*
++	 * The optimal I/O size may not be aligned to physical block size
++	 * (because it may be limited by dma engines which have no clue about
++	 * block size of the disks attached to them), so we round it down here.
++	 */
++	lim->io_opt = round_down(lim->io_opt, lim->physical_block_size);
++
+ 	/*
+ 	 * max_hw_sectors has a somewhat weird default for historical reason,
+ 	 * but driver really should set their own instead of relying on this
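
round_down() in its kernel form requires a power-of-two divisor, which physical_block_size is guaranteed to be. The effect of the new rounding on an odd optimal-I/O size reported by a DMA-limited controller, with illustrative numbers:

#include <stdio.h>

/* Same shape as the kernel's power-of-two round_down(). */
#define round_down(x, y)	((x) & ~((__typeof__(x))((y) - 1)))

int main(void)
{
	unsigned int io_opt = 1024 * 1024 + 512;	/* 1049088, not 4k-aligned */
	unsigned int pbs = 4096;

	printf("io_opt -> %u\n", round_down(io_opt, pbs));	/* 1048576 */
	return 0;
}
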
+diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
+index e85941bec857b6..207577145c54f4 100644
+--- a/block/blk-sysfs.c
++++ b/block/blk-sysfs.c
+@@ -794,10 +794,8 @@ int blk_register_queue(struct gendisk *disk)
+ 	 * faster to shut down and is made fully functional here as
+ 	 * request_queues for non-existent devices never get registered.
+ 	 */
+-	if (!blk_queue_init_done(q)) {
+-		blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
+-		percpu_ref_switch_to_percpu(&q->q_usage_counter);
+-	}
++	blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
++	percpu_ref_switch_to_percpu(&q->q_usage_counter);
+ 
+ 	return ret;
+ 
+diff --git a/block/blk-zoned.c b/block/blk-zoned.c
+index af19296fa50df1..95e517723db3e4 100644
+--- a/block/blk-zoned.c
++++ b/block/blk-zoned.c
+@@ -1541,6 +1541,7 @@ static int disk_update_zone_resources(struct gendisk *disk,
+ 	unsigned int nr_seq_zones, nr_conv_zones = 0;
+ 	unsigned int pool_size;
+ 	struct queue_limits lim;
++	int ret;
+ 
+ 	disk->nr_zones = args->nr_zones;
+ 	disk->zone_capacity = args->zone_capacity;
+@@ -1593,7 +1594,11 @@ static int disk_update_zone_resources(struct gendisk *disk,
+ 	}
+ 
+ commit:
+-	return queue_limits_commit_update(q, &lim);
++	blk_mq_freeze_queue(q);
++	ret = queue_limits_commit_update(q, &lim);
++	blk_mq_unfreeze_queue(q);
++
++	return ret;
+ }
+ 
+ static int blk_revalidate_conv_zone(struct blk_zone *zone, unsigned int idx,
+@@ -1814,14 +1819,15 @@ int blk_revalidate_disk_zones(struct gendisk *disk)
+ 	 * Set the new disk zone parameters only once the queue is frozen and
+ 	 * all I/Os are completed.
+ 	 */
+-	blk_mq_freeze_queue(q);
+ 	if (ret > 0)
+ 		ret = disk_update_zone_resources(disk, &args);
+ 	else
+ 		pr_warn("%s: failed to revalidate zones\n", disk->disk_name);
+-	if (ret)
++	if (ret) {
++		blk_mq_freeze_queue(q);
+ 		disk_free_zone_resources(disk);
+-	blk_mq_unfreeze_queue(q);
++		blk_mq_unfreeze_queue(q);
++	}
+ 
+ 	kfree(args.conv_zones_bitmap);
+ 
+diff --git a/block/blk.h b/block/blk.h
+index c718e4291db062..88fab6a81701ed 100644
+--- a/block/blk.h
++++ b/block/blk.h
+@@ -4,6 +4,7 @@
+ 
+ #include <linux/bio-integrity.h>
+ #include <linux/blk-crypto.h>
++#include <linux/lockdep.h>
+ #include <linux/memblock.h>	/* for max_pfn/max_low_pfn */
+ #include <linux/sched/sysctl.h>
+ #include <linux/timekeeping.h>
+@@ -35,8 +36,10 @@ struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
+ void blk_free_flush_queue(struct blk_flush_queue *q);
+ 
+ void blk_freeze_queue(struct request_queue *q);
+-void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic);
+-void blk_queue_start_drain(struct request_queue *q);
++bool __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic);
++bool blk_queue_start_drain(struct request_queue *q);
++bool __blk_freeze_queue_start(struct request_queue *q,
++			      struct task_struct *owner);
+ int __bio_queue_enter(struct request_queue *q, struct bio *bio);
+ void submit_bio_noacct_nocheck(struct bio *bio);
+ void bio_await_chain(struct bio *bio);
+@@ -69,8 +72,11 @@ static inline int bio_queue_enter(struct bio *bio)
+ {
+ 	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
+ 
+-	if (blk_try_enter_queue(q, false))
++	if (blk_try_enter_queue(q, false)) {
++		rwsem_acquire_read(&q->io_lockdep_map, 0, 0, _RET_IP_);
++		rwsem_release(&q->io_lockdep_map, _RET_IP_);
+ 		return 0;
++	}
+ 	return __bio_queue_enter(q, bio);
+ }
+ 
+@@ -734,4 +740,22 @@ void blk_integrity_verify(struct bio *bio);
+ void blk_integrity_prepare(struct request *rq);
+ void blk_integrity_complete(struct request *rq, unsigned int nr_bytes);
+ 
++static inline void blk_freeze_acquire_lock(struct request_queue *q, bool
++		disk_dead, bool queue_dying)
++{
++	if (!disk_dead)
++		rwsem_acquire(&q->io_lockdep_map, 0, 1, _RET_IP_);
++	if (!queue_dying)
++		rwsem_acquire(&q->q_lockdep_map, 0, 1, _RET_IP_);
++}
++
++static inline void blk_unfreeze_release_lock(struct request_queue *q, bool
++		disk_dead, bool queue_dying)
++{
++	if (!queue_dying)
++		rwsem_release(&q->q_lockdep_map, _RET_IP_);
++	if (!disk_dead)
++		rwsem_release(&q->io_lockdep_map, _RET_IP_);
++}
++
+ #endif /* BLK_INTERNAL_H */
+diff --git a/block/elevator.c b/block/elevator.c
+index 9430cde13d1a41..43ba4ab1ada7fd 100644
+--- a/block/elevator.c
++++ b/block/elevator.c
+@@ -598,13 +598,19 @@ void elevator_init_mq(struct request_queue *q)
+ 	 * drain any dispatch activities originated from passthrough
+ 	 * requests, then no need to quiesce queue which may add long boot
+ 	 * latency, especially when lots of disks are involved.
++	 *
++	 * Disk isn't added yet, so verifying queue lock only manually.
+ 	 */
+-	blk_mq_freeze_queue(q);
++	blk_freeze_queue_start_non_owner(q);
++	blk_freeze_acquire_lock(q, true, false);
++	blk_mq_freeze_queue_wait(q);
++
+ 	blk_mq_cancel_work_sync(q);
+ 
+ 	err = blk_mq_init_sched(q, e);
+ 
+-	blk_mq_unfreeze_queue(q);
++	blk_unfreeze_release_lock(q, true, false);
++	blk_mq_unfreeze_queue_non_owner(q);
+ 
+ 	if (err) {
+ 		pr_warn("\"%s\" elevator initialization failed, "
+diff --git a/block/fops.c b/block/fops.c
+index e696ae53bf1e08..13a67940d0408d 100644
+--- a/block/fops.c
++++ b/block/fops.c
+@@ -35,13 +35,10 @@ static blk_opf_t dio_bio_write_op(struct kiocb *iocb)
+ 	return opf;
+ }
+ 
+-static bool blkdev_dio_invalid(struct block_device *bdev, loff_t pos,
+-				struct iov_iter *iter, bool is_atomic)
++static bool blkdev_dio_invalid(struct block_device *bdev, struct kiocb *iocb,
++				struct iov_iter *iter)
+ {
+-	if (is_atomic && !generic_atomic_write_valid(iter, pos))
+-		return true;
+-
+-	return pos & (bdev_logical_block_size(bdev) - 1) ||
++	return iocb->ki_pos & (bdev_logical_block_size(bdev) - 1) ||
+ 		!bdev_iter_is_aligned(bdev, iter);
+ }
+ 
+@@ -368,13 +365,12 @@ static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
+ static ssize_t blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
+ {
+ 	struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
+-	bool is_atomic = iocb->ki_flags & IOCB_ATOMIC;
+ 	unsigned int nr_pages;
+ 
+ 	if (!iov_iter_count(iter))
+ 		return 0;
+ 
+-	if (blkdev_dio_invalid(bdev, iocb->ki_pos, iter, is_atomic))
++	if (blkdev_dio_invalid(bdev, iocb, iter))
+ 		return -EINVAL;
+ 
+ 	nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS + 1);
+@@ -383,7 +379,7 @@ static ssize_t blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
+ 			return __blkdev_direct_IO_simple(iocb, iter, bdev,
+ 							nr_pages);
+ 		return __blkdev_direct_IO_async(iocb, iter, bdev, nr_pages);
+-	} else if (is_atomic) {
++	} else if (iocb->ki_flags & IOCB_ATOMIC) {
+ 		return -EINVAL;
+ 	}
+ 	return __blkdev_direct_IO(iocb, iter, bdev, bio_max_segs(nr_pages));
+@@ -625,7 +621,7 @@ static int blkdev_open(struct inode *inode, struct file *filp)
+ 	if (!bdev)
+ 		return -ENXIO;
+ 
+-	if (bdev_can_atomic_write(bdev) && filp->f_flags & O_DIRECT)
++	if (bdev_can_atomic_write(bdev))
+ 		filp->f_mode |= FMODE_CAN_ATOMIC_WRITE;
+ 
+ 	ret = bdev_open(bdev, mode, filp->private_data, NULL, filp);
+@@ -681,6 +677,7 @@ static ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
+ 	struct file *file = iocb->ki_filp;
+ 	struct inode *bd_inode = bdev_file_inode(file);
+ 	struct block_device *bdev = I_BDEV(bd_inode);
++	bool atomic = iocb->ki_flags & IOCB_ATOMIC;
+ 	loff_t size = bdev_nr_bytes(bdev);
+ 	size_t shorted = 0;
+ 	ssize_t ret;
+@@ -700,8 +697,16 @@ static ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
+ 	if ((iocb->ki_flags & (IOCB_NOWAIT | IOCB_DIRECT)) == IOCB_NOWAIT)
+ 		return -EOPNOTSUPP;
+ 
++	if (atomic) {
++		ret = generic_atomic_write_valid(iocb, from);
++		if (ret)
++			return ret;
++	}
++
+ 	size -= iocb->ki_pos;
+ 	if (iov_iter_count(from) > size) {
++		if (atomic)
++			return -EINVAL;
+ 		shorted = iov_iter_count(from) - size;
+ 		iov_iter_truncate(from, size);
+ 	}
+diff --git a/block/genhd.c b/block/genhd.c
+index 1c05dd4c6980b5..8645cf3b0816e4 100644
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -581,13 +581,13 @@ static void blk_report_disk_dead(struct gendisk *disk, bool surprise)
+ 	rcu_read_unlock();
+ }
+ 
+-static void __blk_mark_disk_dead(struct gendisk *disk)
++static bool __blk_mark_disk_dead(struct gendisk *disk)
+ {
+ 	/*
+ 	 * Fail any new I/O.
+ 	 */
+ 	if (test_and_set_bit(GD_DEAD, &disk->state))
+-		return;
++		return false;
+ 
+ 	if (test_bit(GD_OWNS_QUEUE, &disk->state))
+ 		blk_queue_flag_set(QUEUE_FLAG_DYING, disk->queue);
+@@ -600,7 +600,7 @@ static void __blk_mark_disk_dead(struct gendisk *disk)
+ 	/*
+ 	 * Prevent new I/O from crossing bio_queue_enter().
+ 	 */
+-	blk_queue_start_drain(disk->queue);
++	return blk_queue_start_drain(disk->queue);
+ }
+ 
+ /**
+@@ -641,6 +641,7 @@ void del_gendisk(struct gendisk *disk)
+ 	struct request_queue *q = disk->queue;
+ 	struct block_device *part;
+ 	unsigned long idx;
++	bool start_drain, queue_dying;
+ 
+ 	might_sleep();
+ 
+@@ -668,7 +669,10 @@ void del_gendisk(struct gendisk *disk)
+ 	 * Drop all partitions now that the disk is marked dead.
+ 	 */
+ 	mutex_lock(&disk->open_mutex);
+-	__blk_mark_disk_dead(disk);
++	start_drain = __blk_mark_disk_dead(disk);
++	queue_dying = blk_queue_dying(q);
++	if (start_drain)
++		blk_freeze_acquire_lock(q, true, queue_dying);
+ 	xa_for_each_start(&disk->part_tbl, idx, part, 1)
+ 		drop_partition(part);
+ 	mutex_unlock(&disk->open_mutex);
+@@ -718,13 +722,13 @@ void del_gendisk(struct gendisk *disk)
+ 	 * If the disk does not own the queue, allow using passthrough requests
+ 	 * again.  Else leave the queue frozen to fail all I/O.
+ 	 */
+-	if (!test_bit(GD_OWNS_QUEUE, &disk->state)) {
+-		blk_queue_flag_clear(QUEUE_FLAG_INIT_DONE, q);
++	if (!test_bit(GD_OWNS_QUEUE, &disk->state))
+ 		__blk_mq_unfreeze_queue(q, true);
+-	} else {
+-		if (queue_is_mq(q))
+-			blk_mq_exit_queue(q);
+-	}
++	else if (queue_is_mq(q))
++		blk_mq_exit_queue(q);
++
++	if (start_drain)
++		blk_unfreeze_release_lock(q, true, queue_dying);
+ }
+ EXPORT_SYMBOL(del_gendisk);
+ 
+diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
+index d0d954fe9d54f3..7fc79e7dce44a9 100644
+--- a/crypto/pcrypt.c
++++ b/crypto/pcrypt.c
+@@ -117,8 +117,10 @@ static int pcrypt_aead_encrypt(struct aead_request *req)
+ 	err = padata_do_parallel(ictx->psenc, padata, &ctx->cb_cpu);
+ 	if (!err)
+ 		return -EINPROGRESS;
+-	if (err == -EBUSY)
+-		return -EAGAIN;
++	if (err == -EBUSY) {
++		/* try non-parallel mode */
++		return crypto_aead_encrypt(creq);
++	}
+ 
+ 	return err;
+ }
+@@ -166,8 +168,10 @@ static int pcrypt_aead_decrypt(struct aead_request *req)
+ 	err = padata_do_parallel(ictx->psdec, padata, &ctx->cb_cpu);
+ 	if (!err)
+ 		return -EINPROGRESS;
+-	if (err == -EBUSY)
+-		return -EAGAIN;
++	if (err == -EBUSY) {
++		/* try non-parallel mode */
++		return crypto_aead_decrypt(creq);
++	}
+ 
+ 	return err;
+ }
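
Rather than translating padata's -EBUSY into -EAGAIN, a saturated parallel queue now degrades to the ordinary synchronous crypto path, so callers always get either -EINPROGRESS or a final result. The general shape of the fallback, sketched outside the crypto API with hypothetical names:

/* Sketch: async submit with a synchronous fallback on backpressure. */
static int submit_with_fallback(struct my_req *r)
{
	int err = submit_parallel(r);	/* hand off to the worker pool */

	if (!err)
		return -EINPROGRESS;	/* completion arrives via callback */
	if (err == -EBUSY)
		return do_sync(r);	/* pool saturated: run inline */
	return err;			/* genuine failure */
}
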
+diff --git a/drivers/accel/ivpu/ivpu_ipc.c b/drivers/accel/ivpu/ivpu_ipc.c
+index 78b32a8232419e..29b723039a3459 100644
+--- a/drivers/accel/ivpu/ivpu_ipc.c
++++ b/drivers/accel/ivpu/ivpu_ipc.c
+@@ -291,15 +291,16 @@ int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
+ 	return ret;
+ }
+ 
+-static int
++int
+ ivpu_ipc_send_receive_internal(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
+ 			       enum vpu_ipc_msg_type expected_resp_type,
+-			       struct vpu_jsm_msg *resp, u32 channel,
+-			       unsigned long timeout_ms)
++			       struct vpu_jsm_msg *resp, u32 channel, unsigned long timeout_ms)
+ {
+ 	struct ivpu_ipc_consumer cons;
+ 	int ret;
+ 
++	drm_WARN_ON(&vdev->drm, pm_runtime_status_suspended(vdev->drm.dev));
++
+ 	ivpu_ipc_consumer_add(vdev, &cons, channel, NULL);
+ 
+ 	ret = ivpu_ipc_send(vdev, &cons, req);
+@@ -325,19 +326,21 @@ ivpu_ipc_send_receive_internal(struct ivpu_device *vdev, struct vpu_jsm_msg *req
+ 	return ret;
+ }
+ 
+-int ivpu_ipc_send_receive_active(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
+-				 enum vpu_ipc_msg_type expected_resp, struct vpu_jsm_msg *resp,
+-				 u32 channel, unsigned long timeout_ms)
++int ivpu_ipc_send_receive(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
++			  enum vpu_ipc_msg_type expected_resp, struct vpu_jsm_msg *resp,
++			  u32 channel, unsigned long timeout_ms)
+ {
+ 	struct vpu_jsm_msg hb_req = { .type = VPU_JSM_MSG_QUERY_ENGINE_HB };
+ 	struct vpu_jsm_msg hb_resp;
+ 	int ret, hb_ret;
+ 
+-	drm_WARN_ON(&vdev->drm, pm_runtime_status_suspended(vdev->drm.dev));
++	ret = ivpu_rpm_get(vdev);
++	if (ret < 0)
++		return ret;
+ 
+ 	ret = ivpu_ipc_send_receive_internal(vdev, req, expected_resp, resp, channel, timeout_ms);
+ 	if (ret != -ETIMEDOUT)
+-		return ret;
++		goto rpm_put;
+ 
+ 	hb_ret = ivpu_ipc_send_receive_internal(vdev, &hb_req, VPU_JSM_MSG_QUERY_ENGINE_HB_DONE,
+ 						&hb_resp, VPU_IPC_CHAN_ASYNC_CMD,
+@@ -345,21 +348,7 @@ int ivpu_ipc_send_receive_active(struct ivpu_device *vdev, struct vpu_jsm_msg *r
+ 	if (hb_ret == -ETIMEDOUT)
+ 		ivpu_pm_trigger_recovery(vdev, "IPC timeout");
+ 
+-	return ret;
+-}
+-
+-int ivpu_ipc_send_receive(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
+-			  enum vpu_ipc_msg_type expected_resp, struct vpu_jsm_msg *resp,
+-			  u32 channel, unsigned long timeout_ms)
+-{
+-	int ret;
+-
+-	ret = ivpu_rpm_get(vdev);
+-	if (ret < 0)
+-		return ret;
+-
+-	ret = ivpu_ipc_send_receive_active(vdev, req, expected_resp, resp, channel, timeout_ms);
+-
++rpm_put:
+ 	ivpu_rpm_put(vdev);
+ 	return ret;
+ }
+diff --git a/drivers/accel/ivpu/ivpu_ipc.h b/drivers/accel/ivpu/ivpu_ipc.h
+index 4fe38141045ea3..fb4de7fb8210ea 100644
+--- a/drivers/accel/ivpu/ivpu_ipc.h
++++ b/drivers/accel/ivpu/ivpu_ipc.h
+@@ -101,10 +101,9 @@ int ivpu_ipc_send(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
+ int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
+ 		     struct ivpu_ipc_hdr *ipc_buf, struct vpu_jsm_msg *jsm_msg,
+ 		     unsigned long timeout_ms);
+-
+-int ivpu_ipc_send_receive_active(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
+-				 enum vpu_ipc_msg_type expected_resp, struct vpu_jsm_msg *resp,
+-				 u32 channel, unsigned long timeout_ms);
++int ivpu_ipc_send_receive_internal(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
++				   enum vpu_ipc_msg_type expected_resp_type,
++				   struct vpu_jsm_msg *resp, u32 channel, unsigned long timeout_ms);
+ int ivpu_ipc_send_receive(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
+ 			  enum vpu_ipc_msg_type expected_resp, struct vpu_jsm_msg *resp,
+ 			  u32 channel, unsigned long timeout_ms);
+diff --git a/drivers/accel/ivpu/ivpu_jsm_msg.c b/drivers/accel/ivpu/ivpu_jsm_msg.c
+index 46ef16c3c06910..88105963c1b288 100644
+--- a/drivers/accel/ivpu/ivpu_jsm_msg.c
++++ b/drivers/accel/ivpu/ivpu_jsm_msg.c
+@@ -270,9 +270,8 @@ int ivpu_jsm_pwr_d0i3_enter(struct ivpu_device *vdev)
+ 
+ 	req.payload.pwr_d0i3_enter.send_response = 1;
+ 
+-	ret = ivpu_ipc_send_receive_active(vdev, &req, VPU_JSM_MSG_PWR_D0I3_ENTER_DONE,
+-					   &resp, VPU_IPC_CHAN_GEN_CMD,
+-					   vdev->timeout.d0i3_entry_msg);
++	ret = ivpu_ipc_send_receive_internal(vdev, &req, VPU_JSM_MSG_PWR_D0I3_ENTER_DONE, &resp,
++					     VPU_IPC_CHAN_GEN_CMD, vdev->timeout.d0i3_entry_msg);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -430,8 +429,8 @@ int ivpu_jsm_hws_setup_priority_bands(struct ivpu_device *vdev)
+ 
+ 	req.payload.hws_priority_band_setup.normal_band_percentage = 10;
+ 
+-	ret = ivpu_ipc_send_receive_active(vdev, &req, VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP_RSP,
+-					   &resp, VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
++	ret = ivpu_ipc_send_receive_internal(vdev, &req, VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP_RSP,
++					     &resp, VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
+ 	if (ret)
+ 		ivpu_warn_ratelimited(vdev, "Failed to set priority bands: %d\n", ret);
+ 
+@@ -544,9 +543,8 @@ int ivpu_jsm_dct_enable(struct ivpu_device *vdev, u32 active_us, u32 inactive_us
+ 	req.payload.pwr_dct_control.dct_active_us = active_us;
+ 	req.payload.pwr_dct_control.dct_inactive_us = inactive_us;
+ 
+-	return ivpu_ipc_send_receive_active(vdev, &req, VPU_JSM_MSG_DCT_ENABLE_DONE,
+-					    &resp, VPU_IPC_CHAN_ASYNC_CMD,
+-					    vdev->timeout.jsm);
++	return ivpu_ipc_send_receive_internal(vdev, &req, VPU_JSM_MSG_DCT_ENABLE_DONE, &resp,
++					      VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
+ }
+ 
+ int ivpu_jsm_dct_disable(struct ivpu_device *vdev)
+@@ -554,7 +552,6 @@ int ivpu_jsm_dct_disable(struct ivpu_device *vdev)
+ 	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_DCT_DISABLE };
+ 	struct vpu_jsm_msg resp;
+ 
+-	return ivpu_ipc_send_receive_active(vdev, &req, VPU_JSM_MSG_DCT_DISABLE_DONE,
+-					    &resp, VPU_IPC_CHAN_ASYNC_CMD,
+-					    vdev->timeout.jsm);
++	return ivpu_ipc_send_receive_internal(vdev, &req, VPU_JSM_MSG_DCT_DISABLE_DONE, &resp,
++					      VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
+ }
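
After this ivpu refactor, ivpu_ipc_send_receive() owns the runtime-PM reference for the whole transaction (including the heartbeat retry), while the _internal variant only asserts the device is already resumed. The bracketing invariant, sketched with the generic runtime-PM helpers it appears to build on (do_ipc_internal and struct msg are hypothetical):

static int do_ipc_resumed(struct device *dev, struct msg *req, struct msg *resp)
{
	int ret = pm_runtime_resume_and_get(dev);	/* resume + take ref */

	if (ret < 0)
		return ret;
	ret = do_ipc_internal(dev, req, resp);	/* may WARN if suspended */
	pm_runtime_put(dev);			/* drop ref, allow suspend */
	return ret;
}
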
+diff --git a/drivers/acpi/arm64/gtdt.c b/drivers/acpi/arm64/gtdt.c
+index c0e77c1c8e09d6..eb6c2d3603874a 100644
+--- a/drivers/acpi/arm64/gtdt.c
++++ b/drivers/acpi/arm64/gtdt.c
+@@ -283,7 +283,7 @@ static int __init gtdt_parse_timer_block(struct acpi_gtdt_timer_block *block,
+ 		if (frame->virt_irq > 0)
+ 			acpi_unregister_gsi(gtdt_frame->virtual_timer_interrupt);
+ 		frame->virt_irq = 0;
+-	} while (i-- >= 0 && gtdt_frame--);
++	} while (i-- > 0 && gtdt_frame--);
+ 
+ 	return -EINVAL;
+ }
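
The gtdt one-character fix stops the unwind loop one iteration early: with "i-- >= 0" the do/while body runs again after i reaches zero, stepping gtdt_frame (and frame) one element before the start of their arrays. A compact reproduction of the off-by-one in the same do/while shape:

#include <stdio.h>

int main(void)
{
	int a[3] = { 10, 20, 30 };
	int i = 2;			/* index where setup failed */
	int *p = &a[i];
	int visits = 0;

	do {
		visits++;		/* with ">= 0" this runs 4 times */
	} while (i-- > 0 && p--);	/* "> 0": 3 visits, p stays in bounds */

	printf("visits=%d, p=&a[%td]\n", visits, p - a);
	return 0;
}
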
+diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
+index 5c0cc7aae8726b..e78e3754d99e1d 100644
+--- a/drivers/acpi/cppc_acpi.c
++++ b/drivers/acpi/cppc_acpi.c
+@@ -1140,7 +1140,6 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
+ 			return -EFAULT;
+ 		}
+ 		val = MASK_VAL_WRITE(reg, prev_val, val);
+-		val |= prev_val;
+ 	}
+ 
+ 	switch (size) {
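
The deleted "val |= prev_val" OR-ed the old register contents back in after MASK_VAL_WRITE() had already merged them, so clearing any bit inside the field silently failed. Illustrative arithmetic below; mask_val_write() is a guess at the macro's usual read-modify-write shape, not its literal definition:

#include <stdint.h>
#include <stdio.h>

static uint64_t mask_val_write(uint64_t prev, uint64_t mask, int shift,
			       uint64_t val)
{
	return (prev & ~(mask << shift)) | ((val & mask) << shift);
}

int main(void)
{
	uint64_t prev = 0xff;	/* low nibble is the field being written */
	uint64_t good = mask_val_write(prev, 0xf, 0, 0x0);	/* clear field */
	uint64_t bad  = good | prev;		/* the removed line undoes it */

	printf("good=%#llx bad=%#llx\n",	/* good=0xf0 bad=0xff */
	       (unsigned long long)good, (unsigned long long)bad);
	return 0;
}
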
+diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
+index 324a9a3c087aa2..c6664a78796979 100644
+--- a/drivers/base/firmware_loader/main.c
++++ b/drivers/base/firmware_loader/main.c
+@@ -829,19 +829,18 @@ static void fw_log_firmware_info(const struct firmware *fw, const char *name, st
+ 	shash->tfm = alg;
+ 
+ 	if (crypto_shash_digest(shash, fw->data, fw->size, sha256buf) < 0)
+-		goto out_shash;
++		goto out_free;
+ 
+ 	for (int i = 0; i < SHA256_DIGEST_SIZE; i++)
+ 		sprintf(&outbuf[i * 2], "%02x", sha256buf[i]);
+ 	outbuf[SHA256_BLOCK_SIZE] = 0;
+ 	dev_dbg(device, "Loaded FW: %s, sha256: %s\n", name, outbuf);
+ 
+-out_shash:
+-	crypto_free_shash(alg);
+ out_free:
+ 	kfree(shash);
+ 	kfree(outbuf);
+ 	kfree(sha256buf);
++	crypto_free_shash(alg);
+ }
+ #else
+ static void fw_log_firmware_info(const struct firmware *fw, const char *name,
+diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
+index a750e48a26b87c..6981e5f974e9a4 100644
+--- a/drivers/base/regmap/regmap-irq.c
++++ b/drivers/base/regmap/regmap-irq.c
+@@ -514,12 +514,16 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
+ 		return IRQ_NONE;
+ }
+ 
++static struct lock_class_key regmap_irq_lock_class;
++static struct lock_class_key regmap_irq_request_class;
++
+ static int regmap_irq_map(struct irq_domain *h, unsigned int virq,
+ 			  irq_hw_number_t hw)
+ {
+ 	struct regmap_irq_chip_data *data = h->host_data;
+ 
+ 	irq_set_chip_data(virq, data);
++	irq_set_lockdep_class(virq, &regmap_irq_lock_class, &regmap_irq_request_class);
+ 	irq_set_chip(virq, &data->irq_chip);
+ 	irq_set_nested_thread(virq, 1);
+ 	irq_set_parent(virq, data->irq);
+diff --git a/drivers/base/trace.h b/drivers/base/trace.h
+index e52b6eae060dde..3b83b13a57ff1e 100644
+--- a/drivers/base/trace.h
++++ b/drivers/base/trace.h
+@@ -24,18 +24,18 @@ DECLARE_EVENT_CLASS(devres,
+ 		__field(struct device *, dev)
+ 		__field(const char *, op)
+ 		__field(void *, node)
+-		__field(const char *, name)
++		__string(name, name)
+ 		__field(size_t, size)
+ 	),
+ 	TP_fast_assign(
+ 		__assign_str(devname);
+ 		__entry->op = op;
+ 		__entry->node = node;
+-		__entry->name = name;
++		__assign_str(name);
+ 		__entry->size = size;
+ 	),
+ 	TP_printk("%s %3s %p %s (%zu bytes)", __get_str(devname),
+-		  __entry->op, __entry->node, __entry->name, __entry->size)
++		  __entry->op, __entry->node, __get_str(name), __entry->size)
+ );
+ 
+ DEFINE_EVENT(devres, devres_log,
+diff --git a/drivers/block/brd.c b/drivers/block/brd.c
+index 2fd1ed1017481b..292f127cae0abe 100644
+--- a/drivers/block/brd.c
++++ b/drivers/block/brd.c
+@@ -231,8 +231,10 @@ static void brd_do_discard(struct brd_device *brd, sector_t sector, u32 size)
+ 	xa_lock(&brd->brd_pages);
+ 	while (size >= PAGE_SIZE && aligned_sector < rd_size * 2) {
+ 		page = __xa_erase(&brd->brd_pages, aligned_sector >> PAGE_SECTORS_SHIFT);
+-		if (page)
++		if (page) {
+ 			__free_page(page);
++			brd->brd_nr_pages--;
++		}
+ 		aligned_sector += PAGE_SECTORS;
+ 		size -= PAGE_SIZE;
+ 	}
+@@ -316,8 +318,40 @@ __setup("ramdisk_size=", ramdisk_size);
+  * (should share code eventually).
+  */
+ static LIST_HEAD(brd_devices);
++static DEFINE_MUTEX(brd_devices_mutex);
+ static struct dentry *brd_debugfs_dir;
+ 
++static struct brd_device *brd_find_or_alloc_device(int i)
++{
++	struct brd_device *brd;
++
++	mutex_lock(&brd_devices_mutex);
++	list_for_each_entry(brd, &brd_devices, brd_list) {
++		if (brd->brd_number == i) {
++			mutex_unlock(&brd_devices_mutex);
++			return ERR_PTR(-EEXIST);
++		}
++	}
++
++	brd = kzalloc(sizeof(*brd), GFP_KERNEL);
++	if (!brd) {
++		mutex_unlock(&brd_devices_mutex);
++		return ERR_PTR(-ENOMEM);
++	}
++	brd->brd_number	= i;
++	list_add_tail(&brd->brd_list, &brd_devices);
++	mutex_unlock(&brd_devices_mutex);
++	return brd;
++}
++
++static void brd_free_device(struct brd_device *brd)
++{
++	mutex_lock(&brd_devices_mutex);
++	list_del(&brd->brd_list);
++	mutex_unlock(&brd_devices_mutex);
++	kfree(brd);
++}
++
+ static int brd_alloc(int i)
+ {
+ 	struct brd_device *brd;
+@@ -340,14 +374,9 @@ static int brd_alloc(int i)
+ 					  BLK_FEAT_NOWAIT,
+ 	};
+ 
+-	list_for_each_entry(brd, &brd_devices, brd_list)
+-		if (brd->brd_number == i)
+-			return -EEXIST;
+-	brd = kzalloc(sizeof(*brd), GFP_KERNEL);
+-	if (!brd)
+-		return -ENOMEM;
+-	brd->brd_number		= i;
+-	list_add_tail(&brd->brd_list, &brd_devices);
++	brd = brd_find_or_alloc_device(i);
++	if (IS_ERR(brd))
++		return PTR_ERR(brd);
+ 
+ 	xa_init(&brd->brd_pages);
+ 
+@@ -378,8 +407,7 @@ static int brd_alloc(int i)
+ out_cleanup_disk:
+ 	put_disk(disk);
+ out_free_dev:
+-	list_del(&brd->brd_list);
+-	kfree(brd);
++	brd_free_device(brd);
+ 	return err;
+ }
+ 
+@@ -398,8 +426,7 @@ static void brd_cleanup(void)
+ 		del_gendisk(brd->brd_disk);
+ 		put_disk(brd->brd_disk);
+ 		brd_free_pages(brd);
+-		list_del(&brd->brd_list);
+-		kfree(brd);
++		brd_free_device(brd);
+ 	}
+ }
+ 
+@@ -426,16 +453,6 @@ static int __init brd_init(void)
+ {
+ 	int err, i;
+ 
+-	brd_check_and_reset_par();
+-
+-	brd_debugfs_dir = debugfs_create_dir("ramdisk_pages", NULL);
+-
+-	for (i = 0; i < rd_nr; i++) {
+-		err = brd_alloc(i);
+-		if (err)
+-			goto out_free;
+-	}
+-
+ 	/*
+ 	 * brd module now has a feature to instantiate underlying device
+ 	 * structure on-demand, provided that there is an access dev node.
+@@ -451,11 +468,18 @@ static int __init brd_init(void)
+ 	 *	dynamically.
+ 	 */
+ 
++	brd_check_and_reset_par();
++
++	brd_debugfs_dir = debugfs_create_dir("ramdisk_pages", NULL);
++
+ 	if (__register_blkdev(RAMDISK_MAJOR, "ramdisk", brd_probe)) {
+ 		err = -EIO;
+ 		goto out_free;
+ 	}
+ 
++	for (i = 0; i < rd_nr; i++)
++		brd_alloc(i);
++
+ 	pr_info("brd: module loaded\n");
+ 	return 0;
+ 
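/* The brd helpers above close a race between module init and the
 * on-demand probe path: the lookup and the list insertion must happen
 * under one lock, or two racing callers can both miss the entry and
 * both allocate. A minimal sketch of the check-then-insert idiom
 * (hypothetical names):
 */
static DEFINE_MUTEX(demo_mutex);
static LIST_HEAD(demo_list);

struct demo_dev {
	int id;
	struct list_head list;
};

static struct demo_dev *demo_find_or_alloc(int id)
{
	struct demo_dev *d;

	mutex_lock(&demo_mutex);
	list_for_each_entry(d, &demo_list, list) {
		if (d->id == id) {
			mutex_unlock(&demo_mutex);
			return ERR_PTR(-EEXIST);
		}
	}
	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (d) {
		d->id = id;
		list_add_tail(&d->list, &demo_list);
	}
	mutex_unlock(&demo_mutex);
	return d ?: ERR_PTR(-ENOMEM);
}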
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index 78a7bb28defe4c..86cc3b19faae86 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -173,7 +173,7 @@ static loff_t get_loop_size(struct loop_device *lo, struct file *file)
+ static bool lo_bdev_can_use_dio(struct loop_device *lo,
+ 		struct block_device *backing_bdev)
+ {
+-	unsigned short sb_bsize = bdev_logical_block_size(backing_bdev);
++	unsigned int sb_bsize = bdev_logical_block_size(backing_bdev);
+ 
+ 	if (queue_logical_block_size(lo->lo_queue) < sb_bsize)
+ 		return false;
+@@ -977,7 +977,7 @@ loop_set_status_from_info(struct loop_device *lo,
+ 	return 0;
+ }
+ 
+-static unsigned short loop_default_blocksize(struct loop_device *lo,
++static unsigned int loop_default_blocksize(struct loop_device *lo,
+ 		struct block_device *backing_bdev)
+ {
+ 	/* In case of direct I/O, match underlying block size */
+@@ -986,7 +986,7 @@ static unsigned short loop_default_blocksize(struct loop_device *lo,
+ 	return SECTOR_SIZE;
+ }
+ 
+-static int loop_reconfigure_limits(struct loop_device *lo, unsigned short bsize)
++static int loop_reconfigure_limits(struct loop_device *lo, unsigned int bsize)
+ {
+ 	struct file *file = lo->lo_backing_file;
+ 	struct inode *inode = file->f_mapping->host;
+diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
+index 6ba2c1dd1d878a..90bc605ff6c299 100644
+--- a/drivers/block/ublk_drv.c
++++ b/drivers/block/ublk_drv.c
+@@ -664,12 +664,21 @@ static inline char *ublk_queue_cmd_buf(struct ublk_device *ub, int q_id)
+ 	return ublk_get_queue(ub, q_id)->io_cmd_buf;
+ }
+ 
++static inline int __ublk_queue_cmd_buf_size(int depth)
++{
++	return round_up(depth * sizeof(struct ublksrv_io_desc), PAGE_SIZE);
++}
++
+ static inline int ublk_queue_cmd_buf_size(struct ublk_device *ub, int q_id)
+ {
+ 	struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
+ 
+-	return round_up(ubq->q_depth * sizeof(struct ublksrv_io_desc),
+-			PAGE_SIZE);
++	return __ublk_queue_cmd_buf_size(ubq->q_depth);
++}
++
++static int ublk_max_cmd_buf_size(void)
++{
++	return __ublk_queue_cmd_buf_size(UBLK_MAX_QUEUE_DEPTH);
+ }
+ 
+ static inline bool ublk_queue_can_use_recovery_reissue(
+@@ -1322,7 +1331,7 @@ static int ublk_ch_mmap(struct file *filp, struct vm_area_struct *vma)
+ {
+ 	struct ublk_device *ub = filp->private_data;
+ 	size_t sz = vma->vm_end - vma->vm_start;
+-	unsigned max_sz = UBLK_MAX_QUEUE_DEPTH * sizeof(struct ublksrv_io_desc);
++	unsigned max_sz = ublk_max_cmd_buf_size();
+ 	unsigned long pfn, end, phys_off = vma->vm_pgoff << PAGE_SHIFT;
+ 	int q_id, ret = 0;
+ 
+@@ -2965,7 +2974,7 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
+ 		ret = ublk_ctrl_end_recovery(ub, cmd);
+ 		break;
+ 	default:
+-		ret = -ENOTSUPP;
++		ret = -EOPNOTSUPP;
+ 		break;
+ 	}
+ 
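/* Worked example of the ublk helper introduced above, assuming the
 * current UAPI layout where sizeof(struct ublksrv_io_desc) == 24 and
 * 4 KiB pages (plain C, illustrative only):
 */
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define ROUND_UP(x, a)	((((x) + (a) - 1) / (a)) * (a))

int main(void)
{
	unsigned long depth = 128, desc_sz = 24;
	unsigned long raw = depth * desc_sz;		/* 3072 */

	printf("%lu -> %lu\n", raw, ROUND_UP(raw, PAGE_SIZE));
	/* prints "3072 -> 4096": the command buffer is mapped with page
	 * granularity, so the mmap bound must use the rounded size too,
	 * which is what ublk_max_cmd_buf_size() now guarantees.
	 */
	return 0;
}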
+diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
+index 194417abc1053c..43c96b73a7118f 100644
+--- a/drivers/block/virtio_blk.c
++++ b/drivers/block/virtio_blk.c
+@@ -471,18 +471,18 @@ static bool virtblk_prep_rq_batch(struct request *req)
+ 	return virtblk_prep_rq(req->mq_hctx, vblk, req, vbr) == BLK_STS_OK;
+ }
+ 
+-static bool virtblk_add_req_batch(struct virtio_blk_vq *vq,
++static void virtblk_add_req_batch(struct virtio_blk_vq *vq,
+ 					struct request **rqlist)
+ {
++	struct request *req;
+ 	unsigned long flags;
+-	int err;
+ 	bool kick;
+ 
+ 	spin_lock_irqsave(&vq->lock, flags);
+ 
+-	while (!rq_list_empty(*rqlist)) {
+-		struct request *req = rq_list_pop(rqlist);
++	while ((req = rq_list_pop(rqlist))) {
+ 		struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
++		int err;
+ 
+ 		err = virtblk_add_req(vq->vq, vbr);
+ 		if (err) {
+@@ -495,37 +495,33 @@ static bool virtblk_add_req_batch(struct virtio_blk_vq *vq,
+ 	kick = virtqueue_kick_prepare(vq->vq);
+ 	spin_unlock_irqrestore(&vq->lock, flags);
+ 
+-	return kick;
++	if (kick)
++		virtqueue_notify(vq->vq);
+ }
+ 
+ static void virtio_queue_rqs(struct request **rqlist)
+ {
+-	struct request *req, *next, *prev = NULL;
++	struct request *submit_list = NULL;
+ 	struct request *requeue_list = NULL;
++	struct request **requeue_lastp = &requeue_list;
++	struct virtio_blk_vq *vq = NULL;
++	struct request *req;
+ 
+-	rq_list_for_each_safe(rqlist, req, next) {
+-		struct virtio_blk_vq *vq = get_virtio_blk_vq(req->mq_hctx);
+-		bool kick;
+-
+-		if (!virtblk_prep_rq_batch(req)) {
+-			rq_list_move(rqlist, &requeue_list, req, prev);
+-			req = prev;
+-			if (!req)
+-				continue;
+-		}
++	while ((req = rq_list_pop(rqlist))) {
++		struct virtio_blk_vq *this_vq = get_virtio_blk_vq(req->mq_hctx);
+ 
+-		if (!next || req->mq_hctx != next->mq_hctx) {
+-			req->rq_next = NULL;
+-			kick = virtblk_add_req_batch(vq, rqlist);
+-			if (kick)
+-				virtqueue_notify(vq->vq);
++		if (vq && vq != this_vq)
++			virtblk_add_req_batch(vq, &submit_list);
++		vq = this_vq;
+ 
+-			*rqlist = next;
+-			prev = NULL;
+-		} else
+-			prev = req;
++		if (virtblk_prep_rq_batch(req))
++			rq_list_add(&submit_list, req); /* reverse order */
++		else
++			rq_list_add_tail(&requeue_lastp, req);
+ 	}
+ 
++	if (vq)
++		virtblk_add_req_batch(vq, &submit_list);
+ 	*rqlist = requeue_list;
+ }
+ 
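/* The rewritten virtio_queue_rqs() above builds the per-queue submit
 * list by head insertion (hence the "reverse order" comment), while the
 * requeue list keeps FIFO order through a last-pointer. A minimal
 * sketch of the two idioms with a plain singly-linked list
 * (hypothetical names, not the block layer rq_list API):
 */
struct node { struct node *next; };

static void add_head(struct node **head, struct node *n)
{
	n->next = *head;	/* newest first: order is reversed */
	*head = n;
}

static void add_tail(struct node ***lastp, struct node *n)
{
	n->next = NULL;
	**lastp = n;		/* append: order is preserved */
	*lastp = &n->next;	/* advance the last-pointer */
}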
+diff --git a/drivers/block/zram/Kconfig b/drivers/block/zram/Kconfig
+index 6aea609b795c2f..402b7b17586328 100644
+--- a/drivers/block/zram/Kconfig
++++ b/drivers/block/zram/Kconfig
+@@ -94,6 +94,7 @@ endchoice
+ 
+ config ZRAM_DEF_COMP
+ 	string
++	depends on ZRAM
+ 	default "lzo-rle" if ZRAM_DEF_COMP_LZORLE
+ 	default "lzo" if ZRAM_DEF_COMP_LZO
+ 	default "lz4" if ZRAM_DEF_COMP_LZ4
+diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
+index ad9c9bc3ccfc5b..e682797cdee783 100644
+--- a/drivers/block/zram/zram_drv.c
++++ b/drivers/block/zram/zram_drv.c
+@@ -626,6 +626,12 @@ static ssize_t writeback_store(struct device *dev,
+ 		goto release_init_lock;
+ 	}
+ 
++	/* Do not permit concurrent post-processing actions. */
++	if (atomic_xchg(&zram->pp_in_progress, 1)) {
++		up_read(&zram->init_lock);
++		return -EAGAIN;
++	}
++
+ 	if (!zram->backing_dev) {
+ 		ret = -ENODEV;
+ 		goto release_init_lock;
+@@ -752,6 +758,7 @@ static ssize_t writeback_store(struct device *dev,
+ 		free_block_bdev(zram, blk_idx);
+ 	__free_page(page);
+ release_init_lock:
++	atomic_set(&zram->pp_in_progress, 0);
+ 	up_read(&zram->init_lock);
+ 
+ 	return ret;
+@@ -1881,6 +1888,12 @@ static ssize_t recompress_store(struct device *dev,
+ 		goto release_init_lock;
+ 	}
+ 
++	/* Do not permit concurrent post-processing actions. */
++	if (atomic_xchg(&zram->pp_in_progress, 1)) {
++		up_read(&zram->init_lock);
++		return -EAGAIN;
++	}
++
+ 	if (algo) {
+ 		bool found = false;
+ 
+@@ -1948,6 +1961,7 @@ static ssize_t recompress_store(struct device *dev,
+ 	__free_page(page);
+ 
+ release_init_lock:
++	atomic_set(&zram->pp_in_progress, 0);
+ 	up_read(&zram->init_lock);
+ 	return ret;
+ }
+@@ -2144,6 +2158,7 @@ static void zram_reset_device(struct zram *zram)
+ 	zram->disksize = 0;
+ 	zram_destroy_comps(zram);
+ 	memset(&zram->stats, 0, sizeof(zram->stats));
++	atomic_set(&zram->pp_in_progress, 0);
+ 	reset_bdev(zram);
+ 
+ 	comp_algorithm_set(zram, ZRAM_PRIMARY_COMP, default_compressor);
+@@ -2381,6 +2396,9 @@ static int zram_add(void)
+ 	zram->disk->fops = &zram_devops;
+ 	zram->disk->private_data = zram;
+ 	snprintf(zram->disk->disk_name, 16, "zram%d", device_id);
++	atomic_set(&zram->pp_in_progress, 0);
++	zram_comp_params_reset(zram);
++	comp_algorithm_set(zram, ZRAM_PRIMARY_COMP, default_compressor);
+ 
+ 	/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize */
+ 	set_capacity(zram->disk, 0);
+@@ -2388,9 +2406,6 @@ static int zram_add(void)
+ 	if (ret)
+ 		goto out_cleanup_disk;
+ 
+-	zram_comp_params_reset(zram);
+-	comp_algorithm_set(zram, ZRAM_PRIMARY_COMP, default_compressor);
+-
+ 	zram_debugfs_register(zram);
+ 	pr_info("Added device: %s\n", zram->disk->disk_name);
+ 	return device_id;
+diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
+index cfc8c059db6369..8acf9d2ee42b87 100644
+--- a/drivers/block/zram/zram_drv.h
++++ b/drivers/block/zram/zram_drv.h
+@@ -139,5 +139,6 @@ struct zram {
+ #ifdef CONFIG_ZRAM_MEMORY_TRACKING
+ 	struct dentry *debugfs_dir;
+ #endif
++	atomic_t pp_in_progress;
+ };
+ #endif
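/* The pp_in_progress flag above implements a try-lock with
 * atomic_xchg(): the first writer flips 0 -> 1 and proceeds, any
 * concurrent caller reads back 1 and bails out with -EAGAIN instead of
 * blocking under init_lock. A minimal sketch of the idiom (hypothetical
 * names):
 */
static atomic_t demo_busy = ATOMIC_INIT(0);

static int demo_post_process(void)
{
	if (atomic_xchg(&demo_busy, 1))
		return -EAGAIN;		/* someone else got there first */

	/* ... exclusive post-processing work ... */

	atomic_set(&demo_busy, 0);	/* release */
	return 0;
}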
+diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
+index eef00467905eb3..a1153ada74d206 100644
+--- a/drivers/bluetooth/btbcm.c
++++ b/drivers/bluetooth/btbcm.c
+@@ -541,11 +541,10 @@ static const struct bcm_subver_table bcm_usb_subver_table[] = {
+ static const char *btbcm_get_board_name(struct device *dev)
+ {
+ #ifdef CONFIG_OF
+-	struct device_node *root;
++	struct device_node *root __free(device_node) = of_find_node_by_path("/");
+ 	char *board_type;
+ 	const char *tmp;
+ 
+-	root = of_find_node_by_path("/");
+ 	if (!root)
+ 		return NULL;
+ 
+@@ -555,7 +554,6 @@ static const char *btbcm_get_board_name(struct device *dev)
+ 	/* get rid of any '/' in the compatible string */
+ 	board_type = devm_kstrdup(dev, tmp, GFP_KERNEL);
+ 	strreplace(board_type, '/', '-');
+-	of_node_put(root);
+ 
+ 	return board_type;
+ #else
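/* The __free(device_node) annotation above uses the kernel's
 * scope-based cleanup helpers (<linux/cleanup.h>): of_node_put() runs
 * automatically when the pointer goes out of scope, so every return
 * path drops the reference without a manual put. A minimal sketch of
 * the same pattern (hypothetical function):
 */
static char *demo_get_model(struct device *dev)
{
	struct device_node *root __free(device_node) =
		of_find_node_by_path("/");
	const char *model;

	if (!root)
		return NULL;

	if (of_property_read_string(root, "model", &model))
		return NULL;

	/* copy before root (and its reference) goes out of scope */
	return devm_kstrdup(dev, model, GFP_KERNEL);
}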
+diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
+index 30a32ebbcc681b..645047fb92fd26 100644
+--- a/drivers/bluetooth/btintel.c
++++ b/drivers/bluetooth/btintel.c
+@@ -1841,6 +1841,37 @@ static int btintel_boot_wait(struct hci_dev *hdev, ktime_t calltime, int msec)
+ 	return 0;
+ }
+ 
++static int btintel_boot_wait_d0(struct hci_dev *hdev, ktime_t calltime,
++				int msec)
++{
++	ktime_t delta, rettime;
++	unsigned long long duration;
++	int err;
++
++	bt_dev_info(hdev, "Waiting for device transition to d0");
++
++	err = btintel_wait_on_flag_timeout(hdev, INTEL_WAIT_FOR_D0,
++					   TASK_INTERRUPTIBLE,
++					   msecs_to_jiffies(msec));
++	if (err == -EINTR) {
++		bt_dev_err(hdev, "Device d0 move interrupted");
++		return -EINTR;
++	}
++
++	if (err) {
++		bt_dev_err(hdev, "Device d0 move timeout");
++		return -ETIMEDOUT;
++	}
++
++	rettime = ktime_get();
++	delta = ktime_sub(rettime, calltime);
++	duration = (unsigned long long)ktime_to_ns(delta) >> 10;
++
++	bt_dev_info(hdev, "Device moved to D0 in %llu usecs", duration);
++
++	return 0;
++}
++
+ static int btintel_boot(struct hci_dev *hdev, u32 boot_addr)
+ {
+ 	ktime_t calltime;
+@@ -1849,6 +1880,7 @@ static int btintel_boot(struct hci_dev *hdev, u32 boot_addr)
+ 	calltime = ktime_get();
+ 
+ 	btintel_set_flag(hdev, INTEL_BOOTING);
++	btintel_set_flag(hdev, INTEL_WAIT_FOR_D0);
+ 
+ 	err = btintel_send_intel_reset(hdev, boot_addr);
+ 	if (err) {
+@@ -1861,13 +1893,28 @@ static int btintel_boot(struct hci_dev *hdev, u32 boot_addr)
+ 	 * is done by the operational firmware sending bootup notification.
+ 	 *
+ 	 * Booting into operational firmware should not take longer than
+-	 * 1 second. However if that happens, then just fail the setup
++	 * 5 seconds. However, if that happens, then just fail the setup
+ 	 * since something went wrong.
+ 	 */
+-	err = btintel_boot_wait(hdev, calltime, 1000);
+-	if (err == -ETIMEDOUT)
++	err = btintel_boot_wait(hdev, calltime, 5000);
++	if (err == -ETIMEDOUT) {
+ 		btintel_reset_to_bootloader(hdev);
++		goto exit_error;
++	}
+ 
++	if (hdev->bus == HCI_PCI) {
++		/* In case of PCIe, after receiving the bootup event, the driver
++		 * performs D0 entry by writing 0 to the sleep control register
++		 * (see btintel_pcie_recv_event()). Firmware acks with an alive
++		 * interrupt indicating the host is fully ready to perform BT
++		 * operations. Wait here until the INTEL_WAIT_FOR_D0 bit is
++		 * cleared.
++		 */
++		calltime = ktime_get();
++		err = btintel_boot_wait_d0(hdev, calltime, 2000);
++	}
++
++exit_error:
+ 	return err;
+ }
+ 
+@@ -3273,7 +3320,7 @@ int btintel_configure_setup(struct hci_dev *hdev, const char *driver_name)
+ }
+ EXPORT_SYMBOL_GPL(btintel_configure_setup);
+ 
+-static int btintel_diagnostics(struct hci_dev *hdev, struct sk_buff *skb)
++int btintel_diagnostics(struct hci_dev *hdev, struct sk_buff *skb)
+ {
+ 	struct intel_tlv *tlv = (void *)&skb->data[5];
+ 
+@@ -3301,6 +3348,7 @@ static int btintel_diagnostics(struct hci_dev *hdev, struct sk_buff *skb)
+ recv_frame:
+ 	return hci_recv_frame(hdev, skb);
+ }
++EXPORT_SYMBOL_GPL(btintel_diagnostics);
+ 
+ int btintel_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
+ {
+@@ -3320,7 +3368,8 @@ int btintel_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
+ 				 * indicating that the bootup completed.
+ 				 */
+ 				btintel_bootup(hdev, ptr, len);
+-				break;
++				kfree_skb(skb);
++				return 0;
+ 			case 0x06:
+ 				/* When the firmware loading completes the
+ 				 * device sends out a vendor specific event
+@@ -3328,7 +3377,8 @@ int btintel_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
+ 				 * loading.
+ 				 */
+ 				btintel_secure_send_result(hdev, ptr, len);
+-				break;
++				kfree_skb(skb);
++				return 0;
+ 			}
+ 		}
+ 
+diff --git a/drivers/bluetooth/btintel.h b/drivers/bluetooth/btintel.h
+index aa70e4c2741653..b448c67e8ed94d 100644
+--- a/drivers/bluetooth/btintel.h
++++ b/drivers/bluetooth/btintel.h
+@@ -178,6 +178,7 @@ enum {
+ 	INTEL_ROM_LEGACY,
+ 	INTEL_ROM_LEGACY_NO_WBS_SUPPORT,
+ 	INTEL_ACPI_RESET_ACTIVE,
++	INTEL_WAIT_FOR_D0,
+ 
+ 	__INTEL_NUM_FLAGS,
+ };
+@@ -249,6 +250,7 @@ int btintel_bootloader_setup_tlv(struct hci_dev *hdev,
+ int btintel_shutdown_combined(struct hci_dev *hdev);
+ void btintel_hw_error(struct hci_dev *hdev, u8 code);
+ void btintel_print_fseq_info(struct hci_dev *hdev);
++int btintel_diagnostics(struct hci_dev *hdev, struct sk_buff *skb);
+ #else
+ 
+ static inline int btintel_check_bdaddr(struct hci_dev *hdev)
+@@ -382,4 +384,9 @@ static inline void btintel_hw_error(struct hci_dev *hdev, u8 code)
+ static inline void btintel_print_fseq_info(struct hci_dev *hdev)
+ {
+ }
++
++static inline int btintel_diagnostics(struct hci_dev *hdev, struct sk_buff *skb)
++{
++	return -EOPNOTSUPP;
++}
+ #endif
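/* The INTEL_WAIT_FOR_D0 flag added above follows the usual
 * set-flag / wait / clear-and-wake handshake. A generic sketch with
 * plain bitops and a wait queue (hypothetical names; btintel wraps the
 * same machinery in its own flag helpers):
 */
static DECLARE_WAIT_QUEUE_HEAD(demo_wait_q);
static unsigned long demo_flags;
#define DEMO_WAIT_FOR_D0	0

/* boot path: arm the flag, then wait for the IRQ path to clear it */
static int demo_wait_d0(unsigned int msec)
{
	set_bit(DEMO_WAIT_FOR_D0, &demo_flags);
	/* ... kick the firmware ... */
	if (!wait_event_timeout(demo_wait_q,
				!test_bit(DEMO_WAIT_FOR_D0, &demo_flags),
				msecs_to_jiffies(msec)))
		return -ETIMEDOUT;
	return 0;
}

/* alive-interrupt path: complete the handshake */
static void demo_d0_reached(void)
{
	if (test_and_clear_bit(DEMO_WAIT_FOR_D0, &demo_flags))
		wake_up(&demo_wait_q);
}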
+diff --git a/drivers/bluetooth/btintel_pcie.c b/drivers/bluetooth/btintel_pcie.c
+index 5252125b003f58..8bd663f4bac1b7 100644
+--- a/drivers/bluetooth/btintel_pcie.c
++++ b/drivers/bluetooth/btintel_pcie.c
+@@ -48,6 +48,17 @@ MODULE_DEVICE_TABLE(pci, btintel_pcie_table);
+ #define BTINTEL_PCIE_HCI_EVT_PKT	0x00000004
+ #define BTINTEL_PCIE_HCI_ISO_PKT	0x00000005
+ 
++/* Alive interrupt context */
++enum {
++	BTINTEL_PCIE_ROM,
++	BTINTEL_PCIE_FW_DL,
++	BTINTEL_PCIE_HCI_RESET,
++	BTINTEL_PCIE_INTEL_HCI_RESET1,
++	BTINTEL_PCIE_INTEL_HCI_RESET2,
++	BTINTEL_PCIE_D0,
++	BTINTEL_PCIE_D3
++};
++
+ static inline void ipc_print_ia_ring(struct hci_dev *hdev, struct ia *ia,
+ 				     u16 queue_num)
+ {
+@@ -290,8 +301,9 @@ static int btintel_pcie_enable_bt(struct btintel_pcie_data *data)
+ 	/* wait for interrupt from the device after booting up to primary
+ 	 * bootloader.
+ 	 */
++	data->alive_intr_ctxt = BTINTEL_PCIE_ROM;
+ 	err = wait_event_timeout(data->gp0_wait_q, data->gp0_received,
+-				 msecs_to_jiffies(BTINTEL_DEFAULT_INTR_TIMEOUT));
++				 msecs_to_jiffies(BTINTEL_DEFAULT_INTR_TIMEOUT_MS));
+ 	if (!err)
+ 		return -ETIME;
+ 
+@@ -302,12 +314,78 @@ static int btintel_pcie_enable_bt(struct btintel_pcie_data *data)
+ 	return 0;
+ }
+ 
++/* BIT(0) - ROM, BIT(1) - IML and BIT(3) - OP
++ * Sometimes during firmware image switching from ROM to IML or IML to OP image,
++ * the previous image bit is not cleared by firmware when the alive interrupt
++ * is received. The driver needs to take care of these sticky bits when
++ * deciding the current image running on the controller.
++ * Ex: 0x10 and 0x11 - both represent that the controller is running IML
++ */
++static inline bool btintel_pcie_in_rom(struct btintel_pcie_data *data)
++{
++	return data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_ROM &&
++		!(data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_IML) &&
++		!(data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_OPFW);
++}
++
++static inline bool btintel_pcie_in_op(struct btintel_pcie_data *data)
++{
++	return data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_OPFW;
++}
++
++static inline bool btintel_pcie_in_iml(struct btintel_pcie_data *data)
++{
++	return data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_IML &&
++		!(data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_OPFW);
++}
++
++static inline bool btintel_pcie_in_d3(struct btintel_pcie_data *data)
++{
++	return data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_D3_STATE_READY;
++}
++
++static inline bool btintel_pcie_in_d0(struct btintel_pcie_data *data)
++{
++	return !(data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_D3_STATE_READY);
++}
++
++static void btintel_pcie_wr_sleep_cntrl(struct btintel_pcie_data *data,
++					u32 dxstate)
++{
++	bt_dev_dbg(data->hdev, "writing sleep_ctl_reg: 0x%8.8x", dxstate);
++	btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_IPC_SLEEP_CTL_REG, dxstate);
++}
++
++static inline char *btintel_pcie_alivectxt_state2str(u32 alive_intr_ctxt)
++{
++	switch (alive_intr_ctxt) {
++	case BTINTEL_PCIE_ROM:
++		return "rom";
++	case BTINTEL_PCIE_FW_DL:
++		return "fw_dl";
++	case BTINTEL_PCIE_D0:
++		return "d0";
++	case BTINTEL_PCIE_D3:
++		return "d3";
++	case BTINTEL_PCIE_HCI_RESET:
++		return "hci_reset";
++	case BTINTEL_PCIE_INTEL_HCI_RESET1:
++		return "intel_reset1";
++	case BTINTEL_PCIE_INTEL_HCI_RESET2:
++		return "intel_reset2";
++	default:
++		return "unknown";
++	}
++	return "null";
++}
++
+ /* This function handles the MSI-X interrupt for gp0 cause (bit 0 in
+  * BTINTEL_PCIE_CSR_MSIX_HW_INT_CAUSES) which is sent for boot stage and image response.
+  */
+ static void btintel_pcie_msix_gp0_handler(struct btintel_pcie_data *data)
+ {
+-	u32 reg;
++	bool submit_rx, signal_waitq;
++	u32 reg, old_ctxt;
+ 
+ 	/* This interrupt is for three different causes and it is not easy to
+ 	 * know what causes the interrupt. So, it compares each register value
+@@ -317,20 +395,87 @@ static void btintel_pcie_msix_gp0_handler(struct btintel_pcie_data *data)
+ 	if (reg != data->boot_stage_cache)
+ 		data->boot_stage_cache = reg;
+ 
++	bt_dev_dbg(data->hdev, "Alive context: %s old_boot_stage: 0x%8.8x new_boot_stage: 0x%8.8x",
++		   btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt),
++		   data->boot_stage_cache, reg);
+ 	reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_IMG_RESPONSE_REG);
+ 	if (reg != data->img_resp_cache)
+ 		data->img_resp_cache = reg;
+ 
+ 	data->gp0_received = true;
+ 
+-	/* If the boot stage is OP or IML, reset IA and start RX again */
+-	if (data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_OPFW ||
+-	    data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_IML) {
++	old_ctxt = data->alive_intr_ctxt;
++	submit_rx = false;
++	signal_waitq = false;
++
++	switch (data->alive_intr_ctxt) {
++	case BTINTEL_PCIE_ROM:
++		data->alive_intr_ctxt = BTINTEL_PCIE_FW_DL;
++		signal_waitq = true;
++		break;
++	case BTINTEL_PCIE_FW_DL:
++		/* The error case is already handled. Ideally, control should
++		 * not reach here.
++		 */
++		break;
++	case BTINTEL_PCIE_INTEL_HCI_RESET1:
++		if (btintel_pcie_in_op(data)) {
++			submit_rx = true;
++			break;
++		}
++
++		if (btintel_pcie_in_iml(data)) {
++			submit_rx = true;
++			data->alive_intr_ctxt = BTINTEL_PCIE_FW_DL;
++			break;
++		}
++		break;
++	case BTINTEL_PCIE_INTEL_HCI_RESET2:
++		if (btintel_test_and_clear_flag(data->hdev, INTEL_WAIT_FOR_D0)) {
++			btintel_wake_up_flag(data->hdev, INTEL_WAIT_FOR_D0);
++			data->alive_intr_ctxt = BTINTEL_PCIE_D0;
++		}
++		break;
++	case BTINTEL_PCIE_D0:
++		if (btintel_pcie_in_d3(data)) {
++			data->alive_intr_ctxt = BTINTEL_PCIE_D3;
++			signal_waitq = true;
++			break;
++		}
++		break;
++	case BTINTEL_PCIE_D3:
++		if (btintel_pcie_in_d0(data)) {
++			data->alive_intr_ctxt = BTINTEL_PCIE_D0;
++			submit_rx = true;
++			signal_waitq = true;
++			break;
++		}
++		break;
++	case BTINTEL_PCIE_HCI_RESET:
++		data->alive_intr_ctxt = BTINTEL_PCIE_D0;
++		submit_rx = true;
++		signal_waitq = true;
++		break;
++	default:
++		bt_dev_err(data->hdev, "Unknown state: 0x%2.2x",
++			   data->alive_intr_ctxt);
++		break;
++	}
++
++	if (submit_rx) {
+ 		btintel_pcie_reset_ia(data);
+ 		btintel_pcie_start_rx(data);
+ 	}
+ 
+-	wake_up(&data->gp0_wait_q);
++	if (signal_waitq) {
++		bt_dev_dbg(data->hdev, "wake up gp0 wait_q");
++		wake_up(&data->gp0_wait_q);
++	}
++
++	if (old_ctxt != data->alive_intr_ctxt)
++		bt_dev_dbg(data->hdev, "alive context changed: %s  ->  %s",
++			   btintel_pcie_alivectxt_state2str(old_ctxt),
++			   btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt));
+ }
+ 
+ /* This function handles the MSX-X interrupt for rx queue 0 which is for TX
+@@ -364,6 +509,83 @@ static void btintel_pcie_msix_tx_handle(struct btintel_pcie_data *data)
+ 	}
+ }
+ 
++static int btintel_pcie_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
++{
++	struct hci_event_hdr *hdr = (void *)skb->data;
++	const char diagnostics_hdr[] = { 0x87, 0x80, 0x03 };
++	struct btintel_pcie_data *data = hci_get_drvdata(hdev);
++
++	if (skb->len > HCI_EVENT_HDR_SIZE && hdr->evt == 0xff &&
++	    hdr->plen > 0) {
++		const void *ptr = skb->data + HCI_EVENT_HDR_SIZE + 1;
++		unsigned int len = skb->len - HCI_EVENT_HDR_SIZE - 1;
++
++		if (btintel_test_flag(hdev, INTEL_BOOTLOADER)) {
++			switch (skb->data[2]) {
++			case 0x02:
++				/* When switching to the operational firmware
++				 * the device sends a vendor specific event
++				 * indicating that the bootup completed.
++				 */
++				btintel_bootup(hdev, ptr, len);
++
++				/* If bootup event is from operational image,
++				/* If the bootup event is from the operational
++				 * image, the driver needs to write the sleep
++				 * control register to move into the D0 state.
++				if (btintel_pcie_in_op(data)) {
++					btintel_pcie_wr_sleep_cntrl(data, BTINTEL_PCIE_STATE_D0);
++					data->alive_intr_ctxt = BTINTEL_PCIE_INTEL_HCI_RESET2;
++					kfree_skb(skb);
++					return 0;
++				}
++
++				if (btintel_pcie_in_iml(data)) {
++					/* In case of IML, there is no concept
++					 * of a D0 transition. Mimic IML moving
++					 * to D0 by clearing the
++					 * INTEL_WAIT_FOR_D0 bit and waking up
++					 * the task waiting on it. This is
++					 * required because intel_boot() is a
++					 * common function for both IML and OP
++					 * image loading.
++					 */
++					if (btintel_test_and_clear_flag(data->hdev,
++									INTEL_WAIT_FOR_D0))
++						btintel_wake_up_flag(data->hdev,
++								     INTEL_WAIT_FOR_D0);
++				}
++				kfree_skb(skb);
++				return 0;
++			case 0x06:
++				/* When the firmware loading completes the
++				 * device sends out a vendor specific event
++				 * indicating the result of the firmware
++				 * loading.
++				 */
++				btintel_secure_send_result(hdev, ptr, len);
++				kfree_skb(skb);
++				return 0;
++			}
++		}
++
++		/* Handle all diagnostic events separately. May still call
++		 * hci_recv_frame.
++		 */
++		if (len >= sizeof(diagnostics_hdr) &&
++		    memcmp(&skb->data[2], diagnostics_hdr,
++			   sizeof(diagnostics_hdr)) == 0) {
++			return btintel_diagnostics(hdev, skb);
++		}
++
++		/* This is a debug event that comes from the IML and OP images
++		 * when they start execution. There is no need to pass this
++		 * event to the stack.
++		 */
++		if (skb->data[2] == 0x97)
++			return 0;
++	}
++
++	return hci_recv_frame(hdev, skb);
++}
+/* Process the received rx data
+ * It checks the frame header to identify the data type, creates an skb
+ * and calls the HCI API
+@@ -465,7 +687,7 @@ static int btintel_pcie_recv_frame(struct btintel_pcie_data *data,
+ 	hdev->stat.byte_rx += plen;
+ 
+ 	if (pcie_pkt_type == BTINTEL_PCIE_HCI_EVT_PKT)
+-		ret = btintel_recv_event(hdev, new_skb);
++		ret = btintel_pcie_recv_event(hdev, new_skb);
+ 	else
+ 		ret = hci_recv_frame(hdev, new_skb);
+ 
+@@ -1053,8 +1275,11 @@ static int btintel_pcie_send_frame(struct hci_dev *hdev,
+ 				       struct sk_buff *skb)
+ {
+ 	struct btintel_pcie_data *data = hci_get_drvdata(hdev);
++	struct hci_command_hdr *cmd;
++	__u16 opcode = ~0;
+ 	int ret;
+ 	u32 type;
++	u32 old_ctxt;
+ 
+ 	/* Due to the fw limitation, the type header of the packet should be
+ 	 * 4 bytes unlike 1 byte for UART. In UART, the firmware can read
+@@ -1073,6 +1298,8 @@ static int btintel_pcie_send_frame(struct hci_dev *hdev,
+ 	switch (hci_skb_pkt_type(skb)) {
+ 	case HCI_COMMAND_PKT:
+ 		type = BTINTEL_PCIE_HCI_CMD_PKT;
++		cmd = (void *)skb->data;
++		opcode = le16_to_cpu(cmd->opcode);
+ 		if (btintel_test_flag(hdev, INTEL_BOOTLOADER)) {
+ 			struct hci_command_hdr *cmd = (void *)skb->data;
+ 			__u16 opcode = le16_to_cpu(cmd->opcode);
+@@ -1111,6 +1338,30 @@ static int btintel_pcie_send_frame(struct hci_dev *hdev,
+ 		bt_dev_err(hdev, "Failed to send frame (%d)", ret);
+ 		goto exit_error;
+ 	}
++
++	if (type == BTINTEL_PCIE_HCI_CMD_PKT &&
++	    (opcode == HCI_OP_RESET || opcode == 0xfc01)) {
++		old_ctxt = data->alive_intr_ctxt;
++		data->alive_intr_ctxt =
++			(opcode == 0xfc01 ? BTINTEL_PCIE_INTEL_HCI_RESET1 :
++				BTINTEL_PCIE_HCI_RESET);
++		bt_dev_dbg(data->hdev, "sent cmd: 0x%4.4x alive context changed: %s  ->  %s",
++			   opcode, btintel_pcie_alivectxt_state2str(old_ctxt),
++			   btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt));
++		if (opcode == HCI_OP_RESET) {
++			data->gp0_received = false;
++			ret = wait_event_timeout(data->gp0_wait_q,
++						 data->gp0_received,
++						 msecs_to_jiffies(BTINTEL_DEFAULT_INTR_TIMEOUT_MS));
++			if (!ret) {
++				hdev->stat.err_tx++;
++				bt_dev_err(hdev, "No alive interrupt received for %s",
++					   btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt));
++				ret = -ETIME;
++				goto exit_error;
++			}
++		}
++	}
+ 	hdev->stat.byte_tx += skb->len;
+ 	kfree_skb(skb);
+ 
+diff --git a/drivers/bluetooth/btintel_pcie.h b/drivers/bluetooth/btintel_pcie.h
+index baaff70420f575..8b7824ad005a2a 100644
+--- a/drivers/bluetooth/btintel_pcie.h
++++ b/drivers/bluetooth/btintel_pcie.h
+@@ -12,6 +12,7 @@
+ #define BTINTEL_PCIE_CSR_HW_REV_REG		(BTINTEL_PCIE_CSR_BASE + 0x028)
+ #define BTINTEL_PCIE_CSR_RF_ID_REG		(BTINTEL_PCIE_CSR_BASE + 0x09C)
+ #define BTINTEL_PCIE_CSR_BOOT_STAGE_REG		(BTINTEL_PCIE_CSR_BASE + 0x108)
++#define BTINTEL_PCIE_CSR_IPC_SLEEP_CTL_REG	(BTINTEL_PCIE_CSR_BASE + 0x114)
+ #define BTINTEL_PCIE_CSR_CI_ADDR_LSB_REG	(BTINTEL_PCIE_CSR_BASE + 0x118)
+ #define BTINTEL_PCIE_CSR_CI_ADDR_MSB_REG	(BTINTEL_PCIE_CSR_BASE + 0x11C)
+ #define BTINTEL_PCIE_CSR_IMG_RESPONSE_REG	(BTINTEL_PCIE_CSR_BASE + 0x12C)
+@@ -32,6 +33,7 @@
+ #define BTINTEL_PCIE_CSR_BOOT_STAGE_IML_LOCKDOWN	(BIT(11))
+ #define BTINTEL_PCIE_CSR_BOOT_STAGE_MAC_ACCESS_ON	(BIT(16))
+ #define BTINTEL_PCIE_CSR_BOOT_STAGE_ALIVE		(BIT(23))
++#define BTINTEL_PCIE_CSR_BOOT_STAGE_D3_STATE_READY	(BIT(24))
+ 
+ /* Registers for MSI-X */
+ #define BTINTEL_PCIE_CSR_MSIX_BASE		(0x2000)
+@@ -55,6 +57,16 @@ enum msix_hw_int_causes {
+ 	BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0	= BIT(0),	/* cause 32 */
+ };
+ 
++/* PCIe device states
++ * D0: Host-Device interface is active
++ * D3 hot: Host-Device interface is inactive (as reflected by IPC_SLEEP_CONTROL_CSR_AD)
++ * D3 cold: Host-Device interface is inactive (as reflected by IPC_SLEEP_CONTROL_CSR_AD)
++ */
++enum {
++	BTINTEL_PCIE_STATE_D0 = 0,
++	BTINTEL_PCIE_STATE_D3_HOT = 2,
++	BTINTEL_PCIE_STATE_D3_COLD = 3,
++};
+ #define BTINTEL_PCIE_MSIX_NON_AUTO_CLEAR_CAUSE	BIT(7)
+ 
+ /* Minimum and Maximum number of MSI-X Vector
+@@ -67,7 +79,7 @@ enum msix_hw_int_causes {
+ #define BTINTEL_DEFAULT_MAC_ACCESS_TIMEOUT_US	200000
+ 
+ /* Default interrupt timeout in msec */
+-#define BTINTEL_DEFAULT_INTR_TIMEOUT	3000
++#define BTINTEL_DEFAULT_INTR_TIMEOUT_MS	3000
+ 
+ /* The number of descriptors in TX/RX queues */
+ #define BTINTEL_DESCS_COUNT	16
+@@ -343,6 +355,7 @@ struct rxq {
+  * @ia: Index Array struct
+  * @txq: TX Queue struct
+  * @rxq: RX Queue struct
++ * @alive_intr_ctxt: Alive interrupt context
+  */
+ struct btintel_pcie_data {
+ 	struct pci_dev	*pdev;
+@@ -389,6 +402,7 @@ struct btintel_pcie_data {
+ 	struct ia	ia;
+ 	struct txq	txq;
+ 	struct rxq	rxq;
++	u32	alive_intr_ctxt;
+ };
+ 
+ static inline u32 btintel_pcie_rd_reg32(struct btintel_pcie_data *data,
+diff --git a/drivers/bluetooth/btmtk.c b/drivers/bluetooth/btmtk.c
+index 9bbf205021634f..480e4adba9faa6 100644
+--- a/drivers/bluetooth/btmtk.c
++++ b/drivers/bluetooth/btmtk.c
+@@ -1215,7 +1215,6 @@ static int btmtk_usb_isointf_init(struct hci_dev *hdev)
+ 	struct sk_buff *skb;
+ 	int err;
+ 
+-	init_usb_anchor(&btmtk_data->isopkt_anchor);
+ 	spin_lock_init(&btmtk_data->isorxlock);
+ 
+ 	__set_mtk_intr_interface(hdev);
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index e9534fbc92e32f..4ccaddb46ddd81 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -2616,6 +2616,7 @@ static void btusb_mtk_claim_iso_intf(struct btusb_data *data)
+ 	}
+ 
+ 	set_bit(BTMTK_ISOPKT_OVER_INTR, &btmtk_data->flags);
++	init_usb_anchor(&btmtk_data->isopkt_anchor);
+ }
+ 
+ static void btusb_mtk_release_iso_intf(struct btusb_data *data)
+diff --git a/drivers/bus/mhi/host/trace.h b/drivers/bus/mhi/host/trace.h
+index 95613c8ebe0691..3e0c41777429eb 100644
+--- a/drivers/bus/mhi/host/trace.h
++++ b/drivers/bus/mhi/host/trace.h
+@@ -9,6 +9,7 @@
+ #if !defined(_TRACE_EVENT_MHI_HOST_H) || defined(TRACE_HEADER_MULTI_READ)
+ #define _TRACE_EVENT_MHI_HOST_H
+ 
++#include <linux/byteorder/generic.h>
+ #include <linux/tracepoint.h>
+ #include <linux/trace_seq.h>
+ #include "../common.h"
+@@ -97,18 +98,18 @@ TRACE_EVENT(mhi_gen_tre,
+ 		__string(name, mhi_cntrl->mhi_dev->name)
+ 		__field(int, ch_num)
+ 		__field(void *, wp)
+-		__field(__le64, tre_ptr)
+-		__field(__le32, dword0)
+-		__field(__le32, dword1)
++		__field(uint64_t, tre_ptr)
++		__field(uint32_t, dword0)
++		__field(uint32_t, dword1)
+ 	),
+ 
+ 	TP_fast_assign(
+ 		__assign_str(name);
+ 		__entry->ch_num = mhi_chan->chan;
+ 		__entry->wp = mhi_tre;
+-		__entry->tre_ptr = mhi_tre->ptr;
+-		__entry->dword0 = mhi_tre->dword[0];
+-		__entry->dword1 = mhi_tre->dword[1];
++		__entry->tre_ptr = le64_to_cpu(mhi_tre->ptr);
++		__entry->dword0 = le32_to_cpu(mhi_tre->dword[0]);
++		__entry->dword1 = le32_to_cpu(mhi_tre->dword[1]);
+ 	),
+ 
+ 	TP_printk("%s: Chan: %d TRE: 0x%p TRE buf: 0x%llx DWORD0: 0x%08x DWORD1: 0x%08x\n",
+@@ -176,19 +177,19 @@ DECLARE_EVENT_CLASS(mhi_process_event_ring,
+ 
+ 	TP_STRUCT__entry(
+ 		__string(name, mhi_cntrl->mhi_dev->name)
+-		__field(__le32, dword0)
+-		__field(__le32, dword1)
++		__field(uint32_t, dword0)
++		__field(uint32_t, dword1)
+ 		__field(int, state)
+-		__field(__le64, ptr)
++		__field(uint64_t, ptr)
+ 		__field(void *, rp)
+ 	),
+ 
+ 	TP_fast_assign(
+ 		__assign_str(name);
+ 		__entry->rp = rp;
+-		__entry->ptr = rp->ptr;
+-		__entry->dword0 = rp->dword[0];
+-		__entry->dword1 = rp->dword[1];
++		__entry->ptr = le64_to_cpu(rp->ptr);
++		__entry->dword0 = le32_to_cpu(rp->dword[0]);
++		__entry->dword1 = le32_to_cpu(rp->dword[1]);
+ 		__entry->state = MHI_TRE_GET_EV_STATE(rp);
+ 	),
+ 
+diff --git a/drivers/clk/.kunitconfig b/drivers/clk/.kunitconfig
+index 54ece920705525..08e26137f3d9c9 100644
+--- a/drivers/clk/.kunitconfig
++++ b/drivers/clk/.kunitconfig
+@@ -1,5 +1,6 @@
+ CONFIG_KUNIT=y
+ CONFIG_OF=y
++CONFIG_OF_OVERLAY=y
+ CONFIG_COMMON_CLK=y
+ CONFIG_CLK_KUNIT_TEST=y
+ CONFIG_CLK_FIXED_RATE_KUNIT_TEST=y
+diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
+index 299bc678ed1b9f..0fe07a594b4e1b 100644
+--- a/drivers/clk/Kconfig
++++ b/drivers/clk/Kconfig
+@@ -517,7 +517,6 @@ config CLK_KUNIT_TEST
+ 	tristate "Basic Clock Framework Kunit Tests" if !KUNIT_ALL_TESTS
+ 	depends on KUNIT
+ 	default KUNIT_ALL_TESTS
+-	select OF_OVERLAY if OF
+ 	select DTC
+ 	help
+ 	  Kunit tests for the common clock framework.
+@@ -526,7 +525,6 @@ config CLK_FIXED_RATE_KUNIT_TEST
+ 	tristate "Basic fixed rate clk type KUnit test" if !KUNIT_ALL_TESTS
+ 	depends on KUNIT
+ 	default KUNIT_ALL_TESTS
+-	select OF_OVERLAY if OF
+ 	select DTC
+ 	help
+ 	  KUnit tests for the basic fixed rate clk type.
+diff --git a/drivers/clk/clk-apple-nco.c b/drivers/clk/clk-apple-nco.c
+index 39472a51530a34..457a48d4894128 100644
+--- a/drivers/clk/clk-apple-nco.c
++++ b/drivers/clk/clk-apple-nco.c
+@@ -297,6 +297,9 @@ static int applnco_probe(struct platform_device *pdev)
+ 		memset(&init, 0, sizeof(init));
+ 		init.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+ 						"%s-%d", np->name, i);
++		if (!init.name)
++			return -ENOMEM;
++
+ 		init.ops = &applnco_ops;
+ 		init.parent_data = &pdata;
+ 		init.num_parents = 1;
+diff --git a/drivers/clk/clk-axi-clkgen.c b/drivers/clk/clk-axi-clkgen.c
+index bf4d8ddc93aea1..934e53a96dddac 100644
+--- a/drivers/clk/clk-axi-clkgen.c
++++ b/drivers/clk/clk-axi-clkgen.c
+@@ -7,6 +7,7 @@
+  */
+ 
+ #include <linux/platform_device.h>
++#include <linux/clk.h>
+ #include <linux/clk-provider.h>
+ #include <linux/slab.h>
+ #include <linux/io.h>
+@@ -512,6 +513,7 @@ static int axi_clkgen_probe(struct platform_device *pdev)
+ 	struct clk_init_data init;
+ 	const char *parent_names[2];
+ 	const char *clk_name;
++	struct clk *axi_clk;
+ 	unsigned int i;
+ 	int ret;
+ 
+@@ -528,8 +530,24 @@ static int axi_clkgen_probe(struct platform_device *pdev)
+ 		return PTR_ERR(axi_clkgen->base);
+ 
+ 	init.num_parents = of_clk_get_parent_count(pdev->dev.of_node);
+-	if (init.num_parents < 1 || init.num_parents > 2)
+-		return -EINVAL;
++
++	axi_clk = devm_clk_get_enabled(&pdev->dev, "s_axi_aclk");
++	if (!IS_ERR(axi_clk)) {
++		if (init.num_parents < 2 || init.num_parents > 3)
++			return -EINVAL;
++
++		init.num_parents -= 1;
++	} else {
++		/*
++		 * Legacy fallback so that old DTs which do not have
++		 * clock-names still work. In this case we don't explicitly
++		 * enable the AXI bus clock.
++		 * clock.
++		 */
++		if (PTR_ERR(axi_clk) != -ENOENT)
++			return PTR_ERR(axi_clk);
++		if (init.num_parents < 1 || init.num_parents > 2)
++			return -EINVAL;
++	}
+ 
+ 	for (i = 0; i < init.num_parents; i++) {
+ 		parent_names[i] = of_clk_get_parent_name(pdev->dev.of_node, i);
+diff --git a/drivers/clk/clk-en7523.c b/drivers/clk/clk-en7523.c
+index 22fbea61c3dcc0..fdd8ea989ed24a 100644
+--- a/drivers/clk/clk-en7523.c
++++ b/drivers/clk/clk-en7523.c
+@@ -3,8 +3,10 @@
+ #include <linux/delay.h>
+ #include <linux/clk-provider.h>
+ #include <linux/io.h>
++#include <linux/mfd/syscon.h>
+ #include <linux/platform_device.h>
+ #include <linux/property.h>
++#include <linux/regmap.h>
+ #include <linux/reset-controller.h>
+ #include <dt-bindings/clock/en7523-clk.h>
+ #include <dt-bindings/reset/airoha,en7581-reset.h>
+@@ -31,16 +33,11 @@
+ #define   REG_RESET_CONTROL_PCIE1	BIT(27)
+ #define   REG_RESET_CONTROL_PCIE2	BIT(26)
+ /* EN7581 */
+-#define REG_PCIE0_MEM			0x00
+-#define REG_PCIE0_MEM_MASK		0x04
+-#define REG_PCIE1_MEM			0x08
+-#define REG_PCIE1_MEM_MASK		0x0c
+-#define REG_PCIE2_MEM			0x10
+-#define REG_PCIE2_MEM_MASK		0x14
+ #define REG_NP_SCU_PCIC			0x88
+ #define REG_NP_SCU_SSTR			0x9c
+ #define REG_PCIE_XSI0_SEL_MASK		GENMASK(14, 13)
+ #define REG_PCIE_XSI1_SEL_MASK		GENMASK(12, 11)
++#define REG_CRYPTO_CLKSRC2		0x20c
+ 
+ #define REG_RST_CTRL2			0x00
+ #define REG_RST_CTRL1			0x04
+@@ -84,7 +81,8 @@ struct en_clk_soc_data {
+ 		const u16 *idx_map;
+ 		u16 idx_map_nr;
+ 	} reset;
+-	int (*hw_init)(struct platform_device *pdev, void __iomem *np_base);
++	int (*hw_init)(struct platform_device *pdev,
++		       struct clk_hw_onecell_data *clk_data);
+ };
+ 
+ static const u32 gsw_base[] = { 400000000, 500000000 };
+@@ -92,6 +90,10 @@ static const u32 emi_base[] = { 333000000, 400000000 };
+ static const u32 bus_base[] = { 500000000, 540000000 };
+ static const u32 slic_base[] = { 100000000, 3125000 };
+ static const u32 npu_base[] = { 333000000, 400000000, 500000000 };
++/* EN7581 */
++static const u32 emi7581_base[] = { 540000000, 480000000, 400000000, 300000000 };
++static const u32 npu7581_base[] = { 800000000, 750000000, 720000000, 600000000 };
++static const u32 crypto_base[] = { 540000000, 480000000 };
+ 
+ static const struct en_clk_desc en7523_base_clks[] = {
+ 	{
+@@ -189,6 +191,102 @@ static const struct en_clk_desc en7523_base_clks[] = {
+ 	}
+ };
+ 
++static const struct en_clk_desc en7581_base_clks[] = {
++	{
++		.id = EN7523_CLK_GSW,
++		.name = "gsw",
++
++		.base_reg = REG_GSW_CLK_DIV_SEL,
++		.base_bits = 1,
++		.base_shift = 8,
++		.base_values = gsw_base,
++		.n_base_values = ARRAY_SIZE(gsw_base),
++
++		.div_bits = 3,
++		.div_shift = 0,
++		.div_step = 1,
++		.div_offset = 1,
++	}, {
++		.id = EN7523_CLK_EMI,
++		.name = "emi",
++
++		.base_reg = REG_EMI_CLK_DIV_SEL,
++		.base_bits = 2,
++		.base_shift = 8,
++		.base_values = emi7581_base,
++		.n_base_values = ARRAY_SIZE(emi7581_base),
++
++		.div_bits = 3,
++		.div_shift = 0,
++		.div_step = 1,
++		.div_offset = 1,
++	}, {
++		.id = EN7523_CLK_BUS,
++		.name = "bus",
++
++		.base_reg = REG_BUS_CLK_DIV_SEL,
++		.base_bits = 1,
++		.base_shift = 8,
++		.base_values = bus_base,
++		.n_base_values = ARRAY_SIZE(bus_base),
++
++		.div_bits = 3,
++		.div_shift = 0,
++		.div_step = 1,
++		.div_offset = 1,
++	}, {
++		.id = EN7523_CLK_SLIC,
++		.name = "slic",
++
++		.base_reg = REG_SPI_CLK_FREQ_SEL,
++		.base_bits = 1,
++		.base_shift = 0,
++		.base_values = slic_base,
++		.n_base_values = ARRAY_SIZE(slic_base),
++
++		.div_reg = REG_SPI_CLK_DIV_SEL,
++		.div_bits = 5,
++		.div_shift = 24,
++		.div_val0 = 20,
++		.div_step = 2,
++	}, {
++		.id = EN7523_CLK_SPI,
++		.name = "spi",
++
++		.base_reg = REG_SPI_CLK_DIV_SEL,
++
++		.base_value = 400000000,
++
++		.div_bits = 5,
++		.div_shift = 8,
++		.div_val0 = 40,
++		.div_step = 2,
++	}, {
++		.id = EN7523_CLK_NPU,
++		.name = "npu",
++
++		.base_reg = REG_NPU_CLK_DIV_SEL,
++		.base_bits = 2,
++		.base_shift = 8,
++		.base_values = npu7581_base,
++		.n_base_values = ARRAY_SIZE(npu7581_base),
++
++		.div_bits = 3,
++		.div_shift = 0,
++		.div_step = 1,
++		.div_offset = 1,
++	}, {
++		.id = EN7523_CLK_CRYPTO,
++		.name = "crypto",
++
++		.base_reg = REG_CRYPTO_CLKSRC2,
++		.base_bits = 1,
++		.base_shift = 0,
++		.base_values = crypto_base,
++		.n_base_values = ARRAY_SIZE(crypto_base),
++	}
++};
++
+ static const u16 en7581_rst_ofs[] = {
+ 	REG_RST_CTRL2,
+ 	REG_RST_CTRL1,
+@@ -252,15 +350,11 @@ static const u16 en7581_rst_map[] = {
+ 	[EN7581_XPON_MAC_RST]		= RST_NR_PER_BANK + 31,
+ };
+ 
+-static unsigned int en7523_get_base_rate(void __iomem *base, unsigned int i)
++static u32 en7523_get_base_rate(const struct en_clk_desc *desc, u32 val)
+ {
+-	const struct en_clk_desc *desc = &en7523_base_clks[i];
+-	u32 val;
+-
+ 	if (!desc->base_bits)
+ 		return desc->base_value;
+ 
+-	val = readl(base + desc->base_reg);
+ 	val >>= desc->base_shift;
+ 	val &= (1 << desc->base_bits) - 1;
+ 
+@@ -270,16 +364,11 @@ static unsigned int en7523_get_base_rate(void __iomem *base, unsigned int i)
+ 	return desc->base_values[val];
+ }
+ 
+-static u32 en7523_get_div(void __iomem *base, int i)
++static u32 en7523_get_div(const struct en_clk_desc *desc, u32 val)
+ {
+-	const struct en_clk_desc *desc = &en7523_base_clks[i];
+-	u32 reg, val;
+-
+ 	if (!desc->div_bits)
+ 		return 1;
+ 
+-	reg = desc->div_reg ? desc->div_reg : desc->base_reg;
+-	val = readl(base + reg);
+ 	val >>= desc->div_shift;
+ 	val &= (1 << desc->div_bits) - 1;
+ 
+@@ -412,44 +501,83 @@ static void en7581_pci_disable(struct clk_hw *hw)
+ 	usleep_range(1000, 2000);
+ }
+ 
+-static int en7581_clk_hw_init(struct platform_device *pdev,
+-			      void __iomem *np_base)
++static void en7523_register_clocks(struct device *dev, struct clk_hw_onecell_data *clk_data,
++				   void __iomem *base, void __iomem *np_base)
+ {
+-	void __iomem *pb_base;
+-	u32 val;
++	struct clk_hw *hw;
++	u32 rate;
++	int i;
++
++	for (i = 0; i < ARRAY_SIZE(en7523_base_clks); i++) {
++		const struct en_clk_desc *desc = &en7523_base_clks[i];
++		u32 reg = desc->div_reg ? desc->div_reg : desc->base_reg;
++		u32 val = readl(base + desc->base_reg);
+ 
+-	pb_base = devm_platform_ioremap_resource(pdev, 3);
+-	if (IS_ERR(pb_base))
+-		return PTR_ERR(pb_base);
++		rate = en7523_get_base_rate(desc, val);
++		val = readl(base + reg);
++		rate /= en7523_get_div(desc, val);
+ 
+-	val = readl(np_base + REG_NP_SCU_SSTR);
+-	val &= ~(REG_PCIE_XSI0_SEL_MASK | REG_PCIE_XSI1_SEL_MASK);
+-	writel(val, np_base + REG_NP_SCU_SSTR);
+-	val = readl(np_base + REG_NP_SCU_PCIC);
+-	writel(val | 3, np_base + REG_NP_SCU_PCIC);
++		hw = clk_hw_register_fixed_rate(dev, desc->name, NULL, 0, rate);
++		if (IS_ERR(hw)) {
++			pr_err("Failed to register clk %s: %ld\n",
++			       desc->name, PTR_ERR(hw));
++			continue;
++		}
++
++		clk_data->hws[desc->id] = hw;
++	}
++
++	hw = en7523_register_pcie_clk(dev, np_base);
++	clk_data->hws[EN7523_CLK_PCIE] = hw;
++
++	clk_data->num = EN7523_NUM_CLOCKS;
++}
++
++static int en7523_clk_hw_init(struct platform_device *pdev,
++			      struct clk_hw_onecell_data *clk_data)
++{
++	void __iomem *base, *np_base;
++
++	base = devm_platform_ioremap_resource(pdev, 0);
++	if (IS_ERR(base))
++		return PTR_ERR(base);
++
++	np_base = devm_platform_ioremap_resource(pdev, 1);
++	if (IS_ERR(np_base))
++		return PTR_ERR(np_base);
+ 
+-	writel(0x20000000, pb_base + REG_PCIE0_MEM);
+-	writel(0xfc000000, pb_base + REG_PCIE0_MEM_MASK);
+-	writel(0x24000000, pb_base + REG_PCIE1_MEM);
+-	writel(0xfc000000, pb_base + REG_PCIE1_MEM_MASK);
+-	writel(0x28000000, pb_base + REG_PCIE2_MEM);
+-	writel(0xfc000000, pb_base + REG_PCIE2_MEM_MASK);
++	en7523_register_clocks(&pdev->dev, clk_data, base, np_base);
+ 
+ 	return 0;
+ }
+ 
+-static void en7523_register_clocks(struct device *dev, struct clk_hw_onecell_data *clk_data,
+-				   void __iomem *base, void __iomem *np_base)
++static void en7581_register_clocks(struct device *dev, struct clk_hw_onecell_data *clk_data,
++				   struct regmap *map, void __iomem *base)
+ {
+ 	struct clk_hw *hw;
+ 	u32 rate;
+ 	int i;
+ 
+-	for (i = 0; i < ARRAY_SIZE(en7523_base_clks); i++) {
+-		const struct en_clk_desc *desc = &en7523_base_clks[i];
++	for (i = 0; i < ARRAY_SIZE(en7581_base_clks); i++) {
++		const struct en_clk_desc *desc = &en7581_base_clks[i];
++		u32 val, reg = desc->div_reg ? desc->div_reg : desc->base_reg;
++		int err;
+ 
+-		rate = en7523_get_base_rate(base, i);
+-		rate /= en7523_get_div(base, i);
++		err = regmap_read(map, desc->base_reg, &val);
++		if (err) {
++			pr_err("Failed reading fixed clk rate %s: %d\n",
++			       desc->name, err);
++			continue;
++		}
++		rate = en7523_get_base_rate(desc, val);
++
++		err = regmap_read(map, reg, &val);
++		if (err) {
++			pr_err("Failed reading fixed clk div %s: %d\n",
++			       desc->name, err);
++			continue;
++		}
++		rate /= en7523_get_div(desc, val);
+ 
+ 		hw = clk_hw_register_fixed_rate(dev, desc->name, NULL, 0, rate);
+ 		if (IS_ERR(hw)) {
+@@ -461,12 +589,38 @@ static void en7523_register_clocks(struct device *dev, struct clk_hw_onecell_dat
+ 		clk_data->hws[desc->id] = hw;
+ 	}
+ 
+-	hw = en7523_register_pcie_clk(dev, np_base);
++	hw = en7523_register_pcie_clk(dev, base);
+ 	clk_data->hws[EN7523_CLK_PCIE] = hw;
+ 
+ 	clk_data->num = EN7523_NUM_CLOCKS;
+ }
+ 
++static int en7581_clk_hw_init(struct platform_device *pdev,
++			      struct clk_hw_onecell_data *clk_data)
++{
++	void __iomem *np_base;
++	struct regmap *map;
++	u32 val;
++
++	map = syscon_regmap_lookup_by_compatible("airoha,en7581-chip-scu");
++	if (IS_ERR(map))
++		return PTR_ERR(map);
++
++	np_base = devm_platform_ioremap_resource(pdev, 0);
++	if (IS_ERR(np_base))
++		return PTR_ERR(np_base);
++
++	en7581_register_clocks(&pdev->dev, clk_data, map, np_base);
++
++	val = readl(np_base + REG_NP_SCU_SSTR);
++	val &= ~(REG_PCIE_XSI0_SEL_MASK | REG_PCIE_XSI1_SEL_MASK);
++	writel(val, np_base + REG_NP_SCU_SSTR);
++	val = readl(np_base + REG_NP_SCU_PCIC);
++	writel(val | 3, np_base + REG_NP_SCU_PCIC);
++
++	return 0;
++}
++
+ static int en7523_reset_update(struct reset_controller_dev *rcdev,
+ 			       unsigned long id, bool assert)
+ {
+@@ -533,7 +687,7 @@ static int en7523_reset_register(struct platform_device *pdev,
+ 	if (!soc_data->reset.idx_map_nr)
+ 		return 0;
+ 
+-	base = devm_platform_ioremap_resource(pdev, 2);
++	base = devm_platform_ioremap_resource(pdev, 1);
+ 	if (IS_ERR(base))
+ 		return PTR_ERR(base);
+ 
+@@ -561,31 +715,18 @@ static int en7523_clk_probe(struct platform_device *pdev)
+ 	struct device_node *node = pdev->dev.of_node;
+ 	const struct en_clk_soc_data *soc_data;
+ 	struct clk_hw_onecell_data *clk_data;
+-	void __iomem *base, *np_base;
+ 	int r;
+ 
+-	base = devm_platform_ioremap_resource(pdev, 0);
+-	if (IS_ERR(base))
+-		return PTR_ERR(base);
+-
+-	np_base = devm_platform_ioremap_resource(pdev, 1);
+-	if (IS_ERR(np_base))
+-		return PTR_ERR(np_base);
+-
+-	soc_data = device_get_match_data(&pdev->dev);
+-	if (soc_data->hw_init) {
+-		r = soc_data->hw_init(pdev, np_base);
+-		if (r)
+-			return r;
+-	}
+-
+ 	clk_data = devm_kzalloc(&pdev->dev,
+ 				struct_size(clk_data, hws, EN7523_NUM_CLOCKS),
+ 				GFP_KERNEL);
+ 	if (!clk_data)
+ 		return -ENOMEM;
+ 
+-	en7523_register_clocks(&pdev->dev, clk_data, base, np_base);
++	soc_data = device_get_match_data(&pdev->dev);
++	r = soc_data->hw_init(pdev, clk_data);
++	if (r)
++		return r;
+ 
+ 	r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ 	if (r)
+@@ -608,6 +749,7 @@ static const struct en_clk_soc_data en7523_data = {
+ 		.prepare = en7523_pci_prepare,
+ 		.unprepare = en7523_pci_unprepare,
+ 	},
++	.hw_init = en7523_clk_hw_init,
+ };
+ 
+ static const struct en_clk_soc_data en7581_data = {
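/* The EN7581 init above switches from a privately ioremapped window to
 * a regmap obtained via syscon, since the chip-scu registers are shared
 * with other drivers. A minimal sketch of that lookup-and-read pattern
 * (hypothetical compatible string and offset):
 */
static int demo_read_clk_src(u32 *val)
{
	struct regmap *map;

	map = syscon_regmap_lookup_by_compatible("vendor,demo-scu");
	if (IS_ERR(map))
		return PTR_ERR(map);

	return regmap_read(map, 0x20c, val);	/* e.g. a clk source reg */
}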
+diff --git a/drivers/clk/clk-loongson2.c b/drivers/clk/clk-loongson2.c
+index 820bb1e9e3b79a..7082b4309c6f15 100644
+--- a/drivers/clk/clk-loongson2.c
++++ b/drivers/clk/clk-loongson2.c
+@@ -29,8 +29,10 @@ enum loongson2_clk_type {
+ struct loongson2_clk_provider {
+ 	void __iomem *base;
+ 	struct device *dev;
+-	struct clk_hw_onecell_data clk_data;
+ 	spinlock_t clk_lock;	/* protect access to DIV registers */
++
++	/* Must be last -- ends in a flexible-array member. */
++	struct clk_hw_onecell_data clk_data;
+ };
+ 
+ struct loongson2_clk_data {
+@@ -304,7 +306,7 @@ static int loongson2_clk_probe(struct platform_device *pdev)
+ 		return PTR_ERR(clp->base);
+ 
+ 	spin_lock_init(&clp->clk_lock);
+-	clp->clk_data.num = clks_num + 1;
++	clp->clk_data.num = clks_num;
+ 	clp->dev = dev;
+ 
+ 	for (i = 0; i < clks_num; i++) {
+diff --git a/drivers/clk/imx/clk-fracn-gppll.c b/drivers/clk/imx/clk-fracn-gppll.c
+index 591e0364ee5c11..85771afd4698ae 100644
+--- a/drivers/clk/imx/clk-fracn-gppll.c
++++ b/drivers/clk/imx/clk-fracn-gppll.c
+@@ -254,9 +254,11 @@ static int clk_fracn_gppll_set_rate(struct clk_hw *hw, unsigned long drate,
+ 	pll_div = FIELD_PREP(PLL_RDIV_MASK, rate->rdiv) | rate->odiv |
+ 		FIELD_PREP(PLL_MFI_MASK, rate->mfi);
+ 	writel_relaxed(pll_div, pll->base + PLL_DIV);
++	readl(pll->base + PLL_DIV);
+ 	if (pll->flags & CLK_FRACN_GPPLL_FRACN) {
+ 		writel_relaxed(rate->mfd, pll->base + PLL_DENOMINATOR);
+ 		writel_relaxed(FIELD_PREP(PLL_MFN_MASK, rate->mfn), pll->base + PLL_NUMERATOR);
++		readl(pll->base + PLL_NUMERATOR);
+ 	}
+ 
+ 	/* Wait for 5us according to fracn mode pll doc */
+@@ -265,6 +267,7 @@ static int clk_fracn_gppll_set_rate(struct clk_hw *hw, unsigned long drate,
+ 	/* Enable Powerup */
+ 	tmp |= POWERUP_MASK;
+ 	writel_relaxed(tmp, pll->base + PLL_CTRL);
++	readl(pll->base + PLL_CTRL);
+ 
+ 	/* Wait Lock */
+ 	ret = clk_fracn_gppll_wait_lock(pll);
+@@ -302,14 +305,15 @@ static int clk_fracn_gppll_prepare(struct clk_hw *hw)
+ 
+ 	val |= POWERUP_MASK;
+ 	writel_relaxed(val, pll->base + PLL_CTRL);
+-
+-	val |= CLKMUX_EN;
+-	writel_relaxed(val, pll->base + PLL_CTRL);
++	readl(pll->base + PLL_CTRL);
+ 
+ 	ret = clk_fracn_gppll_wait_lock(pll);
+ 	if (ret)
+ 		return ret;
+ 
++	val |= CLKMUX_EN;
++	writel_relaxed(val, pll->base + PLL_CTRL);
++
+ 	val &= ~CLKMUX_BYPASS;
+ 	writel_relaxed(val, pll->base + PLL_CTRL);
+ 
+diff --git a/drivers/clk/imx/clk-imx8-acm.c b/drivers/clk/imx/clk-imx8-acm.c
+index 6c351050b82ae0..c169fe53a35f83 100644
+--- a/drivers/clk/imx/clk-imx8-acm.c
++++ b/drivers/clk/imx/clk-imx8-acm.c
+@@ -294,9 +294,9 @@ static int clk_imx_acm_attach_pm_domains(struct device *dev,
+ 							 DL_FLAG_STATELESS |
+ 							 DL_FLAG_PM_RUNTIME |
+ 							 DL_FLAG_RPM_ACTIVE);
+-		if (IS_ERR(dev_pm->pd_dev_link[i])) {
++		if (!dev_pm->pd_dev_link[i]) {
+ 			dev_pm_domain_detach(dev_pm->pd_dev[i], false);
+-			ret = PTR_ERR(dev_pm->pd_dev_link[i]);
++			ret = -EINVAL;
+ 			goto detach_pm;
+ 		}
+ 	}
+diff --git a/drivers/clk/imx/clk-lpcg-scu.c b/drivers/clk/imx/clk-lpcg-scu.c
+index dd5abd09f3e206..620afdf8dc03e9 100644
+--- a/drivers/clk/imx/clk-lpcg-scu.c
++++ b/drivers/clk/imx/clk-lpcg-scu.c
+@@ -6,10 +6,12 @@
+ 
+ #include <linux/bits.h>
+ #include <linux/clk-provider.h>
++#include <linux/delay.h>
+ #include <linux/err.h>
+ #include <linux/io.h>
+ #include <linux/slab.h>
+ #include <linux/spinlock.h>
++#include <linux/units.h>
+ 
+ #include "clk-scu.h"
+ 
+@@ -41,6 +43,29 @@ struct clk_lpcg_scu {
+ 
+ #define to_clk_lpcg_scu(_hw) container_of(_hw, struct clk_lpcg_scu, hw)
+ 
++/* e10858 - LPCG clock gating register synchronization errata */
++static void lpcg_e10858_writel(unsigned long rate, void __iomem *reg, u32 val)
++{
++	writel(val, reg);
++
++	if (rate >= 24 * HZ_PER_MHZ || rate == 0) {
++		/*
++		 * The time taken to access the LPCG registers from the AP core
++		 * through the interconnect is longer than the minimum delay
++		 * of 4 clock cycles required by the errata.
++		 * Adding a readl will provide sufficient delay to prevent
++		 * back-to-back writes.
++		 */
++		readl(reg);
++	} else {
++		/*
++		 * For clocks running below 24MHz, wait a minimum of
++		 * 4 clock cycles.
++		 */
++		ndelay(4 * (DIV_ROUND_UP(1000 * HZ_PER_MHZ, rate)));
++	}
++}
++
+ static int clk_lpcg_scu_enable(struct clk_hw *hw)
+ {
+ 	struct clk_lpcg_scu *clk = to_clk_lpcg_scu(hw);
+@@ -57,7 +82,8 @@ static int clk_lpcg_scu_enable(struct clk_hw *hw)
+ 		val |= CLK_GATE_SCU_LPCG_HW_SEL;
+ 
+ 	reg |= val << clk->bit_idx;
+-	writel(reg, clk->reg);
++
++	lpcg_e10858_writel(clk_hw_get_rate(hw), clk->reg, reg);
+ 
+ 	spin_unlock_irqrestore(&imx_lpcg_scu_lock, flags);
+ 
+@@ -74,7 +100,7 @@ static void clk_lpcg_scu_disable(struct clk_hw *hw)
+ 
+ 	reg = readl_relaxed(clk->reg);
+ 	reg &= ~(CLK_GATE_SCU_LPCG_MASK << clk->bit_idx);
+-	writel(reg, clk->reg);
++	lpcg_e10858_writel(clk_hw_get_rate(hw), clk->reg, reg);
+ 
+ 	spin_unlock_irqrestore(&imx_lpcg_scu_lock, flags);
+ }
+@@ -145,13 +171,8 @@ static int __maybe_unused imx_clk_lpcg_scu_resume(struct device *dev)
+ {
+ 	struct clk_lpcg_scu *clk = dev_get_drvdata(dev);
+ 
+-	/*
+-	 * FIXME: Sometimes writes don't work unless the CPU issues
+-	 * them twice
+-	 */
+-
+-	writel(clk->state, clk->reg);
+ 	writel(clk->state, clk->reg);
++	lpcg_e10858_writel(0, clk->reg, clk->state);
+ 	dev_dbg(dev, "restore lpcg state 0x%x\n", clk->state);
+ 
+ 	return 0;
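/* Worked check of the sub-24 MHz branch above (plain C, illustrative
 * only): four cycles of an 8 MHz clock are 4 * ceil(1e9 / 8e6) =
 * 4 * 125 = 500 ns, which is what the ndelay() computes. At or above
 * 24 MHz, four cycles are at most ~168 ns, which the readl() round trip
 * over the interconnect already exceeds, so a read-back suffices there.
 */
#include <stdio.h>

#define HZ_PER_MHZ		1000000UL
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long rate = 8 * HZ_PER_MHZ;	/* example: 8 MHz gate clock */
	unsigned long ns = 4 * DIV_ROUND_UP(1000 * HZ_PER_MHZ, rate);

	printf("required delay: %lu ns\n", ns);	/* prints 500 */
	return 0;
}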
+diff --git a/drivers/clk/imx/clk-scu.c b/drivers/clk/imx/clk-scu.c
+index b1dd0c08e091b6..b27186aaf2a156 100644
+--- a/drivers/clk/imx/clk-scu.c
++++ b/drivers/clk/imx/clk-scu.c
+@@ -596,7 +596,7 @@ static int __maybe_unused imx_clk_scu_suspend(struct device *dev)
+ 		clk->rate = clk_scu_recalc_rate(&clk->hw, 0);
+ 	else
+ 		clk->rate = clk_hw_get_rate(&clk->hw);
+-	clk->is_enabled = clk_hw_is_enabled(&clk->hw);
++	clk->is_enabled = clk_hw_is_prepared(&clk->hw);
+ 
+ 	if (clk->parent)
+ 		dev_dbg(dev, "save parent %s idx %u\n", clk_hw_get_name(clk->parent),
+diff --git a/drivers/clk/mediatek/Kconfig b/drivers/clk/mediatek/Kconfig
+index 70a005e7e1b180..486401e1f2f19c 100644
+--- a/drivers/clk/mediatek/Kconfig
++++ b/drivers/clk/mediatek/Kconfig
+@@ -887,13 +887,6 @@ config COMMON_CLK_MT8195_APUSYS
+ 	help
+ 	  This driver supports MediaTek MT8195 AI Processor Unit System clocks.
+ 
+-config COMMON_CLK_MT8195_AUDSYS
+-	tristate "Clock driver for MediaTek MT8195 audsys"
+-	depends on COMMON_CLK_MT8195
+-	default COMMON_CLK_MT8195
+-	help
+-	  This driver supports MediaTek MT8195 audsys clocks.
+-
+ config COMMON_CLK_MT8195_IMP_IIC_WRAP
+ 	tristate "Clock driver for MediaTek MT8195 imp_iic_wrap"
+ 	depends on COMMON_CLK_MT8195
+@@ -908,14 +901,6 @@ config COMMON_CLK_MT8195_MFGCFG
+ 	help
+ 	  This driver supports MediaTek MT8195 mfgcfg clocks.
+ 
+-config COMMON_CLK_MT8195_MSDC
+-	tristate "Clock driver for MediaTek MT8195 msdc"
+-	depends on COMMON_CLK_MT8195
+-	default COMMON_CLK_MT8195
+-	help
+-	  This driver supports MediaTek MT8195 MMC and SD Controller's
+-	  msdc and msdc_top clocks.
+-
+ config COMMON_CLK_MT8195_SCP_ADSP
+ 	tristate "Clock driver for MediaTek MT8195 scp_adsp"
+ 	depends on COMMON_CLK_MT8195
+diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
+index a3e2a09e2105b2..4444dafa4e3dfa 100644
+--- a/drivers/clk/qcom/Kconfig
++++ b/drivers/clk/qcom/Kconfig
+@@ -1230,11 +1230,11 @@ config SM_VIDEOCC_8350
+ config SM_VIDEOCC_8550
+ 	tristate "SM8550 Video Clock Controller"
+ 	depends on ARM64 || COMPILE_TEST
+-	select SM_GCC_8550
++	depends on SM_GCC_8550 || SM_GCC_8650
+ 	select QCOM_GDSC
+ 	help
+ 	  Support for the video clock controller on Qualcomm Technologies, Inc.
+-	  SM8550 devices.
++	  SM8550 or SM8650 devices.
+ 	  Say Y if you want to support video devices and functionality such as
+ 	  video encode/decode.
+ 
+diff --git a/drivers/clk/ralink/clk-mtmips.c b/drivers/clk/ralink/clk-mtmips.c
+index 50a443bf79ecd3..76285fbbdeaa2d 100644
+--- a/drivers/clk/ralink/clk-mtmips.c
++++ b/drivers/clk/ralink/clk-mtmips.c
+@@ -263,8 +263,9 @@ static int mtmips_register_pherip_clocks(struct device_node *np,
+ 		.rate = _rate		 \
+ 	}
+ 
+-static struct mtmips_clk_fixed rt305x_fixed_clocks[] = {
+-	CLK_FIXED("xtal", NULL, 40000000)
++static struct mtmips_clk_fixed rt3883_fixed_clocks[] = {
++	CLK_FIXED("xtal", NULL, 40000000),
++	CLK_FIXED("periph", "xtal", 40000000)
+ };
+ 
+ static struct mtmips_clk_fixed rt3352_fixed_clocks[] = {
+@@ -366,6 +367,12 @@ static inline struct mtmips_clk *to_mtmips_clk(struct clk_hw *hw)
+ 	return container_of(hw, struct mtmips_clk, hw);
+ }
+ 
++static unsigned long rt2880_xtal_recalc_rate(struct clk_hw *hw,
++					     unsigned long parent_rate)
++{
++	return 40000000;
++}
++
+ static unsigned long rt5350_xtal_recalc_rate(struct clk_hw *hw,
+ 					     unsigned long parent_rate)
+ {
+@@ -677,10 +684,12 @@ static unsigned long mt76x8_cpu_recalc_rate(struct clk_hw *hw,
+ }
+ 
+ static struct mtmips_clk rt2880_clks_base[] = {
++	{ CLK_BASE("xtal", NULL, rt2880_xtal_recalc_rate) },
+ 	{ CLK_BASE("cpu", "xtal", rt2880_cpu_recalc_rate) }
+ };
+ 
+ static struct mtmips_clk rt305x_clks_base[] = {
++	{ CLK_BASE("xtal", NULL, rt2880_xtal_recalc_rate) },
+ 	{ CLK_BASE("cpu", "xtal", rt305x_cpu_recalc_rate) }
+ };
+ 
+@@ -690,6 +699,7 @@ static struct mtmips_clk rt3352_clks_base[] = {
+ };
+ 
+ static struct mtmips_clk rt3883_clks_base[] = {
++	{ CLK_BASE("xtal", NULL, rt2880_xtal_recalc_rate) },
+ 	{ CLK_BASE("cpu", "xtal", rt3883_cpu_recalc_rate) },
+ 	{ CLK_BASE("bus", "cpu", rt3883_bus_recalc_rate) }
+ };
+@@ -746,8 +756,8 @@ static int mtmips_register_clocks(struct device_node *np,
+ static const struct mtmips_clk_data rt2880_clk_data = {
+ 	.clk_base = rt2880_clks_base,
+ 	.num_clk_base = ARRAY_SIZE(rt2880_clks_base),
+-	.clk_fixed = rt305x_fixed_clocks,
+-	.num_clk_fixed = ARRAY_SIZE(rt305x_fixed_clocks),
++	.clk_fixed = NULL,
++	.num_clk_fixed = 0,
+ 	.clk_factor = rt2880_factor_clocks,
+ 	.num_clk_factor = ARRAY_SIZE(rt2880_factor_clocks),
+ 	.clk_periph = rt2880_pherip_clks,
+@@ -757,8 +767,8 @@ static const struct mtmips_clk_data rt2880_clk_data = {
+ static const struct mtmips_clk_data rt305x_clk_data = {
+ 	.clk_base = rt305x_clks_base,
+ 	.num_clk_base = ARRAY_SIZE(rt305x_clks_base),
+-	.clk_fixed = rt305x_fixed_clocks,
+-	.num_clk_fixed = ARRAY_SIZE(rt305x_fixed_clocks),
++	.clk_fixed = NULL,
++	.num_clk_fixed = 0,
+ 	.clk_factor = rt305x_factor_clocks,
+ 	.num_clk_factor = ARRAY_SIZE(rt305x_factor_clocks),
+ 	.clk_periph = rt305x_pherip_clks,
+@@ -779,8 +789,8 @@ static const struct mtmips_clk_data rt3352_clk_data = {
+ static const struct mtmips_clk_data rt3883_clk_data = {
+ 	.clk_base = rt3883_clks_base,
+ 	.num_clk_base = ARRAY_SIZE(rt3883_clks_base),
+-	.clk_fixed = rt305x_fixed_clocks,
+-	.num_clk_fixed = ARRAY_SIZE(rt305x_fixed_clocks),
++	.clk_fixed = rt3883_fixed_clocks,
++	.num_clk_fixed = ARRAY_SIZE(rt3883_fixed_clocks),
+ 	.clk_factor = NULL,
+ 	.num_clk_factor = 0,
+ 	.clk_periph = rt5350_pherip_clks,
+diff --git a/drivers/clk/renesas/rzg2l-cpg.c b/drivers/clk/renesas/rzg2l-cpg.c
+index 88bf39e8c79c83..b43b763dfe186a 100644
+--- a/drivers/clk/renesas/rzg2l-cpg.c
++++ b/drivers/clk/renesas/rzg2l-cpg.c
+@@ -548,7 +548,7 @@ static unsigned long
+ rzg2l_cpg_get_foutpostdiv_rate(struct rzg2l_pll5_param *params,
+ 			       unsigned long rate)
+ {
+-	unsigned long foutpostdiv_rate;
++	unsigned long foutpostdiv_rate, foutvco_rate;
+ 
+ 	params->pl5_intin = rate / MEGA;
+ 	params->pl5_fracin = div_u64(((u64)rate % MEGA) << 24, MEGA);
+@@ -557,10 +557,11 @@ rzg2l_cpg_get_foutpostdiv_rate(struct rzg2l_pll5_param *params,
+ 	params->pl5_postdiv2 = 1;
+ 	params->pl5_spread = 0x16;
+ 
+-	foutpostdiv_rate =
+-		EXTAL_FREQ_IN_MEGA_HZ * MEGA / params->pl5_refdiv *
+-		((((params->pl5_intin << 24) + params->pl5_fracin)) >> 24) /
+-		(params->pl5_postdiv1 * params->pl5_postdiv2);
++	foutvco_rate = div_u64(mul_u32_u32(EXTAL_FREQ_IN_MEGA_HZ * MEGA,
++					   (params->pl5_intin << 24) + params->pl5_fracin),
++			       params->pl5_refdiv) >> 24;
++	foutpostdiv_rate = DIV_ROUND_CLOSEST_ULL(foutvco_rate,
++						 params->pl5_postdiv1 * params->pl5_postdiv2);
+ 
+ 	return foutpostdiv_rate;
+ }
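[Editorial sketch, not part of the patch: the hunk above fixes a precision loss in the PLL5 math. The old expression shifted the 8.24 fixed-point multiplier right by 24 bits before multiplying, truncating pl5_fracin entirely. A minimal standalone illustration, assuming a 24 MHz EXTAL and a requested 148.5 MHz rate (values chosen only for the example):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t extal = 24000000;	/* assumed EXTAL_FREQ_IN_MEGA_HZ * MEGA */
	uint32_t intin = 148;		/* integer MHz part of the target rate */
	uint32_t fracin = 8388608;	/* 0.5 in 24-bit fractional form */

	/* old style: >> 24 truncates the fraction before the multiply */
	uint64_t old_rate = extal * ((((uint64_t)intin << 24) + fracin) >> 24);
	/* new style: keep the fraction through the multiply, shift at the end */
	uint64_t new_rate = (extal * (((uint64_t)intin << 24) + fracin)) >> 24;

	printf("%llu vs %llu\n", (unsigned long long)old_rate,
	       (unsigned long long)new_rate);	/* 3552000000 vs 3564000000 */
	return 0;
}

The kernel change additionally routes the arithmetic through mul_u32_u32() and div_u64() so the same computation stays safe on 32-bit builds.]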
+diff --git a/drivers/clk/sophgo/clk-sg2042-pll.c b/drivers/clk/sophgo/clk-sg2042-pll.c
+index ff9deeef509b8f..1537f4f05860ea 100644
+--- a/drivers/clk/sophgo/clk-sg2042-pll.c
++++ b/drivers/clk/sophgo/clk-sg2042-pll.c
+@@ -153,7 +153,7 @@ static unsigned long sg2042_pll_recalc_rate(unsigned int reg_value,
+ 
+ 	sg2042_pll_ctrl_decode(reg_value, &ctrl_table);
+ 
+-	numerator = parent_rate * ctrl_table.fbdiv;
++	numerator = (u64)parent_rate * ctrl_table.fbdiv;
+ 	denominator = ctrl_table.refdiv * ctrl_table.postdiv1 * ctrl_table.postdiv2;
+ 	do_div(numerator, denominator);
+ 	return numerator;
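[Editorial sketch, not part of the patch: the one-character cast above matters wherever unsigned long is 32 bits (e.g. a COMPILE_TEST build). Without it the multiply is performed in 32-bit arithmetic and wraps before being widened into the u64. A worked example with illustrative numbers (25 MHz parent, fbdiv = 200):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t parent_rate = 25000000;	/* stand-in for a 32-bit unsigned long */
	uint32_t fbdiv = 200;

	uint64_t wrapped = parent_rate * fbdiv;			/* 32-bit product, wraps */
	uint64_t widened = (uint64_t)parent_rate * fbdiv;	/* full 64-bit product */

	/* 5e9 mod 2^32 = 705032704 */
	printf("%llu vs %llu\n", (unsigned long long)wrapped,
	       (unsigned long long)widened);	/* 705032704 vs 5000000000 */
	return 0;
}
]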
+diff --git a/drivers/clk/sunxi-ng/ccu-sun20i-d1.c b/drivers/clk/sunxi-ng/ccu-sun20i-d1.c
+index 9b5cfac2ee70cb..3f095515f54f91 100644
+--- a/drivers/clk/sunxi-ng/ccu-sun20i-d1.c
++++ b/drivers/clk/sunxi-ng/ccu-sun20i-d1.c
+@@ -1371,7 +1371,7 @@ static int sun20i_d1_ccu_probe(struct platform_device *pdev)
+ 
+ 	/* Enforce m1 = 0, m0 = 0 for PLL_AUDIO0 */
+ 	val = readl(reg + SUN20I_D1_PLL_AUDIO0_REG);
+-	val &= ~BIT(1) | BIT(0);
++	val &= ~(BIT(1) | BIT(0));
+ 	writel(val, reg + SUN20I_D1_PLL_AUDIO0_REG);
+ 
+ 	/* Force fanout-27M factor N to 0. */
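[Editorial sketch, not part of the patch: the hunk above fixes an operator-precedence bug. In C, ~ binds tighter than |, so ~BIT(1) | BIT(0) still has bit 0 set in the mask and the &= never clears m0. A standalone illustration:

#include <stdio.h>
#include <stdint.h>

#define BIT(n) (1U << (n))

int main(void)
{
	uint32_t val = 0x3;	/* pretend m1 and m0 are both set */

	uint32_t buggy = val & (~BIT(1) | BIT(0));	/* mask is 0xfffffffd */
	uint32_t fixed = val & ~(BIT(1) | BIT(0));	/* mask is 0xfffffffc */

	printf("buggy %#x, fixed %#x\n", buggy, fixed);	/* 0x1 vs 0 */
	return 0;
}
]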
+diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
+index 95dd4660b5b659..d546903dba4f3a 100644
+--- a/drivers/clocksource/Kconfig
++++ b/drivers/clocksource/Kconfig
+@@ -400,7 +400,8 @@ config ARM_GT_INITIAL_PRESCALER_VAL
+ 	  This affects CPU_FREQ max delta from the initial frequency.
+ 
+ config ARM_TIMER_SP804
+-	bool "Support for Dual Timer SP804 module" if COMPILE_TEST
++	bool "Support for Dual Timer SP804 module"
++	depends on ARM || ARM64 || COMPILE_TEST
+ 	depends on GENERIC_SCHED_CLOCK && HAVE_CLK
+ 	select CLKSRC_MMIO
+ 	select TIMER_OF if OF
+diff --git a/drivers/clocksource/timer-ti-dm-systimer.c b/drivers/clocksource/timer-ti-dm-systimer.c
+index c2dcd8d68e4587..d1c144d6f328cf 100644
+--- a/drivers/clocksource/timer-ti-dm-systimer.c
++++ b/drivers/clocksource/timer-ti-dm-systimer.c
+@@ -686,9 +686,9 @@ subsys_initcall(dmtimer_percpu_timer_startup);
+ 
+ static int __init dmtimer_percpu_quirk_init(struct device_node *np, u32 pa)
+ {
+-	struct device_node *arm_timer;
++	struct device_node *arm_timer __free(device_node) =
++		of_find_compatible_node(NULL, NULL, "arm,armv7-timer");
+ 
+-	arm_timer = of_find_compatible_node(NULL, NULL, "arm,armv7-timer");
+ 	if (of_device_is_available(arm_timer)) {
+ 		pr_warn_once("ARM architected timer wrap issue i940 detected\n");
+ 		return 0;
+diff --git a/drivers/comedi/comedi_fops.c b/drivers/comedi/comedi_fops.c
+index 1b481731df964e..b9df9b19d4bd97 100644
+--- a/drivers/comedi/comedi_fops.c
++++ b/drivers/comedi/comedi_fops.c
+@@ -2407,6 +2407,18 @@ static int comedi_mmap(struct file *file, struct vm_area_struct *vma)
+ 
+ 			start += PAGE_SIZE;
+ 		}
++
++#ifdef CONFIG_MMU
++		/*
++		 * Leaving behind a partial mapping of a buffer we're about to
++		 * drop is unsafe, see remap_pfn_range_notrack().
++		 * We need to zap the range here ourselves instead of relying
++		 * on the automatic zapping in remap_pfn_range() because we call
++		 * remap_pfn_range() in a loop.
++		 */
++		if (retval)
++			zap_vma_ptes(vma, vma->vm_start, size);
++#endif
+ 	}
+ 
+ 	if (retval == 0) {
+diff --git a/drivers/counter/stm32-timer-cnt.c b/drivers/counter/stm32-timer-cnt.c
+index 186e73d6ccb455..87b6ec567b5447 100644
+--- a/drivers/counter/stm32-timer-cnt.c
++++ b/drivers/counter/stm32-timer-cnt.c
+@@ -214,11 +214,17 @@ static int stm32_count_enable_write(struct counter_device *counter,
+ {
+ 	struct stm32_timer_cnt *const priv = counter_priv(counter);
+ 	u32 cr1;
++	int ret;
+ 
+ 	if (enable) {
+ 		regmap_read(priv->regmap, TIM_CR1, &cr1);
+-		if (!(cr1 & TIM_CR1_CEN))
+-			clk_enable(priv->clk);
++		if (!(cr1 & TIM_CR1_CEN)) {
++			ret = clk_enable(priv->clk);
++			if (ret) {
++				dev_err(counter->parent, "Cannot enable clock %d\n", ret);
++				return ret;
++			}
++		}
+ 
+ 		regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN,
+ 				   TIM_CR1_CEN);
+@@ -694,6 +700,7 @@ static int stm32_timer_cnt_probe_encoder(struct device *dev,
+ 	}
+ 
+ 	ret = of_property_read_u32(tnode, "reg", &idx);
++	of_node_put(tnode);
+ 	if (ret) {
+ 		dev_err(dev, "Can't get index (%d)\n", ret);
+ 		return ret;
+@@ -816,7 +823,11 @@ static int __maybe_unused stm32_timer_cnt_resume(struct device *dev)
+ 		return ret;
+ 
+ 	if (priv->enabled) {
+-		clk_enable(priv->clk);
++		ret = clk_enable(priv->clk);
++		if (ret) {
++			dev_err(dev, "Cannot enable clock %d\n", ret);
++			return ret;
++		}
+ 
+ 		/* Restore registers that may have been lost */
+ 		regmap_write(priv->regmap, TIM_SMCR, priv->bak.smcr);
+diff --git a/drivers/counter/ti-ecap-capture.c b/drivers/counter/ti-ecap-capture.c
+index 675447315cafb8..b119aeede693ec 100644
+--- a/drivers/counter/ti-ecap-capture.c
++++ b/drivers/counter/ti-ecap-capture.c
+@@ -574,8 +574,13 @@ static int ecap_cnt_resume(struct device *dev)
+ {
+ 	struct counter_device *counter_dev = dev_get_drvdata(dev);
+ 	struct ecap_cnt_dev *ecap_dev = counter_priv(counter_dev);
++	int ret;
+ 
+-	clk_enable(ecap_dev->clk);
++	ret = clk_enable(ecap_dev->clk);
++	if (ret) {
++		dev_err(dev, "Cannot enable clock %d\n", ret);
++		return ret;
++	}
+ 
+ 	ecap_cnt_capture_set_evmode(counter_dev, ecap_dev->pm_ctx.ev_mode);
+ 
+diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
+index b63863f77c6778..91d3c3b1c2d3bf 100644
+--- a/drivers/cpufreq/amd-pstate.c
++++ b/drivers/cpufreq/amd-pstate.c
+@@ -665,34 +665,12 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
+ static int amd_pstate_cpu_boost_update(struct cpufreq_policy *policy, bool on)
+ {
+ 	struct amd_cpudata *cpudata = policy->driver_data;
+-	struct cppc_perf_ctrls perf_ctrls;
+-	u32 highest_perf, nominal_perf, nominal_freq, max_freq;
++	u32 nominal_freq, max_freq;
+ 	int ret = 0;
+ 
+-	highest_perf = READ_ONCE(cpudata->highest_perf);
+-	nominal_perf = READ_ONCE(cpudata->nominal_perf);
+ 	nominal_freq = READ_ONCE(cpudata->nominal_freq);
+ 	max_freq = READ_ONCE(cpudata->max_freq);
+ 
+-	if (boot_cpu_has(X86_FEATURE_CPPC)) {
+-		u64 value = READ_ONCE(cpudata->cppc_req_cached);
+-
+-		value &= ~GENMASK_ULL(7, 0);
+-		value |= on ? highest_perf : nominal_perf;
+-		WRITE_ONCE(cpudata->cppc_req_cached, value);
+-
+-		wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
+-	} else {
+-		perf_ctrls.max_perf = on ? highest_perf : nominal_perf;
+-		ret = cppc_set_perf(cpudata->cpu, &perf_ctrls);
+-		if (ret) {
+-			cpufreq_cpu_release(policy);
+-			pr_debug("Failed to set max perf on CPU:%d. ret:%d\n",
+-				cpudata->cpu, ret);
+-			return ret;
+-		}
+-	}
+-
+ 	if (on)
+ 		policy->cpuinfo.max_freq = max_freq;
+ 	else if (policy->cpuinfo.max_freq > nominal_freq * 1000)
+@@ -1535,7 +1513,7 @@ static int amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
+ 	value = READ_ONCE(cpudata->cppc_req_cached);
+ 
+ 	if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
+-		min_perf = max_perf;
++		min_perf = min(cpudata->nominal_perf, max_perf);
+ 
+ 	/* Initial min/max values for CPPC Performance Controls Register */
+ 	value &= ~AMD_CPPC_MIN_PERF(~0L);
+diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
+index 2b8708475ac776..c1cdf0f4d0ddda 100644
+--- a/drivers/cpufreq/cppc_cpufreq.c
++++ b/drivers/cpufreq/cppc_cpufreq.c
+@@ -118,6 +118,9 @@ static void cppc_scale_freq_workfn(struct kthread_work *work)
+ 
+ 	perf = cppc_perf_from_fbctrs(cpu_data, &cppc_fi->prev_perf_fb_ctrs,
+ 				     &fb_ctrs);
++	if (!perf)
++		return;
++
+ 	cppc_fi->prev_perf_fb_ctrs = fb_ctrs;
+ 
+ 	perf <<= SCHED_CAPACITY_SHIFT;
+@@ -420,6 +423,9 @@ static int cppc_get_cpu_power(struct device *cpu_dev,
+ 	struct cppc_cpudata *cpu_data;
+ 
+ 	policy = cpufreq_cpu_get_raw(cpu_dev->id);
++	if (!policy)
++		return -EINVAL;
++
+ 	cpu_data = policy->driver_data;
+ 	perf_caps = &cpu_data->perf_caps;
+ 	max_cap = arch_scale_cpu_capacity(cpu_dev->id);
+@@ -487,6 +493,9 @@ static int cppc_get_cpu_cost(struct device *cpu_dev, unsigned long KHz,
+ 	int step;
+ 
+ 	policy = cpufreq_cpu_get_raw(cpu_dev->id);
++	if (!policy)
++		return -EINVAL;
++
+ 	cpu_data = policy->driver_data;
+ 	perf_caps = &cpu_data->perf_caps;
+ 	max_cap = arch_scale_cpu_capacity(cpu_dev->id);
+@@ -724,13 +733,31 @@ static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
+ 	delta_delivered = get_delta(fb_ctrs_t1->delivered,
+ 				    fb_ctrs_t0->delivered);
+ 
+-	/* Check to avoid divide-by zero and invalid delivered_perf */
++	/*
++	 * Avoid divide-by-zero and unchanged feedback counters.
++	 * Leave it for callers to handle.
++	 */
+ 	if (!delta_reference || !delta_delivered)
+-		return cpu_data->perf_ctrls.desired_perf;
++		return 0;
+ 
+ 	return (reference_perf * delta_delivered) / delta_reference;
+ }
+ 
++static int cppc_get_perf_ctrs_sample(int cpu,
++				     struct cppc_perf_fb_ctrs *fb_ctrs_t0,
++				     struct cppc_perf_fb_ctrs *fb_ctrs_t1)
++{
++	int ret;
++
++	ret = cppc_get_perf_ctrs(cpu, fb_ctrs_t0);
++	if (ret)
++		return ret;
++
++	udelay(2); /* 2usec delay between sampling */
++
++	return cppc_get_perf_ctrs(cpu, fb_ctrs_t1);
++}
++
+ static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
+ {
+ 	struct cppc_perf_fb_ctrs fb_ctrs_t0 = {0}, fb_ctrs_t1 = {0};
+@@ -746,18 +773,32 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
+ 
+ 	cpufreq_cpu_put(policy);
+ 
+-	ret = cppc_get_perf_ctrs(cpu, &fb_ctrs_t0);
+-	if (ret)
+-		return 0;
+-
+-	udelay(2); /* 2usec delay between sampling */
+-
+-	ret = cppc_get_perf_ctrs(cpu, &fb_ctrs_t1);
+-	if (ret)
+-		return 0;
++	ret = cppc_get_perf_ctrs_sample(cpu, &fb_ctrs_t0, &fb_ctrs_t1);
++	if (ret) {
++		if (ret == -EFAULT)
++			/* Any of the associated CPPC regs is 0. */
++			goto out_invalid_counters;
++		else
++			return 0;
++	}
+ 
+ 	delivered_perf = cppc_perf_from_fbctrs(cpu_data, &fb_ctrs_t0,
+ 					       &fb_ctrs_t1);
++	if (!delivered_perf)
++		goto out_invalid_counters;
++
++	return cppc_perf_to_khz(&cpu_data->perf_caps, delivered_perf);
++
++out_invalid_counters:
++	/*
++	 * Feedback counters could be unchanged or 0 when a cpu enters a
++	 * low-power idle state, e.g. clock-gated or power-gated.
++	 * Use desired perf to reflect the frequency.  Get the latest register
++	 * value first as some platforms may update the actual delivered perf
++	 * there; if failed, resort to the cached desired perf.
++	 */
++	if (cppc_get_desired_perf(cpu, &delivered_perf))
++		delivered_perf = cpu_data->perf_ctrls.desired_perf;
+ 
+ 	return cppc_perf_to_khz(&cpu_data->perf_caps, delivered_perf);
+ }
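[Editorial sketch, not part of the patch: the arithmetic behind cppc_perf_from_fbctrs() is delivered perf = reference_perf scaled by how much faster the delivered counter advanced than the reference counter over the sampling window. A sketch with made-up counter values; the 0 return for unchanged counters is what the reworked callers above now map to the desired-perf fallback:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* hypothetical snapshots taken ~2 usec apart */
	uint64_t ref0 = 1000, ref1 = 1500;	/* delta_reference = 500  */
	uint64_t del0 = 4000, del1 = 5000;	/* delta_delivered = 1000 */
	uint64_t reference_perf = 100;

	if (ref1 == ref0 || del1 == del0) {
		/* idle/gated CPU: no usable sample, callers fall back */
		printf("no sample\n");
		return 0;
	}

	/* the CPU ran twice as fast as the reference clock: perf = 200 */
	printf("delivered perf = %llu\n",
	       (unsigned long long)(reference_perf * (del1 - del0) /
				    (ref1 - ref0)));
	return 0;
}
]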
+diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c
+index 6a8e97896d38ca..ed1a6dbad63894 100644
+--- a/drivers/cpufreq/loongson2_cpufreq.c
++++ b/drivers/cpufreq/loongson2_cpufreq.c
+@@ -148,7 +148,9 @@ static int __init cpufreq_init(void)
+ 
+ 	ret = cpufreq_register_driver(&loongson2_cpufreq_driver);
+ 
+-	if (!ret && !nowait) {
++	if (ret) {
++		platform_driver_unregister(&platform_driver);
++	} else if (!nowait) {
+ 		saved_cpu_wait = cpu_wait;
+ 		cpu_wait = loongson2_cpu_wait;
+ 	}
+diff --git a/drivers/cpufreq/loongson3_cpufreq.c b/drivers/cpufreq/loongson3_cpufreq.c
+index 6b5e6798d9a283..a923e196ec86e7 100644
+--- a/drivers/cpufreq/loongson3_cpufreq.c
++++ b/drivers/cpufreq/loongson3_cpufreq.c
+@@ -346,8 +346,11 @@ static int loongson3_cpufreq_probe(struct platform_device *pdev)
+ {
+ 	int i, ret;
+ 
+-	for (i = 0; i < MAX_PACKAGES; i++)
+-		devm_mutex_init(&pdev->dev, &cpufreq_mutex[i]);
++	for (i = 0; i < MAX_PACKAGES; i++) {
++		ret = devm_mutex_init(&pdev->dev, &cpufreq_mutex[i]);
++		if (ret)
++			return ret;
++	}
+ 
+ 	ret = do_service_request(0, 0, CMD_GET_VERSION, 0, 0);
+ 	if (ret <= 0)
+diff --git a/drivers/cpufreq/mediatek-cpufreq-hw.c b/drivers/cpufreq/mediatek-cpufreq-hw.c
+index 8925e096d5b9a0..aeb5e63045421b 100644
+--- a/drivers/cpufreq/mediatek-cpufreq-hw.c
++++ b/drivers/cpufreq/mediatek-cpufreq-hw.c
+@@ -62,7 +62,7 @@ mtk_cpufreq_get_cpu_power(struct device *cpu_dev, unsigned long *uW,
+ 
+ 	policy = cpufreq_cpu_get_raw(cpu_dev->id);
+ 	if (!policy)
+-		return 0;
++		return -EINVAL;
+ 
+ 	data = policy->driver_data;
+ 
+diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
+index 1a3ecd44cbaf65..20f6453670aa49 100644
+--- a/drivers/crypto/bcm/cipher.c
++++ b/drivers/crypto/bcm/cipher.c
+@@ -2415,6 +2415,7 @@ static int ahash_hmac_setkey(struct crypto_ahash *ahash, const u8 *key,
+ 
+ static int ahash_hmac_init(struct ahash_request *req)
+ {
++	int ret;
+ 	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
+ 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ 	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
+@@ -2424,7 +2425,9 @@ static int ahash_hmac_init(struct ahash_request *req)
+ 	flow_log("ahash_hmac_init()\n");
+ 
+ 	/* init the context as a hash */
+-	ahash_init(req);
++	ret = ahash_init(req);
++	if (ret)
++		return ret;
+ 
+ 	if (!spu_no_incr_hash(ctx)) {
+ 		/* SPU-M can do incr hashing but needs sw for outer HMAC */
+diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
+index 887a5f2fb9279b..cb001aa1de6618 100644
+--- a/drivers/crypto/caam/caampkc.c
++++ b/drivers/crypto/caam/caampkc.c
+@@ -984,7 +984,7 @@ static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
+ 	return -ENOMEM;
+ }
+ 
+-static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
++static int caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
+ 				       struct rsa_key *raw_key)
+ {
+ 	struct caam_rsa_key *rsa_key = &ctx->key;
+@@ -994,7 +994,7 @@ static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
+ 
+ 	rsa_key->p = caam_read_raw_data(raw_key->p, &p_sz);
+ 	if (!rsa_key->p)
+-		return;
++		return -ENOMEM;
+ 	rsa_key->p_sz = p_sz;
+ 
+ 	rsa_key->q = caam_read_raw_data(raw_key->q, &q_sz);
+@@ -1029,7 +1029,7 @@ static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
+ 
+ 	rsa_key->priv_form = FORM3;
+ 
+-	return;
++	return 0;
+ 
+ free_dq:
+ 	kfree_sensitive(rsa_key->dq);
+@@ -1043,6 +1043,7 @@ static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
+ 	kfree_sensitive(rsa_key->q);
+ free_p:
+ 	kfree_sensitive(rsa_key->p);
++	return -ENOMEM;
+ }
+ 
+ static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
+@@ -1088,7 +1089,9 @@ static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
+ 	rsa_key->e_sz = raw_key.e_sz;
+ 	rsa_key->n_sz = raw_key.n_sz;
+ 
+-	caam_rsa_set_priv_key_form(ctx, &raw_key);
++	ret = caam_rsa_set_priv_key_form(ctx, &raw_key);
++	if (ret)
++		goto err;
+ 
+ 	return 0;
+ 
+diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
+index f6111ee9ed342d..8ed2bb01a619fd 100644
+--- a/drivers/crypto/caam/qi.c
++++ b/drivers/crypto/caam/qi.c
+@@ -794,7 +794,7 @@ int caam_qi_init(struct platform_device *caam_pdev)
+ 
+ 	caam_debugfs_qi_init(ctrlpriv);
+ 
+-	err = devm_add_action_or_reset(qidev, caam_qi_shutdown, ctrlpriv);
++	err = devm_add_action_or_reset(qidev, caam_qi_shutdown, qidev);
+ 	if (err)
+ 		goto fail2;
+ 
+diff --git a/drivers/crypto/cavium/cpt/cptpf_main.c b/drivers/crypto/cavium/cpt/cptpf_main.c
+index 6872ac3440010f..54de869e5374c2 100644
+--- a/drivers/crypto/cavium/cpt/cptpf_main.c
++++ b/drivers/crypto/cavium/cpt/cptpf_main.c
+@@ -44,7 +44,7 @@ static void cpt_disable_cores(struct cpt_device *cpt, u64 coremask,
+ 		dev_err(dev, "Cores still busy %llx", coremask);
+ 		grp = cpt_read_csr64(cpt->reg_base,
+ 				     CPTX_PF_EXEC_BUSY(0));
+-		if (timeout--)
++		if (!timeout--)
+ 			break;
+ 
+ 		udelay(CSR_DELAY);
+@@ -302,6 +302,8 @@ static int cpt_ucode_load_fw(struct cpt_device *cpt, const u8 *fw, bool is_ae)
+ 
+ 	ret = do_cpt_init(cpt, mcode);
+ 	if (ret) {
++		dma_free_coherent(&cpt->pdev->dev, mcode->code_size,
++				  mcode->code, mcode->phys_base);
+ 		dev_err(dev, "do_cpt_init failed with ret: %d\n", ret);
+ 		goto fw_release;
+ 	}
+@@ -394,7 +396,7 @@ static void cpt_disable_all_cores(struct cpt_device *cpt)
+ 		dev_err(dev, "Cores still busy");
+ 		grp = cpt_read_csr64(cpt->reg_base,
+ 				     CPTX_PF_EXEC_BUSY(0));
+-		if (timeout--)
++		if (!timeout--)
+ 			break;
+ 
+ 		udelay(CSR_DELAY);
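[Editorial sketch, not part of the patch: both hunks in this file flip an inverted countdown test. With if (timeout--) the busy-poll loop gives up on its very first pass (timeout starts non-zero, so the branch is taken immediately), while if (!timeout--) only bails once the budget is exhausted. A tiny model of the loop:

#include <stdio.h>

int main(void)
{
	int timeout = 100, polls = 0;
	int busy = 1;	/* pretend the cores never go idle */

	while (busy) {
		polls++;
		if (!timeout--)	/* with "if (timeout--)" polls would stop at 1 */
			break;
	}

	printf("polled %d times\n", polls);	/* 101 */
	return 0;
}
]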
+diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
+index 6b536ad2ada52a..34d30b78381343 100644
+--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
++++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
+@@ -1280,11 +1280,15 @@ static u32 hpre_get_hw_err_status(struct hisi_qm *qm)
+ 
+ static void hpre_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
+ {
+-	u32 nfe;
+-
+ 	writel(err_sts, qm->io_base + HPRE_HAC_SOURCE_INT);
+-	nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);
+-	writel(nfe, qm->io_base + HPRE_RAS_NFE_ENB);
++}
++
++static void hpre_disable_error_report(struct hisi_qm *qm, u32 err_type)
++{
++	u32 nfe_mask;
++
++	nfe_mask = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);
++	writel(nfe_mask & (~err_type), qm->io_base + HPRE_RAS_NFE_ENB);
+ }
+ 
+ static void hpre_open_axi_master_ooo(struct hisi_qm *qm)
+@@ -1298,6 +1302,27 @@ static void hpre_open_axi_master_ooo(struct hisi_qm *qm)
+ 	       qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
+ }
+ 
++static enum acc_err_result hpre_get_err_result(struct hisi_qm *qm)
++{
++	u32 err_status;
++
++	err_status = hpre_get_hw_err_status(qm);
++	if (err_status) {
++		if (err_status & qm->err_info.ecc_2bits_mask)
++			qm->err_status.is_dev_ecc_mbit = true;
++		hpre_log_hw_error(qm, err_status);
++
++		if (err_status & qm->err_info.dev_reset_mask) {
++			/* Disable the same error reporting until device is recovered. */
++			hpre_disable_error_report(qm, err_status);
++			return ACC_ERR_NEED_RESET;
++		}
++		hpre_clear_hw_err_status(qm, err_status);
++	}
++
++	return ACC_ERR_RECOVERED;
++}
++
+ static void hpre_err_info_init(struct hisi_qm *qm)
+ {
+ 	struct hisi_qm_err_info *err_info = &qm->err_info;
+@@ -1324,12 +1349,12 @@ static const struct hisi_qm_err_ini hpre_err_ini = {
+ 	.hw_err_disable		= hpre_hw_error_disable,
+ 	.get_dev_hw_err_status	= hpre_get_hw_err_status,
+ 	.clear_dev_hw_err_status = hpre_clear_hw_err_status,
+-	.log_dev_hw_err		= hpre_log_hw_error,
+ 	.open_axi_master_ooo	= hpre_open_axi_master_ooo,
+ 	.open_sva_prefetch	= hpre_open_sva_prefetch,
+ 	.close_sva_prefetch	= hpre_close_sva_prefetch,
+ 	.show_last_dfx_regs	= hpre_show_last_dfx_regs,
+ 	.err_info_init		= hpre_err_info_init,
++	.get_err_result		= hpre_get_err_result,
+ };
+ 
+ static int hpre_pf_probe_init(struct hpre *hpre)
+diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
+index 07983af9e3e229..b18692ee7fd563 100644
+--- a/drivers/crypto/hisilicon/qm.c
++++ b/drivers/crypto/hisilicon/qm.c
+@@ -271,12 +271,6 @@ enum vft_type {
+ 	SHAPER_VFT,
+ };
+ 
+-enum acc_err_result {
+-	ACC_ERR_NONE,
+-	ACC_ERR_NEED_RESET,
+-	ACC_ERR_RECOVERED,
+-};
+-
+ enum qm_alg_type {
+ 	ALG_TYPE_0,
+ 	ALG_TYPE_1,
+@@ -1425,22 +1419,25 @@ static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status)
+ 
+ static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
+ {
+-	u32 error_status, tmp;
+-
+-	/* read err sts */
+-	tmp = readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
+-	error_status = qm->error_mask & tmp;
++	u32 error_status;
+ 
+-	if (error_status) {
++	error_status = qm_get_hw_error_status(qm);
++	if (error_status & qm->error_mask) {
+ 		if (error_status & QM_ECC_MBIT)
+ 			qm->err_status.is_qm_ecc_mbit = true;
+ 
+ 		qm_log_hw_error(qm, error_status);
+-		if (error_status & qm->err_info.qm_reset_mask)
++		if (error_status & qm->err_info.qm_reset_mask) {
++			/* Disable the same error reporting until device is recovered. */
++			writel(qm->err_info.nfe & (~error_status),
++			       qm->io_base + QM_RAS_NFE_ENABLE);
+ 			return ACC_ERR_NEED_RESET;
++		}
+ 
++		/* Clear error source if not need reset. */
+ 		writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE);
+ 		writel(qm->err_info.nfe, qm->io_base + QM_RAS_NFE_ENABLE);
++		writel(qm->err_info.ce, qm->io_base + QM_RAS_CE_ENABLE);
+ 	}
+ 
+ 	return ACC_ERR_RECOVERED;
+@@ -3861,30 +3858,12 @@ EXPORT_SYMBOL_GPL(hisi_qm_sriov_configure);
+ 
+ static enum acc_err_result qm_dev_err_handle(struct hisi_qm *qm)
+ {
+-	u32 err_sts;
+-
+-	if (!qm->err_ini->get_dev_hw_err_status) {
+-		dev_err(&qm->pdev->dev, "Device doesn't support get hw error status!\n");
++	if (!qm->err_ini->get_err_result) {
++		dev_err(&qm->pdev->dev, "Device doesn't support reset!\n");
+ 		return ACC_ERR_NONE;
+ 	}
+ 
+-	/* get device hardware error status */
+-	err_sts = qm->err_ini->get_dev_hw_err_status(qm);
+-	if (err_sts) {
+-		if (err_sts & qm->err_info.ecc_2bits_mask)
+-			qm->err_status.is_dev_ecc_mbit = true;
+-
+-		if (qm->err_ini->log_dev_hw_err)
+-			qm->err_ini->log_dev_hw_err(qm, err_sts);
+-
+-		if (err_sts & qm->err_info.dev_reset_mask)
+-			return ACC_ERR_NEED_RESET;
+-
+-		if (qm->err_ini->clear_dev_hw_err_status)
+-			qm->err_ini->clear_dev_hw_err_status(qm, err_sts);
+-	}
+-
+-	return ACC_ERR_RECOVERED;
++	return qm->err_ini->get_err_result(qm);
+ }
+ 
+ static enum acc_err_result qm_process_dev_error(struct hisi_qm *qm)
+diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
+index c35533d8930b21..75c25f0d5f2b82 100644
+--- a/drivers/crypto/hisilicon/sec2/sec_main.c
++++ b/drivers/crypto/hisilicon/sec2/sec_main.c
+@@ -1010,11 +1010,15 @@ static u32 sec_get_hw_err_status(struct hisi_qm *qm)
+ 
+ static void sec_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
+ {
+-	u32 nfe;
+-
+ 	writel(err_sts, qm->io_base + SEC_CORE_INT_SOURCE);
+-	nfe = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_NFE_MASK_CAP, qm->cap_ver);
+-	writel(nfe, qm->io_base + SEC_RAS_NFE_REG);
++}
++
++static void sec_disable_error_report(struct hisi_qm *qm, u32 err_type)
++{
++	u32 nfe_mask;
++
++	nfe_mask = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_NFE_MASK_CAP, qm->cap_ver);
++	writel(nfe_mask & (~err_type), qm->io_base + SEC_RAS_NFE_REG);
+ }
+ 
+ static void sec_open_axi_master_ooo(struct hisi_qm *qm)
+@@ -1026,6 +1030,27 @@ static void sec_open_axi_master_ooo(struct hisi_qm *qm)
+ 	writel(val | SEC_AXI_SHUTDOWN_ENABLE, qm->io_base + SEC_CONTROL_REG);
+ }
+ 
++static enum acc_err_result sec_get_err_result(struct hisi_qm *qm)
++{
++	u32 err_status;
++
++	err_status = sec_get_hw_err_status(qm);
++	if (err_status) {
++		if (err_status & qm->err_info.ecc_2bits_mask)
++			qm->err_status.is_dev_ecc_mbit = true;
++		sec_log_hw_error(qm, err_status);
++
++		if (err_status & qm->err_info.dev_reset_mask) {
++			/* Disable the same error reporting until device is recovered. */
++			sec_disable_error_report(qm, err_status);
++			return ACC_ERR_NEED_RESET;
++		}
++		sec_clear_hw_err_status(qm, err_status);
++	}
++
++	return ACC_ERR_RECOVERED;
++}
++
+ static void sec_err_info_init(struct hisi_qm *qm)
+ {
+ 	struct hisi_qm_err_info *err_info = &qm->err_info;
+@@ -1052,12 +1077,12 @@ static const struct hisi_qm_err_ini sec_err_ini = {
+ 	.hw_err_disable		= sec_hw_error_disable,
+ 	.get_dev_hw_err_status	= sec_get_hw_err_status,
+ 	.clear_dev_hw_err_status = sec_clear_hw_err_status,
+-	.log_dev_hw_err		= sec_log_hw_error,
+ 	.open_axi_master_ooo	= sec_open_axi_master_ooo,
+ 	.open_sva_prefetch	= sec_open_sva_prefetch,
+ 	.close_sva_prefetch	= sec_close_sva_prefetch,
+ 	.show_last_dfx_regs	= sec_show_last_dfx_regs,
+ 	.err_info_init		= sec_err_info_init,
++	.get_err_result		= sec_get_err_result,
+ };
+ 
+ static int sec_pf_probe_init(struct sec_dev *sec)
+diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
+index d07e47b48be06a..80c2fcb1d26dcf 100644
+--- a/drivers/crypto/hisilicon/zip/zip_main.c
++++ b/drivers/crypto/hisilicon/zip/zip_main.c
+@@ -1059,11 +1059,15 @@ static u32 hisi_zip_get_hw_err_status(struct hisi_qm *qm)
+ 
+ static void hisi_zip_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
+ {
+-	u32 nfe;
+-
+ 	writel(err_sts, qm->io_base + HZIP_CORE_INT_SOURCE);
+-	nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_NFE_MASK_CAP, qm->cap_ver);
+-	writel(nfe, qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
++}
++
++static void hisi_zip_disable_error_report(struct hisi_qm *qm, u32 err_type)
++{
++	u32 nfe_mask;
++
++	nfe_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_NFE_MASK_CAP, qm->cap_ver);
++	writel(nfe_mask & (~err_type), qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
+ }
+ 
+ static void hisi_zip_open_axi_master_ooo(struct hisi_qm *qm)
+@@ -1093,6 +1097,27 @@ static void hisi_zip_close_axi_master_ooo(struct hisi_qm *qm)
+ 	       qm->io_base + HZIP_CORE_INT_SET);
+ }
+ 
++static enum acc_err_result hisi_zip_get_err_result(struct hisi_qm *qm)
++{
++	u32 err_status;
++
++	err_status = hisi_zip_get_hw_err_status(qm);
++	if (err_status) {
++		if (err_status & qm->err_info.ecc_2bits_mask)
++			qm->err_status.is_dev_ecc_mbit = true;
++		hisi_zip_log_hw_error(qm, err_status);
++
++		if (err_status & qm->err_info.dev_reset_mask) {
++			/* Disable the same error reporting until device is recovered. */
++			hisi_zip_disable_error_report(qm, err_status);
++			return ACC_ERR_NEED_RESET;
++		}
++		hisi_zip_clear_hw_err_status(qm, err_status);
++	}
++
++	return ACC_ERR_RECOVERED;
++}
++
+ static void hisi_zip_err_info_init(struct hisi_qm *qm)
+ {
+ 	struct hisi_qm_err_info *err_info = &qm->err_info;
+@@ -1120,13 +1145,13 @@ static const struct hisi_qm_err_ini hisi_zip_err_ini = {
+ 	.hw_err_disable		= hisi_zip_hw_error_disable,
+ 	.get_dev_hw_err_status	= hisi_zip_get_hw_err_status,
+ 	.clear_dev_hw_err_status = hisi_zip_clear_hw_err_status,
+-	.log_dev_hw_err		= hisi_zip_log_hw_error,
+ 	.open_axi_master_ooo	= hisi_zip_open_axi_master_ooo,
+ 	.close_axi_master_ooo	= hisi_zip_close_axi_master_ooo,
+ 	.open_sva_prefetch	= hisi_zip_open_sva_prefetch,
+ 	.close_sva_prefetch	= hisi_zip_close_sva_prefetch,
+ 	.show_last_dfx_regs	= hisi_zip_show_last_dfx_regs,
+ 	.err_info_init		= hisi_zip_err_info_init,
++	.get_err_result		= hisi_zip_get_err_result,
+ };
+ 
+ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
+diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
+index e17577b785c33a..f44c08f5f5ec4a 100644
+--- a/drivers/crypto/inside-secure/safexcel_hash.c
++++ b/drivers/crypto/inside-secure/safexcel_hash.c
+@@ -2093,7 +2093,7 @@ static int safexcel_xcbcmac_cra_init(struct crypto_tfm *tfm)
+ 
+ 	safexcel_ahash_cra_init(tfm);
+ 	ctx->aes = kmalloc(sizeof(*ctx->aes), GFP_KERNEL);
+-	return PTR_ERR_OR_ZERO(ctx->aes);
++	return ctx->aes == NULL ? -ENOMEM : 0;
+ }
+ 
+ static void safexcel_xcbcmac_cra_exit(struct crypto_tfm *tfm)
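[Editorial sketch, not part of the patch: the hunk above fixes a mixed-up error convention. kmalloc() signals failure with NULL, not with an ERR_PTR value, so PTR_ERR_OR_ZERO(NULL) evaluates to 0 and a failed allocation was reported as success. An illustration with simplified stand-ins for the kernel's err.h helpers:

#include <stdio.h>

/* simplified stand-ins for the kernel macros, illustration only */
#define MAX_ERRNO	4095
#define IS_ERR(p)	((unsigned long)(p) >= (unsigned long)-MAX_ERRNO)
#define PTR_ERR(p)	((long)(p))
#define PTR_ERR_OR_ZERO(p) (IS_ERR(p) ? PTR_ERR(p) : 0L)

int main(void)
{
	void *aes = NULL;	/* what kmalloc() returns on failure */

	printf("PTR_ERR_OR_ZERO: %ld\n", PTR_ERR_OR_ZERO(aes));	/* 0: looks OK */
	printf("NULL check:      %d\n", aes == NULL ? -12 : 0);		/* -ENOMEM */
	return 0;
}
]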
+diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
+index 78f0ea49254dbb..9faef33e54bd32 100644
+--- a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
++++ b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
+@@ -375,7 +375,7 @@ static const char *uof_get_name(struct adf_accel_dev *accel_dev, u32 obj_num,
+ 	else
+ 		id = -EINVAL;
+ 
+-	if (id < 0 || id > num_objs)
++	if (id < 0 || id >= num_objs)
+ 		return NULL;
+ 
+ 	return fw_objs[id];
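[Editorial sketch, not part of the patch: the same one-character bound fix appears in the 4xxx driver below. With id > num_objs, an id equal to num_objs slipped through and indexed one element past the end of the firmware-name array. A minimal reproduction of the corrected check (array contents hypothetical):

#include <stdio.h>

static const char *fw_objs[] = { "obj0", "obj1", "obj2" };
#define NUM_OBJS 3

static const char *uof_get_name(int id)
{
	if (id < 0 || id >= NUM_OBJS)	/* "id > NUM_OBJS" lets id == 3 through */
		return NULL;
	return fw_objs[id];
}

int main(void)
{
	const char *name = uof_get_name(3);

	printf("%s\n", name ? name : "(rejected)");
	return 0;
}
]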
+diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
+index 9fd7ec53b9f3d8..bbd92c017c28ed 100644
+--- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
++++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
+@@ -334,7 +334,7 @@ static const char *uof_get_name(struct adf_accel_dev *accel_dev, u32 obj_num,
+ 	else
+ 		id = -EINVAL;
+ 
+-	if (id < 0 || id > num_objs)
++	if (id < 0 || id >= num_objs)
+ 		return NULL;
+ 
+ 	return fw_objs[id];
+diff --git a/drivers/crypto/intel/qat/qat_common/adf_aer.c b/drivers/crypto/intel/qat/qat_common/adf_aer.c
+index ec7913ab00a2c7..4cb8bd83f57071 100644
+--- a/drivers/crypto/intel/qat/qat_common/adf_aer.c
++++ b/drivers/crypto/intel/qat/qat_common/adf_aer.c
+@@ -281,8 +281,11 @@ int adf_init_aer(void)
+ 		return -EFAULT;
+ 
+ 	device_sriov_wq = alloc_workqueue("qat_device_sriov_wq", 0, 0);
+-	if (!device_sriov_wq)
++	if (!device_sriov_wq) {
++		destroy_workqueue(device_reset_wq);
++		device_reset_wq = NULL;
+ 		return -EFAULT;
++	}
+ 
+ 	return 0;
+ }
+diff --git a/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c b/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c
+index c42f5c25aabdfa..4c11ad1ebcf0f8 100644
+--- a/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c
++++ b/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c
+@@ -22,18 +22,13 @@
+ void adf_dbgfs_init(struct adf_accel_dev *accel_dev)
+ {
+ 	char name[ADF_DEVICE_NAME_LENGTH];
+-	void *ret;
+ 
+ 	/* Create dev top level debugfs entry */
+ 	snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX,
+ 		 accel_dev->hw_device->dev_class->name,
+ 		 pci_name(accel_dev->accel_pci_dev.pci_dev));
+ 
+-	ret = debugfs_create_dir(name, NULL);
+-	if (IS_ERR_OR_NULL(ret))
+-		return;
+-
+-	accel_dev->debugfs_dir = ret;
++	accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
+ 
+ 	adf_cfg_dev_dbgfs_add(accel_dev);
+ }
+@@ -59,9 +54,6 @@ EXPORT_SYMBOL_GPL(adf_dbgfs_exit);
+  */
+ void adf_dbgfs_add(struct adf_accel_dev *accel_dev)
+ {
+-	if (!accel_dev->debugfs_dir)
+-		return;
+-
+ 	if (!accel_dev->is_vf) {
+ 		adf_fw_counters_dbgfs_add(accel_dev);
+ 		adf_heartbeat_dbgfs_add(accel_dev);
+@@ -77,9 +69,6 @@ void adf_dbgfs_add(struct adf_accel_dev *accel_dev)
+  */
+ void adf_dbgfs_rm(struct adf_accel_dev *accel_dev)
+ {
+-	if (!accel_dev->debugfs_dir)
+-		return;
+-
+ 	if (!accel_dev->is_vf) {
+ 		adf_tl_dbgfs_rm(accel_dev);
+ 		adf_cnv_dbgfs_rm(accel_dev);
+diff --git a/drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c b/drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c
+index 65bd26b25abce9..f93d9cca70cee4 100644
+--- a/drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c
++++ b/drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c
+@@ -90,10 +90,6 @@ void adf_exit_arb(struct adf_accel_dev *accel_dev)
+ 
+ 	hw_data->get_arb_info(&info);
+ 
+-	/* Reset arbiter configuration */
+-	for (i = 0; i < ADF_ARB_NUM; i++)
+-		WRITE_CSR_ARB_SARCONFIG(csr, arb_off, i, 0);
+-
+ 	/* Unmap worker threads to service arbiters */
+ 	for (i = 0; i < hw_data->num_engines; i++)
+ 		WRITE_CSR_ARB_WT2SAM(csr, arb_off, wt_off, i, 0);
+diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
+index c82775dbb557a7..77a6301f37f0af 100644
+--- a/drivers/crypto/mxs-dcp.c
++++ b/drivers/crypto/mxs-dcp.c
+@@ -225,21 +225,22 @@ static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
+ static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
+ 			   struct skcipher_request *req, int init)
+ {
+-	dma_addr_t key_phys = 0;
+-	dma_addr_t src_phys, dst_phys;
++	dma_addr_t key_phys, src_phys, dst_phys;
+ 	struct dcp *sdcp = global_sdcp;
+ 	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
+ 	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
+ 	bool key_referenced = actx->key_referenced;
+ 	int ret;
+ 
+-	if (!key_referenced) {
++	if (key_referenced)
++		key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key + AES_KEYSIZE_128,
++					  AES_KEYSIZE_128, DMA_TO_DEVICE);
++	else
+ 		key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
+ 					  2 * AES_KEYSIZE_128, DMA_TO_DEVICE);
+-		ret = dma_mapping_error(sdcp->dev, key_phys);
+-		if (ret)
+-			return ret;
+-	}
++	ret = dma_mapping_error(sdcp->dev, key_phys);
++	if (ret)
++		return ret;
+ 
+ 	src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
+ 				  DCP_BUF_SZ, DMA_TO_DEVICE);
+@@ -300,7 +301,10 @@ static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
+ err_dst:
+ 	dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
+ err_src:
+-	if (!key_referenced)
++	if (key_referenced)
++		dma_unmap_single(sdcp->dev, key_phys, AES_KEYSIZE_128,
++				 DMA_TO_DEVICE);
++	else
+ 		dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
+ 				 DMA_TO_DEVICE);
+ 	return ret;
+diff --git a/drivers/dax/pmem/Makefile b/drivers/dax/pmem/Makefile
+deleted file mode 100644
+index 191c31f0d4f008..00000000000000
+--- a/drivers/dax/pmem/Makefile
++++ /dev/null
+@@ -1,7 +0,0 @@
+-# SPDX-License-Identifier: GPL-2.0-only
+-obj-$(CONFIG_DEV_DAX_PMEM) += dax_pmem.o
+-obj-$(CONFIG_DEV_DAX_PMEM) += dax_pmem_core.o
+-
+-dax_pmem-y := pmem.o
+-dax_pmem_core-y := core.o
+-dax_pmem_compat-y := compat.o
+diff --git a/drivers/dax/pmem/pmem.c b/drivers/dax/pmem/pmem.c
+deleted file mode 100644
+index dfe91a2990fec4..00000000000000
+--- a/drivers/dax/pmem/pmem.c
++++ /dev/null
+@@ -1,10 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-/* Copyright(c) 2016 - 2018 Intel Corporation. All rights reserved. */
+-#include <linux/percpu-refcount.h>
+-#include <linux/memremap.h>
+-#include <linux/module.h>
+-#include <linux/pfn_t.h>
+-#include <linux/nd.h>
+-#include "../bus.h"
+-
+-
+diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
+index b46eb8a552d7be..fee04fdb08220c 100644
+--- a/drivers/dma-buf/Kconfig
++++ b/drivers/dma-buf/Kconfig
+@@ -36,6 +36,7 @@ config UDMABUF
+ 	depends on DMA_SHARED_BUFFER
+ 	depends on MEMFD_CREATE || COMPILE_TEST
+ 	depends on MMU
++	select VMAP_PFN
+ 	help
+ 	  A driver to let userspace turn memfd regions into dma-bufs.
+ 	  Qemu can use this to create host dmabufs for guest framebuffers.
+diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
+index 047c3cd2cefff6..a3638ccc15f571 100644
+--- a/drivers/dma-buf/udmabuf.c
++++ b/drivers/dma-buf/udmabuf.c
+@@ -74,21 +74,29 @@ static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
+ static int vmap_udmabuf(struct dma_buf *buf, struct iosys_map *map)
+ {
+ 	struct udmabuf *ubuf = buf->priv;
+-	struct page **pages;
++	unsigned long *pfns;
+ 	void *vaddr;
+ 	pgoff_t pg;
+ 
+ 	dma_resv_assert_held(buf->resv);
+ 
+-	pages = kmalloc_array(ubuf->pagecount, sizeof(*pages), GFP_KERNEL);
+-	if (!pages)
++	/**
++	 * HVO may free tail pages, so just use pfn to map each folio
++	 * into vmalloc area.
++	 */
++	pfns = kvmalloc_array(ubuf->pagecount, sizeof(*pfns), GFP_KERNEL);
++	if (!pfns)
+ 		return -ENOMEM;
+ 
+-	for (pg = 0; pg < ubuf->pagecount; pg++)
+-		pages[pg] = &ubuf->folios[pg]->page;
++	for (pg = 0; pg < ubuf->pagecount; pg++) {
++		unsigned long pfn = folio_pfn(ubuf->folios[pg]);
+ 
+-	vaddr = vm_map_ram(pages, ubuf->pagecount, -1);
+-	kfree(pages);
++		pfn += ubuf->offsets[pg] >> PAGE_SHIFT;
++		pfns[pg] = pfn;
++	}
++
++	vaddr = vmap_pfn(pfns, ubuf->pagecount, PAGE_KERNEL);
++	kvfree(pfns);
+ 	if (!vaddr)
+ 		return -EINVAL;
+ 
+@@ -196,8 +204,8 @@ static void release_udmabuf(struct dma_buf *buf)
+ 		put_sg_table(dev, ubuf->sg, DMA_BIDIRECTIONAL);
+ 
+ 	unpin_all_folios(&ubuf->unpin_list);
+-	kfree(ubuf->offsets);
+-	kfree(ubuf->folios);
++	kvfree(ubuf->offsets);
++	kvfree(ubuf->folios);
+ 	kfree(ubuf);
+ }
+ 
+@@ -322,14 +330,14 @@ static long udmabuf_create(struct miscdevice *device,
+ 	if (!ubuf->pagecount)
+ 		goto err;
+ 
+-	ubuf->folios = kmalloc_array(ubuf->pagecount, sizeof(*ubuf->folios),
+-				    GFP_KERNEL);
++	ubuf->folios = kvmalloc_array(ubuf->pagecount, sizeof(*ubuf->folios),
++				      GFP_KERNEL);
+ 	if (!ubuf->folios) {
+ 		ret = -ENOMEM;
+ 		goto err;
+ 	}
+-	ubuf->offsets = kcalloc(ubuf->pagecount, sizeof(*ubuf->offsets),
+-				GFP_KERNEL);
++	ubuf->offsets = kvcalloc(ubuf->pagecount, sizeof(*ubuf->offsets),
++				 GFP_KERNEL);
+ 	if (!ubuf->offsets) {
+ 		ret = -ENOMEM;
+ 		goto err;
+@@ -343,7 +351,7 @@ static long udmabuf_create(struct miscdevice *device,
+ 			goto err;
+ 
+ 		pgcnt = list[i].size >> PAGE_SHIFT;
+-		folios = kmalloc_array(pgcnt, sizeof(*folios), GFP_KERNEL);
++		folios = kvmalloc_array(pgcnt, sizeof(*folios), GFP_KERNEL);
+ 		if (!folios) {
+ 			ret = -ENOMEM;
+ 			goto err;
+@@ -353,7 +361,7 @@ static long udmabuf_create(struct miscdevice *device,
+ 		ret = memfd_pin_folios(memfd, list[i].offset, end,
+ 				       folios, pgcnt, &pgoff);
+ 		if (ret <= 0) {
+-			kfree(folios);
++			kvfree(folios);
+ 			if (!ret)
+ 				ret = -EINVAL;
+ 			goto err;
+@@ -382,7 +390,7 @@ static long udmabuf_create(struct miscdevice *device,
+ 			}
+ 		}
+ 
+-		kfree(folios);
++		kvfree(folios);
+ 		fput(memfd);
+ 		memfd = NULL;
+ 	}
+@@ -398,8 +406,8 @@ static long udmabuf_create(struct miscdevice *device,
+ 	if (memfd)
+ 		fput(memfd);
+ 	unpin_all_folios(&ubuf->unpin_list);
+-	kfree(ubuf->offsets);
+-	kfree(ubuf->folios);
++	kvfree(ubuf->offsets);
++	kvfree(ubuf->folios);
+ 	kfree(ubuf);
+ 	return ret;
+ }
+diff --git a/drivers/edac/bluefield_edac.c b/drivers/edac/bluefield_edac.c
+index 5b3164560648ee..0e539c1073510a 100644
+--- a/drivers/edac/bluefield_edac.c
++++ b/drivers/edac/bluefield_edac.c
+@@ -180,7 +180,7 @@ static void bluefield_edac_check(struct mem_ctl_info *mci)
+ static void bluefield_edac_init_dimms(struct mem_ctl_info *mci)
+ {
+ 	struct bluefield_edac_priv *priv = mci->pvt_info;
+-	int mem_ctrl_idx = mci->mc_idx;
++	u64 mem_ctrl_idx = mci->mc_idx;
+ 	struct dimm_info *dimm;
+ 	u64 smc_info, smc_arg;
+ 	int is_empty = 1, i;
+diff --git a/drivers/edac/fsl_ddr_edac.c b/drivers/edac/fsl_ddr_edac.c
+index d148d262d0d4de..339d94b3d04c7d 100644
+--- a/drivers/edac/fsl_ddr_edac.c
++++ b/drivers/edac/fsl_ddr_edac.c
+@@ -328,21 +328,25 @@ static void fsl_mc_check(struct mem_ctl_info *mci)
+ 	 * TODO: Add support for 32-bit wide buses
+ 	 */
+ 	if ((err_detect & DDR_EDE_SBE) && (bus_width == 64)) {
++		u64 cap = (u64)cap_high << 32 | cap_low;
++		u32 s = syndrome;
++
+ 		sbe_ecc_decode(cap_high, cap_low, syndrome,
+ 				&bad_data_bit, &bad_ecc_bit);
+ 
+-		if (bad_data_bit != -1)
+-			fsl_mc_printk(mci, KERN_ERR,
+-				"Faulty Data bit: %d\n", bad_data_bit);
+-		if (bad_ecc_bit != -1)
+-			fsl_mc_printk(mci, KERN_ERR,
+-				"Faulty ECC bit: %d\n", bad_ecc_bit);
++		if (bad_data_bit >= 0) {
++			fsl_mc_printk(mci, KERN_ERR, "Faulty Data bit: %d\n", bad_data_bit);
++			cap ^= 1ULL << bad_data_bit;
++		}
++
++		if (bad_ecc_bit >= 0) {
++			fsl_mc_printk(mci, KERN_ERR, "Faulty ECC bit: %d\n", bad_ecc_bit);
++			s ^= 1 << bad_ecc_bit;
++		}
+ 
+ 		fsl_mc_printk(mci, KERN_ERR,
+ 			"Expected Data / ECC:\t%#8.8x_%08x / %#2.2x\n",
+-			cap_high ^ (1 << (bad_data_bit - 32)),
+-			cap_low ^ (1 << bad_data_bit),
+-			syndrome ^ (1 << bad_ecc_bit));
++			upper_32_bits(cap), lower_32_bits(cap), s);
+ 	}
+ 
+ 	fsl_mc_printk(mci, KERN_ERR,
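[Editorial sketch, not part of the patch: the rework above also sidesteps undefined shifts in the old report. 1 << (bad_data_bit - 32) is a negative shift for data bits below 32, and the XOR ran even when no bad bit was found (bad_data_bit == -1). Flipping one bit in a combined 64-bit capture, as the new code does, is well defined for any bit 0..63:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t cap_high = 0xdeadbeef, cap_low = 0x12345678;	/* illustrative */
	int bad_data_bit = 40;	/* hypothetical faulty bit in the upper word */

	uint64_t cap = (uint64_t)cap_high << 32 | cap_low;

	if (bad_data_bit >= 0)
		cap ^= 1ULL << bad_data_bit;	/* defined for bits 0..63 */

	printf("expected data: %#8.8x_%08x\n",
	       (uint32_t)(cap >> 32), (uint32_t)cap);
	return 0;
}
]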
+diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c
+index e2a954de913b42..51556c72a96746 100644
+--- a/drivers/edac/i10nm_base.c
++++ b/drivers/edac/i10nm_base.c
+@@ -1036,6 +1036,7 @@ static int __init i10nm_init(void)
+ 		return -ENODEV;
+ 
+ 	cfg = (struct res_config *)id->driver_data;
++	skx_set_res_cfg(cfg);
+ 	res_cfg = cfg;
+ 
+ 	rc = skx_get_hi_lo(0x09a2, off, &tolm, &tohm);
+diff --git a/drivers/edac/igen6_edac.c b/drivers/edac/igen6_edac.c
+index 189a2fc29e74f5..07dacf8c10be3d 100644
+--- a/drivers/edac/igen6_edac.c
++++ b/drivers/edac/igen6_edac.c
+@@ -1245,6 +1245,7 @@ static int igen6_register_mci(int mc, u64 mchbar, struct pci_dev *pdev)
+ 	imc->mci = mci;
+ 	return 0;
+ fail3:
++	mci->pvt_info = NULL;
+ 	kfree(mci->ctl_name);
+ fail2:
+ 	edac_mc_free(mci);
+@@ -1269,6 +1270,7 @@ static void igen6_unregister_mcis(void)
+ 
+ 		edac_mc_del_mc(mci->pdev);
+ 		kfree(mci->ctl_name);
++		mci->pvt_info = NULL;
+ 		edac_mc_free(mci);
+ 		iounmap(imc->window);
+ 	}
+diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c
+index 85713646957b3e..6cf17af7d9112b 100644
+--- a/drivers/edac/skx_common.c
++++ b/drivers/edac/skx_common.c
+@@ -47,6 +47,7 @@ static skx_show_retry_log_f skx_show_retry_rd_err_log;
+ static u64 skx_tolm, skx_tohm;
+ static LIST_HEAD(dev_edac_list);
+ static bool skx_mem_cfg_2lm;
++static struct res_config *skx_res_cfg;
+ 
+ int skx_adxl_get(void)
+ {
+@@ -119,7 +120,7 @@ void skx_adxl_put(void)
+ }
+ EXPORT_SYMBOL_GPL(skx_adxl_put);
+ 
+-static bool skx_adxl_decode(struct decoded_addr *res, bool error_in_1st_level_mem)
++static bool skx_adxl_decode(struct decoded_addr *res, enum error_source err_src)
+ {
+ 	struct skx_dev *d;
+ 	int i, len = 0;
+@@ -135,8 +136,24 @@ static bool skx_adxl_decode(struct decoded_addr *res, bool error_in_1st_level_me
+ 		return false;
+ 	}
+ 
++	/*
++	 * GNR with a Flat2LM memory configuration may mistakenly classify
++	 * a near-memory error(DDR5) as a far-memory error(CXL), resulting
++	 * in the incorrect selection of decoded ADXL components.
++	 * To address this, prefetch the decoded far-memory controller ID
++	 * and adjust the error source to near-memory if the far-memory
++	 * controller ID is invalid.
++	 */
++	if (skx_res_cfg && skx_res_cfg->type == GNR && err_src == ERR_SRC_2LM_FM) {
++		res->imc = (int)adxl_values[component_indices[INDEX_MEMCTRL]];
++		if (res->imc == -1) {
++			err_src = ERR_SRC_2LM_NM;
++			edac_dbg(0, "Adjust the error source to near-memory.\n");
++		}
++	}
++
+ 	res->socket  = (int)adxl_values[component_indices[INDEX_SOCKET]];
+-	if (error_in_1st_level_mem) {
++	if (err_src == ERR_SRC_2LM_NM) {
+ 		res->imc     = (adxl_nm_bitmap & BIT_NM_MEMCTRL) ?
+ 			       (int)adxl_values[component_indices[INDEX_NM_MEMCTRL]] : -1;
+ 		res->channel = (adxl_nm_bitmap & BIT_NM_CHANNEL) ?
+@@ -191,6 +208,12 @@ void skx_set_mem_cfg(bool mem_cfg_2lm)
+ }
+ EXPORT_SYMBOL_GPL(skx_set_mem_cfg);
+ 
++void skx_set_res_cfg(struct res_config *cfg)
++{
++	skx_res_cfg = cfg;
++}
++EXPORT_SYMBOL_GPL(skx_set_res_cfg);
++
+ void skx_set_decode(skx_decode_f decode, skx_show_retry_log_f show_retry_log)
+ {
+ 	driver_decode = decode;
+@@ -620,31 +643,27 @@ static void skx_mce_output_error(struct mem_ctl_info *mci,
+ 			     optype, skx_msg);
+ }
+ 
+-static bool skx_error_in_1st_level_mem(const struct mce *m)
++static enum error_source skx_error_source(const struct mce *m)
+ {
+-	u32 errcode;
++	u32 errcode = GET_BITFIELD(m->status, 0, 15) & MCACOD_MEM_ERR_MASK;
+ 
+-	if (!skx_mem_cfg_2lm)
+-		return false;
+-
+-	errcode = GET_BITFIELD(m->status, 0, 15) & MCACOD_MEM_ERR_MASK;
+-
+-	return errcode == MCACOD_EXT_MEM_ERR;
+-}
++	if (errcode != MCACOD_MEM_CTL_ERR && errcode != MCACOD_EXT_MEM_ERR)
++		return ERR_SRC_NOT_MEMORY;
+ 
+-static bool skx_error_in_mem(const struct mce *m)
+-{
+-	u32 errcode;
++	if (!skx_mem_cfg_2lm)
++		return ERR_SRC_1LM;
+ 
+-	errcode = GET_BITFIELD(m->status, 0, 15) & MCACOD_MEM_ERR_MASK;
++	if (errcode == MCACOD_EXT_MEM_ERR)
++		return ERR_SRC_2LM_NM;
+ 
+-	return (errcode == MCACOD_MEM_CTL_ERR || errcode == MCACOD_EXT_MEM_ERR);
++	return ERR_SRC_2LM_FM;
+ }
+ 
+ int skx_mce_check_error(struct notifier_block *nb, unsigned long val,
+ 			void *data)
+ {
+ 	struct mce *mce = (struct mce *)data;
++	enum error_source err_src;
+ 	struct decoded_addr res;
+ 	struct mem_ctl_info *mci;
+ 	char *type;
+@@ -652,8 +671,10 @@ int skx_mce_check_error(struct notifier_block *nb, unsigned long val,
+ 	if (mce->kflags & MCE_HANDLED_CEC)
+ 		return NOTIFY_DONE;
+ 
++	err_src = skx_error_source(mce);
++
+ 	/* Ignore unless this is memory related with an address */
+-	if (!skx_error_in_mem(mce) || !(mce->status & MCI_STATUS_ADDRV))
++	if (err_src == ERR_SRC_NOT_MEMORY || !(mce->status & MCI_STATUS_ADDRV))
+ 		return NOTIFY_DONE;
+ 
+ 	memset(&res, 0, sizeof(res));
+@@ -667,7 +688,7 @@ int skx_mce_check_error(struct notifier_block *nb, unsigned long val,
+ 	/* Try driver decoder first */
+ 	if (!(driver_decode && driver_decode(&res))) {
+ 		/* Then try firmware decoder (ACPI DSM methods) */
+-		if (!(adxl_component_count && skx_adxl_decode(&res, skx_error_in_1st_level_mem(mce))))
++		if (!(adxl_component_count && skx_adxl_decode(&res, err_src)))
+ 			return NOTIFY_DONE;
+ 	}
+ 
+diff --git a/drivers/edac/skx_common.h b/drivers/edac/skx_common.h
+index f945c1bf5ca465..54bba8a62f727c 100644
+--- a/drivers/edac/skx_common.h
++++ b/drivers/edac/skx_common.h
+@@ -146,6 +146,13 @@ enum {
+ 	INDEX_MAX
+ };
+ 
++enum error_source {
++	ERR_SRC_1LM,
++	ERR_SRC_2LM_NM,
++	ERR_SRC_2LM_FM,
++	ERR_SRC_NOT_MEMORY,
++};
++
+ #define BIT_NM_MEMCTRL	BIT_ULL(INDEX_NM_MEMCTRL)
+ #define BIT_NM_CHANNEL	BIT_ULL(INDEX_NM_CHANNEL)
+ #define BIT_NM_DIMM	BIT_ULL(INDEX_NM_DIMM)
+@@ -234,6 +241,7 @@ int skx_adxl_get(void);
+ void skx_adxl_put(void);
+ void skx_set_decode(skx_decode_f decode, skx_show_retry_log_f show_retry_log);
+ void skx_set_mem_cfg(bool mem_cfg_2lm);
++void skx_set_res_cfg(struct res_config *cfg);
+ 
+ int skx_get_src_id(struct skx_dev *d, int off, u8 *id);
+ int skx_get_node_id(struct skx_dev *d, u8 *id);
+diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c
+index 94a6b4e667de14..f4d47577f83ee7 100644
+--- a/drivers/firmware/arm_scpi.c
++++ b/drivers/firmware/arm_scpi.c
+@@ -630,6 +630,9 @@ static struct scpi_dvfs_info *scpi_dvfs_get_info(u8 domain)
+ 	if (ret)
+ 		return ERR_PTR(ret);
+ 
++	if (!buf.opp_count)
++		return ERR_PTR(-ENOENT);
++
+ 	info = kmalloc(sizeof(*info), GFP_KERNEL);
+ 	if (!info)
+ 		return ERR_PTR(-ENOMEM);
+diff --git a/drivers/firmware/efi/libstub/efi-stub.c b/drivers/firmware/efi/libstub/efi-stub.c
+index 958a680e0660d4..2a1b43f9e0fa2b 100644
+--- a/drivers/firmware/efi/libstub/efi-stub.c
++++ b/drivers/firmware/efi/libstub/efi-stub.c
+@@ -129,7 +129,7 @@ efi_status_t efi_handle_cmdline(efi_loaded_image_t *image, char **cmdline_ptr)
+ 
+ 	if (IS_ENABLED(CONFIG_CMDLINE_EXTEND) ||
+ 	    IS_ENABLED(CONFIG_CMDLINE_FORCE) ||
+-	    cmdline_size == 0) {
++	    cmdline[0] == 0) {
+ 		status = efi_parse_options(CONFIG_CMDLINE);
+ 		if (status != EFI_SUCCESS) {
+ 			efi_err("Failed to parse options\n");
+diff --git a/drivers/firmware/efi/tpm.c b/drivers/firmware/efi/tpm.c
+index e8d69bd548f3fe..9c3613e6af158f 100644
+--- a/drivers/firmware/efi/tpm.c
++++ b/drivers/firmware/efi/tpm.c
+@@ -40,7 +40,8 @@ int __init efi_tpm_eventlog_init(void)
+ {
+ 	struct linux_efi_tpm_eventlog *log_tbl;
+ 	struct efi_tcg2_final_events_table *final_tbl;
+-	int tbl_size;
++	unsigned int tbl_size;
++	int final_tbl_size;
+ 	int ret = 0;
+ 
+ 	if (efi.tpm_log == EFI_INVALID_TABLE_ADDR) {
+@@ -80,26 +81,26 @@ int __init efi_tpm_eventlog_init(void)
+ 		goto out;
+ 	}
+ 
+-	tbl_size = 0;
++	final_tbl_size = 0;
+ 	if (final_tbl->nr_events != 0) {
+ 		void *events = (void *)efi.tpm_final_log
+ 				+ sizeof(final_tbl->version)
+ 				+ sizeof(final_tbl->nr_events);
+ 
+-		tbl_size = tpm2_calc_event_log_size(events,
+-						    final_tbl->nr_events,
+-						    log_tbl->log);
++		final_tbl_size = tpm2_calc_event_log_size(events,
++							  final_tbl->nr_events,
++							  log_tbl->log);
+ 	}
+ 
+-	if (tbl_size < 0) {
++	if (final_tbl_size < 0) {
+ 		pr_err(FW_BUG "Failed to parse event in TPM Final Events Log\n");
+ 		ret = -EINVAL;
+ 		goto out_calc;
+ 	}
+ 
+ 	memblock_reserve(efi.tpm_final_log,
+-			 tbl_size + sizeof(*final_tbl));
+-	efi_tpm_final_log_size = tbl_size;
++			 final_tbl_size + sizeof(*final_tbl));
++	efi_tpm_final_log_size = final_tbl_size;
+ 
+ out_calc:
+ 	early_memunmap(final_tbl, sizeof(*final_tbl));
+diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c
+index d304913314e494..24e666d5c3d1a2 100644
+--- a/drivers/firmware/google/gsmi.c
++++ b/drivers/firmware/google/gsmi.c
+@@ -918,7 +918,8 @@ static __init int gsmi_init(void)
+ 	gsmi_dev.pdev = platform_device_register_full(&gsmi_dev_info);
+ 	if (IS_ERR(gsmi_dev.pdev)) {
+ 		printk(KERN_ERR "gsmi: unable to register platform device\n");
+-		return PTR_ERR(gsmi_dev.pdev);
++		ret = PTR_ERR(gsmi_dev.pdev);
++		goto out_unregister;
+ 	}
+ 
+ 	/* SMI access needs to be serialized */
+@@ -1056,10 +1057,11 @@ static __init int gsmi_init(void)
+ 	gsmi_buf_free(gsmi_dev.name_buf);
+ 	kmem_cache_destroy(gsmi_dev.mem_pool);
+ 	platform_device_unregister(gsmi_dev.pdev);
+-	pr_info("gsmi: failed to load: %d\n", ret);
++out_unregister:
+ #ifdef CONFIG_PM
+ 	platform_driver_unregister(&gsmi_driver_info);
+ #endif
++	pr_info("gsmi: failed to load: %d\n", ret);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/gpio/gpio-exar.c b/drivers/gpio/gpio-exar.c
+index 5170fe7599cdf8..d5909a4f0433c1 100644
+--- a/drivers/gpio/gpio-exar.c
++++ b/drivers/gpio/gpio-exar.c
+@@ -99,11 +99,13 @@ static void exar_set_value(struct gpio_chip *chip, unsigned int offset,
+ 	struct exar_gpio_chip *exar_gpio = gpiochip_get_data(chip);
+ 	unsigned int addr = exar_offset_to_lvl_addr(exar_gpio, offset);
+ 	unsigned int bit = exar_offset_to_bit(exar_gpio, offset);
++	unsigned int bit_value = value ? BIT(bit) : 0;
+ 
+-	if (value)
+-		regmap_set_bits(exar_gpio->regmap, addr, BIT(bit));
+-	else
+-		regmap_clear_bits(exar_gpio->regmap, addr, BIT(bit));
++	/*
++	 * regmap_write_bits() forces value to be written when an external
++	 * pull up/down might otherwise indicate value was already set.
++	 */
++	regmap_write_bits(exar_gpio->regmap, addr, BIT(bit), bit_value);
+ }
+ 
+ static int exar_direction_output(struct gpio_chip *chip, unsigned int offset,
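[Editorial sketch, not part of the patch: the switch to regmap_write_bits() above is about no-op suppression. regmap_set_bits()/regmap_clear_bits() are read-modify-write helpers that skip the bus write when the read-back already matches, which is exactly what happens when an external pull-up/down makes the input read as the value being set. regmap_write_bits() forces the write. A rough model of the difference (simplified, not the real regmap internals):

#include <stdio.h>

static unsigned int reg = 0x1;	/* read-back: external pull-up shows bit 0 high */
static int bus_writes;

static void update_bits(unsigned int mask, unsigned int val, int force)
{
	unsigned int next = (reg & ~mask) | (val & mask);

	if (force || next != reg) {	/* non-forced variants skip no-op writes */
		reg = next;
		bus_writes++;
	}
}

int main(void)
{
	update_bits(0x1, 0x1, 0);	/* like regmap_set_bits(): skipped */
	update_bits(0x1, 0x1, 1);	/* like regmap_write_bits(): forced */
	printf("bus writes: %d\n", bus_writes);	/* 1 */
	return 0;
}
]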
+diff --git a/drivers/gpio/gpio-zevio.c b/drivers/gpio/gpio-zevio.c
+index 2de61337ad3b54..d7230fd83f5d68 100644
+--- a/drivers/gpio/gpio-zevio.c
++++ b/drivers/gpio/gpio-zevio.c
+@@ -11,6 +11,7 @@
+ #include <linux/io.h>
+ #include <linux/mod_devicetable.h>
+ #include <linux/platform_device.h>
++#include <linux/property.h>
+ #include <linux/slab.h>
+ #include <linux/spinlock.h>
+ 
+@@ -169,6 +170,7 @@ static const struct gpio_chip zevio_gpio_chip = {
+ /* Initialization */
+ static int zevio_gpio_probe(struct platform_device *pdev)
+ {
++	struct device *dev = &pdev->dev;
+ 	struct zevio_gpio *controller;
+ 	int status, i;
+ 
+@@ -180,6 +182,10 @@ static int zevio_gpio_probe(struct platform_device *pdev)
+ 	controller->chip = zevio_gpio_chip;
+ 	controller->chip.parent = &pdev->dev;
+ 
++	controller->chip.label = devm_kasprintf(dev, GFP_KERNEL, "%pfw", dev_fwnode(dev));
++	if (!controller->chip.label)
++		return -ENOMEM;
++
+ 	controller->regs = devm_platform_ioremap_resource(pdev, 0);
+ 	if (IS_ERR(controller->regs))
+ 		return dev_err_probe(&pdev->dev, PTR_ERR(controller->regs),
+diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
+index 1cb5a4f1929335..cf5bc77e2362c4 100644
+--- a/drivers/gpu/drm/Kconfig
++++ b/drivers/gpu/drm/Kconfig
+@@ -152,6 +152,7 @@ config DRM_PANIC_SCREEN
+ config DRM_PANIC_SCREEN_QR_CODE
+ 	bool "Add a panic screen with a QR code"
+ 	depends on DRM_PANIC && RUST
++	select ZLIB_DEFLATE
+ 	help
+ 	  This option adds a QR code generator, and a panic screen with a QR
+ 	  code. The QR code will contain the last lines of kmsg and other debug
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
+index 2ca12717313573..9d6345146495fc 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
+@@ -158,7 +158,7 @@ static int aca_smu_get_valid_aca_banks(struct amdgpu_device *adev, enum aca_smu_
+ 		return -EINVAL;
+ 	}
+ 
+-	if (start + count >= max_count)
++	if (start + count > max_count)
+ 		return -EINVAL;
+ 
+ 	count = min_t(int, count, max_count);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+index 4f08b153cb66d8..e41318bfbf4575 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+@@ -834,6 +834,9 @@ int amdgpu_amdkfd_unmap_hiq(struct amdgpu_device *adev, u32 doorbell_off,
+ 	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
+ 		return -EINVAL;
+ 
++	if (!kiq_ring->sched.ready || adev->job_hang)
++		return 0;
++
+ 	ring_funcs = kzalloc(sizeof(*ring_funcs), GFP_KERNEL);
+ 	if (!ring_funcs)
+ 		return -ENOMEM;
+@@ -858,8 +861,14 @@ int amdgpu_amdkfd_unmap_hiq(struct amdgpu_device *adev, u32 doorbell_off,
+ 
+ 	kiq->pmf->kiq_unmap_queues(kiq_ring, ring, RESET_QUEUES, 0, 0);
+ 
+-	if (kiq_ring->sched.ready && !adev->job_hang)
+-		r = amdgpu_ring_test_helper(kiq_ring);
++	/* Submit unmap queue packet */
++	amdgpu_ring_commit(kiq_ring);
++	/*
++	 * Ring test will do a basic scratch register change check. Just run
++	 * it to ensure that the unmap queues packet submitted above was
++	 * processed successfully before returning.
++	 */
++	r = amdgpu_ring_test_helper(kiq_ring);
+ 
+ 	spin_unlock(&kiq->ring_lock);
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+index 4bd61c169ca8d4..ca8091fd3a24f4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+@@ -1757,11 +1757,13 @@ int amdgpu_discovery_get_nps_info(struct amdgpu_device *adev,
+ 
+ 	switch (le16_to_cpu(nps_info->v1.header.version_major)) {
+ 	case 1:
++		mem_ranges = kvcalloc(nps_info->v1.count,
++				      sizeof(*mem_ranges),
++				      GFP_KERNEL);
++		if (!mem_ranges)
++			return -ENOMEM;
+ 		*nps_type = nps_info->v1.nps_type;
+ 		*range_cnt = nps_info->v1.count;
+-		mem_ranges = kvzalloc(
+-			*range_cnt * sizeof(struct amdgpu_gmc_memrange),
+-			GFP_KERNEL);
+ 		for (i = 0; i < *range_cnt; i++) {
+ 			mem_ranges[i].base_address =
+ 				nps_info->v1.instance_info[i].base_address;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+index f1ffab5a1eaed9..156abd2ba5a6c6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+@@ -525,6 +525,17 @@ int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id)
+ 	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
+ 		return -EINVAL;
+ 
++	if (!kiq_ring->sched.ready || adev->job_hang)
++		return 0;
++	/**
++	 * This is workaround: only skip kiq_ring test
++	 * during ras recovery in suspend stage for gfx9.4.3
++	 */
++	if ((amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
++	     amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4)) &&
++	    amdgpu_ras_in_recovery(adev))
++		return 0;
++
+ 	spin_lock(&kiq->ring_lock);
+ 	if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
+ 					adev->gfx.num_compute_rings)) {
+@@ -538,20 +549,15 @@ int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id)
+ 					   &adev->gfx.compute_ring[j],
+ 					   RESET_QUEUES, 0, 0);
+ 	}
+-
+-	/**
+-	 * This is workaround: only skip kiq_ring test
+-	 * during ras recovery in suspend stage for gfx9.4.3
++	/* Submit unmap queue packet */
++	amdgpu_ring_commit(kiq_ring);
++	/*
++	 * Ring test will do a basic scratch register change check. Just run
++	 * this to ensure that the unmap queue packets submitted above were
++	 * processed successfully before returning.
+ 	 */
+-	if ((amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
+-	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4)) &&
+-	    amdgpu_ras_in_recovery(adev)) {
+-		spin_unlock(&kiq->ring_lock);
+-		return 0;
+-	}
++	r = amdgpu_ring_test_helper(kiq_ring);
+ 
+-	if (kiq_ring->sched.ready && !adev->job_hang)
+-		r = amdgpu_ring_test_helper(kiq_ring);
+ 	spin_unlock(&kiq->ring_lock);
+ 
+ 	return r;
+@@ -579,8 +585,11 @@ int amdgpu_gfx_disable_kgq(struct amdgpu_device *adev, int xcc_id)
+ 	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
+ 		return -EINVAL;
+ 
+-	spin_lock(&kiq->ring_lock);
++	if (!adev->gfx.kiq[0].ring.sched.ready || adev->job_hang)
++		return 0;
++
+ 	if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
++		spin_lock(&kiq->ring_lock);
+ 		if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
+ 						adev->gfx.num_gfx_rings)) {
+ 			spin_unlock(&kiq->ring_lock);
+@@ -593,11 +602,17 @@ int amdgpu_gfx_disable_kgq(struct amdgpu_device *adev, int xcc_id)
+ 						   &adev->gfx.gfx_ring[j],
+ 						   PREEMPT_QUEUES, 0, 0);
+ 		}
+-	}
++		/* Submit unmap queue packet */
++		amdgpu_ring_commit(kiq_ring);
+ 
+-	if (adev->gfx.kiq[0].ring.sched.ready && !adev->job_hang)
++		/*
++		 * Ring test will do a basic scratch register change check.
++		 * Just run this to ensure that the unmap queue packets
++		 * submitted above were processed successfully before returning.
++		 */
+ 		r = amdgpu_ring_test_helper(kiq_ring);
+-	spin_unlock(&kiq->ring_lock);
++		spin_unlock(&kiq->ring_lock);
++	}
+ 
+ 	return r;
+ }
+@@ -702,7 +717,13 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id)
+ 		kiq->pmf->kiq_map_queues(kiq_ring,
+ 					 &adev->gfx.compute_ring[j]);
+ 	}
+-
++	/* Submit map queue packet */
++	amdgpu_ring_commit(kiq_ring);
++	/*
++	 * Ring test will do a basic scratch register change check. Just run
++	 * this to ensure that the map queue packets submitted above were
++	 * processed successfully before returning.
++	 */
+ 	r = amdgpu_ring_test_helper(kiq_ring);
+ 	spin_unlock(&kiq->ring_lock);
+ 	if (r)
+@@ -753,7 +774,13 @@ int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id)
+ 						 &adev->gfx.gfx_ring[j]);
+ 		}
+ 	}
+-
++	/* Submit map queue packet */
++	amdgpu_ring_commit(kiq_ring);
++	/*
++	 * Ring test will do a basic scratch register change check. Just run
++	 * this to ensure that the map queue packets submitted above were
++	 * processed successfully before returning.
++	 */
+ 	r = amdgpu_ring_test_helper(kiq_ring);
+ 	spin_unlock(&kiq->ring_lock);
+ 	if (r)
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+index bc8295812cc842..9d741695ca07d6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+@@ -4823,6 +4823,13 @@ static int gfx_v8_0_kcq_disable(struct amdgpu_device *adev)
+ 		amdgpu_ring_write(kiq_ring, 0);
+ 		amdgpu_ring_write(kiq_ring, 0);
+ 	}
++	/* Submit unmap queue packet */
++	amdgpu_ring_commit(kiq_ring);
++	/*
++	 * Ring test will do a basic scratch register change check. Just run
++	 * this to ensure that the unmap queue packets submitted above were
++	 * processed successfully before returning.
++	 */
+ 	r = amdgpu_ring_test_helper(kiq_ring);
+ 	if (r)
+ 		DRM_ERROR("KCQ disable failed\n");
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index 23f0573ae47b33..785a343a95f0ff 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -2418,6 +2418,8 @@ static int gfx_v9_0_sw_fini(void *handle)
+ 	amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring);
+ 	amdgpu_gfx_kiq_fini(adev, 0);
+ 
++	amdgpu_gfx_cleaner_shader_sw_fini(adev);
++
+ 	gfx_v9_0_mec_fini(adev);
+ 	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
+ 				&adev->gfx.rlc.clear_state_gpu_addr,
+diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
+index 86958cb2c2ab2b..aa5815bd633eba 100644
+--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
++++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
+@@ -674,11 +674,12 @@ void jpeg_v4_0_3_dec_ring_insert_start(struct amdgpu_ring *ring)
+ 		amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
+ 			0, 0, PACKETJ_TYPE0));
+ 		amdgpu_ring_write(ring, 0x62a04); /* PCTL0_MMHUB_DEEPSLEEP_IB */
+-	}
+ 
+-	amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
+-		0, 0, PACKETJ_TYPE0));
+-	amdgpu_ring_write(ring, 0x80004000);
++		amdgpu_ring_write(ring,
++				  PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, 0,
++					  0, PACKETJ_TYPE0));
++		amdgpu_ring_write(ring, 0x80004000);
++	}
+ }
+ 
+ /**
+@@ -694,11 +695,12 @@ void jpeg_v4_0_3_dec_ring_insert_end(struct amdgpu_ring *ring)
+ 		amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
+ 			0, 0, PACKETJ_TYPE0));
+ 		amdgpu_ring_write(ring, 0x62a04);
+-	}
+ 
+-	amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
+-		0, 0, PACKETJ_TYPE0));
+-	amdgpu_ring_write(ring, 0x00004000);
++		amdgpu_ring_write(ring,
++				  PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, 0,
++					  0, PACKETJ_TYPE0));
++		amdgpu_ring_write(ring, 0x00004000);
++	}
+ }
+ 
+ /**
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+index d4aa843aacfdd9..ff34bb1ac9db79 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+@@ -271,11 +271,9 @@ static int kfd_get_cu_occupancy(struct attribute *attr, char *buffer)
+ 	struct kfd_process *proc = NULL;
+ 	struct kfd_process_device *pdd = NULL;
+ 	int i;
+-	struct kfd_cu_occupancy cu_occupancy[AMDGPU_MAX_QUEUES];
++	struct kfd_cu_occupancy *cu_occupancy;
+ 	u32 queue_format;
+ 
+-	memset(cu_occupancy, 0x0, sizeof(cu_occupancy));
+-
+ 	pdd = container_of(attr, struct kfd_process_device, attr_cu_occupancy);
+ 	dev = pdd->dev;
+ 	if (dev->kfd2kgd->get_cu_occupancy == NULL)
+@@ -293,6 +291,10 @@ static int kfd_get_cu_occupancy(struct attribute *attr, char *buffer)
+ 	wave_cnt = 0;
+ 	max_waves_per_cu = 0;
+ 
++	cu_occupancy = kcalloc(AMDGPU_MAX_QUEUES, sizeof(*cu_occupancy), GFP_KERNEL);
++	if (!cu_occupancy)
++		return -ENOMEM;
++
+ 	/*
+ 	 * For GFX 9.4.3, fetch the CU occupancy from the first XCC in the partition.
+ 	 * For AQL queues, because of cooperative dispatch we multiply the wave count
+@@ -318,6 +320,7 @@ static int kfd_get_cu_occupancy(struct attribute *attr, char *buffer)
+ 
+ 	/* Translate wave count to number of compute units */
+ 	cu_cnt = (wave_cnt + (max_waves_per_cu - 1)) / max_waves_per_cu;
++	kfree(cu_occupancy);
+ 	return snprintf(buffer, PAGE_SIZE, "%d\n", cu_cnt);
+ }
+ 
+@@ -338,8 +341,8 @@ static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr,
+ 							      attr_sdma);
+ 		struct kfd_sdma_activity_handler_workarea sdma_activity_work_handler;
+ 
+-		INIT_WORK(&sdma_activity_work_handler.sdma_activity_work,
+-					kfd_sdma_activity_worker);
++		INIT_WORK_ONSTACK(&sdma_activity_work_handler.sdma_activity_work,
++				  kfd_sdma_activity_worker);
+ 
+ 		sdma_activity_work_handler.pdd = pdd;
+ 		sdma_activity_work_handler.sdma_activity_counter = 0;
+@@ -347,6 +350,7 @@ static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr,
+ 		schedule_work(&sdma_activity_work_handler.sdma_activity_work);
+ 
+ 		flush_work(&sdma_activity_work_handler.sdma_activity_work);
++		destroy_work_on_stack(&sdma_activity_work_handler.sdma_activity_work);
+ 
+ 		return snprintf(buffer, PAGE_SIZE, "%llu\n",
+ 				(sdma_activity_work_handler.sdma_activity_counter)/
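+
+Two fixes in this file share a theme. The cu_occupancy array moves off the
+stack to a kcalloc() buffer to shrink a large stack frame, while the sdma
+work item, which has to stay on the stack, now uses the ONSTACK helpers: it
+must be initialised with INIT_WORK_ONSTACK() and torn down with
+destroy_work_on_stack() once flush_work() guarantees it has finished. A
+hedged sketch of that lifecycle (onstack_ctx and do_sample() are
+illustrative names):
+
+	#include <linux/types.h>
+	#include <linux/workqueue.h>
+
+	struct onstack_ctx {
+		struct work_struct work;
+		u64 counter;
+	};
+
+	static void do_sample(struct work_struct *work)
+	{
+		struct onstack_ctx *ctx =
+			container_of(work, struct onstack_ctx, work);
+
+		ctx->counter++;		/* placeholder for the real sampling */
+	}
+
+	static u64 sample_once(void)
+	{
+		struct onstack_ctx ctx = { .counter = 0 };
+
+		INIT_WORK_ONSTACK(&ctx.work, do_sample);
+		schedule_work(&ctx.work);
+		flush_work(&ctx.work);		/* must finish before return */
+		destroy_work_on_stack(&ctx.work);
+		return ctx.counter;
+	}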
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 8d97f17ffe662a..24fbde7dd1c425 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -1696,6 +1696,26 @@ dm_allocate_gpu_mem(
+ 	return da->cpu_ptr;
+ }
+ 
++void
++dm_free_gpu_mem(
++		struct amdgpu_device *adev,
++		enum dc_gpu_mem_alloc_type type,
++		void *pvMem)
++{
++	struct dal_allocation *da;
++
++	/* walk the da list in DM */
++	list_for_each_entry(da, &adev->dm.da_list, list) {
++		if (pvMem == da->cpu_ptr) {
++			amdgpu_bo_free_kernel(&da->bo, &da->gpu_addr, &da->cpu_ptr);
++			list_del(&da->list);
++			kfree(da);
++			break;
++		}
++	}
++
++}
++
+ static enum dmub_status
+ dm_dmub_send_vbios_gpint_command(struct amdgpu_device *adev,
+ 				 enum dmub_gpint_command command_code,
+@@ -1762,16 +1782,20 @@ static struct dml2_soc_bb *dm_dmub_get_vbios_bounding_box(struct amdgpu_device *
+ 		/* Send the chunk */
+ 		ret = dm_dmub_send_vbios_gpint_command(adev, send_addrs[i], chunk, 30000);
+ 		if (ret != DMUB_STATUS_OK)
+-			/* No need to free bb here since it shall be done in dm_sw_fini() */
+-			return NULL;
++			goto free_bb;
+ 	}
+ 
+ 	/* Now ask DMUB to copy the bb */
+ 	ret = dm_dmub_send_vbios_gpint_command(adev, DMUB_GPINT__BB_COPY, 1, 200000);
+ 	if (ret != DMUB_STATUS_OK)
+-		return NULL;
++		goto free_bb;
+ 
+ 	return bb;
++
++free_bb:
++	dm_free_gpu_mem(adev, DC_MEM_ALLOC_TYPE_GART, (void *) bb);
++	return NULL;
++
+ }
+ 
+ static enum dmub_ips_disable_type dm_get_default_ips_mode(
+@@ -2541,11 +2565,11 @@ static int dm_sw_fini(void *handle)
+ 			amdgpu_bo_free_kernel(&da->bo, &da->gpu_addr, &da->cpu_ptr);
+ 			list_del(&da->list);
+ 			kfree(da);
++			adev->dm.bb_from_dmub = NULL;
+ 			break;
+ 		}
+ 	}
+ 
+-	adev->dm.bb_from_dmub = NULL;
+ 
+ 	kfree(adev->dm.dmub_fb_info);
+ 	adev->dm.dmub_fb_info = NULL;
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+index 90dfffec33cf49..a0bc2c0ac04d96 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+@@ -1004,6 +1004,9 @@ void *dm_allocate_gpu_mem(struct amdgpu_device *adev,
+ 						  enum dc_gpu_mem_alloc_type type,
+ 						  size_t size,
+ 						  long long *addr);
++void dm_free_gpu_mem(struct amdgpu_device *adev,
++						  enum dc_gpu_mem_alloc_type type,
++						  void *addr);
+ 
+ bool amdgpu_dm_is_headless(struct amdgpu_device *adev);
+ 
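+
+The new prototype pairs with dm_allocate_gpu_mem() directly above it. A
+hedged usage sketch (the size and error handling are illustrative only):
+
+	long long gpu_addr;
+	void *cpu_ptr;
+
+	cpu_ptr = dm_allocate_gpu_mem(adev, DC_MEM_ALLOC_TYPE_GART,
+				      4096, &gpu_addr);
+	if (!cpu_ptr)
+		return -ENOMEM;
+	/* ... use the buffer ... */
+	dm_free_gpu_mem(adev, DC_MEM_ALLOC_TYPE_GART, cpu_ptr);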
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+index 288be19db7c1b8..9be87b53251739 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+@@ -35,8 +35,8 @@
+ #include "amdgpu_dm_trace.h"
+ #include "amdgpu_dm_debugfs.h"
+ 
+-#define HPD_DETECTION_PERIOD_uS 5000000
+-#define HPD_DETECTION_TIME_uS 1000
++#define HPD_DETECTION_PERIOD_uS 2000000
++#define HPD_DETECTION_TIME_uS 100000
+ 
+ void amdgpu_dm_crtc_handle_vblank(struct amdgpu_crtc *acrtc)
+ {
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+index eea317dcbe8c34..9752548cc5b21d 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+@@ -1055,17 +1055,8 @@ void dm_helpers_free_gpu_mem(
+ 		void *pvMem)
+ {
+ 	struct amdgpu_device *adev = ctx->driver_context;
+-	struct dal_allocation *da;
+-
+-	/* walk the da list in DM */
+-	list_for_each_entry(da, &adev->dm.da_list, list) {
+-		if (pvMem == da->cpu_ptr) {
+-			amdgpu_bo_free_kernel(&da->bo, &da->gpu_addr, &da->cpu_ptr);
+-			list_del(&da->list);
+-			kfree(da);
+-			break;
+-		}
+-	}
++
++	dm_free_gpu_mem(adev, type, pvMem);
+ }
+ 
+ bool dm_helpers_dmub_outbox_interrupt_control(struct dc_context *ctx, bool enable)
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+index a08e8a0b696c60..32b025c92c63cf 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+@@ -1120,6 +1120,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
+ 	int i, k, ret;
+ 	bool debugfs_overwrite = false;
+ 	uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
++	struct drm_connector_state *new_conn_state;
+ 
+ 	memset(params, 0, sizeof(params));
+ 
+@@ -1127,7 +1128,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
+ 		return PTR_ERR(mst_state);
+ 
+ 	/* Set up params */
+-	DRM_DEBUG_DRIVER("%s: MST_DSC Set up params for %d streams\n", __func__, dc_state->stream_count);
++	DRM_DEBUG_DRIVER("%s: MST_DSC Try to set up params from %d streams\n", __func__, dc_state->stream_count);
+ 	for (i = 0; i < dc_state->stream_count; i++) {
+ 		struct dc_dsc_policy dsc_policy = {0};
+ 
+@@ -1143,6 +1144,14 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
+ 		if (!aconnector->mst_output_port)
+ 			continue;
+ 
++		new_conn_state = drm_atomic_get_new_connector_state(state, &aconnector->base);
++
++		if (!new_conn_state) {
++			DRM_DEBUG_DRIVER("%s:%d MST_DSC Skip the stream 0x%p with invalid new_conn_state\n",
++					__func__, __LINE__, stream);
++			continue;
++		}
++
+ 		stream->timing.flags.DSC = 0;
+ 
+ 		params[count].timing = &stream->timing;
+@@ -1175,6 +1184,8 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
+ 		count++;
+ 	}
+ 
++	DRM_DEBUG_DRIVER("%s: MST_DSC Params set up for %d streams\n", __func__, count);
++
+ 	if (count == 0) {
+ 		ASSERT(0);
+ 		return 0;
+@@ -1302,7 +1313,7 @@ static bool is_dsc_need_re_compute(
+ 			continue;
+ 
+ 		aconnector = (struct amdgpu_dm_connector *) stream->dm_stream_context;
+-		if (!aconnector || !aconnector->dsc_aux)
++		if (!aconnector)
+ 			continue;
+ 
+ 		stream_on_link[new_stream_on_link_num] = aconnector;
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
+index 7ee2be8f82c467..bb766c2a74176a 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
+@@ -881,6 +881,9 @@ void hwss_setup_dpp(union block_sequence_params *params)
+ 	struct dpp *dpp = pipe_ctx->plane_res.dpp;
+ 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+ 
++	if (!plane_state)
++		return;
++
+ 	if (dpp && dpp->funcs->dpp_setup) {
+ 		// program the input csc
+ 		dpp->funcs->dpp_setup(dpp,
+diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
+index a80c0858293207..36d12db8d02256 100644
+--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
+@@ -1923,9 +1923,9 @@ static void dcn20_program_pipe(
+ 				dc->res_pool->hubbub, pipe_ctx->plane_res.hubp->inst, pipe_ctx->hubp_regs.det_size);
+ 	}
+ 
+-	if (pipe_ctx->update_flags.raw ||
+-	    (pipe_ctx->plane_state && pipe_ctx->plane_state->update_flags.raw) ||
+-	    pipe_ctx->stream->update_flags.raw)
++	if (pipe_ctx->plane_state && (pipe_ctx->update_flags.raw ||
++	    pipe_ctx->plane_state->update_flags.raw ||
++	    pipe_ctx->stream->update_flags.raw))
+ 		dcn20_update_dchubp_dpp(dc, pipe_ctx, context);
+ 
+ 	if (pipe_ctx->plane_state && (pipe_ctx->update_flags.bits.enable ||
+diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
+index a2e9bb485c366e..a2675b121fe44b 100644
+--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
++++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
+@@ -2551,6 +2551,8 @@ static int __maybe_unused anx7625_runtime_pm_suspend(struct device *dev)
+ 	mutex_lock(&ctx->lock);
+ 
+ 	anx7625_stop_dp_work(ctx);
++	if (!ctx->pdata.panel_bridge)
++		anx7625_remove_edid(ctx);
+ 	anx7625_power_standby(ctx);
+ 
+ 	mutex_unlock(&ctx->lock);
+diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c
+index 87b8545fccc0af..e3a9832c742cb1 100644
+--- a/drivers/gpu/drm/bridge/ite-it6505.c
++++ b/drivers/gpu/drm/bridge/ite-it6505.c
+@@ -3107,6 +3107,8 @@ static __maybe_unused int it6505_bridge_suspend(struct device *dev)
+ {
+ 	struct it6505 *it6505 = dev_get_drvdata(dev);
+ 
++	it6505_remove_edid(it6505);
++
+ 	return it6505_poweroff(it6505);
+ }
+ 
+diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
+index f3afdab55c113e..47189587643a15 100644
+--- a/drivers/gpu/drm/bridge/tc358767.c
++++ b/drivers/gpu/drm/bridge/tc358767.c
+@@ -1714,6 +1714,13 @@ static const struct drm_edid *tc_edid_read(struct drm_bridge *bridge,
+ 					   struct drm_connector *connector)
+ {
+ 	struct tc_data *tc = bridge_to_tc(bridge);
++	int ret;
++
++	ret = tc_get_display_props(tc);
++	if (ret < 0) {
++		dev_err(tc->dev, "failed to read display props: %d\n", ret);
++		return NULL;
++	}
+ 
+ 	return drm_edid_read_ddc(connector, &tc->aux.ddc);
+ }
+diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
+index ad1dc638c83bb1..ce82c9451dfe7d 100644
+--- a/drivers/gpu/drm/drm_file.c
++++ b/drivers/gpu/drm/drm_file.c
+@@ -129,7 +129,7 @@ bool drm_dev_needs_global_mutex(struct drm_device *dev)
+  */
+ struct drm_file *drm_file_alloc(struct drm_minor *minor)
+ {
+-	static atomic64_t ident = ATOMIC_INIT(0);
++	static atomic64_t ident = ATOMIC64_INIT(0);
+ 	struct drm_device *dev = minor->dev;
+ 	struct drm_file *file;
+ 	int ret;
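+
+The initializer macro now matches the type: ATOMIC_INIT() is meant for the
+32-bit atomic_t, while atomic64_t has its own ATOMIC64_INIT(). A minimal
+sketch of the unique-identifier pattern used here (next_client_id() is a
+hypothetical wrapper):
+
+	#include <linux/atomic.h>
+	#include <linux/types.h>
+
+	static u64 next_client_id(void)
+	{
+		static atomic64_t ident = ATOMIC64_INIT(0);
+
+		/* A 64-bit counter will not realistically wrap. */
+		return atomic64_inc_return(&ident);
+	}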
+diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
+index 5ace481c190117..1ed68d3cd80bad 100644
+--- a/drivers/gpu/drm/drm_mm.c
++++ b/drivers/gpu/drm/drm_mm.c
+@@ -151,7 +151,7 @@ static void show_leaks(struct drm_mm *mm) { }
+ 
+ INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
+ 		     u64, __subtree_last,
+-		     START, LAST, static inline, drm_mm_interval_tree)
++		     START, LAST, static inline __maybe_unused, drm_mm_interval_tree)
+ 
+ struct drm_mm_node *
+ __drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last)
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+index 6500f3999c5fa5..19ec67a5a918e3 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+@@ -538,6 +538,16 @@ static int etnaviv_bind(struct device *dev)
+ 	priv->num_gpus = 0;
+ 	priv->shm_gfp_mask = GFP_HIGHUSER | __GFP_RETRY_MAYFAIL | __GFP_NOWARN;
+ 
++	/*
++	 * If the GPU is part of a system with DMA addressing limitations,
++	 * request pages for our SHM backend buffers from the DMA32 zone to
++	 * hopefully avoid performance-killing SWIOTLB bounce buffering.
++	 */
++	if (dma_addressing_limited(dev)) {
++		priv->shm_gfp_mask |= GFP_DMA32;
++		priv->shm_gfp_mask &= ~__GFP_HIGHMEM;
++	}
++
+ 	priv->cmdbuf_suballoc = etnaviv_cmdbuf_suballoc_new(drm->dev);
+ 	if (IS_ERR(priv->cmdbuf_suballoc)) {
+ 		dev_err(drm->dev, "Failed to create cmdbuf suballocator\n");
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+index 7c7f97793ddd0c..df0bc828a23483 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+@@ -839,14 +839,6 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
+ 	if (ret)
+ 		goto fail;
+ 
+-	/*
+-	 * If the GPU is part of a system with DMA addressing limitations,
+-	 * request pages for our SHM backend buffers from the DMA32 zone to
+-	 * hopefully avoid performance killing SWIOTLB bounce buffering.
+-	 */
+-	if (dma_addressing_limited(gpu->dev))
+-		priv->shm_gfp_mask |= GFP_DMA32;
+-
+ 	/* Create buffer: */
+ 	ret = etnaviv_cmdbuf_init(priv->cmdbuf_suballoc, &gpu->buffer,
+ 				  PAGE_SIZE);
+@@ -1330,6 +1322,8 @@ static void sync_point_perfmon_sample_pre(struct etnaviv_gpu *gpu,
+ {
+ 	u32 val;
+ 
++	mutex_lock(&gpu->lock);
++
+ 	/* disable clock gating */
+ 	val = gpu_read_power(gpu, VIVS_PM_POWER_CONTROLS);
+ 	val &= ~VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;
+@@ -1341,6 +1335,8 @@ static void sync_point_perfmon_sample_pre(struct etnaviv_gpu *gpu,
+ 	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, val);
+ 
+ 	sync_point_perfmon_sample(gpu, event, ETNA_PM_PROCESS_PRE);
++
++	mutex_unlock(&gpu->lock);
+ }
+ 
+ static void sync_point_perfmon_sample_post(struct etnaviv_gpu *gpu,
+@@ -1350,13 +1346,9 @@ static void sync_point_perfmon_sample_post(struct etnaviv_gpu *gpu,
+ 	unsigned int i;
+ 	u32 val;
+ 
+-	sync_point_perfmon_sample(gpu, event, ETNA_PM_PROCESS_POST);
+-
+-	for (i = 0; i < submit->nr_pmrs; i++) {
+-		const struct etnaviv_perfmon_request *pmr = submit->pmrs + i;
++	mutex_lock(&gpu->lock);
+ 
+-		*pmr->bo_vma = pmr->sequence;
+-	}
++	sync_point_perfmon_sample(gpu, event, ETNA_PM_PROCESS_POST);
+ 
+ 	/* disable debug register */
+ 	val = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
+@@ -1367,6 +1359,14 @@ static void sync_point_perfmon_sample_post(struct etnaviv_gpu *gpu,
+ 	val = gpu_read_power(gpu, VIVS_PM_POWER_CONTROLS);
+ 	val |= VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;
+ 	gpu_write_power(gpu, VIVS_PM_POWER_CONTROLS, val);
++
++	mutex_unlock(&gpu->lock);
++
++	for (i = 0; i < submit->nr_pmrs; i++) {
++		const struct etnaviv_perfmon_request *pmr = submit->pmrs + i;
++
++		*pmr->bo_vma = pmr->sequence;
++	}
+ }
+ 
+ 
+diff --git a/drivers/gpu/drm/fsl-dcu/Kconfig b/drivers/gpu/drm/fsl-dcu/Kconfig
+index 5ca71ef8732590..c9ee98693b48a4 100644
+--- a/drivers/gpu/drm/fsl-dcu/Kconfig
++++ b/drivers/gpu/drm/fsl-dcu/Kconfig
+@@ -8,6 +8,7 @@ config DRM_FSL_DCU
+ 	select DRM_PANEL
+ 	select REGMAP_MMIO
+ 	select VIDEOMODE_HELPERS
++	select MFD_SYSCON if SOC_LS1021A
+ 	help
+ 	  Choose this option if you have an Freescale DCU chipset.
+ 	  If M is selected the module will be called fsl-dcu-drm.
+diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+index ab6c0c6cd0e2e3..c4c3d41ee53097 100644
+--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
++++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+@@ -100,6 +100,7 @@ static void fsl_dcu_irq_uninstall(struct drm_device *dev)
+ static int fsl_dcu_load(struct drm_device *dev, unsigned long flags)
+ {
+ 	struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
++	struct regmap *scfg;
+ 	int ret;
+ 
+ 	ret = fsl_dcu_drm_modeset_init(fsl_dev);
+@@ -108,6 +109,20 @@ static int fsl_dcu_load(struct drm_device *dev, unsigned long flags)
+ 		return ret;
+ 	}
+ 
++	scfg = syscon_regmap_lookup_by_compatible("fsl,ls1021a-scfg");
++	if (PTR_ERR(scfg) != -ENODEV) {
++		/*
++		 * For simplicity, enable the PIXCLK unconditionally,
++		 * resulting in increased power consumption. Disabling
++		 * the clock in PM or on unload could be implemented as
++		 * a future improvement.
++		 */
++		ret = regmap_update_bits(scfg, SCFG_PIXCLKCR, SCFG_PIXCLKCR_PXCEN,
++					 SCFG_PIXCLKCR_PXCEN);
++		if (ret < 0)
++			return dev_err_probe(dev->dev, ret, "failed to enable pixclk\n");
++	}
++
+ 	ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
+ 	if (ret < 0) {
+ 		dev_err(dev->dev, "failed to initialize vblank\n");
+diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h
+index e2049a0e8a92a5..566396013c04a5 100644
+--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h
++++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h
+@@ -160,6 +160,9 @@
+ #define FSL_DCU_ARGB4444		12
+ #define FSL_DCU_YUV422			14
+ 
++#define SCFG_PIXCLKCR			0x28
++#define SCFG_PIXCLKCR_PXCEN		BIT(31)
++
+ #define VF610_LAYER_REG_NUM		9
+ #define LS1021A_LAYER_REG_NUM		10
+ 
+diff --git a/drivers/gpu/drm/imagination/pvr_ccb.c b/drivers/gpu/drm/imagination/pvr_ccb.c
+index 4deeac7ed40a4d..2bbdc05a3b9779 100644
+--- a/drivers/gpu/drm/imagination/pvr_ccb.c
++++ b/drivers/gpu/drm/imagination/pvr_ccb.c
+@@ -321,7 +321,7 @@ static int pvr_kccb_reserve_slot_sync(struct pvr_device *pvr_dev)
+ 	bool reserved = false;
+ 	u32 retries = 0;
+ 
+-	while ((jiffies - start_timestamp) < (u32)RESERVE_SLOT_TIMEOUT ||
++	while (time_before(jiffies, start_timestamp + RESERVE_SLOT_TIMEOUT) ||
+ 	       retries < RESERVE_SLOT_MIN_RETRIES) {
+ 		reserved = pvr_kccb_try_reserve_slot(pvr_dev);
+ 		if (reserved)
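+
+The fix above replaces open-coded jiffies arithmetic, truncated to u32, with
+the wrap-safe time_before() helper. A hedged sketch of the polling idiom
+(poll_with_timeout() and try_once() are illustrative, not PowerVR code):
+
+	#include <linux/delay.h>
+	#include <linux/jiffies.h>
+
+	static bool poll_with_timeout(bool (*try_once)(void *), void *arg,
+				      unsigned long timeout)
+	{
+		unsigned long deadline = jiffies + timeout;
+
+		/*
+		 * time_before() compares via signed subtraction, so it
+		 * stays correct across a jiffies wrap, unlike a plain
+		 * (jiffies - start) < timeout cast down to u32.
+		 */
+		while (time_before(jiffies, deadline)) {
+			if (try_once(arg))
+				return true;
+			usleep_range(100, 200);
+		}
+		return false;
+	}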
+diff --git a/drivers/gpu/drm/imagination/pvr_vm.c b/drivers/gpu/drm/imagination/pvr_vm.c
+index 7bd6ba4c6e8ab6..363f885a709826 100644
+--- a/drivers/gpu/drm/imagination/pvr_vm.c
++++ b/drivers/gpu/drm/imagination/pvr_vm.c
+@@ -654,9 +654,7 @@ pvr_vm_context_lookup(struct pvr_file *pvr_file, u32 handle)
+ 
+ 	xa_lock(&pvr_file->vm_ctx_handles);
+ 	vm_ctx = xa_load(&pvr_file->vm_ctx_handles, handle);
+-	if (vm_ctx)
+-		kref_get(&vm_ctx->ref_count);
+-
++	pvr_vm_context_get(vm_ctx);
+ 	xa_unlock(&pvr_file->vm_ctx_handles);
+ 
+ 	return vm_ctx;
+diff --git a/drivers/gpu/drm/imx/dcss/dcss-crtc.c b/drivers/gpu/drm/imx/dcss/dcss-crtc.c
+index 31267c00782fc1..af91e45b5d13b7 100644
+--- a/drivers/gpu/drm/imx/dcss/dcss-crtc.c
++++ b/drivers/gpu/drm/imx/dcss/dcss-crtc.c
+@@ -206,15 +206,13 @@ int dcss_crtc_init(struct dcss_crtc *crtc, struct drm_device *drm)
+ 	if (crtc->irq < 0)
+ 		return crtc->irq;
+ 
+-	ret = request_irq(crtc->irq, dcss_crtc_irq_handler,
+-			  0, "dcss_drm", crtc);
++	ret = request_irq(crtc->irq, dcss_crtc_irq_handler, IRQF_NO_AUTOEN,
++			  "dcss_drm", crtc);
+ 	if (ret) {
+ 		dev_err(dcss->dev, "irq request failed with %d.\n", ret);
+ 		return ret;
+ 	}
+ 
+-	disable_irq(crtc->irq);
+-
+ 	return 0;
+ }
+ 
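+
+Requesting the line with IRQF_NO_AUTOEN removes the window in which the
+handler could fire between request_irq() and the old disable_irq() call;
+the ipuv3 and a6xx hunks below make the same change. A hedged sketch of the
+pattern (my_handler() and the name string are illustrative):
+
+	#include <linux/interrupt.h>
+
+	static irqreturn_t my_handler(int irq, void *data)
+	{
+		return IRQ_HANDLED;	/* placeholder */
+	}
+
+	static int my_request_irq(int irq, void *data)
+	{
+		int ret;
+
+		/* The line stays disabled until enabled explicitly. */
+		ret = request_irq(irq, my_handler, IRQF_NO_AUTOEN,
+				  "my_dev", data);
+		if (ret)
+			return ret;
+
+		/* ... enable_irq(irq) once the device state is ready ... */
+		return 0;
+	}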
+diff --git a/drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c
+index ef29c9a61a4617..99db53e167bd02 100644
+--- a/drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c
++++ b/drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c
+@@ -410,14 +410,12 @@ static int ipu_drm_bind(struct device *dev, struct device *master, void *data)
+ 	}
+ 
+ 	ipu_crtc->irq = ipu_plane_irq(ipu_crtc->plane[0]);
+-	ret = devm_request_irq(ipu_crtc->dev, ipu_crtc->irq, ipu_irq_handler, 0,
+-			"imx_drm", ipu_crtc);
++	ret = devm_request_irq(ipu_crtc->dev, ipu_crtc->irq, ipu_irq_handler,
++			       IRQF_NO_AUTOEN, "imx_drm", ipu_crtc);
+ 	if (ret < 0) {
+ 		dev_err(ipu_crtc->dev, "irq request failed with %d.\n", ret);
+ 		return ret;
+ 	}
+-	/* Only enable IRQ when we actually need it to trigger work. */
+-	disable_irq(ipu_crtc->irq);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+index 37927bdd6fbed8..14db7376c712d1 100644
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+@@ -1522,15 +1522,13 @@ static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev,
+ 
+ 	irq = platform_get_irq_byname(pdev, name);
+ 
+-	ret = request_irq(irq, handler, IRQF_TRIGGER_HIGH, name, gmu);
++	ret = request_irq(irq, handler, IRQF_TRIGGER_HIGH | IRQF_NO_AUTOEN, name, gmu);
+ 	if (ret) {
+ 		DRM_DEV_ERROR(&pdev->dev, "Unable to get interrupt %s %d\n",
+ 			      name, ret);
+ 		return ret;
+ 	}
+ 
+-	disable_irq(irq);
+-
+ 	return irq;
+ }
+ 
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h
+index 1d3e9666c7411e..64c94e919a6980 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h
+@@ -156,18 +156,6 @@ static const struct dpu_lm_cfg msm8998_lm[] = {
+ 		.sblk = &msm8998_lm_sblk,
+ 		.lm_pair = LM_5,
+ 		.pingpong = PINGPONG_2,
+-	}, {
+-		.name = "lm_3", .id = LM_3,
+-		.base = 0x47000, .len = 0x320,
+-		.features = MIXER_MSM8998_MASK,
+-		.sblk = &msm8998_lm_sblk,
+-		.pingpong = PINGPONG_NONE,
+-	}, {
+-		.name = "lm_4", .id = LM_4,
+-		.base = 0x48000, .len = 0x320,
+-		.features = MIXER_MSM8998_MASK,
+-		.sblk = &msm8998_lm_sblk,
+-		.pingpong = PINGPONG_NONE,
+ 	}, {
+ 		.name = "lm_5", .id = LM_5,
+ 		.base = 0x49000, .len = 0x320,
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h
+index 7a23389a573272..72bd4f7e9e504c 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h
+@@ -155,19 +155,6 @@ static const struct dpu_lm_cfg sdm845_lm[] = {
+ 		.lm_pair = LM_5,
+ 		.pingpong = PINGPONG_2,
+ 		.dspp = DSPP_2,
+-	}, {
+-		.name = "lm_3", .id = LM_3,
+-		.base = 0x0, .len = 0x320,
+-		.features = MIXER_SDM845_MASK,
+-		.sblk = &sdm845_lm_sblk,
+-		.pingpong = PINGPONG_NONE,
+-		.dspp = DSPP_3,
+-	}, {
+-		.name = "lm_4", .id = LM_4,
+-		.base = 0x0, .len = 0x320,
+-		.features = MIXER_SDM845_MASK,
+-		.sblk = &sdm845_lm_sblk,
+-		.pingpong = PINGPONG_NONE,
+ 	}, {
+ 		.name = "lm_5", .id = LM_5,
+ 		.base = 0x49000, .len = 0x320,
+@@ -175,6 +162,7 @@ static const struct dpu_lm_cfg sdm845_lm[] = {
+ 		.sblk = &sdm845_lm_sblk,
+ 		.lm_pair = LM_2,
+ 		.pingpong = PINGPONG_3,
++		.dspp = DSPP_3,
+ 	},
+ };
+ 
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
+index 68fae048a9a837..260accc151d4b4 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
+@@ -80,7 +80,7 @@ static u64 _dpu_core_perf_calc_clk(const struct dpu_perf_cfg *perf_cfg,
+ 
+ 	mode = &state->adjusted_mode;
+ 
+-	crtc_clk = mode->vtotal * mode->hdisplay * drm_mode_vrefresh(mode);
++	crtc_clk = (u64)mode->vtotal * mode->hdisplay * drm_mode_vrefresh(mode);
+ 
+ 	drm_atomic_crtc_for_each_plane(plane, crtc) {
+ 		pstate = to_dpu_plane_state(plane->state);
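+
+The (u64) cast matters because vtotal, hdisplay and the refresh rate are all
+int-sized, so the product is otherwise evaluated in 32 bits before being
+widened. With illustrative 8K numbers, 4500 * 7680 * 120 = 4,147,200,000,
+which already exceeds INT_MAX. A minimal sketch:
+
+	#include <linux/types.h>
+
+	static u64 mode_clock_hz(int vtotal, int hdisplay, int vrefresh)
+	{
+		/* Widen the first factor so the whole chain is 64-bit. */
+		return (u64)vtotal * hdisplay * vrefresh;
+	}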
+diff --git a/drivers/gpu/drm/msm/msm_gpu_devfreq.c b/drivers/gpu/drm/msm/msm_gpu_devfreq.c
+index ea70c1c32d9401..6970b0f7f457c8 100644
+--- a/drivers/gpu/drm/msm/msm_gpu_devfreq.c
++++ b/drivers/gpu/drm/msm/msm_gpu_devfreq.c
+@@ -140,6 +140,7 @@ void msm_devfreq_init(struct msm_gpu *gpu)
+ {
+ 	struct msm_gpu_devfreq *df = &gpu->devfreq;
+ 	struct msm_drm_private *priv = gpu->dev->dev_private;
++	int ret;
+ 
+ 	/* We need target support to do devfreq */
+ 	if (!gpu->funcs->gpu_busy)
+@@ -156,8 +157,12 @@ void msm_devfreq_init(struct msm_gpu *gpu)
+ 
+ 	mutex_init(&df->lock);
+ 
+-	dev_pm_qos_add_request(&gpu->pdev->dev, &df->boost_freq,
+-			       DEV_PM_QOS_MIN_FREQUENCY, 0);
++	ret = dev_pm_qos_add_request(&gpu->pdev->dev, &df->boost_freq,
++				     DEV_PM_QOS_MIN_FREQUENCY, 0);
++	if (ret < 0) {
++		DRM_DEV_ERROR(&gpu->pdev->dev, "Couldn't initialize QoS\n");
++		return;
++	}
+ 
+ 	msm_devfreq_profile.initial_freq = gpu->fast_rate;
+ 
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+index 060c74a80eb14b..3ea447f6a45b51 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+@@ -443,6 +443,7 @@ gf100_gr_chan_new(struct nvkm_gr *base, struct nvkm_chan *fifoch,
+ 		ret = gf100_grctx_generate(gr, chan, fifoch->inst);
+ 		if (ret) {
+ 			nvkm_error(&base->engine.subdev, "failed to construct context\n");
++			mutex_unlock(&gr->fecs.mutex);
+ 			return ret;
+ 		}
+ 	}
+diff --git a/drivers/gpu/drm/omapdrm/dss/base.c b/drivers/gpu/drm/omapdrm/dss/base.c
+index 5f8002f6bb7a59..a4ac113e16904b 100644
+--- a/drivers/gpu/drm/omapdrm/dss/base.c
++++ b/drivers/gpu/drm/omapdrm/dss/base.c
+@@ -139,21 +139,13 @@ static bool omapdss_device_is_connected(struct omap_dss_device *dssdev)
+ }
+ 
+ int omapdss_device_connect(struct dss_device *dss,
+-			   struct omap_dss_device *src,
+ 			   struct omap_dss_device *dst)
+ {
+-	dev_dbg(&dss->pdev->dev, "connect(%s, %s)\n",
+-		src ? dev_name(src->dev) : "NULL",
++	dev_dbg(&dss->pdev->dev, "connect(%s)\n",
+ 		dst ? dev_name(dst->dev) : "NULL");
+ 
+-	if (!dst) {
+-		/*
+-		 * The destination is NULL when the source is connected to a
+-		 * bridge instead of a DSS device. Stop here, we will attach
+-		 * the bridge later when we will have a DRM encoder.
+-		 */
+-		return src && src->bridge ? 0 : -EINVAL;
+-	}
++	if (!dst)
++		return -EINVAL;
+ 
+ 	if (omapdss_device_is_connected(dst))
+ 		return -EBUSY;
+@@ -163,19 +155,14 @@ int omapdss_device_connect(struct dss_device *dss,
+ 	return 0;
+ }
+ 
+-void omapdss_device_disconnect(struct omap_dss_device *src,
++void omapdss_device_disconnect(struct dss_device *dss,
+ 			       struct omap_dss_device *dst)
+ {
+-	struct dss_device *dss = src ? src->dss : dst->dss;
+-
+-	dev_dbg(&dss->pdev->dev, "disconnect(%s, %s)\n",
+-		src ? dev_name(src->dev) : "NULL",
++	dev_dbg(&dss->pdev->dev, "disconnect(%s)\n",
+ 		dst ? dev_name(dst->dev) : "NULL");
+ 
+-	if (!dst) {
+-		WARN_ON(!src->bridge);
++	if (WARN_ON(!dst))
+ 		return;
+-	}
+ 
+ 	if (!dst->id && !omapdss_device_is_connected(dst)) {
+ 		WARN_ON(1);
+diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h
+index 040d5a3e33d680..4c22c09c93d523 100644
+--- a/drivers/gpu/drm/omapdrm/dss/omapdss.h
++++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h
+@@ -242,9 +242,8 @@ struct omap_dss_device *omapdss_device_get(struct omap_dss_device *dssdev);
+ void omapdss_device_put(struct omap_dss_device *dssdev);
+ struct omap_dss_device *omapdss_find_device_by_node(struct device_node *node);
+ int omapdss_device_connect(struct dss_device *dss,
+-			   struct omap_dss_device *src,
+ 			   struct omap_dss_device *dst);
+-void omapdss_device_disconnect(struct omap_dss_device *src,
++void omapdss_device_disconnect(struct dss_device *dss,
+ 			       struct omap_dss_device *dst);
+ 
+ int omap_dss_get_num_overlay_managers(void);
+diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
+index d3eac4817d7687..a982378aa14119 100644
+--- a/drivers/gpu/drm/omapdrm/omap_drv.c
++++ b/drivers/gpu/drm/omapdrm/omap_drv.c
+@@ -307,7 +307,7 @@ static void omap_disconnect_pipelines(struct drm_device *ddev)
+ 	for (i = 0; i < priv->num_pipes; i++) {
+ 		struct omap_drm_pipeline *pipe = &priv->pipes[i];
+ 
+-		omapdss_device_disconnect(NULL, pipe->output);
++		omapdss_device_disconnect(priv->dss, pipe->output);
+ 
+ 		omapdss_device_put(pipe->output);
+ 		pipe->output = NULL;
+@@ -325,7 +325,7 @@ static int omap_connect_pipelines(struct drm_device *ddev)
+ 	int r;
+ 
+ 	for_each_dss_output(output) {
+-		r = omapdss_device_connect(priv->dss, NULL, output);
++		r = omapdss_device_connect(priv->dss, output);
+ 		if (r == -EPROBE_DEFER) {
+ 			omapdss_device_put(output);
+ 			return r;
+diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
+index fdae677558f3ef..b9c67e4ca36054 100644
+--- a/drivers/gpu/drm/omapdrm/omap_gem.c
++++ b/drivers/gpu/drm/omapdrm/omap_gem.c
+@@ -1402,8 +1402,6 @@ struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
+ 
+ 	omap_obj = to_omap_bo(obj);
+ 
+-	mutex_lock(&omap_obj->lock);
+-
+ 	omap_obj->sgt = sgt;
+ 
+ 	if (omap_gem_sgt_is_contiguous(sgt, size)) {
+@@ -1418,21 +1416,17 @@ struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
+ 		pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
+ 		if (!pages) {
+ 			omap_gem_free_object(obj);
+-			obj = ERR_PTR(-ENOMEM);
+-			goto done;
++			return ERR_PTR(-ENOMEM);
+ 		}
+ 
+ 		omap_obj->pages = pages;
+ 		ret = drm_prime_sg_to_page_array(sgt, pages, npages);
+ 		if (ret) {
+ 			omap_gem_free_object(obj);
+-			obj = ERR_PTR(-ENOMEM);
+-			goto done;
++			return ERR_PTR(-ENOMEM);
+ 		}
+ 	}
+ 
+-done:
+-	mutex_unlock(&omap_obj->lock);
+ 	return obj;
+ }
+ 
+diff --git a/drivers/gpu/drm/panel/panel-newvision-nv3052c.c b/drivers/gpu/drm/panel/panel-newvision-nv3052c.c
+index d3baccfe6286b2..06e16a7c14a756 100644
+--- a/drivers/gpu/drm/panel/panel-newvision-nv3052c.c
++++ b/drivers/gpu/drm/panel/panel-newvision-nv3052c.c
+@@ -917,7 +917,7 @@ static const struct nv3052c_panel_info wl_355608_a8_panel_info = {
+ static const struct spi_device_id nv3052c_ids[] = {
+ 	{ "ltk035c5444t", },
+ 	{ "fs035vg158", },
+-	{ "wl-355608-a8", },
++	{ "rg35xx-plus-panel", },
+ 	{ /* sentinel */ }
+ };
+ MODULE_DEVICE_TABLE(spi, nv3052c_ids);
+diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35510.c b/drivers/gpu/drm/panel/panel-novatek-nt35510.c
+index 57686340de49fc..549b86f2cc2887 100644
+--- a/drivers/gpu/drm/panel/panel-novatek-nt35510.c
++++ b/drivers/gpu/drm/panel/panel-novatek-nt35510.c
+@@ -38,6 +38,7 @@
+ 
+ #define NT35510_CMD_CORRECT_GAMMA BIT(0)
+ #define NT35510_CMD_CONTROL_DISPLAY BIT(1)
++#define NT35510_CMD_SETVCMOFF BIT(2)
+ 
+ #define MCS_CMD_MAUCCTR		0xF0 /* Manufacturer command enable */
+ #define MCS_CMD_READ_ID1	0xDA
+@@ -721,11 +722,13 @@ static int nt35510_setup_power(struct nt35510 *nt)
+ 	if (ret)
+ 		return ret;
+ 
+-	ret = nt35510_send_long(nt, dsi, NT35510_P1_SETVCMOFF,
+-				NT35510_P1_VCMOFF_LEN,
+-				nt->conf->vcmoff);
+-	if (ret)
+-		return ret;
++	if (nt->conf->cmds & NT35510_CMD_SETVCMOFF) {
++		ret = nt35510_send_long(nt, dsi, NT35510_P1_SETVCMOFF,
++					NT35510_P1_VCMOFF_LEN,
++					nt->conf->vcmoff);
++		if (ret)
++			return ret;
++	}
+ 
+ 	/* Typically 10 ms */
+ 	usleep_range(10000, 20000);
+@@ -1319,7 +1322,7 @@ static const struct nt35510_config nt35510_frida_frd400b25025 = {
+ 	},
+ 	.mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ 			MIPI_DSI_MODE_LPM,
+-	.cmds = NT35510_CMD_CONTROL_DISPLAY,
++	.cmds = NT35510_CMD_CONTROL_DISPLAY | NT35510_CMD_SETVCMOFF,
+ 	/* 0x03: AVDD = 6.2V */
+ 	.avdd = { 0x03, 0x03, 0x03 },
+ 	/* 0x46: PCK = 2 x Hsync, BTP = 2.5 x VDDB */
+diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
+index 2d30da38c2c3e4..3385fd3ef41a47 100644
+--- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c
++++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
+@@ -38,7 +38,7 @@ static int panfrost_devfreq_target(struct device *dev, unsigned long *freq,
+ 		return PTR_ERR(opp);
+ 	dev_pm_opp_put(opp);
+ 
+-	err =  dev_pm_opp_set_rate(dev, *freq);
++	err = dev_pm_opp_set_rate(dev, *freq);
+ 	if (!err)
+ 		ptdev->pfdevfreq.current_frequency = *freq;
+ 
+@@ -182,6 +182,7 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev)
+ 	 * if any and will avoid a switch off by regulator_late_cleanup()
+ 	 */
+ 	ret = dev_pm_opp_set_opp(dev, opp);
++	dev_pm_opp_put(opp);
+ 	if (ret) {
+ 		DRM_DEV_ERROR(dev, "Couldn't set recommended OPP\n");
+ 		return ret;
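+
+The added dev_pm_opp_put() fixes a reference leak: the OPP lookup helpers
+return the entry with an elevated refcount that the caller must drop once it
+is done with it. A hedged sketch of the lookup/use/put pattern
+(set_initial_freq() is an illustrative name):
+
+	#include <linux/devfreq.h>
+	#include <linux/err.h>
+	#include <linux/pm_opp.h>
+
+	static int set_initial_freq(struct device *dev, unsigned long *freq)
+	{
+		struct dev_pm_opp *opp;
+		int err;
+
+		opp = devfreq_recommended_opp(dev, freq, 0);
+		if (IS_ERR(opp))
+			return PTR_ERR(opp);
+
+		err = dev_pm_opp_set_rate(dev, *freq);
+		dev_pm_opp_put(opp);	/* drop the lookup's reference */
+		return err;
+	}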
+diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c
+index fd8e44992184fa..b52dd510e0367b 100644
+--- a/drivers/gpu/drm/panfrost/panfrost_gpu.c
++++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c
+@@ -177,7 +177,6 @@ static void panfrost_gpu_init_quirks(struct panfrost_device *pfdev)
+ struct panfrost_model {
+ 	const char *name;
+ 	u32 id;
+-	u32 id_mask;
+ 	u64 features;
+ 	u64 issues;
+ 	struct {
+diff --git a/drivers/gpu/drm/panthor/panthor_devfreq.c b/drivers/gpu/drm/panthor/panthor_devfreq.c
+index c6d3c327cc24c0..ecc7a52bd688ee 100644
+--- a/drivers/gpu/drm/panthor/panthor_devfreq.c
++++ b/drivers/gpu/drm/panthor/panthor_devfreq.c
+@@ -62,14 +62,20 @@ static void panthor_devfreq_update_utilization(struct panthor_devfreq *pdevfreq)
+ static int panthor_devfreq_target(struct device *dev, unsigned long *freq,
+ 				  u32 flags)
+ {
++	struct panthor_device *ptdev = dev_get_drvdata(dev);
+ 	struct dev_pm_opp *opp;
++	int err;
+ 
+ 	opp = devfreq_recommended_opp(dev, freq, flags);
+ 	if (IS_ERR(opp))
+ 		return PTR_ERR(opp);
+ 	dev_pm_opp_put(opp);
+ 
+-	return dev_pm_opp_set_rate(dev, *freq);
++	err = dev_pm_opp_set_rate(dev, *freq);
++	if (!err)
++		ptdev->current_frequency = *freq;
++
++	return err;
+ }
+ 
+ static void panthor_devfreq_reset(struct panthor_devfreq *pdevfreq)
+@@ -130,6 +136,7 @@ int panthor_devfreq_init(struct panthor_device *ptdev)
+ 	struct panthor_devfreq *pdevfreq;
+ 	struct dev_pm_opp *opp;
+ 	unsigned long cur_freq;
++	unsigned long freq = ULONG_MAX;
+ 	int ret;
+ 
+ 	pdevfreq = drmm_kzalloc(&ptdev->base, sizeof(*ptdev->devfreq), GFP_KERNEL);
+@@ -156,12 +163,6 @@ int panthor_devfreq_init(struct panthor_device *ptdev)
+ 
+ 	cur_freq = clk_get_rate(ptdev->clks.core);
+ 
+-	opp = devfreq_recommended_opp(dev, &cur_freq, 0);
+-	if (IS_ERR(opp))
+-		return PTR_ERR(opp);
+-
+-	panthor_devfreq_profile.initial_freq = cur_freq;
+-
+ 	/* Regulator coupling only takes care of synchronizing/balancing voltage
+ 	 * updates, but the coupled regulator needs to be enabled manually.
+ 	 *
+@@ -192,16 +193,30 @@ int panthor_devfreq_init(struct panthor_device *ptdev)
+ 		return ret;
+ 	}
+ 
++	opp = devfreq_recommended_opp(dev, &cur_freq, 0);
++	if (IS_ERR(opp))
++		return PTR_ERR(opp);
++
++	panthor_devfreq_profile.initial_freq = cur_freq;
++	ptdev->current_frequency = cur_freq;
++
+ 	/*
+ 	 * Set the recommend OPP this will enable and configure the regulator
+ 	 * if any and will avoid a switch off by regulator_late_cleanup()
+ 	 */
+ 	ret = dev_pm_opp_set_opp(dev, opp);
++	dev_pm_opp_put(opp);
+ 	if (ret) {
+ 		DRM_DEV_ERROR(dev, "Couldn't set recommended OPP\n");
+ 		return ret;
+ 	}
+ 
++	/* Find the fastest defined rate */
++	opp = dev_pm_opp_find_freq_floor(dev, &freq);
++	if (IS_ERR(opp))
++		return PTR_ERR(opp);
++	ptdev->fast_rate = freq;
++
+ 	dev_pm_opp_put(opp);
+ 
+ 	/*
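+
+dev_pm_opp_find_freq_floor() rounds the requested frequency down to the
+nearest defined OPP, so seeding it with ULONG_MAX yields the fastest entry
+in the table, which is what fast_rate records above; the reference taken by
+the lookup is dropped right after. A hedged sketch (opp_fastest_rate() is an
+illustrative name):
+
+	#include <linux/err.h>
+	#include <linux/limits.h>
+	#include <linux/pm_opp.h>
+
+	static int opp_fastest_rate(struct device *dev, unsigned long *fast)
+	{
+		unsigned long freq = ULONG_MAX;
+		struct dev_pm_opp *opp;
+
+		opp = dev_pm_opp_find_freq_floor(dev, &freq);
+		if (IS_ERR(opp))
+			return PTR_ERR(opp);
+		dev_pm_opp_put(opp);	/* drop the lookup's reference */
+
+		*fast = freq;
+		return 0;
+	}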
+diff --git a/drivers/gpu/drm/panthor/panthor_device.h b/drivers/gpu/drm/panthor/panthor_device.h
+index e388c0472ba783..2109905813e8c4 100644
+--- a/drivers/gpu/drm/panthor/panthor_device.h
++++ b/drivers/gpu/drm/panthor/panthor_device.h
+@@ -66,6 +66,25 @@ struct panthor_irq {
+ 	atomic_t suspended;
+ };
+ 
++/**
++ * enum panthor_device_profiling_flags - Profiling state
++ */
++enum panthor_device_profiling_flags {
++	/** @PANTHOR_DEVICE_PROFILING_DISABLED: Profiling is disabled. */
++	PANTHOR_DEVICE_PROFILING_DISABLED = 0,
++
++	/** @PANTHOR_DEVICE_PROFILING_CYCLES: Sampling job cycles. */
++	PANTHOR_DEVICE_PROFILING_CYCLES = BIT(0),
++
++	/** @PANTHOR_DEVICE_PROFILING_TIMESTAMP: Sampling job timestamp. */
++	PANTHOR_DEVICE_PROFILING_TIMESTAMP = BIT(1),
++
++	/** @PANTHOR_DEVICE_PROFILING_ALL: Sampling everything. */
++	PANTHOR_DEVICE_PROFILING_ALL =
++	PANTHOR_DEVICE_PROFILING_CYCLES |
++	PANTHOR_DEVICE_PROFILING_TIMESTAMP,
++};
++
+ /**
+  * struct panthor_device - Panthor device
+  */
+@@ -162,6 +181,15 @@ struct panthor_device {
+ 		 */
+ 		struct page *dummy_latest_flush;
+ 	} pm;
++
++	/** @profile_mask: User-set profiling flags for job accounting. */
++	u32 profile_mask;
++
++	/** @current_frequency: Current device clock frequency. Set by DVFS. */
++	unsigned long current_frequency;
++
++	/** @fast_rate: Maximum device clock frequency. Set by DVFS. */
++	unsigned long fast_rate;
+ };
+ 
+ /**
+diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
+index 9929e22f4d8d2e..20135a9bc026ed 100644
+--- a/drivers/gpu/drm/panthor/panthor_sched.c
++++ b/drivers/gpu/drm/panthor/panthor_sched.c
+@@ -93,6 +93,9 @@
+ #define MIN_CSGS				3
+ #define MAX_CSG_PRIO				0xf
+ 
++#define NUM_INSTRS_PER_CACHE_LINE		(64 / sizeof(u64))
++#define MAX_INSTRS_PER_JOB			24
++
+ struct panthor_group;
+ 
+ /**
+@@ -476,6 +479,18 @@ struct panthor_queue {
+ 		 */
+ 		struct list_head in_flight_jobs;
+ 	} fence_ctx;
++
++	/** @profiling: Job profiling data slots and access information. */
++	struct {
++		/** @slots: Kernel BO holding the slots. */
++		struct panthor_kernel_bo *slots;
++
++		/** @slot_count: Number of jobs the ringbuffer can hold at once. */
++		u32 slot_count;
++
++		/** @seqno: Index of the next available profiling information slot. */
++		u32 seqno;
++	} profiling;
+ };
+ 
+ /**
+@@ -662,6 +677,18 @@ struct panthor_group {
+ 	struct list_head wait_node;
+ };
+ 
++struct panthor_job_profiling_data {
++	struct {
++		u64 before;
++		u64 after;
++	} cycles;
++
++	struct {
++		u64 before;
++		u64 after;
++	} time;
++};
++
+ /**
+  * group_queue_work() - Queue a group work
+  * @group: Group to queue the work for.
+@@ -775,6 +802,15 @@ struct panthor_job {
+ 
+ 	/** @done_fence: Fence signaled when the job is finished or cancelled. */
+ 	struct dma_fence *done_fence;
++
++	/** @profiling: Job profiling information. */
++	struct {
++		/** @mask: Current device job profiling enablement bitmask. */
++		u32 mask;
++
++		/** @slot: Job index in the profiling slots BO. */
++		u32 slot;
++	} profiling;
+ };
+ 
+ static void
+@@ -839,6 +875,7 @@ static void group_free_queue(struct panthor_group *group, struct panthor_queue *
+ 
+ 	panthor_kernel_bo_destroy(queue->ringbuf);
+ 	panthor_kernel_bo_destroy(queue->iface.mem);
++	panthor_kernel_bo_destroy(queue->profiling.slots);
+ 
+ 	/* Release the last_fence we were holding, if any. */
+ 	dma_fence_put(queue->fence_ctx.last_fence);
+@@ -1989,8 +2026,6 @@ tick_ctx_init(struct panthor_scheduler *sched,
+ 	}
+ }
+ 
+-#define NUM_INSTRS_PER_SLOT		16
+-
+ static void
+ group_term_post_processing(struct panthor_group *group)
+ {
+@@ -2829,65 +2864,198 @@ static void group_sync_upd_work(struct work_struct *work)
+ 	group_put(group);
+ }
+ 
+-static struct dma_fence *
+-queue_run_job(struct drm_sched_job *sched_job)
++struct panthor_job_ringbuf_instrs {
++	u64 buffer[MAX_INSTRS_PER_JOB];
++	u32 count;
++};
++
++struct panthor_job_instr {
++	u32 profile_mask;
++	u64 instr;
++};
++
++#define JOB_INSTR(__prof, __instr) \
++	{ \
++		.profile_mask = __prof, \
++		.instr = __instr, \
++	}
++
++static void
++copy_instrs_to_ringbuf(struct panthor_queue *queue,
++		       struct panthor_job *job,
++		       struct panthor_job_ringbuf_instrs *instrs)
++{
++	u64 ringbuf_size = panthor_kernel_bo_size(queue->ringbuf);
++	u64 start = job->ringbuf.start & (ringbuf_size - 1);
++	u64 size, written;
++
++	/*
++	 * We need to write a whole slot, including any trailing zeroes
++	 * that may come at the end of it. Because prepare_job_instrs()
++	 * has already zero-padded instrs.buffer, no extra padding is needed.
++	 */
++	instrs->count = ALIGN(instrs->count, NUM_INSTRS_PER_CACHE_LINE);
++	size = instrs->count * sizeof(u64);
++	WARN_ON(size > ringbuf_size);
++	written = min(ringbuf_size - start, size);
++
++	memcpy(queue->ringbuf->kmap + start, instrs->buffer, written);
++
++	if (written < size)
++		memcpy(queue->ringbuf->kmap,
++		       &instrs->buffer[written / sizeof(u64)],
++		       size - written);
++}
++
++struct panthor_job_cs_params {
++	u32 profile_mask;
++	u64 addr_reg; u64 val_reg;
++	u64 cycle_reg; u64 time_reg;
++	u64 sync_addr; u64 times_addr;
++	u64 cs_start; u64 cs_size;
++	u32 last_flush; u32 waitall_mask;
++};
++
++static void
++get_job_cs_params(struct panthor_job *job, struct panthor_job_cs_params *params)
+ {
+-	struct panthor_job *job = container_of(sched_job, struct panthor_job, base);
+ 	struct panthor_group *group = job->group;
+ 	struct panthor_queue *queue = group->queues[job->queue_idx];
+ 	struct panthor_device *ptdev = group->ptdev;
+ 	struct panthor_scheduler *sched = ptdev->scheduler;
+-	u32 ringbuf_size = panthor_kernel_bo_size(queue->ringbuf);
+-	u32 ringbuf_insert = queue->iface.input->insert & (ringbuf_size - 1);
+-	u64 addr_reg = ptdev->csif_info.cs_reg_count -
+-		       ptdev->csif_info.unpreserved_cs_reg_count;
+-	u64 val_reg = addr_reg + 2;
+-	u64 sync_addr = panthor_kernel_bo_gpuva(group->syncobjs) +
+-			job->queue_idx * sizeof(struct panthor_syncobj_64b);
+-	u32 waitall_mask = GENMASK(sched->sb_slot_count - 1, 0);
+-	struct dma_fence *done_fence;
+-	int ret;
+ 
+-	u64 call_instrs[NUM_INSTRS_PER_SLOT] = {
+-		/* MOV32 rX+2, cs.latest_flush */
+-		(2ull << 56) | (val_reg << 48) | job->call_info.latest_flush,
++	params->addr_reg = ptdev->csif_info.cs_reg_count -
++			   ptdev->csif_info.unpreserved_cs_reg_count;
++	params->val_reg = params->addr_reg + 2;
++	params->cycle_reg = params->addr_reg;
++	params->time_reg = params->val_reg;
+ 
+-		/* FLUSH_CACHE2.clean_inv_all.no_wait.signal(0) rX+2 */
+-		(36ull << 56) | (0ull << 48) | (val_reg << 40) | (0 << 16) | 0x233,
++	params->sync_addr = panthor_kernel_bo_gpuva(group->syncobjs) +
++			    job->queue_idx * sizeof(struct panthor_syncobj_64b);
++	params->times_addr = panthor_kernel_bo_gpuva(queue->profiling.slots) +
++			     (job->profiling.slot * sizeof(struct panthor_job_profiling_data));
++	params->waitall_mask = GENMASK(sched->sb_slot_count - 1, 0);
+ 
+-		/* MOV48 rX:rX+1, cs.start */
+-		(1ull << 56) | (addr_reg << 48) | job->call_info.start,
++	params->cs_start = job->call_info.start;
++	params->cs_size = job->call_info.size;
++	params->last_flush = job->call_info.latest_flush;
+ 
+-		/* MOV32 rX+2, cs.size */
+-		(2ull << 56) | (val_reg << 48) | job->call_info.size,
++	params->profile_mask = job->profiling.mask;
++}
+ 
+-		/* WAIT(0) => waits for FLUSH_CACHE2 instruction */
+-		(3ull << 56) | (1 << 16),
++#define JOB_INSTR_ALWAYS(instr) \
++	JOB_INSTR(PANTHOR_DEVICE_PROFILING_DISABLED, (instr))
++#define JOB_INSTR_TIMESTAMP(instr) \
++	JOB_INSTR(PANTHOR_DEVICE_PROFILING_TIMESTAMP, (instr))
++#define JOB_INSTR_CYCLES(instr) \
++	JOB_INSTR(PANTHOR_DEVICE_PROFILING_CYCLES, (instr))
+ 
++static void
++prepare_job_instrs(const struct panthor_job_cs_params *params,
++		   struct panthor_job_ringbuf_instrs *instrs)
++{
++	const struct panthor_job_instr instr_seq[] = {
++		/* MOV32 rX+2, cs.latest_flush */
++		JOB_INSTR_ALWAYS((2ull << 56) | (params->val_reg << 48) | params->last_flush),
++		/* FLUSH_CACHE2.clean_inv_all.no_wait.signal(0) rX+2 */
++		JOB_INSTR_ALWAYS((36ull << 56) | (0ull << 48) | (params->val_reg << 40) |
++				 (0 << 16) | 0x233),
++		/* MOV48 rX:rX+1, cycles_offset */
++		JOB_INSTR_CYCLES((1ull << 56) | (params->cycle_reg << 48) |
++				 (params->times_addr +
++				  offsetof(struct panthor_job_profiling_data, cycles.before))),
++		/* STORE_STATE cycles */
++		JOB_INSTR_CYCLES((40ull << 56) | (params->cycle_reg << 40) | (1ll << 32)),
++		/* MOV48 rX:rX+1, time_offset */
++		JOB_INSTR_TIMESTAMP((1ull << 56) | (params->time_reg << 48) |
++				    (params->times_addr +
++				     offsetof(struct panthor_job_profiling_data, time.before))),
++		/* STORE_STATE timer */
++		JOB_INSTR_TIMESTAMP((40ull << 56) | (params->time_reg << 40) | (0ll << 32)),
++		/* MOV48 rX:rX+1, cs.start */
++		JOB_INSTR_ALWAYS((1ull << 56) | (params->addr_reg << 48) | params->cs_start),
++		/* MOV32 rX+2, cs.size */
++		JOB_INSTR_ALWAYS((2ull << 56) | (params->val_reg << 48) | params->cs_size),
++		/* WAIT(0) => waits for FLUSH_CACHE2 instruction */
++		JOB_INSTR_ALWAYS((3ull << 56) | (1 << 16)),
+ 		/* CALL rX:rX+1, rX+2 */
+-		(32ull << 56) | (addr_reg << 40) | (val_reg << 32),
+-
++		JOB_INSTR_ALWAYS((32ull << 56) | (params->addr_reg << 40) |
++				 (params->val_reg << 32)),
++		/* MOV48 rX:rX+1, cycles_offset */
++		JOB_INSTR_CYCLES((1ull << 56) | (params->cycle_reg << 48) |
++				 (params->times_addr +
++				  offsetof(struct panthor_job_profiling_data, cycles.after))),
++		/* STORE_STATE cycles */
++		JOB_INSTR_CYCLES((40ull << 56) | (params->cycle_reg << 40) | (1ll << 32)),
++		/* MOV48 rX:rX+1, time_offset */
++		JOB_INSTR_TIMESTAMP((1ull << 56) | (params->time_reg << 48) |
++			  (params->times_addr +
++			   offsetof(struct panthor_job_profiling_data, time.after))),
++		/* STORE_STATE timer */
++		JOB_INSTR_TIMESTAMP((40ull << 56) | (params->time_reg << 40) | (0ll << 32)),
+ 		/* MOV48 rX:rX+1, sync_addr */
+-		(1ull << 56) | (addr_reg << 48) | sync_addr,
+-
++		JOB_INSTR_ALWAYS((1ull << 56) | (params->addr_reg << 48) | params->sync_addr),
+ 		/* MOV48 rX+2, #1 */
+-		(1ull << 56) | (val_reg << 48) | 1,
+-
++		JOB_INSTR_ALWAYS((1ull << 56) | (params->val_reg << 48) | 1),
+ 		/* WAIT(all) */
+-		(3ull << 56) | (waitall_mask << 16),
+-
++		JOB_INSTR_ALWAYS((3ull << 56) | (params->waitall_mask << 16)),
+ 		/* SYNC_ADD64.system_scope.propage_err.nowait rX:rX+1, rX+2*/
+-		(51ull << 56) | (0ull << 48) | (addr_reg << 40) | (val_reg << 32) | (0 << 16) | 1,
++		JOB_INSTR_ALWAYS((51ull << 56) | (0ull << 48) | (params->addr_reg << 40) |
++				 (params->val_reg << 32) | (0 << 16) | 1),
++		/* ERROR_BARRIER, so we can recover from faults at job boundaries. */
++		JOB_INSTR_ALWAYS((47ull << 56)),
++	};
++	u32 pad;
+ 
+-		/* ERROR_BARRIER, so we can recover from faults at job
+-		 * boundaries.
+-		 */
+-		(47ull << 56),
++	instrs->count = 0;
++
++	/* Needs to be cacheline aligned to please the prefetcher. */
++	static_assert(sizeof(instrs->buffer) % 64 == 0,
++		      "panthor_job_ringbuf_instrs::buffer is not aligned on a cacheline");
++
++	/* Make sure we have enough storage to store the whole sequence. */
++	static_assert(ALIGN(ARRAY_SIZE(instr_seq), NUM_INSTRS_PER_CACHE_LINE) ==
++		      ARRAY_SIZE(instrs->buffer),
++		      "instr_seq vs panthor_job_ringbuf_instrs::buffer size mismatch");
++
++	for (u32 i = 0; i < ARRAY_SIZE(instr_seq); i++) {
++		/* If the profile mask of this instruction is not enabled, skip it. */
++		if (instr_seq[i].profile_mask &&
++		    !(instr_seq[i].profile_mask & params->profile_mask))
++			continue;
++
++		instrs->buffer[instrs->count++] = instr_seq[i].instr;
++	}
++
++	pad = ALIGN(instrs->count, NUM_INSTRS_PER_CACHE_LINE);
++	memset(&instrs->buffer[instrs->count], 0,
++	       (pad - instrs->count) * sizeof(instrs->buffer[0]));
++	instrs->count = pad;
++}
++
++static u32 calc_job_credits(u32 profile_mask)
++{
++	struct panthor_job_ringbuf_instrs instrs;
++	struct panthor_job_cs_params params = {
++		.profile_mask = profile_mask,
+ 	};
+ 
+-	/* Need to be cacheline aligned to please the prefetcher. */
+-	static_assert(sizeof(call_instrs) % 64 == 0,
+-		      "call_instrs is not aligned on a cacheline");
++	prepare_job_instrs(&params, &instrs);
++	return instrs.count;
++}
++
++static struct dma_fence *
++queue_run_job(struct drm_sched_job *sched_job)
++{
++	struct panthor_job *job = container_of(sched_job, struct panthor_job, base);
++	struct panthor_group *group = job->group;
++	struct panthor_queue *queue = group->queues[job->queue_idx];
++	struct panthor_device *ptdev = group->ptdev;
++	struct panthor_scheduler *sched = ptdev->scheduler;
++	struct panthor_job_ringbuf_instrs instrs;
++	struct panthor_job_cs_params cs_params;
++	struct dma_fence *done_fence;
++	int ret;
+ 
+ 	/* Stream size is zero, nothing to do except making sure all previously
+ 	 * submitted jobs are done before we signal the
+@@ -2914,17 +3082,23 @@ queue_run_job(struct drm_sched_job *sched_job)
+ 		       queue->fence_ctx.id,
+ 		       atomic64_inc_return(&queue->fence_ctx.seqno));
+ 
+-	memcpy(queue->ringbuf->kmap + ringbuf_insert,
+-	       call_instrs, sizeof(call_instrs));
++	job->profiling.slot = queue->profiling.seqno++;
++	if (queue->profiling.seqno == queue->profiling.slot_count)
++		queue->profiling.seqno = 0;
++
++	job->ringbuf.start = queue->iface.input->insert;
++
++	get_job_cs_params(job, &cs_params);
++	prepare_job_instrs(&cs_params, &instrs);
++	copy_instrs_to_ringbuf(queue, job, &instrs);
++
++	job->ringbuf.end = job->ringbuf.start + (instrs.count * sizeof(u64));
+ 
+ 	panthor_job_get(&job->base);
+ 	spin_lock(&queue->fence_ctx.lock);
+ 	list_add_tail(&job->node, &queue->fence_ctx.in_flight_jobs);
+ 	spin_unlock(&queue->fence_ctx.lock);
+ 
+-	job->ringbuf.start = queue->iface.input->insert;
+-	job->ringbuf.end = job->ringbuf.start + sizeof(call_instrs);
+-
+ 	/* Make sure the ring buffer is updated before the INSERT
+ 	 * register.
+ 	 */
+@@ -3017,6 +3191,33 @@ static const struct drm_sched_backend_ops panthor_queue_sched_ops = {
+ 	.free_job = queue_free_job,
+ };
+ 
++static u32 calc_profiling_ringbuf_num_slots(struct panthor_device *ptdev,
++					    u32 cs_ringbuf_size)
++{
++	u32 min_profiled_job_instrs = U32_MAX;
++	u32 last_flag = fls(PANTHOR_DEVICE_PROFILING_ALL);
++
++	/*
++	 * We want to calculate the minimum size of a profiled job's CS:
++	 * because profiled jobs need additional instructions for the
++	 * sampling of performance metrics, they take up more space in
++	 * the queue's ringbuffer. This means we might not need as many job
++	 * slots for keeping track of their profiling information. What we
++	 * need is the maximum number of slots we should allocate to this end,
++	 * which matches the maximum number of profiled jobs we can place
++	 * simultaneously in the queue's ring buffer.
++	 * That has to be calculated separately for every single job profiling
++	 * flag, but not when job profiling is disabled, since unprofiled
++	 * jobs don't need to keep track of this at all.
++	 */
++	for (u32 i = 0; i < last_flag; i++) {
++		min_profiled_job_instrs =
++			min(min_profiled_job_instrs, calc_job_credits(BIT(i)));
++	}
++
++	return DIV_ROUND_UP(cs_ringbuf_size, min_profiled_job_instrs * sizeof(u64));
++}
++
+ static struct panthor_queue *
+ group_create_queue(struct panthor_group *group,
+ 		   const struct drm_panthor_queue_create *args)
+@@ -3070,9 +3271,35 @@ group_create_queue(struct panthor_group *group,
+ 		goto err_free_queue;
+ 	}
+ 
++	queue->profiling.slot_count =
++		calc_profiling_ringbuf_num_slots(group->ptdev, args->ringbuf_size);
++
++	queue->profiling.slots =
++		panthor_kernel_bo_create(group->ptdev, group->vm,
++					 queue->profiling.slot_count *
++					 sizeof(struct panthor_job_profiling_data),
++					 DRM_PANTHOR_BO_NO_MMAP,
++					 DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC |
++					 DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED,
++					 PANTHOR_VM_KERNEL_AUTO_VA);
++
++	if (IS_ERR(queue->profiling.slots)) {
++		ret = PTR_ERR(queue->profiling.slots);
++		goto err_free_queue;
++	}
++
++	ret = panthor_kernel_bo_vmap(queue->profiling.slots);
++	if (ret)
++		goto err_free_queue;
++
++	/*
++	 * The credit limit argument is the total number of instructions
++	 * the ring buffer can hold across all CS slots; depending on its
++	 * profiling status, one job may require twice as many as another.
++	 */
+ 	ret = drm_sched_init(&queue->scheduler, &panthor_queue_sched_ops,
+ 			     group->ptdev->scheduler->wq, 1,
+-			     args->ringbuf_size / (NUM_INSTRS_PER_SLOT * sizeof(u64)),
++			     args->ringbuf_size / sizeof(u64),
+ 			     0, msecs_to_jiffies(JOB_TIMEOUT_MS),
+ 			     group->ptdev->reset.wq,
+ 			     NULL, "panthor-queue", group->ptdev->base.dev);
+@@ -3380,6 +3607,7 @@ panthor_job_create(struct panthor_file *pfile,
+ {
+ 	struct panthor_group_pool *gpool = pfile->groups;
+ 	struct panthor_job *job;
++	u32 credits;
+ 	int ret;
+ 
+ 	if (qsubmit->pad)
+@@ -3438,9 +3666,16 @@ panthor_job_create(struct panthor_file *pfile,
+ 		}
+ 	}
+ 
++	job->profiling.mask = pfile->ptdev->profile_mask;
++	credits = calc_job_credits(job->profiling.mask);
++	if (credits == 0) {
++		ret = -EINVAL;
++		goto err_put_job;
++	}
++
+ 	ret = drm_sched_job_init(&job->base,
+ 				 &job->group->queues[job->queue_idx]->entity,
+-				 1, job->group);
++				 credits, job->group);
+ 	if (ret)
+ 		goto err_put_job;
+ 
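
Taken together, the panthor hunks size everything in instruction units: calc_job_credits() gives the per-job instruction count for a given profiling mask, and calc_profiling_ringbuf_num_slots() divides the ring buffer by the smallest such count. The arithmetic is easy to sanity-check in isolation; a minimal userspace sketch, assuming a hypothetical 32 KiB ring buffer and a hypothetical minimum of 32 instructions per profiled job:

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	uint32_t ringbuf_size = 32 * 1024;	/* bytes, hypothetical */
	uint32_t min_instrs = 32;		/* smallest profiled job, hypothetical */

	/*
	 * Every instruction is a u64, so a profiled job occupies at least
	 * min_instrs * 8 bytes; rounding the quotient up bounds how many
	 * profiled jobs can sit in the ring buffer at once.
	 */
	uint32_t slots = DIV_ROUND_UP(ringbuf_size,
				      min_instrs * (uint32_t)sizeof(uint64_t));

	printf("profiling slots: %u\n", slots);	/* 128 with these numbers */
	return 0;
}
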
+diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
+index 47aa06a9a94221..5b69cc8011b42b 100644
+--- a/drivers/gpu/drm/radeon/radeon_audio.c
++++ b/drivers/gpu/drm/radeon/radeon_audio.c
+@@ -760,16 +760,20 @@ static int radeon_audio_component_get_eld(struct device *kdev, int port,
+ 	if (!rdev->audio.enabled || !rdev->mode_info.mode_config_initialized)
+ 		return 0;
+ 
+-	list_for_each_entry(encoder, &rdev_to_drm(rdev)->mode_config.encoder_list, head) {
++	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
++		const struct drm_connector_helper_funcs *connector_funcs =
++				connector->helper_private;
++		encoder = connector_funcs->best_encoder(connector);
++
++		if (!encoder)
++			continue;
++
+ 		if (!radeon_encoder_is_digital(encoder))
+ 			continue;
+ 		radeon_encoder = to_radeon_encoder(encoder);
+ 		dig = radeon_encoder->enc_priv;
+ 		if (!dig->pin || dig->pin->id != port)
+ 			continue;
+-		connector = radeon_get_connector_for_encoder(encoder);
+-		if (!connector)
+-			continue;
+ 		*enabled = true;
+ 		ret = drm_eld_size(connector->eld);
+ 		memcpy(buf, connector->eld, min(max_bytes, ret));
+diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
+index cf4b23369dc449..75b4725d49c7e1 100644
+--- a/drivers/gpu/drm/v3d/v3d_drv.h
++++ b/drivers/gpu/drm/v3d/v3d_drv.h
+@@ -553,6 +553,7 @@ void v3d_irq_disable(struct v3d_dev *v3d);
+ void v3d_irq_reset(struct v3d_dev *v3d);
+ 
+ /* v3d_mmu.c */
++int v3d_mmu_flush_all(struct v3d_dev *v3d);
+ int v3d_mmu_set_page_table(struct v3d_dev *v3d);
+ void v3d_mmu_insert_ptes(struct v3d_bo *bo);
+ void v3d_mmu_remove_ptes(struct v3d_bo *bo);
+diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c
+index d469bda52c1a5e..20bf33702c3c4f 100644
+--- a/drivers/gpu/drm/v3d/v3d_irq.c
++++ b/drivers/gpu/drm/v3d/v3d_irq.c
+@@ -70,6 +70,8 @@ v3d_overflow_mem_work(struct work_struct *work)
+ 	list_add_tail(&bo->unref_head, &v3d->bin_job->render->unref_list);
+ 	spin_unlock_irqrestore(&v3d->job_lock, irqflags);
+ 
++	v3d_mmu_flush_all(v3d);
++
+ 	V3D_CORE_WRITE(0, V3D_PTB_BPOA, bo->node.start << V3D_MMU_PAGE_SHIFT);
+ 	V3D_CORE_WRITE(0, V3D_PTB_BPOS, obj->size);
+ 
+diff --git a/drivers/gpu/drm/v3d/v3d_mmu.c b/drivers/gpu/drm/v3d/v3d_mmu.c
+index 14f3af40d6f6d1..5bb7821c0243c6 100644
+--- a/drivers/gpu/drm/v3d/v3d_mmu.c
++++ b/drivers/gpu/drm/v3d/v3d_mmu.c
+@@ -28,36 +28,27 @@
+ #define V3D_PTE_WRITEABLE BIT(29)
+ #define V3D_PTE_VALID BIT(28)
+ 
+-static int v3d_mmu_flush_all(struct v3d_dev *v3d)
++int v3d_mmu_flush_all(struct v3d_dev *v3d)
+ {
+ 	int ret;
+ 
+-	/* Make sure that another flush isn't already running when we
+-	 * start this one.
+-	 */
+-	ret = wait_for(!(V3D_READ(V3D_MMU_CTL) &
+-			 V3D_MMU_CTL_TLB_CLEARING), 100);
+-	if (ret)
+-		dev_err(v3d->drm.dev, "TLB clear wait idle pre-wait failed\n");
+-
+-	V3D_WRITE(V3D_MMU_CTL, V3D_READ(V3D_MMU_CTL) |
+-		  V3D_MMU_CTL_TLB_CLEAR);
+-
+-	V3D_WRITE(V3D_MMUC_CONTROL,
+-		  V3D_MMUC_CONTROL_FLUSH |
++	V3D_WRITE(V3D_MMUC_CONTROL, V3D_MMUC_CONTROL_FLUSH |
+ 		  V3D_MMUC_CONTROL_ENABLE);
+ 
+-	ret = wait_for(!(V3D_READ(V3D_MMU_CTL) &
+-			 V3D_MMU_CTL_TLB_CLEARING), 100);
++	ret = wait_for(!(V3D_READ(V3D_MMUC_CONTROL) &
++			 V3D_MMUC_CONTROL_FLUSHING), 100);
+ 	if (ret) {
+-		dev_err(v3d->drm.dev, "TLB clear wait idle failed\n");
++		dev_err(v3d->drm.dev, "MMUC flush wait idle failed\n");
+ 		return ret;
+ 	}
+ 
+-	ret = wait_for(!(V3D_READ(V3D_MMUC_CONTROL) &
+-			 V3D_MMUC_CONTROL_FLUSHING), 100);
++	V3D_WRITE(V3D_MMU_CTL, V3D_READ(V3D_MMU_CTL) |
++		  V3D_MMU_CTL_TLB_CLEAR);
++
++	ret = wait_for(!(V3D_READ(V3D_MMU_CTL) &
++			 V3D_MMU_CTL_TLB_CLEARING), 100);
+ 	if (ret)
+-		dev_err(v3d->drm.dev, "MMUC flush wait idle failed\n");
++		dev_err(v3d->drm.dev, "MMU TLB clear wait idle failed\n");
+ 
+ 	return ret;
+ }
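
The v3d fix both flushes the MMU cache before clearing the TLB and polls the matching status bit after each kick. The same kick-then-poll shape can be expressed with the generic readl_poll_timeout() helper from <linux/iopoll.h>; a sketch with hypothetical register bits, not the driver's actual wait_for()-based code:

#include <linux/iopoll.h>

/*
 * Hypothetical sketch: start an operation, then poll its busy bit,
 * checking every 10 us and giving up after 100 ms (the same timeout
 * the wait_for() calls above use).
 */
static int kick_and_wait(void __iomem *reg, u32 kick_bit, u32 busy_bit)
{
	u32 val;

	writel(readl(reg) | kick_bit, reg);

	return readl_poll_timeout(reg, val, !(val & busy_bit), 10, 100 * 1000);
}
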
+diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
+index 08d2a273958287..4f935f1d50a943 100644
+--- a/drivers/gpu/drm/v3d/v3d_sched.c
++++ b/drivers/gpu/drm/v3d/v3d_sched.c
+@@ -135,8 +135,31 @@ v3d_job_start_stats(struct v3d_job *job, enum v3d_queue queue)
+ 	struct v3d_stats *global_stats = &v3d->queue[queue].stats;
+ 	struct v3d_stats *local_stats = &file->stats[queue];
+ 	u64 now = local_clock();
+-
+-	preempt_disable();
++	unsigned long flags;
++
++	/*
++	 * We only need to disable local interrupts to appease lockdep, which
++	 * otherwise would think v3d_job_start_stats vs v3d_stats_update has an
++	 * unsafe in-irq vs no-irq-off usage problem. This is a false positive
++	 * because all the locks are per queue and stats type, and all jobs are
++	 * strictly serialised, one at a time. More specifically:
++	 *
++	 * 1. Stats for GPU queues are updated from interrupt handlers under a
++	 *    spin lock and started here with preemption disabled.
++	 *
++	 * 2. Stats for CPU queues are updated from the worker with preemption
++	 *    disabled and equally started here with preemption disabled.
++	 *
++	 * Therefore both are consistent.
++	 *
++	 * 3. Because the next job can only be queued after the previous one
++	 *    has been signaled, and locks are per queue, there is also no
++	 *    scope for the start part to race with the update part.
++	 */
++	if (IS_ENABLED(CONFIG_LOCKDEP))
++		local_irq_save(flags);
++	else
++		preempt_disable();
+ 
+ 	write_seqcount_begin(&local_stats->lock);
+ 	local_stats->start_ns = now;
+@@ -146,7 +169,10 @@ v3d_job_start_stats(struct v3d_job *job, enum v3d_queue queue)
+ 	global_stats->start_ns = now;
+ 	write_seqcount_end(&global_stats->lock);
+ 
+-	preempt_enable();
++	if (IS_ENABLED(CONFIG_LOCKDEP))
++		local_irq_restore(flags);
++	else
++		preempt_enable();
+ }
+ 
+ static void
+@@ -167,11 +193,21 @@ v3d_job_update_stats(struct v3d_job *job, enum v3d_queue queue)
+ 	struct v3d_stats *global_stats = &v3d->queue[queue].stats;
+ 	struct v3d_stats *local_stats = &file->stats[queue];
+ 	u64 now = local_clock();
++	unsigned long flags;
++
++	/* See comment in v3d_job_start_stats() */
++	if (IS_ENABLED(CONFIG_LOCKDEP))
++		local_irq_save(flags);
++	else
++		preempt_disable();
+ 
+-	preempt_disable();
+ 	v3d_stats_update(local_stats, now);
+ 	v3d_stats_update(global_stats, now);
+-	preempt_enable();
++
++	if (IS_ENABLED(CONFIG_LOCKDEP))
++		local_irq_restore(flags);
++	else
++		preempt_enable();
+ }
+ 
+ static struct dma_fence *v3d_bin_job_run(struct drm_sched_job *sched_job)
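
The stats being guarded are seqcount-protected, which is why the writer only needs preemption (or, for lockdep's benefit, local interrupts) disabled rather than a lock. A minimal sketch of the writer/reader pairing on one field, assuming a bare seqcount_t; the driver's per-queue layout is more elaborate:

#include <linux/seqlock.h>

struct stats {
	seqcount_t lock;
	u64 start_ns;
};

/* Writer side: must not be preempted between begin and end. */
static void stats_start(struct stats *s, u64 now)
{
	preempt_disable();
	write_seqcount_begin(&s->lock);
	s->start_ns = now;
	write_seqcount_end(&s->lock);
	preempt_enable();
}

/* Reader side: lock-free, retries if it raced with a writer. */
static u64 stats_read(struct stats *s)
{
	unsigned int seq;
	u64 v;

	do {
		seq = read_seqcount_begin(&s->lock);
		v = s->start_ns;
	} while (read_seqcount_retry(&s->lock, seq));

	return v;
}
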
+diff --git a/drivers/gpu/drm/vc4/tests/vc4_mock.c b/drivers/gpu/drm/vc4/tests/vc4_mock.c
+index 0731a7d85d7abc..922849dd4b4787 100644
+--- a/drivers/gpu/drm/vc4/tests/vc4_mock.c
++++ b/drivers/gpu/drm/vc4/tests/vc4_mock.c
+@@ -155,11 +155,11 @@ KUNIT_DEFINE_ACTION_WRAPPER(kunit_action_drm_dev_unregister,
+ 			    drm_dev_unregister,
+ 			    struct drm_device *);
+ 
+-static struct vc4_dev *__mock_device(struct kunit *test, bool is_vc5)
++static struct vc4_dev *__mock_device(struct kunit *test, enum vc4_gen gen)
+ {
+ 	struct drm_device *drm;
+-	const struct drm_driver *drv = is_vc5 ? &vc5_drm_driver : &vc4_drm_driver;
+-	const struct vc4_mock_desc *desc = is_vc5 ? &vc5_mock : &vc4_mock;
++	const struct drm_driver *drv = (gen == VC4_GEN_5) ? &vc5_drm_driver : &vc4_drm_driver;
++	const struct vc4_mock_desc *desc = (gen == VC4_GEN_5) ? &vc5_mock : &vc4_mock;
+ 	struct vc4_dev *vc4;
+ 	struct device *dev;
+ 	int ret;
+@@ -173,7 +173,7 @@ static struct vc4_dev *__mock_device(struct kunit *test, bool is_vc5)
+ 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vc4);
+ 
+ 	vc4->dev = dev;
+-	vc4->is_vc5 = is_vc5;
++	vc4->gen = gen;
+ 
+ 	vc4->hvs = __vc4_hvs_alloc(vc4, NULL);
+ 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vc4->hvs);
+@@ -198,10 +198,10 @@ static struct vc4_dev *__mock_device(struct kunit *test, bool is_vc5)
+ 
+ struct vc4_dev *vc4_mock_device(struct kunit *test)
+ {
+-	return __mock_device(test, false);
++	return __mock_device(test, VC4_GEN_4);
+ }
+ 
+ struct vc4_dev *vc5_mock_device(struct kunit *test)
+ {
+-	return __mock_device(test, true);
++	return __mock_device(test, VC4_GEN_5);
+ }
+diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
+index 3f72be7490d5b7..2a85d08b19852a 100644
+--- a/drivers/gpu/drm/vc4/vc4_bo.c
++++ b/drivers/gpu/drm/vc4/vc4_bo.c
+@@ -251,7 +251,7 @@ void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo)
+ {
+ 	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);
+ 
+-	if (WARN_ON_ONCE(vc4->is_vc5))
++	if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
+ 		return;
+ 
+ 	mutex_lock(&vc4->purgeable.lock);
+@@ -265,7 +265,7 @@ static void vc4_bo_remove_from_purgeable_pool_locked(struct vc4_bo *bo)
+ {
+ 	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);
+ 
+-	if (WARN_ON_ONCE(vc4->is_vc5))
++	if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
+ 		return;
+ 
+ 	/* list_del_init() is used here because the caller might release
+@@ -396,7 +396,7 @@ struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
+ 	struct vc4_dev *vc4 = to_vc4_dev(dev);
+ 	struct vc4_bo *bo;
+ 
+-	if (WARN_ON_ONCE(vc4->is_vc5))
++	if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
+ 		return ERR_PTR(-ENODEV);
+ 
+ 	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
+@@ -427,7 +427,7 @@ struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
+ 	struct drm_gem_dma_object *dma_obj;
+ 	struct vc4_bo *bo;
+ 
+-	if (WARN_ON_ONCE(vc4->is_vc5))
++	if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
+ 		return ERR_PTR(-ENODEV);
+ 
+ 	if (size == 0)
+@@ -496,7 +496,7 @@ int vc4_bo_dumb_create(struct drm_file *file_priv,
+ 	struct vc4_bo *bo = NULL;
+ 	int ret;
+ 
+-	if (WARN_ON_ONCE(vc4->is_vc5))
++	if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
+ 		return -ENODEV;
+ 
+ 	ret = vc4_dumb_fixup_args(args);
+@@ -622,7 +622,7 @@ int vc4_bo_inc_usecnt(struct vc4_bo *bo)
+ 	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);
+ 	int ret;
+ 
+-	if (WARN_ON_ONCE(vc4->is_vc5))
++	if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
+ 		return -ENODEV;
+ 
+ 	/* Fast path: if the BO is already retained by someone, no need to
+@@ -661,7 +661,7 @@ void vc4_bo_dec_usecnt(struct vc4_bo *bo)
+ {
+ 	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);
+ 
+-	if (WARN_ON_ONCE(vc4->is_vc5))
++	if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
+ 		return;
+ 
+ 	/* Fast path: if the BO is still retained by someone, no need to test
+@@ -783,7 +783,7 @@ int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
+ 	struct vc4_bo *bo = NULL;
+ 	int ret;
+ 
+-	if (WARN_ON_ONCE(vc4->is_vc5))
++	if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
+ 		return -ENODEV;
+ 
+ 	ret = vc4_grab_bin_bo(vc4, vc4file);
+@@ -813,7 +813,7 @@ int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
+ 	struct drm_vc4_mmap_bo *args = data;
+ 	struct drm_gem_object *gem_obj;
+ 
+-	if (WARN_ON_ONCE(vc4->is_vc5))
++	if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
+ 		return -ENODEV;
+ 
+ 	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
+@@ -839,7 +839,7 @@ vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
+ 	struct vc4_bo *bo = NULL;
+ 	int ret;
+ 
+-	if (WARN_ON_ONCE(vc4->is_vc5))
++	if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
+ 		return -ENODEV;
+ 
+ 	if (args->size == 0)
+@@ -918,7 +918,7 @@ int vc4_set_tiling_ioctl(struct drm_device *dev, void *data,
+ 	struct vc4_bo *bo;
+ 	bool t_format;
+ 
+-	if (WARN_ON_ONCE(vc4->is_vc5))
++	if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
+ 		return -ENODEV;
+ 
+ 	if (args->flags != 0)
+@@ -964,7 +964,7 @@ int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
+ 	struct drm_gem_object *gem_obj;
+ 	struct vc4_bo *bo;
+ 
+-	if (WARN_ON_ONCE(vc4->is_vc5))
++	if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
+ 		return -ENODEV;
+ 
+ 	if (args->flags != 0 || args->modifier != 0)
+@@ -1007,7 +1007,7 @@ int vc4_bo_cache_init(struct drm_device *dev)
+ 	int ret;
+ 	int i;
+ 
+-	if (WARN_ON_ONCE(vc4->is_vc5))
++	if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
+ 		return -ENODEV;
+ 
+ 	/* Create the initial set of BO labels that the kernel will
+@@ -1071,7 +1071,7 @@ int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
+ 	struct drm_gem_object *gem_obj;
+ 	int ret = 0, label;
+ 
+-	if (WARN_ON_ONCE(vc4->is_vc5))
++	if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
+ 		return -ENODEV;
+ 
+ 	if (!args->len)
+diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
+index 8b5a7e5eb1466c..26a7cf7f646515 100644
+--- a/drivers/gpu/drm/vc4/vc4_crtc.c
++++ b/drivers/gpu/drm/vc4/vc4_crtc.c
+@@ -263,7 +263,7 @@ static u32 vc4_get_fifo_full_level(struct vc4_crtc *vc4_crtc, u32 format)
+ 		 * Removing 1 from the FIFO full level however
+ 		 * seems to completely remove that issue.
+ 		 */
+-		if (!vc4->is_vc5)
++		if (vc4->gen == VC4_GEN_4)
+ 			return fifo_len_bytes - 3 * HVS_FIFO_LATENCY_PIX - 1;
+ 
+ 		return fifo_len_bytes - 3 * HVS_FIFO_LATENCY_PIX;
+@@ -428,7 +428,7 @@ static void vc4_crtc_config_pv(struct drm_crtc *crtc, struct drm_encoder *encode
+ 	if (is_dsi)
+ 		CRTC_WRITE(PV_HACT_ACT, mode->hdisplay * pixel_rep);
+ 
+-	if (vc4->is_vc5)
++	if (vc4->gen == VC4_GEN_5)
+ 		CRTC_WRITE(PV_MUX_CFG,
+ 			   VC4_SET_FIELD(PV_MUX_CFG_RGB_PIXEL_MUX_MODE_NO_SWAP,
+ 					 PV_MUX_CFG_RGB_PIXEL_MUX_MODE));
+@@ -913,7 +913,7 @@ static int vc4_async_set_fence_cb(struct drm_device *dev,
+ 	struct dma_fence *fence;
+ 	int ret;
+ 
+-	if (!vc4->is_vc5) {
++	if (vc4->gen == VC4_GEN_4) {
+ 		struct vc4_bo *bo = to_vc4_bo(&dma_bo->base);
+ 
+ 		return vc4_queue_seqno_cb(dev, &flip_state->cb.seqno, bo->seqno,
+@@ -1000,7 +1000,7 @@ static int vc4_async_page_flip(struct drm_crtc *crtc,
+ 	struct vc4_bo *bo = to_vc4_bo(&dma_bo->base);
+ 	int ret;
+ 
+-	if (WARN_ON_ONCE(vc4->is_vc5))
++	if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
+ 		return -ENODEV;
+ 
+ 	/*
+@@ -1043,7 +1043,7 @@ int vc4_page_flip(struct drm_crtc *crtc,
+ 		struct drm_device *dev = crtc->dev;
+ 		struct vc4_dev *vc4 = to_vc4_dev(dev);
+ 
+-		if (vc4->is_vc5)
++		if (vc4->gen == VC4_GEN_5)
+ 			return vc5_async_page_flip(crtc, fb, event, flags);
+ 		else
+ 			return vc4_async_page_flip(crtc, fb, event, flags);
+@@ -1338,9 +1338,8 @@ int __vc4_crtc_init(struct drm_device *drm,
+ 
+ 	drm_crtc_helper_add(crtc, crtc_helper_funcs);
+ 
+-	if (!vc4->is_vc5) {
++	if (vc4->gen == VC4_GEN_4) {
+ 		drm_mode_crtc_set_gamma_size(crtc, ARRAY_SIZE(vc4_crtc->lut_r));
+-
+ 		drm_crtc_enable_color_mgmt(crtc, 0, false, crtc->gamma_size);
+ 
+ 		/* We support CTM, but only for one CRTC at a time. It's therefore
+diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
+index c133e96b8aca25..550324819f37fc 100644
+--- a/drivers/gpu/drm/vc4/vc4_drv.c
++++ b/drivers/gpu/drm/vc4/vc4_drv.c
+@@ -98,7 +98,7 @@ static int vc4_get_param_ioctl(struct drm_device *dev, void *data,
+ 	if (args->pad != 0)
+ 		return -EINVAL;
+ 
+-	if (WARN_ON_ONCE(vc4->is_vc5))
++	if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
+ 		return -ENODEV;
+ 
+ 	if (!vc4->v3d)
+@@ -147,7 +147,7 @@ static int vc4_open(struct drm_device *dev, struct drm_file *file)
+ 	struct vc4_dev *vc4 = to_vc4_dev(dev);
+ 	struct vc4_file *vc4file;
+ 
+-	if (WARN_ON_ONCE(vc4->is_vc5))
++	if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
+ 		return -ENODEV;
+ 
+ 	vc4file = kzalloc(sizeof(*vc4file), GFP_KERNEL);
+@@ -165,7 +165,7 @@ static void vc4_close(struct drm_device *dev, struct drm_file *file)
+ 	struct vc4_dev *vc4 = to_vc4_dev(dev);
+ 	struct vc4_file *vc4file = file->driver_priv;
+ 
+-	if (WARN_ON_ONCE(vc4->is_vc5))
++	if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
+ 		return;
+ 
+ 	if (vc4file->bin_bo_used)
+@@ -291,13 +291,17 @@ static int vc4_drm_bind(struct device *dev)
+ 	struct vc4_dev *vc4;
+ 	struct device_node *node;
+ 	struct drm_crtc *crtc;
+-	bool is_vc5;
++	enum vc4_gen gen;
+ 	int ret = 0;
+ 
+ 	dev->coherent_dma_mask = DMA_BIT_MASK(32);
+ 
+-	is_vc5 = of_device_is_compatible(dev->of_node, "brcm,bcm2711-vc5");
+-	if (is_vc5)
++	if (of_device_is_compatible(dev->of_node, "brcm,bcm2711-vc5"))
++		gen = VC4_GEN_5;
++	else
++		gen = VC4_GEN_4;
++
++	if (gen == VC4_GEN_5)
+ 		driver = &vc5_drm_driver;
+ 	else
+ 		driver = &vc4_drm_driver;
+@@ -315,13 +319,13 @@ static int vc4_drm_bind(struct device *dev)
+ 	vc4 = devm_drm_dev_alloc(dev, driver, struct vc4_dev, base);
+ 	if (IS_ERR(vc4))
+ 		return PTR_ERR(vc4);
+-	vc4->is_vc5 = is_vc5;
++	vc4->gen = gen;
+ 	vc4->dev = dev;
+ 
+ 	drm = &vc4->base;
+ 	platform_set_drvdata(pdev, drm);
+ 
+-	if (!is_vc5) {
++	if (gen == VC4_GEN_4) {
+ 		ret = drmm_mutex_init(drm, &vc4->bin_bo_lock);
+ 		if (ret)
+ 			goto err;
+@@ -335,7 +339,7 @@ static int vc4_drm_bind(struct device *dev)
+ 	if (ret)
+ 		goto err;
+ 
+-	if (!is_vc5) {
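
The wacom change works because the signedness of plain `char` is implementation-defined: it is signed on x86 but unsigned on most ARM ABIs, so `(char)frame[7]` silently turns negative tilt bytes into large positive values there. A runnable illustration, assuming a hypothetical raw byte of 0xF6 (that is, -10 in two's complement):

#include <stdio.h>

int main(void)
{
	unsigned char raw = 0xF6;	/* hypothetical tilt byte: -10 in two's complement */

	/*
	 * Plain char: -10 where char is signed (x86), 246 where it is
	 * unsigned (most ARM ABIs).
	 */
	printf("(char)        %d\n", (char)raw);

	/* signed char: -10 on every platform, which is what the axis needs. */
	printf("(signed char) %d\n", (signed char)raw);
	return 0;
}
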
++	if (gen == VC4_GEN_4) {
+ 		ret = vc4_gem_init(drm);
+ 		if (ret)
+ 			goto err;
+diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
+index 08e29fa825635d..dd452e6a114304 100644
+--- a/drivers/gpu/drm/vc4/vc4_drv.h
++++ b/drivers/gpu/drm/vc4/vc4_drv.h
+@@ -80,11 +80,16 @@ struct vc4_perfmon {
+ 	u64 counters[] __counted_by(ncounters);
+ };
+ 
++enum vc4_gen {
++	VC4_GEN_4,
++	VC4_GEN_5,
++};
++
+ struct vc4_dev {
+ 	struct drm_device base;
+ 	struct device *dev;
+ 
+-	bool is_vc5;
++	enum vc4_gen gen;
+ 
+ 	unsigned int irq;
+ 
+@@ -315,6 +320,7 @@ struct vc4_hvs {
+ 	struct platform_device *pdev;
+ 	void __iomem *regs;
+ 	u32 __iomem *dlist;
++	unsigned int dlist_mem_size;
+ 
+ 	struct clk *core_clk;
+ 
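
One payoff of switching from the is_vc5 bool to enum vc4_gen is that call sites can use ordered comparisons, as later hunks do with `vc4->gen >= VC4_GEN_5` and `vc4->gen > VC4_GEN_4`. A sketch of why that scales, with a purely hypothetical future entry:

enum vc4_gen {
	VC4_GEN_4,
	VC4_GEN_5,
	/*
	 * A hypothetical later SoC would slot in here (say, VC4_GEN_6):
	 * every "gen >= VC4_GEN_5" test keeps working unchanged, whereas
	 * a bool would need auditing at each call site.
	 */
};

static bool has_vc5_features(enum vc4_gen gen)
{
	return gen >= VC4_GEN_5;
}
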
+diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
+index 24fb1b57e1dd99..be9c0b72ebe869 100644
+--- a/drivers/gpu/drm/vc4/vc4_gem.c
++++ b/drivers/gpu/drm/vc4/vc4_gem.c
+@@ -76,7 +76,7 @@ vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
+ 	u32 i;
+ 	int ret = 0;
+ 
+-	if (WARN_ON_ONCE(vc4->is_vc5))
++	if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
+ 		return -ENODEV;
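
The nct6775 hunks swap the order of clamp_val() and DIV_ROUND_CLOSEST() because the macro adds or subtracts half the divisor before dividing; with val coming straight from a kstrtol() of user input, an extreme value such as LONG_MIN overflows that step before the clamp can intervene. Clamping first keeps the dividend in range. A runnable sketch with the macro expanded by hand and a hypothetical 127-step register mask:

#include <limits.h>
#include <stdio.h>

int main(void)
{
	long val = LONG_MIN;	/* what kstrtol() yields for "-9223372036854775808" */
	long max = 127;		/* hypothetical register mask */

	/*
	 * Old order: DIV_ROUND_CLOSEST(val, 1000) expands to
	 * (val - 500) / 1000 for negative val, and val - 500 overflows a
	 * long here: undefined behaviour before the clamp can intervene.
	 */

	/* New order: clamp first, so the dividend is at most max * 1000. */
	if (val < 0)
		val = 0;
	else if (val > max * 1000)
		val = max * 1000;

	printf("stored: %ld\n", (val + 500) / 1000);	/* 0 */
	return 0;
}
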
+ 
+ 	if (!vc4->v3d) {
+@@ -389,7 +389,7 @@ vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno, uint64_t timeout_ns,
+ 	unsigned long timeout_expire;
+ 	DEFINE_WAIT(wait);
+ 
+-	if (WARN_ON_ONCE(vc4->is_vc5))
++	if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
+ 		return -ENODEV;
+ 
+ 	if (vc4->finished_seqno >= seqno)
+@@ -474,7 +474,7 @@ vc4_submit_next_bin_job(struct drm_device *dev)
+ 	struct vc4_dev *vc4 = to_vc4_dev(dev);
+ 	struct vc4_exec_info *exec;
+ 
+-	if (WARN_ON_ONCE(vc4->is_vc5))
++	if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
+ 		return;
+ 
+ again:
+@@ -522,7 +522,7 @@ vc4_submit_next_render_job(struct drm_device *dev)
+ 	if (!exec)
+ 		return;
+ 
+-	if (WARN_ON_ONCE(vc4->is_vc5))
++	if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
+ 		return;
+ 
+ 	/* A previous RCL may have written to one of our textures, and
+@@ -543,7 +543,7 @@ vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec)
+ 	struct vc4_dev *vc4 = to_vc4_dev(dev);
+ 	bool was_empty = list_empty(&vc4->render_job_list);
+ 
+-	if (WARN_ON_ONCE(vc4->is_vc5))
++	if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
+ 		return;
+ 
+ 	list_move_tail(&exec->head, &vc4->render_job_list);
+@@ -970,7 +970,7 @@ vc4_job_handle_completed(struct vc4_dev *vc4)
+ 	unsigned long irqflags;
+ 	struct vc4_seqno_cb *cb, *cb_temp;
+ 
+-	if (WARN_ON_ONCE(vc4->is_vc5))
++	if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
+ 		return;
+ 
+ 	spin_lock_irqsave(&vc4->job_lock, irqflags);
+@@ -1009,7 +1009,7 @@ int vc4_queue_seqno_cb(struct drm_device *dev,
+ 	struct vc4_dev *vc4 = to_vc4_dev(dev);
+ 	unsigned long irqflags;
+ 
+-	if (WARN_ON_ONCE(vc4->is_vc5))
++	if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
+ 		return -ENODEV;
+ 
+ 	cb->func = func;
+@@ -1065,7 +1065,7 @@ vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
+ 	struct vc4_dev *vc4 = to_vc4_dev(dev);
+ 	struct drm_vc4_wait_seqno *args = data;
+ 
+-	if (WARN_ON_ONCE(vc4->is_vc5))
++	if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
+ 		return -ENODEV;
+ 
+ 	return vc4_wait_for_seqno_ioctl_helper(dev, args->seqno,
+@@ -1082,7 +1082,7 @@ vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
+ 	struct drm_gem_object *gem_obj;
+ 	struct vc4_bo *bo;
+ 
+-	if (WARN_ON_ONCE(vc4->is_vc5))
++	if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
+ 		return -ENODEV;
+ 
+ 	if (args->pad != 0)
+@@ -1131,7 +1131,7 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
+ 				  args->shader_rec_size,
+ 				  args->bo_handle_count);
+ 
+-	if (WARN_ON_ONCE(vc4->is_vc5))
++	if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
+ 		return -ENODEV;
+ 
+ 	if (!vc4->v3d) {
+@@ -1267,7 +1267,7 @@ int vc4_gem_init(struct drm_device *dev)
+ 	struct vc4_dev *vc4 = to_vc4_dev(dev);
+ 	int ret;
+ 
+-	if (WARN_ON_ONCE(vc4->is_vc5))
++	if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
+ 		return -ENODEV;
+ 
+ 	vc4->dma_fence_context = dma_fence_context_alloc(1);
+@@ -1326,7 +1326,7 @@ int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data,
+ 	struct vc4_bo *bo;
+ 	int ret;
+ 
+-	if (WARN_ON_ONCE(vc4->is_vc5))
++	if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
+ 		return -ENODEV;
+ 
+ 	switch (args->madv) {
+diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
+index 6611ab7c26a63c..2d7d3e90f3be44 100644
+--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
++++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
+@@ -147,6 +147,8 @@ static int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused)
+ 	if (!drm_dev_enter(drm, &idx))
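
The i2c-dev hunks move every kfree() of the message array out of i2cdev_ioctl_rdwr() and into the two ioctl entry points that allocated it, so the buffer has exactly one owner and the earlier mixed scheme (helper frees on some paths, caller on others) can no longer double-free or leak. A sketch of the resulting convention, with hypothetical names:

/*
 * Hypothetical sketch of the single-owner rule used above: the helper
 * only borrows the array and never frees it; the entry point that
 * allocated it frees it on every path, success or failure.
 */
static int borrow_msgs(struct i2c_msg *msgs, unsigned int nmsgs)
{
	/* ... perform the transfers; never kfree(msgs) here ... */
	return 0;
}

static long entry_point(void __user *uptr, unsigned int nmsgs)
{
	struct i2c_msg *msgs;
	long res;

	msgs = memdup_array_user(uptr, nmsgs, sizeof(*msgs));
	if (IS_ERR(msgs))
		return PTR_ERR(msgs);

	res = borrow_msgs(msgs, nmsgs);
	kfree(msgs);		/* one owner, one kfree, on every path */
	return res;
}
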
+ 		return -ENODEV;
+ 
++	WARN_ON(pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev));
++
+ 	drm_print_regset32(&p, &vc4_hdmi->hdmi_regset);
+ 	drm_print_regset32(&p, &vc4_hdmi->hd_regset);
+ 	drm_print_regset32(&p, &vc4_hdmi->cec_regset);
+@@ -156,6 +158,8 @@ static int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused)
+ 	drm_print_regset32(&p, &vc4_hdmi->ram_regset);
+ 	drm_print_regset32(&p, &vc4_hdmi->rm_regset);
+ 
++	pm_runtime_put(&vc4_hdmi->pdev->dev);
++
+ 	drm_dev_exit(idx);
+ 
+ 	return 0;
+@@ -2047,6 +2051,7 @@ static int vc4_hdmi_audio_prepare(struct device *dev, void *data,
+ 	struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
+ 	struct drm_device *drm = vc4_hdmi->connector.dev;
+ 	struct drm_connector *connector = &vc4_hdmi->connector;
++	struct vc4_dev *vc4 = to_vc4_dev(drm);
+ 	unsigned int sample_rate = params->sample_rate;
+ 	unsigned int channels = params->channels;
+ 	unsigned long flags;
+@@ -2104,11 +2109,18 @@ static int vc4_hdmi_audio_prepare(struct device *dev, void *data,
+ 					     VC4_HDMI_AUDIO_PACKET_CEA_MASK);
+ 
+ 	/* Set the MAI threshold */
+-	HDMI_WRITE(HDMI_MAI_THR,
+-		   VC4_SET_FIELD(0x08, VC4_HD_MAI_THR_PANICHIGH) |
+-		   VC4_SET_FIELD(0x08, VC4_HD_MAI_THR_PANICLOW) |
+-		   VC4_SET_FIELD(0x06, VC4_HD_MAI_THR_DREQHIGH) |
+-		   VC4_SET_FIELD(0x08, VC4_HD_MAI_THR_DREQLOW));
++	if (vc4->gen >= VC4_GEN_5)
++		HDMI_WRITE(HDMI_MAI_THR,
++			   VC4_SET_FIELD(0x10, VC4_HD_MAI_THR_PANICHIGH) |
++			   VC4_SET_FIELD(0x10, VC4_HD_MAI_THR_PANICLOW) |
++			   VC4_SET_FIELD(0x1c, VC4_HD_MAI_THR_DREQHIGH) |
++			   VC4_SET_FIELD(0x1c, VC4_HD_MAI_THR_DREQLOW));
++	else
++		HDMI_WRITE(HDMI_MAI_THR,
++			   VC4_SET_FIELD(0x8, VC4_HD_MAI_THR_PANICHIGH) |
++			   VC4_SET_FIELD(0x8, VC4_HD_MAI_THR_PANICLOW) |
++			   VC4_SET_FIELD(0x6, VC4_HD_MAI_THR_DREQHIGH) |
++			   VC4_SET_FIELD(0x8, VC4_HD_MAI_THR_DREQLOW));
+ 
+ 	HDMI_WRITE(HDMI_MAI_CONFIG,
+ 		   VC4_HDMI_MAI_CONFIG_BIT_REVERSE |
+diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c
+index 2a835a5cff9dd1..863539e1f7e04b 100644
+--- a/drivers/gpu/drm/vc4/vc4_hvs.c
++++ b/drivers/gpu/drm/vc4/vc4_hvs.c
+@@ -110,7 +110,8 @@ static int vc4_hvs_debugfs_dlist(struct seq_file *m, void *data)
+ 	struct vc4_dev *vc4 = to_vc4_dev(dev);
+ 	struct vc4_hvs *hvs = vc4->hvs;
+ 	struct drm_printer p = drm_seq_file_printer(m);
+-	unsigned int next_entry_start = 0;
++	unsigned int dlist_mem_size = hvs->dlist_mem_size;
++	unsigned int next_entry_start;
+ 	unsigned int i, j;
+ 	u32 dlist_word, dispstat;
+ 
+@@ -124,8 +125,9 @@ static int vc4_hvs_debugfs_dlist(struct seq_file *m, void *data)
+ 		}
+ 
+ 		drm_printf(&p, "HVS chan %u:\n", i);
++		next_entry_start = 0;
+ 
+-		for (j = HVS_READ(SCALER_DISPLISTX(i)); j < 256; j++) {
++		for (j = HVS_READ(SCALER_DISPLISTX(i)); j < dlist_mem_size; j++) {
+ 			dlist_word = readl((u32 __iomem *)vc4->hvs->dlist + j);
+ 			drm_printf(&p, "dlist: %02d: 0x%08x\n", j,
+ 				   dlist_word);
+@@ -222,6 +224,9 @@ static void vc4_hvs_lut_load(struct vc4_hvs *hvs,
+ 	if (!drm_dev_enter(drm, &idx))
+ 		return;
+ 
++	if (hvs->vc4->gen != VC4_GEN_4)
++		goto exit;
++
+ 	/* The LUT memory is laid out with each HVS channel in order,
+ 	 * each of which takes 256 writes for R, 256 for G, then 256
+ 	 * for B.
+@@ -237,6 +242,7 @@ static void vc4_hvs_lut_load(struct vc4_hvs *hvs,
+ 	for (i = 0; i < crtc->gamma_size; i++)
+ 		HVS_WRITE(SCALER_GAMDATA, vc4_crtc->lut_b[i]);
+ 
++exit:
+ 	drm_dev_exit(idx);
+ }
+ 
+@@ -291,7 +297,7 @@ int vc4_hvs_get_fifo_from_output(struct vc4_hvs *hvs, unsigned int output)
+ 	u32 reg;
+ 	int ret;
+ 
+-	if (!vc4->is_vc5)
++	if (vc4->gen == VC4_GEN_4)
+ 		return output;
+ 
+ 	/*
+@@ -372,7 +378,7 @@ static int vc4_hvs_init_channel(struct vc4_hvs *hvs, struct drm_crtc *crtc,
+ 	dispctrl = SCALER_DISPCTRLX_ENABLE;
+ 	dispbkgndx = HVS_READ(SCALER_DISPBKGNDX(chan));
+ 
+-	if (!vc4->is_vc5) {
++	if (vc4->gen == VC4_GEN_4) {
+ 		dispctrl |= VC4_SET_FIELD(mode->hdisplay,
+ 					  SCALER_DISPCTRLX_WIDTH) |
+ 			    VC4_SET_FIELD(mode->vdisplay,
+@@ -394,7 +400,7 @@ static int vc4_hvs_init_channel(struct vc4_hvs *hvs, struct drm_crtc *crtc,
+ 	dispbkgndx &= ~SCALER_DISPBKGND_INTERLACE;
+ 
+ 	HVS_WRITE(SCALER_DISPBKGNDX(chan), dispbkgndx |
+-		  ((!vc4->is_vc5) ? SCALER_DISPBKGND_GAMMA : 0) |
++		  ((vc4->gen == VC4_GEN_4) ? SCALER_DISPBKGND_GAMMA : 0) |
+ 		  (interlace ? SCALER_DISPBKGND_INTERLACE : 0));
+ 
+ 	/* Reload the LUT, since the SRAMs would have been disabled if
+@@ -415,13 +421,11 @@ void vc4_hvs_stop_channel(struct vc4_hvs *hvs, unsigned int chan)
+ 	if (!drm_dev_enter(drm, &idx))
+ 		return;
+ 
+-	if (HVS_READ(SCALER_DISPCTRLX(chan)) & SCALER_DISPCTRLX_ENABLE)
++	if (!(HVS_READ(SCALER_DISPCTRLX(chan)) & SCALER_DISPCTRLX_ENABLE))
+ 		goto out;
+ 
+-	HVS_WRITE(SCALER_DISPCTRLX(chan),
+-		  HVS_READ(SCALER_DISPCTRLX(chan)) | SCALER_DISPCTRLX_RESET);
+-	HVS_WRITE(SCALER_DISPCTRLX(chan),
+-		  HVS_READ(SCALER_DISPCTRLX(chan)) & ~SCALER_DISPCTRLX_ENABLE);
++	HVS_WRITE(SCALER_DISPCTRLX(chan), SCALER_DISPCTRLX_RESET);
++	HVS_WRITE(SCALER_DISPCTRLX(chan), 0);
+ 
+ 	/* Once we leave, the scaler should be disabled and its fifo empty. */
+ 	WARN_ON_ONCE(HVS_READ(SCALER_DISPCTRLX(chan)) & SCALER_DISPCTRLX_RESET);
+@@ -580,7 +584,7 @@ void vc4_hvs_atomic_flush(struct drm_crtc *crtc,
+ 	}
+ 
+ 	if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED)
+-		return;
++		goto exit;
+ 
+ 	if (debug_dump_regs) {
+ 		DRM_INFO("CRTC %d HVS before:\n", drm_crtc_index(crtc));
+@@ -663,12 +667,14 @@ void vc4_hvs_atomic_flush(struct drm_crtc *crtc,
+ 		vc4_hvs_dump_state(hvs);
+ 	}
+ 
++exit:
+ 	drm_dev_exit(idx);
+ }
+ 
+ void vc4_hvs_mask_underrun(struct vc4_hvs *hvs, int channel)
+ {
+-	struct drm_device *drm = &hvs->vc4->base;
++	struct vc4_dev *vc4 = hvs->vc4;
++	struct drm_device *drm = &vc4->base;
+ 	u32 dispctrl;
+ 	int idx;
+ 
+@@ -676,8 +682,9 @@ void vc4_hvs_mask_underrun(struct vc4_hvs *hvs, int channel)
+ 		return;
+ 
+ 	dispctrl = HVS_READ(SCALER_DISPCTRL);
+-	dispctrl &= ~(hvs->vc4->is_vc5 ? SCALER5_DISPCTRL_DSPEISLUR(channel) :
+-					 SCALER_DISPCTRL_DSPEISLUR(channel));
++	dispctrl &= ~((vc4->gen == VC4_GEN_5) ?
++		      SCALER5_DISPCTRL_DSPEISLUR(channel) :
++		      SCALER_DISPCTRL_DSPEISLUR(channel));
+ 
+ 	HVS_WRITE(SCALER_DISPCTRL, dispctrl);
+ 
+@@ -686,7 +693,8 @@ void vc4_hvs_mask_underrun(struct vc4_hvs *hvs, int channel)
+ 
+ void vc4_hvs_unmask_underrun(struct vc4_hvs *hvs, int channel)
+ {
+-	struct drm_device *drm = &hvs->vc4->base;
++	struct vc4_dev *vc4 = hvs->vc4;
++	struct drm_device *drm = &vc4->base;
+ 	u32 dispctrl;
+ 	int idx;
+ 
+@@ -694,8 +702,9 @@ void vc4_hvs_unmask_underrun(struct vc4_hvs *hvs, int channel)
+ 		return;
+ 
+ 	dispctrl = HVS_READ(SCALER_DISPCTRL);
+-	dispctrl |= (hvs->vc4->is_vc5 ? SCALER5_DISPCTRL_DSPEISLUR(channel) :
+-					SCALER_DISPCTRL_DSPEISLUR(channel));
++	dispctrl |= ((vc4->gen == VC4_GEN_5) ?
++		     SCALER5_DISPCTRL_DSPEISLUR(channel) :
++		     SCALER_DISPCTRL_DSPEISLUR(channel));
+ 
+ 	HVS_WRITE(SCALER_DISPSTAT,
+ 		  SCALER_DISPSTAT_EUFLOW(channel));
+@@ -738,8 +747,10 @@ static irqreturn_t vc4_hvs_irq_handler(int irq, void *data)
+ 	control = HVS_READ(SCALER_DISPCTRL);
+ 
+ 	for (channel = 0; channel < SCALER_CHANNELS_COUNT; channel++) {
+-		dspeislur = vc4->is_vc5 ? SCALER5_DISPCTRL_DSPEISLUR(channel) :
+-					  SCALER_DISPCTRL_DSPEISLUR(channel);
++		dspeislur = (vc4->gen == VC4_GEN_5) ?
++			SCALER5_DISPCTRL_DSPEISLUR(channel) :
++			SCALER_DISPCTRL_DSPEISLUR(channel);
++
+ 		/* Interrupt masking is not always honored, so check it here. */
+ 		if (status & SCALER_DISPSTAT_EUFLOW(channel) &&
+ 		    control & dspeislur) {
+@@ -767,7 +778,7 @@ int vc4_hvs_debugfs_init(struct drm_minor *minor)
+ 	if (!vc4->hvs)
+ 		return -ENODEV;
+ 
+-	if (!vc4->is_vc5)
++	if (vc4->gen == VC4_GEN_4)
+ 		debugfs_create_bool("hvs_load_tracker", S_IRUGO | S_IWUSR,
+ 				    minor->debugfs_root,
+ 				    &vc4->load_tracker_enabled);
+@@ -800,16 +811,17 @@ struct vc4_hvs *__vc4_hvs_alloc(struct vc4_dev *vc4, struct platform_device *pde
+ 	 * our 16K), since we don't want to scramble the screen when
+ 	 * transitioning from the firmware's boot setup to runtime.
+ 	 */
++	hvs->dlist_mem_size = (SCALER_DLIST_SIZE >> 2) - HVS_BOOTLOADER_DLIST_END;
+ 	drm_mm_init(&hvs->dlist_mm,
+ 		    HVS_BOOTLOADER_DLIST_END,
+-		    (SCALER_DLIST_SIZE >> 2) - HVS_BOOTLOADER_DLIST_END);
++		    hvs->dlist_mem_size);
+ 
+ 	/* Set up the HVS LBM memory manager.  We could have some more
+ 	 * complicated data structure that allowed reuse of LBM areas
+ 	 * between planes when they don't overlap on the screen, but
+ 	 * for now we just allocate globally.
+ 	 */
+-	if (!vc4->is_vc5)
++	if (vc4->gen == VC4_GEN_4)
+ 		/* 48k words of 2x12-bit pixels */
+ 		drm_mm_init(&hvs->lbm_mm, 0, 48 * 1024);
+ 	else
+@@ -843,7 +855,7 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
+ 	hvs->regset.regs = hvs_regs;
+ 	hvs->regset.nregs = ARRAY_SIZE(hvs_regs);
+ 
+-	if (vc4->is_vc5) {
++	if (vc4->gen == VC4_GEN_5) {
+ 		struct rpi_firmware *firmware;
+ 		struct device_node *node;
+ 		unsigned int max_rate;
+@@ -881,7 +893,7 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
+ 		}
+ 	}
+ 
+-	if (!vc4->is_vc5)
++	if (vc4->gen == VC4_GEN_4)
+ 		hvs->dlist = hvs->regs + SCALER_DLIST_START;
+ 	else
+ 		hvs->dlist = hvs->regs + SCALER5_DLIST_START;
+@@ -922,7 +934,7 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
+ 		    SCALER_DISPCTRL_DISPEIRQ(1) |
+ 		    SCALER_DISPCTRL_DISPEIRQ(2);
+ 
+-	if (!vc4->is_vc5)
++	if (vc4->gen == VC4_GEN_4)
+ 		dispctrl &= ~(SCALER_DISPCTRL_DMAEIRQ |
+ 			      SCALER_DISPCTRL_SLVWREIRQ |
+ 			      SCALER_DISPCTRL_SLVRDEIRQ |
+@@ -966,7 +978,7 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
+ 
+ 	/* Recompute Composite Output Buffer (COB) allocations for the displays
+ 	 */
+-	if (!vc4->is_vc5) {
++	if (vc4->gen == VC4_GEN_4) {
+ 		/* The COB is 20736 pixels, or just over 10 lines at 2048 wide.
+ 		 * The bottom 2048 pixels are full 32bpp RGBA (intended for the
+ 		 * TXP composing RGBA to memory), whilst the remainder are only
+diff --git a/drivers/gpu/drm/vc4/vc4_irq.c b/drivers/gpu/drm/vc4/vc4_irq.c
+index ef93d8e22a35a4..968356d1b91dfb 100644
+--- a/drivers/gpu/drm/vc4/vc4_irq.c
++++ b/drivers/gpu/drm/vc4/vc4_irq.c
+@@ -263,7 +263,7 @@ vc4_irq_enable(struct drm_device *dev)
+ {
+ 	struct vc4_dev *vc4 = to_vc4_dev(dev);
+ 
+-	if (WARN_ON_ONCE(vc4->is_vc5))
++	if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
+ 		return;
+ 
+ 	if (!vc4->v3d)
+@@ -280,7 +280,7 @@ vc4_irq_disable(struct drm_device *dev)
+ {
+ 	struct vc4_dev *vc4 = to_vc4_dev(dev);
+ 
+-	if (WARN_ON_ONCE(vc4->is_vc5))
++	if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
+ 		return;
+ 
+ 	if (!vc4->v3d)
+@@ -303,7 +303,7 @@ int vc4_irq_install(struct drm_device *dev, int irq)
+ 	struct vc4_dev *vc4 = to_vc4_dev(dev);
+ 	int ret;
+ 
+-	if (WARN_ON_ONCE(vc4->is_vc5))
++	if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
+ 		return -ENODEV;
+ 
+ 	if (irq == IRQ_NOTCONNECTED)
+@@ -324,7 +324,7 @@ void vc4_irq_uninstall(struct drm_device *dev)
+ {
+ 	struct vc4_dev *vc4 = to_vc4_dev(dev);
+ 
+-	if (WARN_ON_ONCE(vc4->is_vc5))
++	if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
+ 		return;
+ 
+ 	vc4_irq_disable(dev);
+@@ -337,7 +337,7 @@ void vc4_irq_reset(struct drm_device *dev)
+ 	struct vc4_dev *vc4 = to_vc4_dev(dev);
+ 	unsigned long irqflags;
+ 
+-	if (WARN_ON_ONCE(vc4->is_vc5))
++	if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
+ 		return;
+ 
+ 	/* Acknowledge any stale IRQs. */
+diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
+index 5495f2a94fa926..bddfcad1095013 100644
+--- a/drivers/gpu/drm/vc4/vc4_kms.c
++++ b/drivers/gpu/drm/vc4/vc4_kms.c
+@@ -369,7 +369,7 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
+ 		old_hvs_state->fifo_state[channel].pending_commit = NULL;
+ 	}
+ 
+-	if (vc4->is_vc5) {
++	if (vc4->gen == VC4_GEN_5) {
+ 		unsigned long state_rate = max(old_hvs_state->core_clock_rate,
+ 					       new_hvs_state->core_clock_rate);
+ 		unsigned long core_rate = clamp_t(unsigned long, state_rate,
+@@ -388,7 +388,7 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
+ 
+ 	vc4_ctm_commit(vc4, state);
+ 
+-	if (vc4->is_vc5)
++	if (vc4->gen == VC4_GEN_5)
+ 		vc5_hvs_pv_muxing_commit(vc4, state);
+ 	else
+ 		vc4_hvs_pv_muxing_commit(vc4, state);
+@@ -406,7 +406,7 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
+ 
+ 	drm_atomic_helper_cleanup_planes(dev, state);
+ 
+-	if (vc4->is_vc5) {
++	if (vc4->gen == VC4_GEN_5) {
+ 		unsigned long core_rate = min_t(unsigned long,
+ 						hvs->max_core_rate,
+ 						new_hvs_state->core_clock_rate);
+@@ -461,7 +461,7 @@ static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev,
+ 	struct vc4_dev *vc4 = to_vc4_dev(dev);
+ 	struct drm_mode_fb_cmd2 mode_cmd_local;
+ 
+-	if (WARN_ON_ONCE(vc4->is_vc5))
++	if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
+ 		return ERR_PTR(-ENODEV);
+ 
+ 	/* If the user didn't specify a modifier, use the
+@@ -1040,7 +1040,7 @@ int vc4_kms_load(struct drm_device *dev)
+ 	 * the BCM2711, but the load tracker computations are used for
+ 	 * the core clock rate calculation.
+ 	 */
+-	if (!vc4->is_vc5) {
++	if (vc4->gen == VC4_GEN_4) {
+ 		/* Start with the load tracker enabled. Can be
+ 		 * disabled through the debugfs load_tracker file.
+ 		 */
+@@ -1056,7 +1056,7 @@ int vc4_kms_load(struct drm_device *dev)
+ 		return ret;
+ 	}
+ 
+-	if (vc4->is_vc5) {
++	if (vc4->gen == VC4_GEN_5) {
+ 		dev->mode_config.max_width = 7680;
+ 		dev->mode_config.max_height = 7680;
+ 	} else {
+@@ -1064,7 +1064,7 @@ int vc4_kms_load(struct drm_device *dev)
+ 		dev->mode_config.max_height = 2048;
+ 	}
+ 
+-	dev->mode_config.funcs = vc4->is_vc5 ? &vc5_mode_funcs : &vc4_mode_funcs;
++	dev->mode_config.funcs = (vc4->gen > VC4_GEN_4) ? &vc5_mode_funcs : &vc4_mode_funcs;
+ 	dev->mode_config.helper_private = &vc4_mode_config_helpers;
+ 	dev->mode_config.preferred_depth = 24;
+ 	dev->mode_config.async_page_flip = true;
+diff --git a/drivers/gpu/drm/vc4/vc4_perfmon.c b/drivers/gpu/drm/vc4/vc4_perfmon.c
+index c00a5cc2316d20..e4fda72c19f92f 100644
+--- a/drivers/gpu/drm/vc4/vc4_perfmon.c
++++ b/drivers/gpu/drm/vc4/vc4_perfmon.c
+@@ -23,7 +23,7 @@ void vc4_perfmon_get(struct vc4_perfmon *perfmon)
+ 		return;
+ 
+ 	vc4 = perfmon->dev;
+-	if (WARN_ON_ONCE(vc4->is_vc5))
++	if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
+ 		return;
+ 
+ 	refcount_inc(&perfmon->refcnt);
+@@ -37,7 +37,7 @@ void vc4_perfmon_put(struct vc4_perfmon *perfmon)
+ 		return;
+ 
+ 	vc4 = perfmon->dev;
+-	if (WARN_ON_ONCE(vc4->is_vc5))
++	if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
+ 		return;
+ 
+ 	if (refcount_dec_and_test(&perfmon->refcnt))
+@@ -49,7 +49,7 @@ void vc4_perfmon_start(struct vc4_dev *vc4, struct vc4_perfmon *perfmon)
+ 	unsigned int i;
+ 	u32 mask;
+ 
+-	if (WARN_ON_ONCE(vc4->is_vc5))
++	if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
+ 		return;
+ 
+ 	if (WARN_ON_ONCE(!perfmon || vc4->active_perfmon))
+@@ -69,7 +69,7 @@ void vc4_perfmon_stop(struct vc4_dev *vc4, struct vc4_perfmon *perfmon,
+ {
+ 	unsigned int i;
+ 
+-	if (WARN_ON_ONCE(vc4->is_vc5))
++	if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
+ 		return;
+ 
+ 	if (WARN_ON_ONCE(!vc4->active_perfmon ||
+@@ -90,7 +90,7 @@ struct vc4_perfmon *vc4_perfmon_find(struct vc4_file *vc4file, int id)
+ 	struct vc4_dev *vc4 = vc4file->dev;
+ 	struct vc4_perfmon *perfmon;
+ 
+-	if (WARN_ON_ONCE(vc4->is_vc5))
++	if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
+ 		return NULL;
+ 
+ 	mutex_lock(&vc4file->perfmon.lock);
+@@ -105,7 +105,7 @@ void vc4_perfmon_open_file(struct vc4_file *vc4file)
+ {
+ 	struct vc4_dev *vc4 = vc4file->dev;
+ 
+-	if (WARN_ON_ONCE(vc4->is_vc5))
++	if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
+ 		return;
+ 
+ 	mutex_init(&vc4file->perfmon.lock);
+@@ -131,7 +131,7 @@ void vc4_perfmon_close_file(struct vc4_file *vc4file)
+ {
+ 	struct vc4_dev *vc4 = vc4file->dev;
+ 
+-	if (WARN_ON_ONCE(vc4->is_vc5))
++	if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
+ 		return;
+ 
+ 	mutex_lock(&vc4file->perfmon.lock);
+@@ -151,7 +151,7 @@ int vc4_perfmon_create_ioctl(struct drm_device *dev, void *data,
+ 	unsigned int i;
+ 	int ret;
+ 
+-	if (WARN_ON_ONCE(vc4->is_vc5))
++	if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
+ 		return -ENODEV;
+ 
+ 	if (!vc4->v3d) {
+@@ -205,7 +205,7 @@ int vc4_perfmon_destroy_ioctl(struct drm_device *dev, void *data,
+ 	struct drm_vc4_perfmon_destroy *req = data;
+ 	struct vc4_perfmon *perfmon;
+ 
+-	if (WARN_ON_ONCE(vc4->is_vc5))
++	if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
+ 		return -ENODEV;
+ 
+ 	if (!vc4->v3d) {
+@@ -233,7 +233,7 @@ int vc4_perfmon_get_values_ioctl(struct drm_device *dev, void *data,
+ 	struct vc4_perfmon *perfmon;
+ 	int ret;
+ 
+-	if (WARN_ON_ONCE(vc4->is_vc5))
++	if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
+ 		return -ENODEV;
+ 
+ 	if (!vc4->v3d) {
+diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
+index 07caf2a47c6cef..866bc46ee6d53a 100644
+--- a/drivers/gpu/drm/vc4/vc4_plane.c
++++ b/drivers/gpu/drm/vc4/vc4_plane.c
+@@ -587,10 +587,10 @@ static u32 vc4_lbm_size(struct drm_plane_state *state)
+ 	}
+ 
+ 	/* Align it to 64 or 128 (hvs5) bytes */
+-	lbm = roundup(lbm, vc4->is_vc5 ? 128 : 64);
++	lbm = roundup(lbm, vc4->gen == VC4_GEN_5 ? 128 : 64);
+ 
+ 	/* Each "word" of the LBM memory contains 2 or 4 (hvs5) pixels */
+-	lbm /= vc4->is_vc5 ? 4 : 2;
++	lbm /= vc4->gen == VC4_GEN_5 ? 4 : 2;
+ 
+ 	return lbm;
+ }
+@@ -706,7 +706,7 @@ static int vc4_plane_allocate_lbm(struct drm_plane_state *state)
+ 		ret = drm_mm_insert_node_generic(&vc4->hvs->lbm_mm,
+ 						 &vc4_state->lbm,
+ 						 lbm_size,
+-						 vc4->is_vc5 ? 64 : 32,
++						 vc4->gen == VC4_GEN_5 ? 64 : 32,
+ 						 0, 0);
+ 		spin_unlock_irqrestore(&vc4->hvs->mm_lock, irqflags);
+ 
+@@ -1057,7 +1057,7 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
+ 	mix_plane_alpha = state->alpha != DRM_BLEND_ALPHA_OPAQUE &&
+ 			  fb->format->has_alpha;
+ 
+-	if (!vc4->is_vc5) {
++	if (vc4->gen == VC4_GEN_4) {
+ 	/* Control word */
+ 		vc4_dlist_write(vc4_state,
+ 				SCALER_CTL0_VALID |
+@@ -1632,7 +1632,7 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev,
+ 	};
+ 
+ 	for (i = 0; i < ARRAY_SIZE(hvs_formats); i++) {
+-		if (!hvs_formats[i].hvs5_only || vc4->is_vc5) {
++		if (!hvs_formats[i].hvs5_only || vc4->gen == VC4_GEN_5) {
+ 			formats[num_formats] = hvs_formats[i].drm;
+ 			num_formats++;
+ 		}
+@@ -1647,7 +1647,7 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev,
+ 		return ERR_CAST(vc4_plane);
+ 	plane = &vc4_plane->base;
+ 
+-	if (vc4->is_vc5)
++	if (vc4->gen == VC4_GEN_5)
+ 		drm_plane_helper_add(plane, &vc5_plane_helper_funcs);
+ 	else
+ 		drm_plane_helper_add(plane, &vc4_plane_helper_funcs);
+diff --git a/drivers/gpu/drm/vc4/vc4_render_cl.c b/drivers/gpu/drm/vc4/vc4_render_cl.c
+index 1bda5010f15a86..ae4ad956f04ff8 100644
+--- a/drivers/gpu/drm/vc4/vc4_render_cl.c
++++ b/drivers/gpu/drm/vc4/vc4_render_cl.c
+@@ -599,7 +599,7 @@ int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec)
+ 	bool has_bin = args->bin_cl_size != 0;
+ 	int ret;
+ 
+-	if (WARN_ON_ONCE(vc4->is_vc5))
++	if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
+ 		return -ENODEV;
+ 
+ 	if (args->min_x_tile > args->max_x_tile ||
+diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c
+index bf5c4e36c94e4d..43f69d74e8761d 100644
+--- a/drivers/gpu/drm/vc4/vc4_v3d.c
++++ b/drivers/gpu/drm/vc4/vc4_v3d.c
+@@ -127,7 +127,7 @@ static int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused)
+ int
+ vc4_v3d_pm_get(struct vc4_dev *vc4)
+ {
+-	if (WARN_ON_ONCE(vc4->is_vc5))
++	if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
+ 		return -ENODEV;
+ 
+ 	mutex_lock(&vc4->power_lock);
+@@ -148,7 +148,7 @@ vc4_v3d_pm_get(struct vc4_dev *vc4)
+ void
+ vc4_v3d_pm_put(struct vc4_dev *vc4)
+ {
+-	if (WARN_ON_ONCE(vc4->is_vc5))
++	if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
+ 		return;
+ 
+ 	mutex_lock(&vc4->power_lock);
+@@ -178,7 +178,7 @@ int vc4_v3d_get_bin_slot(struct vc4_dev *vc4)
+ 	uint64_t seqno = 0;
+ 	struct vc4_exec_info *exec;
+ 
+-	if (WARN_ON_ONCE(vc4->is_vc5))
++	if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
+ 		return -ENODEV;
+ 
+ try_again:
+@@ -325,7 +325,7 @@ int vc4_v3d_bin_bo_get(struct vc4_dev *vc4, bool *used)
+ {
+ 	int ret = 0;
+ 
+-	if (WARN_ON_ONCE(vc4->is_vc5))
++	if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
+ 		return -ENODEV;
+ 
+ 	mutex_lock(&vc4->bin_bo_lock);
+@@ -360,7 +360,7 @@ static void bin_bo_release(struct kref *ref)
+ 
+ void vc4_v3d_bin_bo_put(struct vc4_dev *vc4)
+ {
+-	if (WARN_ON_ONCE(vc4->is_vc5))
++	if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
+ 		return;
+ 
+ 	mutex_lock(&vc4->bin_bo_lock);
+diff --git a/drivers/gpu/drm/vc4/vc4_validate.c b/drivers/gpu/drm/vc4/vc4_validate.c
+index 0c17284bf6f5bb..f3d7fdbe9083c5 100644
+--- a/drivers/gpu/drm/vc4/vc4_validate.c
++++ b/drivers/gpu/drm/vc4/vc4_validate.c
+@@ -109,7 +109,7 @@ vc4_use_bo(struct vc4_exec_info *exec, uint32_t hindex)
+ 	struct drm_gem_dma_object *obj;
+ 	struct vc4_bo *bo;
+ 
+-	if (WARN_ON_ONCE(vc4->is_vc5))
++	if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
+ 		return NULL;
+ 
+ 	if (hindex >= exec->bo_count) {
+@@ -169,7 +169,7 @@ vc4_check_tex_size(struct vc4_exec_info *exec, struct drm_gem_dma_object *fbo,
+ 	uint32_t utile_w = utile_width(cpp);
+ 	uint32_t utile_h = utile_height(cpp);
+ 
+-	if (WARN_ON_ONCE(vc4->is_vc5))
++	if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
+ 		return false;
+ 
+ 	/* The shaded vertex format stores signed 12.4 fixed point
+@@ -495,7 +495,7 @@ vc4_validate_bin_cl(struct drm_device *dev,
+ 	uint32_t dst_offset = 0;
+ 	uint32_t src_offset = 0;
+ 
+-	if (WARN_ON_ONCE(vc4->is_vc5))
++	if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
+ 		return -ENODEV;
+ 
+ 	while (src_offset < len) {
+@@ -942,7 +942,7 @@ vc4_validate_shader_recs(struct drm_device *dev,
+ 	uint32_t i;
+ 	int ret = 0;
+ 
+-	if (WARN_ON_ONCE(vc4->is_vc5))
++	if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
+ 		return -ENODEV;
+ 
+ 	for (i = 0; i < exec->shader_state_count; i++) {
+diff --git a/drivers/gpu/drm/vc4/vc4_validate_shaders.c b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
+index 9745f8810eca6d..afb1a4d8268465 100644
+--- a/drivers/gpu/drm/vc4/vc4_validate_shaders.c
++++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
+@@ -786,7 +786,7 @@ vc4_validate_shader(struct drm_gem_dma_object *shader_obj)
+ 	struct vc4_validated_shader_info *validated_shader = NULL;
+ 	struct vc4_shader_validation_state validation_state;
+ 
+-	if (WARN_ON_ONCE(vc4->is_vc5))
++	if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
+ 		return NULL;
+ 
+ 	memset(&validation_state, 0, sizeof(validation_state));
+diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c
+index 5ce70dd946aa63..24589b947dea3d 100644
+--- a/drivers/gpu/drm/vkms/vkms_output.c
++++ b/drivers/gpu/drm/vkms/vkms_output.c
+@@ -84,7 +84,7 @@ int vkms_output_init(struct vkms_device *vkmsdev, int index)
+ 				 DRM_MODE_CONNECTOR_VIRTUAL);
+ 	if (ret) {
+ 		DRM_ERROR("Failed to init connector\n");
+-		goto err_connector;
++		return ret;
+ 	}
+ 
+ 	drm_connector_helper_add(connector, &vkms_conn_helper_funcs);
+@@ -119,8 +119,5 @@ int vkms_output_init(struct vkms_device *vkmsdev, int index)
+ err_encoder:
+ 	drm_connector_cleanup(connector);
+ 
+-err_connector:
+-	drm_crtc_cleanup(crtc);
+-
+ 	return ret;
+ }
+diff --git a/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c b/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
+index 6619a40aed1533..f4332f06b6c809 100644
+--- a/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
++++ b/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
+@@ -42,7 +42,7 @@ bool intel_hdcp_gsc_check_status(struct xe_device *xe)
+ 	struct xe_gsc *gsc = &gt->uc.gsc;
+ 	bool ret = true;
+ 
+-	if (!gsc && !xe_uc_fw_is_enabled(&gsc->fw)) {
++	if (!gsc || !xe_uc_fw_is_enabled(&gsc->fw)) {
+ 		drm_dbg_kms(&xe->drm,
+ 			    "GSC Components not ready for HDCP2.x\n");
+ 		return false;
+diff --git a/drivers/gpu/drm/xe/xe_sync.c b/drivers/gpu/drm/xe/xe_sync.c
+index 2e72c06fd40d07..b0684e6d2047b1 100644
+--- a/drivers/gpu/drm/xe/xe_sync.c
++++ b/drivers/gpu/drm/xe/xe_sync.c
+@@ -85,8 +85,12 @@ static void user_fence_worker(struct work_struct *w)
+ 		mmput(ufence->mm);
+ 	}
+ 
+-	wake_up_all(&ufence->xe->ufence_wq);
++	/*
++	 * Wake up waiters only after updating the ufence state, allowing the UMD
++	 * to safely reuse the same ufence without encountering -EBUSY errors.
++	 */
+ 	WRITE_ONCE(ufence->signalled, 1);
++	wake_up_all(&ufence->xe->ufence_wq);
+ 	user_fence_put(ufence);
+ }
+ 
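
The xe_sync reorder is the general publish-then-wake rule: the condition a waiter re-checks must already be true when wake_up_all() runs, otherwise a waiter can wake, observe the old state, and go back to sleep having consumed the only wakeup. A sketch of both sides, assuming a signalled flag and an already-initialised waitqueue:

#include <linux/wait.h>

struct ufence_like {
	wait_queue_head_t wq;	/* init_waitqueue_head() at creation time */
	int signalled;
};

/* Signaler: publish the state first, then wake. */
static void signal_fence(struct ufence_like *f)
{
	WRITE_ONCE(f->signalled, 1);	/* waiters re-check this flag... */
	wake_up_all(&f->wq);		/* ...so it must be set before waking */
}

/* Waiter: wait_event() re-evaluates the condition after every wakeup. */
static void wait_fence(struct ufence_like *f)
{
	wait_event(f->wq, READ_ONCE(f->signalled));
}
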
+diff --git a/drivers/gpu/drm/xlnx/zynqmp_disp.c b/drivers/gpu/drm/xlnx/zynqmp_disp.c
+index 9368acf56eaf79..e4e0e299e8a7d5 100644
+--- a/drivers/gpu/drm/xlnx/zynqmp_disp.c
++++ b/drivers/gpu/drm/xlnx/zynqmp_disp.c
+@@ -1200,6 +1200,9 @@ static void zynqmp_disp_layer_release_dma(struct zynqmp_disp *disp,
+ {
+ 	unsigned int i;
+ 
++	if (!layer->info)
++		return;
++
+ 	for (i = 0; i < layer->info->num_channels; i++) {
+ 		struct zynqmp_disp_layer_dma *dma = &layer->dmas[i];
+ 
+diff --git a/drivers/gpu/drm/xlnx/zynqmp_kms.c b/drivers/gpu/drm/xlnx/zynqmp_kms.c
+index bd1368df787034..4556af2faa0f19 100644
+--- a/drivers/gpu/drm/xlnx/zynqmp_kms.c
++++ b/drivers/gpu/drm/xlnx/zynqmp_kms.c
+@@ -536,7 +536,7 @@ void zynqmp_dpsub_drm_cleanup(struct zynqmp_dpsub *dpsub)
+ {
+ 	struct drm_device *drm = &dpsub->drm->dev;
+ 
+-	drm_dev_unregister(drm);
++	drm_dev_unplug(drm);
+ 	drm_atomic_helper_shutdown(drm);
+ 	drm_encoder_cleanup(&dpsub->drm->encoder);
+ 	drm_kms_helper_poll_fini(drm);
+diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c
+index f33485d83d24ff..0fb210e40a4127 100644
+--- a/drivers/hid/hid-hyperv.c
++++ b/drivers/hid/hid-hyperv.c
+@@ -422,6 +422,25 @@ static int mousevsc_hid_raw_request(struct hid_device *hid,
+ 	return 0;
+ }
+ 
++static int mousevsc_hid_probe(struct hid_device *hid_dev, const struct hid_device_id *id)
++{
++	int ret;
++
++	ret = hid_parse(hid_dev);
++	if (ret) {
++		hid_err(hid_dev, "parse failed\n");
++		return ret;
++	}
++
++	ret = hid_hw_start(hid_dev, HID_CONNECT_HIDINPUT | HID_CONNECT_HIDDEV);
++	if (ret) {
++		hid_err(hid_dev, "hw start failed\n");
++		return ret;
++	}
++
++	return 0;
++}
++
+ static const struct hid_ll_driver mousevsc_ll_driver = {
+ 	.parse = mousevsc_hid_parse,
+ 	.open = mousevsc_hid_open,
+@@ -431,7 +450,16 @@ static const struct hid_ll_driver mousevsc_ll_driver = {
+ 	.raw_request = mousevsc_hid_raw_request,
+ };
+ 
+-static struct hid_driver mousevsc_hid_driver;
++static const struct hid_device_id mousevsc_devices[] = {
++	{ HID_DEVICE(BUS_VIRTUAL, HID_GROUP_ANY, 0x045E, 0x0621) },
++	{ }
++};
++
++static struct hid_driver mousevsc_hid_driver = {
++	.name = "hid-hyperv",
++	.id_table = mousevsc_devices,
++	.probe = mousevsc_hid_probe,
++};
+ 
+ static int mousevsc_probe(struct hv_device *device,
+ 			const struct hv_vmbus_device_id *dev_id)
+@@ -473,7 +501,6 @@ static int mousevsc_probe(struct hv_device *device,
+ 	}
+ 
+ 	hid_dev->ll_driver = &mousevsc_ll_driver;
+-	hid_dev->driver = &mousevsc_hid_driver;
+ 	hid_dev->bus = BUS_VIRTUAL;
+ 	hid_dev->vendor = input_dev->hid_dev_info.vendor;
+ 	hid_dev->product = input_dev->hid_dev_info.product;
+@@ -488,20 +515,6 @@ static int mousevsc_probe(struct hv_device *device,
+ 	if (ret)
+ 		goto probe_err2;
+ 
+-
+-	ret = hid_parse(hid_dev);
+-	if (ret) {
+-		hid_err(hid_dev, "parse failed\n");
+-		goto probe_err2;
+-	}
+-
+-	ret = hid_hw_start(hid_dev, HID_CONNECT_HIDINPUT | HID_CONNECT_HIDDEV);
+-
+-	if (ret) {
+-		hid_err(hid_dev, "hw start failed\n");
+-		goto probe_err2;
+-	}
+-
+ 	device_init_wakeup(&device->device, true);
+ 
+ 	input_dev->connected = true;
+@@ -579,12 +592,23 @@ static struct  hv_driver mousevsc_drv = {
+ 
+ static int __init mousevsc_init(void)
+ {
+-	return vmbus_driver_register(&mousevsc_drv);
++	int ret;
++
++	ret = hid_register_driver(&mousevsc_hid_driver);
++	if (ret)
++		return ret;
++
++	ret = vmbus_driver_register(&mousevsc_drv);
++	if (ret)
++		hid_unregister_driver(&mousevsc_hid_driver);
++
++	return ret;
+ }
+ 
+ static void __exit mousevsc_exit(void)
+ {
+ 	vmbus_driver_unregister(&mousevsc_drv);
++	hid_unregister_driver(&mousevsc_hid_driver);
+ }
+ 
+ MODULE_LICENSE("GPL");
+diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
+index 413606bdf476df..5a599c90e7a2c7 100644
+--- a/drivers/hid/wacom_wac.c
++++ b/drivers/hid/wacom_wac.c
+@@ -1353,9 +1353,9 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
+ 					rotation -= 1800;
+ 
+ 				input_report_abs(pen_input, ABS_TILT_X,
+-						 (char)frame[7]);
++						 (signed char)frame[7]);
+ 				input_report_abs(pen_input, ABS_TILT_Y,
+-						 (char)frame[8]);
++						 (signed char)frame[8]);
+ 				input_report_abs(pen_input, ABS_Z, rotation);
+ 				input_report_abs(pen_input, ABS_WHEEL,
+ 						 get_unaligned_le16(&frame[11]));
+diff --git a/drivers/hwmon/aquacomputer_d5next.c b/drivers/hwmon/aquacomputer_d5next.c
+index 34cac27e4ddec3..0dcb8a3a691d69 100644
+--- a/drivers/hwmon/aquacomputer_d5next.c
++++ b/drivers/hwmon/aquacomputer_d5next.c
+@@ -597,7 +597,7 @@ struct aqc_data {
+ 
+ 	/* Sensor values */
+ 	s32 temp_input[20];	/* Max 4 physical and 16 virtual or 8 physical and 12 virtual */
+-	s32 speed_input[8];
++	s32 speed_input[9];
+ 	u32 speed_input_min[1];
+ 	u32 speed_input_target[1];
+ 	u32 speed_input_max[1];
+diff --git a/drivers/hwmon/nct6775-core.c b/drivers/hwmon/nct6775-core.c
+index 934fed3dd58661..ee04795b98aabe 100644
+--- a/drivers/hwmon/nct6775-core.c
++++ b/drivers/hwmon/nct6775-core.c
+@@ -2878,8 +2878,7 @@ store_target_temp(struct device *dev, struct device_attribute *attr,
+ 	if (err < 0)
+ 		return err;
+ 
+-	val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0,
+-			data->target_temp_mask);
++	val = DIV_ROUND_CLOSEST(clamp_val(val, 0, data->target_temp_mask * 1000), 1000);
+ 
+ 	mutex_lock(&data->update_lock);
+ 	data->target_temp[nr] = val;
+@@ -2959,7 +2958,7 @@ store_temp_tolerance(struct device *dev, struct device_attribute *attr,
+ 		return err;
+ 
+ 	/* Limit tolerance as needed */
+-	val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, data->tolerance_mask);
++	val = DIV_ROUND_CLOSEST(clamp_val(val, 0, data->tolerance_mask * 1000), 1000);
+ 
+ 	mutex_lock(&data->update_lock);
+ 	data->temp_tolerance[index][nr] = val;
+@@ -3085,7 +3084,7 @@ store_weight_temp(struct device *dev, struct device_attribute *attr,
+ 	if (err < 0)
+ 		return err;
+ 
+-	val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, 255);
++	val = DIV_ROUND_CLOSEST(clamp_val(val, 0, 255000), 1000);
+ 
+ 	mutex_lock(&data->update_lock);
+ 	data->weight_temp[index][nr] = val;
+diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
+index ce7fd4ca9d89b0..a68b0a98e8d4db 100644
+--- a/drivers/hwmon/pmbus/pmbus_core.c
++++ b/drivers/hwmon/pmbus/pmbus_core.c
+@@ -3279,7 +3279,17 @@ static int pmbus_regulator_notify(struct pmbus_data *data, int page, int event)
+ 
+ static int pmbus_write_smbalert_mask(struct i2c_client *client, u8 page, u8 reg, u8 val)
+ {
+-	return _pmbus_write_word_data(client, page, PMBUS_SMBALERT_MASK, reg | (val << 8));
++	int ret;
++
++	ret = _pmbus_write_word_data(client, page, PMBUS_SMBALERT_MASK, reg | (val << 8));
++
++	/*
++	 * Clear the fault unconditionally, in case the chip does not
++	 * support writing PMBUS_SMBALERT_MASK.
++	 */
++	pmbus_clear_fault_page(client, page);
++
++	return ret;
+ }
+ 
+ static irqreturn_t pmbus_fault_handler(int irq, void *pdata)
+diff --git a/drivers/hwmon/tps23861.c b/drivers/hwmon/tps23861.c
+index dfcfb09d9f3cdf..80fb03f30c302d 100644
+--- a/drivers/hwmon/tps23861.c
++++ b/drivers/hwmon/tps23861.c
+@@ -132,7 +132,7 @@ static int tps23861_read_temp(struct tps23861_data *data, long *val)
+ 	if (err < 0)
+ 		return err;
+ 
+-	*val = (regval * TEMPERATURE_LSB) - 20000;
++	*val = ((long)regval * TEMPERATURE_LSB) - 20000;
+ 
+ 	return 0;
+ }
+diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
+index 61f7c4003d2ff7..e9577f920286d0 100644
+--- a/drivers/i2c/i2c-dev.c
++++ b/drivers/i2c/i2c-dev.c
+@@ -251,10 +251,8 @@ static noinline int i2cdev_ioctl_rdwr(struct i2c_client *client,
+ 		return -EOPNOTSUPP;
+ 
+ 	data_ptrs = kmalloc_array(nmsgs, sizeof(u8 __user *), GFP_KERNEL);
+-	if (data_ptrs == NULL) {
+-		kfree(msgs);
++	if (!data_ptrs)
+ 		return -ENOMEM;
+-	}
+ 
+ 	res = 0;
+ 	for (i = 0; i < nmsgs; i++) {
+@@ -302,7 +300,6 @@ static noinline int i2cdev_ioctl_rdwr(struct i2c_client *client,
+ 		for (j = 0; j < i; ++j)
+ 			kfree(msgs[j].buf);
+ 		kfree(data_ptrs);
+-		kfree(msgs);
+ 		return res;
+ 	}
+ 
+@@ -316,7 +313,6 @@ static noinline int i2cdev_ioctl_rdwr(struct i2c_client *client,
+ 		kfree(msgs[i].buf);
+ 	}
+ 	kfree(data_ptrs);
+-	kfree(msgs);
+ 	return res;
+ }
+ 
+@@ -446,6 +442,7 @@ static long i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ 	case I2C_RDWR: {
+ 		struct i2c_rdwr_ioctl_data rdwr_arg;
+ 		struct i2c_msg *rdwr_pa;
++		int res;
+ 
+ 		if (copy_from_user(&rdwr_arg,
+ 				   (struct i2c_rdwr_ioctl_data __user *)arg,
+@@ -467,7 +464,9 @@ static long i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ 		if (IS_ERR(rdwr_pa))
+ 			return PTR_ERR(rdwr_pa);
+ 
+-		return i2cdev_ioctl_rdwr(client, rdwr_arg.nmsgs, rdwr_pa);
++		res = i2cdev_ioctl_rdwr(client, rdwr_arg.nmsgs, rdwr_pa);
++		kfree(rdwr_pa);
++		return res;
+ 	}
+ 
+ 	case I2C_SMBUS: {
+@@ -540,7 +539,7 @@ static long compat_i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned lo
+ 		struct i2c_rdwr_ioctl_data32 rdwr_arg;
+ 		struct i2c_msg32 __user *p;
+ 		struct i2c_msg *rdwr_pa;
+-		int i;
++		int i, res;
+ 
+ 		if (copy_from_user(&rdwr_arg,
+ 				   (struct i2c_rdwr_ioctl_data32 __user *)arg,
+@@ -573,7 +572,9 @@ static long compat_i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned lo
+ 			};
+ 		}
+ 
+-		return i2cdev_ioctl_rdwr(client, rdwr_arg.nmsgs, rdwr_pa);
++		res = i2cdev_ioctl_rdwr(client, rdwr_arg.nmsgs, rdwr_pa);
++		kfree(rdwr_pa);
++		return res;
+ 	}
+ 	case I2C_SMBUS: {
+ 		struct i2c_smbus_ioctl_data32	data32;
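
The i2c-dev hunks move ownership of the msgs array to the ioctl entry points, which now free it exactly once after i2cdev_ioctl_rdwr() returns; the callee no longer frees a buffer it did not allocate, keeping allocation and release at the same level. The general rule, as a small userspace sketch with invented names:

#include <stdio.h>
#include <stdlib.h>

static int process_msgs(const int *msgs, size_t n)
{
	/* works on msgs but never frees it: the caller owns it */
	size_t i;
	int sum = 0;

	for (i = 0; i < n; i++)
		sum += msgs[i];
	return sum;
}

int main(void)
{
	size_t n = 4;
	int *msgs = calloc(n, sizeof(*msgs));

	if (!msgs)
		return 1;
	printf("sum = %d\n", process_msgs(msgs, n));
	free(msgs);		/* single, unambiguous free point */
	return 0;
}
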
+diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
+index 6f3eb710a75d60..ffe99f0c6acef5 100644
+--- a/drivers/i3c/master.c
++++ b/drivers/i3c/master.c
+@@ -2051,11 +2051,16 @@ int i3c_master_add_i3c_dev_locked(struct i3c_master_controller *master,
+ 			ibireq.max_payload_len = olddev->ibi->max_payload_len;
+ 			ibireq.num_slots = olddev->ibi->num_slots;
+ 
+-			if (olddev->ibi->enabled) {
++			if (olddev->ibi->enabled)
+ 				enable_ibi = true;
+-				i3c_dev_disable_ibi_locked(olddev);
+-			}
+-
++			/*
++	 * The olddev must not receive any more commands on the
++	 * i3c bus: it no longer exists there and has been
++	 * assigned a new address, so any command would end in a
++	 * NACK or a timeout. Set olddev->ibi->enabled to false
++	 * to avoid sending DISEC to OldAddr.
++			 */
++			olddev->ibi->enabled = false;
+ 			i3c_dev_free_ibi_locked(olddev);
+ 		}
+ 		mutex_unlock(&olddev->ibi_lock);
+diff --git a/drivers/iio/accel/adxl380.c b/drivers/iio/accel/adxl380.c
+index f80527d899be4d..b19ee37df7f12e 100644
+--- a/drivers/iio/accel/adxl380.c
++++ b/drivers/iio/accel/adxl380.c
+@@ -1181,7 +1181,7 @@ static int adxl380_read_raw(struct iio_dev *indio_dev,
+ 
+ 		ret = adxl380_read_chn(st, chan->address);
+ 		iio_device_release_direct_mode(indio_dev);
+-		if (ret)
++		if (ret < 0)
+ 			return ret;
+ 
+ 		*val = sign_extend32(ret >> chan->scan_type.shift,
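
The adxl380 one-liner swaps "if (ret)" for "if (ret < 0)": adxl380_read_chn() apparently returns the sample itself on success, so any non-zero reading would otherwise be mistaken for an error. A runnable sketch of the kernel's value-or-negative-errno convention:

#include <errno.h>
#include <stdio.h>

/* toy register read in the value-or-negative-errno style */
static int read_chan(int chan)
{
	if (chan > 3)
		return -EINVAL;
	return 0x7f00 | chan;	/* positive payload, not an error */
}

int main(void)
{
	int ret = read_chan(2);

	/* "if (ret)" would treat the payload as a failure;
	 * only negative values are errors in this convention */
	if (ret < 0)
		printf("error %d\n", ret);
	else
		printf("sample 0x%x\n", ret);
	return 0;
}
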
+diff --git a/drivers/iio/adc/ad4000.c b/drivers/iio/adc/ad4000.c
+index 6ea49124508499..b3b82535f5c14d 100644
+--- a/drivers/iio/adc/ad4000.c
++++ b/drivers/iio/adc/ad4000.c
+@@ -344,6 +344,8 @@ static int ad4000_single_conversion(struct iio_dev *indio_dev,
+ 
+ 	if (chan->scan_type.sign == 's')
+ 		*val = sign_extend32(sample, chan->scan_type.realbits - 1);
++	else
++		*val = sample;
+ 
+ 	return IIO_VAL_INT;
+ }
+@@ -637,7 +639,9 @@ static int ad4000_probe(struct spi_device *spi)
+ 	indio_dev->name = chip->dev_name;
+ 	indio_dev->num_channels = 1;
+ 
+-	devm_mutex_init(dev, &st->lock);
++	ret = devm_mutex_init(dev, &st->lock);
++	if (ret)
++		return ret;
+ 
+ 	st->gain_milli = 1000;
+ 	if (chip->has_hardware_gain) {
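
The ad4000 hunk adds the missing else branch: for unsigned channels, *val was previously left holding whatever happened to be there. For reference, sign_extend32() turns the low index+1 bits of a raw word into a signed value; here is a userspace copy that, to the best of my knowledge, matches the kernel helper:

#include <stdint.h>
#include <stdio.h>

static int32_t sign_extend32(uint32_t value, int index)
{
	uint8_t shift = 31 - index;

	return (int32_t)(value << shift) >> shift;
}

int main(void)
{
	uint32_t sample = 0xFFF0;	/* raw 16-bit two's-complement */

	/* signed channel: extend bit 15; unsigned: use the raw value */
	printf("signed:   %d\n", sign_extend32(sample, 15));
	printf("unsigned: %u\n", sample);
	return 0;
}
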
+diff --git a/drivers/iio/adc/pac1921.c b/drivers/iio/adc/pac1921.c
+index 36e813d9c73f1c..fe1d9e07fce24d 100644
+--- a/drivers/iio/adc/pac1921.c
++++ b/drivers/iio/adc/pac1921.c
+@@ -1171,7 +1171,9 @@ static int pac1921_probe(struct i2c_client *client)
+ 		return dev_err_probe(dev, (int)PTR_ERR(priv->regmap),
+ 				     "Cannot initialize register map\n");
+ 
+-	devm_mutex_init(dev, &priv->lock);
++	ret = devm_mutex_init(dev, &priv->lock);
++	if (ret)
++		return ret;
+ 
+ 	priv->dv_gain = PAC1921_DEFAULT_DV_GAIN;
+ 	priv->di_gain = PAC1921_DEFAULT_DI_GAIN;
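
Both the ad4000 and pac1921 hunks stop ignoring devm_mutex_init(): it registers a devres action and can therefore fail with -ENOMEM. A kernel-style sketch of the pattern (a sketch only, not compilable standalone):

#include <linux/device.h>
#include <linux/mutex.h>

static int example_probe(struct device *dev, struct mutex *lock)
{
	int ret;

	ret = devm_mutex_init(dev, lock);
	if (ret)
		return ret;	/* devres registration failed */

	/* ... rest of probe; the mutex is destroyed automatically ... */
	return 0;
}
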
+diff --git a/drivers/iio/dac/adi-axi-dac.c b/drivers/iio/dac/adi-axi-dac.c
+index 0cb00f3bec0453..b8b4171b80436b 100644
+--- a/drivers/iio/dac/adi-axi-dac.c
++++ b/drivers/iio/dac/adi-axi-dac.c
+@@ -46,7 +46,7 @@
+ #define AXI_DAC_REG_CNTRL_1		0x0044
+ #define   AXI_DAC_SYNC			BIT(0)
+ #define AXI_DAC_REG_CNTRL_2		0x0048
+-#define	  ADI_DAC_R1_MODE		BIT(4)
++#define	  ADI_DAC_R1_MODE		BIT(5)
+ #define AXI_DAC_DRP_STATUS		0x0074
+ #define   AXI_DAC_DRP_LOCKED		BIT(17)
+ /* DAC Channel controls */
+diff --git a/drivers/iio/industrialio-backend.c b/drivers/iio/industrialio-backend.c
+index 20b3b5212da76a..fb34a8e4d04e74 100644
+--- a/drivers/iio/industrialio-backend.c
++++ b/drivers/iio/industrialio-backend.c
+@@ -737,8 +737,8 @@ static struct iio_backend *__devm_iio_backend_fwnode_get(struct device *dev, con
+ 	}
+ 
+ 	fwnode_back = fwnode_find_reference(fwnode, "io-backends", index);
+-	if (IS_ERR(fwnode))
+-		return dev_err_cast_probe(dev, fwnode,
++	if (IS_ERR(fwnode_back))
++		return dev_err_cast_probe(dev, fwnode_back,
+ 					  "Cannot get Firmware reference\n");
+ 
+ 	guard(mutex)(&iio_back_lock);
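
The industrialio-backend fix is a classic copy-paste bug: the IS_ERR() check tested fwnode, the variable the reference was looked up from, instead of fwnode_back, the value just returned. A kernel-style sketch of the corrected shape, with an illustrative function name:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/property.h>

static int example_get_backend(struct device *dev,
			       struct fwnode_handle *fwnode)
{
	struct fwnode_handle *back;

	back = fwnode_find_reference(fwnode, "io-backends", 0);
	if (IS_ERR(back))	/* test the value just returned */
		return dev_err_probe(dev, PTR_ERR(back),
				     "Cannot get Firmware reference\n");

	fwnode_handle_put(back);
	return 0;
}
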
+diff --git a/drivers/iio/industrialio-gts-helper.c b/drivers/iio/industrialio-gts-helper.c
+index 5f131bc1a01e97..4ad949672210ba 100644
+--- a/drivers/iio/industrialio-gts-helper.c
++++ b/drivers/iio/industrialio-gts-helper.c
+@@ -167,7 +167,7 @@ static int iio_gts_gain_cmp(const void *a, const void *b)
+ 
+ static int gain_to_scaletables(struct iio_gts *gts, int **gains, int **scales)
+ {
+-	int ret, i, j, new_idx, time_idx;
++	int i, j, new_idx, time_idx, ret = 0;
+ 	int *all_gains;
+ 	size_t gain_bytes;
+ 
+diff --git a/drivers/iio/light/al3010.c b/drivers/iio/light/al3010.c
+index 53569587ccb7ba..7cbb8b20330090 100644
+--- a/drivers/iio/light/al3010.c
++++ b/drivers/iio/light/al3010.c
+@@ -87,7 +87,12 @@ static int al3010_init(struct al3010_data *data)
+ 	int ret;
+ 
+ 	ret = al3010_set_pwr(data->client, true);
++	if (ret < 0)
++		return ret;
+ 
++	ret = devm_add_action_or_reset(&data->client->dev,
++				       al3010_set_pwr_off,
++				       data);
+ 	if (ret < 0)
+ 		return ret;
+ 
+@@ -190,12 +195,6 @@ static int al3010_probe(struct i2c_client *client)
+ 		return ret;
+ 	}
+ 
+-	ret = devm_add_action_or_reset(&client->dev,
+-					al3010_set_pwr_off,
+-					data);
+-	if (ret < 0)
+-		return ret;
+-
+ 	return devm_iio_device_register(&client->dev, indio_dev);
+ }
+ 
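
The al3010 rework registers the power-off action inside al3010_init(), directly after the power-on it undoes, instead of later in probe; any failure in between (including in devm_add_action_or_reset() itself, which runs the action on failure) still powers the chip back down. A kernel-style sketch of the pairing; example_power_on() is invented for illustration:

#include <linux/device.h>

static int example_power_on(void *ctx)
{
	return 0;	/* hypothetical: enable the device */
}

static void example_power_off(void *ctx)
{
	/* mirror image of the power-on above */
}

static int example_init(struct device *dev, void *ctx)
{
	int ret;

	ret = example_power_on(ctx);
	if (ret < 0)
		return ret;

	/* register the undo immediately after the do */
	return devm_add_action_or_reset(dev, example_power_off, ctx);
}
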
+diff --git a/drivers/infiniband/core/roce_gid_mgmt.c b/drivers/infiniband/core/roce_gid_mgmt.c
+index d5131b3ba8ab04..a9f2c6b1b29ed2 100644
+--- a/drivers/infiniband/core/roce_gid_mgmt.c
++++ b/drivers/infiniband/core/roce_gid_mgmt.c
+@@ -515,6 +515,27 @@ void rdma_roce_rescan_device(struct ib_device *ib_dev)
+ }
+ EXPORT_SYMBOL(rdma_roce_rescan_device);
+ 
++/**
++ * rdma_roce_rescan_port - Rescan all network devices in the system
++ * and add their GIDs to the given port of the RoCE device if relevant.
++ *
++ * @ib_dev: IB device
++ * @port: Port number
++ */
++void rdma_roce_rescan_port(struct ib_device *ib_dev, u32 port)
++{
++	struct net_device *ndev = NULL;
++
++	if (rdma_protocol_roce(ib_dev, port)) {
++		ndev = ib_device_get_netdev(ib_dev, port);
++		if (!ndev)
++			return;
++		enum_all_gids_of_dev_cb(ib_dev, port, ndev, ndev);
++		dev_put(ndev);
++	}
++}
++EXPORT_SYMBOL(rdma_roce_rescan_port);
++
+ static void callback_for_addr_gid_device_scan(struct ib_device *device,
+ 					      u32 port,
+ 					      struct net_device *rdma_ndev,
+@@ -575,16 +596,17 @@ static void handle_netdev_upper(struct ib_device *ib_dev, u32 port,
+ 	}
+ }
+ 
+-static void _roce_del_all_netdev_gids(struct ib_device *ib_dev, u32 port,
+-				      struct net_device *event_ndev)
++void roce_del_all_netdev_gids(struct ib_device *ib_dev,
++			      u32 port, struct net_device *ndev)
+ {
+-	ib_cache_gid_del_all_netdev_gids(ib_dev, port, event_ndev);
++	ib_cache_gid_del_all_netdev_gids(ib_dev, port, ndev);
+ }
++EXPORT_SYMBOL(roce_del_all_netdev_gids);
+ 
+ static void del_netdev_upper_ips(struct ib_device *ib_dev, u32 port,
+ 				 struct net_device *rdma_ndev, void *cookie)
+ {
+-	handle_netdev_upper(ib_dev, port, cookie, _roce_del_all_netdev_gids);
++	handle_netdev_upper(ib_dev, port, cookie, roce_del_all_netdev_gids);
+ }
+ 
+ static void add_netdev_upper_ips(struct ib_device *ib_dev, u32 port,
+diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
+index 821d93c8f7123c..dfd2e5a86e6fe5 100644
+--- a/drivers/infiniband/core/uverbs.h
++++ b/drivers/infiniband/core/uverbs.h
+@@ -160,6 +160,8 @@ struct ib_uverbs_file {
+ 	struct page *disassociate_page;
+ 
+ 	struct xarray		idr;
++
++	struct mutex disassociation_lock;
+ };
+ 
+ struct ib_uverbs_event {
+diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
+index 94454186ed81d5..85cfc790a7bb36 100644
+--- a/drivers/infiniband/core/uverbs_main.c
++++ b/drivers/infiniband/core/uverbs_main.c
+@@ -76,6 +76,7 @@ static dev_t dynamic_uverbs_dev;
+ static DEFINE_IDA(uverbs_ida);
+ static int ib_uverbs_add_one(struct ib_device *device);
+ static void ib_uverbs_remove_one(struct ib_device *device, void *client_data);
++static struct ib_client uverbs_client;
+ 
+ static char *uverbs_devnode(const struct device *dev, umode_t *mode)
+ {
+@@ -217,6 +218,7 @@ void ib_uverbs_release_file(struct kref *ref)
+ 
+ 	if (file->disassociate_page)
+ 		__free_pages(file->disassociate_page, 0);
++	mutex_destroy(&file->disassociation_lock);
+ 	mutex_destroy(&file->umap_lock);
+ 	mutex_destroy(&file->ucontext_lock);
+ 	kfree(file);
+@@ -698,8 +700,13 @@ static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma)
+ 		ret = PTR_ERR(ucontext);
+ 		goto out;
+ 	}
++
++	mutex_lock(&file->disassociation_lock);
++
+ 	vma->vm_ops = &rdma_umap_ops;
+ 	ret = ucontext->device->ops.mmap(ucontext, vma);
++
++	mutex_unlock(&file->disassociation_lock);
+ out:
+ 	srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
+ 	return ret;
+@@ -721,6 +728,8 @@ static void rdma_umap_open(struct vm_area_struct *vma)
+ 	/* We are racing with disassociation */
+ 	if (!down_read_trylock(&ufile->hw_destroy_rwsem))
+ 		goto out_zap;
++	mutex_lock(&ufile->disassociation_lock);
++
+ 	/*
+ 	 * Disassociation already completed, the VMA should already be zapped.
+ 	 */
+@@ -732,10 +741,12 @@ static void rdma_umap_open(struct vm_area_struct *vma)
+ 		goto out_unlock;
+ 	rdma_umap_priv_init(priv, vma, opriv->entry);
+ 
++	mutex_unlock(&ufile->disassociation_lock);
+ 	up_read(&ufile->hw_destroy_rwsem);
+ 	return;
+ 
+ out_unlock:
++	mutex_unlock(&ufile->disassociation_lock);
+ 	up_read(&ufile->hw_destroy_rwsem);
+ out_zap:
+ 	/*
+@@ -819,7 +830,7 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
+ {
+ 	struct rdma_umap_priv *priv, *next_priv;
+ 
+-	lockdep_assert_held(&ufile->hw_destroy_rwsem);
++	mutex_lock(&ufile->disassociation_lock);
+ 
+ 	while (1) {
+ 		struct mm_struct *mm = NULL;
+@@ -845,8 +856,10 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
+ 			break;
+ 		}
+ 		mutex_unlock(&ufile->umap_lock);
+-		if (!mm)
++		if (!mm) {
++			mutex_unlock(&ufile->disassociation_lock);
+ 			return;
++		}
+ 
+ 		/*
+ 		 * The umap_lock is nested under mmap_lock since it used within
+@@ -876,7 +889,31 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
+ 		mmap_read_unlock(mm);
+ 		mmput(mm);
+ 	}
++
++	mutex_unlock(&ufile->disassociation_lock);
++}
++
++/**
++ * rdma_user_mmap_disassociate() - Revoke mmaps for a device
++ * @device: device to revoke
++ *
++ * This function should be called by drivers that need to disable mmaps for the
++ * device, for instance because it is going to be reset.
++ */
++void rdma_user_mmap_disassociate(struct ib_device *device)
++{
++	struct ib_uverbs_device *uverbs_dev =
++		ib_get_client_data(device, &uverbs_client);
++	struct ib_uverbs_file *ufile;
++
++	mutex_lock(&uverbs_dev->lists_mutex);
++	list_for_each_entry(ufile, &uverbs_dev->uverbs_file_list, list) {
++		if (ufile->ucontext)
++			uverbs_user_mmap_disassociate(ufile);
++	}
++	mutex_unlock(&uverbs_dev->lists_mutex);
+ }
++EXPORT_SYMBOL(rdma_user_mmap_disassociate);
+ 
+ /*
+  * ib_uverbs_open() does not need the BKL:
+@@ -947,6 +984,8 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp)
+ 	mutex_init(&file->umap_lock);
+ 	INIT_LIST_HEAD(&file->umaps);
+ 
++	mutex_init(&file->disassociation_lock);
++
+ 	filp->private_data = file;
+ 	list_add_tail(&file->list, &dev->uverbs_file_list);
+ 	mutex_unlock(&dev->lists_mutex);
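
The new disassociation_lock serialises ib_uverbs_mmap() and VMA duplication against uverbs_user_mmap_disassociate(), so a mapping cannot be created while the zap loop is running. A kernel-style sketch of the idea (sketch only, with invented names):

#include <linux/mm.h>
#include <linux/mutex.h>

struct file_ctx {
	struct mutex disassociation_lock;	/* init with mutex_init() */
};

static int example_mmap(struct file_ctx *ctx, struct vm_area_struct *vma,
			int (*dev_mmap)(struct vm_area_struct *vma))
{
	int ret;

	mutex_lock(&ctx->disassociation_lock);
	ret = dev_mmap(vma);	/* install vm_ops under the lock */
	mutex_unlock(&ctx->disassociation_lock);
	return ret;
}

static void example_disassociate(struct file_ctx *ctx)
{
	mutex_lock(&ctx->disassociation_lock);
	/* ... zap all existing mappings; none can be added now ... */
	mutex_unlock(&ctx->disassociation_lock);
}
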
+diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+index e66ae9f22c710c..160096792224b1 100644
+--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
++++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+@@ -3633,7 +3633,7 @@ static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *gsi_sqp,
+ 	wc->byte_len = orig_cqe->length;
+ 	wc->qp = &gsi_qp->ib_qp;
+ 
+-	wc->ex.imm_data = cpu_to_be32(le32_to_cpu(orig_cqe->immdata));
++	wc->ex.imm_data = cpu_to_be32(orig_cqe->immdata);
+ 	wc->src_qp = orig_cqe->src_qp;
+ 	memcpy(wc->smac, orig_cqe->smac, ETH_ALEN);
+ 	if (bnxt_re_is_vlan_pkt(orig_cqe, &vlan_id, &sl)) {
+@@ -3778,7 +3778,10 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
+ 				 (unsigned long)(cqe->qp_handle),
+ 				 struct bnxt_re_qp, qplib_qp);
+ 			wc->qp = &qp->ib_qp;
+-			wc->ex.imm_data = cpu_to_be32(le32_to_cpu(cqe->immdata));
++			if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
++				wc->ex.imm_data = cpu_to_be32(cqe->immdata);
++			else
++				wc->ex.invalidate_rkey = cqe->invrkey;
+ 			wc->src_qp = cqe->src_qp;
+ 			memcpy(wc->smac, cqe->smac, ETH_ALEN);
+ 			wc->port_num = 1;
+diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
+index 9eb290ec71a85d..2ac8ddbed576f5 100644
+--- a/drivers/infiniband/hw/bnxt_re/main.c
++++ b/drivers/infiniband/hw/bnxt_re/main.c
+@@ -2033,12 +2033,6 @@ static int bnxt_re_suspend(struct auxiliary_device *adev, pm_message_t state)
+ 	rdev = en_info->rdev;
+ 	en_dev = en_info->en_dev;
+ 	mutex_lock(&bnxt_re_mutex);
+-	/* L2 driver may invoke this callback during device error/crash or device
+-	 * reset. Current RoCE driver doesn't recover the device in case of
+-	 * error. Handle the error by dispatching fatal events to all qps
+-	 * ie. by calling bnxt_re_dev_stop and release the MSIx vectors as
+-	 * L2 driver want to modify the MSIx table.
+-	 */
+ 
+ 	ibdev_info(&rdev->ibdev, "Handle device suspend call");
+ 	/* Check the current device state from bnxt_en_dev and move the
+@@ -2046,17 +2040,12 @@ static int bnxt_re_suspend(struct auxiliary_device *adev, pm_message_t state)
+ 	 * This prevents more commands to HW during clean-up,
+ 	 * in case the device is already in error.
+ 	 */
+-	if (test_bit(BNXT_STATE_FW_FATAL_COND, &rdev->en_dev->en_state))
++	if (test_bit(BNXT_STATE_FW_FATAL_COND, &rdev->en_dev->en_state)) {
+ 		set_bit(ERR_DEVICE_DETACHED, &rdev->rcfw.cmdq.flags);
+-
+-	bnxt_re_dev_stop(rdev);
+-	bnxt_re_stop_irq(adev);
+-	/* Move the device states to detached and  avoid sending any more
+-	 * commands to HW
+-	 */
+-	set_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags);
+-	set_bit(ERR_DEVICE_DETACHED, &rdev->rcfw.cmdq.flags);
+-	wake_up_all(&rdev->rcfw.cmdq.waitq);
++		set_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags);
++		wake_up_all(&rdev->rcfw.cmdq.waitq);
++		bnxt_re_dev_stop(rdev);
++	}
+ 
+ 	if (rdev->pacing.dbr_pacing)
+ 		bnxt_re_set_pacing_dev_state(rdev);
+@@ -2075,13 +2064,6 @@ static int bnxt_re_resume(struct auxiliary_device *adev)
+ 	struct bnxt_re_dev *rdev;
+ 
+ 	mutex_lock(&bnxt_re_mutex);
+-	/* L2 driver may invoke this callback during device recovery, resume.
+-	 * reset. Current RoCE driver doesn't recover the device in case of
+-	 * error. Handle the error by dispatching fatal events to all qps
+-	 * ie. by calling bnxt_re_dev_stop and release the MSIx vectors as
+-	 * L2 driver want to modify the MSIx table.
+-	 */
+-
+ 	bnxt_re_add_device(adev, BNXT_RE_POST_RECOVERY_INIT);
+ 	rdev = en_info->rdev;
+ 	ibdev_info(&rdev->ibdev, "Device resume completed");
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+index 820611a239433a..f55958e5fddb4a 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
++++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+@@ -391,7 +391,7 @@ struct bnxt_qplib_cqe {
+ 	u16				cfa_meta;
+ 	u64				wr_id;
+ 	union {
+-		__le32			immdata;
++		u32			immdata;
+ 		u32			invrkey;
+ 	};
+ 	u64				qp_handle;
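
The qplib_fp.h hunk drops the __le32 annotation on immdata, and the matching ib_verbs.c hunks drop le32_to_cpu(), leaving a single well-defined conversion into the big-endian imm_data format. A runnable userspace reminder of why such annotations are easy to get wrong on little-endian hosts, where the from-LE conversion is a no-op:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t raw = 0x11223344;	/* value as the CPU sees it */

	/* On little-endian, le32toh() changes nothing, so a mislabelled
	 * field appears to work there and only breaks on big-endian;
	 * htobe32() always swaps on a little-endian host. */
	printf("le32toh: 0x%08x\n", (uint32_t)le32toh(raw));
	printf("htobe32: 0x%08x\n", (uint32_t)htobe32(raw));
	return 0;
}
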
+diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
+index 4ec66611a14340..4106423a1b399d 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
++++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
+@@ -179,8 +179,8 @@ static void free_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
+ 	ret = hns_roce_destroy_hw_ctx(hr_dev, HNS_ROCE_CMD_DESTROY_CQC,
+ 				      hr_cq->cqn);
+ 	if (ret)
+-		dev_err(dev, "DESTROY_CQ failed (%d) for CQN %06lx\n", ret,
+-			hr_cq->cqn);
++		dev_err_ratelimited(dev, "DESTROY_CQ failed (%d) for CQN %06lx\n",
++				    ret, hr_cq->cqn);
+ 
+ 	xa_erase_irq(&cq_table->array, hr_cq->cqn);
+ 
+diff --git a/drivers/infiniband/hw/hns/hns_roce_debugfs.c b/drivers/infiniband/hw/hns/hns_roce_debugfs.c
+index e8febb40f6450c..b869cdc5411893 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_debugfs.c
++++ b/drivers/infiniband/hw/hns/hns_roce_debugfs.c
+@@ -5,6 +5,7 @@
+ 
+ #include <linux/debugfs.h>
+ #include <linux/device.h>
++#include <linux/pci.h>
+ 
+ #include "hns_roce_device.h"
+ 
+@@ -86,7 +87,7 @@ void hns_roce_register_debugfs(struct hns_roce_dev *hr_dev)
+ {
+ 	struct hns_roce_dev_debugfs *dbgfs = &hr_dev->dbgfs;
+ 
+-	dbgfs->root = debugfs_create_dir(dev_name(&hr_dev->ib_dev.dev),
++	dbgfs->root = debugfs_create_dir(pci_name(hr_dev->pci_dev),
+ 					 hns_roce_dbgfs_root);
+ 
+ 	create_sw_stat_debugfs(hr_dev, dbgfs->root);
+diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
+index 0b1e21cb6d2d38..560a1d9de408ff 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_device.h
++++ b/drivers/infiniband/hw/hns/hns_roce_device.h
+@@ -489,12 +489,6 @@ struct hns_roce_bank {
+ 	u32 next; /* Next ID to allocate. */
+ };
+ 
+-struct hns_roce_idx_table {
+-	u32 *spare_idx;
+-	u32 head;
+-	u32 tail;
+-};
+-
+ struct hns_roce_qp_table {
+ 	struct hns_roce_hem_table	qp_table;
+ 	struct hns_roce_hem_table	irrl_table;
+@@ -503,7 +497,7 @@ struct hns_roce_qp_table {
+ 	struct mutex			scc_mutex;
+ 	struct hns_roce_bank bank[HNS_ROCE_QP_BANK_NUM];
+ 	struct mutex bank_mutex;
+-	struct hns_roce_idx_table	idx_table;
++	struct xarray			dip_xa;
+ };
+ 
+ struct hns_roce_cq_table {
+@@ -593,6 +587,7 @@ struct hns_roce_dev;
+ 
+ enum {
+ 	HNS_ROCE_FLUSH_FLAG = 0,
++	HNS_ROCE_STOP_FLUSH_FLAG = 1,
+ };
+ 
+ struct hns_roce_work {
+@@ -656,6 +651,8 @@ struct hns_roce_qp {
+ 	enum hns_roce_cong_type	cong_type;
+ 	u8			tc_mode;
+ 	u8			priority;
++	spinlock_t flush_lock;
++	struct hns_roce_dip *dip;
+ };
+ 
+ struct hns_roce_ib_iboe {
+@@ -982,8 +979,6 @@ struct hns_roce_dev {
+ 	enum hns_roce_device_state state;
+ 	struct list_head	qp_list; /* list of all qps on this dev */
+ 	spinlock_t		qp_list_lock; /* protect qp_list */
+-	struct list_head	dip_list; /* list of all dest ips on this dev */
+-	spinlock_t		dip_list_lock; /* protect dip_list */
+ 
+ 	struct list_head        pgdir_list;
+ 	struct mutex            pgdir_mutex;
+@@ -1289,6 +1284,7 @@ void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn);
+ void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type);
+ void flush_cqe(struct hns_roce_dev *dev, struct hns_roce_qp *qp);
+ void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type);
++void hns_roce_flush_cqe(struct hns_roce_dev *hr_dev, u32 qpn);
+ void hns_roce_srq_event(struct hns_roce_dev *hr_dev, u32 srqn, int event_type);
+ void hns_roce_handle_device_err(struct hns_roce_dev *hr_dev);
+ int hns_roce_init(struct hns_roce_dev *hr_dev);
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
+index c7c167e2a04513..f84521be3bea4a 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
+@@ -300,7 +300,7 @@ static int calc_hem_config(struct hns_roce_dev *hr_dev,
+ 			   struct hns_roce_hem_mhop *mhop,
+ 			   struct hns_roce_hem_index *index)
+ {
+-	struct ib_device *ibdev = &hr_dev->ib_dev;
++	struct device *dev = hr_dev->dev;
+ 	unsigned long mhop_obj = obj;
+ 	u32 l0_idx, l1_idx, l2_idx;
+ 	u32 chunk_ba_num;
+@@ -331,14 +331,14 @@ static int calc_hem_config(struct hns_roce_dev *hr_dev,
+ 		index->buf = l0_idx;
+ 		break;
+ 	default:
+-		ibdev_err(ibdev, "table %u not support mhop.hop_num = %u!\n",
+-			  table->type, mhop->hop_num);
++		dev_err(dev, "table %u not support mhop.hop_num = %u!\n",
++			table->type, mhop->hop_num);
+ 		return -EINVAL;
+ 	}
+ 
+ 	if (unlikely(index->buf >= table->num_hem)) {
+-		ibdev_err(ibdev, "table %u exceed hem limt idx %llu, max %lu!\n",
+-			  table->type, index->buf, table->num_hem);
++		dev_err(dev, "table %u exceeds hem limit idx %llu, max %lu!\n",
++			table->type, index->buf, table->num_hem);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -448,14 +448,14 @@ static int set_mhop_hem(struct hns_roce_dev *hr_dev,
+ 			struct hns_roce_hem_mhop *mhop,
+ 			struct hns_roce_hem_index *index)
+ {
+-	struct ib_device *ibdev = &hr_dev->ib_dev;
++	struct device *dev = hr_dev->dev;
+ 	u32 step_idx;
+ 	int ret = 0;
+ 
+ 	if (index->inited & HEM_INDEX_L0) {
+ 		ret = hr_dev->hw->set_hem(hr_dev, table, obj, 0);
+ 		if (ret) {
+-			ibdev_err(ibdev, "set HEM step 0 failed!\n");
++			dev_err(dev, "set HEM step 0 failed!\n");
+ 			goto out;
+ 		}
+ 	}
+@@ -463,7 +463,7 @@ static int set_mhop_hem(struct hns_roce_dev *hr_dev,
+ 	if (index->inited & HEM_INDEX_L1) {
+ 		ret = hr_dev->hw->set_hem(hr_dev, table, obj, 1);
+ 		if (ret) {
+-			ibdev_err(ibdev, "set HEM step 1 failed!\n");
++			dev_err(dev, "set HEM step 1 failed!\n");
+ 			goto out;
+ 		}
+ 	}
+@@ -475,7 +475,7 @@ static int set_mhop_hem(struct hns_roce_dev *hr_dev,
+ 			step_idx = mhop->hop_num;
+ 		ret = hr_dev->hw->set_hem(hr_dev, table, obj, step_idx);
+ 		if (ret)
+-			ibdev_err(ibdev, "set HEM step last failed!\n");
++			dev_err(dev, "set HEM step last failed!\n");
+ 	}
+ out:
+ 	return ret;
+@@ -485,14 +485,14 @@ static int hns_roce_table_mhop_get(struct hns_roce_dev *hr_dev,
+ 				   struct hns_roce_hem_table *table,
+ 				   unsigned long obj)
+ {
+-	struct ib_device *ibdev = &hr_dev->ib_dev;
+ 	struct hns_roce_hem_index index = {};
+ 	struct hns_roce_hem_mhop mhop = {};
++	struct device *dev = hr_dev->dev;
+ 	int ret;
+ 
+ 	ret = calc_hem_config(hr_dev, table, obj, &mhop, &index);
+ 	if (ret) {
+-		ibdev_err(ibdev, "calc hem config failed!\n");
++		dev_err(dev, "calc hem config failed!\n");
+ 		return ret;
+ 	}
+ 
+@@ -504,7 +504,7 @@ static int hns_roce_table_mhop_get(struct hns_roce_dev *hr_dev,
+ 
+ 	ret = alloc_mhop_hem(hr_dev, table, &mhop, &index);
+ 	if (ret) {
+-		ibdev_err(ibdev, "alloc mhop hem failed!\n");
++		dev_err(dev, "alloc mhop hem failed!\n");
+ 		goto out;
+ 	}
+ 
+@@ -512,7 +512,7 @@ static int hns_roce_table_mhop_get(struct hns_roce_dev *hr_dev,
+ 	if (table->type < HEM_TYPE_MTT) {
+ 		ret = set_mhop_hem(hr_dev, table, obj, &mhop, &index);
+ 		if (ret) {
+-			ibdev_err(ibdev, "set HEM address to HW failed!\n");
++			dev_err(dev, "set HEM address to HW failed!\n");
+ 			goto err_alloc;
+ 		}
+ 	}
+@@ -575,7 +575,7 @@ static void clear_mhop_hem(struct hns_roce_dev *hr_dev,
+ 			   struct hns_roce_hem_mhop *mhop,
+ 			   struct hns_roce_hem_index *index)
+ {
+-	struct ib_device *ibdev = &hr_dev->ib_dev;
++	struct device *dev = hr_dev->dev;
+ 	u32 hop_num = mhop->hop_num;
+ 	u32 chunk_ba_num;
+ 	u32 step_idx;
+@@ -605,21 +605,21 @@ static void clear_mhop_hem(struct hns_roce_dev *hr_dev,
+ 
+ 		ret = hr_dev->hw->clear_hem(hr_dev, table, obj, step_idx);
+ 		if (ret)
+-			ibdev_warn(ibdev, "failed to clear hop%u HEM, ret = %d.\n",
+-				   hop_num, ret);
++			dev_warn(dev, "failed to clear hop%u HEM, ret = %d.\n",
++				 hop_num, ret);
+ 
+ 		if (index->inited & HEM_INDEX_L1) {
+ 			ret = hr_dev->hw->clear_hem(hr_dev, table, obj, 1);
+ 			if (ret)
+-				ibdev_warn(ibdev, "failed to clear HEM step 1, ret = %d.\n",
+-					   ret);
++				dev_warn(dev, "failed to clear HEM step 1, ret = %d.\n",
++					 ret);
+ 		}
+ 
+ 		if (index->inited & HEM_INDEX_L0) {
+ 			ret = hr_dev->hw->clear_hem(hr_dev, table, obj, 0);
+ 			if (ret)
+-				ibdev_warn(ibdev, "failed to clear HEM step 0, ret = %d.\n",
+-					   ret);
++				dev_warn(dev, "failed to clear HEM step 0, ret = %d.\n",
++					 ret);
+ 		}
+ 	}
+ }
+@@ -629,14 +629,14 @@ static void hns_roce_table_mhop_put(struct hns_roce_dev *hr_dev,
+ 				    unsigned long obj,
+ 				    int check_refcount)
+ {
+-	struct ib_device *ibdev = &hr_dev->ib_dev;
+ 	struct hns_roce_hem_index index = {};
+ 	struct hns_roce_hem_mhop mhop = {};
++	struct device *dev = hr_dev->dev;
+ 	int ret;
+ 
+ 	ret = calc_hem_config(hr_dev, table, obj, &mhop, &index);
+ 	if (ret) {
+-		ibdev_err(ibdev, "calc hem config failed!\n");
++		dev_err(dev, "calc hem config failed!\n");
+ 		return;
+ 	}
+ 
+@@ -672,8 +672,8 @@ void hns_roce_table_put(struct hns_roce_dev *hr_dev,
+ 
+ 	ret = hr_dev->hw->clear_hem(hr_dev, table, obj, HEM_HOP_STEP_DIRECT);
+ 	if (ret)
+-		dev_warn(dev, "failed to clear HEM base address, ret = %d.\n",
+-			 ret);
++		dev_warn_ratelimited(dev, "failed to clear HEM base address, ret = %d.\n",
++				     ret);
+ 
+ 	hns_roce_free_hem(hr_dev, table->hem[i]);
+ 	table->hem[i] = NULL;
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+index 24e906b9d3ae13..697b17cca02e71 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -373,19 +373,12 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ static int check_send_valid(struct hns_roce_dev *hr_dev,
+ 			    struct hns_roce_qp *hr_qp)
+ {
+-	struct ib_device *ibdev = &hr_dev->ib_dev;
+-
+ 	if (unlikely(hr_qp->state == IB_QPS_RESET ||
+ 		     hr_qp->state == IB_QPS_INIT ||
+-		     hr_qp->state == IB_QPS_RTR)) {
+-		ibdev_err(ibdev, "failed to post WQE, QP state %u!\n",
+-			  hr_qp->state);
++		     hr_qp->state == IB_QPS_RTR))
+ 		return -EINVAL;
+-	} else if (unlikely(hr_dev->state >= HNS_ROCE_DEVICE_STATE_RST_DOWN)) {
+-		ibdev_err(ibdev, "failed to post WQE, dev state %d!\n",
+-			  hr_dev->state);
++	else if (unlikely(hr_dev->state >= HNS_ROCE_DEVICE_STATE_RST_DOWN))
+ 		return -EIO;
+-	}
+ 
+ 	return 0;
+ }
+@@ -582,7 +575,7 @@ static inline int set_rc_wqe(struct hns_roce_qp *qp,
+ 	if (WARN_ON(ret))
+ 		return ret;
+ 
+-	hr_reg_write(rc_sq_wqe, RC_SEND_WQE_FENCE,
++	hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SO,
+ 		     (wr->send_flags & IB_SEND_FENCE) ? 1 : 0);
+ 
+ 	hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SE,
+@@ -2560,20 +2553,19 @@ static void hns_roce_free_link_table(struct hns_roce_dev *hr_dev)
+ 	free_link_table_buf(hr_dev, &priv->ext_llm);
+ }
+ 
+-static void free_dip_list(struct hns_roce_dev *hr_dev)
++static void free_dip_entry(struct hns_roce_dev *hr_dev)
+ {
+ 	struct hns_roce_dip *hr_dip;
+-	struct hns_roce_dip *tmp;
+-	unsigned long flags;
++	unsigned long idx;
+ 
+-	spin_lock_irqsave(&hr_dev->dip_list_lock, flags);
++	xa_lock(&hr_dev->qp_table.dip_xa);
+ 
+-	list_for_each_entry_safe(hr_dip, tmp, &hr_dev->dip_list, node) {
+-		list_del(&hr_dip->node);
++	xa_for_each(&hr_dev->qp_table.dip_xa, idx, hr_dip) {
++		__xa_erase(&hr_dev->qp_table.dip_xa, hr_dip->dip_idx);
+ 		kfree(hr_dip);
+ 	}
+ 
+-	spin_unlock_irqrestore(&hr_dev->dip_list_lock, flags);
++	xa_unlock(&hr_dev->qp_table.dip_xa);
+ }
+ 
+ static struct ib_pd *free_mr_init_pd(struct hns_roce_dev *hr_dev)
+@@ -2775,8 +2767,8 @@ static int free_mr_modify_rsv_qp(struct hns_roce_dev *hr_dev,
+ 	ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, attr, mask, IB_QPS_INIT,
+ 				    IB_QPS_INIT, NULL);
+ 	if (ret) {
+-		ibdev_err(ibdev, "failed to modify qp to init, ret = %d.\n",
+-			  ret);
++		ibdev_err_ratelimited(ibdev, "failed to modify qp to init, ret = %d.\n",
++				      ret);
+ 		return ret;
+ 	}
+ 
+@@ -2981,7 +2973,7 @@ static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev)
+ 		hns_roce_free_link_table(hr_dev);
+ 
+ 	if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP09)
+-		free_dip_list(hr_dev);
++		free_dip_entry(hr_dev);
+ }
+ 
+ static int hns_roce_mbox_post(struct hns_roce_dev *hr_dev,
+@@ -3421,8 +3413,8 @@ static int free_mr_post_send_lp_wqe(struct hns_roce_qp *hr_qp)
+ 
+ 	ret = hns_roce_v2_post_send(&hr_qp->ibqp, send_wr, &bad_wr);
+ 	if (ret) {
+-		ibdev_err(ibdev, "failed to post wqe for free mr, ret = %d.\n",
+-			  ret);
++		ibdev_err_ratelimited(ibdev, "failed to post wqe for free mr, ret = %d.\n",
++				      ret);
+ 		return ret;
+ 	}
+ 
+@@ -3461,9 +3453,9 @@ static void free_mr_send_cmd_to_hw(struct hns_roce_dev *hr_dev)
+ 
+ 		ret = free_mr_post_send_lp_wqe(hr_qp);
+ 		if (ret) {
+-			ibdev_err(ibdev,
+-				  "failed to send wqe (qp:0x%lx) for free mr, ret = %d.\n",
+-				  hr_qp->qpn, ret);
++			ibdev_err_ratelimited(ibdev,
++					      "failed to send wqe (qp:0x%lx) for free mr, ret = %d.\n",
++					      hr_qp->qpn, ret);
+ 			break;
+ 		}
+ 
+@@ -3474,16 +3466,16 @@ static void free_mr_send_cmd_to_hw(struct hns_roce_dev *hr_dev)
+ 	while (cqe_cnt) {
+ 		npolled = hns_roce_v2_poll_cq(&free_mr->rsv_cq->ib_cq, cqe_cnt, wc);
+ 		if (npolled < 0) {
+-			ibdev_err(ibdev,
+-				  "failed to poll cqe for free mr, remain %d cqe.\n",
+-				  cqe_cnt);
++			ibdev_err_ratelimited(ibdev,
++					      "failed to poll cqe for free mr, remain %d cqe.\n",
++					      cqe_cnt);
+ 			goto out;
+ 		}
+ 
+ 		if (time_after(jiffies, end)) {
+-			ibdev_err(ibdev,
+-				  "failed to poll cqe for free mr and timeout, remain %d cqe.\n",
+-				  cqe_cnt);
++			ibdev_err_ratelimited(ibdev,
++					      "failed to poll cqe for free mr and timeout, remain %d cqe.\n",
++					      cqe_cnt);
+ 			goto out;
+ 		}
+ 		cqe_cnt -= npolled;
+@@ -4701,26 +4693,49 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp, int attr_mask,
+ 	return 0;
+ }
+ 
++static int alloc_dip_entry(struct xarray *dip_xa, u32 qpn)
++{
++	struct hns_roce_dip *hr_dip;
++	int ret;
++
++	hr_dip = xa_load(dip_xa, qpn);
++	if (hr_dip)
++		return 0;
++
++	hr_dip = kzalloc(sizeof(*hr_dip), GFP_KERNEL);
++	if (!hr_dip)
++		return -ENOMEM;
++
++	ret = xa_err(xa_store(dip_xa, qpn, hr_dip, GFP_KERNEL));
++	if (ret)
++		kfree(hr_dip);
++
++	return ret;
++}
++
+ static int get_dip_ctx_idx(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
+ 			   u32 *dip_idx)
+ {
+ 	const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
+ 	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+-	u32 *spare_idx = hr_dev->qp_table.idx_table.spare_idx;
+-	u32 *head =  &hr_dev->qp_table.idx_table.head;
+-	u32 *tail =  &hr_dev->qp_table.idx_table.tail;
++	struct xarray *dip_xa = &hr_dev->qp_table.dip_xa;
++	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+ 	struct hns_roce_dip *hr_dip;
+-	unsigned long flags;
++	unsigned long idx;
+ 	int ret = 0;
+ 
+-	spin_lock_irqsave(&hr_dev->dip_list_lock, flags);
++	ret = alloc_dip_entry(dip_xa, ibqp->qp_num);
++	if (ret)
++		return ret;
+ 
+-	spare_idx[*tail] = ibqp->qp_num;
+-	*tail = (*tail == hr_dev->caps.num_qps - 1) ? 0 : (*tail + 1);
++	xa_lock(dip_xa);
+ 
+-	list_for_each_entry(hr_dip, &hr_dev->dip_list, node) {
+-		if (!memcmp(grh->dgid.raw, hr_dip->dgid, GID_LEN_V2)) {
++	xa_for_each(dip_xa, idx, hr_dip) {
++		if (hr_dip->qp_cnt &&
++		    !memcmp(grh->dgid.raw, hr_dip->dgid, GID_LEN_V2)) {
+ 			*dip_idx = hr_dip->dip_idx;
++			hr_dip->qp_cnt++;
++			hr_qp->dip = hr_dip;
+ 			goto out;
+ 		}
+ 	}
+@@ -4728,19 +4743,24 @@ static int get_dip_ctx_idx(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
+ 	/* If no dgid is found, a new dip and a mapping between dgid and
+ 	 * dip_idx will be created.
+ 	 */
+-	hr_dip = kzalloc(sizeof(*hr_dip), GFP_ATOMIC);
+-	if (!hr_dip) {
+-		ret = -ENOMEM;
+-		goto out;
++	xa_for_each(dip_xa, idx, hr_dip) {
++		if (hr_dip->qp_cnt)
++			continue;
++
++		*dip_idx = idx;
++		memcpy(hr_dip->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
++		hr_dip->dip_idx = idx;
++		hr_dip->qp_cnt++;
++		hr_qp->dip = hr_dip;
++		break;
+ 	}
+ 
+-	memcpy(hr_dip->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
+-	hr_dip->dip_idx = *dip_idx = spare_idx[*head];
+-	*head = (*head == hr_dev->caps.num_qps - 1) ? 0 : (*head + 1);
+-	list_add_tail(&hr_dip->node, &hr_dev->dip_list);
++	/* This should never happen. */
++	if (WARN_ON_ONCE(!hr_qp->dip))
++		ret = -ENOSPC;
+ 
+ out:
+-	spin_unlock_irqrestore(&hr_dev->dip_list_lock, flags);
++	xa_unlock(dip_xa);
+ 	return ret;
+ }
+ 
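
The DIP rework replaces a manually locked linked list with an xarray keyed by QPN, which gives indexed lookup and its own internal locking. A kernel-style sketch of the basic shape, mirroring alloc_dip_entry() above (illustrative names, not compilable standalone):

#include <linux/slab.h>
#include <linux/xarray.h>

struct dip_entry {
	u32 idx;
	u32 qp_cnt;
};

static DEFINE_XARRAY(dip_xa);

static int dip_get(u32 qpn)
{
	struct dip_entry *e = xa_load(&dip_xa, qpn);
	int ret;

	if (e)
		return 0;	/* already present */

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return -ENOMEM;
	e->idx = qpn;

	ret = xa_err(xa_store(&dip_xa, qpn, e, GFP_KERNEL));
	if (ret)
		kfree(e);	/* store failed, nothing owns e */
	return ret;
}
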
+@@ -5061,10 +5081,8 @@ static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
+ 	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ 	int ret = 0;
+ 
+-	if (!check_qp_state(cur_state, new_state)) {
+-		ibdev_err(&hr_dev->ib_dev, "Illegal state for QP!\n");
++	if (!check_qp_state(cur_state, new_state))
+ 		return -EINVAL;
+-	}
+ 
+ 	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
+ 		memset(qpc_mask, 0, hr_dev->caps.qpc_sz);
+@@ -5325,7 +5343,7 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
+ 	/* SW pass context to HW */
+ 	ret = hns_roce_v2_qp_modify(hr_dev, context, qpc_mask, hr_qp);
+ 	if (ret) {
+-		ibdev_err(ibdev, "failed to modify QP, ret = %d.\n", ret);
++		ibdev_err_ratelimited(ibdev, "failed to modify QP, ret = %d.\n", ret);
+ 		goto out;
+ 	}
+ 
+@@ -5463,7 +5481,9 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
+ 
+ 	ret = hns_roce_v2_query_qpc(hr_dev, hr_qp->qpn, &context);
+ 	if (ret) {
+-		ibdev_err(ibdev, "failed to query QPC, ret = %d.\n", ret);
++		ibdev_err_ratelimited(ibdev,
++				      "failed to query QPC, ret = %d.\n",
++				      ret);
+ 		ret = -EINVAL;
+ 		goto out;
+ 	}
+@@ -5471,7 +5491,7 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
+ 	state = hr_reg_read(&context, QPC_QP_ST);
+ 	tmp_qp_state = to_ib_qp_st((enum hns_roce_v2_qp_state)state);
+ 	if (tmp_qp_state == -1) {
+-		ibdev_err(ibdev, "Illegal ib_qp_state\n");
++		ibdev_err_ratelimited(ibdev, "Illegal ib_qp_state\n");
+ 		ret = -EINVAL;
+ 		goto out;
+ 	}
+@@ -5564,9 +5584,9 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
+ 		ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, NULL, 0,
+ 					    hr_qp->state, IB_QPS_RESET, udata);
+ 		if (ret)
+-			ibdev_err(ibdev,
+-				  "failed to modify QP to RST, ret = %d.\n",
+-				  ret);
++			ibdev_err_ratelimited(ibdev,
++					      "failed to modify QP to RST, ret = %d.\n",
++					      ret);
+ 	}
+ 
+ 	send_cq = hr_qp->ibqp.send_cq ? to_hr_cq(hr_qp->ibqp.send_cq) : NULL;
+@@ -5594,17 +5614,41 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
+ 	return ret;
+ }
+ 
++static void put_dip_ctx_idx(struct hns_roce_dev *hr_dev,
++			    struct hns_roce_qp *hr_qp)
++{
++	struct hns_roce_dip *hr_dip = hr_qp->dip;
++
++	xa_lock(&hr_dev->qp_table.dip_xa);
++
++	hr_dip->qp_cnt--;
++	if (!hr_dip->qp_cnt)
++		memset(hr_dip->dgid, 0, GID_LEN_V2);
++
++	xa_unlock(&hr_dev->qp_table.dip_xa);
++}
++
+ int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
+ {
+ 	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ 	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
++	unsigned long flags;
+ 	int ret;
+ 
++	/* Make sure flush_cqe() is completed */
++	spin_lock_irqsave(&hr_qp->flush_lock, flags);
++	set_bit(HNS_ROCE_STOP_FLUSH_FLAG, &hr_qp->flush_flag);
++	spin_unlock_irqrestore(&hr_qp->flush_lock, flags);
++	flush_work(&hr_qp->flush_work.work);
++
++	if (hr_qp->cong_type == CONG_TYPE_DIP)
++		put_dip_ctx_idx(hr_dev, hr_qp);
++
+ 	ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, udata);
+ 	if (ret)
+-		ibdev_err(&hr_dev->ib_dev,
+-			  "failed to destroy QP, QPN = 0x%06lx, ret = %d.\n",
+-			  hr_qp->qpn, ret);
++		ibdev_err_ratelimited(&hr_dev->ib_dev,
++				      "failed to destroy QP, QPN = 0x%06lx, ret = %d.\n",
++				      hr_qp->qpn, ret);
+ 
+ 	hns_roce_qp_destroy(hr_dev, hr_qp, udata);
+ 
+@@ -5898,9 +5942,9 @@ static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
+ 				HNS_ROCE_CMD_MODIFY_CQC, hr_cq->cqn);
+ 	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+ 	if (ret)
+-		ibdev_err(&hr_dev->ib_dev,
+-			  "failed to process cmd when modifying CQ, ret = %d.\n",
+-			  ret);
++		ibdev_err_ratelimited(&hr_dev->ib_dev,
++				      "failed to process cmd when modifying CQ, ret = %d.\n",
++				      ret);
+ 
+ err_out:
+ 	if (ret)
+@@ -5924,9 +5968,9 @@ static int hns_roce_v2_query_cqc(struct hns_roce_dev *hr_dev, u32 cqn,
+ 	ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma,
+ 				HNS_ROCE_CMD_QUERY_CQC, cqn);
+ 	if (ret) {
+-		ibdev_err(&hr_dev->ib_dev,
+-			  "failed to process cmd when querying CQ, ret = %d.\n",
+-			  ret);
++		ibdev_err_ratelimited(&hr_dev->ib_dev,
++				      "failed to process cmd when querying CQ, ret = %d.\n",
++				      ret);
+ 		goto err_mailbox;
+ 	}
+ 
+@@ -5967,11 +6011,10 @@ static int hns_roce_v2_query_mpt(struct hns_roce_dev *hr_dev, u32 key,
+ 	return ret;
+ }
+ 
+-static void hns_roce_irq_work_handle(struct work_struct *work)
++static void dump_aeqe_log(struct hns_roce_work *irq_work)
+ {
+-	struct hns_roce_work *irq_work =
+-				container_of(work, struct hns_roce_work, work);
+-	struct ib_device *ibdev = &irq_work->hr_dev->ib_dev;
++	struct hns_roce_dev *hr_dev = irq_work->hr_dev;
++	struct ib_device *ibdev = &hr_dev->ib_dev;
+ 
+ 	switch (irq_work->event_type) {
+ 	case HNS_ROCE_EVENT_TYPE_PATH_MIG:
+@@ -6015,6 +6058,8 @@ static void hns_roce_irq_work_handle(struct work_struct *work)
+ 	case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
+ 		ibdev_warn(ibdev, "DB overflow.\n");
+ 		break;
++	case HNS_ROCE_EVENT_TYPE_MB:
++		break;
+ 	case HNS_ROCE_EVENT_TYPE_FLR:
+ 		ibdev_warn(ibdev, "function level reset.\n");
+ 		break;
+@@ -6025,8 +6070,46 @@ static void hns_roce_irq_work_handle(struct work_struct *work)
+ 		ibdev_err(ibdev, "invalid xrceth error.\n");
+ 		break;
+ 	default:
++		ibdev_info(ibdev, "Undefined event %d.\n",
++			   irq_work->event_type);
+ 		break;
+ 	}
++}
++
++static void hns_roce_irq_work_handle(struct work_struct *work)
++{
++	struct hns_roce_work *irq_work =
++				container_of(work, struct hns_roce_work, work);
++	struct hns_roce_dev *hr_dev = irq_work->hr_dev;
++	int event_type = irq_work->event_type;
++	u32 queue_num = irq_work->queue_num;
++
++	switch (event_type) {
++	case HNS_ROCE_EVENT_TYPE_PATH_MIG:
++	case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
++	case HNS_ROCE_EVENT_TYPE_COMM_EST:
++	case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
++	case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
++	case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
++	case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
++	case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
++	case HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION:
++	case HNS_ROCE_EVENT_TYPE_INVALID_XRCETH:
++		hns_roce_qp_event(hr_dev, queue_num, event_type);
++		break;
++	case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
++	case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
++		hns_roce_srq_event(hr_dev, queue_num, event_type);
++		break;
++	case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
++	case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
++		hns_roce_cq_event(hr_dev, queue_num, event_type);
++		break;
++	default:
++		break;
++	}
++
++	dump_aeqe_log(irq_work);
+ 
+ 	kfree(irq_work);
+ }
+@@ -6087,14 +6170,14 @@ static struct hns_roce_aeqe *next_aeqe_sw_v2(struct hns_roce_eq *eq)
+ static irqreturn_t hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
+ 				       struct hns_roce_eq *eq)
+ {
+-	struct device *dev = hr_dev->dev;
+ 	struct hns_roce_aeqe *aeqe = next_aeqe_sw_v2(eq);
+ 	irqreturn_t aeqe_found = IRQ_NONE;
++	int num_aeqes = 0;
+ 	int event_type;
+ 	u32 queue_num;
+ 	int sub_type;
+ 
+-	while (aeqe) {
++	while (aeqe && num_aeqes < HNS_AEQ_POLLING_BUDGET) {
+ 		/* Make sure we read AEQ entry after we have checked the
+ 		 * ownership bit
+ 		 */
+@@ -6105,25 +6188,12 @@ static irqreturn_t hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
+ 		queue_num = hr_reg_read(aeqe, AEQE_EVENT_QUEUE_NUM);
+ 
+ 		switch (event_type) {
+-		case HNS_ROCE_EVENT_TYPE_PATH_MIG:
+-		case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
+-		case HNS_ROCE_EVENT_TYPE_COMM_EST:
+-		case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
+ 		case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
+-		case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
+ 		case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
+ 		case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
+ 		case HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION:
+ 		case HNS_ROCE_EVENT_TYPE_INVALID_XRCETH:
+-			hns_roce_qp_event(hr_dev, queue_num, event_type);
+-			break;
+-		case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
+-		case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
+-			hns_roce_srq_event(hr_dev, queue_num, event_type);
+-			break;
+-		case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
+-		case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
+-			hns_roce_cq_event(hr_dev, queue_num, event_type);
++			hns_roce_flush_cqe(hr_dev, queue_num);
+ 			break;
+ 		case HNS_ROCE_EVENT_TYPE_MB:
+ 			hns_roce_cmd_event(hr_dev,
+@@ -6131,12 +6201,7 @@ static irqreturn_t hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
+ 					aeqe->event.cmd.status,
+ 					le64_to_cpu(aeqe->event.cmd.out_param));
+ 			break;
+-		case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
+-		case HNS_ROCE_EVENT_TYPE_FLR:
+-			break;
+ 		default:
+-			dev_err(dev, "unhandled event %d on EQ %d at idx %u.\n",
+-				event_type, eq->eqn, eq->cons_index);
+ 			break;
+ 		}
+ 
+@@ -6150,6 +6215,7 @@ static irqreturn_t hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
+ 		hns_roce_v2_init_irq_work(hr_dev, eq, queue_num);
+ 
+ 		aeqe = next_aeqe_sw_v2(eq);
++		++num_aeqes;
+ 	}
+ 
+ 	update_eq_db(eq);
+@@ -6699,6 +6765,9 @@ static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
+ 	int ret;
+ 	int i;
+ 
++	if (hr_dev->caps.aeqe_depth < HNS_AEQ_POLLING_BUDGET)
++		return -EINVAL;
++
+ 	other_num = hr_dev->caps.num_other_vectors;
+ 	comp_num = hr_dev->caps.num_comp_vectors;
+ 	aeq_num = hr_dev->caps.num_aeq_vectors;
+@@ -7017,6 +7086,7 @@ static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
+ 
+ 	handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
+ }
++
+ static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle)
+ {
+ 	struct hns_roce_dev *hr_dev;
+@@ -7035,6 +7105,9 @@ static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle)
+ 
+ 	hr_dev->active = false;
+ 	hr_dev->dis_db = true;
++
++	rdma_user_mmap_disassociate(&hr_dev->ib_dev);
++
+ 	hr_dev->state = HNS_ROCE_DEVICE_STATE_RST_DOWN;
+ 
+ 	return 0;
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+index c65f68a14a2608..cbdbc9edbce6ec 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+@@ -85,6 +85,11 @@
+ 
+ #define HNS_ROCE_V2_TABLE_CHUNK_SIZE		(1 << 18)
+ 
++/* The budget must be smaller than aeqe_depth to guarantee that the CI
++ * is updated before all entries in the EQ have been polled.
++ */
++#define HNS_AEQ_POLLING_BUDGET 64
++
+ enum {
+ 	HNS_ROCE_CMD_FLAG_IN = BIT(0),
+ 	HNS_ROCE_CMD_FLAG_OUT = BIT(1),
+@@ -919,6 +924,7 @@ struct hns_roce_v2_rc_send_wqe {
+ #define RC_SEND_WQE_OWNER RC_SEND_WQE_FIELD_LOC(7, 7)
+ #define RC_SEND_WQE_CQE RC_SEND_WQE_FIELD_LOC(8, 8)
+ #define RC_SEND_WQE_FENCE RC_SEND_WQE_FIELD_LOC(9, 9)
++#define RC_SEND_WQE_SO RC_SEND_WQE_FIELD_LOC(10, 10)
+ #define RC_SEND_WQE_SE RC_SEND_WQE_FIELD_LOC(11, 11)
+ #define RC_SEND_WQE_INLINE RC_SEND_WQE_FIELD_LOC(12, 12)
+ #define RC_SEND_WQE_WQE_INDEX RC_SEND_WQE_FIELD_LOC(30, 15)
+@@ -1342,7 +1348,7 @@ struct hns_roce_v2_priv {
+ struct hns_roce_dip {
+ 	u8 dgid[GID_LEN_V2];
+ 	u32 dip_idx;
+-	struct list_head node; /* all dips are on a list */
++	u32 qp_cnt;
+ };
+ 
+ struct fmea_ram_ecc {
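
HNS_AEQ_POLLING_BUDGET bounds how many asynchronous events one interrupt may drain before the consumer index is published, which is why the EQ-table init above now rejects aeqe_depth values below the budget. A runnable toy model of the bounded loop:

#include <stdio.h>

#define POLL_BUDGET 4		/* must stay below the queue depth */

static int queue[8] = { 11, 12, 13, 14, 15, 16, 0 };	/* 0 == empty */

int main(void)
{
	int ci = 0, handled = 0;

	/* handle at most POLL_BUDGET events per "interrupt", then
	 * publish the consumer index so the producer can reuse slots
	 * before the ring wraps */
	while (queue[ci] && handled < POLL_BUDGET) {
		printf("event %d\n", queue[ci]);
		ci++;
		handled++;
	}
	printf("publish ci=%d\n", ci);
	return 0;
}
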
+diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
+index 4cb0af73358708..ae24c81c9812d9 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_main.c
++++ b/drivers/infiniband/hw/hns/hns_roce_main.c
+@@ -466,6 +466,11 @@ static int hns_roce_mmap(struct ib_ucontext *uctx, struct vm_area_struct *vma)
+ 	pgprot_t prot;
+ 	int ret;
+ 
++	if (hr_dev->dis_db) {
++		atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_MMAP_ERR_CNT]);
++		return -EPERM;
++	}
++
+ 	rdma_entry = rdma_user_mmap_entry_get_pgoff(uctx, vma->vm_pgoff);
+ 	if (!rdma_entry) {
+ 		atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_MMAP_ERR_CNT]);
+@@ -1130,8 +1135,6 @@ int hns_roce_init(struct hns_roce_dev *hr_dev)
+ 
+ 	INIT_LIST_HEAD(&hr_dev->qp_list);
+ 	spin_lock_init(&hr_dev->qp_list_lock);
+-	INIT_LIST_HEAD(&hr_dev->dip_list);
+-	spin_lock_init(&hr_dev->dip_list_lock);
+ 
+ 	ret = hns_roce_register_device(hr_dev);
+ 	if (ret)
+diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
+index 846da8c78b8b72..bf30b3a65a9ba9 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
++++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
+@@ -138,8 +138,8 @@ static void hns_roce_mr_free(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr
+ 					      key_to_hw_index(mr->key) &
+ 					      (hr_dev->caps.num_mtpts - 1));
+ 		if (ret)
+-			ibdev_warn(ibdev, "failed to destroy mpt, ret = %d.\n",
+-				   ret);
++			ibdev_warn_ratelimited(ibdev, "failed to destroy mpt, ret = %d.\n",
++					       ret);
+ 	}
+ 
+ 	free_mr_pbl(hr_dev, mr);
+@@ -435,15 +435,16 @@ static int hns_roce_set_page(struct ib_mr *ibmr, u64 addr)
+ }
+ 
+ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+-		       unsigned int *sg_offset)
++		       unsigned int *sg_offset_p)
+ {
++	unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
+ 	struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
+ 	struct ib_device *ibdev = &hr_dev->ib_dev;
+ 	struct hns_roce_mr *mr = to_hr_mr(ibmr);
+ 	struct hns_roce_mtr *mtr = &mr->pbl_mtr;
+ 	int ret, sg_num = 0;
+ 
+-	if (!IS_ALIGNED(*sg_offset, HNS_ROCE_FRMR_ALIGN_SIZE) ||
++	if (!IS_ALIGNED(sg_offset, HNS_ROCE_FRMR_ALIGN_SIZE) ||
+ 	    ibmr->page_size < HNS_HW_PAGE_SIZE ||
+ 	    ibmr->page_size > HNS_HW_MAX_PAGE_SIZE)
+ 		return sg_num;
+@@ -454,7 +455,7 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+ 	if (!mr->page_list)
+ 		return sg_num;
+ 
+-	sg_num = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page);
++	sg_num = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset_p, hns_roce_set_page);
+ 	if (sg_num < 1) {
+ 		ibdev_err(ibdev, "failed to store sg pages %u %u, cnt = %d.\n",
+ 			  mr->npages, mr->pbl_mtr.hem_cfg.buf_pg_count, sg_num);
+diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
+index 6b03ba671ff8f3..9e2e76c5940636 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
++++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
+@@ -39,6 +39,25 @@
+ #include "hns_roce_device.h"
+ #include "hns_roce_hem.h"
+ 
++static struct hns_roce_qp *hns_roce_qp_lookup(struct hns_roce_dev *hr_dev,
++					      u32 qpn)
++{
++	struct device *dev = hr_dev->dev;
++	struct hns_roce_qp *qp;
++	unsigned long flags;
++
++	xa_lock_irqsave(&hr_dev->qp_table_xa, flags);
++	qp = __hns_roce_qp_lookup(hr_dev, qpn);
++	if (qp)
++		refcount_inc(&qp->refcount);
++	xa_unlock_irqrestore(&hr_dev->qp_table_xa, flags);
++
++	if (!qp)
++		dev_warn(dev, "async event for bogus QP %08x\n", qpn);
++
++	return qp;
++}
++
+ static void flush_work_handle(struct work_struct *work)
+ {
+ 	struct hns_roce_work *flush_work = container_of(work,
+@@ -71,11 +90,18 @@ static void flush_work_handle(struct work_struct *work)
+ void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
+ {
+ 	struct hns_roce_work *flush_work = &hr_qp->flush_work;
++	unsigned long flags;
++
++	spin_lock_irqsave(&hr_qp->flush_lock, flags);
++	/* Exit directly after destroy_qp() */
++	if (test_bit(HNS_ROCE_STOP_FLUSH_FLAG, &hr_qp->flush_flag)) {
++		spin_unlock_irqrestore(&hr_qp->flush_lock, flags);
++		return;
++	}
+ 
+-	flush_work->hr_dev = hr_dev;
+-	INIT_WORK(&flush_work->work, flush_work_handle);
+ 	refcount_inc(&hr_qp->refcount);
+ 	queue_work(hr_dev->irq_workq, &flush_work->work);
++	spin_unlock_irqrestore(&hr_qp->flush_lock, flags);
+ }
+ 
+ void flush_cqe(struct hns_roce_dev *dev, struct hns_roce_qp *qp)
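
The new flush_lock and HNS_ROCE_STOP_FLUSH_FLAG close a race between flush work being queued and the QP being destroyed: destroy sets the stop flag under the same lock the producer takes, then flushes whatever was already queued. A kernel-style sketch of the pattern (sketch only; o->lock is assumed to have been initialised with spin_lock_init()):

#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#define OBJ_STOP_BIT 0

struct obj {
	spinlock_t lock;
	unsigned long flags;
	struct work_struct work;
};

static void obj_queue_flush(struct obj *o, struct workqueue_struct *wq)
{
	unsigned long flags;

	spin_lock_irqsave(&o->lock, flags);
	if (!test_bit(OBJ_STOP_BIT, &o->flags))
		queue_work(wq, &o->work);
	spin_unlock_irqrestore(&o->lock, flags);
}

static void obj_destroy(struct obj *o)
{
	unsigned long flags;

	spin_lock_irqsave(&o->lock, flags);
	set_bit(OBJ_STOP_BIT, &o->flags);	/* no new work after this */
	spin_unlock_irqrestore(&o->lock, flags);
	flush_work(&o->work);	/* wait out anything already queued */
}
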
+@@ -95,31 +121,28 @@ void flush_cqe(struct hns_roce_dev *dev, struct hns_roce_qp *qp)
+ 
+ void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
+ {
+-	struct device *dev = hr_dev->dev;
+ 	struct hns_roce_qp *qp;
+ 
+-	xa_lock(&hr_dev->qp_table_xa);
+-	qp = __hns_roce_qp_lookup(hr_dev, qpn);
+-	if (qp)
+-		refcount_inc(&qp->refcount);
+-	xa_unlock(&hr_dev->qp_table_xa);
+-
+-	if (!qp) {
+-		dev_warn(dev, "async event for bogus QP %08x\n", qpn);
++	qp = hns_roce_qp_lookup(hr_dev, qpn);
++	if (!qp)
+ 		return;
+-	}
+ 
+-	if (event_type == HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR ||
+-	    event_type == HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR ||
+-	    event_type == HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR ||
+-	    event_type == HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION ||
+-	    event_type == HNS_ROCE_EVENT_TYPE_INVALID_XRCETH) {
+-		qp->state = IB_QPS_ERR;
++	qp->event(qp, (enum hns_roce_event)event_type);
+ 
+-		flush_cqe(hr_dev, qp);
+-	}
++	if (refcount_dec_and_test(&qp->refcount))
++		complete(&qp->free);
++}
+ 
+-	qp->event(qp, (enum hns_roce_event)event_type);
++void hns_roce_flush_cqe(struct hns_roce_dev *hr_dev, u32 qpn)
++{
++	struct hns_roce_qp *qp;
++
++	qp = hns_roce_qp_lookup(hr_dev, qpn);
++	if (!qp)
++		return;
++
++	qp->state = IB_QPS_ERR;
++	flush_cqe(hr_dev, qp);
+ 
+ 	if (refcount_dec_and_test(&qp->refcount))
+ 		complete(&qp->free);
+@@ -1124,6 +1147,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
+ 				     struct ib_udata *udata,
+ 				     struct hns_roce_qp *hr_qp)
+ {
++	struct hns_roce_work *flush_work = &hr_qp->flush_work;
+ 	struct hns_roce_ib_create_qp_resp resp = {};
+ 	struct ib_device *ibdev = &hr_dev->ib_dev;
+ 	struct hns_roce_ib_create_qp ucmd = {};
+@@ -1132,9 +1156,12 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
+ 	mutex_init(&hr_qp->mutex);
+ 	spin_lock_init(&hr_qp->sq.lock);
+ 	spin_lock_init(&hr_qp->rq.lock);
++	spin_lock_init(&hr_qp->flush_lock);
+ 
+ 	hr_qp->state = IB_QPS_RESET;
+ 	hr_qp->flush_flag = 0;
++	flush_work->hr_dev = hr_dev;
++	INIT_WORK(&flush_work->work, flush_work_handle);
+ 
+ 	if (init_attr->create_flags)
+ 		return -EOPNOTSUPP;
+@@ -1546,14 +1573,10 @@ int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
+ 	unsigned int reserved_from_bot;
+ 	unsigned int i;
+ 
+-	qp_table->idx_table.spare_idx = kcalloc(hr_dev->caps.num_qps,
+-					sizeof(u32), GFP_KERNEL);
+-	if (!qp_table->idx_table.spare_idx)
+-		return -ENOMEM;
+-
+ 	mutex_init(&qp_table->scc_mutex);
+ 	mutex_init(&qp_table->bank_mutex);
+ 	xa_init(&hr_dev->qp_table_xa);
++	xa_init(&qp_table->dip_xa);
+ 
+ 	reserved_from_bot = hr_dev->caps.reserved_qps;
+ 
+@@ -1578,7 +1601,7 @@ void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev)
+ 
+ 	for (i = 0; i < HNS_ROCE_QP_BANK_NUM; i++)
+ 		ida_destroy(&hr_dev->qp_table.bank[i].ida);
++	xa_destroy(&hr_dev->qp_table.dip_xa);
+ 	mutex_destroy(&hr_dev->qp_table.bank_mutex);
+ 	mutex_destroy(&hr_dev->qp_table.scc_mutex);
+-	kfree(hr_dev->qp_table.idx_table.spare_idx);
+ }
+diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c
+index c9b8233f4b0577..70c06ef65603d8 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_srq.c
++++ b/drivers/infiniband/hw/hns/hns_roce_srq.c
+@@ -151,8 +151,8 @@ static void free_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
+ 	ret = hns_roce_destroy_hw_ctx(hr_dev, HNS_ROCE_CMD_DESTROY_SRQ,
+ 				      srq->srqn);
+ 	if (ret)
+-		dev_err(hr_dev->dev, "DESTROY_SRQ failed (%d) for SRQN %06lx\n",
+-			ret, srq->srqn);
++		dev_err_ratelimited(hr_dev->dev, "DESTROY_SRQ failed (%d) for SRQN %06lx\n",
++				    ret, srq->srqn);
+ 
+ 	xa_erase_irq(&srq_table->xa, srq->srqn);
+ 
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index 4999239c8f4137..ac20ab3bbabf47 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -2997,7 +2997,6 @@ int mlx5_ib_dev_res_srq_init(struct mlx5_ib_dev *dev)
+ static int mlx5_ib_dev_res_init(struct mlx5_ib_dev *dev)
+ {
+ 	struct mlx5_ib_resources *devr = &dev->devr;
+-	int port;
+ 	int ret;
+ 
+ 	if (!MLX5_CAP_GEN(dev->mdev, xrc))
+@@ -3013,10 +3012,6 @@ static int mlx5_ib_dev_res_init(struct mlx5_ib_dev *dev)
+ 		return ret;
+ 	}
+ 
+-	for (port = 0; port < ARRAY_SIZE(devr->ports); ++port)
+-		INIT_WORK(&devr->ports[port].pkey_change_work,
+-			  pkey_change_handler);
+-
+ 	mutex_init(&devr->cq_lock);
+ 	mutex_init(&devr->srq_lock);
+ 
+@@ -3026,16 +3021,6 @@ static int mlx5_ib_dev_res_init(struct mlx5_ib_dev *dev)
+ static void mlx5_ib_dev_res_cleanup(struct mlx5_ib_dev *dev)
+ {
+ 	struct mlx5_ib_resources *devr = &dev->devr;
+-	int port;
+-
+-	/*
+-	 * Make sure no change P_Key work items are still executing.
+-	 *
+-	 * At this stage, the mlx5_ib_event should be unregistered
+-	 * and it ensures that no new works are added.
+-	 */
+-	for (port = 0; port < ARRAY_SIZE(devr->ports); ++port)
+-		cancel_work_sync(&devr->ports[port].pkey_change_work);
+ 
+ 	/* After s0/s1 init, they are not unset during the device lifetime. */
+ 	if (devr->s1) {
+@@ -3211,12 +3196,14 @@ static int lag_event(struct notifier_block *nb, unsigned long event, void *data)
+ 	struct mlx5_ib_dev *dev = container_of(nb, struct mlx5_ib_dev,
+ 					       lag_events);
+ 	struct mlx5_core_dev *mdev = dev->mdev;
++	struct ib_device *ibdev = &dev->ib_dev;
++	struct net_device *old_ndev = NULL;
+ 	struct mlx5_ib_port *port;
+ 	struct net_device *ndev;
+-	int  i, err;
+-	int portnum;
++	u32 portnum = 0;
++	int ret = 0;
++	int i;
+ 
+-	portnum = 0;
+ 	switch (event) {
+ 	case MLX5_DRIVER_EVENT_ACTIVE_BACKUP_LAG_CHANGE_LOWERSTATE:
+ 		ndev = data;
+@@ -3232,19 +3219,24 @@ static int lag_event(struct notifier_block *nb, unsigned long event, void *data)
+ 					}
+ 				}
+ 			}
+-			err = ib_device_set_netdev(&dev->ib_dev, ndev,
+-						   portnum + 1);
+-			dev_put(ndev);
+-			if (err)
+-				return err;
+-			/* Rescan gids after new netdev assignment */
+-			rdma_roce_rescan_device(&dev->ib_dev);
++			old_ndev = ib_device_get_netdev(ibdev, portnum + 1);
++			ret = ib_device_set_netdev(ibdev, ndev, portnum + 1);
++			if (ret)
++				goto out;
++
++			if (old_ndev)
++				roce_del_all_netdev_gids(ibdev, portnum + 1,
++							 old_ndev);
++			rdma_roce_rescan_port(ibdev, portnum + 1);
+ 		}
+ 		break;
+ 	default:
+ 		return NOTIFY_DONE;
+ 	}
+-	return NOTIFY_OK;
++
++out:
++	dev_put(old_ndev);
++	return notifier_from_errno(ret);
+ }
+ 
+ static void mlx5e_lag_event_register(struct mlx5_ib_dev *dev)
+@@ -4464,6 +4456,13 @@ static void mlx5_ib_stage_delay_drop_cleanup(struct mlx5_ib_dev *dev)
+ 
+ static int mlx5_ib_stage_dev_notifier_init(struct mlx5_ib_dev *dev)
+ {
++	struct mlx5_ib_resources *devr = &dev->devr;
++	int port;
++
++	for (port = 0; port < ARRAY_SIZE(devr->ports); ++port)
++		INIT_WORK(&devr->ports[port].pkey_change_work,
++			  pkey_change_handler);
++
+ 	dev->mdev_events.notifier_call = mlx5_ib_event;
+ 	mlx5_notifier_register(dev->mdev, &dev->mdev_events);
+ 
+@@ -4474,8 +4473,14 @@ static int mlx5_ib_stage_dev_notifier_init(struct mlx5_ib_dev *dev)
+ 
+ static void mlx5_ib_stage_dev_notifier_cleanup(struct mlx5_ib_dev *dev)
+ {
++	struct mlx5_ib_resources *devr = &dev->devr;
++	int port;
++
+ 	mlx5r_macsec_event_unregister(dev);
+ 	mlx5_notifier_unregister(dev->mdev, &dev->mdev_events);
++
++	for (port = 0; port < ARRAY_SIZE(devr->ports); ++port)
++		cancel_work_sync(&devr->ports[port].pkey_change_work);
+ }
+ 
+ void mlx5_ib_data_direct_bind(struct mlx5_ib_dev *ibdev,
+@@ -4565,9 +4570,6 @@ static const struct mlx5_ib_profile pf_profile = {
+ 	STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
+ 		     mlx5_ib_dev_res_init,
+ 		     mlx5_ib_dev_res_cleanup),
+-	STAGE_CREATE(MLX5_IB_STAGE_DEVICE_NOTIFIER,
+-		     mlx5_ib_stage_dev_notifier_init,
+-		     mlx5_ib_stage_dev_notifier_cleanup),
+ 	STAGE_CREATE(MLX5_IB_STAGE_ODP,
+ 		     mlx5_ib_odp_init_one,
+ 		     mlx5_ib_odp_cleanup_one),
+@@ -4592,6 +4594,9 @@ static const struct mlx5_ib_profile pf_profile = {
+ 	STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
+ 		     mlx5_ib_stage_ib_reg_init,
+ 		     mlx5_ib_stage_ib_reg_cleanup),
++	STAGE_CREATE(MLX5_IB_STAGE_DEVICE_NOTIFIER,
++		     mlx5_ib_stage_dev_notifier_init,
++		     mlx5_ib_stage_dev_notifier_cleanup),
+ 	STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
+ 		     mlx5_ib_stage_post_ib_reg_umr_init,
+ 		     NULL),
+@@ -4628,9 +4633,6 @@ const struct mlx5_ib_profile raw_eth_profile = {
+ 	STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
+ 		     mlx5_ib_dev_res_init,
+ 		     mlx5_ib_dev_res_cleanup),
+-	STAGE_CREATE(MLX5_IB_STAGE_DEVICE_NOTIFIER,
+-		     mlx5_ib_stage_dev_notifier_init,
+-		     mlx5_ib_stage_dev_notifier_cleanup),
+ 	STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
+ 		     mlx5_ib_counters_init,
+ 		     mlx5_ib_counters_cleanup),
+@@ -4652,6 +4654,9 @@ const struct mlx5_ib_profile raw_eth_profile = {
+ 	STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
+ 		     mlx5_ib_stage_ib_reg_init,
+ 		     mlx5_ib_stage_ib_reg_cleanup),
++	STAGE_CREATE(MLX5_IB_STAGE_DEVICE_NOTIFIER,
++		     mlx5_ib_stage_dev_notifier_init,
++		     mlx5_ib_stage_dev_notifier_cleanup),
+ 	STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
+ 		     mlx5_ib_stage_post_ib_reg_umr_init,
+ 		     NULL),
+diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
+index 23fd72f7f63df9..29bde64ea1eac9 100644
+--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
++++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
+@@ -972,7 +972,6 @@ enum mlx5_ib_stages {
+ 	MLX5_IB_STAGE_QP,
+ 	MLX5_IB_STAGE_SRQ,
+ 	MLX5_IB_STAGE_DEVICE_RESOURCES,
+-	MLX5_IB_STAGE_DEVICE_NOTIFIER,
+ 	MLX5_IB_STAGE_ODP,
+ 	MLX5_IB_STAGE_COUNTERS,
+ 	MLX5_IB_STAGE_CONG_DEBUGFS,
+@@ -981,6 +980,7 @@ enum mlx5_ib_stages {
+ 	MLX5_IB_STAGE_PRE_IB_REG_UMR,
+ 	MLX5_IB_STAGE_WHITELIST_UID,
+ 	MLX5_IB_STAGE_IB_REG,
++	MLX5_IB_STAGE_DEVICE_NOTIFIER,
+ 	MLX5_IB_STAGE_POST_IB_REG_UMR,
+ 	MLX5_IB_STAGE_DELAY_DROP,
+ 	MLX5_IB_STAGE_RESTRACK,
+diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
+index d2f7b5195c19dd..91d329e903083c 100644
+--- a/drivers/infiniband/sw/rxe/rxe_qp.c
++++ b/drivers/infiniband/sw/rxe/rxe_qp.c
+@@ -775,6 +775,7 @@ int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask)
+ 	 * Yield the processor
+ 	 */
+ 	spin_lock_irqsave(&qp->state_lock, flags);
++	attr->cur_qp_state = qp_state(qp);
+ 	if (qp->attr.sq_draining) {
+ 		spin_unlock_irqrestore(&qp->state_lock, flags);
+ 		cond_resched();
+diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
+index 479c07e6e4ed3e..87a02f0deb0001 100644
+--- a/drivers/infiniband/sw/rxe/rxe_req.c
++++ b/drivers/infiniband/sw/rxe/rxe_req.c
+@@ -663,10 +663,12 @@ int rxe_requester(struct rxe_qp *qp)
+ 	if (unlikely(qp_state(qp) == IB_QPS_ERR)) {
+ 		wqe = __req_next_wqe(qp);
+ 		spin_unlock_irqrestore(&qp->state_lock, flags);
+-		if (wqe)
++		if (wqe) {
++			wqe->status = IB_WC_WR_FLUSH_ERR;
+ 			goto err;
+-		else
++		} else {
+ 			goto exit;
++		}
+ 	}
+ 
+ 	if (unlikely(qp_state(qp) == IB_QPS_RESET)) {
+diff --git a/drivers/input/misc/cs40l50-vibra.c b/drivers/input/misc/cs40l50-vibra.c
+index 03bdb7c26ec09f..dce3b0ec8cf368 100644
+--- a/drivers/input/misc/cs40l50-vibra.c
++++ b/drivers/input/misc/cs40l50-vibra.c
+@@ -334,11 +334,12 @@ static int cs40l50_add(struct input_dev *dev, struct ff_effect *effect,
+ 	work_data.custom_len = effect->u.periodic.custom_len;
+ 	work_data.vib = vib;
+ 	work_data.effect = effect;
+-	INIT_WORK(&work_data.work, cs40l50_add_worker);
++	INIT_WORK_ONSTACK(&work_data.work, cs40l50_add_worker);
+ 
+ 	/* Push to the workqueue to serialize with playbacks */
+ 	queue_work(vib->vib_wq, &work_data.work);
+ 	flush_work(&work_data.work);
++	destroy_work_on_stack(&work_data.work);
+ 
+ 	kfree(work_data.custom_data);
+ 
+@@ -467,11 +468,12 @@ static int cs40l50_erase(struct input_dev *dev, int effect_id)
+ 	work_data.vib = vib;
+ 	work_data.effect = &dev->ff->effects[effect_id];
+ 
+-	INIT_WORK(&work_data.work, cs40l50_erase_worker);
++	INIT_WORK_ONSTACK(&work_data.work, cs40l50_erase_worker);
+ 
+ 	/* Push to workqueue to serialize with playbacks */
+ 	queue_work(vib->vib_wq, &work_data.work);
+ 	flush_work(&work_data.work);
++	destroy_work_on_stack(&work_data.work);
+ 
+ 	return work_data.error;
+ }
+diff --git a/drivers/interconnect/qcom/icc-rpmh.c b/drivers/interconnect/qcom/icc-rpmh.c
+index f49a8e0cb03c06..adacd6f7d6a8f7 100644
+--- a/drivers/interconnect/qcom/icc-rpmh.c
++++ b/drivers/interconnect/qcom/icc-rpmh.c
+@@ -311,6 +311,9 @@ int qcom_icc_rpmh_probe(struct platform_device *pdev)
+ 		}
+ 
+ 		qp->num_clks = devm_clk_bulk_get_all(qp->dev, &qp->clks);
++		if (qp->num_clks == -EPROBE_DEFER)
++			return dev_err_probe(dev, qp->num_clks, "Failed to get QoS clocks\n");
++
+ 		if (qp->num_clks < 0 || (!qp->num_clks && desc->qos_clks_required)) {
+ 			dev_info(dev, "Skipping QoS, failed to get clk: %d\n", qp->num_clks);
+ 			goto skip_qos_config;
+diff --git a/drivers/iommu/amd/io_pgtable_v2.c b/drivers/iommu/amd/io_pgtable_v2.c
+index 25b9042fa45307..c616de2c5926ec 100644
+--- a/drivers/iommu/amd/io_pgtable_v2.c
++++ b/drivers/iommu/amd/io_pgtable_v2.c
+@@ -268,8 +268,11 @@ static int iommu_v2_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
+ out:
+ 	if (updated) {
+ 		struct protection_domain *pdom = io_pgtable_ops_to_domain(ops);
++		unsigned long flags;
+ 
++		spin_lock_irqsave(&pdom->lock, flags);
+ 		amd_iommu_domain_flush_pages(pdom, o_iova, size);
++		spin_unlock_irqrestore(&pdom->lock, flags);
+ 	}
+ 
+ 	if (mapped)
+diff --git a/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c b/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
+index fcd13d301fff68..6b479592140c47 100644
+--- a/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
++++ b/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
+@@ -509,7 +509,8 @@ static int tegra241_vcmdq_alloc_smmu_cmdq(struct tegra241_vcmdq *vcmdq)
+ 
+ 	snprintf(name, 16, "vcmdq%u", vcmdq->idx);
+ 
+-	q->llq.max_n_shift = VCMDQ_LOG2SIZE_MAX;
++	/* Queue size, capped to ensure natural alignment */
++	q->llq.max_n_shift = min_t(u32, CMDQ_MAX_SZ_SHIFT, VCMDQ_LOG2SIZE_MAX);
+ 
+ 	/* Use the common helper to init the VCMDQ, and then... */
+ 	ret = arm_smmu_init_one_queue(smmu, q, vcmdq->page0,
+@@ -800,7 +801,7 @@ static int tegra241_cmdqv_init_structures(struct arm_smmu_device *smmu)
+ 	return 0;
+ }
+ 
+-struct dentry *cmdqv_debugfs_dir;
++static struct dentry *cmdqv_debugfs_dir;
+ 
+ static struct arm_smmu_device *
+ __tegra241_cmdqv_probe(struct arm_smmu_device *smmu, struct resource *res,
+diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
+index e860bc9439a283..a167d59101ae2e 100644
+--- a/drivers/iommu/intel/iommu.c
++++ b/drivers/iommu/intel/iommu.c
+@@ -707,14 +707,15 @@ static void pgtable_walk(struct intel_iommu *iommu, unsigned long pfn,
+ 	while (1) {
+ 		offset = pfn_level_offset(pfn, level);
+ 		pte = &parent[offset];
+-		if (!pte || (dma_pte_superpage(pte) || !dma_pte_present(pte))) {
+-			pr_info("PTE not present at level %d\n", level);
+-			break;
+-		}
+ 
+ 		pr_info("pte level: %d, pte value: 0x%016llx\n", level, pte->val);
+ 
+-		if (level == 1)
++		if (!dma_pte_present(pte)) {
++			pr_info("page table not present at level %d\n", level - 1);
++			break;
++		}
++
++		if (level == 1 || dma_pte_superpage(pte))
+ 			break;
+ 
+ 		parent = phys_to_virt(dma_pte_addr(pte));
+@@ -737,11 +738,11 @@ void dmar_fault_dump_ptes(struct intel_iommu *iommu, u16 source_id,
+ 	pr_info("Dump %s table entries for IOVA 0x%llx\n", iommu->name, addr);
+ 
+ 	/* root entry dump */
+-	rt_entry = &iommu->root_entry[bus];
+-	if (!rt_entry) {
+-		pr_info("root table entry is not present\n");
++	if (!iommu->root_entry) {
++		pr_info("root table is not present\n");
+ 		return;
+ 	}
++	rt_entry = &iommu->root_entry[bus];
+ 
+ 	if (sm_supported(iommu))
+ 		pr_info("scalable mode root entry: hi 0x%016llx, low 0x%016llx\n",
+@@ -752,7 +753,7 @@ void dmar_fault_dump_ptes(struct intel_iommu *iommu, u16 source_id,
+ 	/* context entry dump */
+ 	ctx_entry = iommu_context_addr(iommu, bus, devfn, 0);
+ 	if (!ctx_entry) {
+-		pr_info("context table entry is not present\n");
++		pr_info("context table is not present\n");
+ 		return;
+ 	}
+ 
+@@ -761,17 +762,23 @@ void dmar_fault_dump_ptes(struct intel_iommu *iommu, u16 source_id,
+ 
+ 	/* legacy mode does not require PASID entries */
+ 	if (!sm_supported(iommu)) {
++		if (!context_present(ctx_entry)) {
++			pr_info("legacy mode page table is not present\n");
++			return;
++		}
+ 		level = agaw_to_level(ctx_entry->hi & 7);
+ 		pgtable = phys_to_virt(ctx_entry->lo & VTD_PAGE_MASK);
+ 		goto pgtable_walk;
+ 	}
+ 
+-	/* get the pointer to pasid directory entry */
+-	dir = phys_to_virt(ctx_entry->lo & VTD_PAGE_MASK);
+-	if (!dir) {
+-		pr_info("pasid directory entry is not present\n");
++	if (!context_present(ctx_entry)) {
++		pr_info("pasid directory table is not present\n");
+ 		return;
+ 	}
++
++	/* get the pointer to pasid directory entry */
++	dir = phys_to_virt(ctx_entry->lo & VTD_PAGE_MASK);
++
+ 	/* For request-without-pasid, get the pasid from context entry */
+ 	if (intel_iommu_sm && pasid == IOMMU_PASID_INVALID)
+ 		pasid = IOMMU_NO_PASID;
+@@ -783,7 +790,7 @@ void dmar_fault_dump_ptes(struct intel_iommu *iommu, u16 source_id,
+ 	/* get the pointer to the pasid table entry */
+ 	entries = get_pasid_table_from_pde(pde);
+ 	if (!entries) {
+-		pr_info("pasid table entry is not present\n");
++		pr_info("pasid table is not present\n");
+ 		return;
+ 	}
+ 	index = pasid & PASID_PTE_MASK;
+@@ -791,6 +798,11 @@ void dmar_fault_dump_ptes(struct intel_iommu *iommu, u16 source_id,
+ 	for (i = 0; i < ARRAY_SIZE(pte->val); i++)
+ 		pr_info("pasid table entry[%d]: 0x%016llx\n", i, pte->val[i]);
+ 
++	if (!pasid_pte_is_present(pte)) {
++		pr_info("scalable mode page table is not present\n");
++		return;
++	}
++
+ 	if (pasid_pte_get_pgtt(pte) == PASID_ENTRY_PGTT_FL_ONLY) {
+ 		level = pte->val[2] & BIT_ULL(2) ? 5 : 4;
+ 		pgtable = phys_to_virt(pte->val[2] & VTD_PAGE_MASK);
+diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c
+index d8eaa7ea380bb0..fbdeded3d48b59 100644
+--- a/drivers/iommu/s390-iommu.c
++++ b/drivers/iommu/s390-iommu.c
+@@ -33,6 +33,8 @@ struct s390_domain {
+ 	struct rcu_head		rcu;
+ };
+ 
++static struct iommu_domain blocking_domain;
++
+ static inline unsigned int calc_rtx(dma_addr_t ptr)
+ {
+ 	return ((unsigned long)ptr >> ZPCI_RT_SHIFT) & ZPCI_INDEX_MASK;
+@@ -369,20 +371,36 @@ static void s390_domain_free(struct iommu_domain *domain)
+ 	call_rcu(&s390_domain->rcu, s390_iommu_rcu_free_domain);
+ }
+ 
+-static void s390_iommu_detach_device(struct iommu_domain *domain,
+-				     struct device *dev)
++static void zdev_s390_domain_update(struct zpci_dev *zdev,
++				    struct iommu_domain *domain)
++{
++	unsigned long flags;
++
++	spin_lock_irqsave(&zdev->dom_lock, flags);
++	zdev->s390_domain = domain;
++	spin_unlock_irqrestore(&zdev->dom_lock, flags);
++}
++
++static int blocking_domain_attach_device(struct iommu_domain *domain,
++					 struct device *dev)
+ {
+-	struct s390_domain *s390_domain = to_s390_domain(domain);
+ 	struct zpci_dev *zdev = to_zpci_dev(dev);
++	struct s390_domain *s390_domain;
+ 	unsigned long flags;
+ 
++	if (zdev->s390_domain->type == IOMMU_DOMAIN_BLOCKED)
++		return 0;
++
++	s390_domain = to_s390_domain(zdev->s390_domain);
+ 	spin_lock_irqsave(&s390_domain->list_lock, flags);
+ 	list_del_rcu(&zdev->iommu_list);
+ 	spin_unlock_irqrestore(&s390_domain->list_lock, flags);
+ 
+ 	zpci_unregister_ioat(zdev, 0);
+-	zdev->s390_domain = NULL;
+ 	zdev->dma_table = NULL;
++	zdev_s390_domain_update(zdev, domain);
++
++	return 0;
+ }
+ 
+ static int s390_iommu_attach_device(struct iommu_domain *domain,
+@@ -401,20 +419,15 @@ static int s390_iommu_attach_device(struct iommu_domain *domain,
+ 		domain->geometry.aperture_end < zdev->start_dma))
+ 		return -EINVAL;
+ 
+-	if (zdev->s390_domain)
+-		s390_iommu_detach_device(&zdev->s390_domain->domain, dev);
++	blocking_domain_attach_device(&blocking_domain, dev);
+ 
++	/* If we fail now DMA remains blocked via blocking domain */
+ 	cc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
+ 				virt_to_phys(s390_domain->dma_table), &status);
+-	/*
+-	 * If the device is undergoing error recovery the reset code
+-	 * will re-establish the new domain.
+-	 */
+ 	if (cc && status != ZPCI_PCI_ST_FUNC_NOT_AVAIL)
+ 		return -EIO;
+-
+ 	zdev->dma_table = s390_domain->dma_table;
+-	zdev->s390_domain = s390_domain;
++	zdev_s390_domain_update(zdev, domain);
+ 
+ 	spin_lock_irqsave(&s390_domain->list_lock, flags);
+ 	list_add_rcu(&zdev->iommu_list, &s390_domain->devices);
+@@ -466,19 +479,11 @@ static struct iommu_device *s390_iommu_probe_device(struct device *dev)
+ 	if (zdev->tlb_refresh)
+ 		dev->iommu->shadow_on_flush = 1;
+ 
+-	return &zdev->iommu_dev;
+-}
++	/* Start with DMA blocked */
++	spin_lock_init(&zdev->dom_lock);
++	zdev_s390_domain_update(zdev, &blocking_domain);
+ 
+-static void s390_iommu_release_device(struct device *dev)
+-{
+-	struct zpci_dev *zdev = to_zpci_dev(dev);
+-
+-	/*
+-	 * release_device is expected to detach any domain currently attached
+-	 * to the device, but keep it attached to other devices in the group.
+-	 */
+-	if (zdev)
+-		s390_iommu_detach_device(&zdev->s390_domain->domain, dev);
++	return &zdev->iommu_dev;
+ }
+ 
+ static int zpci_refresh_all(struct zpci_dev *zdev)
+@@ -697,9 +702,15 @@ static size_t s390_iommu_unmap_pages(struct iommu_domain *domain,
+ 
+ struct zpci_iommu_ctrs *zpci_get_iommu_ctrs(struct zpci_dev *zdev)
+ {
+-	if (!zdev || !zdev->s390_domain)
++	struct s390_domain *s390_domain;
++
++	lockdep_assert_held(&zdev->dom_lock);
++
++	if (zdev->s390_domain->type == IOMMU_DOMAIN_BLOCKED)
+ 		return NULL;
+-	return &zdev->s390_domain->ctrs;
++
++	s390_domain = to_s390_domain(zdev->s390_domain);
++	return &s390_domain->ctrs;
+ }
+ 
+ int zpci_init_iommu(struct zpci_dev *zdev)
+@@ -776,11 +787,19 @@ static int __init s390_iommu_init(void)
+ }
+ subsys_initcall(s390_iommu_init);
+ 
++static struct iommu_domain blocking_domain = {
++	.type = IOMMU_DOMAIN_BLOCKED,
++	.ops = &(const struct iommu_domain_ops) {
++		.attach_dev	= blocking_domain_attach_device,
++	}
++};
++
+ static const struct iommu_ops s390_iommu_ops = {
++	.blocked_domain		= &blocking_domain,
++	.release_domain		= &blocking_domain,
+ 	.capable = s390_iommu_capable,
+ 	.domain_alloc_paging = s390_domain_alloc_paging,
+ 	.probe_device = s390_iommu_probe_device,
+-	.release_device = s390_iommu_release_device,
+ 	.device_group = generic_device_group,
+ 	.pgsize_bitmap = SZ_4K,
+ 	.get_resv_regions = s390_iommu_get_resv_regions,
+diff --git a/drivers/irqchip/irq-mvebu-sei.c b/drivers/irqchip/irq-mvebu-sei.c
+index f8c70f2d100a11..065166ab5dbc04 100644
+--- a/drivers/irqchip/irq-mvebu-sei.c
++++ b/drivers/irqchip/irq-mvebu-sei.c
+@@ -192,7 +192,6 @@ static void mvebu_sei_domain_free(struct irq_domain *domain, unsigned int virq,
+ }
+ 
+ static const struct irq_domain_ops mvebu_sei_domain_ops = {
+-	.select	= msi_lib_irq_domain_select,
+ 	.alloc	= mvebu_sei_domain_alloc,
+ 	.free	= mvebu_sei_domain_free,
+ };
+@@ -306,6 +305,7 @@ static void mvebu_sei_cp_domain_free(struct irq_domain *domain,
+ }
+ 
+ static const struct irq_domain_ops mvebu_sei_cp_domain_ops = {
++	.select	= msi_lib_irq_domain_select,
+ 	.alloc	= mvebu_sei_cp_domain_alloc,
+ 	.free	= mvebu_sei_cp_domain_free,
+ };
+diff --git a/drivers/irqchip/irq-riscv-aplic-main.c b/drivers/irqchip/irq-riscv-aplic-main.c
+index 900e72541db9e5..93e7c51f944abe 100644
+--- a/drivers/irqchip/irq-riscv-aplic-main.c
++++ b/drivers/irqchip/irq-riscv-aplic-main.c
+@@ -207,7 +207,8 @@ static int aplic_probe(struct platform_device *pdev)
+ 	else
+ 		rc = aplic_direct_setup(dev, regs);
+ 	if (rc)
+-		dev_err(dev, "failed to setup APLIC in %s mode\n", msi_mode ? "MSI" : "direct");
++		dev_err_probe(dev, rc, "failed to setup APLIC in %s mode\n",
++			      msi_mode ? "MSI" : "direct");
+ 
+ #ifdef CONFIG_ACPI
+ 	if (!acpi_disabled)
+diff --git a/drivers/irqchip/irq-riscv-aplic-msi.c b/drivers/irqchip/irq-riscv-aplic-msi.c
+index 945bff28265cdc..fb8d1838609fb5 100644
+--- a/drivers/irqchip/irq-riscv-aplic-msi.c
++++ b/drivers/irqchip/irq-riscv-aplic-msi.c
+@@ -266,6 +266,9 @@ int aplic_msi_setup(struct device *dev, void __iomem *regs)
+ 			if (msi_domain)
+ 				dev_set_msi_domain(dev, msi_domain);
+ 		}
++
++		if (!dev_get_msi_domain(dev))
++			return -EPROBE_DEFER;
+ 	}
+ 
+ 	if (!msi_create_device_irq_domain(dev, MSI_DEFAULT_DOMAIN, &aplic_msi_template,
+diff --git a/drivers/leds/flash/leds-ktd2692.c b/drivers/leds/flash/leds-ktd2692.c
+index 16a01a200c0b75..b92adf908793e5 100644
+--- a/drivers/leds/flash/leds-ktd2692.c
++++ b/drivers/leds/flash/leds-ktd2692.c
+@@ -292,6 +292,7 @@ static int ktd2692_probe(struct platform_device *pdev)
+ 
+ 	fled_cdev = &led->fled_cdev;
+ 	led_cdev = &fled_cdev->led_cdev;
++	led->props.timing = ktd2692_timing;
+ 
+ 	ret = ktd2692_parse_dt(led, &pdev->dev, &led_cfg);
+ 	if (ret)
+diff --git a/drivers/leds/leds-max5970.c b/drivers/leds/leds-max5970.c
+index 56a584311581af..285074c53b2344 100644
+--- a/drivers/leds/leds-max5970.c
++++ b/drivers/leds/leds-max5970.c
+@@ -45,7 +45,7 @@ static int max5970_led_set_brightness(struct led_classdev *cdev,
+ 
+ static int max5970_led_probe(struct platform_device *pdev)
+ {
+-	struct fwnode_handle *led_node, *child;
++	struct fwnode_handle *child;
+ 	struct device *dev = &pdev->dev;
+ 	struct regmap *regmap;
+ 	struct max5970_led *ddata;
+@@ -55,7 +55,8 @@ static int max5970_led_probe(struct platform_device *pdev)
+ 	if (!regmap)
+ 		return -ENODEV;
+ 
+-	led_node = device_get_named_child_node(dev->parent, "leds");
++	struct fwnode_handle *led_node __free(fwnode_handle) =
++		device_get_named_child_node(dev->parent, "leds");
+ 	if (!led_node)
+ 		return -ENODEV;
+ 
+diff --git a/drivers/mailbox/arm_mhuv2.c b/drivers/mailbox/arm_mhuv2.c
+index 0ec21dcdbde723..cff7c343ee082a 100644
+--- a/drivers/mailbox/arm_mhuv2.c
++++ b/drivers/mailbox/arm_mhuv2.c
+@@ -500,7 +500,7 @@ static const struct mhuv2_protocol_ops mhuv2_data_transfer_ops = {
+ static struct mbox_chan *get_irq_chan_comb(struct mhuv2 *mhu, u32 __iomem *reg)
+ {
+ 	struct mbox_chan *chans = mhu->mbox.chans;
+-	int channel = 0, i, offset = 0, windows, protocol, ch_wn;
++	int channel = 0, i, j, offset = 0, windows, protocol, ch_wn;
+ 	u32 stat;
+ 
+ 	for (i = 0; i < MHUV2_CMB_INT_ST_REG_CNT; i++) {
+@@ -510,9 +510,9 @@ static struct mbox_chan *get_irq_chan_comb(struct mhuv2 *mhu, u32 __iomem *reg)
+ 
+ 		ch_wn = i * MHUV2_STAT_BITS + __builtin_ctz(stat);
+ 
+-		for (i = 0; i < mhu->length; i += 2) {
+-			protocol = mhu->protocols[i];
+-			windows = mhu->protocols[i + 1];
++		for (j = 0; j < mhu->length; j += 2) {
++			protocol = mhu->protocols[j];
++			windows = mhu->protocols[j + 1];
+ 
+ 			if (ch_wn >= offset + windows) {
+ 				if (protocol == DOORBELL)
+diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c
+index 4bff73532085bd..9c43ed9bdd37b5 100644
+--- a/drivers/mailbox/mtk-cmdq-mailbox.c
++++ b/drivers/mailbox/mtk-cmdq-mailbox.c
+@@ -584,7 +584,7 @@ static int cmdq_get_clocks(struct device *dev, struct cmdq *cmdq)
+ 	struct clk_bulk_data *clks;
+ 
+ 	cmdq->clocks = devm_kcalloc(dev, cmdq->pdata->gce_num,
+-				    sizeof(cmdq->clocks), GFP_KERNEL);
++				    sizeof(*cmdq->clocks), GFP_KERNEL);
+ 	if (!cmdq->clocks)
+ 		return -ENOMEM;
+ 
+diff --git a/drivers/mailbox/omap-mailbox.c b/drivers/mailbox/omap-mailbox.c
+index 6797770474a55d..680243751d625f 100644
+--- a/drivers/mailbox/omap-mailbox.c
++++ b/drivers/mailbox/omap-mailbox.c
+@@ -15,6 +15,7 @@
+ #include <linux/slab.h>
+ #include <linux/kfifo.h>
+ #include <linux/err.h>
++#include <linux/io.h>
+ #include <linux/module.h>
+ #include <linux/of.h>
+ #include <linux/platform_device.h>
+diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
+index 272945a878b3ce..a3f4b4ad35aab9 100644
+--- a/drivers/media/i2c/adv7604.c
++++ b/drivers/media/i2c/adv7604.c
+@@ -1405,12 +1405,13 @@ static int stdi2dv_timings(struct v4l2_subdev *sd,
+ 	if (v4l2_detect_cvt(stdi->lcf + 1, hfreq, stdi->lcvs, 0,
+ 			(stdi->hs_pol == '+' ? V4L2_DV_HSYNC_POS_POL : 0) |
+ 			(stdi->vs_pol == '+' ? V4L2_DV_VSYNC_POS_POL : 0),
+-			false, timings))
++			false, adv76xx_get_dv_timings_cap(sd, -1), timings))
+ 		return 0;
+ 	if (v4l2_detect_gtf(stdi->lcf + 1, hfreq, stdi->lcvs,
+ 			(stdi->hs_pol == '+' ? V4L2_DV_HSYNC_POS_POL : 0) |
+ 			(stdi->vs_pol == '+' ? V4L2_DV_VSYNC_POS_POL : 0),
+-			false, state->aspect_ratio, timings))
++			false, state->aspect_ratio,
++			adv76xx_get_dv_timings_cap(sd, -1), timings))
+ 		return 0;
+ 
+ 	v4l2_dbg(2, debug, sd,
+diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
+index 014fc913225c4a..61ea7393066d77 100644
+--- a/drivers/media/i2c/adv7842.c
++++ b/drivers/media/i2c/adv7842.c
+@@ -1431,14 +1431,15 @@ static int stdi2dv_timings(struct v4l2_subdev *sd,
+ 	}
+ 
+ 	if (v4l2_detect_cvt(stdi->lcf + 1, hfreq, stdi->lcvs, 0,
+-			(stdi->hs_pol == '+' ? V4L2_DV_HSYNC_POS_POL : 0) |
+-			(stdi->vs_pol == '+' ? V4L2_DV_VSYNC_POS_POL : 0),
+-			false, timings))
++			    (stdi->hs_pol == '+' ? V4L2_DV_HSYNC_POS_POL : 0) |
++			    (stdi->vs_pol == '+' ? V4L2_DV_VSYNC_POS_POL : 0),
++			    false, adv7842_get_dv_timings_cap(sd), timings))
+ 		return 0;
+ 	if (v4l2_detect_gtf(stdi->lcf + 1, hfreq, stdi->lcvs,
+-			(stdi->hs_pol == '+' ? V4L2_DV_HSYNC_POS_POL : 0) |
+-			(stdi->vs_pol == '+' ? V4L2_DV_VSYNC_POS_POL : 0),
+-			false, state->aspect_ratio, timings))
++			    (stdi->hs_pol == '+' ? V4L2_DV_HSYNC_POS_POL : 0) |
++			    (stdi->vs_pol == '+' ? V4L2_DV_VSYNC_POS_POL : 0),
++			    false, state->aspect_ratio,
++			    adv7842_get_dv_timings_cap(sd), timings))
+ 		return 0;
+ 
+ 	v4l2_dbg(2, debug, sd,
+diff --git a/drivers/media/i2c/ds90ub960.c b/drivers/media/i2c/ds90ub960.c
+index ffe5f25f864762..58424d8f72af03 100644
+--- a/drivers/media/i2c/ds90ub960.c
++++ b/drivers/media/i2c/ds90ub960.c
+@@ -1286,7 +1286,7 @@ static int ub960_rxport_get_strobe_pos(struct ub960_data *priv,
+ 
+ 	clk_delay += v & UB960_IR_RX_ANA_STROBE_SET_CLK_DELAY_MASK;
+ 
+-	ub960_rxport_read(priv, nport, UB960_RR_SFILTER_STS_1, &v);
++	ret = ub960_rxport_read(priv, nport, UB960_RR_SFILTER_STS_1, &v);
+ 	if (ret)
+ 		return ret;
+ 
+diff --git a/drivers/media/i2c/max96717.c b/drivers/media/i2c/max96717.c
+index 4e85b8eb1e7767..9259d58ba734ee 100644
+--- a/drivers/media/i2c/max96717.c
++++ b/drivers/media/i2c/max96717.c
+@@ -697,8 +697,10 @@ static int max96717_subdev_init(struct max96717_priv *priv)
+ 	priv->pads[MAX96717_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+ 
+ 	ret = media_entity_pads_init(&priv->sd.entity, 2, priv->pads);
+-	if (ret)
+-		return dev_err_probe(dev, ret, "Failed to init pads\n");
++	if (ret) {
++		dev_err_probe(dev, ret, "Failed to init pads\n");
++		goto err_free_ctrl;
++	}
+ 
+ 	ret = v4l2_subdev_init_finalize(&priv->sd);
+ 	if (ret) {
+diff --git a/drivers/media/i2c/vgxy61.c b/drivers/media/i2c/vgxy61.c
+index 409d2d4ffb4bb2..d77468c8587bc4 100644
+--- a/drivers/media/i2c/vgxy61.c
++++ b/drivers/media/i2c/vgxy61.c
+@@ -1617,7 +1617,7 @@ static int vgxy61_detect(struct vgxy61_dev *sensor)
+ 
+ 	ret = cci_read(sensor->regmap, VGXY61_REG_NVM, &st, NULL);
+ 	if (ret < 0)
+-		return st;
++		return ret;
+ 	if (st != VGXY61_NVM_OK)
+ 		dev_warn(&client->dev, "Bad nvm state got %u\n", (u8)st);
+ 
+diff --git a/drivers/media/pci/intel/ipu6/Kconfig b/drivers/media/pci/intel/ipu6/Kconfig
+index 49e4fb696573f6..a4537818a58c05 100644
+--- a/drivers/media/pci/intel/ipu6/Kconfig
++++ b/drivers/media/pci/intel/ipu6/Kconfig
+@@ -4,12 +4,6 @@ config VIDEO_INTEL_IPU6
+ 	depends on VIDEO_DEV
+ 	depends on X86 && X86_64 && HAS_DMA
+ 	depends on IPU_BRIDGE || !IPU_BRIDGE
+-	#
+-	# This driver incorrectly tries to override the dma_ops.  It should
+-	# never have done that, but for now keep it working on architectures
+-	# that use dma ops
+-	#
+-	depends on ARCH_HAS_DMA_OPS
+ 	select AUXILIARY_BUS
+ 	select IOMMU_IOVA
+ 	select VIDEO_V4L2_SUBDEV_API
+diff --git a/drivers/media/pci/intel/ipu6/ipu6-bus.c b/drivers/media/pci/intel/ipu6/ipu6-bus.c
+index 149ec098cdbfe1..37d88ddb6ee7cd 100644
+--- a/drivers/media/pci/intel/ipu6/ipu6-bus.c
++++ b/drivers/media/pci/intel/ipu6/ipu6-bus.c
+@@ -94,8 +94,6 @@ ipu6_bus_initialize_device(struct pci_dev *pdev, struct device *parent,
+ 	if (!adev)
+ 		return ERR_PTR(-ENOMEM);
+ 
+-	adev->dma_mask = DMA_BIT_MASK(isp->secure_mode ? IPU6_MMU_ADDR_BITS :
+-				      IPU6_MMU_ADDR_BITS_NON_SECURE);
+ 	adev->isp = isp;
+ 	adev->ctrl = ctrl;
+ 	adev->pdata = pdata;
+@@ -106,10 +104,6 @@ ipu6_bus_initialize_device(struct pci_dev *pdev, struct device *parent,
+ 
+ 	auxdev->dev.parent = parent;
+ 	auxdev->dev.release = ipu6_bus_release;
+-	auxdev->dev.dma_ops = &ipu6_dma_ops;
+-	auxdev->dev.dma_mask = &adev->dma_mask;
+-	auxdev->dev.dma_parms = pdev->dev.dma_parms;
+-	auxdev->dev.coherent_dma_mask = adev->dma_mask;
+ 
+ 	ret = auxiliary_device_init(auxdev);
+ 	if (ret < 0) {
+diff --git a/drivers/media/pci/intel/ipu6/ipu6-buttress.c b/drivers/media/pci/intel/ipu6/ipu6-buttress.c
+index e47f84c30e10d6..1ee63ef4a40b22 100644
+--- a/drivers/media/pci/intel/ipu6/ipu6-buttress.c
++++ b/drivers/media/pci/intel/ipu6/ipu6-buttress.c
+@@ -24,6 +24,7 @@
+ 
+ #include "ipu6.h"
+ #include "ipu6-bus.h"
++#include "ipu6-dma.h"
+ #include "ipu6-buttress.h"
+ #include "ipu6-platform-buttress-regs.h"
+ 
+@@ -345,12 +346,16 @@ irqreturn_t ipu6_buttress_isr(int irq, void *isp_ptr)
+ 	u32 disable_irqs = 0;
+ 	u32 irq_status;
+ 	u32 i, count = 0;
++	int active;
+ 
+-	pm_runtime_get_noresume(&isp->pdev->dev);
++	active = pm_runtime_get_if_active(&isp->pdev->dev);
++	if (!active)
++		return IRQ_NONE;
+ 
+ 	irq_status = readl(isp->base + reg_irq_sts);
+-	if (!irq_status) {
+-		pm_runtime_put_noidle(&isp->pdev->dev);
++	if (irq_status == 0 || WARN_ON_ONCE(irq_status == 0xffffffffu)) {
++		if (active > 0)
++			pm_runtime_put_noidle(&isp->pdev->dev);
+ 		return IRQ_NONE;
+ 	}
+ 
+@@ -426,7 +431,8 @@ irqreturn_t ipu6_buttress_isr(int irq, void *isp_ptr)
+ 		writel(BUTTRESS_IRQS & ~disable_irqs,
+ 		       isp->base + BUTTRESS_REG_ISR_ENABLE);
+ 
+-	pm_runtime_put(&isp->pdev->dev);
++	if (active > 0)
++		pm_runtime_put(&isp->pdev->dev);
+ 
+ 	return ret;
+ }
+@@ -553,6 +559,7 @@ int ipu6_buttress_map_fw_image(struct ipu6_bus_device *sys,
+ 			       const struct firmware *fw, struct sg_table *sgt)
+ {
+ 	bool is_vmalloc = is_vmalloc_addr(fw->data);
++	struct pci_dev *pdev = sys->isp->pdev;
+ 	struct page **pages;
+ 	const void *addr;
+ 	unsigned long n_pages;
+@@ -588,14 +595,20 @@ int ipu6_buttress_map_fw_image(struct ipu6_bus_device *sys,
+ 		goto out;
+ 	}
+ 
+-	ret = dma_map_sgtable(&sys->auxdev.dev, sgt, DMA_TO_DEVICE, 0);
+-	if (ret < 0) {
+-		ret = -ENOMEM;
++	ret = dma_map_sgtable(&pdev->dev, sgt, DMA_TO_DEVICE, 0);
++	if (ret) {
+ 		sg_free_table(sgt);
+ 		goto out;
+ 	}
+ 
+-	dma_sync_sgtable_for_device(&sys->auxdev.dev, sgt, DMA_TO_DEVICE);
++	ret = ipu6_dma_map_sgtable(sys, sgt, DMA_TO_DEVICE, 0);
++	if (ret) {
++		dma_unmap_sgtable(&pdev->dev, sgt, DMA_TO_DEVICE, 0);
++		sg_free_table(sgt);
++		goto out;
++	}
++
++	ipu6_dma_sync_sgtable(sys, sgt);
+ 
+ out:
+ 	kfree(pages);
+@@ -607,7 +620,10 @@ EXPORT_SYMBOL_NS_GPL(ipu6_buttress_map_fw_image, INTEL_IPU6);
+ void ipu6_buttress_unmap_fw_image(struct ipu6_bus_device *sys,
+ 				  struct sg_table *sgt)
+ {
+-	dma_unmap_sgtable(&sys->auxdev.dev, sgt, DMA_TO_DEVICE, 0);
++	struct pci_dev *pdev = sys->isp->pdev;
++
++	ipu6_dma_unmap_sgtable(sys, sgt, DMA_TO_DEVICE, 0);
++	dma_unmap_sgtable(&pdev->dev, sgt, DMA_TO_DEVICE, 0);
+ 	sg_free_table(sgt);
+ }
+ EXPORT_SYMBOL_NS_GPL(ipu6_buttress_unmap_fw_image, INTEL_IPU6);
+diff --git a/drivers/media/pci/intel/ipu6/ipu6-cpd.c b/drivers/media/pci/intel/ipu6/ipu6-cpd.c
+index 715b21ab4b8e98..21c1c128a7eaa5 100644
+--- a/drivers/media/pci/intel/ipu6/ipu6-cpd.c
++++ b/drivers/media/pci/intel/ipu6/ipu6-cpd.c
+@@ -15,6 +15,7 @@
+ #include "ipu6.h"
+ #include "ipu6-bus.h"
+ #include "ipu6-cpd.h"
++#include "ipu6-dma.h"
+ 
+ /* 15 entries + header*/
+ #define MAX_PKG_DIR_ENT_CNT		16
+@@ -162,7 +163,6 @@ int ipu6_cpd_create_pkg_dir(struct ipu6_bus_device *adev, const void *src)
+ {
+ 	dma_addr_t dma_addr_src = sg_dma_address(adev->fw_sgt.sgl);
+ 	const struct ipu6_cpd_ent *ent, *man_ent, *met_ent;
+-	struct device *dev = &adev->auxdev.dev;
+ 	struct ipu6_device *isp = adev->isp;
+ 	unsigned int man_sz, met_sz;
+ 	void *pkg_dir_pos;
+@@ -175,8 +175,8 @@ int ipu6_cpd_create_pkg_dir(struct ipu6_bus_device *adev, const void *src)
+ 	met_sz = met_ent->len;
+ 
+ 	adev->pkg_dir_size = PKG_DIR_SIZE + man_sz + met_sz;
+-	adev->pkg_dir = dma_alloc_attrs(dev, adev->pkg_dir_size,
+-					&adev->pkg_dir_dma_addr, GFP_KERNEL, 0);
++	adev->pkg_dir = ipu6_dma_alloc(adev, adev->pkg_dir_size,
++				       &adev->pkg_dir_dma_addr, GFP_KERNEL, 0);
+ 	if (!adev->pkg_dir)
+ 		return -ENOMEM;
+ 
+@@ -198,8 +198,8 @@ int ipu6_cpd_create_pkg_dir(struct ipu6_bus_device *adev, const void *src)
+ 					 met_ent->len);
+ 	if (ret) {
+ 		dev_err(&isp->pdev->dev, "Failed to parse module data\n");
+-		dma_free_attrs(dev, adev->pkg_dir_size,
+-			       adev->pkg_dir, adev->pkg_dir_dma_addr, 0);
++		ipu6_dma_free(adev, adev->pkg_dir_size,
++			      adev->pkg_dir, adev->pkg_dir_dma_addr, 0);
+ 		return ret;
+ 	}
+ 
+@@ -211,8 +211,8 @@ int ipu6_cpd_create_pkg_dir(struct ipu6_bus_device *adev, const void *src)
+ 	pkg_dir_pos += man_sz;
+ 	memcpy(pkg_dir_pos, src + met_ent->offset, met_sz);
+ 
+-	dma_sync_single_range_for_device(dev, adev->pkg_dir_dma_addr,
+-					 0, adev->pkg_dir_size, DMA_TO_DEVICE);
++	ipu6_dma_sync_single(adev, adev->pkg_dir_dma_addr,
++			     adev->pkg_dir_size);
+ 
+ 	return 0;
+ }
+@@ -220,8 +220,8 @@ EXPORT_SYMBOL_NS_GPL(ipu6_cpd_create_pkg_dir, INTEL_IPU6);
+ 
+ void ipu6_cpd_free_pkg_dir(struct ipu6_bus_device *adev)
+ {
+-	dma_free_attrs(&adev->auxdev.dev, adev->pkg_dir_size, adev->pkg_dir,
+-		       adev->pkg_dir_dma_addr, 0);
++	ipu6_dma_free(adev, adev->pkg_dir_size, adev->pkg_dir,
++		      adev->pkg_dir_dma_addr, 0);
+ }
+ EXPORT_SYMBOL_NS_GPL(ipu6_cpd_free_pkg_dir, INTEL_IPU6);
+ 
+diff --git a/drivers/media/pci/intel/ipu6/ipu6-dma.c b/drivers/media/pci/intel/ipu6/ipu6-dma.c
+index 92530a1cc90f51..b71f66bd8c1fdb 100644
+--- a/drivers/media/pci/intel/ipu6/ipu6-dma.c
++++ b/drivers/media/pci/intel/ipu6/ipu6-dma.c
+@@ -39,8 +39,7 @@ static struct vm_info *get_vm_info(struct ipu6_mmu *mmu, dma_addr_t iova)
+ 	return NULL;
+ }
+ 
+-static void __dma_clear_buffer(struct page *page, size_t size,
+-			       unsigned long attrs)
++static void __clear_buffer(struct page *page, size_t size, unsigned long attrs)
+ {
+ 	void *ptr;
+ 
+@@ -56,8 +55,7 @@ static void __dma_clear_buffer(struct page *page, size_t size,
+ 		clflush_cache_range(ptr, size);
+ }
+ 
+-static struct page **__dma_alloc_buffer(struct device *dev, size_t size,
+-					gfp_t gfp, unsigned long attrs)
++static struct page **__alloc_buffer(size_t size, gfp_t gfp, unsigned long attrs)
+ {
+ 	int count = PHYS_PFN(size);
+ 	int array_size = count * sizeof(struct page *);
+@@ -86,7 +84,7 @@ static struct page **__dma_alloc_buffer(struct device *dev, size_t size,
+ 				pages[i + j] = pages[i] + j;
+ 		}
+ 
+-		__dma_clear_buffer(pages[i], PAGE_SIZE << order, attrs);
++		__clear_buffer(pages[i], PAGE_SIZE << order, attrs);
+ 		i += 1 << order;
+ 		count -= 1 << order;
+ 	}
+@@ -100,29 +98,26 @@ static struct page **__dma_alloc_buffer(struct device *dev, size_t size,
+ 	return NULL;
+ }
+ 
+-static void __dma_free_buffer(struct device *dev, struct page **pages,
+-			      size_t size, unsigned long attrs)
++static void __free_buffer(struct page **pages, size_t size, unsigned long attrs)
+ {
+ 	int count = PHYS_PFN(size);
+ 	unsigned int i;
+ 
+ 	for (i = 0; i < count && pages[i]; i++) {
+-		__dma_clear_buffer(pages[i], PAGE_SIZE, attrs);
++		__clear_buffer(pages[i], PAGE_SIZE, attrs);
+ 		__free_pages(pages[i], 0);
+ 	}
+ 
+ 	kvfree(pages);
+ }
+ 
+-static void ipu6_dma_sync_single_for_cpu(struct device *dev,
+-					 dma_addr_t dma_handle,
+-					 size_t size,
+-					 enum dma_data_direction dir)
++void ipu6_dma_sync_single(struct ipu6_bus_device *sys, dma_addr_t dma_handle,
++			  size_t size)
+ {
+ 	void *vaddr;
+ 	u32 offset;
+ 	struct vm_info *info;
+-	struct ipu6_mmu *mmu = to_ipu6_bus_device(dev)->mmu;
++	struct ipu6_mmu *mmu = sys->mmu;
+ 
+ 	info = get_vm_info(mmu, dma_handle);
+ 	if (WARN_ON(!info))
+@@ -135,10 +130,10 @@ static void ipu6_dma_sync_single_for_cpu(struct device *dev,
+ 	vaddr = info->vaddr + offset;
+ 	clflush_cache_range(vaddr, size);
+ }
++EXPORT_SYMBOL_NS_GPL(ipu6_dma_sync_single, INTEL_IPU6);
+ 
+-static void ipu6_dma_sync_sg_for_cpu(struct device *dev,
+-				     struct scatterlist *sglist,
+-				     int nents, enum dma_data_direction dir)
++void ipu6_dma_sync_sg(struct ipu6_bus_device *sys, struct scatterlist *sglist,
++		      int nents)
+ {
+ 	struct scatterlist *sg;
+ 	int i;
+@@ -146,14 +141,22 @@ static void ipu6_dma_sync_sg_for_cpu(struct device *dev,
+ 	for_each_sg(sglist, sg, nents, i)
+ 		clflush_cache_range(page_to_virt(sg_page(sg)), sg->length);
+ }
++EXPORT_SYMBOL_NS_GPL(ipu6_dma_sync_sg, INTEL_IPU6);
+ 
+-static void *ipu6_dma_alloc(struct device *dev, size_t size,
+-			    dma_addr_t *dma_handle, gfp_t gfp,
+-			    unsigned long attrs)
++void ipu6_dma_sync_sgtable(struct ipu6_bus_device *sys, struct sg_table *sgt)
+ {
+-	struct ipu6_mmu *mmu = to_ipu6_bus_device(dev)->mmu;
+-	struct pci_dev *pdev = to_ipu6_bus_device(dev)->isp->pdev;
++	ipu6_dma_sync_sg(sys, sgt->sgl, sgt->orig_nents);
++}
++EXPORT_SYMBOL_NS_GPL(ipu6_dma_sync_sgtable, INTEL_IPU6);
++
++void *ipu6_dma_alloc(struct ipu6_bus_device *sys, size_t size,
++		     dma_addr_t *dma_handle, gfp_t gfp,
++		     unsigned long attrs)
++{
++	struct device *dev = &sys->auxdev.dev;
++	struct pci_dev *pdev = sys->isp->pdev;
+ 	dma_addr_t pci_dma_addr, ipu6_iova;
++	struct ipu6_mmu *mmu = sys->mmu;
+ 	struct vm_info *info;
+ 	unsigned long count;
+ 	struct page **pages;
+@@ -173,7 +176,7 @@ static void *ipu6_dma_alloc(struct device *dev, size_t size,
+ 	if (!iova)
+ 		goto out_kfree;
+ 
+-	pages = __dma_alloc_buffer(dev, size, gfp, attrs);
++	pages = __alloc_buffer(size, gfp, attrs);
+ 	if (!pages)
+ 		goto out_free_iova;
+ 
+@@ -227,7 +230,7 @@ static void *ipu6_dma_alloc(struct device *dev, size_t size,
+ 		ipu6_mmu_unmap(mmu->dmap->mmu_info, ipu6_iova, PAGE_SIZE);
+ 	}
+ 
+-	__dma_free_buffer(dev, pages, size, attrs);
++	__free_buffer(pages, size, attrs);
+ 
+ out_free_iova:
+ 	__free_iova(&mmu->dmap->iovad, iova);
+@@ -236,13 +239,13 @@ static void *ipu6_dma_alloc(struct device *dev, size_t size,
+ 
+ 	return NULL;
+ }
++EXPORT_SYMBOL_NS_GPL(ipu6_dma_alloc, INTEL_IPU6);
+ 
+-static void ipu6_dma_free(struct device *dev, size_t size, void *vaddr,
+-			  dma_addr_t dma_handle,
+-			  unsigned long attrs)
++void ipu6_dma_free(struct ipu6_bus_device *sys, size_t size, void *vaddr,
++		   dma_addr_t dma_handle, unsigned long attrs)
+ {
+-	struct ipu6_mmu *mmu = to_ipu6_bus_device(dev)->mmu;
+-	struct pci_dev *pdev = to_ipu6_bus_device(dev)->isp->pdev;
++	struct ipu6_mmu *mmu = sys->mmu;
++	struct pci_dev *pdev = sys->isp->pdev;
+ 	struct iova *iova = find_iova(&mmu->dmap->iovad, PHYS_PFN(dma_handle));
+ 	dma_addr_t pci_dma_addr, ipu6_iova;
+ 	struct vm_info *info;
+@@ -281,7 +284,7 @@ static void ipu6_dma_free(struct device *dev, size_t size, void *vaddr,
+ 	ipu6_mmu_unmap(mmu->dmap->mmu_info, PFN_PHYS(iova->pfn_lo),
+ 		       PFN_PHYS(iova_size(iova)));
+ 
+-	__dma_free_buffer(dev, pages, size, attrs);
++	__free_buffer(pages, size, attrs);
+ 
+ 	mmu->tlb_invalidate(mmu);
+ 
+@@ -289,13 +292,14 @@ static void ipu6_dma_free(struct device *dev, size_t size, void *vaddr,
+ 
+ 	kfree(info);
+ }
++EXPORT_SYMBOL_NS_GPL(ipu6_dma_free, INTEL_IPU6);
+ 
+-static int ipu6_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+-			 void *addr, dma_addr_t iova, size_t size,
+-			 unsigned long attrs)
++int ipu6_dma_mmap(struct ipu6_bus_device *sys, struct vm_area_struct *vma,
++		  void *addr, dma_addr_t iova, size_t size,
++		  unsigned long attrs)
+ {
+-	struct ipu6_mmu *mmu = to_ipu6_bus_device(dev)->mmu;
+-	size_t count = PHYS_PFN(PAGE_ALIGN(size));
++	struct ipu6_mmu *mmu = sys->mmu;
++	size_t count = PFN_UP(size);
+ 	struct vm_info *info;
+ 	size_t i;
+ 	int ret;
+@@ -323,18 +327,17 @@ static int ipu6_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+ 	return 0;
+ }
+ 
+-static void ipu6_dma_unmap_sg(struct device *dev,
+-			      struct scatterlist *sglist,
+-			      int nents, enum dma_data_direction dir,
+-			      unsigned long attrs)
++void ipu6_dma_unmap_sg(struct ipu6_bus_device *sys, struct scatterlist *sglist,
++		       int nents, enum dma_data_direction dir,
++		       unsigned long attrs)
+ {
+-	struct pci_dev *pdev = to_ipu6_bus_device(dev)->isp->pdev;
+-	struct ipu6_mmu *mmu = to_ipu6_bus_device(dev)->mmu;
++	struct device *dev = &sys->auxdev.dev;
++	struct ipu6_mmu *mmu = sys->mmu;
+ 	struct iova *iova = find_iova(&mmu->dmap->iovad,
+ 				      PHYS_PFN(sg_dma_address(sglist)));
+-	int i, npages, count;
+ 	struct scatterlist *sg;
+ 	dma_addr_t pci_dma_addr;
++	unsigned int i;
+ 
+ 	if (!nents)
+ 		return;
+@@ -342,31 +345,15 @@ static void ipu6_dma_unmap_sg(struct device *dev,
+ 	if (WARN_ON(!iova))
+ 		return;
+ 
+-	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
+-		ipu6_dma_sync_sg_for_cpu(dev, sglist, nents, DMA_BIDIRECTIONAL);
+-
+-	/* get the nents as orig_nents given by caller */
+-	count = 0;
+-	npages = iova_size(iova);
+-	for_each_sg(sglist, sg, nents, i) {
+-		if (sg_dma_len(sg) == 0 ||
+-		    sg_dma_address(sg) == DMA_MAPPING_ERROR)
+-			break;
+-
+-		npages -= PHYS_PFN(PAGE_ALIGN(sg_dma_len(sg)));
+-		count++;
+-		if (npages <= 0)
+-			break;
+-	}
+-
+ 	/*
+ 	 * Before IPU6 mmu unmap, return the pci dma address back to sg
+ 	 * assume the nents is less than orig_nents as the least granule
+ 	 * is 1 SZ_4K page
+ 	 */
+-	dev_dbg(dev, "trying to unmap concatenated %u ents\n", count);
+-	for_each_sg(sglist, sg, count, i) {
+-		dev_dbg(dev, "ipu unmap sg[%d] %pad\n", i, &sg_dma_address(sg));
++	dev_dbg(dev, "trying to unmap concatenated %u ents\n", nents);
++	for_each_sg(sglist, sg, nents, i) {
++		dev_dbg(dev, "unmap sg[%d] %pad size %u\n", i,
++			&sg_dma_address(sg), sg_dma_len(sg));
+ 		pci_dma_addr = ipu6_mmu_iova_to_phys(mmu->dmap->mmu_info,
+ 						     sg_dma_address(sg));
+ 		dev_dbg(dev, "return pci_dma_addr %pad back to sg[%d]\n",
+@@ -380,23 +367,21 @@ static void ipu6_dma_unmap_sg(struct device *dev,
+ 		       PFN_PHYS(iova_size(iova)));
+ 
+ 	mmu->tlb_invalidate(mmu);
+-
+-	dma_unmap_sg_attrs(&pdev->dev, sglist, nents, dir, attrs);
+-
+ 	__free_iova(&mmu->dmap->iovad, iova);
+ }
++EXPORT_SYMBOL_NS_GPL(ipu6_dma_unmap_sg, INTEL_IPU6);
+ 
+-static int ipu6_dma_map_sg(struct device *dev, struct scatterlist *sglist,
+-			   int nents, enum dma_data_direction dir,
+-			   unsigned long attrs)
++int ipu6_dma_map_sg(struct ipu6_bus_device *sys, struct scatterlist *sglist,
++		    int nents, enum dma_data_direction dir,
++		    unsigned long attrs)
+ {
+-	struct ipu6_mmu *mmu = to_ipu6_bus_device(dev)->mmu;
+-	struct pci_dev *pdev = to_ipu6_bus_device(dev)->isp->pdev;
++	struct device *dev = &sys->auxdev.dev;
++	struct ipu6_mmu *mmu = sys->mmu;
+ 	struct scatterlist *sg;
+ 	struct iova *iova;
+ 	size_t npages = 0;
+ 	unsigned long iova_addr;
+-	int i, count;
++	int i;
+ 
+ 	for_each_sg(sglist, sg, nents, i) {
+ 		if (sg->offset) {
+@@ -406,18 +391,12 @@ static int ipu6_dma_map_sg(struct device *dev, struct scatterlist *sglist,
+ 		}
+ 	}
+ 
+-	dev_dbg(dev, "pci_dma_map_sg trying to map %d ents\n", nents);
+-	count  = dma_map_sg_attrs(&pdev->dev, sglist, nents, dir, attrs);
+-	if (count <= 0) {
+-		dev_err(dev, "pci_dma_map_sg %d ents failed\n", nents);
+-		return 0;
+-	}
+-
+-	dev_dbg(dev, "pci_dma_map_sg %d ents mapped\n", count);
+-
+-	for_each_sg(sglist, sg, count, i)
++	for_each_sg(sglist, sg, nents, i)
+ 		npages += PHYS_PFN(PAGE_ALIGN(sg_dma_len(sg)));
+ 
++	dev_dbg(dev, "dmamap trying to map %d ents %zu pages\n",
++		nents, npages);
++
+ 	iova = alloc_iova(&mmu->dmap->iovad, npages,
+ 			  PHYS_PFN(dma_get_mask(dev)), 0);
+ 	if (!iova)
+@@ -427,12 +406,13 @@ static int ipu6_dma_map_sg(struct device *dev, struct scatterlist *sglist,
+ 		iova->pfn_hi);
+ 
+ 	iova_addr = iova->pfn_lo;
+-	for_each_sg(sglist, sg, count, i) {
++	for_each_sg(sglist, sg, nents, i) {
++		phys_addr_t iova_pa;
+ 		int ret;
+ 
+-		dev_dbg(dev, "mapping entry %d: iova 0x%llx phy %pad size %d\n",
+-			i, PFN_PHYS(iova_addr), &sg_dma_address(sg),
+-			sg_dma_len(sg));
++		iova_pa = PFN_PHYS(iova_addr);
++		dev_dbg(dev, "mapping entry %d: iova %pap phy %pap size %d\n",
++			i, &iova_pa, &sg_dma_address(sg), sg_dma_len(sg));
+ 
+ 		ret = ipu6_mmu_map(mmu->dmap->mmu_info, PFN_PHYS(iova_addr),
+ 				   sg_dma_address(sg),
+@@ -445,25 +425,48 @@ static int ipu6_dma_map_sg(struct device *dev, struct scatterlist *sglist,
+ 		iova_addr += PHYS_PFN(PAGE_ALIGN(sg_dma_len(sg)));
+ 	}
+ 
+-	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
+-		ipu6_dma_sync_sg_for_cpu(dev, sglist, nents, DMA_BIDIRECTIONAL);
++	dev_dbg(dev, "dmamap %d ents %zu pages mapped\n", nents, npages);
+ 
+-	return count;
++	return nents;
+ 
+ out_fail:
+-	ipu6_dma_unmap_sg(dev, sglist, i, dir, attrs);
++	ipu6_dma_unmap_sg(sys, sglist, i, dir, attrs);
+ 
+ 	return 0;
+ }
++EXPORT_SYMBOL_NS_GPL(ipu6_dma_map_sg, INTEL_IPU6);
++
++int ipu6_dma_map_sgtable(struct ipu6_bus_device *sys, struct sg_table *sgt,
++			 enum dma_data_direction dir, unsigned long attrs)
++{
++	int nents;
++
++	nents = ipu6_dma_map_sg(sys, sgt->sgl, sgt->nents, dir, attrs);
++	if (nents < 0)
++		return nents;
++
++	sgt->nents = nents;
++
++	return 0;
++}
++EXPORT_SYMBOL_NS_GPL(ipu6_dma_map_sgtable, INTEL_IPU6);
++
++void ipu6_dma_unmap_sgtable(struct ipu6_bus_device *sys, struct sg_table *sgt,
++			    enum dma_data_direction dir, unsigned long attrs)
++{
++	ipu6_dma_unmap_sg(sys, sgt->sgl, sgt->nents, dir, attrs);
++}
++EXPORT_SYMBOL_NS_GPL(ipu6_dma_unmap_sgtable, INTEL_IPU6);
+ 
+ /*
+  * Create scatter-list for the already allocated DMA buffer
+  */
+-static int ipu6_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
+-				void *cpu_addr, dma_addr_t handle, size_t size,
+-				unsigned long attrs)
++int ipu6_dma_get_sgtable(struct ipu6_bus_device *sys, struct sg_table *sgt,
++			 void *cpu_addr, dma_addr_t handle, size_t size,
++			 unsigned long attrs)
+ {
+-	struct ipu6_mmu *mmu = to_ipu6_bus_device(dev)->mmu;
++	struct device *dev = &sys->auxdev.dev;
++	struct ipu6_mmu *mmu = sys->mmu;
+ 	struct vm_info *info;
+ 	int n_pages;
+ 	int ret = 0;
+@@ -483,20 +486,7 @@ static int ipu6_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
+ 	ret = sg_alloc_table_from_pages(sgt, info->pages, n_pages, 0, size,
+ 					GFP_KERNEL);
+ 	if (ret)
+-		dev_warn(dev, "IPU6 get sgt table failed\n");
++		dev_warn(dev, "get sgt table failed\n");
+ 
+ 	return ret;
+ }
+-
+-const struct dma_map_ops ipu6_dma_ops = {
+-	.alloc = ipu6_dma_alloc,
+-	.free = ipu6_dma_free,
+-	.mmap = ipu6_dma_mmap,
+-	.map_sg = ipu6_dma_map_sg,
+-	.unmap_sg = ipu6_dma_unmap_sg,
+-	.sync_single_for_cpu = ipu6_dma_sync_single_for_cpu,
+-	.sync_single_for_device = ipu6_dma_sync_single_for_cpu,
+-	.sync_sg_for_cpu = ipu6_dma_sync_sg_for_cpu,
+-	.sync_sg_for_device = ipu6_dma_sync_sg_for_cpu,
+-	.get_sgtable = ipu6_dma_get_sgtable,
+-};
+diff --git a/drivers/media/pci/intel/ipu6/ipu6-dma.h b/drivers/media/pci/intel/ipu6/ipu6-dma.h
+index 847ea5b7c925c3..b51244add9e611 100644
+--- a/drivers/media/pci/intel/ipu6/ipu6-dma.h
++++ b/drivers/media/pci/intel/ipu6/ipu6-dma.h
+@@ -5,7 +5,13 @@
+ #define IPU6_DMA_H
+ 
+ #include <linux/dma-map-ops.h>
++#include <linux/dma-mapping.h>
+ #include <linux/iova.h>
++#include <linux/iova.h>
++#include <linux/scatterlist.h>
++#include <linux/types.h>
++
++#include "ipu6-bus.h"
+ 
+ struct ipu6_mmu_info;
+ 
+@@ -14,6 +20,30 @@ struct ipu6_dma_mapping {
+ 	struct iova_domain iovad;
+ };
+ 
+-extern const struct dma_map_ops ipu6_dma_ops;
+-
++void ipu6_dma_sync_single(struct ipu6_bus_device *sys, dma_addr_t dma_handle,
++			  size_t size);
++void ipu6_dma_sync_sg(struct ipu6_bus_device *sys, struct scatterlist *sglist,
++		      int nents);
++void ipu6_dma_sync_sgtable(struct ipu6_bus_device *sys, struct sg_table *sgt);
++void *ipu6_dma_alloc(struct ipu6_bus_device *sys, size_t size,
++		     dma_addr_t *dma_handle, gfp_t gfp,
++		     unsigned long attrs);
++void ipu6_dma_free(struct ipu6_bus_device *sys, size_t size, void *vaddr,
++		   dma_addr_t dma_handle, unsigned long attrs);
++int ipu6_dma_mmap(struct ipu6_bus_device *sys, struct vm_area_struct *vma,
++		  void *addr, dma_addr_t iova, size_t size,
++		  unsigned long attrs);
++int ipu6_dma_map_sg(struct ipu6_bus_device *sys, struct scatterlist *sglist,
++		    int nents, enum dma_data_direction dir,
++		    unsigned long attrs);
++void ipu6_dma_unmap_sg(struct ipu6_bus_device *sys, struct scatterlist *sglist,
++		       int nents, enum dma_data_direction dir,
++		       unsigned long attrs);
++int ipu6_dma_map_sgtable(struct ipu6_bus_device *sys, struct sg_table *sgt,
++			 enum dma_data_direction dir, unsigned long attrs);
++void ipu6_dma_unmap_sgtable(struct ipu6_bus_device *sys, struct sg_table *sgt,
++			    enum dma_data_direction dir, unsigned long attrs);
++int ipu6_dma_get_sgtable(struct ipu6_bus_device *sys, struct sg_table *sgt,
++			 void *cpu_addr, dma_addr_t handle, size_t size,
++			 unsigned long attrs);
+ #endif /* IPU6_DMA_H */
+diff --git a/drivers/media/pci/intel/ipu6/ipu6-fw-com.c b/drivers/media/pci/intel/ipu6/ipu6-fw-com.c
+index 0b33fe9e703dcb..7d3d9314cb306b 100644
+--- a/drivers/media/pci/intel/ipu6/ipu6-fw-com.c
++++ b/drivers/media/pci/intel/ipu6/ipu6-fw-com.c
+@@ -12,6 +12,7 @@
+ #include <linux/types.h>
+ 
+ #include "ipu6-bus.h"
++#include "ipu6-dma.h"
+ #include "ipu6-fw-com.h"
+ 
+ /*
+@@ -88,7 +89,6 @@ struct ipu6_fw_com_context {
+ 	void *dma_buffer;
+ 	dma_addr_t dma_addr;
+ 	unsigned int dma_size;
+-	unsigned long attrs;
+ 
+ 	struct ipu6_fw_sys_queue *input_queue;	/* array of host to SP queues */
+ 	struct ipu6_fw_sys_queue *output_queue;	/* array of SP to host */
+@@ -164,7 +164,6 @@ void *ipu6_fw_com_prepare(struct ipu6_fw_com_cfg *cfg,
+ 	struct ipu6_fw_com_context *ctx;
+ 	struct device *dev = &adev->auxdev.dev;
+ 	size_t sizeall, offset;
+-	unsigned long attrs = 0;
+ 	void *specific_host_addr;
+ 	unsigned int i;
+ 
+@@ -206,9 +205,8 @@ void *ipu6_fw_com_prepare(struct ipu6_fw_com_cfg *cfg,
+ 
+ 	sizeall += sizeinput + sizeoutput;
+ 
+-	ctx->dma_buffer = dma_alloc_attrs(dev, sizeall, &ctx->dma_addr,
+-					  GFP_KERNEL, attrs);
+-	ctx->attrs = attrs;
++	ctx->dma_buffer = ipu6_dma_alloc(adev, sizeall, &ctx->dma_addr,
++					 GFP_KERNEL, 0);
+ 	if (!ctx->dma_buffer) {
+ 		dev_err(dev, "failed to allocate dma memory\n");
+ 		kfree(ctx);
+@@ -239,6 +237,8 @@ void *ipu6_fw_com_prepare(struct ipu6_fw_com_cfg *cfg,
+ 		memcpy(specific_host_addr, cfg->specific_addr,
+ 		       cfg->specific_size);
+ 
++	ipu6_dma_sync_single(adev, ctx->config_vied_addr, sizeall);
++
+ 	/* initialize input queues */
+ 	offset += specific_size;
+ 	res.reg = SYSCOM_QPR_BASE_REG;
+@@ -315,8 +315,8 @@ int ipu6_fw_com_release(struct ipu6_fw_com_context *ctx, unsigned int force)
+ 	if (!force && !ctx->cell_ready(ctx->adev))
+ 		return -EBUSY;
+ 
+-	dma_free_attrs(&ctx->adev->auxdev.dev, ctx->dma_size,
+-		       ctx->dma_buffer, ctx->dma_addr, ctx->attrs);
++	ipu6_dma_free(ctx->adev, ctx->dma_size,
++		      ctx->dma_buffer, ctx->dma_addr, 0);
+ 	kfree(ctx);
+ 	return 0;
+ }
+diff --git a/drivers/media/pci/intel/ipu6/ipu6-mmu.c b/drivers/media/pci/intel/ipu6/ipu6-mmu.c
+index c3a20507d6dbcc..57298ac73d0722 100644
+--- a/drivers/media/pci/intel/ipu6/ipu6-mmu.c
++++ b/drivers/media/pci/intel/ipu6/ipu6-mmu.c
+@@ -97,13 +97,15 @@ static void page_table_dump(struct ipu6_mmu_info *mmu_info)
+ 	for (l1_idx = 0; l1_idx < ISP_L1PT_PTES; l1_idx++) {
+ 		u32 l2_idx;
+ 		u32 iova = (phys_addr_t)l1_idx << ISP_L1PT_SHIFT;
++		phys_addr_t l2_phys;
+ 
+ 		if (mmu_info->l1_pt[l1_idx] == mmu_info->dummy_l2_pteval)
+ 			continue;
++
++		l2_phys = TBL_PHYS_ADDR(mmu_info->l1_pt[l1_idx]);
+ 		dev_dbg(mmu_info->dev,
+-			"l1 entry %u; iovas 0x%8.8x-0x%8.8x, at %pa\n",
+-			l1_idx, iova, iova + ISP_PAGE_SIZE,
+-			TBL_PHYS_ADDR(mmu_info->l1_pt[l1_idx]));
++			"l1 entry %u; iovas 0x%8.8x-0x%8.8x, at %pap\n",
++			l1_idx, iova, iova + ISP_PAGE_SIZE, &l2_phys);
+ 
+ 		for (l2_idx = 0; l2_idx < ISP_L2PT_PTES; l2_idx++) {
+ 			u32 *l2_pt = mmu_info->l2_pts[l1_idx];
+@@ -227,7 +229,7 @@ static u32 *alloc_l1_pt(struct ipu6_mmu_info *mmu_info)
+ 	}
+ 
+ 	mmu_info->l1_pt_dma = dma >> ISP_PADDR_SHIFT;
+-	dev_dbg(mmu_info->dev, "l1 pt %p mapped at %llx\n", pt, dma);
++	dev_dbg(mmu_info->dev, "l1 pt %p mapped at %pad\n", pt, &dma);
+ 
+ 	return pt;
+ 
+@@ -330,8 +332,8 @@ static int __ipu6_mmu_map(struct ipu6_mmu_info *mmu_info, unsigned long iova,
+ 	u32 iova_end = ALIGN(iova + size, ISP_PAGE_SIZE);
+ 
+ 	dev_dbg(mmu_info->dev,
+-		"mapping iova 0x%8.8x--0x%8.8x, size %zu at paddr 0x%10.10llx\n",
+-		iova_start, iova_end, size, paddr);
++		"mapping iova 0x%8.8x--0x%8.8x, size %zu at paddr %pap\n",
++		iova_start, iova_end, size, &paddr);
+ 
+ 	return l2_map(mmu_info, iova_start, paddr, size);
+ }
+@@ -361,10 +363,13 @@ static size_t l2_unmap(struct ipu6_mmu_info *mmu_info, unsigned long iova,
+ 	for (l2_idx = (iova_start & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT;
+ 	     (iova_start & ISP_L1PT_MASK) + (l2_idx << ISP_PAGE_SHIFT)
+ 		     < iova_start + size && l2_idx < ISP_L2PT_PTES; l2_idx++) {
++		phys_addr_t pteval;
++
+ 		l2_pt = mmu_info->l2_pts[l1_idx];
++		pteval = TBL_PHYS_ADDR(l2_pt[l2_idx]);
+ 		dev_dbg(mmu_info->dev,
+-			"unmap l2 index %u with pteval 0x%10.10llx\n",
+-			l2_idx, TBL_PHYS_ADDR(l2_pt[l2_idx]));
++			"unmap l2 index %u with pteval 0x%p\n",
++			l2_idx, &pteval);
+ 		l2_pt[l2_idx] = mmu_info->dummy_page_pteval;
+ 
+ 		clflush_cache_range((void *)&l2_pt[l2_idx],
+@@ -525,9 +530,10 @@ static struct ipu6_mmu_info *ipu6_mmu_alloc(struct ipu6_device *isp)
+ 		return NULL;
+ 
+ 	mmu_info->aperture_start = 0;
+-	mmu_info->aperture_end = DMA_BIT_MASK(isp->secure_mode ?
+-					      IPU6_MMU_ADDR_BITS :
+-					      IPU6_MMU_ADDR_BITS_NON_SECURE);
++	mmu_info->aperture_end =
++		(dma_addr_t)DMA_BIT_MASK(isp->secure_mode ?
++					 IPU6_MMU_ADDR_BITS :
++					 IPU6_MMU_ADDR_BITS_NON_SECURE);
+ 	mmu_info->pgsize_bitmap = SZ_4K;
+ 	mmu_info->dev = &isp->pdev->dev;
+ 
+diff --git a/drivers/media/pci/intel/ipu6/ipu6.c b/drivers/media/pci/intel/ipu6/ipu6.c
+index 7fb707d3530967..91718eabd74e57 100644
+--- a/drivers/media/pci/intel/ipu6/ipu6.c
++++ b/drivers/media/pci/intel/ipu6/ipu6.c
+@@ -752,6 +752,9 @@ static void ipu6_pci_reset_done(struct pci_dev *pdev)
+  */
+ static int ipu6_suspend(struct device *dev)
+ {
++	struct pci_dev *pdev = to_pci_dev(dev);
++
++	synchronize_irq(pdev->irq);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/media/radio/wl128x/fmdrv_common.c b/drivers/media/radio/wl128x/fmdrv_common.c
+index 3d36f323a8f8f7..4d032436691c1b 100644
+--- a/drivers/media/radio/wl128x/fmdrv_common.c
++++ b/drivers/media/radio/wl128x/fmdrv_common.c
+@@ -466,11 +466,12 @@ int fmc_send_cmd(struct fmdev *fmdev, u8 fm_op, u16 type, void *payload,
+ 			   jiffies_to_msecs(FM_DRV_TX_TIMEOUT) / 1000);
+ 		return -ETIMEDOUT;
+ 	}
++	spin_lock_irqsave(&fmdev->resp_skb_lock, flags);
+ 	if (!fmdev->resp_skb) {
++		spin_unlock_irqrestore(&fmdev->resp_skb_lock, flags);
+ 		fmerr("Response SKB is missing\n");
+ 		return -EFAULT;
+ 	}
+-	spin_lock_irqsave(&fmdev->resp_skb_lock, flags);
+ 	skb = fmdev->resp_skb;
+ 	fmdev->resp_skb = NULL;
+ 	spin_unlock_irqrestore(&fmdev->resp_skb_lock, flags);
+diff --git a/drivers/media/test-drivers/vivid/vivid-vid-cap.c b/drivers/media/test-drivers/vivid/vivid-vid-cap.c
+index 6a790ac8cbe689..f25e011153642e 100644
+--- a/drivers/media/test-drivers/vivid/vivid-vid-cap.c
++++ b/drivers/media/test-drivers/vivid/vivid-vid-cap.c
+@@ -1459,12 +1459,19 @@ static bool valid_cvt_gtf_timings(struct v4l2_dv_timings *timings)
+ 	h_freq = (u32)bt->pixelclock / total_h_pixel;
+ 
+ 	if (bt->standards == 0 || (bt->standards & V4L2_DV_BT_STD_CVT)) {
++		struct v4l2_dv_timings cvt = {};
++
+ 		if (v4l2_detect_cvt(total_v_lines, h_freq, bt->vsync, bt->width,
+-				    bt->polarities, bt->interlaced, timings))
++				    bt->polarities, bt->interlaced,
++				    &vivid_dv_timings_cap, &cvt) &&
++		    cvt.bt.width == bt->width && cvt.bt.height == bt->height) {
++			*timings = cvt;
+ 			return true;
++		}
+ 	}
+ 
+ 	if (bt->standards == 0 || (bt->standards & V4L2_DV_BT_STD_GTF)) {
++		struct v4l2_dv_timings gtf = {};
+ 		struct v4l2_fract aspect_ratio;
+ 
+ 		find_aspect_ratio(bt->width, bt->height,
+@@ -1472,8 +1479,12 @@ static bool valid_cvt_gtf_timings(struct v4l2_dv_timings *timings)
+ 				  &aspect_ratio.denominator);
+ 		if (v4l2_detect_gtf(total_v_lines, h_freq, bt->vsync,
+ 				    bt->polarities, bt->interlaced,
+-				    aspect_ratio, timings))
++				    aspect_ratio, &vivid_dv_timings_cap,
++				    &gtf) &&
++		    gtf.bt.width == bt->width && gtf.bt.height == bt->height) {
++			*timings = gtf;
+ 			return true;
++		}
+ 	}
+ 	return false;
+ }
+diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c
+index 942d0005c55e82..2cf5dcee0ce800 100644
+--- a/drivers/media/v4l2-core/v4l2-dv-timings.c
++++ b/drivers/media/v4l2-core/v4l2-dv-timings.c
+@@ -481,25 +481,28 @@ EXPORT_SYMBOL_GPL(v4l2_calc_timeperframe);
+  * @polarities - the horizontal and vertical polarities (same as struct
+  *		v4l2_bt_timings polarities).
+  * @interlaced - if this flag is true, it indicates interlaced format
+- * @fmt - the resulting timings.
++ * @cap - the v4l2_dv_timings_cap capabilities.
++ * @timings - the resulting timings.
+  *
+  * This function will attempt to detect if the given values correspond to a
+  * valid CVT format. If so, then it will return true, and fmt will be filled
+  * in with the found CVT timings.
+  */
+-bool v4l2_detect_cvt(unsigned frame_height,
+-		     unsigned hfreq,
+-		     unsigned vsync,
+-		     unsigned active_width,
++bool v4l2_detect_cvt(unsigned int frame_height,
++		     unsigned int hfreq,
++		     unsigned int vsync,
++		     unsigned int active_width,
+ 		     u32 polarities,
+ 		     bool interlaced,
+-		     struct v4l2_dv_timings *fmt)
++		     const struct v4l2_dv_timings_cap *cap,
++		     struct v4l2_dv_timings *timings)
+ {
+-	int  v_fp, v_bp, h_fp, h_bp, hsync;
+-	int  frame_width, image_height, image_width;
++	struct v4l2_dv_timings t = {};
++	int v_fp, v_bp, h_fp, h_bp, hsync;
++	int frame_width, image_height, image_width;
+ 	bool reduced_blanking;
+ 	bool rb_v2 = false;
+-	unsigned pix_clk;
++	unsigned int pix_clk;
+ 
+ 	if (vsync < 4 || vsync > 8)
+ 		return false;
+@@ -625,36 +628,39 @@ bool v4l2_detect_cvt(unsigned frame_height,
+ 		h_fp = h_blank - hsync - h_bp;
+ 	}
+ 
+-	fmt->type = V4L2_DV_BT_656_1120;
+-	fmt->bt.polarities = polarities;
+-	fmt->bt.width = image_width;
+-	fmt->bt.height = image_height;
+-	fmt->bt.hfrontporch = h_fp;
+-	fmt->bt.vfrontporch = v_fp;
+-	fmt->bt.hsync = hsync;
+-	fmt->bt.vsync = vsync;
+-	fmt->bt.hbackporch = frame_width - image_width - h_fp - hsync;
++	t.type = V4L2_DV_BT_656_1120;
++	t.bt.polarities = polarities;
++	t.bt.width = image_width;
++	t.bt.height = image_height;
++	t.bt.hfrontporch = h_fp;
++	t.bt.vfrontporch = v_fp;
++	t.bt.hsync = hsync;
++	t.bt.vsync = vsync;
++	t.bt.hbackporch = frame_width - image_width - h_fp - hsync;
+ 
+ 	if (!interlaced) {
+-		fmt->bt.vbackporch = frame_height - image_height - v_fp - vsync;
+-		fmt->bt.interlaced = V4L2_DV_PROGRESSIVE;
++		t.bt.vbackporch = frame_height - image_height - v_fp - vsync;
++		t.bt.interlaced = V4L2_DV_PROGRESSIVE;
+ 	} else {
+-		fmt->bt.vbackporch = (frame_height - image_height - 2 * v_fp -
++		t.bt.vbackporch = (frame_height - image_height - 2 * v_fp -
+ 				      2 * vsync) / 2;
+-		fmt->bt.il_vbackporch = frame_height - image_height - 2 * v_fp -
+-					2 * vsync - fmt->bt.vbackporch;
+-		fmt->bt.il_vfrontporch = v_fp;
+-		fmt->bt.il_vsync = vsync;
+-		fmt->bt.flags |= V4L2_DV_FL_HALF_LINE;
+-		fmt->bt.interlaced = V4L2_DV_INTERLACED;
++		t.bt.il_vbackporch = frame_height - image_height - 2 * v_fp -
++					2 * vsync - t.bt.vbackporch;
++		t.bt.il_vfrontporch = v_fp;
++		t.bt.il_vsync = vsync;
++		t.bt.flags |= V4L2_DV_FL_HALF_LINE;
++		t.bt.interlaced = V4L2_DV_INTERLACED;
+ 	}
+ 
+-	fmt->bt.pixelclock = pix_clk;
+-	fmt->bt.standards = V4L2_DV_BT_STD_CVT;
++	t.bt.pixelclock = pix_clk;
++	t.bt.standards = V4L2_DV_BT_STD_CVT;
+ 
+ 	if (reduced_blanking)
+-		fmt->bt.flags |= V4L2_DV_FL_REDUCED_BLANKING;
++		t.bt.flags |= V4L2_DV_FL_REDUCED_BLANKING;
+ 
++	if (!v4l2_valid_dv_timings(&t, cap, NULL, NULL))
++		return false;
++	*timings = t;
+ 	return true;
+ }
+ EXPORT_SYMBOL_GPL(v4l2_detect_cvt);
+@@ -699,22 +705,25 @@ EXPORT_SYMBOL_GPL(v4l2_detect_cvt);
+  *		image height, so it has to be passed explicitly. Usually
+  *		the native screen aspect ratio is used for this. If it
+  *		is not filled in correctly, then 16:9 will be assumed.
+- * @fmt - the resulting timings.
++ * @cap - the v4l2_dv_timings_cap capabilities.
++ * @timings - the resulting timings.
+  *
+  * This function will attempt to detect if the given values correspond to a
+  * valid GTF format. If so, then it will return true, and fmt will be filled
+  * in with the found GTF timings.
+  */
+-bool v4l2_detect_gtf(unsigned frame_height,
+-		unsigned hfreq,
+-		unsigned vsync,
+-		u32 polarities,
+-		bool interlaced,
+-		struct v4l2_fract aspect,
+-		struct v4l2_dv_timings *fmt)
++bool v4l2_detect_gtf(unsigned int frame_height,
++		     unsigned int hfreq,
++		     unsigned int vsync,
++		     u32 polarities,
++		     bool interlaced,
++		     struct v4l2_fract aspect,
++		     const struct v4l2_dv_timings_cap *cap,
++		     struct v4l2_dv_timings *timings)
+ {
++	struct v4l2_dv_timings t = {};
+ 	int pix_clk;
+-	int  v_fp, v_bp, h_fp, hsync;
++	int v_fp, v_bp, h_fp, hsync;
+ 	int frame_width, image_height, image_width;
+ 	bool default_gtf;
+ 	int h_blank;
+@@ -783,36 +792,39 @@ bool v4l2_detect_gtf(unsigned frame_height,
+ 
+ 	h_fp = h_blank / 2 - hsync;
+ 
+-	fmt->type = V4L2_DV_BT_656_1120;
+-	fmt->bt.polarities = polarities;
+-	fmt->bt.width = image_width;
+-	fmt->bt.height = image_height;
+-	fmt->bt.hfrontporch = h_fp;
+-	fmt->bt.vfrontporch = v_fp;
+-	fmt->bt.hsync = hsync;
+-	fmt->bt.vsync = vsync;
+-	fmt->bt.hbackporch = frame_width - image_width - h_fp - hsync;
++	t.type = V4L2_DV_BT_656_1120;
++	t.bt.polarities = polarities;
++	t.bt.width = image_width;
++	t.bt.height = image_height;
++	t.bt.hfrontporch = h_fp;
++	t.bt.vfrontporch = v_fp;
++	t.bt.hsync = hsync;
++	t.bt.vsync = vsync;
++	t.bt.hbackporch = frame_width - image_width - h_fp - hsync;
+ 
+ 	if (!interlaced) {
+-		fmt->bt.vbackporch = frame_height - image_height - v_fp - vsync;
+-		fmt->bt.interlaced = V4L2_DV_PROGRESSIVE;
++		t.bt.vbackporch = frame_height - image_height - v_fp - vsync;
++		t.bt.interlaced = V4L2_DV_PROGRESSIVE;
+ 	} else {
+-		fmt->bt.vbackporch = (frame_height - image_height - 2 * v_fp -
++		t.bt.vbackporch = (frame_height - image_height - 2 * v_fp -
+ 				      2 * vsync) / 2;
+-		fmt->bt.il_vbackporch = frame_height - image_height - 2 * v_fp -
+-					2 * vsync - fmt->bt.vbackporch;
+-		fmt->bt.il_vfrontporch = v_fp;
+-		fmt->bt.il_vsync = vsync;
+-		fmt->bt.flags |= V4L2_DV_FL_HALF_LINE;
+-		fmt->bt.interlaced = V4L2_DV_INTERLACED;
++		t.bt.il_vbackporch = frame_height - image_height - 2 * v_fp -
++					2 * vsync - t.bt.vbackporch;
++		t.bt.il_vfrontporch = v_fp;
++		t.bt.il_vsync = vsync;
++		t.bt.flags |= V4L2_DV_FL_HALF_LINE;
++		t.bt.interlaced = V4L2_DV_INTERLACED;
+ 	}
+ 
+-	fmt->bt.pixelclock = pix_clk;
+-	fmt->bt.standards = V4L2_DV_BT_STD_GTF;
++	t.bt.pixelclock = pix_clk;
++	t.bt.standards = V4L2_DV_BT_STD_GTF;
+ 
+ 	if (!default_gtf)
+-		fmt->bt.flags |= V4L2_DV_FL_REDUCED_BLANKING;
++		t.bt.flags |= V4L2_DV_FL_REDUCED_BLANKING;
+ 
++	if (!v4l2_valid_dv_timings(&t, cap, NULL, NULL))
++		return false;
++	*timings = t;
+ 	return true;
+ }
+ EXPORT_SYMBOL_GPL(v4l2_detect_gtf);
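
[The v4l2_detect_cvt()/v4l2_detect_gtf() hunks above share one idea: the candidate timings are now assembled in a stack-local struct and copied to the caller only after v4l2_valid_dv_timings() accepts them, so a failed detection can no longer leave a half-written result in the output buffer. A minimal sketch of this validate-then-publish pattern, with hypothetical compute_candidate() and validate() helpers standing in for the CVT/GTF arithmetic and the capability check:

#include <stdbool.h>

struct timings { unsigned int width, height, pixelclock; };

static bool compute_candidate(struct timings *t);   /* hypothetical */
static bool validate(const struct timings *t);      /* hypothetical */

static bool detect(struct timings *out)
{
        struct timings t = {};          /* build the result locally */

        if (!compute_candidate(&t))
                return false;
        if (!validate(&t))              /* reject before touching *out */
                return false;
        *out = t;                       /* publish only a valid result */
        return true;
}
]
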
+diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
+index a0bcb0864ecd2c..a798e26c6402d4 100644
+--- a/drivers/message/fusion/mptsas.c
++++ b/drivers/message/fusion/mptsas.c
+@@ -4231,10 +4231,8 @@ mptsas_find_phyinfo_by_phys_disk_num(MPT_ADAPTER *ioc, u8 phys_disk_num,
+ static void
+ mptsas_reprobe_lun(struct scsi_device *sdev, void *data)
+ {
+-	int rc;
+-
+ 	sdev->no_uld_attach = data ? 1 : 0;
+-	rc = scsi_device_reprobe(sdev);
++	WARN_ON(scsi_device_reprobe(sdev));
+ }
+ 
+ static void
+diff --git a/drivers/mfd/da9052-spi.c b/drivers/mfd/da9052-spi.c
+index be5f2b34e18aeb..80fc5c0cac2fb0 100644
+--- a/drivers/mfd/da9052-spi.c
++++ b/drivers/mfd/da9052-spi.c
+@@ -37,7 +37,7 @@ static int da9052_spi_probe(struct spi_device *spi)
+ 	spi_set_drvdata(spi, da9052);
+ 
+ 	config = da9052_regmap_config;
+-	config.read_flag_mask = 1;
++	config.write_flag_mask = 1;
+ 	config.reg_bits = 7;
+ 	config.pad_bits = 1;
+ 	config.val_bits = 8;
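
[The da9052-spi fix swaps read_flag_mask for write_flag_mask: judging by the change, this PMIC expects the R/W bit in the 7-bit register address to be set on writes rather than reads, so regmap must OR the flag into write transfers only. A sketch of how such a config is expressed; the field values mirror the hunk, and whether bit 0 marks writes on other parts is an assumption to verify against the datasheet:

#include <linux/regmap.h>

static const struct regmap_config spi_pmic_regmap_sketch = {
        .reg_bits = 7,          /* 7-bit register address */
        .pad_bits = 1,          /* one pad bit after the address */
        .val_bits = 8,          /* 8-bit register values */
        .write_flag_mask = 1,   /* R/W bit ORed in on writes only */
};
]
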
+diff --git a/drivers/mfd/intel_soc_pmic_bxtwc.c b/drivers/mfd/intel_soc_pmic_bxtwc.c
+index ccd76800d8e49b..b7204072e93ef8 100644
+--- a/drivers/mfd/intel_soc_pmic_bxtwc.c
++++ b/drivers/mfd/intel_soc_pmic_bxtwc.c
+@@ -148,6 +148,7 @@ static const struct regmap_irq_chip bxtwc_regmap_irq_chip = {
+ 
+ static const struct regmap_irq_chip bxtwc_regmap_irq_chip_pwrbtn = {
+ 	.name = "bxtwc_irq_chip_pwrbtn",
++	.domain_suffix = "PWRBTN",
+ 	.status_base = BXTWC_PWRBTNIRQ,
+ 	.mask_base = BXTWC_MPWRBTNIRQ,
+ 	.irqs = bxtwc_regmap_irqs_pwrbtn,
+@@ -157,6 +158,7 @@ static const struct regmap_irq_chip bxtwc_regmap_irq_chip_pwrbtn = {
+ 
+ static const struct regmap_irq_chip bxtwc_regmap_irq_chip_tmu = {
+ 	.name = "bxtwc_irq_chip_tmu",
++	.domain_suffix = "TMU",
+ 	.status_base = BXTWC_TMUIRQ,
+ 	.mask_base = BXTWC_MTMUIRQ,
+ 	.irqs = bxtwc_regmap_irqs_tmu,
+@@ -166,6 +168,7 @@ static const struct regmap_irq_chip bxtwc_regmap_irq_chip_tmu = {
+ 
+ static const struct regmap_irq_chip bxtwc_regmap_irq_chip_bcu = {
+ 	.name = "bxtwc_irq_chip_bcu",
++	.domain_suffix = "BCU",
+ 	.status_base = BXTWC_BCUIRQ,
+ 	.mask_base = BXTWC_MBCUIRQ,
+ 	.irqs = bxtwc_regmap_irqs_bcu,
+@@ -175,6 +178,7 @@ static const struct regmap_irq_chip bxtwc_regmap_irq_chip_bcu = {
+ 
+ static const struct regmap_irq_chip bxtwc_regmap_irq_chip_adc = {
+ 	.name = "bxtwc_irq_chip_adc",
++	.domain_suffix = "ADC",
+ 	.status_base = BXTWC_ADCIRQ,
+ 	.mask_base = BXTWC_MADCIRQ,
+ 	.irqs = bxtwc_regmap_irqs_adc,
+@@ -184,6 +188,7 @@ static const struct regmap_irq_chip bxtwc_regmap_irq_chip_adc = {
+ 
+ static const struct regmap_irq_chip bxtwc_regmap_irq_chip_chgr = {
+ 	.name = "bxtwc_irq_chip_chgr",
++	.domain_suffix = "CHGR",
+ 	.status_base = BXTWC_CHGR0IRQ,
+ 	.mask_base = BXTWC_MCHGR0IRQ,
+ 	.irqs = bxtwc_regmap_irqs_chgr,
+@@ -193,6 +198,7 @@ static const struct regmap_irq_chip bxtwc_regmap_irq_chip_chgr = {
+ 
+ static const struct regmap_irq_chip bxtwc_regmap_irq_chip_crit = {
+ 	.name = "bxtwc_irq_chip_crit",
++	.domain_suffix = "CRIT",
+ 	.status_base = BXTWC_CRITIRQ,
+ 	.mask_base = BXTWC_MCRITIRQ,
+ 	.irqs = bxtwc_regmap_irqs_crit,
+@@ -230,44 +236,55 @@ static const struct resource tmu_resources[] = {
+ };
+ 
+ static struct mfd_cell bxt_wc_dev[] = {
+-	{
+-		.name = "bxt_wcove_gpadc",
+-		.num_resources = ARRAY_SIZE(adc_resources),
+-		.resources = adc_resources,
+-	},
+ 	{
+ 		.name = "bxt_wcove_thermal",
+ 		.num_resources = ARRAY_SIZE(thermal_resources),
+ 		.resources = thermal_resources,
+ 	},
+ 	{
+-		.name = "bxt_wcove_usbc",
+-		.num_resources = ARRAY_SIZE(usbc_resources),
+-		.resources = usbc_resources,
++		.name = "bxt_wcove_gpio",
++		.num_resources = ARRAY_SIZE(gpio_resources),
++		.resources = gpio_resources,
+ 	},
+ 	{
+-		.name = "bxt_wcove_ext_charger",
+-		.num_resources = ARRAY_SIZE(charger_resources),
+-		.resources = charger_resources,
++		.name = "bxt_wcove_region",
++	},
++};
++
++static const struct mfd_cell bxt_wc_tmu_dev[] = {
++	{
++		.name = "bxt_wcove_tmu",
++		.num_resources = ARRAY_SIZE(tmu_resources),
++		.resources = tmu_resources,
+ 	},
++};
++
++static const struct mfd_cell bxt_wc_bcu_dev[] = {
+ 	{
+ 		.name = "bxt_wcove_bcu",
+ 		.num_resources = ARRAY_SIZE(bcu_resources),
+ 		.resources = bcu_resources,
+ 	},
++};
++
++static const struct mfd_cell bxt_wc_adc_dev[] = {
+ 	{
+-		.name = "bxt_wcove_tmu",
+-		.num_resources = ARRAY_SIZE(tmu_resources),
+-		.resources = tmu_resources,
++		.name = "bxt_wcove_gpadc",
++		.num_resources = ARRAY_SIZE(adc_resources),
++		.resources = adc_resources,
+ 	},
++};
+ 
++static struct mfd_cell bxt_wc_chgr_dev[] = {
+ 	{
+-		.name = "bxt_wcove_gpio",
+-		.num_resources = ARRAY_SIZE(gpio_resources),
+-		.resources = gpio_resources,
++		.name = "bxt_wcove_usbc",
++		.num_resources = ARRAY_SIZE(usbc_resources),
++		.resources = usbc_resources,
+ 	},
+ 	{
+-		.name = "bxt_wcove_region",
++		.name = "bxt_wcove_ext_charger",
++		.num_resources = ARRAY_SIZE(charger_resources),
++		.resources = charger_resources,
+ 	},
+ };
+ 
+@@ -425,6 +442,26 @@ static int bxtwc_add_chained_irq_chip(struct intel_soc_pmic *pmic,
+ 					0, chip, data);
+ }
+ 
++static int bxtwc_add_chained_devices(struct intel_soc_pmic *pmic,
++				     const struct mfd_cell *cells, int n_devs,
++				     struct regmap_irq_chip_data *pdata,
++				     int pirq, int irq_flags,
++				     const struct regmap_irq_chip *chip,
++				     struct regmap_irq_chip_data **data)
++{
++	struct device *dev = pmic->dev;
++	struct irq_domain *domain;
++	int ret;
++
++	ret = bxtwc_add_chained_irq_chip(pmic, pdata, pirq, irq_flags, chip, data);
++	if (ret)
++		return dev_err_probe(dev, ret, "Failed to add %s IRQ chip\n", chip->name);
++
++	domain = regmap_irq_get_domain(*data);
++
++	return devm_mfd_add_devices(dev, PLATFORM_DEVID_NONE, cells, n_devs, NULL, 0, domain);
++}
++
+ static int bxtwc_probe(struct platform_device *pdev)
+ {
+ 	struct device *dev = &pdev->dev;
+@@ -466,6 +503,15 @@ static int bxtwc_probe(struct platform_device *pdev)
+ 	if (ret)
+ 		return dev_err_probe(dev, ret, "Failed to add IRQ chip\n");
+ 
++	ret = bxtwc_add_chained_devices(pmic, bxt_wc_tmu_dev, ARRAY_SIZE(bxt_wc_tmu_dev),
++					pmic->irq_chip_data,
++					BXTWC_TMU_LVL1_IRQ,
++					IRQF_ONESHOT,
++					&bxtwc_regmap_irq_chip_tmu,
++					&pmic->irq_chip_data_tmu);
++	if (ret)
++		return ret;
++
+ 	ret = bxtwc_add_chained_irq_chip(pmic, pmic->irq_chip_data,
+ 					 BXTWC_PWRBTN_LVL1_IRQ,
+ 					 IRQF_ONESHOT,
+@@ -474,40 +520,32 @@ static int bxtwc_probe(struct platform_device *pdev)
+ 	if (ret)
+ 		return dev_err_probe(dev, ret, "Failed to add PWRBTN IRQ chip\n");
+ 
+-	ret = bxtwc_add_chained_irq_chip(pmic, pmic->irq_chip_data,
+-					 BXTWC_TMU_LVL1_IRQ,
+-					 IRQF_ONESHOT,
+-					 &bxtwc_regmap_irq_chip_tmu,
+-					 &pmic->irq_chip_data_tmu);
+-	if (ret)
+-		return dev_err_probe(dev, ret, "Failed to add TMU IRQ chip\n");
+-
+-	/* Add chained IRQ handler for BCU IRQs */
+-	ret = bxtwc_add_chained_irq_chip(pmic, pmic->irq_chip_data,
+-					 BXTWC_BCU_LVL1_IRQ,
+-					 IRQF_ONESHOT,
+-					 &bxtwc_regmap_irq_chip_bcu,
+-					 &pmic->irq_chip_data_bcu);
++	ret = bxtwc_add_chained_devices(pmic, bxt_wc_bcu_dev, ARRAY_SIZE(bxt_wc_bcu_dev),
++					pmic->irq_chip_data,
++					BXTWC_BCU_LVL1_IRQ,
++					IRQF_ONESHOT,
++					&bxtwc_regmap_irq_chip_bcu,
++					&pmic->irq_chip_data_bcu);
+ 	if (ret)
+-		return dev_err_probe(dev, ret, "Failed to add BUC IRQ chip\n");
++		return ret;
+ 
+-	/* Add chained IRQ handler for ADC IRQs */
+-	ret = bxtwc_add_chained_irq_chip(pmic, pmic->irq_chip_data,
+-					 BXTWC_ADC_LVL1_IRQ,
+-					 IRQF_ONESHOT,
+-					 &bxtwc_regmap_irq_chip_adc,
+-					 &pmic->irq_chip_data_adc);
++	ret = bxtwc_add_chained_devices(pmic, bxt_wc_adc_dev, ARRAY_SIZE(bxt_wc_adc_dev),
++					pmic->irq_chip_data,
++					BXTWC_ADC_LVL1_IRQ,
++					IRQF_ONESHOT,
++					&bxtwc_regmap_irq_chip_adc,
++					&pmic->irq_chip_data_adc);
+ 	if (ret)
+-		return dev_err_probe(dev, ret, "Failed to add ADC IRQ chip\n");
++		return ret;
+ 
+-	/* Add chained IRQ handler for CHGR IRQs */
+-	ret = bxtwc_add_chained_irq_chip(pmic, pmic->irq_chip_data,
+-					 BXTWC_CHGR_LVL1_IRQ,
+-					 IRQF_ONESHOT,
+-					 &bxtwc_regmap_irq_chip_chgr,
+-					 &pmic->irq_chip_data_chgr);
++	ret = bxtwc_add_chained_devices(pmic, bxt_wc_chgr_dev, ARRAY_SIZE(bxt_wc_chgr_dev),
++					pmic->irq_chip_data,
++					BXTWC_CHGR_LVL1_IRQ,
++					IRQF_ONESHOT,
++					&bxtwc_regmap_irq_chip_chgr,
++					&pmic->irq_chip_data_chgr);
+ 	if (ret)
+-		return dev_err_probe(dev, ret, "Failed to add CHGR IRQ chip\n");
++		return ret;
+ 
+ 	/* Add chained IRQ handler for CRIT IRQs */
+ 	ret = bxtwc_add_chained_irq_chip(pmic, pmic->irq_chip_data,
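
[The bxtwc refactor pairs each chained sub-IRQ chip with the MFD cells that consume its interrupts: bxtwc_add_chained_devices() registers the chip, fetches its IRQ domain, and hands that domain to devm_mfd_add_devices(), while the new domain_suffix entries keep the per-function domains distinguishable. A condensed sketch of the same pairing using the stock regmap/MFD APIs; the cells array and chip definition are assumed to exist elsewhere:

#include <linux/interrupt.h>
#include <linux/mfd/core.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>

static int add_chip_and_cells(struct device *dev, struct regmap *map,
                              int irq, const struct regmap_irq_chip *chip,
                              const struct mfd_cell *cells, int n_cells,
                              struct regmap_irq_chip_data **data)
{
        struct irq_domain *domain;
        int ret;

        ret = devm_regmap_add_irq_chip(dev, map, irq, IRQF_ONESHOT,
                                       0, chip, data);
        if (ret)
                return ret;

        /* Route the cells' interrupts through the sub-chip's domain. */
        domain = regmap_irq_get_domain(*data);
        return devm_mfd_add_devices(dev, PLATFORM_DEVID_NONE, cells,
                                    n_cells, NULL, 0, domain);
}
]
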
+diff --git a/drivers/mfd/rt5033.c b/drivers/mfd/rt5033.c
+index 7e23ab3d5842c8..84ebc96f58e48d 100644
+--- a/drivers/mfd/rt5033.c
++++ b/drivers/mfd/rt5033.c
+@@ -81,8 +81,8 @@ static int rt5033_i2c_probe(struct i2c_client *i2c)
+ 	chip_rev = dev_id & RT5033_CHIP_REV_MASK;
+ 	dev_info(&i2c->dev, "Device found (rev. %d)\n", chip_rev);
+ 
+-	ret = regmap_add_irq_chip(rt5033->regmap, rt5033->irq,
+-			IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
++	ret = devm_regmap_add_irq_chip(rt5033->dev, rt5033->regmap,
++			rt5033->irq, IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ 			0, &rt5033_irq_chip, &rt5033->irq_data);
+ 	if (ret) {
+ 		dev_err(&i2c->dev, "Failed to request IRQ %d: %d\n",
+diff --git a/drivers/mfd/tps65010.c b/drivers/mfd/tps65010.c
+index 2b9105295f3012..710364435b6b9e 100644
+--- a/drivers/mfd/tps65010.c
++++ b/drivers/mfd/tps65010.c
+@@ -544,17 +544,13 @@ static int tps65010_probe(struct i2c_client *client)
+ 	 */
+ 	if (client->irq > 0) {
+ 		status = request_irq(client->irq, tps65010_irq,
+-				     IRQF_TRIGGER_FALLING, DRIVER_NAME, tps);
++				     IRQF_TRIGGER_FALLING | IRQF_NO_AUTOEN,
++				     DRIVER_NAME, tps);
+ 		if (status < 0) {
+ 			dev_dbg(&client->dev, "can't get IRQ %d, err %d\n",
+ 					client->irq, status);
+ 			return status;
+ 		}
+-		/* annoying race here, ideally we'd have an option
+-		 * to claim the irq now and enable it later.
+-		 * FIXME genirq IRQF_NOAUTOEN now solves that ...
+-		 */
+-		disable_irq(client->irq);
+ 		set_bit(FLAG_IRQ_ENABLE, &tps->flags);
+ 	} else
+ 		dev_warn(&client->dev, "IRQ not configured!\n");
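
[The tps65010 hunk resolves the race its own deleted FIXME described: IRQF_NO_AUTOEN makes request_irq() claim the line while leaving it masked, instead of briefly enabling it before disable_irq() catches up. A sketch of the idiom, assuming a my_handler()/priv pair:

#include <linux/interrupt.h>

static irqreturn_t my_handler(int irq, void *data);     /* assumed */

static int claim_irq_masked(int irq, void *priv)
{
        int ret;

        /* Claim the line but leave it disabled: no window in which
         * the handler can run before driver state is ready. */
        ret = request_irq(irq, my_handler,
                          IRQF_TRIGGER_FALLING | IRQF_NO_AUTOEN,
                          "my-driver", priv);
        if (ret < 0)
                return ret;

        /* ... initialise everything the handler touches ... */

        enable_irq(irq);        /* unmask once setup is complete */
        return 0;
}
]
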
+diff --git a/drivers/misc/apds990x.c b/drivers/misc/apds990x.c
+index 6d4edd69db126a..e7d73c972f65dc 100644
+--- a/drivers/misc/apds990x.c
++++ b/drivers/misc/apds990x.c
+@@ -1147,7 +1147,7 @@ static int apds990x_probe(struct i2c_client *client)
+ 		err = chip->pdata->setup_resources();
+ 		if (err) {
+ 			err = -EINVAL;
+-			goto fail3;
++			goto fail4;
+ 		}
+ 	}
+ 
+@@ -1155,7 +1155,7 @@ static int apds990x_probe(struct i2c_client *client)
+ 				apds990x_attribute_group);
+ 	if (err < 0) {
+ 		dev_err(&chip->client->dev, "Sysfs registration failed\n");
+-		goto fail4;
++		goto fail5;
+ 	}
+ 
+ 	err = request_threaded_irq(client->irq, NULL,
+@@ -1166,15 +1166,17 @@ static int apds990x_probe(struct i2c_client *client)
+ 	if (err) {
+ 		dev_err(&client->dev, "could not get IRQ %d\n",
+ 			client->irq);
+-		goto fail5;
++		goto fail6;
+ 	}
+ 	return err;
+-fail5:
++fail6:
+ 	sysfs_remove_group(&chip->client->dev.kobj,
+ 			&apds990x_attribute_group[0]);
+-fail4:
++fail5:
+ 	if (chip->pdata && chip->pdata->release_resources)
+ 		chip->pdata->release_resources();
++fail4:
++	pm_runtime_disable(&client->dev);
+ fail3:
+ 	regulator_bulk_disable(ARRAY_SIZE(chip->regs), chip->regs);
+ fail2:
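
[The apds990x relabelling restores the invariant that makes goto-ladder unwinding safe: each label undoes acquisitions in exactly the reverse order they were made, and the new fail4 slot inserts the previously leaked pm_runtime_disable() step at the right depth. A skeletal sketch of the convention, with hypothetical acquire/release pairs standing in for regulators, runtime PM, sysfs groups and IRQs:

static int acquire_a(void), acquire_b(void), acquire_c(void);  /* hypothetical */
static void release_a(void), release_b(void);                  /* hypothetical */

static int probe_sketch(void)
{
        int err;

        err = acquire_a();
        if (err)
                return err;
        err = acquire_b();
        if (err)
                goto undo_a;
        err = acquire_c();
        if (err)
                goto undo_b;    /* each failure unwinds all earlier steps */
        return 0;

undo_b:
        release_b();
undo_a:
        release_a();
        return err;
}
]
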
+diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c
+index 62ba0152547975..376047beea3d64 100644
+--- a/drivers/misc/lkdtm/bugs.c
++++ b/drivers/misc/lkdtm/bugs.c
+@@ -445,7 +445,7 @@ static void lkdtm_FAM_BOUNDS(void)
+ 
+ 	pr_err("FAIL: survived access of invalid flexible array member index!\n");
+ 
+-	if (!__has_attribute(__counted_by__))
++	if (!IS_ENABLED(CONFIG_CC_HAS_COUNTED_BY))
+ 		pr_warn("This is expected since this %s was built with a compiler that does not support __counted_by\n",
+ 			lkdtm_kernel_info);
+ 	else if (IS_ENABLED(CONFIG_UBSAN_BOUNDS))
+diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
+index 8fee7052f2ef4f..47443fb5eb3362 100644
+--- a/drivers/mmc/host/mmc_spi.c
++++ b/drivers/mmc/host/mmc_spi.c
+@@ -222,10 +222,6 @@ static int mmc_spi_response_get(struct mmc_spi_host *host,
+ 	u8 	leftover = 0;
+ 	unsigned short rotator;
+ 	int 	i;
+-	char	tag[32];
+-
+-	snprintf(tag, sizeof(tag), "  ... CMD%d response SPI_%s",
+-		cmd->opcode, maptype(cmd));
+ 
+ 	/* Except for data block reads, the whole response will already
+ 	 * be stored in the scratch buffer.  It's somewhere after the
+@@ -378,8 +374,9 @@ static int mmc_spi_response_get(struct mmc_spi_host *host,
+ 	}
+ 
+ 	if (value < 0)
+-		dev_dbg(&host->spi->dev, "%s: resp %04x %08x\n",
+-			tag, cmd->resp[0], cmd->resp[1]);
++		dev_dbg(&host->spi->dev,
++			"  ... CMD%d response SPI_%s: resp %04x %08x\n",
++			cmd->opcode, maptype(cmd), cmd->resp[0], cmd->resp[1]);
+ 
+ 	/* disable chipselect on errors and some success cases */
+ 	if (value >= 0 && cs_on)
+diff --git a/drivers/mtd/hyperbus/rpc-if.c b/drivers/mtd/hyperbus/rpc-if.c
+index b22aa57119f238..e7a28f3316c3f2 100644
+--- a/drivers/mtd/hyperbus/rpc-if.c
++++ b/drivers/mtd/hyperbus/rpc-if.c
+@@ -163,9 +163,16 @@ static void rpcif_hb_remove(struct platform_device *pdev)
+ 	pm_runtime_disable(hyperbus->rpc.dev);
+ }
+ 
++static const struct platform_device_id rpc_if_hyperflash_id_table[] = {
++	{ .name = "rpc-if-hyperflash" },
++	{ /* sentinel */ }
++};
++MODULE_DEVICE_TABLE(platform, rpc_if_hyperflash_id_table);
++
+ static struct platform_driver rpcif_platform_driver = {
+ 	.probe	= rpcif_hb_probe,
+ 	.remove_new = rpcif_hb_remove,
++	.id_table = rpc_if_hyperflash_id_table,
+ 	.driver	= {
+ 		.name	= "rpc-if-hyperflash",
+ 	},
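
[A platform driver matched purely by name still needs a platform_device_id table with MODULE_DEVICE_TABLE() so that udev can autoload the module from the device's MODALIAS uevent; the hunk above adds exactly that. A minimal sketch of the pattern for a hypothetical "my-platform-dev":

#include <linux/module.h>
#include <linux/platform_device.h>

static int my_probe(struct platform_device *pdev);      /* assumed */

static const struct platform_device_id my_id_table[] = {
        { .name = "my-platform-dev" },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(platform, my_id_table);     /* emits modalias info */

static struct platform_driver my_driver = {
        .probe = my_probe,
        .id_table = my_id_table,
        .driver = { .name = "my-platform-dev" },
};
module_platform_driver(my_driver);
]
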
+diff --git a/drivers/mtd/nand/raw/atmel/pmecc.c b/drivers/mtd/nand/raw/atmel/pmecc.c
+index 4d7dc8a9c37385..a22aab4ed4e8ab 100644
+--- a/drivers/mtd/nand/raw/atmel/pmecc.c
++++ b/drivers/mtd/nand/raw/atmel/pmecc.c
+@@ -362,7 +362,7 @@ atmel_pmecc_create_user(struct atmel_pmecc *pmecc,
+ 	size = ALIGN(size, sizeof(s32));
+ 	size += (req->ecc.strength + 1) * sizeof(s32) * 3;
+ 
+-	user = kzalloc(size, GFP_KERNEL);
++	user = devm_kzalloc(pmecc->dev, size, GFP_KERNEL);
+ 	if (!user)
+ 		return ERR_PTR(-ENOMEM);
+ 
+@@ -408,12 +408,6 @@ atmel_pmecc_create_user(struct atmel_pmecc *pmecc,
+ }
+ EXPORT_SYMBOL_GPL(atmel_pmecc_create_user);
+ 
+-void atmel_pmecc_destroy_user(struct atmel_pmecc_user *user)
+-{
+-	kfree(user);
+-}
+-EXPORT_SYMBOL_GPL(atmel_pmecc_destroy_user);
+-
+ static int get_strength(struct atmel_pmecc_user *user)
+ {
+ 	const int *strengths = user->pmecc->caps->strengths;
+diff --git a/drivers/mtd/nand/raw/atmel/pmecc.h b/drivers/mtd/nand/raw/atmel/pmecc.h
+index 7851c05126cf15..cc0c5af1f4f1ab 100644
+--- a/drivers/mtd/nand/raw/atmel/pmecc.h
++++ b/drivers/mtd/nand/raw/atmel/pmecc.h
+@@ -55,8 +55,6 @@ struct atmel_pmecc *devm_atmel_pmecc_get(struct device *dev);
+ struct atmel_pmecc_user *
+ atmel_pmecc_create_user(struct atmel_pmecc *pmecc,
+ 			struct atmel_pmecc_user_req *req);
+-void atmel_pmecc_destroy_user(struct atmel_pmecc_user *user);
+-
+ void atmel_pmecc_reset(struct atmel_pmecc *pmecc);
+ int atmel_pmecc_enable(struct atmel_pmecc_user *user, int op);
+ void atmel_pmecc_disable(struct atmel_pmecc_user *user);
+diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
+index 9d6e85bf227b92..8c57df44c40fe8 100644
+--- a/drivers/mtd/spi-nor/core.c
++++ b/drivers/mtd/spi-nor/core.c
+@@ -89,7 +89,7 @@ void spi_nor_spimem_setup_op(const struct spi_nor *nor,
+ 		op->addr.buswidth = spi_nor_get_protocol_addr_nbits(proto);
+ 
+ 	if (op->dummy.nbytes)
+-		op->dummy.buswidth = spi_nor_get_protocol_addr_nbits(proto);
++		op->dummy.buswidth = spi_nor_get_protocol_data_nbits(proto);
+ 
+ 	if (op->data.nbytes)
+ 		op->data.buswidth = spi_nor_get_protocol_data_nbits(proto);
+diff --git a/drivers/mtd/spi-nor/spansion.c b/drivers/mtd/spi-nor/spansion.c
+index d6c92595f6bc9b..5a88a6096ca8c9 100644
+--- a/drivers/mtd/spi-nor/spansion.c
++++ b/drivers/mtd/spi-nor/spansion.c
+@@ -106,6 +106,7 @@ static int cypress_nor_sr_ready_and_clear_reg(struct spi_nor *nor, u64 addr)
+ 	int ret;
+ 
+ 	if (nor->reg_proto == SNOR_PROTO_8_8_8_DTR) {
++		op.addr.nbytes = nor->addr_nbytes;
+ 		op.dummy.nbytes = params->rdsr_dummy;
+ 		op.data.nbytes = 2;
+ 	}
+diff --git a/drivers/mtd/ubi/attach.c b/drivers/mtd/ubi/attach.c
+index ae5abe492b52ab..adc47b87b38a5f 100644
+--- a/drivers/mtd/ubi/attach.c
++++ b/drivers/mtd/ubi/attach.c
+@@ -1447,7 +1447,7 @@ static int scan_all(struct ubi_device *ubi, struct ubi_attach_info *ai,
+ 	return err;
+ }
+ 
+-static struct ubi_attach_info *alloc_ai(void)
++static struct ubi_attach_info *alloc_ai(const char *slab_name)
+ {
+ 	struct ubi_attach_info *ai;
+ 
+@@ -1461,7 +1461,7 @@ static struct ubi_attach_info *alloc_ai(void)
+ 	INIT_LIST_HEAD(&ai->alien);
+ 	INIT_LIST_HEAD(&ai->fastmap);
+ 	ai->volumes = RB_ROOT;
+-	ai->aeb_slab_cache = kmem_cache_create("ubi_aeb_slab_cache",
++	ai->aeb_slab_cache = kmem_cache_create(slab_name,
+ 					       sizeof(struct ubi_ainf_peb),
+ 					       0, 0, NULL);
+ 	if (!ai->aeb_slab_cache) {
+@@ -1491,7 +1491,7 @@ static int scan_fast(struct ubi_device *ubi, struct ubi_attach_info **ai)
+ 
+ 	err = -ENOMEM;
+ 
+-	scan_ai = alloc_ai();
++	scan_ai = alloc_ai("ubi_aeb_slab_cache_fastmap");
+ 	if (!scan_ai)
+ 		goto out;
+ 
+@@ -1557,7 +1557,7 @@ int ubi_attach(struct ubi_device *ubi, int force_scan)
+ 	int err;
+ 	struct ubi_attach_info *ai;
+ 
+-	ai = alloc_ai();
++	ai = alloc_ai("ubi_aeb_slab_cache");
+ 	if (!ai)
+ 		return -ENOMEM;
+ 
+@@ -1575,7 +1575,7 @@ int ubi_attach(struct ubi_device *ubi, int force_scan)
+ 		if (err > 0 || mtd_is_eccerr(err)) {
+ 			if (err != UBI_NO_FASTMAP) {
+ 				destroy_ai(ai);
+-				ai = alloc_ai();
++				ai = alloc_ai("ubi_aeb_slab_cache");
+ 				if (!ai)
+ 					return -ENOMEM;
+ 
+@@ -1614,7 +1614,7 @@ int ubi_attach(struct ubi_device *ubi, int force_scan)
+ 	if (ubi->fm && ubi_dbg_chk_fastmap(ubi)) {
+ 		struct ubi_attach_info *scan_ai;
+ 
+-		scan_ai = alloc_ai();
++		scan_ai = alloc_ai("ubi_aeb_slab_cache_dbg_chk_fastmap");
+ 		if (!scan_ai) {
+ 			err = -ENOMEM;
+ 			goto out_wl;
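
[alloc_ai() now takes the slab-cache name as a parameter because two attach-info objects can be alive at once (for instance during the fastmap self-check), and creating two kmem caches with the same name triggers duplicate-name warnings. A short sketch of the parameterised-cache idea, with a placeholder item type:

#include <linux/slab.h>

struct item { int x; };         /* placeholder payload */

/* Each live allocator instance gets a uniquely named cache. */
static struct kmem_cache *make_item_cache(const char *slab_name)
{
        return kmem_cache_create(slab_name, sizeof(struct item),
                                 0, 0, NULL);
}

Callers then pick distinct names per purpose, e.g. make_item_cache("item_cache_scan") alongside make_item_cache("item_cache_fastmap"), mirroring the distinct names the hunk introduces.]
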
+diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c
+index 2a9cc9413c427d..9bdb6525f1281f 100644
+--- a/drivers/mtd/ubi/fastmap-wl.c
++++ b/drivers/mtd/ubi/fastmap-wl.c
+@@ -346,14 +346,27 @@ int ubi_wl_get_peb(struct ubi_device *ubi)
+  * WL sub-system.
+  *
+  * @ubi: UBI device description object
++ * @need_fill: whether to fill wear-leveling pool when no PEBs are found
+  */
+-static struct ubi_wl_entry *next_peb_for_wl(struct ubi_device *ubi)
++static struct ubi_wl_entry *next_peb_for_wl(struct ubi_device *ubi,
++					    bool need_fill)
+ {
+ 	struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
+ 	int pnum;
+ 
+-	if (pool->used == pool->size)
++	if (pool->used == pool->size) {
++		if (need_fill && !ubi->fm_work_scheduled) {
++			/*
++			 * We cannot update the fastmap here because this
++			 * function is called in atomic context.
++			 * Let's fail here and refill/update it as soon as
++			 * possible.
++			 */
++			ubi->fm_work_scheduled = 1;
++			schedule_work(&ubi->fm_work);
++		}
+ 		return NULL;
++	}
+ 
+ 	pnum = pool->pebs[pool->used];
+ 	return ubi->lookuptbl[pnum];
+@@ -375,7 +388,7 @@ static bool need_wear_leveling(struct ubi_device *ubi)
+ 	if (!ubi->used.rb_node)
+ 		return false;
+ 
+-	e = next_peb_for_wl(ubi);
++	e = next_peb_for_wl(ubi, false);
+ 	if (!e) {
+ 		if (!ubi->free.rb_node)
+ 			return false;
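
[The added branch in next_peb_for_wl() is a standard atomic-context pattern: the function runs under a lock and cannot perform the sleeping fastmap update itself, so it sets a once-only flag and defers the refill to a workqueue. A generic sketch, assuming hypothetical pool_empty()/pop_entry() helpers and a pool lock held by the caller:

#include <linux/workqueue.h>

static bool pool_empty(void);           /* hypothetical pool helpers */
static void *pop_entry(void);
static bool refill_scheduled;           /* protected by the pool lock */

static void refill_fn(struct work_struct *work)
{
        /* Process context: free to sleep, do I/O and refill the pool,
         * then clear refill_scheduled under the pool lock. */
}
static DECLARE_WORK(refill_work, refill_fn);

/* Called with the pool lock held (atomic context). */
static void *take_entry_or_schedule(void)
{
        if (pool_empty()) {
                if (!refill_scheduled) {
                        refill_scheduled = true;
                        schedule_work(&refill_work);    /* safe while atomic */
                }
                return NULL;
        }
        return pop_entry();
}
]
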
+diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
+index 5a3558bbb90356..e5cf3bdca3b012 100644
+--- a/drivers/mtd/ubi/vmt.c
++++ b/drivers/mtd/ubi/vmt.c
+@@ -143,8 +143,10 @@ static struct fwnode_handle *find_volume_fwnode(struct ubi_volume *vol)
+ 		    vol->vol_id != volid)
+ 			continue;
+ 
++		fwnode_handle_put(fw_vols);
+ 		return fw_vol;
+ 	}
++	fwnode_handle_put(fw_vols);
+ 
+ 	return NULL;
+ }
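
[find_volume_fwnode() held a reference to the parent node on both exit paths; the fix adds the missing fwnode_handle_put() before each return. Note that fwnode_for_each_child_node() itself drops each child's reference as it advances, so only the child actually returned keeps one. A hedged sketch, assuming a match() predicate and a parent handle the caller obtained with a reference:

#include <linux/property.h>

static bool match(const struct fwnode_handle *child);   /* hypothetical */

static struct fwnode_handle *find_child(struct fwnode_handle *parent)
{
        struct fwnode_handle *child;

        fwnode_for_each_child_node(parent, child) {
                if (!match(child))
                        continue;
                fwnode_handle_put(parent); /* drop parent ref on every exit */
                return child;              /* child ref passes to the caller */
        }
        fwnode_handle_put(parent);
        return NULL;
}
]
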
+diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
+index a357f3d27f2f3d..fbd399cf650337 100644
+--- a/drivers/mtd/ubi/wl.c
++++ b/drivers/mtd/ubi/wl.c
+@@ -683,7 +683,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
+ 	ubi_assert(!ubi->move_to_put);
+ 
+ #ifdef CONFIG_MTD_UBI_FASTMAP
+-	if (!next_peb_for_wl(ubi) ||
++	if (!next_peb_for_wl(ubi, true) ||
+ #else
+ 	if (!ubi->free.rb_node ||
+ #endif
+@@ -846,7 +846,14 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
+ 			goto out_not_moved;
+ 		}
+ 		if (err == MOVE_RETRY) {
+-			scrubbing = 1;
++			/*
++			 * For source PEB:
++			 * 1. The scrubbing is set for scrub type PEB, it will
++			 *    be put back into ubi->scrub list.
++			 * 2. Non-scrub type PEB will be put back into ubi->used
++			 *    list.
++			 */
++			keep = 1;
+ 			dst_leb_clean = 1;
+ 			goto out_not_moved;
+ 		}
+diff --git a/drivers/mtd/ubi/wl.h b/drivers/mtd/ubi/wl.h
+index 7b6715ef6d4a35..a69169c35e310f 100644
+--- a/drivers/mtd/ubi/wl.h
++++ b/drivers/mtd/ubi/wl.h
+@@ -5,7 +5,8 @@
+ static void update_fastmap_work_fn(struct work_struct *wrk);
+ static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root);
+ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi);
+-static struct ubi_wl_entry *next_peb_for_wl(struct ubi_device *ubi);
++static struct ubi_wl_entry *next_peb_for_wl(struct ubi_device *ubi,
++					    bool need_fill);
+ static bool need_wear_leveling(struct ubi_device *ubi);
+ static void ubi_fastmap_close(struct ubi_device *ubi);
+ static inline void ubi_fastmap_init(struct ubi_device *ubi, int *count)
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 99d025b69079a8..3d9ee91e1f8be0 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -4558,7 +4558,7 @@ int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
+ 	struct net_device *dev = bp->dev;
+ 
+ 	if (page_mode) {
+-		bp->flags &= ~BNXT_FLAG_AGG_RINGS;
++		bp->flags &= ~(BNXT_FLAG_AGG_RINGS | BNXT_FLAG_NO_AGG_RINGS);
+ 		bp->flags |= BNXT_FLAG_RX_PAGE_MODE;
+ 
+ 		if (bp->xdp_prog->aux->xdp_has_frags)
+@@ -9053,7 +9053,6 @@ static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
+ 	struct hwrm_port_mac_ptp_qcfg_output *resp;
+ 	struct hwrm_port_mac_ptp_qcfg_input *req;
+ 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+-	bool phc_cfg;
+ 	u8 flags;
+ 	int rc;
+ 
+@@ -9100,8 +9099,9 @@ static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
+ 		rc = -ENODEV;
+ 		goto exit;
+ 	}
+-	phc_cfg = (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED) != 0;
+-	rc = bnxt_ptp_init(bp, phc_cfg);
++	ptp->rtc_configured =
++		(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED) != 0;
++	rc = bnxt_ptp_init(bp);
+ 	if (rc)
+ 		netdev_warn(bp->dev, "PTP initialization failed.\n");
+ exit:
+@@ -14494,6 +14494,14 @@ static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
+ 		bnxt_close_nic(bp, true, false);
+ 
+ 	WRITE_ONCE(dev->mtu, new_mtu);
++
++	/* MTU change may change the AGG ring settings if an XDP multi-buffer
++	 * program is attached.  We need to set the AGG rings settings and
++	 * rx_skb_func accordingly.
++	 */
++	if (READ_ONCE(bp->xdp_prog))
++		bnxt_set_rx_skb_mode(bp, true);
++
+ 	bnxt_set_ring_params(bp);
+ 
+ 	if (netif_running(dev))
+@@ -15231,6 +15239,13 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
+ 
+ 	for (i = 0; i <= BNXT_VNIC_NTUPLE; i++) {
+ 		vnic = &bp->vnic_info[i];
++
++		rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true);
++		if (rc) {
++			netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
++				   vnic->vnic_id, rc);
++			return rc;
++		}
+ 		vnic->mru = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
+ 		bnxt_hwrm_vnic_update(bp, vnic,
+ 				      VNIC_UPDATE_REQ_ENABLES_MRU_VALID);
+@@ -15984,6 +15999,7 @@ static void bnxt_shutdown(struct pci_dev *pdev)
+ 	if (netif_running(dev))
+ 		dev_close(dev);
+ 
++	bnxt_ptp_clear(bp);
+ 	bnxt_clear_int_mode(bp);
+ 	pci_disable_device(pdev);
+ 
+@@ -16011,6 +16027,7 @@ static int bnxt_suspend(struct device *device)
+ 		rc = bnxt_close(dev);
+ 	}
+ 	bnxt_hwrm_func_drv_unrgtr(bp);
++	bnxt_ptp_clear(bp);
+ 	pci_disable_device(bp->pdev);
+ 	bnxt_free_ctx_mem(bp);
+ 	rtnl_unlock();
+@@ -16054,6 +16071,10 @@ static int bnxt_resume(struct device *device)
+ 	if (bp->fw_crash_mem)
+ 		bnxt_hwrm_crash_dump_mem_cfg(bp);
+ 
++	if (bnxt_ptp_init(bp)) {
++		kfree(bp->ptp_cfg);
++		bp->ptp_cfg = NULL;
++	}
+ 	bnxt_get_wol_settings(bp);
+ 	if (netif_running(dev)) {
+ 		rc = bnxt_open(dev);
+@@ -16232,8 +16253,12 @@ static void bnxt_io_resume(struct pci_dev *pdev)
+ 	rtnl_lock();
+ 
+ 	err = bnxt_hwrm_func_qcaps(bp);
+-	if (!err && netif_running(netdev))
+-		err = bnxt_open(netdev);
++	if (!err) {
++		if (netif_running(netdev))
++			err = bnxt_open(netdev);
++		else
++			err = bnxt_reserve_rings(bp, true);
++	}
+ 
+ 	if (!err)
+ 		netif_device_attach(netdev);
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+index f71cc8188b4e5b..20ba14eb87e00b 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+@@ -2838,19 +2838,24 @@ static int bnxt_get_link_ksettings(struct net_device *dev,
+ 	}
+ 
+ 	base->port = PORT_NONE;
+-	if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
++	if (media == BNXT_MEDIA_TP) {
+ 		base->port = PORT_TP;
+ 		linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT,
+ 				 lk_ksettings->link_modes.supported);
+ 		linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT,
+ 				 lk_ksettings->link_modes.advertising);
++	} else if (media == BNXT_MEDIA_KR) {
++		linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT,
++				 lk_ksettings->link_modes.supported);
++		linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT,
++				 lk_ksettings->link_modes.advertising);
+ 	} else {
+ 		linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
+ 				 lk_ksettings->link_modes.supported);
+ 		linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
+ 				 lk_ksettings->link_modes.advertising);
+ 
+-		if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC)
++		if (media == BNXT_MEDIA_CR)
+ 			base->port = PORT_DA;
+ 		else
+ 			base->port = PORT_FIBRE;
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+index fa514be8765028..781225d3ba8ffc 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+@@ -1024,7 +1024,7 @@ static void bnxt_ptp_free(struct bnxt *bp)
+ 	}
+ }
+ 
+-int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg)
++int bnxt_ptp_init(struct bnxt *bp)
+ {
+ 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ 	int rc;
+@@ -1047,7 +1047,7 @@ int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg)
+ 
+ 	if (BNXT_PTP_USE_RTC(bp)) {
+ 		bnxt_ptp_timecounter_init(bp, false);
+-		rc = bnxt_ptp_init_rtc(bp, phc_cfg);
++		rc = bnxt_ptp_init_rtc(bp, ptp->rtc_configured);
+ 		if (rc)
+ 			goto out;
+ 	} else {
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
+index f322466ecad350..61e89bb2d2690c 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
+@@ -133,6 +133,7 @@ struct bnxt_ptp_cfg {
+ 					 BNXT_PTP_MSG_PDELAY_REQ |	\
+ 					 BNXT_PTP_MSG_PDELAY_RESP)
+ 	u8			tx_tstamp_en:1;
++	u8			rtc_configured:1;
+ 	int			rx_filter;
+ 	u32			tstamp_filters;
+ 
+@@ -180,6 +181,6 @@ void bnxt_tx_ts_cmp(struct bnxt *bp, struct bnxt_napi *bnapi,
+ 		    struct tx_ts_cmp *tscmp);
+ void bnxt_ptp_rtc_timecounter_init(struct bnxt_ptp_cfg *ptp, u64 ns);
+ int bnxt_ptp_init_rtc(struct bnxt *bp, bool phc_cfg);
+-int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg);
++int bnxt_ptp_init(struct bnxt *bp);
+ void bnxt_ptp_clear(struct bnxt *bp);
+ #endif
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index 37881591774175..d178138981a967 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -17801,6 +17801,9 @@ static int tg3_init_one(struct pci_dev *pdev,
+ 	} else
+ 		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
+ 
++	if (tg3_asic_rev(tp) == ASIC_REV_57766)
++		persist_dma_mask = DMA_BIT_MASK(31);
++
+ 	/* Configure DMA attributes. */
+ 	if (dma_mask > DMA_BIT_MASK(32)) {
+ 		err = dma_set_mask(&pdev->dev, dma_mask);
+diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
+index e44e8b139633fc..060e0e6749380f 100644
+--- a/drivers/net/ethernet/google/gve/gve_adminq.c
++++ b/drivers/net/ethernet/google/gve/gve_adminq.c
+@@ -1248,10 +1248,10 @@ gve_adminq_configure_flow_rule(struct gve_priv *priv,
+ 			sizeof(struct gve_adminq_configure_flow_rule),
+ 			flow_rule_cmd);
+ 
+-	if (err) {
++	if (err == -ETIME) {
+ 		dev_err(&priv->pdev->dev, "Timeout to configure the flow rule, trigger reset");
+ 		gve_reset(priv, true);
+-	} else {
++	} else if (!err) {
+ 		priv->flow_rules_cache.rules_cache_synced = false;
+ 	}
+ 
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+index f2506511bbfff4..bce5b76f1e7a58 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+@@ -5299,7 +5299,7 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
+ 	}
+ 
+ flags_complete:
+-	bitmap_xor(changed_flags, pf->flags, orig_flags, I40E_PF_FLAGS_NBITS);
++	bitmap_xor(changed_flags, new_flags, orig_flags, I40E_PF_FLAGS_NBITS);
+ 
+ 	if (test_bit(I40E_FLAG_FW_LLDP_DIS, changed_flags))
+ 		reset_needed = I40E_PF_RESET_AND_REBUILD_FLAG;
+diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+index 59f62306b9cb02..b6ec01f6fa73e0 100644
+--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
++++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+@@ -1715,8 +1715,8 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
+ 
+ 		/* copy Tx queue info from VF into VSI */
+ 		if (qpi->txq.ring_len > 0) {
+-			vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
+-			vsi->tx_rings[i]->count = qpi->txq.ring_len;
++			vsi->tx_rings[q_idx]->dma = qpi->txq.dma_ring_addr;
++			vsi->tx_rings[q_idx]->count = qpi->txq.ring_len;
+ 
+ 			/* Disable any existing queue first */
+ 			if (ice_vf_vsi_dis_single_txq(vf, vsi, q_idx))
+@@ -1725,7 +1725,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
+ 			/* Configure a queue with the requested settings */
+ 			if (ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx)) {
+ 				dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure TX queue %d\n",
+-					 vf->vf_id, i);
++					 vf->vf_id, q_idx);
+ 				goto error_param;
+ 			}
+ 		}
+@@ -1733,24 +1733,23 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
+ 		/* copy Rx queue info from VF into VSI */
+ 		if (qpi->rxq.ring_len > 0) {
+ 			u16 max_frame_size = ice_vc_get_max_frame_size(vf);
++			struct ice_rx_ring *ring = vsi->rx_rings[q_idx];
+ 			u32 rxdid;
+ 
+-			vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
+-			vsi->rx_rings[i]->count = qpi->rxq.ring_len;
++			ring->dma = qpi->rxq.dma_ring_addr;
++			ring->count = qpi->rxq.ring_len;
+ 
+ 			if (qpi->rxq.crc_disable)
+-				vsi->rx_rings[q_idx]->flags |=
+-					ICE_RX_FLAGS_CRC_STRIP_DIS;
++				ring->flags |= ICE_RX_FLAGS_CRC_STRIP_DIS;
+ 			else
+-				vsi->rx_rings[q_idx]->flags &=
+-					~ICE_RX_FLAGS_CRC_STRIP_DIS;
++				ring->flags &= ~ICE_RX_FLAGS_CRC_STRIP_DIS;
+ 
+ 			if (qpi->rxq.databuffer_size != 0 &&
+ 			    (qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
+ 			     qpi->rxq.databuffer_size < 1024))
+ 				goto error_param;
+ 			vsi->rx_buf_len = qpi->rxq.databuffer_size;
+-			vsi->rx_rings[i]->rx_buf_len = vsi->rx_buf_len;
++			ring->rx_buf_len = vsi->rx_buf_len;
+ 			if (qpi->rxq.max_pkt_size > max_frame_size ||
+ 			    qpi->rxq.max_pkt_size < 64)
+ 				goto error_param;
+@@ -1765,7 +1764,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
+ 
+ 			if (ice_vsi_cfg_single_rxq(vsi, q_idx)) {
+ 				dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure RX queue %d\n",
+-					 vf->vf_id, i);
++					 vf->vf_id, q_idx);
+ 				goto error_param;
+ 			}
+ 
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+index 27935c54b91bc7..8216f843a7cd5f 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+@@ -112,6 +112,11 @@ struct mac_ops *get_mac_ops(void *cgxd)
+ 	return ((struct cgx *)cgxd)->mac_ops;
+ }
+ 
++u32 cgx_get_fifo_len(void *cgxd)
++{
++	return ((struct cgx *)cgxd)->fifo_len;
++}
++
+ void cgx_write(struct cgx *cgx, u64 lmac, u64 offset, u64 val)
+ {
+ 	writeq(val, cgx->reg_base + (lmac << cgx->mac_ops->lmac_offset) +
+@@ -209,6 +214,24 @@ u8 cgx_lmac_get_p2x(int cgx_id, int lmac_id)
+ 	return (cfg & CMR_P2X_SEL_MASK) >> CMR_P2X_SEL_SHIFT;
+ }
+ 
++static u8 cgx_get_nix_resetbit(struct cgx *cgx)
++{
++	int first_lmac;
++	u8 p2x;
++
++	/* non 98XX silicons supports only NIX0 block */
++	if (cgx->pdev->subsystem_device != PCI_SUBSYS_DEVID_98XX)
++		return CGX_NIX0_RESET;
++
++	first_lmac = find_first_bit(&cgx->lmac_bmap, cgx->max_lmac_per_mac);
++	p2x = cgx_lmac_get_p2x(cgx->cgx_id, first_lmac);
++
++	if (p2x == CMR_P2X_SEL_NIX1)
++		return CGX_NIX1_RESET;
++	else
++		return CGX_NIX0_RESET;
++}
++
+ /* Ensure the required lock for event queue(where asynchronous events are
+  * posted) is acquired before calling this API. Else an asynchronous event(with
+  * latest link status) can reach the destination before this function returns
+@@ -501,7 +524,7 @@ static u32 cgx_get_lmac_fifo_len(void *cgxd, int lmac_id)
+ 	u8 num_lmacs;
+ 	u32 fifo_len;
+ 
+-	fifo_len = cgx->mac_ops->fifo_len;
++	fifo_len = cgx->fifo_len;
+ 	num_lmacs = cgx->mac_ops->get_nr_lmacs(cgx);
+ 
+ 	switch (num_lmacs) {
+@@ -1719,6 +1742,8 @@ static int cgx_lmac_init(struct cgx *cgx)
+ 		lmac->lmac_type = cgx->mac_ops->get_lmac_type(cgx, lmac->lmac_id);
+ 	}
+ 
++	/* Start X2P reset on given MAC block */
++	cgx->mac_ops->mac_x2p_reset(cgx, true);
+ 	return cgx_lmac_verify_fwi_version(cgx);
+ 
+ err_bitmap_free:
+@@ -1764,7 +1789,7 @@ static void cgx_populate_features(struct cgx *cgx)
+ 	u64 cfg;
+ 
+ 	cfg = cgx_read(cgx, 0, CGX_CONST);
+-	cgx->mac_ops->fifo_len = FIELD_GET(CGX_CONST_RXFIFO_SIZE, cfg);
++	cgx->fifo_len = FIELD_GET(CGX_CONST_RXFIFO_SIZE, cfg);
+ 	cgx->max_lmac_per_mac = FIELD_GET(CGX_CONST_MAX_LMACS, cfg);
+ 
+ 	if (is_dev_rpm(cgx))
+@@ -1784,6 +1809,45 @@ static u8 cgx_get_rxid_mapoffset(struct cgx *cgx)
+ 		return 0x60;
+ }
+ 
++static void cgx_x2p_reset(void *cgxd, bool enable)
++{
++	struct cgx *cgx = cgxd;
++	int lmac_id;
++	u64 cfg;
++
++	if (enable) {
++		for_each_set_bit(lmac_id, &cgx->lmac_bmap, cgx->max_lmac_per_mac)
++			cgx->mac_ops->mac_enadis_rx(cgx, lmac_id, false);
++
++		usleep_range(1000, 2000);
++
++		cfg = cgx_read(cgx, 0, CGXX_CMR_GLOBAL_CONFIG);
++		cfg |= cgx_get_nix_resetbit(cgx) | CGX_NSCI_DROP;
++		cgx_write(cgx, 0, CGXX_CMR_GLOBAL_CONFIG, cfg);
++	} else {
++		cfg = cgx_read(cgx, 0, CGXX_CMR_GLOBAL_CONFIG);
++		cfg &= ~(cgx_get_nix_resetbit(cgx) | CGX_NSCI_DROP);
++		cgx_write(cgx, 0, CGXX_CMR_GLOBAL_CONFIG, cfg);
++	}
++}
++
++static int cgx_enadis_rx(void *cgxd, int lmac_id, bool enable)
++{
++	struct cgx *cgx = cgxd;
++	u64 cfg;
++
++	if (!is_lmac_valid(cgx, lmac_id))
++		return -ENODEV;
++
++	cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
++	if (enable)
++		cfg |= DATA_PKT_RX_EN;
++	else
++		cfg &= ~DATA_PKT_RX_EN;
++	cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
++	return 0;
++}
++
+ static struct mac_ops	cgx_mac_ops    = {
+ 	.name		=       "cgx",
+ 	.csr_offset	=       0,
+@@ -1815,6 +1879,8 @@ static struct mac_ops	cgx_mac_ops    = {
+ 	.mac_get_pfc_frm_cfg   =        cgx_lmac_get_pfc_frm_cfg,
+ 	.mac_reset   =			cgx_lmac_reset,
+ 	.mac_stats_reset       =	cgx_stats_reset,
++	.mac_x2p_reset                   =      cgx_x2p_reset,
++	.mac_enadis_rx			 =      cgx_enadis_rx,
+ };
+ 
+ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
+index dc9ace30554af6..1cf12e5c7da873 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
++++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
+@@ -32,6 +32,10 @@
+ #define CGX_LMAC_TYPE_MASK		0xF
+ #define CGXX_CMRX_INT			0x040
+ #define FW_CGX_INT			BIT_ULL(1)
++#define CGXX_CMR_GLOBAL_CONFIG          0x08
++#define CGX_NIX0_RESET			BIT_ULL(2)
++#define CGX_NIX1_RESET			BIT_ULL(3)
++#define CGX_NSCI_DROP			BIT_ULL(9)
+ #define CGXX_CMRX_INT_ENA_W1S		0x058
+ #define CGXX_CMRX_RX_ID_MAP		0x060
+ #define CGXX_CMRX_RX_STAT0		0x070
+@@ -185,4 +189,5 @@ int cgx_lmac_get_pfc_frm_cfg(void *cgxd, int lmac_id, u8 *tx_pause,
+ int verify_lmac_fc_cfg(void *cgxd, int lmac_id, u8 tx_pause, u8 rx_pause,
+ 		       int pfvf_idx);
+ int cgx_lmac_reset(void *cgxd, int lmac_id, u8 pf_req_flr);
++u32 cgx_get_fifo_len(void *cgxd);
+ #endif /* CGX_H */
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
+index 9ffc6790c51307..6180e68e1765a7 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
++++ b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
+@@ -72,7 +72,6 @@ struct mac_ops {
+ 	u8			irq_offset;
+ 	u8			int_ena_bit;
+ 	u8			lmac_fwi;
+-	u32			fifo_len;
+ 	bool			non_contiguous_serdes_lane;
+ 	/* RPM & CGX differs in number of Receive/transmit stats */
+ 	u8			rx_stats_cnt;
+@@ -133,6 +132,8 @@ struct mac_ops {
+ 	int			(*get_fec_stats)(void *cgxd, int lmac_id,
+ 						 struct cgx_fec_stats_rsp *rsp);
+ 	int			(*mac_stats_reset)(void *cgxd, int lmac_id);
++	void                    (*mac_x2p_reset)(void *cgxd, bool enable);
++	int			(*mac_enadis_rx)(void *cgxd, int lmac_id, bool enable);
+ };
+ 
+ struct cgx {
+@@ -142,6 +143,10 @@ struct cgx {
+ 	u8			lmac_count;
+ 	/* number of LMACs per MAC could be 4 or 8 */
+ 	u8			max_lmac_per_mac;
++	/* length of fifo varies depending on the number
++	 * of LMACS
++	 */
++	u32			fifo_len;
+ #define MAX_LMAC_COUNT		8
+ 	struct lmac             *lmac_idmap[MAX_LMAC_COUNT];
+ 	struct			work_struct cgx_cmd_work;
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
+index 1b34cf9c97035a..2e9945446199ec 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
+@@ -39,6 +39,8 @@ static struct mac_ops		rpm_mac_ops   = {
+ 	.mac_get_pfc_frm_cfg   =        rpm_lmac_get_pfc_frm_cfg,
+ 	.mac_reset   =			rpm_lmac_reset,
+ 	.mac_stats_reset		 =	  rpm_stats_reset,
++	.mac_x2p_reset                   =        rpm_x2p_reset,
++	.mac_enadis_rx			 =        rpm_enadis_rx,
+ };
+ 
+ static struct mac_ops		rpm2_mac_ops   = {
+@@ -72,6 +74,8 @@ static struct mac_ops		rpm2_mac_ops   = {
+ 	.mac_get_pfc_frm_cfg   =        rpm_lmac_get_pfc_frm_cfg,
+ 	.mac_reset   =			rpm_lmac_reset,
+ 	.mac_stats_reset	    =	rpm_stats_reset,
++	.mac_x2p_reset              =   rpm_x2p_reset,
++	.mac_enadis_rx		    =   rpm_enadis_rx,
+ };
+ 
+ bool is_dev_rpm2(void *rpmd)
+@@ -467,7 +471,7 @@ u8 rpm_get_lmac_type(void *rpmd, int lmac_id)
+ 	int err;
+ 
+ 	req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_LINK_STS, req);
+-	err = cgx_fwi_cmd_generic(req, &resp, rpm, 0);
++	err = cgx_fwi_cmd_generic(req, &resp, rpm, lmac_id);
+ 	if (!err)
+ 		return FIELD_GET(RESP_LINKSTAT_LMAC_TYPE, resp);
+ 	return err;
+@@ -480,7 +484,7 @@ u32 rpm_get_lmac_fifo_len(void *rpmd, int lmac_id)
+ 	u8 num_lmacs;
+ 	u32 fifo_len;
+ 
+-	fifo_len = rpm->mac_ops->fifo_len;
++	fifo_len = rpm->fifo_len;
+ 	num_lmacs = rpm->mac_ops->get_nr_lmacs(rpm);
+ 
+ 	switch (num_lmacs) {
+@@ -533,9 +537,9 @@ u32 rpm2_get_lmac_fifo_len(void *rpmd, int lmac_id)
+ 	 */
+ 	max_lmac = (rpm_read(rpm, 0, CGX_CONST) >> 24) & 0xFF;
+ 	if (max_lmac > 4)
+-		fifo_len = rpm->mac_ops->fifo_len / 2;
++		fifo_len = rpm->fifo_len / 2;
+ 	else
+-		fifo_len = rpm->mac_ops->fifo_len;
++		fifo_len = rpm->fifo_len;
+ 
+ 	if (lmac_id < 4) {
+ 		num_lmacs = hweight8(lmac_info & 0xF);
+@@ -699,46 +703,51 @@ int rpm_get_fec_stats(void *rpmd, int lmac_id, struct cgx_fec_stats_rsp *rsp)
+ 	if (rpm->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_NONE)
+ 		return 0;
+ 
++	/* latched registers FCFECX_CW_HI/RSFEC_STAT_FAST_DATA_HI_CDC are common
++	 * for all counters. Acquire lock to ensure serialized reads
++	 */
++	mutex_lock(&rpm->lock);
+ 	if (rpm->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_BASER) {
+-		val_lo = rpm_read(rpm, lmac_id, RPMX_MTI_FCFECX_VL0_CCW_LO);
+-		val_hi = rpm_read(rpm, lmac_id, RPMX_MTI_FCFECX_CW_HI);
++		val_lo = rpm_read(rpm, 0, RPMX_MTI_FCFECX_VL0_CCW_LO(lmac_id));
++		val_hi = rpm_read(rpm, 0, RPMX_MTI_FCFECX_CW_HI(lmac_id));
+ 		rsp->fec_corr_blks = (val_hi << 16 | val_lo);
+ 
+-		val_lo = rpm_read(rpm, lmac_id, RPMX_MTI_FCFECX_VL0_NCCW_LO);
+-		val_hi = rpm_read(rpm, lmac_id, RPMX_MTI_FCFECX_CW_HI);
++		val_lo = rpm_read(rpm, 0, RPMX_MTI_FCFECX_VL0_NCCW_LO(lmac_id));
++		val_hi = rpm_read(rpm, 0, RPMX_MTI_FCFECX_CW_HI(lmac_id));
+ 		rsp->fec_uncorr_blks = (val_hi << 16 | val_lo);
+ 
+ 		/* 50G uses 2 Physical serdes lines */
+ 		if (rpm->lmac_idmap[lmac_id]->link_info.lmac_type_id ==
+ 		    LMAC_MODE_50G_R) {
+-			val_lo = rpm_read(rpm, lmac_id,
+-					  RPMX_MTI_FCFECX_VL1_CCW_LO);
+-			val_hi = rpm_read(rpm, lmac_id,
+-					  RPMX_MTI_FCFECX_CW_HI);
++			val_lo = rpm_read(rpm, 0,
++					  RPMX_MTI_FCFECX_VL1_CCW_LO(lmac_id));
++			val_hi = rpm_read(rpm, 0,
++					  RPMX_MTI_FCFECX_CW_HI(lmac_id));
+ 			rsp->fec_corr_blks += (val_hi << 16 | val_lo);
+ 
+-			val_lo = rpm_read(rpm, lmac_id,
+-					  RPMX_MTI_FCFECX_VL1_NCCW_LO);
+-			val_hi = rpm_read(rpm, lmac_id,
+-					  RPMX_MTI_FCFECX_CW_HI);
++			val_lo = rpm_read(rpm, 0,
++					  RPMX_MTI_FCFECX_VL1_NCCW_LO(lmac_id));
++			val_hi = rpm_read(rpm, 0,
++					  RPMX_MTI_FCFECX_CW_HI(lmac_id));
+ 			rsp->fec_uncorr_blks += (val_hi << 16 | val_lo);
+ 		}
+ 	} else {
+ 		/* enable RS-FEC capture */
+-		cfg = rpm_read(rpm, 0, RPMX_MTI_STAT_STATN_CONTROL);
++		cfg = rpm_read(rpm, 0, RPMX_MTI_RSFEC_STAT_STATN_CONTROL);
+ 		cfg |= RPMX_RSFEC_RX_CAPTURE | BIT(lmac_id);
+-		rpm_write(rpm, 0, RPMX_MTI_STAT_STATN_CONTROL, cfg);
++		rpm_write(rpm, 0, RPMX_MTI_RSFEC_STAT_STATN_CONTROL, cfg);
+ 
+ 		val_lo = rpm_read(rpm, 0,
+ 				  RPMX_MTI_RSFEC_STAT_COUNTER_CAPTURE_2);
+-		val_hi = rpm_read(rpm, 0, RPMX_MTI_STAT_DATA_HI_CDC);
++		val_hi = rpm_read(rpm, 0, RPMX_MTI_RSFEC_STAT_FAST_DATA_HI_CDC);
+ 		rsp->fec_corr_blks = (val_hi << 32 | val_lo);
+ 
+ 		val_lo = rpm_read(rpm, 0,
+ 				  RPMX_MTI_RSFEC_STAT_COUNTER_CAPTURE_3);
+-		val_hi = rpm_read(rpm, 0, RPMX_MTI_STAT_DATA_HI_CDC);
++		val_hi = rpm_read(rpm, 0, RPMX_MTI_RSFEC_STAT_FAST_DATA_HI_CDC);
+ 		rsp->fec_uncorr_blks = (val_hi << 32 | val_lo);
+ 	}
++	mutex_unlock(&rpm->lock);
+ 
+ 	return 0;
+ }
+@@ -763,3 +772,41 @@ int rpm_lmac_reset(void *rpmd, int lmac_id, u8 pf_req_flr)
+ 
+ 	return 0;
+ }
++
++void rpm_x2p_reset(void *rpmd, bool enable)
++{
++	rpm_t *rpm = rpmd;
++	int lmac_id;
++	u64 cfg;
++
++	if (enable) {
++		for_each_set_bit(lmac_id, &rpm->lmac_bmap, rpm->max_lmac_per_mac)
++			rpm->mac_ops->mac_enadis_rx(rpm, lmac_id, false);
++
++		usleep_range(1000, 2000);
++
++		cfg = rpm_read(rpm, 0, RPMX_CMR_GLOBAL_CFG);
++		rpm_write(rpm, 0, RPMX_CMR_GLOBAL_CFG, cfg | RPM_NIX0_RESET);
++	} else {
++		cfg = rpm_read(rpm, 0, RPMX_CMR_GLOBAL_CFG);
++		cfg &= ~RPM_NIX0_RESET;
++		rpm_write(rpm, 0, RPMX_CMR_GLOBAL_CFG, cfg);
++	}
++}
++
++int rpm_enadis_rx(void *rpmd, int lmac_id, bool enable)
++{
++	rpm_t *rpm = rpmd;
++	u64 cfg;
++
++	if (!is_lmac_valid(rpm, lmac_id))
++		return -ENODEV;
++
++	cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
++	if (enable)
++		cfg |= RPM_RX_EN;
++	else
++		cfg &= ~RPM_RX_EN;
++	rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
++	return 0;
++}
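
[The FEC-stats hunk serialises access to latched counter registers: per the new comment, the _HI halves (FCFECX_CW_HI, RSFEC_STAT_FAST_DATA_HI_CDC) are shared across counters and latched by the corresponding _LO read, so a lo/hi pair must complete under rpm->lock before anyone reads another _LO. A generic sketch of the pattern; reg_read(), the offsets and the 16-bit split are placeholders modelled on the hunk:

#include <linux/mutex.h>
#include <linux/types.h>

static DEFINE_MUTEX(stats_lock);        /* serialises lo/hi pairs */
static u64 reg_read(u64 offset);        /* hypothetical MMIO accessor */

#define CNT_LO(lmac)    (0x38618 + (lmac) * 0x40)  /* per-LMAC stride */
#define CNT_HI          0x38638   /* shared; latched by the LO read */

static u64 read_latched_counter(int lmac)
{
        u64 lo, hi;

        mutex_lock(&stats_lock);
        lo = reg_read(CNT_LO(lmac));    /* latches the matching HI half */
        hi = reg_read(CNT_HI);          /* must follow our own LO read */
        mutex_unlock(&stats_lock);

        return (hi << 16) | lo;
}
]
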
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
+index 34b11deb0f3c1d..b8d3972e096aed 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
+@@ -17,6 +17,8 @@
+ 
+ /* Registers */
+ #define RPMX_CMRX_CFG			0x00
++#define RPMX_CMR_GLOBAL_CFG		0x08
++#define RPM_NIX0_RESET			BIT_ULL(3)
+ #define RPMX_RX_TS_PREPEND              BIT_ULL(22)
+ #define RPMX_TX_PTP_1S_SUPPORT          BIT_ULL(17)
+ #define RPMX_CMRX_RX_ID_MAP		0x80
+@@ -84,16 +86,18 @@
+ /* FEC stats */
+ #define RPMX_MTI_STAT_STATN_CONTROL			0x10018
+ #define RPMX_MTI_STAT_DATA_HI_CDC			0x10038
+-#define RPMX_RSFEC_RX_CAPTURE				BIT_ULL(27)
++#define RPMX_RSFEC_RX_CAPTURE				BIT_ULL(28)
+ #define RPMX_CMD_CLEAR_RX				BIT_ULL(30)
+ #define RPMX_CMD_CLEAR_TX				BIT_ULL(31)
++#define RPMX_MTI_RSFEC_STAT_STATN_CONTROL               0x40018
++#define RPMX_MTI_RSFEC_STAT_FAST_DATA_HI_CDC            0x40000
+ #define RPMX_MTI_RSFEC_STAT_COUNTER_CAPTURE_2		0x40050
+ #define RPMX_MTI_RSFEC_STAT_COUNTER_CAPTURE_3		0x40058
+-#define RPMX_MTI_FCFECX_VL0_CCW_LO			0x38618
+-#define RPMX_MTI_FCFECX_VL0_NCCW_LO			0x38620
+-#define RPMX_MTI_FCFECX_VL1_CCW_LO			0x38628
+-#define RPMX_MTI_FCFECX_VL1_NCCW_LO			0x38630
+-#define RPMX_MTI_FCFECX_CW_HI				0x38638
++#define RPMX_MTI_FCFECX_VL0_CCW_LO(a)			(0x38618 + ((a) * 0x40))
++#define RPMX_MTI_FCFECX_VL0_NCCW_LO(a)			(0x38620 + ((a) * 0x40))
++#define RPMX_MTI_FCFECX_VL1_CCW_LO(a)			(0x38628 + ((a) * 0x40))
++#define RPMX_MTI_FCFECX_VL1_NCCW_LO(a)			(0x38630 + ((a) * 0x40))
++#define RPMX_MTI_FCFECX_CW_HI(a)			(0x38638 + ((a) * 0x40))
+ 
+ /* CN10KB CSR Declaration */
+ #define  RPM2_CMRX_SW_INT				0x1b0
+@@ -137,4 +141,6 @@ bool is_dev_rpm2(void *rpmd);
+ int rpm_get_fec_stats(void *cgxd, int lmac_id, struct cgx_fec_stats_rsp *rsp);
+ int rpm_lmac_reset(void *rpmd, int lmac_id, u8 pf_req_flr);
+ int rpm_stats_reset(void *rpmd, int lmac_id);
++void rpm_x2p_reset(void *rpmd, bool enable);
++int rpm_enadis_rx(void *rpmd, int lmac_id, bool enable);
+ #endif /* RPM_H */
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+index 1a97fb9032fa44..cd0d7b7774f1af 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+@@ -1162,6 +1162,7 @@ static int rvu_setup_hw_resources(struct rvu *rvu)
+ 	}
+ 
+ 	rvu_program_channels(rvu);
++	cgx_start_linkup(rvu);
+ 
+ 	err = rvu_mcs_init(rvu);
+ 	if (err) {
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+index 5016ba82e1423a..8555edbb1c8f9a 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+@@ -997,6 +997,7 @@ int rvu_cgx_prio_flow_ctrl_cfg(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_
+ int rvu_cgx_cfg_pause_frm(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause);
+ void rvu_mac_reset(struct rvu *rvu, u16 pcifunc);
+ u32 rvu_cgx_get_lmac_fifolen(struct rvu *rvu, int cgx, int lmac);
++void cgx_start_linkup(struct rvu *rvu);
+ int npc_get_nixlf_mcam_index(struct npc_mcam *mcam, u16 pcifunc, int nixlf,
+ 			     int type);
+ bool is_mcam_entry_enabled(struct rvu *rvu, struct npc_mcam *mcam, int blkaddr,
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+index 266ecbc1b97a68..992fa0b82e8d2d 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+@@ -349,6 +349,7 @@ static void rvu_cgx_wq_destroy(struct rvu *rvu)
+ 
+ int rvu_cgx_init(struct rvu *rvu)
+ {
++	struct mac_ops *mac_ops;
+ 	int cgx, err;
+ 	void *cgxd;
+ 
+@@ -375,6 +376,15 @@ int rvu_cgx_init(struct rvu *rvu)
+ 	if (err)
+ 		return err;
+ 
++	/* Clear X2P reset on all MAC blocks */
++	for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++) {
++		cgxd = rvu_cgx_pdata(cgx, rvu);
++		if (!cgxd)
++			continue;
++		mac_ops = get_mac_ops(cgxd);
++		mac_ops->mac_x2p_reset(cgxd, false);
++	}
++
+ 	/* Register for CGX events */
+ 	err = cgx_lmac_event_handler_init(rvu);
+ 	if (err)
+@@ -382,10 +392,26 @@ int rvu_cgx_init(struct rvu *rvu)
+ 
+ 	mutex_init(&rvu->cgx_cfg_lock);
+ 
+-	/* Ensure event handler registration is completed, before
+-	 * we turn on the links
+-	 */
+-	mb();
++	return 0;
++}
++
++void cgx_start_linkup(struct rvu *rvu)
++{
++	unsigned long lmac_bmap;
++	struct mac_ops *mac_ops;
++	int cgx, lmac, err;
++	void *cgxd;
++
++	/* Enable receive on all LMACS */
++	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
++		cgxd = rvu_cgx_pdata(cgx, rvu);
++		if (!cgxd)
++			continue;
++		mac_ops = get_mac_ops(cgxd);
++		lmac_bmap = cgx_get_lmac_bmap(cgxd);
++		for_each_set_bit(lmac, &lmac_bmap, rvu->hw->lmac_per_cgx)
++			mac_ops->mac_enadis_rx(cgxd, lmac, true);
++	}
+ 
+ 	/* Do link up for all CGX ports */
+ 	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
+@@ -398,8 +424,6 @@ int rvu_cgx_init(struct rvu *rvu)
+ 				"Link up process failed to start on cgx %d\n",
+ 				cgx);
+ 	}
+-
+-	return 0;
+ }
+ 
+ int rvu_cgx_exit(struct rvu *rvu)
+@@ -923,13 +947,12 @@ int rvu_mbox_handler_cgx_features_get(struct rvu *rvu,
+ 
+ u32 rvu_cgx_get_fifolen(struct rvu *rvu)
+ {
+-	struct mac_ops *mac_ops;
+-	u32 fifo_len;
++	void *cgxd = rvu_first_cgx_pdata(rvu);
+ 
+-	mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
+-	fifo_len = mac_ops ? mac_ops->fifo_len : 0;
++	if (!cgxd)
++		return 0;
+ 
+-	return fifo_len;
++	return cgx_get_fifo_len(cgxd);
+ }
+ 
+ u32 rvu_cgx_get_lmac_fifolen(struct rvu *rvu, int cgx, int lmac)
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
+index c1c99d7054f87f..7417087b6db597 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
+@@ -203,6 +203,11 @@ int cn10k_alloc_leaf_profile(struct otx2_nic *pfvf, u16 *leaf)
+ 
+ 	rsp = (struct  nix_bandprof_alloc_rsp *)
+ 	       otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
++	if (IS_ERR(rsp)) {
++		rc = PTR_ERR(rsp);
++		goto out;
++	}
++
+ 	if (!rsp->prof_count[BAND_PROF_LEAF_LAYER]) {
+ 		rc = -EIO;
+ 		goto out;
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+index 87d5776e3b88e9..7510a918d942c0 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+@@ -1837,6 +1837,10 @@ u16 otx2_get_max_mtu(struct otx2_nic *pfvf)
+ 	if (!rc) {
+ 		rsp = (struct nix_hw_info *)
+ 		       otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
++		if (IS_ERR(rsp)) {
++			rc = PTR_ERR(rsp);
++			goto out;
++		}
+ 
+ 		/* HW counts VLAN insertion bytes (8 for double tag)
+ 		 * irrespective of whether SQE is requesting to insert VLAN
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
+index aa01110f04a339..294fba58b67095 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
+@@ -315,6 +315,11 @@ int otx2_config_priority_flow_ctrl(struct otx2_nic *pfvf)
+ 	if (!otx2_sync_mbox_msg(&pfvf->mbox)) {
+ 		rsp = (struct cgx_pfc_rsp *)
+ 		       otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
++		if (IS_ERR(rsp)) {
++			err = PTR_ERR(rsp);
++			goto unlock;
++		}
++
+ 		if (req->rx_pause != rsp->rx_pause || req->tx_pause != rsp->tx_pause) {
+ 			dev_warn(pfvf->dev,
+ 				 "Failed to config PFC\n");
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c
+index 80d853b343f98f..2046dd0da00d85 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c
+@@ -28,6 +28,11 @@ static int otx2_dmacflt_do_add(struct otx2_nic *pf, const u8 *mac,
+ 	if (!err) {
+ 		rsp = (struct cgx_mac_addr_add_rsp *)
+ 			 otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
++		if (IS_ERR(rsp)) {
++			mutex_unlock(&pf->mbox.lock);
++			return PTR_ERR(rsp);
++		}
++
+ 		*dmac_index = rsp->index;
+ 	}
+ 
+@@ -200,6 +205,10 @@ int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u32 bit_pos)
+ 
+ 	rsp = (struct cgx_mac_addr_update_rsp *)
+ 		otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
++	if (IS_ERR(rsp)) {
++		rc = PTR_ERR(rsp);
++		goto out;
++	}
+ 
+ 	pf->flow_cfg->bmap_to_dmacindex[bit_pos] = rsp->index;
+ 
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+index 32468c663605ef..5197ce816581e3 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+@@ -343,6 +343,11 @@ static void otx2_get_pauseparam(struct net_device *netdev,
+ 	if (!otx2_sync_mbox_msg(&pfvf->mbox)) {
+ 		rsp = (struct cgx_pause_frm_cfg *)
+ 		       otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
++		if (IS_ERR(rsp)) {
++			mutex_unlock(&pfvf->mbox.lock);
++			return;
++		}
++
+ 		pause->rx_pause = rsp->rx_pause;
+ 		pause->tx_pause = rsp->tx_pause;
+ 	}
+@@ -1072,6 +1077,11 @@ static int otx2_set_fecparam(struct net_device *netdev,
+ 
+ 	rsp = (struct fec_mode *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
+ 						   0, &req->hdr);
++	if (IS_ERR(rsp)) {
++		err = PTR_ERR(rsp);
++		goto end;
++	}
++
+ 	if (rsp->fec >= 0)
+ 		pfvf->linfo.fec = rsp->fec;
+ 	else
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
+index 98c31a16c70b4f..58720a161ee24a 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
+@@ -119,6 +119,8 @@ int otx2_alloc_mcam_entries(struct otx2_nic *pfvf, u16 count)
+ 
+ 		rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
+ 			(&pfvf->mbox.mbox, 0, &req->hdr);
++		if (IS_ERR(rsp))
++			goto exit;
+ 
+ 		for (ent = 0; ent < rsp->count; ent++)
+ 			flow_cfg->flow_ent[ent + allocated] = rsp->entry_list[ent];
+@@ -197,6 +199,10 @@ int otx2_mcam_entry_init(struct otx2_nic *pfvf)
+ 
+ 	rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
+ 	       (&pfvf->mbox.mbox, 0, &req->hdr);
++	if (IS_ERR(rsp)) {
++		mutex_unlock(&pfvf->mbox.lock);
++		return PTR_ERR(rsp);
++	}
+ 
+ 	if (rsp->count != req->count) {
+ 		netdev_info(pfvf->netdev,
+@@ -232,6 +238,10 @@ int otx2_mcam_entry_init(struct otx2_nic *pfvf)
+ 
+ 	frsp = (struct npc_get_field_status_rsp *)otx2_mbox_get_rsp
+ 	       (&pfvf->mbox.mbox, 0, &freq->hdr);
++	if (IS_ERR(frsp)) {
++		mutex_unlock(&pfvf->mbox.lock);
++		return PTR_ERR(frsp);
++	}
+ 
+ 	if (frsp->enable) {
+ 		pfvf->flags |= OTX2_FLAG_RX_VLAN_SUPPORT;
+diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
+index 1a59c952aa01c1..45f115e41857ba 100644
+--- a/drivers/net/ethernet/marvell/pxa168_eth.c
++++ b/drivers/net/ethernet/marvell/pxa168_eth.c
+@@ -1394,18 +1394,15 @@ static int pxa168_eth_probe(struct platform_device *pdev)
+ 
+ 	printk(KERN_NOTICE "PXA168 10/100 Ethernet Driver\n");
+ 
+-	clk = devm_clk_get(&pdev->dev, NULL);
++	clk = devm_clk_get_enabled(&pdev->dev, NULL);
+ 	if (IS_ERR(clk)) {
+-		dev_err(&pdev->dev, "Fast Ethernet failed to get clock\n");
++		dev_err(&pdev->dev, "Fast Ethernet failed to get and enable clock\n");
+ 		return -ENODEV;
+ 	}
+-	clk_prepare_enable(clk);
+ 
+ 	dev = alloc_etherdev(sizeof(struct pxa168_eth_private));
+-	if (!dev) {
+-		err = -ENOMEM;
+-		goto err_clk;
+-	}
++	if (!dev)
++		return -ENOMEM;
+ 
+ 	platform_set_drvdata(pdev, dev);
+ 	pep = netdev_priv(dev);
+@@ -1523,8 +1520,6 @@ static int pxa168_eth_probe(struct platform_device *pdev)
+ 	mdiobus_free(pep->smi_bus);
+ err_netdev:
+ 	free_netdev(dev);
+-err_clk:
+-	clk_disable_unprepare(clk);
+ 	return err;
+ }
+ 
+@@ -1542,7 +1537,6 @@ static void pxa168_eth_remove(struct platform_device *pdev)
+ 	if (dev->phydev)
+ 		phy_disconnect(dev->phydev);
+ 
+-	clk_disable_unprepare(pep->clk);
+ 	mdiobus_unregister(pep->smi_bus);
+ 	mdiobus_free(pep->smi_bus);
+ 	unregister_netdev(dev);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+index 8577db3308cc56..7f68468c2e7598 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+@@ -516,6 +516,7 @@ void mlx5_modify_lag(struct mlx5_lag *ldev,
+ 		blocking_notifier_call_chain(&dev0->priv.lag_nh,
+ 					     MLX5_DRIVER_EVENT_ACTIVE_BACKUP_LAG_CHANGE_LOWERSTATE,
+ 					     ndev);
++		dev_put(ndev);
+ 	}
+ }
+ 
+@@ -918,6 +919,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
+ {
+ 	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ 	struct lag_tracker tracker = { };
++	struct net_device *ndev;
+ 	bool do_bond, roce_lag;
+ 	int err;
+ 	int i;
+@@ -981,6 +983,16 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
+ 				return;
+ 			}
+ 		}
++		if (tracker.tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
++			ndev = mlx5_lag_active_backup_get_netdev(dev0);
++			/** Only sriov and roce lag should have tracker->TX_type
++			 *  set so no need to check the mode
++			 */
++			blocking_notifier_call_chain(&dev0->priv.lag_nh,
++						     MLX5_DRIVER_EVENT_ACTIVE_BACKUP_LAG_CHANGE_LOWERSTATE,
++						     ndev);
++			dev_put(ndev);
++		}
+ 	} else if (mlx5_lag_should_modify_lag(ldev, do_bond)) {
+ 		mlx5_modify_lag(ldev, &tracker);
+ 	} else if (mlx5_lag_should_disable_lag(ldev, do_bond)) {
+diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
+index a4809fe0fc2496..268489b15616fd 100644
+--- a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
++++ b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
+@@ -319,7 +319,6 @@ static int fbnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ free_irqs:
+ 	fbnic_free_irqs(fbd);
+ free_fbd:
+-	pci_disable_device(pdev);
+ 	fbnic_devlink_free(fbd);
+ 
+ 	return err;
+@@ -349,7 +348,6 @@ static void fbnic_remove(struct pci_dev *pdev)
+ 	fbnic_fw_disable_mbx(fbd);
+ 	fbnic_free_irqs(fbd);
+ 
+-	pci_disable_device(pdev);
+ 	fbnic_devlink_free(fbd);
+ }
+ 
+diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c b/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
+index 7251121ab196e3..16eb3de60eb6df 100644
+--- a/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
++++ b/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
+@@ -366,12 +366,13 @@ static void vcap_api_iterator_init_test(struct kunit *test)
+ 	struct vcap_typegroup typegroups[] = {
+ 		{ .offset = 0, .width = 2, .value = 2, },
+ 		{ .offset = 156, .width = 1, .value = 0, },
+-		{ .offset = 0, .width = 0, .value = 0, },
++		{ }
+ 	};
+ 	struct vcap_typegroup typegroups2[] = {
+ 		{ .offset = 0, .width = 3, .value = 4, },
+ 		{ .offset = 49, .width = 2, .value = 0, },
+ 		{ .offset = 98, .width = 2, .value = 0, },
++		{ }
+ 	};
+ 
+ 	vcap_iter_init(&iter, 52, typegroups, 86);
+@@ -399,6 +400,7 @@ static void vcap_api_iterator_next_test(struct kunit *test)
+ 		{ .offset = 147, .width = 3, .value = 0, },
+ 		{ .offset = 196, .width = 2, .value = 0, },
+ 		{ .offset = 245, .width = 1, .value = 0, },
++		{ }
+ 	};
+ 	int idx;
+ 
+@@ -433,7 +435,7 @@ static void vcap_api_encode_typegroups_test(struct kunit *test)
+ 		{ .offset = 147, .width = 3, .value = 5, },
+ 		{ .offset = 196, .width = 2, .value = 2, },
+ 		{ .offset = 245, .width = 5, .value = 27, },
+-		{ .offset = 0, .width = 0, .value = 0, },
++		{ }
+ 	};
+ 
+ 	vcap_encode_typegroups(stream, 49, typegroups, false);
+@@ -463,6 +465,7 @@ static void vcap_api_encode_bit_test(struct kunit *test)
+ 		{ .offset = 147, .width = 3, .value = 5, },
+ 		{ .offset = 196, .width = 2, .value = 2, },
+ 		{ .offset = 245, .width = 1, .value = 0, },
++		{ }
+ 	};
+ 
+ 	vcap_iter_init(&iter, 49, typegroups, 44);
+@@ -489,7 +492,7 @@ static void vcap_api_encode_field_test(struct kunit *test)
+ 		{ .offset = 147, .width = 3, .value = 5, },
+ 		{ .offset = 196, .width = 2, .value = 2, },
+ 		{ .offset = 245, .width = 5, .value = 27, },
+-		{ .offset = 0, .width = 0, .value = 0, },
++		{ }
+ 	};
+ 	struct vcap_field rf = {
+ 		.type = VCAP_FIELD_U32,
+@@ -538,7 +541,7 @@ static void vcap_api_encode_short_field_test(struct kunit *test)
+ 		{ .offset = 0, .width = 3, .value = 7, },
+ 		{ .offset = 21, .width = 2, .value = 3, },
+ 		{ .offset = 42, .width = 1, .value = 1, },
+-		{ .offset = 0, .width = 0, .value = 0, },
++		{ }
+ 	};
+ 	struct vcap_field rf = {
+ 		.type = VCAP_FIELD_U32,
+@@ -608,7 +611,7 @@ static void vcap_api_encode_keyfield_test(struct kunit *test)
+ 	struct vcap_typegroup tgt[] = {
+ 		{ .offset = 0, .width = 2, .value = 2, },
+ 		{ .offset = 156, .width = 1, .value = 1, },
+-		{ .offset = 0, .width = 0, .value = 0, },
++		{ }
+ 	};
+ 
+ 	vcap_test_api_init(&admin);
+@@ -671,7 +674,7 @@ static void vcap_api_encode_max_keyfield_test(struct kunit *test)
+ 	struct vcap_typegroup tgt[] = {
+ 		{ .offset = 0, .width = 2, .value = 2, },
+ 		{ .offset = 156, .width = 1, .value = 1, },
+-		{ .offset = 0, .width = 0, .value = 0, },
++		{ }
+ 	};
+ 	u32 keyres[] = {
+ 		0x928e8a84,
+@@ -732,7 +735,7 @@ static void vcap_api_encode_actionfield_test(struct kunit *test)
+ 		{ .offset = 0, .width = 2, .value = 2, },
+ 		{ .offset = 21, .width = 1, .value = 1, },
+ 		{ .offset = 42, .width = 1, .value = 0, },
+-		{ .offset = 0, .width = 0, .value = 0, },
++		{ }
+ 	};
+ 
+ 	vcap_encode_actionfield(&rule, &caf, &rf, tgt);
+diff --git a/drivers/net/ethernet/realtek/rtase/rtase.h b/drivers/net/ethernet/realtek/rtase/rtase.h
+index 583c33930f886f..4a4434869b10a8 100644
+--- a/drivers/net/ethernet/realtek/rtase/rtase.h
++++ b/drivers/net/ethernet/realtek/rtase/rtase.h
+@@ -9,7 +9,10 @@
+ #ifndef RTASE_H
+ #define RTASE_H
+ 
+-#define RTASE_HW_VER_MASK 0x7C800000
++#define RTASE_HW_VER_MASK     0x7C800000
++#define RTASE_HW_VER_906X_7XA 0x00800000
++#define RTASE_HW_VER_906X_7XC 0x04000000
++#define RTASE_HW_VER_907XD_V1 0x04800000
+ 
+ #define RTASE_RX_DMA_BURST_256       4
+ #define RTASE_TX_DMA_BURST_UNLIMITED 7
+@@ -327,6 +330,8 @@ struct rtase_private {
+ 	u16 int_nums;
+ 	u16 tx_int_mit;
+ 	u16 rx_int_mit;
++
++	u32 hw_ver;
+ };
+ 
+ #define RTASE_LSO_64K 64000
+diff --git a/drivers/net/ethernet/realtek/rtase/rtase_main.c b/drivers/net/ethernet/realtek/rtase/rtase_main.c
+index f8777b7663d35d..1bfe5ef40c522d 100644
+--- a/drivers/net/ethernet/realtek/rtase/rtase_main.c
++++ b/drivers/net/ethernet/realtek/rtase/rtase_main.c
+@@ -1714,10 +1714,21 @@ static int rtase_get_settings(struct net_device *dev,
+ 			      struct ethtool_link_ksettings *cmd)
+ {
+ 	u32 supported = SUPPORTED_MII | SUPPORTED_Pause | SUPPORTED_Asym_Pause;
++	const struct rtase_private *tp = netdev_priv(dev);
+ 
+ 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ 						supported);
+-	cmd->base.speed = SPEED_5000;
++
++	switch (tp->hw_ver) {
++	case RTASE_HW_VER_906X_7XA:
++	case RTASE_HW_VER_906X_7XC:
++		cmd->base.speed = SPEED_5000;
++		break;
++	case RTASE_HW_VER_907XD_V1:
++		cmd->base.speed = SPEED_10000;
++		break;
++	}
++
+ 	cmd->base.duplex = DUPLEX_FULL;
+ 	cmd->base.port = PORT_MII;
+ 	cmd->base.autoneg = AUTONEG_DISABLE;
+@@ -1972,20 +1983,21 @@ static void rtase_init_software_variable(struct pci_dev *pdev,
+ 	tp->dev->max_mtu = RTASE_MAX_JUMBO_SIZE;
+ }
+ 
+-static bool rtase_check_mac_version_valid(struct rtase_private *tp)
++static int rtase_check_mac_version_valid(struct rtase_private *tp)
+ {
+-	u32 hw_ver = rtase_r32(tp, RTASE_TX_CONFIG_0) & RTASE_HW_VER_MASK;
+-	bool known_ver = false;
++	int ret = -ENODEV;
++
++	tp->hw_ver = rtase_r32(tp, RTASE_TX_CONFIG_0) & RTASE_HW_VER_MASK;
+ 
+-	switch (hw_ver) {
+-	case 0x00800000:
+-	case 0x04000000:
+-	case 0x04800000:
+-		known_ver = true;
++	switch (tp->hw_ver) {
++	case RTASE_HW_VER_906X_7XA:
++	case RTASE_HW_VER_906X_7XC:
++	case RTASE_HW_VER_907XD_V1:
++		ret = 0;
+ 		break;
+ 	}
+ 
+-	return known_ver;
++	return ret;
+ }
+ 
+ static int rtase_init_board(struct pci_dev *pdev, struct net_device **dev_out,
+@@ -2105,9 +2117,13 @@ static int rtase_init_one(struct pci_dev *pdev,
+ 	tp->pdev = pdev;
+ 
+ 	/* identify chip attached to board */
+-	if (!rtase_check_mac_version_valid(tp))
+-		return dev_err_probe(&pdev->dev, -ENODEV,
+-				     "unknown chip version, contact rtase maintainers (see MAINTAINERS file)\n");
++	ret = rtase_check_mac_version_valid(tp);
++	if (ret != 0) {
++		dev_err(&pdev->dev,
++			"unknown chip version: 0x%08x, contact rtase maintainers (see MAINTAINERS file)\n",
++			tp->hw_ver);
++		goto err_out_release_board;
++	}
+ 
+ 	rtase_init_software_variable(pdev, tp);
+ 	rtase_init_hardware(tp);
+@@ -2181,6 +2197,7 @@ static int rtase_init_one(struct pci_dev *pdev,
+ 		netif_napi_del(&ivec->napi);
+ 	}
+ 
++err_out_release_board:
+ 	rtase_release_board(pdev, dev, ioaddr);
+ 
+ 	return ret;
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+index fdb4c773ec98ab..e897b49aa9e05e 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+@@ -486,6 +486,8 @@ static int socfpga_dwmac_probe(struct platform_device *pdev)
+ 	plat_dat->pcs_exit = socfpga_dwmac_pcs_exit;
+ 	plat_dat->select_pcs = socfpga_dwmac_select_pcs;
+ 
++	plat_dat->riwt_off = 1;
++
+ 	ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ 	if (ret)
+ 		return ret;
+diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
+index a4cf682dca650e..0ee73a265545c3 100644
+--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
++++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
+@@ -72,14 +72,6 @@ int txgbe_request_queue_irqs(struct wx *wx)
+ 	return err;
+ }
+ 
+-static int txgbe_request_gpio_irq(struct txgbe *txgbe)
+-{
+-	txgbe->gpio_irq = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_GPIO);
+-	return request_threaded_irq(txgbe->gpio_irq, NULL,
+-				    txgbe_gpio_irq_handler,
+-				    IRQF_ONESHOT, "txgbe-gpio-irq", txgbe);
+-}
+-
+ static int txgbe_request_link_irq(struct txgbe *txgbe)
+ {
+ 	txgbe->link_irq = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_LINK);
+@@ -149,11 +141,6 @@ static irqreturn_t txgbe_misc_irq_thread_fn(int irq, void *data)
+ 	u32 eicr;
+ 
+ 	eicr = wx_misc_isb(wx, WX_ISB_MISC);
+-	if (eicr & TXGBE_PX_MISC_GPIO) {
+-		sub_irq = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_GPIO);
+-		handle_nested_irq(sub_irq);
+-		nhandled++;
+-	}
+ 	if (eicr & (TXGBE_PX_MISC_ETH_LK | TXGBE_PX_MISC_ETH_LKDN |
+ 		    TXGBE_PX_MISC_ETH_AN)) {
+ 		sub_irq = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_LINK);
+@@ -179,7 +166,6 @@ static void txgbe_del_irq_domain(struct txgbe *txgbe)
+ 
+ void txgbe_free_misc_irq(struct txgbe *txgbe)
+ {
+-	free_irq(txgbe->gpio_irq, txgbe);
+ 	free_irq(txgbe->link_irq, txgbe);
+ 	free_irq(txgbe->misc.irq, txgbe);
+ 	txgbe_del_irq_domain(txgbe);
+@@ -191,7 +177,7 @@ int txgbe_setup_misc_irq(struct txgbe *txgbe)
+ 	struct wx *wx = txgbe->wx;
+ 	int hwirq, err;
+ 
+-	txgbe->misc.nirqs = 2;
++	txgbe->misc.nirqs = 1;
+ 	txgbe->misc.domain = irq_domain_add_simple(NULL, txgbe->misc.nirqs, 0,
+ 						   &txgbe_misc_irq_domain_ops, txgbe);
+ 	if (!txgbe->misc.domain)
+@@ -216,20 +202,14 @@ int txgbe_setup_misc_irq(struct txgbe *txgbe)
+ 	if (err)
+ 		goto del_misc_irq;
+ 
+-	err = txgbe_request_gpio_irq(txgbe);
+-	if (err)
+-		goto free_msic_irq;
+-
+ 	err = txgbe_request_link_irq(txgbe);
+ 	if (err)
+-		goto free_gpio_irq;
++		goto free_msic_irq;
+ 
+ 	wx->misc_irq_domain = true;
+ 
+ 	return 0;
+ 
+-free_gpio_irq:
+-	free_irq(txgbe->gpio_irq, txgbe);
+ free_msic_irq:
+ 	free_irq(txgbe->misc.irq, txgbe);
+ del_misc_irq:
+diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+index 93180225a6f14c..f7745026803643 100644
+--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
++++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+@@ -82,7 +82,6 @@ static void txgbe_up_complete(struct wx *wx)
+ {
+ 	struct net_device *netdev = wx->netdev;
+ 
+-	txgbe_reinit_gpio_intr(wx);
+ 	wx_control_hw(wx, true);
+ 	wx_configure_vectors(wx);
+ 
+diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
+index 67b61afdde96ce..f26946198a2fb9 100644
+--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
++++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
+@@ -162,7 +162,7 @@ static struct phylink_pcs *txgbe_phylink_mac_select(struct phylink_config *confi
+ 	struct wx *wx = phylink_to_wx(config);
+ 	struct txgbe *txgbe = wx->priv;
+ 
+-	if (interface == PHY_INTERFACE_MODE_10GBASER)
++	if (wx->media_type != sp_media_copper)
+ 		return &txgbe->xpcs->pcs;
+ 
+ 	return NULL;
+@@ -358,169 +358,8 @@ static int txgbe_gpio_direction_out(struct gpio_chip *chip, unsigned int offset,
+ 	return 0;
+ }
+ 
+-static void txgbe_gpio_irq_ack(struct irq_data *d)
+-{
+-	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+-	irq_hw_number_t hwirq = irqd_to_hwirq(d);
+-	struct wx *wx = gpiochip_get_data(gc);
+-	unsigned long flags;
+-
+-	raw_spin_lock_irqsave(&wx->gpio_lock, flags);
+-	wr32(wx, WX_GPIO_EOI, BIT(hwirq));
+-	raw_spin_unlock_irqrestore(&wx->gpio_lock, flags);
+-}
+-
+-static void txgbe_gpio_irq_mask(struct irq_data *d)
+-{
+-	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+-	irq_hw_number_t hwirq = irqd_to_hwirq(d);
+-	struct wx *wx = gpiochip_get_data(gc);
+-	unsigned long flags;
+-
+-	gpiochip_disable_irq(gc, hwirq);
+-
+-	raw_spin_lock_irqsave(&wx->gpio_lock, flags);
+-	wr32m(wx, WX_GPIO_INTMASK, BIT(hwirq), BIT(hwirq));
+-	raw_spin_unlock_irqrestore(&wx->gpio_lock, flags);
+-}
+-
+-static void txgbe_gpio_irq_unmask(struct irq_data *d)
+-{
+-	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+-	irq_hw_number_t hwirq = irqd_to_hwirq(d);
+-	struct wx *wx = gpiochip_get_data(gc);
+-	unsigned long flags;
+-
+-	gpiochip_enable_irq(gc, hwirq);
+-
+-	raw_spin_lock_irqsave(&wx->gpio_lock, flags);
+-	wr32m(wx, WX_GPIO_INTMASK, BIT(hwirq), 0);
+-	raw_spin_unlock_irqrestore(&wx->gpio_lock, flags);
+-}
+-
+-static void txgbe_toggle_trigger(struct gpio_chip *gc, unsigned int offset)
+-{
+-	struct wx *wx = gpiochip_get_data(gc);
+-	u32 pol, val;
+-
+-	pol = rd32(wx, WX_GPIO_POLARITY);
+-	val = rd32(wx, WX_GPIO_EXT);
+-
+-	if (val & BIT(offset))
+-		pol &= ~BIT(offset);
+-	else
+-		pol |= BIT(offset);
+-
+-	wr32(wx, WX_GPIO_POLARITY, pol);
+-}
+-
+-static int txgbe_gpio_set_type(struct irq_data *d, unsigned int type)
+-{
+-	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+-	irq_hw_number_t hwirq = irqd_to_hwirq(d);
+-	struct wx *wx = gpiochip_get_data(gc);
+-	u32 level, polarity, mask;
+-	unsigned long flags;
+-
+-	mask = BIT(hwirq);
+-
+-	if (type & IRQ_TYPE_LEVEL_MASK) {
+-		level = 0;
+-		irq_set_handler_locked(d, handle_level_irq);
+-	} else {
+-		level = mask;
+-		irq_set_handler_locked(d, handle_edge_irq);
+-	}
+-
+-	if (type == IRQ_TYPE_EDGE_RISING || type == IRQ_TYPE_LEVEL_HIGH)
+-		polarity = mask;
+-	else
+-		polarity = 0;
+-
+-	raw_spin_lock_irqsave(&wx->gpio_lock, flags);
+-
+-	wr32m(wx, WX_GPIO_INTEN, mask, mask);
+-	wr32m(wx, WX_GPIO_INTTYPE_LEVEL, mask, level);
+-	if (type == IRQ_TYPE_EDGE_BOTH)
+-		txgbe_toggle_trigger(gc, hwirq);
+-	else
+-		wr32m(wx, WX_GPIO_POLARITY, mask, polarity);
+-
+-	raw_spin_unlock_irqrestore(&wx->gpio_lock, flags);
+-
+-	return 0;
+-}
+-
+-static const struct irq_chip txgbe_gpio_irq_chip = {
+-	.name = "txgbe-gpio-irq",
+-	.irq_ack = txgbe_gpio_irq_ack,
+-	.irq_mask = txgbe_gpio_irq_mask,
+-	.irq_unmask = txgbe_gpio_irq_unmask,
+-	.irq_set_type = txgbe_gpio_set_type,
+-	.flags = IRQCHIP_IMMUTABLE,
+-	GPIOCHIP_IRQ_RESOURCE_HELPERS,
+-};
+-
+-irqreturn_t txgbe_gpio_irq_handler(int irq, void *data)
+-{
+-	struct txgbe *txgbe = data;
+-	struct wx *wx = txgbe->wx;
+-	irq_hw_number_t hwirq;
+-	unsigned long gpioirq;
+-	struct gpio_chip *gc;
+-	unsigned long flags;
+-
+-	gpioirq = rd32(wx, WX_GPIO_INTSTATUS);
+-
+-	gc = txgbe->gpio;
+-	for_each_set_bit(hwirq, &gpioirq, gc->ngpio) {
+-		int gpio = irq_find_mapping(gc->irq.domain, hwirq);
+-		struct irq_data *d = irq_get_irq_data(gpio);
+-		u32 irq_type = irq_get_trigger_type(gpio);
+-
+-		txgbe_gpio_irq_ack(d);
+-		handle_nested_irq(gpio);
+-
+-		if ((irq_type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) {
+-			raw_spin_lock_irqsave(&wx->gpio_lock, flags);
+-			txgbe_toggle_trigger(gc, hwirq);
+-			raw_spin_unlock_irqrestore(&wx->gpio_lock, flags);
+-		}
+-	}
+-
+-	return IRQ_HANDLED;
+-}
+-
+-void txgbe_reinit_gpio_intr(struct wx *wx)
+-{
+-	struct txgbe *txgbe = wx->priv;
+-	irq_hw_number_t hwirq;
+-	unsigned long gpioirq;
+-	struct gpio_chip *gc;
+-	unsigned long flags;
+-
+-	/* for gpio interrupt pending before irq enable */
+-	gpioirq = rd32(wx, WX_GPIO_INTSTATUS);
+-
+-	gc = txgbe->gpio;
+-	for_each_set_bit(hwirq, &gpioirq, gc->ngpio) {
+-		int gpio = irq_find_mapping(gc->irq.domain, hwirq);
+-		struct irq_data *d = irq_get_irq_data(gpio);
+-		u32 irq_type = irq_get_trigger_type(gpio);
+-
+-		txgbe_gpio_irq_ack(d);
+-
+-		if ((irq_type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) {
+-			raw_spin_lock_irqsave(&wx->gpio_lock, flags);
+-			txgbe_toggle_trigger(gc, hwirq);
+-			raw_spin_unlock_irqrestore(&wx->gpio_lock, flags);
+-		}
+-	}
+-}
+-
+ static int txgbe_gpio_init(struct txgbe *txgbe)
+ {
+-	struct gpio_irq_chip *girq;
+ 	struct gpio_chip *gc;
+ 	struct device *dev;
+ 	struct wx *wx;
+@@ -550,11 +389,6 @@ static int txgbe_gpio_init(struct txgbe *txgbe)
+ 	gc->direction_input = txgbe_gpio_direction_in;
+ 	gc->direction_output = txgbe_gpio_direction_out;
+ 
+-	girq = &gc->irq;
+-	gpio_irq_chip_set_chip(girq, &txgbe_gpio_irq_chip);
+-	girq->default_type = IRQ_TYPE_NONE;
+-	girq->handler = handle_bad_irq;
+-
+ 	ret = devm_gpiochip_add_data(dev, gc, wx);
+ 	if (ret)
+ 		return ret;
+diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h
+index 8a026d804fe24c..3938985355ed6c 100644
+--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h
++++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h
+@@ -4,8 +4,6 @@
+ #ifndef _TXGBE_PHY_H_
+ #define _TXGBE_PHY_H_
+ 
+-irqreturn_t txgbe_gpio_irq_handler(int irq, void *data);
+-void txgbe_reinit_gpio_intr(struct wx *wx);
+ irqreturn_t txgbe_link_irq_handler(int irq, void *data);
+ int txgbe_init_phy(struct txgbe *txgbe);
+ void txgbe_remove_phy(struct txgbe *txgbe);
+diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
+index 959102c4c3797e..8ea413a7abe9d3 100644
+--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
++++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
+@@ -75,8 +75,7 @@
+ #define TXGBE_PX_MISC_IEN_MASK                            \
+ 	(TXGBE_PX_MISC_ETH_LKDN | TXGBE_PX_MISC_DEV_RST | \
+ 	 TXGBE_PX_MISC_ETH_EVENT | TXGBE_PX_MISC_ETH_LK | \
+-	 TXGBE_PX_MISC_ETH_AN | TXGBE_PX_MISC_INT_ERR |   \
+-	 TXGBE_PX_MISC_GPIO)
++	 TXGBE_PX_MISC_ETH_AN | TXGBE_PX_MISC_INT_ERR)
+ 
+ /* Port cfg registers */
+ #define TXGBE_CFG_PORT_ST                       0x14404
+@@ -313,8 +312,7 @@ struct txgbe_nodes {
+ };
+ 
+ enum txgbe_misc_irqs {
+-	TXGBE_IRQ_GPIO = 0,
+-	TXGBE_IRQ_LINK,
++	TXGBE_IRQ_LINK = 0,
+ 	TXGBE_IRQ_MAX
+ };
+ 
+@@ -335,7 +333,6 @@ struct txgbe {
+ 	struct clk_lookup *clock;
+ 	struct clk *clk;
+ 	struct gpio_chip *gpio;
+-	unsigned int gpio_irq;
+ 	unsigned int link_irq;
+ 
+ 	/* flow director */
+diff --git a/drivers/net/mdio/mdio-ipq4019.c b/drivers/net/mdio/mdio-ipq4019.c
+index 9d8f43b28aac5b..ea1f64596a85cf 100644
+--- a/drivers/net/mdio/mdio-ipq4019.c
++++ b/drivers/net/mdio/mdio-ipq4019.c
+@@ -352,8 +352,11 @@ static int ipq4019_mdio_probe(struct platform_device *pdev)
+ 	/* The platform resource is provided on the chipset IPQ5018 */
+ 	/* This resource is optional */
+ 	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+-	if (res)
++	if (res) {
+ 		priv->eth_ldo_rdy = devm_ioremap_resource(&pdev->dev, res);
++		if (IS_ERR(priv->eth_ldo_rdy))
++			return PTR_ERR(priv->eth_ldo_rdy);
++	}
+ 
+ 	bus->name = "ipq4019_mdio";
+ 	bus->read = ipq4019_mdio_read_c22;
+diff --git a/drivers/net/netdevsim/ipsec.c b/drivers/net/netdevsim/ipsec.c
+index f0d58092e7e961..3612b0633bd177 100644
+--- a/drivers/net/netdevsim/ipsec.c
++++ b/drivers/net/netdevsim/ipsec.c
+@@ -176,14 +176,13 @@ static int nsim_ipsec_add_sa(struct xfrm_state *xs,
+ 		return ret;
+ 	}
+ 
+-	if (xs->xso.dir == XFRM_DEV_OFFLOAD_IN) {
++	if (xs->xso.dir == XFRM_DEV_OFFLOAD_IN)
+ 		sa.rx = true;
+ 
+-		if (xs->props.family == AF_INET6)
+-			memcpy(sa.ipaddr, &xs->id.daddr.a6, 16);
+-		else
+-			memcpy(&sa.ipaddr[3], &xs->id.daddr.a4, 4);
+-	}
++	if (xs->props.family == AF_INET6)
++		memcpy(sa.ipaddr, &xs->id.daddr.a6, 16);
++	else
++		memcpy(&sa.ipaddr[3], &xs->id.daddr.a4, 4);
+ 
+ 	/* the preparations worked, so save the info */
+ 	memcpy(&ipsec->sa[sa_idx], &sa, sizeof(sa));
+diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
+index 8adf77e3557e7a..531b1b6a37d190 100644
+--- a/drivers/net/usb/lan78xx.c
++++ b/drivers/net/usb/lan78xx.c
+@@ -1652,13 +1652,13 @@ static int lan78xx_set_wol(struct net_device *netdev,
+ 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
+ 	int ret;
+ 
++	if (wol->wolopts & ~WAKE_ALL)
++		return -EINVAL;
++
+ 	ret = usb_autopm_get_interface(dev->intf);
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	if (wol->wolopts & ~WAKE_ALL)
+-		return -EINVAL;
+-
+ 	pdata->wol = wol->wolopts;
+ 
+ 	device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
+@@ -2380,6 +2380,7 @@ static int lan78xx_phy_init(struct lan78xx_net *dev)
+ 		if (dev->chipid == ID_REV_CHIP_ID_7801_) {
+ 			if (phy_is_pseudo_fixed_link(phydev)) {
+ 				fixed_phy_unregister(phydev);
++				phy_device_free(phydev);
+ 			} else {
+ 				phy_unregister_fixup_for_uid(PHY_KSZ9031RNX,
+ 							     0xfffffff0);
+@@ -4246,8 +4247,10 @@ static void lan78xx_disconnect(struct usb_interface *intf)
+ 
+ 	phy_disconnect(net->phydev);
+ 
+-	if (phy_is_pseudo_fixed_link(phydev))
++	if (phy_is_pseudo_fixed_link(phydev)) {
+ 		fixed_phy_unregister(phydev);
++		phy_device_free(phydev);
++	}
+ 
+ 	usb_scuttle_anchored_urbs(&dev->deferred);
+ 
+@@ -4414,29 +4417,30 @@ static int lan78xx_probe(struct usb_interface *intf,
+ 
+ 	period = ep_intr->desc.bInterval;
+ 	maxp = usb_maxpacket(dev->udev, dev->pipe_intr);
+-	buf = kmalloc(maxp, GFP_KERNEL);
+-	if (!buf) {
++
++	dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
++	if (!dev->urb_intr) {
+ 		ret = -ENOMEM;
+ 		goto out5;
+ 	}
+ 
+-	dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
+-	if (!dev->urb_intr) {
++	buf = kmalloc(maxp, GFP_KERNEL);
++	if (!buf) {
+ 		ret = -ENOMEM;
+-		goto out6;
+-	} else {
+-		usb_fill_int_urb(dev->urb_intr, dev->udev,
+-				 dev->pipe_intr, buf, maxp,
+-				 intr_complete, dev, period);
+-		dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;
++		goto free_urbs;
+ 	}
+ 
++	usb_fill_int_urb(dev->urb_intr, dev->udev,
++			 dev->pipe_intr, buf, maxp,
++			 intr_complete, dev, period);
++	dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;
++
+ 	dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out);
+ 
+ 	/* Reject broken descriptors. */
+ 	if (dev->maxpacket == 0) {
+ 		ret = -ENODEV;
+-		goto out6;
++		goto free_urbs;
+ 	}
+ 
+ 	/* driver requires remote-wakeup capability during autosuspend. */
+@@ -4444,7 +4448,7 @@ static int lan78xx_probe(struct usb_interface *intf,
+ 
+ 	ret = lan78xx_phy_init(dev);
+ 	if (ret < 0)
+-		goto out7;
++		goto free_urbs;
+ 
+ 	ret = register_netdev(netdev);
+ 	if (ret != 0) {
+@@ -4466,10 +4470,8 @@ static int lan78xx_probe(struct usb_interface *intf,
+ 
+ out8:
+ 	phy_disconnect(netdev->phydev);
+-out7:
++free_urbs:
+ 	usb_free_urb(dev->urb_intr);
+-out6:
+-	kfree(buf);
+ out5:
+ 	lan78xx_unbind(dev, intf);
+ out4:
+diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
+index 646e1737d4c47c..6b467696bc982c 100644
+--- a/drivers/net/wireless/ath/ath10k/mac.c
++++ b/drivers/net/wireless/ath/ath10k/mac.c
+@@ -9121,7 +9121,7 @@ static const struct ath10k_index_vht_data_rate_type supported_vht_mcs_rate_nss1[
+ 	{6,  {2633, 2925}, {1215, 1350}, {585,  650} },
+ 	{7,  {2925, 3250}, {1350, 1500}, {650,  722} },
+ 	{8,  {3510, 3900}, {1620, 1800}, {780,  867} },
+-	{9,  {3900, 4333}, {1800, 2000}, {780,  867} }
++	{9,  {3900, 4333}, {1800, 2000}, {865,  960} }
+ };
+ 
+ /*MCS parameters with Nss = 2 */
+@@ -9136,7 +9136,7 @@ static const struct ath10k_index_vht_data_rate_type supported_vht_mcs_rate_nss2[
+ 	{6,  {5265, 5850}, {2430, 2700}, {1170, 1300} },
+ 	{7,  {5850, 6500}, {2700, 3000}, {1300, 1444} },
+ 	{8,  {7020, 7800}, {3240, 3600}, {1560, 1733} },
+-	{9,  {7800, 8667}, {3600, 4000}, {1560, 1733} }
++	{9,  {7800, 8667}, {3600, 4000}, {1730, 1920} }
+ };
+ 
+ static void ath10k_mac_get_rate_flags_ht(struct ath10k *ar, u32 rate, u8 nss, u8 mcs,
+diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c
+index f477afd325deaf..7a22483b35cd98 100644
+--- a/drivers/net/wireless/ath/ath11k/qmi.c
++++ b/drivers/net/wireless/ath/ath11k/qmi.c
+@@ -2180,6 +2180,9 @@ static int ath11k_qmi_request_device_info(struct ath11k_base *ab)
+ 	ab->mem = bar_addr_va;
+ 	ab->mem_len = resp.bar_size;
+ 
++	if (!ab->hw_params.ce_remap)
++		ab->mem_ce = ab->mem;
++
+ 	return 0;
+ out:
+ 	return ret;
+diff --git a/drivers/net/wireless/ath/ath12k/dp.c b/drivers/net/wireless/ath/ath12k/dp.c
+index 61aa78d8bd8c8f..217eb57663f058 100644
+--- a/drivers/net/wireless/ath/ath12k/dp.c
++++ b/drivers/net/wireless/ath/ath12k/dp.c
+@@ -1202,10 +1202,16 @@ static void ath12k_dp_cc_cleanup(struct ath12k_base *ab)
+ 			if (!skb)
+ 				continue;
+ 
+-			skb_cb = ATH12K_SKB_CB(skb);
+-			ar = skb_cb->ar;
+-			if (atomic_dec_and_test(&ar->dp.num_tx_pending))
+-				wake_up(&ar->dp.tx_empty_waitq);
++			/* if we are unregistering, hw would've been destroyed and
++			 * ar is no longer valid.
++			 */
++			if (!(test_bit(ATH12K_FLAG_UNREGISTERING, &ab->dev_flags))) {
++				skb_cb = ATH12K_SKB_CB(skb);
++				ar = skb_cb->ar;
++
++				if (atomic_dec_and_test(&ar->dp.num_tx_pending))
++					wake_up(&ar->dp.tx_empty_waitq);
++			}
+ 
+ 			dma_unmap_single(ab->dev, ATH12K_SKB_CB(skb)->paddr,
+ 					 skb->len, DMA_TO_DEVICE);
+@@ -1241,6 +1247,7 @@ static void ath12k_dp_cc_cleanup(struct ath12k_base *ab)
+ 	}
+ 
+ 	kfree(dp->spt_info);
++	dp->spt_info = NULL;
+ }
+ 
+ static void ath12k_dp_reoq_lut_cleanup(struct ath12k_base *ab)
+@@ -1276,8 +1283,10 @@ void ath12k_dp_free(struct ath12k_base *ab)
+ 
+ 	ath12k_dp_rx_reo_cmd_list_cleanup(ab);
+ 
+-	for (i = 0; i < ab->hw_params->max_tx_ring; i++)
++	for (i = 0; i < ab->hw_params->max_tx_ring; i++) {
+ 		kfree(dp->tx_ring[i].tx_status);
++		dp->tx_ring[i].tx_status = NULL;
++	}
+ 
+ 	ath12k_dp_rx_free(ab);
+ 	/* Deinit any SOC level resource */
+diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c
+index 137394c364603b..6d0784a21558ea 100644
+--- a/drivers/net/wireless/ath/ath12k/mac.c
++++ b/drivers/net/wireless/ath/ath12k/mac.c
+@@ -917,7 +917,10 @@ void ath12k_mac_peer_cleanup_all(struct ath12k *ar)
+ 
+ 	spin_lock_bh(&ab->base_lock);
+ 	list_for_each_entry_safe(peer, tmp, &ab->peers, list) {
+-		ath12k_dp_rx_peer_tid_cleanup(ar, peer);
++		/* Skip Rx TID cleanup for self peer */
++		if (peer->sta)
++			ath12k_dp_rx_peer_tid_cleanup(ar, peer);
++
+ 		list_del(&peer->list);
+ 		kfree(peer);
+ 	}
+diff --git a/drivers/net/wireless/ath/ath12k/wow.c b/drivers/net/wireless/ath/ath12k/wow.c
+index 9b8684abbe40ae..3624180b25b970 100644
+--- a/drivers/net/wireless/ath/ath12k/wow.c
++++ b/drivers/net/wireless/ath/ath12k/wow.c
+@@ -191,7 +191,7 @@ ath12k_wow_convert_8023_to_80211(struct ath12k *ar,
+ 			memcpy(bytemask, eth_bytemask, eth_pat_len);
+ 
+ 			pat_len = eth_pat_len;
+-		} else if (eth_pkt_ofs + eth_pat_len < prot_ofs) {
++		} else if (size_add(eth_pkt_ofs, eth_pat_len) < prot_ofs) {
+ 			memcpy(pat, eth_pat, ETH_ALEN - eth_pkt_ofs);
+ 			memcpy(bytemask, eth_bytemask, ETH_ALEN - eth_pkt_ofs);
+ 
+diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
+index eb631fd3336d8d..b5257b2b4aa527 100644
+--- a/drivers/net/wireless/ath/ath9k/htc_hst.c
++++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
+@@ -294,6 +294,9 @@ int htc_connect_service(struct htc_target *target,
+ 		return -ETIMEDOUT;
+ 	}
+ 
++	if (target->conn_rsp_epid < 0 || target->conn_rsp_epid >= ENDPOINT_MAX)
++		return -EINVAL;
++
+ 	*conn_rsp_epid = target->conn_rsp_epid;
+ 	return 0;
+ err:
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
+index fe4f657561056c..af930e34c21f8a 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
+@@ -110,9 +110,8 @@ void brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type,
+ 		}
+ 		strreplace(board_type, '/', '-');
+ 		settings->board_type = board_type;
+-
+-		of_node_put(root);
+ 	}
++	of_node_put(root);
+ 
+ 	if (!np || !of_device_is_compatible(np, "brcm,bcm4329-fmac"))
+ 		return;
+diff --git a/drivers/net/wireless/intel/iwlegacy/3945.c b/drivers/net/wireless/intel/iwlegacy/3945.c
+index 14d2331ee6cb97..b0656b143f77a2 100644
+--- a/drivers/net/wireless/intel/iwlegacy/3945.c
++++ b/drivers/net/wireless/intel/iwlegacy/3945.c
+@@ -566,7 +566,7 @@ il3945_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
+ 	if (!(rx_end->status & RX_RES_STATUS_NO_CRC32_ERROR) ||
+ 	    !(rx_end->status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
+ 		D_RX("Bad CRC or FIFO: 0x%08X.\n", rx_end->status);
+-		rx_status.flag |= RX_FLAG_FAILED_FCS_CRC;
++		return;
+ 	}
+ 
+ 	/* Convert 3945's rssi indicator to dBm */
+diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
+index fcccde7bb65922..05c4af41bdb960 100644
+--- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c
++++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
+@@ -664,7 +664,7 @@ il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
+ 	if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
+ 	    !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
+ 		D_RX("Bad CRC or FIFO: 0x%08X.\n", le32_to_cpu(rx_pkt_status));
+-		rx_status.flag |= RX_FLAG_FAILED_FCS_CRC;
++		return;
+ 	}
+ 
+ 	/* This will be used in several places later */
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+index 80b9a115245fe8..d37d83d246354e 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+@@ -1237,6 +1237,7 @@ int __iwl_mvm_mac_start(struct iwl_mvm *mvm)
+ 	fast_resume = mvm->fast_resume;
+ 
+ 	if (fast_resume) {
++		iwl_mvm_mei_device_state(mvm, true);
+ 		ret = iwl_mvm_fast_resume(mvm);
+ 		if (ret) {
+ 			iwl_mvm_stop_device(mvm);
+@@ -1377,10 +1378,13 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm, bool suspend)
+ 		iwl_mvm_rm_aux_sta(mvm);
+ 
+ 	if (suspend &&
+-	    mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
++	    mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
+ 		iwl_mvm_fast_suspend(mvm);
+-	else
++		/* From this point on, we won't touch the device */
++		iwl_mvm_mei_device_state(mvm, false);
++	} else {
+ 		iwl_mvm_stop_device(mvm);
++	}
+ 
+ 	iwl_mvm_async_handlers_purge(mvm);
+ 	/* async_handlers_list is empty and will stay empty: HW is stopped */
+diff --git a/drivers/net/wireless/intersil/p54/p54spi.c b/drivers/net/wireless/intersil/p54/p54spi.c
+index d33a994906a7bb..27f44a9f0bc1f9 100644
+--- a/drivers/net/wireless/intersil/p54/p54spi.c
++++ b/drivers/net/wireless/intersil/p54/p54spi.c
+@@ -624,7 +624,7 @@ static int p54spi_probe(struct spi_device *spi)
+ 	gpio_direction_input(p54spi_gpio_irq);
+ 
+ 	ret = request_irq(gpio_to_irq(p54spi_gpio_irq),
+-			  p54spi_interrupt, 0, "p54spi",
++			  p54spi_interrupt, IRQF_NO_AUTOEN, "p54spi",
+ 			  priv->spi);
+ 	if (ret < 0) {
+ 		dev_err(&priv->spi->dev, "request_irq() failed");
+@@ -633,8 +633,6 @@ static int p54spi_probe(struct spi_device *spi)
+ 
+ 	irq_set_irq_type(gpio_to_irq(p54spi_gpio_irq), IRQ_TYPE_EDGE_RISING);
+ 
+-	disable_irq(gpio_to_irq(p54spi_gpio_irq));
+-
+ 	INIT_WORK(&priv->work, p54spi_work);
+ 	init_completion(&priv->fw_comp);
+ 	INIT_LIST_HEAD(&priv->tx_pending);
+diff --git a/drivers/net/wireless/marvell/mwifiex/cmdevt.c b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
+index 1cff001bdc5145..b30ed321c6251a 100644
+--- a/drivers/net/wireless/marvell/mwifiex/cmdevt.c
++++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
+@@ -938,8 +938,10 @@ void mwifiex_process_assoc_resp(struct mwifiex_adapter *adapter)
+ 		assoc_resp.links[0].bss = priv->req_bss;
+ 		assoc_resp.buf = priv->assoc_rsp_buf;
+ 		assoc_resp.len = priv->assoc_rsp_size;
++		wiphy_lock(priv->wdev.wiphy);
+ 		cfg80211_rx_assoc_resp(priv->netdev,
+ 				       &assoc_resp);
++		wiphy_unlock(priv->wdev.wiphy);
+ 		priv->assoc_rsp_size = 0;
+ 	}
+ }
+diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h
+index d03129d5d24e3d..4a96281792cc1a 100644
+--- a/drivers/net/wireless/marvell/mwifiex/fw.h
++++ b/drivers/net/wireless/marvell/mwifiex/fw.h
+@@ -875,7 +875,7 @@ struct mwifiex_ietypes_chanstats {
+ struct mwifiex_ie_types_wildcard_ssid_params {
+ 	struct mwifiex_ie_types_header header;
+ 	u8 max_ssid_length;
+-	u8 ssid[1];
++	u8 ssid[];
+ } __packed;
+ 
+ #define TSF_DATA_SIZE            8
+diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
+index 96d1f6039fbca3..855019fe548582 100644
+--- a/drivers/net/wireless/marvell/mwifiex/main.c
++++ b/drivers/net/wireless/marvell/mwifiex/main.c
+@@ -1679,7 +1679,8 @@ static void mwifiex_probe_of(struct mwifiex_adapter *adapter)
+ 	}
+ 
+ 	ret = devm_request_irq(dev, adapter->irq_wakeup,
+-			       mwifiex_irq_wakeup_handler, IRQF_TRIGGER_LOW,
++			       mwifiex_irq_wakeup_handler,
++			       IRQF_TRIGGER_LOW | IRQF_NO_AUTOEN,
+ 			       "wifi_wake", adapter);
+ 	if (ret) {
+ 		dev_err(dev, "Failed to request irq_wakeup %d (%d)\n",
+@@ -1687,7 +1688,6 @@ static void mwifiex_probe_of(struct mwifiex_adapter *adapter)
+ 		goto err_exit;
+ 	}
+ 
+-	disable_irq(adapter->irq_wakeup);
+ 	if (device_init_wakeup(dev, true)) {
+ 		dev_err(dev, "fail to init wakeup for mwifiex\n");
+ 		goto err_exit;
+diff --git a/drivers/net/wireless/marvell/mwifiex/util.c b/drivers/net/wireless/marvell/mwifiex/util.c
+index 42c04bf858da37..1f1f6280a0f251 100644
+--- a/drivers/net/wireless/marvell/mwifiex/util.c
++++ b/drivers/net/wireless/marvell/mwifiex/util.c
+@@ -494,7 +494,9 @@ mwifiex_process_mgmt_packet(struct mwifiex_private *priv,
+ 			}
+ 		}
+ 
++		wiphy_lock(priv->wdev.wiphy);
+ 		cfg80211_rx_mlme_mgmt(priv->netdev, skb->data, pkt_len);
++		wiphy_unlock(priv->wdev.wiphy);
+ 	}
+ 
+ 	if (priv->adapter->host_mlme_enabled &&
+diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.c b/drivers/net/wireless/microchip/wilc1000/netdev.c
+index 9ecf3fb29b558f..8bc127c5a538cb 100644
+--- a/drivers/net/wireless/microchip/wilc1000/netdev.c
++++ b/drivers/net/wireless/microchip/wilc1000/netdev.c
+@@ -608,6 +608,9 @@ static int wilc_mac_open(struct net_device *ndev)
+ 		return ret;
+ 	}
+ 
++	wilc_set_operation_mode(vif, wilc_get_vif_idx(vif), vif->iftype,
++				vif->idx);
++
+ 	netdev_dbg(ndev, "Mac address: %pM\n", ndev->dev_addr);
+ 	ret = wilc_set_mac_address(vif, ndev->dev_addr);
+ 	if (ret) {
+@@ -618,9 +621,6 @@ static int wilc_mac_open(struct net_device *ndev)
+ 		return ret;
+ 	}
+ 
+-	wilc_set_operation_mode(vif, wilc_get_vif_idx(vif), vif->iftype,
+-				vif->idx);
+-
+ 	mgmt_regs.interface_stypes = vif->mgmt_reg_stypes;
+ 	/* so we detect a change */
+ 	vif->mgmt_reg_stypes = 0;
+diff --git a/drivers/net/wireless/realtek/rtl8xxxu/core.c b/drivers/net/wireless/realtek/rtl8xxxu/core.c
+index 7891c988dd5f03..f95898f68d68a5 100644
+--- a/drivers/net/wireless/realtek/rtl8xxxu/core.c
++++ b/drivers/net/wireless/realtek/rtl8xxxu/core.c
+@@ -5058,10 +5058,12 @@ rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ 	}
+ 
+ 	if (changed & BSS_CHANGED_BEACON_ENABLED) {
+-		if (bss_conf->enable_beacon)
++		if (bss_conf->enable_beacon) {
+ 			rtl8xxxu_start_tx_beacon(priv);
+-		else
++			schedule_delayed_work(&priv->update_beacon_work, 0);
++		} else {
+ 			rtl8xxxu_stop_tx_beacon(priv);
++		}
+ 	}
+ 
+ 	if (changed & BSS_CHANGED_BEACON)
+diff --git a/drivers/net/wireless/realtek/rtlwifi/efuse.c b/drivers/net/wireless/realtek/rtlwifi/efuse.c
+index 82cf5fb5175fef..6518e77b89f578 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/efuse.c
++++ b/drivers/net/wireless/realtek/rtlwifi/efuse.c
+@@ -162,10 +162,19 @@ void efuse_write_1byte(struct ieee80211_hw *hw, u16 address, u8 value)
+ void read_efuse_byte(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf)
+ {
+ 	struct rtl_priv *rtlpriv = rtl_priv(hw);
++	u16 max_attempts = 10000;
+ 	u32 value32;
+ 	u8 readbyte;
+ 	u16 retry;
+ 
++	/*
++	 * In case of USB devices, transfer speeds are limited, hence
++	 * efuse I/O reads could be (way) slower. So, decrease (a lot)
++	 * the read attempts in case of failures.
++	 */
++	if (rtlpriv->rtlhal.interface == INTF_USB)
++		max_attempts = 10;
++
+ 	rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 1,
+ 		       (_offset & 0xff));
+ 	readbyte = rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 2);
+@@ -178,7 +187,7 @@ void read_efuse_byte(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf)
+ 
+ 	retry = 0;
+ 	value32 = rtl_read_dword(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL]);
+-	while (!(((value32 >> 24) & 0xff) & 0x80) && (retry < 10000)) {
++	while (!(((value32 >> 24) & 0xff) & 0x80) && (retry < max_attempts)) {
+ 		value32 = rtl_read_dword(rtlpriv,
+ 					 rtlpriv->cfg->maps[EFUSE_CTRL]);
+ 		retry++;
+diff --git a/drivers/net/wireless/realtek/rtw89/cam.c b/drivers/net/wireless/realtek/rtw89/cam.c
+index 4476fc7e53db74..8d140b94cb4403 100644
+--- a/drivers/net/wireless/realtek/rtw89/cam.c
++++ b/drivers/net/wireless/realtek/rtw89/cam.c
+@@ -211,25 +211,17 @@ static int rtw89_cam_get_addr_cam_key_idx(struct rtw89_addr_cam_entry *addr_cam,
+ 	return 0;
+ }
+ 
+-static int rtw89_cam_detach_sec_cam(struct rtw89_dev *rtwdev,
+-				    struct ieee80211_vif *vif,
+-				    struct ieee80211_sta *sta,
+-				    const struct rtw89_sec_cam_entry *sec_cam,
+-				    bool inform_fw)
++static int __rtw89_cam_detach_sec_cam(struct rtw89_dev *rtwdev,
++				      struct rtw89_vif_link *rtwvif_link,
++				      struct rtw89_sta_link *rtwsta_link,
++				      const struct rtw89_sec_cam_entry *sec_cam,
++				      bool inform_fw)
+ {
+-	struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
+-	struct rtw89_vif *rtwvif;
+ 	struct rtw89_addr_cam_entry *addr_cam;
+ 	unsigned int i;
+ 	int ret = 0;
+ 
+-	if (!vif) {
+-		rtw89_err(rtwdev, "No iface for deleting sec cam\n");
+-		return -EINVAL;
+-	}
+-
+-	rtwvif = (struct rtw89_vif *)vif->drv_priv;
+-	addr_cam = rtw89_get_addr_cam_of(rtwvif, rtwsta);
++	addr_cam = rtw89_get_addr_cam_of(rtwvif_link, rtwsta_link);
+ 
+ 	for_each_set_bit(i, addr_cam->sec_cam_map, RTW89_SEC_CAM_IN_ADDR_CAM) {
+ 		if (addr_cam->sec_ent[i] != sec_cam->sec_cam_idx)
+@@ -239,11 +231,11 @@ static int rtw89_cam_detach_sec_cam(struct rtw89_dev *rtwdev,
+ 	}
+ 
+ 	if (inform_fw) {
+-		ret = rtw89_chip_h2c_dctl_sec_cam(rtwdev, rtwvif, rtwsta);
++		ret = rtw89_chip_h2c_dctl_sec_cam(rtwdev, rtwvif_link, rtwsta_link);
+ 		if (ret)
+ 			rtw89_err(rtwdev,
+ 				  "failed to update dctl cam del key: %d\n", ret);
+-		ret = rtw89_fw_h2c_cam(rtwdev, rtwvif, rtwsta, NULL);
++		ret = rtw89_fw_h2c_cam(rtwdev, rtwvif_link, rtwsta_link, NULL);
+ 		if (ret)
+ 			rtw89_err(rtwdev, "failed to update cam del key: %d\n", ret);
+ 	}
+@@ -251,25 +243,17 @@ static int rtw89_cam_detach_sec_cam(struct rtw89_dev *rtwdev,
+ 	return ret;
+ }
+ 
+-static int rtw89_cam_attach_sec_cam(struct rtw89_dev *rtwdev,
+-				    struct ieee80211_vif *vif,
+-				    struct ieee80211_sta *sta,
+-				    struct ieee80211_key_conf *key,
+-				    struct rtw89_sec_cam_entry *sec_cam)
++static int __rtw89_cam_attach_sec_cam(struct rtw89_dev *rtwdev,
++				      struct rtw89_vif_link *rtwvif_link,
++				      struct rtw89_sta_link *rtwsta_link,
++				      struct ieee80211_key_conf *key,
++				      struct rtw89_sec_cam_entry *sec_cam)
+ {
+-	struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
+-	struct rtw89_vif *rtwvif;
+ 	struct rtw89_addr_cam_entry *addr_cam;
+ 	u8 key_idx = 0;
+ 	int ret;
+ 
+-	if (!vif) {
+-		rtw89_err(rtwdev, "No iface for adding sec cam\n");
+-		return -EINVAL;
+-	}
+-
+-	rtwvif = (struct rtw89_vif *)vif->drv_priv;
+-	addr_cam = rtw89_get_addr_cam_of(rtwvif, rtwsta);
++	addr_cam = rtw89_get_addr_cam_of(rtwvif_link, rtwsta_link);
+ 
+ 	if (key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+ 	    key->cipher == WLAN_CIPHER_SUITE_WEP104)
+@@ -285,13 +269,13 @@ static int rtw89_cam_attach_sec_cam(struct rtw89_dev *rtwdev,
+ 	addr_cam->sec_ent_keyid[key_idx] = key->keyidx;
+ 	addr_cam->sec_ent[key_idx] = sec_cam->sec_cam_idx;
+ 	set_bit(key_idx, addr_cam->sec_cam_map);
+-	ret = rtw89_chip_h2c_dctl_sec_cam(rtwdev, rtwvif, rtwsta);
++	ret = rtw89_chip_h2c_dctl_sec_cam(rtwdev, rtwvif_link, rtwsta_link);
+ 	if (ret) {
+ 		rtw89_err(rtwdev, "failed to update dctl cam sec entry: %d\n",
+ 			  ret);
+ 		return ret;
+ 	}
+-	ret = rtw89_fw_h2c_cam(rtwdev, rtwvif, rtwsta, NULL);
++	ret = rtw89_fw_h2c_cam(rtwdev, rtwvif_link, rtwsta_link, NULL);
+ 	if (ret) {
+ 		rtw89_err(rtwdev, "failed to update addr cam sec entry: %d\n",
+ 			  ret);
+@@ -302,6 +286,92 @@ static int rtw89_cam_attach_sec_cam(struct rtw89_dev *rtwdev,
+ 	return 0;
+ }
+ 
++static int rtw89_cam_detach_sec_cam(struct rtw89_dev *rtwdev,
++				    struct ieee80211_vif *vif,
++				    struct ieee80211_sta *sta,
++				    const struct rtw89_sec_cam_entry *sec_cam,
++				    bool inform_fw)
++{
++	struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
++	struct rtw89_sta_link *rtwsta_link;
++	struct rtw89_vif_link *rtwvif_link;
++	struct rtw89_vif *rtwvif;
++	unsigned int link_id;
++	int ret;
++
++	if (!vif) {
++		rtw89_err(rtwdev, "No iface for deleting sec cam\n");
++		return -EINVAL;
++	}
++
++	rtwvif = vif_to_rtwvif(vif);
++
++	rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) {
++		rtwsta_link = rtwsta ? rtwsta->links[link_id] : NULL;
++		if (rtwsta && !rtwsta_link)
++			continue;
++
++		ret = __rtw89_cam_detach_sec_cam(rtwdev, rtwvif_link, rtwsta_link,
++						 sec_cam, inform_fw);
++		if (ret)
++			return ret;
++	}
++
++	return 0;
++}
++
++static int rtw89_cam_attach_sec_cam(struct rtw89_dev *rtwdev,
++				    struct ieee80211_vif *vif,
++				    struct ieee80211_sta *sta,
++				    struct ieee80211_key_conf *key,
++				    struct rtw89_sec_cam_entry *sec_cam)
++{
++	struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
++	struct rtw89_sta_link *rtwsta_link;
++	struct rtw89_vif_link *rtwvif_link;
++	struct rtw89_vif *rtwvif;
++	unsigned int link_id;
++	int key_link_id;
++	int ret;
++
++	if (!vif) {
++		rtw89_err(rtwdev, "No iface for adding sec cam\n");
++		return -EINVAL;
++	}
++
++	rtwvif = vif_to_rtwvif(vif);
++
++	key_link_id = ieee80211_vif_is_mld(vif) ? key->link_id : 0;
++	if (key_link_id >= 0) {
++		rtwvif_link = rtwvif->links[key_link_id];
++		rtwsta_link = rtwsta ? rtwsta->links[key_link_id] : NULL;
++
++		if (!rtwvif_link || (rtwsta && !rtwsta_link)) {
++			rtw89_err(rtwdev, "No drv link for adding sec cam\n");
++			return -ENOLINK;
++		}
++
++		return __rtw89_cam_attach_sec_cam(rtwdev, rtwvif_link,
++						  rtwsta_link, key, sec_cam);
++	}
++
++	/* key_link_id < 0: MLD pairwise key */
++	if (!rtwsta) {
++		rtw89_err(rtwdev, "No sta for adding MLD pairwise sec cam\n");
++		return -EINVAL;
++	}
++
++	rtw89_sta_for_each_link(rtwsta, rtwsta_link, link_id) {
++		rtwvif_link = rtwsta_link->rtwvif_link;
++		ret = __rtw89_cam_attach_sec_cam(rtwdev, rtwvif_link,
++						 rtwsta_link, key, sec_cam);
++		if (ret)
++			return ret;
++	}
++
++	return 0;
++}
++
+ static int rtw89_cam_sec_key_install(struct rtw89_dev *rtwdev,
+ 				     struct ieee80211_vif *vif,
+ 				     struct ieee80211_sta *sta,
+@@ -485,10 +555,10 @@ void rtw89_cam_deinit_bssid_cam(struct rtw89_dev *rtwdev,
+ 	clear_bit(bssid_cam->bssid_cam_idx, cam_info->bssid_cam_map);
+ }
+ 
+-void rtw89_cam_deinit(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
++void rtw89_cam_deinit(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link)
+ {
+-	struct rtw89_addr_cam_entry *addr_cam = &rtwvif->addr_cam;
+-	struct rtw89_bssid_cam_entry *bssid_cam = &rtwvif->bssid_cam;
++	struct rtw89_addr_cam_entry *addr_cam = &rtwvif_link->addr_cam;
++	struct rtw89_bssid_cam_entry *bssid_cam = &rtwvif_link->bssid_cam;
+ 
+ 	rtw89_cam_deinit_addr_cam(rtwdev, addr_cam);
+ 	rtw89_cam_deinit_bssid_cam(rtwdev, bssid_cam);
+@@ -593,7 +663,7 @@ static int rtw89_cam_get_avail_bssid_cam(struct rtw89_dev *rtwdev,
+ }
+ 
+ int rtw89_cam_init_bssid_cam(struct rtw89_dev *rtwdev,
+-			     struct rtw89_vif *rtwvif,
++			     struct rtw89_vif_link *rtwvif_link,
+ 			     struct rtw89_bssid_cam_entry *bssid_cam,
+ 			     const u8 *bssid)
+ {
+@@ -613,7 +683,7 @@ int rtw89_cam_init_bssid_cam(struct rtw89_dev *rtwdev,
+ 	}
+ 
+ 	bssid_cam->bssid_cam_idx = bssid_cam_idx;
+-	bssid_cam->phy_idx = rtwvif->phy_idx;
++	bssid_cam->phy_idx = rtwvif_link->phy_idx;
+ 	bssid_cam->len = BSSID_CAM_ENT_SIZE;
+ 	bssid_cam->offset = 0;
+ 	bssid_cam->valid = true;
+@@ -622,20 +692,21 @@ int rtw89_cam_init_bssid_cam(struct rtw89_dev *rtwdev,
+ 	return 0;
+ }
+ 
+-void rtw89_cam_bssid_changed(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
++void rtw89_cam_bssid_changed(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link)
+ {
+-	struct rtw89_bssid_cam_entry *bssid_cam = &rtwvif->bssid_cam;
++	struct rtw89_bssid_cam_entry *bssid_cam = &rtwvif_link->bssid_cam;
+ 
+-	ether_addr_copy(bssid_cam->bssid, rtwvif->bssid);
++	ether_addr_copy(bssid_cam->bssid, rtwvif_link->bssid);
+ }
+ 
+-int rtw89_cam_init(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
++int rtw89_cam_init(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link)
+ {
+-	struct rtw89_addr_cam_entry *addr_cam = &rtwvif->addr_cam;
+-	struct rtw89_bssid_cam_entry *bssid_cam = &rtwvif->bssid_cam;
++	struct rtw89_addr_cam_entry *addr_cam = &rtwvif_link->addr_cam;
++	struct rtw89_bssid_cam_entry *bssid_cam = &rtwvif_link->bssid_cam;
+ 	int ret;
+ 
+-	ret = rtw89_cam_init_bssid_cam(rtwdev, rtwvif, bssid_cam, rtwvif->bssid);
++	ret = rtw89_cam_init_bssid_cam(rtwdev, rtwvif_link, bssid_cam,
++				       rtwvif_link->bssid);
+ 	if (ret) {
+ 		rtw89_err(rtwdev, "failed to init bssid cam\n");
+ 		return ret;
+@@ -651,19 +722,27 @@ int rtw89_cam_init(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
+ }
+ 
+ int rtw89_cam_fill_bssid_cam_info(struct rtw89_dev *rtwdev,
+-				  struct rtw89_vif *rtwvif,
+-				  struct rtw89_sta *rtwsta, u8 *cmd)
++				  struct rtw89_vif_link *rtwvif_link,
++				  struct rtw89_sta_link *rtwsta_link, u8 *cmd)
+ {
+-	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
+-	struct rtw89_bssid_cam_entry *bssid_cam = rtw89_get_bssid_cam_of(rtwvif, rtwsta);
+-	u8 bss_color = vif->bss_conf.he_bss_color.color;
++	struct rtw89_bssid_cam_entry *bssid_cam = rtw89_get_bssid_cam_of(rtwvif_link,
++									 rtwsta_link);
++	struct ieee80211_bss_conf *bss_conf;
++	u8 bss_color;
+ 	u8 bss_mask;
+ 
+-	if (vif->bss_conf.nontransmitted)
++	rcu_read_lock();
++
++	bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, false);
++	bss_color = bss_conf->he_bss_color.color;
++
++	if (bss_conf->nontransmitted)
+ 		bss_mask = RTW89_BSSID_MATCH_5_BYTES;
+ 	else
+ 		bss_mask = RTW89_BSSID_MATCH_ALL;
+ 
++	rcu_read_unlock();
++
+ 	FWCMD_SET_ADDR_BSSID_IDX(cmd, bssid_cam->bssid_cam_idx);
+ 	FWCMD_SET_ADDR_BSSID_OFFSET(cmd, bssid_cam->offset);
+ 	FWCMD_SET_ADDR_BSSID_LEN(cmd, bssid_cam->len);
+@@ -694,19 +773,30 @@ static u8 rtw89_cam_addr_hash(u8 start, const u8 *addr)
+ }
+ 
+ void rtw89_cam_fill_addr_cam_info(struct rtw89_dev *rtwdev,
+-				  struct rtw89_vif *rtwvif,
+-				  struct rtw89_sta *rtwsta,
++				  struct rtw89_vif_link *rtwvif_link,
++				  struct rtw89_sta_link *rtwsta_link,
+ 				  const u8 *scan_mac_addr,
+ 				  u8 *cmd)
+ {
+-	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
+-	struct rtw89_addr_cam_entry *addr_cam = rtw89_get_addr_cam_of(rtwvif, rtwsta);
+-	struct ieee80211_sta *sta = rtwsta_to_sta_safe(rtwsta);
+-	const u8 *sma = scan_mac_addr ? scan_mac_addr : rtwvif->mac_addr;
++	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
++	struct rtw89_addr_cam_entry *addr_cam =
++		rtw89_get_addr_cam_of(rtwvif_link, rtwsta_link);
++	struct ieee80211_sta *sta = rtwsta_link_to_sta_safe(rtwsta_link);
++	struct ieee80211_link_sta *link_sta;
++	const u8 *sma = scan_mac_addr ? scan_mac_addr : rtwvif_link->mac_addr;
+ 	u8 sma_hash, tma_hash, addr_msk_start;
+ 	u8 sma_start = 0;
+ 	u8 tma_start = 0;
+-	u8 *tma = sta ? sta->addr : rtwvif->bssid;
++	const u8 *tma;
++
++	rcu_read_lock();
++
++	if (sta) {
++		link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true);
++		tma = link_sta->addr;
++	} else {
++		tma = rtwvif_link->bssid;
++	}
+ 
+ 	if (addr_cam->addr_mask != 0) {
+ 		addr_msk_start = __ffs(addr_cam->addr_mask);
+@@ -723,10 +813,10 @@ void rtw89_cam_fill_addr_cam_info(struct rtw89_dev *rtwdev,
+ 	FWCMD_SET_ADDR_LEN(cmd, addr_cam->len);
+ 
+ 	FWCMD_SET_ADDR_VALID(cmd, addr_cam->valid);
+-	FWCMD_SET_ADDR_NET_TYPE(cmd, rtwvif->net_type);
+-	FWCMD_SET_ADDR_BCN_HIT_COND(cmd, rtwvif->bcn_hit_cond);
+-	FWCMD_SET_ADDR_HIT_RULE(cmd, rtwvif->hit_rule);
+-	FWCMD_SET_ADDR_BB_SEL(cmd, rtwvif->phy_idx);
++	FWCMD_SET_ADDR_NET_TYPE(cmd, rtwvif_link->net_type);
++	FWCMD_SET_ADDR_BCN_HIT_COND(cmd, rtwvif_link->bcn_hit_cond);
++	FWCMD_SET_ADDR_HIT_RULE(cmd, rtwvif_link->hit_rule);
++	FWCMD_SET_ADDR_BB_SEL(cmd, rtwvif_link->phy_idx);
+ 	FWCMD_SET_ADDR_ADDR_MASK(cmd, addr_cam->addr_mask);
+ 	FWCMD_SET_ADDR_MASK_SEL(cmd, addr_cam->mask_sel);
+ 	FWCMD_SET_ADDR_SMA_HASH(cmd, sma_hash);
+@@ -748,20 +838,21 @@ void rtw89_cam_fill_addr_cam_info(struct rtw89_dev *rtwdev,
+ 	FWCMD_SET_ADDR_TMA4(cmd, tma[4]);
+ 	FWCMD_SET_ADDR_TMA5(cmd, tma[5]);
+ 
+-	FWCMD_SET_ADDR_PORT_INT(cmd, rtwvif->port);
+-	FWCMD_SET_ADDR_TSF_SYNC(cmd, rtwvif->port);
+-	FWCMD_SET_ADDR_TF_TRS(cmd, rtwvif->trigger);
+-	FWCMD_SET_ADDR_LSIG_TXOP(cmd, rtwvif->lsig_txop);
+-	FWCMD_SET_ADDR_TGT_IND(cmd, rtwvif->tgt_ind);
+-	FWCMD_SET_ADDR_FRM_TGT_IND(cmd, rtwvif->frm_tgt_ind);
+-	FWCMD_SET_ADDR_MACID(cmd, rtwsta ? rtwsta->mac_id : rtwvif->mac_id);
+-	if (rtwvif->net_type == RTW89_NET_TYPE_INFRA)
++	FWCMD_SET_ADDR_PORT_INT(cmd, rtwvif_link->port);
++	FWCMD_SET_ADDR_TSF_SYNC(cmd, rtwvif_link->port);
++	FWCMD_SET_ADDR_TF_TRS(cmd, rtwvif_link->trigger);
++	FWCMD_SET_ADDR_LSIG_TXOP(cmd, rtwvif_link->lsig_txop);
++	FWCMD_SET_ADDR_TGT_IND(cmd, rtwvif_link->tgt_ind);
++	FWCMD_SET_ADDR_FRM_TGT_IND(cmd, rtwvif_link->frm_tgt_ind);
++	FWCMD_SET_ADDR_MACID(cmd, rtwsta_link ? rtwsta_link->mac_id :
++						rtwvif_link->mac_id);
++	if (rtwvif_link->net_type == RTW89_NET_TYPE_INFRA)
+ 		FWCMD_SET_ADDR_AID12(cmd, vif->cfg.aid & 0xfff);
+-	else if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE)
++	else if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE)
+ 		FWCMD_SET_ADDR_AID12(cmd, sta ? sta->aid & 0xfff : 0);
+-	FWCMD_SET_ADDR_WOL_PATTERN(cmd, rtwvif->wowlan_pattern);
+-	FWCMD_SET_ADDR_WOL_UC(cmd, rtwvif->wowlan_uc);
+-	FWCMD_SET_ADDR_WOL_MAGIC(cmd, rtwvif->wowlan_magic);
++	FWCMD_SET_ADDR_WOL_PATTERN(cmd, rtwvif_link->wowlan_pattern);
++	FWCMD_SET_ADDR_WOL_UC(cmd, rtwvif_link->wowlan_uc);
++	FWCMD_SET_ADDR_WOL_MAGIC(cmd, rtwvif_link->wowlan_magic);
+ 	FWCMD_SET_ADDR_WAPI(cmd, addr_cam->wapi);
+ 	FWCMD_SET_ADDR_SEC_ENT_MODE(cmd, addr_cam->sec_ent_mode);
+ 	FWCMD_SET_ADDR_SEC_ENT0_KEYID(cmd, addr_cam->sec_ent_keyid[0]);
+@@ -780,18 +871,22 @@ void rtw89_cam_fill_addr_cam_info(struct rtw89_dev *rtwdev,
+ 	FWCMD_SET_ADDR_SEC_ENT4(cmd, addr_cam->sec_ent[4]);
+ 	FWCMD_SET_ADDR_SEC_ENT5(cmd, addr_cam->sec_ent[5]);
+ 	FWCMD_SET_ADDR_SEC_ENT6(cmd, addr_cam->sec_ent[6]);
++
++	rcu_read_unlock();
+ }
+ 
+ void rtw89_cam_fill_dctl_sec_cam_info_v1(struct rtw89_dev *rtwdev,
+-					 struct rtw89_vif *rtwvif,
+-					 struct rtw89_sta *rtwsta,
++					 struct rtw89_vif_link *rtwvif_link,
++					 struct rtw89_sta_link *rtwsta_link,
+ 					 struct rtw89_h2c_dctlinfo_ud_v1 *h2c)
+ {
+-	struct rtw89_addr_cam_entry *addr_cam = rtw89_get_addr_cam_of(rtwvif, rtwsta);
++	struct rtw89_addr_cam_entry *addr_cam =
++		rtw89_get_addr_cam_of(rtwvif_link, rtwsta_link);
+ 	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ 	u8 *ptk_tx_iv = rtw_wow->key_info.ptk_tx_iv;
+ 
+-	h2c->c0 = le32_encode_bits(rtwsta ? rtwsta->mac_id : rtwvif->mac_id,
++	h2c->c0 = le32_encode_bits(rtwsta_link ? rtwsta_link->mac_id :
++						 rtwvif_link->mac_id,
+ 				   DCTLINFO_V1_C0_MACID) |
+ 		  le32_encode_bits(1, DCTLINFO_V1_C0_OP);
+ 
+@@ -862,15 +957,17 @@ void rtw89_cam_fill_dctl_sec_cam_info_v1(struct rtw89_dev *rtwdev,
+ }
+ 
+ void rtw89_cam_fill_dctl_sec_cam_info_v2(struct rtw89_dev *rtwdev,
+-					 struct rtw89_vif *rtwvif,
+-					 struct rtw89_sta *rtwsta,
++					 struct rtw89_vif_link *rtwvif_link,
++					 struct rtw89_sta_link *rtwsta_link,
+ 					 struct rtw89_h2c_dctlinfo_ud_v2 *h2c)
+ {
+-	struct rtw89_addr_cam_entry *addr_cam = rtw89_get_addr_cam_of(rtwvif, rtwsta);
++	struct rtw89_addr_cam_entry *addr_cam =
++		rtw89_get_addr_cam_of(rtwvif_link, rtwsta_link);
+ 	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ 	u8 *ptk_tx_iv = rtw_wow->key_info.ptk_tx_iv;
+ 
+-	h2c->c0 = le32_encode_bits(rtwsta ? rtwsta->mac_id : rtwvif->mac_id,
++	h2c->c0 = le32_encode_bits(rtwsta_link ? rtwsta_link->mac_id :
++						 rtwvif_link->mac_id,
+ 				   DCTLINFO_V2_C0_MACID) |
+ 		  le32_encode_bits(1, DCTLINFO_V2_C0_OP);
+ 
+diff --git a/drivers/net/wireless/realtek/rtw89/cam.h b/drivers/net/wireless/realtek/rtw89/cam.h
+index 5d7b624c2dd428..a6f72edd30fe3a 100644
+--- a/drivers/net/wireless/realtek/rtw89/cam.h
++++ b/drivers/net/wireless/realtek/rtw89/cam.h
+@@ -526,34 +526,34 @@ struct rtw89_h2c_dctlinfo_ud_v2 {
+ #define DCTLINFO_V2_W12_MLD_TA_BSSID_H_V1 GENMASK(15, 0)
+ #define DCTLINFO_V2_W12_ALL GENMASK(15, 0)
+ 
+-int rtw89_cam_init(struct rtw89_dev *rtwdev, struct rtw89_vif *vif);
+-void rtw89_cam_deinit(struct rtw89_dev *rtwdev, struct rtw89_vif *vif);
++int rtw89_cam_init(struct rtw89_dev *rtwdev, struct rtw89_vif_link *vif);
++void rtw89_cam_deinit(struct rtw89_dev *rtwdev, struct rtw89_vif_link *vif);
+ int rtw89_cam_init_addr_cam(struct rtw89_dev *rtwdev,
+ 			    struct rtw89_addr_cam_entry *addr_cam,
+ 			    const struct rtw89_bssid_cam_entry *bssid_cam);
+ void rtw89_cam_deinit_addr_cam(struct rtw89_dev *rtwdev,
+ 			       struct rtw89_addr_cam_entry *addr_cam);
+ int rtw89_cam_init_bssid_cam(struct rtw89_dev *rtwdev,
+-			     struct rtw89_vif *rtwvif,
++			     struct rtw89_vif_link *rtwvif_link,
+ 			     struct rtw89_bssid_cam_entry *bssid_cam,
+ 			     const u8 *bssid);
+ void rtw89_cam_deinit_bssid_cam(struct rtw89_dev *rtwdev,
+ 				struct rtw89_bssid_cam_entry *bssid_cam);
+ void rtw89_cam_fill_addr_cam_info(struct rtw89_dev *rtwdev,
+-				  struct rtw89_vif *vif,
+-				  struct rtw89_sta *rtwsta,
++				  struct rtw89_vif_link *vif,
++				  struct rtw89_sta_link *rtwsta_link,
+ 				  const u8 *scan_mac_addr, u8 *cmd);
+ void rtw89_cam_fill_dctl_sec_cam_info_v1(struct rtw89_dev *rtwdev,
+-					 struct rtw89_vif *rtwvif,
+-					 struct rtw89_sta *rtwsta,
++					 struct rtw89_vif_link *rtwvif_link,
++					 struct rtw89_sta_link *rtwsta_link,
+ 					 struct rtw89_h2c_dctlinfo_ud_v1 *h2c);
+ void rtw89_cam_fill_dctl_sec_cam_info_v2(struct rtw89_dev *rtwdev,
+-					 struct rtw89_vif *rtwvif,
+-					 struct rtw89_sta *rtwsta,
++					 struct rtw89_vif_link *rtwvif_link,
++					 struct rtw89_sta_link *rtwsta_link,
+ 					 struct rtw89_h2c_dctlinfo_ud_v2 *h2c);
+ int rtw89_cam_fill_bssid_cam_info(struct rtw89_dev *rtwdev,
+-				  struct rtw89_vif *rtwvif,
+-				  struct rtw89_sta *rtwsta, u8 *cmd);
++				  struct rtw89_vif_link *rtwvif_link,
++				  struct rtw89_sta_link *rtwsta_link, u8 *cmd);
+ int rtw89_cam_sec_key_add(struct rtw89_dev *rtwdev,
+ 			  struct ieee80211_vif *vif,
+ 			  struct ieee80211_sta *sta,
+@@ -564,6 +564,6 @@ int rtw89_cam_sec_key_del(struct rtw89_dev *rtwdev,
+ 			  struct ieee80211_key_conf *key,
+ 			  bool inform_fw);
+ void rtw89_cam_bssid_changed(struct rtw89_dev *rtwdev,
+-			     struct rtw89_vif *rtwvif);
++			     struct rtw89_vif_link *rtwvif_link);
+ void rtw89_cam_reset_keys(struct rtw89_dev *rtwdev);
+ #endif
+diff --git a/drivers/net/wireless/realtek/rtw89/chan.c b/drivers/net/wireless/realtek/rtw89/chan.c
+index 7070c85e2c2883..ba6332da8019c1 100644
+--- a/drivers/net/wireless/realtek/rtw89/chan.c
++++ b/drivers/net/wireless/realtek/rtw89/chan.c
+@@ -234,6 +234,18 @@ void rtw89_entity_init(struct rtw89_dev *rtwdev)
+ 	rtw89_config_default_chandef(rtwdev);
+ }
+ 
++static bool rtw89_vif_is_active_role(struct rtw89_vif *rtwvif)
++{
++	struct rtw89_vif_link *rtwvif_link;
++	unsigned int link_id;
++
++	rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id)
++		if (rtwvif_link->chanctx_assigned)
++			return true;
++
++	return false;
++}
++
+ static void rtw89_entity_calculate_weight(struct rtw89_dev *rtwdev,
+ 					  struct rtw89_entity_weight *w)
+ {
+@@ -255,7 +267,7 @@ static void rtw89_entity_calculate_weight(struct rtw89_dev *rtwdev,
+ 	}
+ 
+ 	rtw89_for_each_rtwvif(rtwdev, rtwvif) {
+-		if (rtwvif->chanctx_assigned)
++		if (rtw89_vif_is_active_role(rtwvif))
+ 			w->active_roles++;
+ 	}
+ }
+@@ -387,9 +399,9 @@ int rtw89_iterate_mcc_roles(struct rtw89_dev *rtwdev,
+ static u32 rtw89_mcc_get_tbtt_ofst(struct rtw89_dev *rtwdev,
+ 				   struct rtw89_mcc_role *role, u64 tsf)
+ {
+-	struct rtw89_vif *rtwvif = role->rtwvif;
++	struct rtw89_vif_link *rtwvif_link = role->rtwvif_link;
+ 	u32 bcn_intvl_us = ieee80211_tu_to_usec(role->beacon_interval);
+-	u64 sync_tsf = READ_ONCE(rtwvif->sync_bcn_tsf);
++	u64 sync_tsf = READ_ONCE(rtwvif_link->sync_bcn_tsf);
+ 	u32 remainder;
+ 
+ 	if (tsf < sync_tsf) {
+@@ -413,8 +425,8 @@ static int __mcc_fw_req_tsf(struct rtw89_dev *rtwdev, u64 *tsf_ref, u64 *tsf_aux
+ 	int ret;
+ 
+ 	req.group = mcc->group;
+-	req.macid_x = ref->rtwvif->mac_id;
+-	req.macid_y = aux->rtwvif->mac_id;
++	req.macid_x = ref->rtwvif_link->mac_id;
++	req.macid_y = aux->rtwvif_link->mac_id;
+ 	ret = rtw89_fw_h2c_mcc_req_tsf(rtwdev, &req, &rpt);
+ 	if (ret) {
+ 		rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+@@ -440,10 +452,10 @@ static int __mrc_fw_req_tsf(struct rtw89_dev *rtwdev, u64 *tsf_ref, u64 *tsf_aux
+ 	BUILD_BUG_ON(RTW89_MAC_MRC_MAX_REQ_TSF_NUM < NUM_OF_RTW89_MCC_ROLES);
+ 
+ 	arg.num = 2;
+-	arg.infos[0].band = ref->rtwvif->mac_idx;
+-	arg.infos[0].port = ref->rtwvif->port;
+-	arg.infos[1].band = aux->rtwvif->mac_idx;
+-	arg.infos[1].port = aux->rtwvif->port;
++	arg.infos[0].band = ref->rtwvif_link->mac_idx;
++	arg.infos[0].port = ref->rtwvif_link->port;
++	arg.infos[1].band = aux->rtwvif_link->mac_idx;
++	arg.infos[1].port = aux->rtwvif_link->port;
+ 
+ 	ret = rtw89_fw_h2c_mrc_req_tsf(rtwdev, &arg, &rpt);
+ 	if (ret) {
+@@ -522,23 +534,31 @@ u32 rtw89_mcc_role_fw_macid_bitmap_to_u32(struct rtw89_mcc_role *mcc_role)
+ 
+ static void rtw89_mcc_role_macid_sta_iter(void *data, struct ieee80211_sta *sta)
+ {
+-	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+-	struct rtw89_vif *rtwvif = rtwsta->rtwvif;
+ 	struct rtw89_mcc_role *mcc_role = data;
+-	struct rtw89_vif *target = mcc_role->rtwvif;
++	struct rtw89_vif *target = mcc_role->rtwvif_link->rtwvif;
++	struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
++	struct rtw89_vif *rtwvif = rtwsta->rtwvif;
++	struct rtw89_dev *rtwdev = rtwsta->rtwdev;
++	struct rtw89_sta_link *rtwsta_link;
+ 
+ 	if (rtwvif != target)
+ 		return;
+ 
+-	rtw89_mcc_role_fw_macid_bitmap_set_bit(mcc_role, rtwsta->mac_id);
++	rtwsta_link = rtw89_sta_get_link_inst(rtwsta, 0);
++	if (unlikely(!rtwsta_link)) {
++		rtw89_err(rtwdev, "mcc sta macid: find no link on HW-0\n");
++		return;
++	}
++
++	rtw89_mcc_role_fw_macid_bitmap_set_bit(mcc_role, rtwsta_link->mac_id);
+ }
+ 
+ static void rtw89_mcc_fill_role_macid_bitmap(struct rtw89_dev *rtwdev,
+ 					     struct rtw89_mcc_role *mcc_role)
+ {
+-	struct rtw89_vif *rtwvif = mcc_role->rtwvif;
++	struct rtw89_vif_link *rtwvif_link = mcc_role->rtwvif_link;
+ 
+-	rtw89_mcc_role_fw_macid_bitmap_set_bit(mcc_role, rtwvif->mac_id);
++	rtw89_mcc_role_fw_macid_bitmap_set_bit(mcc_role, rtwvif_link->mac_id);
+ 	ieee80211_iterate_stations_atomic(rtwdev->hw,
+ 					  rtw89_mcc_role_macid_sta_iter,
+ 					  mcc_role);
+@@ -564,8 +584,9 @@ static void rtw89_mcc_fill_role_policy(struct rtw89_dev *rtwdev,
+ static void rtw89_mcc_fill_role_limit(struct rtw89_dev *rtwdev,
+ 				      struct rtw89_mcc_role *mcc_role)
+ {
+-	struct ieee80211_vif *vif = rtwvif_to_vif(mcc_role->rtwvif);
++	struct rtw89_vif_link *rtwvif_link = mcc_role->rtwvif_link;
+ 	struct ieee80211_p2p_noa_desc *noa_desc;
++	struct ieee80211_bss_conf *bss_conf;
+ 	u32 bcn_intvl_us = ieee80211_tu_to_usec(mcc_role->beacon_interval);
+ 	u32 max_toa_us, max_tob_us, max_dur_us;
+ 	u32 start_time, interval, duration;
+@@ -576,13 +597,18 @@ static void rtw89_mcc_fill_role_limit(struct rtw89_dev *rtwdev,
+ 	if (!mcc_role->is_go && !mcc_role->is_gc)
+ 		return;
+ 
++	rcu_read_lock();
++
++	bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
++
+ 	/* find the first periodic NoA */
+ 	for (i = 0; i < RTW89_P2P_MAX_NOA_NUM; i++) {
+-		noa_desc = &vif->bss_conf.p2p_noa_attr.desc[i];
++		noa_desc = &bss_conf->p2p_noa_attr.desc[i];
+ 		if (noa_desc->count == 255)
+ 			goto fill;
+ 	}
+ 
++	rcu_read_unlock();
+ 	return;
+ 
+ fill:
+@@ -590,6 +616,8 @@ static void rtw89_mcc_fill_role_limit(struct rtw89_dev *rtwdev,
+ 	interval = le32_to_cpu(noa_desc->interval);
+ 	duration = le32_to_cpu(noa_desc->duration);
+ 
++	rcu_read_unlock();
++
+ 	if (interval != bcn_intvl_us) {
+ 		rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ 			    "MCC role limit: mismatch interval: %d vs. %d\n",
+@@ -597,7 +625,7 @@ static void rtw89_mcc_fill_role_limit(struct rtw89_dev *rtwdev,
+ 		return;
+ 	}
+ 
+-	ret = rtw89_mac_port_get_tsf(rtwdev, mcc_role->rtwvif, &tsf);
++	ret = rtw89_mac_port_get_tsf(rtwdev, rtwvif_link, &tsf);
+ 	if (ret) {
+ 		rtw89_warn(rtwdev, "MCC failed to get port tsf: %d\n", ret);
+ 		return;
+@@ -632,15 +660,21 @@ static void rtw89_mcc_fill_role_limit(struct rtw89_dev *rtwdev,
+ }
+ 
+ static int rtw89_mcc_fill_role(struct rtw89_dev *rtwdev,
+-			       struct rtw89_vif *rtwvif,
++			       struct rtw89_vif_link *rtwvif_link,
+ 			       struct rtw89_mcc_role *role)
+ {
+-	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
++	struct ieee80211_bss_conf *bss_conf;
+ 	const struct rtw89_chan *chan;
+ 
+ 	memset(role, 0, sizeof(*role));
+-	role->rtwvif = rtwvif;
+-	role->beacon_interval = vif->bss_conf.beacon_int;
++	role->rtwvif_link = rtwvif_link;
++
++	rcu_read_lock();
++
++	bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
++	role->beacon_interval = bss_conf->beacon_int;
++
++	rcu_read_unlock();
+ 
+ 	if (!role->beacon_interval) {
+ 		rtw89_warn(rtwdev,
+@@ -650,10 +684,10 @@ static int rtw89_mcc_fill_role(struct rtw89_dev *rtwdev,
+ 
+ 	role->duration = role->beacon_interval / 2;
+ 
+-	chan = rtw89_chan_get(rtwdev, rtwvif->chanctx_idx);
++	chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx);
+ 	role->is_2ghz = chan->band_type == RTW89_BAND_2G;
+-	role->is_go = rtwvif->wifi_role == RTW89_WIFI_ROLE_P2P_GO;
+-	role->is_gc = rtwvif->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT;
++	role->is_go = rtwvif_link->wifi_role == RTW89_WIFI_ROLE_P2P_GO;
++	role->is_gc = rtwvif_link->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT;
+ 
+ 	rtw89_mcc_fill_role_macid_bitmap(rtwdev, role);
+ 	rtw89_mcc_fill_role_policy(rtwdev, role);
+@@ -678,7 +712,7 @@ static void rtw89_mcc_fill_bt_role(struct rtw89_dev *rtwdev)
+ }
+ 
+ struct rtw89_mcc_fill_role_selector {
+-	struct rtw89_vif *bind_vif[NUM_OF_RTW89_CHANCTX];
++	struct rtw89_vif_link *bind_vif[NUM_OF_RTW89_CHANCTX];
+ };
+ 
+ static_assert((u8)NUM_OF_RTW89_CHANCTX >= NUM_OF_RTW89_MCC_ROLES);
+@@ -689,7 +723,7 @@ static int rtw89_mcc_fill_role_iterator(struct rtw89_dev *rtwdev,
+ 					void *data)
+ {
+ 	struct rtw89_mcc_fill_role_selector *sel = data;
+-	struct rtw89_vif *role_vif = sel->bind_vif[ordered_idx];
++	struct rtw89_vif_link *role_vif = sel->bind_vif[ordered_idx];
+ 	int ret;
+ 
+ 	if (!role_vif) {
+@@ -712,21 +746,28 @@ static int rtw89_mcc_fill_role_iterator(struct rtw89_dev *rtwdev,
+ static int rtw89_mcc_fill_all_roles(struct rtw89_dev *rtwdev)
+ {
+ 	struct rtw89_mcc_fill_role_selector sel = {};
++	struct rtw89_vif_link *rtwvif_link;
+ 	struct rtw89_vif *rtwvif;
+ 	int ret;
+ 
+ 	rtw89_for_each_rtwvif(rtwdev, rtwvif) {
+-		if (!rtwvif->chanctx_assigned)
++		if (!rtw89_vif_is_active_role(rtwvif))
++			continue;
++
++		rtwvif_link = rtw89_vif_get_link_inst(rtwvif, 0);
++		if (unlikely(!rtwvif_link)) {
++			rtw89_err(rtwdev, "mcc fill roles: find no link on HW-0\n");
+ 			continue;
++		}
+ 
+-		if (sel.bind_vif[rtwvif->chanctx_idx]) {
++		if (sel.bind_vif[rtwvif_link->chanctx_idx]) {
+ 			rtw89_warn(rtwdev,
+ 				   "MCC skip extra vif <macid %d> on chanctx[%d]\n",
+-				   rtwvif->mac_id, rtwvif->chanctx_idx);
++				   rtwvif_link->mac_id, rtwvif_link->chanctx_idx);
+ 			continue;
+ 		}
+ 
+-		sel.bind_vif[rtwvif->chanctx_idx] = rtwvif;
++		sel.bind_vif[rtwvif_link->chanctx_idx] = rtwvif_link;
+ 	}
+ 
+ 	ret = rtw89_iterate_mcc_roles(rtwdev, rtw89_mcc_fill_role_iterator, &sel);
+@@ -754,13 +795,13 @@ static void rtw89_mcc_assign_pattern(struct rtw89_dev *rtwdev,
+ 	memset(&pattern->courtesy, 0, sizeof(pattern->courtesy));
+ 
+ 	if (pattern->tob_aux <= 0 || pattern->toa_aux <= 0) {
+-		pattern->courtesy.macid_tgt = aux->rtwvif->mac_id;
+-		pattern->courtesy.macid_src = ref->rtwvif->mac_id;
++		pattern->courtesy.macid_tgt = aux->rtwvif_link->mac_id;
++		pattern->courtesy.macid_src = ref->rtwvif_link->mac_id;
+ 		pattern->courtesy.slot_num = RTW89_MCC_DFLT_COURTESY_SLOT;
+ 		pattern->courtesy.enable = true;
+ 	} else if (pattern->tob_ref <= 0 || pattern->toa_ref <= 0) {
+-		pattern->courtesy.macid_tgt = ref->rtwvif->mac_id;
+-		pattern->courtesy.macid_src = aux->rtwvif->mac_id;
++		pattern->courtesy.macid_tgt = ref->rtwvif_link->mac_id;
++		pattern->courtesy.macid_src = aux->rtwvif_link->mac_id;
+ 		pattern->courtesy.slot_num = RTW89_MCC_DFLT_COURTESY_SLOT;
+ 		pattern->courtesy.enable = true;
+ 	}
+@@ -1263,7 +1304,7 @@ static void rtw89_mcc_sync_tbtt(struct rtw89_dev *rtwdev,
+ 	u64 tsf_src;
+ 	int ret;
+ 
+-	ret = rtw89_mac_port_get_tsf(rtwdev, src->rtwvif, &tsf_src);
++	ret = rtw89_mac_port_get_tsf(rtwdev, src->rtwvif_link, &tsf_src);
+ 	if (ret) {
+ 		rtw89_warn(rtwdev, "MCC failed to get port tsf: %d\n", ret);
+ 		return;
+@@ -1280,12 +1321,12 @@ static void rtw89_mcc_sync_tbtt(struct rtw89_dev *rtwdev,
+ 	div_u64_rem(tbtt_tgt, bcn_intvl_src_us, &remainder);
+ 	tsf_ofst_tgt = bcn_intvl_src_us - remainder;
+ 
+-	config->sync.macid_tgt = tgt->rtwvif->mac_id;
+-	config->sync.band_tgt = tgt->rtwvif->mac_idx;
+-	config->sync.port_tgt = tgt->rtwvif->port;
+-	config->sync.macid_src = src->rtwvif->mac_id;
+-	config->sync.band_src = src->rtwvif->mac_idx;
+-	config->sync.port_src = src->rtwvif->port;
++	config->sync.macid_tgt = tgt->rtwvif_link->mac_id;
++	config->sync.band_tgt = tgt->rtwvif_link->mac_idx;
++	config->sync.port_tgt = tgt->rtwvif_link->port;
++	config->sync.macid_src = src->rtwvif_link->mac_id;
++	config->sync.band_src = src->rtwvif_link->mac_idx;
++	config->sync.port_src = src->rtwvif_link->port;
+ 	config->sync.offset = tsf_ofst_tgt / 1024;
+ 	config->sync.enable = true;
+ 
+@@ -1294,7 +1335,7 @@ static void rtw89_mcc_sync_tbtt(struct rtw89_dev *rtwdev,
+ 		    config->sync.macid_tgt, config->sync.macid_src,
+ 		    config->sync.offset);
+ 
+-	rtw89_mac_port_tsf_sync(rtwdev, tgt->rtwvif, src->rtwvif,
++	rtw89_mac_port_tsf_sync(rtwdev, tgt->rtwvif_link, src->rtwvif_link,
+ 				config->sync.offset);
+ }
+ 
+@@ -1305,13 +1346,13 @@ static int rtw89_mcc_fill_start_tsf(struct rtw89_dev *rtwdev)
+ 	struct rtw89_mcc_config *config = &mcc->config;
+ 	u32 bcn_intvl_ref_us = ieee80211_tu_to_usec(ref->beacon_interval);
+ 	u32 tob_ref_us = ieee80211_tu_to_usec(config->pattern.tob_ref);
+-	struct rtw89_vif *rtwvif = ref->rtwvif;
++	struct rtw89_vif_link *rtwvif_link = ref->rtwvif_link;
+ 	u64 tsf, start_tsf;
+ 	u32 cur_tbtt_ofst;
+ 	u64 min_time;
+ 	int ret;
+ 
+-	ret = rtw89_mac_port_get_tsf(rtwdev, rtwvif, &tsf);
++	ret = rtw89_mac_port_get_tsf(rtwdev, rtwvif_link, &tsf);
+ 	if (ret) {
+ 		rtw89_warn(rtwdev, "MCC failed to get port tsf: %d\n", ret);
+ 		return ret;
+@@ -1390,13 +1431,13 @@ static int __mcc_fw_add_role(struct rtw89_dev *rtwdev, struct rtw89_mcc_role *ro
+ 	const struct rtw89_chan *chan;
+ 	int ret;
+ 
+-	chan = rtw89_chan_get(rtwdev, role->rtwvif->chanctx_idx);
++	chan = rtw89_chan_get(rtwdev, role->rtwvif_link->chanctx_idx);
+ 	req.central_ch_seg0 = chan->channel;
+ 	req.primary_ch = chan->primary_channel;
+ 	req.bandwidth = chan->band_width;
+ 	req.ch_band_type = chan->band_type;
+ 
+-	req.macid = role->rtwvif->mac_id;
++	req.macid = role->rtwvif_link->mac_id;
+ 	req.group = mcc->group;
+ 	req.c2h_rpt = policy->c2h_rpt;
+ 	req.tx_null_early = policy->tx_null_early;
+@@ -1421,7 +1462,7 @@ static int __mcc_fw_add_role(struct rtw89_dev *rtwdev, struct rtw89_mcc_role *ro
+ 	}
+ 
+ 	ret = rtw89_fw_h2c_mcc_macid_bitmap(rtwdev, mcc->group,
+-					    role->rtwvif->mac_id,
++					    role->rtwvif_link->mac_id,
+ 					    role->macid_bitmap);
+ 	if (ret) {
+ 		rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+@@ -1448,7 +1489,7 @@ void __mrc_fw_add_role(struct rtw89_dev *rtwdev, struct rtw89_mcc_role *role,
+ 	slot_arg->duration = role->duration;
+ 	slot_arg->role_num = 1;
+ 
+-	chan = rtw89_chan_get(rtwdev, role->rtwvif->chanctx_idx);
++	chan = rtw89_chan_get(rtwdev, role->rtwvif_link->chanctx_idx);
+ 
+ 	slot_arg->roles[0].role_type = RTW89_H2C_MRC_ROLE_WIFI;
+ 	slot_arg->roles[0].is_master = role == ref;
+@@ -1458,7 +1499,7 @@ void __mrc_fw_add_role(struct rtw89_dev *rtwdev, struct rtw89_mcc_role *role,
+ 	slot_arg->roles[0].primary_ch = chan->primary_channel;
+ 	slot_arg->roles[0].en_tx_null = !policy->dis_tx_null;
+ 	slot_arg->roles[0].null_early = policy->tx_null_early;
+-	slot_arg->roles[0].macid = role->rtwvif->mac_id;
++	slot_arg->roles[0].macid = role->rtwvif_link->mac_id;
+ 	slot_arg->roles[0].macid_main_bitmap =
+ 		rtw89_mcc_role_fw_macid_bitmap_to_u32(role);
+ }
+@@ -1569,7 +1610,7 @@ static int __mcc_fw_start(struct rtw89_dev *rtwdev, bool replace)
+ 		}
+ 	}
+ 
+-	req.macid = ref->rtwvif->mac_id;
++	req.macid = ref->rtwvif_link->mac_id;
+ 	req.tsf_high = config->start_tsf >> 32;
+ 	req.tsf_low = config->start_tsf;
+ 
+@@ -1598,7 +1639,7 @@ static void __mrc_fw_add_courtesy(struct rtw89_dev *rtwdev,
+ 	if (!courtesy->enable)
+ 		return;
+ 
+-	if (courtesy->macid_src == ref->rtwvif->mac_id) {
++	if (courtesy->macid_src == ref->rtwvif_link->mac_id) {
+ 		slot_arg_src = &arg->slots[ref->slot_idx];
+ 		slot_idx_tgt = aux->slot_idx;
+ 	} else {
+@@ -1717,9 +1758,9 @@ static int __mcc_fw_set_duration_no_bt(struct rtw89_dev *rtwdev, bool sync_chang
+ 	struct rtw89_fw_mcc_duration req = {
+ 		.group = mcc->group,
+ 		.btc_in_group = false,
+-		.start_macid = ref->rtwvif->mac_id,
+-		.macid_x = ref->rtwvif->mac_id,
+-		.macid_y = aux->rtwvif->mac_id,
++		.start_macid = ref->rtwvif_link->mac_id,
++		.macid_x = ref->rtwvif_link->mac_id,
++		.macid_y = aux->rtwvif_link->mac_id,
+ 		.duration_x = ref->duration,
+ 		.duration_y = aux->duration,
+ 		.start_tsf_high = config->start_tsf >> 32,
+@@ -1813,18 +1854,18 @@ static void rtw89_mcc_handle_beacon_noa(struct rtw89_dev *rtwdev, bool enable)
+ 	struct ieee80211_p2p_noa_desc noa_desc = {};
+ 	u64 start_time = config->start_tsf;
+ 	u32 interval = config->mcc_interval;
+-	struct rtw89_vif *rtwvif_go;
++	struct rtw89_vif_link *rtwvif_go;
+ 	u32 duration;
+ 
+ 	if (mcc->mode != RTW89_MCC_MODE_GO_STA)
+ 		return;
+ 
+ 	if (ref->is_go) {
+-		rtwvif_go = ref->rtwvif;
++		rtwvif_go = ref->rtwvif_link;
+ 		start_time += ieee80211_tu_to_usec(ref->duration);
+ 		duration = config->mcc_interval - ref->duration;
+ 	} else if (aux->is_go) {
+-		rtwvif_go = aux->rtwvif;
++		rtwvif_go = aux->rtwvif_link;
+ 		start_time += ieee80211_tu_to_usec(pattern->tob_ref) +
+ 			      ieee80211_tu_to_usec(config->beacon_offset) +
+ 			      ieee80211_tu_to_usec(pattern->toa_aux);
+@@ -1865,9 +1906,9 @@ static void rtw89_mcc_start_beacon_noa(struct rtw89_dev *rtwdev)
+ 		return;
+ 
+ 	if (ref->is_go)
+-		rtw89_fw_h2c_tsf32_toggle(rtwdev, ref->rtwvif, true);
++		rtw89_fw_h2c_tsf32_toggle(rtwdev, ref->rtwvif_link, true);
+ 	else if (aux->is_go)
+-		rtw89_fw_h2c_tsf32_toggle(rtwdev, aux->rtwvif, true);
++		rtw89_fw_h2c_tsf32_toggle(rtwdev, aux->rtwvif_link, true);
+ 
+ 	rtw89_mcc_handle_beacon_noa(rtwdev, true);
+ }
+@@ -1882,9 +1923,9 @@ static void rtw89_mcc_stop_beacon_noa(struct rtw89_dev *rtwdev)
+ 		return;
+ 
+ 	if (ref->is_go)
+-		rtw89_fw_h2c_tsf32_toggle(rtwdev, ref->rtwvif, false);
++		rtw89_fw_h2c_tsf32_toggle(rtwdev, ref->rtwvif_link, false);
+ 	else if (aux->is_go)
+-		rtw89_fw_h2c_tsf32_toggle(rtwdev, aux->rtwvif, false);
++		rtw89_fw_h2c_tsf32_toggle(rtwdev, aux->rtwvif_link, false);
+ 
+ 	rtw89_mcc_handle_beacon_noa(rtwdev, false);
+ }
+@@ -1942,7 +1983,7 @@ struct rtw89_mcc_stop_sel {
+ static void rtw89_mcc_stop_sel_fill(struct rtw89_mcc_stop_sel *sel,
+ 				    const struct rtw89_mcc_role *mcc_role)
+ {
+-	sel->mac_id = mcc_role->rtwvif->mac_id;
++	sel->mac_id = mcc_role->rtwvif_link->mac_id;
+ 	sel->slot_idx = mcc_role->slot_idx;
+ }
+ 
+@@ -1953,7 +1994,7 @@ static int rtw89_mcc_stop_sel_iterator(struct rtw89_dev *rtwdev,
+ {
+ 	struct rtw89_mcc_stop_sel *sel = data;
+ 
+-	if (!mcc_role->rtwvif->chanctx_assigned)
++	if (!mcc_role->rtwvif_link->chanctx_assigned)
+ 		return 0;
+ 
+ 	rtw89_mcc_stop_sel_fill(sel, mcc_role);
+@@ -2081,7 +2122,7 @@ static int __mcc_fw_upd_macid_bitmap(struct rtw89_dev *rtwdev,
+ 	int ret;
+ 
+ 	ret = rtw89_fw_h2c_mcc_macid_bitmap(rtwdev, mcc->group,
+-					    upd->rtwvif->mac_id,
++					    upd->rtwvif_link->mac_id,
+ 					    upd->macid_bitmap);
+ 	if (ret) {
+ 		rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+@@ -2106,7 +2147,7 @@ static int __mrc_fw_upd_macid_bitmap(struct rtw89_dev *rtwdev,
+ 	int i;
+ 
+ 	arg.sch_idx = mcc->group;
+-	arg.macid = upd->rtwvif->mac_id;
++	arg.macid = upd->rtwvif_link->mac_id;
+ 
+ 	for (i = 0; i < 32; i++) {
+ 		if (add & BIT(i)) {
+@@ -2144,7 +2185,7 @@ static int rtw89_mcc_upd_map_iterator(struct rtw89_dev *rtwdev,
+ 				      void *data)
+ {
+ 	struct rtw89_mcc_role upd = {
+-		.rtwvif = mcc_role->rtwvif,
++		.rtwvif_link = mcc_role->rtwvif_link,
+ 	};
+ 	int ret;
+ 
+@@ -2370,6 +2411,24 @@ void rtw89_chanctx_proceed(struct rtw89_dev *rtwdev)
+ 	rtw89_queue_chanctx_work(rtwdev);
+ }
+ 
++static void __rtw89_swap_chanctx(struct rtw89_vif *rtwvif,
++				 enum rtw89_chanctx_idx idx1,
++				 enum rtw89_chanctx_idx idx2)
++{
++	struct rtw89_vif_link *rtwvif_link;
++	unsigned int link_id;
++
++	rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) {
++		if (!rtwvif_link->chanctx_assigned)
++			continue;
++
++		if (rtwvif_link->chanctx_idx == idx1)
++			rtwvif_link->chanctx_idx = idx2;
++		else if (rtwvif_link->chanctx_idx == idx2)
++			rtwvif_link->chanctx_idx = idx1;
++	}
++}
++
+ static void rtw89_swap_chanctx(struct rtw89_dev *rtwdev,
+ 			       enum rtw89_chanctx_idx idx1,
+ 			       enum rtw89_chanctx_idx idx2)
+@@ -2386,14 +2445,8 @@ static void rtw89_swap_chanctx(struct rtw89_dev *rtwdev,
+ 
+ 	swap(hal->chanctx[idx1], hal->chanctx[idx2]);
+ 
+-	rtw89_for_each_rtwvif(rtwdev, rtwvif) {
+-		if (!rtwvif->chanctx_assigned)
+-			continue;
+-		if (rtwvif->chanctx_idx == idx1)
+-			rtwvif->chanctx_idx = idx2;
+-		else if (rtwvif->chanctx_idx == idx2)
+-			rtwvif->chanctx_idx = idx1;
+-	}
++	rtw89_for_each_rtwvif(rtwdev, rtwvif)
++		__rtw89_swap_chanctx(rtwvif, idx1, idx2);
+ 
+ 	cur = atomic_read(&hal->roc_chanctx_idx);
+ 	if (cur == idx1)
+@@ -2444,14 +2497,14 @@ void rtw89_chanctx_ops_change(struct rtw89_dev *rtwdev,
+ }
+ 
+ int rtw89_chanctx_ops_assign_vif(struct rtw89_dev *rtwdev,
+-				 struct rtw89_vif *rtwvif,
++				 struct rtw89_vif_link *rtwvif_link,
+ 				 struct ieee80211_chanctx_conf *ctx)
+ {
+ 	struct rtw89_chanctx_cfg *cfg = (struct rtw89_chanctx_cfg *)ctx->drv_priv;
+ 	struct rtw89_entity_weight w = {};
+ 
+-	rtwvif->chanctx_idx = cfg->idx;
+-	rtwvif->chanctx_assigned = true;
++	rtwvif_link->chanctx_idx = cfg->idx;
++	rtwvif_link->chanctx_assigned = true;
+ 	cfg->ref_count++;
+ 
+ 	if (cfg->idx == RTW89_CHANCTX_0)
+@@ -2469,7 +2522,7 @@ int rtw89_chanctx_ops_assign_vif(struct rtw89_dev *rtwdev,
+ }
+ 
+ void rtw89_chanctx_ops_unassign_vif(struct rtw89_dev *rtwdev,
+-				    struct rtw89_vif *rtwvif,
++				    struct rtw89_vif_link *rtwvif_link,
+ 				    struct ieee80211_chanctx_conf *ctx)
+ {
+ 	struct rtw89_chanctx_cfg *cfg = (struct rtw89_chanctx_cfg *)ctx->drv_priv;
+@@ -2479,8 +2532,8 @@ void rtw89_chanctx_ops_unassign_vif(struct rtw89_dev *rtwdev,
+ 	enum rtw89_entity_mode new;
+ 	int ret;
+ 
+-	rtwvif->chanctx_idx = RTW89_CHANCTX_0;
+-	rtwvif->chanctx_assigned = false;
++	rtwvif_link->chanctx_idx = RTW89_CHANCTX_0;
++	rtwvif_link->chanctx_assigned = false;
+ 	cfg->ref_count--;
+ 
+ 	if (cfg->ref_count != 0)
+diff --git a/drivers/net/wireless/realtek/rtw89/chan.h b/drivers/net/wireless/realtek/rtw89/chan.h
+index c6d31984e57536..4ed777ea506485 100644
+--- a/drivers/net/wireless/realtek/rtw89/chan.h
++++ b/drivers/net/wireless/realtek/rtw89/chan.h
+@@ -106,10 +106,10 @@ void rtw89_chanctx_ops_change(struct rtw89_dev *rtwdev,
+ 			      struct ieee80211_chanctx_conf *ctx,
+ 			      u32 changed);
+ int rtw89_chanctx_ops_assign_vif(struct rtw89_dev *rtwdev,
+-				 struct rtw89_vif *rtwvif,
++				 struct rtw89_vif_link *rtwvif_link,
+ 				 struct ieee80211_chanctx_conf *ctx);
+ void rtw89_chanctx_ops_unassign_vif(struct rtw89_dev *rtwdev,
+-				    struct rtw89_vif *rtwvif,
++				    struct rtw89_vif_link *rtwvif_link,
+ 				    struct ieee80211_chanctx_conf *ctx);
+ 
+ #endif
+diff --git a/drivers/net/wireless/realtek/rtw89/coex.c b/drivers/net/wireless/realtek/rtw89/coex.c
+index 8d27374db83ca0..8d54d71fcf539e 100644
+--- a/drivers/net/wireless/realtek/rtw89/coex.c
++++ b/drivers/net/wireless/realtek/rtw89/coex.c
+@@ -2492,6 +2492,8 @@ static void btc_fw_set_monreg(struct rtw89_dev *rtwdev)
+ 	if (ver->fcxmreg == 7) {
+ 		sz = struct_size(v7, regs, n);
+ 		v7 = kmalloc(sz, GFP_KERNEL);
++		if (!v7)
++			return;
+ 		v7->type = RPT_EN_MREG;
+ 		v7->fver = ver->fcxmreg;
+ 		v7->len = n;
+@@ -2506,6 +2508,8 @@ static void btc_fw_set_monreg(struct rtw89_dev *rtwdev)
+ 	} else {
+ 		sz = struct_size(v1, regs, n);
+ 		v1 = kmalloc(sz, GFP_KERNEL);
++		if (!v1)
++			return;
+ 		v1->fver = ver->fcxmreg;
+ 		v1->reg_num = n;
+ 		memcpy(v1->regs, chip->mon_reg, flex_array_size(v1, regs, n));
+@@ -4989,18 +4993,16 @@ struct rtw89_txtime_data {
+ 	bool reenable;
+ };
+ 
+-static void rtw89_tx_time_iter(void *data, struct ieee80211_sta *sta)
++static void __rtw89_tx_time_iter(struct rtw89_vif_link *rtwvif_link,
++				 struct rtw89_sta_link *rtwsta_link,
++				 struct rtw89_txtime_data *iter_data)
+ {
+-	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+-	struct rtw89_txtime_data *iter_data =
+-				(struct rtw89_txtime_data *)data;
+ 	struct rtw89_dev *rtwdev = iter_data->rtwdev;
+-	struct rtw89_vif *rtwvif = rtwsta->rtwvif;
+ 	struct rtw89_btc *btc = &rtwdev->btc;
+ 	struct rtw89_btc_cx *cx = &btc->cx;
+ 	struct rtw89_btc_wl_info *wl = &cx->wl;
+ 	struct rtw89_btc_wl_link_info *plink = NULL;
+-	u8 port = rtwvif->port;
++	u8 port = rtwvif_link->port;
+ 	u32 tx_time = iter_data->tx_time;
+ 	u8 tx_retry = iter_data->tx_retry;
+ 	u16 enable = iter_data->enable;
+@@ -5023,8 +5025,8 @@ static void rtw89_tx_time_iter(void *data, struct ieee80211_sta *sta)
+ 
+ 	/* backup the original tx time before tx-limit on */
+ 	if (reenable) {
+-		rtw89_mac_get_tx_time(rtwdev, rtwsta, &plink->tx_time);
+-		rtw89_mac_get_tx_retry_limit(rtwdev, rtwsta, &plink->tx_retry);
++		rtw89_mac_get_tx_time(rtwdev, rtwsta_link, &plink->tx_time);
++		rtw89_mac_get_tx_retry_limit(rtwdev, rtwsta_link, &plink->tx_retry);
+ 		rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ 			    "[BTC], %s(): reenable, tx_time=%d tx_retry= %d\n",
+ 			    __func__, plink->tx_time, plink->tx_retry);
+@@ -5032,22 +5034,37 @@ static void rtw89_tx_time_iter(void *data, struct ieee80211_sta *sta)
+ 
+ 	/* restore the original tx time if no tx-limit */
+ 	if (!enable) {
+-		rtw89_mac_set_tx_time(rtwdev, rtwsta, true, plink->tx_time);
+-		rtw89_mac_set_tx_retry_limit(rtwdev, rtwsta, true,
++		rtw89_mac_set_tx_time(rtwdev, rtwsta_link, true, plink->tx_time);
++		rtw89_mac_set_tx_retry_limit(rtwdev, rtwsta_link, true,
+ 					     plink->tx_retry);
+ 		rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ 			    "[BTC], %s(): restore, tx_time=%d tx_retry= %d\n",
+ 			    __func__, plink->tx_time, plink->tx_retry);
+ 
+ 	} else {
+-		rtw89_mac_set_tx_time(rtwdev, rtwsta, false, tx_time);
+-		rtw89_mac_set_tx_retry_limit(rtwdev, rtwsta, false, tx_retry);
++		rtw89_mac_set_tx_time(rtwdev, rtwsta_link, false, tx_time);
++		rtw89_mac_set_tx_retry_limit(rtwdev, rtwsta_link, false, tx_retry);
+ 		rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ 			    "[BTC], %s(): set, tx_time=%d tx_retry= %d\n",
+ 			    __func__, tx_time, tx_retry);
+ 	}
+ }
+ 
++static void rtw89_tx_time_iter(void *data, struct ieee80211_sta *sta)
++{
++	struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
++	struct rtw89_txtime_data *iter_data =
++				(struct rtw89_txtime_data *)data;
++	struct rtw89_vif_link *rtwvif_link;
++	struct rtw89_sta_link *rtwsta_link;
++	unsigned int link_id;
++
++	rtw89_sta_for_each_link(rtwsta, rtwsta_link, link_id) {
++		rtwvif_link = rtwsta_link->rtwvif_link;
++		__rtw89_tx_time_iter(rtwvif_link, rtwsta_link, iter_data);
++	}
++}
++
+ static void _set_wl_tx_limit(struct rtw89_dev *rtwdev)
+ {
+ 	struct rtw89_btc *btc = &rtwdev->btc;
+@@ -7481,13 +7498,16 @@ static void _update_bt_info(struct rtw89_dev *rtwdev, u8 *buf, u32 len)
+ 	_run_coex(rtwdev, BTC_RSN_UPDATE_BT_INFO);
+ }
+ 
+-void rtw89_btc_ntfy_role_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+-			      struct rtw89_sta *rtwsta, enum btc_role_state state)
++void rtw89_btc_ntfy_role_info(struct rtw89_dev *rtwdev,
++			      struct rtw89_vif_link *rtwvif_link,
++			      struct rtw89_sta_link *rtwsta_link,
++			      enum btc_role_state state)
+ {
+ 	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
+-						       rtwvif->chanctx_idx);
+-	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
+-	struct ieee80211_sta *sta = rtwsta_to_sta(rtwsta);
++						       rtwvif_link->chanctx_idx);
++	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
++	struct ieee80211_bss_conf *bss_conf;
++	struct ieee80211_link_sta *link_sta;
+ 	struct rtw89_btc *btc = &rtwdev->btc;
+ 	const struct rtw89_btc_ver *ver = btc->ver;
+ 	struct rtw89_btc_wl_info *wl = &btc->cx.wl;
+@@ -7495,51 +7515,59 @@ void rtw89_btc_ntfy_role_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif
+ 	struct rtw89_btc_wl_link_info *wlinfo = NULL;
+ 	u8 mode = 0, rlink_id, link_mode_ori, pta_req_mac_ori, wa_type;
+ 
++	rcu_read_lock();
++
++	bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, false);
++
+ 	rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], state=%d\n", state);
+ 	rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ 		    "[BTC], role is STA=%d\n",
+ 		    vif->type == NL80211_IFTYPE_STATION);
+-	rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], port=%d\n", rtwvif->port);
++	rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], port=%d\n", rtwvif_link->port);
+ 	rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], band=%d ch=%d bw=%d\n",
+ 		    chan->band_type, chan->channel, chan->band_width);
+ 	rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], associated=%d\n",
+ 		    state == BTC_ROLE_MSTS_STA_CONN_END);
+ 	rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ 		    "[BTC], bcn_period=%d dtim_period=%d\n",
+-		    vif->bss_conf.beacon_int, vif->bss_conf.dtim_period);
++		    bss_conf->beacon_int, bss_conf->dtim_period);
++
++	if (rtwsta_link) {
++		link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, false);
+ 
+-	if (rtwsta) {
+ 		rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], STA mac_id=%d\n",
+-			    rtwsta->mac_id);
++			    rtwsta_link->mac_id);
+ 
+ 		rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ 			    "[BTC], STA support HE=%d VHT=%d HT=%d\n",
+-			    sta->deflink.he_cap.has_he,
+-			    sta->deflink.vht_cap.vht_supported,
+-			    sta->deflink.ht_cap.ht_supported);
+-		if (sta->deflink.he_cap.has_he)
++			    link_sta->he_cap.has_he,
++			    link_sta->vht_cap.vht_supported,
++			    link_sta->ht_cap.ht_supported);
++		if (link_sta->he_cap.has_he)
+ 			mode |= BIT(BTC_WL_MODE_HE);
+-		if (sta->deflink.vht_cap.vht_supported)
++		if (link_sta->vht_cap.vht_supported)
+ 			mode |= BIT(BTC_WL_MODE_VHT);
+-		if (sta->deflink.ht_cap.ht_supported)
++		if (link_sta->ht_cap.ht_supported)
+ 			mode |= BIT(BTC_WL_MODE_HT);
+ 
+ 		r.mode = mode;
+ 	}
+ 
+-	if (rtwvif->wifi_role >= RTW89_WIFI_ROLE_MLME_MAX)
++	if (rtwvif_link->wifi_role >= RTW89_WIFI_ROLE_MLME_MAX) {
++		rcu_read_unlock();
+ 		return;
++	}
+ 
+ 	rtw89_debug(rtwdev, RTW89_DBG_BTC,
+-		    "[BTC], wifi_role=%d\n", rtwvif->wifi_role);
++		    "[BTC], wifi_role=%d\n", rtwvif_link->wifi_role);
+ 
+-	r.role = rtwvif->wifi_role;
+-	r.phy = rtwvif->phy_idx;
+-	r.pid = rtwvif->port;
++	r.role = rtwvif_link->wifi_role;
++	r.phy = rtwvif_link->phy_idx;
++	r.pid = rtwvif_link->port;
+ 	r.active = true;
+ 	r.connected = MLME_LINKED;
+-	r.bcn_period = vif->bss_conf.beacon_int;
+-	r.dtim_period = vif->bss_conf.dtim_period;
++	r.bcn_period = bss_conf->beacon_int;
++	r.dtim_period = bss_conf->dtim_period;
+ 	r.band = chan->band_type;
+ 	r.ch = chan->channel;
+ 	r.bw = chan->band_width;
+@@ -7547,10 +7575,12 @@ void rtw89_btc_ntfy_role_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif
+ 	r.chdef.center_ch = chan->channel;
+ 	r.chdef.bw = chan->band_width;
+ 	r.chdef.chan = chan->primary_channel;
+-	ether_addr_copy(r.mac_addr, rtwvif->mac_addr);
++	ether_addr_copy(r.mac_addr, rtwvif_link->mac_addr);
+ 
+-	if (rtwsta && vif->type == NL80211_IFTYPE_STATION)
+-		r.mac_id = rtwsta->mac_id;
++	rcu_read_unlock();
++
++	if (rtwsta_link && vif->type == NL80211_IFTYPE_STATION)
++		r.mac_id = rtwsta_link->mac_id;
+ 
+ 	btc->dm.cnt_notify[BTC_NCNT_ROLE_INFO]++;
+ 
+@@ -7781,26 +7811,26 @@ struct rtw89_btc_wl_sta_iter_data {
+ 	bool is_traffic_change;
+ };
+ 
+-static void rtw89_btc_ntfy_wl_sta_iter(void *data, struct ieee80211_sta *sta)
++static
++void __rtw89_btc_ntfy_wl_sta_iter(struct rtw89_vif_link *rtwvif_link,
++				  struct rtw89_sta_link *rtwsta_link,
++				  struct rtw89_btc_wl_sta_iter_data *iter_data)
+ {
+-	struct rtw89_btc_wl_sta_iter_data *iter_data =
+-				(struct rtw89_btc_wl_sta_iter_data *)data;
++	struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
+ 	struct rtw89_dev *rtwdev = iter_data->rtwdev;
+ 	struct rtw89_btc *btc = &rtwdev->btc;
+ 	struct rtw89_btc_dm *dm = &btc->dm;
+ 	const struct rtw89_btc_ver *ver = btc->ver;
+ 	struct rtw89_btc_wl_info *wl = &btc->cx.wl;
+ 	struct rtw89_btc_wl_link_info *link_info = NULL;
+-	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+ 	struct rtw89_traffic_stats *link_info_t = NULL;
+-	struct rtw89_vif *rtwvif = rtwsta->rtwvif;
+ 	struct rtw89_traffic_stats *stats = &rtwvif->stats;
+ 	const struct rtw89_chip_info *chip = rtwdev->chip;
+ 	struct rtw89_btc_wl_role_info *r;
+ 	struct rtw89_btc_wl_role_info_v1 *r1;
+ 	u32 last_tx_rate, last_rx_rate;
+ 	u16 last_tx_lvl, last_rx_lvl;
+-	u8 port = rtwvif->port;
++	u8 port = rtwvif_link->port;
+ 	u8 rssi;
+ 	u8 busy = 0;
+ 	u8 dir = 0;
+@@ -7808,11 +7838,11 @@ static void rtw89_btc_ntfy_wl_sta_iter(void *data, struct ieee80211_sta *sta)
+ 	u8 i = 0;
+ 	bool is_sta_change = false, is_traffic_change = false;
+ 
+-	rssi = ewma_rssi_read(&rtwsta->avg_rssi) >> RSSI_FACTOR;
++	rssi = ewma_rssi_read(&rtwsta_link->avg_rssi) >> RSSI_FACTOR;
+ 	rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], rssi=%d\n", rssi);
+ 
+ 	link_info = &wl->link_info[port];
+-	link_info->stat.traffic = rtwvif->stats;
++	link_info->stat.traffic = *stats;
+ 	link_info_t = &link_info->stat.traffic;
+ 
+ 	if (link_info->connected == MLME_NO_LINK) {
+@@ -7860,19 +7890,19 @@ static void rtw89_btc_ntfy_wl_sta_iter(void *data, struct ieee80211_sta *sta)
+ 	iter_data->busy_all |= busy;
+ 	iter_data->dir_all |= BIT(dir);
+ 
+-	if (rtwsta->rx_hw_rate <= RTW89_HW_RATE_CCK2 &&
++	if (rtwsta_link->rx_hw_rate <= RTW89_HW_RATE_CCK2 &&
+ 	    last_rx_rate > RTW89_HW_RATE_CCK2 &&
+ 	    link_info_t->rx_tfc_lv > RTW89_TFC_IDLE)
+ 		link_info->rx_rate_drop_cnt++;
+ 
+-	if (last_tx_rate != rtwsta->ra_report.hw_rate ||
+-	    last_rx_rate != rtwsta->rx_hw_rate ||
++	if (last_tx_rate != rtwsta_link->ra_report.hw_rate ||
++	    last_rx_rate != rtwsta_link->rx_hw_rate ||
+ 	    last_tx_lvl != link_info_t->tx_tfc_lv ||
+ 	    last_rx_lvl != link_info_t->rx_tfc_lv)
+ 		is_traffic_change = true;
+ 
+-	link_info_t->tx_rate = rtwsta->ra_report.hw_rate;
+-	link_info_t->rx_rate = rtwsta->rx_hw_rate;
++	link_info_t->tx_rate = rtwsta_link->ra_report.hw_rate;
++	link_info_t->rx_rate = rtwsta_link->rx_hw_rate;
+ 
+ 	if (link_info->role == RTW89_WIFI_ROLE_STATION ||
+ 	    link_info->role == RTW89_WIFI_ROLE_P2P_CLIENT) {
+@@ -7884,19 +7914,19 @@ static void rtw89_btc_ntfy_wl_sta_iter(void *data, struct ieee80211_sta *sta)
+ 		r = &wl->role_info;
+ 		r->active_role[port].tx_lvl = stats->tx_tfc_lv;
+ 		r->active_role[port].rx_lvl = stats->rx_tfc_lv;
+-		r->active_role[port].tx_rate = rtwsta->ra_report.hw_rate;
+-		r->active_role[port].rx_rate = rtwsta->rx_hw_rate;
++		r->active_role[port].tx_rate = rtwsta_link->ra_report.hw_rate;
++		r->active_role[port].rx_rate = rtwsta_link->rx_hw_rate;
+ 	} else if (ver->fwlrole == 1) {
+ 		r1 = &wl->role_info_v1;
+ 		r1->active_role_v1[port].tx_lvl = stats->tx_tfc_lv;
+ 		r1->active_role_v1[port].rx_lvl = stats->rx_tfc_lv;
+-		r1->active_role_v1[port].tx_rate = rtwsta->ra_report.hw_rate;
+-		r1->active_role_v1[port].rx_rate = rtwsta->rx_hw_rate;
++		r1->active_role_v1[port].tx_rate = rtwsta_link->ra_report.hw_rate;
++		r1->active_role_v1[port].rx_rate = rtwsta_link->rx_hw_rate;
+ 	} else if (ver->fwlrole == 2) {
+ 		dm->trx_info.tx_lvl = stats->tx_tfc_lv;
+ 		dm->trx_info.rx_lvl = stats->rx_tfc_lv;
+-		dm->trx_info.tx_rate = rtwsta->ra_report.hw_rate;
+-		dm->trx_info.rx_rate = rtwsta->rx_hw_rate;
++		dm->trx_info.tx_rate = rtwsta_link->ra_report.hw_rate;
++		dm->trx_info.rx_rate = rtwsta_link->rx_hw_rate;
+ 	}
+ 
+ 	dm->trx_info.tx_tp = link_info_t->tx_throughput;
+@@ -7916,6 +7946,21 @@ static void rtw89_btc_ntfy_wl_sta_iter(void *data, struct ieee80211_sta *sta)
+ 		iter_data->is_traffic_change = true;
+ }
+ 
++static void rtw89_btc_ntfy_wl_sta_iter(void *data, struct ieee80211_sta *sta)
++{
++	struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
++	struct rtw89_btc_wl_sta_iter_data *iter_data =
++				(struct rtw89_btc_wl_sta_iter_data *)data;
++	struct rtw89_vif_link *rtwvif_link;
++	struct rtw89_sta_link *rtwsta_link;
++	unsigned int link_id;
++
++	rtw89_sta_for_each_link(rtwsta, rtwsta_link, link_id) {
++		rtwvif_link = rtwsta_link->rtwvif_link;
++		__rtw89_btc_ntfy_wl_sta_iter(rtwvif_link, rtwsta_link, iter_data);
++	}
++}
++
+ #define BTC_NHM_CHK_INTVL 20
+ 
+ void rtw89_btc_ntfy_wl_sta(struct rtw89_dev *rtwdev)
+diff --git a/drivers/net/wireless/realtek/rtw89/coex.h b/drivers/net/wireless/realtek/rtw89/coex.h
+index de53b56632f7c6..dbdb56e063ef03 100644
+--- a/drivers/net/wireless/realtek/rtw89/coex.h
++++ b/drivers/net/wireless/realtek/rtw89/coex.h
+@@ -271,8 +271,10 @@ void rtw89_btc_ntfy_eapol_packet_work(struct work_struct *work);
+ void rtw89_btc_ntfy_arp_packet_work(struct work_struct *work);
+ void rtw89_btc_ntfy_dhcp_packet_work(struct work_struct *work);
+ void rtw89_btc_ntfy_icmp_packet_work(struct work_struct *work);
+-void rtw89_btc_ntfy_role_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+-			      struct rtw89_sta *rtwsta, enum btc_role_state state);
++void rtw89_btc_ntfy_role_info(struct rtw89_dev *rtwdev,
++			      struct rtw89_vif_link *rtwvif_link,
++			      struct rtw89_sta_link *rtwsta_link,
++			      enum btc_role_state state);
+ void rtw89_btc_ntfy_radio_state(struct rtw89_dev *rtwdev, enum btc_rfctrl rf_state);
+ void rtw89_btc_ntfy_wl_rfk(struct rtw89_dev *rtwdev, u8 phy_map,
+ 			   enum btc_wl_rfk_type type,
+diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c
+index 4553810634c66b..5b8e65f6de6a4e 100644
+--- a/drivers/net/wireless/realtek/rtw89/core.c
++++ b/drivers/net/wireless/realtek/rtw89/core.c
+@@ -436,15 +436,6 @@ int rtw89_set_channel(struct rtw89_dev *rtwdev)
+ 	return 0;
+ }
+ 
+-void rtw89_get_channel(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+-		       struct rtw89_chan *chan)
+-{
+-	const struct cfg80211_chan_def *chandef;
+-
+-	chandef = rtw89_chandef_get(rtwdev, rtwvif->chanctx_idx);
+-	rtw89_get_channel_params(chandef, chan);
+-}
+-
+ static enum rtw89_core_tx_type
+ rtw89_core_get_tx_type(struct rtw89_dev *rtwdev,
+ 		       struct sk_buff *skb)
+@@ -463,8 +454,9 @@ rtw89_core_tx_update_ampdu_info(struct rtw89_dev *rtwdev,
+ 				struct rtw89_core_tx_request *tx_req,
+ 				enum btc_pkt_type pkt_type)
+ {
+-	struct ieee80211_sta *sta = tx_req->sta;
++	struct rtw89_sta_link *rtwsta_link = tx_req->rtwsta_link;
+ 	struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
++	struct ieee80211_link_sta *link_sta;
+ 	struct sk_buff *skb = tx_req->skb;
+ 	struct rtw89_sta *rtwsta;
+ 	u8 ampdu_num;
+@@ -478,21 +470,26 @@ rtw89_core_tx_update_ampdu_info(struct rtw89_dev *rtwdev,
+ 	if (!(IEEE80211_SKB_CB(skb)->flags & IEEE80211_TX_CTL_AMPDU))
+ 		return;
+ 
+-	if (!sta) {
++	if (!rtwsta_link) {
+ 		rtw89_warn(rtwdev, "cannot set ampdu info without sta\n");
+ 		return;
+ 	}
+ 
+ 	tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
+-	rtwsta = (struct rtw89_sta *)sta->drv_priv;
++	rtwsta = rtwsta_link->rtwsta;
++
++	rcu_read_lock();
+ 
++	link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, false);
+ 	ampdu_num = (u8)((rtwsta->ampdu_params[tid].agg_num ?
+ 			  rtwsta->ampdu_params[tid].agg_num :
+-			  4 << sta->deflink.ht_cap.ampdu_factor) - 1);
++			  4 << link_sta->ht_cap.ampdu_factor) - 1);
+ 
+ 	desc_info->agg_en = true;
+-	desc_info->ampdu_density = sta->deflink.ht_cap.ampdu_density;
++	desc_info->ampdu_density = link_sta->ht_cap.ampdu_density;
+ 	desc_info->ampdu_num = ampdu_num;
++
++	rcu_read_unlock();
+ }
+ 
+ static void
+@@ -569,9 +566,13 @@ static u16 rtw89_core_get_mgmt_rate(struct rtw89_dev *rtwdev,
+ 				    const struct rtw89_chan *chan)
+ {
+ 	struct sk_buff *skb = tx_req->skb;
++	struct rtw89_vif_link *rtwvif_link = tx_req->rtwvif_link;
++	struct rtw89_sta_link *rtwsta_link = tx_req->rtwsta_link;
+ 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ 	struct ieee80211_vif *vif = tx_info->control.vif;
++	struct ieee80211_bss_conf *bss_conf;
+ 	u16 lowest_rate;
++	u16 rate;
+ 
+ 	if (tx_info->flags & IEEE80211_TX_CTL_NO_CCK_RATE ||
+ 	    (vif && vif->p2p))
+@@ -581,25 +582,35 @@ static u16 rtw89_core_get_mgmt_rate(struct rtw89_dev *rtwdev,
+ 	else
+ 		lowest_rate = RTW89_HW_RATE_OFDM6;
+ 
+-	if (!vif || !vif->bss_conf.basic_rates || !tx_req->sta)
++	if (!rtwvif_link)
+ 		return lowest_rate;
+ 
+-	return __ffs(vif->bss_conf.basic_rates) + lowest_rate;
++	rcu_read_lock();
++
++	bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, false);
++	if (!bss_conf->basic_rates || !rtwsta_link) {
++		rate = lowest_rate;
++		goto out;
++	}
++
++	rate = __ffs(bss_conf->basic_rates) + lowest_rate;
++
++out:
++	rcu_read_unlock();
++
++	return rate;
+ }
+ 
+ static u8 rtw89_core_tx_get_mac_id(struct rtw89_dev *rtwdev,
+ 				   struct rtw89_core_tx_request *tx_req)
+ {
+-	struct ieee80211_vif *vif = tx_req->vif;
+-	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+-	struct ieee80211_sta *sta = tx_req->sta;
+-	struct rtw89_sta *rtwsta;
++	struct rtw89_vif_link *rtwvif_link = tx_req->rtwvif_link;
++	struct rtw89_sta_link *rtwsta_link = tx_req->rtwsta_link;
+ 
+-	if (!sta)
+-		return rtwvif->mac_id;
++	if (!rtwsta_link)
++		return rtwvif_link->mac_id;
+ 
+-	rtwsta = (struct rtw89_sta *)sta->drv_priv;
+-	return rtwsta->mac_id;
++	return rtwsta_link->mac_id;
+ }
+ 
+ static void rtw89_core_tx_update_llc_hdr(struct rtw89_dev *rtwdev,
+@@ -618,11 +629,10 @@ rtw89_core_tx_update_mgmt_info(struct rtw89_dev *rtwdev,
+ 			       struct rtw89_core_tx_request *tx_req)
+ {
+ 	const struct rtw89_chip_info *chip = rtwdev->chip;
+-	struct ieee80211_vif *vif = tx_req->vif;
+-	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
++	struct rtw89_vif_link *rtwvif_link = tx_req->rtwvif_link;
+ 	struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
+ 	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
+-						       rtwvif->chanctx_idx);
++						       rtwvif_link->chanctx_idx);
+ 	struct sk_buff *skb = tx_req->skb;
+ 	u8 qsel, ch_dma;
+ 
+@@ -631,7 +641,7 @@ rtw89_core_tx_update_mgmt_info(struct rtw89_dev *rtwdev,
+ 
+ 	desc_info->qsel = qsel;
+ 	desc_info->ch_dma = ch_dma;
+-	desc_info->port = desc_info->hiq ? rtwvif->port : 0;
++	desc_info->port = desc_info->hiq ? rtwvif_link->port : 0;
+ 	desc_info->mac_id = rtw89_core_tx_get_mac_id(rtwdev, tx_req);
+ 	desc_info->hw_ssn_sel = RTW89_MGMT_HW_SSN_SEL;
+ 	desc_info->hw_seq_mode = RTW89_MGMT_HW_SEQ_MODE;
+@@ -701,26 +711,36 @@ __rtw89_core_tx_check_he_qos_htc(struct rtw89_dev *rtwdev,
+ 				 struct rtw89_core_tx_request *tx_req,
+ 				 enum btc_pkt_type pkt_type)
+ {
+-	struct ieee80211_sta *sta = tx_req->sta;
+-	struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
++	struct rtw89_sta_link *rtwsta_link = tx_req->rtwsta_link;
+ 	struct sk_buff *skb = tx_req->skb;
+ 	struct ieee80211_hdr *hdr = (void *)skb->data;
++	struct ieee80211_link_sta *link_sta;
+ 	__le16 fc = hdr->frame_control;
+ 
+ 	/* AP IOT issue with EAPoL, ARP and DHCP */
+ 	if (pkt_type < PACKET_MAX)
+ 		return false;
+ 
+-	if (!sta || !sta->deflink.he_cap.has_he)
++	if (!rtwsta_link)
+ 		return false;
+ 
++	rcu_read_lock();
++
++	link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, false);
++	if (!link_sta->he_cap.has_he) {
++		rcu_read_unlock();
++		return false;
++	}
++
++	rcu_read_unlock();
++
+ 	if (!ieee80211_is_data_qos(fc))
+ 		return false;
+ 
+ 	if (skb_headroom(skb) < IEEE80211_HT_CTL_LEN)
+ 		return false;
+ 
+-	if (rtwsta && rtwsta->ra_report.might_fallback_legacy)
++	if (rtwsta_link && rtwsta_link->ra_report.might_fallback_legacy)
+ 		return false;
+ 
+ 	return true;
+@@ -730,8 +750,7 @@ static void
+ __rtw89_core_tx_adjust_he_qos_htc(struct rtw89_dev *rtwdev,
+ 				  struct rtw89_core_tx_request *tx_req)
+ {
+-	struct ieee80211_sta *sta = tx_req->sta;
+-	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
++	struct rtw89_sta_link *rtwsta_link = tx_req->rtwsta_link;
+ 	struct sk_buff *skb = tx_req->skb;
+ 	struct ieee80211_hdr *hdr = (void *)skb->data;
+ 	__le16 fc = hdr->frame_control;
+@@ -747,7 +766,7 @@ __rtw89_core_tx_adjust_he_qos_htc(struct rtw89_dev *rtwdev,
+ 	hdr = data;
+ 	htc = data + hdr_len;
+ 	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_ORDER);
+-	*htc = rtwsta->htc_template ? rtwsta->htc_template :
++	*htc = rtwsta_link->htc_template ? rtwsta_link->htc_template :
+ 	       le32_encode_bits(RTW89_HTC_VARIANT_HE, RTW89_HTC_MASK_VARIANT) |
+ 	       le32_encode_bits(RTW89_HTC_VARIANT_HE_CID_CAS, RTW89_HTC_MASK_CTL_ID);
+ 
+@@ -761,8 +780,7 @@ rtw89_core_tx_update_he_qos_htc(struct rtw89_dev *rtwdev,
+ 				enum btc_pkt_type pkt_type)
+ {
+ 	struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
+-	struct ieee80211_vif *vif = tx_req->vif;
+-	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
++	struct rtw89_vif_link *rtwvif_link = tx_req->rtwvif_link;
+ 
+ 	if (!__rtw89_core_tx_check_he_qos_htc(rtwdev, tx_req, pkt_type))
+ 		goto desc_bk;
+@@ -773,23 +791,25 @@ rtw89_core_tx_update_he_qos_htc(struct rtw89_dev *rtwdev,
+ 	desc_info->a_ctrl_bsr = true;
+ 
+ desc_bk:
+-	if (!rtwvif || rtwvif->last_a_ctrl == desc_info->a_ctrl_bsr)
++	if (!rtwvif_link || rtwvif_link->last_a_ctrl == desc_info->a_ctrl_bsr)
+ 		return;
+ 
+-	rtwvif->last_a_ctrl = desc_info->a_ctrl_bsr;
++	rtwvif_link->last_a_ctrl = desc_info->a_ctrl_bsr;
+ 	desc_info->bk = true;
+ }
+ 
+ static u16 rtw89_core_get_data_rate(struct rtw89_dev *rtwdev,
+ 				    struct rtw89_core_tx_request *tx_req)
+ {
+-	struct ieee80211_vif *vif = tx_req->vif;
+-	struct ieee80211_sta *sta = tx_req->sta;
+-	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+-	struct rtw89_phy_rate_pattern *rate_pattern = &rtwvif->rate_pattern;
+-	enum rtw89_chanctx_idx idx = rtwvif->chanctx_idx;
++	struct rtw89_vif_link *rtwvif_link = tx_req->rtwvif_link;
++	struct rtw89_sta_link *rtwsta_link = tx_req->rtwsta_link;
++	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
++	struct rtw89_phy_rate_pattern *rate_pattern = &rtwvif_link->rate_pattern;
++	enum rtw89_chanctx_idx idx = rtwvif_link->chanctx_idx;
+ 	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, idx);
++	struct ieee80211_link_sta *link_sta;
+ 	u16 lowest_rate;
++	u16 rate;
+ 
+ 	if (rate_pattern->enable)
+ 		return rate_pattern->rate;
+@@ -801,20 +821,31 @@ static u16 rtw89_core_get_data_rate(struct rtw89_dev *rtwdev,
+ 	else
+ 		lowest_rate = RTW89_HW_RATE_OFDM6;
+ 
+-	if (!sta || !sta->deflink.supp_rates[chan->band_type])
++	if (!rtwsta_link)
+ 		return lowest_rate;
+ 
+-	return __ffs(sta->deflink.supp_rates[chan->band_type]) + lowest_rate;
++	rcu_read_lock();
++
++	link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, false);
++	if (!link_sta->supp_rates[chan->band_type]) {
++		rate = lowest_rate;
++		goto out;
++	}
++
++	rate = __ffs(link_sta->supp_rates[chan->band_type]) + lowest_rate;
++
++out:
++	rcu_read_unlock();
++
++	return rate;
+ }
+ 
+ static void
+ rtw89_core_tx_update_data_info(struct rtw89_dev *rtwdev,
+ 			       struct rtw89_core_tx_request *tx_req)
+ {
+-	struct ieee80211_vif *vif = tx_req->vif;
+-	struct ieee80211_sta *sta = tx_req->sta;
+-	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+-	struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
++	struct rtw89_vif_link *rtwvif_link = tx_req->rtwvif_link;
++	struct rtw89_sta_link *rtwsta_link = tx_req->rtwsta_link;
+ 	struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
+ 	struct sk_buff *skb = tx_req->skb;
+ 	u8 tid, tid_indicate;
+@@ -829,10 +860,10 @@ rtw89_core_tx_update_data_info(struct rtw89_dev *rtwdev,
+ 	desc_info->tid_indicate = tid_indicate;
+ 	desc_info->qsel = qsel;
+ 	desc_info->mac_id = rtw89_core_tx_get_mac_id(rtwdev, tx_req);
+-	desc_info->port = desc_info->hiq ? rtwvif->port : 0;
+-	desc_info->er_cap = rtwsta ? rtwsta->er_cap : false;
+-	desc_info->stbc = rtwsta ? rtwsta->ra.stbc_cap : false;
+-	desc_info->ldpc = rtwsta ? rtwsta->ra.ldpc_cap : false;
++	desc_info->port = desc_info->hiq ? rtwvif_link->port : 0;
++	desc_info->er_cap = rtwsta_link ? rtwsta_link->er_cap : false;
++	desc_info->stbc = rtwsta_link ? rtwsta_link->ra.stbc_cap : false;
++	desc_info->ldpc = rtwsta_link ? rtwsta_link->ra.ldpc_cap : false;
+ 
+ 	/* enable wd_info for AMPDU */
+ 	desc_info->en_wd_info = true;
+@@ -1027,13 +1058,34 @@ int rtw89_h2c_tx(struct rtw89_dev *rtwdev,
+ int rtw89_core_tx_write(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
+ 			struct ieee80211_sta *sta, struct sk_buff *skb, int *qsel)
+ {
++	struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
++	struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
+ 	struct rtw89_core_tx_request tx_req = {0};
+-	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
++	struct rtw89_sta_link *rtwsta_link = NULL;
++	struct rtw89_vif_link *rtwvif_link;
+ 	int ret;
+ 
++	/* By default, driver writes tx via the link on HW-0. And then,
++	 * according to links' status, HW can change tx to another link.
++	 */
++
++	if (rtwsta) {
++		rtwsta_link = rtw89_sta_get_link_inst(rtwsta, 0);
++		if (unlikely(!rtwsta_link)) {
++			rtw89_err(rtwdev, "tx: find no sta link on HW-0\n");
++			return -ENOLINK;
++		}
++	}
++
++	rtwvif_link = rtw89_vif_get_link_inst(rtwvif, 0);
++	if (unlikely(!rtwvif_link)) {
++		rtw89_err(rtwdev, "tx: find no vif link on HW-0\n");
++		return -ENOLINK;
++	}
++
+ 	tx_req.skb = skb;
+-	tx_req.sta = sta;
+-	tx_req.vif = vif;
++	tx_req.rtwvif_link = rtwvif_link;
++	tx_req.rtwsta_link = rtwsta_link;
+ 
+ 	rtw89_traffic_stats_accu(rtwdev, &rtwdev->stats, skb, true);
+ 	rtw89_traffic_stats_accu(rtwdev, &rtwvif->stats, skb, true);
+@@ -1514,16 +1566,24 @@ static u8 rtw89_get_data_rate_nss(struct rtw89_dev *rtwdev, u16 data_rate)
+ static void rtw89_core_rx_process_phy_ppdu_iter(void *data,
+ 						struct ieee80211_sta *sta)
+ {
+-	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+ 	struct rtw89_rx_phy_ppdu *phy_ppdu = (struct rtw89_rx_phy_ppdu *)data;
++	struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
+ 	struct rtw89_dev *rtwdev = rtwsta->rtwdev;
+ 	struct rtw89_hal *hal = &rtwdev->hal;
++	struct rtw89_sta_link *rtwsta_link;
+ 	u8 ant_num = hal->ant_diversity ? 2 : rtwdev->chip->rf_path_num;
+ 	u8 ant_pos = U8_MAX;
+ 	u8 evm_pos = 0;
+ 	int i;
+ 
+-	if (rtwsta->mac_id != phy_ppdu->mac_id || !phy_ppdu->to_self)
++	/* FIXME: For single link, taking link on HW-0 here is okay. But, when
++	 * enabling multiple active links, we should determine the right link.
++	 */
++	rtwsta_link = rtw89_sta_get_link_inst(rtwsta, 0);
++	if (unlikely(!rtwsta_link))
++		return;
++
++	if (rtwsta_link->mac_id != phy_ppdu->mac_id || !phy_ppdu->to_self)
+ 		return;
+ 
+ 	if (hal->ant_diversity && hal->antenna_rx) {
+@@ -1531,22 +1591,24 @@ static void rtw89_core_rx_process_phy_ppdu_iter(void *data,
+ 		evm_pos = ant_pos;
+ 	}
+ 
+-	ewma_rssi_add(&rtwsta->avg_rssi, phy_ppdu->rssi_avg);
++	ewma_rssi_add(&rtwsta_link->avg_rssi, phy_ppdu->rssi_avg);
+ 
+ 	if (ant_pos < ant_num) {
+-		ewma_rssi_add(&rtwsta->rssi[ant_pos], phy_ppdu->rssi[0]);
++		ewma_rssi_add(&rtwsta_link->rssi[ant_pos], phy_ppdu->rssi[0]);
+ 	} else {
+ 		for (i = 0; i < rtwdev->chip->rf_path_num; i++)
+-			ewma_rssi_add(&rtwsta->rssi[i], phy_ppdu->rssi[i]);
++			ewma_rssi_add(&rtwsta_link->rssi[i], phy_ppdu->rssi[i]);
+ 	}
+ 
+ 	if (phy_ppdu->ofdm.has && (phy_ppdu->has_data || phy_ppdu->has_bcn)) {
+-		ewma_snr_add(&rtwsta->avg_snr, phy_ppdu->ofdm.avg_snr);
++		ewma_snr_add(&rtwsta_link->avg_snr, phy_ppdu->ofdm.avg_snr);
+ 		if (rtw89_get_data_rate_nss(rtwdev, phy_ppdu->rate) == 1) {
+-			ewma_evm_add(&rtwsta->evm_1ss, phy_ppdu->ofdm.evm_min);
++			ewma_evm_add(&rtwsta_link->evm_1ss, phy_ppdu->ofdm.evm_min);
+ 		} else {
+-			ewma_evm_add(&rtwsta->evm_min[evm_pos], phy_ppdu->ofdm.evm_min);
+-			ewma_evm_add(&rtwsta->evm_max[evm_pos], phy_ppdu->ofdm.evm_max);
++			ewma_evm_add(&rtwsta_link->evm_min[evm_pos],
++				     phy_ppdu->ofdm.evm_min);
++			ewma_evm_add(&rtwsta_link->evm_max[evm_pos],
++				     phy_ppdu->ofdm.evm_max);
+ 		}
+ 	}
+ }
+@@ -1876,17 +1938,19 @@ struct rtw89_vif_rx_stats_iter_data {
+ };
+ 
+ static void rtw89_stats_trigger_frame(struct rtw89_dev *rtwdev,
+-				      struct ieee80211_vif *vif,
++				      struct rtw89_vif_link *rtwvif_link,
++				      struct ieee80211_bss_conf *bss_conf,
+ 				      struct sk_buff *skb)
+ {
+-	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ 	struct ieee80211_trigger *tf = (struct ieee80211_trigger *)skb->data;
++	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
++	struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
+ 	u8 *pos, *end, type, tf_bw;
+ 	u16 aid, tf_rua;
+ 
+-	if (!ether_addr_equal(vif->bss_conf.bssid, tf->ta) ||
+-	    rtwvif->wifi_role != RTW89_WIFI_ROLE_STATION ||
+-	    rtwvif->net_type == RTW89_NET_TYPE_NO_LINK)
++	if (!ether_addr_equal(bss_conf->bssid, tf->ta) ||
++	    rtwvif_link->wifi_role != RTW89_WIFI_ROLE_STATION ||
++	    rtwvif_link->net_type == RTW89_NET_TYPE_NO_LINK)
+ 		return;
+ 
+ 	type = le64_get_bits(tf->common_info, IEEE80211_TRIGGER_TYPE_MASK);
+@@ -1915,7 +1979,7 @@ static void rtw89_stats_trigger_frame(struct rtw89_dev *rtwdev,
+ 			rtwdev->stats.rx_tf_acc++;
+ 			if (tf_bw == IEEE80211_TRIGGER_ULBW_160_80P80MHZ &&
+ 			    rua <= NL80211_RATE_INFO_HE_RU_ALLOC_106)
+-				rtwvif->pwr_diff_en = true;
++				rtwvif_link->pwr_diff_en = true;
+ 			break;
+ 		}
+ 
+@@ -1986,7 +2050,7 @@ static void rtw89_core_cancel_6ghz_probe_tx(struct rtw89_dev *rtwdev,
+ 		ieee80211_queue_work(rtwdev->hw, &rtwdev->cancel_6ghz_probe_work);
+ }
+ 
+-static void rtw89_vif_sync_bcn_tsf(struct rtw89_vif *rtwvif,
++static void rtw89_vif_sync_bcn_tsf(struct rtw89_vif_link *rtwvif_link,
+ 				   struct ieee80211_hdr *hdr, size_t len)
+ {
+ 	struct ieee80211_mgmt *mgmt = (typeof(mgmt))hdr;
+@@ -1994,20 +2058,22 @@ static void rtw89_vif_sync_bcn_tsf(struct rtw89_vif *rtwvif,
+ 	if (len < offsetof(typeof(*mgmt), u.beacon.variable))
+ 		return;
+ 
+-	WRITE_ONCE(rtwvif->sync_bcn_tsf, le64_to_cpu(mgmt->u.beacon.timestamp));
++	WRITE_ONCE(rtwvif_link->sync_bcn_tsf, le64_to_cpu(mgmt->u.beacon.timestamp));
+ }
+ 
+ static void rtw89_vif_rx_stats_iter(void *data, u8 *mac,
+ 				    struct ieee80211_vif *vif)
+ {
+-	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ 	struct rtw89_vif_rx_stats_iter_data *iter_data = data;
+ 	struct rtw89_dev *rtwdev = iter_data->rtwdev;
++	struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
+ 	struct rtw89_pkt_stat *pkt_stat = &rtwdev->phystat.cur_pkt_stat;
+ 	struct rtw89_rx_desc_info *desc_info = iter_data->desc_info;
+ 	struct sk_buff *skb = iter_data->skb;
+ 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ 	struct rtw89_rx_phy_ppdu *phy_ppdu = iter_data->phy_ppdu;
++	struct ieee80211_bss_conf *bss_conf;
++	struct rtw89_vif_link *rtwvif_link;
+ 	const u8 *bssid = iter_data->bssid;
+ 
+ 	if (rtwdev->scanning &&
+@@ -2015,33 +2081,46 @@ static void rtw89_vif_rx_stats_iter(void *data, u8 *mac,
+ 	     ieee80211_is_probe_resp(hdr->frame_control)))
+ 		rtw89_core_cancel_6ghz_probe_tx(rtwdev, skb);
+ 
+-	if (!vif->bss_conf.bssid)
+-		return;
++	rcu_read_lock();
++
++	/* FIXME: For single link, taking link on HW-0 here is okay. But, when
++	 * enabling multiple active links, we should determine the right link.
++	 */
++	rtwvif_link = rtw89_vif_get_link_inst(rtwvif, 0);
++	if (unlikely(!rtwvif_link))
++		goto out;
++
++	bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, false);
++	if (!bss_conf->bssid)
++		goto out;
+ 
+ 	if (ieee80211_is_trigger(hdr->frame_control)) {
+-		rtw89_stats_trigger_frame(rtwdev, vif, skb);
+-		return;
++		rtw89_stats_trigger_frame(rtwdev, rtwvif_link, bss_conf, skb);
++		goto out;
+ 	}
+ 
+-	if (!ether_addr_equal(vif->bss_conf.bssid, bssid))
+-		return;
++	if (!ether_addr_equal(bss_conf->bssid, bssid))
++		goto out;
+ 
+ 	if (ieee80211_is_beacon(hdr->frame_control)) {
+ 		if (vif->type == NL80211_IFTYPE_STATION &&
+ 		    !test_bit(RTW89_FLAG_WOWLAN, rtwdev->flags)) {
+-			rtw89_vif_sync_bcn_tsf(rtwvif, hdr, skb->len);
++			rtw89_vif_sync_bcn_tsf(rtwvif_link, hdr, skb->len);
+ 			rtw89_fw_h2c_rssi_offload(rtwdev, phy_ppdu);
+ 		}
+ 		pkt_stat->beacon_nr++;
+ 	}
+ 
+-	if (!ether_addr_equal(vif->addr, hdr->addr1))
+-		return;
++	if (!ether_addr_equal(bss_conf->addr, hdr->addr1))
++		goto out;
+ 
+ 	if (desc_info->data_rate < RTW89_HW_RATE_NR)
+ 		pkt_stat->rx_rate_cnt[desc_info->data_rate]++;
+ 
+ 	rtw89_traffic_stats_accu(rtwdev, &rtwvif->stats, skb, false);
++
++out:
++	rcu_read_unlock();
+ }
+ 
+ static void rtw89_core_rx_stats(struct rtw89_dev *rtwdev,
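[Editor's sketch] The rx-stats iterator above is the template for every per-packet path in this patch: resolve the HW-0 link instance under RCU, dereference the exact bss_conf, and bail out through a single unlock label. A minimal sketch of that pattern, assuming the helpers introduced in the core.h hunk further down (rtw89_vif_get_link_inst() and rtw89_vif_rcu_dereference_link()) and this driver's headers:

static void example_rx_iter(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
	struct ieee80211_bss_conf *bss_conf;
	struct rtw89_vif_link *rtwvif_link;

	rcu_read_lock();

	/* single-link assumption: the only active link sits on HW-0 */
	rtwvif_link = rtw89_vif_get_link_inst(rtwvif, 0);
	if (unlikely(!rtwvif_link))
		goto out;

	/* false: silently fall back to vif->bss_conf if no exact link */
	bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, false);
	if (!bss_conf->bssid)
		goto out;

	/* ... per-link RX accounting would go here ... */

out:
	rcu_read_unlock();
}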
+@@ -2432,15 +2511,23 @@ void rtw89_core_stats_sta_rx_status_iter(void *data, struct ieee80211_sta *sta)
+ 	struct rtw89_core_iter_rx_status *iter_data =
+ 				(struct rtw89_core_iter_rx_status *)data;
+ 	struct ieee80211_rx_status *rx_status = iter_data->rx_status;
+-	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+ 	struct rtw89_rx_desc_info *desc_info = iter_data->desc_info;
++	struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
++	struct rtw89_sta_link *rtwsta_link;
+ 	u8 mac_id = iter_data->mac_id;
+ 
+-	if (mac_id != rtwsta->mac_id)
++	/* FIXME: For single link, taking the link on HW-0 here is okay. But
++	 * when enabling multiple active links, we should determine the right link.
++	 */
++	rtwsta_link = rtw89_sta_get_link_inst(rtwsta, 0);
++	if (unlikely(!rtwsta_link))
+ 		return;
+ 
+-	rtwsta->rx_status = *rx_status;
+-	rtwsta->rx_hw_rate = desc_info->data_rate;
++	if (mac_id != rtwsta_link->mac_id)
++		return;
++
++	rtwsta_link->rx_status = *rx_status;
++	rtwsta_link->rx_hw_rate = desc_info->data_rate;
+ }
+ 
+ static void rtw89_core_stats_sta_rx_status(struct rtw89_dev *rtwdev,
+@@ -2546,6 +2633,10 @@ static enum rtw89_ps_mode rtw89_update_ps_mode(struct rtw89_dev *rtwdev)
+ {
+ 	const struct rtw89_chip_info *chip = rtwdev->chip;
+ 
++	/* FIXME: Fix __rtw89_enter_ps_mode() to consider MLO cases. */
++	if (rtwdev->support_mlo)
++		return RTW89_PS_MODE_NONE;
++
+ 	if (rtw89_disable_ps_mode || !chip->ps_mode_supported ||
+ 	    RTW89_CHK_FW_FEATURE(NO_DEEP_PS, &rtwdev->fw))
+ 		return RTW89_PS_MODE_NONE;
+@@ -2658,7 +2749,7 @@ static void rtw89_core_ba_work(struct work_struct *work)
+ 	list_for_each_entry_safe(rtwtxq, tmp, &rtwdev->ba_list, list) {
+ 		struct ieee80211_txq *txq = rtw89_txq_to_txq(rtwtxq);
+ 		struct ieee80211_sta *sta = txq->sta;
+-		struct rtw89_sta *rtwsta = sta ? (struct rtw89_sta *)sta->drv_priv : NULL;
++		struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
+ 		u8 tid = txq->tid;
+ 
+ 		if (!sta) {
+@@ -2686,8 +2777,8 @@ static void rtw89_core_ba_work(struct work_struct *work)
+ 	spin_unlock_bh(&rtwdev->ba_lock);
+ }
+ 
+-static void rtw89_core_free_sta_pending_ba(struct rtw89_dev *rtwdev,
+-					   struct ieee80211_sta *sta)
++void rtw89_core_free_sta_pending_ba(struct rtw89_dev *rtwdev,
++				    struct ieee80211_sta *sta)
+ {
+ 	struct rtw89_txq *rtwtxq, *tmp;
+ 
+@@ -2701,8 +2792,8 @@ static void rtw89_core_free_sta_pending_ba(struct rtw89_dev *rtwdev,
+ 	spin_unlock_bh(&rtwdev->ba_lock);
+ }
+ 
+-static void rtw89_core_free_sta_pending_forbid_ba(struct rtw89_dev *rtwdev,
+-						  struct ieee80211_sta *sta)
++void rtw89_core_free_sta_pending_forbid_ba(struct rtw89_dev *rtwdev,
++					   struct ieee80211_sta *sta)
+ {
+ 	struct rtw89_txq *rtwtxq, *tmp;
+ 
+@@ -2718,10 +2809,10 @@ static void rtw89_core_free_sta_pending_forbid_ba(struct rtw89_dev *rtwdev,
+ 	spin_unlock_bh(&rtwdev->ba_lock);
+ }
+ 
+-static void rtw89_core_free_sta_pending_roc_tx(struct rtw89_dev *rtwdev,
+-					       struct ieee80211_sta *sta)
++void rtw89_core_free_sta_pending_roc_tx(struct rtw89_dev *rtwdev,
++					struct ieee80211_sta *sta)
+ {
+-	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
++	struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
+ 	struct sk_buff *skb, *tmp;
+ 
+ 	skb_queue_walk_safe(&rtwsta->roc_queue, skb, tmp) {
+@@ -2762,7 +2853,7 @@ static void rtw89_core_txq_check_agg(struct rtw89_dev *rtwdev,
+ 	struct ieee80211_hw *hw = rtwdev->hw;
+ 	struct ieee80211_txq *txq = rtw89_txq_to_txq(rtwtxq);
+ 	struct ieee80211_sta *sta = txq->sta;
+-	struct rtw89_sta *rtwsta = sta ? (struct rtw89_sta *)sta->drv_priv : NULL;
++	struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
+ 
+ 	if (test_bit(RTW89_TXQ_F_FORBID_BA, &rtwtxq->flags))
+ 		return;
+@@ -2838,10 +2929,19 @@ static bool rtw89_core_txq_agg_wait(struct rtw89_dev *rtwdev,
+ 				    bool *sched_txq, bool *reinvoke)
+ {
+ 	struct rtw89_txq *rtwtxq = (struct rtw89_txq *)txq->drv_priv;
+-	struct ieee80211_sta *sta = txq->sta;
+-	struct rtw89_sta *rtwsta = sta ? (struct rtw89_sta *)sta->drv_priv : NULL;
++	struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(txq->sta);
++	struct rtw89_sta_link *rtwsta_link;
+ 
+-	if (!sta || rtwsta->max_agg_wait <= 0)
++	if (!rtwsta)
++		return false;
++
++	rtwsta_link = rtw89_sta_get_link_inst(rtwsta, 0);
++	if (unlikely(!rtwsta_link)) {
++		rtw89_err(rtwdev, "agg wait: find no link on HW-0\n");
++		return false;
++	}
++
++	if (rtwsta_link->max_agg_wait <= 0)
+ 		return false;
+ 
+ 	if (rtwdev->stats.tx_tfc_lv <= RTW89_TFC_MID)
+@@ -2855,7 +2955,7 @@ static bool rtw89_core_txq_agg_wait(struct rtw89_dev *rtwdev,
+ 		return false;
+ 	}
+ 
+-	if (*frame_cnt == 1 && rtwtxq->wait_cnt < rtwsta->max_agg_wait) {
++	if (*frame_cnt == 1 && rtwtxq->wait_cnt < rtwsta_link->max_agg_wait) {
+ 		*reinvoke = true;
+ 		rtwtxq->wait_cnt++;
+ 		return true;
+@@ -2879,7 +2979,7 @@ static void rtw89_core_txq_schedule(struct rtw89_dev *rtwdev, u8 ac, bool *reinv
+ 	ieee80211_txq_schedule_start(hw, ac);
+ 	while ((txq = ieee80211_next_txq(hw, ac))) {
+ 		rtwtxq = (struct rtw89_txq *)txq->drv_priv;
+-		rtwvif = (struct rtw89_vif *)txq->vif->drv_priv;
++		rtwvif = vif_to_rtwvif(txq->vif);
+ 
+ 		if (rtwvif->offchan) {
+ 			ieee80211_return_txq(hw, txq, true);
+@@ -2955,16 +3055,23 @@ static void rtw89_forbid_ba_work(struct work_struct *w)
+ static void rtw89_core_sta_pending_tx_iter(void *data,
+ 					   struct ieee80211_sta *sta)
+ {
+-	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+-	struct rtw89_vif *rtwvif_target = data, *rtwvif = rtwsta->rtwvif;
+-	struct rtw89_dev *rtwdev = rtwvif->rtwdev;
++	struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
++	struct rtw89_dev *rtwdev = rtwsta->rtwdev;
++	struct rtw89_vif *rtwvif = rtwsta->rtwvif;
+ 	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
++	struct rtw89_vif_link *target = data;
++	struct rtw89_vif_link *rtwvif_link;
+ 	struct sk_buff *skb, *tmp;
++	unsigned int link_id;
+ 	int qsel, ret;
+ 
+-	if (rtwvif->chanctx_idx != rtwvif_target->chanctx_idx)
+-		return;
++	rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id)
++		if (rtwvif_link->chanctx_idx == target->chanctx_idx)
++			goto bottom;
++
++	return;
+ 
++bottom:
+ 	if (skb_queue_len(&rtwsta->roc_queue) == 0)
+ 		return;
+ 
+@@ -2982,17 +3089,17 @@ static void rtw89_core_sta_pending_tx_iter(void *data,
+ }
+ 
+ static void rtw89_core_handle_sta_pending_tx(struct rtw89_dev *rtwdev,
+-					     struct rtw89_vif *rtwvif)
++					     struct rtw89_vif_link *rtwvif_link)
+ {
+ 	ieee80211_iterate_stations_atomic(rtwdev->hw,
+ 					  rtw89_core_sta_pending_tx_iter,
+-					  rtwvif);
++					  rtwvif_link);
+ }
+ 
+ static int rtw89_core_send_nullfunc(struct rtw89_dev *rtwdev,
+-				    struct rtw89_vif *rtwvif, bool qos, bool ps)
++				    struct rtw89_vif_link *rtwvif_link, bool qos, bool ps)
+ {
+-	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
++	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
+ 	struct ieee80211_sta *sta;
+ 	struct ieee80211_hdr *hdr;
+ 	struct sk_buff *skb;
+@@ -3002,7 +3109,7 @@ static int rtw89_core_send_nullfunc(struct rtw89_dev *rtwdev,
+ 		return 0;
+ 
+ 	rcu_read_lock();
+-	sta = ieee80211_find_sta(vif, vif->bss_conf.bssid);
++	sta = ieee80211_find_sta(vif, vif->cfg.ap_addr);
+ 	if (!sta) {
+ 		ret = -EINVAL;
+ 		goto out;
+@@ -3040,27 +3147,43 @@ void rtw89_roc_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
+ 	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+ 	struct ieee80211_hw *hw = rtwdev->hw;
+ 	struct rtw89_roc *roc = &rtwvif->roc;
++	struct rtw89_vif_link *rtwvif_link;
+ 	struct cfg80211_chan_def roc_chan;
+-	struct rtw89_vif *tmp;
++	struct rtw89_vif *tmp_vif;
+ 	int ret;
+ 
+ 	lockdep_assert_held(&rtwdev->mutex);
+ 
+ 	rtw89_leave_ips_by_hwflags(rtwdev);
+ 	rtw89_leave_lps(rtwdev);
++
++	rtwvif_link = rtw89_vif_get_link_inst(rtwvif, 0);
++	if (unlikely(!rtwvif_link)) {
++		rtw89_err(rtwdev, "roc start: find no link on HW-0\n");
++		return;
++	}
++
+ 	rtw89_chanctx_pause(rtwdev, RTW89_CHANCTX_PAUSE_REASON_ROC);
+ 
+-	ret = rtw89_core_send_nullfunc(rtwdev, rtwvif, true, true);
++	ret = rtw89_core_send_nullfunc(rtwdev, rtwvif_link, true, true);
+ 	if (ret)
+ 		rtw89_debug(rtwdev, RTW89_DBG_TXRX,
+ 			    "roc send null-1 failed: %d\n", ret);
+ 
+-	rtw89_for_each_rtwvif(rtwdev, tmp)
+-		if (tmp->chanctx_idx == rtwvif->chanctx_idx)
+-			tmp->offchan = true;
++	rtw89_for_each_rtwvif(rtwdev, tmp_vif) {
++		struct rtw89_vif_link *tmp_link;
++		unsigned int link_id;
++
++		rtw89_vif_for_each_link(tmp_vif, tmp_link, link_id) {
++			if (tmp_link->chanctx_idx == rtwvif_link->chanctx_idx) {
++				tmp_vif->offchan = true;
++				break;
++			}
++		}
++	}
+ 
+ 	cfg80211_chandef_create(&roc_chan, &roc->chan, NL80211_CHAN_NO_HT);
+-	rtw89_config_roc_chandef(rtwdev, rtwvif->chanctx_idx, &roc_chan);
++	rtw89_config_roc_chandef(rtwdev, rtwvif_link->chanctx_idx, &roc_chan);
+ 	rtw89_set_channel(rtwdev);
+ 	rtw89_write32_clr(rtwdev,
+ 			  rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, RTW89_MAC_0),
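[Editor's sketch] rtw89_roc_start() above is the first user of the nested iteration idiom: walk every vif, then every active link of that vif, match on the link's chanctx_idx, and flip per-vif state. Condensed into a standalone sketch (rtw89_vif_for_each_link() is defined in the core.h hunk below; the rest comes from the driver's core.h):

static void example_mark_offchan(struct rtw89_dev *rtwdev,
				 enum rtw89_chanctx_idx idx)
{
	struct rtw89_vif_link *rtwvif_link;
	struct rtw89_vif *rtwvif;
	unsigned int link_id;

	rtw89_for_each_rtwvif(rtwdev, rtwvif) {
		rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) {
			if (rtwvif_link->chanctx_idx != idx)
				continue;

			/* offchan is per-vif state shared by all links */
			rtwvif->offchan = true;
			break;
		}
	}
}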
+@@ -3077,7 +3200,8 @@ void rtw89_roc_end(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
+ 	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+ 	struct ieee80211_hw *hw = rtwdev->hw;
+ 	struct rtw89_roc *roc = &rtwvif->roc;
+-	struct rtw89_vif *tmp;
++	struct rtw89_vif_link *rtwvif_link;
++	struct rtw89_vif *tmp_vif;
+ 	int ret;
+ 
+ 	lockdep_assert_held(&rtwdev->mutex);
+@@ -3087,24 +3211,29 @@ void rtw89_roc_end(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
+ 	rtw89_leave_ips_by_hwflags(rtwdev);
+ 	rtw89_leave_lps(rtwdev);
+ 
++	rtwvif_link = rtw89_vif_get_link_inst(rtwvif, 0);
++	if (unlikely(!rtwvif_link)) {
++		rtw89_err(rtwdev, "roc end: find no link on HW-0\n");
++		return;
++	}
++
+ 	rtw89_write32_mask(rtwdev,
+ 			   rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, RTW89_MAC_0),
+ 			   B_AX_RX_FLTR_CFG_MASK,
+ 			   rtwdev->hal.rx_fltr);
+ 
+ 	roc->state = RTW89_ROC_IDLE;
+-	rtw89_config_roc_chandef(rtwdev, rtwvif->chanctx_idx, NULL);
++	rtw89_config_roc_chandef(rtwdev, rtwvif_link->chanctx_idx, NULL);
+ 	rtw89_chanctx_proceed(rtwdev);
+-	ret = rtw89_core_send_nullfunc(rtwdev, rtwvif, true, false);
++	ret = rtw89_core_send_nullfunc(rtwdev, rtwvif_link, true, false);
+ 	if (ret)
+ 		rtw89_debug(rtwdev, RTW89_DBG_TXRX,
+ 			    "roc send null-0 failed: %d\n", ret);
+ 
+-	rtw89_for_each_rtwvif(rtwdev, tmp)
+-		if (tmp->chanctx_idx == rtwvif->chanctx_idx)
+-			tmp->offchan = false;
++	rtw89_for_each_rtwvif(rtwdev, tmp_vif)
++		tmp_vif->offchan = false;
+ 
+-	rtw89_core_handle_sta_pending_tx(rtwdev, rtwvif);
++	rtw89_core_handle_sta_pending_tx(rtwdev, rtwvif_link);
+ 	queue_work(rtwdev->txq_wq, &rtwdev->txq_work);
+ 
+ 	if (hw->conf.flags & IEEE80211_CONF_IDLE)
+@@ -3188,39 +3317,52 @@ static bool rtw89_traffic_stats_calc(struct rtw89_dev *rtwdev,
+ 
+ static bool rtw89_traffic_stats_track(struct rtw89_dev *rtwdev)
+ {
++	struct rtw89_vif_link *rtwvif_link;
+ 	struct rtw89_vif *rtwvif;
++	unsigned int link_id;
+ 	bool tfc_changed;
+ 
+ 	tfc_changed = rtw89_traffic_stats_calc(rtwdev, &rtwdev->stats);
++
+ 	rtw89_for_each_rtwvif(rtwdev, rtwvif) {
+ 		rtw89_traffic_stats_calc(rtwdev, &rtwvif->stats);
+-		rtw89_fw_h2c_tp_offload(rtwdev, rtwvif);
++
++		rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id)
++			rtw89_fw_h2c_tp_offload(rtwdev, rtwvif_link);
+ 	}
+ 
+ 	return tfc_changed;
+ }
+ 
+-static void rtw89_vif_enter_lps(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
++static void rtw89_vif_enter_lps(struct rtw89_dev *rtwdev,
++				struct rtw89_vif_link *rtwvif_link)
+ {
+-	if ((rtwvif->wifi_role != RTW89_WIFI_ROLE_STATION &&
+-	     rtwvif->wifi_role != RTW89_WIFI_ROLE_P2P_CLIENT) ||
+-	    rtwvif->tdls_peer)
++	if (rtwvif_link->wifi_role != RTW89_WIFI_ROLE_STATION &&
++	    rtwvif_link->wifi_role != RTW89_WIFI_ROLE_P2P_CLIENT)
+ 		return;
+ 
+-	if (rtwvif->offchan)
+-		return;
+-
+-	if (rtwvif->stats.tx_tfc_lv == RTW89_TFC_IDLE &&
+-	    rtwvif->stats.rx_tfc_lv == RTW89_TFC_IDLE)
+-		rtw89_enter_lps(rtwdev, rtwvif, true);
++	rtw89_enter_lps(rtwdev, rtwvif_link, true);
+ }
+ 
+ static void rtw89_enter_lps_track(struct rtw89_dev *rtwdev)
+ {
++	struct rtw89_vif_link *rtwvif_link;
+ 	struct rtw89_vif *rtwvif;
++	unsigned int link_id;
+ 
+-	rtw89_for_each_rtwvif(rtwdev, rtwvif)
+-		rtw89_vif_enter_lps(rtwdev, rtwvif);
++	rtw89_for_each_rtwvif(rtwdev, rtwvif) {
++		if (rtwvif->tdls_peer)
++			continue;
++		if (rtwvif->offchan)
++			continue;
++
++		if (rtwvif->stats.tx_tfc_lv != RTW89_TFC_IDLE ||
++		    rtwvif->stats.rx_tfc_lv != RTW89_TFC_IDLE)
++			continue;
++
++		rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id)
++			rtw89_vif_enter_lps(rtwdev, rtwvif_link);
++	}
+ }
+ 
+ static void rtw89_core_rfk_track(struct rtw89_dev *rtwdev)
+@@ -3234,14 +3376,16 @@ static void rtw89_core_rfk_track(struct rtw89_dev *rtwdev)
+ 	rtw89_chip_rfk_track(rtwdev);
+ }
+ 
+-void rtw89_core_update_p2p_ps(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif)
++void rtw89_core_update_p2p_ps(struct rtw89_dev *rtwdev,
++			      struct rtw89_vif_link *rtwvif_link,
++			      struct ieee80211_bss_conf *bss_conf)
+ {
+ 	enum rtw89_entity_mode mode = rtw89_get_entity_mode(rtwdev);
+ 
+ 	if (mode == RTW89_ENTITY_MODE_MCC)
+ 		rtw89_queue_chanctx_change(rtwdev, RTW89_CHANCTX_P2P_PS_CHANGE);
+ 	else
+-		rtw89_process_p2p_ps(rtwdev, vif);
++		rtw89_process_p2p_ps(rtwdev, rtwvif_link, bss_conf);
+ }
+ 
+ void rtw89_traffic_stats_init(struct rtw89_dev *rtwdev,
+@@ -3326,7 +3470,8 @@ void rtw89_core_release_all_bits_map(unsigned long *addr, unsigned int nbits)
+ }
+ 
+ int rtw89_core_acquire_sta_ba_entry(struct rtw89_dev *rtwdev,
+-				    struct rtw89_sta *rtwsta, u8 tid, u8 *cam_idx)
++				    struct rtw89_sta_link *rtwsta_link, u8 tid,
++				    u8 *cam_idx)
+ {
+ 	const struct rtw89_chip_info *chip = rtwdev->chip;
+ 	struct rtw89_cam_info *cam_info = &rtwdev->cam_info;
+@@ -3363,7 +3508,7 @@ int rtw89_core_acquire_sta_ba_entry(struct rtw89_dev *rtwdev,
+ 	}
+ 
+ 	entry->tid = tid;
+-	list_add_tail(&entry->list, &rtwsta->ba_cam_list);
++	list_add_tail(&entry->list, &rtwsta_link->ba_cam_list);
+ 
+ 	*cam_idx = idx;
+ 
+@@ -3371,7 +3516,8 @@ int rtw89_core_acquire_sta_ba_entry(struct rtw89_dev *rtwdev,
+ }
+ 
+ int rtw89_core_release_sta_ba_entry(struct rtw89_dev *rtwdev,
+-				    struct rtw89_sta *rtwsta, u8 tid, u8 *cam_idx)
++				    struct rtw89_sta_link *rtwsta_link, u8 tid,
++				    u8 *cam_idx)
+ {
+ 	struct rtw89_cam_info *cam_info = &rtwdev->cam_info;
+ 	struct rtw89_ba_cam_entry *entry = NULL, *tmp;
+@@ -3379,7 +3525,7 @@ int rtw89_core_release_sta_ba_entry(struct rtw89_dev *rtwdev,
+ 
+ 	lockdep_assert_held(&rtwdev->mutex);
+ 
+-	list_for_each_entry_safe(entry, tmp, &rtwsta->ba_cam_list, list) {
++	list_for_each_entry_safe(entry, tmp, &rtwsta_link->ba_cam_list, list) {
+ 		if (entry->tid != tid)
+ 			continue;
+ 
+@@ -3396,24 +3542,25 @@ int rtw89_core_release_sta_ba_entry(struct rtw89_dev *rtwdev,
+ 
+ #define RTW89_TYPE_MAPPING(_type)	\
+ 	case NL80211_IFTYPE_ ## _type:	\
+-		rtwvif->wifi_role = RTW89_WIFI_ROLE_ ## _type;	\
++		rtwvif_link->wifi_role = RTW89_WIFI_ROLE_ ## _type;	\
+ 		break
+-void rtw89_vif_type_mapping(struct ieee80211_vif *vif, bool assoc)
++void rtw89_vif_type_mapping(struct rtw89_vif_link *rtwvif_link, bool assoc)
+ {
+-	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
++	const struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
++	const struct ieee80211_bss_conf *bss_conf;
+ 
+ 	switch (vif->type) {
+ 	case NL80211_IFTYPE_STATION:
+ 		if (vif->p2p)
+-			rtwvif->wifi_role = RTW89_WIFI_ROLE_P2P_CLIENT;
++			rtwvif_link->wifi_role = RTW89_WIFI_ROLE_P2P_CLIENT;
+ 		else
+-			rtwvif->wifi_role = RTW89_WIFI_ROLE_STATION;
++			rtwvif_link->wifi_role = RTW89_WIFI_ROLE_STATION;
+ 		break;
+ 	case NL80211_IFTYPE_AP:
+ 		if (vif->p2p)
+-			rtwvif->wifi_role = RTW89_WIFI_ROLE_P2P_GO;
++			rtwvif_link->wifi_role = RTW89_WIFI_ROLE_P2P_GO;
+ 		else
+-			rtwvif->wifi_role = RTW89_WIFI_ROLE_AP;
++			rtwvif_link->wifi_role = RTW89_WIFI_ROLE_AP;
+ 		break;
+ 	RTW89_TYPE_MAPPING(ADHOC);
+ 	RTW89_TYPE_MAPPING(MONITOR);
+@@ -3426,23 +3573,27 @@ void rtw89_vif_type_mapping(struct ieee80211_vif *vif, bool assoc)
+ 	switch (vif->type) {
+ 	case NL80211_IFTYPE_AP:
+ 	case NL80211_IFTYPE_MESH_POINT:
+-		rtwvif->net_type = RTW89_NET_TYPE_AP_MODE;
+-		rtwvif->self_role = RTW89_SELF_ROLE_AP;
++		rtwvif_link->net_type = RTW89_NET_TYPE_AP_MODE;
++		rtwvif_link->self_role = RTW89_SELF_ROLE_AP;
+ 		break;
+ 	case NL80211_IFTYPE_ADHOC:
+-		rtwvif->net_type = RTW89_NET_TYPE_AD_HOC;
+-		rtwvif->self_role = RTW89_SELF_ROLE_CLIENT;
++		rtwvif_link->net_type = RTW89_NET_TYPE_AD_HOC;
++		rtwvif_link->self_role = RTW89_SELF_ROLE_CLIENT;
+ 		break;
+ 	case NL80211_IFTYPE_STATION:
+ 		if (assoc) {
+-			rtwvif->net_type = RTW89_NET_TYPE_INFRA;
+-			rtwvif->trigger = vif->bss_conf.he_support;
++			rtwvif_link->net_type = RTW89_NET_TYPE_INFRA;
++
++			rcu_read_lock();
++			bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, false);
++			rtwvif_link->trigger = bss_conf->he_support;
++			rcu_read_unlock();
+ 		} else {
+-			rtwvif->net_type = RTW89_NET_TYPE_NO_LINK;
+-			rtwvif->trigger = false;
++			rtwvif_link->net_type = RTW89_NET_TYPE_NO_LINK;
++			rtwvif_link->trigger = false;
+ 		}
+-		rtwvif->self_role = RTW89_SELF_ROLE_CLIENT;
+-		rtwvif->addr_cam.sec_ent_mode = RTW89_ADDR_CAM_SEC_NORMAL;
++		rtwvif_link->self_role = RTW89_SELF_ROLE_CLIENT;
++		rtwvif_link->addr_cam.sec_ent_mode = RTW89_ADDR_CAM_SEC_NORMAL;
+ 		break;
+ 	case NL80211_IFTYPE_MONITOR:
+ 		break;
+@@ -3452,137 +3603,110 @@ void rtw89_vif_type_mapping(struct ieee80211_vif *vif, bool assoc)
+ 	}
+ }
+ 
+-int rtw89_core_sta_add(struct rtw89_dev *rtwdev,
+-		       struct ieee80211_vif *vif,
+-		       struct ieee80211_sta *sta)
++int rtw89_core_sta_link_add(struct rtw89_dev *rtwdev,
++			    struct rtw89_vif_link *rtwvif_link,
++			    struct rtw89_sta_link *rtwsta_link)
+ {
+-	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+-	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
++	const struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
++	const struct ieee80211_sta *sta = rtwsta_link_to_sta(rtwsta_link);
+ 	struct rtw89_hal *hal = &rtwdev->hal;
+ 	u8 ant_num = hal->ant_diversity ? 2 : rtwdev->chip->rf_path_num;
+ 	int i;
+ 	int ret;
+ 
+-	rtwsta->rtwdev = rtwdev;
+-	rtwsta->rtwvif = rtwvif;
+-	rtwsta->prev_rssi = 0;
+-	INIT_LIST_HEAD(&rtwsta->ba_cam_list);
+-	skb_queue_head_init(&rtwsta->roc_queue);
+-
+-	for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
+-		rtw89_core_txq_init(rtwdev, sta->txq[i]);
+-
+-	ewma_rssi_init(&rtwsta->avg_rssi);
+-	ewma_snr_init(&rtwsta->avg_snr);
+-	ewma_evm_init(&rtwsta->evm_1ss);
++	rtwsta_link->prev_rssi = 0;
++	INIT_LIST_HEAD(&rtwsta_link->ba_cam_list);
++	ewma_rssi_init(&rtwsta_link->avg_rssi);
++	ewma_snr_init(&rtwsta_link->avg_snr);
++	ewma_evm_init(&rtwsta_link->evm_1ss);
+ 	for (i = 0; i < ant_num; i++) {
+-		ewma_rssi_init(&rtwsta->rssi[i]);
+-		ewma_evm_init(&rtwsta->evm_min[i]);
+-		ewma_evm_init(&rtwsta->evm_max[i]);
++		ewma_rssi_init(&rtwsta_link->rssi[i]);
++		ewma_evm_init(&rtwsta_link->evm_min[i]);
++		ewma_evm_init(&rtwsta_link->evm_max[i]);
+ 	}
+ 
+ 	if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) {
+-		/* for station mode, assign the mac_id from itself */
+-		rtwsta->mac_id = rtwvif->mac_id;
+-
+ 		/* must do rtw89_reg_6ghz_recalc() before rfk channel */
+-		ret = rtw89_reg_6ghz_recalc(rtwdev, rtwvif, true);
++		ret = rtw89_reg_6ghz_recalc(rtwdev, rtwvif_link, true);
+ 		if (ret)
+ 			return ret;
+ 
+-		rtw89_btc_ntfy_role_info(rtwdev, rtwvif, rtwsta,
++		rtw89_btc_ntfy_role_info(rtwdev, rtwvif_link, rtwsta_link,
+ 					 BTC_ROLE_MSTS_STA_CONN_START);
+-		rtw89_chip_rfk_channel(rtwdev, rtwvif);
++		rtw89_chip_rfk_channel(rtwdev, rtwvif_link);
+ 	} else if (vif->type == NL80211_IFTYPE_AP || sta->tdls) {
+-		rtwsta->mac_id = rtw89_acquire_mac_id(rtwdev);
+-		if (rtwsta->mac_id == RTW89_MAX_MAC_ID_NUM)
+-			return -ENOSPC;
+-
+-		ret = rtw89_mac_set_macid_pause(rtwdev, rtwsta->mac_id, false);
++		ret = rtw89_mac_set_macid_pause(rtwdev, rtwsta_link->mac_id, false);
+ 		if (ret) {
+-			rtw89_release_mac_id(rtwdev, rtwsta->mac_id);
+ 			rtw89_warn(rtwdev, "failed to send h2c macid pause\n");
+ 			return ret;
+ 		}
+ 
+-		ret = rtw89_fw_h2c_role_maintain(rtwdev, rtwvif, rtwsta,
++		ret = rtw89_fw_h2c_role_maintain(rtwdev, rtwvif_link, rtwsta_link,
+ 						 RTW89_ROLE_CREATE);
+ 		if (ret) {
+-			rtw89_release_mac_id(rtwdev, rtwsta->mac_id);
+ 			rtw89_warn(rtwdev, "failed to send h2c role info\n");
+ 			return ret;
+ 		}
+ 
+-		ret = rtw89_chip_h2c_default_cmac_tbl(rtwdev, rtwvif, rtwsta);
++		ret = rtw89_chip_h2c_default_cmac_tbl(rtwdev, rtwvif_link, rtwsta_link);
+ 		if (ret)
+ 			return ret;
+ 
+-		ret = rtw89_chip_h2c_default_dmac_tbl(rtwdev, rtwvif, rtwsta);
++		ret = rtw89_chip_h2c_default_dmac_tbl(rtwdev, rtwvif_link, rtwsta_link);
+ 		if (ret)
+ 			return ret;
+-
+-		rtw89_queue_chanctx_change(rtwdev, RTW89_CHANCTX_REMOTE_STA_CHANGE);
+ 	}
+ 
+ 	return 0;
+ }
+ 
+-int rtw89_core_sta_disassoc(struct rtw89_dev *rtwdev,
+-			    struct ieee80211_vif *vif,
+-			    struct ieee80211_sta *sta)
++int rtw89_core_sta_link_disassoc(struct rtw89_dev *rtwdev,
++				 struct rtw89_vif_link *rtwvif_link,
++				 struct rtw89_sta_link *rtwsta_link)
+ {
+-	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+-	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
++	const struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
+ 
+ 	if (vif->type == NL80211_IFTYPE_STATION)
+-		rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, vif, false);
+-
+-	rtwdev->total_sta_assoc--;
+-	if (sta->tdls)
+-		rtwvif->tdls_peer--;
+-	rtwsta->disassoc = true;
++		rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, rtwvif_link, false);
+ 
+ 	return 0;
+ }
+ 
+-int rtw89_core_sta_disconnect(struct rtw89_dev *rtwdev,
+-			      struct ieee80211_vif *vif,
+-			      struct ieee80211_sta *sta)
++int rtw89_core_sta_link_disconnect(struct rtw89_dev *rtwdev,
++				   struct rtw89_vif_link *rtwvif_link,
++				   struct rtw89_sta_link *rtwsta_link)
+ {
+-	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+-	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
++	const struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
++	const struct ieee80211_sta *sta = rtwsta_link_to_sta(rtwsta_link);
+ 	int ret;
+ 
+-	rtw89_mac_bf_monitor_calc(rtwdev, sta, true);
+-	rtw89_mac_bf_disassoc(rtwdev, vif, sta);
+-	rtw89_core_free_sta_pending_ba(rtwdev, sta);
+-	rtw89_core_free_sta_pending_forbid_ba(rtwdev, sta);
+-	rtw89_core_free_sta_pending_roc_tx(rtwdev, sta);
++	rtw89_mac_bf_monitor_calc(rtwdev, rtwsta_link, true);
++	rtw89_mac_bf_disassoc(rtwdev, rtwvif_link, rtwsta_link);
+ 
+ 	if (vif->type == NL80211_IFTYPE_AP || sta->tdls)
+-		rtw89_cam_deinit_addr_cam(rtwdev, &rtwsta->addr_cam);
++		rtw89_cam_deinit_addr_cam(rtwdev, &rtwsta_link->addr_cam);
+ 	if (sta->tdls)
+-		rtw89_cam_deinit_bssid_cam(rtwdev, &rtwsta->bssid_cam);
++		rtw89_cam_deinit_bssid_cam(rtwdev, &rtwsta_link->bssid_cam);
+ 
+ 	if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) {
+-		rtw89_vif_type_mapping(vif, false);
+-		rtw89_fw_release_general_pkt_list_vif(rtwdev, rtwvif, true);
++		rtw89_vif_type_mapping(rtwvif_link, false);
++		rtw89_fw_release_general_pkt_list_vif(rtwdev, rtwvif_link, true);
+ 	}
+ 
+-	ret = rtw89_chip_h2c_assoc_cmac_tbl(rtwdev, vif, sta);
++	ret = rtw89_chip_h2c_assoc_cmac_tbl(rtwdev, rtwvif_link, rtwsta_link);
+ 	if (ret) {
+ 		rtw89_warn(rtwdev, "failed to send h2c cmac table\n");
+ 		return ret;
+ 	}
+ 
+-	ret = rtw89_fw_h2c_join_info(rtwdev, rtwvif, rtwsta, true);
++	ret = rtw89_fw_h2c_join_info(rtwdev, rtwvif_link, rtwsta_link, true);
+ 	if (ret) {
+ 		rtw89_warn(rtwdev, "failed to send h2c join info\n");
+ 		return ret;
+ 	}
+ 
+ 	/* update cam aid mac_id net_type */
+-	ret = rtw89_fw_h2c_cam(rtwdev, rtwvif, rtwsta, NULL);
++	ret = rtw89_fw_h2c_cam(rtwdev, rtwvif_link, rtwsta_link, NULL);
+ 	if (ret) {
+ 		rtw89_warn(rtwdev, "failed to send h2c cam\n");
+ 		return ret;
+@@ -3591,106 +3715,114 @@ int rtw89_core_sta_disconnect(struct rtw89_dev *rtwdev,
+ 	return ret;
+ }
+ 
+-int rtw89_core_sta_assoc(struct rtw89_dev *rtwdev,
+-			 struct ieee80211_vif *vif,
+-			 struct ieee80211_sta *sta)
++int rtw89_core_sta_link_assoc(struct rtw89_dev *rtwdev,
++			      struct rtw89_vif_link *rtwvif_link,
++			      struct rtw89_sta_link *rtwsta_link)
+ {
+-	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+-	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+-	struct rtw89_bssid_cam_entry *bssid_cam = rtw89_get_bssid_cam_of(rtwvif, rtwsta);
++	const struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
++	const struct ieee80211_sta *sta = rtwsta_link_to_sta(rtwsta_link);
++	struct rtw89_bssid_cam_entry *bssid_cam = rtw89_get_bssid_cam_of(rtwvif_link,
++									 rtwsta_link);
+ 	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
+-						       rtwvif->chanctx_idx);
++						       rtwvif_link->chanctx_idx);
+ 	int ret;
+ 
+ 	if (vif->type == NL80211_IFTYPE_AP || sta->tdls) {
+ 		if (sta->tdls) {
+-			ret = rtw89_cam_init_bssid_cam(rtwdev, rtwvif, bssid_cam, sta->addr);
++			struct ieee80211_link_sta *link_sta;
++
++			rcu_read_lock();
++
++			link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true);
++			ret = rtw89_cam_init_bssid_cam(rtwdev, rtwvif_link, bssid_cam,
++						       link_sta->addr);
+ 			if (ret) {
+ 				rtw89_warn(rtwdev, "failed to send h2c init bssid cam for TDLS\n");
++				rcu_read_unlock();
+ 				return ret;
+ 			}
++
++			rcu_read_unlock();
+ 		}
+ 
+-		ret = rtw89_cam_init_addr_cam(rtwdev, &rtwsta->addr_cam, bssid_cam);
++		ret = rtw89_cam_init_addr_cam(rtwdev, &rtwsta_link->addr_cam, bssid_cam);
+ 		if (ret) {
+ 			rtw89_warn(rtwdev, "failed to send h2c init addr cam\n");
+ 			return ret;
+ 		}
+ 	}
+ 
+-	ret = rtw89_chip_h2c_assoc_cmac_tbl(rtwdev, vif, sta);
++	ret = rtw89_chip_h2c_assoc_cmac_tbl(rtwdev, rtwvif_link, rtwsta_link);
+ 	if (ret) {
+ 		rtw89_warn(rtwdev, "failed to send h2c cmac table\n");
+ 		return ret;
+ 	}
+ 
+-	ret = rtw89_fw_h2c_join_info(rtwdev, rtwvif, rtwsta, false);
++	ret = rtw89_fw_h2c_join_info(rtwdev, rtwvif_link, rtwsta_link, false);
+ 	if (ret) {
+ 		rtw89_warn(rtwdev, "failed to send h2c join info\n");
+ 		return ret;
+ 	}
+ 
+ 	/* update cam aid mac_id net_type */
+-	ret = rtw89_fw_h2c_cam(rtwdev, rtwvif, rtwsta, NULL);
++	ret = rtw89_fw_h2c_cam(rtwdev, rtwvif_link, rtwsta_link, NULL);
+ 	if (ret) {
+ 		rtw89_warn(rtwdev, "failed to send h2c cam\n");
+ 		return ret;
+ 	}
+ 
+-	rtwdev->total_sta_assoc++;
+-	if (sta->tdls)
+-		rtwvif->tdls_peer++;
+-	rtw89_phy_ra_assoc(rtwdev, sta);
+-	rtw89_mac_bf_assoc(rtwdev, vif, sta);
+-	rtw89_mac_bf_monitor_calc(rtwdev, sta, false);
++	rtw89_phy_ra_assoc(rtwdev, rtwsta_link);
++	rtw89_mac_bf_assoc(rtwdev, rtwvif_link, rtwsta_link);
++	rtw89_mac_bf_monitor_calc(rtwdev, rtwsta_link, false);
+ 
+ 	if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) {
+-		struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
++		struct ieee80211_bss_conf *bss_conf;
+ 
++		rcu_read_lock();
++
++		bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
+ 		if (bss_conf->he_support &&
+ 		    !(bss_conf->he_oper.params & IEEE80211_HE_OPERATION_ER_SU_DISABLE))
+-			rtwsta->er_cap = true;
++			rtwsta_link->er_cap = true;
++
++		rcu_read_unlock();
+ 
+-		rtw89_btc_ntfy_role_info(rtwdev, rtwvif, rtwsta,
++		rtw89_btc_ntfy_role_info(rtwdev, rtwvif_link, rtwsta_link,
+ 					 BTC_ROLE_MSTS_STA_CONN_END);
+-		rtw89_core_get_no_ul_ofdma_htc(rtwdev, &rtwsta->htc_template, chan);
+-		rtw89_phy_ul_tb_assoc(rtwdev, rtwvif);
++		rtw89_core_get_no_ul_ofdma_htc(rtwdev, &rtwsta_link->htc_template, chan);
++		rtw89_phy_ul_tb_assoc(rtwdev, rtwvif_link);
+ 
+-		ret = rtw89_fw_h2c_general_pkt(rtwdev, rtwvif, rtwsta->mac_id);
++		ret = rtw89_fw_h2c_general_pkt(rtwdev, rtwvif_link, rtwsta_link->mac_id);
+ 		if (ret) {
+ 			rtw89_warn(rtwdev, "failed to send h2c general packet\n");
+ 			return ret;
+ 		}
+ 
+-		rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, vif, true);
++		rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, rtwvif_link, true);
+ 	}
+ 
+ 	return ret;
+ }
+ 
+-int rtw89_core_sta_remove(struct rtw89_dev *rtwdev,
+-			  struct ieee80211_vif *vif,
+-			  struct ieee80211_sta *sta)
++int rtw89_core_sta_link_remove(struct rtw89_dev *rtwdev,
++			       struct rtw89_vif_link *rtwvif_link,
++			       struct rtw89_sta_link *rtwsta_link)
+ {
+-	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+-	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
++	const struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
++	const struct ieee80211_sta *sta = rtwsta_link_to_sta(rtwsta_link);
+ 	int ret;
+ 
+ 	if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) {
+-		rtw89_reg_6ghz_recalc(rtwdev, rtwvif, false);
+-		rtw89_btc_ntfy_role_info(rtwdev, rtwvif, rtwsta,
++		rtw89_reg_6ghz_recalc(rtwdev, rtwvif_link, false);
++		rtw89_btc_ntfy_role_info(rtwdev, rtwvif_link, rtwsta_link,
+ 					 BTC_ROLE_MSTS_STA_DIS_CONN);
+ 	} else if (vif->type == NL80211_IFTYPE_AP || sta->tdls) {
+-		rtw89_release_mac_id(rtwdev, rtwsta->mac_id);
+-
+-		ret = rtw89_fw_h2c_role_maintain(rtwdev, rtwvif, rtwsta,
++		ret = rtw89_fw_h2c_role_maintain(rtwdev, rtwvif_link, rtwsta_link,
+ 						 RTW89_ROLE_REMOVE);
+ 		if (ret) {
+ 			rtw89_warn(rtwdev, "failed to send h2c role info\n");
+ 			return ret;
+ 		}
+-
+-		rtw89_queue_chanctx_change(rtwdev, RTW89_CHANCTX_REMOTE_STA_CHANGE);
+ 	}
+ 
+ 	return 0;
+@@ -4152,15 +4284,16 @@ static void rtw89_core_ppdu_sts_init(struct rtw89_dev *rtwdev)
+ void rtw89_core_update_beacon_work(struct work_struct *work)
+ {
+ 	struct rtw89_dev *rtwdev;
+-	struct rtw89_vif *rtwvif = container_of(work, struct rtw89_vif,
+-						update_beacon_work);
++	struct rtw89_vif_link *rtwvif_link = container_of(work, struct rtw89_vif_link,
++							  update_beacon_work);
+ 
+-	if (rtwvif->net_type != RTW89_NET_TYPE_AP_MODE)
++	if (rtwvif_link->net_type != RTW89_NET_TYPE_AP_MODE)
+ 		return;
+ 
+-	rtwdev = rtwvif->rtwdev;
++	rtwdev = rtwvif_link->rtwvif->rtwdev;
++
+ 	mutex_lock(&rtwdev->mutex);
+-	rtw89_chip_h2c_update_beacon(rtwdev, rtwvif);
++	rtw89_chip_h2c_update_beacon(rtwdev, rtwvif_link);
+ 	mutex_unlock(&rtwdev->mutex);
+ }
+ 
+@@ -4355,6 +4488,168 @@ void rtw89_release_mac_id(struct rtw89_dev *rtwdev, u8 mac_id)
+ 	clear_bit(mac_id, rtwdev->mac_id_map);
+ }
+ 
++void rtw89_init_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
++		    u8 mac_id, u8 port)
++{
++	const struct rtw89_chip_info *chip = rtwdev->chip;
++	u8 support_link_num = chip->support_link_num;
++	u8 support_mld_num = 0;
++	unsigned int link_id;
++	u8 index;
++
++	bitmap_zero(rtwvif->links_inst_map, __RTW89_MLD_MAX_LINK_NUM);
++	for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++)
++		rtwvif->links[link_id] = NULL;
++
++	rtwvif->rtwdev = rtwdev;
++
++	if (rtwdev->support_mlo) {
++		rtwvif->links_inst_valid_num = support_link_num;
++		support_mld_num = chip->support_macid_num / support_link_num;
++	} else {
++		rtwvif->links_inst_valid_num = 1;
++	}
++
++	for (index = 0; index < rtwvif->links_inst_valid_num; index++) {
++		struct rtw89_vif_link *inst = &rtwvif->links_inst[index];
++
++		inst->rtwvif = rtwvif;
++		inst->mac_id = mac_id + index * support_mld_num;
++		inst->mac_idx = RTW89_MAC_0 + index;
++		inst->phy_idx = RTW89_PHY_0 + index;
++
++		/* multi-link vifs use the same port id on different HW bands */
++		inst->port = port;
++	}
++}
++
++void rtw89_init_sta(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
++		    struct rtw89_sta *rtwsta, u8 mac_id)
++{
++	const struct rtw89_chip_info *chip = rtwdev->chip;
++	u8 support_link_num = chip->support_link_num;
++	u8 support_mld_num = 0;
++	unsigned int link_id;
++	u8 index;
++
++	bitmap_zero(rtwsta->links_inst_map, __RTW89_MLD_MAX_LINK_NUM);
++	for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++)
++		rtwsta->links[link_id] = NULL;
++
++	rtwsta->rtwdev = rtwdev;
++	rtwsta->rtwvif = rtwvif;
++
++	if (rtwdev->support_mlo) {
++		rtwsta->links_inst_valid_num = support_link_num;
++		support_mld_num = chip->support_macid_num / support_link_num;
++	} else {
++		rtwsta->links_inst_valid_num = 1;
++	}
++
++	for (index = 0; index < rtwsta->links_inst_valid_num; index++) {
++		struct rtw89_sta_link *inst = &rtwsta->links_inst[index];
++
++		inst->rtwvif_link = &rtwvif->links_inst[index];
++
++		inst->rtwsta = rtwsta;
++		inst->mac_id = mac_id + index * support_mld_num;
++	}
++}
++
++struct rtw89_vif_link *rtw89_vif_set_link(struct rtw89_vif *rtwvif,
++					  unsigned int link_id)
++{
++	struct rtw89_vif_link *rtwvif_link = rtwvif->links[link_id];
++	u8 index;
++	int ret;
++
++	if (rtwvif_link)
++		return rtwvif_link;
++
++	index = find_first_zero_bit(rtwvif->links_inst_map,
++				    rtwvif->links_inst_valid_num);
++	if (index == rtwvif->links_inst_valid_num) {
++		ret = -EBUSY;
++		goto err;
++	}
++
++	rtwvif_link = &rtwvif->links_inst[index];
++	rtwvif_link->link_id = link_id;
++
++	set_bit(index, rtwvif->links_inst_map);
++	rtwvif->links[link_id] = rtwvif_link;
++	return rtwvif_link;
++
++err:
++	rtw89_err(rtwvif->rtwdev, "vif (link_id %u) failed to set link: %d\n",
++		  link_id, ret);
++	return NULL;
++}
++
++void rtw89_vif_unset_link(struct rtw89_vif *rtwvif, unsigned int link_id)
++{
++	struct rtw89_vif_link **container = &rtwvif->links[link_id];
++	struct rtw89_vif_link *link = *container;
++	u8 index;
++
++	if (!link)
++		return;
++
++	index = rtw89_vif_link_inst_get_index(link);
++	clear_bit(index, rtwvif->links_inst_map);
++	*container = NULL;
++}
++
++struct rtw89_sta_link *rtw89_sta_set_link(struct rtw89_sta *rtwsta,
++					  unsigned int link_id)
++{
++	struct rtw89_vif *rtwvif = rtwsta->rtwvif;
++	struct rtw89_vif_link *rtwvif_link = rtwvif->links[link_id];
++	struct rtw89_sta_link *rtwsta_link = rtwsta->links[link_id];
++	u8 index;
++	int ret;
++
++	if (rtwsta_link)
++		return rtwsta_link;
++
++	if (!rtwvif_link) {
++		ret = -ENOLINK;
++		goto err;
++	}
++
++	index = rtw89_vif_link_inst_get_index(rtwvif_link);
++	if (test_bit(index, rtwsta->links_inst_map)) {
++		ret = -EBUSY;
++		goto err;
++	}
++
++	rtwsta_link = &rtwsta->links_inst[index];
++	rtwsta_link->link_id = link_id;
++
++	set_bit(index, rtwsta->links_inst_map);
++	rtwsta->links[link_id] = rtwsta_link;
++	return rtwsta_link;
++
++err:
++	rtw89_err(rtwsta->rtwdev, "sta (link_id %u) failed to set link: %d\n",
++		  link_id, ret);
++	return NULL;
++}
++
++void rtw89_sta_unset_link(struct rtw89_sta *rtwsta, unsigned int link_id)
++{
++	struct rtw89_sta_link **container = &rtwsta->links[link_id];
++	struct rtw89_sta_link *link = *container;
++	u8 index;
++
++	if (!link)
++		return;
++
++	index = rtw89_sta_link_inst_get_index(link);
++	clear_bit(index, rtwsta->links_inst_map);
++	*container = NULL;
++}
++
+ int rtw89_core_init(struct rtw89_dev *rtwdev)
+ {
+ 	struct rtw89_btc *btc = &rtwdev->btc;
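[Editor's sketch] rtw89_init_vif()/rtw89_init_sta() above spread one MLD entity across HW bands by striding the mac_id. A worked sketch of that arithmetic, using hypothetical chip numbers (support_macid_num = 32, support_link_num = 2, hence support_mld_num = 16):

/* With main mac_id 5, link instance 0 keeps mac_id 5 on HW band 0 and
 * link instance 1 gets mac_id 5 + 16 = 21 on HW band 1.
 */
static u8 link_inst_mac_id(u8 main_mac_id, u8 index,
			   u8 support_macid_num, u8 support_link_num)
{
	u8 support_mld_num = support_macid_num / support_link_num;

	return main_mac_id + index * support_mld_num;
}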
+@@ -4444,38 +4739,44 @@ void rtw89_core_deinit(struct rtw89_dev *rtwdev)
+ }
+ EXPORT_SYMBOL(rtw89_core_deinit);
+ 
+-void rtw89_core_scan_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
++void rtw89_core_scan_start(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
+ 			   const u8 *mac_addr, bool hw_scan)
+ {
+ 	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
+-						       rtwvif->chanctx_idx);
++						       rtwvif_link->chanctx_idx);
+ 
+ 	rtwdev->scanning = true;
+ 	rtw89_leave_lps(rtwdev);
+ 	if (hw_scan)
+ 		rtw89_leave_ips_by_hwflags(rtwdev);
+ 
+-	ether_addr_copy(rtwvif->mac_addr, mac_addr);
++	ether_addr_copy(rtwvif_link->mac_addr, mac_addr);
+ 	rtw89_btc_ntfy_scan_start(rtwdev, RTW89_PHY_0, chan->band_type);
+-	rtw89_chip_rfk_scan(rtwdev, rtwvif, true);
++	rtw89_chip_rfk_scan(rtwdev, rtwvif_link, true);
+ 	rtw89_hci_recalc_int_mit(rtwdev);
+ 	rtw89_phy_config_edcca(rtwdev, true);
+ 
+-	rtw89_fw_h2c_cam(rtwdev, rtwvif, NULL, mac_addr);
++	rtw89_fw_h2c_cam(rtwdev, rtwvif_link, NULL, mac_addr);
+ }
+ 
+ void rtw89_core_scan_complete(struct rtw89_dev *rtwdev,
+-			      struct ieee80211_vif *vif, bool hw_scan)
++			      struct rtw89_vif_link *rtwvif_link, bool hw_scan)
+ {
+-	struct rtw89_vif *rtwvif = vif ? (struct rtw89_vif *)vif->drv_priv : NULL;
++	struct ieee80211_bss_conf *bss_conf;
+ 
+-	if (!rtwvif)
++	if (!rtwvif_link)
+ 		return;
+ 
+-	ether_addr_copy(rtwvif->mac_addr, vif->addr);
+-	rtw89_fw_h2c_cam(rtwdev, rtwvif, NULL, NULL);
++	rcu_read_lock();
++
++	bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
++	ether_addr_copy(rtwvif_link->mac_addr, bss_conf->addr);
++
++	rcu_read_unlock();
++
++	rtw89_fw_h2c_cam(rtwdev, rtwvif_link, NULL, NULL);
+ 
+-	rtw89_chip_rfk_scan(rtwdev, rtwvif, false);
++	rtw89_chip_rfk_scan(rtwdev, rtwvif_link, false);
+ 	rtw89_btc_ntfy_scan_finish(rtwdev, RTW89_PHY_0);
+ 	rtw89_phy_config_edcca(rtwdev, false);
+ 
+@@ -4688,17 +4989,39 @@ int rtw89_chip_info_setup(struct rtw89_dev *rtwdev)
+ }
+ EXPORT_SYMBOL(rtw89_chip_info_setup);
+ 
++void rtw89_chip_cfg_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev,
++				       struct rtw89_vif_link *rtwvif_link)
++{
++	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
++	const struct rtw89_chip_info *chip = rtwdev->chip;
++	struct ieee80211_bss_conf *bss_conf;
++
++	rcu_read_lock();
++
++	bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, false);
++	if (!bss_conf->he_support || !vif->cfg.assoc) {
++		rcu_read_unlock();
++		return;
++	}
++
++	rcu_read_unlock();
++
++	if (chip->ops->set_txpwr_ul_tb_offset)
++		chip->ops->set_txpwr_ul_tb_offset(rtwdev, 0, rtwvif_link->mac_idx);
++}
++
+ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev)
+ {
+ 	const struct rtw89_chip_info *chip = rtwdev->chip;
++	u8 n = rtwdev->support_mlo ? chip->support_link_num : 1;
+ 	struct ieee80211_hw *hw = rtwdev->hw;
+ 	struct rtw89_efuse *efuse = &rtwdev->efuse;
+ 	struct rtw89_hal *hal = &rtwdev->hal;
+ 	int ret;
+ 	int tx_headroom = IEEE80211_HT_CTL_LEN;
+ 
+-	hw->vif_data_size = sizeof(struct rtw89_vif);
+-	hw->sta_data_size = sizeof(struct rtw89_sta);
++	hw->vif_data_size = struct_size_t(struct rtw89_vif, links_inst, n);
++	hw->sta_data_size = struct_size_t(struct rtw89_sta, links_inst, n);
+ 	hw->txq_data_size = sizeof(struct rtw89_txq);
+ 	hw->chanctx_data_size = sizeof(struct rtw89_chanctx_cfg);
+ 
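[Editor's sketch] Because rtw89_vif and rtw89_sta now end in __counted_by flexible arrays, the drv_priv sizes handed to mac80211 above must cover n link instances. A small sketch of what struct_size_t() computes here (n is 1 without MLO support, chip->support_link_num with it); struct_size_t() comes from linux/overflow.h:

#include <linux/overflow.h>

static size_t example_vif_priv_size(bool support_mlo, u8 support_link_num)
{
	u8 n = support_mlo ? support_link_num : 1;

	/* sizeof(struct rtw89_vif) + n * sizeof(struct rtw89_vif_link),
	 * with overflow checking folded in by struct_size_t()
	 */
	return struct_size_t(struct rtw89_vif, links_inst, n);
}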
+diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h
+index 4ed9034fdb4641..de33320b1354cd 100644
+--- a/drivers/net/wireless/realtek/rtw89/core.h
++++ b/drivers/net/wireless/realtek/rtw89/core.h
+@@ -829,6 +829,8 @@ enum rtw89_phy_idx {
+ 	RTW89_PHY_MAX
+ };
+ 
++#define __RTW89_MLD_MAX_LINK_NUM 2
++
+ enum rtw89_chanctx_idx {
+ 	RTW89_CHANCTX_0 = 0,
+ 	RTW89_CHANCTX_1 = 1,
+@@ -1166,8 +1168,8 @@ struct rtw89_core_tx_request {
+ 	enum rtw89_core_tx_type tx_type;
+ 
+ 	struct sk_buff *skb;
+-	struct ieee80211_vif *vif;
+-	struct ieee80211_sta *sta;
++	struct rtw89_vif_link *rtwvif_link;
++	struct rtw89_sta_link *rtwsta_link;
+ 	struct rtw89_tx_desc_info desc_info;
+ };
+ 
+@@ -3354,12 +3356,13 @@ struct rtw89_sec_cam_entry {
+ 	u8 key[32];
+ };
+ 
+-struct rtw89_sta {
++struct rtw89_sta_link {
++	struct rtw89_sta *rtwsta;
++	unsigned int link_id;
++
+ 	u8 mac_id;
+-	bool disassoc;
+ 	bool er_cap;
+-	struct rtw89_dev *rtwdev;
+-	struct rtw89_vif *rtwvif;
++	struct rtw89_vif_link *rtwvif_link;
+ 	struct rtw89_ra_info ra;
+ 	struct rtw89_ra_report ra_report;
+ 	int max_agg_wait;
+@@ -3370,15 +3373,12 @@ struct rtw89_sta {
+ 	struct ewma_evm evm_1ss;
+ 	struct ewma_evm evm_min[RF_PATH_MAX];
+ 	struct ewma_evm evm_max[RF_PATH_MAX];
+-	struct rtw89_ampdu_params ampdu_params[IEEE80211_NUM_TIDS];
+-	DECLARE_BITMAP(ampdu_map, IEEE80211_NUM_TIDS);
+ 	struct ieee80211_rx_status rx_status;
+ 	u16 rx_hw_rate;
+ 	__le32 htc_template;
+ 	struct rtw89_addr_cam_entry addr_cam; /* AP mode or TDLS peer only */
+ 	struct rtw89_bssid_cam_entry bssid_cam; /* TDLS peer only */
+ 	struct list_head ba_cam_list;
+-	struct sk_buff_head roc_queue;
+ 
+ 	bool use_cfg_mask;
+ 	struct cfg80211_bitrate_mask mask;
+@@ -3460,10 +3460,10 @@ struct rtw89_p2p_noa_setter {
+ 	u8 noa_index;
+ };
+ 
+-struct rtw89_vif {
+-	struct list_head list;
+-	struct rtw89_dev *rtwdev;
+-	struct rtw89_roc roc;
++struct rtw89_vif_link {
++	struct rtw89_vif *rtwvif;
++	unsigned int link_id;
++
+ 	bool chanctx_assigned; /* only valid when running with chanctx_ops */
+ 	enum rtw89_chanctx_idx chanctx_idx;
+ 	enum rtw89_reg_6ghz_power reg_6ghz_power;
+@@ -3473,7 +3473,6 @@ struct rtw89_vif {
+ 	u8 port;
+ 	u8 mac_addr[ETH_ALEN];
+ 	u8 bssid[ETH_ALEN];
+-	__be32 ip_addr;
+ 	u8 phy_idx;
+ 	u8 mac_idx;
+ 	u8 net_type;
+@@ -3484,7 +3483,6 @@ struct rtw89_vif {
+ 	u8 hit_rule;
+ 	u8 last_noa_nr;
+ 	u64 sync_bcn_tsf;
+-	bool offchan;
+ 	bool trigger;
+ 	bool lsig_txop;
+ 	u8 tgt_ind;
+@@ -3498,15 +3496,11 @@ struct rtw89_vif {
+ 	bool pre_pwr_diff_en;
+ 	bool pwr_diff_en;
+ 	u8 def_tri_idx;
+-	u32 tdls_peer;
+ 	struct work_struct update_beacon_work;
+ 	struct rtw89_addr_cam_entry addr_cam;
+ 	struct rtw89_bssid_cam_entry bssid_cam;
+ 	struct ieee80211_tx_queue_params tx_params[IEEE80211_NUM_ACS];
+-	struct rtw89_traffic_stats stats;
+ 	struct rtw89_phy_rate_pattern rate_pattern;
+-	struct cfg80211_scan_request *scan_req;
+-	struct ieee80211_scan_ies *scan_ies;
+ 	struct list_head general_pkt_list;
+ 	struct rtw89_p2p_noa_setter p2p_noa;
+ };
+@@ -3599,11 +3593,11 @@ struct rtw89_chip_ops {
+ 	void (*rfk_hw_init)(struct rtw89_dev *rtwdev);
+ 	void (*rfk_init)(struct rtw89_dev *rtwdev);
+ 	void (*rfk_init_late)(struct rtw89_dev *rtwdev);
+-	void (*rfk_channel)(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif);
++	void (*rfk_channel)(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link);
+ 	void (*rfk_band_changed)(struct rtw89_dev *rtwdev,
+ 				 enum rtw89_phy_idx phy_idx,
+ 				 const struct rtw89_chan *chan);
+-	void (*rfk_scan)(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
++	void (*rfk_scan)(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
+ 			 bool start);
+ 	void (*rfk_track)(struct rtw89_dev *rtwdev);
+ 	void (*power_trim)(struct rtw89_dev *rtwdev);
+@@ -3646,23 +3640,25 @@ struct rtw89_chip_ops {
+ 			   u32 *tx_en, enum rtw89_sch_tx_sel sel);
+ 	int (*resume_sch_tx)(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en);
+ 	int (*h2c_dctl_sec_cam)(struct rtw89_dev *rtwdev,
+-				struct rtw89_vif *rtwvif,
+-				struct rtw89_sta *rtwsta);
++				struct rtw89_vif_link *rtwvif_link,
++				struct rtw89_sta_link *rtwsta_link);
+ 	int (*h2c_default_cmac_tbl)(struct rtw89_dev *rtwdev,
+-				    struct rtw89_vif *rtwvif,
+-				    struct rtw89_sta *rtwsta);
++				    struct rtw89_vif_link *rtwvif_link,
++				    struct rtw89_sta_link *rtwsta_link);
+ 	int (*h2c_assoc_cmac_tbl)(struct rtw89_dev *rtwdev,
+-				  struct ieee80211_vif *vif,
+-				  struct ieee80211_sta *sta);
++				  struct rtw89_vif_link *rtwvif_link,
++				  struct rtw89_sta_link *rtwsta_link);
+ 	int (*h2c_ampdu_cmac_tbl)(struct rtw89_dev *rtwdev,
+-				  struct ieee80211_vif *vif,
+-				  struct ieee80211_sta *sta);
++				  struct rtw89_vif_link *rtwvif_link,
++				  struct rtw89_sta_link *rtwsta_link);
+ 	int (*h2c_default_dmac_tbl)(struct rtw89_dev *rtwdev,
+-				    struct rtw89_vif *rtwvif,
+-				    struct rtw89_sta *rtwsta);
++				    struct rtw89_vif_link *rtwvif_link,
++				    struct rtw89_sta_link *rtwsta_link);
+ 	int (*h2c_update_beacon)(struct rtw89_dev *rtwdev,
+-				 struct rtw89_vif *rtwvif);
+-	int (*h2c_ba_cam)(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
++				 struct rtw89_vif_link *rtwvif_link);
++	int (*h2c_ba_cam)(struct rtw89_dev *rtwdev,
++			  struct rtw89_vif_link *rtwvif_link,
++			  struct rtw89_sta_link *rtwsta_link,
+ 			  bool valid, struct ieee80211_ampdu_params *params);
+ 
+ 	void (*btc_set_rfe)(struct rtw89_dev *rtwdev);
+@@ -5196,7 +5192,7 @@ struct rtw89_early_h2c {
+ };
+ 
+ struct rtw89_hw_scan_info {
+-	struct ieee80211_vif *scanning_vif;
++	struct rtw89_vif_link *scanning_vif;
+ 	struct list_head pkt_list[NUM_NL80211_BANDS];
+ 	struct rtw89_chan op_chan;
+ 	bool abort;
+@@ -5371,7 +5367,7 @@ struct rtw89_wow_aoac_report {
+ };
+ 
+ struct rtw89_wow_param {
+-	struct ieee80211_vif *wow_vif;
++	struct rtw89_vif_link *rtwvif_link;
+ 	DECLARE_BITMAP(flags, RTW89_WOW_FLAG_NUM);
+ 	struct rtw89_wow_cam_info patterns[RTW89_MAX_PATTERN_NUM];
+ 	struct rtw89_wow_key_info key_info;
+@@ -5408,7 +5404,7 @@ struct rtw89_mcc_policy {
+ };
+ 
+ struct rtw89_mcc_role {
+-	struct rtw89_vif *rtwvif;
++	struct rtw89_vif_link *rtwvif_link;
+ 	struct rtw89_mcc_policy policy;
+ 	struct rtw89_mcc_limit limit;
+ 
+@@ -5608,6 +5604,121 @@ struct rtw89_dev {
+ 	u8 priv[] __aligned(sizeof(void *));
+ };
+ 
++struct rtw89_vif {
++	struct rtw89_dev *rtwdev;
++	struct list_head list;
++
++	u8 mac_addr[ETH_ALEN];
++	__be32 ip_addr;
++
++	struct rtw89_traffic_stats stats;
++	u32 tdls_peer;
++
++	struct ieee80211_scan_ies *scan_ies;
++	struct cfg80211_scan_request *scan_req;
++
++	struct rtw89_roc roc;
++	bool offchan;
++
++	u8 links_inst_valid_num;
++	DECLARE_BITMAP(links_inst_map, __RTW89_MLD_MAX_LINK_NUM);
++	struct rtw89_vif_link *links[IEEE80211_MLD_MAX_NUM_LINKS];
++	struct rtw89_vif_link links_inst[] __counted_by(links_inst_valid_num);
++};
++
++static inline bool rtw89_vif_assign_link_is_valid(struct rtw89_vif_link **rtwvif_link,
++						  const struct rtw89_vif *rtwvif,
++						  unsigned int link_id)
++{
++	*rtwvif_link = rtwvif->links[link_id];
++	return !!*rtwvif_link;
++}
++
++#define rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) \
++	for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) \
++		if (rtw89_vif_assign_link_is_valid(&(rtwvif_link), rtwvif, link_id))
++
++struct rtw89_sta {
++	struct rtw89_dev *rtwdev;
++	struct rtw89_vif *rtwvif;
++
++	bool disassoc;
++
++	struct sk_buff_head roc_queue;
++
++	struct rtw89_ampdu_params ampdu_params[IEEE80211_NUM_TIDS];
++	DECLARE_BITMAP(ampdu_map, IEEE80211_NUM_TIDS);
++
++	u8 links_inst_valid_num;
++	DECLARE_BITMAP(links_inst_map, __RTW89_MLD_MAX_LINK_NUM);
++	struct rtw89_sta_link *links[IEEE80211_MLD_MAX_NUM_LINKS];
++	struct rtw89_sta_link links_inst[] __counted_by(links_inst_valid_num);
++};
++
++static inline bool rtw89_sta_assign_link_is_valid(struct rtw89_sta_link **rtwsta_link,
++						  const struct rtw89_sta *rtwsta,
++						  unsigned int link_id)
++{
++	*rtwsta_link = rtwsta->links[link_id];
++	return !!*rtwsta_link;
++}
++
++#define rtw89_sta_for_each_link(rtwsta, rtwsta_link, link_id) \
++	for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) \
++		if (rtw89_sta_assign_link_is_valid(&(rtwsta_link), rtwsta, link_id))
++
++static inline u8 rtw89_vif_get_main_macid(struct rtw89_vif *rtwvif)
++{
++	/* const after init, so no need to check if active first */
++	return rtwvif->links_inst[0].mac_id;
++}
++
++static inline u8 rtw89_vif_get_main_port(struct rtw89_vif *rtwvif)
++{
++	/* const after init, so no need to check if active first */
++	return rtwvif->links_inst[0].port;
++}
++
++static inline struct rtw89_vif_link *
++rtw89_vif_get_link_inst(struct rtw89_vif *rtwvif, u8 index)
++{
++	if (index >= rtwvif->links_inst_valid_num ||
++	    !test_bit(index, rtwvif->links_inst_map))
++		return NULL;
++	return &rtwvif->links_inst[index];
++}
++
++static inline
++u8 rtw89_vif_link_inst_get_index(struct rtw89_vif_link *rtwvif_link)
++{
++	struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
++
++	return rtwvif_link - rtwvif->links_inst;
++}
++
++static inline u8 rtw89_sta_get_main_macid(struct rtw89_sta *rtwsta)
++{
++	/* const after init, so no need to check if active first */
++	return rtwsta->links_inst[0].mac_id;
++}
++
++static inline struct rtw89_sta_link *
++rtw89_sta_get_link_inst(struct rtw89_sta *rtwsta, u8 index)
++{
++	if (index >= rtwsta->links_inst_valid_num ||
++	    !test_bit(index, rtwsta->links_inst_map))
++		return NULL;
++	return &rtwsta->links_inst[index];
++}
++
++static inline
++u8 rtw89_sta_link_inst_get_index(struct rtw89_sta_link *rtwsta_link)
++{
++	struct rtw89_sta *rtwsta = rtwsta_link->rtwsta;
++
++	return rtwsta_link - rtwsta->links_inst;
++}
++
+ static inline int rtw89_hci_tx_write(struct rtw89_dev *rtwdev,
+ 				     struct rtw89_core_tx_request *tx_req)
+ {
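[Editor's sketch] The getters above (rtw89_vif_get_link_inst() and friends) only report slots reserved through the set/unset pair added in the core.c hunk earlier. A hedged sketch of that lifecycle; error handling is kept minimal since the helpers already log on failure, and the errno choices here are the caller's own:

static int example_activate_link(struct rtw89_vif *rtwvif,
				 struct rtw89_sta *rtwsta,
				 unsigned int link_id)
{
	struct rtw89_vif_link *rtwvif_link;
	struct rtw89_sta_link *rtwsta_link;

	/* reserve a free links_inst[] slot and bind it to link_id */
	rtwvif_link = rtw89_vif_set_link(rtwvif, link_id);
	if (!rtwvif_link)
		return -EBUSY;

	/* the sta side reuses the slot index of its vif link */
	rtwsta_link = rtw89_sta_set_link(rtwsta, link_id);
	if (!rtwsta_link) {
		rtw89_vif_unset_link(rtwvif, link_id);
		return -ENOLINK;
	}

	return 0;
}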
+@@ -5972,9 +6083,26 @@ static inline struct ieee80211_vif *rtwvif_to_vif_safe(struct rtw89_vif *rtwvif)
+ 	return rtwvif ? rtwvif_to_vif(rtwvif) : NULL;
+ }
+ 
++static inline
++struct ieee80211_vif *rtwvif_link_to_vif(struct rtw89_vif_link *rtwvif_link)
++{
++	return rtwvif_to_vif(rtwvif_link->rtwvif);
++}
++
++static inline
++struct ieee80211_vif *rtwvif_link_to_vif_safe(struct rtw89_vif_link *rtwvif_link)
++{
++	return rtwvif_link ? rtwvif_link_to_vif(rtwvif_link) : NULL;
++}
++
++static inline struct rtw89_vif *vif_to_rtwvif(struct ieee80211_vif *vif)
++{
++	return (struct rtw89_vif *)vif->drv_priv;
++}
++
+ static inline struct rtw89_vif *vif_to_rtwvif_safe(struct ieee80211_vif *vif)
+ {
+-	return vif ? (struct rtw89_vif *)vif->drv_priv : NULL;
++	return vif ? vif_to_rtwvif(vif) : NULL;
+ }
+ 
+ static inline struct ieee80211_sta *rtwsta_to_sta(struct rtw89_sta *rtwsta)
+@@ -5989,11 +6117,88 @@ static inline struct ieee80211_sta *rtwsta_to_sta_safe(struct rtw89_sta *rtwsta)
+ 	return rtwsta ? rtwsta_to_sta(rtwsta) : NULL;
+ }
+ 
++static inline
++struct ieee80211_sta *rtwsta_link_to_sta(struct rtw89_sta_link *rtwsta_link)
++{
++	return rtwsta_to_sta(rtwsta_link->rtwsta);
++}
++
++static inline
++struct ieee80211_sta *rtwsta_link_to_sta_safe(struct rtw89_sta_link *rtwsta_link)
++{
++	return rtwsta_link ? rtwsta_link_to_sta(rtwsta_link) : NULL;
++}
++
++static inline struct rtw89_sta *sta_to_rtwsta(struct ieee80211_sta *sta)
++{
++	return (struct rtw89_sta *)sta->drv_priv;
++}
++
+ static inline struct rtw89_sta *sta_to_rtwsta_safe(struct ieee80211_sta *sta)
+ {
+-	return sta ? (struct rtw89_sta *)sta->drv_priv : NULL;
++	return sta ? sta_to_rtwsta(sta) : NULL;
+ }
+ 
++static inline struct ieee80211_bss_conf *
++__rtw89_vif_rcu_dereference_link(struct rtw89_vif_link *rtwvif_link, bool *nolink)
++{
++	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
++	struct ieee80211_bss_conf *bss_conf;
++
++	bss_conf = rcu_dereference(vif->link_conf[rtwvif_link->link_id]);
++	if (unlikely(!bss_conf)) {
++		*nolink = true;
++		return &vif->bss_conf;
++	}
++
++	*nolink = false;
++	return bss_conf;
++}
++
++#define rtw89_vif_rcu_dereference_link(rtwvif_link, assert)		\
++({									\
++	typeof(rtwvif_link) p = rtwvif_link;				\
++	struct ieee80211_bss_conf *bss_conf;				\
++	bool nolink;							\
++									\
++	bss_conf = __rtw89_vif_rcu_dereference_link(p, &nolink);	\
++	if (unlikely(nolink) && (assert))				\
++		rtw89_err(p->rtwvif->rtwdev,				\
++			  "%s: cannot find exact bss_conf for link_id %u\n",\
++			  __func__, p->link_id);			\
++	bss_conf;							\
++})
++
++static inline struct ieee80211_link_sta *
++__rtw89_sta_rcu_dereference_link(struct rtw89_sta_link *rtwsta_link, bool *nolink)
++{
++	struct ieee80211_sta *sta = rtwsta_link_to_sta(rtwsta_link);
++	struct ieee80211_link_sta *link_sta;
++
++	link_sta = rcu_dereference(sta->link[rtwsta_link->link_id]);
++	if (unlikely(!link_sta)) {
++		*nolink = true;
++		return &sta->deflink;
++	}
++
++	*nolink = false;
++	return link_sta;
++}
++
++#define rtw89_sta_rcu_dereference_link(rtwsta_link, assert)		\
++({									\
++	typeof(rtwsta_link) p = rtwsta_link;				\
++	struct ieee80211_link_sta *link_sta;				\
++	bool nolink;							\
++									\
++	link_sta = __rtw89_sta_rcu_dereference_link(p, &nolink);	\
++	if (unlikely(nolink) && (assert))				\
++		rtw89_err(p->rtwsta->rtwdev,				\
++			  "%s: cannot find exact link_sta for link_id %u\n",\
++			  __func__, p->link_id);			\
++	link_sta;							\
++})
++
+ static inline u8 rtw89_hw_to_rate_info_bw(enum rtw89_bandwidth hw_bw)
+ {
+ 	if (hw_bw == RTW89_CHANNEL_WIDTH_160)
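[Editor's sketch] The dereference macros above trade a fallback (vif->bss_conf or sta->deflink) for a loud error when the caller asserts that the exact per-link object must exist. A minimal caller sketch under RCU, assuming ether_addr_copy() from linux/etherdevice.h:

static void example_read_link_addr(struct rtw89_sta_link *rtwsta_link,
				   u8 *addr)
{
	struct ieee80211_link_sta *link_sta;

	rcu_read_lock();

	/* true: log via rtw89_err() if we had to fall back to deflink */
	link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true);
	ether_addr_copy(addr, link_sta->addr);

	rcu_read_unlock();
}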
+@@ -6078,29 +6283,29 @@ enum nl80211_he_ru_alloc rtw89_he_rua_to_ru_alloc(u16 rua)
+ }
+ 
+ static inline
+-struct rtw89_addr_cam_entry *rtw89_get_addr_cam_of(struct rtw89_vif *rtwvif,
+-						   struct rtw89_sta *rtwsta)
++struct rtw89_addr_cam_entry *rtw89_get_addr_cam_of(struct rtw89_vif_link *rtwvif_link,
++						   struct rtw89_sta_link *rtwsta_link)
+ {
+-	if (rtwsta) {
+-		struct ieee80211_sta *sta = rtwsta_to_sta(rtwsta);
++	if (rtwsta_link) {
++		struct ieee80211_sta *sta = rtwsta_link_to_sta(rtwsta_link);
+ 
+-		if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE || sta->tdls)
+-			return &rtwsta->addr_cam;
++		if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE || sta->tdls)
++			return &rtwsta_link->addr_cam;
+ 	}
+-	return &rtwvif->addr_cam;
++	return &rtwvif_link->addr_cam;
+ }
+ 
+ static inline
+-struct rtw89_bssid_cam_entry *rtw89_get_bssid_cam_of(struct rtw89_vif *rtwvif,
+-						     struct rtw89_sta *rtwsta)
++struct rtw89_bssid_cam_entry *rtw89_get_bssid_cam_of(struct rtw89_vif_link *rtwvif_link,
++						     struct rtw89_sta_link *rtwsta_link)
+ {
+-	if (rtwsta) {
+-		struct ieee80211_sta *sta = rtwsta_to_sta(rtwsta);
++	if (rtwsta_link) {
++		struct ieee80211_sta *sta = rtwsta_link_to_sta(rtwsta_link);
+ 
+ 		if (sta->tdls)
+-			return &rtwsta->bssid_cam;
++			return &rtwsta_link->bssid_cam;
+ 	}
+-	return &rtwvif->bssid_cam;
++	return &rtwvif_link->bssid_cam;
+ }
+ 
+ static inline
+@@ -6159,11 +6364,10 @@ const struct rtw89_chan_rcd *rtw89_chan_rcd_get(struct rtw89_dev *rtwdev,
+ static inline
+ const struct rtw89_chan *rtw89_scan_chan_get(struct rtw89_dev *rtwdev)
+ {
+-	struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif;
+-	struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif);
++	struct rtw89_vif_link *rtwvif_link = rtwdev->scan_info.scanning_vif;
+ 
+-	if (rtwvif)
+-		return rtw89_chan_get(rtwdev, rtwvif->chanctx_idx);
++	if (rtwvif_link)
++		return rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx);
+ 	else
+ 		return rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ }
+@@ -6240,12 +6444,12 @@ static inline void rtw89_chip_rfk_init_late(struct rtw89_dev *rtwdev)
+ }
+ 
+ static inline void rtw89_chip_rfk_channel(struct rtw89_dev *rtwdev,
+-					  struct rtw89_vif *rtwvif)
++					  struct rtw89_vif_link *rtwvif_link)
+ {
+ 	const struct rtw89_chip_info *chip = rtwdev->chip;
+ 
+ 	if (chip->ops->rfk_channel)
+-		chip->ops->rfk_channel(rtwdev, rtwvif);
++		chip->ops->rfk_channel(rtwdev, rtwvif_link);
+ }
+ 
+ static inline void rtw89_chip_rfk_band_changed(struct rtw89_dev *rtwdev,
+@@ -6259,12 +6463,12 @@ static inline void rtw89_chip_rfk_band_changed(struct rtw89_dev *rtwdev,
+ }
+ 
+ static inline void rtw89_chip_rfk_scan(struct rtw89_dev *rtwdev,
+-				       struct rtw89_vif *rtwvif, bool start)
++				       struct rtw89_vif_link *rtwvif_link, bool start)
+ {
+ 	const struct rtw89_chip_info *chip = rtwdev->chip;
+ 
+ 	if (chip->ops->rfk_scan)
+-		chip->ops->rfk_scan(rtwdev, rtwvif, start);
++		chip->ops->rfk_scan(rtwdev, rtwvif_link, start);
+ }
+ 
+ static inline void rtw89_chip_rfk_track(struct rtw89_dev *rtwdev)
+@@ -6347,20 +6551,6 @@ static inline void rtw89_chip_cfg_txrx_path(struct rtw89_dev *rtwdev)
+ 		chip->ops->cfg_txrx_path(rtwdev);
+ }
+ 
+-static inline
+-void rtw89_chip_cfg_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev,
+-				       struct ieee80211_vif *vif)
+-{
+-	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+-	const struct rtw89_chip_info *chip = rtwdev->chip;
+-
+-	if (!vif->bss_conf.he_support || !vif->cfg.assoc)
+-		return;
+-
+-	if (chip->ops->set_txpwr_ul_tb_offset)
+-		chip->ops->set_txpwr_ul_tb_offset(rtwdev, 0, rtwvif->mac_idx);
+-}
+-
+ static inline void rtw89_chip_digital_pwr_comp(struct rtw89_dev *rtwdev,
+ 					       enum rtw89_phy_idx phy_idx)
+ {
+@@ -6457,14 +6647,14 @@ int rtw89_chip_resume_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en)
+ 
+ static inline
+ int rtw89_chip_h2c_dctl_sec_cam(struct rtw89_dev *rtwdev,
+-				struct rtw89_vif *rtwvif,
+-				struct rtw89_sta *rtwsta)
++				struct rtw89_vif_link *rtwvif_link,
++				struct rtw89_sta_link *rtwsta_link)
+ {
+ 	const struct rtw89_chip_info *chip = rtwdev->chip;
+ 
+ 	if (!chip->ops->h2c_dctl_sec_cam)
+ 		return 0;
+-	return chip->ops->h2c_dctl_sec_cam(rtwdev, rtwvif, rtwsta);
++	return chip->ops->h2c_dctl_sec_cam(rtwdev, rtwvif_link, rtwsta_link);
+ }
+ 
+ static inline u8 *get_hdr_bssid(struct ieee80211_hdr *hdr)
+@@ -6479,13 +6669,14 @@ static inline u8 *get_hdr_bssid(struct ieee80211_hdr *hdr)
+ 		return hdr->addr3;
+ }
+ 
+-static inline bool rtw89_sta_has_beamformer_cap(struct ieee80211_sta *sta)
++static inline
++bool rtw89_sta_has_beamformer_cap(struct ieee80211_link_sta *link_sta)
+ {
+-	if ((sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) ||
+-	    (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE) ||
+-	    (sta->deflink.he_cap.he_cap_elem.phy_cap_info[3] &
++	if ((link_sta->vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) ||
++	    (link_sta->vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE) ||
++	    (link_sta->he_cap.he_cap_elem.phy_cap_info[3] &
+ 			IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER) ||
+-	    (sta->deflink.he_cap.he_cap_elem.phy_cap_info[4] &
++	    (link_sta->he_cap.he_cap_elem.phy_cap_info[4] &
+ 			IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER))
+ 		return true;
+ 	return false;
+@@ -6605,21 +6796,21 @@ void rtw89_core_napi_start(struct rtw89_dev *rtwdev);
+ void rtw89_core_napi_stop(struct rtw89_dev *rtwdev);
+ int rtw89_core_napi_init(struct rtw89_dev *rtwdev);
+ void rtw89_core_napi_deinit(struct rtw89_dev *rtwdev);
+-int rtw89_core_sta_add(struct rtw89_dev *rtwdev,
+-		       struct ieee80211_vif *vif,
+-		       struct ieee80211_sta *sta);
+-int rtw89_core_sta_assoc(struct rtw89_dev *rtwdev,
+-			 struct ieee80211_vif *vif,
+-			 struct ieee80211_sta *sta);
+-int rtw89_core_sta_disassoc(struct rtw89_dev *rtwdev,
+-			    struct ieee80211_vif *vif,
+-			    struct ieee80211_sta *sta);
+-int rtw89_core_sta_disconnect(struct rtw89_dev *rtwdev,
+-			      struct ieee80211_vif *vif,
+-			      struct ieee80211_sta *sta);
+-int rtw89_core_sta_remove(struct rtw89_dev *rtwdev,
+-			  struct ieee80211_vif *vif,
+-			  struct ieee80211_sta *sta);
++int rtw89_core_sta_link_add(struct rtw89_dev *rtwdev,
++			    struct rtw89_vif_link *rtwvif_link,
++			    struct rtw89_sta_link *rtwsta_link);
++int rtw89_core_sta_link_assoc(struct rtw89_dev *rtwdev,
++			      struct rtw89_vif_link *rtwvif_link,
++			      struct rtw89_sta_link *rtwsta_link);
++int rtw89_core_sta_link_disassoc(struct rtw89_dev *rtwdev,
++				 struct rtw89_vif_link *rtwvif_link,
++				 struct rtw89_sta_link *rtwsta_link);
++int rtw89_core_sta_link_disconnect(struct rtw89_dev *rtwdev,
++				   struct rtw89_vif_link *rtwvif_link,
++				   struct rtw89_sta_link *rtwsta_link);
++int rtw89_core_sta_link_remove(struct rtw89_dev *rtwdev,
++			       struct rtw89_vif_link *rtwvif_link,
++			       struct rtw89_sta_link *rtwsta_link);
+ void rtw89_core_set_tid_config(struct rtw89_dev *rtwdev,
+ 			       struct ieee80211_sta *sta,
+ 			       struct cfg80211_tid_config *tid_config);
+@@ -6635,22 +6826,40 @@ struct rtw89_dev *rtw89_alloc_ieee80211_hw(struct device *device,
+ void rtw89_free_ieee80211_hw(struct rtw89_dev *rtwdev);
+ u8 rtw89_acquire_mac_id(struct rtw89_dev *rtwdev);
+ void rtw89_release_mac_id(struct rtw89_dev *rtwdev, u8 mac_id);
++void rtw89_init_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
++		    u8 mac_id, u8 port);
++void rtw89_init_sta(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
++		    struct rtw89_sta *rtwsta, u8 mac_id);
++struct rtw89_vif_link *rtw89_vif_set_link(struct rtw89_vif *rtwvif,
++					  unsigned int link_id);
++void rtw89_vif_unset_link(struct rtw89_vif *rtwvif, unsigned int link_id);
++struct rtw89_sta_link *rtw89_sta_set_link(struct rtw89_sta *rtwsta,
++					  unsigned int link_id);
++void rtw89_sta_unset_link(struct rtw89_sta *rtwsta, unsigned int link_id);
+ void rtw89_core_set_chip_txpwr(struct rtw89_dev *rtwdev);
+ void rtw89_get_default_chandef(struct cfg80211_chan_def *chandef);
+ void rtw89_get_channel_params(const struct cfg80211_chan_def *chandef,
+ 			      struct rtw89_chan *chan);
+ int rtw89_set_channel(struct rtw89_dev *rtwdev);
+-void rtw89_get_channel(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+-		       struct rtw89_chan *chan);
+ u8 rtw89_core_acquire_bit_map(unsigned long *addr, unsigned long size);
+ void rtw89_core_release_bit_map(unsigned long *addr, u8 bit);
+ void rtw89_core_release_all_bits_map(unsigned long *addr, unsigned int nbits);
+ int rtw89_core_acquire_sta_ba_entry(struct rtw89_dev *rtwdev,
+-				    struct rtw89_sta *rtwsta, u8 tid, u8 *cam_idx);
++				    struct rtw89_sta_link *rtwsta_link, u8 tid,
++				    u8 *cam_idx);
+ int rtw89_core_release_sta_ba_entry(struct rtw89_dev *rtwdev,
+-				    struct rtw89_sta *rtwsta, u8 tid, u8 *cam_idx);
+-void rtw89_vif_type_mapping(struct ieee80211_vif *vif, bool assoc);
++				    struct rtw89_sta_link *rtwsta_link, u8 tid,
++				    u8 *cam_idx);
++void rtw89_core_free_sta_pending_ba(struct rtw89_dev *rtwdev,
++				    struct ieee80211_sta *sta);
++void rtw89_core_free_sta_pending_forbid_ba(struct rtw89_dev *rtwdev,
++					   struct ieee80211_sta *sta);
++void rtw89_core_free_sta_pending_roc_tx(struct rtw89_dev *rtwdev,
++					struct ieee80211_sta *sta);
++void rtw89_vif_type_mapping(struct rtw89_vif_link *rtwvif_link, bool assoc);
+ int rtw89_chip_info_setup(struct rtw89_dev *rtwdev);
++void rtw89_chip_cfg_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev,
++				       struct rtw89_vif_link *rtwvif_link);
+ bool rtw89_ra_report_to_bitrate(struct rtw89_dev *rtwdev, u8 rpt_rate, u16 *bitrate);
+ int rtw89_regd_setup(struct rtw89_dev *rtwdev);
+ int rtw89_regd_init(struct rtw89_dev *rtwdev,
+@@ -6667,13 +6876,15 @@ void rtw89_core_update_beacon_work(struct work_struct *work);
+ void rtw89_roc_work(struct work_struct *work);
+ void rtw89_roc_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif);
+ void rtw89_roc_end(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif);
+-void rtw89_core_scan_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
++void rtw89_core_scan_start(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
+ 			   const u8 *mac_addr, bool hw_scan);
+ void rtw89_core_scan_complete(struct rtw89_dev *rtwdev,
+-			      struct ieee80211_vif *vif, bool hw_scan);
+-int rtw89_reg_6ghz_recalc(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
++			      struct rtw89_vif_link *rtwvif_link, bool hw_scan);
++int rtw89_reg_6ghz_recalc(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
+ 			  bool active);
+-void rtw89_core_update_p2p_ps(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif);
++void rtw89_core_update_p2p_ps(struct rtw89_dev *rtwdev,
++			      struct rtw89_vif_link *rtwvif_link,
++			      struct ieee80211_bss_conf *bss_conf);
+ void rtw89_core_ntfy_btc_event(struct rtw89_dev *rtwdev, enum rtw89_btc_hmsg event);
+ 
+ #endif
+diff --git a/drivers/net/wireless/realtek/rtw89/debug.c b/drivers/net/wireless/realtek/rtw89/debug.c
+index 29f85210f91964..7391f131229a58 100644
+--- a/drivers/net/wireless/realtek/rtw89/debug.c
++++ b/drivers/net/wireless/realtek/rtw89/debug.c
+@@ -3506,7 +3506,9 @@ static ssize_t rtw89_debug_priv_fw_log_manual_set(struct file *filp,
+ 	return count;
+ }
+ 
+-static void rtw89_sta_info_get_iter(void *data, struct ieee80211_sta *sta)
++static void rtw89_sta_link_info_get_iter(struct seq_file *m,
++					 struct rtw89_dev *rtwdev,
++					 struct rtw89_sta_link *rtwsta_link)
+ {
+ 	static const char * const he_gi_str[] = {
+ 		[NL80211_RATE_INFO_HE_GI_0_8] = "0.8",
+@@ -3518,20 +3520,26 @@ static void rtw89_sta_info_get_iter(void *data, struct ieee80211_sta *sta)
+ 		[NL80211_RATE_INFO_EHT_GI_1_6] = "1.6",
+ 		[NL80211_RATE_INFO_EHT_GI_3_2] = "3.2",
+ 	};
+-	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+-	struct rate_info *rate = &rtwsta->ra_report.txrate;
+-	struct ieee80211_rx_status *status = &rtwsta->rx_status;
+-	struct seq_file *m = (struct seq_file *)data;
+-	struct rtw89_dev *rtwdev = rtwsta->rtwdev;
++	struct rate_info *rate = &rtwsta_link->ra_report.txrate;
++	struct ieee80211_rx_status *status = &rtwsta_link->rx_status;
+ 	struct rtw89_hal *hal = &rtwdev->hal;
+ 	u8 ant_num = hal->ant_diversity ? 2 : rtwdev->chip->rf_path_num;
+ 	bool ant_asterisk = hal->tx_path_diversity || hal->ant_diversity;
++	struct ieee80211_link_sta *link_sta;
+ 	u8 evm_min, evm_max, evm_1ss;
++	u16 max_rc_amsdu_len;
+ 	u8 rssi;
+ 	u8 snr;
+ 	int i;
+ 
+-	seq_printf(m, "TX rate [%d]: ", rtwsta->mac_id);
++	rcu_read_lock();
++
++	link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true);
++	max_rc_amsdu_len = link_sta->agg.max_rc_amsdu_len;
++
++	rcu_read_unlock();
++
++	seq_printf(m, "TX rate [%u, %u]: ", rtwsta_link->mac_id, rtwsta_link->link_id);
+ 
+ 	if (rate->flags & RATE_INFO_FLAGS_MCS)
+ 		seq_printf(m, "HT MCS-%d%s", rate->mcs,
+@@ -3549,13 +3557,13 @@ static void rtw89_sta_info_get_iter(void *data, struct ieee80211_sta *sta)
+ 			   eht_gi_str[rate->eht_gi] : "N/A");
+ 	else
+ 		seq_printf(m, "Legacy %d", rate->legacy);
+-	seq_printf(m, "%s", rtwsta->ra_report.might_fallback_legacy ? " FB_G" : "");
++	seq_printf(m, "%s", rtwsta_link->ra_report.might_fallback_legacy ? " FB_G" : "");
+ 	seq_printf(m, " BW:%u", rtw89_rate_info_bw_to_mhz(rate->bw));
+-	seq_printf(m, "\t(hw_rate=0x%x)", rtwsta->ra_report.hw_rate);
+-	seq_printf(m, "\t==> agg_wait=%d (%d)\n", rtwsta->max_agg_wait,
+-		   sta->deflink.agg.max_rc_amsdu_len);
++	seq_printf(m, " (hw_rate=0x%x)", rtwsta_link->ra_report.hw_rate);
++	seq_printf(m, " ==> agg_wait=%d (%d)\n", rtwsta_link->max_agg_wait,
++		   max_rc_amsdu_len);
+ 
+-	seq_printf(m, "RX rate [%d]: ", rtwsta->mac_id);
++	seq_printf(m, "RX rate [%u, %u]: ", rtwsta_link->mac_id, rtwsta_link->link_id);
+ 
+ 	switch (status->encoding) {
+ 	case RX_ENC_LEGACY:
+@@ -3582,24 +3590,24 @@ static void rtw89_sta_info_get_iter(void *data, struct ieee80211_sta *sta)
+ 		break;
+ 	}
+ 	seq_printf(m, " BW:%u", rtw89_rate_info_bw_to_mhz(status->bw));
+-	seq_printf(m, "\t(hw_rate=0x%x)\n", rtwsta->rx_hw_rate);
++	seq_printf(m, " (hw_rate=0x%x)\n", rtwsta_link->rx_hw_rate);
+ 
+-	rssi = ewma_rssi_read(&rtwsta->avg_rssi);
++	rssi = ewma_rssi_read(&rtwsta_link->avg_rssi);
+ 	seq_printf(m, "RSSI: %d dBm (raw=%d, prev=%d) [",
+-		   RTW89_RSSI_RAW_TO_DBM(rssi), rssi, rtwsta->prev_rssi);
++		   RTW89_RSSI_RAW_TO_DBM(rssi), rssi, rtwsta_link->prev_rssi);
+ 	for (i = 0; i < ant_num; i++) {
+-		rssi = ewma_rssi_read(&rtwsta->rssi[i]);
++		rssi = ewma_rssi_read(&rtwsta_link->rssi[i]);
+ 		seq_printf(m, "%d%s%s", RTW89_RSSI_RAW_TO_DBM(rssi),
+ 			   ant_asterisk && (hal->antenna_tx & BIT(i)) ? "*" : "",
+ 			   i + 1 == ant_num ? "" : ", ");
+ 	}
+ 	seq_puts(m, "]\n");
+ 
+-	evm_1ss = ewma_evm_read(&rtwsta->evm_1ss);
++	evm_1ss = ewma_evm_read(&rtwsta_link->evm_1ss);
+ 	seq_printf(m, "EVM: [%2u.%02u, ", evm_1ss >> 2, (evm_1ss & 0x3) * 25);
+ 	for (i = 0; i < (hal->ant_diversity ? 2 : 1); i++) {
+-		evm_min = ewma_evm_read(&rtwsta->evm_min[i]);
+-		evm_max = ewma_evm_read(&rtwsta->evm_max[i]);
++		evm_min = ewma_evm_read(&rtwsta_link->evm_min[i]);
++		evm_max = ewma_evm_read(&rtwsta_link->evm_max[i]);
+ 
+ 		seq_printf(m, "%s(%2u.%02u, %2u.%02u)", i == 0 ? "" : " ",
+ 			   evm_min >> 2, (evm_min & 0x3) * 25,
+@@ -3607,10 +3615,22 @@ static void rtw89_sta_info_get_iter(void *data, struct ieee80211_sta *sta)
+ 	}
+ 	seq_puts(m, "]\t");
+ 
+-	snr = ewma_snr_read(&rtwsta->avg_snr);
++	snr = ewma_snr_read(&rtwsta_link->avg_snr);
+ 	seq_printf(m, "SNR: %u\n", snr);
+ }
+ 
++static void rtw89_sta_info_get_iter(void *data, struct ieee80211_sta *sta)
++{
++	struct seq_file *m = (struct seq_file *)data;
++	struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
++	struct rtw89_dev *rtwdev = rtwsta->rtwdev;
++	struct rtw89_sta_link *rtwsta_link;
++	unsigned int link_id;
++
++	rtw89_sta_for_each_link(rtwsta, rtwsta_link, link_id)
++		rtw89_sta_link_info_get_iter(m, rtwdev, rtwsta_link);
++}
++
+ static void
+ rtw89_debug_append_rx_rate(struct seq_file *m, struct rtw89_pkt_stat *pkt_stat,
+ 			   enum rtw89_hw_rate first_rate, int len)
+@@ -3737,28 +3757,41 @@ static void rtw89_dump_pkt_offload(struct seq_file *m, struct list_head *pkt_lis
+ 	seq_puts(m, "\n");
+ }
+ 
++static void rtw89_vif_link_ids_get(struct seq_file *m, u8 *mac,
++				   struct rtw89_dev *rtwdev,
++				   struct rtw89_vif_link *rtwvif_link)
++{
++	struct rtw89_bssid_cam_entry *bssid_cam = &rtwvif_link->bssid_cam;
++
++	seq_printf(m, "    [%u] %pM\n", rtwvif_link->mac_id, rtwvif_link->mac_addr);
++	seq_printf(m, "\tlink_id=%u\n", rtwvif_link->link_id);
++	seq_printf(m, "\tbssid_cam_idx=%u\n", bssid_cam->bssid_cam_idx);
++	rtw89_dump_addr_cam(m, rtwdev, &rtwvif_link->addr_cam);
++	rtw89_dump_pkt_offload(m, &rtwvif_link->general_pkt_list,
++			       "\tpkt_ofld[GENERAL]: ");
++}
++
+ static
+ void rtw89_vif_ids_get_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
+ {
+-	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+-	struct rtw89_dev *rtwdev = rtwvif->rtwdev;
+ 	struct seq_file *m = (struct seq_file *)data;
+-	struct rtw89_bssid_cam_entry *bssid_cam = &rtwvif->bssid_cam;
++	struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
++	struct rtw89_dev *rtwdev = rtwvif->rtwdev;
++	struct rtw89_vif_link *rtwvif_link;
++	unsigned int link_id;
+ 
+-	seq_printf(m, "VIF [%d] %pM\n", rtwvif->mac_id, rtwvif->mac_addr);
+-	seq_printf(m, "\tbssid_cam_idx=%u\n", bssid_cam->bssid_cam_idx);
+-	rtw89_dump_addr_cam(m, rtwdev, &rtwvif->addr_cam);
+-	rtw89_dump_pkt_offload(m, &rtwvif->general_pkt_list, "\tpkt_ofld[GENERAL]: ");
++	seq_printf(m, "VIF %pM\n", rtwvif->mac_addr);
++	rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id)
++		rtw89_vif_link_ids_get(m, mac, rtwdev, rtwvif_link);
+ }
+ 
+-static void rtw89_dump_ba_cam(struct seq_file *m, struct rtw89_sta *rtwsta)
++static void rtw89_dump_ba_cam(struct seq_file *m, struct rtw89_dev *rtwdev,
++			      struct rtw89_sta_link *rtwsta_link)
+ {
+-	struct rtw89_vif *rtwvif = rtwsta->rtwvif;
+-	struct rtw89_dev *rtwdev = rtwvif->rtwdev;
+ 	struct rtw89_ba_cam_entry *entry;
+ 	bool first = true;
+ 
+-	list_for_each_entry(entry, &rtwsta->ba_cam_list, list) {
++	list_for_each_entry(entry, &rtwsta_link->ba_cam_list, list) {
+ 		if (first) {
+ 			seq_puts(m, "\tba_cam ");
+ 			first = false;
+@@ -3771,16 +3804,36 @@ static void rtw89_dump_ba_cam(struct seq_file *m, struct rtw89_sta *rtwsta)
+ 	seq_puts(m, "\n");
+ }
+ 
++static void rtw89_sta_link_ids_get(struct seq_file *m,
++				   struct rtw89_dev *rtwdev,
++				   struct rtw89_sta_link *rtwsta_link)
++{
++	struct ieee80211_link_sta *link_sta;
++
++	rcu_read_lock();
++
++	link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true);
++
++	seq_printf(m, "    [%u] %pM\n", rtwsta_link->mac_id, link_sta->addr);
++
++	rcu_read_unlock();
++
++	seq_printf(m, "\tlink_id=%u\n", rtwsta_link->link_id);
++	rtw89_dump_addr_cam(m, rtwdev, &rtwsta_link->addr_cam);
++	rtw89_dump_ba_cam(m, rtwdev, rtwsta_link);
++}
++
+ static void rtw89_sta_ids_get_iter(void *data, struct ieee80211_sta *sta)
+ {
+-	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+-	struct rtw89_dev *rtwdev = rtwsta->rtwdev;
+ 	struct seq_file *m = (struct seq_file *)data;
++	struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
++	struct rtw89_dev *rtwdev = rtwsta->rtwdev;
++	struct rtw89_sta_link *rtwsta_link;
++	unsigned int link_id;
+ 
+-	seq_printf(m, "STA [%d] %pM %s\n", rtwsta->mac_id, sta->addr,
+-		   sta->tdls ? "(TDLS)" : "");
+-	rtw89_dump_addr_cam(m, rtwdev, &rtwsta->addr_cam);
+-	rtw89_dump_ba_cam(m, rtwsta);
++	seq_printf(m, "STA %pM %s\n", sta->addr, sta->tdls ? "(TDLS)" : "");
++	rtw89_sta_for_each_link(rtwsta, rtwsta_link, link_id)
++		rtw89_sta_link_ids_get(m, rtwdev, rtwsta_link);
+ }
+ 
+ static int rtw89_debug_priv_stations_get(struct seq_file *m, void *v)
+diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c
+index d9b0e7ebe619a3..13a7c39ceb6f55 100644
+--- a/drivers/net/wireless/realtek/rtw89/fw.c
++++ b/drivers/net/wireless/realtek/rtw89/fw.c
+@@ -1741,8 +1741,8 @@ void rtw89_fw_log_dump(struct rtw89_dev *rtwdev, u8 *buf, u32 len)
+ }
+ 
+ #define H2C_CAM_LEN 60
+-int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+-		     struct rtw89_sta *rtwsta, const u8 *scan_mac_addr)
++int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
++		     struct rtw89_sta_link *rtwsta_link, const u8 *scan_mac_addr)
+ {
+ 	struct sk_buff *skb;
+ 	int ret;
+@@ -1753,8 +1753,9 @@ int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ 		return -ENOMEM;
+ 	}
+ 	skb_put(skb, H2C_CAM_LEN);
+-	rtw89_cam_fill_addr_cam_info(rtwdev, rtwvif, rtwsta, scan_mac_addr, skb->data);
+-	rtw89_cam_fill_bssid_cam_info(rtwdev, rtwvif, rtwsta, skb->data);
++	rtw89_cam_fill_addr_cam_info(rtwdev, rtwvif_link, rtwsta_link, scan_mac_addr,
++				     skb->data);
++	rtw89_cam_fill_bssid_cam_info(rtwdev, rtwvif_link, rtwsta_link, skb->data);
+ 
+ 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ 			      H2C_CAT_MAC,
+@@ -1776,8 +1777,8 @@ int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ }
+ 
+ int rtw89_fw_h2c_dctl_sec_cam_v1(struct rtw89_dev *rtwdev,
+-				 struct rtw89_vif *rtwvif,
+-				 struct rtw89_sta *rtwsta)
++				 struct rtw89_vif_link *rtwvif_link,
++				 struct rtw89_sta_link *rtwsta_link)
+ {
+ 	struct rtw89_h2c_dctlinfo_ud_v1 *h2c;
+ 	u32 len = sizeof(*h2c);
+@@ -1792,7 +1793,7 @@ int rtw89_fw_h2c_dctl_sec_cam_v1(struct rtw89_dev *rtwdev,
+ 	skb_put(skb, len);
+ 	h2c = (struct rtw89_h2c_dctlinfo_ud_v1 *)skb->data;
+ 
+-	rtw89_cam_fill_dctl_sec_cam_info_v1(rtwdev, rtwvif, rtwsta, h2c);
++	rtw89_cam_fill_dctl_sec_cam_info_v1(rtwdev, rtwvif_link, rtwsta_link, h2c);
+ 
+ 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ 			      H2C_CAT_MAC,
+@@ -1815,8 +1816,8 @@ int rtw89_fw_h2c_dctl_sec_cam_v1(struct rtw89_dev *rtwdev,
+ EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v1);
+ 
+ int rtw89_fw_h2c_dctl_sec_cam_v2(struct rtw89_dev *rtwdev,
+-				 struct rtw89_vif *rtwvif,
+-				 struct rtw89_sta *rtwsta)
++				 struct rtw89_vif_link *rtwvif_link,
++				 struct rtw89_sta_link *rtwsta_link)
+ {
+ 	struct rtw89_h2c_dctlinfo_ud_v2 *h2c;
+ 	u32 len = sizeof(*h2c);
+@@ -1831,7 +1832,7 @@ int rtw89_fw_h2c_dctl_sec_cam_v2(struct rtw89_dev *rtwdev,
+ 	skb_put(skb, len);
+ 	h2c = (struct rtw89_h2c_dctlinfo_ud_v2 *)skb->data;
+ 
+-	rtw89_cam_fill_dctl_sec_cam_info_v2(rtwdev, rtwvif, rtwsta, h2c);
++	rtw89_cam_fill_dctl_sec_cam_info_v2(rtwdev, rtwvif_link, rtwsta_link, h2c);
+ 
+ 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ 			      H2C_CAT_MAC,
+@@ -1854,10 +1855,10 @@ int rtw89_fw_h2c_dctl_sec_cam_v2(struct rtw89_dev *rtwdev,
+ EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v2);
+ 
+ int rtw89_fw_h2c_default_dmac_tbl_v2(struct rtw89_dev *rtwdev,
+-				     struct rtw89_vif *rtwvif,
+-				     struct rtw89_sta *rtwsta)
++				     struct rtw89_vif_link *rtwvif_link,
++				     struct rtw89_sta_link *rtwsta_link)
+ {
+-	u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
++	u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id;
+ 	struct rtw89_h2c_dctlinfo_ud_v2 *h2c;
+ 	u32 len = sizeof(*h2c);
+ 	struct sk_buff *skb;
+@@ -1908,21 +1909,24 @@ int rtw89_fw_h2c_default_dmac_tbl_v2(struct rtw89_dev *rtwdev,
+ }
+ EXPORT_SYMBOL(rtw89_fw_h2c_default_dmac_tbl_v2);
+ 
+-int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
++int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev,
++			struct rtw89_vif_link *rtwvif_link,
++			struct rtw89_sta_link *rtwsta_link,
+ 			bool valid, struct ieee80211_ampdu_params *params)
+ {
+ 	const struct rtw89_chip_info *chip = rtwdev->chip;
+-	struct rtw89_vif *rtwvif = rtwsta->rtwvif;
+ 	struct rtw89_h2c_ba_cam *h2c;
+-	u8 macid = rtwsta->mac_id;
++	u8 macid = rtwsta_link->mac_id;
+ 	u32 len = sizeof(*h2c);
+ 	struct sk_buff *skb;
+ 	u8 entry_idx;
+ 	int ret;
+ 
+ 	ret = valid ?
+-	      rtw89_core_acquire_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx) :
+-	      rtw89_core_release_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx);
++	      rtw89_core_acquire_sta_ba_entry(rtwdev, rtwsta_link, params->tid,
++					      &entry_idx) :
++	      rtw89_core_release_sta_ba_entry(rtwdev, rtwsta_link, params->tid,
++					      &entry_idx);
+ 	if (ret) {
+ 		/* it still works even if we don't have static BA CAM, because
+ 		 * hardware can create dynamic BA CAM automatically.
+@@ -1960,7 +1964,8 @@ int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
+ 
+ 	if (chip->bacam_ver == RTW89_BACAM_V0_EXT) {
+ 		h2c->w1 |= le32_encode_bits(1, RTW89_H2C_BA_CAM_W1_STD_EN) |
+-			   le32_encode_bits(rtwvif->mac_idx, RTW89_H2C_BA_CAM_W1_BAND);
++			   le32_encode_bits(rtwvif_link->mac_idx,
++					    RTW89_H2C_BA_CAM_W1_BAND);
+ 	}
+ 
+ end:
+@@ -2039,13 +2044,14 @@ void rtw89_fw_h2c_init_dynamic_ba_cam_v0_ext(struct rtw89_dev *rtwdev)
+ 	}
+ }
+ 
+-int rtw89_fw_h2c_ba_cam_v1(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
++int rtw89_fw_h2c_ba_cam_v1(struct rtw89_dev *rtwdev,
++			   struct rtw89_vif_link *rtwvif_link,
++			   struct rtw89_sta_link *rtwsta_link,
+ 			   bool valid, struct ieee80211_ampdu_params *params)
+ {
+ 	const struct rtw89_chip_info *chip = rtwdev->chip;
+-	struct rtw89_vif *rtwvif = rtwsta->rtwvif;
+ 	struct rtw89_h2c_ba_cam_v1 *h2c;
+-	u8 macid = rtwsta->mac_id;
++	u8 macid = rtwsta_link->mac_id;
+ 	u32 len = sizeof(*h2c);
+ 	struct sk_buff *skb;
+ 	u8 entry_idx;
+@@ -2053,8 +2059,10 @@ int rtw89_fw_h2c_ba_cam_v1(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
+ 	int ret;
+ 
+ 	ret = valid ?
+-	      rtw89_core_acquire_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx) :
+-	      rtw89_core_release_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx);
++	      rtw89_core_acquire_sta_ba_entry(rtwdev, rtwsta_link, params->tid,
++					      &entry_idx) :
++	      rtw89_core_release_sta_ba_entry(rtwdev, rtwsta_link, params->tid,
++					      &entry_idx);
+ 	if (ret) {
+ 		/* it still works even if we don't have static BA CAM, because
+ 		 * hardware can create dynamic BA CAM automatically.
+@@ -2092,7 +2100,8 @@ int rtw89_fw_h2c_ba_cam_v1(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
+ 	entry_idx += chip->bacam_dynamic_num; /* std entry right after dynamic ones */
+ 	h2c->w1 = le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_V1_W1_ENTRY_IDX_MASK) |
+ 		  le32_encode_bits(1, RTW89_H2C_BA_CAM_V1_W1_STD_ENTRY_EN) |
+-		  le32_encode_bits(!!rtwvif->mac_idx, RTW89_H2C_BA_CAM_V1_W1_BAND_SEL);
++		  le32_encode_bits(!!rtwvif_link->mac_idx,
++				   RTW89_H2C_BA_CAM_V1_W1_BAND_SEL);
+ 
+ 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ 			      H2C_CAT_MAC,
+@@ -2197,15 +2206,14 @@ int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable)
+ }
+ 
+ static struct sk_buff *rtw89_eapol_get(struct rtw89_dev *rtwdev,
+-				       struct rtw89_vif *rtwvif)
++				       struct rtw89_vif_link *rtwvif_link)
+ {
+ 	static const u8 gtkbody[] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0x00, 0x88,
+ 				     0x8E, 0x01, 0x03, 0x00, 0x5F, 0x02, 0x03};
+-	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
+-	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+ 	u8 sec_hdr_len = rtw89_wow_get_sec_hdr_len(rtwdev);
+ 	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ 	struct rtw89_eapol_2_of_2 *eapol_pkt;
++	struct ieee80211_bss_conf *bss_conf;
+ 	struct ieee80211_hdr_3addr *hdr;
+ 	struct sk_buff *skb;
+ 	u8 key_des_ver;
+@@ -2227,10 +2235,17 @@ static struct sk_buff *rtw89_eapol_get(struct rtw89_dev *rtwdev,
+ 	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
+ 					 IEEE80211_FCTL_TODS |
+ 					 IEEE80211_FCTL_PROTECTED);
++
++	rcu_read_lock();
++
++	bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
++
+ 	ether_addr_copy(hdr->addr1, bss_conf->bssid);
+-	ether_addr_copy(hdr->addr2, vif->addr);
++	ether_addr_copy(hdr->addr2, bss_conf->addr);
+ 	ether_addr_copy(hdr->addr3, bss_conf->bssid);
+ 
++	rcu_read_unlock();
++
+ 	skb_put_zero(skb, sec_hdr_len);
+ 
+ 	eapol_pkt = skb_put_zero(skb, sizeof(*eapol_pkt));
+@@ -2241,11 +2256,10 @@ static struct sk_buff *rtw89_eapol_get(struct rtw89_dev *rtwdev,
+ }
+ 
+ static struct sk_buff *rtw89_sa_query_get(struct rtw89_dev *rtwdev,
+-					  struct rtw89_vif *rtwvif)
++					  struct rtw89_vif_link *rtwvif_link)
+ {
+-	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
+-	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+ 	u8 sec_hdr_len = rtw89_wow_get_sec_hdr_len(rtwdev);
++	struct ieee80211_bss_conf *bss_conf;
+ 	struct ieee80211_hdr_3addr *hdr;
+ 	struct rtw89_sa_query *sa_query;
+ 	struct sk_buff *skb;
+@@ -2258,10 +2272,17 @@ static struct sk_buff *rtw89_sa_query_get(struct rtw89_dev *rtwdev,
+ 	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+ 					 IEEE80211_STYPE_ACTION |
+ 					 IEEE80211_FCTL_PROTECTED);
++
++	rcu_read_lock();
++
++	bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
++
+ 	ether_addr_copy(hdr->addr1, bss_conf->bssid);
+-	ether_addr_copy(hdr->addr2, vif->addr);
++	ether_addr_copy(hdr->addr2, bss_conf->addr);
+ 	ether_addr_copy(hdr->addr3, bss_conf->bssid);
+ 
++	rcu_read_unlock();
++
+ 	skb_put_zero(skb, sec_hdr_len);
+ 
+ 	sa_query = skb_put_zero(skb, sizeof(*sa_query));
+@@ -2272,8 +2293,9 @@ static struct sk_buff *rtw89_sa_query_get(struct rtw89_dev *rtwdev,
+ }
+ 
+ static struct sk_buff *rtw89_arp_response_get(struct rtw89_dev *rtwdev,
+-					      struct rtw89_vif *rtwvif)
++					      struct rtw89_vif_link *rtwvif_link)
+ {
++	struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
+ 	u8 sec_hdr_len = rtw89_wow_get_sec_hdr_len(rtwdev);
+ 	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ 	struct ieee80211_hdr_3addr *hdr;
+@@ -2295,9 +2317,9 @@ static struct sk_buff *rtw89_arp_response_get(struct rtw89_dev *rtwdev,
+ 		fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_FCTL_TODS);
+ 
+ 	hdr->frame_control = fc;
+-	ether_addr_copy(hdr->addr1, rtwvif->bssid);
+-	ether_addr_copy(hdr->addr2, rtwvif->mac_addr);
+-	ether_addr_copy(hdr->addr3, rtwvif->bssid);
++	ether_addr_copy(hdr->addr1, rtwvif_link->bssid);
++	ether_addr_copy(hdr->addr2, rtwvif_link->mac_addr);
++	ether_addr_copy(hdr->addr3, rtwvif_link->bssid);
+ 
+ 	skb_put_zero(skb, sec_hdr_len);
+ 
+@@ -2312,18 +2334,18 @@ static struct sk_buff *rtw89_arp_response_get(struct rtw89_dev *rtwdev,
+ 	arp_hdr->ar_pln = 4;
+ 	arp_hdr->ar_op = htons(ARPOP_REPLY);
+ 
+-	ether_addr_copy(arp_skb->sender_hw, rtwvif->mac_addr);
++	ether_addr_copy(arp_skb->sender_hw, rtwvif_link->mac_addr);
+ 	arp_skb->sender_ip = rtwvif->ip_addr;
+ 
+ 	return skb;
+ }
+ 
+ static int rtw89_fw_h2c_add_general_pkt(struct rtw89_dev *rtwdev,
+-					struct rtw89_vif *rtwvif,
++					struct rtw89_vif_link *rtwvif_link,
+ 					enum rtw89_fw_pkt_ofld_type type,
+ 					u8 *id)
+ {
+-	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
++	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
+ 	struct rtw89_pktofld_info *info;
+ 	struct sk_buff *skb;
+ 	int ret;
+@@ -2346,13 +2368,13 @@ static int rtw89_fw_h2c_add_general_pkt(struct rtw89_dev *rtwdev,
+ 		skb = ieee80211_nullfunc_get(rtwdev->hw, vif, -1, true);
+ 		break;
+ 	case RTW89_PKT_OFLD_TYPE_EAPOL_KEY:
+-		skb = rtw89_eapol_get(rtwdev, rtwvif);
++		skb = rtw89_eapol_get(rtwdev, rtwvif_link);
+ 		break;
+ 	case RTW89_PKT_OFLD_TYPE_SA_QUERY:
+-		skb = rtw89_sa_query_get(rtwdev, rtwvif);
++		skb = rtw89_sa_query_get(rtwdev, rtwvif_link);
+ 		break;
+ 	case RTW89_PKT_OFLD_TYPE_ARP_RSP:
+-		skb = rtw89_arp_response_get(rtwdev, rtwvif);
++		skb = rtw89_arp_response_get(rtwdev, rtwvif_link);
+ 		break;
+ 	default:
+ 		goto err;
+@@ -2367,7 +2389,7 @@ static int rtw89_fw_h2c_add_general_pkt(struct rtw89_dev *rtwdev,
+ 	if (ret)
+ 		goto err;
+ 
+-	list_add_tail(&info->list, &rtwvif->general_pkt_list);
++	list_add_tail(&info->list, &rtwvif_link->general_pkt_list);
+ 	*id = info->id;
+ 	return 0;
+ 
+@@ -2377,9 +2399,10 @@ static int rtw89_fw_h2c_add_general_pkt(struct rtw89_dev *rtwdev,
+ }
+ 
+ void rtw89_fw_release_general_pkt_list_vif(struct rtw89_dev *rtwdev,
+-					   struct rtw89_vif *rtwvif, bool notify_fw)
++					   struct rtw89_vif_link *rtwvif_link,
++					   bool notify_fw)
+ {
+-	struct list_head *pkt_list = &rtwvif->general_pkt_list;
++	struct list_head *pkt_list = &rtwvif_link->general_pkt_list;
+ 	struct rtw89_pktofld_info *info, *tmp;
+ 
+ 	list_for_each_entry_safe(info, tmp, pkt_list, list) {
+@@ -2394,16 +2417,20 @@ void rtw89_fw_release_general_pkt_list_vif(struct rtw89_dev *rtwdev,
+ 
+ void rtw89_fw_release_general_pkt_list(struct rtw89_dev *rtwdev, bool notify_fw)
+ {
++	struct rtw89_vif_link *rtwvif_link;
+ 	struct rtw89_vif *rtwvif;
++	unsigned int link_id;
+ 
+ 	rtw89_for_each_rtwvif(rtwdev, rtwvif)
+-		rtw89_fw_release_general_pkt_list_vif(rtwdev, rtwvif, notify_fw);
++		rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id)
++			rtw89_fw_release_general_pkt_list_vif(rtwdev, rtwvif_link,
++							      notify_fw);
+ }
+ 
+ #define H2C_GENERAL_PKT_LEN 6
+ #define H2C_GENERAL_PKT_ID_UND 0xff
+ int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev,
+-			     struct rtw89_vif *rtwvif, u8 macid)
++			     struct rtw89_vif_link *rtwvif_link, u8 macid)
+ {
+ 	u8 pkt_id_ps_poll = H2C_GENERAL_PKT_ID_UND;
+ 	u8 pkt_id_null = H2C_GENERAL_PKT_ID_UND;
+@@ -2411,11 +2438,11 @@ int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev,
+ 	struct sk_buff *skb;
+ 	int ret;
+ 
+-	rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif,
++	rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link,
+ 				     RTW89_PKT_OFLD_TYPE_PS_POLL, &pkt_id_ps_poll);
+-	rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif,
++	rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link,
+ 				     RTW89_PKT_OFLD_TYPE_NULL_DATA, &pkt_id_null);
+-	rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif,
++	rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link,
+ 				     RTW89_PKT_OFLD_TYPE_QOS_NULL, &pkt_id_qos_null);
+ 
+ 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_GENERAL_PKT_LEN);
+@@ -2494,10 +2521,10 @@ int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev,
+ 	return ret;
+ }
+ 
+-int rtw89_fw_h2c_lps_ch_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
++int rtw89_fw_h2c_lps_ch_info(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link)
+ {
+ 	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
+-						       rtwvif->chanctx_idx);
++						       rtwvif_link->chanctx_idx);
+ 	const struct rtw89_chip_info *chip = rtwdev->chip;
+ 	struct rtw89_h2c_lps_ch_info *h2c;
+ 	u32 len = sizeof(*h2c);
+@@ -2546,13 +2573,14 @@ int rtw89_fw_h2c_lps_ch_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
+ }
+ 
+ #define H2C_P2P_ACT_LEN 20
+-int rtw89_fw_h2c_p2p_act(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
++int rtw89_fw_h2c_p2p_act(struct rtw89_dev *rtwdev,
++			 struct rtw89_vif_link *rtwvif_link,
++			 struct ieee80211_bss_conf *bss_conf,
+ 			 struct ieee80211_p2p_noa_desc *desc,
+ 			 u8 act, u8 noa_id)
+ {
+-	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+-	bool p2p_type_gc = rtwvif->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT;
+-	u8 ctwindow_oppps = vif->bss_conf.p2p_noa_attr.oppps_ctwindow;
++	bool p2p_type_gc = rtwvif_link->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT;
++	u8 ctwindow_oppps = bss_conf->p2p_noa_attr.oppps_ctwindow;
+ 	struct sk_buff *skb;
+ 	u8 *cmd;
+ 	int ret;
+@@ -2565,7 +2593,7 @@ int rtw89_fw_h2c_p2p_act(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
+ 	skb_put(skb, H2C_P2P_ACT_LEN);
+ 	cmd = skb->data;
+ 
+-	RTW89_SET_FWCMD_P2P_MACID(cmd, rtwvif->mac_id);
++	RTW89_SET_FWCMD_P2P_MACID(cmd, rtwvif_link->mac_id);
+ 	RTW89_SET_FWCMD_P2P_P2PID(cmd, 0);
+ 	RTW89_SET_FWCMD_P2P_NOAID(cmd, noa_id);
+ 	RTW89_SET_FWCMD_P2P_ACT(cmd, act);
+@@ -2622,11 +2650,11 @@ static void __rtw89_fw_h2c_set_tx_path(struct rtw89_dev *rtwdev,
+ 
+ #define H2C_CMC_TBL_LEN 68
+ int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev,
+-				  struct rtw89_vif *rtwvif,
+-				  struct rtw89_sta *rtwsta)
++				  struct rtw89_vif_link *rtwvif_link,
++				  struct rtw89_sta_link *rtwsta_link)
+ {
+ 	const struct rtw89_chip_info *chip = rtwdev->chip;
+-	u8 macid = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
++	u8 macid = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id;
+ 	struct sk_buff *skb;
+ 	int ret;
+ 
+@@ -2648,7 +2676,7 @@ int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev,
+ 	}
+ 	SET_CMC_TBL_DOPPLER_CTRL(skb->data, 0);
+ 	SET_CMC_TBL_TXPWR_TOLERENCE(skb->data, 0);
+-	if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE)
++	if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE)
+ 		SET_CMC_TBL_DATA_DCM(skb->data, 0);
+ 
+ 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+@@ -2671,10 +2699,10 @@ int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev,
+ EXPORT_SYMBOL(rtw89_fw_h2c_default_cmac_tbl);
+ 
+ int rtw89_fw_h2c_default_cmac_tbl_g7(struct rtw89_dev *rtwdev,
+-				     struct rtw89_vif *rtwvif,
+-				     struct rtw89_sta *rtwsta)
++				     struct rtw89_vif_link *rtwvif_link,
++				     struct rtw89_sta_link *rtwsta_link)
+ {
+-	u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
++	u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id;
+ 	struct rtw89_h2c_cctlinfo_ud_g7 *h2c;
+ 	u32 len = sizeof(*h2c);
+ 	struct sk_buff *skb;
+@@ -2755,24 +2783,25 @@ int rtw89_fw_h2c_default_cmac_tbl_g7(struct rtw89_dev *rtwdev,
+ EXPORT_SYMBOL(rtw89_fw_h2c_default_cmac_tbl_g7);
+ 
+ static void __get_sta_he_pkt_padding(struct rtw89_dev *rtwdev,
+-				     struct ieee80211_sta *sta, u8 *pads)
++				     struct ieee80211_link_sta *link_sta,
++				     u8 *pads)
+ {
+ 	bool ppe_th;
+ 	u8 ppe16, ppe8;
+-	u8 nss = min(sta->deflink.rx_nss, rtwdev->hal.tx_nss) - 1;
+-	u8 ppe_thres_hdr = sta->deflink.he_cap.ppe_thres[0];
++	u8 nss = min(link_sta->rx_nss, rtwdev->hal.tx_nss) - 1;
++	u8 ppe_thres_hdr = link_sta->he_cap.ppe_thres[0];
+ 	u8 ru_bitmap;
+ 	u8 n, idx, sh;
+ 	u16 ppe;
+ 	int i;
+ 
+ 	ppe_th = FIELD_GET(IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT,
+-			   sta->deflink.he_cap.he_cap_elem.phy_cap_info[6]);
++			   link_sta->he_cap.he_cap_elem.phy_cap_info[6]);
+ 	if (!ppe_th) {
+ 		u8 pad;
+ 
+ 		pad = FIELD_GET(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK,
+-				sta->deflink.he_cap.he_cap_elem.phy_cap_info[9]);
++				link_sta->he_cap.he_cap_elem.phy_cap_info[9]);
+ 
+ 		for (i = 0; i < RTW89_PPE_BW_NUM; i++)
+ 			pads[i] = pad;
+@@ -2794,7 +2823,7 @@ static void __get_sta_he_pkt_padding(struct rtw89_dev *rtwdev,
+ 		sh = n & 7;
+ 		n += IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2;
+ 
+-		ppe = le16_to_cpu(*((__le16 *)&sta->deflink.he_cap.ppe_thres[idx]));
++		ppe = le16_to_cpu(*((__le16 *)&link_sta->he_cap.ppe_thres[idx]));
+ 		ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;
+ 		sh += IEEE80211_PPE_THRES_INFO_PPET_SIZE;
+ 		ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;
+@@ -2809,23 +2838,35 @@ static void __get_sta_he_pkt_padding(struct rtw89_dev *rtwdev,
+ }
+ 
+ int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
+-				struct ieee80211_vif *vif,
+-				struct ieee80211_sta *sta)
++				struct rtw89_vif_link *rtwvif_link,
++				struct rtw89_sta_link *rtwsta_link)
+ {
++	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
+ 	const struct rtw89_chip_info *chip = rtwdev->chip;
+-	struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
+-	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ 	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
+-						       rtwvif->chanctx_idx);
++						       rtwvif_link->chanctx_idx);
++	struct ieee80211_link_sta *link_sta;
+ 	struct sk_buff *skb;
+ 	u8 pads[RTW89_PPE_BW_NUM];
+-	u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
++	u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id;
+ 	u16 lowest_rate;
+ 	int ret;
+ 
+ 	memset(pads, 0, sizeof(pads));
+-	if (sta && sta->deflink.he_cap.has_he)
+-		__get_sta_he_pkt_padding(rtwdev, sta, pads);
++
++	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
++	if (!skb) {
++		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
++		return -ENOMEM;
++	}
++
++	rcu_read_lock();
++
++	if (rtwsta_link)
++		link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true);
++
++	if (rtwsta_link && link_sta->he_cap.has_he)
++		__get_sta_he_pkt_padding(rtwdev, link_sta, pads);
+ 
+ 	if (vif->p2p)
+ 		lowest_rate = RTW89_HW_RATE_OFDM6;
+@@ -2834,11 +2875,6 @@ int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
+ 	else
+ 		lowest_rate = RTW89_HW_RATE_OFDM6;
+ 
+-	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
+-	if (!skb) {
+-		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
+-		return -ENOMEM;
+-	}
+ 	skb_put(skb, H2C_CMC_TBL_LEN);
+ 	SET_CTRL_INFO_MACID(skb->data, mac_id);
+ 	SET_CTRL_INFO_OPERATION(skb->data, 1);
+@@ -2851,7 +2887,7 @@ int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
+ 		SET_CMC_TBL_ULDL(skb->data, 1);
+ 	else
+ 		SET_CMC_TBL_ULDL(skb->data, 0);
+-	SET_CMC_TBL_MULTI_PORT_ID(skb->data, rtwvif->port);
++	SET_CMC_TBL_MULTI_PORT_ID(skb->data, rtwvif_link->port);
+ 	if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD_V1) {
+ 		SET_CMC_TBL_NOMINAL_PKT_PADDING_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_20]);
+ 		SET_CMC_TBL_NOMINAL_PKT_PADDING40_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_40]);
+@@ -2863,12 +2899,14 @@ int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
+ 		SET_CMC_TBL_NOMINAL_PKT_PADDING80(skb->data, pads[RTW89_CHANNEL_WIDTH_80]);
+ 		SET_CMC_TBL_NOMINAL_PKT_PADDING160(skb->data, pads[RTW89_CHANNEL_WIDTH_160]);
+ 	}
+-	if (sta)
++	if (rtwsta_link)
+ 		SET_CMC_TBL_BSR_QUEUE_SIZE_FORMAT(skb->data,
+-						  sta->deflink.he_cap.has_he);
+-	if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE)
++						  link_sta->he_cap.has_he);
++	if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE)
+ 		SET_CMC_TBL_DATA_DCM(skb->data, 0);
+ 
++	rcu_read_unlock();
++
+ 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ 			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
+ 			      chip->h2c_cctl_func_id, 0, 1,
+@@ -2889,9 +2927,10 @@ int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
+ EXPORT_SYMBOL(rtw89_fw_h2c_assoc_cmac_tbl);
+ 
+ static void __get_sta_eht_pkt_padding(struct rtw89_dev *rtwdev,
+-				      struct ieee80211_sta *sta, u8 *pads)
++				      struct ieee80211_link_sta *link_sta,
++				      u8 *pads)
+ {
+-	u8 nss = min(sta->deflink.rx_nss, rtwdev->hal.tx_nss) - 1;
++	u8 nss = min(link_sta->rx_nss, rtwdev->hal.tx_nss) - 1;
+ 	u16 ppe_thres_hdr;
+ 	u8 ppe16, ppe8;
+ 	u8 n, idx, sh;
+@@ -2900,12 +2939,12 @@ static void __get_sta_eht_pkt_padding(struct rtw89_dev *rtwdev,
+ 	u16 ppe;
+ 	int i;
+ 
+-	ppe_th = !!u8_get_bits(sta->deflink.eht_cap.eht_cap_elem.phy_cap_info[5],
++	ppe_th = !!u8_get_bits(link_sta->eht_cap.eht_cap_elem.phy_cap_info[5],
+ 			       IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT);
+ 	if (!ppe_th) {
+ 		u8 pad;
+ 
+-		pad = u8_get_bits(sta->deflink.eht_cap.eht_cap_elem.phy_cap_info[5],
++		pad = u8_get_bits(link_sta->eht_cap.eht_cap_elem.phy_cap_info[5],
+ 				  IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK);
+ 
+ 		for (i = 0; i < RTW89_PPE_BW_NUM; i++)
+@@ -2914,7 +2953,7 @@ static void __get_sta_eht_pkt_padding(struct rtw89_dev *rtwdev,
+ 		return;
+ 	}
+ 
+-	ppe_thres_hdr = get_unaligned_le16(sta->deflink.eht_cap.eht_ppe_thres);
++	ppe_thres_hdr = get_unaligned_le16(link_sta->eht_cap.eht_ppe_thres);
+ 	ru_bitmap = u16_get_bits(ppe_thres_hdr,
+ 				 IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK);
+ 	n = hweight8(ru_bitmap);
+@@ -2931,7 +2970,7 @@ static void __get_sta_eht_pkt_padding(struct rtw89_dev *rtwdev,
+ 		sh = n & 7;
+ 		n += IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE * 2;
+ 
+-		ppe = get_unaligned_le16(sta->deflink.eht_cap.eht_ppe_thres + idx);
++		ppe = get_unaligned_le16(link_sta->eht_cap.eht_ppe_thres + idx);
+ 		ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;
+ 		sh += IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE;
+ 		ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;
+@@ -2946,14 +2985,15 @@ static void __get_sta_eht_pkt_padding(struct rtw89_dev *rtwdev,
+ }
+ 
+ int rtw89_fw_h2c_assoc_cmac_tbl_g7(struct rtw89_dev *rtwdev,
+-				   struct ieee80211_vif *vif,
+-				   struct ieee80211_sta *sta)
++				   struct rtw89_vif_link *rtwvif_link,
++				   struct rtw89_sta_link *rtwsta_link)
+ {
+-	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+-	struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
+-	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, rtwvif->chanctx_idx);
+-	u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
++	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
++	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx);
++	u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id;
+ 	struct rtw89_h2c_cctlinfo_ud_g7 *h2c;
++	struct ieee80211_bss_conf *bss_conf;
++	struct ieee80211_link_sta *link_sta;
+ 	u8 pads[RTW89_PPE_BW_NUM];
+ 	u32 len = sizeof(*h2c);
+ 	struct sk_buff *skb;
+@@ -2961,11 +3001,24 @@ int rtw89_fw_h2c_assoc_cmac_tbl_g7(struct rtw89_dev *rtwdev,
+ 	int ret;
+ 
+ 	memset(pads, 0, sizeof(pads));
+-	if (sta) {
+-		if (sta->deflink.eht_cap.has_eht)
+-			__get_sta_eht_pkt_padding(rtwdev, sta, pads);
+-		else if (sta->deflink.he_cap.has_he)
+-			__get_sta_he_pkt_padding(rtwdev, sta, pads);
++
++	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
++	if (!skb) {
++		rtw89_err(rtwdev, "failed to alloc skb for cmac g7\n");
++		return -ENOMEM;
++	}
++
++	rcu_read_lock();
++
++	bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
++
++	if (rtwsta_link) {
++		link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true);
++
++		if (link_sta->eht_cap.has_eht)
++			__get_sta_eht_pkt_padding(rtwdev, link_sta, pads);
++		else if (link_sta->he_cap.has_he)
++			__get_sta_he_pkt_padding(rtwdev, link_sta, pads);
+ 	}
+ 
+ 	if (vif->p2p)
+@@ -2975,11 +3028,6 @@ int rtw89_fw_h2c_assoc_cmac_tbl_g7(struct rtw89_dev *rtwdev,
+ 	else
+ 		lowest_rate = RTW89_HW_RATE_OFDM6;
+ 
+-	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+-	if (!skb) {
+-		rtw89_err(rtwdev, "failed to alloc skb for cmac g7\n");
+-		return -ENOMEM;
+-	}
+ 	skb_put(skb, len);
+ 	h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data;
+ 
+@@ -3000,16 +3048,16 @@ int rtw89_fw_h2c_assoc_cmac_tbl_g7(struct rtw89_dev *rtwdev,
+ 	h2c->w3 = le32_encode_bits(0, CCTLINFO_G7_W3_RTS_TXCNT_LMT_SEL);
+ 	h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_RTS_TXCNT_LMT_SEL);
+ 
+-	h2c->w4 = le32_encode_bits(rtwvif->port, CCTLINFO_G7_W4_MULTI_PORT_ID);
++	h2c->w4 = le32_encode_bits(rtwvif_link->port, CCTLINFO_G7_W4_MULTI_PORT_ID);
+ 	h2c->m4 = cpu_to_le32(CCTLINFO_G7_W4_MULTI_PORT_ID);
+ 
+-	if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) {
++	if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE) {
+ 		h2c->w4 |= le32_encode_bits(0, CCTLINFO_G7_W4_DATA_DCM);
+ 		h2c->m4 |= cpu_to_le32(CCTLINFO_G7_W4_DATA_DCM);
+ 	}
+ 
+-	if (vif->bss_conf.eht_support) {
+-		u16 punct = vif->bss_conf.chanreq.oper.punctured;
++	if (bss_conf->eht_support) {
++		u16 punct = bss_conf->chanreq.oper.punctured;
+ 
+ 		h2c->w4 |= le32_encode_bits(~punct,
+ 					    CCTLINFO_G7_W4_ACT_SUBCH_CBW);
+@@ -3036,12 +3084,14 @@ int rtw89_fw_h2c_assoc_cmac_tbl_g7(struct rtw89_dev *rtwdev,
+ 				   CCTLINFO_G7_W6_ULDL);
+ 	h2c->m6 = cpu_to_le32(CCTLINFO_G7_W6_ULDL);
+ 
+-	if (sta) {
+-		h2c->w8 = le32_encode_bits(sta->deflink.he_cap.has_he,
++	if (rtwsta_link) {
++		h2c->w8 = le32_encode_bits(link_sta->he_cap.has_he,
+ 					   CCTLINFO_G7_W8_BSR_QUEUE_SIZE_FORMAT);
+ 		h2c->m8 = cpu_to_le32(CCTLINFO_G7_W8_BSR_QUEUE_SIZE_FORMAT);
+ 	}
+ 
++	rcu_read_unlock();
++
+ 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ 			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
+ 			      H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1,
+@@ -3062,10 +3112,10 @@ int rtw89_fw_h2c_assoc_cmac_tbl_g7(struct rtw89_dev *rtwdev,
+ EXPORT_SYMBOL(rtw89_fw_h2c_assoc_cmac_tbl_g7);
+ 
+ int rtw89_fw_h2c_ampdu_cmac_tbl_g7(struct rtw89_dev *rtwdev,
+-				   struct ieee80211_vif *vif,
+-				   struct ieee80211_sta *sta)
++				   struct rtw89_vif_link *rtwvif_link,
++				   struct rtw89_sta_link *rtwsta_link)
+ {
+-	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
++	struct rtw89_sta *rtwsta = rtwsta_link->rtwsta;
+ 	struct rtw89_h2c_cctlinfo_ud_g7 *h2c;
+ 	u32 len = sizeof(*h2c);
+ 	struct sk_buff *skb;
+@@ -3102,7 +3152,7 @@ int rtw89_fw_h2c_ampdu_cmac_tbl_g7(struct rtw89_dev *rtwdev,
+ 	else if (agg_num > 0x200 && agg_num <= 0x400)
+ 		ba_bmap = 5;
+ 
+-	h2c->c0 = le32_encode_bits(rtwsta->mac_id, CCTLINFO_G7_C0_MACID) |
++	h2c->c0 = le32_encode_bits(rtwsta_link->mac_id, CCTLINFO_G7_C0_MACID) |
+ 		  le32_encode_bits(1, CCTLINFO_G7_C0_OP);
+ 
+ 	h2c->w3 = le32_encode_bits(ba_bmap, CCTLINFO_G7_W3_BA_BMAP);
+@@ -3128,7 +3178,7 @@ int rtw89_fw_h2c_ampdu_cmac_tbl_g7(struct rtw89_dev *rtwdev,
+ EXPORT_SYMBOL(rtw89_fw_h2c_ampdu_cmac_tbl_g7);
+ 
+ int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev,
+-				 struct rtw89_sta *rtwsta)
++				 struct rtw89_sta_link *rtwsta_link)
+ {
+ 	const struct rtw89_chip_info *chip = rtwdev->chip;
+ 	struct sk_buff *skb;
+@@ -3140,15 +3190,15 @@ int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev,
+ 		return -ENOMEM;
+ 	}
+ 	skb_put(skb, H2C_CMC_TBL_LEN);
+-	SET_CTRL_INFO_MACID(skb->data, rtwsta->mac_id);
++	SET_CTRL_INFO_MACID(skb->data, rtwsta_link->mac_id);
+ 	SET_CTRL_INFO_OPERATION(skb->data, 1);
+-	if (rtwsta->cctl_tx_time) {
++	if (rtwsta_link->cctl_tx_time) {
+ 		SET_CMC_TBL_AMPDU_TIME_SEL(skb->data, 1);
+-		SET_CMC_TBL_AMPDU_MAX_TIME(skb->data, rtwsta->ampdu_max_time);
++		SET_CMC_TBL_AMPDU_MAX_TIME(skb->data, rtwsta_link->ampdu_max_time);
+ 	}
+-	if (rtwsta->cctl_tx_retry_limit) {
++	if (rtwsta_link->cctl_tx_retry_limit) {
+ 		SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 1);
+-		SET_CMC_TBL_DATA_TX_CNT_LMT(skb->data, rtwsta->data_tx_cnt_lmt);
++		SET_CMC_TBL_DATA_TX_CNT_LMT(skb->data, rtwsta_link->data_tx_cnt_lmt);
+ 	}
+ 
+ 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+@@ -3170,7 +3220,7 @@ int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev,
+ }
+ 
+ int rtw89_fw_h2c_txpath_cmac_tbl(struct rtw89_dev *rtwdev,
+-				 struct rtw89_sta *rtwsta)
++				 struct rtw89_sta_link *rtwsta_link)
+ {
+ 	const struct rtw89_chip_info *chip = rtwdev->chip;
+ 	struct sk_buff *skb;
+@@ -3185,7 +3235,7 @@ int rtw89_fw_h2c_txpath_cmac_tbl(struct rtw89_dev *rtwdev,
+ 		return -ENOMEM;
+ 	}
+ 	skb_put(skb, H2C_CMC_TBL_LEN);
+-	SET_CTRL_INFO_MACID(skb->data, rtwsta->mac_id);
++	SET_CTRL_INFO_MACID(skb->data, rtwsta_link->mac_id);
+ 	SET_CTRL_INFO_OPERATION(skb->data, 1);
+ 
+ 	__rtw89_fw_h2c_set_tx_path(rtwdev, skb);
+@@ -3209,11 +3259,11 @@ int rtw89_fw_h2c_txpath_cmac_tbl(struct rtw89_dev *rtwdev,
+ }
+ 
+ int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev,
+-			       struct rtw89_vif *rtwvif)
++			       struct rtw89_vif_link *rtwvif_link)
+ {
+ 	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
+-						       rtwvif->chanctx_idx);
+-	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
++						       rtwvif_link->chanctx_idx);
++	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
+ 	struct rtw89_h2c_bcn_upd *h2c;
+ 	struct sk_buff *skb_beacon;
+ 	struct ieee80211_hdr *hdr;
+@@ -3240,7 +3290,7 @@ int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev,
+ 		return -ENOMEM;
+ 	}
+ 
+-	noa_len = rtw89_p2p_noa_fetch(rtwvif, &noa_data);
++	noa_len = rtw89_p2p_noa_fetch(rtwvif_link, &noa_data);
+ 	if (noa_len &&
+ 	    (noa_len <= skb_tailroom(skb_beacon) ||
+ 	     pskb_expand_head(skb_beacon, 0, noa_len, GFP_KERNEL) == 0)) {
+@@ -3260,11 +3310,11 @@ int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev,
+ 	skb_put(skb, len);
+ 	h2c = (struct rtw89_h2c_bcn_upd *)skb->data;
+ 
+-	h2c->w0 = le32_encode_bits(rtwvif->port, RTW89_H2C_BCN_UPD_W0_PORT) |
++	h2c->w0 = le32_encode_bits(rtwvif_link->port, RTW89_H2C_BCN_UPD_W0_PORT) |
+ 		  le32_encode_bits(0, RTW89_H2C_BCN_UPD_W0_MBSSID) |
+-		  le32_encode_bits(rtwvif->mac_idx, RTW89_H2C_BCN_UPD_W0_BAND) |
++		  le32_encode_bits(rtwvif_link->mac_idx, RTW89_H2C_BCN_UPD_W0_BAND) |
+ 		  le32_encode_bits(tim_offset | BIT(7), RTW89_H2C_BCN_UPD_W0_GRP_IE_OFST);
+-	h2c->w1 = le32_encode_bits(rtwvif->mac_id, RTW89_H2C_BCN_UPD_W1_MACID) |
++	h2c->w1 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_BCN_UPD_W1_MACID) |
+ 		  le32_encode_bits(RTW89_MGMT_HW_SSN_SEL, RTW89_H2C_BCN_UPD_W1_SSN_SEL) |
+ 		  le32_encode_bits(RTW89_MGMT_HW_SEQ_MODE, RTW89_H2C_BCN_UPD_W1_SSN_MODE) |
+ 		  le32_encode_bits(beacon_rate, RTW89_H2C_BCN_UPD_W1_RATE);
+@@ -3289,10 +3339,10 @@ int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev,
+ EXPORT_SYMBOL(rtw89_fw_h2c_update_beacon);
+ 
+ int rtw89_fw_h2c_update_beacon_be(struct rtw89_dev *rtwdev,
+-				  struct rtw89_vif *rtwvif)
++				  struct rtw89_vif_link *rtwvif_link)
+ {
+-	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, rtwvif->chanctx_idx);
+-	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
++	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx);
++	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
+ 	struct rtw89_h2c_bcn_upd_be *h2c;
+ 	struct sk_buff *skb_beacon;
+ 	struct ieee80211_hdr *hdr;
+@@ -3319,7 +3369,7 @@ int rtw89_fw_h2c_update_beacon_be(struct rtw89_dev *rtwdev,
+ 		return -ENOMEM;
+ 	}
+ 
+-	noa_len = rtw89_p2p_noa_fetch(rtwvif, &noa_data);
++	noa_len = rtw89_p2p_noa_fetch(rtwvif_link, &noa_data);
+ 	if (noa_len &&
+ 	    (noa_len <= skb_tailroom(skb_beacon) ||
+ 	     pskb_expand_head(skb_beacon, 0, noa_len, GFP_KERNEL) == 0)) {
+@@ -3339,11 +3389,11 @@ int rtw89_fw_h2c_update_beacon_be(struct rtw89_dev *rtwdev,
+ 	skb_put(skb, len);
+ 	h2c = (struct rtw89_h2c_bcn_upd_be *)skb->data;
+ 
+-	h2c->w0 = le32_encode_bits(rtwvif->port, RTW89_H2C_BCN_UPD_BE_W0_PORT) |
++	h2c->w0 = le32_encode_bits(rtwvif_link->port, RTW89_H2C_BCN_UPD_BE_W0_PORT) |
+ 		  le32_encode_bits(0, RTW89_H2C_BCN_UPD_BE_W0_MBSSID) |
+-		  le32_encode_bits(rtwvif->mac_idx, RTW89_H2C_BCN_UPD_BE_W0_BAND) |
++		  le32_encode_bits(rtwvif_link->mac_idx, RTW89_H2C_BCN_UPD_BE_W0_BAND) |
+ 		  le32_encode_bits(tim_offset | BIT(7), RTW89_H2C_BCN_UPD_BE_W0_GRP_IE_OFST);
+-	h2c->w1 = le32_encode_bits(rtwvif->mac_id, RTW89_H2C_BCN_UPD_BE_W1_MACID) |
++	h2c->w1 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_BCN_UPD_BE_W1_MACID) |
+ 		  le32_encode_bits(RTW89_MGMT_HW_SSN_SEL, RTW89_H2C_BCN_UPD_BE_W1_SSN_SEL) |
+ 		  le32_encode_bits(RTW89_MGMT_HW_SEQ_MODE, RTW89_H2C_BCN_UPD_BE_W1_SSN_MODE) |
+ 		  le32_encode_bits(beacon_rate, RTW89_H2C_BCN_UPD_BE_W1_RATE);
+@@ -3373,22 +3423,22 @@ EXPORT_SYMBOL(rtw89_fw_h2c_update_beacon_be);
+ 
+ #define H2C_ROLE_MAINTAIN_LEN 4
+ int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev,
+-			       struct rtw89_vif *rtwvif,
+-			       struct rtw89_sta *rtwsta,
++			       struct rtw89_vif_link *rtwvif_link,
++			       struct rtw89_sta_link *rtwsta_link,
+ 			       enum rtw89_upd_mode upd_mode)
+ {
+ 	struct sk_buff *skb;
+-	u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
++	u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id;
+ 	u8 self_role;
+ 	int ret;
+ 
+-	if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) {
+-		if (rtwsta)
++	if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE) {
++		if (rtwsta_link)
+ 			self_role = RTW89_SELF_ROLE_AP_CLIENT;
+ 		else
+-			self_role = rtwvif->self_role;
++			self_role = rtwvif_link->self_role;
+ 	} else {
+-		self_role = rtwvif->self_role;
++		self_role = rtwvif_link->self_role;
+ 	}
+ 
+ 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ROLE_MAINTAIN_LEN);
+@@ -3400,7 +3450,7 @@ int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev,
+ 	SET_FWROLE_MAINTAIN_MACID(skb->data, mac_id);
+ 	SET_FWROLE_MAINTAIN_SELF_ROLE(skb->data, self_role);
+ 	SET_FWROLE_MAINTAIN_UPD_MODE(skb->data, upd_mode);
+-	SET_FWROLE_MAINTAIN_WIFI_ROLE(skb->data, rtwvif->wifi_role);
++	SET_FWROLE_MAINTAIN_WIFI_ROLE(skb->data, rtwvif_link->wifi_role);
+ 
+ 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ 			      H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT,
+@@ -3421,39 +3471,53 @@ int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev,
+ }
+ 
+ static enum rtw89_fw_sta_type
+-rtw89_fw_get_sta_type(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+-		      struct rtw89_sta *rtwsta)
++rtw89_fw_get_sta_type(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
++		      struct rtw89_sta_link *rtwsta_link)
+ {
+-	struct ieee80211_sta *sta = rtwsta_to_sta_safe(rtwsta);
+-	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
++	struct ieee80211_bss_conf *bss_conf;
++	struct ieee80211_link_sta *link_sta;
++	enum rtw89_fw_sta_type type;
++
++	rcu_read_lock();
+ 
+-	if (!sta)
++	if (!rtwsta_link)
+ 		goto by_vif;
+ 
+-	if (sta->deflink.eht_cap.has_eht)
+-		return RTW89_FW_BE_STA;
+-	else if (sta->deflink.he_cap.has_he)
+-		return RTW89_FW_AX_STA;
++	link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true);
++
++	if (link_sta->eht_cap.has_eht)
++		type = RTW89_FW_BE_STA;
++	else if (link_sta->he_cap.has_he)
++		type = RTW89_FW_AX_STA;
+ 	else
+-		return RTW89_FW_N_AC_STA;
++		type = RTW89_FW_N_AC_STA;
++
++	goto out;
+ 
+ by_vif:
+-	if (vif->bss_conf.eht_support)
+-		return RTW89_FW_BE_STA;
+-	else if (vif->bss_conf.he_support)
+-		return RTW89_FW_AX_STA;
++	bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
++
++	if (bss_conf->eht_support)
++		type = RTW89_FW_BE_STA;
++	else if (bss_conf->he_support)
++		type = RTW89_FW_AX_STA;
+ 	else
+-		return RTW89_FW_N_AC_STA;
++		type = RTW89_FW_N_AC_STA;
++
++out:
++	rcu_read_unlock();
++
++	return type;
+ }
+ 
+-int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+-			   struct rtw89_sta *rtwsta, bool dis_conn)
++int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
++			   struct rtw89_sta_link *rtwsta_link, bool dis_conn)
+ {
+ 	struct sk_buff *skb;
+-	u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
+-	u8 self_role = rtwvif->self_role;
++	u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id;
++	u8 self_role = rtwvif_link->self_role;
+ 	enum rtw89_fw_sta_type sta_type;
+-	u8 net_type = rtwvif->net_type;
++	u8 net_type = rtwvif_link->net_type;
+ 	struct rtw89_h2c_join_v1 *h2c_v1;
+ 	struct rtw89_h2c_join *h2c;
+ 	u32 len = sizeof(*h2c);
+@@ -3465,7 +3529,7 @@ int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ 		format_v1 = true;
+ 	}
+ 
+-	if (net_type == RTW89_NET_TYPE_AP_MODE && rtwsta) {
++	if (net_type == RTW89_NET_TYPE_AP_MODE && rtwsta_link) {
+ 		self_role = RTW89_SELF_ROLE_AP_CLIENT;
+ 		net_type = dis_conn ? RTW89_NET_TYPE_NO_LINK : net_type;
+ 	}
+@@ -3480,16 +3544,17 @@ int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ 
+ 	h2c->w0 = le32_encode_bits(mac_id, RTW89_H2C_JOININFO_W0_MACID) |
+ 		  le32_encode_bits(dis_conn, RTW89_H2C_JOININFO_W0_OP) |
+-		  le32_encode_bits(rtwvif->mac_idx, RTW89_H2C_JOININFO_W0_BAND) |
+-		  le32_encode_bits(rtwvif->wmm, RTW89_H2C_JOININFO_W0_WMM) |
+-		  le32_encode_bits(rtwvif->trigger, RTW89_H2C_JOININFO_W0_TGR) |
++		  le32_encode_bits(rtwvif_link->mac_idx, RTW89_H2C_JOININFO_W0_BAND) |
++		  le32_encode_bits(rtwvif_link->wmm, RTW89_H2C_JOININFO_W0_WMM) |
++		  le32_encode_bits(rtwvif_link->trigger, RTW89_H2C_JOININFO_W0_TGR) |
+ 		  le32_encode_bits(0, RTW89_H2C_JOININFO_W0_ISHESTA) |
+ 		  le32_encode_bits(0, RTW89_H2C_JOININFO_W0_DLBW) |
+ 		  le32_encode_bits(0, RTW89_H2C_JOININFO_W0_TF_MAC_PAD) |
+ 		  le32_encode_bits(0, RTW89_H2C_JOININFO_W0_DL_T_PE) |
+-		  le32_encode_bits(rtwvif->port, RTW89_H2C_JOININFO_W0_PORT_ID) |
++		  le32_encode_bits(rtwvif_link->port, RTW89_H2C_JOININFO_W0_PORT_ID) |
+ 		  le32_encode_bits(net_type, RTW89_H2C_JOININFO_W0_NET_TYPE) |
+-		  le32_encode_bits(rtwvif->wifi_role, RTW89_H2C_JOININFO_W0_WIFI_ROLE) |
++		  le32_encode_bits(rtwvif_link->wifi_role,
++				   RTW89_H2C_JOININFO_W0_WIFI_ROLE) |
+ 		  le32_encode_bits(self_role, RTW89_H2C_JOININFO_W0_SELF_ROLE);
+ 
+ 	if (!format_v1)
+@@ -3497,7 +3562,7 @@ int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ 
+ 	h2c_v1 = (struct rtw89_h2c_join_v1 *)skb->data;
+ 
+-	sta_type = rtw89_fw_get_sta_type(rtwdev, rtwvif, rtwsta);
++	sta_type = rtw89_fw_get_sta_type(rtwdev, rtwvif_link, rtwsta_link);
+ 
+ 	h2c_v1->w1 = le32_encode_bits(sta_type, RTW89_H2C_JOININFO_W1_STA_TYPE);
+ 	h2c_v1->w2 = 0;
+@@ -3618,7 +3683,7 @@ int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp,
+ }
+ 
+ #define H2C_EDCA_LEN 12
+-int rtw89_fw_h2c_set_edca(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
++int rtw89_fw_h2c_set_edca(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
+ 			  u8 ac, u32 val)
+ {
+ 	struct sk_buff *skb;
+@@ -3631,7 +3696,7 @@ int rtw89_fw_h2c_set_edca(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ 	}
+ 	skb_put(skb, H2C_EDCA_LEN);
+ 	RTW89_SET_EDCA_SEL(skb->data, 0);
+-	RTW89_SET_EDCA_BAND(skb->data, rtwvif->mac_idx);
++	RTW89_SET_EDCA_BAND(skb->data, rtwvif_link->mac_idx);
+ 	RTW89_SET_EDCA_WMM(skb->data, 0);
+ 	RTW89_SET_EDCA_AC(skb->data, ac);
+ 	RTW89_SET_EDCA_PARAM(skb->data, val);
+@@ -3655,7 +3720,8 @@ int rtw89_fw_h2c_set_edca(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ }
+ 
+ #define H2C_TSF32_TOGL_LEN 4
+-int rtw89_fw_h2c_tsf32_toggle(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
++int rtw89_fw_h2c_tsf32_toggle(struct rtw89_dev *rtwdev,
++			      struct rtw89_vif_link *rtwvif_link,
+ 			      bool en)
+ {
+ 	struct sk_buff *skb;
+@@ -3671,9 +3737,9 @@ int rtw89_fw_h2c_tsf32_toggle(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif
+ 	skb_put(skb, H2C_TSF32_TOGL_LEN);
+ 	cmd = skb->data;
+ 
+-	RTW89_SET_FWCMD_TSF32_TOGL_BAND(cmd, rtwvif->mac_idx);
++	RTW89_SET_FWCMD_TSF32_TOGL_BAND(cmd, rtwvif_link->mac_idx);
+ 	RTW89_SET_FWCMD_TSF32_TOGL_EN(cmd, en);
+-	RTW89_SET_FWCMD_TSF32_TOGL_PORT(cmd, rtwvif->port);
++	RTW89_SET_FWCMD_TSF32_TOGL_PORT(cmd, rtwvif_link->port);
+ 	RTW89_SET_FWCMD_TSF32_TOGL_EARLY(cmd, early_us);
+ 
+ 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+@@ -3727,11 +3793,10 @@ int rtw89_fw_h2c_set_ofld_cfg(struct rtw89_dev *rtwdev)
+ }
+ 
+ int rtw89_fw_h2c_set_bcn_fltr_cfg(struct rtw89_dev *rtwdev,
+-				  struct ieee80211_vif *vif,
++				  struct rtw89_vif_link *rtwvif_link,
+ 				  bool connect)
+ {
+-	struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif);
+-	struct ieee80211_bss_conf *bss_conf = vif ? &vif->bss_conf : NULL;
++	struct ieee80211_bss_conf *bss_conf;
+ 	s32 thold = RTW89_DEFAULT_CQM_THOLD;
+ 	u32 hyst = RTW89_DEFAULT_CQM_HYST;
+ 	struct rtw89_h2c_bcnfltr *h2c;
+@@ -3742,9 +3807,20 @@ int rtw89_fw_h2c_set_bcn_fltr_cfg(struct rtw89_dev *rtwdev,
+ 	if (!RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw))
+ 		return -EINVAL;
+ 
+-	if (!rtwvif || !bss_conf || rtwvif->net_type != RTW89_NET_TYPE_INFRA)
++	if (!rtwvif_link || rtwvif_link->net_type != RTW89_NET_TYPE_INFRA)
+ 		return -EINVAL;
+ 
++	rcu_read_lock();
++
++	bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, false);
++
++	if (bss_conf->cqm_rssi_hyst)
++		hyst = bss_conf->cqm_rssi_hyst;
++	if (bss_conf->cqm_rssi_thold)
++		thold = bss_conf->cqm_rssi_thold;
++
++	rcu_read_unlock();
++
+ 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ 	if (!skb) {
+ 		rtw89_err(rtwdev, "failed to alloc skb for h2c bcn filter\n");
+@@ -3754,11 +3830,6 @@ int rtw89_fw_h2c_set_bcn_fltr_cfg(struct rtw89_dev *rtwdev,
+ 	skb_put(skb, len);
+ 	h2c = (struct rtw89_h2c_bcnfltr *)skb->data;
+ 
+-	if (bss_conf->cqm_rssi_hyst)
+-		hyst = bss_conf->cqm_rssi_hyst;
+-	if (bss_conf->cqm_rssi_thold)
+-		thold = bss_conf->cqm_rssi_thold;
+-
+ 	h2c->w0 = le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_RSSI) |
+ 		  le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_BCN) |
+ 		  le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_EN) |
+@@ -3768,7 +3839,7 @@ int rtw89_fw_h2c_set_bcn_fltr_cfg(struct rtw89_dev *rtwdev,
+ 		  le32_encode_bits(hyst, RTW89_H2C_BCNFLTR_W0_RSSI_HYST) |
+ 		  le32_encode_bits(thold + MAX_RSSI,
+ 				   RTW89_H2C_BCNFLTR_W0_RSSI_THRESHOLD) |
+-		  le32_encode_bits(rtwvif->mac_id, RTW89_H2C_BCNFLTR_W0_MAC_ID);
++		  le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_BCNFLTR_W0_MAC_ID);
+ 
+ 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
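The beacon-filter hunk above stops dereferencing vif->bss_conf directly and instead snapshots the CQM parameters from the link's bss_conf inside an RCU read-side section, before the skb is allocated, presumably so the potentially sleeping allocation never runs under rcu_read_lock(). A compilable sketch of that shape; the rcu_* functions are no-op stubs so it builds in userspace, and the default threshold/hysteresis values are placeholders rather than the driver's RTW89_DEFAULT_CQM_* constants:

    #include <stdint.h>
    #include <stdio.h>

    struct bss_conf { int cqm_rssi_hyst; int cqm_rssi_thold; };
    struct vif_link { struct bss_conf *bss_conf; };

    static void rcu_read_lock(void)   { }   /* no-op stub */
    static void rcu_read_unlock(void) { }   /* no-op stub */

    static struct bss_conf *deref_link_conf(struct vif_link *l)
    {
            return l->bss_conf;  /* rtw89_vif_rcu_dereference_link() in-kernel */
    }

    /* Snapshot the fields inside one critical section, use them later. */
    static void get_cqm(struct vif_link *link, int *thold, int *hyst)
    {
            struct bss_conf *conf;

            *thold = -60;   /* placeholder default */
            *hyst  = 4;     /* placeholder default */

            rcu_read_lock();
            conf = deref_link_conf(link);
            if (conf->cqm_rssi_hyst)
                    *hyst = conf->cqm_rssi_hyst;
            if (conf->cqm_rssi_thold)
                    *thold = conf->cqm_rssi_thold;
            rcu_read_unlock();
    }

    int main(void)
    {
            struct bss_conf conf = { .cqm_rssi_hyst = 6,
                                     .cqm_rssi_thold = -70 };
            struct vif_link link = { .bss_conf = &conf };
            int thold, hyst;

            get_cqm(&link, &thold, &hyst);
            printf("thold=%d hyst=%d\n", thold, hyst);
            return 0;
    }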
+ 			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
+@@ -3833,15 +3904,16 @@ int rtw89_fw_h2c_rssi_offload(struct rtw89_dev *rtwdev,
+ 	return ret;
+ }
+ 
+-int rtw89_fw_h2c_tp_offload(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
++int rtw89_fw_h2c_tp_offload(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link)
+ {
++	struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
+ 	struct rtw89_traffic_stats *stats = &rtwvif->stats;
+ 	struct rtw89_h2c_ofld *h2c;
+ 	u32 len = sizeof(*h2c);
+ 	struct sk_buff *skb;
+ 	int ret;
+ 
+-	if (rtwvif->net_type != RTW89_NET_TYPE_INFRA)
++	if (rtwvif_link->net_type != RTW89_NET_TYPE_INFRA)
+ 		return -EINVAL;
+ 
+ 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+@@ -3853,7 +3925,7 @@ int rtw89_fw_h2c_tp_offload(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
+ 	skb_put(skb, len);
+ 	h2c = (struct rtw89_h2c_ofld *)skb->data;
+ 
+-	h2c->w0 = le32_encode_bits(rtwvif->mac_id, RTW89_H2C_OFLD_W0_MAC_ID) |
++	h2c->w0 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_OFLD_W0_MAC_ID) |
+ 		  le32_encode_bits(stats->tx_throughput, RTW89_H2C_OFLD_W0_TX_TP) |
+ 		  le32_encode_bits(stats->rx_throughput, RTW89_H2C_OFLD_W0_RX_TP);
+ 
+@@ -4858,7 +4930,7 @@ int rtw89_fw_h2c_scan_list_offload_be(struct rtw89_dev *rtwdev, int ch_num,
+ #define RTW89_SCAN_DELAY_TSF_UNIT 104800
+ int rtw89_fw_h2c_scan_offload_ax(struct rtw89_dev *rtwdev,
+ 				 struct rtw89_scan_option *option,
+-				 struct rtw89_vif *rtwvif,
++				 struct rtw89_vif_link *rtwvif_link,
+ 				 bool wowlan)
+ {
+ 	struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
+@@ -4880,7 +4952,7 @@ int rtw89_fw_h2c_scan_offload_ax(struct rtw89_dev *rtwdev,
+ 	h2c = (struct rtw89_h2c_scanofld *)skb->data;
+ 
+ 	if (option->delay) {
+-		ret = rtw89_mac_port_get_tsf(rtwdev, rtwvif, &tsf);
++		ret = rtw89_mac_port_get_tsf(rtwdev, rtwvif_link, &tsf);
+ 		if (ret) {
+ 			rtw89_warn(rtwdev, "NLO failed to get port tsf: %d\n", ret);
+ 			scan_mode = RTW89_SCAN_IMMEDIATE;
+@@ -4890,8 +4962,8 @@ int rtw89_fw_h2c_scan_offload_ax(struct rtw89_dev *rtwdev,
+ 		}
+ 	}
+ 
+-	h2c->w0 = le32_encode_bits(rtwvif->mac_id, RTW89_H2C_SCANOFLD_W0_MACID) |
+-		  le32_encode_bits(rtwvif->port, RTW89_H2C_SCANOFLD_W0_PORT_ID) |
++	h2c->w0 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_SCANOFLD_W0_MACID) |
++		  le32_encode_bits(rtwvif_link->port, RTW89_H2C_SCANOFLD_W0_PORT_ID) |
+ 		  le32_encode_bits(RTW89_PHY_0, RTW89_H2C_SCANOFLD_W0_BAND) |
+ 		  le32_encode_bits(option->enable, RTW89_H2C_SCANOFLD_W0_OPERATION);
+ 
+@@ -4963,9 +5035,10 @@ static void rtw89_scan_get_6g_disabled_chan(struct rtw89_dev *rtwdev,
+ 
+ int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev,
+ 				 struct rtw89_scan_option *option,
+-				 struct rtw89_vif *rtwvif,
++				 struct rtw89_vif_link *rtwvif_link,
+ 				 bool wowlan)
+ {
++	struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
+ 	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
+ 	struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
+ 	struct cfg80211_scan_request *req = rtwvif->scan_req;
+@@ -5016,8 +5089,8 @@ int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev,
+ 		  le32_encode_bits(option->repeat, RTW89_H2C_SCANOFLD_BE_W0_REPEAT) |
+ 		  le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_NOTIFY_END) |
+ 		  le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_LEARN_CH) |
+-		  le32_encode_bits(rtwvif->mac_id, RTW89_H2C_SCANOFLD_BE_W0_MACID) |
+-		  le32_encode_bits(rtwvif->port, RTW89_H2C_SCANOFLD_BE_W0_PORT) |
++		  le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_SCANOFLD_BE_W0_MACID) |
++		  le32_encode_bits(rtwvif_link->port, RTW89_H2C_SCANOFLD_BE_W0_PORT) |
+ 		  le32_encode_bits(option->band, RTW89_H2C_SCANOFLD_BE_W0_BAND);
+ 
+ 	h2c->w1 = le32_encode_bits(option->num_macc_role, RTW89_H2C_SCANOFLD_BE_W1_NUM_MACC_ROLE) |
+@@ -5082,11 +5155,11 @@ int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev,
+ 
+ 	for (i = 0; i < option->num_opch; i++) {
+ 		opch = ptr;
+-		opch->w0 = le32_encode_bits(rtwvif->mac_id,
++		opch->w0 = le32_encode_bits(rtwvif_link->mac_id,
+ 					    RTW89_H2C_SCANOFLD_BE_OPCH_W0_MACID) |
+ 			   le32_encode_bits(option->band,
+ 					    RTW89_H2C_SCANOFLD_BE_OPCH_W0_BAND) |
+-			   le32_encode_bits(rtwvif->port,
++			   le32_encode_bits(rtwvif_link->port,
+ 					    RTW89_H2C_SCANOFLD_BE_OPCH_W0_PORT) |
+ 			   le32_encode_bits(RTW89_SCAN_OPMODE_INTV,
+ 					    RTW89_H2C_SCANOFLD_BE_OPCH_W0_POLICY) |
+@@ -5871,12 +5944,10 @@ static void rtw89_release_pkt_list(struct rtw89_dev *rtwdev)
+ }
+ 
+ static bool rtw89_is_6ghz_wildcard_probe_req(struct rtw89_dev *rtwdev,
+-					     struct rtw89_vif *rtwvif,
++					     struct cfg80211_scan_request *req,
+ 					     struct rtw89_pktofld_info *info,
+ 					     enum nl80211_band band, u8 ssid_idx)
+ {
+-	struct cfg80211_scan_request *req = rtwvif->scan_req;
+-
+ 	if (band != NL80211_BAND_6GHZ)
+ 		return false;
+ 
+@@ -5892,11 +5963,13 @@ static bool rtw89_is_6ghz_wildcard_probe_req(struct rtw89_dev *rtwdev,
+ }
+ 
+ static int rtw89_append_probe_req_ie(struct rtw89_dev *rtwdev,
+-				     struct rtw89_vif *rtwvif,
++				     struct rtw89_vif_link *rtwvif_link,
+ 				     struct sk_buff *skb, u8 ssid_idx)
+ {
+ 	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
++	struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
+ 	struct ieee80211_scan_ies *ies = rtwvif->scan_ies;
++	struct cfg80211_scan_request *req = rtwvif->scan_req;
+ 	struct rtw89_pktofld_info *info;
+ 	struct sk_buff *new;
+ 	int ret = 0;
+@@ -5921,8 +5994,7 @@ static int rtw89_append_probe_req_ie(struct rtw89_dev *rtwdev,
+ 			goto out;
+ 		}
+ 
+-		rtw89_is_6ghz_wildcard_probe_req(rtwdev, rtwvif, info, band,
+-						 ssid_idx);
++		rtw89_is_6ghz_wildcard_probe_req(rtwdev, req, info, band, ssid_idx);
+ 
+ 		ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, new);
+ 		if (ret) {
+@@ -5939,22 +6011,23 @@ static int rtw89_append_probe_req_ie(struct rtw89_dev *rtwdev,
+ }
+ 
+ static int rtw89_hw_scan_update_probe_req(struct rtw89_dev *rtwdev,
+-					  struct rtw89_vif *rtwvif)
++					  struct rtw89_vif_link *rtwvif_link)
+ {
++	struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
+ 	struct cfg80211_scan_request *req = rtwvif->scan_req;
+ 	struct sk_buff *skb;
+ 	u8 num = req->n_ssids, i;
+ 	int ret;
+ 
+ 	for (i = 0; i < num; i++) {
+-		skb = ieee80211_probereq_get(rtwdev->hw, rtwvif->mac_addr,
++		skb = ieee80211_probereq_get(rtwdev->hw, rtwvif_link->mac_addr,
+ 					     req->ssids[i].ssid,
+ 					     req->ssids[i].ssid_len,
+ 					     req->ie_len);
+ 		if (!skb)
+ 			return -ENOMEM;
+ 
+-		ret = rtw89_append_probe_req_ie(rtwdev, rtwvif, skb, i);
++		ret = rtw89_append_probe_req_ie(rtwdev, rtwvif_link, skb, i);
+ 		kfree_skb(skb);
+ 
+ 		if (ret)
+@@ -5965,13 +6038,12 @@ static int rtw89_hw_scan_update_probe_req(struct rtw89_dev *rtwdev,
+ }
+ 
+ static int rtw89_update_6ghz_rnr_chan(struct rtw89_dev *rtwdev,
++				      struct ieee80211_scan_ies *ies,
+ 				      struct cfg80211_scan_request *req,
+ 				      struct rtw89_mac_chinfo *ch_info)
+ {
+-	struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif;
++	struct rtw89_vif_link *rtwvif_link = rtwdev->scan_info.scanning_vif;
+ 	struct list_head *pkt_list = rtwdev->scan_info.pkt_list;
+-	struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif);
+-	struct ieee80211_scan_ies *ies = rtwvif->scan_ies;
+ 	struct cfg80211_scan_6ghz_params *params;
+ 	struct rtw89_pktofld_info *info, *tmp;
+ 	struct ieee80211_hdr *hdr;
+@@ -6000,7 +6072,7 @@ static int rtw89_update_6ghz_rnr_chan(struct rtw89_dev *rtwdev,
+ 		if (found)
+ 			continue;
+ 
+-		skb = ieee80211_probereq_get(rtwdev->hw, rtwvif->mac_addr,
++		skb = ieee80211_probereq_get(rtwdev->hw, rtwvif_link->mac_addr,
+ 					     NULL, 0, req->ie_len);
+ 		skb_put_data(skb, ies->ies[NL80211_BAND_6GHZ], ies->len[NL80211_BAND_6GHZ]);
+ 		skb_put_data(skb, ies->common_ies, ies->common_ie_len);
+@@ -6090,8 +6162,9 @@ static void rtw89_hw_scan_add_chan(struct rtw89_dev *rtwdev, int chan_type,
+ 				   struct rtw89_mac_chinfo *ch_info)
+ {
+ 	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
+-	struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif;
+-	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
++	struct rtw89_vif_link *rtwvif_link = rtwdev->scan_info.scanning_vif;
++	struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
++	struct ieee80211_scan_ies *ies = rtwvif->scan_ies;
+ 	struct cfg80211_scan_request *req = rtwvif->scan_req;
+ 	struct rtw89_chan *op = &rtwdev->scan_info.op_chan;
+ 	struct rtw89_pktofld_info *info;
+@@ -6117,7 +6190,7 @@ static void rtw89_hw_scan_add_chan(struct rtw89_dev *rtwdev, int chan_type,
+ 		}
+ 	}
+ 
+-	ret = rtw89_update_6ghz_rnr_chan(rtwdev, req, ch_info);
++	ret = rtw89_update_6ghz_rnr_chan(rtwdev, ies, req, ch_info);
+ 	if (ret)
+ 		rtw89_warn(rtwdev, "RNR fails: %d\n", ret);
+ 
+@@ -6207,8 +6280,8 @@ static void rtw89_hw_scan_add_chan_be(struct rtw89_dev *rtwdev, int chan_type,
+ 				      struct rtw89_mac_chinfo_be *ch_info)
+ {
+ 	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
+-	struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif;
+-	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
++	struct rtw89_vif_link *rtwvif_link = rtwdev->scan_info.scanning_vif;
++	struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
+ 	struct cfg80211_scan_request *req = rtwvif->scan_req;
+ 	struct rtw89_pktofld_info *info;
+ 	u8 band, probe_count = 0, i;
+@@ -6265,7 +6338,7 @@ static void rtw89_hw_scan_add_chan_be(struct rtw89_dev *rtwdev, int chan_type,
+ }
+ 
+ int rtw89_pno_scan_add_chan_list_ax(struct rtw89_dev *rtwdev,
+-				    struct rtw89_vif *rtwvif)
++				    struct rtw89_vif_link *rtwvif_link)
+ {
+ 	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ 	struct cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config;
+@@ -6315,8 +6388,9 @@ int rtw89_pno_scan_add_chan_list_ax(struct rtw89_dev *rtwdev,
+ }
+ 
+ int rtw89_hw_scan_add_chan_list_ax(struct rtw89_dev *rtwdev,
+-				   struct rtw89_vif *rtwvif, bool connected)
++				   struct rtw89_vif_link *rtwvif_link, bool connected)
+ {
++	struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
+ 	struct cfg80211_scan_request *req = rtwvif->scan_req;
+ 	struct rtw89_mac_chinfo	*ch_info, *tmp;
+ 	struct ieee80211_channel *channel;
+@@ -6392,7 +6466,7 @@ int rtw89_hw_scan_add_chan_list_ax(struct rtw89_dev *rtwdev,
+ }
+ 
+ int rtw89_pno_scan_add_chan_list_be(struct rtw89_dev *rtwdev,
+-				    struct rtw89_vif *rtwvif)
++				    struct rtw89_vif_link *rtwvif_link)
+ {
+ 	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ 	struct cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config;
+@@ -6444,8 +6518,9 @@ int rtw89_pno_scan_add_chan_list_be(struct rtw89_dev *rtwdev,
+ }
+ 
+ int rtw89_hw_scan_add_chan_list_be(struct rtw89_dev *rtwdev,
+-				   struct rtw89_vif *rtwvif, bool connected)
++				   struct rtw89_vif_link *rtwvif_link, bool connected)
+ {
++	struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
+ 	struct cfg80211_scan_request *req = rtwvif->scan_req;
+ 	struct rtw89_mac_chinfo_be *ch_info, *tmp;
+ 	struct ieee80211_channel *channel;
+@@ -6503,45 +6578,50 @@ int rtw89_hw_scan_add_chan_list_be(struct rtw89_dev *rtwdev,
+ }
+ 
+ static int rtw89_hw_scan_prehandle(struct rtw89_dev *rtwdev,
+-				   struct rtw89_vif *rtwvif, bool connected)
++				   struct rtw89_vif_link *rtwvif_link, bool connected)
+ {
+ 	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+ 	int ret;
+ 
+-	ret = rtw89_hw_scan_update_probe_req(rtwdev, rtwvif);
++	ret = rtw89_hw_scan_update_probe_req(rtwdev, rtwvif_link);
+ 	if (ret) {
+ 		rtw89_err(rtwdev, "Update probe request failed\n");
+ 		goto out;
+ 	}
+-	ret = mac->add_chan_list(rtwdev, rtwvif, connected);
++	ret = mac->add_chan_list(rtwdev, rtwvif_link, connected);
+ out:
+ 	return ret;
+ }
+ 
+-void rtw89_hw_scan_start(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
++void rtw89_hw_scan_start(struct rtw89_dev *rtwdev,
++			 struct rtw89_vif_link *rtwvif_link,
+ 			 struct ieee80211_scan_request *scan_req)
+ {
+-	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ 	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+ 	struct cfg80211_scan_request *req = &scan_req->req;
++	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
++						       rtwvif_link->chanctx_idx);
++	struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
+ 	u32 rx_fltr = rtwdev->hal.rx_fltr;
+ 	u8 mac_addr[ETH_ALEN];
+ 
+-	rtw89_get_channel(rtwdev, rtwvif, &rtwdev->scan_info.op_chan);
+-	rtwdev->scan_info.scanning_vif = vif;
++	/* clone op and keep it during scan */
++	rtwdev->scan_info.op_chan = *chan;
++
++	rtwdev->scan_info.scanning_vif = rtwvif_link;
+ 	rtwdev->scan_info.last_chan_idx = 0;
+ 	rtwdev->scan_info.abort = false;
+ 	rtwvif->scan_ies = &scan_req->ies;
+ 	rtwvif->scan_req = req;
+ 	ieee80211_stop_queues(rtwdev->hw);
+-	rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif, false);
++	rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif_link, false);
+ 
+ 	if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR)
+ 		get_random_mask_addr(mac_addr, req->mac_addr,
+ 				     req->mac_addr_mask);
+ 	else
+-		ether_addr_copy(mac_addr, vif->addr);
+-	rtw89_core_scan_start(rtwdev, rtwvif, mac_addr, true);
++		ether_addr_copy(mac_addr, rtwvif_link->mac_addr);
++	rtw89_core_scan_start(rtwdev, rtwvif_link, mac_addr, true);
+ 
+ 	rx_fltr &= ~B_AX_A_BCN_CHK_EN;
+ 	rx_fltr &= ~B_AX_A_BC;
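rtw89_hw_scan_start() above keeps calling get_random_mask_addr() when NL80211_SCAN_FLAG_RANDOM_ADDR is set, now copying the link's address otherwise. A userspace sketch of the usual mask semantics, assuming (as in mac80211) that mask bits set to 1 are kept from the template address and bits set to 0 are randomized; rand_byte() is a stand-in for the kernel RNG:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    static uint8_t rand_byte(void) { return (uint8_t)rand(); }

    static void random_mask_addr(uint8_t out[6], const uint8_t tmpl[6],
                                 const uint8_t mask[6])
    {
            for (int i = 0; i < 6; i++)
                    out[i] = (tmpl[i] & mask[i]) | (rand_byte() & ~mask[i]);
    }

    int main(void)
    {
            /* keep the OUI, randomize the NIC-specific half */
            const uint8_t tmpl[6] = { 0x02, 0x11, 0x22, 0x00, 0x00, 0x00 };
            const uint8_t mask[6] = { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 };
            uint8_t out[6];

            random_mask_addr(out, tmpl, mask);
            printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
                   out[0], out[1], out[2], out[3], out[4], out[5]);
            return 0;
    }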
+@@ -6554,28 +6634,33 @@ void rtw89_hw_scan_start(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
+ 	rtw89_chanctx_pause(rtwdev, RTW89_CHANCTX_PAUSE_REASON_HW_SCAN);
+ }
+ 
+-void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
++void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev,
++			    struct rtw89_vif_link *rtwvif_link,
+ 			    bool aborted)
+ {
+ 	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+ 	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
+-	struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif);
+ 	struct cfg80211_scan_info info = {
+ 		.aborted = aborted,
+ 	};
++	struct rtw89_vif *rtwvif;
+ 
+-	if (!vif)
++	if (!rtwvif_link)
+ 		return;
+ 
++	rtw89_chanctx_proceed(rtwdev);
++
++	rtwvif = rtwvif_link->rtwvif;
++
+ 	rtw89_write32_mask(rtwdev,
+ 			   rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, RTW89_MAC_0),
+ 			   B_AX_RX_FLTR_CFG_MASK,
+ 			   rtwdev->hal.rx_fltr);
+ 
+-	rtw89_core_scan_complete(rtwdev, vif, true);
++	rtw89_core_scan_complete(rtwdev, rtwvif_link, true);
+ 	ieee80211_scan_completed(rtwdev->hw, &info);
+ 	ieee80211_wake_queues(rtwdev->hw);
+-	rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif, true);
++	rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif_link, true);
+ 	rtw89_mac_enable_beacon_for_ap_vifs(rtwdev, true);
+ 
+ 	rtw89_release_pkt_list(rtwdev);
+@@ -6584,18 +6669,17 @@ void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
+ 	scan_info->last_chan_idx = 0;
+ 	scan_info->scanning_vif = NULL;
+ 	scan_info->abort = false;
+-
+-	rtw89_chanctx_proceed(rtwdev);
+ }
+ 
+-void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif)
++void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev,
++			 struct rtw89_vif_link *rtwvif_link)
+ {
+ 	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
+ 	int ret;
+ 
+ 	scan_info->abort = true;
+ 
+-	ret = rtw89_hw_scan_offload(rtwdev, vif, false);
++	ret = rtw89_hw_scan_offload(rtwdev, rtwvif_link, false);
+ 	if (ret)
+ 		rtw89_warn(rtwdev, "rtw89_hw_scan_offload failed ret %d\n", ret);
+ 
+@@ -6604,40 +6688,43 @@ void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif)
+ 	 * RTW89_SCAN_END_SCAN_NOTIFY, so that ieee80211_stop() can flush scan
+ 	 * work properly.
+ 	 */
+-	rtw89_hw_scan_complete(rtwdev, vif, true);
++	rtw89_hw_scan_complete(rtwdev, rtwvif_link, true);
+ }
+ 
+ static bool rtw89_is_any_vif_connected_or_connecting(struct rtw89_dev *rtwdev)
+ {
++	struct rtw89_vif_link *rtwvif_link;
+ 	struct rtw89_vif *rtwvif;
++	unsigned int link_id;
+ 
+ 	rtw89_for_each_rtwvif(rtwdev, rtwvif) {
+-		/* This variable implies connected or during attempt to connect */
+-		if (!is_zero_ether_addr(rtwvif->bssid))
+-			return true;
++		rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) {
++			/* This variable implies connected or during attempt to connect */
++			if (!is_zero_ether_addr(rtwvif_link->bssid))
++				return true;
++		}
+ 	}
+ 
+ 	return false;
+ }
+ 
+-int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
++int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev,
++			  struct rtw89_vif_link *rtwvif_link,
+ 			  bool enable)
+ {
+ 	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+ 	struct rtw89_scan_option opt = {0};
+-	struct rtw89_vif *rtwvif;
+ 	bool connected;
+ 	int ret = 0;
+ 
+-	rtwvif = vif ? (struct rtw89_vif *)vif->drv_priv : NULL;
+-	if (!rtwvif)
++	if (!rtwvif_link)
+ 		return -EINVAL;
+ 
+ 	connected = rtw89_is_any_vif_connected_or_connecting(rtwdev);
+ 	opt.enable = enable;
+ 	opt.target_ch_mode = connected;
+ 	if (enable) {
+-		ret = rtw89_hw_scan_prehandle(rtwdev, rtwvif, connected);
++		ret = rtw89_hw_scan_prehandle(rtwdev, rtwvif_link, connected);
+ 		if (ret)
+ 			goto out;
+ 	}
+@@ -6652,7 +6739,7 @@ int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
+ 		opt.opch_end = connected ? 0 : RTW89_CHAN_INVALID;
+ 	}
+ 
+-	ret = mac->scan_offload(rtwdev, &opt, rtwvif, false);
++	ret = mac->scan_offload(rtwdev, &opt, rtwvif_link, false);
+ out:
+ 	return ret;
+ }
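rtw89_is_any_vif_connected_or_connecting() above now walks every link of every vif instead of checking a single per-vif bssid. A toy model of that two-level walk; plain arrays and a bitmap stand in for the driver's rtw89_for_each_rtwvif and rtw89_vif_for_each_link machinery:

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    #define MAX_LINKS 2

    struct vif_link { unsigned char bssid[6]; };
    struct vif {
            struct vif_link links[MAX_LINKS];
            unsigned long valid_links;   /* bitmap of live link ids */
    };

    static bool is_zero_ether_addr(const unsigned char *a)
    {
            static const unsigned char z[6];
            return memcmp(a, z, 6) == 0;
    }

    static bool any_link_connected(struct vif *vifs, int n)
    {
            for (int i = 0; i < n; i++)                      /* each vif  */
                    for (int id = 0; id < MAX_LINKS; id++) { /* each link */
                            if (!(vifs[i].valid_links & (1UL << id)))
                                    continue;
                            /* non-zero BSSID implies connected or
                             * attempting to connect */
                            if (!is_zero_ether_addr(vifs[i].links[id].bssid))
                                    return true;
                    }
            return false;
    }

    int main(void)
    {
            struct vif vifs[2] = { 0 };

            vifs[1].valid_links = 1;
            memcpy(vifs[1].links[0].bssid, "\x00\x11\x22\x33\x44\x55", 6);
            printf("%s\n", any_link_connected(vifs, 2) ? "connected"
                                                       : "idle");
            return 0;
    }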
+@@ -6758,7 +6845,7 @@ int rtw89_fw_h2c_pkt_drop(struct rtw89_dev *rtwdev,
+ }
+ 
+ #define H2C_KEEP_ALIVE_LEN 4
+-int rtw89_fw_h2c_keep_alive(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
++int rtw89_fw_h2c_keep_alive(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
+ 			    bool enable)
+ {
+ 	struct sk_buff *skb;
+@@ -6766,7 +6853,7 @@ int rtw89_fw_h2c_keep_alive(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ 	int ret;
+ 
+ 	if (enable) {
+-		ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif,
++		ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link,
+ 						   RTW89_PKT_OFLD_TYPE_NULL_DATA,
+ 						   &pkt_id);
+ 		if (ret)
+@@ -6784,7 +6871,7 @@ int rtw89_fw_h2c_keep_alive(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ 	RTW89_SET_KEEP_ALIVE_ENABLE(skb->data, enable);
+ 	RTW89_SET_KEEP_ALIVE_PKT_NULL_ID(skb->data, pkt_id);
+ 	RTW89_SET_KEEP_ALIVE_PERIOD(skb->data, 5);
+-	RTW89_SET_KEEP_ALIVE_MACID(skb->data, rtwvif->mac_id);
++	RTW89_SET_KEEP_ALIVE_MACID(skb->data, rtwvif_link->mac_id);
+ 
+ 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ 			      H2C_CAT_MAC,
+@@ -6806,7 +6893,7 @@ int rtw89_fw_h2c_keep_alive(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ 	return ret;
+ }
+ 
+-int rtw89_fw_h2c_arp_offload(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
++int rtw89_fw_h2c_arp_offload(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
+ 			     bool enable)
+ {
+ 	struct rtw89_h2c_arp_offload *h2c;
+@@ -6816,7 +6903,7 @@ int rtw89_fw_h2c_arp_offload(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ 	int ret;
+ 
+ 	if (enable) {
+-		ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif,
++		ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link,
+ 						   RTW89_PKT_OFLD_TYPE_ARP_RSP,
+ 						   &pkt_id);
+ 		if (ret)
+@@ -6834,7 +6921,7 @@ int rtw89_fw_h2c_arp_offload(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ 
+ 	h2c->w0 = le32_encode_bits(enable, RTW89_H2C_ARP_OFFLOAD_W0_ENABLE) |
+ 		  le32_encode_bits(0, RTW89_H2C_ARP_OFFLOAD_W0_ACTION) |
+-		  le32_encode_bits(rtwvif->mac_id, RTW89_H2C_ARP_OFFLOAD_W0_MACID) |
++		  le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_ARP_OFFLOAD_W0_MACID) |
+ 		  le32_encode_bits(pkt_id, RTW89_H2C_ARP_OFFLOAD_W0_PKT_ID);
+ 
+ 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+@@ -6859,11 +6946,11 @@ int rtw89_fw_h2c_arp_offload(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ 
+ #define H2C_DISCONNECT_DETECT_LEN 8
+ int rtw89_fw_h2c_disconnect_detect(struct rtw89_dev *rtwdev,
+-				   struct rtw89_vif *rtwvif, bool enable)
++				   struct rtw89_vif_link *rtwvif_link, bool enable)
+ {
+ 	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ 	struct sk_buff *skb;
+-	u8 macid = rtwvif->mac_id;
++	u8 macid = rtwvif_link->mac_id;
+ 	int ret;
+ 
+ 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DISCONNECT_DETECT_LEN);
+@@ -6902,7 +6989,7 @@ int rtw89_fw_h2c_disconnect_detect(struct rtw89_dev *rtwdev,
+ 	return ret;
+ }
+ 
+-int rtw89_fw_h2c_cfg_pno(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
++int rtw89_fw_h2c_cfg_pno(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
+ 			 bool enable)
+ {
+ 	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+@@ -6923,7 +7010,7 @@ int rtw89_fw_h2c_cfg_pno(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ 
+ 	h2c->w0 = le32_encode_bits(enable, RTW89_H2C_NLO_W0_ENABLE) |
+ 		  le32_encode_bits(enable, RTW89_H2C_NLO_W0_IGNORE_CIPHER) |
+-		  le32_encode_bits(rtwvif->mac_id, RTW89_H2C_NLO_W0_MACID);
++		  le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_NLO_W0_MACID);
+ 
+ 	if (enable) {
+ 		h2c->nlo_cnt = nd_config->n_match_sets;
+@@ -6953,12 +7040,12 @@ int rtw89_fw_h2c_cfg_pno(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ 	return ret;
+ }
+ 
+-int rtw89_fw_h2c_wow_global(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
++int rtw89_fw_h2c_wow_global(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
+ 			    bool enable)
+ {
+ 	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ 	struct rtw89_h2c_wow_global *h2c;
+-	u8 macid = rtwvif->mac_id;
++	u8 macid = rtwvif_link->mac_id;
+ 	u32 len = sizeof(*h2c);
+ 	struct sk_buff *skb;
+ 	int ret;
+@@ -7002,12 +7089,12 @@ int rtw89_fw_h2c_wow_global(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ 
+ #define H2C_WAKEUP_CTRL_LEN 4
+ int rtw89_fw_h2c_wow_wakeup_ctrl(struct rtw89_dev *rtwdev,
+-				 struct rtw89_vif *rtwvif,
++				 struct rtw89_vif_link *rtwvif_link,
+ 				 bool enable)
+ {
+ 	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ 	struct sk_buff *skb;
+-	u8 macid = rtwvif->mac_id;
++	u8 macid = rtwvif_link->mac_id;
+ 	int ret;
+ 
+ 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WAKEUP_CTRL_LEN);
+@@ -7100,13 +7187,13 @@ int rtw89_fw_wow_cam_update(struct rtw89_dev *rtwdev,
+ }
+ 
+ int rtw89_fw_h2c_wow_gtk_ofld(struct rtw89_dev *rtwdev,
+-			      struct rtw89_vif *rtwvif,
++			      struct rtw89_vif_link *rtwvif_link,
+ 			      bool enable)
+ {
+ 	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ 	struct rtw89_wow_gtk_info *gtk_info = &rtw_wow->gtk_info;
+ 	struct rtw89_h2c_wow_gtk_ofld *h2c;
+-	u8 macid = rtwvif->mac_id;
++	u8 macid = rtwvif_link->mac_id;
+ 	u32 len = sizeof(*h2c);
+ 	u8 pkt_id_sa_query = 0;
+ 	struct sk_buff *skb;
+@@ -7128,14 +7215,14 @@ int rtw89_fw_h2c_wow_gtk_ofld(struct rtw89_dev *rtwdev,
+ 	if (!enable)
+ 		goto hdr;
+ 
+-	ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif,
++	ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link,
+ 					   RTW89_PKT_OFLD_TYPE_EAPOL_KEY,
+ 					   &pkt_id_eapol);
+ 	if (ret)
+ 		goto fail;
+ 
+ 	if (gtk_info->igtk_keyid) {
+-		ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif,
++		ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link,
+ 						   RTW89_PKT_OFLD_TYPE_SA_QUERY,
+ 						   &pkt_id_sa_query);
+ 		if (ret)
+@@ -7173,7 +7260,7 @@ int rtw89_fw_h2c_wow_gtk_ofld(struct rtw89_dev *rtwdev,
+ 	return ret;
+ }
+ 
+-int rtw89_fw_h2c_fwips(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
++int rtw89_fw_h2c_fwips(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
+ 		       bool enable)
+ {
+ 	struct rtw89_wait_info *wait = &rtwdev->mac.ps_wait;
+@@ -7189,7 +7276,7 @@ int rtw89_fw_h2c_fwips(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ 	skb_put(skb, len);
+ 	h2c = (struct rtw89_h2c_fwips *)skb->data;
+ 
+-	h2c->w0 = le32_encode_bits(rtwvif->mac_id, RTW89_H2C_FW_IPS_W0_MACID) |
++	h2c->w0 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_FW_IPS_W0_MACID) |
+ 		  le32_encode_bits(enable, RTW89_H2C_FW_IPS_W0_ENABLE);
+ 
+ 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+diff --git a/drivers/net/wireless/realtek/rtw89/fw.h b/drivers/net/wireless/realtek/rtw89/fw.h
+index ad47e77d740b25..ccbbc43f33feed 100644
+--- a/drivers/net/wireless/realtek/rtw89/fw.h
++++ b/drivers/net/wireless/realtek/rtw89/fw.h
+@@ -4404,59 +4404,59 @@ void rtw89_h2c_pkt_set_hdr(struct rtw89_dev *rtwdev, struct sk_buff *skb,
+ 			   u8 type, u8 cat, u8 class, u8 func,
+ 			   bool rack, bool dack, u32 len);
+ int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev,
+-				  struct rtw89_vif *rtwvif,
+-				  struct rtw89_sta *rtwsta);
++				  struct rtw89_vif_link *rtwvif_link,
++				  struct rtw89_sta_link *rtwsta_link);
+ int rtw89_fw_h2c_default_cmac_tbl_g7(struct rtw89_dev *rtwdev,
+-				     struct rtw89_vif *rtwvif,
+-				     struct rtw89_sta *rtwsta);
++				     struct rtw89_vif_link *rtwvif_link,
++				     struct rtw89_sta_link *rtwsta_link);
+ int rtw89_fw_h2c_default_dmac_tbl_v2(struct rtw89_dev *rtwdev,
+-				     struct rtw89_vif *rtwvif,
+-				     struct rtw89_sta *rtwsta);
++				     struct rtw89_vif_link *rtwvif_link,
++				     struct rtw89_sta_link *rtwsta_link);
+ int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
+-				struct ieee80211_vif *vif,
+-				struct ieee80211_sta *sta);
++				struct rtw89_vif_link *rtwvif_link,
++				struct rtw89_sta_link *rtwsta_link);
+ int rtw89_fw_h2c_assoc_cmac_tbl_g7(struct rtw89_dev *rtwdev,
+-				   struct ieee80211_vif *vif,
+-				   struct ieee80211_sta *sta);
++				   struct rtw89_vif_link *rtwvif_link,
++				   struct rtw89_sta_link *rtwsta_link);
+ int rtw89_fw_h2c_ampdu_cmac_tbl_g7(struct rtw89_dev *rtwdev,
+-				   struct ieee80211_vif *vif,
+-				   struct ieee80211_sta *sta);
++				   struct rtw89_vif_link *rtwvif_link,
++				   struct rtw89_sta_link *rtwsta_link);
+ int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev,
+-				 struct rtw89_sta *rtwsta);
++				 struct rtw89_sta_link *rtwsta_link);
+ int rtw89_fw_h2c_txpath_cmac_tbl(struct rtw89_dev *rtwdev,
+-				 struct rtw89_sta *rtwsta);
++				 struct rtw89_sta_link *rtwsta_link);
+ int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev,
+-			       struct rtw89_vif *rtwvif);
++			       struct rtw89_vif_link *rtwvif_link);
+ int rtw89_fw_h2c_update_beacon_be(struct rtw89_dev *rtwdev,
+-				  struct rtw89_vif *rtwvif);
+-int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *vif,
+-		     struct rtw89_sta *rtwsta, const u8 *scan_mac_addr);
++				  struct rtw89_vif_link *rtwvif_link);
++int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif_link *vif,
++		     struct rtw89_sta_link *rtwsta_link, const u8 *scan_mac_addr);
+ int rtw89_fw_h2c_dctl_sec_cam_v1(struct rtw89_dev *rtwdev,
+-				 struct rtw89_vif *rtwvif,
+-				 struct rtw89_sta *rtwsta);
++				 struct rtw89_vif_link *rtwvif_link,
++				 struct rtw89_sta_link *rtwsta_link);
+ int rtw89_fw_h2c_dctl_sec_cam_v2(struct rtw89_dev *rtwdev,
+-				 struct rtw89_vif *rtwvif,
+-				 struct rtw89_sta *rtwsta);
++				 struct rtw89_vif_link *rtwvif_link,
++				 struct rtw89_sta_link *rtwsta_link);
+ void rtw89_fw_c2h_irqsafe(struct rtw89_dev *rtwdev, struct sk_buff *c2h);
+ void rtw89_fw_c2h_work(struct work_struct *work);
+ int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev,
+-			       struct rtw89_vif *rtwvif,
+-			       struct rtw89_sta *rtwsta,
++			       struct rtw89_vif_link *rtwvif_link,
++			       struct rtw89_sta_link *rtwsta_link,
+ 			       enum rtw89_upd_mode upd_mode);
+-int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+-			   struct rtw89_sta *rtwsta, bool dis_conn);
++int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
++			   struct rtw89_sta_link *rtwsta_link, bool dis_conn);
+ int rtw89_fw_h2c_notify_dbcc(struct rtw89_dev *rtwdev, bool en);
+ int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp,
+ 			     bool pause);
+-int rtw89_fw_h2c_set_edca(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
++int rtw89_fw_h2c_set_edca(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
+ 			  u8 ac, u32 val);
+ int rtw89_fw_h2c_set_ofld_cfg(struct rtw89_dev *rtwdev);
+ int rtw89_fw_h2c_set_bcn_fltr_cfg(struct rtw89_dev *rtwdev,
+-				  struct ieee80211_vif *vif,
++				  struct rtw89_vif_link *rtwvif_link,
+ 				  bool connect);
+ int rtw89_fw_h2c_rssi_offload(struct rtw89_dev *rtwdev,
+ 			      struct rtw89_rx_phy_ppdu *phy_ppdu);
+-int rtw89_fw_h2c_tp_offload(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif);
++int rtw89_fw_h2c_tp_offload(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link);
+ int rtw89_fw_h2c_ra(struct rtw89_dev *rtwdev, struct rtw89_ra_info *ra, bool csi);
+ int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev, u8 type);
+ int rtw89_fw_h2c_cxdrv_init_v7(struct rtw89_dev *rtwdev, u8 type);
+@@ -4478,11 +4478,11 @@ int rtw89_fw_h2c_scan_list_offload_be(struct rtw89_dev *rtwdev, int ch_num,
+ 				      struct list_head *chan_list);
+ int rtw89_fw_h2c_scan_offload_ax(struct rtw89_dev *rtwdev,
+ 				 struct rtw89_scan_option *opt,
+-				 struct rtw89_vif *vif,
++				 struct rtw89_vif_link *vif,
+ 				 bool wowlan);
+ int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev,
+ 				 struct rtw89_scan_option *opt,
+-				 struct rtw89_vif *vif,
++				 struct rtw89_vif_link *vif,
+ 				 bool wowlan);
+ int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev,
+ 			struct rtw89_fw_h2c_rf_reg_info *info,
+@@ -4508,14 +4508,19 @@ int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev,
+ int rtw89_fw_h2c_raw(struct rtw89_dev *rtwdev, const u8 *buf, u16 len);
+ void rtw89_fw_send_all_early_h2c(struct rtw89_dev *rtwdev);
+ void rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev);
+-int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
++int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
+ 			     u8 macid);
+ void rtw89_fw_release_general_pkt_list_vif(struct rtw89_dev *rtwdev,
+-					   struct rtw89_vif *rtwvif, bool notify_fw);
++					   struct rtw89_vif_link *rtwvif_link,
++					   bool notify_fw);
+ void rtw89_fw_release_general_pkt_list(struct rtw89_dev *rtwdev, bool notify_fw);
+-int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
++int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev,
++			struct rtw89_vif_link *rtwvif_link,
++			struct rtw89_sta_link *rtwsta_link,
+ 			bool valid, struct ieee80211_ampdu_params *params);
+-int rtw89_fw_h2c_ba_cam_v1(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
++int rtw89_fw_h2c_ba_cam_v1(struct rtw89_dev *rtwdev,
++			   struct rtw89_vif_link *rtwvif_link,
++			   struct rtw89_sta_link *rtwsta_link,
+ 			   bool valid, struct ieee80211_ampdu_params *params);
+ void rtw89_fw_h2c_init_dynamic_ba_cam_v0_ext(struct rtw89_dev *rtwdev);
+ int rtw89_fw_h2c_init_ba_cam_users(struct rtw89_dev *rtwdev, u8 users,
+@@ -4524,8 +4529,8 @@ int rtw89_fw_h2c_init_ba_cam_users(struct rtw89_dev *rtwdev, u8 users,
+ int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev,
+ 			  struct rtw89_lps_parm *lps_param);
+ int rtw89_fw_h2c_lps_ch_info(struct rtw89_dev *rtwdev,
+-			     struct rtw89_vif *rtwvif);
+-int rtw89_fw_h2c_fwips(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
++			     struct rtw89_vif_link *rtwvif_link);
++int rtw89_fw_h2c_fwips(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
+ 		       bool enable);
+ struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(struct rtw89_dev *rtwdev, u32 len);
+ struct sk_buff *rtw89_fw_h2c_alloc_skb_no_hdr(struct rtw89_dev *rtwdev, u32 len);
+@@ -4534,49 +4539,56 @@ int rtw89_fw_msg_reg(struct rtw89_dev *rtwdev,
+ 		     struct rtw89_mac_c2h_info *c2h_info);
+ int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable);
+ void rtw89_fw_st_dbg_dump(struct rtw89_dev *rtwdev);
+-void rtw89_hw_scan_start(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
+-			 struct ieee80211_scan_request *req);
+-void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
++void rtw89_hw_scan_start(struct rtw89_dev *rtwdev,
++			 struct rtw89_vif_link *rtwvif_link,
++			 struct ieee80211_scan_request *scan_req);
++void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev,
++			    struct rtw89_vif_link *rtwvif_link,
+ 			    bool aborted);
+-int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
++int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev,
++			  struct rtw89_vif_link *rtwvif_link,
+ 			  bool enable);
+-void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif);
++void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev,
++			 struct rtw89_vif_link *rtwvif_link);
+ int rtw89_hw_scan_add_chan_list_ax(struct rtw89_dev *rtwdev,
+-				   struct rtw89_vif *rtwvif, bool connected);
++				   struct rtw89_vif_link *rtwvif_link, bool connected);
+ int rtw89_pno_scan_add_chan_list_ax(struct rtw89_dev *rtwdev,
+-				    struct rtw89_vif *rtwvif);
++				    struct rtw89_vif_link *rtwvif_link);
+ int rtw89_hw_scan_add_chan_list_be(struct rtw89_dev *rtwdev,
+-				   struct rtw89_vif *rtwvif, bool connected);
++				   struct rtw89_vif_link *rtwvif_link, bool connected);
+ int rtw89_pno_scan_add_chan_list_be(struct rtw89_dev *rtwdev,
+-				    struct rtw89_vif *rtwvif);
++				    struct rtw89_vif_link *rtwvif_link);
+ int rtw89_fw_h2c_trigger_cpu_exception(struct rtw89_dev *rtwdev);
+ int rtw89_fw_h2c_pkt_drop(struct rtw89_dev *rtwdev,
+ 			  const struct rtw89_pkt_drop_params *params);
+-int rtw89_fw_h2c_p2p_act(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
++int rtw89_fw_h2c_p2p_act(struct rtw89_dev *rtwdev,
++			 struct rtw89_vif_link *rtwvif_link,
++			 struct ieee80211_bss_conf *bss_conf,
+ 			 struct ieee80211_p2p_noa_desc *desc,
+ 			 u8 act, u8 noa_id);
+-int rtw89_fw_h2c_tsf32_toggle(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
++int rtw89_fw_h2c_tsf32_toggle(struct rtw89_dev *rtwdev,
++			      struct rtw89_vif_link *rtwvif_link,
+ 			      bool en);
+-int rtw89_fw_h2c_wow_global(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
++int rtw89_fw_h2c_wow_global(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
+ 			    bool enable);
+ int rtw89_fw_h2c_wow_wakeup_ctrl(struct rtw89_dev *rtwdev,
+-				 struct rtw89_vif *rtwvif, bool enable);
+-int rtw89_fw_h2c_cfg_pno(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
++				 struct rtw89_vif_link *rtwvif_link, bool enable);
++int rtw89_fw_h2c_cfg_pno(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
+ 			 bool enable);
+-int rtw89_fw_h2c_keep_alive(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
++int rtw89_fw_h2c_keep_alive(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
+ 			    bool enable);
+ int rtw89_fw_h2c_arp_offload(struct rtw89_dev *rtwdev,
+-			     struct rtw89_vif *rtwvif, bool enable);
++			     struct rtw89_vif_link *rtwvif_link, bool enable);
+ int rtw89_fw_h2c_disconnect_detect(struct rtw89_dev *rtwdev,
+-				   struct rtw89_vif *rtwvif, bool enable);
+-int rtw89_fw_h2c_wow_global(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
++				   struct rtw89_vif_link *rtwvif_link, bool enable);
++int rtw89_fw_h2c_wow_global(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
+ 			    bool enable);
+ int rtw89_fw_h2c_wow_wakeup_ctrl(struct rtw89_dev *rtwdev,
+-				 struct rtw89_vif *rtwvif, bool enable);
++				 struct rtw89_vif_link *rtwvif_link, bool enable);
+ int rtw89_fw_wow_cam_update(struct rtw89_dev *rtwdev,
+ 			    struct rtw89_wow_cam_info *cam_info);
+ int rtw89_fw_h2c_wow_gtk_ofld(struct rtw89_dev *rtwdev,
+-			      struct rtw89_vif *rtwvif,
++			      struct rtw89_vif_link *rtwvif_link,
+ 			      bool enable);
+ int rtw89_fw_h2c_wow_request_aoac(struct rtw89_dev *rtwdev);
+ int rtw89_fw_h2c_add_mcc(struct rtw89_dev *rtwdev,
+@@ -4621,51 +4633,73 @@ static inline void rtw89_fw_h2c_init_ba_cam(struct rtw89_dev *rtwdev)
+ }
+ 
+ static inline int rtw89_chip_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev,
+-						  struct rtw89_vif *rtwvif,
+-						  struct rtw89_sta *rtwsta)
++						  struct rtw89_vif_link *rtwvif_link,
++						  struct rtw89_sta_link *rtwsta_link)
+ {
+ 	const struct rtw89_chip_info *chip = rtwdev->chip;
+ 
+-	return chip->ops->h2c_default_cmac_tbl(rtwdev, rtwvif, rtwsta);
++	return chip->ops->h2c_default_cmac_tbl(rtwdev, rtwvif_link, rtwsta_link);
+ }
+ 
+ static inline int rtw89_chip_h2c_default_dmac_tbl(struct rtw89_dev *rtwdev,
+-						  struct rtw89_vif *rtwvif,
+-						  struct rtw89_sta *rtwsta)
++						  struct rtw89_vif_link *rtwvif_link,
++						  struct rtw89_sta_link *rtwsta_link)
+ {
+ 	const struct rtw89_chip_info *chip = rtwdev->chip;
+ 
+ 	if (chip->ops->h2c_default_dmac_tbl)
+-		return chip->ops->h2c_default_dmac_tbl(rtwdev, rtwvif, rtwsta);
++		return chip->ops->h2c_default_dmac_tbl(rtwdev, rtwvif_link, rtwsta_link);
+ 
+ 	return 0;
+ }
+ 
+ static inline int rtw89_chip_h2c_update_beacon(struct rtw89_dev *rtwdev,
+-					       struct rtw89_vif *rtwvif)
++					       struct rtw89_vif_link *rtwvif_link)
+ {
+ 	const struct rtw89_chip_info *chip = rtwdev->chip;
+ 
+-	return chip->ops->h2c_update_beacon(rtwdev, rtwvif);
++	return chip->ops->h2c_update_beacon(rtwdev, rtwvif_link);
+ }
+ 
+ static inline int rtw89_chip_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
+-						struct ieee80211_vif *vif,
+-						struct ieee80211_sta *sta)
++						struct rtw89_vif_link *rtwvif_link,
++						struct rtw89_sta_link *rtwsta_link)
+ {
+ 	const struct rtw89_chip_info *chip = rtwdev->chip;
+ 
+-	return chip->ops->h2c_assoc_cmac_tbl(rtwdev, vif, sta);
++	return chip->ops->h2c_assoc_cmac_tbl(rtwdev, rtwvif_link, rtwsta_link);
+ }
+ 
+-static inline int rtw89_chip_h2c_ampdu_cmac_tbl(struct rtw89_dev *rtwdev,
+-						struct ieee80211_vif *vif,
+-						struct ieee80211_sta *sta)
++static inline
++int rtw89_chip_h2c_ampdu_link_cmac_tbl(struct rtw89_dev *rtwdev,
++				       struct rtw89_vif_link *rtwvif_link,
++				       struct rtw89_sta_link *rtwsta_link)
+ {
+ 	const struct rtw89_chip_info *chip = rtwdev->chip;
+ 
+ 	if (chip->ops->h2c_ampdu_cmac_tbl)
+-		return chip->ops->h2c_ampdu_cmac_tbl(rtwdev, vif, sta);
++		return chip->ops->h2c_ampdu_cmac_tbl(rtwdev, rtwvif_link,
++						     rtwsta_link);
++
++	return 0;
++}
++
++static inline int rtw89_chip_h2c_ampdu_cmac_tbl(struct rtw89_dev *rtwdev,
++						struct rtw89_vif *rtwvif,
++						struct rtw89_sta *rtwsta)
++{
++	struct rtw89_vif_link *rtwvif_link;
++	struct rtw89_sta_link *rtwsta_link;
++	unsigned int link_id;
++	int ret;
++
++	rtw89_sta_for_each_link(rtwsta, rtwsta_link, link_id) {
++		rtwvif_link = rtwsta_link->rtwvif_link;
++		ret = rtw89_chip_h2c_ampdu_link_cmac_tbl(rtwdev, rtwvif_link,
++							 rtwsta_link);
++		if (ret)
++			return ret;
++	}
+ 
+ 	return 0;
+ }
+@@ -4675,8 +4709,20 @@ int rtw89_chip_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
+ 			  bool valid, struct ieee80211_ampdu_params *params)
+ {
+ 	const struct rtw89_chip_info *chip = rtwdev->chip;
++	struct rtw89_vif_link *rtwvif_link;
++	struct rtw89_sta_link *rtwsta_link;
++	unsigned int link_id;
++	int ret;
++
++	rtw89_sta_for_each_link(rtwsta, rtwsta_link, link_id) {
++		rtwvif_link = rtwsta_link->rtwvif_link;
++		ret = chip->ops->h2c_ba_cam(rtwdev, rtwvif_link, rtwsta_link,
++					    valid, params);
++		if (ret)
++			return ret;
++	}
+ 
+-	return chip->ops->h2c_ba_cam(rtwdev, rtwsta, valid, params);
++	return 0;
+ }
+ 
+ /* must consider compatibility; don't insert new in the mid */
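The rtw89_chip_h2c_ba_cam() and rtw89_chip_h2c_ampdu_cmac_tbl() wrappers above keep their MLO-unaware signatures, loop over the station's links, and forward each one to the per-link chip op, aborting on the first error. A simplified stand-alone version of that fan-out pattern, with placeholder types and a stub in place of chip->ops->h2c_ba_cam():

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_LINKS 2

    struct sta_link { int mac_id; };
    struct sta {
            struct sta_link *links[MAX_LINKS];  /* NULL when absent */
    };

    /* per-link chip op, analogous to chip->ops->h2c_ba_cam() */
    static int h2c_ba_cam_link(struct sta_link *link, bool valid)
    {
            printf("program BA CAM for mac_id %d (valid=%d)\n",
                   link->mac_id, valid);
            return 0;
    }

    /* MLO-unaware entry point: fan out, stop at the first failure */
    static int chip_h2c_ba_cam(struct sta *sta, bool valid)
    {
            for (int id = 0; id < MAX_LINKS; id++) {
                    struct sta_link *link = sta->links[id];
                    int ret;

                    if (!link)
                            continue;
                    ret = h2c_ba_cam_link(link, valid);
                    if (ret)
                            return ret;
            }
            return 0;
    }

    int main(void)
    {
            struct sta_link l0 = { .mac_id = 3 }, l1 = { .mac_id = 7 };
            struct sta sta = { .links = { &l0, &l1 } };

            return chip_h2c_ba_cam(&sta, true);
    }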
+diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c
+index c70a23a763b0ee..4e15d539e3d1c4 100644
+--- a/drivers/net/wireless/realtek/rtw89/mac.c
++++ b/drivers/net/wireless/realtek/rtw89/mac.c
+@@ -4076,17 +4076,17 @@ static const struct rtw89_port_reg rtw89_port_base_ax = {
+ };
+ 
+ static void rtw89_mac_check_packet_ctrl(struct rtw89_dev *rtwdev,
+-					struct rtw89_vif *rtwvif, u8 type)
++					struct rtw89_vif_link *rtwvif_link, u8 type)
+ {
+ 	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+ 	const struct rtw89_port_reg *p = mac->port_base;
+-	u8 mask = B_AX_PTCL_DBG_INFO_MASK_BY_PORT(rtwvif->port);
++	u8 mask = B_AX_PTCL_DBG_INFO_MASK_BY_PORT(rtwvif_link->port);
+ 	u32 reg_info, reg_ctrl;
+ 	u32 val;
+ 	int ret;
+ 
+-	reg_info = rtw89_mac_reg_by_idx(rtwdev, p->ptcl_dbg_info, rtwvif->mac_idx);
+-	reg_ctrl = rtw89_mac_reg_by_idx(rtwdev, p->ptcl_dbg, rtwvif->mac_idx);
++	reg_info = rtw89_mac_reg_by_idx(rtwdev, p->ptcl_dbg_info, rtwvif_link->mac_idx);
++	reg_ctrl = rtw89_mac_reg_by_idx(rtwdev, p->ptcl_dbg, rtwvif_link->mac_idx);
+ 
+ 	rtw89_write32_mask(rtwdev, reg_ctrl, B_AX_PTCL_DBG_SEL_MASK, type);
+ 	rtw89_write32_set(rtwdev, reg_ctrl, B_AX_PTCL_DBG_EN);
+@@ -4098,26 +4098,32 @@ static void rtw89_mac_check_packet_ctrl(struct rtw89_dev *rtwdev,
+ 		rtw89_warn(rtwdev, "Polling beacon packet empty fail\n");
+ }
+ 
+-static void rtw89_mac_bcn_drop(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
++static void rtw89_mac_bcn_drop(struct rtw89_dev *rtwdev,
++			       struct rtw89_vif_link *rtwvif_link)
+ {
+ 	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+ 	const struct rtw89_port_reg *p = mac->port_base;
+ 
+-	rtw89_write32_set(rtwdev, p->bcn_drop_all, BIT(rtwvif->port));
+-	rtw89_write32_port_mask(rtwdev, rtwvif, p->tbtt_prohib, B_AX_TBTT_SETUP_MASK, 1);
+-	rtw89_write32_port_mask(rtwdev, rtwvif, p->bcn_area, B_AX_BCN_MSK_AREA_MASK, 0);
+-	rtw89_write32_port_mask(rtwdev, rtwvif, p->tbtt_prohib, B_AX_TBTT_HOLD_MASK, 0);
+-	rtw89_write32_port_mask(rtwdev, rtwvif, p->bcn_early, B_AX_BCNERLY_MASK, 2);
+-	rtw89_write16_port_mask(rtwdev, rtwvif, p->tbtt_early, B_AX_TBTTERLY_MASK, 1);
+-	rtw89_write32_port_mask(rtwdev, rtwvif, p->bcn_space, B_AX_BCN_SPACE_MASK, 1);
+-	rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, B_AX_BCNTX_EN);
+-
+-	rtw89_mac_check_packet_ctrl(rtwdev, rtwvif, AX_PTCL_DBG_BCNQ_NUM0);
+-	if (rtwvif->port == RTW89_PORT_0)
+-		rtw89_mac_check_packet_ctrl(rtwdev, rtwvif, AX_PTCL_DBG_BCNQ_NUM1);
+-
+-	rtw89_write32_clr(rtwdev, p->bcn_drop_all, BIT(rtwvif->port));
+-	rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, B_AX_TBTT_PROHIB_EN);
++	rtw89_write32_set(rtwdev, p->bcn_drop_all, BIT(rtwvif_link->port));
++	rtw89_write32_port_mask(rtwdev, rtwvif_link, p->tbtt_prohib, B_AX_TBTT_SETUP_MASK,
++				1);
++	rtw89_write32_port_mask(rtwdev, rtwvif_link, p->bcn_area, B_AX_BCN_MSK_AREA_MASK,
++				0);
++	rtw89_write32_port_mask(rtwdev, rtwvif_link, p->tbtt_prohib, B_AX_TBTT_HOLD_MASK,
++				0);
++	rtw89_write32_port_mask(rtwdev, rtwvif_link, p->bcn_early, B_AX_BCNERLY_MASK, 2);
++	rtw89_write16_port_mask(rtwdev, rtwvif_link, p->tbtt_early,
++				B_AX_TBTTERLY_MASK, 1);
++	rtw89_write32_port_mask(rtwdev, rtwvif_link, p->bcn_space,
++				B_AX_BCN_SPACE_MASK, 1);
++	rtw89_write32_port_set(rtwdev, rtwvif_link, p->port_cfg, B_AX_BCNTX_EN);
++
++	rtw89_mac_check_packet_ctrl(rtwdev, rtwvif_link, AX_PTCL_DBG_BCNQ_NUM0);
++	if (rtwvif_link->port == RTW89_PORT_0)
++		rtw89_mac_check_packet_ctrl(rtwdev, rtwvif_link, AX_PTCL_DBG_BCNQ_NUM1);
++
++	rtw89_write32_clr(rtwdev, p->bcn_drop_all, BIT(rtwvif_link->port));
++	rtw89_write32_port_clr(rtwdev, rtwvif_link, p->port_cfg, B_AX_TBTT_PROHIB_EN);
+ 	fsleep(2000);
+ }
+ 
+@@ -4131,286 +4137,329 @@ static void rtw89_mac_bcn_drop(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvi
+ #define BCN_ERLY_SET_DLY (10 * 2)
+ 
+ static void rtw89_mac_port_cfg_func_sw(struct rtw89_dev *rtwdev,
+-				       struct rtw89_vif *rtwvif)
++				       struct rtw89_vif_link *rtwvif_link)
+ {
+ 	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+ 	const struct rtw89_port_reg *p = mac->port_base;
+-	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
+ 	const struct rtw89_chip_info *chip = rtwdev->chip;
++	struct ieee80211_bss_conf *bss_conf;
+ 	bool need_backup = false;
+ 	u32 backup_val;
++	u16 beacon_int;
+ 
+-	if (!rtw89_read32_port_mask(rtwdev, rtwvif, p->port_cfg, B_AX_PORT_FUNC_EN))
++	if (!rtw89_read32_port_mask(rtwdev, rtwvif_link, p->port_cfg, B_AX_PORT_FUNC_EN))
+ 		return;
+ 
+-	if (chip->chip_id == RTL8852A && rtwvif->port != RTW89_PORT_0) {
++	if (chip->chip_id == RTL8852A && rtwvif_link->port != RTW89_PORT_0) {
+ 		need_backup = true;
+-		backup_val = rtw89_read32_port(rtwdev, rtwvif, p->tbtt_prohib);
++		backup_val = rtw89_read32_port(rtwdev, rtwvif_link, p->tbtt_prohib);
+ 	}
+ 
+-	if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE)
+-		rtw89_mac_bcn_drop(rtwdev, rtwvif);
++	if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE)
++		rtw89_mac_bcn_drop(rtwdev, rtwvif_link);
+ 
+ 	if (chip->chip_id == RTL8852A) {
+-		rtw89_write32_port_clr(rtwdev, rtwvif, p->tbtt_prohib, B_AX_TBTT_SETUP_MASK);
+-		rtw89_write32_port_mask(rtwdev, rtwvif, p->tbtt_prohib, B_AX_TBTT_HOLD_MASK, 1);
+-		rtw89_write16_port_clr(rtwdev, rtwvif, p->tbtt_early, B_AX_TBTTERLY_MASK);
+-		rtw89_write16_port_clr(rtwdev, rtwvif, p->bcn_early, B_AX_BCNERLY_MASK);
++		rtw89_write32_port_clr(rtwdev, rtwvif_link, p->tbtt_prohib,
++				       B_AX_TBTT_SETUP_MASK);
++		rtw89_write32_port_mask(rtwdev, rtwvif_link, p->tbtt_prohib,
++					B_AX_TBTT_HOLD_MASK, 1);
++		rtw89_write16_port_clr(rtwdev, rtwvif_link, p->tbtt_early,
++				       B_AX_TBTTERLY_MASK);
++		rtw89_write16_port_clr(rtwdev, rtwvif_link, p->bcn_early,
++				       B_AX_BCNERLY_MASK);
+ 	}
+ 
+-	msleep(vif->bss_conf.beacon_int + 1);
+-	rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, B_AX_PORT_FUNC_EN |
++	rcu_read_lock();
++
++	bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
++	beacon_int = bss_conf->beacon_int;
++
++	rcu_read_unlock();
++
++	msleep(beacon_int + 1);
++	rtw89_write32_port_clr(rtwdev, rtwvif_link, p->port_cfg, B_AX_PORT_FUNC_EN |
+ 							    B_AX_BRK_SETUP);
+-	rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, B_AX_TSFTR_RST);
+-	rtw89_write32_port(rtwdev, rtwvif, p->bcn_cnt_tmr, 0);
++	rtw89_write32_port_set(rtwdev, rtwvif_link, p->port_cfg, B_AX_TSFTR_RST);
++	rtw89_write32_port(rtwdev, rtwvif_link, p->bcn_cnt_tmr, 0);
+ 
+ 	if (need_backup)
+-		rtw89_write32_port(rtwdev, rtwvif, p->tbtt_prohib, backup_val);
++		rtw89_write32_port(rtwdev, rtwvif_link, p->tbtt_prohib, backup_val);
+ }
+ 
+ static void rtw89_mac_port_cfg_tx_rpt(struct rtw89_dev *rtwdev,
+-				      struct rtw89_vif *rtwvif, bool en)
++				      struct rtw89_vif_link *rtwvif_link, bool en)
+ {
+ 	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+ 	const struct rtw89_port_reg *p = mac->port_base;
+ 
+ 	if (en)
+-		rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, B_AX_TXBCN_RPT_EN);
++		rtw89_write32_port_set(rtwdev, rtwvif_link, p->port_cfg,
++				       B_AX_TXBCN_RPT_EN);
+ 	else
+-		rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, B_AX_TXBCN_RPT_EN);
++		rtw89_write32_port_clr(rtwdev, rtwvif_link, p->port_cfg,
++				       B_AX_TXBCN_RPT_EN);
+ }
+ 
+ static void rtw89_mac_port_cfg_rx_rpt(struct rtw89_dev *rtwdev,
+-				      struct rtw89_vif *rtwvif, bool en)
++				      struct rtw89_vif_link *rtwvif_link, bool en)
+ {
+ 	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+ 	const struct rtw89_port_reg *p = mac->port_base;
+ 
+ 	if (en)
+-		rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, B_AX_RXBCN_RPT_EN);
++		rtw89_write32_port_set(rtwdev, rtwvif_link, p->port_cfg,
++				       B_AX_RXBCN_RPT_EN);
+ 	else
+-		rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, B_AX_RXBCN_RPT_EN);
++		rtw89_write32_port_clr(rtwdev, rtwvif_link, p->port_cfg,
++				       B_AX_RXBCN_RPT_EN);
+ }
+ 
+ static void rtw89_mac_port_cfg_net_type(struct rtw89_dev *rtwdev,
+-					struct rtw89_vif *rtwvif)
++					struct rtw89_vif_link *rtwvif_link)
+ {
+ 	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+ 	const struct rtw89_port_reg *p = mac->port_base;
+ 
+-	rtw89_write32_port_mask(rtwdev, rtwvif, p->port_cfg, B_AX_NET_TYPE_MASK,
+-				rtwvif->net_type);
++	rtw89_write32_port_mask(rtwdev, rtwvif_link, p->port_cfg, B_AX_NET_TYPE_MASK,
++				rtwvif_link->net_type);
+ }
+ 
+ static void rtw89_mac_port_cfg_bcn_prct(struct rtw89_dev *rtwdev,
+-					struct rtw89_vif *rtwvif)
++					struct rtw89_vif_link *rtwvif_link)
+ {
+ 	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+ 	const struct rtw89_port_reg *p = mac->port_base;
+-	bool en = rtwvif->net_type != RTW89_NET_TYPE_NO_LINK;
++	bool en = rtwvif_link->net_type != RTW89_NET_TYPE_NO_LINK;
+ 	u32 bits = B_AX_TBTT_PROHIB_EN | B_AX_BRK_SETUP;
+ 
+ 	if (en)
+-		rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, bits);
++		rtw89_write32_port_set(rtwdev, rtwvif_link, p->port_cfg, bits);
+ 	else
+-		rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, bits);
++		rtw89_write32_port_clr(rtwdev, rtwvif_link, p->port_cfg, bits);
+ }
+ 
+ static void rtw89_mac_port_cfg_rx_sw(struct rtw89_dev *rtwdev,
+-				     struct rtw89_vif *rtwvif)
++				     struct rtw89_vif_link *rtwvif_link)
+ {
+ 	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+ 	const struct rtw89_port_reg *p = mac->port_base;
+-	bool en = rtwvif->net_type == RTW89_NET_TYPE_INFRA ||
+-		  rtwvif->net_type == RTW89_NET_TYPE_AD_HOC;
++	bool en = rtwvif_link->net_type == RTW89_NET_TYPE_INFRA ||
++		  rtwvif_link->net_type == RTW89_NET_TYPE_AD_HOC;
+ 	u32 bit = B_AX_RX_BSSID_FIT_EN;
+ 
+ 	if (en)
+-		rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, bit);
++		rtw89_write32_port_set(rtwdev, rtwvif_link, p->port_cfg, bit);
+ 	else
+-		rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, bit);
++		rtw89_write32_port_clr(rtwdev, rtwvif_link, p->port_cfg, bit);
+ }
+ 
+ void rtw89_mac_port_cfg_rx_sync(struct rtw89_dev *rtwdev,
+-				struct rtw89_vif *rtwvif, bool en)
++				struct rtw89_vif_link *rtwvif_link, bool en)
+ {
+ 	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+ 	const struct rtw89_port_reg *p = mac->port_base;
+ 
+ 	if (en)
+-		rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, B_AX_TSF_UDT_EN);
++		rtw89_write32_port_set(rtwdev, rtwvif_link, p->port_cfg, B_AX_TSF_UDT_EN);
+ 	else
+-		rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, B_AX_TSF_UDT_EN);
++		rtw89_write32_port_clr(rtwdev, rtwvif_link, p->port_cfg, B_AX_TSF_UDT_EN);
+ }
+ 
+ static void rtw89_mac_port_cfg_rx_sync_by_nettype(struct rtw89_dev *rtwdev,
+-						  struct rtw89_vif *rtwvif)
++						  struct rtw89_vif_link *rtwvif_link)
+ {
+-	bool en = rtwvif->net_type == RTW89_NET_TYPE_INFRA ||
+-		  rtwvif->net_type == RTW89_NET_TYPE_AD_HOC;
++	bool en = rtwvif_link->net_type == RTW89_NET_TYPE_INFRA ||
++		  rtwvif_link->net_type == RTW89_NET_TYPE_AD_HOC;
+ 
+-	rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif, en);
++	rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif_link, en);
+ }
+ 
+ static void rtw89_mac_port_cfg_tx_sw(struct rtw89_dev *rtwdev,
+-				     struct rtw89_vif *rtwvif, bool en)
++				     struct rtw89_vif_link *rtwvif_link, bool en)
+ {
+ 	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+ 	const struct rtw89_port_reg *p = mac->port_base;
+ 
+ 	if (en)
+-		rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, B_AX_BCNTX_EN);
++		rtw89_write32_port_set(rtwdev, rtwvif_link, p->port_cfg, B_AX_BCNTX_EN);
+ 	else
+-		rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, B_AX_BCNTX_EN);
++		rtw89_write32_port_clr(rtwdev, rtwvif_link, p->port_cfg, B_AX_BCNTX_EN);
+ }
+ 
+ static void rtw89_mac_port_cfg_tx_sw_by_nettype(struct rtw89_dev *rtwdev,
+-						struct rtw89_vif *rtwvif)
++						struct rtw89_vif_link *rtwvif_link)
+ {
+-	bool en = rtwvif->net_type == RTW89_NET_TYPE_AP_MODE ||
+-		  rtwvif->net_type == RTW89_NET_TYPE_AD_HOC;
++	bool en = rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE ||
++		  rtwvif_link->net_type == RTW89_NET_TYPE_AD_HOC;
+ 
+-	rtw89_mac_port_cfg_tx_sw(rtwdev, rtwvif, en);
++	rtw89_mac_port_cfg_tx_sw(rtwdev, rtwvif_link, en);
+ }
+ 
+ void rtw89_mac_enable_beacon_for_ap_vifs(struct rtw89_dev *rtwdev, bool en)
+ {
++	struct rtw89_vif_link *rtwvif_link;
+ 	struct rtw89_vif *rtwvif;
++	unsigned int link_id;
+ 
+ 	rtw89_for_each_rtwvif(rtwdev, rtwvif)
+-		if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE)
+-			rtw89_mac_port_cfg_tx_sw(rtwdev, rtwvif, en);
++		rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id)
++			if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE)
++				rtw89_mac_port_cfg_tx_sw(rtwdev, rtwvif_link, en);
+ }
+ 
+ static void rtw89_mac_port_cfg_bcn_intv(struct rtw89_dev *rtwdev,
+-					struct rtw89_vif *rtwvif)
++					struct rtw89_vif_link *rtwvif_link)
+ {
+ 	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+ 	const struct rtw89_port_reg *p = mac->port_base;
+-	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
+-	u16 bcn_int = vif->bss_conf.beacon_int ? vif->bss_conf.beacon_int : BCN_INTERVAL;
++	struct ieee80211_bss_conf *bss_conf;
++	u16 bcn_int;
++
++	rcu_read_lock();
++
++	bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
++	if (bss_conf->beacon_int)
++		bcn_int = bss_conf->beacon_int;
++	else
++		bcn_int = BCN_INTERVAL;
++
++	rcu_read_unlock();
+ 
+-	rtw89_write32_port_mask(rtwdev, rtwvif, p->bcn_space, B_AX_BCN_SPACE_MASK,
++	rtw89_write32_port_mask(rtwdev, rtwvif_link, p->bcn_space, B_AX_BCN_SPACE_MASK,
+ 				bcn_int);
+ }
+ 
+ static void rtw89_mac_port_cfg_hiq_win(struct rtw89_dev *rtwdev,
+-				       struct rtw89_vif *rtwvif)
++				       struct rtw89_vif_link *rtwvif_link)
+ {
+-	u8 win = rtwvif->net_type == RTW89_NET_TYPE_AP_MODE ? 16 : 0;
++	u8 win = rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE ? 16 : 0;
+ 	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+ 	const struct rtw89_port_reg *p = mac->port_base;
+-	u8 port = rtwvif->port;
++	u8 port = rtwvif_link->port;
+ 	u32 reg;
+ 
+-	reg = rtw89_mac_reg_by_idx(rtwdev, p->hiq_win[port], rtwvif->mac_idx);
++	reg = rtw89_mac_reg_by_idx(rtwdev, p->hiq_win[port], rtwvif_link->mac_idx);
+ 	rtw89_write8(rtwdev, reg, win);
+ }
+ 
+ static void rtw89_mac_port_cfg_hiq_dtim(struct rtw89_dev *rtwdev,
+-					struct rtw89_vif *rtwvif)
++					struct rtw89_vif_link *rtwvif_link)
+ {
+ 	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+ 	const struct rtw89_port_reg *p = mac->port_base;
+-	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
++	struct ieee80211_bss_conf *bss_conf;
++	u8 dtim_period;
+ 	u32 addr;
+ 
+-	addr = rtw89_mac_reg_by_idx(rtwdev, p->md_tsft, rtwvif->mac_idx);
++	rcu_read_lock();
++
++	bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
++	dtim_period = bss_conf->dtim_period;
++
++	rcu_read_unlock();
++
++	addr = rtw89_mac_reg_by_idx(rtwdev, p->md_tsft, rtwvif_link->mac_idx);
+ 	rtw89_write8_set(rtwdev, addr, B_AX_UPD_HGQMD | B_AX_UPD_TIMIE);
+ 
+-	rtw89_write16_port_mask(rtwdev, rtwvif, p->dtim_ctrl, B_AX_DTIM_NUM_MASK,
+-				vif->bss_conf.dtim_period);
++	rtw89_write16_port_mask(rtwdev, rtwvif_link, p->dtim_ctrl, B_AX_DTIM_NUM_MASK,
++				dtim_period);
+ }
+ 
+ static void rtw89_mac_port_cfg_bcn_setup_time(struct rtw89_dev *rtwdev,
+-					      struct rtw89_vif *rtwvif)
++					      struct rtw89_vif_link *rtwvif_link)
+ {
+ 	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+ 	const struct rtw89_port_reg *p = mac->port_base;
+ 
+-	rtw89_write32_port_mask(rtwdev, rtwvif, p->tbtt_prohib,
++	rtw89_write32_port_mask(rtwdev, rtwvif_link, p->tbtt_prohib,
+ 				B_AX_TBTT_SETUP_MASK, BCN_SETUP_DEF);
+ }
+ 
+ static void rtw89_mac_port_cfg_bcn_hold_time(struct rtw89_dev *rtwdev,
+-					     struct rtw89_vif *rtwvif)
++					     struct rtw89_vif_link *rtwvif_link)
+ {
+ 	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+ 	const struct rtw89_port_reg *p = mac->port_base;
+ 
+-	rtw89_write32_port_mask(rtwdev, rtwvif, p->tbtt_prohib,
++	rtw89_write32_port_mask(rtwdev, rtwvif_link, p->tbtt_prohib,
+ 				B_AX_TBTT_HOLD_MASK, BCN_HOLD_DEF);
+ }
+ 
+ static void rtw89_mac_port_cfg_bcn_mask_area(struct rtw89_dev *rtwdev,
+-					     struct rtw89_vif *rtwvif)
++					     struct rtw89_vif_link *rtwvif_link)
+ {
+ 	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+ 	const struct rtw89_port_reg *p = mac->port_base;
+ 
+-	rtw89_write32_port_mask(rtwdev, rtwvif, p->bcn_area,
++	rtw89_write32_port_mask(rtwdev, rtwvif_link, p->bcn_area,
+ 				B_AX_BCN_MSK_AREA_MASK, BCN_MASK_DEF);
+ }
+ 
+ static void rtw89_mac_port_cfg_tbtt_early(struct rtw89_dev *rtwdev,
+-					  struct rtw89_vif *rtwvif)
++					  struct rtw89_vif_link *rtwvif_link)
+ {
+ 	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+ 	const struct rtw89_port_reg *p = mac->port_base;
+ 
+-	rtw89_write16_port_mask(rtwdev, rtwvif, p->tbtt_early,
++	rtw89_write16_port_mask(rtwdev, rtwvif_link, p->tbtt_early,
+ 				B_AX_TBTTERLY_MASK, TBTT_ERLY_DEF);
+ }
+ 
+ static void rtw89_mac_port_cfg_bss_color(struct rtw89_dev *rtwdev,
+-					 struct rtw89_vif *rtwvif)
++					 struct rtw89_vif_link *rtwvif_link)
+ {
+ 	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+ 	const struct rtw89_port_reg *p = mac->port_base;
+-	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
+ 	static const u32 masks[RTW89_PORT_NUM] = {
+ 		B_AX_BSS_COLOB_AX_PORT_0_MASK, B_AX_BSS_COLOB_AX_PORT_1_MASK,
+ 		B_AX_BSS_COLOB_AX_PORT_2_MASK, B_AX_BSS_COLOB_AX_PORT_3_MASK,
+ 		B_AX_BSS_COLOB_AX_PORT_4_MASK,
+ 	};
+-	u8 port = rtwvif->port;
++	struct ieee80211_bss_conf *bss_conf;
++	u8 port = rtwvif_link->port;
+ 	u32 reg_base;
+ 	u32 reg;
+ 	u8 bss_color;
+ 
+-	bss_color = vif->bss_conf.he_bss_color.color;
++	rcu_read_lock();
++
++	bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
++	bss_color = bss_conf->he_bss_color.color;
++
++	rcu_read_unlock();
++
+ 	reg_base = port >= 4 ? p->bss_color + 4 : p->bss_color;
+-	reg = rtw89_mac_reg_by_idx(rtwdev, reg_base, rtwvif->mac_idx);
++	reg = rtw89_mac_reg_by_idx(rtwdev, reg_base, rtwvif_link->mac_idx);
+ 	rtw89_write32_mask(rtwdev, reg, masks[port], bss_color);
+ }
+ 
+ static void rtw89_mac_port_cfg_mbssid(struct rtw89_dev *rtwdev,
+-				      struct rtw89_vif *rtwvif)
++				      struct rtw89_vif_link *rtwvif_link)
+ {
+ 	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+ 	const struct rtw89_port_reg *p = mac->port_base;
+-	u8 port = rtwvif->port;
++	u8 port = rtwvif_link->port;
+ 	u32 reg;
+ 
+-	if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE)
++	if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE)
+ 		return;
+ 
+ 	if (port == 0) {
+-		reg = rtw89_mac_reg_by_idx(rtwdev, p->mbssid, rtwvif->mac_idx);
++		reg = rtw89_mac_reg_by_idx(rtwdev, p->mbssid, rtwvif_link->mac_idx);
+ 		rtw89_write32_clr(rtwdev, reg, B_AX_P0MB_ALL_MASK);
+ 	}
+ }
+ 
+ static void rtw89_mac_port_cfg_hiq_drop(struct rtw89_dev *rtwdev,
+-					struct rtw89_vif *rtwvif)
++					struct rtw89_vif_link *rtwvif_link)
+ {
+ 	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+ 	const struct rtw89_port_reg *p = mac->port_base;
+-	u8 port = rtwvif->port;
++	u8 port = rtwvif_link->port;
+ 	u32 reg;
+ 	u32 val;
+ 
+-	reg = rtw89_mac_reg_by_idx(rtwdev, p->mbssid_drop, rtwvif->mac_idx);
++	reg = rtw89_mac_reg_by_idx(rtwdev, p->mbssid_drop, rtwvif_link->mac_idx);
+ 	val = rtw89_read32(rtwdev, reg);
+ 	val &= ~FIELD_PREP(B_AX_PORT_DROP_4_0_MASK, BIT(port));
+ 	if (port == 0)
+@@ -4419,31 +4468,31 @@ static void rtw89_mac_port_cfg_hiq_drop(struct rtw89_dev *rtwdev,
+ }
+ 
+ static void rtw89_mac_port_cfg_func_en(struct rtw89_dev *rtwdev,
+-				       struct rtw89_vif *rtwvif, bool enable)
++				       struct rtw89_vif_link *rtwvif_link, bool enable)
+ {
+ 	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+ 	const struct rtw89_port_reg *p = mac->port_base;
+ 
+ 	if (enable)
+-		rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg,
++		rtw89_write32_port_set(rtwdev, rtwvif_link, p->port_cfg,
+ 				       B_AX_PORT_FUNC_EN);
+ 	else
+-		rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg,
++		rtw89_write32_port_clr(rtwdev, rtwvif_link, p->port_cfg,
+ 				       B_AX_PORT_FUNC_EN);
+ }
+ 
+ static void rtw89_mac_port_cfg_bcn_early(struct rtw89_dev *rtwdev,
+-					 struct rtw89_vif *rtwvif)
++					 struct rtw89_vif_link *rtwvif_link)
+ {
+ 	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+ 	const struct rtw89_port_reg *p = mac->port_base;
+ 
+-	rtw89_write32_port_mask(rtwdev, rtwvif, p->bcn_early, B_AX_BCNERLY_MASK,
++	rtw89_write32_port_mask(rtwdev, rtwvif_link, p->bcn_early, B_AX_BCNERLY_MASK,
+ 				BCN_ERLY_DEF);
+ }
+ 
+ static void rtw89_mac_port_cfg_tbtt_shift(struct rtw89_dev *rtwdev,
+-					  struct rtw89_vif *rtwvif)
++					  struct rtw89_vif_link *rtwvif_link)
+ {
+ 	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+ 	const struct rtw89_port_reg *p = mac->port_base;
+@@ -4452,20 +4501,20 @@ static void rtw89_mac_port_cfg_tbtt_shift(struct rtw89_dev *rtwdev,
+ 	if (rtwdev->chip->chip_id != RTL8852C)
+ 		return;
+ 
+-	if (rtwvif->wifi_role != RTW89_WIFI_ROLE_P2P_CLIENT &&
+-	    rtwvif->wifi_role != RTW89_WIFI_ROLE_STATION)
++	if (rtwvif_link->wifi_role != RTW89_WIFI_ROLE_P2P_CLIENT &&
++	    rtwvif_link->wifi_role != RTW89_WIFI_ROLE_STATION)
+ 		return;
+ 
+ 	val = FIELD_PREP(B_AX_TBTT_SHIFT_OFST_MAG, 1) |
+ 			 B_AX_TBTT_SHIFT_OFST_SIGN;
+ 
+-	rtw89_write16_port_mask(rtwdev, rtwvif, p->tbtt_shift,
++	rtw89_write16_port_mask(rtwdev, rtwvif_link, p->tbtt_shift,
+ 				B_AX_TBTT_SHIFT_OFST_MASK, val);
+ }
+ 
+ void rtw89_mac_port_tsf_sync(struct rtw89_dev *rtwdev,
+-			     struct rtw89_vif *rtwvif,
+-			     struct rtw89_vif *rtwvif_src,
++			     struct rtw89_vif_link *rtwvif_link,
++			     struct rtw89_vif_link *rtwvif_src,
+ 			     u16 offset_tu)
+ {
+ 	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+@@ -4473,8 +4522,8 @@ void rtw89_mac_port_tsf_sync(struct rtw89_dev *rtwdev,
+ 	u32 val, reg;
+ 
+ 	val = RTW89_PORT_OFFSET_TU_TO_32US(offset_tu);
+-	reg = rtw89_mac_reg_by_idx(rtwdev, p->tsf_sync + rtwvif->port * 4,
+-				   rtwvif->mac_idx);
++	reg = rtw89_mac_reg_by_idx(rtwdev, p->tsf_sync + rtwvif_link->port * 4,
++				   rtwvif_link->mac_idx);
+ 
+ 	rtw89_write32_mask(rtwdev, reg, B_AX_SYNC_PORT_SRC, rtwvif_src->port);
+ 	rtw89_write32_mask(rtwdev, reg, B_AX_SYNC_PORT_OFFSET_VAL, val);
+@@ -4482,16 +4531,16 @@ void rtw89_mac_port_tsf_sync(struct rtw89_dev *rtwdev,
+ }
+ 
+ static void rtw89_mac_port_tsf_sync_rand(struct rtw89_dev *rtwdev,
+-					 struct rtw89_vif *rtwvif,
+-					 struct rtw89_vif *rtwvif_src,
++					 struct rtw89_vif_link *rtwvif_link,
++					 struct rtw89_vif_link *rtwvif_src,
+ 					 u8 offset, int *n_offset)
+ {
+-	if (rtwvif->net_type != RTW89_NET_TYPE_AP_MODE || rtwvif == rtwvif_src)
++	if (rtwvif_link->net_type != RTW89_NET_TYPE_AP_MODE || rtwvif_link == rtwvif_src)
+ 		return;
+ 
+ 	/* adjust offset randomly to avoid beacon conflict */
+ 	offset = offset - offset / 4 + get_random_u32() % (offset / 2);
+-	rtw89_mac_port_tsf_sync(rtwdev, rtwvif, rtwvif_src,
++	rtw89_mac_port_tsf_sync(rtwdev, rtwvif_link, rtwvif_src,
+ 				(*n_offset) * offset);
+ 
+ 	(*n_offset)++;
+@@ -4499,15 +4548,19 @@ static void rtw89_mac_port_tsf_sync_rand(struct rtw89_dev *rtwdev,
+ 
+ static void rtw89_mac_port_tsf_resync_all(struct rtw89_dev *rtwdev)
+ {
+-	struct rtw89_vif *src = NULL, *tmp;
++	struct rtw89_vif_link *src = NULL, *tmp;
+ 	u8 offset = 100, vif_aps = 0;
++	struct rtw89_vif *rtwvif;
++	unsigned int link_id;
+ 	int n_offset = 1;
+ 
+-	rtw89_for_each_rtwvif(rtwdev, tmp) {
+-		if (!src || tmp->net_type == RTW89_NET_TYPE_INFRA)
+-			src = tmp;
+-		if (tmp->net_type == RTW89_NET_TYPE_AP_MODE)
+-			vif_aps++;
++	rtw89_for_each_rtwvif(rtwdev, rtwvif) {
++		rtw89_vif_for_each_link(rtwvif, tmp, link_id) {
++			if (!src || tmp->net_type == RTW89_NET_TYPE_INFRA)
++				src = tmp;
++			if (tmp->net_type == RTW89_NET_TYPE_AP_MODE)
++				vif_aps++;
++		}
+ 	}
+ 
+ 	if (vif_aps == 0)
+@@ -4515,104 +4568,106 @@ static void rtw89_mac_port_tsf_resync_all(struct rtw89_dev *rtwdev)
+ 
+ 	offset /= (vif_aps + 1);
+ 
+-	rtw89_for_each_rtwvif(rtwdev, tmp)
+-		rtw89_mac_port_tsf_sync_rand(rtwdev, tmp, src, offset, &n_offset);
++	rtw89_for_each_rtwvif(rtwdev, rtwvif)
++		rtw89_vif_for_each_link(rtwvif, tmp, link_id)
++			rtw89_mac_port_tsf_sync_rand(rtwdev, tmp, src, offset,
++						     &n_offset);
+ }
+ 
+-int rtw89_mac_vif_init(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
++int rtw89_mac_vif_init(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link)
+ {
+ 	int ret;
+ 
+-	ret = rtw89_mac_port_update(rtwdev, rtwvif);
++	ret = rtw89_mac_port_update(rtwdev, rtwvif_link);
+ 	if (ret)
+ 		return ret;
+ 
+-	rtw89_mac_dmac_tbl_init(rtwdev, rtwvif->mac_id);
+-	rtw89_mac_cmac_tbl_init(rtwdev, rtwvif->mac_id);
++	rtw89_mac_dmac_tbl_init(rtwdev, rtwvif_link->mac_id);
++	rtw89_mac_cmac_tbl_init(rtwdev, rtwvif_link->mac_id);
+ 
+-	ret = rtw89_mac_set_macid_pause(rtwdev, rtwvif->mac_id, false);
++	ret = rtw89_mac_set_macid_pause(rtwdev, rtwvif_link->mac_id, false);
+ 	if (ret)
+ 		return ret;
+ 
+-	ret = rtw89_fw_h2c_role_maintain(rtwdev, rtwvif, NULL, RTW89_ROLE_CREATE);
++	ret = rtw89_fw_h2c_role_maintain(rtwdev, rtwvif_link, NULL, RTW89_ROLE_CREATE);
+ 	if (ret)
+ 		return ret;
+ 
+-	ret = rtw89_fw_h2c_join_info(rtwdev, rtwvif, NULL, true);
++	ret = rtw89_fw_h2c_join_info(rtwdev, rtwvif_link, NULL, true);
+ 	if (ret)
+ 		return ret;
+ 
+-	ret = rtw89_cam_init(rtwdev, rtwvif);
++	ret = rtw89_cam_init(rtwdev, rtwvif_link);
+ 	if (ret)
+ 		return ret;
+ 
+-	ret = rtw89_fw_h2c_cam(rtwdev, rtwvif, NULL, NULL);
++	ret = rtw89_fw_h2c_cam(rtwdev, rtwvif_link, NULL, NULL);
+ 	if (ret)
+ 		return ret;
+ 
+-	ret = rtw89_chip_h2c_default_cmac_tbl(rtwdev, rtwvif, NULL);
++	ret = rtw89_chip_h2c_default_cmac_tbl(rtwdev, rtwvif_link, NULL);
+ 	if (ret)
+ 		return ret;
+ 
+-	ret = rtw89_chip_h2c_default_dmac_tbl(rtwdev, rtwvif, NULL);
++	ret = rtw89_chip_h2c_default_dmac_tbl(rtwdev, rtwvif_link, NULL);
+ 	if (ret)
+ 		return ret;
+ 
+ 	return 0;
+ }
+ 
+-int rtw89_mac_vif_deinit(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
++int rtw89_mac_vif_deinit(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link)
+ {
+ 	int ret;
+ 
+-	ret = rtw89_fw_h2c_role_maintain(rtwdev, rtwvif, NULL, RTW89_ROLE_REMOVE);
++	ret = rtw89_fw_h2c_role_maintain(rtwdev, rtwvif_link, NULL, RTW89_ROLE_REMOVE);
+ 	if (ret)
+ 		return ret;
+ 
+-	rtw89_cam_deinit(rtwdev, rtwvif);
++	rtw89_cam_deinit(rtwdev, rtwvif_link);
+ 
+-	ret = rtw89_fw_h2c_cam(rtwdev, rtwvif, NULL, NULL);
++	ret = rtw89_fw_h2c_cam(rtwdev, rtwvif_link, NULL, NULL);
+ 	if (ret)
+ 		return ret;
+ 
+ 	return 0;
+ }
+ 
+-int rtw89_mac_port_update(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
++int rtw89_mac_port_update(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link)
+ {
+-	u8 port = rtwvif->port;
++	u8 port = rtwvif_link->port;
+ 
+ 	if (port >= RTW89_PORT_NUM)
+ 		return -EINVAL;
+ 
+-	rtw89_mac_port_cfg_func_sw(rtwdev, rtwvif);
+-	rtw89_mac_port_cfg_tx_rpt(rtwdev, rtwvif, false);
+-	rtw89_mac_port_cfg_rx_rpt(rtwdev, rtwvif, false);
+-	rtw89_mac_port_cfg_net_type(rtwdev, rtwvif);
+-	rtw89_mac_port_cfg_bcn_prct(rtwdev, rtwvif);
+-	rtw89_mac_port_cfg_rx_sw(rtwdev, rtwvif);
+-	rtw89_mac_port_cfg_rx_sync_by_nettype(rtwdev, rtwvif);
+-	rtw89_mac_port_cfg_tx_sw_by_nettype(rtwdev, rtwvif);
+-	rtw89_mac_port_cfg_bcn_intv(rtwdev, rtwvif);
+-	rtw89_mac_port_cfg_hiq_win(rtwdev, rtwvif);
+-	rtw89_mac_port_cfg_hiq_dtim(rtwdev, rtwvif);
+-	rtw89_mac_port_cfg_hiq_drop(rtwdev, rtwvif);
+-	rtw89_mac_port_cfg_bcn_setup_time(rtwdev, rtwvif);
+-	rtw89_mac_port_cfg_bcn_hold_time(rtwdev, rtwvif);
+-	rtw89_mac_port_cfg_bcn_mask_area(rtwdev, rtwvif);
+-	rtw89_mac_port_cfg_tbtt_early(rtwdev, rtwvif);
+-	rtw89_mac_port_cfg_tbtt_shift(rtwdev, rtwvif);
+-	rtw89_mac_port_cfg_bss_color(rtwdev, rtwvif);
+-	rtw89_mac_port_cfg_mbssid(rtwdev, rtwvif);
+-	rtw89_mac_port_cfg_func_en(rtwdev, rtwvif, true);
++	rtw89_mac_port_cfg_func_sw(rtwdev, rtwvif_link);
++	rtw89_mac_port_cfg_tx_rpt(rtwdev, rtwvif_link, false);
++	rtw89_mac_port_cfg_rx_rpt(rtwdev, rtwvif_link, false);
++	rtw89_mac_port_cfg_net_type(rtwdev, rtwvif_link);
++	rtw89_mac_port_cfg_bcn_prct(rtwdev, rtwvif_link);
++	rtw89_mac_port_cfg_rx_sw(rtwdev, rtwvif_link);
++	rtw89_mac_port_cfg_rx_sync_by_nettype(rtwdev, rtwvif_link);
++	rtw89_mac_port_cfg_tx_sw_by_nettype(rtwdev, rtwvif_link);
++	rtw89_mac_port_cfg_bcn_intv(rtwdev, rtwvif_link);
++	rtw89_mac_port_cfg_hiq_win(rtwdev, rtwvif_link);
++	rtw89_mac_port_cfg_hiq_dtim(rtwdev, rtwvif_link);
++	rtw89_mac_port_cfg_hiq_drop(rtwdev, rtwvif_link);
++	rtw89_mac_port_cfg_bcn_setup_time(rtwdev, rtwvif_link);
++	rtw89_mac_port_cfg_bcn_hold_time(rtwdev, rtwvif_link);
++	rtw89_mac_port_cfg_bcn_mask_area(rtwdev, rtwvif_link);
++	rtw89_mac_port_cfg_tbtt_early(rtwdev, rtwvif_link);
++	rtw89_mac_port_cfg_tbtt_shift(rtwdev, rtwvif_link);
++	rtw89_mac_port_cfg_bss_color(rtwdev, rtwvif_link);
++	rtw89_mac_port_cfg_mbssid(rtwdev, rtwvif_link);
++	rtw89_mac_port_cfg_func_en(rtwdev, rtwvif_link, true);
+ 	rtw89_mac_port_tsf_resync_all(rtwdev);
+ 	fsleep(BCN_ERLY_SET_DLY);
+-	rtw89_mac_port_cfg_bcn_early(rtwdev, rtwvif);
++	rtw89_mac_port_cfg_bcn_early(rtwdev, rtwvif_link);
+ 
+ 	return 0;
+ }
+ 
+-int rtw89_mac_port_get_tsf(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
++int rtw89_mac_port_get_tsf(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
+ 			   u64 *tsf)
+ {
+ 	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+@@ -4620,12 +4675,12 @@ int rtw89_mac_port_get_tsf(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ 	u32 tsf_low, tsf_high;
+ 	int ret;
+ 
+-	ret = rtw89_mac_check_mac_en(rtwdev, rtwvif->mac_idx, RTW89_CMAC_SEL);
++	ret = rtw89_mac_check_mac_en(rtwdev, rtwvif_link->mac_idx, RTW89_CMAC_SEL);
+ 	if (ret)
+ 		return ret;
+ 
+-	tsf_low = rtw89_read32_port(rtwdev, rtwvif, p->tsftr_l);
+-	tsf_high = rtw89_read32_port(rtwdev, rtwvif, p->tsftr_h);
++	tsf_low = rtw89_read32_port(rtwdev, rtwvif_link, p->tsftr_l);
++	tsf_high = rtw89_read32_port(rtwdev, rtwvif_link, p->tsftr_h);
+ 	*tsf = (u64)tsf_high << 32 | tsf_low;
+ 
+ 	return 0;
+@@ -4651,65 +4706,57 @@ static void rtw89_mac_check_he_obss_narrow_bw_ru_iter(struct wiphy *wiphy,
+ }
+ 
+ void rtw89_mac_set_he_obss_narrow_bw_ru(struct rtw89_dev *rtwdev,
+-					struct ieee80211_vif *vif)
++					struct rtw89_vif_link *rtwvif_link)
+ {
+-	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
++	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
+ 	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+ 	struct ieee80211_hw *hw = rtwdev->hw;
++	struct ieee80211_bss_conf *bss_conf;
++	struct cfg80211_chan_def oper;
+ 	bool tolerated = true;
+ 	u32 reg;
+ 
+-	if (!vif->bss_conf.he_support || vif->type != NL80211_IFTYPE_STATION)
++	rcu_read_lock();
++
++	bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
++	if (!bss_conf->he_support || vif->type != NL80211_IFTYPE_STATION) {
++		rcu_read_unlock();
+ 		return;
++	}
+ 
+-	if (!(vif->bss_conf.chanreq.oper.chan->flags & IEEE80211_CHAN_RADAR))
++	oper = bss_conf->chanreq.oper;
++	if (!(oper.chan->flags & IEEE80211_CHAN_RADAR)) {
++		rcu_read_unlock();
+ 		return;
++	}
++
++	rcu_read_unlock();
+ 
+-	cfg80211_bss_iter(hw->wiphy, &vif->bss_conf.chanreq.oper,
++	cfg80211_bss_iter(hw->wiphy, &oper,
+ 			  rtw89_mac_check_he_obss_narrow_bw_ru_iter,
+ 			  &tolerated);
+ 
+ 	reg = rtw89_mac_reg_by_idx(rtwdev, mac->narrow_bw_ru_dis.addr,
+-				   rtwvif->mac_idx);
++				   rtwvif_link->mac_idx);
+ 	if (tolerated)
+ 		rtw89_write32_clr(rtwdev, reg, mac->narrow_bw_ru_dis.mask);
+ 	else
+ 		rtw89_write32_set(rtwdev, reg, mac->narrow_bw_ru_dis.mask);
+ }
+ 
+-void rtw89_mac_stop_ap(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
++void rtw89_mac_stop_ap(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link)
+ {
+-	rtw89_mac_port_cfg_func_sw(rtwdev, rtwvif);
++	rtw89_mac_port_cfg_func_sw(rtwdev, rtwvif_link);
+ }
+ 
+-int rtw89_mac_add_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
++int rtw89_mac_add_vif(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link)
+ {
+-	int ret;
+-
+-	rtwvif->mac_id = rtw89_acquire_mac_id(rtwdev);
+-	if (rtwvif->mac_id == RTW89_MAX_MAC_ID_NUM)
+-		return -ENOSPC;
+-
+-	ret = rtw89_mac_vif_init(rtwdev, rtwvif);
+-	if (ret)
+-		goto release_mac_id;
+-
+-	return 0;
+-
+-release_mac_id:
+-	rtw89_release_mac_id(rtwdev, rtwvif->mac_id);
+-
+-	return ret;
++	return rtw89_mac_vif_init(rtwdev, rtwvif_link);
+ }
+ 
+-int rtw89_mac_remove_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
++int rtw89_mac_remove_vif(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link)
+ {
+-	int ret;
+-
+-	ret = rtw89_mac_vif_deinit(rtwdev, rtwvif);
+-	rtw89_release_mac_id(rtwdev, rtwvif->mac_id);
+-
+-	return ret;
++	return rtw89_mac_vif_deinit(rtwdev, rtwvif_link);
+ }
+ 
+ static void
+@@ -4730,8 +4777,8 @@ rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *skb,
+ {
+ 	const struct rtw89_c2h_scanofld *c2h =
+ 		(const struct rtw89_c2h_scanofld *)skb->data;
+-	struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif;
+-	struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif);
++	struct rtw89_vif_link *rtwvif_link = rtwdev->scan_info.scanning_vif;
++	struct rtw89_vif *rtwvif;
+ 	struct rtw89_chan new;
+ 	u8 reason, status, tx_fail, band, actual_period, expect_period;
+ 	u32 last_chan = rtwdev->scan_info.last_chan_idx, report_tsf;
+@@ -4739,9 +4786,11 @@ rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *skb,
+ 	u16 chan;
+ 	int ret;
+ 
+-	if (!rtwvif)
++	if (!rtwvif_link)
+ 		return;
+ 
++	rtwvif = rtwvif_link->rtwvif;
++
+ 	tx_fail = le32_get_bits(c2h->w5, RTW89_C2H_SCANOFLD_W5_TX_FAIL);
+ 	status = le32_get_bits(c2h->w2, RTW89_C2H_SCANOFLD_W2_STATUS);
+ 	chan = le32_get_bits(c2h->w2, RTW89_C2H_SCANOFLD_W2_PRI_CH);
+@@ -4781,28 +4830,28 @@ rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *skb,
+ 		if (rtwdev->scan_info.abort)
+ 			return;
+ 
+-		if (rtwvif && rtwvif->scan_req &&
++		if (rtwvif_link && rtwvif->scan_req &&
+ 		    last_chan < rtwvif->scan_req->n_channels) {
+-			ret = rtw89_hw_scan_offload(rtwdev, vif, true);
++			ret = rtw89_hw_scan_offload(rtwdev, rtwvif_link, true);
+ 			if (ret) {
+-				rtw89_hw_scan_abort(rtwdev, vif);
++				rtw89_hw_scan_abort(rtwdev, rtwvif_link);
+ 				rtw89_warn(rtwdev, "HW scan failed: %d\n", ret);
+ 			}
+ 		} else {
+-			rtw89_hw_scan_complete(rtwdev, vif, false);
++			rtw89_hw_scan_complete(rtwdev, rtwvif_link, false);
+ 		}
+ 		break;
+ 	case RTW89_SCAN_ENTER_OP_NOTIFY:
+ 	case RTW89_SCAN_ENTER_CH_NOTIFY:
+ 		if (rtw89_is_op_chan(rtwdev, band, chan)) {
+-			rtw89_assign_entity_chan(rtwdev, rtwvif->chanctx_idx,
++			rtw89_assign_entity_chan(rtwdev, rtwvif_link->chanctx_idx,
+ 						 &rtwdev->scan_info.op_chan);
+ 			rtw89_mac_enable_beacon_for_ap_vifs(rtwdev, true);
+ 			ieee80211_wake_queues(rtwdev->hw);
+ 		} else {
+ 			rtw89_chan_create(&new, chan, chan, band,
+ 					  RTW89_CHANNEL_WIDTH_20);
+-			rtw89_assign_entity_chan(rtwdev, rtwvif->chanctx_idx,
++			rtw89_assign_entity_chan(rtwdev, rtwvif_link->chanctx_idx,
+ 						 &new);
+ 		}
+ 		break;
+@@ -4812,10 +4861,11 @@ rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *skb,
+ }
+ 
+ static void
+-rtw89_mac_bcn_fltr_rpt(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
++rtw89_mac_bcn_fltr_rpt(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
+ 		       struct sk_buff *skb)
+ {
+-	struct ieee80211_vif *vif = rtwvif_to_vif_safe(rtwvif);
++	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
++	struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
+ 	enum nl80211_cqm_rssi_threshold_event nl_event;
+ 	const struct rtw89_c2h_mac_bcnfltr_rpt *c2h =
+ 		(const struct rtw89_c2h_mac_bcnfltr_rpt *)skb->data;
+@@ -4827,7 +4877,7 @@ rtw89_mac_bcn_fltr_rpt(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ 	event = le32_get_bits(c2h->w2, RTW89_C2H_MAC_BCNFLTR_RPT_W2_EVENT);
+ 	mac_id = le32_get_bits(c2h->w2, RTW89_C2H_MAC_BCNFLTR_RPT_W2_MACID);
+ 
+-	if (mac_id != rtwvif->mac_id)
++	if (mac_id != rtwvif_link->mac_id)
+ 		return;
+ 
+ 	rtw89_debug(rtwdev, RTW89_DBG_FW,
+@@ -4839,7 +4889,7 @@ rtw89_mac_bcn_fltr_rpt(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ 		if (!rtwdev->scanning && !rtwvif->offchan)
+ 			ieee80211_connection_loss(vif);
+ 		else
+-			rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, vif, true);
++			rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, rtwvif_link, true);
+ 		return;
+ 	case RTW89_BCN_FLTR_NOTIFY:
+ 		nl_event = NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH;
+@@ -4863,10 +4913,13 @@ static void
+ rtw89_mac_c2h_bcn_fltr_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h,
+ 			   u32 len)
+ {
++	struct rtw89_vif_link *rtwvif_link;
+ 	struct rtw89_vif *rtwvif;
++	unsigned int link_id;
+ 
+ 	rtw89_for_each_rtwvif(rtwdev, rtwvif)
+-		rtw89_mac_bcn_fltr_rpt(rtwdev, rtwvif, c2h);
++		rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id)
++			rtw89_mac_bcn_fltr_rpt(rtwdev, rtwvif_link, c2h);
+ }
+ 
+ static void
+@@ -5931,15 +5984,15 @@ static int rtw89_mac_init_bfee_ax(struct rtw89_dev *rtwdev, u8 mac_idx)
+ }
+ 
+ static int rtw89_mac_set_csi_para_reg_ax(struct rtw89_dev *rtwdev,
+-					 struct ieee80211_vif *vif,
+-					 struct ieee80211_sta *sta)
++					 struct rtw89_vif_link *rtwvif_link,
++					 struct rtw89_sta_link *rtwsta_link)
+ {
+-	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+-	u8 mac_idx = rtwvif->mac_idx;
+ 	u8 nc = 1, nr = 3, ng = 0, cb = 1, cs = 1, ldpc_en = 1, stbc_en = 1;
+-	u8 port_sel = rtwvif->port;
++	struct ieee80211_link_sta *link_sta;
++	u8 mac_idx = rtwvif_link->mac_idx;
++	u8 port_sel = rtwvif_link->port;
+ 	u8 sound_dim = 3, t;
+-	u8 *phy_cap = sta->deflink.he_cap.he_cap_elem.phy_cap_info;
++	u8 *phy_cap;
+ 	u32 reg;
+ 	u16 val;
+ 	int ret;
+@@ -5948,6 +6001,11 @@ static int rtw89_mac_set_csi_para_reg_ax(struct rtw89_dev *rtwdev,
+ 	if (ret)
+ 		return ret;
+ 
++	rcu_read_lock();
++
++	link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true);
++	phy_cap = link_sta->he_cap.he_cap_elem.phy_cap_info;
++
+ 	if ((phy_cap[3] & IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER) ||
+ 	    (phy_cap[4] & IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER)) {
+ 		ldpc_en &= !!(phy_cap[1] & IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD);
+@@ -5956,17 +6014,19 @@ static int rtw89_mac_set_csi_para_reg_ax(struct rtw89_dev *rtwdev,
+ 			      phy_cap[5]);
+ 		sound_dim = min(sound_dim, t);
+ 	}
+-	if ((sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) ||
+-	    (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)) {
+-		ldpc_en &= !!(sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC);
+-		stbc_en &= !!(sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_MASK);
++	if ((link_sta->vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) ||
++	    (link_sta->vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)) {
++		ldpc_en &= !!(link_sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC);
++		stbc_en &= !!(link_sta->vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_MASK);
+ 		t = FIELD_GET(IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK,
+-			      sta->deflink.vht_cap.cap);
++			      link_sta->vht_cap.cap);
+ 		sound_dim = min(sound_dim, t);
+ 	}
+ 	nc = min(nc, sound_dim);
+ 	nr = min(nr, sound_dim);
+ 
++	rcu_read_unlock();
++
+ 	reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_TRXPTCL_RESP_CSI_CTRL_0, mac_idx);
+ 	rtw89_write32_set(rtwdev, reg, B_AX_BFMEE_BFPARAM_SEL);
+ 
+@@ -5989,34 +6049,41 @@ static int rtw89_mac_set_csi_para_reg_ax(struct rtw89_dev *rtwdev,
+ }
+ 
+ static int rtw89_mac_csi_rrsc_ax(struct rtw89_dev *rtwdev,
+-				 struct ieee80211_vif *vif,
+-				 struct ieee80211_sta *sta)
++				 struct rtw89_vif_link *rtwvif_link,
++				 struct rtw89_sta_link *rtwsta_link)
+ {
+-	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ 	u32 rrsc = BIT(RTW89_MAC_BF_RRSC_6M) | BIT(RTW89_MAC_BF_RRSC_24M);
++	struct ieee80211_link_sta *link_sta;
++	u8 mac_idx = rtwvif_link->mac_idx;
+ 	u32 reg;
+-	u8 mac_idx = rtwvif->mac_idx;
+ 	int ret;
+ 
+ 	ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
+ 	if (ret)
+ 		return ret;
+ 
+-	if (sta->deflink.he_cap.has_he) {
++	rcu_read_lock();
++
++	link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true);
++
++	if (link_sta->he_cap.has_he) {
+ 		rrsc |= (BIT(RTW89_MAC_BF_RRSC_HE_MSC0) |
+ 			 BIT(RTW89_MAC_BF_RRSC_HE_MSC3) |
+ 			 BIT(RTW89_MAC_BF_RRSC_HE_MSC5));
+ 	}
+-	if (sta->deflink.vht_cap.vht_supported) {
++	if (link_sta->vht_cap.vht_supported) {
+ 		rrsc |= (BIT(RTW89_MAC_BF_RRSC_VHT_MSC0) |
+ 			 BIT(RTW89_MAC_BF_RRSC_VHT_MSC3) |
+ 			 BIT(RTW89_MAC_BF_RRSC_VHT_MSC5));
+ 	}
+-	if (sta->deflink.ht_cap.ht_supported) {
++	if (link_sta->ht_cap.ht_supported) {
+ 		rrsc |= (BIT(RTW89_MAC_BF_RRSC_HT_MSC0) |
+ 			 BIT(RTW89_MAC_BF_RRSC_HT_MSC3) |
+ 			 BIT(RTW89_MAC_BF_RRSC_HT_MSC5));
+ 	}
++
++	rcu_read_unlock();
++
+ 	reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_TRXPTCL_RESP_CSI_CTRL_0, mac_idx);
+ 	rtw89_write32_set(rtwdev, reg, B_AX_BFMEE_BFPARAM_SEL);
+ 	rtw89_write32_clr(rtwdev, reg, B_AX_BFMEE_CSI_FORCE_RETE_EN);
+@@ -6028,35 +6095,53 @@ static int rtw89_mac_csi_rrsc_ax(struct rtw89_dev *rtwdev,
+ }
+ 
+ static void rtw89_mac_bf_assoc_ax(struct rtw89_dev *rtwdev,
+-				  struct ieee80211_vif *vif,
+-				  struct ieee80211_sta *sta)
++				  struct rtw89_vif_link *rtwvif_link,
++				  struct rtw89_sta_link *rtwsta_link)
+ {
+-	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
++	struct ieee80211_link_sta *link_sta;
++	bool has_beamformer_cap;
++
++	rcu_read_lock();
++
++	link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true);
++	has_beamformer_cap = rtw89_sta_has_beamformer_cap(link_sta);
++
++	rcu_read_unlock();
+ 
+-	if (rtw89_sta_has_beamformer_cap(sta)) {
++	if (has_beamformer_cap) {
+ 		rtw89_debug(rtwdev, RTW89_DBG_BF,
+ 			    "initialize bfee for new association\n");
+-		rtw89_mac_init_bfee_ax(rtwdev, rtwvif->mac_idx);
+-		rtw89_mac_set_csi_para_reg_ax(rtwdev, vif, sta);
+-		rtw89_mac_csi_rrsc_ax(rtwdev, vif, sta);
++		rtw89_mac_init_bfee_ax(rtwdev, rtwvif_link->mac_idx);
++		rtw89_mac_set_csi_para_reg_ax(rtwdev, rtwvif_link, rtwsta_link);
++		rtw89_mac_csi_rrsc_ax(rtwdev, rtwvif_link, rtwsta_link);
+ 	}
+ }
+ 
+-void rtw89_mac_bf_disassoc(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
+-			   struct ieee80211_sta *sta)
++void rtw89_mac_bf_disassoc(struct rtw89_dev *rtwdev,
++			   struct rtw89_vif_link *rtwvif_link,
++			   struct rtw89_sta_link *rtwsta_link)
+ {
+-	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+-
+-	rtw89_mac_bfee_ctrl(rtwdev, rtwvif->mac_idx, false);
++	rtw89_mac_bfee_ctrl(rtwdev, rtwvif_link->mac_idx, false);
+ }
+ 
+ void rtw89_mac_bf_set_gid_table(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
+ 				struct ieee80211_bss_conf *conf)
+ {
+-	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+-	u8 mac_idx = rtwvif->mac_idx;
++	struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
++	struct rtw89_vif_link *rtwvif_link;
++	u8 mac_idx;
+ 	__le32 *p;
+ 
++	rtwvif_link = rtwvif->links[conf->link_id];
++	if (unlikely(!rtwvif_link)) {
++		rtw89_err(rtwdev,
++			  "%s: rtwvif link (link_id %u) is not active\n",
++			  __func__, conf->link_id);
++		return;
++	}
++
++	mac_idx = rtwvif_link->mac_idx;
++
+ 	rtw89_debug(rtwdev, RTW89_DBG_BF, "update bf GID table\n");
+ 
+ 	p = (__le32 *)conf->mu_group.membership;
+@@ -6080,7 +6165,7 @@ void rtw89_mac_bf_set_gid_table(struct rtw89_dev *rtwdev, struct ieee80211_vif *
+ 
+ struct rtw89_mac_bf_monitor_iter_data {
+ 	struct rtw89_dev *rtwdev;
+-	struct ieee80211_sta *down_sta;
++	struct rtw89_sta_link *down_rtwsta_link;
+ 	int count;
+ };
+ 
+@@ -6089,23 +6174,41 @@ void rtw89_mac_bf_monitor_calc_iter(void *data, struct ieee80211_sta *sta)
+ {
+ 	struct rtw89_mac_bf_monitor_iter_data *iter_data =
+ 				(struct rtw89_mac_bf_monitor_iter_data *)data;
+-	struct ieee80211_sta *down_sta = iter_data->down_sta;
++	struct rtw89_sta_link *down_rtwsta_link = iter_data->down_rtwsta_link;
++	struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
++	struct ieee80211_link_sta *link_sta;
++	struct rtw89_sta_link *rtwsta_link;
++	bool has_beamformer_cap = false;
+ 	int *count = &iter_data->count;
++	unsigned int link_id;
+ 
+-	if (down_sta == sta)
+-		return;
++	rcu_read_lock();
++
++	rtw89_sta_for_each_link(rtwsta, rtwsta_link, link_id) {
++		if (rtwsta_link == down_rtwsta_link)
++			continue;
+ 
+-	if (rtw89_sta_has_beamformer_cap(sta))
++		link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, false);
++		if (rtw89_sta_has_beamformer_cap(link_sta)) {
++			has_beamformer_cap = true;
++			break;
++		}
++	}
++
++	if (has_beamformer_cap)
+ 		(*count)++;
++
++	rcu_read_unlock();
+ }
+ 
+ void rtw89_mac_bf_monitor_calc(struct rtw89_dev *rtwdev,
+-			       struct ieee80211_sta *sta, bool disconnect)
++			       struct rtw89_sta_link *rtwsta_link,
++			       bool disconnect)
+ {
+ 	struct rtw89_mac_bf_monitor_iter_data data;
+ 
+ 	data.rtwdev = rtwdev;
+-	data.down_sta = disconnect ? sta : NULL;
++	data.down_rtwsta_link = disconnect ? rtwsta_link : NULL;
+ 	data.count = 0;
+ 	ieee80211_iterate_stations_atomic(rtwdev->hw,
+ 					  rtw89_mac_bf_monitor_calc_iter,
+@@ -6121,10 +6224,12 @@ void rtw89_mac_bf_monitor_calc(struct rtw89_dev *rtwdev,
+ void _rtw89_mac_bf_monitor_track(struct rtw89_dev *rtwdev)
+ {
+ 	struct rtw89_traffic_stats *stats = &rtwdev->stats;
+-	struct rtw89_vif *rtwvif;
++	struct rtw89_vif_link *rtwvif_link;
+ 	bool en = stats->tx_tfc_lv <= stats->rx_tfc_lv;
+ 	bool old = test_bit(RTW89_FLAG_BFEE_EN, rtwdev->flags);
++	struct rtw89_vif *rtwvif;
+ 	bool keep_timer = true;
++	unsigned int link_id;
+ 	bool old_keep_timer;
+ 
+ 	old_keep_timer = test_bit(RTW89_FLAG_BFEE_TIMER_KEEP, rtwdev->flags);
+@@ -6134,30 +6239,32 @@ void _rtw89_mac_bf_monitor_track(struct rtw89_dev *rtwdev)
+ 
+ 	if (keep_timer != old_keep_timer) {
+ 		rtw89_for_each_rtwvif(rtwdev, rtwvif)
+-			rtw89_mac_bfee_standby_timer(rtwdev, rtwvif->mac_idx,
+-						     keep_timer);
++			rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id)
++				rtw89_mac_bfee_standby_timer(rtwdev, rtwvif_link->mac_idx,
++							     keep_timer);
+ 	}
+ 
+ 	if (en == old)
+ 		return;
+ 
+ 	rtw89_for_each_rtwvif(rtwdev, rtwvif)
+-		rtw89_mac_bfee_ctrl(rtwdev, rtwvif->mac_idx, en);
++		rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id)
++			rtw89_mac_bfee_ctrl(rtwdev, rtwvif_link->mac_idx, en);
+ }
+ 
+ static int
+-__rtw89_mac_set_tx_time(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
++__rtw89_mac_set_tx_time(struct rtw89_dev *rtwdev, struct rtw89_sta_link *rtwsta_link,
+ 			u32 tx_time)
+ {
+ #define MAC_AX_DFLT_TX_TIME 5280
+-	u8 mac_idx = rtwsta->rtwvif->mac_idx;
++	u8 mac_idx = rtwsta_link->rtwvif_link->mac_idx;
+ 	u32 max_tx_time = tx_time == 0 ? MAC_AX_DFLT_TX_TIME : tx_time;
+ 	u32 reg;
+ 	int ret = 0;
+ 
+-	if (rtwsta->cctl_tx_time) {
+-		rtwsta->ampdu_max_time = (max_tx_time - 512) >> 9;
+-		ret = rtw89_fw_h2c_txtime_cmac_tbl(rtwdev, rtwsta);
++	if (rtwsta_link->cctl_tx_time) {
++		rtwsta_link->ampdu_max_time = (max_tx_time - 512) >> 9;
++		ret = rtw89_fw_h2c_txtime_cmac_tbl(rtwdev, rtwsta_link);
+ 	} else {
+ 		ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
+ 		if (ret) {
+@@ -6173,31 +6280,31 @@ __rtw89_mac_set_tx_time(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
+ 	return ret;
+ }
+ 
+-int rtw89_mac_set_tx_time(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
++int rtw89_mac_set_tx_time(struct rtw89_dev *rtwdev, struct rtw89_sta_link *rtwsta_link,
+ 			  bool resume, u32 tx_time)
+ {
+ 	int ret = 0;
+ 
+ 	if (!resume) {
+-		rtwsta->cctl_tx_time = true;
+-		ret = __rtw89_mac_set_tx_time(rtwdev, rtwsta, tx_time);
++		rtwsta_link->cctl_tx_time = true;
++		ret = __rtw89_mac_set_tx_time(rtwdev, rtwsta_link, tx_time);
+ 	} else {
+-		ret = __rtw89_mac_set_tx_time(rtwdev, rtwsta, tx_time);
+-		rtwsta->cctl_tx_time = false;
++		ret = __rtw89_mac_set_tx_time(rtwdev, rtwsta_link, tx_time);
++		rtwsta_link->cctl_tx_time = false;
+ 	}
+ 
+ 	return ret;
+ }
+ 
+-int rtw89_mac_get_tx_time(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
++int rtw89_mac_get_tx_time(struct rtw89_dev *rtwdev, struct rtw89_sta_link *rtwsta_link,
+ 			  u32 *tx_time)
+ {
+-	u8 mac_idx = rtwsta->rtwvif->mac_idx;
++	u8 mac_idx = rtwsta_link->rtwvif_link->mac_idx;
+ 	u32 reg;
+ 	int ret = 0;
+ 
+-	if (rtwsta->cctl_tx_time) {
+-		*tx_time = (rtwsta->ampdu_max_time + 1) << 9;
++	if (rtwsta_link->cctl_tx_time) {
++		*tx_time = (rtwsta_link->ampdu_max_time + 1) << 9;
+ 	} else {
+ 		ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
+ 		if (ret) {
+@@ -6213,33 +6320,33 @@ int rtw89_mac_get_tx_time(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
+ }
+ 
+ int rtw89_mac_set_tx_retry_limit(struct rtw89_dev *rtwdev,
+-				 struct rtw89_sta *rtwsta,
++				 struct rtw89_sta_link *rtwsta_link,
+ 				 bool resume, u8 tx_retry)
+ {
+ 	int ret = 0;
+ 
+-	rtwsta->data_tx_cnt_lmt = tx_retry;
++	rtwsta_link->data_tx_cnt_lmt = tx_retry;
+ 
+ 	if (!resume) {
+-		rtwsta->cctl_tx_retry_limit = true;
+-		ret = rtw89_fw_h2c_txtime_cmac_tbl(rtwdev, rtwsta);
++		rtwsta_link->cctl_tx_retry_limit = true;
++		ret = rtw89_fw_h2c_txtime_cmac_tbl(rtwdev, rtwsta_link);
+ 	} else {
+-		ret = rtw89_fw_h2c_txtime_cmac_tbl(rtwdev, rtwsta);
+-		rtwsta->cctl_tx_retry_limit = false;
++		ret = rtw89_fw_h2c_txtime_cmac_tbl(rtwdev, rtwsta_link);
++		rtwsta_link->cctl_tx_retry_limit = false;
+ 	}
+ 
+ 	return ret;
+ }
+ 
+ int rtw89_mac_get_tx_retry_limit(struct rtw89_dev *rtwdev,
+-				 struct rtw89_sta *rtwsta, u8 *tx_retry)
++				 struct rtw89_sta_link *rtwsta_link, u8 *tx_retry)
+ {
+-	u8 mac_idx = rtwsta->rtwvif->mac_idx;
++	u8 mac_idx = rtwsta_link->rtwvif_link->mac_idx;
+ 	u32 reg;
+ 	int ret = 0;
+ 
+-	if (rtwsta->cctl_tx_retry_limit) {
+-		*tx_retry = rtwsta->data_tx_cnt_lmt;
++	if (rtwsta_link->cctl_tx_retry_limit) {
++		*tx_retry = rtwsta_link->data_tx_cnt_lmt;
+ 	} else {
+ 		ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
+ 		if (ret) {
+@@ -6255,10 +6362,10 @@ int rtw89_mac_get_tx_retry_limit(struct rtw89_dev *rtwdev,
+ }
+ 
+ int rtw89_mac_set_hw_muedca_ctrl(struct rtw89_dev *rtwdev,
+-				 struct rtw89_vif *rtwvif, bool en)
++				 struct rtw89_vif_link *rtwvif_link, bool en)
+ {
+ 	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+-	u8 mac_idx = rtwvif->mac_idx;
++	u8 mac_idx = rtwvif_link->mac_idx;
+ 	u16 set = mac->muedca_ctrl.mask;
+ 	u32 reg;
+ 	u32 ret;
+@@ -6326,7 +6433,9 @@ int rtw89_mac_read_xtal_si_ax(struct rtw89_dev *rtwdev, u8 offset, u8 *val)
+ }
+ 
+ static
+-void rtw89_mac_pkt_drop_sta(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta)
++void rtw89_mac_pkt_drop_sta(struct rtw89_dev *rtwdev,
++			    struct rtw89_vif_link *rtwvif_link,
++			    struct rtw89_sta_link *rtwsta_link)
+ {
+ 	static const enum rtw89_pkt_drop_sel sels[] = {
+ 		RTW89_PKT_DROP_SEL_MACID_BE_ONCE,
+@@ -6334,15 +6443,14 @@ void rtw89_mac_pkt_drop_sta(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta)
+ 		RTW89_PKT_DROP_SEL_MACID_VI_ONCE,
+ 		RTW89_PKT_DROP_SEL_MACID_VO_ONCE,
+ 	};
+-	struct rtw89_vif *rtwvif = rtwsta->rtwvif;
+ 	struct rtw89_pkt_drop_params params = {0};
+ 	int i;
+ 
+ 	params.mac_band = RTW89_MAC_0;
+-	params.macid = rtwsta->mac_id;
+-	params.port = rtwvif->port;
++	params.macid = rtwsta_link->mac_id;
++	params.port = rtwvif_link->port;
+ 	params.mbssid = 0;
+-	params.tf_trs = rtwvif->trigger;
++	params.tf_trs = rtwvif_link->trigger;
+ 
+ 	for (i = 0; i < ARRAY_SIZE(sels); i++) {
+ 		params.sel = sels[i];
+@@ -6352,15 +6460,21 @@ void rtw89_mac_pkt_drop_sta(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta)
+ 
+ static void rtw89_mac_pkt_drop_vif_iter(void *data, struct ieee80211_sta *sta)
+ {
+-	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
++	struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
+ 	struct rtw89_vif *rtwvif = rtwsta->rtwvif;
+-	struct rtw89_dev *rtwdev = rtwvif->rtwdev;
++	struct rtw89_dev *rtwdev = rtwsta->rtwdev;
++	struct rtw89_vif_link *rtwvif_link;
++	struct rtw89_sta_link *rtwsta_link;
+ 	struct rtw89_vif *target = data;
++	unsigned int link_id;
+ 
+ 	if (rtwvif != target)
+ 		return;
+ 
+-	rtw89_mac_pkt_drop_sta(rtwdev, rtwsta);
++	rtw89_sta_for_each_link(rtwsta, rtwsta_link, link_id) {
++		rtwvif_link = rtwsta_link->rtwvif_link;
++		rtw89_mac_pkt_drop_sta(rtwdev, rtwvif_link, rtwsta_link);
++	}
+ }
+ 
+ void rtw89_mac_pkt_drop_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
+diff --git a/drivers/net/wireless/realtek/rtw89/mac.h b/drivers/net/wireless/realtek/rtw89/mac.h
+index 67c2a45071244d..0c269961a57311 100644
+--- a/drivers/net/wireless/realtek/rtw89/mac.h
++++ b/drivers/net/wireless/realtek/rtw89/mac.h
+@@ -951,8 +951,9 @@ struct rtw89_mac_gen_def {
+ 	void (*dmac_func_pre_en)(struct rtw89_dev *rtwdev);
+ 	void (*dle_func_en)(struct rtw89_dev *rtwdev, bool enable);
+ 	void (*dle_clk_en)(struct rtw89_dev *rtwdev, bool enable);
+-	void (*bf_assoc)(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
+-			 struct ieee80211_sta *sta);
++	void (*bf_assoc)(struct rtw89_dev *rtwdev,
++			 struct rtw89_vif_link *rtwvif_link,
++			 struct rtw89_sta_link *rtwsta_link);
+ 
+ 	int (*typ_fltr_opt)(struct rtw89_dev *rtwdev,
+ 			    enum rtw89_machdr_frame_type type,
+@@ -1004,12 +1005,12 @@ struct rtw89_mac_gen_def {
+ 	bool (*is_txq_empty)(struct rtw89_dev *rtwdev);
+ 
+ 	int (*add_chan_list)(struct rtw89_dev *rtwdev,
+-			     struct rtw89_vif *rtwvif, bool connected);
++			     struct rtw89_vif_link *rtwvif_link, bool connected);
+ 	int (*add_chan_list_pno)(struct rtw89_dev *rtwdev,
+-				 struct rtw89_vif *rtwvif);
++				 struct rtw89_vif_link *rtwvif_link);
+ 	int (*scan_offload)(struct rtw89_dev *rtwdev,
+ 			    struct rtw89_scan_option *option,
+-			    struct rtw89_vif *rtwvif,
++			    struct rtw89_vif_link *rtwvif_link,
+ 			    bool wowlan);
+ 
+ 	int (*wow_config_mac)(struct rtw89_dev *rtwdev, bool enable_wow);
+@@ -1033,81 +1034,89 @@ u32 rtw89_mac_reg_by_port(struct rtw89_dev *rtwdev, u32 base, u8 port, u8 mac_id
+ }
+ 
+ static inline u32
+-rtw89_read32_port(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, u32 base)
++rtw89_read32_port(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, u32 base)
+ {
+ 	u32 reg;
+ 
+-	reg = rtw89_mac_reg_by_port(rtwdev, base, rtwvif->port, rtwvif->mac_idx);
++	reg = rtw89_mac_reg_by_port(rtwdev, base, rtwvif_link->port,
++				    rtwvif_link->mac_idx);
+ 	return rtw89_read32(rtwdev, reg);
+ }
+ 
+ static inline u32
+-rtw89_read32_port_mask(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
++rtw89_read32_port_mask(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
+ 		       u32 base, u32 mask)
+ {
+ 	u32 reg;
+ 
+-	reg = rtw89_mac_reg_by_port(rtwdev, base, rtwvif->port, rtwvif->mac_idx);
++	reg = rtw89_mac_reg_by_port(rtwdev, base, rtwvif_link->port,
++				    rtwvif_link->mac_idx);
+ 	return rtw89_read32_mask(rtwdev, reg, mask);
+ }
+ 
+ static inline void
+-rtw89_write32_port(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, u32 base,
++rtw89_write32_port(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, u32 base,
+ 		   u32 data)
+ {
+ 	u32 reg;
+ 
+-	reg = rtw89_mac_reg_by_port(rtwdev, base, rtwvif->port, rtwvif->mac_idx);
++	reg = rtw89_mac_reg_by_port(rtwdev, base, rtwvif_link->port,
++				    rtwvif_link->mac_idx);
+ 	rtw89_write32(rtwdev, reg, data);
+ }
+ 
+ static inline void
+-rtw89_write32_port_mask(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
++rtw89_write32_port_mask(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
+ 			u32 base, u32 mask, u32 data)
+ {
+ 	u32 reg;
+ 
+-	reg = rtw89_mac_reg_by_port(rtwdev, base, rtwvif->port, rtwvif->mac_idx);
++	reg = rtw89_mac_reg_by_port(rtwdev, base, rtwvif_link->port,
++				    rtwvif_link->mac_idx);
+ 	rtw89_write32_mask(rtwdev, reg, mask, data);
+ }
+ 
+ static inline void
+-rtw89_write16_port_mask(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
++rtw89_write16_port_mask(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
+ 			u32 base, u32 mask, u16 data)
+ {
+ 	u32 reg;
+ 
+-	reg = rtw89_mac_reg_by_port(rtwdev, base, rtwvif->port, rtwvif->mac_idx);
++	reg = rtw89_mac_reg_by_port(rtwdev, base, rtwvif_link->port,
++				    rtwvif_link->mac_idx);
+ 	rtw89_write16_mask(rtwdev, reg, mask, data);
+ }
+ 
+ static inline void
+-rtw89_write32_port_clr(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
++rtw89_write32_port_clr(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
+ 		       u32 base, u32 bit)
+ {
+ 	u32 reg;
+ 
+-	reg = rtw89_mac_reg_by_port(rtwdev, base, rtwvif->port, rtwvif->mac_idx);
++	reg = rtw89_mac_reg_by_port(rtwdev, base, rtwvif_link->port,
++				    rtwvif_link->mac_idx);
+ 	rtw89_write32_clr(rtwdev, reg, bit);
+ }
+ 
+ static inline void
+-rtw89_write16_port_clr(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
++rtw89_write16_port_clr(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
+ 		       u32 base, u16 bit)
+ {
+ 	u32 reg;
+ 
+-	reg = rtw89_mac_reg_by_port(rtwdev, base, rtwvif->port, rtwvif->mac_idx);
++	reg = rtw89_mac_reg_by_port(rtwdev, base, rtwvif_link->port,
++				    rtwvif_link->mac_idx);
+ 	rtw89_write16_clr(rtwdev, reg, bit);
+ }
+ 
+ static inline void
+-rtw89_write32_port_set(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
++rtw89_write32_port_set(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
+ 		       u32 base, u32 bit)
+ {
+ 	u32 reg;
+ 
+-	reg = rtw89_mac_reg_by_port(rtwdev, base, rtwvif->port, rtwvif->mac_idx);
++	reg = rtw89_mac_reg_by_port(rtwdev, base, rtwvif_link->port,
++				    rtwvif_link->mac_idx);
+ 	rtw89_write32_set(rtwdev, reg, bit);
+ }
+ 
+@@ -1139,21 +1148,21 @@ int rtw89_mac_dle_dfi_qempty_cfg(struct rtw89_dev *rtwdev,
+ 				 struct rtw89_mac_dle_dfi_qempty *qempty);
+ void rtw89_mac_dump_l0_to_l1(struct rtw89_dev *rtwdev,
+ 			     enum mac_ax_err_info err);
+-int rtw89_mac_add_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *vif);
+-int rtw89_mac_port_update(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif);
++int rtw89_mac_add_vif(struct rtw89_dev *rtwdev, struct rtw89_vif_link *vif);
++int rtw89_mac_port_update(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link);
+ void rtw89_mac_port_tsf_sync(struct rtw89_dev *rtwdev,
+-			     struct rtw89_vif *rtwvif,
+-			     struct rtw89_vif *rtwvif_src,
++			     struct rtw89_vif_link *rtwvif_link,
++			     struct rtw89_vif_link *rtwvif_src,
+ 			     u16 offset_tu);
+-int rtw89_mac_port_get_tsf(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
++int rtw89_mac_port_get_tsf(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
+ 			   u64 *tsf);
+ void rtw89_mac_port_cfg_rx_sync(struct rtw89_dev *rtwdev,
+-				struct rtw89_vif *rtwvif, bool en);
++				struct rtw89_vif_link *rtwvif_link, bool en);
+ void rtw89_mac_set_he_obss_narrow_bw_ru(struct rtw89_dev *rtwdev,
+-					struct ieee80211_vif *vif);
+-void rtw89_mac_stop_ap(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif);
++					struct rtw89_vif_link *rtwvif_link);
++void rtw89_mac_stop_ap(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link);
+ void rtw89_mac_enable_beacon_for_ap_vifs(struct rtw89_dev *rtwdev, bool en);
+-int rtw89_mac_remove_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *vif);
++int rtw89_mac_remove_vif(struct rtw89_dev *rtwdev, struct rtw89_vif_link *vif);
+ int rtw89_mac_enable_bb_rf(struct rtw89_dev *rtwdev);
+ int rtw89_mac_disable_bb_rf(struct rtw89_dev *rtwdev);
+ 
+@@ -1251,27 +1260,30 @@ void rtw89_mac_power_mode_change(struct rtw89_dev *rtwdev, bool enter);
+ void rtw89_mac_notify_wake(struct rtw89_dev *rtwdev);
+ 
+ static inline
+-void rtw89_mac_bf_assoc(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
+-			struct ieee80211_sta *sta)
++void rtw89_mac_bf_assoc(struct rtw89_dev *rtwdev,
++			struct rtw89_vif_link *rtwvif_link,
++			struct rtw89_sta_link *rtwsta_link)
+ {
+ 	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+ 
+ 	if (mac->bf_assoc)
+-		mac->bf_assoc(rtwdev, vif, sta);
++		mac->bf_assoc(rtwdev, rtwvif_link, rtwsta_link);
+ }
+ 
+-void rtw89_mac_bf_disassoc(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
+-			   struct ieee80211_sta *sta);
++void rtw89_mac_bf_disassoc(struct rtw89_dev *rtwdev,
++			   struct rtw89_vif_link *rtwvif_link,
++			   struct rtw89_sta_link *rtwsta_link);
+ void rtw89_mac_bf_set_gid_table(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
+ 				struct ieee80211_bss_conf *conf);
+ void rtw89_mac_bf_monitor_calc(struct rtw89_dev *rtwdev,
+-			       struct ieee80211_sta *sta, bool disconnect);
++			       struct rtw89_sta_link *rtwsta_link,
++			       bool disconnect);
+ void _rtw89_mac_bf_monitor_track(struct rtw89_dev *rtwdev);
+ void rtw89_mac_bfee_ctrl(struct rtw89_dev *rtwdev, u8 mac_idx, bool en);
+-int rtw89_mac_vif_init(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif);
+-int rtw89_mac_vif_deinit(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif);
++int rtw89_mac_vif_init(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link);
++int rtw89_mac_vif_deinit(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link);
+ int rtw89_mac_set_hw_muedca_ctrl(struct rtw89_dev *rtwdev,
+-				 struct rtw89_vif *rtwvif, bool en);
++				 struct rtw89_vif_link *rtwvif_link, bool en);
+ int rtw89_mac_set_macid_pause(struct rtw89_dev *rtwdev, u8 macid, bool pause);
+ 
+ static inline void rtw89_mac_bf_monitor_track(struct rtw89_dev *rtwdev)
+@@ -1376,15 +1388,15 @@ static inline bool rtw89_mac_get_power_state(struct rtw89_dev *rtwdev)
+ 	return !!val;
+ }
+ 
+-int rtw89_mac_set_tx_time(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
++int rtw89_mac_set_tx_time(struct rtw89_dev *rtwdev, struct rtw89_sta_link *rtwsta_link,
+ 			  bool resume, u32 tx_time);
+-int rtw89_mac_get_tx_time(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
++int rtw89_mac_get_tx_time(struct rtw89_dev *rtwdev, struct rtw89_sta_link *rtwsta_link,
+ 			  u32 *tx_time);
+ int rtw89_mac_set_tx_retry_limit(struct rtw89_dev *rtwdev,
+-				 struct rtw89_sta *rtwsta,
++				 struct rtw89_sta_link *rtwsta_link,
+ 				 bool resume, u8 tx_retry);
+ int rtw89_mac_get_tx_retry_limit(struct rtw89_dev *rtwdev,
+-				 struct rtw89_sta *rtwsta, u8 *tx_retry);
++				 struct rtw89_sta_link *rtwsta_link, u8 *tx_retry);
+ 
+ enum rtw89_mac_xtal_si_offset {
+ 	XTAL0 = 0x0,
+diff --git a/drivers/net/wireless/realtek/rtw89/mac80211.c b/drivers/net/wireless/realtek/rtw89/mac80211.c
+index 48ad0d0f76bff4..13fb3cac27016b 100644
+--- a/drivers/net/wireless/realtek/rtw89/mac80211.c
++++ b/drivers/net/wireless/realtek/rtw89/mac80211.c
+@@ -23,13 +23,13 @@ static void rtw89_ops_tx(struct ieee80211_hw *hw,
+ 	struct rtw89_dev *rtwdev = hw->priv;
+ 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ 	struct ieee80211_vif *vif = info->control.vif;
+-	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
++	struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
+ 	struct ieee80211_sta *sta = control->sta;
+ 	u32 flags = IEEE80211_SKB_CB(skb)->flags;
+ 	int ret, qsel;
+ 
+ 	if (rtwvif->offchan && !(flags & IEEE80211_TX_CTL_TX_OFFCHAN) && sta) {
+-		struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
++		struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
+ 
+ 		rtw89_debug(rtwdev, RTW89_DBG_TXRX, "ops_tx during offchan\n");
+ 		skb_queue_tail(&rtwsta->roc_queue, skb);
+@@ -105,11 +105,61 @@ static int rtw89_ops_config(struct ieee80211_hw *hw, u32 changed)
+ 	return 0;
+ }
+ 
++static int __rtw89_ops_add_iface_link(struct rtw89_dev *rtwdev,
++				      struct rtw89_vif_link *rtwvif_link)
++{
++	struct ieee80211_bss_conf *bss_conf;
++	int ret;
++
++	rtw89_leave_ps_mode(rtwdev);
++
++	rtw89_vif_type_mapping(rtwvif_link, false);
++
++	INIT_WORK(&rtwvif_link->update_beacon_work, rtw89_core_update_beacon_work);
++	INIT_LIST_HEAD(&rtwvif_link->general_pkt_list);
++
++	rtwvif_link->hit_rule = 0;
++	rtwvif_link->bcn_hit_cond = 0;
++	rtwvif_link->chanctx_assigned = false;
++	rtwvif_link->chanctx_idx = RTW89_CHANCTX_0;
++	rtwvif_link->reg_6ghz_power = RTW89_REG_6GHZ_POWER_DFLT;
++
++	rcu_read_lock();
++
++	bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
++	ether_addr_copy(rtwvif_link->mac_addr, bss_conf->addr);
++
++	rcu_read_unlock();
++
++	ret = rtw89_mac_add_vif(rtwdev, rtwvif_link);
++	if (ret)
++		return ret;
++
++	rtw89_btc_ntfy_role_info(rtwdev, rtwvif_link, NULL, BTC_ROLE_START);
++	return 0;
++}
++
++static void __rtw89_ops_remove_iface_link(struct rtw89_dev *rtwdev,
++					  struct rtw89_vif_link *rtwvif_link)
++{
++	mutex_unlock(&rtwdev->mutex);
++	cancel_work_sync(&rtwvif_link->update_beacon_work);
++	mutex_lock(&rtwdev->mutex);
++
++	rtw89_leave_ps_mode(rtwdev);
++
++	rtw89_btc_ntfy_role_info(rtwdev, rtwvif_link, NULL, BTC_ROLE_STOP);
++
++	rtw89_mac_remove_vif(rtwdev, rtwvif_link);
++}
++
+ static int rtw89_ops_add_interface(struct ieee80211_hw *hw,
+ 				   struct ieee80211_vif *vif)
+ {
+ 	struct rtw89_dev *rtwdev = hw->priv;
+-	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
++	struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
++	struct rtw89_vif_link *rtwvif_link;
++	u8 mac_id, port;
+ 	int ret = 0;
+ 
+ 	rtw89_debug(rtwdev, RTW89_DBG_STATE, "add vif %pM type %d, p2p %d\n",
+@@ -123,49 +173,56 @@ static int rtw89_ops_add_interface(struct ieee80211_hw *hw,
+ 		vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
+ 				     IEEE80211_VIF_SUPPORTS_CQM_RSSI;
+ 
+-	rtwvif->rtwdev = rtwdev;
+-	rtwvif->roc.state = RTW89_ROC_IDLE;
+-	rtwvif->offchan = false;
++	mac_id = rtw89_acquire_mac_id(rtwdev);
++	if (mac_id == RTW89_MAX_MAC_ID_NUM) {
++		ret = -ENOSPC;
++		goto err;
++	}
++
++	port = rtw89_core_acquire_bit_map(rtwdev->hw_port, RTW89_PORT_NUM);
++	if (port == RTW89_PORT_NUM) {
++		ret = -ENOSPC;
++		goto release_macid;
++	}
++
++	rtw89_init_vif(rtwdev, rtwvif, mac_id, port);
++
++	rtw89_core_txq_init(rtwdev, vif->txq);
++
+ 	if (!rtw89_rtwvif_in_list(rtwdev, rtwvif))
+ 		list_add_tail(&rtwvif->list, &rtwdev->rtwvifs_list);
+ 
+-	INIT_WORK(&rtwvif->update_beacon_work, rtw89_core_update_beacon_work);
++	ether_addr_copy(rtwvif->mac_addr, vif->addr);
++
++	rtwvif->offchan = false;
++	rtwvif->roc.state = RTW89_ROC_IDLE;
+ 	INIT_DELAYED_WORK(&rtwvif->roc.roc_work, rtw89_roc_work);
+-	rtw89_leave_ps_mode(rtwdev);
+ 
+ 	rtw89_traffic_stats_init(rtwdev, &rtwvif->stats);
+-	rtw89_vif_type_mapping(vif, false);
+-	rtwvif->port = rtw89_core_acquire_bit_map(rtwdev->hw_port,
+-						  RTW89_PORT_NUM);
+-	if (rtwvif->port == RTW89_PORT_NUM) {
+-		ret = -ENOSPC;
+-		list_del_init(&rtwvif->list);
+-		goto out;
+-	}
+-
+-	rtwvif->bcn_hit_cond = 0;
+-	rtwvif->mac_idx = RTW89_MAC_0;
+-	rtwvif->phy_idx = RTW89_PHY_0;
+-	rtwvif->chanctx_idx = RTW89_CHANCTX_0;
+-	rtwvif->chanctx_assigned = false;
+-	rtwvif->hit_rule = 0;
+-	rtwvif->reg_6ghz_power = RTW89_REG_6GHZ_POWER_DFLT;
+-	ether_addr_copy(rtwvif->mac_addr, vif->addr);
+-	INIT_LIST_HEAD(&rtwvif->general_pkt_list);
+ 
+-	ret = rtw89_mac_add_vif(rtwdev, rtwvif);
+-	if (ret) {
+-		rtw89_core_release_bit_map(rtwdev->hw_port, rtwvif->port);
+-		list_del_init(&rtwvif->list);
+-		goto out;
++	rtwvif_link = rtw89_vif_set_link(rtwvif, 0);
++	if (!rtwvif_link) {
++		ret = -EINVAL;
++		goto release_port;
+ 	}
+ 
+-	rtw89_core_txq_init(rtwdev, vif->txq);
+-
+-	rtw89_btc_ntfy_role_info(rtwdev, rtwvif, NULL, BTC_ROLE_START);
++	ret = __rtw89_ops_add_iface_link(rtwdev, rtwvif_link);
++	if (ret)
++		goto unset_link;
+ 
+ 	rtw89_recalc_lps(rtwdev);
+-out:
++
++	mutex_unlock(&rtwdev->mutex);
++	return 0;
++
++unset_link:
++	rtw89_vif_unset_link(rtwvif, 0);
++release_port:
++	list_del_init(&rtwvif->list);
++	rtw89_core_release_bit_map(rtwdev->hw_port, port);
++release_macid:
++	rtw89_release_mac_id(rtwdev, mac_id);
++err:
+ 	mutex_unlock(&rtwdev->mutex);
+ 
+ 	return ret;
+@@ -175,20 +232,35 @@ static void rtw89_ops_remove_interface(struct ieee80211_hw *hw,
+ 				       struct ieee80211_vif *vif)
+ {
+ 	struct rtw89_dev *rtwdev = hw->priv;
+-	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
++	struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
++	u8 macid = rtw89_vif_get_main_macid(rtwvif);
++	u8 port = rtw89_vif_get_main_port(rtwvif);
++	struct rtw89_vif_link *rtwvif_link;
+ 
+ 	rtw89_debug(rtwdev, RTW89_DBG_STATE, "remove vif %pM type %d p2p %d\n",
+ 		    vif->addr, vif->type, vif->p2p);
+ 
+-	cancel_work_sync(&rtwvif->update_beacon_work);
+ 	cancel_delayed_work_sync(&rtwvif->roc.roc_work);
+ 
+ 	mutex_lock(&rtwdev->mutex);
+-	rtw89_leave_ps_mode(rtwdev);
+-	rtw89_btc_ntfy_role_info(rtwdev, rtwvif, NULL, BTC_ROLE_STOP);
+-	rtw89_mac_remove_vif(rtwdev, rtwvif);
+-	rtw89_core_release_bit_map(rtwdev->hw_port, rtwvif->port);
++
++	rtwvif_link = rtwvif->links[0];
++	if (unlikely(!rtwvif_link)) {
++		rtw89_err(rtwdev,
++			  "%s: rtwvif link (link_id %u) is not active\n",
++			  __func__, 0);
++		goto bottom;
++	}
++
++	__rtw89_ops_remove_iface_link(rtwdev, rtwvif_link);
++
++	rtw89_vif_unset_link(rtwvif, 0);
++
++bottom:
+ 	list_del_init(&rtwvif->list);
++	rtw89_core_release_bit_map(rtwdev->hw_port, port);
++	rtw89_release_mac_id(rtwdev, macid);
++
+ 	rtw89_recalc_lps(rtwdev);
+ 	rtw89_enter_ips_by_hwflags(rtwdev);
+ 
+@@ -311,24 +383,30 @@ static const u8 ac_to_fw_idx[IEEE80211_NUM_ACS] = {
+ };
+ 
+ static u8 rtw89_aifsn_to_aifs(struct rtw89_dev *rtwdev,
+-			      struct rtw89_vif *rtwvif, u8 aifsn)
++			      struct rtw89_vif_link *rtwvif_link, u8 aifsn)
+ {
+-	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
+ 	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
+-						       rtwvif->chanctx_idx);
++						       rtwvif_link->chanctx_idx);
++	struct ieee80211_bss_conf *bss_conf;
+ 	u8 slot_time;
+ 	u8 sifs;
+ 
+-	slot_time = vif->bss_conf.use_short_slot ? 9 : 20;
++	rcu_read_lock();
++
++	bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
++	slot_time = bss_conf->use_short_slot ? 9 : 20;
++
++	rcu_read_unlock();
++
+ 	sifs = chan->band_type == RTW89_BAND_2G ? 10 : 16;
+ 
+ 	return aifsn * slot_time + sifs;
+ }
+ 
+ static void ____rtw89_conf_tx_edca(struct rtw89_dev *rtwdev,
+-				   struct rtw89_vif *rtwvif, u16 ac)
++				   struct rtw89_vif_link *rtwvif_link, u16 ac)
+ {
+-	struct ieee80211_tx_queue_params *params = &rtwvif->tx_params[ac];
++	struct ieee80211_tx_queue_params *params = &rtwvif_link->tx_params[ac];
+ 	u32 val;
+ 	u8 ecw_max, ecw_min;
+ 	u8 aifs;
+@@ -336,12 +414,12 @@ static void ____rtw89_conf_tx_edca(struct rtw89_dev *rtwdev,
+ 	/* 2^ecw - 1 = cw; ecw = log2(cw + 1) */
+ 	ecw_max = ilog2(params->cw_max + 1);
+ 	ecw_min = ilog2(params->cw_min + 1);
+-	aifs = rtw89_aifsn_to_aifs(rtwdev, rtwvif, params->aifs);
++	aifs = rtw89_aifsn_to_aifs(rtwdev, rtwvif_link, params->aifs);
+ 	val = FIELD_PREP(FW_EDCA_PARAM_TXOPLMT_MSK, params->txop) |
+ 	      FIELD_PREP(FW_EDCA_PARAM_CWMAX_MSK, ecw_max) |
+ 	      FIELD_PREP(FW_EDCA_PARAM_CWMIN_MSK, ecw_min) |
+ 	      FIELD_PREP(FW_EDCA_PARAM_AIFS_MSK, aifs);
+-	rtw89_fw_h2c_set_edca(rtwdev, rtwvif, ac_to_fw_idx[ac], val);
++	rtw89_fw_h2c_set_edca(rtwdev, rtwvif_link, ac_to_fw_idx[ac], val);
+ }
+ 
+ #define R_MUEDCA_ACS_PARAM(acs) {R_AX_MUEDCA_ ## acs ## _PARAM_0, \
+@@ -355,9 +433,9 @@ static const u32 ac_to_mu_edca_param[IEEE80211_NUM_ACS][RTW89_CHIP_GEN_NUM] = {
+ };
+ 
+ static void ____rtw89_conf_tx_mu_edca(struct rtw89_dev *rtwdev,
+-				      struct rtw89_vif *rtwvif, u16 ac)
++				      struct rtw89_vif_link *rtwvif_link, u16 ac)
+ {
+-	struct ieee80211_tx_queue_params *params = &rtwvif->tx_params[ac];
++	struct ieee80211_tx_queue_params *params = &rtwvif_link->tx_params[ac];
+ 	struct ieee80211_he_mu_edca_param_ac_rec *mu_edca;
+ 	int gen = rtwdev->chip->chip_gen;
+ 	u8 aifs, aifsn;
+@@ -370,32 +448,199 @@ static void ____rtw89_conf_tx_mu_edca(struct rtw89_dev *rtwdev,
+ 
+ 	mu_edca = &params->mu_edca_param_rec;
+ 	aifsn = FIELD_GET(GENMASK(3, 0), mu_edca->aifsn);
+-	aifs = aifsn ? rtw89_aifsn_to_aifs(rtwdev, rtwvif, aifsn) : 0;
++	aifs = aifsn ? rtw89_aifsn_to_aifs(rtwdev, rtwvif_link, aifsn) : 0;
+ 	timer_32us = mu_edca->mu_edca_timer << 8;
+ 
+ 	val = FIELD_PREP(B_AX_MUEDCA_BE_PARAM_0_TIMER_MASK, timer_32us) |
+ 	      FIELD_PREP(B_AX_MUEDCA_BE_PARAM_0_CW_MASK, mu_edca->ecw_min_max) |
+ 	      FIELD_PREP(B_AX_MUEDCA_BE_PARAM_0_AIFS_MASK, aifs);
+-	reg = rtw89_mac_reg_by_idx(rtwdev, ac_to_mu_edca_param[ac][gen], rtwvif->mac_idx);
++	reg = rtw89_mac_reg_by_idx(rtwdev, ac_to_mu_edca_param[ac][gen],
++				   rtwvif_link->mac_idx);
+ 	rtw89_write32(rtwdev, reg, val);
+ 
+-	rtw89_mac_set_hw_muedca_ctrl(rtwdev, rtwvif, true);
++	rtw89_mac_set_hw_muedca_ctrl(rtwdev, rtwvif_link, true);
+ }
+ 
+ static void __rtw89_conf_tx(struct rtw89_dev *rtwdev,
+-			    struct rtw89_vif *rtwvif, u16 ac)
++			    struct rtw89_vif_link *rtwvif_link, u16 ac)
+ {
+-	____rtw89_conf_tx_edca(rtwdev, rtwvif, ac);
+-	____rtw89_conf_tx_mu_edca(rtwdev, rtwvif, ac);
++	____rtw89_conf_tx_edca(rtwdev, rtwvif_link, ac);
++	____rtw89_conf_tx_mu_edca(rtwdev, rtwvif_link, ac);
+ }
+ 
+ static void rtw89_conf_tx(struct rtw89_dev *rtwdev,
+-			  struct rtw89_vif *rtwvif)
++			  struct rtw89_vif_link *rtwvif_link)
+ {
+ 	u16 ac;
+ 
+ 	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
+-		__rtw89_conf_tx(rtwdev, rtwvif, ac);
++		__rtw89_conf_tx(rtwdev, rtwvif_link, ac);
++}
++
++static int __rtw89_ops_sta_add(struct rtw89_dev *rtwdev,
++			       struct ieee80211_vif *vif,
++			       struct ieee80211_sta *sta)
++{
++	struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
++	struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
++	struct rtw89_vif_link *rtwvif_link;
++	struct rtw89_sta_link *rtwsta_link;
++	bool acquire_macid = false;
++	u8 macid;
++	int ret;
++	int i;
++
++	if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) {
++		/* for station mode, assign the mac_id from itself */
++		macid = rtw89_vif_get_main_macid(rtwvif);
++	} else {
++		macid = rtw89_acquire_mac_id(rtwdev);
++		if (macid == RTW89_MAX_MAC_ID_NUM)
++			return -ENOSPC;
++
++		acquire_macid = true;
++	}
++
++	rtw89_init_sta(rtwdev, rtwvif, rtwsta, macid);
++
++	for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
++		rtw89_core_txq_init(rtwdev, sta->txq[i]);
++
++	skb_queue_head_init(&rtwsta->roc_queue);
++
++	rtwsta_link = rtw89_sta_set_link(rtwsta, sta->deflink.link_id);
++	if (!rtwsta_link) {
++		ret = -EINVAL;
++		goto err;
++	}
++
++	rtwvif_link = rtwsta_link->rtwvif_link;
++
++	ret = rtw89_core_sta_link_add(rtwdev, rtwvif_link, rtwsta_link);
++	if (ret)
++		goto unset_link;
++
++	if (vif->type == NL80211_IFTYPE_AP || sta->tdls)
++		rtw89_queue_chanctx_change(rtwdev, RTW89_CHANCTX_REMOTE_STA_CHANGE);
++
++	return 0;
++
++unset_link:
++	rtw89_sta_unset_link(rtwsta, sta->deflink.link_id);
++err:
++	if (acquire_macid)
++		rtw89_release_mac_id(rtwdev, macid);
++
++	return ret;
++}
++
++static int __rtw89_ops_sta_assoc(struct rtw89_dev *rtwdev,
++				 struct ieee80211_vif *vif,
++				 struct ieee80211_sta *sta,
++				 bool station_mode)
++{
++	struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
++	struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
++	struct rtw89_vif_link *rtwvif_link;
++	struct rtw89_sta_link *rtwsta_link;
++	unsigned int link_id;
++	int ret;
++
++	rtw89_sta_for_each_link(rtwsta, rtwsta_link, link_id) {
++		rtwvif_link = rtwsta_link->rtwvif_link;
++
++		if (station_mode)
++			rtw89_vif_type_mapping(rtwvif_link, true);
++
++		ret = rtw89_core_sta_link_assoc(rtwdev, rtwvif_link, rtwsta_link);
++		if (ret)
++			return ret;
++	}
++
++	rtwdev->total_sta_assoc++;
++	if (sta->tdls)
++		rtwvif->tdls_peer++;
++
++	return 0;
++}
++
++static int __rtw89_ops_sta_disassoc(struct rtw89_dev *rtwdev,
++				    struct ieee80211_vif *vif,
++				    struct ieee80211_sta *sta)
++{
++	struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
++	struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
++	struct rtw89_vif_link *rtwvif_link;
++	struct rtw89_sta_link *rtwsta_link;
++	unsigned int link_id;
++	int ret;
++
++	rtw89_sta_for_each_link(rtwsta, rtwsta_link, link_id) {
++		rtwvif_link = rtwsta_link->rtwvif_link;
++		ret = rtw89_core_sta_link_disassoc(rtwdev, rtwvif_link, rtwsta_link);
++		if (ret)
++			return ret;
++	}
++
++	rtwsta->disassoc = true;
++
++	rtwdev->total_sta_assoc--;
++	if (sta->tdls)
++		rtwvif->tdls_peer--;
++
++	return 0;
++}
++
++static int __rtw89_ops_sta_disconnect(struct rtw89_dev *rtwdev,
++				      struct ieee80211_vif *vif,
++				      struct ieee80211_sta *sta)
++{
++	struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
++	struct rtw89_vif_link *rtwvif_link;
++	struct rtw89_sta_link *rtwsta_link;
++	unsigned int link_id;
++	int ret;
++
++	rtw89_core_free_sta_pending_ba(rtwdev, sta);
++	rtw89_core_free_sta_pending_forbid_ba(rtwdev, sta);
++	rtw89_core_free_sta_pending_roc_tx(rtwdev, sta);
++
++	rtw89_sta_for_each_link(rtwsta, rtwsta_link, link_id) {
++		rtwvif_link = rtwsta_link->rtwvif_link;
++		ret = rtw89_core_sta_link_disconnect(rtwdev, rtwvif_link, rtwsta_link);
++		if (ret)
++			return ret;
++	}
++
++	return 0;
++}
++
++static int __rtw89_ops_sta_remove(struct rtw89_dev *rtwdev,
++				  struct ieee80211_vif *vif,
++				  struct ieee80211_sta *sta)
++{
++	struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
++	u8 macid = rtw89_sta_get_main_macid(rtwsta);
++	struct rtw89_vif_link *rtwvif_link;
++	struct rtw89_sta_link *rtwsta_link;
++	unsigned int link_id;
++	int ret;
++
++	rtw89_sta_for_each_link(rtwsta, rtwsta_link, link_id) {
++		rtwvif_link = rtwsta_link->rtwvif_link;
++		ret = rtw89_core_sta_link_remove(rtwdev, rtwvif_link, rtwsta_link);
++		if (ret)
++			return ret;
++
++		rtw89_sta_unset_link(rtwsta, link_id);
++	}
++
++	if (vif->type == NL80211_IFTYPE_AP || sta->tdls) {
++		rtw89_release_mac_id(rtwdev, macid);
++		rtw89_queue_chanctx_change(rtwdev, RTW89_CHANCTX_REMOTE_STA_CHANGE);
++	}
++
++	return 0;
+ }
+ 
+ static void rtw89_station_mode_sta_assoc(struct rtw89_dev *rtwdev,
+@@ -412,16 +657,34 @@ static void rtw89_station_mode_sta_assoc(struct rtw89_dev *rtwdev,
+ 		return;
+ 	}
+ 
+-	rtw89_vif_type_mapping(vif, true);
++	__rtw89_ops_sta_assoc(rtwdev, vif, sta, true);
++}
++
++static void __rtw89_ops_bss_link_assoc(struct rtw89_dev *rtwdev,
++				       struct rtw89_vif_link *rtwvif_link)
++{
++	rtw89_phy_set_bss_color(rtwdev, rtwvif_link);
++	rtw89_chip_cfg_txpwr_ul_tb_offset(rtwdev, rtwvif_link);
++	rtw89_mac_port_update(rtwdev, rtwvif_link);
++	rtw89_mac_set_he_obss_narrow_bw_ru(rtwdev, rtwvif_link);
++}
++
++static void __rtw89_ops_bss_assoc(struct rtw89_dev *rtwdev,
++				  struct ieee80211_vif *vif)
++{
++	struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
++	struct rtw89_vif_link *rtwvif_link;
++	unsigned int link_id;
+ 
+-	rtw89_core_sta_assoc(rtwdev, vif, sta);
++	rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id)
++		__rtw89_ops_bss_link_assoc(rtwdev, rtwvif_link);
+ }
+ 
+ static void rtw89_ops_vif_cfg_changed(struct ieee80211_hw *hw,
+ 				      struct ieee80211_vif *vif, u64 changed)
+ {
+ 	struct rtw89_dev *rtwdev = hw->priv;
+-	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
++	struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
+ 
+ 	mutex_lock(&rtwdev->mutex);
+ 	rtw89_leave_ps_mode(rtwdev);
+@@ -429,10 +692,7 @@ static void rtw89_ops_vif_cfg_changed(struct ieee80211_hw *hw,
+ 	if (changed & BSS_CHANGED_ASSOC) {
+ 		if (vif->cfg.assoc) {
+ 			rtw89_station_mode_sta_assoc(rtwdev, vif);
+-			rtw89_phy_set_bss_color(rtwdev, vif);
+-			rtw89_chip_cfg_txpwr_ul_tb_offset(rtwdev, vif);
+-			rtw89_mac_port_update(rtwdev, rtwvif);
+-			rtw89_mac_set_he_obss_narrow_bw_ru(rtwdev, vif);
++			__rtw89_ops_bss_assoc(rtwdev, vif);
+ 
+ 			rtw89_queue_chanctx_work(rtwdev);
+ 		} else {
+@@ -459,39 +719,49 @@ static void rtw89_ops_link_info_changed(struct ieee80211_hw *hw,
+ 					u64 changed)
+ {
+ 	struct rtw89_dev *rtwdev = hw->priv;
+-	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
++	struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
++	struct rtw89_vif_link *rtwvif_link;
+ 
+ 	mutex_lock(&rtwdev->mutex);
+ 	rtw89_leave_ps_mode(rtwdev);
+ 
++	rtwvif_link = rtwvif->links[conf->link_id];
++	if (unlikely(!rtwvif_link)) {
++		rtw89_err(rtwdev,
++			  "%s: rtwvif link (link_id %u) is not active\n",
++			  __func__, conf->link_id);
++		goto out;
++	}
++
+ 	if (changed & BSS_CHANGED_BSSID) {
+-		ether_addr_copy(rtwvif->bssid, conf->bssid);
+-		rtw89_cam_bssid_changed(rtwdev, rtwvif);
+-		rtw89_fw_h2c_cam(rtwdev, rtwvif, NULL, NULL);
+-		WRITE_ONCE(rtwvif->sync_bcn_tsf, 0);
++		ether_addr_copy(rtwvif_link->bssid, conf->bssid);
++		rtw89_cam_bssid_changed(rtwdev, rtwvif_link);
++		rtw89_fw_h2c_cam(rtwdev, rtwvif_link, NULL, NULL);
++		WRITE_ONCE(rtwvif_link->sync_bcn_tsf, 0);
+ 	}
+ 
+ 	if (changed & BSS_CHANGED_BEACON)
+-		rtw89_chip_h2c_update_beacon(rtwdev, rtwvif);
++		rtw89_chip_h2c_update_beacon(rtwdev, rtwvif_link);
+ 
+ 	if (changed & BSS_CHANGED_ERP_SLOT)
+-		rtw89_conf_tx(rtwdev, rtwvif);
++		rtw89_conf_tx(rtwdev, rtwvif_link);
+ 
+ 	if (changed & BSS_CHANGED_HE_BSS_COLOR)
+-		rtw89_phy_set_bss_color(rtwdev, vif);
++		rtw89_phy_set_bss_color(rtwdev, rtwvif_link);
+ 
+ 	if (changed & BSS_CHANGED_MU_GROUPS)
+ 		rtw89_mac_bf_set_gid_table(rtwdev, vif, conf);
+ 
+ 	if (changed & BSS_CHANGED_P2P_PS)
+-		rtw89_core_update_p2p_ps(rtwdev, vif);
++		rtw89_core_update_p2p_ps(rtwdev, rtwvif_link, conf);
+ 
+ 	if (changed & BSS_CHANGED_CQM)
+-		rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, vif, true);
++		rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, rtwvif_link, true);
+ 
+ 	if (changed & BSS_CHANGED_TPE)
+-		rtw89_reg_6ghz_recalc(rtwdev, rtwvif, true);
++		rtw89_reg_6ghz_recalc(rtwdev, rtwvif_link, true);
+ 
++out:
+ 	mutex_unlock(&rtwdev->mutex);
+ }
+ 
+@@ -500,12 +770,21 @@ static int rtw89_ops_start_ap(struct ieee80211_hw *hw,
+ 			      struct ieee80211_bss_conf *link_conf)
+ {
+ 	struct rtw89_dev *rtwdev = hw->priv;
+-	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
++	struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
++	struct rtw89_vif_link *rtwvif_link;
+ 	const struct rtw89_chan *chan;
+ 
+ 	mutex_lock(&rtwdev->mutex);
+ 
+-	chan = rtw89_chan_get(rtwdev, rtwvif->chanctx_idx);
++	rtwvif_link = rtwvif->links[link_conf->link_id];
++	if (unlikely(!rtwvif_link)) {
++		rtw89_err(rtwdev,
++			  "%s: rtwvif link (link_id %u) is not active\n",
++			  __func__, link_conf->link_id);
++		goto out;
++	}
++
++	chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx);
+ 	if (chan->band_type == RTW89_BAND_6G) {
+ 		mutex_unlock(&rtwdev->mutex);
+ 		return -EOPNOTSUPP;
+@@ -514,16 +793,18 @@ static int rtw89_ops_start_ap(struct ieee80211_hw *hw,
+ 	if (rtwdev->scanning)
+ 		rtw89_hw_scan_abort(rtwdev, rtwdev->scan_info.scanning_vif);
+ 
+-	ether_addr_copy(rtwvif->bssid, vif->bss_conf.bssid);
+-	rtw89_cam_bssid_changed(rtwdev, rtwvif);
+-	rtw89_mac_port_update(rtwdev, rtwvif);
+-	rtw89_chip_h2c_assoc_cmac_tbl(rtwdev, vif, NULL);
+-	rtw89_fw_h2c_role_maintain(rtwdev, rtwvif, NULL, RTW89_ROLE_TYPE_CHANGE);
+-	rtw89_fw_h2c_join_info(rtwdev, rtwvif, NULL, true);
+-	rtw89_fw_h2c_cam(rtwdev, rtwvif, NULL, NULL);
+-	rtw89_chip_rfk_channel(rtwdev, rtwvif);
++	ether_addr_copy(rtwvif_link->bssid, link_conf->bssid);
++	rtw89_cam_bssid_changed(rtwdev, rtwvif_link);
++	rtw89_mac_port_update(rtwdev, rtwvif_link);
++	rtw89_chip_h2c_assoc_cmac_tbl(rtwdev, rtwvif_link, NULL);
++	rtw89_fw_h2c_role_maintain(rtwdev, rtwvif_link, NULL, RTW89_ROLE_TYPE_CHANGE);
++	rtw89_fw_h2c_join_info(rtwdev, rtwvif_link, NULL, true);
++	rtw89_fw_h2c_cam(rtwdev, rtwvif_link, NULL, NULL);
++	rtw89_chip_rfk_channel(rtwdev, rtwvif_link);
+ 
+ 	rtw89_queue_chanctx_work(rtwdev);
++
++out:
+ 	mutex_unlock(&rtwdev->mutex);
+ 
+ 	return 0;
+@@ -534,12 +815,24 @@ void rtw89_ops_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ 		       struct ieee80211_bss_conf *link_conf)
+ {
+ 	struct rtw89_dev *rtwdev = hw->priv;
+-	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
++	struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
++	struct rtw89_vif_link *rtwvif_link;
+ 
+ 	mutex_lock(&rtwdev->mutex);
+-	rtw89_mac_stop_ap(rtwdev, rtwvif);
+-	rtw89_chip_h2c_assoc_cmac_tbl(rtwdev, vif, NULL);
+-	rtw89_fw_h2c_join_info(rtwdev, rtwvif, NULL, true);
++
++	rtwvif_link = rtwvif->links[link_conf->link_id];
++	if (unlikely(!rtwvif_link)) {
++		rtw89_err(rtwdev,
++			  "%s: rtwvif link (link_id %u) is not active\n",
++			  __func__, link_conf->link_id);
++		goto out;
++	}
++
++	rtw89_mac_stop_ap(rtwdev, rtwvif_link);
++	rtw89_chip_h2c_assoc_cmac_tbl(rtwdev, rtwvif_link, NULL);
++	rtw89_fw_h2c_join_info(rtwdev, rtwvif_link, NULL, true);
++
++out:
+ 	mutex_unlock(&rtwdev->mutex);
+ }
+ 
+@@ -547,10 +840,13 @@ static int rtw89_ops_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
+ 			     bool set)
+ {
+ 	struct rtw89_dev *rtwdev = hw->priv;
+-	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
++	struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
+ 	struct rtw89_vif *rtwvif = rtwsta->rtwvif;
++	struct rtw89_vif_link *rtwvif_link;
++	unsigned int link_id;
+ 
+-	ieee80211_queue_work(rtwdev->hw, &rtwvif->update_beacon_work);
++	rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id)
++		ieee80211_queue_work(rtwdev->hw, &rtwvif_link->update_beacon_work);
+ 
+ 	return 0;
+ }
+@@ -561,15 +857,29 @@ static int rtw89_ops_conf_tx(struct ieee80211_hw *hw,
+ 			     const struct ieee80211_tx_queue_params *params)
+ {
+ 	struct rtw89_dev *rtwdev = hw->priv;
+-	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
++	struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
++	struct rtw89_vif_link *rtwvif_link;
++	int ret = 0;
+ 
+ 	mutex_lock(&rtwdev->mutex);
+ 	rtw89_leave_ps_mode(rtwdev);
+-	rtwvif->tx_params[ac] = *params;
+-	__rtw89_conf_tx(rtwdev, rtwvif, ac);
++
++	rtwvif_link = rtwvif->links[link_id];
++	if (unlikely(!rtwvif_link)) {
++		rtw89_err(rtwdev,
++			  "%s: rtwvif link (link_id %u) is not active\n",
++			  __func__, link_id);
++		ret = -ENOLINK;
++		goto out;
++	}
++
++	rtwvif_link->tx_params[ac] = *params;
++	__rtw89_conf_tx(rtwdev, rtwvif_link, ac);
++
++out:
+ 	mutex_unlock(&rtwdev->mutex);
+ 
+-	return 0;
++	return ret;
+ }
+ 
+ static int __rtw89_ops_sta_state(struct ieee80211_hw *hw,
+@@ -582,26 +892,26 @@ static int __rtw89_ops_sta_state(struct ieee80211_hw *hw,
+ 
+ 	if (old_state == IEEE80211_STA_NOTEXIST &&
+ 	    new_state == IEEE80211_STA_NONE)
+-		return rtw89_core_sta_add(rtwdev, vif, sta);
++		return __rtw89_ops_sta_add(rtwdev, vif, sta);
+ 
+ 	if (old_state == IEEE80211_STA_AUTH &&
+ 	    new_state == IEEE80211_STA_ASSOC) {
+ 		if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
+ 			return 0; /* defer to bss_info_changed to have vif info */
+-		return rtw89_core_sta_assoc(rtwdev, vif, sta);
++		return __rtw89_ops_sta_assoc(rtwdev, vif, sta, false);
+ 	}
+ 
+ 	if (old_state == IEEE80211_STA_ASSOC &&
+ 	    new_state == IEEE80211_STA_AUTH)
+-		return rtw89_core_sta_disassoc(rtwdev, vif, sta);
++		return __rtw89_ops_sta_disassoc(rtwdev, vif, sta);
+ 
+ 	if (old_state == IEEE80211_STA_AUTH &&
+ 	    new_state == IEEE80211_STA_NONE)
+-		return rtw89_core_sta_disconnect(rtwdev, vif, sta);
++		return __rtw89_ops_sta_disconnect(rtwdev, vif, sta);
+ 
+ 	if (old_state == IEEE80211_STA_NONE &&
+ 	    new_state == IEEE80211_STA_NOTEXIST)
+-		return rtw89_core_sta_remove(rtwdev, vif, sta);
++		return __rtw89_ops_sta_remove(rtwdev, vif, sta);
+ 
+ 	return 0;
+ }
+@@ -667,7 +977,8 @@ static int rtw89_ops_ampdu_action(struct ieee80211_hw *hw,
+ {
+ 	struct rtw89_dev *rtwdev = hw->priv;
+ 	struct ieee80211_sta *sta = params->sta;
+-	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
++	struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
++	struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
+ 	u16 tid = params->tid;
+ 	struct ieee80211_txq *txq = sta->txq[tid];
+ 	struct rtw89_txq *rtwtxq = (struct rtw89_txq *)txq->drv_priv;
+@@ -681,7 +992,7 @@ static int rtw89_ops_ampdu_action(struct ieee80211_hw *hw,
+ 		mutex_lock(&rtwdev->mutex);
+ 		clear_bit(RTW89_TXQ_F_AMPDU, &rtwtxq->flags);
+ 		clear_bit(tid, rtwsta->ampdu_map);
+-		rtw89_chip_h2c_ampdu_cmac_tbl(rtwdev, vif, sta);
++		rtw89_chip_h2c_ampdu_cmac_tbl(rtwdev, rtwvif, rtwsta);
+ 		mutex_unlock(&rtwdev->mutex);
+ 		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ 		break;
+@@ -692,7 +1003,7 @@ static int rtw89_ops_ampdu_action(struct ieee80211_hw *hw,
+ 		rtwsta->ampdu_params[tid].amsdu = params->amsdu;
+ 		set_bit(tid, rtwsta->ampdu_map);
+ 		rtw89_leave_ps_mode(rtwdev);
+-		rtw89_chip_h2c_ampdu_cmac_tbl(rtwdev, vif, sta);
++		rtw89_chip_h2c_ampdu_cmac_tbl(rtwdev, rtwvif, rtwsta);
+ 		mutex_unlock(&rtwdev->mutex);
+ 		break;
+ 	case IEEE80211_AMPDU_RX_START:
+@@ -731,9 +1042,14 @@ static void rtw89_ops_sta_statistics(struct ieee80211_hw *hw,
+ 				     struct ieee80211_sta *sta,
+ 				     struct station_info *sinfo)
+ {
+-	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
++	struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
++	struct rtw89_sta_link *rtwsta_link;
+ 
+-	sinfo->txrate = rtwsta->ra_report.txrate;
++	rtwsta_link = rtw89_sta_get_link_inst(rtwsta, 0);
++	if (unlikely(!rtwsta_link))
++		return;
++
++	sinfo->txrate = rtwsta_link->ra_report.txrate;
+ 	sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
+ }
+ 
+@@ -743,7 +1059,7 @@ void __rtw89_drop_packets(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif)
+ 	struct rtw89_vif *rtwvif;
+ 
+ 	if (vif) {
+-		rtwvif = (struct rtw89_vif *)vif->drv_priv;
++		rtwvif = vif_to_rtwvif(vif);
+ 		rtw89_mac_pkt_drop_vif(rtwdev, rtwvif);
+ 	} else {
+ 		rtw89_for_each_rtwvif(rtwdev, rtwvif)
+@@ -777,14 +1093,20 @@ struct rtw89_iter_bitrate_mask_data {
+ static void rtw89_ra_mask_info_update_iter(void *data, struct ieee80211_sta *sta)
+ {
+ 	struct rtw89_iter_bitrate_mask_data *br_data = data;
+-	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+-	struct ieee80211_vif *vif = rtwvif_to_vif(rtwsta->rtwvif);
++	struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
++	struct rtw89_vif *rtwvif = rtwsta->rtwvif;
++	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
++	struct rtw89_sta_link *rtwsta_link;
++	unsigned int link_id;
+ 
+ 	if (vif != br_data->vif || vif->p2p)
+ 		return;
+ 
+-	rtwsta->use_cfg_mask = true;
+-	rtwsta->mask = *br_data->mask;
++	rtw89_sta_for_each_link(rtwsta, rtwsta_link, link_id) {
++		rtwsta_link->use_cfg_mask = true;
++		rtwsta_link->mask = *br_data->mask;
++	}
++
+ 	rtw89_phy_ra_update_sta(br_data->rtwdev, sta, IEEE80211_RC_SUPP_RATES_CHANGED);
+ }
+ 
+@@ -854,10 +1176,20 @@ static void rtw89_ops_sw_scan_start(struct ieee80211_hw *hw,
+ 				    const u8 *mac_addr)
+ {
+ 	struct rtw89_dev *rtwdev = hw->priv;
+-	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
++	struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
++	struct rtw89_vif_link *rtwvif_link;
+ 
+ 	mutex_lock(&rtwdev->mutex);
+-	rtw89_core_scan_start(rtwdev, rtwvif, mac_addr, false);
++
++	rtwvif_link = rtw89_vif_get_link_inst(rtwvif, 0);
++	if (unlikely(!rtwvif_link)) {
++		rtw89_err(rtwdev, "sw scan start: find no link on HW-0\n");
++		goto out;
++	}
++
++	rtw89_core_scan_start(rtwdev, rtwvif_link, mac_addr, false);
++
++out:
+ 	mutex_unlock(&rtwdev->mutex);
+ }
+ 
+@@ -865,9 +1197,20 @@ static void rtw89_ops_sw_scan_complete(struct ieee80211_hw *hw,
+ 				       struct ieee80211_vif *vif)
+ {
+ 	struct rtw89_dev *rtwdev = hw->priv;
++	struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
++	struct rtw89_vif_link *rtwvif_link;
+ 
+ 	mutex_lock(&rtwdev->mutex);
+-	rtw89_core_scan_complete(rtwdev, vif, false);
++
++	rtwvif_link = rtw89_vif_get_link_inst(rtwvif, 0);
++	if (unlikely(!rtwvif_link)) {
++		rtw89_err(rtwdev, "sw scan complete: find no link on HW-0\n");
++		goto out;
++	}
++
++	rtw89_core_scan_complete(rtwdev, rtwvif_link, false);
++
++out:
+ 	mutex_unlock(&rtwdev->mutex);
+ }
+ 
+@@ -884,22 +1227,35 @@ static int rtw89_ops_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ 			     struct ieee80211_scan_request *req)
+ {
+ 	struct rtw89_dev *rtwdev = hw->priv;
+-	struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif);
+-	int ret = 0;
++	struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
++	struct rtw89_vif_link *rtwvif_link;
++	int ret;
+ 
+ 	if (!RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD, &rtwdev->fw))
+ 		return 1;
+ 
+-	if (rtwdev->scanning || rtwvif->offchan)
+-		return -EBUSY;
+-
+ 	mutex_lock(&rtwdev->mutex);
+-	rtw89_hw_scan_start(rtwdev, vif, req);
+-	ret = rtw89_hw_scan_offload(rtwdev, vif, true);
++
++	if (rtwdev->scanning || rtwvif->offchan) {
++		ret = -EBUSY;
++		goto out;
++	}
++
++	rtwvif_link = rtw89_vif_get_link_inst(rtwvif, 0);
++	if (unlikely(!rtwvif_link)) {
++		rtw89_err(rtwdev, "hw scan: find no link on HW-0\n");
++		ret = -ENOLINK;
++		goto out;
++	}
++
++	rtw89_hw_scan_start(rtwdev, rtwvif_link, req);
++	ret = rtw89_hw_scan_offload(rtwdev, rtwvif_link, true);
+ 	if (ret) {
+-		rtw89_hw_scan_abort(rtwdev, vif);
++		rtw89_hw_scan_abort(rtwdev, rtwvif_link);
+ 		rtw89_err(rtwdev, "HW scan failed with status: %d\n", ret);
+ 	}
++
++out:
+ 	mutex_unlock(&rtwdev->mutex);
+ 
+ 	return ret;
+@@ -909,6 +1265,8 @@ static void rtw89_ops_cancel_hw_scan(struct ieee80211_hw *hw,
+ 				     struct ieee80211_vif *vif)
+ {
+ 	struct rtw89_dev *rtwdev = hw->priv;
++	struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
++	struct rtw89_vif_link *rtwvif_link;
+ 
+ 	if (!RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD, &rtwdev->fw))
+ 		return;
+@@ -917,7 +1275,16 @@ static void rtw89_ops_cancel_hw_scan(struct ieee80211_hw *hw,
+ 		return;
+ 
+ 	mutex_lock(&rtwdev->mutex);
+-	rtw89_hw_scan_abort(rtwdev, vif);
++
++	rtwvif_link = rtw89_vif_get_link_inst(rtwvif, 0);
++	if (unlikely(!rtwvif_link)) {
++		rtw89_err(rtwdev, "cancel hw scan: find no link on HW-0\n");
++		goto out;
++	}
++
++	rtw89_hw_scan_abort(rtwdev, rtwvif_link);
++
++out:
+ 	mutex_unlock(&rtwdev->mutex);
+ }
+ 
+@@ -970,11 +1337,24 @@ static int rtw89_ops_assign_vif_chanctx(struct ieee80211_hw *hw,
+ 					struct ieee80211_chanctx_conf *ctx)
+ {
+ 	struct rtw89_dev *rtwdev = hw->priv;
+-	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
++	struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
++	struct rtw89_vif_link *rtwvif_link;
+ 	int ret;
+ 
+ 	mutex_lock(&rtwdev->mutex);
+-	ret = rtw89_chanctx_ops_assign_vif(rtwdev, rtwvif, ctx);
++
++	rtwvif_link = rtwvif->links[link_conf->link_id];
++	if (unlikely(!rtwvif_link)) {
++		rtw89_err(rtwdev,
++			  "%s: rtwvif link (link_id %u) is not active\n",
++			  __func__, link_conf->link_id);
++		ret = -ENOLINK;
++		goto out;
++	}
++
++	ret = rtw89_chanctx_ops_assign_vif(rtwdev, rtwvif_link, ctx);
++
++out:
+ 	mutex_unlock(&rtwdev->mutex);
+ 
+ 	return ret;
+@@ -986,10 +1366,21 @@ static void rtw89_ops_unassign_vif_chanctx(struct ieee80211_hw *hw,
+ 					   struct ieee80211_chanctx_conf *ctx)
+ {
+ 	struct rtw89_dev *rtwdev = hw->priv;
+-	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
++	struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
++	struct rtw89_vif_link *rtwvif_link;
+ 
+ 	mutex_lock(&rtwdev->mutex);
+-	rtw89_chanctx_ops_unassign_vif(rtwdev, rtwvif, ctx);
++
++	rtwvif_link = rtwvif->links[link_conf->link_id];
++	if (unlikely(!rtwvif_link)) {
++		mutex_unlock(&rtwdev->mutex);
++		rtw89_err(rtwdev,
++			  "%s: rtwvif link (link_id %u) is not active\n",
++			  __func__, link_conf->link_id);
++		return;
++	}
++
++	rtw89_chanctx_ops_unassign_vif(rtwdev, rtwvif_link, ctx);
+ 	mutex_unlock(&rtwdev->mutex);
+ }
+ 
+@@ -1003,7 +1394,7 @@ static int rtw89_ops_remain_on_channel(struct ieee80211_hw *hw,
+ 	struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif);
+ 	struct rtw89_roc *roc = &rtwvif->roc;
+ 
+-	if (!vif)
++	if (!rtwvif)
+ 		return -EINVAL;
+ 
+ 	mutex_lock(&rtwdev->mutex);
+@@ -1053,8 +1444,8 @@ static int rtw89_ops_cancel_remain_on_channel(struct ieee80211_hw *hw,
+ static void rtw89_set_tid_config_iter(void *data, struct ieee80211_sta *sta)
+ {
+ 	struct cfg80211_tid_config *tid_config = data;
+-	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+-	struct rtw89_dev *rtwdev = rtwsta->rtwvif->rtwdev;
++	struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
++	struct rtw89_dev *rtwdev = rtwsta->rtwdev;
+ 
+ 	rtw89_core_set_tid_config(rtwdev, sta, tid_config);
+ }
+diff --git a/drivers/net/wireless/realtek/rtw89/mac_be.c b/drivers/net/wireless/realtek/rtw89/mac_be.c
+index 31f0a5225b115e..f22eaa83297fb4 100644
+--- a/drivers/net/wireless/realtek/rtw89/mac_be.c
++++ b/drivers/net/wireless/realtek/rtw89/mac_be.c
+@@ -2091,13 +2091,13 @@ static int rtw89_mac_init_bfee_be(struct rtw89_dev *rtwdev, u8 mac_idx)
+ }
+ 
+ static int rtw89_mac_set_csi_para_reg_be(struct rtw89_dev *rtwdev,
+-					 struct ieee80211_vif *vif,
+-					 struct ieee80211_sta *sta)
++					 struct rtw89_vif_link *rtwvif_link,
++					 struct rtw89_sta_link *rtwsta_link)
+ {
+-	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ 	u8 nc = 1, nr = 3, ng = 0, cb = 1, cs = 1, ldpc_en = 1, stbc_en = 1;
+-	u8 mac_idx = rtwvif->mac_idx;
+-	u8 port_sel = rtwvif->port;
++	struct ieee80211_link_sta *link_sta;
++	u8 mac_idx = rtwvif_link->mac_idx;
++	u8 port_sel = rtwvif_link->port;
+ 	u8 sound_dim = 3, t;
+ 	u8 *phy_cap;
+ 	u32 reg;
+@@ -2108,7 +2108,10 @@ static int rtw89_mac_set_csi_para_reg_be(struct rtw89_dev *rtwdev,
+ 	if (ret)
+ 		return ret;
+ 
+-	phy_cap = sta->deflink.he_cap.he_cap_elem.phy_cap_info;
++	rcu_read_lock();
++
++	link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true);
++	phy_cap = link_sta->he_cap.he_cap_elem.phy_cap_info;
+ 
+ 	if ((phy_cap[3] & IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER) ||
+ 	    (phy_cap[4] & IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER)) {
+@@ -2119,11 +2122,11 @@ static int rtw89_mac_set_csi_para_reg_be(struct rtw89_dev *rtwdev,
+ 		sound_dim = min(sound_dim, t);
+ 	}
+ 
+-	if ((sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) ||
+-	    (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)) {
+-		ldpc_en &= !!(sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC);
+-		stbc_en &= !!(sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_MASK);
+-		t = u32_get_bits(sta->deflink.vht_cap.cap,
++	if ((link_sta->vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) ||
++	    (link_sta->vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)) {
++		ldpc_en &= !!(link_sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC);
++		stbc_en &= !!(link_sta->vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_MASK);
++		t = u32_get_bits(link_sta->vht_cap.cap,
+ 				 IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK);
+ 		sound_dim = min(sound_dim, t);
+ 	}
+@@ -2131,6 +2134,8 @@ static int rtw89_mac_set_csi_para_reg_be(struct rtw89_dev *rtwdev,
+ 	nc = min(nc, sound_dim);
+ 	nr = min(nr, sound_dim);
+ 
++	rcu_read_unlock();
++
+ 	reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_TRXPTCL_RESP_CSI_CTRL_0, mac_idx);
+ 	rtw89_write32_set(rtwdev, reg, B_BE_BFMEE_BFPARAM_SEL);
+ 
+@@ -2155,12 +2160,12 @@ static int rtw89_mac_set_csi_para_reg_be(struct rtw89_dev *rtwdev,
+ }
+ 
+ static int rtw89_mac_csi_rrsc_be(struct rtw89_dev *rtwdev,
+-				 struct ieee80211_vif *vif,
+-				 struct ieee80211_sta *sta)
++				 struct rtw89_vif_link *rtwvif_link,
++				 struct rtw89_sta_link *rtwsta_link)
+ {
+-	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ 	u32 rrsc = BIT(RTW89_MAC_BF_RRSC_6M) | BIT(RTW89_MAC_BF_RRSC_24M);
+-	u8 mac_idx = rtwvif->mac_idx;
++	struct ieee80211_link_sta *link_sta;
++	u8 mac_idx = rtwvif_link->mac_idx;
+ 	int ret;
+ 	u32 reg;
+ 
+@@ -2168,22 +2173,28 @@ static int rtw89_mac_csi_rrsc_be(struct rtw89_dev *rtwdev,
+ 	if (ret)
+ 		return ret;
+ 
+-	if (sta->deflink.he_cap.has_he) {
++	rcu_read_lock();
++
++	link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true);
++
++	if (link_sta->he_cap.has_he) {
+ 		rrsc |= (BIT(RTW89_MAC_BF_RRSC_HE_MSC0) |
+ 			 BIT(RTW89_MAC_BF_RRSC_HE_MSC3) |
+ 			 BIT(RTW89_MAC_BF_RRSC_HE_MSC5));
+ 	}
+-	if (sta->deflink.vht_cap.vht_supported) {
++	if (link_sta->vht_cap.vht_supported) {
+ 		rrsc |= (BIT(RTW89_MAC_BF_RRSC_VHT_MSC0) |
+ 			 BIT(RTW89_MAC_BF_RRSC_VHT_MSC3) |
+ 			 BIT(RTW89_MAC_BF_RRSC_VHT_MSC5));
+ 	}
+-	if (sta->deflink.ht_cap.ht_supported) {
++	if (link_sta->ht_cap.ht_supported) {
+ 		rrsc |= (BIT(RTW89_MAC_BF_RRSC_HT_MSC0) |
+ 			 BIT(RTW89_MAC_BF_RRSC_HT_MSC3) |
+ 			 BIT(RTW89_MAC_BF_RRSC_HT_MSC5));
+ 	}
+ 
++	rcu_read_unlock();
++
+ 	reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_TRXPTCL_RESP_CSI_CTRL_0, mac_idx);
+ 	rtw89_write32_set(rtwdev, reg, B_BE_BFMEE_BFPARAM_SEL);
+ 	rtw89_write32_clr(rtwdev, reg, B_BE_BFMEE_CSI_FORCE_RETE_EN);
+@@ -2195,17 +2206,25 @@ static int rtw89_mac_csi_rrsc_be(struct rtw89_dev *rtwdev,
+ }
+ 
+ static void rtw89_mac_bf_assoc_be(struct rtw89_dev *rtwdev,
+-				  struct ieee80211_vif *vif,
+-				  struct ieee80211_sta *sta)
++				  struct rtw89_vif_link *rtwvif_link,
++				  struct rtw89_sta_link *rtwsta_link)
+ {
+-	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
++	struct ieee80211_link_sta *link_sta;
++	bool has_beamformer_cap;
++
++	rcu_read_lock();
++
++	link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true);
++	has_beamformer_cap = rtw89_sta_has_beamformer_cap(link_sta);
++
++	rcu_read_unlock();
+ 
+-	if (rtw89_sta_has_beamformer_cap(sta)) {
++	if (has_beamformer_cap) {
+ 		rtw89_debug(rtwdev, RTW89_DBG_BF,
+ 			    "initialize bfee for new association\n");
+-		rtw89_mac_init_bfee_be(rtwdev, rtwvif->mac_idx);
+-		rtw89_mac_set_csi_para_reg_be(rtwdev, vif, sta);
+-		rtw89_mac_csi_rrsc_be(rtwdev, vif, sta);
++		rtw89_mac_init_bfee_be(rtwdev, rtwvif_link->mac_idx);
++		rtw89_mac_set_csi_para_reg_be(rtwdev, rtwvif_link, rtwsta_link);
++		rtw89_mac_csi_rrsc_be(rtwdev, rtwvif_link, rtwsta_link);
+ 	}
+ }
+ 
+diff --git a/drivers/net/wireless/realtek/rtw89/phy.c b/drivers/net/wireless/realtek/rtw89/phy.c
+index c7165e757842be..4b47b45f897cbc 100644
+--- a/drivers/net/wireless/realtek/rtw89/phy.c
++++ b/drivers/net/wireless/realtek/rtw89/phy.c
+@@ -75,12 +75,12 @@ static u64 get_mcs_ra_mask(u16 mcs_map, u8 highest_mcs, u8 gap)
+ 	return ra_mask;
+ }
+ 
+-static u64 get_he_ra_mask(struct ieee80211_sta *sta)
++static u64 get_he_ra_mask(struct ieee80211_link_sta *link_sta)
+ {
+-	struct ieee80211_sta_he_cap cap = sta->deflink.he_cap;
++	struct ieee80211_sta_he_cap cap = link_sta->he_cap;
+ 	u16 mcs_map;
+ 
+-	switch (sta->deflink.bandwidth) {
++	switch (link_sta->bandwidth) {
+ 	case IEEE80211_STA_RX_BW_160:
+ 		if (cap.he_cap_elem.phy_cap_info[0] &
+ 		    IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
+@@ -118,14 +118,14 @@ static u64 get_eht_mcs_ra_mask(u8 *max_nss, u8 start_mcs, u8 n_nss)
+ 	return mask;
+ }
+ 
+-static u64 get_eht_ra_mask(struct ieee80211_sta *sta)
++static u64 get_eht_ra_mask(struct ieee80211_link_sta *link_sta)
+ {
+-	struct ieee80211_sta_eht_cap *eht_cap = &sta->deflink.eht_cap;
++	struct ieee80211_sta_eht_cap *eht_cap = &link_sta->eht_cap;
+ 	struct ieee80211_eht_mcs_nss_supp_20mhz_only *mcs_nss_20mhz;
+ 	struct ieee80211_eht_mcs_nss_supp_bw *mcs_nss;
+-	u8 *he_phy_cap = sta->deflink.he_cap.he_cap_elem.phy_cap_info;
++	u8 *he_phy_cap = link_sta->he_cap.he_cap_elem.phy_cap_info;
+ 
+-	switch (sta->deflink.bandwidth) {
++	switch (link_sta->bandwidth) {
+ 	case IEEE80211_STA_RX_BW_320:
+ 		mcs_nss = &eht_cap->eht_mcs_nss_supp.bw._320;
+ 		/* MCS 9, 11, 13 */
+@@ -195,15 +195,16 @@ static u64 rtw89_phy_ra_mask_recover(u64 ra_mask, u64 ra_mask_bak)
+ 	return ra_mask;
+ }
+ 
+-static u64 rtw89_phy_ra_mask_cfg(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
++static u64 rtw89_phy_ra_mask_cfg(struct rtw89_dev *rtwdev,
++				 struct rtw89_sta_link *rtwsta_link,
++				 struct ieee80211_link_sta *link_sta,
+ 				 const struct rtw89_chan *chan)
+ {
+-	struct ieee80211_sta *sta = rtwsta_to_sta(rtwsta);
+-	struct cfg80211_bitrate_mask *mask = &rtwsta->mask;
++	struct cfg80211_bitrate_mask *mask = &rtwsta_link->mask;
+ 	enum nl80211_band band;
+ 	u64 cfg_mask;
+ 
+-	if (!rtwsta->use_cfg_mask)
++	if (!rtwsta_link->use_cfg_mask)
+ 		return -1;
+ 
+ 	switch (chan->band_type) {
+@@ -227,17 +228,17 @@ static u64 rtw89_phy_ra_mask_cfg(struct rtw89_dev *rtwdev, struct rtw89_sta *rtw
+ 		return -1;
+ 	}
+ 
+-	if (sta->deflink.he_cap.has_he) {
++	if (link_sta->he_cap.has_he) {
+ 		cfg_mask |= u64_encode_bits(mask->control[band].he_mcs[0],
+ 					    RA_MASK_HE_1SS_RATES);
+ 		cfg_mask |= u64_encode_bits(mask->control[band].he_mcs[1],
+ 					    RA_MASK_HE_2SS_RATES);
+-	} else if (sta->deflink.vht_cap.vht_supported) {
++	} else if (link_sta->vht_cap.vht_supported) {
+ 		cfg_mask |= u64_encode_bits(mask->control[band].vht_mcs[0],
+ 					    RA_MASK_VHT_1SS_RATES);
+ 		cfg_mask |= u64_encode_bits(mask->control[band].vht_mcs[1],
+ 					    RA_MASK_VHT_2SS_RATES);
+-	} else if (sta->deflink.ht_cap.ht_supported) {
++	} else if (link_sta->ht_cap.ht_supported) {
+ 		cfg_mask |= u64_encode_bits(mask->control[band].ht_mcs[0],
+ 					    RA_MASK_HT_1SS_RATES);
+ 		cfg_mask |= u64_encode_bits(mask->control[band].ht_mcs[1],
+@@ -261,17 +262,17 @@ rtw89_ra_mask_eht_rates[4] = {RA_MASK_EHT_1SS_RATES, RA_MASK_EHT_2SS_RATES,
+ 			      RA_MASK_EHT_3SS_RATES, RA_MASK_EHT_4SS_RATES};
+ 
+ static void rtw89_phy_ra_gi_ltf(struct rtw89_dev *rtwdev,
+-				struct rtw89_sta *rtwsta,
++				struct rtw89_sta_link *rtwsta_link,
+ 				const struct rtw89_chan *chan,
+ 				bool *fix_giltf_en, u8 *fix_giltf)
+ {
+-	struct cfg80211_bitrate_mask *mask = &rtwsta->mask;
++	struct cfg80211_bitrate_mask *mask = &rtwsta_link->mask;
+ 	u8 band = chan->band_type;
+ 	enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band);
+ 	u8 he_gi = mask->control[nl_band].he_gi;
+ 	u8 he_ltf = mask->control[nl_band].he_ltf;
+ 
+-	if (!rtwsta->use_cfg_mask)
++	if (!rtwsta_link->use_cfg_mask)
+ 		return;
+ 
+ 	if (he_ltf == 2 && he_gi == 2) {
+@@ -295,17 +296,17 @@ static void rtw89_phy_ra_gi_ltf(struct rtw89_dev *rtwdev,
+ }
+ 
+ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
+-				    struct ieee80211_sta *sta, bool csi)
++				    struct rtw89_vif_link *rtwvif_link,
++				    struct rtw89_sta_link *rtwsta_link,
++				    struct ieee80211_link_sta *link_sta,
++				    bool p2p, bool csi)
+ {
+-	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+-	struct rtw89_vif *rtwvif = rtwsta->rtwvif;
+-	struct rtw89_phy_rate_pattern *rate_pattern = &rtwvif->rate_pattern;
+-	struct rtw89_ra_info *ra = &rtwsta->ra;
++	struct rtw89_phy_rate_pattern *rate_pattern = &rtwvif_link->rate_pattern;
++	struct rtw89_ra_info *ra = &rtwsta_link->ra;
+ 	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
+-						       rtwvif->chanctx_idx);
+-	struct ieee80211_vif *vif = rtwvif_to_vif(rtwsta->rtwvif);
++						       rtwvif_link->chanctx_idx);
+ 	const u64 *high_rate_masks = rtw89_ra_mask_ht_rates;
+-	u8 rssi = ewma_rssi_read(&rtwsta->avg_rssi);
++	u8 rssi = ewma_rssi_read(&rtwsta_link->avg_rssi);
+ 	u64 ra_mask = 0;
+ 	u64 ra_mask_bak;
+ 	u8 mode = 0;
+@@ -320,65 +321,65 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
+ 
+ 	memset(ra, 0, sizeof(*ra));
+ 	/* Set the ra mask from sta's capability */
+-	if (sta->deflink.eht_cap.has_eht) {
++	if (link_sta->eht_cap.has_eht) {
+ 		mode |= RTW89_RA_MODE_EHT;
+-		ra_mask |= get_eht_ra_mask(sta);
++		ra_mask |= get_eht_ra_mask(link_sta);
+ 		high_rate_masks = rtw89_ra_mask_eht_rates;
+-	} else if (sta->deflink.he_cap.has_he) {
++	} else if (link_sta->he_cap.has_he) {
+ 		mode |= RTW89_RA_MODE_HE;
+ 		csi_mode = RTW89_RA_RPT_MODE_HE;
+-		ra_mask |= get_he_ra_mask(sta);
++		ra_mask |= get_he_ra_mask(link_sta);
+ 		high_rate_masks = rtw89_ra_mask_he_rates;
+-		if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[2] &
++		if (link_sta->he_cap.he_cap_elem.phy_cap_info[2] &
+ 		    IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ)
+ 			stbc_en = 1;
+-		if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[1] &
++		if (link_sta->he_cap.he_cap_elem.phy_cap_info[1] &
+ 		    IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD)
+ 			ldpc_en = 1;
+-		rtw89_phy_ra_gi_ltf(rtwdev, rtwsta, chan, &fix_giltf_en, &fix_giltf);
+-	} else if (sta->deflink.vht_cap.vht_supported) {
+-		u16 mcs_map = le16_to_cpu(sta->deflink.vht_cap.vht_mcs.rx_mcs_map);
++		rtw89_phy_ra_gi_ltf(rtwdev, rtwsta_link, chan, &fix_giltf_en, &fix_giltf);
++	} else if (link_sta->vht_cap.vht_supported) {
++		u16 mcs_map = le16_to_cpu(link_sta->vht_cap.vht_mcs.rx_mcs_map);
+ 
+ 		mode |= RTW89_RA_MODE_VHT;
+ 		csi_mode = RTW89_RA_RPT_MODE_VHT;
+ 		/* MCS9 (non-20MHz), MCS8, MCS7 */
+-		if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20)
++		if (link_sta->bandwidth == IEEE80211_STA_RX_BW_20)
+ 			ra_mask |= get_mcs_ra_mask(mcs_map, 8, 1);
+ 		else
+ 			ra_mask |= get_mcs_ra_mask(mcs_map, 9, 1);
+ 		high_rate_masks = rtw89_ra_mask_vht_rates;
+-		if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_MASK)
++		if (link_sta->vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_MASK)
+ 			stbc_en = 1;
+-		if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC)
++		if (link_sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC)
+ 			ldpc_en = 1;
+-	} else if (sta->deflink.ht_cap.ht_supported) {
++	} else if (link_sta->ht_cap.ht_supported) {
+ 		mode |= RTW89_RA_MODE_HT;
+ 		csi_mode = RTW89_RA_RPT_MODE_HT;
+-		ra_mask |= ((u64)sta->deflink.ht_cap.mcs.rx_mask[3] << 48) |
+-			   ((u64)sta->deflink.ht_cap.mcs.rx_mask[2] << 36) |
+-			   ((u64)sta->deflink.ht_cap.mcs.rx_mask[1] << 24) |
+-			   ((u64)sta->deflink.ht_cap.mcs.rx_mask[0] << 12);
++		ra_mask |= ((u64)link_sta->ht_cap.mcs.rx_mask[3] << 48) |
++			   ((u64)link_sta->ht_cap.mcs.rx_mask[2] << 36) |
++			   ((u64)link_sta->ht_cap.mcs.rx_mask[1] << 24) |
++			   ((u64)link_sta->ht_cap.mcs.rx_mask[0] << 12);
+ 		high_rate_masks = rtw89_ra_mask_ht_rates;
+-		if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)
++		if (link_sta->ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)
+ 			stbc_en = 1;
+-		if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING)
++		if (link_sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING)
+ 			ldpc_en = 1;
+ 	}
+ 
+ 	switch (chan->band_type) {
+ 	case RTW89_BAND_2G:
+-		ra_mask |= sta->deflink.supp_rates[NL80211_BAND_2GHZ];
+-		if (sta->deflink.supp_rates[NL80211_BAND_2GHZ] & 0xf)
++		ra_mask |= link_sta->supp_rates[NL80211_BAND_2GHZ];
++		if (link_sta->supp_rates[NL80211_BAND_2GHZ] & 0xf)
+ 			mode |= RTW89_RA_MODE_CCK;
+-		if (sta->deflink.supp_rates[NL80211_BAND_2GHZ] & 0xff0)
++		if (link_sta->supp_rates[NL80211_BAND_2GHZ] & 0xff0)
+ 			mode |= RTW89_RA_MODE_OFDM;
+ 		break;
+ 	case RTW89_BAND_5G:
+-		ra_mask |= (u64)sta->deflink.supp_rates[NL80211_BAND_5GHZ] << 4;
++		ra_mask |= (u64)link_sta->supp_rates[NL80211_BAND_5GHZ] << 4;
+ 		mode |= RTW89_RA_MODE_OFDM;
+ 		break;
+ 	case RTW89_BAND_6G:
+-		ra_mask |= (u64)sta->deflink.supp_rates[NL80211_BAND_6GHZ] << 4;
++		ra_mask |= (u64)link_sta->supp_rates[NL80211_BAND_6GHZ] << 4;
+ 		mode |= RTW89_RA_MODE_OFDM;
+ 		break;
+ 	default:
+@@ -405,48 +406,48 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
+ 		ra_mask &= rtw89_phy_ra_mask_rssi(rtwdev, rssi, 0);
+ 
+ 	ra_mask = rtw89_phy_ra_mask_recover(ra_mask, ra_mask_bak);
+-	ra_mask &= rtw89_phy_ra_mask_cfg(rtwdev, rtwsta, chan);
++	ra_mask &= rtw89_phy_ra_mask_cfg(rtwdev, rtwsta_link, link_sta, chan);
+ 
+-	switch (sta->deflink.bandwidth) {
++	switch (link_sta->bandwidth) {
+ 	case IEEE80211_STA_RX_BW_160:
+ 		bw_mode = RTW89_CHANNEL_WIDTH_160;
+-		sgi = sta->deflink.vht_cap.vht_supported &&
+-		      (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160);
++		sgi = link_sta->vht_cap.vht_supported &&
++		      (link_sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160);
+ 		break;
+ 	case IEEE80211_STA_RX_BW_80:
+ 		bw_mode = RTW89_CHANNEL_WIDTH_80;
+-		sgi = sta->deflink.vht_cap.vht_supported &&
+-		      (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80);
++		sgi = link_sta->vht_cap.vht_supported &&
++		      (link_sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80);
+ 		break;
+ 	case IEEE80211_STA_RX_BW_40:
+ 		bw_mode = RTW89_CHANNEL_WIDTH_40;
+-		sgi = sta->deflink.ht_cap.ht_supported &&
+-		      (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40);
++		sgi = link_sta->ht_cap.ht_supported &&
++		      (link_sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40);
+ 		break;
+ 	default:
+ 		bw_mode = RTW89_CHANNEL_WIDTH_20;
+-		sgi = sta->deflink.ht_cap.ht_supported &&
+-		      (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20);
++		sgi = link_sta->ht_cap.ht_supported &&
++		      (link_sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20);
+ 		break;
+ 	}
+ 
+-	if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[3] &
++	if (link_sta->he_cap.he_cap_elem.phy_cap_info[3] &
+ 	    IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM)
+ 		ra->dcm_cap = 1;
+ 
+-	if (rate_pattern->enable && !vif->p2p) {
+-		ra_mask = rtw89_phy_ra_mask_cfg(rtwdev, rtwsta, chan);
++	if (rate_pattern->enable && !p2p) {
++		ra_mask = rtw89_phy_ra_mask_cfg(rtwdev, rtwsta_link, link_sta, chan);
+ 		ra_mask &= rate_pattern->ra_mask;
+ 		mode = rate_pattern->ra_mode;
+ 	}
+ 
+ 	ra->bw_cap = bw_mode;
+-	ra->er_cap = rtwsta->er_cap;
++	ra->er_cap = rtwsta_link->er_cap;
+ 	ra->mode_ctrl = mode;
+-	ra->macid = rtwsta->mac_id;
++	ra->macid = rtwsta_link->mac_id;
+ 	ra->stbc_cap = stbc_en;
+ 	ra->ldpc_cap = ldpc_en;
+-	ra->ss_num = min(sta->deflink.rx_nss, rtwdev->hal.tx_nss) - 1;
++	ra->ss_num = min(link_sta->rx_nss, rtwdev->hal.tx_nss) - 1;
+ 	ra->en_sgi = sgi;
+ 	ra->ra_mask = ra_mask;
+ 	ra->fix_giltf_en = fix_giltf_en;
+@@ -458,20 +459,29 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
+ 	ra->fixed_csi_rate_en = false;
+ 	ra->ra_csi_rate_en = true;
+ 	ra->cr_tbl_sel = false;
+-	ra->band_num = rtwvif->phy_idx;
++	ra->band_num = rtwvif_link->phy_idx;
+ 	ra->csi_bw = bw_mode;
+ 	ra->csi_gi_ltf = RTW89_GILTF_LGI_4XHE32;
+ 	ra->csi_mcs_ss_idx = 5;
+ 	ra->csi_mode = csi_mode;
+ }
+ 
+-void rtw89_phy_ra_update_sta(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta,
+-			     u32 changed)
++static void __rtw89_phy_ra_update_sta(struct rtw89_dev *rtwdev,
++				      struct rtw89_vif_link *rtwvif_link,
++				      struct rtw89_sta_link *rtwsta_link,
++				      u32 changed)
+ {
+-	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+-	struct rtw89_ra_info *ra = &rtwsta->ra;
++	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
++	struct rtw89_ra_info *ra = &rtwsta_link->ra;
++	struct ieee80211_link_sta *link_sta;
+ 
+-	rtw89_phy_ra_sta_update(rtwdev, sta, false);
++	rcu_read_lock();
++
++	link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, false);
++	rtw89_phy_ra_sta_update(rtwdev, rtwvif_link, rtwsta_link,
++				link_sta, vif->p2p, false);
++
++	rcu_read_unlock();
+ 
+ 	if (changed & IEEE80211_RC_SUPP_RATES_CHANGED)
+ 		ra->upd_mask = 1;
+@@ -489,6 +499,20 @@ void rtw89_phy_ra_update_sta(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta
+ 	rtw89_fw_h2c_ra(rtwdev, ra, false);
+ }
+ 
++void rtw89_phy_ra_update_sta(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta,
++			     u32 changed)
++{
++	struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
++	struct rtw89_vif_link *rtwvif_link;
++	struct rtw89_sta_link *rtwsta_link;
++	unsigned int link_id;
++
++	rtw89_sta_for_each_link(rtwsta, rtwsta_link, link_id) {
++		rtwvif_link = rtwsta_link->rtwvif_link;
++		__rtw89_phy_ra_update_sta(rtwdev, rtwvif_link, rtwsta_link, changed);
++	}
++}
++
+ static bool __check_rate_pattern(struct rtw89_phy_rate_pattern *next,
+ 				 u16 rate_base, u64 ra_mask, u8 ra_mode,
+ 				 u32 rate_ctrl, u32 ctrl_skip, bool force)
+@@ -523,15 +547,15 @@ static bool __check_rate_pattern(struct rtw89_phy_rate_pattern *next,
+ 		[RTW89_CHIP_BE] = RTW89_HW_RATE_V1_ ## rate, \
+ 	}
+ 
+-void rtw89_phy_rate_pattern_vif(struct rtw89_dev *rtwdev,
+-				struct ieee80211_vif *vif,
+-				const struct cfg80211_bitrate_mask *mask)
++static
++void __rtw89_phy_rate_pattern_vif(struct rtw89_dev *rtwdev,
++				  struct rtw89_vif_link *rtwvif_link,
++				  const struct cfg80211_bitrate_mask *mask)
+ {
+ 	struct ieee80211_supported_band *sband;
+-	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ 	struct rtw89_phy_rate_pattern next_pattern = {0};
+ 	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
+-						       rtwvif->chanctx_idx);
++						       rtwvif_link->chanctx_idx);
+ 	static const u16 hw_rate_he[][RTW89_CHIP_GEN_NUM] = {
+ 		RTW89_HW_RATE_BY_CHIP_GEN(HE_NSS1_MCS0),
+ 		RTW89_HW_RATE_BY_CHIP_GEN(HE_NSS2_MCS0),
+@@ -600,7 +624,7 @@ void rtw89_phy_rate_pattern_vif(struct rtw89_dev *rtwdev,
+ 	if (!next_pattern.enable)
+ 		goto out;
+ 
+-	rtwvif->rate_pattern = next_pattern;
++	rtwvif_link->rate_pattern = next_pattern;
+ 	rtw89_debug(rtwdev, RTW89_DBG_RA,
+ 		    "configure pattern: rate 0x%x, mask 0x%llx, mode 0x%x\n",
+ 		    next_pattern.rate,
+@@ -609,10 +633,22 @@ void rtw89_phy_rate_pattern_vif(struct rtw89_dev *rtwdev,
+ 	return;
+ 
+ out:
+-	rtwvif->rate_pattern.enable = false;
++	rtwvif_link->rate_pattern.enable = false;
+ 	rtw89_debug(rtwdev, RTW89_DBG_RA, "unset rate pattern\n");
+ }
+ 
++void rtw89_phy_rate_pattern_vif(struct rtw89_dev *rtwdev,
++				struct ieee80211_vif *vif,
++				const struct cfg80211_bitrate_mask *mask)
++{
++	struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
++	struct rtw89_vif_link *rtwvif_link;
++	unsigned int link_id;
++
++	rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id)
++		__rtw89_phy_rate_pattern_vif(rtwdev, rtwvif_link, mask);
++}
++
+ static void rtw89_phy_ra_update_sta_iter(void *data, struct ieee80211_sta *sta)
+ {
+ 	struct rtw89_dev *rtwdev = (struct rtw89_dev *)data;
+@@ -627,14 +663,24 @@ void rtw89_phy_ra_update(struct rtw89_dev *rtwdev)
+ 					  rtwdev);
+ }
+ 
+-void rtw89_phy_ra_assoc(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta)
++void rtw89_phy_ra_assoc(struct rtw89_dev *rtwdev, struct rtw89_sta_link *rtwsta_link)
+ {
+-	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+-	struct rtw89_ra_info *ra = &rtwsta->ra;
+-	u8 rssi = ewma_rssi_read(&rtwsta->avg_rssi) >> RSSI_FACTOR;
+-	bool csi = rtw89_sta_has_beamformer_cap(sta);
++	struct rtw89_vif_link *rtwvif_link = rtwsta_link->rtwvif_link;
++	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
++	struct rtw89_ra_info *ra = &rtwsta_link->ra;
++	u8 rssi = ewma_rssi_read(&rtwsta_link->avg_rssi) >> RSSI_FACTOR;
++	struct ieee80211_link_sta *link_sta;
++	bool csi;
++
++	rcu_read_lock();
+ 
+-	rtw89_phy_ra_sta_update(rtwdev, sta, csi);
++	link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true);
++	csi = rtw89_sta_has_beamformer_cap(link_sta);
++
++	rtw89_phy_ra_sta_update(rtwdev, rtwvif_link, rtwsta_link,
++				link_sta, vif->p2p, csi);
++
++	rcu_read_unlock();
+ 
+ 	if (rssi > 40)
+ 		ra->init_rate_lv = 1;
+@@ -2553,14 +2599,14 @@ struct rtw89_phy_iter_ra_data {
+ 	struct sk_buff *c2h;
+ };
+ 
+-static void rtw89_phy_c2h_ra_rpt_iter(void *data, struct ieee80211_sta *sta)
++static void __rtw89_phy_c2h_ra_rpt_iter(struct rtw89_sta_link *rtwsta_link,
++					struct ieee80211_link_sta *link_sta,
++					struct rtw89_phy_iter_ra_data *ra_data)
+ {
+-	struct rtw89_phy_iter_ra_data *ra_data = (struct rtw89_phy_iter_ra_data *)data;
+ 	struct rtw89_dev *rtwdev = ra_data->rtwdev;
+-	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+ 	const struct rtw89_c2h_ra_rpt *c2h =
+ 		(const struct rtw89_c2h_ra_rpt *)ra_data->c2h->data;
+-	struct rtw89_ra_report *ra_report = &rtwsta->ra_report;
++	struct rtw89_ra_report *ra_report = &rtwsta_link->ra_report;
+ 	const struct rtw89_chip_info *chip = rtwdev->chip;
+ 	bool format_v1 = chip->chip_gen == RTW89_CHIP_BE;
+ 	u8 mode, rate, bw, giltf, mac_id;
+@@ -2570,7 +2616,7 @@ static void rtw89_phy_c2h_ra_rpt_iter(void *data, struct ieee80211_sta *sta)
+ 	u8 t;
+ 
+ 	mac_id = le32_get_bits(c2h->w2, RTW89_C2H_RA_RPT_W2_MACID);
+-	if (mac_id != rtwsta->mac_id)
++	if (mac_id != rtwsta_link->mac_id)
+ 		return;
+ 
+ 	rate = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_MCSNSS);
+@@ -2661,8 +2707,26 @@ static void rtw89_phy_c2h_ra_rpt_iter(void *data, struct ieee80211_sta *sta)
+ 			     u16_encode_bits(mode, RTW89_HW_RATE_MASK_MOD) |
+ 			     u16_encode_bits(rate, RTW89_HW_RATE_MASK_VAL);
+ 	ra_report->might_fallback_legacy = mcs <= 2;
+-	sta->deflink.agg.max_rc_amsdu_len = get_max_amsdu_len(rtwdev, ra_report);
+-	rtwsta->max_agg_wait = sta->deflink.agg.max_rc_amsdu_len / 1500 - 1;
++	link_sta->agg.max_rc_amsdu_len = get_max_amsdu_len(rtwdev, ra_report);
++	rtwsta_link->max_agg_wait = link_sta->agg.max_rc_amsdu_len / 1500 - 1;
++}
++
++static void rtw89_phy_c2h_ra_rpt_iter(void *data, struct ieee80211_sta *sta)
++{
++	struct rtw89_phy_iter_ra_data *ra_data = (struct rtw89_phy_iter_ra_data *)data;
++	struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
++	struct rtw89_sta_link *rtwsta_link;
++	struct ieee80211_link_sta *link_sta;
++	unsigned int link_id;
++
++	rcu_read_lock();
++
++	rtw89_sta_for_each_link(rtwsta, rtwsta_link, link_id) {
++		link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, false);
++		__rtw89_phy_c2h_ra_rpt_iter(rtwsta_link, link_sta, ra_data);
++	}
++
++	rcu_read_unlock();
+ }
+ 
+ static void
+@@ -4290,33 +4354,33 @@ void rtw89_phy_cfo_parse(struct rtw89_dev *rtwdev, s16 cfo_val,
+ 	cfo->packet_count++;
+ }
+ 
+-void rtw89_phy_ul_tb_assoc(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
++void rtw89_phy_ul_tb_assoc(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link)
+ {
+ 	const struct rtw89_chip_info *chip = rtwdev->chip;
+ 	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
+-						       rtwvif->chanctx_idx);
++						       rtwvif_link->chanctx_idx);
+ 	struct rtw89_phy_ul_tb_info *ul_tb_info = &rtwdev->ul_tb_info;
+ 
+ 	if (!chip->ul_tb_waveform_ctrl)
+ 		return;
+ 
+-	rtwvif->def_tri_idx =
++	rtwvif_link->def_tri_idx =
+ 		rtw89_phy_read32_mask(rtwdev, R_DCFO_OPT, B_TXSHAPE_TRIANGULAR_CFG);
+ 
+ 	if (chip->chip_id == RTL8852B && rtwdev->hal.cv > CHIP_CBV)
+-		rtwvif->dyn_tb_bedge_en = false;
++		rtwvif_link->dyn_tb_bedge_en = false;
+ 	else if (chan->band_type >= RTW89_BAND_5G &&
+ 		 chan->band_width >= RTW89_CHANNEL_WIDTH_40)
+-		rtwvif->dyn_tb_bedge_en = true;
++		rtwvif_link->dyn_tb_bedge_en = true;
+ 	else
+-		rtwvif->dyn_tb_bedge_en = false;
++		rtwvif_link->dyn_tb_bedge_en = false;
+ 
+ 	rtw89_debug(rtwdev, RTW89_DBG_UL_TB,
+ 		    "[ULTB] def_if_bandedge=%d, def_tri_idx=%d\n",
+-		    ul_tb_info->def_if_bandedge, rtwvif->def_tri_idx);
++		    ul_tb_info->def_if_bandedge, rtwvif_link->def_tri_idx);
+ 	rtw89_debug(rtwdev, RTW89_DBG_UL_TB,
+ 		    "[ULTB] dyn_tb_begde_en=%d, dyn_tb_tri_en=%d\n",
+-		    rtwvif->dyn_tb_bedge_en, ul_tb_info->dyn_tb_tri_en);
++		    rtwvif_link->dyn_tb_bedge_en, ul_tb_info->dyn_tb_tri_en);
+ }
+ 
+ struct rtw89_phy_ul_tb_check_data {
+@@ -4338,7 +4402,7 @@ struct rtw89_phy_power_diff {
+ };
+ 
+ static void rtw89_phy_ofdma_power_diff(struct rtw89_dev *rtwdev,
+-				       struct rtw89_vif *rtwvif)
++				       struct rtw89_vif_link *rtwvif_link)
+ {
+ 	static const struct rtw89_phy_power_diff table[2] = {
+ 		{0x0, 0x0, 0x0, 0x0, 0xf4, 0x3, 0x3},
+@@ -4350,13 +4414,13 @@ static void rtw89_phy_ofdma_power_diff(struct rtw89_dev *rtwdev,
+ 	if (!rtwdev->chip->ul_tb_pwr_diff)
+ 		return;
+ 
+-	if (rtwvif->pwr_diff_en == rtwvif->pre_pwr_diff_en) {
+-		rtwvif->pwr_diff_en = false;
++	if (rtwvif_link->pwr_diff_en == rtwvif_link->pre_pwr_diff_en) {
++		rtwvif_link->pwr_diff_en = false;
+ 		return;
+ 	}
+ 
+-	rtwvif->pre_pwr_diff_en = rtwvif->pwr_diff_en;
+-	param = &table[rtwvif->pwr_diff_en];
++	rtwvif_link->pre_pwr_diff_en = rtwvif_link->pwr_diff_en;
++	param = &table[rtwvif_link->pwr_diff_en];
+ 
+ 	rtw89_phy_write32_mask(rtwdev, R_Q_MATRIX_00, B_Q_MATRIX_00_REAL,
+ 			       param->q_00);
+@@ -4365,32 +4429,32 @@ static void rtw89_phy_ofdma_power_diff(struct rtw89_dev *rtwdev,
+ 	rtw89_phy_write32_mask(rtwdev, R_CUSTOMIZE_Q_MATRIX,
+ 			       B_CUSTOMIZE_Q_MATRIX_EN, param->q_matrix_en);
+ 
+-	reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_1T, rtwvif->mac_idx);
++	reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_1T, rtwvif_link->mac_idx);
+ 	rtw89_write32_mask(rtwdev, reg, B_AX_PWR_UL_TB_1T_NORM_BW160,
+ 			   param->ultb_1t_norm_160);
+ 
+-	reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_2T, rtwvif->mac_idx);
++	reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_2T, rtwvif_link->mac_idx);
+ 	rtw89_write32_mask(rtwdev, reg, B_AX_PWR_UL_TB_2T_NORM_BW160,
+ 			   param->ultb_2t_norm_160);
+ 
+-	reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PATH_COM1, rtwvif->mac_idx);
++	reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PATH_COM1, rtwvif_link->mac_idx);
+ 	rtw89_write32_mask(rtwdev, reg, B_AX_PATH_COM1_NORM_1STS,
+ 			   param->com1_norm_1sts);
+ 
+-	reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PATH_COM2, rtwvif->mac_idx);
++	reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PATH_COM2, rtwvif_link->mac_idx);
+ 	rtw89_write32_mask(rtwdev, reg, B_AX_PATH_COM2_RESP_1STS_PATH,
+ 			   param->com2_resp_1sts_path);
+ }
+ 
+ static
+ void rtw89_phy_ul_tb_ctrl_check(struct rtw89_dev *rtwdev,
+-				struct rtw89_vif *rtwvif,
++				struct rtw89_vif_link *rtwvif_link,
+ 				struct rtw89_phy_ul_tb_check_data *ul_tb_data)
+ {
+ 	struct rtw89_traffic_stats *stats = &rtwdev->stats;
+-	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
++	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
+ 
+-	if (rtwvif->wifi_role != RTW89_WIFI_ROLE_STATION)
++	if (rtwvif_link->wifi_role != RTW89_WIFI_ROLE_STATION)
+ 		return;
+ 
+ 	if (!vif->cfg.assoc)
+@@ -4403,11 +4467,11 @@ void rtw89_phy_ul_tb_ctrl_check(struct rtw89_dev *rtwdev,
+ 			ul_tb_data->low_tf_client = true;
+ 
+ 		ul_tb_data->valid = true;
+-		ul_tb_data->def_tri_idx = rtwvif->def_tri_idx;
+-		ul_tb_data->dyn_tb_bedge_en = rtwvif->dyn_tb_bedge_en;
++		ul_tb_data->def_tri_idx = rtwvif_link->def_tri_idx;
++		ul_tb_data->dyn_tb_bedge_en = rtwvif_link->dyn_tb_bedge_en;
+ 	}
+ 
+-	rtw89_phy_ofdma_power_diff(rtwdev, rtwvif);
++	rtw89_phy_ofdma_power_diff(rtwdev, rtwvif_link);
+ }
+ 
+ static void rtw89_phy_ul_tb_waveform_ctrl(struct rtw89_dev *rtwdev,
+@@ -4453,7 +4517,9 @@ void rtw89_phy_ul_tb_ctrl_track(struct rtw89_dev *rtwdev)
+ {
+ 	const struct rtw89_chip_info *chip = rtwdev->chip;
+ 	struct rtw89_phy_ul_tb_check_data ul_tb_data = {};
++	struct rtw89_vif_link *rtwvif_link;
+ 	struct rtw89_vif *rtwvif;
++	unsigned int link_id;
+ 
+ 	if (!chip->ul_tb_waveform_ctrl && !chip->ul_tb_pwr_diff)
+ 		return;
+@@ -4462,7 +4528,8 @@ void rtw89_phy_ul_tb_ctrl_track(struct rtw89_dev *rtwdev)
+ 		return;
+ 
+ 	rtw89_for_each_rtwvif(rtwdev, rtwvif)
+-		rtw89_phy_ul_tb_ctrl_check(rtwdev, rtwvif, &ul_tb_data);
++		rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id)
++			rtw89_phy_ul_tb_ctrl_check(rtwdev, rtwvif_link, &ul_tb_data);
+ 
+ 	if (!ul_tb_data.valid)
+ 		return;
+@@ -4626,30 +4693,42 @@ struct rtw89_phy_iter_rssi_data {
+ 	bool rssi_changed;
+ };
+ 
+-static void rtw89_phy_stat_rssi_update_iter(void *data,
+-					    struct ieee80211_sta *sta)
++static
++void __rtw89_phy_stat_rssi_update_iter(struct rtw89_sta_link *rtwsta_link,
++				       struct rtw89_phy_iter_rssi_data *rssi_data)
+ {
+-	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+-	struct rtw89_phy_iter_rssi_data *rssi_data =
+-					(struct rtw89_phy_iter_rssi_data *)data;
+ 	struct rtw89_phy_ch_info *ch_info = rssi_data->ch_info;
+ 	unsigned long rssi_curr;
+ 
+-	rssi_curr = ewma_rssi_read(&rtwsta->avg_rssi);
++	rssi_curr = ewma_rssi_read(&rtwsta_link->avg_rssi);
+ 
+ 	if (rssi_curr < ch_info->rssi_min) {
+ 		ch_info->rssi_min = rssi_curr;
+-		ch_info->rssi_min_macid = rtwsta->mac_id;
++		ch_info->rssi_min_macid = rtwsta_link->mac_id;
+ 	}
+ 
+-	if (rtwsta->prev_rssi == 0) {
+-		rtwsta->prev_rssi = rssi_curr;
+-	} else if (abs((int)rtwsta->prev_rssi - (int)rssi_curr) > (3 << RSSI_FACTOR)) {
+-		rtwsta->prev_rssi = rssi_curr;
++	if (rtwsta_link->prev_rssi == 0) {
++		rtwsta_link->prev_rssi = rssi_curr;
++	} else if (abs((int)rtwsta_link->prev_rssi - (int)rssi_curr) >
++		   (3 << RSSI_FACTOR)) {
++		rtwsta_link->prev_rssi = rssi_curr;
+ 		rssi_data->rssi_changed = true;
+ 	}
+ }
+ 
++static void rtw89_phy_stat_rssi_update_iter(void *data,
++					    struct ieee80211_sta *sta)
++{
++	struct rtw89_phy_iter_rssi_data *rssi_data =
++					(struct rtw89_phy_iter_rssi_data *)data;
++	struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
++	struct rtw89_sta_link *rtwsta_link;
++	unsigned int link_id;
++
++	rtw89_sta_for_each_link(rtwsta, rtwsta_link, link_id)
++		__rtw89_phy_stat_rssi_update_iter(rtwsta_link, rssi_data);
++}
++
+ static void rtw89_phy_stat_rssi_update(struct rtw89_dev *rtwdev)
+ {
+ 	struct rtw89_phy_iter_rssi_data rssi_data = {0};
+@@ -5753,26 +5832,15 @@ void rtw89_phy_dig(struct rtw89_dev *rtwdev)
+ 		rtw89_phy_dig_sdagc_follow_pagc_config(rtwdev, false);
+ }
+ 
+-static void rtw89_phy_tx_path_div_sta_iter(void *data, struct ieee80211_sta *sta)
++static void __rtw89_phy_tx_path_div_sta_iter(struct rtw89_dev *rtwdev,
++					     struct rtw89_sta_link *rtwsta_link)
+ {
+-	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+-	struct rtw89_dev *rtwdev = rtwsta->rtwdev;
+-	struct rtw89_vif *rtwvif = rtwsta->rtwvif;
+ 	struct rtw89_hal *hal = &rtwdev->hal;
+-	bool *done = data;
+ 	u8 rssi_a, rssi_b;
+ 	u32 candidate;
+ 
+-	if (rtwvif->wifi_role != RTW89_WIFI_ROLE_STATION || sta->tdls)
+-		return;
+-
+-	if (*done)
+-		return;
+-
+-	*done = true;
+-
+-	rssi_a = ewma_rssi_read(&rtwsta->rssi[RF_PATH_A]);
+-	rssi_b = ewma_rssi_read(&rtwsta->rssi[RF_PATH_B]);
++	rssi_a = ewma_rssi_read(&rtwsta_link->rssi[RF_PATH_A]);
++	rssi_b = ewma_rssi_read(&rtwsta_link->rssi[RF_PATH_B]);
+ 
+ 	if (rssi_a > rssi_b + RTW89_TX_DIV_RSSI_RAW_TH)
+ 		candidate = RF_A;
+@@ -5785,7 +5853,7 @@ static void rtw89_phy_tx_path_div_sta_iter(void *data, struct ieee80211_sta *sta
+ 		return;
+ 
+ 	hal->antenna_tx = candidate;
+-	rtw89_fw_h2c_txpath_cmac_tbl(rtwdev, rtwsta);
++	rtw89_fw_h2c_txpath_cmac_tbl(rtwdev, rtwsta_link);
+ 
+ 	if (hal->antenna_tx == RF_A) {
+ 		rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE, B_P0_RFMODE_MUX, 0x12);
+@@ -5796,6 +5864,37 @@ static void rtw89_phy_tx_path_div_sta_iter(void *data, struct ieee80211_sta *sta
+ 	}
+ }
+ 
++static void rtw89_phy_tx_path_div_sta_iter(void *data, struct ieee80211_sta *sta)
++{
++	struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
++	struct rtw89_dev *rtwdev = rtwsta->rtwdev;
++	struct rtw89_vif *rtwvif = rtwsta->rtwvif;
++	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
++	struct rtw89_vif_link *rtwvif_link;
++	struct rtw89_sta_link *rtwsta_link;
++	unsigned int link_id;
++	bool *done = data;
++
++	if (WARN(ieee80211_vif_is_mld(vif), "MLD mix path_div\n"))
++		return;
++
++	if (sta->tdls)
++		return;
++
++	if (*done)
++		return;
++
++	rtw89_sta_for_each_link(rtwsta, rtwsta_link, link_id) {
++		rtwvif_link = rtwsta_link->rtwvif_link;
++		if (rtwvif_link->wifi_role != RTW89_WIFI_ROLE_STATION)
++			continue;
++
++		*done = true;
++		__rtw89_phy_tx_path_div_sta_iter(rtwdev, rtwsta_link);
++		return;
++	}
++}
++
+ void rtw89_phy_tx_path_div_track(struct rtw89_dev *rtwdev)
+ {
+ 	struct rtw89_hal *hal = &rtwdev->hal;
+@@ -6002,17 +6101,27 @@ void rtw89_phy_dm_init(struct rtw89_dev *rtwdev)
+ 	rtw89_chip_cfg_txrx_path(rtwdev);
+ }
+ 
+-void rtw89_phy_set_bss_color(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif)
++void rtw89_phy_set_bss_color(struct rtw89_dev *rtwdev,
++			     struct rtw89_vif_link *rtwvif_link)
+ {
++	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
+ 	const struct rtw89_chip_info *chip = rtwdev->chip;
+ 	const struct rtw89_reg_def *bss_clr_vld = &chip->bss_clr_vld;
+ 	enum rtw89_phy_idx phy_idx = RTW89_PHY_0;
++	struct ieee80211_bss_conf *bss_conf;
+ 	u8 bss_color;
+ 
+-	if (!vif->bss_conf.he_support || !vif->cfg.assoc)
++	rcu_read_lock();
++
++	bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
++	if (!bss_conf->he_support || !vif->cfg.assoc) {
++		rcu_read_unlock();
+ 		return;
++	}
++
++	bss_color = bss_conf->he_bss_color.color;
+ 
+-	bss_color = vif->bss_conf.he_bss_color.color;
++	rcu_read_unlock();
+ 
+ 	rtw89_phy_write32_idx(rtwdev, bss_clr_vld->addr, bss_clr_vld->mask, 0x1,
+ 			      phy_idx);
+diff --git a/drivers/net/wireless/realtek/rtw89/phy.h b/drivers/net/wireless/realtek/rtw89/phy.h
+index 6dd8ec46939acd..7e335c02ee6fbf 100644
+--- a/drivers/net/wireless/realtek/rtw89/phy.h
++++ b/drivers/net/wireless/realtek/rtw89/phy.h
+@@ -892,7 +892,7 @@ void rtw89_phy_set_txpwr_limit_ru(struct rtw89_dev *rtwdev,
+ 	phy->set_txpwr_limit_ru(rtwdev, chan, phy_idx);
+ }
+ 
+-void rtw89_phy_ra_assoc(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta);
++void rtw89_phy_ra_assoc(struct rtw89_dev *rtwdev, struct rtw89_sta_link *rtwsta_link);
+ void rtw89_phy_ra_update(struct rtw89_dev *rtwdev);
+ void rtw89_phy_ra_update_sta(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta,
+ 			     u32 changed);
+@@ -953,11 +953,12 @@ void rtw89_phy_antdiv_parse(struct rtw89_dev *rtwdev,
+ 			    struct rtw89_rx_phy_ppdu *phy_ppdu);
+ void rtw89_phy_antdiv_track(struct rtw89_dev *rtwdev);
+ void rtw89_phy_antdiv_work(struct work_struct *work);
+-void rtw89_phy_set_bss_color(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif);
++void rtw89_phy_set_bss_color(struct rtw89_dev *rtwdev,
++			     struct rtw89_vif_link *rtwvif_link);
+ void rtw89_phy_tssi_ctrl_set_bandedge_cfg(struct rtw89_dev *rtwdev,
+ 					  enum rtw89_mac_idx mac_idx,
+ 					  enum rtw89_tssi_bandedge_cfg bandedge_cfg);
+-void rtw89_phy_ul_tb_assoc(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif);
++void rtw89_phy_ul_tb_assoc(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link);
+ void rtw89_phy_ul_tb_ctrl_track(struct rtw89_dev *rtwdev);
+ u8 rtw89_encode_chan_idx(struct rtw89_dev *rtwdev, u8 central_ch, u8 band);
+ void rtw89_decode_chan_idx(struct rtw89_dev *rtwdev, u8 chan_idx,
+diff --git a/drivers/net/wireless/realtek/rtw89/ps.c b/drivers/net/wireless/realtek/rtw89/ps.c
+index aebd6404f80250..c1c12abc2ea93a 100644
+--- a/drivers/net/wireless/realtek/rtw89/ps.c
++++ b/drivers/net/wireless/realtek/rtw89/ps.c
+@@ -62,9 +62,9 @@ static void rtw89_ps_power_mode_change(struct rtw89_dev *rtwdev, bool enter)
+ 		rtw89_mac_power_mode_change(rtwdev, enter);
+ }
+ 
+-void __rtw89_enter_ps_mode(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
++void __rtw89_enter_ps_mode(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link)
+ {
+-	if (rtwvif->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT)
++	if (rtwvif_link->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT)
+ 		return;
+ 
+ 	if (!rtwdev->ps_mode)
+@@ -85,23 +85,25 @@ void __rtw89_leave_ps_mode(struct rtw89_dev *rtwdev)
+ 		rtw89_ps_power_mode_change(rtwdev, false);
+ }
+ 
+-static void __rtw89_enter_lps(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
++static void __rtw89_enter_lps(struct rtw89_dev *rtwdev,
++			      struct rtw89_vif_link *rtwvif_link)
+ {
+ 	struct rtw89_lps_parm lps_param = {
+-		.macid = rtwvif->mac_id,
++		.macid = rtwvif_link->mac_id,
+ 		.psmode = RTW89_MAC_AX_PS_MODE_LEGACY,
+ 		.lastrpwm = RTW89_LAST_RPWM_PS,
+ 	};
+ 
+ 	rtw89_btc_ntfy_radio_state(rtwdev, BTC_RFCTRL_FW_CTRL);
+ 	rtw89_fw_h2c_lps_parm(rtwdev, &lps_param);
+-	rtw89_fw_h2c_lps_ch_info(rtwdev, rtwvif);
++	rtw89_fw_h2c_lps_ch_info(rtwdev, rtwvif_link);
+ }
+ 
+-static void __rtw89_leave_lps(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
++static void __rtw89_leave_lps(struct rtw89_dev *rtwdev,
++			      struct rtw89_vif_link *rtwvif_link)
+ {
+ 	struct rtw89_lps_parm lps_param = {
+-		.macid = rtwvif->mac_id,
++		.macid = rtwvif_link->mac_id,
+ 		.psmode = RTW89_MAC_AX_PS_MODE_ACTIVE,
+ 		.lastrpwm = RTW89_LAST_RPWM_ACTIVE,
+ 	};
+@@ -109,7 +111,7 @@ static void __rtw89_leave_lps(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif
+ 	rtw89_fw_h2c_lps_parm(rtwdev, &lps_param);
+ 	rtw89_fw_leave_lps_check(rtwdev, 0);
+ 	rtw89_btc_ntfy_radio_state(rtwdev, BTC_RFCTRL_WL_ON);
+-	rtw89_chip_digital_pwr_comp(rtwdev, rtwvif->phy_idx);
++	rtw89_chip_digital_pwr_comp(rtwdev, rtwvif_link->phy_idx);
+ }
+ 
+ void rtw89_leave_ps_mode(struct rtw89_dev *rtwdev)
+@@ -119,7 +121,7 @@ void rtw89_leave_ps_mode(struct rtw89_dev *rtwdev)
+ 	__rtw89_leave_ps_mode(rtwdev);
+ }
+ 
+-void rtw89_enter_lps(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
++void rtw89_enter_lps(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
+ 		     bool ps_mode)
+ {
+ 	lockdep_assert_held(&rtwdev->mutex);
+@@ -127,23 +129,26 @@ void rtw89_enter_lps(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ 	if (test_and_set_bit(RTW89_FLAG_LEISURE_PS, rtwdev->flags))
+ 		return;
+ 
+-	__rtw89_enter_lps(rtwdev, rtwvif);
++	__rtw89_enter_lps(rtwdev, rtwvif_link);
+ 	if (ps_mode)
+-		__rtw89_enter_ps_mode(rtwdev, rtwvif);
++		__rtw89_enter_ps_mode(rtwdev, rtwvif_link);
+ }
+ 
+-static void rtw89_leave_lps_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
++static void rtw89_leave_lps_vif(struct rtw89_dev *rtwdev,
++				struct rtw89_vif_link *rtwvif_link)
+ {
+-	if (rtwvif->wifi_role != RTW89_WIFI_ROLE_STATION &&
+-	    rtwvif->wifi_role != RTW89_WIFI_ROLE_P2P_CLIENT)
++	if (rtwvif_link->wifi_role != RTW89_WIFI_ROLE_STATION &&
++	    rtwvif_link->wifi_role != RTW89_WIFI_ROLE_P2P_CLIENT)
+ 		return;
+ 
+-	__rtw89_leave_lps(rtwdev, rtwvif);
++	__rtw89_leave_lps(rtwdev, rtwvif_link);
+ }
+ 
+ void rtw89_leave_lps(struct rtw89_dev *rtwdev)
+ {
++	struct rtw89_vif_link *rtwvif_link;
+ 	struct rtw89_vif *rtwvif;
++	unsigned int link_id;
+ 
+ 	lockdep_assert_held(&rtwdev->mutex);
+ 
+@@ -153,12 +158,15 @@ void rtw89_leave_lps(struct rtw89_dev *rtwdev)
+ 	__rtw89_leave_ps_mode(rtwdev);
+ 
+ 	rtw89_for_each_rtwvif(rtwdev, rtwvif)
+-		rtw89_leave_lps_vif(rtwdev, rtwvif);
++		rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id)
++			rtw89_leave_lps_vif(rtwdev, rtwvif_link);
+ }
+ 
+ void rtw89_enter_ips(struct rtw89_dev *rtwdev)
+ {
++	struct rtw89_vif_link *rtwvif_link;
+ 	struct rtw89_vif *rtwvif;
++	unsigned int link_id;
+ 
+ 	set_bit(RTW89_FLAG_INACTIVE_PS, rtwdev->flags);
+ 
+@@ -166,14 +174,17 @@ void rtw89_enter_ips(struct rtw89_dev *rtwdev)
+ 		return;
+ 
+ 	rtw89_for_each_rtwvif(rtwdev, rtwvif)
+-		rtw89_mac_vif_deinit(rtwdev, rtwvif);
++		rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id)
++			rtw89_mac_vif_deinit(rtwdev, rtwvif_link);
+ 
+ 	rtw89_core_stop(rtwdev);
+ }
+ 
+ void rtw89_leave_ips(struct rtw89_dev *rtwdev)
+ {
++	struct rtw89_vif_link *rtwvif_link;
+ 	struct rtw89_vif *rtwvif;
++	unsigned int link_id;
+ 	int ret;
+ 
+ 	if (test_bit(RTW89_FLAG_POWERON, rtwdev->flags))
+@@ -186,7 +197,8 @@ void rtw89_leave_ips(struct rtw89_dev *rtwdev)
+ 	rtw89_set_channel(rtwdev);
+ 
+ 	rtw89_for_each_rtwvif(rtwdev, rtwvif)
+-		rtw89_mac_vif_init(rtwdev, rtwvif);
++		rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id)
++			rtw89_mac_vif_init(rtwdev, rtwvif_link);
+ 
+ 	clear_bit(RTW89_FLAG_INACTIVE_PS, rtwdev->flags);
+ }
+@@ -197,48 +209,50 @@ void rtw89_set_coex_ctrl_lps(struct rtw89_dev *rtwdev, bool btc_ctrl)
+ 		rtw89_leave_lps(rtwdev);
+ }
+ 
+-static void rtw89_tsf32_toggle(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
++static void rtw89_tsf32_toggle(struct rtw89_dev *rtwdev,
++			       struct rtw89_vif_link *rtwvif_link,
+ 			       enum rtw89_p2pps_action act)
+ {
+ 	if (act == RTW89_P2P_ACT_UPDATE || act == RTW89_P2P_ACT_REMOVE)
+ 		return;
+ 
+ 	if (act == RTW89_P2P_ACT_INIT)
+-		rtw89_fw_h2c_tsf32_toggle(rtwdev, rtwvif, true);
++		rtw89_fw_h2c_tsf32_toggle(rtwdev, rtwvif_link, true);
+ 	else if (act == RTW89_P2P_ACT_TERMINATE)
+-		rtw89_fw_h2c_tsf32_toggle(rtwdev, rtwvif, false);
++		rtw89_fw_h2c_tsf32_toggle(rtwdev, rtwvif_link, false);
+ }
+ 
+ static void rtw89_p2p_disable_all_noa(struct rtw89_dev *rtwdev,
+-				      struct ieee80211_vif *vif)
++				      struct rtw89_vif_link *rtwvif_link,
++				      struct ieee80211_bss_conf *bss_conf)
+ {
+-	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ 	enum rtw89_p2pps_action act;
+ 	u8 noa_id;
+ 
+-	if (rtwvif->last_noa_nr == 0)
++	if (rtwvif_link->last_noa_nr == 0)
+ 		return;
+ 
+-	for (noa_id = 0; noa_id < rtwvif->last_noa_nr; noa_id++) {
+-		if (noa_id == rtwvif->last_noa_nr - 1)
++	for (noa_id = 0; noa_id < rtwvif_link->last_noa_nr; noa_id++) {
++		if (noa_id == rtwvif_link->last_noa_nr - 1)
+ 			act = RTW89_P2P_ACT_TERMINATE;
+ 		else
+ 			act = RTW89_P2P_ACT_REMOVE;
+-		rtw89_tsf32_toggle(rtwdev, rtwvif, act);
+-		rtw89_fw_h2c_p2p_act(rtwdev, vif, NULL, act, noa_id);
++		rtw89_tsf32_toggle(rtwdev, rtwvif_link, act);
++		rtw89_fw_h2c_p2p_act(rtwdev, rtwvif_link, bss_conf,
++				     NULL, act, noa_id);
+ 	}
+ }
+ 
+ static void rtw89_p2p_update_noa(struct rtw89_dev *rtwdev,
+-				 struct ieee80211_vif *vif)
++				 struct rtw89_vif_link *rtwvif_link,
++				 struct ieee80211_bss_conf *bss_conf)
+ {
+-	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ 	struct ieee80211_p2p_noa_desc *desc;
+ 	enum rtw89_p2pps_action act;
+ 	u8 noa_id;
+ 
+ 	for (noa_id = 0; noa_id < RTW89_P2P_MAX_NOA_NUM; noa_id++) {
+-		desc = &vif->bss_conf.p2p_noa_attr.desc[noa_id];
++		desc = &bss_conf->p2p_noa_attr.desc[noa_id];
+ 		if (!desc->count || !desc->duration)
+ 			break;
+ 
+@@ -246,16 +260,19 @@ static void rtw89_p2p_update_noa(struct rtw89_dev *rtwdev,
+ 			act = RTW89_P2P_ACT_INIT;
+ 		else
+ 			act = RTW89_P2P_ACT_UPDATE;
+-		rtw89_tsf32_toggle(rtwdev, rtwvif, act);
+-		rtw89_fw_h2c_p2p_act(rtwdev, vif, desc, act, noa_id);
++		rtw89_tsf32_toggle(rtwdev, rtwvif_link, act);
++		rtw89_fw_h2c_p2p_act(rtwdev, rtwvif_link, bss_conf,
++				     desc, act, noa_id);
+ 	}
+-	rtwvif->last_noa_nr = noa_id;
++	rtwvif_link->last_noa_nr = noa_id;
+ }
+ 
+-void rtw89_process_p2p_ps(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif)
++void rtw89_process_p2p_ps(struct rtw89_dev *rtwdev,
++			  struct rtw89_vif_link *rtwvif_link,
++			  struct ieee80211_bss_conf *bss_conf)
+ {
+-	rtw89_p2p_disable_all_noa(rtwdev, vif);
+-	rtw89_p2p_update_noa(rtwdev, vif);
++	rtw89_p2p_disable_all_noa(rtwdev, rtwvif_link, bss_conf);
++	rtw89_p2p_update_noa(rtwdev, rtwvif_link, bss_conf);
+ }
+ 
+ void rtw89_recalc_lps(struct rtw89_dev *rtwdev)
+@@ -265,6 +282,12 @@ void rtw89_recalc_lps(struct rtw89_dev *rtwdev)
+ 	enum rtw89_entity_mode mode;
+ 	int count = 0;
+ 
++	/* FIXME: Fix rtw89_enter_lps() and __rtw89_enter_ps_mode()
++	 * to take MLO cases into account before doing the following.
++	 */
++	if (rtwdev->support_mlo)
++		goto disable_lps;
++
+ 	mode = rtw89_get_entity_mode(rtwdev);
+ 	if (mode == RTW89_ENTITY_MODE_MCC)
+ 		goto disable_lps;
+@@ -291,9 +314,9 @@ void rtw89_recalc_lps(struct rtw89_dev *rtwdev)
+ 	rtwdev->lps_enabled = false;
+ }
+ 
+-void rtw89_p2p_noa_renew(struct rtw89_vif *rtwvif)
++void rtw89_p2p_noa_renew(struct rtw89_vif_link *rtwvif_link)
+ {
+-	struct rtw89_p2p_noa_setter *setter = &rtwvif->p2p_noa;
++	struct rtw89_p2p_noa_setter *setter = &rtwvif_link->p2p_noa;
+ 	struct rtw89_p2p_noa_ie *ie = &setter->ie;
+ 	struct rtw89_p2p_ie_head *p2p_head = &ie->p2p_head;
+ 	struct rtw89_noa_attr_head *noa_head = &ie->noa_head;
+@@ -318,10 +341,10 @@ void rtw89_p2p_noa_renew(struct rtw89_vif *rtwvif)
+ 	noa_head->oppps_ctwindow = 0;
+ }
+ 
+-void rtw89_p2p_noa_append(struct rtw89_vif *rtwvif,
++void rtw89_p2p_noa_append(struct rtw89_vif_link *rtwvif_link,
+ 			  const struct ieee80211_p2p_noa_desc *desc)
+ {
+-	struct rtw89_p2p_noa_setter *setter = &rtwvif->p2p_noa;
++	struct rtw89_p2p_noa_setter *setter = &rtwvif_link->p2p_noa;
+ 	struct rtw89_p2p_noa_ie *ie = &setter->ie;
+ 	struct rtw89_p2p_ie_head *p2p_head = &ie->p2p_head;
+ 	struct rtw89_noa_attr_head *noa_head = &ie->noa_head;
+@@ -338,9 +361,9 @@ void rtw89_p2p_noa_append(struct rtw89_vif *rtwvif,
+ 	ie->noa_desc[setter->noa_count++] = *desc;
+ }
+ 
+-u8 rtw89_p2p_noa_fetch(struct rtw89_vif *rtwvif, void **data)
++u8 rtw89_p2p_noa_fetch(struct rtw89_vif_link *rtwvif_link, void **data)
+ {
+-	struct rtw89_p2p_noa_setter *setter = &rtwvif->p2p_noa;
++	struct rtw89_p2p_noa_setter *setter = &rtwvif_link->p2p_noa;
+ 	struct rtw89_p2p_noa_ie *ie = &setter->ie;
+ 	void *tail;
+ 
+diff --git a/drivers/net/wireless/realtek/rtw89/ps.h b/drivers/net/wireless/realtek/rtw89/ps.h
+index 54486e4550b61e..cdd712966b09d9 100644
+--- a/drivers/net/wireless/realtek/rtw89/ps.h
++++ b/drivers/net/wireless/realtek/rtw89/ps.h
+@@ -5,21 +5,23 @@
+ #ifndef __RTW89_PS_H_
+ #define __RTW89_PS_H_
+ 
+-void rtw89_enter_lps(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
++void rtw89_enter_lps(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
+ 		     bool ps_mode);
+ void rtw89_leave_lps(struct rtw89_dev *rtwdev);
+ void __rtw89_leave_ps_mode(struct rtw89_dev *rtwdev);
+-void __rtw89_enter_ps_mode(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif);
++void __rtw89_enter_ps_mode(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link);
+ void rtw89_leave_ps_mode(struct rtw89_dev *rtwdev);
+ void rtw89_enter_ips(struct rtw89_dev *rtwdev);
+ void rtw89_leave_ips(struct rtw89_dev *rtwdev);
+ void rtw89_set_coex_ctrl_lps(struct rtw89_dev *rtwdev, bool btc_ctrl);
+-void rtw89_process_p2p_ps(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif);
++void rtw89_process_p2p_ps(struct rtw89_dev *rtwdev,
++			  struct rtw89_vif_link *rtwvif_link,
++			  struct ieee80211_bss_conf *bss_conf);
+ void rtw89_recalc_lps(struct rtw89_dev *rtwdev);
+-void rtw89_p2p_noa_renew(struct rtw89_vif *rtwvif);
+-void rtw89_p2p_noa_append(struct rtw89_vif *rtwvif,
++void rtw89_p2p_noa_renew(struct rtw89_vif_link *rtwvif_link);
++void rtw89_p2p_noa_append(struct rtw89_vif_link *rtwvif_link,
+ 			  const struct ieee80211_p2p_noa_desc *desc);
+-u8 rtw89_p2p_noa_fetch(struct rtw89_vif *rtwvif, void **data);
++u8 rtw89_p2p_noa_fetch(struct rtw89_vif_link *rtwvif_link, void **data);
+ 
+ static inline void rtw89_leave_ips_by_hwflags(struct rtw89_dev *rtwdev)
+ {
+diff --git a/drivers/net/wireless/realtek/rtw89/regd.c b/drivers/net/wireless/realtek/rtw89/regd.c
+index a7720a1f17a743..bb064a086970bb 100644
+--- a/drivers/net/wireless/realtek/rtw89/regd.c
++++ b/drivers/net/wireless/realtek/rtw89/regd.c
+@@ -793,22 +793,26 @@ static bool __rtw89_reg_6ghz_tpe_recalc(struct rtw89_dev *rtwdev)
+ {
+ 	struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
+ 	struct rtw89_reg_6ghz_tpe new = {};
++	struct rtw89_vif_link *rtwvif_link;
+ 	struct rtw89_vif *rtwvif;
++	unsigned int link_id;
+ 	bool changed = false;
+ 
+ 	rtw89_for_each_rtwvif(rtwdev, rtwvif) {
+ 		const struct rtw89_reg_6ghz_tpe *tmp;
+ 		const struct rtw89_chan *chan;
+ 
+-		chan = rtw89_chan_get(rtwdev, rtwvif->chanctx_idx);
+-		if (chan->band_type != RTW89_BAND_6G)
+-			continue;
++		rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) {
++			chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx);
++			if (chan->band_type != RTW89_BAND_6G)
++				continue;
+ 
+-		tmp = &rtwvif->reg_6ghz_tpe;
+-		if (!tmp->valid)
+-			continue;
++			tmp = &rtwvif_link->reg_6ghz_tpe;
++			if (!tmp->valid)
++				continue;
+ 
+-		tpe_intersect_constraint(&new, tmp->constraint);
++			tpe_intersect_constraint(&new, tmp->constraint);
++		}
+ 	}
+ 
+ 	if (memcmp(&regulatory->reg_6ghz_tpe, &new,
+@@ -831,19 +835,24 @@ static bool __rtw89_reg_6ghz_tpe_recalc(struct rtw89_dev *rtwdev)
+ }
+ 
+ static int rtw89_reg_6ghz_tpe_recalc(struct rtw89_dev *rtwdev,
+-				     struct rtw89_vif *rtwvif, bool active,
++				     struct rtw89_vif_link *rtwvif_link, bool active,
+ 				     unsigned int *changed)
+ {
+-	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
+-	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+-	struct rtw89_reg_6ghz_tpe *tpe = &rtwvif->reg_6ghz_tpe;
++	struct rtw89_reg_6ghz_tpe *tpe = &rtwvif_link->reg_6ghz_tpe;
++	struct ieee80211_bss_conf *bss_conf;
+ 
+ 	memset(tpe, 0, sizeof(*tpe));
+ 
+-	if (!active || rtwvif->reg_6ghz_power != RTW89_REG_6GHZ_POWER_STD)
++	if (!active || rtwvif_link->reg_6ghz_power != RTW89_REG_6GHZ_POWER_STD)
+ 		goto bottom;
+ 
++	rcu_read_lock();
++
++	bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
+ 	rtw89_calculate_tpe(rtwdev, tpe, &bss_conf->tpe);
++
++	rcu_read_unlock();
++
+ 	if (!tpe->valid)
+ 		goto bottom;
+ 
+@@ -867,20 +876,24 @@ static bool __rtw89_reg_6ghz_power_recalc(struct rtw89_dev *rtwdev)
+ 	const struct rtw89_regd *regd = regulatory->regd;
+ 	enum rtw89_reg_6ghz_power sel;
+ 	const struct rtw89_chan *chan;
++	struct rtw89_vif_link *rtwvif_link;
+ 	struct rtw89_vif *rtwvif;
++	unsigned int link_id;
+ 	int count = 0;
+ 	u8 index;
+ 
+ 	rtw89_for_each_rtwvif(rtwdev, rtwvif) {
+-		chan = rtw89_chan_get(rtwdev, rtwvif->chanctx_idx);
+-		if (chan->band_type != RTW89_BAND_6G)
+-			continue;
++		rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) {
++			chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx);
++			if (chan->band_type != RTW89_BAND_6G)
++				continue;
+ 
+-		if (count != 0 && rtwvif->reg_6ghz_power == sel)
+-			continue;
++			if (count != 0 && rtwvif_link->reg_6ghz_power == sel)
++				continue;
+ 
+-		sel = rtwvif->reg_6ghz_power;
+-		count++;
++			sel = rtwvif_link->reg_6ghz_power;
++			count++;
++		}
+ 	}
+ 
+ 	if (count != 1)
+@@ -908,35 +921,41 @@ static bool __rtw89_reg_6ghz_power_recalc(struct rtw89_dev *rtwdev)
+ }
+ 
+ static int rtw89_reg_6ghz_power_recalc(struct rtw89_dev *rtwdev,
+-				       struct rtw89_vif *rtwvif, bool active,
++				       struct rtw89_vif_link *rtwvif_link, bool active,
+ 				       unsigned int *changed)
+ {
+-	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
++	struct ieee80211_bss_conf *bss_conf;
++
++	rcu_read_lock();
++
++	bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
+ 
+ 	if (active) {
+-		switch (vif->bss_conf.power_type) {
++		switch (bss_conf->power_type) {
+ 		case IEEE80211_REG_VLP_AP:
+-			rtwvif->reg_6ghz_power = RTW89_REG_6GHZ_POWER_VLP;
++			rtwvif_link->reg_6ghz_power = RTW89_REG_6GHZ_POWER_VLP;
+ 			break;
+ 		case IEEE80211_REG_LPI_AP:
+-			rtwvif->reg_6ghz_power = RTW89_REG_6GHZ_POWER_LPI;
++			rtwvif_link->reg_6ghz_power = RTW89_REG_6GHZ_POWER_LPI;
+ 			break;
+ 		case IEEE80211_REG_SP_AP:
+-			rtwvif->reg_6ghz_power = RTW89_REG_6GHZ_POWER_STD;
++			rtwvif_link->reg_6ghz_power = RTW89_REG_6GHZ_POWER_STD;
+ 			break;
+ 		default:
+-			rtwvif->reg_6ghz_power = RTW89_REG_6GHZ_POWER_DFLT;
++			rtwvif_link->reg_6ghz_power = RTW89_REG_6GHZ_POWER_DFLT;
+ 			break;
+ 		}
+ 	} else {
+-		rtwvif->reg_6ghz_power = RTW89_REG_6GHZ_POWER_DFLT;
++		rtwvif_link->reg_6ghz_power = RTW89_REG_6GHZ_POWER_DFLT;
+ 	}
+ 
++	rcu_read_unlock();
++
+ 	*changed += __rtw89_reg_6ghz_power_recalc(rtwdev);
+ 	return 0;
+ }
+ 
+-int rtw89_reg_6ghz_recalc(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
++int rtw89_reg_6ghz_recalc(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
+ 			  bool active)
+ {
+ 	unsigned int changed = 0;
+@@ -948,11 +967,11 @@ int rtw89_reg_6ghz_recalc(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ 	 * so must do reg_6ghz_tpe_recalc() after reg_6ghz_power_recalc().
+ 	 */
+ 
+-	ret = rtw89_reg_6ghz_power_recalc(rtwdev, rtwvif, active, &changed);
++	ret = rtw89_reg_6ghz_power_recalc(rtwdev, rtwvif_link, active, &changed);
+ 	if (ret)
+ 		return ret;
+ 
+-	ret = rtw89_reg_6ghz_tpe_recalc(rtwdev, rtwvif, active, &changed);
++	ret = rtw89_reg_6ghz_tpe_recalc(rtwdev, rtwvif_link, active, &changed);
+ 	if (ret)
+ 		return ret;
+ 
+diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851b.c b/drivers/net/wireless/realtek/rtw89/rtw8851b.c
+index 1679bd408ef3f3..f9766bf30e71df 100644
+--- a/drivers/net/wireless/realtek/rtw89/rtw8851b.c
++++ b/drivers/net/wireless/realtek/rtw89/rtw8851b.c
+@@ -1590,10 +1590,11 @@ static void rtw8851b_rfk_init(struct rtw89_dev *rtwdev)
+ 	rtw8851b_rx_dck(rtwdev, RTW89_PHY_0, RTW89_CHANCTX_0);
+ }
+ 
+-static void rtw8851b_rfk_channel(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
++static void rtw8851b_rfk_channel(struct rtw89_dev *rtwdev,
++				 struct rtw89_vif_link *rtwvif_link)
+ {
+-	enum rtw89_chanctx_idx chanctx_idx = rtwvif->chanctx_idx;
+-	enum rtw89_phy_idx phy_idx = rtwvif->phy_idx;
++	enum rtw89_chanctx_idx chanctx_idx = rtwvif_link->chanctx_idx;
++	enum rtw89_phy_idx phy_idx = rtwvif_link->phy_idx;
+ 
+ 	rtw8851b_rx_dck(rtwdev, phy_idx, chanctx_idx);
+ 	rtw8851b_iqk(rtwdev, phy_idx, chanctx_idx);
+@@ -1608,10 +1609,12 @@ static void rtw8851b_rfk_band_changed(struct rtw89_dev *rtwdev,
+ 	rtw8851b_tssi_scan(rtwdev, phy_idx, chan);
+ }
+ 
+-static void rtw8851b_rfk_scan(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
++static void rtw8851b_rfk_scan(struct rtw89_dev *rtwdev,
++			      struct rtw89_vif_link *rtwvif_link,
+ 			      bool start)
+ {
+-	rtw8851b_wifi_scan_notify(rtwdev, start, rtwvif->phy_idx, rtwvif->chanctx_idx);
++	rtw8851b_wifi_scan_notify(rtwdev, start, rtwvif_link->phy_idx,
++				  rtwvif_link->chanctx_idx);
+ }
+ 
+ static void rtw8851b_rfk_track(struct rtw89_dev *rtwdev)
+diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.c b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
+index dde96bd63021ff..42d369d2e916a6 100644
+--- a/drivers/net/wireless/realtek/rtw89/rtw8852a.c
++++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
+@@ -1350,10 +1350,11 @@ static void rtw8852a_rfk_init(struct rtw89_dev *rtwdev)
+ 	rtw8852a_rx_dck(rtwdev, RTW89_PHY_0, true, RTW89_CHANCTX_0);
+ }
+ 
+-static void rtw8852a_rfk_channel(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
++static void rtw8852a_rfk_channel(struct rtw89_dev *rtwdev,
++				 struct rtw89_vif_link *rtwvif_link)
+ {
+-	enum rtw89_chanctx_idx chanctx_idx = rtwvif->chanctx_idx;
+-	enum rtw89_phy_idx phy_idx = rtwvif->phy_idx;
++	enum rtw89_chanctx_idx chanctx_idx = rtwvif_link->chanctx_idx;
++	enum rtw89_phy_idx phy_idx = rtwvif_link->phy_idx;
+ 
+ 	rtw8852a_rx_dck(rtwdev, phy_idx, true, chanctx_idx);
+ 	rtw8852a_iqk(rtwdev, phy_idx, chanctx_idx);
+@@ -1368,10 +1369,11 @@ static void rtw8852a_rfk_band_changed(struct rtw89_dev *rtwdev,
+ 	rtw8852a_tssi_scan(rtwdev, phy_idx, chan);
+ }
+ 
+-static void rtw8852a_rfk_scan(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
++static void rtw8852a_rfk_scan(struct rtw89_dev *rtwdev,
++			      struct rtw89_vif_link *rtwvif_link,
+ 			      bool start)
+ {
+-	rtw8852a_wifi_scan_notify(rtwdev, start, rtwvif->phy_idx);
++	rtw8852a_wifi_scan_notify(rtwdev, start, rtwvif_link->phy_idx);
+ }
+ 
+ static void rtw8852a_rfk_track(struct rtw89_dev *rtwdev)
+diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b.c b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
+index 12be52f76427a1..364aa21cbd446f 100644
+--- a/drivers/net/wireless/realtek/rtw89/rtw8852b.c
++++ b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
+@@ -562,10 +562,11 @@ static void rtw8852b_rfk_init(struct rtw89_dev *rtwdev)
+ 	rtw8852b_rx_dck(rtwdev, RTW89_PHY_0, RTW89_CHANCTX_0);
+ }
+ 
+-static void rtw8852b_rfk_channel(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
++static void rtw8852b_rfk_channel(struct rtw89_dev *rtwdev,
++				 struct rtw89_vif_link *rtwvif_link)
+ {
+-	enum rtw89_chanctx_idx chanctx_idx = rtwvif->chanctx_idx;
+-	enum rtw89_phy_idx phy_idx = rtwvif->phy_idx;
++	enum rtw89_chanctx_idx chanctx_idx = rtwvif_link->chanctx_idx;
++	enum rtw89_phy_idx phy_idx = rtwvif_link->phy_idx;
+ 
+ 	rtw8852b_rx_dck(rtwdev, phy_idx, chanctx_idx);
+ 	rtw8852b_iqk(rtwdev, phy_idx, chanctx_idx);
+@@ -580,10 +581,12 @@ static void rtw8852b_rfk_band_changed(struct rtw89_dev *rtwdev,
+ 	rtw8852b_tssi_scan(rtwdev, phy_idx, chan);
+ }
+ 
+-static void rtw8852b_rfk_scan(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
++static void rtw8852b_rfk_scan(struct rtw89_dev *rtwdev,
++			      struct rtw89_vif_link *rtwvif_link,
+ 			      bool start)
+ {
+-	rtw8852b_wifi_scan_notify(rtwdev, start, rtwvif->phy_idx, rtwvif->chanctx_idx);
++	rtw8852b_wifi_scan_notify(rtwdev, start, rtwvif_link->phy_idx,
++				  rtwvif_link->chanctx_idx);
+ }
+ 
+ static void rtw8852b_rfk_track(struct rtw89_dev *rtwdev)
+diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bt.c b/drivers/net/wireless/realtek/rtw89/rtw8852bt.c
+index 7dfdcb5964e117..dab7e71ec6a140 100644
+--- a/drivers/net/wireless/realtek/rtw89/rtw8852bt.c
++++ b/drivers/net/wireless/realtek/rtw89/rtw8852bt.c
+@@ -535,10 +535,11 @@ static void rtw8852bt_rfk_init(struct rtw89_dev *rtwdev)
+ 	rtw8852bt_rx_dck(rtwdev, RTW89_PHY_0, RTW89_CHANCTX_0);
+ }
+ 
+-static void rtw8852bt_rfk_channel(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
++static void rtw8852bt_rfk_channel(struct rtw89_dev *rtwdev,
++				  struct rtw89_vif_link *rtwvif_link)
+ {
+-	enum rtw89_chanctx_idx chanctx_idx = rtwvif->chanctx_idx;
+-	enum rtw89_phy_idx phy_idx = rtwvif->phy_idx;
++	enum rtw89_chanctx_idx chanctx_idx = rtwvif_link->chanctx_idx;
++	enum rtw89_phy_idx phy_idx = rtwvif_link->phy_idx;
+ 
+ 	rtw8852bt_rx_dck(rtwdev, phy_idx, chanctx_idx);
+ 	rtw8852bt_iqk(rtwdev, phy_idx, chanctx_idx);
+@@ -553,10 +554,12 @@ static void rtw8852bt_rfk_band_changed(struct rtw89_dev *rtwdev,
+ 	rtw8852bt_tssi_scan(rtwdev, phy_idx, chan);
+ }
+ 
+-static void rtw8852bt_rfk_scan(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
++static void rtw8852bt_rfk_scan(struct rtw89_dev *rtwdev,
++			       struct rtw89_vif_link *rtwvif_link,
+ 			       bool start)
+ {
+-	rtw8852bt_wifi_scan_notify(rtwdev, start, rtwvif->phy_idx, rtwvif->chanctx_idx);
++	rtw8852bt_wifi_scan_notify(rtwdev, start, rtwvif_link->phy_idx,
++				   rtwvif_link->chanctx_idx);
+ }
+ 
+ static void rtw8852bt_rfk_track(struct rtw89_dev *rtwdev)
+diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
+index 1c6e89ab0f4bcb..dbe77abb2c488f 100644
+--- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c
++++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
+@@ -1846,10 +1846,11 @@ static void rtw8852c_rfk_init(struct rtw89_dev *rtwdev)
+ 	rtw8852c_rx_dck(rtwdev, RTW89_PHY_0, false);
+ }
+ 
+-static void rtw8852c_rfk_channel(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
++static void rtw8852c_rfk_channel(struct rtw89_dev *rtwdev,
++				 struct rtw89_vif_link *rtwvif_link)
+ {
+-	enum rtw89_chanctx_idx chanctx_idx = rtwvif->chanctx_idx;
+-	enum rtw89_phy_idx phy_idx = rtwvif->phy_idx;
++	enum rtw89_chanctx_idx chanctx_idx = rtwvif_link->chanctx_idx;
++	enum rtw89_phy_idx phy_idx = rtwvif_link->phy_idx;
+ 
+ 	rtw8852c_mcc_get_ch_info(rtwdev, phy_idx);
+ 	rtw8852c_rx_dck(rtwdev, phy_idx, false);
+@@ -1866,10 +1867,11 @@ static void rtw8852c_rfk_band_changed(struct rtw89_dev *rtwdev,
+ 	rtw8852c_tssi_scan(rtwdev, phy_idx, chan);
+ }
+ 
+-static void rtw8852c_rfk_scan(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
++static void rtw8852c_rfk_scan(struct rtw89_dev *rtwdev,
++			      struct rtw89_vif_link *rtwvif_link,
+ 			      bool start)
+ {
+-	rtw8852c_wifi_scan_notify(rtwdev, start, rtwvif->phy_idx);
++	rtw8852c_wifi_scan_notify(rtwdev, start, rtwvif_link->phy_idx);
+ }
+ 
+ static void rtw8852c_rfk_track(struct rtw89_dev *rtwdev)
+diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922a.c b/drivers/net/wireless/realtek/rtw89/rtw8922a.c
+index 63b1ff2f98ed31..ef7747adbcc2b8 100644
+--- a/drivers/net/wireless/realtek/rtw89/rtw8922a.c
++++ b/drivers/net/wireless/realtek/rtw89/rtw8922a.c
+@@ -2020,11 +2020,12 @@ static void _wait_rx_mode(struct rtw89_dev *rtwdev, u8 kpath)
+ 	}
+ }
+ 
+-static void rtw8922a_rfk_channel(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
++static void rtw8922a_rfk_channel(struct rtw89_dev *rtwdev,
++				 struct rtw89_vif_link *rtwvif_link)
+ {
+-	enum rtw89_chanctx_idx chanctx_idx = rtwvif->chanctx_idx;
++	enum rtw89_chanctx_idx chanctx_idx = rtwvif_link->chanctx_idx;
+ 	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
+-	enum rtw89_phy_idx phy_idx = rtwvif->phy_idx;
++	enum rtw89_phy_idx phy_idx = rtwvif_link->phy_idx;
+ 	u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB, chanctx_idx);
+ 	u32 tx_en;
+ 
+@@ -2050,7 +2051,8 @@ static void rtw8922a_rfk_band_changed(struct rtw89_dev *rtwdev,
+ 	rtw89_phy_rfk_tssi_and_wait(rtwdev, phy_idx, chan, RTW89_TSSI_SCAN, 6);
+ }
+ 
+-static void rtw8922a_rfk_scan(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
++static void rtw8922a_rfk_scan(struct rtw89_dev *rtwdev,
++			      struct rtw89_vif_link *rtwvif_link,
+ 			      bool start)
+ {
+ }
+diff --git a/drivers/net/wireless/realtek/rtw89/ser.c b/drivers/net/wireless/realtek/rtw89/ser.c
+index 5fc2faa9ba5a7e..7b203bb7f151a7 100644
+--- a/drivers/net/wireless/realtek/rtw89/ser.c
++++ b/drivers/net/wireless/realtek/rtw89/ser.c
+@@ -300,37 +300,54 @@ static void drv_resume_rx(struct rtw89_ser *ser)
+ 
+ static void ser_reset_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
+ {
+-	rtw89_core_release_bit_map(rtwdev->hw_port, rtwvif->port);
+-	rtwvif->net_type = RTW89_NET_TYPE_NO_LINK;
+-	rtwvif->trigger = false;
++	struct rtw89_vif_link *rtwvif_link;
++	unsigned int link_id;
++
+ 	rtwvif->tdls_peer = 0;
++
++	rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) {
++		rtw89_core_release_bit_map(rtwdev->hw_port, rtwvif_link->port);
++		rtwvif_link->net_type = RTW89_NET_TYPE_NO_LINK;
++		rtwvif_link->trigger = false;
++	}
+ }
+ 
+ static void ser_sta_deinit_cam_iter(void *data, struct ieee80211_sta *sta)
+ {
+ 	struct rtw89_vif *target_rtwvif = (struct rtw89_vif *)data;
+-	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
++	struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
+ 	struct rtw89_vif *rtwvif = rtwsta->rtwvif;
+ 	struct rtw89_dev *rtwdev = rtwvif->rtwdev;
++	struct rtw89_vif_link *rtwvif_link;
++	struct rtw89_sta_link *rtwsta_link;
++	unsigned int link_id;
+ 
+ 	if (rtwvif != target_rtwvif)
+ 		return;
+ 
+-	if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE || sta->tdls)
+-		rtw89_cam_deinit_addr_cam(rtwdev, &rtwsta->addr_cam);
+-	if (sta->tdls)
+-		rtw89_cam_deinit_bssid_cam(rtwdev, &rtwsta->bssid_cam);
++	rtw89_sta_for_each_link(rtwsta, rtwsta_link, link_id) {
++		rtwvif_link = rtwsta_link->rtwvif_link;
+ 
+-	INIT_LIST_HEAD(&rtwsta->ba_cam_list);
++		if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE || sta->tdls)
++			rtw89_cam_deinit_addr_cam(rtwdev, &rtwsta_link->addr_cam);
++		if (sta->tdls)
++			rtw89_cam_deinit_bssid_cam(rtwdev, &rtwsta_link->bssid_cam);
++
++		INIT_LIST_HEAD(&rtwsta_link->ba_cam_list);
++	}
+ }
+ 
+ static void ser_deinit_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
+ {
++	struct rtw89_vif_link *rtwvif_link;
++	unsigned int link_id;
++
+ 	ieee80211_iterate_stations_atomic(rtwdev->hw,
+ 					  ser_sta_deinit_cam_iter,
+ 					  rtwvif);
+ 
+-	rtw89_cam_deinit(rtwdev, rtwvif);
++	rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id)
++		rtw89_cam_deinit(rtwdev, rtwvif_link);
+ 
+ 	bitmap_zero(rtwdev->cam_info.ba_cam_map, RTW89_MAX_BA_CAM_NUM);
+ }
+diff --git a/drivers/net/wireless/realtek/rtw89/wow.c b/drivers/net/wireless/realtek/rtw89/wow.c
+index 86e24e07780d9b..3e81fd974ec180 100644
+--- a/drivers/net/wireless/realtek/rtw89/wow.c
++++ b/drivers/net/wireless/realtek/rtw89/wow.c
+@@ -421,7 +421,8 @@ static void rtw89_wow_construct_key_info(struct rtw89_dev *rtwdev)
+ {
+ 	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ 	struct rtw89_wow_key_info *key_info = &rtw_wow->key_info;
+-	struct ieee80211_vif *wow_vif = rtwdev->wow.wow_vif;
++	struct rtw89_vif_link *rtwvif_link = rtwdev->wow.rtwvif_link;
++	struct ieee80211_vif *wow_vif = rtwvif_link_to_vif(rtwvif_link);
+ 	bool err = false;
+ 
+ 	rcu_read_lock();
+@@ -596,7 +597,8 @@ static int rtw89_wow_get_aoac_rpt(struct rtw89_dev *rtwdev, bool rx_ready)
+ static struct ieee80211_key_conf *rtw89_wow_gtk_rekey(struct rtw89_dev *rtwdev,
+ 						      u32 cipher, u8 keyidx, u8 *gtk)
+ {
+-	struct ieee80211_vif *wow_vif = rtwdev->wow.wow_vif;
++	struct rtw89_vif_link *rtwvif_link = rtwdev->wow.rtwvif_link;
++	struct ieee80211_vif *wow_vif = rtwvif_link_to_vif(rtwvif_link);
+ 	const struct rtw89_cipher_info *cipher_info;
+ 	struct ieee80211_key_conf *rekey_conf;
+ 	struct ieee80211_key_conf *key;
+@@ -632,11 +634,13 @@ static struct ieee80211_key_conf *rtw89_wow_gtk_rekey(struct rtw89_dev *rtwdev,
+ 
+ static void rtw89_wow_update_key_info(struct rtw89_dev *rtwdev, bool rx_ready)
+ {
+-	struct ieee80211_vif *wow_vif = rtwdev->wow.wow_vif;
++	struct rtw89_vif_link *rtwvif_link = rtwdev->wow.rtwvif_link;
++	struct ieee80211_vif *wow_vif = rtwvif_link_to_vif(rtwvif_link);
+ 	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ 	struct rtw89_wow_aoac_report *aoac_rpt = &rtw_wow->aoac_rpt;
+ 	struct rtw89_set_key_info_iter_data data = {.error = false,
+ 						    .rx_ready = rx_ready};
++	struct ieee80211_bss_conf *bss_conf;
+ 	struct ieee80211_key_conf *key;
+ 
+ 	rcu_read_lock();
+@@ -669,9 +673,15 @@ static void rtw89_wow_update_key_info(struct rtw89_dev *rtwdev, bool rx_ready)
+ 		return;
+ 
+ 	rtw89_rx_pn_set_pmf(rtwdev, key, aoac_rpt->igtk_ipn);
+-	ieee80211_gtk_rekey_notify(wow_vif, wow_vif->bss_conf.bssid,
++
++	rcu_read_lock();
++
++	bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
++	ieee80211_gtk_rekey_notify(wow_vif, bss_conf->bssid,
+ 				   aoac_rpt->eapol_key_replay_count,
+-				   GFP_KERNEL);
++				   GFP_ATOMIC);
++
++	rcu_read_unlock();
+ }
+ 
+ static void rtw89_wow_leave_deep_ps(struct rtw89_dev *rtwdev)
+@@ -681,27 +691,24 @@ static void rtw89_wow_leave_deep_ps(struct rtw89_dev *rtwdev)
+ 
+ static void rtw89_wow_enter_deep_ps(struct rtw89_dev *rtwdev)
+ {
+-	struct ieee80211_vif *wow_vif = rtwdev->wow.wow_vif;
+-	struct rtw89_vif *rtwvif = (struct rtw89_vif *)wow_vif->drv_priv;
++	struct rtw89_vif_link *rtwvif_link = rtwdev->wow.rtwvif_link;
+ 
+-	__rtw89_enter_ps_mode(rtwdev, rtwvif);
++	__rtw89_enter_ps_mode(rtwdev, rtwvif_link);
+ }
+ 
+ static void rtw89_wow_enter_ps(struct rtw89_dev *rtwdev)
+ {
+-	struct ieee80211_vif *wow_vif = rtwdev->wow.wow_vif;
+-	struct rtw89_vif *rtwvif = (struct rtw89_vif *)wow_vif->drv_priv;
++	struct rtw89_vif_link *rtwvif_link = rtwdev->wow.rtwvif_link;
+ 
+ 	if (rtw89_wow_mgd_linked(rtwdev))
+-		rtw89_enter_lps(rtwdev, rtwvif, false);
++		rtw89_enter_lps(rtwdev, rtwvif_link, false);
+ 	else if (rtw89_wow_no_link(rtwdev))
+-		rtw89_fw_h2c_fwips(rtwdev, rtwvif, true);
++		rtw89_fw_h2c_fwips(rtwdev, rtwvif_link, true);
+ }
+ 
+ static void rtw89_wow_leave_ps(struct rtw89_dev *rtwdev, bool enable_wow)
+ {
+-	struct ieee80211_vif *wow_vif = rtwdev->wow.wow_vif;
+-	struct rtw89_vif *rtwvif = (struct rtw89_vif *)wow_vif->drv_priv;
++	struct rtw89_vif_link *rtwvif_link = rtwdev->wow.rtwvif_link;
+ 
+ 	if (rtw89_wow_mgd_linked(rtwdev)) {
+ 		rtw89_leave_lps(rtwdev);
+@@ -709,7 +716,7 @@ static void rtw89_wow_leave_ps(struct rtw89_dev *rtwdev, bool enable_wow)
+ 		if (enable_wow)
+ 			rtw89_leave_ips(rtwdev);
+ 		else
+-			rtw89_fw_h2c_fwips(rtwdev, rtwvif, false);
++			rtw89_fw_h2c_fwips(rtwdev, rtwvif_link, false);
+ 	}
+ }
+ 
+@@ -734,6 +741,8 @@ static void rtw89_wow_set_rx_filter(struct rtw89_dev *rtwdev, bool enable)
+ 
+ static void rtw89_wow_show_wakeup_reason(struct rtw89_dev *rtwdev)
+ {
++	struct rtw89_vif_link *rtwvif_link = rtwdev->wow.rtwvif_link;
++	struct ieee80211_vif *wow_vif = rtwvif_link_to_vif(rtwvif_link);
+ 	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ 	struct rtw89_wow_aoac_report *aoac_rpt = &rtw_wow->aoac_rpt;
+ 	struct cfg80211_wowlan_nd_info nd_info;
+@@ -780,35 +789,34 @@ static void rtw89_wow_show_wakeup_reason(struct rtw89_dev *rtwdev)
+ 		break;
+ 	default:
+ 		rtw89_warn(rtwdev, "Unknown wakeup reason %x\n", reason);
+-		ieee80211_report_wowlan_wakeup(rtwdev->wow.wow_vif, NULL,
+-					       GFP_KERNEL);
++		ieee80211_report_wowlan_wakeup(wow_vif, NULL, GFP_KERNEL);
+ 		return;
+ 	}
+ 
+-	ieee80211_report_wowlan_wakeup(rtwdev->wow.wow_vif, &wakeup,
+-				       GFP_KERNEL);
++	ieee80211_report_wowlan_wakeup(wow_vif, &wakeup, GFP_KERNEL);
+ }
+ 
+-static void rtw89_wow_vif_iter(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
++static void rtw89_wow_vif_iter(struct rtw89_dev *rtwdev,
++			       struct rtw89_vif_link *rtwvif_link)
+ {
+ 	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+-	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
++	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
+ 
+ 	/* Current WoWLAN function support setting of only vif in
+ 	 * infra mode or no link mode. When one suitable vif is found,
+ 	 * stop the iteration.
+ 	 */
+-	if (rtw_wow->wow_vif || vif->type != NL80211_IFTYPE_STATION)
++	if (rtw_wow->rtwvif_link || vif->type != NL80211_IFTYPE_STATION)
+ 		return;
+ 
+-	switch (rtwvif->net_type) {
++	switch (rtwvif_link->net_type) {
+ 	case RTW89_NET_TYPE_INFRA:
+ 		if (rtw_wow_has_mgd_features(rtwdev))
+-			rtw_wow->wow_vif = vif;
++			rtw_wow->rtwvif_link = rtwvif_link;
+ 		break;
+ 	case RTW89_NET_TYPE_NO_LINK:
+ 		if (rtw_wow->pno_inited)
+-			rtw_wow->wow_vif = vif;
++			rtw_wow->rtwvif_link = rtwvif_link;
+ 		break;
+ 	default:
+ 		break;
+@@ -865,7 +873,7 @@ static u16 rtw89_calc_crc(u8 *pdata, int length)
+ 	return ~crc;
+ }
+ 
+-static int rtw89_wow_pattern_get_type(struct rtw89_vif *rtwvif,
++static int rtw89_wow_pattern_get_type(struct rtw89_vif_link *rtwvif_link,
+ 				      struct rtw89_wow_cam_info *rtw_pattern,
+ 				      const u8 *pattern, u8 da_mask)
+ {
+@@ -885,7 +893,7 @@ static int rtw89_wow_pattern_get_type(struct rtw89_vif *rtwvif,
+ 		rtw_pattern->bc = true;
+ 	else if (is_multicast_ether_addr(da))
+ 		rtw_pattern->mc = true;
+-	else if (ether_addr_equal(da, rtwvif->mac_addr) &&
++	else if (ether_addr_equal(da, rtwvif_link->mac_addr) &&
+ 		 da_mask == GENMASK(5, 0))
+ 		rtw_pattern->uc = true;
+ 	else if (!da_mask) /*da_mask == 0 mean wildcard*/
+@@ -897,7 +905,7 @@ static int rtw89_wow_pattern_get_type(struct rtw89_vif *rtwvif,
+ }
+ 
+ static int rtw89_wow_pattern_generate(struct rtw89_dev *rtwdev,
+-				      struct rtw89_vif *rtwvif,
++				      struct rtw89_vif_link *rtwvif_link,
+ 				      const struct cfg80211_pkt_pattern *pkt_pattern,
+ 				      struct rtw89_wow_cam_info *rtw_pattern)
+ {
+@@ -916,7 +924,7 @@ static int rtw89_wow_pattern_generate(struct rtw89_dev *rtwdev,
+ 	mask_len = DIV_ROUND_UP(len, 8);
+ 	memset(rtw_pattern, 0, sizeof(*rtw_pattern));
+ 
+-	ret = rtw89_wow_pattern_get_type(rtwvif, rtw_pattern, pattern,
++	ret = rtw89_wow_pattern_get_type(rtwvif_link, rtw_pattern, pattern,
+ 					 mask[0] & GENMASK(5, 0));
+ 	if (ret)
+ 		return ret;
+@@ -970,7 +978,7 @@ static int rtw89_wow_pattern_generate(struct rtw89_dev *rtwdev,
+ }
+ 
+ static int rtw89_wow_parse_patterns(struct rtw89_dev *rtwdev,
+-				    struct rtw89_vif *rtwvif,
++				    struct rtw89_vif_link *rtwvif_link,
+ 				    struct cfg80211_wowlan *wowlan)
+ {
+ 	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+@@ -983,7 +991,7 @@ static int rtw89_wow_parse_patterns(struct rtw89_dev *rtwdev,
+ 
+ 	for (i = 0; i < wowlan->n_patterns; i++) {
+ 		rtw_pattern = &rtw_wow->patterns[i];
+-		ret = rtw89_wow_pattern_generate(rtwdev, rtwvif,
++		ret = rtw89_wow_pattern_generate(rtwdev, rtwvif_link,
+ 						 &wowlan->patterns[i],
+ 						 rtw_pattern);
+ 		if (ret) {
+@@ -1040,7 +1048,7 @@ static void rtw89_wow_clear_wakeups(struct rtw89_dev *rtwdev)
+ {
+ 	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ 
+-	rtw_wow->wow_vif = NULL;
++	rtw_wow->rtwvif_link = NULL;
+ 	rtw89_core_release_all_bits_map(rtw_wow->flags, RTW89_WOW_FLAG_NUM);
+ 	rtw_wow->pattern_cnt = 0;
+ 	rtw_wow->pno_inited = false;
+@@ -1066,6 +1074,7 @@ static int rtw89_wow_set_wakeups(struct rtw89_dev *rtwdev,
+ 				 struct cfg80211_wowlan *wowlan)
+ {
+ 	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
++	struct rtw89_vif_link *rtwvif_link;
+ 	struct rtw89_vif *rtwvif;
+ 
+ 	if (wowlan->disconnect)
+@@ -1078,36 +1087,40 @@ static int rtw89_wow_set_wakeups(struct rtw89_dev *rtwdev,
+ 	if (wowlan->nd_config)
+ 		rtw89_wow_init_pno(rtwdev, wowlan->nd_config);
+ 
+-	rtw89_for_each_rtwvif(rtwdev, rtwvif)
+-		rtw89_wow_vif_iter(rtwdev, rtwvif);
++	rtw89_for_each_rtwvif(rtwdev, rtwvif) {
++		/* use the link on HW-0 to do wow flow */
++		rtwvif_link = rtw89_vif_get_link_inst(rtwvif, 0);
++		if (!rtwvif_link)
++			continue;
++
++		rtw89_wow_vif_iter(rtwdev, rtwvif_link);
++	}
+ 
+-	if (!rtw_wow->wow_vif)
++	rtwvif_link = rtw_wow->rtwvif_link;
++	if (!rtwvif_link)
+ 		return -EPERM;
+ 
+-	rtwvif = (struct rtw89_vif *)rtw_wow->wow_vif->drv_priv;
+-	return rtw89_wow_parse_patterns(rtwdev, rtwvif, wowlan);
++	return rtw89_wow_parse_patterns(rtwdev, rtwvif_link, wowlan);
+ }
+ 
+ static int rtw89_wow_cfg_wake_pno(struct rtw89_dev *rtwdev, bool wow)
+ {
+-	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+-	struct ieee80211_vif *wow_vif = rtw_wow->wow_vif;
+-	struct rtw89_vif *rtwvif = (struct rtw89_vif *)wow_vif->drv_priv;
++	struct rtw89_vif_link *rtwvif_link = rtwdev->wow.rtwvif_link;
+ 	int ret;
+ 
+-	ret = rtw89_fw_h2c_cfg_pno(rtwdev, rtwvif, true);
++	ret = rtw89_fw_h2c_cfg_pno(rtwdev, rtwvif_link, true);
+ 	if (ret) {
+ 		rtw89_err(rtwdev, "failed to config pno\n");
+ 		return ret;
+ 	}
+ 
+-	ret = rtw89_fw_h2c_wow_wakeup_ctrl(rtwdev, rtwvif, wow);
++	ret = rtw89_fw_h2c_wow_wakeup_ctrl(rtwdev, rtwvif_link, wow);
+ 	if (ret) {
+ 		rtw89_err(rtwdev, "failed to fw wow wakeup ctrl\n");
+ 		return ret;
+ 	}
+ 
+-	ret = rtw89_fw_h2c_wow_global(rtwdev, rtwvif, wow);
++	ret = rtw89_fw_h2c_wow_global(rtwdev, rtwvif_link, wow);
+ 	if (ret) {
+ 		rtw89_err(rtwdev, "failed to fw wow global\n");
+ 		return ret;
+@@ -1119,34 +1132,39 @@ static int rtw89_wow_cfg_wake_pno(struct rtw89_dev *rtwdev, bool wow)
+ static int rtw89_wow_cfg_wake(struct rtw89_dev *rtwdev, bool wow)
+ {
+ 	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+-	struct ieee80211_vif *wow_vif = rtw_wow->wow_vif;
+-	struct rtw89_vif *rtwvif = (struct rtw89_vif *)wow_vif->drv_priv;
++	struct rtw89_vif_link *rtwvif_link = rtw_wow->rtwvif_link;
++	struct ieee80211_vif *wow_vif = rtwvif_link_to_vif(rtwvif_link);
+ 	struct ieee80211_sta *wow_sta;
+-	struct rtw89_sta *rtwsta = NULL;
++	struct rtw89_sta_link *rtwsta_link = NULL;
++	struct rtw89_sta *rtwsta;
+ 	int ret;
+ 
+-	wow_sta = ieee80211_find_sta(wow_vif, rtwvif->bssid);
+-	if (wow_sta)
+-		rtwsta = (struct rtw89_sta *)wow_sta->drv_priv;
++	wow_sta = ieee80211_find_sta(wow_vif, wow_vif->cfg.ap_addr);
++	if (wow_sta) {
++		rtwsta = sta_to_rtwsta(wow_sta);
++		rtwsta_link = rtwsta->links[rtwvif_link->link_id];
++		if (!rtwsta_link)
++			return -ENOLINK;
++	}
+ 
+ 	if (wow) {
+ 		if (rtw_wow->pattern_cnt)
+-			rtwvif->wowlan_pattern = true;
++			rtwvif_link->wowlan_pattern = true;
+ 		if (test_bit(RTW89_WOW_FLAG_EN_MAGIC_PKT, rtw_wow->flags))
+-			rtwvif->wowlan_magic = true;
++			rtwvif_link->wowlan_magic = true;
+ 	} else {
+-		rtwvif->wowlan_pattern = false;
+-		rtwvif->wowlan_magic = false;
++		rtwvif_link->wowlan_pattern = false;
++		rtwvif_link->wowlan_magic = false;
+ 	}
+ 
+-	ret = rtw89_fw_h2c_wow_wakeup_ctrl(rtwdev, rtwvif, wow);
++	ret = rtw89_fw_h2c_wow_wakeup_ctrl(rtwdev, rtwvif_link, wow);
+ 	if (ret) {
+ 		rtw89_err(rtwdev, "failed to fw wow wakeup ctrl\n");
+ 		return ret;
+ 	}
+ 
+ 	if (wow) {
+-		ret = rtw89_chip_h2c_dctl_sec_cam(rtwdev, rtwvif, rtwsta);
++		ret = rtw89_chip_h2c_dctl_sec_cam(rtwdev, rtwvif_link, rtwsta_link);
+ 		if (ret) {
+ 			rtw89_err(rtwdev, "failed to update dctl cam sec entry: %d\n",
+ 				  ret);
+@@ -1154,13 +1172,13 @@ static int rtw89_wow_cfg_wake(struct rtw89_dev *rtwdev, bool wow)
+ 		}
+ 	}
+ 
+-	ret = rtw89_fw_h2c_cam(rtwdev, rtwvif, rtwsta, NULL);
++	ret = rtw89_fw_h2c_cam(rtwdev, rtwvif_link, rtwsta_link, NULL);
+ 	if (ret) {
+ 		rtw89_warn(rtwdev, "failed to send h2c cam\n");
+ 		return ret;
+ 	}
+ 
+-	ret = rtw89_fw_h2c_wow_global(rtwdev, rtwvif, wow);
++	ret = rtw89_fw_h2c_wow_global(rtwdev, rtwvif_link, wow);
+ 	if (ret) {
+ 		rtw89_err(rtwdev, "failed to fw wow global\n");
+ 		return ret;
+@@ -1190,25 +1208,30 @@ static int rtw89_wow_swap_fw(struct rtw89_dev *rtwdev, bool wow)
+ 	enum rtw89_fw_type fw_type = wow ? RTW89_FW_WOWLAN : RTW89_FW_NORMAL;
+ 	enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen;
+ 	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+-	struct ieee80211_vif *wow_vif = rtw_wow->wow_vif;
+-	struct rtw89_vif *rtwvif = (struct rtw89_vif *)wow_vif->drv_priv;
++	struct rtw89_vif_link *rtwvif_link = rtw_wow->rtwvif_link;
++	struct ieee80211_vif *wow_vif = rtwvif_link_to_vif(rtwvif_link);
+ 	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
+ 	const struct rtw89_chip_info *chip = rtwdev->chip;
+ 	bool include_bb = !!chip->bbmcu_nr;
+ 	bool disable_intr_for_dlfw = false;
+ 	struct ieee80211_sta *wow_sta;
+-	struct rtw89_sta *rtwsta = NULL;
++	struct rtw89_sta_link *rtwsta_link = NULL;
++	struct rtw89_sta *rtwsta;
+ 	bool is_conn = true;
+ 	int ret;
+ 
+ 	if (chip_id == RTL8852C || chip_id == RTL8922A)
+ 		disable_intr_for_dlfw = true;
+ 
+-	wow_sta = ieee80211_find_sta(wow_vif, rtwvif->bssid);
+-	if (wow_sta)
+-		rtwsta = (struct rtw89_sta *)wow_sta->drv_priv;
+-	else
++	wow_sta = ieee80211_find_sta(wow_vif, wow_vif->cfg.ap_addr);
++	if (wow_sta) {
++		rtwsta = sta_to_rtwsta(wow_sta);
++		rtwsta_link = rtwsta->links[rtwvif_link->link_id];
++		if (!rtwsta_link)
++			return -ENOLINK;
++	} else {
+ 		is_conn = false;
++	}
+ 
+ 	if (disable_intr_for_dlfw)
+ 		rtw89_hci_disable_intr(rtwdev);
+@@ -1224,14 +1247,14 @@ static int rtw89_wow_swap_fw(struct rtw89_dev *rtwdev, bool wow)
+ 
+ 	rtw89_phy_init_rf_reg(rtwdev, true);
+ 
+-	ret = rtw89_fw_h2c_role_maintain(rtwdev, rtwvif, rtwsta,
++	ret = rtw89_fw_h2c_role_maintain(rtwdev, rtwvif_link, rtwsta_link,
+ 					 RTW89_ROLE_FW_RESTORE);
+ 	if (ret) {
+ 		rtw89_warn(rtwdev, "failed to send h2c role maintain\n");
+ 		return ret;
+ 	}
+ 
+-	ret = rtw89_chip_h2c_assoc_cmac_tbl(rtwdev, wow_vif, wow_sta);
++	ret = rtw89_chip_h2c_assoc_cmac_tbl(rtwdev, rtwvif_link, rtwsta_link);
+ 	if (ret) {
+ 		rtw89_warn(rtwdev, "failed to send h2c assoc cmac tbl\n");
+ 		return ret;
+@@ -1240,27 +1263,27 @@ static int rtw89_wow_swap_fw(struct rtw89_dev *rtwdev, bool wow)
+ 	if (!is_conn)
+ 		rtw89_cam_reset_keys(rtwdev);
+ 
+-	ret = rtw89_fw_h2c_join_info(rtwdev, rtwvif, rtwsta, !is_conn);
++	ret = rtw89_fw_h2c_join_info(rtwdev, rtwvif_link, rtwsta_link, !is_conn);
+ 	if (ret) {
+ 		rtw89_warn(rtwdev, "failed to send h2c join info\n");
+ 		return ret;
+ 	}
+ 
+-	ret = rtw89_fw_h2c_cam(rtwdev, rtwvif, rtwsta, NULL);
++	ret = rtw89_fw_h2c_cam(rtwdev, rtwvif_link, rtwsta_link, NULL);
+ 	if (ret) {
+ 		rtw89_warn(rtwdev, "failed to send h2c cam\n");
+ 		return ret;
+ 	}
+ 
+ 	if (is_conn) {
+-		ret = rtw89_fw_h2c_general_pkt(rtwdev, rtwvif, rtwsta->mac_id);
++		ret = rtw89_fw_h2c_general_pkt(rtwdev, rtwvif_link, rtwsta_link->mac_id);
+ 		if (ret) {
+ 			rtw89_warn(rtwdev, "failed to send h2c general packet\n");
+ 			return ret;
+ 		}
+-		rtw89_phy_ra_assoc(rtwdev, wow_sta);
+-		rtw89_phy_set_bss_color(rtwdev, wow_vif);
+-		rtw89_chip_cfg_txpwr_ul_tb_offset(rtwdev, wow_vif);
++		rtw89_phy_ra_assoc(rtwdev, rtwsta_link);
++		rtw89_phy_set_bss_color(rtwdev, rtwvif_link);
++		rtw89_chip_cfg_txpwr_ul_tb_offset(rtwdev, rtwvif_link);
+ 	}
+ 
+ 	if (chip_gen == RTW89_CHIP_BE)
+@@ -1363,21 +1386,20 @@ static int rtw89_wow_disable_trx_pre(struct rtw89_dev *rtwdev)
+ 
+ static int rtw89_wow_disable_trx_post(struct rtw89_dev *rtwdev)
+ {
+-	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+-	struct ieee80211_vif *vif = rtw_wow->wow_vif;
++	struct rtw89_vif_link *rtwvif_link = rtwdev->wow.rtwvif_link;
+ 	int ret;
+ 
+ 	ret = rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, true);
+ 	if (ret)
+ 		rtw89_err(rtwdev, "cfg ppdu status\n");
+ 
+-	rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, vif, true);
++	rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, rtwvif_link, true);
+ 
+ 	return ret;
+ }
+ 
+ static void rtw89_fw_release_pno_pkt_list(struct rtw89_dev *rtwdev,
+-					  struct rtw89_vif *rtwvif)
++					  struct rtw89_vif_link *rtwvif_link)
+ {
+ 	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ 	struct list_head *pkt_list = &rtw_wow->pno_pkt_list;
+@@ -1391,7 +1413,7 @@ static void rtw89_fw_release_pno_pkt_list(struct rtw89_dev *rtwdev,
+ }
+ 
+ static int rtw89_pno_scan_update_probe_req(struct rtw89_dev *rtwdev,
+-					   struct rtw89_vif *rtwvif)
++					   struct rtw89_vif_link *rtwvif_link)
+ {
+ 	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ 	struct cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config;
+@@ -1401,7 +1423,7 @@ static int rtw89_pno_scan_update_probe_req(struct rtw89_dev *rtwdev,
+ 	int ret;
+ 
+ 	for (i = 0; i < num; i++) {
+-		skb = ieee80211_probereq_get(rtwdev->hw, rtwvif->mac_addr,
++		skb = ieee80211_probereq_get(rtwdev->hw, rtwvif_link->mac_addr,
+ 					     nd_config->match_sets[i].ssid.ssid,
+ 					     nd_config->match_sets[i].ssid.ssid_len,
+ 					     nd_config->ie_len);
+@@ -1413,7 +1435,7 @@ static int rtw89_pno_scan_update_probe_req(struct rtw89_dev *rtwdev,
+ 		info = kzalloc(sizeof(*info), GFP_KERNEL);
+ 		if (!info) {
+ 			kfree_skb(skb);
+-			rtw89_fw_release_pno_pkt_list(rtwdev, rtwvif);
++			rtw89_fw_release_pno_pkt_list(rtwdev, rtwvif_link);
+ 			return -ENOMEM;
+ 		}
+ 
+@@ -1421,7 +1443,7 @@ static int rtw89_pno_scan_update_probe_req(struct rtw89_dev *rtwdev,
+ 		if (ret) {
+ 			kfree_skb(skb);
+ 			kfree(info);
+-			rtw89_fw_release_pno_pkt_list(rtwdev, rtwvif);
++			rtw89_fw_release_pno_pkt_list(rtwdev, rtwvif_link);
+ 			return ret;
+ 		}
+ 
+@@ -1436,20 +1458,19 @@ static int rtw89_pno_scan_offload(struct rtw89_dev *rtwdev, bool enable)
+ {
+ 	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+ 	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+-	struct ieee80211_vif *wow_vif = rtw_wow->wow_vif;
+-	struct rtw89_vif *rtwvif = (struct rtw89_vif *)wow_vif->drv_priv;
++	struct rtw89_vif_link *rtwvif_link = rtwdev->wow.rtwvif_link;
+ 	int interval = rtw_wow->nd_config->scan_plans[0].interval;
+ 	struct rtw89_scan_option opt = {};
+ 	int ret;
+ 
+ 	if (enable) {
+-		ret = rtw89_pno_scan_update_probe_req(rtwdev, rtwvif);
++		ret = rtw89_pno_scan_update_probe_req(rtwdev, rtwvif_link);
+ 		if (ret) {
+ 			rtw89_err(rtwdev, "Update probe request failed\n");
+ 			return ret;
+ 		}
+ 
+-		ret = mac->add_chan_list_pno(rtwdev, rtwvif);
++		ret = mac->add_chan_list_pno(rtwdev, rtwvif_link);
+ 		if (ret) {
+ 			rtw89_err(rtwdev, "Update channel list failed\n");
+ 			return ret;
+@@ -1471,7 +1492,7 @@ static int rtw89_pno_scan_offload(struct rtw89_dev *rtwdev, bool enable)
+ 		opt.opch_end = RTW89_CHAN_INVALID;
+ 	}
+ 
+-	mac->scan_offload(rtwdev, &opt, rtwvif, true);
++	mac->scan_offload(rtwdev, &opt, rtwvif_link, true);
+ 
+ 	return 0;
+ }
+@@ -1479,8 +1500,7 @@ static int rtw89_pno_scan_offload(struct rtw89_dev *rtwdev, bool enable)
+ static int rtw89_wow_fw_start(struct rtw89_dev *rtwdev)
+ {
+ 	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+-	struct ieee80211_vif *wow_vif = rtw_wow->wow_vif;
+-	struct rtw89_vif *rtwvif = (struct rtw89_vif *)wow_vif->drv_priv;
++	struct rtw89_vif_link *rtwvif_link = rtw_wow->rtwvif_link;
+ 	int ret;
+ 
+ 	if (rtw89_wow_no_link(rtwdev)) {
+@@ -1499,25 +1519,25 @@ static int rtw89_wow_fw_start(struct rtw89_dev *rtwdev)
+ 		rtw89_wow_pattern_write(rtwdev);
+ 		rtw89_wow_construct_key_info(rtwdev);
+ 
+-		ret = rtw89_fw_h2c_keep_alive(rtwdev, rtwvif, true);
++		ret = rtw89_fw_h2c_keep_alive(rtwdev, rtwvif_link, true);
+ 		if (ret) {
+ 			rtw89_err(rtwdev, "wow: failed to enable keep alive\n");
+ 			return ret;
+ 		}
+ 
+-		ret = rtw89_fw_h2c_disconnect_detect(rtwdev, rtwvif, true);
++		ret = rtw89_fw_h2c_disconnect_detect(rtwdev, rtwvif_link, true);
+ 		if (ret) {
+ 			rtw89_err(rtwdev, "wow: failed to enable disconnect detect\n");
+ 			return ret;
+ 		}
+ 
+-		ret = rtw89_fw_h2c_wow_gtk_ofld(rtwdev, rtwvif, true);
++		ret = rtw89_fw_h2c_wow_gtk_ofld(rtwdev, rtwvif_link, true);
+ 		if (ret) {
+ 			rtw89_err(rtwdev, "wow: failed to enable GTK offload\n");
+ 			return ret;
+ 		}
+ 
+-		ret = rtw89_fw_h2c_arp_offload(rtwdev, rtwvif, true);
++		ret = rtw89_fw_h2c_arp_offload(rtwdev, rtwvif_link, true);
+ 		if (ret)
+ 			rtw89_warn(rtwdev, "wow: failed to enable arp offload\n");
+ 	}
+@@ -1548,8 +1568,7 @@ static int rtw89_wow_fw_start(struct rtw89_dev *rtwdev)
+ static int rtw89_wow_fw_stop(struct rtw89_dev *rtwdev)
+ {
+ 	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+-	struct ieee80211_vif *wow_vif = rtw_wow->wow_vif;
+-	struct rtw89_vif *rtwvif = (struct rtw89_vif *)wow_vif->drv_priv;
++	struct rtw89_vif_link *rtwvif_link = rtw_wow->rtwvif_link;
+ 	int ret;
+ 
+ 	if (rtw89_wow_no_link(rtwdev)) {
+@@ -1559,35 +1578,35 @@ static int rtw89_wow_fw_stop(struct rtw89_dev *rtwdev)
+ 			return ret;
+ 		}
+ 
+-		ret = rtw89_fw_h2c_cfg_pno(rtwdev, rtwvif, false);
++		ret = rtw89_fw_h2c_cfg_pno(rtwdev, rtwvif_link, false);
+ 		if (ret) {
+ 			rtw89_err(rtwdev, "wow: failed to disable pno\n");
+ 			return ret;
+ 		}
+ 
+-		rtw89_fw_release_pno_pkt_list(rtwdev, rtwvif);
++		rtw89_fw_release_pno_pkt_list(rtwdev, rtwvif_link);
+ 	} else {
+ 		rtw89_wow_pattern_clear(rtwdev);
+ 
+-		ret = rtw89_fw_h2c_keep_alive(rtwdev, rtwvif, false);
++		ret = rtw89_fw_h2c_keep_alive(rtwdev, rtwvif_link, false);
+ 		if (ret) {
+ 			rtw89_err(rtwdev, "wow: failed to disable keep alive\n");
+ 			return ret;
+ 		}
+ 
+-		ret = rtw89_fw_h2c_disconnect_detect(rtwdev, rtwvif, false);
++		ret = rtw89_fw_h2c_disconnect_detect(rtwdev, rtwvif_link, false);
+ 		if (ret) {
+ 			rtw89_err(rtwdev, "wow: failed to disable disconnect detect\n");
+ 			return ret;
+ 		}
+ 
+-		ret = rtw89_fw_h2c_wow_gtk_ofld(rtwdev, rtwvif, false);
++		ret = rtw89_fw_h2c_wow_gtk_ofld(rtwdev, rtwvif_link, false);
+ 		if (ret) {
+ 			rtw89_err(rtwdev, "wow: failed to disable GTK offload\n");
+ 			return ret;
+ 		}
+ 
+-		ret = rtw89_fw_h2c_arp_offload(rtwdev, rtwvif, false);
++		ret = rtw89_fw_h2c_arp_offload(rtwdev, rtwvif_link, false);
+ 		if (ret)
+ 			rtw89_warn(rtwdev, "wow: failed to disable arp offload\n");
+ 
+diff --git a/drivers/net/wireless/realtek/rtw89/wow.h b/drivers/net/wireless/realtek/rtw89/wow.h
+index 3fbc2b87c058ac..f91991e8f2e30e 100644
+--- a/drivers/net/wireless/realtek/rtw89/wow.h
++++ b/drivers/net/wireless/realtek/rtw89/wow.h
+@@ -97,18 +97,16 @@ static inline int rtw89_wow_get_sec_hdr_len(struct rtw89_dev *rtwdev)
+ #ifdef CONFIG_PM
+ static inline bool rtw89_wow_mgd_linked(struct rtw89_dev *rtwdev)
+ {
+-	struct ieee80211_vif *wow_vif = rtwdev->wow.wow_vif;
+-	struct rtw89_vif *rtwvif = (struct rtw89_vif *)wow_vif->drv_priv;
++	struct rtw89_vif_link *rtwvif_link = rtwdev->wow.rtwvif_link;
+ 
+-	return rtwvif->net_type == RTW89_NET_TYPE_INFRA;
++	return rtwvif_link->net_type == RTW89_NET_TYPE_INFRA;
+ }
+ 
+ static inline bool rtw89_wow_no_link(struct rtw89_dev *rtwdev)
+ {
+-	struct ieee80211_vif *wow_vif = rtwdev->wow.wow_vif;
+-	struct rtw89_vif *rtwvif = (struct rtw89_vif *)wow_vif->drv_priv;
++	struct rtw89_vif_link *rtwvif_link = rtwdev->wow.rtwvif_link;
+ 
+-	return rtwvif->net_type == RTW89_NET_TYPE_NO_LINK;
++	return rtwvif_link->net_type == RTW89_NET_TYPE_NO_LINK;
+ }
+ 
+ static inline bool rtw_wow_has_mgd_features(struct rtw89_dev *rtwdev)
+diff --git a/drivers/net/wireless/silabs/wfx/main.c b/drivers/net/wireless/silabs/wfx/main.c
+index e7198520bdffc7..64441c8bc4606c 100644
+--- a/drivers/net/wireless/silabs/wfx/main.c
++++ b/drivers/net/wireless/silabs/wfx/main.c
+@@ -480,10 +480,23 @@ static int __init wfx_core_init(void)
+ {
+ 	int ret = 0;
+ 
+-	if (IS_ENABLED(CONFIG_SPI))
++	if (IS_ENABLED(CONFIG_SPI)) {
+ 		ret = spi_register_driver(&wfx_spi_driver);
+-	if (IS_ENABLED(CONFIG_MMC) && !ret)
++		if (ret)
++			goto out;
++	}
++	if (IS_ENABLED(CONFIG_MMC)) {
+ 		ret = sdio_register_driver(&wfx_sdio_driver);
++		if (ret)
++			goto unregister_spi;
++	}
++
++	return 0;
++
++unregister_spi:
++	if (IS_ENABLED(CONFIG_SPI))
++		spi_unregister_driver(&wfx_spi_driver);
++out:
+ 	return ret;
+ }
+ module_init(wfx_core_init);
+diff --git a/drivers/net/wireless/st/cw1200/cw1200_spi.c b/drivers/net/wireless/st/cw1200/cw1200_spi.c
+index 4f346fb977a989..862964a8cc8761 100644
+--- a/drivers/net/wireless/st/cw1200/cw1200_spi.c
++++ b/drivers/net/wireless/st/cw1200/cw1200_spi.c
+@@ -450,7 +450,7 @@ static int __maybe_unused cw1200_spi_suspend(struct device *dev)
+ {
+ 	struct hwbus_priv *self = spi_get_drvdata(to_spi_device(dev));
+ 
+-	if (!cw1200_can_suspend(self->core))
++	if (self && !cw1200_can_suspend(self->core))
+ 		return -EAGAIN;
+ 
+ 	/* XXX notify host that we have to keep CW1200 powered on? */
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 855b42c92284df..f0d4c6f3cb0555 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -4591,6 +4591,11 @@ EXPORT_SYMBOL_GPL(nvme_alloc_admin_tag_set);
+ 
+ void nvme_remove_admin_tag_set(struct nvme_ctrl *ctrl)
+ {
++	/*
++	 * As we're about to destroy the queue and free the tagset,
++	 * we cannot have keep-alive work running.
++	 */
++	nvme_stop_keep_alive(ctrl);
+ 	blk_mq_destroy_queue(ctrl->admin_q);
+ 	blk_put_queue(ctrl->admin_q);
+ 	if (ctrl->ops->flags & NVME_F_FABRICS) {
+diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
+index 6a15873055b951..f25582e4d88bb0 100644
+--- a/drivers/nvme/host/multipath.c
++++ b/drivers/nvme/host/multipath.c
+@@ -165,7 +165,8 @@ void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
+ 	int srcu_idx;
+ 
+ 	srcu_idx = srcu_read_lock(&ctrl->srcu);
+-	list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
++	list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
++				 srcu_read_lock_held(&ctrl->srcu)) {
+ 		if (!ns->head->disk)
+ 			continue;
+ 		kblockd_schedule_work(&ns->head->requeue_work);
+@@ -209,7 +210,8 @@ void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
+ 	int srcu_idx;
+ 
+ 	srcu_idx = srcu_read_lock(&ctrl->srcu);
+-	list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
++	list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
++				 srcu_read_lock_held(&ctrl->srcu)) {
+ 		nvme_mpath_clear_current_path(ns);
+ 		kblockd_schedule_work(&ns->head->requeue_work);
+ 	}
+@@ -224,7 +226,8 @@ void nvme_mpath_revalidate_paths(struct nvme_ns *ns)
+ 	int srcu_idx;
+ 
+ 	srcu_idx = srcu_read_lock(&head->srcu);
+-	list_for_each_entry_rcu(ns, &head->list, siblings) {
++	list_for_each_entry_srcu(ns, &head->list, siblings,
++				 srcu_read_lock_held(&head->srcu)) {
+ 		if (capacity != get_capacity(ns->disk))
+ 			clear_bit(NVME_NS_READY, &ns->flags);
+ 	}
+@@ -257,7 +260,8 @@ static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head, int node)
+ 	int found_distance = INT_MAX, fallback_distance = INT_MAX, distance;
+ 	struct nvme_ns *found = NULL, *fallback = NULL, *ns;
+ 
+-	list_for_each_entry_rcu(ns, &head->list, siblings) {
++	list_for_each_entry_srcu(ns, &head->list, siblings,
++				 srcu_read_lock_held(&head->srcu)) {
+ 		if (nvme_path_is_disabled(ns))
+ 			continue;
+ 
+@@ -356,7 +360,8 @@ static struct nvme_ns *nvme_queue_depth_path(struct nvme_ns_head *head)
+ 	unsigned int min_depth_opt = UINT_MAX, min_depth_nonopt = UINT_MAX;
+ 	unsigned int depth;
+ 
+-	list_for_each_entry_rcu(ns, &head->list, siblings) {
++	list_for_each_entry_srcu(ns, &head->list, siblings,
++				 srcu_read_lock_held(&head->srcu)) {
+ 		if (nvme_path_is_disabled(ns))
+ 			continue;
+ 
+@@ -424,7 +429,8 @@ static bool nvme_available_path(struct nvme_ns_head *head)
+ 	if (!test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags))
+ 		return NULL;
+ 
+-	list_for_each_entry_rcu(ns, &head->list, siblings) {
++	list_for_each_entry_srcu(ns, &head->list, siblings,
++				 srcu_read_lock_held(&head->srcu)) {
+ 		if (test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ns->ctrl->flags))
+ 			continue;
+ 		switch (nvme_ctrl_state(ns->ctrl)) {
+@@ -785,7 +791,8 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
+ 		return 0;
+ 
+ 	srcu_idx = srcu_read_lock(&ctrl->srcu);
+-	list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
++	list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
++				 srcu_read_lock_held(&ctrl->srcu)) {
+ 		unsigned nsid;
+ again:
+ 		nsid = le32_to_cpu(desc->nsids[n]);
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index 4b9fda0b1d9a33..55af3dfbc2607b 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -153,6 +153,7 @@ struct nvme_dev {
+ 	/* host memory buffer support: */
+ 	u64 host_mem_size;
+ 	u32 nr_host_mem_descs;
++	u32 host_mem_descs_size;
+ 	dma_addr_t host_mem_descs_dma;
+ 	struct nvme_host_mem_buf_desc *host_mem_descs;
+ 	void **host_mem_desc_bufs;
+@@ -904,9 +905,10 @@ static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
+ 
+ static void nvme_submit_cmds(struct nvme_queue *nvmeq, struct request **rqlist)
+ {
++	struct request *req;
++
+ 	spin_lock(&nvmeq->sq_lock);
+-	while (!rq_list_empty(*rqlist)) {
+-		struct request *req = rq_list_pop(rqlist);
++	while ((req = rq_list_pop(rqlist))) {
+ 		struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ 
+ 		nvme_sq_copy_cmd(nvmeq, &iod->cmd);
+@@ -931,31 +933,25 @@ static bool nvme_prep_rq_batch(struct nvme_queue *nvmeq, struct request *req)
+ 
+ static void nvme_queue_rqs(struct request **rqlist)
+ {
+-	struct request *req, *next, *prev = NULL;
++	struct request *submit_list = NULL;
+ 	struct request *requeue_list = NULL;
++	struct request **requeue_lastp = &requeue_list;
++	struct nvme_queue *nvmeq = NULL;
++	struct request *req;
+ 
+-	rq_list_for_each_safe(rqlist, req, next) {
+-		struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
+-
+-		if (!nvme_prep_rq_batch(nvmeq, req)) {
+-			/* detach 'req' and add to remainder list */
+-			rq_list_move(rqlist, &requeue_list, req, prev);
+-
+-			req = prev;
+-			if (!req)
+-				continue;
+-		}
++	while ((req = rq_list_pop(rqlist))) {
++		if (nvmeq && nvmeq != req->mq_hctx->driver_data)
++			nvme_submit_cmds(nvmeq, &submit_list);
++		nvmeq = req->mq_hctx->driver_data;
+ 
+-		if (!next || req->mq_hctx != next->mq_hctx) {
+-			/* detach rest of list, and submit */
+-			req->rq_next = NULL;
+-			nvme_submit_cmds(nvmeq, rqlist);
+-			*rqlist = next;
+-			prev = NULL;
+-		} else
+-			prev = req;
++		if (nvme_prep_rq_batch(nvmeq, req))
++			rq_list_add(&submit_list, req); /* reverse order */
++		else
++			rq_list_add_tail(&requeue_lastp, req);
+ 	}
+ 
++	if (nvmeq)
++		nvme_submit_cmds(nvmeq, &submit_list);
+ 	*rqlist = requeue_list;
+ }
+ 
+@@ -1966,10 +1962,10 @@ static void nvme_free_host_mem(struct nvme_dev *dev)
+ 
+ 	kfree(dev->host_mem_desc_bufs);
+ 	dev->host_mem_desc_bufs = NULL;
+-	dma_free_coherent(dev->dev,
+-			dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs),
++	dma_free_coherent(dev->dev, dev->host_mem_descs_size,
+ 			dev->host_mem_descs, dev->host_mem_descs_dma);
+ 	dev->host_mem_descs = NULL;
++	dev->host_mem_descs_size = 0;
+ 	dev->nr_host_mem_descs = 0;
+ }
+ 
+@@ -1977,7 +1973,7 @@ static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
+ 		u32 chunk_size)
+ {
+ 	struct nvme_host_mem_buf_desc *descs;
+-	u32 max_entries, len;
++	u32 max_entries, len, descs_size;
+ 	dma_addr_t descs_dma;
+ 	int i = 0;
+ 	void **bufs;
+@@ -1990,8 +1986,9 @@ static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
+ 	if (dev->ctrl.hmmaxd && dev->ctrl.hmmaxd < max_entries)
+ 		max_entries = dev->ctrl.hmmaxd;
+ 
+-	descs = dma_alloc_coherent(dev->dev, max_entries * sizeof(*descs),
+-				   &descs_dma, GFP_KERNEL);
++	descs_size = max_entries * sizeof(*descs);
++	descs = dma_alloc_coherent(dev->dev, descs_size, &descs_dma,
++			GFP_KERNEL);
+ 	if (!descs)
+ 		goto out;
+ 
+@@ -2020,6 +2017,7 @@ static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
+ 	dev->host_mem_size = size;
+ 	dev->host_mem_descs = descs;
+ 	dev->host_mem_descs_dma = descs_dma;
++	dev->host_mem_descs_size = descs_size;
+ 	dev->host_mem_desc_bufs = bufs;
+ 	return 0;
+ 
+@@ -2034,8 +2032,7 @@ static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
+ 
+ 	kfree(bufs);
+ out_free_descs:
+-	dma_free_coherent(dev->dev, max_entries * sizeof(*descs), descs,
+-			descs_dma);
++	dma_free_coherent(dev->dev, descs_size, descs, descs_dma);
+ out:
+ 	dev->host_mem_descs = NULL;
+ 	return -ENOMEM;
+diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
+index 4d528c10df3a9a..546e76ac407cfd 100644
+--- a/drivers/of/fdt.c
++++ b/drivers/of/fdt.c
+@@ -457,6 +457,7 @@ int __initdata dt_root_addr_cells;
+ int __initdata dt_root_size_cells;
+ 
+ void *initial_boot_params __ro_after_init;
++phys_addr_t initial_boot_params_pa __ro_after_init;
+ 
+ #ifdef CONFIG_OF_EARLY_FLATTREE
+ 
+@@ -1136,17 +1137,18 @@ static void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
+ 	return ptr;
+ }
+ 
+-bool __init early_init_dt_verify(void *params)
++bool __init early_init_dt_verify(void *dt_virt, phys_addr_t dt_phys)
+ {
+-	if (!params)
++	if (!dt_virt)
+ 		return false;
+ 
+ 	/* check device tree validity */
+-	if (fdt_check_header(params))
++	if (fdt_check_header(dt_virt))
+ 		return false;
+ 
+ 	/* Setup flat device-tree pointer */
+-	initial_boot_params = params;
++	initial_boot_params = dt_virt;
++	initial_boot_params_pa = dt_phys;
+ 	of_fdt_crc32 = crc32_be(~0, initial_boot_params,
+ 				fdt_totalsize(initial_boot_params));
+ 
+@@ -1173,11 +1175,11 @@ void __init early_init_dt_scan_nodes(void)
+ 	early_init_dt_check_for_usable_mem_range();
+ }
+ 
+-bool __init early_init_dt_scan(void *params)
++bool __init early_init_dt_scan(void *dt_virt, phys_addr_t dt_phys)
+ {
+ 	bool status;
+ 
+-	status = early_init_dt_verify(params);
++	status = early_init_dt_verify(dt_virt, dt_phys);
+ 	if (!status)
+ 		return false;
+ 
+diff --git a/drivers/of/kexec.c b/drivers/of/kexec.c
+index 9ccde2fd77cbf5..5b924597a4debe 100644
+--- a/drivers/of/kexec.c
++++ b/drivers/of/kexec.c
+@@ -301,7 +301,7 @@ void *of_kexec_alloc_and_setup_fdt(const struct kimage *image,
+ 	}
+ 
+ 	/* Remove memory reservation for the current device tree. */
+-	ret = fdt_find_and_del_mem_rsv(fdt, __pa(initial_boot_params),
++	ret = fdt_find_and_del_mem_rsv(fdt, initial_boot_params_pa,
+ 				       fdt_totalsize(initial_boot_params));
+ 	if (ret == -EINVAL) {
+ 		pr_err("Error removing memory reservation.\n");
+diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c
+index 284f2e0e4d2615..e091c3e55b5c6f 100644
+--- a/drivers/pci/controller/cadence/pci-j721e.c
++++ b/drivers/pci/controller/cadence/pci-j721e.c
+@@ -572,15 +572,14 @@ static int j721e_pcie_probe(struct platform_device *pdev)
+ 		pcie->refclk = clk;
+ 
+ 		/*
+-		 * The "Power Sequencing and Reset Signal Timings" table of the
+-		 * PCI Express Card Electromechanical Specification, Revision
+-		 * 5.1, Section 2.9.2, Symbol "T_PERST-CLK", indicates PERST#
+-		 * should be deasserted after minimum of 100us once REFCLK is
+-		 * stable. The REFCLK to the connector in RC mode is selected
+-		 * while enabling the PHY. So deassert PERST# after 100 us.
++		 * Section 2.2 of the PCI Express Card Electromechanical
++		 * Specification (Revision 5.1) mandates that the deassertion
++		 * of the PERST# signal should be delayed by 100 ms (TPVPERL).
++		 * This shall ensure that the power and the reference clock
++		 * are stable.
+ 		 */
+ 		if (gpiod) {
+-			fsleep(PCIE_T_PERST_CLK_US);
++			msleep(PCIE_T_PVPERL_MS);
+ 			gpiod_set_value_cansleep(gpiod, 1);
+ 		}
+ 
+@@ -671,15 +670,14 @@ static int j721e_pcie_resume_noirq(struct device *dev)
+ 			return ret;
+ 
+ 		/*
+-		 * The "Power Sequencing and Reset Signal Timings" table of the
+-		 * PCI Express Card Electromechanical Specification, Revision
+-		 * 5.1, Section 2.9.2, Symbol "T_PERST-CLK", indicates PERST#
+-		 * should be deasserted after minimum of 100us once REFCLK is
+-		 * stable. The REFCLK to the connector in RC mode is selected
+-		 * while enabling the PHY. So deassert PERST# after 100 us.
++		 * Section 2.2 of the PCI Express Card Electromechanical
++		 * Specification (Revision 5.1) mandates that the deassertion
++		 * of the PERST# signal should be delayed by 100 ms (TPVPERL).
++		 * This shall ensure that the power and the reference clock
++		 * are stable.
+ 		 */
+ 		if (pcie->reset_gpio) {
+-			fsleep(PCIE_T_PERST_CLK_US);
++			msleep(PCIE_T_PVPERL_MS);
+ 			gpiod_set_value_cansleep(pcie->reset_gpio, 1);
+ 		}
+ 
+diff --git a/drivers/pci/controller/dwc/pcie-qcom-ep.c b/drivers/pci/controller/dwc/pcie-qcom-ep.c
+index e588fcc5458936..b5ca5260f9049f 100644
+--- a/drivers/pci/controller/dwc/pcie-qcom-ep.c
++++ b/drivers/pci/controller/dwc/pcie-qcom-ep.c
+@@ -396,6 +396,10 @@ static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
+ 		return ret;
+ 	}
+ 
++	/* Perform cleanup that requires refclk */
++	pci_epc_deinit_notify(pci->ep.epc);
++	dw_pcie_ep_cleanup(&pci->ep);
++
+ 	/* Assert WAKE# to RC to indicate device is ready */
+ 	gpiod_set_value_cansleep(pcie_ep->wake, 1);
+ 	usleep_range(WAKE_DELAY_US, WAKE_DELAY_US + 500);
+@@ -540,8 +544,6 @@ static void qcom_pcie_perst_assert(struct dw_pcie *pci)
+ {
+ 	struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
+ 
+-	pci_epc_deinit_notify(pci->ep.epc);
+-	dw_pcie_ep_cleanup(&pci->ep);
+ 	qcom_pcie_disable_resources(pcie_ep);
+ 	pcie_ep->link_status = QCOM_PCIE_EP_LINK_DISABLED;
+ }
+diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
+index ef44a82be058b2..2b33d03ed05416 100644
+--- a/drivers/pci/controller/dwc/pcie-qcom.c
++++ b/drivers/pci/controller/dwc/pcie-qcom.c
+@@ -133,6 +133,7 @@
+ 
+ /* PARF_INT_ALL_{STATUS/CLEAR/MASK} register fields */
+ #define PARF_INT_ALL_LINK_UP			BIT(13)
++#define PARF_INT_MSI_DEV_0_7			GENMASK(30, 23)
+ 
+ /* PARF_NO_SNOOP_OVERIDE register fields */
+ #define WR_NO_SNOOP_OVERIDE_EN			BIT(1)
+@@ -1716,7 +1717,8 @@ static int qcom_pcie_probe(struct platform_device *pdev)
+ 			goto err_host_deinit;
+ 		}
+ 
+-		writel_relaxed(PARF_INT_ALL_LINK_UP, pcie->parf + PARF_INT_ALL_MASK);
++		writel_relaxed(PARF_INT_ALL_LINK_UP | PARF_INT_MSI_DEV_0_7,
++			       pcie->parf + PARF_INT_ALL_MASK);
+ 	}
+ 
+ 	qcom_pcie_icc_opp_update(pcie);
+diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
+index c1394f2ab63ff1..ced3b7e7bdaded 100644
+--- a/drivers/pci/controller/dwc/pcie-tegra194.c
++++ b/drivers/pci/controller/dwc/pcie-tegra194.c
+@@ -1704,9 +1704,6 @@ static void pex_ep_event_pex_rst_assert(struct tegra_pcie_dw *pcie)
+ 	if (ret)
+ 		dev_err(pcie->dev, "Failed to go Detect state: %d\n", ret);
+ 
+-	pci_epc_deinit_notify(pcie->pci.ep.epc);
+-	dw_pcie_ep_cleanup(&pcie->pci.ep);
+-
+ 	reset_control_assert(pcie->core_rst);
+ 
+ 	tegra_pcie_disable_phy(pcie);
+@@ -1785,6 +1782,10 @@ static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie)
+ 		goto fail_phy;
+ 	}
+ 
++	/* Perform cleanup that requires refclk */
++	pci_epc_deinit_notify(pcie->pci.ep.epc);
++	dw_pcie_ep_cleanup(&pcie->pci.ep);
++
+ 	/* Clear any stale interrupt statuses */
+ 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
+ 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
+diff --git a/drivers/pci/endpoint/functions/pci-epf-mhi.c b/drivers/pci/endpoint/functions/pci-epf-mhi.c
+index 7d070b1def1166..54286a40bdfbf7 100644
+--- a/drivers/pci/endpoint/functions/pci-epf-mhi.c
++++ b/drivers/pci/endpoint/functions/pci-epf-mhi.c
+@@ -867,12 +867,18 @@ static int pci_epf_mhi_bind(struct pci_epf *epf)
+ {
+ 	struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
+ 	struct pci_epc *epc = epf->epc;
++	struct device *dev = &epf->dev;
+ 	struct platform_device *pdev = to_platform_device(epc->dev.parent);
+ 	struct resource *res;
+ 	int ret;
+ 
+ 	/* Get MMIO base address from Endpoint controller */
+ 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mmio");
++	if (!res) {
++		dev_err(dev, "Failed to get \"mmio\" resource\n");
++		return -ENODEV;
++	}
++
+ 	epf_mhi->mmio_phys = res->start;
+ 	epf_mhi->mmio_size = resource_size(res);
+ 
+diff --git a/drivers/pci/hotplug/cpqphp_pci.c b/drivers/pci/hotplug/cpqphp_pci.c
+index 718bc6cf12cb3c..974c7db3265b5a 100644
+--- a/drivers/pci/hotplug/cpqphp_pci.c
++++ b/drivers/pci/hotplug/cpqphp_pci.c
+@@ -135,11 +135,13 @@ int cpqhp_unconfigure_device(struct pci_func *func)
+ static int PCI_RefinedAccessConfig(struct pci_bus *bus, unsigned int devfn, u8 offset, u32 *value)
+ {
+ 	u32 vendID = 0;
++	int ret;
+ 
+-	if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, &vendID) == -1)
+-		return -1;
++	ret = pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, &vendID);
++	if (ret != PCIBIOS_SUCCESSFUL)
++		return PCIBIOS_DEVICE_NOT_FOUND;
+ 	if (PCI_POSSIBLE_ERROR(vendID))
+-		return -1;
++		return PCIBIOS_DEVICE_NOT_FOUND;
+ 	return pci_bus_read_config_dword(bus, devfn, offset, value);
+ }
+ 
+@@ -202,13 +204,15 @@ static int PCI_ScanBusForNonBridge(struct controller *ctrl, u8 bus_num, u8 *dev_
+ {
+ 	u16 tdevice;
+ 	u32 work;
++	int ret;
+ 	u8 tbus;
+ 
+ 	ctrl->pci_bus->number = bus_num;
+ 
+ 	for (tdevice = 0; tdevice < 0xFF; tdevice++) {
+ 		/* Scan for access first */
+-		if (PCI_RefinedAccessConfig(ctrl->pci_bus, tdevice, 0x08, &work) == -1)
++		ret = PCI_RefinedAccessConfig(ctrl->pci_bus, tdevice, 0x08, &work);
++		if (ret)
+ 			continue;
+ 		dbg("Looking for nonbridge bus_num %d dev_num %d\n", bus_num, tdevice);
+ 		/* Yep we got one. Not a bridge ? */
+@@ -220,7 +224,8 @@ static int PCI_ScanBusForNonBridge(struct controller *ctrl, u8 bus_num, u8 *dev_
+ 	}
+ 	for (tdevice = 0; tdevice < 0xFF; tdevice++) {
+ 		/* Scan for access first */
+-		if (PCI_RefinedAccessConfig(ctrl->pci_bus, tdevice, 0x08, &work) == -1)
++		ret = PCI_RefinedAccessConfig(ctrl->pci_bus, tdevice, 0x08, &work);
++		if (ret)
+ 			continue;
+ 		dbg("Looking for bridge bus_num %d dev_num %d\n", bus_num, tdevice);
+ 		/* Yep we got one. bridge ? */
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 225a6cd2e9ca3b..08f170fd3efb3e 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -5248,7 +5248,7 @@ static ssize_t reset_method_store(struct device *dev,
+ 				  const char *buf, size_t count)
+ {
+ 	struct pci_dev *pdev = to_pci_dev(dev);
+-	char *options, *name;
++	char *options, *tmp_options, *name;
+ 	int m, n;
+ 	u8 reset_methods[PCI_NUM_RESET_METHODS] = { 0 };
+ 
+@@ -5268,7 +5268,8 @@ static ssize_t reset_method_store(struct device *dev,
+ 		return -ENOMEM;
+ 
+ 	n = 0;
+-	while ((name = strsep(&options, " ")) != NULL) {
++	tmp_options = options;
++	while ((name = strsep(&tmp_options, " ")) != NULL) {
+ 		if (sysfs_streq(name, ""))
+ 			continue;
+ 
+diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
+index 0f87cade10f74b..ed645c7a4e4b41 100644
+--- a/drivers/pci/slot.c
++++ b/drivers/pci/slot.c
+@@ -79,6 +79,7 @@ static void pci_slot_release(struct kobject *kobj)
+ 	up_read(&pci_bus_sem);
+ 
+ 	list_del(&slot->list);
++	pci_bus_put(slot->bus);
+ 
+ 	kfree(slot);
+ }
+@@ -261,7 +262,7 @@ struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr,
+ 		goto err;
+ 	}
+ 
+-	slot->bus = parent;
++	slot->bus = pci_bus_get(parent);
+ 	slot->number = slot_nr;
+ 
+ 	slot->kobj.kset = pci_slots_kset;
+@@ -269,6 +270,7 @@ struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr,
+ 	slot_name = make_slot_name(name);
+ 	if (!slot_name) {
+ 		err = -ENOMEM;
++		pci_bus_put(slot->bus);
+ 		kfree(slot);
+ 		goto err;
+ 	}
+diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
+index 397a46410f7cb7..30506c43776f15 100644
+--- a/drivers/perf/arm-cmn.c
++++ b/drivers/perf/arm-cmn.c
+@@ -2178,8 +2178,6 @@ static int arm_cmn_init_dtcs(struct arm_cmn *cmn)
+ 			continue;
+ 
+ 		xp = arm_cmn_node_to_xp(cmn, dn);
+-		dn->portid_bits = xp->portid_bits;
+-		dn->deviceid_bits = xp->deviceid_bits;
+ 		dn->dtc = xp->dtc;
+ 		dn->dtm = xp->dtm;
+ 		if (cmn->multi_dtm)
+@@ -2420,6 +2418,8 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
+ 			}
+ 
+ 			arm_cmn_init_node_info(cmn, reg & CMN_CHILD_NODE_ADDR, dn);
++			dn->portid_bits = xp->portid_bits;
++			dn->deviceid_bits = xp->deviceid_bits;
+ 
+ 			switch (dn->type) {
+ 			case CMN_TYPE_DTC:
+diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c
+index d5fa92ba837397..dabdb9f7bb82c4 100644
+--- a/drivers/perf/arm_smmuv3_pmu.c
++++ b/drivers/perf/arm_smmuv3_pmu.c
+@@ -431,6 +431,17 @@ static int smmu_pmu_event_init(struct perf_event *event)
+ 			return -EINVAL;
+ 	}
+ 
++	/*
++	 * Ensure all events are on the same cpu so all events are in the
++	 * same cpu context, to avoid races on pmu_enable etc.
++	 */
++	event->cpu = smmu_pmu->on_cpu;
++
++	hwc->idx = -1;
++
++	if (event->group_leader == event)
++		return 0;
++
+ 	for_each_sibling_event(sibling, event->group_leader) {
+ 		if (is_software_event(sibling))
+ 			continue;
+@@ -442,14 +453,6 @@ static int smmu_pmu_event_init(struct perf_event *event)
+ 			return -EINVAL;
+ 	}
+ 
+-	hwc->idx = -1;
+-
+-	/*
+-	 * Ensure all events are on the same cpu so all events are in the
+-	 * same cpu context, to avoid races on pmu_enable etc.
+-	 */
+-	event->cpu = smmu_pmu->on_cpu;
+-
+ 	return 0;
+ }
+ 
+diff --git a/drivers/phy/phy-airoha-pcie-regs.h b/drivers/phy/phy-airoha-pcie-regs.h
+index bb1f679ca1dfa0..b938a7b468fee3 100644
+--- a/drivers/phy/phy-airoha-pcie-regs.h
++++ b/drivers/phy/phy-airoha-pcie-regs.h
+@@ -197,9 +197,9 @@
+ #define CSR_2L_PXP_TX1_MULTLANE_EN		BIT(0)
+ 
+ #define REG_CSR_2L_RX0_REV0			0x00fc
+-#define CSR_2L_PXP_VOS_PNINV			GENMASK(3, 2)
+-#define CSR_2L_PXP_FE_GAIN_NORMAL_MODE		GENMASK(6, 4)
+-#define CSR_2L_PXP_FE_GAIN_TRAIN_MODE		GENMASK(10, 8)
++#define CSR_2L_PXP_VOS_PNINV			GENMASK(19, 18)
++#define CSR_2L_PXP_FE_GAIN_NORMAL_MODE		GENMASK(22, 20)
++#define CSR_2L_PXP_FE_GAIN_TRAIN_MODE		GENMASK(26, 24)
+ 
+ #define REG_CSR_2L_RX0_PHYCK_DIV		0x0100
+ #define CSR_2L_PXP_RX0_PHYCK_SEL		GENMASK(9, 8)
+diff --git a/drivers/phy/phy-airoha-pcie.c b/drivers/phy/phy-airoha-pcie.c
+index 1e410eb410580c..56e9ade8a9fd3d 100644
+--- a/drivers/phy/phy-airoha-pcie.c
++++ b/drivers/phy/phy-airoha-pcie.c
+@@ -459,7 +459,7 @@ static void airoha_pcie_phy_init_clk_out(struct airoha_pcie_phy *pcie_phy)
+ 	airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_CLKTX1_OFFSET,
+ 				     CSR_2L_PXP_CLKTX1_SR);
+ 	airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_PLL_CMN_RESERVE0,
+-				       CSR_2L_PXP_PLL_RESERVE_MASK, 0xdd);
++				       CSR_2L_PXP_PLL_RESERVE_MASK, 0xd0d);
+ }
+ 
+ static void airoha_pcie_phy_init_csr_2l(struct airoha_pcie_phy *pcie_phy)
+@@ -471,9 +471,9 @@ static void airoha_pcie_phy_init_csr_2l(struct airoha_pcie_phy *pcie_phy)
+ 				 PCIE_SW_XFI_RXPCS_RST | PCIE_SW_REF_RST |
+ 				 PCIE_SW_RX_RST);
+ 	airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_TX_RESET,
+-				 PCIE_TX_TOP_RST | REG_PCIE_PMA_TX_RESET);
++				 PCIE_TX_TOP_RST | PCIE_TX_CAL_RST);
+ 	airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_TX_RESET,
+-				 PCIE_TX_TOP_RST | REG_PCIE_PMA_TX_RESET);
++				 PCIE_TX_TOP_RST | PCIE_TX_CAL_RST);
+ }
+ 
+ static void airoha_pcie_phy_init_rx(struct airoha_pcie_phy *pcie_phy)
+@@ -802,7 +802,7 @@ static void airoha_pcie_phy_init_ssc_jcpll(struct airoha_pcie_phy *pcie_phy)
+ 	airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_JCPLL_SDM_IFM,
+ 				   CSR_2L_PXP_JCPLL_SDM_IFM);
+ 	airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_JCPLL_SDM_HREN,
+-				   REG_CSR_2L_JCPLL_SDM_HREN);
++				   CSR_2L_PXP_JCPLL_SDM_HREN);
+ 	airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_JCPLL_RST_DLY,
+ 				     CSR_2L_PXP_JCPLL_SDM_DI_EN);
+ 	airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_JCPLL_SSC,
+diff --git a/drivers/phy/realtek/phy-rtk-usb2.c b/drivers/phy/realtek/phy-rtk-usb2.c
+index e3ad7cea510998..e8ca2ec5998fe6 100644
+--- a/drivers/phy/realtek/phy-rtk-usb2.c
++++ b/drivers/phy/realtek/phy-rtk-usb2.c
+@@ -1023,6 +1023,8 @@ static int rtk_usb2phy_probe(struct platform_device *pdev)
+ 
+ 	rtk_phy->dev			= &pdev->dev;
+ 	rtk_phy->phy_cfg = devm_kzalloc(dev, sizeof(*phy_cfg), GFP_KERNEL);
++	if (!rtk_phy->phy_cfg)
++		return -ENOMEM;
+ 
+ 	memcpy(rtk_phy->phy_cfg, phy_cfg, sizeof(*phy_cfg));
+ 
+diff --git a/drivers/phy/realtek/phy-rtk-usb3.c b/drivers/phy/realtek/phy-rtk-usb3.c
+index dfcf4b921bba63..96af483e5444b9 100644
+--- a/drivers/phy/realtek/phy-rtk-usb3.c
++++ b/drivers/phy/realtek/phy-rtk-usb3.c
+@@ -577,6 +577,8 @@ static int rtk_usb3phy_probe(struct platform_device *pdev)
+ 
+ 	rtk_phy->dev			= &pdev->dev;
+ 	rtk_phy->phy_cfg = devm_kzalloc(dev, sizeof(*phy_cfg), GFP_KERNEL);
++	if (!rtk_phy->phy_cfg)
++		return -ENOMEM;
+ 
+ 	memcpy(rtk_phy->phy_cfg, phy_cfg, sizeof(*phy_cfg));
+ 
+diff --git a/drivers/pinctrl/pinctrl-k210.c b/drivers/pinctrl/pinctrl-k210.c
+index 0f6b55fec31de7..a71805997b028a 100644
+--- a/drivers/pinctrl/pinctrl-k210.c
++++ b/drivers/pinctrl/pinctrl-k210.c
+@@ -183,7 +183,7 @@ static const u32 k210_pinconf_mode_id_to_mode[] = {
+ 	[K210_PC_DEFAULT_INT13] = K210_PC_MODE_IN | K210_PC_PU,
+ };
+ 
+-#undef DEFAULT
++#undef K210_PC_DEFAULT
+ 
+ /*
+  * Pin functions configuration information.
+diff --git a/drivers/pinctrl/pinctrl-zynqmp.c b/drivers/pinctrl/pinctrl-zynqmp.c
+index 3c6d56fdb8c964..93454d2a26bcc6 100644
+--- a/drivers/pinctrl/pinctrl-zynqmp.c
++++ b/drivers/pinctrl/pinctrl-zynqmp.c
+@@ -49,7 +49,6 @@
+  * @name:	Name of the pin mux function
+  * @groups:	List of pin groups for this function
+  * @ngroups:	Number of entries in @groups
+- * @node:	Firmware node matching with the function
+  *
+  * This structure holds information about pin control function
+  * and function group names supporting that function.
+diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+index d2dd66769aa891..a0eb4e01b3a755 100644
+--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
++++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+@@ -667,7 +667,7 @@ static void pmic_gpio_config_dbg_show(struct pinctrl_dev *pctldev,
+ 		"push-pull", "open-drain", "open-source"
+ 	};
+ 	static const char *const strengths[] = {
+-		"no", "high", "medium", "low"
++		"no", "low", "medium", "high"
+ 	};
+ 
+ 	pad = pctldev->desc->pins[pin].drv_data;
+diff --git a/drivers/pinctrl/renesas/Kconfig b/drivers/pinctrl/renesas/Kconfig
+index 14bd55d647319b..7f3f41c7fe54c8 100644
+--- a/drivers/pinctrl/renesas/Kconfig
++++ b/drivers/pinctrl/renesas/Kconfig
+@@ -41,6 +41,7 @@ config PINCTRL_RENESAS
+ 	select PINCTRL_PFC_R8A779H0 if ARCH_R8A779H0
+ 	select PINCTRL_RZG2L if ARCH_RZG2L
+ 	select PINCTRL_RZV2M if ARCH_R9A09G011
++	select PINCTRL_RZG2L if ARCH_R9A09G057
+ 	select PINCTRL_PFC_SH7203 if CPU_SUBTYPE_SH7203
+ 	select PINCTRL_PFC_SH7264 if CPU_SUBTYPE_SH7264
+ 	select PINCTRL_PFC_SH7269 if CPU_SUBTYPE_SH7269
+diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+index 5a403915fed2c6..3a81837b5e623b 100644
+--- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c
++++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+@@ -2710,7 +2710,7 @@ static int rzg2l_pinctrl_register(struct rzg2l_pinctrl *pctrl)
+ 
+ 	ret = pinctrl_enable(pctrl->pctl);
+ 	if (ret)
+-		dev_err_probe(pctrl->dev, ret, "pinctrl enable failed\n");
++		return dev_err_probe(pctrl->dev, ret, "pinctrl enable failed\n");
+ 
+ 	ret = rzg2l_gpio_register(pctrl);
+ 	if (ret)
+diff --git a/drivers/platform/chrome/cros_ec_typec.c b/drivers/platform/chrome/cros_ec_typec.c
+index c7781aea0b88b2..f1324466efac65 100644
+--- a/drivers/platform/chrome/cros_ec_typec.c
++++ b/drivers/platform/chrome/cros_ec_typec.c
+@@ -409,6 +409,7 @@ static int cros_typec_init_ports(struct cros_typec_data *typec)
+ 	return 0;
+ 
+ unregister_ports:
++	fwnode_handle_put(fwnode);
+ 	cros_unregister_ports(typec);
+ 	return ret;
+ }
+diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
+index abdca3f05c5c15..89f5f44857d555 100644
+--- a/drivers/platform/x86/asus-wmi.c
++++ b/drivers/platform/x86/asus-wmi.c
+@@ -3696,10 +3696,28 @@ static int asus_wmi_custom_fan_curve_init(struct asus_wmi *asus)
+ /* Throttle thermal policy ****************************************************/
+ static int throttle_thermal_policy_write(struct asus_wmi *asus)
+ {
+-	u8 value = asus->throttle_thermal_policy_mode;
+ 	u32 retval;
++	u8 value;
+ 	int err;
+ 
++	if (asus->throttle_thermal_policy_dev == ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY_VIVO) {
++		switch (asus->throttle_thermal_policy_mode) {
++		case ASUS_THROTTLE_THERMAL_POLICY_DEFAULT:
++			value = ASUS_THROTTLE_THERMAL_POLICY_DEFAULT_VIVO;
++			break;
++		case ASUS_THROTTLE_THERMAL_POLICY_OVERBOOST:
++			value = ASUS_THROTTLE_THERMAL_POLICY_OVERBOOST_VIVO;
++			break;
++		case ASUS_THROTTLE_THERMAL_POLICY_SILENT:
++			value = ASUS_THROTTLE_THERMAL_POLICY_SILENT_VIVO;
++			break;
++		default:
++			return -EINVAL;
++		}
++	} else {
++		value = asus->throttle_thermal_policy_mode;
++	}
++
+ 	err = asus_wmi_set_devstate(asus->throttle_thermal_policy_dev,
+ 				    value, &retval);
+ 
+@@ -3804,46 +3822,6 @@ static ssize_t throttle_thermal_policy_store(struct device *dev,
+ static DEVICE_ATTR_RW(throttle_thermal_policy);
+ 
+ /* Platform profile ***********************************************************/
+-static int asus_wmi_platform_profile_to_vivo(struct asus_wmi *asus, int mode)
+-{
+-	bool vivo;
+-
+-	vivo = asus->throttle_thermal_policy_dev == ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY_VIVO;
+-
+-	if (vivo) {
+-		switch (mode) {
+-		case ASUS_THROTTLE_THERMAL_POLICY_DEFAULT:
+-			return ASUS_THROTTLE_THERMAL_POLICY_DEFAULT_VIVO;
+-		case ASUS_THROTTLE_THERMAL_POLICY_OVERBOOST:
+-			return ASUS_THROTTLE_THERMAL_POLICY_OVERBOOST_VIVO;
+-		case ASUS_THROTTLE_THERMAL_POLICY_SILENT:
+-			return ASUS_THROTTLE_THERMAL_POLICY_SILENT_VIVO;
+-		}
+-	}
+-
+-	return mode;
+-}
+-
+-static int asus_wmi_platform_profile_mode_from_vivo(struct asus_wmi *asus, int mode)
+-{
+-	bool vivo;
+-
+-	vivo = asus->throttle_thermal_policy_dev == ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY_VIVO;
+-
+-	if (vivo) {
+-		switch (mode) {
+-		case ASUS_THROTTLE_THERMAL_POLICY_DEFAULT_VIVO:
+-			return ASUS_THROTTLE_THERMAL_POLICY_DEFAULT;
+-		case ASUS_THROTTLE_THERMAL_POLICY_OVERBOOST_VIVO:
+-			return ASUS_THROTTLE_THERMAL_POLICY_OVERBOOST;
+-		case ASUS_THROTTLE_THERMAL_POLICY_SILENT_VIVO:
+-			return ASUS_THROTTLE_THERMAL_POLICY_SILENT;
+-		}
+-	}
+-
+-	return mode;
+-}
+-
+ static int asus_wmi_platform_profile_get(struct platform_profile_handler *pprof,
+ 					enum platform_profile_option *profile)
+ {
+@@ -3853,7 +3831,7 @@ static int asus_wmi_platform_profile_get(struct platform_profile_handler *pprof,
+ 	asus = container_of(pprof, struct asus_wmi, platform_profile_handler);
+ 	tp = asus->throttle_thermal_policy_mode;
+ 
+-	switch (asus_wmi_platform_profile_mode_from_vivo(asus, tp)) {
++	switch (tp) {
+ 	case ASUS_THROTTLE_THERMAL_POLICY_DEFAULT:
+ 		*profile = PLATFORM_PROFILE_BALANCED;
+ 		break;
+@@ -3892,7 +3870,7 @@ static int asus_wmi_platform_profile_set(struct platform_profile_handler *pprof,
+ 		return -EOPNOTSUPP;
+ 	}
+ 
+-	asus->throttle_thermal_policy_mode = asus_wmi_platform_profile_to_vivo(asus, tp);
++	asus->throttle_thermal_policy_mode = tp;
+ 	return throttle_thermal_policy_write(asus);
+ }
+ 
+diff --git a/drivers/platform/x86/intel/bxtwc_tmu.c b/drivers/platform/x86/intel/bxtwc_tmu.c
+index d0e2a3c293b0b0..9ac801b929b93c 100644
+--- a/drivers/platform/x86/intel/bxtwc_tmu.c
++++ b/drivers/platform/x86/intel/bxtwc_tmu.c
+@@ -48,9 +48,8 @@ static irqreturn_t bxt_wcove_tmu_irq_handler(int irq, void *data)
+ static int bxt_wcove_tmu_probe(struct platform_device *pdev)
+ {
+ 	struct intel_soc_pmic *pmic = dev_get_drvdata(pdev->dev.parent);
+-	struct regmap_irq_chip_data *regmap_irq_chip;
+ 	struct wcove_tmu *wctmu;
+-	int ret, virq, irq;
++	int ret;
+ 
+ 	wctmu = devm_kzalloc(&pdev->dev, sizeof(*wctmu), GFP_KERNEL);
+ 	if (!wctmu)
+@@ -59,27 +58,18 @@ static int bxt_wcove_tmu_probe(struct platform_device *pdev)
+ 	wctmu->dev = &pdev->dev;
+ 	wctmu->regmap = pmic->regmap;
+ 
+-	irq = platform_get_irq(pdev, 0);
+-	if (irq < 0)
+-		return irq;
++	wctmu->irq = platform_get_irq(pdev, 0);
++	if (wctmu->irq < 0)
++		return wctmu->irq;
+ 
+-	regmap_irq_chip = pmic->irq_chip_data_tmu;
+-	virq = regmap_irq_get_virq(regmap_irq_chip, irq);
+-	if (virq < 0) {
+-		dev_err(&pdev->dev,
+-			"failed to get virtual interrupt=%d\n", irq);
+-		return virq;
+-	}
+-
+-	ret = devm_request_threaded_irq(&pdev->dev, virq,
++	ret = devm_request_threaded_irq(&pdev->dev, wctmu->irq,
+ 					NULL, bxt_wcove_tmu_irq_handler,
+ 					IRQF_ONESHOT, "bxt_wcove_tmu", wctmu);
+ 	if (ret) {
+ 		dev_err(&pdev->dev, "request irq failed: %d,virq: %d\n",
+-							ret, virq);
++			ret, wctmu->irq);
+ 		return ret;
+ 	}
+-	wctmu->irq = virq;
+ 
+ 	/* Unmask TMU second level Wake & System alarm */
+ 	regmap_update_bits(wctmu->regmap, BXTWC_MTMUIRQ_REG,
+diff --git a/drivers/platform/x86/intel/pmt/class.c b/drivers/platform/x86/intel/pmt/class.c
+index c04bb7f97a4db1..c3ca2ac91b0569 100644
+--- a/drivers/platform/x86/intel/pmt/class.c
++++ b/drivers/platform/x86/intel/pmt/class.c
+@@ -59,10 +59,12 @@ pmt_memcpy64_fromio(void *to, const u64 __iomem *from, size_t count)
+ }
+ 
+ int pmt_telem_read_mmio(struct pci_dev *pdev, struct pmt_callbacks *cb, u32 guid, void *buf,
+-			void __iomem *addr, u32 count)
++			void __iomem *addr, loff_t off, u32 count)
+ {
+ 	if (cb && cb->read_telem)
+-		return cb->read_telem(pdev, guid, buf, count);
++		return cb->read_telem(pdev, guid, buf, off, count);
++
++	addr += off;
+ 
+ 	if (guid == GUID_SPR_PUNIT)
+ 		/* PUNIT on SPR only supports aligned 64-bit read */
+@@ -96,7 +98,7 @@ intel_pmt_read(struct file *filp, struct kobject *kobj,
+ 		count = entry->size - off;
+ 
+ 	count = pmt_telem_read_mmio(entry->ep->pcidev, entry->cb, entry->header.guid, buf,
+-				    entry->base + off, count);
++				    entry->base, off, count);
+ 
+ 	return count;
+ }
+diff --git a/drivers/platform/x86/intel/pmt/class.h b/drivers/platform/x86/intel/pmt/class.h
+index a267ac96442301..b2006d57779d66 100644
+--- a/drivers/platform/x86/intel/pmt/class.h
++++ b/drivers/platform/x86/intel/pmt/class.h
+@@ -62,7 +62,7 @@ struct intel_pmt_namespace {
+ };
+ 
+ int pmt_telem_read_mmio(struct pci_dev *pdev, struct pmt_callbacks *cb, u32 guid, void *buf,
+-			void __iomem *addr, u32 count);
++			void __iomem *addr, loff_t off, u32 count);
+ bool intel_pmt_is_early_client_hw(struct device *dev);
+ int intel_pmt_dev_create(struct intel_pmt_entry *entry,
+ 			 struct intel_pmt_namespace *ns,
+diff --git a/drivers/platform/x86/intel/pmt/telemetry.c b/drivers/platform/x86/intel/pmt/telemetry.c
+index c9feac859e574c..0cea617c6c2e25 100644
+--- a/drivers/platform/x86/intel/pmt/telemetry.c
++++ b/drivers/platform/x86/intel/pmt/telemetry.c
+@@ -219,7 +219,7 @@ int pmt_telem_read(struct telem_endpoint *ep, u32 id, u64 *data, u32 count)
+ 	if (offset + NUM_BYTES_QWORD(count) > size)
+ 		return -EINVAL;
+ 
+-	pmt_telem_read_mmio(ep->pcidev, ep->cb, ep->header.guid, data, ep->base + offset,
++	pmt_telem_read_mmio(ep->pcidev, ep->cb, ep->header.guid, data, ep->base, offset,
+ 			    NUM_BYTES_QWORD(count));
+ 
+ 	return ep->present ? 0 : -EPIPE;
+diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
+index 2bf94d0ab32432..22ca70eb822718 100644
+--- a/drivers/platform/x86/panasonic-laptop.c
++++ b/drivers/platform/x86/panasonic-laptop.c
+@@ -614,8 +614,7 @@ static ssize_t eco_mode_show(struct device *dev, struct device_attribute *attr,
+ 		result = 1;
+ 		break;
+ 	default:
+-		result = -EIO;
+-		break;
++		return -EIO;
+ 	}
+ 	return sysfs_emit(buf, "%u\n", result);
+ }
+@@ -761,7 +760,12 @@ static ssize_t current_brightness_store(struct device *dev, struct device_attrib
+ static ssize_t cdpower_show(struct device *dev, struct device_attribute *attr,
+ 			    char *buf)
+ {
+-	return sysfs_emit(buf, "%d\n", get_optd_power_state());
++	int state = get_optd_power_state();
++
++	if (state < 0)
++		return state;
++
++	return sysfs_emit(buf, "%d\n", state);
+ }
+ 
+ static ssize_t cdpower_store(struct device *dev, struct device_attribute *attr,
+diff --git a/drivers/pmdomain/ti/ti_sci_pm_domains.c b/drivers/pmdomain/ti/ti_sci_pm_domains.c
+index 1510d5ddae3dec..0df3eb7ff09a3d 100644
+--- a/drivers/pmdomain/ti/ti_sci_pm_domains.c
++++ b/drivers/pmdomain/ti/ti_sci_pm_domains.c
+@@ -161,6 +161,7 @@ static int ti_sci_pm_domain_probe(struct platform_device *pdev)
+ 				break;
+ 
+ 			if (args.args_count >= 1 && args.np == dev->of_node) {
++				of_node_put(args.np);
+ 				if (args.args[0] > max_id) {
+ 					max_id = args.args[0];
+ 				} else {
+@@ -192,7 +193,10 @@ static int ti_sci_pm_domain_probe(struct platform_device *pdev)
+ 				pm_genpd_init(&pd->pd, NULL, true);
+ 
+ 				list_add(&pd->node, &pd_provider->pd_list);
++			} else {
++				of_node_put(args.np);
+ 			}
++
+ 			index++;
+ 		}
+ 	}
+diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
+index 389d5a193e5dce..f5fc33a8bf4431 100644
+--- a/drivers/power/reset/Kconfig
++++ b/drivers/power/reset/Kconfig
+@@ -79,6 +79,7 @@ config POWER_RESET_EP93XX
+ 	bool "Cirrus EP93XX reset driver" if COMPILE_TEST
+ 	depends on MFD_SYSCON
+ 	default ARCH_EP93XX
++	select AUXILIARY_BUS
+ 	help
+ 	  This driver provides restart support for Cirrus EP93XX SoC.
+ 
+diff --git a/drivers/power/sequencing/Kconfig b/drivers/power/sequencing/Kconfig
+index c9f1cdb6652488..ddcc42a984921c 100644
+--- a/drivers/power/sequencing/Kconfig
++++ b/drivers/power/sequencing/Kconfig
+@@ -16,6 +16,7 @@ if POWER_SEQUENCING
+ config POWER_SEQUENCING_QCOM_WCN
+ 	tristate "Qualcomm WCN family PMU driver"
+ 	default m if ARCH_QCOM
++	depends on OF
+ 	help
+ 	  Say Y here to enable the power sequencing driver for Qualcomm
+ 	  WCN Bluetooth/WLAN chipsets.
+diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c
+index 750fda543308c8..51fb88aca0f9fd 100644
+--- a/drivers/power/supply/bq27xxx_battery.c
++++ b/drivers/power/supply/bq27xxx_battery.c
+@@ -449,9 +449,29 @@ static u8
+ 		[BQ27XXX_REG_AP] = 0x18,
+ 		BQ27XXX_DM_REG_ROWS,
+ 	},
++	bq27426_regs[BQ27XXX_REG_MAX] = {
++		[BQ27XXX_REG_CTRL] = 0x00,
++		[BQ27XXX_REG_TEMP] = 0x02,
++		[BQ27XXX_REG_INT_TEMP] = 0x1e,
++		[BQ27XXX_REG_VOLT] = 0x04,
++		[BQ27XXX_REG_AI] = 0x10,
++		[BQ27XXX_REG_FLAGS] = 0x06,
++		[BQ27XXX_REG_TTE] = INVALID_REG_ADDR,
++		[BQ27XXX_REG_TTF] = INVALID_REG_ADDR,
++		[BQ27XXX_REG_TTES] = INVALID_REG_ADDR,
++		[BQ27XXX_REG_TTECP] = INVALID_REG_ADDR,
++		[BQ27XXX_REG_NAC] = 0x08,
++		[BQ27XXX_REG_RC] = 0x0c,
++		[BQ27XXX_REG_FCC] = 0x0e,
++		[BQ27XXX_REG_CYCT] = INVALID_REG_ADDR,
++		[BQ27XXX_REG_AE] = INVALID_REG_ADDR,
++		[BQ27XXX_REG_SOC] = 0x1c,
++		[BQ27XXX_REG_DCAP] = INVALID_REG_ADDR,
++		[BQ27XXX_REG_AP] = 0x18,
++		BQ27XXX_DM_REG_ROWS,
++	},
+ #define bq27411_regs bq27421_regs
+ #define bq27425_regs bq27421_regs
+-#define bq27426_regs bq27421_regs
+ #define bq27441_regs bq27421_regs
+ #define bq27621_regs bq27421_regs
+ 	bq27z561_regs[BQ27XXX_REG_MAX] = {
+@@ -769,10 +789,23 @@ static enum power_supply_property bq27421_props[] = {
+ };
+ #define bq27411_props bq27421_props
+ #define bq27425_props bq27421_props
+-#define bq27426_props bq27421_props
+ #define bq27441_props bq27421_props
+ #define bq27621_props bq27421_props
+ 
++static enum power_supply_property bq27426_props[] = {
++	POWER_SUPPLY_PROP_STATUS,
++	POWER_SUPPLY_PROP_PRESENT,
++	POWER_SUPPLY_PROP_VOLTAGE_NOW,
++	POWER_SUPPLY_PROP_CURRENT_NOW,
++	POWER_SUPPLY_PROP_CAPACITY,
++	POWER_SUPPLY_PROP_CAPACITY_LEVEL,
++	POWER_SUPPLY_PROP_TEMP,
++	POWER_SUPPLY_PROP_TECHNOLOGY,
++	POWER_SUPPLY_PROP_CHARGE_FULL,
++	POWER_SUPPLY_PROP_CHARGE_NOW,
++	POWER_SUPPLY_PROP_MANUFACTURER,
++};
++
+ static enum power_supply_property bq27z561_props[] = {
+ 	POWER_SUPPLY_PROP_STATUS,
+ 	POWER_SUPPLY_PROP_PRESENT,
+diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
+index 49534458a9f7d3..73cc9c236e8333 100644
+--- a/drivers/power/supply/power_supply_core.c
++++ b/drivers/power/supply/power_supply_core.c
+@@ -484,8 +484,6 @@ EXPORT_SYMBOL_GPL(power_supply_get_by_name);
+  */
+ void power_supply_put(struct power_supply *psy)
+ {
+-	might_sleep();
+-
+ 	atomic_dec(&psy->use_cnt);
+ 	put_device(&psy->dev);
+ }
+diff --git a/drivers/power/supply/rt9471.c b/drivers/power/supply/rt9471.c
+index c04af1ee89c675..67b86ac91a21dd 100644
+--- a/drivers/power/supply/rt9471.c
++++ b/drivers/power/supply/rt9471.c
+@@ -139,6 +139,19 @@ enum {
+ 	RT9471_PORTSTAT_DCP,
+ };
+ 
++enum {
++	RT9471_ICSTAT_SLEEP = 0,
++	RT9471_ICSTAT_VBUSRDY,
++	RT9471_ICSTAT_TRICKLECHG,
++	RT9471_ICSTAT_PRECHG,
++	RT9471_ICSTAT_FASTCHG,
++	RT9471_ICSTAT_IEOC,
++	RT9471_ICSTAT_BGCHG,
++	RT9471_ICSTAT_CHGDONE,
++	RT9471_ICSTAT_CHGFAULT,
++	RT9471_ICSTAT_OTG = 15,
++};
++
+ struct rt9471_chip {
+ 	struct device *dev;
+ 	struct regmap *regmap;
+@@ -153,8 +166,8 @@ struct rt9471_chip {
+ };
+ 
+ static const struct reg_field rt9471_reg_fields[F_MAX_FIELDS] = {
+-	[F_WDT]		= REG_FIELD(RT9471_REG_TOP, 0, 0),
+-	[F_WDT_RST]	= REG_FIELD(RT9471_REG_TOP, 1, 1),
++	[F_WDT]		= REG_FIELD(RT9471_REG_TOP, 0, 1),
++	[F_WDT_RST]	= REG_FIELD(RT9471_REG_TOP, 2, 2),
+ 	[F_CHG_EN]	= REG_FIELD(RT9471_REG_FUNC, 0, 0),
+ 	[F_HZ]		= REG_FIELD(RT9471_REG_FUNC, 5, 5),
+ 	[F_BATFET_DIS]	= REG_FIELD(RT9471_REG_FUNC, 7, 7),
+@@ -255,31 +268,32 @@ static int rt9471_get_ieoc(struct rt9471_chip *chip, int *microamp)
+ 
+ static int rt9471_get_status(struct rt9471_chip *chip, int *status)
+ {
+-	unsigned int chg_ready, chg_done, fault_stat;
++	unsigned int ic_stat;
+ 	int ret;
+ 
+-	ret = regmap_field_read(chip->rm_fields[F_ST_CHG_RDY], &chg_ready);
+-	if (ret)
+-		return ret;
+-
+-	ret = regmap_field_read(chip->rm_fields[F_ST_CHG_DONE], &chg_done);
++	ret = regmap_field_read(chip->rm_fields[F_IC_STAT], &ic_stat);
+ 	if (ret)
+ 		return ret;
+ 
+-	ret = regmap_read(chip->regmap, RT9471_REG_STAT1, &fault_stat);
+-	if (ret)
+-		return ret;
+-
+-	fault_stat &= RT9471_CHGFAULT_MASK;
+-
+-	if (chg_ready && chg_done)
+-		*status = POWER_SUPPLY_STATUS_FULL;
+-	else if (chg_ready && fault_stat)
++	switch (ic_stat) {
++	case RT9471_ICSTAT_VBUSRDY:
++	case RT9471_ICSTAT_CHGFAULT:
+ 		*status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+-	else if (chg_ready && !fault_stat)
++		break;
++	case RT9471_ICSTAT_TRICKLECHG ... RT9471_ICSTAT_BGCHG:
+ 		*status = POWER_SUPPLY_STATUS_CHARGING;
+-	else
++		break;
++	case RT9471_ICSTAT_CHGDONE:
++		*status = POWER_SUPPLY_STATUS_FULL;
++		break;
++	case RT9471_ICSTAT_SLEEP:
++	case RT9471_ICSTAT_OTG:
+ 		*status = POWER_SUPPLY_STATUS_DISCHARGING;
++		break;
++	default:
++		*status = POWER_SUPPLY_STATUS_UNKNOWN;
++		break;
++	}
+ 
+ 	return 0;
+ }
+diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
+index 6e752e148b98cc..210368099a0642 100644
+--- a/drivers/pwm/core.c
++++ b/drivers/pwm/core.c
+@@ -75,7 +75,7 @@ static void pwm_apply_debug(struct pwm_device *pwm,
+ 	    state->duty_cycle < state->period)
+ 		dev_warn(pwmchip_parent(chip), ".apply ignored .polarity\n");
+ 
+-	if (state->enabled &&
++	if (state->enabled && s2.enabled &&
+ 	    last->polarity == state->polarity &&
+ 	    last->period > s2.period &&
+ 	    last->period <= state->period)
+@@ -83,7 +83,11 @@ static void pwm_apply_debug(struct pwm_device *pwm,
+ 			 ".apply didn't pick the best available period (requested: %llu, applied: %llu, possible: %llu)\n",
+ 			 state->period, s2.period, last->period);
+ 
+-	if (state->enabled && state->period < s2.period)
++	/*
++	 * Rounding the period up is fine only if duty_cycle is 0, because a
++	 * flat line doesn't have a characteristic period.
++	 */
++	if (state->enabled && s2.enabled && state->period < s2.period && s2.duty_cycle)
+ 		dev_warn(pwmchip_parent(chip),
+ 			 ".apply is supposed to round down period (requested: %llu, applied: %llu)\n",
+ 			 state->period, s2.period);
+@@ -99,7 +103,7 @@ static void pwm_apply_debug(struct pwm_device *pwm,
+ 			 s2.duty_cycle, s2.period,
+ 			 last->duty_cycle, last->period);
+ 
+-	if (state->enabled && state->duty_cycle < s2.duty_cycle)
++	if (state->enabled && s2.enabled && state->duty_cycle < s2.duty_cycle)
+ 		dev_warn(pwmchip_parent(chip),
+ 			 ".apply is supposed to round down duty_cycle (requested: %llu/%llu, applied: %llu/%llu)\n",
+ 			 state->duty_cycle, state->period,
+diff --git a/drivers/pwm/pwm-imx27.c b/drivers/pwm/pwm-imx27.c
+index 9e2bbf5b4a8ce7..0375987194318f 100644
+--- a/drivers/pwm/pwm-imx27.c
++++ b/drivers/pwm/pwm-imx27.c
+@@ -26,6 +26,7 @@
+ #define MX3_PWMSR			0x04    /* PWM Status Register */
+ #define MX3_PWMSAR			0x0C    /* PWM Sample Register */
+ #define MX3_PWMPR			0x10    /* PWM Period Register */
++#define MX3_PWMCNR			0x14    /* PWM Counter Register */
+ 
+ #define MX3_PWMCR_FWM			GENMASK(27, 26)
+ #define MX3_PWMCR_STOPEN		BIT(25)
+@@ -219,10 +220,12 @@ static void pwm_imx27_wait_fifo_slot(struct pwm_chip *chip,
+ static int pwm_imx27_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ 			   const struct pwm_state *state)
+ {
+-	unsigned long period_cycles, duty_cycles, prescale;
++	unsigned long period_cycles, duty_cycles, prescale, period_us, tmp;
+ 	struct pwm_imx27_chip *imx = to_pwm_imx27_chip(chip);
+ 	unsigned long long c;
+ 	unsigned long long clkrate;
++	unsigned long flags;
++	int val;
+ 	int ret;
+ 	u32 cr;
+ 
+@@ -263,7 +266,98 @@ static int pwm_imx27_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ 		pwm_imx27_sw_reset(chip);
+ 	}
+ 
+-	writel(duty_cycles, imx->mmio_base + MX3_PWMSAR);
++	val = readl(imx->mmio_base + MX3_PWMPR);
++	val = val >= MX3_PWMPR_MAX ? MX3_PWMPR_MAX : val;
++	cr = readl(imx->mmio_base + MX3_PWMCR);
++	tmp = NSEC_PER_SEC * (u64)(val + 2) * MX3_PWMCR_PRESCALER_GET(cr);
++	tmp = DIV_ROUND_UP_ULL(tmp, clkrate);
++	period_us = DIV_ROUND_UP_ULL(tmp, 1000);
++
++	/*
++	 * ERR051198:
++	 * PWM: PWM output may not function correctly if the FIFO is empty when
++	 * a new SAR value is programmed
++	 *
++	 * Description:
++	 * When the PWM FIFO is empty, a new value programmed to the PWM Sample
++	 * register (PWM_PWMSAR) will be directly applied even if the current
++	 * timer period has not expired.
++	 *
++	 * If the new SAMPLE value programmed in the PWM_PWMSAR register is
++	 * less than the previous value, and the PWM counter register
++	 * (PWM_PWMCNR) that contains the current COUNT value is greater than
++	 * the new programmed SAMPLE value, the current period will not flip
++	 * the level. This may result in an output pulse with a duty cycle of
++	 * 100%.
++	 *
++	 * Consider a change from
++	 *     ________
++	 *    /        \______/
++	 *    ^      *        ^
++	 * to
++	 *     ____
++	 *    /    \__________/
++	 *    ^               ^
++	 * At the time marked by *, the newly written value is applied directly
++	 * to SAR even though the current period is not over, since the FIFO is empty.
++	 *
++	 *     ________        ____________________
++	 *    /        \______/                    \__________/
++	 *    ^               ^      *        ^               ^
++	 *    |<-- old SAR -->|               |<-- new SAR -->|
++	 *
++	 * That is, the output is active for a whole period.
++	 *
++	 * Workaround:
++	 * If the new SAR is less than the old SAR and the current counter is
++	 * in the errata window, write an extra old SAR into the FIFO; the new
++	 * SAR then takes effect in the next period.
++	 *
++	 * Sometimes the period is quite long, such as over 1 second. If the
++	 * old SAR were added to the FIFO unconditionally, the new SAR would
++	 * have to wait for the next period, which may be too long.
++	 *
++	 * Turn off interrupts to ensure that no IRQ or reschedule happens
++	 * during the above operations; otherwise the PWM counter value would
++	 * be stale and the wrong action would be taken.
++	 *
++	 * Add a safety margin of 1.5us because the IO write needs some time
++	 * to complete.
++	 *
++	 * Use writel_relaxed() to minimize the interval between two writes to
++	 * the SAR register to increase the fastest PWM frequency supported.
++	 *
++	 * When the PWM period is longer than 2us (i.e. below 500kHz), this
++	 * workaround solves the problem. No software workaround is possible
++	 * if the PWM period is shorter than the IO write; just try our best
++	 * to fill the old data into the FIFO.
++	 */
++	c = clkrate * 1500;
++	do_div(c, NSEC_PER_SEC);
++
++	local_irq_save(flags);
++	val = FIELD_GET(MX3_PWMSR_FIFOAV, readl_relaxed(imx->mmio_base + MX3_PWMSR));
++
++	if (duty_cycles < imx->duty_cycle && (cr & MX3_PWMCR_EN)) {
++		if (period_us < 2) { /* 2us = 500 kHz */
++			/* Best effort attempt to fix up >500 kHz case */
++			udelay(3 * period_us);
++			writel_relaxed(imx->duty_cycle, imx->mmio_base + MX3_PWMSAR);
++			writel_relaxed(imx->duty_cycle, imx->mmio_base + MX3_PWMSAR);
++		} else if (val < MX3_PWMSR_FIFOAV_2WORDS) {
++			val = readl_relaxed(imx->mmio_base + MX3_PWMCNR);
++			/*
++			 * If counter is close to period, controller may roll over when
++			 * If the counter is close to the period, it may roll over
++			 * by the time the next IO write completes.
++			if ((val + c >= duty_cycles && val < imx->duty_cycle) ||
++			    val + c >= period_cycles)
++				writel_relaxed(imx->duty_cycle, imx->mmio_base + MX3_PWMSAR);
++		}
++	}
++	writel_relaxed(duty_cycles, imx->mmio_base + MX3_PWMSAR);
++	local_irq_restore(flags);
++
+ 	writel(period_cycles, imx->mmio_base + MX3_PWMPR);
+ 
+ 	/*
+diff --git a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c
+index 28e7ce60cb617c..25ed9f713974ba 100644
+--- a/drivers/regulator/qcom_smd-regulator.c
++++ b/drivers/regulator/qcom_smd-regulator.c
+@@ -11,7 +11,7 @@
+ #include <linux/regulator/of_regulator.h>
+ #include <linux/soc/qcom/smd-rpm.h>
+ 
+-struct qcom_smd_rpm *smd_vreg_rpm;
++static struct qcom_smd_rpm *smd_vreg_rpm;
+ 
+ struct qcom_rpm_reg {
+ 	struct device *dev;
+diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c
+index 01a8d04879184c..37476d2558fda7 100644
+--- a/drivers/regulator/rk808-regulator.c
++++ b/drivers/regulator/rk808-regulator.c
+@@ -1853,7 +1853,7 @@ static int rk808_regulator_dt_parse_pdata(struct device *dev,
+ 		}
+ 
+ 		if (!pdata->dvs_gpio[i]) {
+-			dev_info(dev, "there is no dvs%d gpio\n", i);
++			dev_dbg(dev, "there is no dvs%d gpio\n", i);
+ 			continue;
+ 		}
+ 
+@@ -1889,12 +1889,6 @@ static int rk808_regulator_probe(struct platform_device *pdev)
+ 	if (!pdata)
+ 		return -ENOMEM;
+ 
+-	ret = rk808_regulator_dt_parse_pdata(&pdev->dev, regmap, pdata);
+-	if (ret < 0)
+-		return ret;
+-
+-	platform_set_drvdata(pdev, pdata);
+-
+ 	switch (rk808->variant) {
+ 	case RK805_ID:
+ 		regulators = rk805_reg;
+@@ -1905,6 +1899,11 @@ static int rk808_regulator_probe(struct platform_device *pdev)
+ 		nregulators = ARRAY_SIZE(rk806_reg);
+ 		break;
+ 	case RK808_ID:
++		/* DVS0/1 GPIOs are supported on the RK808 only */
++		ret = rk808_regulator_dt_parse_pdata(&pdev->dev, regmap, pdata);
++		if (ret < 0)
++			return ret;
++
+ 		regulators = rk808_reg;
+ 		nregulators = RK808_NUM_REGULATORS;
+ 		break;
+@@ -1930,6 +1929,8 @@ static int rk808_regulator_probe(struct platform_device *pdev)
+ 		return -EINVAL;
+ 	}
+ 
++	platform_set_drvdata(pdev, pdata);
++
+ 	config.dev = &pdev->dev;
+ 	config.driver_data = pdata;
+ 	config.regmap = regmap;
+diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
+index 955e4e38477e6f..62f8548fb46a5d 100644
+--- a/drivers/remoteproc/Kconfig
++++ b/drivers/remoteproc/Kconfig
+@@ -341,9 +341,9 @@ config TI_K3_DSP_REMOTEPROC
+ 
+ config TI_K3_M4_REMOTEPROC
+ 	tristate "TI K3 M4 remoteproc support"
+-	depends on ARCH_OMAP2PLUS || ARCH_K3
+-	select MAILBOX
+-	select OMAP2PLUS_MBOX
++	depends on ARCH_K3 || COMPILE_TEST
++	depends on TI_SCI_PROTOCOL || (COMPILE_TEST && TI_SCI_PROTOCOL=n)
++	depends on OMAP2PLUS_MBOX
+ 	help
+ 	  Say m here to support TI's M4 remote processor subsystems
+ 	  on various TI K3 family of SoCs through the remote processor
+diff --git a/drivers/remoteproc/qcom_q6v5_adsp.c b/drivers/remoteproc/qcom_q6v5_adsp.c
+index 572dcb0f055b76..223f6ca0745d3d 100644
+--- a/drivers/remoteproc/qcom_q6v5_adsp.c
++++ b/drivers/remoteproc/qcom_q6v5_adsp.c
+@@ -734,15 +734,22 @@ static int adsp_probe(struct platform_device *pdev)
+ 					      desc->ssctl_id);
+ 	if (IS_ERR(adsp->sysmon)) {
+ 		ret = PTR_ERR(adsp->sysmon);
+-		goto disable_pm;
++		goto deinit_remove_glink_pdm_ssr;
+ 	}
+ 
+ 	ret = rproc_add(rproc);
+ 	if (ret)
+-		goto disable_pm;
++		goto remove_sysmon;
+ 
+ 	return 0;
+ 
++remove_sysmon:
++	qcom_remove_sysmon_subdev(adsp->sysmon);
++deinit_remove_glink_pdm_ssr:
++	qcom_q6v5_deinit(&adsp->q6v5);
++	qcom_remove_glink_subdev(rproc, &adsp->glink_subdev);
++	qcom_remove_pdm_subdev(rproc, &adsp->pdm_subdev);
++	qcom_remove_ssr_subdev(rproc, &adsp->ssr_subdev);
+ disable_pm:
+ 	qcom_rproc_pds_detach(adsp);
+ 
+diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c
+index 2a42215ce8e07b..32c3531b20c70a 100644
+--- a/drivers/remoteproc/qcom_q6v5_mss.c
++++ b/drivers/remoteproc/qcom_q6v5_mss.c
+@@ -1162,6 +1162,9 @@ static int q6v5_mba_load(struct q6v5 *qproc)
+ 		goto disable_active_clks;
+ 	}
+ 
++	if (qproc->has_mba_logs)
++		qcom_pil_info_store("mba", qproc->mba_phys, MBA_LOG_SIZE);
++
+ 	writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG);
+ 	if (qproc->dp_size) {
+ 		writel(qproc->mba_phys + SZ_1M, qproc->rmb_base + RMB_PMI_CODE_START_REG);
+@@ -1172,9 +1175,6 @@ static int q6v5_mba_load(struct q6v5 *qproc)
+ 	if (ret)
+ 		goto reclaim_mba;
+ 
+-	if (qproc->has_mba_logs)
+-		qcom_pil_info_store("mba", qproc->mba_phys, MBA_LOG_SIZE);
+-
+ 	ret = q6v5_rmb_mba_wait(qproc, 0, 5000);
+ 	if (ret == -ETIMEDOUT) {
+ 		dev_err(qproc->dev, "MBA boot timed out\n");
+diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c
+index ef82835e98a4ef..f4f4b3df3884ef 100644
+--- a/drivers/remoteproc/qcom_q6v5_pas.c
++++ b/drivers/remoteproc/qcom_q6v5_pas.c
+@@ -759,16 +759,16 @@ static int adsp_probe(struct platform_device *pdev)
+ 
+ 	ret = adsp_init_clock(adsp);
+ 	if (ret)
+-		goto free_rproc;
++		goto unassign_mem;
+ 
+ 	ret = adsp_init_regulator(adsp);
+ 	if (ret)
+-		goto free_rproc;
++		goto unassign_mem;
+ 
+ 	ret = adsp_pds_attach(&pdev->dev, adsp->proxy_pds,
+ 			      desc->proxy_pd_names);
+ 	if (ret < 0)
+-		goto free_rproc;
++		goto unassign_mem;
+ 	adsp->proxy_pd_count = ret;
+ 
+ 	ret = qcom_q6v5_init(&adsp->q6v5, pdev, rproc, desc->crash_reason_smem, desc->load_state,
+@@ -784,18 +784,28 @@ static int adsp_probe(struct platform_device *pdev)
+ 					      desc->ssctl_id);
+ 	if (IS_ERR(adsp->sysmon)) {
+ 		ret = PTR_ERR(adsp->sysmon);
+-		goto detach_proxy_pds;
++		goto deinit_remove_pdm_smd_glink;
+ 	}
+ 
+ 	qcom_add_ssr_subdev(rproc, &adsp->ssr_subdev, desc->ssr_name);
+ 	ret = rproc_add(rproc);
+ 	if (ret)
+-		goto detach_proxy_pds;
++		goto remove_ssr_sysmon;
+ 
+ 	return 0;
+ 
++remove_ssr_sysmon:
++	qcom_remove_ssr_subdev(rproc, &adsp->ssr_subdev);
++	qcom_remove_sysmon_subdev(adsp->sysmon);
++deinit_remove_pdm_smd_glink:
++	qcom_remove_pdm_subdev(rproc, &adsp->pdm_subdev);
++	qcom_remove_smd_subdev(rproc, &adsp->smd_subdev);
++	qcom_remove_glink_subdev(rproc, &adsp->glink_subdev);
++	qcom_q6v5_deinit(&adsp->q6v5);
+ detach_proxy_pds:
+ 	adsp_pds_detach(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
++unassign_mem:
++	adsp_unassign_memory_region(adsp);
+ free_rproc:
+ 	device_init_wakeup(adsp->dev, false);
+ 
+@@ -907,6 +917,7 @@ static const struct adsp_data sm8250_adsp_resource = {
+ 	.crash_reason_smem = 423,
+ 	.firmware_name = "adsp.mdt",
+ 	.pas_id = 1,
++	.minidump_id = 5,
+ 	.auto_boot = true,
+ 	.proxy_pd_names = (char*[]){
+ 		"lcx",
+@@ -1124,6 +1135,7 @@ static const struct adsp_data sm8350_cdsp_resource = {
+ 	.crash_reason_smem = 601,
+ 	.firmware_name = "cdsp.mdt",
+ 	.pas_id = 18,
++	.minidump_id = 7,
+ 	.auto_boot = true,
+ 	.proxy_pd_names = (char*[]){
+ 		"cx",
+diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c
+index d3af1dfa3c7d71..a2f9d85c7156dc 100644
+--- a/drivers/rpmsg/qcom_glink_native.c
++++ b/drivers/rpmsg/qcom_glink_native.c
+@@ -1204,7 +1204,8 @@ void qcom_glink_native_rx(struct qcom_glink *glink)
+ 			ret = qcom_glink_rx_open_ack(glink, param1);
+ 			break;
+ 		case GLINK_CMD_OPEN:
+-			ret = qcom_glink_rx_defer(glink, param2);
++			/* upper 16 bits of param2 are the "prio" field */
++			ret = qcom_glink_rx_defer(glink, param2 & 0xffff);
+ 			break;
+ 		case GLINK_CMD_TX_DATA:
+ 		case GLINK_CMD_TX_DATA_CONT:
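
For GLINK_CMD_OPEN, param2 carries the length of the channel name that follows the command in its low 16 bits; newer remote firmware repurposes the upper 16 bits as a "prio" field, which is why only the masked low half is passed to qcom_glink_rx_defer() above. A minimal sketch of the decode (the helper name is illustrative, not part of the driver):

	#include <linux/types.h>

	/* Decode a GLINK_CMD_OPEN param2 word: bits 15..0 hold the name
	 * length, bits 31..16 hold the "prio" field on newer firmware. */
	static inline u16 glink_open_name_len(u32 param2)
	{
		return param2 & 0xffff;
	}
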
+diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
+index cca650b2e0b94d..aaf76406cd7d7d 100644
+--- a/drivers/rtc/interface.c
++++ b/drivers/rtc/interface.c
+@@ -904,13 +904,18 @@ void rtc_timer_do_work(struct work_struct *work)
+ 	struct timerqueue_node *next;
+ 	ktime_t now;
+ 	struct rtc_time tm;
++	int err;
+ 
+ 	struct rtc_device *rtc =
+ 		container_of(work, struct rtc_device, irqwork);
+ 
+ 	mutex_lock(&rtc->ops_lock);
+ again:
+-	__rtc_read_time(rtc, &tm);
++	err = __rtc_read_time(rtc, &tm);
++	if (err) {
++		mutex_unlock(&rtc->ops_lock);
++		return;
++	}
+ 	now = rtc_tm_to_ktime(tm);
+ 	while ((next = timerqueue_getnext(&rtc->timerqueue))) {
+ 		if (next->expires > now)
+diff --git a/drivers/rtc/rtc-ab-eoz9.c b/drivers/rtc/rtc-ab-eoz9.c
+index 02f7d071128772..e17bce9a27468b 100644
+--- a/drivers/rtc/rtc-ab-eoz9.c
++++ b/drivers/rtc/rtc-ab-eoz9.c
+@@ -396,13 +396,6 @@ static int abeoz9z3_temp_read(struct device *dev,
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	if ((val & ABEOZ9_REG_CTRL_STATUS_V1F) ||
+-	    (val & ABEOZ9_REG_CTRL_STATUS_V2F)) {
+-		dev_err(dev,
+-			"thermometer might be disabled due to low voltage\n");
+-		return -EINVAL;
+-	}
+-
+ 	switch (attr) {
+ 	case hwmon_temp_input:
+ 		ret = regmap_read(regmap, ABEOZ9_REG_REG_TEMP, &val);
+diff --git a/drivers/rtc/rtc-abx80x.c b/drivers/rtc/rtc-abx80x.c
+index 1298962402ff47..3fee27914ba805 100644
+--- a/drivers/rtc/rtc-abx80x.c
++++ b/drivers/rtc/rtc-abx80x.c
+@@ -39,7 +39,7 @@
+ #define ABX8XX_REG_STATUS	0x0f
+ #define ABX8XX_STATUS_AF	BIT(2)
+ #define ABX8XX_STATUS_BLF	BIT(4)
+-#define ABX8XX_STATUS_WDT	BIT(6)
++#define ABX8XX_STATUS_WDT	BIT(5)
+ 
+ #define ABX8XX_REG_CTRL1	0x10
+ #define ABX8XX_CTRL_WRITE	BIT(0)
+diff --git a/drivers/rtc/rtc-rzn1.c b/drivers/rtc/rtc-rzn1.c
+index 56ebbd4d048147..8570c8e63d70c3 100644
+--- a/drivers/rtc/rtc-rzn1.c
++++ b/drivers/rtc/rtc-rzn1.c
+@@ -111,8 +111,8 @@ static int rzn1_rtc_read_time(struct device *dev, struct rtc_time *tm)
+ 	tm->tm_hour = bcd2bin(tm->tm_hour);
+ 	tm->tm_wday = bcd2bin(tm->tm_wday);
+ 	tm->tm_mday = bcd2bin(tm->tm_mday);
+-	tm->tm_mon = bcd2bin(tm->tm_mon);
+-	tm->tm_year = bcd2bin(tm->tm_year);
++	tm->tm_mon = bcd2bin(tm->tm_mon) - 1;
++	tm->tm_year = bcd2bin(tm->tm_year) + 100;
+ 
+ 	return 0;
+ }
+@@ -128,8 +128,8 @@ static int rzn1_rtc_set_time(struct device *dev, struct rtc_time *tm)
+ 	tm->tm_hour = bin2bcd(tm->tm_hour);
+ 	tm->tm_wday = bin2bcd(rzn1_rtc_tm_to_wday(tm));
+ 	tm->tm_mday = bin2bcd(tm->tm_mday);
+-	tm->tm_mon = bin2bcd(tm->tm_mon);
+-	tm->tm_year = bin2bcd(tm->tm_year);
++	tm->tm_mon = bin2bcd(tm->tm_mon + 1);
++	tm->tm_year = bin2bcd(tm->tm_year - 100);
+ 
+ 	val = readl(rtc->base + RZN1_RTC_CTL2);
+ 	if (!(val & RZN1_RTC_CTL2_STOPPED)) {
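
The sign of the offsets above follows from the kernel's struct rtc_time convention: tm_mon counts months from 0 and tm_year counts years from 1900, while the RZ/N1 registers hold BCD months 1..12 and a BCD two-digit year counted from 2000. A minimal sketch of the read-side conversion (the function name is illustrative):

	#include <linux/bcd.h>
	#include <linux/rtc.h>

	static void rzn1_decode_example(struct rtc_time *tm, u32 hw_mon, u32 hw_year)
	{
		tm->tm_mon  = bcd2bin(hw_mon) - 1;	/* BCD 0x07 -> 6 (July)   */
		tm->tm_year = bcd2bin(hw_year) + 100;	/* BCD 0x24 -> 124 (2024) */
	}
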
+diff --git a/drivers/rtc/rtc-st-lpc.c b/drivers/rtc/rtc-st-lpc.c
+index d492a2d26600c1..c6d4522411b312 100644
+--- a/drivers/rtc/rtc-st-lpc.c
++++ b/drivers/rtc/rtc-st-lpc.c
+@@ -218,15 +218,14 @@ static int st_rtc_probe(struct platform_device *pdev)
+ 		return -EINVAL;
+ 	}
+ 
+-	ret = devm_request_irq(&pdev->dev, rtc->irq, st_rtc_handler, 0,
+-			       pdev->name, rtc);
++	ret = devm_request_irq(&pdev->dev, rtc->irq, st_rtc_handler,
++			       IRQF_NO_AUTOEN, pdev->name, rtc);
+ 	if (ret) {
+ 		dev_err(&pdev->dev, "Failed to request irq %i\n", rtc->irq);
+ 		return ret;
+ 	}
+ 
+ 	enable_irq_wake(rtc->irq);
+-	disable_irq(rtc->irq);
+ 
+ 	rtc->clk = devm_clk_get_enabled(&pdev->dev, NULL);
+ 	if (IS_ERR(rtc->clk))
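
This is one of several hunks in this patch (see also smartreflex and spi-fsl-lpspi below) that replace the request-then-disable pattern with IRQF_NO_AUTOEN. The flag closes a window in which the freshly requested interrupt could fire before the driver disables it; a sketch under assumed names:

	/* Racy: the IRQ is live between the two calls, so the handler can
	 * run before the driver is ready for it. */
	ret = devm_request_irq(dev, irq, my_handler, 0, name, priv);
	disable_irq(irq);

	/* Race-free: the IRQ is registered but stays disabled until an
	 * explicit enable_irq(irq). */
	ret = devm_request_irq(dev, irq, my_handler, IRQF_NO_AUTOEN, name, priv);
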
+diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
+index c32e818f06dbad..ad17ab0a931494 100644
+--- a/drivers/s390/cio/cio.c
++++ b/drivers/s390/cio/cio.c
+@@ -459,10 +459,14 @@ int cio_update_schib(struct subchannel *sch)
+ {
+ 	struct schib schib;
+ 
+-	if (stsch(sch->schid, &schib) || !css_sch_is_valid(&schib))
++	if (stsch(sch->schid, &schib))
+ 		return -ENODEV;
+ 
+ 	memcpy(&sch->schib, &schib, sizeof(schib));
++
++	if (!css_sch_is_valid(&schib))
++		return -EACCES;
++
+ 	return 0;
+ }
+ EXPORT_SYMBOL_GPL(cio_update_schib);
+diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
+index b0f23242e17145..9498825d9c7a5c 100644
+--- a/drivers/s390/cio/device.c
++++ b/drivers/s390/cio/device.c
+@@ -1387,14 +1387,18 @@ enum io_sch_action {
+ 	IO_SCH_VERIFY,
+ 	IO_SCH_DISC,
+ 	IO_SCH_NOP,
++	IO_SCH_ORPH_CDEV,
+ };
+ 
+ static enum io_sch_action sch_get_action(struct subchannel *sch)
+ {
+ 	struct ccw_device *cdev;
++	int rc;
+ 
+ 	cdev = sch_get_cdev(sch);
+-	if (cio_update_schib(sch)) {
++	rc = cio_update_schib(sch);
++
++	if (rc == -ENODEV) {
+ 		/* Not operational. */
+ 		if (!cdev)
+ 			return IO_SCH_UNREG;
+@@ -1402,6 +1406,16 @@ static enum io_sch_action sch_get_action(struct subchannel *sch)
+ 			return IO_SCH_UNREG;
+ 		return IO_SCH_ORPH_UNREG;
+ 	}
++
++	/* Avoid unregistering subchannels without a working device. */
++	if (rc == -EACCES) {
++		if (!cdev)
++			return IO_SCH_NOP;
++		if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
++			return IO_SCH_UNREG_CDEV;
++		return IO_SCH_ORPH_CDEV;
++	}
++
+ 	/* Operational. */
+ 	if (!cdev)
+ 		return IO_SCH_ATTACH;
+@@ -1471,6 +1485,7 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process)
+ 		rc = 0;
+ 		goto out_unlock;
+ 	case IO_SCH_ORPH_UNREG:
++	case IO_SCH_ORPH_CDEV:
+ 	case IO_SCH_ORPH_ATTACH:
+ 		ccw_device_set_disconnected(cdev);
+ 		break;
+@@ -1502,6 +1517,7 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process)
+ 	/* Handle attached ccw device. */
+ 	switch (action) {
+ 	case IO_SCH_ORPH_UNREG:
++	case IO_SCH_ORPH_CDEV:
+ 	case IO_SCH_ORPH_ATTACH:
+ 		/* Move ccw device to orphanage. */
+ 		rc = ccw_device_move_to_orph(cdev);
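
With the cio.c change above, cio_update_schib() now distinguishes a subchannel that no longer exists (-ENODEV, stsch failed) from one that responds but currently reports an invalid schib (-EACCES), and sch_get_action() keeps the attached device around in the latter case instead of unregistering it. A caller-side sketch of the tri-state result:

	switch (cio_update_schib(sch)) {
	case 0:
		/* operational: carry on normally */
		break;
	case -EACCES:
		/* temporarily inaccessible: keep the device, retry later */
		break;
	case -ENODEV:
	default:
		/* subchannel gone: tear the device down */
		break;
	}
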
+diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
+index 62eca9419ad76e..21fa7ac849e5c3 100644
+--- a/drivers/s390/virtio/virtio_ccw.c
++++ b/drivers/s390/virtio/virtio_ccw.c
+@@ -58,6 +58,8 @@ struct virtio_ccw_device {
+ 	struct virtio_device vdev;
+ 	__u8 config[VIRTIO_CCW_CONFIG_SIZE];
+ 	struct ccw_device *cdev;
++	/* we make cdev->dev.dma_parms point to this */
++	struct device_dma_parameters dma_parms;
+ 	__u32 curr_io;
+ 	int err;
+ 	unsigned int revision; /* Transport revision */
+@@ -1303,6 +1305,7 @@ static int virtio_ccw_offline(struct ccw_device *cdev)
+ 	unregister_virtio_device(&vcdev->vdev);
+ 	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+ 	dev_set_drvdata(&cdev->dev, NULL);
++	cdev->dev.dma_parms = NULL;
+ 	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+ 	return 0;
+ }
+@@ -1366,6 +1369,7 @@ static int virtio_ccw_online(struct ccw_device *cdev)
+ 	}
+ 	vcdev->vdev.dev.parent = &cdev->dev;
+ 	vcdev->cdev = cdev;
++	cdev->dev.dma_parms = &vcdev->dma_parms;
+ 	vcdev->dma_area = ccw_device_dma_zalloc(vcdev->cdev,
+ 						sizeof(*vcdev->dma_area),
+ 						&vcdev->dma_area_addr);
+diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
+index 62cb7a864fd53d..70c7515a822f52 100644
+--- a/drivers/scsi/bfa/bfad.c
++++ b/drivers/scsi/bfa/bfad.c
+@@ -1693,9 +1693,8 @@ bfad_init(void)
+ 
+ 	error = bfad_im_module_init();
+ 	if (error) {
+-		error = -ENOMEM;
+ 		printk(KERN_WARNING "bfad_im_module_init failure\n");
+-		goto ext;
++		return -ENOMEM;
+ 	}
+ 
+ 	if (strcmp(FCPI_NAME, " fcpim") == 0)
+diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
+index 6219807ce3b9e1..ffd15fa4f9e596 100644
+--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
++++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
+@@ -1545,10 +1545,16 @@ void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba)
+ 	/* Init and wait for PHYs to come up and all libsas event finished. */
+ 	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
+ 		struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
++		struct asd_sas_phy *sas_phy = &phy->sas_phy;
+ 
+-		if (!(hisi_hba->phy_state & BIT(phy_no)))
++		if (!sas_phy->phy->enabled)
+ 			continue;
+ 
++		if (!(hisi_hba->phy_state & BIT(phy_no))) {
++			hisi_sas_phy_enable(hisi_hba, phy_no, 1);
++			continue;
++		}
++
+ 		async_schedule_domain(hisi_sas_async_init_wait_phyup,
+ 				      phy, &async);
+ 	}
+diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
+index cf13148ba281c1..e979ec1478c184 100644
+--- a/drivers/scsi/qedf/qedf_main.c
++++ b/drivers/scsi/qedf/qedf_main.c
+@@ -2738,6 +2738,7 @@ static int qedf_alloc_and_init_sb(struct qedf_ctx *qedf,
+ 	    sb_id, QED_SB_TYPE_STORAGE);
+ 
+ 	if (ret) {
++		dma_free_coherent(&qedf->pdev->dev, sizeof(*sb_virt), sb_virt, sb_phys);
+ 		QEDF_ERR(&qedf->dbg_ctx,
+ 			 "Status block initialization failed (0x%x) for id = %d.\n",
+ 			 ret, sb_id);
+diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
+index c5aec26019d6ab..628d59dda20cc4 100644
+--- a/drivers/scsi/qedi/qedi_main.c
++++ b/drivers/scsi/qedi/qedi_main.c
+@@ -369,6 +369,7 @@ static int qedi_alloc_and_init_sb(struct qedi_ctx *qedi,
+ 	ret = qedi_ops->common->sb_init(qedi->cdev, sb_info, sb_virt, sb_phys,
+ 				       sb_id, QED_SB_TYPE_STORAGE);
+ 	if (ret) {
++		dma_free_coherent(&qedi->pdev->dev, sizeof(*sb_virt), sb_virt, sb_phys);
+ 		QEDI_ERR(&qedi->dbg_ctx,
+ 			 "Status block initialization failed for id = %d.\n",
+ 			  sb_id);
+diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
+index f86be197fedd04..84334ab39c8107 100644
+--- a/drivers/scsi/sg.c
++++ b/drivers/scsi/sg.c
+@@ -307,10 +307,6 @@ sg_open(struct inode *inode, struct file *filp)
+ 	if (retval)
+ 		goto sg_put;
+ 
+-	retval = scsi_autopm_get_device(device);
+-	if (retval)
+-		goto sdp_put;
+-
+ 	/* scsi_block_when_processing_errors() may block so bypass
+ 	 * check if O_NONBLOCK. Permits SCSI commands to be issued
+ 	 * during error recovery. Tread carefully. */
+@@ -318,7 +314,7 @@ sg_open(struct inode *inode, struct file *filp)
+ 	      scsi_block_when_processing_errors(device))) {
+ 		retval = -ENXIO;
+ 		/* we are in error recovery for this device */
+-		goto error_out;
++		goto sdp_put;
+ 	}
+ 
+ 	mutex_lock(&sdp->open_rel_lock);
+@@ -371,8 +367,6 @@ sg_open(struct inode *inode, struct file *filp)
+ 	}
+ error_mutex_locked:
+ 	mutex_unlock(&sdp->open_rel_lock);
+-error_out:
+-	scsi_autopm_put_device(device);
+ sdp_put:
+ 	kref_put(&sdp->d_ref, sg_device_destroy);
+ 	scsi_device_put(device);
+@@ -392,7 +386,6 @@ sg_release(struct inode *inode, struct file *filp)
+ 	SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_release\n"));
+ 
+ 	mutex_lock(&sdp->open_rel_lock);
+-	scsi_autopm_put_device(sdp->device);
+ 	kref_put(&sfp->f_ref, sg_remove_sfp);
+ 	sdp->open_cnt--;
+ 
+diff --git a/drivers/sh/intc/core.c b/drivers/sh/intc/core.c
+index 74350b5871dc8e..ea571eeb307878 100644
+--- a/drivers/sh/intc/core.c
++++ b/drivers/sh/intc/core.c
+@@ -209,7 +209,6 @@ int __init register_intc_controller(struct intc_desc *desc)
+ 		goto err0;
+ 
+ 	INIT_LIST_HEAD(&d->list);
+-	list_add_tail(&d->list, &intc_list);
+ 
+ 	raw_spin_lock_init(&d->lock);
+ 	INIT_RADIX_TREE(&d->tree, GFP_ATOMIC);
+@@ -369,6 +368,7 @@ int __init register_intc_controller(struct intc_desc *desc)
+ 
+ 	d->skip_suspend = desc->skip_syscore_suspend;
+ 
++	list_add_tail(&d->list, &intc_list);
+ 	nr_intc_controllers++;
+ 
+ 	return 0;
+diff --git a/drivers/soc/fsl/qe/qmc.c b/drivers/soc/fsl/qe/qmc.c
+index 19cc581b06d0c8..b3f773e135fd49 100644
+--- a/drivers/soc/fsl/qe/qmc.c
++++ b/drivers/soc/fsl/qe/qmc.c
+@@ -2004,8 +2004,10 @@ static int qmc_probe(struct platform_device *pdev)
+ 
+ 	/* Set the irq handler */
+ 	irq = platform_get_irq(pdev, 0);
+-	if (irq < 0)
++	if (irq < 0) {
++		ret = irq;
+ 		goto err_exit_xcc;
++	}
+ 	ret = devm_request_irq(qmc->dev, irq, qmc_irq_handler, 0, "qmc", qmc);
+ 	if (ret < 0)
+ 		goto err_exit_xcc;
+diff --git a/drivers/soc/fsl/rcpm.c b/drivers/soc/fsl/rcpm.c
+index 3d0cae30c769ea..06bd94b29fb321 100644
+--- a/drivers/soc/fsl/rcpm.c
++++ b/drivers/soc/fsl/rcpm.c
+@@ -36,6 +36,7 @@ static void copy_ippdexpcr1_setting(u32 val)
+ 		return;
+ 
+ 	regs = of_iomap(np, 0);
++	of_node_put(np);
+ 	if (!regs)
+ 		return;
+ 
+diff --git a/drivers/soc/qcom/qcom-geni-se.c b/drivers/soc/qcom/qcom-geni-se.c
+index 2e8f24d5da80b6..4cb959106efa9e 100644
+--- a/drivers/soc/qcom/qcom-geni-se.c
++++ b/drivers/soc/qcom/qcom-geni-se.c
+@@ -585,7 +585,8 @@ int geni_se_clk_tbl_get(struct geni_se *se, unsigned long **tbl)
+ 
+ 	for (i = 0; i < MAX_CLK_PERF_LEVEL; i++) {
+ 		freq = clk_round_rate(se->clk, freq + 1);
+-		if (freq <= 0 || freq == se->clk_perf_tbl[i - 1])
++		if (freq <= 0 ||
++		    (i > 0 && freq == se->clk_perf_tbl[i - 1]))
+ 			break;
+ 		se->clk_perf_tbl[i] = freq;
+ 	}
+diff --git a/drivers/soc/ti/smartreflex.c b/drivers/soc/ti/smartreflex.c
+index d6219060b616d6..38add2ab561372 100644
+--- a/drivers/soc/ti/smartreflex.c
++++ b/drivers/soc/ti/smartreflex.c
+@@ -202,10 +202,10 @@ static int sr_late_init(struct omap_sr *sr_info)
+ 
+ 	if (sr_class->notify && sr_class->notify_flags && sr_info->irq) {
+ 		ret = devm_request_irq(&sr_info->pdev->dev, sr_info->irq,
+-				       sr_interrupt, 0, sr_info->name, sr_info);
++				       sr_interrupt, IRQF_NO_AUTOEN,
++				       sr_info->name, sr_info);
+ 		if (ret)
+ 			goto error;
+-		disable_irq(sr_info->irq);
+ 	}
+ 
+ 	return ret;
+diff --git a/drivers/soc/xilinx/xlnx_event_manager.c b/drivers/soc/xilinx/xlnx_event_manager.c
+index f529e1346247cc..85df6b9c04ee69 100644
+--- a/drivers/soc/xilinx/xlnx_event_manager.c
++++ b/drivers/soc/xilinx/xlnx_event_manager.c
+@@ -188,8 +188,10 @@ static int xlnx_add_cb_for_suspend(event_cb_func_t cb_fun, void *data)
+ 	INIT_LIST_HEAD(&eve_data->cb_list_head);
+ 
+ 	cb_data = kmalloc(sizeof(*cb_data), GFP_KERNEL);
+-	if (!cb_data)
++	if (!cb_data) {
++		kfree(eve_data);
+ 		return -ENOMEM;
++	}
+ 	cb_data->eve_cb = cb_fun;
+ 	cb_data->agent_data = data;
+ 
+diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c
+index 95cdfc28361ef7..caecb2ad2a150d 100644
+--- a/drivers/spi/atmel-quadspi.c
++++ b/drivers/spi/atmel-quadspi.c
+@@ -183,7 +183,7 @@ static const char *atmel_qspi_reg_name(u32 offset, char *tmp, size_t sz)
+ 	case QSPI_MR:
+ 		return "MR";
+ 	case QSPI_RD:
+-		return "MR";
++		return "RD";
+ 	case QSPI_TD:
+ 		return "TD";
+ 	case QSPI_SR:
+diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
+index 977e8b55c82b7d..9573b8fa4fbfc6 100644
+--- a/drivers/spi/spi-fsl-lpspi.c
++++ b/drivers/spi/spi-fsl-lpspi.c
+@@ -891,7 +891,7 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
+ 		return ret;
+ 	}
+ 
+-	ret = devm_request_irq(&pdev->dev, irq, fsl_lpspi_isr, 0,
++	ret = devm_request_irq(&pdev->dev, irq, fsl_lpspi_isr, IRQF_NO_AUTOEN,
+ 			       dev_name(&pdev->dev), fsl_lpspi);
+ 	if (ret) {
+ 		dev_err(&pdev->dev, "can't get irq%d: %d\n", irq, ret);
+@@ -948,14 +948,10 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
+ 	ret = fsl_lpspi_dma_init(&pdev->dev, fsl_lpspi, controller);
+ 	if (ret == -EPROBE_DEFER)
+ 		goto out_pm_get;
+-	if (ret < 0)
++	if (ret < 0) {
+ 		dev_warn(&pdev->dev, "dma setup error %d, use pio\n", ret);
+-	else
+-		/*
+-		 * disable LPSPI module IRQ when enable DMA mode successfully,
+-		 * to prevent the unexpected LPSPI module IRQ events.
+-		 */
+-		disable_irq(irq);
++		enable_irq(irq);
++	}
+ 
+ 	ret = devm_spi_register_controller(&pdev->dev, controller);
+ 	if (ret < 0) {
+diff --git a/drivers/spi/spi-tegra210-quad.c b/drivers/spi/spi-tegra210-quad.c
+index afbd64a217eb06..43f11b0e9e765c 100644
+--- a/drivers/spi/spi-tegra210-quad.c
++++ b/drivers/spi/spi-tegra210-quad.c
+@@ -341,7 +341,7 @@ tegra_qspi_fill_tx_fifo_from_client_txbuf(struct tegra_qspi *tqspi, struct spi_t
+ 		for (count = 0; count < max_n_32bit; count++) {
+ 			u32 x = 0;
+ 
+-			for (i = 0; len && (i < bytes_per_word); i++, len--)
++			for (i = 0; len && (i < min(4, bytes_per_word)); i++, len--)
+ 				x |= (u32)(*tx_buf++) << (i * 8);
+ 			tegra_qspi_writel(tqspi, x, QSPI_TX_FIFO);
+ 		}
+diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c
+index fcd0ca99668419..b9df39e06e7cd4 100644
+--- a/drivers/spi/spi-zynqmp-gqspi.c
++++ b/drivers/spi/spi-zynqmp-gqspi.c
+@@ -1351,6 +1351,7 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
+ 
+ clk_dis_all:
+ 	pm_runtime_disable(&pdev->dev);
++	pm_runtime_dont_use_autosuspend(&pdev->dev);
+ 	pm_runtime_put_noidle(&pdev->dev);
+ 	pm_runtime_set_suspended(&pdev->dev);
+ 	clk_disable_unprepare(xqspi->refclk);
+@@ -1379,6 +1380,7 @@ static void zynqmp_qspi_remove(struct platform_device *pdev)
+ 	zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, 0x0);
+ 
+ 	pm_runtime_disable(&pdev->dev);
++	pm_runtime_dont_use_autosuspend(&pdev->dev);
+ 	pm_runtime_put_noidle(&pdev->dev);
+ 	pm_runtime_set_suspended(&pdev->dev);
+ 	clk_disable_unprepare(xqspi->refclk);
+diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
+index c1dad30a4528b7..0f3e6e2c24743c 100644
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -424,6 +424,16 @@ static int spi_probe(struct device *dev)
+ 			spi->irq = 0;
+ 	}
+ 
++	if (has_acpi_companion(dev) && spi->irq < 0) {
++		struct acpi_device *adev = to_acpi_device_node(dev->fwnode);
++
++		spi->irq = acpi_dev_gpio_irq_get(adev, 0);
++		if (spi->irq == -EPROBE_DEFER)
++			return -EPROBE_DEFER;
++		if (spi->irq < 0)
++			spi->irq = 0;
++	}
++
+ 	ret = dev_pm_domain_attach(dev, true);
+ 	if (ret)
+ 		return ret;
+@@ -2869,9 +2879,6 @@ static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
+ 	acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
+ 			  sizeof(spi->modalias));
+ 
+-	if (spi->irq < 0)
+-		spi->irq = acpi_dev_gpio_irq_get(adev, 0);
+-
+ 	acpi_device_set_enumerated(adev);
+ 
+ 	adev->power.flags.ignore_parent = true;
+diff --git a/drivers/staging/media/atomisp/pci/sh_css_params.c b/drivers/staging/media/atomisp/pci/sh_css_params.c
+index 232744973ab887..b1feb6f6ebe895 100644
+--- a/drivers/staging/media/atomisp/pci/sh_css_params.c
++++ b/drivers/staging/media/atomisp/pci/sh_css_params.c
+@@ -4181,6 +4181,8 @@ ia_css_3a_statistics_allocate(const struct ia_css_3a_grid_info *grid)
+ 		goto err;
+ 	/* No weighted histogram, no structure, treat the histogram data as a byte dump in a byte array */
+ 	me->rgby_data = kvmalloc(sizeof_hmem(HMEM0_ID), GFP_KERNEL);
++	if (!me->rgby_data)
++		goto err;
+ 
+ 	IA_CSS_LEAVE("return=%p", me);
+ 	return me;
+diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+index 6c488b1e262485..5fab33adf58ed0 100644
+--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
++++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+@@ -1715,7 +1715,6 @@ MODULE_DEVICE_TABLE(of, vchiq_of_match);
+ 
+ static int vchiq_probe(struct platform_device *pdev)
+ {
+-	struct device_node *fw_node;
+ 	const struct vchiq_platform_info *info;
+ 	struct vchiq_drv_mgmt *mgmt;
+ 	int ret;
+@@ -1724,8 +1723,8 @@ static int vchiq_probe(struct platform_device *pdev)
+ 	if (!info)
+ 		return -EINVAL;
+ 
+-	fw_node = of_find_compatible_node(NULL, NULL,
+-					  "raspberrypi,bcm2835-firmware");
++	struct device_node *fw_node __free(device_node) =
++		of_find_compatible_node(NULL, NULL, "raspberrypi,bcm2835-firmware");
+ 	if (!fw_node) {
+ 		dev_err(&pdev->dev, "Missing firmware node\n");
+ 		return -ENOENT;
+@@ -1736,7 +1735,6 @@ static int vchiq_probe(struct platform_device *pdev)
+ 		return -ENOMEM;
+ 
+ 	mgmt->fw = devm_rpi_firmware_get(&pdev->dev, fw_node);
+-	of_node_put(fw_node);
+ 	if (!mgmt->fw)
+ 		return -EPROBE_DEFER;
+ 
+diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
+index 440e07b1d5cdb1..287ac5b0495f9a 100644
+--- a/drivers/target/target_core_pscsi.c
++++ b/drivers/target/target_core_pscsi.c
+@@ -369,7 +369,7 @@ static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd)
+ 	bdev_file = bdev_file_open_by_path(dev->udev_path,
+ 				BLK_OPEN_WRITE | BLK_OPEN_READ, pdv, NULL);
+ 	if (IS_ERR(bdev_file)) {
+-		pr_err("pSCSI: bdev_open_by_path() failed\n");
++		pr_err("pSCSI: bdev_file_open_by_path() failed\n");
+ 		scsi_device_put(sd);
+ 		return PTR_ERR(bdev_file);
+ 	}
+diff --git a/drivers/thermal/testing/zone.c b/drivers/thermal/testing/zone.c
+index c6d8c66f40f980..1f01f495270313 100644
+--- a/drivers/thermal/testing/zone.c
++++ b/drivers/thermal/testing/zone.c
+@@ -185,7 +185,7 @@ static void tt_add_tz_work_fn(struct work_struct *work)
+ int tt_add_tz(void)
+ {
+ 	struct tt_thermal_zone *tt_zone __free(kfree);
+-	struct tt_work *tt_work __free(kfree);
++	struct tt_work *tt_work __free(kfree) = NULL;
+ 	int ret;
+ 
+ 	tt_zone = kzalloc(sizeof(*tt_zone), GFP_KERNEL);
+@@ -237,7 +237,7 @@ static void tt_zone_unregister_tz(struct tt_thermal_zone *tt_zone)
+ 
+ int tt_del_tz(const char *arg)
+ {
+-	struct tt_work *tt_work __free(kfree);
++	struct tt_work *tt_work __free(kfree) = NULL;
+ 	struct tt_thermal_zone *tt_zone, *aux;
+ 	int ret;
+ 	int id;
+@@ -310,6 +310,9 @@ static void tt_put_tt_zone(struct tt_thermal_zone *tt_zone)
+ 	tt_zone->refcount--;
+ }
+ 
++DEFINE_FREE(put_tt_zone, struct tt_thermal_zone *,
++	    if (!IS_ERR_OR_NULL(_T)) tt_put_tt_zone(_T))
++
+ static void tt_zone_add_trip_work_fn(struct work_struct *work)
+ {
+ 	struct tt_work *tt_work = tt_work_of_work(work);
+@@ -332,9 +335,9 @@ static void tt_zone_add_trip_work_fn(struct work_struct *work)
+ 
+ int tt_zone_add_trip(const char *arg)
+ {
++	struct tt_thermal_zone *tt_zone __free(put_tt_zone) = NULL;
++	struct tt_trip *tt_trip __free(kfree) = NULL;
+ 	struct tt_work *tt_work __free(kfree);
+-	struct tt_trip *tt_trip __free(kfree);
+-	struct tt_thermal_zone *tt_zone;
+ 	int id;
+ 
+ 	tt_work = kzalloc(sizeof(*tt_work), GFP_KERNEL);
+@@ -350,10 +353,8 @@ int tt_zone_add_trip(const char *arg)
+ 		return PTR_ERR(tt_zone);
+ 
+ 	id = ida_alloc(&tt_zone->ida, GFP_KERNEL);
+-	if (id < 0) {
+-		tt_put_tt_zone(tt_zone);
++	if (id < 0)
+ 		return id;
+-	}
+ 
+ 	tt_trip->trip.type = THERMAL_TRIP_ACTIVE;
+ 	tt_trip->trip.temperature = THERMAL_TEMP_INVALID;
+@@ -366,7 +367,7 @@ int tt_zone_add_trip(const char *arg)
+ 	tt_zone->num_trips++;
+ 
+ 	INIT_WORK(&tt_work->work, tt_zone_add_trip_work_fn);
+-	tt_work->tt_zone = tt_zone;
++	tt_work->tt_zone = no_free_ptr(tt_zone);
+ 	tt_work->tt_trip = no_free_ptr(tt_trip);
+ 	schedule_work(&(no_free_ptr(tt_work)->work));
+ 
+@@ -391,7 +392,7 @@ static struct thermal_zone_device_ops tt_zone_ops = {
+ 
+ static int tt_zone_register_tz(struct tt_thermal_zone *tt_zone)
+ {
+-	struct thermal_trip *trips __free(kfree);
++	struct thermal_trip *trips __free(kfree) = NULL;
+ 	struct thermal_zone_device *tz;
+ 	struct tt_trip *tt_trip;
+ 	int i;
+@@ -425,23 +426,18 @@ static int tt_zone_register_tz(struct tt_thermal_zone *tt_zone)
+ 
+ int tt_zone_reg(const char *arg)
+ {
+-	struct tt_thermal_zone *tt_zone;
+-	int ret;
++	struct tt_thermal_zone *tt_zone __free(put_tt_zone);
+ 
+ 	tt_zone = tt_get_tt_zone(arg);
+ 	if (IS_ERR(tt_zone))
+ 		return PTR_ERR(tt_zone);
+ 
+-	ret = tt_zone_register_tz(tt_zone);
+-
+-	tt_put_tt_zone(tt_zone);
+-
+-	return ret;
++	return tt_zone_register_tz(tt_zone);
+ }
+ 
+ int tt_zone_unreg(const char *arg)
+ {
+-	struct tt_thermal_zone *tt_zone;
++	struct tt_thermal_zone *tt_zone __free(put_tt_zone);
+ 
+ 	tt_zone = tt_get_tt_zone(arg);
+ 	if (IS_ERR(tt_zone))
+@@ -449,8 +445,6 @@ int tt_zone_unreg(const char *arg)
+ 
+ 	tt_zone_unregister_tz(tt_zone);
+ 
+-	tt_put_tt_zone(tt_zone);
+-
+ 	return 0;
+ }
+ 
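
The = NULL initializers added above matter because a pointer annotated with __free() from <linux/cleanup.h> is released on every scope exit, including returns taken before the first assignment; DEFINE_FREE(put_tt_zone, ...) extends the same mechanism to a custom release function, and no_free_ptr() disarms the cleanup when ownership is handed off. A minimal sketch, with struct foo standing in for the real types:

	#include <linux/cleanup.h>
	#include <linux/slab.h>

	struct foo { int x; };

	static struct foo *alloc_foo(void)
	{
		struct foo *p __free(kfree) = NULL;	/* kfree(NULL) is a no-op */

		p = kzalloc(sizeof(*p), GFP_KERNEL);
		if (!p)
			return NULL;			/* cleanup frees nothing */
		p->x = 1;
		return no_free_ptr(p);			/* caller now owns p */
	}
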
+diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
+index 8f03985f971c30..1d2f2b307bac50 100644
+--- a/drivers/thermal/thermal_core.c
++++ b/drivers/thermal/thermal_core.c
+@@ -40,6 +40,8 @@ static DEFINE_MUTEX(thermal_governor_lock);
+ 
+ static struct thermal_governor *def_governor;
+ 
++static bool thermal_pm_suspended;
++
+ /*
+  * Governor section: set of functions to handle thermal governors
+  *
+@@ -547,7 +549,7 @@ void __thermal_zone_device_update(struct thermal_zone_device *tz,
+ 	int low = -INT_MAX, high = INT_MAX;
+ 	int temp, ret;
+ 
+-	if (tz->suspended || tz->mode != THERMAL_DEVICE_ENABLED)
++	if (tz->state != TZ_STATE_READY || tz->mode != THERMAL_DEVICE_ENABLED)
+ 		return;
+ 
+ 	ret = __thermal_zone_get_temp(tz, &temp);
+@@ -1332,6 +1334,24 @@ int thermal_zone_get_crit_temp(struct thermal_zone_device *tz, int *temp)
+ }
+ EXPORT_SYMBOL_GPL(thermal_zone_get_crit_temp);
+ 
++static void thermal_zone_init_complete(struct thermal_zone_device *tz)
++{
++	mutex_lock(&tz->lock);
++
++	tz->state &= ~TZ_STATE_FLAG_INIT;
++	/*
++	 * If system suspend or resume is in progress at this point, the
++	 * new thermal zone needs to be marked as suspended because
++	 * thermal_pm_notify() has run already.
++	 */
++	if (thermal_pm_suspended)
++		tz->state |= TZ_STATE_FLAG_SUSPENDED;
++
++	__thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
++
++	mutex_unlock(&tz->lock);
++}
++
+ /**
+  * thermal_zone_device_register_with_trips() - register a new thermal zone device
+  * @type:	the thermal zone device type
+@@ -1451,6 +1471,8 @@ thermal_zone_device_register_with_trips(const char *type,
+ 	tz->passive_delay_jiffies = msecs_to_jiffies(passive_delay);
+ 	tz->recheck_delay_jiffies = THERMAL_RECHECK_DELAY;
+ 
++	tz->state = TZ_STATE_FLAG_INIT;
++
+ 	/* sys I/F */
+ 	/* Add nodes that are always present via .groups */
+ 	result = thermal_zone_create_device_groups(tz);
+@@ -1465,6 +1487,7 @@ thermal_zone_device_register_with_trips(const char *type,
+ 		thermal_zone_destroy_device_groups(tz);
+ 		goto remove_id;
+ 	}
++	thermal_zone_device_init(tz);
+ 	result = device_register(&tz->device);
+ 	if (result)
+ 		goto release_device;
+@@ -1501,12 +1524,9 @@ thermal_zone_device_register_with_trips(const char *type,
+ 	list_for_each_entry(cdev, &thermal_cdev_list, node)
+ 		thermal_zone_cdev_bind(tz, cdev);
+ 
+-	mutex_unlock(&thermal_list_lock);
++	thermal_zone_init_complete(tz);
+ 
+-	thermal_zone_device_init(tz);
+-	/* Update the new thermal zone and mark it as already updated. */
+-	if (atomic_cmpxchg(&tz->need_update, 1, 0))
+-		thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
++	mutex_unlock(&thermal_list_lock);
+ 
+ 	thermal_notify_tz_create(tz);
+ 
+@@ -1662,7 +1682,7 @@ static void thermal_zone_device_resume(struct work_struct *work)
+ 
+ 	mutex_lock(&tz->lock);
+ 
+-	tz->suspended = false;
++	tz->state &= ~(TZ_STATE_FLAG_SUSPENDED | TZ_STATE_FLAG_RESUMING);
+ 
+ 	thermal_debug_tz_resume(tz);
+ 	thermal_zone_device_init(tz);
+@@ -1670,7 +1690,48 @@ static void thermal_zone_device_resume(struct work_struct *work)
+ 	__thermal_zone_device_update(tz, THERMAL_TZ_RESUME);
+ 
+ 	complete(&tz->resume);
+-	tz->resuming = false;
++
++	mutex_unlock(&tz->lock);
++}
++
++static void thermal_zone_pm_prepare(struct thermal_zone_device *tz)
++{
++	mutex_lock(&tz->lock);
++
++	if (tz->state & TZ_STATE_FLAG_RESUMING) {
++		/*
++		 * thermal_zone_device_resume() queued up for this zone has not
++		 * acquired the lock yet, so release it to let the function run
++		 * and wait until it has done the work.
++		 */
++		mutex_unlock(&tz->lock);
++
++		wait_for_completion(&tz->resume);
++
++		mutex_lock(&tz->lock);
++	}
++
++	tz->state |= TZ_STATE_FLAG_SUSPENDED;
++
++	mutex_unlock(&tz->lock);
++}
++
++static void thermal_zone_pm_complete(struct thermal_zone_device *tz)
++{
++	mutex_lock(&tz->lock);
++
++	cancel_delayed_work(&tz->poll_queue);
++
++	reinit_completion(&tz->resume);
++	tz->state |= TZ_STATE_FLAG_RESUMING;
++
++	/*
++	 * Replace the work function with the resume one, which will restore the
++	 * original work function and schedule the polling work if needed.
++	 */
++	INIT_DELAYED_WORK(&tz->poll_queue, thermal_zone_device_resume);
++	/* Queue up the work without a delay. */
++	mod_delayed_work(system_freezable_power_efficient_wq, &tz->poll_queue, 0);
+ 
+ 	mutex_unlock(&tz->lock);
+ }
+@@ -1686,27 +1747,10 @@ static int thermal_pm_notify(struct notifier_block *nb,
+ 	case PM_SUSPEND_PREPARE:
+ 		mutex_lock(&thermal_list_lock);
+ 
+-		list_for_each_entry(tz, &thermal_tz_list, node) {
+-			mutex_lock(&tz->lock);
+-
+-			if (tz->resuming) {
+-				/*
+-				 * thermal_zone_device_resume() queued up for
+-				 * this zone has not acquired the lock yet, so
+-				 * release it to let the function run and wait
+-				 * util it has done the work.
+-				 */
+-				mutex_unlock(&tz->lock);
+-
+-				wait_for_completion(&tz->resume);
+-
+-				mutex_lock(&tz->lock);
+-			}
++		thermal_pm_suspended = true;
+ 
+-			tz->suspended = true;
+-
+-			mutex_unlock(&tz->lock);
+-		}
++		list_for_each_entry(tz, &thermal_tz_list, node)
++			thermal_zone_pm_prepare(tz);
+ 
+ 		mutex_unlock(&thermal_list_lock);
+ 		break;
+@@ -1715,27 +1759,10 @@ static int thermal_pm_notify(struct notifier_block *nb,
+ 	case PM_POST_SUSPEND:
+ 		mutex_lock(&thermal_list_lock);
+ 
+-		list_for_each_entry(tz, &thermal_tz_list, node) {
+-			mutex_lock(&tz->lock);
+-
+-			cancel_delayed_work(&tz->poll_queue);
++		thermal_pm_suspended = false;
+ 
+-			reinit_completion(&tz->resume);
+-			tz->resuming = true;
+-
+-			/*
+-			 * Replace the work function with the resume one, which
+-			 * will restore the original work function and schedule
+-			 * the polling work if needed.
+-			 */
+-			INIT_DELAYED_WORK(&tz->poll_queue,
+-					  thermal_zone_device_resume);
+-			/* Queue up the work without a delay. */
+-			mod_delayed_work(system_freezable_power_efficient_wq,
+-					 &tz->poll_queue, 0);
+-
+-			mutex_unlock(&tz->lock);
+-		}
++		list_for_each_entry(tz, &thermal_tz_list, node)
++			thermal_zone_pm_complete(tz);
+ 
+ 		mutex_unlock(&thermal_list_lock);
+ 		break;
+diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
+index a64d39b1c86b23..421522a2bb9d4c 100644
+--- a/drivers/thermal/thermal_core.h
++++ b/drivers/thermal/thermal_core.h
+@@ -61,6 +61,12 @@ struct thermal_governor {
+ 	struct list_head	governor_list;
+ };
+ 
++#define	TZ_STATE_FLAG_SUSPENDED	BIT(0)
++#define	TZ_STATE_FLAG_RESUMING	BIT(1)
++#define	TZ_STATE_FLAG_INIT	BIT(2)
++
++#define TZ_STATE_READY		0
++
+ /**
+  * struct thermal_zone_device - structure for a thermal zone
+  * @id:		unique id number for each thermal zone
+@@ -100,8 +106,7 @@ struct thermal_governor {
+  * @node:	node in thermal_tz_list (in thermal_core.c)
+  * @poll_queue:	delayed work for polling
+  * @notify_event: Last notification event
+- * @suspended: thermal zone suspend indicator
+- * @resuming:	indicates whether or not thermal zone resume is in progress
++ * @state: 	current state of the thermal zone
+  * @trips:	array of struct thermal_trip objects
+  */
+ struct thermal_zone_device {
+@@ -134,8 +139,7 @@ struct thermal_zone_device {
+ 	struct list_head node;
+ 	struct delayed_work poll_queue;
+ 	enum thermal_notify_event notify_event;
+-	bool suspended;
+-	bool resuming;
++	u8 state;
+ #ifdef CONFIG_THERMAL_DEBUGFS
+ 	struct thermal_debugfs *debugfs;
+ #endif
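
A condensed sketch of how the new bitmask replaces the two booleans (a fragment for illustration, not a literal excerpt): a zone is serviced only while no flag is set, i.e. while state == TZ_STATE_READY (0).

	u8 state = TZ_STATE_FLAG_INIT;		/* set at registration     */

	state &= ~TZ_STATE_FLAG_INIT;		/* registration finished   */
	state |= TZ_STATE_FLAG_SUSPENDED;	/* system suspend entered  */
	state &= ~TZ_STATE_FLAG_SUSPENDED;	/* resume work completed   */

	if (state != TZ_STATE_READY)		/* any flag blocks updates */
		return;
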
+diff --git a/drivers/tty/serial/8250/8250_fintek.c b/drivers/tty/serial/8250/8250_fintek.c
+index e2aa2a1a02ddf5..ecbce226b8747e 100644
+--- a/drivers/tty/serial/8250/8250_fintek.c
++++ b/drivers/tty/serial/8250/8250_fintek.c
+@@ -21,6 +21,7 @@
+ #define CHIP_ID_F81866 0x1010
+ #define CHIP_ID_F81966 0x0215
+ #define CHIP_ID_F81216AD 0x1602
++#define CHIP_ID_F81216E 0x1617
+ #define CHIP_ID_F81216H 0x0501
+ #define CHIP_ID_F81216 0x0802
+ #define VENDOR_ID1 0x23
+@@ -158,6 +159,7 @@ static int fintek_8250_check_id(struct fintek_8250 *pdata)
+ 	case CHIP_ID_F81866:
+ 	case CHIP_ID_F81966:
+ 	case CHIP_ID_F81216AD:
++	case CHIP_ID_F81216E:
+ 	case CHIP_ID_F81216H:
+ 	case CHIP_ID_F81216:
+ 		break;
+@@ -181,6 +183,7 @@ static int fintek_8250_get_ldn_range(struct fintek_8250 *pdata, int *min,
+ 		return 0;
+ 
+ 	case CHIP_ID_F81216AD:
++	case CHIP_ID_F81216E:
+ 	case CHIP_ID_F81216H:
+ 	case CHIP_ID_F81216:
+ 		*min = F81216_LDN_LOW;
+@@ -250,6 +253,7 @@ static void fintek_8250_set_irq_mode(struct fintek_8250 *pdata, bool is_level)
+ 		break;
+ 
+ 	case CHIP_ID_F81216AD:
++	case CHIP_ID_F81216E:
+ 	case CHIP_ID_F81216H:
+ 	case CHIP_ID_F81216:
+ 		sio_write_mask_reg(pdata, FINTEK_IRQ_MODE, IRQ_SHARE,
+@@ -263,7 +267,8 @@ static void fintek_8250_set_irq_mode(struct fintek_8250 *pdata, bool is_level)
+ static void fintek_8250_set_max_fifo(struct fintek_8250 *pdata)
+ {
+ 	switch (pdata->pid) {
+-	case CHIP_ID_F81216H: /* 128Bytes FIFO */
++	case CHIP_ID_F81216E: /* 128Bytes FIFO */
++	case CHIP_ID_F81216H:
+ 	case CHIP_ID_F81966:
+ 	case CHIP_ID_F81866:
+ 		sio_write_mask_reg(pdata, FIFO_CTRL,
+@@ -297,6 +302,7 @@ static void fintek_8250_set_termios(struct uart_port *port,
+ 		goto exit;
+ 
+ 	switch (pdata->pid) {
++	case CHIP_ID_F81216E:
+ 	case CHIP_ID_F81216H:
+ 		reg = RS485;
+ 		break;
+@@ -346,6 +352,7 @@ static void fintek_8250_set_termios_handler(struct uart_8250_port *uart)
+ 	struct fintek_8250 *pdata = uart->port.private_data;
+ 
+ 	switch (pdata->pid) {
++	case CHIP_ID_F81216E:
+ 	case CHIP_ID_F81216H:
+ 	case CHIP_ID_F81966:
+ 	case CHIP_ID_F81866:
+@@ -438,6 +445,11 @@ static void fintek_8250_set_rs485_handler(struct uart_8250_port *uart)
+ 			uart->port.rs485_supported = fintek_8250_rs485_supported;
+ 		break;
+ 
++	case CHIP_ID_F81216E: /* F81216E does not support RS485 delays */
++		uart->port.rs485_config = fintek_8250_rs485_config;
++		uart->port.rs485_supported = fintek_8250_rs485_supported;
++		break;
++
+ 	default: /* No RS485 Auto direction functional */
+ 		break;
+ 	}
+diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
+index 88b58f44e4e976..0dd68bdbfbcf7c 100644
+--- a/drivers/tty/serial/8250/8250_omap.c
++++ b/drivers/tty/serial/8250/8250_omap.c
+@@ -776,12 +776,12 @@ static void omap_8250_shutdown(struct uart_port *port)
+ 	struct uart_8250_port *up = up_to_u8250p(port);
+ 	struct omap8250_priv *priv = port->private_data;
+ 
++	pm_runtime_get_sync(port->dev);
++
+ 	flush_work(&priv->qos_work);
+ 	if (up->dma)
+ 		omap_8250_rx_dma_flush(up);
+ 
+-	pm_runtime_get_sync(port->dev);
+-
+ 	serial_out(up, UART_OMAP_WER, 0);
+ 	if (priv->habit & UART_HAS_EFR2)
+ 		serial_out(up, UART_OMAP_EFR2, 0x0);
+diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
+index 7d0134ecd82fa5..9529a512cbd40f 100644
+--- a/drivers/tty/serial/amba-pl011.c
++++ b/drivers/tty/serial/amba-pl011.c
+@@ -1819,6 +1819,13 @@ static void pl011_unthrottle_rx(struct uart_port *port)
+ 
+ 	pl011_write(uap->im, uap, REG_IMSC);
+ 
++#ifdef CONFIG_DMA_ENGINE
++	if (uap->using_rx_dma) {
++		uap->dmacr |= UART011_RXDMAE;
++		pl011_write(uap->dmacr, uap, REG_DMACR);
++	}
++#endif
++
+ 	uart_port_unlock_irqrestore(&uap->port, flags);
+ }
+ 
+diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
+index 9771072da177cb..dcb1769c3625cd 100644
+--- a/drivers/tty/tty_io.c
++++ b/drivers/tty/tty_io.c
+@@ -3631,7 +3631,7 @@ static struct ctl_table tty_table[] = {
+ 		.data		= &tty_ldisc_autoload,
+ 		.maxlen		= sizeof(tty_ldisc_autoload),
+ 		.mode		= 0644,
+-		.proc_handler	= proc_dointvec,
++		.proc_handler	= proc_dointvec_minmax,
+ 		.extra1		= SYSCTL_ZERO,
+ 		.extra2		= SYSCTL_ONE,
+ 	},
+diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
+index eab81dfdcc3502..0b9ba338b2654c 100644
+--- a/drivers/usb/dwc3/core.h
++++ b/drivers/usb/dwc3/core.h
+@@ -915,6 +915,7 @@ struct dwc3_hwparams {
+ #define DWC3_MODE(n)		((n) & 0x7)
+ 
+ /* HWPARAMS1 */
++#define DWC3_SPRAM_TYPE(n)	(((n) >> 23) & 1)
+ #define DWC3_NUM_INT(n)		(((n) & (0x3f << 15)) >> 15)
+ 
+ /* HWPARAMS3 */
+@@ -925,6 +926,9 @@ struct dwc3_hwparams {
+ #define DWC3_NUM_IN_EPS(p)	(((p)->hwparams3 &		\
+ 			(DWC3_NUM_IN_EPS_MASK)) >> 18)
+ 
++/* HWPARAMS6 */
++#define DWC3_RAM0_DEPTH(n)	(((n) & (0xffff0000)) >> 16)
++
+ /* HWPARAMS7 */
+ #define DWC3_RAM1_DEPTH(n)	((n) & 0xffff)
+ 
+diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
+index c9533a99e47c89..874497f86499b3 100644
+--- a/drivers/usb/dwc3/ep0.c
++++ b/drivers/usb/dwc3/ep0.c
+@@ -232,7 +232,7 @@ void dwc3_ep0_stall_and_restart(struct dwc3 *dwc)
+ 	/* stall is always issued on EP0 */
+ 	dep = dwc->eps[0];
+ 	__dwc3_gadget_ep_set_halt(dep, 1, false);
+-	dep->flags &= DWC3_EP_RESOURCE_ALLOCATED;
++	dep->flags &= DWC3_EP_RESOURCE_ALLOCATED | DWC3_EP_TRANSFER_STARTED;
+ 	dep->flags |= DWC3_EP_ENABLED;
+ 	dwc->delayed_status = false;
+ 
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index 4959c26d3b71b8..56744b11e67cb9 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -687,6 +687,44 @@ static int dwc3_gadget_calc_tx_fifo_size(struct dwc3 *dwc, int mult)
+ 	return fifo_size;
+ }
+ 
++/**
++ * dwc3_gadget_calc_ram_depth - calculates the ram depth for txfifo
++ * @dwc: pointer to the DWC3 context
++ */
++static int dwc3_gadget_calc_ram_depth(struct dwc3 *dwc)
++{
++	int ram_depth;
++	int fifo_0_start;
++	bool is_single_port_ram;
++
++	/* Check supporting RAM type by HW */
++	is_single_port_ram = DWC3_SPRAM_TYPE(dwc->hwparams.hwparams1);
++
++	/*
++	 * If a single port RAM is utilized, then allocate TxFIFOs from
++	 * RAM0. Otherwise, allocate them from RAM1.
++	 */
++	ram_depth = is_single_port_ram ? DWC3_RAM0_DEPTH(dwc->hwparams.hwparams6) :
++			DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7);
++
++	/*
++	 * In a single port RAM configuration, the available RAM is shared
++	 * between the RX and TX FIFOs. This means that the txfifo can begin
++	 * at a non-zero address.
++	 */
++	if (is_single_port_ram) {
++		u32 reg;
++
++		/* Check if TXFIFOs start at non-zero addr */
++		reg = dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(0));
++		fifo_0_start = DWC3_GTXFIFOSIZ_TXFSTADDR(reg);
++
++		ram_depth -= (fifo_0_start >> 16);
++	}
++
++	return ram_depth;
++}
++
+ /**
+  * dwc3_gadget_clear_tx_fifos - Clears txfifo allocation
+  * @dwc: pointer to the DWC3 context
+@@ -753,7 +791,7 @@ static int dwc3_gadget_resize_tx_fifos(struct dwc3_ep *dep)
+ {
+ 	struct dwc3 *dwc = dep->dwc;
+ 	int fifo_0_start;
+-	int ram1_depth;
++	int ram_depth;
+ 	int fifo_size;
+ 	int min_depth;
+ 	int num_in_ep;
+@@ -773,7 +811,7 @@ static int dwc3_gadget_resize_tx_fifos(struct dwc3_ep *dep)
+ 	if (dep->flags & DWC3_EP_TXFIFO_RESIZED)
+ 		return 0;
+ 
+-	ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7);
++	ram_depth = dwc3_gadget_calc_ram_depth(dwc);
+ 
+ 	if ((dep->endpoint.maxburst > 1 &&
+ 	     usb_endpoint_xfer_bulk(dep->endpoint.desc)) ||
+@@ -794,7 +832,7 @@ static int dwc3_gadget_resize_tx_fifos(struct dwc3_ep *dep)
+ 
+ 	/* Reserve at least one FIFO for the number of IN EPs */
+ 	min_depth = num_in_ep * (fifo + 1);
+-	remaining = ram1_depth - min_depth - dwc->last_fifo_depth;
++	remaining = ram_depth - min_depth - dwc->last_fifo_depth;
+ 	remaining = max_t(int, 0, remaining);
+ 	/*
+ 	 * We've already reserved 1 FIFO per EP, so check what we can fit in
+@@ -820,9 +858,9 @@ static int dwc3_gadget_resize_tx_fifos(struct dwc3_ep *dep)
+ 		dwc->last_fifo_depth += DWC31_GTXFIFOSIZ_TXFDEP(fifo_size);
+ 
+ 	/* Check fifo size allocation doesn't exceed available RAM size. */
+-	if (dwc->last_fifo_depth >= ram1_depth) {
++	if (dwc->last_fifo_depth >= ram_depth) {
+ 		dev_err(dwc->dev, "Fifosize(%d) > RAM size(%d) %s depth:%d\n",
+-			dwc->last_fifo_depth, ram1_depth,
++			dwc->last_fifo_depth, ram_depth,
+ 			dep->endpoint.name, fifo_size);
+ 		if (DWC3_IP_IS(DWC3))
+ 			fifo_size = DWC3_GTXFIFOSIZ_TXFDEP(fifo_size);
+@@ -1177,11 +1215,14 @@ static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep)
+ 	 * pending to be processed by the driver.
+ 	 */
+ 	if (dep->trb_enqueue == dep->trb_dequeue) {
++		struct dwc3_request *req;
++
+ 		/*
+-		 * If there is any request remained in the started_list at
+-		 * this point, that means there is no TRB available.
++		 * If there is any request remained in the started_list with
++		 * If there is any request remaining in the started_list with
+ 		 */
+-		if (!list_empty(&dep->started_list))
++		req = next_request(&dep->started_list);
++		if (req && req->num_trbs)
+ 			return 0;
+ 
+ 		return DWC3_TRB_NUM - 1;
+@@ -1414,8 +1455,8 @@ static int dwc3_prepare_trbs_sg(struct dwc3_ep *dep,
+ 	struct scatterlist *s;
+ 	int		i;
+ 	unsigned int length = req->request.length;
+-	unsigned int remaining = req->request.num_mapped_sgs
+-		- req->num_queued_sgs;
++	unsigned int remaining = req->num_pending_sgs;
++	unsigned int num_queued_sgs = req->request.num_mapped_sgs - remaining;
+ 	unsigned int num_trbs = req->num_trbs;
+ 	bool needs_extra_trb = dwc3_needs_extra_trb(dep, req);
+ 
+@@ -1423,7 +1464,7 @@ static int dwc3_prepare_trbs_sg(struct dwc3_ep *dep,
+ 	 * If we resume preparing the request, then get the remaining length of
+ 	 * the request and resume where we left off.
+ 	 */
+-	for_each_sg(req->request.sg, s, req->num_queued_sgs, i)
++	for_each_sg(req->request.sg, s, num_queued_sgs, i)
+ 		length -= sg_dma_len(s);
+ 
+ 	for_each_sg(sg, s, remaining, i) {
+@@ -3075,7 +3116,7 @@ static int dwc3_gadget_check_config(struct usb_gadget *g)
+ 	struct dwc3 *dwc = gadget_to_dwc(g);
+ 	struct usb_ep *ep;
+ 	int fifo_size = 0;
+-	int ram1_depth;
++	int ram_depth;
+ 	int ep_num = 0;
+ 
+ 	if (!dwc->do_fifo_resize)
+@@ -3098,8 +3139,8 @@ static int dwc3_gadget_check_config(struct usb_gadget *g)
+ 	fifo_size += dwc->max_cfg_eps;
+ 
+ 	/* Check if we can fit a single fifo per endpoint */
+-	ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7);
+-	if (fifo_size > ram1_depth)
++	ram_depth = dwc3_gadget_calc_ram_depth(dwc);
++	if (fifo_size > ram_depth)
+ 		return -ENOMEM;
+ 
+ 	return 0;
+diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
+index f25dd2cb5d03b1..cec86c0c6369ca 100644
+--- a/drivers/usb/gadget/composite.c
++++ b/drivers/usb/gadget/composite.c
+@@ -2111,8 +2111,20 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
+ 			memset(buf, 0, w_length);
+ 			buf[5] = 0x01;
+ 			switch (ctrl->bRequestType & USB_RECIP_MASK) {
++			/*
++			 * The Microsoft CompatID OS Descriptor Spec (w_index = 0x4) and
++			 * Extended Prop OS Desc Spec (w_index = 0x5) state that the
++			 * HighByte of wValue is the InterfaceNumber and the LowByte is
++			 * the PageNumber. This high/low byte ordering is incorrectly
++			 * documented in the Spec. USB analyzer output on the below
++			 * request packets shows the high/low byte inverted, i.e. LowByte
++			 * is the InterfaceNumber and the HighByte is the PageNumber.
++			 * Since we don't support >64KB CompatID/ExtendedProp descriptors,
++			 * PageNumber is set to 0. Hence verify that the HighByte is 0
++			 * for below two cases.
++			 */
+ 			case USB_RECIP_DEVICE:
+-				if (w_index != 0x4 || (w_value & 0xff))
++				if (w_index != 0x4 || (w_value >> 8))
+ 					break;
+ 				buf[6] = w_index;
+ 				/* Number of ext compat interfaces */
+@@ -2128,9 +2140,9 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
+ 				}
+ 				break;
+ 			case USB_RECIP_INTERFACE:
+-				if (w_index != 0x5 || (w_value & 0xff))
++				if (w_index != 0x5 || (w_value >> 8))
+ 					break;
+-				interface = w_value >> 8;
++				interface = w_value & 0xFF;
+ 				if (interface >= MAX_CONFIG_INTERFACES ||
+ 				    !os_desc_cfg->interface[interface])
+ 					break;
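
A sketch of the corrected wValue decode used for both recipients above: the low byte selects the interface, the high byte is the page number, and since descriptors larger than 64KB are not supported only page 0 is valid.

	/* Inside the switch above, for either recipient (a sketch): */
	interface = w_value & 0xff;	/* low byte: interface number */
	if (w_value >> 8)		/* high byte: page number     */
		break;			/* only page 0 is supported   */
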
+diff --git a/drivers/usb/gadget/function/uvc_video.c b/drivers/usb/gadget/function/uvc_video.c
+index 57a851151225de..002bf724d8025d 100644
+--- a/drivers/usb/gadget/function/uvc_video.c
++++ b/drivers/usb/gadget/function/uvc_video.c
+@@ -480,6 +480,10 @@ uvc_video_complete(struct usb_ep *ep, struct usb_request *req)
+ 		 * up later.
+ 		 */
+ 		list_add_tail(&to_queue->list, &video->req_free);
++		/*
++		 * There is a new free request - wake up the pump.
++		 */
++		queue_work(video->async_wq, &video->pump);
+ 	}
+ 
+ 	spin_unlock_irqrestore(&video->req_lock, flags);
+diff --git a/drivers/usb/host/ehci-spear.c b/drivers/usb/host/ehci-spear.c
+index d0e94e4c9fe274..11294f196ee335 100644
+--- a/drivers/usb/host/ehci-spear.c
++++ b/drivers/usb/host/ehci-spear.c
+@@ -105,7 +105,9 @@ static int spear_ehci_hcd_drv_probe(struct platform_device *pdev)
+ 	/* registers start at offset 0x0 */
+ 	hcd_to_ehci(hcd)->caps = hcd->regs;
+ 
+-	clk_prepare_enable(sehci->clk);
++	retval = clk_prepare_enable(sehci->clk);
++	if (retval)
++		goto err_put_hcd;
+ 	retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
+ 	if (retval)
+ 		goto err_stop_ehci;
+@@ -130,8 +132,7 @@ static void spear_ehci_hcd_drv_remove(struct platform_device *pdev)
+ 
+ 	usb_remove_hcd(hcd);
+ 
+-	if (sehci->clk)
+-		clk_disable_unprepare(sehci->clk);
++	clk_disable_unprepare(sehci->clk);
+ 	usb_put_hcd(hcd);
+ }
+ 
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index cb07cee9ed0c75..3ba9902dd2093c 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -395,14 +395,12 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ 		xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
+ 
+ 	if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
+-			pdev->device == PCI_DEVICE_ID_EJ168) {
+-		xhci->quirks |= XHCI_RESET_ON_RESUME;
+-		xhci->quirks |= XHCI_BROKEN_STREAMS;
+-	}
+-	if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
+-			pdev->device == PCI_DEVICE_ID_EJ188) {
++	    (pdev->device == PCI_DEVICE_ID_EJ168 ||
++	     pdev->device == PCI_DEVICE_ID_EJ188)) {
++		xhci->quirks |= XHCI_ETRON_HOST;
+ 		xhci->quirks |= XHCI_RESET_ON_RESUME;
+ 		xhci->quirks |= XHCI_BROKEN_STREAMS;
++		xhci->quirks |= XHCI_NO_SOFT_RETRY;
+ 	}
+ 
+ 	if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 928b93ad1ee866..f318864732f2db 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -52,6 +52,7 @@
+  *   endpoint rings; it generates events on the event ring for these.
+  */
+ 
++#include <linux/jiffies.h>
+ #include <linux/scatterlist.h>
+ #include <linux/slab.h>
+ #include <linux/dma-mapping.h>
+@@ -972,6 +973,13 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep)
+ 	unsigned int		slot_id = ep->vdev->slot_id;
+ 	int			err;
+ 
++	/*
++	 * This is not going to work if the hardware is changing its dequeue
++	 * pointers as we look at them. Completion handler will call us later.
++	 */
++	if (ep->ep_state & SET_DEQ_PENDING)
++		return 0;
++
+ 	xhci = ep->xhci;
+ 
+ 	list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, cancelled_td_list) {
+@@ -1061,6 +1069,19 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep)
+ 	return 0;
+ }
+ 
++/*
++ * Erase queued TDs from transfer ring(s) and give back those the xHC didn't
++ * stop on. If necessary, queue commands to move the xHC off cancelled TDs it
++ * stopped on. Those will be given back later when the commands complete.
++ *
++ * Call under xhci->lock on a stopped endpoint.
++ */
++void xhci_process_cancelled_tds(struct xhci_virt_ep *ep)
++{
++	xhci_invalidate_cancelled_tds(ep);
++	xhci_giveback_invalidated_tds(ep);
++}
++
+ /*
+  * Returns the TD the endpoint ring halted on.
+  * Only call for non-running rings without streams.
+@@ -1151,16 +1172,35 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
+ 			return;
+ 		case EP_STATE_STOPPED:
+ 			/*
+-			 * NEC uPD720200 sometimes sets this state and fails with
+-			 * Context Error while continuing to process TRBs.
+-			 * Be conservative and trust EP_CTX_STATE on other chips.
++			 * Per xHCI 4.6.9, Stop Endpoint command on a Stopped
++			 * EP is a Context State Error, and EP stays Stopped.
++			 *
++			 * But maybe it failed on Halted, and somebody ran Reset
++			 * Endpoint later. EP state is now Stopped and EP_HALTED
++			 * still set because Reset EP handler will run after us.
++			 */
++			if (ep->ep_state & EP_HALTED)
++				break;
++			/*
++			 * On some HCs EP state remains Stopped for some tens of
++			 * us to a few ms or more after a doorbell ring, and any
++			 * new Stop Endpoint fails without aborting the restart.
++			 * This handler may run quickly enough to still see this
++			 * Stopped state, but it will soon change to Running.
++			 *
++			 * Assume this bug on unexpected Stop Endpoint failures.
++			 * Keep retrying until the EP starts and stops again, on
++			 * chips where this is known to help. Wait for 100ms.
+ 			 */
+ 			if (!(xhci->quirks & XHCI_NEC_HOST))
+ 				break;
++			if (time_is_before_jiffies(ep->stop_time + msecs_to_jiffies(100)))
++				break;
+ 			fallthrough;
+ 		case EP_STATE_RUNNING:
+ 			/* Race, HW handled stop ep cmd before ep was running */
+-			xhci_dbg(xhci, "Stop ep completion ctx error, ep is running\n");
++			xhci_dbg(xhci, "Stop ep completion ctx error, ctx_state %d\n",
++					GET_EP_CTX_STATE(ep_ctx));
+ 
+ 			command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
+ 			if (!command) {
+@@ -1339,7 +1379,6 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
+ 	struct xhci_ep_ctx *ep_ctx;
+ 	struct xhci_slot_ctx *slot_ctx;
+ 	struct xhci_td *td, *tmp_td;
+-	bool deferred = false;
+ 
+ 	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
+ 	stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
+@@ -1440,8 +1479,6 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
+ 			xhci_dbg(ep->xhci, "%s: Giveback cancelled URB %p TD\n",
+ 				 __func__, td->urb);
+ 			xhci_td_cleanup(ep->xhci, td, ep_ring, td->status);
+-		} else if (td->cancel_status == TD_CLEARING_CACHE_DEFERRED) {
+-			deferred = true;
+ 		} else {
+ 			xhci_dbg(ep->xhci, "%s: Keep cancelled URB %p TD as cancel_status is %d\n",
+ 				 __func__, td->urb, td->cancel_status);
+@@ -1452,11 +1489,15 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
+ 	ep->queued_deq_seg = NULL;
+ 	ep->queued_deq_ptr = NULL;
+ 
+-	if (deferred) {
+-		/* We have more streams to clear */
++	/* Check for deferred or newly cancelled TDs */
++	if (!list_empty(&ep->cancelled_td_list)) {
+ 		xhci_dbg(ep->xhci, "%s: Pending TDs to clear, continuing with invalidation\n",
+ 			 __func__);
+ 		xhci_invalidate_cancelled_tds(ep);
++		/* Try to restart the endpoint if all is done */
++		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
++		/* Start giving back any TDs invalidated above */
++		xhci_giveback_invalidated_tds(ep);
+ 	} else {
+ 		/* Restart any rings with pending URBs */
+ 		xhci_dbg(ep->xhci, "%s: All TDs cleared, ring doorbell\n", __func__);
+@@ -3727,6 +3768,20 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ 	if (!urb->setup_packet)
+ 		return -EINVAL;
+ 
++	if ((xhci->quirks & XHCI_ETRON_HOST) &&
++	    urb->dev->speed >= USB_SPEED_SUPER) {
++		/*
++		 * If the next available TRB is the Link TRB in the ring segment,
++		 * enqueue a No Op TRB first; this prevents the Setup and Data
++		 * Stage TRBs from being split by the Link TRB.
++		 */
++		if (trb_is_link(ep_ring->enqueue + 1)) {
++			field = TRB_TYPE(TRB_TR_NOOP) | ep_ring->cycle_state;
++			queue_trb(xhci, ep_ring, false, 0, 0,
++					TRB_INTR_TARGET(0), field);
++		}
++	}
++
+ 	/* 1 TRB for setup, 1 for status */
+ 	num_trbs = 2;
+ 	/*
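
The retry window in the Stop Endpoint handler above relies on the standard jiffies helpers: ep->stop_time is stamped with jiffies when xhci_urb_dequeue() queues the first Stop Endpoint command (see the xhci.c hunk below), and retries stop once 100ms have elapsed. The idiom, as a sketch:

	#include <linux/jiffies.h>

	unsigned long start = jiffies;	/* stamped when the command is queued */

	/* ... later, in the completion handler ... */
	if (time_is_before_jiffies(start + msecs_to_jiffies(100)))
		return;	/* window expired: stop retrying */
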
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 899c0effb5d3c1..358ed674f782fb 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -8,6 +8,7 @@
+  * Some code borrowed from the Linux EHCI driver.
+  */
+ 
++#include <linux/jiffies.h>
+ #include <linux/pci.h>
+ #include <linux/iommu.h>
+ #include <linux/iopoll.h>
+@@ -1768,15 +1769,27 @@ static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+ 		}
+ 	}
+ 
+-	/* Queue a stop endpoint command, but only if this is
+-	 * the first cancellation to be handled.
+-	 */
+-	if (!(ep->ep_state & EP_STOP_CMD_PENDING)) {
++	/* These completion handlers will sort out cancelled TDs for us */
++	if (ep->ep_state & (EP_STOP_CMD_PENDING | EP_HALTED | SET_DEQ_PENDING)) {
++		xhci_dbg(xhci, "Not queuing Stop Endpoint on slot %d ep %d in state 0x%x\n",
++				urb->dev->slot_id, ep_index, ep->ep_state);
++		goto done;
++	}
++
++	/* In this case no commands are pending but the endpoint is stopped */
++	if (ep->ep_state & EP_CLEARING_TT) {
++		/* and cancelled TDs can be given back right away */
++		xhci_dbg(xhci, "Invalidating TDs instantly on slot %d ep %d in state 0x%x\n",
++				urb->dev->slot_id, ep_index, ep->ep_state);
++		xhci_process_cancelled_tds(ep);
++	} else {
++		/* Otherwise, queue a new Stop Endpoint command */
+ 		command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
+ 		if (!command) {
+ 			ret = -ENOMEM;
+ 			goto done;
+ 		}
++		ep->stop_time = jiffies;
+ 		ep->ep_state |= EP_STOP_CMD_PENDING;
+ 		xhci_queue_stop_endpoint(xhci, command, urb->dev->slot_id,
+ 					 ep_index, 0);
+@@ -3692,6 +3705,8 @@ void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
+ 				xhci->num_active_eps);
+ }
+ 
++static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev);
++
+ /*
+  * This submits a Reset Device Command, which will set the device state to 0,
+  * set the device address to 0, and disable all the endpoints except the default
+@@ -3762,6 +3777,23 @@ static int xhci_discover_or_reset_device(struct usb_hcd *hcd,
+ 						SLOT_STATE_DISABLED)
+ 		return 0;
+ 
++	if (xhci->quirks & XHCI_ETRON_HOST) {
++		/*
++		 * Obtain a new device slot to inform the xHCI host that the
++		 * USB device has been reset.
++		 */
++		ret = xhci_disable_slot(xhci, udev->slot_id);
++		xhci_free_virt_device(xhci, udev->slot_id);
++		if (!ret) {
++			ret = xhci_alloc_dev(hcd, udev);
++			if (ret == 1)
++				ret = 0;
++			else
++				ret = -EINVAL;
++		}
++		return ret;
++	}
++
+ 	trace_xhci_discover_or_reset_device(slot_ctx);
+ 
+ 	xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index f0fb696d561986..673179047eb82e 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -690,6 +690,7 @@ struct xhci_virt_ep {
+ 	/* Bandwidth checking storage */
+ 	struct xhci_bw_info	bw_info;
+ 	struct list_head	bw_endpoint_list;
++	unsigned long		stop_time;
+ 	/* Isoch Frame ID checking storage */
+ 	int			next_frame_id;
+ 	/* Use new Isoch TRB layout needed for extended TBC support */
+@@ -1624,6 +1625,7 @@ struct xhci_hcd {
+ #define XHCI_ZHAOXIN_HOST	BIT_ULL(46)
+ #define XHCI_WRITE_64_HI_LO	BIT_ULL(47)
+ #define XHCI_CDNS_SCTX_QUIRK	BIT_ULL(48)
++#define XHCI_ETRON_HOST	BIT_ULL(49)
+ 
+ 	unsigned int		num_active_eps;
+ 	unsigned int		limit_active_eps;
+@@ -1913,6 +1915,7 @@ void xhci_ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
+ void xhci_cleanup_command_queue(struct xhci_hcd *xhci);
+ void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring);
+ unsigned int count_trbs(u64 addr, u64 len);
++void xhci_process_cancelled_tds(struct xhci_virt_ep *ep);
+ 
+ /* xHCI roothub code */
+ void xhci_set_link_state(struct xhci_hcd *xhci, struct xhci_port *port,
+diff --git a/drivers/usb/misc/chaoskey.c b/drivers/usb/misc/chaoskey.c
+index 6fb5140e29b9dd..225863321dc479 100644
+--- a/drivers/usb/misc/chaoskey.c
++++ b/drivers/usb/misc/chaoskey.c
+@@ -27,6 +27,8 @@ static struct usb_class_driver chaoskey_class;
+ static int chaoskey_rng_read(struct hwrng *rng, void *data,
+ 			     size_t max, bool wait);
+ 
++static DEFINE_MUTEX(chaoskey_list_lock);
++
+ #define usb_dbg(usb_if, format, arg...) \
+ 	dev_dbg(&(usb_if)->dev, format, ## arg)
+ 
+@@ -233,6 +235,7 @@ static void chaoskey_disconnect(struct usb_interface *interface)
+ 	usb_deregister_dev(interface, &chaoskey_class);
+ 
+ 	usb_set_intfdata(interface, NULL);
++	mutex_lock(&chaoskey_list_lock);
+ 	mutex_lock(&dev->lock);
+ 
+ 	dev->present = false;
+@@ -244,6 +247,7 @@ static void chaoskey_disconnect(struct usb_interface *interface)
+ 	} else
+ 		mutex_unlock(&dev->lock);
+ 
++	mutex_unlock(&chaoskey_list_lock);
+ 	usb_dbg(interface, "disconnect done");
+ }
+ 
+@@ -251,6 +255,7 @@ static int chaoskey_open(struct inode *inode, struct file *file)
+ {
+ 	struct chaoskey *dev;
+ 	struct usb_interface *interface;
++	int rv = 0;
+ 
+ 	/* get the interface from minor number and driver information */
+ 	interface = usb_find_interface(&chaoskey_driver, iminor(inode));
+@@ -266,18 +271,23 @@ static int chaoskey_open(struct inode *inode, struct file *file)
+ 	}
+ 
+ 	file->private_data = dev;
++	mutex_lock(&chaoskey_list_lock);
+ 	mutex_lock(&dev->lock);
+-	++dev->open;
++	if (dev->present)
++		++dev->open;
++	else
++		rv = -ENODEV;
+ 	mutex_unlock(&dev->lock);
++	mutex_unlock(&chaoskey_list_lock);
+ 
+-	usb_dbg(interface, "open success");
+-	return 0;
++	return rv;
+ }
+ 
+ static int chaoskey_release(struct inode *inode, struct file *file)
+ {
+ 	struct chaoskey *dev = file->private_data;
+ 	struct usb_interface *interface;
++	int rv = 0;
+ 
+ 	if (dev == NULL)
+ 		return -ENODEV;
+@@ -286,14 +296,15 @@ static int chaoskey_release(struct inode *inode, struct file *file)
+ 
+ 	usb_dbg(interface, "release");
+ 
++	mutex_lock(&chaoskey_list_lock);
+ 	mutex_lock(&dev->lock);
+ 
+ 	usb_dbg(interface, "open count at release is %d", dev->open);
+ 
+ 	if (dev->open <= 0) {
+ 		usb_dbg(interface, "invalid open count (%d)", dev->open);
+-		mutex_unlock(&dev->lock);
+-		return -ENODEV;
++		rv = -ENODEV;
++		goto bail;
+ 	}
+ 
+ 	--dev->open;
+@@ -302,13 +313,15 @@ static int chaoskey_release(struct inode *inode, struct file *file)
+ 		if (dev->open == 0) {
+ 			mutex_unlock(&dev->lock);
+ 			chaoskey_free(dev);
+-		} else
+-			mutex_unlock(&dev->lock);
+-	} else
+-		mutex_unlock(&dev->lock);
+-
++			goto destruction;
++		}
++	}
++bail:
++	mutex_unlock(&dev->lock);
++destruction:
++	mutex_unlock(&chaoskey_list_lock);
+ 	usb_dbg(interface, "release success");
+-	return 0;
++	return rv;
+ }
+ 
+ static void chaos_read_callback(struct urb *urb)
+diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
+index 6d28467ce35227..365c1006934583 100644
+--- a/drivers/usb/misc/iowarrior.c
++++ b/drivers/usb/misc/iowarrior.c
+@@ -277,28 +277,45 @@ static ssize_t iowarrior_read(struct file *file, char __user *buffer,
+ 	struct iowarrior *dev;
+ 	int read_idx;
+ 	int offset;
++	int retval;
+ 
+ 	dev = file->private_data;
+ 
++	if (file->f_flags & O_NONBLOCK) {
++		retval = mutex_trylock(&dev->mutex);
++		if (!retval)
++			return -EAGAIN;
++	} else {
++		retval = mutex_lock_interruptible(&dev->mutex);
++		if (retval)
++			return -ERESTARTSYS;
++	}
++
+ 	/* verify that the device wasn't unplugged */
+-	if (!dev || !dev->present)
+-		return -ENODEV;
++	if (!dev->present) {
++		retval = -ENODEV;
++		goto exit;
++	}
+ 
+ 	dev_dbg(&dev->interface->dev, "minor %d, count = %zd\n",
+ 		dev->minor, count);
+ 
+ 	/* read count must be packet size (+ time stamp) */
+ 	if ((count != dev->report_size)
+-	    && (count != (dev->report_size + 1)))
+-		return -EINVAL;
++	    && (count != (dev->report_size + 1))) {
++		retval = -EINVAL;
++		goto exit;
++	}
+ 
+ 	/* repeat until no buffer overrun in callback handler occur */
+ 	do {
+ 		atomic_set(&dev->overflow_flag, 0);
+ 		if ((read_idx = read_index(dev)) == -1) {
+ 			/* queue empty */
+-			if (file->f_flags & O_NONBLOCK)
+-				return -EAGAIN;
++			if (file->f_flags & O_NONBLOCK) {
++				retval = -EAGAIN;
++				goto exit;
++			}
+ 			else {
+ 				//next line will return when there is either new data, or the device is unplugged
+ 				int r = wait_event_interruptible(dev->read_wait,
+@@ -309,28 +326,37 @@ static ssize_t iowarrior_read(struct file *file, char __user *buffer,
+ 								  -1));
+ 				if (r) {
+ 					//we were interrupted by a signal
+-					return -ERESTART;
++					retval = -ERESTART;
++					goto exit;
+ 				}
+ 				if (!dev->present) {
+ 					//The device was unplugged
+-					return -ENODEV;
++					retval = -ENODEV;
++					goto exit;
+ 				}
+ 				if (read_idx == -1) {
+ 					// Can this happen ???
+-					return 0;
++					retval = 0;
++					goto exit;
+ 				}
+ 			}
+ 		}
+ 
+ 		offset = read_idx * (dev->report_size + 1);
+ 		if (copy_to_user(buffer, dev->read_queue + offset, count)) {
+-			return -EFAULT;
++			retval = -EFAULT;
++			goto exit;
+ 		}
+ 	} while (atomic_read(&dev->overflow_flag));
+ 
+ 	read_idx = ++read_idx == MAX_INTERRUPT_BUFFER ? 0 : read_idx;
+ 	atomic_set(&dev->read_idx, read_idx);
++	mutex_unlock(&dev->mutex);
+ 	return count;
++
++exit:
++	mutex_unlock(&dev->mutex);
++	return retval;
+ }
+ 
+ /*
+@@ -885,7 +911,6 @@ static int iowarrior_probe(struct usb_interface *interface,
+ static void iowarrior_disconnect(struct usb_interface *interface)
+ {
+ 	struct iowarrior *dev = usb_get_intfdata(interface);
+-	int minor = dev->minor;
+ 
+ 	usb_deregister_dev(interface, &iowarrior_class);
+ 
+@@ -909,9 +934,6 @@ static void iowarrior_disconnect(struct usb_interface *interface)
+ 		mutex_unlock(&dev->mutex);
+ 		iowarrior_delete(dev);
+ 	}
+-
+-	dev_info(&interface->dev, "I/O-Warror #%d now disconnected\n",
+-		 minor - IOWARRIOR_MINOR_BASE);
+ }
+ 
+ /* usb specific object needed to register this driver with the usb subsystem */
+diff --git a/drivers/usb/misc/usb-ljca.c b/drivers/usb/misc/usb-ljca.c
+index 01ceafc4ab78ce..d9c21f7830557b 100644
+--- a/drivers/usb/misc/usb-ljca.c
++++ b/drivers/usb/misc/usb-ljca.c
+@@ -332,14 +332,11 @@ static int ljca_send(struct ljca_adapter *adap, u8 type, u8 cmd,
+ 
+ 	ret = usb_bulk_msg(adap->usb_dev, adap->tx_pipe, header,
+ 			   msg_len, &transferred, LJCA_WRITE_TIMEOUT_MS);
+-
+-	usb_autopm_put_interface(adap->intf);
+-
+ 	if (ret < 0)
+-		goto out;
++		goto out_put;
+ 	if (transferred != msg_len) {
+ 		ret = -EIO;
+-		goto out;
++		goto out_put;
+ 	}
+ 
+ 	if (ack) {
+@@ -347,11 +344,14 @@ static int ljca_send(struct ljca_adapter *adap, u8 type, u8 cmd,
+ 						  timeout);
+ 		if (!ret) {
+ 			ret = -ETIMEDOUT;
+-			goto out;
++			goto out_put;
+ 		}
+ 	}
+ 	ret = adap->actual_length;
+ 
++out_put:
++	usb_autopm_put_interface(adap->intf);
++
+ out:
+ 	spin_lock_irqsave(&adap->lock, flags);
+ 	adap->ex_buf = NULL;
+@@ -811,6 +811,14 @@ static int ljca_probe(struct usb_interface *interface,
+ 	if (ret)
+ 		goto err_free;
+ 
++	/*
++	 * This works around problems with ov2740 initialization on some
++	 * Lenovo platforms. The autosuspend delay has to be smaller than
++	 * the delay after setting the reset_gpio line in ov2740_resume().
++	 * Otherwise the sensor randomly fails to initialize.
++	 */
++	pm_runtime_set_autosuspend_delay(&usb_dev->dev, 10);
++
+ 	usb_enable_autosuspend(usb_dev);
+ 
+ 	return 0;
+diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
+index 6aebc736a80c66..70dff0db5354ff 100644
+--- a/drivers/usb/misc/yurex.c
++++ b/drivers/usb/misc/yurex.c
+@@ -441,7 +441,10 @@ static ssize_t yurex_write(struct file *file, const char __user *user_buffer,
+ 	if (count == 0)
+ 		goto error;
+ 
+-	mutex_lock(&dev->io_mutex);
++	retval = mutex_lock_interruptible(&dev->io_mutex);
++	if (retval < 0)
++		return -EINTR;
++
+ 	if (dev->disconnected) {		/* already disconnected */
+ 		mutex_unlock(&dev->io_mutex);
+ 		retval = -ENODEV;
+diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
+index bdf13911a1e590..c6076df0d50cc7 100644
+--- a/drivers/usb/musb/musb_gadget.c
++++ b/drivers/usb/musb/musb_gadget.c
+@@ -1161,12 +1161,19 @@ void musb_free_request(struct usb_ep *ep, struct usb_request *req)
+  */
+ void musb_ep_restart(struct musb *musb, struct musb_request *req)
+ {
++	u16 csr;
++	void __iomem *epio = req->ep->hw_ep->regs;
++
+ 	trace_musb_req_start(req);
+ 	musb_ep_select(musb->mregs, req->epnum);
+-	if (req->tx)
++	if (req->tx) {
+ 		txstate(musb, req);
+-	else
+-		rxstate(musb, req);
++	} else {
++		csr = musb_readw(epio, MUSB_RXCSR);
++		csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_P_WZC_BITS;
++		musb_writew(epio, MUSB_RXCSR, csr);
++		musb_writew(epio, MUSB_RXCSR, csr);
++	}
+ }
+ 
+ static int musb_ep_restart_resume_work(struct musb *musb, void *data)
+diff --git a/drivers/usb/typec/tcpm/wcove.c b/drivers/usb/typec/tcpm/wcove.c
+index cf719307b3f6b9..60b2766a69bf8a 100644
+--- a/drivers/usb/typec/tcpm/wcove.c
++++ b/drivers/usb/typec/tcpm/wcove.c
+@@ -621,10 +621,6 @@ static int wcove_typec_probe(struct platform_device *pdev)
+ 	if (irq < 0)
+ 		return irq;
+ 
+-	irq = regmap_irq_get_virq(pmic->irq_chip_data_chgr, irq);
+-	if (irq < 0)
+-		return irq;
+-
+ 	ret = guid_parse(WCOVE_DSM_UUID, &wcove->guid);
+ 	if (ret)
+ 		return ret;
+diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c
+index bccfc03b5986d7..fcb8e61136cfd7 100644
+--- a/drivers/usb/typec/ucsi/ucsi_ccg.c
++++ b/drivers/usb/typec/ucsi/ucsi_ccg.c
+@@ -644,6 +644,10 @@ static int ucsi_ccg_sync_control(struct ucsi *ucsi, u64 command)
+ 	    uc->has_multiple_dp) {
+ 		con_index = (uc->last_cmd_sent >> 16) &
+ 			UCSI_CMD_CONNECTOR_MASK;
++		if (con_index == 0) {
++			ret = -EINVAL;
++			goto unlock;
++		}
+ 		con = &uc->ucsi->connector[con_index - 1];
+ 		ucsi_ccg_update_set_new_cam_cmd(uc, con, &command);
+ 	}
+@@ -651,6 +655,7 @@ static int ucsi_ccg_sync_control(struct ucsi *ucsi, u64 command)
+ 	ret = ucsi_sync_control_common(ucsi, command);
+ 
+ 	pm_runtime_put_sync(uc->dev);
++unlock:
+ 	mutex_unlock(&uc->lock);
+ 
+ 	return ret;
+diff --git a/drivers/usb/typec/ucsi/ucsi_glink.c b/drivers/usb/typec/ucsi/ucsi_glink.c
+index 03c0fa8edc8db5..f7000d383a4e62 100644
+--- a/drivers/usb/typec/ucsi/ucsi_glink.c
++++ b/drivers/usb/typec/ucsi/ucsi_glink.c
+@@ -185,7 +185,7 @@ static void pmic_glink_ucsi_connector_status(struct ucsi_connector *con)
+ 	struct pmic_glink_ucsi *ucsi = ucsi_get_drvdata(con->ucsi);
+ 	int orientation;
+ 
+-	if (con->num >= PMIC_GLINK_MAX_PORTS ||
++	if (con->num > PMIC_GLINK_MAX_PORTS ||
+ 	    !ucsi->port_orientation[con->num - 1])
+ 		return;
+ 
+diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c
+index 7d0c83b5b07158..8455f08f5d4060 100644
+--- a/drivers/vdpa/mlx5/core/mr.c
++++ b/drivers/vdpa/mlx5/core/mr.c
+@@ -368,7 +368,6 @@ static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr
+ 	unsigned long lgcd = 0;
+ 	int log_entity_size;
+ 	unsigned long size;
+-	u64 start = 0;
+ 	int err;
+ 	struct page *pg;
+ 	unsigned int nsg;
+@@ -379,10 +378,9 @@ static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr
+ 	struct device *dma = mvdev->vdev.dma_dev;
+ 
+ 	for (map = vhost_iotlb_itree_first(iotlb, mr->start, mr->end - 1);
+-	     map; map = vhost_iotlb_itree_next(map, start, mr->end - 1)) {
++	     map; map = vhost_iotlb_itree_next(map, mr->start, mr->end - 1)) {
+ 		size = maplen(map, mr);
+ 		lgcd = gcd(lgcd, size);
+-		start += size;
+ 	}
+ 	log_entity_size = ilog2(lgcd);
+ 
+diff --git a/drivers/vfio/pci/mlx5/cmd.c b/drivers/vfio/pci/mlx5/cmd.c
+index 41a4b0cf429756..7527e277c89897 100644
+--- a/drivers/vfio/pci/mlx5/cmd.c
++++ b/drivers/vfio/pci/mlx5/cmd.c
+@@ -423,6 +423,7 @@ static int mlx5vf_add_migration_pages(struct mlx5_vhca_data_buffer *buf,
+ 	unsigned long filled;
+ 	unsigned int to_fill;
+ 	int ret;
++	int i;
+ 
+ 	to_fill = min_t(unsigned int, npages, PAGE_SIZE / sizeof(*page_list));
+ 	page_list = kvzalloc(to_fill * sizeof(*page_list), GFP_KERNEL_ACCOUNT);
+@@ -443,7 +444,7 @@ static int mlx5vf_add_migration_pages(struct mlx5_vhca_data_buffer *buf,
+ 			GFP_KERNEL_ACCOUNT);
+ 
+ 		if (ret)
+-			goto err;
++			goto err_append;
+ 		buf->allocated_length += filled * PAGE_SIZE;
+ 		/* clean input for another bulk allocation */
+ 		memset(page_list, 0, filled * sizeof(*page_list));
+@@ -454,6 +455,9 @@ static int mlx5vf_add_migration_pages(struct mlx5_vhca_data_buffer *buf,
+ 	kvfree(page_list);
+ 	return 0;
+ 
++err_append:
++	for (i = filled - 1; i >= 0; i--)
++		__free_page(page_list[i]);
+ err:
+ 	kvfree(page_list);
+ 	return ret;
+diff --git a/drivers/vfio/pci/mlx5/main.c b/drivers/vfio/pci/mlx5/main.c
+index 242c23eef452e8..8833e60d42f566 100644
+--- a/drivers/vfio/pci/mlx5/main.c
++++ b/drivers/vfio/pci/mlx5/main.c
+@@ -640,14 +640,11 @@ mlx5vf_pci_save_device_data(struct mlx5vf_pci_core_device *mvdev, bool track)
+ 					O_RDONLY);
+ 	if (IS_ERR(migf->filp)) {
+ 		ret = PTR_ERR(migf->filp);
+-		goto end;
++		kfree(migf);
++		return ERR_PTR(ret);
+ 	}
+ 
+ 	migf->mvdev = mvdev;
+-	ret = mlx5vf_cmd_alloc_pd(migf);
+-	if (ret)
+-		goto out_free;
+-
+ 	stream_open(migf->filp->f_inode, migf->filp);
+ 	mutex_init(&migf->lock);
+ 	init_waitqueue_head(&migf->poll_wait);
+@@ -663,6 +660,11 @@ mlx5vf_pci_save_device_data(struct mlx5vf_pci_core_device *mvdev, bool track)
+ 	INIT_LIST_HEAD(&migf->buf_list);
+ 	INIT_LIST_HEAD(&migf->avail_list);
+ 	spin_lock_init(&migf->list_lock);
++
++	ret = mlx5vf_cmd_alloc_pd(migf);
++	if (ret)
++		goto out;
++
+ 	ret = mlx5vf_cmd_query_vhca_migration_state(mvdev, &length, &full_size, 0);
+ 	if (ret)
+ 		goto out_pd;
+@@ -692,10 +694,8 @@ mlx5vf_pci_save_device_data(struct mlx5vf_pci_core_device *mvdev, bool track)
+ 	mlx5vf_free_data_buffer(buf);
+ out_pd:
+ 	mlx5fv_cmd_clean_migf_resources(migf);
+-out_free:
++out:
+ 	fput(migf->filp);
+-end:
+-	kfree(migf);
+ 	return ERR_PTR(ret);
+ }
+ 
+@@ -1016,13 +1016,19 @@ mlx5vf_pci_resume_device_data(struct mlx5vf_pci_core_device *mvdev)
+ 					O_WRONLY);
+ 	if (IS_ERR(migf->filp)) {
+ 		ret = PTR_ERR(migf->filp);
+-		goto end;
++		kfree(migf);
++		return ERR_PTR(ret);
+ 	}
+ 
++	stream_open(migf->filp->f_inode, migf->filp);
++	mutex_init(&migf->lock);
++	INIT_LIST_HEAD(&migf->buf_list);
++	INIT_LIST_HEAD(&migf->avail_list);
++	spin_lock_init(&migf->list_lock);
+ 	migf->mvdev = mvdev;
+ 	ret = mlx5vf_cmd_alloc_pd(migf);
+ 	if (ret)
+-		goto out_free;
++		goto out;
+ 
+ 	buf = mlx5vf_alloc_data_buffer(migf, 0, DMA_TO_DEVICE);
+ 	if (IS_ERR(buf)) {
+@@ -1041,20 +1047,13 @@ mlx5vf_pci_resume_device_data(struct mlx5vf_pci_core_device *mvdev)
+ 	migf->buf_header[0] = buf;
+ 	migf->load_state = MLX5_VF_LOAD_STATE_READ_HEADER;
+ 
+-	stream_open(migf->filp->f_inode, migf->filp);
+-	mutex_init(&migf->lock);
+-	INIT_LIST_HEAD(&migf->buf_list);
+-	INIT_LIST_HEAD(&migf->avail_list);
+-	spin_lock_init(&migf->list_lock);
+ 	return migf;
+ out_buf:
+ 	mlx5vf_free_data_buffer(migf->buf[0]);
+ out_pd:
+ 	mlx5vf_cmd_dealloc_pd(migf);
+-out_free:
++out:
+ 	fput(migf->filp);
+-end:
+-	kfree(migf);
+ 	return ERR_PTR(ret);
+ }
+ 
+diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
+index 97422aafaa7b5d..ea2745c1ac5e68 100644
+--- a/drivers/vfio/pci/vfio_pci_config.c
++++ b/drivers/vfio/pci/vfio_pci_config.c
+@@ -313,6 +313,10 @@ static int vfio_virt_config_read(struct vfio_pci_core_device *vdev, int pos,
+ 	return count;
+ }
+ 
++static struct perm_bits direct_ro_perms = {
++	.readfn = vfio_direct_config_read,
++};
++
+ /* Default capability regions to read-only, no-virtualization */
+ static struct perm_bits cap_perms[PCI_CAP_ID_MAX + 1] = {
+ 	[0 ... PCI_CAP_ID_MAX] = { .readfn = vfio_direct_config_read }
+@@ -1897,9 +1901,17 @@ static ssize_t vfio_config_do_rw(struct vfio_pci_core_device *vdev, char __user
+ 		cap_start = *ppos;
+ 	} else {
+ 		if (*ppos >= PCI_CFG_SPACE_SIZE) {
+-			WARN_ON(cap_id > PCI_EXT_CAP_ID_MAX);
++			/*
++			 * We can get a cap_id that exceeds PCI_EXT_CAP_ID_MAX
++			 * if we're hiding an unknown capability at the start
++			 * of the extended capability list.  Use default, ro
++			 * access, which will virtualize the id and next values.
++			 */
++			if (cap_id > PCI_EXT_CAP_ID_MAX)
++				perm = &direct_ro_perms;
++			else
++				perm = &ecap_perms[cap_id];
+ 
+-			perm = &ecap_perms[cap_id];
+ 			cap_start = vfio_find_cap_start(vdev, *ppos);
+ 		} else {
+ 			WARN_ON(cap_id > PCI_CAP_ID_MAX);
+diff --git a/drivers/video/fbdev/sh7760fb.c b/drivers/video/fbdev/sh7760fb.c
+index 3d2a27fefc874a..130adef2e46869 100644
+--- a/drivers/video/fbdev/sh7760fb.c
++++ b/drivers/video/fbdev/sh7760fb.c
+@@ -409,12 +409,11 @@ static int sh7760fb_alloc_mem(struct fb_info *info)
+ 		vram = PAGE_SIZE;
+ 
+ 	fbmem = dma_alloc_coherent(info->device, vram, &par->fbdma, GFP_KERNEL);
+-
+ 	if (!fbmem)
+ 		return -ENOMEM;
+ 
+ 	if ((par->fbdma & SH7760FB_DMA_MASK) != SH7760FB_DMA_MASK) {
+-		sh7760fb_free_mem(info);
++		dma_free_coherent(info->device, vram, fbmem, par->fbdma);
+ 		dev_err(info->device, "kernel gave me memory at 0x%08lx, which is"
+ 			"unusable for the LCDC\n", (unsigned long)par->fbdma);
+ 		return -ENOMEM;
+diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
+index 684b9fe84fff5b..94c96bcfefe347 100644
+--- a/drivers/watchdog/Kconfig
++++ b/drivers/watchdog/Kconfig
+@@ -1509,7 +1509,7 @@ config 60XX_WDT
+ 
+ config SBC8360_WDT
+ 	tristate "SBC8360 Watchdog Timer"
+-	depends on X86_32
++	depends on X86_32 && HAS_IOPORT
+ 	help
+ 
+ 	  This is the driver for the hardware watchdog on the SBC8360 Single
+@@ -1522,7 +1522,7 @@ config SBC8360_WDT
+ 
+ config SBC7240_WDT
+ 	tristate "SBC Nano 7240 Watchdog Timer"
+-	depends on X86_32 && !UML
++	depends on X86_32 && HAS_IOPORT
+ 	help
+ 	  This is the driver for the hardware watchdog found on the IEI
+ 	  single board computers EPIC Nano 7240 (and likely others). This
+diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
+index 9f097f1f4a4cf3..6d32ffb0113650 100644
+--- a/drivers/xen/xenbus/xenbus_probe.c
++++ b/drivers/xen/xenbus/xenbus_probe.c
+@@ -313,7 +313,7 @@ int xenbus_dev_probe(struct device *_dev)
+ 	if (err) {
+ 		dev_warn(&dev->dev, "watch_otherend on %s failed.\n",
+ 		       dev->nodename);
+-		return err;
++		goto fail_remove;
+ 	}
+ 
+ 	dev->spurious_threshold = 1;
+@@ -322,6 +322,12 @@ int xenbus_dev_probe(struct device *_dev)
+ 			 dev->nodename);
+ 
+ 	return 0;
++fail_remove:
++	if (drv->remove) {
++		down(&dev->reclaim_sem);
++		drv->remove(dev);
++		up(&dev->reclaim_sem);
++	}
+ fail_put:
+ 	module_put(drv->driver.owner);
+ fail:
+diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
+index 06dc4a57ba78a7..0a216a078c3155 100644
+--- a/fs/binfmt_elf.c
++++ b/fs/binfmt_elf.c
+@@ -1251,6 +1251,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
+ 		}
+ 		reloc_func_desc = interp_load_addr;
+ 
++		allow_write_access(interpreter);
+ 		fput(interpreter);
+ 
+ 		kfree(interp_elf_ex);
+@@ -1347,6 +1348,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
+ 	kfree(interp_elf_ex);
+ 	kfree(interp_elf_phdata);
+ out_free_file:
++	allow_write_access(interpreter);
+ 	if (interpreter)
+ 		fput(interpreter);
+ out_free_ph:
+diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
+index 4fe5bb9f1b1f5e..7d35f0e1bc7641 100644
+--- a/fs/binfmt_elf_fdpic.c
++++ b/fs/binfmt_elf_fdpic.c
+@@ -394,6 +394,7 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm)
+ 			goto error;
+ 		}
+ 
++		allow_write_access(interpreter);
+ 		fput(interpreter);
+ 		interpreter = NULL;
+ 	}
+@@ -465,8 +466,10 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm)
+ 	retval = 0;
+ 
+ error:
+-	if (interpreter)
++	if (interpreter) {
++		allow_write_access(interpreter);
+ 		fput(interpreter);
++	}
+ 	kfree(interpreter_name);
+ 	kfree(exec_params.phdrs);
+ 	kfree(exec_params.loadmap);
+diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
+index 31660d8cc2c610..6a3a16f910516c 100644
+--- a/fs/binfmt_misc.c
++++ b/fs/binfmt_misc.c
+@@ -247,10 +247,13 @@ static int load_misc_binary(struct linux_binprm *bprm)
+ 	if (retval < 0)
+ 		goto ret;
+ 
+-	if (fmt->flags & MISC_FMT_OPEN_FILE)
++	if (fmt->flags & MISC_FMT_OPEN_FILE) {
+ 		interp_file = file_clone_open(fmt->interp_file);
+-	else
++		if (!IS_ERR(interp_file))
++			deny_write_access(interp_file);
++	} else {
+ 		interp_file = open_exec(fmt->interpreter);
++	}
+ 	retval = PTR_ERR(interp_file);
+ 	if (IS_ERR(interp_file))
+ 		goto ret;
+diff --git a/fs/cachefiles/interface.c b/fs/cachefiles/interface.c
+index 35ba2117a6f652..3e63cfe1587472 100644
+--- a/fs/cachefiles/interface.c
++++ b/fs/cachefiles/interface.c
+@@ -327,6 +327,8 @@ static void cachefiles_commit_object(struct cachefiles_object *object,
+ static void cachefiles_clean_up_object(struct cachefiles_object *object,
+ 				       struct cachefiles_cache *cache)
+ {
++	struct file *file;
++
+ 	if (test_bit(FSCACHE_COOKIE_RETIRED, &object->cookie->flags)) {
+ 		if (!test_bit(CACHEFILES_OBJECT_USING_TMPFILE, &object->flags)) {
+ 			cachefiles_see_object(object, cachefiles_obj_see_clean_delete);
+@@ -342,10 +344,14 @@ static void cachefiles_clean_up_object(struct cachefiles_object *object,
+ 	}
+ 
+ 	cachefiles_unmark_inode_in_use(object, object->file);
+-	if (object->file) {
+-		fput(object->file);
+-		object->file = NULL;
+-	}
++
++	spin_lock(&object->lock);
++	file = object->file;
++	object->file = NULL;
++	spin_unlock(&object->lock);
++
++	if (file)
++		fput(file);
+ }
+ 
+ /*
+diff --git a/fs/cachefiles/ondemand.c b/fs/cachefiles/ondemand.c
+index 470c9665838505..fe3de9ad57bf6d 100644
+--- a/fs/cachefiles/ondemand.c
++++ b/fs/cachefiles/ondemand.c
+@@ -60,26 +60,36 @@ static ssize_t cachefiles_ondemand_fd_write_iter(struct kiocb *kiocb,
+ {
+ 	struct cachefiles_object *object = kiocb->ki_filp->private_data;
+ 	struct cachefiles_cache *cache = object->volume->cache;
+-	struct file *file = object->file;
+-	size_t len = iter->count;
++	struct file *file;
++	size_t len = iter->count, aligned_len = len;
+ 	loff_t pos = kiocb->ki_pos;
+ 	const struct cred *saved_cred;
+ 	int ret;
+ 
+-	if (!file)
++	spin_lock(&object->lock);
++	file = object->file;
++	if (!file) {
++		spin_unlock(&object->lock);
+ 		return -ENOBUFS;
++	}
++	get_file(file);
++	spin_unlock(&object->lock);
+ 
+ 	cachefiles_begin_secure(cache, &saved_cred);
+-	ret = __cachefiles_prepare_write(object, file, &pos, &len, len, true);
++	ret = __cachefiles_prepare_write(object, file, &pos, &aligned_len, len, true);
+ 	cachefiles_end_secure(cache, saved_cred);
+ 	if (ret < 0)
+-		return ret;
++		goto out;
+ 
+ 	trace_cachefiles_ondemand_fd_write(object, file_inode(file), pos, len);
+ 	ret = __cachefiles_write(object, file, pos, iter, NULL, NULL);
+-	if (!ret)
++	if (!ret) {
+ 		ret = len;
++		kiocb->ki_pos += ret;
++	}
+ 
++out:
++	fput(file);
+ 	return ret;
+ }
+ 
+@@ -87,12 +97,22 @@ static loff_t cachefiles_ondemand_fd_llseek(struct file *filp, loff_t pos,
+ 					    int whence)
+ {
+ 	struct cachefiles_object *object = filp->private_data;
+-	struct file *file = object->file;
++	struct file *file;
++	loff_t ret;
+ 
+-	if (!file)
++	spin_lock(&object->lock);
++	file = object->file;
++	if (!file) {
++		spin_unlock(&object->lock);
+ 		return -ENOBUFS;
++	}
++	get_file(file);
++	spin_unlock(&object->lock);
+ 
+-	return vfs_llseek(file, pos, whence);
++	ret = vfs_llseek(file, pos, whence);
++	fput(file);
++
++	return ret;
+ }
+ 
+ static long cachefiles_ondemand_fd_ioctl(struct file *filp, unsigned int ioctl,
+diff --git a/fs/dlm/ast.c b/fs/dlm/ast.c
+index 742b30b61c196f..0fe8d80ce5e8d3 100644
+--- a/fs/dlm/ast.c
++++ b/fs/dlm/ast.c
+@@ -30,7 +30,7 @@ static void dlm_run_callback(uint32_t ls_id, uint32_t lkb_id, int8_t mode,
+ 		trace_dlm_bast(ls_id, lkb_id, mode, res_name, res_length);
+ 		bastfn(astparam, mode);
+ 	} else if (flags & DLM_CB_CAST) {
+-		trace_dlm_ast(ls_id, lkb_id, sb_status, sb_flags, res_name,
++		trace_dlm_ast(ls_id, lkb_id, sb_flags, sb_status, res_name,
+ 			      res_length);
+ 		lksb->sb_status = sb_status;
+ 		lksb->sb_flags = sb_flags;
+diff --git a/fs/dlm/recoverd.c b/fs/dlm/recoverd.c
+index 34f4f9f49a6ce5..12272a8f6d75f3 100644
+--- a/fs/dlm/recoverd.c
++++ b/fs/dlm/recoverd.c
+@@ -151,7 +151,7 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
+ 	error = dlm_recover_members(ls, rv, &neg);
+ 	if (error) {
+ 		log_rinfo(ls, "dlm_recover_members error %d", error);
+-		goto fail;
++		goto fail_root_list;
+ 	}
+ 
+ 	dlm_recover_dir_nodeid(ls, &root_list);
+diff --git a/fs/efs/super.c b/fs/efs/super.c
+index e4421c10caebe5..c59086b7eabfe9 100644
+--- a/fs/efs/super.c
++++ b/fs/efs/super.c
+@@ -15,7 +15,6 @@
+ #include <linux/vfs.h>
+ #include <linux/blkdev.h>
+ #include <linux/fs_context.h>
+-#include <linux/fs_parser.h>
+ #include "efs.h"
+ #include <linux/efs_vh.h>
+ #include <linux/efs_fs_sb.h>
+@@ -49,15 +48,6 @@ static struct pt_types sgi_pt_types[] = {
+ 	{0,		NULL}
+ };
+ 
+-enum {
+-	Opt_explicit_open,
+-};
+-
+-static const struct fs_parameter_spec efs_param_spec[] = {
+-	fsparam_flag    ("explicit-open",       Opt_explicit_open),
+-	{}
+-};
+-
+ /*
+  * File system definition and registration.
+  */
+@@ -67,7 +57,6 @@ static struct file_system_type efs_fs_type = {
+ 	.kill_sb		= efs_kill_sb,
+ 	.fs_flags		= FS_REQUIRES_DEV,
+ 	.init_fs_context	= efs_init_fs_context,
+-	.parameters		= efs_param_spec,
+ };
+ MODULE_ALIAS_FS("efs");
+ 
+@@ -265,7 +254,8 @@ static int efs_fill_super(struct super_block *s, struct fs_context *fc)
+ 	if (!sb_set_blocksize(s, EFS_BLOCKSIZE)) {
+ 		pr_err("device does not support %d byte blocks\n",
+ 			EFS_BLOCKSIZE);
+-		return -EINVAL;
++		return invalf(fc, "device does not support %d byte blocks\n",
++			      EFS_BLOCKSIZE);
+ 	}
+ 
+ 	/* read the vh (volume header) block */
+@@ -327,43 +317,22 @@ static int efs_fill_super(struct super_block *s, struct fs_context *fc)
+ 	return 0;
+ }
+ 
+-static void efs_free_fc(struct fs_context *fc)
+-{
+-	kfree(fc->fs_private);
+-}
+-
+ static int efs_get_tree(struct fs_context *fc)
+ {
+ 	return get_tree_bdev(fc, efs_fill_super);
+ }
+ 
+-static int efs_parse_param(struct fs_context *fc, struct fs_parameter *param)
+-{
+-	int token;
+-	struct fs_parse_result result;
+-
+-	token = fs_parse(fc, efs_param_spec, param, &result);
+-	if (token < 0)
+-		return token;
+-	return 0;
+-}
+-
+ static int efs_reconfigure(struct fs_context *fc)
+ {
+ 	sync_filesystem(fc->root->d_sb);
++	fc->sb_flags |= SB_RDONLY;
+ 
+ 	return 0;
+ }
+ 
+-struct efs_context {
+-	unsigned long s_mount_opts;
+-};
+-
+ static const struct fs_context_operations efs_context_opts = {
+-	.parse_param	= efs_parse_param,
+ 	.get_tree	= efs_get_tree,
+ 	.reconfigure	= efs_reconfigure,
+-	.free		= efs_free_fc,
+ };
+ 
+ /*
+@@ -371,12 +340,6 @@ static const struct fs_context_operations efs_context_opts = {
+  */
+ static int efs_init_fs_context(struct fs_context *fc)
+ {
+-	struct efs_context *ctx;
+-
+-	ctx = kzalloc(sizeof(struct efs_context), GFP_KERNEL);
+-	if (!ctx)
+-		return -ENOMEM;
+-	fc->fs_private = ctx;
+ 	fc->ops = &efs_context_opts;
+ 
+ 	return 0;
+diff --git a/fs/erofs/data.c b/fs/erofs/data.c
+index 61debd799cf904..fa51437e1d99d9 100644
+--- a/fs/erofs/data.c
++++ b/fs/erofs/data.c
+@@ -38,7 +38,7 @@ void *erofs_bread(struct erofs_buf *buf, erofs_off_t offset,
+ 	}
+ 	if (!folio || !folio_contains(folio, index)) {
+ 		erofs_put_metabuf(buf);
+-		folio = read_mapping_folio(buf->mapping, index, NULL);
++		folio = read_mapping_folio(buf->mapping, index, buf->file);
+ 		if (IS_ERR(folio))
+ 			return folio;
+ 	}
+@@ -61,9 +61,11 @@ void erofs_init_metabuf(struct erofs_buf *buf, struct super_block *sb)
+ {
+ 	struct erofs_sb_info *sbi = EROFS_SB(sb);
+ 
+-	if (erofs_is_fileio_mode(sbi))
+-		buf->mapping = file_inode(sbi->fdev)->i_mapping;
+-	else if (erofs_is_fscache_mode(sb))
++	buf->file = NULL;
++	if (erofs_is_fileio_mode(sbi)) {
++		buf->file = sbi->fdev;		/* some filesystems such as FUSE need it */
++		buf->mapping = buf->file->f_mapping;
++	} else if (erofs_is_fscache_mode(sb))
+ 		buf->mapping = sbi->s_fscache->inode->i_mapping;
+ 	else
+ 		buf->mapping = sb->s_bdev->bd_mapping;
+diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h
+index 4efd578d7c627b..9b03c8f323a762 100644
+--- a/fs/erofs/internal.h
++++ b/fs/erofs/internal.h
+@@ -221,6 +221,7 @@ enum erofs_kmap_type {
+ 
+ struct erofs_buf {
+ 	struct address_space *mapping;
++	struct file *file;
+ 	struct page *page;
+ 	void *base;
+ 	enum erofs_kmap_type kmap_type;
+diff --git a/fs/erofs/super.c b/fs/erofs/super.c
+index bed3dbe5b7cb8b..2dd7d819572f40 100644
+--- a/fs/erofs/super.c
++++ b/fs/erofs/super.c
+@@ -631,7 +631,11 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
+ 			errorfc(fc, "unsupported blksize for fscache mode");
+ 			return -EINVAL;
+ 		}
+-		if (!sb_set_blocksize(sb, 1 << sbi->blkszbits)) {
++
++		if (erofs_is_fileio_mode(sbi)) {
++			sb->s_blocksize = 1 << sbi->blkszbits;
++			sb->s_blocksize_bits = sbi->blkszbits;
++		} else if (!sb_set_blocksize(sb, 1 << sbi->blkszbits)) {
+ 			errorfc(fc, "failed to set erofs blksize");
+ 			return -EINVAL;
+ 		}
+diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c
+index a076cca1f54734..4535f2f0a0147e 100644
+--- a/fs/erofs/zmap.c
++++ b/fs/erofs/zmap.c
+@@ -219,7 +219,7 @@ static int z_erofs_load_compact_lcluster(struct z_erofs_maprecorder *m,
+ 	unsigned int amortizedshift;
+ 	erofs_off_t pos;
+ 
+-	if (lcn >= totalidx)
++	if (lcn >= totalidx || vi->z_logical_clusterbits > 14)
+ 		return -EINVAL;
+ 
+ 	m->lcn = lcn;
+@@ -390,7 +390,7 @@ static int z_erofs_get_extent_decompressedlen(struct z_erofs_maprecorder *m)
+ 	u64 lcn = m->lcn, headlcn = map->m_la >> lclusterbits;
+ 	int err;
+ 
+-	do {
++	while (1) {
+ 		/* handle the last EOF pcluster (no next HEAD lcluster) */
+ 		if ((lcn << lclusterbits) >= inode->i_size) {
+ 			map->m_llen = inode->i_size - map->m_la;
+@@ -402,14 +402,16 @@ static int z_erofs_get_extent_decompressedlen(struct z_erofs_maprecorder *m)
+ 			return err;
+ 
+ 		if (m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
+-			DBG_BUGON(!m->delta[1] &&
+-				  m->clusterofs != 1 << lclusterbits);
++			/* work around invalid d1 generated by pre-1.0 mkfs */
++			if (unlikely(!m->delta[1])) {
++				m->delta[1] = 1;
++				DBG_BUGON(1);
++			}
+ 		} else if (m->type == Z_EROFS_LCLUSTER_TYPE_PLAIN ||
+ 			   m->type == Z_EROFS_LCLUSTER_TYPE_HEAD1 ||
+ 			   m->type == Z_EROFS_LCLUSTER_TYPE_HEAD2) {
+-			/* go on until the next HEAD lcluster */
+ 			if (lcn != headlcn)
+-				break;
++				break;	/* ends at the next HEAD lcluster */
+ 			m->delta[1] = 1;
+ 		} else {
+ 			erofs_err(inode->i_sb, "unknown type %u @ lcn %llu of nid %llu",
+@@ -418,8 +420,7 @@ static int z_erofs_get_extent_decompressedlen(struct z_erofs_maprecorder *m)
+ 			return -EOPNOTSUPP;
+ 		}
+ 		lcn += m->delta[1];
+-	} while (m->delta[1]);
+-
++	}
+ 	map->m_llen = (lcn << lclusterbits) + m->clusterofs - map->m_la;
+ 	return 0;
+ }
+diff --git a/fs/exec.c b/fs/exec.c
+index 6c53920795c2e7..9c349a74f38589 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -883,7 +883,8 @@ EXPORT_SYMBOL(transfer_args_to_stack);
+  */
+ static struct file *do_open_execat(int fd, struct filename *name, int flags)
+ {
+-	struct file *file;
++	int err;
++	struct file *file __free(fput) = NULL;
+ 	struct open_flags open_exec_flags = {
+ 		.open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
+ 		.acc_mode = MAY_EXEC,
+@@ -908,12 +909,14 @@ static struct file *do_open_execat(int fd, struct filename *name, int flags)
+ 	 * an invariant that all non-regular files error out before we get here.
+ 	 */
+ 	if (WARN_ON_ONCE(!S_ISREG(file_inode(file)->i_mode)) ||
+-	    path_noexec(&file->f_path)) {
+-		fput(file);
++	    path_noexec(&file->f_path))
+ 		return ERR_PTR(-EACCES);
+-	}
+ 
+-	return file;
++	err = deny_write_access(file);
++	if (err)
++		return ERR_PTR(err);
++
++	return no_free_ptr(file);
+ }
+ 
+ /**
+@@ -923,7 +926,8 @@ static struct file *do_open_execat(int fd, struct filename *name, int flags)
+  *
+  * Returns ERR_PTR on failure or allocated struct file on success.
+  *
+- * As this is a wrapper for the internal do_open_execat(). Also see
++ * As this is a wrapper for the internal do_open_execat(), callers
++ * must call allow_write_access() before fput() on release. Also see
+  * do_close_execat().
+  */
+ struct file *open_exec(const char *name)
+@@ -1475,8 +1479,10 @@ static int prepare_bprm_creds(struct linux_binprm *bprm)
+ /* Matches do_open_execat() */
+ static void do_close_execat(struct file *file)
+ {
+-	if (file)
+-		fput(file);
++	if (!file)
++		return;
++	allow_write_access(file);
++	fput(file);
+ }
+ 
+ static void free_bprm(struct linux_binprm *bprm)
+@@ -1801,6 +1807,7 @@ static int exec_binprm(struct linux_binprm *bprm)
+ 		bprm->file = bprm->interpreter;
+ 		bprm->interpreter = NULL;
+ 
++		allow_write_access(exec);
+ 		if (unlikely(bprm->have_execfd)) {
+ 			if (bprm->executable) {
+ 				fput(exec);
+diff --git a/fs/exfat/file.c b/fs/exfat/file.c
+index a25d7eb789f4cb..fb38769c3e39d1 100644
+--- a/fs/exfat/file.c
++++ b/fs/exfat/file.c
+@@ -584,6 +584,16 @@ static ssize_t exfat_file_write_iter(struct kiocb *iocb, struct iov_iter *iter)
+ 	if (ret < 0)
+ 		goto unlock;
+ 
++	if (iocb->ki_flags & IOCB_DIRECT) {
++		unsigned long align = pos | iov_iter_alignment(iter);
++
++		if (!IS_ALIGNED(align, i_blocksize(inode)) &&
++		    !IS_ALIGNED(align, bdev_logical_block_size(inode->i_sb->s_bdev))) {
++			ret = -EINVAL;
++			goto unlock;
++		}
++	}
++
+ 	if (pos > valid_size) {
+ 		ret = exfat_extend_valid_size(file, pos);
+ 		if (ret < 0 && ret != -ENOSPC) {
+diff --git a/fs/exfat/namei.c b/fs/exfat/namei.c
+index 2c4c442293529b..337197ece59955 100644
+--- a/fs/exfat/namei.c
++++ b/fs/exfat/namei.c
+@@ -345,6 +345,7 @@ static int exfat_find_empty_entry(struct inode *inode,
+ 		if (ei->start_clu == EXFAT_EOF_CLUSTER) {
+ 			ei->start_clu = clu.dir;
+ 			p_dir->dir = clu.dir;
++			hint_femp.eidx = 0;
+ 		}
+ 
+ 		/* append to the FAT chain */
+@@ -637,14 +638,26 @@ static int exfat_find(struct inode *dir, struct qstr *qname,
+ 	info->size = le64_to_cpu(ep2->dentry.stream.valid_size);
+ 	info->valid_size = le64_to_cpu(ep2->dentry.stream.valid_size);
+ 	info->size = le64_to_cpu(ep2->dentry.stream.size);
++
++	info->start_clu = le32_to_cpu(ep2->dentry.stream.start_clu);
++	if (!is_valid_cluster(sbi, info->start_clu) && info->size) {
++		exfat_warn(sb, "start_clu is invalid cluster(0x%x)",
++				info->start_clu);
++		info->size = 0;
++		info->valid_size = 0;
++	}
++
++	if (info->valid_size > info->size) {
++		exfat_warn(sb, "valid_size(%lld) is greater than size(%lld)",
++				info->valid_size, info->size);
++		info->valid_size = info->size;
++	}
++
+ 	if (info->size == 0) {
+ 		info->flags = ALLOC_NO_FAT_CHAIN;
+ 		info->start_clu = EXFAT_EOF_CLUSTER;
+-	} else {
++	} else
+ 		info->flags = ep2->dentry.stream.flags;
+-		info->start_clu =
+-			le32_to_cpu(ep2->dentry.stream.start_clu);
+-	}
+ 
+ 	exfat_get_entry_time(sbi, &info->crtime,
+ 			     ep->dentry.file.create_tz,
+diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
+index 591fb3f710be72..8042ad87380897 100644
+--- a/fs/ext4/balloc.c
++++ b/fs/ext4/balloc.c
+@@ -550,7 +550,8 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group,
+ 	trace_ext4_read_block_bitmap_load(sb, block_group, ignore_locked);
+ 	ext4_read_bh_nowait(bh, REQ_META | REQ_PRIO |
+ 			    (ignore_locked ? REQ_RAHEAD : 0),
+-			    ext4_end_bitmap_read);
++			    ext4_end_bitmap_read,
++			    ext4_simulate_fail(sb, EXT4_SIM_BBITMAP_EIO));
+ 	return bh;
+ verify:
+ 	err = ext4_validate_block_bitmap(sb, desc, block_group, bh);
+@@ -577,7 +578,6 @@ int ext4_wait_block_bitmap(struct super_block *sb, ext4_group_t block_group,
+ 	if (!desc)
+ 		return -EFSCORRUPTED;
+ 	wait_on_buffer(bh);
+-	ext4_simulate_fail_bh(sb, bh, EXT4_SIM_BBITMAP_EIO);
+ 	if (!buffer_uptodate(bh)) {
+ 		ext4_error_err(sb, EIO, "Cannot read block bitmap - "
+ 			       "block_group = %u, block_bitmap = %llu",
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 44b0d418143c2e..bbffb76d9a9049 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -1865,14 +1865,6 @@ static inline bool ext4_simulate_fail(struct super_block *sb,
+ 	return false;
+ }
+ 
+-static inline void ext4_simulate_fail_bh(struct super_block *sb,
+-					 struct buffer_head *bh,
+-					 unsigned long code)
+-{
+-	if (!IS_ERR(bh) && ext4_simulate_fail(sb, code))
+-		clear_buffer_uptodate(bh);
+-}
+-
+ /*
+  * Error number codes for s_{first,last}_error_errno
+  *
+@@ -3100,9 +3092,9 @@ extern struct buffer_head *ext4_sb_bread(struct super_block *sb,
+ extern struct buffer_head *ext4_sb_bread_unmovable(struct super_block *sb,
+ 						   sector_t block);
+ extern void ext4_read_bh_nowait(struct buffer_head *bh, blk_opf_t op_flags,
+-				bh_end_io_t *end_io);
++				bh_end_io_t *end_io, bool simu_fail);
+ extern int ext4_read_bh(struct buffer_head *bh, blk_opf_t op_flags,
+-			bh_end_io_t *end_io);
++			bh_end_io_t *end_io, bool simu_fail);
+ extern int ext4_read_bh_lock(struct buffer_head *bh, blk_opf_t op_flags, bool wait);
+ extern void ext4_sb_breadahead_unmovable(struct super_block *sb, sector_t block);
+ extern int ext4_seq_options_show(struct seq_file *seq, void *offset);
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 34e25eee65219c..88f98dc4402753 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -568,7 +568,7 @@ __read_extent_tree_block(const char *function, unsigned int line,
+ 
+ 	if (!bh_uptodate_or_lock(bh)) {
+ 		trace_ext4_ext_load_extent(inode, pblk, _RET_IP_);
+-		err = ext4_read_bh(bh, 0, NULL);
++		err = ext4_read_bh(bh, 0, NULL, false);
+ 		if (err < 0)
+ 			goto errout;
+ 	}
+diff --git a/fs/ext4/fsmap.c b/fs/ext4/fsmap.c
+index df853c4d3a8c91..383c6edea6dd31 100644
+--- a/fs/ext4/fsmap.c
++++ b/fs/ext4/fsmap.c
+@@ -185,6 +185,56 @@ static inline ext4_fsblk_t ext4_fsmap_next_pblk(struct ext4_fsmap *fmr)
+ 	return fmr->fmr_physical + fmr->fmr_length;
+ }
+ 
++static int ext4_getfsmap_meta_helper(struct super_block *sb,
++				     ext4_group_t agno, ext4_grpblk_t start,
++				     ext4_grpblk_t len, void *priv)
++{
++	struct ext4_getfsmap_info *info = priv;
++	struct ext4_fsmap *p;
++	struct ext4_fsmap *tmp;
++	struct ext4_sb_info *sbi = EXT4_SB(sb);
++	ext4_fsblk_t fsb, fs_start, fs_end;
++	int error;
++
++	fs_start = fsb = (EXT4_C2B(sbi, start) +
++			  ext4_group_first_block_no(sb, agno));
++	fs_end = fs_start + EXT4_C2B(sbi, len);
++
++	/* Return relevant extents from the meta_list */
++	list_for_each_entry_safe(p, tmp, &info->gfi_meta_list, fmr_list) {
++		if (p->fmr_physical < info->gfi_next_fsblk) {
++			list_del(&p->fmr_list);
++			kfree(p);
++			continue;
++		}
++		if (p->fmr_physical <= fs_start ||
++		    p->fmr_physical + p->fmr_length <= fs_end) {
++			/* Emit the retained free extent record if present */
++			if (info->gfi_lastfree.fmr_owner) {
++				error = ext4_getfsmap_helper(sb, info,
++							&info->gfi_lastfree);
++				if (error)
++					return error;
++				info->gfi_lastfree.fmr_owner = 0;
++			}
++			error = ext4_getfsmap_helper(sb, info, p);
++			if (error)
++				return error;
++			fsb = p->fmr_physical + p->fmr_length;
++			if (info->gfi_next_fsblk < fsb)
++				info->gfi_next_fsblk = fsb;
++			list_del(&p->fmr_list);
++			kfree(p);
++			continue;
++		}
++	}
++	if (info->gfi_next_fsblk < fsb)
++		info->gfi_next_fsblk = fsb;
++
++	return 0;
++}
++
++
+ /* Transform a blockgroup's free record into a fsmap */
+ static int ext4_getfsmap_datadev_helper(struct super_block *sb,
+ 					ext4_group_t agno, ext4_grpblk_t start,
+@@ -539,6 +589,7 @@ static int ext4_getfsmap_datadev(struct super_block *sb,
+ 		error = ext4_mballoc_query_range(sb, info->gfi_agno,
+ 				EXT4_B2C(sbi, info->gfi_low.fmr_physical),
+ 				EXT4_B2C(sbi, info->gfi_high.fmr_physical),
++				ext4_getfsmap_meta_helper,
+ 				ext4_getfsmap_datadev_helper, info);
+ 		if (error)
+ 			goto err;
+@@ -560,7 +611,8 @@ static int ext4_getfsmap_datadev(struct super_block *sb,
+ 
+ 	/* Report any gaps at the end of the bg */
+ 	info->gfi_last = true;
+-	error = ext4_getfsmap_datadev_helper(sb, end_ag, last_cluster, 0, info);
++	error = ext4_getfsmap_datadev_helper(sb, end_ag, last_cluster + 1,
++					     0, info);
+ 	if (error)
+ 		goto err;
+ 
+diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
+index 7f1a5f90dbbdff..21d228073d7954 100644
+--- a/fs/ext4/ialloc.c
++++ b/fs/ext4/ialloc.c
+@@ -193,8 +193,9 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
+ 	 * submit the buffer_head for reading
+ 	 */
+ 	trace_ext4_load_inode_bitmap(sb, block_group);
+-	ext4_read_bh(bh, REQ_META | REQ_PRIO, ext4_end_bitmap_read);
+-	ext4_simulate_fail_bh(sb, bh, EXT4_SIM_IBITMAP_EIO);
++	ext4_read_bh(bh, REQ_META | REQ_PRIO,
++		     ext4_end_bitmap_read,
++		     ext4_simulate_fail(sb, EXT4_SIM_IBITMAP_EIO));
+ 	if (!buffer_uptodate(bh)) {
+ 		put_bh(bh);
+ 		ext4_error_err(sb, EIO, "Cannot read inode bitmap - "
+diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
+index 7404f0935c9032..7de327fa7b1c51 100644
+--- a/fs/ext4/indirect.c
++++ b/fs/ext4/indirect.c
+@@ -170,7 +170,7 @@ static Indirect *ext4_get_branch(struct inode *inode, int depth,
+ 		}
+ 
+ 		if (!bh_uptodate_or_lock(bh)) {
+-			if (ext4_read_bh(bh, 0, NULL) < 0) {
++			if (ext4_read_bh(bh, 0, NULL, false) < 0) {
+ 				put_bh(bh);
+ 				goto failure;
+ 			}
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 54bdd4884fe67d..99d09cd9c6a37e 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -4497,10 +4497,10 @@ static int __ext4_get_inode_loc(struct super_block *sb, unsigned long ino,
+ 	 * Read the block from disk.
+ 	 */
+ 	trace_ext4_load_inode(sb, ino);
+-	ext4_read_bh_nowait(bh, REQ_META | REQ_PRIO, NULL);
++	ext4_read_bh_nowait(bh, REQ_META | REQ_PRIO, NULL,
++			    ext4_simulate_fail(sb, EXT4_SIM_INODE_EIO));
+ 	blk_finish_plug(&plug);
+ 	wait_on_buffer(bh);
+-	ext4_simulate_fail_bh(sb, bh, EXT4_SIM_INODE_EIO);
+ 	if (!buffer_uptodate(bh)) {
+ 		if (ret_block)
+ 			*ret_block = block;
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index d73e38323879ce..92f49d7eb3c001 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -6999,13 +6999,14 @@ int
+ ext4_mballoc_query_range(
+ 	struct super_block		*sb,
+ 	ext4_group_t			group,
+-	ext4_grpblk_t			start,
++	ext4_grpblk_t			first,
+ 	ext4_grpblk_t			end,
++	ext4_mballoc_query_range_fn	meta_formatter,
+ 	ext4_mballoc_query_range_fn	formatter,
+ 	void				*priv)
+ {
+ 	void				*bitmap;
+-	ext4_grpblk_t			next;
++	ext4_grpblk_t			start, next;
+ 	struct ext4_buddy		e4b;
+ 	int				error;
+ 
+@@ -7016,10 +7017,19 @@ ext4_mballoc_query_range(
+ 
+ 	ext4_lock_group(sb, group);
+ 
+-	start = max(e4b.bd_info->bb_first_free, start);
++	start = max(e4b.bd_info->bb_first_free, first);
+ 	if (end >= EXT4_CLUSTERS_PER_GROUP(sb))
+ 		end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
+-
++	if (meta_formatter && start != first) {
++		if (start > end)
++			start = end;
++		ext4_unlock_group(sb, group);
++		error = meta_formatter(sb, group, first, start - first,
++				       priv);
++		if (error)
++			goto out_unload;
++		ext4_lock_group(sb, group);
++	}
+ 	while (start <= end) {
+ 		start = mb_find_next_zero_bit(bitmap, end + 1, start);
+ 		if (start > end)
+diff --git a/fs/ext4/mballoc.h b/fs/ext4/mballoc.h
+index d8553f1498d3cb..f8280de3e8820a 100644
+--- a/fs/ext4/mballoc.h
++++ b/fs/ext4/mballoc.h
+@@ -259,6 +259,7 @@ ext4_mballoc_query_range(
+ 	ext4_group_t			agno,
+ 	ext4_grpblk_t			start,
+ 	ext4_grpblk_t			end,
++	ext4_mballoc_query_range_fn	meta_formatter,
+ 	ext4_mballoc_query_range_fn	formatter,
+ 	void				*priv);
+ 
+diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
+index bd946d0c71b700..d64c04ed061ae9 100644
+--- a/fs/ext4/mmp.c
++++ b/fs/ext4/mmp.c
+@@ -94,7 +94,7 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
+ 	}
+ 
+ 	lock_buffer(*bh);
+-	ret = ext4_read_bh(*bh, REQ_META | REQ_PRIO, NULL);
++	ret = ext4_read_bh(*bh, REQ_META | REQ_PRIO, NULL, false);
+ 	if (ret)
+ 		goto warn_exit;
+ 
+diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
+index b64661ea6e0ed7..898443e98efc9e 100644
+--- a/fs/ext4/move_extent.c
++++ b/fs/ext4/move_extent.c
+@@ -213,7 +213,7 @@ static int mext_page_mkuptodate(struct folio *folio, size_t from, size_t to)
+ 			unlock_buffer(bh);
+ 			continue;
+ 		}
+-		ext4_read_bh_nowait(bh, 0, NULL);
++		ext4_read_bh_nowait(bh, 0, NULL, false);
+ 		nr++;
+ 	} while (block++, (bh = bh->b_this_page) != head);
+ 
+diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
+index a2704f06436106..72f77f78ae8df3 100644
+--- a/fs/ext4/resize.c
++++ b/fs/ext4/resize.c
+@@ -1300,7 +1300,7 @@ static struct buffer_head *ext4_get_bitmap(struct super_block *sb, __u64 block)
+ 	if (unlikely(!bh))
+ 		return NULL;
+ 	if (!bh_uptodate_or_lock(bh)) {
+-		if (ext4_read_bh(bh, 0, NULL) < 0) {
++		if (ext4_read_bh(bh, 0, NULL, false) < 0) {
+ 			brelse(bh);
+ 			return NULL;
+ 		}
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 16a4ce704460e1..940ac1a49b729e 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -161,8 +161,14 @@ MODULE_ALIAS("ext3");
+ 
+ 
+ static inline void __ext4_read_bh(struct buffer_head *bh, blk_opf_t op_flags,
+-				  bh_end_io_t *end_io)
++				  bh_end_io_t *end_io, bool simu_fail)
+ {
++	if (simu_fail) {
++		clear_buffer_uptodate(bh);
++		unlock_buffer(bh);
++		return;
++	}
++
+ 	/*
+ 	 * buffer's verified bit is no longer valid after reading from
+ 	 * disk again due to write out error, clear it to make sure we
+@@ -176,7 +182,7 @@ static inline void __ext4_read_bh(struct buffer_head *bh, blk_opf_t op_flags,
+ }
+ 
+ void ext4_read_bh_nowait(struct buffer_head *bh, blk_opf_t op_flags,
+-			 bh_end_io_t *end_io)
++			 bh_end_io_t *end_io, bool simu_fail)
+ {
+ 	BUG_ON(!buffer_locked(bh));
+ 
+@@ -184,10 +190,11 @@ void ext4_read_bh_nowait(struct buffer_head *bh, blk_opf_t op_flags,
+ 		unlock_buffer(bh);
+ 		return;
+ 	}
+-	__ext4_read_bh(bh, op_flags, end_io);
++	__ext4_read_bh(bh, op_flags, end_io, simu_fail);
+ }
+ 
+-int ext4_read_bh(struct buffer_head *bh, blk_opf_t op_flags, bh_end_io_t *end_io)
++int ext4_read_bh(struct buffer_head *bh, blk_opf_t op_flags,
++		 bh_end_io_t *end_io, bool simu_fail)
+ {
+ 	BUG_ON(!buffer_locked(bh));
+ 
+@@ -196,7 +203,7 @@ int ext4_read_bh(struct buffer_head *bh, blk_opf_t op_flags, bh_end_io_t *end_io
+ 		return 0;
+ 	}
+ 
+-	__ext4_read_bh(bh, op_flags, end_io);
++	__ext4_read_bh(bh, op_flags, end_io, simu_fail);
+ 
+ 	wait_on_buffer(bh);
+ 	if (buffer_uptodate(bh))
+@@ -208,10 +215,10 @@ int ext4_read_bh_lock(struct buffer_head *bh, blk_opf_t op_flags, bool wait)
+ {
+ 	lock_buffer(bh);
+ 	if (!wait) {
+-		ext4_read_bh_nowait(bh, op_flags, NULL);
++		ext4_read_bh_nowait(bh, op_flags, NULL, false);
+ 		return 0;
+ 	}
+-	return ext4_read_bh(bh, op_flags, NULL);
++	return ext4_read_bh(bh, op_flags, NULL, false);
+ }
+ 
+ /*
+@@ -266,7 +273,7 @@ void ext4_sb_breadahead_unmovable(struct super_block *sb, sector_t block)
+ 
+ 	if (likely(bh)) {
+ 		if (trylock_buffer(bh))
+-			ext4_read_bh_nowait(bh, REQ_RAHEAD, NULL);
++			ext4_read_bh_nowait(bh, REQ_RAHEAD, NULL, false);
+ 		brelse(bh);
+ 	}
+ }
+@@ -346,9 +353,9 @@ __u32 ext4_free_group_clusters(struct super_block *sb,
+ __u32 ext4_free_inodes_count(struct super_block *sb,
+ 			      struct ext4_group_desc *bg)
+ {
+-	return le16_to_cpu(bg->bg_free_inodes_count_lo) |
++	return le16_to_cpu(READ_ONCE(bg->bg_free_inodes_count_lo)) |
+ 		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
+-		 (__u32)le16_to_cpu(bg->bg_free_inodes_count_hi) << 16 : 0);
++		 (__u32)le16_to_cpu(READ_ONCE(bg->bg_free_inodes_count_hi)) << 16 : 0);
+ }
+ 
+ __u32 ext4_used_dirs_count(struct super_block *sb,
+@@ -402,9 +409,9 @@ void ext4_free_group_clusters_set(struct super_block *sb,
+ void ext4_free_inodes_set(struct super_block *sb,
+ 			  struct ext4_group_desc *bg, __u32 count)
+ {
+-	bg->bg_free_inodes_count_lo = cpu_to_le16((__u16)count);
++	WRITE_ONCE(bg->bg_free_inodes_count_lo, cpu_to_le16((__u16)count));
+ 	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
+-		bg->bg_free_inodes_count_hi = cpu_to_le16(count >> 16);
++		WRITE_ONCE(bg->bg_free_inodes_count_hi, cpu_to_le16(count >> 16));
+ }
+ 
+ void ext4_used_dirs_set(struct super_block *sb,
+@@ -6518,9 +6525,6 @@ static int __ext4_remount(struct fs_context *fc, struct super_block *sb)
+ 		goto restore_opts;
+ 	}
+ 
+-	if (test_opt2(sb, ABORT))
+-		ext4_abort(sb, ESHUTDOWN, "Abort forced by user");
+-
+ 	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
+ 		(test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0);
+ 
+@@ -6689,6 +6693,14 @@ static int __ext4_remount(struct fs_context *fc, struct super_block *sb)
+ 	if (!ext4_has_feature_mmp(sb) || sb_rdonly(sb))
+ 		ext4_stop_mmpd(sbi);
+ 
++	/*
++	 * Handle aborting the filesystem as the last thing during remount to
++	 * avoid obscure errors during remount when some option changes fail to
++	 * apply due to the filesystem being shut down.
++	 */
++	if (test_opt2(sb, ABORT))
++		ext4_abort(sb, ESHUTDOWN, "Abort forced by user");
++
+ 	return 0;
+ 
+ restore_opts:
+diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
+index 7f76460b721f2c..efda9a0229816b 100644
+--- a/fs/f2fs/checkpoint.c
++++ b/fs/f2fs/checkpoint.c
+@@ -32,7 +32,7 @@ void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io,
+ 	f2fs_build_fault_attr(sbi, 0, 0);
+ 	if (!end_io)
+ 		f2fs_flush_merged_writes(sbi);
+-	f2fs_handle_critical_error(sbi, reason, end_io);
++	f2fs_handle_critical_error(sbi, reason);
+ }
+ 
+ /*
+diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
+index 94f7b084f60164..9efe4c00d75bb3 100644
+--- a/fs/f2fs/data.c
++++ b/fs/f2fs/data.c
+@@ -1676,7 +1676,8 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, int flag)
+ 		/* reserved delalloc block should be mapped for fiemap. */
+ 		if (blkaddr == NEW_ADDR)
+ 			map->m_flags |= F2FS_MAP_DELALLOC;
+-		if (flag != F2FS_GET_BLOCK_DIO || !is_hole)
++		/* DIO READ and hole case, should not map the blocks. */
++		if (!(flag == F2FS_GET_BLOCK_DIO && is_hole && !map->m_may_create))
+ 			map->m_flags |= F2FS_MAP_MAPPED;
+ 
+ 		map->m_pblk = blkaddr;
+@@ -1901,25 +1902,6 @@ static int f2fs_xattr_fiemap(struct inode *inode,
+ 	return (err < 0 ? err : 0);
+ }
+ 
+-static loff_t max_inode_blocks(struct inode *inode)
+-{
+-	loff_t result = ADDRS_PER_INODE(inode);
+-	loff_t leaf_count = ADDRS_PER_BLOCK(inode);
+-
+-	/* two direct node blocks */
+-	result += (leaf_count * 2);
+-
+-	/* two indirect node blocks */
+-	leaf_count *= NIDS_PER_BLOCK;
+-	result += (leaf_count * 2);
+-
+-	/* one double indirect node block */
+-	leaf_count *= NIDS_PER_BLOCK;
+-	result += leaf_count;
+-
+-	return result;
+-}
+-
+ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+ 		u64 start, u64 len)
+ {
+@@ -1992,8 +1974,7 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+ 	if (!compr_cluster && !(map.m_flags & F2FS_MAP_FLAGS)) {
+ 		start_blk = next_pgofs;
+ 
+-		if (blks_to_bytes(inode, start_blk) < blks_to_bytes(inode,
+-						max_inode_blocks(inode)))
++		if (blks_to_bytes(inode, start_blk) < maxbytes)
+ 			goto prep_next;
+ 
+ 		flags |= FIEMAP_EXTENT_LAST;
+@@ -2385,10 +2366,10 @@ static int f2fs_mpage_readpages(struct inode *inode,
+ 		.nr_cpages = 0,
+ 	};
+ 	pgoff_t nc_cluster_idx = NULL_CLUSTER;
++	pgoff_t index;
+ #endif
+ 	unsigned nr_pages = rac ? readahead_count(rac) : 1;
+ 	unsigned max_nr_pages = nr_pages;
+-	pgoff_t index;
+ 	int ret = 0;
+ 
+ 	map.m_pblk = 0;
+@@ -2406,9 +2387,9 @@ static int f2fs_mpage_readpages(struct inode *inode,
+ 			prefetchw(&folio->flags);
+ 		}
+ 
++#ifdef CONFIG_F2FS_FS_COMPRESSION
+ 		index = folio_index(folio);
+ 
+-#ifdef CONFIG_F2FS_FS_COMPRESSION
+ 		if (!f2fs_compressed_file(inode))
+ 			goto read_single_page;
+ 
+diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
+index 33f5449dc22d50..93a5e1c24e566e 100644
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -3632,8 +3632,7 @@ int f2fs_quota_sync(struct super_block *sb, int type);
+ loff_t max_file_blocks(struct inode *inode);
+ void f2fs_quota_off_umount(struct super_block *sb);
+ void f2fs_save_errors(struct f2fs_sb_info *sbi, unsigned char flag);
+-void f2fs_handle_critical_error(struct f2fs_sb_info *sbi, unsigned char reason,
+-							bool irq_context);
++void f2fs_handle_critical_error(struct f2fs_sb_info *sbi, unsigned char reason);
+ void f2fs_handle_error(struct f2fs_sb_info *sbi, unsigned char error);
+ void f2fs_handle_error_async(struct f2fs_sb_info *sbi, unsigned char error);
+ int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover);
+diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
+index 321d8ffbab6e4b..71ddecaf771f81 100644
+--- a/fs/f2fs/file.c
++++ b/fs/f2fs/file.c
+@@ -863,7 +863,11 @@ static bool f2fs_force_buffered_io(struct inode *inode, int rw)
+ 		return true;
+ 	if (f2fs_compressed_file(inode))
+ 		return true;
+-	if (f2fs_has_inline_data(inode))
++	/*
++	 * Only force direct reads to use buffered IO; direct writes expect
++	 * inline data conversion before committing IO.
++	 */
++	if (f2fs_has_inline_data(inode) && rw == READ)
+ 		return true;
+ 
+ 	/* disallow direct IO if any of devices has unaligned blksize */
+@@ -2343,9 +2347,12 @@ int f2fs_do_shutdown(struct f2fs_sb_info *sbi, unsigned int flag,
+ 	if (readonly)
+ 		goto out;
+ 
+-	/* grab sb->s_umount to avoid racing w/ remount() */
++	/*
++	 * grab sb->s_umount to avoid racing w/ remount() and other shutdown
++	 * paths.
++	 */
+ 	if (need_lock)
+-		down_read(&sbi->sb->s_umount);
++		down_write(&sbi->sb->s_umount);
+ 
+ 	f2fs_stop_gc_thread(sbi);
+ 	f2fs_stop_discard_thread(sbi);
+@@ -2354,7 +2361,7 @@ int f2fs_do_shutdown(struct f2fs_sb_info *sbi, unsigned int flag,
+ 	clear_opt(sbi, DISCARD);
+ 
+ 	if (need_lock)
+-		up_read(&sbi->sb->s_umount);
++		up_write(&sbi->sb->s_umount);
+ 
+ 	f2fs_update_time(sbi, REQ_TIME);
+ out:
+@@ -3792,7 +3799,7 @@ static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count,
+ 		to_reserved = cluster_size - compr_blocks - reserved;
+ 
+ 		/* for the case all blocks in cluster were reserved */
+-		if (to_reserved == 1) {
++		if (reserved && to_reserved == 1) {
+ 			dn->ofs_in_node += cluster_size;
+ 			goto next;
+ 		}
+diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
+index 9322a7200e310d..e0469316c7cd4e 100644
+--- a/fs/f2fs/gc.c
++++ b/fs/f2fs/gc.c
+@@ -257,6 +257,8 @@ static int select_gc_type(struct f2fs_sb_info *sbi, int gc_type)
+ 
+ 	switch (sbi->gc_mode) {
+ 	case GC_IDLE_CB:
++	case GC_URGENT_LOW:
++	case GC_URGENT_MID:
+ 		gc_mode = GC_CB;
+ 		break;
+ 	case GC_IDLE_GREEDY:
+diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
+index 59b13ff243fa80..af36c6d6542b8c 100644
+--- a/fs/f2fs/node.c
++++ b/fs/f2fs/node.c
+@@ -905,6 +905,16 @@ static int truncate_node(struct dnode_of_data *dn)
+ 	if (err)
+ 		return err;
+ 
++	if (ni.blk_addr != NEW_ADDR &&
++		!f2fs_is_valid_blkaddr(sbi, ni.blk_addr, DATA_GENERIC_ENHANCE)) {
++		f2fs_err_ratelimited(sbi,
++			"nat entry is corrupted, run fsck to fix it, ino:%u, "
++			"nid:%u, blkaddr:%u", ni.ino, ni.nid, ni.blk_addr);
++		set_sbi_flag(sbi, SBI_NEED_FSCK);
++		f2fs_handle_error(sbi, ERROR_INCONSISTENT_NAT);
++		return -EFSCORRUPTED;
++	}
++
+ 	/* Deallocate node address */
+ 	f2fs_invalidate_blocks(sbi, ni.blk_addr);
+ 	dec_valid_node_count(sbi, dn->inode, dn->nid == dn->inode->i_ino);
+diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
+index 1766254279d24c..edf205093f4358 100644
+--- a/fs/f2fs/segment.c
++++ b/fs/f2fs/segment.c
+@@ -2926,7 +2926,8 @@ static int change_curseg(struct f2fs_sb_info *sbi, int type)
+ 	struct f2fs_summary_block *sum_node;
+ 	struct page *sum_page;
+ 
+-	write_sum_page(sbi, curseg->sum_blk, GET_SUM_BLOCK(sbi, curseg->segno));
++	if (curseg->inited)
++		write_sum_page(sbi, curseg->sum_blk, GET_SUM_BLOCK(sbi, curseg->segno));
+ 
+ 	__set_test_and_inuse(sbi, new_segno);
+ 
+@@ -3977,8 +3978,8 @@ void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
+ 		}
+ 	}
+ 
+-	f2fs_bug_on(sbi, !IS_DATASEG(type));
+ 	curseg = CURSEG_I(sbi, type);
++	f2fs_bug_on(sbi, !IS_DATASEG(curseg->seg_type));
+ 
+ 	mutex_lock(&curseg->curseg_mutex);
+ 	down_write(&sit_i->sentry_lock);
+diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
+index 71adb4a43bec53..51b2b8c5c749c5 100644
+--- a/fs/f2fs/segment.h
++++ b/fs/f2fs/segment.h
+@@ -559,18 +559,21 @@ static inline int reserved_sections(struct f2fs_sb_info *sbi)
+ }
+ 
+ static inline bool has_curseg_enough_space(struct f2fs_sb_info *sbi,
+-			unsigned int node_blocks, unsigned int dent_blocks)
++			unsigned int node_blocks, unsigned int data_blocks,
++			unsigned int dent_blocks)
+ {
+ 
+-	unsigned segno, left_blocks;
++	unsigned int segno, left_blocks, blocks;
+ 	int i;
+ 
+-	/* check current node sections in the worst case. */
+-	for (i = CURSEG_HOT_NODE; i <= CURSEG_COLD_NODE; i++) {
++	/* check current data/node sections in the worst case. */
++	for (i = CURSEG_HOT_DATA; i < NR_PERSISTENT_LOG; i++) {
+ 		segno = CURSEG_I(sbi, i)->segno;
+ 		left_blocks = CAP_BLKS_PER_SEC(sbi) -
+ 				get_ckpt_valid_blocks(sbi, segno, true);
+-		if (node_blocks > left_blocks)
++
++		blocks = i <= CURSEG_COLD_DATA ? data_blocks : node_blocks;
++		if (blocks > left_blocks)
+ 			return false;
+ 	}
+ 
+@@ -584,8 +587,9 @@ static inline bool has_curseg_enough_space(struct f2fs_sb_info *sbi,
+ }
+ 
+ /*
+- * calculate needed sections for dirty node/dentry
+- * and call has_curseg_enough_space
++ * calculate needed sections for dirty node/dentry and call
++ * has_curseg_enough_space. Note that it needs to account for dirty
++ * data as well in LFS mode when checkpoint is disabled.
+  */
+ static inline void __get_secs_required(struct f2fs_sb_info *sbi,
+ 		unsigned int *lower_p, unsigned int *upper_p, bool *curseg_p)
+@@ -594,19 +598,30 @@ static inline void __get_secs_required(struct f2fs_sb_info *sbi,
+ 					get_pages(sbi, F2FS_DIRTY_DENTS) +
+ 					get_pages(sbi, F2FS_DIRTY_IMETA);
+ 	unsigned int total_dent_blocks = get_pages(sbi, F2FS_DIRTY_DENTS);
++	unsigned int total_data_blocks = 0;
+ 	unsigned int node_secs = total_node_blocks / CAP_BLKS_PER_SEC(sbi);
+ 	unsigned int dent_secs = total_dent_blocks / CAP_BLKS_PER_SEC(sbi);
++	unsigned int data_secs = 0;
+ 	unsigned int node_blocks = total_node_blocks % CAP_BLKS_PER_SEC(sbi);
+ 	unsigned int dent_blocks = total_dent_blocks % CAP_BLKS_PER_SEC(sbi);
++	unsigned int data_blocks = 0;
++
++	if (f2fs_lfs_mode(sbi) &&
++		unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
++		total_data_blocks = get_pages(sbi, F2FS_DIRTY_DATA);
++		data_secs = total_data_blocks / CAP_BLKS_PER_SEC(sbi);
++		data_blocks = total_data_blocks % CAP_BLKS_PER_SEC(sbi);
++	}
+ 
+ 	if (lower_p)
+-		*lower_p = node_secs + dent_secs;
++		*lower_p = node_secs + dent_secs + data_secs;
+ 	if (upper_p)
+ 		*upper_p = node_secs + dent_secs +
+-			(node_blocks ? 1 : 0) + (dent_blocks ? 1 : 0);
++			(node_blocks ? 1 : 0) + (dent_blocks ? 1 : 0) +
++			(data_blocks ? 1 : 0);
+ 	if (curseg_p)
+ 		*curseg_p = has_curseg_enough_space(sbi,
+-				node_blocks, dent_blocks);
++				node_blocks, data_blocks, dent_blocks);
+ }
+ 
+ static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi,
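
The arithmetic in __get_secs_required() above splits each dirty-block count
into whole sections (the guaranteed lower bound) plus at most one extra
section per non-zero remainder (the worst-case upper bound). A minimal
userspace sketch of the same split, with an invented section capacity:

    #include <stdio.h>

    #define BLKS_PER_SEC 512   /* hypothetical CAP_BLKS_PER_SEC() value */

    static void secs_required(unsigned node, unsigned dent, unsigned data,
                              unsigned *lower, unsigned *upper)
    {
        /* whole sections that are certainly needed */
        *lower = node / BLKS_PER_SEC + dent / BLKS_PER_SEC +
                 data / BLKS_PER_SEC;
        /* worst case: each partial section spills into one more section */
        *upper = *lower + (node % BLKS_PER_SEC ? 1 : 0) +
                 (dent % BLKS_PER_SEC ? 1 : 0) +
                 (data % BLKS_PER_SEC ? 1 : 0);
    }

    int main(void)
    {
        unsigned lo, up;

        secs_required(1000, 10, 600, &lo, &up);
        printf("lower=%u upper=%u\n", lo, up);  /* lower=2 upper=5 */
        return 0;
    }
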
+diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
+index 87ab5696bd482c..983fdd98fc3755 100644
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -150,6 +150,8 @@ enum {
+ 	Opt_mode,
+ 	Opt_fault_injection,
+ 	Opt_fault_type,
++	Opt_lazytime,
++	Opt_nolazytime,
+ 	Opt_quota,
+ 	Opt_noquota,
+ 	Opt_usrquota,
+@@ -226,6 +228,8 @@ static match_table_t f2fs_tokens = {
+ 	{Opt_mode, "mode=%s"},
+ 	{Opt_fault_injection, "fault_injection=%u"},
+ 	{Opt_fault_type, "fault_type=%u"},
++	{Opt_lazytime, "lazytime"},
++	{Opt_nolazytime, "nolazytime"},
+ 	{Opt_quota, "quota"},
+ 	{Opt_noquota, "noquota"},
+ 	{Opt_usrquota, "usrquota"},
+@@ -918,6 +922,12 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
+ 			f2fs_info(sbi, "fault_type options not supported");
+ 			break;
+ #endif
++		case Opt_lazytime:
++			sb->s_flags |= SB_LAZYTIME;
++			break;
++		case Opt_nolazytime:
++			sb->s_flags &= ~SB_LAZYTIME;
++			break;
+ #ifdef CONFIG_QUOTA
+ 		case Opt_quota:
+ 		case Opt_usrquota:
+@@ -3322,7 +3332,7 @@ loff_t max_file_blocks(struct inode *inode)
+ 	 * fit within U32_MAX + 1 data units.
+ 	 */
+ 
+-	result = min(result, F2FS_BYTES_TO_BLK(((loff_t)U32_MAX + 1) * 4096));
++	result = umin(result, F2FS_BYTES_TO_BLK(((loff_t)U32_MAX + 1) * 4096));
+ 
+ 	return result;
+ }
+@@ -4155,8 +4165,7 @@ static bool system_going_down(void)
+ 		|| system_state == SYSTEM_RESTART;
+ }
+ 
+-void f2fs_handle_critical_error(struct f2fs_sb_info *sbi, unsigned char reason,
+-							bool irq_context)
++void f2fs_handle_critical_error(struct f2fs_sb_info *sbi, unsigned char reason)
+ {
+ 	struct super_block *sb = sbi->sb;
+ 	bool shutdown = reason == STOP_CP_REASON_SHUTDOWN;
+@@ -4168,10 +4177,12 @@ void f2fs_handle_critical_error(struct f2fs_sb_info *sbi, unsigned char reason,
+ 	if (!f2fs_hw_is_readonly(sbi)) {
+ 		save_stop_reason(sbi, reason);
+ 
+-		if (irq_context && !shutdown)
+-			schedule_work(&sbi->s_error_work);
+-		else
+-			f2fs_record_stop_reason(sbi);
++		/*
++		 * always create an asynchronous task to record stop_reason
++		 * in order to avoid potential deadlock when running into
++		 * f2fs_record_stop_reason() synchronously.
++		 */
++		schedule_work(&sbi->s_error_work);
+ 	}
+ 
+ 	/*
+@@ -4991,9 +5002,6 @@ static int __init init_f2fs_fs(void)
+ 	err = f2fs_init_shrinker();
+ 	if (err)
+ 		goto free_sysfs;
+-	err = register_filesystem(&f2fs_fs_type);
+-	if (err)
+-		goto free_shrinker;
+ 	f2fs_create_root_stats();
+ 	err = f2fs_init_post_read_processing();
+ 	if (err)
+@@ -5016,7 +5024,12 @@ static int __init init_f2fs_fs(void)
+ 	err = f2fs_create_casefold_cache();
+ 	if (err)
+ 		goto free_compress_cache;
++	err = register_filesystem(&f2fs_fs_type);
++	if (err)
++		goto free_casefold_cache;
+ 	return 0;
++free_casefold_cache:
++	f2fs_destroy_casefold_cache();
+ free_compress_cache:
+ 	f2fs_destroy_compress_cache();
+ free_compress_mempool:
+@@ -5031,8 +5044,6 @@ static int __init init_f2fs_fs(void)
+ 	f2fs_destroy_post_read_processing();
+ free_root_stats:
+ 	f2fs_destroy_root_stats();
+-	unregister_filesystem(&f2fs_fs_type);
+-free_shrinker:
+ 	f2fs_exit_shrinker();
+ free_sysfs:
+ 	f2fs_exit_sysfs();
+@@ -5056,6 +5067,7 @@ static int __init init_f2fs_fs(void)
+ 
+ static void __exit exit_f2fs_fs(void)
+ {
++	unregister_filesystem(&f2fs_fs_type);
+ 	f2fs_destroy_casefold_cache();
+ 	f2fs_destroy_compress_cache();
+ 	f2fs_destroy_compress_mempool();
+@@ -5064,7 +5076,6 @@ static void __exit exit_f2fs_fs(void)
+ 	f2fs_destroy_iostat_processing();
+ 	f2fs_destroy_post_read_processing();
+ 	f2fs_destroy_root_stats();
+-	unregister_filesystem(&f2fs_fs_type);
+ 	f2fs_exit_shrinker();
+ 	f2fs_exit_sysfs();
+ 	f2fs_destroy_garbage_collection_cache();
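
The init_f2fs_fs() reordering above follows the usual rule that
register_filesystem() comes last, so the filesystem cannot be mounted while
its caches are half-initialized, and exit_f2fs_fs() unregisters first for
the mirror-image reason. The goto-unwind shape of such init paths, as a
hedged userspace sketch (step names invented):

    #include <stdio.h>

    static int init_a(void) { puts("init a"); return 0; }
    static int init_b(void) { puts("init b"); return 0; }
    static int register_fs(void) { puts("register (last)"); return 0; }
    static void exit_b(void) { puts("exit b"); }
    static void exit_a(void) { puts("exit a"); }

    static int module_init_sketch(void)
    {
        int err;

        err = init_a();
        if (err)
            goto out;
        err = init_b();
        if (err)
            goto free_a;
        /* publish the filesystem only once everything it needs exists */
        err = register_fs();
        if (err)
            goto free_b;
        return 0;
    free_b:
        exit_b();
    free_a:
        exit_a();
    out:
        return err;
    }

    int main(void) { return module_init_sketch(); }
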
+diff --git a/fs/fcntl.c b/fs/fcntl.c
+index 22dd9dcce7ecc8..3d89de31066ae0 100644
+--- a/fs/fcntl.c
++++ b/fs/fcntl.c
+@@ -397,6 +397,9 @@ static long f_dupfd_query(int fd, struct file *filp)
+ {
+ 	CLASS(fd_raw, f)(fd);
+ 
++	if (fd_empty(f))
++		return -EBADF;
++
+ 	/*
+ 	 * We can do the 'fdput()' immediately, as the only thing that
+ 	 * matters is the pointer value which isn't changed by the fdput.
+diff --git a/fs/fuse/file.c b/fs/fuse/file.c
+index dafdf766b1d535..e20d91d0ae558c 100644
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -645,7 +645,7 @@ void fuse_read_args_fill(struct fuse_io_args *ia, struct file *file, loff_t pos,
+ 	args->out_args[0].size = count;
+ }
+ 
+-static void fuse_release_user_pages(struct fuse_args_pages *ap,
++static void fuse_release_user_pages(struct fuse_args_pages *ap, ssize_t nres,
+ 				    bool should_dirty)
+ {
+ 	unsigned int i;
+@@ -656,6 +656,9 @@ static void fuse_release_user_pages(struct fuse_args_pages *ap,
+ 		if (ap->args.is_pinned)
+ 			unpin_user_page(ap->pages[i]);
+ 	}
++
++	if (nres > 0 && ap->args.invalidate_vmap)
++		invalidate_kernel_vmap_range(ap->args.vmap_base, nres);
+ }
+ 
+ static void fuse_io_release(struct kref *kref)
+@@ -754,25 +757,29 @@ static void fuse_aio_complete_req(struct fuse_mount *fm, struct fuse_args *args,
+ 	struct fuse_io_args *ia = container_of(args, typeof(*ia), ap.args);
+ 	struct fuse_io_priv *io = ia->io;
+ 	ssize_t pos = -1;
+-
+-	fuse_release_user_pages(&ia->ap, io->should_dirty);
++	size_t nres;
+ 
+ 	if (err) {
+ 		/* Nothing */
+ 	} else if (io->write) {
+ 		if (ia->write.out.size > ia->write.in.size) {
+ 			err = -EIO;
+-		} else if (ia->write.in.size != ia->write.out.size) {
+-			pos = ia->write.in.offset - io->offset +
+-				ia->write.out.size;
++		} else {
++			nres = ia->write.out.size;
++			if (ia->write.in.size != ia->write.out.size)
++				pos = ia->write.in.offset - io->offset +
++				      ia->write.out.size;
+ 		}
+ 	} else {
+ 		u32 outsize = args->out_args[0].size;
+ 
++		nres = outsize;
+ 		if (ia->read.in.size != outsize)
+ 			pos = ia->read.in.offset - io->offset + outsize;
+ 	}
+ 
++	fuse_release_user_pages(&ia->ap, err ?: nres, io->should_dirty);
++
+ 	fuse_aio_complete(io, err, pos);
+ 	fuse_io_free(ia);
+ }
+@@ -1468,24 +1475,37 @@ static inline size_t fuse_get_frag_size(const struct iov_iter *ii,
+ 
+ static int fuse_get_user_pages(struct fuse_args_pages *ap, struct iov_iter *ii,
+ 			       size_t *nbytesp, int write,
+-			       unsigned int max_pages)
++			       unsigned int max_pages,
++			       bool use_pages_for_kvec_io)
+ {
++	bool flush_or_invalidate = false;
+ 	size_t nbytes = 0;  /* # bytes already packed in req */
+ 	ssize_t ret = 0;
+ 
+-	/* Special case for kernel I/O: can copy directly into the buffer */
++	/* Special case for kernel I/O: can copy directly into the buffer.
++	 * However, if the fuse_conn implementation requires pages instead of a
++	 * pointer (e.g. virtio-fs), use iov_iter_extract_pages() instead.
++	 */
+ 	if (iov_iter_is_kvec(ii)) {
+-		unsigned long user_addr = fuse_get_user_addr(ii);
+-		size_t frag_size = fuse_get_frag_size(ii, *nbytesp);
++		void *user_addr = (void *)fuse_get_user_addr(ii);
+ 
+-		if (write)
+-			ap->args.in_args[1].value = (void *) user_addr;
+-		else
+-			ap->args.out_args[0].value = (void *) user_addr;
++		if (!use_pages_for_kvec_io) {
++			size_t frag_size = fuse_get_frag_size(ii, *nbytesp);
+ 
+-		iov_iter_advance(ii, frag_size);
+-		*nbytesp = frag_size;
+-		return 0;
++			if (write)
++				ap->args.in_args[1].value = user_addr;
++			else
++				ap->args.out_args[0].value = user_addr;
++
++			iov_iter_advance(ii, frag_size);
++			*nbytesp = frag_size;
++			return 0;
++		}
++
++		if (is_vmalloc_addr(user_addr)) {
++			ap->args.vmap_base = user_addr;
++			flush_or_invalidate = true;
++		}
+ 	}
+ 
+ 	while (nbytes < *nbytesp && ap->num_pages < max_pages) {
+@@ -1514,6 +1534,10 @@ static int fuse_get_user_pages(struct fuse_args_pages *ap, struct iov_iter *ii,
+ 			(PAGE_SIZE - ret) & (PAGE_SIZE - 1);
+ 	}
+ 
++	if (write && flush_or_invalidate)
++		flush_kernel_vmap_range(ap->args.vmap_base, nbytes);
++
++	ap->args.invalidate_vmap = !write && flush_or_invalidate;
+ 	ap->args.is_pinned = iov_iter_extract_will_pin(ii);
+ 	ap->args.user_pages = true;
+ 	if (write)
+@@ -1582,7 +1606,7 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
+ 		size_t nbytes = min(count, nmax);
+ 
+ 		err = fuse_get_user_pages(&ia->ap, iter, &nbytes, write,
+-					  max_pages);
++					  max_pages, fc->use_pages_for_kvec_io);
+ 		if (err && !nbytes)
+ 			break;
+ 
+@@ -1596,7 +1620,7 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
+ 		}
+ 
+ 		if (!io->async || nres < 0) {
+-			fuse_release_user_pages(&ia->ap, io->should_dirty);
++			fuse_release_user_pages(&ia->ap, nres, io->should_dirty);
+ 			fuse_io_free(ia);
+ 		}
+ 		ia = NULL;
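
virtio-fs hands pages to the host instead of dereferencing a kernel
pointer, so kvec I/O backed by vmalloc() memory needs explicit cache
maintenance on architectures with aliasing caches. The pattern the hunks
above add, condensed into a kernel-context sketch (the APIs are real, the
surrounding code is illustrative only):

    if (is_vmalloc_addr(buf)) {
        if (write)
            /* make CPU stores visible before the pages are read */
            flush_kernel_vmap_range(buf, len);
        else
            /* after the read completes, drop stale CPU cache lines */
            invalidate_kernel_vmap_range(buf, len);
    }

Note the patch performs the invalidate in fuse_release_user_pages(), once
the number of bytes actually transferred (nres) is known.
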
+diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
+index e6cc3d552b1382..28cf319c1c25cf 100644
+--- a/fs/fuse/fuse_i.h
++++ b/fs/fuse/fuse_i.h
+@@ -309,9 +309,12 @@ struct fuse_args {
+ 	bool may_block:1;
+ 	bool is_ext:1;
+ 	bool is_pinned:1;
++	bool invalidate_vmap:1;
+ 	struct fuse_in_arg in_args[3];
+ 	struct fuse_arg out_args[2];
+ 	void (*end)(struct fuse_mount *fm, struct fuse_args *args, int error);
++	/* Used for kvec iter backed by vmalloc address */
++	void *vmap_base;
+ };
+ 
+ struct fuse_args_pages {
+@@ -857,6 +860,9 @@ struct fuse_conn {
+ 	/** Passthrough support for read/write IO */
+ 	unsigned int passthrough:1;
+ 
++	/* Use pages instead of pointer for kernel I/O */
++	unsigned int use_pages_for_kvec_io:1;
++
+ 	/** Maximum stack depth for passthrough backing files */
+ 	int max_stack_depth;
+ 
+diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c
+index 6404a189e98900..d220e28e755fef 100644
+--- a/fs/fuse/virtio_fs.c
++++ b/fs/fuse/virtio_fs.c
+@@ -1691,6 +1691,7 @@ static int virtio_fs_get_tree(struct fs_context *fsc)
+ 	fc->delete_stale = true;
+ 	fc->auto_submounts = true;
+ 	fc->sync_fs = true;
++	fc->use_pages_for_kvec_io = true;
+ 
+ 	/* Tell FUSE to split requests that exceed the virtqueue's size */
+ 	fc->max_pages_limit = min_t(unsigned int, fc->max_pages_limit,
+diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
+index 269c3bc7fced71..a51fe42732c4c2 100644
+--- a/fs/gfs2/glock.c
++++ b/fs/gfs2/glock.c
+@@ -1013,14 +1013,15 @@ bool gfs2_queue_try_to_evict(struct gfs2_glock *gl)
+ 				  &gl->gl_delete, 0);
+ }
+ 
+-static bool gfs2_queue_verify_evict(struct gfs2_glock *gl)
++bool gfs2_queue_verify_delete(struct gfs2_glock *gl, bool later)
+ {
+ 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
++	unsigned long delay;
+ 
+-	if (test_and_set_bit(GLF_VERIFY_EVICT, &gl->gl_flags))
++	if (test_and_set_bit(GLF_VERIFY_DELETE, &gl->gl_flags))
+ 		return false;
+-	return queue_delayed_work(sdp->sd_delete_wq,
+-				  &gl->gl_delete, 5 * HZ);
++	delay = later ? 5 * HZ : 0;
++	return queue_delayed_work(sdp->sd_delete_wq, &gl->gl_delete, delay);
+ }
+ 
+ static void delete_work_func(struct work_struct *work)
+@@ -1052,19 +1053,19 @@ static void delete_work_func(struct work_struct *work)
+ 		if (gfs2_try_evict(gl)) {
+ 			if (test_bit(SDF_KILL, &sdp->sd_flags))
+ 				goto out;
+-			if (gfs2_queue_verify_evict(gl))
++			if (gfs2_queue_verify_delete(gl, true))
+ 				return;
+ 		}
+ 		goto out;
+ 	}
+ 
+-	if (test_and_clear_bit(GLF_VERIFY_EVICT, &gl->gl_flags)) {
++	if (test_and_clear_bit(GLF_VERIFY_DELETE, &gl->gl_flags)) {
+ 		inode = gfs2_lookup_by_inum(sdp, no_addr, gl->gl_no_formal_ino,
+ 					    GFS2_BLKST_UNLINKED);
+ 		if (IS_ERR(inode)) {
+ 			if (PTR_ERR(inode) == -EAGAIN &&
+ 			    !test_bit(SDF_KILL, &sdp->sd_flags) &&
+-			    gfs2_queue_verify_evict(gl))
++			    gfs2_queue_verify_delete(gl, true))
+ 				return;
+ 		} else {
+ 			d_prune_aliases(inode);
+@@ -2118,7 +2119,7 @@ static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
+ void gfs2_cancel_delete_work(struct gfs2_glock *gl)
+ {
+ 	clear_bit(GLF_TRY_TO_EVICT, &gl->gl_flags);
+-	clear_bit(GLF_VERIFY_EVICT, &gl->gl_flags);
++	clear_bit(GLF_VERIFY_DELETE, &gl->gl_flags);
+ 	if (cancel_delayed_work(&gl->gl_delete))
+ 		gfs2_glock_put(gl);
+ }
+@@ -2371,7 +2372,7 @@ static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
+ 		*p++ = 'N';
+ 	if (test_bit(GLF_TRY_TO_EVICT, gflags))
+ 		*p++ = 'e';
+-	if (test_bit(GLF_VERIFY_EVICT, gflags))
++	if (test_bit(GLF_VERIFY_DELETE, gflags))
+ 		*p++ = 'E';
+ 	*p = 0;
+ 	return buf;
+diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
+index adf0091cc98f95..63e101d448e961 100644
+--- a/fs/gfs2/glock.h
++++ b/fs/gfs2/glock.h
+@@ -245,6 +245,7 @@ static inline int gfs2_glock_nq_init(struct gfs2_glock *gl,
+ void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state);
+ void gfs2_glock_complete(struct gfs2_glock *gl, int ret);
+ bool gfs2_queue_try_to_evict(struct gfs2_glock *gl);
++bool gfs2_queue_verify_delete(struct gfs2_glock *gl, bool later);
+ void gfs2_cancel_delete_work(struct gfs2_glock *gl);
+ void gfs2_flush_delete_work(struct gfs2_sbd *sdp);
+ void gfs2_gl_hash_clear(struct gfs2_sbd *sdp);
+diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
+index aa4ef67a34e037..bd1348bff90ebe 100644
+--- a/fs/gfs2/incore.h
++++ b/fs/gfs2/incore.h
+@@ -329,7 +329,7 @@ enum {
+ 	GLF_BLOCKING			= 15,
+ 	GLF_UNLOCKED			= 16, /* Wait for glock to be unlocked */
+ 	GLF_TRY_TO_EVICT		= 17, /* iopen glocks only */
+-	GLF_VERIFY_EVICT		= 18, /* iopen glocks only */
++	GLF_VERIFY_DELETE		= 18, /* iopen glocks only */
+ };
+ 
+ struct gfs2_glock {
+diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
+index 29c77281676526..53930312971530 100644
+--- a/fs/gfs2/rgrp.c
++++ b/fs/gfs2/rgrp.c
+@@ -1879,7 +1879,7 @@ static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip
+ 		 */
+ 		ip = gl->gl_object;
+ 
+-		if (ip || !gfs2_queue_try_to_evict(gl))
++		if (ip || !gfs2_queue_verify_delete(gl, false))
+ 			gfs2_glock_put(gl);
+ 		else
+ 			found++;
+diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
+index 6678060ed4d2bb..e22c1edc32b39e 100644
+--- a/fs/gfs2/super.c
++++ b/fs/gfs2/super.c
+@@ -1045,7 +1045,7 @@ static int gfs2_drop_inode(struct inode *inode)
+ 		struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
+ 
+ 		gfs2_glock_hold(gl);
+-		if (!gfs2_queue_try_to_evict(gl))
++		if (!gfs2_queue_verify_delete(gl, true))
+ 			gfs2_glock_put_async(gl);
+ 		return 0;
+ 	}
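
gfs2_queue_verify_delete() folds both call sites into one helper: the
test_and_set_bit() guard ensures the verification work is queued at most
once, and the later flag selects between running as soon as a worker is
free (zero delay) and backing off five seconds. Roughly (wq stands in for
sdp->sd_delete_wq):

    /* kernel-context sketch of once-only, optionally delayed queueing */
    if (test_and_set_bit(GLF_VERIFY_DELETE, &gl->gl_flags))
        return false;                      /* already queued */
    return queue_delayed_work(wq, &gl->gl_delete, later ? 5 * HZ : 0);
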
+diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
+index 59ce81dca73fce..5389918bbf29db 100644
+--- a/fs/hfsplus/hfsplus_fs.h
++++ b/fs/hfsplus/hfsplus_fs.h
+@@ -156,6 +156,7 @@ struct hfsplus_sb_info {
+ 
+ 	/* Runtime variables */
+ 	u32 blockoffset;
++	u32 min_io_size;
+ 	sector_t part_start;
+ 	sector_t sect_count;
+ 	int fs_shift;
+@@ -307,7 +308,7 @@ struct hfsplus_readdir_data {
+  */
+ static inline unsigned short hfsplus_min_io_size(struct super_block *sb)
+ {
+-	return max_t(unsigned short, bdev_logical_block_size(sb->s_bdev),
++	return max_t(unsigned short, HFSPLUS_SB(sb)->min_io_size,
+ 		     HFSPLUS_SECTOR_SIZE);
+ }
+ 
+diff --git a/fs/hfsplus/wrapper.c b/fs/hfsplus/wrapper.c
+index 9592ffcb44e5ea..74801911bc1cc4 100644
+--- a/fs/hfsplus/wrapper.c
++++ b/fs/hfsplus/wrapper.c
+@@ -172,6 +172,8 @@ int hfsplus_read_wrapper(struct super_block *sb)
+ 	if (!blocksize)
+ 		goto out;
+ 
++	sbi->min_io_size = blocksize;
++
+ 	if (hfsplus_get_last_session(sb, &part_start, &part_size))
+ 		goto out;
+ 
+diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
+index 6d1cf2436ead68..084f6ed2dd7a69 100644
+--- a/fs/hostfs/hostfs_kern.c
++++ b/fs/hostfs/hostfs_kern.c
+@@ -471,8 +471,8 @@ static int hostfs_write_begin(struct file *file, struct address_space *mapping,
+ 
+ 	*foliop = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
+ 			mapping_gfp_mask(mapping));
+-	if (!*foliop)
+-		return -ENOMEM;
++	if (IS_ERR(*foliop))
++		return PTR_ERR(*foliop);
+ 	return 0;
+ }
+ 
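
__filemap_get_folio() reports failure as an ERR_PTR()-encoded pointer
rather than NULL, so the NULL test removed above could never trigger. A
small userspace model of that convention (the kernel reserves the top 4095
pointer values for negative errnos):

    #include <stdio.h>

    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    static void *get_folio(int fail)
    {
        static int folio = 42;                /* stand-in object */
        return fail ? ERR_PTR(-12 /* ENOMEM */) : &folio;
    }

    int main(void)
    {
        void *p = get_folio(1);

        if (IS_ERR(p))
            printf("error: %ld\n", PTR_ERR(p));   /* error: -12 */
        return 0;
    }
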
+diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
+index f50311a6b4299d..47038e6608123c 100644
+--- a/fs/isofs/inode.c
++++ b/fs/isofs/inode.c
+@@ -948,8 +948,6 @@ static int isofs_fill_super(struct super_block *s, struct fs_context *fc)
+ 		goto out_no_inode;
+ 	}
+ 
+-	kfree(opt->iocharset);
+-
+ 	return 0;
+ 
+ 	/*
+@@ -987,7 +985,6 @@ static int isofs_fill_super(struct super_block *s, struct fs_context *fc)
+ 	brelse(bh);
+ 	brelse(pri_bh);
+ out_freesbi:
+-	kfree(opt->iocharset);
+ 	kfree(sbi);
+ 	s->s_fs_info = NULL;
+ 	return error;
+@@ -1528,7 +1525,10 @@ static int isofs_get_tree(struct fs_context *fc)
+ 
+ static void isofs_free_fc(struct fs_context *fc)
+ {
+-	kfree(fc->fs_private);
++	struct isofs_options *opt = fc->fs_private;
++
++	kfree(opt->iocharset);
++	kfree(opt);
+ }
+ 
+ static const struct fs_context_operations isofs_context_ops = {
+diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
+index acd32f05b51988..ef3a1e1b6cb065 100644
+--- a/fs/jffs2/erase.c
++++ b/fs/jffs2/erase.c
+@@ -338,10 +338,9 @@ static int jffs2_block_check_erase(struct jffs2_sb_info *c, struct jffs2_erasebl
+ 		} while(--retlen);
+ 		mtd_unpoint(c->mtd, jeb->offset, c->sector_size);
+ 		if (retlen) {
+-			pr_warn("Newly-erased block contained word 0x%lx at offset 0x%08tx\n",
+-				*wordebuf,
+-				jeb->offset +
+-				c->sector_size-retlen * sizeof(*wordebuf));
++			*bad_offset = jeb->offset + c->sector_size - retlen * sizeof(*wordebuf);
++			pr_warn("Newly-erased block contained word 0x%lx at offset 0x%08x\n",
++				*wordebuf, *bad_offset);
+ 			return -EIO;
+ 		}
+ 		return 0;
+diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c
+index 0fb05e314edf60..24afbae87225a7 100644
+--- a/fs/jfs/xattr.c
++++ b/fs/jfs/xattr.c
+@@ -559,7 +559,7 @@ static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size)
+ 
+       size_check:
+ 	if (EALIST_SIZE(ea_buf->xattr) != ea_size) {
+-		int size = min_t(int, EALIST_SIZE(ea_buf->xattr), ea_size);
++		int size = clamp_t(int, ea_size, 0, EALIST_SIZE(ea_buf->xattr));
+ 
+ 		printk(KERN_ERR "ea_get: invalid extended attribute\n");
+ 		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1,
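
min_t() only caps the upper end, so a negative ea_size read from a
corrupted inode survived as a negative copy length; clamp_t() pins the
value into [0, EALIST_SIZE()] first. In plain C (macro spellings follow the
kernel, values invented):

    #include <stdio.h>

    #define min_t(t, a, b)  ((t)(a) < (t)(b) ? (t)(a) : (t)(b))
    #define clamp_t(t, v, lo, hi) \
        ((t)(v) < (t)(lo) ? (t)(lo) : ((t)(v) > (t)(hi) ? (t)(hi) : (t)(v)))

    int main(void)
    {
        int ealist_size = 128;
        int ea_size = -4;        /* corrupted on-disk value */

        /* old code: a negative size survives into the later copy */
        printf("min_t:   %d\n", min_t(int, ealist_size, ea_size));     /* -4 */
        /* new code: the value is pinned to a sane range first */
        printf("clamp_t: %d\n", clamp_t(int, ea_size, 0, ealist_size)); /* 0 */
        return 0;
    }
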
+diff --git a/fs/netfs/fscache_volume.c b/fs/netfs/fscache_volume.c
+index cb75c07b5281a5..ced14ac78cc1c2 100644
+--- a/fs/netfs/fscache_volume.c
++++ b/fs/netfs/fscache_volume.c
+@@ -322,8 +322,7 @@ void fscache_create_volume(struct fscache_volume *volume, bool wait)
+ 	}
+ 	return;
+ no_wait:
+-	clear_bit_unlock(FSCACHE_VOLUME_CREATING, &volume->flags);
+-	wake_up_bit(&volume->flags, FSCACHE_VOLUME_CREATING);
++	clear_and_wake_up_bit(FSCACHE_VOLUME_CREATING, &volume->flags);
+ }
+ 
+ /*
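
clear_and_wake_up_bit() from <linux/wait_bit.h> is the two-step sequence
the removed lines spelled out, plus an explicit barrier between the clear
and the wakeup; its body is essentially:

    clear_bit_unlock(bit, word);    /* release semantics */
    smp_mb__after_atomic();         /* order the clear before the waiter check */
    wake_up_bit(word, bit);
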
+diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
+index 0becdec129704f..47189476b5538b 100644
+--- a/fs/nfs/blocklayout/blocklayout.c
++++ b/fs/nfs/blocklayout/blocklayout.c
+@@ -571,19 +571,32 @@ bl_find_get_deviceid(struct nfs_server *server,
+ 	if (!node)
+ 		return ERR_PTR(-ENODEV);
+ 
++	/*
++	 * Devices that are marked unavailable are left in the cache with a
++	 * timeout to avoid sending GETDEVINFO after every LAYOUTGET, or
++	 * constantly attempting to register the device.  Once marked as
++	 * unavailable, they must be deleted and never reused.
++	 */
+ 	if (test_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags)) {
+ 		unsigned long end = jiffies;
+ 		unsigned long start = end - PNFS_DEVICE_RETRY_TIMEOUT;
+ 
+ 		if (!time_in_range(node->timestamp_unavailable, start, end)) {
++			/* Uncork subsequent GETDEVINFO operations for this device */
+ 			nfs4_delete_deviceid(node->ld, node->nfs_client, id);
+ 			goto retry;
+ 		}
+ 		goto out_put;
+ 	}
+ 
+-	if (!bl_register_dev(container_of(node, struct pnfs_block_dev, node)))
++	if (!bl_register_dev(container_of(node, struct pnfs_block_dev, node))) {
++		/*
++		 * If we cannot register, treat this device as transient:
++		 * Make a negative cache entry for the device
++		 */
++		nfs4_mark_deviceid_unavailable(node);
+ 		goto out_put;
++	}
+ 
+ 	return node;
+ 
+diff --git a/fs/nfs/blocklayout/dev.c b/fs/nfs/blocklayout/dev.c
+index 6252f44479457b..cab8809f0e0f48 100644
+--- a/fs/nfs/blocklayout/dev.c
++++ b/fs/nfs/blocklayout/dev.c
+@@ -20,9 +20,6 @@ static void bl_unregister_scsi(struct pnfs_block_dev *dev)
+ 	const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
+ 	int status;
+ 
+-	if (!test_and_clear_bit(PNFS_BDEV_REGISTERED, &dev->flags))
+-		return;
+-
+ 	status = ops->pr_register(bdev, dev->pr_key, 0, false);
+ 	if (status)
+ 		trace_bl_pr_key_unreg_err(bdev, dev->pr_key, status);
+@@ -58,7 +55,8 @@ static void bl_unregister_dev(struct pnfs_block_dev *dev)
+ 		return;
+ 	}
+ 
+-	if (dev->type == PNFS_BLOCK_VOLUME_SCSI)
++	if (dev->type == PNFS_BLOCK_VOLUME_SCSI &&
++		test_and_clear_bit(PNFS_BDEV_REGISTERED, &dev->flags))
+ 		bl_unregister_scsi(dev);
+ }
+ 
+diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
+index 430733e3eff260..6bcc4b0e00ab72 100644
+--- a/fs/nfs/internal.h
++++ b/fs/nfs/internal.h
+@@ -12,7 +12,7 @@
+ #include <linux/nfslocalio.h>
+ #include <linux/wait_bit.h>
+ 
+-#define NFS_SB_MASK (SB_RDONLY|SB_NOSUID|SB_NODEV|SB_NOEXEC|SB_SYNCHRONOUS)
++#define NFS_SB_MASK (SB_NOSUID|SB_NODEV|SB_NOEXEC|SB_SYNCHRONOUS)
+ 
+ extern const struct export_operations nfs_export_ops;
+ 
+diff --git a/fs/nfs/localio.c b/fs/nfs/localio.c
+index 8f0ce82a677e15..637528e6368ef7 100644
+--- a/fs/nfs/localio.c
++++ b/fs/nfs/localio.c
+@@ -354,6 +354,12 @@ nfs_local_read_done(struct nfs_local_kiocb *iocb, long status)
+ 
+ 	nfs_local_pgio_done(hdr, status);
+ 
++	/*
++	 * Must clear replen otherwise NFSv3 data corruption will occur
++	 * if/when switching from LOCALIO back to using normal RPC.
++	 */
++	hdr->res.replen = 0;
++
+ 	if (hdr->res.count != hdr->args.count ||
+ 	    hdr->args.offset + hdr->res.count >= i_size_read(file_inode(filp)))
+ 		hdr->res.eof = true;
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 9d40319e063dea..405f17e6e0b45b 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -2603,12 +2603,14 @@ static void nfs4_open_release(void *calldata)
+ 	struct nfs4_opendata *data = calldata;
+ 	struct nfs4_state *state = NULL;
+ 
++	/* In case of error, no cleanup! */
++	if (data->rpc_status != 0 || !data->rpc_done) {
++		nfs_release_seqid(data->o_arg.seqid);
++		goto out_free;
++	}
+ 	/* If this request hasn't been cancelled, do nothing */
+ 	if (!data->cancelled)
+ 		goto out_free;
+-	/* In case of error, no cleanup! */
+-	if (data->rpc_status != 0 || !data->rpc_done)
+-		goto out_free;
+ 	/* In case we need an open_confirm, no cleanup! */
+ 	if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)
+ 		goto out_free;
+diff --git a/fs/nfs/write.c b/fs/nfs/write.c
+index ead2dc55952dba..82ae2b85d393cb 100644
+--- a/fs/nfs/write.c
++++ b/fs/nfs/write.c
+@@ -144,6 +144,31 @@ static void nfs_io_completion_put(struct nfs_io_completion *ioc)
+ 		kref_put(&ioc->refcount, nfs_io_completion_release);
+ }
+ 
++static void
++nfs_page_set_inode_ref(struct nfs_page *req, struct inode *inode)
++{
++	if (!test_and_set_bit(PG_INODE_REF, &req->wb_flags)) {
++		kref_get(&req->wb_kref);
++		atomic_long_inc(&NFS_I(inode)->nrequests);
++	}
++}
++
++static int
++nfs_cancel_remove_inode(struct nfs_page *req, struct inode *inode)
++{
++	int ret;
++
++	if (!test_bit(PG_REMOVE, &req->wb_flags))
++		return 0;
++	ret = nfs_page_group_lock(req);
++	if (ret)
++		return ret;
++	if (test_and_clear_bit(PG_REMOVE, &req->wb_flags))
++		nfs_page_set_inode_ref(req, inode);
++	nfs_page_group_unlock(req);
++	return 0;
++}
++
+ /**
+  * nfs_folio_find_head_request - find head request associated with a folio
+  * @folio: pointer to folio
+@@ -540,7 +565,6 @@ static struct nfs_page *nfs_lock_and_join_requests(struct folio *folio)
+ 	struct inode *inode = folio->mapping->host;
+ 	struct nfs_page *head, *subreq;
+ 	struct nfs_commit_info cinfo;
+-	bool removed;
+ 	int ret;
+ 
+ 	/*
+@@ -565,18 +589,18 @@ static struct nfs_page *nfs_lock_and_join_requests(struct folio *folio)
+ 		goto retry;
+ 	}
+ 
+-	ret = nfs_page_group_lock(head);
++	ret = nfs_cancel_remove_inode(head, inode);
+ 	if (ret < 0)
+ 		goto out_unlock;
+ 
+-	removed = test_bit(PG_REMOVE, &head->wb_flags);
++	ret = nfs_page_group_lock(head);
++	if (ret < 0)
++		goto out_unlock;
+ 
+ 	/* lock each request in the page group */
+ 	for (subreq = head->wb_this_page;
+ 	     subreq != head;
+ 	     subreq = subreq->wb_this_page) {
+-		if (test_bit(PG_REMOVE, &subreq->wb_flags))
+-			removed = true;
+ 		ret = nfs_page_group_lock_subreq(head, subreq);
+ 		if (ret < 0)
+ 			goto out_unlock;
+@@ -584,21 +608,6 @@ static struct nfs_page *nfs_lock_and_join_requests(struct folio *folio)
+ 
+ 	nfs_page_group_unlock(head);
+ 
+-	/*
+-	 * If PG_REMOVE is set on any request, I/O on that request has
+-	 * completed, but some requests were still under I/O at the time
+-	 * we locked the head request.
+-	 *
+-	 * In that case the above wait for all requests means that all I/O
+-	 * has now finished, and we can restart from a clean slate.  Let the
+-	 * old requests go away and start from scratch instead.
+-	 */
+-	if (removed) {
+-		nfs_unroll_locks(head, head);
+-		nfs_unlock_and_release_request(head);
+-		goto retry;
+-	}
+-
+ 	nfs_init_cinfo_from_inode(&cinfo, inode);
+ 	nfs_join_page_group(head, &cinfo, inode);
+ 	return head;
+diff --git a/fs/nfs_common/nfslocalio.c b/fs/nfs_common/nfslocalio.c
+index 09404d142d1ae6..a74ec08f6c96d0 100644
+--- a/fs/nfs_common/nfslocalio.c
++++ b/fs/nfs_common/nfslocalio.c
+@@ -155,11 +155,9 @@ struct nfsd_file *nfs_open_local_fh(nfs_uuid_t *uuid,
+ 	/* We have an implied reference to net thanks to nfsd_serv_try_get */
+ 	localio = nfs_to->nfsd_open_local_fh(net, uuid->dom, rpc_clnt,
+ 					     cred, nfs_fh, fmode);
+-	if (IS_ERR(localio)) {
+-		rcu_read_lock();
+-		nfs_to->nfsd_serv_put(net);
+-		rcu_read_unlock();
+-	}
++	if (IS_ERR(localio))
++		nfs_to_nfsd_net_put(net);
++
+ 	return localio;
+ }
+ EXPORT_SYMBOL_GPL(nfs_open_local_fh);
+diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
+index c82d8e3e0d4f28..984f8e6379dd47 100644
+--- a/fs/nfsd/export.c
++++ b/fs/nfsd/export.c
+@@ -40,15 +40,24 @@
+ #define	EXPKEY_HASHMAX		(1 << EXPKEY_HASHBITS)
+ #define	EXPKEY_HASHMASK		(EXPKEY_HASHMAX -1)
+ 
+-static void expkey_put(struct kref *ref)
++static void expkey_put_work(struct work_struct *work)
+ {
+-	struct svc_expkey *key = container_of(ref, struct svc_expkey, h.ref);
++	struct svc_expkey *key =
++		container_of(to_rcu_work(work), struct svc_expkey, ek_rcu_work);
+ 
+ 	if (test_bit(CACHE_VALID, &key->h.flags) &&
+ 	    !test_bit(CACHE_NEGATIVE, &key->h.flags))
+ 		path_put(&key->ek_path);
+ 	auth_domain_put(key->ek_client);
+-	kfree_rcu(key, ek_rcu);
++	kfree(key);
++}
++
++static void expkey_put(struct kref *ref)
++{
++	struct svc_expkey *key = container_of(ref, struct svc_expkey, h.ref);
++
++	INIT_RCU_WORK(&key->ek_rcu_work, expkey_put_work);
++	queue_rcu_work(system_wq, &key->ek_rcu_work);
+ }
+ 
+ static int expkey_upcall(struct cache_detail *cd, struct cache_head *h)
+@@ -355,16 +364,26 @@ static void export_stats_destroy(struct export_stats *stats)
+ 					    EXP_STATS_COUNTERS_NUM);
+ }
+ 
+-static void svc_export_put(struct kref *ref)
++static void svc_export_put_work(struct work_struct *work)
+ {
+-	struct svc_export *exp = container_of(ref, struct svc_export, h.ref);
++	struct svc_export *exp =
++		container_of(to_rcu_work(work), struct svc_export, ex_rcu_work);
++
+ 	path_put(&exp->ex_path);
+ 	auth_domain_put(exp->ex_client);
+ 	nfsd4_fslocs_free(&exp->ex_fslocs);
+ 	export_stats_destroy(exp->ex_stats);
+ 	kfree(exp->ex_stats);
+ 	kfree(exp->ex_uuid);
+-	kfree_rcu(exp, ex_rcu);
++	kfree(exp);
++}
++
++static void svc_export_put(struct kref *ref)
++{
++	struct svc_export *exp = container_of(ref, struct svc_export, h.ref);
++
++	INIT_RCU_WORK(&exp->ex_rcu_work, svc_export_put_work);
++	queue_rcu_work(system_wq, &exp->ex_rcu_work);
+ }
+ 
+ static int svc_export_upcall(struct cache_detail *cd, struct cache_head *h)
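
kfree_rcu() can only defer the final kfree(); everything else in the
release path still ran before the grace period. Bouncing through
queue_rcu_work() instead runs the whole teardown in process context after a
grace period, so path_put() and auth_domain_put() are deferred too. The
general pattern, as a kernel-context sketch with invented names:

    struct obj {
        struct rcu_work rwork;
        /* ... payload ... */
    };

    static void obj_free_work(struct work_struct *work)
    {
        struct obj *o = container_of(to_rcu_work(work), struct obj, rwork);

        /* process context, after a full RCU grace period: safe to drop
         * other references here, not just to kfree() */
        kfree(o);
    }

    static void obj_put(struct obj *o)
    {
        INIT_RCU_WORK(&o->rwork, obj_free_work);
        queue_rcu_work(system_wq, &o->rwork);
    }
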
+diff --git a/fs/nfsd/export.h b/fs/nfsd/export.h
+index 3794ae253a7016..081afb68681e14 100644
+--- a/fs/nfsd/export.h
++++ b/fs/nfsd/export.h
+@@ -75,7 +75,7 @@ struct svc_export {
+ 	u32			ex_layout_types;
+ 	struct nfsd4_deviceid_map *ex_devid_map;
+ 	struct cache_detail	*cd;
+-	struct rcu_head		ex_rcu;
++	struct rcu_work		ex_rcu_work;
+ 	unsigned long		ex_xprtsec_modes;
+ 	struct export_stats	*ex_stats;
+ };
+@@ -92,7 +92,7 @@ struct svc_expkey {
+ 	u32			ek_fsid[6];
+ 
+ 	struct path		ek_path;
+-	struct rcu_head		ek_rcu;
++	struct rcu_work		ek_rcu_work;
+ };
+ 
+ #define EX_ISSYNC(exp)		(!((exp)->ex_flags & NFSEXP_ASYNC))
+diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c
+index 2e6783f6371245..146a9463c3c230 100644
+--- a/fs/nfsd/filecache.c
++++ b/fs/nfsd/filecache.c
+@@ -391,19 +391,19 @@ nfsd_file_put(struct nfsd_file *nf)
+ }
+ 
+ /**
+- * nfsd_file_put_local - put the reference to nfsd_file and local nfsd_serv
+- * @nf: nfsd_file of which to put the references
++ * nfsd_file_put_local - put nfsd_file reference and arm nfsd_serv_put in caller
++ * @nf: nfsd_file of which to put the reference
+  *
+- * First put the reference of the nfsd_file and then put the
+- * reference to the associated nn->nfsd_serv.
++ * First save the associated net to return to caller, then put
++ * the reference of the nfsd_file.
+  */
+-void
+-nfsd_file_put_local(struct nfsd_file *nf) __must_hold(rcu)
++struct net *
++nfsd_file_put_local(struct nfsd_file *nf)
+ {
+ 	struct net *net = nf->nf_net;
+ 
+ 	nfsd_file_put(nf);
+-	nfsd_serv_put(net);
++	return net;
+ }
+ 
+ /**
+diff --git a/fs/nfsd/filecache.h b/fs/nfsd/filecache.h
+index cadf3c2689c44c..d5db6b34ba302c 100644
+--- a/fs/nfsd/filecache.h
++++ b/fs/nfsd/filecache.h
+@@ -55,7 +55,7 @@ void nfsd_file_cache_shutdown(void);
+ int nfsd_file_cache_start_net(struct net *net);
+ void nfsd_file_cache_shutdown_net(struct net *net);
+ void nfsd_file_put(struct nfsd_file *nf);
+-void nfsd_file_put_local(struct nfsd_file *nf);
++struct net *nfsd_file_put_local(struct nfsd_file *nf);
+ struct nfsd_file *nfsd_file_get(struct nfsd_file *nf);
+ struct file *nfsd_file_file(struct nfsd_file *nf);
+ void nfsd_file_close_inode_sync(struct inode *inode);
+diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
+index b5b3ab9d719a74..b8cbb15560040f 100644
+--- a/fs/nfsd/nfs4callback.c
++++ b/fs/nfsd/nfs4callback.c
+@@ -287,17 +287,17 @@ static int decode_cb_compound4res(struct xdr_stream *xdr,
+ 	u32 length;
+ 	__be32 *p;
+ 
+-	p = xdr_inline_decode(xdr, 4 + 4);
++	p = xdr_inline_decode(xdr, XDR_UNIT);
+ 	if (unlikely(p == NULL))
+ 		goto out_overflow;
+-	hdr->status = be32_to_cpup(p++);
++	hdr->status = be32_to_cpup(p);
+ 	/* Ignore the tag */
+-	length = be32_to_cpup(p++);
+-	p = xdr_inline_decode(xdr, length + 4);
+-	if (unlikely(p == NULL))
++	if (xdr_stream_decode_u32(xdr, &length) < 0)
++		goto out_overflow;
++	if (xdr_inline_decode(xdr, length) == NULL)
++		goto out_overflow;
++	if (xdr_stream_decode_u32(xdr, &hdr->nops) < 0)
+ 		goto out_overflow;
+-	p += XDR_QUADLEN(length);
+-	hdr->nops = be32_to_cpup(p);
+ 	return 0;
+ out_overflow:
+ 	return -EIO;
+@@ -1461,6 +1461,8 @@ static void nfsd4_process_cb_update(struct nfsd4_callback *cb)
+ 		ses = c->cn_session;
+ 	}
+ 	spin_unlock(&clp->cl_lock);
++	if (!c)
++		return;
+ 
+ 	err = setup_callback_client(clp, &conn, ses);
+ 	if (err) {
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index d32f2dfd148fe3..7a1fdafa42ea17 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -1292,7 +1292,7 @@ static void nfsd4_stop_copy(struct nfsd4_copy *copy)
+ 	nfs4_put_copy(copy);
+ }
+ 
+-static struct nfsd4_copy *nfsd4_get_copy(struct nfs4_client *clp)
++static struct nfsd4_copy *nfsd4_unhash_copy(struct nfs4_client *clp)
+ {
+ 	struct nfsd4_copy *copy = NULL;
+ 
+@@ -1301,6 +1301,9 @@ static struct nfsd4_copy *nfsd4_get_copy(struct nfs4_client *clp)
+ 		copy = list_first_entry(&clp->async_copies, struct nfsd4_copy,
+ 					copies);
+ 		refcount_inc(&copy->refcount);
++		copy->cp_clp = NULL;
++		if (!list_empty(&copy->copies))
++			list_del_init(&copy->copies);
+ 	}
+ 	spin_unlock(&clp->async_lock);
+ 	return copy;
+@@ -1310,7 +1313,7 @@ void nfsd4_shutdown_copy(struct nfs4_client *clp)
+ {
+ 	struct nfsd4_copy *copy;
+ 
+-	while ((copy = nfsd4_get_copy(clp)) != NULL)
++	while ((copy = nfsd4_unhash_copy(clp)) != NULL)
+ 		nfsd4_stop_copy(copy);
+ }
+ #ifdef CONFIG_NFSD_V4_2_INTER_SSC
+diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
+index b7d61eb8afe9e1..4a765555bf8459 100644
+--- a/fs/nfsd/nfs4recover.c
++++ b/fs/nfsd/nfs4recover.c
+@@ -659,7 +659,8 @@ nfs4_reset_recoverydir(char *recdir)
+ 		return status;
+ 	status = -ENOTDIR;
+ 	if (d_is_dir(path.dentry)) {
+-		strcpy(user_recovery_dirname, recdir);
++		strscpy(user_recovery_dirname, recdir,
++			sizeof(user_recovery_dirname));
+ 		status = 0;
+ 	}
+ 	path_put(&path);
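
strscpy() replaces the unbounded strcpy(): it copies at most size - 1
bytes, always NUL-terminates, and returns -E2BIG when the source had to be
truncated. A userspace stand-in with the same contract (a model, not the
kernel implementation):

    #include <stdio.h>
    #include <string.h>

    /* minimal userspace model of the kernel's strscpy() contract */
    static long strscpy_model(char *dst, const char *src, size_t size)
    {
        size_t len = strnlen(src, size);

        if (size == 0)
            return -7; /* -E2BIG */
        if (len == size) {            /* would not fit with the NUL */
            memcpy(dst, src, size - 1);
            dst[size - 1] = '\0';
            return -7; /* -E2BIG: truncated */
        }
        memcpy(dst, src, len + 1);
        return (long)len;
    }

    int main(void)
    {
        char buf[8];

        printf("%ld '%s'\n", strscpy_model(buf, "recovery-dir", sizeof(buf)), buf);
        /* -7 'recover' : truncation is reported instead of overflowing */
        return 0;
    }
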
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 551d2958ec2905..d3cfc647153993 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -5957,7 +5957,7 @@ nfs4_delegation_stat(struct nfs4_delegation *dp, struct svc_fh *currentfh,
+ 	path.dentry = file_dentry(nf->nf_file);
+ 
+ 	rc = vfs_getattr(&path, stat,
+-			 (STATX_SIZE | STATX_CTIME | STATX_CHANGE_COOKIE),
++			 (STATX_MODE | STATX_SIZE | STATX_CTIME | STATX_CHANGE_COOKIE),
+ 			 AT_STATX_SYNC_AS_STAT);
+ 
+ 	nfsd_file_put(nf);
+@@ -6041,8 +6041,7 @@ nfs4_open_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
+ 		}
+ 		open->op_delegate_type = NFS4_OPEN_DELEGATE_WRITE;
+ 		dp->dl_cb_fattr.ncf_cur_fsize = stat.size;
+-		dp->dl_cb_fattr.ncf_initial_cinfo =
+-			nfsd4_change_attribute(&stat, d_inode(currentfh->fh_dentry));
++		dp->dl_cb_fattr.ncf_initial_cinfo = nfsd4_change_attribute(&stat);
+ 		trace_nfsd_deleg_write(&dp->dl_stid.sc_stateid);
+ 	} else {
+ 		open->op_delegate_type = NFS4_OPEN_DELEGATE_READ;
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index f118921250c316..8d25aef51ad150 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -3040,7 +3040,7 @@ static __be32 nfsd4_encode_fattr4_change(struct xdr_stream *xdr,
+ 		return nfs_ok;
+ 	}
+ 
+-	c = nfsd4_change_attribute(&args->stat, d_inode(args->dentry));
++	c = nfsd4_change_attribute(&args->stat);
+ 	return nfsd4_encode_changeid4(xdr, c);
+ }
+ 
+diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
+index 40ad58a6a0361e..96e19c50a5d7ee 100644
+--- a/fs/nfsd/nfsfh.c
++++ b/fs/nfsd/nfsfh.c
+@@ -667,20 +667,18 @@ fh_update(struct svc_fh *fhp)
+ __be32 __must_check fh_fill_pre_attrs(struct svc_fh *fhp)
+ {
+ 	bool v4 = (fhp->fh_maxsize == NFS4_FHSIZE);
+-	struct inode *inode;
+ 	struct kstat stat;
+ 	__be32 err;
+ 
+ 	if (fhp->fh_no_wcc || fhp->fh_pre_saved)
+ 		return nfs_ok;
+ 
+-	inode = d_inode(fhp->fh_dentry);
+ 	err = fh_getattr(fhp, &stat);
+ 	if (err)
+ 		return err;
+ 
+ 	if (v4)
+-		fhp->fh_pre_change = nfsd4_change_attribute(&stat, inode);
++		fhp->fh_pre_change = nfsd4_change_attribute(&stat);
+ 
+ 	fhp->fh_pre_mtime = stat.mtime;
+ 	fhp->fh_pre_ctime = stat.ctime;
+@@ -697,7 +695,6 @@ __be32 __must_check fh_fill_pre_attrs(struct svc_fh *fhp)
+ __be32 fh_fill_post_attrs(struct svc_fh *fhp)
+ {
+ 	bool v4 = (fhp->fh_maxsize == NFS4_FHSIZE);
+-	struct inode *inode = d_inode(fhp->fh_dentry);
+ 	__be32 err;
+ 
+ 	if (fhp->fh_no_wcc)
+@@ -713,7 +710,7 @@ __be32 fh_fill_post_attrs(struct svc_fh *fhp)
+ 	fhp->fh_post_saved = true;
+ 	if (v4)
+ 		fhp->fh_post_change =
+-			nfsd4_change_attribute(&fhp->fh_post_attr, inode);
++			nfsd4_change_attribute(&fhp->fh_post_attr);
+ 	return nfs_ok;
+ }
+ 
+@@ -804,7 +801,14 @@ enum fsid_source fsid_source(const struct svc_fh *fhp)
+ 	return FSIDSOURCE_DEV;
+ }
+ 
+-/*
++/**
++ * nfsd4_change_attribute - Generate an NFSv4 change_attribute value
++ * @stat: inode attributes
++ *
++ * Caller must fill in @stat before calling, typically by invoking
++ * vfs_getattr() with STATX_MODE, STATX_CTIME, and STATX_CHANGE_COOKIE.
++ * Returns an unsigned 64-bit changeid4 value (RFC 8881 Section 3.2).
++ *
+  * We could use i_version alone as the change attribute.  However, i_version
+  * can go backwards on a regular file after an unclean shutdown.  On its own
+  * that doesn't necessarily cause a problem, but if i_version goes backwards
+@@ -821,13 +825,13 @@ enum fsid_source fsid_source(const struct svc_fh *fhp)
+  * assume that the new change attr is always logged to stable storage in some
+  * fashion before the results can be seen.
+  */
+-u64 nfsd4_change_attribute(const struct kstat *stat, const struct inode *inode)
++u64 nfsd4_change_attribute(const struct kstat *stat)
+ {
+ 	u64 chattr;
+ 
+ 	if (stat->result_mask & STATX_CHANGE_COOKIE) {
+ 		chattr = stat->change_cookie;
+-		if (S_ISREG(inode->i_mode) &&
++		if (S_ISREG(stat->mode) &&
+ 		    !(stat->attributes & STATX_ATTR_CHANGE_MONOTONIC)) {
+ 			chattr += (u64)stat->ctime.tv_sec << 30;
+ 			chattr += stat->ctime.tv_nsec;
+diff --git a/fs/nfsd/nfsfh.h b/fs/nfsd/nfsfh.h
+index 5b7394801dc427..876152a91f122f 100644
+--- a/fs/nfsd/nfsfh.h
++++ b/fs/nfsd/nfsfh.h
+@@ -297,8 +297,7 @@ static inline void fh_clear_pre_post_attrs(struct svc_fh *fhp)
+ 	fhp->fh_pre_saved = false;
+ }
+ 
+-u64 nfsd4_change_attribute(const struct kstat *stat,
+-			   const struct inode *inode);
++u64 nfsd4_change_attribute(const struct kstat *stat);
+ __be32 __must_check fh_fill_pre_attrs(struct svc_fh *fhp);
+ __be32 fh_fill_post_attrs(struct svc_fh *fhp);
+ __be32 __must_check fh_fill_both_attrs(struct svc_fh *fhp);
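
Once vfs_getattr() is asked for STATX_MODE as well (see the nfs4state.c
hunk above), everything nfsd4_change_attribute() mixes into the changeid4
lives in the kstat, so the inode argument can go. The change-cookie branch
of that computation, as a self-contained sketch with invented demo flag
values:

    #include <stdint.h>
    #include <stdio.h>

    /* demo flag values, not the real statx ABI constants */
    #define DEMO_CHANGE_COOKIE    0x1u
    #define DEMO_CHANGE_MONOTONIC 0x2u

    struct stat_demo {
        uint32_t result_mask;
        int      is_reg;
        uint64_t attributes, change_cookie;
        int64_t  ctime_sec;
        long     ctime_nsec;
    };

    static uint64_t change_attr(const struct stat_demo *st)
    {
        uint64_t chattr = st->change_cookie;

        /* regular files whose i_version may go backwards after a crash
         * get the ctime folded in so the value still moves forward */
        if ((st->result_mask & DEMO_CHANGE_COOKIE) && st->is_reg &&
            !(st->attributes & DEMO_CHANGE_MONOTONIC)) {
            chattr += (uint64_t)st->ctime_sec << 30;
            chattr += (uint64_t)st->ctime_nsec;
        }
        return chattr;
    }

    int main(void)
    {
        struct stat_demo st = { DEMO_CHANGE_COOKIE, 1, 0, 7, 1000, 5 };

        /* 7 + (1000 << 30) + 5 */
        printf("%llu\n", (unsigned long long)change_attr(&st));
        return 0;
    }
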
+diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
+index 82ae8254c068be..f976949d2634a1 100644
+--- a/fs/notify/fsnotify.c
++++ b/fs/notify/fsnotify.c
+@@ -333,16 +333,19 @@ static int fsnotify_handle_event(struct fsnotify_group *group, __u32 mask,
+ 	if (!inode_mark)
+ 		return 0;
+ 
+-	if (mask & FS_EVENT_ON_CHILD) {
+-		/*
+-		 * Some events can be sent on both parent dir and child marks
+-		 * (e.g. FS_ATTRIB).  If both parent dir and child are
+-		 * watching, report the event once to parent dir with name (if
+-		 * interested) and once to child without name (if interested).
+-		 * The child watcher is expecting an event without a file name
+-		 * and without the FS_EVENT_ON_CHILD flag.
+-		 */
+-		mask &= ~FS_EVENT_ON_CHILD;
++	/*
++	 * Some events can be sent on both parent dir and child marks (e.g.
++	 * FS_ATTRIB).  If both parent dir and child are watching, report the
++	 * event once to parent dir with name (if interested) and once to child
++	 * without name (if interested).
++	 *
++	 * In any case, regardless of whether the parent is watching or not,
++	 * the child watcher is expecting an event without the FS_EVENT_ON_CHILD
++	 * flag. The file name is expected if and only if this is a directory
++	 * event.
++	 */
++	mask &= ~FS_EVENT_ON_CHILD;
++	if (!(mask & ALL_FSNOTIFY_DIRENT_EVENTS)) {
+ 		dir = NULL;
+ 		name = NULL;
+ 	}
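
The rewritten logic always strips FS_EVENT_ON_CHILD for the child mark and
passes the directory/name pair only for directory-entry events. A compact
userspace model of that decision (flag values invented):

    #include <stdio.h>

    #define FS_EVENT_ON_CHILD 0x08000000u   /* demo values */
    #define FS_ATTRIB         0x00000004u
    #define FS_CREATE         0x00000100u
    #define DIRENT_EVENTS     (FS_CREATE)   /* stands in for ALL_FSNOTIFY_DIRENT_EVENTS */

    static void report_to_child(unsigned mask, const char *dir, const char *name)
    {
        mask &= ~FS_EVENT_ON_CHILD;         /* child never sees this flag */
        if (!(mask & DIRENT_EVENTS)) {      /* name only for dirent events */
            dir = NULL;
            name = NULL;
        }
        printf("mask=%#x dir=%s name=%s\n", mask,
               dir ? dir : "(null)", name ? name : "(null)");
    }

    int main(void)
    {
        report_to_child(FS_ATTRIB | FS_EVENT_ON_CHILD, "/tmp", "f");
        /* mask=0x4 dir=(null) name=(null) */
        report_to_child(FS_CREATE, "/tmp", "f");
        /* mask=0x100 dir=/tmp name=f */
        return 0;
    }
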
+diff --git a/fs/notify/mark.c b/fs/notify/mark.c
+index c45b222cf9c11c..4981439e62092a 100644
+--- a/fs/notify/mark.c
++++ b/fs/notify/mark.c
+@@ -138,8 +138,11 @@ static void fsnotify_get_sb_watched_objects(struct super_block *sb)
+ 
+ static void fsnotify_put_sb_watched_objects(struct super_block *sb)
+ {
+-	if (atomic_long_dec_and_test(fsnotify_sb_watched_objects(sb)))
+-		wake_up_var(fsnotify_sb_watched_objects(sb));
++	atomic_long_t *watched_objects = fsnotify_sb_watched_objects(sb);
++
++	/* the superblock can go away after this decrement */
++	if (atomic_long_dec_and_test(watched_objects))
++		wake_up_var(watched_objects);
+ }
+ 
+ static void fsnotify_get_inode_ref(struct inode *inode)
+@@ -150,8 +153,11 @@ static void fsnotify_get_inode_ref(struct inode *inode)
+ 
+ static void fsnotify_put_inode_ref(struct inode *inode)
+ {
+-	fsnotify_put_sb_watched_objects(inode->i_sb);
++	/* read ->i_sb before the inode can go away */
++	struct super_block *sb = inode->i_sb;
++
+ 	iput(inode);
++	fsnotify_put_sb_watched_objects(sb);
+ }
+ 
+ /*
+diff --git a/fs/ntfs3/file.c b/fs/ntfs3/file.c
+index e370eaf9bfe2ed..f704ceef953948 100644
+--- a/fs/ntfs3/file.c
++++ b/fs/ntfs3/file.c
+@@ -222,7 +222,7 @@ static int ntfs_extend_initialized_size(struct file *file,
+ 		if (err)
+ 			goto out;
+ 
+-		folio_zero_range(folio, zerofrom, folio_size(folio));
++		folio_zero_range(folio, zerofrom, folio_size(folio) - zerofrom);
+ 
+ 		err = ntfs_write_end(file, mapping, pos, len, len, folio, NULL);
+ 		if (err < 0)
+diff --git a/fs/ocfs2/aops.h b/fs/ocfs2/aops.h
+index 45db1781ea735a..1d1b4b7edba02e 100644
+--- a/fs/ocfs2/aops.h
++++ b/fs/ocfs2/aops.h
+@@ -70,6 +70,8 @@ enum ocfs2_iocb_lock_bits {
+ 	OCFS2_IOCB_NUM_LOCKS
+ };
+ 
++#define ocfs2_iocb_init_rw_locked(iocb) \
++	(iocb->private = NULL)
+ #define ocfs2_iocb_clear_rw_locked(iocb) \
+ 	clear_bit(OCFS2_IOCB_RW_LOCK, (unsigned long *)&iocb->private)
+ #define ocfs2_iocb_rw_locked_level(iocb) \
+diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
+index 06af21982c16ab..cb09330a086119 100644
+--- a/fs/ocfs2/file.c
++++ b/fs/ocfs2/file.c
+@@ -2398,6 +2398,8 @@ static ssize_t ocfs2_file_write_iter(struct kiocb *iocb,
+ 	} else
+ 		inode_lock(inode);
+ 
++	ocfs2_iocb_init_rw_locked(iocb);
++
+ 	/*
+ 	 * Concurrent O_DIRECT writes are allowed with
+ 	 * mount_option "coherency=buffered".
+@@ -2544,6 +2546,8 @@ static ssize_t ocfs2_file_read_iter(struct kiocb *iocb,
+ 	if (!direct_io && nowait)
+ 		return -EOPNOTSUPP;
+ 
++	ocfs2_iocb_init_rw_locked(iocb);
++
+ 	/*
+ 	 * buffered reads protect themselves in ->read_folio().  O_DIRECT reads
+ 	 * need locks to protect pending reads from racing with truncate.
+diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
+index 51446c59388f10..7a85735d584f35 100644
+--- a/fs/proc/kcore.c
++++ b/fs/proc/kcore.c
+@@ -493,13 +493,13 @@ static ssize_t read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter)
+ 		 * the previous entry, search for a matching entry.
+ 		 */
+ 		if (!m || start < m->addr || start >= m->addr + m->size) {
+-			struct kcore_list *iter;
++			struct kcore_list *pos;
+ 
+ 			m = NULL;
+-			list_for_each_entry(iter, &kclist_head, list) {
+-				if (start >= iter->addr &&
+-				    start < iter->addr + iter->size) {
+-					m = iter;
++			list_for_each_entry(pos, &kclist_head, list) {
++				if (start >= pos->addr &&
++				    start < pos->addr + pos->size) {
++					m = pos;
+ 					break;
+ 				}
+ 			}
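
read_kcore_iter() already takes a parameter named iter (the iov_iter), so
the local struct kcore_list *iter shadowed it for the rest of the loop;
renaming it to pos removes the trap. The bug class in miniature (compile
with -Wshadow to have the compiler flag it):

    #include <stdio.h>

    static int total(const int *iter /* caller's cursor */, int n)
    {
        int sum = 0;

        for (int i = 0; i < n; i++) {
            int iter = i * i;      /* shadows the parameter from here on */
            sum += iter;           /* any use of "iter" now means the local */
        }
        (void)iter;                /* outside the loop the parameter is back */
        return sum;
    }

    int main(void)
    {
        int cursor = 0;

        printf("%d\n", total(&cursor, 4));   /* 14 */
        return 0;
    }
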
+diff --git a/fs/read_write.c b/fs/read_write.c
+index 64dc24afdb3a7f..befec0b5c537a7 100644
+--- a/fs/read_write.c
++++ b/fs/read_write.c
+@@ -1830,18 +1830,21 @@ int generic_file_rw_checks(struct file *file_in, struct file *file_out)
+ 	return 0;
+ }
+ 
+-bool generic_atomic_write_valid(struct iov_iter *iter, loff_t pos)
++int generic_atomic_write_valid(struct kiocb *iocb, struct iov_iter *iter)
+ {
+ 	size_t len = iov_iter_count(iter);
+ 
+ 	if (!iter_is_ubuf(iter))
+-		return false;
++		return -EINVAL;
+ 
+ 	if (!is_power_of_2(len))
+-		return false;
++		return -EINVAL;
++
++	if (!IS_ALIGNED(iocb->ki_pos, len))
++		return -EINVAL;
+ 
+-	if (!IS_ALIGNED(pos, len))
+-		return false;
++	if (!(iocb->ki_flags & IOCB_DIRECT))
++		return -EOPNOTSUPP;
+ 
+-	return true;
++	return 0;
+ }
+diff --git a/fs/smb/client/cached_dir.c b/fs/smb/client/cached_dir.c
+index 0ff2491c311d8a..9c0ef4195b5829 100644
+--- a/fs/smb/client/cached_dir.c
++++ b/fs/smb/client/cached_dir.c
+@@ -17,6 +17,11 @@ static void free_cached_dir(struct cached_fid *cfid);
+ static void smb2_close_cached_fid(struct kref *ref);
+ static void cfids_laundromat_worker(struct work_struct *work);
+ 
++struct cached_dir_dentry {
++	struct list_head entry;
++	struct dentry *dentry;
++};
++
+ static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
+ 						    const char *path,
+ 						    bool lookup_only,
+@@ -59,6 +64,16 @@ static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
+ 	list_add(&cfid->entry, &cfids->entries);
+ 	cfid->on_list = true;
+ 	kref_get(&cfid->refcount);
++	/*
++	 * Set @cfid->has_lease to true during construction so that the lease
++	 * reference can be put in cached_dir_lease_break() due to a potential
++	 * lease break right after the request is sent or while @cfid is still
++	 * being cached, or if a reconnection is triggered during construction.
++	 * Concurrent processes won't be able to use it yet due to @cfid->time
++	 * being zero.
++	 */
++	cfid->has_lease = true;
++
+ 	spin_unlock(&cfids->cfid_list_lock);
+ 	return cfid;
+ }
+@@ -176,12 +191,12 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
+ 		return -ENOENT;
+ 	}
+ 	/*
+-	 * Return cached fid if it has a lease.  Otherwise, it is either a new
+-	 * entry or laundromat worker removed it from @cfids->entries.  Caller
+-	 * will put last reference if the latter.
++	 * Return cached fid if it is valid (has a lease and has a time).
++	 * Otherwise, it is either a new entry or the laundromat worker removed
++	 * it from @cfids->entries.  The caller puts the last reference in the
++	 * latter case.
+ 	 */
+ 	spin_lock(&cfids->cfid_list_lock);
+-	if (cfid->has_lease) {
++	if (cfid->has_lease && cfid->time) {
+ 		spin_unlock(&cfids->cfid_list_lock);
+ 		*ret_cfid = cfid;
+ 		kfree(utf16_path);
+@@ -212,6 +227,7 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
+ 		}
+ 	}
+ 	cfid->dentry = dentry;
++	cfid->tcon = tcon;
+ 
+ 	/*
+ 	 * We do not hold the lock for the open because in case
+@@ -267,15 +283,6 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
+ 
+ 	smb2_set_related(&rqst[1]);
+ 
+-	/*
+-	 * Set @cfid->has_lease to true before sending out compounded request so
+-	 * its lease reference can be put in cached_dir_lease_break() due to a
+-	 * potential lease break right after the request is sent or while @cfid
+-	 * is still being cached.  Concurrent processes won't be to use it yet
+-	 * due to @cfid->time being zero.
+-	 */
+-	cfid->has_lease = true;
+-
+ 	if (retries) {
+ 		smb2_set_replay(server, &rqst[0]);
+ 		smb2_set_replay(server, &rqst[1]);
+@@ -292,7 +299,6 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
+ 		}
+ 		goto oshr_free;
+ 	}
+-	cfid->tcon = tcon;
+ 	cfid->is_open = true;
+ 
+ 	spin_lock(&cfids->cfid_list_lock);
+@@ -347,6 +353,7 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
+ 	SMB2_query_info_free(&rqst[1]);
+ 	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
+ 	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
++out:
+ 	if (rc) {
+ 		spin_lock(&cfids->cfid_list_lock);
+ 		if (cfid->on_list) {
+@@ -358,23 +365,14 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
+ 			/*
+ 			 * We are guaranteed to have two references at this
+ 			 * point. One for the caller and one for a potential
+-			 * lease. Release the Lease-ref so that the directory
+-			 * will be closed when the caller closes the cached
+-			 * handle.
++			 * lease. Release one here, and the second below.
+ 			 */
+ 			cfid->has_lease = false;
+-			spin_unlock(&cfids->cfid_list_lock);
+ 			kref_put(&cfid->refcount, smb2_close_cached_fid);
+-			goto out;
+ 		}
+ 		spin_unlock(&cfids->cfid_list_lock);
+-	}
+-out:
+-	if (rc) {
+-		if (cfid->is_open)
+-			SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
+-				   cfid->fid.volatile_fid);
+-		free_cached_dir(cfid);
++
++		kref_put(&cfid->refcount, smb2_close_cached_fid);
+ 	} else {
+ 		*ret_cfid = cfid;
+ 		atomic_inc(&tcon->num_remote_opens);
+@@ -401,7 +399,7 @@ int open_cached_dir_by_dentry(struct cifs_tcon *tcon,
+ 	spin_lock(&cfids->cfid_list_lock);
+ 	list_for_each_entry(cfid, &cfids->entries, entry) {
+ 		if (dentry && cfid->dentry == dentry) {
+-			cifs_dbg(FYI, "found a cached root file handle by dentry\n");
++			cifs_dbg(FYI, "found a cached file handle by dentry\n");
+ 			kref_get(&cfid->refcount);
+ 			*ret_cfid = cfid;
+ 			spin_unlock(&cfids->cfid_list_lock);
+@@ -477,7 +475,10 @@ void close_all_cached_dirs(struct cifs_sb_info *cifs_sb)
+ 	struct cifs_tcon *tcon;
+ 	struct tcon_link *tlink;
+ 	struct cached_fids *cfids;
++	struct cached_dir_dentry *tmp_list, *q;
++	LIST_HEAD(entry);
+ 
++	spin_lock(&cifs_sb->tlink_tree_lock);
+ 	for (node = rb_first(root); node; node = rb_next(node)) {
+ 		tlink = rb_entry(node, struct tcon_link, tl_rbnode);
+ 		tcon = tlink_tcon(tlink);
+@@ -486,11 +487,30 @@ void close_all_cached_dirs(struct cifs_sb_info *cifs_sb)
+ 		cfids = tcon->cfids;
+ 		if (cfids == NULL)
+ 			continue;
++		spin_lock(&cfids->cfid_list_lock);
+ 		list_for_each_entry(cfid, &cfids->entries, entry) {
+-			dput(cfid->dentry);
++			tmp_list = kmalloc(sizeof(*tmp_list), GFP_ATOMIC);
++			if (tmp_list == NULL)
++				break;
++			spin_lock(&cfid->fid_lock);
++			tmp_list->dentry = cfid->dentry;
+ 			cfid->dentry = NULL;
++			spin_unlock(&cfid->fid_lock);
++
++			list_add_tail(&tmp_list->entry, &entry);
+ 		}
++		spin_unlock(&cfids->cfid_list_lock);
++	}
++	spin_unlock(&cifs_sb->tlink_tree_lock);
++
++	list_for_each_entry_safe(tmp_list, q, &entry, entry) {
++		list_del(&tmp_list->entry);
++		dput(tmp_list->dentry);
++		kfree(tmp_list);
+ 	}
++
++	/* Flush any pending work that will drop dentries */
++	flush_workqueue(cfid_put_wq);
+ }
+ 
+ /*
+@@ -501,50 +521,71 @@ void invalidate_all_cached_dirs(struct cifs_tcon *tcon)
+ {
+ 	struct cached_fids *cfids = tcon->cfids;
+ 	struct cached_fid *cfid, *q;
+-	LIST_HEAD(entry);
+ 
+ 	if (cfids == NULL)
+ 		return;
+ 
++	/*
++	 * Mark all the cfids as closed, and move them to the cfids->dying list.
++	 * They'll be cleaned up later by cfids_invalidation_worker. Take
++	 * a reference to each cfid during this process.
++	 */
+ 	spin_lock(&cfids->cfid_list_lock);
+ 	list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
+-		list_move(&cfid->entry, &entry);
++		list_move(&cfid->entry, &cfids->dying);
+ 		cfids->num_entries--;
+ 		cfid->is_open = false;
+ 		cfid->on_list = false;
+-		/* To prevent race with smb2_cached_lease_break() */
+-		kref_get(&cfid->refcount);
+-	}
+-	spin_unlock(&cfids->cfid_list_lock);
+-
+-	list_for_each_entry_safe(cfid, q, &entry, entry) {
+-		list_del(&cfid->entry);
+-		cancel_work_sync(&cfid->lease_break);
+ 		if (cfid->has_lease) {
+ 			/*
+-			 * We lease was never cancelled from the server so we
+-			 * need to drop the reference.
++			 * The lease was never cancelled from the server,
++			 * so steal that reference.
+ 			 */
+-			spin_lock(&cfids->cfid_list_lock);
+ 			cfid->has_lease = false;
+-			spin_unlock(&cfids->cfid_list_lock);
+-			kref_put(&cfid->refcount, smb2_close_cached_fid);
+-		}
+-		/* Drop the extra reference opened above*/
+-		kref_put(&cfid->refcount, smb2_close_cached_fid);
++		} else
++			kref_get(&cfid->refcount);
+ 	}
++	/*
++	 * Queue dropping of the dentries once locks have been dropped.
++	 */
++	if (!list_empty(&cfids->dying))
++		queue_work(cfid_put_wq, &cfids->invalidation_work);
++	spin_unlock(&cfids->cfid_list_lock);
+ }
+ 
+ static void
+-smb2_cached_lease_break(struct work_struct *work)
++cached_dir_offload_close(struct work_struct *work)
+ {
+ 	struct cached_fid *cfid = container_of(work,
+-				struct cached_fid, lease_break);
++				struct cached_fid, close_work);
++	struct cifs_tcon *tcon = cfid->tcon;
++
++	WARN_ON(cfid->on_list);
+ 
+-	spin_lock(&cfid->cfids->cfid_list_lock);
+-	cfid->has_lease = false;
+-	spin_unlock(&cfid->cfids->cfid_list_lock);
+ 	kref_put(&cfid->refcount, smb2_close_cached_fid);
++	cifs_put_tcon(tcon, netfs_trace_tcon_ref_put_cached_close);
++}
++
++/*
++ * Release the cached directory's dentry, and then queue work to drop the
++ * cached directory itself (closing it on the server if needed).
++ *
++ * Must be called with a reference to the cached_fid and a reference to the
++ * tcon.
++ */
++static void cached_dir_put_work(struct work_struct *work)
++{
++	struct cached_fid *cfid = container_of(work, struct cached_fid,
++					       put_work);
++	struct dentry *dentry;
++
++	spin_lock(&cfid->fid_lock);
++	dentry = cfid->dentry;
++	cfid->dentry = NULL;
++	spin_unlock(&cfid->fid_lock);
++
++	dput(dentry);
++	queue_work(serverclose_wq, &cfid->close_work);
+ }
+ 
+ int cached_dir_lease_break(struct cifs_tcon *tcon, __u8 lease_key[16])
+@@ -561,6 +602,7 @@ int cached_dir_lease_break(struct cifs_tcon *tcon, __u8 lease_key[16])
+ 		    !memcmp(lease_key,
+ 			    cfid->fid.lease_key,
+ 			    SMB2_LEASE_KEY_SIZE)) {
++			cfid->has_lease = false;
+ 			cfid->time = 0;
+ 			/*
+ 			 * We found a lease remove it from the list
+@@ -570,8 +612,10 @@ int cached_dir_lease_break(struct cifs_tcon *tcon, __u8 lease_key[16])
+ 			cfid->on_list = false;
+ 			cfids->num_entries--;
+ 
+-			queue_work(cifsiod_wq,
+-				   &cfid->lease_break);
++			++tcon->tc_count;
++			trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
++					    netfs_trace_tcon_ref_get_cached_lease_break);
++			queue_work(cfid_put_wq, &cfid->put_work);
+ 			spin_unlock(&cfids->cfid_list_lock);
+ 			return true;
+ 		}
+@@ -593,7 +637,8 @@ static struct cached_fid *init_cached_dir(const char *path)
+ 		return NULL;
+ 	}
+ 
+-	INIT_WORK(&cfid->lease_break, smb2_cached_lease_break);
++	INIT_WORK(&cfid->close_work, cached_dir_offload_close);
++	INIT_WORK(&cfid->put_work, cached_dir_put_work);
+ 	INIT_LIST_HEAD(&cfid->entry);
+ 	INIT_LIST_HEAD(&cfid->dirents.entries);
+ 	mutex_init(&cfid->dirents.de_mutex);
+@@ -606,6 +651,9 @@ static void free_cached_dir(struct cached_fid *cfid)
+ {
+ 	struct cached_dirent *dirent, *q;
+ 
++	WARN_ON(work_pending(&cfid->close_work));
++	WARN_ON(work_pending(&cfid->put_work));
++
+ 	dput(cfid->dentry);
+ 	cfid->dentry = NULL;
+ 
+@@ -623,10 +671,30 @@ static void free_cached_dir(struct cached_fid *cfid)
+ 	kfree(cfid);
+ }
+ 
++static void cfids_invalidation_worker(struct work_struct *work)
++{
++	struct cached_fids *cfids = container_of(work, struct cached_fids,
++						 invalidation_work);
++	struct cached_fid *cfid, *q;
++	LIST_HEAD(entry);
++
++	spin_lock(&cfids->cfid_list_lock);
++	/* move cfids->dying to the local list */
++	list_cut_before(&entry, &cfids->dying, &cfids->dying);
++	spin_unlock(&cfids->cfid_list_lock);
++
++	list_for_each_entry_safe(cfid, q, &entry, entry) {
++		list_del(&cfid->entry);
++		/* Drop the ref-count acquired in invalidate_all_cached_dirs */
++		kref_put(&cfid->refcount, smb2_close_cached_fid);
++	}
++}
++
+ static void cfids_laundromat_worker(struct work_struct *work)
+ {
+ 	struct cached_fids *cfids;
+ 	struct cached_fid *cfid, *q;
++	struct dentry *dentry;
+ 	LIST_HEAD(entry);
+ 
+ 	cfids = container_of(work, struct cached_fids, laundromat_work.work);
+@@ -638,33 +706,42 @@ static void cfids_laundromat_worker(struct work_struct *work)
+ 			cfid->on_list = false;
+ 			list_move(&cfid->entry, &entry);
+ 			cfids->num_entries--;
+-			/* To prevent race with smb2_cached_lease_break() */
+-			kref_get(&cfid->refcount);
++			if (cfid->has_lease) {
++				/*
++				 * Our lease has not yet been cancelled from the
++				 * server. Steal that reference.
++				 */
++				cfid->has_lease = false;
++			} else
++				kref_get(&cfid->refcount);
+ 		}
+ 	}
+ 	spin_unlock(&cfids->cfid_list_lock);
+ 
+ 	list_for_each_entry_safe(cfid, q, &entry, entry) {
+ 		list_del(&cfid->entry);
+-		/*
+-		 * Cancel and wait for the work to finish in case we are racing
+-		 * with it.
+-		 */
+-		cancel_work_sync(&cfid->lease_break);
+-		if (cfid->has_lease) {
++
++		spin_lock(&cfid->fid_lock);
++		dentry = cfid->dentry;
++		cfid->dentry = NULL;
++		spin_unlock(&cfid->fid_lock);
++
++		dput(dentry);
++		if (cfid->is_open) {
++			spin_lock(&cifs_tcp_ses_lock);
++			++cfid->tcon->tc_count;
++			trace_smb3_tcon_ref(cfid->tcon->debug_id, cfid->tcon->tc_count,
++					    netfs_trace_tcon_ref_get_cached_laundromat);
++			spin_unlock(&cifs_tcp_ses_lock);
++			queue_work(serverclose_wq, &cfid->close_work);
++		} else
+ 			/*
+-			 * Our lease has not yet been cancelled from the server
+-			 * so we need to drop the reference.
++			 * Drop the ref-count from above, either the lease-ref (if there
++			 * was one) or the extra one acquired.
+ 			 */
+-			spin_lock(&cfids->cfid_list_lock);
+-			cfid->has_lease = false;
+-			spin_unlock(&cfids->cfid_list_lock);
+ 			kref_put(&cfid->refcount, smb2_close_cached_fid);
+-		}
+-		/* Drop the extra reference opened above */
+-		kref_put(&cfid->refcount, smb2_close_cached_fid);
+ 	}
+-	queue_delayed_work(cifsiod_wq, &cfids->laundromat_work,
++	queue_delayed_work(cfid_put_wq, &cfids->laundromat_work,
+ 			   dir_cache_timeout * HZ);
+ }
+ 
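
The reworked laundromat either steals the reference that the lease was holding (has_lease is cleared without a kref_get()) or takes a fresh one, so exactly one put runs per expired entry. A minimal userspace model of that invariant, using a C11 atomic in place of struct kref (names hypothetical, not the kernel code):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct cfid_model {
    	atomic_int refcount;	/* stands in for cfid->refcount */
    	bool has_lease;		/* the lease owns one of the references */
    };

    /* Expire one entry: afterwards the caller owns exactly one reference. */
    static void laundromat_expire(struct cfid_model *c)
    {
    	if (c->has_lease)
    		c->has_lease = false;		/* steal the lease's reference */
    	else
    		atomic_fetch_add(&c->refcount, 1);	/* take a fresh one */
    }

    int main(void)
    {
    	/* one ref for the open handle, one owned by the lease */
    	struct cfid_model c = { .refcount = 2, .has_lease = true };

    	laundromat_expire(&c);
    	atomic_fetch_sub(&c.refcount, 1);	/* the single, unconditional put */
    	printf("refcount now %d\n", atomic_load(&c.refcount));	/* 1 */
    	return 0;
    }

Either branch leaves the worker owning one reference, which is why the put at the end no longer needs to be conditional on lease state.
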
+@@ -677,9 +754,11 @@ struct cached_fids *init_cached_dirs(void)
+ 		return NULL;
+ 	spin_lock_init(&cfids->cfid_list_lock);
+ 	INIT_LIST_HEAD(&cfids->entries);
++	INIT_LIST_HEAD(&cfids->dying);
+ 
++	INIT_WORK(&cfids->invalidation_work, cfids_invalidation_worker);
+ 	INIT_DELAYED_WORK(&cfids->laundromat_work, cfids_laundromat_worker);
+-	queue_delayed_work(cifsiod_wq, &cfids->laundromat_work,
++	queue_delayed_work(cfid_put_wq, &cfids->laundromat_work,
+ 			   dir_cache_timeout * HZ);
+ 
+ 	return cfids;
+@@ -698,6 +777,7 @@ void free_cached_dirs(struct cached_fids *cfids)
+ 		return;
+ 
+ 	cancel_delayed_work_sync(&cfids->laundromat_work);
++	cancel_work_sync(&cfids->invalidation_work);
+ 
+ 	spin_lock(&cfids->cfid_list_lock);
+ 	list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
+@@ -705,6 +785,11 @@ void free_cached_dirs(struct cached_fids *cfids)
+ 		cfid->is_open = false;
+ 		list_move(&cfid->entry, &entry);
+ 	}
++	list_for_each_entry_safe(cfid, q, &cfids->dying, entry) {
++		cfid->on_list = false;
++		cfid->is_open = false;
++		list_move(&cfid->entry, &entry);
++	}
+ 	spin_unlock(&cfids->cfid_list_lock);
+ 
+ 	list_for_each_entry_safe(cfid, q, &entry, entry) {
+diff --git a/fs/smb/client/cached_dir.h b/fs/smb/client/cached_dir.h
+index 81ba0fd5cc16d6..1dfe79d947a62f 100644
+--- a/fs/smb/client/cached_dir.h
++++ b/fs/smb/client/cached_dir.h
+@@ -44,7 +44,8 @@ struct cached_fid {
+ 	spinlock_t fid_lock;
+ 	struct cifs_tcon *tcon;
+ 	struct dentry *dentry;
+-	struct work_struct lease_break;
++	struct work_struct put_work;
++	struct work_struct close_work;
+ 	struct smb2_file_all_info file_all_info;
+ 	struct cached_dirents dirents;
+ };
+@@ -53,10 +54,13 @@ struct cached_fid {
+ struct cached_fids {
+ 	/* Must be held when:
+ 	 * - accessing the cfids->entries list
++	 * - accessing the cfids->dying list
+ 	 */
+ 	spinlock_t cfid_list_lock;
+ 	int num_entries;
+ 	struct list_head entries;
++	struct list_head dying;
++	struct work_struct invalidation_work;
+ 	struct delayed_work laundromat_work;
+ };
+ 
+diff --git a/fs/smb/client/cifsacl.c b/fs/smb/client/cifsacl.c
+index 1d294d53f66247..c68ad526a4de1b 100644
+--- a/fs/smb/client/cifsacl.c
++++ b/fs/smb/client/cifsacl.c
+@@ -885,12 +885,17 @@ unsigned int setup_authusers_ACE(struct smb_ace *pntace)
+  * Fill in the special SID based on the mode. See
+  * https://technet.microsoft.com/en-us/library/hh509017(v=ws.10).aspx
+  */
+-unsigned int setup_special_mode_ACE(struct smb_ace *pntace, __u64 nmode)
++unsigned int setup_special_mode_ACE(struct smb_ace *pntace,
++				    bool posix,
++				    __u64 nmode)
+ {
+ 	int i;
+ 	unsigned int ace_size = 28;
+ 
+-	pntace->type = ACCESS_DENIED_ACE_TYPE;
++	if (posix)
++		pntace->type = ACCESS_ALLOWED_ACE_TYPE;
++	else
++		pntace->type = ACCESS_DENIED_ACE_TYPE;
+ 	pntace->flags = 0x0;
+ 	pntace->access_req = 0;
+ 	pntace->sid.num_subauth = 3;
+@@ -933,7 +938,8 @@ static void populate_new_aces(char *nacl_base,
+ 		struct smb_sid *pownersid,
+ 		struct smb_sid *pgrpsid,
+ 		__u64 *pnmode, u32 *pnum_aces, u16 *pnsize,
+-		bool modefromsid)
++		bool modefromsid,
++		bool posix)
+ {
+ 	__u64 nmode;
+ 	u32 num_aces = 0;
+@@ -950,13 +956,15 @@ static void populate_new_aces(char *nacl_base,
+ 	num_aces = *pnum_aces;
+ 	nsize = *pnsize;
+ 
+-	if (modefromsid) {
+-		pnntace = (struct smb_ace *) (nacl_base + nsize);
+-		nsize += setup_special_mode_ACE(pnntace, nmode);
+-		num_aces++;
++	if (modefromsid || posix) {
+ 		pnntace = (struct smb_ace *) (nacl_base + nsize);
+-		nsize += setup_authusers_ACE(pnntace);
++		nsize += setup_special_mode_ACE(pnntace, posix, nmode);
+ 		num_aces++;
++		if (modefromsid) {
++			pnntace = (struct smb_ace *) (nacl_base + nsize);
++			nsize += setup_authusers_ACE(pnntace);
++			num_aces++;
++		}
+ 		goto set_size;
+ 	}
+ 
+@@ -1076,7 +1084,7 @@ static __u16 replace_sids_and_copy_aces(struct smb_acl *pdacl, struct smb_acl *p
+ 
+ static int set_chmod_dacl(struct smb_acl *pdacl, struct smb_acl *pndacl,
+ 		struct smb_sid *pownersid,	struct smb_sid *pgrpsid,
+-		__u64 *pnmode, bool mode_from_sid)
++		__u64 *pnmode, bool mode_from_sid, bool posix)
+ {
+ 	int i;
+ 	u16 size = 0;
+@@ -1094,11 +1102,11 @@ static int set_chmod_dacl(struct smb_acl *pdacl, struct smb_acl *pndacl,
+ 	nsize = sizeof(struct smb_acl);
+ 
+ 	/* If pdacl is NULL, we don't have a src. Simply populate new ACL. */
+-	if (!pdacl) {
++	if (!pdacl || posix) {
+ 		populate_new_aces(nacl_base,
+ 				pownersid, pgrpsid,
+ 				pnmode, &num_aces, &nsize,
+-				mode_from_sid);
++				mode_from_sid, posix);
+ 		goto finalize_dacl;
+ 	}
+ 
+@@ -1115,7 +1123,7 @@ static int set_chmod_dacl(struct smb_acl *pdacl, struct smb_acl *pndacl,
+ 			populate_new_aces(nacl_base,
+ 					pownersid, pgrpsid,
+ 					pnmode, &num_aces, &nsize,
+-					mode_from_sid);
++					mode_from_sid, posix);
+ 
+ 			new_aces_set = true;
+ 		}
+@@ -1144,7 +1152,7 @@ static int set_chmod_dacl(struct smb_acl *pdacl, struct smb_acl *pndacl,
+ 		populate_new_aces(nacl_base,
+ 				pownersid, pgrpsid,
+ 				pnmode, &num_aces, &nsize,
+-				mode_from_sid);
++				mode_from_sid, posix);
+ 
+ 		new_aces_set = true;
+ 	}
+@@ -1251,7 +1259,7 @@ static int parse_sec_desc(struct cifs_sb_info *cifs_sb,
+ /* Convert permission bits from mode to equivalent CIFS ACL */
+ static int build_sec_desc(struct smb_ntsd *pntsd, struct smb_ntsd *pnntsd,
+ 	__u32 secdesclen, __u32 *pnsecdesclen, __u64 *pnmode, kuid_t uid, kgid_t gid,
+-	bool mode_from_sid, bool id_from_sid, int *aclflag)
++	bool mode_from_sid, bool id_from_sid, bool posix, int *aclflag)
+ {
+ 	int rc = 0;
+ 	__u32 dacloffset;
+@@ -1288,7 +1296,7 @@ static int build_sec_desc(struct smb_ntsd *pntsd, struct smb_ntsd *pnntsd,
+ 		ndacl_ptr->num_aces = cpu_to_le32(0);
+ 
+ 		rc = set_chmod_dacl(dacl_ptr, ndacl_ptr, owner_sid_ptr, group_sid_ptr,
+-				    pnmode, mode_from_sid);
++				    pnmode, mode_from_sid, posix);
+ 
+ 		sidsoffset = ndacloffset + le16_to_cpu(ndacl_ptr->size);
+ 		/* copy the non-dacl portion of secdesc */
+@@ -1587,6 +1595,7 @@ id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64 *pnmode,
+ 	struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
+ 	struct smb_version_operations *ops;
+ 	bool mode_from_sid, id_from_sid;
++	bool posix = tlink_tcon(tlink)->posix_extensions;
+ 	const u32 info = 0;
+ 
+ 	if (IS_ERR(tlink))
+@@ -1622,12 +1631,13 @@ id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64 *pnmode,
+ 		id_from_sid = false;
+ 
+ 	/* Potentially, five new ACEs can be added to the ACL for U,G,O mapping */
+-	nsecdesclen = secdesclen;
+ 	if (pnmode && *pnmode != NO_CHANGE_64) { /* chmod */
+-		if (mode_from_sid)
+-			nsecdesclen += 2 * sizeof(struct smb_ace);
++		if (posix)
++			nsecdesclen = 1 * sizeof(struct smb_ace);
++		else if (mode_from_sid)
++			nsecdesclen = secdesclen + (2 * sizeof(struct smb_ace));
+ 		else /* cifsacl */
+-			nsecdesclen += 5 * sizeof(struct smb_ace);
++			nsecdesclen = secdesclen + (5 * sizeof(struct smb_ace));
+ 	} else { /* chown */
+ 		/* When ownership changes, changes new owner sid length could be different */
+ 		nsecdesclen = sizeof(struct smb_ntsd) + (sizeof(struct smb_sid) * 2);
+@@ -1657,7 +1667,7 @@ id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64 *pnmode,
+ 	}
+ 
+ 	rc = build_sec_desc(pntsd, pnntsd, secdesclen, &nsecdesclen, pnmode, uid, gid,
+-			    mode_from_sid, id_from_sid, &aclflag);
++			    mode_from_sid, id_from_sid, posix, &aclflag);
+ 
+ 	cifs_dbg(NOISY, "build_sec_desc rc: %d\n", rc);
+ 
+diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c
+index 20cafdff508106..bf909c2f6b963b 100644
+--- a/fs/smb/client/cifsfs.c
++++ b/fs/smb/client/cifsfs.c
+@@ -157,6 +157,7 @@ struct workqueue_struct	*fileinfo_put_wq;
+ struct workqueue_struct	*cifsoplockd_wq;
+ struct workqueue_struct	*deferredclose_wq;
+ struct workqueue_struct	*serverclose_wq;
++struct workqueue_struct	*cfid_put_wq;
+ __u32 cifs_lock_secret;
+ 
+ /*
+@@ -1895,9 +1896,16 @@ init_cifs(void)
+ 		goto out_destroy_deferredclose_wq;
+ 	}
+ 
++	cfid_put_wq = alloc_workqueue("cfid_put_wq",
++				      WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
++	if (!cfid_put_wq) {
++		rc = -ENOMEM;
++		goto out_destroy_serverclose_wq;
++	}
++
+ 	rc = cifs_init_inodecache();
+ 	if (rc)
+-		goto out_destroy_serverclose_wq;
++		goto out_destroy_cfid_put_wq;
+ 
+ 	rc = cifs_init_netfs();
+ 	if (rc)
+@@ -1965,6 +1973,8 @@ init_cifs(void)
+ 	cifs_destroy_netfs();
+ out_destroy_inodecache:
+ 	cifs_destroy_inodecache();
++out_destroy_cfid_put_wq:
++	destroy_workqueue(cfid_put_wq);
+ out_destroy_serverclose_wq:
+ 	destroy_workqueue(serverclose_wq);
+ out_destroy_deferredclose_wq:
+diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
+index 5041b1ffc244b0..9a4b3608b7d6f3 100644
+--- a/fs/smb/client/cifsglob.h
++++ b/fs/smb/client/cifsglob.h
+@@ -588,6 +588,7 @@ struct smb_version_operations {
+ 	/* Check for STATUS_NETWORK_NAME_DELETED */
+ 	bool (*is_network_name_deleted)(char *buf, struct TCP_Server_Info *srv);
+ 	int (*parse_reparse_point)(struct cifs_sb_info *cifs_sb,
++				   const char *full_path,
+ 				   struct kvec *rsp_iov,
+ 				   struct cifs_open_info_data *data);
+ 	int (*create_reparse_symlink)(const unsigned int xid,
+@@ -1983,7 +1984,7 @@ require use of the stronger protocol */
+  * cifsInodeInfo->lock_sem	cifsInodeInfo->llist		cifs_init_once
+  *				->can_cache_brlcks
+  * cifsInodeInfo->deferred_lock	cifsInodeInfo->deferred_closes	cifsInodeInfo_alloc
+- * cached_fid->fid_mutex		cifs_tcon->crfid		tcon_info_alloc
++ * cached_fids->cfid_list_lock	cifs_tcon->cfids->entries	init_cached_dirs
+  * cifsFileInfo->fh_mutex		cifsFileInfo			cifs_new_fileinfo
+  * cifsFileInfo->file_info_lock	cifsFileInfo->count		cifs_new_fileinfo
+  *				->invalidHandle			initiate_cifs_search
+@@ -2071,6 +2072,7 @@ extern struct workqueue_struct *fileinfo_put_wq;
+ extern struct workqueue_struct *cifsoplockd_wq;
+ extern struct workqueue_struct *deferredclose_wq;
+ extern struct workqueue_struct *serverclose_wq;
++extern struct workqueue_struct *cfid_put_wq;
+ extern __u32 cifs_lock_secret;
+ 
+ extern mempool_t *cifs_sm_req_poolp;
+diff --git a/fs/smb/client/cifsproto.h b/fs/smb/client/cifsproto.h
+index 1d3470bca45edd..0c6468844c4b54 100644
+--- a/fs/smb/client/cifsproto.h
++++ b/fs/smb/client/cifsproto.h
+@@ -244,7 +244,9 @@ extern int cifs_set_acl(struct mnt_idmap *idmap,
+ extern int set_cifs_acl(struct smb_ntsd *pntsd, __u32 len, struct inode *ino,
+ 				const char *path, int flag);
+ extern unsigned int setup_authusers_ACE(struct smb_ace *pace);
+-extern unsigned int setup_special_mode_ACE(struct smb_ace *pace, __u64 nmode);
++extern unsigned int setup_special_mode_ACE(struct smb_ace *pace,
++					   bool posix,
++					   __u64 nmode);
+ extern unsigned int setup_special_user_owner_ACE(struct smb_ace *pace);
+ 
+ extern void dequeue_mid(struct mid_q_entry *mid, bool malformed);
+@@ -666,6 +668,7 @@ char *extract_hostname(const char *unc);
+ char *extract_sharename(const char *unc);
+ int parse_reparse_point(struct reparse_data_buffer *buf,
+ 			u32 plen, struct cifs_sb_info *cifs_sb,
++			const char *full_path,
+ 			bool unicode, struct cifs_open_info_data *data);
+ int __cifs_sfu_make_node(unsigned int xid, struct inode *inode,
+ 			 struct dentry *dentry, struct cifs_tcon *tcon,
+diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
+index 0ce2d704b1f3f8..a94c538ff86368 100644
+--- a/fs/smb/client/connect.c
++++ b/fs/smb/client/connect.c
+@@ -1897,11 +1897,35 @@ static int match_session(struct cifs_ses *ses,
+ 			    CIFS_MAX_USERNAME_LEN))
+ 			return 0;
+ 		if ((ctx->username && strlen(ctx->username) != 0) &&
+-		    ses->password != NULL &&
+-		    strncmp(ses->password,
+-			    ctx->password ? ctx->password : "",
+-			    CIFS_MAX_PASSWORD_LEN))
+-			return 0;
++		    ses->password != NULL) {
++
++			/* New mount can only share sessions with an existing mount if:
++			 * 1. Both password and password2 match, or
++			 * 2. password2 of the old mount matches password of the new mount
++			 *    and password of the old mount matches password2 of the new
++			 *    and password of the old mount matches password2 of the new
++			 *    mount
++			if (ses->password2 != NULL && ctx->password2 != NULL) {
++				if (!((strncmp(ses->password, ctx->password ?
++					ctx->password : "", CIFS_MAX_PASSWORD_LEN) == 0 &&
++					strncmp(ses->password2, ctx->password2,
++					CIFS_MAX_PASSWORD_LEN) == 0) ||
++					(strncmp(ses->password, ctx->password2,
++					CIFS_MAX_PASSWORD_LEN) == 0 &&
++					strncmp(ses->password2, ctx->password ?
++					ctx->password : "", CIFS_MAX_PASSWORD_LEN) == 0)))
++					return 0;
++
++			} else if ((ses->password2 == NULL && ctx->password2 != NULL) ||
++				(ses->password2 != NULL && ctx->password2 == NULL)) {
++				return 0;
++
++			} else {
++				if (strncmp(ses->password, ctx->password ?
++					ctx->password : "", CIFS_MAX_PASSWORD_LEN))
++					return 0;
++			}
++		}
+ 	}
+ 
+ 	if (strcmp(ctx->local_nls->charset, ses->local_nls->charset))
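
The comment in the hunk above defines when two mounts may share a session: with alternate passwords present on both sides, the (password, password2) pairs must match either directly or crosswise, since a reconnect may have swapped them. A standalone sketch of that predicate (hypothetical names, not the kernel function itself):

    #include <stdbool.h>
    #include <string.h>

    #define CIFS_MAX_PASSWORD_LEN 512

    static bool str_eq(const char *a, const char *b)
    {
    	return strncmp(a ? a : "", b ? b : "", CIFS_MAX_PASSWORD_LEN) == 0;
    }

    /* Model of the rule: pairs match directly, or as a swapped pair. */
    static bool passwords_match(const char *ses_pw, const char *ses_pw2,
    			    const char *ctx_pw, const char *ctx_pw2)
    {
    	if (ses_pw2 && ctx_pw2)
    		return (str_eq(ses_pw, ctx_pw) && str_eq(ses_pw2, ctx_pw2)) ||
    		       (str_eq(ses_pw, ctx_pw2) && str_eq(ses_pw2, ctx_pw));
    	if (!ses_pw2 != !ctx_pw2)	/* only one side has a password2 */
    		return false;
    	return str_eq(ses_pw, ctx_pw);
    }

    int main(void)
    {
    	/* a swapped pair still matches */
    	return passwords_match("new", "old", "old", "new") ? 0 : 1;
    }
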
+@@ -2244,6 +2268,7 @@ struct cifs_ses *
+ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
+ {
+ 	int rc = 0;
++	int retries = 0;
+ 	unsigned int xid;
+ 	struct cifs_ses *ses;
+ 	struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr;
+@@ -2262,6 +2287,8 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
+ 			cifs_dbg(FYI, "Session needs reconnect\n");
+ 
+ 			mutex_lock(&ses->session_mutex);
++
++retry_old_session:
+ 			rc = cifs_negotiate_protocol(xid, ses, server);
+ 			if (rc) {
+ 				mutex_unlock(&ses->session_mutex);
+@@ -2274,6 +2301,13 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
+ 			rc = cifs_setup_session(xid, ses, server,
+ 						ctx->local_nls);
+ 			if (rc) {
++				if (((rc == -EACCES) || (rc == -EKEYEXPIRED) ||
++					(rc == -EKEYREVOKED)) && !retries && ses->password2) {
++					retries++;
++					cifs_dbg(FYI, "Session reconnect failed, retrying with alternate password\n");
++					swap(ses->password, ses->password2);
++					goto retry_old_session;
++				}
+ 				mutex_unlock(&ses->session_mutex);
+ 				/* problem -- put our reference */
+ 				cifs_put_smb_ses(ses);
+@@ -2349,6 +2383,7 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
+ 	ses->chans_need_reconnect = 1;
+ 	spin_unlock(&ses->chan_lock);
+ 
++retry_new_session:
+ 	mutex_lock(&ses->session_mutex);
+ 	rc = cifs_negotiate_protocol(xid, ses, server);
+ 	if (!rc)
+@@ -2361,8 +2396,16 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
+ 	       sizeof(ses->smb3signingkey));
+ 	spin_unlock(&ses->chan_lock);
+ 
+-	if (rc)
+-		goto get_ses_fail;
++	if (rc) {
++		if (((rc == -EACCES) || (rc == -EKEYEXPIRED) ||
++			(rc == -EKEYREVOKED)) && !retries && ses->password2) {
++			retries++;
++			cifs_dbg(FYI, "Session setup failed, retrying with alternate password\n");
++			swap(ses->password, ses->password2);
++			goto retry_new_session;
++		} else
++			goto get_ses_fail;
++	}
+ 
+ 	/*
+ 	 * success, put it on the list and add it as first channel
+@@ -2551,7 +2594,7 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
+ 
+ 	if (ses->server->dialect >= SMB20_PROT_ID &&
+ 	    (ses->server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING))
+-		nohandlecache = ctx->nohandlecache;
++		nohandlecache = ctx->nohandlecache || !dir_cache_timeout;
+ 	else
+ 		nohandlecache = true;
+ 	tcon = tcon_info_alloc(!nohandlecache, netfs_trace_tcon_ref_new);
+diff --git a/fs/smb/client/fs_context.c b/fs/smb/client/fs_context.c
+index 5c5a52019efada..48606e2ddffdcd 100644
+--- a/fs/smb/client/fs_context.c
++++ b/fs/smb/client/fs_context.c
+@@ -890,12 +890,37 @@ do {									\
+ 	cifs_sb->ctx->field = NULL;					\
+ } while (0)
+ 
++int smb3_sync_session_ctx_passwords(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
++{
++	if (ses->password &&
++	    cifs_sb->ctx->password &&
++	    strcmp(ses->password, cifs_sb->ctx->password)) {
++		kfree_sensitive(cifs_sb->ctx->password);
++		cifs_sb->ctx->password = kstrdup(ses->password, GFP_KERNEL);
++		if (!cifs_sb->ctx->password)
++			return -ENOMEM;
++	}
++	if (ses->password2 &&
++	    cifs_sb->ctx->password2 &&
++	    strcmp(ses->password2, cifs_sb->ctx->password2)) {
++		kfree_sensitive(cifs_sb->ctx->password2);
++		cifs_sb->ctx->password2 = kstrdup(ses->password2, GFP_KERNEL);
++		if (!cifs_sb->ctx->password2) {
++			kfree_sensitive(cifs_sb->ctx->password);
++			cifs_sb->ctx->password = NULL;
++			return -ENOMEM;
++		}
++	}
++	return 0;
++}
++
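
smb3_sync_session_ctx_passwords() above is the usual compare-duplicate-replace pattern with unwinding when the second allocation fails. A userspace sketch of one leg of it, assuming strdup()/free() in place of kstrdup()/kfree_sensitive() (sync_secret is a hypothetical name):

    #include <errno.h>
    #include <stdlib.h>
    #include <string.h>

    /* Replace *dst with a copy of src when both exist and differ. */
    static int sync_secret(char **dst, const char *src)
    {
    	char *copy;

    	if (!src || !*dst || strcmp(src, *dst) == 0)
    		return 0;
    	copy = strdup(src);	/* kernel uses kstrdup(..., GFP_KERNEL) */
    	if (!copy)
    		return -ENOMEM;
    	free(*dst);		/* kernel uses kfree_sensitive() */
    	*dst = copy;
    	return 0;
    }

    int main(void)
    {
    	char *pw = strdup("old");
    	int rc = pw ? sync_secret(&pw, "new") : -ENOMEM;

    	free(pw);
    	return rc ? 1 : 0;
    }
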
+ static int smb3_reconfigure(struct fs_context *fc)
+ {
+ 	struct smb3_fs_context *ctx = smb3_fc2context(fc);
+ 	struct dentry *root = fc->root;
+ 	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
+ 	struct cifs_ses *ses = cifs_sb_master_tcon(cifs_sb)->ses;
++	char *new_password = NULL, *new_password2 = NULL;
+ 	bool need_recon = false;
+ 	int rc;
+ 
+@@ -915,21 +940,63 @@ static int smb3_reconfigure(struct fs_context *fc)
+ 	STEAL_STRING(cifs_sb, ctx, UNC);
+ 	STEAL_STRING(cifs_sb, ctx, source);
+ 	STEAL_STRING(cifs_sb, ctx, username);
++
+ 	if (need_recon == false)
+ 		STEAL_STRING_SENSITIVE(cifs_sb, ctx, password);
+ 	else  {
+-		kfree_sensitive(ses->password);
+-		ses->password = kstrdup(ctx->password, GFP_KERNEL);
+-		if (!ses->password)
+-			return -ENOMEM;
+-		kfree_sensitive(ses->password2);
+-		ses->password2 = kstrdup(ctx->password2, GFP_KERNEL);
+-		if (!ses->password2) {
+-			kfree_sensitive(ses->password);
+-			ses->password = NULL;
++		if (ctx->password) {
++			new_password = kstrdup(ctx->password, GFP_KERNEL);
++			if (!new_password)
++				return -ENOMEM;
++		} else
++			STEAL_STRING_SENSITIVE(cifs_sb, ctx, password);
++	}
++
++	/*
++	 * if a new password2 has been specified, then reset its value
++	 * inside the ses struct
++	 */
++	if (ctx->password2) {
++		new_password2 = kstrdup(ctx->password2, GFP_KERNEL);
++		if (!new_password2) {
++			kfree_sensitive(new_password);
+ 			return -ENOMEM;
+ 		}
++	} else
++		STEAL_STRING_SENSITIVE(cifs_sb, ctx, password2);
++
++	/*
++	 * we may update the passwords in the ses struct below. Make sure we do
++	 * not race with smb2_reconnect
++	 */
++	mutex_lock(&ses->session_mutex);
++
++	/*
++	 * smb2_reconnect may swap password and password2 in case session setup
++	 * failed. First get ctx passwords in sync with ses passwords. It should
++	 * be okay to do this even if this function were to return an error at a
++	 * later stage
++	 */
++	rc = smb3_sync_session_ctx_passwords(cifs_sb, ses);
++	if (rc) {
++		mutex_unlock(&ses->session_mutex);
++		return rc;
+ 	}
++
++	/*
++	 * now that allocations for passwords are done, commit them
++	 */
++	if (new_password) {
++		kfree_sensitive(ses->password);
++		ses->password = new_password;
++	}
++	if (new_password2) {
++		kfree_sensitive(ses->password2);
++		ses->password2 = new_password2;
++	}
++
++	mutex_unlock(&ses->session_mutex);
++
+ 	STEAL_STRING(cifs_sb, ctx, domainname);
+ 	STEAL_STRING(cifs_sb, ctx, nodename);
+ 	STEAL_STRING(cifs_sb, ctx, iocharset);
+diff --git a/fs/smb/client/fs_context.h b/fs/smb/client/fs_context.h
+index 890d6d9d4a592f..c8c8b4451b3bc7 100644
+--- a/fs/smb/client/fs_context.h
++++ b/fs/smb/client/fs_context.h
+@@ -299,6 +299,7 @@ static inline struct smb3_fs_context *smb3_fc2context(const struct fs_context *f
+ }
+ 
+ extern int smb3_fs_context_dup(struct smb3_fs_context *new_ctx, struct smb3_fs_context *ctx);
++extern int smb3_sync_session_ctx_passwords(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses);
+ extern void smb3_update_mnt_flags(struct cifs_sb_info *cifs_sb);
+ 
+ /*
+diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c
+index eff3f57235eef3..6d567b16998119 100644
+--- a/fs/smb/client/inode.c
++++ b/fs/smb/client/inode.c
+@@ -1115,6 +1115,7 @@ static int reparse_info_to_fattr(struct cifs_open_info_data *data,
+ 			rc = 0;
+ 		} else if (iov && server->ops->parse_reparse_point) {
+ 			rc = server->ops->parse_reparse_point(cifs_sb,
++							      full_path,
+ 							      iov, data);
+ 		}
+ 		break;
+@@ -2473,13 +2474,10 @@ cifs_dentry_needs_reval(struct dentry *dentry)
+ 		return true;
+ 
+ 	if (!open_cached_dir_by_dentry(tcon, dentry->d_parent, &cfid)) {
+-		spin_lock(&cfid->fid_lock);
+ 		if (cfid->time && cifs_i->time > cfid->time) {
+-			spin_unlock(&cfid->fid_lock);
+ 			close_cached_dir(cfid);
+ 			return false;
+ 		}
+-		spin_unlock(&cfid->fid_lock);
+ 		close_cached_dir(cfid);
+ 	}
+ 	/*
+@@ -3062,6 +3060,7 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
+ 	int rc = -EACCES;
+ 	__u32 dosattr = 0;
+ 	__u64 mode = NO_CHANGE_64;
++	bool posix = cifs_sb_master_tcon(cifs_sb)->posix_extensions;
+ 
+ 	xid = get_xid();
+ 
+@@ -3152,7 +3151,8 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
+ 		mode = attrs->ia_mode;
+ 		rc = 0;
+ 		if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) ||
+-		    (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)) {
++		    (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID) ||
++		    posix) {
+ 			rc = id_mode_to_cifs_acl(inode, full_path, &mode,
+ 						INVALID_UID, INVALID_GID);
+ 			if (rc) {
+diff --git a/fs/smb/client/reparse.c b/fs/smb/client/reparse.c
+index 74abbdf5026c73..f74d0a86f44a4e 100644
+--- a/fs/smb/client/reparse.c
++++ b/fs/smb/client/reparse.c
+@@ -35,6 +35,9 @@ int smb2_create_reparse_symlink(const unsigned int xid, struct inode *inode,
+ 	u16 len, plen;
+ 	int rc = 0;
+ 
++	if (strlen(symname) > REPARSE_SYM_PATH_MAX)
++		return -ENAMETOOLONG;
++
+ 	sym = kstrdup(symname, GFP_KERNEL);
+ 	if (!sym)
+ 		return -ENOMEM;
+@@ -64,7 +67,7 @@ int smb2_create_reparse_symlink(const unsigned int xid, struct inode *inode,
+ 	if (rc < 0)
+ 		goto out;
+ 
+-	plen = 2 * UniStrnlen((wchar_t *)path, PATH_MAX);
++	plen = 2 * UniStrnlen((wchar_t *)path, REPARSE_SYM_PATH_MAX);
+ 	len = sizeof(*buf) + plen * 2;
+ 	buf = kzalloc(len, GFP_KERNEL);
+ 	if (!buf) {
+@@ -532,9 +535,76 @@ static int parse_reparse_posix(struct reparse_posix_data *buf,
+ 	return 0;
+ }
+ 
++int smb2_parse_native_symlink(char **target, const char *buf, unsigned int len,
++			      bool unicode, bool relative,
++			      const char *full_path,
++			      struct cifs_sb_info *cifs_sb)
++{
++	char sep = CIFS_DIR_SEP(cifs_sb);
++	char *linux_target = NULL;
++	char *smb_target = NULL;
++	int levels;
++	int rc;
++	int i;
++
++	smb_target = cifs_strndup_from_utf16(buf, len, unicode, cifs_sb->local_nls);
++	if (!smb_target) {
++		rc = -ENOMEM;
++		goto out;
++	}
++
++	if (smb_target[0] == sep && relative) {
++		/*
++		 * This is a relative SMB symlink from the top of the share,
++		 * which is the top level directory of the Linux mount point.
++		 * Linux does not support such relative symlinks, so convert
++		 * it to a symlink relative to the current directory.
++		 * full_path is the SMB path to the symlink (from which the
++		 * current directory is extracted) and smb_target is the SMB
++		 * path the symlink points to; therefore full_path must
++		 * always be on the SMB share.
++		 */
++		int smb_target_len = strlen(smb_target)+1;
++		levels = 0;
++		for (i = 1; full_path[i]; i++) { /* i=1 to skip leading sep */
++			if (full_path[i] == sep)
++				levels++;
++		}
++		linux_target = kmalloc(levels*3 + smb_target_len, GFP_KERNEL);
++		if (!linux_target) {
++			rc = -ENOMEM;
++			goto out;
++		}
++		for (i = 0; i < levels; i++) {
++			linux_target[i*3 + 0] = '.';
++			linux_target[i*3 + 1] = '.';
++			linux_target[i*3 + 2] = sep;
++		}
++		memcpy(linux_target + levels*3, smb_target+1, smb_target_len); /* +1 to skip leading sep */
++	} else {
++		linux_target = smb_target;
++		smb_target = NULL;
++	}
++
++	if (sep == '\\')
++		convert_delimiter(linux_target, '/');
++
++	rc = 0;
++	*target = linux_target;
++
++	cifs_dbg(FYI, "%s: symlink target: %s\n", __func__, *target);
++
++out:
++	if (rc != 0)
++		kfree(linux_target);
++	kfree(smb_target);
++	return rc;
++}
++
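
smb2_parse_native_symlink() turns a share-root-relative target into one relative to the symlink's own directory by prepending one ".." component per directory level above the link. A self-contained userspace version of the same conversion, assuming '/' as the separator (make_relative is a hypothetical name):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /*
     * full_path: path of the symlink within the share, e.g. "/a/b/link".
     * target:    share-root-relative target, e.g. "/c/d".
     * Returns a malloc'ed "../../c/d"-style path, or NULL on OOM.
     */
    static char *make_relative(const char *full_path, const char *target)
    {
    	size_t tlen = strlen(target);	/* covers "c/d" plus the NUL */
    	int levels = 0, i;
    	char *out;

    	for (i = 1; full_path[i]; i++)	/* i=1 skips the leading '/' */
    		if (full_path[i] == '/')
    			levels++;
    	out = malloc(levels * 3 + tlen);
    	if (!out)
    		return NULL;
    	for (i = 0; i < levels; i++)
    		memcpy(out + i * 3, "../", 3);
    	memcpy(out + levels * 3, target + 1, tlen);	/* includes NUL */
    	return out;
    }

    int main(void)
    {
    	char *p = make_relative("/a/b/link", "/c/d");

    	printf("%s\n", p ? p : "(oom)");	/* prints ../../c/d */
    	free(p);
    	return 0;
    }
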
+ static int parse_reparse_symlink(struct reparse_symlink_data_buffer *sym,
+ 				 u32 plen, bool unicode,
+ 				 struct cifs_sb_info *cifs_sb,
++				 const char *full_path,
+ 				 struct cifs_open_info_data *data)
+ {
+ 	unsigned int len;
+@@ -549,20 +619,18 @@ static int parse_reparse_symlink(struct reparse_symlink_data_buffer *sym,
+ 		return -EIO;
+ 	}
+ 
+-	data->symlink_target = cifs_strndup_from_utf16(sym->PathBuffer + offs,
+-						       len, unicode,
+-						       cifs_sb->local_nls);
+-	if (!data->symlink_target)
+-		return -ENOMEM;
+-
+-	convert_delimiter(data->symlink_target, '/');
+-	cifs_dbg(FYI, "%s: target path: %s\n", __func__, data->symlink_target);
+-
+-	return 0;
++	return smb2_parse_native_symlink(&data->symlink_target,
++					 sym->PathBuffer + offs,
++					 len,
++					 unicode,
++					 le32_to_cpu(sym->Flags) & SYMLINK_FLAG_RELATIVE,
++					 full_path,
++					 cifs_sb);
+ }
+ 
+ int parse_reparse_point(struct reparse_data_buffer *buf,
+ 			u32 plen, struct cifs_sb_info *cifs_sb,
++			const char *full_path,
+ 			bool unicode, struct cifs_open_info_data *data)
+ {
+ 	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
+@@ -577,7 +645,7 @@ int parse_reparse_point(struct reparse_data_buffer *buf,
+ 	case IO_REPARSE_TAG_SYMLINK:
+ 		return parse_reparse_symlink(
+ 			(struct reparse_symlink_data_buffer *)buf,
+-			plen, unicode, cifs_sb, data);
++			plen, unicode, cifs_sb, full_path, data);
+ 	case IO_REPARSE_TAG_LX_SYMLINK:
+ 	case IO_REPARSE_TAG_AF_UNIX:
+ 	case IO_REPARSE_TAG_LX_FIFO:
+@@ -593,6 +661,7 @@ int parse_reparse_point(struct reparse_data_buffer *buf,
+ }
+ 
+ int smb2_parse_reparse_point(struct cifs_sb_info *cifs_sb,
++			     const char *full_path,
+ 			     struct kvec *rsp_iov,
+ 			     struct cifs_open_info_data *data)
+ {
+@@ -602,7 +671,7 @@ int smb2_parse_reparse_point(struct cifs_sb_info *cifs_sb,
+ 
+ 	buf = (struct reparse_data_buffer *)((u8 *)io +
+ 					     le32_to_cpu(io->OutputOffset));
+-	return parse_reparse_point(buf, plen, cifs_sb, true, data);
++	return parse_reparse_point(buf, plen, cifs_sb, full_path, true, data);
+ }
+ 
+ static void wsl_to_fattr(struct cifs_open_info_data *data,
+diff --git a/fs/smb/client/reparse.h b/fs/smb/client/reparse.h
+index 158e7b7aae646c..ff05b0e75c9284 100644
+--- a/fs/smb/client/reparse.h
++++ b/fs/smb/client/reparse.h
+@@ -12,6 +12,8 @@
+ #include "fs_context.h"
+ #include "cifsglob.h"
+ 
++#define REPARSE_SYM_PATH_MAX 4060
++
+ /*
+  * Used only by cifs.ko to ignore reparse points from files when client or
+  * server doesn't support FSCTL_GET_REPARSE_POINT.
+@@ -115,7 +117,9 @@ int smb2_create_reparse_symlink(const unsigned int xid, struct inode *inode,
+ int smb2_mknod_reparse(unsigned int xid, struct inode *inode,
+ 		       struct dentry *dentry, struct cifs_tcon *tcon,
+ 		       const char *full_path, umode_t mode, dev_t dev);
+-int smb2_parse_reparse_point(struct cifs_sb_info *cifs_sb, struct kvec *rsp_iov,
++int smb2_parse_reparse_point(struct cifs_sb_info *cifs_sb,
++			     const char *full_path,
++			     struct kvec *rsp_iov,
+ 			     struct cifs_open_info_data *data);
+ 
+ #endif /* _CIFS_REPARSE_H */
+diff --git a/fs/smb/client/smb1ops.c b/fs/smb/client/smb1ops.c
+index 9a6ece66c4d34e..db3695eddcf9d5 100644
+--- a/fs/smb/client/smb1ops.c
++++ b/fs/smb/client/smb1ops.c
+@@ -994,17 +994,17 @@ static int cifs_query_symlink(const unsigned int xid,
+ }
+ 
+ static int cifs_parse_reparse_point(struct cifs_sb_info *cifs_sb,
++				    const char *full_path,
+ 				    struct kvec *rsp_iov,
+ 				    struct cifs_open_info_data *data)
+ {
+ 	struct reparse_data_buffer *buf;
+ 	TRANSACT_IOCTL_RSP *io = rsp_iov->iov_base;
+-	bool unicode = !!(io->hdr.Flags2 & SMBFLG2_UNICODE);
+ 	u32 plen = le16_to_cpu(io->ByteCount);
+ 
+ 	buf = (struct reparse_data_buffer *)((__u8 *)&io->hdr.Protocol +
+ 					     le32_to_cpu(io->DataOffset));
+-	return parse_reparse_point(buf, plen, cifs_sb, unicode, data);
++	return parse_reparse_point(buf, plen, cifs_sb, full_path, true, data);
+ }
+ 
+ static bool
+diff --git a/fs/smb/client/smb2file.c b/fs/smb/client/smb2file.c
+index e301349b0078d1..e836bc2193ddd3 100644
+--- a/fs/smb/client/smb2file.c
++++ b/fs/smb/client/smb2file.c
+@@ -63,12 +63,12 @@ static struct smb2_symlink_err_rsp *symlink_data(const struct kvec *iov)
+ 	return sym;
+ }
+ 
+-int smb2_parse_symlink_response(struct cifs_sb_info *cifs_sb, const struct kvec *iov, char **path)
++int smb2_parse_symlink_response(struct cifs_sb_info *cifs_sb, const struct kvec *iov,
++				const char *full_path, char **path)
+ {
+ 	struct smb2_symlink_err_rsp *sym;
+ 	unsigned int sub_offs, sub_len;
+ 	unsigned int print_offs, print_len;
+-	char *s;
+ 
+ 	if (!cifs_sb || !iov || !iov->iov_base || !iov->iov_len || !path)
+ 		return -EINVAL;
+@@ -86,15 +86,13 @@ int smb2_parse_symlink_response(struct cifs_sb_info *cifs_sb, const struct kvec
+ 	    iov->iov_len < SMB2_SYMLINK_STRUCT_SIZE + print_offs + print_len)
+ 		return -EINVAL;
+ 
+-	s = cifs_strndup_from_utf16((char *)sym->PathBuffer + sub_offs, sub_len, true,
+-				    cifs_sb->local_nls);
+-	if (!s)
+-		return -ENOMEM;
+-	convert_delimiter(s, '/');
+-	cifs_dbg(FYI, "%s: symlink target: %s\n", __func__, s);
+-
+-	*path = s;
+-	return 0;
++	return smb2_parse_native_symlink(path,
++					 (char *)sym->PathBuffer + sub_offs,
++					 sub_len,
++					 true,
++					 le32_to_cpu(sym->Flags) & SYMLINK_FLAG_RELATIVE,
++					 full_path,
++					 cifs_sb);
+ }
+ 
+ int smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms, __u32 *oplock, void *buf)
+@@ -126,6 +124,7 @@ int smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms, __u32
+ 			goto out;
+ 		if (hdr->Status == STATUS_STOPPED_ON_SYMLINK) {
+ 			rc = smb2_parse_symlink_response(oparms->cifs_sb, &err_iov,
++							 oparms->path,
+ 							 &data->symlink_target);
+ 			if (!rc) {
+ 				memset(smb2_data, 0, sizeof(*smb2_data));
+diff --git a/fs/smb/client/smb2inode.c b/fs/smb/client/smb2inode.c
+index e49d0c25eb0384..a188908914fe8f 100644
+--- a/fs/smb/client/smb2inode.c
++++ b/fs/smb/client/smb2inode.c
+@@ -828,6 +828,7 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ 
+ static int parse_create_response(struct cifs_open_info_data *data,
+ 				 struct cifs_sb_info *cifs_sb,
++				 const char *full_path,
+ 				 const struct kvec *iov)
+ {
+ 	struct smb2_create_rsp *rsp = iov->iov_base;
+@@ -841,6 +842,7 @@ static int parse_create_response(struct cifs_open_info_data *data,
+ 		break;
+ 	case STATUS_STOPPED_ON_SYMLINK:
+ 		rc = smb2_parse_symlink_response(cifs_sb, iov,
++						 full_path,
+ 						 &data->symlink_target);
+ 		if (rc)
+ 			return rc;
+@@ -930,14 +932,14 @@ int smb2_query_path_info(const unsigned int xid,
+ 
+ 	switch (rc) {
+ 	case 0:
+-		rc = parse_create_response(data, cifs_sb, &out_iov[0]);
++		rc = parse_create_response(data, cifs_sb, full_path, &out_iov[0]);
+ 		break;
+ 	case -EOPNOTSUPP:
+ 		/*
+ 		 * BB TODO: When support for special files added to Samba
+ 		 * re-verify this path.
+ 		 */
+-		rc = parse_create_response(data, cifs_sb, &out_iov[0]);
++		rc = parse_create_response(data, cifs_sb, full_path, &out_iov[0]);
+ 		if (rc || !data->reparse_point)
+ 			goto out;
+ 
+diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
+index 24a2aa04a1086c..7571fefeb83aa1 100644
+--- a/fs/smb/client/smb2ops.c
++++ b/fs/smb/client/smb2ops.c
+@@ -4080,7 +4080,7 @@ map_oplock_to_lease(u8 oplock)
+ 	if (oplock == SMB2_OPLOCK_LEVEL_EXCLUSIVE)
+ 		return SMB2_LEASE_WRITE_CACHING_LE | SMB2_LEASE_READ_CACHING_LE;
+ 	else if (oplock == SMB2_OPLOCK_LEVEL_II)
+-		return SMB2_LEASE_READ_CACHING_LE;
++		return SMB2_LEASE_READ_CACHING_LE | SMB2_LEASE_HANDLE_CACHING_LE;
+ 	else if (oplock == SMB2_OPLOCK_LEVEL_BATCH)
+ 		return SMB2_LEASE_HANDLE_CACHING_LE | SMB2_LEASE_READ_CACHING_LE |
+ 		       SMB2_LEASE_WRITE_CACHING_LE;
+diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
+index 6584b5cddc280a..d1bd69cbfe09a5 100644
+--- a/fs/smb/client/smb2pdu.c
++++ b/fs/smb/client/smb2pdu.c
+@@ -1231,7 +1231,9 @@ SMB2_negotiate(const unsigned int xid,
+ 	 * SMB3.0 supports only 1 cipher and doesn't have a encryption neg context
+ 	 * Set the cipher type manually.
+ 	 */
+-	if (server->dialect == SMB30_PROT_ID && (server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION))
++	if ((server->dialect == SMB30_PROT_ID ||
++	     server->dialect == SMB302_PROT_ID) &&
++	    (server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION))
+ 		server->cipher_type = SMB2_ENCRYPTION_AES128_CCM;
+ 
+ 	security_blob = smb2_get_data_area_len(&blob_offset, &blob_length,
+@@ -2683,7 +2685,7 @@ create_sd_buf(umode_t mode, bool set_owner, unsigned int *len)
+ 	ptr += sizeof(struct smb3_acl);
+ 
+ 	/* create one ACE to hold the mode embedded in reserved special SID */
+-	acelen = setup_special_mode_ACE((struct smb_ace *)ptr, (__u64)mode);
++	acelen = setup_special_mode_ACE((struct smb_ace *)ptr, false, (__u64)mode);
+ 	ptr += acelen;
+ 	acl_size = acelen + sizeof(struct smb3_acl);
+ 	ace_count = 1;
+diff --git a/fs/smb/client/smb2proto.h b/fs/smb/client/smb2proto.h
+index 6f9885e4f66ca5..09349fa8da039a 100644
+--- a/fs/smb/client/smb2proto.h
++++ b/fs/smb/client/smb2proto.h
+@@ -37,8 +37,6 @@ extern struct mid_q_entry *smb2_setup_request(struct cifs_ses *ses,
+ 					      struct smb_rqst *rqst);
+ extern struct mid_q_entry *smb2_setup_async_request(
+ 			struct TCP_Server_Info *server, struct smb_rqst *rqst);
+-extern struct cifs_ses *smb2_find_smb_ses(struct TCP_Server_Info *server,
+-					   __u64 ses_id);
+ extern struct cifs_tcon *smb2_find_smb_tcon(struct TCP_Server_Info *server,
+ 						__u64 ses_id, __u32  tid);
+ extern int smb2_calc_signature(struct smb_rqst *rqst,
+@@ -113,7 +111,14 @@ extern int smb3_query_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
+ 			  struct cifs_sb_info *cifs_sb,
+ 			  const unsigned char *path, char *pbuf,
+ 			  unsigned int *pbytes_read);
+-int smb2_parse_symlink_response(struct cifs_sb_info *cifs_sb, const struct kvec *iov, char **path);
++int smb2_parse_native_symlink(char **target, const char *buf, unsigned int len,
++			      bool unicode, bool relative,
++			      const char *full_path,
++			      struct cifs_sb_info *cifs_sb);
++int smb2_parse_symlink_response(struct cifs_sb_info *cifs_sb,
++				const struct kvec *iov,
++				const char *full_path,
++				char **path);
+ int smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms, __u32 *oplock,
+ 		   void *buf);
+ extern int smb2_unlock_range(struct cifsFileInfo *cfile,
+diff --git a/fs/smb/client/smb2transport.c b/fs/smb/client/smb2transport.c
+index b486b14bb3306f..475b36c27f6543 100644
+--- a/fs/smb/client/smb2transport.c
++++ b/fs/smb/client/smb2transport.c
+@@ -74,7 +74,7 @@ smb311_crypto_shash_allocate(struct TCP_Server_Info *server)
+ 
+ 
+ static
+-int smb2_get_sign_key(__u64 ses_id, struct TCP_Server_Info *server, u8 *key)
++int smb3_get_sign_key(__u64 ses_id, struct TCP_Server_Info *server, u8 *key)
+ {
+ 	struct cifs_chan *chan;
+ 	struct TCP_Server_Info *pserver;
+@@ -168,16 +168,41 @@ smb2_find_smb_ses_unlocked(struct TCP_Server_Info *server, __u64 ses_id)
+ 	return NULL;
+ }
+ 
+-struct cifs_ses *
+-smb2_find_smb_ses(struct TCP_Server_Info *server, __u64 ses_id)
++static int smb2_get_sign_key(struct TCP_Server_Info *server,
++			     __u64 ses_id, u8 *key)
+ {
+ 	struct cifs_ses *ses;
++	int rc = -ENOENT;
++
++	if (SERVER_IS_CHAN(server))
++		server = server->primary_server;
+ 
+ 	spin_lock(&cifs_tcp_ses_lock);
+-	ses = smb2_find_smb_ses_unlocked(server, ses_id);
+-	spin_unlock(&cifs_tcp_ses_lock);
++	list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
++		if (ses->Suid != ses_id)
++			continue;
+ 
+-	return ses;
++		rc = 0;
++		spin_lock(&ses->ses_lock);
++		switch (ses->ses_status) {
++		case SES_EXITING: /* SMB2_LOGOFF */
++		case SES_GOOD:
++			if (likely(ses->auth_key.response)) {
++				memcpy(key, ses->auth_key.response,
++				       SMB2_NTLMV2_SESSKEY_SIZE);
++			} else {
++				rc = -EIO;
++			}
++			break;
++		default:
++			rc = -EAGAIN;
++			break;
++		}
++		spin_unlock(&ses->ses_lock);
++		break;
++	}
++	spin_unlock(&cifs_tcp_ses_lock);
++	return rc;
+ }
+ 
+ static struct cifs_tcon *
+@@ -236,14 +261,16 @@ smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server,
+ 	unsigned char *sigptr = smb2_signature;
+ 	struct kvec *iov = rqst->rq_iov;
+ 	struct smb2_hdr *shdr = (struct smb2_hdr *)iov[0].iov_base;
+-	struct cifs_ses *ses;
+ 	struct shash_desc *shash = NULL;
+ 	struct smb_rqst drqst;
++	__u64 sid = le64_to_cpu(shdr->SessionId);
++	u8 key[SMB2_NTLMV2_SESSKEY_SIZE];
+ 
+-	ses = smb2_find_smb_ses(server, le64_to_cpu(shdr->SessionId));
+-	if (unlikely(!ses)) {
+-		cifs_server_dbg(FYI, "%s: Could not find session\n", __func__);
+-		return -ENOENT;
++	rc = smb2_get_sign_key(server, sid, key);
++	if (unlikely(rc)) {
++		cifs_server_dbg(FYI, "%s: [sesid=0x%llx] couldn't find signing key: %d\n",
++				__func__, sid, rc);
++		return rc;
+ 	}
+ 
+ 	memset(smb2_signature, 0x0, SMB2_HMACSHA256_SIZE);
+@@ -260,8 +287,7 @@ smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server,
+ 		shash = server->secmech.hmacsha256;
+ 	}
+ 
+-	rc = crypto_shash_setkey(shash->tfm, ses->auth_key.response,
+-			SMB2_NTLMV2_SESSKEY_SIZE);
++	rc = crypto_shash_setkey(shash->tfm, key, sizeof(key));
+ 	if (rc) {
+ 		cifs_server_dbg(VFS,
+ 				"%s: Could not update with response\n",
+@@ -303,8 +329,6 @@ smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server,
+ out:
+ 	if (allocate_crypto)
+ 		cifs_free_hash(&shash);
+-	if (ses)
+-		cifs_put_smb_ses(ses);
+ 	return rc;
+ }
+ 
+@@ -570,7 +594,7 @@ smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server,
+ 	struct smb_rqst drqst;
+ 	u8 key[SMB3_SIGN_KEY_SIZE];
+ 
+-	rc = smb2_get_sign_key(le64_to_cpu(shdr->SessionId), server, key);
++	rc = smb3_get_sign_key(le64_to_cpu(shdr->SessionId), server, key);
+ 	if (unlikely(rc)) {
+ 		cifs_server_dbg(FYI, "%s: Could not get signing key\n", __func__);
+ 		return rc;
+diff --git a/fs/smb/client/trace.h b/fs/smb/client/trace.h
+index 0b52d22a91a0cb..12cbd3428a6da5 100644
+--- a/fs/smb/client/trace.h
++++ b/fs/smb/client/trace.h
+@@ -44,6 +44,8 @@
+ 	EM(netfs_trace_tcon_ref_free_ipc,		"FRE Ipc   ") \
+ 	EM(netfs_trace_tcon_ref_free_ipc_fail,		"FRE Ipc-F ") \
+ 	EM(netfs_trace_tcon_ref_free_reconnect_server,	"FRE Reconn") \
++	EM(netfs_trace_tcon_ref_get_cached_laundromat,	"GET Ch-Lau") \
++	EM(netfs_trace_tcon_ref_get_cached_lease_break,	"GET Ch-Lea") \
+ 	EM(netfs_trace_tcon_ref_get_cancelled_close,	"GET Cn-Cls") \
+ 	EM(netfs_trace_tcon_ref_get_dfs_refer,		"GET DfsRef") \
+ 	EM(netfs_trace_tcon_ref_get_find,		"GET Find  ") \
+@@ -52,6 +54,7 @@
+ 	EM(netfs_trace_tcon_ref_new,			"NEW       ") \
+ 	EM(netfs_trace_tcon_ref_new_ipc,		"NEW Ipc   ") \
+ 	EM(netfs_trace_tcon_ref_new_reconnect_server,	"NEW Reconn") \
++	EM(netfs_trace_tcon_ref_put_cached_close,	"PUT Ch-Cls") \
+ 	EM(netfs_trace_tcon_ref_put_cancelled_close,	"PUT Cn-Cls") \
+ 	EM(netfs_trace_tcon_ref_put_cancelled_close_fid, "PUT Cn-Fid") \
+ 	EM(netfs_trace_tcon_ref_put_cancelled_mid,	"PUT Cn-Mid") \
+diff --git a/fs/smb/server/server.c b/fs/smb/server/server.c
+index e6cfedba999232..c8cc6fa6fc3ebb 100644
+--- a/fs/smb/server/server.c
++++ b/fs/smb/server/server.c
+@@ -276,8 +276,12 @@ static void handle_ksmbd_work(struct work_struct *wk)
+ 	 * disconnection. waitqueue_active is safe because it
+ 	 * uses an atomic operation for the condition.
+ 	 */
++	atomic_inc(&conn->refcnt);
+ 	if (!atomic_dec_return(&conn->r_count) && waitqueue_active(&conn->r_count_q))
+ 		wake_up(&conn->r_count_q);
++
++	if (atomic_dec_and_test(&conn->refcnt))
++		kfree(conn);
+ }
+ 
+ /**
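
The ksmbd fix pins the connection with its own refcnt before dropping r_count, because once r_count reaches zero the teardown path may free conn while this thread is still about to wake the waitqueue. A minimal model of that ordering with C11 atomics (names hypothetical):

    #include <stdatomic.h>
    #include <stdlib.h>

    struct conn_model {
    	atomic_int refcnt;	/* lifetime of the object itself */
    	atomic_int r_count;	/* in-flight work items */
    };

    static void handle_work_done(struct conn_model *conn)
    {
    	/*
    	 * Pin the object first: once r_count hits zero, teardown may
    	 * run, so conn must stay valid for the wake-up that follows
    	 * the decrement.
    	 */
    	atomic_fetch_add(&conn->refcnt, 1);
    	if (atomic_fetch_sub(&conn->r_count, 1) == 1) {
    		/* last work item: the waiter would be woken here */
    	}
    	if (atomic_fetch_sub(&conn->refcnt, 1) == 1)
    		free(conn);	/* we held the final reference */
    }

    int main(void)
    {
    	struct conn_model *conn = malloc(sizeof(*conn));

    	if (!conn)
    		return 1;
    	atomic_init(&conn->refcnt, 1);	/* teardown path's reference */
    	atomic_init(&conn->r_count, 1);	/* one work item in flight */
    	handle_work_done(conn);
    	if (atomic_fetch_sub(&conn->refcnt, 1) == 1)	/* teardown's put */
    		free(conn);
    	return 0;
    }
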
+diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
+index 291583005dd123..245a10cc1eeb4d 100644
+--- a/fs/ubifs/super.c
++++ b/fs/ubifs/super.c
+@@ -773,10 +773,10 @@ static void init_constants_master(struct ubifs_info *c)
+ 	 * necessary to report something for the 'statfs()' call.
+ 	 *
+ 	 * Subtract the LEB reserved for GC, the LEB which is reserved for
+-	 * deletions, minimum LEBs for the index, and assume only one journal
+-	 * head is available.
++	 * deletions, minimum LEBs for the index, the LEBs which are reserved
++	 * for each journal head.
+ 	 */
+-	tmp64 = c->main_lebs - 1 - 1 - MIN_INDEX_LEBS - c->jhead_cnt + 1;
++	tmp64 = c->main_lebs - 1 - 1 - MIN_INDEX_LEBS - c->jhead_cnt;
+ 	tmp64 *= (long long)c->leb_size - c->leb_overhead;
+ 	tmp64 = ubifs_reported_space(c, tmp64);
+ 	c->block_cnt = tmp64 >> UBIFS_BLOCK_SHIFT;
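
The only change is dropping the trailing "+ 1": the old expression reserved jhead_cnt - 1 LEBs for the journal heads, so statfs() over-reported free space by one LEB's worth of bytes. A quick check with made-up geometry:

    #include <stdio.h>

    int main(void)
    {
    	/* hypothetical geometry: 1000 main LEBs, 10 index LEBs, 3 heads */
    	long long main_lebs = 1000, min_index_lebs = 10, jhead_cnt = 3;
    	long long old_lebs = main_lebs - 1 - 1 - min_index_lebs - jhead_cnt + 1;
    	long long new_lebs = main_lebs - 1 - 1 - min_index_lebs - jhead_cnt;

    	printf("%lld vs %lld\n", old_lebs, new_lebs);	/* 986 vs 985 */
    	return 0;
    }
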
+diff --git a/fs/ubifs/tnc_commit.c b/fs/ubifs/tnc_commit.c
+index a55e04822d16e9..7c43e0ccf6d47d 100644
+--- a/fs/ubifs/tnc_commit.c
++++ b/fs/ubifs/tnc_commit.c
+@@ -657,6 +657,8 @@ static int get_znodes_to_commit(struct ubifs_info *c)
+ 		znode->alt = 0;
+ 		cnext = find_next_dirty(znode);
+ 		if (!cnext) {
++			ubifs_assert(c, !znode->parent);
++			znode->cparent = NULL;
+ 			znode->cnext = c->cnext;
+ 			break;
+ 		}
+diff --git a/fs/unicode/utf8-core.c b/fs/unicode/utf8-core.c
+index 8395066341a437..0400824ef4936e 100644
+--- a/fs/unicode/utf8-core.c
++++ b/fs/unicode/utf8-core.c
+@@ -198,7 +198,7 @@ struct unicode_map *utf8_load(unsigned int version)
+ 	return um;
+ 
+ out_symbol_put:
+-	symbol_put(um->tables);
++	symbol_put(utf8_data_table);
+ out_free_um:
+ 	kfree(um);
+ 	return ERR_PTR(-EINVAL);
+diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
+index 4719ec90029cb7..edaf193dbd5ccc 100644
+--- a/fs/xfs/xfs_bmap_util.c
++++ b/fs/xfs/xfs_bmap_util.c
+@@ -546,10 +546,14 @@ xfs_can_free_eofblocks(
+ 		return false;
+ 
+ 	/*
+-	 * Check if there is an post-EOF extent to free.
++	 * Check if there is a post-EOF extent to free.  If there are any
++	 * delalloc blocks attached to the inode (data fork delalloc
++	 * reservations or CoW extents of any kind), we need to free them so
++	 * that inactivation doesn't fail to erase them.
+ 	 */
+ 	xfs_ilock(ip, XFS_ILOCK_SHARED);
+-	if (xfs_iext_lookup_extent(ip, &ip->i_df, end_fsb, &icur, &imap))
++	if (ip->i_delayed_blks ||
++	    xfs_iext_lookup_extent(ip, &ip->i_df, end_fsb, &icur, &imap))
+ 		found_blocks = true;
+ 	xfs_iunlock(ip, XFS_ILOCK_SHARED);
+ 	return found_blocks;
+diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
+index eeadbaeccf88b7..fa284b64b2de20 100644
+--- a/include/asm-generic/vmlinux.lds.h
++++ b/include/asm-generic/vmlinux.lds.h
+@@ -350,9 +350,9 @@
+ 	*(.data..decrypted)						\
+ 	*(.ref.data)							\
+ 	*(.data..shared_aligned) /* percpu related */			\
+-	*(.data.unlikely)						\
++	*(.data..unlikely)						\
+ 	__start_once = .;						\
+-	*(.data.once)							\
++	*(.data..once)							\
+ 	__end_once = .;							\
+ 	STRUCT_ALIGN();							\
+ 	*(__tracepoints)						\
+diff --git a/include/kunit/skbuff.h b/include/kunit/skbuff.h
+index 44d12370939a90..345e1e8f031235 100644
+--- a/include/kunit/skbuff.h
++++ b/include/kunit/skbuff.h
+@@ -29,7 +29,7 @@ static void kunit_action_kfree_skb(void *p)
+ static inline struct sk_buff *kunit_zalloc_skb(struct kunit *test, int len,
+ 					       gfp_t gfp)
+ {
+-	struct sk_buff *res = alloc_skb(len, GFP_KERNEL);
++	struct sk_buff *res = alloc_skb(len, gfp);
+ 
+ 	if (!res || skb_pad(res, len))
+ 		return NULL;
+diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
+index 4fecf46ef681b3..c5063e0a38a058 100644
+--- a/include/linux/blk-mq.h
++++ b/include/linux/blk-mq.h
+@@ -925,6 +925,8 @@ void blk_freeze_queue_start(struct request_queue *q);
+ void blk_mq_freeze_queue_wait(struct request_queue *q);
+ int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
+ 				     unsigned long timeout);
++void blk_mq_unfreeze_queue_non_owner(struct request_queue *q);
++void blk_freeze_queue_start_non_owner(struct request_queue *q);
+ 
+ void blk_mq_map_queues(struct blk_mq_queue_map *qmap);
+ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index 50c3b959da2816..e84a93c4013207 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -25,6 +25,7 @@
+ #include <linux/uuid.h>
+ #include <linux/xarray.h>
+ #include <linux/file.h>
++#include <linux/lockdep.h>
+ 
+ struct module;
+ struct request_queue;
+@@ -471,6 +472,11 @@ struct request_queue {
+ 	struct xarray		hctx_table;
+ 
+ 	struct percpu_ref	q_usage_counter;
++	struct lock_class_key	io_lock_cls_key;
++	struct lockdep_map	io_lockdep_map;
++
++	struct lock_class_key	q_lock_cls_key;
++	struct lockdep_map	q_lockdep_map;
+ 
+ 	struct request		*last_merge;
+ 
+@@ -566,6 +572,10 @@ struct request_queue {
+ 	struct throtl_data *td;
+ #endif
+ 	struct rcu_head		rcu_head;
++#ifdef CONFIG_LOCKDEP
++	struct task_struct	*mq_freeze_owner;
++	int			mq_freeze_owner_depth;
++#endif
+ 	wait_queue_head_t	mq_freeze_wq;
+ 	/*
+ 	 * Protect concurrent access to q_usage_counter by
+@@ -1247,7 +1257,7 @@ static inline unsigned int queue_io_min(const struct request_queue *q)
+ 	return q->limits.io_min;
+ }
+ 
+-static inline int bdev_io_min(struct block_device *bdev)
++static inline unsigned int bdev_io_min(struct block_device *bdev)
+ {
+ 	return queue_io_min(bdev_get_queue(bdev));
+ }
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index bdadb0bb6cecd1..bc2e3dab0487ea 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -1373,7 +1373,8 @@ int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_func
+ void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
+ 				struct bpf_prog *to);
+ /* Called only from JIT-enabled code, so there's no need for stubs. */
+-void bpf_image_ksym_add(void *data, unsigned int size, struct bpf_ksym *ksym);
++void bpf_image_ksym_init(void *data, unsigned int size, struct bpf_ksym *ksym);
++void bpf_image_ksym_add(struct bpf_ksym *ksym);
+ void bpf_image_ksym_del(struct bpf_ksym *ksym);
+ void bpf_ksym_add(struct bpf_ksym *ksym);
+ void bpf_ksym_del(struct bpf_ksym *ksym);
+@@ -3461,4 +3462,10 @@ static inline bool bpf_is_subprog(const struct bpf_prog *prog)
+ 	return prog->aux->func_idx != 0;
+ }
+ 
++static inline bool bpf_prog_is_raw_tp(const struct bpf_prog *prog)
++{
++	return prog->type == BPF_PROG_TYPE_TRACING &&
++	       prog->expected_attach_type == BPF_TRACE_RAW_TP;
++}
++
+ #endif /* _LINUX_BPF_H */
+diff --git a/include/linux/cleanup.h b/include/linux/cleanup.h
+index 038b2d523bf884..518bd1fd86fbe0 100644
+--- a/include/linux/cleanup.h
++++ b/include/linux/cleanup.h
+@@ -290,7 +290,7 @@ static inline class_##_name##_t class_##_name##ext##_constructor(_init_args) \
+ #define DEFINE_GUARD(_name, _type, _lock, _unlock) \
+ 	DEFINE_CLASS(_name, _type, if (_T) { _unlock; }, ({ _lock; _T; }), _type _T); \
+ 	static inline void * class_##_name##_lock_ptr(class_##_name##_t *_T) \
+-	{ return *_T; }
++	{ return (void *)(__force unsigned long)*_T; }
+ 
+ #define DEFINE_GUARD_COND(_name, _ext, _condlock) \
+ 	EXTEND_CLASS(_name, _ext, \
+@@ -347,7 +347,7 @@ static inline void class_##_name##_destructor(class_##_name##_t *_T)	\
+ 									\
+ static inline void *class_##_name##_lock_ptr(class_##_name##_t *_T)	\
+ {									\
+-	return _T->lock;						\
++	return (void *)(__force unsigned long)_T->lock;			\
+ }
+ 
+ 
+diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h
+index 32284cd26d52a7..c16d4199bf9231 100644
+--- a/include/linux/compiler_attributes.h
++++ b/include/linux/compiler_attributes.h
+@@ -94,19 +94,6 @@
+ # define __copy(symbol)
+ #endif
+ 
+-/*
+- * Optional: only supported since gcc >= 15
+- * Optional: only supported since clang >= 18
+- *
+- *   gcc: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=108896
+- * clang: https://github.com/llvm/llvm-project/pull/76348
+- */
+-#if __has_attribute(__counted_by__)
+-# define __counted_by(member)		__attribute__((__counted_by__(member)))
+-#else
+-# define __counted_by(member)
+-#endif
+-
+ /*
+  * Optional: not supported by gcc
+  * Optional: only supported since clang >= 14.0
+diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
+index 1a957ea2f4fe78..639be0f30b455d 100644
+--- a/include/linux/compiler_types.h
++++ b/include/linux/compiler_types.h
+@@ -323,6 +323,25 @@ struct ftrace_likely_data {
+ #define __no_sanitize_or_inline __always_inline
+ #endif
+ 
++/*
++ * Optional: only supported since gcc >= 15
++ * Optional: only supported since clang >= 18
++ *
++ *   gcc: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=108896
++ * clang: https://github.com/llvm/llvm-project/pull/76348
++ *
++ * __bdos on clang < 19.1.2 can erroneously return 0:
++ * https://github.com/llvm/llvm-project/pull/110497
++ *
++ * __bdos on clang < 19.1.3 can be off by 4:
++ * https://github.com/llvm/llvm-project/pull/112636
++ */
++#ifdef CONFIG_CC_HAS_COUNTED_BY
++# define __counted_by(member)		__attribute__((__counted_by__(member)))
++#else
++# define __counted_by(member)
++#endif
++
+ /*
+  * Apply __counted_by() when the Endianness matches to increase test coverage.
+  */
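
Moving the definition behind CONFIG_CC_HAS_COUNTED_BY lets a Kconfig compiler test exclude the miscompiling clang ranges cited above, while usage is unchanged: the attribute ties a flexible array's bounds to the member holding its count. A hedged userspace sketch with a portable fallback (this is an illustration, not the kernel header):

    #include <stdlib.h>

    #ifndef __has_attribute
    # define __has_attribute(x) 0
    #endif
    #if __has_attribute(__counted_by__)
    # define __counted_by(m) __attribute__((__counted_by__(m)))
    #else
    # define __counted_by(m)
    #endif

    struct pkt {
    	int len;
    	unsigned char data[] __counted_by(len);	/* bounds tied to len */
    };

    static struct pkt *pkt_alloc(int n)
    {
    	struct pkt *p = malloc(sizeof(*p) + n);

    	if (p)
    		p->len = n;	/* set the counter before touching data[] */
    	return p;
    }

    int main(void)
    {
    	struct pkt *p = pkt_alloc(16);

    	if (p)
    		p->data[0] = 0;	/* in bounds: index 0 < p->len */
    	free(p);
    	return 0;
    }
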
+diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
+index b0b821edfd97d1..3b2ad444c002ee 100644
+--- a/include/linux/f2fs_fs.h
++++ b/include/linux/f2fs_fs.h
+@@ -24,10 +24,10 @@
+ #define NEW_ADDR		((block_t)-1)	/* used as block_t addresses */
+ #define COMPRESS_ADDR		((block_t)-2)	/* used as compressed data flag */
+ 
+-#define F2FS_BYTES_TO_BLK(bytes)	((bytes) >> F2FS_BLKSIZE_BITS)
+-#define F2FS_BLK_TO_BYTES(blk)		((blk) << F2FS_BLKSIZE_BITS)
++#define F2FS_BYTES_TO_BLK(bytes)	((unsigned long long)(bytes) >> F2FS_BLKSIZE_BITS)
++#define F2FS_BLK_TO_BYTES(blk)		((unsigned long long)(blk) << F2FS_BLKSIZE_BITS)
+ #define F2FS_BLK_END_BYTES(blk)		(F2FS_BLK_TO_BYTES(blk + 1) - 1)
+-#define F2FS_BLK_ALIGN(x)			(F2FS_BYTES_TO_BLK((x) + F2FS_BLKSIZE - 1))
++#define F2FS_BLK_ALIGN(x)		(F2FS_BYTES_TO_BLK((x) + F2FS_BLKSIZE - 1))
+ 
+ /* 0, 1(node nid), 2(meta nid) are reserved node id */
+ #define F2FS_RESERVED_NODE_NUM		3
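
The added casts matter because block_t is 32 bits wide: blk << F2FS_BLKSIZE_BITS wraps for any block number of 2^20 or more with 4 KiB blocks, i.e. byte offsets at or beyond 4 GiB. A two-line demonstration of the wrap:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	uint32_t blk = 1u << 20;	/* block 1048576 of a 4 KiB-block fs */

    	printf("%u\n", blk << 12);	/* wraps to 0 in 32-bit arithmetic */
    	printf("%llu\n", (unsigned long long)blk << 12);	/* 4294967296 */
    	return 0;
    }
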
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index 3559446279c152..4b5cad44a12683 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -3726,6 +3726,6 @@ static inline bool vfs_empty_path(int dfd, const char __user *path)
+ 	return !c;
+ }
+ 
+-bool generic_atomic_write_valid(struct iov_iter *iter, loff_t pos);
++int generic_atomic_write_valid(struct kiocb *iocb, struct iov_iter *iter);
+ 
+ #endif /* _LINUX_FS_H */
+diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h
+index 9d7754ad5e9b08..43ad280935e360 100644
+--- a/include/linux/hisi_acc_qm.h
++++ b/include/linux/hisi_acc_qm.h
+@@ -229,6 +229,12 @@ struct hisi_qm_status {
+ 
+ struct hisi_qm;
+ 
++enum acc_err_result {
++	ACC_ERR_NONE,
++	ACC_ERR_NEED_RESET,
++	ACC_ERR_RECOVERED,
++};
++
+ struct hisi_qm_err_info {
+ 	char *acpi_rst;
+ 	u32 msi_wr_port;
+@@ -257,9 +263,9 @@ struct hisi_qm_err_ini {
+ 	void (*close_axi_master_ooo)(struct hisi_qm *qm);
+ 	void (*open_sva_prefetch)(struct hisi_qm *qm);
+ 	void (*close_sva_prefetch)(struct hisi_qm *qm);
+-	void (*log_dev_hw_err)(struct hisi_qm *qm, u32 err_sts);
+ 	void (*show_last_dfx_regs)(struct hisi_qm *qm);
+ 	void (*err_info_init)(struct hisi_qm *qm);
++	enum acc_err_result (*get_err_result)(struct hisi_qm *qm);
+ };
+ 
+ struct hisi_qm_cap_info {
+diff --git a/include/linux/intel_vsec.h b/include/linux/intel_vsec.h
+index 11ee185566c31c..b94beab64610b9 100644
+--- a/include/linux/intel_vsec.h
++++ b/include/linux/intel_vsec.h
+@@ -74,10 +74,11 @@ enum intel_vsec_quirks {
+  * @pdev:  PCI device reference for the callback's use
+  * @guid:  ID of data to acccss
+  * @data:  buffer for the data to be copied
++ * @off:   offset into the requested buffer
+  * @count: size of buffer
+  */
+ struct pmt_callbacks {
+-	int (*read_telem)(struct pci_dev *pdev, u32 guid, u64 *data, u32 count);
++	int (*read_telem)(struct pci_dev *pdev, u32 guid, u64 *data, loff_t off, u32 count);
+ };
+ 
+ /**
+diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
+index 1220f0fbe5bf9f..5d21dacd62bc7d 100644
+--- a/include/linux/jiffies.h
++++ b/include/linux/jiffies.h
+@@ -502,7 +502,7 @@ static inline unsigned long _msecs_to_jiffies(const unsigned int m)
+  * - all other values are converted to jiffies by either multiplying
+  *   the input value by a factor or dividing it with a factor and
+  *   handling any 32-bit overflows.
+- *   for the details see __msecs_to_jiffies()
++ *   for the details see _msecs_to_jiffies()
+  *
+  * msecs_to_jiffies() checks for the passed in value being a constant
+  * via __builtin_constant_p() allowing gcc to eliminate most of the
+diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h
+index 564868bdce898b..fd743d4c4b4bdc 100644
+--- a/include/linux/kfifo.h
++++ b/include/linux/kfifo.h
+@@ -37,7 +37,6 @@
+  */
+ 
+ #include <linux/array_size.h>
+-#include <linux/dma-mapping.h>
+ #include <linux/spinlock.h>
+ #include <linux/stddef.h>
+ #include <linux/types.h>
+diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
+index 45be36e5285ffb..85fe9d0ebb9152 100644
+--- a/include/linux/kvm_host.h
++++ b/include/linux/kvm_host.h
+@@ -2382,12 +2382,6 @@ static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
+ }
+ #endif /* CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE */
+ 
+-typedef int (*kvm_vm_thread_fn_t)(struct kvm *kvm, uintptr_t data);
+-
+-int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn,
+-				uintptr_t data, const char *name,
+-				struct task_struct **thread_ptr);
+-
+ #ifdef CONFIG_KVM_XFER_TO_GUEST_WORK
+ static inline void kvm_handle_signal_exit(struct kvm_vcpu *vcpu)
+ {
+diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
+index 217f7abf2cbfab..67964dc4db952e 100644
+--- a/include/linux/lockdep.h
++++ b/include/linux/lockdep.h
+@@ -173,7 +173,7 @@ static inline void lockdep_init_map(struct lockdep_map *lock, const char *name,
+ 			      (lock)->dep_map.lock_type)
+ 
+ #define lockdep_set_subclass(lock, sub)					\
+-	lockdep_init_map_type(&(lock)->dep_map, #lock, (lock)->dep_map.key, sub,\
++	lockdep_init_map_type(&(lock)->dep_map, (lock)->dep_map.name, (lock)->dep_map.key, sub,\
+ 			      (lock)->dep_map.wait_type_inner,		\
+ 			      (lock)->dep_map.wait_type_outer,		\
+ 			      (lock)->dep_map.lock_type)
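
The subclass helper now reuses dep_map.name instead of #lock, because stringifying the macro argument yields whatever expression the caller wrote, not the name the map was registered with at init time. A tiny demonstration of the stringification behaviour (NAME_OF is a hypothetical stand-in):

    #include <stdio.h>

    #define NAME_OF(x) #x	/* stringifies whatever expression was passed */

    int main(void)
    {
    	/*
    	 * A lock registered as "my_lock" at init time, later passed to a
    	 * macro as a member access: #lock yields the expression text.
    	 */
    	printf("%s\n", NAME_OF(&foo->bar_lock));	/* "&foo->bar_lock" */
    	return 0;
    }
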
+diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h
+index 39a7714605a796..d7cb1e5ecbda9d 100644
+--- a/include/linux/mmdebug.h
++++ b/include/linux/mmdebug.h
+@@ -46,7 +46,7 @@ void vma_iter_dump_tree(const struct vma_iterator *vmi);
+ 		}							\
+ 	} while (0)
+ #define VM_WARN_ON_ONCE_PAGE(cond, page)	({			\
+-	static bool __section(".data.once") __warned;			\
++	static bool __section(".data..once") __warned;			\
+ 	int __ret_warn_once = !!(cond);					\
+ 									\
+ 	if (unlikely(__ret_warn_once && !__warned)) {			\
+@@ -66,7 +66,7 @@ void vma_iter_dump_tree(const struct vma_iterator *vmi);
+ 	unlikely(__ret_warn);						\
+ })
+ #define VM_WARN_ON_ONCE_FOLIO(cond, folio)	({			\
+-	static bool __section(".data.once") __warned;			\
++	static bool __section(".data..once") __warned;			\
+ 	int __ret_warn_once = !!(cond);					\
+ 									\
+ 	if (unlikely(__ret_warn_once && !__warned)) {			\
+@@ -77,7 +77,7 @@ void vma_iter_dump_tree(const struct vma_iterator *vmi);
+ 	unlikely(__ret_warn_once);					\
+ })
+ #define VM_WARN_ON_ONCE_MM(cond, mm)		({			\
+-	static bool __section(".data.once") __warned;			\
++	static bool __section(".data..once") __warned;			\
+ 	int __ret_warn_once = !!(cond);					\
+ 									\
+ 	if (unlikely(__ret_warn_once && !__warned)) {			\
+diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
+index cd4e28db0cbd77..959a4daacea1f2 100644
+--- a/include/linux/netpoll.h
++++ b/include/linux/netpoll.h
+@@ -72,7 +72,7 @@ static inline void *netpoll_poll_lock(struct napi_struct *napi)
+ {
+ 	struct net_device *dev = napi->dev;
+ 
+-	if (dev && dev->npinfo) {
++	if (dev && rcu_access_pointer(dev->npinfo)) {
+ 		int owner = smp_processor_id();
+ 
+ 		while (cmpxchg(&napi->poll_owner, -1, owner) != -1)
+diff --git a/include/linux/nfslocalio.h b/include/linux/nfslocalio.h
+index 3982fea799195e..9202f4b24343d7 100644
+--- a/include/linux/nfslocalio.h
++++ b/include/linux/nfslocalio.h
+@@ -55,7 +55,7 @@ struct nfsd_localio_operations {
+ 						const struct cred *,
+ 						const struct nfs_fh *,
+ 						const fmode_t);
+-	void (*nfsd_file_put_local)(struct nfsd_file *);
++	struct net *(*nfsd_file_put_local)(struct nfsd_file *);
+ 	struct file *(*nfsd_file_file)(struct nfsd_file *);
+ } ____cacheline_aligned;
+ 
+@@ -66,7 +66,7 @@ struct nfsd_file *nfs_open_local_fh(nfs_uuid_t *,
+ 		   struct rpc_clnt *, const struct cred *,
+ 		   const struct nfs_fh *, const fmode_t);
+ 
+-static inline void nfs_to_nfsd_file_put_local(struct nfsd_file *localio)
++static inline void nfs_to_nfsd_net_put(struct net *net)
+ {
+ 	/*
+ 	 * Once reference to nfsd_serv is dropped, NFSD could be
+@@ -74,10 +74,22 @@ static inline void nfs_to_nfsd_file_put_local(struct nfsd_file *localio)
+ 	 * by always taking RCU.
+ 	 */
+ 	rcu_read_lock();
+-	nfs_to->nfsd_file_put_local(localio);
++	nfs_to->nfsd_serv_put(net);
+ 	rcu_read_unlock();
+ }
+ 
++static inline void nfs_to_nfsd_file_put_local(struct nfsd_file *localio)
++{
++	/*
++	 * Must not hold RCU otherwise nfsd_file_put() can easily trigger:
++	 * "Voluntary context switch within RCU read-side critical section!"
++	 * by scheduling deep in underlying filesystem (e.g. XFS).
++	 */
++	struct net *net = nfs_to->nfsd_file_put_local(localio);
++
++	nfs_to_nfsd_net_put(net);
++}
++
+ #else   /* CONFIG_NFS_LOCALIO */
+ static inline void nfsd_localio_ops_init(void)
+ {
+diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h
+index d69ad5bb1eb1e6..b8d6c0c208760a 100644
+--- a/include/linux/of_fdt.h
++++ b/include/linux/of_fdt.h
+@@ -31,6 +31,7 @@ extern void *of_fdt_unflatten_tree(const unsigned long *blob,
+ extern int __initdata dt_root_addr_cells;
+ extern int __initdata dt_root_size_cells;
+ extern void *initial_boot_params;
++extern phys_addr_t initial_boot_params_pa;
+ 
+ extern char __dtb_start[];
+ extern char __dtb_end[];
+@@ -70,8 +71,8 @@ extern u64 dt_mem_next_cell(int s, const __be32 **cellp);
+ /* Early flat tree scan hooks */
+ extern int early_init_dt_scan_root(void);
+ 
+-extern bool early_init_dt_scan(void *params);
+-extern bool early_init_dt_verify(void *params);
++extern bool early_init_dt_scan(void *dt_virt, phys_addr_t dt_phys);
++extern bool early_init_dt_verify(void *dt_virt, phys_addr_t dt_phys);
+ extern void early_init_dt_scan_nodes(void);
+ 
+ extern const char *of_flat_dt_get_machine_name(void);
+diff --git a/include/linux/once.h b/include/linux/once.h
+index bc714d414448a7..30346fcdc7995d 100644
+--- a/include/linux/once.h
++++ b/include/linux/once.h
+@@ -46,7 +46,7 @@ void __do_once_sleepable_done(bool *done, struct static_key_true *once_key,
+ #define DO_ONCE(func, ...)						     \
+ 	({								     \
+ 		bool ___ret = false;					     \
+-		static bool __section(".data.once") ___done = false;	     \
++		static bool __section(".data..once") ___done = false;	     \
+ 		static DEFINE_STATIC_KEY_TRUE(___once_key);		     \
+ 		if (static_branch_unlikely(&___once_key)) {		     \
+ 			unsigned long ___flags;				     \
+@@ -64,7 +64,7 @@ void __do_once_sleepable_done(bool *done, struct static_key_true *once_key,
+ #define DO_ONCE_SLEEPABLE(func, ...)						\
+ 	({									\
+ 		bool ___ret = false;						\
+-		static bool __section(".data.once") ___done = false;		\
++		static bool __section(".data..once") ___done = false;		\
+ 		static DEFINE_STATIC_KEY_TRUE(___once_key);			\
+ 		if (static_branch_unlikely(&___once_key)) {			\
+ 			___ret = __do_once_sleepable_start(&___done);		\
+diff --git a/include/linux/once_lite.h b/include/linux/once_lite.h
+index b7bce4983638f8..27de7bc32a0610 100644
+--- a/include/linux/once_lite.h
++++ b/include/linux/once_lite.h
+@@ -12,7 +12,7 @@
+ 
+ #define __ONCE_LITE_IF(condition)					\
+ 	({								\
+-		static bool __section(".data.once") __already_done;	\
++		static bool __section(".data..once") __already_done;	\
+ 		bool __ret_cond = !!(condition);			\
+ 		bool __ret_once = false;				\
+ 									\
+diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
+index 58d84c59f3ddae..48e5c03df1dd83 100644
+--- a/include/linux/rcupdate.h
++++ b/include/linux/rcupdate.h
+@@ -401,7 +401,7 @@ static inline int debug_lockdep_rcu_enabled(void)
+  */
+ #define RCU_LOCKDEP_WARN(c, s)						\
+ 	do {								\
+-		static bool __section(".data.unlikely") __warned;	\
++		static bool __section(".data..unlikely") __warned;	\
+ 		if (debug_lockdep_rcu_enabled() && (c) &&		\
+ 		    debug_lockdep_rcu_enabled() && !__warned) {		\
+ 			__warned = true;				\
+diff --git a/include/linux/rwlock_rt.h b/include/linux/rwlock_rt.h
+index 8544ff05e594d7..7d81fc6918ee86 100644
+--- a/include/linux/rwlock_rt.h
++++ b/include/linux/rwlock_rt.h
+@@ -24,13 +24,13 @@ do {							\
+ 	__rt_rwlock_init(rwl, #rwl, &__key);		\
+ } while (0)
+ 
+-extern void rt_read_lock(rwlock_t *rwlock);
++extern void rt_read_lock(rwlock_t *rwlock)	__acquires(rwlock);
+ extern int rt_read_trylock(rwlock_t *rwlock);
+-extern void rt_read_unlock(rwlock_t *rwlock);
+-extern void rt_write_lock(rwlock_t *rwlock);
+-extern void rt_write_lock_nested(rwlock_t *rwlock, int subclass);
++extern void rt_read_unlock(rwlock_t *rwlock)	__releases(rwlock);
++extern void rt_write_lock(rwlock_t *rwlock)	__acquires(rwlock);
++extern void rt_write_lock_nested(rwlock_t *rwlock, int subclass)	__acquires(rwlock);
+ extern int rt_write_trylock(rwlock_t *rwlock);
+-extern void rt_write_unlock(rwlock_t *rwlock);
++extern void rt_write_unlock(rwlock_t *rwlock)	__releases(rwlock);
+ 
+ static __always_inline void read_lock(rwlock_t *rwlock)
+ {
+diff --git a/include/linux/sched/ext.h b/include/linux/sched/ext.h
+index 1ddbde64a31b4a..2799e7284fff72 100644
+--- a/include/linux/sched/ext.h
++++ b/include/linux/sched/ext.h
+@@ -199,7 +199,6 @@ struct sched_ext_entity {
+ #ifdef CONFIG_EXT_GROUP_SCHED
+ 	struct cgroup		*cgrp_moving_from;
+ #endif
+-	/* must be the last field, see init_scx_entity() */
+ 	struct list_head	tasks_node;
+ };
+ 
+diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
+index fffeb754880fca..5298765d6ca482 100644
+--- a/include/linux/seqlock.h
++++ b/include/linux/seqlock.h
+@@ -621,6 +621,23 @@ static __always_inline unsigned raw_read_seqcount_latch(const seqcount_latch_t *
+ 	return READ_ONCE(s->seqcount.sequence);
+ }
+ 
++/**
++ * read_seqcount_latch() - pick even/odd latch data copy
++ * @s: Pointer to seqcount_latch_t
++ *
++ * See write_seqcount_latch() for details and a full reader/writer usage
++ * example.
++ *
++ * Return: sequence counter raw value. Use the lowest bit as an index for
++ * picking which data copy to read. The full counter must then be checked
++ * with read_seqcount_latch_retry().
++ */
++static __always_inline unsigned read_seqcount_latch(const seqcount_latch_t *s)
++{
++	kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);
++	return raw_read_seqcount_latch(s);
++}
++
+ /**
+  * raw_read_seqcount_latch_retry() - end a seqcount_latch_t read section
+  * @s:		Pointer to seqcount_latch_t
+@@ -635,9 +652,34 @@ raw_read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
+ 	return unlikely(READ_ONCE(s->seqcount.sequence) != start);
+ }
+ 
++/**
++ * read_seqcount_latch_retry() - end a seqcount_latch_t read section
++ * @s:		Pointer to seqcount_latch_t
++ * @start:	count, from read_seqcount_latch()
++ *
++ * Return: true if a read section retry is required, else false
++ */
++static __always_inline int
++read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
++{
++	kcsan_atomic_next(0);
++	return raw_read_seqcount_latch_retry(s, start);
++}
++
+ /**
+  * raw_write_seqcount_latch() - redirect latch readers to even/odd copy
+  * @s: Pointer to seqcount_latch_t
++ */
++static __always_inline void raw_write_seqcount_latch(seqcount_latch_t *s)
++{
++	smp_wmb();	/* prior stores before incrementing "sequence" */
++	s->seqcount.sequence++;
++	smp_wmb();      /* increment "sequence" before following stores */
++}
++
++/**
++ * write_seqcount_latch_begin() - redirect latch readers to odd copy
++ * @s: Pointer to seqcount_latch_t
+  *
+  * The latch technique is a multiversion concurrency control method that allows
+  * queries during non-atomic modifications. If you can guarantee queries never
+@@ -665,17 +707,11 @@ raw_read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
+  *
+  *	void latch_modify(struct latch_struct *latch, ...)
+  *	{
+- *		smp_wmb();	// Ensure that the last data[1] update is visible
+- *		latch->seq.sequence++;
+- *		smp_wmb();	// Ensure that the seqcount update is visible
+- *
++ *		write_seqcount_latch_begin(&latch->seq);
+  *		modify(latch->data[0], ...);
+- *
+- *		smp_wmb();	// Ensure that the data[0] update is visible
+- *		latch->seq.sequence++;
+- *		smp_wmb();	// Ensure that the seqcount update is visible
+- *
++ *		write_seqcount_latch(&latch->seq);
+  *		modify(latch->data[1], ...);
++ *		write_seqcount_latch_end(&latch->seq);
+  *	}
+  *
+  * The query will have a form like::
+@@ -686,13 +722,13 @@ raw_read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
+  *		unsigned seq, idx;
+  *
+  *		do {
+- *			seq = raw_read_seqcount_latch(&latch->seq);
++ *			seq = read_seqcount_latch(&latch->seq);
+  *
+  *			idx = seq & 0x01;
+  *			entry = data_query(latch->data[idx], ...);
+  *
+  *		// This includes needed smp_rmb()
+- *		} while (raw_read_seqcount_latch_retry(&latch->seq, seq));
++ *		} while (read_seqcount_latch_retry(&latch->seq, seq));
+  *
+  *		return entry;
+  *	}
+@@ -716,11 +752,31 @@ raw_read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
+  *	When data is a dynamic data structure; one should use regular RCU
+  *	patterns to manage the lifetimes of the objects within.
+  */
+-static inline void raw_write_seqcount_latch(seqcount_latch_t *s)
++static __always_inline void write_seqcount_latch_begin(seqcount_latch_t *s)
+ {
+-	smp_wmb();	/* prior stores before incrementing "sequence" */
+-	s->seqcount.sequence++;
+-	smp_wmb();      /* increment "sequence" before following stores */
++	kcsan_nestable_atomic_begin();
++	raw_write_seqcount_latch(s);
++}
++
++/**
++ * write_seqcount_latch() - redirect latch readers to even copy
++ * @s: Pointer to seqcount_latch_t
++ */
++static __always_inline void write_seqcount_latch(seqcount_latch_t *s)
++{
++	raw_write_seqcount_latch(s);
++}
++
++/**
++ * write_seqcount_latch_end() - end a seqcount_latch_t write section
++ * @s:		Pointer to seqcount_latch_t
++ *
++ * Marks the end of a seqcount_latch_t writer section, after all copies of the
++ * latch-protected data have been updated.
++ */
++static __always_inline void write_seqcount_latch_end(seqcount_latch_t *s)
++{
++	kcsan_nestable_atomic_end();
+ }
+ 
+ #define __SEQLOCK_UNLOCKED(lockname)					\
+@@ -754,11 +810,7 @@ static inline void raw_write_seqcount_latch(seqcount_latch_t *s)
+  */
+ static inline unsigned read_seqbegin(const seqlock_t *sl)
+ {
+-	unsigned ret = read_seqcount_begin(&sl->seqcount);
+-
+-	kcsan_atomic_next(0);  /* non-raw usage, assume closing read_seqretry() */
+-	kcsan_flat_atomic_begin();
+-	return ret;
++	return read_seqcount_begin(&sl->seqcount);
+ }
+ 
+ /**
+@@ -774,12 +826,6 @@ static inline unsigned read_seqbegin(const seqlock_t *sl)
+  */
+ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
+ {
+-	/*
+-	 * Assume not nested: read_seqretry() may be called multiple times when
+-	 * completing read critical section.
+-	 */
+-	kcsan_flat_atomic_end();
+-
+ 	return read_seqcount_retry(&sl->seqcount, start);
+ }
+ 
+diff --git a/include/linux/spinlock_rt.h b/include/linux/spinlock_rt.h
+index 61c49b16f69ab0..6175cd682ca0d8 100644
+--- a/include/linux/spinlock_rt.h
++++ b/include/linux/spinlock_rt.h
+@@ -16,26 +16,25 @@ static inline void __rt_spin_lock_init(spinlock_t *lock, const char *name,
+ }
+ #endif
+ 
+-#define spin_lock_init(slock)					\
++#define __spin_lock_init(slock, name, key, percpu)		\
+ do {								\
+-	static struct lock_class_key __key;			\
+-								\
+ 	rt_mutex_base_init(&(slock)->lock);			\
+-	__rt_spin_lock_init(slock, #slock, &__key, false);	\
++	__rt_spin_lock_init(slock, name, key, percpu);		\
+ } while (0)
+ 
+-#define local_spin_lock_init(slock)				\
++#define _spin_lock_init(slock, percpu)				\
+ do {								\
+ 	static struct lock_class_key __key;			\
+-								\
+-	rt_mutex_base_init(&(slock)->lock);			\
+-	__rt_spin_lock_init(slock, #slock, &__key, true);	\
++	__spin_lock_init(slock, #slock, &__key, percpu);	\
+ } while (0)
+ 
+-extern void rt_spin_lock(spinlock_t *lock);
+-extern void rt_spin_lock_nested(spinlock_t *lock, int subclass);
+-extern void rt_spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *nest_lock);
+-extern void rt_spin_unlock(spinlock_t *lock);
++#define spin_lock_init(slock)		_spin_lock_init(slock, false)
++#define local_spin_lock_init(slock)	_spin_lock_init(slock, true)
++
++extern void rt_spin_lock(spinlock_t *lock) __acquires(lock);
++extern void rt_spin_lock_nested(spinlock_t *lock, int subclass)	__acquires(lock);
++extern void rt_spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *nest_lock) __acquires(lock);
++extern void rt_spin_unlock(spinlock_t *lock)	__releases(lock);
+ extern void rt_spin_lock_unlock(spinlock_t *lock);
+ extern int rt_spin_trylock_bh(spinlock_t *lock);
+ extern int rt_spin_trylock(spinlock_t *lock);
+diff --git a/include/media/v4l2-dv-timings.h b/include/media/v4l2-dv-timings.h
+index 8fa963326bf6a2..c64096b5c78215 100644
+--- a/include/media/v4l2-dv-timings.h
++++ b/include/media/v4l2-dv-timings.h
+@@ -146,15 +146,18 @@ void v4l2_print_dv_timings(const char *dev_prefix, const char *prefix,
+  * @polarities: the horizontal and vertical polarities (same as struct
+  *		v4l2_bt_timings polarities).
+  * @interlaced: if this flag is true, it indicates interlaced format
++ * @cap: the v4l2_dv_timings_cap capabilities.
+  * @fmt: the resulting timings.
+  *
+  * This function will attempt to detect if the given values correspond to a
+  * valid CVT format. If so, then it will return true, and fmt will be filled
+  * in with the found CVT timings.
+  */
+-bool v4l2_detect_cvt(unsigned frame_height, unsigned hfreq, unsigned vsync,
+-		unsigned active_width, u32 polarities, bool interlaced,
+-		struct v4l2_dv_timings *fmt);
++bool v4l2_detect_cvt(unsigned int frame_height, unsigned int hfreq,
++		     unsigned int vsync, unsigned int active_width,
++		     u32 polarities, bool interlaced,
++		     const struct v4l2_dv_timings_cap *cap,
++		     struct v4l2_dv_timings *fmt);
+ 
+ /**
+  * v4l2_detect_gtf - detect if the given timings follow the GTF standard
+@@ -170,15 +173,18 @@ bool v4l2_detect_cvt(unsigned frame_height, unsigned hfreq, unsigned vsync,
+  *		image height, so it has to be passed explicitly. Usually
+  *		the native screen aspect ratio is used for this. If it
+  *		is not filled in correctly, then 16:9 will be assumed.
++ * @cap: the v4l2_dv_timings_cap capabilities.
+  * @fmt: the resulting timings.
+  *
+  * This function will attempt to detect if the given values correspond to a
+  * valid GTF format. If so, then it will return true, and fmt will be filled
+  * in with the found GTF timings.
+  */
+-bool v4l2_detect_gtf(unsigned frame_height, unsigned hfreq, unsigned vsync,
+-		u32 polarities, bool interlaced, struct v4l2_fract aspect,
+-		struct v4l2_dv_timings *fmt);
++bool v4l2_detect_gtf(unsigned int frame_height, unsigned int hfreq,
++		     unsigned int vsync, u32 polarities, bool interlaced,
++		     struct v4l2_fract aspect,
++		     const struct v4l2_dv_timings_cap *cap,
++		     struct v4l2_dv_timings *fmt);
+ 
+ /**
+  * v4l2_calc_aspect_ratio - calculate the aspect ratio based on bytes
+diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
+index bab1e3d7452a2c..a1864cff616aee 100644
+--- a/include/net/bluetooth/hci.h
++++ b/include/net/bluetooth/hci.h
+@@ -1,7 +1,7 @@
+ /*
+    BlueZ - Bluetooth protocol stack for Linux
+    Copyright (C) 2000-2001 Qualcomm Incorporated
+-   Copyright 2023 NXP
++   Copyright 2023-2024 NXP
+ 
+    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
+ 
+@@ -29,6 +29,7 @@
+ #define HCI_MAX_ACL_SIZE	1024
+ #define HCI_MAX_SCO_SIZE	255
+ #define HCI_MAX_ISO_SIZE	251
++#define HCI_MAX_ISO_BIS		31
+ #define HCI_MAX_EVENT_SIZE	260
+ #define HCI_MAX_FRAME_SIZE	(HCI_MAX_ACL_SIZE + 4)
+ 
+@@ -683,6 +684,7 @@ enum {
+ #define HCI_RSSI_INVALID	127
+ 
+ #define HCI_SYNC_HANDLE_INVALID	0xffff
++#define HCI_SID_INVALID		0xff
+ 
+ #define HCI_ROLE_MASTER		0x00
+ #define HCI_ROLE_SLAVE		0x01
+diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
+index 88265d37aa72e3..4c185a08c3a3af 100644
+--- a/include/net/bluetooth/hci_core.h
++++ b/include/net/bluetooth/hci_core.h
+@@ -668,6 +668,7 @@ struct hci_conn {
+ 	__u8		adv_instance;
+ 	__u16		handle;
+ 	__u16		sync_handle;
++	__u8		sid;
+ 	__u16		state;
+ 	__u16		mtu;
+ 	__u8		mode;
+@@ -710,6 +711,9 @@ struct hci_conn {
+ 	__s8		tx_power;
+ 	__s8		max_tx_power;
+ 	struct bt_iso_qos iso_qos;
++	__u8		num_bis;
++	__u8		bis[HCI_MAX_ISO_BIS];
++
+ 	unsigned long	flags;
+ 
+ 	enum conn_reasons conn_reason;
+@@ -945,8 +949,10 @@ enum {
+ 	HCI_CONN_PER_ADV,
+ 	HCI_CONN_BIG_CREATED,
+ 	HCI_CONN_CREATE_CIS,
++	HCI_CONN_CREATE_BIG_SYNC,
+ 	HCI_CONN_BIG_SYNC,
+ 	HCI_CONN_BIG_SYNC_FAILED,
++	HCI_CONN_CREATE_PA_SYNC,
+ 	HCI_CONN_PA_SYNC,
+ 	HCI_CONN_PA_SYNC_FAILED,
+ };
+@@ -1099,6 +1105,30 @@ static inline struct hci_conn *hci_conn_hash_lookup_bis(struct hci_dev *hdev,
+ 	return NULL;
+ }
+ 
++static inline struct hci_conn *hci_conn_hash_lookup_sid(struct hci_dev *hdev,
++							__u8 sid,
++							bdaddr_t *dst,
++							__u8 dst_type)
++{
++	struct hci_conn_hash *h = &hdev->conn_hash;
++	struct hci_conn  *c;
++
++	rcu_read_lock();
++
++	list_for_each_entry_rcu(c, &h->list, list) {
++		if (c->type != ISO_LINK  || bacmp(&c->dst, dst) ||
++		    c->dst_type != dst_type || c->sid != sid)
++			continue;
++
++		rcu_read_unlock();
++		return c;
++	}
++
++	rcu_read_unlock();
++
++	return NULL;
++}
++
+ static inline struct hci_conn *
+ hci_conn_hash_lookup_per_adv_bis(struct hci_dev *hdev,
+ 				 bdaddr_t *ba,
+@@ -1269,6 +1299,30 @@ static inline struct hci_conn *hci_conn_hash_lookup_big(struct hci_dev *hdev,
+ 	return NULL;
+ }
+ 
++static inline struct hci_conn *
++hci_conn_hash_lookup_big_sync_pend(struct hci_dev *hdev,
++				   __u8 handle, __u8 num_bis)
++{
++	struct hci_conn_hash *h = &hdev->conn_hash;
++	struct hci_conn  *c;
++
++	rcu_read_lock();
++
++	list_for_each_entry_rcu(c, &h->list, list) {
++		if (c->type != ISO_LINK)
++			continue;
++
++		if (handle == c->iso_qos.bcast.big && num_bis == c->num_bis) {
++			rcu_read_unlock();
++			return c;
++		}
++	}
++
++	rcu_read_unlock();
++
++	return NULL;
++}
++
+ static inline struct hci_conn *
+ hci_conn_hash_lookup_big_state(struct hci_dev *hdev, __u8 handle,  __u16 state)
+ {
+@@ -1328,6 +1382,13 @@ hci_conn_hash_lookup_pa_sync_handle(struct hci_dev *hdev, __u16 sync_handle)
+ 		if (c->type != ISO_LINK)
+ 			continue;
+ 
++		/* Ignore the listen hcon, we are looking
++		 * for the child hcon that was created as
++		 * a result of the PA sync established event.
++		 */
++		if (c->state == BT_LISTEN)
++			continue;
++
+ 		if (c->sync_handle == sync_handle) {
+ 			rcu_read_unlock();
+ 			return c;
+@@ -1445,6 +1506,8 @@ bool hci_setup_sync(struct hci_conn *conn, __u16 handle);
+ void hci_sco_setup(struct hci_conn *conn, __u8 status);
+ bool hci_iso_setup_path(struct hci_conn *conn);
+ int hci_le_create_cis_pending(struct hci_dev *hdev);
++int hci_pa_create_sync_pending(struct hci_dev *hdev);
++int hci_le_big_create_sync_pending(struct hci_dev *hdev);
+ int hci_conn_check_create_cis(struct hci_conn *conn);
+ 
+ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
+diff --git a/include/net/net_debug.h b/include/net/net_debug.h
+index 1e74684cbbdbcd..4a79204c8d306e 100644
+--- a/include/net/net_debug.h
++++ b/include/net/net_debug.h
+@@ -27,7 +27,7 @@ void netdev_info(const struct net_device *dev, const char *format, ...);
+ 
+ #define netdev_level_once(level, dev, fmt, ...)			\
+ do {								\
+-	static bool __section(".data.once") __print_once;	\
++	static bool __section(".data..once") __print_once;	\
+ 								\
+ 	if (!__print_once) {					\
+ 		__print_once = true;				\
+diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
+index aa8ede439905cb..67551133b5228e 100644
+--- a/include/rdma/ib_verbs.h
++++ b/include/rdma/ib_verbs.h
+@@ -2948,6 +2948,14 @@ int rdma_user_mmap_entry_insert_range(struct ib_ucontext *ucontext,
+ 				      size_t length, u32 min_pgoff,
+ 				      u32 max_pgoff);
+ 
++#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
++void rdma_user_mmap_disassociate(struct ib_device *device);
++#else
++static inline void rdma_user_mmap_disassociate(struct ib_device *device)
++{
++}
++#endif
++
+ static inline int
+ rdma_user_mmap_entry_insert_exact(struct ib_ucontext *ucontext,
+ 				  struct rdma_user_mmap_entry *entry,
+@@ -4726,6 +4734,9 @@ ib_get_vector_affinity(struct ib_device *device, int comp_vector)
+  * @device:         the rdma device
+  */
+ void rdma_roce_rescan_device(struct ib_device *ibdev);
++void rdma_roce_rescan_port(struct ib_device *ib_dev, u32 port);
++void roce_del_all_netdev_gids(struct ib_device *ib_dev,
++			      u32 port, struct net_device *ndev);
+ 
+ struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile);
+ 
+diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
+index 3b687d20c9ed34..db7254d52d9355 100644
+--- a/include/uapi/linux/rtnetlink.h
++++ b/include/uapi/linux/rtnetlink.h
+@@ -174,7 +174,7 @@ enum {
+ #define RTM_GETLINKPROP	RTM_GETLINKPROP
+ 
+ 	RTM_NEWVLAN = 112,
+-#define RTM_NEWNVLAN	RTM_NEWVLAN
++#define RTM_NEWVLAN	RTM_NEWVLAN
+ 	RTM_DELVLAN,
+ #define RTM_DELVLAN	RTM_DELVLAN
+ 	RTM_GETVLAN,
+diff --git a/init/Kconfig b/init/Kconfig
+index c521e1421ad4ab..7256fa127530ff 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -120,6 +120,15 @@ config CC_HAS_ASM_INLINE
+ config CC_HAS_NO_PROFILE_FN_ATTR
+ 	def_bool $(success,echo '__attribute__((no_profile_instrument_function)) int x();' | $(CC) -x c - -c -o /dev/null -Werror)
+ 
++config CC_HAS_COUNTED_BY
++	# TODO: when gcc 15 is released remove the build test and add
++	# a gcc version check
++	def_bool $(success,echo 'struct flex { int count; int array[] __attribute__((__counted_by__(count))); };' | $(CC) $(CLANG_FLAGS) -x c - -c -o /dev/null -Werror)
++	# clang needs to be at least 19.1.3 to avoid __bdos miscalculations
++	# https://github.com/llvm/llvm-project/pull/110497
++	# https://github.com/llvm/llvm-project/pull/112636
++	depends on !(CC_IS_CLANG && CLANG_VERSION < 190103)
++
+ config PAHOLE_VERSION
+ 	int
+ 	default $(shell,$(srctree)/scripts/pahole-version.sh $(PAHOLE))
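
The attribute this symbol probes for ties a flexible array's bounds to its counter member, so __builtin_dynamic_object_size() can see through the allocation. A stand-alone probe of the same shape as the Kconfig test, assuming a compiler that provides __builtin_dynamic_object_size (gcc >= 12 or clang); the fallback macro and the malloc sizing here are illustrative:

#include <stdio.h>
#include <stdlib.h>

#ifdef __has_attribute
# if __has_attribute(__counted_by__)
#  define __counted_by(member)	__attribute__((__counted_by__(member)))
# endif
#endif
#ifndef __counted_by
# define __counted_by(member)
#endif

struct flex {
	int count;
	int array[] __counted_by(count);
};

int main(void)
{
	struct flex *f = malloc(sizeof(*f) + 4 * sizeof(int));

	if (!f)
		return 1;
	f->count = 4;	/* counter must be set before the array is used */
	/* reports 16 when the attribute is honored, SIZE_MAX otherwise */
	printf("bdos = %zu\n", __builtin_dynamic_object_size(f->array, 1));
	free(f);
	return 0;
}
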
+diff --git a/init/initramfs.c b/init/initramfs.c
+index bc911e466d5bbb..b2f7583bb1f5c2 100644
+--- a/init/initramfs.c
++++ b/init/initramfs.c
+@@ -360,6 +360,15 @@ static int __init do_name(void)
+ {
+ 	state = SkipIt;
+ 	next_state = Reset;
++
++	/* name_len > 0 && name_len <= PATH_MAX checked in do_header */
++	if (collected[name_len - 1] != '\0') {
++		pr_err("initramfs name without nulterm: %.*s\n",
++		       (int)name_len, collected);
++		error("malformed archive");
++		return 1;
++	}
++
+ 	if (strcmp(collected, "TRAILER!!!") == 0) {
+ 		free_hash();
+ 		return 0;
+@@ -424,6 +433,12 @@ static int __init do_copy(void)
+ 
+ static int __init do_symlink(void)
+ {
++	if (collected[name_len - 1] != '\0') {
++		pr_err("initramfs symlink without nulterm: %.*s\n",
++		       (int)name_len, collected);
++		error("malformed archive");
++		return 1;
++	}
+ 	collected[N_ALIGN(name_len) + body_len] = '\0';
+ 	clean_path(collected, 0);
+ 	init_symlink(collected + N_ALIGN(name_len), collected);
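
Both hunks above enforce the same invariant: a cpio name field of name_len bytes is only safe to hand to strcmp() and the path helpers if its final byte is the NUL terminator. Reduced to a stand-alone check (the helper name and sample buffers are illustrative):

#include <stdio.h>

static int name_ok(const char *collected, size_t name_len)
{
	if (name_len == 0 || collected[name_len - 1] != '\0') {
		fprintf(stderr, "malformed archive: name without nulterm: %.*s\n",
			(int)name_len, collected);
		return 0;
	}
	return 1;
}

int main(void)
{
	char good[] = "TRAILER!!!";		/* sizeof includes the NUL */
	char bad[] = { 'e', 't', 'c' };		/* no terminator */

	printf("%d %d\n", name_ok(good, sizeof(good)), name_ok(bad, sizeof(bad)));
	return 0;
}
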
+diff --git a/io_uring/memmap.c b/io_uring/memmap.c
+index a0f32a255fd1e1..6d151e46f3d69e 100644
+--- a/io_uring/memmap.c
++++ b/io_uring/memmap.c
+@@ -72,6 +72,8 @@ void *io_pages_map(struct page ***out_pages, unsigned short *npages,
+ 	ret = io_mem_alloc_compound(pages, nr_pages, size, gfp);
+ 	if (!IS_ERR(ret))
+ 		goto done;
++	if (nr_pages == 1)
++		goto fail;
+ 
+ 	ret = io_mem_alloc_single(pages, nr_pages, size, gfp);
+ 	if (!IS_ERR(ret)) {
+@@ -80,7 +82,7 @@ void *io_pages_map(struct page ***out_pages, unsigned short *npages,
+ 		*npages = nr_pages;
+ 		return ret;
+ 	}
+-
++fail:
+ 	kvfree(pages);
+ 	*out_pages = NULL;
+ 	*npages = 0;
+@@ -135,7 +137,12 @@ struct page **io_pin_pages(unsigned long uaddr, unsigned long len, int *npages)
+ 	struct page **pages;
+ 	int ret;
+ 
+-	end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
++	if (check_add_overflow(uaddr, len, &end))
++		return ERR_PTR(-EOVERFLOW);
++	if (check_add_overflow(end, PAGE_SIZE - 1, &end))
++		return ERR_PTR(-EOVERFLOW);
++
++	end = end >> PAGE_SHIFT;
+ 	start = uaddr >> PAGE_SHIFT;
+ 	nr_pages = end - start;
+ 	if (WARN_ON_ONCE(!nr_pages))
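
check_add_overflow() in the hunk above wraps the compilers' __builtin_add_overflow(). A minimal sketch of the same overflow-checked page-range computation; pin_range() and the example addresses are hypothetical:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

static int pin_range(unsigned long uaddr, unsigned long len,
		     unsigned long *start, unsigned long *nr_pages)
{
	unsigned long end;

	/* uaddr + len, then round up to a page boundary; both can wrap */
	if (__builtin_add_overflow(uaddr, len, &end))
		return -1;
	if (__builtin_add_overflow(end, PAGE_SIZE - 1, &end))
		return -1;

	*start = uaddr >> PAGE_SHIFT;
	*nr_pages = (end >> PAGE_SHIFT) - *start;
	return 0;
}

int main(void)
{
	unsigned long start, nr;

	if (pin_range(0x1000, 0x2001, &start, &nr) == 0)
		printf("start=%lu nr_pages=%lu\n", start, nr);	/* 1 and 3 */
	if (pin_range(-1UL, 2UL, &start, &nr))
		printf("overflow rejected\n");
	return 0;
}

The old expression (uaddr + len + PAGE_SIZE - 1) could wrap to a small value, yielding a bogus page range instead of an error.
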
+diff --git a/ipc/namespace.c b/ipc/namespace.c
+index 6ecc30effd3ec6..4df91ceeeafe9f 100644
+--- a/ipc/namespace.c
++++ b/ipc/namespace.c
+@@ -83,13 +83,15 @@ static struct ipc_namespace *create_ipc_ns(struct user_namespace *user_ns,
+ 
+ 	err = msg_init_ns(ns);
+ 	if (err)
+-		goto fail_put;
++		goto fail_ipc;
+ 
+ 	sem_init_ns(ns);
+ 	shm_init_ns(ns);
+ 
+ 	return ns;
+ 
++fail_ipc:
++	retire_ipc_sysctls(ns);
+ fail_mq:
+ 	retire_mq_sysctls(ns);
+ 
+diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c
+index fda3dd2ee9844f..b3a2ce1e5e22ec 100644
+--- a/kernel/bpf/bpf_struct_ops.c
++++ b/kernel/bpf/bpf_struct_ops.c
+@@ -32,7 +32,9 @@ struct bpf_struct_ops_map {
+ 	 * (in kvalue.data).
+ 	 */
+ 	struct bpf_link **links;
+-	u32 links_cnt;
++	/* ksyms for bpf trampolines */
++	struct bpf_ksym **ksyms;
++	u32 funcs_cnt;
+ 	u32 image_pages_cnt;
+ 	/* image_pages is an array of pages that has all the trampolines
+ 	 * that stores the func args before calling the bpf_prog.
+@@ -481,11 +483,11 @@ static void bpf_struct_ops_map_put_progs(struct bpf_struct_ops_map *st_map)
+ {
+ 	u32 i;
+ 
+-	for (i = 0; i < st_map->links_cnt; i++) {
+-		if (st_map->links[i]) {
+-			bpf_link_put(st_map->links[i]);
+-			st_map->links[i] = NULL;
+-		}
++	for (i = 0; i < st_map->funcs_cnt; i++) {
++		if (!st_map->links[i])
++			break;
++		bpf_link_put(st_map->links[i]);
++		st_map->links[i] = NULL;
+ 	}
+ }
+ 
+@@ -586,6 +588,49 @@ int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks,
+ 	return 0;
+ }
+ 
++static void bpf_struct_ops_ksym_init(const char *tname, const char *mname,
++				     void *image, unsigned int size,
++				     struct bpf_ksym *ksym)
++{
++	snprintf(ksym->name, KSYM_NAME_LEN, "bpf__%s_%s", tname, mname);
++	INIT_LIST_HEAD_RCU(&ksym->lnode);
++	bpf_image_ksym_init(image, size, ksym);
++}
++
++static void bpf_struct_ops_map_add_ksyms(struct bpf_struct_ops_map *st_map)
++{
++	u32 i;
++
++	for (i = 0; i < st_map->funcs_cnt; i++) {
++		if (!st_map->ksyms[i])
++			break;
++		bpf_image_ksym_add(st_map->ksyms[i]);
++	}
++}
++
++static void bpf_struct_ops_map_del_ksyms(struct bpf_struct_ops_map *st_map)
++{
++	u32 i;
++
++	for (i = 0; i < st_map->funcs_cnt; i++) {
++		if (!st_map->ksyms[i])
++			break;
++		bpf_image_ksym_del(st_map->ksyms[i]);
++	}
++}
++
++static void bpf_struct_ops_map_free_ksyms(struct bpf_struct_ops_map *st_map)
++{
++	u32 i;
++
++	for (i = 0; i < st_map->funcs_cnt; i++) {
++		if (!st_map->ksyms[i])
++			break;
++		kfree(st_map->ksyms[i]);
++		st_map->ksyms[i] = NULL;
++	}
++}
++
+ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
+ 					   void *value, u64 flags)
+ {
+@@ -601,6 +646,9 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
+ 	int prog_fd, err;
+ 	u32 i, trampoline_start, image_off = 0;
+ 	void *cur_image = NULL, *image = NULL;
++	struct bpf_link **plink;
++	struct bpf_ksym **pksym;
++	const char *tname, *mname;
+ 
+ 	if (flags)
+ 		return -EINVAL;
+@@ -639,14 +687,19 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
+ 	udata = &uvalue->data;
+ 	kdata = &kvalue->data;
+ 
++	plink = st_map->links;
++	pksym = st_map->ksyms;
++	tname = btf_name_by_offset(st_map->btf, t->name_off);
+ 	module_type = btf_type_by_id(btf_vmlinux, st_ops_ids[IDX_MODULE_ID]);
+ 	for_each_member(i, t, member) {
+ 		const struct btf_type *mtype, *ptype;
+ 		struct bpf_prog *prog;
+ 		struct bpf_tramp_link *link;
++		struct bpf_ksym *ksym;
+ 		u32 moff;
+ 
+ 		moff = __btf_member_bit_offset(t, member) / 8;
++		mname = btf_name_by_offset(st_map->btf, member->name_off);
+ 		ptype = btf_type_resolve_ptr(st_map->btf, member->type, NULL);
+ 		if (ptype == module_type) {
+ 			if (*(void **)(udata + moff))
+@@ -714,7 +767,14 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
+ 		}
+ 		bpf_link_init(&link->link, BPF_LINK_TYPE_STRUCT_OPS,
+ 			      &bpf_struct_ops_link_lops, prog);
+-		st_map->links[i] = &link->link;
++		*plink++ = &link->link;
++
++		ksym = kzalloc(sizeof(*ksym), GFP_USER);
++		if (!ksym) {
++			err = -ENOMEM;
++			goto reset_unlock;
++		}
++		*pksym++ = ksym;
+ 
+ 		trampoline_start = image_off;
+ 		err = bpf_struct_ops_prepare_trampoline(tlinks, link,
+@@ -735,6 +795,12 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
+ 
+ 		/* put prog_id to udata */
+ 		*(unsigned long *)(udata + moff) = prog->aux->id;
++
++		/* init ksym for this trampoline */
++		bpf_struct_ops_ksym_init(tname, mname,
++					 image + trampoline_start,
++					 image_off - trampoline_start,
++					 ksym);
+ 	}
+ 
+ 	if (st_ops->validate) {
+@@ -783,6 +849,7 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
+ 	 */
+ 
+ reset_unlock:
++	bpf_struct_ops_map_free_ksyms(st_map);
+ 	bpf_struct_ops_map_free_image(st_map);
+ 	bpf_struct_ops_map_put_progs(st_map);
+ 	memset(uvalue, 0, map->value_size);
+@@ -790,6 +857,8 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
+ unlock:
+ 	kfree(tlinks);
+ 	mutex_unlock(&st_map->lock);
++	if (!err)
++		bpf_struct_ops_map_add_ksyms(st_map);
+ 	return err;
+ }
+ 
+@@ -849,7 +918,10 @@ static void __bpf_struct_ops_map_free(struct bpf_map *map)
+ 
+ 	if (st_map->links)
+ 		bpf_struct_ops_map_put_progs(st_map);
++	if (st_map->ksyms)
++		bpf_struct_ops_map_free_ksyms(st_map);
+ 	bpf_map_area_free(st_map->links);
++	bpf_map_area_free(st_map->ksyms);
+ 	bpf_struct_ops_map_free_image(st_map);
+ 	bpf_map_area_free(st_map->uvalue);
+ 	bpf_map_area_free(st_map);
+@@ -866,6 +938,8 @@ static void bpf_struct_ops_map_free(struct bpf_map *map)
+ 	if (btf_is_module(st_map->btf))
+ 		module_put(st_map->st_ops_desc->st_ops->owner);
+ 
++	bpf_struct_ops_map_del_ksyms(st_map);
++
+ 	/* The struct_ops's function may switch to another struct_ops.
+ 	 *
+ 	 * For example, bpf_tcp_cc_x->init() may switch to
+@@ -895,6 +969,19 @@ static int bpf_struct_ops_map_alloc_check(union bpf_attr *attr)
+ 	return 0;
+ }
+ 
++static u32 count_func_ptrs(const struct btf *btf, const struct btf_type *t)
++{
++	int i;
++	u32 count;
++	const struct btf_member *member;
++
++	count = 0;
++	for_each_member(i, t, member)
++		if (btf_type_resolve_func_ptr(btf, member->type, NULL))
++			count++;
++	return count;
++}
++
+ static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr)
+ {
+ 	const struct bpf_struct_ops_desc *st_ops_desc;
+@@ -961,11 +1048,15 @@ static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr)
+ 	map = &st_map->map;
+ 
+ 	st_map->uvalue = bpf_map_area_alloc(vt->size, NUMA_NO_NODE);
+-	st_map->links_cnt = btf_type_vlen(t);
++	st_map->funcs_cnt = count_func_ptrs(btf, t);
+ 	st_map->links =
+-		bpf_map_area_alloc(st_map->links_cnt * sizeof(struct bpf_links *),
++		bpf_map_area_alloc(st_map->funcs_cnt * sizeof(struct bpf_link *),
++				   NUMA_NO_NODE);
++
++	st_map->ksyms =
++		bpf_map_area_alloc(st_map->funcs_cnt * sizeof(struct bpf_ksym *),
+ 				   NUMA_NO_NODE);
+-	if (!st_map->uvalue || !st_map->links) {
++	if (!st_map->uvalue || !st_map->links || !st_map->ksyms) {
+ 		ret = -ENOMEM;
+ 		goto errout_free;
+ 	}
+@@ -994,7 +1085,8 @@ static u64 bpf_struct_ops_map_mem_usage(const struct bpf_map *map)
+ 	usage = sizeof(*st_map) +
+ 			vt->size - sizeof(struct bpf_struct_ops_value);
+ 	usage += vt->size;
+-	usage += btf_type_vlen(vt) * sizeof(struct bpf_links *);
++	usage += st_map->funcs_cnt * sizeof(struct bpf_link *);
++	usage += st_map->funcs_cnt * sizeof(struct bpf_ksym *);
+ 	usage += PAGE_SIZE;
+ 	return usage;
+ }
+diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
+index 5cd1c7a23848cc..346826e3c933da 100644
+--- a/kernel/bpf/btf.c
++++ b/kernel/bpf/btf.c
+@@ -6564,7 +6564,10 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type,
+ 	if (prog_args_trusted(prog))
+ 		info->reg_type |= PTR_TRUSTED;
+ 
+-	if (btf_param_match_suffix(btf, &args[arg], "__nullable"))
++	/* Raw tracepoint arguments always get marked as maybe NULL */
++	if (bpf_prog_is_raw_tp(prog))
++		info->reg_type |= PTR_MAYBE_NULL;
++	else if (btf_param_match_suffix(btf, &args[arg], "__nullable"))
+ 		info->reg_type |= PTR_MAYBE_NULL;
+ 
+ 	if (tgt_prog) {
+diff --git a/kernel/bpf/dispatcher.c b/kernel/bpf/dispatcher.c
+index 70fb82bf16370e..b77db7413f8c70 100644
+--- a/kernel/bpf/dispatcher.c
++++ b/kernel/bpf/dispatcher.c
+@@ -154,7 +154,8 @@ void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
+ 			d->image = NULL;
+ 			goto out;
+ 		}
+-		bpf_image_ksym_add(d->image, PAGE_SIZE, &d->ksym);
++		bpf_image_ksym_init(d->image, PAGE_SIZE, &d->ksym);
++		bpf_image_ksym_add(&d->ksym);
+ 	}
+ 
+ 	prev_num_progs = d->num_progs;
+diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
+index f8302a5ca400da..1166d9dd3e8b5d 100644
+--- a/kernel/bpf/trampoline.c
++++ b/kernel/bpf/trampoline.c
+@@ -115,10 +115,14 @@ bool bpf_prog_has_trampoline(const struct bpf_prog *prog)
+ 		(ptype == BPF_PROG_TYPE_LSM && eatype == BPF_LSM_MAC);
+ }
+ 
+-void bpf_image_ksym_add(void *data, unsigned int size, struct bpf_ksym *ksym)
++void bpf_image_ksym_init(void *data, unsigned int size, struct bpf_ksym *ksym)
+ {
+ 	ksym->start = (unsigned long) data;
+ 	ksym->end = ksym->start + size;
++}
++
++void bpf_image_ksym_add(struct bpf_ksym *ksym)
++{
+ 	bpf_ksym_add(ksym);
+ 	perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF, ksym->start,
+ 			   PAGE_SIZE, false, ksym->name);
+@@ -377,7 +381,8 @@ static struct bpf_tramp_image *bpf_tramp_image_alloc(u64 key, int size)
+ 	ksym = &im->ksym;
+ 	INIT_LIST_HEAD_RCU(&ksym->lnode);
+ 	snprintf(ksym->name, KSYM_NAME_LEN, "bpf_trampoline_%llu", key);
+-	bpf_image_ksym_add(image, size, ksym);
++	bpf_image_ksym_init(image, size, ksym);
++	bpf_image_ksym_add(ksym);
+ 	return im;
+ 
+ out_free_image:
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index bb99bada7e2ed2..91317857ea3ee5 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -418,6 +418,25 @@ static struct btf_record *reg_btf_record(const struct bpf_reg_state *reg)
+ 	return rec;
+ }
+ 
++static bool mask_raw_tp_reg_cond(const struct bpf_verifier_env *env, struct bpf_reg_state *reg) {
++	return reg->type == (PTR_TO_BTF_ID | PTR_TRUSTED | PTR_MAYBE_NULL) &&
++	       bpf_prog_is_raw_tp(env->prog) && !reg->ref_obj_id;
++}
++
++static bool mask_raw_tp_reg(const struct bpf_verifier_env *env, struct bpf_reg_state *reg)
++{
++	if (!mask_raw_tp_reg_cond(env, reg))
++		return false;
++	reg->type &= ~PTR_MAYBE_NULL;
++	return true;
++}
++
++static void unmask_raw_tp_reg(struct bpf_reg_state *reg, bool result)
++{
++	if (result)
++		reg->type |= PTR_MAYBE_NULL;
++}
++
+ static bool subprog_is_global(const struct bpf_verifier_env *env, int subprog)
+ {
+ 	struct bpf_func_info_aux *aux = env->prog->aux->func_info_aux;
+@@ -6595,6 +6614,7 @@ static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
+ 	const char *field_name = NULL;
+ 	enum bpf_type_flag flag = 0;
+ 	u32 btf_id = 0;
++	bool mask;
+ 	int ret;
+ 
+ 	if (!env->allow_ptr_leaks) {
+@@ -6666,7 +6686,21 @@ static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
+ 
+ 	if (ret < 0)
+ 		return ret;
+-
++	/* For raw_tp progs, we allow dereference of PTR_MAYBE_NULL
++	 * trusted PTR_TO_BTF_ID, these are the ones that are possibly
++	 * arguments to the raw_tp. Since internal checks for trusted
++	 * reg in check_ptr_to_btf_access would consider PTR_MAYBE_NULL
++	 * modifier as problematic, mask it out temporarily for the
++	 * check. Don't apply this to pointers with ref_obj_id > 0, as
++	 * those won't be raw_tp args.
++	 *
++	 * We may end up applying this relaxation to other trusted
++	 * PTR_TO_BTF_ID with maybe null flag, since we cannot
++	 * distinguish PTR_MAYBE_NULL tagged for arguments vs normal
++	 * tagging, but that should expand allowed behavior, and not
++	 * cause regression for existing behavior.
++	 */
++	mask = mask_raw_tp_reg(env, reg);
+ 	if (ret != PTR_TO_BTF_ID) {
+ 		/* just mark; */
+ 
+@@ -6727,8 +6761,13 @@ static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
+ 		clear_trusted_flags(&flag);
+ 	}
+ 
+-	if (atype == BPF_READ && value_regno >= 0)
++	if (atype == BPF_READ && value_regno >= 0) {
+ 		mark_btf_ld_reg(env, regs, value_regno, ret, reg->btf, btf_id, flag);
++		/* We've assigned a new type to regno, so don't undo masking. */
++		if (regno == value_regno)
++			mask = false;
++	}
++	unmask_raw_tp_reg(reg, mask);
+ 
+ 	return 0;
+ }
+@@ -7103,7 +7142,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
+ 		if (!err && t == BPF_READ && value_regno >= 0)
+ 			mark_reg_unknown(env, regs, value_regno);
+ 	} else if (base_type(reg->type) == PTR_TO_BTF_ID &&
+-		   !type_may_be_null(reg->type)) {
++		   (mask_raw_tp_reg_cond(env, reg) || !type_may_be_null(reg->type))) {
+ 		err = check_ptr_to_btf_access(env, regs, regno, off, size, t,
+ 					      value_regno);
+ 	} else if (reg->type == CONST_PTR_TO_MAP) {
+@@ -8796,6 +8835,7 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
+ 	enum bpf_reg_type type = reg->type;
+ 	u32 *arg_btf_id = NULL;
+ 	int err = 0;
++	bool mask;
+ 
+ 	if (arg_type == ARG_DONTCARE)
+ 		return 0;
+@@ -8836,11 +8876,11 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
+ 	    base_type(arg_type) == ARG_PTR_TO_SPIN_LOCK)
+ 		arg_btf_id = fn->arg_btf_id[arg];
+ 
++	mask = mask_raw_tp_reg(env, reg);
+ 	err = check_reg_type(env, regno, arg_type, arg_btf_id, meta);
+-	if (err)
+-		return err;
+ 
+-	err = check_func_arg_reg_off(env, reg, regno, arg_type);
++	err = err ?: check_func_arg_reg_off(env, reg, regno, arg_type);
++	unmask_raw_tp_reg(reg, mask);
+ 	if (err)
+ 		return err;
+ 
+@@ -9635,14 +9675,17 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog,
+ 				return ret;
+ 		} else if (base_type(arg->arg_type) == ARG_PTR_TO_BTF_ID) {
+ 			struct bpf_call_arg_meta meta;
++			bool mask;
+ 			int err;
+ 
+ 			if (register_is_null(reg) && type_may_be_null(arg->arg_type))
+ 				continue;
+ 
+ 			memset(&meta, 0, sizeof(meta)); /* leave func_id as zero */
++			mask = mask_raw_tp_reg(env, reg);
+ 			err = check_reg_type(env, regno, arg->arg_type, &arg->btf_id, &meta);
+ 			err = err ?: check_func_arg_reg_off(env, reg, regno, arg->arg_type);
++			unmask_raw_tp_reg(reg, mask);
+ 			if (err)
+ 				return err;
+ 		} else {
+@@ -10583,11 +10626,26 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
+ 
+ 	switch (func_id) {
+ 	case BPF_FUNC_tail_call:
++		if (env->cur_state->active_lock.ptr) {
++			verbose(env, "tail_call cannot be used inside bpf_spin_lock-ed region\n");
++			return -EINVAL;
++		}
++
+ 		err = check_reference_leak(env, false);
+ 		if (err) {
+ 			verbose(env, "tail_call would lead to reference leak\n");
+ 			return err;
+ 		}
++
++		if (env->cur_state->active_rcu_lock) {
++			verbose(env, "tail_call cannot be used inside bpf_rcu_read_lock-ed region\n");
++			return -EINVAL;
++		}
++
++		if (env->cur_state->active_preempt_lock) {
++			verbose(env, "tail_call cannot be used inside bpf_preempt_disable-ed region\n");
++			return -EINVAL;
++		}
+ 		break;
+ 	case BPF_FUNC_get_local_storage:
+ 		/* check that flags argument in get_local_storage(map, flags) is 0,
+@@ -11942,6 +12000,7 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
+ 		enum bpf_arg_type arg_type = ARG_DONTCARE;
+ 		u32 regno = i + 1, ref_id, type_size;
+ 		bool is_ret_buf_sz = false;
++		bool mask = false;
+ 		int kf_arg_type;
+ 
+ 		t = btf_type_skip_modifiers(btf, args[i].type, NULL);
+@@ -12000,12 +12059,15 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
+ 			return -EINVAL;
+ 		}
+ 
++		mask = mask_raw_tp_reg(env, reg);
+ 		if ((is_kfunc_trusted_args(meta) || is_kfunc_rcu(meta)) &&
+ 		    (register_is_null(reg) || type_may_be_null(reg->type)) &&
+ 			!is_kfunc_arg_nullable(meta->btf, &args[i])) {
+ 			verbose(env, "Possibly NULL pointer passed to trusted arg%d\n", i);
++			unmask_raw_tp_reg(reg, mask);
+ 			return -EACCES;
+ 		}
++		unmask_raw_tp_reg(reg, mask);
+ 
+ 		if (reg->ref_obj_id) {
+ 			if (is_kfunc_release(meta) && meta->ref_obj_id) {
+@@ -12063,16 +12125,24 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
+ 			if (!is_kfunc_trusted_args(meta) && !is_kfunc_rcu(meta))
+ 				break;
+ 
++			/* Allow passing maybe NULL raw_tp arguments to
++			 * kfuncs for compatibility. Don't apply this to
++			 * arguments with ref_obj_id > 0.
++			 */
++			mask = mask_raw_tp_reg(env, reg);
+ 			if (!is_trusted_reg(reg)) {
+ 				if (!is_kfunc_rcu(meta)) {
+ 					verbose(env, "R%d must be referenced or trusted\n", regno);
++					unmask_raw_tp_reg(reg, mask);
+ 					return -EINVAL;
+ 				}
+ 				if (!is_rcu_reg(reg)) {
+ 					verbose(env, "R%d must be a rcu pointer\n", regno);
++					unmask_raw_tp_reg(reg, mask);
+ 					return -EINVAL;
+ 				}
+ 			}
++			unmask_raw_tp_reg(reg, mask);
+ 			fallthrough;
+ 		case KF_ARG_PTR_TO_CTX:
+ 		case KF_ARG_PTR_TO_DYNPTR:
+@@ -12095,7 +12165,9 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
+ 
+ 		if (is_kfunc_release(meta) && reg->ref_obj_id)
+ 			arg_type |= OBJ_RELEASE;
++		mask = mask_raw_tp_reg(env, reg);
+ 		ret = check_func_arg_reg_off(env, reg, regno, arg_type);
++		unmask_raw_tp_reg(reg, mask);
+ 		if (ret < 0)
+ 			return ret;
+ 
+@@ -12272,6 +12344,7 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
+ 			ref_tname = btf_name_by_offset(btf, ref_t->name_off);
+ 			fallthrough;
+ 		case KF_ARG_PTR_TO_BTF_ID:
++			mask = mask_raw_tp_reg(env, reg);
+ 			/* Only base_type is checked, further checks are done here */
+ 			if ((base_type(reg->type) != PTR_TO_BTF_ID ||
+ 			     (bpf_type_has_unsafe_modifiers(reg->type) && !is_rcu_reg(reg))) &&
+@@ -12280,9 +12353,11 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
+ 				verbose(env, "expected %s or socket\n",
+ 					reg_type_str(env, base_type(reg->type) |
+ 							  (type_flag(reg->type) & BPF_REG_TRUSTED_MODIFIERS)));
++				unmask_raw_tp_reg(reg, mask);
+ 				return -EINVAL;
+ 			}
+ 			ret = process_kf_arg_ptr_to_btf_id(env, reg, ref_t, ref_tname, ref_id, meta, i);
++			unmask_raw_tp_reg(reg, mask);
+ 			if (ret < 0)
+ 				return ret;
+ 			break;
+@@ -13252,7 +13327,7 @@ static int sanitize_check_bounds(struct bpf_verifier_env *env,
+  */
+ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
+ 				   struct bpf_insn *insn,
+-				   const struct bpf_reg_state *ptr_reg,
++				   struct bpf_reg_state *ptr_reg,
+ 				   const struct bpf_reg_state *off_reg)
+ {
+ 	struct bpf_verifier_state *vstate = env->cur_state;
+@@ -13266,6 +13341,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
+ 	struct bpf_sanitize_info info = {};
+ 	u8 opcode = BPF_OP(insn->code);
+ 	u32 dst = insn->dst_reg;
++	bool mask;
+ 	int ret;
+ 
+ 	dst_reg = &regs[dst];
+@@ -13292,11 +13368,14 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
+ 		return -EACCES;
+ 	}
+ 
++	mask = mask_raw_tp_reg(env, ptr_reg);
+ 	if (ptr_reg->type & PTR_MAYBE_NULL) {
+ 		verbose(env, "R%d pointer arithmetic on %s prohibited, null-check it first\n",
+ 			dst, reg_type_str(env, ptr_reg->type));
++		unmask_raw_tp_reg(ptr_reg, mask);
+ 		return -EACCES;
+ 	}
++	unmask_raw_tp_reg(ptr_reg, mask);
+ 
+ 	switch (base_type(ptr_reg->type)) {
+ 	case PTR_TO_CTX:
+@@ -15909,6 +15988,15 @@ static int check_return_code(struct bpf_verifier_env *env, int regno, const char
+ 			return -ENOTSUPP;
+ 		}
+ 		break;
++	case BPF_PROG_TYPE_KPROBE:
++		switch (env->prog->expected_attach_type) {
++		case BPF_TRACE_KPROBE_SESSION:
++			range = retval_range(0, 1);
++			break;
++		default:
++			return 0;
++		}
++		break;
+ 	case BPF_PROG_TYPE_SK_LOOKUP:
+ 		range = retval_range(SK_DROP, SK_PASS);
+ 		break;
+@@ -19837,6 +19925,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
+ 		 * for this case.
+ 		 */
+ 		case PTR_TO_BTF_ID | MEM_ALLOC | PTR_UNTRUSTED:
++		case PTR_TO_BTF_ID | PTR_TRUSTED | PTR_MAYBE_NULL:
+ 			if (type == BPF_READ) {
+ 				if (BPF_MODE(insn->code) == BPF_MEM)
+ 					insn->code = BPF_LDX | BPF_PROBE_MEM |
+diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
+index 044c7ba1cc482b..e275eaf2de7f8f 100644
+--- a/kernel/cgroup/cgroup.c
++++ b/kernel/cgroup/cgroup.c
+@@ -2140,8 +2140,10 @@ int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask)
+ 	if (ret)
+ 		goto exit_stats;
+ 
+-	ret = cgroup_bpf_inherit(root_cgrp);
+-	WARN_ON_ONCE(ret);
++	if (root == &cgrp_dfl_root) {
++		ret = cgroup_bpf_inherit(root_cgrp);
++		WARN_ON_ONCE(ret);
++	}
+ 
+ 	trace_cgroup_setup_root(root);
+ 
+@@ -2314,10 +2316,8 @@ static void cgroup_kill_sb(struct super_block *sb)
+ 	 * And don't kill the default root.
+ 	 */
+ 	if (list_empty(&root->cgrp.self.children) && root != &cgrp_dfl_root &&
+-	    !percpu_ref_is_dying(&root->cgrp.self.refcnt)) {
+-		cgroup_bpf_offline(&root->cgrp);
++	    !percpu_ref_is_dying(&root->cgrp.self.refcnt))
+ 		percpu_ref_kill(&root->cgrp.self.refcnt);
+-	}
+ 	cgroup_put(&root->cgrp);
+ 	kernfs_kill_sb(sb);
+ }
+@@ -5710,9 +5710,11 @@ static struct cgroup *cgroup_create(struct cgroup *parent, const char *name,
+ 	if (ret)
+ 		goto out_kernfs_remove;
+ 
+-	ret = cgroup_bpf_inherit(cgrp);
+-	if (ret)
+-		goto out_psi_free;
++	if (cgrp->root == &cgrp_dfl_root) {
++		ret = cgroup_bpf_inherit(cgrp);
++		if (ret)
++			goto out_psi_free;
++	}
+ 
+ 	/*
+ 	 * New cgroup inherits effective freeze counter, and
+@@ -6026,7 +6028,8 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
+ 
+ 	cgroup1_check_for_release(parent);
+ 
+-	cgroup_bpf_offline(cgrp);
++	if (cgrp->root == &cgrp_dfl_root)
++		cgroup_bpf_offline(cgrp);
+ 
+ 	/* put the base reference */
+ 	percpu_ref_kill(&cgrp->self.refcnt);
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 22f43721d031d4..ce8be55e5e04b3 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -622,6 +622,12 @@ static void dup_mm_exe_file(struct mm_struct *mm, struct mm_struct *oldmm)
+ 
+ 	exe_file = get_mm_exe_file(oldmm);
+ 	RCU_INIT_POINTER(mm->exe_file, exe_file);
++	/*
++	 * We depend on the oldmm having properly denied write access to the
++	 * exe_file already.
++	 */
++	if (exe_file && deny_write_access(exe_file))
++		pr_warn_once("deny_write_access() failed in %s\n", __func__);
+ }
+ 
+ #ifdef CONFIG_MMU
+@@ -1414,11 +1420,20 @@ int set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
+ 	 */
+ 	old_exe_file = rcu_dereference_raw(mm->exe_file);
+ 
+-	if (new_exe_file)
++	if (new_exe_file) {
++		/*
++		 * We expect the caller (i.e., sys_execve) to have already denied
++		 * write access, so this is unlikely to fail.
++		 */
++		if (unlikely(deny_write_access(new_exe_file)))
++			return -EACCES;
+ 		get_file(new_exe_file);
++	}
+ 	rcu_assign_pointer(mm->exe_file, new_exe_file);
+-	if (old_exe_file)
++	if (old_exe_file) {
++		allow_write_access(old_exe_file);
+ 		fput(old_exe_file);
++	}
+ 	return 0;
+ }
+ 
+@@ -1457,6 +1472,9 @@ int replace_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
+ 			return ret;
+ 	}
+ 
++	ret = deny_write_access(new_exe_file);
++	if (ret)
++		return -EACCES;
+ 	get_file(new_exe_file);
+ 
+ 	/* set the new file */
+@@ -1465,8 +1483,10 @@ int replace_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
+ 	rcu_assign_pointer(mm->exe_file, new_exe_file);
+ 	mmap_write_unlock(mm);
+ 
+-	if (old_exe_file)
++	if (old_exe_file) {
++		allow_write_access(old_exe_file);
+ 		fput(old_exe_file);
++	}
+ 	return 0;
+ }
+ 
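
This fork.c hunk is part of restoring the old i_writecount convention: a positive count means the file has writers, a negative count means deny-write pins (running binaries), and each side excludes the other. A toy model of the pairing; the counter handling here is a sketch, not the kernel implementation:

#include <stdio.h>

/* i_writecount model: > 0 writers, < 0 deny pins, 0 free */
static int deny_write_access(int *writecount)
{
	if (*writecount > 0)
		return -1;	/* open for write somewhere; the kernel returns -ETXTBSY */
	(*writecount)--;
	return 0;
}

static void allow_write_access(int *writecount)
{
	(*writecount)++;
}

int main(void)
{
	int wc = 0;

	if (deny_write_access(&wc) == 0)
		printf("denied, writecount=%d\n", wc);	/* -1 */
	allow_write_access(&wc);
	printf("restored, writecount=%d\n", wc);	/* 0 */
	return 0;
}

Each place that publishes an exe_file reference (dup_mm_exe_file, set_mm_exe_file, replace_mm_exe_file) therefore takes its own deny pin and drops it when the reference goes away, as the hunks above show.
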
+diff --git a/kernel/rcu/rcuscale.c b/kernel/rcu/rcuscale.c
+index 6d37596deb1f12..d360fa44b234db 100644
+--- a/kernel/rcu/rcuscale.c
++++ b/kernel/rcu/rcuscale.c
+@@ -890,13 +890,15 @@ kfree_scale_init(void)
+ 		if (WARN_ON_ONCE(jiffies_at_lazy_cb - jif_start < 2 * HZ)) {
+ 			pr_alert("ERROR: call_rcu() CBs are not being lazy as expected!\n");
+ 			WARN_ON_ONCE(1);
+-			return -1;
++			firsterr = -1;
++			goto unwind;
+ 		}
+ 
+ 		if (WARN_ON_ONCE(jiffies_at_lazy_cb - jif_start > 3 * HZ)) {
+ 			pr_alert("ERROR: call_rcu() CBs are being too lazy!\n");
+ 			WARN_ON_ONCE(1);
+-			return -1;
++			firsterr = -1;
++			goto unwind;
+ 		}
+ 	}
+ 
+diff --git a/kernel/rcu/srcutiny.c b/kernel/rcu/srcutiny.c
+index 549c03336ee971..4dcbf8aa80ff73 100644
+--- a/kernel/rcu/srcutiny.c
++++ b/kernel/rcu/srcutiny.c
+@@ -122,8 +122,8 @@ void srcu_drive_gp(struct work_struct *wp)
+ 	ssp = container_of(wp, struct srcu_struct, srcu_work);
+ 	preempt_disable();  // Needed for PREEMPT_AUTO
+ 	if (ssp->srcu_gp_running || ULONG_CMP_GE(ssp->srcu_idx, READ_ONCE(ssp->srcu_idx_max))) {
+-		return; /* Already running or nothing to do. */
+ 		preempt_enable();
++		return; /* Already running or nothing to do. */
+ 	}
+ 
+ 	/* Remove recently arrived callbacks and wait for readers. */
+diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
+index b1f883fcd9185a..3e486ccaa4ca34 100644
+--- a/kernel/rcu/tree.c
++++ b/kernel/rcu/tree.c
+@@ -3511,7 +3511,7 @@ static int krc_count(struct kfree_rcu_cpu *krcp)
+ }
+ 
+ static void
+-schedule_delayed_monitor_work(struct kfree_rcu_cpu *krcp)
++__schedule_delayed_monitor_work(struct kfree_rcu_cpu *krcp)
+ {
+ 	long delay, delay_left;
+ 
+@@ -3525,6 +3525,16 @@ schedule_delayed_monitor_work(struct kfree_rcu_cpu *krcp)
+ 	queue_delayed_work(system_unbound_wq, &krcp->monitor_work, delay);
+ }
+ 
++static void
++schedule_delayed_monitor_work(struct kfree_rcu_cpu *krcp)
++{
++	unsigned long flags;
++
++	raw_spin_lock_irqsave(&krcp->lock, flags);
++	__schedule_delayed_monitor_work(krcp);
++	raw_spin_unlock_irqrestore(&krcp->lock, flags);
++}
++
+ static void
+ kvfree_rcu_drain_ready(struct kfree_rcu_cpu *krcp)
+ {
+@@ -3836,7 +3846,7 @@ void kvfree_call_rcu(struct rcu_head *head, void *ptr)
+ 
+ 	// Set timer to drain after KFREE_DRAIN_JIFFIES.
+ 	if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING)
+-		schedule_delayed_monitor_work(krcp);
++		__schedule_delayed_monitor_work(krcp);
+ 
+ unlock_return:
+ 	krc_this_cpu_unlock(krcp, flags);
+diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h
+index 16865475120ba3..2605dd234a13c8 100644
+--- a/kernel/rcu/tree_nocb.h
++++ b/kernel/rcu/tree_nocb.h
+@@ -891,7 +891,18 @@ static void nocb_cb_wait(struct rcu_data *rdp)
+ 	swait_event_interruptible_exclusive(rdp->nocb_cb_wq,
+ 					    nocb_cb_wait_cond(rdp));
+ 	if (kthread_should_park()) {
+-		kthread_parkme();
++		/*
++		 * kthread_park() must be preceded by an rcu_barrier().
++		 * But yet another rcu_barrier() might have sneaked in between
++		 * the barrier callback execution and the callbacks counter
++		 * decrement.
++		 */
++		if (rdp->nocb_cb_sleep) {
++			rcu_nocb_lock_irqsave(rdp, flags);
++			WARN_ON_ONCE(rcu_segcblist_n_cbs(&rdp->cblist));
++			rcu_nocb_unlock_irqrestore(rdp, flags);
++			kthread_parkme();
++		}
+ 	} else if (READ_ONCE(rdp->nocb_cb_sleep)) {
+ 		WARN_ON(signal_pending(current));
+ 		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeEmpty"));
+diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
+index c6ba15388ea706..28c77904ea749f 100644
+--- a/kernel/sched/cpufreq_schedutil.c
++++ b/kernel/sched/cpufreq_schedutil.c
+@@ -783,9 +783,8 @@ static int sugov_init(struct cpufreq_policy *policy)
+ 	if (ret)
+ 		goto fail;
+ 
+-	sugov_eas_rebuild_sd();
+-
+ out:
++	sugov_eas_rebuild_sd();
+ 	mutex_unlock(&global_tunables_lock);
+ 	return 0;
+ 
+diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
+index 751d73d500e51d..16613631543f18 100644
+--- a/kernel/sched/ext.c
++++ b/kernel/sched/ext.c
+@@ -3567,12 +3567,7 @@ static void scx_ops_exit_task(struct task_struct *p)
+ 
+ void init_scx_entity(struct sched_ext_entity *scx)
+ {
+-	/*
+-	 * init_idle() calls this function again after fork sequence is
+-	 * complete. Don't touch ->tasks_node as it's already linked.
+-	 */
+-	memset(scx, 0, offsetof(struct sched_ext_entity, tasks_node));
+-
++	memset(scx, 0, sizeof(*scx));
+ 	INIT_LIST_HEAD(&scx->dsq_list.node);
+ 	RB_CLEAR_NODE(&scx->dsq_priq);
+ 	scx->sticky_cpu = -1;
+@@ -6478,6 +6473,8 @@ __bpf_kfunc_end_defs();
+ 
+ BTF_KFUNCS_START(scx_kfunc_ids_unlocked)
+ BTF_ID_FLAGS(func, scx_bpf_create_dsq, KF_SLEEPABLE)
++BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq_set_slice)
++BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq_set_vtime)
+ BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq, KF_RCU)
+ BTF_ID_FLAGS(func, scx_bpf_dispatch_vtime_from_dsq, KF_RCU)
+ BTF_KFUNCS_END(scx_kfunc_ids_unlocked)
+diff --git a/kernel/time/time.c b/kernel/time/time.c
+index 642647f5046be0..1ad88e97b4ebcf 100644
+--- a/kernel/time/time.c
++++ b/kernel/time/time.c
+@@ -556,9 +556,9 @@ EXPORT_SYMBOL(ns_to_timespec64);
+  * - all other values are converted to jiffies by either multiplying
+  *   the input value by a factor or dividing it with a factor and
+  *   handling any 32-bit overflows.
+- *   for the details see __msecs_to_jiffies()
++ *   for the details see _msecs_to_jiffies()
+  *
+- * __msecs_to_jiffies() checks for the passed in value being a constant
++ * msecs_to_jiffies() checks for the passed in value being a constant
+  * via __builtin_constant_p() allowing gcc to eliminate most of the
+  * code, __msecs_to_jiffies() is called if the value passed does not
+  * allow constant folding and the actual conversion must be done at
+diff --git a/kernel/time/timer.c b/kernel/time/timer.c
+index 0fc9d066a7be46..7835f9b376e76a 100644
+--- a/kernel/time/timer.c
++++ b/kernel/time/timer.c
+@@ -2422,7 +2422,8 @@ static inline void __run_timers(struct timer_base *base)
+ 
+ static void __run_timer_base(struct timer_base *base)
+ {
+-	if (time_before(jiffies, base->next_expiry))
++	/* Can race against a remote CPU updating next_expiry under the lock */
++	if (time_before(jiffies, READ_ONCE(base->next_expiry)))
+ 		return;
+ 
+ 	timer_base_lock_expiry(base);
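
The READ_ONCE() added above turns the lockless peek of base->next_expiry into a single marked load, so the compiler can neither tear it nor refetch it while a remote CPU updates the field under base->lock. A C11 analogue of the idiom (names and the comparison are illustrative; the kernel's time_before() additionally handles jiffies wraparound):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct timer_base {
	_Atomic unsigned long next_expiry;	/* written under a lock elsewhere */
};

static bool base_due(struct timer_base *b, unsigned long jiffies)
{
	/* one marked load, the C11 analogue of READ_ONCE() */
	unsigned long expiry = atomic_load_explicit(&b->next_expiry,
						    memory_order_relaxed);

	return jiffies >= expiry;
}

int main(void)
{
	struct timer_base b = { .next_expiry = 100 };

	printf("%d %d\n", base_due(&b, 99), base_due(&b, 100));	/* 0 1 */
	return 0;
}
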
+diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
+index 630b763e52402f..792dc35414a3c3 100644
+--- a/kernel/trace/bpf_trace.c
++++ b/kernel/trace/bpf_trace.c
+@@ -3205,7 +3205,6 @@ static int uprobe_prog_run(struct bpf_uprobe *uprobe,
+ 	struct bpf_prog *prog = link->link.prog;
+ 	bool sleepable = prog->sleepable;
+ 	struct bpf_run_ctx *old_run_ctx;
+-	int err = 0;
+ 
+ 	if (link->task && !same_thread_group(current, link->task))
+ 		return 0;
+@@ -3218,7 +3217,7 @@ static int uprobe_prog_run(struct bpf_uprobe *uprobe,
+ 	migrate_disable();
+ 
+ 	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
+-	err = bpf_prog_run(link->link.prog, regs);
++	bpf_prog_run(link->link.prog, regs);
+ 	bpf_reset_run_ctx(old_run_ctx);
+ 
+ 	migrate_enable();
+@@ -3227,7 +3226,7 @@ static int uprobe_prog_run(struct bpf_uprobe *uprobe,
+ 		rcu_read_unlock_trace();
+ 	else
+ 		rcu_read_unlock();
+-	return err;
++	return 0;
+ }
+ 
+ static bool
+diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
+index 05e7912418126b..3ff9caa4a71bbd 100644
+--- a/kernel/trace/trace_event_perf.c
++++ b/kernel/trace/trace_event_perf.c
+@@ -352,10 +352,16 @@ void perf_uprobe_destroy(struct perf_event *p_event)
+ int perf_trace_add(struct perf_event *p_event, int flags)
+ {
+ 	struct trace_event_call *tp_event = p_event->tp_event;
++	struct hw_perf_event *hwc = &p_event->hw;
+ 
+ 	if (!(flags & PERF_EF_START))
+ 		p_event->hw.state = PERF_HES_STOPPED;
+ 
++	if (is_sampling_event(p_event)) {
++		hwc->last_period = hwc->sample_period;
++		perf_swevent_set_period(p_event);
++	}
++
+ 	/*
+ 	 * If TRACE_REG_PERF_ADD returns false; no custom action was performed
+ 	 * and we need to take the default action of enqueueing our event on
+diff --git a/lib/overflow_kunit.c b/lib/overflow_kunit.c
+index 2abc78367dd110..5222c6393f1168 100644
+--- a/lib/overflow_kunit.c
++++ b/lib/overflow_kunit.c
+@@ -1187,7 +1187,7 @@ static void DEFINE_FLEX_test(struct kunit *test)
+ {
+ 	/* Using _RAW_ on a __counted_by struct will initialize "counter" to zero */
+ 	DEFINE_RAW_FLEX(struct foo, two_but_zero, array, 2);
+-#if __has_attribute(__counted_by__)
++#ifdef CONFIG_CC_HAS_COUNTED_BY
+ 	int expected_raw_size = sizeof(struct foo);
+ #else
+ 	int expected_raw_size = sizeof(struct foo) + 2 * sizeof(s16);
+diff --git a/lib/string_helpers.c b/lib/string_helpers.c
+index 4f887aa62fa0cd..91fa37b5c510a7 100644
+--- a/lib/string_helpers.c
++++ b/lib/string_helpers.c
+@@ -57,7 +57,7 @@ int string_get_size(u64 size, u64 blk_size, const enum string_size_units units,
+ 	static const unsigned int rounding[] = { 500, 50, 5 };
+ 	int i = 0, j;
+ 	u32 remainder = 0, sf_cap;
+-	char tmp[8];
++	char tmp[12];
+ 	const char *unit;
+ 
+ 	tmp[0] = '\0';
+diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
+index 989a12a6787214..6dc234913dd58e 100644
+--- a/lib/strncpy_from_user.c
++++ b/lib/strncpy_from_user.c
+@@ -120,6 +120,9 @@ long strncpy_from_user(char *dst, const char __user *src, long count)
+ 	if (unlikely(count <= 0))
+ 		return 0;
+ 
++	kasan_check_write(dst, count);
++	check_object_size(dst, count, false);
++
+ 	if (can_do_masked_user_access()) {
+ 		long retval;
+ 
+@@ -142,8 +145,6 @@ long strncpy_from_user(char *dst, const char __user *src, long count)
+ 		if (max > count)
+ 			max = count;
+ 
+-		kasan_check_write(dst, count);
+-		check_object_size(dst, count, false);
+ 		if (user_read_access_begin(src, max)) {
+ 			retval = do_strncpy_from_user(dst, src, count, max);
+ 			user_read_access_end();
+diff --git a/mm/internal.h b/mm/internal.h
+index 64c2eb0b160e16..9bb098e78f1556 100644
+--- a/mm/internal.h
++++ b/mm/internal.h
+@@ -48,7 +48,7 @@ struct folio_batch;
+  * when we specify __GFP_NOWARN.
+  */
+ #define WARN_ON_ONCE_GFP(cond, gfp)	({				\
+-	static bool __section(".data.once") __warned;			\
++	static bool __section(".data..once") __warned;			\
+ 	int __ret_warn_once = !!(cond);					\
+ 									\
+ 	if (unlikely(!(gfp & __GFP_NOWARN) && __ret_warn_once && !__warned)) { \
+diff --git a/net/9p/trans_usbg.c b/net/9p/trans_usbg.c
+index 975b76839dca1a..6b694f117aef29 100644
+--- a/net/9p/trans_usbg.c
++++ b/net/9p/trans_usbg.c
+@@ -909,9 +909,9 @@ static struct usb_function_instance *usb9pfs_alloc_instance(void)
+ 	usb9pfs_opts->buflen = DEFAULT_BUFLEN;
+ 
+ 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+-	if (IS_ERR(dev)) {
++	if (!dev) {
+ 		kfree(usb9pfs_opts);
+-		return ERR_CAST(dev);
++		return ERR_PTR(-ENOMEM);
+ 	}
+ 
+ 	usb9pfs_opts->dev = dev;
+diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c
+index dfdbe1ca533872..b9ff69c7522a19 100644
+--- a/net/9p/trans_xen.c
++++ b/net/9p/trans_xen.c
+@@ -286,7 +286,7 @@ static void xen_9pfs_front_free(struct xen_9pfs_front_priv *priv)
+ 		if (!priv->rings[i].intf)
+ 			break;
+ 		if (priv->rings[i].irq > 0)
+-			unbind_from_irqhandler(priv->rings[i].irq, priv->dev);
++			unbind_from_irqhandler(priv->rings[i].irq, ring);
+ 		if (priv->rings[i].data.in) {
+ 			for (j = 0;
+ 			     j < (1 << priv->rings[i].intf->ring_order);
+@@ -465,6 +465,7 @@ static int xen_9pfs_front_init(struct xenbus_device *dev)
+ 		goto error;
+ 	}
+ 
++	xenbus_switch_state(dev, XenbusStateInitialised);
+ 	return 0;
+ 
+  error_xenbus:
+@@ -512,8 +513,10 @@ static void xen_9pfs_front_changed(struct xenbus_device *dev,
+ 		break;
+ 
+ 	case XenbusStateInitWait:
+-		if (!xen_9pfs_front_init(dev))
+-			xenbus_switch_state(dev, XenbusStateInitialised);
++		if (dev->state != XenbusStateInitialising)
++			break;
++
++		xen_9pfs_front_init(dev);
+ 		break;
+ 
+ 	case XenbusStateConnected:
+diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
+index c4c74b82ed211c..6354cdf9c2b372 100644
+--- a/net/bluetooth/hci_conn.c
++++ b/net/bluetooth/hci_conn.c
+@@ -952,6 +952,7 @@ static struct hci_conn *__hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t
+ 	conn->tx_power = HCI_TX_POWER_INVALID;
+ 	conn->max_tx_power = HCI_TX_POWER_INVALID;
+ 	conn->sync_handle = HCI_SYNC_HANDLE_INVALID;
++	conn->sid = HCI_SID_INVALID;
+ 
+ 	set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
+ 	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
+@@ -2062,105 +2063,225 @@ static int create_big_sync(struct hci_dev *hdev, void *data)
+ 
+ static void create_pa_complete(struct hci_dev *hdev, void *data, int err)
+ {
+-	struct hci_cp_le_pa_create_sync *cp = data;
+-
+ 	bt_dev_dbg(hdev, "");
+ 
+ 	if (err)
+ 		bt_dev_err(hdev, "Unable to create PA: %d", err);
++}
+ 
+-	kfree(cp);
++static bool hci_conn_check_create_pa_sync(struct hci_conn *conn)
++{
++	if (conn->type != ISO_LINK || conn->sid == HCI_SID_INVALID)
++		return false;
++
++	return true;
+ }
+ 
+ static int create_pa_sync(struct hci_dev *hdev, void *data)
+ {
+-	struct hci_cp_le_pa_create_sync *cp = data;
+-	int err;
++	struct hci_cp_le_pa_create_sync *cp = NULL;
++	struct hci_conn *conn;
++	int err = 0;
+ 
+-	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_CREATE_SYNC,
+-				    sizeof(*cp), cp, HCI_CMD_TIMEOUT);
+-	if (err) {
+-		hci_dev_clear_flag(hdev, HCI_PA_SYNC);
+-		return err;
++	hci_dev_lock(hdev);
++
++	rcu_read_lock();
++
++	/* The spec allows only one pending LE Periodic Advertising Create
++	 * Sync command at a time. If the command is pending now, don't do
++	 * anything. We check for pending connections after each PA Sync
++	 * Established event.
++	 *
++	 * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
++	 * page 2493:
++	 *
++	 * If the Host issues this command when another HCI_LE_Periodic_
++	 * Advertising_Create_Sync command is pending, the Controller shall
++	 * return the error code Command Disallowed (0x0C).
++	 */
++	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
++		if (test_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags))
++			goto unlock;
++	}
++
++	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
++		if (hci_conn_check_create_pa_sync(conn)) {
++			struct bt_iso_qos *qos = &conn->iso_qos;
++
++			cp = kzalloc(sizeof(*cp), GFP_KERNEL);
++			if (!cp) {
++				err = -ENOMEM;
++				goto unlock;
++			}
++
++			cp->options = qos->bcast.options;
++			cp->sid = conn->sid;
++			cp->addr_type = conn->dst_type;
++			bacpy(&cp->addr, &conn->dst);
++			cp->skip = cpu_to_le16(qos->bcast.skip);
++			cp->sync_timeout = cpu_to_le16(qos->bcast.sync_timeout);
++			cp->sync_cte_type = qos->bcast.sync_cte_type;
++
++			break;
++		}
+ 	}
+ 
+-	return hci_update_passive_scan_sync(hdev);
++unlock:
++	rcu_read_unlock();
++
++	hci_dev_unlock(hdev);
++
++	if (cp) {
++		hci_dev_set_flag(hdev, HCI_PA_SYNC);
++		set_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags);
++
++		err = __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_CREATE_SYNC,
++					    sizeof(*cp), cp, HCI_CMD_TIMEOUT);
++		if (!err)
++			err = hci_update_passive_scan_sync(hdev);
++
++		kfree(cp);
++
++		if (err) {
++			hci_dev_clear_flag(hdev, HCI_PA_SYNC);
++			clear_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags);
++		}
++	}
++
++	return err;
++}
++
++int hci_pa_create_sync_pending(struct hci_dev *hdev)
++{
++	/* Queue start pa_create_sync and scan */
++	return hci_cmd_sync_queue(hdev, create_pa_sync,
++				  NULL, create_pa_complete);
+ }
+ 
+ struct hci_conn *hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst,
+ 				    __u8 dst_type, __u8 sid,
+ 				    struct bt_iso_qos *qos)
+ {
+-	struct hci_cp_le_pa_create_sync *cp;
+ 	struct hci_conn *conn;
+-	int err;
+-
+-	if (hci_dev_test_and_set_flag(hdev, HCI_PA_SYNC))
+-		return ERR_PTR(-EBUSY);
+ 
+ 	conn = hci_conn_add_unset(hdev, ISO_LINK, dst, HCI_ROLE_SLAVE);
+ 	if (IS_ERR(conn))
+ 		return conn;
+ 
+ 	conn->iso_qos = *qos;
++	conn->dst_type = dst_type;
++	conn->sid = sid;
+ 	conn->state = BT_LISTEN;
+ 
+ 	hci_conn_hold(conn);
+ 
+-	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
+-	if (!cp) {
+-		hci_dev_clear_flag(hdev, HCI_PA_SYNC);
+-		hci_conn_drop(conn);
+-		return ERR_PTR(-ENOMEM);
++	hci_pa_create_sync_pending(hdev);
++
++	return conn;
++}
++
++static bool hci_conn_check_create_big_sync(struct hci_conn *conn)
++{
++	if (!conn->num_bis)
++		return false;
++
++	return true;
++}
++
++static void big_create_sync_complete(struct hci_dev *hdev, void *data, int err)
++{
++	bt_dev_dbg(hdev, "");
++
++	if (err)
++		bt_dev_err(hdev, "Unable to create BIG sync: %d", err);
++}
++
++static int big_create_sync(struct hci_dev *hdev, void *data)
++{
++	DEFINE_FLEX(struct hci_cp_le_big_create_sync, pdu, bis, num_bis, 0x11);
++	struct hci_conn *conn;
++
++	rcu_read_lock();
++
++	pdu->num_bis = 0;
++
++	/* The spec allows only one pending LE BIG Create Sync command at
++	 * a time. If the command is pending now, don't do anything. We
++	 * check for pending connections after each BIG Sync Established
++	 * event.
++	 *
++	 * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
++	 * page 2586:
++	 *
++	 * If the Host sends this command when the Controller is in the
++	 * process of synchronizing to any BIG, i.e. the HCI_LE_BIG_Sync_
++	 * Established event has not been generated, the Controller shall
++	 * return the error code Command Disallowed (0x0C).
++	 */
++	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
++		if (test_bit(HCI_CONN_CREATE_BIG_SYNC, &conn->flags))
++			goto unlock;
+ 	}
+ 
+-	cp->options = qos->bcast.options;
+-	cp->sid = sid;
+-	cp->addr_type = dst_type;
+-	bacpy(&cp->addr, dst);
+-	cp->skip = cpu_to_le16(qos->bcast.skip);
+-	cp->sync_timeout = cpu_to_le16(qos->bcast.sync_timeout);
+-	cp->sync_cte_type = qos->bcast.sync_cte_type;
++	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
++		if (hci_conn_check_create_big_sync(conn)) {
++			struct bt_iso_qos *qos = &conn->iso_qos;
+ 
+-	/* Queue start pa_create_sync and scan */
+-	err = hci_cmd_sync_queue(hdev, create_pa_sync, cp, create_pa_complete);
+-	if (err < 0) {
+-		hci_conn_drop(conn);
+-		kfree(cp);
+-		return ERR_PTR(err);
++			set_bit(HCI_CONN_CREATE_BIG_SYNC, &conn->flags);
++
++			pdu->handle = qos->bcast.big;
++			pdu->sync_handle = cpu_to_le16(conn->sync_handle);
++			pdu->encryption = qos->bcast.encryption;
++			memcpy(pdu->bcode, qos->bcast.bcode,
++			       sizeof(pdu->bcode));
++			pdu->mse = qos->bcast.mse;
++			pdu->timeout = cpu_to_le16(qos->bcast.timeout);
++			pdu->num_bis = conn->num_bis;
++			memcpy(pdu->bis, conn->bis, conn->num_bis);
++
++			break;
++		}
+ 	}
+ 
+-	return conn;
++unlock:
++	rcu_read_unlock();
++
++	if (!pdu->num_bis)
++		return 0;
++
++	return hci_send_cmd(hdev, HCI_OP_LE_BIG_CREATE_SYNC,
++			    struct_size(pdu, bis, pdu->num_bis), pdu);
++}
++
++int hci_le_big_create_sync_pending(struct hci_dev *hdev)
++{
++	/* Queue big_create_sync */
++	return hci_cmd_sync_queue_once(hdev, big_create_sync,
++				       NULL, big_create_sync_complete);
+ }
+ 
+ int hci_le_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon,
+ 			   struct bt_iso_qos *qos,
+ 			   __u16 sync_handle, __u8 num_bis, __u8 bis[])
+ {
+-	DEFINE_FLEX(struct hci_cp_le_big_create_sync, pdu, bis, num_bis, 0x11);
+ 	int err;
+ 
+-	if (num_bis < 0x01 || num_bis > pdu->num_bis)
++	if (num_bis < 0x01 || num_bis > ISO_MAX_NUM_BIS)
+ 		return -EINVAL;
+ 
+ 	err = qos_set_big(hdev, qos);
+ 	if (err)
+ 		return err;
+ 
+-	if (hcon)
+-		hcon->iso_qos.bcast.big = qos->bcast.big;
++	if (hcon) {
++		/* Update hcon QoS */
++		hcon->iso_qos = *qos;
+ 
+-	pdu->handle = qos->bcast.big;
+-	pdu->sync_handle = cpu_to_le16(sync_handle);
+-	pdu->encryption = qos->bcast.encryption;
+-	memcpy(pdu->bcode, qos->bcast.bcode, sizeof(pdu->bcode));
+-	pdu->mse = qos->bcast.mse;
+-	pdu->timeout = cpu_to_le16(qos->bcast.timeout);
+-	pdu->num_bis = num_bis;
+-	memcpy(pdu->bis, bis, num_bis);
++		hcon->num_bis = num_bis;
++		memcpy(hcon->bis, bis, num_bis);
++	}
+ 
+-	return hci_send_cmd(hdev, HCI_OP_LE_BIG_CREATE_SYNC,
+-			    struct_size(pdu, bis, num_bis), pdu);
++	return hci_le_big_create_sync_pending(hdev);
+ }
+ 
+ static void create_big_complete(struct hci_dev *hdev, void *data, int err)
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index 0bbad90ddd6f87..2e4bd3e961ce09 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -6345,7 +6345,7 @@ static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data,
+ 	struct hci_ev_le_pa_sync_established *ev = data;
+ 	int mask = hdev->link_mode;
+ 	__u8 flags = 0;
+-	struct hci_conn *pa_sync;
++	struct hci_conn *pa_sync, *conn;
+ 
+ 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
+ 
+@@ -6353,6 +6353,20 @@ static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data,
+ 
+ 	hci_dev_clear_flag(hdev, HCI_PA_SYNC);
+ 
++	conn = hci_conn_hash_lookup_sid(hdev, ev->sid, &ev->bdaddr,
++					ev->bdaddr_type);
++	if (!conn) {
++		bt_dev_err(hdev,
++			   "Unable to find connection for dst %pMR sid 0x%2.2x",
++			   &ev->bdaddr, ev->sid);
++		goto unlock;
++	}
++
++	clear_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags);
++
++	conn->sync_handle = le16_to_cpu(ev->handle);
++	conn->sid = HCI_SID_INVALID;
++
+ 	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ISO_LINK, &flags);
+ 	if (!(mask & HCI_LM_ACCEPT)) {
+ 		hci_le_pa_term_sync(hdev, ev->handle);
+@@ -6379,6 +6393,9 @@ static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data,
+ 	}
+ 
+ unlock:
++	/* Handle any other pending PA sync command */
++	hci_pa_create_sync_pending(hdev);
++
+ 	hci_dev_unlock(hdev);
+ }
+ 
+@@ -6896,7 +6913,7 @@ static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
+ 					    struct sk_buff *skb)
+ {
+ 	struct hci_evt_le_big_sync_estabilished *ev = data;
+-	struct hci_conn *bis;
++	struct hci_conn *bis, *conn;
+ 	int i;
+ 
+ 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
+@@ -6907,6 +6924,20 @@ static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
+ 
+ 	hci_dev_lock(hdev);
+ 
++	conn = hci_conn_hash_lookup_big_sync_pend(hdev, ev->handle,
++						  ev->num_bis);
++	if (!conn) {
++		bt_dev_err(hdev,
++			   "Unable to find connection for big 0x%2.2x",
++			   ev->handle);
++		goto unlock;
++	}
++
++	clear_bit(HCI_CONN_CREATE_BIG_SYNC, &conn->flags);
++
++	conn->num_bis = 0;
++	memset(conn->bis, 0, sizeof(conn->num_bis));
++
+ 	for (i = 0; i < ev->num_bis; i++) {
+ 		u16 handle = le16_to_cpu(ev->bis[i]);
+ 		__le32 interval;
+@@ -6956,6 +6987,10 @@ static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
+ 			hci_connect_cfm(bis, ev->status);
+ 		}
+ 
++unlock:
++	/* Handle any other pending BIG sync command */
++	hci_le_big_create_sync_pending(hdev);
++
+ 	hci_dev_unlock(hdev);
+ }
+ 
+diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
+index 367e32fe30eb84..4b54dbbf0729a3 100644
+--- a/net/bluetooth/hci_sysfs.c
++++ b/net/bluetooth/hci_sysfs.c
+@@ -21,16 +21,6 @@ static const struct device_type bt_link = {
+ 	.release = bt_link_release,
+ };
+ 
+-/*
+- * The rfcomm tty device will possibly retain even when conn
+- * is down, and sysfs doesn't support move zombie device,
+- * so we should move the device before conn device is destroyed.
+- */
+-static int __match_tty(struct device *dev, void *data)
+-{
+-	return !strncmp(dev_name(dev), "rfcomm", 6);
+-}
+-
+ void hci_conn_init_sysfs(struct hci_conn *conn)
+ {
+ 	struct hci_dev *hdev = conn->hdev;
+@@ -73,10 +63,13 @@ void hci_conn_del_sysfs(struct hci_conn *conn)
+ 		return;
+ 	}
+ 
++	/* If there are devices using the connection as parent reset it to NULL
++	 * before unregistering the device.
++	 */
+ 	while (1) {
+ 		struct device *dev;
+ 
+-		dev = device_find_child(&conn->dev, NULL, __match_tty);
++		dev = device_find_any_child(&conn->dev);
+ 		if (!dev)
+ 			break;
+ 		device_move(dev, NULL, DPM_ORDER_DEV_LAST);
+diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c
+index 7a83e400ac77a0..5e2d9758bd3c1c 100644
+--- a/net/bluetooth/iso.c
++++ b/net/bluetooth/iso.c
+@@ -35,6 +35,7 @@ struct iso_conn {
+ 	struct sk_buff	*rx_skb;
+ 	__u32		rx_len;
+ 	__u16		tx_sn;
++	struct kref	ref;
+ };
+ 
+ #define iso_conn_lock(c)	spin_lock(&(c)->lock)
+@@ -93,6 +94,49 @@ static struct sock *iso_get_sock(bdaddr_t *src, bdaddr_t *dst,
+ #define ISO_CONN_TIMEOUT	(HZ * 40)
+ #define ISO_DISCONN_TIMEOUT	(HZ * 2)
+ 
++static void iso_conn_free(struct kref *ref)
++{
++	struct iso_conn *conn = container_of(ref, struct iso_conn, ref);
++
++	BT_DBG("conn %p", conn);
++
++	if (conn->sk)
++		iso_pi(conn->sk)->conn = NULL;
++
++	if (conn->hcon) {
++		conn->hcon->iso_data = NULL;
++		hci_conn_drop(conn->hcon);
++	}
++
++	/* Ensure no more work items will run since hci_conn has been dropped */
++	disable_delayed_work_sync(&conn->timeout_work);
++
++	kfree(conn);
++}
++
++static void iso_conn_put(struct iso_conn *conn)
++{
++	if (!conn)
++		return;
++
++	BT_DBG("conn %p refcnt %d", conn, kref_read(&conn->ref));
++
++	kref_put(&conn->ref, iso_conn_free);
++}
++
++static struct iso_conn *iso_conn_hold_unless_zero(struct iso_conn *conn)
++{
++	if (!conn)
++		return NULL;
++
++	BT_DBG("conn %p refcnt %u", conn, kref_read(&conn->ref));
++
++	if (!kref_get_unless_zero(&conn->ref))
++		return NULL;
++
++	return conn;
++}
++
+ static struct sock *iso_sock_hold(struct iso_conn *conn)
+ {
+ 	if (!conn || !bt_sock_linked(&iso_sk_list, conn->sk))
+@@ -109,9 +153,14 @@ static void iso_sock_timeout(struct work_struct *work)
+ 					     timeout_work.work);
+ 	struct sock *sk;
+ 
++	conn = iso_conn_hold_unless_zero(conn);
++	if (!conn)
++		return;
++
+ 	iso_conn_lock(conn);
+ 	sk = iso_sock_hold(conn);
+ 	iso_conn_unlock(conn);
++	iso_conn_put(conn);
+ 
+ 	if (!sk)
+ 		return;
+@@ -149,9 +198,14 @@ static struct iso_conn *iso_conn_add(struct hci_conn *hcon)
+ {
+ 	struct iso_conn *conn = hcon->iso_data;
+ 
++	conn = iso_conn_hold_unless_zero(conn);
+ 	if (conn) {
+-		if (!conn->hcon)
++		if (!conn->hcon) {
++			iso_conn_lock(conn);
+ 			conn->hcon = hcon;
++			iso_conn_unlock(conn);
++		}
++		iso_conn_put(conn);
+ 		return conn;
+ 	}
+ 
+@@ -159,6 +213,7 @@ static struct iso_conn *iso_conn_add(struct hci_conn *hcon)
+ 	if (!conn)
+ 		return NULL;
+ 
++	kref_init(&conn->ref);
+ 	spin_lock_init(&conn->lock);
+ 	INIT_DELAYED_WORK(&conn->timeout_work, iso_sock_timeout);
+ 
+@@ -178,17 +233,15 @@ static void iso_chan_del(struct sock *sk, int err)
+ 	struct sock *parent;
+ 
+ 	conn = iso_pi(sk)->conn;
++	iso_pi(sk)->conn = NULL;
+ 
+ 	BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
+ 
+ 	if (conn) {
+ 		iso_conn_lock(conn);
+ 		conn->sk = NULL;
+-		iso_pi(sk)->conn = NULL;
+ 		iso_conn_unlock(conn);
+-
+-		if (conn->hcon)
+-			hci_conn_drop(conn->hcon);
++		iso_conn_put(conn);
+ 	}
+ 
+ 	sk->sk_state = BT_CLOSED;
+@@ -210,6 +263,7 @@ static void iso_conn_del(struct hci_conn *hcon, int err)
+ 	struct iso_conn *conn = hcon->iso_data;
+ 	struct sock *sk;
+ 
++	conn = iso_conn_hold_unless_zero(conn);
+ 	if (!conn)
+ 		return;
+ 
+@@ -219,20 +273,18 @@ static void iso_conn_del(struct hci_conn *hcon, int err)
+ 	iso_conn_lock(conn);
+ 	sk = iso_sock_hold(conn);
+ 	iso_conn_unlock(conn);
++	iso_conn_put(conn);
+ 
+-	if (sk) {
+-		lock_sock(sk);
+-		iso_sock_clear_timer(sk);
+-		iso_chan_del(sk, err);
+-		release_sock(sk);
+-		sock_put(sk);
++	if (!sk) {
++		iso_conn_put(conn);
++		return;
+ 	}
+ 
+-	/* Ensure no more work items will run before freeing conn. */
+-	cancel_delayed_work_sync(&conn->timeout_work);
+-
+-	hcon->iso_data = NULL;
+-	kfree(conn);
++	lock_sock(sk);
++	iso_sock_clear_timer(sk);
++	iso_chan_del(sk, err);
++	release_sock(sk);
++	sock_put(sk);
+ }
+ 
+ static int __iso_chan_add(struct iso_conn *conn, struct sock *sk,
+@@ -652,6 +704,8 @@ static void iso_sock_destruct(struct sock *sk)
+ {
+ 	BT_DBG("sk %p", sk);
+ 
++	iso_conn_put(iso_pi(sk)->conn);
++
+ 	skb_queue_purge(&sk->sk_receive_queue);
+ 	skb_queue_purge(&sk->sk_write_queue);
+ }
+@@ -711,6 +765,7 @@ static void iso_sock_disconn(struct sock *sk)
+ 		 */
+ 		if (bis_sk) {
+ 			hcon->state = BT_OPEN;
++			hcon->iso_data = NULL;
+ 			iso_pi(sk)->conn->hcon = NULL;
+ 			iso_sock_clear_timer(sk);
+ 			iso_chan_del(sk, bt_to_errno(hcon->abort_reason));
+@@ -720,7 +775,6 @@ static void iso_sock_disconn(struct sock *sk)
+ 	}
+ 
+ 	sk->sk_state = BT_DISCONN;
+-	iso_sock_set_timer(sk, ISO_DISCONN_TIMEOUT);
+ 	iso_conn_lock(iso_pi(sk)->conn);
+ 	hci_conn_drop(iso_pi(sk)->conn->hcon);
+ 	iso_pi(sk)->conn->hcon = NULL;
+@@ -1338,6 +1392,13 @@ static void iso_conn_big_sync(struct sock *sk)
+ 	if (!hdev)
+ 		return;
+ 
++	/* hci_le_big_create_sync requires hdev lock to be held, since
++	 * it enqueues the HCI LE BIG Create Sync command via
++	 * hci_cmd_sync_queue_once, which checks hdev flags that might
++	 * change.
++	 */
++	hci_dev_lock(hdev);
++
+ 	if (!test_and_set_bit(BT_SK_BIG_SYNC, &iso_pi(sk)->flags)) {
+ 		err = hci_le_big_create_sync(hdev, iso_pi(sk)->conn->hcon,
+ 					     &iso_pi(sk)->qos,
+@@ -1348,6 +1409,8 @@ static void iso_conn_big_sync(struct sock *sk)
+ 			bt_dev_err(hdev, "hci_le_big_create_sync: %d",
+ 				   err);
+ 	}
++
++	hci_dev_unlock(hdev);
+ }
+ 
+ static int iso_sock_recvmsg(struct socket *sock, struct msghdr *msg,
+@@ -1942,6 +2005,7 @@ int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags)
+ 
+ 		if (sk) {
+ 			int err;
++			struct hci_conn	*hcon = iso_pi(sk)->conn->hcon;
+ 
+ 			iso_pi(sk)->qos.bcast.encryption = ev2->encryption;
+ 
+@@ -1950,7 +2014,8 @@ int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags)
+ 
+ 			if (!test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags) &&
+ 			    !test_and_set_bit(BT_SK_BIG_SYNC, &iso_pi(sk)->flags)) {
+-				err = hci_le_big_create_sync(hdev, NULL,
++				err = hci_le_big_create_sync(hdev,
++							     hcon,
+ 							     &iso_pi(sk)->qos,
+ 							     iso_pi(sk)->sync_handle,
+ 							     iso_pi(sk)->bc_num_bis,
+diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
+index a429661b676a83..2343e15f8938ec 100644
+--- a/net/bluetooth/mgmt.c
++++ b/net/bluetooth/mgmt.c
+@@ -1317,7 +1317,8 @@ static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
+ 	struct mgmt_mode *cp;
+ 
+ 	/* Make sure cmd still outstanding. */
+-	if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
++	if (err == -ECANCELED ||
++	    cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
+ 		return;
+ 
+ 	cp = cmd->param;
+@@ -1350,7 +1351,13 @@ static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
+ static int set_powered_sync(struct hci_dev *hdev, void *data)
+ {
+ 	struct mgmt_pending_cmd *cmd = data;
+-	struct mgmt_mode *cp = cmd->param;
++	struct mgmt_mode *cp;
++
++	/* Make sure cmd still outstanding. */
++	if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
++		return -ECANCELED;
++
++	cp = cmd->param;
+ 
+ 	BT_DBG("%s", hdev->name);
+ 
+@@ -1510,7 +1517,8 @@ static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
+ 	bt_dev_dbg(hdev, "err %d", err);
+ 
+ 	/* Make sure cmd still outstanding. */
+-	if (cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
++	if (err == -ECANCELED ||
++	    cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
+ 		return;
+ 
+ 	hci_dev_lock(hdev);
+@@ -1684,7 +1692,8 @@ static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
+ 	bt_dev_dbg(hdev, "err %d", err);
+ 
+ 	/* Make sure cmd still outstanding. */
+-	if (cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
++	if (err == -ECANCELED ||
++	    cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
+ 		return;
+ 
+ 	hci_dev_lock(hdev);
+@@ -1916,7 +1925,7 @@ static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
+ 	bool changed;
+ 
+ 	/* Make sure cmd still outstanding. */
+-	if (cmd != pending_find(MGMT_OP_SET_SSP, hdev))
++	if (err == -ECANCELED || cmd != pending_find(MGMT_OP_SET_SSP, hdev))
+ 		return;
+ 
+ 	if (err) {
+@@ -3782,7 +3791,8 @@ static void set_name_complete(struct hci_dev *hdev, void *data, int err)
+ 
+ 	bt_dev_dbg(hdev, "err %d", err);
+ 
+-	if (cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
++	if (err == -ECANCELED ||
++	    cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
+ 		return;
+ 
+ 	if (status) {
+@@ -3957,7 +3967,8 @@ static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
+ 	struct sk_buff *skb = cmd->skb;
+ 	u8 status = mgmt_status(err);
+ 
+-	if (cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
++	if (err == -ECANCELED ||
++	    cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
+ 		return;
+ 
+ 	if (!status) {
+@@ -5848,13 +5859,16 @@ static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
+ {
+ 	struct mgmt_pending_cmd *cmd = data;
+ 
++	bt_dev_dbg(hdev, "err %d", err);
++
++	if (err == -ECANCELED)
++		return;
++
+ 	if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
+ 	    cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
+ 	    cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
+ 		return;
+ 
+-	bt_dev_dbg(hdev, "err %d", err);
+-
+ 	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
+ 			  cmd->param, 1);
+ 	mgmt_pending_remove(cmd);
+@@ -6087,7 +6101,8 @@ static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
+ {
+ 	struct mgmt_pending_cmd *cmd = data;
+ 
+-	if (cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
++	if (err == -ECANCELED ||
++	    cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
+ 		return;
+ 
+ 	bt_dev_dbg(hdev, "err %d", err);
+@@ -8078,7 +8093,8 @@ static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
+ 	u8 status = mgmt_status(err);
+ 	u16 eir_len;
+ 
+-	if (cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
++	if (err == -ECANCELED ||
++	    cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
+ 		return;
+ 
+ 	if (!status) {
+diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
+index f48250e3f2e103..8af1bf518321fd 100644
+--- a/net/bluetooth/rfcomm/sock.c
++++ b/net/bluetooth/rfcomm/sock.c
+@@ -729,7 +729,8 @@ static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __u
+ 	struct sock *l2cap_sk;
+ 	struct l2cap_conn *conn;
+ 	struct rfcomm_conninfo cinfo;
+-	int len, err = 0;
++	int err = 0;
++	size_t len;
+ 	u32 opt;
+ 
+ 	BT_DBG("sk %p", sk);
+@@ -783,7 +784,7 @@ static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __u
+ 		cinfo.hci_handle = conn->hcon->handle;
+ 		memcpy(cinfo.dev_class, conn->hcon->dev_class, 3);
+ 
+-		len = min_t(unsigned int, len, sizeof(cinfo));
++		len = min(len, sizeof(cinfo));
+ 		if (copy_to_user(optval, (char *) &cinfo, len))
+ 			err = -EFAULT;
+ 
+@@ -802,7 +803,8 @@ static int rfcomm_sock_getsockopt(struct socket *sock, int level, int optname, c
+ {
+ 	struct sock *sk = sock->sk;
+ 	struct bt_security sec;
+-	int len, err = 0;
++	int err = 0;
++	size_t len;
+ 
+ 	BT_DBG("sk %p", sk);
+ 
+@@ -827,7 +829,7 @@ static int rfcomm_sock_getsockopt(struct socket *sock, int level, int optname, c
+ 		sec.level = rfcomm_pi(sk)->sec_level;
+ 		sec.key_size = 0;
+ 
+-		len = min_t(unsigned int, len, sizeof(sec));
++		len = min(len, sizeof(sec));
+ 		if (copy_to_user(optval, (char *) &sec, len))
+ 			err = -EFAULT;
+ 
+diff --git a/net/core/filter.c b/net/core/filter.c
+index fb56567c551ed6..9a459213d283f1 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -2621,18 +2621,16 @@ BPF_CALL_2(bpf_msg_cork_bytes, struct sk_msg *, msg, u32, bytes)
+ 
+ static void sk_msg_reset_curr(struct sk_msg *msg)
+ {
+-	u32 i = msg->sg.start;
+-	u32 len = 0;
+-
+-	do {
+-		len += sk_msg_elem(msg, i)->length;
+-		sk_msg_iter_var_next(i);
+-		if (len >= msg->sg.size)
+-			break;
+-	} while (i != msg->sg.end);
++	if (!msg->sg.size) {
++		msg->sg.curr = msg->sg.start;
++		msg->sg.copybreak = 0;
++	} else {
++		u32 i = msg->sg.end;
+ 
+-	msg->sg.curr = i;
+-	msg->sg.copybreak = 0;
++		sk_msg_iter_var_prev(i);
++		msg->sg.curr = i;
++		msg->sg.copybreak = msg->sg.data[i].length;
++	}
+ }
+ 
+ static const struct bpf_func_proto bpf_msg_cork_bytes_proto = {
+@@ -2795,7 +2793,7 @@ BPF_CALL_4(bpf_msg_push_data, struct sk_msg *, msg, u32, start,
+ 		sk_msg_iter_var_next(i);
+ 	} while (i != msg->sg.end);
+ 
+-	if (start >= offset + l)
++	if (start > offset + l)
+ 		return -EINVAL;
+ 
+ 	space = MAX_MSG_FRAGS - sk_msg_elem_used(msg);
+@@ -2820,6 +2818,8 @@ BPF_CALL_4(bpf_msg_push_data, struct sk_msg *, msg, u32, start,
+ 
+ 		raw = page_address(page);
+ 
++		if (i == msg->sg.end)
++			sk_msg_iter_var_prev(i);
+ 		psge = sk_msg_elem(msg, i);
+ 		front = start - offset;
+ 		back = psge->length - front;
+@@ -2836,7 +2836,13 @@ BPF_CALL_4(bpf_msg_push_data, struct sk_msg *, msg, u32, start,
+ 		}
+ 
+ 		put_page(sg_page(psge));
+-	} else if (start - offset) {
++		new = i;
++		goto place_new;
++	}
++
++	if (start - offset) {
++		if (i == msg->sg.end)
++			sk_msg_iter_var_prev(i);
+ 		psge = sk_msg_elem(msg, i);
+ 		rsge = sk_msg_elem_cpy(msg, i);
+ 
+@@ -2847,39 +2853,44 @@ BPF_CALL_4(bpf_msg_push_data, struct sk_msg *, msg, u32, start,
+ 		sk_msg_iter_var_next(i);
+ 		sg_unmark_end(psge);
+ 		sg_unmark_end(&rsge);
+-		sk_msg_iter_next(msg, end);
+ 	}
+ 
+ 	/* Slot(s) to place newly allocated data */
++	sk_msg_iter_next(msg, end);
+ 	new = i;
++	sk_msg_iter_var_next(i);
++
++	if (i == msg->sg.end) {
++		if (!rsge.length)
++			goto place_new;
++		sk_msg_iter_next(msg, end);
++		goto place_new;
++	}
+ 
+ 	/* Shift one or two slots as needed */
+-	if (!copy) {
+-		sge = sk_msg_elem_cpy(msg, i);
++	sge = sk_msg_elem_cpy(msg, new);
++	sg_unmark_end(&sge);
+ 
++	nsge = sk_msg_elem_cpy(msg, i);
++	if (rsge.length) {
+ 		sk_msg_iter_var_next(i);
+-		sg_unmark_end(&sge);
++		nnsge = sk_msg_elem_cpy(msg, i);
+ 		sk_msg_iter_next(msg, end);
++	}
+ 
+-		nsge = sk_msg_elem_cpy(msg, i);
++	while (i != msg->sg.end) {
++		msg->sg.data[i] = sge;
++		sge = nsge;
++		sk_msg_iter_var_next(i);
+ 		if (rsge.length) {
+-			sk_msg_iter_var_next(i);
++			nsge = nnsge;
+ 			nnsge = sk_msg_elem_cpy(msg, i);
+-		}
+-
+-		while (i != msg->sg.end) {
+-			msg->sg.data[i] = sge;
+-			sge = nsge;
+-			sk_msg_iter_var_next(i);
+-			if (rsge.length) {
+-				nsge = nnsge;
+-				nnsge = sk_msg_elem_cpy(msg, i);
+-			} else {
+-				nsge = sk_msg_elem_cpy(msg, i);
+-			}
++		} else {
++			nsge = sk_msg_elem_cpy(msg, i);
+ 		}
+ 	}
+ 
++place_new:
+ 	/* Place newly allocated data buffer */
+ 	sk_mem_charge(msg->sk, len);
+ 	msg->sg.size += len;
+@@ -2908,8 +2919,10 @@ static const struct bpf_func_proto bpf_msg_push_data_proto = {
+ 
+ static void sk_msg_shift_left(struct sk_msg *msg, int i)
+ {
++	struct scatterlist *sge = sk_msg_elem(msg, i);
+ 	int prev;
+ 
++	put_page(sg_page(sge));
+ 	do {
+ 		prev = i;
+ 		sk_msg_iter_var_next(i);
+@@ -2946,6 +2959,9 @@ BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start,
+ 	if (unlikely(flags))
+ 		return -EINVAL;
+ 
++	if (unlikely(len == 0))
++		return 0;
++
+ 	/* First find the starting scatterlist element */
+ 	i = msg->sg.start;
+ 	do {
+@@ -2958,7 +2974,7 @@ BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start,
+ 	} while (i != msg->sg.end);
+ 
+ 	/* Bounds checks: start and pop must be inside message */
+-	if (start >= offset + l || last >= msg->sg.size)
++	if (start >= offset + l || last > msg->sg.size)
+ 		return -EINVAL;
+ 
+ 	space = MAX_MSG_FRAGS - sk_msg_elem_used(msg);
+@@ -2987,12 +3003,12 @@ BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start,
+ 	 */
+ 	if (start != offset) {
+ 		struct scatterlist *nsge, *sge = sk_msg_elem(msg, i);
+-		int a = start;
++		int a = start - offset;
+ 		int b = sge->length - pop - a;
+ 
+ 		sk_msg_iter_var_next(i);
+ 
+-		if (pop < sge->length - a) {
++		if (b > 0) {
+ 			if (space) {
+ 				sge->length = a;
+ 				sk_msg_shift_right(msg, i);
+@@ -3011,7 +3027,6 @@ BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start,
+ 				if (unlikely(!page))
+ 					return -ENOMEM;
+ 
+-				sge->length = a;
+ 				orig = sg_page(sge);
+ 				from = sg_virt(sge);
+ 				to = page_address(page);
+@@ -3021,7 +3036,7 @@ BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start,
+ 				put_page(orig);
+ 			}
+ 			pop = 0;
+-		} else if (pop >= sge->length - a) {
++		} else {
+ 			pop -= (sge->length - a);
+ 			sge->length = a;
+ 		}
+@@ -3055,7 +3070,6 @@ BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start,
+ 			pop -= sge->length;
+ 			sk_msg_shift_left(msg, i);
+ 		}
+-		sk_msg_iter_var_next(i);
+ 	}
+ 
+ 	sk_mem_uncharge(msg->sk, len - pop);
+diff --git a/net/core/netdev-genl.c b/net/core/netdev-genl.c
+index 1cb954f2d39e82..d2baa1af9df09e 100644
+--- a/net/core/netdev-genl.c
++++ b/net/core/netdev-genl.c
+@@ -215,6 +215,7 @@ int netdev_nl_napi_get_doit(struct sk_buff *skb, struct genl_info *info)
+ 		return -ENOMEM;
+ 
+ 	rtnl_lock();
++	rcu_read_lock();
+ 
+ 	napi = napi_by_id(napi_id);
+ 	if (napi) {
+@@ -224,6 +225,7 @@ int netdev_nl_napi_get_doit(struct sk_buff *skb, struct genl_info *info)
+ 		err = -ENOENT;
+ 	}
+ 
++	rcu_read_unlock();
+ 	rtnl_unlock();
+ 
+ 	if (err)
+diff --git a/net/core/skmsg.c b/net/core/skmsg.c
+index b1dcbd3be89e10..e90fbab703b2db 100644
+--- a/net/core/skmsg.c
++++ b/net/core/skmsg.c
+@@ -1117,9 +1117,9 @@ static void sk_psock_strp_data_ready(struct sock *sk)
+ 		if (tls_sw_has_ctx_rx(sk)) {
+ 			psock->saved_data_ready(sk);
+ 		} else {
+-			write_lock_bh(&sk->sk_callback_lock);
++			read_lock_bh(&sk->sk_callback_lock);
+ 			strp_data_ready(&psock->strp);
+-			write_unlock_bh(&sk->sk_callback_lock);
++			read_unlock_bh(&sk->sk_callback_lock);
+ 		}
+ 	}
+ 	rcu_read_unlock();
+diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
+index ebdfd5b64e17a2..f630d6645636dd 100644
+--- a/net/hsr/hsr_device.c
++++ b/net/hsr/hsr_device.c
+@@ -268,6 +268,8 @@ static struct sk_buff *hsr_init_skb(struct hsr_port *master)
+ 	skb->dev = master->dev;
+ 	skb->priority = TC_PRIO_CONTROL;
+ 
++	skb_reset_network_header(skb);
++	skb_reset_transport_header(skb);
+ 	if (dev_hard_header(skb, skb->dev, ETH_P_PRP,
+ 			    hsr->sup_multicast_addr,
+ 			    skb->dev->dev_addr, skb->len) <= 0)
+@@ -275,8 +277,6 @@ static struct sk_buff *hsr_init_skb(struct hsr_port *master)
+ 
+ 	skb_reset_mac_header(skb);
+ 	skb_reset_mac_len(skb);
+-	skb_reset_network_header(skb);
+-	skb_reset_transport_header(skb);
+ 
+ 	return skb;
+ out:
+diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
+index 2b698f8419fe2b..fe7947f7740623 100644
+--- a/net/ipv4/inet_connection_sock.c
++++ b/net/ipv4/inet_connection_sock.c
+@@ -1189,7 +1189,7 @@ static void reqsk_timer_handler(struct timer_list *t)
+ 
+ drop:
+ 	__inet_csk_reqsk_queue_drop(sk_listener, oreq, true);
+-	reqsk_put(req);
++	reqsk_put(oreq);
+ }
+ 
+ static bool reqsk_queue_hash_req(struct request_sock *req,
+diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
+index 089864c6a35eec..449a2ac40bdc00 100644
+--- a/net/ipv4/ipmr.c
++++ b/net/ipv4/ipmr.c
+@@ -137,7 +137,7 @@ static struct mr_table *ipmr_mr_table_iter(struct net *net,
+ 	return ret;
+ }
+ 
+-static struct mr_table *ipmr_get_table(struct net *net, u32 id)
++static struct mr_table *__ipmr_get_table(struct net *net, u32 id)
+ {
+ 	struct mr_table *mrt;
+ 
+@@ -148,6 +148,16 @@ static struct mr_table *ipmr_get_table(struct net *net, u32 id)
+ 	return NULL;
+ }
+ 
++static struct mr_table *ipmr_get_table(struct net *net, u32 id)
++{
++	struct mr_table *mrt;
++
++	rcu_read_lock();
++	mrt = __ipmr_get_table(net, id);
++	rcu_read_unlock();
++	return mrt;
++}
++
+ static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
+ 			   struct mr_table **mrt)
+ {
+@@ -189,7 +199,7 @@ static int ipmr_rule_action(struct fib_rule *rule, struct flowi *flp,
+ 
+ 	arg->table = fib_rule_get_table(rule, arg);
+ 
+-	mrt = ipmr_get_table(rule->fr_net, arg->table);
++	mrt = __ipmr_get_table(rule->fr_net, arg->table);
+ 	if (!mrt)
+ 		return -EAGAIN;
+ 	res->mrt = mrt;
+@@ -315,6 +325,8 @@ static struct mr_table *ipmr_get_table(struct net *net, u32 id)
+ 	return net->ipv4.mrt;
+ }
+ 
++#define __ipmr_get_table ipmr_get_table
++
+ static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
+ 			   struct mr_table **mrt)
+ {
+@@ -403,7 +415,7 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id)
+ 	if (id != RT_TABLE_DEFAULT && id >= 1000000000)
+ 		return ERR_PTR(-EINVAL);
+ 
+-	mrt = ipmr_get_table(net, id);
++	mrt = __ipmr_get_table(net, id);
+ 	if (mrt)
+ 		return mrt;
+ 
+@@ -1374,7 +1386,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, sockptr_t optval,
+ 		goto out_unlock;
+ 	}
+ 
+-	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
++	mrt = __ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
+ 	if (!mrt) {
+ 		ret = -ENOENT;
+ 		goto out_unlock;
+@@ -2262,11 +2274,13 @@ int ipmr_get_route(struct net *net, struct sk_buff *skb,
+ 	struct mr_table *mrt;
+ 	int err;
+ 
+-	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
+-	if (!mrt)
++	rcu_read_lock();
++	mrt = __ipmr_get_table(net, RT_TABLE_DEFAULT);
++	if (!mrt) {
++		rcu_read_unlock();
+ 		return -ENOENT;
++	}
+ 
+-	rcu_read_lock();
+ 	cache = ipmr_cache_find(mrt, saddr, daddr);
+ 	if (!cache && skb->dev) {
+ 		int vif = ipmr_find_vif(mrt, skb->dev);
+@@ -2550,7 +2564,7 @@ static int ipmr_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
+ 	grp = tb[RTA_DST] ? nla_get_in_addr(tb[RTA_DST]) : 0;
+ 	tableid = tb[RTA_TABLE] ? nla_get_u32(tb[RTA_TABLE]) : 0;
+ 
+-	mrt = ipmr_get_table(net, tableid ? tableid : RT_TABLE_DEFAULT);
++	mrt = __ipmr_get_table(net, tableid ? tableid : RT_TABLE_DEFAULT);
+ 	if (!mrt) {
+ 		err = -ENOENT;
+ 		goto errout_free;
+@@ -2604,7 +2618,7 @@ static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
+ 	if (filter.table_id) {
+ 		struct mr_table *mrt;
+ 
+-		mrt = ipmr_get_table(sock_net(skb->sk), filter.table_id);
++		mrt = __ipmr_get_table(sock_net(skb->sk), filter.table_id);
+ 		if (!mrt) {
+ 			if (rtnl_msg_family(cb->nlh) != RTNL_FAMILY_IPMR)
+ 				return skb->len;
+@@ -2712,7 +2726,7 @@ static int rtm_to_ipmr_mfcc(struct net *net, struct nlmsghdr *nlh,
+ 			break;
+ 		}
+ 	}
+-	mrt = ipmr_get_table(net, tblid);
++	mrt = __ipmr_get_table(net, tblid);
+ 	if (!mrt) {
+ 		ret = -ENOENT;
+ 		goto out;
+@@ -2920,13 +2934,15 @@ static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
+ 	struct net *net = seq_file_net(seq);
+ 	struct mr_table *mrt;
+ 
+-	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
+-	if (!mrt)
++	rcu_read_lock();
++	mrt = __ipmr_get_table(net, RT_TABLE_DEFAULT);
++	if (!mrt) {
++		rcu_read_unlock();
+ 		return ERR_PTR(-ENOENT);
++	}
+ 
+ 	iter->mrt = mrt;
+ 
+-	rcu_read_lock();
+ 	return mr_vif_seq_start(seq, pos);
+ }
+ 
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index 94dceac528842c..01115e1a34cb66 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -2570,6 +2570,24 @@ static struct inet6_dev *addrconf_add_dev(struct net_device *dev)
+ 	return idev;
+ }
+ 
++static void delete_tempaddrs(struct inet6_dev *idev,
++			     struct inet6_ifaddr *ifp)
++{
++	struct inet6_ifaddr *ift, *tmp;
++
++	write_lock_bh(&idev->lock);
++	list_for_each_entry_safe(ift, tmp, &idev->tempaddr_list, tmp_list) {
++		if (ift->ifpub != ifp)
++			continue;
++
++		in6_ifa_hold(ift);
++		write_unlock_bh(&idev->lock);
++		ipv6_del_addr(ift);
++		write_lock_bh(&idev->lock);
++	}
++	write_unlock_bh(&idev->lock);
++}
++
+ static void manage_tempaddrs(struct inet6_dev *idev,
+ 			     struct inet6_ifaddr *ifp,
+ 			     __u32 valid_lft, __u32 prefered_lft,
+@@ -3124,11 +3142,12 @@ static int inet6_addr_del(struct net *net, int ifindex, u32 ifa_flags,
+ 			in6_ifa_hold(ifp);
+ 			read_unlock_bh(&idev->lock);
+ 
+-			if (!(ifp->flags & IFA_F_TEMPORARY) &&
+-			    (ifa_flags & IFA_F_MANAGETEMPADDR))
+-				manage_tempaddrs(idev, ifp, 0, 0, false,
+-						 jiffies);
+ 			ipv6_del_addr(ifp);
++
++			if (!(ifp->flags & IFA_F_TEMPORARY) &&
++			    (ifp->flags & IFA_F_MANAGETEMPADDR))
++				delete_tempaddrs(idev, ifp);
++
+ 			addrconf_verify_rtnl(net);
+ 			if (ipv6_addr_is_multicast(pfx)) {
+ 				ipv6_mc_config(net->ipv6.mc_autojoin_sk,
+@@ -4952,14 +4971,12 @@ static int inet6_addr_modify(struct net *net, struct inet6_ifaddr *ifp,
+ 	}
+ 
+ 	if (was_managetempaddr || ifp->flags & IFA_F_MANAGETEMPADDR) {
+-		if (was_managetempaddr &&
+-		    !(ifp->flags & IFA_F_MANAGETEMPADDR)) {
+-			cfg->valid_lft = 0;
+-			cfg->preferred_lft = 0;
+-		}
+-		manage_tempaddrs(ifp->idev, ifp, cfg->valid_lft,
+-				 cfg->preferred_lft, !was_managetempaddr,
+-				 jiffies);
++		if (was_managetempaddr && !(ifp->flags & IFA_F_MANAGETEMPADDR))
++			delete_tempaddrs(ifp->idev, ifp);
++		else
++			manage_tempaddrs(ifp->idev, ifp, cfg->valid_lft,
++					 cfg->preferred_lft, !was_managetempaddr,
++					 jiffies);
+ 	}
+ 
+ 	addrconf_verify_rtnl(net);
+diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
+index eb111d20615c62..9a1c59275a1099 100644
+--- a/net/ipv6/ip6_fib.c
++++ b/net/ipv6/ip6_fib.c
+@@ -1190,8 +1190,8 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
+ 		while (sibling) {
+ 			if (sibling->fib6_metric == rt->fib6_metric &&
+ 			    rt6_qualify_for_ecmp(sibling)) {
+-				list_add_tail(&rt->fib6_siblings,
+-					      &sibling->fib6_siblings);
++				list_add_tail_rcu(&rt->fib6_siblings,
++						  &sibling->fib6_siblings);
+ 				break;
+ 			}
+ 			sibling = rcu_dereference_protected(sibling->fib6_next,
+@@ -1252,7 +1252,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
+ 							 fib6_siblings)
+ 					sibling->fib6_nsiblings--;
+ 				rt->fib6_nsiblings = 0;
+-				list_del_init(&rt->fib6_siblings);
++				list_del_rcu(&rt->fib6_siblings);
+ 				rt6_multipath_rebalance(next_sibling);
+ 				return err;
+ 			}
+@@ -1970,7 +1970,7 @@ static void fib6_del_route(struct fib6_table *table, struct fib6_node *fn,
+ 					 &rt->fib6_siblings, fib6_siblings)
+ 			sibling->fib6_nsiblings--;
+ 		rt->fib6_nsiblings = 0;
+-		list_del_init(&rt->fib6_siblings);
++		list_del_rcu(&rt->fib6_siblings);
+ 		rt6_multipath_rebalance(next_sibling);
+ 	}
+ 
+diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
+index 2ce4ae0d8dc3b4..d5057401701c1a 100644
+--- a/net/ipv6/ip6mr.c
++++ b/net/ipv6/ip6mr.c
+@@ -125,7 +125,7 @@ static struct mr_table *ip6mr_mr_table_iter(struct net *net,
+ 	return ret;
+ }
+ 
+-static struct mr_table *ip6mr_get_table(struct net *net, u32 id)
++static struct mr_table *__ip6mr_get_table(struct net *net, u32 id)
+ {
+ 	struct mr_table *mrt;
+ 
+@@ -136,6 +136,16 @@ static struct mr_table *ip6mr_get_table(struct net *net, u32 id)
+ 	return NULL;
+ }
+ 
++static struct mr_table *ip6mr_get_table(struct net *net, u32 id)
++{
++	struct mr_table *mrt;
++
++	rcu_read_lock();
++	mrt = __ip6mr_get_table(net, id);
++	rcu_read_unlock();
++	return mrt;
++}
++
+ static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
+ 			    struct mr_table **mrt)
+ {
+@@ -177,7 +187,7 @@ static int ip6mr_rule_action(struct fib_rule *rule, struct flowi *flp,
+ 
+ 	arg->table = fib_rule_get_table(rule, arg);
+ 
+-	mrt = ip6mr_get_table(rule->fr_net, arg->table);
++	mrt = __ip6mr_get_table(rule->fr_net, arg->table);
+ 	if (!mrt)
+ 		return -EAGAIN;
+ 	res->mrt = mrt;
+@@ -304,6 +314,8 @@ static struct mr_table *ip6mr_get_table(struct net *net, u32 id)
+ 	return net->ipv6.mrt6;
+ }
+ 
++#define __ip6mr_get_table ip6mr_get_table
++
+ static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
+ 			    struct mr_table **mrt)
+ {
+@@ -382,7 +394,7 @@ static struct mr_table *ip6mr_new_table(struct net *net, u32 id)
+ {
+ 	struct mr_table *mrt;
+ 
+-	mrt = ip6mr_get_table(net, id);
++	mrt = __ip6mr_get_table(net, id);
+ 	if (mrt)
+ 		return mrt;
+ 
+@@ -411,13 +423,15 @@ static void *ip6mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
+ 	struct net *net = seq_file_net(seq);
+ 	struct mr_table *mrt;
+ 
+-	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
+-	if (!mrt)
++	rcu_read_lock();
++	mrt = __ip6mr_get_table(net, RT6_TABLE_DFLT);
++	if (!mrt) {
++		rcu_read_unlock();
+ 		return ERR_PTR(-ENOENT);
++	}
+ 
+ 	iter->mrt = mrt;
+ 
+-	rcu_read_lock();
+ 	return mr_vif_seq_start(seq, pos);
+ }
+ 
+@@ -2275,11 +2289,13 @@ int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm,
+ 	struct mfc6_cache *cache;
+ 	struct rt6_info *rt = dst_rt6_info(skb_dst(skb));
+ 
+-	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
+-	if (!mrt)
++	rcu_read_lock();
++	mrt = __ip6mr_get_table(net, RT6_TABLE_DFLT);
++	if (!mrt) {
++		rcu_read_unlock();
+ 		return -ENOENT;
++	}
+ 
+-	rcu_read_lock();
+ 	cache = ip6mr_cache_find(mrt, &rt->rt6i_src.addr, &rt->rt6i_dst.addr);
+ 	if (!cache && skb->dev) {
+ 		int vif = ip6mr_find_vif(mrt, skb->dev);
+@@ -2559,7 +2575,7 @@ static int ip6mr_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
+ 		grp = nla_get_in6_addr(tb[RTA_DST]);
+ 	tableid = tb[RTA_TABLE] ? nla_get_u32(tb[RTA_TABLE]) : 0;
+ 
+-	mrt = ip6mr_get_table(net, tableid ?: RT_TABLE_DEFAULT);
++	mrt = __ip6mr_get_table(net, tableid ?: RT_TABLE_DEFAULT);
+ 	if (!mrt) {
+ 		NL_SET_ERR_MSG_MOD(extack, "MR table does not exist");
+ 		return -ENOENT;
+@@ -2606,7 +2622,7 @@ static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
+ 	if (filter.table_id) {
+ 		struct mr_table *mrt;
+ 
+-		mrt = ip6mr_get_table(sock_net(skb->sk), filter.table_id);
++		mrt = __ip6mr_get_table(sock_net(skb->sk), filter.table_id);
+ 		if (!mrt) {
+ 			if (rtnl_msg_family(cb->nlh) != RTNL_FAMILY_IP6MR)
+ 				return skb->len;
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index b4251915585f75..cff4fbbc66efb2 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -374,6 +374,7 @@ static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
+ {
+ 	struct rt6_info *rt = dst_rt6_info(dst);
+ 	struct inet6_dev *idev = rt->rt6i_idev;
++	struct fib6_info *from;
+ 
+ 	if (idev && idev->dev != blackhole_netdev) {
+ 		struct inet6_dev *blackhole_idev = in6_dev_get(blackhole_netdev);
+@@ -383,6 +384,8 @@ static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
+ 			in6_dev_put(idev);
+ 		}
+ 	}
++	from = unrcu_pointer(xchg(&rt->from, NULL));
++	fib6_info_release(from);
+ }
+ 
+ static bool __rt6_check_expired(const struct rt6_info *rt)
+@@ -413,8 +416,8 @@ void fib6_select_path(const struct net *net, struct fib6_result *res,
+ 		      struct flowi6 *fl6, int oif, bool have_oif_match,
+ 		      const struct sk_buff *skb, int strict)
+ {
+-	struct fib6_info *sibling, *next_sibling;
+ 	struct fib6_info *match = res->f6i;
++	struct fib6_info *sibling;
+ 
+ 	if (!match->nh && (!match->fib6_nsiblings || have_oif_match))
+ 		goto out;
+@@ -440,8 +443,8 @@ void fib6_select_path(const struct net *net, struct fib6_result *res,
+ 	if (fl6->mp_hash <= atomic_read(&match->fib6_nh->fib_nh_upper_bound))
+ 		goto out;
+ 
+-	list_for_each_entry_safe(sibling, next_sibling, &match->fib6_siblings,
+-				 fib6_siblings) {
++	list_for_each_entry_rcu(sibling, &match->fib6_siblings,
++				fib6_siblings) {
+ 		const struct fib6_nh *nh = sibling->fib6_nh;
+ 		int nh_upper_bound;
+ 
+@@ -1455,7 +1458,6 @@ static DEFINE_SPINLOCK(rt6_exception_lock);
+ static void rt6_remove_exception(struct rt6_exception_bucket *bucket,
+ 				 struct rt6_exception *rt6_ex)
+ {
+-	struct fib6_info *from;
+ 	struct net *net;
+ 
+ 	if (!bucket || !rt6_ex)
+@@ -1467,8 +1469,6 @@ static void rt6_remove_exception(struct rt6_exception_bucket *bucket,
+ 	/* purge completely the exception to allow releasing the held resources:
+ 	 * some [sk] cache may keep the dst around for unlimited time
+ 	 */
+-	from = unrcu_pointer(xchg(&rt6_ex->rt6i->from, NULL));
+-	fib6_info_release(from);
+ 	dst_dev_put(&rt6_ex->rt6i->dst);
+ 
+ 	hlist_del_rcu(&rt6_ex->hlist);
+@@ -5195,14 +5195,18 @@ static void ip6_route_mpath_notify(struct fib6_info *rt,
+ 	 * nexthop. Since sibling routes are always added at the end of
+ 	 * the list, find the first sibling of the last route appended
+ 	 */
++	rcu_read_lock();
++
+ 	if ((nlflags & NLM_F_APPEND) && rt_last && rt_last->fib6_nsiblings) {
+-		rt = list_first_entry(&rt_last->fib6_siblings,
+-				      struct fib6_info,
+-				      fib6_siblings);
++		rt = list_first_or_null_rcu(&rt_last->fib6_siblings,
++					    struct fib6_info,
++					    fib6_siblings);
+ 	}
+ 
+ 	if (rt)
+ 		inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags);
++
++	rcu_read_unlock();
+ }
+ 
+ static bool ip6_route_mpath_should_notify(const struct fib6_info *rt)
+@@ -5547,17 +5551,21 @@ static size_t rt6_nlmsg_size(struct fib6_info *f6i)
+ 		nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_nlmsg_size,
+ 					 &nexthop_len);
+ 	} else {
+-		struct fib6_info *sibling, *next_sibling;
+ 		struct fib6_nh *nh = f6i->fib6_nh;
++		struct fib6_info *sibling;
+ 
+ 		nexthop_len = 0;
+ 		if (f6i->fib6_nsiblings) {
+ 			rt6_nh_nlmsg_size(nh, &nexthop_len);
+ 
+-			list_for_each_entry_safe(sibling, next_sibling,
+-						 &f6i->fib6_siblings, fib6_siblings) {
++			rcu_read_lock();
++
++			list_for_each_entry_rcu(sibling, &f6i->fib6_siblings,
++						fib6_siblings) {
+ 				rt6_nh_nlmsg_size(sibling->fib6_nh, &nexthop_len);
+ 			}
++
++			rcu_read_unlock();
+ 		}
+ 		nexthop_len += lwtunnel_get_encap_size(nh->fib_nh_lws);
+ 	}
+@@ -5721,7 +5729,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
+ 		    lwtunnel_fill_encap(skb, dst->lwtstate, RTA_ENCAP, RTA_ENCAP_TYPE) < 0)
+ 			goto nla_put_failure;
+ 	} else if (rt->fib6_nsiblings) {
+-		struct fib6_info *sibling, *next_sibling;
++		struct fib6_info *sibling;
+ 		struct nlattr *mp;
+ 
+ 		mp = nla_nest_start_noflag(skb, RTA_MULTIPATH);
+@@ -5733,14 +5741,21 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
+ 				    0) < 0)
+ 			goto nla_put_failure;
+ 
+-		list_for_each_entry_safe(sibling, next_sibling,
+-					 &rt->fib6_siblings, fib6_siblings) {
++		rcu_read_lock();
++
++		list_for_each_entry_rcu(sibling, &rt->fib6_siblings,
++					fib6_siblings) {
+ 			if (fib_add_nexthop(skb, &sibling->fib6_nh->nh_common,
+ 					    sibling->fib6_nh->fib_nh_weight,
+-					    AF_INET6, 0) < 0)
++					    AF_INET6, 0) < 0) {
++				rcu_read_unlock();
++
+ 				goto nla_put_failure;
++			}
+ 		}
+ 
++		rcu_read_unlock();
++
+ 		nla_nest_end(skb, mp);
+ 	} else if (rt->nh) {
+ 		if (nla_put_u32(skb, RTA_NH_ID, rt->nh->id))
+@@ -6177,7 +6192,7 @@ void inet6_rt_notify(int event, struct fib6_info *rt, struct nl_info *info,
+ 	err = -ENOBUFS;
+ 	seq = info->nlh ? info->nlh->nlmsg_seq : 0;
+ 
+-	skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
++	skb = nlmsg_new(rt6_nlmsg_size(rt), GFP_ATOMIC);
+ 	if (!skb)
+ 		goto errout;
+ 
+@@ -6190,7 +6205,7 @@ void inet6_rt_notify(int event, struct fib6_info *rt, struct nl_info *info,
+ 		goto errout;
+ 	}
+ 	rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
+-		    info->nlh, gfp_any());
++		    info->nlh, GFP_ATOMIC);
+ 	return;
+ errout:
+ 	rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
+diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
+index c00323fa9eb66e..7929df08d4e023 100644
+--- a/net/iucv/af_iucv.c
++++ b/net/iucv/af_iucv.c
+@@ -1236,7 +1236,9 @@ static int iucv_sock_recvmsg(struct socket *sock, struct msghdr *msg,
+ 		return -EOPNOTSUPP;
+ 
+ 	/* receive/dequeue next skb:
+-	 * the function understands MSG_PEEK and, thus, does not dequeue skb */
++	 * the function understands MSG_PEEK and, thus, does not dequeue skb
++	 * only refcount is increased.
++	 */
+ 	skb = skb_recv_datagram(sk, flags, &err);
+ 	if (!skb) {
+ 		if (sk->sk_shutdown & RCV_SHUTDOWN)
+@@ -1252,9 +1254,8 @@ static int iucv_sock_recvmsg(struct socket *sock, struct msghdr *msg,
+ 
+ 	cskb = skb;
+ 	if (skb_copy_datagram_msg(cskb, offset, msg, copied)) {
+-		if (!(flags & MSG_PEEK))
+-			skb_queue_head(&sk->sk_receive_queue, skb);
+-		return -EFAULT;
++		err = -EFAULT;
++		goto err_out;
+ 	}
+ 
+ 	/* SOCK_SEQPACKET: set MSG_TRUNC if recv buf size is too small */
+@@ -1271,11 +1272,8 @@ static int iucv_sock_recvmsg(struct socket *sock, struct msghdr *msg,
+ 	err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS,
+ 		       sizeof(IUCV_SKB_CB(skb)->class),
+ 		       (void *)&IUCV_SKB_CB(skb)->class);
+-	if (err) {
+-		if (!(flags & MSG_PEEK))
+-			skb_queue_head(&sk->sk_receive_queue, skb);
+-		return err;
+-	}
++	if (err)
++		goto err_out;
+ 
+ 	/* Mark read part of skb as used */
+ 	if (!(flags & MSG_PEEK)) {
+@@ -1331,8 +1329,18 @@ static int iucv_sock_recvmsg(struct socket *sock, struct msghdr *msg,
+ 	/* SOCK_SEQPACKET: return real length if MSG_TRUNC is set */
+ 	if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC))
+ 		copied = rlen;
++	if (flags & MSG_PEEK)
++		skb_unref(skb);
+ 
+ 	return copied;
++
++err_out:
++	if (!(flags & MSG_PEEK))
++		skb_queue_head(&sk->sk_receive_queue, skb);
++	else
++		skb_unref(skb);
++
++	return err;
+ }
+ 
+ static inline __poll_t iucv_accept_poll(struct sock *parent)
+diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
+index 3eec23ac5ab10e..369a2f2e459cdb 100644
+--- a/net/l2tp/l2tp_core.c
++++ b/net/l2tp/l2tp_core.c
+@@ -1870,15 +1870,31 @@ static __net_exit void l2tp_pre_exit_net(struct net *net)
+ 	}
+ }
+ 
++static int l2tp_idr_item_unexpected(int id, void *p, void *data)
++{
++	const char *idr_name = data;
++
++	pr_err("l2tp: %s IDR not empty at net %d exit\n", idr_name, id);
++	WARN_ON_ONCE(1);
++	return 1;
++}
++
+ static __net_exit void l2tp_exit_net(struct net *net)
+ {
+ 	struct l2tp_net *pn = l2tp_pernet(net);
+ 
+-	WARN_ON_ONCE(!idr_is_empty(&pn->l2tp_v2_session_idr));
++	/* Our per-net IDRs should be empty. Check that is so, to
++	 * help catch cleanup races or refcnt leaks.
++	 */
++	idr_for_each(&pn->l2tp_v2_session_idr, l2tp_idr_item_unexpected,
++		     "v2_session");
++	idr_for_each(&pn->l2tp_v3_session_idr, l2tp_idr_item_unexpected,
++		     "v3_session");
++	idr_for_each(&pn->l2tp_tunnel_idr, l2tp_idr_item_unexpected,
++		     "tunnel");
++
+ 	idr_destroy(&pn->l2tp_v2_session_idr);
+-	WARN_ON_ONCE(!idr_is_empty(&pn->l2tp_v3_session_idr));
+ 	idr_destroy(&pn->l2tp_v3_session_idr);
+-	WARN_ON_ONCE(!idr_is_empty(&pn->l2tp_tunnel_idr));
+ 	idr_destroy(&pn->l2tp_tunnel_idr);
+ }
+ 
+diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
+index 4eb52add7103b0..0259cde394ba09 100644
+--- a/net/llc/af_llc.c
++++ b/net/llc/af_llc.c
+@@ -1098,7 +1098,7 @@ static int llc_ui_setsockopt(struct socket *sock, int level, int optname,
+ 	lock_sock(sk);
+ 	if (unlikely(level != SOL_LLC || optlen != sizeof(int)))
+ 		goto out;
+-	rc = copy_from_sockptr(&opt, optval, sizeof(opt));
++	rc = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen);
+ 	if (rc)
+ 		goto out;
+ 	rc = -EINVAL;
+diff --git a/net/netfilter/ipset/ip_set_bitmap_ip.c b/net/netfilter/ipset/ip_set_bitmap_ip.c
+index e4fa00abde6a2a..5988b9bb9029dc 100644
+--- a/net/netfilter/ipset/ip_set_bitmap_ip.c
++++ b/net/netfilter/ipset/ip_set_bitmap_ip.c
+@@ -163,11 +163,8 @@ bitmap_ip_uadt(struct ip_set *set, struct nlattr *tb[],
+ 		ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
+ 		if (ret)
+ 			return ret;
+-		if (ip > ip_to) {
++		if (ip > ip_to)
+ 			swap(ip, ip_to);
+-			if (ip < map->first_ip)
+-				return -IPSET_ERR_BITMAP_RANGE;
+-		}
+ 	} else if (tb[IPSET_ATTR_CIDR]) {
+ 		u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+ 
+@@ -178,7 +175,7 @@ bitmap_ip_uadt(struct ip_set *set, struct nlattr *tb[],
+ 		ip_to = ip;
+ 	}
+ 
+-	if (ip_to > map->last_ip)
++	if (ip < map->first_ip || ip_to > map->last_ip)
+ 		return -IPSET_ERR_BITMAP_RANGE;
+ 
+ 	for (; !before(ip_to, ip); ip += map->hosts) {
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 588a2757986c1d..4a137afaf0b87e 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -3295,25 +3295,37 @@ int nft_expr_inner_parse(const struct nft_ctx *ctx, const struct nlattr *nla,
+ 	if (!tb[NFTA_EXPR_DATA] || !tb[NFTA_EXPR_NAME])
+ 		return -EINVAL;
+ 
++	rcu_read_lock();
++
+ 	type = __nft_expr_type_get(ctx->family, tb[NFTA_EXPR_NAME]);
+-	if (!type)
+-		return -ENOENT;
++	if (!type) {
++		err = -ENOENT;
++		goto out_unlock;
++	}
+ 
+-	if (!type->inner_ops)
+-		return -EOPNOTSUPP;
++	if (!type->inner_ops) {
++		err = -EOPNOTSUPP;
++		goto out_unlock;
++	}
+ 
+ 	err = nla_parse_nested_deprecated(info->tb, type->maxattr,
+ 					  tb[NFTA_EXPR_DATA],
+ 					  type->policy, NULL);
+ 	if (err < 0)
+-		goto err_nla_parse;
++		goto out_unlock;
+ 
+ 	info->attr = nla;
+ 	info->ops = type->inner_ops;
+ 
++	/* No module reference will be taken on type->owner.
++	 * Presence of type->inner_ops implies that the expression
++	 * is builtin, so it cannot go away.
++	 */
++	rcu_read_unlock();
+ 	return 0;
+ 
+-err_nla_parse:
++out_unlock:
++	rcu_read_unlock();
+ 	return err;
+ }
+ 
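The locking idea, as a hedged standalone sketch (find_builtin_type and its fields are invented for illustration): an RCU-protected type table can be searched without pinning the owning module, provided the result is kept only when some property, here a non-NULL inner_ops, proves the entry is builtin and therefore cannot be unloaded.

#include <linux/rculist.h>
#include <linux/string.h>

struct expr_type {
	const char *name;
	const void *inner_ops;	/* non-NULL only for builtin types */
	struct list_head list;
};

static struct expr_type *find_builtin_type(struct list_head *types,
					   const char *name)
{
	struct expr_type *t;

	rcu_read_lock();
	list_for_each_entry_rcu(t, types, list) {
		if (!strcmp(t->name, name) && t->inner_ops) {
			/* builtin: usable after unlock, no module ref */
			rcu_read_unlock();
			return t;
		}
	}
	rcu_read_unlock();
	return NULL;
}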
+@@ -3412,13 +3424,15 @@ void nft_expr_destroy(const struct nft_ctx *ctx, struct nft_expr *expr)
+  * Rules
+  */
+ 
+-static struct nft_rule *__nft_rule_lookup(const struct nft_chain *chain,
++static struct nft_rule *__nft_rule_lookup(const struct net *net,
++					  const struct nft_chain *chain,
+ 					  u64 handle)
+ {
+ 	struct nft_rule *rule;
+ 
+ 	// FIXME: this sucks
+-	list_for_each_entry_rcu(rule, &chain->rules, list) {
++	list_for_each_entry_rcu(rule, &chain->rules, list,
++				lockdep_commit_lock_is_held(net)) {
+ 		if (handle == rule->handle)
+ 			return rule;
+ 	}
+@@ -3426,13 +3440,14 @@ static struct nft_rule *__nft_rule_lookup(const struct nft_chain *chain,
+ 	return ERR_PTR(-ENOENT);
+ }
+ 
+-static struct nft_rule *nft_rule_lookup(const struct nft_chain *chain,
++static struct nft_rule *nft_rule_lookup(const struct net *net,
++					const struct nft_chain *chain,
+ 					const struct nlattr *nla)
+ {
+ 	if (nla == NULL)
+ 		return ERR_PTR(-EINVAL);
+ 
+-	return __nft_rule_lookup(chain, be64_to_cpu(nla_get_be64(nla)));
++	return __nft_rule_lookup(net, chain, be64_to_cpu(nla_get_be64(nla)));
+ }
+ 
+ static const struct nla_policy nft_rule_policy[NFTA_RULE_MAX + 1] = {
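A hedged note on the new fourth argument: list_for_each_entry_rcu() accepts an optional lockdep expression, and traversal is then considered legal either inside rcu_read_lock() or whenever that expression holds. A minimal sketch with an invented mutex standing in for the commit lock:

#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/types.h>

struct item {
	u64 handle;
	struct list_head list;
};

static DEFINE_MUTEX(commit_mutex);	/* stands in for the nft commit lock */

static struct item *item_lookup(struct list_head *head, u64 handle)
{
	struct item *it;

	/* Legal under rcu_read_lock() or with commit_mutex held; the
	 * condition documents, and lets lockdep verify, the second case.
	 */
	list_for_each_entry_rcu(it, head, list,
				lockdep_is_held(&commit_mutex)) {
		if (it->handle == handle)
			return it;
	}
	return NULL;
}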
+@@ -3733,7 +3748,7 @@ static int nf_tables_dump_rules_done(struct netlink_callback *cb)
+ 	return 0;
+ }
+ 
+-/* called with rcu_read_lock held */
++/* Caller must hold rcu read lock or transaction mutex */
+ static struct sk_buff *
+ nf_tables_getrule_single(u32 portid, const struct nfnl_info *info,
+ 			 const struct nlattr * const nla[], bool reset)
+@@ -3760,7 +3775,7 @@ nf_tables_getrule_single(u32 portid, const struct nfnl_info *info,
+ 		return ERR_CAST(chain);
+ 	}
+ 
+-	rule = nft_rule_lookup(chain, nla[NFTA_RULE_HANDLE]);
++	rule = nft_rule_lookup(net, chain, nla[NFTA_RULE_HANDLE]);
+ 	if (IS_ERR(rule)) {
+ 		NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_HANDLE]);
+ 		return ERR_CAST(rule);
+@@ -4058,7 +4073,7 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
+ 
+ 	if (nla[NFTA_RULE_HANDLE]) {
+ 		handle = be64_to_cpu(nla_get_be64(nla[NFTA_RULE_HANDLE]));
+-		rule = __nft_rule_lookup(chain, handle);
++		rule = __nft_rule_lookup(net, chain, handle);
+ 		if (IS_ERR(rule)) {
+ 			NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_HANDLE]);
+ 			return PTR_ERR(rule);
+@@ -4080,7 +4095,7 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
+ 
+ 		if (nla[NFTA_RULE_POSITION]) {
+ 			pos_handle = be64_to_cpu(nla_get_be64(nla[NFTA_RULE_POSITION]));
+-			old_rule = __nft_rule_lookup(chain, pos_handle);
++			old_rule = __nft_rule_lookup(net, chain, pos_handle);
+ 			if (IS_ERR(old_rule)) {
+ 				NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_POSITION]);
+ 				return PTR_ERR(old_rule);
+@@ -4297,7 +4312,7 @@ static int nf_tables_delrule(struct sk_buff *skb, const struct nfnl_info *info,
+ 
+ 	if (chain) {
+ 		if (nla[NFTA_RULE_HANDLE]) {
+-			rule = nft_rule_lookup(chain, nla[NFTA_RULE_HANDLE]);
++			rule = nft_rule_lookup(info->net, chain, nla[NFTA_RULE_HANDLE]);
+ 			if (IS_ERR(rule)) {
+ 				if (PTR_ERR(rule) == -ENOENT &&
+ 				    NFNL_MSG_TYPE(info->nlh->nlmsg_type) == NFT_MSG_DESTROYRULE)
+@@ -7790,9 +7805,7 @@ static int nf_tables_updobj(const struct nft_ctx *ctx,
+ 	struct nft_trans *trans;
+ 	int err = -ENOMEM;
+ 
+-	if (!try_module_get(type->owner))
+-		return -ENOENT;
+-
++	/* caller must have obtained type->owner reference. */
+ 	trans = nft_trans_alloc(ctx, NFT_MSG_NEWOBJ,
+ 				sizeof(struct nft_trans_obj));
+ 	if (!trans)
+@@ -7860,15 +7873,16 @@ static int nf_tables_newobj(struct sk_buff *skb, const struct nfnl_info *info,
+ 		if (info->nlh->nlmsg_flags & NLM_F_REPLACE)
+ 			return -EOPNOTSUPP;
+ 
+-		type = __nft_obj_type_get(objtype, family);
+-		if (WARN_ON_ONCE(!type))
+-			return -ENOENT;
+-
+ 		if (!obj->ops->update)
+ 			return 0;
+ 
++		type = nft_obj_type_get(net, objtype, family);
++		if (WARN_ON_ONCE(IS_ERR(type)))
++			return PTR_ERR(type);
++
+ 		nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla);
+ 
++		/* type->owner reference is put when transaction object is released. */
+ 		return nf_tables_updobj(&ctx, type, nla[NFTA_OBJ_DATA], obj);
+ 	}
+ 
+@@ -8104,7 +8118,7 @@ static int nf_tables_dump_obj_done(struct netlink_callback *cb)
+ 	return 0;
+ }
+ 
+-/* called with rcu_read_lock held */
++/* Caller must hold rcu read lock or transaction mutex */
+ static struct sk_buff *
+ nf_tables_getobj_single(u32 portid, const struct nfnl_info *info,
+ 			const struct nlattr * const nla[], bool reset)
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index f84aad420d4464..775d707ec708a7 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -2176,9 +2176,14 @@ netlink_ack_tlv_len(struct netlink_sock *nlk, int err,
+ 	return tlvlen;
+ }
+ 
++static bool nlmsg_check_in_payload(const struct nlmsghdr *nlh, const void *addr)
++{
++	return !WARN_ON(addr < nlmsg_data(nlh) ||
++			addr - (const void *) nlh >= nlh->nlmsg_len);
++}
++
+ static void
+-netlink_ack_tlv_fill(struct sk_buff *in_skb, struct sk_buff *skb,
+-		     const struct nlmsghdr *nlh, int err,
++netlink_ack_tlv_fill(struct sk_buff *skb, const struct nlmsghdr *nlh, int err,
+ 		     const struct netlink_ext_ack *extack)
+ {
+ 	if (extack->_msg)
+@@ -2190,9 +2195,7 @@ netlink_ack_tlv_fill(struct sk_buff *in_skb, struct sk_buff *skb,
+ 	if (!err)
+ 		return;
+ 
+-	if (extack->bad_attr &&
+-	    !WARN_ON((u8 *)extack->bad_attr < in_skb->data ||
+-		     (u8 *)extack->bad_attr >= in_skb->data + in_skb->len))
++	if (extack->bad_attr && nlmsg_check_in_payload(nlh, extack->bad_attr))
+ 		WARN_ON(nla_put_u32(skb, NLMSGERR_ATTR_OFFS,
+ 				    (u8 *)extack->bad_attr - (const u8 *)nlh));
+ 	if (extack->policy)
+@@ -2201,9 +2204,7 @@ netlink_ack_tlv_fill(struct sk_buff *in_skb, struct sk_buff *skb,
+ 	if (extack->miss_type)
+ 		WARN_ON(nla_put_u32(skb, NLMSGERR_ATTR_MISS_TYPE,
+ 				    extack->miss_type));
+-	if (extack->miss_nest &&
+-	    !WARN_ON((u8 *)extack->miss_nest < in_skb->data ||
+-		     (u8 *)extack->miss_nest > in_skb->data + in_skb->len))
++	if (extack->miss_nest && nlmsg_check_in_payload(nlh, extack->miss_nest))
+ 		WARN_ON(nla_put_u32(skb, NLMSGERR_ATTR_MISS_NEST,
+ 				    (u8 *)extack->miss_nest - (const u8 *)nlh));
+ }
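A hedged note on why the helper bounds against nlh rather than in_skb as before: the NLMSGERR_ATTR_OFFS value reported to userspace is an offset from the message header, so the attribute pointer must lie inside that one message; a pointer elsewhere in the same skb (say, a later message in a batch) would yield a meaningless offset. An illustrative restatement of the bounds:

#include <net/netlink.h>

/* Illustrative restatement of nlmsg_check_in_payload()'s range */
static bool addr_in_payload(const struct nlmsghdr *nlh, const void *addr)
{
	if (addr < nlmsg_data(nlh))	/* below the fixed header's payload */
		return false;
	/* nlmsg_len covers header plus payload, hence the upper bound */
	return (size_t)(addr - (const void *)nlh) < nlh->nlmsg_len;
}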
+@@ -2232,7 +2233,7 @@ static int netlink_dump_done(struct netlink_sock *nlk, struct sk_buff *skb,
+ 	if (extack_len) {
+ 		nlh->nlmsg_flags |= NLM_F_ACK_TLVS;
+ 		if (skb_tailroom(skb) >= extack_len) {
+-			netlink_ack_tlv_fill(cb->skb, skb, cb->nlh,
++			netlink_ack_tlv_fill(skb, cb->nlh,
+ 					     nlk->dump_done_errno, extack);
+ 			nlmsg_end(skb, nlh);
+ 		}
+@@ -2491,7 +2492,7 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
+ 	}
+ 
+ 	if (tlvlen)
+-		netlink_ack_tlv_fill(in_skb, skb, nlh, err, extack);
++		netlink_ack_tlv_fill(skb, nlh, err, extack);
+ 
+ 	nlmsg_end(skb, rep);
+ 
+diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c
+index c268c2b011f428..a8e21060112ffd 100644
+--- a/net/rfkill/rfkill-gpio.c
++++ b/net/rfkill/rfkill-gpio.c
+@@ -32,8 +32,12 @@ static int rfkill_gpio_set_power(void *data, bool blocked)
+ {
+ 	struct rfkill_gpio_data *rfkill = data;
+ 
+-	if (!blocked && !IS_ERR(rfkill->clk) && !rfkill->clk_enabled)
+-		clk_enable(rfkill->clk);
++	if (!blocked && !IS_ERR(rfkill->clk) && !rfkill->clk_enabled) {
++		int ret = clk_enable(rfkill->clk);
++
++		if (ret)
++			return ret;
++	}
+ 
+ 	gpiod_set_value_cansleep(rfkill->shutdown_gpio, !blocked);
+ 	gpiod_set_value_cansleep(rfkill->reset_gpio, !blocked);
+diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
+index f4844683e12039..9d8bd0b37e41da 100644
+--- a/net/rxrpc/af_rxrpc.c
++++ b/net/rxrpc/af_rxrpc.c
+@@ -707,9 +707,10 @@ static int rxrpc_setsockopt(struct socket *sock, int level, int optname,
+ 			ret = -EISCONN;
+ 			if (rx->sk.sk_state != RXRPC_UNBOUND)
+ 				goto error;
+-			ret = copy_from_sockptr(&min_sec_level, optval,
+-				       sizeof(unsigned int));
+-			if (ret < 0)
++			ret = copy_safe_from_sockptr(&min_sec_level,
++						     sizeof(min_sec_level),
++						     optval, optlen);
++			if (ret)
+ 				goto error;
+ 			ret = -EINVAL;
+ 			if (min_sec_level > RXRPC_SECURITY_MAX)
+diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
+index 19a49af5a9e527..afefe124d9039e 100644
+--- a/net/sched/sch_fq.c
++++ b/net/sched/sch_fq.c
+@@ -331,6 +331,12 @@ static bool fq_fastpath_check(const struct Qdisc *sch, struct sk_buff *skb,
+ 		 */
+ 		if (q->internal.qlen >= 8)
+ 			return false;
++
++		/* Ordering invariants fall apart if some delayed flows
++		 * are ready but we haven't serviced them, yet.
++		 */
++		if (q->time_next_delayed_flow <= now)
++			return false;
+ 	}
+ 
+ 	sk = skb->sk;
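A hedged restatement of the invariant behind the added check (field names as in sch_fq): fastpath packets are sent ahead of regular flows, so the fastpath may only be entered while every throttled flow's release time is still in the future; once time_next_delayed_flow has passed, a due packet exists that must not be overtaken. Roughly:

#include <linux/ktime.h>
#include <linux/types.h>

/* illustrative condition, mirroring fq_fastpath_check() */
static bool fastpath_ok(u64 time_next_delayed_flow, unsigned int internal_qlen)
{
	u64 now = ktime_get_ns();

	if (internal_qlen >= 8)
		return false;	/* fastpath already backlogged */
	if (time_next_delayed_flow <= now)
		return false;	/* a throttled packet is now due */
	return true;
}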
+diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
+index 1bd3e531b0e090..059f6ef1ad1898 100644
+--- a/net/sunrpc/cache.c
++++ b/net/sunrpc/cache.c
+@@ -1427,7 +1427,9 @@ static int c_show(struct seq_file *m, void *p)
+ 		seq_printf(m, "# expiry=%lld refcnt=%d flags=%lx\n",
+ 			   convert_to_wallclock(cp->expiry_time),
+ 			   kref_read(&cp->ref), cp->flags);
+-	cache_get(cp);
++	if (!cache_get_rcu(cp))
++		return 0;
++
+ 	if (cache_check(cd, cp, NULL))
+ 		/* cache_check does a cache_put on failure */
+ 		seq_puts(m, "# ");
+diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
+index 825ec53576912a..59e2c46240f5c1 100644
+--- a/net/sunrpc/svcsock.c
++++ b/net/sunrpc/svcsock.c
+@@ -1551,6 +1551,10 @@ static struct svc_xprt *svc_create_socket(struct svc_serv *serv,
+ 	newlen = error;
+ 
+ 	if (protocol == IPPROTO_TCP) {
++		__netns_tracker_free(net, &sock->sk->ns_tracker, false);
++		sock->sk->sk_net_refcnt = 1;
++		get_net_track(net, &sock->sk->ns_tracker, GFP_KERNEL);
++		sock_inuse_add(net, 1);
+ 		if ((error = kernel_listen(sock, 64)) < 0)
+ 			goto bummer;
+ 	}
+diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c
+index 58ae6ec4f25b4f..415c0310101f0d 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma.c
++++ b/net/sunrpc/xprtrdma/svc_rdma.c
+@@ -233,25 +233,34 @@ static int svc_rdma_proc_init(void)
+ 
+ 	rc = percpu_counter_init(&svcrdma_stat_read, 0, GFP_KERNEL);
+ 	if (rc)
+-		goto out_err;
++		goto err;
+ 	rc = percpu_counter_init(&svcrdma_stat_recv, 0, GFP_KERNEL);
+ 	if (rc)
+-		goto out_err;
++		goto err_read;
+ 	rc = percpu_counter_init(&svcrdma_stat_sq_starve, 0, GFP_KERNEL);
+ 	if (rc)
+-		goto out_err;
++		goto err_recv;
+ 	rc = percpu_counter_init(&svcrdma_stat_write, 0, GFP_KERNEL);
+ 	if (rc)
+-		goto out_err;
++		goto err_sq;
+ 
+ 	svcrdma_table_header = register_sysctl("sunrpc/svc_rdma",
+ 					       svcrdma_parm_table);
++	if (!svcrdma_table_header)
++		goto err_write;
++
+ 	return 0;
+ 
+-out_err:
++err_write:
++	rc = -ENOMEM;
++	percpu_counter_destroy(&svcrdma_stat_write);
++err_sq:
+ 	percpu_counter_destroy(&svcrdma_stat_sq_starve);
++err_recv:
+ 	percpu_counter_destroy(&svcrdma_stat_recv);
++err_read:
+ 	percpu_counter_destroy(&svcrdma_stat_read);
++err:
+ 	return rc;
+ }
+ 
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
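The error handling above is the standard kernel unwind ladder; a hedged, self-contained miniature (stat_a and stat_b are made-up counters): each label destroys exactly the resources initialized before the failing step, in reverse order, so partial initialization neither leaks nor double-frees.

#include <linux/gfp.h>
#include <linux/percpu_counter.h>

static struct percpu_counter stat_a, stat_b;

static int init_stats(void)
{
	int rc;

	rc = percpu_counter_init(&stat_a, 0, GFP_KERNEL);
	if (rc)
		return rc;	/* nothing to unwind yet */

	rc = percpu_counter_init(&stat_b, 0, GFP_KERNEL);
	if (rc)
		goto err_a;	/* undo stat_a only */

	return 0;

err_a:
	percpu_counter_destroy(&stat_a);
	return rc;
}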
+index ae3fb9bc8a2168..292022f0976e17 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+@@ -493,7 +493,13 @@ static bool xdr_check_write_chunk(struct svc_rdma_recv_ctxt *rctxt)
+ 	if (xdr_stream_decode_u32(&rctxt->rc_stream, &segcount))
+ 		return false;
+ 
+-	/* A bogus segcount causes this buffer overflow check to fail. */
++	/* Before trusting the segcount value enough to use it in
++	 * a computation, perform a simple range check. This is an
++	 * arbitrary but sensible limit (ie, not architectural).
++	 */
++	if (unlikely(segcount > RPCSVC_MAXPAGES))
++		return false;
++
+ 	p = xdr_inline_decode(&rctxt->rc_stream,
+ 			      segcount * rpcrdma_segment_maxsz * sizeof(*p));
+ 	return p != NULL;
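Rough arithmetic on why the bound matters (the per-segment size is quoted from memory and should be treated as illustrative): each chunk segment decodes to rpcrdma_segment_maxsz 32-bit words, about 16 bytes, so a hostile segcount can request gigabytes from the decode stream without any integer overflow on 64-bit, which is why a semantic range check beats overflow detection here.

u32 segcount = 0x10000000;	/* hostile value, for illustration */
size_t want = (size_t)segcount * rpcrdma_segment_maxsz * sizeof(__be32);
/* 0x10000000 * 4 words * 4 bytes = 4 GiB asked of xdr_inline_decode();
 * no 64-bit overflow occurs, just an absurd length that the new
 * RPCSVC_MAXPAGES bound rejects before the multiplication is reached.
 */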
+diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
+index 1326fbf45a3479..b69e6290acfabe 100644
+--- a/net/sunrpc/xprtsock.c
++++ b/net/sunrpc/xprtsock.c
+@@ -1198,6 +1198,7 @@ static void xs_sock_reset_state_flags(struct rpc_xprt *xprt)
+ 	clear_bit(XPRT_SOCK_WAKE_WRITE, &transport->sock_state);
+ 	clear_bit(XPRT_SOCK_WAKE_DISCONNECT, &transport->sock_state);
+ 	clear_bit(XPRT_SOCK_NOSPACE, &transport->sock_state);
++	clear_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state);
+ }
+ 
+ static void xs_run_error_worker(struct sock_xprt *transport, unsigned int nr)
+@@ -1939,6 +1940,13 @@ static struct socket *xs_create_sock(struct rpc_xprt *xprt,
+ 		goto out;
+ 	}
+ 
++	if (protocol == IPPROTO_TCP) {
++		__netns_tracker_free(xprt->xprt_net, &sock->sk->ns_tracker, false);
++		sock->sk->sk_net_refcnt = 1;
++		get_net_track(xprt->xprt_net, &sock->sk->ns_tracker, GFP_KERNEL);
++		sock_inuse_add(xprt->xprt_net, 1);
++	}
++
+ 	filp = sock_alloc_file(sock, O_NONBLOCK, NULL);
+ 	if (IS_ERR(filp))
+ 		return ERR_CAST(filp);
+@@ -2614,11 +2622,10 @@ static int xs_tls_handshake_sync(struct rpc_xprt *lower_xprt, struct xprtsec_par
+ 	rc = wait_for_completion_interruptible_timeout(&lower_transport->handshake_done,
+ 						       XS_TLS_HANDSHAKE_TO);
+ 	if (rc <= 0) {
+-		if (!tls_handshake_cancel(sk)) {
+-			if (rc == 0)
+-				rc = -ETIMEDOUT;
+-			goto out_put_xprt;
+-		}
++		tls_handshake_cancel(sk);
++		if (rc == 0)
++			rc = -ETIMEDOUT;
++		goto out_put_xprt;
+ 	}
+ 
+ 	rc = lower_transport->xprt_err;
+diff --git a/net/wireless/core.c b/net/wireless/core.c
+index 74ca18833df172..7d313fb66d76ba 100644
+--- a/net/wireless/core.c
++++ b/net/wireless/core.c
+@@ -603,16 +603,20 @@ struct wiphy *wiphy_new_nm(const struct cfg80211_ops *ops, int sizeof_priv,
+ }
+ EXPORT_SYMBOL(wiphy_new_nm);
+ 
+-static int wiphy_verify_combinations(struct wiphy *wiphy)
++static
++int wiphy_verify_iface_combinations(struct wiphy *wiphy,
++				    const struct ieee80211_iface_combination *iface_comb,
++				    int n_iface_comb,
++				    bool combined_radio)
+ {
+ 	const struct ieee80211_iface_combination *c;
+ 	int i, j;
+ 
+-	for (i = 0; i < wiphy->n_iface_combinations; i++) {
++	for (i = 0; i < n_iface_comb; i++) {
+ 		u32 cnt = 0;
+ 		u16 all_iftypes = 0;
+ 
+-		c = &wiphy->iface_combinations[i];
++		c = &iface_comb[i];
+ 
+ 		/*
+ 		 * Combinations with just one interface aren't real,
+@@ -625,9 +629,13 @@ static int wiphy_verify_combinations(struct wiphy *wiphy)
+ 		if (WARN_ON(!c->num_different_channels))
+ 			return -EINVAL;
+ 
+-		/* DFS only works on one channel. */
+-		if (WARN_ON(c->radar_detect_widths &&
+-			    (c->num_different_channels > 1)))
++		/* DFS only works on one channel. Avoid this check
++		 * for multi-radio global combination, since it holds
++		 * the capabilities of all radio combinations.
++		 */
++		if (!combined_radio &&
++		    WARN_ON(c->radar_detect_widths &&
++			    c->num_different_channels > 1))
+ 			return -EINVAL;
+ 
+ 		if (WARN_ON(!c->n_limits))
+@@ -648,13 +656,21 @@ static int wiphy_verify_combinations(struct wiphy *wiphy)
+ 			if (WARN_ON(wiphy->software_iftypes & types))
+ 				return -EINVAL;
+ 
+-			/* Only a single P2P_DEVICE can be allowed */
+-			if (WARN_ON(types & BIT(NL80211_IFTYPE_P2P_DEVICE) &&
++			/* Only a single P2P_DEVICE can be allowed, avoid this
++			 * check for multi-radio global combination, since it
++			 * holds the capabilities of all radio combinations.
++			 */
++			if (!combined_radio &&
++			    WARN_ON(types & BIT(NL80211_IFTYPE_P2P_DEVICE) &&
+ 				    c->limits[j].max > 1))
+ 				return -EINVAL;
+ 
+-			/* Only a single NAN can be allowed */
+-			if (WARN_ON(types & BIT(NL80211_IFTYPE_NAN) &&
++			/* Only a single NAN can be allowed, avoid this
++			 * check for multi-radio global combination, since it
++			 * holds the capabilities of all radio combinations.
++			 */
++			if (!combined_radio &&
++			    WARN_ON(types & BIT(NL80211_IFTYPE_NAN) &&
+ 				    c->limits[j].max > 1))
+ 				return -EINVAL;
+ 
+@@ -693,6 +709,34 @@ static int wiphy_verify_combinations(struct wiphy *wiphy)
+ 	return 0;
+ }
+ 
++static int wiphy_verify_combinations(struct wiphy *wiphy)
++{
++	int i, ret;
++	bool combined_radio = false;
++
++	if (wiphy->n_radio) {
++		for (i = 0; i < wiphy->n_radio; i++) {
++			const struct wiphy_radio *radio = &wiphy->radio[i];
++
++			ret = wiphy_verify_iface_combinations(wiphy,
++							      radio->iface_combinations,
++							      radio->n_iface_combinations,
++							      false);
++			if (ret)
++				return ret;
++		}
++
++		combined_radio = true;
++	}
++
++	ret = wiphy_verify_iface_combinations(wiphy,
++					      wiphy->iface_combinations,
++					      wiphy->n_iface_combinations,
++					      combined_radio);
++
++	return ret;
++}
++
+ int wiphy_register(struct wiphy *wiphy)
+ {
+ 	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
+diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
+index 4dac8185472100..a5eb92d93074e6 100644
+--- a/net/wireless/mlme.c
++++ b/net/wireless/mlme.c
+@@ -340,12 +340,6 @@ cfg80211_mlme_check_mlo_compat(const struct ieee80211_multi_link_elem *mle_a,
+ 		return -EINVAL;
+ 	}
+ 
+-	if (ieee80211_mle_get_eml_med_sync_delay((const u8 *)mle_a) !=
+-	    ieee80211_mle_get_eml_med_sync_delay((const u8 *)mle_b)) {
+-		NL_SET_ERR_MSG(extack, "link EML medium sync delay mismatch");
+-		return -EINVAL;
+-	}
+-
+ 	if (ieee80211_mle_get_eml_cap((const u8 *)mle_a) !=
+ 	    ieee80211_mle_get_eml_cap((const u8 *)mle_b)) {
+ 		NL_SET_ERR_MSG(extack, "link EML capabilities mismatch");
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index d7d099f7118ab5..9b1b9dc5a7eb2a 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -9776,6 +9776,7 @@ nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev,
+ 	request = kzalloc(size, GFP_KERNEL);
+ 	if (!request)
+ 		return ERR_PTR(-ENOMEM);
++	request->n_channels = n_channels;
+ 
+ 	if (n_ssids)
+ 		request->ssids = (void *)request +
+diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
+index 1140b2a120caec..b57d5d2904eb46 100644
+--- a/net/xdp/xsk.c
++++ b/net/xdp/xsk.c
+@@ -675,6 +675,8 @@ static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
+ 		len = desc->len;
+ 
+ 		if (!skb) {
++			first_frag = true;
++
+ 			hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(dev->needed_headroom));
+ 			tr = dev->needed_tailroom;
+ 			skb = sock_alloc_send_skb(&xs->sk, hr + len + tr, 1, &err);
+@@ -685,12 +687,8 @@ static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
+ 			skb_put(skb, len);
+ 
+ 			err = skb_store_bits(skb, 0, buffer, len);
+-			if (unlikely(err)) {
+-				kfree_skb(skb);
++			if (unlikely(err))
+ 				goto free_err;
+-			}
+-
+-			first_frag = true;
+ 		} else {
+ 			int nr_frags = skb_shinfo(skb)->nr_frags;
+ 			struct page *page;
+@@ -758,6 +756,9 @@ static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
+ 	return skb;
+ 
+ free_err:
++	if (first_frag && skb)
++		kfree_skb(skb);
++
+ 	if (err == -EOVERFLOW) {
+ 		/* Drop the packet */
+ 		xsk_set_destructor_arg(xs->skb);
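A hedged summary of the ownership rule the reordering establishes: first_frag is now set before any failure can happen, letting the error path distinguish an skb this call allocated from a multi-fragment skb already tracked as xs->skb and owned elsewhere. The shape, with invented names:

#include <linux/skbuff.h>

/* Illustrative shape only: extend *partial if one exists, else
 * allocate.  The point is the error path: free only what this
 * call allocated.
 */
static struct sk_buff *extend_or_alloc(struct sk_buff **partial,
				       const void *buf, unsigned int len)
{
	bool first_frag = false;
	struct sk_buff *skb = *partial;

	if (!skb) {
		first_frag = true;
		skb = alloc_skb(len, GFP_KERNEL);
		if (!skb)
			return NULL;
		skb_put(skb, len);
		if (skb_store_bits(skb, 0, buf, len))
			goto free_err;
	}
	/* ... the extend path would attach buf as a fragment here ... */

	*partial = skb;
	return skb;

free_err:
	if (first_frag && skb)
		kfree_skb(skb);	/* never free a caller-owned partial skb */
	return NULL;
}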
+diff --git a/rust/helpers/spinlock.c b/rust/helpers/spinlock.c
+index acc1376b833c78..92f7fc41842531 100644
+--- a/rust/helpers/spinlock.c
++++ b/rust/helpers/spinlock.c
+@@ -7,10 +7,14 @@ void rust_helper___spin_lock_init(spinlock_t *lock, const char *name,
+ 				  struct lock_class_key *key)
+ {
+ #ifdef CONFIG_DEBUG_SPINLOCK
++# if defined(CONFIG_PREEMPT_RT)
++	__spin_lock_init(lock, name, key, false);
++# else /*!CONFIG_PREEMPT_RT */
+ 	__raw_spin_lock_init(spinlock_check(lock), name, key, LD_WAIT_CONFIG);
+-#else
++# endif /* CONFIG_PREEMPT_RT */
++#else /* !CONFIG_DEBUG_SPINLOCK */
+ 	spin_lock_init(lock);
+-#endif
++#endif /* CONFIG_DEBUG_SPINLOCK */
+ }
+ 
+ void rust_helper_spin_lock(spinlock_t *lock)
+diff --git a/rust/kernel/block/mq/request.rs b/rust/kernel/block/mq/request.rs
+index a0e22827f3f4ec..7943f43b957532 100644
+--- a/rust/kernel/block/mq/request.rs
++++ b/rust/kernel/block/mq/request.rs
+@@ -16,50 +16,55 @@
+     sync::atomic::{AtomicU64, Ordering},
+ };
+ 
+-/// A wrapper around a blk-mq `struct request`. This represents an IO request.
++/// A wrapper around a blk-mq [`struct request`]. This represents an IO request.
+ ///
+ /// # Implementation details
+ ///
+ /// There are four states for a request that the Rust bindings care about:
+ ///
+-/// A) Request is owned by block layer (refcount 0)
+-/// B) Request is owned by driver but with zero `ARef`s in existence
+-///    (refcount 1)
+-/// C) Request is owned by driver with exactly one `ARef` in existence
+-///    (refcount 2)
+-/// D) Request is owned by driver with more than one `ARef` in existence
+-///    (refcount > 2)
++/// 1. Request is owned by block layer (refcount 0).
++/// 2. Request is owned by driver but with zero [`ARef`]s in existence
++///    (refcount 1).
++/// 3. Request is owned by driver with exactly one [`ARef`] in existence
++///    (refcount 2).
++/// 4. Request is owned by driver with more than one [`ARef`] in existence
++///    (refcount > 2).
+ ///
+ ///
+-/// We need to track A and B to ensure we fail tag to request conversions for
++/// We need to track 1 and 2 to ensure we fail tag to request conversions for
+ /// requests that are not owned by the driver.
+ ///
+-/// We need to track C and D to ensure that it is safe to end the request and hand
++/// We need to track 3 and 4 to ensure that it is safe to end the request and hand
+ /// back ownership to the block layer.
+ ///
+ /// The states are tracked through the private `refcount` field of
+ /// `RequestDataWrapper`. This structure lives in the private data area of the C
+-/// `struct request`.
++/// [`struct request`].
+ ///
+ /// # Invariants
+ ///
+-/// * `self.0` is a valid `struct request` created by the C portion of the kernel.
++/// * `self.0` is a valid [`struct request`] created by the C portion of the
++///   kernel.
+ /// * The private data area associated with this request must be an initialized
+ ///   and valid `RequestDataWrapper<T>`.
+ /// * `self` is reference counted by atomic modification of
+-///   self.wrapper_ref().refcount().
++///   `self.wrapper_ref().refcount()`.
++///
++/// [`struct request`]: srctree/include/linux/blk-mq.h
+ ///
+ #[repr(transparent)]
+ pub struct Request<T: Operations>(Opaque<bindings::request>, PhantomData<T>);
+ 
+ impl<T: Operations> Request<T> {
+-    /// Create an `ARef<Request>` from a `struct request` pointer.
++    /// Create an [`ARef<Request>`] from a [`struct request`] pointer.
+     ///
+     /// # Safety
+     ///
+     /// * The caller must own a refcount on `ptr` that is transferred to the
+-    ///   returned `ARef`.
+-    /// * The type invariants for `Request` must hold for the pointee of `ptr`.
++    ///   returned [`ARef`].
++    /// * The type invariants for [`Request`] must hold for the pointee of `ptr`.
++    ///
++    /// [`struct request`]: srctree/include/linux/blk-mq.h
+     pub(crate) unsafe fn aref_from_raw(ptr: *mut bindings::request) -> ARef<Self> {
+         // INVARIANT: By the safety requirements of this function, invariants are upheld.
+         // SAFETY: By the safety requirement of this function, we own a
+@@ -84,12 +89,14 @@ pub(crate) unsafe fn start_unchecked(this: &ARef<Self>) {
+     }
+ 
+     /// Try to take exclusive ownership of `this` by dropping the refcount to 0.
+-    /// This fails if `this` is not the only `ARef` pointing to the underlying
+-    /// `Request`.
++    /// This fails if `this` is not the only [`ARef`] pointing to the underlying
++    /// [`Request`].
+     ///
+-    /// If the operation is successful, `Ok` is returned with a pointer to the
+-    /// C `struct request`. If the operation fails, `this` is returned in the
+-    /// `Err` variant.
++    /// If the operation is successful, [`Ok`] is returned with a pointer to the
++    /// C [`struct request`]. If the operation fails, `this` is returned in the
++    /// [`Err`] variant.
++    ///
++    /// [`struct request`]: srctree/include/linux/blk-mq.h
+     fn try_set_end(this: ARef<Self>) -> Result<*mut bindings::request, ARef<Self>> {
+         // We can race with `TagSet::tag_to_rq`
+         if let Err(_old) = this.wrapper_ref().refcount().compare_exchange(
+@@ -109,7 +116,7 @@ fn try_set_end(this: ARef<Self>) -> Result<*mut bindings::request, ARef<Self>> {
+ 
+     /// Notify the block layer that the request has been completed without errors.
+     ///
+-    /// This function will return `Err` if `this` is not the only `ARef`
++    /// This function will return [`Err`] if `this` is not the only [`ARef`]
+     /// referencing the request.
+     pub fn end_ok(this: ARef<Self>) -> Result<(), ARef<Self>> {
+         let request_ptr = Self::try_set_end(this)?;
+@@ -123,13 +130,13 @@ pub fn end_ok(this: ARef<Self>) -> Result<(), ARef<Self>> {
+         Ok(())
+     }
+ 
+-    /// Return a pointer to the `RequestDataWrapper` stored in the private area
++    /// Return a pointer to the [`RequestDataWrapper`] stored in the private area
+     /// of the request structure.
+     ///
+     /// # Safety
+     ///
+     /// - `this` must point to a valid allocation of size at least size of
+-    ///   `Self` plus size of `RequestDataWrapper`.
++    ///   [`Self`] plus size of [`RequestDataWrapper`].
+     pub(crate) unsafe fn wrapper_ptr(this: *mut Self) -> NonNull<RequestDataWrapper> {
+         let request_ptr = this.cast::<bindings::request>();
+         // SAFETY: By safety requirements for this function, `this` is a
+@@ -141,7 +148,7 @@ pub(crate) unsafe fn wrapper_ptr(this: *mut Self) -> NonNull<RequestDataWrapper>
+         unsafe { NonNull::new_unchecked(wrapper_ptr) }
+     }
+ 
+-    /// Return a reference to the `RequestDataWrapper` stored in the private
++    /// Return a reference to the [`RequestDataWrapper`] stored in the private
+     /// area of the request structure.
+     pub(crate) fn wrapper_ref(&self) -> &RequestDataWrapper {
+         // SAFETY: By type invariant, `self.0` is a valid allocation. Further,
+@@ -152,13 +159,15 @@ pub(crate) fn wrapper_ref(&self) -> &RequestDataWrapper {
+     }
+ }
+ 
+-/// A wrapper around data stored in the private area of the C `struct request`.
++/// A wrapper around data stored in the private area of the C [`struct request`].
++///
++/// [`struct request`]: srctree/include/linux/blk-mq.h
+ pub(crate) struct RequestDataWrapper {
+     /// The Rust request refcount has the following states:
+     ///
+     /// - 0: The request is owned by C block layer.
+-    /// - 1: The request is owned by Rust abstractions but there are no ARef references to it.
+-    /// - 2+: There are `ARef` references to the request.
++    /// - 1: The request is owned by Rust abstractions but there are no [`ARef`] references to it.
++    /// - 2+: There are [`ARef`] references to the request.
+     refcount: AtomicU64,
+ }
+ 
+@@ -204,7 +213,7 @@ fn atomic_relaxed_op_return(target: &AtomicU64, op: impl Fn(u64) -> u64) -> u64
+ }
+ 
+ /// Store the result of `op(target.load)` in `target` if `target.load() !=
+-/// pred`, returning true if the target was updated.
++/// pred`, returning [`true`] if the target was updated.
+ fn atomic_relaxed_op_unless(target: &AtomicU64, op: impl Fn(u64) -> u64, pred: u64) -> bool {
+     target
+         .fetch_update(Ordering::Relaxed, Ordering::Relaxed, |x| {
+diff --git a/rust/kernel/lib.rs b/rust/kernel/lib.rs
+index b5f4b3ce6b4820..032c9089e6862d 100644
+--- a/rust/kernel/lib.rs
++++ b/rust/kernel/lib.rs
+@@ -83,7 +83,7 @@ pub trait Module: Sized + Sync + Send {
+ 
+ /// Equivalent to `THIS_MODULE` in the C API.
+ ///
+-/// C header: [`include/linux/export.h`](srctree/include/linux/export.h)
++/// C header: [`include/linux/init.h`](srctree/include/linux/init.h)
+ pub struct ThisModule(*mut bindings::module);
+ 
+ // SAFETY: `THIS_MODULE` may be used from all threads within a module.
+diff --git a/rust/kernel/rbtree.rs b/rust/kernel/rbtree.rs
+index 25eb36fd1cdceb..d03e4aa1f4812b 100644
+--- a/rust/kernel/rbtree.rs
++++ b/rust/kernel/rbtree.rs
+@@ -884,7 +884,8 @@ fn get_neighbor_raw(&self, direction: Direction) -> Option<NonNull<bindings::rb_
+         NonNull::new(neighbor)
+     }
+ 
+-    /// SAFETY:
++    /// # Safety
++    ///
+     /// - `node` must be a valid pointer to a node in an [`RBTree`].
+     /// - The caller has immutable access to `node` for the duration of 'b.
+     unsafe fn to_key_value<'b>(node: NonNull<bindings::rb_node>) -> (&'b K, &'b V) {
+@@ -894,7 +895,8 @@ unsafe fn to_key_value<'b>(node: NonNull<bindings::rb_node>) -> (&'b K, &'b V) {
+         (k, unsafe { &*v })
+     }
+ 
+-    /// SAFETY:
++    /// # Safety
++    ///
+     /// - `node` must be a valid pointer to a node in an [`RBTree`].
+     /// - The caller has mutable access to `node` for the duration of 'b.
+     unsafe fn to_key_value_mut<'b>(node: NonNull<bindings::rb_node>) -> (&'b K, &'b mut V) {
+@@ -904,7 +906,8 @@ unsafe fn to_key_value_mut<'b>(node: NonNull<bindings::rb_node>) -> (&'b K, &'b
+         (k, unsafe { &mut *v })
+     }
+ 
+-    /// SAFETY:
++    /// # Safety
++    ///
+     /// - `node` must be a valid pointer to a node in an [`RBTree`].
+     /// - The caller has immutable access to the key for the duration of 'b.
+     unsafe fn to_key_value_raw<'b>(node: NonNull<bindings::rb_node>) -> (&'b K, *mut V) {
+diff --git a/rust/macros/lib.rs b/rust/macros/lib.rs
+index a626b1145e5c4f..90e2202ba4d5a0 100644
+--- a/rust/macros/lib.rs
++++ b/rust/macros/lib.rs
+@@ -359,7 +359,7 @@ pub fn pinned_drop(args: TokenStream, input: TokenStream) -> TokenStream {
+ /// macro_rules! pub_no_prefix {
+ ///     ($prefix:ident, $($newname:ident),+) => {
+ ///         kernel::macros::paste! {
+-///             $(pub(crate) const fn [<$newname:lower:span>]: u32 = [<$prefix $newname:span>];)+
++///             $(pub(crate) const fn [<$newname:lower:span>]() -> u32 { [<$prefix $newname:span>] })+
+ ///         }
+ ///     };
+ /// }
+diff --git a/samples/bpf/xdp_adjust_tail_kern.c b/samples/bpf/xdp_adjust_tail_kern.c
+index ffdd548627f0a4..da67bcad1c6381 100644
+--- a/samples/bpf/xdp_adjust_tail_kern.c
++++ b/samples/bpf/xdp_adjust_tail_kern.c
+@@ -57,6 +57,7 @@ static __always_inline void swap_mac(void *data, struct ethhdr *orig_eth)
+ 
+ static __always_inline __u16 csum_fold_helper(__u32 csum)
+ {
++	csum = (csum & 0xffff) + (csum >> 16);
+ 	return ~((csum & 0xffff) + (csum >> 16));
+ }
+ 
+diff --git a/samples/kfifo/dma-example.c b/samples/kfifo/dma-example.c
+index 48df719dac8c6d..8076ac410161a3 100644
+--- a/samples/kfifo/dma-example.c
++++ b/samples/kfifo/dma-example.c
+@@ -9,6 +9,7 @@
+ #include <linux/kfifo.h>
+ #include <linux/module.h>
+ #include <linux/scatterlist.h>
++#include <linux/dma-mapping.h>
+ 
+ /*
+  * This module shows how to handle fifo dma operations.
+diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
+index 4427572b24771d..b03d526e4c454a 100755
+--- a/scripts/checkpatch.pl
++++ b/scripts/checkpatch.pl
+@@ -3209,36 +3209,31 @@ sub process {
+ 
+ # Check Fixes: styles is correct
+ 		if (!$in_header_lines &&
+-		    $line =~ /^\s*fixes:?\s*(?:commit\s*)?[0-9a-f]{5,}\b/i) {
+-			my $orig_commit = "";
+-			my $id = "0123456789ab";
+-			my $title = "commit title";
+-			my $tag_case = 1;
+-			my $tag_space = 1;
+-			my $id_length = 1;
+-			my $id_case = 1;
++		    $line =~ /^\s*(fixes:?)\s*(?:commit\s*)?([0-9a-f]{5,40})(?:\s*($balanced_parens))?/i) {
++			my $tag = $1;
++			my $orig_commit = $2;
++			my $title;
+ 			my $title_has_quotes = 0;
+ 			$fixes_tag = 1;
+-
+-			if ($line =~ /(\s*fixes:?)\s+([0-9a-f]{5,})\s+($balanced_parens)/i) {
+-				my $tag = $1;
+-				$orig_commit = $2;
+-				$title = $3;
+-
+-				$tag_case = 0 if $tag eq "Fixes:";
+-				$tag_space = 0 if ($line =~ /^fixes:? [0-9a-f]{5,} ($balanced_parens)/i);
+-
+-				$id_length = 0 if ($orig_commit =~ /^[0-9a-f]{12}$/i);
+-				$id_case = 0 if ($orig_commit !~ /[A-F]/);
+-
++			if (defined $3) {
+ 				# Always strip leading/trailing parens then double quotes if existing
+-				$title = substr($title, 1, -1);
++				$title = substr($3, 1, -1);
+ 				if ($title =~ /^".*"$/) {
+ 					$title = substr($title, 1, -1);
+ 					$title_has_quotes = 1;
+ 				}
++			} else {
++				$title = "commit title"
+ 			}
+ 
++
++			my $tag_case = not ($tag eq "Fixes:");
++			my $tag_space = not ($line =~ /^fixes:? [0-9a-f]{5,40} ($balanced_parens)/i);
++
++			my $id_length = not ($orig_commit =~ /^[0-9a-f]{12}$/i);
++			my $id_case = not ($orig_commit !~ /[A-F]/);
++
++			my $id = "0123456789ab";
+ 			my ($cid, $ctitle) = git_commit_info($orig_commit, $id,
+ 							     $title);
+ 
+diff --git a/scripts/faddr2line b/scripts/faddr2line
+index fe0cc45f03be11..1fa6beef9f978e 100755
+--- a/scripts/faddr2line
++++ b/scripts/faddr2line
+@@ -252,7 +252,7 @@ __faddr2line() {
+ 				found=2
+ 				break
+ 			fi
+-		done < <(echo "${ELF_SYMS}" | sed 's/\[.*\]//' | ${AWK} -v sec=$sym_sec '$7 == sec' | sort --key=2 | ${GREP} -A1 --no-group-separator " ${sym_name}$")
++		done < <(echo "${ELF_SYMS}" | sed 's/\[.*\]//' | ${AWK} -v sec=$sym_sec '$7 == sec' | sort --key=2)
+ 
+ 		if [[ $found = 0 ]]; then
+ 			warn "can't find symbol: sym_name: $sym_name sym_sec: $sym_sec sym_addr: $sym_addr sym_elf_size: $sym_elf_size"
+diff --git a/scripts/kernel-doc b/scripts/kernel-doc
+index 2791f819520387..320544321ecba5 100755
+--- a/scripts/kernel-doc
++++ b/scripts/kernel-doc
+@@ -569,6 +569,8 @@ sub output_function_man(%) {
+     my %args = %{$_[0]};
+     my ($parameter, $section);
+     my $count;
++    my $func_macro = $args{'func_macro'};
++    my $paramcount = $#{$args{'parameterlist'}}; # -1 is empty
+ 
+     print ".TH \"$args{'function'}\" 9 \"$args{'function'}\" \"$man_date\" \"Kernel Hacker's Manual\" LINUX\n";
+ 
+@@ -600,7 +602,10 @@ sub output_function_man(%) {
+         $parenth = "";
+     }
+ 
+-    print ".SH ARGUMENTS\n";
++    $paramcount = $#{$args{'parameterlist'}}; # -1 is empty
++    if ($paramcount >= 0) {
++    	print ".SH ARGUMENTS\n";
++	}
+     foreach $parameter (@{$args{'parameterlist'}}) {
+         my $parameter_name = $parameter;
+         $parameter_name =~ s/\[.*//;
+@@ -822,10 +827,16 @@ sub output_function_rst(%) {
+     my $oldprefix = $lineprefix;
+ 
+     my $signature = "";
+-    if ($args{'functiontype'} ne "") {
+-        $signature = $args{'functiontype'} . " " . $args{'function'} . " (";
+-    } else {
+-        $signature = $args{'function'} . " (";
++    my $func_macro = $args{'func_macro'};
++    my $paramcount = $#{$args{'parameterlist'}}; # -1 is empty
++
++	if ($func_macro) {
++        $signature = $args{'function'};
++	} else {
++		if ($args{'functiontype'}) {
++        	$signature = $args{'functiontype'} . " ";
++		}
++		$signature .= $args{'function'} . " (";
+     }
+ 
+     my $count = 0;
+@@ -844,7 +855,9 @@ sub output_function_rst(%) {
+         }
+     }
+ 
+-    $signature .= ")";
++    if (!$func_macro) {
++    	$signature .= ")";
++    }
+ 
+     if ($sphinx_major < 3) {
+         if ($args{'typedef'}) {
+@@ -888,9 +901,11 @@ sub output_function_rst(%) {
+     # Put our descriptive text into a container (thus an HTML <div>) to help
+     # set the function prototypes apart.
+     #
+-    print ".. container:: kernelindent\n\n";
+     $lineprefix = "  ";
+-    print $lineprefix . "**Parameters**\n\n";
++	if ($paramcount >= 0) {
++    	print ".. container:: kernelindent\n\n";
++   		print $lineprefix . "**Parameters**\n\n";
++    }
+     foreach $parameter (@{$args{'parameterlist'}}) {
+         my $parameter_name = $parameter;
+         $parameter_name =~ s/\[.*//;
+@@ -1704,7 +1719,7 @@ sub check_return_section {
+ sub dump_function($$) {
+     my $prototype = shift;
+     my $file = shift;
+-    my $noret = 0;
++    my $func_macro = 0;
+ 
+     print_lineno($new_start_line);
+ 
+@@ -1769,7 +1784,7 @@ sub dump_function($$) {
+         # declaration_name and opening parenthesis (notice the \s+).
+         $return_type = $1;
+         $declaration_name = $2;
+-        $noret = 1;
++        $func_macro = 1;
+     } elsif ($prototype =~ m/^()($name)\s*$prototype_end/ ||
+         $prototype =~ m/^($type1)\s+($name)\s*$prototype_end/ ||
+         $prototype =~ m/^($type2+)\s*($name)\s*$prototype_end/)  {
+@@ -1796,7 +1811,7 @@ sub dump_function($$) {
+     # of warnings goes sufficiently down, the check is only performed in
+     # -Wreturn mode.
+     # TODO: always perform the check.
+-    if ($Wreturn && !$noret) {
++    if ($Wreturn && !$func_macro) {
+         check_return_section($file, $declaration_name, $return_type);
+     }
+ 
+@@ -1814,7 +1829,8 @@ sub dump_function($$) {
+                             'parametertypes' => \%parametertypes,
+                             'sectionlist' => \@sectionlist,
+                             'sections' => \%sections,
+-                            'purpose' => $declaration_purpose
++                            'purpose' => $declaration_purpose,
++							'func_macro' => $func_macro
+                            });
+     } else {
+         output_declaration($declaration_name,
+@@ -1827,7 +1843,8 @@ sub dump_function($$) {
+                             'parametertypes' => \%parametertypes,
+                             'sectionlist' => \@sectionlist,
+                             'sections' => \%sections,
+-                            'purpose' => $declaration_purpose
++                            'purpose' => $declaration_purpose,
++							'func_macro' => $func_macro
+                            });
+     }
+ }
+@@ -2322,7 +2339,6 @@ sub process_inline($$) {
+ 
+ sub process_file($) {
+     my $file;
+-    my $initial_section_counter = $section_counter;
+     my ($orig_file) = @_;
+ 
+     $file = map_filename($orig_file);
+@@ -2360,8 +2376,7 @@ sub process_file($) {
+     }
+ 
+     # Make sure we got something interesting.
+-    if ($initial_section_counter == $section_counter && $
+-        output_mode ne "none") {
++    if (!$section_counter && $output_mode ne "none") {
+         if ($output_selection == OUTPUT_INCLUDE) {
+             emit_warning("${file}:1", "'$_' not found\n")
+                 for keys %function_table;
+diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
+index c4cc11aa558f5f..634e40748287c0 100644
+--- a/scripts/mod/file2alias.c
++++ b/scripts/mod/file2alias.c
+@@ -809,10 +809,7 @@ static int do_eisa_entry(const char *filename, void *symval,
+ 		char *alias)
+ {
+ 	DEF_FIELD_ADDR(symval, eisa_device_id, sig);
+-	if (sig[0])
+-		sprintf(alias, EISA_DEVICE_MODALIAS_FMT "*", *sig);
+-	else
+-		strcat(alias, "*");
++	sprintf(alias, EISA_DEVICE_MODALIAS_FMT "*", *sig);
+ 	return 1;
+ }
+ 
+diff --git a/scripts/package/builddeb b/scripts/package/builddeb
+index 441b0bb66e0d0c..fb686fd3266f01 100755
+--- a/scripts/package/builddeb
++++ b/scripts/package/builddeb
+@@ -96,16 +96,18 @@ install_linux_image_dbg () {
+ 
+ 	# Parse modules.order directly because 'make modules_install' may sign,
+ 	# compress modules, and then run unneeded depmod.
+-	while read -r mod; do
+-		mod="${mod%.o}.ko"
+-		dbg="${pdir}/usr/lib/debug/lib/modules/${KERNELRELEASE}/kernel/${mod}"
+-		buildid=$("${READELF}" -n "${mod}" | sed -n 's@^.*Build ID: \(..\)\(.*\)@\1/\2@p')
+-		link="${pdir}/usr/lib/debug/.build-id/${buildid}.debug"
+-
+-		mkdir -p "${dbg%/*}" "${link%/*}"
+-		"${OBJCOPY}" --only-keep-debug "${mod}" "${dbg}"
+-		ln -sf --relative "${dbg}" "${link}"
+-	done < modules.order
++	if is_enabled CONFIG_MODULES; then
++		while read -r mod; do
++			mod="${mod%.o}.ko"
++			dbg="${pdir}/usr/lib/debug/lib/modules/${KERNELRELEASE}/kernel/${mod}"
++			buildid=$("${READELF}" -n "${mod}" | sed -n 's@^.*Build ID: \(..\)\(.*\)@\1/\2@p')
++			link="${pdir}/usr/lib/debug/.build-id/${buildid}.debug"
++
++			mkdir -p "${dbg%/*}" "${link%/*}"
++			"${OBJCOPY}" --only-keep-debug "${mod}" "${dbg}"
++			ln -sf --relative "${dbg}" "${link}"
++		done < modules.order
++	fi
+ 
+ 	# Build debug package
+ 	# Different tools want the image in different locations
+diff --git a/security/apparmor/capability.c b/security/apparmor/capability.c
+index 9934df16c8431d..bf7df60868308d 100644
+--- a/security/apparmor/capability.c
++++ b/security/apparmor/capability.c
+@@ -96,6 +96,8 @@ static int audit_caps(struct apparmor_audit_data *ad, struct aa_profile *profile
+ 		return error;
+ 	} else {
+ 		aa_put_profile(ent->profile);
++		if (profile != ent->profile)
++			cap_clear(ent->caps);
+ 		ent->profile = aa_get_profile(profile);
+ 		cap_raise(ent->caps, cap);
+ 	}
+diff --git a/security/apparmor/policy_unpack_test.c b/security/apparmor/policy_unpack_test.c
+index c64733d6c98fbb..f070902da8fcce 100644
+--- a/security/apparmor/policy_unpack_test.c
++++ b/security/apparmor/policy_unpack_test.c
+@@ -281,6 +281,8 @@ static void policy_unpack_test_unpack_strdup_with_null_name(struct kunit *test)
+ 			   ((uintptr_t)puf->e->start <= (uintptr_t)string)
+ 			   && ((uintptr_t)string <= (uintptr_t)puf->e->end));
+ 	KUNIT_EXPECT_STREQ(test, string, TEST_STRING_DATA);
++
++	kfree(string);
+ }
+ 
+ static void policy_unpack_test_unpack_strdup_with_name(struct kunit *test)
+@@ -296,6 +298,8 @@ static void policy_unpack_test_unpack_strdup_with_name(struct kunit *test)
+ 			   ((uintptr_t)puf->e->start <= (uintptr_t)string)
+ 			   && ((uintptr_t)string <= (uintptr_t)puf->e->end));
+ 	KUNIT_EXPECT_STREQ(test, string, TEST_STRING_DATA);
++
++	kfree(string);
+ }
+ 
+ static void policy_unpack_test_unpack_strdup_out_of_bounds(struct kunit *test)
+@@ -313,6 +317,8 @@ static void policy_unpack_test_unpack_strdup_out_of_bounds(struct kunit *test)
+ 	KUNIT_EXPECT_EQ(test, size, 0);
+ 	KUNIT_EXPECT_NULL(test, string);
+ 	KUNIT_EXPECT_PTR_EQ(test, puf->e->pos, start);
++
++	kfree(string);
+ }
+ 
+ static void policy_unpack_test_unpack_nameX_with_null_name(struct kunit *test)
+diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
+index b465fb6e1f5f0d..0790b5fd917e12 100644
+--- a/sound/core/pcm_native.c
++++ b/sound/core/pcm_native.c
+@@ -3793,9 +3793,11 @@ static vm_fault_t snd_pcm_mmap_data_fault(struct vm_fault *vmf)
+ 		return VM_FAULT_SIGBUS;
+ 	if (substream->ops->page)
+ 		page = substream->ops->page(substream, offset);
+-	else if (!snd_pcm_get_dma_buf(substream))
++	else if (!snd_pcm_get_dma_buf(substream)) {
++		if (WARN_ON_ONCE(!runtime->dma_area))
++			return VM_FAULT_SIGBUS;
+ 		page = virt_to_page(runtime->dma_area + offset);
+-	else
++	} else
+ 		page = snd_sgbuf_get_page(snd_pcm_get_dma_buf(substream), offset);
+ 	if (!page)
+ 		return VM_FAULT_SIGBUS;
+diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
+index 03306be5fa0245..348ce1b7725ea2 100644
+--- a/sound/core/rawmidi.c
++++ b/sound/core/rawmidi.c
+@@ -724,8 +724,9 @@ static int resize_runtime_buffer(struct snd_rawmidi_substream *substream,
+ 		newbuf = kvzalloc(params->buffer_size, GFP_KERNEL);
+ 		if (!newbuf)
+ 			return -ENOMEM;
+-		guard(spinlock_irq)(&substream->lock);
++		spin_lock_irq(&substream->lock);
+ 		if (runtime->buffer_ref) {
++			spin_unlock_irq(&substream->lock);
+ 			kvfree(newbuf);
+ 			return -EBUSY;
+ 		}
+@@ -733,6 +734,7 @@ static int resize_runtime_buffer(struct snd_rawmidi_substream *substream,
+ 		runtime->buffer = newbuf;
+ 		runtime->buffer_size = params->buffer_size;
+ 		__reset_runtime_ptrs(runtime, is_input);
++		spin_unlock_irq(&substream->lock);
+ 		kvfree(oldbuf);
+ 	}
+ 	runtime->avail_min = params->avail_min;
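A hedged note on why the scope-based guard had to go: kvfree() may take the vfree() path, which can sleep, so it must never run under a spinlock, and guard(spinlock_irq) would have kept the lock held across both kvfree() calls. The safe shape, using the names from the function above:

	spin_lock_irq(&substream->lock);
	oldbuf = runtime->buffer;	/* swap under the lock */
	runtime->buffer = newbuf;
	runtime->buffer_size = params->buffer_size;
	spin_unlock_irq(&substream->lock);
	kvfree(oldbuf);			/* free after unlock: may sleep */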
+diff --git a/sound/core/sound_kunit.c b/sound/core/sound_kunit.c
+index bfed1a25fc8f74..84e337ecbddd0a 100644
+--- a/sound/core/sound_kunit.c
++++ b/sound/core/sound_kunit.c
+@@ -172,6 +172,7 @@ static void test_format_fill_silence(struct kunit *test)
+ 	u32 i, j;
+ 
+ 	buffer = kunit_kzalloc(test, SILENCE_BUFFER_SIZE, GFP_KERNEL);
++	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buffer);
+ 
+ 	for (i = 0; i < ARRAY_SIZE(buf_samples); i++) {
+ 		for (j = 0; j < ARRAY_SIZE(valid_fmt); j++)
+@@ -208,8 +209,12 @@ static void test_playback_avail(struct kunit *test)
+ 	struct snd_pcm_runtime *r = kunit_kzalloc(test, sizeof(*r), GFP_KERNEL);
+ 	u32 i;
+ 
++	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, r);
++
+ 	r->status = kunit_kzalloc(test, sizeof(*r->status), GFP_KERNEL);
+ 	r->control = kunit_kzalloc(test, sizeof(*r->control), GFP_KERNEL);
++	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, r->status);
++	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, r->control);
+ 
+ 	for (i = 0; i < ARRAY_SIZE(p_avail_data); i++) {
+ 		r->buffer_size = p_avail_data[i].buffer_size;
+@@ -232,8 +237,12 @@ static void test_capture_avail(struct kunit *test)
+ 	struct snd_pcm_runtime *r = kunit_kzalloc(test, sizeof(*r), GFP_KERNEL);
+ 	u32 i;
+ 
++	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, r);
++
+ 	r->status = kunit_kzalloc(test, sizeof(*r->status), GFP_KERNEL);
+ 	r->control = kunit_kzalloc(test, sizeof(*r->control), GFP_KERNEL);
++	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, r->status);
++	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, r->control);
+ 
+ 	for (i = 0; i < ARRAY_SIZE(c_avail_data); i++) {
+ 		r->buffer_size = c_avail_data[i].buffer_size;
+@@ -247,6 +256,7 @@ static void test_capture_avail(struct kunit *test)
+ static void test_card_set_id(struct kunit *test)
+ {
+ 	struct snd_card *card = kunit_kzalloc(test, sizeof(*card), GFP_KERNEL);
++	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, card);
+ 
+ 	snd_card_set_id(card, VALID_NAME);
+ 	KUNIT_EXPECT_STREQ(test, card->id, VALID_NAME);
+@@ -280,6 +290,7 @@ static void test_pcm_format_name(struct kunit *test)
+ static void test_card_add_component(struct kunit *test)
+ {
+ 	struct snd_card *card = kunit_kzalloc(test, sizeof(*card), GFP_KERNEL);
++	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, card);
+ 
+ 	snd_component_add(card, TEST_FIRST_COMPONENT);
+ 	KUNIT_ASSERT_STREQ(test, card->components, TEST_FIRST_COMPONENT);
+diff --git a/sound/core/ump.c b/sound/core/ump.c
+index 7d59a0a9b037ad..8d37f237f83b2e 100644
+--- a/sound/core/ump.c
++++ b/sound/core/ump.c
+@@ -788,7 +788,10 @@ static void fill_fb_info(struct snd_ump_endpoint *ump,
+ 	info->ui_hint = buf->fb_info.ui_hint;
+ 	info->first_group = buf->fb_info.first_group;
+ 	info->num_groups = buf->fb_info.num_groups;
+-	info->flags = buf->fb_info.midi_10;
++	if (buf->fb_info.midi_10 < 2)
++		info->flags = buf->fb_info.midi_10;
++	else
++		info->flags = SNDRV_UMP_BLOCK_IS_MIDI1 | SNDRV_UMP_BLOCK_IS_LOWSPEED;
+ 	info->active = buf->fb_info.active;
+ 	info->midi_ci_version = buf->fb_info.midi_ci_version;
+ 	info->sysex8_streams = buf->fb_info.sysex8_streams;
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 24b4fe99304a40..18e6779a83be2f 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -473,6 +473,8 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
+ 		break;
+ 	case 0x10ec0234:
+ 	case 0x10ec0274:
++		alc_write_coef_idx(codec, 0x6e, 0x0c25);
++		fallthrough;
+ 	case 0x10ec0294:
+ 	case 0x10ec0700:
+ 	case 0x10ec0701:
+@@ -3613,25 +3615,22 @@ static void alc256_init(struct hda_codec *codec)
+ 
+ 	hp_pin_sense = snd_hda_jack_detect(codec, hp_pin);
+ 
+-	if (hp_pin_sense)
++	if (hp_pin_sense) {
+ 		msleep(2);
++		alc_update_coefex_idx(codec, 0x57, 0x04, 0x0007, 0x1); /* Low power */
+ 
+-	alc_update_coefex_idx(codec, 0x57, 0x04, 0x0007, 0x1); /* Low power */
+-
+-	snd_hda_codec_write(codec, hp_pin, 0,
+-			    AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
+-
+-	if (hp_pin_sense || spec->ultra_low_power)
+-		msleep(85);
+-
+-	snd_hda_codec_write(codec, hp_pin, 0,
++		snd_hda_codec_write(codec, hp_pin, 0,
+ 			    AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
+ 
+-	if (hp_pin_sense || spec->ultra_low_power)
+-		msleep(100);
++		msleep(75);
++
++		snd_hda_codec_write(codec, hp_pin, 0,
++			    AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE);
+ 
++		msleep(75);
++		alc_update_coefex_idx(codec, 0x57, 0x04, 0x0007, 0x4); /* High power */
++	}
+ 	alc_update_coef_idx(codec, 0x46, 3 << 12, 0);
+-	alc_update_coefex_idx(codec, 0x57, 0x04, 0x0007, 0x4); /* Hight power */
+ 	alc_update_coefex_idx(codec, 0x53, 0x02, 0x8000, 1 << 15); /* Clear bit */
+ 	alc_update_coefex_idx(codec, 0x53, 0x02, 0x8000, 0 << 15);
+ 	/*
+@@ -3655,29 +3654,28 @@ static void alc256_shutup(struct hda_codec *codec)
+ 	alc_update_coefex_idx(codec, 0x57, 0x04, 0x0007, 0x1); /* Low power */
+ 	hp_pin_sense = snd_hda_jack_detect(codec, hp_pin);
+ 
+-	if (hp_pin_sense)
++	if (hp_pin_sense) {
+ 		msleep(2);
+ 
+-	snd_hda_codec_write(codec, hp_pin, 0,
++		snd_hda_codec_write(codec, hp_pin, 0,
+ 			    AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
+ 
+-	if (hp_pin_sense || spec->ultra_low_power)
+-		msleep(85);
++		msleep(75);
+ 
+ 	/* 3k pull low control for Headset jack. */
+ 	/* NOTE: call this before clearing the pin, otherwise codec stalls */
+ 	/* If disable 3k pulldown control for alc257, the Mic detection will not work correctly
+ 	 * when booting with headset plugged. So skip setting it for the codec alc257
+ 	 */
+-	if (spec->en_3kpull_low)
+-		alc_update_coef_idx(codec, 0x46, 0, 3 << 12);
++		if (spec->en_3kpull_low)
++			alc_update_coef_idx(codec, 0x46, 0, 3 << 12);
+ 
+-	if (!spec->no_shutup_pins)
+-		snd_hda_codec_write(codec, hp_pin, 0,
++		if (!spec->no_shutup_pins)
++			snd_hda_codec_write(codec, hp_pin, 0,
+ 				    AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
+ 
+-	if (hp_pin_sense || spec->ultra_low_power)
+-		msleep(100);
++		msleep(75);
++	}
+ 
+ 	alc_auto_setup_eapd(codec, false);
+ 	alc_shutup_pins(codec);
+@@ -3772,33 +3770,28 @@ static void alc225_init(struct hda_codec *codec)
+ 	hp1_pin_sense = snd_hda_jack_detect(codec, hp_pin);
+ 	hp2_pin_sense = snd_hda_jack_detect(codec, 0x16);
+ 
+-	if (hp1_pin_sense || hp2_pin_sense)
++	if (hp1_pin_sense || hp2_pin_sense) {
+ 		msleep(2);
++		alc_update_coefex_idx(codec, 0x57, 0x04, 0x0007, 0x1); /* Low power */
+ 
+-	alc_update_coefex_idx(codec, 0x57, 0x04, 0x0007, 0x1); /* Low power */
+-
+-	if (hp1_pin_sense || spec->ultra_low_power)
+-		snd_hda_codec_write(codec, hp_pin, 0,
+-			    AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
+-	if (hp2_pin_sense)
+-		snd_hda_codec_write(codec, 0x16, 0,
+-			    AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
+-
+-	if (hp1_pin_sense || hp2_pin_sense || spec->ultra_low_power)
+-		msleep(85);
+-
+-	if (hp1_pin_sense || spec->ultra_low_power)
+-		snd_hda_codec_write(codec, hp_pin, 0,
+-			    AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
+-	if (hp2_pin_sense)
+-		snd_hda_codec_write(codec, 0x16, 0,
+-			    AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
++		if (hp1_pin_sense)
++			snd_hda_codec_write(codec, hp_pin, 0,
++				    AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
++		if (hp2_pin_sense)
++			snd_hda_codec_write(codec, 0x16, 0,
++				    AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
++		msleep(75);
+ 
+-	if (hp1_pin_sense || hp2_pin_sense || spec->ultra_low_power)
+-		msleep(100);
++		if (hp1_pin_sense)
++			snd_hda_codec_write(codec, hp_pin, 0,
++				    AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE);
++		if (hp2_pin_sense)
++			snd_hda_codec_write(codec, 0x16, 0,
++				    AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE);
+ 
+-	alc_update_coef_idx(codec, 0x4a, 3 << 10, 0);
+-	alc_update_coefex_idx(codec, 0x57, 0x04, 0x0007, 0x4); /* Hight power */
++		msleep(75);
++		alc_update_coefex_idx(codec, 0x57, 0x04, 0x0007, 0x4); /* High power */
++	}
+ }
+ 
+ static void alc225_shutup(struct hda_codec *codec)
+@@ -3810,36 +3803,35 @@ static void alc225_shutup(struct hda_codec *codec)
+ 	if (!hp_pin)
+ 		hp_pin = 0x21;
+ 
+-	alc_disable_headset_jack_key(codec);
+-	/* 3k pull low control for Headset jack. */
+-	alc_update_coef_idx(codec, 0x4a, 0, 3 << 10);
+-
+ 	hp1_pin_sense = snd_hda_jack_detect(codec, hp_pin);
+ 	hp2_pin_sense = snd_hda_jack_detect(codec, 0x16);
+ 
+-	if (hp1_pin_sense || hp2_pin_sense)
++	if (hp1_pin_sense || hp2_pin_sense) {
++		alc_disable_headset_jack_key(codec);
++		/* 3k pull low control for Headset jack. */
++		alc_update_coef_idx(codec, 0x4a, 0, 3 << 10);
+ 		msleep(2);
+ 
+-	if (hp1_pin_sense || spec->ultra_low_power)
+-		snd_hda_codec_write(codec, hp_pin, 0,
+-			    AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
+-	if (hp2_pin_sense)
+-		snd_hda_codec_write(codec, 0x16, 0,
+-			    AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
+-
+-	if (hp1_pin_sense || hp2_pin_sense || spec->ultra_low_power)
+-		msleep(85);
++		if (hp1_pin_sense)
++			snd_hda_codec_write(codec, hp_pin, 0,
++				    AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
++		if (hp2_pin_sense)
++			snd_hda_codec_write(codec, 0x16, 0,
++				    AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
+ 
+-	if (hp1_pin_sense || spec->ultra_low_power)
+-		snd_hda_codec_write(codec, hp_pin, 0,
+-			    AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
+-	if (hp2_pin_sense)
+-		snd_hda_codec_write(codec, 0x16, 0,
+-			    AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
++		msleep(75);
+ 
+-	if (hp1_pin_sense || hp2_pin_sense || spec->ultra_low_power)
+-		msleep(100);
++		if (hp1_pin_sense)
++			snd_hda_codec_write(codec, hp_pin, 0,
++				    AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
++		if (hp2_pin_sense)
++			snd_hda_codec_write(codec, 0x16, 0,
++				    AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
+ 
++		msleep(75);
++		alc_update_coef_idx(codec, 0x4a, 3 << 10, 0);
++		alc_enable_headset_jack_key(codec);
++	}
+ 	alc_auto_setup_eapd(codec, false);
+ 	alc_shutup_pins(codec);
+ 	if (spec->ultra_low_power) {
+@@ -3850,9 +3842,6 @@ static void alc225_shutup(struct hda_codec *codec)
+ 		alc_update_coef_idx(codec, 0x4a, 3<<4, 2<<4);
+ 		msleep(30);
+ 	}
+-
+-	alc_update_coef_idx(codec, 0x4a, 3 << 10, 0);
+-	alc_enable_headset_jack_key(codec);
+ }
+ 
+ static void alc_default_init(struct hda_codec *codec)
+@@ -7559,6 +7548,7 @@ enum {
+ 	ALC269_FIXUP_THINKPAD_ACPI,
+ 	ALC269_FIXUP_DMIC_THINKPAD_ACPI,
+ 	ALC269VB_FIXUP_INFINIX_ZERO_BOOK_13,
++	ALC269VC_FIXUP_INFINIX_Y4_MAX,
+ 	ALC269VB_FIXUP_CHUWI_COREBOOK_XPRO,
+ 	ALC255_FIXUP_ACER_MIC_NO_PRESENCE,
+ 	ALC255_FIXUP_ASUS_MIC_NO_PRESENCE,
+@@ -7786,6 +7776,7 @@ enum {
+ 	ALC287_FIXUP_LENOVO_SSID_17AA3820,
+ 	ALC245_FIXUP_CLEVO_NOISY_MIC,
+ 	ALC269_FIXUP_VAIO_VJFH52_MIC_NO_PRESENCE,
++	ALC233_FIXUP_MEDION_MTL_SPK,
+ };
+ 
+ /* A special fixup for Lenovo C940 and Yoga Duet 7;
+@@ -8015,6 +8006,15 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.chained = true,
+ 		.chain_id = ALC269_FIXUP_LIMIT_INT_MIC_BOOST
+ 	},
++	[ALC269VC_FIXUP_INFINIX_Y4_MAX] = {
++		.type = HDA_FIXUP_PINS,
++		.v.pins = (const struct hda_pintbl[]) {
++			{ 0x1b, 0x90170150 }, /* use as internal speaker */
++			{ }
++		},
++		.chained = true,
++		.chain_id = ALC269_FIXUP_LIMIT_INT_MIC_BOOST
++	},
+ 	[ALC269VB_FIXUP_CHUWI_COREBOOK_XPRO] = {
+ 		.type = HDA_FIXUP_PINS,
+ 		.v.pins = (const struct hda_pintbl[]) {
+@@ -10160,6 +10160,13 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.chained = true,
+ 		.chain_id = ALC269_FIXUP_LIMIT_INT_MIC_BOOST
+ 	},
++	[ALC233_FIXUP_MEDION_MTL_SPK] = {
++		.type = HDA_FIXUP_PINS,
++		.v.pins = (const struct hda_pintbl[]) {
++			{ 0x1b, 0x90170110 },
++			{ }
++		},
++	},
+ };
+ 
+ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+@@ -10585,6 +10592,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8cdf, "HP SnowWhite", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8ce0, "HP SnowWhite", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8cf5, "HP ZBook Studio 16", ALC245_FIXUP_CS35L41_SPI_4_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8d84, "HP EliteBook X G1i", ALC285_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
+ 	SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+@@ -11025,7 +11033,10 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x2782, 0x0214, "VAIO VJFE-CL", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ 	SND_PCI_QUIRK(0x2782, 0x0228, "Infinix ZERO BOOK 13", ALC269VB_FIXUP_INFINIX_ZERO_BOOK_13),
+ 	SND_PCI_QUIRK(0x2782, 0x0232, "CHUWI CoreBook XPro", ALC269VB_FIXUP_CHUWI_COREBOOK_XPRO),
++	SND_PCI_QUIRK(0x2782, 0x1701, "Infinix Y4 Max", ALC269VC_FIXUP_INFINIX_Y4_MAX),
++	SND_PCI_QUIRK(0x2782, 0x1705, "MEDION E15433", ALC269VC_FIXUP_INFINIX_Y4_MAX),
+ 	SND_PCI_QUIRK(0x2782, 0x1707, "Vaio VJFE-ADL", ALC298_FIXUP_SPK_VOLUME),
++	SND_PCI_QUIRK(0x2782, 0x4900, "MEDION E15443", ALC233_FIXUP_MEDION_MTL_SPK),
+ 	SND_PCI_QUIRK(0x8086, 0x2074, "Intel NUC 8", ALC233_FIXUP_INTEL_NUC8_DMIC),
+ 	SND_PCI_QUIRK(0x8086, 0x2080, "Intel NUC 8 Rugged", ALC256_FIXUP_INTEL_NUC8_RUGGED),
+ 	SND_PCI_QUIRK(0x8086, 0x2081, "Intel NUC 10", ALC256_FIXUP_INTEL_NUC10),
+diff --git a/sound/soc/amd/acp/acp-sdw-sof-mach.c b/sound/soc/amd/acp/acp-sdw-sof-mach.c
+index 306854fb08e3d7..3be401c7227040 100644
+--- a/sound/soc/amd/acp/acp-sdw-sof-mach.c
++++ b/sound/soc/amd/acp/acp-sdw-sof-mach.c
+@@ -154,7 +154,7 @@ static int create_sdw_dailink(struct snd_soc_card *card,
+ 		int num_cpus = hweight32(sof_dai->link_mask[stream]);
+ 		int num_codecs = sof_dai->num_devs[stream];
+ 		int playback, capture;
+-		int i = 0, j = 0;
++		int j = 0;
+ 		char *name;
+ 
+ 		if (!sof_dai->num_devs[stream])
+@@ -213,14 +213,14 @@ static int create_sdw_dailink(struct snd_soc_card *card,
+ 
+ 			int link_num = ffs(sof_end->link_mask) - 1;
+ 
+-			cpus[i].dai_name = devm_kasprintf(dev, GFP_KERNEL,
+-							  "SDW%d Pin%d",
+-							  link_num, cpu_pin_id);
+-			dev_dbg(dev, "cpu[%d].dai_name:%s\n", i, cpus[i].dai_name);
+-			if (!cpus[i].dai_name)
++			cpus->dai_name = devm_kasprintf(dev, GFP_KERNEL,
++							"SDW%d Pin%d",
++							link_num, cpu_pin_id);
++			dev_dbg(dev, "cpu->dai_name:%s\n", cpus->dai_name);
++			if (!cpus->dai_name)
+ 				return -ENOMEM;
+ 
+-			codec_maps[j].cpu = i;
++			codec_maps[j].cpu = 0;
+ 			codec_maps[j].codec = j;
+ 
+ 			codecs[j].name = sof_end->codec_name;
+@@ -362,7 +362,7 @@ static int sof_card_dai_links_create(struct snd_soc_card *card)
+ 	dai_links = devm_kcalloc(dev, num_links, sizeof(*dai_links), GFP_KERNEL);
+ 	if (!dai_links) {
+ 		ret = -ENOMEM;
+-	goto err_end;
++		goto err_end;
+ 	}
+ 
+ 	card->codec_conf = codec_conf;
+diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
+index 2436e8deb2be48..5153a68d8c0795 100644
+--- a/sound/soc/amd/yc/acp6x-mach.c
++++ b/sound/soc/amd/yc/acp6x-mach.c
+@@ -241,6 +241,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "21M5"),
+ 		}
+ 	},
++	{
++		.driver_data = &acp6x_card,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "21ME"),
++		}
++	},
+ 	{
+ 		.driver_data = &acp6x_card,
+ 		.matches = {
+@@ -537,8 +544,14 @@ static int acp6x_probe(struct platform_device *pdev)
+ 	struct acp6x_pdm *machine = NULL;
+ 	struct snd_soc_card *card;
+ 	struct acpi_device *adev;
++	acpi_handle handle;
++	acpi_integer dmic_status;
+ 	int ret;
++	bool is_dmic_enable, wov_en;
+ 
++	/* IF WOV entry not found, enable dmic based on AcpDmicConnected entry*/
++	is_dmic_enable = false;
++	wov_en = true;
+ 	/* check the parent device's firmware node has _DSD or not */
+ 	adev = ACPI_COMPANION(pdev->dev.parent);
+ 	if (adev) {
+@@ -546,9 +559,19 @@ static int acp6x_probe(struct platform_device *pdev)
+ 
+ 		if (!acpi_dev_get_property(adev, "AcpDmicConnected", ACPI_TYPE_INTEGER, &obj) &&
+ 		    obj->integer.value == 1)
+-			platform_set_drvdata(pdev, &acp6x_card);
++			is_dmic_enable = true;
+ 	}
+ 
++	handle = ACPI_HANDLE(pdev->dev.parent);
++	ret = acpi_evaluate_integer(handle, "_WOV", NULL, &dmic_status);
++	if (!ACPI_FAILURE(ret))
++		wov_en = dmic_status;
++
++	if (is_dmic_enable && wov_en)
++		platform_set_drvdata(pdev, &acp6x_card);
++	else
++		return 0;
++
+ 	/* check for any DMI overrides */
+ 	dmi_id = dmi_first_match(yc_acp_quirk_table);
+ 	if (dmi_id)
+diff --git a/sound/soc/codecs/da7213.c b/sound/soc/codecs/da7213.c
+index f3ef6fb5530471..486db60bf2dd14 100644
+--- a/sound/soc/codecs/da7213.c
++++ b/sound/soc/codecs/da7213.c
+@@ -2136,6 +2136,7 @@ static const struct regmap_config da7213_regmap_config = {
+ 	.reg_bits = 8,
+ 	.val_bits = 8,
+ 
++	.max_register = DA7213_TONE_GEN_OFF_PER,
+ 	.reg_defaults = da7213_reg_defaults,
+ 	.num_reg_defaults = ARRAY_SIZE(da7213_reg_defaults),
+ 	.volatile_reg = da7213_volatile_register,
+diff --git a/sound/soc/codecs/da7219.c b/sound/soc/codecs/da7219.c
+index 311ea7918b3124..e2da3e317b5a3e 100644
+--- a/sound/soc/codecs/da7219.c
++++ b/sound/soc/codecs/da7219.c
+@@ -1167,17 +1167,20 @@ static int da7219_set_dai_sysclk(struct snd_soc_dai *codec_dai,
+ 	struct da7219_priv *da7219 = snd_soc_component_get_drvdata(component);
+ 	int ret = 0;
+ 
+-	if ((da7219->clk_src == clk_id) && (da7219->mclk_rate == freq))
++	mutex_lock(&da7219->pll_lock);
++
++	if ((da7219->clk_src == clk_id) && (da7219->mclk_rate == freq)) {
++		mutex_unlock(&da7219->pll_lock);
+ 		return 0;
++	}
+ 
+ 	if ((freq < 2000000) || (freq > 54000000)) {
++		mutex_unlock(&da7219->pll_lock);
+ 		dev_err(codec_dai->dev, "Unsupported MCLK value %d\n",
+ 			freq);
+ 		return -EINVAL;
+ 	}
+ 
+-	mutex_lock(&da7219->pll_lock);
+-
+ 	switch (clk_id) {
+ 	case DA7219_CLKSRC_MCLK_SQR:
+ 		snd_soc_component_update_bits(component, DA7219_PLL_CTRL,
+diff --git a/sound/soc/codecs/rt722-sdca.c b/sound/soc/codecs/rt722-sdca.c
+index e5bd9ef812de13..f9f7512ca36087 100644
+--- a/sound/soc/codecs/rt722-sdca.c
++++ b/sound/soc/codecs/rt722-sdca.c
+@@ -607,12 +607,8 @@ static int rt722_sdca_dmic_set_gain_get(struct snd_kcontrol *kcontrol,
+ 
+ 		if (!adc_vol_flag) /* boost gain */
+ 			ctl = regvalue / boost_step;
+-		else { /* ADC gain */
+-			if (adc_vol_flag)
+-				ctl = p->max - (((vol_max - regvalue) & 0xffff) / interval_offset);
+-			else
+-				ctl = p->max - (((0 - regvalue) & 0xffff) / interval_offset);
+-		}
++		else /* ADC gain */
++			ctl = p->max - (((vol_max - regvalue) & 0xffff) / interval_offset);
+ 
+ 		ucontrol->value.integer.value[i] = ctl;
+ 	}
+diff --git a/sound/soc/fsl/fsl-asoc-card.c b/sound/soc/fsl/fsl-asoc-card.c
+index f6c3aeff0d8eaf..a0c2ce84c32b1d 100644
+--- a/sound/soc/fsl/fsl-asoc-card.c
++++ b/sound/soc/fsl/fsl-asoc-card.c
+@@ -1033,14 +1033,15 @@ static int fsl_asoc_card_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	/*
+-	 * Properties "hp-det-gpio" and "mic-det-gpio" are optional, and
++	 * Properties "hp-det-gpios" and "mic-det-gpios" are optional, and
+ 	 * simple_util_init_jack() uses these properties for creating
+ 	 * Headphone Jack and Microphone Jack.
+ 	 *
+ 	 * The notifier is initialized in snd_soc_card_jack_new(), then
+ 	 * snd_soc_jack_notifier_register can be called.
+ 	 */
+-	if (of_property_read_bool(np, "hp-det-gpio")) {
++	if (of_property_read_bool(np, "hp-det-gpios") ||
++	    of_property_read_bool(np, "hp-det-gpio") /* deprecated */) {
+ 		ret = simple_util_init_jack(&priv->card, &priv->hp_jack,
+ 					    1, NULL, "Headphone Jack");
+ 		if (ret)
+@@ -1049,7 +1050,8 @@ static int fsl_asoc_card_probe(struct platform_device *pdev)
+ 		snd_soc_jack_notifier_register(&priv->hp_jack.jack, &hp_jack_nb);
+ 	}
+ 
+-	if (of_property_read_bool(np, "mic-det-gpio")) {
++	if (of_property_read_bool(np, "mic-det-gpios") ||
++	    of_property_read_bool(np, "mic-det-gpio") /* deprecated */) {
+ 		ret = simple_util_init_jack(&priv->card, &priv->mic_jack,
+ 					    0, NULL, "Mic Jack");
+ 		if (ret)
+diff --git a/sound/soc/fsl/fsl_micfil.c b/sound/soc/fsl/fsl_micfil.c
+index 0c71a73476dfa6..67c2d4cb0dea21 100644
+--- a/sound/soc/fsl/fsl_micfil.c
++++ b/sound/soc/fsl/fsl_micfil.c
+@@ -1061,7 +1061,7 @@ static irqreturn_t micfil_isr(int irq, void *devid)
+ 			regmap_write_bits(micfil->regmap,
+ 					  REG_MICFIL_STAT,
+ 					  MICFIL_STAT_CHXF(i),
+-					  1);
++					  MICFIL_STAT_CHXF(i));
+ 	}
+ 
+ 	for (i = 0; i < MICFIL_FIFO_NUM; i++) {
+@@ -1096,7 +1096,7 @@ static irqreturn_t micfil_err_isr(int irq, void *devid)
+ 	if (stat_reg & MICFIL_STAT_LOWFREQF) {
+ 		dev_dbg(&pdev->dev, "isr: ipg_clk_app is too low\n");
+ 		regmap_write_bits(micfil->regmap, REG_MICFIL_STAT,
+-				  MICFIL_STAT_LOWFREQF, 1);
++				  MICFIL_STAT_LOWFREQF, MICFIL_STAT_LOWFREQF);
+ 	}
+ 
+ 	return IRQ_HANDLED;
+diff --git a/sound/soc/fsl/imx-audmix.c b/sound/soc/fsl/imx-audmix.c
+index 6fbcf33fd0dea6..8e7b75cf64db42 100644
+--- a/sound/soc/fsl/imx-audmix.c
++++ b/sound/soc/fsl/imx-audmix.c
+@@ -275,6 +275,9 @@ static int imx_audmix_probe(struct platform_device *pdev)
+ 		/* Add AUDMIX Backend */
+ 		be_name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+ 					 "audmix-%d", i);
++		if (!be_name)
++			return -ENOMEM;
++
+ 		priv->dai[num_dai + i].cpus	= &dlc[1];
+ 		priv->dai[num_dai + i].codecs	= &snd_soc_dummy_dlc;
+ 
+diff --git a/sound/soc/mediatek/mt8188/mt8188-mt6359.c b/sound/soc/mediatek/mt8188/mt8188-mt6359.c
+index 08ae962afeb929..4eed90d13a5326 100644
+--- a/sound/soc/mediatek/mt8188/mt8188-mt6359.c
++++ b/sound/soc/mediatek/mt8188/mt8188-mt6359.c
+@@ -1279,10 +1279,12 @@ static int mt8188_mt6359_soc_card_probe(struct mtk_soc_card_data *soc_card_data,
+ 
+ 	for_each_card_prelinks(card, i, dai_link) {
+ 		if (strcmp(dai_link->name, "DPTX_BE") == 0) {
+-			if (strcmp(dai_link->codecs->dai_name, "snd-soc-dummy-dai"))
++			if (dai_link->num_codecs &&
++			    strcmp(dai_link->codecs->dai_name, "snd-soc-dummy-dai"))
+ 				dai_link->init = mt8188_dptx_codec_init;
+ 		} else if (strcmp(dai_link->name, "ETDM3_OUT_BE") == 0) {
+-			if (strcmp(dai_link->codecs->dai_name, "snd-soc-dummy-dai"))
++			if (dai_link->num_codecs &&
++			    strcmp(dai_link->codecs->dai_name, "snd-soc-dummy-dai"))
+ 				dai_link->init = mt8188_hdmi_codec_init;
+ 		} else if (strcmp(dai_link->name, "DL_SRC_BE") == 0 ||
+ 			   strcmp(dai_link->name, "UL_SRC_BE") == 0) {
+@@ -1294,6 +1296,9 @@ static int mt8188_mt6359_soc_card_probe(struct mtk_soc_card_data *soc_card_data,
+ 			   strcmp(dai_link->name, "ETDM2_OUT_BE") == 0 ||
+ 			   strcmp(dai_link->name, "ETDM1_IN_BE") == 0 ||
+ 			   strcmp(dai_link->name, "ETDM2_IN_BE") == 0) {
++			if (!dai_link->num_codecs)
++				continue;
++
+ 			if (!strcmp(dai_link->codecs->dai_name, MAX98390_CODEC_DAI)) {
+ 				/*
+ 				 * The TDM protocol settings with fixed 4 slots are defined in
+diff --git a/sound/soc/mediatek/mt8192/mt8192-mt6359-rt1015-rt5682.c b/sound/soc/mediatek/mt8192/mt8192-mt6359-rt1015-rt5682.c
+index db00704e206d6d..943f8116840373 100644
+--- a/sound/soc/mediatek/mt8192/mt8192-mt6359-rt1015-rt5682.c
++++ b/sound/soc/mediatek/mt8192/mt8192-mt6359-rt1015-rt5682.c
+@@ -1099,7 +1099,7 @@ static int mt8192_mt6359_legacy_probe(struct mtk_soc_card_data *soc_card_data)
+ 			dai_link->ignore = 0;
+ 		}
+ 
+-		if (dai_link->num_codecs && dai_link->codecs[0].dai_name &&
++		if (dai_link->num_codecs &&
+ 		    strcmp(dai_link->codecs[0].dai_name, RT1015_CODEC_DAI) == 0)
+ 			dai_link->ops = &mt8192_rt1015_i2s_ops;
+ 	}
+@@ -1127,7 +1127,7 @@ static int mt8192_mt6359_soc_card_probe(struct mtk_soc_card_data *soc_card_data,
+ 		int i;
+ 
+ 		for_each_card_prelinks(card, i, dai_link)
+-			if (dai_link->num_codecs && dai_link->codecs[0].dai_name &&
++			if (dai_link->num_codecs &&
+ 			    strcmp(dai_link->codecs[0].dai_name, RT1015_CODEC_DAI) == 0)
+ 				dai_link->ops = &mt8192_rt1015_i2s_ops;
+ 	}
+diff --git a/sound/soc/mediatek/mt8195/mt8195-mt6359.c b/sound/soc/mediatek/mt8195/mt8195-mt6359.c
+index 2832ef78eaed72..8ebf6c7502aa3d 100644
+--- a/sound/soc/mediatek/mt8195/mt8195-mt6359.c
++++ b/sound/soc/mediatek/mt8195/mt8195-mt6359.c
+@@ -1380,10 +1380,12 @@ static int mt8195_mt6359_soc_card_probe(struct mtk_soc_card_data *soc_card_data,
+ 
+ 	for_each_card_prelinks(card, i, dai_link) {
+ 		if (strcmp(dai_link->name, "DPTX_BE") == 0) {
+-			if (strcmp(dai_link->codecs->dai_name, "snd-soc-dummy-dai"))
++			if (dai_link->num_codecs &&
++			    strcmp(dai_link->codecs->dai_name, "snd-soc-dummy-dai"))
+ 				dai_link->init = mt8195_dptx_codec_init;
+ 		} else if (strcmp(dai_link->name, "ETDM3_OUT_BE") == 0) {
+-			if (strcmp(dai_link->codecs->dai_name, "snd-soc-dummy-dai"))
++			if (dai_link->num_codecs &&
++			    strcmp(dai_link->codecs->dai_name, "snd-soc-dummy-dai"))
+ 				dai_link->init = mt8195_hdmi_codec_init;
+ 		} else if (strcmp(dai_link->name, "DL_SRC_BE") == 0 ||
+ 			   strcmp(dai_link->name, "UL_SRC1_BE") == 0 ||
+@@ -1396,6 +1398,9 @@ static int mt8195_mt6359_soc_card_probe(struct mtk_soc_card_data *soc_card_data,
+ 			   strcmp(dai_link->name, "ETDM2_OUT_BE") == 0 ||
+ 			   strcmp(dai_link->name, "ETDM1_IN_BE") == 0 ||
+ 			   strcmp(dai_link->name, "ETDM2_IN_BE") == 0) {
++			if (!dai_link->num_codecs)
++				continue;
++
+ 			if (!strcmp(dai_link->codecs->dai_name, MAX98390_CODEC_DAI)) {
+ 				if (!(codec_init & MAX98390_CODEC_INIT)) {
+ 					dai_link->init = mt8195_max98390_init;
+diff --git a/sound/usb/6fire/chip.c b/sound/usb/6fire/chip.c
+index 33e962178c9363..d562a30b087f01 100644
+--- a/sound/usb/6fire/chip.c
++++ b/sound/usb/6fire/chip.c
+@@ -61,8 +61,10 @@ static void usb6fire_chip_abort(struct sfire_chip *chip)
+ 	}
+ }
+ 
+-static void usb6fire_chip_destroy(struct sfire_chip *chip)
++static void usb6fire_card_free(struct snd_card *card)
+ {
++	struct sfire_chip *chip = card->private_data;
++
+ 	if (chip) {
+ 		if (chip->pcm)
+ 			usb6fire_pcm_destroy(chip);
+@@ -72,8 +74,6 @@ static void usb6fire_chip_destroy(struct sfire_chip *chip)
+ 			usb6fire_comm_destroy(chip);
+ 		if (chip->control)
+ 			usb6fire_control_destroy(chip);
+-		if (chip->card)
+-			snd_card_free(chip->card);
+ 	}
+ }
+ 
+@@ -136,6 +136,7 @@ static int usb6fire_chip_probe(struct usb_interface *intf,
+ 	chip->regidx = regidx;
+ 	chip->intf_count = 1;
+ 	chip->card = card;
++	card->private_free = usb6fire_card_free;
+ 
+ 	ret = usb6fire_comm_init(chip);
+ 	if (ret < 0)
+@@ -162,7 +163,7 @@ static int usb6fire_chip_probe(struct usb_interface *intf,
+ 	return 0;
+ 
+ destroy_chip:
+-	usb6fire_chip_destroy(chip);
++	snd_card_free(card);
+ 	return ret;
+ }
+ 
+@@ -181,7 +182,6 @@ static void usb6fire_chip_disconnect(struct usb_interface *intf)
+ 
+ 			chip->shutdown = true;
+ 			usb6fire_chip_abort(chip);
+-			usb6fire_chip_destroy(chip);
+ 		}
+ 	}
+ }
+diff --git a/sound/usb/caiaq/audio.c b/sound/usb/caiaq/audio.c
+index 772c0ecb707738..05f964347ed6c2 100644
+--- a/sound/usb/caiaq/audio.c
++++ b/sound/usb/caiaq/audio.c
+@@ -858,14 +858,20 @@ int snd_usb_caiaq_audio_init(struct snd_usb_caiaqdev *cdev)
+ 	return 0;
+ }
+ 
+-void snd_usb_caiaq_audio_free(struct snd_usb_caiaqdev *cdev)
++void snd_usb_caiaq_audio_disconnect(struct snd_usb_caiaqdev *cdev)
+ {
+ 	struct device *dev = caiaqdev_to_dev(cdev);
+ 
+ 	dev_dbg(dev, "%s(%p)\n", __func__, cdev);
+ 	stream_stop(cdev);
++}
++
++void snd_usb_caiaq_audio_free(struct snd_usb_caiaqdev *cdev)
++{
++	struct device *dev = caiaqdev_to_dev(cdev);
++
++	dev_dbg(dev, "%s(%p)\n", __func__, cdev);
+ 	free_urbs(cdev->data_urbs_in);
+ 	free_urbs(cdev->data_urbs_out);
+ 	kfree(cdev->data_cb_info);
+ }
+-
+diff --git a/sound/usb/caiaq/audio.h b/sound/usb/caiaq/audio.h
+index 869bf6264d6a09..07f5d064456cf7 100644
+--- a/sound/usb/caiaq/audio.h
++++ b/sound/usb/caiaq/audio.h
+@@ -3,6 +3,7 @@
+ #define CAIAQ_AUDIO_H
+ 
+ int snd_usb_caiaq_audio_init(struct snd_usb_caiaqdev *cdev);
++void snd_usb_caiaq_audio_disconnect(struct snd_usb_caiaqdev *cdev);
+ void snd_usb_caiaq_audio_free(struct snd_usb_caiaqdev *cdev);
+ 
+ #endif /* CAIAQ_AUDIO_H */
+diff --git a/sound/usb/caiaq/device.c b/sound/usb/caiaq/device.c
+index b5cbf1f195c48c..dfd820483849eb 100644
+--- a/sound/usb/caiaq/device.c
++++ b/sound/usb/caiaq/device.c
+@@ -376,6 +376,17 @@ static void setup_card(struct snd_usb_caiaqdev *cdev)
+ 		dev_err(dev, "Unable to set up control system (ret=%d)\n", ret);
+ }
+ 
++static void card_free(struct snd_card *card)
++{
++	struct snd_usb_caiaqdev *cdev = caiaqdev(card);
++
++#ifdef CONFIG_SND_USB_CAIAQ_INPUT
++	snd_usb_caiaq_input_free(cdev);
++#endif
++	snd_usb_caiaq_audio_free(cdev);
++	usb_reset_device(cdev->chip.dev);
++}
++
+ static int create_card(struct usb_device *usb_dev,
+ 		       struct usb_interface *intf,
+ 		       struct snd_card **cardp)
+@@ -489,6 +500,7 @@ static int init_card(struct snd_usb_caiaqdev *cdev)
+ 		       cdev->vendor_name, cdev->product_name, usbpath);
+ 
+ 	setup_card(cdev);
++	card->private_free = card_free;
+ 	return 0;
+ 
+  err_kill_urb:
+@@ -534,15 +546,14 @@ static void snd_disconnect(struct usb_interface *intf)
+ 	snd_card_disconnect(card);
+ 
+ #ifdef CONFIG_SND_USB_CAIAQ_INPUT
+-	snd_usb_caiaq_input_free(cdev);
++	snd_usb_caiaq_input_disconnect(cdev);
+ #endif
+-	snd_usb_caiaq_audio_free(cdev);
++	snd_usb_caiaq_audio_disconnect(cdev);
+ 
+ 	usb_kill_urb(&cdev->ep1_in_urb);
+ 	usb_kill_urb(&cdev->midi_out_urb);
+ 
+-	snd_card_free(card);
+-	usb_reset_device(interface_to_usbdev(intf));
++	snd_card_free_when_closed(card);
+ }
+ 
+ 
+diff --git a/sound/usb/caiaq/input.c b/sound/usb/caiaq/input.c
+index 84f26dce7f5d03..a9130891bb696d 100644
+--- a/sound/usb/caiaq/input.c
++++ b/sound/usb/caiaq/input.c
+@@ -829,15 +829,21 @@ int snd_usb_caiaq_input_init(struct snd_usb_caiaqdev *cdev)
+ 	return ret;
+ }
+ 
+-void snd_usb_caiaq_input_free(struct snd_usb_caiaqdev *cdev)
++void snd_usb_caiaq_input_disconnect(struct snd_usb_caiaqdev *cdev)
+ {
+ 	if (!cdev || !cdev->input_dev)
+ 		return;
+ 
+ 	usb_kill_urb(cdev->ep4_in_urb);
++	input_unregister_device(cdev->input_dev);
++}
++
++void snd_usb_caiaq_input_free(struct snd_usb_caiaqdev *cdev)
++{
++	if (!cdev || !cdev->input_dev)
++		return;
++
+ 	usb_free_urb(cdev->ep4_in_urb);
+ 	cdev->ep4_in_urb = NULL;
+-
+-	input_unregister_device(cdev->input_dev);
+ 	cdev->input_dev = NULL;
+ }
+diff --git a/sound/usb/caiaq/input.h b/sound/usb/caiaq/input.h
+index c42891e7be884d..fbe267f85d025f 100644
+--- a/sound/usb/caiaq/input.h
++++ b/sound/usb/caiaq/input.h
+@@ -4,6 +4,7 @@
+ 
+ void snd_usb_caiaq_input_dispatch(struct snd_usb_caiaqdev *cdev, char *buf, unsigned int len);
+ int snd_usb_caiaq_input_init(struct snd_usb_caiaqdev *cdev);
++void snd_usb_caiaq_input_disconnect(struct snd_usb_caiaqdev *cdev);
+ void snd_usb_caiaq_input_free(struct snd_usb_caiaqdev *cdev);
+ 
+ #endif
+diff --git a/sound/usb/clock.c b/sound/usb/clock.c
+index 8f85200292f3ff..842ba5b801eae8 100644
+--- a/sound/usb/clock.c
++++ b/sound/usb/clock.c
+@@ -36,6 +36,12 @@ union uac23_clock_multiplier_desc {
+ 	struct uac_clock_multiplier_descriptor v3;
+ };
+ 
++/* check whether the descriptor bLength has the minimal length */
++#define DESC_LENGTH_CHECK(p, proto) \
++	((proto) == UAC_VERSION_3 ? \
++	 ((p)->v3.bLength >= sizeof((p)->v3)) :	\
++	 ((p)->v2.bLength >= sizeof((p)->v2)))
++
+ #define GET_VAL(p, proto, field) \
+ 	((proto) == UAC_VERSION_3 ? (p)->v3.field : (p)->v2.field)
+ 
+@@ -58,6 +64,8 @@ static bool validate_clock_source(void *p, int id, int proto)
+ {
+ 	union uac23_clock_source_desc *cs = p;
+ 
++	if (!DESC_LENGTH_CHECK(cs, proto))
++		return false;
+ 	return GET_VAL(cs, proto, bClockID) == id;
+ }
+ 
+@@ -65,13 +73,27 @@ static bool validate_clock_selector(void *p, int id, int proto)
+ {
+ 	union uac23_clock_selector_desc *cs = p;
+ 
+-	return GET_VAL(cs, proto, bClockID) == id;
++	if (!DESC_LENGTH_CHECK(cs, proto))
++		return false;
++	if (GET_VAL(cs, proto, bClockID) != id)
++		return false;
++	/* additional length check for baCSourceID array (in bNrInPins size)
++	 * and two more fields (which sizes depend on the protocol)
++	 */
++	if (proto == UAC_VERSION_3)
++		return cs->v3.bLength >= sizeof(cs->v3) + cs->v3.bNrInPins +
++			4 /* bmControls */ + 2 /* wCSelectorDescrStr */;
++	else
++		return cs->v2.bLength >= sizeof(cs->v2) + cs->v2.bNrInPins +
++			1 /* bmControls */ + 1 /* iClockSelector */;
+ }
+ 
+ static bool validate_clock_multiplier(void *p, int id, int proto)
+ {
+ 	union uac23_clock_multiplier_desc *cs = p;
+ 
++	if (!DESC_LENGTH_CHECK(cs, proto))
++		return false;
+ 	return GET_VAL(cs, proto, bClockID) == id;
+ }
+ 
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index c5fd180357d1e8..8538fdfce3535b 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -555,6 +555,7 @@ int snd_usb_create_quirk(struct snd_usb_audio *chip,
+ static int snd_usb_extigy_boot_quirk(struct usb_device *dev, struct usb_interface *intf)
+ {
+ 	struct usb_host_config *config = dev->actconfig;
++	struct usb_device_descriptor new_device_descriptor;
+ 	int err;
+ 
+ 	if (le16_to_cpu(get_cfg_desc(config)->wTotalLength) == EXTIGY_FIRMWARE_SIZE_OLD ||
+@@ -566,10 +567,14 @@ static int snd_usb_extigy_boot_quirk(struct usb_device *dev, struct usb_interfac
+ 		if (err < 0)
+ 			dev_dbg(&dev->dev, "error sending boot message: %d\n", err);
+ 		err = usb_get_descriptor(dev, USB_DT_DEVICE, 0,
+-				&dev->descriptor, sizeof(dev->descriptor));
+-		config = dev->actconfig;
++				&new_device_descriptor, sizeof(new_device_descriptor));
+ 		if (err < 0)
+ 			dev_dbg(&dev->dev, "error usb_get_descriptor: %d\n", err);
++		if (new_device_descriptor.bNumConfigurations > dev->descriptor.bNumConfigurations)
++			dev_dbg(&dev->dev, "error too large bNumConfigurations: %d\n",
++				new_device_descriptor.bNumConfigurations);
++		else
++			memcpy(&dev->descriptor, &new_device_descriptor, sizeof(dev->descriptor));
+ 		err = usb_reset_configuration(dev);
+ 		if (err < 0)
+ 			dev_dbg(&dev->dev, "error usb_reset_configuration: %d\n", err);
+@@ -901,6 +906,7 @@ static void mbox2_setup_48_24_magic(struct usb_device *dev)
+ static int snd_usb_mbox2_boot_quirk(struct usb_device *dev)
+ {
+ 	struct usb_host_config *config = dev->actconfig;
++	struct usb_device_descriptor new_device_descriptor;
+ 	int err;
+ 	u8 bootresponse[0x12];
+ 	int fwsize;
+@@ -936,10 +942,14 @@ static int snd_usb_mbox2_boot_quirk(struct usb_device *dev)
+ 	dev_dbg(&dev->dev, "device initialised!\n");
+ 
+ 	err = usb_get_descriptor(dev, USB_DT_DEVICE, 0,
+-		&dev->descriptor, sizeof(dev->descriptor));
+-	config = dev->actconfig;
++		&new_device_descriptor, sizeof(new_device_descriptor));
+ 	if (err < 0)
+ 		dev_dbg(&dev->dev, "error usb_get_descriptor: %d\n", err);
++	if (new_device_descriptor.bNumConfigurations > dev->descriptor.bNumConfigurations)
++		dev_dbg(&dev->dev, "error too large bNumConfigurations: %d\n",
++			new_device_descriptor.bNumConfigurations);
++	else
++		memcpy(&dev->descriptor, &new_device_descriptor, sizeof(dev->descriptor));
+ 
+ 	err = usb_reset_configuration(dev);
+ 	if (err < 0)
+@@ -1249,6 +1259,7 @@ static void mbox3_setup_defaults(struct usb_device *dev)
+ static int snd_usb_mbox3_boot_quirk(struct usb_device *dev)
+ {
+ 	struct usb_host_config *config = dev->actconfig;
++	struct usb_device_descriptor new_device_descriptor;
+ 	int err;
+ 	int descriptor_size;
+ 
+@@ -1262,10 +1273,14 @@ static int snd_usb_mbox3_boot_quirk(struct usb_device *dev)
+ 	dev_dbg(&dev->dev, "MBOX3: device initialised!\n");
+ 
+ 	err = usb_get_descriptor(dev, USB_DT_DEVICE, 0,
+-		&dev->descriptor, sizeof(dev->descriptor));
+-	config = dev->actconfig;
++		&new_device_descriptor, sizeof(new_device_descriptor));
+ 	if (err < 0)
+ 		dev_dbg(&dev->dev, "MBOX3: error usb_get_descriptor: %d\n", err);
++	if (new_device_descriptor.bNumConfigurations > dev->descriptor.bNumConfigurations)
++		dev_dbg(&dev->dev, "MBOX3: error too large bNumConfigurations: %d\n",
++			new_device_descriptor.bNumConfigurations);
++	else
++		memcpy(&dev->descriptor, &new_device_descriptor, sizeof(dev->descriptor));
+ 
+ 	err = usb_reset_configuration(dev);
+ 	if (err < 0)
+diff --git a/sound/usb/usx2y/us122l.c b/sound/usb/usx2y/us122l.c
+index 1be0e980feb958..ca5fac03ec798d 100644
+--- a/sound/usb/usx2y/us122l.c
++++ b/sound/usb/usx2y/us122l.c
+@@ -606,10 +606,7 @@ static void snd_us122l_disconnect(struct usb_interface *intf)
+ 	usb_put_intf(usb_ifnum_to_if(us122l->dev, 1));
+ 	usb_put_dev(us122l->dev);
+ 
+-	while (atomic_read(&us122l->mmap_count))
+-		msleep(500);
+-
+-	snd_card_free(card);
++	snd_card_free_when_closed(card);
+ }
+ 
+ static int snd_us122l_suspend(struct usb_interface *intf, pm_message_t message)
+diff --git a/sound/usb/usx2y/usbusx2y.c b/sound/usb/usx2y/usbusx2y.c
+index 2f9cede242b3a9..5f81c68fd42b68 100644
+--- a/sound/usb/usx2y/usbusx2y.c
++++ b/sound/usb/usx2y/usbusx2y.c
+@@ -422,7 +422,7 @@ static void snd_usx2y_disconnect(struct usb_interface *intf)
+ 	}
+ 	if (usx2y->us428ctls_sharedmem)
+ 		wake_up(&usx2y->us428ctls_wait_queue_head);
+-	snd_card_free(card);
++	snd_card_free_when_closed(card);
+ }
+ 
+ static int snd_usx2y_probe(struct usb_interface *intf,
+diff --git a/tools/bpf/bpftool/jit_disasm.c b/tools/bpf/bpftool/jit_disasm.c
+index 7b8d9ec89ebd35..c032d2c6ab6d55 100644
+--- a/tools/bpf/bpftool/jit_disasm.c
++++ b/tools/bpf/bpftool/jit_disasm.c
+@@ -80,7 +80,8 @@ symbol_lookup_callback(__maybe_unused void *disasm_info,
+ static int
+ init_context(disasm_ctx_t *ctx, const char *arch,
+ 	     __maybe_unused const char *disassembler_options,
+-	     __maybe_unused unsigned char *image, __maybe_unused ssize_t len)
++	     __maybe_unused unsigned char *image, __maybe_unused ssize_t len,
++	     __maybe_unused __u64 func_ksym)
+ {
+ 	char *triple;
+ 
+@@ -109,12 +110,13 @@ static void destroy_context(disasm_ctx_t *ctx)
+ }
+ 
+ static int
+-disassemble_insn(disasm_ctx_t *ctx, unsigned char *image, ssize_t len, int pc)
++disassemble_insn(disasm_ctx_t *ctx, unsigned char *image, ssize_t len, int pc,
++		 __u64 func_ksym)
+ {
+ 	char buf[256];
+ 	int count;
+ 
+-	count = LLVMDisasmInstruction(*ctx, image + pc, len - pc, pc,
++	count = LLVMDisasmInstruction(*ctx, image + pc, len - pc, func_ksym + pc,
+ 				      buf, sizeof(buf));
+ 	if (json_output)
+ 		printf_json(buf);
+@@ -136,8 +138,21 @@ int disasm_init(void)
+ #ifdef HAVE_LIBBFD_SUPPORT
+ #define DISASM_SPACER "\t"
+ 
++struct disasm_info {
++	struct disassemble_info info;
++	__u64 func_ksym;
++};
++
++static void disasm_print_addr(bfd_vma addr, struct disassemble_info *info)
++{
++	struct disasm_info *dinfo = container_of(info, struct disasm_info, info);
++
++	addr += dinfo->func_ksym;
++	generic_print_address(addr, info);
++}
++
+ typedef struct {
+-	struct disassemble_info *info;
++	struct disasm_info *info;
+ 	disassembler_ftype disassemble;
+ 	bfd *bfdf;
+ } disasm_ctx_t;
+@@ -215,7 +230,7 @@ static int fprintf_json_styled(void *out,
+ 
+ static int init_context(disasm_ctx_t *ctx, const char *arch,
+ 			const char *disassembler_options,
+-			unsigned char *image, ssize_t len)
++			unsigned char *image, ssize_t len, __u64 func_ksym)
+ {
+ 	struct disassemble_info *info;
+ 	char tpath[PATH_MAX];
+@@ -238,12 +253,13 @@ static int init_context(disasm_ctx_t *ctx, const char *arch,
+ 	}
+ 	bfdf = ctx->bfdf;
+ 
+-	ctx->info = malloc(sizeof(struct disassemble_info));
++	ctx->info = malloc(sizeof(struct disasm_info));
+ 	if (!ctx->info) {
+ 		p_err("mem alloc failed");
+ 		goto err_close;
+ 	}
+-	info = ctx->info;
++	ctx->info->func_ksym = func_ksym;
++	info = &ctx->info->info;
+ 
+ 	if (json_output)
+ 		init_disassemble_info_compat(info, stdout,
+@@ -272,6 +288,7 @@ static int init_context(disasm_ctx_t *ctx, const char *arch,
+ 		info->disassembler_options = disassembler_options;
+ 	info->buffer = image;
+ 	info->buffer_length = len;
++	info->print_address_func = disasm_print_addr;
+ 
+ 	disassemble_init_for_target(info);
+ 
+@@ -304,9 +321,10 @@ static void destroy_context(disasm_ctx_t *ctx)
+ 
+ static int
+ disassemble_insn(disasm_ctx_t *ctx, __maybe_unused unsigned char *image,
+-		 __maybe_unused ssize_t len, int pc)
++		 __maybe_unused ssize_t len, int pc,
++		 __maybe_unused __u64 func_ksym)
+ {
+-	return ctx->disassemble(pc, ctx->info);
++	return ctx->disassemble(pc, &ctx->info->info);
+ }
+ 
+ int disasm_init(void)
+@@ -331,7 +349,7 @@ int disasm_print_insn(unsigned char *image, ssize_t len, int opcodes,
+ 	if (!len)
+ 		return -1;
+ 
+-	if (init_context(&ctx, arch, disassembler_options, image, len))
++	if (init_context(&ctx, arch, disassembler_options, image, len, func_ksym))
+ 		return -1;
+ 
+ 	if (json_output)
+@@ -360,7 +378,7 @@ int disasm_print_insn(unsigned char *image, ssize_t len, int opcodes,
+ 			printf("%4x:" DISASM_SPACER, pc);
+ 		}
+ 
+-		count = disassemble_insn(&ctx, image, len, pc);
++		count = disassemble_insn(&ctx, image, len, pc, func_ksym);
+ 
+ 		if (json_output) {
+ 			/* Operand array, was started in fprintf_json. Before
+diff --git a/tools/gpio/gpio-sloppy-logic-analyzer.sh b/tools/gpio/gpio-sloppy-logic-analyzer.sh
+index ed21a110df5e5d..3ef2278e49f916 100755
+--- a/tools/gpio/gpio-sloppy-logic-analyzer.sh
++++ b/tools/gpio/gpio-sloppy-logic-analyzer.sh
+@@ -113,7 +113,7 @@ init_cpu()
+ 		taskset -p "$newmask" "$p" || continue
+ 	done 2>/dev/null >/dev/null
+ 
+-	# Big hammer! Working with 'rcu_momentary_dyntick_idle()' for a more fine-grained solution
++	# Big hammer! Working with 'rcu_momentary_eqs()' for a more fine-grained solution
+ 	# still printed warnings. Same for re-enabling the stall detector after sampling.
+ 	echo 1 > /sys/module/rcupdate/parameters/rcu_cpu_stall_suppress
+ 
+diff --git a/tools/include/nolibc/arch-s390.h b/tools/include/nolibc/arch-s390.h
+index 2ec13d8b9a2db8..f9ab83a219b8a2 100644
+--- a/tools/include/nolibc/arch-s390.h
++++ b/tools/include/nolibc/arch-s390.h
+@@ -10,6 +10,7 @@
+ 
+ #include "compiler.h"
+ #include "crt.h"
++#include "std.h"
+ 
+ /* Syscalls for s390:
+  *   - registers are 64-bit
+diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile
+index 1b22f0f372880e..857a5f7b413d6d 100644
+--- a/tools/lib/bpf/Makefile
++++ b/tools/lib/bpf/Makefile
+@@ -61,7 +61,8 @@ ifndef VERBOSE
+ endif
+ 
+ INCLUDES = -I$(or $(OUTPUT),.) \
+-	   -I$(srctree)/tools/include -I$(srctree)/tools/include/uapi
++	   -I$(srctree)/tools/include -I$(srctree)/tools/include/uapi \
++	   -I$(srctree)/tools/arch/$(SRCARCH)/include
+ 
+ export prefix libdir src obj
+ 
+diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
+index 219facd0e66e8b..5ff643e60d09ca 100644
+--- a/tools/lib/bpf/libbpf.c
++++ b/tools/lib/bpf/libbpf.c
+@@ -3985,7 +3985,7 @@ static bool sym_is_subprog(const Elf64_Sym *sym, int text_shndx)
+ 		return true;
+ 
+ 	/* global function */
+-	return bind == STB_GLOBAL && type == STT_FUNC;
++	return (bind == STB_GLOBAL || bind == STB_WEAK) && type == STT_FUNC;
+ }
+ 
+ static int find_extern_btf_id(const struct btf *btf, const char *ext_name)
+@@ -4389,7 +4389,7 @@ static int bpf_object__collect_externs(struct bpf_object *obj)
+ 
+ static bool prog_is_subprog(const struct bpf_object *obj, const struct bpf_program *prog)
+ {
+-	return prog->sec_idx == obj->efile.text_shndx && obj->nr_programs > 1;
++	return prog->sec_idx == obj->efile.text_shndx;
+ }
+ 
+ struct bpf_program *
+@@ -5094,6 +5094,7 @@ bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
+ 	enum libbpf_map_type map_type = map->libbpf_type;
+ 	char *cp, errmsg[STRERR_BUFSIZE];
+ 	int err, zero = 0;
++	size_t mmap_sz;
+ 
+ 	if (obj->gen_loader) {
+ 		bpf_gen__map_update_elem(obj->gen_loader, map - obj->maps,
+@@ -5107,8 +5108,8 @@ bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
+ 	if (err) {
+ 		err = -errno;
+ 		cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
+-		pr_warn("Error setting initial map(%s) contents: %s\n",
+-			map->name, cp);
++		pr_warn("map '%s': failed to set initial contents: %s\n",
++			bpf_map__name(map), cp);
+ 		return err;
+ 	}
+ 
+@@ -5118,11 +5119,43 @@ bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
+ 		if (err) {
+ 			err = -errno;
+ 			cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
+-			pr_warn("Error freezing map(%s) as read-only: %s\n",
+-				map->name, cp);
++			pr_warn("map '%s': failed to freeze as read-only: %s\n",
++				bpf_map__name(map), cp);
+ 			return err;
+ 		}
+ 	}
++
++	/* Remap anonymous mmap()-ed "map initialization image" as
++	 * a BPF map-backed mmap()-ed memory, but preserving the same
++	 * memory address. This will cause kernel to change process'
++	 * page table to point to a different piece of kernel memory,
++	 * but from userspace point of view memory address (and its
++	 * contents, being identical at this point) will stay the
++	 * same. This mapping will be released by bpf_object__close()
++	 * as per normal clean up procedure.
++	 */
++	mmap_sz = bpf_map_mmap_sz(map);
++	if (map->def.map_flags & BPF_F_MMAPABLE) {
++		void *mmaped;
++		int prot;
++
++		if (map->def.map_flags & BPF_F_RDONLY_PROG)
++			prot = PROT_READ;
++		else
++			prot = PROT_READ | PROT_WRITE;
++		mmaped = mmap(map->mmaped, mmap_sz, prot, MAP_SHARED | MAP_FIXED, map->fd, 0);
++		if (mmaped == MAP_FAILED) {
++			err = -errno;
++			pr_warn("map '%s': failed to re-mmap() contents: %d\n",
++				bpf_map__name(map), err);
++			return err;
++		}
++		map->mmaped = mmaped;
++	} else if (map->mmaped) {
++		munmap(map->mmaped, mmap_sz);
++		map->mmaped = NULL;
++	}
++
+ 	return 0;
+ }
+ 
+@@ -5439,8 +5472,7 @@ bpf_object__create_maps(struct bpf_object *obj)
+ 				err = bpf_object__populate_internal_map(obj, map);
+ 				if (err < 0)
+ 					goto err_out;
+-			}
+-			if (map->def.type == BPF_MAP_TYPE_ARENA) {
++			} else if (map->def.type == BPF_MAP_TYPE_ARENA) {
+ 				map->mmaped = mmap((void *)(long)map->map_extra,
+ 						   bpf_map_mmap_sz(map), PROT_READ | PROT_WRITE,
+ 						   map->map_extra ? MAP_SHARED | MAP_FIXED : MAP_SHARED,
+@@ -7352,8 +7384,14 @@ static int libbpf_prepare_prog_load(struct bpf_program *prog,
+ 		opts->prog_flags |= BPF_F_XDP_HAS_FRAGS;
+ 
+ 	/* special check for usdt to use uprobe_multi link */
+-	if ((def & SEC_USDT) && kernel_supports(prog->obj, FEAT_UPROBE_MULTI_LINK))
++	if ((def & SEC_USDT) && kernel_supports(prog->obj, FEAT_UPROBE_MULTI_LINK)) {
++		/* for BPF_TRACE_UPROBE_MULTI, user might want to query expected_attach_type
++		 * in prog, and expected_attach_type we set in kernel is from opts, so we
++		 * update both.
++		 */
+ 		prog->expected_attach_type = BPF_TRACE_UPROBE_MULTI;
++		opts->expected_attach_type = BPF_TRACE_UPROBE_MULTI;
++	}
+ 
+ 	if ((def & SEC_ATTACH_BTF) && !prog->attach_btf_id) {
+ 		int btf_obj_fd = 0, btf_type_id = 0, err;
+@@ -7443,6 +7481,7 @@ static int bpf_object_load_prog(struct bpf_object *obj, struct bpf_program *prog
+ 	load_attr.attach_btf_id = prog->attach_btf_id;
+ 	load_attr.kern_version = kern_version;
+ 	load_attr.prog_ifindex = prog->prog_ifindex;
++	load_attr.expected_attach_type = prog->expected_attach_type;
+ 
+ 	/* specify func_info/line_info only if kernel supports them */
+ 	if (obj->btf && btf__fd(obj->btf) >= 0 && kernel_supports(obj, FEAT_BTF_FUNC)) {
+@@ -7474,9 +7513,6 @@ static int bpf_object_load_prog(struct bpf_object *obj, struct bpf_program *prog
+ 		insns_cnt = prog->insns_cnt;
+ 	}
+ 
+-	/* allow prog_prepare_load_fn to change expected_attach_type */
+-	load_attr.expected_attach_type = prog->expected_attach_type;
+-
+ 	if (obj->gen_loader) {
+ 		bpf_gen__prog_load(obj->gen_loader, prog->type, prog->name,
+ 				   license, insns, insns_cnt, &load_attr,
+@@ -13877,46 +13913,11 @@ int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
+ 	for (i = 0; i < s->map_cnt; i++) {
+ 		struct bpf_map_skeleton *map_skel = (void *)s->maps + i * s->map_skel_sz;
+ 		struct bpf_map *map = *map_skel->map;
+-		size_t mmap_sz = bpf_map_mmap_sz(map);
+-		int prot, map_fd = map->fd;
+-		void **mmaped = map_skel->mmaped;
+-
+-		if (!mmaped)
+-			continue;
+ 
+-		if (!(map->def.map_flags & BPF_F_MMAPABLE)) {
+-			*mmaped = NULL;
++		if (!map_skel->mmaped)
+ 			continue;
+-		}
+-
+-		if (map->def.type == BPF_MAP_TYPE_ARENA) {
+-			*mmaped = map->mmaped;
+-			continue;
+-		}
+ 
+-		if (map->def.map_flags & BPF_F_RDONLY_PROG)
+-			prot = PROT_READ;
+-		else
+-			prot = PROT_READ | PROT_WRITE;
+-
+-		/* Remap anonymous mmap()-ed "map initialization image" as
+-		 * a BPF map-backed mmap()-ed memory, but preserving the same
+-		 * memory address. This will cause kernel to change process'
+-		 * page table to point to a different piece of kernel memory,
+-		 * but from userspace point of view memory address (and its
+-		 * contents, being identical at this point) will stay the
+-		 * same. This mapping will be released by bpf_object__close()
+-		 * as per normal clean up procedure, so we don't need to worry
+-		 * about it from skeleton's clean up perspective.
+-		 */
+-		*mmaped = mmap(map->mmaped, mmap_sz, prot, MAP_SHARED | MAP_FIXED, map_fd, 0);
+-		if (*mmaped == MAP_FAILED) {
+-			err = -errno;
+-			*mmaped = NULL;
+-			pr_warn("failed to re-mmap() map '%s': %d\n",
+-				 bpf_map__name(map), err);
+-			return libbpf_err(err);
+-		}
++		*map_skel->mmaped = map->mmaped;
+ 	}
+ 
+ 	return 0;
+diff --git a/tools/lib/bpf/linker.c b/tools/lib/bpf/linker.c
+index e0005c6ade88a2..6985ab0f1ca9e8 100644
+--- a/tools/lib/bpf/linker.c
++++ b/tools/lib/bpf/linker.c
+@@ -396,6 +396,8 @@ static int init_output_elf(struct bpf_linker *linker, const char *file)
+ 		pr_warn_elf("failed to create SYMTAB data");
+ 		return -EINVAL;
+ 	}
++	/* Ensure libelf translates byte-order of symbol records */
++	sec->data->d_type = ELF_T_SYM;
+ 
+ 	str_off = strset__add_str(linker->strtab_strs, sec->sec_name);
+ 	if (str_off < 0)
+diff --git a/tools/lib/thermal/commands.c b/tools/lib/thermal/commands.c
+index 73d4d4e8d6ec0b..27b4442f0e347a 100644
+--- a/tools/lib/thermal/commands.c
++++ b/tools/lib/thermal/commands.c
+@@ -261,9 +261,25 @@ static struct genl_ops thermal_cmd_ops = {
+ 	.o_ncmds	= ARRAY_SIZE(thermal_cmds),
+ };
+ 
+-static thermal_error_t thermal_genl_auto(struct thermal_handler *th, int id, int cmd,
+-					 int flags, void *arg)
++struct cmd_param {
++	int tz_id;
++};
++
++typedef int (*cmd_cb_t)(struct nl_msg *, struct cmd_param *);
++
++static int thermal_genl_tz_id_encode(struct nl_msg *msg, struct cmd_param *p)
++{
++	if (p->tz_id >= 0 && nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_ID, p->tz_id))
++		return -1;
++
++	return 0;
++}
++
++static thermal_error_t thermal_genl_auto(struct thermal_handler *th, cmd_cb_t cmd_cb,
++					 struct cmd_param *param,
++					 int cmd, int flags, void *arg)
+ {
++	thermal_error_t ret = THERMAL_ERROR;
+ 	struct nl_msg *msg;
+ 	void *hdr;
+ 
+@@ -274,45 +290,55 @@ static thermal_error_t thermal_genl_auto(struct thermal_handler *th, int id, int
+ 	hdr = genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, thermal_cmd_ops.o_id,
+ 			  0, flags, cmd, THERMAL_GENL_VERSION);
+ 	if (!hdr)
+-		return THERMAL_ERROR;
++		goto out;
+ 
+-	if (id >= 0 && nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_ID, id))
+-		return THERMAL_ERROR;
++	if (cmd_cb && cmd_cb(msg, param))
++		goto out;
+ 
+ 	if (nl_send_msg(th->sk_cmd, th->cb_cmd, msg, genl_handle_msg, arg))
+-		return THERMAL_ERROR;
++		goto out;
+ 
++	ret = THERMAL_SUCCESS;
++out:
+ 	nlmsg_free(msg);
+ 
+-	return THERMAL_SUCCESS;
++	return ret;
+ }
+ 
+ thermal_error_t thermal_cmd_get_tz(struct thermal_handler *th, struct thermal_zone **tz)
+ {
+-	return thermal_genl_auto(th, -1, THERMAL_GENL_CMD_TZ_GET_ID,
++	return thermal_genl_auto(th, NULL, NULL, THERMAL_GENL_CMD_TZ_GET_ID,
+ 				 NLM_F_DUMP | NLM_F_ACK, tz);
+ }
+ 
+ thermal_error_t thermal_cmd_get_cdev(struct thermal_handler *th, struct thermal_cdev **tc)
+ {
+-	return thermal_genl_auto(th, -1, THERMAL_GENL_CMD_CDEV_GET,
++	return thermal_genl_auto(th, NULL, NULL, THERMAL_GENL_CMD_CDEV_GET,
+ 				 NLM_F_DUMP | NLM_F_ACK, tc);
+ }
+ 
+ thermal_error_t thermal_cmd_get_trip(struct thermal_handler *th, struct thermal_zone *tz)
+ {
+-	return thermal_genl_auto(th, tz->id, THERMAL_GENL_CMD_TZ_GET_TRIP,
+-				 0, tz);
++	struct cmd_param p = { .tz_id = tz->id };
++
++	return thermal_genl_auto(th, thermal_genl_tz_id_encode, &p,
++				 THERMAL_GENL_CMD_TZ_GET_TRIP, 0, tz);
+ }
+ 
+ thermal_error_t thermal_cmd_get_governor(struct thermal_handler *th, struct thermal_zone *tz)
+ {
+-	return thermal_genl_auto(th, tz->id, THERMAL_GENL_CMD_TZ_GET_GOV, 0, tz);
++	struct cmd_param p = { .tz_id = tz->id };
++
++	return thermal_genl_auto(th, thermal_genl_tz_id_encode, &p,
++				 THERMAL_GENL_CMD_TZ_GET_GOV, 0, tz);
+ }
+ 
+ thermal_error_t thermal_cmd_get_temp(struct thermal_handler *th, struct thermal_zone *tz)
+ {
+-	return thermal_genl_auto(th, tz->id, THERMAL_GENL_CMD_TZ_GET_TEMP, 0, tz);
++	struct cmd_param p = { .tz_id = tz->id };
++
++	return thermal_genl_auto(th, thermal_genl_tz_id_encode, &p,
++				 THERMAL_GENL_CMD_TZ_GET_TEMP, 0, tz);
+ }
+ 
+ thermal_error_t thermal_cmd_exit(struct thermal_handler *th)
+diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
+index d4332675babb74..2ce71d2e5fae05 100644
+--- a/tools/perf/Makefile.config
++++ b/tools/perf/Makefile.config
+@@ -1194,7 +1194,7 @@ endif
+ ifneq ($(NO_LIBTRACEEVENT),1)
+   $(call feature_check,libtraceevent)
+   ifeq ($(feature-libtraceevent), 1)
+-    CFLAGS += -DHAVE_LIBTRACEEVENT
++    CFLAGS += -DHAVE_LIBTRACEEVENT $(shell $(PKG_CONFIG) --cflags libtraceevent)
+     LDFLAGS += $(shell $(PKG_CONFIG) --libs-only-L libtraceevent)
+     EXTLIBS += $(shell $(PKG_CONFIG) --libs-only-l libtraceevent)
+     LIBTRACEEVENT_VERSION := $(shell $(PKG_CONFIG) --modversion libtraceevent).0.0
+diff --git a/tools/perf/builtin-ftrace.c b/tools/perf/builtin-ftrace.c
+index abcdc49b7a987f..272d3c70810e7d 100644
+--- a/tools/perf/builtin-ftrace.c
++++ b/tools/perf/builtin-ftrace.c
+@@ -815,7 +815,7 @@ static void display_histogram(int buckets[], bool use_nsec)
+ 
+ 	bar_len = buckets[0] * bar_total / total;
+ 	printf("  %4d - %-4d %s | %10d | %.*s%*s |\n",
+-	       0, 1, "us", buckets[0], bar_len, bar, bar_total - bar_len, "");
++	       0, 1, use_nsec ? "ns" : "us", buckets[0], bar_len, bar, bar_total - bar_len, "");
+ 
+ 	for (i = 1; i < NUM_BUCKET - 1; i++) {
+ 		int start = (1 << (i - 1));
+diff --git a/tools/perf/builtin-list.c b/tools/perf/builtin-list.c
+index 65b8cba324be4b..c5331721dfee98 100644
+--- a/tools/perf/builtin-list.c
++++ b/tools/perf/builtin-list.c
+@@ -112,7 +112,7 @@ static void wordwrap(FILE *fp, const char *s, int start, int max, int corr)
+ 	}
+ }
+ 
+-static void default_print_event(void *ps, const char *pmu_name, const char *topic,
++static void default_print_event(void *ps, const char *topic, const char *pmu_name,
+ 				const char *event_name, const char *event_alias,
+ 				const char *scale_unit __maybe_unused,
+ 				bool deprecated, const char *event_type_desc,
+@@ -353,7 +353,7 @@ static void fix_escape_fprintf(FILE *fp, struct strbuf *buf, const char *fmt, ..
+ 	fputs(buf->buf, fp);
+ }
+ 
+-static void json_print_event(void *ps, const char *pmu_name, const char *topic,
++static void json_print_event(void *ps, const char *topic, const char *pmu_name,
+ 			     const char *event_name, const char *event_alias,
+ 			     const char *scale_unit,
+ 			     bool deprecated, const char *event_type_desc,
+diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
+index 689a3d43c2584f..4933efdfee76fb 100644
+--- a/tools/perf/builtin-stat.c
++++ b/tools/perf/builtin-stat.c
+@@ -716,15 +716,19 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
+ 	}
+ 
+ 	if (!cpu_map__is_dummy(evsel_list->core.user_requested_cpus)) {
+-		if (affinity__setup(&saved_affinity) < 0)
+-			return -1;
++		if (affinity__setup(&saved_affinity) < 0) {
++			err = -1;
++			goto err_out;
++		}
+ 		affinity = &saved_affinity;
+ 	}
+ 
+ 	evlist__for_each_entry(evsel_list, counter) {
+ 		counter->reset_group = false;
+-		if (bpf_counter__load(counter, &target))
+-			return -1;
++		if (bpf_counter__load(counter, &target)) {
++			err = -1;
++			goto err_out;
++		}
+ 		if (!(evsel__is_bperf(counter)))
+ 			all_counters_use_bpf = false;
+ 	}
+@@ -767,7 +771,8 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
+ 
+ 			switch (stat_handle_error(counter)) {
+ 			case COUNTER_FATAL:
+-				return -1;
++				err = -1;
++				goto err_out;
+ 			case COUNTER_RETRY:
+ 				goto try_again;
+ 			case COUNTER_SKIP:
+@@ -808,7 +813,8 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
+ 
+ 				switch (stat_handle_error(counter)) {
+ 				case COUNTER_FATAL:
+-					return -1;
++					err = -1;
++					goto err_out;
+ 				case COUNTER_RETRY:
+ 					goto try_again_reset;
+ 				case COUNTER_SKIP:
+@@ -821,6 +827,7 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
+ 		}
+ 	}
+ 	affinity__cleanup(affinity);
++	affinity = NULL;
+ 
+ 	evlist__for_each_entry(evsel_list, counter) {
+ 		if (!counter->supported) {
+@@ -833,8 +840,10 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
+ 			stat_config.unit_width = l;
+ 
+ 		if (evsel__should_store_id(counter) &&
+-		    evsel__store_ids(counter, evsel_list))
+-			return -1;
++		    evsel__store_ids(counter, evsel_list)) {
++			err = -1;
++			goto err_out;
++		}
+ 	}
+ 
+ 	if (evlist__apply_filters(evsel_list, &counter, &target)) {
+@@ -855,20 +864,23 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
+ 		}
+ 
+ 		if (err < 0)
+-			return err;
++			goto err_out;
+ 
+ 		err = perf_event__synthesize_stat_events(&stat_config, NULL, evsel_list,
+ 							 process_synthesized_event, is_pipe);
+ 		if (err < 0)
+-			return err;
++			goto err_out;
++
+ 	}
+ 
+ 	if (target.initial_delay) {
+ 		pr_info(EVLIST_DISABLED_MSG);
+ 	} else {
+ 		err = enable_counters();
+-		if (err)
+-			return -1;
++		if (err) {
++			err = -1;
++			goto err_out;
++		}
+ 	}
+ 
+ 	/* Exec the command, if any */
+@@ -878,8 +890,10 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
+ 	if (target.initial_delay > 0) {
+ 		usleep(target.initial_delay * USEC_PER_MSEC);
+ 		err = enable_counters();
+-		if (err)
+-			return -1;
++		if (err) {
++			err = -1;
++			goto err_out;
++		}
+ 
+ 		pr_info(EVLIST_ENABLED_MSG);
+ 	}
+@@ -899,7 +913,8 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
+ 		if (workload_exec_errno) {
+ 			const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
+ 			pr_err("Workload failed: %s\n", emsg);
+-			return -1;
++			err = -1;
++			goto err_out;
+ 		}
+ 
+ 		if (WIFSIGNALED(status))
+@@ -946,6 +961,13 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
+ 		evlist__close(evsel_list);
+ 
+ 	return WEXITSTATUS(status);
++
++err_out:
++	if (forks)
++		evlist__cancel_workload(evsel_list);
++
++	affinity__cleanup(affinity);
++	return err;
+ }
+ 
+ static int run_perf_stat(int argc, const char **argv, int run_idx)
+diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
+index d3f11b90d0255c..ffa1295273099e 100644
+--- a/tools/perf/builtin-trace.c
++++ b/tools/perf/builtin-trace.c
+@@ -2702,6 +2702,7 @@ static int trace__fprintf_sys_enter(struct trace *trace, struct evsel *evsel,
+ 	char msg[1024];
+ 	void *args, *augmented_args = NULL;
+ 	int augmented_args_size;
++	size_t printed = 0;
+ 
+ 	if (sc == NULL)
+ 		return -1;
+@@ -2717,8 +2718,8 @@ static int trace__fprintf_sys_enter(struct trace *trace, struct evsel *evsel,
+ 
+ 	args = perf_evsel__sc_tp_ptr(evsel, args, sample);
+ 	augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls_args_size);
+-	syscall__scnprintf_args(sc, msg, sizeof(msg), args, augmented_args, augmented_args_size, trace, thread);
+-	fprintf(trace->output, "%s", msg);
++	printed += syscall__scnprintf_args(sc, msg, sizeof(msg), args, augmented_args, augmented_args_size, trace, thread);
++	fprintf(trace->output, "%.*s", (int)printed, msg);
+ 	err = 0;
+ out_put:
+ 	thread__put(thread);
+@@ -3087,7 +3088,7 @@ static size_t trace__fprintf_tp_fields(struct trace *trace, struct evsel *evsel,
+ 		printed += syscall_arg_fmt__scnprintf_val(arg, bf + printed, size - printed, &syscall_arg, val);
+ 	}
+ 
+-	return printed + fprintf(trace->output, "%s", bf);
++	return printed + fprintf(trace->output, "%.*s", (int)printed, bf);
+ }
+ 
+ static int trace__event_handler(struct trace *trace, struct evsel *evsel,
+@@ -3096,13 +3097,8 @@ static int trace__event_handler(struct trace *trace, struct evsel *evsel,
+ {
+ 	struct thread *thread;
+ 	int callchain_ret = 0;
+-	/*
+-	 * Check if we called perf_evsel__disable(evsel) due to, for instance,
+-	 * this event's max_events having been hit and this is an entry coming
+-	 * from the ring buffer that we should discard, since the max events
+-	 * have already been considered/printed.
+-	 */
+-	if (evsel->disabled)
++
++	if (evsel->nr_events_printed >= evsel->max_events)
+ 		return 0;
+ 
+ 	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
+@@ -4326,6 +4322,9 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
+ 					sizeof(__u32), BPF_ANY);
+ 		}
+ 	}
++
++	if (trace->skel)
++		trace->filter_pids.map = trace->skel->maps.pids_filtered;
+ #endif
+ 	err = trace__set_filter_pids(trace);
+ 	if (err < 0)
+@@ -5449,6 +5448,10 @@ int cmd_trace(int argc, const char **argv)
+ 	if (trace.summary_only)
+ 		trace.summary = trace.summary_only;
+ 
++	/* Keep exited threads, otherwise information might be lost for summary */
++	if (trace.summary)
++		symbol_conf.keep_exited_threads = true;
++
+ 	if (output_name != NULL) {
+ 		err = trace__open_output(&trace, output_name);
+ 		if (err < 0) {
+diff --git a/tools/perf/pmu-events/empty-pmu-events.c b/tools/perf/pmu-events/empty-pmu-events.c
+index c592079982fbd8..873e9fb2041f02 100644
+--- a/tools/perf/pmu-events/empty-pmu-events.c
++++ b/tools/perf/pmu-events/empty-pmu-events.c
+@@ -380,7 +380,7 @@ int pmu_events_table__for_each_event(const struct pmu_events_table *table,
+                         continue;
+ 
+                 ret = pmu_events_table__for_each_event_pmu(table, table_pmu, fn, data);
+-                if (pmu || ret)
++                if (ret)
+                         return ret;
+         }
+         return 0;
+diff --git a/tools/perf/pmu-events/jevents.py b/tools/perf/pmu-events/jevents.py
+index bb0a5d92df4a15..d46a22fb5573de 100755
+--- a/tools/perf/pmu-events/jevents.py
++++ b/tools/perf/pmu-events/jevents.py
+@@ -930,7 +930,7 @@ int pmu_events_table__for_each_event(const struct pmu_events_table *table,
+                         continue;
+ 
+                 ret = pmu_events_table__for_each_event_pmu(table, table_pmu, fn, data);
+-                if (pmu || ret)
++                if (ret)
+                         return ret;
+         }
+         return 0;
+diff --git a/tools/perf/tests/attr/test-stat-default b/tools/perf/tests/attr/test-stat-default
+index a1e2da0a9a6ddb..e47fb49446799b 100644
+--- a/tools/perf/tests/attr/test-stat-default
++++ b/tools/perf/tests/attr/test-stat-default
+@@ -88,98 +88,142 @@ enable_on_exec=0
+ read_format=15
+ optional=1
+ 
+-# PERF_TYPE_RAW / topdown-fe-bound (0x8200)
++# PERF_TYPE_RAW / topdown-bad-spec (0x8100)
+ [event13:base-stat]
+ fd=13
+ group_fd=11
+ type=4
+-config=33280
++config=33024
+ disabled=0
+ enable_on_exec=0
+ read_format=15
+ optional=1
+ 
+-# PERF_TYPE_RAW / topdown-be-bound (0x8300)
++# PERF_TYPE_RAW / topdown-fe-bound (0x8200)
+ [event14:base-stat]
+ fd=14
+ group_fd=11
+ type=4
+-config=33536
++config=33280
+ disabled=0
+ enable_on_exec=0
+ read_format=15
+ optional=1
+ 
+-# PERF_TYPE_RAW / topdown-bad-spec (0x8100)
++# PERF_TYPE_RAW / topdown-be-bound (0x8300)
+ [event15:base-stat]
+ fd=15
+ group_fd=11
+ type=4
+-config=33024
++config=33536
+ disabled=0
+ enable_on_exec=0
+ read_format=15
+ optional=1
+ 
+-# PERF_TYPE_RAW / INT_MISC.UOP_DROPPING
++# PERF_TYPE_RAW / topdown-heavy-ops (0x8400)
+ [event16:base-stat]
+ fd=16
++group_fd=11
+ type=4
+-config=4109
++config=33792
++disabled=0
++enable_on_exec=0
++read_format=15
+ optional=1
+ 
+-# PERF_TYPE_RAW / cpu/INT_MISC.RECOVERY_CYCLES,cmask=1,edge/
++# PERF_TYPE_RAW / topdown-br-mispredict (0x8500)
+ [event17:base-stat]
+ fd=17
++group_fd=11
+ type=4
+-config=17039629
++config=34048
++disabled=0
++enable_on_exec=0
++read_format=15
+ optional=1
+ 
+-# PERF_TYPE_RAW / CPU_CLK_UNHALTED.THREAD
++# PERF_TYPE_RAW / topdown-fetch-lat (0x8600)
+ [event18:base-stat]
+ fd=18
++group_fd=11
+ type=4
+-config=60
++config=34304
++disabled=0
++enable_on_exec=0
++read_format=15
+ optional=1
+ 
+-# PERF_TYPE_RAW / INT_MISC.RECOVERY_CYCLES_ANY
++# PERF_TYPE_RAW / topdown-mem-bound (0x8700)
+ [event19:base-stat]
+ fd=19
++group_fd=11
+ type=4
+-config=2097421
++config=34560
++disabled=0
++enable_on_exec=0
++read_format=15
+ optional=1
+ 
+-# PERF_TYPE_RAW / CPU_CLK_UNHALTED.REF_XCLK
++# PERF_TYPE_RAW / INT_MISC.UOP_DROPPING
+ [event20:base-stat]
+ fd=20
+ type=4
+-config=316
++config=4109
+ optional=1
+ 
+-# PERF_TYPE_RAW / IDQ_UOPS_NOT_DELIVERED.CORE
++# PERF_TYPE_RAW / cpu/INT_MISC.RECOVERY_CYCLES,cmask=1,edge/
+ [event21:base-stat]
+ fd=21
+ type=4
+-config=412
++config=17039629
+ optional=1
+ 
+-# PERF_TYPE_RAW / CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE
++# PERF_TYPE_RAW / CPU_CLK_UNHALTED.THREAD
+ [event22:base-stat]
+ fd=22
+ type=4
+-config=572
++config=60
+ optional=1
+ 
+-# PERF_TYPE_RAW / UOPS_RETIRED.RETIRE_SLOTS
++# PERF_TYPE_RAW / INT_MISC.RECOVERY_CYCLES_ANY
+ [event23:base-stat]
+ fd=23
+ type=4
+-config=706
++config=2097421
+ optional=1
+ 
+-# PERF_TYPE_RAW / UOPS_ISSUED.ANY
++# PERF_TYPE_RAW / CPU_CLK_UNHALTED.REF_XCLK
+ [event24:base-stat]
+ fd=24
+ type=4
++config=316
++optional=1
++
++# PERF_TYPE_RAW / IDQ_UOPS_NOT_DELIVERED.CORE
++[event25:base-stat]
++fd=25
++type=4
++config=412
++optional=1
++
++# PERF_TYPE_RAW / CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE
++[event26:base-stat]
++fd=26
++type=4
++config=572
++optional=1
++
++# PERF_TYPE_RAW / UOPS_RETIRED.RETIRE_SLOTS
++[event27:base-stat]
++fd=27
++type=4
++config=706
++optional=1
++
++# PERF_TYPE_RAW / UOPS_ISSUED.ANY
++[event28:base-stat]
++fd=28
++type=4
+ config=270
+ optional=1
+diff --git a/tools/perf/tests/attr/test-stat-detailed-1 b/tools/perf/tests/attr/test-stat-detailed-1
+index 1c52cb05c900d7..3d500d3e0c5c8a 100644
+--- a/tools/perf/tests/attr/test-stat-detailed-1
++++ b/tools/perf/tests/attr/test-stat-detailed-1
+@@ -90,99 +90,143 @@ enable_on_exec=0
+ read_format=15
+ optional=1
+ 
+-# PERF_TYPE_RAW / topdown-fe-bound (0x8200)
++# PERF_TYPE_RAW / topdown-bad-spec (0x8100)
+ [event13:base-stat]
+ fd=13
+ group_fd=11
+ type=4
+-config=33280
++config=33024
+ disabled=0
+ enable_on_exec=0
+ read_format=15
+ optional=1
+ 
+-# PERF_TYPE_RAW / topdown-be-bound (0x8300)
++# PERF_TYPE_RAW / topdown-fe-bound (0x8200)
+ [event14:base-stat]
+ fd=14
+ group_fd=11
+ type=4
+-config=33536
++config=33280
+ disabled=0
+ enable_on_exec=0
+ read_format=15
+ optional=1
+ 
+-# PERF_TYPE_RAW / topdown-bad-spec (0x8100)
++# PERF_TYPE_RAW / topdown-be-bound (0x8300)
+ [event15:base-stat]
+ fd=15
+ group_fd=11
+ type=4
+-config=33024
++config=33536
+ disabled=0
+ enable_on_exec=0
+ read_format=15
+ optional=1
+ 
+-# PERF_TYPE_RAW / INT_MISC.UOP_DROPPING
++# PERF_TYPE_RAW / topdown-heavy-ops (0x8400)
+ [event16:base-stat]
+ fd=16
++group_fd=11
+ type=4
+-config=4109
++config=33792
++disabled=0
++enable_on_exec=0
++read_format=15
+ optional=1
+ 
+-# PERF_TYPE_RAW / cpu/INT_MISC.RECOVERY_CYCLES,cmask=1,edge/
++# PERF_TYPE_RAW / topdown-br-mispredict (0x8500)
+ [event17:base-stat]
+ fd=17
++group_fd=11
+ type=4
+-config=17039629
++config=34048
++disabled=0
++enable_on_exec=0
++read_format=15
+ optional=1
+ 
+-# PERF_TYPE_RAW / CPU_CLK_UNHALTED.THREAD
++# PERF_TYPE_RAW / topdown-fetch-lat (0x8600)
+ [event18:base-stat]
+ fd=18
++group_fd=11
+ type=4
+-config=60
++config=34304
++disabled=0
++enable_on_exec=0
++read_format=15
+ optional=1
+ 
+-# PERF_TYPE_RAW / INT_MISC.RECOVERY_CYCLES_ANY
++# PERF_TYPE_RAW / topdown-mem-bound (0x8700)
+ [event19:base-stat]
+ fd=19
++group_fd=11
+ type=4
+-config=2097421
++config=34560
++disabled=0
++enable_on_exec=0
++read_format=15
+ optional=1
+ 
+-# PERF_TYPE_RAW / CPU_CLK_UNHALTED.REF_XCLK
++# PERF_TYPE_RAW / INT_MISC.UOP_DROPPING
+ [event20:base-stat]
+ fd=20
+ type=4
+-config=316
++config=4109
+ optional=1
+ 
+-# PERF_TYPE_RAW / IDQ_UOPS_NOT_DELIVERED.CORE
++# PERF_TYPE_RAW / cpu/INT_MISC.RECOVERY_CYCLES,cmask=1,edge/
+ [event21:base-stat]
+ fd=21
+ type=4
+-config=412
++config=17039629
+ optional=1
+ 
+-# PERF_TYPE_RAW / CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE
++# PERF_TYPE_RAW / CPU_CLK_UNHALTED.THREAD
+ [event22:base-stat]
+ fd=22
+ type=4
+-config=572
++config=60
+ optional=1
+ 
+-# PERF_TYPE_RAW / UOPS_RETIRED.RETIRE_SLOTS
++# PERF_TYPE_RAW / INT_MISC.RECOVERY_CYCLES_ANY
+ [event23:base-stat]
+ fd=23
+ type=4
+-config=706
++config=2097421
+ optional=1
+ 
+-# PERF_TYPE_RAW / UOPS_ISSUED.ANY
++# PERF_TYPE_RAW / CPU_CLK_UNHALTED.REF_XCLK
+ [event24:base-stat]
+ fd=24
+ type=4
++config=316
++optional=1
++
++# PERF_TYPE_RAW / IDQ_UOPS_NOT_DELIVERED.CORE
++[event25:base-stat]
++fd=25
++type=4
++config=412
++optional=1
++
++# PERF_TYPE_RAW / CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE
++[event26:base-stat]
++fd=26
++type=4
++config=572
++optional=1
++
++# PERF_TYPE_RAW / UOPS_RETIRED.RETIRE_SLOTS
++[event27:base-stat]
++fd=27
++type=4
++config=706
++optional=1
++
++# PERF_TYPE_RAW / UOPS_ISSUED.ANY
++[event28:base-stat]
++fd=28
++type=4
+ config=270
+ optional=1
+ 
+@@ -190,8 +234,8 @@ optional=1
+ #  PERF_COUNT_HW_CACHE_L1D                <<  0  |
+ # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
+ # (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)
+-[event25:base-stat]
+-fd=25
++[event29:base-stat]
++fd=29
+ type=3
+ config=0
+ optional=1
+@@ -200,8 +244,8 @@ optional=1
+ #  PERF_COUNT_HW_CACHE_L1D                <<  0  |
+ # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
+ # (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)
+-[event26:base-stat]
+-fd=26
++[event30:base-stat]
++fd=30
+ type=3
+ config=65536
+ optional=1
+@@ -210,8 +254,8 @@ optional=1
+ #  PERF_COUNT_HW_CACHE_LL                 <<  0  |
+ # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
+ # (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)
+-[event27:base-stat]
+-fd=27
++[event31:base-stat]
++fd=31
+ type=3
+ config=2
+ optional=1
+@@ -220,8 +264,8 @@ optional=1
+ #  PERF_COUNT_HW_CACHE_LL                 <<  0  |
+ # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
+ # (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)
+-[event28:base-stat]
+-fd=28
++[event32:base-stat]
++fd=32
+ type=3
+ config=65538
+ optional=1
+diff --git a/tools/perf/tests/attr/test-stat-detailed-2 b/tools/perf/tests/attr/test-stat-detailed-2
+index 7e961d24a885a7..01777a63752fe6 100644
+--- a/tools/perf/tests/attr/test-stat-detailed-2
++++ b/tools/perf/tests/attr/test-stat-detailed-2
+@@ -90,99 +90,143 @@ enable_on_exec=0
+ read_format=15
+ optional=1
+ 
+-# PERF_TYPE_RAW / topdown-fe-bound (0x8200)
++# PERF_TYPE_RAW / topdown-bad-spec (0x8100)
+ [event13:base-stat]
+ fd=13
+ group_fd=11
+ type=4
+-config=33280
++config=33024
+ disabled=0
+ enable_on_exec=0
+ read_format=15
+ optional=1
+ 
+-# PERF_TYPE_RAW / topdown-be-bound (0x8300)
++# PERF_TYPE_RAW / topdown-fe-bound (0x8200)
+ [event14:base-stat]
+ fd=14
+ group_fd=11
+ type=4
+-config=33536
++config=33280
+ disabled=0
+ enable_on_exec=0
+ read_format=15
+ optional=1
+ 
+-# PERF_TYPE_RAW / topdown-bad-spec (0x8100)
++# PERF_TYPE_RAW / topdown-be-bound (0x8300)
+ [event15:base-stat]
+ fd=15
+ group_fd=11
+ type=4
+-config=33024
++config=33536
+ disabled=0
+ enable_on_exec=0
+ read_format=15
+ optional=1
+ 
+-# PERF_TYPE_RAW / INT_MISC.UOP_DROPPING
++# PERF_TYPE_RAW / topdown-heavy-ops (0x8400)
+ [event16:base-stat]
+ fd=16
++group_fd=11
+ type=4
+-config=4109
++config=33792
++disabled=0
++enable_on_exec=0
++read_format=15
+ optional=1
+ 
+-# PERF_TYPE_RAW / cpu/INT_MISC.RECOVERY_CYCLES,cmask=1,edge/
++# PERF_TYPE_RAW / topdown-br-mispredict (0x8500)
+ [event17:base-stat]
+ fd=17
++group_fd=11
+ type=4
+-config=17039629
++config=34048
++disabled=0
++enable_on_exec=0
++read_format=15
+ optional=1
+ 
+-# PERF_TYPE_RAW / CPU_CLK_UNHALTED.THREAD
++# PERF_TYPE_RAW / topdown-fetch-lat (0x8600)
+ [event18:base-stat]
+ fd=18
++group_fd=11
+ type=4
+-config=60
++config=34304
++disabled=0
++enable_on_exec=0
++read_format=15
+ optional=1
+ 
+-# PERF_TYPE_RAW / INT_MISC.RECOVERY_CYCLES_ANY
++# PERF_TYPE_RAW / topdown-mem-bound (0x8700)
+ [event19:base-stat]
+ fd=19
++group_fd=11
+ type=4
+-config=2097421
++config=34560
++disabled=0
++enable_on_exec=0
++read_format=15
+ optional=1
+ 
+-# PERF_TYPE_RAW / CPU_CLK_UNHALTED.REF_XCLK
++# PERF_TYPE_RAW / INT_MISC.UOP_DROPPING
+ [event20:base-stat]
+ fd=20
+ type=4
+-config=316
++config=4109
+ optional=1
+ 
+-# PERF_TYPE_RAW / IDQ_UOPS_NOT_DELIVERED.CORE
++# PERF_TYPE_RAW / cpu/INT_MISC.RECOVERY_CYCLES,cmask=1,edge/
+ [event21:base-stat]
+ fd=21
+ type=4
+-config=412
++config=17039629
+ optional=1
+ 
+-# PERF_TYPE_RAW / CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE
++# PERF_TYPE_RAW / CPU_CLK_UNHALTED.THREAD
+ [event22:base-stat]
+ fd=22
+ type=4
+-config=572
++config=60
+ optional=1
+ 
+-# PERF_TYPE_RAW / UOPS_RETIRED.RETIRE_SLOTS
++# PERF_TYPE_RAW / INT_MISC.RECOVERY_CYCLES_ANY
+ [event23:base-stat]
+ fd=23
+ type=4
+-config=706
++config=2097421
+ optional=1
+ 
+-# PERF_TYPE_RAW / UOPS_ISSUED.ANY
++# PERF_TYPE_RAW / CPU_CLK_UNHALTED.REF_XCLK
+ [event24:base-stat]
+ fd=24
+ type=4
++config=316
++optional=1
++
++# PERF_TYPE_RAW / IDQ_UOPS_NOT_DELIVERED.CORE
++[event25:base-stat]
++fd=25
++type=4
++config=412
++optional=1
++
++# PERF_TYPE_RAW / CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE
++[event26:base-stat]
++fd=26
++type=4
++config=572
++optional=1
++
++# PERF_TYPE_RAW / UOPS_RETIRED.RETIRE_SLOTS
++[event27:base-stat]
++fd=27
++type=4
++config=706
++optional=1
++
++# PERF_TYPE_RAW / UOPS_ISSUED.ANY
++[event28:base-stat]
++fd=28
++type=4
+ config=270
+ optional=1
+ 
+@@ -190,8 +234,8 @@ optional=1
+ #  PERF_COUNT_HW_CACHE_L1D                <<  0  |
+ # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
+ # (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)
+-[event25:base-stat]
+-fd=25
++[event29:base-stat]
++fd=29
+ type=3
+ config=0
+ optional=1
+@@ -200,8 +244,8 @@ optional=1
+ #  PERF_COUNT_HW_CACHE_L1D                <<  0  |
+ # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
+ # (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)
+-[event26:base-stat]
+-fd=26
++[event30:base-stat]
++fd=30
+ type=3
+ config=65536
+ optional=1
+@@ -210,8 +254,8 @@ optional=1
+ #  PERF_COUNT_HW_CACHE_LL                 <<  0  |
+ # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
+ # (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)
+-[event27:base-stat]
+-fd=27
++[event31:base-stat]
++fd=31
+ type=3
+ config=2
+ optional=1
+@@ -220,8 +264,8 @@ optional=1
+ #  PERF_COUNT_HW_CACHE_LL                 <<  0  |
+ # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
+ # (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)
+-[event28:base-stat]
+-fd=28
++[event32:base-stat]
++fd=32
+ type=3
+ config=65538
+ optional=1
+@@ -230,8 +274,8 @@ optional=1
+ #  PERF_COUNT_HW_CACHE_L1I                <<  0  |
+ # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
+ # (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)
+-[event29:base-stat]
+-fd=29
++[event33:base-stat]
++fd=33
+ type=3
+ config=1
+ optional=1
+@@ -240,8 +284,8 @@ optional=1
+ #  PERF_COUNT_HW_CACHE_L1I                <<  0  |
+ # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
+ # (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)
+-[event30:base-stat]
+-fd=30
++[event34:base-stat]
++fd=34
+ type=3
+ config=65537
+ optional=1
+@@ -250,8 +294,8 @@ optional=1
+ #  PERF_COUNT_HW_CACHE_DTLB               <<  0  |
+ # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
+ # (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)
+-[event31:base-stat]
+-fd=31
++[event35:base-stat]
++fd=35
+ type=3
+ config=3
+ optional=1
+@@ -260,8 +304,8 @@ optional=1
+ #  PERF_COUNT_HW_CACHE_DTLB               <<  0  |
+ # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
+ # (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)
+-[event32:base-stat]
+-fd=32
++[event36:base-stat]
++fd=36
+ type=3
+ config=65539
+ optional=1
+@@ -270,8 +314,8 @@ optional=1
+ #  PERF_COUNT_HW_CACHE_ITLB               <<  0  |
+ # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
+ # (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)
+-[event33:base-stat]
+-fd=33
++[event37:base-stat]
++fd=37
+ type=3
+ config=4
+ optional=1
+@@ -280,8 +324,8 @@ optional=1
+ #  PERF_COUNT_HW_CACHE_ITLB               <<  0  |
+ # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
+ # (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)
+-[event34:base-stat]
+-fd=34
++[event38:base-stat]
++fd=38
+ type=3
+ config=65540
+ optional=1
+diff --git a/tools/perf/tests/attr/test-stat-detailed-3 b/tools/perf/tests/attr/test-stat-detailed-3
+index e50535f45977c6..8400abd7e1e488 100644
+--- a/tools/perf/tests/attr/test-stat-detailed-3
++++ b/tools/perf/tests/attr/test-stat-detailed-3
+@@ -90,99 +90,143 @@ enable_on_exec=0
+ read_format=15
+ optional=1
+ 
+-# PERF_TYPE_RAW / topdown-fe-bound (0x8200)
++# PERF_TYPE_RAW / topdown-bad-spec (0x8100)
+ [event13:base-stat]
+ fd=13
+ group_fd=11
+ type=4
+-config=33280
++config=33024
+ disabled=0
+ enable_on_exec=0
+ read_format=15
+ optional=1
+ 
+-# PERF_TYPE_RAW / topdown-be-bound (0x8300)
++# PERF_TYPE_RAW / topdown-fe-bound (0x8200)
+ [event14:base-stat]
+ fd=14
+ group_fd=11
+ type=4
+-config=33536
++config=33280
+ disabled=0
+ enable_on_exec=0
+ read_format=15
+ optional=1
+ 
+-# PERF_TYPE_RAW / topdown-bad-spec (0x8100)
++# PERF_TYPE_RAW / topdown-be-bound (0x8300)
+ [event15:base-stat]
+ fd=15
+ group_fd=11
+ type=4
+-config=33024
++config=33536
+ disabled=0
+ enable_on_exec=0
+ read_format=15
+ optional=1
+ 
+-# PERF_TYPE_RAW / INT_MISC.UOP_DROPPING
++# PERF_TYPE_RAW / topdown-heavy-ops (0x8400)
+ [event16:base-stat]
+ fd=16
++group_fd=11
+ type=4
+-config=4109
++config=33792
++disabled=0
++enable_on_exec=0
++read_format=15
+ optional=1
+ 
+-# PERF_TYPE_RAW / cpu/INT_MISC.RECOVERY_CYCLES,cmask=1,edge/
++# PERF_TYPE_RAW / topdown-br-mispredict (0x8500)
+ [event17:base-stat]
+ fd=17
++group_fd=11
+ type=4
+-config=17039629
++config=34048
++disabled=0
++enable_on_exec=0
++read_format=15
+ optional=1
+ 
+-# PERF_TYPE_RAW / CPU_CLK_UNHALTED.THREAD
++# PERF_TYPE_RAW / topdown-fetch-lat (0x8600)
+ [event18:base-stat]
+ fd=18
++group_fd=11
+ type=4
+-config=60
++config=34304
++disabled=0
++enable_on_exec=0
++read_format=15
+ optional=1
+ 
+-# PERF_TYPE_RAW / INT_MISC.RECOVERY_CYCLES_ANY
++# PERF_TYPE_RAW / topdown-mem-bound (0x8700)
+ [event19:base-stat]
+ fd=19
++group_fd=11
+ type=4
+-config=2097421
++config=34560
++disabled=0
++enable_on_exec=0
++read_format=15
+ optional=1
+ 
+-# PERF_TYPE_RAW / CPU_CLK_UNHALTED.REF_XCLK
++# PERF_TYPE_RAW / INT_MISC.UOP_DROPPING
+ [event20:base-stat]
+ fd=20
+ type=4
+-config=316
++config=4109
+ optional=1
+ 
+-# PERF_TYPE_RAW / IDQ_UOPS_NOT_DELIVERED.CORE
++# PERF_TYPE_RAW / cpu/INT_MISC.RECOVERY_CYCLES,cmask=1,edge/
+ [event21:base-stat]
+ fd=21
+ type=4
+-config=412
++config=17039629
+ optional=1
+ 
+-# PERF_TYPE_RAW / CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE
++# PERF_TYPE_RAW / CPU_CLK_UNHALTED.THREAD
+ [event22:base-stat]
+ fd=22
+ type=4
+-config=572
++config=60
+ optional=1
+ 
+-# PERF_TYPE_RAW / UOPS_RETIRED.RETIRE_SLOTS
++# PERF_TYPE_RAW / INT_MISC.RECOVERY_CYCLES_ANY
+ [event23:base-stat]
+ fd=23
+ type=4
+-config=706
++config=2097421
+ optional=1
+ 
+-# PERF_TYPE_RAW / UOPS_ISSUED.ANY
++# PERF_TYPE_RAW / CPU_CLK_UNHALTED.REF_XCLK
+ [event24:base-stat]
+ fd=24
+ type=4
++config=316
++optional=1
++
++# PERF_TYPE_RAW / IDQ_UOPS_NOT_DELIVERED.CORE
++[event25:base-stat]
++fd=25
++type=4
++config=412
++optional=1
++
++# PERF_TYPE_RAW / CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE
++[event26:base-stat]
++fd=26
++type=4
++config=572
++optional=1
++
++# PERF_TYPE_RAW / UOPS_RETIRED.RETIRE_SLOTS
++[event27:base-stat]
++fd=27
++type=4
++config=706
++optional=1
++
++# PERF_TYPE_RAW / UOPS_ISSUED.ANY
++[event28:base-stat]
++fd=28
++type=4
+ config=270
+ optional=1
+ 
+@@ -190,8 +234,8 @@ optional=1
+ #  PERF_COUNT_HW_CACHE_L1D                <<  0  |
+ # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
+ # (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)
+-[event25:base-stat]
+-fd=25
++[event29:base-stat]
++fd=29
+ type=3
+ config=0
+ optional=1
+@@ -200,8 +244,8 @@ optional=1
+ #  PERF_COUNT_HW_CACHE_L1D                <<  0  |
+ # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
+ # (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)
+-[event26:base-stat]
+-fd=26
++[event30:base-stat]
++fd=30
+ type=3
+ config=65536
+ optional=1
+@@ -210,8 +254,8 @@ optional=1
+ #  PERF_COUNT_HW_CACHE_LL                 <<  0  |
+ # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
+ # (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)
+-[event27:base-stat]
+-fd=27
++[event31:base-stat]
++fd=31
+ type=3
+ config=2
+ optional=1
+@@ -220,8 +264,8 @@ optional=1
+ #  PERF_COUNT_HW_CACHE_LL                 <<  0  |
+ # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
+ # (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)
+-[event28:base-stat]
+-fd=28
++[event32:base-stat]
++fd=32
+ type=3
+ config=65538
+ optional=1
+@@ -230,8 +274,8 @@ optional=1
+ #  PERF_COUNT_HW_CACHE_L1I                <<  0  |
+ # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
+ # (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)
+-[event29:base-stat]
+-fd=29
++[event33:base-stat]
++fd=33
+ type=3
+ config=1
+ optional=1
+@@ -240,8 +284,8 @@ optional=1
+ #  PERF_COUNT_HW_CACHE_L1I                <<  0  |
+ # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
+ # (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)
+-[event30:base-stat]
+-fd=30
++[event34:base-stat]
++fd=34
+ type=3
+ config=65537
+ optional=1
+@@ -250,8 +294,8 @@ optional=1
+ #  PERF_COUNT_HW_CACHE_DTLB               <<  0  |
+ # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
+ # (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)
+-[event31:base-stat]
+-fd=31
++[event35:base-stat]
++fd=35
+ type=3
+ config=3
+ optional=1
+@@ -260,8 +304,8 @@ optional=1
+ #  PERF_COUNT_HW_CACHE_DTLB               <<  0  |
+ # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
+ # (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)
+-[event32:base-stat]
+-fd=32
++[event36:base-stat]
++fd=36
+ type=3
+ config=65539
+ optional=1
+@@ -270,8 +314,8 @@ optional=1
+ #  PERF_COUNT_HW_CACHE_ITLB               <<  0  |
+ # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
+ # (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)
+-[event33:base-stat]
+-fd=33
++[event37:base-stat]
++fd=37
+ type=3
+ config=4
+ optional=1
+@@ -280,8 +324,8 @@ optional=1
+ #  PERF_COUNT_HW_CACHE_ITLB               <<  0  |
+ # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
+ # (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)
+-[event34:base-stat]
+-fd=34
++[event38:base-stat]
++fd=38
+ type=3
+ config=65540
+ optional=1
+@@ -290,8 +334,8 @@ optional=1
+ #  PERF_COUNT_HW_CACHE_L1D                <<  0  |
+ # (PERF_COUNT_HW_CACHE_OP_PREFETCH        <<  8) |
+ # (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)
+-[event35:base-stat]
+-fd=35
++[event39:base-stat]
++fd=39
+ type=3
+ config=512
+ optional=1
+@@ -300,8 +344,8 @@ optional=1
+ #  PERF_COUNT_HW_CACHE_L1D                <<  0  |
+ # (PERF_COUNT_HW_CACHE_OP_PREFETCH        <<  8) |
+ # (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)
+-[event36:base-stat]
+-fd=36
++[event40:base-stat]
++fd=40
+ type=3
+ config=66048
+ optional=1
+diff --git a/tools/perf/util/bpf-filter.c b/tools/perf/util/bpf-filter.c
+index e87b6789eb9ef3..a4fdf6911ec1c3 100644
+--- a/tools/perf/util/bpf-filter.c
++++ b/tools/perf/util/bpf-filter.c
+@@ -375,7 +375,7 @@ static int create_idx_hash(struct evsel *evsel, struct perf_bpf_filter_entry *en
+ 	pfi = zalloc(sizeof(*pfi));
+ 	if (pfi == NULL) {
+ 		pr_err("Cannot save pinned filter index\n");
+-		goto err;
++		return -ENOMEM;
+ 	}
+ 
+ 	pfi->evsel = evsel;
+diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c
+index 40f047baef8100..0bf9e5c27b599b 100644
+--- a/tools/perf/util/cs-etm.c
++++ b/tools/perf/util/cs-etm.c
+@@ -2490,12 +2490,6 @@ static void cs_etm__clear_all_traceid_queues(struct cs_etm_queue *etmq)
+ 
+ 		/* Ignore return value */
+ 		cs_etm__process_traceid_queue(etmq, tidq);
+-
+-		/*
+-		 * Generate an instruction sample with the remaining
+-		 * branchstack entries.
+-		 */
+-		cs_etm__flush(etmq, tidq);
+ 	}
+ }
+ 
+@@ -2638,7 +2632,7 @@ static int cs_etm__process_timestamped_queues(struct cs_etm_auxtrace *etm)
+ 
+ 	while (1) {
+ 		if (!etm->heap.heap_cnt)
+-			goto out;
++			break;
+ 
+ 		/* Take the entry at the top of the min heap */
+ 		cs_queue_nr = etm->heap.heap_array[0].queue_nr;
+@@ -2721,6 +2715,23 @@ static int cs_etm__process_timestamped_queues(struct cs_etm_auxtrace *etm)
+ 		ret = auxtrace_heap__add(&etm->heap, cs_queue_nr, cs_timestamp);
+ 	}
+ 
++	for (i = 0; i < etm->queues.nr_queues; i++) {
++		struct int_node *inode;
++
++		etmq = etm->queues.queue_array[i].priv;
++		if (!etmq)
++			continue;
++
++		intlist__for_each_entry(inode, etmq->traceid_queues_list) {
++			int idx = (int)(intptr_t)inode->priv;
++
++			/* Flush any remaining branch stack entries */
++			tidq = etmq->traceid_queues[idx];
++			ret = cs_etm__end_block(etmq, tidq);
++			if (ret)
++				return ret;
++		}
++	}
+ out:
+ 	return ret;
+ }
+diff --git a/tools/perf/util/disasm.c b/tools/perf/util/disasm.c
+index f05ba7739c1e91..648e8d87ef1945 100644
+--- a/tools/perf/util/disasm.c
++++ b/tools/perf/util/disasm.c
+@@ -1627,12 +1627,12 @@ static int symbol__disassemble_capstone(char *filename, struct symbol *sym,
+ 	u64 start = map__rip_2objdump(map, sym->start);
+ 	u64 len;
+ 	u64 offset;
+-	int i, count;
++	int i, count, free_count;
+ 	bool is_64bit = false;
+ 	bool needs_cs_close = false;
+ 	u8 *buf = NULL;
+ 	csh handle;
+-	cs_insn *insn;
++	cs_insn *insn = NULL;
+ 	char disasm_buf[512];
+ 	struct disasm_line *dl;
+ 
+@@ -1664,7 +1664,7 @@ static int symbol__disassemble_capstone(char *filename, struct symbol *sym,
+ 
+ 	needs_cs_close = true;
+ 
+-	count = cs_disasm(handle, buf, len, start, len, &insn);
++	free_count = count = cs_disasm(handle, buf, len, start, len, &insn);
+ 	for (i = 0, offset = 0; i < count; i++) {
+ 		int printed;
+ 
+@@ -1702,8 +1702,11 @@ static int symbol__disassemble_capstone(char *filename, struct symbol *sym,
+ 	}
+ 
+ out:
+-	if (needs_cs_close)
++	if (needs_cs_close) {
+ 		cs_close(&handle);
++		if (free_count > 0)
++			cs_free(insn, free_count);
++	}
+ 	free(buf);
+ 	return count < 0 ? count : 0;
+ 
+@@ -1717,7 +1720,7 @@ static int symbol__disassemble_capstone(char *filename, struct symbol *sym,
+ 		 */
+ 		list_for_each_entry_safe(dl, tmp, &notes->src->source, al.node) {
+ 			list_del(&dl->al.node);
+-			free(dl);
++			disasm_line__free(dl);
+ 		}
+ 	}
+ 	count = -1;
+@@ -1782,7 +1785,7 @@ static int symbol__disassemble_raw(char *filename, struct symbol *sym,
+ 		sprintf(args->line, "%x", line[i]);
+ 		dl = disasm_line__new(args);
+ 		if (dl == NULL)
+-			goto err;
++			break;
+ 
+ 		annotation_line__add(&dl->al, &notes->src->source);
+ 		offset += 4;
+diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
+index f14b7e6ff1dcc2..a9df84692d4a88 100644
+--- a/tools/perf/util/evlist.c
++++ b/tools/perf/util/evlist.c
+@@ -48,6 +48,7 @@
+ #include <sys/mman.h>
+ #include <sys/prctl.h>
+ #include <sys/timerfd.h>
++#include <sys/wait.h>
+ 
+ #include <linux/bitops.h>
+ #include <linux/hash.h>
+@@ -1484,6 +1485,8 @@ int evlist__prepare_workload(struct evlist *evlist, struct target *target, const
+ 	int child_ready_pipe[2], go_pipe[2];
+ 	char bf;
+ 
++	evlist->workload.cork_fd = -1;
++
+ 	if (pipe(child_ready_pipe) < 0) {
+ 		perror("failed to create 'ready' pipe");
+ 		return -1;
+@@ -1536,7 +1539,7 @@ int evlist__prepare_workload(struct evlist *evlist, struct target *target, const
+ 		 * For cancelling the workload without actually running it,
+ 		 * the parent will just close workload.cork_fd, without writing
+ 		 * anything, i.e. read will return zero and we just exit()
+-		 * here.
++		 * here (See evlist__cancel_workload()).
+ 		 */
+ 		if (ret != 1) {
+ 			if (ret == -1)
+@@ -1600,7 +1603,7 @@ int evlist__prepare_workload(struct evlist *evlist, struct target *target, const
+ 
+ int evlist__start_workload(struct evlist *evlist)
+ {
+-	if (evlist->workload.cork_fd > 0) {
++	if (evlist->workload.cork_fd >= 0) {
+ 		char bf = 0;
+ 		int ret;
+ 		/*
+@@ -1611,12 +1614,24 @@ int evlist__start_workload(struct evlist *evlist)
+ 			perror("unable to write to pipe");
+ 
+ 		close(evlist->workload.cork_fd);
++		evlist->workload.cork_fd = -1;
+ 		return ret;
+ 	}
+ 
+ 	return 0;
+ }
+ 
++void evlist__cancel_workload(struct evlist *evlist)
++{
++	int status;
++
++	if (evlist->workload.cork_fd >= 0) {
++		close(evlist->workload.cork_fd);
++		evlist->workload.cork_fd = -1;
++		waitpid(evlist->workload.pid, &status, WNOHANG);
++	}
++}
++
+ int evlist__parse_sample(struct evlist *evlist, union perf_event *event, struct perf_sample *sample)
+ {
+ 	struct evsel *evsel = evlist__event2evsel(evlist, event);
+diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
+index bcc1c6984bb58a..888fda751e1a6e 100644
+--- a/tools/perf/util/evlist.h
++++ b/tools/perf/util/evlist.h
+@@ -186,6 +186,7 @@ int evlist__prepare_workload(struct evlist *evlist, struct target *target,
+ 			     const char *argv[], bool pipe_output,
+ 			     void (*exec_error)(int signo, siginfo_t *info, void *ucontext));
+ int evlist__start_workload(struct evlist *evlist);
++void evlist__cancel_workload(struct evlist *evlist);
+ 
+ struct option;
+ 
+diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
+index fad227b625d155..4f0ac998b0ccfd 100644
+--- a/tools/perf/util/machine.c
++++ b/tools/perf/util/machine.c
+@@ -1343,7 +1343,7 @@ static int maps__set_module_path(struct maps *maps, const char *path, struct kmo
+ 	 * we need to update the symtab_type if needed.
+ 	 */
+ 	if (m->comp && is_kmod_dso(dso)) {
+-		dso__set_symtab_type(dso, dso__symtab_type(dso));
++		dso__set_symtab_type(dso, dso__symtab_type(dso)+1);
+ 		dso__set_comp(dso, m->comp);
+ 	}
+ 	map__put(map);
+diff --git a/tools/perf/util/mem-events.c b/tools/perf/util/mem-events.c
+index 051feb93ed8d40..bf5090f5220bbd 100644
+--- a/tools/perf/util/mem-events.c
++++ b/tools/perf/util/mem-events.c
+@@ -366,6 +366,12 @@ static const char * const mem_lvl[] = {
+ };
+ 
+ static const char * const mem_lvlnum[] = {
++	[PERF_MEM_LVLNUM_L1] = "L1",
++	[PERF_MEM_LVLNUM_L2] = "L2",
++	[PERF_MEM_LVLNUM_L3] = "L3",
++	[PERF_MEM_LVLNUM_L4] = "L4",
++	[PERF_MEM_LVLNUM_L2_MHB] = "L2 MHB",
++	[PERF_MEM_LVLNUM_MSC] = "Memory-side Cache",
+ 	[PERF_MEM_LVLNUM_UNC] = "Uncached",
+ 	[PERF_MEM_LVLNUM_CXL] = "CXL",
+ 	[PERF_MEM_LVLNUM_IO] = "I/O",
+@@ -448,7 +454,7 @@ int perf_mem__lvl_scnprintf(char *out, size_t sz, const struct mem_info *mem_inf
+ 		if (mem_lvlnum[lvl])
+ 			l += scnprintf(out + l, sz - l, mem_lvlnum[lvl]);
+ 		else
+-			l += scnprintf(out + l, sz - l, "L%d", lvl);
++			l += scnprintf(out + l, sz - l, "Unknown level %d", lvl);
+ 
+ 		l += scnprintf(out + l, sz - l, " %s", hit_miss);
+ 		return l;
+diff --git a/tools/perf/util/pfm.c b/tools/perf/util/pfm.c
+index 5ccfe4b64cdfe4..0dacc133ed3960 100644
+--- a/tools/perf/util/pfm.c
++++ b/tools/perf/util/pfm.c
+@@ -233,7 +233,7 @@ print_libpfm_event(const struct print_callbacks *print_cb, void *print_state,
+ 	}
+ 
+ 	if (is_libpfm_event_supported(name, cpus, threads)) {
+-		print_cb->print_event(print_state, pinfo->name, topic,
++		print_cb->print_event(print_state, topic, pinfo->name,
+ 				      name, info->equiv,
+ 				      /*scale_unit=*/NULL,
+ 				      /*deprecated=*/NULL, "PFM event",
+@@ -267,8 +267,8 @@ print_libpfm_event(const struct print_callbacks *print_cb, void *print_state,
+ 				continue;
+ 
+ 			print_cb->print_event(print_state,
+-					pinfo->name,
+ 					topic,
++					pinfo->name,
+ 					name, /*alias=*/NULL,
+ 					/*scale_unit=*/NULL,
+ 					/*deprecated=*/NULL, "PFM event",
+diff --git a/tools/perf/util/pmus.c b/tools/perf/util/pmus.c
+index 52109af5f2f129..d7d67e09d759bb 100644
+--- a/tools/perf/util/pmus.c
++++ b/tools/perf/util/pmus.c
+@@ -494,8 +494,8 @@ void perf_pmus__print_pmu_events(const struct print_callbacks *print_cb, void *p
+ 			goto free;
+ 
+ 		print_cb->print_event(print_state,
+-				aliases[j].pmu_name,
+ 				aliases[j].topic,
++				aliases[j].pmu_name,
+ 				aliases[j].name,
+ 				aliases[j].alias,
+ 				aliases[j].scale_unit,
+diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
+index 630e16c54ed5cb..a30f88ed030044 100644
+--- a/tools/perf/util/probe-finder.c
++++ b/tools/perf/util/probe-finder.c
+@@ -1379,6 +1379,10 @@ int debuginfo__find_trace_events(struct debuginfo *dbg,
+ 	if (ret >= 0 && tf.pf.skip_empty_arg)
+ 		ret = fill_empty_trace_arg(pev, tf.tevs, tf.ntevs);
+ 
++#if _ELFUTILS_PREREQ(0, 142)
++	dwarf_cfi_end(tf.pf.cfi_eh);
++#endif
++
+ 	if (ret < 0 || tf.ntevs == 0) {
+ 		for (i = 0; i < tf.ntevs; i++)
+ 			clear_probe_trace_event(&tf.tevs[i]);
+@@ -1583,8 +1587,21 @@ int debuginfo__find_probe_point(struct debuginfo *dbg, u64 addr,
+ 
+ 	/* Find a corresponding function (name, baseline and baseaddr) */
+ 	if (die_find_realfunc(&cudie, (Dwarf_Addr)addr, &spdie)) {
+-		/* Get function entry information */
+-		func = basefunc = dwarf_diename(&spdie);
++		/*
++		 * Get function entry information.
++		 *
++		 * As described in the document DWARF Debugging Information
++		 * Format Version 5, section 2.22 Linkage Names, "mangled names,
++		 * are used in various ways, ... to distinguish multiple
++		 * entities that have the same name".
++		 *
++		 * Firstly try to get distinct linkage name, if fail then
++		 * rollback to get associated name in DIE.
++		 */
++		func = basefunc = die_get_linkage_name(&spdie);
++		if (!func)
++			func = basefunc = dwarf_diename(&spdie);
++
+ 		if (!func ||
+ 		    die_entrypc(&spdie, &baseaddr) != 0 ||
+ 		    dwarf_decl_line(&spdie, &baseline) != 0) {
+diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h
+index 3add5ff516e12d..724db829b49e02 100644
+--- a/tools/perf/util/probe-finder.h
++++ b/tools/perf/util/probe-finder.h
+@@ -64,9 +64,9 @@ struct probe_finder {
+ 
+ 	/* For variable searching */
+ #if _ELFUTILS_PREREQ(0, 142)
+-	/* Call Frame Information from .eh_frame */
++	/* Call Frame Information from .eh_frame. Owned by this struct. */
+ 	Dwarf_CFI		*cfi_eh;
+-	/* Call Frame Information from .debug_frame */
++	/* Call Frame Information from .debug_frame. Not owned. */
+ 	Dwarf_CFI		*cfi_dbg;
+ #endif
+ 	Dwarf_Op		*fb_ops;	/* Frame base attribute */
+diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
+index 089220aaa5c929..a5ebee8b23bbe3 100644
+--- a/tools/power/x86/turbostat/turbostat.c
++++ b/tools/power/x86/turbostat/turbostat.c
+@@ -5385,6 +5385,9 @@ static int parse_cpu_str(char *cpu_str, cpu_set_t *cpu_set, int cpu_set_size)
+ 		if (*next == '-')	/* no negative cpu numbers */
+ 			return 1;
+ 
++		if (*next == '\0' || *next == '\n')
++			break;
++
+ 		start = strtoul(next, &next, 10);
+ 
+ 		if (start >= CPU_SUBSET_MAXCPUS)
+@@ -9781,7 +9784,7 @@ void cmdline(int argc, char **argv)
+ 	 * Parse some options early, because they may make other options invalid,
+ 	 * like adding the MSR counter with --add and at the same time using --no-msr.
+ 	 */
+-	while ((opt = getopt_long_only(argc, argv, "MPn:", long_options, &option_index)) != -1) {
++	while ((opt = getopt_long_only(argc, argv, "+MPn:", long_options, &option_index)) != -1) {
+ 		switch (opt) {
+ 		case 'M':
+ 			no_msr = 1;
+diff --git a/tools/testing/selftests/arm64/abi/hwcap.c b/tools/testing/selftests/arm64/abi/hwcap.c
+index f2d6007a2b983e..265654ec48b9fc 100644
+--- a/tools/testing/selftests/arm64/abi/hwcap.c
++++ b/tools/testing/selftests/arm64/abi/hwcap.c
+@@ -361,8 +361,8 @@ static void sveaes_sigill(void)
+ 
+ static void sveb16b16_sigill(void)
+ {
+-	/* BFADD ZA.H[W0, 0], {Z0.H-Z1.H} */
+-	asm volatile(".inst 0xC1E41C00" : : : );
++	/* BFADD Z0.H, Z0.H, Z0.H */
++	asm volatile(".inst 0x65000000" : : : );
+ }
+ 
+ static void svepmull_sigill(void)
+@@ -490,7 +490,7 @@ static const struct hwcap_data {
+ 		.name = "F8DP2",
+ 		.at_hwcap = AT_HWCAP2,
+ 		.hwcap_bit = HWCAP2_F8DP2,
+-		.cpuinfo = "f8dp4",
++		.cpuinfo = "f8dp2",
+ 		.sigill_fn = f8dp2_sigill,
+ 	},
+ 	{
+diff --git a/tools/testing/selftests/arm64/mte/check_tags_inclusion.c b/tools/testing/selftests/arm64/mte/check_tags_inclusion.c
+index 2b1425b92b6991..a3d1e23fe02aff 100644
+--- a/tools/testing/selftests/arm64/mte/check_tags_inclusion.c
++++ b/tools/testing/selftests/arm64/mte/check_tags_inclusion.c
+@@ -65,7 +65,7 @@ static int check_single_included_tags(int mem_type, int mode)
+ 			ptr = mte_insert_tags(ptr, BUFFER_SIZE);
+ 			/* Check tag value */
+ 			if (MT_FETCH_TAG((uintptr_t)ptr) == tag) {
+-				ksft_print_msg("FAIL: wrong tag = 0x%x with include mask=0x%x\n",
++				ksft_print_msg("FAIL: wrong tag = 0x%lx with include mask=0x%x\n",
+ 					       MT_FETCH_TAG((uintptr_t)ptr),
+ 					       MT_INCLUDE_VALID_TAG(tag));
+ 				result = KSFT_FAIL;
+@@ -97,7 +97,7 @@ static int check_multiple_included_tags(int mem_type, int mode)
+ 			ptr = mte_insert_tags(ptr, BUFFER_SIZE);
+ 			/* Check tag value */
+ 			if (MT_FETCH_TAG((uintptr_t)ptr) < tag) {
+-				ksft_print_msg("FAIL: wrong tag = 0x%x with include mask=0x%x\n",
++				ksft_print_msg("FAIL: wrong tag = 0x%lx with include mask=0x%lx\n",
+ 					       MT_FETCH_TAG((uintptr_t)ptr),
+ 					       MT_INCLUDE_VALID_TAGS(excl_mask));
+ 				result = KSFT_FAIL;
+diff --git a/tools/testing/selftests/arm64/mte/mte_common_util.c b/tools/testing/selftests/arm64/mte/mte_common_util.c
+index 00ffd34c66d301..1120f5aa76550f 100644
+--- a/tools/testing/selftests/arm64/mte/mte_common_util.c
++++ b/tools/testing/selftests/arm64/mte/mte_common_util.c
+@@ -38,7 +38,7 @@ void mte_default_handler(int signum, siginfo_t *si, void *uc)
+ 			if (cur_mte_cxt.trig_si_code == si->si_code)
+ 				cur_mte_cxt.fault_valid = true;
+ 			else
+-				ksft_print_msg("Got unexpected SEGV_MTEAERR at pc=$lx, fault addr=%lx\n",
++				ksft_print_msg("Got unexpected SEGV_MTEAERR at pc=%llx, fault addr=%lx\n",
+ 					       ((ucontext_t *)uc)->uc_mcontext.pc,
+ 					       addr);
+ 			return;
+@@ -64,7 +64,7 @@ void mte_default_handler(int signum, siginfo_t *si, void *uc)
+ 			exit(1);
+ 		}
+ 	} else if (signum == SIGBUS) {
+-		ksft_print_msg("INFO: SIGBUS signal at pc=%lx, fault addr=%lx, si_code=%lx\n",
++		ksft_print_msg("INFO: SIGBUS signal at pc=%llx, fault addr=%lx, si_code=%x\n",
+ 				((ucontext_t *)uc)->uc_mcontext.pc, addr, si->si_code);
+ 		if ((cur_mte_cxt.trig_range >= 0 &&
+ 		     addr >= MT_CLEAR_TAG(cur_mte_cxt.trig_addr) &&
+diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
+index 75016962f79563..43a02931847854 100644
+--- a/tools/testing/selftests/bpf/Makefile
++++ b/tools/testing/selftests/bpf/Makefile
+@@ -10,6 +10,7 @@ TOOLSDIR := $(abspath ../../..)
+ LIBDIR := $(TOOLSDIR)/lib
+ BPFDIR := $(LIBDIR)/bpf
+ TOOLSINCDIR := $(TOOLSDIR)/include
++TOOLSARCHINCDIR := $(TOOLSDIR)/arch/$(SRCARCH)/include
+ BPFTOOLDIR := $(TOOLSDIR)/bpf/bpftool
+ APIDIR := $(TOOLSINCDIR)/uapi
+ ifneq ($(O),)
+@@ -44,7 +45,7 @@ CFLAGS += -g $(OPT_FLAGS) -rdynamic					\
+ 	  -Wall -Werror -fno-omit-frame-pointer				\
+ 	  $(GENFLAGS) $(SAN_CFLAGS) $(LIBELF_CFLAGS)			\
+ 	  -I$(CURDIR) -I$(INCLUDE_DIR) -I$(GENDIR) -I$(LIBDIR)		\
+-	  -I$(TOOLSINCDIR) -I$(APIDIR) -I$(OUTPUT)
++	  -I$(TOOLSINCDIR) -I$(TOOLSARCHINCDIR) -I$(APIDIR) -I$(OUTPUT)
+ LDFLAGS += $(SAN_LDFLAGS)
+ LDLIBS += $(LIBELF_LIBS) -lz -lrt -lpthread
+ 
+diff --git a/tools/testing/selftests/bpf/network_helpers.h b/tools/testing/selftests/bpf/network_helpers.h
+index c72c16e1aff825..5764155b6d2518 100644
+--- a/tools/testing/selftests/bpf/network_helpers.h
++++ b/tools/testing/selftests/bpf/network_helpers.h
+@@ -1,6 +1,7 @@
+ /* SPDX-License-Identifier: GPL-2.0 */
+ #ifndef __NETWORK_HELPERS_H
+ #define __NETWORK_HELPERS_H
++#include <arpa/inet.h>
+ #include <sys/socket.h>
+ #include <sys/types.h>
+ #include <linux/types.h>
+diff --git a/tools/testing/selftests/bpf/prog_tests/timer_lockup.c b/tools/testing/selftests/bpf/prog_tests/timer_lockup.c
+index 871d16cb95cfde..1a2f99596916fb 100644
+--- a/tools/testing/selftests/bpf/prog_tests/timer_lockup.c
++++ b/tools/testing/selftests/bpf/prog_tests/timer_lockup.c
+@@ -5,6 +5,7 @@
+ #include <test_progs.h>
+ #include <pthread.h>
+ #include <network_helpers.h>
++#include <sys/sysinfo.h>
+ 
+ #include "timer_lockup.skel.h"
+ 
+@@ -52,6 +53,11 @@ void test_timer_lockup(void)
+ 	pthread_t thrds[2];
+ 	void *ret;
+ 
++	if (get_nprocs() < 2) {
++		test__skip();
++		return;
++	}
++
+ 	skel = timer_lockup__open_and_load();
+ 	if (!ASSERT_OK_PTR(skel, "timer_lockup__open_and_load"))
+ 		return;
+diff --git a/tools/testing/selftests/bpf/progs/test_spin_lock_fail.c b/tools/testing/selftests/bpf/progs/test_spin_lock_fail.c
+index 43f40c4fe241ac..1c8b678e2e9a39 100644
+--- a/tools/testing/selftests/bpf/progs/test_spin_lock_fail.c
++++ b/tools/testing/selftests/bpf/progs/test_spin_lock_fail.c
+@@ -28,8 +28,8 @@ struct {
+ 	},
+ };
+ 
+-SEC(".data.A") struct bpf_spin_lock lockA;
+-SEC(".data.B") struct bpf_spin_lock lockB;
++static struct bpf_spin_lock lockA SEC(".data.A");
++static struct bpf_spin_lock lockB SEC(".data.B");
+ 
+ SEC("?tc")
+ int lock_id_kptr_preserve(void *ctx)
+diff --git a/tools/testing/selftests/bpf/progs/test_tp_btf_nullable.c b/tools/testing/selftests/bpf/progs/test_tp_btf_nullable.c
+index bba3e37f749b86..5aaf2b065f86c2 100644
+--- a/tools/testing/selftests/bpf/progs/test_tp_btf_nullable.c
++++ b/tools/testing/selftests/bpf/progs/test_tp_btf_nullable.c
+@@ -7,7 +7,11 @@
+ #include "bpf_misc.h"
+ 
+ SEC("tp_btf/bpf_testmod_test_nullable_bare")
+-__failure __msg("R1 invalid mem access 'trusted_ptr_or_null_'")
++/* This used to be a failure test, but raw_tp nullable arguments can now
++ * directly be dereferenced, whether they have nullable annotation or not,
++ * and don't need to be explicitly checked.
++ */
++__success
+ int BPF_PROG(handle_tp_btf_nullable_bare1, struct bpf_testmod_test_read_ctx *nullable_ctx)
+ {
+ 	return nullable_ctx->len;
+diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
+index c7a70e1a1085a5..fa829a7854f24c 100644
+--- a/tools/testing/selftests/bpf/test_progs.c
++++ b/tools/testing/selftests/bpf/test_progs.c
+@@ -20,11 +20,13 @@
+ 
+ #include "network_helpers.h"
+ 
++/* backtrace() and backtrace_symbols_fd() are glibc specific,
++ * use header file when glibc is available and provide stub
++ * implementations when another libc implementation is used.
++ */
+ #ifdef __GLIBC__
+ #include <execinfo.h> /* backtrace */
+-#endif
+-
+-/* Default backtrace funcs if missing at link */
++#else
+ __weak int backtrace(void **buffer, int size)
+ {
+ 	return 0;
+@@ -34,6 +36,7 @@ __weak void backtrace_symbols_fd(void *const *buffer, int size, int fd)
+ {
+ 	dprintf(fd, "<backtrace not supported>\n");
+ }
++#endif /*__GLIBC__ */
+ 
+ int env_verbosity = 0;
+ 
+diff --git a/tools/testing/selftests/bpf/test_sockmap.c b/tools/testing/selftests/bpf/test_sockmap.c
+index 3e02d7267de8bb..61a747afcd05fb 100644
+--- a/tools/testing/selftests/bpf/test_sockmap.c
++++ b/tools/testing/selftests/bpf/test_sockmap.c
+@@ -56,6 +56,8 @@ static void running_handler(int a);
+ #define BPF_SOCKHASH_FILENAME "test_sockhash_kern.bpf.o"
+ #define CG_PATH "/sockmap"
+ 
++#define EDATAINTEGRITY 2001
++
+ /* global sockets */
+ int s1, s2, c1, c2, p1, p2;
+ int test_cnt;
+@@ -86,6 +88,10 @@ int ktls;
+ int peek_flag;
+ int skb_use_parser;
+ int txmsg_omit_skb_parser;
++int verify_push_start;
++int verify_push_len;
++int verify_pop_start;
++int verify_pop_len;
+ 
+ static const struct option long_options[] = {
+ 	{"help",	no_argument,		NULL, 'h' },
+@@ -418,16 +424,18 @@ static int msg_loop_sendpage(int fd, int iov_length, int cnt,
+ {
+ 	bool drop = opt->drop_expected;
+ 	unsigned char k = 0;
++	int i, j, fp;
+ 	FILE *file;
+-	int i, fp;
+ 
+ 	file = tmpfile();
+ 	if (!file) {
+ 		perror("create file for sendpage");
+ 		return 1;
+ 	}
+-	for (i = 0; i < iov_length * cnt; i++, k++)
+-		fwrite(&k, sizeof(char), 1, file);
++	for (i = 0; i < cnt; i++, k = 0) {
++		for (j = 0; j < iov_length; j++, k++)
++			fwrite(&k, sizeof(char), 1, file);
++	}
+ 	fflush(file);
+ 	fseek(file, 0, SEEK_SET);
+ 
+@@ -510,42 +518,111 @@ static int msg_alloc_iov(struct msghdr *msg,
+ 	return -ENOMEM;
+ }
+ 
+-static int msg_verify_data(struct msghdr *msg, int size, int chunk_sz)
++/* In push or pop test, we need to do some calculations for msg_verify_data */
++static void msg_verify_date_prep(void)
+ {
+-	int i, j = 0, bytes_cnt = 0;
+-	unsigned char k = 0;
++	int push_range_end = txmsg_start_push + txmsg_end_push - 1;
++	int pop_range_end = txmsg_start_pop + txmsg_pop - 1;
++
++	if (txmsg_end_push && txmsg_pop &&
++	    txmsg_start_push <= pop_range_end && txmsg_start_pop <= push_range_end) {
++		/* The push range and the pop range overlap */
++		int overlap_len;
++
++		verify_push_start = txmsg_start_push;
++		verify_pop_start = txmsg_start_pop;
++		if (txmsg_start_push < txmsg_start_pop)
++			overlap_len = min(push_range_end - txmsg_start_pop + 1, txmsg_pop);
++		else
++			overlap_len = min(pop_range_end - txmsg_start_push + 1, txmsg_end_push);
++		verify_push_len = max(txmsg_end_push - overlap_len, 0);
++		verify_pop_len = max(txmsg_pop - overlap_len, 0);
++	} else {
++		/* Otherwise */
++		verify_push_start = txmsg_start_push;
++		verify_pop_start = txmsg_start_pop;
++		verify_push_len = txmsg_end_push;
++		verify_pop_len = txmsg_pop;
++	}
++}
++
++static int msg_verify_data(struct msghdr *msg, int size, int chunk_sz,
++			   unsigned char *k_p, int *bytes_cnt_p,
++			   int *check_cnt_p, int *push_p)
++{
++	int bytes_cnt = *bytes_cnt_p, check_cnt = *check_cnt_p, push = *push_p;
++	unsigned char k = *k_p;
++	int i, j;
+ 
+-	for (i = 0; i < msg->msg_iovlen; i++) {
++	for (i = 0, j = 0; i < msg->msg_iovlen && size; i++, j = 0) {
+ 		unsigned char *d = msg->msg_iov[i].iov_base;
+ 
+ 		/* Special case test for skb ingress + ktls */
+ 		if (i == 0 && txmsg_ktls_skb) {
+ 			if (msg->msg_iov[i].iov_len < 4)
+-				return -EIO;
++				return -EDATAINTEGRITY;
+ 			if (memcmp(d, "PASS", 4) != 0) {
+ 				fprintf(stderr,
+ 					"detected skb data error with skb ingress update @iov[%i]:%i \"%02x %02x %02x %02x\" != \"PASS\"\n",
+ 					i, 0, d[0], d[1], d[2], d[3]);
+-				return -EIO;
++				return -EDATAINTEGRITY;
+ 			}
+ 			j = 4; /* advance index past PASS header */
+ 		}
+ 
+ 		for (; j < msg->msg_iov[i].iov_len && size; j++) {
++			if (push > 0 &&
++			    check_cnt == verify_push_start + verify_push_len - push) {
++				int skipped;
++revisit_push:
++				skipped = push;
++				if (j + push >= msg->msg_iov[i].iov_len)
++					skipped = msg->msg_iov[i].iov_len - j;
++				push -= skipped;
++				size -= skipped;
++				j += skipped - 1;
++				check_cnt += skipped;
++				continue;
++			}
++
++			if (verify_pop_len > 0 && check_cnt == verify_pop_start) {
++				bytes_cnt += verify_pop_len;
++				check_cnt += verify_pop_len;
++				k += verify_pop_len;
++
++				if (bytes_cnt == chunk_sz) {
++					k = 0;
++					bytes_cnt = 0;
++					check_cnt = 0;
++					push = verify_push_len;
++				}
++
++				if (push > 0 &&
++				    check_cnt == verify_push_start + verify_push_len - push)
++					goto revisit_push;
++			}
++
+ 			if (d[j] != k++) {
+ 				fprintf(stderr,
+ 					"detected data corruption @iov[%i]:%i %02x != %02x, %02x ?= %02x\n",
+ 					i, j, d[j], k - 1, d[j+1], k);
+-				return -EIO;
++				return -EDATAINTEGRITY;
+ 			}
+ 			bytes_cnt++;
++			check_cnt++;
+ 			if (bytes_cnt == chunk_sz) {
+ 				k = 0;
+ 				bytes_cnt = 0;
++				check_cnt = 0;
++				push = verify_push_len;
+ 			}
+ 			size--;
+ 		}
+ 	}
++	*k_p = k;
++	*bytes_cnt_p = bytes_cnt;
++	*check_cnt_p = check_cnt;
++	*push_p = push;
+ 	return 0;
+ }
+ 
+@@ -598,10 +675,14 @@ static int msg_loop(int fd, int iov_count, int iov_length, int cnt,
+ 		}
+ 		clock_gettime(CLOCK_MONOTONIC, &s->end);
+ 	} else {
++		float total_bytes, txmsg_pop_total, txmsg_push_total;
+ 		int slct, recvp = 0, recv, max_fd = fd;
+-		float total_bytes, txmsg_pop_total;
+ 		int fd_flags = O_NONBLOCK;
+ 		struct timeval timeout;
++		unsigned char k = 0;
++		int bytes_cnt = 0;
++		int check_cnt = 0;
++		int push = 0;
+ 		fd_set w;
+ 
+ 		fcntl(fd, fd_flags);
+@@ -615,12 +696,22 @@ static int msg_loop(int fd, int iov_count, int iov_length, int cnt,
+ 		 * This is really only useful for testing edge cases in code
+ 		 * paths.
+ 		 */
+-		total_bytes = (float)iov_count * (float)iov_length * (float)cnt;
+-		if (txmsg_apply)
++		total_bytes = (float)iov_length * (float)cnt;
++		if (!opt->sendpage)
++			total_bytes *= (float)iov_count;
++		if (txmsg_apply) {
++			txmsg_push_total = txmsg_end_push * (total_bytes / txmsg_apply);
+ 			txmsg_pop_total = txmsg_pop * (total_bytes / txmsg_apply);
+-		else
++		} else {
++			txmsg_push_total = txmsg_end_push * cnt;
+ 			txmsg_pop_total = txmsg_pop * cnt;
++		}
++		total_bytes += txmsg_push_total;
+ 		total_bytes -= txmsg_pop_total;
++		if (data) {
++			msg_verify_date_prep();
++			push = verify_push_len;
++		}
+ 		err = clock_gettime(CLOCK_MONOTONIC, &s->start);
+ 		if (err < 0)
+ 			perror("recv start time");
+@@ -693,10 +784,11 @@ static int msg_loop(int fd, int iov_count, int iov_length, int cnt,
+ 
+ 			if (data) {
+ 				int chunk_sz = opt->sendpage ?
+-						iov_length * cnt :
++						iov_length :
+ 						iov_length * iov_count;
+ 
+-				errno = msg_verify_data(&msg, recv, chunk_sz);
++				errno = msg_verify_data(&msg, recv, chunk_sz, &k, &bytes_cnt,
++							&check_cnt, &push);
+ 				if (errno) {
+ 					perror("data verify msg failed");
+ 					goto out_errno;
+@@ -704,7 +796,11 @@ static int msg_loop(int fd, int iov_count, int iov_length, int cnt,
+ 				if (recvp) {
+ 					errno = msg_verify_data(&msg_peek,
+ 								recvp,
+-								chunk_sz);
++								chunk_sz,
++								&k,
++								&bytes_cnt,
++								&check_cnt,
++								&push);
+ 					if (errno) {
+ 						perror("data verify msg_peek failed");
+ 						goto out_errno;
+@@ -786,8 +882,6 @@ static int sendmsg_test(struct sockmap_options *opt)
+ 
+ 	rxpid = fork();
+ 	if (rxpid == 0) {
+-		if (txmsg_pop || txmsg_start_pop)
+-			iov_buf -= (txmsg_pop - txmsg_start_pop + 1);
+ 		if (opt->drop_expected || txmsg_ktls_skb_drop)
+ 			_exit(0);
+ 
+@@ -812,7 +906,7 @@ static int sendmsg_test(struct sockmap_options *opt)
+ 				s.bytes_sent, sent_Bps, sent_Bps/giga,
+ 				s.bytes_recvd, recvd_Bps, recvd_Bps/giga,
+ 				peek_flag ? "(peek_msg)" : "");
+-		if (err && txmsg_cork)
++		if (err && err != -EDATAINTEGRITY && txmsg_cork)
+ 			err = 0;
+ 		exit(err ? 1 : 0);
+ 	} else if (rxpid == -1) {
+@@ -1456,8 +1550,8 @@ static void test_send_many(struct sockmap_options *opt, int cgrp)
+ 
+ static void test_send_large(struct sockmap_options *opt, int cgrp)
+ {
+-	opt->iov_length = 256;
+-	opt->iov_count = 1024;
++	opt->iov_length = 8192;
++	opt->iov_count = 32;
+ 	opt->rate = 2;
+ 	test_exec(cgrp, opt);
+ }
+@@ -1586,17 +1680,19 @@ static void test_txmsg_cork_hangs(int cgrp, struct sockmap_options *opt)
+ static void test_txmsg_pull(int cgrp, struct sockmap_options *opt)
+ {
+ 	/* Test basic start/end */
++	txmsg_pass = 1;
+ 	txmsg_start = 1;
+ 	txmsg_end = 2;
+ 	test_send(opt, cgrp);
+ 
+ 	/* Test >4k pull */
++	txmsg_pass = 1;
+ 	txmsg_start = 4096;
+ 	txmsg_end = 9182;
+ 	test_send_large(opt, cgrp);
+ 
+ 	/* Test pull + redirect */
+-	txmsg_redir = 0;
++	txmsg_redir = 1;
+ 	txmsg_start = 1;
+ 	txmsg_end = 2;
+ 	test_send(opt, cgrp);
+@@ -1618,12 +1714,16 @@ static void test_txmsg_pull(int cgrp, struct sockmap_options *opt)
+ 
+ static void test_txmsg_pop(int cgrp, struct sockmap_options *opt)
+ {
++	bool data = opt->data_test;
++
+ 	/* Test basic pop */
++	txmsg_pass = 1;
+ 	txmsg_start_pop = 1;
+ 	txmsg_pop = 2;
+ 	test_send_many(opt, cgrp);
+ 
+ 	/* Test pop with >4k */
++	txmsg_pass = 1;
+ 	txmsg_start_pop = 4096;
+ 	txmsg_pop = 4096;
+ 	test_send_large(opt, cgrp);
+@@ -1634,6 +1734,12 @@ static void test_txmsg_pop(int cgrp, struct sockmap_options *opt)
+ 	txmsg_pop = 2;
+ 	test_send_many(opt, cgrp);
+ 
++	/* TODO: Test for pop + cork should be different,
++	 * - It makes the layout of the received data difficult
++	 * - It makes it hard to calculate the total_bytes in the recvmsg
++	 * Temporarily skip the data integrity test for this case now.
++	 */
++	opt->data_test = false;
+ 	/* Test pop + cork */
+ 	txmsg_redir = 0;
+ 	txmsg_cork = 512;
+@@ -1647,16 +1753,21 @@ static void test_txmsg_pop(int cgrp, struct sockmap_options *opt)
+ 	txmsg_start_pop = 1;
+ 	txmsg_pop = 2;
+ 	test_send_many(opt, cgrp);
++	opt->data_test = data;
+ }
+ 
+ static void test_txmsg_push(int cgrp, struct sockmap_options *opt)
+ {
++	bool data = opt->data_test;
++
+ 	/* Test basic push */
++	txmsg_pass = 1;
+ 	txmsg_start_push = 1;
+ 	txmsg_end_push = 1;
+ 	test_send(opt, cgrp);
+ 
+ 	/* Test push 4kB >4k */
++	txmsg_pass = 1;
+ 	txmsg_start_push = 4096;
+ 	txmsg_end_push = 4096;
+ 	test_send_large(opt, cgrp);
+@@ -1667,16 +1778,24 @@ static void test_txmsg_push(int cgrp, struct sockmap_options *opt)
+ 	txmsg_end_push = 2;
+ 	test_send_many(opt, cgrp);
+ 
++	/* TODO: Test for push + cork should be different,
++	 * - It makes the layout of the received data difficult
++	 * - It makes it hard to calculate the total_bytes in the recvmsg
++	 * Temporarily skip the data integrity test for this case now.
++	 */
++	opt->data_test = false;
+ 	/* Test push + cork */
+ 	txmsg_redir = 0;
+ 	txmsg_cork = 512;
+ 	txmsg_start_push = 1;
+ 	txmsg_end_push = 2;
+ 	test_send_many(opt, cgrp);
++	opt->data_test = data;
+ }
+ 
+ static void test_txmsg_push_pop(int cgrp, struct sockmap_options *opt)
+ {
++	txmsg_pass = 1;
+ 	txmsg_start_push = 1;
+ 	txmsg_end_push = 10;
+ 	txmsg_start_pop = 5;
+diff --git a/tools/testing/selftests/bpf/uprobe_multi.c b/tools/testing/selftests/bpf/uprobe_multi.c
+index c7828b13e5ffd8..dd38dc68f63592 100644
+--- a/tools/testing/selftests/bpf/uprobe_multi.c
++++ b/tools/testing/selftests/bpf/uprobe_multi.c
+@@ -12,6 +12,10 @@
+ #define MADV_POPULATE_READ 22
+ #endif
+ 
++#ifndef MADV_PAGEOUT
++#define MADV_PAGEOUT 21
++#endif
++
+ int __attribute__((weak)) uprobe(void)
+ {
+ 	return 0;
+diff --git a/tools/testing/selftests/mount_setattr/mount_setattr_test.c b/tools/testing/selftests/mount_setattr/mount_setattr_test.c
+index 68801e1a9ec2d1..70f65eb320a7a7 100644
+--- a/tools/testing/selftests/mount_setattr/mount_setattr_test.c
++++ b/tools/testing/selftests/mount_setattr/mount_setattr_test.c
+@@ -1026,7 +1026,7 @@ FIXTURE_SETUP(mount_setattr_idmapped)
+ 			"size=100000,mode=700"), 0);
+ 
+ 	ASSERT_EQ(mount("testing", "/mnt", "tmpfs", MS_NOATIME | MS_NODEV,
+-			"size=100000,mode=700"), 0);
++			"size=2m,mode=700"), 0);
+ 
+ 	ASSERT_EQ(mkdir("/mnt/A", 0777), 0);
+ 
+diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
+index 5e86f7a51b43c5..2c4b6e404a7c7f 100644
+--- a/tools/testing/selftests/net/Makefile
++++ b/tools/testing/selftests/net/Makefile
+@@ -97,6 +97,7 @@ TEST_PROGS += fdb_flush.sh
+ TEST_PROGS += fq_band_pktlimit.sh
+ TEST_PROGS += vlan_hw_filter.sh
+ TEST_PROGS += bpf_offload.py
++TEST_PROGS += ipv6_route_update_soft_lockup.sh
+ 
+ # YNL files, must be before "include ..lib.mk"
+ EXTRA_CLEAN += $(OUTPUT)/libynl.a
+diff --git a/tools/testing/selftests/net/ipv6_route_update_soft_lockup.sh b/tools/testing/selftests/net/ipv6_route_update_soft_lockup.sh
+new file mode 100755
+index 00000000000000..a6b2b1f9c641c9
+--- /dev/null
++++ b/tools/testing/selftests/net/ipv6_route_update_soft_lockup.sh
+@@ -0,0 +1,262 @@
++#!/bin/bash
++# SPDX-License-Identifier: GPL-2.0
++#
++# Testing for potential kernel soft lockup during IPv6 routing table
++# refresh under heavy outgoing IPv6 traffic. If a kernel soft lockup
++# occurs, a kernel panic will be triggered to prevent associated issues.
++#
++#
++#                            Test Environment Layout
++#
++# ┌----------------┐                                         ┌----------------┐
++# |     SOURCE_NS  |                                         |     SINK_NS    |
++# |    NAMESPACE   |                                         |    NAMESPACE   |
++# |(iperf3 clients)|                                         |(iperf3 servers)|
++# |                |                                         |                |
++# |                |                                         |                |
++# |    ┌-----------|                             nexthops    |---------┐      |
++# |    |veth_source|<--------------------------------------->|veth_sink|<┐    |
++# |    └-----------|2001:0DB8:1::0:1/96  2001:0DB8:1::1:1/96 |---------┘ |    |
++# |                |         ^           2001:0DB8:1::1:2/96 |           |    |
++# |                |         .                   .           |       fwd |    |
++# |  ┌---------┐   |         .                   .           |           |    |
++# |  |   IPv6  |   |         .                   .           |           V    |
++# |  | routing |   |         .           2001:0DB8:1::1:80/96|        ┌-----┐ |
++# |  |  table  |   |         .                               |        | lo  | |
++# |  | nexthop |   |         .                               └--------┴-----┴-┘
++# |  | update  |   |         ............................> 2001:0DB8:2::1:1/128
++# |  └-------- ┘   |
++# └----------------┘
++#
++# The test script sets up two network namespaces, source_ns and sink_ns,
++# connected via a veth link. Within source_ns, it continuously updates the
++# IPv6 routing table by flushing and inserting IPV6_NEXTHOP_ADDR_COUNT nexthop
++# IPs destined for SINK_LOOPBACK_IP_ADDR in sink_ns. This refresh occurs at a
++# rate of 1/ROUTING_TABLE_REFRESH_PERIOD per second for TEST_DURATION seconds.
++#
++# Simultaneously, multiple iperf3 clients within source_ns generate heavy
++# outgoing IPv6 traffic. Each client is assigned a unique port number starting
++# at 5000 and incrementing sequentially. Each client targets a unique iperf3
++# server running in sink_ns, connected to the SINK_LOOPBACK_IFACE interface
++# using the same port number.
++#
++# The number of iperf3 servers and clients is set to half of the total
++# available cores on each machine.
++#
++# NOTE: We have tested this script on machines with various CPU specifications,
++# ranging from lower to higher performance as listed below. The test script
++# effectively triggered a kernel soft lockup on machines running an unpatched
++# kernel in under a minute:
++#
++# - 1x Intel Xeon E-2278G 8-Core Processor @ 3.40GHz
++# - 1x Intel Xeon E-2378G Processor 8-Core @ 2.80GHz
++# - 1x AMD EPYC 7401P 24-Core Processor @ 2.00GHz
++# - 1x AMD EPYC 7402P 24-Core Processor @ 2.80GHz
++# - 2x Intel Xeon Gold 5120 14-Core Processor @ 2.20GHz
++# - 1x Ampere Altra Q80-30 80-Core Processor @ 3.00GHz
++# - 2x Intel Xeon Gold 5120 14-Core Processor @ 2.20GHz
++# - 2x Intel Xeon Silver 4214 24-Core Processor @ 2.20GHz
++# - 1x AMD EPYC 7502P 32-Core @ 2.50GHz
++# - 1x Intel Xeon Gold 6314U 32-Core Processor @ 2.30GHz
++# - 2x Intel Xeon Gold 6338 32-Core Processor @ 2.00GHz
++#
++# On less performant machines, you may need to increase the TEST_DURATION
++# parameter to enhance the likelihood of encountering a race condition leading
++# to a kernel soft lockup and avoid a false negative result.
++#
++# NOTE: The test may not produce the expected result in virtualized
++# environments (e.g., qemu) due to differences in timing and CPU handling,
++# which can affect the conditions needed to trigger a soft lockup.
++
++source lib.sh
++source net_helper.sh
++
++TEST_DURATION=300
++ROUTING_TABLE_REFRESH_PERIOD=0.01
++
++IPERF3_BITRATE="300m"
++
++
++IPV6_NEXTHOP_ADDR_COUNT="128"
++IPV6_NEXTHOP_ADDR_MASK="96"
++IPV6_NEXTHOP_PREFIX="2001:0DB8:1"
++
++
++SOURCE_TEST_IFACE="veth_source"
++SOURCE_TEST_IP_ADDR="2001:0DB8:1::0:1/96"
++
++SINK_TEST_IFACE="veth_sink"
++# ${SINK_TEST_IFACE} is populated with the following range of IPv6 addresses:
++# 2001:0DB8:1::1:1  to 2001:0DB8:1::1:${IPV6_NEXTHOP_ADDR_COUNT}
++SINK_LOOPBACK_IFACE="lo"
++SINK_LOOPBACK_IP_MASK="128"
++SINK_LOOPBACK_IP_ADDR="2001:0DB8:2::1:1"
++
++nexthop_ip_list=""
++termination_signal=""
++kernel_softlokup_panic_prev_val=""
++
++terminate_ns_processes_by_pattern() {
++	local ns=$1
++	local pattern=$2
++
++	for pid in $(ip netns pids ${ns}); do
++		[ -e /proc/$pid/cmdline ] && grep -qe "${pattern}" /proc/$pid/cmdline && kill -9 $pid
++	done
++}
++
++cleanup() {
++	echo "info: cleaning up namespaces and terminating all processes within them..."
++
++
++	# Terminate iperf3 instances running in the source_ns. To avoid race
++	# conditions, first iterate over the PIDs and terminate those
++	# associated with the bash shells running the
++	# `while true; do iperf3 -c ...; done` loops. In a second iteration,
++	# terminate the individual `iperf3 -c ...` instances.
++	terminate_ns_processes_by_pattern ${source_ns} while
++	terminate_ns_processes_by_pattern ${source_ns} iperf3
++
++	# Repeat the same process for sink_ns
++	terminate_ns_processes_by_pattern ${sink_ns} while
++	terminate_ns_processes_by_pattern ${sink_ns} iperf3
++
++	# Check if any iperf3 instances are still running. This could happen
++	# if a core has entered an infinite loop and the timeout for detecting
++	# the soft lockup has not expired, but either the test interval has
++	# already elapsed or the test was terminated manually (e.g., with ^C)
++	for pid in $(ip netns pids ${source_ns}); do
++		if [ -e /proc/$pid/cmdline ] && grep -qe 'iperf3' /proc/$pid/cmdline; then
++			echo "FAIL: unable to terminate some iperf3 instances. Soft lockup is underway. A kernel panic is on the way!"
++			exit ${ksft_fail}
++		fi
++	done
++
++	if [ "$termination_signal" == "SIGINT" ]; then
++		echo "SKIP: Termination due to ^C (SIGINT)"
++	elif [ "$termination_signal" == "SIGALRM" ]; then
++		echo "PASS: No kernel soft lockup occurred during this ${TEST_DURATION} second test"
++	fi
++
++	cleanup_ns ${source_ns} ${sink_ns}
++
++	sysctl -qw kernel.softlockup_panic=${kernel_softlokup_panic_prev_val}
++}
++
++setup_prepare() {
++	setup_ns source_ns sink_ns
++
++	ip -n ${source_ns} link add name ${SOURCE_TEST_IFACE} type veth peer name ${SINK_TEST_IFACE} netns ${sink_ns}
++
++	# Setting up the Source namespace
++	ip -n ${source_ns} addr add ${SOURCE_TEST_IP_ADDR} dev ${SOURCE_TEST_IFACE}
++	ip -n ${source_ns} link set dev ${SOURCE_TEST_IFACE} qlen 10000
++	ip -n ${source_ns} link set dev ${SOURCE_TEST_IFACE} up
++	ip netns exec ${source_ns} sysctl -qw net.ipv6.fib_multipath_hash_policy=1
++
++	# Setting up the Sink namespace
++	ip -n ${sink_ns} addr add ${SINK_LOOPBACK_IP_ADDR}/${SINK_LOOPBACK_IP_MASK} dev ${SINK_LOOPBACK_IFACE}
++	ip -n ${sink_ns} link set dev ${SINK_LOOPBACK_IFACE} up
++	ip netns exec ${sink_ns} sysctl -qw net.ipv6.conf.${SINK_LOOPBACK_IFACE}.forwarding=1
++
++	ip -n ${sink_ns} link set ${SINK_TEST_IFACE} up
++	ip netns exec ${sink_ns} sysctl -qw net.ipv6.conf.${SINK_TEST_IFACE}.forwarding=1
++
++
++	# Populate nexthop IPv6 addresses on the test interface in the sink_ns
++	echo "info: populating ${IPV6_NEXTHOP_ADDR_COUNT} IPv6 addresses on the ${SINK_TEST_IFACE} interface ..."
++	for IP in $(seq 1 ${IPV6_NEXTHOP_ADDR_COUNT}); do
++		ip -n ${sink_ns} addr add ${IPV6_NEXTHOP_PREFIX}::$(printf "1:%x" "${IP}")/${IPV6_NEXTHOP_ADDR_MASK} dev ${SINK_TEST_IFACE};
++	done
++
++	# Preparing list of nexthops
++	for IP in $(seq 1 ${IPV6_NEXTHOP_ADDR_COUNT}); do
++		nexthop_ip_list=$nexthop_ip_list" nexthop via ${IPV6_NEXTHOP_PREFIX}::$(printf "1:%x" $IP) dev ${SOURCE_TEST_IFACE} weight 1"
++	done
++}
++
++
++test_soft_lockup_during_routing_table_refresh() {
++	# Start num_of_iperf_servers iperf3 servers in the sink_ns namespace,
++	# each listening on ports starting at 5001 and incrementing
++	# sequentially. Since iperf3 instances may terminate unexpectedly, a
++	# while loop is used to automatically restart them in such cases.
++	echo "info: starting ${num_of_iperf_servers} iperf3 servers in the sink_ns namespace ..."
++	for i in $(seq 1 ${num_of_iperf_servers}); do
++		cmd="iperf3 --bind ${SINK_LOOPBACK_IP_ADDR} -s -p $(printf '5%03d' ${i}) --rcv-timeout 200 &>/dev/null"
++		ip netns exec ${sink_ns} bash -c "while true; do ${cmd}; done &" &>/dev/null
++	done
++
++	# Wait for the iperf3 servers to be ready
++	for i in $(seq ${num_of_iperf_servers}); do
++		port=$(printf '5%03d' ${i});
++		wait_local_port_listen ${sink_ns} ${port} tcp
++	done
++
++	# Continuously refresh the routing table in the background within
++	# the source_ns namespace
++	ip netns exec ${source_ns} bash -c "
++		while \$(ip netns list | grep -q ${source_ns}); do
++			ip -6 route add ${SINK_LOOPBACK_IP_ADDR}/${SINK_LOOPBACK_IP_MASK} ${nexthop_ip_list};
++			sleep ${ROUTING_TABLE_REFRESH_PERIOD};
++			ip -6 route delete ${SINK_LOOPBACK_IP_ADDR}/${SINK_LOOPBACK_IP_MASK};
++		done &"
++
++	# Start num_of_iperf_servers iperf3 clients in the source_ns namespace,
++	# each sending TCP traffic on sequential ports starting at 5001.
++	# Since iperf3 instances may terminate unexpectedly (e.g., if the route
++	# to the server is deleted in the background during a route refresh), a
++	# while loop is used to automatically restart them in such cases.
++	echo "info: starting ${num_of_iperf_servers} iperf3 clients in the source_ns namespace ..."
++	for i in $(seq 1 ${num_of_iperf_servers}); do
++		cmd="iperf3 -c ${SINK_LOOPBACK_IP_ADDR} -p $(printf '5%03d' ${i}) --length 64 --bitrate ${IPERF3_BITRATE} -t 0 --connect-timeout 150 &>/dev/null"
++		ip netns exec ${source_ns} bash -c "while true; do ${cmd}; done &" &>/dev/null
++	done
++
++	echo "info: IPv6 routing table is being updated at the rate of $(echo "1/${ROUTING_TABLE_REFRESH_PERIOD}" | bc)/s for ${TEST_DURATION} seconds ..."
++	echo "info: A kernel soft lockup, if detected, results in a kernel panic!"
++
++	wait
++}
++
++# Make sure 'iperf3' is installed, skip the test otherwise
++if [ ! -x "$(command -v "iperf3")" ]; then
++	echo "SKIP: 'iperf3' is not installed. Skipping the test."
++	exit ${ksft_skip}
++fi
++
++# Determine the number of cores on the machine
++num_of_iperf_servers=$(( $(nproc)/2 ))
++
++# Check if we are running on a multi-core machine, skip the test otherwise
++if [ "${num_of_iperf_servers}" -eq 0 ]; then
++	echo "SKIP: This test is not valid on a single core machine!"
++	exit ${ksft_skip}
++fi
++
++# Since the kernel soft lockup we're testing causes at least one core to enter
++# an infinite loop, destabilizing the host and likely affecting subsequent
++# tests, we trigger a kernel panic instead of reporting a failure and
++# continuing
++kernel_softlokup_panic_prev_val=$(sysctl -n kernel.softlockup_panic)
++sysctl -qw kernel.softlockup_panic=1
++
++handle_sigint() {
++	termination_signal="SIGINT"
++	cleanup
++	exit ${ksft_skip}
++}
++
++handle_sigalrm() {
++	termination_signal="SIGALRM"
++	cleanup
++	exit ${ksft_pass}
++}
++
++trap handle_sigint SIGINT
++trap handle_sigalrm SIGALRM
++
++(sleep ${TEST_DURATION} && kill -s SIGALRM $$)&
++
++setup_prepare
++test_soft_lockup_during_routing_table_refresh
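The script bounds its own runtime with a backgrounded "sleep && kill -s SIGALRM $$" watchdog, and the SIGALRM trap treats expiry as a pass. A minimal C analogue of that pattern, with a toy 2-second duration standing in for TEST_DURATION and pause() standing in for the traffic workload:

#include <signal.h>
#include <unistd.h>

/* Treat an uninterrupted run of the full duration as a pass. */
static void handle_sigalrm(int sig)
{
	(void)sig;
	/* cleanup would go here */
	write(STDOUT_FILENO, "duration elapsed: pass\n", 23);
	_exit(0);
}

int main(void)
{
	signal(SIGALRM, handle_sigalrm);
	alarm(2);		/* stand-in for TEST_DURATION */
	for (;;)
		pause();	/* stand-in for the iperf3 workload */
}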
+diff --git a/tools/testing/selftests/net/netfilter/conntrack_dump_flush.c b/tools/testing/selftests/net/netfilter/conntrack_dump_flush.c
+index 254ff03297f06c..5f827e10717d19 100644
+--- a/tools/testing/selftests/net/netfilter/conntrack_dump_flush.c
++++ b/tools/testing/selftests/net/netfilter/conntrack_dump_flush.c
+@@ -43,6 +43,8 @@ static int build_cta_tuple_v4(struct nlmsghdr *nlh, int type,
+ 	mnl_attr_nest_end(nlh, nest_proto);
+ 
+ 	mnl_attr_nest_end(nlh, nest);
++
++	return 0;
+ }
+ 
+ static int build_cta_tuple_v6(struct nlmsghdr *nlh, int type,
+@@ -71,6 +73,8 @@ static int build_cta_tuple_v6(struct nlmsghdr *nlh, int type,
+ 	mnl_attr_nest_end(nlh, nest_proto);
+ 
+ 	mnl_attr_nest_end(nlh, nest);
++
++	return 0;
+ }
+ 
+ static int build_cta_proto(struct nlmsghdr *nlh)
+@@ -90,6 +94,8 @@ static int build_cta_proto(struct nlmsghdr *nlh)
+ 	mnl_attr_nest_end(nlh, nest_proto);
+ 
+ 	mnl_attr_nest_end(nlh, nest);
++
++	return 0;
+ }
+ 
+ static int conntrack_data_insert(struct mnl_socket *sock, struct nlmsghdr *nlh,
+diff --git a/tools/testing/selftests/net/pmtu.sh b/tools/testing/selftests/net/pmtu.sh
+index 569bce8b6383ee..6c651c880fe83d 100755
+--- a/tools/testing/selftests/net/pmtu.sh
++++ b/tools/testing/selftests/net/pmtu.sh
+@@ -2056,7 +2056,7 @@ check_running() {
+ 	pid=${1}
+ 	cmd=${2}
+ 
+-	[ "$(cat /proc/${pid}/cmdline 2>/dev/null | tr -d '\0')" = "{cmd}" ]
++	[ "$(cat /proc/${pid}/cmdline 2>/dev/null | tr -d '\0')" = "${cmd}" ]
+ }
+ 
+ test_cleanup_vxlanX_exception() {
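The check_running() fix above matters because /proc/<pid>/cmdline stores argv entries separated by NUL bytes, which the script strips with tr -d '\0' before comparing against the expected command. A small standalone C reader for the same file (it joins arguments with spaces, where the script deletes the separators outright):

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char path[64], buf[4096];
	size_t n, i;
	FILE *f;

	snprintf(path, sizeof(path), "/proc/%d/cmdline", getpid());
	f = fopen(path, "r");
	if (!f)
		return 1;
	n = fread(buf, 1, sizeof(buf) - 1, f);
	fclose(f);
	for (i = 0; i + 1 < n; i++)	/* NULs separate the arguments */
		if (buf[i] == '\0')
			buf[i] = ' ';
	buf[n] = '\0';
	printf("cmdline: %s\n", buf);
	return 0;
}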
+diff --git a/tools/testing/selftests/resctrl/fill_buf.c b/tools/testing/selftests/resctrl/fill_buf.c
+index ae120f1735c0bc..34e5df721430ee 100644
+--- a/tools/testing/selftests/resctrl/fill_buf.c
++++ b/tools/testing/selftests/resctrl/fill_buf.c
+@@ -127,7 +127,7 @@ unsigned char *alloc_buffer(size_t buf_size, int memflush)
+ {
+ 	void *buf = NULL;
+ 	uint64_t *p64;
+-	size_t s64;
++	ssize_t s64;
+ 	int ret;
+ 
+ 	ret = posix_memalign(&buf, PAGE_SIZE, buf_size);
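The fill_buf.c change above makes s64 signed (ssize_t) so that a negative error value can actually fail a "< 0" test; with an unsigned size_t the value wraps and the check is dead code. A minimal illustration, where fake_flush() is a hypothetical stand-in for a call returning a count or a negative error:

#include <stdio.h>
#include <sys/types.h>

static ssize_t fake_flush(int fail)
{
	return fail ? -1 : 4096;	/* bytes flushed, or an error */
}

int main(void)
{
	ssize_t ret = fake_flush(1);

	/* With size_t, -1 would wrap to SIZE_MAX and this never fires. */
	if (ret < 0)
		puts("error detected: signed type preserves the sign");
	else
		printf("flushed %zd bytes\n", ret);
	return 0;
}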
+diff --git a/tools/testing/selftests/resctrl/mbm_test.c b/tools/testing/selftests/resctrl/mbm_test.c
+index 6b5a3b52d861b8..cf08ba5e314e2a 100644
+--- a/tools/testing/selftests/resctrl/mbm_test.c
++++ b/tools/testing/selftests/resctrl/mbm_test.c
+@@ -40,7 +40,8 @@ show_bw_info(unsigned long *bw_imc, unsigned long *bw_resc, size_t span)
+ 	ksft_print_msg("%s Check MBM diff within %d%%\n",
+ 		       ret ? "Fail:" : "Pass:", MAX_DIFF_PERCENT);
+ 	ksft_print_msg("avg_diff_per: %d%%\n", avg_diff_per);
+-	ksft_print_msg("Span (MB): %zu\n", span / MB);
++	if (span)
++		ksft_print_msg("Span (MB): %zu\n", span / MB);
+ 	ksft_print_msg("avg_bw_imc: %lu\n", avg_bw_imc);
+ 	ksft_print_msg("avg_bw_resc: %lu\n", avg_bw_resc);
+ 
+@@ -138,15 +139,26 @@ static int mbm_run_test(const struct resctrl_test *test, const struct user_param
+ 		.setup		= mbm_setup,
+ 		.measure	= mbm_measure,
+ 	};
++	char *endptr = NULL;
++	size_t span = 0;
+ 	int ret;
+ 
+ 	remove(RESULT_FILE_NAME);
+ 
++	if (uparams->benchmark_cmd[0] && strcmp(uparams->benchmark_cmd[0], "fill_buf") == 0) {
++		if (uparams->benchmark_cmd[1] && *uparams->benchmark_cmd[1] != '\0') {
++			errno = 0;
++			span = strtoul(uparams->benchmark_cmd[1], &endptr, 10);
++			if (errno || *endptr != '\0')
++				return -EINVAL;
++		}
++	}
++
+ 	ret = resctrl_val(test, uparams, uparams->benchmark_cmd, &param);
+ 	if (ret)
+ 		return ret;
+ 
+-	ret = check_results(DEFAULT_SPAN);
++	ret = check_results(span);
+ 	if (ret && (get_vendor() == ARCH_INTEL))
+ 		ksft_print_msg("Intel MBM may be inaccurate when Sub-NUMA Clustering is enabled. Check BIOS configuration.\n");
+ 
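The mbm_test.c hunk above validates the user-supplied fill_buf span with strtoul, rejecting range errors through errno and trailing garbage through endptr. A self-contained sketch of that validation pattern (parse_span is an illustrative helper, not selftest code):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int parse_span(const char *arg, size_t *out)
{
	char *endptr = NULL;

	errno = 0;
	*out = strtoul(arg, &endptr, 10);
	if (errno || endptr == arg || *endptr != '\0')
		return -EINVAL;	/* range error or trailing garbage */
	return 0;
}

int main(void)
{
	size_t span;

	printf("\"250000000\" -> %d\n", parse_span("250000000", &span));
	printf("\"25MB\"      -> %d\n", parse_span("25MB", &span));
	return 0;
}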
+diff --git a/tools/testing/selftests/resctrl/resctrl_val.c b/tools/testing/selftests/resctrl/resctrl_val.c
+index 8c275f6b4dd777..f118f659e89600 100644
+--- a/tools/testing/selftests/resctrl/resctrl_val.c
++++ b/tools/testing/selftests/resctrl/resctrl_val.c
+@@ -83,13 +83,12 @@ void get_event_and_umask(char *cas_count_cfg, int count, bool op)
+ 	char *token[MAX_TOKENS];
+ 	int i = 0;
+ 
+-	strcat(cas_count_cfg, ",");
+ 	token[0] = strtok(cas_count_cfg, "=,");
+ 
+ 	for (i = 1; i < MAX_TOKENS; i++)
+ 		token[i] = strtok(NULL, "=,");
+ 
+-	for (i = 0; i < MAX_TOKENS; i++) {
++	for (i = 0; i < MAX_TOKENS - 1; i++) {
+ 		if (!token[i])
+ 			break;
+ 		if (strcmp(token[i], "event") == 0) {
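The resctrl_val.c fix above drops the stray strcat() and stops the scan one short of MAX_TOKENS, since the loop body reads the token following a matched key. A toy reproduction of that key/value tokenizing on a perf-style config string (the values are made up):

#include <stdio.h>
#include <string.h>

#define MAX_TOKENS 8

int main(void)
{
	char cfg[] = "event=0x3,umask=0x1";	/* must be writable for strtok */
	char *token[MAX_TOKENS];
	int i;

	token[0] = strtok(cfg, "=,");
	for (i = 1; i < MAX_TOKENS; i++)
		token[i] = strtok(NULL, "=,");

	/* Bound at MAX_TOKENS - 1 so token[i + 1] stays in range. */
	for (i = 0; i < MAX_TOKENS - 1; i++) {
		if (!token[i])
			break;
		if (!strcmp(token[i], "event") && token[i + 1])
			printf("event -> %s\n", token[i + 1]);
		if (!strcmp(token[i], "umask") && token[i + 1])
			printf("umask -> %s\n", token[i + 1]);
	}
	return 0;
}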
+diff --git a/tools/testing/selftests/vDSO/parse_vdso.c b/tools/testing/selftests/vDSO/parse_vdso.c
+index 7dd5668ea8a6e3..28f35620c49919 100644
+--- a/tools/testing/selftests/vDSO/parse_vdso.c
++++ b/tools/testing/selftests/vDSO/parse_vdso.c
+@@ -222,8 +222,7 @@ void *vdso_sym(const char *version, const char *name)
+ 		ELF(Sym) *sym = &vdso_info.symtab[chain];
+ 
+ 		/* Check for a defined global or weak function w/ right name. */
+-		if (ELF64_ST_TYPE(sym->st_info) != STT_FUNC &&
+-		    ELF64_ST_TYPE(sym->st_info) != STT_NOTYPE)
++		if (ELF64_ST_TYPE(sym->st_info) != STT_FUNC)
+ 			continue;
+ 		if (ELF64_ST_BIND(sym->st_info) != STB_GLOBAL &&
+ 		    ELF64_ST_BIND(sym->st_info) != STB_WEAK)
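The parse_vdso.c hunk above tightens the lookup to STT_FUNC symbols with GLOBAL or WEAK binding. For reference, a short program showing how the elf.h macros used there unpack st_info into those two fields (a sketch of the macros, not of vDSO parsing):

#include <elf.h>
#include <stdio.h>

int main(void)
{
	/* st_info packs binding in the high nibble, type in the low one. */
	unsigned char info = ELF64_ST_INFO(STB_WEAK, STT_FUNC);

	printf("type = %d (STT_FUNC = %d)\n", ELF64_ST_TYPE(info), STT_FUNC);
	printf("bind = %d (STB_WEAK = %d)\n", ELF64_ST_BIND(info), STB_WEAK);
	return 0;
}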
+diff --git a/tools/testing/selftests/wireguard/netns.sh b/tools/testing/selftests/wireguard/netns.sh
+index 405ff262ca93d4..55500f901fbc36 100755
+--- a/tools/testing/selftests/wireguard/netns.sh
++++ b/tools/testing/selftests/wireguard/netns.sh
+@@ -332,6 +332,7 @@ waitiface $netns1 vethc
+ waitiface $netns2 veths
+ 
+ n0 bash -c 'printf 1 > /proc/sys/net/ipv4/ip_forward'
++[[ -e /proc/sys/net/netfilter/nf_conntrack_udp_timeout ]] || modprobe nf_conntrack
+ n0 bash -c 'printf 2 > /proc/sys/net/netfilter/nf_conntrack_udp_timeout'
+ n0 bash -c 'printf 2 > /proc/sys/net/netfilter/nf_conntrack_udp_timeout_stream'
+ n0 iptables -t nat -A POSTROUTING -s 192.168.1.0/24 -d 10.0.0.0/24 -j SNAT --to 10.0.0.1
+diff --git a/tools/tracing/rtla/src/timerlat_hist.c b/tools/tracing/rtla/src/timerlat_hist.c
+index a3907c390d67a5..829511a712224f 100644
+--- a/tools/tracing/rtla/src/timerlat_hist.c
++++ b/tools/tracing/rtla/src/timerlat_hist.c
+@@ -1064,7 +1064,7 @@ timerlat_hist_apply_config(struct osnoise_tool *tool, struct timerlat_hist_param
+ 	 * If the user did not specify a type of thread, try user-threads first.
+ 	 * Fall back to kernel threads otherwise.
+ 	 */
+-	if (!params->kernel_workload && !params->user_workload) {
++	if (!params->kernel_workload && !params->user_hist) {
+ 		retval = tracefs_file_exists(NULL, "osnoise/per_cpu/cpu0/timerlat_fd");
+ 		if (retval) {
+ 			debug_msg("User-space interface detected, setting user-threads\n");
+diff --git a/tools/tracing/rtla/src/timerlat_top.c b/tools/tracing/rtla/src/timerlat_top.c
+index 210b0f533534ab..3b62519a412fc9 100644
+--- a/tools/tracing/rtla/src/timerlat_top.c
++++ b/tools/tracing/rtla/src/timerlat_top.c
+@@ -830,7 +830,7 @@ timerlat_top_apply_config(struct osnoise_tool *top, struct timerlat_top_params *
+ 	 * If the user did not specify a type of thread, try user-threads first.
+ 	 * Fall back to kernel threads otherwise.
+ 	 */
+-	if (!params->kernel_workload && !params->user_workload) {
++	if (!params->kernel_workload && !params->user_top) {
+ 		retval = tracefs_file_exists(NULL, "osnoise/per_cpu/cpu0/timerlat_fd");
+ 		if (retval) {
+ 			debug_msg("User-space interface detected, setting user-threads\n");
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 6ca7a1045bbb75..279e03029ce149 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -6561,106 +6561,3 @@ void kvm_exit(void)
+ 	kvm_irqfd_exit();
+ }
+ EXPORT_SYMBOL_GPL(kvm_exit);
+-
+-struct kvm_vm_worker_thread_context {
+-	struct kvm *kvm;
+-	struct task_struct *parent;
+-	struct completion init_done;
+-	kvm_vm_thread_fn_t thread_fn;
+-	uintptr_t data;
+-	int err;
+-};
+-
+-static int kvm_vm_worker_thread(void *context)
+-{
+-	/*
+-	 * The init_context is allocated on the stack of the parent thread, so
+-	 * we have to locally copy anything that is needed beyond initialization
+-	 */
+-	struct kvm_vm_worker_thread_context *init_context = context;
+-	struct task_struct *parent;
+-	struct kvm *kvm = init_context->kvm;
+-	kvm_vm_thread_fn_t thread_fn = init_context->thread_fn;
+-	uintptr_t data = init_context->data;
+-	int err;
+-
+-	err = kthread_park(current);
+-	/* kthread_park(current) is never supposed to return an error */
+-	WARN_ON(err != 0);
+-	if (err)
+-		goto init_complete;
+-
+-	err = cgroup_attach_task_all(init_context->parent, current);
+-	if (err) {
+-		kvm_err("%s: cgroup_attach_task_all failed with err %d\n",
+-			__func__, err);
+-		goto init_complete;
+-	}
+-
+-	set_user_nice(current, task_nice(init_context->parent));
+-
+-init_complete:
+-	init_context->err = err;
+-	complete(&init_context->init_done);
+-	init_context = NULL;
+-
+-	if (err)
+-		goto out;
+-
+-	/* Wait to be woken up by the spawner before proceeding. */
+-	kthread_parkme();
+-
+-	if (!kthread_should_stop())
+-		err = thread_fn(kvm, data);
+-
+-out:
+-	/*
+-	 * Move kthread back to its original cgroup to prevent it lingering in
+-	 * the cgroup of the VM process, after the latter finishes its
+-	 * execution.
+-	 *
+-	 * kthread_stop() waits on the 'exited' completion condition which is
+-	 * set in exit_mm(), via mm_release(), in do_exit(). However, the
+-	 * kthread is removed from the cgroup in the cgroup_exit() which is
+-	 * called after the exit_mm(). This causes the kthread_stop() to return
+-	 * before the kthread actually quits the cgroup.
+-	 */
+-	rcu_read_lock();
+-	parent = rcu_dereference(current->real_parent);
+-	get_task_struct(parent);
+-	rcu_read_unlock();
+-	cgroup_attach_task_all(parent, current);
+-	put_task_struct(parent);
+-
+-	return err;
+-}
+-
+-int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn,
+-				uintptr_t data, const char *name,
+-				struct task_struct **thread_ptr)
+-{
+-	struct kvm_vm_worker_thread_context init_context = {};
+-	struct task_struct *thread;
+-
+-	*thread_ptr = NULL;
+-	init_context.kvm = kvm;
+-	init_context.parent = current;
+-	init_context.thread_fn = thread_fn;
+-	init_context.data = data;
+-	init_completion(&init_context.init_done);
+-
+-	thread = kthread_run(kvm_vm_worker_thread, &init_context,
+-			     "%s-%d", name, task_pid_nr(current));
+-	if (IS_ERR(thread))
+-		return PTR_ERR(thread);
+-
+-	/* kthread_run is never supposed to return NULL */
+-	WARN_ON(thread == NULL);
+-
+-	wait_for_completion(&init_context.init_done);
+-
+-	if (!init_context.err)
+-		*thread_ptr = thread;
+-
+-	return init_context.err;
+-}
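The removed kvm_vm_create_worker_thread() relied on an init handshake: the context lives on the spawner's stack, so the worker copies what it needs and signals completion before the spawner's frame can go away. A userspace pthread analogue of that synchronization shape (the kthread, parking and cgroup details do not carry over; build with -pthread):

#include <pthread.h>
#include <stdio.h>

struct worker_ctx {
	pthread_mutex_t lock;
	pthread_cond_t init_done;
	int initialized;
	int err;
};

static void *worker(void *arg)
{
	struct worker_ctx *ctx = arg;

	/* ... copy anything needed out of ctx here ... */
	pthread_mutex_lock(&ctx->lock);
	ctx->err = 0;
	ctx->initialized = 1;
	pthread_cond_signal(&ctx->init_done);
	pthread_mutex_unlock(&ctx->lock);
	/* ctx must not be touched past this point */
	return NULL;
}

int main(void)
{
	struct worker_ctx ctx = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.init_done = PTHREAD_COND_INITIALIZER,
	};
	pthread_t thread;

	if (pthread_create(&thread, NULL, worker, &ctx))
		return 1;
	pthread_mutex_lock(&ctx.lock);
	while (!ctx.initialized)
		pthread_cond_wait(&ctx.init_done, &ctx.lock);
	pthread_mutex_unlock(&ctx.lock);
	printf("worker init err: %d\n", ctx.err);
	pthread_join(thread, NULL);
	return ctx.err;
}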


^ permalink raw reply related	[flat|nested] 82+ messages in thread
* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2024-12-02 17:15 Mike Pagano
  0 siblings, 0 replies; 82+ messages in thread
From: Mike Pagano @ 2024-12-02 17:15 UTC (permalink / raw
  To: gentoo-commits

commit:     353d9f32e0ba5f71b437ff4c970539e584a58e0b
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Dec  2 17:13:57 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Dec  2 17:13:57 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=353d9f32

GCC 15 defs to -std=gnu23. Hack in CSTD_FLAG to pass -std=gnu11 everywhere

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                         |   4 ++
 2980_GCC15-gnu23-to-gnu11-fix.patch | 105 ++++++++++++++++++++++++++++++++++++
 2 files changed, 109 insertions(+)

diff --git a/0000_README b/0000_README
index 4df3304e..bc514d88 100644
--- a/0000_README
+++ b/0000_README
@@ -75,6 +75,10 @@ Patch:  2920_sign-file-patch-for-libressl.patch
 From:   https://bugs.gentoo.org/717166
 Desc:   sign-file: full functionality with modern LibreSSL
 
+Patch:  2980_GCC15-gnu23-to-gnu11-fix.patch
+From:   https://lore.kernel.org/linux-kbuild/20241119044724.GA2246422@thelio-3990X/
+Desc:   GCC 15 defaults to -std=gnu23. Hack in CSTD_FLAG to pass -std=gnu11 everywhere.
+
 Patch:  2990_libbpf-v2-workaround-Wmaybe-uninitialized-false-pos.patch
 From:   https://lore.kernel.org/bpf/
 Desc:   libbpf: workaround -Wmaybe-uninitialized false positive

diff --git a/2980_GCC15-gnu23-to-gnu11-fix.patch b/2980_GCC15-gnu23-to-gnu11-fix.patch
new file mode 100644
index 00000000..c74b6180
--- /dev/null
+++ b/2980_GCC15-gnu23-to-gnu11-fix.patch
@@ -0,0 +1,105 @@
+GCC 15 defaults to -std=gnu23. While most of the kernel builds with -std=gnu11,
+some of it forgets to pass that flag. Hack in CSTD_FLAG to pass -std=gnu11
+everywhere.
+
+https://lore.kernel.org/linux-kbuild/20241119044724.GA2246422@thelio-3990X/
+--- a/Makefile
++++ b/Makefile
+@@ -416,6 +416,8 @@ export KCONFIG_CONFIG
+ # SHELL used by kbuild
+ CONFIG_SHELL := sh
+ 
++CSTD_FLAG := -std=gnu11
++
+ HOST_LFS_CFLAGS := $(shell getconf LFS_CFLAGS 2>/dev/null)
+ HOST_LFS_LDFLAGS := $(shell getconf LFS_LDFLAGS 2>/dev/null)
+ HOST_LFS_LIBS := $(shell getconf LFS_LIBS 2>/dev/null)
+@@ -437,7 +439,7 @@ HOSTRUSTC = rustc
+ HOSTPKG_CONFIG	= pkg-config
+ 
+ KBUILD_USERHOSTCFLAGS := -Wall -Wmissing-prototypes -Wstrict-prototypes \
+-			 -O2 -fomit-frame-pointer -std=gnu11
++			 -O2 -fomit-frame-pointer $(CSTD_FLAG)
+ KBUILD_USERCFLAGS  := $(KBUILD_USERHOSTCFLAGS) $(USERCFLAGS)
+ KBUILD_USERLDFLAGS := $(USERLDFLAGS)
+ 
+@@ -545,7 +547,7 @@ LINUXINCLUDE    := \
+ KBUILD_AFLAGS   := -D__ASSEMBLY__ -fno-PIE
+ 
+ KBUILD_CFLAGS :=
+-KBUILD_CFLAGS += -std=gnu11
++KBUILD_CFLAGS += $(CSTD_FLAG)
+ KBUILD_CFLAGS += -fshort-wchar
+ KBUILD_CFLAGS += -funsigned-char
+ KBUILD_CFLAGS += -fno-common
+@@ -589,7 +591,7 @@ export CPP AR NM STRIP OBJCOPY OBJDUMP READELF PAHOLE RESOLVE_BTFIDS LEX YACC AW
+ export PERL PYTHON3 CHECK CHECKFLAGS MAKE UTS_MACHINE HOSTCXX
+ export KGZIP KBZIP2 KLZOP LZMA LZ4 XZ ZSTD
+ export KBUILD_HOSTCXXFLAGS KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS LDFLAGS_MODULE
+-export KBUILD_USERCFLAGS KBUILD_USERLDFLAGS
++export KBUILD_USERCFLAGS KBUILD_USERLDFLAGS CSTD_FLAG
+ 
+ export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS KBUILD_LDFLAGS
+ export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE
+--- a/arch/arm64/kernel/vdso32/Makefile
++++ b/arch/arm64/kernel/vdso32/Makefile
+@@ -65,7 +65,7 @@ VDSO_CFLAGS += -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
+                -fno-strict-aliasing -fno-common \
+                -Werror-implicit-function-declaration \
+                -Wno-format-security \
+-               -std=gnu11
++               $(CSTD_FLAG)
+ VDSO_CFLAGS  += -O2
+ # Some useful compiler-dependent flags from top-level Makefile
+ VDSO_CFLAGS += $(call cc32-option,-Wno-pointer-sign)
+--- a/arch/x86/Makefile
++++ b/arch/x86/Makefile
+@@ -47,7 +47,7 @@ endif
+ 
+ # How to compile the 16-bit code.  Note we always compile for -march=i386;
+ # that way we can complain to the user if the CPU is insufficient.
+-REALMODE_CFLAGS	:= -std=gnu11 -m16 -g -Os -DDISABLE_BRANCH_PROFILING -D__DISABLE_EXPORTS \
++REALMODE_CFLAGS	:= $(CSTD_FLAG) -m16 -g -Os -DDISABLE_BRANCH_PROFILING -D__DISABLE_EXPORTS \
+ 		   -Wall -Wstrict-prototypes -march=i386 -mregparm=3 \
+ 		   -fno-strict-aliasing -fomit-frame-pointer -fno-pic \
+ 		   -mno-mmx -mno-sse $(call cc-option,-fcf-protection=none)
+--- a/drivers/firmware/efi/libstub/Makefile
++++ b/drivers/firmware/efi/libstub/Makefile
+@@ -7,7 +7,7 @@
+ #
+ 
+ # non-x86 reuses KBUILD_CFLAGS, x86 does not
+-cflags-y			:= $(KBUILD_CFLAGS)
++cflags-y			:= $(KBUILD_CFLAGS) $(CSTD_FLAG)
+ 
+ cflags-$(CONFIG_X86_32)		:= -march=i386
+ cflags-$(CONFIG_X86_64)		:= -mcmodel=small
+@@ -18,7 +18,7 @@ cflags-$(CONFIG_X86)		+= -m$(BITS) -D__KERNEL__ \
+ 				   $(call cc-disable-warning, address-of-packed-member) \
+ 				   $(call cc-disable-warning, gnu) \
+ 				   -fno-asynchronous-unwind-tables \
+-				   $(CLANG_FLAGS)
++				   $(CLANG_FLAGS) $(CSTD_FLAG)
+ 
+ # arm64 uses the full KBUILD_CFLAGS so it's necessary to explicitly
+ # disable the stackleak plugin
+@@ -42,7 +42,7 @@ KBUILD_CFLAGS			:= $(subst $(CC_FLAGS_FTRACE),,$(cflags-y)) \
+ 				   -ffreestanding \
+ 				   -fno-stack-protector \
+ 				   $(call cc-option,-fno-addrsig) \
+-				   -D__DISABLE_EXPORTS
++				   -D__DISABLE_EXPORTS $(CSTD_FLAG)
+ 
+ #
+ # struct randomization only makes sense for Linux internal types, which the EFI
+--- a/arch/x86/boot/compressed/Makefile
++++ b/arch/x86/boot/compressed/Makefile
+@@ -24,7 +24,7 @@ targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma \
+ # case of cross compiling, as it has the '--target=' flag, which is needed to
+ # avoid errors with '-march=i386', and future flags may depend on the target to
+ # be valid.
+-KBUILD_CFLAGS := -m$(BITS) -O2 $(CLANG_FLAGS)
++KBUILD_CFLAGS := -m$(BITS) -O2 $(CLANG_FLAGS) $(CSTD_FLAG)
+ KBUILD_CFLAGS += -fno-strict-aliasing -fPIE
+ KBUILD_CFLAGS += -Wundef
+ KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
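As one illustrative incompatibility (not taken from the kernel tree): code of this shape is valid under -std=gnu11 but rejected under -std=gnu23, where bool, true and false became keywords; pinning CSTD_FLAG as above avoids that whole class of breakage:

/* gcc -std=gnu11 example.c  -> builds
 * gcc -std=gnu23 example.c  -> error: bool is now a keyword */
#include <stdio.h>

typedef int bool;
#define true  1
#define false 0

static bool is_even(int x)
{
	return (x % 2) == 0 ? true : false;
}

int main(void)
{
	printf("4 is even: %d\n", is_even(4));
	return 0;
}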


^ permalink raw reply related	[flat|nested] 82+ messages in thread
* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2024-11-30 17:33 Mike Pagano
  0 siblings, 0 replies; 82+ messages in thread
From: Mike Pagano @ 2024-11-30 17:33 UTC (permalink / raw
  To: gentoo-commits

commit:     cf5a18f21dd174f93ebf5fcc37a3e41ce8e5fdb8
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Nov 30 17:29:45 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Nov 30 17:32:03 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=cf5a18f2

Fix case for X86_USER_SHADOW_STACK

Bug: https://bugs.gentoo.org/945481

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 4567_distro-Gentoo-Kconfig.patch | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/4567_distro-Gentoo-Kconfig.patch b/4567_distro-Gentoo-Kconfig.patch
index 87b8fa95..74e75c40 100644
--- a/4567_distro-Gentoo-Kconfig.patch
+++ b/4567_distro-Gentoo-Kconfig.patch
@@ -254,7 +254,7 @@
 +	select RANDOMIZE_BASE
 +	select RANDOMIZE_MEMORY
 +	select RELOCATABLE
-+	select X86_USER_SHADOW_STACK if AS_WRUSS=Y
++	select X86_USER_SHADOW_STACK if AS_WRUSS=y
 +	select VMAP_STACK
 +
 +


^ permalink raw reply related	[flat|nested] 82+ messages in thread
* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2024-11-22 17:45 Mike Pagano
  0 siblings, 0 replies; 82+ messages in thread
From: Mike Pagano @ 2024-11-22 17:45 UTC (permalink / raw
  To: gentoo-commits

commit:     84e347f66f81e2e80e29676135f13f38d88cd91e
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Nov 22 17:45:09 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Nov 22 17:45:09 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=84e347f6

Linux patch 6.12.1

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             | 12 ++++++----
 1001_linux-6.12.1.patch | 62 +++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 70 insertions(+), 4 deletions(-)

diff --git a/0000_README b/0000_README
index 2f20a332..4df3304e 100644
--- a/0000_README
+++ b/0000_README
@@ -43,17 +43,21 @@ EXPERIMENTAL
 Individual Patch Descriptions:
 --------------------------------------------------------------------------
 
+Patch:  1000_linux-6.12.1.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.12.1
+
 Patch:  1510_fs-enable-link-security-restrictions-by-default.patch
 From:   http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch/
 Desc:   Enable link security restrictions by default.
 
 Patch:  1700_sparc-address-warray-bound-warnings.patch
-From:		https://github.com/KSPP/linux/issues/109
-Desc:		Address -Warray-bounds warnings 
+From:   https://github.com/KSPP/linux/issues/109
+Desc:   Address -Warray-bounds warnings 
 
 Patch:  1730_parisc-Disable-prctl.patch
-From:	  https://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux.git
-Desc:	  prctl: Temporarily disable prctl(PR_SET_MDWE) on parisc
+From:   https://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux.git
+Desc:   prctl: Temporarily disable prctl(PR_SET_MDWE) on parisc
 
 Patch:  2000_BT-Check-key-sizes-only-if-Secure-Simple-Pairing-enabled.patch
 From:   https://lore.kernel.org/linux-bluetooth/20190522070540.48895-1-marcel@holtmann.org/raw

diff --git a/1001_linux-6.12.1.patch b/1001_linux-6.12.1.patch
new file mode 100644
index 00000000..8eed7b47
--- /dev/null
+++ b/1001_linux-6.12.1.patch
@@ -0,0 +1,62 @@
+diff --git a/Makefile b/Makefile
+index 68a8faff25432a..70070e64d267c1 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 12
+-SUBLEVEL = 0
++SUBLEVEL = 1
+ EXTRAVERSION =
+ NAME = Baby Opossum Posse
+ 
+diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
+index 0fac689c6350b2..13db0026dc1aad 100644
+--- a/drivers/media/usb/uvc/uvc_driver.c
++++ b/drivers/media/usb/uvc/uvc_driver.c
+@@ -371,7 +371,7 @@ static int uvc_parse_format(struct uvc_device *dev,
+ 	 * Parse the frame descriptors. Only uncompressed, MJPEG and frame
+ 	 * based formats have frame descriptors.
+ 	 */
+-	while (buflen > 2 && buffer[1] == USB_DT_CS_INTERFACE &&
++	while (ftype && buflen > 2 && buffer[1] == USB_DT_CS_INTERFACE &&
+ 	       buffer[2] == ftype) {
+ 		unsigned int maxIntervalIndex;
+ 
+diff --git a/mm/mmap.c b/mm/mmap.c
+index 79d541f1502b22..4f6e566d52faa6 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -1491,7 +1491,18 @@ static unsigned long __mmap_region(struct file *file, unsigned long addr,
+ 				vm_flags = vma->vm_flags;
+ 				goto file_expanded;
+ 			}
+-			vma_iter_config(&vmi, addr, end);
++
++			/*
++			 * In the unlikely event that more memory was needed, but
++			 * not available for the vma merge, the vma iterator
++			 * will have no memory reserved for the write we told
++			 * the driver was happening.  To keep up the ruse,
++			 * ensure the allocation for the store succeeds.
++			 */
++			if (vmg_nomem(&vmg)) {
++				mas_preallocate(&vmi.mas, vma,
++						GFP_KERNEL|__GFP_NOFAIL);
++			}
+ 		}
+ 
+ 		vm_flags = vma->vm_flags;
+diff --git a/net/vmw_vsock/hyperv_transport.c b/net/vmw_vsock/hyperv_transport.c
+index e2157e38721770..56c232cf5b0f4f 100644
+--- a/net/vmw_vsock/hyperv_transport.c
++++ b/net/vmw_vsock/hyperv_transport.c
+@@ -549,6 +549,7 @@ static void hvs_destruct(struct vsock_sock *vsk)
+ 		vmbus_hvsock_device_unregister(chan);
+ 
+ 	kfree(hvs);
++	vsk->trans = NULL;
+ }
+ 
+ static int hvs_dgram_bind(struct vsock_sock *vsk, struct sockaddr_vm *addr)
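The hvs_destruct() fix above clears vsk->trans immediately after freeing it, so any later teardown path sees NULL instead of a dangling pointer. A userspace sketch of the same guard (the struct and field names here are illustrative only):

#include <stdio.h>
#include <stdlib.h>

struct sock_state {
	void *trans;
};

static void destruct(struct sock_state *s)
{
	free(s->trans);
	s->trans = NULL;	/* mirrors vsk->trans = NULL */
}

int main(void)
{
	struct sock_state s = { .trans = malloc(64) };

	destruct(&s);
	destruct(&s);	/* now safe: free(NULL) is a no-op */
	printf("trans is %s\n", s.trans ? "dangling" : "NULL");
	return 0;
}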


^ permalink raw reply related	[flat|nested] 82+ messages in thread
* [gentoo-commits] proj/linux-patches:6.12 commit in: /
@ 2024-11-21 13:12 Mike Pagano
  0 siblings, 0 replies; 82+ messages in thread
From: Mike Pagano @ 2024-11-21 13:12 UTC (permalink / raw
  To: gentoo-commits

commit:     67d76cc6cc2bdc81a481ca7563853da3307b9331
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Nov 21 13:11:30 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Nov 21 13:11:30 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=67d76cc6

BMQ(BitMap Queue) Scheduler. (USE=experimental)

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                  |     8 +
 5020_BMQ-and-PDS-io-scheduler-v6.12-r0.patch | 11188 +++++++++++++++++++++++++
 5021_BMQ-and-PDS-gentoo-defaults.patch       |    13 +
 3 files changed, 11209 insertions(+)

diff --git a/0000_README b/0000_README
index 79d80432..2f20a332 100644
--- a/0000_README
+++ b/0000_README
@@ -86,3 +86,11 @@ Desc:   Add Gentoo Linux support config settings and defaults.
 Patch:  5010_enable-cpu-optimizations-universal.patch
 From:   https://github.com/graysky2/kernel_compiler_patch
 Desc:   Kernel >= 5.15 patch enables gcc = v11.1+ optimizations for additional CPUs.
+
+Patch:  5020_BMQ-and-PDS-io-scheduler-v6.12-r0.patch
+From:   https://gitlab.com/alfredchen/projectc
Desc:   BMQ(BitMap Queue) Scheduler. A new CPU scheduler developed from PDS (included). Inspired by the scheduler in Zircon.
+
+Patch:  5021_BMQ-and-PDS-gentoo-defaults.patch
+From:   https://gitweb.gentoo.org/proj/linux-patches.git/
+Desc:   Set defaults for BMQ. default to n

diff --git a/5020_BMQ-and-PDS-io-scheduler-v6.12-r0.patch b/5020_BMQ-and-PDS-io-scheduler-v6.12-r0.patch
new file mode 100644
index 00000000..9eb3139f
--- /dev/null
+++ b/5020_BMQ-and-PDS-io-scheduler-v6.12-r0.patch
@@ -0,0 +1,11188 @@
+diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst
+index f8bc1630eba0..1b90768a0916 100644
+--- a/Documentation/admin-guide/sysctl/kernel.rst
++++ b/Documentation/admin-guide/sysctl/kernel.rst
+@@ -1673,3 +1673,12 @@ is 10 seconds.
+ 
+ The softlockup threshold is (``2 * watchdog_thresh``). Setting this
+ tunable to zero will disable lockup detection altogether.
++
++yield_type:
++===========
++
++BMQ/PDS CPU scheduler only. This determines what type of yield is
++performed when a task calls sched_yield().
++
++  0 - No yield.
++  1 - Requeue task. (default)
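Assuming the standard sysctl-to-procfs mapping, the knob documented above shows up as /proc/sys/kernel/yield_type on kernels built with the alternative schedulers. A small probe that only reads the current value (purely illustrative; the file does not exist on mainline kernels):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/yield_type", "r");
	int val;

	if (!f) {
		perror("yield_type sysctl not available");
		return 1;
	}
	if (fscanf(f, "%d", &val) == 1)
		printf("yield_type = %d (0: no yield, 1: requeue)\n", val);
	fclose(f);
	return 0;
}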
+diff --git a/Documentation/scheduler/sched-BMQ.txt b/Documentation/scheduler/sched-BMQ.txt
+new file mode 100644
+index 000000000000..05c84eec0f31
+--- /dev/null
++++ b/Documentation/scheduler/sched-BMQ.txt
+@@ -0,0 +1,110 @@
++                         BitMap queue CPU Scheduler
++                         --------------------------
++
++CONTENT
++========
++
++ Background
++ Design
++   Overview
++   Task policy
++   Priority management
++   BitMap Queue
++   CPU Assignment and Migration
++
++
++Background
++==========
++
++BitMap Queue CPU scheduler, referred to as BMQ from here on, is an evolution
++of the previous Priority and Deadline based Skiplist multiple queue scheduler
++(PDS), and is inspired by the Zircon scheduler. Its goal is to keep the
++scheduler code simple while remaining efficient and scalable for interactive
++tasks such as desktop use, movie playback and gaming.
++
++Design
++======
++
++Overview
++--------
++
++BMQ uses a per-CPU run queue design: each (logical) CPU has its own run
++queue and is responsible for scheduling the tasks that are put into that
++run queue.
++
++The run queue is a set of priority queues. Note that, as data structures,
++these queues are FIFO queues for non-rt tasks and priority queues for rt
++tasks; see BitMap Queue below for details. BMQ is optimized for non-rt tasks
++because most applications are non-rt tasks. Whether the queue is FIFO or
++priority, each queue is an ordered list of runnable tasks awaiting execution,
++and the data structures are the same. When it is time for a new task to run,
++the scheduler simply looks for the lowest numbered queue that contains a
++task and runs the first task from the head of that queue. The per-CPU idle
++task is also in the run queue, so the scheduler can always find a task to
++run from its run queue.
++
++Each task is assigned the same timeslice (default 4 ms) when it is picked to
++start running. A task is reinserted at the end of the appropriate priority
++queue when it uses up its whole timeslice. When the scheduler selects a new
++task from the priority queue, it sets the CPU's preemption timer for the
++remainder of the previous timeslice. When that timer fires, the scheduler
++stops execution of that task, selects another task and starts over again.
++
++If a task blocks waiting for a shared resource then it's taken out of its
++priority queue and is placed in a wait queue for the shared resource. When it
++is unblocked it will be reinserted in the appropriate priority queue of an
++eligible CPU.
++
++Task policy
++-----------
++
++BMQ supports the DEADLINE, FIFO, RR, NORMAL, BATCH and IDLE task policies,
++like the mainline CFS scheduler, but it is heavily optimized for non-rt
++tasks, that is, NORMAL/BATCH/IDLE policy tasks. Below are the implementation
++details of each policy.
++
++DEADLINE
++	It is squashed as priority 0 FIFO task.
++
++FIFO/RR
++	All RT tasks share one single priority queue in the BMQ run queue design.
++The complexity of the insert operation is O(n). BMQ is not designed for
++systems that run mostly rt policy tasks.
++
++NORMAL/BATCH/IDLE
++	BATCH and IDLE tasks are treated as the same policy. They compete for CPU
++with NORMAL policy tasks, but they never receive a priority boost. To control
++the priority of NORMAL/BATCH/IDLE tasks, simply use the nice level.
++
++ISO
++	ISO policy is not supported in BMQ. Use a NORMAL policy task at nice
++level -20 instead.
++
++Priority management
++-------------------
++
++RT tasks have priorities from 0-99. For non-rt tasks, three different
++factors are used to determine the effective priority of a task, the
++effective priority being what determines which queue it will be in.
++
++The first factor is simply the task's static priority, which is assigned
++from the task's nice level: [-20, 19] from userland's point of view and
++[0, 39] internally.
++
++The second factor is the priority boost. This is a value bounded within
++[-MAX_PRIORITY_ADJ, MAX_PRIORITY_ADJ] used to offset the base priority, and
++it is modified in the following cases:
++
++* When a thread has used up its entire timeslice, its boost value is always
++deboosted by increasing it by one.
++* When a thread gives up cpu control (voluntarily or not) to reschedule, and
++its switch-in time (time after last switch and run) is below the threshold
++based on its priority boost, its boost value is boosted by decreasing it by
++one, capped at 0 (it won't go negative).
++
++The intent in this system is to ensure that interactive threads are serviced
++quickly. These are usually the threads that interact directly with the user
++and cause user-perceivable latency. These threads usually do little work and
++spend most of their time blocked awaiting another user event. So they get the
++priority boost from unblocking while background threads that do most of the
++processing receive the priority penalty for using their entire timeslice.
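A toy model of the two boost rules just described, clamped to the documented MAX_PRIORITY_ADJ bound; the helper names and the clamp at the upper end are assumptions drawn from this text, not code from alt_core.c:

#include <stdio.h>

#define MAX_PRIORITY_ADJ 12

/* Used the whole timeslice: deboost (a larger value is worse). */
static int deboost(int b)
{
	return (b < MAX_PRIORITY_ADJ) ? b + 1 : b;
}

/* Rescheduled quickly: boost, capped at 0, never negative. */
static int boost(int b)
{
	return (b > 0) ? b - 1 : 0;
}

int main(void)
{
	int b = 0;

	b = deboost(b);	/* 1: burned a full timeslice */
	b = deboost(b);	/* 2 */
	b = boost(b);	/* 1: gave up the CPU quickly */
	b = boost(b);	/* 0 */
	b = boost(b);	/* still 0: capped */
	printf("final boost offset: %d\n", b);
	return 0;
}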
+diff --git a/fs/proc/base.c b/fs/proc/base.c
+index b31283d81c52..e27c5c7b05f6 100644
+--- a/fs/proc/base.c
++++ b/fs/proc/base.c
+@@ -516,7 +516,7 @@ static int proc_pid_schedstat(struct seq_file *m, struct pid_namespace *ns,
+ 		seq_puts(m, "0 0 0\n");
+ 	else
+ 		seq_printf(m, "%llu %llu %lu\n",
+-		   (unsigned long long)task->se.sum_exec_runtime,
++		   (unsigned long long)tsk_seruntime(task),
+ 		   (unsigned long long)task->sched_info.run_delay,
+ 		   task->sched_info.pcount);
+ 
+diff --git a/include/asm-generic/resource.h b/include/asm-generic/resource.h
+index 8874f681b056..59eb72bf7d5f 100644
+--- a/include/asm-generic/resource.h
++++ b/include/asm-generic/resource.h
+@@ -23,7 +23,7 @@
+ 	[RLIMIT_LOCKS]		= {  RLIM_INFINITY,  RLIM_INFINITY },	\
+ 	[RLIMIT_SIGPENDING]	= { 		0,	       0 },	\
+ 	[RLIMIT_MSGQUEUE]	= {   MQ_BYTES_MAX,   MQ_BYTES_MAX },	\
+-	[RLIMIT_NICE]		= { 0, 0 },				\
++	[RLIMIT_NICE]		= { 30, 30 },				\
+ 	[RLIMIT_RTPRIO]		= { 0, 0 },				\
+ 	[RLIMIT_RTTIME]		= {  RLIM_INFINITY,  RLIM_INFINITY },	\
+ }
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index bb343136ddd0..212d9204e9aa 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -804,9 +804,13 @@ struct task_struct {
+ 	struct alloc_tag		*alloc_tag;
+ #endif
+ 
+-#ifdef CONFIG_SMP
++#if defined(CONFIG_SMP) || defined(CONFIG_SCHED_ALT)
+ 	int				on_cpu;
++#endif
++
++#ifdef CONFIG_SMP
+ 	struct __call_single_node	wake_entry;
++#ifndef CONFIG_SCHED_ALT
+ 	unsigned int			wakee_flips;
+ 	unsigned long			wakee_flip_decay_ts;
+ 	struct task_struct		*last_wakee;
+@@ -820,6 +824,7 @@ struct task_struct {
+ 	 */
+ 	int				recent_used_cpu;
+ 	int				wake_cpu;
++#endif /* !CONFIG_SCHED_ALT */
+ #endif
+ 	int				on_rq;
+ 
+@@ -828,6 +833,19 @@ struct task_struct {
+ 	int				normal_prio;
+ 	unsigned int			rt_priority;
+ 
++#ifdef CONFIG_SCHED_ALT
++	u64				last_ran;
++	s64				time_slice;
++	struct list_head		sq_node;
++#ifdef CONFIG_SCHED_BMQ
++	int				boost_prio;
++#endif /* CONFIG_SCHED_BMQ */
++#ifdef CONFIG_SCHED_PDS
++	u64				deadline;
++#endif /* CONFIG_SCHED_PDS */
++	/* sched_clock time spent running */
++	u64				sched_time;
++#else /* !CONFIG_SCHED_ALT */
+ 	struct sched_entity		se;
+ 	struct sched_rt_entity		rt;
+ 	struct sched_dl_entity		dl;
+@@ -842,6 +860,7 @@ struct task_struct {
+ 	unsigned long			core_cookie;
+ 	unsigned int			core_occupation;
+ #endif
++#endif /* !CONFIG_SCHED_ALT */
+ 
+ #ifdef CONFIG_CGROUP_SCHED
+ 	struct task_group		*sched_task_group;
+@@ -1609,6 +1628,15 @@ struct task_struct {
+ 	 */
+ };
+ 
++#ifdef CONFIG_SCHED_ALT
++#define tsk_seruntime(t)		((t)->sched_time)
++/* replace the uncertain rt_timeout with 0UL */
++#define tsk_rttimeout(t)		(0UL)
++#else /* CFS */
++#define tsk_seruntime(t)	((t)->se.sum_exec_runtime)
++#define tsk_rttimeout(t)	((t)->rt.timeout)
++#endif /* !CONFIG_SCHED_ALT */
++
+ #define TASK_REPORT_IDLE	(TASK_REPORT + 1)
+ #define TASK_REPORT_MAX		(TASK_REPORT_IDLE << 1)
+ 
+@@ -2135,7 +2163,11 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
+ 
+ static inline bool task_is_runnable(struct task_struct *p)
+ {
++#ifdef CONFIG_SCHED_ALT
++	return p->on_rq;
++#else
+ 	return p->on_rq && !p->se.sched_delayed;
++#endif /* !CONFIG_SCHED_ALT */
+ }
+ 
+ extern bool sched_task_on_rq(struct task_struct *p);
+diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h
+index 3a912ab42bb5..269a1513a153 100644
+--- a/include/linux/sched/deadline.h
++++ b/include/linux/sched/deadline.h
+@@ -2,6 +2,25 @@
+ #ifndef _LINUX_SCHED_DEADLINE_H
+ #define _LINUX_SCHED_DEADLINE_H
+ 
++#ifdef CONFIG_SCHED_ALT
++
++static inline int dl_task(struct task_struct *p)
++{
++	return 0;
++}
++
++#ifdef CONFIG_SCHED_BMQ
++#define __tsk_deadline(p)	(0UL)
++#endif
++
++#ifdef CONFIG_SCHED_PDS
++#define __tsk_deadline(p)	((((u64) ((p)->prio))<<56) | (p)->deadline)
++#endif
++
++#else
++
++#define __tsk_deadline(p)	((p)->dl.deadline)
++
+ /*
+  * SCHED_DEADLINE tasks has negative priorities, reflecting
+  * the fact that any of them has higher prio than RT and
+@@ -23,6 +42,7 @@ static inline bool dl_task(struct task_struct *p)
+ {
+ 	return dl_prio(p->prio);
+ }
++#endif /* CONFIG_SCHED_ALT */
+ 
+ static inline bool dl_time_before(u64 a, u64 b)
+ {
+diff --git a/include/linux/sched/prio.h b/include/linux/sched/prio.h
+index 6ab43b4f72f9..ef1cff556c5e 100644
+--- a/include/linux/sched/prio.h
++++ b/include/linux/sched/prio.h
+@@ -19,6 +19,28 @@
+ #define MAX_PRIO		(MAX_RT_PRIO + NICE_WIDTH)
+ #define DEFAULT_PRIO		(MAX_RT_PRIO + NICE_WIDTH / 2)
+ 
++#ifdef CONFIG_SCHED_ALT
++
++/* Undefine MAX_PRIO and DEFAULT_PRIO */
++#undef MAX_PRIO
++#undef DEFAULT_PRIO
++
++/* +/- priority levels from the base priority */
++#ifdef CONFIG_SCHED_BMQ
++#define MAX_PRIORITY_ADJ	(12)
++#endif
++
++#ifdef CONFIG_SCHED_PDS
++#define MAX_PRIORITY_ADJ	(0)
++#endif
++
++#define MIN_NORMAL_PRIO		(128)
++#define NORMAL_PRIO_NUM		(64)
++#define MAX_PRIO		(MIN_NORMAL_PRIO + NORMAL_PRIO_NUM)
++#define DEFAULT_PRIO		(MAX_PRIO - MAX_PRIORITY_ADJ - NICE_WIDTH / 2)
++
++#endif /* CONFIG_SCHED_ALT */
++
+ /*
+  * Convert user-nice values [ -20 ... 0 ... 19 ]
+  * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
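Plugging in NICE_WIDTH = 40 from the mainline headers (an assumption, as the value is not shown in this hunk), the BMQ variant of the macros above works out as follows:

#include <stdio.h>

#define NICE_WIDTH		40	/* assumed, from mainline */
#define MAX_PRIORITY_ADJ	12	/* CONFIG_SCHED_BMQ */
#define MIN_NORMAL_PRIO		128
#define NORMAL_PRIO_NUM		64
#define MAX_PRIO		(MIN_NORMAL_PRIO + NORMAL_PRIO_NUM)
#define DEFAULT_PRIO		(MAX_PRIO - MAX_PRIORITY_ADJ - NICE_WIDTH / 2)

int main(void)
{
	printf("MAX_PRIO     = %d\n", MAX_PRIO);	/* 128 + 64 = 192 */
	printf("DEFAULT_PRIO = %d\n", DEFAULT_PRIO);	/* 192 - 12 - 20 = 160 */
	return 0;
}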
+diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h
+index 4e3338103654..6dfef878fe3b 100644
+--- a/include/linux/sched/rt.h
++++ b/include/linux/sched/rt.h
+@@ -45,8 +45,10 @@ static inline bool rt_or_dl_task_policy(struct task_struct *tsk)
+ 
+ 	if (policy == SCHED_FIFO || policy == SCHED_RR)
+ 		return true;
++#ifndef CONFIG_SCHED_ALT
+ 	if (policy == SCHED_DEADLINE)
+ 		return true;
++#endif
+ 	return false;
+ }
+ 
+diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
+index 4237daa5ac7a..3cebd93c49c8 100644
+--- a/include/linux/sched/topology.h
++++ b/include/linux/sched/topology.h
+@@ -244,7 +244,8 @@ static inline bool cpus_share_resources(int this_cpu, int that_cpu)
+ 
+ #endif	/* !CONFIG_SMP */
+ 
+-#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
++#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) && \
++	!defined(CONFIG_SCHED_ALT)
+ extern void rebuild_sched_domains_energy(void);
+ #else
+ static inline void rebuild_sched_domains_energy(void)
+diff --git a/init/Kconfig b/init/Kconfig
+index c521e1421ad4..131a599fcde2 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -652,6 +652,7 @@ config TASK_IO_ACCOUNTING
+ 
+ config PSI
+ 	bool "Pressure stall information tracking"
++	depends on !SCHED_ALT
+ 	select KERNFS
+ 	help
+ 	  Collect metrics that indicate how overcommitted the CPU, memory,
+@@ -817,6 +818,7 @@ menu "Scheduler features"
+ config UCLAMP_TASK
+ 	bool "Enable utilization clamping for RT/FAIR tasks"
+ 	depends on CPU_FREQ_GOV_SCHEDUTIL
++	depends on !SCHED_ALT
+ 	help
+ 	  This feature enables the scheduler to track the clamped utilization
+ 	  of each CPU based on RUNNABLE tasks scheduled on that CPU.
+@@ -863,6 +865,35 @@ config UCLAMP_BUCKETS_COUNT
+ 
+ 	  If in doubt, use the default value.
+ 
++menuconfig SCHED_ALT
++	bool "Alternative CPU Schedulers"
++	default y
++	help
++	  This feature enables the alternative CPU schedulers.
++
++if SCHED_ALT
++
++choice
++	prompt "Alternative CPU Scheduler"
++	default SCHED_BMQ
++
++config SCHED_BMQ
++	bool "BMQ CPU scheduler"
++	help
++	  The BitMap Queue CPU scheduler for excellent interactivity and
++	  responsiveness on the desktop and solid scalability on normal
++	  hardware and commodity servers.
++
++config SCHED_PDS
++	bool "PDS CPU scheduler"
++	help
++	  The Priority and Deadline based Skip list multiple queue CPU
++	  Scheduler.
++
++endchoice
++
++endif
++
+ endmenu
+ 
+ #
+@@ -928,6 +959,7 @@ config NUMA_BALANCING
+ 	depends on ARCH_SUPPORTS_NUMA_BALANCING
+ 	depends on !ARCH_WANT_NUMA_VARIABLE_LOCALITY
+ 	depends on SMP && NUMA && MIGRATION && !PREEMPT_RT
++	depends on !SCHED_ALT
+ 	help
+ 	  This option adds support for automatic NUMA aware memory/task placement.
+ 	  The mechanism is quite primitive and is based on migrating memory when
+@@ -1036,6 +1068,7 @@ menuconfig CGROUP_SCHED
+ 	  tasks.
+ 
+ if CGROUP_SCHED
++if !SCHED_ALT
+ config GROUP_SCHED_WEIGHT
+ 	def_bool n
+ 
+@@ -1073,6 +1106,7 @@ config EXT_GROUP_SCHED
+ 	select GROUP_SCHED_WEIGHT
+ 	default y
+ 
++endif #!SCHED_ALT
+ endif #CGROUP_SCHED
+ 
+ config SCHED_MM_CID
+@@ -1334,6 +1368,7 @@ config CHECKPOINT_RESTORE
+ 
+ config SCHED_AUTOGROUP
+ 	bool "Automatic process group scheduling"
++	depends on !SCHED_ALT
+ 	select CGROUPS
+ 	select CGROUP_SCHED
+ 	select FAIR_GROUP_SCHED
+diff --git a/init/init_task.c b/init/init_task.c
+index 136a8231355a..03770079619a 100644
+--- a/init/init_task.c
++++ b/init/init_task.c
+@@ -71,9 +71,16 @@ struct task_struct init_task __aligned(L1_CACHE_BYTES) = {
+ 	.stack		= init_stack,
+ 	.usage		= REFCOUNT_INIT(2),
+ 	.flags		= PF_KTHREAD,
++#ifdef CONFIG_SCHED_ALT
++	.on_cpu		= 1,
++	.prio		= DEFAULT_PRIO,
++	.static_prio	= DEFAULT_PRIO,
++	.normal_prio	= DEFAULT_PRIO,
++#else
+ 	.prio		= MAX_PRIO - 20,
+ 	.static_prio	= MAX_PRIO - 20,
+ 	.normal_prio	= MAX_PRIO - 20,
++#endif
+ 	.policy		= SCHED_NORMAL,
+ 	.cpus_ptr	= &init_task.cpus_mask,
+ 	.user_cpus_ptr	= NULL,
+@@ -86,6 +93,16 @@ struct task_struct init_task __aligned(L1_CACHE_BYTES) = {
+ 	.restart_block	= {
+ 		.fn = do_no_restart_syscall,
+ 	},
++#ifdef CONFIG_SCHED_ALT
++	.sq_node	= LIST_HEAD_INIT(init_task.sq_node),
++#ifdef CONFIG_SCHED_BMQ
++	.boost_prio	= 0,
++#endif
++#ifdef CONFIG_SCHED_PDS
++	.deadline	= 0,
++#endif
++	.time_slice	= HZ,
++#else
+ 	.se		= {
+ 		.group_node 	= LIST_HEAD_INIT(init_task.se.group_node),
+ 	},
+@@ -93,6 +110,7 @@ struct task_struct init_task __aligned(L1_CACHE_BYTES) = {
+ 		.run_list	= LIST_HEAD_INIT(init_task.rt.run_list),
+ 		.time_slice	= RR_TIMESLICE,
+ 	},
++#endif
+ 	.tasks		= LIST_HEAD_INIT(init_task.tasks),
+ #ifdef CONFIG_SMP
+ 	.pushable_tasks	= PLIST_NODE_INIT(init_task.pushable_tasks, MAX_PRIO),
+diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
+index fe782cd77388..d27d2154d71a 100644
+--- a/kernel/Kconfig.preempt
++++ b/kernel/Kconfig.preempt
+@@ -117,7 +117,7 @@ config PREEMPT_DYNAMIC
+ 
+ config SCHED_CORE
+ 	bool "Core Scheduling for SMT"
+-	depends on SCHED_SMT
++	depends on SCHED_SMT && !SCHED_ALT
+ 	help
+ 	  This option permits Core Scheduling, a means of coordinated task
+ 	  selection across SMT siblings. When enabled -- see
+@@ -135,7 +135,7 @@ config SCHED_CORE
+ 
+ config SCHED_CLASS_EXT
+ 	bool "Extensible Scheduling Class"
+-	depends on BPF_SYSCALL && BPF_JIT && DEBUG_INFO_BTF
++	depends on BPF_SYSCALL && BPF_JIT && DEBUG_INFO_BTF && !SCHED_ALT
+ 	select STACKTRACE if STACKTRACE_SUPPORT
+ 	help
+ 	  This option enables a new scheduler class sched_ext (SCX), which
+diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
+index a4dd285cdf39..5b4ebe58d032 100644
+--- a/kernel/cgroup/cpuset.c
++++ b/kernel/cgroup/cpuset.c
+@@ -620,7 +620,7 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial)
+ 	return ret;
+ }
+ 
+-#ifdef CONFIG_SMP
++#if defined(CONFIG_SMP) && !defined(CONFIG_SCHED_ALT)
+ /*
+  * Helper routine for generate_sched_domains().
+  * Do cpusets a, b have overlapping effective cpus_allowed masks?
+@@ -1031,7 +1031,7 @@ void rebuild_sched_domains_locked(void)
+ 	/* Have scheduler rebuild the domains */
+ 	partition_and_rebuild_sched_domains(ndoms, doms, attr);
+ }
+-#else /* !CONFIG_SMP */
++#else /* !CONFIG_SMP || CONFIG_SCHED_ALT */
+ void rebuild_sched_domains_locked(void)
+ {
+ }
+@@ -2926,12 +2926,15 @@ static int cpuset_can_attach(struct cgroup_taskset *tset)
+ 				goto out_unlock;
+ 		}
+ 
++#ifndef CONFIG_SCHED_ALT
+ 		if (dl_task(task)) {
+ 			cs->nr_migrate_dl_tasks++;
+ 			cs->sum_migrate_dl_bw += task->dl.dl_bw;
+ 		}
++#endif
+ 	}
+ 
++#ifndef CONFIG_SCHED_ALT
+ 	if (!cs->nr_migrate_dl_tasks)
+ 		goto out_success;
+ 
+@@ -2952,6 +2955,7 @@ static int cpuset_can_attach(struct cgroup_taskset *tset)
+ 	}
+ 
+ out_success:
++#endif
+ 	/*
+ 	 * Mark attach is in progress.  This makes validate_change() fail
+ 	 * changes which zero cpus/mems_allowed.
+@@ -2973,12 +2977,14 @@ static void cpuset_cancel_attach(struct cgroup_taskset *tset)
+ 	mutex_lock(&cpuset_mutex);
+ 	dec_attach_in_progress_locked(cs);
+ 
++#ifndef CONFIG_SCHED_ALT
+ 	if (cs->nr_migrate_dl_tasks) {
+ 		int cpu = cpumask_any(cs->effective_cpus);
+ 
+ 		dl_bw_free(cpu, cs->sum_migrate_dl_bw);
+ 		reset_migrate_dl_data(cs);
+ 	}
++#endif
+ 
+ 	mutex_unlock(&cpuset_mutex);
+ }
+diff --git a/kernel/delayacct.c b/kernel/delayacct.c
+index dead51de8eb5..8edef9676ab3 100644
+--- a/kernel/delayacct.c
++++ b/kernel/delayacct.c
+@@ -149,7 +149,7 @@ int delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
+ 	 */
+ 	t1 = tsk->sched_info.pcount;
+ 	t2 = tsk->sched_info.run_delay;
+-	t3 = tsk->se.sum_exec_runtime;
++	t3 = tsk_seruntime(tsk);
+ 
+ 	d->cpu_count += t1;
+ 
+diff --git a/kernel/exit.c b/kernel/exit.c
+index 619f0014c33b..7dc53ddd45a8 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -175,7 +175,7 @@ static void __exit_signal(struct task_struct *tsk)
+ 			sig->curr_target = next_thread(tsk);
+ 	}
+ 
+-	add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
++	add_device_randomness((const void*) &tsk_seruntime(tsk),
+ 			      sizeof(unsigned long long));
+ 
+ 	/*
+@@ -196,7 +196,7 @@ static void __exit_signal(struct task_struct *tsk)
+ 	sig->inblock += task_io_get_inblock(tsk);
+ 	sig->oublock += task_io_get_oublock(tsk);
+ 	task_io_accounting_add(&sig->ioac, &tsk->ioac);
+-	sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
++	sig->sum_sched_runtime += tsk_seruntime(tsk);
+ 	sig->nr_threads--;
+ 	__unhash_process(tsk, group_dead);
+ 	write_sequnlock(&sig->stats_lock);
+diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
+index ebebd0eec7f6..802112207855 100644
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -363,7 +363,7 @@ waiter_update_prio(struct rt_mutex_waiter *waiter, struct task_struct *task)
+ 	lockdep_assert(RB_EMPTY_NODE(&waiter->tree.entry));
+ 
+ 	waiter->tree.prio = __waiter_prio(task);
+-	waiter->tree.deadline = task->dl.deadline;
++	waiter->tree.deadline = __tsk_deadline(task);
+ }
+ 
+ /*
+@@ -384,16 +384,20 @@ waiter_clone_prio(struct rt_mutex_waiter *waiter, struct task_struct *task)
+  * Only use with rt_waiter_node_{less,equal}()
+  */
+ #define task_to_waiter_node(p)	\
+-	&(struct rt_waiter_node){ .prio = __waiter_prio(p), .deadline = (p)->dl.deadline }
++	&(struct rt_waiter_node){ .prio = __waiter_prio(p), .deadline = __tsk_deadline(p) }
+ #define task_to_waiter(p)	\
+ 	&(struct rt_mutex_waiter){ .tree = *task_to_waiter_node(p) }
+ 
+ static __always_inline int rt_waiter_node_less(struct rt_waiter_node *left,
+ 					       struct rt_waiter_node *right)
+ {
++#ifdef CONFIG_SCHED_PDS
++	return (left->deadline < right->deadline);
++#else
+ 	if (left->prio < right->prio)
+ 		return 1;
+ 
++#ifndef CONFIG_SCHED_BMQ
+ 	/*
+ 	 * If both waiters have dl_prio(), we check the deadlines of the
+ 	 * associated tasks.
+@@ -402,16 +406,22 @@ static __always_inline int rt_waiter_node_less(struct rt_waiter_node *left,
+ 	 */
+ 	if (dl_prio(left->prio))
+ 		return dl_time_before(left->deadline, right->deadline);
++#endif
+ 
+ 	return 0;
++#endif
+ }
+ 
+ static __always_inline int rt_waiter_node_equal(struct rt_waiter_node *left,
+ 						 struct rt_waiter_node *right)
+ {
++#ifdef CONFIG_SCHED_PDS
++	return (left->deadline == right->deadline);
++#else
+ 	if (left->prio != right->prio)
+ 		return 0;
+ 
++#ifndef CONFIG_SCHED_BMQ
+ 	/*
+ 	 * If both waiters have dl_prio(), we check the deadlines of the
+ 	 * associated tasks.
+@@ -420,8 +430,10 @@ static __always_inline int rt_waiter_node_equal(struct rt_waiter_node *left,
+ 	 */
+ 	if (dl_prio(left->prio))
+ 		return left->deadline == right->deadline;
++#endif
+ 
+ 	return 1;
++#endif
+ }
+ 
+ static inline bool rt_mutex_steal(struct rt_mutex_waiter *waiter,
+diff --git a/kernel/locking/ww_mutex.h b/kernel/locking/ww_mutex.h
+index 76d204b7d29c..de1a52f963e5 100644
+--- a/kernel/locking/ww_mutex.h
++++ b/kernel/locking/ww_mutex.h
+@@ -247,6 +247,7 @@ __ww_ctx_less(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
+ 
+ 		/* equal static prio */
+ 
++#ifndef	CONFIG_SCHED_ALT
+ 		if (dl_prio(a_prio)) {
+ 			if (dl_time_before(b->task->dl.deadline,
+ 					   a->task->dl.deadline))
+@@ -256,6 +257,7 @@ __ww_ctx_less(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
+ 					   b->task->dl.deadline))
+ 				return false;
+ 		}
++#endif
+ 
+ 		/* equal prio */
+ 	}
+diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
+index 976092b7bd45..31d587c16ec1 100644
+--- a/kernel/sched/Makefile
++++ b/kernel/sched/Makefile
+@@ -28,7 +28,12 @@ endif
+ # These compilation units have roughly the same size and complexity - so their
+ # build parallelizes well and finishes roughly at once:
+ #
++ifdef CONFIG_SCHED_ALT
++obj-y += alt_core.o
++obj-$(CONFIG_SCHED_DEBUG) += alt_debug.o
++else
+ obj-y += core.o
+ obj-y += fair.o
++endif
+ obj-y += build_policy.o
+ obj-y += build_utility.o
+diff --git a/kernel/sched/alt_core.c b/kernel/sched/alt_core.c
+new file mode 100644
+index 000000000000..c59691742340
+--- /dev/null
++++ b/kernel/sched/alt_core.c
+@@ -0,0 +1,7458 @@
++/*
++ *  kernel/sched/alt_core.c
++ *
++ *  Core alternative kernel scheduler code and related syscalls
++ *
++ *  Copyright (C) 1991-2002  Linus Torvalds
++ *
++ *  2009-08-13	Brainfuck deadline scheduling policy by Con Kolivas deletes
++ *		a whole lot of those previous things.
++ *  2017-09-06	Priority and Deadline based Skip list multiple queue kernel
++ *		scheduler by Alfred Chen.
++ *  2019-02-20	BMQ(BitMap Queue) kernel scheduler by Alfred Chen.
++ */
++#include <linux/sched/clock.h>
++#include <linux/sched/cputime.h>
++#include <linux/sched/debug.h>
++#include <linux/sched/hotplug.h>
++#include <linux/sched/init.h>
++#include <linux/sched/isolation.h>
++#include <linux/sched/loadavg.h>
++#include <linux/sched/mm.h>
++#include <linux/sched/nohz.h>
++#include <linux/sched/stat.h>
++#include <linux/sched/wake_q.h>
++
++#include <linux/blkdev.h>
++#include <linux/context_tracking.h>
++#include <linux/cpuset.h>
++#include <linux/delayacct.h>
++#include <linux/init_task.h>
++#include <linux/kcov.h>
++#include <linux/kprobes.h>
++#include <linux/nmi.h>
++#include <linux/rseq.h>
++#include <linux/scs.h>
++
++#include <uapi/linux/sched/types.h>
++
++#include <asm/irq_regs.h>
++#include <asm/switch_to.h>
++
++#define CREATE_TRACE_POINTS
++#include <trace/events/sched.h>
++#include <trace/events/ipi.h>
++#undef CREATE_TRACE_POINTS
++
++#include "sched.h"
++#include "smp.h"
++
++#include "pelt.h"
++
++#include "../../io_uring/io-wq.h"
++#include "../smpboot.h"
++
++EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpu);
++EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpumask);
++
++/*
++ * Export tracepoints that act as a bare tracehook (ie: have no trace event
++ * associated with them) to allow external modules to probe them.
++ */
++EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp);
++
++#ifdef CONFIG_SCHED_DEBUG
++#define sched_feat(x)	(1)
++/*
++ * Print a warning if need_resched is set for the given duration (if
++ * LATENCY_WARN is enabled).
++ *
++ * If sysctl_resched_latency_warn_once is set, only one warning will be shown
++ * per boot.
++ */
++__read_mostly int sysctl_resched_latency_warn_ms = 100;
++__read_mostly int sysctl_resched_latency_warn_once = 1;
++#else
++#define sched_feat(x)	(0)
++#endif /* CONFIG_SCHED_DEBUG */
++
++#define ALT_SCHED_VERSION "v6.12-r0"
++
++#define STOP_PRIO		(MAX_RT_PRIO - 1)
++
++/*
++ * Time slice
++ * (default: 4 msec, units: nanoseconds)
++ */
++unsigned int sysctl_sched_base_slice __read_mostly	= (4 << 20);
++
++#include "alt_core.h"
++#include "alt_topology.h"
++
++/* Reschedule if less than this many μs left */
++#define RESCHED_NS		(100 << 10)
++
++/**
++ * sched_yield_type - Type of sched_yield() will be performed.
++ * 0: No yield.
++ * 1: Requeue task. (default)
++ */
++int sched_yield_type __read_mostly = 1;
++
++#ifdef CONFIG_SMP
++cpumask_t sched_rq_pending_mask ____cacheline_aligned_in_smp;
++
++DEFINE_PER_CPU_ALIGNED(cpumask_t [NR_CPU_AFFINITY_LEVELS], sched_cpu_topo_masks);
++DEFINE_PER_CPU_ALIGNED(cpumask_t *, sched_cpu_llc_mask);
++DEFINE_PER_CPU_ALIGNED(cpumask_t *, sched_cpu_topo_end_mask);
++
++#ifdef CONFIG_SCHED_SMT
++DEFINE_STATIC_KEY_FALSE(sched_smt_present);
++EXPORT_SYMBOL_GPL(sched_smt_present);
++
++cpumask_t sched_smt_mask ____cacheline_aligned_in_smp;
++#endif
++
++/*
++ * Keep a unique ID per domain (we use the first CPUs number in the cpumask of
++ * the domain), this allows us to quickly tell if two cpus are in the same cache
++ * domain, see cpus_share_cache().
++ */
++DEFINE_PER_CPU(int, sd_llc_id);
++#endif /* CONFIG_SMP */
++
++DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
++
++#ifndef prepare_arch_switch
++# define prepare_arch_switch(next)	do { } while (0)
++#endif
++#ifndef finish_arch_post_lock_switch
++# define finish_arch_post_lock_switch()	do { } while (0)
++#endif
++
++static cpumask_t sched_preempt_mask[SCHED_QUEUE_BITS + 2] ____cacheline_aligned_in_smp;
++
++cpumask_t *const sched_idle_mask = &sched_preempt_mask[SCHED_QUEUE_BITS - 1];
++cpumask_t *const sched_sg_idle_mask = &sched_preempt_mask[SCHED_QUEUE_BITS];
++cpumask_t *const sched_pcore_idle_mask = &sched_preempt_mask[SCHED_QUEUE_BITS];
++cpumask_t *const sched_ecore_idle_mask = &sched_preempt_mask[SCHED_QUEUE_BITS + 1];
++
++/* task function */
++static inline const struct cpumask *task_user_cpus(struct task_struct *p)
++{
++	if (!p->user_cpus_ptr)
++		return cpu_possible_mask; /* &init_task.cpus_mask */
++	return p->user_cpus_ptr;
++}
++
++/* sched_queue related functions */
++static inline void sched_queue_init(struct sched_queue *q)
++{
++	int i;
++
++	bitmap_zero(q->bitmap, SCHED_QUEUE_BITS);
++	for(i = 0; i < SCHED_LEVELS; i++)
++		INIT_LIST_HEAD(&q->heads[i]);
++}
++
++/*
++ * Init idle task and put into queue structure of rq
++ * IMPORTANT: may be called multiple times for a single cpu
++ */
++static inline void sched_queue_init_idle(struct sched_queue *q,
++					 struct task_struct *idle)
++{
++	INIT_LIST_HEAD(&q->heads[IDLE_TASK_SCHED_PRIO]);
++	list_add_tail(&idle->sq_node, &q->heads[IDLE_TASK_SCHED_PRIO]);
++	idle->on_rq = TASK_ON_RQ_QUEUED;
++}
++
++#define CLEAR_CACHED_PREEMPT_MASK(pr, low, high, cpu)		\
++	if (low < pr && pr <= high)				\
++		cpumask_clear_cpu(cpu, sched_preempt_mask + pr);
++
++#define SET_CACHED_PREEMPT_MASK(pr, low, high, cpu)		\
++	if (low < pr && pr <= high)				\
++		cpumask_set_cpu(cpu, sched_preempt_mask + pr);
++
++static atomic_t sched_prio_record = ATOMIC_INIT(0);
++
++/* water mark related functions */
++static inline void update_sched_preempt_mask(struct rq *rq)
++{
++	int prio = find_first_bit(rq->queue.bitmap, SCHED_QUEUE_BITS);
++	int last_prio = rq->prio;
++	int cpu, pr;
++
++	if (prio == last_prio)
++		return;
++
++	rq->prio = prio;
++#ifdef CONFIG_SCHED_PDS
++	rq->prio_idx = sched_prio2idx(rq->prio, rq);
++#endif
++	cpu = cpu_of(rq);
++	pr = atomic_read(&sched_prio_record);
++
++	if (prio < last_prio) {
++		if (IDLE_TASK_SCHED_PRIO == last_prio) {
++			rq->clear_idle_mask_func(cpu, sched_idle_mask);
++			last_prio -= 2;
++		}
++		CLEAR_CACHED_PREEMPT_MASK(pr, prio, last_prio, cpu);
++
++		return;
++	}
++	/* last_prio < prio */
++	if (IDLE_TASK_SCHED_PRIO == prio) {
++		rq->set_idle_mask_func(cpu, sched_idle_mask);
++		prio -= 2;
++	}
++	SET_CACHED_PREEMPT_MASK(pr, last_prio, prio, cpu);
++}
++
++/*
++ * Serialization rules:
++ *
++ * Lock order:
++ *
++ *   p->pi_lock
++ *     rq->lock
++ *       hrtimer_cpu_base->lock (hrtimer_start() for bandwidth controls)
++ *
++ *  rq1->lock
++ *    rq2->lock  where: rq1 < rq2
++ *
++ * Regular state:
++ *
++ * Normal scheduling state is serialized by rq->lock. __schedule() takes the
++ * local CPU's rq->lock, it optionally removes the task from the runqueue and
++ * always looks at the local rq data structures to find the most eligible task
++ * to run next.
++ *
++ * Task enqueue is also under rq->lock, possibly taken from another CPU.
++ * Wakeups from another LLC domain might use an IPI to transfer the enqueue to
++ * the local CPU to avoid bouncing the runqueue state around [ see
++ * ttwu_queue_wakelist() ]
++ *
++ * Task wakeup, specifically wakeups that involve migration, are horribly
++ * complicated to avoid having to take two rq->locks.
++ *
++ * Special state:
++ *
++ * System-calls and anything external will use task_rq_lock() which acquires
++ * both p->pi_lock and rq->lock. As a consequence the state they change is
++ * stable while holding either lock:
++ *
++ *  - sched_setaffinity()/
++ *    set_cpus_allowed_ptr():	p->cpus_ptr, p->nr_cpus_allowed
++ *  - set_user_nice():		p->se.load, p->*prio
++ *  - __sched_setscheduler():	p->sched_class, p->policy, p->*prio,
++ *				p->se.load, p->rt_priority,
++ *				p->dl.dl_{runtime, deadline, period, flags, bw, density}
++ *  - sched_setnuma():		p->numa_preferred_nid
++ *  - sched_move_task():        p->sched_task_group
++ *  - uclamp_update_active()	p->uclamp*
++ *
++ * p->state <- TASK_*:
++ *
++ *   is changed locklessly using set_current_state(), __set_current_state() or
++ *   set_special_state(), see their respective comments, or by
++ *   try_to_wake_up(). This latter uses p->pi_lock to serialize against
++ *   concurrent self.
++ *
++ * p->on_rq <- { 0, 1 = TASK_ON_RQ_QUEUED, 2 = TASK_ON_RQ_MIGRATING }:
++ *
++ *   is set by activate_task() and cleared by deactivate_task(), under
++ *   rq->lock. Non-zero indicates the task is runnable, the special
++ *   ON_RQ_MIGRATING state is used for migration without holding both
++ *   rq->locks. It indicates task_cpu() is not stable, see task_rq_lock().
++ *
++ *   Additionally it is possible to be ->on_rq but still be considered not
++ *   runnable when p->se.sched_delayed is true. These tasks are on the runqueue
++ *   but will be dequeued as soon as they get picked again. See the
++ *   task_is_runnable() helper.
++ *
++ * p->on_cpu <- { 0, 1 }:
++ *
++ *   is set by prepare_task() and cleared by finish_task() such that it will be
++ *   set before p is scheduled-in and cleared after p is scheduled-out, both
++ *   under rq->lock. Non-zero indicates the task is running on its CPU.
++ *
++ *   [ The astute reader will observe that it is possible for two tasks on one
++ *     CPU to have ->on_cpu = 1 at the same time. ]
++ *
++ * task_cpu(p): is changed by set_task_cpu(), the rules are:
++ *
++ *  - Don't call set_task_cpu() on a blocked task:
++ *
++ *    We don't care what CPU we're not running on, this simplifies hotplug,
++ *    the CPU assignment of blocked tasks isn't required to be valid.
++ *
++ *  - for try_to_wake_up(), called under p->pi_lock:
++ *
++ *    This allows try_to_wake_up() to only take one rq->lock, see its comment.
++ *
++ *  - for migration called under rq->lock:
++ *    [ see task_on_rq_migrating() in task_rq_lock() ]
++ *
++ *    o move_queued_task()
++ *    o detach_task()
++ *
++ *  - for migration called under double_rq_lock():
++ *
++ *    o __migrate_swap_task()
++ *    o push_rt_task() / pull_rt_task()
++ *    o push_dl_task() / pull_dl_task()
++ *    o dl_task_offline_migration()
++ *
++ */
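
The "rq1->lock before rq2->lock where rq1 < rq2" rule above is the classic total-order discipline for holding two locks at once. A standalone sketch of the idea, with a hypothetical type, address order as the global order, and pthreads standing in for raw_spinlock_t:

#include <pthread.h>

struct rq_demo {
	pthread_mutex_t lock;
};

/* Always acquire in one global (here: address) order, so two threads
 * locking the same pair can never deadlock against each other. */
static void double_rq_lock_demo(struct rq_demo *a, struct rq_demo *b)
{
	struct rq_demo *rq1 = a < b ? a : b;
	struct rq_demo *rq2 = a < b ? b : a;

	pthread_mutex_lock(&rq1->lock);
	if (rq1 != rq2)
		pthread_mutex_lock(&rq2->lock);
}

static void double_rq_unlock_demo(struct rq_demo *a, struct rq_demo *b)
{
	pthread_mutex_unlock(&a->lock);
	if (a != b)
		pthread_mutex_unlock(&b->lock);
}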
++
++/*
++ * Context: p->pi_lock
++ */
++static inline struct rq *
++task_access_lock_irqsave(struct task_struct *p, raw_spinlock_t **plock, unsigned long *flags)
++{
++	struct rq *rq;
++	for (;;) {
++		rq = task_rq(p);
++		if (p->on_cpu || task_on_rq_queued(p)) {
++			raw_spin_lock_irqsave(&rq->lock, *flags);
++			if (likely((p->on_cpu || task_on_rq_queued(p)) && rq == task_rq(p))) {
++				*plock = &rq->lock;
++				return rq;
++			}
++			raw_spin_unlock_irqrestore(&rq->lock, *flags);
++		} else if (task_on_rq_migrating(p)) {
++			do {
++				cpu_relax();
++			} while (unlikely(task_on_rq_migrating(p)));
++		} else {
++			raw_spin_lock_irqsave(&p->pi_lock, *flags);
++			if (likely(!p->on_cpu && !p->on_rq && rq == task_rq(p))) {
++				*plock = &p->pi_lock;
++				return rq;
++			}
++			raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
++		}
++	}
++}
++
++static inline void
++task_access_unlock_irqrestore(struct task_struct *p, raw_spinlock_t *lock, unsigned long *flags)
++{
++	raw_spin_unlock_irqrestore(lock, *flags);
++}
++
++/*
++ * __task_rq_lock - lock the rq @p resides on.
++ */
++struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
++	__acquires(rq->lock)
++{
++	struct rq *rq;
++
++	lockdep_assert_held(&p->pi_lock);
++
++	for (;;) {
++		rq = task_rq(p);
++		raw_spin_lock(&rq->lock);
++		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
++			return rq;
++		raw_spin_unlock(&rq->lock);
++
++		while (unlikely(task_on_rq_migrating(p)))
++			cpu_relax();
++	}
++}
++
++/*
++ * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
++ */
++struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
++	__acquires(p->pi_lock)
++	__acquires(rq->lock)
++{
++	struct rq *rq;
++
++	for (;;) {
++		raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
++		rq = task_rq(p);
++		raw_spin_lock(&rq->lock);
++		/*
++		 *	move_queued_task()		task_rq_lock()
++		 *
++		 *	ACQUIRE (rq->lock)
++		 *	[S] ->on_rq = MIGRATING		[L] rq = task_rq()
++		 *	WMB (__set_task_cpu())		ACQUIRE (rq->lock);
++		 *	[S] ->cpu = new_cpu		[L] task_rq()
++		 *					[L] ->on_rq
++		 *	RELEASE (rq->lock)
++		 *
++		 * If we observe the old CPU in task_rq_lock(), the acquire of
++		 * the old rq->lock will fully serialize against the stores.
++		 *
++		 * If we observe the new CPU in task_rq_lock(), the address
++		 * dependency headed by '[L] rq = task_rq()' and the acquire
++		 * will pair with the WMB to ensure we then also see migrating.
++		 */
++		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
++			return rq;
++		}
++		raw_spin_unlock(&rq->lock);
++		raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
++
++		while (unlikely(task_on_rq_migrating(p)))
++			cpu_relax();
++	}
++}
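
Both lock helpers above share the same shape: read the task's current runqueue, lock it, then re-check that the task did not migrate in the meantime, and retry if it did. A generic, self-contained sketch of that lock-and-revalidate loop, with hypothetical owner/object types and pthreads in place of the rq spinlocks:

#include <pthread.h>
#include <stdatomic.h>

struct owner {
	pthread_mutex_t lock;
};

struct object {
	_Atomic(struct owner *) owner;	/* may change while we sleep */
};

static struct owner *object_lock_owner(struct object *obj)
{
	for (;;) {
		struct owner *o = atomic_load(&obj->owner);

		pthread_mutex_lock(&o->lock);
		if (o == atomic_load(&obj->owner))
			return o;	/* owner stable: lock is valid */
		/* Object migrated while we waited on the old lock: retry. */
		pthread_mutex_unlock(&o->lock);
	}
}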
++
++static inline void rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
++	__acquires(rq->lock)
++{
++	raw_spin_lock_irqsave(&rq->lock, rf->flags);
++}
++
++static inline void rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf)
++	__releases(rq->lock)
++{
++	raw_spin_unlock_irqrestore(&rq->lock, rf->flags);
++}
++
++DEFINE_LOCK_GUARD_1(rq_lock_irqsave, struct rq,
++		    rq_lock_irqsave(_T->lock, &_T->rf),
++		    rq_unlock_irqrestore(_T->lock, &_T->rf),
++		    struct rq_flags rf)
++
++void raw_spin_rq_lock_nested(struct rq *rq, int subclass)
++{
++	raw_spinlock_t *lock;
++
++	/* Matches synchronize_rcu() in __sched_core_enable() */
++	preempt_disable();
++
++	for (;;) {
++		lock = __rq_lockp(rq);
++		raw_spin_lock_nested(lock, subclass);
++		if (likely(lock == __rq_lockp(rq))) {
++			/* preempt_count *MUST* be > 1 */
++			preempt_enable_no_resched();
++			return;
++		}
++		raw_spin_unlock(lock);
++	}
++}
++
++void raw_spin_rq_unlock(struct rq *rq)
++{
++	raw_spin_unlock(rq_lockp(rq));
++}
++
++/*
++ * RQ-clock updating methods:
++ */
++
++static void update_rq_clock_task(struct rq *rq, s64 delta)
++{
++/*
++ * In theory, the compiler should just see 0 here, and optimize out the call
++ * to sched_rt_avg_update. But I don't trust it...
++ */
++	s64 __maybe_unused steal = 0, irq_delta = 0;
++
++#ifdef CONFIG_IRQ_TIME_ACCOUNTING
++	irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
++
++	/*
++	 * Since irq_time is only updated on {soft,}irq_exit, we might run into
++	 * this case when a previous update_rq_clock() happened inside a
++	 * {soft,}IRQ region.
++	 *
++	 * When this happens, we stop ->clock_task and only update the
++	 * prev_irq_time stamp to account for the part that fit, so that a next
++	 * update will consume the rest. This ensures ->clock_task is
++	 * monotonic.
++	 *
++	 * It does, however, cause some slight misattribution of {soft,}IRQ
++	 * time, a more accurate solution would be to update the irq_time using
++	 * the current rq->clock timestamp, except that would require using
++	 * atomic ops.
++	 */
++	if (irq_delta > delta)
++		irq_delta = delta;
++
++	rq->prev_irq_time += irq_delta;
++	delta -= irq_delta;
++	delayacct_irq(rq->curr, irq_delta);
++#endif
++#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
++	if (static_key_false((&paravirt_steal_rq_enabled))) {
++		steal = paravirt_steal_clock(cpu_of(rq));
++		steal -= rq->prev_steal_time_rq;
++
++		if (unlikely(steal > delta))
++			steal = delta;
++
++		rq->prev_steal_time_rq += steal;
++		delta -= steal;
++	}
++#endif
++
++	rq->clock_task += delta;
++
++#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
++	if ((irq_delta + steal))
++		update_irq_load_avg(rq, irq_delta + steal);
++#endif
++}
++
++static inline void update_rq_clock(struct rq *rq)
++{
++	s64 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
++
++	if (unlikely(delta <= 0))
++		return;
++	rq->clock += delta;
++	sched_update_rq_clock(rq);
++	update_rq_clock_task(rq, delta);
++}
++
++/*
++ * RQ Load update routine
++ */
++#define RQ_LOAD_HISTORY_BITS		(sizeof(s32) * 8ULL)
++#define RQ_UTIL_SHIFT			(8)
++#define RQ_LOAD_HISTORY_TO_UTIL(l)	(((l) >> (RQ_LOAD_HISTORY_BITS - 1 - RQ_UTIL_SHIFT)) & 0xff)
++
++#define LOAD_BLOCK(t)		((t) >> 17)
++#define LOAD_HALF_BLOCK(t)	((t) >> 16)
++#define BLOCK_MASK(t)		((t) & ((0x01 << 18) - 1))
++#define LOAD_BLOCK_BIT(b)	(1UL << (RQ_LOAD_HISTORY_BITS - 1 - (b)))
++#define CURRENT_LOAD_BIT	LOAD_BLOCK_BIT(0)
++
++static inline void rq_load_update(struct rq *rq)
++{
++	u64 time = rq->clock;
++	u64 delta = min(LOAD_BLOCK(time) - LOAD_BLOCK(rq->load_stamp), RQ_LOAD_HISTORY_BITS - 1);
++	u64 prev = !!(rq->load_history & CURRENT_LOAD_BIT);
++	u64 curr = !!rq->nr_running;
++
++	if (delta) {
++		rq->load_history = rq->load_history >> delta;
++
++		if (delta < RQ_UTIL_SHIFT) {
++			rq->load_block += (~BLOCK_MASK(rq->load_stamp)) * prev;
++			if (!!LOAD_HALF_BLOCK(rq->load_block) ^ curr)
++				rq->load_history ^= LOAD_BLOCK_BIT(delta);
++		}
++
++		rq->load_block = BLOCK_MASK(time) * prev;
++	} else {
++		rq->load_block += (time - rq->load_stamp) * prev;
++	}
++	if (prev ^ curr)
++		rq->load_history ^= CURRENT_LOAD_BIT;
++	rq->load_stamp = time;
++}
++
++unsigned long rq_load_util(struct rq *rq, unsigned long max)
++{
++	return RQ_LOAD_HISTORY_TO_UTIL(rq->load_history) * (max >> RQ_UTIL_SHIFT);
++}
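
To make the mapping above concrete: with a 32-bit history and RQ_UTIL_SHIFT of 8, RQ_LOAD_HISTORY_TO_UTIL() extracts bits 30..23, i.e. the eight most recent completed busy/idle blocks (each block spans 2^17 ns, roughly 131 us, given the LOAD_BLOCK() shift and a nanosecond rq->clock). A small standalone check:

#include <stdio.h>
#include <stdint.h>

#define HISTORY_BITS	32
#define UTIL_SHIFT	8
#define HIST_TO_UTIL(l)	(((l) >> (HISTORY_BITS - 1 - UTIL_SHIFT)) & 0xff)

int main(void)
{
	uint32_t busy = 0xffffffffu;	/* busy in every recent block */
	uint32_t half = 0x55555555u;	/* busy in every other block  */

	printf("util(busy) = %u/255\n", HIST_TO_UTIL(busy));	/* 255 */
	printf("util(half) = %u/255\n", HIST_TO_UTIL(half));	/* 170 */
	return 0;
}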
++
++#ifdef CONFIG_SMP
++unsigned long sched_cpu_util(int cpu)
++{
++	return rq_load_util(cpu_rq(cpu), arch_scale_cpu_capacity(cpu));
++}
++#endif /* CONFIG_SMP */
++
++#ifdef CONFIG_CPU_FREQ
++/**
++ * cpufreq_update_util - Take a note about CPU utilization changes.
++ * @rq: Runqueue to carry out the update for.
++ * @flags: Update reason flags.
++ *
++ * This function is called by the scheduler on the CPU whose utilization is
++ * being updated.
++ *
++ * It can only be called from RCU-sched read-side critical sections.
++ *
++ * The way cpufreq is currently arranged requires it to evaluate the CPU
++ * performance state (frequency/voltage) on a regular basis to prevent it from
++ * being stuck in a completely inadequate performance level for too long.
++ * That is not guaranteed to happen if the updates are only triggered from CFS
++ * and DL, though, because they may not be coming in if only RT tasks are
++ * active all the time.
++ *
++ * As a workaround for that issue, this function is called periodically by the
++ * RT sched class to trigger extra cpufreq updates to prevent it from stalling,
++ * but that really is a band-aid.  Going forward it should be replaced with
++ * solutions targeted more specifically at RT tasks.
++ */
++static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
++{
++	struct update_util_data *data;
++
++#ifdef CONFIG_SMP
++	rq_load_update(rq);
++#endif
++	data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data, cpu_of(rq)));
++	if (data)
++		data->func(data, rq_clock(rq), flags);
++}
++#else
++static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
++{
++#ifdef CONFIG_SMP
++	rq_load_update(rq);
++#endif
++}
++#endif /* CONFIG_CPU_FREQ */
++
++#ifdef CONFIG_NO_HZ_FULL
++/*
++ * Tick may be needed by tasks in the runqueue depending on their policy and
++ * requirements. If tick is needed, let's send the target an IPI to kick it out
++ * of nohz mode if necessary.
++ */
++static inline void sched_update_tick_dependency(struct rq *rq)
++{
++	int cpu = cpu_of(rq);
++
++	if (!tick_nohz_full_cpu(cpu))
++		return;
++
++	if (rq->nr_running < 2)
++		tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED);
++	else
++		tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);
++}
++#else /* !CONFIG_NO_HZ_FULL */
++static inline void sched_update_tick_dependency(struct rq *rq) { }
++#endif
++
++bool sched_task_on_rq(struct task_struct *p)
++{
++	return task_on_rq_queued(p);
++}
++
++unsigned long get_wchan(struct task_struct *p)
++{
++	unsigned long ip = 0;
++	unsigned int state;
++
++	if (!p || p == current)
++		return 0;
++
++	/* Only get wchan if task is blocked and we can keep it that way. */
++	raw_spin_lock_irq(&p->pi_lock);
++	state = READ_ONCE(p->__state);
++	smp_rmb(); /* see try_to_wake_up() */
++	if (state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq)
++		ip = __get_wchan(p);
++	raw_spin_unlock_irq(&p->pi_lock);
++
++	return ip;
++}
++
++/*
++ * Add/Remove/Requeue task to/from the runqueue routines
++ * Context: rq->lock
++ */
++#define __SCHED_DEQUEUE_TASK(p, rq, flags, func)					\
++	sched_info_dequeue(rq, p);							\
++											\
++	__list_del_entry(&p->sq_node);							\
++	if (p->sq_node.prev == p->sq_node.next) {					\
++		clear_bit(sched_idx2prio(p->sq_node.next - &rq->queue.heads[0], rq),	\
++			  rq->queue.bitmap);						\
++		func;									\
++	}
++
++#define __SCHED_ENQUEUE_TASK(p, rq, flags, func)					\
++	sched_info_enqueue(rq, p);							\
++	{										\
++	int idx, prio;									\
++	TASK_SCHED_PRIO_IDX(p, rq, idx, prio);						\
++	list_add_tail(&p->sq_node, &rq->queue.heads[idx]);				\
++	if (list_is_first(&p->sq_node, &rq->queue.heads[idx])) {			\
++		set_bit(prio, rq->queue.bitmap);					\
++		func;									\
++	}										\
++	}
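
The two macros above maintain the scheduler's core data structure: an array of per-priority list heads plus a bitmap whose set bits mark the non-empty levels, so enqueue, dequeue and highest-priority lookup are all O(1). A self-contained userspace sketch of that structure (illustrative 64 levels, assuming a 64-bit unsigned long, with __builtin_ctzl for find_first_bit()):

#include <stddef.h>

#define NLEVELS 64

struct node {
	struct node *prev, *next;	/* circular doubly linked list */
};

struct prio_queue {
	unsigned long bitmap;		/* bit i set <=> heads[i] non-empty */
	struct node heads[NLEVELS];	/* one sentinel per priority level  */
};

static void pq_init(struct prio_queue *q)
{
	q->bitmap = 0;
	for (int i = 0; i < NLEVELS; i++)
		q->heads[i].prev = q->heads[i].next = &q->heads[i];
}

static void pq_enqueue(struct prio_queue *q, struct node *n, int prio)
{
	struct node *head = &q->heads[prio];

	n->prev = head->prev;		/* add at tail, like list_add_tail() */
	n->next = head;
	head->prev->next = n;
	head->prev = n;
	q->bitmap |= 1UL << prio;	/* this level is certainly non-empty */
}

static void pq_dequeue(struct prio_queue *q, struct node *n, int prio)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	if (q->heads[prio].next == &q->heads[prio])
		q->bitmap &= ~(1UL << prio);	/* last entry left this level */
}

static struct node *pq_first(struct prio_queue *q)
{
	if (!q->bitmap)
		return NULL;
	return q->heads[__builtin_ctzl(q->bitmap)].next;
}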
++
++static inline void dequeue_task(struct task_struct *p, struct rq *rq, int flags)
++{
++#ifdef ALT_SCHED_DEBUG
++	lockdep_assert_held(&rq->lock);
++
++	/*printk(KERN_INFO "sched: dequeue(%d) %px %016llx\n", cpu_of(rq), p, p->deadline);*/
++	WARN_ONCE(task_rq(p) != rq, "sched: dequeue task reside on cpu%d from cpu%d\n",
++		  task_cpu(p), cpu_of(rq));
++#endif
++
++	__SCHED_DEQUEUE_TASK(p, rq, flags, update_sched_preempt_mask(rq));
++	--rq->nr_running;
++#ifdef CONFIG_SMP
++	if (1 == rq->nr_running)
++		cpumask_clear_cpu(cpu_of(rq), &sched_rq_pending_mask);
++#endif
++
++	sched_update_tick_dependency(rq);
++}
++
++static inline void enqueue_task(struct task_struct *p, struct rq *rq, int flags)
++{
++#ifdef ALT_SCHED_DEBUG
++	lockdep_assert_held(&rq->lock);
++
++	/*printk(KERN_INFO "sched: enqueue(%d) %px %d\n", cpu_of(rq), p, p->prio);*/
++	WARN_ONCE(task_rq(p) != rq, "sched: enqueue task reside on cpu%d to cpu%d\n",
++		  task_cpu(p), cpu_of(rq));
++#endif
++
++	__SCHED_ENQUEUE_TASK(p, rq, flags, update_sched_preempt_mask(rq));
++	++rq->nr_running;
++#ifdef CONFIG_SMP
++	if (2 == rq->nr_running)
++		cpumask_set_cpu(cpu_of(rq), &sched_rq_pending_mask);
++#endif
++
++	sched_update_tick_dependency(rq);
++}
++
++void requeue_task(struct task_struct *p, struct rq *rq)
++{
++	struct list_head *node = &p->sq_node;
++	int deq_idx, idx, prio;
++
++	TASK_SCHED_PRIO_IDX(p, rq, idx, prio);
++#ifdef ALT_SCHED_DEBUG
++	lockdep_assert_held(&rq->lock);
++	/*printk(KERN_INFO "sched: requeue(%d) %px %016llx\n", cpu_of(rq), p, p->deadline);*/
++	WARN_ONCE(task_rq(p) != rq, "sched: cpu[%d] requeue task reside on cpu%d\n",
++		  cpu_of(rq), task_cpu(p));
++#endif
++	if (list_is_last(node, &rq->queue.heads[idx]))
++		return;
++
++	__list_del_entry(node);
++	if (node->prev == node->next && (deq_idx = node->next - &rq->queue.heads[0]) != idx)
++		clear_bit(sched_idx2prio(deq_idx, rq), rq->queue.bitmap);
++
++	list_add_tail(node, &rq->queue.heads[idx]);
++	if (list_is_first(node, &rq->queue.heads[idx]))
++		set_bit(prio, rq->queue.bitmap);
++	update_sched_preempt_mask(rq);
++}
++
++/*
++ * try_cmpxchg based fetch_or() macro so it works for different integer types:
++ */
++#define fetch_or(ptr, mask)						\
++	({								\
++		typeof(ptr) _ptr = (ptr);				\
++		typeof(mask) _mask = (mask);				\
++		typeof(*_ptr) _val = *_ptr;				\
++									\
++		do {							\
++		} while (!try_cmpxchg(_ptr, &_val, _val | _mask));	\
++	_val;								\
++})
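
For reference, the same operation in plain C11 atomics: atomic_fetch_or() gives the result directly, and the explicit loop form below mirrors the try_cmpxchg() retry, since atomic_compare_exchange_weak() reloads the expected value on failure just as try_cmpxchg() updates its "old" argument. A minimal sketch:

#include <stdatomic.h>

static unsigned long fetch_or_c11(_Atomic unsigned long *p, unsigned long mask)
{
	unsigned long val = atomic_load(p);

	/* Reloads 'val' on failure, exactly like the kernel loop above. */
	while (!atomic_compare_exchange_weak(p, &val, val | mask))
		;
	return val;			/* value observed before the OR */
}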
++
++#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
++/*
++ * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
++ * this avoids any races wrt polling state changes and thereby avoids
++ * spurious IPIs.
++ */
++static inline bool set_nr_and_not_polling(struct task_struct *p)
++{
++	struct thread_info *ti = task_thread_info(p);
++	return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
++}
++
++/*
++ * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
++ *
++ * If this returns true, then the idle task promises to call
++ * sched_ttwu_pending() and reschedule soon.
++ */
++static bool set_nr_if_polling(struct task_struct *p)
++{
++	struct thread_info *ti = task_thread_info(p);
++	typeof(ti->flags) val = READ_ONCE(ti->flags);
++
++	do {
++		if (!(val & _TIF_POLLING_NRFLAG))
++			return false;
++		if (val & _TIF_NEED_RESCHED)
++			return true;
++	} while (!try_cmpxchg(&ti->flags, &val, val | _TIF_NEED_RESCHED));
++
++	return true;
++}
++
++#else
++static inline bool set_nr_and_not_polling(struct task_struct *p)
++{
++	set_tsk_need_resched(p);
++	return true;
++}
++
++#ifdef CONFIG_SMP
++static inline bool set_nr_if_polling(struct task_struct *p)
++{
++	return false;
++}
++#endif
++#endif
++
++static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
++{
++	struct wake_q_node *node = &task->wake_q;
++
++	/*
++	 * Atomically grab the task, if ->wake_q is !nil already it means
++	 * it's already queued (either by us or someone else) and will get the
++	 * wakeup due to that.
++	 *
++	 * In order to ensure that a pending wakeup will observe our pending
++	 * state, even in the failed case, an explicit smp_mb() must be used.
++	 */
++	smp_mb__before_atomic();
++	if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)))
++		return false;
++
++	/*
++	 * The head is context local, there can be no concurrency.
++	 */
++	*head->lastp = node;
++	head->lastp = &node->next;
++	return true;
++}
++
++/**
++ * wake_q_add() - queue a wakeup for 'later' waking.
++ * @head: the wake_q_head to add @task to
++ * @task: the task to queue for 'later' wakeup
++ *
++ * Queue a task for later wakeup, most likely by the wake_up_q() call in the
++ * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
++ * instantly.
++ *
++ * This function must be used as-if it were wake_up_process(); IOW the task
++ * must be ready to be woken at this location.
++ */
++void wake_q_add(struct wake_q_head *head, struct task_struct *task)
++{
++	if (__wake_q_add(head, task))
++		get_task_struct(task);
++}
++
++/**
++ * wake_q_add_safe() - safely queue a wakeup for 'later' waking.
++ * @head: the wake_q_head to add @task to
++ * @task: the task to queue for 'later' wakeup
++ *
++ * Queue a task for later wakeup, most likely by the wake_up_q() call in the
++ * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
++ * instantly.
++ *
++ * This function must be used as-if it were wake_up_process(); IOW the task
++ * must be ready to be woken at this location.
++ *
++ * This function is essentially a task-safe equivalent to wake_q_add(). Callers
++ * that already hold reference to @task can call the 'safe' version and trust
++ * wake_q to do the right thing depending whether or not the @task is already
++ * queued for wakeup.
++ */
++void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task)
++{
++	if (!__wake_q_add(head, task))
++		put_task_struct(task);
++}
++
++void wake_up_q(struct wake_q_head *head)
++{
++	struct wake_q_node *node = head->first;
++
++	while (node != WAKE_Q_TAIL) {
++		struct task_struct *task;
++
++		task = container_of(node, struct task_struct, wake_q);
++		/* task can safely be re-inserted now: */
++		node = node->next;
++		task->wake_q.next = NULL;
++
++		/*
++		 * wake_up_process() executes a full barrier, which pairs with
++		 * the queueing in wake_q_add() so as not to miss wakeups.
++		 */
++		wake_up_process(task);
++		put_task_struct(task);
++	}
++}
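
The usual calling pattern for this API is to collect wakeups under a lock and issue them only after dropping it, which keeps the critical section short and avoids waking a task that would immediately block on the same lock. An illustrative kernel-context sketch, with a hypothetical my_lock/my_waiter pair:

struct my_waiter {
	struct list_head node;
	struct task_struct *task;
};

struct my_lock {
	spinlock_t lock;
	struct list_head waiters;
};

static void wake_all_waiters(struct my_lock *ml)
{
	DEFINE_WAKE_Q(wake_q);
	struct my_waiter *w;

	spin_lock(&ml->lock);
	list_for_each_entry(w, &ml->waiters, node)
		wake_q_add(&wake_q, w->task);	/* takes a task reference */
	spin_unlock(&ml->lock);

	wake_up_q(&wake_q);	/* the actual wakeups, with the lock dropped */
}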
++
++/*
++ * resched_curr - mark rq's current task 'to be rescheduled now'.
++ *
++ * On UP this means the setting of the need_resched flag, on SMP it
++ * might also involve a cross-CPU call to trigger the scheduler on
++ * the target CPU.
++ */
++static inline void resched_curr(struct rq *rq)
++{
++	struct task_struct *curr = rq->curr;
++	int cpu;
++
++	lockdep_assert_held(&rq->lock);
++
++	if (test_tsk_need_resched(curr))
++		return;
++
++	cpu = cpu_of(rq);
++	if (cpu == smp_processor_id()) {
++		set_tsk_need_resched(curr);
++		set_preempt_need_resched();
++		return;
++	}
++
++	if (set_nr_and_not_polling(curr))
++		smp_send_reschedule(cpu);
++	else
++		trace_sched_wake_idle_without_ipi(cpu);
++}
++
++void resched_cpu(int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++	unsigned long flags;
++
++	raw_spin_lock_irqsave(&rq->lock, flags);
++	if (cpu_online(cpu) || cpu == smp_processor_id())
++		resched_curr(cpu_rq(cpu));
++	raw_spin_unlock_irqrestore(&rq->lock, flags);
++}
++
++#ifdef CONFIG_SMP
++#ifdef CONFIG_NO_HZ_COMMON
++/*
++ * This routine will record that the CPU is going idle with tick stopped.
++ * This info will be used in performing idle load balancing in the future.
++ */
++void nohz_balance_enter_idle(int cpu) {}
++
++/*
++ * In the semi idle case, use the nearest busy CPU for migrating timers
++ * from an idle CPU.  This is good for power-savings.
++ *
++ * We don't do similar optimization for completely idle system, as
++ * selecting an idle CPU will add more delays to the timers than intended
++ * (as that CPU's timer base may not be up to date wrt jiffies etc).
++ */
++int get_nohz_timer_target(void)
++{
++	int i, cpu = smp_processor_id(), default_cpu = -1;
++	struct cpumask *mask;
++	const struct cpumask *hk_mask;
++
++	if (housekeeping_cpu(cpu, HK_TYPE_TIMER)) {
++		if (!idle_cpu(cpu))
++			return cpu;
++		default_cpu = cpu;
++	}
++
++	hk_mask = housekeeping_cpumask(HK_TYPE_TIMER);
++
++	for (mask = per_cpu(sched_cpu_topo_masks, cpu);
++	     mask < per_cpu(sched_cpu_topo_end_mask, cpu); mask++)
++		for_each_cpu_and(i, mask, hk_mask)
++			if (!idle_cpu(i))
++				return i;
++
++	if (default_cpu == -1)
++		default_cpu = housekeeping_any_cpu(HK_TYPE_TIMER);
++	cpu = default_cpu;
++
++	return cpu;
++}
++
++/*
++ * When add_timer_on() enqueues a timer into the timer wheel of an
++ * idle CPU then this timer might expire before the next timer event
++ * which is scheduled to wake up that CPU. In case of a completely
++ * idle system the next event might even be infinite time into the
++ * future. wake_up_idle_cpu() ensures that the CPU is woken up and
++ * leaves the inner idle loop so the newly added timer is taken into
++ * account when the CPU goes back to idle and evaluates the timer
++ * wheel for the next timer event.
++ */
++static inline void wake_up_idle_cpu(int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++
++	if (cpu == smp_processor_id())
++		return;
++
++	/*
++	 * Set TIF_NEED_RESCHED and send an IPI if in the non-polling
++	 * part of the idle loop. This forces an exit from the idle loop
++	 * and a round trip to schedule(). Now this could be optimized
++	 * because a simple new idle loop iteration is enough to
++	 * re-evaluate the next tick. Provided some re-ordering of tick
++	 * nohz functions that would need to follow TIF_NR_POLLING
++	 * clearing:
++	 *
++	 * - On most architectures, a simple fetch_or on ti::flags with a
++	 *   "0" value would be enough to know if an IPI needs to be sent.
++	 *
++	 * - x86 needs to perform a last need_resched() check between
++	 *   monitor and mwait which doesn't take timers into account.
++	 *   There a dedicated TIF_TIMER flag would be required to
++	 *   fetch_or here and be checked along with TIF_NEED_RESCHED
++	 *   before mwait().
++	 *
++	 * However, remote timer enqueue is not such a frequent event
++	 * and testing of the above solutions didn't appear to yield
++	 * much benefit.
++	 */
++	if (set_nr_and_not_polling(rq->idle))
++		smp_send_reschedule(cpu);
++	else
++		trace_sched_wake_idle_without_ipi(cpu);
++}
++
++static inline bool wake_up_full_nohz_cpu(int cpu)
++{
++	/*
++	 * We just need the target to call irq_exit() and re-evaluate
++	 * the next tick. The nohz full kick at least implies that.
++	 * If needed we can still optimize that later with an
++	 * empty IRQ.
++	 */
++	if (cpu_is_offline(cpu))
++		return true;  /* Don't try to wake offline CPUs. */
++	if (tick_nohz_full_cpu(cpu)) {
++		if (cpu != smp_processor_id() ||
++		    tick_nohz_tick_stopped())
++			tick_nohz_full_kick_cpu(cpu);
++		return true;
++	}
++
++	return false;
++}
++
++void wake_up_nohz_cpu(int cpu)
++{
++	if (!wake_up_full_nohz_cpu(cpu))
++		wake_up_idle_cpu(cpu);
++}
++
++static void nohz_csd_func(void *info)
++{
++	struct rq *rq = info;
++	int cpu = cpu_of(rq);
++	unsigned int flags;
++
++	/*
++	 * Release the rq::nohz_csd.
++	 */
++	flags = atomic_fetch_andnot(NOHZ_KICK_MASK, nohz_flags(cpu));
++	WARN_ON(!(flags & NOHZ_KICK_MASK));
++
++	rq->idle_balance = idle_cpu(cpu);
++	if (rq->idle_balance && !need_resched()) {
++		rq->nohz_idle_balance = flags;
++		raise_softirq_irqoff(SCHED_SOFTIRQ);
++	}
++}
++
++#endif /* CONFIG_NO_HZ_COMMON */
++#endif /* CONFIG_SMP */
++
++static inline void wakeup_preempt(struct rq *rq)
++{
++	if (sched_rq_first_task(rq) != rq->curr)
++		resched_curr(rq);
++}
++
++static __always_inline
++int __task_state_match(struct task_struct *p, unsigned int state)
++{
++	if (READ_ONCE(p->__state) & state)
++		return 1;
++
++	if (READ_ONCE(p->saved_state) & state)
++		return -1;
++
++	return 0;
++}
++
++static __always_inline
++int task_state_match(struct task_struct *p, unsigned int state)
++{
++	/*
++	 * Serialize against current_save_and_set_rtlock_wait_state(),
++	 * current_restore_rtlock_saved_state(), and __refrigerator().
++	 */
++	guard(raw_spinlock_irq)(&p->pi_lock);
++
++	return __task_state_match(p, state);
++}
++
++/*
++ * wait_task_inactive - wait for a thread to unschedule.
++ *
++ * Wait for the thread to block in any of the states set in @match_state.
++ * If it changes, i.e. @p might have woken up, then return zero.  When we
++ * succeed in waiting for @p to be off its CPU, we return a positive number
++ * (its total switch count).  If a second call a short while later returns the
++ * same number, the caller can be sure that @p has remained unscheduled the
++ * whole time.
++ *
++ * The caller must ensure that the task *will* unschedule sometime soon,
++ * else this function might spin for a *long* time. This function can't
++ * be called with interrupts off, or it may introduce deadlock with
++ * smp_call_function() if an IPI is sent by the same process we are
++ * waiting to become inactive.
++ */
++unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state)
++{
++	unsigned long flags;
++	int running, queued, match;
++	unsigned long ncsw;
++	struct rq *rq;
++	raw_spinlock_t *lock;
++
++	for (;;) {
++		rq = task_rq(p);
++
++		/*
++		 * If the task is actively running on another CPU
++		 * still, just relax and busy-wait without holding
++		 * any locks.
++		 *
++		 * NOTE! Since we don't hold any locks, it's not
++		 * even sure that "rq" stays as the right runqueue!
++		 * But we don't care, since this will return false
++		 * if the runqueue has changed and p is actually now
++		 * running somewhere else!
++		 */
++		while (task_on_cpu(p)) {
++			if (!task_state_match(p, match_state))
++				return 0;
++			cpu_relax();
++		}
++
++		/*
++		 * Ok, time to look more closely! We need the rq
++		 * lock now, to be *sure*. If we're wrong, we'll
++		 * just go back and repeat.
++		 */
++		task_access_lock_irqsave(p, &lock, &flags);
++		trace_sched_wait_task(p);
++		running = task_on_cpu(p);
++		queued = p->on_rq;
++		ncsw = 0;
++		if ((match = __task_state_match(p, match_state))) {
++			/*
++			 * When matching on p->saved_state, consider this task
++			 * still queued so it will wait.
++			 */
++			if (match < 0)
++				queued = 1;
++			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
++		}
++		task_access_unlock_irqrestore(p, lock, &flags);
++
++		/*
++		 * If it changed from the expected state, bail out now.
++		 */
++		if (unlikely(!ncsw))
++			break;
++
++		/*
++		 * Was it really running after all now that we
++		 * checked with the proper locks actually held?
++		 *
++		 * Oops. Go back and try again..
++		 */
++		if (unlikely(running)) {
++			cpu_relax();
++			continue;
++		}
++
++		/*
++		 * It's not enough that it's not actively running,
++		 * it must be off the runqueue _entirely_, and not
++		 * preempted!
++		 *
++		 * So if it was still runnable (but just not actively
++		 * running right now), it's preempted, and we should
++		 * yield - it could be a while.
++		 */
++		if (unlikely(queued)) {
++			ktime_t to = NSEC_PER_SEC / HZ;
++
++			set_current_state(TASK_UNINTERRUPTIBLE);
++			schedule_hrtimeout(&to, HRTIMER_MODE_REL_HARD);
++			continue;
++		}
++
++		/*
++		 * Ahh, all good. It wasn't running, and it wasn't
++		 * runnable, which means that it will never become
++		 * running in the future either. We're all done!
++		 */
++		break;
++	}
++
++	return ncsw;
++}
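
The "second call a short while later" idiom described in the comment looks like the hypothetical caller sketched below: two matching non-zero switch counts prove the task never ran in between.

static bool stayed_unscheduled(struct task_struct *p)
{
	unsigned long ncsw = wait_task_inactive(p, TASK_UNINTERRUPTIBLE);

	/* ... act on @p while it is believed to be off its CPU ... */

	return ncsw && ncsw == wait_task_inactive(p, TASK_UNINTERRUPTIBLE);
}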
++
++#ifdef CONFIG_SCHED_HRTICK
++/*
++ * Use HR-timers to deliver accurate preemption points.
++ */
++
++static void hrtick_clear(struct rq *rq)
++{
++	if (hrtimer_active(&rq->hrtick_timer))
++		hrtimer_cancel(&rq->hrtick_timer);
++}
++
++/*
++ * High-resolution timer tick.
++ * Runs from hardirq context with interrupts disabled.
++ */
++static enum hrtimer_restart hrtick(struct hrtimer *timer)
++{
++	struct rq *rq = container_of(timer, struct rq, hrtick_timer);
++
++	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
++
++	raw_spin_lock(&rq->lock);
++	resched_curr(rq);
++	raw_spin_unlock(&rq->lock);
++
++	return HRTIMER_NORESTART;
++}
++
++/*
++ * Use hrtick when:
++ *  - enabled by features
++ *  - hrtimer is actually high res
++ */
++static inline int hrtick_enabled(struct rq *rq)
++{
++	/*
++	 * Alt schedule FW doesn't support sched_feat yet
++	if (!sched_feat(HRTICK))
++		return 0;
++	*/
++	if (!cpu_active(cpu_of(rq)))
++		return 0;
++	return hrtimer_is_hres_active(&rq->hrtick_timer);
++}
++
++#ifdef CONFIG_SMP
++
++static void __hrtick_restart(struct rq *rq)
++{
++	struct hrtimer *timer = &rq->hrtick_timer;
++	ktime_t time = rq->hrtick_time;
++
++	hrtimer_start(timer, time, HRTIMER_MODE_ABS_PINNED_HARD);
++}
++
++/*
++ * called from hardirq (IPI) context
++ */
++static void __hrtick_start(void *arg)
++{
++	struct rq *rq = arg;
++
++	raw_spin_lock(&rq->lock);
++	__hrtick_restart(rq);
++	raw_spin_unlock(&rq->lock);
++}
++
++/*
++ * Called to set the hrtick timer state.
++ *
++ * called with rq->lock held and IRQs disabled
++ */
++static inline void hrtick_start(struct rq *rq, u64 delay)
++{
++	struct hrtimer *timer = &rq->hrtick_timer;
++	s64 delta;
++
++	/*
++	 * Don't schedule slices shorter than 10000ns, that just
++	 * doesn't make sense and can cause timer DoS.
++	 */
++	delta = max_t(s64, delay, 10000LL);
++
++	rq->hrtick_time = ktime_add_ns(timer->base->get_time(), delta);
++
++	if (rq == this_rq())
++		__hrtick_restart(rq);
++	else
++		smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
++}
++
++#else
++/*
++ * Called to set the hrtick timer state.
++ *
++ * called with rq->lock held and IRQs disabled
++ */
++static inline void hrtick_start(struct rq *rq, u64 delay)
++{
++	/*
++	 * Don't schedule slices shorter than 10000ns, that just
++	 * doesn't make sense. Rely on vruntime for fairness.
++	 */
++	delay = max_t(u64, delay, 10000LL);
++	hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
++		      HRTIMER_MODE_REL_PINNED_HARD);
++}
++#endif /* CONFIG_SMP */
++
++static void hrtick_rq_init(struct rq *rq)
++{
++#ifdef CONFIG_SMP
++	INIT_CSD(&rq->hrtick_csd, __hrtick_start, rq);
++#endif
++
++	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
++	rq->hrtick_timer.function = hrtick;
++}
++#else	/* CONFIG_SCHED_HRTICK */
++static inline int hrtick_enabled(struct rq *rq)
++{
++	return 0;
++}
++
++static inline void hrtick_clear(struct rq *rq)
++{
++}
++
++static inline void hrtick_rq_init(struct rq *rq)
++{
++}
++#endif	/* CONFIG_SCHED_HRTICK */
++
++/*
++ * activate_task - move a task to the runqueue.
++ *
++ * Context: rq->lock
++ */
++static void activate_task(struct task_struct *p, struct rq *rq)
++{
++	enqueue_task(p, rq, ENQUEUE_WAKEUP);
++
++	WRITE_ONCE(p->on_rq, TASK_ON_RQ_QUEUED);
++	ASSERT_EXCLUSIVE_WRITER(p->on_rq);
++
++	/*
++	 * If in_iowait is set, the code below may not trigger any cpufreq
++	 * utilization updates, so do it here explicitly with the IOWAIT flag
++	 * passed.
++	 */
++	cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT * p->in_iowait);
++}
++
++static void block_task(struct rq *rq, struct task_struct *p)
++{
++	dequeue_task(p, rq, DEQUEUE_SLEEP);
++
++	WRITE_ONCE(p->on_rq, 0);
++	ASSERT_EXCLUSIVE_WRITER(p->on_rq);
++	if (p->sched_contributes_to_load)
++		rq->nr_uninterruptible++;
++
++	if (p->in_iowait) {
++		atomic_inc(&rq->nr_iowait);
++		delayacct_blkio_start();
++	}
++}
++
++static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
++{
++#ifdef CONFIG_SMP
++	/*
++	 * After ->cpu is set up to a new value, task_access_lock(p, ...) can be
++	 * successfully executed on another CPU. We must ensure that updates of
++	 * per-task data have been completed by this moment.
++	 */
++	smp_wmb();
++
++	WRITE_ONCE(task_thread_info(p)->cpu, cpu);
++#endif
++}
++
++#ifdef CONFIG_SMP
++
++void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
++{
++#ifdef CONFIG_SCHED_DEBUG
++	unsigned int state = READ_ONCE(p->__state);
++
++	/*
++	 * We should never call set_task_cpu() on a blocked task,
++	 * ttwu() will sort out the placement.
++	 */
++	WARN_ON_ONCE(state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq);
++
++#ifdef CONFIG_LOCKDEP
++	/*
++	 * The caller should hold either p->pi_lock or rq->lock, when changing
++	 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
++	 *
++	 * sched_move_task() holds both and thus holding either pins the cgroup,
++	 * see task_group().
++	 */
++	WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
++				      lockdep_is_held(&task_rq(p)->lock)));
++#endif
++	/*
++	 * Clearly, migrating tasks to offline CPUs is a fairly daft thing.
++	 */
++	WARN_ON_ONCE(!cpu_online(new_cpu));
++
++	WARN_ON_ONCE(is_migration_disabled(p));
++#endif
++	trace_sched_migrate_task(p, new_cpu);
++
++	if (task_cpu(p) != new_cpu) {
++		rseq_migrate(p);
++		sched_mm_cid_migrate_from(p);
++		perf_event_task_migrate(p);
++	}
++
++	__set_task_cpu(p, new_cpu);
++}
++
++#define MDF_FORCE_ENABLED	0x80
++
++static void
++__do_set_cpus_ptr(struct task_struct *p, const struct cpumask *new_mask)
++{
++	/*
++	 * This here violates the locking rules for affinity, since we're only
++	 * supposed to change these variables while holding both rq->lock and
++	 * p->pi_lock.
++	 *
++	 * HOWEVER, it magically works, because ttwu() is the only code that
++	 * accesses these variables under p->pi_lock and only does so after
++	 * smp_cond_load_acquire(&p->on_cpu, !VAL), and we're in __schedule()
++	 * before finish_task().
++	 *
++	 * XXX do further audits, this smells like something putrid.
++	 */
++	SCHED_WARN_ON(!p->on_cpu);
++	p->cpus_ptr = new_mask;
++}
++
++void migrate_disable(void)
++{
++	struct task_struct *p = current;
++	int cpu;
++
++	if (p->migration_disabled) {
++#ifdef CONFIG_DEBUG_PREEMPT
++		/*
++		 * Warn about overflow half-way through the range.
++		 */
++		WARN_ON_ONCE((s16)p->migration_disabled < 0);
++#endif
++		p->migration_disabled++;
++		return;
++	}
++
++	guard(preempt)();
++	cpu = smp_processor_id();
++	if (cpumask_test_cpu(cpu, &p->cpus_mask)) {
++		cpu_rq(cpu)->nr_pinned++;
++		p->migration_disabled = 1;
++		p->migration_flags &= ~MDF_FORCE_ENABLED;
++
++		/*
++		 * Violates locking rules! see comment in __do_set_cpus_ptr().
++		 */
++		if (p->cpus_ptr == &p->cpus_mask)
++			__do_set_cpus_ptr(p, cpumask_of(cpu));
++	}
++}
++EXPORT_SYMBOL_GPL(migrate_disable);
++
++void migrate_enable(void)
++{
++	struct task_struct *p = current;
++
++#ifdef CONFIG_DEBUG_PREEMPT
++	/*
++	 * Check both overflow from migrate_disable() and superfluous
++	 * migrate_enable().
++	 */
++	if (WARN_ON_ONCE((s16)p->migration_disabled <= 0))
++		return;
++#endif
++
++	if (p->migration_disabled > 1) {
++		p->migration_disabled--;
++		return;
++	}
++
++	/*
++	 * Ensure stop_task runs either before or after this, and that
++	 * __set_cpus_allowed_ptr(SCA_MIGRATE_ENABLE) doesn't schedule().
++	 */
++	guard(preempt)();
++	/*
++	 * Assumption: current should be running on allowed cpu
++	 */
++	WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(), &p->cpus_mask));
++	if (p->cpus_ptr != &p->cpus_mask)
++		__do_set_cpus_ptr(p, &p->cpus_mask);
++	/*
++	 * Mustn't clear migration_disabled() until cpus_ptr points back at the
++	 * regular cpus_mask, otherwise things that race (eg.
++	 * select_fallback_rq) get confused.
++	 */
++	barrier();
++	p->migration_disabled = 0;
++	this_rq()->nr_pinned--;
++}
++EXPORT_SYMBOL_GPL(migrate_enable);
++
++static inline bool rq_has_pinned_tasks(struct rq *rq)
++{
++	return rq->nr_pinned;
++}
++
++/*
++ * Per-CPU kthreads are allowed to run on !active && online CPUs, see
++ * __set_cpus_allowed_ptr() and select_fallback_rq().
++ */
++static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
++{
++	/* When not in the task's cpumask, no point in looking further. */
++	if (!cpumask_test_cpu(cpu, p->cpus_ptr))
++		return false;
++
++	/* migrate_disabled() must be allowed to finish. */
++	if (is_migration_disabled(p))
++		return cpu_online(cpu);
++
++	/* Non kernel threads are not allowed during either online or offline. */
++	if (!(p->flags & PF_KTHREAD))
++		return cpu_active(cpu) && task_cpu_possible(cpu, p);
++
++	/* KTHREAD_IS_PER_CPU is always allowed. */
++	if (kthread_is_per_cpu(p))
++		return cpu_online(cpu);
++
++	/* Regular kernel threads don't get to stay during offline. */
++	if (cpu_dying(cpu))
++		return false;
++
++	/* But are allowed during online. */
++	return cpu_online(cpu);
++}
++
++/*
++ * This is how migration works:
++ *
++ * 1) we invoke migration_cpu_stop() on the target CPU using
++ *    stop_one_cpu().
++ * 2) stopper starts to run (implicitly forcing the migrated thread
++ *    off the CPU)
++ * 3) it checks whether the migrated task is still in the wrong runqueue.
++ * 4) if it's in the wrong runqueue then the migration thread removes
++ *    it and puts it into the right queue.
++ * 5) stopper completes and stop_one_cpu() returns and the migration
++ *    is done.
++ */
++
++/*
++ * move_queued_task - move a queued task to new rq.
++ *
++ * Returns (locked) new rq. Old rq's lock is released.
++ */
++struct rq *move_queued_task(struct rq *rq, struct task_struct *p, int new_cpu)
++{
++	lockdep_assert_held(&rq->lock);
++
++	WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING);
++	dequeue_task(p, rq, 0);
++	set_task_cpu(p, new_cpu);
++	raw_spin_unlock(&rq->lock);
++
++	rq = cpu_rq(new_cpu);
++
++	raw_spin_lock(&rq->lock);
++	WARN_ON_ONCE(task_cpu(p) != new_cpu);
++
++	sched_mm_cid_migrate_to(rq, p);
++
++	sched_task_sanity_check(p, rq);
++	enqueue_task(p, rq, 0);
++	WRITE_ONCE(p->on_rq, TASK_ON_RQ_QUEUED);
++	wakeup_preempt(rq);
++
++	return rq;
++}
++
++struct migration_arg {
++	struct task_struct *task;
++	int dest_cpu;
++};
++
++/*
++ * Move (not current) task off this CPU, onto the destination CPU. We're doing
++ * this because either it can't run here any more (set_cpus_allowed()
++ * away from this CPU, or CPU going down), or because we're
++ * attempting to rebalance this task on exec (sched_exec).
++ *
++ * So we race with normal scheduler movements, but that's OK, as long
++ * as the task is no longer on this CPU.
++ */
++static struct rq *__migrate_task(struct rq *rq, struct task_struct *p, int dest_cpu)
++{
++	/* Affinity changed (again). */
++	if (!is_cpu_allowed(p, dest_cpu))
++		return rq;
++
++	return move_queued_task(rq, p, dest_cpu);
++}
++
++/*
++ * migration_cpu_stop - this will be executed by a high-prio stopper thread
++ * and performs thread migration by bumping thread off CPU then
++ * 'pushing' onto another runqueue.
++ */
++static int migration_cpu_stop(void *data)
++{
++	struct migration_arg *arg = data;
++	struct task_struct *p = arg->task;
++	struct rq *rq = this_rq();
++	unsigned long flags;
++
++	/*
++	 * The original target CPU might have gone down and we might
++	 * be on another CPU but it doesn't matter.
++	 */
++	local_irq_save(flags);
++	/*
++	 * We need to explicitly wake pending tasks before running
++	 * __migrate_task() such that we will not miss enforcing cpus_ptr
++	 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
++	 */
++	flush_smp_call_function_queue();
++
++	raw_spin_lock(&p->pi_lock);
++	raw_spin_lock(&rq->lock);
++	/*
++	 * If task_rq(p) != rq, it cannot be migrated here, because we're
++	 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because
++	 * we're holding p->pi_lock.
++	 */
++	if (task_rq(p) == rq && task_on_rq_queued(p)) {
++		update_rq_clock(rq);
++		rq = __migrate_task(rq, p, arg->dest_cpu);
++	}
++	raw_spin_unlock(&rq->lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++
++	return 0;
++}
++
++static inline void
++set_cpus_allowed_common(struct task_struct *p, struct affinity_context *ctx)
++{
++	cpumask_copy(&p->cpus_mask, ctx->new_mask);
++	p->nr_cpus_allowed = cpumask_weight(ctx->new_mask);
++
++	/*
++	 * Swap in a new user_cpus_ptr if SCA_USER flag set
++	 */
++	if (ctx->flags & SCA_USER)
++		swap(p->user_cpus_ptr, ctx->user_mask);
++}
++
++static void
++__do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx)
++{
++	lockdep_assert_held(&p->pi_lock);
++	set_cpus_allowed_common(p, ctx);
++}
++
++/*
++ * Used for kthread_bind() and select_fallback_rq(), in both cases the user
++ * affinity (if any) should be destroyed too.
++ */
++void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
++{
++	struct affinity_context ac = {
++		.new_mask  = new_mask,
++		.user_mask = NULL,
++		.flags     = SCA_USER,	/* clear the user requested mask */
++	};
++	union cpumask_rcuhead {
++		cpumask_t cpumask;
++		struct rcu_head rcu;
++	};
++
++	__do_set_cpus_allowed(p, &ac);
++
++	/*
++	 * Because this is called with p->pi_lock held, it is not possible
++	 * to use kfree() here (when PREEMPT_RT=y), therefore punt to using
++	 * kfree_rcu().
++	 */
++	kfree_rcu((union cpumask_rcuhead *)ac.user_mask, rcu);
++}
++
++int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src,
++		      int node)
++{
++	cpumask_t *user_mask;
++	unsigned long flags;
++
++	/*
++	 * Always clear dst->user_cpus_ptr first as their user_cpus_ptr's
++	 * may differ by now due to racing.
++	 */
++	dst->user_cpus_ptr = NULL;
++
++	/*
++	 * This check is racy and losing the race is a valid situation.
++	 * It is not worth the extra overhead of taking the pi_lock on
++	 * every fork/clone.
++	 */
++	if (data_race(!src->user_cpus_ptr))
++		return 0;
++
++	user_mask = alloc_user_cpus_ptr(node);
++	if (!user_mask)
++		return -ENOMEM;
++
++	/*
++	 * Use pi_lock to protect content of user_cpus_ptr
++	 *
++	 * Though unlikely, user_cpus_ptr can be reset to NULL by a concurrent
++	 * do_set_cpus_allowed().
++	 */
++	raw_spin_lock_irqsave(&src->pi_lock, flags);
++	if (src->user_cpus_ptr) {
++		swap(dst->user_cpus_ptr, user_mask);
++		cpumask_copy(dst->user_cpus_ptr, src->user_cpus_ptr);
++	}
++	raw_spin_unlock_irqrestore(&src->pi_lock, flags);
++
++	if (unlikely(user_mask))
++		kfree(user_mask);
++
++	return 0;
++}
++
++static inline struct cpumask *clear_user_cpus_ptr(struct task_struct *p)
++{
++	struct cpumask *user_mask = NULL;
++
++	swap(p->user_cpus_ptr, user_mask);
++
++	return user_mask;
++}
++
++void release_user_cpus_ptr(struct task_struct *p)
++{
++	kfree(clear_user_cpus_ptr(p));
++}
++
++#endif
++
++/**
++ * task_curr - is this task currently executing on a CPU?
++ * @p: the task in question.
++ *
++ * Return: 1 if the task is currently executing. 0 otherwise.
++ */
++inline int task_curr(const struct task_struct *p)
++{
++	return cpu_curr(task_cpu(p)) == p;
++}
++
++#ifdef CONFIG_SMP
++/***
++ * kick_process - kick a running thread to enter/exit the kernel
++ * @p: the to-be-kicked thread
++ *
++ * Cause a process which is running on another CPU to enter
++ * kernel-mode, without any delay. (to get signals handled.)
++ *
++ * NOTE: this function doesn't have to take the runqueue lock,
++ * because all it wants to ensure is that the remote task enters
++ * the kernel. If the IPI races and the task has been migrated
++ * to another CPU then no harm is done and the purpose has been
++ * achieved as well.
++ */
++void kick_process(struct task_struct *p)
++{
++	guard(preempt)();
++	int cpu = task_cpu(p);
++
++	if ((cpu != smp_processor_id()) && task_curr(p))
++		smp_send_reschedule(cpu);
++}
++EXPORT_SYMBOL_GPL(kick_process);
++
++/*
++ * ->cpus_ptr is protected by both rq->lock and p->pi_lock
++ *
++ * A few notes on cpu_active vs cpu_online:
++ *
++ *  - cpu_active must be a subset of cpu_online
++ *
++ *  - on CPU-up we allow per-CPU kthreads on the online && !active CPU,
++ *    see __set_cpus_allowed_ptr(). At this point the newly online
++ *    CPU isn't yet part of the sched domains, and balancing will not
++ *    see it.
++ *
++ *  - on cpu-down we clear cpu_active() to mask the sched domains and
++ *    avoid the load balancer to place new tasks on the to be removed
++ *    CPU. Existing tasks will remain running there and will be taken
++ *    off.
++ *
++ * This means that fallback selection must not select !active CPUs.
++ * And can assume that any active CPU must be online. Conversely
++ * select_task_rq() below may allow selection of !active CPUs in order
++ * to satisfy the above rules.
++ */
++static int select_fallback_rq(int cpu, struct task_struct *p)
++{
++	int nid = cpu_to_node(cpu);
++	const struct cpumask *nodemask = NULL;
++	enum { cpuset, possible, fail } state = cpuset;
++	int dest_cpu;
++
++	/*
++	 * If the node that the CPU is on has been offlined, cpu_to_node()
++	 * will return -1. There is no CPU on the node, and we should
++	 * select the CPU on the other node.
++	 */
++	if (nid != -1) {
++		nodemask = cpumask_of_node(nid);
++
++		/* Look for allowed, online CPU in same node. */
++		for_each_cpu(dest_cpu, nodemask) {
++			if (is_cpu_allowed(p, dest_cpu))
++				return dest_cpu;
++		}
++	}
++
++	for (;;) {
++		/* Any allowed, online CPU? */
++		for_each_cpu(dest_cpu, p->cpus_ptr) {
++			if (!is_cpu_allowed(p, dest_cpu))
++				continue;
++			goto out;
++		}
++
++		/* No more Mr. Nice Guy. */
++		switch (state) {
++		case cpuset:
++			if (cpuset_cpus_allowed_fallback(p)) {
++				state = possible;
++				break;
++			}
++			fallthrough;
++		case possible:
++			/*
++			 * XXX When called from select_task_rq() we only
++			 * hold p->pi_lock and again violate locking order.
++			 *
++			 * More yuck to audit.
++			 */
++			do_set_cpus_allowed(p, task_cpu_possible_mask(p));
++			state = fail;
++			break;
++
++		case fail:
++			BUG();
++			break;
++		}
++	}
++
++out:
++	if (state != cpuset) {
++		/*
++		 * Don't tell them about moving exiting tasks or
++		 * kernel threads (both mm NULL), since they never
++		 * leave kernel.
++		 */
++		if (p->mm && printk_ratelimit()) {
++			printk_deferred("process %d (%s) no longer affine to cpu%d\n",
++					task_pid_nr(p), p->comm, cpu);
++		}
++	}
++
++	return dest_cpu;
++}
++
++static inline void
++sched_preempt_mask_flush(cpumask_t *mask, int prio, int ref)
++{
++	int cpu;
++
++	cpumask_copy(mask, sched_preempt_mask + ref);
++	if (prio < ref) {
++		for_each_clear_bit(cpu, cpumask_bits(mask), nr_cpumask_bits) {
++			if (prio < cpu_rq(cpu)->prio)
++				cpumask_set_cpu(cpu, mask);
++		}
++	} else {
++		for_each_cpu_andnot(cpu, mask, sched_idle_mask) {
++			if (prio >= cpu_rq(cpu)->prio)
++				cpumask_clear_cpu(cpu, mask);
++		}
++	}
++}
++
++static inline int
++preempt_mask_check(cpumask_t *preempt_mask, cpumask_t *allow_mask, int prio)
++{
++	cpumask_t *mask = sched_preempt_mask + prio;
++	int pr = atomic_read(&sched_prio_record);
++
++	if (pr != prio && SCHED_QUEUE_BITS - 1 != prio) {
++		sched_preempt_mask_flush(mask, prio, pr);
++		atomic_set(&sched_prio_record, prio);
++	}
++
++	return cpumask_and(preempt_mask, allow_mask, mask);
++}
++
++__read_mostly idle_select_func_t idle_select_func ____cacheline_aligned_in_smp = cpumask_and;
++
++static inline int select_task_rq(struct task_struct *p)
++{
++	cpumask_t allow_mask, mask;
++
++	if (unlikely(!cpumask_and(&allow_mask, p->cpus_ptr, cpu_active_mask)))
++		return select_fallback_rq(task_cpu(p), p);
++
++	if (idle_select_func(&mask, &allow_mask, sched_idle_mask)	||
++	    preempt_mask_check(&mask, &allow_mask, task_sched_prio(p)))
++		return best_mask_cpu(task_cpu(p), &mask);
++
++	return best_mask_cpu(task_cpu(p), &allow_mask);
++}
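
The selection above cascades through progressively weaker targets: an allowed idle CPU, then an allowed CPU whose running task has lower priority, then any allowed CPU. A userspace model of that cascade with plain 64-bit masks, ignoring the topology-aware best_mask_cpu() tie-break:

#include <stdint.h>

/* Caller guarantees a non-empty 'allowed' mask, as select_task_rq()
 * does via its select_fallback_rq() path. */
static int pick_cpu(uint64_t allowed, uint64_t idle, uint64_t preemptible)
{
	uint64_t m;

	if ((m = allowed & idle))		/* stage 1: an idle CPU       */
		return __builtin_ctzll(m);
	if ((m = allowed & preemptible))	/* stage 2: a preemptible CPU */
		return __builtin_ctzll(m);
	return __builtin_ctzll(allowed);	/* stage 3: any allowed CPU   */
}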
++
++void sched_set_stop_task(int cpu, struct task_struct *stop)
++{
++	static struct lock_class_key stop_pi_lock;
++	struct sched_param stop_param = { .sched_priority = STOP_PRIO };
++	struct sched_param start_param = { .sched_priority = 0 };
++	struct task_struct *old_stop = cpu_rq(cpu)->stop;
++
++	if (stop) {
++		/*
++		 * Make it appear like a SCHED_FIFO task; it's something
++		 * userspace knows about and won't get confused about.
++		 *
++		 * Also, it will make PI more or less work without too
++		 * much confusion -- but then, stop work should not
++		 * rely on PI working anyway.
++		 */
++		sched_setscheduler_nocheck(stop, SCHED_FIFO, &stop_param);
++
++		/*
++		 * The PI code calls rt_mutex_setprio() with ->pi_lock held to
++		 * adjust the effective priority of a task. As a result,
++		 * rt_mutex_setprio() can trigger (RT) balancing operations,
++		 * which can then trigger wakeups of the stop thread to push
++		 * around the current task.
++		 *
++		 * The stop task itself will never be part of the PI-chain, it
++		 * never blocks, therefore that ->pi_lock recursion is safe.
++		 * Tell lockdep about this by placing the stop->pi_lock in its
++		 * own class.
++		 */
++		lockdep_set_class(&stop->pi_lock, &stop_pi_lock);
++	}
++
++	cpu_rq(cpu)->stop = stop;
++
++	if (old_stop) {
++		/*
++		 * Reset it back to a normal scheduling policy so that
++		 * it can die in pieces.
++		 */
++		sched_setscheduler_nocheck(old_stop, SCHED_NORMAL, &start_param);
++	}
++}
++
++static int affine_move_task(struct rq *rq, struct task_struct *p, int dest_cpu,
++			    raw_spinlock_t *lock, unsigned long irq_flags)
++	__releases(rq->lock)
++	__releases(p->pi_lock)
++{
++	/* Can the task run on the task's current CPU? If so, we're done */
++	if (!cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) {
++		if (p->migration_disabled) {
++			if (likely(p->cpus_ptr != &p->cpus_mask))
++				__do_set_cpus_ptr(p, &p->cpus_mask);
++			p->migration_disabled = 0;
++			p->migration_flags |= MDF_FORCE_ENABLED;
++			/* When p is migrate_disabled, rq->lock should be held */
++			rq->nr_pinned--;
++		}
++
++		if (task_on_cpu(p) || READ_ONCE(p->__state) == TASK_WAKING) {
++			struct migration_arg arg = { p, dest_cpu };
++
++			/* Need help from migration thread: drop lock and wait. */
++			__task_access_unlock(p, lock);
++			raw_spin_unlock_irqrestore(&p->pi_lock, irq_flags);
++			stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
++			return 0;
++		}
++		if (task_on_rq_queued(p)) {
++			/*
++			 * OK, since we're going to drop the lock immediately
++			 * afterwards anyway.
++			 */
++			update_rq_clock(rq);
++			rq = move_queued_task(rq, p, dest_cpu);
++			lock = &rq->lock;
++		}
++	}
++	__task_access_unlock(p, lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, irq_flags);
++	return 0;
++}
++
++static int __set_cpus_allowed_ptr_locked(struct task_struct *p,
++					 struct affinity_context *ctx,
++					 struct rq *rq,
++					 raw_spinlock_t *lock,
++					 unsigned long irq_flags)
++{
++	const struct cpumask *cpu_allowed_mask = task_cpu_possible_mask(p);
++	const struct cpumask *cpu_valid_mask = cpu_active_mask;
++	bool kthread = p->flags & PF_KTHREAD;
++	int dest_cpu;
++	int ret = 0;
++
++	if (kthread || is_migration_disabled(p)) {
++		/*
++		 * Kernel threads are allowed on online && !active CPUs,
++		 * however, during cpu-hot-unplug, even these might get pushed
++		 * away if not KTHREAD_IS_PER_CPU.
++		 *
++		 * Specifically, migration_disabled() tasks must not fail the
++		 * cpumask_any_and_distribute() pick below, esp. so on
++		 * SCA_MIGRATE_ENABLE, otherwise we'll not call
++		 * set_cpus_allowed_common() and actually reset p->cpus_ptr.
++		 */
++		cpu_valid_mask = cpu_online_mask;
++	}
++
++	if (!kthread && !cpumask_subset(ctx->new_mask, cpu_allowed_mask)) {
++		ret = -EINVAL;
++		goto out;
++	}
++
++	/*
++	 * Must re-check here, to close a race against __kthread_bind(),
++	 * sched_setaffinity() is not guaranteed to observe the flag.
++	 */
++	if ((ctx->flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) {
++		ret = -EINVAL;
++		goto out;
++	}
++
++	if (cpumask_equal(&p->cpus_mask, ctx->new_mask))
++		goto out;
++
++	dest_cpu = cpumask_any_and(cpu_valid_mask, ctx->new_mask);
++	if (dest_cpu >= nr_cpu_ids) {
++		ret = -EINVAL;
++		goto out;
++	}
++
++	__do_set_cpus_allowed(p, ctx);
++
++	return affine_move_task(rq, p, dest_cpu, lock, irq_flags);
++
++out:
++	__task_access_unlock(p, lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, irq_flags);
++
++	return ret;
++}
++
++/*
++ * Change a given task's CPU affinity. Migrate the thread to a
++ * proper CPU and schedule it away if the CPU it's executing on
++ * is removed from the allowed bitmask.
++ *
++ * NOTE: the caller must have a valid reference to the task, the
++ * task must not exit() & deallocate itself prematurely. The
++ * call is not atomic; no spinlocks may be held.
++ */
++int __set_cpus_allowed_ptr(struct task_struct *p,
++			   struct affinity_context *ctx)
++{
++	unsigned long irq_flags;
++	struct rq *rq;
++	raw_spinlock_t *lock;
++
++	raw_spin_lock_irqsave(&p->pi_lock, irq_flags);
++	rq = __task_access_lock(p, &lock);
++	/*
++	 * Masking should be skipped if SCA_USER or any of the SCA_MIGRATE_*
++	 * flags are set.
++	 */
++	if (p->user_cpus_ptr &&
++	    !(ctx->flags & SCA_USER) &&
++	    cpumask_and(rq->scratch_mask, ctx->new_mask, p->user_cpus_ptr))
++		ctx->new_mask = rq->scratch_mask;
++
++	return __set_cpus_allowed_ptr_locked(p, ctx, rq, lock, irq_flags);
++}
++
++int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
++{
++	struct affinity_context ac = {
++		.new_mask  = new_mask,
++		.flags     = 0,
++	};
++
++	return __set_cpus_allowed_ptr(p, &ac);
++}
++EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
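
A typical in-kernel caller of this exported helper looks like the hypothetical sketch below, pinning a kthread it owns to a single CPU and reporting failure:

static int pin_worker(struct task_struct *worker, int cpu)
{
	int ret = set_cpus_allowed_ptr(worker, cpumask_of(cpu));

	if (ret)
		pr_warn("could not pin worker to CPU%d: %d\n", cpu, ret);
	return ret;
}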
++
++/*
++ * Change a given task's CPU affinity to the intersection of its current
++ * affinity mask and @subset_mask, writing the resulting mask to @new_mask.
++ * If user_cpus_ptr is defined, use it as the basis for restricting CPU
++ * affinity; otherwise, use cpu_online_mask instead.
++ *
++ * If the resulting mask is empty, leave the affinity unchanged and return
++ * -EINVAL.
++ */
++static int restrict_cpus_allowed_ptr(struct task_struct *p,
++				     struct cpumask *new_mask,
++				     const struct cpumask *subset_mask)
++{
++	struct affinity_context ac = {
++		.new_mask  = new_mask,
++		.flags     = 0,
++	};
++	unsigned long irq_flags;
++	raw_spinlock_t *lock;
++	struct rq *rq;
++	int err;
++
++	raw_spin_lock_irqsave(&p->pi_lock, irq_flags);
++	rq = __task_access_lock(p, &lock);
++
++	if (!cpumask_and(new_mask, task_user_cpus(p), subset_mask)) {
++		err = -EINVAL;
++		goto err_unlock;
++	}
++
++	return __set_cpus_allowed_ptr_locked(p, &ac, rq, lock, irq_flags);
++
++err_unlock:
++	__task_access_unlock(p, lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, irq_flags);
++	return err;
++}
++
++/*
++ * Restrict the CPU affinity of task @p so that it is a subset of
++ * task_cpu_possible_mask() and point @p->user_cpus_ptr to a copy of the
++ * old affinity mask. If the resulting mask is empty, we warn and walk
++ * up the cpuset hierarchy until we find a suitable mask.
++ */
++void force_compatible_cpus_allowed_ptr(struct task_struct *p)
++{
++	cpumask_var_t new_mask;
++	const struct cpumask *override_mask = task_cpu_possible_mask(p);
++
++	alloc_cpumask_var(&new_mask, GFP_KERNEL);
++
++	/*
++	 * __migrate_task() can fail silently in the face of concurrent
++	 * offlining of the chosen destination CPU, so take the hotplug
++	 * lock to ensure that the migration succeeds.
++	 */
++	cpus_read_lock();
++	if (!cpumask_available(new_mask))
++		goto out_set_mask;
++
++	if (!restrict_cpus_allowed_ptr(p, new_mask, override_mask))
++		goto out_free_mask;
++
++	/*
++	 * We failed to find a valid subset of the affinity mask for the
++	 * task, so override it based on its cpuset hierarchy.
++	 */
++	cpuset_cpus_allowed(p, new_mask);
++	override_mask = new_mask;
++
++out_set_mask:
++	if (printk_ratelimit()) {
++		printk_deferred("Overriding affinity for process %d (%s) to CPUs %*pbl\n",
++				task_pid_nr(p), p->comm,
++				cpumask_pr_args(override_mask));
++	}
++
++	WARN_ON(set_cpus_allowed_ptr(p, override_mask));
++out_free_mask:
++	cpus_read_unlock();
++	free_cpumask_var(new_mask);
++}
++
++/*
++ * Restore the affinity of a task @p which was previously restricted by a
++ * call to force_compatible_cpus_allowed_ptr().
++ *
++ * It is the caller's responsibility to serialise this with any calls to
++ * force_compatible_cpus_allowed_ptr(@p).
++ */
++void relax_compatible_cpus_allowed_ptr(struct task_struct *p)
++{
++	struct affinity_context ac = {
++		.new_mask  = task_user_cpus(p),
++		.flags     = 0,
++	};
++	int ret;
++
++	/*
++	 * Try to restore the old affinity mask with __sched_setaffinity().
++	 * Cpuset masking will be done there too.
++	 */
++	ret = __sched_setaffinity(p, &ac);
++	WARN_ON_ONCE(ret);
++}
++
++#else /* CONFIG_SMP */
++
++static inline int select_task_rq(struct task_struct *p)
++{
++	return 0;
++}
++
++static inline bool rq_has_pinned_tasks(struct rq *rq)
++{
++	return false;
++}
++
++#endif /* !CONFIG_SMP */
++
++static void
++ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
++{
++	struct rq *rq;
++
++	if (!schedstat_enabled())
++		return;
++
++	rq = this_rq();
++
++#ifdef CONFIG_SMP
++	if (cpu == rq->cpu) {
++		__schedstat_inc(rq->ttwu_local);
++		__schedstat_inc(p->stats.nr_wakeups_local);
++	} else {
++		/* Alt schedule FW ToDo:
++		 * How to do ttwu_wake_remote
++		 */
++	}
++#endif /* CONFIG_SMP */
++
++	__schedstat_inc(rq->ttwu_count);
++	__schedstat_inc(p->stats.nr_wakeups);
++}
++
++/*
++ * Mark the task runnable.
++ */
++static inline void ttwu_do_wakeup(struct task_struct *p)
++{
++	WRITE_ONCE(p->__state, TASK_RUNNING);
++	trace_sched_wakeup(p);
++}
++
++static inline void
++ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
++{
++	if (p->sched_contributes_to_load)
++		rq->nr_uninterruptible--;
++
++	if (
++#ifdef CONFIG_SMP
++	    !(wake_flags & WF_MIGRATED) &&
++#endif
++	    p->in_iowait) {
++		delayacct_blkio_end(p);
++		atomic_dec(&task_rq(p)->nr_iowait);
++	}
++
++	activate_task(p, rq);
++	wakeup_preempt(rq);
++
++	ttwu_do_wakeup(p);
++}
++
++/*
++ * Consider @p being inside a wait loop:
++ *
++ *   for (;;) {
++ *      set_current_state(TASK_UNINTERRUPTIBLE);
++ *
++ *      if (CONDITION)
++ *         break;
++ *
++ *      schedule();
++ *   }
++ *   __set_current_state(TASK_RUNNING);
++ *
++ * between set_current_state() and schedule(). In this case @p is still
++ * runnable, so all that needs doing is change p->state back to TASK_RUNNING in
++ * an atomic manner.
++ *
++ * By taking task_rq(p)->lock we serialize against schedule(), if @p->on_rq
++ * then schedule() must still happen and p->state can be changed to
++ * TASK_RUNNING. Otherwise we lost the race, schedule() has happened, and we
++ * need to do a full wakeup with enqueue.
++ *
++ * Returns: %true when the wakeup is done,
++ *          %false otherwise.
++ */
++static int ttwu_runnable(struct task_struct *p, int wake_flags)
++{
++	struct rq *rq;
++	raw_spinlock_t *lock;
++	int ret = 0;
++
++	rq = __task_access_lock(p, &lock);
++	if (task_on_rq_queued(p)) {
++		if (!task_on_cpu(p)) {
++			/*
++			 * When on_rq && !on_cpu the task is preempted, see if
++			 * it should preempt the task that is current now.
++			 */
++			update_rq_clock(rq);
++			wakeup_preempt(rq);
++		}
++		ttwu_do_wakeup(p);
++		ret = 1;
++	}
++	__task_access_unlock(p, lock);
++
++	return ret;
++}
++
++#ifdef CONFIG_SMP
++void sched_ttwu_pending(void *arg)
++{
++	struct llist_node *llist = arg;
++	struct rq *rq = this_rq();
++	struct task_struct *p, *t;
++	struct rq_flags rf;
++
++	if (!llist)
++		return;
++
++	rq_lock_irqsave(rq, &rf);
++	update_rq_clock(rq);
++
++	llist_for_each_entry_safe(p, t, llist, wake_entry.llist) {
++		if (WARN_ON_ONCE(p->on_cpu))
++			smp_cond_load_acquire(&p->on_cpu, !VAL);
++
++		if (WARN_ON_ONCE(task_cpu(p) != cpu_of(rq)))
++			set_task_cpu(p, cpu_of(rq));
++
++		ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0);
++	}
++
++	/*
++	 * Must be after enqueueing at least one task such that
++	 * idle_cpu() does not observe a false-negative -- if it does,
++	 * it is possible for select_idle_siblings() to stack a number
++	 * of tasks on this CPU during that window.
++	 *
++	 * It is OK to clear ttwu_pending when another task is pending.
++	 * We will receive an IPI after local IRQs are enabled and then enqueue it.
++	 * Since nr_running > 0 now, idle_cpu() will always get the correct result.
++	 */
++	WRITE_ONCE(rq->ttwu_pending, 0);
++	rq_unlock_irqrestore(rq, &rf);
++}
++
++/*
++ * Prepare the scene for sending an IPI for a remote smp_call
++ *
++ * Returns true if the caller can proceed with sending the IPI.
++ * Returns false otherwise.
++ */
++bool call_function_single_prep_ipi(int cpu)
++{
++	if (set_nr_if_polling(cpu_rq(cpu)->idle)) {
++		trace_sched_wake_idle_without_ipi(cpu);
++		return false;
++	}
++
++	return true;
++}
++
++/*
++ * Queue a task on the target CPU's wake_list and wake the CPU via IPI if
++ * necessary. The wakee CPU on receipt of the IPI will queue the task
++ * via sched_ttwu_wakeup() for activation so the wakee incurs the cost
++ * of the wakeup instead of the waker.
++ */
++static void __ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
++{
++	struct rq *rq = cpu_rq(cpu);
++
++	p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED);
++
++	WRITE_ONCE(rq->ttwu_pending, 1);
++	__smp_call_single_queue(cpu, &p->wake_entry.llist);
++}
++
++static inline bool ttwu_queue_cond(struct task_struct *p, int cpu)
++{
++	/*
++	 * Do not complicate things with the async wake_list while the CPU is
++	 * in hotplug state.
++	 */
++	if (!cpu_active(cpu))
++		return false;
++
++	/* Ensure the task will still be allowed to run on the CPU. */
++	if (!cpumask_test_cpu(cpu, p->cpus_ptr))
++		return false;
++
++	/*
++	 * If the CPU does not share cache, then queue the task on the
++	 * remote rq's wakelist to avoid accessing remote data.
++	 */
++	if (!cpus_share_cache(smp_processor_id(), cpu))
++		return true;
++
++	if (cpu == smp_processor_id())
++		return false;
++
++	/*
++	 * If the wakee cpu is idle, or the task is descheduling and the
++	 * only running task on the CPU, then use the wakelist to offload
++	 * the task activation to the idle (or soon-to-be-idle) CPU as
++	 * the current CPU is likely busy. nr_running is checked to
++	 * avoid unnecessary task stacking.
++	 *
++	 * Note that we can only get here with (wakee) p->on_rq=0,
++	 * p->on_cpu can be whatever, we've done the dequeue, so
++	 * the wakee has been accounted out of ->nr_running.
++	 */
++	if (!cpu_rq(cpu)->nr_running)
++		return true;
++
++	return false;
++}
++
++static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
++{
++	if (__is_defined(ALT_SCHED_TTWU_QUEUE) && ttwu_queue_cond(p, cpu)) {
++		sched_clock_cpu(cpu); /* Sync clocks across CPUs */
++		__ttwu_queue_wakelist(p, cpu, wake_flags);
++		return true;
++	}
++
++	return false;
++}
++
++void wake_up_if_idle(int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++
++	guard(rcu)();
++	if (is_idle_task(rcu_dereference(rq->curr))) {
++		guard(raw_spinlock_irqsave)(&rq->lock);
++		if (is_idle_task(rq->curr))
++			resched_curr(rq);
++	}
++}
++
++extern struct static_key_false sched_asym_cpucapacity;
++
++static __always_inline bool sched_asym_cpucap_active(void)
++{
++	return static_branch_unlikely(&sched_asym_cpucapacity);
++}
++
++bool cpus_equal_capacity(int this_cpu, int that_cpu)
++{
++	if (!sched_asym_cpucap_active())
++		return true;
++
++	if (this_cpu == that_cpu)
++		return true;
++
++	return arch_scale_cpu_capacity(this_cpu) == arch_scale_cpu_capacity(that_cpu);
++}
++
++bool cpus_share_cache(int this_cpu, int that_cpu)
++{
++	if (this_cpu == that_cpu)
++		return true;
++
++	return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
++}
++#else /* !CONFIG_SMP */
++
++static inline bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
++{
++	return false;
++}
++
++#endif /* CONFIG_SMP */
++
++static inline void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
++{
++	struct rq *rq = cpu_rq(cpu);
++
++	if (ttwu_queue_wakelist(p, cpu, wake_flags))
++		return;
++
++	raw_spin_lock(&rq->lock);
++	update_rq_clock(rq);
++	ttwu_do_activate(rq, p, wake_flags);
++	raw_spin_unlock(&rq->lock);
++}
++
++/*
++ * Invoked from try_to_wake_up() to check whether the task can be woken up.
++ *
++ * The caller holds p::pi_lock if p != current or has preemption
++ * disabled when p == current.
++ *
++ * The rules of saved_state:
++ *
++ *   The related locking code always holds p::pi_lock when updating
++ *   p::saved_state, which means the code is fully serialized in both cases.
++ *
++ *  For PREEMPT_RT, the lock wait and lock wakeups happen via TASK_RTLOCK_WAIT.
++ *  No other bits set. This allows us to distinguish all wakeup scenarios.
++ *
++ *  For FREEZER, the wakeup happens via TASK_FROZEN. No other bits set. This
++ *  allows us to prevent early wakeup of tasks before they can be run on
++ *  asymmetric ISA architectures (e.g. ARMv9).
++ */
++static __always_inline
++bool ttwu_state_match(struct task_struct *p, unsigned int state, int *success)
++{
++	int match;
++
++	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) {
++		WARN_ON_ONCE((state & TASK_RTLOCK_WAIT) &&
++			     state != TASK_RTLOCK_WAIT);
++	}
++
++	*success = !!(match = __task_state_match(p, state));
++
++	/*
++	 * Saved state preserves the task state across blocking on
++	 * an RT lock or for TASK_FREEZABLE tasks.  If the state matches,
++	 * set p::saved_state to TASK_RUNNING, but do not wake the task
++	 * because it waits for a lock wakeup or __thaw_task(). Also
++	 * indicate success because from the regular waker's point of
++	 * view this has succeeded.
++	 *
++	 * After acquiring the lock the task will restore p::__state
++	 * from p::saved_state which ensures that the regular
++	 * wakeup is not lost. The restore will also set
++	 * p::saved_state to TASK_RUNNING so any further tests will
++	 * not result in false positives vs. @success
++	 */
++	if (match < 0)
++		p->saved_state = TASK_RUNNING;
++
++	return match > 0;
++}
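++
++/*
++ * For example, try_to_wake_up(p, TASK_UNINTERRUPTIBLE, 0) only matches a
++ * task whose p->__state is TASK_UNINTERRUPTIBLE; a TASK_INTERRUPTIBLE
++ * sleeper is left alone. TASK_NORMAL, being both bits, matches either
++ * sleep state.
++ */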
++
++/*
++ * Notes on Program-Order guarantees on SMP systems.
++ *
++ *  MIGRATION
++ *
++ * The basic program-order guarantee on SMP systems is that when a task [t]
++ * migrates, all its activity on its old CPU [c0] happens-before any subsequent
++ * execution on its new CPU [c1].
++ *
++ * For migration (of runnable tasks) this is provided by the following means:
++ *
++ *  A) UNLOCK of the rq(c0)->lock scheduling out task t
++ *  B) migration for t is required to synchronize *both* rq(c0)->lock and
++ *     rq(c1)->lock (if not at the same time, then in that order).
++ *  C) LOCK of the rq(c1)->lock scheduling in task
++ *
++ * Transitivity guarantees that B happens after A and C after B.
++ * Note: we only require RCpc transitivity.
++ * Note: the CPU doing B need not be c0 or c1
++ *
++ * Example:
++ *
++ *   CPU0            CPU1            CPU2
++ *
++ *   LOCK rq(0)->lock
++ *   sched-out X
++ *   sched-in Y
++ *   UNLOCK rq(0)->lock
++ *
++ *                                   LOCK rq(0)->lock // orders against CPU0
++ *                                   dequeue X
++ *                                   UNLOCK rq(0)->lock
++ *
++ *                                   LOCK rq(1)->lock
++ *                                   enqueue X
++ *                                   UNLOCK rq(1)->lock
++ *
++ *                   LOCK rq(1)->lock // orders against CPU2
++ *                   sched-out Z
++ *                   sched-in X
++ *                   UNLOCK rq(1)->lock
++ *
++ *
++ *  BLOCKING -- aka. SLEEP + WAKEUP
++ *
++ * For blocking we (obviously) need to provide the same guarantee as for
++ * migration. However the means are completely different as there is no lock
++ * chain to provide order. Instead we do:
++ *
++ *   1) smp_store_release(X->on_cpu, 0)   -- finish_task()
++ *   2) smp_cond_load_acquire(!X->on_cpu) -- try_to_wake_up()
++ *
++ * Example:
++ *
++ *   CPU0 (schedule)  CPU1 (try_to_wake_up) CPU2 (schedule)
++ *
++ *   LOCK rq(0)->lock LOCK X->pi_lock
++ *   dequeue X
++ *   sched-out X
++ *   smp_store_release(X->on_cpu, 0);
++ *
++ *                    smp_cond_load_acquire(&X->on_cpu, !VAL);
++ *                    X->state = WAKING
++ *                    set_task_cpu(X,2)
++ *
++ *                    LOCK rq(2)->lock
++ *                    enqueue X
++ *                    X->state = RUNNING
++ *                    UNLOCK rq(2)->lock
++ *
++ *                                          LOCK rq(2)->lock // orders against CPU1
++ *                                          sched-out Z
++ *                                          sched-in X
++ *                                          UNLOCK rq(2)->lock
++ *
++ *                    UNLOCK X->pi_lock
++ *   UNLOCK rq(0)->lock
++ *
++ *
++ * However; for wakeups there is a second guarantee we must provide, namely we
++ * must observe the state that lead to our wakeup. That is, not only must our
++ * task observe its own prior state, it must also observe the stores prior to
++ * its wakeup.
++ *
++ * This means that any means of doing remote wakeups must order the CPU doing
++ * the wakeup against the CPU the task is going to end up running on. This,
++ * however, is already required for the regular Program-Order guarantee above,
++ * since the waking CPU is the one issuing the ACQUIRE (smp_cond_load_acquire).
++ *
++ */
++
++/**
++ * try_to_wake_up - wake up a thread
++ * @p: the thread to be awakened
++ * @state: the mask of task states that can be woken
++ * @wake_flags: wake modifier flags (WF_*)
++ *
++ * Conceptually does:
++ *
++ *   If (@state & @p->state) @p->state = TASK_RUNNING.
++ *
++ * If the task was not queued/runnable, also place it back on a runqueue.
++ *
++ * This function is atomic against schedule() which would dequeue the task.
++ *
++ * It issues a full memory barrier before accessing @p->state, see the comment
++ * with set_current_state().
++ *
++ * Uses p->pi_lock to serialize against concurrent wake-ups.
++ *
++ * Relies on p->pi_lock stabilizing:
++ *  - p->sched_class
++ *  - p->cpus_ptr
++ *  - p->sched_task_group
++ * in order to do migration, see its use of select_task_rq()/set_task_cpu().
++ *
++ * Tries really hard to only take one task_rq(p)->lock for performance.
++ * Takes rq->lock in:
++ *  - ttwu_runnable()    -- old rq, unavoidable, see comment there;
++ *  - ttwu_queue()       -- new rq, for enqueue of the task;
++ *  - psi_ttwu_dequeue() -- much sadness :-( accounting will kill us.
++ *
++ * As a consequence we race really badly with just about everything. See the
++ * many memory barriers and their comments for details.
++ *
++ * Return: %true if @p->state changes (an actual wakeup was done),
++ *	   %false otherwise.
++ */
++int try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
++{
++	guard(preempt)();
++	int cpu, success = 0;
++
++	if (p == current) {
++		/*
++		 * We're waking current, this means 'p->on_rq' and 'task_cpu(p)
++		 * == smp_processor_id()'. Together this means we can special
++		 * case the whole 'p->on_rq && ttwu_runnable()' case below
++		 * without taking any locks.
++		 *
++		 * In particular:
++		 *  - we rely on Program-Order guarantees for all the ordering,
++		 *  - we're serialized against set_special_state() by virtue of
++		 *    it disabling IRQs (this allows not taking ->pi_lock).
++		 */
++		if (!ttwu_state_match(p, state, &success))
++			goto out;
++
++		trace_sched_waking(p);
++		ttwu_do_wakeup(p);
++		goto out;
++	}
++
++	/*
++	 * If we are going to wake up a thread waiting for CONDITION we
++	 * need to ensure that CONDITION=1 done by the caller can not be
++	 * reordered with p->state check below. This pairs with smp_store_mb()
++	 * in set_current_state() that the waiting thread does.
++	 */
++	scoped_guard (raw_spinlock_irqsave, &p->pi_lock) {
++		smp_mb__after_spinlock();
++		if (!ttwu_state_match(p, state, &success))
++			break;
++
++		trace_sched_waking(p);
++
++		/*
++		 * Ensure we load p->on_rq _after_ p->state, otherwise it would
++		 * be possible to, falsely, observe p->on_rq == 0 and get stuck
++		 * in smp_cond_load_acquire() below.
++		 *
++		 * sched_ttwu_pending()			try_to_wake_up()
++		 *   STORE p->on_rq = 1			  LOAD p->state
++		 *   UNLOCK rq->lock
++		 *
++		 * __schedule() (switch to task 'p')
++		 *   LOCK rq->lock			  smp_rmb();
++		 *   smp_mb__after_spinlock();
++		 *   UNLOCK rq->lock
++		 *
++		 * [task p]
++		 *   STORE p->state = UNINTERRUPTIBLE	  LOAD p->on_rq
++		 *
++		 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
++		 * __schedule().  See the comment for smp_mb__after_spinlock().
++		 *
++		 * A similar smp_rmb() lives in __task_needs_rq_lock().
++		 */
++		smp_rmb();
++		if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags))
++			break;
++
++#ifdef CONFIG_SMP
++		/*
++		 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
++		 * possible to, falsely, observe p->on_cpu == 0.
++		 *
++		 * One must be running (->on_cpu == 1) in order to remove oneself
++		 * from the runqueue.
++		 *
++		 * __schedule() (switch to task 'p')	try_to_wake_up()
++		 *   STORE p->on_cpu = 1		  LOAD p->on_rq
++		 *   UNLOCK rq->lock
++		 *
++		 * __schedule() (put 'p' to sleep)
++		 *   LOCK rq->lock			  smp_rmb();
++		 *   smp_mb__after_spinlock();
++		 *   STORE p->on_rq = 0			  LOAD p->on_cpu
++		 *
++		 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
++		 * __schedule().  See the comment for smp_mb__after_spinlock().
++		 *
++		 * Form a control-dep-acquire with p->on_rq == 0 above, to ensure
++		 * schedule()'s deactivate_task() has 'happened' and p will no longer
++		 * care about its own p->state. See the comment in __schedule().
++		 */
++		smp_acquire__after_ctrl_dep();
++
++		/*
++		 * We're doing the wakeup (@success == 1), they did a dequeue (p->on_rq
++		 * == 0), which means we need to do an enqueue, change p->state to
++		 * TASK_WAKING such that we can unlock p->pi_lock before doing the
++		 * enqueue, such as ttwu_queue_wakelist().
++		 */
++		WRITE_ONCE(p->__state, TASK_WAKING);
++
++		/*
++		 * If the owning (remote) CPU is still in the middle of schedule() with
++		 * this task as prev, consider queueing p on the remote CPU's wake_list
++		 * which potentially sends an IPI instead of spinning on p->on_cpu to
++		 * let the waker make forward progress. This is safe because IRQs are
++		 * disabled and the IPI will deliver after on_cpu is cleared.
++		 *
++		 * Ensure we load task_cpu(p) after p->on_cpu:
++		 *
++		 * set_task_cpu(p, cpu);
++		 *   STORE p->cpu = @cpu
++		 * __schedule() (switch to task 'p')
++		 *   LOCK rq->lock
++		 *   smp_mb__after_spin_lock()          smp_cond_load_acquire(&p->on_cpu)
++		 *   STORE p->on_cpu = 1                LOAD p->cpu
++		 *
++		 * to ensure we observe the correct CPU on which the task is currently
++		 * scheduling.
++		 */
++		if (smp_load_acquire(&p->on_cpu) &&
++		    ttwu_queue_wakelist(p, task_cpu(p), wake_flags))
++			break;
++
++		/*
++		 * If the owning (remote) CPU is still in the middle of schedule() with
++		 * this task as prev, wait until it's done referencing the task.
++		 *
++		 * Pairs with the smp_store_release() in finish_task().
++		 *
++		 * This ensures that tasks getting woken will be fully ordered against
++		 * their previous state and preserve Program Order.
++		 */
++		smp_cond_load_acquire(&p->on_cpu, !VAL);
++
++		sched_task_ttwu(p);
++
++		if ((wake_flags & WF_CURRENT_CPU) &&
++		    cpumask_test_cpu(smp_processor_id(), p->cpus_ptr))
++			cpu = smp_processor_id();
++		else
++			cpu = select_task_rq(p);
++
++		if (cpu != task_cpu(p)) {
++			if (p->in_iowait) {
++				delayacct_blkio_end(p);
++				atomic_dec(&task_rq(p)->nr_iowait);
++			}
++
++			wake_flags |= WF_MIGRATED;
++			set_task_cpu(p, cpu);
++		}
++#else
++		sched_task_ttwu(p);
++
++		cpu = task_cpu(p);
++#endif /* CONFIG_SMP */
++
++		ttwu_queue(p, cpu, wake_flags);
++	}
++out:
++	if (success)
++		ttwu_stat(p, task_cpu(p), wake_flags);
++
++	return success;
++}
++
++static bool __task_needs_rq_lock(struct task_struct *p)
++{
++	unsigned int state = READ_ONCE(p->__state);
++
++	/*
++	 * Since pi->lock blocks try_to_wake_up(), we don't need rq->lock when
++	 * the task is blocked. Make sure to check @state since ttwu() can drop
++	 * locks at the end, see ttwu_queue_wakelist().
++	 */
++	if (state == TASK_RUNNING || state == TASK_WAKING)
++		return true;
++
++	/*
++	 * Ensure we load p->on_rq after p->__state, otherwise it would be
++	 * possible to, falsely, observe p->on_rq == 0.
++	 *
++	 * See try_to_wake_up() for a longer comment.
++	 */
++	smp_rmb();
++	if (p->on_rq)
++		return true;
++
++#ifdef CONFIG_SMP
++	/*
++	 * Ensure the task has finished __schedule() and will not be referenced
++	 * anymore. Again, see try_to_wake_up() for a longer comment.
++	 */
++	smp_rmb();
++	smp_cond_load_acquire(&p->on_cpu, !VAL);
++#endif
++
++	return false;
++}
++
++/**
++ * task_call_func - Invoke a function on task in fixed state
++ * @p: Process for which the function is to be invoked, can be @current.
++ * @func: Function to invoke.
++ * @arg: Argument to function.
++ *
++ * Fix the task in its current state by avoiding wakeups and/or rq operations
++ * and call @func(@arg) on it.  This function can use task_is_runnable() and
++ * task_curr() to work out what the state is, if required.  Given that @func
++ * can be invoked with a runqueue lock held, it had better be quite
++ * lightweight.
++ *
++ * Returns:
++ *   Whatever @func returns
++ */
++int task_call_func(struct task_struct *p, task_call_f func, void *arg)
++{
++	struct rq *rq = NULL;
++	struct rq_flags rf;
++	int ret;
++
++	raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
++
++	if (__task_needs_rq_lock(p))
++		rq = __task_rq_lock(p, &rf);
++
++	/*
++	 * At this point the task is pinned; either:
++	 *  - blocked and we're holding off wakeups      (pi->lock)
++	 *  - woken, and we're holding off enqueue       (rq->lock)
++	 *  - queued, and we're holding off schedule     (rq->lock)
++	 *  - running, and we're holding off de-schedule (rq->lock)
++	 *
++	 * The called function (@func) can use: task_curr(), p->on_rq and
++	 * p->__state to differentiate between these states.
++	 */
++	ret = func(p, arg);
++
++	if (rq)
++		__task_rq_unlock(rq, &rf);
++
++	raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
++	return ret;
++}
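++
++/*
++ * Minimal usage sketch (the callback below is hypothetical):
++ *
++ *	static int get_state(struct task_struct *t, void *arg)
++ *	{
++ *		*(unsigned int *)arg = READ_ONCE(t->__state);
++ *		return 0;
++ *	}
++ *
++ *	unsigned int state;
++ *	task_call_func(p, get_state, &state);
++ *
++ * Since @func may run under a runqueue lock, it must not sleep.
++ */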
++
++/**
++ * cpu_curr_snapshot - Return a snapshot of the currently running task
++ * @cpu: The CPU on which to snapshot the task.
++ *
++ * Returns the task_struct pointer of the task "currently" running on
++ * the specified CPU.  If the same task is running on that CPU throughout,
++ * the return value will be a pointer to that task's task_struct structure.
++ * If the CPU did any context switches even vaguely concurrently with the
++ * execution of this function, the return value will be a pointer to the
++ * task_struct structure of a randomly chosen task that was running on
++ * that CPU somewhere around the time that this function was executing.
++ *
++ * If the specified CPU was offline, the return value is whatever it
++ * is, perhaps a pointer to the task_struct structure of that CPU's idle
++ * task, but there is no guarantee.  Callers wishing a useful return
++ * value must take some action to ensure that the specified CPU remains
++ * online throughout.
++ *
++ * This function executes full memory barriers before and after fetching
++ * the pointer, which permits the caller to confine this function's fetch
++ * with respect to the caller's accesses to other shared variables.
++ */
++struct task_struct *cpu_curr_snapshot(int cpu)
++{
++	struct task_struct *t;
++
++	smp_mb(); /* Pairing determined by caller's synchronization design. */
++	t = rcu_dereference(cpu_curr(cpu));
++	smp_mb(); /* Pairing determined by caller's synchronization design. */
++	return t;
++}
++
++/**
++ * wake_up_process - Wake up a specific process
++ * @p: The process to be woken up.
++ *
++ * Attempt to wake up the nominated process and move it to the set of runnable
++ * processes.
++ *
++ * Return: 1 if the process was woken up, 0 if it was already running.
++ *
++ * This function executes a full memory barrier before accessing the task state.
++ */
++int wake_up_process(struct task_struct *p)
++{
++	return try_to_wake_up(p, TASK_NORMAL, 0);
++}
++EXPORT_SYMBOL(wake_up_process);
++
++int wake_up_state(struct task_struct *p, unsigned int state)
++{
++	return try_to_wake_up(p, state, 0);
++}
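++
++/*
++ * Sketch of the wait/wake pairing these helpers serve:
++ *
++ *	// sleeper:				// waker:
++ *	set_current_state(TASK_UNINTERRUPTIBLE);
++ *	if (!CONDITION)				CONDITION = 1;
++ *		schedule();			wake_up_process(p);
++ *	__set_current_state(TASK_RUNNING);
++ *
++ * The barrier in set_current_state() pairs with the full barrier
++ * try_to_wake_up() issues before reading p->state, see above.
++ */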
++
++/*
++ * Perform scheduler related setup for a newly forked process p.
++ * p is forked by current.
++ *
++ * __sched_fork() is basic setup used by init_idle() too:
++ */
++static inline void __sched_fork(unsigned long clone_flags, struct task_struct *p)
++{
++	p->on_rq			= 0;
++	p->on_cpu			= 0;
++	p->utime			= 0;
++	p->stime			= 0;
++	p->sched_time			= 0;
++
++#ifdef CONFIG_SCHEDSTATS
++	/* Even if schedstat is disabled, there should not be garbage */
++	memset(&p->stats, 0, sizeof(p->stats));
++#endif
++
++#ifdef CONFIG_PREEMPT_NOTIFIERS
++	INIT_HLIST_HEAD(&p->preempt_notifiers);
++#endif
++
++#ifdef CONFIG_COMPACTION
++	p->capture_control = NULL;
++#endif
++#ifdef CONFIG_SMP
++	p->wake_entry.u_flags = CSD_TYPE_TTWU;
++#endif
++	init_sched_mm_cid(p);
++}
++
++/*
++ * fork()/clone()-time setup:
++ */
++int sched_fork(unsigned long clone_flags, struct task_struct *p)
++{
++	__sched_fork(clone_flags, p);
++	/*
++	 * We mark the process as NEW here. This guarantees that
++	 * nobody will actually run it, and a signal or other external
++	 * event cannot wake it up and insert it on the runqueue either.
++	 */
++	p->__state = TASK_NEW;
++
++	/*
++	 * Make sure we do not leak PI boosting priority to the child.
++	 */
++	p->prio = current->normal_prio;
++
++	/*
++	 * Revert to default priority/policy on fork if requested.
++	 */
++	if (unlikely(p->sched_reset_on_fork)) {
++		if (task_has_rt_policy(p)) {
++			p->policy = SCHED_NORMAL;
++			p->static_prio = NICE_TO_PRIO(0);
++			p->rt_priority = 0;
++		} else if (PRIO_TO_NICE(p->static_prio) < 0)
++			p->static_prio = NICE_TO_PRIO(0);
++
++		p->prio = p->normal_prio = p->static_prio;
++
++		/*
++		 * We don't need the reset flag anymore after the fork. It has
++		 * fulfilled its duty:
++		 */
++		p->sched_reset_on_fork = 0;
++	}
++
++#ifdef CONFIG_SCHED_INFO
++	if (unlikely(sched_info_on()))
++		memset(&p->sched_info, 0, sizeof(p->sched_info));
++#endif
++	init_task_preempt_count(p);
++
++	return 0;
++}
++
++int sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs)
++{
++	unsigned long flags;
++	struct rq *rq;
++
++	/*
++	 * Because we're not yet on the pid-hash, p->pi_lock isn't strictly
++	 * required yet, but lockdep gets upset if rules are violated.
++	 */
++	raw_spin_lock_irqsave(&p->pi_lock, flags);
++	/*
++	 * Share the timeslice between parent and child, thus the
++	 * total amount of pending timeslices in the system doesn't change,
++	 * resulting in more scheduling fairness.
++	 */
++	rq = this_rq();
++	raw_spin_lock(&rq->lock);
++
++	rq->curr->time_slice /= 2;
++	p->time_slice = rq->curr->time_slice;
++#ifdef CONFIG_SCHED_HRTICK
++	hrtick_start(rq, rq->curr->time_slice);
++#endif
++
++	if (p->time_slice < RESCHED_NS) {
++		p->time_slice = sysctl_sched_base_slice;
++		resched_curr(rq);
++	}
++	sched_task_fork(p, rq);
++	raw_spin_unlock(&rq->lock);
++
++	rseq_migrate(p);
++	/*
++	 * We're setting the CPU for the first time, we don't migrate,
++	 * so use __set_task_cpu().
++	 */
++	__set_task_cpu(p, smp_processor_id());
++	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++
++	return 0;
++}
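++
++/*
++ * Worked example of the split above: a parent with 4ms of time_slice left
++ * at fork continues with 2ms and the child starts with the other 2ms. If
++ * the halved slice falls below RESCHED_NS, the child instead gets a fresh
++ * sysctl_sched_base_slice and the parent is marked for rescheduling.
++ */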
++
++void sched_cancel_fork(struct task_struct *p)
++{
++}
++
++void sched_post_fork(struct task_struct *p)
++{
++}
++
++#ifdef CONFIG_SCHEDSTATS
++
++DEFINE_STATIC_KEY_FALSE(sched_schedstats);
++
++static void set_schedstats(bool enabled)
++{
++	if (enabled)
++		static_branch_enable(&sched_schedstats);
++	else
++		static_branch_disable(&sched_schedstats);
++}
++
++void force_schedstat_enabled(void)
++{
++	if (!schedstat_enabled()) {
++		pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n");
++		static_branch_enable(&sched_schedstats);
++	}
++}
++
++static int __init setup_schedstats(char *str)
++{
++	int ret = 0;
++	if (!str)
++		goto out;
++
++	if (!strcmp(str, "enable")) {
++		set_schedstats(true);
++		ret = 1;
++	} else if (!strcmp(str, "disable")) {
++		set_schedstats(false);
++		ret = 1;
++	}
++out:
++	if (!ret)
++		pr_warn("Unable to parse schedstats=\n");
++
++	return ret;
++}
++__setup("schedstats=", setup_schedstats);
++
++#ifdef CONFIG_PROC_SYSCTL
++static int sysctl_schedstats(const struct ctl_table *table, int write, void *buffer,
++		size_t *lenp, loff_t *ppos)
++{
++	struct ctl_table t;
++	int err;
++	int state = static_branch_likely(&sched_schedstats);
++
++	if (write && !capable(CAP_SYS_ADMIN))
++		return -EPERM;
++
++	t = *table;
++	t.data = &state;
++	err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
++	if (err < 0)
++		return err;
++	if (write)
++		set_schedstats(state);
++	return err;
++}
++
++static struct ctl_table sched_core_sysctls[] = {
++	{
++		.procname       = "sched_schedstats",
++		.data           = NULL,
++		.maxlen         = sizeof(unsigned int),
++		.mode           = 0644,
++		.proc_handler   = sysctl_schedstats,
++		.extra1         = SYSCTL_ZERO,
++		.extra2         = SYSCTL_ONE,
++	},
++};
++static int __init sched_core_sysctl_init(void)
++{
++	register_sysctl_init("kernel", sched_core_sysctls);
++	return 0;
++}
++late_initcall(sched_core_sysctl_init);
++#endif /* CONFIG_PROC_SYSCTL */
++#endif /* CONFIG_SCHEDSTATS */
++
++/*
++ * wake_up_new_task - wake up a newly created task for the first time.
++ *
++ * This function will do some initial scheduler statistics housekeeping
++ * that must be done for every newly created context, then puts the task
++ * on the runqueue and wakes it.
++ */
++void wake_up_new_task(struct task_struct *p)
++{
++	unsigned long flags;
++	struct rq *rq;
++
++	raw_spin_lock_irqsave(&p->pi_lock, flags);
++	WRITE_ONCE(p->__state, TASK_RUNNING);
++	rq = cpu_rq(select_task_rq(p));
++#ifdef CONFIG_SMP
++	rseq_migrate(p);
++	/*
++	 * Fork balancing, do it here and not earlier because:
++	 * - cpus_ptr can change in the fork path
++	 * - any previously selected CPU might disappear through hotplug
++	 *
++	 * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
++	 * as we're not fully set-up yet.
++	 */
++	__set_task_cpu(p, cpu_of(rq));
++#endif
++
++	raw_spin_lock(&rq->lock);
++	update_rq_clock(rq);
++
++	activate_task(p, rq);
++	trace_sched_wakeup_new(p);
++	wakeup_preempt(rq);
++
++	raw_spin_unlock(&rq->lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++}
++
++#ifdef CONFIG_PREEMPT_NOTIFIERS
++
++static DEFINE_STATIC_KEY_FALSE(preempt_notifier_key);
++
++void preempt_notifier_inc(void)
++{
++	static_branch_inc(&preempt_notifier_key);
++}
++EXPORT_SYMBOL_GPL(preempt_notifier_inc);
++
++void preempt_notifier_dec(void)
++{
++	static_branch_dec(&preempt_notifier_key);
++}
++EXPORT_SYMBOL_GPL(preempt_notifier_dec);
++
++/**
++ * preempt_notifier_register - tell me when current is being preempted & rescheduled
++ * @notifier: notifier struct to register
++ */
++void preempt_notifier_register(struct preempt_notifier *notifier)
++{
++	if (!static_branch_unlikely(&preempt_notifier_key))
++		WARN(1, "registering preempt_notifier while notifiers disabled\n");
++
++	hlist_add_head(&notifier->link, &current->preempt_notifiers);
++}
++EXPORT_SYMBOL_GPL(preempt_notifier_register);
++
++/**
++ * preempt_notifier_unregister - no longer interested in preemption notifications
++ * @notifier: notifier struct to unregister
++ *
++ * This is *not* safe to call from within a preemption notifier.
++ */
++void preempt_notifier_unregister(struct preempt_notifier *notifier)
++{
++	hlist_del(&notifier->link);
++}
++EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
++
++static void __fire_sched_in_preempt_notifiers(struct task_struct *curr)
++{
++	struct preempt_notifier *notifier;
++
++	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
++		notifier->ops->sched_in(notifier, raw_smp_processor_id());
++}
++
++static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
++{
++	if (static_branch_unlikely(&preempt_notifier_key))
++		__fire_sched_in_preempt_notifiers(curr);
++}
++
++static void
++__fire_sched_out_preempt_notifiers(struct task_struct *curr,
++				   struct task_struct *next)
++{
++	struct preempt_notifier *notifier;
++
++	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
++		notifier->ops->sched_out(notifier, next);
++}
++
++static __always_inline void
++fire_sched_out_preempt_notifiers(struct task_struct *curr,
++				 struct task_struct *next)
++{
++	if (static_branch_unlikely(&preempt_notifier_key))
++		__fire_sched_out_preempt_notifiers(curr, next);
++}
++
++#else /* !CONFIG_PREEMPT_NOTIFIERS */
++
++static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
++{
++}
++
++static inline void
++fire_sched_out_preempt_notifiers(struct task_struct *curr,
++				 struct task_struct *next)
++{
++}
++
++#endif /* CONFIG_PREEMPT_NOTIFIERS */
++
++static inline void prepare_task(struct task_struct *next)
++{
++	/*
++	 * Claim the task as running, we do this before switching to it
++	 * such that any running task will have this set.
++	 *
++	 * See the smp_load_acquire(&p->on_cpu) case in ttwu() and
++	 * its ordering comment.
++	 */
++	WRITE_ONCE(next->on_cpu, 1);
++}
++
++static inline void finish_task(struct task_struct *prev)
++{
++#ifdef CONFIG_SMP
++	/*
++	 * This must be the very last reference to @prev from this CPU. After
++	 * p->on_cpu is cleared, the task can be moved to a different CPU. We
++	 * must ensure this doesn't happen until the switch is completely
++	 * finished.
++	 *
++	 * In particular, the load of prev->state in finish_task_switch() must
++	 * happen before this.
++	 *
++	 * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
++	 */
++	smp_store_release(&prev->on_cpu, 0);
++#else
++	prev->on_cpu = 0;
++#endif
++}
++
++#ifdef CONFIG_SMP
++
++static void do_balance_callbacks(struct rq *rq, struct balance_callback *head)
++{
++	void (*func)(struct rq *rq);
++	struct balance_callback *next;
++
++	lockdep_assert_held(&rq->lock);
++
++	while (head) {
++		func = (void (*)(struct rq *))head->func;
++		next = head->next;
++		head->next = NULL;
++		head = next;
++
++		func(rq);
++	}
++}
++
++static void balance_push(struct rq *rq);
++
++/*
++ * balance_push_callback is a right abuse of the callback interface and plays
++ * by significantly different rules.
++ *
++ * Where the normal balance_callback's purpose is to be run in the same context
++ * that queued it (only later, when it's safe to drop rq->lock again),
++ * balance_push_callback is specifically targeted at __schedule().
++ *
++ * This abuse is tolerated because it places all the unlikely/odd cases behind
++ * a single test, namely: rq->balance_callback == NULL.
++ */
++struct balance_callback balance_push_callback = {
++	.next = NULL,
++	.func = balance_push,
++};
++
++static inline struct balance_callback *
++__splice_balance_callbacks(struct rq *rq, bool split)
++{
++	struct balance_callback *head = rq->balance_callback;
++
++	if (likely(!head))
++		return NULL;
++
++	lockdep_assert_rq_held(rq);
++	/*
++	 * Must not take balance_push_callback off the list when
++	 * splice_balance_callbacks() and balance_callbacks() are not
++	 * in the same rq->lock section.
++	 *
++	 * In that case it would be possible for __schedule() to interleave
++	 * and observe the list empty.
++	 */
++	if (split && head == &balance_push_callback)
++		head = NULL;
++	else
++		rq->balance_callback = NULL;
++
++	return head;
++}
++
++struct balance_callback *splice_balance_callbacks(struct rq *rq)
++{
++	return __splice_balance_callbacks(rq, true);
++}
++
++static void __balance_callbacks(struct rq *rq)
++{
++	do_balance_callbacks(rq, __splice_balance_callbacks(rq, false));
++}
++
++void balance_callbacks(struct rq *rq, struct balance_callback *head)
++{
++	unsigned long flags;
++
++	if (unlikely(head)) {
++		raw_spin_lock_irqsave(&rq->lock, flags);
++		do_balance_callbacks(rq, head);
++		raw_spin_unlock_irqrestore(&rq->lock, flags);
++	}
++}
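++
++/*
++ * Intended pattern for the splice/run pair above (sketch):
++ *
++ *	raw_spin_lock(&rq->lock);
++ *	...
++ *	head = splice_balance_callbacks(rq);
++ *	raw_spin_unlock(&rq->lock);
++ *
++ *	// safe to call with rq->lock dropped; re-acquired internally
++ *	balance_callbacks(rq, head);
++ */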
++
++#else
++
++static inline void __balance_callbacks(struct rq *rq)
++{
++}
++#endif
++
++static inline void
++prepare_lock_switch(struct rq *rq, struct task_struct *next)
++{
++	/*
++	 * The runqueue lock will be released by the next
++	 * task (which is an invalid locking op but in the case
++	 * of the scheduler it's an obvious special-case), so we
++	 * do an early lockdep release here:
++	 */
++	spin_release(&rq->lock.dep_map, _THIS_IP_);
++#ifdef CONFIG_DEBUG_SPINLOCK
++	/* this is a valid case when another task releases the spinlock */
++	rq->lock.owner = next;
++#endif
++}
++
++static inline void finish_lock_switch(struct rq *rq)
++{
++	/*
++	 * If we are tracking spinlock dependencies then we have to
++	 * fix up the runqueue lock - which gets 'carried over' from
++	 * prev into current:
++	 */
++	spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
++	__balance_callbacks(rq);
++	raw_spin_unlock_irq(&rq->lock);
++}
++
++/*
++ * NOP if the arch has not defined these:
++ */
++
++#ifndef prepare_arch_switch
++# define prepare_arch_switch(next)	do { } while (0)
++#endif
++
++#ifndef finish_arch_post_lock_switch
++# define finish_arch_post_lock_switch()	do { } while (0)
++#endif
++
++static inline void kmap_local_sched_out(void)
++{
++#ifdef CONFIG_KMAP_LOCAL
++	if (unlikely(current->kmap_ctrl.idx))
++		__kmap_local_sched_out();
++#endif
++}
++
++static inline void kmap_local_sched_in(void)
++{
++#ifdef CONFIG_KMAP_LOCAL
++	if (unlikely(current->kmap_ctrl.idx))
++		__kmap_local_sched_in();
++#endif
++}
++
++/**
++ * prepare_task_switch - prepare to switch tasks
++ * @rq: the runqueue preparing to switch
++ * @next: the task we are going to switch to.
++ *
++ * This is called with the rq lock held and interrupts off. It must
++ * be paired with a subsequent finish_task_switch after the context
++ * switch.
++ *
++ * prepare_task_switch sets up locking and calls architecture specific
++ * hooks.
++ */
++static inline void
++prepare_task_switch(struct rq *rq, struct task_struct *prev,
++		    struct task_struct *next)
++{
++	kcov_prepare_switch(prev);
++	sched_info_switch(rq, prev, next);
++	perf_event_task_sched_out(prev, next);
++	rseq_preempt(prev);
++	fire_sched_out_preempt_notifiers(prev, next);
++	kmap_local_sched_out();
++	prepare_task(next);
++	prepare_arch_switch(next);
++}
++
++/**
++ * finish_task_switch - clean up after a task-switch
++ * @rq: runqueue associated with task-switch
++ * @prev: the thread we just switched away from.
++ *
++ * finish_task_switch must be called after the context switch, paired
++ * with a prepare_task_switch call before the context switch.
++ * finish_task_switch will reconcile locking set up by prepare_task_switch,
++ * and do any other architecture-specific cleanup actions.
++ *
++ * Note that we may have delayed dropping an mm in context_switch(). If
++ * so, we finish that here outside of the runqueue lock.  (Doing it
++ * with the lock held can cause deadlocks; see schedule() for
++ * details.)
++ *
++ * The context switch has flipped the stack from under us and restored the
++ * local variables which were saved when this task called schedule() in the
++ * past. 'prev == current' is still correct but we need to recalculate this_rq
++ * because prev may have moved to another CPU.
++ */
++static struct rq *finish_task_switch(struct task_struct *prev)
++	__releases(rq->lock)
++{
++	struct rq *rq = this_rq();
++	struct mm_struct *mm = rq->prev_mm;
++	unsigned int prev_state;
++
++	/*
++	 * The previous task will have left us with a preempt_count of 2
++	 * because it left us after:
++	 *
++	 *	schedule()
++	 *	  preempt_disable();			// 1
++	 *	  __schedule()
++	 *	    raw_spin_lock_irq(&rq->lock)	// 2
++	 *
++	 * Also, see FORK_PREEMPT_COUNT.
++	 */
++	if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET,
++		      "corrupted preempt_count: %s/%d/0x%x\n",
++		      current->comm, current->pid, preempt_count()))
++		preempt_count_set(FORK_PREEMPT_COUNT);
++
++	rq->prev_mm = NULL;
++
++	/*
++	 * A task struct has one reference for the use as "current".
++	 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
++	 * schedule one last time. The schedule call will never return, and
++	 * the scheduled task must drop that reference.
++	 *
++	 * We must observe prev->state before clearing prev->on_cpu (in
++	 * finish_task), otherwise a concurrent wakeup can get prev
++	 * running on another CPU and we could race with its RUNNING -> DEAD
++	 * transition, resulting in a double drop.
++	 */
++	prev_state = READ_ONCE(prev->__state);
++	vtime_task_switch(prev);
++	perf_event_task_sched_in(prev, current);
++	finish_task(prev);
++	tick_nohz_task_switch();
++	finish_lock_switch(rq);
++	finish_arch_post_lock_switch();
++	kcov_finish_switch(current);
++	/*
++	 * kmap_local_sched_out() is invoked with rq::lock held and
++	 * interrupts disabled. There is no requirement for that, but the
++	 * sched out code does not have an interrupt enabled section.
++	 * Restoring the maps on sched in does not require interrupts being
++	 * disabled either.
++	 */
++	kmap_local_sched_in();
++
++	fire_sched_in_preempt_notifiers(current);
++	/*
++	 * When switching through a kernel thread, the loop in
++	 * membarrier_{private,global}_expedited() may have observed that
++	 * kernel thread and not issued an IPI. It is therefore possible to
++	 * schedule between user->kernel->user threads without passing through
++	 * switch_mm(). Membarrier requires a barrier after storing to
++	 * rq->curr, before returning to userspace, so provide them here:
++	 *
++	 * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly
++	 *   provided by mmdrop(),
++	 * - a sync_core for SYNC_CORE.
++	 */
++	if (mm) {
++		membarrier_mm_sync_core_before_usermode(mm);
++		mmdrop_sched(mm);
++	}
++	if (unlikely(prev_state == TASK_DEAD)) {
++		/* Task is done with its stack. */
++		put_task_stack(prev);
++
++		put_task_struct_rcu_user(prev);
++	}
++
++	return rq;
++}
++
++/**
++ * schedule_tail - first thing a freshly forked thread must call.
++ * @prev: the thread we just switched away from.
++ */
++asmlinkage __visible void schedule_tail(struct task_struct *prev)
++	__releases(rq->lock)
++{
++	/*
++	 * New tasks start with FORK_PREEMPT_COUNT, see there and
++	 * finish_task_switch() for details.
++	 *
++	 * finish_task_switch() will drop rq->lock() and lower preempt_count
++	 * and the preempt_enable() will end up enabling preemption (on
++	 * PREEMPT_COUNT kernels).
++	 */
++
++	finish_task_switch(prev);
++	preempt_enable();
++
++	if (current->set_child_tid)
++		put_user(task_pid_vnr(current), current->set_child_tid);
++
++	calculate_sigpending();
++}
++
++/*
++ * context_switch - switch to the new MM and the new thread's register state.
++ */
++static __always_inline struct rq *
++context_switch(struct rq *rq, struct task_struct *prev,
++	       struct task_struct *next)
++{
++	prepare_task_switch(rq, prev, next);
++
++	/*
++	 * For paravirt, this is coupled with an exit in switch_to to
++	 * combine the page table reload and the switch backend into
++	 * one hypercall.
++	 */
++	arch_start_context_switch(prev);
++
++	/*
++	 * kernel -> kernel   lazy + transfer active
++	 *   user -> kernel   lazy + mmgrab() active
++	 *
++	 * kernel ->   user   switch + mmdrop() active
++	 *   user ->   user   switch
++	 *
++	 * switch_mm_cid() needs to be updated if the barriers provided
++	 * by context_switch() are modified.
++	 */
++	if (!next->mm) {                                // to kernel
++		enter_lazy_tlb(prev->active_mm, next);
++
++		next->active_mm = prev->active_mm;
++		if (prev->mm)                           // from user
++			mmgrab(prev->active_mm);
++		else
++			prev->active_mm = NULL;
++	} else {                                        // to user
++		membarrier_switch_mm(rq, prev->active_mm, next->mm);
++		/*
++		 * sys_membarrier() requires an smp_mb() between setting
++		 * rq->curr / membarrier_switch_mm() and returning to userspace.
++		 *
++		 * The below provides this either through switch_mm(), or in
++		 * case 'prev->active_mm == next->mm' through
++		 * finish_task_switch()'s mmdrop().
++		 */
++		switch_mm_irqs_off(prev->active_mm, next->mm, next);
++		lru_gen_use_mm(next->mm);
++
++		if (!prev->mm) {                        // from kernel
++			/* will mmdrop() in finish_task_switch(). */
++			rq->prev_mm = prev->active_mm;
++			prev->active_mm = NULL;
++		}
++	}
++
++	/* switch_mm_cid() requires the memory barriers above. */
++	switch_mm_cid(rq, prev, next);
++
++	prepare_lock_switch(rq, next);
++
++	/* Here we just switch the register state and the stack. */
++	switch_to(prev, next, prev);
++	barrier();
++
++	return finish_task_switch(prev);
++}
++
++/*
++ * nr_running, nr_uninterruptible and nr_context_switches:
++ *
++ * externally visible scheduler statistics: current number of runnable
++ * threads, total number of context switches performed since bootup.
++ */
++unsigned int nr_running(void)
++{
++	unsigned int i, sum = 0;
++
++	for_each_online_cpu(i)
++		sum += cpu_rq(i)->nr_running;
++
++	return sum;
++}
++
++/*
++ * Check if only the current task is running on the CPU.
++ *
++ * Caution: this function does not check that the caller has disabled
++ * preemption, thus the result might have a time-of-check-to-time-of-use
++ * race.  The caller is responsible to use it correctly, for example:
++ *
++ * - from a non-preemptible section (of course)
++ *
++ * - from a thread that is bound to a single CPU
++ *
++ * - in a loop with very short iterations (e.g. a polling loop)
++ */
++bool single_task_running(void)
++{
++	return raw_rq()->nr_running == 1;
++}
++EXPORT_SYMBOL(single_task_running);
++
++unsigned long long nr_context_switches_cpu(int cpu)
++{
++	return cpu_rq(cpu)->nr_switches;
++}
++
++unsigned long long nr_context_switches(void)
++{
++	int i;
++	unsigned long long sum = 0;
++
++	for_each_possible_cpu(i)
++		sum += cpu_rq(i)->nr_switches;
++
++	return sum;
++}
++
++/*
++ * Consumers of these two interfaces, like for example the cpuidle menu
++ * governor, are using nonsensical data. Preferring shallow idle state selection
++ * for a CPU that has IO-wait which might not even end up running the task when
++ * it does become runnable.
++ */
++
++unsigned int nr_iowait_cpu(int cpu)
++{
++	return atomic_read(&cpu_rq(cpu)->nr_iowait);
++}
++
++/*
++ * IO-wait accounting, and how it's mostly bollocks (on SMP).
++ *
++ * The idea behind IO-wait accounting is to account the idle time that we
++ * could have spent running if it were not for IO. That is, if we were to
++ * improve the storage performance, we'd have a proportional reduction in
++ * IO-wait time.
++ *
++ * This all works nicely on UP, where, when a task blocks on IO, we account
++ * idle time as IO-wait, because if the storage were faster, it could've been
++ * running and we'd not be idle.
++ *
++ * This has been extended to SMP, by doing the same for each CPU. This however
++ * is broken.
++ *
++ * Imagine for instance the case where two tasks block on one CPU, only the one
++ * CPU will have IO-wait accounted, while the other has regular idle. Even
++ * though, if the storage were faster, both could've run at the same time,
++ * utilising both CPUs.
++ *
++ * This means, that when looking globally, the current IO-wait accounting on
++ * SMP is a lower bound, by reason of under accounting.
++ *
++ * Worse, since the numbers are provided per CPU, they are sometimes
++ * interpreted per CPU, and that is nonsensical. A blocked task isn't strictly
++ * associated with any one particular CPU, it can wake to another CPU than it
++ * blocked on. This means the per CPU IO-wait number is meaningless.
++ *
++ * Task CPU affinities can make all that even more 'interesting'.
++ */
++
++unsigned int nr_iowait(void)
++{
++	unsigned int i, sum = 0;
++
++	for_each_possible_cpu(i)
++		sum += nr_iowait_cpu(i);
++
++	return sum;
++}
++
++#ifdef CONFIG_SMP
++
++/*
++ * sched_exec - execve() is a valuable balancing opportunity, because at
++ * this point the task has the smallest effective memory and cache
++ * footprint.
++ */
++void sched_exec(void)
++{
++}
++
++#endif
++
++DEFINE_PER_CPU(struct kernel_stat, kstat);
++DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
++
++EXPORT_PER_CPU_SYMBOL(kstat);
++EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
++
++static inline void update_curr(struct rq *rq, struct task_struct *p)
++{
++	s64 ns = rq->clock_task - p->last_ran;
++
++	p->sched_time += ns;
++	cgroup_account_cputime(p, ns);
++	account_group_exec_runtime(p, ns);
++
++	p->time_slice -= ns;
++	p->last_ran = rq->clock_task;
++}
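++
++/*
++ * E.g. if 1200000ns of rq->clock_task elapsed since p->last_ran,
++ * update_curr() adds 1.2ms to p->sched_time and consumes the same
++ * amount from the remaining p->time_slice.
++ */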
++
++/*
++ * Return accounted runtime for the task.
++ * In case the task is currently running, also return the pending runtime
++ * that has not been accounted yet.
++ */
++unsigned long long task_sched_runtime(struct task_struct *p)
++{
++	unsigned long flags;
++	struct rq *rq;
++	raw_spinlock_t *lock;
++	u64 ns;
++
++#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
++	/*
++	 * 64-bit doesn't need locks to atomically read a 64-bit value.
++	 * So we have an optimization chance when the task's delta_exec is 0.
++	 * Reading ->on_cpu is racy, but this is OK.
++	 *
++	 * If we race with it leaving CPU, we'll take a lock. So we're correct.
++	 * If we race with it entering CPU, unaccounted time is 0. This is
++	 * indistinguishable from the read occurring a few cycles earlier.
++	 * If we see ->on_cpu without ->on_rq, the task is leaving, and has
++	 * been accounted, so we're correct here as well.
++	 */
++	if (!p->on_cpu || !task_on_rq_queued(p))
++		return tsk_seruntime(p);
++#endif
++
++	rq = task_access_lock_irqsave(p, &lock, &flags);
++	/*
++	 * Must be ->curr _and_ ->on_rq.  If dequeued, we would
++	 * project cycles that may never be accounted to this
++	 * thread, breaking clock_gettime().
++	 */
++	if (p == rq->curr && task_on_rq_queued(p)) {
++		update_rq_clock(rq);
++		update_curr(rq, p);
++	}
++	ns = tsk_seruntime(p);
++	task_access_unlock_irqrestore(p, lock, &flags);
++
++	return ns;
++}
++
++/* This manages tasks that have run out of timeslice during a scheduler_tick */
++static inline void scheduler_task_tick(struct rq *rq)
++{
++	struct task_struct *p = rq->curr;
++
++	if (is_idle_task(p))
++		return;
++
++	update_curr(rq, p);
++	cpufreq_update_util(rq, 0);
++
++	/*
++	 * Tasks that have less than RESCHED_NS of time slice left will be
++	 * rescheduled.
++	 */
++	if (p->time_slice >= RESCHED_NS)
++		return;
++	set_tsk_need_resched(p);
++	set_preempt_need_resched();
++}
++
++#ifdef CONFIG_SCHED_DEBUG
++static u64 cpu_resched_latency(struct rq *rq)
++{
++	int latency_warn_ms = READ_ONCE(sysctl_resched_latency_warn_ms);
++	u64 resched_latency, now = rq_clock(rq);
++	static bool warned_once;
++
++	if (sysctl_resched_latency_warn_once && warned_once)
++		return 0;
++
++	if (!need_resched() || !latency_warn_ms)
++		return 0;
++
++	if (system_state == SYSTEM_BOOTING)
++		return 0;
++
++	if (!rq->last_seen_need_resched_ns) {
++		rq->last_seen_need_resched_ns = now;
++		rq->ticks_without_resched = 0;
++		return 0;
++	}
++
++	rq->ticks_without_resched++;
++	resched_latency = now - rq->last_seen_need_resched_ns;
++	if (resched_latency <= latency_warn_ms * NSEC_PER_MSEC)
++		return 0;
++
++	warned_once = true;
++
++	return resched_latency;
++}
++
++static int __init setup_resched_latency_warn_ms(char *str)
++{
++	long val;
++
++	if ((kstrtol(str, 0, &val))) {
++		pr_warn("Unable to set resched_latency_warn_ms\n");
++		return 1;
++	}
++
++	sysctl_resched_latency_warn_ms = val;
++	return 1;
++}
++__setup("resched_latency_warn_ms=", setup_resched_latency_warn_ms);
++#else
++static inline u64 cpu_resched_latency(struct rq *rq) { return 0; }
++#endif /* CONFIG_SCHED_DEBUG */
++
++/*
++ * This function gets called by the timer code, with HZ frequency.
++ * We call it with interrupts disabled.
++ */
++void sched_tick(void)
++{
++	int cpu __maybe_unused = smp_processor_id();
++	struct rq *rq = cpu_rq(cpu);
++	struct task_struct *curr = rq->curr;
++	u64 resched_latency;
++
++	if (housekeeping_cpu(cpu, HK_TYPE_TICK))
++		arch_scale_freq_tick();
++
++	sched_clock_tick();
++
++	raw_spin_lock(&rq->lock);
++	update_rq_clock(rq);
++
++	scheduler_task_tick(rq);
++	if (sched_feat(LATENCY_WARN))
++		resched_latency = cpu_resched_latency(rq);
++	calc_global_load_tick(rq);
++
++	task_tick_mm_cid(rq, rq->curr);
++
++	raw_spin_unlock(&rq->lock);
++
++	if (sched_feat(LATENCY_WARN) && resched_latency)
++		resched_latency_warn(cpu, resched_latency);
++
++	perf_event_task_tick();
++
++	if (curr->flags & PF_WQ_WORKER)
++		wq_worker_tick(curr);
++}
++
++#ifdef CONFIG_NO_HZ_FULL
++
++struct tick_work {
++	int			cpu;
++	atomic_t		state;
++	struct delayed_work	work;
++};
++/* Values for ->state, see diagram below. */
++#define TICK_SCHED_REMOTE_OFFLINE	0
++#define TICK_SCHED_REMOTE_OFFLINING	1
++#define TICK_SCHED_REMOTE_RUNNING	2
++
++/*
++ * State diagram for ->state:
++ *
++ *
++ *          TICK_SCHED_REMOTE_OFFLINE
++ *                    |   ^
++ *                    |   |
++ *                    |   | sched_tick_remote()
++ *                    |   |
++ *                    |   |
++ *                    +--TICK_SCHED_REMOTE_OFFLINING
++ *                    |   ^
++ *                    |   |
++ * sched_tick_start() |   | sched_tick_stop()
++ *                    |   |
++ *                    V   |
++ *          TICK_SCHED_REMOTE_RUNNING
++ *
++ *
++ * Other transitions get WARN_ON_ONCE(), except that sched_tick_remote()
++ * and sched_tick_start() are happy to leave the state in RUNNING.
++ */
++
++static struct tick_work __percpu *tick_work_cpu;
++
++static void sched_tick_remote(struct work_struct *work)
++{
++	struct delayed_work *dwork = to_delayed_work(work);
++	struct tick_work *twork = container_of(dwork, struct tick_work, work);
++	int cpu = twork->cpu;
++	struct rq *rq = cpu_rq(cpu);
++	int os;
++
++	/*
++	 * Handle the tick only if it appears the remote CPU is running in full
++	 * dynticks mode. The check is racy by nature, but missing a tick or
++	 * having one too many is no big deal because the scheduler tick updates
++	 * statistics and checks timeslices in a time-independent way, regardless
++	 * of when exactly it is running.
++	 */
++	if (tick_nohz_tick_stopped_cpu(cpu)) {
++		guard(raw_spinlock_irqsave)(&rq->lock);
++		struct task_struct *curr = rq->curr;
++
++		if (cpu_online(cpu)) {
++			update_rq_clock(rq);
++
++			if (!is_idle_task(curr)) {
++				/*
++				 * Make sure the next tick runs within a
++				 * reasonable amount of time.
++				 */
++				u64 delta = rq_clock_task(rq) - curr->last_ran;
++				WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
++			}
++			scheduler_task_tick(rq);
++
++			calc_load_nohz_remote(rq);
++		}
++	}
++
++	/*
++	 * Run the remote tick once per second (1Hz). This arbitrary
++	 * interval is long enough to avoid overload but short enough
++	 * to keep scheduler internal stats reasonably up to date.  But
++	 * first update state to reflect hotplug activity if required.
++	 */
++	os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING);
++	WARN_ON_ONCE(os == TICK_SCHED_REMOTE_OFFLINE);
++	if (os == TICK_SCHED_REMOTE_RUNNING)
++		queue_delayed_work(system_unbound_wq, dwork, HZ);
++}
++
++static void sched_tick_start(int cpu)
++{
++	int os;
++	struct tick_work *twork;
++
++	if (housekeeping_cpu(cpu, HK_TYPE_TICK))
++		return;
++
++	WARN_ON_ONCE(!tick_work_cpu);
++
++	twork = per_cpu_ptr(tick_work_cpu, cpu);
++	os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING);
++	WARN_ON_ONCE(os == TICK_SCHED_REMOTE_RUNNING);
++	if (os == TICK_SCHED_REMOTE_OFFLINE) {
++		twork->cpu = cpu;
++		INIT_DELAYED_WORK(&twork->work, sched_tick_remote);
++		queue_delayed_work(system_unbound_wq, &twork->work, HZ);
++	}
++}
++
++#ifdef CONFIG_HOTPLUG_CPU
++static void sched_tick_stop(int cpu)
++{
++	struct tick_work *twork;
++	int os;
++
++	if (housekeeping_cpu(cpu, HK_TYPE_TICK))
++		return;
++
++	WARN_ON_ONCE(!tick_work_cpu);
++
++	twork = per_cpu_ptr(tick_work_cpu, cpu);
++	/* There cannot be competing actions, but don't rely on stop-machine. */
++	os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_OFFLINING);
++	WARN_ON_ONCE(os != TICK_SCHED_REMOTE_RUNNING);
++	/* Don't cancel, as this would mess up the state machine. */
++}
++#endif /* CONFIG_HOTPLUG_CPU */
++
++int __init sched_tick_offload_init(void)
++{
++	tick_work_cpu = alloc_percpu(struct tick_work);
++	BUG_ON(!tick_work_cpu);
++	return 0;
++}
++
++#else /* !CONFIG_NO_HZ_FULL */
++static inline void sched_tick_start(int cpu) { }
++static inline void sched_tick_stop(int cpu) { }
++#endif
++
++#if defined(CONFIG_PREEMPTION) && (defined(CONFIG_DEBUG_PREEMPT) || \
++				defined(CONFIG_PREEMPT_TRACER))
++/*
++ * If the value passed in is equal to the current preempt count
++ * then we just disabled preemption. Start timing the latency.
++ */
++static inline void preempt_latency_start(int val)
++{
++	if (preempt_count() == val) {
++		unsigned long ip = get_lock_parent_ip();
++#ifdef CONFIG_DEBUG_PREEMPT
++		current->preempt_disable_ip = ip;
++#endif
++		trace_preempt_off(CALLER_ADDR0, ip);
++	}
++}
++
++void preempt_count_add(int val)
++{
++#ifdef CONFIG_DEBUG_PREEMPT
++	/*
++	 * Underflow?
++	 */
++	if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
++		return;
++#endif
++	__preempt_count_add(val);
++#ifdef CONFIG_DEBUG_PREEMPT
++	/*
++	 * Spinlock count overflowing soon?
++	 */
++	DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
++				PREEMPT_MASK - 10);
++#endif
++	preempt_latency_start(val);
++}
++EXPORT_SYMBOL(preempt_count_add);
++NOKPROBE_SYMBOL(preempt_count_add);
++
++/*
++ * If the value passed in equals the current preempt count
++ * then we just enabled preemption. Stop timing the latency.
++ */
++static inline void preempt_latency_stop(int val)
++{
++	if (preempt_count() == val)
++		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
++}
++
++void preempt_count_sub(int val)
++{
++#ifdef CONFIG_DEBUG_PREEMPT
++	/*
++	 * Underflow?
++	 */
++	if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
++		return;
++	/*
++	 * Is the spinlock portion underflowing?
++	 */
++	if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
++			!(preempt_count() & PREEMPT_MASK)))
++		return;
++#endif
++
++	preempt_latency_stop(val);
++	__preempt_count_sub(val);
++}
++EXPORT_SYMBOL(preempt_count_sub);
++NOKPROBE_SYMBOL(preempt_count_sub);
++
++#else
++static inline void preempt_latency_start(int val) { }
++static inline void preempt_latency_stop(int val) { }
++#endif
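++
++/*
++ * A minimal, self-contained userspace sketch (an analogue, not kernel
++ * code) of why preempt_latency_start()/preempt_latency_stop() above only
++ * fire at the outermost level: 'ex_count' models preempt_count(); timing
++ * starts on the 0 -> val transition and stops on val -> 0.
++ */
++#include <stdio.h>
++
++static int ex_count;
++
++static void ex_latency_start(int val)
++{
++	if (ex_count == val)		/* called after the add below */
++		printf("latency timing: start\n");
++}
++
++static void ex_latency_stop(int val)
++{
++	if (ex_count == val)		/* called before the sub below */
++		printf("latency timing: stop\n");
++}
++
++static void ex_count_add(int val) { ex_count += val; ex_latency_start(val); }
++static void ex_count_sub(int val) { ex_latency_stop(val); ex_count -= val; }
++
++int main(void)
++{
++	ex_count_add(1);	/* outermost disable: prints "start" */
++	ex_count_add(1);	/* nested: silent */
++	ex_count_sub(1);	/* nested: silent */
++	ex_count_sub(1);	/* outermost enable: prints "stop" */
++	return 0;
++}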
++
++static inline unsigned long get_preempt_disable_ip(struct task_struct *p)
++{
++#ifdef CONFIG_DEBUG_PREEMPT
++	return p->preempt_disable_ip;
++#else
++	return 0;
++#endif
++}
++
++/*
++ * Print scheduling while atomic bug:
++ */
++static noinline void __schedule_bug(struct task_struct *prev)
++{
++	/* Save this before calling printk(), since that will clobber it */
++	unsigned long preempt_disable_ip = get_preempt_disable_ip(current);
++
++	if (oops_in_progress)
++		return;
++
++	printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
++		prev->comm, prev->pid, preempt_count());
++
++	debug_show_held_locks(prev);
++	print_modules();
++	if (irqs_disabled())
++		print_irqtrace_events(prev);
++	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) {
++		pr_err("Preemption disabled at:");
++		print_ip_sym(KERN_ERR, preempt_disable_ip);
++	}
++	check_panic_on_warn("scheduling while atomic");
++
++	dump_stack();
++	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
++}
++
++/*
++ * Various schedule()-time debugging checks and statistics:
++ */
++static inline void schedule_debug(struct task_struct *prev, bool preempt)
++{
++#ifdef CONFIG_SCHED_STACK_END_CHECK
++	if (task_stack_end_corrupted(prev))
++		panic("corrupted stack end detected inside scheduler\n");
++
++	if (task_scs_end_corrupted(prev))
++		panic("corrupted shadow stack detected inside scheduler\n");
++#endif
++
++#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
++	if (!preempt && READ_ONCE(prev->__state) && prev->non_block_count) {
++		printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n",
++			prev->comm, prev->pid, prev->non_block_count);
++		dump_stack();
++		add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
++	}
++#endif
++
++	if (unlikely(in_atomic_preempt_off())) {
++		__schedule_bug(prev);
++		preempt_count_set(PREEMPT_DISABLED);
++	}
++	rcu_sleep_check();
++	SCHED_WARN_ON(ct_state() == CT_STATE_USER);
++
++	profile_hit(SCHED_PROFILING, __builtin_return_address(0));
++
++	schedstat_inc(this_rq()->sched_count);
++}
++
++#ifdef ALT_SCHED_DEBUG
++void alt_sched_debug(void)
++{
++	printk(KERN_INFO "sched: pending: 0x%04lx, idle: 0x%04lx, sg_idle: 0x%04lx,"
++	       " ecore_idle: 0x%04lx\n",
++	       sched_rq_pending_mask.bits[0],
++	       sched_idle_mask->bits[0],
++	       sched_pcore_idle_mask->bits[0],
++	       sched_ecore_idle_mask->bits[0]);
++}
++#endif
++
++#ifdef	CONFIG_SMP
++
++#ifdef CONFIG_PREEMPT_RT
++#define SCHED_NR_MIGRATE_BREAK 8
++#else
++#define SCHED_NR_MIGRATE_BREAK 32
++#endif
++
++const_debug unsigned int sysctl_sched_nr_migrate = SCHED_NR_MIGRATE_BREAK;
++
++/*
++ * Migrate pending tasks in @rq to @dest_cpu
++ */
++static inline int
++migrate_pending_tasks(struct rq *rq, struct rq *dest_rq, const int dest_cpu)
++{
++	struct task_struct *p, *skip = rq->curr;
++	int nr_migrated = 0;
++	int nr_tries = min(rq->nr_running / 2, sysctl_sched_nr_migrate);
++
++	/* Workaround to check that rq->curr is still on the rq */
++	if (!task_on_rq_queued(skip))
++		return 0;
++
++	while (skip != rq->idle && nr_tries &&
++	       (p = sched_rq_next_task(skip, rq)) != rq->idle) {
++		skip = sched_rq_next_task(p, rq);
++		if (cpumask_test_cpu(dest_cpu, p->cpus_ptr)) {
++			__SCHED_DEQUEUE_TASK(p, rq, 0, );
++			set_task_cpu(p, dest_cpu);
++			sched_task_sanity_check(p, dest_rq);
++			sched_mm_cid_migrate_to(dest_rq, p);
++			__SCHED_ENQUEUE_TASK(p, dest_rq, 0, );
++			nr_migrated++;
++		}
++		nr_tries--;
++	}
++
++	return nr_migrated;
++}
++
++static inline int take_other_rq_tasks(struct rq *rq, int cpu)
++{
++	cpumask_t *topo_mask, *end_mask, chk;
++
++	if (unlikely(!rq->online))
++		return 0;
++
++	if (cpumask_empty(&sched_rq_pending_mask))
++		return 0;
++
++	topo_mask = per_cpu(sched_cpu_topo_masks, cpu);
++	end_mask = per_cpu(sched_cpu_topo_end_mask, cpu);
++	do {
++		int i;
++
++		if (!cpumask_and(&chk, &sched_rq_pending_mask, topo_mask))
++			continue;
++
++		for_each_cpu_wrap(i, &chk, cpu) {
++			int nr_migrated;
++			struct rq *src_rq;
++
++			src_rq = cpu_rq(i);
++			if (!do_raw_spin_trylock(&src_rq->lock))
++				continue;
++			spin_acquire(&src_rq->lock.dep_map,
++				     SINGLE_DEPTH_NESTING, 1, _RET_IP_);
++
++			if ((nr_migrated = migrate_pending_tasks(src_rq, rq, cpu))) {
++				src_rq->nr_running -= nr_migrated;
++				if (src_rq->nr_running < 2)
++					cpumask_clear_cpu(i, &sched_rq_pending_mask);
++
++				spin_release(&src_rq->lock.dep_map, _RET_IP_);
++				do_raw_spin_unlock(&src_rq->lock);
++
++				rq->nr_running += nr_migrated;
++				if (rq->nr_running > 1)
++					cpumask_set_cpu(cpu, &sched_rq_pending_mask);
++
++				update_sched_preempt_mask(rq);
++				cpufreq_update_util(rq, 0);
++
++				return 1;
++			}
++
++			spin_release(&src_rq->lock.dep_map, _RET_IP_);
++			do_raw_spin_unlock(&src_rq->lock);
++		}
++	} while (++topo_mask < end_mask);
++
++	return 0;
++}
++#endif
++
++static inline void time_slice_expired(struct task_struct *p, struct rq *rq)
++{
++	p->time_slice = sysctl_sched_base_slice;
++
++	sched_task_renew(p, rq);
++
++	if (SCHED_FIFO != p->policy && task_on_rq_queued(p))
++		requeue_task(p, rq);
++}
++
++/*
++ * Timeslices below RESCHED_NS are considered as good as expired as there's no
++ * point rescheduling when there's so little time left.
++ */
++static inline void check_curr(struct task_struct *p, struct rq *rq)
++{
++	if (unlikely(rq->idle == p))
++		return;
++
++	update_curr(rq, p);
++
++	if (p->time_slice < RESCHED_NS)
++		time_slice_expired(p, rq);
++}
++
++static inline struct task_struct *
++choose_next_task(struct rq *rq, int cpu)
++{
++	struct task_struct *next = sched_rq_first_task(rq);
++
++	if (next == rq->idle) {
++#ifdef	CONFIG_SMP
++		if (!take_other_rq_tasks(rq, cpu)) {
++			if (likely(rq->balance_func && rq->online))
++				rq->balance_func(rq, cpu);
++#endif /* CONFIG_SMP */
++
++			schedstat_inc(rq->sched_goidle);
++			/*printk(KERN_INFO "sched: choose_next_task(%d) idle %px\n", cpu, next);*/
++			return next;
++#ifdef	CONFIG_SMP
++		}
++		next = sched_rq_first_task(rq);
++#endif
++	}
++#ifdef CONFIG_HIGH_RES_TIMERS
++	hrtick_start(rq, next->time_slice);
++#endif
++	/*printk(KERN_INFO "sched: choose_next_task(%d) next %px\n", cpu, next);*/
++	return next;
++}
++
++/*
++ * Constants for the sched_mode argument of __schedule().
++ *
++ * The mode argument allows RT enabled kernels to differentiate a
++ * preemption from blocking on an 'sleeping' spin/rwlock.
++ */
++#define SM_IDLE		(-1)
++#define SM_NONE		0
++#define SM_PREEMPT		1
++#define SM_RTLOCK_WAIT		2
++
++/*
++ * schedule() is the main scheduler function.
++ *
++ * The main means of driving the scheduler and thus entering this function are:
++ *
++ *   1. Explicit blocking: mutex, semaphore, waitqueue, etc.
++ *
++ *   2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
++ *      paths. For example, see arch/x86/entry_64.S.
++ *
++ *      To drive preemption between tasks, the scheduler sets the flag in timer
++ *      interrupt handler sched_tick().
++ *
++ *   3. Wakeups don't really cause entry into schedule(). They add a
++ *      task to the run-queue and that's it.
++ *
++ *      Now, if the new task added to the run-queue preempts the current
++ *      task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
++ *      called on the nearest possible occasion:
++ *
++ *       - If the kernel is preemptible (CONFIG_PREEMPTION=y):
++ *
++ *         - in syscall or exception context, at the next outmost
++ *           preempt_enable(). (this might be as soon as the wake_up()'s
++ *           spin_unlock()!)
++ *
++ *         - in IRQ context, return from interrupt-handler to
++ *           preemptible context
++ *
++ *       - If the kernel is not preemptible (CONFIG_PREEMPTION is not set)
++ *         then at the next:
++ *
++ *          - cond_resched() call
++ *          - explicit schedule() call
++ *          - return from syscall or exception to user-space
++ *          - return from interrupt-handler to user-space
++ *
++ * WARNING: must be called with preemption disabled!
++ */
++static void __sched notrace __schedule(int sched_mode)
++{
++	struct task_struct *prev, *next;
++	/*
++	 * On PREEMPT_RT kernels, SM_RTLOCK_WAIT is noted
++	 * as a preemption by schedule_debug() and RCU.
++	 */
++	bool preempt = sched_mode > SM_NONE;
++	unsigned long *switch_count;
++	unsigned long prev_state;
++	struct rq *rq;
++	int cpu;
++
++	cpu = smp_processor_id();
++	rq = cpu_rq(cpu);
++	prev = rq->curr;
++
++	schedule_debug(prev, preempt);
++
++	/* Bypass the sched_feat(HRTICK) check, which Alt schedule FW doesn't support */
++	hrtick_clear(rq);
++
++	local_irq_disable();
++	rcu_note_context_switch(preempt);
++
++	/*
++	 * Make sure that signal_pending_state()->signal_pending() below
++	 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
++	 * done by the caller to avoid the race with signal_wake_up():
++	 *
++	 * __set_current_state(@state)		signal_wake_up()
++	 * schedule()				  set_tsk_thread_flag(p, TIF_SIGPENDING)
++	 *					  wake_up_state(p, state)
++	 *   LOCK rq->lock			    LOCK p->pi_state
++	 *   smp_mb__after_spinlock()		    smp_mb__after_spinlock()
++	 *     if (signal_pending_state())	    if (p->state & @state)
++	 *
++	 * Also, the membarrier system call requires a full memory barrier
++	 * after coming from user-space, before storing to rq->curr; this
++	 * barrier matches a full barrier in the proximity of the membarrier
++	 * system call exit.
++	 */
++	raw_spin_lock(&rq->lock);
++	smp_mb__after_spinlock();
++
++	update_rq_clock(rq);
++
++	switch_count = &prev->nivcsw;
++
++	/* Task state changes only considers SM_PREEMPT as preemption */
++	preempt = sched_mode == SM_PREEMPT;
++
++	/*
++	 * We must load prev->state once (task_struct::state is volatile), such
++	 * that we form a control dependency vs deactivate_task() below.
++	 */
++	prev_state = READ_ONCE(prev->__state);
++	if (sched_mode == SM_IDLE) {
++		if (!rq->nr_running) {
++			next = prev;
++			goto picked;
++		}
++	} else if (!preempt && prev_state) {
++		if (signal_pending_state(prev_state, prev)) {
++			WRITE_ONCE(prev->__state, TASK_RUNNING);
++		} else {
++			prev->sched_contributes_to_load =
++				(prev_state & TASK_UNINTERRUPTIBLE) &&
++				!(prev_state & TASK_NOLOAD) &&
++				!(prev_state & TASK_FROZEN);
++
++			/*
++			 * __schedule()			ttwu()
++			 *   prev_state = prev->state;    if (p->on_rq && ...)
++			 *   if (prev_state)		    goto out;
++			 *     p->on_rq = 0;		  smp_acquire__after_ctrl_dep();
++			 *				  p->state = TASK_WAKING
++			 *
++			 * Where __schedule() and ttwu() have matching control dependencies.
++			 *
++			 * After this, schedule() must not care about p->state any more.
++			 */
++			sched_task_deactivate(prev, rq);
++			block_task(rq, prev);
++		}
++		switch_count = &prev->nvcsw;
++	}
++
++	check_curr(prev, rq);
++
++	next = choose_next_task(rq, cpu);
++picked:
++	clear_tsk_need_resched(prev);
++	clear_preempt_need_resched();
++#ifdef CONFIG_SCHED_DEBUG
++	rq->last_seen_need_resched_ns = 0;
++#endif
++
++	if (likely(prev != next)) {
++		next->last_ran = rq->clock_task;
++
++		/*printk(KERN_INFO "sched: %px -> %px\n", prev, next);*/
++		rq->nr_switches++;
++		/*
++		 * RCU users of rcu_dereference(rq->curr) may not see
++		 * changes to task_struct made by pick_next_task().
++		 */
++		RCU_INIT_POINTER(rq->curr, next);
++		/*
++		 * The membarrier system call requires each architecture
++		 * to have a full memory barrier after updating
++		 * rq->curr, before returning to user-space.
++		 *
++		 * Here are the schemes providing that barrier on the
++		 * various architectures:
++		 * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC,
++		 *   RISC-V.  switch_mm() relies on membarrier_arch_switch_mm()
++		 *   on PowerPC and on RISC-V.
++		 * - finish_lock_switch() for weakly-ordered
++		 *   architectures where spin_unlock is a full barrier,
++		 * - switch_to() for arm64 (weakly-ordered, spin_unlock
++		 *   is a RELEASE barrier),
++		 *
++		 * The barrier matches a full barrier in the proximity of
++		 * the membarrier system call entry.
++		 *
++		 * On RISC-V, this barrier pairing is also needed for the
++		 * SYNC_CORE command when switching between processes, cf.
++		 * the inline comments in membarrier_arch_switch_mm().
++		 */
++		++*switch_count;
++
++		trace_sched_switch(preempt, prev, next, prev_state);
++
++		/* Also unlocks the rq: */
++		rq = context_switch(rq, prev, next);
++
++		cpu = cpu_of(rq);
++	} else {
++		__balance_callbacks(rq);
++		raw_spin_unlock_irq(&rq->lock);
++	}
++}
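++
++/*
++ * A minimal caller-side sketch of entry point 1) above (hypothetical
++ * names, kept under #if 0 as an example only): the task state is set
++ * *before* the condition is re-checked, which is exactly the ordering
++ * the signal_wake_up() diagram in __schedule() relies on.
++ */
++#if 0
++static DECLARE_WAIT_QUEUE_HEAD(my_wq);	/* hypothetical wait queue */
++static bool my_cond;			/* hypothetical wakeup condition */
++
++static void my_wait(void)
++{
++	DEFINE_WAIT(wait);
++
++	for (;;) {
++		prepare_to_wait(&my_wq, &wait, TASK_INTERRUPTIBLE);
++		if (READ_ONCE(my_cond) || signal_pending(current))
++			break;
++		schedule();		/* SM_NONE: a voluntary block */
++	}
++	finish_wait(&my_wq, &wait);
++}
++#endif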
++
++void __noreturn do_task_dead(void)
++{
++	/* Causes final put_task_struct in finish_task_switch(): */
++	set_special_state(TASK_DEAD);
++
++	/* Tell freezer to ignore us: */
++	current->flags |= PF_NOFREEZE;
++
++	__schedule(SM_NONE);
++	BUG();
++
++	/* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */
++	for (;;)
++		cpu_relax();
++}
++
++static inline void sched_submit_work(struct task_struct *tsk)
++{
++	static DEFINE_WAIT_OVERRIDE_MAP(sched_map, LD_WAIT_CONFIG);
++	unsigned int task_flags;
++
++	/*
++	 * Establish LD_WAIT_CONFIG context to ensure none of the code called
++	 * will use a blocking primitive -- which would lead to recursion.
++	 */
++	lock_map_acquire_try(&sched_map);
++
++	task_flags = tsk->flags;
++	/*
++	 * If a worker goes to sleep, notify and ask workqueue whether it
++	 * wants to wake up a task to maintain concurrency.
++	 */
++	if (task_flags & PF_WQ_WORKER)
++		wq_worker_sleeping(tsk);
++	else if (task_flags & PF_IO_WORKER)
++		io_wq_worker_sleeping(tsk);
++
++	/*
++	 * spinlock and rwlock must not flush block requests.  This will
++	 * deadlock if the callback attempts to acquire a lock which is
++	 * already acquired.
++	 */
++	SCHED_WARN_ON(current->__state & TASK_RTLOCK_WAIT);
++
++	/*
++	 * If we are going to sleep and we have plugged IO queued,
++	 * make sure to submit it to avoid deadlocks.
++	 */
++	blk_flush_plug(tsk->plug, true);
++
++	lock_map_release(&sched_map);
++}
++
++static void sched_update_worker(struct task_struct *tsk)
++{
++	if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER | PF_BLOCK_TS)) {
++		if (tsk->flags & PF_BLOCK_TS)
++			blk_plug_invalidate_ts(tsk);
++		if (tsk->flags & PF_WQ_WORKER)
++			wq_worker_running(tsk);
++		else if (tsk->flags & PF_IO_WORKER)
++			io_wq_worker_running(tsk);
++	}
++}
++
++static __always_inline void __schedule_loop(int sched_mode)
++{
++	do {
++		preempt_disable();
++		__schedule(sched_mode);
++		sched_preempt_enable_no_resched();
++	} while (need_resched());
++}
++
++asmlinkage __visible void __sched schedule(void)
++{
++	struct task_struct *tsk = current;
++
++#ifdef CONFIG_RT_MUTEXES
++	lockdep_assert(!tsk->sched_rt_mutex);
++#endif
++
++	if (!task_is_running(tsk))
++		sched_submit_work(tsk);
++	__schedule_loop(SM_NONE);
++	sched_update_worker(tsk);
++}
++EXPORT_SYMBOL(schedule);
++
++/*
++ * synchronize_rcu_tasks() makes sure that no task is stuck in preempted
++ * state (i.e. has scheduled out non-voluntarily) by making sure that all
++ * tasks have either left the run queue or have gone into user space.
++ * As idle tasks do not do either, they must not ever be preempted
++ * (schedule out non-voluntarily).
++ *
++ * schedule_idle() is similar to schedule_preempt_disabled() except that it
++ * never enables preemption because it does not call sched_submit_work().
++ */
++void __sched schedule_idle(void)
++{
++	/*
++	 * As this skips calling sched_submit_work(), which the idle task does
++	 * regardless because that function is a NOP when the task is in a
++	 * TASK_RUNNING state, make sure this isn't used someplace that the
++	 * current task can be in any other state. Note, idle is always in the
++	 * TASK_RUNNING state.
++	 */
++	WARN_ON_ONCE(current->__state);
++	do {
++		__schedule(SM_IDLE);
++	} while (need_resched());
++}
++
++#if defined(CONFIG_CONTEXT_TRACKING_USER) && !defined(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK)
++asmlinkage __visible void __sched schedule_user(void)
++{
++	/*
++	 * If we come here after a random call to set_need_resched(),
++	 * or we have been woken up remotely but the IPI has not yet arrived,
++	 * we haven't yet exited the RCU idle mode. Do it here manually until
++	 * we find a better solution.
++	 *
++	 * NB: There are buggy callers of this function.  Ideally we
++	 * should warn if prev_state != CT_STATE_USER, but that will trigger
++	 * too frequently to make sense yet.
++	 */
++	enum ctx_state prev_state = exception_enter();
++	schedule();
++	exception_exit(prev_state);
++}
++#endif
++
++/**
++ * schedule_preempt_disabled - called with preemption disabled
++ *
++ * Returns with preemption disabled. Note: preempt_count must be 1
++ */
++void __sched schedule_preempt_disabled(void)
++{
++	sched_preempt_enable_no_resched();
++	schedule();
++	preempt_disable();
++}
++
++#ifdef CONFIG_PREEMPT_RT
++void __sched notrace schedule_rtlock(void)
++{
++	__schedule_loop(SM_RTLOCK_WAIT);
++}
++NOKPROBE_SYMBOL(schedule_rtlock);
++#endif
++
++static void __sched notrace preempt_schedule_common(void)
++{
++	do {
++		/*
++		 * Because the function tracer can trace preempt_count_sub()
++		 * and it also uses preempt_enable/disable_notrace(), if
++		 * NEED_RESCHED is set, the preempt_enable_notrace() called
++		 * by the function tracer will call this function again and
++		 * cause infinite recursion.
++		 *
++		 * Preemption must be disabled here before the function
++		 * tracer can trace. Break up preempt_disable() into two
++		 * calls. One to disable preemption without fear of being
++		 * traced. The other to still record the preemption latency,
++		 * which can also be traced by the function tracer.
++		 */
++		preempt_disable_notrace();
++		preempt_latency_start(1);
++		__schedule(SM_PREEMPT);
++		preempt_latency_stop(1);
++		preempt_enable_no_resched_notrace();
++
++		/*
++		 * Check again in case we missed a preemption opportunity
++		 * between schedule and now.
++		 */
++	} while (need_resched());
++}
++
++#ifdef CONFIG_PREEMPTION
++/*
++ * This is the entry point to schedule() from in-kernel preemption
++ * off of preempt_enable.
++ */
++asmlinkage __visible void __sched notrace preempt_schedule(void)
++{
++	/*
++	 * If there is a non-zero preempt_count or interrupts are disabled,
++	 * we do not want to preempt the current task. Just return..
++	 */
++	if (likely(!preemptible()))
++		return;
++
++	preempt_schedule_common();
++}
++NOKPROBE_SYMBOL(preempt_schedule);
++EXPORT_SYMBOL(preempt_schedule);
++
++#ifdef CONFIG_PREEMPT_DYNAMIC
++#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
++#ifndef preempt_schedule_dynamic_enabled
++#define preempt_schedule_dynamic_enabled	preempt_schedule
++#define preempt_schedule_dynamic_disabled	NULL
++#endif
++DEFINE_STATIC_CALL(preempt_schedule, preempt_schedule_dynamic_enabled);
++EXPORT_STATIC_CALL_TRAMP(preempt_schedule);
++#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
++static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule);
++void __sched notrace dynamic_preempt_schedule(void)
++{
++	if (!static_branch_unlikely(&sk_dynamic_preempt_schedule))
++		return;
++	preempt_schedule();
++}
++NOKPROBE_SYMBOL(dynamic_preempt_schedule);
++EXPORT_SYMBOL(dynamic_preempt_schedule);
++#endif
++#endif
++
++/**
++ * preempt_schedule_notrace - preempt_schedule called by tracing
++ *
++ * The tracing infrastructure uses preempt_enable_notrace to prevent
++ * recursion and tracing preempt enabling caused by the tracing
++ * infrastructure itself. But as tracing can happen in areas coming
++ * from userspace or just about to enter userspace, a preempt enable
++ * can occur before user_exit() is called. This will cause the scheduler
++ * to be called when the system is still in usermode.
++ *
++ * To prevent this, the preempt_enable_notrace will use this function
++ * instead of preempt_schedule() to exit user context if needed before
++ * calling the scheduler.
++ */
++asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
++{
++	enum ctx_state prev_ctx;
++
++	if (likely(!preemptible()))
++		return;
++
++	do {
++		/*
++		 * Because the function tracer can trace preempt_count_sub()
++		 * and it also uses preempt_enable/disable_notrace(), if
++		 * NEED_RESCHED is set, the preempt_enable_notrace() called
++		 * by the function tracer will call this function again and
++		 * cause infinite recursion.
++		 *
++		 * Preemption must be disabled here before the function
++		 * tracer can trace. Break up preempt_disable() into two
++		 * calls. One to disable preemption without fear of being
++		 * traced. The other to still record the preemption latency,
++		 * which can also be traced by the function tracer.
++		 */
++		preempt_disable_notrace();
++		preempt_latency_start(1);
++		/*
++		 * Needs preempt disabled in case user_exit() is traced
++		 * and the tracer calls preempt_enable_notrace() causing
++		 * an infinite recursion.
++		 */
++		prev_ctx = exception_enter();
++		__schedule(SM_PREEMPT);
++		exception_exit(prev_ctx);
++
++		preempt_latency_stop(1);
++		preempt_enable_no_resched_notrace();
++	} while (need_resched());
++}
++EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
++
++#ifdef CONFIG_PREEMPT_DYNAMIC
++#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
++#ifndef preempt_schedule_notrace_dynamic_enabled
++#define preempt_schedule_notrace_dynamic_enabled	preempt_schedule_notrace
++#define preempt_schedule_notrace_dynamic_disabled	NULL
++#endif
++DEFINE_STATIC_CALL(preempt_schedule_notrace, preempt_schedule_notrace_dynamic_enabled);
++EXPORT_STATIC_CALL_TRAMP(preempt_schedule_notrace);
++#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
++static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule_notrace);
++void __sched notrace dynamic_preempt_schedule_notrace(void)
++{
++	if (!static_branch_unlikely(&sk_dynamic_preempt_schedule_notrace))
++		return;
++	preempt_schedule_notrace();
++}
++NOKPROBE_SYMBOL(dynamic_preempt_schedule_notrace);
++EXPORT_SYMBOL(dynamic_preempt_schedule_notrace);
++#endif
++#endif
++
++#endif /* CONFIG_PREEMPTION */
++
++/*
++ * This is the entry point to schedule() from kernel preemption
++ * off of IRQ context.
++ * Note that this is called and returns with IRQs disabled. This will
++ * protect us against recursive calls from IRQ contexts.
++ */
++asmlinkage __visible void __sched preempt_schedule_irq(void)
++{
++	enum ctx_state prev_state;
++
++	/* Catch callers which need to be fixed */
++	BUG_ON(preempt_count() || !irqs_disabled());
++
++	prev_state = exception_enter();
++
++	do {
++		preempt_disable();
++		local_irq_enable();
++		__schedule(SM_PREEMPT);
++		local_irq_disable();
++		sched_preempt_enable_no_resched();
++	} while (need_resched());
++
++	exception_exit(prev_state);
++}
++
++int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags,
++			  void *key)
++{
++	WARN_ON_ONCE(IS_ENABLED(CONFIG_SCHED_DEBUG) && wake_flags & ~(WF_SYNC|WF_CURRENT_CPU));
++	return try_to_wake_up(curr->private, mode, wake_flags);
++}
++EXPORT_SYMBOL(default_wake_function);
++
++void check_task_changed(struct task_struct *p, struct rq *rq)
++{
++	/* Trigger resched if task sched_prio has been modified. */
++	if (task_on_rq_queued(p)) {
++		update_rq_clock(rq);
++		requeue_task(p, rq);
++		wakeup_preempt(rq);
++	}
++}
++
++void __setscheduler_prio(struct task_struct *p, int prio)
++{
++	p->prio = prio;
++}
++
++#ifdef CONFIG_RT_MUTEXES
++
++/*
++ * Would be more useful with typeof()/auto_type but they don't mix with
++ * bit-fields. Since it's a local thing, use int. Keep the generic sounding
++ * name such that if someone were to implement this function we get to compare
++ * notes.
++ */
++#define fetch_and_set(x, v) ({ int _x = (x); (x) = (v); _x; })
++
++void rt_mutex_pre_schedule(void)
++{
++	lockdep_assert(!fetch_and_set(current->sched_rt_mutex, 1));
++	sched_submit_work(current);
++}
++
++void rt_mutex_schedule(void)
++{
++	lockdep_assert(current->sched_rt_mutex);
++	__schedule_loop(SM_NONE);
++}
++
++void rt_mutex_post_schedule(void)
++{
++	sched_update_worker(current);
++	lockdep_assert(fetch_and_set(current->sched_rt_mutex, 0));
++}
++
++/*
++ * rt_mutex_setprio - set the current priority of a task
++ * @p: task to boost
++ * @pi_task: donor task
++ *
++ * This function changes the 'effective' priority of a task. It does
++ * not touch ->normal_prio like __setscheduler().
++ *
++ * Used by the rt_mutex code to implement priority inheritance
++ * logic. Call site only calls if the priority of the task changed.
++ */
++void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
++{
++	int prio;
++	struct rq *rq;
++	raw_spinlock_t *lock;
++
++	/* XXX used to be waiter->prio, not waiter->task->prio */
++	prio = __rt_effective_prio(pi_task, p->normal_prio);
++
++	/*
++	 * If nothing changed; bail early.
++	 */
++	if (p->pi_top_task == pi_task && prio == p->prio)
++		return;
++
++	rq = __task_access_lock(p, &lock);
++	/*
++	 * Set under pi_lock && rq->lock, such that the value can be used under
++	 * either lock.
++	 *
++	 * Note that there are loads of tricky details to make this pointer cache work
++	 * right. rt_mutex_slowunlock()+rt_mutex_postunlock() work together to
++	 * ensure a task is de-boosted (pi_task is set to NULL) before the
++	 * task is allowed to run again (and can exit). This ensures the pointer
++	 * points to a blocked task -- which guarantees the task is present.
++	 */
++	p->pi_top_task = pi_task;
++
++	/*
++	 * For FIFO/RR we only need to set prio, if that matches we're done.
++	 */
++	if (prio == p->prio)
++		goto out_unlock;
++
++	/*
++	 * Idle task boosting is a no-no in general. There is one
++	 * exception, when PREEMPT_RT and NOHZ is active:
++	 *
++	 * The idle task calls get_next_timer_interrupt() and holds
++	 * the timer wheel base->lock on the CPU and another CPU wants
++	 * to access the timer (probably to cancel it). We can safely
++	 * ignore the boosting request, as the idle CPU runs this code
++	 * with interrupts disabled and will complete the lock
++	 * protected section without being interrupted. So there is no
++	 * real need to boost.
++	 */
++	if (unlikely(p == rq->idle)) {
++		WARN_ON(p != rq->curr);
++		WARN_ON(p->pi_blocked_on);
++		goto out_unlock;
++	}
++
++	trace_sched_pi_setprio(p, pi_task);
++
++	__setscheduler_prio(p, prio);
++
++	check_task_changed(p, rq);
++out_unlock:
++	/* Avoid rq from going away on us: */
++	preempt_disable();
++
++	if (task_on_rq_queued(p))
++		__balance_callbacks(rq);
++	__task_access_unlock(p, lock);
++
++	preempt_enable();
++}
++#endif
++
++#if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
++int __sched __cond_resched(void)
++{
++	if (should_resched(0)) {
++		preempt_schedule_common();
++		return 1;
++	}
++	/*
++	 * In preemptible kernels, ->rcu_read_lock_nesting tells the tick
++	 * whether the current CPU is in an RCU read-side critical section,
++	 * so the tick can report quiescent states even for CPUs looping
++	 * in kernel context.  In contrast, in non-preemptible kernels,
++	 * RCU readers leave no in-memory hints, which means that CPU-bound
++	 * processes executing in kernel context might never report an
++	 * RCU quiescent state.  Therefore, the following code causes
++	 * cond_resched() to report a quiescent state, but only when RCU
++	 * is in urgent need of one.
++	 */
++#ifndef CONFIG_PREEMPT_RCU
++	rcu_all_qs();
++#endif
++	return 0;
++}
++EXPORT_SYMBOL(__cond_resched);
++#endif
++
++#ifdef CONFIG_PREEMPT_DYNAMIC
++#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
++#define cond_resched_dynamic_enabled	__cond_resched
++#define cond_resched_dynamic_disabled	((void *)&__static_call_return0)
++DEFINE_STATIC_CALL_RET0(cond_resched, __cond_resched);
++EXPORT_STATIC_CALL_TRAMP(cond_resched);
++
++#define might_resched_dynamic_enabled	__cond_resched
++#define might_resched_dynamic_disabled	((void *)&__static_call_return0)
++DEFINE_STATIC_CALL_RET0(might_resched, __cond_resched);
++EXPORT_STATIC_CALL_TRAMP(might_resched);
++#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
++static DEFINE_STATIC_KEY_FALSE(sk_dynamic_cond_resched);
++int __sched dynamic_cond_resched(void)
++{
++	klp_sched_try_switch();
++	if (!static_branch_unlikely(&sk_dynamic_cond_resched))
++		return 0;
++	return __cond_resched();
++}
++EXPORT_SYMBOL(dynamic_cond_resched);
++
++static DEFINE_STATIC_KEY_FALSE(sk_dynamic_might_resched);
++int __sched dynamic_might_resched(void)
++{
++	if (!static_branch_unlikely(&sk_dynamic_might_resched))
++		return 0;
++	return __cond_resched();
++}
++EXPORT_SYMBOL(dynamic_might_resched);
++#endif
++#endif
++
++/*
++ * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
++ * call schedule, and on return reacquire the lock.
++ *
++ * This works OK both with and without CONFIG_PREEMPTION.  We do strange low-level
++ * operations here to prevent schedule() from being called twice (once via
++ * spin_unlock(), once by hand).
++ */
++int __cond_resched_lock(spinlock_t *lock)
++{
++	int resched = should_resched(PREEMPT_LOCK_OFFSET);
++	int ret = 0;
++
++	lockdep_assert_held(lock);
++
++	if (spin_needbreak(lock) || resched) {
++		spin_unlock(lock);
++		if (!_cond_resched())
++			cpu_relax();
++		ret = 1;
++		spin_lock(lock);
++	}
++	return ret;
++}
++EXPORT_SYMBOL(__cond_resched_lock);
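++
++/*
++ * A minimal usage sketch for the helper above (hypothetical names, kept
++ * under #if 0 as an example only): walk a long list under a spinlock
++ * while giving other tasks and lock waiters a chance to run. It assumes
++ * the list cannot change while the lock is momentarily dropped; real
++ * callers must otherwise restart the walk or use a cursor.
++ */
++#if 0
++static DEFINE_SPINLOCK(my_lock);
++static LIST_HEAD(my_list);
++struct my_item { struct list_head node; };
++
++static void my_scan(void)
++{
++	struct my_item *it;
++
++	spin_lock(&my_lock);
++	list_for_each_entry(it, &my_list, node) {
++		/* ... process *it ... */
++		cond_resched_lock(&my_lock);	/* may drop and re-take my_lock */
++	}
++	spin_unlock(&my_lock);
++}
++#endif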
++
++int __cond_resched_rwlock_read(rwlock_t *lock)
++{
++	int resched = should_resched(PREEMPT_LOCK_OFFSET);
++	int ret = 0;
++
++	lockdep_assert_held_read(lock);
++
++	if (rwlock_needbreak(lock) || resched) {
++		read_unlock(lock);
++		if (!_cond_resched())
++			cpu_relax();
++		ret = 1;
++		read_lock(lock);
++	}
++	return ret;
++}
++EXPORT_SYMBOL(__cond_resched_rwlock_read);
++
++int __cond_resched_rwlock_write(rwlock_t *lock)
++{
++	int resched = should_resched(PREEMPT_LOCK_OFFSET);
++	int ret = 0;
++
++	lockdep_assert_held_write(lock);
++
++	if (rwlock_needbreak(lock) || resched) {
++		write_unlock(lock);
++		if (!_cond_resched())
++			cpu_relax();
++		ret = 1;
++		write_lock(lock);
++	}
++	return ret;
++}
++EXPORT_SYMBOL(__cond_resched_rwlock_write);
++
++#ifdef CONFIG_PREEMPT_DYNAMIC
++
++#ifdef CONFIG_GENERIC_ENTRY
++#include <linux/entry-common.h>
++#endif
++
++/*
++ * SC:cond_resched
++ * SC:might_resched
++ * SC:preempt_schedule
++ * SC:preempt_schedule_notrace
++ * SC:irqentry_exit_cond_resched
++ *
++ *
++ * NONE:
++ *   cond_resched               <- __cond_resched
++ *   might_resched              <- RET0
++ *   preempt_schedule           <- NOP
++ *   preempt_schedule_notrace   <- NOP
++ *   irqentry_exit_cond_resched <- NOP
++ *
++ * VOLUNTARY:
++ *   cond_resched               <- __cond_resched
++ *   might_resched              <- __cond_resched
++ *   preempt_schedule           <- NOP
++ *   preempt_schedule_notrace   <- NOP
++ *   irqentry_exit_cond_resched <- NOP
++ *
++ * FULL:
++ *   cond_resched               <- RET0
++ *   might_resched              <- RET0
++ *   preempt_schedule           <- preempt_schedule
++ *   preempt_schedule_notrace   <- preempt_schedule_notrace
++ *   irqentry_exit_cond_resched <- irqentry_exit_cond_resched
++ */
++
++enum {
++	preempt_dynamic_undefined = -1,
++	preempt_dynamic_none,
++	preempt_dynamic_voluntary,
++	preempt_dynamic_full,
++};
++
++int preempt_dynamic_mode = preempt_dynamic_undefined;
++
++int sched_dynamic_mode(const char *str)
++{
++	if (!strcmp(str, "none"))
++		return preempt_dynamic_none;
++
++	if (!strcmp(str, "voluntary"))
++		return preempt_dynamic_voluntary;
++
++	if (!strcmp(str, "full"))
++		return preempt_dynamic_full;
++
++	return -EINVAL;
++}
++
++#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
++#define preempt_dynamic_enable(f)	static_call_update(f, f##_dynamic_enabled)
++#define preempt_dynamic_disable(f)	static_call_update(f, f##_dynamic_disabled)
++#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
++#define preempt_dynamic_enable(f)	static_key_enable(&sk_dynamic_##f.key)
++#define preempt_dynamic_disable(f)	static_key_disable(&sk_dynamic_##f.key)
++#else
++#error "Unsupported PREEMPT_DYNAMIC mechanism"
++#endif
++
++static DEFINE_MUTEX(sched_dynamic_mutex);
++static bool klp_override;
++
++static void __sched_dynamic_update(int mode)
++{
++	/*
++	 * Avoid {NONE,VOLUNTARY} -> FULL transitions from ever ending up in
++	 * the ZERO state, which is invalid.
++	 */
++	if (!klp_override)
++		preempt_dynamic_enable(cond_resched);
++	preempt_dynamic_enable(might_resched);
++	preempt_dynamic_enable(preempt_schedule);
++	preempt_dynamic_enable(preempt_schedule_notrace);
++	preempt_dynamic_enable(irqentry_exit_cond_resched);
++
++	switch (mode) {
++	case preempt_dynamic_none:
++		if (!klp_override)
++			preempt_dynamic_enable(cond_resched);
++		preempt_dynamic_disable(might_resched);
++		preempt_dynamic_disable(preempt_schedule);
++		preempt_dynamic_disable(preempt_schedule_notrace);
++		preempt_dynamic_disable(irqentry_exit_cond_resched);
++		if (mode != preempt_dynamic_mode)
++			pr_info("Dynamic Preempt: none\n");
++		break;
++
++	case preempt_dynamic_voluntary:
++		if (!klp_override)
++			preempt_dynamic_enable(cond_resched);
++		preempt_dynamic_enable(might_resched);
++		preempt_dynamic_disable(preempt_schedule);
++		preempt_dynamic_disable(preempt_schedule_notrace);
++		preempt_dynamic_disable(irqentry_exit_cond_resched);
++		if (mode != preempt_dynamic_mode)
++			pr_info("Dynamic Preempt: voluntary\n");
++		break;
++
++	case preempt_dynamic_full:
++		if (!klp_override)
++			preempt_dynamic_enable(cond_resched);
++		preempt_dynamic_disable(might_resched);
++		preempt_dynamic_enable(preempt_schedule);
++		preempt_dynamic_enable(preempt_schedule_notrace);
++		preempt_dynamic_enable(irqentry_exit_cond_resched);
++		if (mode != preempt_dynamic_mode)
++			pr_info("Dynamic Preempt: full\n");
++		break;
++	}
++
++	preempt_dynamic_mode = mode;
++}
++
++void sched_dynamic_update(int mode)
++{
++	mutex_lock(&sched_dynamic_mutex);
++	__sched_dynamic_update(mode);
++	mutex_unlock(&sched_dynamic_mutex);
++}
++
++#ifdef CONFIG_HAVE_PREEMPT_DYNAMIC_CALL
++
++static int klp_cond_resched(void)
++{
++	__klp_sched_try_switch();
++	return __cond_resched();
++}
++
++void sched_dynamic_klp_enable(void)
++{
++	mutex_lock(&sched_dynamic_mutex);
++
++	klp_override = true;
++	static_call_update(cond_resched, klp_cond_resched);
++
++	mutex_unlock(&sched_dynamic_mutex);
++}
++
++void sched_dynamic_klp_disable(void)
++{
++	mutex_lock(&sched_dynamic_mutex);
++
++	klp_override = false;
++	__sched_dynamic_update(preempt_dynamic_mode);
++
++	mutex_unlock(&sched_dynamic_mutex);
++}
++
++#endif /* CONFIG_HAVE_PREEMPT_DYNAMIC_CALL */
++
++
++static int __init setup_preempt_mode(char *str)
++{
++	int mode = sched_dynamic_mode(str);
++	if (mode < 0) {
++		pr_warn("Dynamic Preempt: unsupported mode: %s\n", str);
++		return 0;
++	}
++
++	sched_dynamic_update(mode);
++	return 1;
++}
++__setup("preempt=", setup_preempt_mode);
++
++static void __init preempt_dynamic_init(void)
++{
++	if (preempt_dynamic_mode == preempt_dynamic_undefined) {
++		if (IS_ENABLED(CONFIG_PREEMPT_NONE)) {
++			sched_dynamic_update(preempt_dynamic_none);
++		} else if (IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY)) {
++			sched_dynamic_update(preempt_dynamic_voluntary);
++		} else {
++			/* Default static call setting, nothing to do */
++			WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT));
++			preempt_dynamic_mode = preempt_dynamic_full;
++			pr_info("Dynamic Preempt: full\n");
++		}
++	}
++}
++
++#define PREEMPT_MODEL_ACCESSOR(mode) \
++	bool preempt_model_##mode(void)						 \
++	{									 \
++		WARN_ON_ONCE(preempt_dynamic_mode == preempt_dynamic_undefined); \
++		return preempt_dynamic_mode == preempt_dynamic_##mode;		 \
++	}									 \
++	EXPORT_SYMBOL_GPL(preempt_model_##mode)
++
++PREEMPT_MODEL_ACCESSOR(none);
++PREEMPT_MODEL_ACCESSOR(voluntary);
++PREEMPT_MODEL_ACCESSOR(full);
++
++#else /* !CONFIG_PREEMPT_DYNAMIC: */
++
++static inline void preempt_dynamic_init(void) { }
++
++#endif /* CONFIG_PREEMPT_DYNAMIC */
++
++int io_schedule_prepare(void)
++{
++	int old_iowait = current->in_iowait;
++
++	current->in_iowait = 1;
++	blk_flush_plug(current->plug, true);
++	return old_iowait;
++}
++
++void io_schedule_finish(int token)
++{
++	current->in_iowait = token;
++}
++
++/*
++ * This task is about to go to sleep on IO.  Increment rq->nr_iowait so
++ * that process accounting knows that this is a task in IO wait state.
++ *
++ * But don't do that if it is a deliberate, throttling IO wait (this task
++ * has set its backing_dev_info: the queue against which it should throttle)
++ */
++
++long __sched io_schedule_timeout(long timeout)
++{
++	int token;
++	long ret;
++
++	token = io_schedule_prepare();
++	ret = schedule_timeout(timeout);
++	io_schedule_finish(token);
++
++	return ret;
++}
++EXPORT_SYMBOL(io_schedule_timeout);
++
++void __sched io_schedule(void)
++{
++	int token;
++
++	token = io_schedule_prepare();
++	schedule();
++	io_schedule_finish(token);
++}
++EXPORT_SYMBOL(io_schedule);
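++
++/*
++ * A minimal usage sketch (hypothetical names, kept under #if 0 as an
++ * example only): bracketing a custom wait with io_schedule_prepare()/
++ * io_schedule_finish() so the sleep is accounted as IO wait, exactly as
++ * io_schedule_timeout() does above.
++ */
++#if 0
++static DECLARE_WAIT_QUEUE_HEAD(my_io_wq);
++static bool my_io_done;
++
++static void my_io_wait(void)
++{
++	int token = io_schedule_prepare();
++
++	wait_event(my_io_wq, READ_ONCE(my_io_done));
++	io_schedule_finish(token);
++}
++#endif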
++
++void sched_show_task(struct task_struct *p)
++{
++	unsigned long free;
++	int ppid;
++
++	if (!try_get_task_stack(p))
++		return;
++
++	pr_info("task:%-15.15s state:%c", p->comm, task_state_to_char(p));
++
++	if (task_is_running(p))
++		pr_cont("  running task    ");
++	free = stack_not_used(p);
++	ppid = 0;
++	rcu_read_lock();
++	if (pid_alive(p))
++		ppid = task_pid_nr(rcu_dereference(p->real_parent));
++	rcu_read_unlock();
++	pr_cont(" stack:%-5lu pid:%-5d tgid:%-5d ppid:%-6d flags:0x%08lx\n",
++		free, task_pid_nr(p), task_tgid_nr(p),
++		ppid, read_task_thread_flags(p));
++
++	print_worker_info(KERN_INFO, p);
++	print_stop_info(KERN_INFO, p);
++	show_stack(p, NULL, KERN_INFO);
++	put_task_stack(p);
++}
++EXPORT_SYMBOL_GPL(sched_show_task);
++
++static inline bool
++state_filter_match(unsigned long state_filter, struct task_struct *p)
++{
++	unsigned int state = READ_ONCE(p->__state);
++
++	/* no filter, everything matches */
++	if (!state_filter)
++		return true;
++
++	/* filter, but doesn't match */
++	if (!(state & state_filter))
++		return false;
++
++	/*
++	 * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows
++	 * TASK_KILLABLE).
++	 */
++	if (state_filter == TASK_UNINTERRUPTIBLE && (state & TASK_NOLOAD))
++		return false;
++
++	return true;
++}
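++
++/*
++ * A self-contained userspace sketch of the filter logic above (the bit
++ * values mirror include/linux/sched.h): a plain TASK_UNINTERRUPTIBLE
++ * dump matches 'D'-state sleepers but skips TASK_IDLE ones.
++ */
++#include <stdbool.h>
++#include <stdio.h>
++
++#define EX_TASK_UNINTERRUPTIBLE	0x0002
++#define EX_TASK_NOLOAD		0x0400
++#define EX_TASK_IDLE		(EX_TASK_UNINTERRUPTIBLE | EX_TASK_NOLOAD)
++
++static bool ex_filter_match(unsigned long filter, unsigned int state)
++{
++	if (!filter)
++		return true;
++	if (!(state & filter))
++		return false;
++	if (filter == EX_TASK_UNINTERRUPTIBLE && (state & EX_TASK_NOLOAD))
++		return false;
++	return true;
++}
++
++int main(void)
++{
++	printf("%d\n", ex_filter_match(EX_TASK_UNINTERRUPTIBLE,
++				       EX_TASK_UNINTERRUPTIBLE));	/* 1 */
++	printf("%d\n", ex_filter_match(EX_TASK_UNINTERRUPTIBLE,
++				       EX_TASK_IDLE));			/* 0 */
++	return 0;
++}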
++
++
++void show_state_filter(unsigned int state_filter)
++{
++	struct task_struct *g, *p;
++
++	rcu_read_lock();
++	for_each_process_thread(g, p) {
++		/*
++		 * Reset the NMI watchdog timeout: listing all tasks on a slow
++		 * console might take a lot of time.
++		 * Also, reset softlockup watchdogs on all CPUs, because
++		 * another CPU might be blocked waiting for us to process
++		 * an IPI.
++		 */
++		touch_nmi_watchdog();
++		touch_all_softlockup_watchdogs();
++		if (state_filter_match(state_filter, p))
++			sched_show_task(p);
++	}
++
++#ifdef CONFIG_SCHED_DEBUG
++	/* TODO: Alt schedule FW should support this
++	if (!state_filter)
++		sysrq_sched_debug_show();
++	*/
++#endif
++	rcu_read_unlock();
++	/*
++	 * Only show locks if all tasks are dumped:
++	 */
++	if (!state_filter)
++		debug_show_all_locks();
++}
++
++void dump_cpu_task(int cpu)
++{
++	if (in_hardirq() && cpu == smp_processor_id()) {
++		struct pt_regs *regs;
++
++		regs = get_irq_regs();
++		if (regs) {
++			show_regs(regs);
++			return;
++		}
++	}
++
++	if (trigger_single_cpu_backtrace(cpu))
++		return;
++
++	pr_info("Task dump for CPU %d:\n", cpu);
++	sched_show_task(cpu_curr(cpu));
++}
++
++/**
++ * init_idle - set up an idle thread for a given CPU
++ * @idle: task in question
++ * @cpu: CPU the idle task belongs to
++ *
++ * NOTE: this function does not set the idle thread's NEED_RESCHED
++ * flag, to make booting more robust.
++ */
++void __init init_idle(struct task_struct *idle, int cpu)
++{
++#ifdef CONFIG_SMP
++	struct affinity_context ac = (struct affinity_context) {
++		.new_mask  = cpumask_of(cpu),
++		.flags     = 0,
++	};
++#endif
++	struct rq *rq = cpu_rq(cpu);
++	unsigned long flags;
++
++	__sched_fork(0, idle);
++
++	raw_spin_lock_irqsave(&idle->pi_lock, flags);
++	raw_spin_lock(&rq->lock);
++
++	idle->last_ran = rq->clock_task;
++	idle->__state = TASK_RUNNING;
++	/*
++	 * PF_KTHREAD should already be set at this point; regardless, make it
++	 * look like a proper per-CPU kthread.
++	 */
++	idle->flags |= PF_KTHREAD | PF_NO_SETAFFINITY;
++	kthread_set_per_cpu(idle, cpu);
++
++	sched_queue_init_idle(&rq->queue, idle);
++
++#ifdef CONFIG_SMP
++	/*
++	 * It's possible that init_idle() gets called multiple times on a task;
++	 * in that case do_set_cpus_allowed() will not do the right thing.
++	 *
++	 * And since this is boot we can forgo the serialisation.
++	 */
++	set_cpus_allowed_common(idle, &ac);
++#endif
++
++	/* Silence PROVE_RCU */
++	rcu_read_lock();
++	__set_task_cpu(idle, cpu);
++	rcu_read_unlock();
++
++	rq->idle = idle;
++	rcu_assign_pointer(rq->curr, idle);
++	idle->on_cpu = 1;
++
++	raw_spin_unlock(&rq->lock);
++	raw_spin_unlock_irqrestore(&idle->pi_lock, flags);
++
++	/* Set the preempt count _outside_ the spinlocks! */
++	init_idle_preempt_count(idle, cpu);
++
++	ftrace_graph_init_idle_task(idle, cpu);
++	vtime_init_idle(idle, cpu);
++#ifdef CONFIG_SMP
++	sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
++#endif
++}
++
++#ifdef CONFIG_SMP
++
++int cpuset_cpumask_can_shrink(const struct cpumask __maybe_unused *cur,
++			      const struct cpumask __maybe_unused *trial)
++{
++	return 1;
++}
++
++int task_can_attach(struct task_struct *p)
++{
++	int ret = 0;
++
++	/*
++	 * Kthreads which disallow setaffinity shouldn't be moved
++	 * to a new cpuset; we don't want to change their CPU
++	 * affinity and isolating such threads by their set of
++	 * allowed nodes is unnecessary.  Thus, cpusets are not
++	 * applicable for such threads.  This prevents checking for
++	 * success of set_cpus_allowed_ptr() on all attached tasks
++	 * before cpus_mask may be changed.
++	 */
++	if (p->flags & PF_NO_SETAFFINITY)
++		ret = -EINVAL;
++
++	return ret;
++}
++
++bool sched_smp_initialized __read_mostly;
++
++#ifdef CONFIG_HOTPLUG_CPU
++/*
++ * Ensures that the idle task is using init_mm right before its CPU goes
++ * offline.
++ */
++void idle_task_exit(void)
++{
++	struct mm_struct *mm = current->active_mm;
++
++	BUG_ON(current != this_rq()->idle);
++
++	if (mm != &init_mm) {
++		switch_mm(mm, &init_mm, current);
++		finish_arch_post_lock_switch();
++	}
++
++	/* finish_cpu(), as run on the BP, will clean up the active_mm state */
++}
++
++static int __balance_push_cpu_stop(void *arg)
++{
++	struct task_struct *p = arg;
++	struct rq *rq = this_rq();
++	struct rq_flags rf;
++	int cpu;
++
++	raw_spin_lock_irq(&p->pi_lock);
++	rq_lock(rq, &rf);
++
++	update_rq_clock(rq);
++
++	if (task_rq(p) == rq && task_on_rq_queued(p)) {
++		cpu = select_fallback_rq(rq->cpu, p);
++		rq = __migrate_task(rq, p, cpu);
++	}
++
++	rq_unlock(rq, &rf);
++	raw_spin_unlock_irq(&p->pi_lock);
++
++	put_task_struct(p);
++
++	return 0;
++}
++
++static DEFINE_PER_CPU(struct cpu_stop_work, push_work);
++
++/*
++ * This is enabled below SCHED_AP_ACTIVE, i.e. when !cpu_active(), but it
++ * only takes effect while the CPU is on its way down (hotplug offline).
++ */
++static void balance_push(struct rq *rq)
++{
++	struct task_struct *push_task = rq->curr;
++
++	lockdep_assert_held(&rq->lock);
++
++	/*
++	 * Ensure the thing is persistent until balance_push_set(.on = false);
++	 */
++	rq->balance_callback = &balance_push_callback;
++
++	/*
++	 * Only active while going offline and when invoked on the outgoing
++	 * CPU.
++	 */
++	if (!cpu_dying(rq->cpu) || rq != this_rq())
++		return;
++
++	/*
++	 * Both the cpu-hotplug and stop task are in this case and are
++	 * required to complete the hotplug process.
++	 */
++	if (kthread_is_per_cpu(push_task) ||
++	    is_migration_disabled(push_task)) {
++
++		/*
++		 * If this is the idle task on the outgoing CPU try to wake
++		 * up the hotplug control thread which might wait for the
++		 * last task to vanish. The rcuwait_active() check is
++		 * accurate here because the waiter is pinned on this CPU
++		 * and can't obviously be running in parallel.
++		 *
++		 * On RT kernels this also has to check whether there are
++		 * pinned and scheduled out tasks on the runqueue. They
++		 * need to leave the migrate disabled section first.
++		 */
++		if (!rq->nr_running && !rq_has_pinned_tasks(rq) &&
++		    rcuwait_active(&rq->hotplug_wait)) {
++			raw_spin_unlock(&rq->lock);
++			rcuwait_wake_up(&rq->hotplug_wait);
++			raw_spin_lock(&rq->lock);
++		}
++		return;
++	}
++
++	get_task_struct(push_task);
++	/*
++	 * Temporarily drop rq->lock such that we can wake-up the stop task.
++	 * Both preemption and IRQs are still disabled.
++	 */
++	preempt_disable();
++	raw_spin_unlock(&rq->lock);
++	stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task,
++			    this_cpu_ptr(&push_work));
++	preempt_enable();
++	/*
++	 * At this point need_resched() is true and we'll take the loop in
++	 * schedule(). The next pick is obviously going to be the stop task
++	 * which kthread_is_per_cpu() and will push this task away.
++	 */
++	raw_spin_lock(&rq->lock);
++}
++
++static void balance_push_set(int cpu, bool on)
++{
++	struct rq *rq = cpu_rq(cpu);
++	struct rq_flags rf;
++
++	rq_lock_irqsave(rq, &rf);
++	if (on) {
++		WARN_ON_ONCE(rq->balance_callback);
++		rq->balance_callback = &balance_push_callback;
++	} else if (rq->balance_callback == &balance_push_callback) {
++		rq->balance_callback = NULL;
++	}
++	rq_unlock_irqrestore(rq, &rf);
++}
++
++/*
++ * Invoked from a CPUs hotplug control thread after the CPU has been marked
++ * inactive. All tasks which are not per CPU kernel threads are either
++ * pushed off this CPU now via balance_push() or placed on a different CPU
++ * during wakeup. Wait until the CPU is quiescent.
++ */
++static void balance_hotplug_wait(void)
++{
++	struct rq *rq = this_rq();
++
++	rcuwait_wait_event(&rq->hotplug_wait,
++			   rq->nr_running == 1 && !rq_has_pinned_tasks(rq),
++			   TASK_UNINTERRUPTIBLE);
++}
++
++#else
++
++static void balance_push(struct rq *rq)
++{
++}
++
++static void balance_push_set(int cpu, bool on)
++{
++}
++
++static inline void balance_hotplug_wait(void)
++{
++}
++#endif /* CONFIG_HOTPLUG_CPU */
++
++static void set_rq_offline(struct rq *rq)
++{
++	if (rq->online) {
++		update_rq_clock(rq);
++		rq->online = false;
++	}
++}
++
++static void set_rq_online(struct rq *rq)
++{
++	if (!rq->online)
++		rq->online = true;
++}
++
++static inline void sched_set_rq_online(struct rq *rq, int cpu)
++{
++	unsigned long flags;
++
++	raw_spin_lock_irqsave(&rq->lock, flags);
++	set_rq_online(rq);
++	raw_spin_unlock_irqrestore(&rq->lock, flags);
++}
++
++static inline void sched_set_rq_offline(struct rq *rq, int cpu)
++{
++	unsigned long flags;
++
++	raw_spin_lock_irqsave(&rq->lock, flags);
++	set_rq_offline(rq);
++	raw_spin_unlock_irqrestore(&rq->lock, flags);
++}
++
++/*
++ * used to mark begin/end of suspend/resume:
++ */
++static int num_cpus_frozen;
++
++/*
++ * Update cpusets according to cpu_active mask.  If cpusets are
++ * disabled, cpuset_update_active_cpus() becomes a simple wrapper
++ * around partition_sched_domains().
++ *
++ * If we come here as part of a suspend/resume, don't touch cpusets because we
++ * want to restore it back to its original state upon resume anyway.
++ */
++static void cpuset_cpu_active(void)
++{
++	if (cpuhp_tasks_frozen) {
++		/*
++		 * num_cpus_frozen tracks how many CPUs are involved in suspend
++		 * resume sequence. As long as this is not the last online
++		 * operation in the resume sequence, just build a single sched
++		 * domain, ignoring cpusets.
++		 */
++		partition_sched_domains(1, NULL, NULL);
++		if (--num_cpus_frozen)
++			return;
++		/*
++		 * This is the last CPU online operation. So fall through and
++		 * restore the original sched domains by considering the
++		 * cpuset configurations.
++		 */
++		cpuset_force_rebuild();
++	}
++
++	cpuset_update_active_cpus();
++}
++
++static int cpuset_cpu_inactive(unsigned int cpu)
++{
++	if (!cpuhp_tasks_frozen) {
++		cpuset_update_active_cpus();
++	} else {
++		num_cpus_frozen++;
++		partition_sched_domains(1, NULL, NULL);
++	}
++	return 0;
++}
++
++static inline void sched_smt_present_inc(int cpu)
++{
++#ifdef CONFIG_SCHED_SMT
++	if (cpumask_weight(cpu_smt_mask(cpu)) == 2) {
++		static_branch_inc_cpuslocked(&sched_smt_present);
++		cpumask_or(&sched_smt_mask, &sched_smt_mask, cpu_smt_mask(cpu));
++	}
++#endif
++}
++
++static inline void sched_smt_present_dec(int cpu)
++{
++#ifdef CONFIG_SCHED_SMT
++	if (cpumask_weight(cpu_smt_mask(cpu)) == 2) {
++		static_branch_dec_cpuslocked(&sched_smt_present);
++		if (!static_branch_likely(&sched_smt_present))
++			cpumask_clear(sched_pcore_idle_mask);
++		cpumask_andnot(&sched_smt_mask, &sched_smt_mask, cpu_smt_mask(cpu));
++	}
++#endif
++}
++
++int sched_cpu_activate(unsigned int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++
++	/*
++	 * Clear the balance_push callback and prepare to schedule
++	 * regular tasks.
++	 */
++	balance_push_set(cpu, false);
++
++	set_cpu_active(cpu, true);
++
++	if (sched_smp_initialized)
++		cpuset_cpu_active();
++
++	/*
++	 * Put the rq online, if not already. This happens:
++	 *
++	 * 1) In the early boot process, because we build the real domains
++	 *    after all cpus have been brought up.
++	 *
++	 * 2) At runtime, if cpuset_cpu_active() fails to rebuild the
++	 *    domains.
++	 */
++	sched_set_rq_online(rq, cpu);
++
++	/*
++	 * When going up, increment the number of cores with SMT present.
++	 */
++	sched_smt_present_inc(cpu);
++
++	return 0;
++}
++
++int sched_cpu_deactivate(unsigned int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++	int ret;
++
++	set_cpu_active(cpu, false);
++
++	/*
++	 * From this point forward, this CPU will refuse to run any task that
++	 * is not: migrate_disable() or KTHREAD_IS_PER_CPU, and will actively
++	 * push those tasks away until this gets cleared, see
++	 * sched_cpu_dying().
++	 */
++	balance_push_set(cpu, true);
++
++	/*
++	 * We've cleared cpu_active_mask, wait for all preempt-disabled and RCU
++	 * users of this state to go away such that all new such users will
++	 * observe it.
++	 *
++	 * Specifically, we rely on ttwu to no longer target this CPU, see
++	 * ttwu_queue_cond() and is_cpu_allowed().
++	 *
++	 * Synchronize before parking smpboot threads to handle the RCU boost case.
++	 */
++	synchronize_rcu();
++
++	sched_set_rq_offline(rq, cpu);
++
++	/*
++	 * When going down, decrement the number of cores with SMT present.
++	 */
++	sched_smt_present_dec(cpu);
++
++	if (!sched_smp_initialized)
++		return 0;
++
++	ret = cpuset_cpu_inactive(cpu);
++	if (ret) {
++		sched_smt_present_inc(cpu);
++		sched_set_rq_online(rq, cpu);
++		balance_push_set(cpu, false);
++		set_cpu_active(cpu, true);
++		return ret;
++	}
++
++	return 0;
++}
++
++static void sched_rq_cpu_starting(unsigned int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++
++	rq->calc_load_update = calc_load_update;
++}
++
++int sched_cpu_starting(unsigned int cpu)
++{
++	sched_rq_cpu_starting(cpu);
++	sched_tick_start(cpu);
++	return 0;
++}
++
++#ifdef CONFIG_HOTPLUG_CPU
++
++/*
++ * Invoked immediately before the stopper thread is invoked to bring the
++ * CPU down completely. At this point all per CPU kthreads except the
++ * hotplug thread (current) and the stopper thread (inactive) have been
++ * either parked or have been unbound from the outgoing CPU. Ensure that
++ * any of those which might be on the way out are gone.
++ *
++ * If after this point a bound task is being woken on this CPU then the
++ * responsible hotplug callback has failed to do its job.
++ * sched_cpu_dying() will catch it with the appropriate fireworks.
++ */
++int sched_cpu_wait_empty(unsigned int cpu)
++{
++	balance_hotplug_wait();
++	return 0;
++}
++
++/*
++ * Since this CPU is going 'away' for a while, fold any nr_active delta we
++ * might have. Called from the CPU stopper task after ensuring that the
++ * stopper is the last running task on the CPU, so nr_active count is
++ * stable. We need to take the tear-down thread which is calling this into
++ * account, so we hand in adjust = 1 to the load calculation.
++ *
++ * Also see the comment "Global load-average calculations".
++ */
++static void calc_load_migrate(struct rq *rq)
++{
++	long delta = calc_load_fold_active(rq, 1);
++
++	if (delta)
++		atomic_long_add(delta, &calc_load_tasks);
++}
++
++static void dump_rq_tasks(struct rq *rq, const char *loglvl)
++{
++	struct task_struct *g, *p;
++	int cpu = cpu_of(rq);
++
++	lockdep_assert_held(&rq->lock);
++
++	printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running);
++	for_each_process_thread(g, p) {
++		if (task_cpu(p) != cpu)
++			continue;
++
++		if (!task_on_rq_queued(p))
++			continue;
++
++		printk("%s\tpid: %d, name: %s\n", loglvl, p->pid, p->comm);
++	}
++}
++
++int sched_cpu_dying(unsigned int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++	unsigned long flags;
++
++	/* Handle pending wakeups and then migrate everything off */
++	sched_tick_stop(cpu);
++
++	raw_spin_lock_irqsave(&rq->lock, flags);
++	if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) {
++		WARN(true, "Dying CPU not properly vacated!");
++		dump_rq_tasks(rq, KERN_WARNING);
++	}
++	raw_spin_unlock_irqrestore(&rq->lock, flags);
++
++	calc_load_migrate(rq);
++	hrtick_clear(rq);
++	return 0;
++}
++#endif
++
++#ifdef CONFIG_SMP
++static void sched_init_topology_cpumask_early(void)
++{
++	int cpu;
++	cpumask_t *tmp;
++
++	for_each_possible_cpu(cpu) {
++		/* init topo masks */
++		tmp = per_cpu(sched_cpu_topo_masks, cpu);
++
++		cpumask_copy(tmp, cpu_possible_mask);
++		per_cpu(sched_cpu_llc_mask, cpu) = tmp;
++		per_cpu(sched_cpu_topo_end_mask, cpu) = ++tmp;
++	}
++}
++
++#define TOPOLOGY_CPUMASK(name, mask, last)\
++	if (cpumask_and(topo, topo, mask)) {					\
++		cpumask_copy(topo, mask);					\
++		printk(KERN_INFO "sched: cpu#%02d topo: 0x%08lx - "#name,	\
++		       cpu, (topo++)->bits[0]);					\
++	}									\
++	if (!last)								\
++		bitmap_complement(cpumask_bits(topo), cpumask_bits(mask),	\
++				  nr_cpumask_bits);
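++
++/*
++ * Worked example (hypothetical 4-CPU box, 2-way SMT, one shared LLC): for
++ * cpu#0 the sibling mask is 0x3 and the coregroup mask is 0xf. The loop
++ * below then stores topo[0] = 0x3 (smt level) and topo[1] = 0xf
++ * (coregroup/LLC level), logging "cpu#00 topo: 0x00000003 - smt" and
++ * "cpu#00 topo: 0x0000000f - coregroup"; the cluster, core and "others"
++ * levels add nothing new on such a box and are skipped.
++ */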
++
++static void sched_init_topology_cpumask(void)
++{
++	int cpu;
++	cpumask_t *topo;
++
++	for_each_online_cpu(cpu) {
++		topo = per_cpu(sched_cpu_topo_masks, cpu);
++
++		bitmap_complement(cpumask_bits(topo), cpumask_bits(cpumask_of(cpu)),
++				  nr_cpumask_bits);
++#ifdef CONFIG_SCHED_SMT
++		TOPOLOGY_CPUMASK(smt, topology_sibling_cpumask(cpu), false);
++#endif
++		TOPOLOGY_CPUMASK(cluster, topology_cluster_cpumask(cpu), false);
++
++		per_cpu(sd_llc_id, cpu) = cpumask_first(cpu_coregroup_mask(cpu));
++		per_cpu(sched_cpu_llc_mask, cpu) = topo;
++		TOPOLOGY_CPUMASK(coregroup, cpu_coregroup_mask(cpu), false);
++
++		TOPOLOGY_CPUMASK(core, topology_core_cpumask(cpu), false);
++
++		TOPOLOGY_CPUMASK(others, cpu_online_mask, true);
++
++		per_cpu(sched_cpu_topo_end_mask, cpu) = topo;
++		printk(KERN_INFO "sched: cpu#%02d llc_id = %d, llc_mask idx = %d\n",
++		       cpu, per_cpu(sd_llc_id, cpu),
++		       (int) (per_cpu(sched_cpu_llc_mask, cpu) -
++			      per_cpu(sched_cpu_topo_masks, cpu)));
++	}
++}
++#endif
++
++void __init sched_init_smp(void)
++{
++	/* Move init over to a non-isolated CPU */
++	if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_TYPE_DOMAIN)) < 0)
++		BUG();
++	current->flags &= ~PF_NO_SETAFFINITY;
++
++	sched_init_topology();
++	sched_init_topology_cpumask();
++
++	sched_smp_initialized = true;
++}
++
++static int __init migration_init(void)
++{
++	sched_cpu_starting(smp_processor_id());
++	return 0;
++}
++early_initcall(migration_init);
++
++#else
++void __init sched_init_smp(void)
++{
++	cpu_rq(0)->idle->time_slice = sysctl_sched_base_slice;
++}
++#endif /* CONFIG_SMP */
++
++int in_sched_functions(unsigned long addr)
++{
++	return in_lock_functions(addr) ||
++		(addr >= (unsigned long)__sched_text_start
++		&& addr < (unsigned long)__sched_text_end);
++}
++
++#ifdef CONFIG_CGROUP_SCHED
++/*
++ * Default task group.
++ * Every task in system belongs to this group at bootup.
++ */
++struct task_group root_task_group;
++LIST_HEAD(task_groups);
++
++/* Cacheline aligned slab cache for task_group */
++static struct kmem_cache *task_group_cache __ro_after_init;
++#endif /* CONFIG_CGROUP_SCHED */
++
++void __init sched_init(void)
++{
++	int i;
++	struct rq *rq;
++
++	printk(KERN_INFO "sched/alt: "ALT_SCHED_NAME" CPU Scheduler "ALT_SCHED_VERSION
++			 " by Alfred Chen.\n");
++
++	wait_bit_init();
++
++#ifdef CONFIG_SMP
++	for (i = 0; i < SCHED_QUEUE_BITS; i++)
++		cpumask_copy(sched_preempt_mask + i, cpu_present_mask);
++#endif
++
++#ifdef CONFIG_CGROUP_SCHED
++	task_group_cache = KMEM_CACHE(task_group, 0);
++
++	list_add(&root_task_group.list, &task_groups);
++	INIT_LIST_HEAD(&root_task_group.children);
++	INIT_LIST_HEAD(&root_task_group.siblings);
++#endif /* CONFIG_CGROUP_SCHED */
++	for_each_possible_cpu(i) {
++		rq = cpu_rq(i);
++
++		sched_queue_init(&rq->queue);
++		rq->prio = IDLE_TASK_SCHED_PRIO;
++#ifdef CONFIG_SCHED_PDS
++		rq->prio_idx = rq->prio;
++#endif
++
++		raw_spin_lock_init(&rq->lock);
++		rq->nr_running = rq->nr_uninterruptible = 0;
++		rq->calc_load_active = 0;
++		rq->calc_load_update = jiffies + LOAD_FREQ;
++#ifdef CONFIG_SMP
++		rq->online = false;
++		rq->cpu = i;
++
++		rq->clear_idle_mask_func = cpumask_clear_cpu;
++		rq->set_idle_mask_func = cpumask_set_cpu;
++		rq->balance_func = NULL;
++		rq->active_balance_arg.active = 0;
++
++#ifdef CONFIG_NO_HZ_COMMON
++		INIT_CSD(&rq->nohz_csd, nohz_csd_func, rq);
++#endif
++		rq->balance_callback = &balance_push_callback;
++#ifdef CONFIG_HOTPLUG_CPU
++		rcuwait_init(&rq->hotplug_wait);
++#endif
++#endif /* CONFIG_SMP */
++		rq->nr_switches = 0;
++
++		hrtick_rq_init(rq);
++		atomic_set(&rq->nr_iowait, 0);
++
++		zalloc_cpumask_var_node(&rq->scratch_mask, GFP_KERNEL, cpu_to_node(i));
++	}
++#ifdef CONFIG_SMP
++	/* Set rq->online for cpu 0 */
++	cpu_rq(0)->online = true;
++#endif
++	/*
++	 * The boot idle thread does lazy MMU switching as well:
++	 */
++	mmgrab(&init_mm);
++	enter_lazy_tlb(&init_mm, current);
++
++	/*
++	 * The idle task doesn't need the kthread struct to function, but it
++	 * is dressed up as a per-CPU kthread and thus needs to play the part
++	 * if we want to avoid special-casing it in code that deals with per-CPU
++	 * kthreads.
++	 */
++	WARN_ON(!set_kthread_struct(current));
++
++	/*
++	 * Make us the idle thread. Technically, schedule() should not be
++	 * called from this thread, however somewhere below it might be,
++	 * but because we are the idle thread, we just pick up running again
++	 * when this runqueue becomes "idle".
++	 */
++	init_idle(current, smp_processor_id());
++
++	calc_load_update = jiffies + LOAD_FREQ;
++
++#ifdef CONFIG_SMP
++	idle_thread_set_boot_cpu();
++	balance_push_set(smp_processor_id(), false);
++
++	sched_init_topology_cpumask_early();
++#endif /* SMP */
++
++	preempt_dynamic_init();
++}
++
++#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
++
++void __might_sleep(const char *file, int line)
++{
++	unsigned int state = get_current_state();
++	/*
++	 * Blocking primitives will set (and therefore destroy) current->state,
++	 * since we will exit with TASK_RUNNING make sure we enter with it,
++	 * otherwise we will destroy state.
++	 */
++	WARN_ONCE(state != TASK_RUNNING && current->task_state_change,
++			"do not call blocking ops when !TASK_RUNNING; "
++			"state=%x set at [<%p>] %pS\n", state,
++			(void *)current->task_state_change,
++			(void *)current->task_state_change);
++
++	__might_resched(file, line, 0);
++}
++EXPORT_SYMBOL(__might_sleep);
++
++static void print_preempt_disable_ip(int preempt_offset, unsigned long ip)
++{
++	if (!IS_ENABLED(CONFIG_DEBUG_PREEMPT))
++		return;
++
++	if (preempt_count() == preempt_offset)
++		return;
++
++	pr_err("Preemption disabled at:");
++	print_ip_sym(KERN_ERR, ip);
++}
++
++static inline bool resched_offsets_ok(unsigned int offsets)
++{
++	unsigned int nested = preempt_count();
++
++	nested += rcu_preempt_depth() << MIGHT_RESCHED_RCU_SHIFT;
++
++	return nested == offsets;
++}
++
++void __might_resched(const char *file, int line, unsigned int offsets)
++{
++	/* Ratelimiting timestamp: */
++	static unsigned long prev_jiffy;
++
++	unsigned long preempt_disable_ip;
++
++	/* WARN_ON_ONCE() by default, no rate limit required: */
++	rcu_sleep_check();
++
++	if ((resched_offsets_ok(offsets) && !irqs_disabled() &&
++	     !is_idle_task(current) && !current->non_block_count) ||
++	    system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING ||
++	    oops_in_progress)
++		return;
++	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
++		return;
++	prev_jiffy = jiffies;
++
++	/* Save this before calling printk(), since that will clobber it: */
++	preempt_disable_ip = get_preempt_disable_ip(current);
++
++	pr_err("BUG: sleeping function called from invalid context at %s:%d\n",
++	       file, line);
++	pr_err("in_atomic(): %d, irqs_disabled(): %d, non_block: %d, pid: %d, name: %s\n",
++	       in_atomic(), irqs_disabled(), current->non_block_count,
++	       current->pid, current->comm);
++	pr_err("preempt_count: %x, expected: %x\n", preempt_count(),
++	       offsets & MIGHT_RESCHED_PREEMPT_MASK);
++
++	if (IS_ENABLED(CONFIG_PREEMPT_RCU)) {
++		pr_err("RCU nest depth: %d, expected: %u\n",
++		       rcu_preempt_depth(), offsets >> MIGHT_RESCHED_RCU_SHIFT);
++	}
++
++	if (task_stack_end_corrupted(current))
++		pr_emerg("Thread overran stack, or stack corrupted\n");
++
++	debug_show_held_locks(current);
++	if (irqs_disabled())
++		print_irqtrace_events(current);
++
++	print_preempt_disable_ip(offsets & MIGHT_RESCHED_PREEMPT_MASK,
++				 preempt_disable_ip);
++
++	dump_stack();
++	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
++}
++EXPORT_SYMBOL(__might_resched);
++
++void __cant_sleep(const char *file, int line, int preempt_offset)
++{
++	static unsigned long prev_jiffy;
++
++	if (irqs_disabled())
++		return;
++
++	if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
++		return;
++
++	if (preempt_count() > preempt_offset)
++		return;
++
++	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
++		return;
++	prev_jiffy = jiffies;
++
++	printk(KERN_ERR "BUG: assuming atomic context at %s:%d\n", file, line);
++	printk(KERN_ERR "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
++			in_atomic(), irqs_disabled(),
++			current->pid, current->comm);
++
++	debug_show_held_locks(current);
++	dump_stack();
++	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
++}
++EXPORT_SYMBOL_GPL(__cant_sleep);
++
++#ifdef CONFIG_SMP
++void __cant_migrate(const char *file, int line)
++{
++	static unsigned long prev_jiffy;
++
++	if (irqs_disabled())
++		return;
++
++	if (is_migration_disabled(current))
++		return;
++
++	if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
++		return;
++
++	if (preempt_count() > 0)
++		return;
++
++	if (current->migration_flags & MDF_FORCE_ENABLED)
++		return;
++
++	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
++		return;
++	prev_jiffy = jiffies;
++
++	pr_err("BUG: assuming non migratable context at %s:%d\n", file, line);
++	pr_err("in_atomic(): %d, irqs_disabled(): %d, migration_disabled() %u pid: %d, name: %s\n",
++	       in_atomic(), irqs_disabled(), is_migration_disabled(current),
++	       current->pid, current->comm);
++
++	debug_show_held_locks(current);
++	dump_stack();
++	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
++}
++EXPORT_SYMBOL_GPL(__cant_migrate);
++#endif
++#endif
++
++#ifdef CONFIG_MAGIC_SYSRQ
++void normalize_rt_tasks(void)
++{
++	struct task_struct *g, *p;
++	struct sched_attr attr = {
++		.sched_policy = SCHED_NORMAL,
++	};
++
++	read_lock(&tasklist_lock);
++	for_each_process_thread(g, p) {
++		/*
++		 * Only normalize user tasks:
++		 */
++		if (p->flags & PF_KTHREAD)
++			continue;
++
++		schedstat_set(p->stats.wait_start,  0);
++		schedstat_set(p->stats.sleep_start, 0);
++		schedstat_set(p->stats.block_start, 0);
++
++		if (!rt_or_dl_task(p)) {
++			/*
++			 * Renice negative nice level userspace
++			 * tasks back to 0:
++			 */
++			if (task_nice(p) < 0)
++				set_user_nice(p, 0);
++			continue;
++		}
++
++		__sched_setscheduler(p, &attr, false, false);
++	}
++	read_unlock(&tasklist_lock);
++}
++#endif /* CONFIG_MAGIC_SYSRQ */
++
++#if defined(CONFIG_KGDB_KDB)
++/*
++ * These functions are only useful for KDB.
++ *
++ * They can only be called when the whole system has been
++ * stopped - every CPU needs to be quiescent, and no scheduling
++ * activity can take place. Using them for anything else would
++ * be a serious bug, and as a result, they aren't even visible
++ * under any other configuration.
++ */
++
++/**
++ * curr_task - return the current task for a given CPU.
++ * @cpu: the processor in question.
++ *
++ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
++ *
++ * Return: The current task for @cpu.
++ */
++struct task_struct *curr_task(int cpu)
++{
++	return cpu_curr(cpu);
++}
++
++#endif /* defined(CONFIG_KGDB_KDB) */
++
++#ifdef CONFIG_CGROUP_SCHED
++static void sched_free_group(struct task_group *tg)
++{
++	kmem_cache_free(task_group_cache, tg);
++}
++
++static void sched_free_group_rcu(struct rcu_head *rhp)
++{
++	sched_free_group(container_of(rhp, struct task_group, rcu));
++}
++
++static void sched_unregister_group(struct task_group *tg)
++{
++	/*
++	 * We have to wait for yet another RCU grace period to expire, as
++	 * print_cfs_stats() might run concurrently.
++	 */
++	call_rcu(&tg->rcu, sched_free_group_rcu);
++}
++
++/* allocate runqueue etc for a new task group */
++struct task_group *sched_create_group(struct task_group *parent)
++{
++	struct task_group *tg;
++
++	tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO);
++	if (!tg)
++		return ERR_PTR(-ENOMEM);
++
++	return tg;
++}
++
++void sched_online_group(struct task_group *tg, struct task_group *parent)
++{
++}
++
++/* RCU callback to free various structures associated with a task group */
++static void sched_unregister_group_rcu(struct rcu_head *rhp)
++{
++	/* Now it should be safe to free those cfs_rqs: */
++	sched_unregister_group(container_of(rhp, struct task_group, rcu));
++}
++
++void sched_destroy_group(struct task_group *tg)
++{
++	/* Wait for possible concurrent references to cfs_rqs to complete: */
++	call_rcu(&tg->rcu, sched_unregister_group_rcu);
++}
++
++void sched_release_group(struct task_group *tg)
++{
++}
++
++static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
++{
++	return css ? container_of(css, struct task_group, css) : NULL;
++}
++
++static struct cgroup_subsys_state *
++cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
++{
++	struct task_group *parent = css_tg(parent_css);
++	struct task_group *tg;
++
++	if (!parent) {
++		/* This is early initialization for the top cgroup */
++		return &root_task_group.css;
++	}
++
++	tg = sched_create_group(parent);
++	if (IS_ERR(tg))
++		return ERR_PTR(-ENOMEM);
++	return &tg->css;
++}
++
++/* Expose task group only after completing cgroup initialization */
++static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
++{
++	struct task_group *tg = css_tg(css);
++	struct task_group *parent = css_tg(css->parent);
++
++	if (parent)
++		sched_online_group(tg, parent);
++	return 0;
++}
++
++static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
++{
++	struct task_group *tg = css_tg(css);
++
++	sched_release_group(tg);
++}
++
++static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
++{
++	struct task_group *tg = css_tg(css);
++
++	/*
++	 * Relies on the RCU grace period between css_released() and this.
++	 */
++	sched_unregister_group(tg);
++}
++
++#ifdef CONFIG_RT_GROUP_SCHED
++static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
++{
++	return 0;
++}
++#endif
++
++static void cpu_cgroup_attach(struct cgroup_taskset *tset)
++{
++}
++
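++/*
++ * CFS bandwidth, RT group scheduling and uclamp are not implemented by
++ * the alt scheduler; the cgroup cpu controller interfaces below are
++ * no-op stubs kept for userspace compatibility.
++ */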
++static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css,
++				  struct cftype *cft)
++{
++	return 0;
++}
++
++static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css,
++				   struct cftype *cftype, s64 cfs_quota_us)
++{
++	return 0;
++}
++
++static u64 cpu_cfs_period_read_u64(struct cgroup_subsys_state *css,
++				   struct cftype *cft)
++{
++	return 0;
++}
++
++static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css,
++				    struct cftype *cftype, u64 cfs_period_us)
++{
++	return 0;
++}
++
++static u64 cpu_cfs_burst_read_u64(struct cgroup_subsys_state *css,
++				  struct cftype *cft)
++{
++	return 0;
++}
++
++static int cpu_cfs_burst_write_u64(struct cgroup_subsys_state *css,
++				   struct cftype *cftype, u64 cfs_burst_us)
++{
++	return 0;
++}
++
++static int cpu_cfs_stat_show(struct seq_file *sf, void *v)
++{
++	return 0;
++}
++
++static int cpu_cfs_local_stat_show(struct seq_file *sf, void *v)
++{
++	return 0;
++}
++
++static int cpu_rt_runtime_write(struct cgroup_subsys_state *css,
++				struct cftype *cft, s64 val)
++{
++	return 0;
++}
++
++static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css,
++			       struct cftype *cft)
++{
++	return 0;
++}
++
++static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css,
++				    struct cftype *cftype, u64 rt_period_us)
++{
++	return 0;
++}
++
++static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
++				   struct cftype *cft)
++{
++	return 0;
++}
++
++static int cpu_uclamp_min_show(struct seq_file *sf, void *v)
++{
++	return 0;
++}
++
++static int cpu_uclamp_max_show(struct seq_file *sf, void *v)
++{
++	return 0;
++}
++
++static ssize_t cpu_uclamp_min_write(struct kernfs_open_file *of,
++				    char *buf, size_t nbytes,
++				    loff_t off)
++{
++	return nbytes;
++}
++
++static ssize_t cpu_uclamp_max_write(struct kernfs_open_file *of,
++				    char *buf, size_t nbytes,
++				    loff_t off)
++{
++	return nbytes;
++}
++
++static struct cftype cpu_legacy_files[] = {
++	{
++		.name = "cfs_quota_us",
++		.read_s64 = cpu_cfs_quota_read_s64,
++		.write_s64 = cpu_cfs_quota_write_s64,
++	},
++	{
++		.name = "cfs_period_us",
++		.read_u64 = cpu_cfs_period_read_u64,
++		.write_u64 = cpu_cfs_period_write_u64,
++	},
++	{
++		.name = "cfs_burst_us",
++		.read_u64 = cpu_cfs_burst_read_u64,
++		.write_u64 = cpu_cfs_burst_write_u64,
++	},
++	{
++		.name = "stat",
++		.seq_show = cpu_cfs_stat_show,
++	},
++	{
++		.name = "stat.local",
++		.seq_show = cpu_cfs_local_stat_show,
++	},
++	{
++		.name = "rt_runtime_us",
++		.read_s64 = cpu_rt_runtime_read,
++		.write_s64 = cpu_rt_runtime_write,
++	},
++	{
++		.name = "rt_period_us",
++		.read_u64 = cpu_rt_period_read_uint,
++		.write_u64 = cpu_rt_period_write_uint,
++	},
++	{
++		.name = "uclamp.min",
++		.flags = CFTYPE_NOT_ON_ROOT,
++		.seq_show = cpu_uclamp_min_show,
++		.write = cpu_uclamp_min_write,
++	},
++	{
++		.name = "uclamp.max",
++		.flags = CFTYPE_NOT_ON_ROOT,
++		.seq_show = cpu_uclamp_max_show,
++		.write = cpu_uclamp_max_write,
++	},
++	{ }	/* Terminate */
++};
++
++static u64 cpu_weight_read_u64(struct cgroup_subsys_state *css,
++			       struct cftype *cft)
++{
++	return 0;
++}
++
++static int cpu_weight_write_u64(struct cgroup_subsys_state *css,
++				struct cftype *cft, u64 weight)
++{
++	return 0;
++}
++
++static s64 cpu_weight_nice_read_s64(struct cgroup_subsys_state *css,
++				    struct cftype *cft)
++{
++	return 0;
++}
++
++static int cpu_weight_nice_write_s64(struct cgroup_subsys_state *css,
++				     struct cftype *cft, s64 nice)
++{
++	return 0;
++}
++
++static s64 cpu_idle_read_s64(struct cgroup_subsys_state *css,
++			       struct cftype *cft)
++{
++	return 0;
++}
++
++static int cpu_idle_write_s64(struct cgroup_subsys_state *css,
++				struct cftype *cft, s64 idle)
++{
++	return 0;
++}
++
++static int cpu_max_show(struct seq_file *sf, void *v)
++{
++	return 0;
++}
++
++static ssize_t cpu_max_write(struct kernfs_open_file *of,
++			     char *buf, size_t nbytes, loff_t off)
++{
++	return nbytes;
++}
++
++static struct cftype cpu_files[] = {
++	{
++		.name = "weight",
++		.flags = CFTYPE_NOT_ON_ROOT,
++		.read_u64 = cpu_weight_read_u64,
++		.write_u64 = cpu_weight_write_u64,
++	},
++	{
++		.name = "weight.nice",
++		.flags = CFTYPE_NOT_ON_ROOT,
++		.read_s64 = cpu_weight_nice_read_s64,
++		.write_s64 = cpu_weight_nice_write_s64,
++	},
++	{
++		.name = "idle",
++		.flags = CFTYPE_NOT_ON_ROOT,
++		.read_s64 = cpu_idle_read_s64,
++		.write_s64 = cpu_idle_write_s64,
++	},
++	{
++		.name = "max",
++		.flags = CFTYPE_NOT_ON_ROOT,
++		.seq_show = cpu_max_show,
++		.write = cpu_max_write,
++	},
++	{
++		.name = "max.burst",
++		.flags = CFTYPE_NOT_ON_ROOT,
++		.read_u64 = cpu_cfs_burst_read_u64,
++		.write_u64 = cpu_cfs_burst_write_u64,
++	},
++	{
++		.name = "uclamp.min",
++		.flags = CFTYPE_NOT_ON_ROOT,
++		.seq_show = cpu_uclamp_min_show,
++		.write = cpu_uclamp_min_write,
++	},
++	{
++		.name = "uclamp.max",
++		.flags = CFTYPE_NOT_ON_ROOT,
++		.seq_show = cpu_uclamp_max_show,
++		.write = cpu_uclamp_max_write,
++	},
++	{ }	/* terminate */
++};
++
++static int cpu_extra_stat_show(struct seq_file *sf,
++			       struct cgroup_subsys_state *css)
++{
++	return 0;
++}
++
++static int cpu_local_stat_show(struct seq_file *sf,
++			       struct cgroup_subsys_state *css)
++{
++	return 0;
++}
++
++struct cgroup_subsys cpu_cgrp_subsys = {
++	.css_alloc	= cpu_cgroup_css_alloc,
++	.css_online	= cpu_cgroup_css_online,
++	.css_released	= cpu_cgroup_css_released,
++	.css_free	= cpu_cgroup_css_free,
++	.css_extra_stat_show = cpu_extra_stat_show,
++	.css_local_stat_show = cpu_local_stat_show,
++#ifdef CONFIG_RT_GROUP_SCHED
++	.can_attach	= cpu_cgroup_can_attach,
++#endif
++	.attach		= cpu_cgroup_attach,
++	.legacy_cftypes	= cpu_legacy_files,
++	.dfl_cftypes	= cpu_files,
++	.early_init	= true,
++	.threaded	= true,
++};
++#endif	/* CONFIG_CGROUP_SCHED */
++
++#undef CREATE_TRACE_POINTS
++
++#ifdef CONFIG_SCHED_MM_CID
++
++/*
++ * @cid_lock: Guarantee forward-progress of cid allocation.
++ *
++ * Concurrency ID allocation within a bitmap is mostly lock-free. The cid_lock
++ * is only used when contention is detected by the lock-free allocation so
++ * forward progress can be guaranteed.
++ */
++DEFINE_RAW_SPINLOCK(cid_lock);
++
++/*
++ * @use_cid_lock: Select cid allocation behavior: lock-free vs spinlock.
++ *
++ * When @use_cid_lock is 0, the cid allocation is lock-free. When contention is
++ * detected, it is set to 1 to ensure that all newly coming allocations are
++ * serialized by @cid_lock until the allocation which detected contention
++ * completes and sets @use_cid_lock back to 0. This guarantees forward progress
++ * of a cid allocation.
++ */
++int use_cid_lock;
++
++/*
++ * mm_cid remote-clear implements a lock-free algorithm to clear per-mm/cpu cid
++ * concurrently with respect to the execution of the source runqueue context
++ * switch.
++ *
++ * There is one basic property we want to guarantee here:
++ *
++ * (1) Remote-clear should _never_ mark a per-cpu cid UNSET when it is actively
++ * used by a task. That would lead to concurrent allocation of the cid and
++ * userspace corruption.
++ *
++ * Provide this guarantee by introducing a Dekker memory ordering to guarantee
++ * that a pair of loads observe at least one of a pair of stores, which can be
++ * shown as:
++ *
++ *      X = Y = 0
++ *
++ *      w[X]=1          w[Y]=1
++ *      MB              MB
++ *      r[Y]=y          r[X]=x
++ *
++ * Which guarantees that x==0 && y==0 is impossible. But rather than using
++ * values 0 and 1, this algorithm cares about specific state transitions of the
++ * runqueue current task (as updated by the scheduler context switch), and the
++ * per-mm/cpu cid value.
++ *
++ * Let's introduce task (Y) which has task->mm == mm and task (N) which has
++ * task->mm != mm for the rest of the discussion. There are two scheduler state
++ * transitions on context switch we care about:
++ *
++ * (TSA) Store to rq->curr with transition from (N) to (Y)
++ *
++ * (TSB) Store to rq->curr with transition from (Y) to (N)
++ *
++ * On the remote-clear side, there is one transition we care about:
++ *
++ * (TMA) cmpxchg to *pcpu_cid to set the LAZY flag
++ *
++ * There is also a transition to UNSET state which can be performed from all
++ * sides (scheduler, remote-clear). It is always performed with a cmpxchg which
++ * guarantees that only a single thread will succeed:
++ *
++ * (TMB) cmpxchg to *pcpu_cid to mark UNSET
++ *
++ * Just to be clear, what we do _not_ want to happen is a transition to UNSET
++ * when a thread is actively using the cid (property (1)).
++ *
++ * Let's look at the relevant combinations of TSA/TSB and TMA transitions.
++ *
++ * Scenario A) (TSA)+(TMA) (from next task perspective)
++ *
++ * CPU0                                      CPU1
++ *
++ * Context switch CS-1                       Remote-clear
++ *   - store to rq->curr: (N)->(Y) (TSA)     - cmpxchg to *pcpu_id to LAZY (TMA)
++ *                                             (implied barrier after cmpxchg)
++ *   - switch_mm_cid()
++ *     - memory barrier (see switch_mm_cid()
++ *       comment explaining how this barrier
++ *       is combined with other scheduler
++ *       barriers)
++ *     - mm_cid_get (next)
++ *       - READ_ONCE(*pcpu_cid)              - rcu_dereference(src_rq->curr)
++ *
++ * This Dekker ensures that either task (Y) is observed by the
++ * rcu_dereference() or the LAZY flag is observed by READ_ONCE(), or both are
++ * observed.
++ *
++ * If task (Y) store is observed by rcu_dereference(), it means that there is
++ * still an active task on the cpu. Remote-clear will therefore not transition
++ * to UNSET, which fulfills property (1).
++ *
++ * If task (Y) is not observed, but the lazy flag is observed by READ_ONCE(),
++ * it will move its state to UNSET, which clears the percpu cid perhaps
++ * uselessly (which is not an issue for correctness). Because task (Y) is not
++ * observed, CPU1 can move ahead to set the state to UNSET. Because moving
++ * state to UNSET is done with a cmpxchg expecting that the old state has the
++ * LAZY flag set, only one thread will successfully UNSET.
++ *
++ * If both states (LAZY flag and task (Y)) are observed, the thread on CPU0
++ * will observe the LAZY flag and transition to UNSET (perhaps uselessly), and
++ * CPU1 will observe task (Y) and do nothing more, which is fine.
++ *
++ * What we are effectively preventing with this Dekker is a scenario where
++ * neither LAZY flag nor store (Y) are observed, which would fail property (1)
++ * because this would UNSET a cid which is actively used.
++ */
++
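++/*
++ * Snapshot the source cpu at migration time; consumed by
++ * sched_mm_cid_migrate_to() once the task arrives on the destination rq.
++ */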
++void sched_mm_cid_migrate_from(struct task_struct *t)
++{
++	t->migrate_from_cpu = task_cpu(t);
++}
++
++static
++int __sched_mm_cid_migrate_from_fetch_cid(struct rq *src_rq,
++					  struct task_struct *t,
++					  struct mm_cid *src_pcpu_cid)
++{
++	struct mm_struct *mm = t->mm;
++	struct task_struct *src_task;
++	int src_cid, last_mm_cid;
++
++	if (!mm)
++		return -1;
++
++	last_mm_cid = t->last_mm_cid;
++	/*
++	 * If the migrated task has no last cid, or if the current
++	 * task on src rq uses the cid, it means the source cid does not need
++	 * to be moved to the destination cpu.
++	 */
++	if (last_mm_cid == -1)
++		return -1;
++	src_cid = READ_ONCE(src_pcpu_cid->cid);
++	if (!mm_cid_is_valid(src_cid) || last_mm_cid != src_cid)
++		return -1;
++
++	/*
++	 * If we observe an active task using the mm on this rq, it means we
++	 * are not the last task to be migrated from this cpu for this mm, so
++	 * there is no need to move src_cid to the destination cpu.
++	 */
++	guard(rcu)();
++	src_task = rcu_dereference(src_rq->curr);
++	if (READ_ONCE(src_task->mm_cid_active) && src_task->mm == mm) {
++		t->last_mm_cid = -1;
++		return -1;
++	}
++
++	return src_cid;
++}
++
++static
++int __sched_mm_cid_migrate_from_try_steal_cid(struct rq *src_rq,
++					      struct task_struct *t,
++					      struct mm_cid *src_pcpu_cid,
++					      int src_cid)
++{
++	struct task_struct *src_task;
++	struct mm_struct *mm = t->mm;
++	int lazy_cid;
++
++	if (src_cid == -1)
++		return -1;
++
++	/*
++	 * Attempt to clear the source cpu cid to move it to the destination
++	 * cpu.
++	 */
++	lazy_cid = mm_cid_set_lazy_put(src_cid);
++	if (!try_cmpxchg(&src_pcpu_cid->cid, &src_cid, lazy_cid))
++		return -1;
++
++	/*
++	 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
++	 * rq->curr->mm matches the scheduler barrier in context_switch()
++	 * between store to rq->curr and load of prev and next task's
++	 * per-mm/cpu cid.
++	 *
++	 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
++	 * rq->curr->mm_cid_active matches the barrier in
++	 * sched_mm_cid_exit_signals(), sched_mm_cid_before_execve(), and
++	 * sched_mm_cid_after_execve() between store to t->mm_cid_active and
++	 * load of per-mm/cpu cid.
++	 */
++
++	/*
++	 * If we observe an active task using the mm on this rq after setting
++	 * the lazy-put flag, this task will be responsible for transitioning
++	 * from lazy-put flag set to MM_CID_UNSET.
++	 */
++	scoped_guard (rcu) {
++		src_task = rcu_dereference(src_rq->curr);
++		if (READ_ONCE(src_task->mm_cid_active) && src_task->mm == mm) {
++			/*
++			 * We observed an active task for this mm, there is therefore
++			 * no point in moving this cid to the destination cpu.
++			 */
++			t->last_mm_cid = -1;
++			return -1;
++		}
++	}
++
++	/*
++	 * The src_cid is unused, so it can be unset.
++	 */
++	if (!try_cmpxchg(&src_pcpu_cid->cid, &lazy_cid, MM_CID_UNSET))
++		return -1;
++	return src_cid;
++}
++
++/*
++ * Migration to dst cpu. Called with dst_rq lock held.
++ * Interrupts are disabled, which keeps the window of cid ownership without the
++ * source rq lock held small.
++ */
++void sched_mm_cid_migrate_to(struct rq *dst_rq, struct task_struct *t)
++{
++	struct mm_cid *src_pcpu_cid, *dst_pcpu_cid;
++	struct mm_struct *mm = t->mm;
++	int src_cid, dst_cid, src_cpu;
++	struct rq *src_rq;
++
++	lockdep_assert_rq_held(dst_rq);
++
++	if (!mm)
++		return;
++	src_cpu = t->migrate_from_cpu;
++	if (src_cpu == -1) {
++		t->last_mm_cid = -1;
++		return;
++	}
++	/*
++	 * Move the src cid if the dst cid is unset. This keeps id
++	 * allocation closest to 0 in cases where few threads migrate around
++	 * many CPUs.
++	 *
++	 * If destination cid is already set, we may have to just clear
++	 * the src cid to ensure compactness in frequent migrations
++	 * scenarios.
++	 *
++	 * It is not useful to clear the src cid when the number of threads is
++	 * greater or equal to the number of allowed CPUs, because user-space
++	 * can expect that the number of allowed cids can reach the number of
++	 * allowed CPUs.
++	 */
++	dst_pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu_of(dst_rq));
++	dst_cid = READ_ONCE(dst_pcpu_cid->cid);
++	if (!mm_cid_is_unset(dst_cid) &&
++	    atomic_read(&mm->mm_users) >= t->nr_cpus_allowed)
++		return;
++	src_pcpu_cid = per_cpu_ptr(mm->pcpu_cid, src_cpu);
++	src_rq = cpu_rq(src_cpu);
++	src_cid = __sched_mm_cid_migrate_from_fetch_cid(src_rq, t, src_pcpu_cid);
++	if (src_cid == -1)
++		return;
++	src_cid = __sched_mm_cid_migrate_from_try_steal_cid(src_rq, t, src_pcpu_cid,
++							    src_cid);
++	if (src_cid == -1)
++		return;
++	if (!mm_cid_is_unset(dst_cid)) {
++		__mm_cid_put(mm, src_cid);
++		return;
++	}
++	/* Move src_cid to dst cpu. */
++	mm_cid_snapshot_time(dst_rq, mm);
++	WRITE_ONCE(dst_pcpu_cid->cid, src_cid);
++}
++
++static void sched_mm_cid_remote_clear(struct mm_struct *mm, struct mm_cid *pcpu_cid,
++				      int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++	struct task_struct *t;
++	int cid, lazy_cid;
++
++	cid = READ_ONCE(pcpu_cid->cid);
++	if (!mm_cid_is_valid(cid))
++		return;
++
++	/*
++	 * Clear the cpu cid if it is set to keep cid allocation compact.  If
++	 * there happens to be other tasks left on the source cpu using this
++	 * mm, the next task using this mm will reallocate its cid on context
++	 * switch.
++	 */
++	lazy_cid = mm_cid_set_lazy_put(cid);
++	if (!try_cmpxchg(&pcpu_cid->cid, &cid, lazy_cid))
++		return;
++
++	/*
++	 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
++	 * rq->curr->mm matches the scheduler barrier in context_switch()
++	 * between store to rq->curr and load of prev and next task's
++	 * per-mm/cpu cid.
++	 *
++	 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
++	 * rq->curr->mm_cid_active matches the barrier in
++	 * sched_mm_cid_exit_signals(), sched_mm_cid_before_execve(), and
++	 * sched_mm_cid_after_execve() between store to t->mm_cid_active and
++	 * load of per-mm/cpu cid.
++	 */
++
++	/*
++	 * If we observe an active task using the mm on this rq after setting
++	 * the lazy-put flag, that task will be responsible for transitioning
++	 * from lazy-put flag set to MM_CID_UNSET.
++	 */
++	scoped_guard (rcu) {
++		t = rcu_dereference(rq->curr);
++		if (READ_ONCE(t->mm_cid_active) && t->mm == mm)
++			return;
++	}
++
++	/*
++	 * The cid is unused, so it can be unset.
++	 * Disable interrupts to keep the window of cid ownership without rq
++	 * lock small.
++	 */
++	scoped_guard (irqsave) {
++		if (try_cmpxchg(&pcpu_cid->cid, &lazy_cid, MM_CID_UNSET))
++			__mm_cid_put(mm, cid);
++	}
++}
++
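++/*
++ * Clear @cpu's cid for @mm when no task using @mm has run there within
++ * the last SCHED_MM_CID_PERIOD_NS; a currently running user refreshes
++ * the timestamp instead.
++ */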
++static void sched_mm_cid_remote_clear_old(struct mm_struct *mm, int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++	struct mm_cid *pcpu_cid;
++	struct task_struct *curr;
++	u64 rq_clock;
++
++	/*
++	 * rq->clock load is racy on 32-bit but one spurious clear once in a
++	 * while is irrelevant.
++	 */
++	rq_clock = READ_ONCE(rq->clock);
++	pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu);
++
++	/*
++	 * In order to take care of infrequently scheduled tasks, bump the time
++	 * snapshot associated with this cid if an active task using the mm is
++	 * observed on this rq.
++	 */
++	scoped_guard (rcu) {
++		curr = rcu_dereference(rq->curr);
++		if (READ_ONCE(curr->mm_cid_active) && curr->mm == mm) {
++			WRITE_ONCE(pcpu_cid->time, rq_clock);
++			return;
++		}
++	}
++
++	if (rq_clock < pcpu_cid->time + SCHED_MM_CID_PERIOD_NS)
++		return;
++	sched_mm_cid_remote_clear(mm, pcpu_cid, cpu);
++}
++
++static void sched_mm_cid_remote_clear_weight(struct mm_struct *mm, int cpu,
++					     int weight)
++{
++	struct mm_cid *pcpu_cid;
++	int cid;
++
++	pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu);
++	cid = READ_ONCE(pcpu_cid->cid);
++	if (!mm_cid_is_valid(cid) || cid < weight)
++		return;
++	sched_mm_cid_remote_clear(mm, pcpu_cid, cpu);
++}
++
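++/*
++ * Deferred task_work: at most once per MM_CID_SCAN_DELAY, drop cids that
++ * were not recently used and clear cids at or above the cidmask weight
++ * to keep the id space compact.
++ */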
++static void task_mm_cid_work(struct callback_head *work)
++{
++	unsigned long now = jiffies, old_scan, next_scan;
++	struct task_struct *t = current;
++	struct cpumask *cidmask;
++	struct mm_struct *mm;
++	int weight, cpu;
++
++	SCHED_WARN_ON(t != container_of(work, struct task_struct, cid_work));
++
++	work->next = work;	/* Prevent double-add */
++	if (t->flags & PF_EXITING)
++		return;
++	mm = t->mm;
++	if (!mm)
++		return;
++	old_scan = READ_ONCE(mm->mm_cid_next_scan);
++	next_scan = now + msecs_to_jiffies(MM_CID_SCAN_DELAY);
++	if (!old_scan) {
++		unsigned long res;
++
++		res = cmpxchg(&mm->mm_cid_next_scan, old_scan, next_scan);
++		if (res != old_scan)
++			old_scan = res;
++		else
++			old_scan = next_scan;
++	}
++	if (time_before(now, old_scan))
++		return;
++	if (!try_cmpxchg(&mm->mm_cid_next_scan, &old_scan, next_scan))
++		return;
++	cidmask = mm_cidmask(mm);
++	/* Clear cids that were not recently used. */
++	for_each_possible_cpu(cpu)
++		sched_mm_cid_remote_clear_old(mm, cpu);
++	weight = cpumask_weight(cidmask);
++	/*
++	 * Clear cids that are greater or equal to the cidmask weight to
++	 * recompact it.
++	 */
++	for_each_possible_cpu(cpu)
++		sched_mm_cid_remote_clear_weight(mm, cpu, weight);
++}
++
++void init_sched_mm_cid(struct task_struct *t)
++{
++	struct mm_struct *mm = t->mm;
++	int mm_users = 0;
++
++	if (mm) {
++		mm_users = atomic_read(&mm->mm_users);
++		if (mm_users == 1)
++			mm->mm_cid_next_scan = jiffies + msecs_to_jiffies(MM_CID_SCAN_DELAY);
++	}
++	t->cid_work.next = &t->cid_work;	/* Protect against double add */
++	init_task_work(&t->cid_work, task_mm_cid_work);
++}
++
++void task_tick_mm_cid(struct rq *rq, struct task_struct *curr)
++{
++	struct callback_head *work = &curr->cid_work;
++	unsigned long now = jiffies;
++
++	if (!curr->mm || (curr->flags & (PF_EXITING | PF_KTHREAD)) ||
++	    work->next != work)
++		return;
++	if (time_before(now, READ_ONCE(curr->mm->mm_cid_next_scan)))
++		return;
++
++	/* No page allocation under rq lock */
++	task_work_add(curr, work, TWA_RESUME | TWAF_NO_ALLOC);
++}
++
++void sched_mm_cid_exit_signals(struct task_struct *t)
++{
++	struct mm_struct *mm = t->mm;
++	struct rq *rq;
++
++	if (!mm)
++		return;
++
++	preempt_disable();
++	rq = this_rq();
++	guard(rq_lock_irqsave)(rq);
++	preempt_enable_no_resched();	/* holding spinlock */
++	WRITE_ONCE(t->mm_cid_active, 0);
++	/*
++	 * Store t->mm_cid_active before loading per-mm/cpu cid.
++	 * Matches barrier in sched_mm_cid_remote_clear_old().
++	 */
++	smp_mb();
++	mm_cid_put(mm);
++	t->last_mm_cid = t->mm_cid = -1;
++}
++
++void sched_mm_cid_before_execve(struct task_struct *t)
++{
++	struct mm_struct *mm = t->mm;
++	struct rq *rq;
++
++	if (!mm)
++		return;
++
++	preempt_disable();
++	rq = this_rq();
++	guard(rq_lock_irqsave)(rq);
++	preempt_enable_no_resched();	/* holding spinlock */
++	WRITE_ONCE(t->mm_cid_active, 0);
++	/*
++	 * Store t->mm_cid_active before loading per-mm/cpu cid.
++	 * Matches barrier in sched_mm_cid_remote_clear_old().
++	 */
++	smp_mb();
++	mm_cid_put(mm);
++	t->last_mm_cid = t->mm_cid = -1;
++}
++
++void sched_mm_cid_after_execve(struct task_struct *t)
++{
++	struct mm_struct *mm = t->mm;
++	struct rq *rq;
++
++	if (!mm)
++		return;
++
++	preempt_disable();
++	rq = this_rq();
++	scoped_guard (rq_lock_irqsave, rq) {
++		preempt_enable_no_resched();	/* holding spinlock */
++		WRITE_ONCE(t->mm_cid_active, 1);
++		/*
++		 * Store t->mm_cid_active before loading per-mm/cpu cid.
++		 * Matches barrier in sched_mm_cid_remote_clear_old().
++		 */
++		smp_mb();
++		t->last_mm_cid = t->mm_cid = mm_cid_get(rq, mm);
++	}
++	rseq_set_notify_resume(t);
++}
++
++void sched_mm_cid_fork(struct task_struct *t)
++{
++	WARN_ON_ONCE(!t->mm || t->mm_cid != -1);
++	t->mm_cid_active = 1;
++}
++#endif
+diff --git a/kernel/sched/alt_core.h b/kernel/sched/alt_core.h
+new file mode 100644
+index 000000000000..12d76d9d290e
+--- /dev/null
++++ b/kernel/sched/alt_core.h
+@@ -0,0 +1,213 @@
++#ifndef _KERNEL_SCHED_ALT_CORE_H
++#define _KERNEL_SCHED_ALT_CORE_H
++
++/*
++ * Compile time debug macro
++ * #define ALT_SCHED_DEBUG
++ */
++
++/*
++ * Task related inlined functions
++ */
++static inline bool is_migration_disabled(struct task_struct *p)
++{
++#ifdef CONFIG_SMP
++	return p->migration_disabled;
++#else
++	return false;
++#endif
++}
++
++/* rt_prio(prio) defined in include/linux/sched/rt.h */
++#define rt_task(p)		rt_prio((p)->prio)
++#define rt_policy(policy)	((policy) == SCHED_FIFO || (policy) == SCHED_RR)
++#define task_has_rt_policy(p)	(rt_policy((p)->policy))
++
++struct affinity_context {
++	const struct cpumask	*new_mask;
++	struct cpumask		*user_mask;
++	unsigned int		flags;
++};
++
++/* CONFIG_SCHED_CLASS_EXT is not supported */
++#define scx_switched_all()	false
++
++#define SCA_CHECK		0x01
++#define SCA_MIGRATE_DISABLE	0x02
++#define SCA_MIGRATE_ENABLE	0x04
++#define SCA_USER		0x08
++
++#ifdef CONFIG_SMP
++
++extern int __set_cpus_allowed_ptr(struct task_struct *p, struct affinity_context *ctx);
++
++static inline cpumask_t *alloc_user_cpus_ptr(int node)
++{
++	/*
++	 * See do_set_cpus_allowed() for the rcu_head usage.
++	 */
++	int size = max_t(int, cpumask_size(), sizeof(struct rcu_head));
++
++	return kmalloc_node(size, GFP_KERNEL, node);
++}
++
++#else /* !CONFIG_SMP: */
++
++static inline int __set_cpus_allowed_ptr(struct task_struct *p,
++					 struct affinity_context *ctx)
++{
++	return set_cpus_allowed_ptr(p, ctx->new_mask);
++}
++
++static inline cpumask_t *alloc_user_cpus_ptr(int node)
++{
++	return NULL;
++}
++
++#endif /* !CONFIG_SMP */
++
++#ifdef CONFIG_RT_MUTEXES
++
++static inline int __rt_effective_prio(struct task_struct *pi_task, int prio)
++{
++	if (pi_task)
++		prio = min(prio, pi_task->prio);
++
++	return prio;
++}
++
++static inline int rt_effective_prio(struct task_struct *p, int prio)
++{
++	struct task_struct *pi_task = rt_mutex_get_top_task(p);
++
++	return __rt_effective_prio(pi_task, prio);
++}
++
++#else /* !CONFIG_RT_MUTEXES: */
++
++static inline int rt_effective_prio(struct task_struct *p, int prio)
++{
++	return prio;
++}
++
++#endif /* !CONFIG_RT_MUTEXES */
++
++extern int __sched_setscheduler(struct task_struct *p, const struct sched_attr *attr, bool user, bool pi);
++extern int __sched_setaffinity(struct task_struct *p, struct affinity_context *ctx);
++extern void __setscheduler_prio(struct task_struct *p, int prio);
++
++/*
++ * Context API
++ */
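++/*
++ * Lock and return @p's runqueue while @p is running or queued on it;
++ * spin while @p is migrating between runqueues. If the task is fully
++ * dequeued, return its last rq unlocked with *plock set to NULL.
++ */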
++static inline struct rq *__task_access_lock(struct task_struct *p, raw_spinlock_t **plock)
++{
++	struct rq *rq;
++	for (;;) {
++		rq = task_rq(p);
++		if (p->on_cpu || task_on_rq_queued(p)) {
++			raw_spin_lock(&rq->lock);
++			if (likely((p->on_cpu || task_on_rq_queued(p)) && rq == task_rq(p))) {
++				*plock = &rq->lock;
++				return rq;
++			}
++			raw_spin_unlock(&rq->lock);
++		} else if (task_on_rq_migrating(p)) {
++			do {
++				cpu_relax();
++			} while (unlikely(task_on_rq_migrating(p)));
++		} else {
++			*plock = NULL;
++			return rq;
++		}
++	}
++}
++
++static inline void __task_access_unlock(struct task_struct *p, raw_spinlock_t *lock)
++{
++	if (NULL != lock)
++		raw_spin_unlock(lock);
++}
++
++void check_task_changed(struct task_struct *p, struct rq *rq);
++
++/*
++ * RQ related inlined functions
++ */
++
++/*
++ * This routine assumes that the idle task is always in the queue.
++ */
++static inline struct task_struct *sched_rq_first_task(struct rq *rq)
++{
++	const struct list_head *head = &rq->queue.heads[sched_rq_prio_idx(rq)];
++
++	return list_first_entry(head, struct task_struct, sq_node);
++}
++
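++/*
++ * Return the task queued after @p. When @p is the last entry of its
++ * priority list (its ->next points back into queue.heads[]), advance to
++ * the first task of the next non-empty priority level in the bitmap.
++ */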
++static inline struct task_struct *sched_rq_next_task(struct task_struct *p, struct rq *rq)
++{
++	struct list_head *next = p->sq_node.next;
++
++	if (&rq->queue.heads[0] <= next && next < &rq->queue.heads[SCHED_LEVELS]) {
++		struct list_head *head;
++		unsigned long idx = next - &rq->queue.heads[0];
++
++		idx = find_next_bit(rq->queue.bitmap, SCHED_QUEUE_BITS,
++				    sched_idx2prio(idx, rq) + 1);
++		head = &rq->queue.heads[sched_prio2idx(idx, rq)];
++
++		return list_first_entry(head, struct task_struct, sq_node);
++	}
++
++	return list_next_entry(p, sq_node);
++}
++
++extern void requeue_task(struct task_struct *p, struct rq *rq);
++
++#ifdef ALT_SCHED_DEBUG
++extern void alt_sched_debug(void);
++#else
++static inline void alt_sched_debug(void) {}
++#endif
++
++extern int sched_yield_type;
++
++#ifdef CONFIG_SMP
++extern cpumask_t sched_rq_pending_mask ____cacheline_aligned_in_smp;
++
++DECLARE_STATIC_KEY_FALSE(sched_smt_present);
++DECLARE_PER_CPU_ALIGNED(cpumask_t *, sched_cpu_llc_mask);
++
++extern cpumask_t sched_smt_mask ____cacheline_aligned_in_smp;
++
++extern cpumask_t *const sched_idle_mask;
++extern cpumask_t *const sched_sg_idle_mask;
++extern cpumask_t *const sched_pcore_idle_mask;
++extern cpumask_t *const sched_ecore_idle_mask;
++
++extern struct rq *move_queued_task(struct rq *rq, struct task_struct *p, int new_cpu);
++
++typedef bool (*idle_select_func_t)(struct cpumask *dstp, const struct cpumask *src1p,
++				   const struct cpumask *src2p);
++
++extern idle_select_func_t idle_select_func;
++#endif
++
++/* balance callback */
++#ifdef CONFIG_SMP
++extern struct balance_callback *splice_balance_callbacks(struct rq *rq);
++extern void balance_callbacks(struct rq *rq, struct balance_callback *head);
++#else
++
++static inline struct balance_callback *splice_balance_callbacks(struct rq *rq)
++{
++	return NULL;
++}
++
++static inline void balance_callbacks(struct rq *rq, struct balance_callback *head)
++{
++}
++
++#endif
++
++#endif /* _KERNEL_SCHED_ALT_CORE_H */
+diff --git a/kernel/sched/alt_debug.c b/kernel/sched/alt_debug.c
+new file mode 100644
+index 000000000000..1dbd7eb6a434
+--- /dev/null
++++ b/kernel/sched/alt_debug.c
+@@ -0,0 +1,32 @@
++/*
++ * kernel/sched/alt_debug.c
++ *
++ * Print the alt scheduler debugging details
++ *
++ * Author: Alfred Chen
++ * Date  : 2020
++ */
++#include "sched.h"
++#include "linux/sched/debug.h"
++
++/*
++ * This allows printing both to /proc/sched_debug and
++ * to the console
++ */
++#define SEQ_printf(m, x...)			\
++ do {						\
++	if (m)					\
++		seq_printf(m, x);		\
++	else					\
++		pr_cont(x);			\
++ } while (0)
++
++void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
++			  struct seq_file *m)
++{
++	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),
++						get_nr_threads(p));
++}
++
++void proc_sched_set_task(struct task_struct *p)
++{}
+diff --git a/kernel/sched/alt_sched.h b/kernel/sched/alt_sched.h
+new file mode 100644
+index 000000000000..09c9e9f80bf4
+--- /dev/null
++++ b/kernel/sched/alt_sched.h
+@@ -0,0 +1,971 @@
++#ifndef _KERNEL_SCHED_ALT_SCHED_H
++#define _KERNEL_SCHED_ALT_SCHED_H
++
++#include <linux/context_tracking.h>
++#include <linux/profile.h>
++#include <linux/stop_machine.h>
++#include <linux/syscalls.h>
++#include <linux/tick.h>
++
++#include <trace/events/power.h>
++#include <trace/events/sched.h>
++
++#include "../workqueue_internal.h"
++
++#include "cpupri.h"
++
++#ifdef CONFIG_CGROUP_SCHED
++/* task group related information */
++struct task_group {
++	struct cgroup_subsys_state css;
++
++	struct rcu_head rcu;
++	struct list_head list;
++
++	struct task_group *parent;
++	struct list_head siblings;
++	struct list_head children;
++};
++
++extern struct task_group *sched_create_group(struct task_group *parent);
++extern void sched_online_group(struct task_group *tg,
++			       struct task_group *parent);
++extern void sched_destroy_group(struct task_group *tg);
++extern void sched_release_group(struct task_group *tg);
++#endif /* CONFIG_CGROUP_SCHED */
++
++#define MIN_SCHED_NORMAL_PRIO	(32)
++/*
++ * levels: RT(0-24), reserved(25-31), NORMAL(32-63), cpu idle task(64)
++ *
++ * -- BMQ --
++ * NORMAL: (lower boost range 12, NICE_WIDTH 40, higher boost range 12) / 2
++ * -- PDS --
++ * NORMAL: SCHED_EDGE_DELTA + ((NICE_WIDTH 40) / 2)
++ */
++#define SCHED_LEVELS		(64 + 1)
++
++#define IDLE_TASK_SCHED_PRIO	(SCHED_LEVELS - 1)
++
++#ifdef CONFIG_SCHED_DEBUG
++# define SCHED_WARN_ON(x)	WARN_ONCE(x, #x)
++extern void resched_latency_warn(int cpu, u64 latency);
++#else
++# define SCHED_WARN_ON(x)	({ (void)(x), 0; })
++static inline void resched_latency_warn(int cpu, u64 latency) {}
++#endif
++
++/*
++ * Increase resolution of nice-level calculations for 64-bit architectures.
++ * The extra resolution improves shares distribution and load balancing of
++ * low-weight task groups (eg. nice +19 on an autogroup), deeper taskgroup
++ * hierarchies, especially on larger systems. This is not a user-visible change
++ * and does not change the user-interface for setting shares/weights.
++ *
++ * We increase resolution only if we have enough bits to allow this increased
++ * resolution (i.e. 64-bit). The costs for increasing resolution when 32-bit
++ * are pretty high and the returns do not justify the increased costs.
++ *
++ * Really only required when CONFIG_FAIR_GROUP_SCHED=y is also set, but to
++ * increase coverage and consistency always enable it on 64-bit platforms.
++ */
++#ifdef CONFIG_64BIT
++# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
++# define scale_load(w)		((w) << SCHED_FIXEDPOINT_SHIFT)
++# define scale_load_down(w) \
++({ \
++	unsigned long __w = (w); \
++	if (__w) \
++		__w = max(2UL, __w >> SCHED_FIXEDPOINT_SHIFT); \
++	__w; \
++})
++#else
++# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT)
++# define scale_load(w)		(w)
++# define scale_load_down(w)	(w)
++#endif
++
++/*
++ * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
++ */
++#ifdef CONFIG_SCHED_DEBUG
++# define const_debug __read_mostly
++#else
++# define const_debug const
++#endif
++
++/* task_struct::on_rq states: */
++#define TASK_ON_RQ_QUEUED	1
++#define TASK_ON_RQ_MIGRATING	2
++
++static inline int task_on_rq_queued(struct task_struct *p)
++{
++	return p->on_rq == TASK_ON_RQ_QUEUED;
++}
++
++static inline int task_on_rq_migrating(struct task_struct *p)
++{
++	return READ_ONCE(p->on_rq) == TASK_ON_RQ_MIGRATING;
++}
++
++/* Wake flags. The first three directly map to some SD flag value */
++#define WF_EXEC         0x02 /* Wakeup after exec; maps to SD_BALANCE_EXEC */
++#define WF_FORK         0x04 /* Wakeup after fork; maps to SD_BALANCE_FORK */
++#define WF_TTWU         0x08 /* Wakeup;            maps to SD_BALANCE_WAKE */
++
++#define WF_SYNC         0x10 /* Waker goes to sleep after wakeup */
++#define WF_MIGRATED     0x20 /* Internal use, task got migrated */
++#define WF_CURRENT_CPU  0x40 /* Prefer to move the wakee to the current CPU. */
++
++#ifdef CONFIG_SMP
++static_assert(WF_EXEC == SD_BALANCE_EXEC);
++static_assert(WF_FORK == SD_BALANCE_FORK);
++static_assert(WF_TTWU == SD_BALANCE_WAKE);
++#endif
++
++#define SCHED_QUEUE_BITS	(SCHED_LEVELS - 1)
++
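++/*
++ * Per-rq priority queue: one list head per priority level plus a bitmap
++ * of non-empty levels for O(1) lookup of the first queued task.
++ */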
++struct sched_queue {
++	DECLARE_BITMAP(bitmap, SCHED_QUEUE_BITS);
++	struct list_head heads[SCHED_LEVELS];
++};
++
++struct rq;
++struct cpuidle_state;
++
++struct balance_callback {
++	struct balance_callback *next;
++	void (*func)(struct rq *rq);
++};
++
++typedef void (*balance_func_t)(struct rq *rq, int cpu);
++typedef void (*set_idle_mask_func_t)(unsigned int cpu, struct cpumask *dstp);
++typedef void (*clear_idle_mask_func_t)(int cpu, struct cpumask *dstp);
++
++struct balance_arg {
++	struct task_struct	*task;
++	int			active;
++	cpumask_t		*cpumask;
++};
++
++/*
++ * This is the main, per-CPU runqueue data structure.
++ * This data should only be modified by the local cpu.
++ */
++struct rq {
++	/* runqueue lock: */
++	raw_spinlock_t			lock;
++
++	struct task_struct __rcu	*curr;
++	struct task_struct		*idle;
++	struct task_struct		*stop;
++	struct mm_struct		*prev_mm;
++
++	struct sched_queue		queue		____cacheline_aligned;
++
++	int				prio;
++#ifdef CONFIG_SCHED_PDS
++	int				prio_idx;
++	u64				time_edge;
++#endif
++
++	/* switch count */
++	u64 nr_switches;
++
++	atomic_t nr_iowait;
++
++#ifdef CONFIG_SCHED_DEBUG
++	u64 last_seen_need_resched_ns;
++	int ticks_without_resched;
++#endif
++
++#ifdef CONFIG_MEMBARRIER
++	int membarrier_state;
++#endif
++
++	set_idle_mask_func_t	set_idle_mask_func;
++	clear_idle_mask_func_t	clear_idle_mask_func;
++
++#ifdef CONFIG_SMP
++	int cpu;		/* cpu of this runqueue */
++	bool online;
++
++	unsigned int		ttwu_pending;
++	unsigned char		nohz_idle_balance;
++	unsigned char		idle_balance;
++
++#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
++	struct sched_avg	avg_irq;
++#endif
++
++	balance_func_t		balance_func;
++	struct balance_arg	active_balance_arg		____cacheline_aligned;
++	struct cpu_stop_work	active_balance_work;
++
++	struct balance_callback	*balance_callback;
++#ifdef CONFIG_HOTPLUG_CPU
++	struct rcuwait		hotplug_wait;
++#endif
++	unsigned int		nr_pinned;
++
++#endif /* CONFIG_SMP */
++#ifdef CONFIG_IRQ_TIME_ACCOUNTING
++	u64 prev_irq_time;
++#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
++#ifdef CONFIG_PARAVIRT
++	u64 prev_steal_time;
++#endif /* CONFIG_PARAVIRT */
++#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
++	u64 prev_steal_time_rq;
++#endif /* CONFIG_PARAVIRT_TIME_ACCOUNTING */
++
++	/* For general cpu load util */
++	s32 load_history;
++	u64 load_block;
++	u64 load_stamp;
++
++	/* calc_load related fields */
++	unsigned long calc_load_update;
++	long calc_load_active;
++
++	/* Ensure that all clocks are in the same cache line */
++	u64			clock ____cacheline_aligned;
++	u64			clock_task;
++
++	unsigned int  nr_running;
++	unsigned long nr_uninterruptible;
++
++#ifdef CONFIG_SCHED_HRTICK
++#ifdef CONFIG_SMP
++	call_single_data_t hrtick_csd;
++#endif
++	struct hrtimer		hrtick_timer;
++	ktime_t			hrtick_time;
++#endif
++
++#ifdef CONFIG_SCHEDSTATS
++
++	/* latency stats */
++	struct sched_info rq_sched_info;
++	unsigned long long rq_cpu_time;
++	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
++
++	/* sys_sched_yield() stats */
++	unsigned int yld_count;
++
++	/* schedule() stats */
++	unsigned int sched_switch;
++	unsigned int sched_count;
++	unsigned int sched_goidle;
++
++	/* try_to_wake_up() stats */
++	unsigned int ttwu_count;
++	unsigned int ttwu_local;
++#endif /* CONFIG_SCHEDSTATS */
++
++#ifdef CONFIG_CPU_IDLE
++	/* Must be inspected within a rcu lock section */
++	struct cpuidle_state *idle_state;
++#endif
++
++#ifdef CONFIG_NO_HZ_COMMON
++#ifdef CONFIG_SMP
++	call_single_data_t	nohz_csd;
++#endif
++	atomic_t		nohz_flags;
++#endif /* CONFIG_NO_HZ_COMMON */
++
++	/* Scratch cpumask to be temporarily used under rq_lock */
++	cpumask_var_t		scratch_mask;
++};
++
++extern unsigned int sysctl_sched_base_slice;
++
++extern unsigned long rq_load_util(struct rq *rq, unsigned long max);
++
++extern unsigned long calc_load_update;
++extern atomic_long_t calc_load_tasks;
++
++extern void calc_global_load_tick(struct rq *this_rq);
++extern long calc_load_fold_active(struct rq *this_rq, long adjust);
++
++DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
++#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
++#define this_rq()		this_cpu_ptr(&runqueues)
++#define task_rq(p)		cpu_rq(task_cpu(p))
++#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
++#define raw_rq()		raw_cpu_ptr(&runqueues)
++
++#ifdef CONFIG_SMP
++#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
++void register_sched_domain_sysctl(void);
++void unregister_sched_domain_sysctl(void);
++#else
++static inline void register_sched_domain_sysctl(void)
++{
++}
++static inline void unregister_sched_domain_sysctl(void)
++{
++}
++#endif
++
++extern bool sched_smp_initialized;
++
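++/* One slot per topology level in sched_cpu_topo_masks, closest level first. */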
++enum {
++#ifdef CONFIG_SCHED_SMT
++	SMT_LEVEL_SPACE_HOLDER,
++#endif
++	COREGROUP_LEVEL_SPACE_HOLDER,
++	CORE_LEVEL_SPACE_HOLDER,
++	OTHER_LEVEL_SPACE_HOLDER,
++	NR_CPU_AFFINITY_LEVELS
++};
++
++DECLARE_PER_CPU_ALIGNED(cpumask_t [NR_CPU_AFFINITY_LEVELS], sched_cpu_topo_masks);
++
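++/*
++ * Find a cpu in @cpumask, preferring the closest topology level: @mask
++ * walks a sched_cpu_topo_masks array from the nearest level outwards.
++ */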
++static inline int
++__best_mask_cpu(const cpumask_t *cpumask, const cpumask_t *mask)
++{
++	int cpu;
++
++	while ((cpu = cpumask_any_and(cpumask, mask)) >= nr_cpu_ids)
++		mask++;
++
++	return cpu;
++}
++
++static inline int best_mask_cpu(int cpu, const cpumask_t *mask)
++{
++	return __best_mask_cpu(mask, per_cpu(sched_cpu_topo_masks, cpu));
++}
++
++#endif
++
++#ifndef arch_scale_freq_tick
++static __always_inline
++void arch_scale_freq_tick(void)
++{
++}
++#endif
++
++#ifndef arch_scale_freq_capacity
++static __always_inline
++unsigned long arch_scale_freq_capacity(int cpu)
++{
++	return SCHED_CAPACITY_SCALE;
++}
++#endif
++
++static inline u64 __rq_clock_broken(struct rq *rq)
++{
++	return READ_ONCE(rq->clock);
++}
++
++static inline u64 rq_clock(struct rq *rq)
++{
++	/*
++	 * Relax lockdep_assert_held() checking as in VRQ: a call to
++	 * sched_info_xxxx() may not hold rq->lock, so skip
++	 * lockdep_assert_held(&rq->lock);
++	 */
++	return rq->clock;
++}
++
++static inline u64 rq_clock_task(struct rq *rq)
++{
++	/*
++	 * Relax lockdep_assert_held() checking as in VRQ: a call to
++	 * sched_info_xxxx() may not hold rq->lock, so skip
++	 * lockdep_assert_held(&rq->lock);
++	 */
++	return rq->clock_task;
++}
++
++/*
++ * {de,en}queue flags:
++ *
++ * DEQUEUE_SLEEP  - task is no longer runnable
++ * ENQUEUE_WAKEUP - task just became runnable
++ *
++ */
++
++#define DEQUEUE_SLEEP		0x01
++
++#define ENQUEUE_WAKEUP		0x01
++
++
++/*
++ * Below are the scheduler APIs used by other kernel code.
++ * They take a dummy rq_flags argument.
++ * TODO: BMQ needs to support these APIs for compatibility with mainline
++ * scheduler code.
++ */
++struct rq_flags {
++	unsigned long flags;
++};
++
++struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
++	__acquires(rq->lock);
++
++struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
++	__acquires(p->pi_lock)
++	__acquires(rq->lock);
++
++static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
++	__releases(rq->lock)
++{
++	raw_spin_unlock(&rq->lock);
++}
++
++static inline void
++task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
++	__releases(rq->lock)
++	__releases(p->pi_lock)
++{
++	raw_spin_unlock(&rq->lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
++}
++
++static inline void
++rq_lock(struct rq *rq, struct rq_flags *rf)
++	__acquires(rq->lock)
++{
++	raw_spin_lock(&rq->lock);
++}
++
++static inline void
++rq_unlock(struct rq *rq, struct rq_flags *rf)
++	__releases(rq->lock)
++{
++	raw_spin_unlock(&rq->lock);
++}
++
++static inline void
++rq_lock_irq(struct rq *rq, struct rq_flags *rf)
++	__acquires(rq->lock)
++{
++	raw_spin_lock_irq(&rq->lock);
++}
++
++static inline void
++rq_unlock_irq(struct rq *rq, struct rq_flags *rf)
++	__releases(rq->lock)
++{
++	raw_spin_unlock_irq(&rq->lock);
++}
++
++static inline struct rq *
++this_rq_lock_irq(struct rq_flags *rf)
++	__acquires(rq->lock)
++{
++	struct rq *rq;
++
++	local_irq_disable();
++	rq = this_rq();
++	raw_spin_lock(&rq->lock);
++
++	return rq;
++}
++
++static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
++{
++	return &rq->lock;
++}
++
++static inline raw_spinlock_t *rq_lockp(struct rq *rq)
++{
++	return __rq_lockp(rq);
++}
++
++static inline void lockdep_assert_rq_held(struct rq *rq)
++{
++	lockdep_assert_held(__rq_lockp(rq));
++}
++
++extern void raw_spin_rq_lock_nested(struct rq *rq, int subclass);
++extern void raw_spin_rq_unlock(struct rq *rq);
++
++static inline void raw_spin_rq_lock(struct rq *rq)
++{
++	raw_spin_rq_lock_nested(rq, 0);
++}
++
++static inline void raw_spin_rq_lock_irq(struct rq *rq)
++{
++	local_irq_disable();
++	raw_spin_rq_lock(rq);
++}
++
++static inline void raw_spin_rq_unlock_irq(struct rq *rq)
++{
++	raw_spin_rq_unlock(rq);
++	local_irq_enable();
++}
++
++static inline int task_current(struct rq *rq, struct task_struct *p)
++{
++	return rq->curr == p;
++}
++
++static inline bool task_on_cpu(struct task_struct *p)
++{
++	return p->on_cpu;
++}
++
++extern struct static_key_false sched_schedstats;
++
++#ifdef CONFIG_CPU_IDLE
++static inline void idle_set_state(struct rq *rq,
++				  struct cpuidle_state *idle_state)
++{
++	rq->idle_state = idle_state;
++}
++
++static inline struct cpuidle_state *idle_get_state(struct rq *rq)
++{
++	WARN_ON(!rcu_read_lock_held());
++	return rq->idle_state;
++}
++#else
++static inline void idle_set_state(struct rq *rq,
++				  struct cpuidle_state *idle_state)
++{
++}
++
++static inline struct cpuidle_state *idle_get_state(struct rq *rq)
++{
++	return NULL;
++}
++#endif
++
++static inline int cpu_of(const struct rq *rq)
++{
++#ifdef CONFIG_SMP
++	return rq->cpu;
++#else
++	return 0;
++#endif
++}
++
++extern void resched_cpu(int cpu);
++
++#include "stats.h"
++
++#ifdef CONFIG_NO_HZ_COMMON
++#define NOHZ_BALANCE_KICK_BIT	0
++#define NOHZ_STATS_KICK_BIT	1
++
++#define NOHZ_BALANCE_KICK	BIT(NOHZ_BALANCE_KICK_BIT)
++#define NOHZ_STATS_KICK		BIT(NOHZ_STATS_KICK_BIT)
++
++#define NOHZ_KICK_MASK	(NOHZ_BALANCE_KICK | NOHZ_STATS_KICK)
++
++#define nohz_flags(cpu)	(&cpu_rq(cpu)->nohz_flags)
++
++/* TODO: needed?
++extern void nohz_balance_exit_idle(struct rq *rq);
++#else
++static inline void nohz_balance_exit_idle(struct rq *rq) { }
++*/
++#endif
++
++#ifdef CONFIG_IRQ_TIME_ACCOUNTING
++struct irqtime {
++	u64			total;
++	u64			tick_delta;
++	u64			irq_start_time;
++	struct u64_stats_sync	sync;
++};
++
++DECLARE_PER_CPU(struct irqtime, cpu_irqtime);
++
++/*
++ * Returns the irqtime minus the softirq time computed by ksoftirqd.
++ * Otherwise ksoftirqd's sum_exec_runtime would have its own runtime
++ * subtracted and never move forward.
++ */
++static inline u64 irq_time_read(int cpu)
++{
++	struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
++	unsigned int seq;
++	u64 total;
++
++	do {
++		seq = __u64_stats_fetch_begin(&irqtime->sync);
++		total = irqtime->total;
++	} while (__u64_stats_fetch_retry(&irqtime->sync, seq));
++
++	return total;
++}
++#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
++
++#ifdef CONFIG_CPU_FREQ
++DECLARE_PER_CPU(struct update_util_data __rcu *, cpufreq_update_util_data);
++#endif /* CONFIG_CPU_FREQ */
++
++#ifdef CONFIG_NO_HZ_FULL
++extern int __init sched_tick_offload_init(void);
++#else
++static inline int sched_tick_offload_init(void) { return 0; }
++#endif
++
++#ifdef arch_scale_freq_capacity
++#ifndef arch_scale_freq_invariant
++#define arch_scale_freq_invariant()	(true)
++#endif
++#else /* arch_scale_freq_capacity */
++#define arch_scale_freq_invariant()	(false)
++#endif
++
++#ifdef CONFIG_SMP
++unsigned long sugov_effective_cpu_perf(int cpu, unsigned long actual,
++				 unsigned long min,
++				 unsigned long max);
++#endif /* CONFIG_SMP */
++
++extern void schedule_idle(void);
++
++#define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
++
++/*
++ * !! For sched_setattr_nocheck() (kernel) only !!
++ *
++ * This is actually gross. :(
++ *
++ * It is used to make schedutil kworker(s) higher priority than SCHED_DEADLINE
++ * tasks, but still be able to sleep. We need this on platforms that cannot
++ * atomically change clock frequency. Remove once fast switching will be
++ * available on such platforms.
++ *
++ * SUGOV stands for SchedUtil GOVernor.
++ */
++#define SCHED_FLAG_SUGOV	0x10000000
++
++#ifdef CONFIG_MEMBARRIER
++/*
++ * The scheduler provides memory barriers required by membarrier between:
++ * - prior user-space memory accesses and store to rq->membarrier_state,
++ * - store to rq->membarrier_state and following user-space memory accesses.
++ * In the same way it provides those guarantees around store to rq->curr.
++ */
++static inline void membarrier_switch_mm(struct rq *rq,
++					struct mm_struct *prev_mm,
++					struct mm_struct *next_mm)
++{
++	int membarrier_state;
++
++	if (prev_mm == next_mm)
++		return;
++
++	membarrier_state = atomic_read(&next_mm->membarrier_state);
++	if (READ_ONCE(rq->membarrier_state) == membarrier_state)
++		return;
++
++	WRITE_ONCE(rq->membarrier_state, membarrier_state);
++}
++#else
++static inline void membarrier_switch_mm(struct rq *rq,
++					struct mm_struct *prev_mm,
++					struct mm_struct *next_mm)
++{
++}
++#endif
++
++#ifdef CONFIG_NUMA
++extern int sched_numa_find_closest(const struct cpumask *cpus, int cpu);
++#else
++static inline int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
++{
++	return nr_cpu_ids;
++}
++#endif
++
++extern void swake_up_all_locked(struct swait_queue_head *q);
++extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
++
++extern int try_to_wake_up(struct task_struct *tsk, unsigned int state, int wake_flags);
++
++#ifdef CONFIG_PREEMPT_DYNAMIC
++extern int preempt_dynamic_mode;
++extern int sched_dynamic_mode(const char *str);
++extern void sched_dynamic_update(int mode);
++#endif
++
++static inline void nohz_run_idle_balance(int cpu) { }
++
++static inline
++unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util,
++				  struct task_struct *p)
++{
++	return util;
++}
++
++static inline bool uclamp_rq_is_capped(struct rq *rq) { return false; }
++
++#ifdef CONFIG_SCHED_MM_CID
++
++#define SCHED_MM_CID_PERIOD_NS	(100ULL * 1000000)	/* 100ms */
++#define MM_CID_SCAN_DELAY	100			/* 100ms */
++
++extern raw_spinlock_t cid_lock;
++extern int use_cid_lock;
++
++extern void sched_mm_cid_migrate_from(struct task_struct *t);
++extern void sched_mm_cid_migrate_to(struct rq *dst_rq, struct task_struct *t);
++extern void task_tick_mm_cid(struct rq *rq, struct task_struct *curr);
++extern void init_sched_mm_cid(struct task_struct *t);
++
++static inline void __mm_cid_put(struct mm_struct *mm, int cid)
++{
++	if (cid < 0)
++		return;
++	cpumask_clear_cpu(cid, mm_cidmask(mm));
++}
++
++/*
++ * The per-mm/cpu cid can have the MM_CID_LAZY_PUT flag set or transition to
++ * the MM_CID_UNSET state without holding the rq lock, but the rq lock needs to
++ * be held to transition to other states.
++ *
++ * State transitions synchronized with cmpxchg or try_cmpxchg need to be
++ * consistent across cpus, which prevents use of this_cpu_cmpxchg.
++ */
++static inline void mm_cid_put_lazy(struct task_struct *t)
++{
++	struct mm_struct *mm = t->mm;
++	struct mm_cid __percpu *pcpu_cid = mm->pcpu_cid;
++	int cid;
++
++	lockdep_assert_irqs_disabled();
++	cid = __this_cpu_read(pcpu_cid->cid);
++	if (!mm_cid_is_lazy_put(cid) ||
++	    !try_cmpxchg(&this_cpu_ptr(pcpu_cid)->cid, &cid, MM_CID_UNSET))
++		return;
++	__mm_cid_put(mm, mm_cid_clear_lazy_put(cid));
++}
++
++static inline int mm_cid_pcpu_unset(struct mm_struct *mm)
++{
++	struct mm_cid __percpu *pcpu_cid = mm->pcpu_cid;
++	int cid, res;
++
++	lockdep_assert_irqs_disabled();
++	cid = __this_cpu_read(pcpu_cid->cid);
++	for (;;) {
++		if (mm_cid_is_unset(cid))
++			return MM_CID_UNSET;
++		/*
++		 * Attempt transition from valid or lazy-put to unset.
++		 */
++		res = cmpxchg(&this_cpu_ptr(pcpu_cid)->cid, cid, MM_CID_UNSET);
++		if (res == cid)
++			break;
++		cid = res;
++	}
++	return cid;
++}
++
++static inline void mm_cid_put(struct mm_struct *mm)
++{
++	int cid;
++
++	lockdep_assert_irqs_disabled();
++	cid = mm_cid_pcpu_unset(mm);
++	if (cid == MM_CID_UNSET)
++		return;
++	__mm_cid_put(mm, mm_cid_clear_lazy_put(cid));
++}
++
++static inline int __mm_cid_try_get(struct mm_struct *mm)
++{
++	struct cpumask *cpumask;
++	int cid;
++
++	cpumask = mm_cidmask(mm);
++	/*
++	 * Retry finding first zero bit if the mask is temporarily
++	 * filled. This only happens during concurrent remote-clear
++	 * which owns a cid without holding a rq lock.
++	 */
++	for (;;) {
++		cid = cpumask_first_zero(cpumask);
++		if (cid < nr_cpu_ids)
++			break;
++		cpu_relax();
++	}
++	if (cpumask_test_and_set_cpu(cid, cpumask))
++		return -1;
++	return cid;
++}
++
++/*
++ * Save a snapshot of the current runqueue time of this cpu
++ * with the per-cpu cid value, allowing to estimate how recently it was used.
++ */
++static inline void mm_cid_snapshot_time(struct rq *rq, struct mm_struct *mm)
++{
++	struct mm_cid *pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu_of(rq));
++
++	lockdep_assert_rq_held(rq);
++	WRITE_ONCE(pcpu_cid->time, rq->clock);
++}
++
++static inline int __mm_cid_get(struct rq *rq, struct mm_struct *mm)
++{
++	int cid;
++
++	/*
++	 * All allocations (even those using the cid_lock) are lock-free. If
++	 * use_cid_lock is set, hold the cid_lock to perform cid allocation to
++	 * guarantee forward progress.
++	 */
++	if (!READ_ONCE(use_cid_lock)) {
++		cid = __mm_cid_try_get(mm);
++		if (cid >= 0)
++			goto end;
++		raw_spin_lock(&cid_lock);
++	} else {
++		raw_spin_lock(&cid_lock);
++		cid = __mm_cid_try_get(mm);
++		if (cid >= 0)
++			goto unlock;
++	}
++
++	/*
++	 * cid concurrently allocated. Retry while forcing following
++	 * allocations to use the cid_lock to ensure forward progress.
++	 */
++	WRITE_ONCE(use_cid_lock, 1);
++	/*
++	 * Set use_cid_lock before allocation. Only care about program order
++	 * because this is only required for forward progress.
++	 */
++	barrier();
++	/*
++	 * Retry until it succeeds. It is guaranteed to eventually succeed once
++	 * all newly arriving allocations observe the use_cid_lock flag set.
++	 */
++	do {
++		cid = __mm_cid_try_get(mm);
++		cpu_relax();
++	} while (cid < 0);
++	/*
++	 * Allocate before clearing use_cid_lock. Only care about
++	 * program order because this is for forward progress.
++	 */
++	barrier();
++	WRITE_ONCE(use_cid_lock, 0);
++unlock:
++	raw_spin_unlock(&cid_lock);
++end:
++	mm_cid_snapshot_time(rq, mm);
++	return cid;
++}
++
++static inline int mm_cid_get(struct rq *rq, struct mm_struct *mm)
++{
++	struct mm_cid __percpu *pcpu_cid = mm->pcpu_cid;
++	struct cpumask *cpumask;
++	int cid;
++
++	lockdep_assert_rq_held(rq);
++	cpumask = mm_cidmask(mm);
++	cid = __this_cpu_read(pcpu_cid->cid);
++	if (mm_cid_is_valid(cid)) {
++		mm_cid_snapshot_time(rq, mm);
++		return cid;
++	}
++	if (mm_cid_is_lazy_put(cid)) {
++		if (try_cmpxchg(&this_cpu_ptr(pcpu_cid)->cid, &cid, MM_CID_UNSET))
++			__mm_cid_put(mm, mm_cid_clear_lazy_put(cid));
++	}
++	cid = __mm_cid_get(rq, mm);
++	__this_cpu_write(pcpu_cid->cid, cid);
++	return cid;
++}
++
++static inline void switch_mm_cid(struct rq *rq,
++				 struct task_struct *prev,
++				 struct task_struct *next)
++{
++	/*
++	 * Provide a memory barrier between rq->curr store and load of
++	 * {prev,next}->mm->pcpu_cid[cpu] on rq->curr->mm transition.
++	 *
++	 * Should be adapted if context_switch() is modified.
++	 */
++	if (!next->mm) {                                // to kernel
++		/*
++		 * user -> kernel transition does not guarantee a barrier, but
++		 * we can use the fact that it performs an atomic operation in
++		 * mmgrab().
++		 */
++		if (prev->mm)                           // from user
++			smp_mb__after_mmgrab();
++		/*
++		 * kernel -> kernel transition does not change rq->curr->mm
++		 * state. It stays NULL.
++		 */
++	} else {                                        // to user
++		/*
++		 * kernel -> user transition does not provide a barrier
++		 * between rq->curr store and load of {prev,next}->mm->pcpu_cid[cpu].
++		 * Provide it here.
++		 */
++		if (!prev->mm)                          // from kernel
++			smp_mb();
++		/*
++		 * user -> user transition guarantees a memory barrier through
++		 * switch_mm() when current->mm changes. If current->mm is
++		 * unchanged, no barrier is needed.
++		 */
++	}
++	if (prev->mm_cid_active) {
++		mm_cid_snapshot_time(rq, prev->mm);
++		mm_cid_put_lazy(prev);
++		prev->mm_cid = -1;
++	}
++	if (next->mm_cid_active)
++		next->last_mm_cid = next->mm_cid = mm_cid_get(rq, next->mm);
++}
++
++#else
++static inline void switch_mm_cid(struct rq *rq, struct task_struct *prev, struct task_struct *next) { }
++static inline void sched_mm_cid_migrate_from(struct task_struct *t) { }
++static inline void sched_mm_cid_migrate_to(struct rq *dst_rq, struct task_struct *t) { }
++static inline void task_tick_mm_cid(struct rq *rq, struct task_struct *curr) { }
++static inline void init_sched_mm_cid(struct task_struct *t) { }
++#endif
++
++#ifdef CONFIG_SMP
++extern struct balance_callback balance_push_callback;
++
++static inline void
++queue_balance_callback(struct rq *rq,
++		       struct balance_callback *head,
++		       void (*func)(struct rq *rq))
++{
++	lockdep_assert_rq_held(rq);
++
++	/*
++	 * Don't (re)queue an already queued item; nor queue anything when
++	 * balance_push() is active, see the comment with
++	 * balance_push_callback.
++	 */
++	if (unlikely(head->next || rq->balance_callback == &balance_push_callback))
++		return;
++
++	head->func = func;
++	head->next = rq->balance_callback;
++	rq->balance_callback = head;
++}
++#endif /* CONFIG_SMP */
++
++#ifdef CONFIG_SCHED_BMQ
++#include "bmq.h"
++#endif
++#ifdef CONFIG_SCHED_PDS
++#include "pds.h"
++#endif
++
++#endif /* _KERNEL_SCHED_ALT_SCHED_H */
+diff --git a/kernel/sched/alt_topology.c b/kernel/sched/alt_topology.c
+new file mode 100644
+index 000000000000..2266138ee783
+--- /dev/null
++++ b/kernel/sched/alt_topology.c
+@@ -0,0 +1,350 @@
++#include "alt_core.h"
++#include "alt_topology.h"
++
++#ifdef CONFIG_SMP
++
++static cpumask_t sched_pcore_mask ____cacheline_aligned_in_smp;
++
++static int __init sched_pcore_mask_setup(char *str)
++{
++	if (cpulist_parse(str, &sched_pcore_mask))
++		pr_warn("sched/alt: pcore_cpus= incorrect CPU range\n");
++
++	return 0;
++}
++__setup("pcore_cpus=", sched_pcore_mask_setup);
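++/*
++ * Illustrative usage: booting with "pcore_cpus=0-7" would mark CPUs 0-7 as
++ * performance cores; online CPUs outside the list are then treated as
++ * E-cores by sched_init_topology() below.
++ */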
++
++/*
++ * set/clear idle mask functions
++ */
++#ifdef CONFIG_SCHED_SMT
++static void set_idle_mask_smt(unsigned int cpu, struct cpumask *dstp)
++{
++	cpumask_set_cpu(cpu, dstp);
++	if (cpumask_subset(cpu_smt_mask(cpu), sched_idle_mask))
++		cpumask_or(sched_sg_idle_mask, sched_sg_idle_mask, cpu_smt_mask(cpu));
++}
++
++static void clear_idle_mask_smt(int cpu, struct cpumask *dstp)
++{
++	cpumask_clear_cpu(cpu, dstp);
++	cpumask_andnot(sched_sg_idle_mask, sched_sg_idle_mask, cpu_smt_mask(cpu));
++}
++#endif
++
++static void set_idle_mask_pcore(unsigned int cpu, struct cpumask *dstp)
++{
++	cpumask_set_cpu(cpu, dstp);
++	cpumask_set_cpu(cpu, sched_pcore_idle_mask);
++}
++
++static void clear_idle_mask_pcore(int cpu, struct cpumask *dstp)
++{
++	cpumask_clear_cpu(cpu, dstp);
++	cpumask_clear_cpu(cpu, sched_pcore_idle_mask);
++}
++
++static void set_idle_mask_ecore(unsigned int cpu, struct cpumask *dstp)
++{
++	cpumask_set_cpu(cpu, dstp);
++	cpumask_set_cpu(cpu, sched_ecore_idle_mask);
++}
++
++static void clear_idle_mask_ecore(int cpu, struct cpumask *dstp)
++{
++	cpumask_clear_cpu(cpu, dstp);
++	cpumask_clear_cpu(cpu, sched_ecore_idle_mask);
++}
++
++/*
++ * Idle cpu/rq selection functions
++ */
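++/*
++ * Note: these selectors appear to treat @src2p as the first element of an
++ * array of idle masks ordered most-preferred-first: src2p + 1 (and src2p + 2
++ * in the p1p2 variant) are tried before the plain idle mask, preferring e.g.
++ * fully idle SMT groups or idle P-cores over merely idle sibling CPUs.
++ */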
++#ifdef CONFIG_SCHED_SMT
++static bool p1_idle_select_func(struct cpumask *dstp, const struct cpumask *src1p,
++				 const struct cpumask *src2p)
++{
++	return cpumask_and(dstp, src1p, src2p + 1)	||
++	       cpumask_and(dstp, src1p, src2p);
++}
++#endif
++
++static bool p1p2_idle_select_func(struct cpumask *dstp, const struct cpumask *src1p,
++					const struct cpumask *src2p)
++{
++	return cpumask_and(dstp, src1p, src2p + 1)	||
++	       cpumask_and(dstp, src1p, src2p + 2)	||
++	       cpumask_and(dstp, src1p, src2p);
++}
++
++/* common balance functions */
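++/*
++ * Runs in stopper context on the CPU being balanced from: re-validate under
++ * pi_lock and rq lock that the task is still queued on this rq, still allowed
++ * on one of the target CPUs and not migration-disabled before moving it.
++ */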
++static int active_balance_cpu_stop(void *data)
++{
++	struct balance_arg *arg = data;
++	struct task_struct *p = arg->task;
++	struct rq *rq = this_rq();
++	unsigned long flags;
++	cpumask_t tmp;
++
++	local_irq_save(flags);
++
++	raw_spin_lock(&p->pi_lock);
++	raw_spin_lock(&rq->lock);
++
++	arg->active = 0;
++
++	if (task_on_rq_queued(p) && task_rq(p) == rq &&
++	    cpumask_and(&tmp, p->cpus_ptr, arg->cpumask) &&
++	    !is_migration_disabled(p)) {
++		int dcpu = __best_mask_cpu(&tmp, per_cpu(sched_cpu_llc_mask, cpu_of(rq)));
++		rq = move_queued_task(rq, p, dcpu);
++	}
++
++	raw_spin_unlock(&rq->lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++
++	return 0;
++}
++
++/* trigger_active_balance - for @rq */
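++/*
++ * Only trylock is used on @rq's lock: balancing is opportunistic, so skipping
++ * a contended rq is cheaper than waiting. @src_rq's lock is dropped around
++ * stop_one_cpu_nowait(), presumably so the stopper wakeup cannot deadlock
++ * against a runqueue lock still held here.
++ */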
++static inline int
++trigger_active_balance(struct rq *src_rq, struct rq *rq, cpumask_t *target_mask)
++{
++	struct balance_arg *arg;
++	unsigned long flags;
++	struct task_struct *p;
++	int res;
++
++	if (!raw_spin_trylock_irqsave(&rq->lock, flags))
++		return 0;
++
++	arg = &rq->active_balance_arg;
++	res = (1 == rq->nr_running) &&
++	      !is_migration_disabled((p = sched_rq_first_task(rq))) &&
++	      cpumask_intersects(p->cpus_ptr, target_mask) &&
++	      !arg->active;
++	if (res) {
++		arg->task = p;
++		arg->cpumask = target_mask;
++
++		arg->active = 1;
++	}
++
++	raw_spin_unlock_irqrestore(&rq->lock, flags);
++
++	if (res) {
++		preempt_disable();
++		raw_spin_unlock(&src_rq->lock);
++
++		stop_one_cpu_nowait(cpu_of(rq), active_balance_cpu_stop, arg,
++				    &rq->active_balance_work);
++
++		preempt_enable();
++		raw_spin_lock(&src_rq->lock);
++	}
++
++	return res;
++}
++
++static inline int
++ecore_source_balance(struct rq *rq, cpumask_t *single_task_mask, cpumask_t *target_mask)
++{
++	if (cpumask_andnot(single_task_mask, single_task_mask, &sched_pcore_mask)) {
++		int i, cpu = cpu_of(rq);
++
++		for_each_cpu_wrap(i, single_task_mask, cpu)
++			if (trigger_active_balance(rq, cpu_rq(i), target_mask))
++				return 1;
++	}
++
++	return 0;
++}
++
++static DEFINE_PER_CPU(struct balance_callback, active_balance_head);
++
++#ifdef CONFIG_SCHED_SMT
++static inline int
++smt_pcore_source_balance(struct rq *rq, cpumask_t *single_task_mask, cpumask_t *target_mask)
++{
++	cpumask_t smt_single_mask;
++
++	if (cpumask_and(&smt_single_mask, single_task_mask, &sched_smt_mask)) {
++		int i, cpu = cpu_of(rq);
++
++		for_each_cpu_wrap(i, &smt_single_mask, cpu) {
++			if (cpumask_subset(cpu_smt_mask(i), &smt_single_mask) &&
++			    trigger_active_balance(rq, cpu_rq(i), target_mask))
++				return 1;
++		}
++	}
++
++	return 0;
++}
++
++/* smt p core balance functions */
++static inline void smt_pcore_balance(struct rq *rq)
++{
++	cpumask_t single_task_mask;
++
++	if (cpumask_andnot(&single_task_mask, cpu_active_mask, sched_idle_mask) &&
++	    cpumask_andnot(&single_task_mask, &single_task_mask, &sched_rq_pending_mask) &&
++	    (/* smt core group balance */
++	     (static_key_count(&sched_smt_present.key) > 1 &&
++	      smt_pcore_source_balance(rq, &single_task_mask, sched_sg_idle_mask)
++	     ) ||
++	     /* e core to idle smt core balance */
++	     ecore_source_balance(rq, &single_task_mask, sched_sg_idle_mask)))
++		return;
++}
++
++static void smt_pcore_balance_func(struct rq *rq, const int cpu)
++{
++	if (cpumask_test_cpu(cpu, sched_sg_idle_mask))
++		queue_balance_callback(rq, &per_cpu(active_balance_head, cpu), smt_pcore_balance);
++}
++
++/* smt balance functions */
++static inline void smt_balance(struct rq *rq)
++{
++	cpumask_t single_task_mask;
++
++	if (cpumask_andnot(&single_task_mask, cpu_active_mask, sched_idle_mask) &&
++	    cpumask_andnot(&single_task_mask, &single_task_mask, &sched_rq_pending_mask) &&
++	    static_key_count(&sched_smt_present.key) > 1 &&
++	    smt_pcore_source_balance(rq, &single_task_mask, sched_sg_idle_mask))
++		return;
++}
++
++static void smt_balance_func(struct rq *rq, const int cpu)
++{
++	if (cpumask_test_cpu(cpu, sched_sg_idle_mask))
++		queue_balance_callback(rq, &per_cpu(active_balance_head, cpu), smt_balance);
++}
++
++/* e core balance functions */
++static inline void ecore_balance(struct rq *rq)
++{
++	cpumask_t single_task_mask;
++
++	if (cpumask_andnot(&single_task_mask, cpu_active_mask, sched_idle_mask) &&
++	    cpumask_andnot(&single_task_mask, &single_task_mask, &sched_rq_pending_mask) &&
++	    /* smt occupied p core to idle e core balance */
++	    smt_pcore_source_balance(rq, &single_task_mask, sched_ecore_idle_mask))
++		return;
++}
++
++static void ecore_balance_func(struct rq *rq, const int cpu)
++{
++	queue_balance_callback(rq, &per_cpu(active_balance_head, cpu), ecore_balance);
++}
++#endif /* CONFIG_SCHED_SMT */
++
++/* p core balance functions */
++static inline void pcore_balance(struct rq *rq)
++{
++	cpumask_t single_task_mask;
++
++	if (cpumask_andnot(&single_task_mask, cpu_active_mask, sched_idle_mask) &&
++	    cpumask_andnot(&single_task_mask, &single_task_mask, &sched_rq_pending_mask) &&
++	    /* idle e core to p core balance */
++	    ecore_source_balance(rq, &single_task_mask, sched_pcore_idle_mask))
++		return;
++}
++
++static void pcore_balance_func(struct rq *rq, const int cpu)
++{
++	queue_balance_callback(rq, &per_cpu(active_balance_head, cpu), pcore_balance);
++}
++
++#ifdef ALT_SCHED_DEBUG
++#define SCHED_DEBUG_INFO(...)	printk(KERN_INFO __VA_ARGS__)
++#else
++#define SCHED_DEBUG_INFO(...)	do { } while(0)
++#endif
++
++#define SET_IDLE_SELECT_FUNC(func)						\
++do {										\
++	idle_select_func = func;						\
++	printk(KERN_INFO "sched: "#func"\n");					\
++} while (0)
++
++#define SET_RQ_BALANCE_FUNC(rq, cpu, func)					\
++do {										\
++	rq->balance_func = func;						\
++	SCHED_DEBUG_INFO("sched: cpu#%02d -> "#func"\n", cpu);			\
++} while (0)
++
++#define SET_RQ_IDLE_MASK_FUNC(rq, cpu, set_func, clear_func)			\
++do {										\
++	rq->set_idle_mask_func		= set_func;				\
++	rq->clear_idle_mask_func	= clear_func;				\
++	SCHED_DEBUG_INFO("sched: cpu#%02d -> "#set_func" "#clear_func"\n", cpu);\
++} while (0)
++
++void sched_init_topology(void)
++{
++	int cpu;
++	struct rq *rq;
++	cpumask_t sched_ecore_mask = { CPU_BITS_NONE };
++	int ecore_present = 0;
++
++#ifdef CONFIG_SCHED_SMT
++	if (!cpumask_empty(&sched_smt_mask))
++		printk(KERN_INFO "sched: smt mask: 0x%08lx\n", sched_smt_mask.bits[0]);
++#endif
++
++	if (!cpumask_empty(&sched_pcore_mask)) {
++		cpumask_andnot(&sched_ecore_mask, cpu_online_mask, &sched_pcore_mask);
++		printk(KERN_INFO "sched: pcore mask: 0x%08lx, ecore mask: 0x%08lx\n",
++		       sched_pcore_mask.bits[0], sched_ecore_mask.bits[0]);
++
++		ecore_present = !cpumask_empty(&sched_ecore_mask);
++	}
++
++#ifdef CONFIG_SCHED_SMT
++	/* idle select function */
++	if (cpumask_equal(&sched_smt_mask, cpu_online_mask)) {
++		SET_IDLE_SELECT_FUNC(p1_idle_select_func);
++	} else
++#endif
++	if (!cpumask_empty(&sched_pcore_mask)) {
++		SET_IDLE_SELECT_FUNC(p1p2_idle_select_func);
++	}
++
++	for_each_online_cpu(cpu) {
++		rq = cpu_rq(cpu);
++		/* take the chance to reset the time slice for idle tasks */
++		rq->idle->time_slice = sysctl_sched_base_slice;
++
++#ifdef CONFIG_SCHED_SMT
++		if (cpumask_weight(cpu_smt_mask(cpu)) > 1) {
++			SET_RQ_IDLE_MASK_FUNC(rq, cpu, set_idle_mask_smt, clear_idle_mask_smt);
++
++			if (cpumask_test_cpu(cpu, &sched_pcore_mask) &&
++			    !cpumask_intersects(&sched_ecore_mask, &sched_smt_mask)) {
++				SET_RQ_BALANCE_FUNC(rq, cpu, smt_pcore_balance_func);
++			} else {
++				SET_RQ_BALANCE_FUNC(rq, cpu, smt_balance_func);
++			}
++
++			continue;
++		}
++#endif
++		/* !SMT or only one cpu in sg */
++		if (cpumask_test_cpu(cpu, &sched_pcore_mask)) {
++			SET_RQ_IDLE_MASK_FUNC(rq, cpu, set_idle_mask_pcore, clear_idle_mask_pcore);
++
++			if (ecore_present)
++				SET_RQ_BALANCE_FUNC(rq, cpu, pcore_balance_func);
++
++			continue;
++		}
++		if (cpumask_test_cpu(cpu, &sched_ecore_mask)) {
++			SET_RQ_IDLE_MASK_FUNC(rq, cpu, set_idle_mask_ecore, clear_idle_mask_ecore);
++#ifdef CONFIG_SCHED_SMT
++			if (cpumask_intersects(&sched_pcore_mask, &sched_smt_mask))
++				SET_RQ_BALANCE_FUNC(rq, cpu, ecore_balance_func);
++#endif
++		}
++	}
++}
++#endif /* CONFIG_SMP */
+diff --git a/kernel/sched/alt_topology.h b/kernel/sched/alt_topology.h
+new file mode 100644
+index 000000000000..076174cd2bc6
+--- /dev/null
++++ b/kernel/sched/alt_topology.h
+@@ -0,0 +1,6 @@
++#ifndef _KERNEL_SCHED_ALT_TOPOLOGY_H
++#define _KERNEL_SCHED_ALT_TOPOLOGY_H
++
++extern void sched_init_topology(void);
++
++#endif /* _KERNEL_SCHED_ALT_TOPOLOGY_H */
+diff --git a/kernel/sched/bmq.h b/kernel/sched/bmq.h
+new file mode 100644
+index 000000000000..5a7835246ec3
+--- /dev/null
++++ b/kernel/sched/bmq.h
+@@ -0,0 +1,103 @@
++#ifndef _KERNEL_SCHED_BMQ_H
++#define _KERNEL_SCHED_BMQ_H
++
++#define ALT_SCHED_NAME "BMQ"
++
++/*
++ * BMQ only routines
++ */
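++/*
++ * p->boost_prio is a per-task offset added on top of p->prio when computing
++ * the BMQ queue level: more negative means more boosted. boost_task() lowers
++ * it (bounded per policy), deboost_task() raises it back toward
++ * MAX_PRIORITY_ADJ as the task keeps consuming its time slices.
++ */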
++static inline void boost_task(struct task_struct *p, int n)
++{
++	int limit;
++
++	switch (p->policy) {
++	case SCHED_NORMAL:
++		limit = -MAX_PRIORITY_ADJ;
++		break;
++	case SCHED_BATCH:
++		limit = 0;
++		break;
++	default:
++		return;
++	}
++
++	p->boost_prio = max(limit, p->boost_prio - n);
++}
++
++static inline void deboost_task(struct task_struct *p)
++{
++	if (p->boost_prio < MAX_PRIORITY_ADJ)
++		p->boost_prio++;
++}
++
++/*
++ * Common interfaces
++ */
++static inline void sched_timeslice_imp(const int timeslice_ms) {}
++
++/* This API is used in task_prio(); the return value is read by human users */
++static inline int
++task_sched_prio_normal(const struct task_struct *p, const struct rq *rq)
++{
++	return p->prio + p->boost_prio - MIN_NORMAL_PRIO;
++}
++
++static inline int task_sched_prio(const struct task_struct *p)
++{
++	return (p->prio < MIN_NORMAL_PRIO) ? (p->prio >> 2) :
++		MIN_SCHED_NORMAL_PRIO + (p->prio + p->boost_prio - MIN_NORMAL_PRIO) / 2;
++}
++
++#define TASK_SCHED_PRIO_IDX(p, rq, idx, prio)	\
++	prio = task_sched_prio(p);		\
++	idx = prio;
++
++static inline int sched_prio2idx(int prio, struct rq *rq)
++{
++	return prio;
++}
++
++static inline int sched_idx2prio(int idx, struct rq *rq)
++{
++	return idx;
++}
++
++static inline int sched_rq_prio_idx(struct rq *rq)
++{
++	return rq->prio;
++}
++
++static inline int task_running_nice(struct task_struct *p)
++{
++	return (p->prio + p->boost_prio > DEFAULT_PRIO);
++}
++
++static inline void sched_update_rq_clock(struct rq *rq) {}
++
++static inline void sched_task_renew(struct task_struct *p, const struct rq *rq)
++{
++	deboost_task(p);
++}
++
++static inline void sched_task_sanity_check(struct task_struct *p, struct rq *rq) {}
++static inline void sched_task_fork(struct task_struct *p, struct rq *rq) {}
++
++static inline void do_sched_yield_type_1(struct task_struct *p, struct rq *rq)
++{
++	p->boost_prio = MAX_PRIORITY_ADJ;
++}
++
++static inline void sched_task_ttwu(struct task_struct *p)
++{
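++	/* one boost step per ~4 ms slept: 2^22 ns is roughly the default time slice */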
++	s64 delta = this_rq()->clock_task - p->last_ran;
++
++	if (likely(delta > 0))
++		boost_task(p, delta >> 22);
++}
++
++static inline void sched_task_deactivate(struct task_struct *p, struct rq *rq)
++{
++	boost_task(p, 1);
++}
++
++#endif /* _KERNEL_SCHED_BMQ_H */
+diff --git a/kernel/sched/build_policy.c b/kernel/sched/build_policy.c
+index fae1f5c921eb..1e06434b5b9b 100644
+--- a/kernel/sched/build_policy.c
++++ b/kernel/sched/build_policy.c
+@@ -49,15 +49,21 @@
+ 
+ #include "idle.c"
+ 
++#ifndef CONFIG_SCHED_ALT
+ #include "rt.c"
++#endif
+ 
+ #ifdef CONFIG_SMP
++#ifndef CONFIG_SCHED_ALT
+ # include "cpudeadline.c"
++#endif
+ # include "pelt.c"
+ #endif
+ 
+ #include "cputime.c"
++#ifndef CONFIG_SCHED_ALT
+ #include "deadline.c"
++#endif
+ 
+ #ifdef CONFIG_SCHED_CLASS_EXT
+ # include "ext.c"
+diff --git a/kernel/sched/build_utility.c b/kernel/sched/build_utility.c
+index 80a3df49ab47..58d04aa73634 100644
+--- a/kernel/sched/build_utility.c
++++ b/kernel/sched/build_utility.c
+@@ -56,6 +56,10 @@
+ 
+ #include "clock.c"
+ 
++#ifdef CONFIG_SCHED_ALT
++# include "alt_topology.c"
++#endif
++
+ #ifdef CONFIG_CGROUP_CPUACCT
+ # include "cpuacct.c"
+ #endif
+@@ -84,7 +88,9 @@
+ 
+ #ifdef CONFIG_SMP
+ # include "cpupri.c"
++#ifndef CONFIG_SCHED_ALT
+ # include "stop_task.c"
++#endif
+ # include "topology.c"
+ #endif
+ 
+diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
+index c6ba15388ea7..56590821f074 100644
+--- a/kernel/sched/cpufreq_schedutil.c
++++ b/kernel/sched/cpufreq_schedutil.c
+@@ -197,6 +197,7 @@ unsigned long sugov_effective_cpu_perf(int cpu, unsigned long actual,
+ 
+ static void sugov_get_util(struct sugov_cpu *sg_cpu, unsigned long boost)
+ {
++#ifndef CONFIG_SCHED_ALT
+ 	unsigned long min, max, util = scx_cpuperf_target(sg_cpu->cpu);
+ 
+ 	if (!scx_switched_all())
+@@ -205,6 +206,10 @@ static void sugov_get_util(struct sugov_cpu *sg_cpu, unsigned long boost)
+ 	util = max(util, boost);
+ 	sg_cpu->bw_min = min;
+ 	sg_cpu->util = sugov_effective_cpu_perf(sg_cpu->cpu, util, min, max);
++#else /* CONFIG_SCHED_ALT */
++	sg_cpu->bw_min = 0;
++	sg_cpu->util = rq_load_util(cpu_rq(sg_cpu->cpu), arch_scale_cpu_capacity(sg_cpu->cpu));
++#endif /* CONFIG_SCHED_ALT */
+ }
+ 
+ /**
+@@ -364,8 +369,10 @@ static inline bool sugov_hold_freq(struct sugov_cpu *sg_cpu) { return false; }
+  */
+ static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu)
+ {
++#ifndef CONFIG_SCHED_ALT
+ 	if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_min)
+ 		sg_cpu->sg_policy->limits_changed = true;
++#endif
+ }
+ 
+ static inline bool sugov_update_single_common(struct sugov_cpu *sg_cpu,
+@@ -684,6 +691,7 @@ static int sugov_kthread_create(struct sugov_policy *sg_policy)
+ 	}
+ 
+ 	ret = sched_setattr_nocheck(thread, &attr);
++
+ 	if (ret) {
+ 		kthread_stop(thread);
+ 		pr_warn("%s: failed to set SCHED_DEADLINE\n", __func__);
+diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
+index 0bed0fa1acd9..031affa09446 100644
+--- a/kernel/sched/cputime.c
++++ b/kernel/sched/cputime.c
+@@ -126,7 +126,7 @@ void account_user_time(struct task_struct *p, u64 cputime)
+ 	p->utime += cputime;
+ 	account_group_user_time(p, cputime);
+ 
+-	index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;
++	index = task_running_nice(p) ? CPUTIME_NICE : CPUTIME_USER;
+ 
+ 	/* Add user time to cpustat. */
+ 	task_group_account_field(p, index, cputime);
+@@ -150,7 +150,7 @@ void account_guest_time(struct task_struct *p, u64 cputime)
+ 	p->gtime += cputime;
+ 
+ 	/* Add guest time to cpustat. */
+-	if (task_nice(p) > 0) {
++	if (task_running_nice(p)) {
+ 		task_group_account_field(p, CPUTIME_NICE, cputime);
+ 		cpustat[CPUTIME_GUEST_NICE] += cputime;
+ 	} else {
+@@ -288,7 +288,7 @@ static inline u64 account_other_time(u64 max)
+ #ifdef CONFIG_64BIT
+ static inline u64 read_sum_exec_runtime(struct task_struct *t)
+ {
+-	return t->se.sum_exec_runtime;
++	return tsk_seruntime(t);
+ }
+ #else
+ static u64 read_sum_exec_runtime(struct task_struct *t)
+@@ -298,7 +298,7 @@ static u64 read_sum_exec_runtime(struct task_struct *t)
+ 	struct rq *rq;
+ 
+ 	rq = task_rq_lock(t, &rf);
+-	ns = t->se.sum_exec_runtime;
++	ns = tsk_seruntime(t);
+ 	task_rq_unlock(rq, t, &rf);
+ 
+ 	return ns;
+@@ -623,7 +623,7 @@ void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
+ void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
+ {
+ 	struct task_cputime cputime = {
+-		.sum_exec_runtime = p->se.sum_exec_runtime,
++		.sum_exec_runtime = tsk_seruntime(p),
+ 	};
+ 
+ 	if (task_cputime(p, &cputime.utime, &cputime.stime))
+diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
+index f4035c7a0fa1..4df4ad88d6a9 100644
+--- a/kernel/sched/debug.c
++++ b/kernel/sched/debug.c
+@@ -7,6 +7,7 @@
+  * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
+  */
+ 
++#ifndef CONFIG_SCHED_ALT
+ /*
+  * This allows printing both to /sys/kernel/debug/sched/debug and
+  * to the console
+@@ -215,6 +216,7 @@ static const struct file_operations sched_scaling_fops = {
+ };
+ 
+ #endif /* SMP */
++#endif /* !CONFIG_SCHED_ALT */
+ 
+ #ifdef CONFIG_PREEMPT_DYNAMIC
+ 
+@@ -278,6 +280,7 @@ static const struct file_operations sched_dynamic_fops = {
+ 
+ #endif /* CONFIG_PREEMPT_DYNAMIC */
+ 
++#ifndef CONFIG_SCHED_ALT
+ __read_mostly bool sched_debug_verbose;
+ 
+ #ifdef CONFIG_SMP
+@@ -468,9 +471,11 @@ static const struct file_operations fair_server_period_fops = {
+ 	.llseek		= seq_lseek,
+ 	.release	= single_release,
+ };
++#endif /* !CONFIG_SCHED_ALT */
+ 
+ static struct dentry *debugfs_sched;
+ 
++#ifndef CONFIG_SCHED_ALT
+ static void debugfs_fair_server_init(void)
+ {
+ 	struct dentry *d_fair;
+@@ -491,6 +496,7 @@ static void debugfs_fair_server_init(void)
+ 		debugfs_create_file("period", 0644, d_cpu, (void *) cpu, &fair_server_period_fops);
+ 	}
+ }
++#endif /* !CONFIG_SCHED_ALT */
+ 
+ static __init int sched_init_debug(void)
+ {
+@@ -498,14 +504,17 @@ static __init int sched_init_debug(void)
+ 
+ 	debugfs_sched = debugfs_create_dir("sched", NULL);
+ 
++#ifndef CONFIG_SCHED_ALT
+ 	debugfs_create_file("features", 0644, debugfs_sched, NULL, &sched_feat_fops);
+ 	debugfs_create_file_unsafe("verbose", 0644, debugfs_sched, &sched_debug_verbose, &sched_verbose_fops);
++#endif /* !CONFIG_SCHED_ALT */
+ #ifdef CONFIG_PREEMPT_DYNAMIC
+ 	debugfs_create_file("preempt", 0644, debugfs_sched, NULL, &sched_dynamic_fops);
+ #endif
+ 
+ 	debugfs_create_u32("base_slice_ns", 0644, debugfs_sched, &sysctl_sched_base_slice);
+ 
++#ifndef CONFIG_SCHED_ALT
+ 	debugfs_create_u32("latency_warn_ms", 0644, debugfs_sched, &sysctl_resched_latency_warn_ms);
+ 	debugfs_create_u32("latency_warn_once", 0644, debugfs_sched, &sysctl_resched_latency_warn_once);
+ 
+@@ -530,13 +539,17 @@ static __init int sched_init_debug(void)
+ #endif
+ 
+ 	debugfs_create_file("debug", 0444, debugfs_sched, NULL, &sched_debug_fops);
++#endif /* !CONFIG_SCHED_ALT */
+ 
++#ifndef CONFIG_SCHED_ALT
+ 	debugfs_fair_server_init();
++#endif /* !CONFIG_SCHED_ALT */
+ 
+ 	return 0;
+ }
+ late_initcall(sched_init_debug);
+ 
++#ifndef CONFIG_SCHED_ALT
+ #ifdef CONFIG_SMP
+ 
+ static cpumask_var_t		sd_sysctl_cpus;
+@@ -1288,6 +1301,7 @@ void proc_sched_set_task(struct task_struct *p)
+ 	memset(&p->stats, 0, sizeof(p->stats));
+ #endif
+ }
++#endif /* !CONFIG_SCHED_ALT */
+ 
+ void resched_latency_warn(int cpu, u64 latency)
+ {
+diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
+index d2f096bb274c..36071f4b7b7f 100644
+--- a/kernel/sched/idle.c
++++ b/kernel/sched/idle.c
+@@ -424,6 +424,7 @@ void cpu_startup_entry(enum cpuhp_state state)
+ 		do_idle();
+ }
+ 
++#ifndef CONFIG_SCHED_ALT
+ /*
+  * idle-task scheduling class.
+  */
+@@ -538,3 +539,4 @@ DEFINE_SCHED_CLASS(idle) = {
+ 	.switched_to		= switched_to_idle,
+ 	.update_curr		= update_curr_idle,
+ };
++#endif
+diff --git a/kernel/sched/pds.h b/kernel/sched/pds.h
+new file mode 100644
+index 000000000000..fe3099071eb7
+--- /dev/null
++++ b/kernel/sched/pds.h
+@@ -0,0 +1,139 @@
++#ifndef _KERNEL_SCHED_PDS_H
++#define _KERNEL_SCHED_PDS_H
++
++#define ALT_SCHED_NAME "PDS"
++
++static const u64 RT_MASK = ((1ULL << MIN_SCHED_NORMAL_PRIO) - 1);
++
++#define SCHED_NORMAL_PRIO_NUM	(32)
++#define SCHED_EDGE_DELTA	(SCHED_NORMAL_PRIO_NUM - NICE_WIDTH / 2)
++
++/* PDS assume SCHED_NORMAL_PRIO_NUM is power of 2 */
++#define SCHED_NORMAL_PRIO_MOD(x)	((x) & (SCHED_NORMAL_PRIO_NUM - 1))
++
++/* default time slice 4ms -> shift 22, 2 time slice slots -> shift 23 */
++static __read_mostly int sched_timeslice_shift = 23;
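++/* i.e. rq->time_edge advances once per ~8 ms (two 4 ms slices) of rq clock */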
++
++/*
++ * Common interfaces
++ */
++static inline int
++task_sched_prio_normal(const struct task_struct *p, const struct rq *rq)
++{
++	u64 sched_dl = max(p->deadline, rq->time_edge);
++
++#ifdef ALT_SCHED_DEBUG
++	if (WARN_ONCE(sched_dl - rq->time_edge > NORMAL_PRIO_NUM - 1,
++		      "pds: task_sched_prio_normal() delta %lld\n", sched_dl - rq->time_edge))
++		return SCHED_NORMAL_PRIO_NUM - 1;
++#endif
++
++	return sched_dl - rq->time_edge;
++}
++
++static inline int task_sched_prio(const struct task_struct *p)
++{
++	return (p->prio < MIN_NORMAL_PRIO) ? (p->prio >> 2) :
++		MIN_SCHED_NORMAL_PRIO + task_sched_prio_normal(p, task_rq(p));
++}
++
++#define TASK_SCHED_PRIO_IDX(p, rq, idx, prio)							\
++	if (p->prio < MIN_NORMAL_PRIO) {							\
++		prio = p->prio >> 2;								\
++		idx = prio;									\
++	} else {										\
++		u64 sched_dl = max(p->deadline, rq->time_edge);					\
++		prio = MIN_SCHED_NORMAL_PRIO + sched_dl - rq->time_edge;			\
++		idx = MIN_SCHED_NORMAL_PRIO + SCHED_NORMAL_PRIO_MOD(sched_dl);			\
++	}
++
++static inline int sched_prio2idx(int sched_prio, struct rq *rq)
++{
++	return (IDLE_TASK_SCHED_PRIO == sched_prio || sched_prio < MIN_SCHED_NORMAL_PRIO) ?
++		sched_prio :
++		MIN_SCHED_NORMAL_PRIO + SCHED_NORMAL_PRIO_MOD(sched_prio + rq->time_edge);
++}
++
++static inline int sched_idx2prio(int sched_idx, struct rq *rq)
++{
++	return (sched_idx < MIN_SCHED_NORMAL_PRIO) ?
++		sched_idx :
++		MIN_SCHED_NORMAL_PRIO + SCHED_NORMAL_PRIO_MOD(sched_idx - rq->time_edge);
++}
++
++static inline int sched_rq_prio_idx(struct rq *rq)
++{
++	return rq->prio_idx;
++}
++
++static inline int task_running_nice(struct task_struct *p)
++{
++	return (p->prio > DEFAULT_PRIO);
++}
++
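++/*
++ * Advance rq->time_edge to the current rq clock in time-slice units and
++ * rotate the normal-priority levels to match: levels whose deadline slots
++ * have expired are spliced onto the level now representing "current", and
++ * the queue bitmap is shifted accordingly. RT levels (RT_MASK) are preserved.
++ */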
++static inline void sched_update_rq_clock(struct rq *rq)
++{
++	struct list_head head;
++	u64 old = rq->time_edge;
++	u64 now = rq->clock >> sched_timeslice_shift;
++	u64 prio, delta;
++	DECLARE_BITMAP(normal, SCHED_QUEUE_BITS);
++
++	if (now == old)
++		return;
++
++	rq->time_edge = now;
++	delta = min_t(u64, SCHED_NORMAL_PRIO_NUM, now - old);
++	INIT_LIST_HEAD(&head);
++
++	prio = MIN_SCHED_NORMAL_PRIO;
++	for_each_set_bit_from(prio, rq->queue.bitmap, MIN_SCHED_NORMAL_PRIO + delta)
++		list_splice_tail_init(rq->queue.heads + MIN_SCHED_NORMAL_PRIO +
++				      SCHED_NORMAL_PRIO_MOD(prio + old), &head);
++
++	bitmap_shift_right(normal, rq->queue.bitmap, delta, SCHED_QUEUE_BITS);
++	if (!list_empty(&head)) {
++		u64 idx = MIN_SCHED_NORMAL_PRIO + SCHED_NORMAL_PRIO_MOD(now);
++
++		__list_splice(&head, rq->queue.heads + idx, rq->queue.heads[idx].next);
++		set_bit(MIN_SCHED_NORMAL_PRIO, normal);
++	}
++	bitmap_replace(rq->queue.bitmap, normal, rq->queue.bitmap,
++		       (const unsigned long *)&RT_MASK, SCHED_QUEUE_BITS);
++
++	if (rq->prio < MIN_SCHED_NORMAL_PRIO || IDLE_TASK_SCHED_PRIO == rq->prio)
++		return;
++
++	rq->prio = max_t(u64, MIN_SCHED_NORMAL_PRIO, rq->prio - delta);
++	rq->prio_idx = sched_prio2idx(rq->prio, rq);
++}
++
++static inline void sched_task_renew(struct task_struct *p, const struct rq *rq)
++{
++	if (p->prio >= MIN_NORMAL_PRIO)
++		p->deadline = rq->time_edge + SCHED_EDGE_DELTA +
++			      (p->static_prio - (MAX_PRIO - NICE_WIDTH)) / 2;
++}
++
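++/*
++ * With the defaults above, a renewed deadline lands SCHED_EDGE_DELTA to
++ * SCHED_EDGE_DELTA + NICE_WIDTH / 2 - 1 slots (12..31) past rq->time_edge;
++ * sched_task_sanity_check() below clamps any deadline to that upper bound.
++ */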
++static inline void sched_task_sanity_check(struct task_struct *p, struct rq *rq)
++{
++	u64 max_dl = rq->time_edge + SCHED_EDGE_DELTA + NICE_WIDTH / 2 - 1;
++	if (unlikely(p->deadline > max_dl))
++		p->deadline = max_dl;
++}
++
++static inline void sched_task_fork(struct task_struct *p, struct rq *rq)
++{
++	sched_task_renew(p, rq);
++}
++
++static inline void do_sched_yield_type_1(struct task_struct *p, struct rq *rq)
++{
++	p->time_slice = sysctl_sched_base_slice;
++	sched_task_renew(p, rq);
++}
++
++static inline void sched_task_ttwu(struct task_struct *p) {}
++static inline void sched_task_deactivate(struct task_struct *p, struct rq *rq) {}
++
++#endif /* _KERNEL_SCHED_PDS_H */
+diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
+index a9c65d97b3ca..a66431e6527c 100644
+--- a/kernel/sched/pelt.c
++++ b/kernel/sched/pelt.c
+@@ -266,6 +266,7 @@ ___update_load_avg(struct sched_avg *sa, unsigned long load)
+ 	WRITE_ONCE(sa->util_avg, sa->util_sum / divider);
+ }
+ 
++#ifndef CONFIG_SCHED_ALT
+ /*
+  * sched_entity:
+  *
+@@ -383,8 +384,9 @@ int update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
+ 
+ 	return 0;
+ }
++#endif
+ 
+-#ifdef CONFIG_SCHED_HW_PRESSURE
++#if defined(CONFIG_SCHED_HW_PRESSURE) && !defined(CONFIG_SCHED_ALT)
+ /*
+  * hardware:
+  *
+@@ -468,6 +470,7 @@ int update_irq_load_avg(struct rq *rq, u64 running)
+ }
+ #endif
+ 
++#ifndef CONFIG_SCHED_ALT
+ /*
+  * Load avg and utilization metrics need to be updated periodically and before
+  * consumption. This function updates the metrics for all subsystems except for
+@@ -487,3 +490,4 @@ bool update_other_load_avgs(struct rq *rq)
+ 		update_hw_load_avg(rq_clock_task(rq), rq, hw_pressure) |
+ 		update_irq_load_avg(rq, 0);
+ }
++#endif /* !CONFIG_SCHED_ALT */
+diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h
+index f4f6a0875c66..ee780f2b6c17 100644
+--- a/kernel/sched/pelt.h
++++ b/kernel/sched/pelt.h
+@@ -1,14 +1,16 @@
+ #ifdef CONFIG_SMP
+ #include "sched-pelt.h"
+ 
++#ifndef CONFIG_SCHED_ALT
+ int __update_load_avg_blocked_se(u64 now, struct sched_entity *se);
+ int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se);
+ int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq);
+ int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
+ int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
+ bool update_other_load_avgs(struct rq *rq);
++#endif
+ 
+-#ifdef CONFIG_SCHED_HW_PRESSURE
++#if defined(CONFIG_SCHED_HW_PRESSURE) && !defined(CONFIG_SCHED_ALT)
+ int update_hw_load_avg(u64 now, struct rq *rq, u64 capacity);
+ 
+ static inline u64 hw_load_avg(struct rq *rq)
+@@ -45,6 +47,7 @@ static inline u32 get_pelt_divider(struct sched_avg *avg)
+ 	return PELT_MIN_DIVIDER + avg->period_contrib;
+ }
+ 
++#ifndef CONFIG_SCHED_ALT
+ static inline void cfs_se_util_change(struct sched_avg *avg)
+ {
+ 	unsigned int enqueued;
+@@ -181,9 +184,11 @@ static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
+ 	return rq_clock_pelt(rq_of(cfs_rq));
+ }
+ #endif
++#endif /* CONFIG_SCHED_ALT */
+ 
+ #else
+ 
++#ifndef CONFIG_SCHED_ALT
+ static inline int
+ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
+ {
+@@ -201,6 +206,7 @@ update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
+ {
+ 	return 0;
+ }
++#endif
+ 
+ static inline int
+ update_hw_load_avg(u64 now, struct rq *rq, u64 capacity)
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index c03b3d7b320e..08ee4a9cd6a5 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -5,6 +5,10 @@
+ #ifndef _KERNEL_SCHED_SCHED_H
+ #define _KERNEL_SCHED_SCHED_H
+ 
++#ifdef CONFIG_SCHED_ALT
++#include "alt_sched.h"
++#else
++
+ #include <linux/sched/affinity.h>
+ #include <linux/sched/autogroup.h>
+ #include <linux/sched/cpufreq.h>
+@@ -3878,4 +3882,9 @@ void sched_enq_and_set_task(struct sched_enq_and_set_ctx *ctx);
+ 
+ #include "ext.h"
+ 
++static inline int task_running_nice(struct task_struct *p)
++{
++	return (task_nice(p) > 0);
++}
++#endif /* !CONFIG_SCHED_ALT */
+ #endif /* _KERNEL_SCHED_SCHED_H */
+diff --git a/kernel/sched/stats.c b/kernel/sched/stats.c
+index eb0cdcd4d921..72224ecb5cbf 100644
+--- a/kernel/sched/stats.c
++++ b/kernel/sched/stats.c
+@@ -115,8 +115,10 @@ static int show_schedstat(struct seq_file *seq, void *v)
+ 	} else {
+ 		struct rq *rq;
+ #ifdef CONFIG_SMP
++#ifndef CONFIG_SCHED_ALT
+ 		struct sched_domain *sd;
+ 		int dcount = 0;
++#endif
+ #endif
+ 		cpu = (unsigned long)(v - 2);
+ 		rq = cpu_rq(cpu);
+@@ -133,6 +135,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
+ 		seq_printf(seq, "\n");
+ 
+ #ifdef CONFIG_SMP
++#ifndef CONFIG_SCHED_ALT
+ 		/* domain-specific stats */
+ 		rcu_read_lock();
+ 		for_each_domain(cpu, sd) {
+@@ -160,6 +163,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
+ 			    sd->ttwu_move_balance);
+ 		}
+ 		rcu_read_unlock();
++#endif
+ #endif
+ 	}
+ 	return 0;
+diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h
+index 767e098a3bd1..4cbf4d3e611e 100644
+--- a/kernel/sched/stats.h
++++ b/kernel/sched/stats.h
+@@ -89,6 +89,7 @@ static inline void rq_sched_info_depart  (struct rq *rq, unsigned long long delt
+ 
+ #endif /* CONFIG_SCHEDSTATS */
+ 
++#ifndef CONFIG_SCHED_ALT
+ #ifdef CONFIG_FAIR_GROUP_SCHED
+ struct sched_entity_stats {
+ 	struct sched_entity     se;
+@@ -105,6 +106,7 @@ __schedstats_from_se(struct sched_entity *se)
+ #endif
+ 	return &task_of(se)->stats;
+ }
++#endif /* CONFIG_SCHED_ALT */
+ 
+ #ifdef CONFIG_PSI
+ void psi_task_change(struct task_struct *task, int clear, int set);
+diff --git a/kernel/sched/syscalls.c b/kernel/sched/syscalls.c
+index 24f9f90b6574..9aa01e45c920 100644
+--- a/kernel/sched/syscalls.c
++++ b/kernel/sched/syscalls.c
+@@ -16,6 +16,14 @@
+ #include "sched.h"
+ #include "autogroup.h"
+ 
++#ifdef CONFIG_SCHED_ALT
++#include "alt_core.h"
++
++static inline int __normal_prio(int policy, int rt_prio, int static_prio)
++{
++	return rt_policy(policy) ? (MAX_RT_PRIO - 1 - rt_prio) : static_prio;
++}
++#else /* !CONFIG_SCHED_ALT */
+ static inline int __normal_prio(int policy, int rt_prio, int nice)
+ {
+ 	int prio;
+@@ -29,6 +37,7 @@ static inline int __normal_prio(int policy, int rt_prio, int nice)
+ 
+ 	return prio;
+ }
++#endif /* !CONFIG_SCHED_ALT */
+ 
+ /*
+  * Calculate the expected normal priority: i.e. priority
+@@ -39,7 +48,11 @@ static inline int __normal_prio(int policy, int rt_prio, int nice)
+  */
+ static inline int normal_prio(struct task_struct *p)
+ {
++#ifdef CONFIG_SCHED_ALT
++	return __normal_prio(p->policy, p->rt_priority, p->static_prio);
++#else /* !CONFIG_SCHED_ALT */
+ 	return __normal_prio(p->policy, p->rt_priority, PRIO_TO_NICE(p->static_prio));
++#endif /* !CONFIG_SCHED_ALT */
+ }
+ 
+ /*
+@@ -64,6 +77,37 @@ static int effective_prio(struct task_struct *p)
+ 
+ void set_user_nice(struct task_struct *p, long nice)
+ {
++#ifdef CONFIG_SCHED_ALT
++	unsigned long flags;
++	struct rq *rq;
++	raw_spinlock_t *lock;
++
++	if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
++		return;
++	/*
++	 * We have to be careful, if called from sys_setpriority(),
++	 * the task might be in the middle of scheduling on another CPU.
++	 */
++	raw_spin_lock_irqsave(&p->pi_lock, flags);
++	rq = __task_access_lock(p, &lock);
++
++	p->static_prio = NICE_TO_PRIO(nice);
++	/*
++	 * The RT priorities are set via sched_setscheduler(), but we still
++	 * allow the 'normal' nice value to be set - but as expected
++	 * it won't have any effect on scheduling while the task is
++	 * not SCHED_NORMAL/SCHED_BATCH:
++	 */
++	if (task_has_rt_policy(p))
++		goto out_unlock;
++
++	p->prio = effective_prio(p);
++
++	check_task_changed(p, rq);
++out_unlock:
++	__task_access_unlock(p, lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++#else
+ 	bool queued, running;
+ 	struct rq *rq;
+ 	int old_prio;
+@@ -112,6 +156,7 @@ void set_user_nice(struct task_struct *p, long nice)
+ 	 * lowered its priority, then reschedule its CPU:
+ 	 */
+ 	p->sched_class->prio_changed(rq, p, old_prio);
++#endif /* !CONFIG_SCHED_ALT */
+ }
+ EXPORT_SYMBOL(set_user_nice);
+ 
+@@ -190,7 +235,19 @@ SYSCALL_DEFINE1(nice, int, increment)
+  */
+ int task_prio(const struct task_struct *p)
+ {
++#ifdef CONFIG_SCHED_ALT
++/*
++ * sched policy              return value    kernel prio     user prio/nice
++ *
++ * (BMQ) normal, batch, idle [0 ... 53]      [100 ... 139]   0 / [-20 ... 19] / [-7 ... 7]
++ * (PDS) normal, batch, idle [0 ... 39]      100             0 / [-20 ... 19]
++ * fifo, rr                  [-1 ... -100]   [99 ... 0]      [0 ... 99]
++ */
++	return (p->prio < MAX_RT_PRIO) ? p->prio - MAX_RT_PRIO :
++		task_sched_prio_normal(p, task_rq(p));
++#else
+ 	return p->prio - MAX_RT_PRIO;
++#endif /* !CONFIG_SCHED_ALT */
+ }
+ 
+ /**
+@@ -300,10 +357,13 @@ static void __setscheduler_params(struct task_struct *p,
+ 
+ 	p->policy = policy;
+ 
++#ifndef CONFIG_SCHED_ALT
+ 	if (dl_policy(policy)) {
+ 		__setparam_dl(p, attr);
+ 	} else if (fair_policy(policy)) {
++#endif /* !CONFIG_SCHED_ALT */
+ 		p->static_prio = NICE_TO_PRIO(attr->sched_nice);
++#ifndef CONFIG_SCHED_ALT
+ 		if (attr->sched_runtime) {
+ 			p->se.custom_slice = 1;
+ 			p->se.slice = clamp_t(u64, attr->sched_runtime,
+@@ -322,6 +382,7 @@ static void __setscheduler_params(struct task_struct *p,
+ 		/* when switching back to non-rt policy, restore timerslack */
+ 		p->timer_slack_ns = p->default_timer_slack_ns;
+ 	}
++#endif /* !CONFIG_SCHED_ALT */
+ 
+ 	/*
+ 	 * __sched_setscheduler() ensures attr->sched_priority == 0 when
+@@ -330,7 +391,9 @@ static void __setscheduler_params(struct task_struct *p,
+ 	 */
+ 	p->rt_priority = attr->sched_priority;
+ 	p->normal_prio = normal_prio(p);
++#ifndef CONFIG_SCHED_ALT
+ 	set_load_weight(p, true);
++#endif /* !CONFIG_SCHED_ALT */
+ }
+ 
+ /*
+@@ -346,6 +409,8 @@ static bool check_same_owner(struct task_struct *p)
+ 		uid_eq(cred->euid, pcred->uid));
+ }
+ 
++#ifndef CONFIG_SCHED_ALT
++
+ #ifdef CONFIG_UCLAMP_TASK
+ 
+ static int uclamp_validate(struct task_struct *p,
+@@ -459,6 +524,7 @@ static inline int uclamp_validate(struct task_struct *p,
+ static void __setscheduler_uclamp(struct task_struct *p,
+ 				  const struct sched_attr *attr) { }
+ #endif
++#endif /* !CONFIG_SCHED_ALT */
+ 
+ /*
+  * Allow unprivileged RT tasks to decrease priority.
+@@ -469,11 +535,13 @@ static int user_check_sched_setscheduler(struct task_struct *p,
+ 					 const struct sched_attr *attr,
+ 					 int policy, int reset_on_fork)
+ {
++#ifndef CONFIG_SCHED_ALT
+ 	if (fair_policy(policy)) {
+ 		if (attr->sched_nice < task_nice(p) &&
+ 		    !is_nice_reduction(p, attr->sched_nice))
+ 			goto req_priv;
+ 	}
++#endif /* !CONFIG_SCHED_ALT */
+ 
+ 	if (rt_policy(policy)) {
+ 		unsigned long rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO);
+@@ -488,6 +556,7 @@ static int user_check_sched_setscheduler(struct task_struct *p,
+ 			goto req_priv;
+ 	}
+ 
++#ifndef CONFIG_SCHED_ALT
+ 	/*
+ 	 * Can't set/change SCHED_DEADLINE policy at all for now
+ 	 * (safest behavior); in the future we would like to allow
+@@ -505,6 +574,7 @@ static int user_check_sched_setscheduler(struct task_struct *p,
+ 		if (!is_nice_reduction(p, task_nice(p)))
+ 			goto req_priv;
+ 	}
++#endif /* !CONFIG_SCHED_ALT */
+ 
+ 	/* Can't change other user's priorities: */
+ 	if (!check_same_owner(p))
+@@ -527,6 +597,158 @@ int __sched_setscheduler(struct task_struct *p,
+ 			 const struct sched_attr *attr,
+ 			 bool user, bool pi)
+ {
++#ifdef CONFIG_SCHED_ALT
++	const struct sched_attr dl_squash_attr = {
++		.size		= sizeof(struct sched_attr),
++		.sched_policy	= SCHED_FIFO,
++		.sched_nice	= 0,
++		.sched_priority = 99,
++	};
++	int oldpolicy = -1, policy = attr->sched_policy;
++	int retval, newprio;
++	struct balance_callback *head;
++	unsigned long flags;
++	struct rq *rq;
++	int reset_on_fork;
++	raw_spinlock_t *lock;
++
++	/* The pi code expects interrupts enabled */
++	BUG_ON(pi && in_interrupt());
++
++	/*
++	 * Alt schedule FW supports SCHED_DEADLINE by squashing it into SCHED_FIFO
++	 * at the highest priority (sched_priority 99, i.e. kernel prio 0).
++	 */
++	if (unlikely(SCHED_DEADLINE == policy)) {
++		attr = &dl_squash_attr;
++		policy = attr->sched_policy;
++	}
++recheck:
++	/* Double check policy once rq lock held */
++	if (policy < 0) {
++		reset_on_fork = p->sched_reset_on_fork;
++		policy = oldpolicy = p->policy;
++	} else {
++		reset_on_fork = !!(attr->sched_flags & SCHED_RESET_ON_FORK);
++
++		if (policy > SCHED_IDLE)
++			return -EINVAL;
++	}
++
++	if (attr->sched_flags & ~(SCHED_FLAG_ALL))
++		return -EINVAL;
++
++	/*
++	 * Valid priorities for SCHED_FIFO and SCHED_RR are
++	 * 1..MAX_RT_PRIO-1, valid priority for SCHED_NORMAL and
++	 * SCHED_BATCH and SCHED_IDLE is 0.
++	 */
++	if (attr->sched_priority < 0 ||
++	    attr->sched_priority > MAX_RT_PRIO - 1)
++		return -EINVAL;
++	if ((SCHED_RR == policy || SCHED_FIFO == policy) !=
++	    (attr->sched_priority != 0))
++		return -EINVAL;
++
++	if (user) {
++		retval = user_check_sched_setscheduler(p, attr, policy, reset_on_fork);
++		if (retval)
++			return retval;
++
++		retval = security_task_setscheduler(p);
++		if (retval)
++			return retval;
++	}
++
++	/*
++	 * Make sure no PI-waiters arrive (or leave) while we are
++	 * changing the priority of the task:
++	 */
++	raw_spin_lock_irqsave(&p->pi_lock, flags);
++
++	/*
++	 * To be able to change p->policy safely, task_access_lock()
++	 * must be called.
++	 * If task_access_lock() is used here:
++	 * for a task p which is not running, reading rq->stop is
++	 * racy but acceptable, as ->stop doesn't change much.
++	 * An enhancement could be made to read rq->stop safely.
++	 */
++	rq = __task_access_lock(p, &lock);
++
++	/*
++	 * Changing the policy of the stop threads is a very bad idea
++	 */
++	if (p == rq->stop) {
++		retval = -EINVAL;
++		goto unlock;
++	}
++
++	/*
++	 * If not changing anything there's no need to proceed further:
++	 */
++	if (unlikely(policy == p->policy)) {
++		if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
++			goto change;
++		if (!rt_policy(policy) &&
++		    NICE_TO_PRIO(attr->sched_nice) != p->static_prio)
++			goto change;
++
++		p->sched_reset_on_fork = reset_on_fork;
++		retval = 0;
++		goto unlock;
++	}
++change:
++
++	/* Re-check policy now with rq lock held */
++	if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
++		policy = oldpolicy = -1;
++		__task_access_unlock(p, lock);
++		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++		goto recheck;
++	}
++
++	p->sched_reset_on_fork = reset_on_fork;
++
++	newprio = __normal_prio(policy, attr->sched_priority, NICE_TO_PRIO(attr->sched_nice));
++	if (pi) {
++		/*
++		 * Take priority boosted tasks into account. If the new
++		 * effective priority is unchanged, we just store the new
++		 * normal parameters and do not touch the scheduler class and
++		 * the runqueue. This will be done when the task deboost
++		 * itself.
++		 */
++		newprio = rt_effective_prio(p, newprio);
++	}
++
++	if (!(attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)) {
++		__setscheduler_params(p, attr);
++		__setscheduler_prio(p, newprio);
++	}
++
++	check_task_changed(p, rq);
++
++	/* Avoid rq from going away on us: */
++	preempt_disable();
++	head = splice_balance_callbacks(rq);
++	__task_access_unlock(p, lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++
++	if (pi)
++		rt_mutex_adjust_pi(p);
++
++	/* Run balance callbacks after we've adjusted the PI chain: */
++	balance_callbacks(rq, head);
++	preempt_enable();
++
++	return 0;
++
++unlock:
++	__task_access_unlock(p, lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++	return retval;
++#else /* !CONFIG_SCHED_ALT */
+ 	int oldpolicy = -1, policy = attr->sched_policy;
+ 	int retval, oldprio, newprio, queued, running;
+ 	const struct sched_class *prev_class, *next_class;
+@@ -764,6 +986,7 @@ int __sched_setscheduler(struct task_struct *p,
+ 	if (cpuset_locked)
+ 		cpuset_unlock();
+ 	return retval;
++#endif /* !CONFIG_SCHED_ALT */
+ }
+ 
+ static int _sched_setscheduler(struct task_struct *p, int policy,
+@@ -775,8 +998,10 @@ static int _sched_setscheduler(struct task_struct *p, int policy,
+ 		.sched_nice	= PRIO_TO_NICE(p->static_prio),
+ 	};
+ 
++#ifndef CONFIG_SCHED_ALT
+ 	if (p->se.custom_slice)
+ 		attr.sched_runtime = p->se.slice;
++#endif /* !CONFIG_SCHED_ALT */
+ 
+ 	/* Fixup the legacy SCHED_RESET_ON_FORK hack. */
+ 	if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) {
+@@ -944,13 +1169,18 @@ static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *a
+ 
+ static void get_params(struct task_struct *p, struct sched_attr *attr)
+ {
+-	if (task_has_dl_policy(p)) {
++#ifndef CONFIG_SCHED_ALT
++	if (task_has_dl_policy(p))
+ 		__getparam_dl(p, attr);
+-	} else if (task_has_rt_policy(p)) {
++	else
++#endif
++	if (task_has_rt_policy(p)) {
+ 		attr->sched_priority = p->rt_priority;
+ 	} else {
+ 		attr->sched_nice = task_nice(p);
++#ifndef CONFIG_SCHED_ALT
+ 		attr->sched_runtime = p->se.slice;
++#endif
+ 	}
+ }
+ 
+@@ -1170,6 +1400,7 @@ SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
+ #ifdef CONFIG_SMP
+ int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask)
+ {
++#ifndef CONFIG_SCHED_ALT
+ 	/*
+ 	 * If the task isn't a deadline task or admission control is
+ 	 * disabled then we don't care about affinity changes.
+@@ -1186,6 +1417,7 @@ int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask)
+ 	guard(rcu)();
+ 	if (!cpumask_subset(task_rq(p)->rd->span, mask))
+ 		return -EBUSY;
++#endif
+ 
+ 	return 0;
+ }
+@@ -1210,9 +1442,11 @@ int __sched_setaffinity(struct task_struct *p, struct affinity_context *ctx)
+ 	ctx->new_mask = new_mask;
+ 	ctx->flags |= SCA_CHECK;
+ 
++#ifndef CONFIG_SCHED_ALT
+ 	retval = dl_task_check_affinity(p, new_mask);
+ 	if (retval)
+ 		goto out_free_new_mask;
++#endif
+ 
+ 	retval = __set_cpus_allowed_ptr(p, ctx);
+ 	if (retval)
+@@ -1392,13 +1626,34 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
+ 
+ static void do_sched_yield(void)
+ {
+-	struct rq_flags rf;
+ 	struct rq *rq;
++	struct rq_flags rf;
++
++#ifdef CONFIG_SCHED_ALT
++	struct task_struct *p;
++
++	if (!sched_yield_type)
++		return;
+ 
+ 	rq = this_rq_lock_irq(&rf);
+ 
++	schedstat_inc(rq->yld_count);
++
++	p = current;
++	if (rt_task(p)) {
++		if (task_on_rq_queued(p))
++			requeue_task(p, rq);
++	} else if (rq->nr_running > 1) {
++		do_sched_yield_type_1(p, rq);
++		if (task_on_rq_queued(p))
++			requeue_task(p, rq);
++	}
++#else /* !CONFIG_SCHED_ALT */
++	rq = this_rq_lock_irq(&rf);
++
+ 	schedstat_inc(rq->yld_count);
+ 	current->sched_class->yield_task(rq);
++#endif /* !CONFIG_SCHED_ALT */
+ 
+ 	preempt_disable();
+ 	rq_unlock_irq(rq, &rf);
+@@ -1467,6 +1722,9 @@ EXPORT_SYMBOL(yield);
+  */
+ int __sched yield_to(struct task_struct *p, bool preempt)
+ {
++#ifdef CONFIG_SCHED_ALT
++	return 0;
++#else /* !CONFIG_SCHED_ALT */
+ 	struct task_struct *curr = current;
+ 	struct rq *rq, *p_rq;
+ 	int yielded = 0;
+@@ -1512,6 +1770,7 @@ int __sched yield_to(struct task_struct *p, bool preempt)
+ 		schedule();
+ 
+ 	return yielded;
++#endif /* !CONFIG_SCHED_ALT */
+ }
+ EXPORT_SYMBOL_GPL(yield_to);
+ 
+@@ -1532,7 +1791,9 @@ SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
+ 	case SCHED_RR:
+ 		ret = MAX_RT_PRIO-1;
+ 		break;
++#ifndef CONFIG_SCHED_ALT
+ 	case SCHED_DEADLINE:
++#endif
+ 	case SCHED_NORMAL:
+ 	case SCHED_BATCH:
+ 	case SCHED_IDLE:
+@@ -1560,7 +1821,9 @@ SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
+ 	case SCHED_RR:
+ 		ret = 1;
+ 		break;
++#ifndef CONFIG_SCHED_ALT
+ 	case SCHED_DEADLINE:
++#endif
+ 	case SCHED_NORMAL:
+ 	case SCHED_BATCH:
+ 	case SCHED_IDLE:
+@@ -1572,7 +1835,9 @@ SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
+ 
+ static int sched_rr_get_interval(pid_t pid, struct timespec64 *t)
+ {
++#ifndef CONFIG_SCHED_ALT
+ 	unsigned int time_slice = 0;
++#endif
+ 	int retval;
+ 
+ 	if (pid < 0)
+@@ -1587,6 +1852,7 @@ static int sched_rr_get_interval(pid_t pid, struct timespec64 *t)
+ 		if (retval)
+ 			return retval;
+ 
++#ifndef CONFIG_SCHED_ALT
+ 		scoped_guard (task_rq_lock, p) {
+ 			struct rq *rq = scope.rq;
+ 			if (p->sched_class->get_rr_interval)
+@@ -1595,6 +1861,13 @@ static int sched_rr_get_interval(pid_t pid, struct timespec64 *t)
+ 	}
+ 
+ 	jiffies_to_timespec64(time_slice, t);
++#else
++	}
++
++	alt_sched_debug();
++
++	*t = ns_to_timespec64(sysctl_sched_base_slice);
++#endif /* !CONFIG_SCHED_ALT */
+ 	return 0;
+ }
+ 
+diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
+index 9748a4c8d668..1e2bdd70d69a 100644
+--- a/kernel/sched/topology.c
++++ b/kernel/sched/topology.c
+@@ -3,6 +3,7 @@
+  * Scheduler topology setup/handling methods
+  */
+ 
++#ifndef CONFIG_SCHED_ALT
+ #include <linux/bsearch.h>
+ 
+ DEFINE_MUTEX(sched_domains_mutex);
+@@ -1459,8 +1460,10 @@ static void asym_cpu_capacity_scan(void)
+  */
+ 
+ static int default_relax_domain_level = -1;
++#endif /* CONFIG_SCHED_ALT */
+ int sched_domain_level_max;
+ 
++#ifndef CONFIG_SCHED_ALT
+ static int __init setup_relax_domain_level(char *str)
+ {
+ 	if (kstrtoint(str, 0, &default_relax_domain_level))
+@@ -1695,6 +1698,7 @@ sd_init(struct sched_domain_topology_level *tl,
+ 
+ 	return sd;
+ }
++#endif /* CONFIG_SCHED_ALT */
+ 
+ /*
+  * Topology list, bottom-up.
+@@ -1731,6 +1735,7 @@ void __init set_sched_topology(struct sched_domain_topology_level *tl)
+ 	sched_domain_topology_saved = NULL;
+ }
+ 
++#ifndef CONFIG_SCHED_ALT
+ #ifdef CONFIG_NUMA
+ 
+ static const struct cpumask *sd_numa_mask(int cpu)
+@@ -2797,3 +2802,28 @@ void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
+ 	partition_sched_domains_locked(ndoms_new, doms_new, dattr_new);
+ 	mutex_unlock(&sched_domains_mutex);
+ }
++#else /* CONFIG_SCHED_ALT */
++DEFINE_STATIC_KEY_FALSE(sched_asym_cpucapacity);
++
++void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
++			     struct sched_domain_attr *dattr_new)
++{}
++
++#ifdef CONFIG_NUMA
++int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
++{
++	return best_mask_cpu(cpu, cpus);
++}
++
++int sched_numa_find_nth_cpu(const struct cpumask *cpus, int cpu, int node)
++{
++	return cpumask_nth(cpu, cpus);
++}
++
++const struct cpumask *sched_numa_hop_mask(unsigned int node, unsigned int hops)
++{
++	return ERR_PTR(-EOPNOTSUPP);
++}
++EXPORT_SYMBOL_GPL(sched_numa_hop_mask);
++#endif /* CONFIG_NUMA */
++#endif
+diff --git a/kernel/sysctl.c b/kernel/sysctl.c
+index 79e6cb1d5c48..61bc0352e233 100644
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -92,6 +92,10 @@ EXPORT_SYMBOL_GPL(sysctl_long_vals);
+ 
+ /* Constants used for minimum and maximum */
+ 
++#ifdef CONFIG_SCHED_ALT
++extern int sched_yield_type;
++#endif
++
+ #ifdef CONFIG_PERF_EVENTS
+ static const int six_hundred_forty_kb = 640 * 1024;
+ #endif
+@@ -1907,6 +1911,17 @@ static struct ctl_table kern_table[] = {
+ 		.proc_handler	= proc_dointvec,
+ 	},
+ #endif
++#ifdef CONFIG_SCHED_ALT
++	{
++		.procname	= "yield_type",
++		.data		= &sched_yield_type,
++		.maxlen		= sizeof (int),
++		.mode		= 0644,
++		.proc_handler	= &proc_dointvec_minmax,
++		.extra1		= SYSCTL_ZERO,
++		.extra2		= SYSCTL_TWO,
++	},
++#endif
+ #if defined(CONFIG_S390) && defined(CONFIG_SMP)
+ 	{
+ 		.procname	= "spin_retry",
+diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
+index 6bcee4704059..cf88205fd4a2 100644
+--- a/kernel/time/posix-cpu-timers.c
++++ b/kernel/time/posix-cpu-timers.c
+@@ -223,7 +223,7 @@ static void task_sample_cputime(struct task_struct *p, u64 *samples)
+ 	u64 stime, utime;
+ 
+ 	task_cputime(p, &utime, &stime);
+-	store_samples(samples, stime, utime, p->se.sum_exec_runtime);
++	store_samples(samples, stime, utime, tsk_seruntime(p));
+ }
+ 
+ static void proc_sample_cputime_atomic(struct task_cputime_atomic *at,
+@@ -830,6 +830,7 @@ static void collect_posix_cputimers(struct posix_cputimers *pct, u64 *samples,
+ 	}
+ }
+ 
++#ifndef CONFIG_SCHED_ALT
+ static inline void check_dl_overrun(struct task_struct *tsk)
+ {
+ 	if (tsk->dl.dl_overrun) {
+@@ -837,6 +838,7 @@ static inline void check_dl_overrun(struct task_struct *tsk)
+ 		send_signal_locked(SIGXCPU, SEND_SIG_PRIV, tsk, PIDTYPE_TGID);
+ 	}
+ }
++#endif
+ 
+ static bool check_rlimit(u64 time, u64 limit, int signo, bool rt, bool hard)
+ {
+@@ -864,8 +866,10 @@ static void check_thread_timers(struct task_struct *tsk,
+ 	u64 samples[CPUCLOCK_MAX];
+ 	unsigned long soft;
+ 
++#ifndef CONFIG_SCHED_ALT
+ 	if (dl_task(tsk))
+ 		check_dl_overrun(tsk);
++#endif
+ 
+ 	if (expiry_cache_is_inactive(pct))
+ 		return;
+@@ -879,7 +883,7 @@ static void check_thread_timers(struct task_struct *tsk,
+ 	soft = task_rlimit(tsk, RLIMIT_RTTIME);
+ 	if (soft != RLIM_INFINITY) {
+ 		/* Task RT timeout is accounted in jiffies. RTTIME is usec */
+-		unsigned long rttime = tsk->rt.timeout * (USEC_PER_SEC / HZ);
++		unsigned long rttime = tsk_rttimeout(tsk) * (USEC_PER_SEC / HZ);
+ 		unsigned long hard = task_rlimit_max(tsk, RLIMIT_RTTIME);
+ 
+ 		/* At the hard limit, send SIGKILL. No further action. */
+@@ -1115,8 +1119,10 @@ static inline bool fastpath_timer_check(struct task_struct *tsk)
+ 			return true;
+ 	}
+ 
++#ifndef CONFIG_SCHED_ALT
+ 	if (dl_task(tsk) && tsk->dl.dl_overrun)
+ 		return true;
++#endif
+ 
+ 	return false;
+ }
+diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
+index 1469dd8075fa..803527a0e48a 100644
+--- a/kernel/trace/trace_selftest.c
++++ b/kernel/trace/trace_selftest.c
+@@ -1419,10 +1419,15 @@ static int trace_wakeup_test_thread(void *data)
+ {
+ 	/* Make this a -deadline thread */
+ 	static const struct sched_attr attr = {
++#ifdef CONFIG_SCHED_ALT
++		/* No deadline on BMQ/PDS, use RR */
++		.sched_policy = SCHED_RR,
++#else
+ 		.sched_policy = SCHED_DEADLINE,
+ 		.sched_runtime = 100000ULL,
+ 		.sched_deadline = 10000000ULL,
+ 		.sched_period = 10000000ULL
++#endif
+ 	};
+ 	struct wakeup_test_data *x = data;
+ 
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index 9949ffad8df0..90eac9d802a8 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -1247,6 +1247,7 @@ static bool kick_pool(struct worker_pool *pool)
+ 
+ 	p = worker->task;
+ 
++#ifndef CONFIG_SCHED_ALT
+ #ifdef CONFIG_SMP
+ 	/*
+ 	 * Idle @worker is about to execute @work and waking up provides an
+@@ -1276,6 +1277,8 @@ static bool kick_pool(struct worker_pool *pool)
+ 		}
+ 	}
+ #endif
++#endif /* !CONFIG_SCHED_ALT */
++
+ 	wake_up_process(p);
+ 	return true;
+ }
+@@ -1404,7 +1407,11 @@ void wq_worker_running(struct task_struct *task)
+ 	 * CPU intensive auto-detection cares about how long a work item hogged
+ 	 * CPU without sleeping. Reset the starting timestamp on wakeup.
+ 	 */
++#ifdef CONFIG_SCHED_ALT
++	worker->current_at = worker->task->sched_time;
++#else
+ 	worker->current_at = worker->task->se.sum_exec_runtime;
++#endif
+ 
+ 	WRITE_ONCE(worker->sleeping, 0);
+ }
+@@ -1489,7 +1496,11 @@ void wq_worker_tick(struct task_struct *task)
+ 	 * We probably want to make this prettier in the future.
+ 	 */
+ 	if ((worker->flags & WORKER_NOT_RUNNING) || READ_ONCE(worker->sleeping) ||
++#ifdef CONFIG_SCHED_ALT
++	    worker->task->sched_time - worker->current_at <
++#else
+ 	    worker->task->se.sum_exec_runtime - worker->current_at <
++#endif
+ 	    wq_cpu_intensive_thresh_us * NSEC_PER_USEC)
+ 		return;
+ 
+@@ -3157,7 +3168,11 @@ __acquires(&pool->lock)
+ 	worker->current_func = work->func;
+ 	worker->current_pwq = pwq;
+ 	if (worker->task)
++#ifdef CONFIG_SCHED_ALT
++		worker->current_at = worker->task->sched_time;
++#else
+ 		worker->current_at = worker->task->se.sum_exec_runtime;
++#endif
+ 	work_data = *work_data_bits(work);
+ 	worker->current_color = get_work_color(work_data);
+ 

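The kernel/workqueue.c and kernel/time/posix-cpu-timers.c hunks above all make the same substitution: per-task runtime is read from p->sched_time when CONFIG_SCHED_ALT is set, and from p->se.sum_exec_runtime otherwise (the tsk_seruntime() call in posix-cpu-timers.c wraps the same choice). A minimal kernel-style sketch of that pattern, for illustration only; the helper name tsk_runtime_ns is hypothetical and does not appear in the patch:

#include <linux/sched.h>

/*
 * Pick the runtime counter the active scheduler actually maintains:
 * BMQ/PDS account into sched_time, the mainline scheduler into
 * se.sum_exec_runtime.
 */
static inline u64 tsk_runtime_ns(struct task_struct *p)
{
#ifdef CONFIG_SCHED_ALT
	return p->sched_time;
#else
	return p->se.sum_exec_runtime;
#endif
}

The patch instead inlines the #ifdef at each call site (wq_worker_running(), wq_worker_tick(), and the process_one_work() hunk), which keeps each hunk local at the cost of repeating the same three-line choice.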
diff --git a/5021_BMQ-and-PDS-gentoo-defaults.patch b/5021_BMQ-and-PDS-gentoo-defaults.patch
new file mode 100644
index 00000000..7748d78c
--- /dev/null
+++ b/5021_BMQ-and-PDS-gentoo-defaults.patch
@@ -0,0 +1,13 @@
+--- a/init/Kconfig	2024-11-13 14:45:36.566335895 -0500
++++ b/init/Kconfig	2024-11-13 14:47:02.670787774 -0500
+@@ -860,8 +860,9 @@ config UCLAMP_BUCKETS_COUNT
+ 	  If in doubt, use the default value.
+ 
+ menuconfig SCHED_ALT
++	depends on X86_64
+ 	bool "Alternative CPU Schedulers"
+-	default y
++	default n
+ 	help
+ 	  This feature enables alternative CPU schedulers.
+ 

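With the kern_table entry added above, the new knob is exposed as /proc/sys/kernel/yield_type and clamped to 0..2 by the SYSCTL_ZERO/SYSCTL_TWO bounds; what each value selects is decided by sched_yield_type inside the BMQ/PDS scheduler and is not visible in these hunks. A minimal user-space sketch for reading and setting the knob, assuming only the procfs path implied by .procname (this helper is illustrative, not shipped with the patch):

#include <stdio.h>
#include <stdlib.h>

/* Path follows from .procname = "yield_type" in kern_table. */
#define YIELD_TYPE_PATH "/proc/sys/kernel/yield_type"

int main(int argc, char **argv)
{
	FILE *f;
	int val;

	if (argc > 1) {
		val = atoi(argv[1]);
		if (val < 0 || val > 2) {	/* mirrors SYSCTL_ZERO..SYSCTL_TWO */
			fprintf(stderr, "yield_type must be 0..2\n");
			return 1;
		}
		f = fopen(YIELD_TYPE_PATH, "w");	/* writing needs root */
		if (!f) { perror(YIELD_TYPE_PATH); return 1; }
		fprintf(f, "%d\n", val);
		fclose(f);
	}

	f = fopen(YIELD_TYPE_PATH, "r");
	if (!f) { perror(YIELD_TYPE_PATH); return 1; }
	if (fscanf(f, "%d", &val) == 1)
		printf("kernel.yield_type = %d\n", val);
	fclose(f);
	return 0;
}

The file only exists on kernels built with CONFIG_SCHED_ALT, so a missing path is the expected result on a stock kernel.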
